From e6918187568dbd01842d8d1d2c808ce16a894239 Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Sun, 21 Apr 2024 13:54:28 +0200
Subject: Adding upstream version 18.2.2.

Signed-off-by: Daniel Baumann
---
 src/spdk/dpdk/drivers/net/Makefile | 84 + src/spdk/dpdk/drivers/net/af_packet/Makefile | 27 + src/spdk/dpdk/drivers/net/af_packet/meson.build | 8 + .../dpdk/drivers/net/af_packet/rte_eth_af_packet.c | 1090 + .../net/af_packet/rte_pmd_af_packet_version.map | 3 + src/spdk/dpdk/drivers/net/af_xdp/Makefile | 26 + src/spdk/dpdk/drivers/net/af_xdp/af_xdp_deps.h | 15 + src/spdk/dpdk/drivers/net/af_xdp/meson.build | 16 + src/spdk/dpdk/drivers/net/af_xdp/rte_eth_af_xdp.c | 1382 + .../drivers/net/af_xdp/rte_pmd_af_xdp_version.map | 3 + src/spdk/dpdk/drivers/net/ark/Makefile | 39 + src/spdk/dpdk/drivers/net/ark/ark_ddm.c | 130 + src/spdk/dpdk/drivers/net/ark/ark_ddm.h | 151 + src/spdk/dpdk/drivers/net/ark/ark_ethdev.c | 1027 + src/spdk/dpdk/drivers/net/ark/ark_ethdev_rx.c | 680 + src/spdk/dpdk/drivers/net/ark/ark_ethdev_rx.h | 36 + src/spdk/dpdk/drivers/net/ark/ark_ethdev_tx.c | 436 + src/spdk/dpdk/drivers/net/ark/ark_ethdev_tx.h | 30 + src/spdk/dpdk/drivers/net/ark/ark_ext.h | 90 + src/spdk/dpdk/drivers/net/ark/ark_global.h | 134 + src/spdk/dpdk/drivers/net/ark/ark_logs.h | 93 + src/spdk/dpdk/drivers/net/ark/ark_mpu.c | 152 + src/spdk/dpdk/drivers/net/ark/ark_mpu.h | 125 + src/spdk/dpdk/drivers/net/ark/ark_pktchkr.c | 450 + src/spdk/dpdk/drivers/net/ark/ark_pktchkr.h | 88 + src/spdk/dpdk/drivers/net/ark/ark_pktdir.c | 56 + src/spdk/dpdk/drivers/net/ark/ark_pktdir.h | 41 + src/spdk/dpdk/drivers/net/ark/ark_pktgen.c | 472 + src/spdk/dpdk/drivers/net/ark/ark_pktgen.h | 79 + src/spdk/dpdk/drivers/net/ark/ark_rqp.c | 68 + src/spdk/dpdk/drivers/net/ark/ark_rqp.h | 57 + src/spdk/dpdk/drivers/net/ark/ark_udm.c | 197 + src/spdk/dpdk/drivers/net/ark/ark_udm.h | 163 + src/spdk/dpdk/drivers/net/ark/meson.build | 13 + .../dpdk/drivers/net/ark/rte_pmd_ark_version.map | 3 + src/spdk/dpdk/drivers/net/atlantic/Makefile | 34 + src/spdk/dpdk/drivers/net/atlantic/atl_common.h | 96 + src/spdk/dpdk/drivers/net/atlantic/atl_ethdev.c | 1941 + src/spdk/dpdk/drivers/net/atlantic/atl_ethdev.h | 119 + src/spdk/dpdk/drivers/net/atlantic/atl_hw_regs.c | 52 + src/spdk/dpdk/drivers/net/atlantic/atl_hw_regs.h | 53 + src/spdk/dpdk/drivers/net/atlantic/atl_logs.h | 31 + src/spdk/dpdk/drivers/net/atlantic/atl_rxtx.c | 1350 + src/spdk/dpdk/drivers/net/atlantic/atl_types.h | 234 + .../dpdk/drivers/net/atlantic/hw_atl/hw_atl_b0.c | 513 + .../dpdk/drivers/net/atlantic/hw_atl/hw_atl_b0.h | 42 + .../net/atlantic/hw_atl/hw_atl_b0_internal.h | 145 + .../dpdk/drivers/net/atlantic/hw_atl/hw_atl_llh.c | 1490 + .../dpdk/drivers/net/atlantic/hw_atl/hw_atl_llh.h | 714 + .../net/atlantic/hw_atl/hw_atl_llh_internal.h | 2407 ++ .../drivers/net/atlantic/hw_atl/hw_atl_utils.c | 944 + .../drivers/net/atlantic/hw_atl/hw_atl_utils.h | 654 + .../net/atlantic/hw_atl/hw_atl_utils_fw2x.c | 770 + src/spdk/dpdk/drivers/net/atlantic/meson.build | 13 + .../dpdk/drivers/net/atlantic/rte_pmd_atlantic.c | 102 + .../dpdk/drivers/net/atlantic/rte_pmd_atlantic.h | 144 + .../net/atlantic/rte_pmd_atlantic_version.map | 14 + src/spdk/dpdk/drivers/net/avp/Makefile | 29 + src/spdk/dpdk/drivers/net/avp/avp_ethdev.c | 2315 ++ src/spdk/dpdk/drivers/net/avp/avp_logs.h | 30 + src/spdk/dpdk/drivers/net/avp/meson.build | 9 + src/spdk/dpdk/drivers/net/avp/rte_avp_common.h | 382 + src/spdk/dpdk/drivers/net/avp/rte_avp_fifo.h | 118 + .../dpdk/drivers/net/avp/rte_pmd_avp_version.map |
3 + src/spdk/dpdk/drivers/net/axgbe/Makefile | 33 + src/spdk/dpdk/drivers/net/axgbe/axgbe_common.h | 1736 + src/spdk/dpdk/drivers/net/axgbe/axgbe_dev.c | 1220 + src/spdk/dpdk/drivers/net/axgbe/axgbe_ethdev.c | 1680 + src/spdk/dpdk/drivers/net/axgbe/axgbe_ethdev.h | 657 + src/spdk/dpdk/drivers/net/axgbe/axgbe_i2c.c | 341 + src/spdk/dpdk/drivers/net/axgbe/axgbe_logs.h | 26 + src/spdk/dpdk/drivers/net/axgbe/axgbe_mdio.c | 1285 + src/spdk/dpdk/drivers/net/axgbe/axgbe_phy.h | 192 + src/spdk/dpdk/drivers/net/axgbe/axgbe_phy_impl.c | 2315 ++ src/spdk/dpdk/drivers/net/axgbe/axgbe_regs.h | 229 + src/spdk/dpdk/drivers/net/axgbe/axgbe_rxtx.c | 867 + src/spdk/dpdk/drivers/net/axgbe/axgbe_rxtx.h | 191 + .../dpdk/drivers/net/axgbe/axgbe_rxtx_vec_sse.c | 93 + src/spdk/dpdk/drivers/net/axgbe/meson.build | 20 + .../drivers/net/axgbe/rte_pmd_axgbe_version.map | 3 + src/spdk/dpdk/drivers/net/bnx2x/Makefile | 33 + src/spdk/dpdk/drivers/net/bnx2x/bnx2x.c | 11953 ++++++ src/spdk/dpdk/drivers/net/bnx2x/bnx2x.h | 2101 + src/spdk/dpdk/drivers/net/bnx2x/bnx2x_ethdev.c | 817 + src/spdk/dpdk/drivers/net/bnx2x/bnx2x_ethdev.h | 80 + src/spdk/dpdk/drivers/net/bnx2x/bnx2x_logs.h | 52 + src/spdk/dpdk/drivers/net/bnx2x/bnx2x_osal.h | 35 + src/spdk/dpdk/drivers/net/bnx2x/bnx2x_rxtx.c | 510 + src/spdk/dpdk/drivers/net/bnx2x/bnx2x_rxtx.h | 81 + src/spdk/dpdk/drivers/net/bnx2x/bnx2x_stats.c | 1585 + src/spdk/dpdk/drivers/net/bnx2x/bnx2x_stats.h | 609 + src/spdk/dpdk/drivers/net/bnx2x/bnx2x_vfpf.c | 763 + src/spdk/dpdk/drivers/net/bnx2x/bnx2x_vfpf.h | 338 + src/spdk/dpdk/drivers/net/bnx2x/ecore_fw_defs.h | 419 + src/spdk/dpdk/drivers/net/bnx2x/ecore_hsi.h | 6718 ++++ src/spdk/dpdk/drivers/net/bnx2x/ecore_init.h | 821 + src/spdk/dpdk/drivers/net/bnx2x/ecore_init_ops.h | 845 + src/spdk/dpdk/drivers/net/bnx2x/ecore_mfw_req.h | 192 + src/spdk/dpdk/drivers/net/bnx2x/ecore_reg.h | 5996 +++ src/spdk/dpdk/drivers/net/bnx2x/ecore_sp.c | 5438 +++ src/spdk/dpdk/drivers/net/bnx2x/ecore_sp.h | 1977 + src/spdk/dpdk/drivers/net/bnx2x/elink.c | 15236 ++++++++ src/spdk/dpdk/drivers/net/bnx2x/elink.h | 700 + src/spdk/dpdk/drivers/net/bnx2x/meson.build | 15 + .../drivers/net/bnx2x/rte_pmd_bnx2x_version.map | 3 + src/spdk/dpdk/drivers/net/bnxt/Makefile | 77 + src/spdk/dpdk/drivers/net/bnxt/bnxt.h | 783 + src/spdk/dpdk/drivers/net/bnxt/bnxt_cpr.c | 289 + src/spdk/dpdk/drivers/net/bnxt/bnxt_cpr.h | 129 + src/spdk/dpdk/drivers/net/bnxt/bnxt_ethdev.c | 5681 +++ src/spdk/dpdk/drivers/net/bnxt/bnxt_filter.c | 199 + src/spdk/dpdk/drivers/net/bnxt/bnxt_filter.h | 175 + src/spdk/dpdk/drivers/net/bnxt/bnxt_flow.c | 2054 + src/spdk/dpdk/drivers/net/bnxt/bnxt_hwrm.c | 5413 +++ src/spdk/dpdk/drivers/net/bnxt/bnxt_hwrm.h | 273 + src/spdk/dpdk/drivers/net/bnxt/bnxt_irq.c | 193 + src/spdk/dpdk/drivers/net/bnxt/bnxt_irq.h | 24 + src/spdk/dpdk/drivers/net/bnxt/bnxt_nvm_defs.h | 70 + src/spdk/dpdk/drivers/net/bnxt/bnxt_ring.c | 851 + src/spdk/dpdk/drivers/net/bnxt/bnxt_ring.h | 127 + src/spdk/dpdk/drivers/net/bnxt/bnxt_rxq.c | 572 + src/spdk/dpdk/drivers/net/bnxt/bnxt_rxq.h | 65 + src/spdk/dpdk/drivers/net/bnxt/bnxt_rxr.c | 1031 + src/spdk/dpdk/drivers/net/bnxt/bnxt_rxr.h | 244 + src/spdk/dpdk/drivers/net/bnxt/bnxt_rxtx_vec_sse.c | 516 + src/spdk/dpdk/drivers/net/bnxt/bnxt_stats.c | 1029 + src/spdk/dpdk/drivers/net/bnxt/bnxt_stats.h | 31 + src/spdk/dpdk/drivers/net/bnxt/bnxt_txq.c | 166 + src/spdk/dpdk/drivers/net/bnxt/bnxt_txq.h | 48 + src/spdk/dpdk/drivers/net/bnxt/bnxt_txr.c | 528 + src/spdk/dpdk/drivers/net/bnxt/bnxt_txr.h | 101 + 
src/spdk/dpdk/drivers/net/bnxt/bnxt_util.c | 29 + src/spdk/dpdk/drivers/net/bnxt/bnxt_util.h | 16 + src/spdk/dpdk/drivers/net/bnxt/bnxt_vnic.c | 252 + src/spdk/dpdk/drivers/net/bnxt/bnxt_vnic.h | 72 + .../dpdk/drivers/net/bnxt/hsi_struct_def_dpdk.h | 38674 +++++++++++++++++++ src/spdk/dpdk/drivers/net/bnxt/meson.build | 50 + src/spdk/dpdk/drivers/net/bnxt/rte_pmd_bnxt.c | 910 + src/spdk/dpdk/drivers/net/bnxt/rte_pmd_bnxt.h | 326 + .../dpdk/drivers/net/bnxt/rte_pmd_bnxt_version.map | 22 + src/spdk/dpdk/drivers/net/bnxt/tf_core/bitalloc.c | 364 + src/spdk/dpdk/drivers/net/bnxt/tf_core/bitalloc.h | 119 + src/spdk/dpdk/drivers/net/bnxt/tf_core/hwrm_tf.h | 972 + src/spdk/dpdk/drivers/net/bnxt/tf_core/lookup3.h | 162 + src/spdk/dpdk/drivers/net/bnxt/tf_core/rand.c | 47 + src/spdk/dpdk/drivers/net/bnxt/tf_core/rand.h | 36 + src/spdk/dpdk/drivers/net/bnxt/tf_core/stack.c | 107 + src/spdk/dpdk/drivers/net/bnxt/tf_core/stack.h | 107 + src/spdk/dpdk/drivers/net/bnxt/tf_core/tf_core.c | 656 + src/spdk/dpdk/drivers/net/bnxt/tf_core/tf_core.h | 1385 + src/spdk/dpdk/drivers/net/bnxt/tf_core/tf_em.c | 515 + src/spdk/dpdk/drivers/net/bnxt/tf_core/tf_em.h | 117 + .../drivers/net/bnxt/tf_core/tf_ext_flow_handle.h | 166 + src/spdk/dpdk/drivers/net/bnxt/tf_core/tf_msg.c | 1251 + src/spdk/dpdk/drivers/net/bnxt/tf_core/tf_msg.h | 257 + .../dpdk/drivers/net/bnxt/tf_core/tf_msg_common.h | 47 + .../dpdk/drivers/net/bnxt/tf_core/tf_project.h | 24 + .../dpdk/drivers/net/bnxt/tf_core/tf_resources.h | 542 + src/spdk/dpdk/drivers/net/bnxt/tf_core/tf_rm.c | 3294 ++ src/spdk/dpdk/drivers/net/bnxt/tf_core/tf_rm.h | 321 + .../dpdk/drivers/net/bnxt/tf_core/tf_session.h | 294 + src/spdk/dpdk/drivers/net/bnxt/tf_core/tf_tbl.c | 1803 + src/spdk/dpdk/drivers/net/bnxt/tf_core/tf_tbl.h | 126 + src/spdk/dpdk/drivers/net/bnxt/tf_core/tfp.c | 163 + src/spdk/dpdk/drivers/net/bnxt/tf_core/tfp.h | 188 + .../dpdk/drivers/net/bnxt/tf_ulp/bnxt_tf_common.h | 54 + src/spdk/dpdk/drivers/net/bnxt/tf_ulp/bnxt_ulp.c | 798 + src/spdk/dpdk/drivers/net/bnxt/tf_ulp/bnxt_ulp.h | 130 + .../dpdk/drivers/net/bnxt/tf_ulp/bnxt_ulp_flow.c | 287 + .../dpdk/drivers/net/bnxt/tf_ulp/ulp_flow_db.c | 827 + .../dpdk/drivers/net/bnxt/tf_ulp/ulp_flow_db.h | 213 + src/spdk/dpdk/drivers/net/bnxt/tf_ulp/ulp_mapper.c | 2122 + src/spdk/dpdk/drivers/net/bnxt/tf_ulp/ulp_mapper.h | 123 + .../dpdk/drivers/net/bnxt/tf_ulp/ulp_mark_mgr.c | 311 + .../dpdk/drivers/net/bnxt/tf_ulp/ulp_mark_mgr.h | 112 + .../dpdk/drivers/net/bnxt/tf_ulp/ulp_matcher.c | 150 + .../dpdk/drivers/net/bnxt/tf_ulp/ulp_matcher.h | 31 + .../dpdk/drivers/net/bnxt/tf_ulp/ulp_port_db.c | 263 + .../dpdk/drivers/net/bnxt/tf_ulp/ulp_port_db.h | 134 + .../dpdk/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c | 1302 + .../dpdk/drivers/net/bnxt/tf_ulp/ulp_rte_parser.h | 162 + .../dpdk/drivers/net/bnxt/tf_ulp/ulp_template_db.c | 1784 + .../dpdk/drivers/net/bnxt/tf_ulp/ulp_template_db.h | 456 + .../net/bnxt/tf_ulp/ulp_template_field_db.h | 63 + .../drivers/net/bnxt/tf_ulp/ulp_template_struct.h | 313 + src/spdk/dpdk/drivers/net/bnxt/tf_ulp/ulp_utils.c | 554 + src/spdk/dpdk/drivers/net/bnxt/tf_ulp/ulp_utils.h | 294 + src/spdk/dpdk/drivers/net/bonding/Makefile | 36 + .../drivers/net/bonding/eth_bond_8023ad_private.h | 308 + .../dpdk/drivers/net/bonding/eth_bond_private.h | 324 + src/spdk/dpdk/drivers/net/bonding/meson.build | 11 + src/spdk/dpdk/drivers/net/bonding/rte_eth_bond.h | 351 + .../dpdk/drivers/net/bonding/rte_eth_bond_8023ad.c | 1719 + .../dpdk/drivers/net/bonding/rte_eth_bond_8023ad.h | 334 + 
.../dpdk/drivers/net/bonding/rte_eth_bond_alb.c | 271 + .../dpdk/drivers/net/bonding/rte_eth_bond_alb.h | 113 + .../dpdk/drivers/net/bonding/rte_eth_bond_api.c | 1052 + .../dpdk/drivers/net/bonding/rte_eth_bond_args.c | 301 + .../dpdk/drivers/net/bonding/rte_eth_bond_flow.c | 245 + .../dpdk/drivers/net/bonding/rte_eth_bond_pmd.c | 3760 ++ .../drivers/net/bonding/rte_pmd_bond_version.map | 33 + src/spdk/dpdk/drivers/net/cxgbe/Makefile | 57 + src/spdk/dpdk/drivers/net/cxgbe/base/adapter.h | 835 + src/spdk/dpdk/drivers/net/cxgbe/base/common.h | 554 + .../dpdk/drivers/net/cxgbe/base/t4_chip_type.h | 60 + src/spdk/dpdk/drivers/net/cxgbe/base/t4_hw.c | 5701 +++ src/spdk/dpdk/drivers/net/cxgbe/base/t4_hw.h | 144 + src/spdk/dpdk/drivers/net/cxgbe/base/t4_msg.h | 625 + .../dpdk/drivers/net/cxgbe/base/t4_pci_id_tbl.h | 186 + src/spdk/dpdk/drivers/net/cxgbe/base/t4_regs.h | 966 + .../dpdk/drivers/net/cxgbe/base/t4_regs_values.h | 155 + src/spdk/dpdk/drivers/net/cxgbe/base/t4_tcb.h | 47 + .../dpdk/drivers/net/cxgbe/base/t4fw_interface.h | 2452 ++ src/spdk/dpdk/drivers/net/cxgbe/base/t4vf_hw.c | 880 + src/spdk/dpdk/drivers/net/cxgbe/base/t4vf_hw.h | 15 + src/spdk/dpdk/drivers/net/cxgbe/clip_tbl.c | 193 + src/spdk/dpdk/drivers/net/cxgbe/clip_tbl.h | 31 + src/spdk/dpdk/drivers/net/cxgbe/cxgbe.h | 111 + src/spdk/dpdk/drivers/net/cxgbe/cxgbe_compat.h | 260 + src/spdk/dpdk/drivers/net/cxgbe/cxgbe_ethdev.c | 1259 + src/spdk/dpdk/drivers/net/cxgbe/cxgbe_filter.c | 1433 + src/spdk/dpdk/drivers/net/cxgbe/cxgbe_filter.h | 276 + src/spdk/dpdk/drivers/net/cxgbe/cxgbe_flow.c | 1458 + src/spdk/dpdk/drivers/net/cxgbe/cxgbe_flow.h | 44 + src/spdk/dpdk/drivers/net/cxgbe/cxgbe_main.c | 2238 ++ src/spdk/dpdk/drivers/net/cxgbe/cxgbe_ofld.h | 89 + src/spdk/dpdk/drivers/net/cxgbe/cxgbe_pfvf.h | 55 + src/spdk/dpdk/drivers/net/cxgbe/cxgbevf_ethdev.c | 217 + src/spdk/dpdk/drivers/net/cxgbe/cxgbevf_main.c | 302 + src/spdk/dpdk/drivers/net/cxgbe/l2t.c | 229 + src/spdk/dpdk/drivers/net/cxgbe/l2t.h | 58 + src/spdk/dpdk/drivers/net/cxgbe/meson.build | 17 + src/spdk/dpdk/drivers/net/cxgbe/mps_tcam.c | 241 + src/spdk/dpdk/drivers/net/cxgbe/mps_tcam.h | 52 + .../drivers/net/cxgbe/rte_pmd_cxgbe_version.map | 3 + src/spdk/dpdk/drivers/net/cxgbe/sge.c | 2658 ++ src/spdk/dpdk/drivers/net/cxgbe/smt.c | 230 + src/spdk/dpdk/drivers/net/cxgbe/smt.h | 44 + src/spdk/dpdk/drivers/net/dpaa/Makefile | 41 + src/spdk/dpdk/drivers/net/dpaa/dpaa_ethdev.c | 1674 + src/spdk/dpdk/drivers/net/dpaa/dpaa_ethdev.h | 210 + src/spdk/dpdk/drivers/net/dpaa/dpaa_rxtx.c | 1025 + src/spdk/dpdk/drivers/net/dpaa/dpaa_rxtx.h | 276 + src/spdk/dpdk/drivers/net/dpaa/meson.build | 17 + src/spdk/dpdk/drivers/net/dpaa/rte_pmd_dpaa.h | 36 + .../dpdk/drivers/net/dpaa/rte_pmd_dpaa_version.map | 14 + src/spdk/dpdk/drivers/net/dpaa2/Makefile | 47 + .../dpdk/drivers/net/dpaa2/base/dpaa2_hw_dpni.c | 387 + .../drivers/net/dpaa2/base/dpaa2_hw_dpni_annot.h | 313 + src/spdk/dpdk/drivers/net/dpaa2/dpaa2_ethdev.c | 2702 ++ src/spdk/dpdk/drivers/net/dpaa2/dpaa2_ethdev.h | 221 + src/spdk/dpdk/drivers/net/dpaa2/dpaa2_flow.c | 2016 + src/spdk/dpdk/drivers/net/dpaa2/dpaa2_mux.c | 269 + src/spdk/dpdk/drivers/net/dpaa2/dpaa2_pmd_logs.h | 40 + src/spdk/dpdk/drivers/net/dpaa2/dpaa2_ptp.c | 181 + src/spdk/dpdk/drivers/net/dpaa2/dpaa2_rxtx.c | 1643 + src/spdk/dpdk/drivers/net/dpaa2/dpaa2_sparser.c | 269 + src/spdk/dpdk/drivers/net/dpaa2/dpaa2_sparser.h | 206 + src/spdk/dpdk/drivers/net/dpaa2/mc/dpdmux.c | 929 + src/spdk/dpdk/drivers/net/dpaa2/mc/dpkg.c | 77 + 
src/spdk/dpdk/drivers/net/dpaa2/mc/dpni.c | 2639 ++ src/spdk/dpdk/drivers/net/dpaa2/mc/dprtc.c | 523 + src/spdk/dpdk/drivers/net/dpaa2/mc/fsl_dpdmux.h | 410 + .../dpdk/drivers/net/dpaa2/mc/fsl_dpdmux_cmd.h | 221 + src/spdk/dpdk/drivers/net/dpaa2/mc/fsl_dpkg.h | 186 + src/spdk/dpdk/drivers/net/dpaa2/mc/fsl_dpni.h | 1584 + src/spdk/dpdk/drivers/net/dpaa2/mc/fsl_dpni_cmd.h | 858 + src/spdk/dpdk/drivers/net/dpaa2/mc/fsl_dprtc.h | 109 + src/spdk/dpdk/drivers/net/dpaa2/mc/fsl_dprtc_cmd.h | 91 + src/spdk/dpdk/drivers/net/dpaa2/mc/fsl_net.h | 456 + src/spdk/dpdk/drivers/net/dpaa2/meson.build | 27 + src/spdk/dpdk/drivers/net/dpaa2/rte_pmd_dpaa2.h | 90 + .../drivers/net/dpaa2/rte_pmd_dpaa2_version.map | 18 + src/spdk/dpdk/drivers/net/e1000/Makefile | 78 + src/spdk/dpdk/drivers/net/e1000/base/README | 37 + .../drivers/net/e1000/base/e1000_80003es2lan.c | 1496 + .../drivers/net/e1000/base/e1000_80003es2lan.h | 71 + src/spdk/dpdk/drivers/net/e1000/base/e1000_82540.c | 688 + src/spdk/dpdk/drivers/net/e1000/base/e1000_82541.c | 1239 + src/spdk/dpdk/drivers/net/e1000/base/e1000_82541.h | 62 + src/spdk/dpdk/drivers/net/e1000/base/e1000_82542.c | 561 + src/spdk/dpdk/drivers/net/e1000/base/e1000_82543.c | 1524 + src/spdk/dpdk/drivers/net/e1000/base/e1000_82543.h | 27 + src/spdk/dpdk/drivers/net/e1000/base/e1000_82571.c | 2006 + src/spdk/dpdk/drivers/net/e1000/base/e1000_82571.h | 36 + src/spdk/dpdk/drivers/net/e1000/base/e1000_82575.c | 3753 ++ src/spdk/dpdk/drivers/net/e1000/base/e1000_82575.h | 493 + src/spdk/dpdk/drivers/net/e1000/base/e1000_api.c | 1353 + src/spdk/dpdk/drivers/net/e1000/base/e1000_api.h | 138 + .../dpdk/drivers/net/e1000/base/e1000_defines.h | 1485 + src/spdk/dpdk/drivers/net/e1000/base/e1000_hw.h | 1020 + src/spdk/dpdk/drivers/net/e1000/base/e1000_i210.c | 1005 + src/spdk/dpdk/drivers/net/e1000/base/e1000_i210.h | 81 + .../dpdk/drivers/net/e1000/base/e1000_ich8lan.c | 6096 +++ .../dpdk/drivers/net/e1000/base/e1000_ich8lan.h | 311 + src/spdk/dpdk/drivers/net/e1000/base/e1000_mac.c | 2220 ++ src/spdk/dpdk/drivers/net/e1000/base/e1000_mac.h | 66 + .../dpdk/drivers/net/e1000/base/e1000_manage.c | 547 + .../dpdk/drivers/net/e1000/base/e1000_manage.h | 66 + src/spdk/dpdk/drivers/net/e1000/base/e1000_mbx.c | 762 + src/spdk/dpdk/drivers/net/e1000/base/e1000_mbx.h | 76 + src/spdk/dpdk/drivers/net/e1000/base/e1000_nvm.c | 1356 + src/spdk/dpdk/drivers/net/e1000/base/e1000_nvm.h | 69 + src/spdk/dpdk/drivers/net/e1000/base/e1000_osdep.c | 54 + src/spdk/dpdk/drivers/net/e1000/base/e1000_osdep.h | 166 + src/spdk/dpdk/drivers/net/e1000/base/e1000_phy.c | 4231 ++ src/spdk/dpdk/drivers/net/e1000/base/e1000_phy.h | 312 + src/spdk/dpdk/drivers/net/e1000/base/e1000_regs.h | 666 + src/spdk/dpdk/drivers/net/e1000/base/e1000_vf.c | 560 + src/spdk/dpdk/drivers/net/e1000/base/e1000_vf.h | 266 + src/spdk/dpdk/drivers/net/e1000/base/meson.build | 37 + src/spdk/dpdk/drivers/net/e1000/e1000_ethdev.h | 530 + src/spdk/dpdk/drivers/net/e1000/e1000_logs.c | 56 + src/spdk/dpdk/drivers/net/e1000/e1000_logs.h | 57 + src/spdk/dpdk/drivers/net/e1000/em_ethdev.c | 1851 + src/spdk/dpdk/drivers/net/e1000/em_rxtx.c | 2140 + src/spdk/dpdk/drivers/net/e1000/igb_ethdev.c | 5792 +++ src/spdk/dpdk/drivers/net/e1000/igb_flow.c | 1922 + src/spdk/dpdk/drivers/net/e1000/igb_pf.c | 513 + src/spdk/dpdk/drivers/net/e1000/igb_regs.h | 194 + src/spdk/dpdk/drivers/net/e1000/igb_rxtx.c | 2965 ++ src/spdk/dpdk/drivers/net/e1000/meson.build | 17 + .../drivers/net/e1000/rte_pmd_e1000_version.map | 3 + src/spdk/dpdk/drivers/net/ena/Makefile | 30 + 
src/spdk/dpdk/drivers/net/ena/base/ena_com.c | 2935 ++ src/spdk/dpdk/drivers/net/ena/base/ena_com.h | 976 + .../drivers/net/ena/base/ena_defs/ena_admin_defs.h | 1656 + .../net/ena/base/ena_defs/ena_common_defs.h | 22 + .../net/ena/base/ena_defs/ena_eth_io_defs.h | 943 + .../drivers/net/ena/base/ena_defs/ena_gen_info.h | 7 + .../drivers/net/ena/base/ena_defs/ena_includes.h | 9 + .../drivers/net/ena/base/ena_defs/ena_regs_defs.h | 132 + src/spdk/dpdk/drivers/net/ena/base/ena_eth_com.c | 618 + src/spdk/dpdk/drivers/net/ena/base/ena_eth_com.h | 257 + src/spdk/dpdk/drivers/net/ena/base/ena_plat.h | 29 + src/spdk/dpdk/drivers/net/ena/base/ena_plat_dpdk.h | 315 + src/spdk/dpdk/drivers/net/ena/ena_ethdev.c | 2967 ++ src/spdk/dpdk/drivers/net/ena/ena_ethdev.h | 235 + src/spdk/dpdk/drivers/net/ena/ena_logs.h | 46 + src/spdk/dpdk/drivers/net/ena/ena_platform.h | 31 + src/spdk/dpdk/drivers/net/ena/meson.build | 10 + .../dpdk/drivers/net/ena/rte_pmd_ena_version.map | 3 + src/spdk/dpdk/drivers/net/enetc/Makefile | 23 + src/spdk/dpdk/drivers/net/enetc/base/enetc_hw.h | 277 + src/spdk/dpdk/drivers/net/enetc/enetc.h | 114 + src/spdk/dpdk/drivers/net/enetc/enetc_ethdev.c | 960 + src/spdk/dpdk/drivers/net/enetc/enetc_logs.h | 42 + src/spdk/dpdk/drivers/net/enetc/enetc_rxtx.c | 401 + src/spdk/dpdk/drivers/net/enetc/meson.build | 13 + .../drivers/net/enetc/rte_pmd_enetc_version.map | 3 + src/spdk/dpdk/drivers/net/enic/Makefile | 68 + src/spdk/dpdk/drivers/net/enic/base/cq_desc.h | 99 + src/spdk/dpdk/drivers/net/enic/base/cq_enet_desc.h | 252 + src/spdk/dpdk/drivers/net/enic/base/rq_enet_desc.h | 48 + src/spdk/dpdk/drivers/net/enic/base/vnic_cq.c | 78 + src/spdk/dpdk/drivers/net/enic/base/vnic_cq.h | 77 + src/spdk/dpdk/drivers/net/enic/base/vnic_dev.c | 1216 + src/spdk/dpdk/drivers/net/enic/base/vnic_dev.h | 195 + src/spdk/dpdk/drivers/net/enic/base/vnic_devcmd.h | 1166 + src/spdk/dpdk/drivers/net/enic/base/vnic_enet.h | 66 + src/spdk/dpdk/drivers/net/enic/base/vnic_flowman.h | 386 + src/spdk/dpdk/drivers/net/enic/base/vnic_intr.c | 48 + src/spdk/dpdk/drivers/net/enic/base/vnic_intr.h | 96 + src/spdk/dpdk/drivers/net/enic/base/vnic_nic.h | 60 + .../dpdk/drivers/net/enic/base/vnic_resource.h | 67 + src/spdk/dpdk/drivers/net/enic/base/vnic_rq.c | 148 + src/spdk/dpdk/drivers/net/enic/base/vnic_rq.h | 143 + src/spdk/dpdk/drivers/net/enic/base/vnic_rss.h | 27 + src/spdk/dpdk/drivers/net/enic/base/vnic_stats.h | 56 + src/spdk/dpdk/drivers/net/enic/base/vnic_wq.c | 175 + src/spdk/dpdk/drivers/net/enic/base/vnic_wq.h | 165 + src/spdk/dpdk/drivers/net/enic/base/wq_enet_desc.h | 89 + src/spdk/dpdk/drivers/net/enic/enic.h | 368 + src/spdk/dpdk/drivers/net/enic/enic_clsf.c | 502 + src/spdk/dpdk/drivers/net/enic/enic_compat.h | 76 + src/spdk/dpdk/drivers/net/enic/enic_ethdev.c | 1316 + src/spdk/dpdk/drivers/net/enic/enic_flow.c | 1795 + src/spdk/dpdk/drivers/net/enic/enic_fm_flow.c | 2463 ++ src/spdk/dpdk/drivers/net/enic/enic_main.c | 1882 + src/spdk/dpdk/drivers/net/enic/enic_res.c | 280 + src/spdk/dpdk/drivers/net/enic/enic_res.h | 72 + src/spdk/dpdk/drivers/net/enic/enic_rxtx.c | 688 + src/spdk/dpdk/drivers/net/enic/enic_rxtx_common.h | 275 + .../dpdk/drivers/net/enic/enic_rxtx_vec_avx2.c | 830 + src/spdk/dpdk/drivers/net/enic/meson.build | 35 + .../dpdk/drivers/net/enic/rte_pmd_enic_version.map | 3 + src/spdk/dpdk/drivers/net/failsafe/Makefile | 43 + src/spdk/dpdk/drivers/net/failsafe/failsafe.c | 419 + src/spdk/dpdk/drivers/net/failsafe/failsafe_args.c | 517 + src/spdk/dpdk/drivers/net/failsafe/failsafe_eal.c | 168 + 
.../dpdk/drivers/net/failsafe/failsafe_ether.c | 638 + src/spdk/dpdk/drivers/net/failsafe/failsafe_flow.c | 255 + src/spdk/dpdk/drivers/net/failsafe/failsafe_intr.c | 535 + src/spdk/dpdk/drivers/net/failsafe/failsafe_ops.c | 1511 + .../dpdk/drivers/net/failsafe/failsafe_private.h | 504 + src/spdk/dpdk/drivers/net/failsafe/failsafe_rxtx.c | 178 + src/spdk/dpdk/drivers/net/failsafe/meson.build | 21 + .../net/failsafe/rte_pmd_failsafe_version.map | 3 + src/spdk/dpdk/drivers/net/fm10k/Makefile | 77 + src/spdk/dpdk/drivers/net/fm10k/base/fm10k_api.c | 346 + src/spdk/dpdk/drivers/net/fm10k/base/fm10k_api.h | 35 + .../dpdk/drivers/net/fm10k/base/fm10k_common.c | 550 + .../dpdk/drivers/net/fm10k/base/fm10k_common.h | 23 + src/spdk/dpdk/drivers/net/fm10k/base/fm10k_mbx.c | 2225 ++ src/spdk/dpdk/drivers/net/fm10k/base/fm10k_mbx.h | 297 + src/spdk/dpdk/drivers/net/fm10k/base/fm10k_osdep.h | 139 + src/spdk/dpdk/drivers/net/fm10k/base/fm10k_pf.c | 2099 + src/spdk/dpdk/drivers/net/fm10k/base/fm10k_pf.h | 164 + src/spdk/dpdk/drivers/net/fm10k/base/fm10k_tlv.c | 887 + src/spdk/dpdk/drivers/net/fm10k/base/fm10k_tlv.h | 165 + src/spdk/dpdk/drivers/net/fm10k/base/fm10k_type.h | 854 + src/spdk/dpdk/drivers/net/fm10k/base/fm10k_vf.c | 646 + src/spdk/dpdk/drivers/net/fm10k/base/fm10k_vf.h | 68 + src/spdk/dpdk/drivers/net/fm10k/base/meson.build | 28 + src/spdk/dpdk/drivers/net/fm10k/fm10k.h | 356 + src/spdk/dpdk/drivers/net/fm10k/fm10k_ethdev.c | 3348 ++ src/spdk/dpdk/drivers/net/fm10k/fm10k_logs.h | 52 + src/spdk/dpdk/drivers/net/fm10k/fm10k_rxtx.c | 728 + src/spdk/dpdk/drivers/net/fm10k/fm10k_rxtx_vec.c | 892 + src/spdk/dpdk/drivers/net/fm10k/meson.build | 16 + .../drivers/net/fm10k/rte_pmd_fm10k_version.map | 3 + src/spdk/dpdk/drivers/net/hinic/Makefile | 67 + .../dpdk/drivers/net/hinic/base/hinic_compat.h | 279 + src/spdk/dpdk/drivers/net/hinic/base/hinic_csr.h | 135 + .../drivers/net/hinic/base/hinic_pmd_api_cmd.c | 1041 + .../drivers/net/hinic/base/hinic_pmd_api_cmd.h | 271 + .../dpdk/drivers/net/hinic/base/hinic_pmd_cfg.c | 244 + .../dpdk/drivers/net/hinic/base/hinic_pmd_cfg.h | 145 + .../dpdk/drivers/net/hinic/base/hinic_pmd_cmd.h | 469 + .../dpdk/drivers/net/hinic/base/hinic_pmd_cmdq.c | 855 + .../dpdk/drivers/net/hinic/base/hinic_pmd_cmdq.h | 243 + .../dpdk/drivers/net/hinic/base/hinic_pmd_eqs.c | 490 + .../dpdk/drivers/net/hinic/base/hinic_pmd_eqs.h | 98 + .../dpdk/drivers/net/hinic/base/hinic_pmd_hwdev.c | 1531 + .../dpdk/drivers/net/hinic/base/hinic_pmd_hwdev.h | 491 + .../dpdk/drivers/net/hinic/base/hinic_pmd_hwif.c | 554 + .../dpdk/drivers/net/hinic/base/hinic_pmd_hwif.h | 125 + .../dpdk/drivers/net/hinic/base/hinic_pmd_mbox.c | 933 + .../dpdk/drivers/net/hinic/base/hinic_pmd_mbox.h | 93 + .../dpdk/drivers/net/hinic/base/hinic_pmd_mgmt.c | 804 + .../dpdk/drivers/net/hinic/base/hinic_pmd_mgmt.h | 119 + .../dpdk/drivers/net/hinic/base/hinic_pmd_niccfg.c | 2121 + .../dpdk/drivers/net/hinic/base/hinic_pmd_niccfg.h | 944 + .../dpdk/drivers/net/hinic/base/hinic_pmd_nicio.c | 907 + .../dpdk/drivers/net/hinic/base/hinic_pmd_nicio.h | 264 + .../dpdk/drivers/net/hinic/base/hinic_pmd_wq.c | 180 + .../dpdk/drivers/net/hinic/base/hinic_pmd_wq.h | 137 + src/spdk/dpdk/drivers/net/hinic/base/meson.build | 37 + src/spdk/dpdk/drivers/net/hinic/hinic_pmd_ethdev.c | 3257 ++ src/spdk/dpdk/drivers/net/hinic/hinic_pmd_ethdev.h | 352 + src/spdk/dpdk/drivers/net/hinic/hinic_pmd_flow.c | 3272 ++ src/spdk/dpdk/drivers/net/hinic/hinic_pmd_rx.c | 1089 + src/spdk/dpdk/drivers/net/hinic/hinic_pmd_rx.h | 131 + 
src/spdk/dpdk/drivers/net/hinic/hinic_pmd_tx.c | 1334 + src/spdk/dpdk/drivers/net/hinic/hinic_pmd_tx.h | 148 + src/spdk/dpdk/drivers/net/hinic/meson.build | 14 + .../drivers/net/hinic/rte_pmd_hinic_version.map | 3 + src/spdk/dpdk/drivers/net/hns3/Makefile | 42 + src/spdk/dpdk/drivers/net/hns3/hns3_cmd.c | 572 + src/spdk/dpdk/drivers/net/hns3/hns3_cmd.h | 814 + src/spdk/dpdk/drivers/net/hns3/hns3_dcb.c | 1690 + src/spdk/dpdk/drivers/net/hns3/hns3_dcb.h | 168 + src/spdk/dpdk/drivers/net/hns3/hns3_ethdev.c | 5512 +++ src/spdk/dpdk/drivers/net/hns3/hns3_ethdev.h | 670 + src/spdk/dpdk/drivers/net/hns3/hns3_ethdev_vf.c | 2572 ++ src/spdk/dpdk/drivers/net/hns3/hns3_fdir.c | 1080 + src/spdk/dpdk/drivers/net/hns3/hns3_fdir.h | 205 + src/spdk/dpdk/drivers/net/hns3/hns3_flow.c | 1923 + src/spdk/dpdk/drivers/net/hns3/hns3_intr.c | 1169 + src/spdk/dpdk/drivers/net/hns3/hns3_intr.h | 79 + src/spdk/dpdk/drivers/net/hns3/hns3_logs.h | 34 + src/spdk/dpdk/drivers/net/hns3/hns3_mbx.c | 423 + src/spdk/dpdk/drivers/net/hns3/hns3_mbx.h | 166 + src/spdk/dpdk/drivers/net/hns3/hns3_mp.c | 214 + src/spdk/dpdk/drivers/net/hns3/hns3_mp.h | 14 + src/spdk/dpdk/drivers/net/hns3/hns3_regs.c | 375 + src/spdk/dpdk/drivers/net/hns3/hns3_regs.h | 109 + src/spdk/dpdk/drivers/net/hns3/hns3_rss.c | 603 + src/spdk/dpdk/drivers/net/hns3/hns3_rss.h | 116 + src/spdk/dpdk/drivers/net/hns3/hns3_rxtx.c | 2515 ++ src/spdk/dpdk/drivers/net/hns3/hns3_rxtx.h | 380 + src/spdk/dpdk/drivers/net/hns3/hns3_stats.c | 1010 + src/spdk/dpdk/drivers/net/hns3/hns3_stats.h | 151 + src/spdk/dpdk/drivers/net/hns3/meson.build | 30 + .../dpdk/drivers/net/hns3/rte_pmd_hns3_version.map | 3 + src/spdk/dpdk/drivers/net/i40e/Makefile | 112 + src/spdk/dpdk/drivers/net/i40e/base/README | 31 + src/spdk/dpdk/drivers/net/i40e/base/i40e_adminq.c | 1179 + src/spdk/dpdk/drivers/net/i40e/base/i40e_adminq.h | 137 + .../dpdk/drivers/net/i40e/base/i40e_adminq_cmd.h | 2973 ++ src/spdk/dpdk/drivers/net/i40e/base/i40e_alloc.h | 36 + src/spdk/dpdk/drivers/net/i40e/base/i40e_common.c | 8209 ++++ src/spdk/dpdk/drivers/net/i40e/base/i40e_dcb.c | 1410 + src/spdk/dpdk/drivers/net/i40e/base/i40e_dcb.h | 208 + src/spdk/dpdk/drivers/net/i40e/base/i40e_devids.h | 62 + src/spdk/dpdk/drivers/net/i40e/base/i40e_diag.c | 146 + src/spdk/dpdk/drivers/net/i40e/base/i40e_diag.h | 30 + src/spdk/dpdk/drivers/net/i40e/base/i40e_hmc.c | 340 + src/spdk/dpdk/drivers/net/i40e/base/i40e_hmc.h | 216 + src/spdk/dpdk/drivers/net/i40e/base/i40e_lan_hmc.c | 1382 + src/spdk/dpdk/drivers/net/i40e/base/i40e_lan_hmc.h | 171 + src/spdk/dpdk/drivers/net/i40e/base/i40e_nvm.c | 1792 + src/spdk/dpdk/drivers/net/i40e/base/i40e_osdep.h | 215 + .../dpdk/drivers/net/i40e/base/i40e_prototype.h | 644 + .../dpdk/drivers/net/i40e/base/i40e_register.h | 5438 +++ src/spdk/dpdk/drivers/net/i40e/base/i40e_status.h | 79 + src/spdk/dpdk/drivers/net/i40e/base/i40e_type.h | 2051 + src/spdk/dpdk/drivers/net/i40e/base/meson.build | 30 + src/spdk/dpdk/drivers/net/i40e/base/virtchnl.h | 761 + src/spdk/dpdk/drivers/net/i40e/i40e_ethdev.c | 13447 +++++++ src/spdk/dpdk/drivers/net/i40e/i40e_ethdev.h | 1522 + src/spdk/dpdk/drivers/net/i40e/i40e_ethdev_vf.c | 2882 ++ src/spdk/dpdk/drivers/net/i40e/i40e_fdir.c | 2339 ++ src/spdk/dpdk/drivers/net/i40e/i40e_flow.c | 5464 +++ src/spdk/dpdk/drivers/net/i40e/i40e_logs.h | 49 + src/spdk/dpdk/drivers/net/i40e/i40e_pf.c | 1607 + src/spdk/dpdk/drivers/net/i40e/i40e_pf.h | 40 + src/spdk/dpdk/drivers/net/i40e/i40e_regs.h | 968 + src/spdk/dpdk/drivers/net/i40e/i40e_rxtx.c | 3516 ++ 
src/spdk/dpdk/drivers/net/i40e/i40e_rxtx.h | 812 + .../dpdk/drivers/net/i40e/i40e_rxtx_vec_altivec.c | 616 + .../dpdk/drivers/net/i40e/i40e_rxtx_vec_avx2.c | 949 + .../dpdk/drivers/net/i40e/i40e_rxtx_vec_common.h | 255 + .../dpdk/drivers/net/i40e/i40e_rxtx_vec_neon.c | 596 + src/spdk/dpdk/drivers/net/i40e/i40e_rxtx_vec_sse.c | 763 + src/spdk/dpdk/drivers/net/i40e/i40e_tm.c | 971 + .../dpdk/drivers/net/i40e/i40e_vf_representor.c | 535 + src/spdk/dpdk/drivers/net/i40e/meson.build | 52 + src/spdk/dpdk/drivers/net/i40e/rte_pmd_i40e.c | 3231 ++ src/spdk/dpdk/drivers/net/i40e/rte_pmd_i40e.h | 1082 + .../dpdk/drivers/net/i40e/rte_pmd_i40e_version.map | 46 + src/spdk/dpdk/drivers/net/iavf/Makefile | 54 + src/spdk/dpdk/drivers/net/iavf/iavf.h | 275 + src/spdk/dpdk/drivers/net/iavf/iavf_ethdev.c | 1586 + src/spdk/dpdk/drivers/net/iavf/iavf_fdir.c | 971 + src/spdk/dpdk/drivers/net/iavf/iavf_generic_flow.c | 1044 + src/spdk/dpdk/drivers/net/iavf/iavf_generic_flow.h | 320 + src/spdk/dpdk/drivers/net/iavf/iavf_hash.c | 1236 + src/spdk/dpdk/drivers/net/iavf/iavf_log.h | 51 + src/spdk/dpdk/drivers/net/iavf/iavf_rxtx.c | 2869 ++ src/spdk/dpdk/drivers/net/iavf/iavf_rxtx.h | 534 + .../dpdk/drivers/net/iavf/iavf_rxtx_vec_avx2.c | 1541 + .../dpdk/drivers/net/iavf/iavf_rxtx_vec_common.h | 276 + src/spdk/dpdk/drivers/net/iavf/iavf_rxtx_vec_sse.c | 1191 + src/spdk/dpdk/drivers/net/iavf/iavf_vchnl.c | 1077 + src/spdk/dpdk/drivers/net/iavf/meson.build | 37 + .../dpdk/drivers/net/iavf/rte_pmd_iavf_version.map | 3 + src/spdk/dpdk/drivers/net/ice/Makefile | 96 + src/spdk/dpdk/drivers/net/ice/base/README | 22 + src/spdk/dpdk/drivers/net/ice/base/ice_acl.c | 629 + src/spdk/dpdk/drivers/net/ice/base/ice_acl.h | 206 + src/spdk/dpdk/drivers/net/ice/base/ice_acl_ctrl.c | 1185 + .../dpdk/drivers/net/ice/base/ice_adminq_cmd.h | 2975 ++ src/spdk/dpdk/drivers/net/ice/base/ice_alloc.h | 22 + src/spdk/dpdk/drivers/net/ice/base/ice_bitops.h | 405 + src/spdk/dpdk/drivers/net/ice/base/ice_common.c | 4409 +++ src/spdk/dpdk/drivers/net/ice/base/ice_common.h | 221 + src/spdk/dpdk/drivers/net/ice/base/ice_controlq.c | 1206 + src/spdk/dpdk/drivers/net/ice/base/ice_controlq.h | 100 + src/spdk/dpdk/drivers/net/ice/base/ice_dcb.c | 1441 + src/spdk/dpdk/drivers/net/ice/base/ice_dcb.h | 224 + src/spdk/dpdk/drivers/net/ice/base/ice_devids.h | 40 + src/spdk/dpdk/drivers/net/ice/base/ice_fdir.c | 1124 + src/spdk/dpdk/drivers/net/ice/base/ice_fdir.h | 239 + src/spdk/dpdk/drivers/net/ice/base/ice_flex_pipe.c | 5955 +++ src/spdk/dpdk/drivers/net/ice/base/ice_flex_pipe.h | 82 + src/spdk/dpdk/drivers/net/ice/base/ice_flex_type.h | 790 + src/spdk/dpdk/drivers/net/ice/base/ice_flow.c | 3699 ++ src/spdk/dpdk/drivers/net/ice/base/ice_flow.h | 496 + .../dpdk/drivers/net/ice/base/ice_hw_autogen.h | 9452 +++++ src/spdk/dpdk/drivers/net/ice/base/ice_lan_tx_rx.h | 2377 ++ src/spdk/dpdk/drivers/net/ice/base/ice_nvm.c | 802 + src/spdk/dpdk/drivers/net/ice/base/ice_nvm.h | 97 + src/spdk/dpdk/drivers/net/ice/base/ice_osdep.h | 427 + .../dpdk/drivers/net/ice/base/ice_protocol_type.h | 371 + src/spdk/dpdk/drivers/net/ice/base/ice_sbq_cmd.h | 93 + src/spdk/dpdk/drivers/net/ice/base/ice_sched.c | 5513 +++ src/spdk/dpdk/drivers/net/ice/base/ice_sched.h | 197 + src/spdk/dpdk/drivers/net/ice/base/ice_status.h | 48 + src/spdk/dpdk/drivers/net/ice/base/ice_switch.c | 7611 ++++ src/spdk/dpdk/drivers/net/ice/base/ice_switch.h | 492 + src/spdk/dpdk/drivers/net/ice/base/ice_type.h | 1101 + src/spdk/dpdk/drivers/net/ice/base/meson.build | 34 + 
src/spdk/dpdk/drivers/net/ice/ice_dcf.c | 658 + src/spdk/dpdk/drivers/net/ice/ice_dcf.h | 63 + src/spdk/dpdk/drivers/net/ice/ice_dcf_ethdev.c | 327 + src/spdk/dpdk/drivers/net/ice/ice_dcf_ethdev.h | 33 + src/spdk/dpdk/drivers/net/ice/ice_dcf_parent.c | 397 + src/spdk/dpdk/drivers/net/ice/ice_ethdev.c | 4600 +++ src/spdk/dpdk/drivers/net/ice/ice_ethdev.h | 524 + src/spdk/dpdk/drivers/net/ice/ice_fdir_filter.c | 2013 + src/spdk/dpdk/drivers/net/ice/ice_generic_flow.c | 2090 + src/spdk/dpdk/drivers/net/ice/ice_generic_flow.h | 556 + src/spdk/dpdk/drivers/net/ice/ice_hash.c | 588 + src/spdk/dpdk/drivers/net/ice/ice_logs.h | 51 + src/spdk/dpdk/drivers/net/ice/ice_rxtx.c | 3823 ++ src/spdk/dpdk/drivers/net/ice/ice_rxtx.h | 206 + src/spdk/dpdk/drivers/net/ice/ice_rxtx_vec_avx2.c | 838 + .../dpdk/drivers/net/ice/ice_rxtx_vec_common.h | 304 + src/spdk/dpdk/drivers/net/ice/ice_rxtx_vec_sse.c | 642 + src/spdk/dpdk/drivers/net/ice/ice_switch_filter.c | 1718 + src/spdk/dpdk/drivers/net/ice/meson.build | 42 + src/spdk/dpdk/drivers/net/ice/rte_pmd_ice.h | 231 + .../dpdk/drivers/net/ice/rte_pmd_ice_version.map | 15 + src/spdk/dpdk/drivers/net/igc/Makefile | 40 + src/spdk/dpdk/drivers/net/igc/base/README | 29 + src/spdk/dpdk/drivers/net/igc/base/igc_82571.h | 36 + src/spdk/dpdk/drivers/net/igc/base/igc_82575.h | 351 + src/spdk/dpdk/drivers/net/igc/base/igc_api.c | 1845 + src/spdk/dpdk/drivers/net/igc/base/igc_api.h | 111 + src/spdk/dpdk/drivers/net/igc/base/igc_base.c | 190 + src/spdk/dpdk/drivers/net/igc/base/igc_base.h | 127 + src/spdk/dpdk/drivers/net/igc/base/igc_defines.h | 1649 + src/spdk/dpdk/drivers/net/igc/base/igc_hw.h | 1051 + src/spdk/dpdk/drivers/net/igc/base/igc_i225.c | 1378 + src/spdk/dpdk/drivers/net/igc/base/igc_i225.h | 110 + src/spdk/dpdk/drivers/net/igc/base/igc_ich8lan.h | 296 + src/spdk/dpdk/drivers/net/igc/base/igc_mac.c | 2100 + src/spdk/dpdk/drivers/net/igc/base/igc_mac.h | 64 + src/spdk/dpdk/drivers/net/igc/base/igc_manage.c | 547 + src/spdk/dpdk/drivers/net/igc/base/igc_manage.h | 65 + src/spdk/dpdk/drivers/net/igc/base/igc_nvm.c | 1324 + src/spdk/dpdk/drivers/net/igc/base/igc_nvm.h | 69 + src/spdk/dpdk/drivers/net/igc/base/igc_osdep.c | 64 + src/spdk/dpdk/drivers/net/igc/base/igc_osdep.h | 163 + src/spdk/dpdk/drivers/net/igc/base/igc_phy.c | 4422 +++ src/spdk/dpdk/drivers/net/igc/base/igc_phy.h | 337 + src/spdk/dpdk/drivers/net/igc/base/igc_regs.h | 724 + src/spdk/dpdk/drivers/net/igc/base/meson.build | 18 + src/spdk/dpdk/drivers/net/igc/igc_ethdev.c | 2630 ++ src/spdk/dpdk/drivers/net/igc/igc_ethdev.h | 286 + src/spdk/dpdk/drivers/net/igc/igc_filter.c | 392 + src/spdk/dpdk/drivers/net/igc/igc_filter.h | 39 + src/spdk/dpdk/drivers/net/igc/igc_flow.c | 917 + src/spdk/dpdk/drivers/net/igc/igc_flow.h | 25 + src/spdk/dpdk/drivers/net/igc/igc_logs.c | 22 + src/spdk/dpdk/drivers/net/igc/igc_logs.h | 48 + src/spdk/dpdk/drivers/net/igc/igc_txrx.c | 2279 ++ src/spdk/dpdk/drivers/net/igc/igc_txrx.h | 59 + src/spdk/dpdk/drivers/net/igc/meson.build | 15 + .../dpdk/drivers/net/igc/rte_pmd_igc_version.map | 3 + src/spdk/dpdk/drivers/net/ionic/Makefile | 31 + src/spdk/dpdk/drivers/net/ionic/ionic.h | 82 + src/spdk/dpdk/drivers/net/ionic/ionic_dev.c | 579 + src/spdk/dpdk/drivers/net/ionic/ionic_dev.h | 271 + src/spdk/dpdk/drivers/net/ionic/ionic_ethdev.c | 1327 + src/spdk/dpdk/drivers/net/ionic/ionic_ethdev.h | 22 + src/spdk/dpdk/drivers/net/ionic/ionic_if.h | 2491 ++ src/spdk/dpdk/drivers/net/ionic/ionic_lif.c | 1696 + src/spdk/dpdk/drivers/net/ionic/ionic_lif.h | 190 + 
src/spdk/dpdk/drivers/net/ionic/ionic_logs.h | 26 + src/spdk/dpdk/drivers/net/ionic/ionic_mac_api.c | 63 + src/spdk/dpdk/drivers/net/ionic/ionic_mac_api.h | 13 + src/spdk/dpdk/drivers/net/ionic/ionic_main.c | 443 + src/spdk/dpdk/drivers/net/ionic/ionic_osdep.h | 58 + src/spdk/dpdk/drivers/net/ionic/ionic_regs.h | 142 + src/spdk/dpdk/drivers/net/ionic/ionic_rx_filter.c | 140 + src/spdk/dpdk/drivers/net/ionic/ionic_rx_filter.h | 47 + src/spdk/dpdk/drivers/net/ionic/ionic_rxtx.c | 1082 + src/spdk/dpdk/drivers/net/ionic/ionic_rxtx.h | 44 + src/spdk/dpdk/drivers/net/ionic/meson.build | 12 + .../drivers/net/ionic/rte_pmd_ionic_version.map | 4 + src/spdk/dpdk/drivers/net/ipn3ke/Makefile | 38 + src/spdk/dpdk/drivers/net/ipn3ke/ipn3ke_ethdev.c | 596 + src/spdk/dpdk/drivers/net/ipn3ke/ipn3ke_ethdev.h | 1078 + src/spdk/dpdk/drivers/net/ipn3ke/ipn3ke_flow.c | 1380 + src/spdk/dpdk/drivers/net/ipn3ke/ipn3ke_flow.h | 106 + src/spdk/dpdk/drivers/net/ipn3ke/ipn3ke_logs.h | 30 + .../dpdk/drivers/net/ipn3ke/ipn3ke_rawdev_api.h | 74 + .../dpdk/drivers/net/ipn3ke/ipn3ke_representor.c | 2985 ++ src/spdk/dpdk/drivers/net/ipn3ke/ipn3ke_tm.c | 2055 + src/spdk/dpdk/drivers/net/ipn3ke/meson.build | 28 + .../drivers/net/ipn3ke/rte_pmd_ipn3ke_version.map | 9 + src/spdk/dpdk/drivers/net/ixgbe/Makefile | 110 + src/spdk/dpdk/drivers/net/ixgbe/base/README | 34 + src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_82598.c | 1411 + src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_82598.h | 24 + src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_82599.c | 2603 ++ src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_82599.h | 35 + src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_api.c | 1688 + src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_api.h | 197 + .../dpdk/drivers/net/ixgbe/base/ixgbe_common.c | 5410 +++ .../dpdk/drivers/net/ixgbe/base/ixgbe_common.h | 170 + src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_dcb.c | 704 + src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_dcb.h | 145 + .../dpdk/drivers/net/ixgbe/base/ixgbe_dcb_82598.c | 343 + .../dpdk/drivers/net/ixgbe/base/ixgbe_dcb_82598.h | 70 + .../dpdk/drivers/net/ixgbe/base/ixgbe_dcb_82599.c | 581 + .../dpdk/drivers/net/ixgbe/base/ixgbe_dcb_82599.h | 124 + src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_hv_vf.c | 228 + src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_hv_vf.h | 12 + src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_mbx.c | 740 + src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_mbx.h | 136 + src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_osdep.h | 140 + src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_phy.c | 2696 ++ src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_phy.h | 190 + src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_type.h | 4367 +++ src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_vf.c | 755 + src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_vf.h | 116 + src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_x540.c | 1034 + src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_x540.h | 38 + src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_x550.c | 4669 +++ src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_x550.h | 96 + src/spdk/dpdk/drivers/net/ixgbe/base/meson.build | 34 + .../dpdk/drivers/net/ixgbe/ixgbe_82599_bypass.c | 285 + src/spdk/dpdk/drivers/net/ixgbe/ixgbe_bypass.c | 386 + src/spdk/dpdk/drivers/net/ixgbe/ixgbe_bypass.h | 39 + src/spdk/dpdk/drivers/net/ixgbe/ixgbe_bypass_api.h | 271 + .../dpdk/drivers/net/ixgbe/ixgbe_bypass_defines.h | 131 + src/spdk/dpdk/drivers/net/ixgbe/ixgbe_ethdev.c | 9145 +++++ src/spdk/dpdk/drivers/net/ixgbe/ixgbe_ethdev.h | 819 + src/spdk/dpdk/drivers/net/ixgbe/ixgbe_fdir.c | 1648 + src/spdk/dpdk/drivers/net/ixgbe/ixgbe_flow.c | 3492 ++ 
src/spdk/dpdk/drivers/net/ixgbe/ixgbe_ipsec.c | 755 + src/spdk/dpdk/drivers/net/ixgbe/ixgbe_ipsec.h | 118 + src/spdk/dpdk/drivers/net/ixgbe/ixgbe_logs.h | 50 + src/spdk/dpdk/drivers/net/ixgbe/ixgbe_pf.c | 936 + src/spdk/dpdk/drivers/net/ixgbe/ixgbe_regs.h | 347 + src/spdk/dpdk/drivers/net/ixgbe/ixgbe_rxtx.c | 5967 +++ src/spdk/dpdk/drivers/net/ixgbe/ixgbe_rxtx.h | 303 + .../dpdk/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h | 293 + .../dpdk/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c | 586 + .../dpdk/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c | 751 + src/spdk/dpdk/drivers/net/ixgbe/ixgbe_tm.c | 1031 + .../dpdk/drivers/net/ixgbe/ixgbe_vf_representor.c | 237 + src/spdk/dpdk/drivers/net/ixgbe/meson.build | 33 + src/spdk/dpdk/drivers/net/ixgbe/rte_pmd_ixgbe.c | 1141 + src/spdk/dpdk/drivers/net/ixgbe/rte_pmd_ixgbe.h | 729 + .../drivers/net/ixgbe/rte_pmd_ixgbe_version.map | 46 + src/spdk/dpdk/drivers/net/kni/Makefile | 30 + src/spdk/dpdk/drivers/net/kni/meson.build | 8 + src/spdk/dpdk/drivers/net/kni/rte_eth_kni.c | 517 + .../dpdk/drivers/net/kni/rte_pmd_kni_version.map | 3 + src/spdk/dpdk/drivers/net/liquidio/Makefile | 30 + .../dpdk/drivers/net/liquidio/base/lio_23xx_reg.h | 165 + .../dpdk/drivers/net/liquidio/base/lio_23xx_vf.c | 513 + .../dpdk/drivers/net/liquidio/base/lio_23xx_vf.h | 63 + .../dpdk/drivers/net/liquidio/base/lio_hw_defs.h | 239 + src/spdk/dpdk/drivers/net/liquidio/base/lio_mbox.c | 246 + src/spdk/dpdk/drivers/net/liquidio/base/lio_mbox.h | 102 + src/spdk/dpdk/drivers/net/liquidio/lio_ethdev.c | 2173 ++ src/spdk/dpdk/drivers/net/liquidio/lio_ethdev.h | 176 + src/spdk/dpdk/drivers/net/liquidio/lio_logs.h | 58 + src/spdk/dpdk/drivers/net/liquidio/lio_rxtx.c | 1806 + src/spdk/dpdk/drivers/net/liquidio/lio_rxtx.h | 740 + src/spdk/dpdk/drivers/net/liquidio/lio_struct.h | 661 + src/spdk/dpdk/drivers/net/liquidio/meson.build | 8 + .../net/liquidio/rte_pmd_liquidio_version.map | 3 + src/spdk/dpdk/drivers/net/memif/Makefile | 26 + src/spdk/dpdk/drivers/net/memif/memif.h | 179 + src/spdk/dpdk/drivers/net/memif/memif_socket.c | 1115 + src/spdk/dpdk/drivers/net/memif/memif_socket.h | 109 + src/spdk/dpdk/drivers/net/memif/meson.build | 12 + src/spdk/dpdk/drivers/net/memif/rte_eth_memif.c | 1816 + src/spdk/dpdk/drivers/net/memif/rte_eth_memif.h | 215 + .../drivers/net/memif/rte_pmd_memif_version.map | 3 + src/spdk/dpdk/drivers/net/meson.build | 58 + src/spdk/dpdk/drivers/net/mlx4/Makefile | 142 + src/spdk/dpdk/drivers/net/mlx4/meson.build | 137 + src/spdk/dpdk/drivers/net/mlx4/mlx4.c | 1333 + src/spdk/dpdk/drivers/net/mlx4/mlx4.h | 251 + src/spdk/dpdk/drivers/net/mlx4/mlx4_ethdev.c | 990 + src/spdk/dpdk/drivers/net/mlx4/mlx4_flow.c | 1626 + src/spdk/dpdk/drivers/net/mlx4/mlx4_flow.h | 59 + src/spdk/dpdk/drivers/net/mlx4/mlx4_glue.c | 279 + src/spdk/dpdk/drivers/net/mlx4/mlx4_glue.h | 89 + src/spdk/dpdk/drivers/net/mlx4/mlx4_intr.c | 405 + src/spdk/dpdk/drivers/net/mlx4/mlx4_mp.c | 361 + src/spdk/dpdk/drivers/net/mlx4/mlx4_mr.c | 1462 + src/spdk/dpdk/drivers/net/mlx4/mlx4_mr.h | 123 + src/spdk/dpdk/drivers/net/mlx4/mlx4_prm.h | 163 + src/spdk/dpdk/drivers/net/mlx4/mlx4_rxq.c | 941 + src/spdk/dpdk/drivers/net/mlx4/mlx4_rxtx.c | 1396 + src/spdk/dpdk/drivers/net/mlx4/mlx4_rxtx.h | 251 + src/spdk/dpdk/drivers/net/mlx4/mlx4_txq.c | 526 + src/spdk/dpdk/drivers/net/mlx4/mlx4_utils.c | 188 + src/spdk/dpdk/drivers/net/mlx4/mlx4_utils.h | 113 + .../dpdk/drivers/net/mlx4/rte_pmd_mlx4_version.map | 3 + src/spdk/dpdk/drivers/net/mlx5/Makefile | 77 + src/spdk/dpdk/drivers/net/mlx5/meson.build | 54 + 
src/spdk/dpdk/drivers/net/mlx5/mlx5.c | 3814 ++ src/spdk/dpdk/drivers/net/mlx5/mlx5.h | 848 + src/spdk/dpdk/drivers/net/mlx5/mlx5_defs.h | 188 + src/spdk/dpdk/drivers/net/mlx5/mlx5_ethdev.c | 2071 + src/spdk/dpdk/drivers/net/mlx5/mlx5_flow.c | 6204 +++ src/spdk/dpdk/drivers/net/mlx5/mlx5_flow.h | 1034 + src/spdk/dpdk/drivers/net/mlx5/mlx5_flow_dv.c | 9666 +++++ src/spdk/dpdk/drivers/net/mlx5/mlx5_flow_meter.c | 1292 + src/spdk/dpdk/drivers/net/mlx5/mlx5_flow_verbs.c | 1987 + src/spdk/dpdk/drivers/net/mlx5/mlx5_mac.c | 255 + src/spdk/dpdk/drivers/net/mlx5/mlx5_mp.c | 211 + src/spdk/dpdk/drivers/net/mlx5/mlx5_mr.c | 551 + src/spdk/dpdk/drivers/net/mlx5/mlx5_mr.h | 39 + src/spdk/dpdk/drivers/net/mlx5/mlx5_rss.c | 229 + src/spdk/dpdk/drivers/net/mlx5/mlx5_rxmode.c | 174 + src/spdk/dpdk/drivers/net/mlx5/mlx5_rxq.c | 2976 ++ src/spdk/dpdk/drivers/net/mlx5/mlx5_rxtx.c | 5691 +++ src/spdk/dpdk/drivers/net/mlx5/mlx5_rxtx.h | 683 + src/spdk/dpdk/drivers/net/mlx5/mlx5_rxtx_vec.c | 170 + src/spdk/dpdk/drivers/net/mlx5/mlx5_rxtx_vec.h | 125 + .../dpdk/drivers/net/mlx5/mlx5_rxtx_vec_altivec.h | 1114 + .../dpdk/drivers/net/mlx5/mlx5_rxtx_vec_neon.h | 780 + src/spdk/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_sse.h | 731 + src/spdk/dpdk/drivers/net/mlx5/mlx5_socket.c | 230 + src/spdk/dpdk/drivers/net/mlx5/mlx5_stats.c | 589 + src/spdk/dpdk/drivers/net/mlx5/mlx5_trigger.c | 579 + src/spdk/dpdk/drivers/net/mlx5/mlx5_txq.c | 1470 + src/spdk/dpdk/drivers/net/mlx5/mlx5_utils.c | 484 + src/spdk/dpdk/drivers/net/mlx5/mlx5_utils.h | 423 + src/spdk/dpdk/drivers/net/mlx5/mlx5_vlan.c | 327 + src/spdk/dpdk/drivers/net/mlx5/rte_pmd_mlx5.h | 35 + .../dpdk/drivers/net/mlx5/rte_pmd_mlx5_version.map | 10 + src/spdk/dpdk/drivers/net/mvneta/Makefile | 39 + src/spdk/dpdk/drivers/net/mvneta/meson.build | 29 + src/spdk/dpdk/drivers/net/mvneta/mvneta_ethdev.c | 991 + src/spdk/dpdk/drivers/net/mvneta/mvneta_ethdev.h | 93 + src/spdk/dpdk/drivers/net/mvneta/mvneta_rxtx.c | 1012 + src/spdk/dpdk/drivers/net/mvneta/mvneta_rxtx.h | 38 + .../drivers/net/mvneta/rte_pmd_mvneta_version.map | 3 + src/spdk/dpdk/drivers/net/mvpp2/Makefile | 42 + src/spdk/dpdk/drivers/net/mvpp2/meson.build | 28 + src/spdk/dpdk/drivers/net/mvpp2/mrvl_ethdev.c | 3049 ++ src/spdk/dpdk/drivers/net/mvpp2/mrvl_ethdev.h | 230 + src/spdk/dpdk/drivers/net/mvpp2/mrvl_flow.c | 2824 ++ src/spdk/dpdk/drivers/net/mvpp2/mrvl_flow.h | 15 + src/spdk/dpdk/drivers/net/mvpp2/mrvl_mtr.c | 511 + src/spdk/dpdk/drivers/net/mvpp2/mrvl_mtr.h | 15 + src/spdk/dpdk/drivers/net/mvpp2/mrvl_qos.c | 912 + src/spdk/dpdk/drivers/net/mvpp2/mrvl_qos.h | 107 + src/spdk/dpdk/drivers/net/mvpp2/mrvl_tm.c | 1009 + src/spdk/dpdk/drivers/net/mvpp2/mrvl_tm.h | 15 + .../drivers/net/mvpp2/rte_pmd_mvpp2_version.map | 3 + src/spdk/dpdk/drivers/net/netvsc/Makefile | 21 + src/spdk/dpdk/drivers/net/netvsc/hn_ethdev.c | 1129 + src/spdk/dpdk/drivers/net/netvsc/hn_logs.h | 36 + src/spdk/dpdk/drivers/net/netvsc/hn_nvs.c | 587 + src/spdk/dpdk/drivers/net/netvsc/hn_nvs.h | 238 + src/spdk/dpdk/drivers/net/netvsc/hn_rndis.c | 1106 + src/spdk/dpdk/drivers/net/netvsc/hn_rndis.h | 34 + src/spdk/dpdk/drivers/net/netvsc/hn_rxtx.c | 1535 + src/spdk/dpdk/drivers/net/netvsc/hn_var.h | 252 + src/spdk/dpdk/drivers/net/netvsc/hn_vf.c | 630 + src/spdk/dpdk/drivers/net/netvsc/meson.build | 8 + src/spdk/dpdk/drivers/net/netvsc/ndis.h | 378 + src/spdk/dpdk/drivers/net/netvsc/rndis.h | 414 + .../drivers/net/netvsc/rte_pmd_netvsc_version.map | 3 + src/spdk/dpdk/drivers/net/nfb/Makefile | 40 + src/spdk/dpdk/drivers/net/nfb/meson.build | 11 + 
src/spdk/dpdk/drivers/net/nfb/nfb.h | 57 + src/spdk/dpdk/drivers/net/nfb/nfb_ethdev.c | 604 + src/spdk/dpdk/drivers/net/nfb/nfb_rx.c | 174 + src/spdk/dpdk/drivers/net/nfb/nfb_rx.h | 223 + src/spdk/dpdk/drivers/net/nfb/nfb_rxmode.c | 111 + src/spdk/dpdk/drivers/net/nfb/nfb_rxmode.h | 77 + src/spdk/dpdk/drivers/net/nfb/nfb_stats.c | 79 + src/spdk/dpdk/drivers/net/nfb/nfb_stats.h | 41 + src/spdk/dpdk/drivers/net/nfb/nfb_tx.c | 113 + src/spdk/dpdk/drivers/net/nfb/nfb_tx.h | 194 + .../dpdk/drivers/net/nfb/rte_pmd_nfb_version.map | 3 + src/spdk/dpdk/drivers/net/nfp/Makefile | 41 + src/spdk/dpdk/drivers/net/nfp/meson.build | 20 + src/spdk/dpdk/drivers/net/nfp/nfp_net.c | 3787 ++ src/spdk/dpdk/drivers/net/nfp/nfp_net_ctrl.h | 326 + src/spdk/dpdk/drivers/net/nfp/nfp_net_logs.h | 47 + src/spdk/dpdk/drivers/net/nfp/nfp_net_pmd.h | 449 + .../drivers/net/nfp/nfpcore/nfp-common/nfp_cppat.h | 725 + .../net/nfp/nfpcore/nfp-common/nfp_platform.h | 35 + .../drivers/net/nfp/nfpcore/nfp-common/nfp_resid.h | 592 + .../dpdk/drivers/net/nfp/nfpcore/nfp6000/nfp6000.h | 40 + .../dpdk/drivers/net/nfp/nfpcore/nfp6000/nfp_xpb.h | 26 + src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_cpp.h | 781 + .../drivers/net/nfp/nfpcore/nfp_cpp_pcie_ops.c | 937 + .../dpdk/drivers/net/nfp/nfpcore/nfp_cppcore.c | 861 + src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_crc.c | 49 + src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_crc.h | 19 + src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_hwinfo.c | 199 + src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_hwinfo.h | 85 + src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_mip.c | 154 + src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_mip.h | 21 + src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_mutex.c | 424 + src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_nffw.c | 235 + src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_nffw.h | 86 + src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_nsp.c | 427 + src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_nsp.h | 304 + .../dpdk/drivers/net/nfp/nfpcore/nfp_nsp_cmds.c | 109 + .../dpdk/drivers/net/nfp/nfpcore/nfp_nsp_eth.c | 665 + .../dpdk/drivers/net/nfp/nfpcore/nfp_resource.c | 266 + .../dpdk/drivers/net/nfp/nfpcore/nfp_resource.h | 52 + src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_rtsym.c | 327 + src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_rtsym.h | 61 + src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_target.h | 579 + .../dpdk/drivers/net/nfp/rte_pmd_nfp_version.map | 3 + src/spdk/dpdk/drivers/net/null/Makefile | 24 + src/spdk/dpdk/drivers/net/null/meson.build | 4 + src/spdk/dpdk/drivers/net/null/rte_eth_null.c | 738 + .../dpdk/drivers/net/null/rte_pmd_null_version.map | 3 + src/spdk/dpdk/drivers/net/octeontx/Makefile | 53 + .../dpdk/drivers/net/octeontx/base/meson.build | 25 + .../dpdk/drivers/net/octeontx/base/octeontx_bgx.c | 378 + .../dpdk/drivers/net/octeontx/base/octeontx_bgx.h | 168 + .../dpdk/drivers/net/octeontx/base/octeontx_io.h | 128 + .../drivers/net/octeontx/base/octeontx_pki_var.h | 250 + .../drivers/net/octeontx/base/octeontx_pkivf.c | 239 + .../drivers/net/octeontx/base/octeontx_pkivf.h | 372 + .../drivers/net/octeontx/base/octeontx_pkovf.c | 640 + .../drivers/net/octeontx/base/octeontx_pkovf.h | 83 + src/spdk/dpdk/drivers/net/octeontx/meson.build | 14 + .../dpdk/drivers/net/octeontx/octeontx_ethdev.c | 1672 + .../dpdk/drivers/net/octeontx/octeontx_ethdev.h | 187 + .../drivers/net/octeontx/octeontx_ethdev_ops.c | 343 + src/spdk/dpdk/drivers/net/octeontx/octeontx_logs.h | 36 + src/spdk/dpdk/drivers/net/octeontx/octeontx_rxtx.c | 76 + src/spdk/dpdk/drivers/net/octeontx/octeontx_rxtx.h | 504 + 
.../net/octeontx/rte_pmd_octeontx_version.map | 7 + src/spdk/dpdk/drivers/net/octeontx2/Makefile | 63 + src/spdk/dpdk/drivers/net/octeontx2/meson.build | 44 + src/spdk/dpdk/drivers/net/octeontx2/otx2_ethdev.c | 2553 ++ src/spdk/dpdk/drivers/net/octeontx2/otx2_ethdev.h | 592 + .../dpdk/drivers/net/octeontx2/otx2_ethdev_debug.c | 811 + .../drivers/net/octeontx2/otx2_ethdev_devargs.c | 193 + .../dpdk/drivers/net/octeontx2/otx2_ethdev_irq.c | 494 + .../dpdk/drivers/net/octeontx2/otx2_ethdev_ops.c | 629 + .../dpdk/drivers/net/octeontx2/otx2_ethdev_sec.c | 842 + .../dpdk/drivers/net/octeontx2/otx2_ethdev_sec.h | 139 + .../drivers/net/octeontx2/otx2_ethdev_sec_tx.h | 181 + src/spdk/dpdk/drivers/net/octeontx2/otx2_flow.c | 1007 + src/spdk/dpdk/drivers/net/octeontx2/otx2_flow.h | 397 + .../dpdk/drivers/net/octeontx2/otx2_flow_ctrl.c | 252 + .../dpdk/drivers/net/octeontx2/otx2_flow_parse.c | 1046 + .../dpdk/drivers/net/octeontx2/otx2_flow_utils.c | 959 + src/spdk/dpdk/drivers/net/octeontx2/otx2_link.c | 264 + src/spdk/dpdk/drivers/net/octeontx2/otx2_lookup.c | 352 + src/spdk/dpdk/drivers/net/octeontx2/otx2_mac.c | 149 + src/spdk/dpdk/drivers/net/octeontx2/otx2_mcast.c | 339 + src/spdk/dpdk/drivers/net/octeontx2/otx2_ptp.c | 442 + src/spdk/dpdk/drivers/net/octeontx2/otx2_rss.c | 392 + src/spdk/dpdk/drivers/net/octeontx2/otx2_rx.c | 424 + src/spdk/dpdk/drivers/net/octeontx2/otx2_rx.h | 541 + src/spdk/dpdk/drivers/net/octeontx2/otx2_stats.c | 396 + src/spdk/dpdk/drivers/net/octeontx2/otx2_tm.c | 3216 ++ src/spdk/dpdk/drivers/net/octeontx2/otx2_tm.h | 171 + src/spdk/dpdk/drivers/net/octeontx2/otx2_tx.c | 1060 + src/spdk/dpdk/drivers/net/octeontx2/otx2_tx.h | 744 + src/spdk/dpdk/drivers/net/octeontx2/otx2_vlan.c | 1040 + .../net/octeontx2/rte_pmd_octeontx2_version.map | 3 + src/spdk/dpdk/drivers/net/pcap/Makefile | 32 + src/spdk/dpdk/drivers/net/pcap/meson.build | 9 + src/spdk/dpdk/drivers/net/pcap/rte_eth_pcap.c | 1588 + .../dpdk/drivers/net/pcap/rte_pmd_pcap_version.map | 3 + src/spdk/dpdk/drivers/net/pfe/Makefile | 31 + src/spdk/dpdk/drivers/net/pfe/base/cbus.h | 66 + src/spdk/dpdk/drivers/net/pfe/base/cbus/bmu.h | 41 + .../dpdk/drivers/net/pfe/base/cbus/class_csr.h | 277 + .../dpdk/drivers/net/pfe/base/cbus/emac_mtip.h | 231 + src/spdk/dpdk/drivers/net/pfe/base/cbus/gpi.h | 77 + src/spdk/dpdk/drivers/net/pfe/base/cbus/hif.h | 86 + .../dpdk/drivers/net/pfe/base/cbus/hif_nocpy.h | 36 + src/spdk/dpdk/drivers/net/pfe/base/cbus/tmu_csr.h | 154 + src/spdk/dpdk/drivers/net/pfe/base/cbus/util_csr.h | 47 + src/spdk/dpdk/drivers/net/pfe/base/pfe.h | 422 + src/spdk/dpdk/drivers/net/pfe/meson.build | 18 + src/spdk/dpdk/drivers/net/pfe/pfe_eth.h | 76 + src/spdk/dpdk/drivers/net/pfe/pfe_ethdev.c | 1190 + src/spdk/dpdk/drivers/net/pfe/pfe_hal.c | 629 + src/spdk/dpdk/drivers/net/pfe/pfe_hif.c | 868 + src/spdk/dpdk/drivers/net/pfe/pfe_hif.h | 156 + src/spdk/dpdk/drivers/net/pfe/pfe_hif_lib.c | 576 + src/spdk/dpdk/drivers/net/pfe/pfe_hif_lib.h | 181 + src/spdk/dpdk/drivers/net/pfe/pfe_logs.h | 31 + src/spdk/dpdk/drivers/net/pfe/pfe_mod.h | 64 + .../dpdk/drivers/net/pfe/rte_pmd_pfe_version.map | 3 + src/spdk/dpdk/drivers/net/qede/Makefile | 108 + src/spdk/dpdk/drivers/net/qede/base/bcm_osal.c | 311 + src/spdk/dpdk/drivers/net/qede/base/bcm_osal.h | 459 + src/spdk/dpdk/drivers/net/qede/base/common_hsi.h | 1700 + src/spdk/dpdk/drivers/net/qede/base/ecore.h | 1073 + .../dpdk/drivers/net/qede/base/ecore_attn_values.h | 13285 +++++++ src/spdk/dpdk/drivers/net/qede/base/ecore_chain.h | 842 + 
src/spdk/dpdk/drivers/net/qede/base/ecore_cxt.c | 2308 ++ src/spdk/dpdk/drivers/net/qede/base/ecore_cxt.h | 215 + .../dpdk/drivers/net/qede/base/ecore_cxt_api.h | 38 + src/spdk/dpdk/drivers/net/qede/base/ecore_dcbx.c | 1607 + src/spdk/dpdk/drivers/net/qede/base/ecore_dcbx.h | 62 + .../dpdk/drivers/net/qede/base/ecore_dcbx_api.h | 250 + src/spdk/dpdk/drivers/net/qede/base/ecore_dev.c | 6799 ++++ .../dpdk/drivers/net/qede/base/ecore_dev_api.h | 701 + .../drivers/net/qede/base/ecore_gtt_reg_addr.h | 60 + .../dpdk/drivers/net/qede/base/ecore_gtt_values.h | 31 + .../dpdk/drivers/net/qede/base/ecore_hsi_common.h | 2546 ++ .../drivers/net/qede/base/ecore_hsi_debug_tools.h | 1053 + .../dpdk/drivers/net/qede/base/ecore_hsi_eth.h | 2315 ++ .../drivers/net/qede/base/ecore_hsi_init_func.h | 143 + .../drivers/net/qede/base/ecore_hsi_init_tool.h | 443 + src/spdk/dpdk/drivers/net/qede/base/ecore_hw.c | 1111 + src/spdk/dpdk/drivers/net/qede/base/ecore_hw.h | 338 + .../dpdk/drivers/net/qede/base/ecore_hw_defs.h | 58 + .../drivers/net/qede/base/ecore_init_fw_funcs.c | 2198 ++ .../drivers/net/qede/base/ecore_init_fw_funcs.h | 592 + .../dpdk/drivers/net/qede/base/ecore_init_ops.c | 585 + .../dpdk/drivers/net/qede/base/ecore_init_ops.h | 100 + src/spdk/dpdk/drivers/net/qede/base/ecore_int.c | 2773 ++ src/spdk/dpdk/drivers/net/qede/base/ecore_int.h | 261 + .../dpdk/drivers/net/qede/base/ecore_int_api.h | 363 + .../dpdk/drivers/net/qede/base/ecore_iov_api.h | 771 + src/spdk/dpdk/drivers/net/qede/base/ecore_iro.h | 273 + .../dpdk/drivers/net/qede/base/ecore_iro_values.h | 227 + src/spdk/dpdk/drivers/net/qede/base/ecore_l2.c | 2388 ++ src/spdk/dpdk/drivers/net/qede/base/ecore_l2.h | 165 + src/spdk/dpdk/drivers/net/qede/base/ecore_l2_api.h | 517 + src/spdk/dpdk/drivers/net/qede/base/ecore_mcp.c | 4339 +++ src/spdk/dpdk/drivers/net/qede/base/ecore_mcp.h | 589 + .../dpdk/drivers/net/qede/base/ecore_mcp_api.h | 1265 + .../dpdk/drivers/net/qede/base/ecore_mng_tlv.c | 1540 + .../dpdk/drivers/net/qede/base/ecore_proto_if.h | 110 + .../dpdk/drivers/net/qede/base/ecore_rt_defs.h | 453 + src/spdk/dpdk/drivers/net/qede/base/ecore_sp_api.h | 64 + .../dpdk/drivers/net/qede/base/ecore_sp_commands.c | 671 + .../dpdk/drivers/net/qede/base/ecore_sp_commands.h | 166 + src/spdk/dpdk/drivers/net/qede/base/ecore_spq.c | 1088 + src/spdk/dpdk/drivers/net/qede/base/ecore_spq.h | 313 + src/spdk/dpdk/drivers/net/qede/base/ecore_sriov.c | 5072 +++ src/spdk/dpdk/drivers/net/qede/base/ecore_sriov.h | 297 + src/spdk/dpdk/drivers/net/qede/base/ecore_status.h | 29 + src/spdk/dpdk/drivers/net/qede/base/ecore_utils.h | 35 + src/spdk/dpdk/drivers/net/qede/base/ecore_vf.c | 1980 + src/spdk/dpdk/drivers/net/qede/base/ecore_vf.h | 339 + src/spdk/dpdk/drivers/net/qede/base/ecore_vf_api.h | 181 + .../dpdk/drivers/net/qede/base/ecore_vfpf_if.h | 744 + src/spdk/dpdk/drivers/net/qede/base/eth_common.h | 746 + src/spdk/dpdk/drivers/net/qede/base/mcp_public.h | 2023 + src/spdk/dpdk/drivers/net/qede/base/meson.build | 57 + src/spdk/dpdk/drivers/net/qede/base/nvm_cfg.h | 2872 ++ src/spdk/dpdk/drivers/net/qede/base/reg_addr.h | 1247 + src/spdk/dpdk/drivers/net/qede/meson.build | 12 + src/spdk/dpdk/drivers/net/qede/qede_ethdev.c | 2882 ++ src/spdk/dpdk/drivers/net/qede/qede_ethdev.h | 316 + src/spdk/dpdk/drivers/net/qede/qede_filter.c | 1578 + src/spdk/dpdk/drivers/net/qede/qede_if.h | 198 + src/spdk/dpdk/drivers/net/qede/qede_logs.h | 76 + src/spdk/dpdk/drivers/net/qede/qede_main.c | 793 + src/spdk/dpdk/drivers/net/qede/qede_rxtx.c | 2811 ++ 
src/spdk/dpdk/drivers/net/qede/qede_rxtx.h | 308 + .../dpdk/drivers/net/qede/rte_pmd_qede_version.map | 3 + src/spdk/dpdk/drivers/net/ring/Makefile | 29 + src/spdk/dpdk/drivers/net/ring/meson.build | 5 + src/spdk/dpdk/drivers/net/ring/rte_eth_ring.c | 711 + src/spdk/dpdk/drivers/net/ring/rte_eth_ring.h | 57 + .../dpdk/drivers/net/ring/rte_pmd_ring_version.map | 8 + src/spdk/dpdk/drivers/net/sfc/Makefile | 133 + src/spdk/dpdk/drivers/net/sfc/base/README | 16 + src/spdk/dpdk/drivers/net/sfc/base/ef10_ev.c | 1476 + src/spdk/dpdk/drivers/net/sfc/base/ef10_evb.c | 549 + src/spdk/dpdk/drivers/net/sfc/base/ef10_filter.c | 2141 + .../dpdk/drivers/net/sfc/base/ef10_firmware_ids.h | 184 + src/spdk/dpdk/drivers/net/sfc/base/ef10_image.c | 904 + src/spdk/dpdk/drivers/net/sfc/base/ef10_impl.h | 1470 + src/spdk/dpdk/drivers/net/sfc/base/ef10_intr.c | 169 + src/spdk/dpdk/drivers/net/sfc/base/ef10_mac.c | 1042 + src/spdk/dpdk/drivers/net/sfc/base/ef10_mcdi.c | 319 + src/spdk/dpdk/drivers/net/sfc/base/ef10_nic.c | 2672 ++ src/spdk/dpdk/drivers/net/sfc/base/ef10_nvram.c | 2561 ++ src/spdk/dpdk/drivers/net/sfc/base/ef10_phy.c | 758 + src/spdk/dpdk/drivers/net/sfc/base/ef10_proxy.c | 470 + src/spdk/dpdk/drivers/net/sfc/base/ef10_rx.c | 1229 + .../net/sfc/base/ef10_signed_image_layout.h | 70 + .../dpdk/drivers/net/sfc/base/ef10_tlv_layout.h | 1063 + src/spdk/dpdk/drivers/net/sfc/base/ef10_tx.c | 772 + src/spdk/dpdk/drivers/net/sfc/base/ef10_vpd.c | 437 + src/spdk/dpdk/drivers/net/sfc/base/efx.h | 3559 ++ src/spdk/dpdk/drivers/net/sfc/base/efx_annote.h | 103 + src/spdk/dpdk/drivers/net/sfc/base/efx_bootcfg.c | 1125 + src/spdk/dpdk/drivers/net/sfc/base/efx_check.h | 368 + src/spdk/dpdk/drivers/net/sfc/base/efx_crc32.c | 98 + src/spdk/dpdk/drivers/net/sfc/base/efx_ev.c | 1486 + src/spdk/dpdk/drivers/net/sfc/base/efx_evb.c | 544 + src/spdk/dpdk/drivers/net/sfc/base/efx_filter.c | 1638 + src/spdk/dpdk/drivers/net/sfc/base/efx_hash.c | 304 + src/spdk/dpdk/drivers/net/sfc/base/efx_impl.h | 1415 + src/spdk/dpdk/drivers/net/sfc/base/efx_intr.c | 565 + src/spdk/dpdk/drivers/net/sfc/base/efx_lic.c | 1680 + src/spdk/dpdk/drivers/net/sfc/base/efx_mac.c | 965 + src/spdk/dpdk/drivers/net/sfc/base/efx_mcdi.c | 2425 ++ src/spdk/dpdk/drivers/net/sfc/base/efx_mcdi.h | 420 + src/spdk/dpdk/drivers/net/sfc/base/efx_mon.c | 850 + src/spdk/dpdk/drivers/net/sfc/base/efx_nic.c | 1154 + src/spdk/dpdk/drivers/net/sfc/base/efx_nvram.c | 1105 + src/spdk/dpdk/drivers/net/sfc/base/efx_phy.c | 601 + src/spdk/dpdk/drivers/net/sfc/base/efx_phy_ids.h | 27 + src/spdk/dpdk/drivers/net/sfc/base/efx_port.c | 230 + src/spdk/dpdk/drivers/net/sfc/base/efx_proxy.c | 364 + src/spdk/dpdk/drivers/net/sfc/base/efx_regs.h | 3846 ++ src/spdk/dpdk/drivers/net/sfc/base/efx_regs_ef10.h | 727 + src/spdk/dpdk/drivers/net/sfc/base/efx_regs_mcdi.h | 20757 ++++++++++ .../dpdk/drivers/net/sfc/base/efx_regs_mcdi_aoe.h | 2977 ++ .../dpdk/drivers/net/sfc/base/efx_regs_mcdi_strs.h | 102 + src/spdk/dpdk/drivers/net/sfc/base/efx_regs_pci.h | 2332 ++ src/spdk/dpdk/drivers/net/sfc/base/efx_rx.c | 1720 + src/spdk/dpdk/drivers/net/sfc/base/efx_sram.c | 305 + src/spdk/dpdk/drivers/net/sfc/base/efx_tunnel.c | 469 + src/spdk/dpdk/drivers/net/sfc/base/efx_tx.c | 1142 + src/spdk/dpdk/drivers/net/sfc/base/efx_types.h | 1634 + src/spdk/dpdk/drivers/net/sfc/base/efx_vpd.c | 998 + src/spdk/dpdk/drivers/net/sfc/base/hunt_impl.h | 54 + src/spdk/dpdk/drivers/net/sfc/base/hunt_nic.c | 204 + src/spdk/dpdk/drivers/net/sfc/base/mcdi_mon.c | 636 + src/spdk/dpdk/drivers/net/sfc/base/mcdi_mon.h | 
55 + src/spdk/dpdk/drivers/net/sfc/base/medford2_impl.h | 39 + src/spdk/dpdk/drivers/net/sfc/base/medford2_nic.c | 163 + src/spdk/dpdk/drivers/net/sfc/base/medford_impl.h | 39 + src/spdk/dpdk/drivers/net/sfc/base/medford_nic.c | 161 + src/spdk/dpdk/drivers/net/sfc/base/meson.build | 81 + src/spdk/dpdk/drivers/net/sfc/base/siena_flash.h | 204 + src/spdk/dpdk/drivers/net/sfc/base/siena_impl.h | 446 + src/spdk/dpdk/drivers/net/sfc/base/siena_mac.c | 472 + src/spdk/dpdk/drivers/net/sfc/base/siena_mcdi.c | 243 + src/spdk/dpdk/drivers/net/sfc/base/siena_nic.c | 807 + src/spdk/dpdk/drivers/net/sfc/base/siena_nvram.c | 745 + src/spdk/dpdk/drivers/net/sfc/base/siena_phy.c | 776 + src/spdk/dpdk/drivers/net/sfc/base/siena_sram.c | 154 + src/spdk/dpdk/drivers/net/sfc/base/siena_vpd.c | 601 + src/spdk/dpdk/drivers/net/sfc/efsys.h | 736 + src/spdk/dpdk/drivers/net/sfc/meson.build | 58 + .../dpdk/drivers/net/sfc/rte_pmd_sfc_version.map | 3 + src/spdk/dpdk/drivers/net/sfc/sfc.c | 1147 + src/spdk/dpdk/drivers/net/sfc/sfc.h | 417 + src/spdk/dpdk/drivers/net/sfc/sfc_debug.h | 38 + src/spdk/dpdk/drivers/net/sfc/sfc_dp.c | 79 + src/spdk/dpdk/drivers/net/sfc/sfc_dp.h | 106 + src/spdk/dpdk/drivers/net/sfc/sfc_dp_rx.h | 273 + src/spdk/dpdk/drivers/net/sfc/sfc_dp_tx.h | 296 + src/spdk/dpdk/drivers/net/sfc/sfc_ef10.h | 131 + src/spdk/dpdk/drivers/net/sfc/sfc_ef10_essb_rx.c | 734 + src/spdk/dpdk/drivers/net/sfc/sfc_ef10_rx.c | 816 + src/spdk/dpdk/drivers/net/sfc/sfc_ef10_rx_ev.h | 175 + src/spdk/dpdk/drivers/net/sfc/sfc_ef10_tx.c | 1147 + src/spdk/dpdk/drivers/net/sfc/sfc_ethdev.c | 2306 ++ src/spdk/dpdk/drivers/net/sfc/sfc_ev.c | 932 + src/spdk/dpdk/drivers/net/sfc/sfc_ev.h | 105 + src/spdk/dpdk/drivers/net/sfc/sfc_filter.c | 129 + src/spdk/dpdk/drivers/net/sfc/sfc_filter.h | 48 + src/spdk/dpdk/drivers/net/sfc/sfc_flow.c | 2664 ++ src/spdk/dpdk/drivers/net/sfc/sfc_flow.h | 155 + src/spdk/dpdk/drivers/net/sfc/sfc_intr.c | 354 + src/spdk/dpdk/drivers/net/sfc/sfc_kvargs.c | 123 + src/spdk/dpdk/drivers/net/sfc/sfc_kvargs.h | 85 + src/spdk/dpdk/drivers/net/sfc/sfc_log.h | 104 + src/spdk/dpdk/drivers/net/sfc/sfc_mcdi.c | 311 + src/spdk/dpdk/drivers/net/sfc/sfc_port.c | 622 + src/spdk/dpdk/drivers/net/sfc/sfc_rx.c | 1726 + src/spdk/dpdk/drivers/net/sfc/sfc_rx.h | 154 + src/spdk/dpdk/drivers/net/sfc/sfc_tso.c | 171 + src/spdk/dpdk/drivers/net/sfc/sfc_tso.h | 48 + src/spdk/dpdk/drivers/net/sfc/sfc_tweak.h | 45 + src/spdk/dpdk/drivers/net/sfc/sfc_tx.c | 1160 + src/spdk/dpdk/drivers/net/sfc/sfc_tx.h | 137 + src/spdk/dpdk/drivers/net/softnic/Makefile | 54 + src/spdk/dpdk/drivers/net/softnic/conn.c | 331 + src/spdk/dpdk/drivers/net/softnic/conn.h | 49 + src/spdk/dpdk/drivers/net/softnic/firmware.cli | 21 + src/spdk/dpdk/drivers/net/softnic/meson.build | 24 + src/spdk/dpdk/drivers/net/softnic/parser.c | 703 + src/spdk/dpdk/drivers/net/softnic/parser.h | 68 + .../dpdk/drivers/net/softnic/rte_eth_softnic.c | 718 + .../dpdk/drivers/net/softnic/rte_eth_softnic.h | 86 + .../drivers/net/softnic/rte_eth_softnic_action.c | 422 + .../dpdk/drivers/net/softnic/rte_eth_softnic_cli.c | 6571 ++++ .../net/softnic/rte_eth_softnic_cryptodev.c | 170 + .../drivers/net/softnic/rte_eth_softnic_flow.c | 2288 ++ .../net/softnic/rte_eth_softnic_internals.h | 1127 + .../drivers/net/softnic/rte_eth_softnic_link.c | 101 + .../drivers/net/softnic/rte_eth_softnic_mempool.c | 103 + .../drivers/net/softnic/rte_eth_softnic_meter.c | 749 + .../drivers/net/softnic/rte_eth_softnic_pipeline.c | 1116 + .../dpdk/drivers/net/softnic/rte_eth_softnic_swq.c | 114 + 
.../dpdk/drivers/net/softnic/rte_eth_softnic_tap.c | 118 + .../drivers/net/softnic/rte_eth_softnic_thread.c | 3063 ++ .../dpdk/drivers/net/softnic/rte_eth_softnic_tm.c | 3463 ++ .../net/softnic/rte_pmd_softnic_version.map | 13 + src/spdk/dpdk/drivers/net/szedata2/Makefile | 30 + src/spdk/dpdk/drivers/net/szedata2/meson.build | 8 + .../dpdk/drivers/net/szedata2/rte_eth_szedata2.c | 1953 + .../dpdk/drivers/net/szedata2/rte_eth_szedata2.h | 90 + .../net/szedata2/rte_pmd_szedata2_version.map | 3 + src/spdk/dpdk/drivers/net/szedata2/szedata2_logs.h | 22 + src/spdk/dpdk/drivers/net/tap/Makefile | 97 + src/spdk/dpdk/drivers/net/tap/meson.build | 45 + src/spdk/dpdk/drivers/net/tap/rte_eth_tap.c | 2504 ++ src/spdk/dpdk/drivers/net/tap/rte_eth_tap.h | 105 + .../dpdk/drivers/net/tap/rte_pmd_tap_version.map | 3 + src/spdk/dpdk/drivers/net/tap/tap_bpf.h | 117 + src/spdk/dpdk/drivers/net/tap/tap_bpf_api.c | 190 + src/spdk/dpdk/drivers/net/tap/tap_bpf_insns.h | 1696 + src/spdk/dpdk/drivers/net/tap/tap_bpf_program.c | 224 + src/spdk/dpdk/drivers/net/tap/tap_flow.c | 2194 ++ src/spdk/dpdk/drivers/net/tap/tap_flow.h | 68 + src/spdk/dpdk/drivers/net/tap/tap_intr.c | 110 + src/spdk/dpdk/drivers/net/tap/tap_log.h | 10 + src/spdk/dpdk/drivers/net/tap/tap_netlink.c | 411 + src/spdk/dpdk/drivers/net/tap/tap_netlink.h | 42 + src/spdk/dpdk/drivers/net/tap/tap_rss.h | 40 + src/spdk/dpdk/drivers/net/tap/tap_tcmsgs.c | 296 + src/spdk/dpdk/drivers/net/tap/tap_tcmsgs.h | 37 + src/spdk/dpdk/drivers/net/thunderx/Makefile | 41 + .../dpdk/drivers/net/thunderx/base/meson.build | 16 + .../dpdk/drivers/net/thunderx/base/nicvf_bsvf.c | 44 + .../dpdk/drivers/net/thunderx/base/nicvf_bsvf.h | 48 + src/spdk/dpdk/drivers/net/thunderx/base/nicvf_hw.c | 918 + src/spdk/dpdk/drivers/net/thunderx/base/nicvf_hw.h | 218 + .../dpdk/drivers/net/thunderx/base/nicvf_hw_defs.h | 1200 + .../dpdk/drivers/net/thunderx/base/nicvf_mbox.c | 442 + .../dpdk/drivers/net/thunderx/base/nicvf_mbox.h | 226 + .../dpdk/drivers/net/thunderx/base/nicvf_plat.h | 84 + src/spdk/dpdk/drivers/net/thunderx/meson.build | 20 + src/spdk/dpdk/drivers/net/thunderx/nicvf_ethdev.c | 2330 ++ src/spdk/dpdk/drivers/net/thunderx/nicvf_ethdev.h | 134 + src/spdk/dpdk/drivers/net/thunderx/nicvf_logs.h | 44 + src/spdk/dpdk/drivers/net/thunderx/nicvf_rxtx.c | 676 + src/spdk/dpdk/drivers/net/thunderx/nicvf_rxtx.h | 114 + src/spdk/dpdk/drivers/net/thunderx/nicvf_struct.h | 116 + src/spdk/dpdk/drivers/net/thunderx/nicvf_svf.c | 50 + src/spdk/dpdk/drivers/net/thunderx/nicvf_svf.h | 38 + .../net/thunderx/rte_pmd_thunderx_version.map | 3 + src/spdk/dpdk/drivers/net/vdev_netvsc/Makefile | 30 + src/spdk/dpdk/drivers/net/vdev_netvsc/meson.build | 19 + .../vdev_netvsc/rte_pmd_vdev_netvsc_version.map | 3 + .../dpdk/drivers/net/vdev_netvsc/vdev_netvsc.c | 824 + src/spdk/dpdk/drivers/net/vhost/Makefile | 31 + src/spdk/dpdk/drivers/net/vhost/meson.build | 8 + src/spdk/dpdk/drivers/net/vhost/rte_eth_vhost.c | 1579 + src/spdk/dpdk/drivers/net/vhost/rte_eth_vhost.h | 59 + .../drivers/net/vhost/rte_pmd_vhost_version.map | 8 + src/spdk/dpdk/drivers/net/virtio/Makefile | 82 + src/spdk/dpdk/drivers/net/virtio/meson.build | 47 + .../drivers/net/virtio/rte_pmd_virtio_version.map | 3 + src/spdk/dpdk/drivers/net/virtio/virtio_ethdev.c | 2671 ++ src/spdk/dpdk/drivers/net/virtio/virtio_ethdev.h | 123 + src/spdk/dpdk/drivers/net/virtio/virtio_logs.h | 36 + src/spdk/dpdk/drivers/net/virtio/virtio_pci.c | 770 + src/spdk/dpdk/drivers/net/virtio/virtio_pci.h | 380 + src/spdk/dpdk/drivers/net/virtio/virtio_ring.h | 
188 + src/spdk/dpdk/drivers/net/virtio/virtio_rxtx.c | 2039 + src/spdk/dpdk/drivers/net/virtio/virtio_rxtx.h | 65 + .../drivers/net/virtio/virtio_rxtx_packed_avx.c | 617 + .../dpdk/drivers/net/virtio/virtio_rxtx_simple.c | 57 + .../dpdk/drivers/net/virtio/virtio_rxtx_simple.h | 58 + .../net/virtio/virtio_rxtx_simple_altivec.c | 208 + .../drivers/net/virtio/virtio_rxtx_simple_neon.c | 213 + .../drivers/net/virtio/virtio_rxtx_simple_sse.c | 198 + .../dpdk/drivers/net/virtio/virtio_user/vhost.h | 94 + .../drivers/net/virtio/virtio_user/vhost_kernel.c | 390 + .../net/virtio/virtio_user/vhost_kernel_tap.c | 163 + .../net/virtio/virtio_user/vhost_kernel_tap.h | 47 + .../drivers/net/virtio/virtio_user/vhost_user.c | 480 + .../net/virtio/virtio_user/virtio_user_dev.c | 748 + .../net/virtio/virtio_user/virtio_user_dev.h | 71 + .../dpdk/drivers/net/virtio/virtio_user_ethdev.c | 817 + src/spdk/dpdk/drivers/net/virtio/virtqueue.c | 217 + src/spdk/dpdk/drivers/net/virtio/virtqueue.h | 907 + src/spdk/dpdk/drivers/net/vmxnet3/Makefile | 53 + src/spdk/dpdk/drivers/net/vmxnet3/base/README | 19 + src/spdk/dpdk/drivers/net/vmxnet3/base/upt1_defs.h | 94 + .../drivers/net/vmxnet3/base/vmware_pack_begin.h | 3 + .../drivers/net/vmxnet3/base/vmware_pack_end.h | 3 + .../dpdk/drivers/net/vmxnet3/base/vmxnet3_defs.h | 833 + .../dpdk/drivers/net/vmxnet3/base/vmxnet3_osdep.h | 19 + src/spdk/dpdk/drivers/net/vmxnet3/meson.build | 17 + .../net/vmxnet3/rte_pmd_vmxnet3_version.map | 3 + src/spdk/dpdk/drivers/net/vmxnet3/vmxnet3_ethdev.c | 1476 + src/spdk/dpdk/drivers/net/vmxnet3/vmxnet3_ethdev.h | 196 + src/spdk/dpdk/drivers/net/vmxnet3/vmxnet3_logs.h | 40 + src/spdk/dpdk/drivers/net/vmxnet3/vmxnet3_ring.h | 156 + src/spdk/dpdk/drivers/net/vmxnet3/vmxnet3_rxtx.c | 1402 + 1272 files changed, 1001053 insertions(+) create mode 100644 src/spdk/dpdk/drivers/net/Makefile create mode 100644 src/spdk/dpdk/drivers/net/af_packet/Makefile create mode 100644 src/spdk/dpdk/drivers/net/af_packet/meson.build create mode 100644 src/spdk/dpdk/drivers/net/af_packet/rte_eth_af_packet.c create mode 100644 src/spdk/dpdk/drivers/net/af_packet/rte_pmd_af_packet_version.map create mode 100644 src/spdk/dpdk/drivers/net/af_xdp/Makefile create mode 100644 src/spdk/dpdk/drivers/net/af_xdp/af_xdp_deps.h create mode 100644 src/spdk/dpdk/drivers/net/af_xdp/meson.build create mode 100644 src/spdk/dpdk/drivers/net/af_xdp/rte_eth_af_xdp.c create mode 100644 src/spdk/dpdk/drivers/net/af_xdp/rte_pmd_af_xdp_version.map create mode 100644 src/spdk/dpdk/drivers/net/ark/Makefile create mode 100644 src/spdk/dpdk/drivers/net/ark/ark_ddm.c create mode 100644 src/spdk/dpdk/drivers/net/ark/ark_ddm.h create mode 100644 src/spdk/dpdk/drivers/net/ark/ark_ethdev.c create mode 100644 src/spdk/dpdk/drivers/net/ark/ark_ethdev_rx.c create mode 100644 src/spdk/dpdk/drivers/net/ark/ark_ethdev_rx.h create mode 100644 src/spdk/dpdk/drivers/net/ark/ark_ethdev_tx.c create mode 100644 src/spdk/dpdk/drivers/net/ark/ark_ethdev_tx.h create mode 100644 src/spdk/dpdk/drivers/net/ark/ark_ext.h create mode 100644 src/spdk/dpdk/drivers/net/ark/ark_global.h create mode 100644 src/spdk/dpdk/drivers/net/ark/ark_logs.h create mode 100644 src/spdk/dpdk/drivers/net/ark/ark_mpu.c create mode 100644 src/spdk/dpdk/drivers/net/ark/ark_mpu.h create mode 100644 src/spdk/dpdk/drivers/net/ark/ark_pktchkr.c create mode 100644 src/spdk/dpdk/drivers/net/ark/ark_pktchkr.h create mode 100644 src/spdk/dpdk/drivers/net/ark/ark_pktdir.c create mode 100644 src/spdk/dpdk/drivers/net/ark/ark_pktdir.h create mode 
100644 src/spdk/dpdk/drivers/net/ark/ark_pktgen.c create mode 100644 src/spdk/dpdk/drivers/net/ark/ark_pktgen.h create mode 100644 src/spdk/dpdk/drivers/net/ark/ark_rqp.c create mode 100644 src/spdk/dpdk/drivers/net/ark/ark_rqp.h create mode 100644 src/spdk/dpdk/drivers/net/ark/ark_udm.c create mode 100644 src/spdk/dpdk/drivers/net/ark/ark_udm.h create mode 100644 src/spdk/dpdk/drivers/net/ark/meson.build create mode 100644 src/spdk/dpdk/drivers/net/ark/rte_pmd_ark_version.map create mode 100644 src/spdk/dpdk/drivers/net/atlantic/Makefile create mode 100644 src/spdk/dpdk/drivers/net/atlantic/atl_common.h create mode 100644 src/spdk/dpdk/drivers/net/atlantic/atl_ethdev.c create mode 100644 src/spdk/dpdk/drivers/net/atlantic/atl_ethdev.h create mode 100644 src/spdk/dpdk/drivers/net/atlantic/atl_hw_regs.c create mode 100644 src/spdk/dpdk/drivers/net/atlantic/atl_hw_regs.h create mode 100644 src/spdk/dpdk/drivers/net/atlantic/atl_logs.h create mode 100644 src/spdk/dpdk/drivers/net/atlantic/atl_rxtx.c create mode 100644 src/spdk/dpdk/drivers/net/atlantic/atl_types.h create mode 100644 src/spdk/dpdk/drivers/net/atlantic/hw_atl/hw_atl_b0.c create mode 100644 src/spdk/dpdk/drivers/net/atlantic/hw_atl/hw_atl_b0.h create mode 100644 src/spdk/dpdk/drivers/net/atlantic/hw_atl/hw_atl_b0_internal.h create mode 100644 src/spdk/dpdk/drivers/net/atlantic/hw_atl/hw_atl_llh.c create mode 100644 src/spdk/dpdk/drivers/net/atlantic/hw_atl/hw_atl_llh.h create mode 100644 src/spdk/dpdk/drivers/net/atlantic/hw_atl/hw_atl_llh_internal.h create mode 100644 src/spdk/dpdk/drivers/net/atlantic/hw_atl/hw_atl_utils.c create mode 100644 src/spdk/dpdk/drivers/net/atlantic/hw_atl/hw_atl_utils.h create mode 100644 src/spdk/dpdk/drivers/net/atlantic/hw_atl/hw_atl_utils_fw2x.c create mode 100644 src/spdk/dpdk/drivers/net/atlantic/meson.build create mode 100644 src/spdk/dpdk/drivers/net/atlantic/rte_pmd_atlantic.c create mode 100644 src/spdk/dpdk/drivers/net/atlantic/rte_pmd_atlantic.h create mode 100644 src/spdk/dpdk/drivers/net/atlantic/rte_pmd_atlantic_version.map create mode 100644 src/spdk/dpdk/drivers/net/avp/Makefile create mode 100644 src/spdk/dpdk/drivers/net/avp/avp_ethdev.c create mode 100644 src/spdk/dpdk/drivers/net/avp/avp_logs.h create mode 100644 src/spdk/dpdk/drivers/net/avp/meson.build create mode 100644 src/spdk/dpdk/drivers/net/avp/rte_avp_common.h create mode 100644 src/spdk/dpdk/drivers/net/avp/rte_avp_fifo.h create mode 100644 src/spdk/dpdk/drivers/net/avp/rte_pmd_avp_version.map create mode 100644 src/spdk/dpdk/drivers/net/axgbe/Makefile create mode 100644 src/spdk/dpdk/drivers/net/axgbe/axgbe_common.h create mode 100644 src/spdk/dpdk/drivers/net/axgbe/axgbe_dev.c create mode 100644 src/spdk/dpdk/drivers/net/axgbe/axgbe_ethdev.c create mode 100644 src/spdk/dpdk/drivers/net/axgbe/axgbe_ethdev.h create mode 100644 src/spdk/dpdk/drivers/net/axgbe/axgbe_i2c.c create mode 100644 src/spdk/dpdk/drivers/net/axgbe/axgbe_logs.h create mode 100644 src/spdk/dpdk/drivers/net/axgbe/axgbe_mdio.c create mode 100644 src/spdk/dpdk/drivers/net/axgbe/axgbe_phy.h create mode 100644 src/spdk/dpdk/drivers/net/axgbe/axgbe_phy_impl.c create mode 100644 src/spdk/dpdk/drivers/net/axgbe/axgbe_regs.h create mode 100644 src/spdk/dpdk/drivers/net/axgbe/axgbe_rxtx.c create mode 100644 src/spdk/dpdk/drivers/net/axgbe/axgbe_rxtx.h create mode 100644 src/spdk/dpdk/drivers/net/axgbe/axgbe_rxtx_vec_sse.c create mode 100644 src/spdk/dpdk/drivers/net/axgbe/meson.build create mode 100644 
src/spdk/dpdk/drivers/net/axgbe/rte_pmd_axgbe_version.map create mode 100644 src/spdk/dpdk/drivers/net/bnx2x/Makefile create mode 100644 src/spdk/dpdk/drivers/net/bnx2x/bnx2x.c create mode 100644 src/spdk/dpdk/drivers/net/bnx2x/bnx2x.h create mode 100644 src/spdk/dpdk/drivers/net/bnx2x/bnx2x_ethdev.c create mode 100644 src/spdk/dpdk/drivers/net/bnx2x/bnx2x_ethdev.h create mode 100644 src/spdk/dpdk/drivers/net/bnx2x/bnx2x_logs.h create mode 100644 src/spdk/dpdk/drivers/net/bnx2x/bnx2x_osal.h create mode 100644 src/spdk/dpdk/drivers/net/bnx2x/bnx2x_rxtx.c create mode 100644 src/spdk/dpdk/drivers/net/bnx2x/bnx2x_rxtx.h create mode 100644 src/spdk/dpdk/drivers/net/bnx2x/bnx2x_stats.c create mode 100644 src/spdk/dpdk/drivers/net/bnx2x/bnx2x_stats.h create mode 100644 src/spdk/dpdk/drivers/net/bnx2x/bnx2x_vfpf.c create mode 100644 src/spdk/dpdk/drivers/net/bnx2x/bnx2x_vfpf.h create mode 100644 src/spdk/dpdk/drivers/net/bnx2x/ecore_fw_defs.h create mode 100644 src/spdk/dpdk/drivers/net/bnx2x/ecore_hsi.h create mode 100644 src/spdk/dpdk/drivers/net/bnx2x/ecore_init.h create mode 100644 src/spdk/dpdk/drivers/net/bnx2x/ecore_init_ops.h create mode 100644 src/spdk/dpdk/drivers/net/bnx2x/ecore_mfw_req.h create mode 100644 src/spdk/dpdk/drivers/net/bnx2x/ecore_reg.h create mode 100644 src/spdk/dpdk/drivers/net/bnx2x/ecore_sp.c create mode 100644 src/spdk/dpdk/drivers/net/bnx2x/ecore_sp.h create mode 100644 src/spdk/dpdk/drivers/net/bnx2x/elink.c create mode 100644 src/spdk/dpdk/drivers/net/bnx2x/elink.h create mode 100644 src/spdk/dpdk/drivers/net/bnx2x/meson.build create mode 100644 src/spdk/dpdk/drivers/net/bnx2x/rte_pmd_bnx2x_version.map create mode 100644 src/spdk/dpdk/drivers/net/bnxt/Makefile create mode 100644 src/spdk/dpdk/drivers/net/bnxt/bnxt.h create mode 100644 src/spdk/dpdk/drivers/net/bnxt/bnxt_cpr.c create mode 100644 src/spdk/dpdk/drivers/net/bnxt/bnxt_cpr.h create mode 100644 src/spdk/dpdk/drivers/net/bnxt/bnxt_ethdev.c create mode 100644 src/spdk/dpdk/drivers/net/bnxt/bnxt_filter.c create mode 100644 src/spdk/dpdk/drivers/net/bnxt/bnxt_filter.h create mode 100644 src/spdk/dpdk/drivers/net/bnxt/bnxt_flow.c create mode 100644 src/spdk/dpdk/drivers/net/bnxt/bnxt_hwrm.c create mode 100644 src/spdk/dpdk/drivers/net/bnxt/bnxt_hwrm.h create mode 100644 src/spdk/dpdk/drivers/net/bnxt/bnxt_irq.c create mode 100644 src/spdk/dpdk/drivers/net/bnxt/bnxt_irq.h create mode 100644 src/spdk/dpdk/drivers/net/bnxt/bnxt_nvm_defs.h create mode 100644 src/spdk/dpdk/drivers/net/bnxt/bnxt_ring.c create mode 100644 src/spdk/dpdk/drivers/net/bnxt/bnxt_ring.h create mode 100644 src/spdk/dpdk/drivers/net/bnxt/bnxt_rxq.c create mode 100644 src/spdk/dpdk/drivers/net/bnxt/bnxt_rxq.h create mode 100644 src/spdk/dpdk/drivers/net/bnxt/bnxt_rxr.c create mode 100644 src/spdk/dpdk/drivers/net/bnxt/bnxt_rxr.h create mode 100644 src/spdk/dpdk/drivers/net/bnxt/bnxt_rxtx_vec_sse.c create mode 100644 src/spdk/dpdk/drivers/net/bnxt/bnxt_stats.c create mode 100644 src/spdk/dpdk/drivers/net/bnxt/bnxt_stats.h create mode 100644 src/spdk/dpdk/drivers/net/bnxt/bnxt_txq.c create mode 100644 src/spdk/dpdk/drivers/net/bnxt/bnxt_txq.h create mode 100644 src/spdk/dpdk/drivers/net/bnxt/bnxt_txr.c create mode 100644 src/spdk/dpdk/drivers/net/bnxt/bnxt_txr.h create mode 100644 src/spdk/dpdk/drivers/net/bnxt/bnxt_util.c create mode 100644 src/spdk/dpdk/drivers/net/bnxt/bnxt_util.h create mode 100644 src/spdk/dpdk/drivers/net/bnxt/bnxt_vnic.c create mode 100644 src/spdk/dpdk/drivers/net/bnxt/bnxt_vnic.h create mode 100644 
src/spdk/dpdk/drivers/net/bnxt/hsi_struct_def_dpdk.h create mode 100644 src/spdk/dpdk/drivers/net/bnxt/meson.build create mode 100644 src/spdk/dpdk/drivers/net/bnxt/rte_pmd_bnxt.c create mode 100644 src/spdk/dpdk/drivers/net/bnxt/rte_pmd_bnxt.h create mode 100644 src/spdk/dpdk/drivers/net/bnxt/rte_pmd_bnxt_version.map create mode 100644 src/spdk/dpdk/drivers/net/bnxt/tf_core/bitalloc.c create mode 100644 src/spdk/dpdk/drivers/net/bnxt/tf_core/bitalloc.h create mode 100644 src/spdk/dpdk/drivers/net/bnxt/tf_core/hwrm_tf.h create mode 100644 src/spdk/dpdk/drivers/net/bnxt/tf_core/lookup3.h create mode 100644 src/spdk/dpdk/drivers/net/bnxt/tf_core/rand.c create mode 100644 src/spdk/dpdk/drivers/net/bnxt/tf_core/rand.h create mode 100644 src/spdk/dpdk/drivers/net/bnxt/tf_core/stack.c create mode 100644 src/spdk/dpdk/drivers/net/bnxt/tf_core/stack.h create mode 100644 src/spdk/dpdk/drivers/net/bnxt/tf_core/tf_core.c create mode 100644 src/spdk/dpdk/drivers/net/bnxt/tf_core/tf_core.h create mode 100644 src/spdk/dpdk/drivers/net/bnxt/tf_core/tf_em.c create mode 100644 src/spdk/dpdk/drivers/net/bnxt/tf_core/tf_em.h create mode 100644 src/spdk/dpdk/drivers/net/bnxt/tf_core/tf_ext_flow_handle.h create mode 100644 src/spdk/dpdk/drivers/net/bnxt/tf_core/tf_msg.c create mode 100644 src/spdk/dpdk/drivers/net/bnxt/tf_core/tf_msg.h create mode 100644 src/spdk/dpdk/drivers/net/bnxt/tf_core/tf_msg_common.h create mode 100644 src/spdk/dpdk/drivers/net/bnxt/tf_core/tf_project.h create mode 100644 src/spdk/dpdk/drivers/net/bnxt/tf_core/tf_resources.h create mode 100644 src/spdk/dpdk/drivers/net/bnxt/tf_core/tf_rm.c create mode 100644 src/spdk/dpdk/drivers/net/bnxt/tf_core/tf_rm.h create mode 100644 src/spdk/dpdk/drivers/net/bnxt/tf_core/tf_session.h create mode 100644 src/spdk/dpdk/drivers/net/bnxt/tf_core/tf_tbl.c create mode 100644 src/spdk/dpdk/drivers/net/bnxt/tf_core/tf_tbl.h create mode 100644 src/spdk/dpdk/drivers/net/bnxt/tf_core/tfp.c create mode 100644 src/spdk/dpdk/drivers/net/bnxt/tf_core/tfp.h create mode 100644 src/spdk/dpdk/drivers/net/bnxt/tf_ulp/bnxt_tf_common.h create mode 100644 src/spdk/dpdk/drivers/net/bnxt/tf_ulp/bnxt_ulp.c create mode 100644 src/spdk/dpdk/drivers/net/bnxt/tf_ulp/bnxt_ulp.h create mode 100644 src/spdk/dpdk/drivers/net/bnxt/tf_ulp/bnxt_ulp_flow.c create mode 100644 src/spdk/dpdk/drivers/net/bnxt/tf_ulp/ulp_flow_db.c create mode 100644 src/spdk/dpdk/drivers/net/bnxt/tf_ulp/ulp_flow_db.h create mode 100644 src/spdk/dpdk/drivers/net/bnxt/tf_ulp/ulp_mapper.c create mode 100644 src/spdk/dpdk/drivers/net/bnxt/tf_ulp/ulp_mapper.h create mode 100644 src/spdk/dpdk/drivers/net/bnxt/tf_ulp/ulp_mark_mgr.c create mode 100644 src/spdk/dpdk/drivers/net/bnxt/tf_ulp/ulp_mark_mgr.h create mode 100644 src/spdk/dpdk/drivers/net/bnxt/tf_ulp/ulp_matcher.c create mode 100644 src/spdk/dpdk/drivers/net/bnxt/tf_ulp/ulp_matcher.h create mode 100644 src/spdk/dpdk/drivers/net/bnxt/tf_ulp/ulp_port_db.c create mode 100644 src/spdk/dpdk/drivers/net/bnxt/tf_ulp/ulp_port_db.h create mode 100644 src/spdk/dpdk/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c create mode 100644 src/spdk/dpdk/drivers/net/bnxt/tf_ulp/ulp_rte_parser.h create mode 100644 src/spdk/dpdk/drivers/net/bnxt/tf_ulp/ulp_template_db.c create mode 100644 src/spdk/dpdk/drivers/net/bnxt/tf_ulp/ulp_template_db.h create mode 100644 src/spdk/dpdk/drivers/net/bnxt/tf_ulp/ulp_template_field_db.h create mode 100644 src/spdk/dpdk/drivers/net/bnxt/tf_ulp/ulp_template_struct.h create mode 100644 src/spdk/dpdk/drivers/net/bnxt/tf_ulp/ulp_utils.c create mode 
100644 src/spdk/dpdk/drivers/net/bnxt/tf_ulp/ulp_utils.h create mode 100644 src/spdk/dpdk/drivers/net/bonding/Makefile create mode 100644 src/spdk/dpdk/drivers/net/bonding/eth_bond_8023ad_private.h create mode 100644 src/spdk/dpdk/drivers/net/bonding/eth_bond_private.h create mode 100644 src/spdk/dpdk/drivers/net/bonding/meson.build create mode 100644 src/spdk/dpdk/drivers/net/bonding/rte_eth_bond.h create mode 100644 src/spdk/dpdk/drivers/net/bonding/rte_eth_bond_8023ad.c create mode 100644 src/spdk/dpdk/drivers/net/bonding/rte_eth_bond_8023ad.h create mode 100644 src/spdk/dpdk/drivers/net/bonding/rte_eth_bond_alb.c create mode 100644 src/spdk/dpdk/drivers/net/bonding/rte_eth_bond_alb.h create mode 100644 src/spdk/dpdk/drivers/net/bonding/rte_eth_bond_api.c create mode 100644 src/spdk/dpdk/drivers/net/bonding/rte_eth_bond_args.c create mode 100644 src/spdk/dpdk/drivers/net/bonding/rte_eth_bond_flow.c create mode 100644 src/spdk/dpdk/drivers/net/bonding/rte_eth_bond_pmd.c create mode 100644 src/spdk/dpdk/drivers/net/bonding/rte_pmd_bond_version.map create mode 100644 src/spdk/dpdk/drivers/net/cxgbe/Makefile create mode 100644 src/spdk/dpdk/drivers/net/cxgbe/base/adapter.h create mode 100644 src/spdk/dpdk/drivers/net/cxgbe/base/common.h create mode 100644 src/spdk/dpdk/drivers/net/cxgbe/base/t4_chip_type.h create mode 100644 src/spdk/dpdk/drivers/net/cxgbe/base/t4_hw.c create mode 100644 src/spdk/dpdk/drivers/net/cxgbe/base/t4_hw.h create mode 100644 src/spdk/dpdk/drivers/net/cxgbe/base/t4_msg.h create mode 100644 src/spdk/dpdk/drivers/net/cxgbe/base/t4_pci_id_tbl.h create mode 100644 src/spdk/dpdk/drivers/net/cxgbe/base/t4_regs.h create mode 100644 src/spdk/dpdk/drivers/net/cxgbe/base/t4_regs_values.h create mode 100644 src/spdk/dpdk/drivers/net/cxgbe/base/t4_tcb.h create mode 100644 src/spdk/dpdk/drivers/net/cxgbe/base/t4fw_interface.h create mode 100644 src/spdk/dpdk/drivers/net/cxgbe/base/t4vf_hw.c create mode 100644 src/spdk/dpdk/drivers/net/cxgbe/base/t4vf_hw.h create mode 100644 src/spdk/dpdk/drivers/net/cxgbe/clip_tbl.c create mode 100644 src/spdk/dpdk/drivers/net/cxgbe/clip_tbl.h create mode 100644 src/spdk/dpdk/drivers/net/cxgbe/cxgbe.h create mode 100644 src/spdk/dpdk/drivers/net/cxgbe/cxgbe_compat.h create mode 100644 src/spdk/dpdk/drivers/net/cxgbe/cxgbe_ethdev.c create mode 100644 src/spdk/dpdk/drivers/net/cxgbe/cxgbe_filter.c create mode 100644 src/spdk/dpdk/drivers/net/cxgbe/cxgbe_filter.h create mode 100644 src/spdk/dpdk/drivers/net/cxgbe/cxgbe_flow.c create mode 100644 src/spdk/dpdk/drivers/net/cxgbe/cxgbe_flow.h create mode 100644 src/spdk/dpdk/drivers/net/cxgbe/cxgbe_main.c create mode 100644 src/spdk/dpdk/drivers/net/cxgbe/cxgbe_ofld.h create mode 100644 src/spdk/dpdk/drivers/net/cxgbe/cxgbe_pfvf.h create mode 100644 src/spdk/dpdk/drivers/net/cxgbe/cxgbevf_ethdev.c create mode 100644 src/spdk/dpdk/drivers/net/cxgbe/cxgbevf_main.c create mode 100644 src/spdk/dpdk/drivers/net/cxgbe/l2t.c create mode 100644 src/spdk/dpdk/drivers/net/cxgbe/l2t.h create mode 100644 src/spdk/dpdk/drivers/net/cxgbe/meson.build create mode 100644 src/spdk/dpdk/drivers/net/cxgbe/mps_tcam.c create mode 100644 src/spdk/dpdk/drivers/net/cxgbe/mps_tcam.h create mode 100644 src/spdk/dpdk/drivers/net/cxgbe/rte_pmd_cxgbe_version.map create mode 100644 src/spdk/dpdk/drivers/net/cxgbe/sge.c create mode 100644 src/spdk/dpdk/drivers/net/cxgbe/smt.c create mode 100644 src/spdk/dpdk/drivers/net/cxgbe/smt.h create mode 100644 src/spdk/dpdk/drivers/net/dpaa/Makefile create mode 100644 
src/spdk/dpdk/drivers/net/dpaa/dpaa_ethdev.c create mode 100644 src/spdk/dpdk/drivers/net/dpaa/dpaa_ethdev.h create mode 100644 src/spdk/dpdk/drivers/net/dpaa/dpaa_rxtx.c create mode 100644 src/spdk/dpdk/drivers/net/dpaa/dpaa_rxtx.h create mode 100644 src/spdk/dpdk/drivers/net/dpaa/meson.build create mode 100644 src/spdk/dpdk/drivers/net/dpaa/rte_pmd_dpaa.h create mode 100644 src/spdk/dpdk/drivers/net/dpaa/rte_pmd_dpaa_version.map create mode 100644 src/spdk/dpdk/drivers/net/dpaa2/Makefile create mode 100644 src/spdk/dpdk/drivers/net/dpaa2/base/dpaa2_hw_dpni.c create mode 100644 src/spdk/dpdk/drivers/net/dpaa2/base/dpaa2_hw_dpni_annot.h create mode 100644 src/spdk/dpdk/drivers/net/dpaa2/dpaa2_ethdev.c create mode 100644 src/spdk/dpdk/drivers/net/dpaa2/dpaa2_ethdev.h create mode 100644 src/spdk/dpdk/drivers/net/dpaa2/dpaa2_flow.c create mode 100644 src/spdk/dpdk/drivers/net/dpaa2/dpaa2_mux.c create mode 100644 src/spdk/dpdk/drivers/net/dpaa2/dpaa2_pmd_logs.h create mode 100644 src/spdk/dpdk/drivers/net/dpaa2/dpaa2_ptp.c create mode 100644 src/spdk/dpdk/drivers/net/dpaa2/dpaa2_rxtx.c create mode 100644 src/spdk/dpdk/drivers/net/dpaa2/dpaa2_sparser.c create mode 100644 src/spdk/dpdk/drivers/net/dpaa2/dpaa2_sparser.h create mode 100644 src/spdk/dpdk/drivers/net/dpaa2/mc/dpdmux.c create mode 100644 src/spdk/dpdk/drivers/net/dpaa2/mc/dpkg.c create mode 100644 src/spdk/dpdk/drivers/net/dpaa2/mc/dpni.c create mode 100644 src/spdk/dpdk/drivers/net/dpaa2/mc/dprtc.c create mode 100644 src/spdk/dpdk/drivers/net/dpaa2/mc/fsl_dpdmux.h create mode 100644 src/spdk/dpdk/drivers/net/dpaa2/mc/fsl_dpdmux_cmd.h create mode 100644 src/spdk/dpdk/drivers/net/dpaa2/mc/fsl_dpkg.h create mode 100644 src/spdk/dpdk/drivers/net/dpaa2/mc/fsl_dpni.h create mode 100644 src/spdk/dpdk/drivers/net/dpaa2/mc/fsl_dpni_cmd.h create mode 100644 src/spdk/dpdk/drivers/net/dpaa2/mc/fsl_dprtc.h create mode 100644 src/spdk/dpdk/drivers/net/dpaa2/mc/fsl_dprtc_cmd.h create mode 100644 src/spdk/dpdk/drivers/net/dpaa2/mc/fsl_net.h create mode 100644 src/spdk/dpdk/drivers/net/dpaa2/meson.build create mode 100644 src/spdk/dpdk/drivers/net/dpaa2/rte_pmd_dpaa2.h create mode 100644 src/spdk/dpdk/drivers/net/dpaa2/rte_pmd_dpaa2_version.map create mode 100644 src/spdk/dpdk/drivers/net/e1000/Makefile create mode 100644 src/spdk/dpdk/drivers/net/e1000/base/README create mode 100644 src/spdk/dpdk/drivers/net/e1000/base/e1000_80003es2lan.c create mode 100644 src/spdk/dpdk/drivers/net/e1000/base/e1000_80003es2lan.h create mode 100644 src/spdk/dpdk/drivers/net/e1000/base/e1000_82540.c create mode 100644 src/spdk/dpdk/drivers/net/e1000/base/e1000_82541.c create mode 100644 src/spdk/dpdk/drivers/net/e1000/base/e1000_82541.h create mode 100644 src/spdk/dpdk/drivers/net/e1000/base/e1000_82542.c create mode 100644 src/spdk/dpdk/drivers/net/e1000/base/e1000_82543.c create mode 100644 src/spdk/dpdk/drivers/net/e1000/base/e1000_82543.h create mode 100644 src/spdk/dpdk/drivers/net/e1000/base/e1000_82571.c create mode 100644 src/spdk/dpdk/drivers/net/e1000/base/e1000_82571.h create mode 100644 src/spdk/dpdk/drivers/net/e1000/base/e1000_82575.c create mode 100644 src/spdk/dpdk/drivers/net/e1000/base/e1000_82575.h create mode 100644 src/spdk/dpdk/drivers/net/e1000/base/e1000_api.c create mode 100644 src/spdk/dpdk/drivers/net/e1000/base/e1000_api.h create mode 100644 src/spdk/dpdk/drivers/net/e1000/base/e1000_defines.h create mode 100644 src/spdk/dpdk/drivers/net/e1000/base/e1000_hw.h create mode 100644 src/spdk/dpdk/drivers/net/e1000/base/e1000_i210.c create mode 
100644 src/spdk/dpdk/drivers/net/e1000/base/e1000_i210.h create mode 100644 src/spdk/dpdk/drivers/net/e1000/base/e1000_ich8lan.c create mode 100644 src/spdk/dpdk/drivers/net/e1000/base/e1000_ich8lan.h create mode 100644 src/spdk/dpdk/drivers/net/e1000/base/e1000_mac.c create mode 100644 src/spdk/dpdk/drivers/net/e1000/base/e1000_mac.h create mode 100644 src/spdk/dpdk/drivers/net/e1000/base/e1000_manage.c create mode 100644 src/spdk/dpdk/drivers/net/e1000/base/e1000_manage.h create mode 100644 src/spdk/dpdk/drivers/net/e1000/base/e1000_mbx.c create mode 100644 src/spdk/dpdk/drivers/net/e1000/base/e1000_mbx.h create mode 100644 src/spdk/dpdk/drivers/net/e1000/base/e1000_nvm.c create mode 100644 src/spdk/dpdk/drivers/net/e1000/base/e1000_nvm.h create mode 100644 src/spdk/dpdk/drivers/net/e1000/base/e1000_osdep.c create mode 100644 src/spdk/dpdk/drivers/net/e1000/base/e1000_osdep.h create mode 100644 src/spdk/dpdk/drivers/net/e1000/base/e1000_phy.c create mode 100644 src/spdk/dpdk/drivers/net/e1000/base/e1000_phy.h create mode 100644 src/spdk/dpdk/drivers/net/e1000/base/e1000_regs.h create mode 100644 src/spdk/dpdk/drivers/net/e1000/base/e1000_vf.c create mode 100644 src/spdk/dpdk/drivers/net/e1000/base/e1000_vf.h create mode 100644 src/spdk/dpdk/drivers/net/e1000/base/meson.build create mode 100644 src/spdk/dpdk/drivers/net/e1000/e1000_ethdev.h create mode 100644 src/spdk/dpdk/drivers/net/e1000/e1000_logs.c create mode 100644 src/spdk/dpdk/drivers/net/e1000/e1000_logs.h create mode 100644 src/spdk/dpdk/drivers/net/e1000/em_ethdev.c create mode 100644 src/spdk/dpdk/drivers/net/e1000/em_rxtx.c create mode 100644 src/spdk/dpdk/drivers/net/e1000/igb_ethdev.c create mode 100644 src/spdk/dpdk/drivers/net/e1000/igb_flow.c create mode 100644 src/spdk/dpdk/drivers/net/e1000/igb_pf.c create mode 100644 src/spdk/dpdk/drivers/net/e1000/igb_regs.h create mode 100644 src/spdk/dpdk/drivers/net/e1000/igb_rxtx.c create mode 100644 src/spdk/dpdk/drivers/net/e1000/meson.build create mode 100644 src/spdk/dpdk/drivers/net/e1000/rte_pmd_e1000_version.map create mode 100644 src/spdk/dpdk/drivers/net/ena/Makefile create mode 100644 src/spdk/dpdk/drivers/net/ena/base/ena_com.c create mode 100644 src/spdk/dpdk/drivers/net/ena/base/ena_com.h create mode 100644 src/spdk/dpdk/drivers/net/ena/base/ena_defs/ena_admin_defs.h create mode 100644 src/spdk/dpdk/drivers/net/ena/base/ena_defs/ena_common_defs.h create mode 100644 src/spdk/dpdk/drivers/net/ena/base/ena_defs/ena_eth_io_defs.h create mode 100644 src/spdk/dpdk/drivers/net/ena/base/ena_defs/ena_gen_info.h create mode 100644 src/spdk/dpdk/drivers/net/ena/base/ena_defs/ena_includes.h create mode 100644 src/spdk/dpdk/drivers/net/ena/base/ena_defs/ena_regs_defs.h create mode 100644 src/spdk/dpdk/drivers/net/ena/base/ena_eth_com.c create mode 100644 src/spdk/dpdk/drivers/net/ena/base/ena_eth_com.h create mode 100644 src/spdk/dpdk/drivers/net/ena/base/ena_plat.h create mode 100644 src/spdk/dpdk/drivers/net/ena/base/ena_plat_dpdk.h create mode 100644 src/spdk/dpdk/drivers/net/ena/ena_ethdev.c create mode 100644 src/spdk/dpdk/drivers/net/ena/ena_ethdev.h create mode 100644 src/spdk/dpdk/drivers/net/ena/ena_logs.h create mode 100644 src/spdk/dpdk/drivers/net/ena/ena_platform.h create mode 100644 src/spdk/dpdk/drivers/net/ena/meson.build create mode 100644 src/spdk/dpdk/drivers/net/ena/rte_pmd_ena_version.map create mode 100644 src/spdk/dpdk/drivers/net/enetc/Makefile create mode 100644 src/spdk/dpdk/drivers/net/enetc/base/enetc_hw.h create mode 100644 
src/spdk/dpdk/drivers/net/enetc/enetc.h create mode 100644 src/spdk/dpdk/drivers/net/enetc/enetc_ethdev.c create mode 100644 src/spdk/dpdk/drivers/net/enetc/enetc_logs.h create mode 100644 src/spdk/dpdk/drivers/net/enetc/enetc_rxtx.c create mode 100644 src/spdk/dpdk/drivers/net/enetc/meson.build create mode 100644 src/spdk/dpdk/drivers/net/enetc/rte_pmd_enetc_version.map create mode 100644 src/spdk/dpdk/drivers/net/enic/Makefile create mode 100644 src/spdk/dpdk/drivers/net/enic/base/cq_desc.h create mode 100644 src/spdk/dpdk/drivers/net/enic/base/cq_enet_desc.h create mode 100644 src/spdk/dpdk/drivers/net/enic/base/rq_enet_desc.h create mode 100644 src/spdk/dpdk/drivers/net/enic/base/vnic_cq.c create mode 100644 src/spdk/dpdk/drivers/net/enic/base/vnic_cq.h create mode 100644 src/spdk/dpdk/drivers/net/enic/base/vnic_dev.c create mode 100644 src/spdk/dpdk/drivers/net/enic/base/vnic_dev.h create mode 100644 src/spdk/dpdk/drivers/net/enic/base/vnic_devcmd.h create mode 100644 src/spdk/dpdk/drivers/net/enic/base/vnic_enet.h create mode 100644 src/spdk/dpdk/drivers/net/enic/base/vnic_flowman.h create mode 100644 src/spdk/dpdk/drivers/net/enic/base/vnic_intr.c create mode 100644 src/spdk/dpdk/drivers/net/enic/base/vnic_intr.h create mode 100644 src/spdk/dpdk/drivers/net/enic/base/vnic_nic.h create mode 100644 src/spdk/dpdk/drivers/net/enic/base/vnic_resource.h create mode 100644 src/spdk/dpdk/drivers/net/enic/base/vnic_rq.c create mode 100644 src/spdk/dpdk/drivers/net/enic/base/vnic_rq.h create mode 100644 src/spdk/dpdk/drivers/net/enic/base/vnic_rss.h create mode 100644 src/spdk/dpdk/drivers/net/enic/base/vnic_stats.h create mode 100644 src/spdk/dpdk/drivers/net/enic/base/vnic_wq.c create mode 100644 src/spdk/dpdk/drivers/net/enic/base/vnic_wq.h create mode 100644 src/spdk/dpdk/drivers/net/enic/base/wq_enet_desc.h create mode 100644 src/spdk/dpdk/drivers/net/enic/enic.h create mode 100644 src/spdk/dpdk/drivers/net/enic/enic_clsf.c create mode 100644 src/spdk/dpdk/drivers/net/enic/enic_compat.h create mode 100644 src/spdk/dpdk/drivers/net/enic/enic_ethdev.c create mode 100644 src/spdk/dpdk/drivers/net/enic/enic_flow.c create mode 100644 src/spdk/dpdk/drivers/net/enic/enic_fm_flow.c create mode 100644 src/spdk/dpdk/drivers/net/enic/enic_main.c create mode 100644 src/spdk/dpdk/drivers/net/enic/enic_res.c create mode 100644 src/spdk/dpdk/drivers/net/enic/enic_res.h create mode 100644 src/spdk/dpdk/drivers/net/enic/enic_rxtx.c create mode 100644 src/spdk/dpdk/drivers/net/enic/enic_rxtx_common.h create mode 100644 src/spdk/dpdk/drivers/net/enic/enic_rxtx_vec_avx2.c create mode 100644 src/spdk/dpdk/drivers/net/enic/meson.build create mode 100644 src/spdk/dpdk/drivers/net/enic/rte_pmd_enic_version.map create mode 100644 src/spdk/dpdk/drivers/net/failsafe/Makefile create mode 100644 src/spdk/dpdk/drivers/net/failsafe/failsafe.c create mode 100644 src/spdk/dpdk/drivers/net/failsafe/failsafe_args.c create mode 100644 src/spdk/dpdk/drivers/net/failsafe/failsafe_eal.c create mode 100644 src/spdk/dpdk/drivers/net/failsafe/failsafe_ether.c create mode 100644 src/spdk/dpdk/drivers/net/failsafe/failsafe_flow.c create mode 100644 src/spdk/dpdk/drivers/net/failsafe/failsafe_intr.c create mode 100644 src/spdk/dpdk/drivers/net/failsafe/failsafe_ops.c create mode 100644 src/spdk/dpdk/drivers/net/failsafe/failsafe_private.h create mode 100644 src/spdk/dpdk/drivers/net/failsafe/failsafe_rxtx.c create mode 100644 src/spdk/dpdk/drivers/net/failsafe/meson.build create mode 100644 
src/spdk/dpdk/drivers/net/failsafe/rte_pmd_failsafe_version.map create mode 100644 src/spdk/dpdk/drivers/net/fm10k/Makefile create mode 100644 src/spdk/dpdk/drivers/net/fm10k/base/fm10k_api.c create mode 100644 src/spdk/dpdk/drivers/net/fm10k/base/fm10k_api.h create mode 100644 src/spdk/dpdk/drivers/net/fm10k/base/fm10k_common.c create mode 100644 src/spdk/dpdk/drivers/net/fm10k/base/fm10k_common.h create mode 100644 src/spdk/dpdk/drivers/net/fm10k/base/fm10k_mbx.c create mode 100644 src/spdk/dpdk/drivers/net/fm10k/base/fm10k_mbx.h create mode 100644 src/spdk/dpdk/drivers/net/fm10k/base/fm10k_osdep.h create mode 100644 src/spdk/dpdk/drivers/net/fm10k/base/fm10k_pf.c create mode 100644 src/spdk/dpdk/drivers/net/fm10k/base/fm10k_pf.h create mode 100644 src/spdk/dpdk/drivers/net/fm10k/base/fm10k_tlv.c create mode 100644 src/spdk/dpdk/drivers/net/fm10k/base/fm10k_tlv.h create mode 100644 src/spdk/dpdk/drivers/net/fm10k/base/fm10k_type.h create mode 100644 src/spdk/dpdk/drivers/net/fm10k/base/fm10k_vf.c create mode 100644 src/spdk/dpdk/drivers/net/fm10k/base/fm10k_vf.h create mode 100644 src/spdk/dpdk/drivers/net/fm10k/base/meson.build create mode 100644 src/spdk/dpdk/drivers/net/fm10k/fm10k.h create mode 100644 src/spdk/dpdk/drivers/net/fm10k/fm10k_ethdev.c create mode 100644 src/spdk/dpdk/drivers/net/fm10k/fm10k_logs.h create mode 100644 src/spdk/dpdk/drivers/net/fm10k/fm10k_rxtx.c create mode 100644 src/spdk/dpdk/drivers/net/fm10k/fm10k_rxtx_vec.c create mode 100644 src/spdk/dpdk/drivers/net/fm10k/meson.build create mode 100644 src/spdk/dpdk/drivers/net/fm10k/rte_pmd_fm10k_version.map create mode 100644 src/spdk/dpdk/drivers/net/hinic/Makefile create mode 100644 src/spdk/dpdk/drivers/net/hinic/base/hinic_compat.h create mode 100644 src/spdk/dpdk/drivers/net/hinic/base/hinic_csr.h create mode 100644 src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_api_cmd.c create mode 100644 src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_api_cmd.h create mode 100644 src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_cfg.c create mode 100644 src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_cfg.h create mode 100644 src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_cmd.h create mode 100644 src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_cmdq.c create mode 100644 src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_cmdq.h create mode 100644 src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_eqs.c create mode 100644 src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_eqs.h create mode 100644 src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_hwdev.c create mode 100644 src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_hwdev.h create mode 100644 src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_hwif.c create mode 100644 src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_hwif.h create mode 100644 src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_mbox.c create mode 100644 src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_mbox.h create mode 100644 src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_mgmt.c create mode 100644 src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_mgmt.h create mode 100644 src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_niccfg.c create mode 100644 src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_niccfg.h create mode 100644 src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_nicio.c create mode 100644 src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_nicio.h create mode 100644 src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_wq.c create mode 100644 src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_wq.h create mode 100644 
src/spdk/dpdk/drivers/net/hinic/base/meson.build create mode 100644 src/spdk/dpdk/drivers/net/hinic/hinic_pmd_ethdev.c create mode 100644 src/spdk/dpdk/drivers/net/hinic/hinic_pmd_ethdev.h create mode 100644 src/spdk/dpdk/drivers/net/hinic/hinic_pmd_flow.c create mode 100644 src/spdk/dpdk/drivers/net/hinic/hinic_pmd_rx.c create mode 100644 src/spdk/dpdk/drivers/net/hinic/hinic_pmd_rx.h create mode 100644 src/spdk/dpdk/drivers/net/hinic/hinic_pmd_tx.c create mode 100644 src/spdk/dpdk/drivers/net/hinic/hinic_pmd_tx.h create mode 100644 src/spdk/dpdk/drivers/net/hinic/meson.build create mode 100644 src/spdk/dpdk/drivers/net/hinic/rte_pmd_hinic_version.map create mode 100644 src/spdk/dpdk/drivers/net/hns3/Makefile create mode 100644 src/spdk/dpdk/drivers/net/hns3/hns3_cmd.c create mode 100644 src/spdk/dpdk/drivers/net/hns3/hns3_cmd.h create mode 100644 src/spdk/dpdk/drivers/net/hns3/hns3_dcb.c create mode 100644 src/spdk/dpdk/drivers/net/hns3/hns3_dcb.h create mode 100644 src/spdk/dpdk/drivers/net/hns3/hns3_ethdev.c create mode 100644 src/spdk/dpdk/drivers/net/hns3/hns3_ethdev.h create mode 100644 src/spdk/dpdk/drivers/net/hns3/hns3_ethdev_vf.c create mode 100644 src/spdk/dpdk/drivers/net/hns3/hns3_fdir.c create mode 100644 src/spdk/dpdk/drivers/net/hns3/hns3_fdir.h create mode 100644 src/spdk/dpdk/drivers/net/hns3/hns3_flow.c create mode 100644 src/spdk/dpdk/drivers/net/hns3/hns3_intr.c create mode 100644 src/spdk/dpdk/drivers/net/hns3/hns3_intr.h create mode 100644 src/spdk/dpdk/drivers/net/hns3/hns3_logs.h create mode 100644 src/spdk/dpdk/drivers/net/hns3/hns3_mbx.c create mode 100644 src/spdk/dpdk/drivers/net/hns3/hns3_mbx.h create mode 100644 src/spdk/dpdk/drivers/net/hns3/hns3_mp.c create mode 100644 src/spdk/dpdk/drivers/net/hns3/hns3_mp.h create mode 100644 src/spdk/dpdk/drivers/net/hns3/hns3_regs.c create mode 100644 src/spdk/dpdk/drivers/net/hns3/hns3_regs.h create mode 100644 src/spdk/dpdk/drivers/net/hns3/hns3_rss.c create mode 100644 src/spdk/dpdk/drivers/net/hns3/hns3_rss.h create mode 100644 src/spdk/dpdk/drivers/net/hns3/hns3_rxtx.c create mode 100644 src/spdk/dpdk/drivers/net/hns3/hns3_rxtx.h create mode 100644 src/spdk/dpdk/drivers/net/hns3/hns3_stats.c create mode 100644 src/spdk/dpdk/drivers/net/hns3/hns3_stats.h create mode 100644 src/spdk/dpdk/drivers/net/hns3/meson.build create mode 100644 src/spdk/dpdk/drivers/net/hns3/rte_pmd_hns3_version.map create mode 100644 src/spdk/dpdk/drivers/net/i40e/Makefile create mode 100644 src/spdk/dpdk/drivers/net/i40e/base/README create mode 100644 src/spdk/dpdk/drivers/net/i40e/base/i40e_adminq.c create mode 100644 src/spdk/dpdk/drivers/net/i40e/base/i40e_adminq.h create mode 100644 src/spdk/dpdk/drivers/net/i40e/base/i40e_adminq_cmd.h create mode 100644 src/spdk/dpdk/drivers/net/i40e/base/i40e_alloc.h create mode 100644 src/spdk/dpdk/drivers/net/i40e/base/i40e_common.c create mode 100644 src/spdk/dpdk/drivers/net/i40e/base/i40e_dcb.c create mode 100644 src/spdk/dpdk/drivers/net/i40e/base/i40e_dcb.h create mode 100644 src/spdk/dpdk/drivers/net/i40e/base/i40e_devids.h create mode 100644 src/spdk/dpdk/drivers/net/i40e/base/i40e_diag.c create mode 100644 src/spdk/dpdk/drivers/net/i40e/base/i40e_diag.h create mode 100644 src/spdk/dpdk/drivers/net/i40e/base/i40e_hmc.c create mode 100644 src/spdk/dpdk/drivers/net/i40e/base/i40e_hmc.h create mode 100644 src/spdk/dpdk/drivers/net/i40e/base/i40e_lan_hmc.c create mode 100644 src/spdk/dpdk/drivers/net/i40e/base/i40e_lan_hmc.h create mode 100644 src/spdk/dpdk/drivers/net/i40e/base/i40e_nvm.c create 
mode 100644 src/spdk/dpdk/drivers/net/i40e/base/i40e_osdep.h create mode 100644 src/spdk/dpdk/drivers/net/i40e/base/i40e_prototype.h create mode 100644 src/spdk/dpdk/drivers/net/i40e/base/i40e_register.h create mode 100644 src/spdk/dpdk/drivers/net/i40e/base/i40e_status.h create mode 100644 src/spdk/dpdk/drivers/net/i40e/base/i40e_type.h create mode 100644 src/spdk/dpdk/drivers/net/i40e/base/meson.build create mode 100644 src/spdk/dpdk/drivers/net/i40e/base/virtchnl.h create mode 100644 src/spdk/dpdk/drivers/net/i40e/i40e_ethdev.c create mode 100644 src/spdk/dpdk/drivers/net/i40e/i40e_ethdev.h create mode 100644 src/spdk/dpdk/drivers/net/i40e/i40e_ethdev_vf.c create mode 100644 src/spdk/dpdk/drivers/net/i40e/i40e_fdir.c create mode 100644 src/spdk/dpdk/drivers/net/i40e/i40e_flow.c create mode 100644 src/spdk/dpdk/drivers/net/i40e/i40e_logs.h create mode 100644 src/spdk/dpdk/drivers/net/i40e/i40e_pf.c create mode 100644 src/spdk/dpdk/drivers/net/i40e/i40e_pf.h create mode 100644 src/spdk/dpdk/drivers/net/i40e/i40e_regs.h create mode 100644 src/spdk/dpdk/drivers/net/i40e/i40e_rxtx.c create mode 100644 src/spdk/dpdk/drivers/net/i40e/i40e_rxtx.h create mode 100644 src/spdk/dpdk/drivers/net/i40e/i40e_rxtx_vec_altivec.c create mode 100644 src/spdk/dpdk/drivers/net/i40e/i40e_rxtx_vec_avx2.c create mode 100644 src/spdk/dpdk/drivers/net/i40e/i40e_rxtx_vec_common.h create mode 100644 src/spdk/dpdk/drivers/net/i40e/i40e_rxtx_vec_neon.c create mode 100644 src/spdk/dpdk/drivers/net/i40e/i40e_rxtx_vec_sse.c create mode 100644 src/spdk/dpdk/drivers/net/i40e/i40e_tm.c create mode 100644 src/spdk/dpdk/drivers/net/i40e/i40e_vf_representor.c create mode 100644 src/spdk/dpdk/drivers/net/i40e/meson.build create mode 100644 src/spdk/dpdk/drivers/net/i40e/rte_pmd_i40e.c create mode 100644 src/spdk/dpdk/drivers/net/i40e/rte_pmd_i40e.h create mode 100644 src/spdk/dpdk/drivers/net/i40e/rte_pmd_i40e_version.map create mode 100644 src/spdk/dpdk/drivers/net/iavf/Makefile create mode 100644 src/spdk/dpdk/drivers/net/iavf/iavf.h create mode 100644 src/spdk/dpdk/drivers/net/iavf/iavf_ethdev.c create mode 100644 src/spdk/dpdk/drivers/net/iavf/iavf_fdir.c create mode 100644 src/spdk/dpdk/drivers/net/iavf/iavf_generic_flow.c create mode 100644 src/spdk/dpdk/drivers/net/iavf/iavf_generic_flow.h create mode 100644 src/spdk/dpdk/drivers/net/iavf/iavf_hash.c create mode 100644 src/spdk/dpdk/drivers/net/iavf/iavf_log.h create mode 100644 src/spdk/dpdk/drivers/net/iavf/iavf_rxtx.c create mode 100644 src/spdk/dpdk/drivers/net/iavf/iavf_rxtx.h create mode 100644 src/spdk/dpdk/drivers/net/iavf/iavf_rxtx_vec_avx2.c create mode 100644 src/spdk/dpdk/drivers/net/iavf/iavf_rxtx_vec_common.h create mode 100644 src/spdk/dpdk/drivers/net/iavf/iavf_rxtx_vec_sse.c create mode 100644 src/spdk/dpdk/drivers/net/iavf/iavf_vchnl.c create mode 100644 src/spdk/dpdk/drivers/net/iavf/meson.build create mode 100644 src/spdk/dpdk/drivers/net/iavf/rte_pmd_iavf_version.map create mode 100644 src/spdk/dpdk/drivers/net/ice/Makefile create mode 100644 src/spdk/dpdk/drivers/net/ice/base/README create mode 100644 src/spdk/dpdk/drivers/net/ice/base/ice_acl.c create mode 100644 src/spdk/dpdk/drivers/net/ice/base/ice_acl.h create mode 100644 src/spdk/dpdk/drivers/net/ice/base/ice_acl_ctrl.c create mode 100644 src/spdk/dpdk/drivers/net/ice/base/ice_adminq_cmd.h create mode 100644 src/spdk/dpdk/drivers/net/ice/base/ice_alloc.h create mode 100644 src/spdk/dpdk/drivers/net/ice/base/ice_bitops.h create mode 100644 src/spdk/dpdk/drivers/net/ice/base/ice_common.c create 
mode 100644 src/spdk/dpdk/drivers/net/ice/base/ice_common.h create mode 100644 src/spdk/dpdk/drivers/net/ice/base/ice_controlq.c create mode 100644 src/spdk/dpdk/drivers/net/ice/base/ice_controlq.h create mode 100644 src/spdk/dpdk/drivers/net/ice/base/ice_dcb.c create mode 100644 src/spdk/dpdk/drivers/net/ice/base/ice_dcb.h create mode 100644 src/spdk/dpdk/drivers/net/ice/base/ice_devids.h create mode 100644 src/spdk/dpdk/drivers/net/ice/base/ice_fdir.c create mode 100644 src/spdk/dpdk/drivers/net/ice/base/ice_fdir.h create mode 100644 src/spdk/dpdk/drivers/net/ice/base/ice_flex_pipe.c create mode 100644 src/spdk/dpdk/drivers/net/ice/base/ice_flex_pipe.h create mode 100644 src/spdk/dpdk/drivers/net/ice/base/ice_flex_type.h create mode 100644 src/spdk/dpdk/drivers/net/ice/base/ice_flow.c create mode 100644 src/spdk/dpdk/drivers/net/ice/base/ice_flow.h create mode 100644 src/spdk/dpdk/drivers/net/ice/base/ice_hw_autogen.h create mode 100644 src/spdk/dpdk/drivers/net/ice/base/ice_lan_tx_rx.h create mode 100644 src/spdk/dpdk/drivers/net/ice/base/ice_nvm.c create mode 100644 src/spdk/dpdk/drivers/net/ice/base/ice_nvm.h create mode 100644 src/spdk/dpdk/drivers/net/ice/base/ice_osdep.h create mode 100644 src/spdk/dpdk/drivers/net/ice/base/ice_protocol_type.h create mode 100644 src/spdk/dpdk/drivers/net/ice/base/ice_sbq_cmd.h create mode 100644 src/spdk/dpdk/drivers/net/ice/base/ice_sched.c create mode 100644 src/spdk/dpdk/drivers/net/ice/base/ice_sched.h create mode 100644 src/spdk/dpdk/drivers/net/ice/base/ice_status.h create mode 100644 src/spdk/dpdk/drivers/net/ice/base/ice_switch.c create mode 100644 src/spdk/dpdk/drivers/net/ice/base/ice_switch.h create mode 100644 src/spdk/dpdk/drivers/net/ice/base/ice_type.h create mode 100644 src/spdk/dpdk/drivers/net/ice/base/meson.build create mode 100644 src/spdk/dpdk/drivers/net/ice/ice_dcf.c create mode 100644 src/spdk/dpdk/drivers/net/ice/ice_dcf.h create mode 100644 src/spdk/dpdk/drivers/net/ice/ice_dcf_ethdev.c create mode 100644 src/spdk/dpdk/drivers/net/ice/ice_dcf_ethdev.h create mode 100644 src/spdk/dpdk/drivers/net/ice/ice_dcf_parent.c create mode 100644 src/spdk/dpdk/drivers/net/ice/ice_ethdev.c create mode 100644 src/spdk/dpdk/drivers/net/ice/ice_ethdev.h create mode 100644 src/spdk/dpdk/drivers/net/ice/ice_fdir_filter.c create mode 100644 src/spdk/dpdk/drivers/net/ice/ice_generic_flow.c create mode 100644 src/spdk/dpdk/drivers/net/ice/ice_generic_flow.h create mode 100644 src/spdk/dpdk/drivers/net/ice/ice_hash.c create mode 100644 src/spdk/dpdk/drivers/net/ice/ice_logs.h create mode 100644 src/spdk/dpdk/drivers/net/ice/ice_rxtx.c create mode 100644 src/spdk/dpdk/drivers/net/ice/ice_rxtx.h create mode 100644 src/spdk/dpdk/drivers/net/ice/ice_rxtx_vec_avx2.c create mode 100644 src/spdk/dpdk/drivers/net/ice/ice_rxtx_vec_common.h create mode 100644 src/spdk/dpdk/drivers/net/ice/ice_rxtx_vec_sse.c create mode 100644 src/spdk/dpdk/drivers/net/ice/ice_switch_filter.c create mode 100644 src/spdk/dpdk/drivers/net/ice/meson.build create mode 100644 src/spdk/dpdk/drivers/net/ice/rte_pmd_ice.h create mode 100644 src/spdk/dpdk/drivers/net/ice/rte_pmd_ice_version.map create mode 100644 src/spdk/dpdk/drivers/net/igc/Makefile create mode 100644 src/spdk/dpdk/drivers/net/igc/base/README create mode 100644 src/spdk/dpdk/drivers/net/igc/base/igc_82571.h create mode 100644 src/spdk/dpdk/drivers/net/igc/base/igc_82575.h create mode 100644 src/spdk/dpdk/drivers/net/igc/base/igc_api.c create mode 100644 src/spdk/dpdk/drivers/net/igc/base/igc_api.h create mode 
100644 src/spdk/dpdk/drivers/net/igc/base/igc_base.c create mode 100644 src/spdk/dpdk/drivers/net/igc/base/igc_base.h create mode 100644 src/spdk/dpdk/drivers/net/igc/base/igc_defines.h create mode 100644 src/spdk/dpdk/drivers/net/igc/base/igc_hw.h create mode 100644 src/spdk/dpdk/drivers/net/igc/base/igc_i225.c create mode 100644 src/spdk/dpdk/drivers/net/igc/base/igc_i225.h create mode 100644 src/spdk/dpdk/drivers/net/igc/base/igc_ich8lan.h create mode 100644 src/spdk/dpdk/drivers/net/igc/base/igc_mac.c create mode 100644 src/spdk/dpdk/drivers/net/igc/base/igc_mac.h create mode 100644 src/spdk/dpdk/drivers/net/igc/base/igc_manage.c create mode 100644 src/spdk/dpdk/drivers/net/igc/base/igc_manage.h create mode 100644 src/spdk/dpdk/drivers/net/igc/base/igc_nvm.c create mode 100644 src/spdk/dpdk/drivers/net/igc/base/igc_nvm.h create mode 100644 src/spdk/dpdk/drivers/net/igc/base/igc_osdep.c create mode 100644 src/spdk/dpdk/drivers/net/igc/base/igc_osdep.h create mode 100644 src/spdk/dpdk/drivers/net/igc/base/igc_phy.c create mode 100644 src/spdk/dpdk/drivers/net/igc/base/igc_phy.h create mode 100644 src/spdk/dpdk/drivers/net/igc/base/igc_regs.h create mode 100644 src/spdk/dpdk/drivers/net/igc/base/meson.build create mode 100644 src/spdk/dpdk/drivers/net/igc/igc_ethdev.c create mode 100644 src/spdk/dpdk/drivers/net/igc/igc_ethdev.h create mode 100644 src/spdk/dpdk/drivers/net/igc/igc_filter.c create mode 100644 src/spdk/dpdk/drivers/net/igc/igc_filter.h create mode 100644 src/spdk/dpdk/drivers/net/igc/igc_flow.c create mode 100644 src/spdk/dpdk/drivers/net/igc/igc_flow.h create mode 100644 src/spdk/dpdk/drivers/net/igc/igc_logs.c create mode 100644 src/spdk/dpdk/drivers/net/igc/igc_logs.h create mode 100644 src/spdk/dpdk/drivers/net/igc/igc_txrx.c create mode 100644 src/spdk/dpdk/drivers/net/igc/igc_txrx.h create mode 100644 src/spdk/dpdk/drivers/net/igc/meson.build create mode 100644 src/spdk/dpdk/drivers/net/igc/rte_pmd_igc_version.map create mode 100644 src/spdk/dpdk/drivers/net/ionic/Makefile create mode 100644 src/spdk/dpdk/drivers/net/ionic/ionic.h create mode 100644 src/spdk/dpdk/drivers/net/ionic/ionic_dev.c create mode 100644 src/spdk/dpdk/drivers/net/ionic/ionic_dev.h create mode 100644 src/spdk/dpdk/drivers/net/ionic/ionic_ethdev.c create mode 100644 src/spdk/dpdk/drivers/net/ionic/ionic_ethdev.h create mode 100644 src/spdk/dpdk/drivers/net/ionic/ionic_if.h create mode 100644 src/spdk/dpdk/drivers/net/ionic/ionic_lif.c create mode 100644 src/spdk/dpdk/drivers/net/ionic/ionic_lif.h create mode 100644 src/spdk/dpdk/drivers/net/ionic/ionic_logs.h create mode 100644 src/spdk/dpdk/drivers/net/ionic/ionic_mac_api.c create mode 100644 src/spdk/dpdk/drivers/net/ionic/ionic_mac_api.h create mode 100644 src/spdk/dpdk/drivers/net/ionic/ionic_main.c create mode 100644 src/spdk/dpdk/drivers/net/ionic/ionic_osdep.h create mode 100644 src/spdk/dpdk/drivers/net/ionic/ionic_regs.h create mode 100644 src/spdk/dpdk/drivers/net/ionic/ionic_rx_filter.c create mode 100644 src/spdk/dpdk/drivers/net/ionic/ionic_rx_filter.h create mode 100644 src/spdk/dpdk/drivers/net/ionic/ionic_rxtx.c create mode 100644 src/spdk/dpdk/drivers/net/ionic/ionic_rxtx.h create mode 100644 src/spdk/dpdk/drivers/net/ionic/meson.build create mode 100644 src/spdk/dpdk/drivers/net/ionic/rte_pmd_ionic_version.map create mode 100644 src/spdk/dpdk/drivers/net/ipn3ke/Makefile create mode 100644 src/spdk/dpdk/drivers/net/ipn3ke/ipn3ke_ethdev.c create mode 100644 src/spdk/dpdk/drivers/net/ipn3ke/ipn3ke_ethdev.h create mode 100644 
src/spdk/dpdk/drivers/net/ipn3ke/ipn3ke_flow.c create mode 100644 src/spdk/dpdk/drivers/net/ipn3ke/ipn3ke_flow.h create mode 100644 src/spdk/dpdk/drivers/net/ipn3ke/ipn3ke_logs.h create mode 100644 src/spdk/dpdk/drivers/net/ipn3ke/ipn3ke_rawdev_api.h create mode 100644 src/spdk/dpdk/drivers/net/ipn3ke/ipn3ke_representor.c create mode 100644 src/spdk/dpdk/drivers/net/ipn3ke/ipn3ke_tm.c create mode 100644 src/spdk/dpdk/drivers/net/ipn3ke/meson.build create mode 100644 src/spdk/dpdk/drivers/net/ipn3ke/rte_pmd_ipn3ke_version.map create mode 100644 src/spdk/dpdk/drivers/net/ixgbe/Makefile create mode 100644 src/spdk/dpdk/drivers/net/ixgbe/base/README create mode 100644 src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_82598.c create mode 100644 src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_82598.h create mode 100644 src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_82599.c create mode 100644 src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_82599.h create mode 100644 src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_api.c create mode 100644 src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_api.h create mode 100644 src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_common.c create mode 100644 src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_common.h create mode 100644 src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_dcb.c create mode 100644 src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_dcb.h create mode 100644 src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_dcb_82598.c create mode 100644 src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_dcb_82598.h create mode 100644 src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_dcb_82599.c create mode 100644 src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_dcb_82599.h create mode 100644 src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_hv_vf.c create mode 100644 src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_hv_vf.h create mode 100644 src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_mbx.c create mode 100644 src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_mbx.h create mode 100644 src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_osdep.h create mode 100644 src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_phy.c create mode 100644 src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_phy.h create mode 100644 src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_type.h create mode 100644 src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_vf.c create mode 100644 src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_vf.h create mode 100644 src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_x540.c create mode 100644 src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_x540.h create mode 100644 src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_x550.c create mode 100644 src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_x550.h create mode 100644 src/spdk/dpdk/drivers/net/ixgbe/base/meson.build create mode 100644 src/spdk/dpdk/drivers/net/ixgbe/ixgbe_82599_bypass.c create mode 100644 src/spdk/dpdk/drivers/net/ixgbe/ixgbe_bypass.c create mode 100644 src/spdk/dpdk/drivers/net/ixgbe/ixgbe_bypass.h create mode 100644 src/spdk/dpdk/drivers/net/ixgbe/ixgbe_bypass_api.h create mode 100644 src/spdk/dpdk/drivers/net/ixgbe/ixgbe_bypass_defines.h create mode 100644 src/spdk/dpdk/drivers/net/ixgbe/ixgbe_ethdev.c create mode 100644 src/spdk/dpdk/drivers/net/ixgbe/ixgbe_ethdev.h create mode 100644 src/spdk/dpdk/drivers/net/ixgbe/ixgbe_fdir.c create mode 100644 src/spdk/dpdk/drivers/net/ixgbe/ixgbe_flow.c create mode 100644 src/spdk/dpdk/drivers/net/ixgbe/ixgbe_ipsec.c create mode 100644 src/spdk/dpdk/drivers/net/ixgbe/ixgbe_ipsec.h create mode 100644 src/spdk/dpdk/drivers/net/ixgbe/ixgbe_logs.h create mode 100644 src/spdk/dpdk/drivers/net/ixgbe/ixgbe_pf.c create mode 
100644 src/spdk/dpdk/drivers/net/ixgbe/ixgbe_regs.h create mode 100644 src/spdk/dpdk/drivers/net/ixgbe/ixgbe_rxtx.c create mode 100644 src/spdk/dpdk/drivers/net/ixgbe/ixgbe_rxtx.h create mode 100644 src/spdk/dpdk/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h create mode 100644 src/spdk/dpdk/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c create mode 100644 src/spdk/dpdk/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c create mode 100644 src/spdk/dpdk/drivers/net/ixgbe/ixgbe_tm.c create mode 100644 src/spdk/dpdk/drivers/net/ixgbe/ixgbe_vf_representor.c create mode 100644 src/spdk/dpdk/drivers/net/ixgbe/meson.build create mode 100644 src/spdk/dpdk/drivers/net/ixgbe/rte_pmd_ixgbe.c create mode 100644 src/spdk/dpdk/drivers/net/ixgbe/rte_pmd_ixgbe.h create mode 100644 src/spdk/dpdk/drivers/net/ixgbe/rte_pmd_ixgbe_version.map create mode 100644 src/spdk/dpdk/drivers/net/kni/Makefile create mode 100644 src/spdk/dpdk/drivers/net/kni/meson.build create mode 100644 src/spdk/dpdk/drivers/net/kni/rte_eth_kni.c create mode 100644 src/spdk/dpdk/drivers/net/kni/rte_pmd_kni_version.map create mode 100644 src/spdk/dpdk/drivers/net/liquidio/Makefile create mode 100644 src/spdk/dpdk/drivers/net/liquidio/base/lio_23xx_reg.h create mode 100644 src/spdk/dpdk/drivers/net/liquidio/base/lio_23xx_vf.c create mode 100644 src/spdk/dpdk/drivers/net/liquidio/base/lio_23xx_vf.h create mode 100644 src/spdk/dpdk/drivers/net/liquidio/base/lio_hw_defs.h create mode 100644 src/spdk/dpdk/drivers/net/liquidio/base/lio_mbox.c create mode 100644 src/spdk/dpdk/drivers/net/liquidio/base/lio_mbox.h create mode 100644 src/spdk/dpdk/drivers/net/liquidio/lio_ethdev.c create mode 100644 src/spdk/dpdk/drivers/net/liquidio/lio_ethdev.h create mode 100644 src/spdk/dpdk/drivers/net/liquidio/lio_logs.h create mode 100644 src/spdk/dpdk/drivers/net/liquidio/lio_rxtx.c create mode 100644 src/spdk/dpdk/drivers/net/liquidio/lio_rxtx.h create mode 100644 src/spdk/dpdk/drivers/net/liquidio/lio_struct.h create mode 100644 src/spdk/dpdk/drivers/net/liquidio/meson.build create mode 100644 src/spdk/dpdk/drivers/net/liquidio/rte_pmd_liquidio_version.map create mode 100644 src/spdk/dpdk/drivers/net/memif/Makefile create mode 100644 src/spdk/dpdk/drivers/net/memif/memif.h create mode 100644 src/spdk/dpdk/drivers/net/memif/memif_socket.c create mode 100644 src/spdk/dpdk/drivers/net/memif/memif_socket.h create mode 100644 src/spdk/dpdk/drivers/net/memif/meson.build create mode 100644 src/spdk/dpdk/drivers/net/memif/rte_eth_memif.c create mode 100644 src/spdk/dpdk/drivers/net/memif/rte_eth_memif.h create mode 100644 src/spdk/dpdk/drivers/net/memif/rte_pmd_memif_version.map create mode 100644 src/spdk/dpdk/drivers/net/meson.build create mode 100644 src/spdk/dpdk/drivers/net/mlx4/Makefile create mode 100644 src/spdk/dpdk/drivers/net/mlx4/meson.build create mode 100644 src/spdk/dpdk/drivers/net/mlx4/mlx4.c create mode 100644 src/spdk/dpdk/drivers/net/mlx4/mlx4.h create mode 100644 src/spdk/dpdk/drivers/net/mlx4/mlx4_ethdev.c create mode 100644 src/spdk/dpdk/drivers/net/mlx4/mlx4_flow.c create mode 100644 src/spdk/dpdk/drivers/net/mlx4/mlx4_flow.h create mode 100644 src/spdk/dpdk/drivers/net/mlx4/mlx4_glue.c create mode 100644 src/spdk/dpdk/drivers/net/mlx4/mlx4_glue.h create mode 100644 src/spdk/dpdk/drivers/net/mlx4/mlx4_intr.c create mode 100644 src/spdk/dpdk/drivers/net/mlx4/mlx4_mp.c create mode 100644 src/spdk/dpdk/drivers/net/mlx4/mlx4_mr.c create mode 100644 src/spdk/dpdk/drivers/net/mlx4/mlx4_mr.h create mode 100644 src/spdk/dpdk/drivers/net/mlx4/mlx4_prm.h create mode 
100644 src/spdk/dpdk/drivers/net/mlx4/mlx4_rxq.c create mode 100644 src/spdk/dpdk/drivers/net/mlx4/mlx4_rxtx.c create mode 100644 src/spdk/dpdk/drivers/net/mlx4/mlx4_rxtx.h create mode 100644 src/spdk/dpdk/drivers/net/mlx4/mlx4_txq.c create mode 100644 src/spdk/dpdk/drivers/net/mlx4/mlx4_utils.c create mode 100644 src/spdk/dpdk/drivers/net/mlx4/mlx4_utils.h create mode 100644 src/spdk/dpdk/drivers/net/mlx4/rte_pmd_mlx4_version.map create mode 100644 src/spdk/dpdk/drivers/net/mlx5/Makefile create mode 100644 src/spdk/dpdk/drivers/net/mlx5/meson.build create mode 100644 src/spdk/dpdk/drivers/net/mlx5/mlx5.c create mode 100644 src/spdk/dpdk/drivers/net/mlx5/mlx5.h create mode 100644 src/spdk/dpdk/drivers/net/mlx5/mlx5_defs.h create mode 100644 src/spdk/dpdk/drivers/net/mlx5/mlx5_ethdev.c create mode 100644 src/spdk/dpdk/drivers/net/mlx5/mlx5_flow.c create mode 100644 src/spdk/dpdk/drivers/net/mlx5/mlx5_flow.h create mode 100644 src/spdk/dpdk/drivers/net/mlx5/mlx5_flow_dv.c create mode 100644 src/spdk/dpdk/drivers/net/mlx5/mlx5_flow_meter.c create mode 100644 src/spdk/dpdk/drivers/net/mlx5/mlx5_flow_verbs.c create mode 100644 src/spdk/dpdk/drivers/net/mlx5/mlx5_mac.c create mode 100644 src/spdk/dpdk/drivers/net/mlx5/mlx5_mp.c create mode 100644 src/spdk/dpdk/drivers/net/mlx5/mlx5_mr.c create mode 100644 src/spdk/dpdk/drivers/net/mlx5/mlx5_mr.h create mode 100644 src/spdk/dpdk/drivers/net/mlx5/mlx5_rss.c create mode 100644 src/spdk/dpdk/drivers/net/mlx5/mlx5_rxmode.c create mode 100644 src/spdk/dpdk/drivers/net/mlx5/mlx5_rxq.c create mode 100644 src/spdk/dpdk/drivers/net/mlx5/mlx5_rxtx.c create mode 100644 src/spdk/dpdk/drivers/net/mlx5/mlx5_rxtx.h create mode 100644 src/spdk/dpdk/drivers/net/mlx5/mlx5_rxtx_vec.c create mode 100644 src/spdk/dpdk/drivers/net/mlx5/mlx5_rxtx_vec.h create mode 100644 src/spdk/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_altivec.h create mode 100644 src/spdk/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_neon.h create mode 100644 src/spdk/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_sse.h create mode 100644 src/spdk/dpdk/drivers/net/mlx5/mlx5_socket.c create mode 100644 src/spdk/dpdk/drivers/net/mlx5/mlx5_stats.c create mode 100644 src/spdk/dpdk/drivers/net/mlx5/mlx5_trigger.c create mode 100644 src/spdk/dpdk/drivers/net/mlx5/mlx5_txq.c create mode 100644 src/spdk/dpdk/drivers/net/mlx5/mlx5_utils.c create mode 100644 src/spdk/dpdk/drivers/net/mlx5/mlx5_utils.h create mode 100644 src/spdk/dpdk/drivers/net/mlx5/mlx5_vlan.c create mode 100644 src/spdk/dpdk/drivers/net/mlx5/rte_pmd_mlx5.h create mode 100644 src/spdk/dpdk/drivers/net/mlx5/rte_pmd_mlx5_version.map create mode 100644 src/spdk/dpdk/drivers/net/mvneta/Makefile create mode 100644 src/spdk/dpdk/drivers/net/mvneta/meson.build create mode 100644 src/spdk/dpdk/drivers/net/mvneta/mvneta_ethdev.c create mode 100644 src/spdk/dpdk/drivers/net/mvneta/mvneta_ethdev.h create mode 100644 src/spdk/dpdk/drivers/net/mvneta/mvneta_rxtx.c create mode 100644 src/spdk/dpdk/drivers/net/mvneta/mvneta_rxtx.h create mode 100644 src/spdk/dpdk/drivers/net/mvneta/rte_pmd_mvneta_version.map create mode 100644 src/spdk/dpdk/drivers/net/mvpp2/Makefile create mode 100644 src/spdk/dpdk/drivers/net/mvpp2/meson.build create mode 100644 src/spdk/dpdk/drivers/net/mvpp2/mrvl_ethdev.c create mode 100644 src/spdk/dpdk/drivers/net/mvpp2/mrvl_ethdev.h create mode 100644 src/spdk/dpdk/drivers/net/mvpp2/mrvl_flow.c create mode 100644 src/spdk/dpdk/drivers/net/mvpp2/mrvl_flow.h create mode 100644 src/spdk/dpdk/drivers/net/mvpp2/mrvl_mtr.c create mode 100644 
src/spdk/dpdk/drivers/net/mvpp2/mrvl_mtr.h create mode 100644 src/spdk/dpdk/drivers/net/mvpp2/mrvl_qos.c create mode 100644 src/spdk/dpdk/drivers/net/mvpp2/mrvl_qos.h create mode 100644 src/spdk/dpdk/drivers/net/mvpp2/mrvl_tm.c create mode 100644 src/spdk/dpdk/drivers/net/mvpp2/mrvl_tm.h create mode 100644 src/spdk/dpdk/drivers/net/mvpp2/rte_pmd_mvpp2_version.map create mode 100644 src/spdk/dpdk/drivers/net/netvsc/Makefile create mode 100644 src/spdk/dpdk/drivers/net/netvsc/hn_ethdev.c create mode 100644 src/spdk/dpdk/drivers/net/netvsc/hn_logs.h create mode 100644 src/spdk/dpdk/drivers/net/netvsc/hn_nvs.c create mode 100644 src/spdk/dpdk/drivers/net/netvsc/hn_nvs.h create mode 100644 src/spdk/dpdk/drivers/net/netvsc/hn_rndis.c create mode 100644 src/spdk/dpdk/drivers/net/netvsc/hn_rndis.h create mode 100644 src/spdk/dpdk/drivers/net/netvsc/hn_rxtx.c create mode 100644 src/spdk/dpdk/drivers/net/netvsc/hn_var.h create mode 100644 src/spdk/dpdk/drivers/net/netvsc/hn_vf.c create mode 100644 src/spdk/dpdk/drivers/net/netvsc/meson.build create mode 100644 src/spdk/dpdk/drivers/net/netvsc/ndis.h create mode 100644 src/spdk/dpdk/drivers/net/netvsc/rndis.h create mode 100644 src/spdk/dpdk/drivers/net/netvsc/rte_pmd_netvsc_version.map create mode 100644 src/spdk/dpdk/drivers/net/nfb/Makefile create mode 100644 src/spdk/dpdk/drivers/net/nfb/meson.build create mode 100644 src/spdk/dpdk/drivers/net/nfb/nfb.h create mode 100644 src/spdk/dpdk/drivers/net/nfb/nfb_ethdev.c create mode 100644 src/spdk/dpdk/drivers/net/nfb/nfb_rx.c create mode 100644 src/spdk/dpdk/drivers/net/nfb/nfb_rx.h create mode 100644 src/spdk/dpdk/drivers/net/nfb/nfb_rxmode.c create mode 100644 src/spdk/dpdk/drivers/net/nfb/nfb_rxmode.h create mode 100644 src/spdk/dpdk/drivers/net/nfb/nfb_stats.c create mode 100644 src/spdk/dpdk/drivers/net/nfb/nfb_stats.h create mode 100644 src/spdk/dpdk/drivers/net/nfb/nfb_tx.c create mode 100644 src/spdk/dpdk/drivers/net/nfb/nfb_tx.h create mode 100644 src/spdk/dpdk/drivers/net/nfb/rte_pmd_nfb_version.map create mode 100644 src/spdk/dpdk/drivers/net/nfp/Makefile create mode 100644 src/spdk/dpdk/drivers/net/nfp/meson.build create mode 100644 src/spdk/dpdk/drivers/net/nfp/nfp_net.c create mode 100644 src/spdk/dpdk/drivers/net/nfp/nfp_net_ctrl.h create mode 100644 src/spdk/dpdk/drivers/net/nfp/nfp_net_logs.h create mode 100644 src/spdk/dpdk/drivers/net/nfp/nfp_net_pmd.h create mode 100644 src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp-common/nfp_cppat.h create mode 100644 src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp-common/nfp_platform.h create mode 100644 src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp-common/nfp_resid.h create mode 100644 src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp6000/nfp6000.h create mode 100644 src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp6000/nfp_xpb.h create mode 100644 src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_cpp.h create mode 100644 src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_cpp_pcie_ops.c create mode 100644 src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_cppcore.c create mode 100644 src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_crc.c create mode 100644 src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_crc.h create mode 100644 src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_hwinfo.c create mode 100644 src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_hwinfo.h create mode 100644 src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_mip.c create mode 100644 src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_mip.h create mode 100644 src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_mutex.c create mode 100644 src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_nffw.c 
create mode 100644 src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_nffw.h create mode 100644 src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_nsp.c create mode 100644 src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_nsp.h create mode 100644 src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_nsp_cmds.c create mode 100644 src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_nsp_eth.c create mode 100644 src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_resource.c create mode 100644 src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_resource.h create mode 100644 src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_rtsym.c create mode 100644 src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_rtsym.h create mode 100644 src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_target.h create mode 100644 src/spdk/dpdk/drivers/net/nfp/rte_pmd_nfp_version.map create mode 100644 src/spdk/dpdk/drivers/net/null/Makefile create mode 100644 src/spdk/dpdk/drivers/net/null/meson.build create mode 100644 src/spdk/dpdk/drivers/net/null/rte_eth_null.c create mode 100644 src/spdk/dpdk/drivers/net/null/rte_pmd_null_version.map create mode 100644 src/spdk/dpdk/drivers/net/octeontx/Makefile create mode 100644 src/spdk/dpdk/drivers/net/octeontx/base/meson.build create mode 100644 src/spdk/dpdk/drivers/net/octeontx/base/octeontx_bgx.c create mode 100644 src/spdk/dpdk/drivers/net/octeontx/base/octeontx_bgx.h create mode 100644 src/spdk/dpdk/drivers/net/octeontx/base/octeontx_io.h create mode 100644 src/spdk/dpdk/drivers/net/octeontx/base/octeontx_pki_var.h create mode 100644 src/spdk/dpdk/drivers/net/octeontx/base/octeontx_pkivf.c create mode 100644 src/spdk/dpdk/drivers/net/octeontx/base/octeontx_pkivf.h create mode 100644 src/spdk/dpdk/drivers/net/octeontx/base/octeontx_pkovf.c create mode 100644 src/spdk/dpdk/drivers/net/octeontx/base/octeontx_pkovf.h create mode 100644 src/spdk/dpdk/drivers/net/octeontx/meson.build create mode 100644 src/spdk/dpdk/drivers/net/octeontx/octeontx_ethdev.c create mode 100644 src/spdk/dpdk/drivers/net/octeontx/octeontx_ethdev.h create mode 100644 src/spdk/dpdk/drivers/net/octeontx/octeontx_ethdev_ops.c create mode 100644 src/spdk/dpdk/drivers/net/octeontx/octeontx_logs.h create mode 100644 src/spdk/dpdk/drivers/net/octeontx/octeontx_rxtx.c create mode 100644 src/spdk/dpdk/drivers/net/octeontx/octeontx_rxtx.h create mode 100644 src/spdk/dpdk/drivers/net/octeontx/rte_pmd_octeontx_version.map create mode 100644 src/spdk/dpdk/drivers/net/octeontx2/Makefile create mode 100644 src/spdk/dpdk/drivers/net/octeontx2/meson.build create mode 100644 src/spdk/dpdk/drivers/net/octeontx2/otx2_ethdev.c create mode 100644 src/spdk/dpdk/drivers/net/octeontx2/otx2_ethdev.h create mode 100644 src/spdk/dpdk/drivers/net/octeontx2/otx2_ethdev_debug.c create mode 100644 src/spdk/dpdk/drivers/net/octeontx2/otx2_ethdev_devargs.c create mode 100644 src/spdk/dpdk/drivers/net/octeontx2/otx2_ethdev_irq.c create mode 100644 src/spdk/dpdk/drivers/net/octeontx2/otx2_ethdev_ops.c create mode 100644 src/spdk/dpdk/drivers/net/octeontx2/otx2_ethdev_sec.c create mode 100644 src/spdk/dpdk/drivers/net/octeontx2/otx2_ethdev_sec.h create mode 100644 src/spdk/dpdk/drivers/net/octeontx2/otx2_ethdev_sec_tx.h create mode 100644 src/spdk/dpdk/drivers/net/octeontx2/otx2_flow.c create mode 100644 src/spdk/dpdk/drivers/net/octeontx2/otx2_flow.h create mode 100644 src/spdk/dpdk/drivers/net/octeontx2/otx2_flow_ctrl.c create mode 100644 src/spdk/dpdk/drivers/net/octeontx2/otx2_flow_parse.c create mode 100644 src/spdk/dpdk/drivers/net/octeontx2/otx2_flow_utils.c create mode 100644 src/spdk/dpdk/drivers/net/octeontx2/otx2_link.c 
create mode 100644 src/spdk/dpdk/drivers/net/octeontx2/otx2_lookup.c create mode 100644 src/spdk/dpdk/drivers/net/octeontx2/otx2_mac.c create mode 100644 src/spdk/dpdk/drivers/net/octeontx2/otx2_mcast.c create mode 100644 src/spdk/dpdk/drivers/net/octeontx2/otx2_ptp.c create mode 100644 src/spdk/dpdk/drivers/net/octeontx2/otx2_rss.c create mode 100644 src/spdk/dpdk/drivers/net/octeontx2/otx2_rx.c create mode 100644 src/spdk/dpdk/drivers/net/octeontx2/otx2_rx.h create mode 100644 src/spdk/dpdk/drivers/net/octeontx2/otx2_stats.c create mode 100644 src/spdk/dpdk/drivers/net/octeontx2/otx2_tm.c create mode 100644 src/spdk/dpdk/drivers/net/octeontx2/otx2_tm.h create mode 100644 src/spdk/dpdk/drivers/net/octeontx2/otx2_tx.c create mode 100644 src/spdk/dpdk/drivers/net/octeontx2/otx2_tx.h create mode 100644 src/spdk/dpdk/drivers/net/octeontx2/otx2_vlan.c create mode 100644 src/spdk/dpdk/drivers/net/octeontx2/rte_pmd_octeontx2_version.map create mode 100644 src/spdk/dpdk/drivers/net/pcap/Makefile create mode 100644 src/spdk/dpdk/drivers/net/pcap/meson.build create mode 100644 src/spdk/dpdk/drivers/net/pcap/rte_eth_pcap.c create mode 100644 src/spdk/dpdk/drivers/net/pcap/rte_pmd_pcap_version.map create mode 100644 src/spdk/dpdk/drivers/net/pfe/Makefile create mode 100644 src/spdk/dpdk/drivers/net/pfe/base/cbus.h create mode 100644 src/spdk/dpdk/drivers/net/pfe/base/cbus/bmu.h create mode 100644 src/spdk/dpdk/drivers/net/pfe/base/cbus/class_csr.h create mode 100644 src/spdk/dpdk/drivers/net/pfe/base/cbus/emac_mtip.h create mode 100644 src/spdk/dpdk/drivers/net/pfe/base/cbus/gpi.h create mode 100644 src/spdk/dpdk/drivers/net/pfe/base/cbus/hif.h create mode 100644 src/spdk/dpdk/drivers/net/pfe/base/cbus/hif_nocpy.h create mode 100644 src/spdk/dpdk/drivers/net/pfe/base/cbus/tmu_csr.h create mode 100644 src/spdk/dpdk/drivers/net/pfe/base/cbus/util_csr.h create mode 100644 src/spdk/dpdk/drivers/net/pfe/base/pfe.h create mode 100644 src/spdk/dpdk/drivers/net/pfe/meson.build create mode 100644 src/spdk/dpdk/drivers/net/pfe/pfe_eth.h create mode 100644 src/spdk/dpdk/drivers/net/pfe/pfe_ethdev.c create mode 100644 src/spdk/dpdk/drivers/net/pfe/pfe_hal.c create mode 100644 src/spdk/dpdk/drivers/net/pfe/pfe_hif.c create mode 100644 src/spdk/dpdk/drivers/net/pfe/pfe_hif.h create mode 100644 src/spdk/dpdk/drivers/net/pfe/pfe_hif_lib.c create mode 100644 src/spdk/dpdk/drivers/net/pfe/pfe_hif_lib.h create mode 100644 src/spdk/dpdk/drivers/net/pfe/pfe_logs.h create mode 100644 src/spdk/dpdk/drivers/net/pfe/pfe_mod.h create mode 100644 src/spdk/dpdk/drivers/net/pfe/rte_pmd_pfe_version.map create mode 100644 src/spdk/dpdk/drivers/net/qede/Makefile create mode 100644 src/spdk/dpdk/drivers/net/qede/base/bcm_osal.c create mode 100644 src/spdk/dpdk/drivers/net/qede/base/bcm_osal.h create mode 100644 src/spdk/dpdk/drivers/net/qede/base/common_hsi.h create mode 100644 src/spdk/dpdk/drivers/net/qede/base/ecore.h create mode 100644 src/spdk/dpdk/drivers/net/qede/base/ecore_attn_values.h create mode 100644 src/spdk/dpdk/drivers/net/qede/base/ecore_chain.h create mode 100644 src/spdk/dpdk/drivers/net/qede/base/ecore_cxt.c create mode 100644 src/spdk/dpdk/drivers/net/qede/base/ecore_cxt.h create mode 100644 src/spdk/dpdk/drivers/net/qede/base/ecore_cxt_api.h create mode 100644 src/spdk/dpdk/drivers/net/qede/base/ecore_dcbx.c create mode 100644 src/spdk/dpdk/drivers/net/qede/base/ecore_dcbx.h create mode 100644 src/spdk/dpdk/drivers/net/qede/base/ecore_dcbx_api.h create mode 100644 src/spdk/dpdk/drivers/net/qede/base/ecore_dev.c 
create mode 100644 src/spdk/dpdk/drivers/net/qede/base/ecore_dev_api.h create mode 100644 src/spdk/dpdk/drivers/net/qede/base/ecore_gtt_reg_addr.h create mode 100644 src/spdk/dpdk/drivers/net/qede/base/ecore_gtt_values.h create mode 100644 src/spdk/dpdk/drivers/net/qede/base/ecore_hsi_common.h create mode 100644 src/spdk/dpdk/drivers/net/qede/base/ecore_hsi_debug_tools.h create mode 100644 src/spdk/dpdk/drivers/net/qede/base/ecore_hsi_eth.h create mode 100644 src/spdk/dpdk/drivers/net/qede/base/ecore_hsi_init_func.h create mode 100644 src/spdk/dpdk/drivers/net/qede/base/ecore_hsi_init_tool.h create mode 100644 src/spdk/dpdk/drivers/net/qede/base/ecore_hw.c create mode 100644 src/spdk/dpdk/drivers/net/qede/base/ecore_hw.h create mode 100644 src/spdk/dpdk/drivers/net/qede/base/ecore_hw_defs.h create mode 100644 src/spdk/dpdk/drivers/net/qede/base/ecore_init_fw_funcs.c create mode 100644 src/spdk/dpdk/drivers/net/qede/base/ecore_init_fw_funcs.h create mode 100644 src/spdk/dpdk/drivers/net/qede/base/ecore_init_ops.c create mode 100644 src/spdk/dpdk/drivers/net/qede/base/ecore_init_ops.h create mode 100644 src/spdk/dpdk/drivers/net/qede/base/ecore_int.c create mode 100644 src/spdk/dpdk/drivers/net/qede/base/ecore_int.h create mode 100644 src/spdk/dpdk/drivers/net/qede/base/ecore_int_api.h create mode 100644 src/spdk/dpdk/drivers/net/qede/base/ecore_iov_api.h create mode 100644 src/spdk/dpdk/drivers/net/qede/base/ecore_iro.h create mode 100644 src/spdk/dpdk/drivers/net/qede/base/ecore_iro_values.h create mode 100644 src/spdk/dpdk/drivers/net/qede/base/ecore_l2.c create mode 100644 src/spdk/dpdk/drivers/net/qede/base/ecore_l2.h create mode 100644 src/spdk/dpdk/drivers/net/qede/base/ecore_l2_api.h create mode 100644 src/spdk/dpdk/drivers/net/qede/base/ecore_mcp.c create mode 100644 src/spdk/dpdk/drivers/net/qede/base/ecore_mcp.h create mode 100644 src/spdk/dpdk/drivers/net/qede/base/ecore_mcp_api.h create mode 100644 src/spdk/dpdk/drivers/net/qede/base/ecore_mng_tlv.c create mode 100644 src/spdk/dpdk/drivers/net/qede/base/ecore_proto_if.h create mode 100644 src/spdk/dpdk/drivers/net/qede/base/ecore_rt_defs.h create mode 100644 src/spdk/dpdk/drivers/net/qede/base/ecore_sp_api.h create mode 100644 src/spdk/dpdk/drivers/net/qede/base/ecore_sp_commands.c create mode 100644 src/spdk/dpdk/drivers/net/qede/base/ecore_sp_commands.h create mode 100644 src/spdk/dpdk/drivers/net/qede/base/ecore_spq.c create mode 100644 src/spdk/dpdk/drivers/net/qede/base/ecore_spq.h create mode 100644 src/spdk/dpdk/drivers/net/qede/base/ecore_sriov.c create mode 100644 src/spdk/dpdk/drivers/net/qede/base/ecore_sriov.h create mode 100644 src/spdk/dpdk/drivers/net/qede/base/ecore_status.h create mode 100644 src/spdk/dpdk/drivers/net/qede/base/ecore_utils.h create mode 100644 src/spdk/dpdk/drivers/net/qede/base/ecore_vf.c create mode 100644 src/spdk/dpdk/drivers/net/qede/base/ecore_vf.h create mode 100644 src/spdk/dpdk/drivers/net/qede/base/ecore_vf_api.h create mode 100644 src/spdk/dpdk/drivers/net/qede/base/ecore_vfpf_if.h create mode 100644 src/spdk/dpdk/drivers/net/qede/base/eth_common.h create mode 100644 src/spdk/dpdk/drivers/net/qede/base/mcp_public.h create mode 100644 src/spdk/dpdk/drivers/net/qede/base/meson.build create mode 100644 src/spdk/dpdk/drivers/net/qede/base/nvm_cfg.h create mode 100644 src/spdk/dpdk/drivers/net/qede/base/reg_addr.h create mode 100644 src/spdk/dpdk/drivers/net/qede/meson.build create mode 100644 src/spdk/dpdk/drivers/net/qede/qede_ethdev.c create mode 100644 
src/spdk/dpdk/drivers/net/qede/qede_ethdev.h create mode 100644 src/spdk/dpdk/drivers/net/qede/qede_filter.c create mode 100644 src/spdk/dpdk/drivers/net/qede/qede_if.h create mode 100644 src/spdk/dpdk/drivers/net/qede/qede_logs.h create mode 100644 src/spdk/dpdk/drivers/net/qede/qede_main.c create mode 100644 src/spdk/dpdk/drivers/net/qede/qede_rxtx.c create mode 100644 src/spdk/dpdk/drivers/net/qede/qede_rxtx.h create mode 100644 src/spdk/dpdk/drivers/net/qede/rte_pmd_qede_version.map create mode 100644 src/spdk/dpdk/drivers/net/ring/Makefile create mode 100644 src/spdk/dpdk/drivers/net/ring/meson.build create mode 100644 src/spdk/dpdk/drivers/net/ring/rte_eth_ring.c create mode 100644 src/spdk/dpdk/drivers/net/ring/rte_eth_ring.h create mode 100644 src/spdk/dpdk/drivers/net/ring/rte_pmd_ring_version.map create mode 100644 src/spdk/dpdk/drivers/net/sfc/Makefile create mode 100644 src/spdk/dpdk/drivers/net/sfc/base/README create mode 100644 src/spdk/dpdk/drivers/net/sfc/base/ef10_ev.c create mode 100644 src/spdk/dpdk/drivers/net/sfc/base/ef10_evb.c create mode 100644 src/spdk/dpdk/drivers/net/sfc/base/ef10_filter.c create mode 100644 src/spdk/dpdk/drivers/net/sfc/base/ef10_firmware_ids.h create mode 100644 src/spdk/dpdk/drivers/net/sfc/base/ef10_image.c create mode 100644 src/spdk/dpdk/drivers/net/sfc/base/ef10_impl.h create mode 100644 src/spdk/dpdk/drivers/net/sfc/base/ef10_intr.c create mode 100644 src/spdk/dpdk/drivers/net/sfc/base/ef10_mac.c create mode 100644 src/spdk/dpdk/drivers/net/sfc/base/ef10_mcdi.c create mode 100644 src/spdk/dpdk/drivers/net/sfc/base/ef10_nic.c create mode 100644 src/spdk/dpdk/drivers/net/sfc/base/ef10_nvram.c create mode 100644 src/spdk/dpdk/drivers/net/sfc/base/ef10_phy.c create mode 100644 src/spdk/dpdk/drivers/net/sfc/base/ef10_proxy.c create mode 100644 src/spdk/dpdk/drivers/net/sfc/base/ef10_rx.c create mode 100644 src/spdk/dpdk/drivers/net/sfc/base/ef10_signed_image_layout.h create mode 100644 src/spdk/dpdk/drivers/net/sfc/base/ef10_tlv_layout.h create mode 100644 src/spdk/dpdk/drivers/net/sfc/base/ef10_tx.c create mode 100644 src/spdk/dpdk/drivers/net/sfc/base/ef10_vpd.c create mode 100644 src/spdk/dpdk/drivers/net/sfc/base/efx.h create mode 100644 src/spdk/dpdk/drivers/net/sfc/base/efx_annote.h create mode 100644 src/spdk/dpdk/drivers/net/sfc/base/efx_bootcfg.c create mode 100644 src/spdk/dpdk/drivers/net/sfc/base/efx_check.h create mode 100644 src/spdk/dpdk/drivers/net/sfc/base/efx_crc32.c create mode 100644 src/spdk/dpdk/drivers/net/sfc/base/efx_ev.c create mode 100644 src/spdk/dpdk/drivers/net/sfc/base/efx_evb.c create mode 100644 src/spdk/dpdk/drivers/net/sfc/base/efx_filter.c create mode 100644 src/spdk/dpdk/drivers/net/sfc/base/efx_hash.c create mode 100644 src/spdk/dpdk/drivers/net/sfc/base/efx_impl.h create mode 100644 src/spdk/dpdk/drivers/net/sfc/base/efx_intr.c create mode 100644 src/spdk/dpdk/drivers/net/sfc/base/efx_lic.c create mode 100644 src/spdk/dpdk/drivers/net/sfc/base/efx_mac.c create mode 100644 src/spdk/dpdk/drivers/net/sfc/base/efx_mcdi.c create mode 100644 src/spdk/dpdk/drivers/net/sfc/base/efx_mcdi.h create mode 100644 src/spdk/dpdk/drivers/net/sfc/base/efx_mon.c create mode 100644 src/spdk/dpdk/drivers/net/sfc/base/efx_nic.c create mode 100644 src/spdk/dpdk/drivers/net/sfc/base/efx_nvram.c create mode 100644 src/spdk/dpdk/drivers/net/sfc/base/efx_phy.c create mode 100644 src/spdk/dpdk/drivers/net/sfc/base/efx_phy_ids.h create mode 100644 src/spdk/dpdk/drivers/net/sfc/base/efx_port.c create mode 100644 
src/spdk/dpdk/drivers/net/sfc/base/efx_proxy.c create mode 100644 src/spdk/dpdk/drivers/net/sfc/base/efx_regs.h create mode 100644 src/spdk/dpdk/drivers/net/sfc/base/efx_regs_ef10.h create mode 100644 src/spdk/dpdk/drivers/net/sfc/base/efx_regs_mcdi.h create mode 100644 src/spdk/dpdk/drivers/net/sfc/base/efx_regs_mcdi_aoe.h create mode 100644 src/spdk/dpdk/drivers/net/sfc/base/efx_regs_mcdi_strs.h create mode 100644 src/spdk/dpdk/drivers/net/sfc/base/efx_regs_pci.h create mode 100644 src/spdk/dpdk/drivers/net/sfc/base/efx_rx.c create mode 100644 src/spdk/dpdk/drivers/net/sfc/base/efx_sram.c create mode 100644 src/spdk/dpdk/drivers/net/sfc/base/efx_tunnel.c create mode 100644 src/spdk/dpdk/drivers/net/sfc/base/efx_tx.c create mode 100644 src/spdk/dpdk/drivers/net/sfc/base/efx_types.h create mode 100644 src/spdk/dpdk/drivers/net/sfc/base/efx_vpd.c create mode 100644 src/spdk/dpdk/drivers/net/sfc/base/hunt_impl.h create mode 100644 src/spdk/dpdk/drivers/net/sfc/base/hunt_nic.c create mode 100644 src/spdk/dpdk/drivers/net/sfc/base/mcdi_mon.c create mode 100644 src/spdk/dpdk/drivers/net/sfc/base/mcdi_mon.h create mode 100644 src/spdk/dpdk/drivers/net/sfc/base/medford2_impl.h create mode 100644 src/spdk/dpdk/drivers/net/sfc/base/medford2_nic.c create mode 100644 src/spdk/dpdk/drivers/net/sfc/base/medford_impl.h create mode 100644 src/spdk/dpdk/drivers/net/sfc/base/medford_nic.c create mode 100644 src/spdk/dpdk/drivers/net/sfc/base/meson.build create mode 100644 src/spdk/dpdk/drivers/net/sfc/base/siena_flash.h create mode 100644 src/spdk/dpdk/drivers/net/sfc/base/siena_impl.h create mode 100644 src/spdk/dpdk/drivers/net/sfc/base/siena_mac.c create mode 100644 src/spdk/dpdk/drivers/net/sfc/base/siena_mcdi.c create mode 100644 src/spdk/dpdk/drivers/net/sfc/base/siena_nic.c create mode 100644 src/spdk/dpdk/drivers/net/sfc/base/siena_nvram.c create mode 100644 src/spdk/dpdk/drivers/net/sfc/base/siena_phy.c create mode 100644 src/spdk/dpdk/drivers/net/sfc/base/siena_sram.c create mode 100644 src/spdk/dpdk/drivers/net/sfc/base/siena_vpd.c create mode 100644 src/spdk/dpdk/drivers/net/sfc/efsys.h create mode 100644 src/spdk/dpdk/drivers/net/sfc/meson.build create mode 100644 src/spdk/dpdk/drivers/net/sfc/rte_pmd_sfc_version.map create mode 100644 src/spdk/dpdk/drivers/net/sfc/sfc.c create mode 100644 src/spdk/dpdk/drivers/net/sfc/sfc.h create mode 100644 src/spdk/dpdk/drivers/net/sfc/sfc_debug.h create mode 100644 src/spdk/dpdk/drivers/net/sfc/sfc_dp.c create mode 100644 src/spdk/dpdk/drivers/net/sfc/sfc_dp.h create mode 100644 src/spdk/dpdk/drivers/net/sfc/sfc_dp_rx.h create mode 100644 src/spdk/dpdk/drivers/net/sfc/sfc_dp_tx.h create mode 100644 src/spdk/dpdk/drivers/net/sfc/sfc_ef10.h create mode 100644 src/spdk/dpdk/drivers/net/sfc/sfc_ef10_essb_rx.c create mode 100644 src/spdk/dpdk/drivers/net/sfc/sfc_ef10_rx.c create mode 100644 src/spdk/dpdk/drivers/net/sfc/sfc_ef10_rx_ev.h create mode 100644 src/spdk/dpdk/drivers/net/sfc/sfc_ef10_tx.c create mode 100644 src/spdk/dpdk/drivers/net/sfc/sfc_ethdev.c create mode 100644 src/spdk/dpdk/drivers/net/sfc/sfc_ev.c create mode 100644 src/spdk/dpdk/drivers/net/sfc/sfc_ev.h create mode 100644 src/spdk/dpdk/drivers/net/sfc/sfc_filter.c create mode 100644 src/spdk/dpdk/drivers/net/sfc/sfc_filter.h create mode 100644 src/spdk/dpdk/drivers/net/sfc/sfc_flow.c create mode 100644 src/spdk/dpdk/drivers/net/sfc/sfc_flow.h create mode 100644 src/spdk/dpdk/drivers/net/sfc/sfc_intr.c create mode 100644 src/spdk/dpdk/drivers/net/sfc/sfc_kvargs.c create mode 100644 
src/spdk/dpdk/drivers/net/sfc/sfc_kvargs.h create mode 100644 src/spdk/dpdk/drivers/net/sfc/sfc_log.h create mode 100644 src/spdk/dpdk/drivers/net/sfc/sfc_mcdi.c create mode 100644 src/spdk/dpdk/drivers/net/sfc/sfc_port.c create mode 100644 src/spdk/dpdk/drivers/net/sfc/sfc_rx.c create mode 100644 src/spdk/dpdk/drivers/net/sfc/sfc_rx.h create mode 100644 src/spdk/dpdk/drivers/net/sfc/sfc_tso.c create mode 100644 src/spdk/dpdk/drivers/net/sfc/sfc_tso.h create mode 100644 src/spdk/dpdk/drivers/net/sfc/sfc_tweak.h create mode 100644 src/spdk/dpdk/drivers/net/sfc/sfc_tx.c create mode 100644 src/spdk/dpdk/drivers/net/sfc/sfc_tx.h create mode 100644 src/spdk/dpdk/drivers/net/softnic/Makefile create mode 100644 src/spdk/dpdk/drivers/net/softnic/conn.c create mode 100644 src/spdk/dpdk/drivers/net/softnic/conn.h create mode 100644 src/spdk/dpdk/drivers/net/softnic/firmware.cli create mode 100644 src/spdk/dpdk/drivers/net/softnic/meson.build create mode 100644 src/spdk/dpdk/drivers/net/softnic/parser.c create mode 100644 src/spdk/dpdk/drivers/net/softnic/parser.h create mode 100644 src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic.c create mode 100644 src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic.h create mode 100644 src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_action.c create mode 100644 src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_cli.c create mode 100644 src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_cryptodev.c create mode 100644 src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_flow.c create mode 100644 src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_internals.h create mode 100644 src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_link.c create mode 100644 src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_mempool.c create mode 100644 src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_meter.c create mode 100644 src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_pipeline.c create mode 100644 src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_swq.c create mode 100644 src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_tap.c create mode 100644 src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_thread.c create mode 100644 src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_tm.c create mode 100644 src/spdk/dpdk/drivers/net/softnic/rte_pmd_softnic_version.map create mode 100644 src/spdk/dpdk/drivers/net/szedata2/Makefile create mode 100644 src/spdk/dpdk/drivers/net/szedata2/meson.build create mode 100644 src/spdk/dpdk/drivers/net/szedata2/rte_eth_szedata2.c create mode 100644 src/spdk/dpdk/drivers/net/szedata2/rte_eth_szedata2.h create mode 100644 src/spdk/dpdk/drivers/net/szedata2/rte_pmd_szedata2_version.map create mode 100644 src/spdk/dpdk/drivers/net/szedata2/szedata2_logs.h create mode 100644 src/spdk/dpdk/drivers/net/tap/Makefile create mode 100644 src/spdk/dpdk/drivers/net/tap/meson.build create mode 100644 src/spdk/dpdk/drivers/net/tap/rte_eth_tap.c create mode 100644 src/spdk/dpdk/drivers/net/tap/rte_eth_tap.h create mode 100644 src/spdk/dpdk/drivers/net/tap/rte_pmd_tap_version.map create mode 100644 src/spdk/dpdk/drivers/net/tap/tap_bpf.h create mode 100644 src/spdk/dpdk/drivers/net/tap/tap_bpf_api.c create mode 100644 src/spdk/dpdk/drivers/net/tap/tap_bpf_insns.h create mode 100644 src/spdk/dpdk/drivers/net/tap/tap_bpf_program.c create mode 100644 src/spdk/dpdk/drivers/net/tap/tap_flow.c create mode 100644 src/spdk/dpdk/drivers/net/tap/tap_flow.h create mode 100644 src/spdk/dpdk/drivers/net/tap/tap_intr.c create mode 100644 src/spdk/dpdk/drivers/net/tap/tap_log.h create mode 
100644 src/spdk/dpdk/drivers/net/tap/tap_netlink.c create mode 100644 src/spdk/dpdk/drivers/net/tap/tap_netlink.h create mode 100644 src/spdk/dpdk/drivers/net/tap/tap_rss.h create mode 100644 src/spdk/dpdk/drivers/net/tap/tap_tcmsgs.c create mode 100644 src/spdk/dpdk/drivers/net/tap/tap_tcmsgs.h create mode 100644 src/spdk/dpdk/drivers/net/thunderx/Makefile create mode 100644 src/spdk/dpdk/drivers/net/thunderx/base/meson.build create mode 100644 src/spdk/dpdk/drivers/net/thunderx/base/nicvf_bsvf.c create mode 100644 src/spdk/dpdk/drivers/net/thunderx/base/nicvf_bsvf.h create mode 100644 src/spdk/dpdk/drivers/net/thunderx/base/nicvf_hw.c create mode 100644 src/spdk/dpdk/drivers/net/thunderx/base/nicvf_hw.h create mode 100644 src/spdk/dpdk/drivers/net/thunderx/base/nicvf_hw_defs.h create mode 100644 src/spdk/dpdk/drivers/net/thunderx/base/nicvf_mbox.c create mode 100644 src/spdk/dpdk/drivers/net/thunderx/base/nicvf_mbox.h create mode 100644 src/spdk/dpdk/drivers/net/thunderx/base/nicvf_plat.h create mode 100644 src/spdk/dpdk/drivers/net/thunderx/meson.build create mode 100644 src/spdk/dpdk/drivers/net/thunderx/nicvf_ethdev.c create mode 100644 src/spdk/dpdk/drivers/net/thunderx/nicvf_ethdev.h create mode 100644 src/spdk/dpdk/drivers/net/thunderx/nicvf_logs.h create mode 100644 src/spdk/dpdk/drivers/net/thunderx/nicvf_rxtx.c create mode 100644 src/spdk/dpdk/drivers/net/thunderx/nicvf_rxtx.h create mode 100644 src/spdk/dpdk/drivers/net/thunderx/nicvf_struct.h create mode 100644 src/spdk/dpdk/drivers/net/thunderx/nicvf_svf.c create mode 100644 src/spdk/dpdk/drivers/net/thunderx/nicvf_svf.h create mode 100644 src/spdk/dpdk/drivers/net/thunderx/rte_pmd_thunderx_version.map create mode 100644 src/spdk/dpdk/drivers/net/vdev_netvsc/Makefile create mode 100644 src/spdk/dpdk/drivers/net/vdev_netvsc/meson.build create mode 100644 src/spdk/dpdk/drivers/net/vdev_netvsc/rte_pmd_vdev_netvsc_version.map create mode 100644 src/spdk/dpdk/drivers/net/vdev_netvsc/vdev_netvsc.c create mode 100644 src/spdk/dpdk/drivers/net/vhost/Makefile create mode 100644 src/spdk/dpdk/drivers/net/vhost/meson.build create mode 100644 src/spdk/dpdk/drivers/net/vhost/rte_eth_vhost.c create mode 100644 src/spdk/dpdk/drivers/net/vhost/rte_eth_vhost.h create mode 100644 src/spdk/dpdk/drivers/net/vhost/rte_pmd_vhost_version.map create mode 100644 src/spdk/dpdk/drivers/net/virtio/Makefile create mode 100644 src/spdk/dpdk/drivers/net/virtio/meson.build create mode 100644 src/spdk/dpdk/drivers/net/virtio/rte_pmd_virtio_version.map create mode 100644 src/spdk/dpdk/drivers/net/virtio/virtio_ethdev.c create mode 100644 src/spdk/dpdk/drivers/net/virtio/virtio_ethdev.h create mode 100644 src/spdk/dpdk/drivers/net/virtio/virtio_logs.h create mode 100644 src/spdk/dpdk/drivers/net/virtio/virtio_pci.c create mode 100644 src/spdk/dpdk/drivers/net/virtio/virtio_pci.h create mode 100644 src/spdk/dpdk/drivers/net/virtio/virtio_ring.h create mode 100644 src/spdk/dpdk/drivers/net/virtio/virtio_rxtx.c create mode 100644 src/spdk/dpdk/drivers/net/virtio/virtio_rxtx.h create mode 100644 src/spdk/dpdk/drivers/net/virtio/virtio_rxtx_packed_avx.c create mode 100644 src/spdk/dpdk/drivers/net/virtio/virtio_rxtx_simple.c create mode 100644 src/spdk/dpdk/drivers/net/virtio/virtio_rxtx_simple.h create mode 100644 src/spdk/dpdk/drivers/net/virtio/virtio_rxtx_simple_altivec.c create mode 100644 src/spdk/dpdk/drivers/net/virtio/virtio_rxtx_simple_neon.c create mode 100644 src/spdk/dpdk/drivers/net/virtio/virtio_rxtx_simple_sse.c create mode 100644 
src/spdk/dpdk/drivers/net/virtio/virtio_user/vhost.h
create mode 100644 src/spdk/dpdk/drivers/net/virtio/virtio_user/vhost_kernel.c
create mode 100644 src/spdk/dpdk/drivers/net/virtio/virtio_user/vhost_kernel_tap.c
create mode 100644 src/spdk/dpdk/drivers/net/virtio/virtio_user/vhost_kernel_tap.h
create mode 100644 src/spdk/dpdk/drivers/net/virtio/virtio_user/vhost_user.c
create mode 100644 src/spdk/dpdk/drivers/net/virtio/virtio_user/virtio_user_dev.c
create mode 100644 src/spdk/dpdk/drivers/net/virtio/virtio_user/virtio_user_dev.h
create mode 100644 src/spdk/dpdk/drivers/net/virtio/virtio_user_ethdev.c
create mode 100644 src/spdk/dpdk/drivers/net/virtio/virtqueue.c
create mode 100644 src/spdk/dpdk/drivers/net/virtio/virtqueue.h
create mode 100644 src/spdk/dpdk/drivers/net/vmxnet3/Makefile
create mode 100644 src/spdk/dpdk/drivers/net/vmxnet3/base/README
create mode 100644 src/spdk/dpdk/drivers/net/vmxnet3/base/upt1_defs.h
create mode 100644 src/spdk/dpdk/drivers/net/vmxnet3/base/vmware_pack_begin.h
create mode 100644 src/spdk/dpdk/drivers/net/vmxnet3/base/vmware_pack_end.h
create mode 100644 src/spdk/dpdk/drivers/net/vmxnet3/base/vmxnet3_defs.h
create mode 100644 src/spdk/dpdk/drivers/net/vmxnet3/base/vmxnet3_osdep.h
create mode 100644 src/spdk/dpdk/drivers/net/vmxnet3/meson.build
create mode 100644 src/spdk/dpdk/drivers/net/vmxnet3/rte_pmd_vmxnet3_version.map
create mode 100644 src/spdk/dpdk/drivers/net/vmxnet3/vmxnet3_ethdev.c
create mode 100644 src/spdk/dpdk/drivers/net/vmxnet3/vmxnet3_ethdev.h
create mode 100644 src/spdk/dpdk/drivers/net/vmxnet3/vmxnet3_logs.h
create mode 100644 src/spdk/dpdk/drivers/net/vmxnet3/vmxnet3_ring.h
create mode 100644 src/spdk/dpdk/drivers/net/vmxnet3/vmxnet3_rxtx.c

diff --git a/src/spdk/dpdk/drivers/net/Makefile b/src/spdk/dpdk/drivers/net/Makefile
new file mode 100644
index 000000000..361974eac
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/Makefile
@@ -0,0 +1,84 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2010-2015 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# set in mk/toolchain/xxx/rte.toolchain-compat.mk
+ifeq ($(CONFIG_RTE_LIBRTE_THUNDERX_NICVF_PMD),d)
+ $(warning thunderx pmd is not supported by old compilers)
+endif
+
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_AF_PACKET) += af_packet
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_AF_XDP) += af_xdp
+DIRS-$(CONFIG_RTE_LIBRTE_ARK_PMD) += ark
+DIRS-$(CONFIG_RTE_LIBRTE_ATLANTIC_PMD) += atlantic
+DIRS-$(CONFIG_RTE_LIBRTE_AVP_PMD) += avp
+DIRS-$(CONFIG_RTE_LIBRTE_AXGBE_PMD) += axgbe
+DIRS-$(CONFIG_RTE_LIBRTE_BNX2X_PMD) += bnx2x
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += bonding
+DIRS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += cxgbe
+ifeq ($(CONFIG_RTE_LIBRTE_DPAA_BUS),y)
+DIRS-$(CONFIG_RTE_LIBRTE_DPAA_PMD) += dpaa
+endif
+ifeq ($(CONFIG_RTE_EAL_VFIO)$(CONFIG_RTE_LIBRTE_FSLMC_BUS),yy)
+DIRS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += dpaa2
+endif
+DIRS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000
+DIRS-$(CONFIG_RTE_LIBRTE_ENA_PMD) += ena
+DIRS-$(CONFIG_RTE_LIBRTE_ENETC_PMD) += enetc
+DIRS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_FAILSAFE) += failsafe
+DIRS-$(CONFIG_RTE_LIBRTE_FM10K_PMD) += fm10k
+DIRS-$(CONFIG_RTE_LIBRTE_HINIC_PMD) += hinic
+DIRS-$(CONFIG_RTE_LIBRTE_HNS3_PMD) += hns3
+DIRS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e
+DIRS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf
+DIRS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice
+DIRS-$(CONFIG_RTE_LIBRTE_IGC_PMD) += igc
+DIRS-$(CONFIG_RTE_LIBRTE_IONIC_PMD) += ionic
+DIRS-$(CONFIG_RTE_LIBRTE_IPN3KE_PMD) += ipn3ke
+DIRS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe
+DIRS-$(CONFIG_RTE_LIBRTE_LIO_PMD) += liquidio
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_MEMIF) += memif
+DIRS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += mlx4
+DIRS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5
+DIRS-$(CONFIG_RTE_LIBRTE_MVNETA_PMD) += mvneta
+DIRS-$(CONFIG_RTE_LIBRTE_MVPP2_PMD) += mvpp2
+DIRS-$(CONFIG_RTE_LIBRTE_NETVSC_PMD) += netvsc
+DIRS-$(CONFIG_RTE_LIBRTE_NFB_PMD) += nfb
+DIRS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp
+DIRS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_NULL) += null
+DIRS-$(CONFIG_RTE_LIBRTE_OCTEONTX_PMD) += octeontx
+DIRS-$(CONFIG_RTE_LIBRTE_OCTEONTX2_PMD) += octeontx2
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_PCAP) += pcap
+DIRS-$(CONFIG_RTE_LIBRTE_PFE_PMD) += pfe
+DIRS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_RING) += ring
+DIRS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_SZEDATA2) += szedata2
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_TAP) += tap
+DIRS-$(CONFIG_RTE_LIBRTE_THUNDERX_NICVF_PMD) += thunderx
+DIRS-$(CONFIG_RTE_LIBRTE_VDEV_NETVSC_PMD) += vdev_netvsc
+DIRS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += virtio
+DIRS-$(CONFIG_RTE_LIBRTE_VMXNET3_PMD) += vmxnet3
+
+ifeq ($(CONFIG_RTE_LIBRTE_KNI),y)
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_KNI) += kni
+endif
+
+ifeq ($(CONFIG_RTE_LIBRTE_SCHED),y)
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += softnic
+endif # $(CONFIG_RTE_LIBRTE_SCHED)
+
+ifeq ($(CONFIG_RTE_LIBRTE_VHOST),y)
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_VHOST) += vhost
+endif # $(CONFIG_RTE_LIBRTE_VHOST)
+
+ifeq ($(CONFIG_RTE_LIBRTE_MVPP2_PMD),y)
+ifeq ($(CONFIG_RTE_LIBRTE_CFGFILE),n)
+$(error "RTE_LIBRTE_CFGFILE must be enabled in configuration!")
+endif
+endif
+
+include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/src/spdk/dpdk/drivers/net/af_packet/Makefile b/src/spdk/dpdk/drivers/net/af_packet/Makefile
new file mode 100644
index 000000000..91dbf0a69
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/af_packet/Makefile
@@ -0,0 +1,27 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2014 John W. Linville
+# Copyright(c) 2010-2014 Intel Corporation.
+# Copyright(c) 2014 6WIND S.A.
+# All rights reserved.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_af_packet.a
+
+EXPORT_MAP := rte_pmd_af_packet_version.map
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
+LDLIBS += -lrte_bus_vdev
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_AF_PACKET) += rte_eth_af_packet.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/net/af_packet/meson.build b/src/spdk/dpdk/drivers/net/af_packet/meson.build
new file mode 100644
index 000000000..a7f392ea1
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/af_packet/meson.build
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+if not is_linux
+ build = false
+ reason = 'only supported on linux'
+endif
+sources = files('rte_eth_af_packet.c')
diff --git a/src/spdk/dpdk/drivers/net/af_packet/rte_eth_af_packet.c b/src/spdk/dpdk/drivers/net/af_packet/rte_eth_af_packet.c
new file mode 100644
index 000000000..22feb72e3
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/af_packet/rte_eth_af_packet.c
@@ -0,0 +1,1090 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014 John W. Linville
+ * Originally based upon librte_pmd_pcap code:
+ * Copyright(c) 2010-2015 Intel Corporation.
+ * Copyright(c) 2014 6WIND S.A.
+ * All rights reserved.
+ */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define ETH_AF_PACKET_IFACE_ARG "iface" +#define ETH_AF_PACKET_NUM_Q_ARG "qpairs" +#define ETH_AF_PACKET_BLOCKSIZE_ARG "blocksz" +#define ETH_AF_PACKET_FRAMESIZE_ARG "framesz" +#define ETH_AF_PACKET_FRAMECOUNT_ARG "framecnt" +#define ETH_AF_PACKET_QDISC_BYPASS_ARG "qdisc_bypass" + +#define DFLT_FRAME_SIZE (1 << 11) +#define DFLT_FRAME_COUNT (1 << 9) + +struct pkt_rx_queue { + int sockfd; + + struct iovec *rd; + uint8_t *map; + unsigned int framecount; + unsigned int framenum; + + struct rte_mempool *mb_pool; + uint16_t in_port; + + volatile unsigned long rx_pkts; + volatile unsigned long rx_bytes; +}; + +struct pkt_tx_queue { + int sockfd; + unsigned int frame_data_size; + + struct iovec *rd; + uint8_t *map; + unsigned int framecount; + unsigned int framenum; + + volatile unsigned long tx_pkts; + volatile unsigned long err_pkts; + volatile unsigned long tx_bytes; +}; + +struct pmd_internals { + unsigned nb_queues; + + int if_index; + char *if_name; + struct rte_ether_addr eth_addr; + + struct tpacket_req req; + + struct pkt_rx_queue *rx_queue; + struct pkt_tx_queue *tx_queue; +}; + +static const char *valid_arguments[] = { + ETH_AF_PACKET_IFACE_ARG, + ETH_AF_PACKET_NUM_Q_ARG, + ETH_AF_PACKET_BLOCKSIZE_ARG, + ETH_AF_PACKET_FRAMESIZE_ARG, + ETH_AF_PACKET_FRAMECOUNT_ARG, + ETH_AF_PACKET_QDISC_BYPASS_ARG, + NULL +}; + +static struct rte_eth_link pmd_link = { + .link_speed = ETH_SPEED_NUM_10G, + .link_duplex = ETH_LINK_FULL_DUPLEX, + .link_status = ETH_LINK_DOWN, + .link_autoneg = ETH_LINK_FIXED, +}; + +static int af_packet_logtype; + +#define PMD_LOG(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, af_packet_logtype, \ + "%s(): " fmt "\n", __func__, ##args) + +#define PMD_LOG_ERRNO(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, af_packet_logtype, \ + "%s(): " fmt ":%s\n", __func__, ##args, strerror(errno)) + +static uint16_t +eth_af_packet_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) +{ + unsigned i; + struct tpacket2_hdr *ppd; + struct rte_mbuf *mbuf; + uint8_t *pbuf; + struct pkt_rx_queue *pkt_q = queue; + uint16_t num_rx = 0; + unsigned long num_rx_bytes = 0; + unsigned int framecount, framenum; + + if (unlikely(nb_pkts == 0)) + return 0; + + /* + * Reads the given number of packets from the AF_PACKET socket one by + * one and copies the packet data into a newly allocated mbuf. 
+ */ + framecount = pkt_q->framecount; + framenum = pkt_q->framenum; + for (i = 0; i < nb_pkts; i++) { + /* point at the next incoming frame */ + ppd = (struct tpacket2_hdr *) pkt_q->rd[framenum].iov_base; + if ((ppd->tp_status & TP_STATUS_USER) == 0) + break; + + /* allocate the next mbuf */ + mbuf = rte_pktmbuf_alloc(pkt_q->mb_pool); + if (unlikely(mbuf == NULL)) + break; + + /* packet will fit in the mbuf, go ahead and receive it */ + rte_pktmbuf_pkt_len(mbuf) = rte_pktmbuf_data_len(mbuf) = ppd->tp_snaplen; + pbuf = (uint8_t *) ppd + ppd->tp_mac; + memcpy(rte_pktmbuf_mtod(mbuf, void *), pbuf, rte_pktmbuf_data_len(mbuf)); + + /* check for vlan info */ + if (ppd->tp_status & TP_STATUS_VLAN_VALID) { + mbuf->vlan_tci = ppd->tp_vlan_tci; + mbuf->ol_flags |= (PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED); + } + + /* release incoming frame and advance ring buffer */ + ppd->tp_status = TP_STATUS_KERNEL; + if (++framenum >= framecount) + framenum = 0; + mbuf->port = pkt_q->in_port; + + /* account for the receive frame */ + bufs[i] = mbuf; + num_rx++; + num_rx_bytes += mbuf->pkt_len; + } + pkt_q->framenum = framenum; + pkt_q->rx_pkts += num_rx; + pkt_q->rx_bytes += num_rx_bytes; + return num_rx; +} + +/* + * Callback to handle sending packets through a real NIC. + */ +static uint16_t +eth_af_packet_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) +{ + struct tpacket2_hdr *ppd; + struct rte_mbuf *mbuf; + uint8_t *pbuf; + unsigned int framecount, framenum; + struct pollfd pfd; + struct pkt_tx_queue *pkt_q = queue; + uint16_t num_tx = 0; + unsigned long num_tx_bytes = 0; + int i; + + if (unlikely(nb_pkts == 0)) + return 0; + + memset(&pfd, 0, sizeof(pfd)); + pfd.fd = pkt_q->sockfd; + pfd.events = POLLOUT; + pfd.revents = 0; + + framecount = pkt_q->framecount; + framenum = pkt_q->framenum; + ppd = (struct tpacket2_hdr *) pkt_q->rd[framenum].iov_base; + for (i = 0; i < nb_pkts; i++) { + mbuf = *bufs++; + + /* drop oversized packets */ + if (mbuf->pkt_len > pkt_q->frame_data_size) { + rte_pktmbuf_free(mbuf); + continue; + } + + /* insert vlan info if necessary */ + if (mbuf->ol_flags & PKT_TX_VLAN_PKT) { + if (rte_vlan_insert(&mbuf)) { + rte_pktmbuf_free(mbuf); + continue; + } + } + + /* point at the next incoming frame */ + if ((ppd->tp_status != TP_STATUS_AVAILABLE) && + (poll(&pfd, 1, -1) < 0)) + break; + + /* copy the tx frame data */ + pbuf = (uint8_t *) ppd + TPACKET2_HDRLEN - + sizeof(struct sockaddr_ll); + + struct rte_mbuf *tmp_mbuf = mbuf; + while (tmp_mbuf) { + uint16_t data_len = rte_pktmbuf_data_len(tmp_mbuf); + memcpy(pbuf, rte_pktmbuf_mtod(tmp_mbuf, void*), data_len); + pbuf += data_len; + tmp_mbuf = tmp_mbuf->next; + } + + ppd->tp_len = mbuf->pkt_len; + ppd->tp_snaplen = mbuf->pkt_len; + + /* release incoming frame and advance ring buffer */ + ppd->tp_status = TP_STATUS_SEND_REQUEST; + if (++framenum >= framecount) + framenum = 0; + ppd = (struct tpacket2_hdr *) pkt_q->rd[framenum].iov_base; + + num_tx++; + num_tx_bytes += mbuf->pkt_len; + rte_pktmbuf_free(mbuf); + } + + /* kick-off transmits */ + if (sendto(pkt_q->sockfd, NULL, 0, MSG_DONTWAIT, NULL, 0) == -1 && + errno != ENOBUFS && errno != EAGAIN) { + /* + * In case of a ENOBUFS/EAGAIN error all of the enqueued + * packets will be considered successful even though only some + * are sent. 
+ */ + + num_tx = 0; + num_tx_bytes = 0; + } + + pkt_q->framenum = framenum; + pkt_q->tx_pkts += num_tx; + pkt_q->err_pkts += i - num_tx; + pkt_q->tx_bytes += num_tx_bytes; + return i; +} + +static int +eth_dev_start(struct rte_eth_dev *dev) +{ + dev->data->dev_link.link_status = ETH_LINK_UP; + return 0; +} + +/* + * This function gets called when the current port gets stopped. + */ +static void +eth_dev_stop(struct rte_eth_dev *dev) +{ + unsigned i; + int sockfd; + struct pmd_internals *internals = dev->data->dev_private; + + for (i = 0; i < internals->nb_queues; i++) { + sockfd = internals->rx_queue[i].sockfd; + if (sockfd != -1) + close(sockfd); + + /* Prevent use after free in case tx fd == rx fd */ + if (sockfd != internals->tx_queue[i].sockfd) { + sockfd = internals->tx_queue[i].sockfd; + if (sockfd != -1) + close(sockfd); + } + + internals->rx_queue[i].sockfd = -1; + internals->tx_queue[i].sockfd = -1; + } + + dev->data->dev_link.link_status = ETH_LINK_DOWN; +} + +static int +eth_dev_configure(struct rte_eth_dev *dev __rte_unused) +{ + return 0; +} + +static int +eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) +{ + struct pmd_internals *internals = dev->data->dev_private; + + dev_info->if_index = internals->if_index; + dev_info->max_mac_addrs = 1; + dev_info->max_rx_pktlen = (uint32_t)ETH_FRAME_LEN; + dev_info->max_rx_queues = (uint16_t)internals->nb_queues; + dev_info->max_tx_queues = (uint16_t)internals->nb_queues; + dev_info->min_rx_bufsize = 0; + dev_info->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS | + DEV_TX_OFFLOAD_VLAN_INSERT; + + return 0; +} + +static int +eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats) +{ + unsigned i, imax; + unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0; + unsigned long rx_bytes_total = 0, tx_bytes_total = 0; + const struct pmd_internals *internal = dev->data->dev_private; + + imax = (internal->nb_queues < RTE_ETHDEV_QUEUE_STAT_CNTRS ? + internal->nb_queues : RTE_ETHDEV_QUEUE_STAT_CNTRS); + for (i = 0; i < imax; i++) { + igb_stats->q_ipackets[i] = internal->rx_queue[i].rx_pkts; + igb_stats->q_ibytes[i] = internal->rx_queue[i].rx_bytes; + rx_total += igb_stats->q_ipackets[i]; + rx_bytes_total += igb_stats->q_ibytes[i]; + } + + imax = (internal->nb_queues < RTE_ETHDEV_QUEUE_STAT_CNTRS ? 
+ internal->nb_queues : RTE_ETHDEV_QUEUE_STAT_CNTRS); + for (i = 0; i < imax; i++) { + igb_stats->q_opackets[i] = internal->tx_queue[i].tx_pkts; + igb_stats->q_obytes[i] = internal->tx_queue[i].tx_bytes; + tx_total += igb_stats->q_opackets[i]; + tx_err_total += internal->tx_queue[i].err_pkts; + tx_bytes_total += igb_stats->q_obytes[i]; + } + + igb_stats->ipackets = rx_total; + igb_stats->ibytes = rx_bytes_total; + igb_stats->opackets = tx_total; + igb_stats->oerrors = tx_err_total; + igb_stats->obytes = tx_bytes_total; + return 0; +} + +static int +eth_stats_reset(struct rte_eth_dev *dev) +{ + unsigned i; + struct pmd_internals *internal = dev->data->dev_private; + + for (i = 0; i < internal->nb_queues; i++) { + internal->rx_queue[i].rx_pkts = 0; + internal->rx_queue[i].rx_bytes = 0; + } + + for (i = 0; i < internal->nb_queues; i++) { + internal->tx_queue[i].tx_pkts = 0; + internal->tx_queue[i].err_pkts = 0; + internal->tx_queue[i].tx_bytes = 0; + } + + return 0; +} + +static void +eth_dev_close(struct rte_eth_dev *dev __rte_unused) +{ +} + +static void +eth_queue_release(void *q __rte_unused) +{ +} + +static int +eth_link_update(struct rte_eth_dev *dev __rte_unused, + int wait_to_complete __rte_unused) +{ + return 0; +} + +static int +eth_rx_queue_setup(struct rte_eth_dev *dev, + uint16_t rx_queue_id, + uint16_t nb_rx_desc __rte_unused, + unsigned int socket_id __rte_unused, + const struct rte_eth_rxconf *rx_conf __rte_unused, + struct rte_mempool *mb_pool) +{ + struct pmd_internals *internals = dev->data->dev_private; + struct pkt_rx_queue *pkt_q = &internals->rx_queue[rx_queue_id]; + unsigned int buf_size, data_size; + + pkt_q->mb_pool = mb_pool; + + /* Now get the space available for data in the mbuf */ + buf_size = rte_pktmbuf_data_room_size(pkt_q->mb_pool) - + RTE_PKTMBUF_HEADROOM; + data_size = internals->req.tp_frame_size; + data_size -= TPACKET2_HDRLEN - sizeof(struct sockaddr_ll); + + if (data_size > buf_size) { + PMD_LOG(ERR, + "%s: %d bytes will not fit in mbuf (%d bytes)", + dev->device->name, data_size, buf_size); + return -ENOMEM; + } + + dev->data->rx_queues[rx_queue_id] = pkt_q; + pkt_q->in_port = dev->data->port_id; + + return 0; +} + +static int +eth_tx_queue_setup(struct rte_eth_dev *dev, + uint16_t tx_queue_id, + uint16_t nb_tx_desc __rte_unused, + unsigned int socket_id __rte_unused, + const struct rte_eth_txconf *tx_conf __rte_unused) +{ + + struct pmd_internals *internals = dev->data->dev_private; + + dev->data->tx_queues[tx_queue_id] = &internals->tx_queue[tx_queue_id]; + return 0; +} + +static int +eth_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) +{ + struct pmd_internals *internals = dev->data->dev_private; + struct ifreq ifr = { .ifr_mtu = mtu }; + int ret; + int s; + unsigned int data_size = internals->req.tp_frame_size - + TPACKET2_HDRLEN; + + if (mtu > data_size) + return -EINVAL; + + s = socket(PF_INET, SOCK_DGRAM, 0); + if (s < 0) + return -EINVAL; + + strlcpy(ifr.ifr_name, internals->if_name, IFNAMSIZ); + ret = ioctl(s, SIOCSIFMTU, &ifr); + close(s); + + if (ret < 0) + return -EINVAL; + + return 0; +} + +static int +eth_dev_macaddr_set(struct rte_eth_dev *dev, struct rte_ether_addr *addr) +{ + struct pmd_internals *internals = dev->data->dev_private; + struct ifreq ifr = { }; + int sockfd = internals->rx_queue[0].sockfd; + int ret; + + if (sockfd == -1) { + PMD_LOG(ERR, "receive socket not found"); + return -EINVAL; + } + + strlcpy(ifr.ifr_name, internals->if_name, IFNAMSIZ); + ifr.ifr_hwaddr.sa_family = ARPHRD_ETHER; + memcpy(ifr.ifr_hwaddr.sa_data, 
addr, sizeof(*addr)); + ret = ioctl(sockfd, SIOCSIFHWADDR, &ifr); + + if (ret < 0) { + PMD_LOG_ERRNO(ERR, "ioctl(SIOCSIFHWADDR) failed"); + return -EINVAL; + } + + return 0; +} + +static int +eth_dev_change_flags(char *if_name, uint32_t flags, uint32_t mask) +{ + struct ifreq ifr; + int ret = 0; + int s; + + s = socket(PF_INET, SOCK_DGRAM, 0); + if (s < 0) + return -errno; + + strlcpy(ifr.ifr_name, if_name, IFNAMSIZ); + if (ioctl(s, SIOCGIFFLAGS, &ifr) < 0) { + ret = -errno; + goto out; + } + ifr.ifr_flags &= mask; + ifr.ifr_flags |= flags; + if (ioctl(s, SIOCSIFFLAGS, &ifr) < 0) { + ret = -errno; + goto out; + } +out: + close(s); + return ret; +} + +static int +eth_dev_promiscuous_enable(struct rte_eth_dev *dev) +{ + struct pmd_internals *internals = dev->data->dev_private; + + return eth_dev_change_flags(internals->if_name, IFF_PROMISC, ~0); +} + +static int +eth_dev_promiscuous_disable(struct rte_eth_dev *dev) +{ + struct pmd_internals *internals = dev->data->dev_private; + + return eth_dev_change_flags(internals->if_name, 0, ~IFF_PROMISC); +} + +static const struct eth_dev_ops ops = { + .dev_start = eth_dev_start, + .dev_stop = eth_dev_stop, + .dev_close = eth_dev_close, + .dev_configure = eth_dev_configure, + .dev_infos_get = eth_dev_info, + .mac_addr_set = eth_dev_macaddr_set, + .mtu_set = eth_dev_mtu_set, + .promiscuous_enable = eth_dev_promiscuous_enable, + .promiscuous_disable = eth_dev_promiscuous_disable, + .rx_queue_setup = eth_rx_queue_setup, + .tx_queue_setup = eth_tx_queue_setup, + .rx_queue_release = eth_queue_release, + .tx_queue_release = eth_queue_release, + .link_update = eth_link_update, + .stats_get = eth_stats_get, + .stats_reset = eth_stats_reset, +}; + +/* + * Opens an AF_PACKET socket + */ +static int +open_packet_iface(const char *key __rte_unused, + const char *value __rte_unused, + void *extra_args) +{ + int *sockfd = extra_args; + + /* Open an AF_PACKET socket... 
*/ + *sockfd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL)); + if (*sockfd == -1) { + PMD_LOG(ERR, "Could not open AF_PACKET socket"); + return -1; + } + + return 0; +} + +static int +rte_pmd_init_internals(struct rte_vdev_device *dev, + const int sockfd, + const unsigned nb_queues, + unsigned int blocksize, + unsigned int blockcnt, + unsigned int framesize, + unsigned int framecnt, + unsigned int qdisc_bypass, + struct pmd_internals **internals, + struct rte_eth_dev **eth_dev, + struct rte_kvargs *kvlist) +{ + const char *name = rte_vdev_device_name(dev); + const unsigned int numa_node = dev->device.numa_node; + struct rte_eth_dev_data *data = NULL; + struct rte_kvargs_pair *pair = NULL; + struct ifreq ifr; + size_t ifnamelen; + unsigned k_idx; + struct sockaddr_ll sockaddr; + struct tpacket_req *req; + struct pkt_rx_queue *rx_queue; + struct pkt_tx_queue *tx_queue; + int rc, tpver, discard; + int qsockfd = -1; + unsigned int i, q, rdsize; +#if defined(PACKET_FANOUT) + int fanout_arg; +#endif + + for (k_idx = 0; k_idx < kvlist->count; k_idx++) { + pair = &kvlist->pairs[k_idx]; + if (strstr(pair->key, ETH_AF_PACKET_IFACE_ARG) != NULL) + break; + } + if (pair == NULL) { + PMD_LOG(ERR, + "%s: no interface specified for AF_PACKET ethdev", + name); + return -1; + } + + PMD_LOG(INFO, + "%s: creating AF_PACKET-backed ethdev on numa socket %u", + name, numa_node); + + *internals = rte_zmalloc_socket(name, sizeof(**internals), + 0, numa_node); + if (*internals == NULL) + return -1; + + + (*internals)->rx_queue = rte_calloc_socket("af_packet_rx", + nb_queues, + sizeof(struct pkt_rx_queue), + 0, numa_node); + (*internals)->tx_queue = rte_calloc_socket("af_packet_tx", + nb_queues, + sizeof(struct pkt_tx_queue), + 0, numa_node); + if (!(*internals)->rx_queue || !(*internals)->tx_queue) { + rte_free((*internals)->rx_queue); + rte_free((*internals)->tx_queue); + return -1; + } + + for (q = 0; q < nb_queues; q++) { + (*internals)->rx_queue[q].map = MAP_FAILED; + (*internals)->tx_queue[q].map = MAP_FAILED; + } + + req = &((*internals)->req); + + req->tp_block_size = blocksize; + req->tp_block_nr = blockcnt; + req->tp_frame_size = framesize; + req->tp_frame_nr = framecnt; + + ifnamelen = strlen(pair->value); + if (ifnamelen < sizeof(ifr.ifr_name)) { + memcpy(ifr.ifr_name, pair->value, ifnamelen); + ifr.ifr_name[ifnamelen] = '\0'; + } else { + PMD_LOG(ERR, + "%s: I/F name too long (%s)", + name, pair->value); + return -1; + } + if (ioctl(sockfd, SIOCGIFINDEX, &ifr) == -1) { + PMD_LOG_ERRNO(ERR, "%s: ioctl failed (SIOCGIFINDEX)", name); + return -1; + } + (*internals)->if_name = strdup(pair->value); + if ((*internals)->if_name == NULL) + return -1; + (*internals)->if_index = ifr.ifr_ifindex; + + if (ioctl(sockfd, SIOCGIFHWADDR, &ifr) == -1) { + PMD_LOG_ERRNO(ERR, "%s: ioctl failed (SIOCGIFHWADDR)", name); + return -1; + } + memcpy(&(*internals)->eth_addr, ifr.ifr_hwaddr.sa_data, ETH_ALEN); + + memset(&sockaddr, 0, sizeof(sockaddr)); + sockaddr.sll_family = AF_PACKET; + sockaddr.sll_protocol = htons(ETH_P_ALL); + sockaddr.sll_ifindex = (*internals)->if_index; + +#if defined(PACKET_FANOUT) + fanout_arg = (getpid() ^ (*internals)->if_index) & 0xffff; + fanout_arg |= (PACKET_FANOUT_HASH | PACKET_FANOUT_FLAG_DEFRAG) << 16; +#if defined(PACKET_FANOUT_FLAG_ROLLOVER) + fanout_arg |= PACKET_FANOUT_FLAG_ROLLOVER << 16; +#endif +#endif + + for (q = 0; q < nb_queues; q++) { + /* Open an AF_PACKET socket for this queue... 
*/ + qsockfd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL)); + if (qsockfd == -1) { + PMD_LOG_ERRNO(ERR, + "%s: could not open AF_PACKET socket", + name); + return -1; + } + + tpver = TPACKET_V2; + rc = setsockopt(qsockfd, SOL_PACKET, PACKET_VERSION, + &tpver, sizeof(tpver)); + if (rc == -1) { + PMD_LOG_ERRNO(ERR, + "%s: could not set PACKET_VERSION on AF_PACKET socket for %s", + name, pair->value); + goto error; + } + + discard = 1; + rc = setsockopt(qsockfd, SOL_PACKET, PACKET_LOSS, + &discard, sizeof(discard)); + if (rc == -1) { + PMD_LOG_ERRNO(ERR, + "%s: could not set PACKET_LOSS on AF_PACKET socket for %s", + name, pair->value); + goto error; + } + +#if defined(PACKET_QDISC_BYPASS) + rc = setsockopt(qsockfd, SOL_PACKET, PACKET_QDISC_BYPASS, + &qdisc_bypass, sizeof(qdisc_bypass)); + if (rc == -1) { + PMD_LOG_ERRNO(ERR, + "%s: could not set PACKET_QDISC_BYPASS on AF_PACKET socket for %s", + name, pair->value); + goto error; + } +#else + RTE_SET_USED(qdisc_bypass); +#endif + + rc = setsockopt(qsockfd, SOL_PACKET, PACKET_RX_RING, req, sizeof(*req)); + if (rc == -1) { + PMD_LOG_ERRNO(ERR, + "%s: could not set PACKET_RX_RING on AF_PACKET socket for %s", + name, pair->value); + goto error; + } + + rc = setsockopt(qsockfd, SOL_PACKET, PACKET_TX_RING, req, sizeof(*req)); + if (rc == -1) { + PMD_LOG_ERRNO(ERR, + "%s: could not set PACKET_TX_RING on AF_PACKET " + "socket for %s", name, pair->value); + goto error; + } + + rx_queue = &((*internals)->rx_queue[q]); + rx_queue->framecount = req->tp_frame_nr; + + rx_queue->map = mmap(NULL, 2 * req->tp_block_size * req->tp_block_nr, + PROT_READ | PROT_WRITE, MAP_SHARED | MAP_LOCKED, + qsockfd, 0); + if (rx_queue->map == MAP_FAILED) { + PMD_LOG_ERRNO(ERR, + "%s: call to mmap failed on AF_PACKET socket for %s", + name, pair->value); + goto error; + } + + /* rdsize is same for both Tx and Rx */ + rdsize = req->tp_frame_nr * sizeof(*(rx_queue->rd)); + + rx_queue->rd = rte_zmalloc_socket(name, rdsize, 0, numa_node); + if (rx_queue->rd == NULL) + goto error; + for (i = 0; i < req->tp_frame_nr; ++i) { + rx_queue->rd[i].iov_base = rx_queue->map + (i * framesize); + rx_queue->rd[i].iov_len = req->tp_frame_size; + } + rx_queue->sockfd = qsockfd; + + tx_queue = &((*internals)->tx_queue[q]); + tx_queue->framecount = req->tp_frame_nr; + tx_queue->frame_data_size = req->tp_frame_size; + tx_queue->frame_data_size -= TPACKET2_HDRLEN - + sizeof(struct sockaddr_ll); + + tx_queue->map = rx_queue->map + req->tp_block_size * req->tp_block_nr; + + tx_queue->rd = rte_zmalloc_socket(name, rdsize, 0, numa_node); + if (tx_queue->rd == NULL) + goto error; + for (i = 0; i < req->tp_frame_nr; ++i) { + tx_queue->rd[i].iov_base = tx_queue->map + (i * framesize); + tx_queue->rd[i].iov_len = req->tp_frame_size; + } + tx_queue->sockfd = qsockfd; + + rc = bind(qsockfd, (const struct sockaddr*)&sockaddr, sizeof(sockaddr)); + if (rc == -1) { + PMD_LOG_ERRNO(ERR, + "%s: could not bind AF_PACKET socket to %s", + name, pair->value); + goto error; + } + +#if defined(PACKET_FANOUT) + rc = setsockopt(qsockfd, SOL_PACKET, PACKET_FANOUT, + &fanout_arg, sizeof(fanout_arg)); + if (rc == -1) { + PMD_LOG_ERRNO(ERR, + "%s: could not set PACKET_FANOUT on AF_PACKET socket for %s", + name, pair->value); + goto error; + } +#endif + } + + /* reserve an ethdev entry */ + *eth_dev = rte_eth_vdev_allocate(dev, 0); + if (*eth_dev == NULL) + goto error; + + /* + * now put it all together + * - store queue data in internals, + * - store numa_node in eth_dev + * - point eth_dev_data to internals + * - and point 
eth_dev structure to new eth_dev_data structure + */ + + (*internals)->nb_queues = nb_queues; + + data = (*eth_dev)->data; + data->dev_private = *internals; + data->nb_rx_queues = (uint16_t)nb_queues; + data->nb_tx_queues = (uint16_t)nb_queues; + data->dev_link = pmd_link; + data->mac_addrs = &(*internals)->eth_addr; + + (*eth_dev)->dev_ops = &ops; + + return 0; + +error: + if (qsockfd != -1) + close(qsockfd); + for (q = 0; q < nb_queues; q++) { + munmap((*internals)->rx_queue[q].map, + 2 * req->tp_block_size * req->tp_block_nr); + + rte_free((*internals)->rx_queue[q].rd); + rte_free((*internals)->tx_queue[q].rd); + if (((*internals)->rx_queue[q].sockfd != 0) && + ((*internals)->rx_queue[q].sockfd != qsockfd)) + close((*internals)->rx_queue[q].sockfd); + } + free((*internals)->if_name); + rte_free(*internals); + return -1; +} + +static int +rte_eth_from_packet(struct rte_vdev_device *dev, + int const *sockfd, + struct rte_kvargs *kvlist) +{ + const char *name = rte_vdev_device_name(dev); + struct pmd_internals *internals = NULL; + struct rte_eth_dev *eth_dev = NULL; + struct rte_kvargs_pair *pair = NULL; + unsigned k_idx; + unsigned int blockcount; + unsigned int blocksize; + unsigned int framesize = DFLT_FRAME_SIZE; + unsigned int framecount = DFLT_FRAME_COUNT; + unsigned int qpairs = 1; + unsigned int qdisc_bypass = 1; + + /* do some parameter checking */ + if (*sockfd < 0) + return -1; + + blocksize = getpagesize(); + + /* + * Walk arguments for configurable settings + */ + for (k_idx = 0; k_idx < kvlist->count; k_idx++) { + pair = &kvlist->pairs[k_idx]; + if (strstr(pair->key, ETH_AF_PACKET_NUM_Q_ARG) != NULL) { + qpairs = atoi(pair->value); + if (qpairs < 1) { + PMD_LOG(ERR, + "%s: invalid qpairs value", + name); + return -1; + } + continue; + } + if (strstr(pair->key, ETH_AF_PACKET_BLOCKSIZE_ARG) != NULL) { + blocksize = atoi(pair->value); + if (!blocksize) { + PMD_LOG(ERR, + "%s: invalid blocksize value", + name); + return -1; + } + continue; + } + if (strstr(pair->key, ETH_AF_PACKET_FRAMESIZE_ARG) != NULL) { + framesize = atoi(pair->value); + if (!framesize) { + PMD_LOG(ERR, + "%s: invalid framesize value", + name); + return -1; + } + continue; + } + if (strstr(pair->key, ETH_AF_PACKET_FRAMECOUNT_ARG) != NULL) { + framecount = atoi(pair->value); + if (!framecount) { + PMD_LOG(ERR, + "%s: invalid framecount value", + name); + return -1; + } + continue; + } + if (strstr(pair->key, ETH_AF_PACKET_QDISC_BYPASS_ARG) != NULL) { + qdisc_bypass = atoi(pair->value); + if (qdisc_bypass > 1) { + PMD_LOG(ERR, + "%s: invalid bypass value", + name); + return -1; + } + continue; + } + } + + if (framesize > blocksize) { + PMD_LOG(ERR, + "%s: AF_PACKET MMAP frame size exceeds block size!", + name); + return -1; + } + + blockcount = framecount / (blocksize / framesize); + if (!blockcount) { + PMD_LOG(ERR, + "%s: invalid AF_PACKET MMAP parameters", name); + return -1; + } + + PMD_LOG(INFO, "%s: AF_PACKET MMAP parameters:", name); + PMD_LOG(INFO, "%s:\tblock size %d", name, blocksize); + PMD_LOG(INFO, "%s:\tblock count %d", name, blockcount); + PMD_LOG(INFO, "%s:\tframe size %d", name, framesize); + PMD_LOG(INFO, "%s:\tframe count %d", name, framecount); + + if (rte_pmd_init_internals(dev, *sockfd, qpairs, + blocksize, blockcount, + framesize, framecount, + qdisc_bypass, + &internals, ð_dev, + kvlist) < 0) + return -1; + + eth_dev->rx_pkt_burst = eth_af_packet_rx; + eth_dev->tx_pkt_burst = eth_af_packet_tx; + + rte_eth_dev_probing_finish(eth_dev); + return 0; +} + +static int 
+rte_pmd_af_packet_probe(struct rte_vdev_device *dev) +{ + int ret = 0; + struct rte_kvargs *kvlist; + int sockfd = -1; + struct rte_eth_dev *eth_dev; + const char *name = rte_vdev_device_name(dev); + + PMD_LOG(INFO, "Initializing pmd_af_packet for %s", name); + + if (rte_eal_process_type() == RTE_PROC_SECONDARY) { + eth_dev = rte_eth_dev_attach_secondary(name); + if (!eth_dev) { + PMD_LOG(ERR, "Failed to probe %s", name); + return -1; + } + /* TODO: request info from primary to set up Rx and Tx */ + eth_dev->dev_ops = &ops; + eth_dev->device = &dev->device; + rte_eth_dev_probing_finish(eth_dev); + return 0; + } + + kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), valid_arguments); + if (kvlist == NULL) { + ret = -1; + goto exit; + } + + /* + * If iface argument is passed we open the NICs and use them for + * reading / writing + */ + if (rte_kvargs_count(kvlist, ETH_AF_PACKET_IFACE_ARG) == 1) { + + ret = rte_kvargs_process(kvlist, ETH_AF_PACKET_IFACE_ARG, + &open_packet_iface, &sockfd); + if (ret < 0) + goto exit; + } + + if (dev->device.numa_node == SOCKET_ID_ANY) + dev->device.numa_node = rte_socket_id(); + + ret = rte_eth_from_packet(dev, &sockfd, kvlist); + close(sockfd); /* no longer needed */ + +exit: + rte_kvargs_free(kvlist); + return ret; +} + +static int +rte_pmd_af_packet_remove(struct rte_vdev_device *dev) +{ + struct rte_eth_dev *eth_dev = NULL; + struct pmd_internals *internals; + struct tpacket_req *req; + unsigned q; + + PMD_LOG(INFO, "Closing AF_PACKET ethdev on numa socket %u", + rte_socket_id()); + + if (dev == NULL) + return -1; + + /* find the ethdev entry */ + eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev)); + if (eth_dev == NULL) + return -1; + + /* mac_addrs must not be freed alone because part of dev_private */ + eth_dev->data->mac_addrs = NULL; + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return rte_eth_dev_release_port(eth_dev); + + internals = eth_dev->data->dev_private; + req = &internals->req; + for (q = 0; q < internals->nb_queues; q++) { + munmap(internals->rx_queue[q].map, + 2 * req->tp_block_size * req->tp_block_nr); + rte_free(internals->rx_queue[q].rd); + rte_free(internals->tx_queue[q].rd); + } + free(internals->if_name); + rte_free(internals->rx_queue); + rte_free(internals->tx_queue); + + rte_eth_dev_release_port(eth_dev); + + return 0; +} + +static struct rte_vdev_driver pmd_af_packet_drv = { + .probe = rte_pmd_af_packet_probe, + .remove = rte_pmd_af_packet_remove, +}; + +RTE_PMD_REGISTER_VDEV(net_af_packet, pmd_af_packet_drv); +RTE_PMD_REGISTER_ALIAS(net_af_packet, eth_af_packet); +RTE_PMD_REGISTER_PARAM_STRING(net_af_packet, + "iface= " + "qpairs= " + "blocksz= " + "framesz= " + "framecnt= " + "qdisc_bypass=<0|1>"); + +RTE_INIT(af_packet_init_log) +{ + af_packet_logtype = rte_log_register("pmd.net.packet"); + if (af_packet_logtype >= 0) + rte_log_set_level(af_packet_logtype, RTE_LOG_NOTICE); +} diff --git a/src/spdk/dpdk/drivers/net/af_packet/rte_pmd_af_packet_version.map b/src/spdk/dpdk/drivers/net/af_packet/rte_pmd_af_packet_version.map new file mode 100644 index 000000000..f9f17e4f6 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/af_packet/rte_pmd_af_packet_version.map @@ -0,0 +1,3 @@ +DPDK_20.0 { + local: *; +}; diff --git a/src/spdk/dpdk/drivers/net/af_xdp/Makefile b/src/spdk/dpdk/drivers/net/af_xdp/Makefile new file mode 100644 index 000000000..55db6085a --- /dev/null +++ b/src/spdk/dpdk/drivers/net/af_xdp/Makefile @@ -0,0 +1,26 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2019 Intel Corporation + +include 
$(RTE_SDK)/mk/rte.vars.mk + +# +# library name +# +LIB = librte_pmd_af_xdp.a + +EXPORT_MAP := rte_pmd_af_xdp_version.map + +CFLAGS += -O3 + +CFLAGS += $(WERROR_FLAGS) +LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring +LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs +LDLIBS += -lrte_bus_vdev +LDLIBS += $(shell command -v pkg-config > /dev/null 2>&1 && pkg-config --libs libbpf || echo "-lbpf") + +# +# all source are stored in SRCS-y +# +SRCS-$(CONFIG_RTE_LIBRTE_PMD_AF_XDP) += rte_eth_af_xdp.c + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/src/spdk/dpdk/drivers/net/af_xdp/af_xdp_deps.h b/src/spdk/dpdk/drivers/net/af_xdp/af_xdp_deps.h new file mode 100644 index 000000000..18416d094 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/af_xdp/af_xdp_deps.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019 Intel Corporation. + */ + +#ifndef AF_XDP_DEPS_H_ +#define AF_XDP_DEPS_H_ + +#include +#include + +/* This is to fix the xsk.h's dependency on asm/barrier.h */ +#define smp_rmb() rte_rmb() +#define smp_wmb() rte_wmb() + +#endif /* AF_XDP_DEPS_H_ */ diff --git a/src/spdk/dpdk/drivers/net/af_xdp/meson.build b/src/spdk/dpdk/drivers/net/af_xdp/meson.build new file mode 100644 index 000000000..307aa0e38 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/af_xdp/meson.build @@ -0,0 +1,16 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2019 Intel Corporation + +sources = files('rte_eth_af_xdp.c') + +bpf_dep = dependency('libbpf', required: false) +if not bpf_dep.found() + bpf_dep = cc.find_library('bpf', required: false) +endif + +if bpf_dep.found() and cc.has_header('bpf/xsk.h') and cc.has_header('linux/if_xdp.h') + ext_deps += bpf_dep +else + build = false + reason = 'missing dependency, "libbpf"' +endif diff --git a/src/spdk/dpdk/drivers/net/af_xdp/rte_eth_af_xdp.c b/src/spdk/dpdk/drivers/net/af_xdp/rte_eth_af_xdp.c new file mode 100644 index 000000000..06124ba78 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/af_xdp/rte_eth_af_xdp.c @@ -0,0 +1,1382 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019 Intel Corporation. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "af_xdp_deps.h" +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifndef SOL_XDP +#define SOL_XDP 283 +#endif + +#ifndef AF_XDP +#define AF_XDP 44 +#endif + +#ifndef PF_XDP +#define PF_XDP AF_XDP +#endif + +static int af_xdp_logtype; + +#define AF_XDP_LOG(level, fmt, args...) 
\ + rte_log(RTE_LOG_ ## level, af_xdp_logtype, \ + "%s(): " fmt, __func__, ##args) + +#define ETH_AF_XDP_FRAME_SIZE 2048 +#define ETH_AF_XDP_NUM_BUFFERS 4096 +#define ETH_AF_XDP_DFLT_NUM_DESCS XSK_RING_CONS__DEFAULT_NUM_DESCS +#define ETH_AF_XDP_DFLT_START_QUEUE_IDX 0 +#define ETH_AF_XDP_DFLT_QUEUE_COUNT 1 + +#define ETH_AF_XDP_RX_BATCH_SIZE 32 +#define ETH_AF_XDP_TX_BATCH_SIZE 32 + + +struct xsk_umem_info { + struct xsk_ring_prod fq; + struct xsk_ring_cons cq; + struct xsk_umem *umem; + struct rte_ring *buf_ring; + const struct rte_memzone *mz; + struct rte_mempool *mb_pool; + void *buffer; +}; + +struct rx_stats { + uint64_t rx_pkts; + uint64_t rx_bytes; + uint64_t rx_dropped; +}; + +struct pkt_rx_queue { + struct xsk_ring_cons rx; + struct xsk_umem_info *umem; + struct xsk_socket *xsk; + struct rte_mempool *mb_pool; + + struct rx_stats stats; + + struct pkt_tx_queue *pair; + struct pollfd fds[1]; + int xsk_queue_idx; +}; + +struct tx_stats { + uint64_t tx_pkts; + uint64_t tx_bytes; + uint64_t tx_dropped; +}; + +struct pkt_tx_queue { + struct xsk_ring_prod tx; + struct xsk_umem_info *umem; + + struct tx_stats stats; + + struct pkt_rx_queue *pair; + int xsk_queue_idx; +}; + +struct pmd_internals { + int if_index; + char if_name[IFNAMSIZ]; + int start_queue_idx; + int queue_cnt; + int max_queue_cnt; + int combined_queue_cnt; + + struct rte_ether_addr eth_addr; + + struct pkt_rx_queue *rx_queues; + struct pkt_tx_queue *tx_queues; +}; + +#define ETH_AF_XDP_IFACE_ARG "iface" +#define ETH_AF_XDP_START_QUEUE_ARG "start_queue" +#define ETH_AF_XDP_QUEUE_COUNT_ARG "queue_count" + +static const char * const valid_arguments[] = { + ETH_AF_XDP_IFACE_ARG, + ETH_AF_XDP_START_QUEUE_ARG, + ETH_AF_XDP_QUEUE_COUNT_ARG, + NULL +}; + +static const struct rte_eth_link pmd_link = { + .link_speed = ETH_SPEED_NUM_10G, + .link_duplex = ETH_LINK_FULL_DUPLEX, + .link_status = ETH_LINK_DOWN, + .link_autoneg = ETH_LINK_AUTONEG +}; + +#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG) +static inline int +reserve_fill_queue_zc(struct xsk_umem_info *umem, uint16_t reserve_size, + struct rte_mbuf **bufs) +{ + struct xsk_ring_prod *fq = &umem->fq; + uint32_t idx; + uint16_t i; + + if (unlikely(!xsk_ring_prod__reserve(fq, reserve_size, &idx))) { + for (i = 0; i < reserve_size; i++) + rte_pktmbuf_free(bufs[i]); + AF_XDP_LOG(DEBUG, "Failed to reserve enough fq descs.\n"); + return -1; + } + + for (i = 0; i < reserve_size; i++) { + __u64 *fq_addr; + uint64_t addr; + + fq_addr = xsk_ring_prod__fill_addr(fq, idx++); + addr = (uint64_t)bufs[i] - (uint64_t)umem->buffer - + umem->mb_pool->header_size; + *fq_addr = addr; + } + + xsk_ring_prod__submit(fq, reserve_size); + + return 0; +} +#else +static inline int +reserve_fill_queue_cp(struct xsk_umem_info *umem, uint16_t reserve_size, + struct rte_mbuf **bufs __rte_unused) +{ + struct xsk_ring_prod *fq = &umem->fq; + void *addrs[reserve_size]; + uint32_t idx; + uint16_t i; + + if (rte_ring_dequeue_bulk(umem->buf_ring, addrs, reserve_size, NULL) + != reserve_size) { + AF_XDP_LOG(DEBUG, "Failed to get enough buffers for fq.\n"); + return -1; + } + + if (unlikely(!xsk_ring_prod__reserve(fq, reserve_size, &idx))) { + AF_XDP_LOG(DEBUG, "Failed to reserve enough fq descs.\n"); + rte_ring_enqueue_bulk(umem->buf_ring, addrs, + reserve_size, NULL); + return -1; + } + + for (i = 0; i < reserve_size; i++) { + __u64 *fq_addr; + + fq_addr = xsk_ring_prod__fill_addr(fq, idx++); + *fq_addr = (uint64_t)addrs[i]; + } + + xsk_ring_prod__submit(fq, reserve_size); + + return 0; +} +#endif + +static inline int 
+reserve_fill_queue(struct xsk_umem_info *umem, uint16_t reserve_size, + struct rte_mbuf **bufs) +{ +#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG) + return reserve_fill_queue_zc(umem, reserve_size, bufs); +#else + return reserve_fill_queue_cp(umem, reserve_size, bufs); +#endif +} + +#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG) +static uint16_t +af_xdp_rx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) +{ + struct pkt_rx_queue *rxq = queue; + struct xsk_ring_cons *rx = &rxq->rx; + struct xsk_umem_info *umem = rxq->umem; + uint32_t idx_rx = 0; + unsigned long rx_bytes = 0; + int rcvd, i; + struct rte_mbuf *fq_bufs[ETH_AF_XDP_RX_BATCH_SIZE]; + + /* allocate bufs for fill queue replenishment after rx */ + if (rte_pktmbuf_alloc_bulk(umem->mb_pool, fq_bufs, nb_pkts)) { + AF_XDP_LOG(DEBUG, + "Failed to get enough buffers for fq.\n"); + return -1; + } + + rcvd = xsk_ring_cons__peek(rx, nb_pkts, &idx_rx); + + if (rcvd == 0) { +#if defined(XDP_USE_NEED_WAKEUP) + if (xsk_ring_prod__needs_wakeup(&umem->fq)) + (void)poll(rxq->fds, 1, 1000); +#endif + + goto out; + } + + for (i = 0; i < rcvd; i++) { + const struct xdp_desc *desc; + uint64_t addr; + uint32_t len; + uint64_t offset; + + desc = xsk_ring_cons__rx_desc(rx, idx_rx++); + addr = desc->addr; + len = desc->len; + + offset = xsk_umem__extract_offset(addr); + addr = xsk_umem__extract_addr(addr); + + bufs[i] = (struct rte_mbuf *) + xsk_umem__get_data(umem->buffer, addr + + umem->mb_pool->header_size); + bufs[i]->data_off = offset - sizeof(struct rte_mbuf) - + rte_pktmbuf_priv_size(umem->mb_pool) - + umem->mb_pool->header_size; + + rte_pktmbuf_pkt_len(bufs[i]) = len; + rte_pktmbuf_data_len(bufs[i]) = len; + rx_bytes += len; + } + + xsk_ring_cons__release(rx, rcvd); + + (void)reserve_fill_queue(umem, rcvd, fq_bufs); + + /* statistics */ + rxq->stats.rx_pkts += rcvd; + rxq->stats.rx_bytes += rx_bytes; + +out: + if (rcvd != nb_pkts) + rte_mempool_put_bulk(umem->mb_pool, (void **)&fq_bufs[rcvd], + nb_pkts - rcvd); + + return rcvd; +} +#else +static uint16_t +af_xdp_rx_cp(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) +{ + struct pkt_rx_queue *rxq = queue; + struct xsk_ring_cons *rx = &rxq->rx; + struct xsk_umem_info *umem = rxq->umem; + struct xsk_ring_prod *fq = &umem->fq; + uint32_t idx_rx = 0; + unsigned long rx_bytes = 0; + int rcvd, i; + uint32_t free_thresh = fq->size >> 1; + struct rte_mbuf *mbufs[ETH_AF_XDP_RX_BATCH_SIZE]; + + if (unlikely(rte_pktmbuf_alloc_bulk(rxq->mb_pool, mbufs, nb_pkts) != 0)) + return 0; + + rcvd = xsk_ring_cons__peek(rx, nb_pkts, &idx_rx); + if (rcvd == 0) { +#if defined(XDP_USE_NEED_WAKEUP) + if (xsk_ring_prod__needs_wakeup(fq)) + (void)poll(rxq->fds, 1, 1000); +#endif + + goto out; + } + + if (xsk_prod_nb_free(fq, free_thresh) >= free_thresh) + (void)reserve_fill_queue(umem, ETH_AF_XDP_RX_BATCH_SIZE, NULL); + + for (i = 0; i < rcvd; i++) { + const struct xdp_desc *desc; + uint64_t addr; + uint32_t len; + void *pkt; + + desc = xsk_ring_cons__rx_desc(rx, idx_rx++); + addr = desc->addr; + len = desc->len; + pkt = xsk_umem__get_data(rxq->umem->mz->addr, addr); + + rte_memcpy(rte_pktmbuf_mtod(mbufs[i], void *), pkt, len); + rte_ring_enqueue(umem->buf_ring, (void *)addr); + rte_pktmbuf_pkt_len(mbufs[i]) = len; + rte_pktmbuf_data_len(mbufs[i]) = len; + rx_bytes += len; + bufs[i] = mbufs[i]; + } + + xsk_ring_cons__release(rx, rcvd); + + /* statistics */ + rxq->stats.rx_pkts += rcvd; + rxq->stats.rx_bytes += rx_bytes; + +out: + if (rcvd != nb_pkts) + rte_mempool_put_bulk(rxq->mb_pool, (void **)&mbufs[rcvd], + nb_pkts 
- rcvd); + + return rcvd; +} +#endif + +static uint16_t +eth_af_xdp_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) +{ + nb_pkts = RTE_MIN(nb_pkts, ETH_AF_XDP_RX_BATCH_SIZE); + +#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG) + return af_xdp_rx_zc(queue, bufs, nb_pkts); +#else + return af_xdp_rx_cp(queue, bufs, nb_pkts); +#endif +} + +static void +pull_umem_cq(struct xsk_umem_info *umem, int size) +{ + struct xsk_ring_cons *cq = &umem->cq; + size_t i, n; + uint32_t idx_cq = 0; + + n = xsk_ring_cons__peek(cq, size, &idx_cq); + + for (i = 0; i < n; i++) { + uint64_t addr; + addr = *xsk_ring_cons__comp_addr(cq, idx_cq++); +#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG) + addr = xsk_umem__extract_addr(addr); + rte_pktmbuf_free((struct rte_mbuf *) + xsk_umem__get_data(umem->buffer, + addr + umem->mb_pool->header_size)); +#else + rte_ring_enqueue(umem->buf_ring, (void *)addr); +#endif + } + + xsk_ring_cons__release(cq, n); +} + +static void +kick_tx(struct pkt_tx_queue *txq) +{ + struct xsk_umem_info *umem = txq->umem; + +#if defined(XDP_USE_NEED_WAKEUP) + if (xsk_ring_prod__needs_wakeup(&txq->tx)) +#endif + while (send(xsk_socket__fd(txq->pair->xsk), NULL, + 0, MSG_DONTWAIT) < 0) { + /* some thing unexpected */ + if (errno != EBUSY && errno != EAGAIN && errno != EINTR) + break; + + /* pull from completion queue to leave more space */ + if (errno == EAGAIN) + pull_umem_cq(umem, ETH_AF_XDP_TX_BATCH_SIZE); + } +#ifndef XDP_UMEM_UNALIGNED_CHUNK_FLAG + pull_umem_cq(umem, ETH_AF_XDP_TX_BATCH_SIZE); +#endif +} + +#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG) +static uint16_t +af_xdp_tx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) +{ + struct pkt_tx_queue *txq = queue; + struct xsk_umem_info *umem = txq->umem; + struct rte_mbuf *mbuf; + unsigned long tx_bytes = 0; + int i; + uint32_t idx_tx; + uint16_t count = 0; + struct xdp_desc *desc; + uint64_t addr, offset; + + pull_umem_cq(umem, nb_pkts); + + for (i = 0; i < nb_pkts; i++) { + mbuf = bufs[i]; + + if (mbuf->pool == umem->mb_pool) { + if (!xsk_ring_prod__reserve(&txq->tx, 1, &idx_tx)) { + kick_tx(txq); + goto out; + } + desc = xsk_ring_prod__tx_desc(&txq->tx, idx_tx); + desc->len = mbuf->pkt_len; + addr = (uint64_t)mbuf - (uint64_t)umem->buffer - + umem->mb_pool->header_size; + offset = rte_pktmbuf_mtod(mbuf, uint64_t) - + (uint64_t)mbuf + + umem->mb_pool->header_size; + offset = offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT; + desc->addr = addr | offset; + count++; + } else { + struct rte_mbuf *local_mbuf = + rte_pktmbuf_alloc(umem->mb_pool); + void *pkt; + + if (local_mbuf == NULL) + goto out; + + if (!xsk_ring_prod__reserve(&txq->tx, 1, &idx_tx)) { + rte_pktmbuf_free(local_mbuf); + kick_tx(txq); + goto out; + } + + desc = xsk_ring_prod__tx_desc(&txq->tx, idx_tx); + desc->len = mbuf->pkt_len; + + addr = (uint64_t)local_mbuf - (uint64_t)umem->buffer - + umem->mb_pool->header_size; + offset = rte_pktmbuf_mtod(local_mbuf, uint64_t) - + (uint64_t)local_mbuf + + umem->mb_pool->header_size; + pkt = xsk_umem__get_data(umem->buffer, addr + offset); + offset = offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT; + desc->addr = addr | offset; + rte_memcpy(pkt, rte_pktmbuf_mtod(mbuf, void *), + desc->len); + rte_pktmbuf_free(mbuf); + count++; + } + + tx_bytes += mbuf->pkt_len; + } + + kick_tx(txq); + +out: + xsk_ring_prod__submit(&txq->tx, count); + + txq->stats.tx_pkts += count; + txq->stats.tx_bytes += tx_bytes; + txq->stats.tx_dropped += nb_pkts - count; + + return count; +} +#else +static uint16_t +af_xdp_tx_cp(void *queue, struct rte_mbuf **bufs, uint16_t 
nb_pkts) +{ + struct pkt_tx_queue *txq = queue; + struct xsk_umem_info *umem = txq->umem; + struct rte_mbuf *mbuf; + void *addrs[ETH_AF_XDP_TX_BATCH_SIZE]; + unsigned long tx_bytes = 0; + int i; + uint32_t idx_tx; + + nb_pkts = RTE_MIN(nb_pkts, ETH_AF_XDP_TX_BATCH_SIZE); + + pull_umem_cq(umem, nb_pkts); + + nb_pkts = rte_ring_dequeue_bulk(umem->buf_ring, addrs, + nb_pkts, NULL); + if (nb_pkts == 0) + return 0; + + if (xsk_ring_prod__reserve(&txq->tx, nb_pkts, &idx_tx) != nb_pkts) { + kick_tx(txq); + rte_ring_enqueue_bulk(umem->buf_ring, addrs, nb_pkts, NULL); + return 0; + } + + for (i = 0; i < nb_pkts; i++) { + struct xdp_desc *desc; + void *pkt; + + desc = xsk_ring_prod__tx_desc(&txq->tx, idx_tx + i); + mbuf = bufs[i]; + desc->len = mbuf->pkt_len; + + desc->addr = (uint64_t)addrs[i]; + pkt = xsk_umem__get_data(umem->mz->addr, + desc->addr); + rte_memcpy(pkt, rte_pktmbuf_mtod(mbuf, void *), desc->len); + tx_bytes += mbuf->pkt_len; + rte_pktmbuf_free(mbuf); + } + + xsk_ring_prod__submit(&txq->tx, nb_pkts); + + kick_tx(txq); + + txq->stats.tx_pkts += nb_pkts; + txq->stats.tx_bytes += tx_bytes; + + return nb_pkts; +} +#endif + +static uint16_t +eth_af_xdp_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) +{ +#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG) + return af_xdp_tx_zc(queue, bufs, nb_pkts); +#else + return af_xdp_tx_cp(queue, bufs, nb_pkts); +#endif +} + +static int +eth_dev_start(struct rte_eth_dev *dev) +{ + dev->data->dev_link.link_status = ETH_LINK_UP; + + return 0; +} + +/* This function gets called when the current port gets stopped. */ +static void +eth_dev_stop(struct rte_eth_dev *dev) +{ + dev->data->dev_link.link_status = ETH_LINK_DOWN; +} + +static int +eth_dev_configure(struct rte_eth_dev *dev) +{ + /* rx/tx must be paired */ + if (dev->data->nb_rx_queues != dev->data->nb_tx_queues) + return -EINVAL; + + return 0; +} + +static int +eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) +{ + struct pmd_internals *internals = dev->data->dev_private; + + dev_info->if_index = internals->if_index; + dev_info->max_mac_addrs = 1; + dev_info->max_rx_pktlen = ETH_FRAME_LEN; + dev_info->max_rx_queues = internals->queue_cnt; + dev_info->max_tx_queues = internals->queue_cnt; + + dev_info->min_mtu = RTE_ETHER_MIN_MTU; +#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG) + dev_info->max_mtu = getpagesize() - + sizeof(struct rte_mempool_objhdr) - + sizeof(struct rte_mbuf) - + RTE_PKTMBUF_HEADROOM - XDP_PACKET_HEADROOM; +#else + dev_info->max_mtu = ETH_AF_XDP_FRAME_SIZE - XDP_PACKET_HEADROOM; +#endif + + dev_info->default_rxportconf.nb_queues = 1; + dev_info->default_txportconf.nb_queues = 1; + dev_info->default_rxportconf.ring_size = ETH_AF_XDP_DFLT_NUM_DESCS; + dev_info->default_txportconf.ring_size = ETH_AF_XDP_DFLT_NUM_DESCS; + + return 0; +} + +static int +eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) +{ + struct pmd_internals *internals = dev->data->dev_private; + struct xdp_statistics xdp_stats; + struct pkt_rx_queue *rxq; + struct pkt_tx_queue *txq; + socklen_t optlen; + int i, ret; + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + optlen = sizeof(struct xdp_statistics); + rxq = &internals->rx_queues[i]; + txq = rxq->pair; + stats->q_ipackets[i] = rxq->stats.rx_pkts; + stats->q_ibytes[i] = rxq->stats.rx_bytes; + + stats->q_opackets[i] = txq->stats.tx_pkts; + stats->q_obytes[i] = txq->stats.tx_bytes; + + stats->ipackets += stats->q_ipackets[i]; + stats->ibytes += stats->q_ibytes[i]; + stats->imissed += rxq->stats.rx_dropped; + stats->oerrors 
+= txq->stats.tx_dropped; + ret = getsockopt(xsk_socket__fd(rxq->xsk), SOL_XDP, + XDP_STATISTICS, &xdp_stats, &optlen); + if (ret != 0) { + AF_XDP_LOG(ERR, "getsockopt() failed for XDP_STATISTICS.\n"); + return -1; + } + stats->imissed += xdp_stats.rx_dropped; + + stats->opackets += stats->q_opackets[i]; + stats->obytes += stats->q_obytes[i]; + } + + return 0; +} + +static int +eth_stats_reset(struct rte_eth_dev *dev) +{ + struct pmd_internals *internals = dev->data->dev_private; + int i; + + for (i = 0; i < internals->queue_cnt; i++) { + memset(&internals->rx_queues[i].stats, 0, + sizeof(struct rx_stats)); + memset(&internals->tx_queues[i].stats, 0, + sizeof(struct tx_stats)); + } + + return 0; +} + +static void +remove_xdp_program(struct pmd_internals *internals) +{ + uint32_t curr_prog_id = 0; + + if (bpf_get_link_xdp_id(internals->if_index, &curr_prog_id, + XDP_FLAGS_UPDATE_IF_NOEXIST)) { + AF_XDP_LOG(ERR, "bpf_get_link_xdp_id failed\n"); + return; + } + bpf_set_link_xdp_fd(internals->if_index, -1, + XDP_FLAGS_UPDATE_IF_NOEXIST); +} + +static void +xdp_umem_destroy(struct xsk_umem_info *umem) +{ +#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG) + rte_mempool_free(umem->mb_pool); + umem->mb_pool = NULL; +#else + rte_memzone_free(umem->mz); + umem->mz = NULL; + + rte_ring_free(umem->buf_ring); + umem->buf_ring = NULL; +#endif + + rte_free(umem); + umem = NULL; +} + +static void +eth_dev_close(struct rte_eth_dev *dev) +{ + struct pmd_internals *internals = dev->data->dev_private; + struct pkt_rx_queue *rxq; + int i; + + AF_XDP_LOG(INFO, "Closing AF_XDP ethdev on numa socket %u\n", + rte_socket_id()); + + for (i = 0; i < internals->queue_cnt; i++) { + rxq = &internals->rx_queues[i]; + if (rxq->umem == NULL) + break; + xsk_socket__delete(rxq->xsk); + (void)xsk_umem__delete(rxq->umem->umem); + xdp_umem_destroy(rxq->umem); + + /* free pkt_tx_queue */ + rte_free(rxq->pair); + rte_free(rxq); + } + + /* + * MAC is not allocated dynamically, setting it to NULL would prevent + * from releasing it in rte_eth_dev_release_port. 
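+ * Since eth_addr is embedded in struct pmd_internals rather than
+ * allocated on its own, clearing the pointer here keeps
+ * rte_eth_dev_release_port() from calling rte_free() on it.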
+ */ + dev->data->mac_addrs = NULL; + + remove_xdp_program(internals); +} + +static void +eth_queue_release(void *q __rte_unused) +{ +} + +static int +eth_link_update(struct rte_eth_dev *dev __rte_unused, + int wait_to_complete __rte_unused) +{ + return 0; +} + +#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG) +static inline uint64_t get_base_addr(struct rte_mempool *mp) +{ + struct rte_mempool_memhdr *memhdr; + + memhdr = STAILQ_FIRST(&mp->mem_list); + return (uint64_t)memhdr->addr & ~(getpagesize() - 1); +} + +static struct +xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals __rte_unused, + struct pkt_rx_queue *rxq) +{ + struct xsk_umem_info *umem; + int ret; + struct xsk_umem_config usr_config = { + .fill_size = ETH_AF_XDP_DFLT_NUM_DESCS, + .comp_size = ETH_AF_XDP_DFLT_NUM_DESCS, + .flags = XDP_UMEM_UNALIGNED_CHUNK_FLAG}; + void *base_addr = NULL; + struct rte_mempool *mb_pool = rxq->mb_pool; + + usr_config.frame_size = rte_mempool_calc_obj_size(mb_pool->elt_size, + mb_pool->flags, + NULL); + usr_config.frame_headroom = mb_pool->header_size + + sizeof(struct rte_mbuf) + + rte_pktmbuf_priv_size(mb_pool) + + RTE_PKTMBUF_HEADROOM; + + umem = rte_zmalloc_socket("umem", sizeof(*umem), 0, rte_socket_id()); + if (umem == NULL) { + AF_XDP_LOG(ERR, "Failed to allocate umem info"); + return NULL; + } + + umem->mb_pool = mb_pool; + base_addr = (void *)get_base_addr(mb_pool); + + ret = xsk_umem__create(&umem->umem, base_addr, + mb_pool->populated_size * usr_config.frame_size, + &umem->fq, &umem->cq, + &usr_config); + + if (ret) { + AF_XDP_LOG(ERR, "Failed to create umem"); + goto err; + } + umem->buffer = base_addr; + +#else +static struct +xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals, + struct pkt_rx_queue *rxq) +{ + struct xsk_umem_info *umem; + const struct rte_memzone *mz; + struct xsk_umem_config usr_config = { + .fill_size = ETH_AF_XDP_DFLT_NUM_DESCS, + .comp_size = ETH_AF_XDP_DFLT_NUM_DESCS, + .frame_size = ETH_AF_XDP_FRAME_SIZE, + .frame_headroom = 0 }; + char ring_name[RTE_RING_NAMESIZE]; + char mz_name[RTE_MEMZONE_NAMESIZE]; + int ret; + uint64_t i; + + umem = rte_zmalloc_socket("umem", sizeof(*umem), 0, rte_socket_id()); + if (umem == NULL) { + AF_XDP_LOG(ERR, "Failed to allocate umem info"); + return NULL; + } + + snprintf(ring_name, sizeof(ring_name), "af_xdp_ring_%s_%u", + internals->if_name, rxq->xsk_queue_idx); + umem->buf_ring = rte_ring_create(ring_name, + ETH_AF_XDP_NUM_BUFFERS, + rte_socket_id(), + RING_F_SP_ENQ | RING_F_SC_DEQ); + if (umem->buf_ring == NULL) { + AF_XDP_LOG(ERR, "Failed to create rte_ring\n"); + goto err; + } + + for (i = 0; i < ETH_AF_XDP_NUM_BUFFERS; i++) + rte_ring_enqueue(umem->buf_ring, + (void *)(i * ETH_AF_XDP_FRAME_SIZE)); + + snprintf(mz_name, sizeof(mz_name), "af_xdp_umem_%s_%u", + internals->if_name, rxq->xsk_queue_idx); + mz = rte_memzone_reserve_aligned(mz_name, + ETH_AF_XDP_NUM_BUFFERS * ETH_AF_XDP_FRAME_SIZE, + rte_socket_id(), RTE_MEMZONE_IOVA_CONTIG, + getpagesize()); + if (mz == NULL) { + AF_XDP_LOG(ERR, "Failed to reserve memzone for af_xdp umem.\n"); + goto err; + } + + ret = xsk_umem__create(&umem->umem, mz->addr, + ETH_AF_XDP_NUM_BUFFERS * ETH_AF_XDP_FRAME_SIZE, + &umem->fq, &umem->cq, + &usr_config); + + if (ret) { + AF_XDP_LOG(ERR, "Failed to create umem"); + goto err; + } + umem->mz = mz; + +#endif + return umem; + +err: + xdp_umem_destroy(umem); + return NULL; +} + +static int +xsk_configure(struct pmd_internals *internals, struct pkt_rx_queue *rxq, + int ring_size) +{ + struct xsk_socket_config cfg; + 
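+ /*
+  * What follows: build the UMEM for this queue pair, create an
+  * AF_XDP socket bound to internals->if_name / rxq->xsk_queue_idx
+  * that shares its Tx ring with the paired Tx queue, then
+  * pre-populate the fill ring with reserve_size descriptors.
+  */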
struct pkt_tx_queue *txq = rxq->pair; + int ret = 0; + int reserve_size = ETH_AF_XDP_DFLT_NUM_DESCS / 2; + struct rte_mbuf *fq_bufs[reserve_size]; + + rxq->umem = xdp_umem_configure(internals, rxq); + if (rxq->umem == NULL) + return -ENOMEM; + txq->umem = rxq->umem; + + cfg.rx_size = ring_size; + cfg.tx_size = ring_size; + cfg.libbpf_flags = 0; + cfg.xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST; + cfg.bind_flags = 0; + +#if defined(XDP_USE_NEED_WAKEUP) + cfg.bind_flags |= XDP_USE_NEED_WAKEUP; +#endif + + ret = xsk_socket__create(&rxq->xsk, internals->if_name, + rxq->xsk_queue_idx, rxq->umem->umem, &rxq->rx, + &txq->tx, &cfg); + if (ret) { + AF_XDP_LOG(ERR, "Failed to create xsk socket.\n"); + goto err; + } + +#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG) + if (rte_pktmbuf_alloc_bulk(rxq->umem->mb_pool, fq_bufs, reserve_size)) { + AF_XDP_LOG(DEBUG, "Failed to get enough buffers for fq.\n"); + goto err; + } +#endif + ret = reserve_fill_queue(rxq->umem, reserve_size, fq_bufs); + if (ret) { + xsk_socket__delete(rxq->xsk); + AF_XDP_LOG(ERR, "Failed to reserve fill queue.\n"); + goto err; + } + + return 0; + +err: + xdp_umem_destroy(rxq->umem); + + return ret; +} + +static int +eth_rx_queue_setup(struct rte_eth_dev *dev, + uint16_t rx_queue_id, + uint16_t nb_rx_desc, + unsigned int socket_id __rte_unused, + const struct rte_eth_rxconf *rx_conf __rte_unused, + struct rte_mempool *mb_pool) +{ + struct pmd_internals *internals = dev->data->dev_private; + struct pkt_rx_queue *rxq; + int ret; + + rxq = &internals->rx_queues[rx_queue_id]; + + AF_XDP_LOG(INFO, "Set up rx queue, rx queue id: %d, xsk queue id: %d\n", + rx_queue_id, rxq->xsk_queue_idx); + +#ifndef XDP_UMEM_UNALIGNED_CHUNK_FLAG + uint32_t buf_size, data_size; + + /* Now get the space available for data in the mbuf */ + buf_size = rte_pktmbuf_data_room_size(mb_pool) - + RTE_PKTMBUF_HEADROOM; + data_size = ETH_AF_XDP_FRAME_SIZE; + + if (data_size > buf_size) { + AF_XDP_LOG(ERR, "%s: %d bytes will not fit in mbuf (%d bytes)\n", + dev->device->name, data_size, buf_size); + ret = -ENOMEM; + goto err; + } +#endif + + rxq->mb_pool = mb_pool; + + if (xsk_configure(internals, rxq, nb_rx_desc)) { + AF_XDP_LOG(ERR, "Failed to configure xdp socket\n"); + ret = -EINVAL; + goto err; + } + + rxq->fds[0].fd = xsk_socket__fd(rxq->xsk); + rxq->fds[0].events = POLLIN; + + dev->data->rx_queues[rx_queue_id] = rxq; + return 0; + +err: + return ret; +} + +static int +eth_tx_queue_setup(struct rte_eth_dev *dev, + uint16_t tx_queue_id, + uint16_t nb_tx_desc __rte_unused, + unsigned int socket_id __rte_unused, + const struct rte_eth_txconf *tx_conf __rte_unused) +{ + struct pmd_internals *internals = dev->data->dev_private; + struct pkt_tx_queue *txq; + + txq = &internals->tx_queues[tx_queue_id]; + + dev->data->tx_queues[tx_queue_id] = txq; + return 0; +} + +static int +eth_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) +{ + struct pmd_internals *internals = dev->data->dev_private; + struct ifreq ifr = { .ifr_mtu = mtu }; + int ret; + int s; + + s = socket(PF_INET, SOCK_DGRAM, 0); + if (s < 0) + return -EINVAL; + + strlcpy(ifr.ifr_name, internals->if_name, IFNAMSIZ); + ret = ioctl(s, SIOCSIFMTU, &ifr); + close(s); + + return (ret < 0) ? 
-errno : 0; +} + +static int +eth_dev_change_flags(char *if_name, uint32_t flags, uint32_t mask) +{ + struct ifreq ifr; + int ret = 0; + int s; + + s = socket(PF_INET, SOCK_DGRAM, 0); + if (s < 0) + return -errno; + + strlcpy(ifr.ifr_name, if_name, IFNAMSIZ); + if (ioctl(s, SIOCGIFFLAGS, &ifr) < 0) { + ret = -errno; + goto out; + } + ifr.ifr_flags &= mask; + ifr.ifr_flags |= flags; + if (ioctl(s, SIOCSIFFLAGS, &ifr) < 0) { + ret = -errno; + goto out; + } +out: + close(s); + return ret; +} + +static int +eth_dev_promiscuous_enable(struct rte_eth_dev *dev) +{ + struct pmd_internals *internals = dev->data->dev_private; + + return eth_dev_change_flags(internals->if_name, IFF_PROMISC, ~0); +} + +static int +eth_dev_promiscuous_disable(struct rte_eth_dev *dev) +{ + struct pmd_internals *internals = dev->data->dev_private; + + return eth_dev_change_flags(internals->if_name, 0, ~IFF_PROMISC); +} + +static const struct eth_dev_ops ops = { + .dev_start = eth_dev_start, + .dev_stop = eth_dev_stop, + .dev_close = eth_dev_close, + .dev_configure = eth_dev_configure, + .dev_infos_get = eth_dev_info, + .mtu_set = eth_dev_mtu_set, + .promiscuous_enable = eth_dev_promiscuous_enable, + .promiscuous_disable = eth_dev_promiscuous_disable, + .rx_queue_setup = eth_rx_queue_setup, + .tx_queue_setup = eth_tx_queue_setup, + .rx_queue_release = eth_queue_release, + .tx_queue_release = eth_queue_release, + .link_update = eth_link_update, + .stats_get = eth_stats_get, + .stats_reset = eth_stats_reset, +}; + +/** parse integer from integer argument */ +static int +parse_integer_arg(const char *key __rte_unused, + const char *value, void *extra_args) +{ + int *i = (int *)extra_args; + char *end; + + *i = strtol(value, &end, 10); + if (*i < 0) { + AF_XDP_LOG(ERR, "Argument has to be positive.\n"); + return -EINVAL; + } + + return 0; +} + +/** parse name argument */ +static int +parse_name_arg(const char *key __rte_unused, + const char *value, void *extra_args) +{ + char *name = extra_args; + + if (strnlen(value, IFNAMSIZ) > IFNAMSIZ - 1) { + AF_XDP_LOG(ERR, "Invalid name %s, should be less than %u bytes.\n", + value, IFNAMSIZ); + return -EINVAL; + } + + strlcpy(name, value, IFNAMSIZ); + + return 0; +} + +static int +xdp_get_channels_info(const char *if_name, int *max_queues, + int *combined_queues) +{ + struct ethtool_channels channels; + struct ifreq ifr; + int fd, ret; + + fd = socket(AF_INET, SOCK_DGRAM, 0); + if (fd < 0) + return -1; + + channels.cmd = ETHTOOL_GCHANNELS; + ifr.ifr_data = (void *)&channels; + strncpy(ifr.ifr_name, if_name, IFNAMSIZ); + ret = ioctl(fd, SIOCETHTOOL, &ifr); + if (ret) { + if (errno == EOPNOTSUPP) { + ret = 0; + } else { + ret = -errno; + goto out; + } + } + + if (channels.max_combined == 0 || errno == EOPNOTSUPP) { + /* If the device says it has no channels, then all traffic + * is sent to a single stream, so max queues = 1. 
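+ * (A driver without channel support fails the ETHTOOL_GCHANNELS ioctl
+ * with EOPNOTSUPP, which is treated the same way.) Otherwise the
+ * reported values are used as-is, e.g. max_combined = 8 with
+ * combined_count = 4 gives *max_queues = 8 and *combined_queues = 4.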
+ */ + *max_queues = 1; + *combined_queues = 1; + } else { + *max_queues = channels.max_combined; + *combined_queues = channels.combined_count; + } + + out: + close(fd); + return ret; +} + +static int +parse_parameters(struct rte_kvargs *kvlist, char *if_name, int *start_queue, + int *queue_cnt) +{ + int ret; + + ret = rte_kvargs_process(kvlist, ETH_AF_XDP_IFACE_ARG, + &parse_name_arg, if_name); + if (ret < 0) + goto free_kvlist; + + ret = rte_kvargs_process(kvlist, ETH_AF_XDP_START_QUEUE_ARG, + &parse_integer_arg, start_queue); + if (ret < 0) + goto free_kvlist; + + ret = rte_kvargs_process(kvlist, ETH_AF_XDP_QUEUE_COUNT_ARG, + &parse_integer_arg, queue_cnt); + if (ret < 0 || *queue_cnt <= 0) { + ret = -EINVAL; + goto free_kvlist; + } + +free_kvlist: + rte_kvargs_free(kvlist); + return ret; +} + +static int +get_iface_info(const char *if_name, + struct rte_ether_addr *eth_addr, + int *if_index) +{ + struct ifreq ifr; + int sock = socket(AF_INET, SOCK_DGRAM, IPPROTO_IP); + + if (sock < 0) + return -1; + + strlcpy(ifr.ifr_name, if_name, IFNAMSIZ); + if (ioctl(sock, SIOCGIFINDEX, &ifr)) + goto error; + + *if_index = ifr.ifr_ifindex; + + if (ioctl(sock, SIOCGIFHWADDR, &ifr)) + goto error; + + rte_memcpy(eth_addr, ifr.ifr_hwaddr.sa_data, RTE_ETHER_ADDR_LEN); + + close(sock); + return 0; + +error: + close(sock); + return -1; +} + +static struct rte_eth_dev * +init_internals(struct rte_vdev_device *dev, const char *if_name, + int start_queue_idx, int queue_cnt) +{ + const char *name = rte_vdev_device_name(dev); + const unsigned int numa_node = dev->device.numa_node; + struct pmd_internals *internals; + struct rte_eth_dev *eth_dev; + int ret; + int i; + + internals = rte_zmalloc_socket(name, sizeof(*internals), 0, numa_node); + if (internals == NULL) + return NULL; + + internals->start_queue_idx = start_queue_idx; + internals->queue_cnt = queue_cnt; + strlcpy(internals->if_name, if_name, IFNAMSIZ); + + if (xdp_get_channels_info(if_name, &internals->max_queue_cnt, + &internals->combined_queue_cnt)) { + AF_XDP_LOG(ERR, "Failed to get channel info of interface: %s\n", + if_name); + goto err_free_internals; + } + + if (queue_cnt > internals->combined_queue_cnt) { + AF_XDP_LOG(ERR, "Specified queue count %d is larger than combined queue count %d.\n", + queue_cnt, internals->combined_queue_cnt); + goto err_free_internals; + } + + internals->rx_queues = rte_zmalloc_socket(NULL, + sizeof(struct pkt_rx_queue) * queue_cnt, + 0, numa_node); + if (internals->rx_queues == NULL) { + AF_XDP_LOG(ERR, "Failed to allocate memory for rx queues.\n"); + goto err_free_internals; + } + + internals->tx_queues = rte_zmalloc_socket(NULL, + sizeof(struct pkt_tx_queue) * queue_cnt, + 0, numa_node); + if (internals->tx_queues == NULL) { + AF_XDP_LOG(ERR, "Failed to allocate memory for tx queues.\n"); + goto err_free_rx; + } + for (i = 0; i < queue_cnt; i++) { + internals->tx_queues[i].pair = &internals->rx_queues[i]; + internals->rx_queues[i].pair = &internals->tx_queues[i]; + internals->rx_queues[i].xsk_queue_idx = start_queue_idx + i; + internals->tx_queues[i].xsk_queue_idx = start_queue_idx + i; + } + + ret = get_iface_info(if_name, &internals->eth_addr, + &internals->if_index); + if (ret) + goto err_free_tx; + + eth_dev = rte_eth_vdev_allocate(dev, 0); + if (eth_dev == NULL) + goto err_free_tx; + + eth_dev->data->dev_private = internals; + eth_dev->data->dev_link = pmd_link; + eth_dev->data->mac_addrs = &internals->eth_addr; + eth_dev->dev_ops = &ops; + eth_dev->rx_pkt_burst = eth_af_xdp_rx; + eth_dev->tx_pkt_burst = 
eth_af_xdp_tx; + /* Let rte_eth_dev_close() release the port resources. */ + eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE; + +#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG) + AF_XDP_LOG(INFO, "Zero copy between umem and mbuf enabled.\n"); +#endif + + return eth_dev; + +err_free_tx: + rte_free(internals->tx_queues); +err_free_rx: + rte_free(internals->rx_queues); +err_free_internals: + rte_free(internals); + return NULL; +} + +static int +rte_pmd_af_xdp_probe(struct rte_vdev_device *dev) +{ + struct rte_kvargs *kvlist; + char if_name[IFNAMSIZ] = {'\0'}; + int xsk_start_queue_idx = ETH_AF_XDP_DFLT_START_QUEUE_IDX; + int xsk_queue_cnt = ETH_AF_XDP_DFLT_QUEUE_COUNT; + struct rte_eth_dev *eth_dev = NULL; + const char *name; + + AF_XDP_LOG(INFO, "Initializing pmd_af_xdp for %s\n", + rte_vdev_device_name(dev)); + + name = rte_vdev_device_name(dev); + if (rte_eal_process_type() == RTE_PROC_SECONDARY && + strlen(rte_vdev_device_args(dev)) == 0) { + eth_dev = rte_eth_dev_attach_secondary(name); + if (eth_dev == NULL) { + AF_XDP_LOG(ERR, "Failed to probe %s\n", name); + return -EINVAL; + } + eth_dev->dev_ops = &ops; + rte_eth_dev_probing_finish(eth_dev); + return 0; + } + + kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), valid_arguments); + if (kvlist == NULL) { + AF_XDP_LOG(ERR, "Invalid kvargs key\n"); + return -EINVAL; + } + + if (dev->device.numa_node == SOCKET_ID_ANY) + dev->device.numa_node = rte_socket_id(); + + if (parse_parameters(kvlist, if_name, &xsk_start_queue_idx, + &xsk_queue_cnt) < 0) { + AF_XDP_LOG(ERR, "Invalid kvargs value\n"); + return -EINVAL; + } + + if (strlen(if_name) == 0) { + AF_XDP_LOG(ERR, "Network interface must be specified\n"); + return -EINVAL; + } + + eth_dev = init_internals(dev, if_name, xsk_start_queue_idx, + xsk_queue_cnt); + if (eth_dev == NULL) { + AF_XDP_LOG(ERR, "Failed to init internals\n"); + return -1; + } + + rte_eth_dev_probing_finish(eth_dev); + + return 0; +} + +static int +rte_pmd_af_xdp_remove(struct rte_vdev_device *dev) +{ + struct rte_eth_dev *eth_dev = NULL; + + AF_XDP_LOG(INFO, "Removing AF_XDP ethdev on numa socket %u\n", + rte_socket_id()); + + if (dev == NULL) + return -1; + + /* find the ethdev entry */ + eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev)); + if (eth_dev == NULL) + return 0; + + eth_dev_close(eth_dev); + rte_eth_dev_release_port(eth_dev); + + + return 0; +} + +static struct rte_vdev_driver pmd_af_xdp_drv = { + .probe = rte_pmd_af_xdp_probe, + .remove = rte_pmd_af_xdp_remove, +}; + +RTE_PMD_REGISTER_VDEV(net_af_xdp, pmd_af_xdp_drv); +RTE_PMD_REGISTER_PARAM_STRING(net_af_xdp, + "iface= " + "start_queue= " + "queue_count= "); + +RTE_INIT(af_xdp_init_log) +{ + af_xdp_logtype = rte_log_register("pmd.net.af_xdp"); + if (af_xdp_logtype >= 0) + rte_log_set_level(af_xdp_logtype, RTE_LOG_NOTICE); +} diff --git a/src/spdk/dpdk/drivers/net/af_xdp/rte_pmd_af_xdp_version.map b/src/spdk/dpdk/drivers/net/af_xdp/rte_pmd_af_xdp_version.map new file mode 100644 index 000000000..f9f17e4f6 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/af_xdp/rte_pmd_af_xdp_version.map @@ -0,0 +1,3 @@ +DPDK_20.0 { + local: *; +}; diff --git a/src/spdk/dpdk/drivers/net/ark/Makefile b/src/spdk/dpdk/drivers/net/ark/Makefile new file mode 100644 index 000000000..c02080bdd --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ark/Makefile @@ -0,0 +1,39 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright (c) 2015-2018 Atomic Rules LLC + +include $(RTE_SDK)/mk/rte.vars.mk + +# +# library name +# +LIB = librte_pmd_ark.a + +CFLAGS += -O3 -I./ +CFLAGS += 
$(WERROR_FLAGS) -Werror + +EXPORT_MAP := rte_pmd_ark_version.map + +# +# all source are stored in SRCS-y +# +SRCS-$(CONFIG_RTE_LIBRTE_ARK_PMD) += ark_ddm.c +SRCS-$(CONFIG_RTE_LIBRTE_ARK_PMD) += ark_ethdev.c +SRCS-$(CONFIG_RTE_LIBRTE_ARK_PMD) += ark_ethdev_rx.c +SRCS-$(CONFIG_RTE_LIBRTE_ARK_PMD) += ark_ethdev_tx.c +SRCS-$(CONFIG_RTE_LIBRTE_ARK_PMD) += ark_mpu.c +SRCS-$(CONFIG_RTE_LIBRTE_ARK_PMD) += ark_pktchkr.c +SRCS-$(CONFIG_RTE_LIBRTE_ARK_PMD) += ark_pktdir.c +SRCS-$(CONFIG_RTE_LIBRTE_ARK_PMD) += ark_pktgen.c +SRCS-$(CONFIG_RTE_LIBRTE_ARK_PMD) += ark_rqp.c +SRCS-$(CONFIG_RTE_LIBRTE_ARK_PMD) += ark_udm.c + +# this lib depends upon: +LDLIBS += -lpthread +ifdef CONFIG_RTE_EXEC_ENV_LINUX +LDLIBS += -ldl +endif +LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring +LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs +LDLIBS += -lrte_bus_pci + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/src/spdk/dpdk/drivers/net/ark/ark_ddm.c b/src/spdk/dpdk/drivers/net/ark/ark_ddm.c new file mode 100644 index 000000000..57026f8d1 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ark/ark_ddm.c @@ -0,0 +1,130 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2015-2018 Atomic Rules LLC + */ + +#include + +#include "ark_logs.h" +#include "ark_ddm.h" + +/* ************************************************************************* */ +int +ark_ddm_verify(struct ark_ddm_t *ddm) +{ + uint32_t hw_const; + if (sizeof(struct ark_ddm_t) != ARK_DDM_EXPECTED_SIZE) { + PMD_DRV_LOG(ERR, "ARK: DDM structure looks incorrect %d vs %zd\n", + ARK_DDM_EXPECTED_SIZE, sizeof(struct ark_ddm_t)); + return -1; + } + + hw_const = ddm->cfg.const0; + if (hw_const == ARK_DDM_CONST1) { + PMD_DRV_LOG(ERR, + "ARK: DDM module is version 1, " + "PMD expects version 2\n"); + return -1; + } else if (hw_const != ARK_DDM_CONST2) { + PMD_DRV_LOG(ERR, + "ARK: DDM module not found as expected 0x%08x\n", + ddm->cfg.const0); + return -1; + } + return 0; +} + +void +ark_ddm_start(struct ark_ddm_t *ddm) +{ + ddm->cfg.command = 1; +} + +int +ark_ddm_stop(struct ark_ddm_t *ddm, const int wait) +{ + int cnt = 0; + + ddm->cfg.command = 2; + while (wait && (ddm->cfg.stop_flushed & 0x01) == 0) { + if (cnt++ > 1000) + return 1; + + usleep(10); + } + return 0; +} + +void +ark_ddm_reset(struct ark_ddm_t *ddm) +{ + int status; + + /* reset only works if ddm has stopped properly. 
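+ * If the stop below fails, command 4 is written first (logged as a
+ * forced reset) before the reset command (3) is issued.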
*/ + status = ark_ddm_stop(ddm, 1); + + if (status != 0) { + PMD_DEBUG_LOG(INFO, "%s stop failed doing forced reset\n", + __func__); + ddm->cfg.command = 4; + usleep(10); + } + ddm->cfg.command = 3; +} + +void +ark_ddm_setup(struct ark_ddm_t *ddm, rte_iova_t cons_addr, uint32_t interval) +{ + ddm->setup.cons_write_index_addr = cons_addr; + ddm->setup.write_index_interval = interval / 4; /* 4 ns period */ +} + +void +ark_ddm_stats_reset(struct ark_ddm_t *ddm) +{ + ddm->cfg.tlp_stats_clear = 1; +} + +void +ark_ddm_dump(struct ark_ddm_t *ddm, const char *msg) +{ + PMD_FUNC_LOG(DEBUG, "%s Stopped: %d\n", msg, + ark_ddm_is_stopped(ddm) + ); +} + +void +ark_ddm_dump_stats(struct ark_ddm_t *ddm, const char *msg) +{ + struct ark_ddm_stats_t *stats = &ddm->stats; + + PMD_STATS_LOG(INFO, "DDM Stats: %s" + ARK_SU64 ARK_SU64 ARK_SU64 + "\n", msg, + "Bytes:", stats->tx_byte_count, + "Packets:", stats->tx_pkt_count, + "MBufs", stats->tx_mbuf_count); +} + +int +ark_ddm_is_stopped(struct ark_ddm_t *ddm) +{ + return (ddm->cfg.stop_flushed & 0x01) != 0; +} + +uint64_t +ark_ddm_queue_byte_count(struct ark_ddm_t *ddm) +{ + return ddm->queue_stats.byte_count; +} + +uint64_t +ark_ddm_queue_pkt_count(struct ark_ddm_t *ddm) +{ + return ddm->queue_stats.pkt_count; +} + +void +ark_ddm_queue_reset_stats(struct ark_ddm_t *ddm) +{ + ddm->queue_stats.byte_count = 1; +} diff --git a/src/spdk/dpdk/drivers/net/ark/ark_ddm.h b/src/spdk/dpdk/drivers/net/ark/ark_ddm.h new file mode 100644 index 000000000..5456b4b5c --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ark/ark_ddm.h @@ -0,0 +1,151 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2015-2018 Atomic Rules LLC + */ + +#ifndef _ARK_DDM_H_ +#define _ARK_DDM_H_ + +#include + +#include + + +/* The DDM or Downstream Data Mover is an internal Arkville hardware + * module for moving packet from host memory to the TX packet streams. + * This module is *not* intended for end-user manipulation, hence + * there is minimal documentation. + */ + +/* struct defining Tx meta data -- fixed in FPGA -- 16 bytes */ +struct ark_tx_meta { + uint64_t physaddr; + uint32_t user1; + uint16_t data_len; /* of this MBUF */ +#define ARK_DDM_EOP 0x01 +#define ARK_DDM_SOP 0x02 + uint8_t flags; /* bit 0 indicates last mbuf in chain. */ + uint8_t reserved[1]; +}; + + +/* + * DDM core hardware structures + * These are overlay structures to a memory mapped FPGA device. 
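+ * The reserved[] members in struct ark_ddm_t below pad each register
+ * block out to its fixed offset (ARK_DDM_STATS, ARK_DDM_MRDQ, ...),
+ * and ark_ddm_verify() checks the overall size against
+ * ARK_DDM_EXPECTED_SIZE.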
These + * structs will never be instantiated in ram memory + */ +#define ARK_DDM_CFG 0x0000 +/* Set unique HW ID for hardware version */ +#define ARK_DDM_CONST2 (0x324d4444) +#define ARK_DDM_CONST1 (0xfacecafe) + +struct ark_ddm_cfg_t { + uint32_t r0; + volatile uint32_t tlp_stats_clear; + uint32_t const0; + volatile uint32_t tag_max; + volatile uint32_t command; + volatile uint32_t stop_flushed; +}; + +#define ARK_DDM_STATS 0x0020 +struct ark_ddm_stats_t { + volatile uint64_t tx_byte_count; + volatile uint64_t tx_pkt_count; + volatile uint64_t tx_mbuf_count; +}; + +#define ARK_DDM_MRDQ 0x0040 +struct ark_ddm_mrdq_t { + volatile uint32_t mrd_q1; + volatile uint32_t mrd_q2; + volatile uint32_t mrd_q3; + volatile uint32_t mrd_q4; + volatile uint32_t mrd_full; +}; + +#define ARK_DDM_CPLDQ 0x0068 +struct ark_ddm_cpldq_t { + volatile uint32_t cpld_q1; + volatile uint32_t cpld_q2; + volatile uint32_t cpld_q3; + volatile uint32_t cpld_q4; + volatile uint32_t cpld_full; +}; + +#define ARK_DDM_MRD_PS 0x0090 +struct ark_ddm_mrd_ps_t { + volatile uint32_t mrd_ps_min; + volatile uint32_t mrd_ps_max; + volatile uint32_t mrd_full_ps_min; + volatile uint32_t mrd_full_ps_max; + volatile uint32_t mrd_dw_ps_min; + volatile uint32_t mrd_dw_ps_max; +}; + +#define ARK_DDM_QUEUE_STATS 0x00a8 +struct ark_ddm_qstats_t { + volatile uint64_t byte_count; + volatile uint64_t pkt_count; + volatile uint64_t mbuf_count; +}; + +#define ARK_DDM_CPLD_PS 0x00c0 +struct ark_ddm_cpld_ps_t { + volatile uint32_t cpld_ps_min; + volatile uint32_t cpld_ps_max; + volatile uint32_t cpld_full_ps_min; + volatile uint32_t cpld_full_ps_max; + volatile uint32_t cpld_dw_ps_min; + volatile uint32_t cpld_dw_ps_max; +}; + +#define ARK_DDM_SETUP 0x00e0 +struct ark_ddm_setup_t { + rte_iova_t cons_write_index_addr; + uint32_t write_index_interval; /* 4ns each */ + volatile uint32_t cons_index; +}; + +#define ARK_DDM_EXPECTED_SIZE 256 +#define ARK_DDM_QOFFSET ARK_DDM_EXPECTED_SIZE +/* Consolidated structure */ +struct ark_ddm_t { + struct ark_ddm_cfg_t cfg; + uint8_t reserved0[(ARK_DDM_STATS - ARK_DDM_CFG) - + sizeof(struct ark_ddm_cfg_t)]; + struct ark_ddm_stats_t stats; + uint8_t reserved1[(ARK_DDM_MRDQ - ARK_DDM_STATS) - + sizeof(struct ark_ddm_stats_t)]; + struct ark_ddm_mrdq_t mrdq; + uint8_t reserved2[(ARK_DDM_CPLDQ - ARK_DDM_MRDQ) - + sizeof(struct ark_ddm_mrdq_t)]; + struct ark_ddm_cpldq_t cpldq; + uint8_t reserved3[(ARK_DDM_MRD_PS - ARK_DDM_CPLDQ) - + sizeof(struct ark_ddm_cpldq_t)]; + struct ark_ddm_mrd_ps_t mrd_ps; + struct ark_ddm_qstats_t queue_stats; + struct ark_ddm_cpld_ps_t cpld_ps; + uint8_t reserved5[(ARK_DDM_SETUP - ARK_DDM_CPLD_PS) - + sizeof(struct ark_ddm_cpld_ps_t)]; + struct ark_ddm_setup_t setup; + uint8_t reserved_p[(ARK_DDM_EXPECTED_SIZE - ARK_DDM_SETUP) - + sizeof(struct ark_ddm_setup_t)]; +}; + + +/* DDM function prototype */ +int ark_ddm_verify(struct ark_ddm_t *ddm); +void ark_ddm_start(struct ark_ddm_t *ddm); +int ark_ddm_stop(struct ark_ddm_t *ddm, const int wait); +void ark_ddm_reset(struct ark_ddm_t *ddm); +void ark_ddm_stats_reset(struct ark_ddm_t *ddm); +void ark_ddm_setup(struct ark_ddm_t *ddm, rte_iova_t cons_addr, + uint32_t interval); +void ark_ddm_dump_stats(struct ark_ddm_t *ddm, const char *msg); +void ark_ddm_dump(struct ark_ddm_t *ddm, const char *msg); +int ark_ddm_is_stopped(struct ark_ddm_t *ddm); +uint64_t ark_ddm_queue_byte_count(struct ark_ddm_t *ddm); +uint64_t ark_ddm_queue_pkt_count(struct ark_ddm_t *ddm); +void ark_ddm_queue_reset_stats(struct ark_ddm_t *ddm); + +#endif diff --git 
a/src/spdk/dpdk/drivers/net/ark/ark_ethdev.c b/src/spdk/dpdk/drivers/net/ark/ark_ethdev.c new file mode 100644 index 000000000..c3642012d --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ark/ark_ethdev.c @@ -0,0 +1,1027 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2015-2018 Atomic Rules LLC + */ + +#include +#include +#include + +#include +#include +#include + +#include "ark_global.h" +#include "ark_logs.h" +#include "ark_ethdev_tx.h" +#include "ark_ethdev_rx.h" +#include "ark_mpu.h" +#include "ark_ddm.h" +#include "ark_udm.h" +#include "ark_rqp.h" +#include "ark_pktdir.h" +#include "ark_pktgen.h" +#include "ark_pktchkr.h" + +/* Internal prototypes */ +static int eth_ark_check_args(struct ark_adapter *ark, const char *params); +static int eth_ark_dev_init(struct rte_eth_dev *dev); +static int ark_config_device(struct rte_eth_dev *dev); +static int eth_ark_dev_uninit(struct rte_eth_dev *eth_dev); +static int eth_ark_dev_configure(struct rte_eth_dev *dev); +static int eth_ark_dev_start(struct rte_eth_dev *dev); +static void eth_ark_dev_stop(struct rte_eth_dev *dev); +static void eth_ark_dev_close(struct rte_eth_dev *dev); +static int eth_ark_dev_info_get(struct rte_eth_dev *dev, + struct rte_eth_dev_info *dev_info); +static int eth_ark_dev_link_update(struct rte_eth_dev *dev, + int wait_to_complete); +static int eth_ark_dev_set_link_up(struct rte_eth_dev *dev); +static int eth_ark_dev_set_link_down(struct rte_eth_dev *dev); +static int eth_ark_dev_stats_get(struct rte_eth_dev *dev, + struct rte_eth_stats *stats); +static int eth_ark_dev_stats_reset(struct rte_eth_dev *dev); +static int eth_ark_set_default_mac_addr(struct rte_eth_dev *dev, + struct rte_ether_addr *mac_addr); +static int eth_ark_macaddr_add(struct rte_eth_dev *dev, + struct rte_ether_addr *mac_addr, + uint32_t index, + uint32_t pool); +static void eth_ark_macaddr_remove(struct rte_eth_dev *dev, + uint32_t index); +static int eth_ark_set_mtu(struct rte_eth_dev *dev, uint16_t size); + +/* + * The packet generator is a functional block used to generate packet + * patterns for testing. It is not intended for nominal use. + */ +#define ARK_PKTGEN_ARG "Pkt_gen" + +/* + * The packet checker is a functional block used to verify packet + * patterns for testing. It is not intended for nominal use. + */ +#define ARK_PKTCHKR_ARG "Pkt_chkr" + +/* + * The packet director is used to select the internal ingress and + * egress packets paths during testing. It is not intended for + * nominal use. 
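+ * Like Pkt_gen and Pkt_chkr above, this argument is taken from the
+ * PCI device's devargs string and parsed by eth_ark_check_args().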
+ */ +#define ARK_PKTDIR_ARG "Pkt_dir" + +/* Devinfo configurations */ +#define ARK_RX_MAX_QUEUE (4096 * 4) +#define ARK_RX_MIN_QUEUE (512) +#define ARK_RX_MAX_PKT_LEN ((16 * 1024) - 128) +#define ARK_RX_MIN_BUFSIZE (1024) + +#define ARK_TX_MAX_QUEUE (4096 * 4) +#define ARK_TX_MIN_QUEUE (256) + +int ark_logtype; + +static const char * const valid_arguments[] = { + ARK_PKTGEN_ARG, + ARK_PKTCHKR_ARG, + ARK_PKTDIR_ARG, + NULL +}; + +static const struct rte_pci_id pci_id_ark_map[] = { + {RTE_PCI_DEVICE(0x1d6c, 0x100d)}, + {RTE_PCI_DEVICE(0x1d6c, 0x100e)}, + {.vendor_id = 0, /* sentinel */ }, +}; + +static int +eth_ark_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, + struct rte_pci_device *pci_dev) +{ + struct rte_eth_dev *eth_dev; + int ret; + + eth_dev = rte_eth_dev_pci_allocate(pci_dev, sizeof(struct ark_adapter)); + + if (eth_dev == NULL) + return -ENOMEM; + + ret = eth_ark_dev_init(eth_dev); + if (ret) + rte_eth_dev_pci_release(eth_dev); + + return ret; +} + +static int +eth_ark_pci_remove(struct rte_pci_device *pci_dev) +{ + return rte_eth_dev_pci_generic_remove(pci_dev, eth_ark_dev_uninit); +} + +static struct rte_pci_driver rte_ark_pmd = { + .id_table = pci_id_ark_map, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, + .probe = eth_ark_pci_probe, + .remove = eth_ark_pci_remove, +}; + +static const struct eth_dev_ops ark_eth_dev_ops = { + .dev_configure = eth_ark_dev_configure, + .dev_start = eth_ark_dev_start, + .dev_stop = eth_ark_dev_stop, + .dev_close = eth_ark_dev_close, + + .dev_infos_get = eth_ark_dev_info_get, + + .rx_queue_setup = eth_ark_dev_rx_queue_setup, + .rx_queue_count = eth_ark_dev_rx_queue_count, + .tx_queue_setup = eth_ark_tx_queue_setup, + + .link_update = eth_ark_dev_link_update, + .dev_set_link_up = eth_ark_dev_set_link_up, + .dev_set_link_down = eth_ark_dev_set_link_down, + + .rx_queue_start = eth_ark_rx_start_queue, + .rx_queue_stop = eth_ark_rx_stop_queue, + + .tx_queue_start = eth_ark_tx_queue_start, + .tx_queue_stop = eth_ark_tx_queue_stop, + + .stats_get = eth_ark_dev_stats_get, + .stats_reset = eth_ark_dev_stats_reset, + + .mac_addr_add = eth_ark_macaddr_add, + .mac_addr_remove = eth_ark_macaddr_remove, + .mac_addr_set = eth_ark_set_default_mac_addr, + + .mtu_set = eth_ark_set_mtu, +}; + +static int +check_for_ext(struct ark_adapter *ark) +{ + int found = 0; + + /* Get the env */ + const char *dllpath = getenv("ARK_EXT_PATH"); + + if (dllpath == NULL) { + PMD_DEBUG_LOG(DEBUG, "ARK EXT NO dll path specified\n"); + return 0; + } + PMD_DRV_LOG(INFO, "ARK EXT found dll path at %s\n", dllpath); + + /* Open and load the .so */ + ark->d_handle = dlopen(dllpath, RTLD_LOCAL | RTLD_LAZY); + if (ark->d_handle == NULL) { + PMD_DRV_LOG(ERR, "Could not load user extension %s\n", + dllpath); + return -1; + } + PMD_DRV_LOG(INFO, "SUCCESS: loaded user extension %s\n", + dllpath); + + /* Get the entry points */ + ark->user_ext.dev_init = + (void *(*)(struct rte_eth_dev *, void *, int)) + dlsym(ark->d_handle, "dev_init"); + PMD_DEBUG_LOG(DEBUG, "device ext init pointer = %p\n", + ark->user_ext.dev_init); + ark->user_ext.dev_get_port_count = + (int (*)(struct rte_eth_dev *, void *)) + dlsym(ark->d_handle, "dev_get_port_count"); + ark->user_ext.dev_uninit = + (void (*)(struct rte_eth_dev *, void *)) + dlsym(ark->d_handle, "dev_uninit"); + ark->user_ext.dev_configure = + (int (*)(struct rte_eth_dev *, void *)) + dlsym(ark->d_handle, "dev_configure"); + ark->user_ext.dev_start = + (int (*)(struct rte_eth_dev *, void *)) + dlsym(ark->d_handle, "dev_start"); + 
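+ /*
+  * Each entry point is optional: a symbol dlsym() cannot resolve just
+  * stays NULL, and the hooks appear to be checked before use (as
+  * eth_ark_dev_init() does for dev_init below).
+  */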
ark->user_ext.dev_stop = + (void (*)(struct rte_eth_dev *, void *)) + dlsym(ark->d_handle, "dev_stop"); + ark->user_ext.dev_close = + (void (*)(struct rte_eth_dev *, void *)) + dlsym(ark->d_handle, "dev_close"); + ark->user_ext.link_update = + (int (*)(struct rte_eth_dev *, int, void *)) + dlsym(ark->d_handle, "link_update"); + ark->user_ext.dev_set_link_up = + (int (*)(struct rte_eth_dev *, void *)) + dlsym(ark->d_handle, "dev_set_link_up"); + ark->user_ext.dev_set_link_down = + (int (*)(struct rte_eth_dev *, void *)) + dlsym(ark->d_handle, "dev_set_link_down"); + ark->user_ext.stats_get = + (int (*)(struct rte_eth_dev *, struct rte_eth_stats *, + void *)) + dlsym(ark->d_handle, "stats_get"); + ark->user_ext.stats_reset = + (void (*)(struct rte_eth_dev *, void *)) + dlsym(ark->d_handle, "stats_reset"); + ark->user_ext.mac_addr_add = + (void (*)(struct rte_eth_dev *, struct rte_ether_addr *, + uint32_t, uint32_t, void *)) + dlsym(ark->d_handle, "mac_addr_add"); + ark->user_ext.mac_addr_remove = + (void (*)(struct rte_eth_dev *, uint32_t, void *)) + dlsym(ark->d_handle, "mac_addr_remove"); + ark->user_ext.mac_addr_set = + (void (*)(struct rte_eth_dev *, struct rte_ether_addr *, + void *)) + dlsym(ark->d_handle, "mac_addr_set"); + ark->user_ext.set_mtu = + (int (*)(struct rte_eth_dev *, uint16_t, + void *)) + dlsym(ark->d_handle, "set_mtu"); + + return found; +} + +static int +eth_ark_dev_init(struct rte_eth_dev *dev) +{ + struct ark_adapter *ark = dev->data->dev_private; + struct rte_pci_device *pci_dev; + int ret; + int port_count = 1; + int p; + + ark->eth_dev = dev; + + PMD_FUNC_LOG(DEBUG, "\n"); + + /* Check to see if there is an extension that we need to load */ + ret = check_for_ext(ark); + if (ret) + return ret; + pci_dev = RTE_ETH_DEV_TO_PCI(dev); + rte_eth_copy_pci_info(dev, pci_dev); + + /* Use dummy function until setup */ + dev->rx_pkt_burst = &eth_ark_recv_pkts_noop; + dev->tx_pkt_burst = &eth_ark_xmit_pkts_noop; + /* Let rte_eth_dev_close() release the port resources */ + dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE; + + ark->bar0 = (uint8_t *)pci_dev->mem_resource[0].addr; + ark->a_bar = (uint8_t *)pci_dev->mem_resource[2].addr; + + ark->sysctrl.v = (void *)&ark->bar0[ARK_SYSCTRL_BASE]; + ark->mpurx.v = (void *)&ark->bar0[ARK_MPU_RX_BASE]; + ark->udm.v = (void *)&ark->bar0[ARK_UDM_BASE]; + ark->mputx.v = (void *)&ark->bar0[ARK_MPU_TX_BASE]; + ark->ddm.v = (void *)&ark->bar0[ARK_DDM_BASE]; + ark->cmac.v = (void *)&ark->bar0[ARK_CMAC_BASE]; + ark->external.v = (void *)&ark->bar0[ARK_EXTERNAL_BASE]; + ark->pktdir.v = (void *)&ark->bar0[ARK_PKTDIR_BASE]; + ark->pktgen.v = (void *)&ark->bar0[ARK_PKTGEN_BASE]; + ark->pktchkr.v = (void *)&ark->bar0[ARK_PKTCHKR_BASE]; + + ark->rqpacing = + (struct ark_rqpace_t *)(ark->bar0 + ARK_RCPACING_BASE); + ark->started = 0; + + PMD_DEBUG_LOG(INFO, "Sys Ctrl Const = 0x%x HW Commit_ID: %08x\n", + ark->sysctrl.t32[4], + rte_be_to_cpu_32(ark->sysctrl.t32[0x20 / 4])); + PMD_DRV_LOG(INFO, "Arkville HW Commit_ID: %08x\n", + rte_be_to_cpu_32(ark->sysctrl.t32[0x20 / 4])); + + /* If HW sanity test fails, return an error */ + if (ark->sysctrl.t32[4] != 0xcafef00d) { + PMD_DRV_LOG(ERR, + "HW Sanity test has failed, expected constant" + " 0x%x, read 0x%x (%s)\n", + 0xcafef00d, + ark->sysctrl.t32[4], __func__); + return -1; + } + if (ark->sysctrl.t32[3] != 0) { + if (ark_rqp_lasped(ark->rqpacing)) { + PMD_DRV_LOG(ERR, "Arkville Evaluation System - " + "Timer has Expired\n"); + return -1; + } + PMD_DRV_LOG(WARNING, "Arkville Evaluation System - " + "Timer is 
Running\n"); + } + + PMD_DRV_LOG(INFO, + "HW Sanity test has PASSED, expected constant" + " 0x%x, read 0x%x (%s)\n", + 0xcafef00d, ark->sysctrl.t32[4], __func__); + + /* We are a single function multi-port device. */ + ret = ark_config_device(dev); + if (ret) + return -1; + + dev->dev_ops = &ark_eth_dev_ops; + + dev->data->mac_addrs = rte_zmalloc("ark", RTE_ETHER_ADDR_LEN, 0); + if (!dev->data->mac_addrs) { + PMD_DRV_LOG(ERR, + "Failed to allocated memory for storing mac address" + ); + } + + if (ark->user_ext.dev_init) { + ark->user_data[dev->data->port_id] = + ark->user_ext.dev_init(dev, ark->a_bar, 0); + if (!ark->user_data[dev->data->port_id]) { + PMD_DRV_LOG(INFO, + "Failed to initialize PMD extension!" + " continuing without it\n"); + memset(&ark->user_ext, 0, sizeof(struct ark_user_ext)); + dlclose(ark->d_handle); + } + } + + if (pci_dev->device.devargs) + ret = eth_ark_check_args(ark, pci_dev->device.devargs->args); + else + PMD_DRV_LOG(INFO, "No Device args found\n"); + + if (ret) + goto error; + /* + * We will create additional devices based on the number of requested + * ports + */ + if (ark->user_ext.dev_get_port_count) + port_count = + ark->user_ext.dev_get_port_count(dev, + ark->user_data[dev->data->port_id]); + ark->num_ports = port_count; + + for (p = 0; p < port_count; p++) { + struct rte_eth_dev *eth_dev; + char name[RTE_ETH_NAME_MAX_LEN]; + + snprintf(name, sizeof(name), "arketh%d", + dev->data->port_id + p); + + if (p == 0) { + /* First port is already allocated by DPDK */ + eth_dev = ark->eth_dev; + rte_eth_dev_probing_finish(eth_dev); + continue; + } + + /* reserve an ethdev entry */ + eth_dev = rte_eth_dev_allocate(name); + if (!eth_dev) { + PMD_DRV_LOG(ERR, + "Could not allocate eth_dev for port %d\n", + p); + goto error; + } + + eth_dev->device = &pci_dev->device; + eth_dev->data->dev_private = ark; + eth_dev->dev_ops = ark->eth_dev->dev_ops; + eth_dev->tx_pkt_burst = ark->eth_dev->tx_pkt_burst; + eth_dev->rx_pkt_burst = ark->eth_dev->rx_pkt_burst; + + rte_eth_copy_pci_info(eth_dev, pci_dev); + + eth_dev->data->mac_addrs = rte_zmalloc(name, + RTE_ETHER_ADDR_LEN, 0); + if (!eth_dev->data->mac_addrs) { + PMD_DRV_LOG(ERR, + "Memory allocation for MAC failed!" 
+ " Exiting.\n"); + goto error; + } + + if (ark->user_ext.dev_init) { + ark->user_data[eth_dev->data->port_id] = + ark->user_ext.dev_init(dev, ark->a_bar, p); + } + + rte_eth_dev_probing_finish(eth_dev); + } + + return ret; + +error: + rte_free(dev->data->mac_addrs); + dev->data->mac_addrs = NULL; + return -1; +} + +/* + *Initial device configuration when device is opened + * setup the DDM, and UDM + * Called once per PCIE device + */ +static int +ark_config_device(struct rte_eth_dev *dev) +{ + struct ark_adapter *ark = dev->data->dev_private; + uint16_t num_q, i; + struct ark_mpu_t *mpu; + + /* + * Make sure that the packet director, generator and checker are in a + * known state + */ + ark->start_pg = 0; + ark->pg = ark_pktgen_init(ark->pktgen.v, 0, 1); + if (ark->pg == NULL) + return -1; + ark_pktgen_reset(ark->pg); + ark->pc = ark_pktchkr_init(ark->pktchkr.v, 0, 1); + if (ark->pc == NULL) + return -1; + ark_pktchkr_stop(ark->pc); + ark->pd = ark_pktdir_init(ark->pktdir.v); + if (ark->pd == NULL) + return -1; + + /* Verify HW */ + if (ark_udm_verify(ark->udm.v)) + return -1; + if (ark_ddm_verify(ark->ddm.v)) + return -1; + + /* UDM */ + if (ark_udm_reset(ark->udm.v)) { + PMD_DRV_LOG(ERR, "Unable to stop and reset UDM\n"); + return -1; + } + /* Keep in reset until the MPU are cleared */ + + /* MPU reset */ + mpu = ark->mpurx.v; + num_q = ark_api_num_queues(mpu); + ark->rx_queues = num_q; + for (i = 0; i < num_q; i++) { + ark_mpu_reset(mpu); + mpu = RTE_PTR_ADD(mpu, ARK_MPU_QOFFSET); + } + + ark_udm_stop(ark->udm.v, 0); + ark_udm_configure(ark->udm.v, + RTE_PKTMBUF_HEADROOM, + RTE_MBUF_DEFAULT_DATAROOM, + ARK_RX_WRITE_TIME_NS); + ark_udm_stats_reset(ark->udm.v); + ark_udm_stop(ark->udm.v, 0); + + /* TX -- DDM */ + if (ark_ddm_stop(ark->ddm.v, 1)) + PMD_DRV_LOG(ERR, "Unable to stop DDM\n"); + + mpu = ark->mputx.v; + num_q = ark_api_num_queues(mpu); + ark->tx_queues = num_q; + for (i = 0; i < num_q; i++) { + ark_mpu_reset(mpu); + mpu = RTE_PTR_ADD(mpu, ARK_MPU_QOFFSET); + } + + ark_ddm_reset(ark->ddm.v); + ark_ddm_stats_reset(ark->ddm.v); + + ark_ddm_stop(ark->ddm.v, 0); + ark_rqp_stats_reset(ark->rqpacing); + + return 0; +} + +static int +eth_ark_dev_uninit(struct rte_eth_dev *dev) +{ + struct ark_adapter *ark = dev->data->dev_private; + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; + + if (ark->user_ext.dev_uninit) + ark->user_ext.dev_uninit(dev, + ark->user_data[dev->data->port_id]); + + ark_pktgen_uninit(ark->pg); + ark_pktchkr_uninit(ark->pc); + + dev->dev_ops = NULL; + dev->rx_pkt_burst = NULL; + dev->tx_pkt_burst = NULL; + return 0; +} + +static int +eth_ark_dev_configure(struct rte_eth_dev *dev) +{ + PMD_FUNC_LOG(DEBUG, "\n"); + struct ark_adapter *ark = dev->data->dev_private; + + eth_ark_dev_set_link_up(dev); + if (ark->user_ext.dev_configure) + return ark->user_ext.dev_configure(dev, + ark->user_data[dev->data->port_id]); + return 0; +} + +static void * +delay_pg_start(void *arg) +{ + struct ark_adapter *ark = (struct ark_adapter *)arg; + + /* This function is used exclusively for regression testing, We + * perform a blind sleep here to ensure that the external test + * application has time to setup the test before we generate packets + */ + usleep(100000); + ark_pktgen_run(ark->pg); + return NULL; +} + +static int +eth_ark_dev_start(struct rte_eth_dev *dev) +{ + struct ark_adapter *ark = dev->data->dev_private; + int i; + + PMD_FUNC_LOG(DEBUG, "\n"); + + /* RX Side */ + /* start UDM */ + ark_udm_start(ark->udm.v); + + for (i = 0; i < dev->data->nb_rx_queues; i++) 
+ eth_ark_rx_start_queue(dev, i); + + /* TX Side */ + for (i = 0; i < dev->data->nb_tx_queues; i++) + eth_ark_tx_queue_start(dev, i); + + /* start DDM */ + ark_ddm_start(ark->ddm.v); + + ark->started = 1; + /* set xmit and receive function */ + dev->rx_pkt_burst = &eth_ark_recv_pkts; + dev->tx_pkt_burst = &eth_ark_xmit_pkts; + + if (ark->start_pg) + ark_pktchkr_run(ark->pc); + + if (ark->start_pg && (dev->data->port_id == 0)) { + pthread_t thread; + + /* Delay packet generator start to allow the hardware to be ready. + * This is only used for sanity checking with the internal generator. + */ + if (pthread_create(&thread, NULL, delay_pg_start, ark)) { + PMD_DRV_LOG(ERR, "Could not create pktgen " + "starter thread\n"); + return -1; + } + } + + if (ark->user_ext.dev_start) + ark->user_ext.dev_start(dev, + ark->user_data[dev->data->port_id]); + + return 0; +} + +static void +eth_ark_dev_stop(struct rte_eth_dev *dev) +{ + uint16_t i; + int status; + struct ark_adapter *ark = dev->data->dev_private; + struct ark_mpu_t *mpu; + + PMD_FUNC_LOG(DEBUG, "\n"); + + if (ark->started == 0) + return; + ark->started = 0; + + /* Stop the extension first */ + if (ark->user_ext.dev_stop) + ark->user_ext.dev_stop(dev, + ark->user_data[dev->data->port_id]); + + /* Stop the packet generator */ + if (ark->start_pg) + ark_pktgen_pause(ark->pg); + + dev->rx_pkt_burst = &eth_ark_recv_pkts_noop; + dev->tx_pkt_burst = &eth_ark_xmit_pkts_noop; + + /* STOP TX Side */ + for (i = 0; i < dev->data->nb_tx_queues; i++) { + status = eth_ark_tx_queue_stop(dev, i); + if (status != 0) { + uint16_t port = dev->data->port_id; + PMD_DRV_LOG(ERR, + "tx_queue stop anomaly" + " port %u, queue %u\n", + port, i); + } + } + + /* Stop DDM */ + /* Wait up to 0.1 second. Each stop attempt is up to 1000 * 10 useconds */ + for (i = 0; i < 10; i++) { + status = ark_ddm_stop(ark->ddm.v, 1); + if (status == 0) + break; + } + if (status || i != 0) { + PMD_DRV_LOG(ERR, "DDM stop anomaly. status:" + " %d iter: %u. (%s)\n", + status, + i, + __func__); + ark_ddm_dump(ark->ddm.v, "Stop anomaly"); + + mpu = ark->mputx.v; + for (i = 0; i < ark->tx_queues; i++) { + ark_mpu_dump(mpu, "DDM failure dump", i); + mpu = RTE_PTR_ADD(mpu, ARK_MPU_QOFFSET); + } + } + + /* STOP RX Side */ + /* Stop the UDM; multiple tries are attempted */ + for (i = 0; i < 10; i++) { + status = ark_udm_stop(ark->udm.v, 1); + if (status == 0) + break; + } + if (status || i != 0) { + PMD_DRV_LOG(ERR, "UDM stop anomaly. status %d iter: %u. 
(%s)\n", + status, i, __func__); + ark_udm_dump(ark->udm.v, "Stop anomaly"); + + mpu = ark->mpurx.v; + for (i = 0; i < ark->rx_queues; i++) { + ark_mpu_dump(mpu, "UDM Stop anomaly", i); + mpu = RTE_PTR_ADD(mpu, ARK_MPU_QOFFSET); + } + } + + ark_udm_dump_stats(ark->udm.v, "Post stop"); + ark_udm_dump_perf(ark->udm.v, "Post stop"); + + for (i = 0; i < dev->data->nb_rx_queues; i++) + eth_ark_rx_dump_queue(dev, i, __func__); + + /* Stop the packet checker if it is running */ + if (ark->start_pg) { + ark_pktchkr_dump_stats(ark->pc); + ark_pktchkr_stop(ark->pc); + } +} + +static void +eth_ark_dev_close(struct rte_eth_dev *dev) +{ + struct ark_adapter *ark = dev->data->dev_private; + uint16_t i; + + if (ark->user_ext.dev_close) + ark->user_ext.dev_close(dev, + ark->user_data[dev->data->port_id]); + + eth_ark_dev_stop(dev); + eth_ark_udm_force_close(dev); + + /* + * TODO This should only be called once for the device during shutdown + */ + ark_rqp_dump(ark->rqpacing); + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + eth_ark_tx_queue_release(dev->data->tx_queues[i]); + dev->data->tx_queues[i] = 0; + } + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + eth_ark_dev_rx_queue_release(dev->data->rx_queues[i]); + dev->data->rx_queues[i] = 0; + } + + rte_free(dev->data->mac_addrs); + dev->data->mac_addrs = 0; +} + +static int +eth_ark_dev_info_get(struct rte_eth_dev *dev, + struct rte_eth_dev_info *dev_info) +{ + struct ark_adapter *ark = dev->data->dev_private; + struct ark_mpu_t *tx_mpu = RTE_PTR_ADD(ark->bar0, ARK_MPU_TX_BASE); + struct ark_mpu_t *rx_mpu = RTE_PTR_ADD(ark->bar0, ARK_MPU_RX_BASE); + uint16_t ports = ark->num_ports; + + dev_info->max_rx_pktlen = ARK_RX_MAX_PKT_LEN; + dev_info->min_rx_bufsize = ARK_RX_MIN_BUFSIZE; + + dev_info->max_rx_queues = ark_api_num_queues_per_port(rx_mpu, ports); + dev_info->max_tx_queues = ark_api_num_queues_per_port(tx_mpu, ports); + + dev_info->rx_desc_lim = (struct rte_eth_desc_lim) { + .nb_max = ARK_RX_MAX_QUEUE, + .nb_min = ARK_RX_MIN_QUEUE, + .nb_align = ARK_RX_MIN_QUEUE}; /* power of 2 */ + + dev_info->tx_desc_lim = (struct rte_eth_desc_lim) { + .nb_max = ARK_TX_MAX_QUEUE, + .nb_min = ARK_TX_MIN_QUEUE, + .nb_align = ARK_TX_MIN_QUEUE}; /* power of 2 */ + + /* ARK PMD supports all line rates, how do we indicate that here ?? 
*/ + dev_info->speed_capa = (ETH_LINK_SPEED_1G | + ETH_LINK_SPEED_10G | + ETH_LINK_SPEED_25G | + ETH_LINK_SPEED_40G | + ETH_LINK_SPEED_50G | + ETH_LINK_SPEED_100G); + + return 0; +} + +static int +eth_ark_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete) +{ + PMD_DEBUG_LOG(DEBUG, "link status = %d\n", + dev->data->dev_link.link_status); + struct ark_adapter *ark = dev->data->dev_private; + + if (ark->user_ext.link_update) { + return ark->user_ext.link_update + (dev, wait_to_complete, + ark->user_data[dev->data->port_id]); + } + return 0; +} + +static int +eth_ark_dev_set_link_up(struct rte_eth_dev *dev) +{ + dev->data->dev_link.link_status = 1; + struct ark_adapter *ark = dev->data->dev_private; + + if (ark->user_ext.dev_set_link_up) + return ark->user_ext.dev_set_link_up(dev, + ark->user_data[dev->data->port_id]); + return 0; +} + +static int +eth_ark_dev_set_link_down(struct rte_eth_dev *dev) +{ + dev->data->dev_link.link_status = 0; + struct ark_adapter *ark = dev->data->dev_private; + + if (ark->user_ext.dev_set_link_down) + return ark->user_ext.dev_set_link_down(dev, + ark->user_data[dev->data->port_id]); + return 0; +} + +static int +eth_ark_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) +{ + uint16_t i; + struct ark_adapter *ark = dev->data->dev_private; + + stats->ipackets = 0; + stats->ibytes = 0; + stats->opackets = 0; + stats->obytes = 0; + stats->imissed = 0; + stats->oerrors = 0; + + for (i = 0; i < dev->data->nb_tx_queues; i++) + eth_tx_queue_stats_get(dev->data->tx_queues[i], stats); + for (i = 0; i < dev->data->nb_rx_queues; i++) + eth_rx_queue_stats_get(dev->data->rx_queues[i], stats); + if (ark->user_ext.stats_get) + return ark->user_ext.stats_get(dev, stats, + ark->user_data[dev->data->port_id]); + return 0; +} + +static int +eth_ark_dev_stats_reset(struct rte_eth_dev *dev) +{ + uint16_t i; + struct ark_adapter *ark = dev->data->dev_private; + + for (i = 0; i < dev->data->nb_tx_queues; i++) + eth_tx_queue_stats_reset(dev->data->tx_queues[i]); + for (i = 0; i < dev->data->nb_rx_queues; i++) + eth_rx_queue_stats_reset(dev->data->rx_queues[i]); + if (ark->user_ext.stats_reset) + ark->user_ext.stats_reset(dev, + ark->user_data[dev->data->port_id]); + + return 0; +} + +static int +eth_ark_macaddr_add(struct rte_eth_dev *dev, + struct rte_ether_addr *mac_addr, + uint32_t index, + uint32_t pool) +{ + struct ark_adapter *ark = dev->data->dev_private; + + if (ark->user_ext.mac_addr_add) { + ark->user_ext.mac_addr_add(dev, + mac_addr, + index, + pool, + ark->user_data[dev->data->port_id]); + return 0; + } + return -ENOTSUP; +} + +static void +eth_ark_macaddr_remove(struct rte_eth_dev *dev, uint32_t index) +{ + struct ark_adapter *ark = dev->data->dev_private; + + if (ark->user_ext.mac_addr_remove) + ark->user_ext.mac_addr_remove(dev, index, + ark->user_data[dev->data->port_id]); +} + +static int +eth_ark_set_default_mac_addr(struct rte_eth_dev *dev, + struct rte_ether_addr *mac_addr) +{ + struct ark_adapter *ark = dev->data->dev_private; + + if (ark->user_ext.mac_addr_set) { + ark->user_ext.mac_addr_set(dev, mac_addr, + ark->user_data[dev->data->port_id]); + return 0; + } + return -ENOTSUP; +} + +static int +eth_ark_set_mtu(struct rte_eth_dev *dev, uint16_t size) +{ + struct ark_adapter *ark = dev->data->dev_private; + + if (ark->user_ext.set_mtu) + return ark->user_ext.set_mtu(dev, size, + ark->user_data[dev->data->port_id]); + + return -ENOTSUP; +} + +static inline int +process_pktdir_arg(const char *key, const char *value, + void *extra_args) 
+{ + PMD_FUNC_LOG(DEBUG, "key = %s, value = %s\n", + key, value); + struct ark_adapter *ark = + (struct ark_adapter *)extra_args; + + ark->pkt_dir_v = strtol(value, NULL, 16); + PMD_FUNC_LOG(DEBUG, "pkt_dir_v = 0x%x\n", ark->pkt_dir_v); + return 0; +} + +static inline int +process_file_args(const char *key, const char *value, void *extra_args) +{ + PMD_FUNC_LOG(DEBUG, "key = %s, value = %s\n", + key, value); + char *args = (char *)extra_args; + + /* Open the configuration file */ + FILE *file = fopen(value, "r"); + char line[ARK_MAX_ARG_LEN]; + int size = 0; + int first = 1; + + if (file == NULL) { + PMD_DRV_LOG(ERR, "Unable to open " + "config file %s\n", value); + return -1; + } + + while (fgets(line, sizeof(line), file)) { + size += strlen(line); + if (size >= ARK_MAX_ARG_LEN) { + PMD_DRV_LOG(ERR, "Unable to parse file %s args, " + "parameter list is too long\n", value); + fclose(file); + return -1; + } + if (first) { + strncpy(args, line, ARK_MAX_ARG_LEN); + first = 0; + } else { + strncat(args, line, ARK_MAX_ARG_LEN); + } + } + PMD_FUNC_LOG(DEBUG, "file = %s\n", args); + fclose(file); + return 0; +} + +static int +eth_ark_check_args(struct ark_adapter *ark, const char *params) +{ + struct rte_kvargs *kvlist; + unsigned int k_idx; + struct rte_kvargs_pair *pair = NULL; + int ret = -1; + + kvlist = rte_kvargs_parse(params, valid_arguments); + if (kvlist == NULL) + return 0; + + ark->pkt_gen_args[0] = 0; + ark->pkt_chkr_args[0] = 0; + + for (k_idx = 0; k_idx < kvlist->count; k_idx++) { + pair = &kvlist->pairs[k_idx]; + PMD_FUNC_LOG(DEBUG, "**** Arg passed to PMD = %s:%s\n", + pair->key, + pair->value); + } + + if (rte_kvargs_process(kvlist, + ARK_PKTDIR_ARG, + &process_pktdir_arg, + ark) != 0) { + PMD_DRV_LOG(ERR, "Unable to parse arg %s\n", ARK_PKTDIR_ARG); + goto free_kvlist; + } + + if (rte_kvargs_process(kvlist, + ARK_PKTGEN_ARG, + &process_file_args, + ark->pkt_gen_args) != 0) { + PMD_DRV_LOG(ERR, "Unable to parse arg %s\n", ARK_PKTGEN_ARG); + goto free_kvlist; + } + + if (rte_kvargs_process(kvlist, + ARK_PKTCHKR_ARG, + &process_file_args, + ark->pkt_chkr_args) != 0) { + PMD_DRV_LOG(ERR, "Unable to parse arg %s\n", ARK_PKTCHKR_ARG); + goto free_kvlist; + } + + PMD_DRV_LOG(INFO, "packet director set to 0x%x\n", ark->pkt_dir_v); + /* Setup the packet director */ + ark_pktdir_setup(ark->pd, ark->pkt_dir_v); + + /* Setup the packet generator */ + if (ark->pkt_gen_args[0]) { + PMD_DRV_LOG(INFO, "Setting up the packet generator\n"); + ark_pktgen_parse(ark->pkt_gen_args); + ark_pktgen_reset(ark->pg); + ark_pktgen_setup(ark->pg); + ark->start_pg = 1; + } + + /* Setup the packet checker */ + if (ark->pkt_chkr_args[0]) { + ark_pktchkr_parse(ark->pkt_chkr_args); + ark_pktchkr_setup(ark->pc); + } + + ret = 0; + +free_kvlist: + rte_kvargs_free(kvlist); + + return ret; +} + +RTE_PMD_REGISTER_PCI(net_ark, rte_ark_pmd); +RTE_PMD_REGISTER_KMOD_DEP(net_ark, "* igb_uio | uio_pci_generic "); +RTE_PMD_REGISTER_PCI_TABLE(net_ark, pci_id_ark_map); +RTE_PMD_REGISTER_PARAM_STRING(net_ark, + ARK_PKTGEN_ARG "= " + ARK_PKTCHKR_ARG "= " + ARK_PKTDIR_ARG "="); + +RTE_INIT(ark_init_log) +{ + ark_logtype = rte_log_register("pmd.net.ark"); + if (ark_logtype >= 0) + rte_log_set_level(ark_logtype, RTE_LOG_NOTICE); +} diff --git a/src/spdk/dpdk/drivers/net/ark/ark_ethdev_rx.c b/src/spdk/dpdk/drivers/net/ark/ark_ethdev_rx.c new file mode 100644 index 000000000..4d518d558 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ark/ark_ethdev_rx.c @@ -0,0 +1,680 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 
2015-2018 Atomic Rules LLC + */ + +#include + +#include "ark_ethdev_rx.h" +#include "ark_global.h" +#include "ark_logs.h" +#include "ark_mpu.h" +#include "ark_udm.h" + +#define ARK_RX_META_SIZE 32 +#define ARK_RX_META_OFFSET (RTE_PKTMBUF_HEADROOM - ARK_RX_META_SIZE) +#define ARK_RX_MAX_NOCHAIN (RTE_MBUF_DEFAULT_DATAROOM) + +/* Forward declarations */ +struct ark_rx_queue; +struct ark_rx_meta; + +static void dump_mbuf_data(struct rte_mbuf *mbuf, uint16_t lo, uint16_t hi); +static void ark_ethdev_rx_dump(const char *name, struct ark_rx_queue *queue); +static uint32_t eth_ark_rx_jumbo(struct ark_rx_queue *queue, + struct ark_rx_meta *meta, + struct rte_mbuf *mbuf0, + uint32_t cons_index); +static inline int eth_ark_rx_seed_mbufs(struct ark_rx_queue *queue); +static int eth_ark_rx_seed_recovery(struct ark_rx_queue *queue, + uint32_t *pnb, + struct rte_mbuf **mbufs); + +/* ************************************************************************* */ +struct ark_rx_queue { + /* array of mbufs to populate */ + struct rte_mbuf **reserve_q; + /* array of physical addresses of the mbuf data pointer */ + /* This point is a virtual address */ + rte_iova_t *paddress_q; + struct rte_mempool *mb_pool; + + struct ark_udm_t *udm; + struct ark_mpu_t *mpu; + + uint32_t queue_size; + uint32_t queue_mask; + + uint32_t seed_index; /* step 1 set with empty mbuf */ + uint32_t cons_index; /* step 3 consumed by driver */ + + /* The queue Id is used to identify the HW Q */ + uint16_t phys_qid; + + /* The queue Index is used within the dpdk device structures */ + uint16_t queue_index; + + uint32_t last_cons; + + /* separate cache line */ + /* second cache line - fields only used in slow path */ + RTE_MARKER cacheline1 __rte_cache_min_aligned; + + volatile uint32_t prod_index; /* step 2 filled by FPGA */ +} __rte_cache_aligned; + + +/* ************************************************************************* */ +static int +eth_ark_rx_hw_setup(struct rte_eth_dev *dev, + struct ark_rx_queue *queue, + uint16_t rx_queue_id __rte_unused, uint16_t rx_queue_idx) +{ + rte_iova_t queue_base; + rte_iova_t phys_addr_q_base; + rte_iova_t phys_addr_prod_index; + + queue_base = rte_malloc_virt2iova(queue); + phys_addr_prod_index = queue_base + + offsetof(struct ark_rx_queue, prod_index); + + phys_addr_q_base = rte_malloc_virt2iova(queue->paddress_q); + + /* Verify HW */ + if (ark_mpu_verify(queue->mpu, sizeof(rte_iova_t))) { + PMD_DRV_LOG(ERR, "Illegal configuration rx queue\n"); + return -1; + } + + /* Stop and Reset and configure MPU */ + ark_mpu_configure(queue->mpu, phys_addr_q_base, queue->queue_size, 0); + + ark_udm_write_addr(queue->udm, phys_addr_prod_index); + + /* advance the valid pointer, but don't start until the queue starts */ + ark_mpu_reset_stats(queue->mpu); + + /* The seed is the producer index for the HW */ + ark_mpu_set_producer(queue->mpu, queue->seed_index); + dev->data->rx_queue_state[rx_queue_idx] = RTE_ETH_QUEUE_STATE_STOPPED; + + return 0; +} + +static inline void +eth_ark_rx_update_cons_index(struct ark_rx_queue *queue, uint32_t cons_index) +{ + queue->cons_index = cons_index; + eth_ark_rx_seed_mbufs(queue); + if (((cons_index - queue->last_cons) >= 64U)) { + queue->last_cons = cons_index; + ark_mpu_set_producer(queue->mpu, queue->seed_index); + } +} + +/* ************************************************************************* */ +int +eth_ark_dev_rx_queue_setup(struct rte_eth_dev *dev, + uint16_t queue_idx, + uint16_t nb_desc, + unsigned int socket_id, + const struct rte_eth_rxconf *rx_conf, + struct 
rte_mempool *mb_pool) +{ + static int warning1; /* = 0 */ + struct ark_adapter *ark = dev->data->dev_private; + + struct ark_rx_queue *queue; + uint32_t i; + int status; + + int qidx = queue_idx; + + /* We may already be setup, free memory prior to re-allocation */ + if (dev->data->rx_queues[queue_idx] != NULL) { + eth_ark_dev_rx_queue_release(dev->data->rx_queues[queue_idx]); + dev->data->rx_queues[queue_idx] = NULL; + } + + if (rx_conf != NULL && warning1 == 0) { + warning1 = 1; + PMD_DRV_LOG(INFO, + "Arkville ignores rte_eth_rxconf argument.\n"); + } + + if (RTE_PKTMBUF_HEADROOM < ARK_RX_META_SIZE) { + PMD_DRV_LOG(ERR, + "Error: DPDK Arkville requires head room > %d bytes (%s)\n", + ARK_RX_META_SIZE, __func__); + return -1; /* ERROR CODE */ + } + + if (!rte_is_power_of_2(nb_desc)) { + PMD_DRV_LOG(ERR, + "DPDK Arkville configuration queue size must be power of two %u (%s)\n", + nb_desc, __func__); + return -1; /* ERROR CODE */ + } + + /* Allocate queue struct */ + queue = rte_zmalloc_socket("Ark_rxqueue", + sizeof(struct ark_rx_queue), + 64, + socket_id); + if (queue == 0) { + PMD_DRV_LOG(ERR, "Failed to allocate memory in %s\n", __func__); + return -ENOMEM; + } + + /* NOTE zmalloc is used, no need to 0 indexes, etc. */ + queue->mb_pool = mb_pool; + queue->phys_qid = qidx; + queue->queue_index = queue_idx; + queue->queue_size = nb_desc; + queue->queue_mask = nb_desc - 1; + + queue->reserve_q = + rte_zmalloc_socket("Ark_rx_queue mbuf", + nb_desc * sizeof(struct rte_mbuf *), + 64, + socket_id); + queue->paddress_q = + rte_zmalloc_socket("Ark_rx_queue paddr", + nb_desc * sizeof(rte_iova_t), + 64, + socket_id); + + if (queue->reserve_q == 0 || queue->paddress_q == 0) { + PMD_DRV_LOG(ERR, + "Failed to allocate queue memory in %s\n", + __func__); + rte_free(queue->reserve_q); + rte_free(queue->paddress_q); + rte_free(queue); + return -ENOMEM; + } + + dev->data->rx_queues[queue_idx] = queue; + queue->udm = RTE_PTR_ADD(ark->udm.v, qidx * ARK_UDM_QOFFSET); + queue->mpu = RTE_PTR_ADD(ark->mpurx.v, qidx * ARK_MPU_QOFFSET); + + /* populate mbuf reserve */ + status = eth_ark_rx_seed_mbufs(queue); + + if (queue->seed_index != nb_desc) { + PMD_DRV_LOG(ERR, "ARK: Failed to allocate %u mbufs for RX queue %d\n", + nb_desc, qidx); + status = -1; + } + /* MPU Setup */ + if (status == 0) + status = eth_ark_rx_hw_setup(dev, queue, qidx, queue_idx); + + if (unlikely(status != 0)) { + struct rte_mbuf **mbuf; + + PMD_DRV_LOG(ERR, "Failed to initialize RX queue %d %s\n", + qidx, + __func__); + /* Free the mbufs allocated */ + for (i = 0, mbuf = queue->reserve_q; + i < queue->seed_index; ++i, mbuf++) { + rte_pktmbuf_free(*mbuf); + } + rte_free(queue->reserve_q); + rte_free(queue->paddress_q); + rte_free(queue); + return -1; /* ERROR CODE */ + } + + return 0; +} + +/* ************************************************************************* */ +uint16_t +eth_ark_recv_pkts_noop(void *rx_queue __rte_unused, + struct rte_mbuf **rx_pkts __rte_unused, + uint16_t nb_pkts __rte_unused) +{ + return 0; +} + +/* ************************************************************************* */ +uint16_t +eth_ark_recv_pkts(void *rx_queue, + struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + struct ark_rx_queue *queue; + register uint32_t cons_index, prod_index; + uint16_t nb; + struct rte_mbuf *mbuf; + struct ark_rx_meta *meta; + + queue = (struct ark_rx_queue *)rx_queue; + if (unlikely(queue == 0)) + return 0; + if (unlikely(nb_pkts == 0)) + return 0; + prod_index = queue->prod_index; + cons_index = queue->cons_index; + nb = 0; + 
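/* Walk the descriptor ring from cons_index up to the FPGA-written
+ * prod_index; per-packet metadata is read from the mbuf headroom at
+ * ARK_RX_META_OFFSET, where the hardware placed it. */
+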
+ while (prod_index != cons_index) { + mbuf = queue->reserve_q[cons_index & queue->queue_mask]; + /* prefetch mbuf */ + rte_mbuf_prefetch_part1(mbuf); + rte_mbuf_prefetch_part2(mbuf); + + /* META DATA embedded in headroom */ + meta = RTE_PTR_ADD(mbuf->buf_addr, ARK_RX_META_OFFSET); + + mbuf->port = meta->port; + mbuf->pkt_len = meta->pkt_len; + mbuf->data_len = meta->pkt_len; + mbuf->timestamp = meta->timestamp; + mbuf->udata64 = meta->user_data; + + if (ARK_RX_DEBUG) { /* debug sanity checks */ + if ((meta->pkt_len > (1024 * 16)) || + (meta->pkt_len == 0)) { + PMD_RX_LOG(DEBUG, "RX: Bad Meta Q: %u" + " cons: %" PRIU32 + " prod: %" PRIU32 + " seed_index %" PRIU32 + "\n", + queue->phys_qid, + cons_index, + queue->prod_index, + queue->seed_index); + + + PMD_RX_LOG(DEBUG, " : UDM" + " prod: %" PRIU32 + " len: %u\n", + queue->udm->rt_cfg.prod_idx, + meta->pkt_len); + ark_mpu_dump(queue->mpu, + " ", + queue->phys_qid); + dump_mbuf_data(mbuf, 0, 256); + /* its FUBAR so fix it */ + mbuf->pkt_len = 63; + meta->pkt_len = 63; + } + /* seqn is only set under debug */ + mbuf->seqn = cons_index; + } + + if (unlikely(meta->pkt_len > ARK_RX_MAX_NOCHAIN)) + cons_index = eth_ark_rx_jumbo + (queue, meta, mbuf, cons_index + 1); + else + cons_index += 1; + + rx_pkts[nb] = mbuf; + nb++; + if (nb >= nb_pkts) + break; + } + + if (unlikely(nb != 0)) + /* report next free to FPGA */ + eth_ark_rx_update_cons_index(queue, cons_index); + + return nb; +} + +/* ************************************************************************* */ +static uint32_t +eth_ark_rx_jumbo(struct ark_rx_queue *queue, + struct ark_rx_meta *meta, + struct rte_mbuf *mbuf0, + uint32_t cons_index) +{ + struct rte_mbuf *mbuf_prev; + struct rte_mbuf *mbuf; + + uint16_t remaining; + uint16_t data_len; + uint16_t segments; + + /* first buf populated by called */ + mbuf_prev = mbuf0; + segments = 1; + data_len = RTE_MIN(meta->pkt_len, RTE_MBUF_DEFAULT_DATAROOM); + remaining = meta->pkt_len - data_len; + mbuf0->data_len = data_len; + + /* HW guarantees that the data does not exceed prod_index! */ + while (remaining != 0) { + data_len = RTE_MIN(remaining, + RTE_MBUF_DEFAULT_DATAROOM + + RTE_PKTMBUF_HEADROOM); + + remaining -= data_len; + segments += 1; + + mbuf = queue->reserve_q[cons_index & queue->queue_mask]; + mbuf_prev->next = mbuf; + mbuf_prev = mbuf; + mbuf->data_len = data_len; + mbuf->data_off = 0; + if (ARK_RX_DEBUG) + mbuf->seqn = cons_index; /* for debug only */ + + cons_index += 1; + } + + mbuf0->nb_segs = segments; + return cons_index; +} + +/* Drain the internal queue allowing hw to clear out. 
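Any mbufs the hardware has already filled but the application has not yet consumed are freed; the queue release path calls this before tearing down the rings.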
*/ +static void +eth_ark_rx_queue_drain(struct ark_rx_queue *queue) +{ + register uint32_t cons_index; + struct rte_mbuf *mbuf; + + cons_index = queue->cons_index; + + /* NOT performance optimized, since this is a one-shot call */ + while ((cons_index ^ queue->prod_index) & queue->queue_mask) { + mbuf = queue->reserve_q[cons_index & queue->queue_mask]; + rte_pktmbuf_free(mbuf); + cons_index++; + eth_ark_rx_update_cons_index(queue, cons_index); + } +} + +uint32_t +eth_ark_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_id) +{ + struct ark_rx_queue *queue; + + queue = dev->data->rx_queues[queue_id]; + return (queue->prod_index - queue->cons_index); /* mod arith */ +} + +/* ************************************************************************* */ +int +eth_ark_rx_start_queue(struct rte_eth_dev *dev, uint16_t queue_id) +{ + struct ark_rx_queue *queue; + + queue = dev->data->rx_queues[queue_id]; + if (queue == 0) + return -1; + + dev->data->rx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STARTED; + + ark_mpu_set_producer(queue->mpu, queue->seed_index); + ark_mpu_start(queue->mpu); + + ark_udm_queue_enable(queue->udm, 1); + + return 0; +} + +/* ************************************************************************* */ + +/* Queue can be restarted. data remains + */ +int +eth_ark_rx_stop_queue(struct rte_eth_dev *dev, uint16_t queue_id) +{ + struct ark_rx_queue *queue; + + queue = dev->data->rx_queues[queue_id]; + if (queue == 0) + return -1; + + ark_udm_queue_enable(queue->udm, 0); + + dev->data->rx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; + + return 0; +} + +/* ************************************************************************* */ +static inline int +eth_ark_rx_seed_mbufs(struct ark_rx_queue *queue) +{ + uint32_t limit = queue->cons_index + queue->queue_size; + uint32_t seed_index = queue->seed_index; + + uint32_t count = 0; + uint32_t seed_m = queue->seed_index & queue->queue_mask; + + uint32_t nb = limit - seed_index; + + /* Handle wrap around -- remainder is filled on the next call */ + if (unlikely(seed_m + nb > queue->queue_size)) + nb = queue->queue_size - seed_m; + + struct rte_mbuf **mbufs = &queue->reserve_q[seed_m]; + int status = rte_pktmbuf_alloc_bulk(queue->mb_pool, mbufs, nb); + + if (unlikely(status != 0)) { + /* Try to recover from lack of mbufs in pool */ + status = eth_ark_rx_seed_recovery(queue, &nb, mbufs); + if (unlikely(status != 0)) { + return -1; + } + } + + if (ARK_RX_DEBUG) { /* DEBUG */ + while (count != nb) { + struct rte_mbuf *mbuf_init = + queue->reserve_q[seed_m + count]; + + memset(mbuf_init->buf_addr, -1, 512); + *((uint32_t *)mbuf_init->buf_addr) = + seed_index + count; + *(uint16_t *)RTE_PTR_ADD(mbuf_init->buf_addr, 4) = + queue->phys_qid; + count++; + } + count = 0; + } /* DEBUG */ + queue->seed_index += nb; + + /* Duff's device https://en.wikipedia.org/wiki/Duff's_device */ + switch (nb % 4) { + case 0: + while (count != nb) { + queue->paddress_q[seed_m++] = + (*mbufs++)->buf_iova; + count++; + /* FALLTHROUGH */ + case 3: + queue->paddress_q[seed_m++] = + (*mbufs++)->buf_iova; + count++; + /* FALLTHROUGH */ + case 2: + queue->paddress_q[seed_m++] = + (*mbufs++)->buf_iova; + count++; + /* FALLTHROUGH */ + case 1: + queue->paddress_q[seed_m++] = + (*mbufs++)->buf_iova; + count++; + /* FALLTHROUGH */ + + } /* while (count != nb) */ + } /* switch */ + + return 0; +} + +int +eth_ark_rx_seed_recovery(struct ark_rx_queue *queue, + uint32_t *pnb, + struct rte_mbuf **mbufs) +{ + int status = -1; + + /* Ignore small allocation failures 
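(requests of 64 mbufs or fewer fail outright and are retried on a later refill pass, while larger requests fall back to the smaller 64-mbuf allocation below)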
*/ + if (*pnb <= 64) + return -1; + + *pnb = 64U; + status = rte_pktmbuf_alloc_bulk(queue->mb_pool, mbufs, *pnb); + if (status != 0) { + PMD_DRV_LOG(ERR, + "ARK: Could not allocate %u mbufs from pool for RX queue %u;" + " %u free buffers remaining in queue\n", + *pnb, queue->queue_index, + queue->seed_index - queue->cons_index); + } + return status; +} + +void +eth_ark_rx_dump_queue(struct rte_eth_dev *dev, uint16_t queue_id, + const char *msg) +{ + struct ark_rx_queue *queue; + + queue = dev->data->rx_queues[queue_id]; + + ark_ethdev_rx_dump(msg, queue); +} + +/* ************************************************************************* */ +/* Call on device closed no user API, queue is stopped */ +void +eth_ark_dev_rx_queue_release(void *vqueue) +{ + struct ark_rx_queue *queue; + uint32_t i; + + queue = (struct ark_rx_queue *)vqueue; + if (queue == 0) + return; + + ark_udm_queue_enable(queue->udm, 0); + /* Stop the MPU since pointer are going away */ + ark_mpu_stop(queue->mpu); + + /* Need to clear out mbufs here, dropping packets along the way */ + eth_ark_rx_queue_drain(queue); + + for (i = 0; i < queue->queue_size; ++i) + rte_pktmbuf_free(queue->reserve_q[i]); + + rte_free(queue->reserve_q); + rte_free(queue->paddress_q); + rte_free(queue); +} + +void +eth_rx_queue_stats_get(void *vqueue, struct rte_eth_stats *stats) +{ + struct ark_rx_queue *queue; + struct ark_udm_t *udm; + + queue = vqueue; + if (queue == 0) + return; + udm = queue->udm; + + uint64_t ibytes = ark_udm_bytes(udm); + uint64_t ipackets = ark_udm_packets(udm); + uint64_t idropped = ark_udm_dropped(queue->udm); + + stats->q_ipackets[queue->queue_index] = ipackets; + stats->q_ibytes[queue->queue_index] = ibytes; + stats->q_errors[queue->queue_index] = idropped; + stats->ipackets += ipackets; + stats->ibytes += ibytes; + stats->imissed += idropped; +} + +void +eth_rx_queue_stats_reset(void *vqueue) +{ + struct ark_rx_queue *queue; + + queue = vqueue; + if (queue == 0) + return; + + ark_mpu_reset_stats(queue->mpu); + ark_udm_queue_stats_reset(queue->udm); +} + +void +eth_ark_udm_force_close(struct rte_eth_dev *dev) +{ + struct ark_adapter *ark = dev->data->dev_private; + struct ark_rx_queue *queue; + uint32_t index; + uint16_t i; + + if (!ark_udm_is_flushed(ark->udm.v)) { + /* restart the MPUs */ + PMD_DRV_LOG(ERR, "ARK: %s UDM not flushed\n", __func__); + for (i = 0; i < dev->data->nb_rx_queues; i++) { + queue = (struct ark_rx_queue *)dev->data->rx_queues[i]; + if (queue == 0) + continue; + + ark_mpu_start(queue->mpu); + /* Add some buffers */ + index = 100000 + queue->seed_index; + ark_mpu_set_producer(queue->mpu, index); + } + /* Wait to allow data to pass */ + usleep(100); + + PMD_DEBUG_LOG(DEBUG, "UDM forced flush attempt, stopped = %d\n", + ark_udm_is_flushed(ark->udm.v)); + } + ark_udm_reset(ark->udm.v); +} + +static void +ark_ethdev_rx_dump(const char *name, struct ark_rx_queue *queue) +{ + if (queue == NULL) + return; + PMD_DEBUG_LOG(DEBUG, "RX QUEUE %d -- %s", queue->phys_qid, name); + PMD_DEBUG_LOG(DEBUG, ARK_SU32 ARK_SU32 ARK_SU32 ARK_SU32 "\n", + "queue_size", queue->queue_size, + "seed_index", queue->seed_index, + "prod_index", queue->prod_index, + "cons_index", queue->cons_index); + + ark_mpu_dump(queue->mpu, name, queue->phys_qid); + ark_mpu_dump_setup(queue->mpu, queue->phys_qid); + ark_udm_dump(queue->udm, name); + ark_udm_dump_setup(queue->udm, queue->phys_qid); +} + +/* Only used in debug. + * This function is a raw memory dump of a portion of an mbuf's memory + * region. 
The usual function, rte_pktmbuf_dump() only shows data + * with respect to the data_off field. This function show data + * anywhere in the mbuf's buffer. This is useful for examining + * data in the headroom or tailroom portion of an mbuf. + */ +static void +dump_mbuf_data(struct rte_mbuf *mbuf, uint16_t lo, uint16_t hi) +{ + uint16_t i, j; + + PMD_DRV_LOG(INFO, " MBUF: %p len %d, off: %d, seq: %" PRIU32 "\n", mbuf, + mbuf->pkt_len, mbuf->data_off, mbuf->seqn); + for (i = lo; i < hi; i += 16) { + uint8_t *dp = RTE_PTR_ADD(mbuf->buf_addr, i); + + PMD_DRV_LOG(INFO, " %6d: ", i); + for (j = 0; j < 16; j++) + PMD_DRV_LOG(INFO, " %02x", dp[j]); + + PMD_DRV_LOG(INFO, "\n"); + } +} diff --git a/src/spdk/dpdk/drivers/net/ark/ark_ethdev_rx.h b/src/spdk/dpdk/drivers/net/ark/ark_ethdev_rx.h new file mode 100644 index 000000000..0fdd29b1a --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ark/ark_ethdev_rx.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2015-2018 Atomic Rules LLC + */ + +#ifndef _ARK_ETHDEV_RX_H_ +#define _ARK_ETHDEV_RX_H_ + +#include + +#include +#include +#include + + +int eth_ark_dev_rx_queue_setup(struct rte_eth_dev *dev, + uint16_t queue_idx, + uint16_t nb_desc, + unsigned int socket_id, + const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *mp); +uint32_t eth_ark_dev_rx_queue_count(struct rte_eth_dev *dev, + uint16_t rx_queue_id); +int eth_ark_rx_stop_queue(struct rte_eth_dev *dev, uint16_t queue_id); +int eth_ark_rx_start_queue(struct rte_eth_dev *dev, uint16_t queue_id); +uint16_t eth_ark_recv_pkts_noop(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); +uint16_t eth_ark_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); +void eth_ark_dev_rx_queue_release(void *rx_queue); +void eth_rx_queue_stats_get(void *vqueue, struct rte_eth_stats *stats); +void eth_rx_queue_stats_reset(void *vqueue); +void eth_ark_rx_dump_queue(struct rte_eth_dev *dev, uint16_t queue_id, + const char *msg); +void eth_ark_udm_force_close(struct rte_eth_dev *dev); + +#endif diff --git a/src/spdk/dpdk/drivers/net/ark/ark_ethdev_tx.c b/src/spdk/dpdk/drivers/net/ark/ark_ethdev_tx.c new file mode 100644 index 000000000..289668774 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ark/ark_ethdev_tx.c @@ -0,0 +1,436 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2015-2018 Atomic Rules LLC + */ + +#include + +#include "ark_ethdev_tx.h" +#include "ark_global.h" +#include "ark_mpu.h" +#include "ark_ddm.h" +#include "ark_logs.h" + +#define ARK_TX_META_SIZE 32 +#define ARK_TX_META_OFFSET (RTE_PKTMBUF_HEADROOM - ARK_TX_META_SIZE) +#define ARK_TX_MAX_NOCHAIN (RTE_MBUF_DEFAULT_DATAROOM) + + +/* ************************************************************************* */ +struct ark_tx_queue { + struct ark_tx_meta *meta_q; + struct rte_mbuf **bufs; + + /* handles for hw objects */ + struct ark_mpu_t *mpu; + struct ark_ddm_t *ddm; + + /* Stats HW tracks bytes and packets, need to count send errors */ + uint64_t tx_errors; + + uint32_t queue_size; + uint32_t queue_mask; + + /* 3 indexes to the paired data rings. 
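prod_index and free_index are kept on this cache line for the transmit fast path; cons_index, written back by the hardware, sits on the second cache line below.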
*/ + uint32_t prod_index; /* where to put the next one */ + uint32_t free_index; /* mbuf has been freed */ + + /* The queue Id is used to identify the HW Q */ + uint16_t phys_qid; + /* The queue Index within the dpdk device structures */ + uint16_t queue_index; + + uint32_t pad[1]; + + /* second cache line - fields only used in slow path */ + RTE_MARKER cacheline1 __rte_cache_min_aligned; + uint32_t cons_index; /* hw is done, can be freed */ +} __rte_cache_aligned; + +/* Forward declarations */ +static uint32_t eth_ark_tx_jumbo(struct ark_tx_queue *queue, + struct rte_mbuf *mbuf); +static int eth_ark_tx_hw_queue_config(struct ark_tx_queue *queue); +static void free_completed_tx(struct ark_tx_queue *queue); + +static inline void +ark_tx_hw_queue_stop(struct ark_tx_queue *queue) +{ + ark_mpu_stop(queue->mpu); +} + +/* ************************************************************************* */ +static inline void +eth_ark_tx_meta_from_mbuf(struct ark_tx_meta *meta, + const struct rte_mbuf *mbuf, + uint8_t flags) +{ + meta->physaddr = rte_mbuf_data_iova(mbuf); + meta->user1 = (uint32_t)mbuf->udata64; + meta->data_len = rte_pktmbuf_data_len(mbuf); + meta->flags = flags; +} + +/* ************************************************************************* */ +uint16_t +eth_ark_xmit_pkts_noop(void *vtxq __rte_unused, + struct rte_mbuf **tx_pkts __rte_unused, + uint16_t nb_pkts __rte_unused) +{ + return 0; +} + +/* ************************************************************************* */ +uint16_t +eth_ark_xmit_pkts(void *vtxq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) +{ + struct ark_tx_queue *queue; + struct rte_mbuf *mbuf; + struct ark_tx_meta *meta; + + uint32_t idx; + uint32_t prod_index_limit; + int stat; + uint16_t nb; + + queue = (struct ark_tx_queue *)vtxq; + + /* free any packets after the HW is done with them */ + free_completed_tx(queue); + + prod_index_limit = queue->queue_size + queue->free_index; + + for (nb = 0; + (nb < nb_pkts) && (queue->prod_index != prod_index_limit); + ++nb) { + mbuf = tx_pkts[nb]; + + if (ARK_TX_PAD_TO_60) { + if (unlikely(rte_pktmbuf_pkt_len(mbuf) < 60)) { + /* this packet even if it is small can be split, + * be sure to add to the end mbuf + */ + uint16_t to_add = + 60 - rte_pktmbuf_pkt_len(mbuf); + char *appended = + rte_pktmbuf_append(mbuf, to_add); + + if (appended == 0) { + /* This packet is in error, + * we cannot send it so just + * count it and delete it. + */ + queue->tx_errors += 1; + rte_pktmbuf_free(mbuf); + continue; + } + memset(appended, 0, to_add); + } + } + + if (unlikely(mbuf->nb_segs != 1)) { + stat = eth_ark_tx_jumbo(queue, mbuf); + if (unlikely(stat != 0)) + break; /* Queue is full */ + } else { + idx = queue->prod_index & queue->queue_mask; + queue->bufs[idx] = mbuf; + meta = &queue->meta_q[idx]; + eth_ark_tx_meta_from_mbuf(meta, + mbuf, + ARK_DDM_SOP | + ARK_DDM_EOP); + queue->prod_index++; + } + } + + if (ARK_TX_DEBUG && (nb != nb_pkts)) { + PMD_TX_LOG(DEBUG, "TX: Failure to send:" + " req: %" PRIU32 + " sent: %" PRIU32 + " prod: %" PRIU32 + " cons: %" PRIU32 + " free: %" PRIU32 "\n", + nb_pkts, nb, + queue->prod_index, + queue->cons_index, + queue->free_index); + ark_mpu_dump(queue->mpu, + "TX Failure MPU: ", + queue->phys_qid); + } + + /* let FPGA know producer index. 
*/ + if (likely(nb != 0)) + ark_mpu_set_producer(queue->mpu, queue->prod_index); + + return nb; +} + +/* ************************************************************************* */ +static uint32_t +eth_ark_tx_jumbo(struct ark_tx_queue *queue, struct rte_mbuf *mbuf) +{ + struct rte_mbuf *next; + struct ark_tx_meta *meta; + uint32_t free_queue_space; + uint32_t idx; + uint8_t flags = ARK_DDM_SOP; + + free_queue_space = queue->queue_mask - + (queue->prod_index - queue->free_index); + if (unlikely(free_queue_space < mbuf->nb_segs)) + return -1; + + while (mbuf != NULL) { + next = mbuf->next; + + idx = queue->prod_index & queue->queue_mask; + queue->bufs[idx] = mbuf; + meta = &queue->meta_q[idx]; + + flags |= (next == NULL) ? ARK_DDM_EOP : 0; + eth_ark_tx_meta_from_mbuf(meta, mbuf, flags); + queue->prod_index++; + + flags &= ~ARK_DDM_SOP; /* drop SOP flags */ + mbuf = next; + } + + return 0; +} + +/* ************************************************************************* */ +int +eth_ark_tx_queue_setup(struct rte_eth_dev *dev, + uint16_t queue_idx, + uint16_t nb_desc, + unsigned int socket_id, + const struct rte_eth_txconf *tx_conf __rte_unused) +{ + struct ark_adapter *ark = dev->data->dev_private; + struct ark_tx_queue *queue; + int status; + + int qidx = queue_idx; + + if (!rte_is_power_of_2(nb_desc)) { + PMD_DRV_LOG(ERR, + "DPDK Arkville configuration queue size" + " must be power of two %u (%s)\n", + nb_desc, __func__); + return -1; + } + + /* Allocate queue struct */ + queue = rte_zmalloc_socket("Ark_txqueue", + sizeof(struct ark_tx_queue), + 64, + socket_id); + if (queue == 0) { + PMD_DRV_LOG(ERR, "Failed to allocate tx " + "queue memory in %s\n", + __func__); + return -ENOMEM; + } + + /* we use zmalloc no need to initialize fields */ + queue->queue_size = nb_desc; + queue->queue_mask = nb_desc - 1; + queue->phys_qid = qidx; + queue->queue_index = queue_idx; + dev->data->tx_queues[queue_idx] = queue; + + queue->meta_q = + rte_zmalloc_socket("Ark_txqueue meta", + nb_desc * sizeof(struct ark_tx_meta), + 64, + socket_id); + queue->bufs = + rte_zmalloc_socket("Ark_txqueue bufs", + nb_desc * sizeof(struct rte_mbuf *), + 64, + socket_id); + + if (queue->meta_q == 0 || queue->bufs == 0) { + PMD_DRV_LOG(ERR, "Failed to allocate " + "queue memory in %s\n", __func__); + rte_free(queue->meta_q); + rte_free(queue->bufs); + rte_free(queue); + return -ENOMEM; + } + + queue->ddm = RTE_PTR_ADD(ark->ddm.v, qidx * ARK_DDM_QOFFSET); + queue->mpu = RTE_PTR_ADD(ark->mputx.v, qidx * ARK_MPU_QOFFSET); + + status = eth_ark_tx_hw_queue_config(queue); + + if (unlikely(status != 0)) { + rte_free(queue->meta_q); + rte_free(queue->bufs); + rte_free(queue); + return -1; /* ERROR CODE */ + } + + return 0; +} + +/* ************************************************************************* */ +static int +eth_ark_tx_hw_queue_config(struct ark_tx_queue *queue) +{ + rte_iova_t queue_base, ring_base, cons_index_addr; + uint32_t write_interval_ns; + + /* Verify HW -- MPU */ + if (ark_mpu_verify(queue->mpu, sizeof(struct ark_tx_meta))) + return -1; + + queue_base = rte_malloc_virt2iova(queue); + ring_base = rte_malloc_virt2iova(queue->meta_q); + cons_index_addr = + queue_base + offsetof(struct ark_tx_queue, cons_index); + + ark_mpu_stop(queue->mpu); + ark_mpu_reset(queue->mpu); + + /* Stop and Reset and configure MPU */ + ark_mpu_configure(queue->mpu, ring_base, queue->queue_size, 1); + + /* + * Adjust the write interval based on queue size -- + * increase pcie traffic when low mbuf count + * Queue sizes less than 128 are 
not allowed + */ + switch (queue->queue_size) { + case 128: + write_interval_ns = 500; + break; + case 256: + write_interval_ns = 500; + break; + case 512: + write_interval_ns = 1000; + break; + default: + write_interval_ns = 2000; + break; + } + + /* Completion address in UDM */ + ark_ddm_setup(queue->ddm, cons_index_addr, write_interval_ns); + + return 0; +} + +/* ************************************************************************* */ +void +eth_ark_tx_queue_release(void *vtx_queue) +{ + struct ark_tx_queue *queue; + + queue = (struct ark_tx_queue *)vtx_queue; + + ark_tx_hw_queue_stop(queue); + + queue->cons_index = queue->prod_index; + free_completed_tx(queue); + + rte_free(queue->meta_q); + rte_free(queue->bufs); + rte_free(queue); +} + +/* ************************************************************************* */ +int +eth_ark_tx_queue_stop(struct rte_eth_dev *dev, uint16_t queue_id) +{ + struct ark_tx_queue *queue; + int cnt = 0; + + queue = dev->data->tx_queues[queue_id]; + + /* Wait for DDM to send out all packets. */ + while (queue->cons_index != queue->prod_index) { + usleep(100); + if (cnt++ > 10000) + return -1; + } + + ark_mpu_stop(queue->mpu); + free_completed_tx(queue); + + dev->data->tx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; + + return 0; +} + +int +eth_ark_tx_queue_start(struct rte_eth_dev *dev, uint16_t queue_id) +{ + struct ark_tx_queue *queue; + + queue = dev->data->tx_queues[queue_id]; + if (dev->data->tx_queue_state[queue_id] == RTE_ETH_QUEUE_STATE_STARTED) + return 0; + + ark_mpu_start(queue->mpu); + dev->data->tx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STARTED; + + return 0; +} + +/* ************************************************************************* */ +static void +free_completed_tx(struct ark_tx_queue *queue) +{ + struct rte_mbuf *mbuf; + struct ark_tx_meta *meta; + uint32_t top_index; + + top_index = queue->cons_index; /* read once */ + while (queue->free_index != top_index) { + meta = &queue->meta_q[queue->free_index & queue->queue_mask]; + mbuf = queue->bufs[queue->free_index & queue->queue_mask]; + + if (likely((meta->flags & ARK_DDM_SOP) != 0)) { + /* ref count of the mbuf is checked in this call. 
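rte_pktmbuf_free() also releases any chained segments, so only the entry flagged ARK_DDM_SOP needs an explicit free; the other ring slots of a multi-segment packet are simply skipped.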
*/ + rte_pktmbuf_free(mbuf); + } + queue->free_index++; + } +} + +/* ************************************************************************* */ +void +eth_tx_queue_stats_get(void *vqueue, struct rte_eth_stats *stats) +{ + struct ark_tx_queue *queue; + struct ark_ddm_t *ddm; + uint64_t bytes, pkts; + + queue = vqueue; + ddm = queue->ddm; + + bytes = ark_ddm_queue_byte_count(ddm); + pkts = ark_ddm_queue_pkt_count(ddm); + + stats->q_opackets[queue->queue_index] = pkts; + stats->q_obytes[queue->queue_index] = bytes; + stats->opackets += pkts; + stats->obytes += bytes; + stats->oerrors += queue->tx_errors; +} + +void +eth_tx_queue_stats_reset(void *vqueue) +{ + struct ark_tx_queue *queue; + struct ark_ddm_t *ddm; + + queue = vqueue; + ddm = queue->ddm; + + ark_ddm_queue_reset_stats(ddm); + queue->tx_errors = 0; +} diff --git a/src/spdk/dpdk/drivers/net/ark/ark_ethdev_tx.h b/src/spdk/dpdk/drivers/net/ark/ark_ethdev_tx.h new file mode 100644 index 000000000..e448ce222 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ark/ark_ethdev_tx.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2015-2018 Atomic Rules LLC + */ + +#ifndef _ARK_ETHDEV_TX_H_ +#define _ARK_ETHDEV_TX_H_ + +#include + +#include + + +uint16_t eth_ark_xmit_pkts_noop(void *vtxq, + struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); +uint16_t eth_ark_xmit_pkts(void *vtxq, + struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); +int eth_ark_tx_queue_setup(struct rte_eth_dev *dev, + uint16_t queue_idx, + uint16_t nb_desc, + unsigned int socket_id, + const struct rte_eth_txconf *tx_conf); +void eth_ark_tx_queue_release(void *vtx_queue); +int eth_ark_tx_queue_stop(struct rte_eth_dev *dev, uint16_t queue_id); +int eth_ark_tx_queue_start(struct rte_eth_dev *dev, uint16_t queue_id); +void eth_tx_queue_stats_get(void *vqueue, struct rte_eth_stats *stats); +void eth_tx_queue_stats_reset(void *vqueue); + +#endif diff --git a/src/spdk/dpdk/drivers/net/ark/ark_ext.h b/src/spdk/dpdk/drivers/net/ark/ark_ext.h new file mode 100644 index 000000000..5a987e4d6 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ark/ark_ext.h @@ -0,0 +1,90 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2015-2018 Atomic Rules LLC + */ + +#ifndef _ARK_EXT_H_ +#define _ARK_EXT_H_ + +#include + +/* + * This is the template file for users who which to define a dynamic + * extension to the Arkville PMD. User's who create an extension + * should include this file and define the necessary and desired + * functions. + * Only 1 function is required for an extension, dev_init(); all other + * functions prototyped in this file are optional. + */ + +/* + * Called post PMD init. + * The implementation returns its private data that gets passed into + * all other functions as user_data + * The ARK extension implementation MUST implement this function + */ +void *dev_init(struct rte_eth_dev *dev, void *a_bar, int port_id); + +/* Called during device shutdown */ +void dev_uninit(struct rte_eth_dev *dev, void *user_data); + +/* This call is optional and allows the + * extension to specify the number of supported ports. + */ +uint8_t dev_get_port_count(struct rte_eth_dev *dev, + void *user_data); + +/* + * The following functions are optional and are directly mapped + * from the DPDK PMD ops structure. + * Each function if implemented is called after the ARK PMD + * implementation executes. 
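+ *
+ * A minimal extension (an illustrative sketch; "struct my_ext_state" is
+ * a placeholder name, not part of this API) need only provide:
+ *
+ *     void *dev_init(struct rte_eth_dev *dev, void *a_bar, int port_id)
+ *     {
+ *         return rte_zmalloc("my_ext", sizeof(struct my_ext_state), 0);
+ *     }
+ *
+ * The pointer returned here is handed back to every other extension
+ * call as user_data. Optional functions may simply be omitted; the PMD
+ * skips any entry point it cannot resolve with dlsym().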
+ */ + +int dev_configure(struct rte_eth_dev *dev, + void *user_data); + +int dev_start(struct rte_eth_dev *dev, + void *user_data); + +void dev_stop(struct rte_eth_dev *dev, + void *user_data); + +void dev_close(struct rte_eth_dev *dev, + void *user_data); + +int link_update(struct rte_eth_dev *dev, + int wait_to_complete, + void *user_data); + +int dev_set_link_up(struct rte_eth_dev *dev, + void *user_data); + +int dev_set_link_down(struct rte_eth_dev *dev, + void *user_data); + +int stats_get(struct rte_eth_dev *dev, + struct rte_eth_stats *stats, + void *user_data); + +void stats_reset(struct rte_eth_dev *dev, + void *user_data); + +void mac_addr_add(struct rte_eth_dev *dev, + struct rte_ether_addr *macadr, + uint32_t index, + uint32_t pool, + void *user_data); + +void mac_addr_remove(struct rte_eth_dev *dev, + uint32_t index, + void *user_data); + +void mac_addr_set(struct rte_eth_dev *dev, + struct rte_ether_addr *mac_addr, + void *user_data); + +int set_mtu(struct rte_eth_dev *dev, + uint16_t size, + void *user_data); + +#endif diff --git a/src/spdk/dpdk/drivers/net/ark/ark_global.h b/src/spdk/dpdk/drivers/net/ark/ark_global.h new file mode 100644 index 000000000..403df5900 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ark/ark_global.h @@ -0,0 +1,134 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2015-2018 Atomic Rules LLC + */ + +#ifndef _ARK_GLOBAL_H_ +#define _ARK_GLOBAL_H_ + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ark_pktdir.h" +#include "ark_pktgen.h" +#include "ark_pktchkr.h" + +#define ETH_ARK_ARG_MAXLEN 64 +#define ARK_SYSCTRL_BASE 0x0 +#define ARK_PKTGEN_BASE 0x10000 +#define ARK_MPU_RX_BASE 0x20000 +#define ARK_UDM_BASE 0x30000 +#define ARK_MPU_TX_BASE 0x40000 +#define ARK_DDM_BASE 0x60000 +#define ARK_CMAC_BASE 0x80000 +#define ARK_PKTDIR_BASE 0xa0000 +#define ARK_PKTCHKR_BASE 0x90000 +#define ARK_RCPACING_BASE 0xb0000 +#define ARK_EXTERNAL_BASE 0x100000 +#define ARK_MPU_QOFFSET 0x00100 +#define ARK_MAX_PORTS RTE_MAX_ETHPORTS + +#define offset8(n) n +#define offset16(n) ((n) / 2) +#define offset32(n) ((n) / 4) +#define offset64(n) ((n) / 8) + +/* Maximum length of arg list in bytes */ +#define ARK_MAX_ARG_LEN 256 + +/* + * Structure to store private data for each PF/VF instance. 
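+ *
+ * The def_ptr() helper below declares a small union so each hardware
+ * block's BAR region can be viewed as 64/32/16/8-bit words or as the
+ * raw .v pointer used throughout the PMD.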
+ */ +#define def_ptr(type, name) \ + union type { \ + uint64_t *t64; \ + uint32_t *t32; \ + uint16_t *t16; \ + uint8_t *t8; \ + void *v; \ + } name + +struct ark_user_ext { + void *(*dev_init)(struct rte_eth_dev *, void *abar, int port_id); + void (*dev_uninit)(struct rte_eth_dev *, void *); + int (*dev_get_port_count)(struct rte_eth_dev *, void *); + int (*dev_configure)(struct rte_eth_dev *, void *); + int (*dev_start)(struct rte_eth_dev *, void *); + void (*dev_stop)(struct rte_eth_dev *, void *); + void (*dev_close)(struct rte_eth_dev *, void *); + int (*link_update)(struct rte_eth_dev *, int wait_to_complete, void *); + int (*dev_set_link_up)(struct rte_eth_dev *, void *); + int (*dev_set_link_down)(struct rte_eth_dev *, void *); + int (*stats_get)(struct rte_eth_dev *, struct rte_eth_stats *, void *); + void (*stats_reset)(struct rte_eth_dev *, void *); + void (*mac_addr_add)(struct rte_eth_dev *, + struct rte_ether_addr *, + uint32_t, + uint32_t, + void *); + void (*mac_addr_remove)(struct rte_eth_dev *, uint32_t, void *); + void (*mac_addr_set)(struct rte_eth_dev *, struct rte_ether_addr *, + void *); + int (*set_mtu)(struct rte_eth_dev *, uint16_t, void *); +}; + +struct ark_adapter { + /* User extension private data */ + void *user_data[ARK_MAX_PORTS]; + + /* Pointers to packet generator and checker */ + int start_pg; + ark_pkt_gen_t pg; + ark_pkt_chkr_t pc; + ark_pkt_dir_t pd; + + int num_ports; + + /* Packet generator/checker args */ + char pkt_gen_args[ARK_MAX_ARG_LEN]; + char pkt_chkr_args[ARK_MAX_ARG_LEN]; + uint32_t pkt_dir_v; + + /* eth device */ + struct rte_eth_dev *eth_dev; + + void *d_handle; + struct ark_user_ext user_ext; + + /* Our Bar 0 */ + uint8_t *bar0; + + /* Application Bar */ + uint8_t *a_bar; + + /* Arkville demo block offsets */ + def_ptr(sys_ctrl, sysctrl); + def_ptr(pkt_gen, pktgen); + def_ptr(mpu_rx, mpurx); + def_ptr(UDM, udm); + def_ptr(mpu_tx, mputx); + def_ptr(DDM, ddm); + def_ptr(CMAC, cmac); + def_ptr(external, external); + def_ptr(pkt_dir, pktdir); + def_ptr(pkt_chkr, pktchkr); + + int started; + uint16_t rx_queues; + uint16_t tx_queues; + + struct ark_rqpace_t *rqpacing; +}; + +typedef uint32_t *ark_t; + +#endif diff --git a/src/spdk/dpdk/drivers/net/ark/ark_logs.h b/src/spdk/dpdk/drivers/net/ark/ark_logs.h new file mode 100644 index 000000000..44aac6102 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ark/ark_logs.h @@ -0,0 +1,93 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2015-2018 Atomic Rules LLC + */ + +#ifndef _ARK_DEBUG_H_ +#define _ARK_DEBUG_H_ + +#include +#include + + +/* Configuration option to pad TX packets to 60 bytes */ +#ifdef RTE_LIBRTE_ARK_PAD_TX +#define ARK_TX_PAD_TO_60 1 +#else +#define ARK_TX_PAD_TO_60 0 +#endif + +/* system camel case definition changed to upper case */ +#define PRIU32 PRIu32 +#define PRIU64 PRIu64 + +/* Format specifiers for string data pairs */ +#define ARK_SU32 "\n\t%-20s %'20" PRIU32 +#define ARK_SU64 "\n\t%-20s %'20" PRIU64 +#define ARK_SU64X "\n\t%-20s %#20" PRIx64 +#define ARK_SPTR "\n\t%-20s %20p" + +extern int ark_logtype; + +#define PMD_DRV_LOG(level, fmt, args...) \ + rte_log(RTE_LOG_ ##level, ark_logtype, fmt, ## args) + +/* Conditional trace definitions */ +#define ARK_TRACE_ON(level, fmt, args...) \ + PMD_DRV_LOG(level, fmt, ## args) + +/* This pattern allows compiler check arguments even if disabled */ +#define ARK_TRACE_OFF(level, fmt, args...) 
\ + do { \ + if (0) \ + PMD_DRV_LOG(level, fmt, ## args); \ + } while (0) + +/* tracing including the function name */ +#define ARK_FUNC_ON(level, fmt, args...) \ + PMD_DRV_LOG(level, "%s(): " fmt, __func__, ## args) + +/* tracing including the function name */ +#define ARK_FUNC_OFF(level, fmt, args...) \ + do { \ + if (0) \ + PMD_DRV_LOG(level, "%s(): " fmt, __func__, ## args); \ + } while (0) + + +/* Debug macro for tracing full behavior, function tracing and messages*/ +#ifdef RTE_LIBRTE_ARK_DEBUG_TRACE +#define PMD_FUNC_LOG(level, fmt, ...) ARK_FUNC_ON(level, fmt, ##__VA_ARGS__) +#define PMD_DEBUG_LOG(level, fmt, ...) ARK_TRACE_ON(level, fmt, ##__VA_ARGS__) +#else +#define PMD_FUNC_LOG(level, fmt, ...) ARK_FUNC_OFF(level, fmt, ##__VA_ARGS__) +#define PMD_DEBUG_LOG(level, fmt, ...) ARK_TRACE_OFF(level, fmt, ##__VA_ARGS__) +#endif + + +/* Debug macro for reporting FPGA statistics */ +#ifdef RTE_LIBRTE_ARK_DEBUG_STATS +#define PMD_STATS_LOG(level, fmt, ...) ARK_TRACE_ON(level, fmt, ##__VA_ARGS__) +#else +#define PMD_STATS_LOG(level, fmt, ...) ARK_TRACE_OFF(level, fmt, ##__VA_ARGS__) +#endif + + +/* Debug macro for RX path */ +#ifdef RTE_LIBRTE_ARK_DEBUG_RX +#define ARK_RX_DEBUG 1 +#define PMD_RX_LOG(level, fmt, ...) ARK_TRACE_ON(level, fmt, ##__VA_ARGS__) +#else +#define ARK_RX_DEBUG 0 +#define PMD_RX_LOG(level, fmt, ...) ARK_TRACE_OFF(level, fmt, ##__VA_ARGS__) +#endif + +/* Debug macro for TX path */ +#ifdef RTE_LIBRTE_ARK_DEBUG_TX +#define ARK_TX_DEBUG 1 +#define PMD_TX_LOG(level, fmt, ...) ARK_TRACE_ON(level, fmt, ##__VA_ARGS__) +#else +#define ARK_TX_DEBUG 0 +#define PMD_TX_LOG(level, fmt, ...) ARK_TRACE_OFF(level, fmt, ##__VA_ARGS__) +#endif + +#endif diff --git a/src/spdk/dpdk/drivers/net/ark/ark_mpu.c b/src/spdk/dpdk/drivers/net/ark/ark_mpu.c new file mode 100644 index 000000000..21f840f3c --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ark/ark_mpu.c @@ -0,0 +1,152 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2015-2018 Atomic Rules LLC + */ + +#include + +#include "ark_logs.h" +#include "ark_mpu.h" + +uint16_t +ark_api_num_queues(struct ark_mpu_t *mpu) +{ + return mpu->hw.num_queues; +} + +uint16_t +ark_api_num_queues_per_port(struct ark_mpu_t *mpu, uint16_t ark_ports) +{ + return mpu->hw.num_queues / ark_ports; +} + +int +ark_mpu_verify(struct ark_mpu_t *mpu, uint32_t obj_size) +{ + uint32_t version; + + version = mpu->id.vernum & 0x0000fF00; + if ((mpu->id.idnum != 0x2055504d) || + (mpu->hw.obj_size != obj_size) || + (version != 0x00003100)) { + PMD_DRV_LOG(ERR, + " MPU module not found as expected %08x" + " \"%c%c%c%c %c%c%c%c\"\n", + mpu->id.idnum, + mpu->id.id[0], mpu->id.id[1], + mpu->id.id[2], mpu->id.id[3], + mpu->id.ver[0], mpu->id.ver[1], + mpu->id.ver[2], mpu->id.ver[3]); + PMD_DRV_LOG(ERR, + " MPU HW num_queues: %u hw_depth %u," + " obj_size: %u, obj_per_mrr: %u" + " Expected size %u\n", + mpu->hw.num_queues, + mpu->hw.hw_depth, + mpu->hw.obj_size, + mpu->hw.obj_per_mrr, + obj_size); + return -1; + } + return 0; +} + +void +ark_mpu_stop(struct ark_mpu_t *mpu) +{ + mpu->cfg.command = MPU_CMD_STOP; +} + +void +ark_mpu_start(struct ark_mpu_t *mpu) +{ + mpu->cfg.command = MPU_CMD_RUN; +} + +int +ark_mpu_reset(struct ark_mpu_t *mpu) +{ + int cnt = 0; + + mpu->cfg.command = MPU_CMD_RESET; + + while (mpu->cfg.command != MPU_CMD_IDLE) { + if (cnt++ > 1000) + break; + usleep(10); + } + if (mpu->cfg.command != MPU_CMD_IDLE) { + mpu->cfg.command = MPU_CMD_FORCE_RESET; + usleep(10); + } + ark_mpu_reset_stats(mpu); + return mpu->cfg.command != MPU_CMD_IDLE; +} + 
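+/*
+ * Note on ark_mpu_reset() above: the command register is polled for up
+ * to ~10 ms (1000 iterations of usleep(10)) waiting for MPU_CMD_IDLE;
+ * if that times out, a single MPU_CMD_FORCE_RESET is issued.  A
+ * non-zero return value means the MPU still had not reported idle even
+ * after the forced reset.  ark_mpu_configure() below performs this
+ * reset itself and rejects ring sizes that are not a power of two.
+ */
+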
+void +ark_mpu_reset_stats(struct ark_mpu_t *mpu) +{ + mpu->stats.pci_request = 1; /* reset stats */ +} + +int +ark_mpu_configure(struct ark_mpu_t *mpu, rte_iova_t ring, uint32_t ring_size, + int is_tx) +{ + ark_mpu_reset(mpu); + + if (!rte_is_power_of_2(ring_size)) { + PMD_DRV_LOG(ERR, "ARK: Invalid ring size for MPU %d\n", + ring_size); + return -1; + } + + mpu->cfg.ring_base = ring; + mpu->cfg.ring_size = ring_size; + mpu->cfg.ring_mask = ring_size - 1; + mpu->cfg.min_host_move = is_tx ? 1 : mpu->hw.obj_per_mrr; + mpu->cfg.min_hw_move = mpu->hw.obj_per_mrr; + mpu->cfg.sw_prod_index = 0; + mpu->cfg.hw_cons_index = 0; + return 0; +} + +void +ark_mpu_dump(struct ark_mpu_t *mpu, const char *code, uint16_t qid) +{ + /* DUMP to see that we have started */ + PMD_DEBUG_LOG(DEBUG, "MPU: %s Q: %3u sw_prod %u, hw_cons: %u\n", + code, qid, + mpu->cfg.sw_prod_index, mpu->cfg.hw_cons_index); + PMD_DEBUG_LOG(DEBUG, "MPU: %s state: %d count %d, reserved %d" + " data 0x%08x_%08x 0x%08x_%08x\n", + code, + mpu->debug.state, mpu->debug.count, + mpu->debug.reserved, + mpu->debug.peek[1], + mpu->debug.peek[0], + mpu->debug.peek[3], + mpu->debug.peek[2] + ); + PMD_STATS_LOG(INFO, "MPU: %s Q: %3u" + ARK_SU64 ARK_SU64 ARK_SU64 ARK_SU64 + ARK_SU64 ARK_SU64 ARK_SU64 "\n", + code, qid, + "PCI Request:", mpu->stats.pci_request, + "Queue_empty", mpu->stats.q_empty, + "Queue_q1", mpu->stats.q_q1, + "Queue_q2", mpu->stats.q_q2, + "Queue_q3", mpu->stats.q_q3, + "Queue_q4", mpu->stats.q_q4, + "Queue_full", mpu->stats.q_full + ); +} + +void +ark_mpu_dump_setup(struct ark_mpu_t *mpu, uint16_t q_id) +{ + PMD_DEBUG_LOG(DEBUG, "MPU Setup Q: %u" + ARK_SU64X "\n", + q_id, + "ring_base", mpu->cfg.ring_base + ); +} diff --git a/src/spdk/dpdk/drivers/net/ark/ark_mpu.h b/src/spdk/dpdk/drivers/net/ark/ark_mpu.h new file mode 100644 index 000000000..92c3e67c8 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ark/ark_mpu.h @@ -0,0 +1,125 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2015-2018 Atomic Rules LLC + */ + +#ifndef _ARK_MPU_H_ +#define _ARK_MPU_H_ + +#include + +#include + +/* The MPU or Memory Prefetch Unit is an internal Arkville hardware + * module for moving data between host memory and the hardware FPGA. + * This module is *not* intended for end-user manipulation, hence + * there is minimal documentation. + */ + +/* + * MPU hardware structures + * These are overlay structures to a memory mapped FPGA device. 
These + * structs will never be instantiated in ram memory + */ + +#define ARK_MPU_ID 0x00 +struct ark_mpu_id_t { + union { + char id[4]; + uint32_t idnum; + }; + union { + char ver[4]; + uint32_t vernum; + }; + uint32_t phys_id; + uint32_t mrr_code; +}; + +#define ARK_MPU_HW 0x010 +struct ark_mpu_hw_t { + uint16_t num_queues; + uint16_t reserved; + uint32_t hw_depth; + uint32_t obj_size; + uint32_t obj_per_mrr; +}; + +#define ARK_MPU_CFG 0x040 +struct ark_mpu_cfg_t { + rte_iova_t ring_base; /* rte_iova_t is a uint64_t */ + uint32_t ring_size; + uint32_t ring_mask; + uint32_t min_host_move; + uint32_t min_hw_move; + volatile uint32_t sw_prod_index; + volatile uint32_t hw_cons_index; + volatile uint32_t command; +}; +enum ARK_MPU_COMMAND { + MPU_CMD_IDLE = 1, + MPU_CMD_RUN = 2, + MPU_CMD_STOP = 4, + MPU_CMD_RESET = 8, + MPU_CMD_FORCE_RESET = 16, + MPU_COMMAND_LIMIT = 0xfFFFFFFF +}; + +#define ARK_MPU_STATS 0x080 +struct ark_mpu_stats_t { + volatile uint64_t pci_request; + volatile uint64_t q_empty; + volatile uint64_t q_q1; + volatile uint64_t q_q2; + volatile uint64_t q_q3; + volatile uint64_t q_q4; + volatile uint64_t q_full; +}; + +#define ARK_MPU_DEBUG 0x0C0 +struct ark_mpu_debug_t { + volatile uint32_t state; + uint32_t reserved; + volatile uint32_t count; + volatile uint32_t take; + volatile uint32_t peek[4]; +}; + +/* Consolidated structure */ +struct ark_mpu_t { + struct ark_mpu_id_t id; + uint8_t reserved0[(ARK_MPU_HW - ARK_MPU_ID) + - sizeof(struct ark_mpu_id_t)]; + struct ark_mpu_hw_t hw; + uint8_t reserved1[(ARK_MPU_CFG - ARK_MPU_HW) - + sizeof(struct ark_mpu_hw_t)]; + struct ark_mpu_cfg_t cfg; + uint8_t reserved2[(ARK_MPU_STATS - ARK_MPU_CFG) - + sizeof(struct ark_mpu_cfg_t)]; + struct ark_mpu_stats_t stats; + uint8_t reserved3[(ARK_MPU_DEBUG - ARK_MPU_STATS) - + sizeof(struct ark_mpu_stats_t)]; + struct ark_mpu_debug_t debug; +}; + +uint16_t ark_api_num_queues(struct ark_mpu_t *mpu); +uint16_t ark_api_num_queues_per_port(struct ark_mpu_t *mpu, + uint16_t ark_ports); +int ark_mpu_verify(struct ark_mpu_t *mpu, uint32_t obj_size); +void ark_mpu_stop(struct ark_mpu_t *mpu); +void ark_mpu_start(struct ark_mpu_t *mpu); +int ark_mpu_reset(struct ark_mpu_t *mpu); +int ark_mpu_configure(struct ark_mpu_t *mpu, rte_iova_t ring, + uint32_t ring_size, int is_tx); + +void ark_mpu_dump(struct ark_mpu_t *mpu, const char *msg, uint16_t idx); +void ark_mpu_dump_setup(struct ark_mpu_t *mpu, uint16_t qid); +void ark_mpu_reset_stats(struct ark_mpu_t *mpu); + +/* this action is in a performance critical path */ +static inline void +ark_mpu_set_producer(struct ark_mpu_t *mpu, uint32_t idx) +{ + mpu->cfg.sw_prod_index = idx; +} + +#endif diff --git a/src/spdk/dpdk/drivers/net/ark/ark_pktchkr.c b/src/spdk/dpdk/drivers/net/ark/ark_pktchkr.c new file mode 100644 index 000000000..ef861eea3 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ark/ark_pktchkr.c @@ -0,0 +1,450 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2015-2018 Atomic Rules LLC + */ + +#include +#include +#include +#include + +#include +#include +#include + +#include "ark_pktchkr.h" +#include "ark_logs.h" + +static int set_arg(char *arg, char *val); +static int ark_pktchkr_is_gen_forever(ark_pkt_chkr_t handle); + +#define ARK_MAX_STR_LEN 64 +union OPTV { + int INT; + int BOOL; + uint64_t LONG; + char STR[ARK_MAX_STR_LEN]; +}; + +enum OPTYPE { + OTINT, + OTLONG, + OTBOOL, + OTSTRING +}; + +struct OPTIONS { + char opt[ARK_MAX_STR_LEN]; + enum OPTYPE t; + union OPTV v; +}; + +static struct OPTIONS toptions[] = { + {{"configure"}, 
OTBOOL, {1} }, + {{"port"}, OTINT, {0} }, + {{"mac-dump"}, OTBOOL, {0} }, + {{"dg-mode"}, OTBOOL, {1} }, + {{"run"}, OTBOOL, {0} }, + {{"stop"}, OTBOOL, {0} }, + {{"dump"}, OTBOOL, {0} }, + {{"en_resync"}, OTBOOL, {0} }, + {{"tuser_err_val"}, OTINT, {1} }, + {{"gen_forever"}, OTBOOL, {0} }, + {{"en_slaved_start"}, OTBOOL, {0} }, + {{"vary_length"}, OTBOOL, {0} }, + {{"incr_payload"}, OTINT, {0} }, + {{"incr_first_byte"}, OTBOOL, {0} }, + {{"ins_seq_num"}, OTBOOL, {0} }, + {{"ins_time_stamp"}, OTBOOL, {1} }, + {{"ins_udp_hdr"}, OTBOOL, {0} }, + {{"num_pkts"}, OTLONG, .v.LONG = 10000000000000L}, + {{"payload_byte"}, OTINT, {0x55} }, + {{"pkt_spacing"}, OTINT, {60} }, + {{"pkt_size_min"}, OTINT, {2005} }, + {{"pkt_size_max"}, OTINT, {1514} }, + {{"pkt_size_incr"}, OTINT, {1} }, + {{"eth_type"}, OTINT, {0x0800} }, + {{"src_mac_addr"}, OTLONG, .v.LONG = 0xdC3cF6425060L}, + {{"dst_mac_addr"}, OTLONG, .v.LONG = 0x112233445566L}, + {{"hdr_dW0"}, OTINT, {0x0016e319} }, + {{"hdr_dW1"}, OTINT, {0x27150004} }, + {{"hdr_dW2"}, OTINT, {0x76967bda} }, + {{"hdr_dW3"}, OTINT, {0x08004500} }, + {{"hdr_dW4"}, OTINT, {0x005276ed} }, + {{"hdr_dW5"}, OTINT, {0x40004006} }, + {{"hdr_dW6"}, OTINT, {0x56cfc0a8} }, + {{"start_offset"}, OTINT, {0} }, + {{"dst_ip"}, OTSTRING, .v.STR = "169.254.10.240"}, + {{"dst_port"}, OTINT, {65536} }, + {{"src_port"}, OTINT, {65536} }, +}; + +ark_pkt_chkr_t +ark_pktchkr_init(void *addr, int ord, int l2_mode) +{ + struct ark_pkt_chkr_inst *inst = + rte_malloc("ark_pkt_chkr_inst", + sizeof(struct ark_pkt_chkr_inst), 0); + if (inst == NULL) { + PMD_DRV_LOG(ERR, "Failed to malloc ark_pkt_chkr_inst.\n"); + return inst; + } + inst->sregs = (struct ark_pkt_chkr_stat_regs *)addr; + inst->cregs = + (struct ark_pkt_chkr_ctl_regs *)(((uint8_t *)addr) + 0x100); + inst->ordinal = ord; + inst->l2_mode = l2_mode; + return inst; +} + +void +ark_pktchkr_uninit(ark_pkt_chkr_t handle) +{ + rte_free(handle); +} + +void +ark_pktchkr_run(ark_pkt_chkr_t handle) +{ + struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle; + + inst->sregs->pkt_start_stop = 0; + inst->sregs->pkt_start_stop = 0x1; +} + +int +ark_pktchkr_stopped(ark_pkt_chkr_t handle) +{ + struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle; + uint32_t r = inst->sregs->pkt_start_stop; + + return (((r >> 16) & 1) == 1); +} + +void +ark_pktchkr_stop(ark_pkt_chkr_t handle) +{ + struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle; + int wait_cycle = 10; + + inst->sregs->pkt_start_stop = 0; + while (!ark_pktchkr_stopped(handle) && (wait_cycle > 0)) { + usleep(1000); + wait_cycle--; + PMD_DEBUG_LOG(DEBUG, "Waiting for pktchk %d to stop...\n", + inst->ordinal); + } + PMD_DEBUG_LOG(DEBUG, "Pktchk %d stopped.\n", inst->ordinal); +} + +int +ark_pktchkr_is_running(ark_pkt_chkr_t handle) +{ + struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle; + uint32_t r = inst->sregs->pkt_start_stop; + + return ((r & 1) == 1); +} + +static void +ark_pktchkr_set_pkt_ctrl(ark_pkt_chkr_t handle, + uint32_t gen_forever, + uint32_t vary_length, + uint32_t incr_payload, + uint32_t incr_first_byte, + uint32_t ins_seq_num, + uint32_t ins_udp_hdr, + uint32_t en_resync, + uint32_t tuser_err_val, + uint32_t ins_time_stamp) +{ + struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle; + uint32_t r = (tuser_err_val << 16) | (en_resync << 0); + + inst->sregs->pkt_ctrl = r; + if (!inst->l2_mode) + ins_udp_hdr = 0; + r = ((gen_forever << 24) | + (vary_length << 16) | + (incr_payload << 12) | + (incr_first_byte 
<< 8) | + (ins_time_stamp << 5) | + (ins_seq_num << 4) | + ins_udp_hdr); + inst->cregs->pkt_ctrl = r; +} + +static +int +ark_pktchkr_is_gen_forever(ark_pkt_chkr_t handle) +{ + struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle; + uint32_t r = inst->cregs->pkt_ctrl; + + return (((r >> 24) & 1) == 1); +} + +int +ark_pktchkr_wait_done(ark_pkt_chkr_t handle) +{ + struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle; + + if (ark_pktchkr_is_gen_forever(handle)) { + PMD_DEBUG_LOG(ERR, "Pktchk wait_done will not terminate" + " because gen_forever=1\n"); + return -1; + } + int wait_cycle = 10; + + while (!ark_pktchkr_stopped(handle) && (wait_cycle > 0)) { + usleep(1000); + wait_cycle--; + PMD_DEBUG_LOG(DEBUG, "Waiting for packet checker %d's" + " internal pktgen to finish sending...\n", + inst->ordinal); + PMD_DEBUG_LOG(DEBUG, "Pktchk %d's pktgen done.\n", + inst->ordinal); + } + return 0; +} + +int +ark_pktchkr_get_pkts_sent(ark_pkt_chkr_t handle) +{ + struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle; + + return inst->cregs->pkts_sent; +} + +void +ark_pktchkr_set_payload_byte(ark_pkt_chkr_t handle, uint32_t b) +{ + struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle; + + inst->cregs->pkt_payload = b; +} + +void +ark_pktchkr_set_pkt_size_min(ark_pkt_chkr_t handle, uint32_t x) +{ + struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle; + + inst->cregs->pkt_size_min = x; +} + +void +ark_pktchkr_set_pkt_size_max(ark_pkt_chkr_t handle, uint32_t x) +{ + struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle; + + inst->cregs->pkt_size_max = x; +} + +void +ark_pktchkr_set_pkt_size_incr(ark_pkt_chkr_t handle, uint32_t x) +{ + struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle; + + inst->cregs->pkt_size_incr = x; +} + +void +ark_pktchkr_set_num_pkts(ark_pkt_chkr_t handle, uint32_t x) +{ + struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle; + + inst->cregs->num_pkts = x; +} + +void +ark_pktchkr_set_src_mac_addr(ark_pkt_chkr_t handle, uint64_t mac_addr) +{ + struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle; + + inst->cregs->src_mac_addr_h = (mac_addr >> 32) & 0xffff; + inst->cregs->src_mac_addr_l = mac_addr & 0xffffffff; +} + +void +ark_pktchkr_set_dst_mac_addr(ark_pkt_chkr_t handle, uint64_t mac_addr) +{ + struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle; + + inst->cregs->dst_mac_addr_h = (mac_addr >> 32) & 0xffff; + inst->cregs->dst_mac_addr_l = mac_addr & 0xffffffff; +} + +void +ark_pktchkr_set_eth_type(ark_pkt_chkr_t handle, uint32_t x) +{ + struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle; + + inst->cregs->eth_type = x; +} + +void +ark_pktchkr_set_hdr_dW(ark_pkt_chkr_t handle, uint32_t *hdr) +{ + uint32_t i; + struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle; + + for (i = 0; i < 7; i++) + inst->cregs->hdr_dw[i] = hdr[i]; +} + +void +ark_pktchkr_dump_stats(ark_pkt_chkr_t handle) +{ + struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle; + + PMD_STATS_LOG(INFO, "pkts_rcvd = (%'u)\n", + inst->sregs->pkts_rcvd); + PMD_STATS_LOG(INFO, "bytes_rcvd = (%'" PRIU64 ")\n", + inst->sregs->bytes_rcvd); + PMD_STATS_LOG(INFO, "pkts_ok = (%'u)\n", + inst->sregs->pkts_ok); + PMD_STATS_LOG(INFO, "pkts_mismatch = (%'u)\n", + inst->sregs->pkts_mismatch); + PMD_STATS_LOG(INFO, "pkts_err = (%'u)\n", + inst->sregs->pkts_err); + PMD_STATS_LOG(INFO, "first_mismatch = (%'u)\n", + inst->sregs->first_mismatch); + 
PMD_STATS_LOG(INFO, "resync_events = (%'u)\n", + inst->sregs->resync_events); + PMD_STATS_LOG(INFO, "pkts_missing = (%'u)\n", + inst->sregs->pkts_missing); + PMD_STATS_LOG(INFO, "min_latency = (%'u)\n", + inst->sregs->min_latency); + PMD_STATS_LOG(INFO, "max_latency = (%'u)\n", + inst->sregs->max_latency); +} + +static struct OPTIONS * +options(const char *id) +{ + unsigned int i; + + for (i = 0; i < sizeof(toptions) / sizeof(struct OPTIONS); i++) { + if (strcmp(id, toptions[i].opt) == 0) + return &toptions[i]; + } + PMD_DRV_LOG(ERR, + "pktchkr: Could not find requested option!, option = %s\n", + id); + return NULL; +} + +static int +set_arg(char *arg, char *val) +{ + struct OPTIONS *o = options(arg); + + if (o) { + switch (o->t) { + case OTINT: + case OTBOOL: + o->v.INT = atoi(val); + break; + case OTLONG: + o->v.INT = atoll(val); + break; + case OTSTRING: + strlcpy(o->v.STR, val, ARK_MAX_STR_LEN); + break; + } + return 1; + } + return 0; +} + +/****** + * Arg format = "opt0=v,opt_n=v ..." + ******/ +void +ark_pktchkr_parse(char *args) +{ + char *argv, *v; + const char toks[] = "=\n\t\v\f \r"; + argv = strtok(args, toks); + v = strtok(NULL, toks); + while (argv && v) { + set_arg(argv, v); + argv = strtok(NULL, toks); + v = strtok(NULL, toks); + } +} + +static int32_t parse_ipv4_string(char const *ip_address); +static int32_t +parse_ipv4_string(char const *ip_address) +{ + unsigned int ip[4]; + + if (sscanf(ip_address, "%u.%u.%u.%u", + &ip[0], &ip[1], &ip[2], &ip[3]) != 4) + return 0; + return ip[3] + ip[2] * 0x100 + ip[1] * 0x10000ul + ip[0] * 0x1000000ul; +} + +void +ark_pktchkr_setup(ark_pkt_chkr_t handle) +{ + uint32_t hdr[7]; + int32_t dst_ip = parse_ipv4_string(options("dst_ip")->v.STR); + + if (!options("stop")->v.BOOL && options("configure")->v.BOOL) { + ark_pktchkr_set_payload_byte(handle, + options("payload_byte")->v.INT); + ark_pktchkr_set_src_mac_addr(handle, + options("src_mac_addr")->v.INT); + ark_pktchkr_set_dst_mac_addr(handle, + options("dst_mac_addr")->v.LONG); + + ark_pktchkr_set_eth_type(handle, + options("eth_type")->v.INT); + if (options("dg-mode")->v.BOOL) { + hdr[0] = options("hdr_dW0")->v.INT; + hdr[1] = options("hdr_dW1")->v.INT; + hdr[2] = options("hdr_dW2")->v.INT; + hdr[3] = options("hdr_dW3")->v.INT; + hdr[4] = options("hdr_dW4")->v.INT; + hdr[5] = options("hdr_dW5")->v.INT; + hdr[6] = options("hdr_dW6")->v.INT; + } else { + hdr[0] = dst_ip; + hdr[1] = options("dst_port")->v.INT; + hdr[2] = options("src_port")->v.INT; + hdr[3] = 0; + hdr[4] = 0; + hdr[5] = 0; + hdr[6] = 0; + } + ark_pktchkr_set_hdr_dW(handle, hdr); + ark_pktchkr_set_num_pkts(handle, + options("num_pkts")->v.INT); + ark_pktchkr_set_pkt_size_min(handle, + options("pkt_size_min")->v.INT); + ark_pktchkr_set_pkt_size_max(handle, + options("pkt_size_max")->v.INT); + ark_pktchkr_set_pkt_size_incr(handle, + options("pkt_size_incr")->v.INT); + ark_pktchkr_set_pkt_ctrl(handle, + options("gen_forever")->v.BOOL, + options("vary_length")->v.BOOL, + options("incr_payload")->v.BOOL, + options("incr_first_byte")->v.BOOL, + options("ins_seq_num")->v.INT, + options("ins_udp_hdr")->v.BOOL, + options("en_resync")->v.BOOL, + options("tuser_err_val")->v.INT, + options("ins_time_stamp")->v.INT); + } + + if (options("stop")->v.BOOL) + ark_pktchkr_stop(handle); + + if (options("run")->v.BOOL) { + PMD_DEBUG_LOG(DEBUG, "Starting packet checker on port %d\n", + options("port")->v.INT); + ark_pktchkr_run(handle); + } +} diff --git a/src/spdk/dpdk/drivers/net/ark/ark_pktchkr.h b/src/spdk/dpdk/drivers/net/ark/ark_pktchkr.h 
new file mode 100644 index 000000000..b36228177 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ark/ark_pktchkr.h @@ -0,0 +1,88 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2015-2018 Atomic Rules LLC + */ + +#ifndef _ARK_PKTCHKR_H_ +#define _ARK_PKTCHKR_H_ + +#include +#include + +#define ARK_PKTCHKR_BASE_ADR 0x90000 + +typedef void *ark_pkt_chkr_t; + +/* The packet checker is an internal Arkville hardware module, which + * verifies packet streams generated from the corresponding packet + * generator. This module is used for Arkville testing. + * This module is *not* intended for end-user manipulation, hence + * there is minimal documentation. + */ + +/* + * This are overlay structures to a memory mapped FPGA device. These + * structs will never be instantiated in ram memory + */ +struct ark_pkt_chkr_stat_regs { + uint32_t r0; + uint32_t pkt_start_stop; + uint32_t pkt_ctrl; + uint32_t pkts_rcvd; + uint64_t bytes_rcvd; + uint32_t pkts_ok; + uint32_t pkts_mismatch; + uint32_t pkts_err; + uint32_t first_mismatch; + uint32_t resync_events; + uint32_t pkts_missing; + uint32_t min_latency; + uint32_t max_latency; +} __rte_packed; + +struct ark_pkt_chkr_ctl_regs { + uint32_t pkt_ctrl; + uint32_t pkt_payload; + uint32_t pkt_size_min; + uint32_t pkt_size_max; + uint32_t pkt_size_incr; + uint32_t num_pkts; + uint32_t pkts_sent; + uint32_t src_mac_addr_l; + uint32_t src_mac_addr_h; + uint32_t dst_mac_addr_l; + uint32_t dst_mac_addr_h; + uint32_t eth_type; + uint32_t hdr_dw[7]; +} __rte_packed; + +struct ark_pkt_chkr_inst { + struct rte_eth_dev_info *dev_info; + volatile struct ark_pkt_chkr_stat_regs *sregs; + volatile struct ark_pkt_chkr_ctl_regs *cregs; + int l2_mode; + int ordinal; +}; + +/* packet checker functions */ +ark_pkt_chkr_t ark_pktchkr_init(void *addr, int ord, int l2_mode); +void ark_pktchkr_uninit(ark_pkt_chkr_t handle); +void ark_pktchkr_run(ark_pkt_chkr_t handle); +int ark_pktchkr_stopped(ark_pkt_chkr_t handle); +void ark_pktchkr_stop(ark_pkt_chkr_t handle); +int ark_pktchkr_is_running(ark_pkt_chkr_t handle); +int ark_pktchkr_get_pkts_sent(ark_pkt_chkr_t handle); +void ark_pktchkr_set_payload_byte(ark_pkt_chkr_t handle, uint32_t b); +void ark_pktchkr_set_pkt_size_min(ark_pkt_chkr_t handle, uint32_t x); +void ark_pktchkr_set_pkt_size_max(ark_pkt_chkr_t handle, uint32_t x); +void ark_pktchkr_set_pkt_size_incr(ark_pkt_chkr_t handle, uint32_t x); +void ark_pktchkr_set_num_pkts(ark_pkt_chkr_t handle, uint32_t x); +void ark_pktchkr_set_src_mac_addr(ark_pkt_chkr_t handle, uint64_t mac_addr); +void ark_pktchkr_set_dst_mac_addr(ark_pkt_chkr_t handle, uint64_t mac_addr); +void ark_pktchkr_set_eth_type(ark_pkt_chkr_t handle, uint32_t x); +void ark_pktchkr_set_hdr_dW(ark_pkt_chkr_t handle, uint32_t *hdr); +void ark_pktchkr_parse(char *args); +void ark_pktchkr_setup(ark_pkt_chkr_t handle); +void ark_pktchkr_dump_stats(ark_pkt_chkr_t handle); +int ark_pktchkr_wait_done(ark_pkt_chkr_t handle); + +#endif diff --git a/src/spdk/dpdk/drivers/net/ark/ark_pktdir.c b/src/spdk/dpdk/drivers/net/ark/ark_pktdir.c new file mode 100644 index 000000000..1f2c8182a --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ark/ark_pktdir.c @@ -0,0 +1,56 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2015-2018 Atomic Rules LLC + */ + +#include +#include + +#include "ark_pktdir.h" +#include "ark_global.h" +#include "ark_logs.h" + + +ark_pkt_dir_t +ark_pktdir_init(void *base) +{ + struct ark_pkt_dir_inst *inst = + rte_malloc("ark_pkt_dir_inst", + sizeof(struct ark_pkt_dir_inst), + 0); + if (inst == 
NULL) { + PMD_DRV_LOG(ERR, "Failed to malloc ark_pkt_dir_inst.\n"); + return inst; + } + inst->regs = (struct ark_pkt_dir_regs *)base; + inst->regs->ctrl = 0x00110110; /* POR state */ + return inst; +} + +void +ark_pktdir_uninit(ark_pkt_dir_t handle) +{ + struct ark_pkt_dir_inst *inst = (struct ark_pkt_dir_inst *)handle; + + rte_free(inst); +} + +void +ark_pktdir_setup(ark_pkt_dir_t handle, uint32_t v) +{ + struct ark_pkt_dir_inst *inst = (struct ark_pkt_dir_inst *)handle; + inst->regs->ctrl = v; +} + +uint32_t +ark_pktdir_status(ark_pkt_dir_t handle) +{ + struct ark_pkt_dir_inst *inst = (struct ark_pkt_dir_inst *)handle; + return inst->regs->ctrl; +} + +uint32_t +ark_pktdir_stall_cnt(ark_pkt_dir_t handle) +{ + struct ark_pkt_dir_inst *inst = (struct ark_pkt_dir_inst *)handle; + return inst->regs->stall_cnt; +} diff --git a/src/spdk/dpdk/drivers/net/ark/ark_pktdir.h b/src/spdk/dpdk/drivers/net/ark/ark_pktdir.h new file mode 100644 index 000000000..4afd128f9 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ark/ark_pktdir.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2015-2018 Atomic Rules LLC + */ + +#ifndef _ARK_PKTDIR_H_ +#define _ARK_PKTDIR_H_ + +#include + +#define ARK_PKTDIR_BASE_ADR 0xa0000 + +typedef void *ark_pkt_dir_t; + + +/* The packet director is an internal Arkville hardware module for + * directing packet data in non-typical flows, such as testing. + * This module is *not* intended for end-user manipulation, hence + * there is minimal documentation. + */ + +/* + * This is an overlay structures to a memory mapped FPGA device. These + * structs will never be instantiated in ram memory + */ +struct ark_pkt_dir_regs { + uint32_t ctrl; + uint32_t status; + uint32_t stall_cnt; +} __rte_packed; + +struct ark_pkt_dir_inst { + volatile struct ark_pkt_dir_regs *regs; +}; + +ark_pkt_dir_t ark_pktdir_init(void *base); +void ark_pktdir_uninit(ark_pkt_dir_t handle); +void ark_pktdir_setup(ark_pkt_dir_t handle, uint32_t v); +uint32_t ark_pktdir_stall_cnt(ark_pkt_dir_t handle); +uint32_t ark_pktdir_status(ark_pkt_dir_t handle); + +#endif diff --git a/src/spdk/dpdk/drivers/net/ark/ark_pktgen.c b/src/spdk/dpdk/drivers/net/ark/ark_pktgen.c new file mode 100644 index 000000000..2cae252d6 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ark/ark_pktgen.c @@ -0,0 +1,472 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2015-2018 Atomic Rules LLC + */ + +#include +#include +#include +#include + +#include +#include + +#include +#include + +#include "ark_pktgen.h" +#include "ark_logs.h" + +#define ARK_MAX_STR_LEN 64 +union OPTV { + int INT; + int BOOL; + uint64_t LONG; + char STR[ARK_MAX_STR_LEN]; +}; + +enum OPTYPE { + OTINT, + OTLONG, + OTBOOL, + OTSTRING +}; + +struct OPTIONS { + char opt[ARK_MAX_STR_LEN]; + enum OPTYPE t; + union OPTV v; +}; + +static struct OPTIONS toptions[] = { + {{"configure"}, OTBOOL, {1} }, + {{"dg-mode"}, OTBOOL, {1} }, + {{"run"}, OTBOOL, {0} }, + {{"pause"}, OTBOOL, {0} }, + {{"reset"}, OTBOOL, {0} }, + {{"dump"}, OTBOOL, {0} }, + {{"gen_forever"}, OTBOOL, {0} }, + {{"en_slaved_start"}, OTBOOL, {0} }, + {{"vary_length"}, OTBOOL, {0} }, + {{"incr_payload"}, OTBOOL, {0} }, + {{"incr_first_byte"}, OTBOOL, {0} }, + {{"ins_seq_num"}, OTBOOL, {0} }, + {{"ins_time_stamp"}, OTBOOL, {1} }, + {{"ins_udp_hdr"}, OTBOOL, {0} }, + {{"num_pkts"}, OTLONG, .v.LONG = 100000000}, + {{"payload_byte"}, OTINT, {0x55} }, + {{"pkt_spacing"}, OTINT, {130} }, + {{"pkt_size_min"}, OTINT, {2006} }, + {{"pkt_size_max"}, OTINT, {1514} }, + {{"pkt_size_incr"}, 
OTINT, {1} }, + {{"eth_type"}, OTINT, {0x0800} }, + {{"src_mac_addr"}, OTLONG, .v.LONG = 0xdC3cF6425060L}, + {{"dst_mac_addr"}, OTLONG, .v.LONG = 0x112233445566L}, + {{"hdr_dW0"}, OTINT, {0x0016e319} }, + {{"hdr_dW1"}, OTINT, {0x27150004} }, + {{"hdr_dW2"}, OTINT, {0x76967bda} }, + {{"hdr_dW3"}, OTINT, {0x08004500} }, + {{"hdr_dW4"}, OTINT, {0x005276ed} }, + {{"hdr_dW5"}, OTINT, {0x40004006} }, + {{"hdr_dW6"}, OTINT, {0x56cfc0a8} }, + {{"start_offset"}, OTINT, {0} }, + {{"bytes_per_cycle"}, OTINT, {10} }, + {{"shaping"}, OTBOOL, {0} }, + {{"dst_ip"}, OTSTRING, .v.STR = "169.254.10.240"}, + {{"dst_port"}, OTINT, {65536} }, + {{"src_port"}, OTINT, {65536} }, +}; + +ark_pkt_gen_t +ark_pktgen_init(void *adr, int ord, int l2_mode) +{ + struct ark_pkt_gen_inst *inst = + rte_malloc("ark_pkt_gen_inst_pmd", + sizeof(struct ark_pkt_gen_inst), 0); + if (inst == NULL) { + PMD_DRV_LOG(ERR, "Failed to malloc ark_pkt_gen_inst.\n"); + return inst; + } + inst->regs = (struct ark_pkt_gen_regs *)adr; + inst->ordinal = ord; + inst->l2_mode = l2_mode; + return inst; +} + +void +ark_pktgen_uninit(ark_pkt_gen_t handle) +{ + rte_free(handle); +} + +void +ark_pktgen_run(ark_pkt_gen_t handle) +{ + struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle; + + inst->regs->pkt_start_stop = 1; +} + +uint32_t +ark_pktgen_paused(ark_pkt_gen_t handle) +{ + struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle; + uint32_t r = inst->regs->pkt_start_stop; + + return (((r >> 16) & 1) == 1); +} + +void +ark_pktgen_pause(ark_pkt_gen_t handle) +{ + struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle; + int cnt = 0; + + inst->regs->pkt_start_stop = 0; + + while (!ark_pktgen_paused(handle)) { + usleep(1000); + if (cnt++ > 100) { + PMD_DRV_LOG(ERR, "Pktgen %d failed to pause.\n", + inst->ordinal); + break; + } + } + PMD_DEBUG_LOG(DEBUG, "Pktgen %d paused.\n", inst->ordinal); +} + +void +ark_pktgen_reset(ark_pkt_gen_t handle) +{ + struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle; + + if (!ark_pktgen_is_running(handle) && + !ark_pktgen_paused(handle)) { + PMD_DEBUG_LOG(DEBUG, "Pktgen %d is not running" + " and is not paused. No need to reset.\n", + inst->ordinal); + return; + } + + if (ark_pktgen_is_running(handle) && + !ark_pktgen_paused(handle)) { + PMD_DEBUG_LOG(DEBUG, + "Pktgen %d is not paused. 
Pausing first.\n", + inst->ordinal); + ark_pktgen_pause(handle); + } + + PMD_DEBUG_LOG(DEBUG, "Resetting pktgen %d.\n", inst->ordinal); + inst->regs->pkt_start_stop = (1 << 8); +} + +uint32_t +ark_pktgen_tx_done(ark_pkt_gen_t handle) +{ + struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle; + uint32_t r = inst->regs->pkt_start_stop; + + return (((r >> 24) & 1) == 1); +} + +uint32_t +ark_pktgen_is_running(ark_pkt_gen_t handle) +{ + struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle; + uint32_t r = inst->regs->pkt_start_stop; + + return ((r & 1) == 1); +} + +uint32_t +ark_pktgen_is_gen_forever(ark_pkt_gen_t handle) +{ + struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle; + uint32_t r = inst->regs->pkt_ctrl; + + return (((r >> 24) & 1) == 1); +} + +void +ark_pktgen_wait_done(ark_pkt_gen_t handle) +{ + struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle; + int wait_cycle = 10; + + if (ark_pktgen_is_gen_forever(handle)) + PMD_DRV_LOG(ERR, "Pktgen wait_done will not terminate" + " because gen_forever=1\n"); + + while (!ark_pktgen_tx_done(handle) && (wait_cycle > 0)) { + usleep(1000); + wait_cycle--; + PMD_DEBUG_LOG(DEBUG, + "Waiting for pktgen %d to finish sending...\n", + inst->ordinal); + } + PMD_DEBUG_LOG(DEBUG, "Pktgen %d done.\n", inst->ordinal); +} + +uint32_t +ark_pktgen_get_pkts_sent(ark_pkt_gen_t handle) +{ + struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle; + return inst->regs->pkts_sent; +} + +void +ark_pktgen_set_payload_byte(ark_pkt_gen_t handle, uint32_t b) +{ + struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle; + inst->regs->pkt_payload = b; +} + +void +ark_pktgen_set_pkt_spacing(ark_pkt_gen_t handle, uint32_t x) +{ + struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle; + inst->regs->pkt_spacing = x; +} + +void +ark_pktgen_set_pkt_size_min(ark_pkt_gen_t handle, uint32_t x) +{ + struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle; + inst->regs->pkt_size_min = x; +} + +void +ark_pktgen_set_pkt_size_max(ark_pkt_gen_t handle, uint32_t x) +{ + struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle; + inst->regs->pkt_size_max = x; +} + +void +ark_pktgen_set_pkt_size_incr(ark_pkt_gen_t handle, uint32_t x) +{ + struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle; + inst->regs->pkt_size_incr = x; +} + +void +ark_pktgen_set_num_pkts(ark_pkt_gen_t handle, uint32_t x) +{ + struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle; + inst->regs->num_pkts = x; +} + +void +ark_pktgen_set_src_mac_addr(ark_pkt_gen_t handle, uint64_t mac_addr) +{ + struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle; + inst->regs->src_mac_addr_h = (mac_addr >> 32) & 0xffff; + inst->regs->src_mac_addr_l = mac_addr & 0xffffffff; +} + +void +ark_pktgen_set_dst_mac_addr(ark_pkt_gen_t handle, uint64_t mac_addr) +{ + struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle; + inst->regs->dst_mac_addr_h = (mac_addr >> 32) & 0xffff; + inst->regs->dst_mac_addr_l = mac_addr & 0xffffffff; +} + +void +ark_pktgen_set_eth_type(ark_pkt_gen_t handle, uint32_t x) +{ + struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle; + inst->regs->eth_type = x; +} + +void +ark_pktgen_set_hdr_dW(ark_pkt_gen_t handle, uint32_t *hdr) +{ + uint32_t i; + struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle; + + for (i = 0; i < 7; i++) + inst->regs->hdr_dw[i] = hdr[i]; +} + +void +ark_pktgen_set_start_offset(ark_pkt_gen_t handle, uint32_t x) +{ + struct 
ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle; + + inst->regs->start_offset = x; +} + +static struct OPTIONS * +options(const char *id) +{ + unsigned int i; + + for (i = 0; i < sizeof(toptions) / sizeof(struct OPTIONS); i++) { + if (strcmp(id, toptions[i].opt) == 0) + return &toptions[i]; + } + + PMD_DRV_LOG(ERR, + "Pktgen: Could not find requested option!, " + "option = %s\n", + id + ); + return NULL; +} + +static int pmd_set_arg(char *arg, char *val); +static int +pmd_set_arg(char *arg, char *val) +{ + struct OPTIONS *o = options(arg); + + if (o) { + switch (o->t) { + case OTINT: + case OTBOOL: + o->v.INT = atoi(val); + break; + case OTLONG: + o->v.INT = atoll(val); + break; + case OTSTRING: + strlcpy(o->v.STR, val, ARK_MAX_STR_LEN); + break; + } + return 1; + } + return 0; +} + +/****** + * Arg format = "opt0=v,opt_n=v ..." + ******/ +void +ark_pktgen_parse(char *args) +{ + char *argv, *v; + const char toks[] = " =\n\t\v\f \r"; + argv = strtok(args, toks); + v = strtok(NULL, toks); + while (argv && v) { + pmd_set_arg(argv, v); + argv = strtok(NULL, toks); + v = strtok(NULL, toks); + } +} + +static int32_t parse_ipv4_string(char const *ip_address); +static int32_t +parse_ipv4_string(char const *ip_address) +{ + unsigned int ip[4]; + + if (sscanf(ip_address, "%u.%u.%u.%u", + &ip[0], &ip[1], &ip[2], &ip[3]) != 4) + return 0; + return ip[3] + ip[2] * 0x100 + ip[1] * 0x10000ul + ip[0] * 0x1000000ul; +} + +static void +ark_pktgen_set_pkt_ctrl(ark_pkt_gen_t handle, + uint32_t gen_forever, + uint32_t en_slaved_start, + uint32_t vary_length, + uint32_t incr_payload, + uint32_t incr_first_byte, + uint32_t ins_seq_num, + uint32_t ins_udp_hdr, + uint32_t ins_time_stamp) +{ + uint32_t r; + struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle; + + if (!inst->l2_mode) + ins_udp_hdr = 0; + + r = ((gen_forever << 24) | + (en_slaved_start << 20) | + (vary_length << 16) | + (incr_payload << 12) | + (incr_first_byte << 8) | + (ins_time_stamp << 5) | + (ins_seq_num << 4) | + ins_udp_hdr); + + inst->regs->bytes_per_cycle = options("bytes_per_cycle")->v.INT; + if (options("shaping")->v.BOOL) + r = r | (1 << 28); /* enable shaping */ + + inst->regs->pkt_ctrl = r; +} + +void +ark_pktgen_setup(ark_pkt_gen_t handle) +{ + uint32_t hdr[7]; + int32_t dst_ip = parse_ipv4_string(options("dst_ip")->v.STR); + + if (!options("pause")->v.BOOL && + (!options("reset")->v.BOOL && + (options("configure")->v.BOOL))) { + ark_pktgen_set_payload_byte(handle, + options("payload_byte")->v.INT); + ark_pktgen_set_src_mac_addr(handle, + options("src_mac_addr")->v.INT); + ark_pktgen_set_dst_mac_addr(handle, + options("dst_mac_addr")->v.LONG); + ark_pktgen_set_eth_type(handle, + options("eth_type")->v.INT); + + if (options("dg-mode")->v.BOOL) { + hdr[0] = options("hdr_dW0")->v.INT; + hdr[1] = options("hdr_dW1")->v.INT; + hdr[2] = options("hdr_dW2")->v.INT; + hdr[3] = options("hdr_dW3")->v.INT; + hdr[4] = options("hdr_dW4")->v.INT; + hdr[5] = options("hdr_dW5")->v.INT; + hdr[6] = options("hdr_dW6")->v.INT; + } else { + hdr[0] = dst_ip; + hdr[1] = options("dst_port")->v.INT; + hdr[2] = options("src_port")->v.INT; + hdr[3] = 0; + hdr[4] = 0; + hdr[5] = 0; + hdr[6] = 0; + } + ark_pktgen_set_hdr_dW(handle, hdr); + ark_pktgen_set_num_pkts(handle, + options("num_pkts")->v.INT); + ark_pktgen_set_pkt_size_min(handle, + options("pkt_size_min")->v.INT); + ark_pktgen_set_pkt_size_max(handle, + options("pkt_size_max")->v.INT); + ark_pktgen_set_pkt_size_incr(handle, + options("pkt_size_incr")->v.INT); + 
ark_pktgen_set_pkt_spacing(handle, + options("pkt_spacing")->v.INT); + ark_pktgen_set_start_offset(handle, + options("start_offset")->v.INT); + ark_pktgen_set_pkt_ctrl(handle, + options("gen_forever")->v.BOOL, + options("en_slaved_start")->v.BOOL, + options("vary_length")->v.BOOL, + options("incr_payload")->v.BOOL, + options("incr_first_byte")->v.BOOL, + options("ins_seq_num")->v.INT, + options("ins_udp_hdr")->v.BOOL, + options("ins_time_stamp")->v.INT); + } + + if (options("pause")->v.BOOL) + ark_pktgen_pause(handle); + + if (options("reset")->v.BOOL) + ark_pktgen_reset(handle); + if (options("run")->v.BOOL) { + PMD_DEBUG_LOG(DEBUG, "Starting packet generator on port %d\n", + options("port")->v.INT); + ark_pktgen_run(handle); + } +} diff --git a/src/spdk/dpdk/drivers/net/ark/ark_pktgen.h b/src/spdk/dpdk/drivers/net/ark/ark_pktgen.h new file mode 100644 index 000000000..c61dfee6d --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ark/ark_pktgen.h @@ -0,0 +1,79 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2015-2018 Atomic Rules LLC + */ + +#ifndef _ARK_PKTGEN_H_ +#define _ARK_PKTGEN_H_ + +#include +#include + +#define ARK_PKTGEN_BASE_ADR 0x10000 + +typedef void *ark_pkt_gen_t; + +/* The packet generator is an internal Arkville hardware module, which + * generates known packets for use in integrity and line-rate testing. + * This module is *not* intended for end-user manipulation, hence + * there is minimal documentation. + */ + +/* + * This is an overlay structure to a memory mapped FPGA device. These + * structs will never be instantiated in ram memory + */ +struct ark_pkt_gen_regs { + uint32_t r0; + volatile uint32_t pkt_start_stop; + volatile uint32_t pkt_ctrl; + uint32_t pkt_payload; + uint32_t pkt_spacing; + uint32_t pkt_size_min; + uint32_t pkt_size_max; + uint32_t pkt_size_incr; + volatile uint32_t num_pkts; + volatile uint32_t pkts_sent; + uint32_t src_mac_addr_l; + uint32_t src_mac_addr_h; + uint32_t dst_mac_addr_l; + uint32_t dst_mac_addr_h; + uint32_t eth_type; + uint32_t hdr_dw[7]; + uint32_t start_offset; + uint32_t bytes_per_cycle; +} __rte_packed; + +struct ark_pkt_gen_inst { + struct rte_eth_dev_info *dev_info; + struct ark_pkt_gen_regs *regs; + int l2_mode; + int ordinal; +}; + +/* packet generator functions */ +ark_pkt_gen_t ark_pktgen_init(void *arg, int ord, int l2_mode); +void ark_pktgen_uninit(ark_pkt_gen_t handle); +void ark_pktgen_run(ark_pkt_gen_t handle); +void ark_pktgen_pause(ark_pkt_gen_t handle); +uint32_t ark_pktgen_paused(ark_pkt_gen_t handle); +uint32_t ark_pktgen_is_gen_forever(ark_pkt_gen_t handle); +uint32_t ark_pktgen_is_running(ark_pkt_gen_t handle); +uint32_t ark_pktgen_tx_done(ark_pkt_gen_t handle); +void ark_pktgen_reset(ark_pkt_gen_t handle); +void ark_pktgen_wait_done(ark_pkt_gen_t handle); +uint32_t ark_pktgen_get_pkts_sent(ark_pkt_gen_t handle); +void ark_pktgen_set_payload_byte(ark_pkt_gen_t handle, uint32_t b); +void ark_pktgen_set_pkt_spacing(ark_pkt_gen_t handle, uint32_t x); +void ark_pktgen_set_pkt_size_min(ark_pkt_gen_t handle, uint32_t x); +void ark_pktgen_set_pkt_size_max(ark_pkt_gen_t handle, uint32_t x); +void ark_pktgen_set_pkt_size_incr(ark_pkt_gen_t handle, uint32_t x); +void ark_pktgen_set_num_pkts(ark_pkt_gen_t handle, uint32_t x); +void ark_pktgen_set_src_mac_addr(ark_pkt_gen_t handle, uint64_t mac_addr); +void ark_pktgen_set_dst_mac_addr(ark_pkt_gen_t handle, uint64_t mac_addr); +void ark_pktgen_set_eth_type(ark_pkt_gen_t handle, uint32_t x); +void ark_pktgen_set_hdr_dW(ark_pkt_gen_t handle, uint32_t *hdr); +void 
ark_pktgen_set_start_offset(ark_pkt_gen_t handle, uint32_t x); +void ark_pktgen_parse(char *argv); +void ark_pktgen_setup(ark_pkt_gen_t handle); + +#endif diff --git a/src/spdk/dpdk/drivers/net/ark/ark_rqp.c b/src/spdk/dpdk/drivers/net/ark/ark_rqp.c new file mode 100644 index 000000000..bf1af4d61 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ark/ark_rqp.c @@ -0,0 +1,68 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2015-2018 Atomic Rules LLC + */ + +#include + +#include "ark_rqp.h" +#include "ark_logs.h" + +/* ************************************************************************* */ +void +ark_rqp_stats_reset(struct ark_rqpace_t *rqp) +{ + rqp->stats_clear = 1; + /* POR 992 */ + /* rqp->cpld_max = 992; */ + /* POR 64 */ + /* rqp->cplh_max = 64; */ +} + +/* ************************************************************************* */ +void +ark_rqp_dump(struct ark_rqpace_t *rqp) +{ + if (rqp->err_count_other != 0) + PMD_DRV_LOG(ERR, + "RQP Errors noted: ctrl: %d cplh_hmax %d cpld_max %d" + ARK_SU32 + ARK_SU32 "\n", + rqp->ctrl, rqp->cplh_max, rqp->cpld_max, + "Error Count", rqp->err_cnt, + "Error General", rqp->err_count_other); + + PMD_STATS_LOG(INFO, "RQP Dump: ctrl: %d cplh_hmax %d cpld_max %d" + ARK_SU32 + ARK_SU32 ARK_SU32 ARK_SU32 ARK_SU32 ARK_SU32 ARK_SU32 + ARK_SU32 ARK_SU32 ARK_SU32 ARK_SU32 ARK_SU32 ARK_SU32 + ARK_SU32 ARK_SU32 ARK_SU32 + ARK_SU32 ARK_SU32 ARK_SU32 ARK_SU32 ARK_SU32 "\n", + rqp->ctrl, rqp->cplh_max, rqp->cpld_max, + "Error Count", rqp->err_cnt, + "Error General", rqp->err_count_other, + "stall_pS", rqp->stall_ps, + "stall_pS Min", rqp->stall_ps_min, + "stall_pS Max", rqp->stall_ps_max, + "req_pS", rqp->req_ps, + "req_pS Min", rqp->req_ps_min, + "req_pS Max", rqp->req_ps_max, + "req_dWPS", rqp->req_dw_ps, + "req_dWPS Min", rqp->req_dw_ps_min, + "req_dWPS Max", rqp->req_dw_ps_max, + "cpl_pS", rqp->cpl_ps, + "cpl_pS Min", rqp->cpl_ps_min, + "cpl_pS Max", rqp->cpl_ps_max, + "cpl_dWPS", rqp->cpl_dw_ps, + "cpl_dWPS Min", rqp->cpl_dw_ps_min, + "cpl_dWPS Max", rqp->cpl_dw_ps_max, + "cplh pending", rqp->cplh_pending, + "cpld pending", rqp->cpld_pending, + "cplh pending max", rqp->cplh_pending_max, + "cpld pending max", rqp->cpld_pending_max); +} + +int +ark_rqp_lasped(struct ark_rqpace_t *rqp) +{ + return rqp->lasped; +} diff --git a/src/spdk/dpdk/drivers/net/ark/ark_rqp.h b/src/spdk/dpdk/drivers/net/ark/ark_rqp.h new file mode 100644 index 000000000..6c8046062 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ark/ark_rqp.h @@ -0,0 +1,57 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2015-2018 Atomic Rules LLC + */ + +#ifndef _ARK_RQP_H_ +#define _ARK_RQP_H_ + +#include + +#include + +/* The RQP or ReQuest Pacer is an internal Arkville hardware module + * which limits the PCIE data flow to insure correct operation for the + * particular hardware PCIE endpoint. + * This module is *not* intended for end-user manipulation, hence + * there is minimal documentation. + */ + +/* + * RQ Pacing core hardware structure + * This is an overlay structures to a memory mapped FPGA device. 
These + * structs will never be instantiated in ram memory + */ +struct ark_rqpace_t { + volatile uint32_t ctrl; + volatile uint32_t stats_clear; + volatile uint32_t cplh_max; + volatile uint32_t cpld_max; + volatile uint32_t err_cnt; + volatile uint32_t stall_ps; + volatile uint32_t stall_ps_min; + volatile uint32_t stall_ps_max; + volatile uint32_t req_ps; + volatile uint32_t req_ps_min; + volatile uint32_t req_ps_max; + volatile uint32_t req_dw_ps; + volatile uint32_t req_dw_ps_min; + volatile uint32_t req_dw_ps_max; + volatile uint32_t cpl_ps; + volatile uint32_t cpl_ps_min; + volatile uint32_t cpl_ps_max; + volatile uint32_t cpl_dw_ps; + volatile uint32_t cpl_dw_ps_min; + volatile uint32_t cpl_dw_ps_max; + volatile uint32_t cplh_pending; + volatile uint32_t cpld_pending; + volatile uint32_t cplh_pending_max; + volatile uint32_t cpld_pending_max; + volatile uint32_t err_count_other; + char eval[4]; + volatile int lasped; +}; + +void ark_rqp_dump(struct ark_rqpace_t *rqp); +void ark_rqp_stats_reset(struct ark_rqpace_t *rqp); +int ark_rqp_lasped(struct ark_rqpace_t *rqp); +#endif diff --git a/src/spdk/dpdk/drivers/net/ark/ark_udm.c b/src/spdk/dpdk/drivers/net/ark/ark_udm.c new file mode 100644 index 000000000..03f1922c6 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ark/ark_udm.c @@ -0,0 +1,197 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2015-2018 Atomic Rules LLC + */ + +#include + +#include "ark_logs.h" +#include "ark_udm.h" + +int +ark_udm_verify(struct ark_udm_t *udm) +{ + if (sizeof(struct ark_udm_t) != ARK_UDM_EXPECT_SIZE) { + PMD_DRV_LOG(ERR, + "ARK: UDM structure looks incorrect %d vs %zd\n", + ARK_UDM_EXPECT_SIZE, sizeof(struct ark_udm_t)); + return -1; + } + + if (udm->setup.const0 != ARK_UDM_CONST) { + PMD_DRV_LOG(ERR, + "ARK: UDM module not found as expected 0x%08x\n", + udm->setup.const0); + return -1; + } + return 0; +} + +int +ark_udm_stop(struct ark_udm_t *udm, const int wait) +{ + int cnt = 0; + + udm->cfg.command = 2; + + while (wait && (udm->cfg.stop_flushed & 0x01) == 0) { + if (cnt++ > 1000) + return 1; + + usleep(10); + } + return 0; +} + +int +ark_udm_reset(struct ark_udm_t *udm) +{ + int status; + + status = ark_udm_stop(udm, 1); + if (status != 0) { + PMD_DEBUG_LOG(INFO, "%s stop failed doing forced reset\n", + __func__); + udm->cfg.command = 4; + usleep(10); + udm->cfg.command = 3; + status = ark_udm_stop(udm, 0); + PMD_DEBUG_LOG(INFO, "%s stop status %d post failure" + " and forced reset\n", + __func__, status); + } else { + udm->cfg.command = 3; + } + + return status; +} + +void +ark_udm_start(struct ark_udm_t *udm) +{ + udm->cfg.command = 1; +} + +void +ark_udm_stats_reset(struct ark_udm_t *udm) +{ + udm->pcibp.pci_clear = 1; + udm->tlp_ps.tlp_clear = 1; +} + +void +ark_udm_configure(struct ark_udm_t *udm, + uint32_t headroom, + uint32_t dataroom, + uint32_t write_interval_ns) +{ + /* headroom and data room are in DWords in the UDM */ + udm->cfg.dataroom = dataroom / 4; + udm->cfg.headroom = headroom / 4; + + /* 4 NS period ns */ + udm->rt_cfg.write_interval = write_interval_ns / 4; +} + +void +ark_udm_write_addr(struct ark_udm_t *udm, rte_iova_t addr) +{ + udm->rt_cfg.hw_prod_addr = addr; +} + +int +ark_udm_is_flushed(struct ark_udm_t *udm) +{ + return (udm->cfg.stop_flushed & 0x01) != 0; +} + +uint64_t +ark_udm_dropped(struct ark_udm_t *udm) +{ + return udm->qstats.q_pkt_drop; +} + +uint64_t +ark_udm_bytes(struct ark_udm_t *udm) +{ + return udm->qstats.q_byte_count; +} + +uint64_t +ark_udm_packets(struct ark_udm_t *udm) +{ + return 
udm->qstats.q_ff_packet_count; +} + +void +ark_udm_dump_stats(struct ark_udm_t *udm, const char *msg) +{ + PMD_STATS_LOG(INFO, "UDM Stats: %s" + ARK_SU64 ARK_SU64 ARK_SU64 ARK_SU64 ARK_SU64 "\n", + msg, + "Pkts Received", udm->stats.rx_packet_count, + "Pkts Finalized", udm->stats.rx_sent_packets, + "Pkts Dropped", udm->tlp.pkt_drop, + "Bytes Count", udm->stats.rx_byte_count, + "MBuf Count", udm->stats.rx_mbuf_count); +} + +void +ark_udm_dump_queue_stats(struct ark_udm_t *udm, const char *msg, uint16_t qid) +{ + PMD_STATS_LOG(INFO, "UDM Queue %3u Stats: %s" + ARK_SU64 ARK_SU64 + ARK_SU64 ARK_SU64 + ARK_SU64 "\n", + qid, msg, + "Pkts Received", udm->qstats.q_packet_count, + "Pkts Finalized", udm->qstats.q_ff_packet_count, + "Pkts Dropped", udm->qstats.q_pkt_drop, + "Bytes Count", udm->qstats.q_byte_count, + "MBuf Count", udm->qstats.q_mbuf_count); +} + +void +ark_udm_dump(struct ark_udm_t *udm, const char *msg) +{ + PMD_DEBUG_LOG(DEBUG, "UDM Dump: %s Stopped: %d\n", msg, + udm->cfg.stop_flushed); +} + +void +ark_udm_dump_setup(struct ark_udm_t *udm, uint16_t q_id) +{ + PMD_DEBUG_LOG(DEBUG, "UDM Setup Q: %u" + ARK_SU64X ARK_SU32 "\n", + q_id, + "hw_prod_addr", udm->rt_cfg.hw_prod_addr, + "prod_idx", udm->rt_cfg.prod_idx); +} + +void +ark_udm_dump_perf(struct ark_udm_t *udm, const char *msg) +{ + struct ark_udm_pcibp_t *bp = &udm->pcibp; + + PMD_STATS_LOG(INFO, "UDM Performance %s" + ARK_SU32 ARK_SU32 ARK_SU32 ARK_SU32 ARK_SU32 ARK_SU32 + "\n", + msg, + "PCI Empty", bp->pci_empty, + "PCI Q1", bp->pci_q1, + "PCI Q2", bp->pci_q2, + "PCI Q3", bp->pci_q3, + "PCI Q4", bp->pci_q4, + "PCI Full", bp->pci_full); +} + +void +ark_udm_queue_stats_reset(struct ark_udm_t *udm) +{ + udm->qstats.q_byte_count = 1; +} + +void +ark_udm_queue_enable(struct ark_udm_t *udm, int enable) +{ + udm->qstats.q_enable = enable ? 1 : 0; +} diff --git a/src/spdk/dpdk/drivers/net/ark/ark_udm.h b/src/spdk/dpdk/drivers/net/ark/ark_udm.h new file mode 100644 index 000000000..5846c825b --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ark/ark_udm.h @@ -0,0 +1,163 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2015-2018 Atomic Rules LLC + */ + +#ifndef _ARK_UDM_H_ +#define _ARK_UDM_H_ + +#include + +#include + +/* The UDM or Upstream Data Mover is an internal Arkville hardware + * module for moving packet from the RX packet streams to host memory. + * This module is *not* intended for end-user manipulation, hence + * there is minimal documentation. + */ + +/* Meta data structure apssed from FPGA, must match layout in FPGA */ +struct ark_rx_meta { + uint64_t timestamp; + uint64_t user_data; + uint8_t port; + uint8_t dst_queue; + uint16_t pkt_len; +}; + +/* + * UDM hardware structures + * These are overlay structures to a memory mapped FPGA device. 
These + * structs will never be instantiated in ram memory + */ + +#define ARK_RX_WRITE_TIME_NS 2500 +#define ARK_UDM_SETUP 0 +#define ARK_UDM_CONST 0xbACECACE +struct ark_udm_setup_t { + uint32_t r0; + uint32_t r4; + volatile uint32_t cycle_count; + uint32_t const0; +}; + +#define ARK_UDM_CFG 0x010 +struct ark_udm_cfg_t { + volatile uint32_t stop_flushed; /* RO */ + volatile uint32_t command; + uint32_t dataroom; + uint32_t headroom; +}; + +typedef enum { + ARK_UDM_START = 0x1, + ARK_UDM_STOP = 0x2, + ARK_UDM_RESET = 0x3 +} ark_udm_commands; + +#define ARK_UDM_STATS 0x020 +struct ark_udm_stats_t { + volatile uint64_t rx_byte_count; + volatile uint64_t rx_packet_count; + volatile uint64_t rx_mbuf_count; + volatile uint64_t rx_sent_packets; +}; + +#define ARK_UDM_PQ 0x040 +struct ark_udm_queue_stats_t { + volatile uint64_t q_byte_count; + volatile uint64_t q_packet_count; /* includes drops */ + volatile uint64_t q_mbuf_count; + volatile uint64_t q_ff_packet_count; + volatile uint64_t q_pkt_drop; + uint32_t q_enable; +}; + +#define ARK_UDM_TLP 0x0070 +struct ark_udm_tlp_t { + volatile uint64_t pkt_drop; /* global */ + volatile uint32_t tlp_q1; + volatile uint32_t tlp_q2; + volatile uint32_t tlp_q3; + volatile uint32_t tlp_q4; + volatile uint32_t tlp_full; +}; + +#define ARK_UDM_PCIBP 0x00a0 +struct ark_udm_pcibp_t { + volatile uint32_t pci_clear; + volatile uint32_t pci_empty; + volatile uint32_t pci_q1; + volatile uint32_t pci_q2; + volatile uint32_t pci_q3; + volatile uint32_t pci_q4; + volatile uint32_t pci_full; +}; + +#define ARK_UDM_TLP_PS 0x00bc +struct ark_udm_tlp_ps_t { + volatile uint32_t tlp_clear; + volatile uint32_t tlp_ps_min; + volatile uint32_t tlp_ps_max; + volatile uint32_t tlp_full_ps_min; + volatile uint32_t tlp_full_ps_max; + volatile uint32_t tlp_dw_ps_min; + volatile uint32_t tlp_dw_ps_max; + volatile uint32_t tlp_pldw_ps_min; + volatile uint32_t tlp_pldw_ps_max; +}; + +#define ARK_UDM_RT_CFG 0x00e0 +struct ark_udm_rt_cfg_t { + rte_iova_t hw_prod_addr; + uint32_t write_interval; /* 4ns cycles */ + volatile uint32_t prod_idx; /* RO */ +}; + +/* Consolidated structure */ +#define ARK_UDM_EXPECT_SIZE (0x00fc + 4) +#define ARK_UDM_QOFFSET ARK_UDM_EXPECT_SIZE +struct ark_udm_t { + struct ark_udm_setup_t setup; + struct ark_udm_cfg_t cfg; + struct ark_udm_stats_t stats; + struct ark_udm_queue_stats_t qstats; + uint8_t reserved1[(ARK_UDM_TLP - ARK_UDM_PQ) - + sizeof(struct ark_udm_queue_stats_t)]; + struct ark_udm_tlp_t tlp; + uint8_t reserved2[(ARK_UDM_PCIBP - ARK_UDM_TLP) - + sizeof(struct ark_udm_tlp_t)]; + struct ark_udm_pcibp_t pcibp; + struct ark_udm_tlp_ps_t tlp_ps; + struct ark_udm_rt_cfg_t rt_cfg; + int8_t reserved3[(ARK_UDM_EXPECT_SIZE - ARK_UDM_RT_CFG) - + sizeof(struct ark_udm_rt_cfg_t)]; +}; + + +int ark_udm_verify(struct ark_udm_t *udm); +int ark_udm_stop(struct ark_udm_t *udm, int wait); +void ark_udm_start(struct ark_udm_t *udm); +int ark_udm_reset(struct ark_udm_t *udm); +void ark_udm_configure(struct ark_udm_t *udm, + uint32_t headroom, + uint32_t dataroom, + uint32_t write_interval_ns); +void ark_udm_write_addr(struct ark_udm_t *udm, rte_iova_t addr); +void ark_udm_stats_reset(struct ark_udm_t *udm); +void ark_udm_dump_stats(struct ark_udm_t *udm, const char *msg); +void ark_udm_dump_queue_stats(struct ark_udm_t *udm, const char *msg, + uint16_t qid); +void ark_udm_dump(struct ark_udm_t *udm, const char *msg); +void ark_udm_dump_perf(struct ark_udm_t *udm, const char *msg); +void ark_udm_dump_setup(struct ark_udm_t *udm, uint16_t q_id); +int 
ark_udm_is_flushed(struct ark_udm_t *udm); + +/* Per queue data */ +uint64_t ark_udm_dropped(struct ark_udm_t *udm); +uint64_t ark_udm_bytes(struct ark_udm_t *udm); +uint64_t ark_udm_packets(struct ark_udm_t *udm); + +void ark_udm_queue_stats_reset(struct ark_udm_t *udm); +void ark_udm_queue_enable(struct ark_udm_t *udm, int enable); + +#endif diff --git a/src/spdk/dpdk/drivers/net/ark/meson.build b/src/spdk/dpdk/drivers/net/ark/meson.build new file mode 100644 index 000000000..99151bba1 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ark/meson.build @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2018 Intel Corporation + +sources = files('ark_ddm.c', + 'ark_ethdev.c', + 'ark_ethdev_rx.c', + 'ark_ethdev_tx.c', + 'ark_mpu.c', + 'ark_pktchkr.c', + 'ark_pktdir.c', + 'ark_pktgen.c', + 'ark_rqp.c', + 'ark_udm.c') diff --git a/src/spdk/dpdk/drivers/net/ark/rte_pmd_ark_version.map b/src/spdk/dpdk/drivers/net/ark/rte_pmd_ark_version.map new file mode 100644 index 000000000..f9f17e4f6 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ark/rte_pmd_ark_version.map @@ -0,0 +1,3 @@ +DPDK_20.0 { + local: *; +}; diff --git a/src/spdk/dpdk/drivers/net/atlantic/Makefile b/src/spdk/dpdk/drivers/net/atlantic/Makefile new file mode 100644 index 000000000..0d0d0a502 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/atlantic/Makefile @@ -0,0 +1,34 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2018 Aquantia Corporation + +include $(RTE_SDK)/mk/rte.vars.mk + +# +# library name +# +LIB = librte_pmd_atlantic.a + +CFLAGS += -O3 +CFLAGS += $(WERROR_FLAGS) + +EXPORT_MAP := rte_pmd_atlantic_version.map + +LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring +LDLIBS += -lrte_ethdev -lrte_net +LDLIBS += -lrte_bus_pci + +VPATH += $(SRCDIR)/hw_atl + +# +# all source are stored in SRCS-y +# +SRCS-$(CONFIG_RTE_LIBRTE_ATLANTIC_PMD) += atl_rxtx.c +SRCS-$(CONFIG_RTE_LIBRTE_ATLANTIC_PMD) += atl_ethdev.c +SRCS-$(CONFIG_RTE_LIBRTE_ATLANTIC_PMD) += atl_hw_regs.c +SRCS-$(CONFIG_RTE_LIBRTE_ATLANTIC_PMD) += hw_atl_utils.c +SRCS-$(CONFIG_RTE_LIBRTE_ATLANTIC_PMD) += hw_atl_llh.c +SRCS-$(CONFIG_RTE_LIBRTE_ATLANTIC_PMD) += hw_atl_utils_fw2x.c +SRCS-$(CONFIG_RTE_LIBRTE_ATLANTIC_PMD) += hw_atl_b0.c +SRCS-$(CONFIG_RTE_LIBRTE_ATLANTIC_PMD) += rte_pmd_atlantic.c + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/src/spdk/dpdk/drivers/net/atlantic/atl_common.h b/src/spdk/dpdk/drivers/net/atlantic/atl_common.h new file mode 100644 index 000000000..54b3a3934 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/atlantic/atl_common.h @@ -0,0 +1,96 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Aquantia Corporation + */ + +#ifndef AQ_COMMON_H +#define AQ_COMMON_H + +#define ATL_PMD_DRIVER_VERSION "0.6.7" + +#define PCI_VENDOR_ID_AQUANTIA 0x1D6A + +#define AQ_DEVICE_ID_0001 0x0001 +#define AQ_DEVICE_ID_D100 0xD100 +#define AQ_DEVICE_ID_D107 0xD107 +#define AQ_DEVICE_ID_D108 0xD108 +#define AQ_DEVICE_ID_D109 0xD109 + +#define AQ_DEVICE_ID_AQC100 0x00B1 +#define AQ_DEVICE_ID_AQC107 0x07B1 +#define AQ_DEVICE_ID_AQC108 0x08B1 +#define AQ_DEVICE_ID_AQC109 0x09B1 +#define AQ_DEVICE_ID_AQC111 0x11B1 +#define AQ_DEVICE_ID_AQC112 0x12B1 + +#define AQ_DEVICE_ID_AQC100S 0x80B1 +#define AQ_DEVICE_ID_AQC107S 0x87B1 +#define AQ_DEVICE_ID_AQC108S 0x88B1 +#define AQ_DEVICE_ID_AQC109S 0x89B1 +#define AQ_DEVICE_ID_AQC111S 0x91B1 +#define AQ_DEVICE_ID_AQC112S 0x92B1 + +#define AQ_DEVICE_ID_AQC111E 0x51B1 +#define AQ_DEVICE_ID_AQC112E 0x52B1 + +#define HW_ATL_NIC_NAME "aQuantia AQtion 10Gbit Network Adapter" + +#define 
AQ_HWREV_ANY 0 +#define AQ_HWREV_1 1 +#define AQ_HWREV_2 2 + +#define AQ_NIC_RATE_10G BIT(0) +#define AQ_NIC_RATE_5G BIT(1) +#define AQ_NIC_RATE_5G5R BIT(2) +#define AQ_NIC_RATE_2G5 BIT(3) +#define AQ_NIC_RATE_1G BIT(4) +#define AQ_NIC_RATE_100M BIT(5) + +#define AQ_NIC_RATE_EEE_10G BIT(6) +#define AQ_NIC_RATE_EEE_5G BIT(7) +#define AQ_NIC_RATE_EEE_2G5 BIT(8) +#define AQ_NIC_RATE_EEE_1G BIT(9) + + +#define ATL_MAX_RING_DESC (8 * 1024 - 8) +#define ATL_MIN_RING_DESC 32 +#define ATL_RXD_ALIGN 8 +#define ATL_TXD_ALIGN 8 +#define ATL_TX_MAX_SEG 16 + +#define ATL_MAX_INTR_QUEUE_NUM 15 + +#define ATL_MISC_VEC_ID 10 +#define ATL_RX_VEC_START 0 + +#define AQ_NIC_WOL_ENABLED BIT(0) + + +#define AQ_NIC_FC_OFF 0U +#define AQ_NIC_FC_TX 1U +#define AQ_NIC_FC_RX 2U +#define AQ_NIC_FC_FULL 3U +#define AQ_NIC_FC_AUTO 4U + + +#define AQ_CFG_TX_FRAME_MAX (16U * 1024U) +#define AQ_CFG_RX_FRAME_MAX (2U * 1024U) + +#define AQ_HW_MULTICAST_ADDRESS_MAX 32 +#define AQ_HW_MAX_SEGS_SIZE 40 + +#define AQ_HW_MAX_RX_QUEUES 8 +#define AQ_HW_MAX_TX_QUEUES 8 +#define AQ_HW_MIN_RX_RING_SIZE 512 +#define AQ_HW_MAX_RX_RING_SIZE 8192 +#define AQ_HW_MIN_TX_RING_SIZE 512 +#define AQ_HW_MAX_TX_RING_SIZE 8192 + +#define ATL_DEFAULT_RX_FREE_THRESH 64 +#define ATL_DEFAULT_TX_FREE_THRESH 64 + +#define ATL_IRQ_CAUSE_LINK 0x8 + +#define AQ_HW_LED_BLINK 0x2U +#define AQ_HW_LED_DEFAULT 0x0U + +#endif /* AQ_COMMON_H */ diff --git a/src/spdk/dpdk/drivers/net/atlantic/atl_ethdev.c b/src/spdk/dpdk/drivers/net/atlantic/atl_ethdev.c new file mode 100644 index 000000000..b2b3bd36c --- /dev/null +++ b/src/spdk/dpdk/drivers/net/atlantic/atl_ethdev.c @@ -0,0 +1,1941 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Aquantia Corporation + */ + +#include +#include +#include + +#include "atl_ethdev.h" +#include "atl_common.h" +#include "atl_hw_regs.h" +#include "atl_logs.h" +#include "hw_atl/hw_atl_llh.h" +#include "hw_atl/hw_atl_b0.h" +#include "hw_atl/hw_atl_b0_internal.h" + +static int eth_atl_dev_init(struct rte_eth_dev *eth_dev); +static int eth_atl_dev_uninit(struct rte_eth_dev *eth_dev); + +static int atl_dev_configure(struct rte_eth_dev *dev); +static int atl_dev_start(struct rte_eth_dev *dev); +static void atl_dev_stop(struct rte_eth_dev *dev); +static int atl_dev_set_link_up(struct rte_eth_dev *dev); +static int atl_dev_set_link_down(struct rte_eth_dev *dev); +static void atl_dev_close(struct rte_eth_dev *dev); +static int atl_dev_reset(struct rte_eth_dev *dev); +static int atl_dev_promiscuous_enable(struct rte_eth_dev *dev); +static int atl_dev_promiscuous_disable(struct rte_eth_dev *dev); +static int atl_dev_allmulticast_enable(struct rte_eth_dev *dev); +static int atl_dev_allmulticast_disable(struct rte_eth_dev *dev); +static int atl_dev_link_update(struct rte_eth_dev *dev, int wait); + +static int atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused, + struct rte_eth_xstat_name *xstats_names, + unsigned int size); + +static int atl_dev_stats_get(struct rte_eth_dev *dev, + struct rte_eth_stats *stats); + +static int atl_dev_xstats_get(struct rte_eth_dev *dev, + struct rte_eth_xstat *stats, unsigned int n); + +static int atl_dev_stats_reset(struct rte_eth_dev *dev); + +static int atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version, + size_t fw_size); + +static const uint32_t *atl_dev_supported_ptypes_get(struct rte_eth_dev *dev); + +static int atl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu); + +/* VLAN stuff */ +static int atl_vlan_filter_set(struct rte_eth_dev *dev, + uint16_t vlan_id, int on); 
+ +static int atl_vlan_offload_set(struct rte_eth_dev *dev, int mask); + +static void atl_vlan_strip_queue_set(struct rte_eth_dev *dev, + uint16_t queue_id, int on); + +static int atl_vlan_tpid_set(struct rte_eth_dev *dev, + enum rte_vlan_type vlan_type, uint16_t tpid); + +/* EEPROM */ +static int atl_dev_get_eeprom_length(struct rte_eth_dev *dev); +static int atl_dev_get_eeprom(struct rte_eth_dev *dev, + struct rte_dev_eeprom_info *eeprom); +static int atl_dev_set_eeprom(struct rte_eth_dev *dev, + struct rte_dev_eeprom_info *eeprom); + +/* Regs */ +static int atl_dev_get_regs(struct rte_eth_dev *dev, + struct rte_dev_reg_info *regs); + +/* Flow control */ +static int atl_flow_ctrl_get(struct rte_eth_dev *dev, + struct rte_eth_fc_conf *fc_conf); +static int atl_flow_ctrl_set(struct rte_eth_dev *dev, + struct rte_eth_fc_conf *fc_conf); + +static void atl_dev_link_status_print(struct rte_eth_dev *dev); + +/* Interrupts */ +static int atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev); +static int atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on); +static int atl_dev_interrupt_get_status(struct rte_eth_dev *dev); +static int atl_dev_interrupt_action(struct rte_eth_dev *dev, + struct rte_intr_handle *handle); +static void atl_dev_interrupt_handler(void *param); + + +static int atl_add_mac_addr(struct rte_eth_dev *dev, + struct rte_ether_addr *mac_addr, + uint32_t index, uint32_t pool); +static void atl_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index); +static int atl_set_default_mac_addr(struct rte_eth_dev *dev, + struct rte_ether_addr *mac_addr); + +static int atl_dev_set_mc_addr_list(struct rte_eth_dev *dev, + struct rte_ether_addr *mc_addr_set, + uint32_t nb_mc_addr); + +/* RSS */ +static int atl_reta_update(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size); +static int atl_reta_query(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size); +static int atl_rss_hash_update(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf); +static int atl_rss_hash_conf_get(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf); + + +static int eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, + struct rte_pci_device *pci_dev); +static int eth_atl_pci_remove(struct rte_pci_device *pci_dev); + +static int atl_dev_info_get(struct rte_eth_dev *dev, + struct rte_eth_dev_info *dev_info); + +int atl_logtype_init; +int atl_logtype_driver; + +/* + * The set of PCI devices this driver supports + */ +static const struct rte_pci_id pci_id_atl_map[] = { + { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_0001) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D100) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D107) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D108) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D109) }, + + { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112) }, + + { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100S) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107S) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108S) }, + { 
RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109S) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111S) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112S) }, + + { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111E) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112E) }, + { .vendor_id = 0, /* sentinel */ }, +}; + +static struct rte_pci_driver rte_atl_pmd = { + .id_table = pci_id_atl_map, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, + .probe = eth_atl_pci_probe, + .remove = eth_atl_pci_remove, +}; + +#define ATL_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_STRIP \ + | DEV_RX_OFFLOAD_IPV4_CKSUM \ + | DEV_RX_OFFLOAD_UDP_CKSUM \ + | DEV_RX_OFFLOAD_TCP_CKSUM \ + | DEV_RX_OFFLOAD_JUMBO_FRAME \ + | DEV_RX_OFFLOAD_MACSEC_STRIP \ + | DEV_RX_OFFLOAD_VLAN_FILTER) + +#define ATL_TX_OFFLOADS (DEV_TX_OFFLOAD_VLAN_INSERT \ + | DEV_TX_OFFLOAD_IPV4_CKSUM \ + | DEV_TX_OFFLOAD_UDP_CKSUM \ + | DEV_TX_OFFLOAD_TCP_CKSUM \ + | DEV_TX_OFFLOAD_TCP_TSO \ + | DEV_TX_OFFLOAD_MACSEC_INSERT \ + | DEV_TX_OFFLOAD_MULTI_SEGS) + +#define SFP_EEPROM_SIZE 0x100 + +static const struct rte_eth_desc_lim rx_desc_lim = { + .nb_max = ATL_MAX_RING_DESC, + .nb_min = ATL_MIN_RING_DESC, + .nb_align = ATL_RXD_ALIGN, +}; + +static const struct rte_eth_desc_lim tx_desc_lim = { + .nb_max = ATL_MAX_RING_DESC, + .nb_min = ATL_MIN_RING_DESC, + .nb_align = ATL_TXD_ALIGN, + .nb_seg_max = ATL_TX_MAX_SEG, + .nb_mtu_seg_max = ATL_TX_MAX_SEG, +}; + +enum atl_xstats_type { + XSTATS_TYPE_MSM = 0, + XSTATS_TYPE_MACSEC, +}; + +#define ATL_XSTATS_FIELD(name) { \ + #name, \ + offsetof(struct aq_stats_s, name), \ + XSTATS_TYPE_MSM \ +} + +#define ATL_MACSEC_XSTATS_FIELD(name) { \ + #name, \ + offsetof(struct macsec_stats, name), \ + XSTATS_TYPE_MACSEC \ +} + +struct atl_xstats_tbl_s { + const char *name; + unsigned int offset; + enum atl_xstats_type type; +}; + +static struct atl_xstats_tbl_s atl_xstats_tbl[] = { + ATL_XSTATS_FIELD(uprc), + ATL_XSTATS_FIELD(mprc), + ATL_XSTATS_FIELD(bprc), + ATL_XSTATS_FIELD(erpt), + ATL_XSTATS_FIELD(uptc), + ATL_XSTATS_FIELD(mptc), + ATL_XSTATS_FIELD(bptc), + ATL_XSTATS_FIELD(erpr), + ATL_XSTATS_FIELD(ubrc), + ATL_XSTATS_FIELD(ubtc), + ATL_XSTATS_FIELD(mbrc), + ATL_XSTATS_FIELD(mbtc), + ATL_XSTATS_FIELD(bbrc), + ATL_XSTATS_FIELD(bbtc), + /* Ingress Common Counters */ + ATL_MACSEC_XSTATS_FIELD(in_ctl_pkts), + ATL_MACSEC_XSTATS_FIELD(in_tagged_miss_pkts), + ATL_MACSEC_XSTATS_FIELD(in_untagged_miss_pkts), + ATL_MACSEC_XSTATS_FIELD(in_notag_pkts), + ATL_MACSEC_XSTATS_FIELD(in_untagged_pkts), + ATL_MACSEC_XSTATS_FIELD(in_bad_tag_pkts), + ATL_MACSEC_XSTATS_FIELD(in_no_sci_pkts), + ATL_MACSEC_XSTATS_FIELD(in_unknown_sci_pkts), + /* Ingress SA Counters */ + ATL_MACSEC_XSTATS_FIELD(in_untagged_hit_pkts), + ATL_MACSEC_XSTATS_FIELD(in_not_using_sa), + ATL_MACSEC_XSTATS_FIELD(in_unused_sa), + ATL_MACSEC_XSTATS_FIELD(in_not_valid_pkts), + ATL_MACSEC_XSTATS_FIELD(in_invalid_pkts), + ATL_MACSEC_XSTATS_FIELD(in_ok_pkts), + ATL_MACSEC_XSTATS_FIELD(in_unchecked_pkts), + ATL_MACSEC_XSTATS_FIELD(in_validated_octets), + ATL_MACSEC_XSTATS_FIELD(in_decrypted_octets), + /* Egress Common Counters */ + ATL_MACSEC_XSTATS_FIELD(out_ctl_pkts), + ATL_MACSEC_XSTATS_FIELD(out_unknown_sa_pkts), + ATL_MACSEC_XSTATS_FIELD(out_untagged_pkts), + ATL_MACSEC_XSTATS_FIELD(out_too_long), + /* Egress SC Counters */ + ATL_MACSEC_XSTATS_FIELD(out_sc_protected_pkts), + ATL_MACSEC_XSTATS_FIELD(out_sc_encrypted_pkts), + /* Egress SA Counters */ + 
ATL_MACSEC_XSTATS_FIELD(out_sa_hit_drop_redirect), + ATL_MACSEC_XSTATS_FIELD(out_sa_protected2_pkts), + ATL_MACSEC_XSTATS_FIELD(out_sa_protected_pkts), + ATL_MACSEC_XSTATS_FIELD(out_sa_encrypted_pkts), +}; + +static const struct eth_dev_ops atl_eth_dev_ops = { + .dev_configure = atl_dev_configure, + .dev_start = atl_dev_start, + .dev_stop = atl_dev_stop, + .dev_set_link_up = atl_dev_set_link_up, + .dev_set_link_down = atl_dev_set_link_down, + .dev_close = atl_dev_close, + .dev_reset = atl_dev_reset, + + /* PROMISC */ + .promiscuous_enable = atl_dev_promiscuous_enable, + .promiscuous_disable = atl_dev_promiscuous_disable, + .allmulticast_enable = atl_dev_allmulticast_enable, + .allmulticast_disable = atl_dev_allmulticast_disable, + + /* Link */ + .link_update = atl_dev_link_update, + + .get_reg = atl_dev_get_regs, + + /* Stats */ + .stats_get = atl_dev_stats_get, + .xstats_get = atl_dev_xstats_get, + .xstats_get_names = atl_dev_xstats_get_names, + .stats_reset = atl_dev_stats_reset, + .xstats_reset = atl_dev_stats_reset, + + .fw_version_get = atl_fw_version_get, + .dev_infos_get = atl_dev_info_get, + .dev_supported_ptypes_get = atl_dev_supported_ptypes_get, + + .mtu_set = atl_dev_mtu_set, + + /* VLAN */ + .vlan_filter_set = atl_vlan_filter_set, + .vlan_offload_set = atl_vlan_offload_set, + .vlan_tpid_set = atl_vlan_tpid_set, + .vlan_strip_queue_set = atl_vlan_strip_queue_set, + + /* Queue Control */ + .rx_queue_start = atl_rx_queue_start, + .rx_queue_stop = atl_rx_queue_stop, + .rx_queue_setup = atl_rx_queue_setup, + .rx_queue_release = atl_rx_queue_release, + + .tx_queue_start = atl_tx_queue_start, + .tx_queue_stop = atl_tx_queue_stop, + .tx_queue_setup = atl_tx_queue_setup, + .tx_queue_release = atl_tx_queue_release, + + .rx_queue_intr_enable = atl_dev_rx_queue_intr_enable, + .rx_queue_intr_disable = atl_dev_rx_queue_intr_disable, + + .rx_queue_count = atl_rx_queue_count, + .rx_descriptor_status = atl_dev_rx_descriptor_status, + .tx_descriptor_status = atl_dev_tx_descriptor_status, + + /* EEPROM */ + .get_eeprom_length = atl_dev_get_eeprom_length, + .get_eeprom = atl_dev_get_eeprom, + .set_eeprom = atl_dev_set_eeprom, + + /* Flow Control */ + .flow_ctrl_get = atl_flow_ctrl_get, + .flow_ctrl_set = atl_flow_ctrl_set, + + /* MAC */ + .mac_addr_add = atl_add_mac_addr, + .mac_addr_remove = atl_remove_mac_addr, + .mac_addr_set = atl_set_default_mac_addr, + .set_mc_addr_list = atl_dev_set_mc_addr_list, + .rxq_info_get = atl_rxq_info_get, + .txq_info_get = atl_txq_info_get, + + .reta_update = atl_reta_update, + .reta_query = atl_reta_query, + .rss_hash_update = atl_rss_hash_update, + .rss_hash_conf_get = atl_rss_hash_conf_get, +}; + +static inline int32_t +atl_reset_hw(struct aq_hw_s *hw) +{ + return hw_atl_b0_hw_reset(hw); +} + +static inline void +atl_enable_intr(struct rte_eth_dev *dev) +{ + struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + hw_atl_itr_irq_msk_setlsw_set(hw, 0xffffffff); +} + +static void +atl_disable_intr(struct aq_hw_s *hw) +{ + PMD_INIT_FUNC_TRACE(); + hw_atl_itr_irq_msk_clearlsw_set(hw, 0xffffffff); +} + +static int +eth_atl_dev_init(struct rte_eth_dev *eth_dev) +{ + struct atl_adapter *adapter = eth_dev->data->dev_private; + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); + int err = 0; + + PMD_INIT_FUNC_TRACE(); + + eth_dev->dev_ops = &atl_eth_dev_ops; + eth_dev->rx_pkt_burst = &atl_recv_pkts; + 
eth_dev->tx_pkt_burst = &atl_xmit_pkts; + eth_dev->tx_pkt_prepare = &atl_prep_pkts; + + /* For secondary processes, the primary process has done all the work */ + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; + + /* Vendor and Device ID need to be set before init of shared code */ + hw->device_id = pci_dev->id.device_id; + hw->vendor_id = pci_dev->id.vendor_id; + hw->mmio = (void *)pci_dev->mem_resource[0].addr; + + /* Hardware configuration - hardcode */ + adapter->hw_cfg.is_lro = false; + adapter->hw_cfg.wol = false; + adapter->hw_cfg.is_rss = false; + adapter->hw_cfg.num_rss_queues = HW_ATL_B0_RSS_MAX; + + adapter->hw_cfg.link_speed_msk = AQ_NIC_RATE_10G | + AQ_NIC_RATE_5G | + AQ_NIC_RATE_2G5 | + AQ_NIC_RATE_1G | + AQ_NIC_RATE_100M; + + adapter->hw_cfg.flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX); + adapter->hw_cfg.aq_rss.indirection_table_size = + HW_ATL_B0_RSS_REDIRECTION_MAX; + + hw->aq_nic_cfg = &adapter->hw_cfg; + + pthread_mutex_init(&hw->mbox_mutex, NULL); + + /* disable interrupt */ + atl_disable_intr(hw); + + /* Allocate memory for storing MAC addresses */ + eth_dev->data->mac_addrs = rte_zmalloc("atlantic", + RTE_ETHER_ADDR_LEN, 0); + if (eth_dev->data->mac_addrs == NULL) { + PMD_INIT_LOG(ERR, "MAC Malloc failed"); + return -ENOMEM; + } + + err = hw_atl_utils_initfw(hw, &hw->aq_fw_ops); + if (err) + return err; + + /* Copy the permanent MAC address */ + if (hw->aq_fw_ops->get_mac_permanent(hw, + eth_dev->data->mac_addrs->addr_bytes) != 0) + return -EINVAL; + + /* Reset the hw statistics */ + atl_dev_stats_reset(eth_dev); + + rte_intr_callback_register(intr_handle, + atl_dev_interrupt_handler, eth_dev); + + /* enable uio/vfio intr/eventfd mapping */ + rte_intr_enable(intr_handle); + + /* enable support intr */ + atl_enable_intr(eth_dev); + + return err; +} + +static int +eth_atl_dev_uninit(struct rte_eth_dev *eth_dev) +{ + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + struct aq_hw_s *hw; + + PMD_INIT_FUNC_TRACE(); + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return -EPERM; + + hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); + + if (hw->adapter_stopped == 0) + atl_dev_close(eth_dev); + + eth_dev->dev_ops = NULL; + eth_dev->rx_pkt_burst = NULL; + eth_dev->tx_pkt_burst = NULL; + + /* disable uio intr before callback unregister */ + rte_intr_disable(intr_handle); + rte_intr_callback_unregister(intr_handle, + atl_dev_interrupt_handler, eth_dev); + + rte_free(eth_dev->data->mac_addrs); + eth_dev->data->mac_addrs = NULL; + + pthread_mutex_destroy(&hw->mbox_mutex); + + return 0; +} + +static int +eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, + struct rte_pci_device *pci_dev) +{ + return rte_eth_dev_pci_generic_probe(pci_dev, + sizeof(struct atl_adapter), eth_atl_dev_init); +} + +static int +eth_atl_pci_remove(struct rte_pci_device *pci_dev) +{ + return rte_eth_dev_pci_generic_remove(pci_dev, eth_atl_dev_uninit); +} + +static int +atl_dev_configure(struct rte_eth_dev *dev) +{ + struct atl_interrupt *intr = + ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + + PMD_INIT_FUNC_TRACE(); + + /* set flag to update link status after init */ + intr->flags |= ATL_FLAG_NEED_LINK_UPDATE; + + return 0; +} + +/* + * Configure device link speed and setup link. + * It returns 0 on success. 
+ */ +static int +atl_dev_start(struct rte_eth_dev *dev) +{ + struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + uint32_t intr_vector = 0; + int status; + int err; + + PMD_INIT_FUNC_TRACE(); + + /* set adapter started */ + hw->adapter_stopped = 0; + + if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) { + PMD_INIT_LOG(ERR, + "Invalid link_speeds for port %u, fix speed not supported", + dev->data->port_id); + return -EINVAL; + } + + /* disable uio/vfio intr/eventfd mapping */ + rte_intr_disable(intr_handle); + + /* reinitialize adapter + * this calls reset and start + */ + status = atl_reset_hw(hw); + if (status != 0) + return -EIO; + + err = hw_atl_b0_hw_init(hw, dev->data->mac_addrs->addr_bytes); + + hw_atl_b0_hw_start(hw); + /* check and configure queue intr-vector mapping */ + if ((rte_intr_cap_multiple(intr_handle) || + !RTE_ETH_DEV_SRIOV(dev).active) && + dev->data->dev_conf.intr_conf.rxq != 0) { + intr_vector = dev->data->nb_rx_queues; + if (intr_vector > ATL_MAX_INTR_QUEUE_NUM) { + PMD_INIT_LOG(ERR, "At most %d intr queues supported", + ATL_MAX_INTR_QUEUE_NUM); + return -ENOTSUP; + } + if (rte_intr_efd_enable(intr_handle, intr_vector)) { + PMD_INIT_LOG(ERR, "rte_intr_efd_enable failed"); + return -1; + } + } + + if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) { + intr_handle->intr_vec = rte_zmalloc("intr_vec", + dev->data->nb_rx_queues * sizeof(int), 0); + if (intr_handle->intr_vec == NULL) { + PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues" + " intr_vec", dev->data->nb_rx_queues); + return -ENOMEM; + } + } + + /* initialize transmission unit */ + atl_tx_init(dev); + + /* This can fail when allocating mbufs for descriptor rings */ + err = atl_rx_init(dev); + if (err) { + PMD_INIT_LOG(ERR, "Unable to initialize RX hardware"); + goto error; + } + + PMD_INIT_LOG(DEBUG, "FW version: %u.%u.%u", + hw->fw_ver_actual >> 24, + (hw->fw_ver_actual >> 16) & 0xFF, + hw->fw_ver_actual & 0xFFFF); + PMD_INIT_LOG(DEBUG, "Driver version: %s", ATL_PMD_DRIVER_VERSION); + + err = atl_start_queues(dev); + if (err < 0) { + PMD_INIT_LOG(ERR, "Unable to start rxtx queues"); + goto error; + } + + err = atl_dev_set_link_up(dev); + + err = hw->aq_fw_ops->update_link_status(hw); + + if (err) + goto error; + + dev->data->dev_link.link_status = hw->aq_link_status.mbps != 0; + + if (rte_intr_allow_others(intr_handle)) { + /* check if lsc interrupt is enabled */ + if (dev->data->dev_conf.intr_conf.lsc != 0) + atl_dev_lsc_interrupt_setup(dev, true); + else + atl_dev_lsc_interrupt_setup(dev, false); + } else { + rte_intr_callback_unregister(intr_handle, + atl_dev_interrupt_handler, dev); + if (dev->data->dev_conf.intr_conf.lsc != 0) + PMD_INIT_LOG(INFO, "lsc won't enable because of" + " no intr multiplex"); + } + + /* check if rxq interrupt is enabled */ + if (dev->data->dev_conf.intr_conf.rxq != 0 && + rte_intr_dp_is_en(intr_handle)) + atl_dev_rxq_interrupt_setup(dev); + + /* enable uio/vfio intr/eventfd mapping */ + rte_intr_enable(intr_handle); + + /* resume enabled intr since hw reset */ + atl_enable_intr(dev); + + return 0; + +error: + atl_stop_queues(dev); + return -EIO; +} + +/* + * Stop device: disable rx and tx functions to allow for reconfiguring. 
+ */ +static void +atl_dev_stop(struct rte_eth_dev *dev) +{ + struct rte_eth_link link; + struct aq_hw_s *hw = + ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + + PMD_INIT_FUNC_TRACE(); + + /* disable interrupts */ + atl_disable_intr(hw); + + /* reset the NIC */ + atl_reset_hw(hw); + hw->adapter_stopped = 1; + + atl_stop_queues(dev); + + /* Clear stored conf */ + dev->data->scattered_rx = 0; + dev->data->lro = 0; + + /* Clear recorded link status */ + memset(&link, 0, sizeof(link)); + rte_eth_linkstatus_set(dev, &link); + + if (!rte_intr_allow_others(intr_handle)) + /* resume to the default handler */ + rte_intr_callback_register(intr_handle, + atl_dev_interrupt_handler, + (void *)dev); + + /* Clean datapath event and queue/vec mapping */ + rte_intr_efd_disable(intr_handle); + if (intr_handle->intr_vec != NULL) { + rte_free(intr_handle->intr_vec); + intr_handle->intr_vec = NULL; + } +} + +/* + * Set device link up: enable tx. + */ +static int +atl_dev_set_link_up(struct rte_eth_dev *dev) +{ + struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t link_speeds = dev->data->dev_conf.link_speeds; + uint32_t speed_mask = 0; + + if (link_speeds == ETH_LINK_SPEED_AUTONEG) { + speed_mask = hw->aq_nic_cfg->link_speed_msk; + } else { + if (link_speeds & ETH_LINK_SPEED_10G) + speed_mask |= AQ_NIC_RATE_10G; + if (link_speeds & ETH_LINK_SPEED_5G) + speed_mask |= AQ_NIC_RATE_5G; + if (link_speeds & ETH_LINK_SPEED_1G) + speed_mask |= AQ_NIC_RATE_1G; + if (link_speeds & ETH_LINK_SPEED_2_5G) + speed_mask |= AQ_NIC_RATE_2G5; + if (link_speeds & ETH_LINK_SPEED_100M) + speed_mask |= AQ_NIC_RATE_100M; + } + + return hw->aq_fw_ops->set_link_speed(hw, speed_mask); +} + +/* + * Set device link down: disable tx. + */ +static int +atl_dev_set_link_down(struct rte_eth_dev *dev) +{ + struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + return hw->aq_fw_ops->set_link_speed(hw, 0); +} + +/* + * Reset and stop device. + */ +static void +atl_dev_close(struct rte_eth_dev *dev) +{ + PMD_INIT_FUNC_TRACE(); + + atl_dev_stop(dev); + + atl_free_queues(dev); +} + +static int +atl_dev_reset(struct rte_eth_dev *dev) +{ + int ret; + + ret = eth_atl_dev_uninit(dev); + if (ret) + return ret; + + ret = eth_atl_dev_init(dev); + + return ret; +} + +static int +atl_dev_configure_macsec(struct rte_eth_dev *dev) +{ + struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private); + struct aq_macsec_config *aqcfg = &cf->aq_macsec; + struct macsec_msg_fw_request msg_macsec; + struct macsec_msg_fw_response response; + + if (!aqcfg->common.macsec_enabled || + hw->aq_fw_ops->send_macsec_req == NULL) + return 0; + + memset(&msg_macsec, 0, sizeof(msg_macsec)); + + /* Creating set of sc/sa structures from parameters provided by DPDK */ + + /* Configure macsec */ + msg_macsec.msg_type = macsec_cfg_msg; + msg_macsec.cfg.enabled = aqcfg->common.macsec_enabled; + msg_macsec.cfg.interrupts_enabled = 1; + + hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response); + + if (response.result) + return -1; + + memset(&msg_macsec, 0, sizeof(msg_macsec)); + + /* Configure TX SC */ + + msg_macsec.msg_type = macsec_add_tx_sc_msg; + msg_macsec.txsc.index = 0; /* TXSC always one (??) 
*/ + msg_macsec.txsc.protect = aqcfg->common.encryption_enabled; + + /* MAC addr for TX */ + msg_macsec.txsc.mac_sa[0] = rte_bswap32(aqcfg->txsc.mac[1]); + msg_macsec.txsc.mac_sa[1] = rte_bswap32(aqcfg->txsc.mac[0]); + msg_macsec.txsc.sa_mask = 0x3f; + + msg_macsec.txsc.da_mask = 0; + msg_macsec.txsc.tci = 0x0B; + msg_macsec.txsc.curr_an = 0; /* SA index which currently used */ + + /* + * Creating SCI (Secure Channel Identifier). + * SCI constructed from Source MAC and Port identifier + */ + uint32_t sci_hi_part = (msg_macsec.txsc.mac_sa[1] << 16) | + (msg_macsec.txsc.mac_sa[0] >> 16); + uint32_t sci_low_part = (msg_macsec.txsc.mac_sa[0] << 16); + + uint32_t port_identifier = 1; + + msg_macsec.txsc.sci[1] = sci_hi_part; + msg_macsec.txsc.sci[0] = sci_low_part | port_identifier; + + hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response); + + if (response.result) + return -1; + + memset(&msg_macsec, 0, sizeof(msg_macsec)); + + /* Configure RX SC */ + + msg_macsec.msg_type = macsec_add_rx_sc_msg; + msg_macsec.rxsc.index = aqcfg->rxsc.pi; + msg_macsec.rxsc.replay_protect = + aqcfg->common.replay_protection_enabled; + msg_macsec.rxsc.anti_replay_window = 0; + + /* MAC addr for RX */ + msg_macsec.rxsc.mac_da[0] = rte_bswap32(aqcfg->rxsc.mac[1]); + msg_macsec.rxsc.mac_da[1] = rte_bswap32(aqcfg->rxsc.mac[0]); + msg_macsec.rxsc.da_mask = 0;//0x3f; + + msg_macsec.rxsc.sa_mask = 0; + + hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response); + + if (response.result) + return -1; + + memset(&msg_macsec, 0, sizeof(msg_macsec)); + + /* Configure RX SC */ + + msg_macsec.msg_type = macsec_add_tx_sa_msg; + msg_macsec.txsa.index = aqcfg->txsa.idx; + msg_macsec.txsa.next_pn = aqcfg->txsa.pn; + + msg_macsec.txsa.key[0] = rte_bswap32(aqcfg->txsa.key[3]); + msg_macsec.txsa.key[1] = rte_bswap32(aqcfg->txsa.key[2]); + msg_macsec.txsa.key[2] = rte_bswap32(aqcfg->txsa.key[1]); + msg_macsec.txsa.key[3] = rte_bswap32(aqcfg->txsa.key[0]); + + hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response); + + if (response.result) + return -1; + + memset(&msg_macsec, 0, sizeof(msg_macsec)); + + /* Configure RX SA */ + + msg_macsec.msg_type = macsec_add_rx_sa_msg; + msg_macsec.rxsa.index = aqcfg->rxsa.idx; + msg_macsec.rxsa.next_pn = aqcfg->rxsa.pn; + + msg_macsec.rxsa.key[0] = rte_bswap32(aqcfg->rxsa.key[3]); + msg_macsec.rxsa.key[1] = rte_bswap32(aqcfg->rxsa.key[2]); + msg_macsec.rxsa.key[2] = rte_bswap32(aqcfg->rxsa.key[1]); + msg_macsec.rxsa.key[3] = rte_bswap32(aqcfg->rxsa.key[0]); + + hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response); + + if (response.result) + return -1; + + return 0; +} + +int atl_macsec_enable(struct rte_eth_dev *dev, + uint8_t encr, uint8_t repl_prot) +{ + struct aq_hw_cfg_s *cfg = + ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private); + + cfg->aq_macsec.common.macsec_enabled = 1; + cfg->aq_macsec.common.encryption_enabled = encr; + cfg->aq_macsec.common.replay_protection_enabled = repl_prot; + + return 0; +} + +int atl_macsec_disable(struct rte_eth_dev *dev) +{ + struct aq_hw_cfg_s *cfg = + ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private); + + cfg->aq_macsec.common.macsec_enabled = 0; + + return 0; +} + +int atl_macsec_config_txsc(struct rte_eth_dev *dev, uint8_t *mac) +{ + struct aq_hw_cfg_s *cfg = + ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private); + + memset(&cfg->aq_macsec.txsc.mac, 0, sizeof(cfg->aq_macsec.txsc.mac)); + memcpy((uint8_t *)&cfg->aq_macsec.txsc.mac + 2, mac, + RTE_ETHER_ADDR_LEN); + + return 0; +} + +int atl_macsec_config_rxsc(struct rte_eth_dev *dev, + uint8_t *mac, 
uint16_t pi) +{ + struct aq_hw_cfg_s *cfg = + ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private); + + memset(&cfg->aq_macsec.rxsc.mac, 0, sizeof(cfg->aq_macsec.rxsc.mac)); + memcpy((uint8_t *)&cfg->aq_macsec.rxsc.mac + 2, mac, + RTE_ETHER_ADDR_LEN); + cfg->aq_macsec.rxsc.pi = pi; + + return 0; +} + +int atl_macsec_select_txsa(struct rte_eth_dev *dev, + uint8_t idx, uint8_t an, + uint32_t pn, uint8_t *key) +{ + struct aq_hw_cfg_s *cfg = + ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private); + + cfg->aq_macsec.txsa.idx = idx; + cfg->aq_macsec.txsa.pn = pn; + cfg->aq_macsec.txsa.an = an; + + memcpy(&cfg->aq_macsec.txsa.key, key, 16); + return 0; +} + +int atl_macsec_select_rxsa(struct rte_eth_dev *dev, + uint8_t idx, uint8_t an, + uint32_t pn, uint8_t *key) +{ + struct aq_hw_cfg_s *cfg = + ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private); + + cfg->aq_macsec.rxsa.idx = idx; + cfg->aq_macsec.rxsa.pn = pn; + cfg->aq_macsec.rxsa.an = an; + + memcpy(&cfg->aq_macsec.rxsa.key, key, 16); + return 0; +} + +static int +atl_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) +{ + struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev); + struct aq_hw_s *hw = &adapter->hw; + struct atl_sw_stats *swstats = &adapter->sw_stats; + unsigned int i; + + hw->aq_fw_ops->update_stats(hw); + + /* Fill out the rte_eth_stats statistics structure */ + stats->ipackets = hw->curr_stats.dma_pkt_rc; + stats->ibytes = hw->curr_stats.dma_oct_rc; + stats->imissed = hw->curr_stats.dpc; + stats->ierrors = hw->curr_stats.erpt; + + stats->opackets = hw->curr_stats.dma_pkt_tc; + stats->obytes = hw->curr_stats.dma_oct_tc; + stats->oerrors = 0; + + stats->rx_nombuf = swstats->rx_nombuf; + + for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { + stats->q_ipackets[i] = swstats->q_ipackets[i]; + stats->q_opackets[i] = swstats->q_opackets[i]; + stats->q_ibytes[i] = swstats->q_ibytes[i]; + stats->q_obytes[i] = swstats->q_obytes[i]; + stats->q_errors[i] = swstats->q_errors[i]; + } + return 0; +} + +static int +atl_dev_stats_reset(struct rte_eth_dev *dev) +{ + struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev); + struct aq_hw_s *hw = &adapter->hw; + + hw->aq_fw_ops->update_stats(hw); + + /* Reset software totals */ + memset(&hw->curr_stats, 0, sizeof(hw->curr_stats)); + + memset(&adapter->sw_stats, 0, sizeof(adapter->sw_stats)); + + return 0; +} + +static int +atl_dev_xstats_get_count(struct rte_eth_dev *dev) +{ + struct atl_adapter *adapter = + (struct atl_adapter *)dev->data->dev_private; + + struct aq_hw_s *hw = &adapter->hw; + unsigned int i, count = 0; + + for (i = 0; i < RTE_DIM(atl_xstats_tbl); i++) { + if (atl_xstats_tbl[i].type == XSTATS_TYPE_MACSEC && + ((hw->caps_lo & BIT(CAPS_LO_MACSEC)) == 0)) + continue; + + count++; + } + + return count; +} + +static int +atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused, + struct rte_eth_xstat_name *xstats_names, + unsigned int size) +{ + unsigned int i; + unsigned int count = atl_dev_xstats_get_count(dev); + + if (xstats_names) { + for (i = 0; i < size && i < count; i++) { + snprintf(xstats_names[i].name, + RTE_ETH_XSTATS_NAME_SIZE, "%s", + atl_xstats_tbl[i].name); + } + } + + return count; +} + +static int +atl_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats, + unsigned int n) +{ + struct atl_adapter *adapter = dev->data->dev_private; + struct aq_hw_s *hw = &adapter->hw; + struct get_stats req = { 0 }; + struct macsec_msg_fw_request msg = { 0 }; + struct macsec_msg_fw_response resp = { 0 }; + int err = -1; + unsigned int i; + unsigned int count = 
atl_dev_xstats_get_count(dev); + + if (!stats) + return count; + + if (hw->aq_fw_ops->send_macsec_req != NULL) { + req.ingress_sa_index = 0xff; + req.egress_sc_index = 0xff; + req.egress_sa_index = 0xff; + + msg.msg_type = macsec_get_stats_msg; + msg.stats = req; + + err = hw->aq_fw_ops->send_macsec_req(hw, &msg, &resp); + } + + for (i = 0; i < n && i < count; i++) { + stats[i].id = i; + + switch (atl_xstats_tbl[i].type) { + case XSTATS_TYPE_MSM: + stats[i].value = *(u64 *)((uint8_t *)&hw->curr_stats + + atl_xstats_tbl[i].offset); + break; + case XSTATS_TYPE_MACSEC: + if (!err) { + stats[i].value = + *(u64 *)((uint8_t *)&resp.stats + + atl_xstats_tbl[i].offset); + } + break; + } + } + + return i; +} + +static int +atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size) +{ + struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t fw_ver = 0; + unsigned int ret = 0; + + ret = hw_atl_utils_get_fw_version(hw, &fw_ver); + if (ret) + return -EIO; + + ret = snprintf(fw_version, fw_size, "%u.%u.%u", fw_ver >> 24, + (fw_ver >> 16) & 0xFFU, fw_ver & 0xFFFFU); + + ret += 1; /* add string null-terminator */ + + if (fw_size < ret) + return ret; + + return 0; +} + +static int +atl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) +{ + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + + dev_info->max_rx_queues = AQ_HW_MAX_RX_QUEUES; + dev_info->max_tx_queues = AQ_HW_MAX_TX_QUEUES; + + dev_info->min_rx_bufsize = 1024; + dev_info->max_rx_pktlen = HW_ATL_B0_MTU_JUMBO; + dev_info->max_mac_addrs = HW_ATL_B0_MAC_MAX; + dev_info->max_vfs = pci_dev->max_vfs; + + dev_info->max_hash_mac_addrs = 0; + dev_info->max_vmdq_pools = 0; + dev_info->vmdq_queue_num = 0; + + dev_info->rx_offload_capa = ATL_RX_OFFLOADS; + + dev_info->tx_offload_capa = ATL_TX_OFFLOADS; + + + dev_info->default_rxconf = (struct rte_eth_rxconf) { + .rx_free_thresh = ATL_DEFAULT_RX_FREE_THRESH, + }; + + dev_info->default_txconf = (struct rte_eth_txconf) { + .tx_free_thresh = ATL_DEFAULT_TX_FREE_THRESH, + }; + + dev_info->rx_desc_lim = rx_desc_lim; + dev_info->tx_desc_lim = tx_desc_lim; + + dev_info->hash_key_size = HW_ATL_B0_RSS_HASHKEY_BITS / 8; + dev_info->reta_size = HW_ATL_B0_RSS_REDIRECTION_MAX; + dev_info->flow_type_rss_offloads = ATL_RSS_OFFLOAD_ALL; + + dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G; + dev_info->speed_capa |= ETH_LINK_SPEED_100M; + dev_info->speed_capa |= ETH_LINK_SPEED_2_5G; + dev_info->speed_capa |= ETH_LINK_SPEED_5G; + + return 0; +} + +static const uint32_t * +atl_dev_supported_ptypes_get(struct rte_eth_dev *dev) +{ + static const uint32_t ptypes[] = { + RTE_PTYPE_L2_ETHER, + RTE_PTYPE_L2_ETHER_ARP, + RTE_PTYPE_L2_ETHER_VLAN, + RTE_PTYPE_L3_IPV4, + RTE_PTYPE_L3_IPV6, + RTE_PTYPE_L4_TCP, + RTE_PTYPE_L4_UDP, + RTE_PTYPE_L4_SCTP, + RTE_PTYPE_L4_ICMP, + RTE_PTYPE_UNKNOWN + }; + + if (dev->rx_pkt_burst == atl_recv_pkts) + return ptypes; + + return NULL; +} + +static void +atl_dev_delayed_handler(void *param) +{ + struct rte_eth_dev *dev = (struct rte_eth_dev *)param; + + atl_dev_configure_macsec(dev); +} + + +/* return 0 means link status changed, -1 means not changed */ +static int +atl_dev_link_update(struct rte_eth_dev *dev, int wait __rte_unused) +{ + struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_eth_link link, old; + u32 fc = AQ_NIC_FC_OFF; + int err = 0; + + link.link_status = ETH_LINK_DOWN; + link.link_speed = 0; + link.link_duplex = ETH_LINK_FULL_DUPLEX; + link.link_autoneg = hw->is_autoneg 
? ETH_LINK_AUTONEG : ETH_LINK_FIXED; + memset(&old, 0, sizeof(old)); + + /* load old link status */ + rte_eth_linkstatus_get(dev, &old); + + /* read current link status */ + err = hw->aq_fw_ops->update_link_status(hw); + + if (err) + return 0; + + if (hw->aq_link_status.mbps == 0) { + /* write default (down) link status */ + rte_eth_linkstatus_set(dev, &link); + if (link.link_status == old.link_status) + return -1; + return 0; + } + + link.link_status = ETH_LINK_UP; + link.link_duplex = ETH_LINK_FULL_DUPLEX; + link.link_speed = hw->aq_link_status.mbps; + + rte_eth_linkstatus_set(dev, &link); + + if (link.link_status == old.link_status) + return -1; + + /* Driver has to update flow control settings on RX block + * on any link event. + * We should query FW whether it negotiated FC. + */ + if (hw->aq_fw_ops->get_flow_control) { + hw->aq_fw_ops->get_flow_control(hw, &fc); + hw_atl_b0_set_fc(hw, fc, 0U); + } + + if (rte_eal_alarm_set(1000 * 1000, + atl_dev_delayed_handler, (void *)dev) < 0) + PMD_DRV_LOG(ERR, "rte_eal_alarm_set fail"); + + return 0; +} + +static int +atl_dev_promiscuous_enable(struct rte_eth_dev *dev) +{ + struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + hw_atl_rpfl2promiscuous_mode_en_set(hw, true); + + return 0; +} + +static int +atl_dev_promiscuous_disable(struct rte_eth_dev *dev) +{ + struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + hw_atl_rpfl2promiscuous_mode_en_set(hw, false); + + return 0; +} + +static int +atl_dev_allmulticast_enable(struct rte_eth_dev *dev) +{ + struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + hw_atl_rpfl2_accept_all_mc_packets_set(hw, true); + + return 0; +} + +static int +atl_dev_allmulticast_disable(struct rte_eth_dev *dev) +{ + struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (dev->data->promiscuous == 1) + return 0; /* must remain in all_multicast mode */ + + hw_atl_rpfl2_accept_all_mc_packets_set(hw, false); + + return 0; +} + +/** + * It clears the interrupt causes and enables the interrupt. + * It will be called once only during nic initialized. + * + * @param dev + * Pointer to struct rte_eth_dev. + * @param on + * Enable or Disable. + * + * @return + * - On success, zero. + * - On failure, a negative value. + */ + +static int +atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on __rte_unused) +{ + atl_dev_link_status_print(dev); + return 0; +} + +static int +atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev __rte_unused) +{ + return 0; +} + + +static int +atl_dev_interrupt_get_status(struct rte_eth_dev *dev) +{ + struct atl_interrupt *intr = + ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private); + u64 cause = 0; + + hw_atl_b0_hw_irq_read(hw, &cause); + + atl_disable_intr(hw); + + if (cause & BIT(ATL_IRQ_CAUSE_LINK)) + intr->flags |= ATL_FLAG_NEED_LINK_UPDATE; + + return 0; +} + +/** + * It gets and then prints the link status. + * + * @param dev + * Pointer to struct rte_eth_dev. + * + * @return + * - On success, zero. + * - On failure, a negative value. + */ +static void +atl_dev_link_status_print(struct rte_eth_dev *dev) +{ + struct rte_eth_link link; + + memset(&link, 0, sizeof(link)); + rte_eth_linkstatus_get(dev, &link); + if (link.link_status) { + PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s", + (int)(dev->data->port_id), + (unsigned int)link.link_speed, + link.link_duplex == ETH_LINK_FULL_DUPLEX ? 
+ "full-duplex" : "half-duplex"); + } else { + PMD_DRV_LOG(INFO, " Port %d: Link Down", + (int)(dev->data->port_id)); + } + + +#ifdef DEBUG +{ + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + + PMD_DRV_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT, + pci_dev->addr.domain, + pci_dev->addr.bus, + pci_dev->addr.devid, + pci_dev->addr.function); +} +#endif + + PMD_DRV_LOG(INFO, "Link speed:%d", link.link_speed); +} + +/* + * It executes link_update after knowing an interrupt occurred. + * + * @param dev + * Pointer to struct rte_eth_dev. + * + * @return + * - On success, zero. + * - On failure, a negative value. + */ +static int +atl_dev_interrupt_action(struct rte_eth_dev *dev, + struct rte_intr_handle *intr_handle) +{ + struct atl_interrupt *intr = + ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + struct atl_adapter *adapter = dev->data->dev_private; + struct aq_hw_s *hw = &adapter->hw; + + if (!(intr->flags & ATL_FLAG_NEED_LINK_UPDATE)) + goto done; + + intr->flags &= ~ATL_FLAG_NEED_LINK_UPDATE; + + /* Notify userapp if link status changed */ + if (!atl_dev_link_update(dev, 0)) { + atl_dev_link_status_print(dev); + _rte_eth_dev_callback_process(dev, + RTE_ETH_EVENT_INTR_LSC, NULL); + } else { + if (hw->aq_fw_ops->send_macsec_req == NULL) + goto done; + + /* Check macsec Keys expired */ + struct get_stats req = { 0 }; + struct macsec_msg_fw_request msg = { 0 }; + struct macsec_msg_fw_response resp = { 0 }; + + req.ingress_sa_index = 0x0; + req.egress_sc_index = 0x0; + req.egress_sa_index = 0x0; + msg.msg_type = macsec_get_stats_msg; + msg.stats = req; + + int err = hw->aq_fw_ops->send_macsec_req(hw, &msg, &resp); + if (err) { + PMD_DRV_LOG(ERR, "send_macsec_req fail"); + goto done; + } + if (resp.stats.egress_threshold_expired || + resp.stats.ingress_threshold_expired || + resp.stats.egress_expired || + resp.stats.ingress_expired) { + PMD_DRV_LOG(INFO, "RTE_ETH_EVENT_MACSEC"); + _rte_eth_dev_callback_process(dev, + RTE_ETH_EVENT_MACSEC, NULL); + } + } +done: + atl_enable_intr(dev); + rte_intr_ack(intr_handle); + + return 0; +} + +/** + * Interrupt handler triggered by NIC for handling + * specific interrupt. + * + * @param handle + * Pointer to interrupt handle. + * @param param + * The address of parameter (struct rte_eth_dev *) regsitered before. 
+ * + * @return + * void + */ +static void +atl_dev_interrupt_handler(void *param) +{ + struct rte_eth_dev *dev = (struct rte_eth_dev *)param; + + atl_dev_interrupt_get_status(dev); + atl_dev_interrupt_action(dev, dev->intr_handle); +} + + +static int +atl_dev_get_eeprom_length(struct rte_eth_dev *dev __rte_unused) +{ + return SFP_EEPROM_SIZE; +} + +int atl_dev_get_eeprom(struct rte_eth_dev *dev, + struct rte_dev_eeprom_info *eeprom) +{ + struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t dev_addr = SMBUS_DEVICE_ID; + + if (hw->aq_fw_ops->get_eeprom == NULL) + return -ENOTSUP; + + if (eeprom->length + eeprom->offset > SFP_EEPROM_SIZE || + eeprom->data == NULL) + return -EINVAL; + + if (eeprom->magic > 0x7F) + return -EINVAL; + + if (eeprom->magic) + dev_addr = eeprom->magic; + + return hw->aq_fw_ops->get_eeprom(hw, dev_addr, eeprom->data, + eeprom->length, eeprom->offset); +} + +int atl_dev_set_eeprom(struct rte_eth_dev *dev, + struct rte_dev_eeprom_info *eeprom) +{ + struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t dev_addr = SMBUS_DEVICE_ID; + + if (hw->aq_fw_ops->set_eeprom == NULL) + return -ENOTSUP; + + if (eeprom->length + eeprom->offset > SFP_EEPROM_SIZE || + eeprom->data == NULL) + return -EINVAL; + + if (eeprom->magic > 0x7F) + return -EINVAL; + + if (eeprom->magic) + dev_addr = eeprom->magic; + + return hw->aq_fw_ops->set_eeprom(hw, dev_addr, eeprom->data, + eeprom->length, eeprom->offset); +} + +static int +atl_dev_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs) +{ + struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private); + u32 mif_id; + int err; + + if (regs->data == NULL) { + regs->length = hw_atl_utils_hw_get_reg_length(); + regs->width = sizeof(u32); + return 0; + } + + /* Only full register dump is supported */ + if (regs->length && regs->length != hw_atl_utils_hw_get_reg_length()) + return -ENOTSUP; + + err = hw_atl_utils_hw_get_regs(hw, regs->data); + + /* Device version */ + mif_id = hw_atl_reg_glb_mif_id_get(hw); + regs->version = mif_id & 0xFFU; + + return err; +} + +static int +atl_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) +{ + struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private); + u32 fc = AQ_NIC_FC_OFF; + + if (hw->aq_fw_ops->get_flow_control == NULL) + return -ENOTSUP; + + hw->aq_fw_ops->get_flow_control(hw, &fc); + + if (fc == AQ_NIC_FC_OFF) + fc_conf->mode = RTE_FC_NONE; + else if ((fc & AQ_NIC_FC_RX) && (fc & AQ_NIC_FC_TX)) + fc_conf->mode = RTE_FC_FULL; + else if (fc & AQ_NIC_FC_RX) + fc_conf->mode = RTE_FC_RX_PAUSE; + else if (fc & AQ_NIC_FC_TX) + fc_conf->mode = RTE_FC_TX_PAUSE; + + return 0; +} + +static int +atl_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) +{ + struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t old_flow_control = hw->aq_nic_cfg->flow_control; + + + if (hw->aq_fw_ops->set_flow_control == NULL) + return -ENOTSUP; + + if (fc_conf->mode == RTE_FC_NONE) + hw->aq_nic_cfg->flow_control = AQ_NIC_FC_OFF; + else if (fc_conf->mode == RTE_FC_RX_PAUSE) + hw->aq_nic_cfg->flow_control = AQ_NIC_FC_RX; + else if (fc_conf->mode == RTE_FC_TX_PAUSE) + hw->aq_nic_cfg->flow_control = AQ_NIC_FC_TX; + else if (fc_conf->mode == RTE_FC_FULL) + hw->aq_nic_cfg->flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX); + + if (old_flow_control != hw->aq_nic_cfg->flow_control) + return hw->aq_fw_ops->set_flow_control(hw); + + return 0; +} + +static int +atl_update_mac_addr(struct rte_eth_dev *dev, 
uint32_t index, + u8 *mac_addr, bool enable) +{ + struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private); + unsigned int h = 0U; + unsigned int l = 0U; + int err; + + if (mac_addr) { + h = (mac_addr[0] << 8) | (mac_addr[1]); + l = (mac_addr[2] << 24) | (mac_addr[3] << 16) | + (mac_addr[4] << 8) | mac_addr[5]; + } + + hw_atl_rpfl2_uc_flr_en_set(hw, 0U, index); + hw_atl_rpfl2unicast_dest_addresslsw_set(hw, l, index); + hw_atl_rpfl2unicast_dest_addressmsw_set(hw, h, index); + + if (enable) + hw_atl_rpfl2_uc_flr_en_set(hw, 1U, index); + + err = aq_hw_err_from_flags(hw); + + return err; +} + +static int +atl_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr, + uint32_t index __rte_unused, uint32_t pool __rte_unused) +{ + if (rte_is_zero_ether_addr(mac_addr)) { + PMD_DRV_LOG(ERR, "Invalid Ethernet Address"); + return -EINVAL; + } + + return atl_update_mac_addr(dev, index, (u8 *)mac_addr, true); +} + +static void +atl_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index) +{ + atl_update_mac_addr(dev, index, NULL, false); +} + +static int +atl_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr) +{ + atl_remove_mac_addr(dev, 0); + atl_add_mac_addr(dev, addr, 0, 0); + return 0; +} + +static int +atl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) +{ + struct rte_eth_dev_info dev_info; + int ret; + uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN; + + ret = atl_dev_info_get(dev, &dev_info); + if (ret != 0) + return ret; + + if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen) + return -EINVAL; + + /* update max frame size */ + dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size; + + return 0; +} + +static int +atl_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) +{ + struct aq_hw_cfg_s *cfg = + ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private); + struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int err = 0; + int i = 0; + + PMD_INIT_FUNC_TRACE(); + + for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) { + if (cfg->vlan_filter[i] == vlan_id) { + if (!on) { + /* Disable VLAN filter. */ + hw_atl_rpf_vlan_flr_en_set(hw, 0U, i); + + /* Clear VLAN filter entry */ + cfg->vlan_filter[i] = 0; + } + break; + } + } + + /* VLAN_ID was not found. So, nothing to delete. */ + if (i == HW_ATL_B0_MAX_VLAN_IDS && !on) + goto exit; + + /* VLAN_ID already exist, or already removed above. Nothing to do. 
*/ + if (i != HW_ATL_B0_MAX_VLAN_IDS) + goto exit; + + /* Try to found free VLAN filter to add new VLAN_ID */ + for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) { + if (cfg->vlan_filter[i] == 0) + break; + } + + if (i == HW_ATL_B0_MAX_VLAN_IDS) { + /* We have no free VLAN filter to add new VLAN_ID*/ + err = -ENOMEM; + goto exit; + } + + cfg->vlan_filter[i] = vlan_id; + hw_atl_rpf_vlan_flr_act_set(hw, 1U, i); + hw_atl_rpf_vlan_id_flr_set(hw, vlan_id, i); + hw_atl_rpf_vlan_flr_en_set(hw, 1U, i); + +exit: + /* Enable VLAN promisc mode if vlan_filter empty */ + for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) { + if (cfg->vlan_filter[i] != 0) + break; + } + + hw_atl_rpf_vlan_prom_mode_en_set(hw, i == HW_ATL_B0_MAX_VLAN_IDS); + + return err; +} + +static int +atl_enable_vlan_filter(struct rte_eth_dev *dev, int en) +{ + struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct aq_hw_cfg_s *cfg = + ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private); + int i; + + PMD_INIT_FUNC_TRACE(); + + for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) { + if (cfg->vlan_filter[i]) + hw_atl_rpf_vlan_flr_en_set(hw, en, i); + } + return 0; +} + +static int +atl_vlan_offload_set(struct rte_eth_dev *dev, int mask) +{ + struct aq_hw_cfg_s *cfg = + ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private); + struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int ret = 0; + int i; + + PMD_INIT_FUNC_TRACE(); + + ret = atl_enable_vlan_filter(dev, mask & ETH_VLAN_FILTER_MASK); + + cfg->vlan_strip = !!(mask & ETH_VLAN_STRIP_MASK); + + for (i = 0; i < dev->data->nb_rx_queues; i++) + hw_atl_rpo_rx_desc_vlan_stripping_set(hw, cfg->vlan_strip, i); + + if (mask & ETH_VLAN_EXTEND_MASK) + ret = -ENOTSUP; + + return ret; +} + +static int +atl_vlan_tpid_set(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type, + uint16_t tpid) +{ + struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int err = 0; + + PMD_INIT_FUNC_TRACE(); + + switch (vlan_type) { + case ETH_VLAN_TYPE_INNER: + hw_atl_rpf_vlan_inner_etht_set(hw, tpid); + break; + case ETH_VLAN_TYPE_OUTER: + hw_atl_rpf_vlan_outer_etht_set(hw, tpid); + break; + default: + PMD_DRV_LOG(ERR, "Unsupported VLAN type"); + err = -ENOTSUP; + } + + return err; +} + +static void +atl_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue_id, int on) +{ + struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + PMD_INIT_FUNC_TRACE(); + + if (queue_id > dev->data->nb_rx_queues) { + PMD_DRV_LOG(ERR, "Invalid queue id"); + return; + } + + hw_atl_rpo_rx_desc_vlan_stripping_set(hw, on, queue_id); +} + +static int +atl_dev_set_mc_addr_list(struct rte_eth_dev *dev, + struct rte_ether_addr *mc_addr_set, + uint32_t nb_mc_addr) +{ + struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private); + u32 i; + + if (nb_mc_addr > AQ_HW_MULTICAST_ADDRESS_MAX - HW_ATL_B0_MAC_MIN) + return -EINVAL; + + /* Update whole uc filters table */ + for (i = 0; i < AQ_HW_MULTICAST_ADDRESS_MAX - HW_ATL_B0_MAC_MIN; i++) { + u8 *mac_addr = NULL; + u32 l = 0, h = 0; + + if (i < nb_mc_addr) { + mac_addr = mc_addr_set[i].addr_bytes; + l = (mac_addr[2] << 24) | (mac_addr[3] << 16) | + (mac_addr[4] << 8) | mac_addr[5]; + h = (mac_addr[0] << 8) | mac_addr[1]; + } + + hw_atl_rpfl2_uc_flr_en_set(hw, 0U, HW_ATL_B0_MAC_MIN + i); + hw_atl_rpfl2unicast_dest_addresslsw_set(hw, l, + HW_ATL_B0_MAC_MIN + i); + hw_atl_rpfl2unicast_dest_addressmsw_set(hw, h, + HW_ATL_B0_MAC_MIN + i); + hw_atl_rpfl2_uc_flr_en_set(hw, !!mac_addr, + HW_ATL_B0_MAC_MIN + i); + } + + return 0; +} + +static int 
+atl_reta_update(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size) +{ + int i; + struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private); + + for (i = 0; i < reta_size && i < cf->aq_rss.indirection_table_size; i++) + cf->aq_rss.indirection_table[i] = min(reta_conf->reta[i], + dev->data->nb_rx_queues - 1); + + hw_atl_b0_hw_rss_set(hw, &cf->aq_rss); + return 0; +} + +static int +atl_reta_query(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size) +{ + int i; + struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private); + + for (i = 0; i < reta_size && i < cf->aq_rss.indirection_table_size; i++) + reta_conf->reta[i] = cf->aq_rss.indirection_table[i]; + reta_conf->mask = ~0U; + return 0; +} + +static int +atl_rss_hash_update(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf) +{ + struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct aq_hw_cfg_s *cfg = + ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private); + static u8 def_rss_key[40] = { + 0x1e, 0xad, 0x71, 0x87, 0x65, 0xfc, 0x26, 0x7d, + 0x0d, 0x45, 0x67, 0x74, 0xcd, 0x06, 0x1a, 0x18, + 0xb6, 0xc1, 0xf0, 0xc7, 0xbb, 0x18, 0xbe, 0xf8, + 0x19, 0x13, 0x4b, 0xa9, 0xd0, 0x3e, 0xfe, 0x70, + 0x25, 0x03, 0xab, 0x50, 0x6a, 0x8b, 0x82, 0x0c + }; + + cfg->is_rss = !!rss_conf->rss_hf; + if (rss_conf->rss_key) { + memcpy(cfg->aq_rss.hash_secret_key, rss_conf->rss_key, + rss_conf->rss_key_len); + cfg->aq_rss.hash_secret_key_size = rss_conf->rss_key_len; + } else { + memcpy(cfg->aq_rss.hash_secret_key, def_rss_key, + sizeof(def_rss_key)); + cfg->aq_rss.hash_secret_key_size = sizeof(def_rss_key); + } + + hw_atl_b0_hw_rss_set(hw, &cfg->aq_rss); + hw_atl_b0_hw_rss_hash_set(hw, &cfg->aq_rss); + return 0; +} + +static int +atl_rss_hash_conf_get(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf) +{ + struct aq_hw_cfg_s *cfg = + ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private); + + rss_conf->rss_hf = cfg->is_rss ? 
ATL_RSS_OFFLOAD_ALL : 0; + if (rss_conf->rss_key) { + rss_conf->rss_key_len = cfg->aq_rss.hash_secret_key_size; + memcpy(rss_conf->rss_key, cfg->aq_rss.hash_secret_key, + rss_conf->rss_key_len); + } + + return 0; +} + +static bool +is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv) +{ + if (strcmp(dev->device->driver->name, drv->driver.name)) + return false; + + return true; +} + +bool +is_atlantic_supported(struct rte_eth_dev *dev) +{ + return is_device_supported(dev, &rte_atl_pmd); +} + +RTE_PMD_REGISTER_PCI(net_atlantic, rte_atl_pmd); +RTE_PMD_REGISTER_PCI_TABLE(net_atlantic, pci_id_atl_map); +RTE_PMD_REGISTER_KMOD_DEP(net_atlantic, "* igb_uio | uio_pci_generic"); + +RTE_INIT(atl_init_log) +{ + atl_logtype_init = rte_log_register("pmd.net.atlantic.init"); + if (atl_logtype_init >= 0) + rte_log_set_level(atl_logtype_init, RTE_LOG_NOTICE); + atl_logtype_driver = rte_log_register("pmd.net.atlantic.driver"); + if (atl_logtype_driver >= 0) + rte_log_set_level(atl_logtype_driver, RTE_LOG_NOTICE); +} diff --git a/src/spdk/dpdk/drivers/net/atlantic/atl_ethdev.h b/src/spdk/dpdk/drivers/net/atlantic/atl_ethdev.h new file mode 100644 index 000000000..f547571b5 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/atlantic/atl_ethdev.h @@ -0,0 +1,119 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Aquantia Corporation + */ + +#ifndef _ATLANTIC_ETHDEV_H_ +#define _ATLANTIC_ETHDEV_H_ +#include +#include "rte_ethdev.h" + +#include "atl_types.h" +#include "hw_atl/hw_atl_utils.h" + +#define ATL_RSS_OFFLOAD_ALL ( \ + ETH_RSS_IPV4 | \ + ETH_RSS_NONFRAG_IPV4_TCP | \ + ETH_RSS_NONFRAG_IPV4_UDP | \ + ETH_RSS_IPV6 | \ + ETH_RSS_NONFRAG_IPV6_TCP | \ + ETH_RSS_NONFRAG_IPV6_UDP | \ + ETH_RSS_IPV6_EX | \ + ETH_RSS_IPV6_TCP_EX | \ + ETH_RSS_IPV6_UDP_EX) + +#define ATL_DEV_PRIVATE_TO_HW(adapter) \ + (&((struct atl_adapter *)adapter)->hw) + +#define ATL_DEV_TO_ADAPTER(dev) \ + ((struct atl_adapter *)(dev)->data->dev_private) + +#define ATL_DEV_PRIVATE_TO_INTR(adapter) \ + (&((struct atl_adapter *)adapter)->intr) + +#define ATL_DEV_PRIVATE_TO_CFG(adapter) \ + (&((struct atl_adapter *)adapter)->hw_cfg) + +#define ATL_FLAG_NEED_LINK_UPDATE (uint32_t)(1 << 0) +#define ATL_FLAG_MACSEC (uint32_t)(4 << 0) + +struct atl_interrupt { + uint32_t flags; + uint32_t mask; +}; + +/* + * Structure to store private data for each driver instance (for each port). 
+ */ +struct atl_adapter { + struct aq_hw_s hw; + struct aq_hw_cfg_s hw_cfg; + struct atl_sw_stats sw_stats; + struct atl_interrupt intr; +}; + +/* + * RX/TX function prototypes + */ +void atl_rx_queue_release(void *rxq); +void atl_tx_queue_release(void *txq); + +int atl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id, + uint16_t nb_rx_desc, unsigned int socket_id, + const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *mb_pool); + +int atl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id, + uint16_t nb_tx_desc, unsigned int socket_id, + const struct rte_eth_txconf *tx_conf); + +uint32_t atl_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id); + +int atl_dev_rx_descriptor_status(void *rx_queue, uint16_t offset); +int atl_dev_tx_descriptor_status(void *tx_queue, uint16_t offset); + +int atl_dev_rx_queue_intr_enable(struct rte_eth_dev *eth_dev, + uint16_t queue_id); +int atl_dev_rx_queue_intr_disable(struct rte_eth_dev *eth_dev, + uint16_t queue_id); + +int atl_rx_init(struct rte_eth_dev *dev); +int atl_tx_init(struct rte_eth_dev *dev); + +int atl_start_queues(struct rte_eth_dev *dev); +int atl_stop_queues(struct rte_eth_dev *dev); +void atl_free_queues(struct rte_eth_dev *dev); + +int atl_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id); +int atl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id); + +int atl_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id); +int atl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id); + +void atl_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, + struct rte_eth_rxq_info *qinfo); + +void atl_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, + struct rte_eth_txq_info *qinfo); + +uint16_t atl_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); + +uint16_t atl_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); + +uint16_t atl_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); + +int atl_macsec_enable(struct rte_eth_dev *dev, uint8_t encr, uint8_t repl_prot); +int atl_macsec_disable(struct rte_eth_dev *dev); +int atl_macsec_config_txsc(struct rte_eth_dev *dev, uint8_t *mac); +int atl_macsec_config_rxsc(struct rte_eth_dev *dev, + uint8_t *mac, uint16_t pi); +int atl_macsec_select_txsa(struct rte_eth_dev *dev, uint8_t idx, + uint8_t an, uint32_t pn, uint8_t *key); +int atl_macsec_select_rxsa(struct rte_eth_dev *dev, uint8_t idx, + uint8_t an, uint32_t pn, uint8_t *key); + +bool is_atlantic_supported(struct rte_eth_dev *dev); + +#endif /* _ATLANTIC_ETHDEV_H_ */ diff --git a/src/spdk/dpdk/drivers/net/atlantic/atl_hw_regs.c b/src/spdk/dpdk/drivers/net/atlantic/atl_hw_regs.c new file mode 100644 index 000000000..bd42c8341 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/atlantic/atl_hw_regs.c @@ -0,0 +1,52 @@ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) +/* Copyright (C) 2014-2017 aQuantia Corporation. */ + +/* File aq_hw_utils.c: Definitions of helper functions used across + * hardware layer. 
+ */ + +#include "atl_hw_regs.h" + +#include +#include + +void aq_hw_write_reg_bit(struct aq_hw_s *aq_hw, u32 addr, u32 msk, + u32 shift, u32 val) +{ + if (msk ^ ~0) { + u32 reg_old, reg_new; + + reg_old = aq_hw_read_reg(aq_hw, addr); + reg_new = (reg_old & (~msk)) | (val << shift); + + if (reg_old != reg_new) + aq_hw_write_reg(aq_hw, addr, reg_new); + } else { + aq_hw_write_reg(aq_hw, addr, val); + } +} + +u32 aq_hw_read_reg_bit(struct aq_hw_s *aq_hw, u32 addr, u32 msk, u32 shift) +{ + return ((aq_hw_read_reg(aq_hw, addr) & msk) >> shift); +} + +u32 aq_hw_read_reg(struct aq_hw_s *hw, u32 reg) +{ + return rte_le_to_cpu_32(rte_read32((u8 *)hw->mmio + reg)); +} + +void aq_hw_write_reg(struct aq_hw_s *hw, u32 reg, u32 value) +{ + rte_write32((rte_cpu_to_le_32(value)), (u8 *)hw->mmio + reg); +} + +int aq_hw_err_from_flags(struct aq_hw_s *hw) +{ + int err = 0; + + if (aq_hw_read_reg(hw, 0x10U) == ~0U) + return -ENXIO; + + return err; +} diff --git a/src/spdk/dpdk/drivers/net/atlantic/atl_hw_regs.h b/src/spdk/dpdk/drivers/net/atlantic/atl_hw_regs.h new file mode 100644 index 000000000..a2d6ca804 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/atlantic/atl_hw_regs.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) */ +/* Copyright (C) 2014-2017 aQuantia Corporation. */ + +/* File aq_hw_utils.h: Declaration of helper functions used across hardware + * layer. + */ + +#ifndef AQ_HW_UTILS_H +#define AQ_HW_UTILS_H + +#include +#include +#include +#include +#include +#include "atl_common.h" +#include "atl_types.h" + + +#ifndef HIDWORD +#define LODWORD(_qw) ((u32)(_qw)) +#define HIDWORD(_qw) ((u32)(((_qw) >> 32) & 0xffffffff)) +#endif + +#define AQ_HW_SLEEP(_US_) rte_delay_ms(_US_) + +#define mdelay rte_delay_ms +#define udelay rte_delay_us +#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0])) +#define BIT(x) (1UL << (x)) + +#define AQ_HW_WAIT_FOR(_B_, _US_, _N_) \ +do { \ + unsigned int AQ_HW_WAIT_FOR_i; \ + for (AQ_HW_WAIT_FOR_i = _N_; (!(_B_)) && (AQ_HW_WAIT_FOR_i);\ + --AQ_HW_WAIT_FOR_i) {\ + udelay(_US_); \ + } \ + if (!AQ_HW_WAIT_FOR_i) {\ + err = -ETIMEDOUT; \ + } \ +} while (0) + +#define ATL_WRITE_FLUSH(aq_hw) { (void)aq_hw_read_reg(aq_hw, 0x10); } + +void aq_hw_write_reg_bit(struct aq_hw_s *aq_hw, u32 addr, u32 msk, + u32 shift, u32 val); +u32 aq_hw_read_reg_bit(struct aq_hw_s *aq_hw, u32 addr, u32 msk, u32 shift); +u32 aq_hw_read_reg(struct aq_hw_s *hw, u32 reg); +void aq_hw_write_reg(struct aq_hw_s *hw, u32 reg, u32 value); +int aq_hw_err_from_flags(struct aq_hw_s *hw); + +#endif /* AQ_HW_UTILS_H */ diff --git a/src/spdk/dpdk/drivers/net/atlantic/atl_logs.h b/src/spdk/dpdk/drivers/net/atlantic/atl_logs.h new file mode 100644 index 000000000..e3dba334f --- /dev/null +++ b/src/spdk/dpdk/drivers/net/atlantic/atl_logs.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Aquantia Corporation + */ +#ifndef ATL_LOGS_H +#define ATL_LOGS_H + +#include + +extern int atl_logtype_init; + +#define PMD_INIT_LOG(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, atl_logtype_init, \ + "%s(): " fmt "\n", __func__, ##args) + +#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>") + +#define PMD_RX_LOG(level, fmt, args...) \ + RTE_LOG_DP(level, PMD, "%s(): " fmt "\n", __func__, ## args) + +#define PMD_TX_LOG(level, fmt, args...) \ + RTE_LOG_DP(level, PMD, "%s(): " fmt "\n", __func__, ## args) + +extern int atl_logtype_driver; +#define PMD_DRV_LOG_RAW(level, fmt, args...) 
\ + rte_log(RTE_LOG_ ## level, atl_logtype_driver, "%s(): " fmt, \ + __func__, ## args) + +#define PMD_DRV_LOG(level, fmt, args...) \ + PMD_DRV_LOG_RAW(level, fmt "\n", ## args) + +#endif diff --git a/src/spdk/dpdk/drivers/net/atlantic/atl_rxtx.c b/src/spdk/dpdk/drivers/net/atlantic/atl_rxtx.c new file mode 100644 index 000000000..449ffd454 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/atlantic/atl_rxtx.c @@ -0,0 +1,1350 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Aquantia Corporation + */ + +#include +#include +#include + +#include "atl_ethdev.h" +#include "atl_hw_regs.h" + +#include "atl_logs.h" +#include "hw_atl/hw_atl_llh.h" +#include "hw_atl/hw_atl_b0.h" +#include "hw_atl/hw_atl_b0_internal.h" + +#define ATL_TX_CKSUM_OFFLOAD_MASK ( \ + PKT_TX_IP_CKSUM | \ + PKT_TX_L4_MASK | \ + PKT_TX_TCP_SEG) + +#define ATL_TX_OFFLOAD_MASK ( \ + PKT_TX_VLAN | \ + PKT_TX_IPV6 | \ + PKT_TX_IPV4 | \ + PKT_TX_IP_CKSUM | \ + PKT_TX_L4_MASK | \ + PKT_TX_TCP_SEG) + +#define ATL_TX_OFFLOAD_NOTSUP_MASK \ + (PKT_TX_OFFLOAD_MASK ^ ATL_TX_OFFLOAD_MASK) + +/** + * Structure associated with each descriptor of the RX ring of a RX queue. + */ +struct atl_rx_entry { + struct rte_mbuf *mbuf; +}; + +/** + * Structure associated with each descriptor of the TX ring of a TX queue. + */ +struct atl_tx_entry { + struct rte_mbuf *mbuf; + uint16_t next_id; + uint16_t last_id; +}; + +/** + * Structure associated with each RX queue. + */ +struct atl_rx_queue { + struct rte_mempool *mb_pool; + struct hw_atl_rxd_s *hw_ring; + uint64_t hw_ring_phys_addr; + struct atl_rx_entry *sw_ring; + uint16_t nb_rx_desc; + uint16_t rx_tail; + uint16_t nb_rx_hold; + uint16_t rx_free_thresh; + uint16_t queue_id; + uint16_t port_id; + uint16_t buff_size; + bool l3_csum_enabled; + bool l4_csum_enabled; +}; + +/** + * Structure associated with each TX queue. + */ +struct atl_tx_queue { + struct hw_atl_txd_s *hw_ring; + uint64_t hw_ring_phys_addr; + struct atl_tx_entry *sw_ring; + uint16_t nb_tx_desc; + uint16_t tx_tail; + uint16_t tx_head; + uint16_t queue_id; + uint16_t port_id; + uint16_t tx_free_thresh; + uint16_t tx_free; +}; + +static inline void +atl_reset_rx_queue(struct atl_rx_queue *rxq) +{ + struct hw_atl_rxd_s *rxd = NULL; + int i; + + PMD_INIT_FUNC_TRACE(); + + for (i = 0; i < rxq->nb_rx_desc; i++) { + rxd = (struct hw_atl_rxd_s *)&rxq->hw_ring[i]; + rxd->buf_addr = 0; + rxd->hdr_addr = 0; + } + + rxq->rx_tail = 0; +} + +int +atl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id, + uint16_t nb_rx_desc, unsigned int socket_id, + const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *mb_pool) +{ + struct atl_rx_queue *rxq; + const struct rte_memzone *mz; + + PMD_INIT_FUNC_TRACE(); + + /* make sure a valid number of descriptors have been requested */ + if (nb_rx_desc < AQ_HW_MIN_RX_RING_SIZE || + nb_rx_desc > AQ_HW_MAX_RX_RING_SIZE) { + PMD_INIT_LOG(ERR, "Number of Rx descriptors must be " + "less than or equal to %d, " + "greater than or equal to %d", AQ_HW_MAX_RX_RING_SIZE, + AQ_HW_MIN_RX_RING_SIZE); + return -EINVAL; + } + + /* + * if this queue existed already, free the associated memory. The + * queue cannot be reused in case we need to allocate memory on + * different socket than was previously used. 
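+	 *
+	 * For context, this setup path is reached through the generic ethdev
+	 * API. A minimal application-side sketch follows; the pool name,
+	 * sizes, port id and descriptor count are illustrative assumptions,
+	 * not driver defaults:
+	 *
+	 *   struct rte_mempool *mp = rte_pktmbuf_pool_create("rx_pool",
+	 *                   8192, 256, 0, RTE_MBUF_DEFAULT_BUF_SIZE,
+	 *                   rte_socket_id());
+	 *   int ret = rte_eth_rx_queue_setup(port_id, 0, 1024,
+	 *                   rte_socket_id(), NULL, mp);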
+ */ + if (dev->data->rx_queues[rx_queue_id] != NULL) { + atl_rx_queue_release(dev->data->rx_queues[rx_queue_id]); + dev->data->rx_queues[rx_queue_id] = NULL; + } + + /* allocate memory for the queue structure */ + rxq = rte_zmalloc_socket("atlantic Rx queue", sizeof(*rxq), + RTE_CACHE_LINE_SIZE, socket_id); + if (rxq == NULL) { + PMD_INIT_LOG(ERR, "Cannot allocate queue structure"); + return -ENOMEM; + } + + /* setup queue */ + rxq->mb_pool = mb_pool; + rxq->nb_rx_desc = nb_rx_desc; + rxq->port_id = dev->data->port_id; + rxq->queue_id = rx_queue_id; + rxq->rx_free_thresh = rx_conf->rx_free_thresh; + + rxq->l3_csum_enabled = dev->data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_IPV4_CKSUM; + rxq->l4_csum_enabled = dev->data->dev_conf.rxmode.offloads & + (DEV_RX_OFFLOAD_UDP_CKSUM | DEV_RX_OFFLOAD_TCP_CKSUM); + if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) + PMD_DRV_LOG(ERR, "PMD does not support KEEP_CRC offload"); + + /* allocate memory for the software ring */ + rxq->sw_ring = rte_zmalloc_socket("atlantic sw rx ring", + nb_rx_desc * sizeof(struct atl_rx_entry), + RTE_CACHE_LINE_SIZE, socket_id); + if (rxq->sw_ring == NULL) { + PMD_INIT_LOG(ERR, + "Port %d: Cannot allocate software ring for queue %d", + rxq->port_id, rxq->queue_id); + rte_free(rxq); + return -ENOMEM; + } + + /* + * allocate memory for the hardware descriptor ring. A memzone large + * enough to hold the maximum ring size is requested to allow for + * resizing in later calls to the queue setup function. + */ + mz = rte_eth_dma_zone_reserve(dev, "rx hw_ring", rx_queue_id, + HW_ATL_B0_MAX_RXD * + sizeof(struct hw_atl_rxd_s), + 128, socket_id); + if (mz == NULL) { + PMD_INIT_LOG(ERR, + "Port %d: Cannot allocate hardware ring for queue %d", + rxq->port_id, rxq->queue_id); + rte_free(rxq->sw_ring); + rte_free(rxq); + return -ENOMEM; + } + rxq->hw_ring = mz->addr; + rxq->hw_ring_phys_addr = mz->iova; + + atl_reset_rx_queue(rxq); + + dev->data->rx_queues[rx_queue_id] = rxq; + return 0; +} + +static inline void +atl_reset_tx_queue(struct atl_tx_queue *txq) +{ + struct atl_tx_entry *tx_entry; + union hw_atl_txc_s *txc; + uint16_t i; + + PMD_INIT_FUNC_TRACE(); + + if (!txq) { + PMD_DRV_LOG(ERR, "Pointer to txq is NULL"); + return; + } + + tx_entry = txq->sw_ring; + + for (i = 0; i < txq->nb_tx_desc; i++) { + txc = (union hw_atl_txc_s *)&txq->hw_ring[i]; + txc->flags1 = 0; + txc->flags2 = 2; + } + + for (i = 0; i < txq->nb_tx_desc; i++) { + txq->hw_ring[i].dd = 1; + tx_entry[i].mbuf = NULL; + } + + txq->tx_tail = 0; + txq->tx_head = 0; + txq->tx_free = txq->nb_tx_desc - 1; +} + +int +atl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id, + uint16_t nb_tx_desc, unsigned int socket_id, + const struct rte_eth_txconf *tx_conf) +{ + struct atl_tx_queue *txq; + const struct rte_memzone *mz; + + PMD_INIT_FUNC_TRACE(); + + /* make sure a valid number of descriptors have been requested */ + if (nb_tx_desc < AQ_HW_MIN_TX_RING_SIZE || + nb_tx_desc > AQ_HW_MAX_TX_RING_SIZE) { + PMD_INIT_LOG(ERR, "Number of Tx descriptors must be " + "less than or equal to %d, " + "greater than or equal to %d", AQ_HW_MAX_TX_RING_SIZE, + AQ_HW_MIN_TX_RING_SIZE); + return -EINVAL; + } + + /* + * if this queue existed already, free the associated memory. The + * queue cannot be reused in case we need to allocate memory on + * different socket than was previously used. 
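+	 *
+	 * The Tx side mirrors the Rx setup above; an application would
+	 * typically call (queue id and descriptor count are illustrative
+	 * assumptions, a NULL txconf selects the defaults reported by the
+	 * driver):
+	 *
+	 *   int ret = rte_eth_tx_queue_setup(port_id, 0, 1024,
+	 *                   rte_socket_id(), NULL);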
+ */ + if (dev->data->tx_queues[tx_queue_id] != NULL) { + atl_tx_queue_release(dev->data->tx_queues[tx_queue_id]); + dev->data->tx_queues[tx_queue_id] = NULL; + } + + /* allocate memory for the queue structure */ + txq = rte_zmalloc_socket("atlantic Tx queue", sizeof(*txq), + RTE_CACHE_LINE_SIZE, socket_id); + if (txq == NULL) { + PMD_INIT_LOG(ERR, "Cannot allocate queue structure"); + return -ENOMEM; + } + + /* setup queue */ + txq->nb_tx_desc = nb_tx_desc; + txq->port_id = dev->data->port_id; + txq->queue_id = tx_queue_id; + txq->tx_free_thresh = tx_conf->tx_free_thresh; + + + /* allocate memory for the software ring */ + txq->sw_ring = rte_zmalloc_socket("atlantic sw tx ring", + nb_tx_desc * sizeof(struct atl_tx_entry), + RTE_CACHE_LINE_SIZE, socket_id); + if (txq->sw_ring == NULL) { + PMD_INIT_LOG(ERR, + "Port %d: Cannot allocate software ring for queue %d", + txq->port_id, txq->queue_id); + rte_free(txq); + return -ENOMEM; + } + + /* + * allocate memory for the hardware descriptor ring. A memzone large + * enough to hold the maximum ring size is requested to allow for + * resizing in later calls to the queue setup function. + */ + mz = rte_eth_dma_zone_reserve(dev, "tx hw_ring", tx_queue_id, + HW_ATL_B0_MAX_TXD * sizeof(struct hw_atl_txd_s), + 128, socket_id); + if (mz == NULL) { + PMD_INIT_LOG(ERR, + "Port %d: Cannot allocate hardware ring for queue %d", + txq->port_id, txq->queue_id); + rte_free(txq->sw_ring); + rte_free(txq); + return -ENOMEM; + } + txq->hw_ring = mz->addr; + txq->hw_ring_phys_addr = mz->iova; + + atl_reset_tx_queue(txq); + + dev->data->tx_queues[tx_queue_id] = txq; + return 0; +} + +int +atl_tx_init(struct rte_eth_dev *eth_dev) +{ + struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); + struct atl_tx_queue *txq; + uint64_t base_addr = 0; + int i = 0; + int err = 0; + + PMD_INIT_FUNC_TRACE(); + + for (i = 0; i < eth_dev->data->nb_tx_queues; i++) { + txq = eth_dev->data->tx_queues[i]; + base_addr = txq->hw_ring_phys_addr; + + err = hw_atl_b0_hw_ring_tx_init(hw, base_addr, + txq->queue_id, + txq->nb_tx_desc, 0, + txq->port_id); + + if (err) { + PMD_INIT_LOG(ERR, + "Port %d: Cannot init TX queue %d", + txq->port_id, txq->queue_id); + break; + } + } + + return err; +} + +int +atl_rx_init(struct rte_eth_dev *eth_dev) +{ + struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); + struct aq_rss_parameters *rss_params = &hw->aq_nic_cfg->aq_rss; + struct atl_rx_queue *rxq; + uint64_t base_addr = 0; + int i = 0; + int err = 0; + + PMD_INIT_FUNC_TRACE(); + + for (i = 0; i < eth_dev->data->nb_rx_queues; i++) { + rxq = eth_dev->data->rx_queues[i]; + base_addr = rxq->hw_ring_phys_addr; + + /* Take requested pool mbuf size and adapt + * descriptor buffer to best fit + */ + int buff_size = rte_pktmbuf_data_room_size(rxq->mb_pool) - + RTE_PKTMBUF_HEADROOM; + + buff_size = RTE_ALIGN_FLOOR(buff_size, 1024); + if (buff_size > HW_ATL_B0_RXD_BUF_SIZE_MAX) { + PMD_INIT_LOG(WARNING, + "Port %d queue %d: mem pool buff size is too big\n", + rxq->port_id, rxq->queue_id); + buff_size = HW_ATL_B0_RXD_BUF_SIZE_MAX; + } + if (buff_size < 1024) { + PMD_INIT_LOG(ERR, + "Port %d queue %d: mem pool buff size is too small\n", + rxq->port_id, rxq->queue_id); + return -EINVAL; + } + rxq->buff_size = buff_size; + + err = hw_atl_b0_hw_ring_rx_init(hw, base_addr, rxq->queue_id, + rxq->nb_rx_desc, buff_size, 0, + rxq->port_id); + + if (err) { + PMD_INIT_LOG(ERR, "Port %d: Cannot init RX queue %d", + rxq->port_id, rxq->queue_id); + break; + } + } + + for (i = 
rss_params->indirection_table_size; i--;) + rss_params->indirection_table[i] = i & + (eth_dev->data->nb_rx_queues - 1); + hw_atl_b0_hw_rss_set(hw, rss_params); + return err; +} + +static int +atl_alloc_rx_queue_mbufs(struct atl_rx_queue *rxq) +{ + struct atl_rx_entry *rx_entry = rxq->sw_ring; + struct hw_atl_rxd_s *rxd; + uint64_t dma_addr = 0; + uint32_t i = 0; + + PMD_INIT_FUNC_TRACE(); + + /* fill Rx ring */ + for (i = 0; i < rxq->nb_rx_desc; i++) { + struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool); + + if (mbuf == NULL) { + PMD_INIT_LOG(ERR, + "Port %d: mbuf alloc failed for rx queue %d", + rxq->port_id, rxq->queue_id); + return -ENOMEM; + } + + mbuf->data_off = RTE_PKTMBUF_HEADROOM; + mbuf->port = rxq->port_id; + + dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf)); + rxd = (struct hw_atl_rxd_s *)&rxq->hw_ring[i]; + rxd->buf_addr = dma_addr; + rxd->hdr_addr = 0; + rx_entry[i].mbuf = mbuf; + } + + return 0; +} + +static void +atl_rx_queue_release_mbufs(struct atl_rx_queue *rxq) +{ + int i; + + PMD_INIT_FUNC_TRACE(); + + if (rxq->sw_ring != NULL) { + for (i = 0; i < rxq->nb_rx_desc; i++) { + if (rxq->sw_ring[i].mbuf != NULL) { + rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf); + rxq->sw_ring[i].mbuf = NULL; + } + } + } +} + +int +atl_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) +{ + struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct atl_rx_queue *rxq = NULL; + + PMD_INIT_FUNC_TRACE(); + + if (rx_queue_id < dev->data->nb_rx_queues) { + rxq = dev->data->rx_queues[rx_queue_id]; + + if (atl_alloc_rx_queue_mbufs(rxq) != 0) { + PMD_INIT_LOG(ERR, + "Port %d: Allocate mbufs for queue %d failed", + rxq->port_id, rxq->queue_id); + return -1; + } + + hw_atl_b0_hw_ring_rx_start(hw, rx_queue_id); + + rte_wmb(); + hw_atl_reg_rx_dma_desc_tail_ptr_set(hw, rxq->nb_rx_desc - 1, + rx_queue_id); + dev->data->rx_queue_state[rx_queue_id] = + RTE_ETH_QUEUE_STATE_STARTED; + } else { + return -1; + } + + return 0; +} + +int +atl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id) +{ + struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct atl_rx_queue *rxq = NULL; + + PMD_INIT_FUNC_TRACE(); + + if (rx_queue_id < dev->data->nb_rx_queues) { + rxq = dev->data->rx_queues[rx_queue_id]; + + hw_atl_b0_hw_ring_rx_stop(hw, rx_queue_id); + + atl_rx_queue_release_mbufs(rxq); + atl_reset_rx_queue(rxq); + + dev->data->rx_queue_state[rx_queue_id] = + RTE_ETH_QUEUE_STATE_STOPPED; + } else { + return -1; + } + + return 0; +} + +void +atl_rx_queue_release(void *rx_queue) +{ + PMD_INIT_FUNC_TRACE(); + + if (rx_queue != NULL) { + struct atl_rx_queue *rxq = (struct atl_rx_queue *)rx_queue; + + atl_rx_queue_release_mbufs(rxq); + rte_free(rxq->sw_ring); + rte_free(rxq); + } +} + +static void +atl_tx_queue_release_mbufs(struct atl_tx_queue *txq) +{ + int i; + + PMD_INIT_FUNC_TRACE(); + + if (txq->sw_ring != NULL) { + for (i = 0; i < txq->nb_tx_desc; i++) { + if (txq->sw_ring[i].mbuf != NULL) { + rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf); + txq->sw_ring[i].mbuf = NULL; + } + } + } +} + +int +atl_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id) +{ + struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + PMD_INIT_FUNC_TRACE(); + + if (tx_queue_id < dev->data->nb_tx_queues) { + hw_atl_b0_hw_ring_tx_start(hw, tx_queue_id); + + rte_wmb(); + hw_atl_b0_hw_tx_ring_tail_update(hw, 0, tx_queue_id); + dev->data->tx_queue_state[tx_queue_id] = + RTE_ETH_QUEUE_STATE_STARTED; + } else { + return -1; + } + + return 0; +} + +int 
+atl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id) +{ + struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct atl_tx_queue *txq; + + PMD_INIT_FUNC_TRACE(); + + txq = dev->data->tx_queues[tx_queue_id]; + + hw_atl_b0_hw_ring_tx_stop(hw, tx_queue_id); + + atl_tx_queue_release_mbufs(txq); + atl_reset_tx_queue(txq); + dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; + + return 0; +} + +void +atl_tx_queue_release(void *tx_queue) +{ + PMD_INIT_FUNC_TRACE(); + + if (tx_queue != NULL) { + struct atl_tx_queue *txq = (struct atl_tx_queue *)tx_queue; + + atl_tx_queue_release_mbufs(txq); + rte_free(txq->sw_ring); + rte_free(txq); + } +} + +void +atl_free_queues(struct rte_eth_dev *dev) +{ + unsigned int i; + + PMD_INIT_FUNC_TRACE(); + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + atl_rx_queue_release(dev->data->rx_queues[i]); + dev->data->rx_queues[i] = 0; + } + dev->data->nb_rx_queues = 0; + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + atl_tx_queue_release(dev->data->tx_queues[i]); + dev->data->tx_queues[i] = 0; + } + dev->data->nb_tx_queues = 0; +} + +int +atl_start_queues(struct rte_eth_dev *dev) +{ + int i; + + PMD_INIT_FUNC_TRACE(); + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + if (atl_tx_queue_start(dev, i) != 0) { + PMD_DRV_LOG(ERR, + "Port %d: Start Tx queue %d failed", + dev->data->port_id, i); + return -1; + } + } + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + if (atl_rx_queue_start(dev, i) != 0) { + PMD_DRV_LOG(ERR, + "Port %d: Start Rx queue %d failed", + dev->data->port_id, i); + return -1; + } + } + + return 0; +} + +int +atl_stop_queues(struct rte_eth_dev *dev) +{ + int i; + + PMD_INIT_FUNC_TRACE(); + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + if (atl_tx_queue_stop(dev, i) != 0) { + PMD_DRV_LOG(ERR, + "Port %d: Stop Tx queue %d failed", + dev->data->port_id, i); + return -1; + } + } + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + if (atl_rx_queue_stop(dev, i) != 0) { + PMD_DRV_LOG(ERR, + "Port %d: Stop Rx queue %d failed", + dev->data->port_id, i); + return -1; + } + } + + return 0; +} + +void +atl_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, + struct rte_eth_rxq_info *qinfo) +{ + struct atl_rx_queue *rxq; + + PMD_INIT_FUNC_TRACE(); + + rxq = dev->data->rx_queues[queue_id]; + + qinfo->mp = rxq->mb_pool; + qinfo->scattered_rx = dev->data->scattered_rx; + qinfo->nb_desc = rxq->nb_rx_desc; +} + +void +atl_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, + struct rte_eth_txq_info *qinfo) +{ + struct atl_tx_queue *txq; + + PMD_INIT_FUNC_TRACE(); + + txq = dev->data->tx_queues[queue_id]; + + qinfo->nb_desc = txq->nb_tx_desc; +} + +/* Return Rx queue avail count */ + +uint32_t +atl_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id) +{ + struct atl_rx_queue *rxq; + + PMD_INIT_FUNC_TRACE(); + + if (rx_queue_id >= dev->data->nb_rx_queues) { + PMD_DRV_LOG(ERR, "Invalid RX queue id=%d", rx_queue_id); + return 0; + } + + rxq = dev->data->rx_queues[rx_queue_id]; + + if (rxq == NULL) + return 0; + + return rxq->nb_rx_desc - rxq->nb_rx_hold; +} + +int +atl_dev_rx_descriptor_status(void *rx_queue, uint16_t offset) +{ + struct atl_rx_queue *rxq = rx_queue; + struct hw_atl_rxd_wb_s *rxd; + uint32_t idx; + + PMD_INIT_FUNC_TRACE(); + + if (unlikely(offset >= rxq->nb_rx_desc)) + return -EINVAL; + + if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold) + return RTE_ETH_RX_DESC_UNAVAIL; + + idx = rxq->rx_tail + offset; + + if (idx >= rxq->nb_rx_desc) + idx -= rxq->nb_rx_desc; + + rxd = 
(struct hw_atl_rxd_wb_s *)&rxq->hw_ring[idx]; + + if (rxd->dd) + return RTE_ETH_RX_DESC_DONE; + + return RTE_ETH_RX_DESC_AVAIL; +} + +int +atl_dev_tx_descriptor_status(void *tx_queue, uint16_t offset) +{ + struct atl_tx_queue *txq = tx_queue; + struct hw_atl_txd_s *txd; + uint32_t idx; + + PMD_INIT_FUNC_TRACE(); + + if (unlikely(offset >= txq->nb_tx_desc)) + return -EINVAL; + + idx = txq->tx_tail + offset; + + if (idx >= txq->nb_tx_desc) + idx -= txq->nb_tx_desc; + + txd = &txq->hw_ring[idx]; + + if (txd->dd) + return RTE_ETH_TX_DESC_DONE; + + return RTE_ETH_TX_DESC_FULL; +} + +static int +atl_rx_enable_intr(struct rte_eth_dev *dev, uint16_t queue_id, bool enable) +{ + struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct atl_rx_queue *rxq; + + PMD_INIT_FUNC_TRACE(); + + if (queue_id >= dev->data->nb_rx_queues) { + PMD_DRV_LOG(ERR, "Invalid RX queue id=%d", queue_id); + return -EINVAL; + } + + rxq = dev->data->rx_queues[queue_id]; + + if (rxq == NULL) + return 0; + + /* Mapping interrupt vector */ + hw_atl_itr_irq_map_en_rx_set(hw, enable, queue_id); + + return 0; +} + +int +atl_dev_rx_queue_intr_enable(struct rte_eth_dev *eth_dev, uint16_t queue_id) +{ + return atl_rx_enable_intr(eth_dev, queue_id, true); +} + +int +atl_dev_rx_queue_intr_disable(struct rte_eth_dev *eth_dev, uint16_t queue_id) +{ + return atl_rx_enable_intr(eth_dev, queue_id, false); +} + +uint16_t +atl_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + int i, ret; + uint64_t ol_flags; + struct rte_mbuf *m; + + PMD_INIT_FUNC_TRACE(); + + for (i = 0; i < nb_pkts; i++) { + m = tx_pkts[i]; + ol_flags = m->ol_flags; + + if (m->nb_segs > AQ_HW_MAX_SEGS_SIZE) { + rte_errno = EINVAL; + return i; + } + + if (ol_flags & ATL_TX_OFFLOAD_NOTSUP_MASK) { + rte_errno = ENOTSUP; + return i; + } + +#ifdef RTE_LIBRTE_ETHDEV_DEBUG + ret = rte_validate_tx_offload(m); + if (ret != 0) { + rte_errno = -ret; + return i; + } +#endif + ret = rte_net_intel_cksum_prepare(m); + if (ret != 0) { + rte_errno = -ret; + return i; + } + } + + return i; +} + +static uint64_t +atl_desc_to_offload_flags(struct atl_rx_queue *rxq, + struct hw_atl_rxd_wb_s *rxd_wb) +{ + uint64_t mbuf_flags = 0; + + PMD_INIT_FUNC_TRACE(); + + /* IPv4 ? */ + if (rxq->l3_csum_enabled && ((rxd_wb->pkt_type & 0x3) == 0)) { + /* IPv4 csum error ? */ + if (rxd_wb->rx_stat & BIT(1)) + mbuf_flags |= PKT_RX_IP_CKSUM_BAD; + else + mbuf_flags |= PKT_RX_IP_CKSUM_GOOD; + } else { + mbuf_flags |= PKT_RX_IP_CKSUM_UNKNOWN; + } + + /* CSUM calculated ? 
*/ + if (rxq->l4_csum_enabled && (rxd_wb->rx_stat & BIT(3))) { + if (rxd_wb->rx_stat & BIT(2)) + mbuf_flags |= PKT_RX_L4_CKSUM_BAD; + else + mbuf_flags |= PKT_RX_L4_CKSUM_GOOD; + } else { + mbuf_flags |= PKT_RX_L4_CKSUM_UNKNOWN; + } + + return mbuf_flags; +} + +static uint32_t +atl_desc_to_pkt_type(struct hw_atl_rxd_wb_s *rxd_wb) +{ + uint32_t type = RTE_PTYPE_UNKNOWN; + uint16_t l2_l3_type = rxd_wb->pkt_type & 0x3; + uint16_t l4_type = (rxd_wb->pkt_type & 0x1C) >> 2; + + switch (l2_l3_type) { + case 0: + type = RTE_PTYPE_L3_IPV4; + break; + case 1: + type = RTE_PTYPE_L3_IPV6; + break; + case 2: + type = RTE_PTYPE_L2_ETHER; + break; + case 3: + type = RTE_PTYPE_L2_ETHER_ARP; + break; + } + + switch (l4_type) { + case 0: + type |= RTE_PTYPE_L4_TCP; + break; + case 1: + type |= RTE_PTYPE_L4_UDP; + break; + case 2: + type |= RTE_PTYPE_L4_SCTP; + break; + case 3: + type |= RTE_PTYPE_L4_ICMP; + break; + } + + if (rxd_wb->pkt_type & BIT(5)) + type |= RTE_PTYPE_L2_ETHER_VLAN; + + return type; +} + +uint16_t +atl_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) +{ + struct atl_rx_queue *rxq = (struct atl_rx_queue *)rx_queue; + struct rte_eth_dev *dev = &rte_eth_devices[rxq->port_id]; + struct atl_adapter *adapter = + ATL_DEV_TO_ADAPTER(&rte_eth_devices[rxq->port_id]); + struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(adapter); + struct aq_hw_cfg_s *cfg = + ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private); + struct atl_rx_entry *sw_ring = rxq->sw_ring; + + struct rte_mbuf *new_mbuf; + struct rte_mbuf *rx_mbuf, *rx_mbuf_prev, *rx_mbuf_first; + struct atl_rx_entry *rx_entry; + uint16_t nb_rx = 0; + uint16_t nb_hold = 0; + struct hw_atl_rxd_wb_s rxd_wb; + struct hw_atl_rxd_s *rxd = NULL; + uint16_t tail = rxq->rx_tail; + uint64_t dma_addr; + uint16_t pkt_len = 0; + + while (nb_rx < nb_pkts) { + uint16_t eop_tail = tail; + + rxd = (struct hw_atl_rxd_s *)&rxq->hw_ring[tail]; + rxd_wb = *(struct hw_atl_rxd_wb_s *)rxd; + + if (!rxd_wb.dd) { /* RxD is not done */ + break; + } + + PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u tail=%u " + "eop=0x%x pkt_len=%u hash=0x%x hash_type=0x%x", + (unsigned int)rxq->port_id, + (unsigned int)rxq->queue_id, + (unsigned int)tail, (unsigned int)rxd_wb.eop, + (unsigned int)rte_le_to_cpu_16(rxd_wb.pkt_len), + rxd_wb.rss_hash, rxd_wb.rss_type); + + /* RxD is not done */ + if (!rxd_wb.eop) { + while (true) { + struct hw_atl_rxd_wb_s *eop_rxwbd; + + eop_tail = (eop_tail + 1) % rxq->nb_rx_desc; + eop_rxwbd = (struct hw_atl_rxd_wb_s *) + &rxq->hw_ring[eop_tail]; + if (!eop_rxwbd->dd) { + /* no EOP received yet */ + eop_tail = tail; + break; + } + if (eop_rxwbd->dd && eop_rxwbd->eop) + break; + } + /* No EOP in ring */ + if (eop_tail == tail) + break; + } + rx_mbuf_prev = NULL; + rx_mbuf_first = NULL; + + /* Run through packet segments */ + while (true) { + new_mbuf = rte_mbuf_raw_alloc(rxq->mb_pool); + if (new_mbuf == NULL) { + PMD_RX_LOG(DEBUG, + "RX mbuf alloc failed port_id=%u " + "queue_id=%u", (unsigned int)rxq->port_id, + (unsigned int)rxq->queue_id); + dev->data->rx_mbuf_alloc_failed++; + adapter->sw_stats.rx_nombuf++; + goto err_stop; + } + + nb_hold++; + rx_entry = &sw_ring[tail]; + + rx_mbuf = rx_entry->mbuf; + rx_entry->mbuf = new_mbuf; + dma_addr = rte_cpu_to_le_64( + rte_mbuf_data_iova_default(new_mbuf)); + + /* setup RX descriptor */ + rxd->hdr_addr = 0; + rxd->buf_addr = dma_addr; + + /* + * Initialize the returned mbuf. + * 1) setup generic mbuf fields: + * - number of segments, + * - next segment, + * - packet length, + * - RX port identifier. 
+ * 2) integrate hardware offload data, if any: + * < - RSS flag & hash, + * - IP checksum flag, + * - VLAN TCI, if any, + * - error flags. + */ + pkt_len = (uint16_t)rte_le_to_cpu_16(rxd_wb.pkt_len); + rx_mbuf->data_off = RTE_PKTMBUF_HEADROOM; + rte_prefetch1((char *)rx_mbuf->buf_addr + + rx_mbuf->data_off); + rx_mbuf->nb_segs = 0; + rx_mbuf->next = NULL; + rx_mbuf->pkt_len = pkt_len; + rx_mbuf->data_len = pkt_len; + if (rxd_wb.eop) { + u16 remainder_len = pkt_len % rxq->buff_size; + if (!remainder_len) + remainder_len = rxq->buff_size; + rx_mbuf->data_len = remainder_len; + } else { + rx_mbuf->data_len = pkt_len > rxq->buff_size ? + rxq->buff_size : pkt_len; + } + rx_mbuf->port = rxq->port_id; + + rx_mbuf->hash.rss = rxd_wb.rss_hash; + + rx_mbuf->vlan_tci = rxd_wb.vlan; + + rx_mbuf->ol_flags = + atl_desc_to_offload_flags(rxq, &rxd_wb); + + rx_mbuf->packet_type = atl_desc_to_pkt_type(&rxd_wb); + + if (rx_mbuf->packet_type & RTE_PTYPE_L2_ETHER_VLAN) { + rx_mbuf->ol_flags |= PKT_RX_VLAN; + rx_mbuf->vlan_tci = rxd_wb.vlan; + + if (cfg->vlan_strip) + rx_mbuf->ol_flags |= + PKT_RX_VLAN_STRIPPED; + } + + if (!rx_mbuf_first) + rx_mbuf_first = rx_mbuf; + rx_mbuf_first->nb_segs++; + + if (rx_mbuf_prev) + rx_mbuf_prev->next = rx_mbuf; + rx_mbuf_prev = rx_mbuf; + + tail = (tail + 1) % rxq->nb_rx_desc; + /* Prefetch next mbufs */ + rte_prefetch0(sw_ring[tail].mbuf); + if ((tail & 0x3) == 0) { + rte_prefetch0(&sw_ring[tail]); + rte_prefetch0(&sw_ring[tail]); + } + + /* filled mbuf_first */ + if (rxd_wb.eop) + break; + rxd = (struct hw_atl_rxd_s *)&rxq->hw_ring[tail]; + rxd_wb = *(struct hw_atl_rxd_wb_s *)rxd; + }; + + /* + * Store the mbuf address into the next entry of the array + * of returned packets. + */ + rx_pkts[nb_rx++] = rx_mbuf_first; + adapter->sw_stats.q_ipackets[rxq->queue_id]++; + adapter->sw_stats.q_ibytes[rxq->queue_id] += + rx_mbuf_first->pkt_len; + + PMD_RX_LOG(DEBUG, "add mbuf segs=%d pkt_len=%d", + rx_mbuf_first->nb_segs, + rx_mbuf_first->pkt_len); + } + +err_stop: + + rxq->rx_tail = tail; + + /* + * If the number of free RX descriptors is greater than the RX free + * threshold of the queue, advance the Receive Descriptor Tail (RDT) + * register. + * Update the RDT with the value of the last processed RX descriptor + * minus 1, to guarantee that the RDT register is never equal to the + * RDH register, which creates a "full" ring situtation from the + * hardware point of view... + */ + nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold); + if (nb_hold > rxq->rx_free_thresh) { + PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u " + "nb_hold=%u nb_rx=%u", + (unsigned int)rxq->port_id, (unsigned int)rxq->queue_id, + (unsigned int)tail, (unsigned int)nb_hold, + (unsigned int)nb_rx); + tail = (uint16_t)((tail == 0) ? 
+ (rxq->nb_rx_desc - 1) : (tail - 1)); + + hw_atl_reg_rx_dma_desc_tail_ptr_set(hw, tail, rxq->queue_id); + + nb_hold = 0; + } + + rxq->nb_rx_hold = nb_hold; + + return nb_rx; +} + +static void +atl_xmit_cleanup(struct atl_tx_queue *txq) +{ + struct atl_tx_entry *sw_ring; + struct hw_atl_txd_s *txd; + int to_clean = 0; + + if (txq != NULL) { + sw_ring = txq->sw_ring; + int head = txq->tx_head; + int cnt; + int i; + + for (i = 0, cnt = head; ; i++) { + txd = &txq->hw_ring[cnt]; + + if (txd->dd) + to_clean++; + + cnt = (cnt + 1) % txq->nb_tx_desc; + if (cnt == txq->tx_tail) + break; + } + + if (to_clean == 0) + return; + + while (to_clean) { + txd = &txq->hw_ring[head]; + + struct atl_tx_entry *rx_entry = &sw_ring[head]; + + if (rx_entry->mbuf) { + rte_pktmbuf_free_seg(rx_entry->mbuf); + rx_entry->mbuf = NULL; + } + + if (txd->dd) + to_clean--; + + txd->buf_addr = 0; + txd->flags = 0; + + head = (head + 1) % txq->nb_tx_desc; + txq->tx_free++; + } + + txq->tx_head = head; + } +} + +static int +atl_tso_setup(struct rte_mbuf *tx_pkt, union hw_atl_txc_s *txc) +{ + uint32_t tx_cmd = 0; + uint64_t ol_flags = tx_pkt->ol_flags; + + if (ol_flags & PKT_TX_TCP_SEG) { + tx_cmd |= tx_desc_cmd_lso | tx_desc_cmd_l4cs; + + txc->cmd = 0x4; + + if (ol_flags & PKT_TX_IPV6) + txc->cmd |= 0x2; + + txc->l2_len = tx_pkt->l2_len; + txc->l3_len = tx_pkt->l3_len; + txc->l4_len = tx_pkt->l4_len; + + txc->mss_len = tx_pkt->tso_segsz; + } + + if (ol_flags & PKT_TX_VLAN) { + tx_cmd |= tx_desc_cmd_vlan; + txc->vlan_tag = tx_pkt->vlan_tci; + } + + if (tx_cmd) { + txc->type = tx_desc_type_ctx; + txc->idx = 0; + } + + return tx_cmd; +} + +static inline void +atl_setup_csum_offload(struct rte_mbuf *mbuf, struct hw_atl_txd_s *txd, + uint32_t tx_cmd) +{ + txd->cmd |= tx_desc_cmd_fcs; + txd->cmd |= (mbuf->ol_flags & PKT_TX_IP_CKSUM) ? tx_desc_cmd_ipv4 : 0; + /* L4 csum requested */ + txd->cmd |= (mbuf->ol_flags & PKT_TX_L4_MASK) ? 
tx_desc_cmd_l4cs : 0; + txd->cmd |= tx_cmd; +} + +static inline void +atl_xmit_pkt(struct aq_hw_s *hw, struct atl_tx_queue *txq, + struct rte_mbuf *tx_pkt) +{ + struct atl_adapter *adapter = + ATL_DEV_TO_ADAPTER(&rte_eth_devices[txq->port_id]); + uint32_t pay_len = 0; + int tail = 0; + struct atl_tx_entry *tx_entry; + uint64_t buf_dma_addr; + struct rte_mbuf *m_seg; + union hw_atl_txc_s *txc = NULL; + struct hw_atl_txd_s *txd = NULL; + u32 tx_cmd = 0U; + int desc_count = 0; + + tail = txq->tx_tail; + + txc = (union hw_atl_txc_s *)&txq->hw_ring[tail]; + + txc->flags1 = 0U; + txc->flags2 = 0U; + + tx_cmd = atl_tso_setup(tx_pkt, txc); + + if (tx_cmd) { + /* We've consumed the first desc, adjust counters */ + tail = (tail + 1) % txq->nb_tx_desc; + txq->tx_tail = tail; + txq->tx_free -= 1; + + txd = &txq->hw_ring[tail]; + txd->flags = 0U; + } else { + txd = (struct hw_atl_txd_s *)txc; + } + + txd->ct_en = !!tx_cmd; + + txd->type = tx_desc_type_desc; + + atl_setup_csum_offload(tx_pkt, txd, tx_cmd); + + if (tx_cmd) + txd->ct_idx = 0; + + pay_len = tx_pkt->pkt_len; + + txd->pay_len = pay_len; + + for (m_seg = tx_pkt; m_seg; m_seg = m_seg->next) { + if (desc_count > 0) { + txd = &txq->hw_ring[tail]; + txd->flags = 0U; + } + + buf_dma_addr = rte_mbuf_data_iova(m_seg); + txd->buf_addr = rte_cpu_to_le_64(buf_dma_addr); + + txd->type = tx_desc_type_desc; + txd->len = m_seg->data_len; + txd->pay_len = pay_len; + + /* Store mbuf for freeing later */ + tx_entry = &txq->sw_ring[tail]; + + if (tx_entry->mbuf) + rte_pktmbuf_free_seg(tx_entry->mbuf); + tx_entry->mbuf = m_seg; + + tail = (tail + 1) % txq->nb_tx_desc; + + desc_count++; + } + + // Last descriptor requires EOP and WB + txd->eop = 1U; + txd->cmd |= tx_desc_cmd_wb; + + hw_atl_b0_hw_tx_ring_tail_update(hw, tail, txq->queue_id); + + txq->tx_tail = tail; + + txq->tx_free -= desc_count; + + adapter->sw_stats.q_opackets[txq->queue_id]++; + adapter->sw_stats.q_obytes[txq->queue_id] += pay_len; +} + +uint16_t +atl_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) +{ + struct rte_eth_dev *dev = NULL; + struct aq_hw_s *hw = NULL; + struct atl_tx_queue *txq = tx_queue; + struct rte_mbuf *tx_pkt; + uint16_t nb_tx; + + dev = &rte_eth_devices[txq->port_id]; + hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + PMD_TX_LOG(DEBUG, + "port %d txq %d pkts: %d tx_free=%d tx_tail=%d tx_head=%d", + txq->port_id, txq->queue_id, nb_pkts, txq->tx_free, + txq->tx_tail, txq->tx_head); + + for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) { + tx_pkt = *tx_pkts++; + + /* Clean Tx queue if needed */ + if (txq->tx_free < txq->tx_free_thresh) + atl_xmit_cleanup(txq); + + /* Check if we have enough free descriptors */ + if (txq->tx_free < tx_pkt->nb_segs) + break; + + /* check mbuf is valid */ + if ((tx_pkt->nb_segs == 0) || + ((tx_pkt->nb_segs > 1) && (tx_pkt->next == NULL))) + break; + + /* Send the packet */ + atl_xmit_pkt(hw, txq, tx_pkt); + } + + PMD_TX_LOG(DEBUG, "atl_xmit_pkts %d transmitted", nb_tx); + + return nb_tx; +} diff --git a/src/spdk/dpdk/drivers/net/atlantic/atl_types.h b/src/spdk/dpdk/drivers/net/atlantic/atl_types.h new file mode 100644 index 000000000..e813d9f32 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/atlantic/atl_types.h @@ -0,0 +1,234 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Aquantia Corporation + */ +#ifndef ATL_TYPES_H +#define ATL_TYPES_H + +#include +#include +#include +#include +#include +#include +#include + +#include + +typedef uint8_t u8; +typedef int8_t s8; +typedef uint16_t u16; +typedef int16_t 
s16; +typedef uint32_t u32; +typedef int32_t s32; +typedef uint64_t u64; + +#define min(a, b) RTE_MIN(a, b) +#define max(a, b) RTE_MAX(a, b) + +#include "hw_atl/hw_atl_b0_internal.h" +#include "hw_atl/hw_atl_utils.h" + +struct aq_hw_link_status_s { + unsigned int mbps; +}; + +struct aq_stats_s { + u64 uprc; + u64 mprc; + u64 bprc; + u64 erpt; + u64 uptc; + u64 mptc; + u64 bptc; + u64 erpr; + u64 mbtc; + u64 bbtc; + u64 mbrc; + u64 bbrc; + u64 ubrc; + u64 ubtc; + u64 dpc; + u64 dma_pkt_rc; + u64 dma_pkt_tc; + u64 dma_oct_rc; + u64 dma_oct_tc; +}; + +struct aq_rss_parameters { + u16 base_cpu_number; + u16 indirection_table_size; + u16 hash_secret_key_size; + u32 hash_secret_key[HW_ATL_B0_RSS_HASHKEY_BITS / 8]; + u8 indirection_table[HW_ATL_B0_RSS_REDIRECTION_MAX]; +}; + +/* Macsec stuff */ +struct aq_macsec_config { + struct { + u32 macsec_enabled; + u32 encryption_enabled; + u32 replay_protection_enabled; + } common; + + struct { + u32 idx; + u32 mac[2]; /* 6 bytes */ + } txsc; + + struct { + u32 idx; + u32 an; /* association number on the local side */ + u32 pn; /* packet number on the local side */ + u32 key[4]; /* 128 bit key */ + } txsa; + + struct { + u32 mac[2]; /* 6 bytes */ + u32 pi; + } rxsc; + + struct { + u32 idx; + u32 an; /* association number on the remote side */ + u32 pn; /* packet number on the remote side */ + u32 key[4]; /* 128 bit key */ + } rxsa; +}; + +struct aq_hw_cfg_s { + bool is_lro; + bool is_rss; + unsigned int num_rss_queues; + int wol; + + int link_speed_msk; + int irq_type; + int irq_mask; + unsigned int vecs; + + bool vlan_strip; + uint32_t vlan_filter[HW_ATL_B0_MAX_VLAN_IDS]; + uint32_t flow_control; + + struct aq_rss_parameters aq_rss; + struct aq_macsec_config aq_macsec; +}; + +struct aq_hw_s { + u16 device_id; + u16 vendor_id; + bool adapter_stopped; + + u8 rbl_enabled:1; + struct aq_hw_cfg_s *aq_nic_cfg; + const struct aq_fw_ops *aq_fw_ops; + void *mmio; + + struct aq_hw_link_status_s aq_link_status; + bool is_autoneg; + + struct hw_aq_atl_utils_mbox mbox; + struct hw_atl_stats_s last_stats; + struct aq_stats_s curr_stats; + + u32 caps_lo; + + u64 speed; + unsigned int chip_features; + u32 fw_ver_actual; + u32 mbox_addr; + u32 rpc_addr; + u32 rpc_tid; + struct hw_aq_atl_utils_fw_rpc rpc; + + pthread_mutex_t mbox_mutex; +}; + +struct aq_fw_ops { + int (*init)(struct aq_hw_s *self); + + int (*deinit)(struct aq_hw_s *self); + + int (*reset)(struct aq_hw_s *self); + + int (*get_mac_permanent)(struct aq_hw_s *self, u8 *mac); + + int (*set_link_speed)(struct aq_hw_s *self, u32 speed); + + int (*set_state)(struct aq_hw_s *self, + enum hal_atl_utils_fw_state_e state); + + int (*update_link_status)(struct aq_hw_s *self); + + int (*update_stats)(struct aq_hw_s *self); + + int (*set_power)(struct aq_hw_s *self, unsigned int power_state, + u8 *mac); + + int (*get_temp)(struct aq_hw_s *self, int *temp); + + int (*get_cable_len)(struct aq_hw_s *self, int *cable_len); + + int (*set_eee_rate)(struct aq_hw_s *self, u32 speed); + + int (*get_eee_rate)(struct aq_hw_s *self, u32 *rate, + u32 *supported_rates); + + int (*get_flow_control)(struct aq_hw_s *self, u32 *fc); + int (*set_flow_control)(struct aq_hw_s *self); + + int (*led_control)(struct aq_hw_s *self, u32 mode); + + int (*get_eeprom)(struct aq_hw_s *self, int dev_addr, + u32 *data, u32 len, u32 offset); + + int (*set_eeprom)(struct aq_hw_s *self, int dev_addr, + u32 *data, u32 len, u32 offset); + + int (*send_macsec_req)(struct aq_hw_s *self, + struct macsec_msg_fw_request *req, + struct macsec_msg_fw_response 
*response); +}; + +struct atl_sw_stats { + u64 crcerrs; + u64 errbc; + u64 mspdc; + u64 mpctotal; + u64 mpc[8]; + u64 mlfc; + u64 mrfc; + u64 rlec; + u64 lxontxc; + u64 lxonrxc; + u64 lxofftxc; + u64 lxoffrxc; + u64 pxontxc[8]; + u64 pxonrxc[8]; + u64 pxofftxc[8]; + u64 pxoffrxc[8]; + u64 gprc; + u64 bprc; + u64 mprc; + u64 gptc; + u64 gorc; + u64 gotc; + u64 tor; + u64 tpr; + u64 tpt; + u64 mptc; + u64 bptc; + u64 xec; + u64 fccrc; + u64 ldpcec; + u64 pcrc8ec; + + u64 rx_nombuf; + u64 q_ipackets[RTE_ETHDEV_QUEUE_STAT_CNTRS]; + u64 q_opackets[RTE_ETHDEV_QUEUE_STAT_CNTRS]; + u64 q_ibytes[RTE_ETHDEV_QUEUE_STAT_CNTRS]; + u64 q_obytes[RTE_ETHDEV_QUEUE_STAT_CNTRS]; + u64 q_errors[RTE_ETHDEV_QUEUE_STAT_CNTRS]; +}; + +#endif diff --git a/src/spdk/dpdk/drivers/net/atlantic/hw_atl/hw_atl_b0.c b/src/spdk/dpdk/drivers/net/atlantic/hw_atl/hw_atl_b0.c new file mode 100644 index 000000000..7d0e72401 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/atlantic/hw_atl/hw_atl_b0.c @@ -0,0 +1,513 @@ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) +/* Copyright (C) 2014-2017 aQuantia Corporation. */ + +/* File hw_atl_b0.c: Definition of Atlantic hardware specific functions. */ + +#include "../atl_types.h" +#include "hw_atl_b0.h" + +#include "../atl_hw_regs.h" +#include "hw_atl_utils.h" +#include "hw_atl_llh.h" +#include "hw_atl_b0_internal.h" +#include "hw_atl_llh_internal.h" +#include "../atl_logs.h" + +int hw_atl_b0_hw_reset(struct aq_hw_s *self) +{ + int err = 0; + + err = hw_atl_utils_soft_reset(self); + if (err) + return err; + + self->aq_fw_ops->set_state(self, MPI_RESET); + + return err; +} + +int hw_atl_b0_set_fc(struct aq_hw_s *self, u32 fc, u32 tc) +{ + hw_atl_rpb_rx_xoff_en_per_tc_set(self, !!(fc & AQ_NIC_FC_RX), tc); + return 0; +} + +static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self) +{ + u32 tc = 0U; + u32 buff_size = 0U; + unsigned int i_priority = 0U; + + /* TPS Descriptor rate init */ + hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U); + hw_atl_tps_tx_pkt_shed_desc_rate_lim_set(self, 0xA); + + /* TPS VM init */ + hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(self, 0U); + + /* TPS TC credits init */ + hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U); + hw_atl_tps_tx_pkt_shed_data_arb_mode_set(self, 0U); + + hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(self, 0xFFF, 0U); + hw_atl_tps_tx_pkt_shed_tc_data_weight_set(self, 0x64, 0U); + hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0x50, 0U); + hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(self, 0x1E, 0U); + + /* Tx buf size */ + buff_size = HW_ATL_B0_TXBUF_MAX; + + hw_atl_tpb_tx_pkt_buff_size_per_tc_set(self, buff_size, tc); + hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(self, + (buff_size * + (1024 / 32U) * 66U) / + 100U, tc); + hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(self, + (buff_size * + (1024 / 32U) * 50U) / + 100U, tc); + + /* QoS Rx buf size per TC */ + tc = 0; + buff_size = HW_ATL_B0_RXBUF_MAX; + + hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc); + hw_atl_rpb_rx_buff_hi_threshold_per_tc_set(self, + (buff_size * + (1024U / 32U) * 66U) / + 100U, tc); + hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(self, + (buff_size * + (1024U / 32U) * 50U) / + 100U, tc); + hw_atl_rpb_rx_xoff_en_per_tc_set(self, 0U, tc); + + /* QoS 802.1p priority -> TC mapping */ + for (i_priority = 8U; i_priority--;) + hw_atl_rpf_rpb_user_priority_tc_map_set(self, i_priority, 0U); + + return aq_hw_err_from_flags(self); +} + +/* calc hash only in IPv4 header, regardless of presence of TCP */ +#define pif_rpf_rss_ipv4_hdr_only_i (1 << 4) +/* 
calc hash only if TCP header and IPv4 */ +#define pif_rpf_rss_ipv4_tcp_hdr_only_i (1 << 3) +/* calc hash only in IPv6 header, regardless of presence of TCP */ +#define pif_rpf_rss_ipv6_hdr_only_i (1 << 2) +/* calc hash only if TCP header and IPv4 */ +#define pif_rpf_rss_ipv6_tcp_hdr_only_i (1 << 1) +/* bug 5124 - rss hashing types - FIXME */ +#define pif_rpf_rss_dont_use_udp_i (1 << 0) + +static int hw_atl_b0_hw_rss_hash_type_set(struct aq_hw_s *self) +{ + /* misc */ + unsigned int control_reg_val = + IS_CHIP_FEATURE(RPF2) ? 0x000F0000U : 0x00000000U; + + /* RSS hash type set for IP/TCP */ + control_reg_val |= pif_rpf_rss_ipv4_hdr_only_i;//0x1EU; + + aq_hw_write_reg(self, 0x5040U, control_reg_val); + + return aq_hw_err_from_flags(self); +} + +int hw_atl_b0_hw_rss_hash_set(struct aq_hw_s *self, + struct aq_rss_parameters *rss_params) +{ + struct aq_hw_cfg_s *cfg = self->aq_nic_cfg; + int err = 0; + unsigned int i = 0U; + unsigned int addr = 0U; + + for (i = 10, addr = 0U; i--; ++addr) { + u32 key_data = cfg->is_rss ? + htonl(rss_params->hash_secret_key[i]) : 0U; + hw_atl_rpf_rss_key_wr_data_set(self, key_data); + hw_atl_rpf_rss_key_addr_set(self, addr); + hw_atl_rpf_rss_key_wr_en_set(self, 1U); + AQ_HW_WAIT_FOR(hw_atl_rpf_rss_key_wr_en_get(self) == 0, + 1000U, 10U); + if (err < 0) + goto err_exit; + } + + /* RSS Ring selection */ + hw_atl_reg_rx_flr_rss_control1set(self, + cfg->is_rss ? 0xB3333333U : 0x00000000U); + hw_atl_b0_hw_rss_hash_type_set(self); + + err = aq_hw_err_from_flags(self); + +err_exit: + return err; +} + + +int hw_atl_b0_hw_rss_set(struct aq_hw_s *self, + struct aq_rss_parameters *rss_params) +{ + u8 *indirection_table = rss_params->indirection_table; + u32 num_rss_queues = max(1U, self->aq_nic_cfg->num_rss_queues); + u32 i = 0; + u32 addr = 0; + u32 val = 0; + u32 shift = 0; + int err = 0; + + for (i = 0; i < HW_ATL_B0_RSS_REDIRECTION_MAX; i++) { + val |= (u32)(indirection_table[i] % num_rss_queues) << shift; + shift += 3; + + if (shift < 16) + continue; + + hw_atl_rpf_rss_redir_tbl_wr_data_set(self, val & 0xffff); + hw_atl_rpf_rss_redir_tbl_addr_set(self, addr); + + hw_atl_rpf_rss_redir_wr_en_set(self, 1U); + AQ_HW_WAIT_FOR(hw_atl_rpf_rss_redir_wr_en_get(self) == 0, + 1000U, 10U); + + if (err < 0) + goto err_exit; + + shift -= 16; + val >>= 16; + addr++; + } + +err_exit: + return err; +} + +static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self) + /*struct aq_nic_cfg_s *aq_nic_cfg)*/ +{ + unsigned int i; + + /* TX checksums offloads*/ + hw_atl_tpo_ipv4header_crc_offload_en_set(self, 1); + hw_atl_tpo_tcp_udp_crc_offload_en_set(self, 1); + + /* RX checksums offloads*/ + hw_atl_rpo_ipv4header_crc_offload_en_set(self, 1); + hw_atl_rpo_tcp_udp_crc_offload_en_set(self, 1); + + /* LSO offloads*/ + hw_atl_tdm_large_send_offload_en_set(self, 0xFFFFFFFFU); + +/* LRO offloads */ + { + unsigned int val = (8U < HW_ATL_B0_LRO_RXD_MAX) ? 0x3U : + ((4U < HW_ATL_B0_LRO_RXD_MAX) ? 0x2U : + ((2U < HW_ATL_B0_LRO_RXD_MAX) ? 
0x1U : 0x0)); + + for (i = 0; i < HW_ATL_B0_RINGS_MAX; i++) + hw_atl_rpo_lro_max_num_of_descriptors_set(self, val, i); + + hw_atl_rpo_lro_time_base_divider_set(self, 0x61AU); + hw_atl_rpo_lro_inactive_interval_set(self, 0); + hw_atl_rpo_lro_max_coalescing_interval_set(self, 2); + + hw_atl_rpo_lro_qsessions_lim_set(self, 1U); + + hw_atl_rpo_lro_total_desc_lim_set(self, 2U); + + hw_atl_rpo_lro_patch_optimization_en_set(self, 0U); + + hw_atl_rpo_lro_min_pay_of_first_pkt_set(self, 10U); + + hw_atl_rpo_lro_pkt_lim_set(self, 1U); + + hw_atl_rpo_lro_en_set(self, + self->aq_nic_cfg->is_lro ? 0xFFFFFFFFU : 0U); + } + return aq_hw_err_from_flags(self); +} + +static +int hw_atl_b0_hw_init_tx_path(struct aq_hw_s *self) +{ + /* Tx TC/RSS number config */ + hw_atl_rpb_tps_tx_tc_mode_set(self, 1U); + + hw_atl_thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U); + hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U); + hw_atl_thm_lso_tcp_flag_of_last_pkt_set(self, 0x0F7FU); + + /* Tx interrupts */ + hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 0U); + + /* misc */ + aq_hw_write_reg(self, 0x00007040U, IS_CHIP_FEATURE(TPO2) ? + 0x00010000U : 0x00000000U); + hw_atl_tdm_tx_dca_en_set(self, 0U); + hw_atl_tdm_tx_dca_mode_set(self, 0U); + + hw_atl_tpb_tx_path_scp_ins_en_set(self, 1U); + + return aq_hw_err_from_flags(self); +} + +static +int hw_atl_b0_hw_init_rx_path(struct aq_hw_s *self) +{ + struct aq_hw_cfg_s *cfg = self->aq_nic_cfg; + int i; + + /* Rx TC/RSS number config */ + hw_atl_rpb_rpf_rx_traf_class_mode_set(self, 1U); /* 1: 4TC/8Queues */ + + /* Rx flow control */ + hw_atl_rpb_rx_flow_ctl_mode_set(self, 1U); + + /* RSS Ring selection */ + hw_atl_reg_rx_flr_rss_control1set(self, cfg->is_rss ? + 0xB3333333U : 0x00000000U); + + /* Multicast filters */ + for (i = HW_ATL_B0_MAC_MAX; i--;) { + hw_atl_rpfl2_uc_flr_en_set(self, (i == 0U) ? 
1U : 0U, i); + hw_atl_rpfl2unicast_flr_act_set(self, 1U, i); + } + + hw_atl_reg_rx_flr_mcst_flr_msk_set(self, 0x00000000U); + hw_atl_reg_rx_flr_mcst_flr_set(self, 0x00010FFFU, 0U); + + /* Vlan filters */ + hw_atl_rpf_vlan_outer_etht_set(self, 0x88A8U); + hw_atl_rpf_vlan_inner_etht_set(self, 0x8100U); + + /* VLAN proimisc bu defauld */ + hw_atl_rpf_vlan_prom_mode_en_set(self, 1); + + /* Rx Interrupts */ + hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 0U); + + hw_atl_b0_hw_rss_hash_type_set(self); + + hw_atl_rpfl2broadcast_flr_act_set(self, 1U); + hw_atl_rpfl2broadcast_count_threshold_set(self, 0xFFFFU & (~0U / 256U)); + + hw_atl_rpfl2broadcast_en_set(self, 1U); + + hw_atl_rdm_rx_dca_en_set(self, 0U); + hw_atl_rdm_rx_dca_mode_set(self, 0U); + + return aq_hw_err_from_flags(self); +} + +static int hw_atl_b0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr) +{ + int err = 0; + unsigned int h = 0U; + unsigned int l = 0U; + + if (!mac_addr) { + err = -EINVAL; + goto err_exit; + } + h = (mac_addr[0] << 8) | (mac_addr[1]); + l = (mac_addr[2] << 24) | (mac_addr[3] << 16) | + (mac_addr[4] << 8) | mac_addr[5]; + + hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL_B0_MAC); + hw_atl_rpfl2unicast_dest_addresslsw_set(self, l, HW_ATL_B0_MAC); + hw_atl_rpfl2unicast_dest_addressmsw_set(self, h, HW_ATL_B0_MAC); + hw_atl_rpfl2_uc_flr_en_set(self, 1U, HW_ATL_B0_MAC); + + err = aq_hw_err_from_flags(self); + +err_exit: + return err; +} + +int hw_atl_b0_hw_init(struct aq_hw_s *self, u8 *mac_addr) +{ + static u32 aq_hw_atl_igcr_table_[4][2] = { + { 0x20000080U, 0x20000080U }, /* AQ_IRQ_INVALID */ + { 0x20000080U, 0x20000080U }, /* AQ_IRQ_LEGACY */ + { 0x20000021U, 0x20000025U }, /* AQ_IRQ_MSI */ + { 0x200000A2U, 0x200000A6U } /* AQ_IRQ_MSIX */ + }; + + int err = 0; + u32 val; + + struct aq_hw_cfg_s *aq_nic_cfg = self->aq_nic_cfg; + + hw_atl_b0_hw_init_tx_path(self); + hw_atl_b0_hw_init_rx_path(self); + + hw_atl_b0_hw_mac_addr_set(self, mac_addr); + + self->aq_fw_ops->set_link_speed(self, aq_nic_cfg->link_speed_msk); + self->aq_fw_ops->set_state(self, MPI_INIT); + + hw_atl_b0_hw_qos_set(self); + hw_atl_b0_hw_rss_set(self, &aq_nic_cfg->aq_rss); + hw_atl_b0_hw_rss_hash_set(self, &aq_nic_cfg->aq_rss); + + /* Force limit MRRS on RDM/TDM to 2K */ + val = aq_hw_read_reg(self, HW_ATL_PCI_REG_CONTROL6_ADR); + aq_hw_write_reg(self, HW_ATL_PCI_REG_CONTROL6_ADR, + (val & ~0x707) | 0x404); + + /* TX DMA total request limit. B0 hardware is not capable to + * handle more than (8K-MRRS) incoming DMA data. + * Value 24 in 256byte units + */ + aq_hw_write_reg(self, HW_ATL_TX_DMA_TOTAL_REQ_LIMIT_ADR, 24); + + /* Reset link status and read out initial hardware counters */ + self->aq_link_status.mbps = 0; + self->aq_fw_ops->update_stats(self); + + err = aq_hw_err_from_flags(self); + if (err < 0) + goto err_exit; + + /* Interrupts */ + hw_atl_reg_irq_glb_ctl_set(self, + aq_hw_atl_igcr_table_[aq_nic_cfg->irq_type] + [(aq_nic_cfg->vecs > 1U) ? 
+ 1 : 0]); + + hw_atl_itr_irq_auto_masklsw_set(self, 0xffffffff); + + /* Interrupts */ + hw_atl_reg_gen_irq_map_set(self, 0, 0); + hw_atl_reg_gen_irq_map_set(self, 0x80 | ATL_IRQ_CAUSE_LINK, 3); + + hw_atl_b0_hw_offload_set(self); + +err_exit: + return err; +} + +int hw_atl_b0_hw_ring_tx_start(struct aq_hw_s *self, int index) +{ + hw_atl_tdm_tx_desc_en_set(self, 1, index); + return aq_hw_err_from_flags(self); +} + +int hw_atl_b0_hw_ring_rx_start(struct aq_hw_s *self, int index) +{ + hw_atl_rdm_rx_desc_en_set(self, 1, index); + return aq_hw_err_from_flags(self); +} + +int hw_atl_b0_hw_start(struct aq_hw_s *self) +{ + hw_atl_tpb_tx_buff_en_set(self, 1); + hw_atl_rpb_rx_buff_en_set(self, 1); + return aq_hw_err_from_flags(self); +} + +int hw_atl_b0_hw_tx_ring_tail_update(struct aq_hw_s *self, int tail, int index) +{ + hw_atl_reg_tx_dma_desc_tail_ptr_set(self, tail, index); + return 0; +} + +int hw_atl_b0_hw_ring_rx_init(struct aq_hw_s *self, uint64_t base_addr, + int index, int size, int buff_size, int cpu, int vec) +{ + u32 dma_desc_addr_lsw = (u32)base_addr; + u32 dma_desc_addr_msw = (u32)(base_addr >> 32); + + hw_atl_rdm_rx_desc_en_set(self, false, index); + + hw_atl_rdm_rx_desc_head_splitting_set(self, 0U, index); + + hw_atl_reg_rx_dma_desc_base_addresslswset(self, dma_desc_addr_lsw, + index); + + hw_atl_reg_rx_dma_desc_base_addressmswset(self, dma_desc_addr_msw, + index); + + hw_atl_rdm_rx_desc_len_set(self, size / 8U, index); + + hw_atl_rdm_rx_desc_data_buff_size_set(self, buff_size / 1024U, index); + + hw_atl_rdm_rx_desc_head_buff_size_set(self, 0U, index); + hw_atl_rdm_rx_desc_head_splitting_set(self, 0U, index); + hw_atl_rpo_rx_desc_vlan_stripping_set(self, 0U, index); + + /* Rx ring set mode */ + + /* Mapping interrupt vector */ + hw_atl_itr_irq_map_rx_set(self, vec, index); + hw_atl_itr_irq_map_en_rx_set(self, true, index); + + hw_atl_rdm_cpu_id_set(self, cpu, index); + hw_atl_rdm_rx_desc_dca_en_set(self, 0U, index); + hw_atl_rdm_rx_head_dca_en_set(self, 0U, index); + hw_atl_rdm_rx_pld_dca_en_set(self, 0U, index); + + return aq_hw_err_from_flags(self); +} + +int hw_atl_b0_hw_ring_tx_init(struct aq_hw_s *self, uint64_t base_addr, + int index, int size, int cpu, int vec) +{ + u32 dma_desc_lsw_addr = (u32)base_addr; + u32 dma_desc_msw_addr = (u32)(base_addr >> 32); + + hw_atl_reg_tx_dma_desc_base_addresslswset(self, dma_desc_lsw_addr, + index); + + hw_atl_reg_tx_dma_desc_base_addressmswset(self, dma_desc_msw_addr, + index); + + hw_atl_tdm_tx_desc_len_set(self, size / 8U, index); + + hw_atl_b0_hw_tx_ring_tail_update(self, 0, index); + + /* Set Tx threshold */ + hw_atl_tdm_tx_desc_wr_wb_threshold_set(self, 0U, index); + + /* Mapping interrupt vector */ + hw_atl_itr_irq_map_tx_set(self, vec, index); + hw_atl_itr_irq_map_en_tx_set(self, true, index); + + hw_atl_tdm_cpu_id_set(self, cpu, index); + hw_atl_tdm_tx_desc_dca_en_set(self, 0U, index); + + return aq_hw_err_from_flags(self); +} + +int hw_atl_b0_hw_irq_enable(struct aq_hw_s *self, u64 mask) +{ + hw_atl_itr_irq_msk_setlsw_set(self, LODWORD(mask)); + return aq_hw_err_from_flags(self); +} + +int hw_atl_b0_hw_irq_disable(struct aq_hw_s *self, u64 mask) +{ + hw_atl_itr_irq_msk_clearlsw_set(self, LODWORD(mask)); + hw_atl_itr_irq_status_clearlsw_set(self, LODWORD(mask)); + + return aq_hw_err_from_flags(self); +} + +int hw_atl_b0_hw_irq_read(struct aq_hw_s *self, u64 *mask) +{ + *mask = hw_atl_itr_irq_statuslsw_get(self); + return aq_hw_err_from_flags(self); +} + +int hw_atl_b0_hw_ring_tx_stop(struct aq_hw_s *self, int index) +{ + 
hw_atl_tdm_tx_desc_en_set(self, 0U, index); + return aq_hw_err_from_flags(self); +} + +int hw_atl_b0_hw_ring_rx_stop(struct aq_hw_s *self, int index) +{ + hw_atl_rdm_rx_desc_en_set(self, 0U, index); + return aq_hw_err_from_flags(self); +} diff --git a/src/spdk/dpdk/drivers/net/atlantic/hw_atl/hw_atl_b0.h b/src/spdk/dpdk/drivers/net/atlantic/hw_atl/hw_atl_b0.h new file mode 100644 index 000000000..d1ba2aceb --- /dev/null +++ b/src/spdk/dpdk/drivers/net/atlantic/hw_atl/hw_atl_b0.h @@ -0,0 +1,42 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) */ +/* Copyright (C) 2014-2017 aQuantia Corporation. */ + +/* File hw_atl_b0.h: Declaration of abstract interface for Atlantic hardware + * specific functions. + */ + +#ifndef HW_ATL_B0_H +#define HW_ATL_B0_H + +int hw_atl_b0_hw_reset(struct aq_hw_s *self); +int hw_atl_b0_hw_init(struct aq_hw_s *self, u8 *mac_addr); + +int hw_atl_b0_set_fc(struct aq_hw_s *self, u32 fc, u32 tc); + +int hw_atl_b0_hw_ring_tx_init(struct aq_hw_s *self, uint64_t base_addr, + int index, int size, int cpu, int vec); +int hw_atl_b0_hw_ring_rx_init(struct aq_hw_s *self, uint64_t base_addr, + int index, int size, int buff_size, int cpu, int vec); + +int hw_atl_b0_hw_start(struct aq_hw_s *self); + +int hw_atl_b0_hw_ring_rx_start(struct aq_hw_s *self, int index); +int hw_atl_b0_hw_ring_tx_start(struct aq_hw_s *self, int index); + + +int hw_atl_b0_hw_ring_tx_stop(struct aq_hw_s *self, int index); +int hw_atl_b0_hw_ring_rx_stop(struct aq_hw_s *self, int index); + + +int hw_atl_b0_hw_tx_ring_tail_update(struct aq_hw_s *self, int tail, int index); + +int hw_atl_b0_hw_rss_hash_set(struct aq_hw_s *self, + struct aq_rss_parameters *rss_params); +int hw_atl_b0_hw_rss_set(struct aq_hw_s *self, + struct aq_rss_parameters *rss_params); + +int hw_atl_b0_hw_irq_enable(struct aq_hw_s *self, u64 mask); +int hw_atl_b0_hw_irq_disable(struct aq_hw_s *self, u64 mask); +int hw_atl_b0_hw_irq_read(struct aq_hw_s *self, u64 *mask); + +#endif /* HW_ATL_B0_H */ diff --git a/src/spdk/dpdk/drivers/net/atlantic/hw_atl/hw_atl_b0_internal.h b/src/spdk/dpdk/drivers/net/atlantic/hw_atl/hw_atl_b0_internal.h new file mode 100644 index 000000000..48152eada --- /dev/null +++ b/src/spdk/dpdk/drivers/net/atlantic/hw_atl/hw_atl_b0_internal.h @@ -0,0 +1,145 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) */ +/* Copyright (C) 2014-2017 aQuantia Corporation. */ + +/* File hw_atl_b0_internal.h: Definition of Atlantic B0 chip specific + * constants. 
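+ *
+ * Illustrative decode of the second Rx write-back status word using the
+ * HW_ATL_B0_RXD_WB_STAT2_* masks defined below; "stat2" stands for the
+ * value read back from the descriptor and is a placeholder name:
+ *
+ *   int done   = !!(stat2 & HW_ATL_B0_RXD_WB_STAT2_DD);
+ *   int eop    = !!(stat2 & HW_ATL_B0_RXD_WB_STAT2_EOP);
+ *   int macerr = !!(stat2 & HW_ATL_B0_RXD_WB_STAT2_MACERR);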
+ */ + +#ifndef HW_ATL_B0_INTERNAL_H +#define HW_ATL_B0_INTERNAL_H + + +#define HW_ATL_B0_MTU_JUMBO 16352U +#define HW_ATL_B0_MTU 1514U + +#define HW_ATL_B0_TX_RINGS 4U +#define HW_ATL_B0_RX_RINGS 4U + +#define HW_ATL_B0_RINGS_MAX 32U +#define HW_ATL_B0_TXD_SIZE (16U) +#define HW_ATL_B0_RXD_SIZE (16U) + +#define HW_ATL_B0_MAC 0U +#define HW_ATL_B0_MAC_MIN 1U +#define HW_ATL_B0_MAC_MAX 33U + +/* Maximum supported VLAN filters */ +#define HW_ATL_B0_MAX_VLAN_IDS 16 + +/* UCAST/MCAST filters */ +#define HW_ATL_B0_UCAST_FILTERS_MAX 38 +#define HW_ATL_B0_MCAST_FILTERS_MAX 8 + +/* interrupts */ +#define HW_ATL_B0_ERR_INT 8U +#define HW_ATL_B0_INT_MASK (0xFFFFFFFFU) + +#define HW_ATL_B0_TXD_CTL2_LEN (0xFFFFC000) +#define HW_ATL_B0_TXD_CTL2_CTX_EN (0x00002000) +#define HW_ATL_B0_TXD_CTL2_CTX_IDX (0x00001000) + +#define HW_ATL_B0_TXD_CTL_DESC_TYPE_TXD (0x00000001) +#define HW_ATL_B0_TXD_CTL_DESC_TYPE_TXC (0x00000002) +#define HW_ATL_B0_TXD_CTL_BLEN (0x000FFFF0) +#define HW_ATL_B0_TXD_CTL_DD (0x00100000) +#define HW_ATL_B0_TXD_CTL_EOP (0x00200000) + +#define HW_ATL_B0_TXD_CTL_CMD_X (0x3FC00000) + +#define HW_ATL_B0_TXD_CTL_CMD_VLAN BIT(22) +#define HW_ATL_B0_TXD_CTL_CMD_FCS BIT(23) +#define HW_ATL_B0_TXD_CTL_CMD_IPCSO BIT(24) +#define HW_ATL_B0_TXD_CTL_CMD_TUCSO BIT(25) +#define HW_ATL_B0_TXD_CTL_CMD_LSO BIT(26) +#define HW_ATL_B0_TXD_CTL_CMD_WB BIT(27) +#define HW_ATL_B0_TXD_CTL_CMD_VXLAN BIT(28) + +#define HW_ATL_B0_TXD_CTL_CMD_IPV6 BIT(21) +#define HW_ATL_B0_TXD_CTL_CMD_TCP BIT(22) + +#define HW_ATL_B0_MPI_CONTROL_ADR 0x0368U +#define HW_ATL_B0_MPI_STATE_ADR 0x036CU + +#define HW_ATL_B0_MPI_SPEED_MSK 0xFFFFU +#define HW_ATL_B0_MPI_SPEED_SHIFT 16U + +#define HW_ATL_B0_TXBUF_MAX 160U +#define HW_ATL_B0_RXBUF_MAX 320U + +#define HW_ATL_B0_RXD_BUF_SIZE_MAX (16 * 1024) + +#define HW_ATL_B0_RSS_REDIRECTION_MAX 64U +#define HW_ATL_B0_RSS_REDIRECTION_BITS 3U +#define HW_ATL_B0_RSS_HASHKEY_BITS 320U + +#define HW_ATL_B0_TCRSS_4_8 1 +#define HW_ATL_B0_TC_MAX 1U +#define HW_ATL_B0_RSS_MAX 8U + +#define HW_ATL_B0_LRO_RXD_MAX 2U +#define HW_ATL_B0_RS_SLIP_ENABLED 0U + +/* (256k -1(max pay_len) - 54(header)) */ +#define HAL_ATL_B0_LSO_MAX_SEGMENT_SIZE 262089U + +/* (256k -1(max pay_len) - 74(header)) */ +#define HAL_ATL_B0_LSO_IPV6_MAX_SEGMENT_SIZE 262069U + +#define HW_ATL_B0_CHIP_REVISION_B0 0xA0U +#define HW_ATL_B0_CHIP_REVISION_UNKNOWN 0xFFU + +#define HW_ATL_B0_FW_SEMA_RAM 0x2U + +#define HW_ATL_B0_TXC_LEN_TUNLEN (0x0000FF00) +#define HW_ATL_B0_TXC_LEN_OUTLEN (0xFFFF0000) + +#define HW_ATL_B0_TXC_CTL_DESC_TYPE (0x00000007) +#define HW_ATL_B0_TXC_CTL_CTX_ID (0x00000008) +#define HW_ATL_B0_TXC_CTL_VLAN (0x000FFFF0) +#define HW_ATL_B0_TXC_CTL_CMD (0x00F00000) +#define HW_ATL_B0_TXC_CTL_L2LEN (0x7F000000) + +#define HW_ATL_B0_TXC_CTL_L3LEN (0x80000000) /* L3LEN lsb */ +#define HW_ATL_B0_TXC_LEN2_L3LEN (0x000000FF) /* L3LE upper bits */ +#define HW_ATL_B0_TXC_LEN2_L4LEN (0x0000FF00) +#define HW_ATL_B0_TXC_LEN2_MSSLEN (0xFFFF0000) + +#define HW_ATL_B0_RXD_DD (0x1) +#define HW_ATL_B0_RXD_NCEA0 (0x1) + +#define HW_ATL_B0_RXD_WB_STAT_RSSTYPE (0x0000000F) +#define HW_ATL_B0_RXD_WB_STAT_PKTTYPE (0x00000FF0) +#define HW_ATL_B0_RXD_WB_STAT_RXCTRL (0x00180000) +#define HW_ATL_B0_RXD_WB_STAT_SPLHDR (0x00200000) +#define HW_ATL_B0_RXD_WB_STAT_HDRLEN (0xFFC00000) + +#define HW_ATL_B0_RXD_WB_STAT2_DD (0x0001) +#define HW_ATL_B0_RXD_WB_STAT2_EOP (0x0002) +#define HW_ATL_B0_RXD_WB_STAT2_RXSTAT (0x003C) +#define HW_ATL_B0_RXD_WB_STAT2_MACERR (0x0004) +#define HW_ATL_B0_RXD_WB_STAT2_IP4ERR (0x0008) +#define 
HW_ATL_B0_RXD_WB_STAT2_TCPUPDERR (0x0010) +#define HW_ATL_B0_RXD_WB_STAT2_RXESTAT (0x0FC0) +#define HW_ATL_B0_RXD_WB_STAT2_RSCCNT (0xF000) + +#define L2_FILTER_ACTION_DISCARD (0x0) +#define L2_FILTER_ACTION_HOST (0x1) + +#define HW_ATL_B0_UCP_0X370_REG (0x370) + +#define HW_ATL_B0_FLUSH() AQ_HW_READ_REG(self, 0x10) + +#define HW_ATL_INTR_MODER_MAX 0x1FF +#define HW_ATL_INTR_MODER_MIN 0xFF + +#define HW_ATL_B0_MIN_RXD \ + (ALIGN(AQ_CFG_SKB_FRAGS_MAX + 1U, AQ_HW_RXD_MULTIPLE)) +#define HW_ATL_B0_MIN_TXD \ + (ALIGN(AQ_CFG_SKB_FRAGS_MAX + 1U, AQ_HW_TXD_MULTIPLE)) + +#define HW_ATL_B0_MAX_RXD 8184U +#define HW_ATL_B0_MAX_TXD 8184U + +/* HW layer capabilities */ + +#endif /* HW_ATL_B0_INTERNAL_H */ diff --git a/src/spdk/dpdk/drivers/net/atlantic/hw_atl/hw_atl_llh.c b/src/spdk/dpdk/drivers/net/atlantic/hw_atl/hw_atl_llh.c new file mode 100644 index 000000000..2dc5be2ff --- /dev/null +++ b/src/spdk/dpdk/drivers/net/atlantic/hw_atl/hw_atl_llh.c @@ -0,0 +1,1490 @@ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) +/* Copyright (C) 2014-2017 aQuantia Corporation. */ + +/* File hw_atl_llh.c: Definitions of bitfield and register access functions for + * Atlantic registers. + */ + +#include "hw_atl_llh.h" + +#include "../atl_hw_regs.h" +#include "hw_atl_llh_internal.h" + +/* global */ +void hw_atl_reg_glb_cpu_sem_set(struct aq_hw_s *aq_hw, u32 glb_cpu_sem, + u32 semaphore) +{ + aq_hw_write_reg(aq_hw, HW_ATL_GLB_CPU_SEM_ADR(semaphore), glb_cpu_sem); +} + +u32 hw_atl_reg_glb_cpu_sem_get(struct aq_hw_s *aq_hw, u32 semaphore) +{ + return aq_hw_read_reg(aq_hw, HW_ATL_GLB_CPU_SEM_ADR(semaphore)); +} + +void hw_atl_glb_glb_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 glb_reg_res_dis) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_GLB_REG_RES_DIS_ADR, + HW_ATL_GLB_REG_RES_DIS_MSK, + HW_ATL_GLB_REG_RES_DIS_SHIFT, + glb_reg_res_dis); +} + +void hw_atl_glb_soft_res_set(struct aq_hw_s *aq_hw, u32 soft_res) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_GLB_SOFT_RES_ADR, + HW_ATL_GLB_SOFT_RES_MSK, + HW_ATL_GLB_SOFT_RES_SHIFT, soft_res); +} + +u32 hw_atl_glb_soft_res_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg_bit(aq_hw, HW_ATL_GLB_SOFT_RES_ADR, + HW_ATL_GLB_SOFT_RES_MSK, + HW_ATL_GLB_SOFT_RES_SHIFT); +} + +u32 hw_atl_reg_glb_mif_id_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg(aq_hw, HW_ATL_GLB_MIF_ID_ADR); +} + +/* stats */ +u32 hw_atl_rpb_rx_dma_drop_pkt_cnt_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg(aq_hw, HW_ATL_RPB_RX_DMA_DROP_PKT_CNT_ADR); +} + +u32 hw_atl_stats_rx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg(aq_hw, HW_ATL_STATS_RX_DMA_GOOD_OCTET_COUNTERLSW); +} + +u32 hw_atl_stats_rx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg(aq_hw, HW_ATL_STATS_RX_DMA_GOOD_PKT_COUNTERLSW); +} + +u32 hw_atl_stats_tx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg(aq_hw, HW_ATL_STATS_TX_DMA_GOOD_OCTET_COUNTERLSW); +} + +u32 hw_atl_stats_tx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg(aq_hw, HW_ATL_STATS_TX_DMA_GOOD_PKT_COUNTERLSW); +} + +u32 hw_atl_stats_rx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg(aq_hw, HW_ATL_STATS_RX_DMA_GOOD_OCTET_COUNTERMSW); +} + +u32 hw_atl_stats_rx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg(aq_hw, HW_ATL_STATS_RX_DMA_GOOD_PKT_COUNTERMSW); +} + +u32 hw_atl_stats_tx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg(aq_hw, 
HW_ATL_STATS_TX_DMA_GOOD_OCTET_COUNTERMSW); +} + +u32 hw_atl_stats_tx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg(aq_hw, HW_ATL_STATS_TX_DMA_GOOD_PKT_COUNTERMSW); +} + +/* interrupt */ +void hw_atl_itr_irq_auto_masklsw_set(struct aq_hw_s *aq_hw, + u32 irq_auto_masklsw) +{ + aq_hw_write_reg(aq_hw, HW_ATL_ITR_IAMRLSW_ADR, irq_auto_masklsw); +} + +void hw_atl_itr_irq_map_en_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_rx, + u32 rx) +{ +/* register address for bitfield imr_rx{r}_en */ + static const u32 itr_imr_rxren_adr[32] = { + 0x00002100U, 0x00002100U, 0x00002104U, 0x00002104U, + 0x00002108U, 0x00002108U, 0x0000210CU, 0x0000210CU, + 0x00002110U, 0x00002110U, 0x00002114U, 0x00002114U, + 0x00002118U, 0x00002118U, 0x0000211CU, 0x0000211CU, + 0x00002120U, 0x00002120U, 0x00002124U, 0x00002124U, + 0x00002128U, 0x00002128U, 0x0000212CU, 0x0000212CU, + 0x00002130U, 0x00002130U, 0x00002134U, 0x00002134U, + 0x00002138U, 0x00002138U, 0x0000213CU, 0x0000213CU + }; + +/* bitmask for bitfield imr_rx{r}_en */ + static const u32 itr_imr_rxren_msk[32] = { + 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U, + 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U, + 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U, + 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U, + 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U, + 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U, + 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U, + 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U + }; + +/* lower bit position of bitfield imr_rx{r}_en */ + static const u32 itr_imr_rxren_shift[32] = { + 15U, 7U, 15U, 7U, 15U, 7U, 15U, 7U, + 15U, 7U, 15U, 7U, 15U, 7U, 15U, 7U, + 15U, 7U, 15U, 7U, 15U, 7U, 15U, 7U, + 15U, 7U, 15U, 7U, 15U, 7U, 15U, 7U + }; + + aq_hw_write_reg_bit(aq_hw, itr_imr_rxren_adr[rx], + itr_imr_rxren_msk[rx], + itr_imr_rxren_shift[rx], + irq_map_en_rx); +} + +void hw_atl_itr_irq_map_en_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_tx, + u32 tx) +{ +/* register address for bitfield imr_tx{t}_en */ + static const u32 itr_imr_txten_adr[32] = { + 0x00002100U, 0x00002100U, 0x00002104U, 0x00002104U, + 0x00002108U, 0x00002108U, 0x0000210CU, 0x0000210CU, + 0x00002110U, 0x00002110U, 0x00002114U, 0x00002114U, + 0x00002118U, 0x00002118U, 0x0000211CU, 0x0000211CU, + 0x00002120U, 0x00002120U, 0x00002124U, 0x00002124U, + 0x00002128U, 0x00002128U, 0x0000212CU, 0x0000212CU, + 0x00002130U, 0x00002130U, 0x00002134U, 0x00002134U, + 0x00002138U, 0x00002138U, 0x0000213CU, 0x0000213CU + }; + +/* bitmask for bitfield imr_tx{t}_en */ + static const u32 itr_imr_txten_msk[32] = { + 0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U, + 0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U, + 0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U, + 0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U, + 0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U, + 0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U, + 0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U, + 0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U + }; + +/* lower bit position of bitfield imr_tx{t}_en */ + static const u32 itr_imr_txten_shift[32] = { + 31U, 23U, 31U, 23U, 31U, 23U, 31U, 23U, + 31U, 23U, 31U, 23U, 31U, 23U, 31U, 23U, + 31U, 23U, 31U, 23U, 31U, 23U, 31U, 23U, + 31U, 23U, 31U, 23U, 31U, 23U, 31U, 23U + }; + + aq_hw_write_reg_bit(aq_hw, itr_imr_txten_adr[tx], + itr_imr_txten_msk[tx], + itr_imr_txten_shift[tx], + irq_map_en_tx); +} + +void hw_atl_itr_irq_map_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_rx, u32 rx) +{ +/* register 
address for bitfield imr_rx{r}[4:0] */ + static const u32 itr_imr_rxr_adr[32] = { + 0x00002100U, 0x00002100U, 0x00002104U, 0x00002104U, + 0x00002108U, 0x00002108U, 0x0000210CU, 0x0000210CU, + 0x00002110U, 0x00002110U, 0x00002114U, 0x00002114U, + 0x00002118U, 0x00002118U, 0x0000211CU, 0x0000211CU, + 0x00002120U, 0x00002120U, 0x00002124U, 0x00002124U, + 0x00002128U, 0x00002128U, 0x0000212CU, 0x0000212CU, + 0x00002130U, 0x00002130U, 0x00002134U, 0x00002134U, + 0x00002138U, 0x00002138U, 0x0000213CU, 0x0000213CU + }; + +/* bitmask for bitfield imr_rx{r}[4:0] */ + static const u32 itr_imr_rxr_msk[32] = { + 0x00001f00U, 0x0000001FU, 0x00001F00U, 0x0000001FU, + 0x00001f00U, 0x0000001FU, 0x00001F00U, 0x0000001FU, + 0x00001f00U, 0x0000001FU, 0x00001F00U, 0x0000001FU, + 0x00001f00U, 0x0000001FU, 0x00001F00U, 0x0000001FU, + 0x00001f00U, 0x0000001FU, 0x00001F00U, 0x0000001FU, + 0x00001f00U, 0x0000001FU, 0x00001F00U, 0x0000001FU, + 0x00001f00U, 0x0000001FU, 0x00001F00U, 0x0000001FU, + 0x00001f00U, 0x0000001FU, 0x00001F00U, 0x0000001FU + }; + +/* lower bit position of bitfield imr_rx{r}[4:0] */ + static const u32 itr_imr_rxr_shift[32] = { + 8U, 0U, 8U, 0U, 8U, 0U, 8U, 0U, + 8U, 0U, 8U, 0U, 8U, 0U, 8U, 0U, + 8U, 0U, 8U, 0U, 8U, 0U, 8U, 0U, + 8U, 0U, 8U, 0U, 8U, 0U, 8U, 0U + }; + + aq_hw_write_reg_bit(aq_hw, itr_imr_rxr_adr[rx], + itr_imr_rxr_msk[rx], + itr_imr_rxr_shift[rx], + irq_map_rx); +} + +void hw_atl_itr_irq_map_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_tx, u32 tx) +{ +/* register address for bitfield imr_tx{t}[4:0] */ + static const u32 itr_imr_txt_adr[32] = { + 0x00002100U, 0x00002100U, 0x00002104U, 0x00002104U, + 0x00002108U, 0x00002108U, 0x0000210CU, 0x0000210CU, + 0x00002110U, 0x00002110U, 0x00002114U, 0x00002114U, + 0x00002118U, 0x00002118U, 0x0000211CU, 0x0000211CU, + 0x00002120U, 0x00002120U, 0x00002124U, 0x00002124U, + 0x00002128U, 0x00002128U, 0x0000212CU, 0x0000212CU, + 0x00002130U, 0x00002130U, 0x00002134U, 0x00002134U, + 0x00002138U, 0x00002138U, 0x0000213CU, 0x0000213CU + }; + +/* bitmask for bitfield imr_tx{t}[4:0] */ + static const u32 itr_imr_txt_msk[32] = { + 0x1f000000U, 0x001F0000U, 0x1F000000U, 0x001F0000U, + 0x1f000000U, 0x001F0000U, 0x1F000000U, 0x001F0000U, + 0x1f000000U, 0x001F0000U, 0x1F000000U, 0x001F0000U, + 0x1f000000U, 0x001F0000U, 0x1F000000U, 0x001F0000U, + 0x1f000000U, 0x001F0000U, 0x1F000000U, 0x001F0000U, + 0x1f000000U, 0x001F0000U, 0x1F000000U, 0x001F0000U, + 0x1f000000U, 0x001F0000U, 0x1F000000U, 0x001F0000U, + 0x1f000000U, 0x001F0000U, 0x1F000000U, 0x001F0000U + }; + +/* lower bit position of bitfield imr_tx{t}[4:0] */ + static const u32 itr_imr_txt_shift[32] = { + 24U, 16U, 24U, 16U, 24U, 16U, 24U, 16U, + 24U, 16U, 24U, 16U, 24U, 16U, 24U, 16U, + 24U, 16U, 24U, 16U, 24U, 16U, 24U, 16U, + 24U, 16U, 24U, 16U, 24U, 16U, 24U, 16U + }; + + aq_hw_write_reg_bit(aq_hw, itr_imr_txt_adr[tx], + itr_imr_txt_msk[tx], + itr_imr_txt_shift[tx], + irq_map_tx); +} + +void hw_atl_itr_irq_msk_clearlsw_set(struct aq_hw_s *aq_hw, + u32 irq_msk_clearlsw) +{ + aq_hw_write_reg(aq_hw, HW_ATL_ITR_IMCRLSW_ADR, irq_msk_clearlsw); +} + +void hw_atl_itr_irq_msk_setlsw_set(struct aq_hw_s *aq_hw, u32 irq_msk_setlsw) +{ + aq_hw_write_reg(aq_hw, HW_ATL_ITR_IMSRLSW_ADR, irq_msk_setlsw); +} + +void hw_atl_itr_irq_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 irq_reg_res_dis) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_ITR_REG_RES_DSBL_ADR, + HW_ATL_ITR_REG_RES_DSBL_MSK, + HW_ATL_ITR_REG_RES_DSBL_SHIFT, irq_reg_res_dis); +} + +void hw_atl_itr_irq_status_clearlsw_set(struct aq_hw_s *aq_hw, + u32 
irq_status_clearlsw) +{ + aq_hw_write_reg(aq_hw, HW_ATL_ITR_ISCRLSW_ADR, irq_status_clearlsw); +} + +u32 hw_atl_itr_irq_statuslsw_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg(aq_hw, HW_ATL_ITR_ISRLSW_ADR); +} + +u32 hw_atl_itr_res_irq_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg_bit(aq_hw, HW_ATL_ITR_RES_ADR, HW_ATL_ITR_RES_MSK, + HW_ATL_ITR_RES_SHIFT); +} + +void hw_atl_itr_res_irq_set(struct aq_hw_s *aq_hw, u32 res_irq) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_ITR_RES_ADR, HW_ATL_ITR_RES_MSK, + HW_ATL_ITR_RES_SHIFT, res_irq); +} + +/* rdm */ +void hw_atl_rdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DCADCPUID_ADR(dca), + HW_ATL_RDM_DCADCPUID_MSK, + HW_ATL_RDM_DCADCPUID_SHIFT, cpuid); +} + +void hw_atl_rdm_rx_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_dca_en) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DCA_EN_ADR, HW_ATL_RDM_DCA_EN_MSK, + HW_ATL_RDM_DCA_EN_SHIFT, rx_dca_en); +} + +void hw_atl_rdm_rx_dca_mode_set(struct aq_hw_s *aq_hw, u32 rx_dca_mode) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DCA_MODE_ADR, + HW_ATL_RDM_DCA_MODE_MSK, + HW_ATL_RDM_DCA_MODE_SHIFT, rx_dca_mode); +} + +void hw_atl_rdm_rx_desc_data_buff_size_set(struct aq_hw_s *aq_hw, + u32 rx_desc_data_buff_size, + u32 descriptor) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DESCDDATA_SIZE_ADR(descriptor), + HW_ATL_RDM_DESCDDATA_SIZE_MSK, + HW_ATL_RDM_DESCDDATA_SIZE_SHIFT, + rx_desc_data_buff_size); +} + +void hw_atl_rdm_rx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_dca_en, + u32 dca) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DCADDESC_EN_ADR(dca), + HW_ATL_RDM_DCADDESC_EN_MSK, + HW_ATL_RDM_DCADDESC_EN_SHIFT, + rx_desc_dca_en); +} + +void hw_atl_rdm_rx_desc_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_en, + u32 descriptor) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DESCDEN_ADR(descriptor), + HW_ATL_RDM_DESCDEN_MSK, + HW_ATL_RDM_DESCDEN_SHIFT, + rx_desc_en); +} + +void hw_atl_rdm_rx_desc_head_buff_size_set(struct aq_hw_s *aq_hw, + u32 rx_desc_head_buff_size, + u32 descriptor) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DESCDHDR_SIZE_ADR(descriptor), + HW_ATL_RDM_DESCDHDR_SIZE_MSK, + HW_ATL_RDM_DESCDHDR_SIZE_SHIFT, + rx_desc_head_buff_size); +} + +void hw_atl_rdm_rx_desc_head_splitting_set(struct aq_hw_s *aq_hw, + u32 rx_desc_head_splitting, + u32 descriptor) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DESCDHDR_SPLIT_ADR(descriptor), + HW_ATL_RDM_DESCDHDR_SPLIT_MSK, + HW_ATL_RDM_DESCDHDR_SPLIT_SHIFT, + rx_desc_head_splitting); +} + +u32 hw_atl_rdm_rx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor) +{ + return aq_hw_read_reg_bit(aq_hw, HW_ATL_RDM_DESCDHD_ADR(descriptor), + HW_ATL_RDM_DESCDHD_MSK, + HW_ATL_RDM_DESCDHD_SHIFT); +} + +void hw_atl_rdm_rx_desc_len_set(struct aq_hw_s *aq_hw, u32 rx_desc_len, + u32 descriptor) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DESCDLEN_ADR(descriptor), + HW_ATL_RDM_DESCDLEN_MSK, HW_ATL_RDM_DESCDLEN_SHIFT, + rx_desc_len); +} + +void hw_atl_rdm_rx_desc_res_set(struct aq_hw_s *aq_hw, u32 rx_desc_res, + u32 descriptor) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DESCDRESET_ADR(descriptor), + HW_ATL_RDM_DESCDRESET_MSK, + HW_ATL_RDM_DESCDRESET_SHIFT, + rx_desc_res); +} + +void hw_atl_rdm_rx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw, + u32 rx_desc_wr_wb_irq_en) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_INT_DESC_WRB_EN_ADR, + HW_ATL_RDM_INT_DESC_WRB_EN_MSK, + HW_ATL_RDM_INT_DESC_WRB_EN_SHIFT, + rx_desc_wr_wb_irq_en); +} + +void hw_atl_rdm_rx_head_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_head_dca_en, + u32 dca) +{ + 
aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DCADHDR_EN_ADR(dca), + HW_ATL_RDM_DCADHDR_EN_MSK, + HW_ATL_RDM_DCADHDR_EN_SHIFT, + rx_head_dca_en); +} + +void hw_atl_rdm_rx_pld_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_pld_dca_en, + u32 dca) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DCADPAY_EN_ADR(dca), + HW_ATL_RDM_DCADPAY_EN_MSK, + HW_ATL_RDM_DCADPAY_EN_SHIFT, + rx_pld_dca_en); +} + +void hw_atl_rdm_rdm_intr_moder_en_set(struct aq_hw_s *aq_hw, + u32 rdm_intr_moder_en) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_INT_RIM_EN_ADR, + HW_ATL_RDM_INT_RIM_EN_MSK, + HW_ATL_RDM_INT_RIM_EN_SHIFT, + rdm_intr_moder_en); +} + +/* reg */ +void hw_atl_reg_gen_irq_map_set(struct aq_hw_s *aq_hw, u32 gen_intr_map, + u32 regidx) +{ + aq_hw_write_reg(aq_hw, HW_ATL_GEN_INTR_MAP_ADR(regidx), gen_intr_map); +} + +u32 hw_atl_reg_gen_irq_status_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg(aq_hw, HW_ATL_GEN_INTR_STAT_ADR); +} + +void hw_atl_reg_irq_glb_ctl_set(struct aq_hw_s *aq_hw, u32 intr_glb_ctl) +{ + aq_hw_write_reg(aq_hw, HW_ATL_INTR_GLB_CTL_ADR, intr_glb_ctl); +} + +void hw_atl_reg_irq_thr_set(struct aq_hw_s *aq_hw, u32 intr_thr, u32 throttle) +{ + aq_hw_write_reg(aq_hw, HW_ATL_INTR_THR_ADR(throttle), intr_thr); +} + +void hw_atl_reg_rx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw, + u32 rx_dma_desc_base_addrlsw, + u32 descriptor) +{ + aq_hw_write_reg(aq_hw, HW_ATL_RX_DMA_DESC_BASE_ADDRLSW_ADR(descriptor), + rx_dma_desc_base_addrlsw); +} + +void hw_atl_reg_rx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw, + u32 rx_dma_desc_base_addrmsw, + u32 descriptor) +{ + aq_hw_write_reg(aq_hw, HW_ATL_RX_DMA_DESC_BASE_ADDRMSW_ADR(descriptor), + rx_dma_desc_base_addrmsw); +} + +u32 hw_atl_reg_rx_dma_desc_status_get(struct aq_hw_s *aq_hw, u32 descriptor) +{ + return aq_hw_read_reg(aq_hw, HW_ATL_RX_DMA_DESC_STAT_ADR(descriptor)); +} + +void hw_atl_reg_rx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw, + u32 rx_dma_desc_tail_ptr, + u32 descriptor) +{ + aq_hw_write_reg(aq_hw, HW_ATL_RX_DMA_DESC_TAIL_PTR_ADR(descriptor), + rx_dma_desc_tail_ptr); +} + +void hw_atl_reg_rx_flr_mcst_flr_msk_set(struct aq_hw_s *aq_hw, + u32 rx_flr_mcst_flr_msk) +{ + aq_hw_write_reg(aq_hw, HW_ATL_RX_FLR_MCST_FLR_MSK_ADR, + rx_flr_mcst_flr_msk); +} + +void hw_atl_reg_rx_flr_mcst_flr_set(struct aq_hw_s *aq_hw, u32 rx_flr_mcst_flr, + u32 filter) +{ + aq_hw_write_reg(aq_hw, HW_ATL_RX_FLR_MCST_FLR_ADR(filter), + rx_flr_mcst_flr); +} + +void hw_atl_reg_rx_flr_rss_control1set(struct aq_hw_s *aq_hw, + u32 rx_flr_rss_control1) +{ + aq_hw_write_reg(aq_hw, HW_ATL_RX_FLR_RSS_CONTROL1_ADR, + rx_flr_rss_control1); +} + +void hw_atl_reg_rx_flr_control2_set(struct aq_hw_s *aq_hw, + u32 rx_filter_control2) +{ + aq_hw_write_reg(aq_hw, HW_ATL_RX_FLR_CONTROL2_ADR, rx_filter_control2); +} + +void hw_atl_reg_rx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw, + u32 rx_intr_moderation_ctl, + u32 queue) +{ + aq_hw_write_reg(aq_hw, HW_ATL_RX_INTR_MODERATION_CTL_ADR(queue), + rx_intr_moderation_ctl); +} + +void hw_atl_reg_tx_dma_debug_ctl_set(struct aq_hw_s *aq_hw, + u32 tx_dma_debug_ctl) +{ + aq_hw_write_reg(aq_hw, HW_ATL_TX_DMA_DEBUG_CTL_ADR, tx_dma_debug_ctl); +} + +void hw_atl_reg_tx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw, + u32 tx_dma_desc_base_addrlsw, + u32 descriptor) +{ + aq_hw_write_reg(aq_hw, HW_ATL_TX_DMA_DESC_BASE_ADDRLSW_ADR(descriptor), + tx_dma_desc_base_addrlsw); +} + +void hw_atl_reg_tx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw, + u32 tx_dma_desc_base_addrmsw, + u32 descriptor) +{ + aq_hw_write_reg(aq_hw, 
HW_ATL_TX_DMA_DESC_BASE_ADDRMSW_ADR(descriptor), + tx_dma_desc_base_addrmsw); +} + +void hw_atl_reg_tx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw, + u32 tx_dma_desc_tail_ptr, + u32 descriptor) +{ + rte_wmb(); + + aq_hw_write_reg(aq_hw, HW_ATL_TX_DMA_DESC_TAIL_PTR_ADR(descriptor), + tx_dma_desc_tail_ptr); +} + +void hw_atl_reg_tx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw, + u32 tx_intr_moderation_ctl, + u32 queue) +{ + aq_hw_write_reg(aq_hw, HW_ATL_TX_INTR_MODERATION_CTL_ADR(queue), + tx_intr_moderation_ctl); +} + +/* RPB: rx packet buffer */ +void hw_atl_rpb_dma_sys_lbk_set(struct aq_hw_s *aq_hw, u32 dma_sys_lbk) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_DMA_SYS_LBK_ADR, + HW_ATL_RPB_DMA_SYS_LBK_MSK, + HW_ATL_RPB_DMA_SYS_LBK_SHIFT, dma_sys_lbk); +} + +void hw_atl_rpb_rpf_rx_traf_class_mode_set(struct aq_hw_s *aq_hw, + u32 rx_traf_class_mode) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_RPF_RX_TC_MODE_ADR, + HW_ATL_RPB_RPF_RX_TC_MODE_MSK, + HW_ATL_RPB_RPF_RX_TC_MODE_SHIFT, + rx_traf_class_mode); +} + +u32 hw_atl_rpb_rpf_rx_traf_class_mode_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg_bit(aq_hw, HW_ATL_RPB_RPF_RX_TC_MODE_ADR, + HW_ATL_RPB_RPF_RX_TC_MODE_MSK, + HW_ATL_RPB_RPF_RX_TC_MODE_SHIFT); +} + +void hw_atl_rpb_rx_buff_en_set(struct aq_hw_s *aq_hw, u32 rx_buff_en) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_RX_BUF_EN_ADR, + HW_ATL_RPB_RX_BUF_EN_MSK, + HW_ATL_RPB_RX_BUF_EN_SHIFT, rx_buff_en); +} + +void hw_atl_rpb_rx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw, + u32 rx_buff_hi_threshold_per_tc, + u32 buffer) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_RXBHI_THRESH_ADR(buffer), + HW_ATL_RPB_RXBHI_THRESH_MSK, + HW_ATL_RPB_RXBHI_THRESH_SHIFT, + rx_buff_hi_threshold_per_tc); +} + +void hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw, + u32 rx_buff_lo_threshold_per_tc, + u32 buffer) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_RXBLO_THRESH_ADR(buffer), + HW_ATL_RPB_RXBLO_THRESH_MSK, + HW_ATL_RPB_RXBLO_THRESH_SHIFT, + rx_buff_lo_threshold_per_tc); +} + +void hw_atl_rpb_rx_flow_ctl_mode_set(struct aq_hw_s *aq_hw, + u32 rx_flow_ctl_mode) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_RX_FC_MODE_ADR, + HW_ATL_RPB_RX_FC_MODE_MSK, + HW_ATL_RPB_RX_FC_MODE_SHIFT, rx_flow_ctl_mode); +} + +void hw_atl_rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw, + u32 rx_pkt_buff_size_per_tc, + u32 buffer) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_RXBBUF_SIZE_ADR(buffer), + HW_ATL_RPB_RXBBUF_SIZE_MSK, + HW_ATL_RPB_RXBBUF_SIZE_SHIFT, + rx_pkt_buff_size_per_tc); +} + +void hw_atl_rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw, + u32 rx_xoff_en_per_tc, + u32 buffer) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_RXBXOFF_EN_ADR(buffer), + HW_ATL_RPB_RXBXOFF_EN_MSK, + HW_ATL_RPB_RXBXOFF_EN_SHIFT, + rx_xoff_en_per_tc); +} + +/* rpf */ + +void hw_atl_rpfl2broadcast_count_threshold_set(struct aq_hw_s *aq_hw, + u32 l2broadcast_count_threshold) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2BC_THRESH_ADR, + HW_ATL_RPFL2BC_THRESH_MSK, + HW_ATL_RPFL2BC_THRESH_SHIFT, + l2broadcast_count_threshold); +} + +void hw_atl_rpfl2broadcast_en_set(struct aq_hw_s *aq_hw, u32 l2broadcast_en) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2BC_EN_ADR, HW_ATL_RPFL2BC_EN_MSK, + HW_ATL_RPFL2BC_EN_SHIFT, l2broadcast_en); +} + +void hw_atl_rpfl2broadcast_flr_act_set(struct aq_hw_s *aq_hw, + u32 l2broadcast_flr_act) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2BC_ACT_ADR, + HW_ATL_RPFL2BC_ACT_MSK, + HW_ATL_RPFL2BC_ACT_SHIFT, l2broadcast_flr_act); +} + +void hw_atl_rpfl2multicast_flr_en_set(struct aq_hw_s *aq_hw, + u32 
l2multicast_flr_en, + u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2MC_ENF_ADR(filter), + HW_ATL_RPFL2MC_ENF_MSK, + HW_ATL_RPFL2MC_ENF_SHIFT, l2multicast_flr_en); +} + +void hw_atl_rpfl2promiscuous_mode_en_set(struct aq_hw_s *aq_hw, + u32 l2promiscuous_mode_en) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2PROMIS_MODE_ADR, + HW_ATL_RPFL2PROMIS_MODE_MSK, + HW_ATL_RPFL2PROMIS_MODE_SHIFT, + l2promiscuous_mode_en); +} + +void hw_atl_rpfl2unicast_flr_act_set(struct aq_hw_s *aq_hw, + u32 l2unicast_flr_act, + u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2UC_ACTF_ADR(filter), + HW_ATL_RPFL2UC_ACTF_MSK, HW_ATL_RPFL2UC_ACTF_SHIFT, + l2unicast_flr_act); +} + +void hw_atl_rpfl2_uc_flr_en_set(struct aq_hw_s *aq_hw, u32 l2unicast_flr_en, + u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2UC_ENF_ADR(filter), + HW_ATL_RPFL2UC_ENF_MSK, + HW_ATL_RPFL2UC_ENF_SHIFT, l2unicast_flr_en); +} + +void hw_atl_rpfl2unicast_dest_addresslsw_set(struct aq_hw_s *aq_hw, + u32 l2unicast_dest_addresslsw, + u32 filter) +{ + aq_hw_write_reg(aq_hw, HW_ATL_RPFL2UC_DAFLSW_ADR(filter), + l2unicast_dest_addresslsw); +} + +void hw_atl_rpfl2unicast_dest_addressmsw_set(struct aq_hw_s *aq_hw, + u32 l2unicast_dest_addressmsw, + u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2UC_DAFMSW_ADR(filter), + HW_ATL_RPFL2UC_DAFMSW_MSK, + HW_ATL_RPFL2UC_DAFMSW_SHIFT, + l2unicast_dest_addressmsw); +} + +void hw_atl_rpfl2_accept_all_mc_packets_set(struct aq_hw_s *aq_hw, + u32 l2_accept_all_mc_packets) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2MC_ACCEPT_ALL_ADR, + HW_ATL_RPFL2MC_ACCEPT_ALL_MSK, + HW_ATL_RPFL2MC_ACCEPT_ALL_SHIFT, + l2_accept_all_mc_packets); +} + +void hw_atl_rpf_rpb_user_priority_tc_map_set(struct aq_hw_s *aq_hw, + u32 user_priority_tc_map, u32 tc) +{ +/* register address for bitfield rx_tc_up{t}[2:0] */ + static const u32 rpf_rpb_rx_tc_upt_adr[8] = { + 0x000054c4U, 0x000054C4U, 0x000054C4U, 0x000054C4U, + 0x000054c4U, 0x000054C4U, 0x000054C4U, 0x000054C4U + }; + +/* bitmask for bitfield rx_tc_up{t}[2:0] */ + static const u32 rpf_rpb_rx_tc_upt_msk[8] = { + 0x00000007U, 0x00000070U, 0x00000700U, 0x00007000U, + 0x00070000U, 0x00700000U, 0x07000000U, 0x70000000U + }; + +/* lower bit position of bitfield rx_tc_up{t}[2:0] */ + static const u32 rpf_rpb_rx_tc_upt_shft[8] = { + 0U, 4U, 8U, 12U, 16U, 20U, 24U, 28U + }; + + aq_hw_write_reg_bit(aq_hw, rpf_rpb_rx_tc_upt_adr[tc], + rpf_rpb_rx_tc_upt_msk[tc], + rpf_rpb_rx_tc_upt_shft[tc], + user_priority_tc_map); +} + +void hw_atl_rpf_rss_key_addr_set(struct aq_hw_s *aq_hw, u32 rss_key_addr) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_RSS_KEY_ADDR_ADR, + HW_ATL_RPF_RSS_KEY_ADDR_MSK, + HW_ATL_RPF_RSS_KEY_ADDR_SHIFT, + rss_key_addr); +} + +void hw_atl_rpf_rss_key_wr_data_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_data) +{ + aq_hw_write_reg(aq_hw, HW_ATL_RPF_RSS_KEY_WR_DATA_ADR, + rss_key_wr_data); +} + +u32 hw_atl_rpf_rss_key_wr_en_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg_bit(aq_hw, HW_ATL_RPF_RSS_KEY_WR_ENI_ADR, + HW_ATL_RPF_RSS_KEY_WR_ENI_MSK, + HW_ATL_RPF_RSS_KEY_WR_ENI_SHIFT); +} + +void hw_atl_rpf_rss_key_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_en) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_RSS_KEY_WR_ENI_ADR, + HW_ATL_RPF_RSS_KEY_WR_ENI_MSK, + HW_ATL_RPF_RSS_KEY_WR_ENI_SHIFT, + rss_key_wr_en); +} + +void hw_atl_rpf_rss_redir_tbl_addr_set(struct aq_hw_s *aq_hw, + u32 rss_redir_tbl_addr) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_RSS_REDIR_ADDR_ADR, + HW_ATL_RPF_RSS_REDIR_ADDR_MSK, + HW_ATL_RPF_RSS_REDIR_ADDR_SHIFT, + rss_redir_tbl_addr); +} 
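+
+/* The RSS redirection-table accessors defined just above and below implement
+ * a small indirect-write handshake: software stages the write data, selects a
+ * table address, raises the write-enable strobe, then polls the enable bit
+ * until the hardware has latched the entry.  The sketch below only shows how
+ * these accessors are typically composed; it is not part of the upstream
+ * driver, the wrapper name atl_rss_redir_write_entry() is hypothetical, and
+ * the packing of queue indices into each table word is hardware-specific and
+ * not shown here.  Only helpers declared in hw_atl_llh.h are used.
+ */
+#if 0 /* illustrative sketch only, not compiled */
+static int atl_rss_redir_write_entry(struct aq_hw_s *aq_hw, u32 addr, u32 data)
+{
+	u32 retries = 1000U;
+
+	hw_atl_rpf_rss_redir_tbl_wr_data_set(aq_hw, data); /* stage entry data */
+	hw_atl_rpf_rss_redir_tbl_addr_set(aq_hw, addr);    /* select table slot */
+	hw_atl_rpf_rss_redir_wr_en_set(aq_hw, 1U);         /* start the write */
+
+	/* Wait for the hardware to deassert the write-enable strobe. */
+	while (hw_atl_rpf_rss_redir_wr_en_get(aq_hw) && --retries)
+		; /* a real caller would insert a short delay between polls */
+
+	return retries ? 0 : -1; /* -1: strobe never cleared */
+}
+#endif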
+ +void hw_atl_rpf_rss_redir_tbl_wr_data_set(struct aq_hw_s *aq_hw, + u32 rss_redir_tbl_wr_data) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_RSS_REDIR_WR_DATA_ADR, + HW_ATL_RPF_RSS_REDIR_WR_DATA_MSK, + HW_ATL_RPF_RSS_REDIR_WR_DATA_SHIFT, + rss_redir_tbl_wr_data); +} + +u32 hw_atl_rpf_rss_redir_wr_en_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg_bit(aq_hw, HW_ATL_RPF_RSS_REDIR_WR_ENI_ADR, + HW_ATL_RPF_RSS_REDIR_WR_ENI_MSK, + HW_ATL_RPF_RSS_REDIR_WR_ENI_SHIFT); +} + +void hw_atl_rpf_rss_redir_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_redir_wr_en) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_RSS_REDIR_WR_ENI_ADR, + HW_ATL_RPF_RSS_REDIR_WR_ENI_MSK, + HW_ATL_RPF_RSS_REDIR_WR_ENI_SHIFT, rss_redir_wr_en); +} + +void hw_atl_rpf_tpo_to_rpf_sys_lbk_set(struct aq_hw_s *aq_hw, + u32 tpo_to_rpf_sys_lbk) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_TPO_RPF_SYS_LBK_ADR, + HW_ATL_RPF_TPO_RPF_SYS_LBK_MSK, + HW_ATL_RPF_TPO_RPF_SYS_LBK_SHIFT, + tpo_to_rpf_sys_lbk); +} + +void hw_atl_rpf_vlan_inner_etht_set(struct aq_hw_s *aq_hw, u32 vlan_inner_etht) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_INNER_TPID_ADR, + HW_ATL_RPF_VL_INNER_TPID_MSK, + HW_ATL_RPF_VL_INNER_TPID_SHIFT, + vlan_inner_etht); +} + +void hw_atl_rpf_vlan_outer_etht_set(struct aq_hw_s *aq_hw, u32 vlan_outer_etht) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_OUTER_TPID_ADR, + HW_ATL_RPF_VL_OUTER_TPID_MSK, + HW_ATL_RPF_VL_OUTER_TPID_SHIFT, + vlan_outer_etht); +} + +void hw_atl_rpf_vlan_prom_mode_en_set(struct aq_hw_s *aq_hw, + u32 vlan_prom_mode_en) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_PROMIS_MODE_ADR, + HW_ATL_RPF_VL_PROMIS_MODE_MSK, + HW_ATL_RPF_VL_PROMIS_MODE_SHIFT, + vlan_prom_mode_en); +} + +void hw_atl_rpf_vlan_accept_untagged_packets_set(struct aq_hw_s *aq_hw, + u32 vlan_acc_untagged_packets) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_ADR, + HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_MSK, + HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_SHIFT, + vlan_acc_untagged_packets); +} + +void hw_atl_rpf_vlan_untagged_act_set(struct aq_hw_s *aq_hw, + u32 vlan_untagged_act) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_UNTAGGED_ACT_ADR, + HW_ATL_RPF_VL_UNTAGGED_ACT_MSK, + HW_ATL_RPF_VL_UNTAGGED_ACT_SHIFT, + vlan_untagged_act); +} + +void hw_atl_rpf_vlan_flr_en_set(struct aq_hw_s *aq_hw, u32 vlan_flr_en, + u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_EN_F_ADR(filter), + HW_ATL_RPF_VL_EN_F_MSK, + HW_ATL_RPF_VL_EN_F_SHIFT, + vlan_flr_en); +} + +void hw_atl_rpf_vlan_flr_act_set(struct aq_hw_s *aq_hw, u32 vlan_flr_act, + u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_ACT_F_ADR(filter), + HW_ATL_RPF_VL_ACT_F_MSK, + HW_ATL_RPF_VL_ACT_F_SHIFT, + vlan_flr_act); +} + +void hw_atl_rpf_vlan_id_flr_set(struct aq_hw_s *aq_hw, u32 vlan_id_flr, + u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_ID_F_ADR(filter), + HW_ATL_RPF_VL_ID_F_MSK, + HW_ATL_RPF_VL_ID_F_SHIFT, + vlan_id_flr); +} + +void hw_atl_rpf_etht_flr_en_set(struct aq_hw_s *aq_hw, u32 etht_flr_en, + u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_ENF_ADR(filter), + HW_ATL_RPF_ET_ENF_MSK, + HW_ATL_RPF_ET_ENF_SHIFT, etht_flr_en); +} + +void hw_atl_rpf_etht_user_priority_en_set(struct aq_hw_s *aq_hw, + u32 etht_user_priority_en, u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_UPFEN_ADR(filter), + HW_ATL_RPF_ET_UPFEN_MSK, HW_ATL_RPF_ET_UPFEN_SHIFT, + etht_user_priority_en); +} + +void hw_atl_rpf_etht_rx_queue_en_set(struct aq_hw_s *aq_hw, + u32 etht_rx_queue_en, + u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_RXQFEN_ADR(filter), 
+ HW_ATL_RPF_ET_RXQFEN_MSK, + HW_ATL_RPF_ET_RXQFEN_SHIFT, + etht_rx_queue_en); +} + +void hw_atl_rpf_etht_user_priority_set(struct aq_hw_s *aq_hw, + u32 etht_user_priority, + u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_UPF_ADR(filter), + HW_ATL_RPF_ET_UPF_MSK, + HW_ATL_RPF_ET_UPF_SHIFT, etht_user_priority); +} + +void hw_atl_rpf_etht_rx_queue_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue, + u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_RXQF_ADR(filter), + HW_ATL_RPF_ET_RXQF_MSK, + HW_ATL_RPF_ET_RXQF_SHIFT, etht_rx_queue); +} + +void hw_atl_rpf_etht_mgt_queue_set(struct aq_hw_s *aq_hw, u32 etht_mgt_queue, + u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_MNG_RXQF_ADR(filter), + HW_ATL_RPF_ET_MNG_RXQF_MSK, + HW_ATL_RPF_ET_MNG_RXQF_SHIFT, + etht_mgt_queue); +} + +void hw_atl_rpf_etht_flr_act_set(struct aq_hw_s *aq_hw, u32 etht_flr_act, + u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_ACTF_ADR(filter), + HW_ATL_RPF_ET_ACTF_MSK, + HW_ATL_RPF_ET_ACTF_SHIFT, etht_flr_act); +} + +void hw_atl_rpf_etht_flr_set(struct aq_hw_s *aq_hw, u32 etht_flr, u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_VALF_ADR(filter), + HW_ATL_RPF_ET_VALF_MSK, + HW_ATL_RPF_ET_VALF_SHIFT, etht_flr); +} + +/* RPO: rx packet offload */ +void hw_atl_rpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw, + u32 ipv4header_crc_offload_en) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_IPV4CHK_EN_ADR, + HW_ATL_RPO_IPV4CHK_EN_MSK, + HW_ATL_RPO_IPV4CHK_EN_SHIFT, + ipv4header_crc_offload_en); +} + +void hw_atl_rpo_rx_desc_vlan_stripping_set(struct aq_hw_s *aq_hw, + u32 rx_desc_vlan_stripping, + u32 descriptor) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_DESCDVL_STRIP_ADR(descriptor), + HW_ATL_RPO_DESCDVL_STRIP_MSK, + HW_ATL_RPO_DESCDVL_STRIP_SHIFT, + rx_desc_vlan_stripping); +} + +void hw_atl_rpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw, + u32 tcp_udp_crc_offload_en) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPOL4CHK_EN_ADR, + HW_ATL_RPOL4CHK_EN_MSK, + HW_ATL_RPOL4CHK_EN_SHIFT, tcp_udp_crc_offload_en); +} + +void hw_atl_rpo_lro_en_set(struct aq_hw_s *aq_hw, u32 lro_en) +{ + aq_hw_write_reg(aq_hw, HW_ATL_RPO_LRO_EN_ADR, lro_en); +} + +void hw_atl_rpo_lro_patch_optimization_en_set(struct aq_hw_s *aq_hw, + u32 lro_patch_optimization_en) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_LRO_PTOPT_EN_ADR, + HW_ATL_RPO_LRO_PTOPT_EN_MSK, + HW_ATL_RPO_LRO_PTOPT_EN_SHIFT, + lro_patch_optimization_en); +} + +void hw_atl_rpo_lro_qsessions_lim_set(struct aq_hw_s *aq_hw, + u32 lro_qsessions_lim) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_LRO_QSES_LMT_ADR, + HW_ATL_RPO_LRO_QSES_LMT_MSK, + HW_ATL_RPO_LRO_QSES_LMT_SHIFT, + lro_qsessions_lim); +} + +void hw_atl_rpo_lro_total_desc_lim_set(struct aq_hw_s *aq_hw, + u32 lro_total_desc_lim) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_LRO_TOT_DSC_LMT_ADR, + HW_ATL_RPO_LRO_TOT_DSC_LMT_MSK, + HW_ATL_RPO_LRO_TOT_DSC_LMT_SHIFT, + lro_total_desc_lim); +} + +void hw_atl_rpo_lro_min_pay_of_first_pkt_set(struct aq_hw_s *aq_hw, + u32 lro_min_pld_of_first_pkt) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_LRO_PKT_MIN_ADR, + HW_ATL_RPO_LRO_PKT_MIN_MSK, + HW_ATL_RPO_LRO_PKT_MIN_SHIFT, + lro_min_pld_of_first_pkt); +} + +void hw_atl_rpo_lro_pkt_lim_set(struct aq_hw_s *aq_hw, u32 lro_pkt_lim) +{ + aq_hw_write_reg(aq_hw, HW_ATL_RPO_LRO_RSC_MAX_ADR, lro_pkt_lim); +} + +void hw_atl_rpo_lro_max_num_of_descriptors_set(struct aq_hw_s *aq_hw, + u32 lro_max_number_of_descriptors, + u32 lro) +{ +/* Register address for bitfield lro{L}_des_max[1:0] */ + static const u32 
rpo_lro_ldes_max_adr[32] = { + 0x000055A0U, 0x000055A0U, 0x000055A0U, 0x000055A0U, + 0x000055A0U, 0x000055A0U, 0x000055A0U, 0x000055A0U, + 0x000055A4U, 0x000055A4U, 0x000055A4U, 0x000055A4U, + 0x000055A4U, 0x000055A4U, 0x000055A4U, 0x000055A4U, + 0x000055A8U, 0x000055A8U, 0x000055A8U, 0x000055A8U, + 0x000055A8U, 0x000055A8U, 0x000055A8U, 0x000055A8U, + 0x000055ACU, 0x000055ACU, 0x000055ACU, 0x000055ACU, + 0x000055ACU, 0x000055ACU, 0x000055ACU, 0x000055ACU + }; + +/* Bitmask for bitfield lro{L}_des_max[1:0] */ + static const u32 rpo_lro_ldes_max_msk[32] = { + 0x00000003U, 0x00000030U, 0x00000300U, 0x00003000U, + 0x00030000U, 0x00300000U, 0x03000000U, 0x30000000U, + 0x00000003U, 0x00000030U, 0x00000300U, 0x00003000U, + 0x00030000U, 0x00300000U, 0x03000000U, 0x30000000U, + 0x00000003U, 0x00000030U, 0x00000300U, 0x00003000U, + 0x00030000U, 0x00300000U, 0x03000000U, 0x30000000U, + 0x00000003U, 0x00000030U, 0x00000300U, 0x00003000U, + 0x00030000U, 0x00300000U, 0x03000000U, 0x30000000U + }; + +/* Lower bit position of bitfield lro{L}_des_max[1:0] */ + static const u32 rpo_lro_ldes_max_shift[32] = { + 0U, 4U, 8U, 12U, 16U, 20U, 24U, 28U, + 0U, 4U, 8U, 12U, 16U, 20U, 24U, 28U, + 0U, 4U, 8U, 12U, 16U, 20U, 24U, 28U, + 0U, 4U, 8U, 12U, 16U, 20U, 24U, 28U + }; + + aq_hw_write_reg_bit(aq_hw, rpo_lro_ldes_max_adr[lro], + rpo_lro_ldes_max_msk[lro], + rpo_lro_ldes_max_shift[lro], + lro_max_number_of_descriptors); +} + +void hw_atl_rpo_lro_time_base_divider_set(struct aq_hw_s *aq_hw, + u32 lro_time_base_divider) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_LRO_TB_DIV_ADR, + HW_ATL_RPO_LRO_TB_DIV_MSK, + HW_ATL_RPO_LRO_TB_DIV_SHIFT, + lro_time_base_divider); +} + +void hw_atl_rpo_lro_inactive_interval_set(struct aq_hw_s *aq_hw, + u32 lro_inactive_interval) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_LRO_INA_IVAL_ADR, + HW_ATL_RPO_LRO_INA_IVAL_MSK, + HW_ATL_RPO_LRO_INA_IVAL_SHIFT, + lro_inactive_interval); +} + +void hw_atl_rpo_lro_max_coalescing_interval_set(struct aq_hw_s *aq_hw, + u32 lro_max_coal_interval) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_LRO_MAX_IVAL_ADR, + HW_ATL_RPO_LRO_MAX_IVAL_MSK, + HW_ATL_RPO_LRO_MAX_IVAL_SHIFT, + lro_max_coal_interval); +} + +/* rx */ +void hw_atl_rx_rx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 rx_reg_res_dis) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RX_REG_RES_DSBL_ADR, + HW_ATL_RX_REG_RES_DSBL_MSK, + HW_ATL_RX_REG_RES_DSBL_SHIFT, + rx_reg_res_dis); +} + +/* tdm */ +void hw_atl_tdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_DCADCPUID_ADR(dca), + HW_ATL_TDM_DCADCPUID_MSK, + HW_ATL_TDM_DCADCPUID_SHIFT, cpuid); +} + +void hw_atl_tdm_large_send_offload_en_set(struct aq_hw_s *aq_hw, + u32 large_send_offload_en) +{ + aq_hw_write_reg(aq_hw, HW_ATL_TDM_LSO_EN_ADR, large_send_offload_en); +} + +void hw_atl_tdm_tx_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_dca_en) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_DCA_EN_ADR, HW_ATL_TDM_DCA_EN_MSK, + HW_ATL_TDM_DCA_EN_SHIFT, tx_dca_en); +} + +void hw_atl_tdm_tx_dca_mode_set(struct aq_hw_s *aq_hw, u32 tx_dca_mode) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_DCA_MODE_ADR, + HW_ATL_TDM_DCA_MODE_MSK, + HW_ATL_TDM_DCA_MODE_SHIFT, tx_dca_mode); +} + +void hw_atl_tdm_tx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_dca_en, + u32 dca) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_DCADDESC_EN_ADR(dca), + HW_ATL_TDM_DCADDESC_EN_MSK, + HW_ATL_TDM_DCADDESC_EN_SHIFT, + tx_desc_dca_en); +} + +void hw_atl_tdm_tx_desc_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_en, + u32 descriptor) +{ + 
aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_DESCDEN_ADR(descriptor), + HW_ATL_TDM_DESCDEN_MSK, + HW_ATL_TDM_DESCDEN_SHIFT, + tx_desc_en); +} + +u32 hw_atl_tdm_tx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor) +{ + return aq_hw_read_reg_bit(aq_hw, HW_ATL_TDM_DESCDHD_ADR(descriptor), + HW_ATL_TDM_DESCDHD_MSK, + HW_ATL_TDM_DESCDHD_SHIFT); +} + +void hw_atl_tdm_tx_desc_len_set(struct aq_hw_s *aq_hw, u32 tx_desc_len, + u32 descriptor) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_DESCDLEN_ADR(descriptor), + HW_ATL_TDM_DESCDLEN_MSK, + HW_ATL_TDM_DESCDLEN_SHIFT, + tx_desc_len); +} + +void hw_atl_tdm_tx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw, + u32 tx_desc_wr_wb_irq_en) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_INT_DESC_WRB_EN_ADR, + HW_ATL_TDM_INT_DESC_WRB_EN_MSK, + HW_ATL_TDM_INT_DESC_WRB_EN_SHIFT, + tx_desc_wr_wb_irq_en); +} + +void hw_atl_tdm_tx_desc_wr_wb_threshold_set(struct aq_hw_s *aq_hw, + u32 tx_desc_wr_wb_threshold, + u32 descriptor) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_DESCDWRB_THRESH_ADR(descriptor), + HW_ATL_TDM_DESCDWRB_THRESH_MSK, + HW_ATL_TDM_DESCDWRB_THRESH_SHIFT, + tx_desc_wr_wb_threshold); +} + +void hw_atl_tdm_tdm_intr_moder_en_set(struct aq_hw_s *aq_hw, + u32 tdm_irq_moderation_en) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_INT_MOD_EN_ADR, + HW_ATL_TDM_INT_MOD_EN_MSK, + HW_ATL_TDM_INT_MOD_EN_SHIFT, + tdm_irq_moderation_en); +} + +/* thm */ +void hw_atl_thm_lso_tcp_flag_of_first_pkt_set(struct aq_hw_s *aq_hw, + u32 lso_tcp_flag_of_first_pkt) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_THM_LSO_TCP_FLAG_FIRST_ADR, + HW_ATL_THM_LSO_TCP_FLAG_FIRST_MSK, + HW_ATL_THM_LSO_TCP_FLAG_FIRST_SHIFT, + lso_tcp_flag_of_first_pkt); +} + +void hw_atl_thm_lso_tcp_flag_of_last_pkt_set(struct aq_hw_s *aq_hw, + u32 lso_tcp_flag_of_last_pkt) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_THM_LSO_TCP_FLAG_LAST_ADR, + HW_ATL_THM_LSO_TCP_FLAG_LAST_MSK, + HW_ATL_THM_LSO_TCP_FLAG_LAST_SHIFT, + lso_tcp_flag_of_last_pkt); +} + +void hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(struct aq_hw_s *aq_hw, + u32 lso_tcp_flag_of_middle_pkt) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_THM_LSO_TCP_FLAG_MID_ADR, + HW_ATL_THM_LSO_TCP_FLAG_MID_MSK, + HW_ATL_THM_LSO_TCP_FLAG_MID_SHIFT, + lso_tcp_flag_of_middle_pkt); +} + +/* TPB: tx packet buffer */ +void hw_atl_tpb_tx_buff_en_set(struct aq_hw_s *aq_hw, u32 tx_buff_en) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_TX_BUF_EN_ADR, + HW_ATL_TPB_TX_BUF_EN_MSK, + HW_ATL_TPB_TX_BUF_EN_SHIFT, tx_buff_en); +} + +u32 hw_atl_rpb_tps_tx_tc_mode_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg_bit(aq_hw, HW_ATL_TPB_TX_TC_MODE_ADDR, + HW_ATL_TPB_TX_TC_MODE_MSK, + HW_ATL_TPB_TX_TC_MODE_SHIFT); +} + +void hw_atl_rpb_tps_tx_tc_mode_set(struct aq_hw_s *aq_hw, + u32 tx_traf_class_mode) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_TX_TC_MODE_ADDR, + HW_ATL_TPB_TX_TC_MODE_MSK, + HW_ATL_TPB_TX_TC_MODE_SHIFT, + tx_traf_class_mode); +} + +void hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw, + u32 tx_buff_hi_threshold_per_tc, + u32 buffer) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_TXBHI_THRESH_ADR(buffer), + HW_ATL_TPB_TXBHI_THRESH_MSK, + HW_ATL_TPB_TXBHI_THRESH_SHIFT, + tx_buff_hi_threshold_per_tc); +} + +void hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw, + u32 tx_buff_lo_threshold_per_tc, + u32 buffer) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_TXBLO_THRESH_ADR(buffer), + HW_ATL_TPB_TXBLO_THRESH_MSK, + HW_ATL_TPB_TXBLO_THRESH_SHIFT, + tx_buff_lo_threshold_per_tc); +} + +void hw_atl_tpb_tx_dma_sys_lbk_en_set(struct aq_hw_s *aq_hw, + u32 tx_dma_sys_lbk_en) +{ + 
aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_DMA_SYS_LBK_ADR, + HW_ATL_TPB_DMA_SYS_LBK_MSK, + HW_ATL_TPB_DMA_SYS_LBK_SHIFT, + tx_dma_sys_lbk_en); +} + +void hw_atl_tpb_tx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_buff_size_per_tc, + u32 buffer) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_TXBBUF_SIZE_ADR(buffer), + HW_ATL_TPB_TXBBUF_SIZE_MSK, + HW_ATL_TPB_TXBBUF_SIZE_SHIFT, + tx_pkt_buff_size_per_tc); +} + +void hw_atl_tpb_tx_path_scp_ins_en_set(struct aq_hw_s *aq_hw, + u32 tx_path_scp_ins_en) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_TX_SCP_INS_EN_ADR, + HW_ATL_TPB_TX_SCP_INS_EN_MSK, + HW_ATL_TPB_TX_SCP_INS_EN_SHIFT, + tx_path_scp_ins_en); +} + +/* TPO: tx packet offload */ +void hw_atl_tpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw, + u32 ipv4header_crc_offload_en) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_TPO_IPV4CHK_EN_ADR, + HW_ATL_TPO_IPV4CHK_EN_MSK, + HW_ATL_TPO_IPV4CHK_EN_SHIFT, + ipv4header_crc_offload_en); +} + +void hw_atl_tpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw, + u32 tcp_udp_crc_offload_en) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_TPOL4CHK_EN_ADR, + HW_ATL_TPOL4CHK_EN_MSK, + HW_ATL_TPOL4CHK_EN_SHIFT, + tcp_udp_crc_offload_en); +} + +void hw_atl_tpo_tx_pkt_sys_lbk_en_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_sys_lbk_en) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_TPO_PKT_SYS_LBK_ADR, + HW_ATL_TPO_PKT_SYS_LBK_MSK, + HW_ATL_TPO_PKT_SYS_LBK_SHIFT, + tx_pkt_sys_lbk_en); +} + +/* TPS: tx packet scheduler */ +void hw_atl_tps_tx_pkt_shed_data_arb_mode_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_shed_data_arb_mode) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DATA_TC_ARB_MODE_ADR, + HW_ATL_TPS_DATA_TC_ARB_MODE_MSK, + HW_ATL_TPS_DATA_TC_ARB_MODE_SHIFT, + tx_pkt_shed_data_arb_mode); +} + +void hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(struct aq_hw_s *aq_hw, + u32 curr_time_res) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_RATE_TA_RST_ADR, + HW_ATL_TPS_DESC_RATE_TA_RST_MSK, + HW_ATL_TPS_DESC_RATE_TA_RST_SHIFT, + curr_time_res); +} + +void hw_atl_tps_tx_pkt_shed_desc_rate_lim_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_shed_desc_rate_lim) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_RATE_LIM_ADR, + HW_ATL_TPS_DESC_RATE_LIM_MSK, + HW_ATL_TPS_DESC_RATE_LIM_SHIFT, + tx_pkt_shed_desc_rate_lim); +} + +void hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(struct aq_hw_s *aq_hw, + u32 arb_mode) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_TC_ARB_MODE_ADR, + HW_ATL_TPS_DESC_TC_ARB_MODE_MSK, + HW_ATL_TPS_DESC_TC_ARB_MODE_SHIFT, + arb_mode); +} + +void hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(struct aq_hw_s *aq_hw, + u32 max_credit, + u32 tc) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_TCTCREDIT_MAX_ADR(tc), + HW_ATL_TPS_DESC_TCTCREDIT_MAX_MSK, + HW_ATL_TPS_DESC_TCTCREDIT_MAX_SHIFT, + max_credit); +} + +void hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_shed_desc_tc_weight, + u32 tc) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_TCTWEIGHT_ADR(tc), + HW_ATL_TPS_DESC_TCTWEIGHT_MSK, + HW_ATL_TPS_DESC_TCTWEIGHT_SHIFT, + tx_pkt_shed_desc_tc_weight); +} + +void hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(struct aq_hw_s *aq_hw, + u32 arb_mode) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_VM_ARB_MODE_ADR, + HW_ATL_TPS_DESC_VM_ARB_MODE_MSK, + HW_ATL_TPS_DESC_VM_ARB_MODE_SHIFT, + arb_mode); +} + +void hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw_s *aq_hw, + u32 max_credit, + u32 tc) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DATA_TCTCREDIT_MAX_ADR(tc), + HW_ATL_TPS_DATA_TCTCREDIT_MAX_MSK, + HW_ATL_TPS_DATA_TCTCREDIT_MAX_SHIFT, 
+ max_credit); +} + +void hw_atl_tps_tx_pkt_shed_tc_data_weight_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_shed_tc_data_weight, + u32 tc) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DATA_TCTWEIGHT_ADR(tc), + HW_ATL_TPS_DATA_TCTWEIGHT_MSK, + HW_ATL_TPS_DATA_TCTWEIGHT_SHIFT, + tx_pkt_shed_tc_data_weight); +} + +/* tx */ +void hw_atl_tx_tx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 tx_reg_res_dis) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_TX_REG_RES_DSBL_ADR, + HW_ATL_TX_REG_RES_DSBL_MSK, + HW_ATL_TX_REG_RES_DSBL_SHIFT, tx_reg_res_dis); +} + +/* msm */ +u32 hw_atl_msm_reg_access_status_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg_bit(aq_hw, HW_ATL_MSM_REG_ACCESS_BUSY_ADR, + HW_ATL_MSM_REG_ACCESS_BUSY_MSK, + HW_ATL_MSM_REG_ACCESS_BUSY_SHIFT); +} + +void hw_atl_msm_reg_addr_for_indirect_addr_set(struct aq_hw_s *aq_hw, + u32 reg_addr_for_indirect_addr) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_MSM_REG_ADDR_ADR, + HW_ATL_MSM_REG_ADDR_MSK, + HW_ATL_MSM_REG_ADDR_SHIFT, + reg_addr_for_indirect_addr); +} + +void hw_atl_msm_reg_rd_strobe_set(struct aq_hw_s *aq_hw, u32 reg_rd_strobe) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_MSM_REG_RD_STROBE_ADR, + HW_ATL_MSM_REG_RD_STROBE_MSK, + HW_ATL_MSM_REG_RD_STROBE_SHIFT, + reg_rd_strobe); +} + +u32 hw_atl_msm_reg_rd_data_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg(aq_hw, HW_ATL_MSM_REG_RD_DATA_ADR); +} + +void hw_atl_msm_reg_wr_data_set(struct aq_hw_s *aq_hw, u32 reg_wr_data) +{ + aq_hw_write_reg(aq_hw, HW_ATL_MSM_REG_WR_DATA_ADR, reg_wr_data); +} + +void hw_atl_msm_reg_wr_strobe_set(struct aq_hw_s *aq_hw, u32 reg_wr_strobe) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_MSM_REG_WR_STROBE_ADR, + HW_ATL_MSM_REG_WR_STROBE_MSK, + HW_ATL_MSM_REG_WR_STROBE_SHIFT, + reg_wr_strobe); +} + +/* pci */ +void hw_atl_pci_pci_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 pci_reg_res_dis) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_PCI_REG_RES_DSBL_ADR, + HW_ATL_PCI_REG_RES_DSBL_MSK, + HW_ATL_PCI_REG_RES_DSBL_SHIFT, + pci_reg_res_dis); +} + +void hw_atl_reg_glb_cpu_scratch_scp_set(struct aq_hw_s *aq_hw, + u32 glb_cpu_scratch_scp, + u32 scratch_scp) +{ + aq_hw_write_reg(aq_hw, HW_ATL_GLB_CPU_SCRATCH_SCP_ADR(scratch_scp), + glb_cpu_scratch_scp); +} + +void hw_atl_mcp_up_force_intr_set(struct aq_hw_s *aq_hw, u32 up_force_intr) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_MCP_UP_FORCE_INTERRUPT_ADR, + HW_ATL_MCP_UP_FORCE_INTERRUPT_MSK, + HW_ATL_MCP_UP_FORCE_INTERRUPT_SHIFT, up_force_intr); +} diff --git a/src/spdk/dpdk/drivers/net/atlantic/hw_atl/hw_atl_llh.h b/src/spdk/dpdk/drivers/net/atlantic/hw_atl/hw_atl_llh.h new file mode 100644 index 000000000..e30083cea --- /dev/null +++ b/src/spdk/dpdk/drivers/net/atlantic/hw_atl/hw_atl_llh.h @@ -0,0 +1,714 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) */ +/* Copyright (C) 2014-2017 aQuantia Corporation. */ + +/* File hw_atl_llh.h: Declarations of bitfield and register access functions for + * Atlantic registers. 
+ */ + +#ifndef HW_ATL_LLH_H +#define HW_ATL_LLH_H + +#include "../atl_types.h" + +struct aq_hw_s; + +/* global */ + +/* set global microprocessor semaphore */ +void hw_atl_reg_glb_cpu_sem_set(struct aq_hw_s *aq_hw, u32 glb_cpu_sem, + u32 semaphore); + +/* get global microprocessor semaphore */ +u32 hw_atl_reg_glb_cpu_sem_get(struct aq_hw_s *aq_hw, u32 semaphore); + +/* set global register reset disable */ +void hw_atl_glb_glb_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 glb_reg_res_dis); + +/* set soft reset */ +void hw_atl_glb_soft_res_set(struct aq_hw_s *aq_hw, u32 soft_res); + +/* get soft reset */ +u32 hw_atl_glb_soft_res_get(struct aq_hw_s *aq_hw); + +/* stats */ + +u32 hw_atl_rpb_rx_dma_drop_pkt_cnt_get(struct aq_hw_s *aq_hw); + +/* get rx dma good octet counter lsw */ +u32 hw_atl_stats_rx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw); + +/* get rx dma good packet counter lsw */ +u32 hw_atl_stats_rx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw); + +/* get tx dma good octet counter lsw */ +u32 hw_atl_stats_tx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw); + +/* get tx dma good packet counter lsw */ +u32 hw_atl_stats_tx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw); + +/* get rx dma good octet counter msw */ +u32 hw_atl_stats_rx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw); + +/* get rx dma good packet counter msw */ +u32 hw_atl_stats_rx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw); + +/* get tx dma good octet counter msw */ +u32 hw_atl_stats_tx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw); + +/* get tx dma good packet counter msw */ +u32 hw_atl_stats_tx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw); + +/* get msm rx errors counter register */ +u32 hw_atl_reg_mac_msm_rx_errs_cnt_get(struct aq_hw_s *aq_hw); + +/* get msm rx unicast frames counter register */ +u32 hw_atl_reg_mac_msm_rx_ucst_frm_cnt_get(struct aq_hw_s *aq_hw); + +/* get msm rx multicast frames counter register */ +u32 hw_atl_reg_mac_msm_rx_mcst_frm_cnt_get(struct aq_hw_s *aq_hw); + +/* get msm rx broadcast frames counter register */ +u32 hw_atl_reg_mac_msm_rx_bcst_frm_cnt_get(struct aq_hw_s *aq_hw); + +/* get msm rx broadcast octets counter register 1 */ +u32 hw_atl_reg_mac_msm_rx_bcst_octets_counter1get(struct aq_hw_s *aq_hw); + +/* get msm rx unicast octets counter register 0 */ +u32 hw_atl_reg_mac_msm_rx_ucst_octets_counter0get(struct aq_hw_s *aq_hw); + +/* get rx dma statistics counter 7 */ +u32 hw_atl_reg_rx_dma_stat_counter7get(struct aq_hw_s *aq_hw); + +/* get msm tx errors counter register */ +u32 hw_atl_reg_mac_msm_tx_errs_cnt_get(struct aq_hw_s *aq_hw); + +/* get msm tx unicast frames counter register */ +u32 hw_atl_reg_mac_msm_tx_ucst_frm_cnt_get(struct aq_hw_s *aq_hw); + +/* get msm tx multicast frames counter register */ +u32 hw_atl_reg_mac_msm_tx_mcst_frm_cnt_get(struct aq_hw_s *aq_hw); + +/* get msm tx broadcast frames counter register */ +u32 hw_atl_reg_mac_msm_tx_bcst_frm_cnt_get(struct aq_hw_s *aq_hw); + +/* get msm tx multicast octets counter register 1 */ +u32 hw_atl_reg_mac_msm_tx_mcst_octets_counter1get(struct aq_hw_s *aq_hw); + +/* get msm tx broadcast octets counter register 1 */ +u32 hw_atl_reg_mac_msm_tx_bcst_octets_counter1get(struct aq_hw_s *aq_hw); + +/* get msm tx unicast octets counter register 0 */ +u32 hw_atl_reg_mac_msm_tx_ucst_octets_counter0get(struct aq_hw_s *aq_hw); + +/* get global mif identification */ +u32 hw_atl_reg_glb_mif_id_get(struct aq_hw_s *aq_hw); + +/* interrupt */ + +/* set interrupt auto mask lsw */ +void 
hw_atl_itr_irq_auto_masklsw_set(struct aq_hw_s *aq_hw, + u32 irq_auto_masklsw); + +/* set interrupt mapping enable rx */ +void hw_atl_itr_irq_map_en_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_rx, + u32 rx); + +/* set interrupt mapping enable tx */ +void hw_atl_itr_irq_map_en_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_tx, + u32 tx); + +/* set interrupt mapping rx */ +void hw_atl_itr_irq_map_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_rx, u32 rx); + +/* set interrupt mapping tx */ +void hw_atl_itr_irq_map_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_tx, u32 tx); + +/* set interrupt mask clear lsw */ +void hw_atl_itr_irq_msk_clearlsw_set(struct aq_hw_s *aq_hw, + u32 irq_msk_clearlsw); + +/* set interrupt mask set lsw */ +void hw_atl_itr_irq_msk_setlsw_set(struct aq_hw_s *aq_hw, u32 irq_msk_setlsw); + +/* set interrupt register reset disable */ +void hw_atl_itr_irq_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 irq_reg_res_dis); + +/* set interrupt status clear lsw */ +void hw_atl_itr_irq_status_clearlsw_set(struct aq_hw_s *aq_hw, + u32 irq_status_clearlsw); + +/* get interrupt status lsw */ +u32 hw_atl_itr_irq_statuslsw_get(struct aq_hw_s *aq_hw); + +/* get reset interrupt */ +u32 hw_atl_itr_res_irq_get(struct aq_hw_s *aq_hw); + +/* set reset interrupt */ +void hw_atl_itr_res_irq_set(struct aq_hw_s *aq_hw, u32 res_irq); + +/* rdm */ + +/* set cpu id */ +void hw_atl_rdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca); + +/* set rx dca enable */ +void hw_atl_rdm_rx_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_dca_en); + +/* set rx dca mode */ +void hw_atl_rdm_rx_dca_mode_set(struct aq_hw_s *aq_hw, u32 rx_dca_mode); + +/* set rx descriptor data buffer size */ +void hw_atl_rdm_rx_desc_data_buff_size_set(struct aq_hw_s *aq_hw, + u32 rx_desc_data_buff_size, + u32 descriptor); + +/* set rx descriptor dca enable */ +void hw_atl_rdm_rx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_dca_en, + u32 dca); + +/* set rx descriptor enable */ +void hw_atl_rdm_rx_desc_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_en, + u32 descriptor); + +/* set rx descriptor header splitting */ +void hw_atl_rdm_rx_desc_head_splitting_set(struct aq_hw_s *aq_hw, + u32 rx_desc_head_splitting, + u32 descriptor); + +/* get rx descriptor head pointer */ +u32 hw_atl_rdm_rx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor); + +/* set rx descriptor length */ +void hw_atl_rdm_rx_desc_len_set(struct aq_hw_s *aq_hw, u32 rx_desc_len, + u32 descriptor); + +/* set rx descriptor write-back interrupt enable */ +void hw_atl_rdm_rx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw, + u32 rx_desc_wr_wb_irq_en); + +/* set rx header dca enable */ +void hw_atl_rdm_rx_head_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_head_dca_en, + u32 dca); + +/* set rx payload dca enable */ +void hw_atl_rdm_rx_pld_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_pld_dca_en, + u32 dca); + +/* set rx descriptor header buffer size */ +void hw_atl_rdm_rx_desc_head_buff_size_set(struct aq_hw_s *aq_hw, + u32 rx_desc_head_buff_size, + u32 descriptor); + +/* set rx descriptor reset */ +void hw_atl_rdm_rx_desc_res_set(struct aq_hw_s *aq_hw, u32 rx_desc_res, + u32 descriptor); + +/* Set RDM Interrupt Moderation Enable */ +void hw_atl_rdm_rdm_intr_moder_en_set(struct aq_hw_s *aq_hw, + u32 rdm_intr_moder_en); + +/* reg */ + +/* set general interrupt mapping register */ +void hw_atl_reg_gen_irq_map_set(struct aq_hw_s *aq_hw, u32 gen_intr_map, + u32 regidx); + +/* get general interrupt status register */ +u32 hw_atl_reg_gen_irq_status_get(struct aq_hw_s *aq_hw); + +/* set interrupt global 
control register */ +void hw_atl_reg_irq_glb_ctl_set(struct aq_hw_s *aq_hw, u32 intr_glb_ctl); + +/* set interrupt throttle register */ +void hw_atl_reg_irq_thr_set(struct aq_hw_s *aq_hw, u32 intr_thr, u32 throttle); + +/* set rx dma descriptor base address lsw */ +void hw_atl_reg_rx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw, + u32 rx_dma_desc_base_addrlsw, + u32 descriptor); + +/* set rx dma descriptor base address msw */ +void hw_atl_reg_rx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw, + u32 rx_dma_desc_base_addrmsw, + u32 descriptor); + +/* get rx dma descriptor status register */ +u32 hw_atl_reg_rx_dma_desc_status_get(struct aq_hw_s *aq_hw, u32 descriptor); + +/* set rx dma descriptor tail pointer register */ +void hw_atl_reg_rx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw, + u32 rx_dma_desc_tail_ptr, + u32 descriptor); + +/* set rx filter multicast filter mask register */ +void hw_atl_reg_rx_flr_mcst_flr_msk_set(struct aq_hw_s *aq_hw, + u32 rx_flr_mcst_flr_msk); + +/* set rx filter multicast filter register */ +void hw_atl_reg_rx_flr_mcst_flr_set(struct aq_hw_s *aq_hw, u32 rx_flr_mcst_flr, + u32 filter); + +/* set rx filter rss control register 1 */ +void hw_atl_reg_rx_flr_rss_control1set(struct aq_hw_s *aq_hw, + u32 rx_flr_rss_control1); + +/* Set RX Filter Control Register 2 */ +void hw_atl_reg_rx_flr_control2_set(struct aq_hw_s *aq_hw, u32 rx_flr_control2); + +/* Set RX Interrupt Moderation Control Register */ +void hw_atl_reg_rx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw, + u32 rx_intr_moderation_ctl, + u32 queue); + +/* set tx dma debug control */ +void hw_atl_reg_tx_dma_debug_ctl_set(struct aq_hw_s *aq_hw, + u32 tx_dma_debug_ctl); + +/* set tx dma descriptor base address lsw */ +void hw_atl_reg_tx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw, + u32 tx_dma_desc_base_addrlsw, + u32 descriptor); + +/* set tx dma descriptor base address msw */ +void hw_atl_reg_tx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw, + u32 tx_dma_desc_base_addrmsw, + u32 descriptor); + +/* set tx dma descriptor tail pointer register */ +void hw_atl_reg_tx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw, + u32 tx_dma_desc_tail_ptr, + u32 descriptor); + +/* Set TX Interrupt Moderation Control Register */ +void hw_atl_reg_tx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw, + u32 tx_intr_moderation_ctl, + u32 queue); + +/* set global microprocessor scratch pad */ +void hw_atl_reg_glb_cpu_scratch_scp_set(struct aq_hw_s *aq_hw, + u32 glb_cpu_scratch_scp, + u32 scratch_scp); + +/* rpb */ + +/* set dma system loopback */ +void hw_atl_rpb_dma_sys_lbk_set(struct aq_hw_s *aq_hw, u32 dma_sys_lbk); + +/* set rx traffic class mode */ +void hw_atl_rpb_rpf_rx_traf_class_mode_set(struct aq_hw_s *aq_hw, + u32 rx_traf_class_mode); + +/* get rx traffic class mode */ +u32 hw_atl_rpb_rpf_rx_traf_class_mode_get(struct aq_hw_s *aq_hw); + +/* set rx buffer enable */ +void hw_atl_rpb_rx_buff_en_set(struct aq_hw_s *aq_hw, u32 rx_buff_en); + +/* set rx buffer high threshold (per tc) */ +void hw_atl_rpb_rx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw, + u32 rx_buff_hi_threshold_per_tc, + u32 buffer); + +/* set rx buffer low threshold (per tc) */ +void hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw, + u32 rx_buff_lo_threshold_per_tc, + u32 buffer); + +/* set rx flow control mode */ +void hw_atl_rpb_rx_flow_ctl_mode_set(struct aq_hw_s *aq_hw, + u32 rx_flow_ctl_mode); + +/* set rx packet buffer size (per tc) */ +void hw_atl_rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw, + u32 
rx_pkt_buff_size_per_tc, + u32 buffer); + +/* set rx xoff enable (per tc) */ +void hw_atl_rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw, + u32 rx_xoff_en_per_tc, + u32 buffer); + +/* rpf */ + +/* set l2 broadcast count threshold */ +void hw_atl_rpfl2broadcast_count_threshold_set(struct aq_hw_s *aq_hw, + u32 l2broadcast_count_threshold); + +/* set l2 broadcast enable */ +void hw_atl_rpfl2broadcast_en_set(struct aq_hw_s *aq_hw, u32 l2broadcast_en); + +/* set l2 broadcast filter action */ +void hw_atl_rpfl2broadcast_flr_act_set(struct aq_hw_s *aq_hw, + u32 l2broadcast_flr_act); + +/* set l2 multicast filter enable */ +void hw_atl_rpfl2multicast_flr_en_set(struct aq_hw_s *aq_hw, + u32 l2multicast_flr_en, + u32 filter); + +/* set l2 promiscuous mode enable */ +void hw_atl_rpfl2promiscuous_mode_en_set(struct aq_hw_s *aq_hw, + u32 l2promiscuous_mode_en); + +/* set l2 unicast filter action */ +void hw_atl_rpfl2unicast_flr_act_set(struct aq_hw_s *aq_hw, + u32 l2unicast_flr_act, + u32 filter); + +/* set l2 unicast filter enable */ +void hw_atl_rpfl2_uc_flr_en_set(struct aq_hw_s *aq_hw, u32 l2unicast_flr_en, + u32 filter); + +/* set l2 unicast destination address lsw */ +void hw_atl_rpfl2unicast_dest_addresslsw_set(struct aq_hw_s *aq_hw, + u32 l2unicast_dest_addresslsw, + u32 filter); + +/* set l2 unicast destination address msw */ +void hw_atl_rpfl2unicast_dest_addressmsw_set(struct aq_hw_s *aq_hw, + u32 l2unicast_dest_addressmsw, + u32 filter); + +/* Set L2 Accept all Multicast packets */ +void hw_atl_rpfl2_accept_all_mc_packets_set(struct aq_hw_s *aq_hw, + u32 l2_accept_all_mc_packets); + +/* set user-priority tc mapping */ +void hw_atl_rpf_rpb_user_priority_tc_map_set(struct aq_hw_s *aq_hw, + u32 user_priority_tc_map, u32 tc); + +/* set rss key address */ +void hw_atl_rpf_rss_key_addr_set(struct aq_hw_s *aq_hw, u32 rss_key_addr); + +/* set rss key write data */ +void hw_atl_rpf_rss_key_wr_data_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_data); + +/* get rss key write enable */ +u32 hw_atl_rpf_rss_key_wr_en_get(struct aq_hw_s *aq_hw); + +/* set rss key write enable */ +void hw_atl_rpf_rss_key_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_en); + +/* set rss redirection table address */ +void hw_atl_rpf_rss_redir_tbl_addr_set(struct aq_hw_s *aq_hw, + u32 rss_redir_tbl_addr); + +/* set rss redirection table write data */ +void hw_atl_rpf_rss_redir_tbl_wr_data_set(struct aq_hw_s *aq_hw, + u32 rss_redir_tbl_wr_data); + +/* get rss redirection write enable */ +u32 hw_atl_rpf_rss_redir_wr_en_get(struct aq_hw_s *aq_hw); + +/* set rss redirection write enable */ +void hw_atl_rpf_rss_redir_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_redir_wr_en); + +/* set tpo to rpf system loopback */ +void hw_atl_rpf_tpo_to_rpf_sys_lbk_set(struct aq_hw_s *aq_hw, + u32 tpo_to_rpf_sys_lbk); + +/* set vlan inner ethertype */ +void hw_atl_rpf_vlan_inner_etht_set(struct aq_hw_s *aq_hw, u32 vlan_inner_etht); + +/* set vlan outer ethertype */ +void hw_atl_rpf_vlan_outer_etht_set(struct aq_hw_s *aq_hw, u32 vlan_outer_etht); + +/* set vlan promiscuous mode enable */ +void hw_atl_rpf_vlan_prom_mode_en_set(struct aq_hw_s *aq_hw, + u32 vlan_prom_mode_en); + +/* Set VLAN untagged action */ +void hw_atl_rpf_vlan_untagged_act_set(struct aq_hw_s *aq_hw, + u32 vlan_untagged_act); + +/* Set VLAN accept untagged packets */ +void hw_atl_rpf_vlan_accept_untagged_packets_set(struct aq_hw_s *aq_hw, + u32 vlan_acc_untagged_packets); + +/* Set VLAN filter enable */ +void hw_atl_rpf_vlan_flr_en_set(struct aq_hw_s *aq_hw, u32 vlan_flr_en, + u32 
filter); + +/* Set VLAN Filter Action */ +void hw_atl_rpf_vlan_flr_act_set(struct aq_hw_s *aq_hw, u32 vlan_filter_act, + u32 filter); + +/* Set VLAN ID Filter */ +void hw_atl_rpf_vlan_id_flr_set(struct aq_hw_s *aq_hw, u32 vlan_id_flr, + u32 filter); + +/* set ethertype filter enable */ +void hw_atl_rpf_etht_flr_en_set(struct aq_hw_s *aq_hw, u32 etht_flr_en, + u32 filter); + +/* set ethertype user-priority enable */ +void hw_atl_rpf_etht_user_priority_en_set(struct aq_hw_s *aq_hw, + u32 etht_user_priority_en, + u32 filter); + +/* set ethertype rx queue enable */ +void hw_atl_rpf_etht_rx_queue_en_set(struct aq_hw_s *aq_hw, + u32 etht_rx_queue_en, + u32 filter); + +/* set ethertype rx queue */ +void hw_atl_rpf_etht_rx_queue_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue, + u32 filter); + +/* set ethertype user-priority */ +void hw_atl_rpf_etht_user_priority_set(struct aq_hw_s *aq_hw, + u32 etht_user_priority, + u32 filter); + +/* set ethertype management queue */ +void hw_atl_rpf_etht_mgt_queue_set(struct aq_hw_s *aq_hw, u32 etht_mgt_queue, + u32 filter); + +/* set ethertype filter action */ +void hw_atl_rpf_etht_flr_act_set(struct aq_hw_s *aq_hw, u32 etht_flr_act, + u32 filter); + +/* set ethertype filter */ +void hw_atl_rpf_etht_flr_set(struct aq_hw_s *aq_hw, u32 etht_flr, u32 filter); + +/* rpo */ + +/* set ipv4 header checksum offload enable */ +void hw_atl_rpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw, + u32 ipv4header_crc_offload_en); + +/* set rx descriptor vlan stripping */ +void hw_atl_rpo_rx_desc_vlan_stripping_set(struct aq_hw_s *aq_hw, + u32 rx_desc_vlan_stripping, + u32 descriptor); + +/* set tcp/udp checksum offload enable */ +void hw_atl_rpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw, + u32 tcp_udp_crc_offload_en); + +/* Set LRO Patch Optimization Enable. 
*/ +void hw_atl_rpo_lro_patch_optimization_en_set(struct aq_hw_s *aq_hw, + u32 lro_patch_optimization_en); + +/* Set Large Receive Offload Enable */ +void hw_atl_rpo_lro_en_set(struct aq_hw_s *aq_hw, u32 lro_en); + +/* Set LRO Q Sessions Limit */ +void hw_atl_rpo_lro_qsessions_lim_set(struct aq_hw_s *aq_hw, + u32 lro_qsessions_lim); + +/* Set LRO Total Descriptor Limit */ +void hw_atl_rpo_lro_total_desc_lim_set(struct aq_hw_s *aq_hw, + u32 lro_total_desc_lim); + +/* Set LRO Min Payload of First Packet */ +void hw_atl_rpo_lro_min_pay_of_first_pkt_set(struct aq_hw_s *aq_hw, + u32 lro_min_pld_of_first_pkt); + +/* Set LRO Packet Limit */ +void hw_atl_rpo_lro_pkt_lim_set(struct aq_hw_s *aq_hw, u32 lro_packet_lim); + +/* Set LRO Max Number of Descriptors */ +void hw_atl_rpo_lro_max_num_of_descriptors_set(struct aq_hw_s *aq_hw, + u32 lro_max_desc_num, u32 lro); + +/* Set LRO Time Base Divider */ +void hw_atl_rpo_lro_time_base_divider_set(struct aq_hw_s *aq_hw, + u32 lro_time_base_divider); + +/*Set LRO Inactive Interval */ +void hw_atl_rpo_lro_inactive_interval_set(struct aq_hw_s *aq_hw, + u32 lro_inactive_interval); + +/*Set LRO Max Coalescing Interval */ +void hw_atl_rpo_lro_max_coalescing_interval_set(struct aq_hw_s *aq_hw, + u32 lro_max_coal_interval); + +/* rx */ + +/* set rx register reset disable */ +void hw_atl_rx_rx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 rx_reg_res_dis); + +/* tdm */ + +/* set cpu id */ +void hw_atl_tdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca); + +/* set large send offload enable */ +void hw_atl_tdm_large_send_offload_en_set(struct aq_hw_s *aq_hw, + u32 large_send_offload_en); + +/* set tx descriptor enable */ +void hw_atl_tdm_tx_desc_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_en, + u32 descriptor); + +/* set tx dca enable */ +void hw_atl_tdm_tx_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_dca_en); + +/* set tx dca mode */ +void hw_atl_tdm_tx_dca_mode_set(struct aq_hw_s *aq_hw, u32 tx_dca_mode); + +/* set tx descriptor dca enable */ +void hw_atl_tdm_tx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_dca_en, + u32 dca); + +/* get tx descriptor head pointer */ +u32 hw_atl_tdm_tx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor); + +/* set tx descriptor length */ +void hw_atl_tdm_tx_desc_len_set(struct aq_hw_s *aq_hw, u32 tx_desc_len, + u32 descriptor); + +/* set tx descriptor write-back interrupt enable */ +void hw_atl_tdm_tx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw, + u32 tx_desc_wr_wb_irq_en); + +/* set tx descriptor write-back threshold */ +void hw_atl_tdm_tx_desc_wr_wb_threshold_set(struct aq_hw_s *aq_hw, + u32 tx_desc_wr_wb_threshold, + u32 descriptor); + +/* Set TDM Interrupt Moderation Enable */ +void hw_atl_tdm_tdm_intr_moder_en_set(struct aq_hw_s *aq_hw, + u32 tdm_irq_moderation_en); +/* thm */ + +/* set lso tcp flag of first packet */ +void hw_atl_thm_lso_tcp_flag_of_first_pkt_set(struct aq_hw_s *aq_hw, + u32 lso_tcp_flag_of_first_pkt); + +/* set lso tcp flag of last packet */ +void hw_atl_thm_lso_tcp_flag_of_last_pkt_set(struct aq_hw_s *aq_hw, + u32 lso_tcp_flag_of_last_pkt); + +/* set lso tcp flag of middle packet */ +void hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(struct aq_hw_s *aq_hw, + u32 lso_tcp_flag_of_middle_pkt); + +/* tpb */ + +/* set TX Traffic Class Mode */ +void hw_atl_rpb_tps_tx_tc_mode_set(struct aq_hw_s *aq_hw, + u32 tx_traf_class_mode); + +/* get TX Traffic Class Mode */ +u32 hw_atl_rpb_tps_tx_tc_mode_get(struct aq_hw_s *aq_hw); + +/* set tx buffer enable */ +void hw_atl_tpb_tx_buff_en_set(struct aq_hw_s *aq_hw, u32 
tx_buff_en); + +/* set tx buffer high threshold (per tc) */ +void hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw, + u32 tx_buff_hi_threshold_per_tc, + u32 buffer); + +/* set tx buffer low threshold (per tc) */ +void hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw, + u32 tx_buff_lo_threshold_per_tc, + u32 buffer); + +/* set tx dma system loopback enable */ +void hw_atl_tpb_tx_dma_sys_lbk_en_set(struct aq_hw_s *aq_hw, + u32 tx_dma_sys_lbk_en); + +/* set tx packet buffer size (per tc) */ +void hw_atl_tpb_tx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_buff_size_per_tc, + u32 buffer); + +/* set tx path pad insert enable */ +void hw_atl_tpb_tx_path_scp_ins_en_set(struct aq_hw_s *aq_hw, + u32 tx_path_scp_ins_en); + +/* tpo */ + +/* set ipv4 header checksum offload enable */ +void hw_atl_tpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw, + u32 ipv4header_crc_offload_en); + +/* set tcp/udp checksum offload enable */ +void hw_atl_tpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw, + u32 tcp_udp_crc_offload_en); + +/* set tx pkt system loopback enable */ +void hw_atl_tpo_tx_pkt_sys_lbk_en_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_sys_lbk_en); + +/* tps */ + +/* set tx packet scheduler data arbitration mode */ +void hw_atl_tps_tx_pkt_shed_data_arb_mode_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_shed_data_arb_mode); + +/* set tx packet scheduler descriptor rate current time reset */ +void hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(struct aq_hw_s *aq_hw, + u32 curr_time_res); + +/* set tx packet scheduler descriptor rate limit */ +void hw_atl_tps_tx_pkt_shed_desc_rate_lim_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_shed_desc_rate_lim); + +/* set tx packet scheduler descriptor tc arbitration mode */ +void hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(struct aq_hw_s *aq_hw, + u32 arb_mode); + +/* set tx packet scheduler descriptor tc max credit */ +void hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(struct aq_hw_s *aq_hw, + u32 max_credit, + u32 tc); + +/* set tx packet scheduler descriptor tc weight */ +void hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_shed_desc_tc_weight, + u32 tc); + +/* set tx packet scheduler descriptor vm arbitration mode */ +void hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(struct aq_hw_s *aq_hw, + u32 arb_mode); + +/* set tx packet scheduler tc data max credit */ +void hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw_s *aq_hw, + u32 max_credit, + u32 tc); + +/* set tx packet scheduler tc data weight */ +void hw_atl_tps_tx_pkt_shed_tc_data_weight_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_shed_tc_data_weight, + u32 tc); + +/* tx */ + +/* set tx register reset disable */ +void hw_atl_tx_tx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 tx_reg_res_dis); + +/* msm */ + +/* get register access status */ +u32 hw_atl_msm_reg_access_status_get(struct aq_hw_s *aq_hw); + +/* set register address for indirect address */ +void hw_atl_msm_reg_addr_for_indirect_addr_set(struct aq_hw_s *aq_hw, + u32 reg_addr_for_indirect_addr); + +/* set register read strobe */ +void hw_atl_msm_reg_rd_strobe_set(struct aq_hw_s *aq_hw, u32 reg_rd_strobe); + +/* get register read data */ +u32 hw_atl_msm_reg_rd_data_get(struct aq_hw_s *aq_hw); + +/* set register write data */ +void hw_atl_msm_reg_wr_data_set(struct aq_hw_s *aq_hw, u32 reg_wr_data); + +/* set register write strobe */ +void hw_atl_msm_reg_wr_strobe_set(struct aq_hw_s *aq_hw, u32 reg_wr_strobe); + +/* pci */ + +/* set pci register reset disable */ +void 
hw_atl_pci_pci_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 pci_reg_res_dis);
+
+/* set uP Force Interrupt */
+void hw_atl_mcp_up_force_intr_set(struct aq_hw_s *aq_hw, u32 up_force_intr);
+
+
+#endif /* HW_ATL_LLH_H */
diff --git a/src/spdk/dpdk/drivers/net/atlantic/hw_atl/hw_atl_llh_internal.h b/src/spdk/dpdk/drivers/net/atlantic/hw_atl/hw_atl_llh_internal.h
new file mode 100644
index 000000000..27b9b9cb3
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/atlantic/hw_atl/hw_atl_llh_internal.h
@@ -0,0 +1,2407 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) */
+/* Copyright (C) 2014-2017 aQuantia Corporation. */
+
+/* File hw_atl_llh_internal.h: Preprocessor definitions
+ * for Atlantic registers.
+ */
+
+#ifndef HW_ATL_LLH_INTERNAL_H
+#define HW_ATL_LLH_INTERNAL_H
+
+/* global microprocessor semaphore definitions
+ * base address: 0x000003a0
+ * parameter: semaphore {s} | stride size 0x4 | range [0, 15]
+ */
+#define HW_ATL_GLB_CPU_SEM_ADR(semaphore) (0x000003a0u + (semaphore) * 0x4)
+/* register address for bitfield rx dma good octet counter lsw [1f:0] */
+#define HW_ATL_STATS_RX_DMA_GOOD_OCTET_COUNTERLSW 0x00006808
+/* register address for bitfield rx dma good packet counter lsw [1f:0] */
+#define HW_ATL_STATS_RX_DMA_GOOD_PKT_COUNTERLSW 0x00006800
+/* register address for bitfield tx dma good octet counter lsw [1f:0] */
+#define HW_ATL_STATS_TX_DMA_GOOD_OCTET_COUNTERLSW 0x00008808
+/* register address for bitfield tx dma good packet counter lsw [1f:0] */
+#define HW_ATL_STATS_TX_DMA_GOOD_PKT_COUNTERLSW 0x00008800
+
+/* register address for bitfield rx dma good octet counter msw [3f:20] */
+#define HW_ATL_STATS_RX_DMA_GOOD_OCTET_COUNTERMSW 0x0000680c
+/* register address for bitfield rx dma good packet counter msw [3f:20] */
+#define HW_ATL_STATS_RX_DMA_GOOD_PKT_COUNTERMSW 0x00006804
+/* register address for bitfield tx dma good octet counter msw [3f:20] */
+#define HW_ATL_STATS_TX_DMA_GOOD_OCTET_COUNTERMSW 0x0000880c
+/* register address for bitfield tx dma good packet counter msw [3f:20] */
+#define HW_ATL_STATS_TX_DMA_GOOD_PKT_COUNTERMSW 0x00008804
+
+/* preprocessor definitions for msm rx errors counter register */
+#define HW_ATL_MAC_MSM_RX_ERRS_CNT_ADR 0x00000120u
+
+/* preprocessor definitions for msm rx unicast frames counter register */
+#define HW_ATL_MAC_MSM_RX_UCST_FRM_CNT_ADR 0x000000e0u
+
+/* preprocessor definitions for msm rx multicast frames counter register */
+#define HW_ATL_MAC_MSM_RX_MCST_FRM_CNT_ADR 0x000000e8u
+
+/* preprocessor definitions for msm rx broadcast frames counter register */
+#define HW_ATL_MAC_MSM_RX_BCST_FRM_CNT_ADR 0x000000f0u
+
+/* preprocessor definitions for msm rx broadcast octets counter register 1 */
+#define HW_ATL_MAC_MSM_RX_BCST_OCTETS_COUNTER1_ADR 0x000001b0u
+
+/* preprocessor definitions for msm rx broadcast octets counter register 2 */
+#define HW_ATL_MAC_MSM_RX_BCST_OCTETS_COUNTER2_ADR 0x000001b4u
+
+/* preprocessor definitions for msm rx unicast octets counter register 0 */
+#define HW_ATL_MAC_MSM_RX_UCST_OCTETS_COUNTER0_ADR 0x000001b8u
+
+/* preprocessor definitions for msm tx unicast frames counter register */
+#define HW_ATL_MAC_MSM_TX_UCST_FRM_CNT_ADR 0x00000108u
+
+/* preprocessor definitions for msm tx multicast frames counter register */
+#define HW_ATL_MAC_MSM_TX_MCST_FRM_CNT_ADR 0x00000110u
+
+/* preprocessor definitions for global mif identification */
+#define HW_ATL_GLB_MIF_ID_ADR 0x0000001cu
+
+/* register address for bitfield iamr_lsw[1f:0] */
+#define HW_ATL_ITR_IAMRLSW_ADR 0x00002090
+/* register address for bitfield
rx dma drop packet counter [1f:0] */ +#define HW_ATL_RPB_RX_DMA_DROP_PKT_CNT_ADR 0x00006818 + +/* register address for bitfield imcr_lsw[1f:0] */ +#define HW_ATL_ITR_IMCRLSW_ADR 0x00002070 +/* register address for bitfield imsr_lsw[1f:0] */ +#define HW_ATL_ITR_IMSRLSW_ADR 0x00002060 +/* register address for bitfield itr_reg_res_dsbl */ +#define HW_ATL_ITR_REG_RES_DSBL_ADR 0x00002300 +/* bitmask for bitfield itr_reg_res_dsbl */ +#define HW_ATL_ITR_REG_RES_DSBL_MSK 0x20000000 +/* lower bit position of bitfield itr_reg_res_dsbl */ +#define HW_ATL_ITR_REG_RES_DSBL_SHIFT 29 +/* register address for bitfield iscr_lsw[1f:0] */ +#define HW_ATL_ITR_ISCRLSW_ADR 0x00002050 +/* register address for bitfield isr_lsw[1f:0] */ +#define HW_ATL_ITR_ISRLSW_ADR 0x00002000 +/* register address for bitfield itr_reset */ +#define HW_ATL_ITR_RES_ADR 0x00002300 +/* bitmask for bitfield itr_reset */ +#define HW_ATL_ITR_RES_MSK 0x80000000 +/* lower bit position of bitfield itr_reset */ +#define HW_ATL_ITR_RES_SHIFT 31 +/* register address for bitfield dca{d}_cpuid[7:0] */ +#define HW_ATL_RDM_DCADCPUID_ADR(dca) (0x00006100 + (dca) * 0x4) +/* bitmask for bitfield dca{d}_cpuid[7:0] */ +#define HW_ATL_RDM_DCADCPUID_MSK 0x000000ff +/* lower bit position of bitfield dca{d}_cpuid[7:0] */ +#define HW_ATL_RDM_DCADCPUID_SHIFT 0 +/* register address for bitfield dca_en */ +#define HW_ATL_RDM_DCA_EN_ADR 0x00006180 + +/* rx dca_en bitfield definitions + * preprocessor definitions for the bitfield "dca_en". + * port="pif_rdm_dca_en_i" + */ + +/* register address for bitfield dca_en */ +#define HW_ATL_RDM_DCA_EN_ADR 0x00006180 +/* bitmask for bitfield dca_en */ +#define HW_ATL_RDM_DCA_EN_MSK 0x80000000 +/* inverted bitmask for bitfield dca_en */ +#define HW_ATL_RDM_DCA_EN_MSKN 0x7fffffff +/* lower bit position of bitfield dca_en */ +#define HW_ATL_RDM_DCA_EN_SHIFT 31 +/* width of bitfield dca_en */ +#define HW_ATL_RDM_DCA_EN_WIDTH 1 +/* default value of bitfield dca_en */ +#define HW_ATL_RDM_DCA_EN_DEFAULT 0x1 + +/* rx dca_mode[3:0] bitfield definitions + * preprocessor definitions for the bitfield "dca_mode[3:0]". + * port="pif_rdm_dca_mode_i[3:0]" + */ + +/* register address for bitfield dca_mode[3:0] */ +#define HW_ATL_RDM_DCA_MODE_ADR 0x00006180 +/* bitmask for bitfield dca_mode[3:0] */ +#define HW_ATL_RDM_DCA_MODE_MSK 0x0000000f +/* inverted bitmask for bitfield dca_mode[3:0] */ +#define HW_ATL_RDM_DCA_MODE_MSKN 0xfffffff0 +/* lower bit position of bitfield dca_mode[3:0] */ +#define HW_ATL_RDM_DCA_MODE_SHIFT 0 +/* width of bitfield dca_mode[3:0] */ +#define HW_ATL_RDM_DCA_MODE_WIDTH 4 +/* default value of bitfield dca_mode[3:0] */ +#define HW_ATL_RDM_DCA_MODE_DEFAULT 0x0 + +/* rx desc{d}_data_size[4:0] bitfield definitions + * preprocessor definitions for the bitfield "desc{d}_data_size[4:0]". 
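The DMA good packet/octet statistics counters defined above are exposed as LSW/MSW register pairs. A minimal sketch of composing the 64-bit value is shown below; the aq_hw_read_reg() helper and the u64 type are assumed to exist elsewhere in the driver, and re-reading the MSW half to guard against a carry between the two reads is a common convention rather than something specified here.

#include "hw_atl_llh.h"
#include "hw_atl_llh_internal.h"

/* Assumed 32-bit MMIO read helper; the name and signature are illustrative. */
u32 aq_hw_read_reg(struct aq_hw_s *aq_hw, u32 reg);

/* Illustrative only: compose the 64-bit "rx dma good octets" counter
 * from its LSW/MSW halves.
 */
static u64 example_rx_dma_good_octets_get(struct aq_hw_s *aq_hw)
{
        u32 msw, lsw, msw2;

        do {
                msw = aq_hw_read_reg(aq_hw, HW_ATL_STATS_RX_DMA_GOOD_OCTET_COUNTERMSW);
                lsw = aq_hw_read_reg(aq_hw, HW_ATL_STATS_RX_DMA_GOOD_OCTET_COUNTERLSW);
                msw2 = aq_hw_read_reg(aq_hw, HW_ATL_STATS_RX_DMA_GOOD_OCTET_COUNTERMSW);
        } while (msw != msw2);  /* retry if the upper half rolled over mid-read */

        return ((u64)msw << 32) | lsw;
}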
+ * parameter: descriptor {d} | stride size 0x20 | range [0, 31] + * port="pif_rdm_desc0_data_size_i[4:0]" + */ + +/* register address for bitfield desc{d}_data_size[4:0] */ +#define HW_ATL_RDM_DESCDDATA_SIZE_ADR(descriptor) \ + (0x00005b18 + (descriptor) * 0x20) +/* bitmask for bitfield desc{d}_data_size[4:0] */ +#define HW_ATL_RDM_DESCDDATA_SIZE_MSK 0x0000001f +/* inverted bitmask for bitfield desc{d}_data_size[4:0] */ +#define HW_ATL_RDM_DESCDDATA_SIZE_MSKN 0xffffffe0 +/* lower bit position of bitfield desc{d}_data_size[4:0] */ +#define HW_ATL_RDM_DESCDDATA_SIZE_SHIFT 0 +/* width of bitfield desc{d}_data_size[4:0] */ +#define HW_ATL_RDM_DESCDDATA_SIZE_WIDTH 5 +/* default value of bitfield desc{d}_data_size[4:0] */ +#define HW_ATL_RDM_DESCDDATA_SIZE_DEFAULT 0x0 + +/* rx dca{d}_desc_en bitfield definitions + * preprocessor definitions for the bitfield "dca{d}_desc_en". + * parameter: dca {d} | stride size 0x4 | range [0, 31] + * port="pif_rdm_dca_desc_en_i[0]" + */ + +/* register address for bitfield dca{d}_desc_en */ +#define HW_ATL_RDM_DCADDESC_EN_ADR(dca) (0x00006100 + (dca) * 0x4) +/* bitmask for bitfield dca{d}_desc_en */ +#define HW_ATL_RDM_DCADDESC_EN_MSK 0x80000000 +/* inverted bitmask for bitfield dca{d}_desc_en */ +#define HW_ATL_RDM_DCADDESC_EN_MSKN 0x7fffffff +/* lower bit position of bitfield dca{d}_desc_en */ +#define HW_ATL_RDM_DCADDESC_EN_SHIFT 31 +/* width of bitfield dca{d}_desc_en */ +#define HW_ATL_RDM_DCADDESC_EN_WIDTH 1 +/* default value of bitfield dca{d}_desc_en */ +#define HW_ATL_RDM_DCADDESC_EN_DEFAULT 0x0 + +/* rx desc{d}_en bitfield definitions + * preprocessor definitions for the bitfield "desc{d}_en". + * parameter: descriptor {d} | stride size 0x20 | range [0, 31] + * port="pif_rdm_desc_en_i[0]" + */ + +/* register address for bitfield desc{d}_en */ +#define HW_ATL_RDM_DESCDEN_ADR(descriptor) (0x00005b08 + (descriptor) * 0x20) +/* bitmask for bitfield desc{d}_en */ +#define HW_ATL_RDM_DESCDEN_MSK 0x80000000 +/* inverted bitmask for bitfield desc{d}_en */ +#define HW_ATL_RDM_DESCDEN_MSKN 0x7fffffff +/* lower bit position of bitfield desc{d}_en */ +#define HW_ATL_RDM_DESCDEN_SHIFT 31 +/* width of bitfield desc{d}_en */ +#define HW_ATL_RDM_DESCDEN_WIDTH 1 +/* default value of bitfield desc{d}_en */ +#define HW_ATL_RDM_DESCDEN_DEFAULT 0x0 + +/* rx desc{d}_hdr_size[4:0] bitfield definitions + * preprocessor definitions for the bitfield "desc{d}_hdr_size[4:0]". + * parameter: descriptor {d} | stride size 0x20 | range [0, 31] + * port="pif_rdm_desc0_hdr_size_i[4:0]" + */ + +/* register address for bitfield desc{d}_hdr_size[4:0] */ +#define HW_ATL_RDM_DESCDHDR_SIZE_ADR(descriptor) \ + (0x00005b18 + (descriptor) * 0x20) +/* bitmask for bitfield desc{d}_hdr_size[4:0] */ +#define HW_ATL_RDM_DESCDHDR_SIZE_MSK 0x00001f00 +/* inverted bitmask for bitfield desc{d}_hdr_size[4:0] */ +#define HW_ATL_RDM_DESCDHDR_SIZE_MSKN 0xffffe0ff +/* lower bit position of bitfield desc{d}_hdr_size[4:0] */ +#define HW_ATL_RDM_DESCDHDR_SIZE_SHIFT 8 +/* width of bitfield desc{d}_hdr_size[4:0] */ +#define HW_ATL_RDM_DESCDHDR_SIZE_WIDTH 5 +/* default value of bitfield desc{d}_hdr_size[4:0] */ +#define HW_ATL_RDM_DESCDHDR_SIZE_DEFAULT 0x0 + +/* rx desc{d}_hdr_split bitfield definitions + * preprocessor definitions for the bitfield "desc{d}_hdr_split". 
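Each bitfield in this header is described by the same _ADR/_MSK/_MSKN/_SHIFT/_WIDTH/_DEFAULT set, which is normally consumed as a read-modify-write of the containing register. The sketch below applies that pattern to the rx desc{d}_en bit; the aq_hw_read_reg()/aq_hw_write_reg() helpers are assumed 32-bit MMIO accessors, not declarations from this patch.

#include "hw_atl_llh.h"
#include "hw_atl_llh_internal.h"

/* Assumed 32-bit MMIO helpers; names and signatures are illustrative. */
u32 aq_hw_read_reg(struct aq_hw_s *aq_hw, u32 reg);
void aq_hw_write_reg(struct aq_hw_s *aq_hw, u32 reg, u32 value);

/* Illustrative only: set or clear the rx desc{d}_en bit for one ring
 * using the conventional read-modify-write over _ADR/_MSK/_SHIFT.
 */
static void example_rdm_rx_desc_en_set(struct aq_hw_s *aq_hw,
                                       u32 rx_desc_en, u32 descriptor)
{
        u32 reg = aq_hw_read_reg(aq_hw, HW_ATL_RDM_DESCDEN_ADR(descriptor));

        reg &= ~HW_ATL_RDM_DESCDEN_MSK;                    /* clear the field   */
        reg |= (rx_desc_en << HW_ATL_RDM_DESCDEN_SHIFT) &  /* insert new value  */
               HW_ATL_RDM_DESCDEN_MSK;
        aq_hw_write_reg(aq_hw, HW_ATL_RDM_DESCDEN_ADR(descriptor), reg);
}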
+ * parameter: descriptor {d} | stride size 0x20 | range [0, 31] + * port="pif_rdm_desc_hdr_split_i[0]" + */ + +/* register address for bitfield desc{d}_hdr_split */ +#define HW_ATL_RDM_DESCDHDR_SPLIT_ADR(descriptor) \ + (0x00005b08 + (descriptor) * 0x20) +/* bitmask for bitfield desc{d}_hdr_split */ +#define HW_ATL_RDM_DESCDHDR_SPLIT_MSK 0x10000000 +/* inverted bitmask for bitfield desc{d}_hdr_split */ +#define HW_ATL_RDM_DESCDHDR_SPLIT_MSKN 0xefffffff +/* lower bit position of bitfield desc{d}_hdr_split */ +#define HW_ATL_RDM_DESCDHDR_SPLIT_SHIFT 28 +/* width of bitfield desc{d}_hdr_split */ +#define HW_ATL_RDM_DESCDHDR_SPLIT_WIDTH 1 +/* default value of bitfield desc{d}_hdr_split */ +#define HW_ATL_RDM_DESCDHDR_SPLIT_DEFAULT 0x0 + +/* rx desc{d}_hd[c:0] bitfield definitions + * preprocessor definitions for the bitfield "desc{d}_hd[c:0]". + * parameter: descriptor {d} | stride size 0x20 | range [0, 31] + * port="rdm_pif_desc0_hd_o[12:0]" + */ + +/* register address for bitfield desc{d}_hd[c:0] */ +#define HW_ATL_RDM_DESCDHD_ADR(descriptor) (0x00005b0c + (descriptor) * 0x20) +/* bitmask for bitfield desc{d}_hd[c:0] */ +#define HW_ATL_RDM_DESCDHD_MSK 0x00001fff +/* inverted bitmask for bitfield desc{d}_hd[c:0] */ +#define HW_ATL_RDM_DESCDHD_MSKN 0xffffe000 +/* lower bit position of bitfield desc{d}_hd[c:0] */ +#define HW_ATL_RDM_DESCDHD_SHIFT 0 +/* width of bitfield desc{d}_hd[c:0] */ +#define HW_ATL_RDM_DESCDHD_WIDTH 13 + +/* rx desc{d}_len[9:0] bitfield definitions + * preprocessor definitions for the bitfield "desc{d}_len[9:0]". + * parameter: descriptor {d} | stride size 0x20 | range [0, 31] + * port="pif_rdm_desc0_len_i[9:0]" + */ + +/* register address for bitfield desc{d}_len[9:0] */ +#define HW_ATL_RDM_DESCDLEN_ADR(descriptor) (0x00005b08 + (descriptor) * 0x20) +/* bitmask for bitfield desc{d}_len[9:0] */ +#define HW_ATL_RDM_DESCDLEN_MSK 0x00001ff8 +/* inverted bitmask for bitfield desc{d}_len[9:0] */ +#define HW_ATL_RDM_DESCDLEN_MSKN 0xffffe007 +/* lower bit position of bitfield desc{d}_len[9:0] */ +#define HW_ATL_RDM_DESCDLEN_SHIFT 3 +/* width of bitfield desc{d}_len[9:0] */ +#define HW_ATL_RDM_DESCDLEN_WIDTH 10 +/* default value of bitfield desc{d}_len[9:0] */ +#define HW_ATL_RDM_DESCDLEN_DEFAULT 0x0 + +/* rx desc{d}_reset bitfield definitions + * preprocessor definitions for the bitfield "desc{d}_reset". + * parameter: descriptor {d} | stride size 0x20 | range [0, 31] + * port="pif_rdm_q_pf_res_i[0]" + */ + +/* register address for bitfield desc{d}_reset */ +#define HW_ATL_RDM_DESCDRESET_ADR(descriptor) (0x00005b08 + (descriptor) * 0x20) +/* bitmask for bitfield desc{d}_reset */ +#define HW_ATL_RDM_DESCDRESET_MSK 0x02000000 +/* inverted bitmask for bitfield desc{d}_reset */ +#define HW_ATL_RDM_DESCDRESET_MSKN 0xfdffffff +/* lower bit position of bitfield desc{d}_reset */ +#define HW_ATL_RDM_DESCDRESET_SHIFT 25 +/* width of bitfield desc{d}_reset */ +#define HW_ATL_RDM_DESCDRESET_WIDTH 1 +/* default value of bitfield desc{d}_reset */ +#define HW_ATL_RDM_DESCDRESET_DEFAULT 0x0 + +/* rx int_desc_wrb_en bitfield definitions + * preprocessor definitions for the bitfield "int_desc_wrb_en". 
+ * port="pif_rdm_int_desc_wrb_en_i" + */ + +/* register address for bitfield int_desc_wrb_en */ +#define HW_ATL_RDM_INT_DESC_WRB_EN_ADR 0x00005a30 +/* bitmask for bitfield int_desc_wrb_en */ +#define HW_ATL_RDM_INT_DESC_WRB_EN_MSK 0x00000004 +/* inverted bitmask for bitfield int_desc_wrb_en */ +#define HW_ATL_RDM_INT_DESC_WRB_EN_MSKN 0xfffffffb +/* lower bit position of bitfield int_desc_wrb_en */ +#define HW_ATL_RDM_INT_DESC_WRB_EN_SHIFT 2 +/* width of bitfield int_desc_wrb_en */ +#define HW_ATL_RDM_INT_DESC_WRB_EN_WIDTH 1 +/* default value of bitfield int_desc_wrb_en */ +#define HW_ATL_RDM_INT_DESC_WRB_EN_DEFAULT 0x0 + +/* rx dca{d}_hdr_en bitfield definitions + * preprocessor definitions for the bitfield "dca{d}_hdr_en". + * parameter: dca {d} | stride size 0x4 | range [0, 31] + * port="pif_rdm_dca_hdr_en_i[0]" + */ + +/* register address for bitfield dca{d}_hdr_en */ +#define HW_ATL_RDM_DCADHDR_EN_ADR(dca) (0x00006100 + (dca) * 0x4) +/* bitmask for bitfield dca{d}_hdr_en */ +#define HW_ATL_RDM_DCADHDR_EN_MSK 0x40000000 +/* inverted bitmask for bitfield dca{d}_hdr_en */ +#define HW_ATL_RDM_DCADHDR_EN_MSKN 0xbfffffff +/* lower bit position of bitfield dca{d}_hdr_en */ +#define HW_ATL_RDM_DCADHDR_EN_SHIFT 30 +/* width of bitfield dca{d}_hdr_en */ +#define HW_ATL_RDM_DCADHDR_EN_WIDTH 1 +/* default value of bitfield dca{d}_hdr_en */ +#define HW_ATL_RDM_DCADHDR_EN_DEFAULT 0x0 + +/* rx dca{d}_pay_en bitfield definitions + * preprocessor definitions for the bitfield "dca{d}_pay_en". + * parameter: dca {d} | stride size 0x4 | range [0, 31] + * port="pif_rdm_dca_pay_en_i[0]" + */ + +/* register address for bitfield dca{d}_pay_en */ +#define HW_ATL_RDM_DCADPAY_EN_ADR(dca) (0x00006100 + (dca) * 0x4) +/* bitmask for bitfield dca{d}_pay_en */ +#define HW_ATL_RDM_DCADPAY_EN_MSK 0x20000000 +/* inverted bitmask for bitfield dca{d}_pay_en */ +#define HW_ATL_RDM_DCADPAY_EN_MSKN 0xdfffffff +/* lower bit position of bitfield dca{d}_pay_en */ +#define HW_ATL_RDM_DCADPAY_EN_SHIFT 29 +/* width of bitfield dca{d}_pay_en */ +#define HW_ATL_RDM_DCADPAY_EN_WIDTH 1 +/* default value of bitfield dca{d}_pay_en */ +#define HW_ATL_RDM_DCADPAY_EN_DEFAULT 0x0 + +/* RX rdm_int_rim_en Bitfield Definitions + * Preprocessor definitions for the bitfield "rdm_int_rim_en". 
+ * PORT="pif_rdm_int_rim_en_i" + */ + +/* Register address for bitfield rdm_int_rim_en */ +#define HW_ATL_RDM_INT_RIM_EN_ADR 0x00005A30 +/* Bitmask for bitfield rdm_int_rim_en */ +#define HW_ATL_RDM_INT_RIM_EN_MSK 0x00000008 +/* Inverted bitmask for bitfield rdm_int_rim_en */ +#define HW_ATL_RDM_INT_RIM_EN_MSKN 0xFFFFFFF7 +/* Lower bit position of bitfield rdm_int_rim_en */ +#define HW_ATL_RDM_INT_RIM_EN_SHIFT 3 +/* Width of bitfield rdm_int_rim_en */ +#define HW_ATL_RDM_INT_RIM_EN_WIDTH 1 +/* Default value of bitfield rdm_int_rim_en */ +#define HW_ATL_RDM_INT_RIM_EN_DEFAULT 0x0 + +/* general interrupt mapping register definitions + * preprocessor definitions for general interrupt mapping register + * base address: 0x00002180 + * parameter: regidx {f} | stride size 0x4 | range [0, 3] + */ +#define HW_ATL_GEN_INTR_MAP_ADR(regidx) (0x00002180u + (regidx) * 0x4) + +/* general interrupt status register definitions + * preprocessor definitions for general interrupt status register + * address: 0x000021A0 + */ + +#define HW_ATL_GEN_INTR_STAT_ADR 0x000021A4U + +/* interrupt global control register definitions + * preprocessor definitions for interrupt global control register + * address: 0x00002300 + */ +#define HW_ATL_INTR_GLB_CTL_ADR 0x00002300u + +/* interrupt throttle register definitions + * preprocessor definitions for interrupt throttle register + * base address: 0x00002800 + * parameter: throttle {t} | stride size 0x4 | range [0, 31] + */ +#define HW_ATL_INTR_THR_ADR(throttle) (0x00002800u + (throttle) * 0x4) + +/* rx dma descriptor base address lsw definitions + * preprocessor definitions for rx dma descriptor base address lsw + * base address: 0x00005b00 + * parameter: descriptor {d} | stride size 0x20 | range [0, 31] + */ +#define HW_ATL_RX_DMA_DESC_BASE_ADDRLSW_ADR(descriptor) \ +(0x00005b00u + (descriptor) * 0x20) + +/* rx dma descriptor base address msw definitions + * preprocessor definitions for rx dma descriptor base address msw + * base address: 0x00005b04 + * parameter: descriptor {d} | stride size 0x20 | range [0, 31] + */ +#define HW_ATL_RX_DMA_DESC_BASE_ADDRMSW_ADR(descriptor) \ +(0x00005b04u + (descriptor) * 0x20) + +/* rx dma descriptor status register definitions + * preprocessor definitions for rx dma descriptor status register + * base address: 0x00005b14 + * parameter: descriptor {d} | stride size 0x20 | range [0, 31] + */ +#define HW_ATL_RX_DMA_DESC_STAT_ADR(descriptor) \ + (0x00005b14u + (descriptor) * 0x20) + +/* rx dma descriptor tail pointer register definitions + * preprocessor definitions for rx dma descriptor tail pointer register + * base address: 0x00005b10 + * parameter: descriptor {d} | stride size 0x20 | range [0, 31] + */ +#define HW_ATL_RX_DMA_DESC_TAIL_PTR_ADR(descriptor) \ + (0x00005b10u + (descriptor) * 0x20) + +/* rx interrupt moderation control register definitions + * Preprocessor definitions for RX Interrupt Moderation Control Register + * Base Address: 0x00005A40 + * Parameter: RIM {R} | stride size 0x4 | range [0, 31] + */ +#define HW_ATL_RX_INTR_MODERATION_CTL_ADR(rim) (0x00005A40u + (rim) * 0x4) + +/* rx filter multicast filter mask register definitions + * preprocessor definitions for rx filter multicast filter mask register + * address: 0x00005270 + */ +#define HW_ATL_RX_FLR_MCST_FLR_MSK_ADR 0x00005270u + +/* rx filter multicast filter register definitions + * preprocessor definitions for rx filter multicast filter register + * base address: 0x00005250 + * parameter: filter {f} | stride size 0x4 | range [0, 7] + */ +#define 
HW_ATL_RX_FLR_MCST_FLR_ADR(filter) (0x00005250u + (filter) * 0x4) + +/* RX Filter RSS Control Register 1 Definitions + * Preprocessor definitions for RX Filter RSS Control Register 1 + * Address: 0x000054C0 + */ +#define HW_ATL_RX_FLR_RSS_CONTROL1_ADR 0x000054C0u + +/* RX Filter Control Register 2 Definitions + * Preprocessor definitions for RX Filter Control Register 2 + * Address: 0x00005104 + */ +#define HW_ATL_RX_FLR_CONTROL2_ADR 0x00005104u + +/* tx tx dma debug control [1f:0] bitfield definitions + * preprocessor definitions for the bitfield "tx dma debug control [1f:0]". + * port="pif_tdm_debug_cntl_i[31:0]" + */ + +/* register address for bitfield tx dma debug control [1f:0] */ +#define HW_ATL_TDM_TX_DMA_DEBUG_CTL_ADR 0x00008920 +/* bitmask for bitfield tx dma debug control [1f:0] */ +#define HW_ATL_TDM_TX_DMA_DEBUG_CTL_MSK 0xffffffff +/* inverted bitmask for bitfield tx dma debug control [1f:0] */ +#define HW_ATL_TDM_TX_DMA_DEBUG_CTL_MSKN 0x00000000 +/* lower bit position of bitfield tx dma debug control [1f:0] */ +#define HW_ATL_TDM_TX_DMA_DEBUG_CTL_SHIFT 0 +/* width of bitfield tx dma debug control [1f:0] */ +#define HW_ATL_TDM_TX_DMA_DEBUG_CTL_WIDTH 32 +/* default value of bitfield tx dma debug control [1f:0] */ +#define HW_ATL_TDM_TX_DMA_DEBUG_CTL_DEFAULT 0x0 + +/* tx dma descriptor base address lsw definitions + * preprocessor definitions for tx dma descriptor base address lsw + * base address: 0x00007c00 + * parameter: descriptor {d} | stride size 0x40 | range [0, 31] + */ +#define HW_ATL_TX_DMA_DESC_BASE_ADDRLSW_ADR(descriptor) \ + (0x00007c00u + (descriptor) * 0x40) + +/* tx dma descriptor tail pointer register definitions + * preprocessor definitions for tx dma descriptor tail pointer register + * base address: 0x00007c10 + * parameter: descriptor {d} | stride size 0x40 | range [0, 31] + */ +#define HW_ATL_TX_DMA_DESC_TAIL_PTR_ADR(descriptor) \ + (0x00007c10u + (descriptor) * 0x40) + +/* rx dma_sys_loopback bitfield definitions + * preprocessor definitions for the bitfield "dma_sys_loopback". + * port="pif_rpb_dma_sys_lbk_i" + */ + +/* register address for bitfield dma_sys_loopback */ +#define HW_ATL_RPB_DMA_SYS_LBK_ADR 0x00005000 +/* bitmask for bitfield dma_sys_loopback */ +#define HW_ATL_RPB_DMA_SYS_LBK_MSK 0x00000040 +/* inverted bitmask for bitfield dma_sys_loopback */ +#define HW_ATL_RPB_DMA_SYS_LBK_MSKN 0xffffffbf +/* lower bit position of bitfield dma_sys_loopback */ +#define HW_ATL_RPB_DMA_SYS_LBK_SHIFT 6 +/* width of bitfield dma_sys_loopback */ +#define HW_ATL_RPB_DMA_SYS_LBK_WIDTH 1 +/* default value of bitfield dma_sys_loopback */ +#define HW_ATL_RPB_DMA_SYS_LBK_DEFAULT 0x0 + +/* rx rx_tc_mode bitfield definitions + * preprocessor definitions for the bitfield "rx_tc_mode". + * port="pif_rpb_rx_tc_mode_i,pif_rpf_rx_tc_mode_i" + */ + +/* register address for bitfield rx_tc_mode */ +#define HW_ATL_RPB_RPF_RX_TC_MODE_ADR 0x00005700 +/* bitmask for bitfield rx_tc_mode */ +#define HW_ATL_RPB_RPF_RX_TC_MODE_MSK 0x00000100 +/* inverted bitmask for bitfield rx_tc_mode */ +#define HW_ATL_RPB_RPF_RX_TC_MODE_MSKN 0xfffffeff +/* lower bit position of bitfield rx_tc_mode */ +#define HW_ATL_RPB_RPF_RX_TC_MODE_SHIFT 8 +/* width of bitfield rx_tc_mode */ +#define HW_ATL_RPB_RPF_RX_TC_MODE_WIDTH 1 +/* default value of bitfield rx_tc_mode */ +#define HW_ATL_RPB_RPF_RX_TC_MODE_DEFAULT 0x0 + +/* rx rx_buf_en bitfield definitions + * preprocessor definitions for the bitfield "rx_buf_en". 
+ * port="pif_rpb_rx_buf_en_i" + */ + +/* register address for bitfield rx_buf_en */ +#define HW_ATL_RPB_RX_BUF_EN_ADR 0x00005700 +/* bitmask for bitfield rx_buf_en */ +#define HW_ATL_RPB_RX_BUF_EN_MSK 0x00000001 +/* inverted bitmask for bitfield rx_buf_en */ +#define HW_ATL_RPB_RX_BUF_EN_MSKN 0xfffffffe +/* lower bit position of bitfield rx_buf_en */ +#define HW_ATL_RPB_RX_BUF_EN_SHIFT 0 +/* width of bitfield rx_buf_en */ +#define HW_ATL_RPB_RX_BUF_EN_WIDTH 1 +/* default value of bitfield rx_buf_en */ +#define HW_ATL_RPB_RX_BUF_EN_DEFAULT 0x0 + +/* rx rx{b}_hi_thresh[d:0] bitfield definitions + * preprocessor definitions for the bitfield "rx{b}_hi_thresh[d:0]". + * parameter: buffer {b} | stride size 0x10 | range [0, 7] + * port="pif_rpb_rx0_hi_thresh_i[13:0]" + */ + +/* register address for bitfield rx{b}_hi_thresh[d:0] */ +#define HW_ATL_RPB_RXBHI_THRESH_ADR(buffer) (0x00005714 + (buffer) * 0x10) +/* bitmask for bitfield rx{b}_hi_thresh[d:0] */ +#define HW_ATL_RPB_RXBHI_THRESH_MSK 0x3fff0000 +/* inverted bitmask for bitfield rx{b}_hi_thresh[d:0] */ +#define HW_ATL_RPB_RXBHI_THRESH_MSKN 0xc000ffff +/* lower bit position of bitfield rx{b}_hi_thresh[d:0] */ +#define HW_ATL_RPB_RXBHI_THRESH_SHIFT 16 +/* width of bitfield rx{b}_hi_thresh[d:0] */ +#define HW_ATL_RPB_RXBHI_THRESH_WIDTH 14 +/* default value of bitfield rx{b}_hi_thresh[d:0] */ +#define HW_ATL_RPB_RXBHI_THRESH_DEFAULT 0x0 + +/* rx rx{b}_lo_thresh[d:0] bitfield definitions + * preprocessor definitions for the bitfield "rx{b}_lo_thresh[d:0]". + * parameter: buffer {b} | stride size 0x10 | range [0, 7] + * port="pif_rpb_rx0_lo_thresh_i[13:0]" + */ + +/* register address for bitfield rx{b}_lo_thresh[d:0] */ +#define HW_ATL_RPB_RXBLO_THRESH_ADR(buffer) (0x00005714 + (buffer) * 0x10) +/* bitmask for bitfield rx{b}_lo_thresh[d:0] */ +#define HW_ATL_RPB_RXBLO_THRESH_MSK 0x00003fff +/* inverted bitmask for bitfield rx{b}_lo_thresh[d:0] */ +#define HW_ATL_RPB_RXBLO_THRESH_MSKN 0xffffc000 +/* lower bit position of bitfield rx{b}_lo_thresh[d:0] */ +#define HW_ATL_RPB_RXBLO_THRESH_SHIFT 0 +/* width of bitfield rx{b}_lo_thresh[d:0] */ +#define HW_ATL_RPB_RXBLO_THRESH_WIDTH 14 +/* default value of bitfield rx{b}_lo_thresh[d:0] */ +#define HW_ATL_RPB_RXBLO_THRESH_DEFAULT 0x0 + +/* rx rx_fc_mode[1:0] bitfield definitions + * preprocessor definitions for the bitfield "rx_fc_mode[1:0]". + * port="pif_rpb_rx_fc_mode_i[1:0]" + */ + +/* register address for bitfield rx_fc_mode[1:0] */ +#define HW_ATL_RPB_RX_FC_MODE_ADR 0x00005700 +/* bitmask for bitfield rx_fc_mode[1:0] */ +#define HW_ATL_RPB_RX_FC_MODE_MSK 0x00000030 +/* inverted bitmask for bitfield rx_fc_mode[1:0] */ +#define HW_ATL_RPB_RX_FC_MODE_MSKN 0xffffffcf +/* lower bit position of bitfield rx_fc_mode[1:0] */ +#define HW_ATL_RPB_RX_FC_MODE_SHIFT 4 +/* width of bitfield rx_fc_mode[1:0] */ +#define HW_ATL_RPB_RX_FC_MODE_WIDTH 2 +/* default value of bitfield rx_fc_mode[1:0] */ +#define HW_ATL_RPB_RX_FC_MODE_DEFAULT 0x0 + +/* rx rx{b}_buf_size[8:0] bitfield definitions + * preprocessor definitions for the bitfield "rx{b}_buf_size[8:0]". 
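The per-buffer XON/XOFF thresholds above share a single register (high threshold in bits 29:16, low threshold in bits 13:0), so both fields are typically updated in one read-modify-write. A minimal sketch follows; the aq_hw_read_reg()/aq_hw_write_reg() helpers are assumed, and the threshold units and the values a real driver would choose are outside the scope of this header.

#include "hw_atl_llh.h"
#include "hw_atl_llh_internal.h"

/* Assumed 32-bit MMIO helpers; names are illustrative. */
u32 aq_hw_read_reg(struct aq_hw_s *aq_hw, u32 reg);
void aq_hw_write_reg(struct aq_hw_s *aq_hw, u32 reg, u32 value);

/* Illustrative only: update both flow-control thresholds of rx packet
 * buffer 'buffer' in one read-modify-write (they live in one register).
 */
static void example_rpb_rx_thresholds_set(struct aq_hw_s *aq_hw,
                                          u32 buffer, u32 hi, u32 lo)
{
        u32 reg = aq_hw_read_reg(aq_hw, HW_ATL_RPB_RXBHI_THRESH_ADR(buffer));

        reg &= ~(HW_ATL_RPB_RXBHI_THRESH_MSK | HW_ATL_RPB_RXBLO_THRESH_MSK);
        reg |= (hi << HW_ATL_RPB_RXBHI_THRESH_SHIFT) & HW_ATL_RPB_RXBHI_THRESH_MSK;
        reg |= (lo << HW_ATL_RPB_RXBLO_THRESH_SHIFT) & HW_ATL_RPB_RXBLO_THRESH_MSK;
        aq_hw_write_reg(aq_hw, HW_ATL_RPB_RXBHI_THRESH_ADR(buffer), reg);
}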
+ * parameter: buffer {b} | stride size 0x10 | range [0, 7] + * port="pif_rpb_rx0_buf_size_i[8:0]" + */ + +/* register address for bitfield rx{b}_buf_size[8:0] */ +#define HW_ATL_RPB_RXBBUF_SIZE_ADR(buffer) (0x00005710 + (buffer) * 0x10) +/* bitmask for bitfield rx{b}_buf_size[8:0] */ +#define HW_ATL_RPB_RXBBUF_SIZE_MSK 0x000001ff +/* inverted bitmask for bitfield rx{b}_buf_size[8:0] */ +#define HW_ATL_RPB_RXBBUF_SIZE_MSKN 0xfffffe00 +/* lower bit position of bitfield rx{b}_buf_size[8:0] */ +#define HW_ATL_RPB_RXBBUF_SIZE_SHIFT 0 +/* width of bitfield rx{b}_buf_size[8:0] */ +#define HW_ATL_RPB_RXBBUF_SIZE_WIDTH 9 +/* default value of bitfield rx{b}_buf_size[8:0] */ +#define HW_ATL_RPB_RXBBUF_SIZE_DEFAULT 0x0 + +/* rx rx{b}_xoff_en bitfield definitions + * preprocessor definitions for the bitfield "rx{b}_xoff_en". + * parameter: buffer {b} | stride size 0x10 | range [0, 7] + * port="pif_rpb_rx_xoff_en_i[0]" + */ + +/* register address for bitfield rx{b}_xoff_en */ +#define HW_ATL_RPB_RXBXOFF_EN_ADR(buffer) (0x00005714 + (buffer) * 0x10) +/* bitmask for bitfield rx{b}_xoff_en */ +#define HW_ATL_RPB_RXBXOFF_EN_MSK 0x80000000 +/* inverted bitmask for bitfield rx{b}_xoff_en */ +#define HW_ATL_RPB_RXBXOFF_EN_MSKN 0x7fffffff +/* lower bit position of bitfield rx{b}_xoff_en */ +#define HW_ATL_RPB_RXBXOFF_EN_SHIFT 31 +/* width of bitfield rx{b}_xoff_en */ +#define HW_ATL_RPB_RXBXOFF_EN_WIDTH 1 +/* default value of bitfield rx{b}_xoff_en */ +#define HW_ATL_RPB_RXBXOFF_EN_DEFAULT 0x0 + +/* rx l2_bc_thresh[f:0] bitfield definitions + * preprocessor definitions for the bitfield "l2_bc_thresh[f:0]". + * port="pif_rpf_l2_bc_thresh_i[15:0]" + */ + +/* register address for bitfield l2_bc_thresh[f:0] */ +#define HW_ATL_RPFL2BC_THRESH_ADR 0x00005100 +/* bitmask for bitfield l2_bc_thresh[f:0] */ +#define HW_ATL_RPFL2BC_THRESH_MSK 0xffff0000 +/* inverted bitmask for bitfield l2_bc_thresh[f:0] */ +#define HW_ATL_RPFL2BC_THRESH_MSKN 0x0000ffff +/* lower bit position of bitfield l2_bc_thresh[f:0] */ +#define HW_ATL_RPFL2BC_THRESH_SHIFT 16 +/* width of bitfield l2_bc_thresh[f:0] */ +#define HW_ATL_RPFL2BC_THRESH_WIDTH 16 +/* default value of bitfield l2_bc_thresh[f:0] */ +#define HW_ATL_RPFL2BC_THRESH_DEFAULT 0x0 + +/* rx l2_bc_en bitfield definitions + * preprocessor definitions for the bitfield "l2_bc_en". + * port="pif_rpf_l2_bc_en_i" + */ + +/* register address for bitfield l2_bc_en */ +#define HW_ATL_RPFL2BC_EN_ADR 0x00005100 +/* bitmask for bitfield l2_bc_en */ +#define HW_ATL_RPFL2BC_EN_MSK 0x00000001 +/* inverted bitmask for bitfield l2_bc_en */ +#define HW_ATL_RPFL2BC_EN_MSKN 0xfffffffe +/* lower bit position of bitfield l2_bc_en */ +#define HW_ATL_RPFL2BC_EN_SHIFT 0 +/* width of bitfield l2_bc_en */ +#define HW_ATL_RPFL2BC_EN_WIDTH 1 +/* default value of bitfield l2_bc_en */ +#define HW_ATL_RPFL2BC_EN_DEFAULT 0x0 + +/* rx l2_bc_act[2:0] bitfield definitions + * preprocessor definitions for the bitfield "l2_bc_act[2:0]". 
+ * port="pif_rpf_l2_bc_act_i[2:0]" + */ + +/* register address for bitfield l2_bc_act[2:0] */ +#define HW_ATL_RPFL2BC_ACT_ADR 0x00005100 +/* bitmask for bitfield l2_bc_act[2:0] */ +#define HW_ATL_RPFL2BC_ACT_MSK 0x00007000 +/* inverted bitmask for bitfield l2_bc_act[2:0] */ +#define HW_ATL_RPFL2BC_ACT_MSKN 0xffff8fff +/* lower bit position of bitfield l2_bc_act[2:0] */ +#define HW_ATL_RPFL2BC_ACT_SHIFT 12 +/* width of bitfield l2_bc_act[2:0] */ +#define HW_ATL_RPFL2BC_ACT_WIDTH 3 +/* default value of bitfield l2_bc_act[2:0] */ +#define HW_ATL_RPFL2BC_ACT_DEFAULT 0x0 + +/* rx l2_mc_en{f} bitfield definitions + * preprocessor definitions for the bitfield "l2_mc_en{f}". + * parameter: filter {f} | stride size 0x4 | range [0, 7] + * port="pif_rpf_l2_mc_en_i[0]" + */ + +/* register address for bitfield l2_mc_en{f} */ +#define HW_ATL_RPFL2MC_ENF_ADR(filter) (0x00005250 + (filter) * 0x4) +/* bitmask for bitfield l2_mc_en{f} */ +#define HW_ATL_RPFL2MC_ENF_MSK 0x80000000 +/* inverted bitmask for bitfield l2_mc_en{f} */ +#define HW_ATL_RPFL2MC_ENF_MSKN 0x7fffffff +/* lower bit position of bitfield l2_mc_en{f} */ +#define HW_ATL_RPFL2MC_ENF_SHIFT 31 +/* width of bitfield l2_mc_en{f} */ +#define HW_ATL_RPFL2MC_ENF_WIDTH 1 +/* default value of bitfield l2_mc_en{f} */ +#define HW_ATL_RPFL2MC_ENF_DEFAULT 0x0 + +/* rx l2_promis_mode bitfield definitions + * preprocessor definitions for the bitfield "l2_promis_mode". + * port="pif_rpf_l2_promis_mode_i" + */ + +/* register address for bitfield l2_promis_mode */ +#define HW_ATL_RPFL2PROMIS_MODE_ADR 0x00005100 +/* bitmask for bitfield l2_promis_mode */ +#define HW_ATL_RPFL2PROMIS_MODE_MSK 0x00000008 +/* inverted bitmask for bitfield l2_promis_mode */ +#define HW_ATL_RPFL2PROMIS_MODE_MSKN 0xfffffff7 +/* lower bit position of bitfield l2_promis_mode */ +#define HW_ATL_RPFL2PROMIS_MODE_SHIFT 3 +/* width of bitfield l2_promis_mode */ +#define HW_ATL_RPFL2PROMIS_MODE_WIDTH 1 +/* default value of bitfield l2_promis_mode */ +#define HW_ATL_RPFL2PROMIS_MODE_DEFAULT 0x0 + +/* rx l2_uc_act{f}[2:0] bitfield definitions + * preprocessor definitions for the bitfield "l2_uc_act{f}[2:0]". + * parameter: filter {f} | stride size 0x8 | range [0, 37] + * port="pif_rpf_l2_uc_act0_i[2:0]" + */ + +/* register address for bitfield l2_uc_act{f}[2:0] */ +#define HW_ATL_RPFL2UC_ACTF_ADR(filter) (0x00005114 + (filter) * 0x8) +/* bitmask for bitfield l2_uc_act{f}[2:0] */ +#define HW_ATL_RPFL2UC_ACTF_MSK 0x00070000 +/* inverted bitmask for bitfield l2_uc_act{f}[2:0] */ +#define HW_ATL_RPFL2UC_ACTF_MSKN 0xfff8ffff +/* lower bit position of bitfield l2_uc_act{f}[2:0] */ +#define HW_ATL_RPFL2UC_ACTF_SHIFT 16 +/* width of bitfield l2_uc_act{f}[2:0] */ +#define HW_ATL_RPFL2UC_ACTF_WIDTH 3 +/* default value of bitfield l2_uc_act{f}[2:0] */ +#define HW_ATL_RPFL2UC_ACTF_DEFAULT 0x0 + +/* rx l2_uc_en{f} bitfield definitions + * preprocessor definitions for the bitfield "l2_uc_en{f}". 
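The l2_promis_mode bit above is normally driven through the hw_atl_rpfl2promiscuous_mode_en_set() accessor declared earlier in hw_atl_llh.h. The sketch below shows roughly how a promiscuous/all-multicast toggle could map onto those setters; pairing the accept-all-multicast bit with multicast filter 0 is an assumption made for the example, not behaviour stated in this patch.

#include "hw_atl_llh.h"

/* Illustrative only: apply promiscuous and all-multicast rx modes. */
static void example_l2_rx_mode_set(struct aq_hw_s *aq_hw,
                                   u32 promisc, u32 all_multicast)
{
        hw_atl_rpfl2promiscuous_mode_en_set(aq_hw, promisc);
        /* accept-all-multicast is assumed to be backed by filter 0 here */
        hw_atl_rpfl2_accept_all_mc_packets_set(aq_hw, all_multicast);
        hw_atl_rpfl2multicast_flr_en_set(aq_hw, all_multicast, 0U);
}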
+ * parameter: filter {f} | stride size 0x8 | range [0, 37] + * port="pif_rpf_l2_uc_en_i[0]" + */ + +/* register address for bitfield l2_uc_en{f} */ +#define HW_ATL_RPFL2UC_ENF_ADR(filter) (0x00005114 + (filter) * 0x8) +/* bitmask for bitfield l2_uc_en{f} */ +#define HW_ATL_RPFL2UC_ENF_MSK 0x80000000 +/* inverted bitmask for bitfield l2_uc_en{f} */ +#define HW_ATL_RPFL2UC_ENF_MSKN 0x7fffffff +/* lower bit position of bitfield l2_uc_en{f} */ +#define HW_ATL_RPFL2UC_ENF_SHIFT 31 +/* width of bitfield l2_uc_en{f} */ +#define HW_ATL_RPFL2UC_ENF_WIDTH 1 +/* default value of bitfield l2_uc_en{f} */ +#define HW_ATL_RPFL2UC_ENF_DEFAULT 0x0 + +/* register address for bitfield l2_uc_da{f}_lsw[1f:0] */ +#define HW_ATL_RPFL2UC_DAFLSW_ADR(filter) (0x00005110 + (filter) * 0x8) +/* register address for bitfield l2_uc_da{f}_msw[f:0] */ +#define HW_ATL_RPFL2UC_DAFMSW_ADR(filter) (0x00005114 + (filter) * 0x8) +/* bitmask for bitfield l2_uc_da{f}_msw[f:0] */ +#define HW_ATL_RPFL2UC_DAFMSW_MSK 0x0000ffff +/* lower bit position of bitfield l2_uc_da{f}_msw[f:0] */ +#define HW_ATL_RPFL2UC_DAFMSW_SHIFT 0 + +/* rx l2_mc_accept_all bitfield definitions + * Preprocessor definitions for the bitfield "l2_mc_accept_all". + * PORT="pif_rpf_l2_mc_all_accept_i" + */ + +/* Register address for bitfield l2_mc_accept_all */ +#define HW_ATL_RPFL2MC_ACCEPT_ALL_ADR 0x00005270 +/* Bitmask for bitfield l2_mc_accept_all */ +#define HW_ATL_RPFL2MC_ACCEPT_ALL_MSK 0x00004000 +/* Inverted bitmask for bitfield l2_mc_accept_all */ +#define HW_ATL_RPFL2MC_ACCEPT_ALL_MSKN 0xFFFFBFFF +/* Lower bit position of bitfield l2_mc_accept_all */ +#define HW_ATL_RPFL2MC_ACCEPT_ALL_SHIFT 14 +/* Width of bitfield l2_mc_accept_all */ +#define HW_ATL_RPFL2MC_ACCEPT_ALL_WIDTH 1 +/* Default value of bitfield l2_mc_accept_all */ +#define HW_ATL_RPFL2MC_ACCEPT_ALL_DEFAULT 0x0 + +/* width of bitfield rx_tc_up{t}[2:0] */ +#define HW_ATL_RPF_RPB_RX_TC_UPT_WIDTH 3 +/* default value of bitfield rx_tc_up{t}[2:0] */ +#define HW_ATL_RPF_RPB_RX_TC_UPT_DEFAULT 0x0 + +/* rx rss_key_addr[4:0] bitfield definitions + * preprocessor definitions for the bitfield "rss_key_addr[4:0]". + * port="pif_rpf_rss_key_addr_i[4:0]" + */ + +/* register address for bitfield rss_key_addr[4:0] */ +#define HW_ATL_RPF_RSS_KEY_ADDR_ADR 0x000054d0 +/* bitmask for bitfield rss_key_addr[4:0] */ +#define HW_ATL_RPF_RSS_KEY_ADDR_MSK 0x0000001f +/* inverted bitmask for bitfield rss_key_addr[4:0] */ +#define HW_ATL_RPF_RSS_KEY_ADDR_MSKN 0xffffffe0 +/* lower bit position of bitfield rss_key_addr[4:0] */ +#define HW_ATL_RPF_RSS_KEY_ADDR_SHIFT 0 +/* width of bitfield rss_key_addr[4:0] */ +#define HW_ATL_RPF_RSS_KEY_ADDR_WIDTH 5 +/* default value of bitfield rss_key_addr[4:0] */ +#define HW_ATL_RPF_RSS_KEY_ADDR_DEFAULT 0x0 + +/* rx rss_key_wr_data[1f:0] bitfield definitions + * preprocessor definitions for the bitfield "rss_key_wr_data[1f:0]". 
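The l2_uc_da{f}_lsw/msw registers above hold the destination MAC address of each unicast filter. A minimal sketch of programming one entry through the accessors from hw_atl_llh.h follows; the byte packing (first two octets in the MSW register, remaining four in the LSW register), the u8 type and the action value 1 are assumptions for the example, not facts stated in this patch.

#include "hw_atl_llh.h"

/* Illustrative only: program unicast (MAC) filter 'idx'. */
static void example_l2_uc_filter_set(struct aq_hw_s *aq_hw, u32 idx,
                                     const u8 mac[6])
{
        u32 msw = ((u32)mac[0] << 8) | mac[1];               /* assumed packing */
        u32 lsw = ((u32)mac[2] << 24) | ((u32)mac[3] << 16) |
                  ((u32)mac[4] << 8) | mac[5];

        hw_atl_rpfl2_uc_flr_en_set(aq_hw, 0U, idx);          /* disable first    */
        hw_atl_rpfl2unicast_dest_addressmsw_set(aq_hw, msw, idx);
        hw_atl_rpfl2unicast_dest_addresslsw_set(aq_hw, lsw, idx);
        hw_atl_rpfl2unicast_flr_act_set(aq_hw, 1U, idx);     /* assumed "accept" */
        hw_atl_rpfl2_uc_flr_en_set(aq_hw, 1U, idx);
}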
+ * port="pif_rpf_rss_key_wr_data_i[31:0]" + */ + +/* register address for bitfield rss_key_wr_data[1f:0] */ +#define HW_ATL_RPF_RSS_KEY_WR_DATA_ADR 0x000054d4 +/* bitmask for bitfield rss_key_wr_data[1f:0] */ +#define HW_ATL_RPF_RSS_KEY_WR_DATA_MSK 0xffffffff +/* inverted bitmask for bitfield rss_key_wr_data[1f:0] */ +#define HW_ATL_RPF_RSS_KEY_WR_DATA_MSKN 0x00000000 +/* lower bit position of bitfield rss_key_wr_data[1f:0] */ +#define HW_ATL_RPF_RSS_KEY_WR_DATA_SHIFT 0 +/* width of bitfield rss_key_wr_data[1f:0] */ +#define HW_ATL_RPF_RSS_KEY_WR_DATA_WIDTH 32 +/* default value of bitfield rss_key_wr_data[1f:0] */ +#define HW_ATL_RPF_RSS_KEY_WR_DATA_DEFAULT 0x0 + +/* rx rss_key_wr_en_i bitfield definitions + * preprocessor definitions for the bitfield "rss_key_wr_en_i". + * port="pif_rpf_rss_key_wr_en_i" + */ + +/* register address for bitfield rss_key_wr_en_i */ +#define HW_ATL_RPF_RSS_KEY_WR_ENI_ADR 0x000054d0 +/* bitmask for bitfield rss_key_wr_en_i */ +#define HW_ATL_RPF_RSS_KEY_WR_ENI_MSK 0x00000020 +/* inverted bitmask for bitfield rss_key_wr_en_i */ +#define HW_ATL_RPF_RSS_KEY_WR_ENI_MSKN 0xffffffdf +/* lower bit position of bitfield rss_key_wr_en_i */ +#define HW_ATL_RPF_RSS_KEY_WR_ENI_SHIFT 5 +/* width of bitfield rss_key_wr_en_i */ +#define HW_ATL_RPF_RSS_KEY_WR_ENI_WIDTH 1 +/* default value of bitfield rss_key_wr_en_i */ +#define HW_ATL_RPF_RSS_KEY_WR_ENI_DEFAULT 0x0 + +/* rx rss_redir_addr[3:0] bitfield definitions + * preprocessor definitions for the bitfield "rss_redir_addr[3:0]". + * port="pif_rpf_rss_redir_addr_i[3:0]" + */ + +/* register address for bitfield rss_redir_addr[3:0] */ +#define HW_ATL_RPF_RSS_REDIR_ADDR_ADR 0x000054e0 +/* bitmask for bitfield rss_redir_addr[3:0] */ +#define HW_ATL_RPF_RSS_REDIR_ADDR_MSK 0x0000000f +/* inverted bitmask for bitfield rss_redir_addr[3:0] */ +#define HW_ATL_RPF_RSS_REDIR_ADDR_MSKN 0xfffffff0 +/* lower bit position of bitfield rss_redir_addr[3:0] */ +#define HW_ATL_RPF_RSS_REDIR_ADDR_SHIFT 0 +/* width of bitfield rss_redir_addr[3:0] */ +#define HW_ATL_RPF_RSS_REDIR_ADDR_WIDTH 4 +/* default value of bitfield rss_redir_addr[3:0] */ +#define HW_ATL_RPF_RSS_REDIR_ADDR_DEFAULT 0x0 + +/* rx rss_redir_wr_data[f:0] bitfield definitions + * preprocessor definitions for the bitfield "rss_redir_wr_data[f:0]". + * port="pif_rpf_rss_redir_wr_data_i[15:0]" + */ + +/* register address for bitfield rss_redir_wr_data[f:0] */ +#define HW_ATL_RPF_RSS_REDIR_WR_DATA_ADR 0x000054e4 +/* bitmask for bitfield rss_redir_wr_data[f:0] */ +#define HW_ATL_RPF_RSS_REDIR_WR_DATA_MSK 0x0000ffff +/* inverted bitmask for bitfield rss_redir_wr_data[f:0] */ +#define HW_ATL_RPF_RSS_REDIR_WR_DATA_MSKN 0xffff0000 +/* lower bit position of bitfield rss_redir_wr_data[f:0] */ +#define HW_ATL_RPF_RSS_REDIR_WR_DATA_SHIFT 0 +/* width of bitfield rss_redir_wr_data[f:0] */ +#define HW_ATL_RPF_RSS_REDIR_WR_DATA_WIDTH 16 +/* default value of bitfield rss_redir_wr_data[f:0] */ +#define HW_ATL_RPF_RSS_REDIR_WR_DATA_DEFAULT 0x0 + +/* rx rss_redir_wr_en_i bitfield definitions + * preprocessor definitions for the bitfield "rss_redir_wr_en_i". 
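The rss_key_addr/rss_key_wr_data/rss_key_wr_en trio above is the indirect interface used to load the RSS hash key one 32-bit word at a time. A minimal sketch follows; the 40-byte key size and the treatment of the write-enable bit as self-clearing (hence the polling loop) are assumptions suggested by the paired set/get accessors rather than stated here.

#include "hw_atl_llh.h"

#define EXAMPLE_RSS_KEY_WORDS 10U  /* 40-byte hash key, assumed size */

/* Illustrative only: load the RSS key word by word through the
 * addr/data/write-enable registers.  A real driver would bound the wait.
 */
static void example_rss_key_set(struct aq_hw_s *aq_hw,
                                const u32 key[EXAMPLE_RSS_KEY_WORDS])
{
        u32 i;

        for (i = 0U; i < EXAMPLE_RSS_KEY_WORDS; i++) {
                hw_atl_rpf_rss_key_wr_data_set(aq_hw, key[i]);
                hw_atl_rpf_rss_key_addr_set(aq_hw, i);
                hw_atl_rpf_rss_key_wr_en_set(aq_hw, 1U);
                while (hw_atl_rpf_rss_key_wr_en_get(aq_hw) != 0U)
                        ;  /* wait for the write to complete */
        }
}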
+ * port="pif_rpf_rss_redir_wr_en_i" + */ + +/* register address for bitfield rss_redir_wr_en_i */ +#define HW_ATL_RPF_RSS_REDIR_WR_ENI_ADR 0x000054e0 +/* bitmask for bitfield rss_redir_wr_en_i */ +#define HW_ATL_RPF_RSS_REDIR_WR_ENI_MSK 0x00000010 +/* inverted bitmask for bitfield rss_redir_wr_en_i */ +#define HW_ATL_RPF_RSS_REDIR_WR_ENI_MSKN 0xffffffef +/* lower bit position of bitfield rss_redir_wr_en_i */ +#define HW_ATL_RPF_RSS_REDIR_WR_ENI_SHIFT 4 +/* width of bitfield rss_redir_wr_en_i */ +#define HW_ATL_RPF_RSS_REDIR_WR_ENI_WIDTH 1 +/* default value of bitfield rss_redir_wr_en_i */ +#define HW_ATL_RPF_RSS_REDIR_WR_ENI_DEFAULT 0x0 + +/* rx tpo_rpf_sys_loopback bitfield definitions + * preprocessor definitions for the bitfield "tpo_rpf_sys_loopback". + * port="pif_rpf_tpo_pkt_sys_lbk_i" + */ + +/* register address for bitfield tpo_rpf_sys_loopback */ +#define HW_ATL_RPF_TPO_RPF_SYS_LBK_ADR 0x00005000 +/* bitmask for bitfield tpo_rpf_sys_loopback */ +#define HW_ATL_RPF_TPO_RPF_SYS_LBK_MSK 0x00000100 +/* inverted bitmask for bitfield tpo_rpf_sys_loopback */ +#define HW_ATL_RPF_TPO_RPF_SYS_LBK_MSKN 0xfffffeff +/* lower bit position of bitfield tpo_rpf_sys_loopback */ +#define HW_ATL_RPF_TPO_RPF_SYS_LBK_SHIFT 8 +/* width of bitfield tpo_rpf_sys_loopback */ +#define HW_ATL_RPF_TPO_RPF_SYS_LBK_WIDTH 1 +/* default value of bitfield tpo_rpf_sys_loopback */ +#define HW_ATL_RPF_TPO_RPF_SYS_LBK_DEFAULT 0x0 + +/* rx vl_inner_tpid[f:0] bitfield definitions + * preprocessor definitions for the bitfield "vl_inner_tpid[f:0]". + * port="pif_rpf_vl_inner_tpid_i[15:0]" + */ + +/* register address for bitfield vl_inner_tpid[f:0] */ +#define HW_ATL_RPF_VL_INNER_TPID_ADR 0x00005284 +/* bitmask for bitfield vl_inner_tpid[f:0] */ +#define HW_ATL_RPF_VL_INNER_TPID_MSK 0x0000ffff +/* inverted bitmask for bitfield vl_inner_tpid[f:0] */ +#define HW_ATL_RPF_VL_INNER_TPID_MSKN 0xffff0000 +/* lower bit position of bitfield vl_inner_tpid[f:0] */ +#define HW_ATL_RPF_VL_INNER_TPID_SHIFT 0 +/* width of bitfield vl_inner_tpid[f:0] */ +#define HW_ATL_RPF_VL_INNER_TPID_WIDTH 16 +/* default value of bitfield vl_inner_tpid[f:0] */ +#define HW_ATL_RPF_VL_INNER_TPID_DEFAULT 0x8100 + +/* rx vl_outer_tpid[f:0] bitfield definitions + * preprocessor definitions for the bitfield "vl_outer_tpid[f:0]". + * port="pif_rpf_vl_outer_tpid_i[15:0]" + */ + +/* register address for bitfield vl_outer_tpid[f:0] */ +#define HW_ATL_RPF_VL_OUTER_TPID_ADR 0x00005284 +/* bitmask for bitfield vl_outer_tpid[f:0] */ +#define HW_ATL_RPF_VL_OUTER_TPID_MSK 0xffff0000 +/* inverted bitmask for bitfield vl_outer_tpid[f:0] */ +#define HW_ATL_RPF_VL_OUTER_TPID_MSKN 0x0000ffff +/* lower bit position of bitfield vl_outer_tpid[f:0] */ +#define HW_ATL_RPF_VL_OUTER_TPID_SHIFT 16 +/* width of bitfield vl_outer_tpid[f:0] */ +#define HW_ATL_RPF_VL_OUTER_TPID_WIDTH 16 +/* default value of bitfield vl_outer_tpid[f:0] */ +#define HW_ATL_RPF_VL_OUTER_TPID_DEFAULT 0x88a8 + +/* rx vl_promis_mode bitfield definitions + * preprocessor definitions for the bitfield "vl_promis_mode". 
+ * port="pif_rpf_vl_promis_mode_i" + */ + +/* register address for bitfield vl_promis_mode */ +#define HW_ATL_RPF_VL_PROMIS_MODE_ADR 0x00005280 +/* bitmask for bitfield vl_promis_mode */ +#define HW_ATL_RPF_VL_PROMIS_MODE_MSK 0x00000002 +/* inverted bitmask for bitfield vl_promis_mode */ +#define HW_ATL_RPF_VL_PROMIS_MODE_MSKN 0xfffffffd +/* lower bit position of bitfield vl_promis_mode */ +#define HW_ATL_RPF_VL_PROMIS_MODE_SHIFT 1 +/* width of bitfield vl_promis_mode */ +#define HW_ATL_RPF_VL_PROMIS_MODE_WIDTH 1 +/* default value of bitfield vl_promis_mode */ +#define HW_ATL_RPF_VL_PROMIS_MODE_DEFAULT 0x0 + +/* RX vl_accept_untagged_mode Bitfield Definitions + * Preprocessor definitions for the bitfield "vl_accept_untagged_mode". + * PORT="pif_rpf_vl_accept_untagged_i" + */ + +/* Register address for bitfield vl_accept_untagged_mode */ +#define HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_ADR 0x00005280 +/* Bitmask for bitfield vl_accept_untagged_mode */ +#define HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_MSK 0x00000004 +/* Inverted bitmask for bitfield vl_accept_untagged_mode */ +#define HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_MSKN 0xFFFFFFFB +/* Lower bit position of bitfield vl_accept_untagged_mode */ +#define HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_SHIFT 2 +/* Width of bitfield vl_accept_untagged_mode */ +#define HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_WIDTH 1 +/* Default value of bitfield vl_accept_untagged_mode */ +#define HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_DEFAULT 0x0 + +/* rX vl_untagged_act[2:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "vl_untagged_act[2:0]". + * PORT="pif_rpf_vl_untagged_act_i[2:0]" + */ + +/* Register address for bitfield vl_untagged_act[2:0] */ +#define HW_ATL_RPF_VL_UNTAGGED_ACT_ADR 0x00005280 +/* Bitmask for bitfield vl_untagged_act[2:0] */ +#define HW_ATL_RPF_VL_UNTAGGED_ACT_MSK 0x00000038 +/* Inverted bitmask for bitfield vl_untagged_act[2:0] */ +#define HW_ATL_RPF_VL_UNTAGGED_ACT_MSKN 0xFFFFFFC7 +/* Lower bit position of bitfield vl_untagged_act[2:0] */ +#define HW_ATL_RPF_VL_UNTAGGED_ACT_SHIFT 3 +/* Width of bitfield vl_untagged_act[2:0] */ +#define HW_ATL_RPF_VL_UNTAGGED_ACT_WIDTH 3 +/* Default value of bitfield vl_untagged_act[2:0] */ +#define HW_ATL_RPF_VL_UNTAGGED_ACT_DEFAULT 0x0 + +/* RX vl_en{F} Bitfield Definitions + * Preprocessor definitions for the bitfield "vl_en{F}". + * Parameter: filter {F} | stride size 0x4 | range [0, 15] + * PORT="pif_rpf_vl_en_i[0]" + */ + +/* Register address for bitfield vl_en{F} */ +#define HW_ATL_RPF_VL_EN_F_ADR(filter) (0x00005290 + (filter) * 0x4) +/* Bitmask for bitfield vl_en{F} */ +#define HW_ATL_RPF_VL_EN_F_MSK 0x80000000 +/* Inverted bitmask for bitfield vl_en{F} */ +#define HW_ATL_RPF_VL_EN_F_MSKN 0x7FFFFFFF +/* Lower bit position of bitfield vl_en{F} */ +#define HW_ATL_RPF_VL_EN_F_SHIFT 31 +/* Width of bitfield vl_en{F} */ +#define HW_ATL_RPF_VL_EN_F_WIDTH 1 +/* Default value of bitfield vl_en{F} */ +#define HW_ATL_RPF_VL_EN_F_DEFAULT 0x0 + +/* RX vl_act{F}[2:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "vl_act{F}[2:0]". 
+ * Parameter: filter {F} | stride size 0x4 | range [0, 15] + * PORT="pif_rpf_vl_act0_i[2:0]" + */ + +/* Register address for bitfield vl_act{F}[2:0] */ +#define HW_ATL_RPF_VL_ACT_F_ADR(filter) (0x00005290 + (filter) * 0x4) +/* Bitmask for bitfield vl_act{F}[2:0] */ +#define HW_ATL_RPF_VL_ACT_F_MSK 0x00070000 +/* Inverted bitmask for bitfield vl_act{F}[2:0] */ +#define HW_ATL_RPF_VL_ACT_F_MSKN 0xFFF8FFFF +/* Lower bit position of bitfield vl_act{F}[2:0] */ +#define HW_ATL_RPF_VL_ACT_F_SHIFT 16 +/* Width of bitfield vl_act{F}[2:0] */ +#define HW_ATL_RPF_VL_ACT_F_WIDTH 3 +/* Default value of bitfield vl_act{F}[2:0] */ +#define HW_ATL_RPF_VL_ACT_F_DEFAULT 0x0 + +/* RX vl_id{F}[B:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "vl_id{F}[B:0]". + * Parameter: filter {F} | stride size 0x4 | range [0, 15] + * PORT="pif_rpf_vl_id0_i[11:0]" + */ + +/* Register address for bitfield vl_id{F}[B:0] */ +#define HW_ATL_RPF_VL_ID_F_ADR(filter) (0x00005290 + (filter) * 0x4) +/* Bitmask for bitfield vl_id{F}[B:0] */ +#define HW_ATL_RPF_VL_ID_F_MSK 0x00000FFF +/* Inverted bitmask for bitfield vl_id{F}[B:0] */ +#define HW_ATL_RPF_VL_ID_F_MSKN 0xFFFFF000 +/* Lower bit position of bitfield vl_id{F}[B:0] */ +#define HW_ATL_RPF_VL_ID_F_SHIFT 0 +/* Width of bitfield vl_id{F}[B:0] */ +#define HW_ATL_RPF_VL_ID_F_WIDTH 12 +/* Default value of bitfield vl_id{F}[B:0] */ +#define HW_ATL_RPF_VL_ID_F_DEFAULT 0x0 + +/* RX et_en{F} Bitfield Definitions + * Preprocessor definitions for the bitfield "et_en{F}". + * Parameter: filter {F} | stride size 0x4 | range [0, 15] + * PORT="pif_rpf_et_en_i[0]" + */ + +/* Register address for bitfield et_en{F} */ +#define HW_ATL_RPF_ET_EN_F_ADR(filter) (0x00005300 + (filter) * 0x4) +/* Bitmask for bitfield et_en{F} */ +#define HW_ATL_RPF_ET_EN_F_MSK 0x80000000 +/* Inverted bitmask for bitfield et_en{F} */ +#define HW_ATL_RPF_ET_EN_F_MSKN 0x7FFFFFFF +/* Lower bit position of bitfield et_en{F} */ +#define HW_ATL_RPF_ET_EN_F_SHIFT 31 +/* Width of bitfield et_en{F} */ +#define HW_ATL_RPF_ET_EN_F_WIDTH 1 +/* Default value of bitfield et_en{F} */ +#define HW_ATL_RPF_ET_EN_F_DEFAULT 0x0 + +/* rx et_en{f} bitfield definitions + * preprocessor definitions for the bitfield "et_en{f}". + * parameter: filter {f} | stride size 0x4 | range [0, 15] + * port="pif_rpf_et_en_i[0]" + */ + +/* register address for bitfield et_en{f} */ +#define HW_ATL_RPF_ET_ENF_ADR(filter) (0x00005300 + (filter) * 0x4) +/* bitmask for bitfield et_en{f} */ +#define HW_ATL_RPF_ET_ENF_MSK 0x80000000 +/* inverted bitmask for bitfield et_en{f} */ +#define HW_ATL_RPF_ET_ENF_MSKN 0x7fffffff +/* lower bit position of bitfield et_en{f} */ +#define HW_ATL_RPF_ET_ENF_SHIFT 31 +/* width of bitfield et_en{f} */ +#define HW_ATL_RPF_ET_ENF_WIDTH 1 +/* default value of bitfield et_en{f} */ +#define HW_ATL_RPF_ET_ENF_DEFAULT 0x0 + +/* rx et_up{f}_en bitfield definitions + * preprocessor definitions for the bitfield "et_up{f}_en". 
+ * parameter: filter {f} | stride size 0x4 | range [0, 15] + * port="pif_rpf_et_up_en_i[0]" + */ + +/* register address for bitfield et_up{f}_en */ +#define HW_ATL_RPF_ET_UPFEN_ADR(filter) (0x00005300 + (filter) * 0x4) +/* bitmask for bitfield et_up{f}_en */ +#define HW_ATL_RPF_ET_UPFEN_MSK 0x40000000 +/* inverted bitmask for bitfield et_up{f}_en */ +#define HW_ATL_RPF_ET_UPFEN_MSKN 0xbfffffff +/* lower bit position of bitfield et_up{f}_en */ +#define HW_ATL_RPF_ET_UPFEN_SHIFT 30 +/* width of bitfield et_up{f}_en */ +#define HW_ATL_RPF_ET_UPFEN_WIDTH 1 +/* default value of bitfield et_up{f}_en */ +#define HW_ATL_RPF_ET_UPFEN_DEFAULT 0x0 + +/* rx et_rxq{f}_en bitfield definitions + * preprocessor definitions for the bitfield "et_rxq{f}_en". + * parameter: filter {f} | stride size 0x4 | range [0, 15] + * port="pif_rpf_et_rxq_en_i[0]" + */ + +/* register address for bitfield et_rxq{f}_en */ +#define HW_ATL_RPF_ET_RXQFEN_ADR(filter) (0x00005300 + (filter) * 0x4) +/* bitmask for bitfield et_rxq{f}_en */ +#define HW_ATL_RPF_ET_RXQFEN_MSK 0x20000000 +/* inverted bitmask for bitfield et_rxq{f}_en */ +#define HW_ATL_RPF_ET_RXQFEN_MSKN 0xdfffffff +/* lower bit position of bitfield et_rxq{f}_en */ +#define HW_ATL_RPF_ET_RXQFEN_SHIFT 29 +/* width of bitfield et_rxq{f}_en */ +#define HW_ATL_RPF_ET_RXQFEN_WIDTH 1 +/* default value of bitfield et_rxq{f}_en */ +#define HW_ATL_RPF_ET_RXQFEN_DEFAULT 0x0 + +/* rx et_up{f}[2:0] bitfield definitions + * preprocessor definitions for the bitfield "et_up{f}[2:0]". + * parameter: filter {f} | stride size 0x4 | range [0, 15] + * port="pif_rpf_et_up0_i[2:0]" + */ + +/* register address for bitfield et_up{f}[2:0] */ +#define HW_ATL_RPF_ET_UPF_ADR(filter) (0x00005300 + (filter) * 0x4) +/* bitmask for bitfield et_up{f}[2:0] */ +#define HW_ATL_RPF_ET_UPF_MSK 0x1c000000 +/* inverted bitmask for bitfield et_up{f}[2:0] */ +#define HW_ATL_RPF_ET_UPF_MSKN 0xe3ffffff +/* lower bit position of bitfield et_up{f}[2:0] */ +#define HW_ATL_RPF_ET_UPF_SHIFT 26 +/* width of bitfield et_up{f}[2:0] */ +#define HW_ATL_RPF_ET_UPF_WIDTH 3 +/* default value of bitfield et_up{f}[2:0] */ +#define HW_ATL_RPF_ET_UPF_DEFAULT 0x0 + +/* rx et_rxq{f}[4:0] bitfield definitions + * preprocessor definitions for the bitfield "et_rxq{f}[4:0]". + * parameter: filter {f} | stride size 0x4 | range [0, 15] + * port="pif_rpf_et_rxq0_i[4:0]" + */ + +/* register address for bitfield et_rxq{f}[4:0] */ +#define HW_ATL_RPF_ET_RXQF_ADR(filter) (0x00005300 + (filter) * 0x4) +/* bitmask for bitfield et_rxq{f}[4:0] */ +#define HW_ATL_RPF_ET_RXQF_MSK 0x01f00000 +/* inverted bitmask for bitfield et_rxq{f}[4:0] */ +#define HW_ATL_RPF_ET_RXQF_MSKN 0xfe0fffff +/* lower bit position of bitfield et_rxq{f}[4:0] */ +#define HW_ATL_RPF_ET_RXQF_SHIFT 20 +/* width of bitfield et_rxq{f}[4:0] */ +#define HW_ATL_RPF_ET_RXQF_WIDTH 5 +/* default value of bitfield et_rxq{f}[4:0] */ +#define HW_ATL_RPF_ET_RXQF_DEFAULT 0x0 + +/* rx et_mng_rxq{f} bitfield definitions + * preprocessor definitions for the bitfield "et_mng_rxq{f}". 
+ * parameter: filter {f} | stride size 0x4 | range [0, 15] + * port="pif_rpf_et_mng_rxq_i[0]" + */ + +/* register address for bitfield et_mng_rxq{f} */ +#define HW_ATL_RPF_ET_MNG_RXQF_ADR(filter) (0x00005300 + (filter) * 0x4) +/* bitmask for bitfield et_mng_rxq{f} */ +#define HW_ATL_RPF_ET_MNG_RXQF_MSK 0x00080000 +/* inverted bitmask for bitfield et_mng_rxq{f} */ +#define HW_ATL_RPF_ET_MNG_RXQF_MSKN 0xfff7ffff +/* lower bit position of bitfield et_mng_rxq{f} */ +#define HW_ATL_RPF_ET_MNG_RXQF_SHIFT 19 +/* width of bitfield et_mng_rxq{f} */ +#define HW_ATL_RPF_ET_MNG_RXQF_WIDTH 1 +/* default value of bitfield et_mng_rxq{f} */ +#define HW_ATL_RPF_ET_MNG_RXQF_DEFAULT 0x0 + +/* rx et_act{f}[2:0] bitfield definitions + * preprocessor definitions for the bitfield "et_act{f}[2:0]". + * parameter: filter {f} | stride size 0x4 | range [0, 15] + * port="pif_rpf_et_act0_i[2:0]" + */ + +/* register address for bitfield et_act{f}[2:0] */ +#define HW_ATL_RPF_ET_ACTF_ADR(filter) (0x00005300 + (filter) * 0x4) +/* bitmask for bitfield et_act{f}[2:0] */ +#define HW_ATL_RPF_ET_ACTF_MSK 0x00070000 +/* inverted bitmask for bitfield et_act{f}[2:0] */ +#define HW_ATL_RPF_ET_ACTF_MSKN 0xfff8ffff +/* lower bit position of bitfield et_act{f}[2:0] */ +#define HW_ATL_RPF_ET_ACTF_SHIFT 16 +/* width of bitfield et_act{f}[2:0] */ +#define HW_ATL_RPF_ET_ACTF_WIDTH 3 +/* default value of bitfield et_act{f}[2:0] */ +#define HW_ATL_RPF_ET_ACTF_DEFAULT 0x0 + +/* rx et_val{f}[f:0] bitfield definitions + * preprocessor definitions for the bitfield "et_val{f}[f:0]". + * parameter: filter {f} | stride size 0x4 | range [0, 15] + * port="pif_rpf_et_val0_i[15:0]" + */ + +/* register address for bitfield et_val{f}[f:0] */ +#define HW_ATL_RPF_ET_VALF_ADR(filter) (0x00005300 + (filter) * 0x4) +/* bitmask for bitfield et_val{f}[f:0] */ +#define HW_ATL_RPF_ET_VALF_MSK 0x0000ffff +/* inverted bitmask for bitfield et_val{f}[f:0] */ +#define HW_ATL_RPF_ET_VALF_MSKN 0xffff0000 +/* lower bit position of bitfield et_val{f}[f:0] */ +#define HW_ATL_RPF_ET_VALF_SHIFT 0 +/* width of bitfield et_val{f}[f:0] */ +#define HW_ATL_RPF_ET_VALF_WIDTH 16 +/* default value of bitfield et_val{f}[f:0] */ +#define HW_ATL_RPF_ET_VALF_DEFAULT 0x0 + +/* rx ipv4_chk_en bitfield definitions + * preprocessor definitions for the bitfield "ipv4_chk_en". + * port="pif_rpo_ipv4_chk_en_i" + */ + +/* register address for bitfield ipv4_chk_en */ +#define HW_ATL_RPO_IPV4CHK_EN_ADR 0x00005580 +/* bitmask for bitfield ipv4_chk_en */ +#define HW_ATL_RPO_IPV4CHK_EN_MSK 0x00000002 +/* inverted bitmask for bitfield ipv4_chk_en */ +#define HW_ATL_RPO_IPV4CHK_EN_MSKN 0xfffffffd +/* lower bit position of bitfield ipv4_chk_en */ +#define HW_ATL_RPO_IPV4CHK_EN_SHIFT 1 +/* width of bitfield ipv4_chk_en */ +#define HW_ATL_RPO_IPV4CHK_EN_WIDTH 1 +/* default value of bitfield ipv4_chk_en */ +#define HW_ATL_RPO_IPV4CHK_EN_DEFAULT 0x0 + +/* rx desc{d}_vl_strip bitfield definitions + * preprocessor definitions for the bitfield "desc{d}_vl_strip". 
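The et_* bitfields above describe the ethertype filters whose accessors are declared in hw_atl_llh.h. The sketch below steers one ethertype (for instance 0x0806 for ARP) to a chosen rx queue; the action value 1 (assumed here to mean "accept to host") is illustrative, not defined in this patch.

#include "hw_atl_llh.h"

/* Illustrative only: match 'ethertype' on filter 'idx' and direct it
 * to 'rx_queue'.
 */
static void example_etht_filter_set(struct aq_hw_s *aq_hw, u32 idx,
                                    u32 ethertype, u32 rx_queue)
{
        hw_atl_rpf_etht_flr_en_set(aq_hw, 0U, idx);      /* disable while updating */
        hw_atl_rpf_etht_flr_set(aq_hw, ethertype, idx);  /* 16-bit ethertype value */
        hw_atl_rpf_etht_rx_queue_set(aq_hw, rx_queue, idx);
        hw_atl_rpf_etht_rx_queue_en_set(aq_hw, 1U, idx);
        hw_atl_rpf_etht_flr_act_set(aq_hw, 1U, idx);     /* assumed "accept" action */
        hw_atl_rpf_etht_flr_en_set(aq_hw, 1U, idx);
}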
+ * parameter: descriptor {d} | stride size 0x20 | range [0, 31] + * port="pif_rpo_desc_vl_strip_i[0]" + */ + +/* register address for bitfield desc{d}_vl_strip */ +#define HW_ATL_RPO_DESCDVL_STRIP_ADR(descriptor) \ + (0x00005b08 + (descriptor) * 0x20) +/* bitmask for bitfield desc{d}_vl_strip */ +#define HW_ATL_RPO_DESCDVL_STRIP_MSK 0x20000000 +/* inverted bitmask for bitfield desc{d}_vl_strip */ +#define HW_ATL_RPO_DESCDVL_STRIP_MSKN 0xdfffffff +/* lower bit position of bitfield desc{d}_vl_strip */ +#define HW_ATL_RPO_DESCDVL_STRIP_SHIFT 29 +/* width of bitfield desc{d}_vl_strip */ +#define HW_ATL_RPO_DESCDVL_STRIP_WIDTH 1 +/* default value of bitfield desc{d}_vl_strip */ +#define HW_ATL_RPO_DESCDVL_STRIP_DEFAULT 0x0 + +/* rx l4_chk_en bitfield definitions + * preprocessor definitions for the bitfield "l4_chk_en". + * port="pif_rpo_l4_chk_en_i" + */ + +/* register address for bitfield l4_chk_en */ +#define HW_ATL_RPOL4CHK_EN_ADR 0x00005580 +/* bitmask for bitfield l4_chk_en */ +#define HW_ATL_RPOL4CHK_EN_MSK 0x00000001 +/* inverted bitmask for bitfield l4_chk_en */ +#define HW_ATL_RPOL4CHK_EN_MSKN 0xfffffffe +/* lower bit position of bitfield l4_chk_en */ +#define HW_ATL_RPOL4CHK_EN_SHIFT 0 +/* width of bitfield l4_chk_en */ +#define HW_ATL_RPOL4CHK_EN_WIDTH 1 +/* default value of bitfield l4_chk_en */ +#define HW_ATL_RPOL4CHK_EN_DEFAULT 0x0 + +/* rx reg_res_dsbl bitfield definitions + * preprocessor definitions for the bitfield "reg_res_dsbl". + * port="pif_rx_reg_res_dsbl_i" + */ + +/* register address for bitfield reg_res_dsbl */ +#define HW_ATL_RX_REG_RES_DSBL_ADR 0x00005000 +/* bitmask for bitfield reg_res_dsbl */ +#define HW_ATL_RX_REG_RES_DSBL_MSK 0x20000000 +/* inverted bitmask for bitfield reg_res_dsbl */ +#define HW_ATL_RX_REG_RES_DSBL_MSKN 0xdfffffff +/* lower bit position of bitfield reg_res_dsbl */ +#define HW_ATL_RX_REG_RES_DSBL_SHIFT 29 +/* width of bitfield reg_res_dsbl */ +#define HW_ATL_RX_REG_RES_DSBL_WIDTH 1 +/* default value of bitfield reg_res_dsbl */ +#define HW_ATL_RX_REG_RES_DSBL_DEFAULT 0x1 + +/* tx dca{d}_cpuid[7:0] bitfield definitions + * preprocessor definitions for the bitfield "dca{d}_cpuid[7:0]". + * parameter: dca {d} | stride size 0x4 | range [0, 31] + * port="pif_tdm_dca0_cpuid_i[7:0]" + */ + +/* register address for bitfield dca{d}_cpuid[7:0] */ +#define HW_ATL_TDM_DCADCPUID_ADR(dca) (0x00008400 + (dca) * 0x4) +/* bitmask for bitfield dca{d}_cpuid[7:0] */ +#define HW_ATL_TDM_DCADCPUID_MSK 0x000000ff +/* inverted bitmask for bitfield dca{d}_cpuid[7:0] */ +#define HW_ATL_TDM_DCADCPUID_MSKN 0xffffff00 +/* lower bit position of bitfield dca{d}_cpuid[7:0] */ +#define HW_ATL_TDM_DCADCPUID_SHIFT 0 +/* width of bitfield dca{d}_cpuid[7:0] */ +#define HW_ATL_TDM_DCADCPUID_WIDTH 8 +/* default value of bitfield dca{d}_cpuid[7:0] */ +#define HW_ATL_TDM_DCADCPUID_DEFAULT 0x0 + +/* tx lso_en[1f:0] bitfield definitions + * preprocessor definitions for the bitfield "lso_en[1f:0]". 
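
The desc{d}_vl_strip bit is a per-descriptor-ring flag with a 0x20 stride, so ring 5 is addressed at 0x5b08 + 5 * 0x20 = 0x5ba8. A sketch of toggling the bit in a register image; the helper name is illustrative only:

#include <stdint.h>

/* illustrative only: set or clear VLAN stripping for one RX descriptor ring */
static inline uint32_t
atl_rpo_vlan_strip_bit(uint32_t reg, int enable)
{
	if (enable)
		return reg | HW_ATL_RPO_DESCDVL_STRIP_MSK;
	return reg & HW_ATL_RPO_DESCDVL_STRIP_MSKN;
}
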
+ * port="pif_tdm_lso_en_i[31:0]" + */ + +/* register address for bitfield lso_en[1f:0] */ +#define HW_ATL_TDM_LSO_EN_ADR 0x00007810 +/* bitmask for bitfield lso_en[1f:0] */ +#define HW_ATL_TDM_LSO_EN_MSK 0xffffffff +/* inverted bitmask for bitfield lso_en[1f:0] */ +#define HW_ATL_TDM_LSO_EN_MSKN 0x00000000 +/* lower bit position of bitfield lso_en[1f:0] */ +#define HW_ATL_TDM_LSO_EN_SHIFT 0 +/* width of bitfield lso_en[1f:0] */ +#define HW_ATL_TDM_LSO_EN_WIDTH 32 +/* default value of bitfield lso_en[1f:0] */ +#define HW_ATL_TDM_LSO_EN_DEFAULT 0x0 + +/* tx dca_en bitfield definitions + * preprocessor definitions for the bitfield "dca_en". + * port="pif_tdm_dca_en_i" + */ + +/* register address for bitfield dca_en */ +#define HW_ATL_TDM_DCA_EN_ADR 0x00008480 +/* bitmask for bitfield dca_en */ +#define HW_ATL_TDM_DCA_EN_MSK 0x80000000 +/* inverted bitmask for bitfield dca_en */ +#define HW_ATL_TDM_DCA_EN_MSKN 0x7fffffff +/* lower bit position of bitfield dca_en */ +#define HW_ATL_TDM_DCA_EN_SHIFT 31 +/* width of bitfield dca_en */ +#define HW_ATL_TDM_DCA_EN_WIDTH 1 +/* default value of bitfield dca_en */ +#define HW_ATL_TDM_DCA_EN_DEFAULT 0x1 + +/* tx dca_mode[3:0] bitfield definitions + * preprocessor definitions for the bitfield "dca_mode[3:0]". + * port="pif_tdm_dca_mode_i[3:0]" + */ + +/* register address for bitfield dca_mode[3:0] */ +#define HW_ATL_TDM_DCA_MODE_ADR 0x00008480 +/* bitmask for bitfield dca_mode[3:0] */ +#define HW_ATL_TDM_DCA_MODE_MSK 0x0000000f +/* inverted bitmask for bitfield dca_mode[3:0] */ +#define HW_ATL_TDM_DCA_MODE_MSKN 0xfffffff0 +/* lower bit position of bitfield dca_mode[3:0] */ +#define HW_ATL_TDM_DCA_MODE_SHIFT 0 +/* width of bitfield dca_mode[3:0] */ +#define HW_ATL_TDM_DCA_MODE_WIDTH 4 +/* default value of bitfield dca_mode[3:0] */ +#define HW_ATL_TDM_DCA_MODE_DEFAULT 0x0 + +/* tx dca{d}_desc_en bitfield definitions + * preprocessor definitions for the bitfield "dca{d}_desc_en". + * parameter: dca {d} | stride size 0x4 | range [0, 31] + * port="pif_tdm_dca_desc_en_i[0]" + */ + +/* register address for bitfield dca{d}_desc_en */ +#define HW_ATL_TDM_DCADDESC_EN_ADR(dca) (0x00008400 + (dca) * 0x4) +/* bitmask for bitfield dca{d}_desc_en */ +#define HW_ATL_TDM_DCADDESC_EN_MSK 0x80000000 +/* inverted bitmask for bitfield dca{d}_desc_en */ +#define HW_ATL_TDM_DCADDESC_EN_MSKN 0x7fffffff +/* lower bit position of bitfield dca{d}_desc_en */ +#define HW_ATL_TDM_DCADDESC_EN_SHIFT 31 +/* width of bitfield dca{d}_desc_en */ +#define HW_ATL_TDM_DCADDESC_EN_WIDTH 1 +/* default value of bitfield dca{d}_desc_en */ +#define HW_ATL_TDM_DCADDESC_EN_DEFAULT 0x0 + +/* tx desc{d}_en bitfield definitions + * preprocessor definitions for the bitfield "desc{d}_en". + * parameter: descriptor {d} | stride size 0x40 | range [0, 31] + * port="pif_tdm_desc_en_i[0]" + */ + +/* register address for bitfield desc{d}_en */ +#define HW_ATL_TDM_DESCDEN_ADR(descriptor) (0x00007c08 + (descriptor) * 0x40) +/* bitmask for bitfield desc{d}_en */ +#define HW_ATL_TDM_DESCDEN_MSK 0x80000000 +/* inverted bitmask for bitfield desc{d}_en */ +#define HW_ATL_TDM_DESCDEN_MSKN 0x7fffffff +/* lower bit position of bitfield desc{d}_en */ +#define HW_ATL_TDM_DESCDEN_SHIFT 31 +/* width of bitfield desc{d}_en */ +#define HW_ATL_TDM_DESCDEN_WIDTH 1 +/* default value of bitfield desc{d}_en */ +#define HW_ATL_TDM_DESCDEN_DEFAULT 0x0 + +/* tx desc{d}_hd[c:0] bitfield definitions + * preprocessor definitions for the bitfield "desc{d}_hd[c:0]". 
+ * parameter: descriptor {d} | stride size 0x40 | range [0, 31] + * port="tdm_pif_desc0_hd_o[12:0]" + */ + +/* register address for bitfield desc{d}_hd[c:0] */ +#define HW_ATL_TDM_DESCDHD_ADR(descriptor) (0x00007c0c + (descriptor) * 0x40) +/* bitmask for bitfield desc{d}_hd[c:0] */ +#define HW_ATL_TDM_DESCDHD_MSK 0x00001fff +/* inverted bitmask for bitfield desc{d}_hd[c:0] */ +#define HW_ATL_TDM_DESCDHD_MSKN 0xffffe000 +/* lower bit position of bitfield desc{d}_hd[c:0] */ +#define HW_ATL_TDM_DESCDHD_SHIFT 0 +/* width of bitfield desc{d}_hd[c:0] */ +#define HW_ATL_TDM_DESCDHD_WIDTH 13 + +/* tx desc{d}_len[9:0] bitfield definitions + * preprocessor definitions for the bitfield "desc{d}_len[9:0]". + * parameter: descriptor {d} | stride size 0x40 | range [0, 31] + * port="pif_tdm_desc0_len_i[9:0]" + */ + +/* register address for bitfield desc{d}_len[9:0] */ +#define HW_ATL_TDM_DESCDLEN_ADR(descriptor) (0x00007c08 + (descriptor) * 0x40) +/* bitmask for bitfield desc{d}_len[9:0] */ +#define HW_ATL_TDM_DESCDLEN_MSK 0x00001ff8 +/* inverted bitmask for bitfield desc{d}_len[9:0] */ +#define HW_ATL_TDM_DESCDLEN_MSKN 0xffffe007 +/* lower bit position of bitfield desc{d}_len[9:0] */ +#define HW_ATL_TDM_DESCDLEN_SHIFT 3 +/* width of bitfield desc{d}_len[9:0] */ +#define HW_ATL_TDM_DESCDLEN_WIDTH 10 +/* default value of bitfield desc{d}_len[9:0] */ +#define HW_ATL_TDM_DESCDLEN_DEFAULT 0x0 + +/* tx int_desc_wrb_en bitfield definitions + * preprocessor definitions for the bitfield "int_desc_wrb_en". + * port="pif_tdm_int_desc_wrb_en_i" + */ + +/* register address for bitfield int_desc_wrb_en */ +#define HW_ATL_TDM_INT_DESC_WRB_EN_ADR 0x00007b40 +/* bitmask for bitfield int_desc_wrb_en */ +#define HW_ATL_TDM_INT_DESC_WRB_EN_MSK 0x00000002 +/* inverted bitmask for bitfield int_desc_wrb_en */ +#define HW_ATL_TDM_INT_DESC_WRB_EN_MSKN 0xfffffffd +/* lower bit position of bitfield int_desc_wrb_en */ +#define HW_ATL_TDM_INT_DESC_WRB_EN_SHIFT 1 +/* width of bitfield int_desc_wrb_en */ +#define HW_ATL_TDM_INT_DESC_WRB_EN_WIDTH 1 +/* default value of bitfield int_desc_wrb_en */ +#define HW_ATL_TDM_INT_DESC_WRB_EN_DEFAULT 0x0 + +/* tx desc{d}_wrb_thresh[6:0] bitfield definitions + * preprocessor definitions for the bitfield "desc{d}_wrb_thresh[6:0]". + * parameter: descriptor {d} | stride size 0x40 | range [0, 31] + * port="pif_tdm_desc0_wrb_thresh_i[6:0]" + */ + +/* register address for bitfield desc{d}_wrb_thresh[6:0] */ +#define HW_ATL_TDM_DESCDWRB_THRESH_ADR(descriptor) \ + (0x00007c18 + (descriptor) * 0x40) +/* bitmask for bitfield desc{d}_wrb_thresh[6:0] */ +#define HW_ATL_TDM_DESCDWRB_THRESH_MSK 0x00007f00 +/* inverted bitmask for bitfield desc{d}_wrb_thresh[6:0] */ +#define HW_ATL_TDM_DESCDWRB_THRESH_MSKN 0xffff80ff +/* lower bit position of bitfield desc{d}_wrb_thresh[6:0] */ +#define HW_ATL_TDM_DESCDWRB_THRESH_SHIFT 8 +/* width of bitfield desc{d}_wrb_thresh[6:0] */ +#define HW_ATL_TDM_DESCDWRB_THRESH_WIDTH 7 +/* default value of bitfield desc{d}_wrb_thresh[6:0] */ +#define HW_ATL_TDM_DESCDWRB_THRESH_DEFAULT 0x0 + +/* tx lso_tcp_flag_first[b:0] bitfield definitions + * preprocessor definitions for the bitfield "lso_tcp_flag_first[b:0]". 
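
desc{d}_len[9:0] sits at bits 12:3 of the per-ring control register (0x7c08 + d * 0x40), so with a 10-bit width the field can describe up to 1023 units; the value written is presumably the ring length in units of eight descriptors, which is how the rest of the driver appears to program it. A hedged sketch with an illustrative helper name:

#include <stdint.h>

/* illustrative only: insert a length value into desc{d}_len[9:0] (bits 12:3);
 * len_div8 is assumed to be the ring size divided by 8 */
static inline uint32_t
atl_tdm_desc_len_field(uint32_t reg, uint32_t len_div8)
{
	reg &= HW_ATL_TDM_DESCDLEN_MSKN;
	reg |= (len_div8 << HW_ATL_TDM_DESCDLEN_SHIFT) &
	       HW_ATL_TDM_DESCDLEN_MSK;
	return reg;
}
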
+ * port="pif_thm_lso_tcp_flag_first_i[11:0]" + */ + +/* register address for bitfield lso_tcp_flag_first[b:0] */ +#define HW_ATL_THM_LSO_TCP_FLAG_FIRST_ADR 0x00007820 +/* bitmask for bitfield lso_tcp_flag_first[b:0] */ +#define HW_ATL_THM_LSO_TCP_FLAG_FIRST_MSK 0x00000fff +/* inverted bitmask for bitfield lso_tcp_flag_first[b:0] */ +#define HW_ATL_THM_LSO_TCP_FLAG_FIRST_MSKN 0xfffff000 +/* lower bit position of bitfield lso_tcp_flag_first[b:0] */ +#define HW_ATL_THM_LSO_TCP_FLAG_FIRST_SHIFT 0 +/* width of bitfield lso_tcp_flag_first[b:0] */ +#define HW_ATL_THM_LSO_TCP_FLAG_FIRST_WIDTH 12 +/* default value of bitfield lso_tcp_flag_first[b:0] */ +#define HW_ATL_THM_LSO_TCP_FLAG_FIRST_DEFAULT 0x0 + +/* tx lso_tcp_flag_last[b:0] bitfield definitions + * preprocessor definitions for the bitfield "lso_tcp_flag_last[b:0]". + * port="pif_thm_lso_tcp_flag_last_i[11:0]" + */ + +/* register address for bitfield lso_tcp_flag_last[b:0] */ +#define HW_ATL_THM_LSO_TCP_FLAG_LAST_ADR 0x00007824 +/* bitmask for bitfield lso_tcp_flag_last[b:0] */ +#define HW_ATL_THM_LSO_TCP_FLAG_LAST_MSK 0x00000fff +/* inverted bitmask for bitfield lso_tcp_flag_last[b:0] */ +#define HW_ATL_THM_LSO_TCP_FLAG_LAST_MSKN 0xfffff000 +/* lower bit position of bitfield lso_tcp_flag_last[b:0] */ +#define HW_ATL_THM_LSO_TCP_FLAG_LAST_SHIFT 0 +/* width of bitfield lso_tcp_flag_last[b:0] */ +#define HW_ATL_THM_LSO_TCP_FLAG_LAST_WIDTH 12 +/* default value of bitfield lso_tcp_flag_last[b:0] */ +#define HW_ATL_THM_LSO_TCP_FLAG_LAST_DEFAULT 0x0 + +/* tx lso_tcp_flag_mid[b:0] bitfield definitions + * preprocessor definitions for the bitfield "lso_tcp_flag_mid[b:0]". + * port="pif_thm_lso_tcp_flag_mid_i[11:0]" + */ + +/* Register address for bitfield lro_rsc_max[1F:0] */ +#define HW_ATL_RPO_LRO_RSC_MAX_ADR 0x00005598 +/* Bitmask for bitfield lro_rsc_max[1F:0] */ +#define HW_ATL_RPO_LRO_RSC_MAX_MSK 0xFFFFFFFF +/* Inverted bitmask for bitfield lro_rsc_max[1F:0] */ +#define HW_ATL_RPO_LRO_RSC_MAX_MSKN 0x00000000 +/* Lower bit position of bitfield lro_rsc_max[1F:0] */ +#define HW_ATL_RPO_LRO_RSC_MAX_SHIFT 0 +/* Width of bitfield lro_rsc_max[1F:0] */ +#define HW_ATL_RPO_LRO_RSC_MAX_WIDTH 32 +/* Default value of bitfield lro_rsc_max[1F:0] */ +#define HW_ATL_RPO_LRO_RSC_MAX_DEFAULT 0x0 + +/* RX lro_en[1F:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "lro_en[1F:0]". + * PORT="pif_rpo_lro_en_i[31:0]" + */ + +/* Register address for bitfield lro_en[1F:0] */ +#define HW_ATL_RPO_LRO_EN_ADR 0x00005590 +/* Bitmask for bitfield lro_en[1F:0] */ +#define HW_ATL_RPO_LRO_EN_MSK 0xFFFFFFFF +/* Inverted bitmask for bitfield lro_en[1F:0] */ +#define HW_ATL_RPO_LRO_EN_MSKN 0x00000000 +/* Lower bit position of bitfield lro_en[1F:0] */ +#define HW_ATL_RPO_LRO_EN_SHIFT 0 +/* Width of bitfield lro_en[1F:0] */ +#define HW_ATL_RPO_LRO_EN_WIDTH 32 +/* Default value of bitfield lro_en[1F:0] */ +#define HW_ATL_RPO_LRO_EN_DEFAULT 0x0 + +/* RX lro_ptopt_en Bitfield Definitions + * Preprocessor definitions for the bitfield "lro_ptopt_en". 
+ * PORT="pif_rpo_lro_ptopt_en_i" + */ + +/* Register address for bitfield lro_ptopt_en */ +#define HW_ATL_RPO_LRO_PTOPT_EN_ADR 0x00005594 +/* Bitmask for bitfield lro_ptopt_en */ +#define HW_ATL_RPO_LRO_PTOPT_EN_MSK 0x00008000 +/* Inverted bitmask for bitfield lro_ptopt_en */ +#define HW_ATL_RPO_LRO_PTOPT_EN_MSKN 0xFFFF7FFF +/* Lower bit position of bitfield lro_ptopt_en */ +#define HW_ATL_RPO_LRO_PTOPT_EN_SHIFT 15 +/* Width of bitfield lro_ptopt_en */ +#define HW_ATL_RPO_LRO_PTOPT_EN_WIDTH 1 +/* Default value of bitfield lro_ptopt_en */ +#define HW_ATL_RPO_LRO_PTOPT_EN_DEFALT 0x1 + +/* RX lro_q_ses_lmt Bitfield Definitions + * Preprocessor definitions for the bitfield "lro_q_ses_lmt". + * PORT="pif_rpo_lro_q_ses_lmt_i[1:0]" + */ + +/* Register address for bitfield lro_q_ses_lmt */ +#define HW_ATL_RPO_LRO_QSES_LMT_ADR 0x00005594 +/* Bitmask for bitfield lro_q_ses_lmt */ +#define HW_ATL_RPO_LRO_QSES_LMT_MSK 0x00003000 +/* Inverted bitmask for bitfield lro_q_ses_lmt */ +#define HW_ATL_RPO_LRO_QSES_LMT_MSKN 0xFFFFCFFF +/* Lower bit position of bitfield lro_q_ses_lmt */ +#define HW_ATL_RPO_LRO_QSES_LMT_SHIFT 12 +/* Width of bitfield lro_q_ses_lmt */ +#define HW_ATL_RPO_LRO_QSES_LMT_WIDTH 2 +/* Default value of bitfield lro_q_ses_lmt */ +#define HW_ATL_RPO_LRO_QSES_LMT_DEFAULT 0x1 + +/* RX lro_tot_dsc_lmt[1:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "lro_tot_dsc_lmt[1:0]". + * PORT="pif_rpo_lro_tot_dsc_lmt_i[1:0]" + */ + +/* Register address for bitfield lro_tot_dsc_lmt[1:0] */ +#define HW_ATL_RPO_LRO_TOT_DSC_LMT_ADR 0x00005594 +/* Bitmask for bitfield lro_tot_dsc_lmt[1:0] */ +#define HW_ATL_RPO_LRO_TOT_DSC_LMT_MSK 0x00000060 +/* Inverted bitmask for bitfield lro_tot_dsc_lmt[1:0] */ +#define HW_ATL_RPO_LRO_TOT_DSC_LMT_MSKN 0xFFFFFF9F +/* Lower bit position of bitfield lro_tot_dsc_lmt[1:0] */ +#define HW_ATL_RPO_LRO_TOT_DSC_LMT_SHIFT 5 +/* Width of bitfield lro_tot_dsc_lmt[1:0] */ +#define HW_ATL_RPO_LRO_TOT_DSC_LMT_WIDTH 2 +/* Default value of bitfield lro_tot_dsc_lmt[1:0] */ +#define HW_ATL_RPO_LRO_TOT_DSC_LMT_DEFALT 0x1 + +/* RX lro_pkt_min[4:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "lro_pkt_min[4:0]". + * PORT="pif_rpo_lro_pkt_min_i[4:0]" + */ + +/* Register address for bitfield lro_pkt_min[4:0] */ +#define HW_ATL_RPO_LRO_PKT_MIN_ADR 0x00005594 +/* Bitmask for bitfield lro_pkt_min[4:0] */ +#define HW_ATL_RPO_LRO_PKT_MIN_MSK 0x0000001F +/* Inverted bitmask for bitfield lro_pkt_min[4:0] */ +#define HW_ATL_RPO_LRO_PKT_MIN_MSKN 0xFFFFFFE0 +/* Lower bit position of bitfield lro_pkt_min[4:0] */ +#define HW_ATL_RPO_LRO_PKT_MIN_SHIFT 0 +/* Width of bitfield lro_pkt_min[4:0] */ +#define HW_ATL_RPO_LRO_PKT_MIN_WIDTH 5 +/* Default value of bitfield lro_pkt_min[4:0] */ +#define HW_ATL_RPO_LRO_PKT_MIN_DEFAULT 0x8 + +/* Width of bitfield lro{L}_des_max[1:0] */ +#define HW_ATL_RPO_LRO_LDES_MAX_WIDTH 2 +/* Default value of bitfield lro{L}_des_max[1:0] */ +#define HW_ATL_RPO_LRO_LDES_MAX_DEFAULT 0x0 + +/* RX lro_tb_div[11:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "lro_tb_div[11:0]". 
+ * PORT="pif_rpo_lro_tb_div_i[11:0]" + */ + +/* Register address for bitfield lro_tb_div[11:0] */ +#define HW_ATL_RPO_LRO_TB_DIV_ADR 0x00005620 +/* Bitmask for bitfield lro_tb_div[11:0] */ +#define HW_ATL_RPO_LRO_TB_DIV_MSK 0xFFF00000 +/* Inverted bitmask for bitfield lro_tb_div[11:0] */ +#define HW_ATL_RPO_LRO_TB_DIV_MSKN 0x000FFFFF +/* Lower bit position of bitfield lro_tb_div[11:0] */ +#define HW_ATL_RPO_LRO_TB_DIV_SHIFT 20 +/* Width of bitfield lro_tb_div[11:0] */ +#define HW_ATL_RPO_LRO_TB_DIV_WIDTH 12 +/* Default value of bitfield lro_tb_div[11:0] */ +#define HW_ATL_RPO_LRO_TB_DIV_DEFAULT 0xC35 + +/* RX lro_ina_ival[9:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "lro_ina_ival[9:0]". + * PORT="pif_rpo_lro_ina_ival_i[9:0]" + */ + +/* Register address for bitfield lro_ina_ival[9:0] */ +#define HW_ATL_RPO_LRO_INA_IVAL_ADR 0x00005620 +/* Bitmask for bitfield lro_ina_ival[9:0] */ +#define HW_ATL_RPO_LRO_INA_IVAL_MSK 0x000FFC00 +/* Inverted bitmask for bitfield lro_ina_ival[9:0] */ +#define HW_ATL_RPO_LRO_INA_IVAL_MSKN 0xFFF003FF +/* Lower bit position of bitfield lro_ina_ival[9:0] */ +#define HW_ATL_RPO_LRO_INA_IVAL_SHIFT 10 +/* Width of bitfield lro_ina_ival[9:0] */ +#define HW_ATL_RPO_LRO_INA_IVAL_WIDTH 10 +/* Default value of bitfield lro_ina_ival[9:0] */ +#define HW_ATL_RPO_LRO_INA_IVAL_DEFAULT 0xA + +/* RX lro_max_ival[9:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "lro_max_ival[9:0]". + * PORT="pif_rpo_lro_max_ival_i[9:0]" + */ + +/* Register address for bitfield lro_max_ival[9:0] */ +#define HW_ATL_RPO_LRO_MAX_IVAL_ADR 0x00005620 +/* Bitmask for bitfield lro_max_ival[9:0] */ +#define HW_ATL_RPO_LRO_MAX_IVAL_MSK 0x000003FF +/* Inverted bitmask for bitfield lro_max_ival[9:0] */ +#define HW_ATL_RPO_LRO_MAX_IVAL_MSKN 0xFFFFFC00 +/* Lower bit position of bitfield lro_max_ival[9:0] */ +#define HW_ATL_RPO_LRO_MAX_IVAL_SHIFT 0 +/* Width of bitfield lro_max_ival[9:0] */ +#define HW_ATL_RPO_LRO_MAX_IVAL_WIDTH 10 +/* Default value of bitfield lro_max_ival[9:0] */ +#define HW_ATL_RPO_LRO_MAX_IVAL_DEFAULT 0x19 + +/* TX dca{D}_cpuid[7:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "dca{D}_cpuid[7:0]". + * Parameter: DCA {D} | stride size 0x4 | range [0, 31] + * PORT="pif_tdm_dca0_cpuid_i[7:0]" + */ + +/* Register address for bitfield dca{D}_cpuid[7:0] */ +#define HW_ATL_TDM_DCA_DCPUID_ADR(dca) (0x00008400 + (dca) * 0x4) +/* Bitmask for bitfield dca{D}_cpuid[7:0] */ +#define HW_ATL_TDM_DCA_DCPUID_MSK 0x000000FF +/* Inverted bitmask for bitfield dca{D}_cpuid[7:0] */ +#define HW_ATL_TDM_DCA_DCPUID_MSKN 0xFFFFFF00 +/* Lower bit position of bitfield dca{D}_cpuid[7:0] */ +#define HW_ATL_TDM_DCA_DCPUID_SHIFT 0 +/* Width of bitfield dca{D}_cpuid[7:0] */ +#define HW_ATL_TDM_DCA_DCPUID_WIDTH 8 +/* Default value of bitfield dca{D}_cpuid[7:0] */ +#define HW_ATL_TDM_DCA_DCPUID_DEFAULT 0x0 + +/* TX dca{D}_desc_en Bitfield Definitions + * Preprocessor definitions for the bitfield "dca{D}_desc_en". 
+ * Parameter: DCA {D} | stride size 0x4 | range [0, 31] + * PORT="pif_tdm_dca_desc_en_i[0]" + */ + +/* Register address for bitfield dca{D}_desc_en */ +#define HW_ATL_TDM_DCA_DDESC_EN_ADR(dca) (0x00008400 + (dca) * 0x4) +/* Bitmask for bitfield dca{D}_desc_en */ +#define HW_ATL_TDM_DCA_DDESC_EN_MSK 0x80000000 +/* Inverted bitmask for bitfield dca{D}_desc_en */ +#define HW_ATL_TDM_DCA_DDESC_EN_MSKN 0x7FFFFFFF +/* Lower bit position of bitfield dca{D}_desc_en */ +#define HW_ATL_TDM_DCA_DDESC_EN_SHIFT 31 +/* Width of bitfield dca{D}_desc_en */ +#define HW_ATL_TDM_DCA_DDESC_EN_WIDTH 1 +/* Default value of bitfield dca{D}_desc_en */ +#define HW_ATL_TDM_DCA_DDESC_EN_DEFAULT 0x0 + +/* TX desc{D}_en Bitfield Definitions + * Preprocessor definitions for the bitfield "desc{D}_en". + * Parameter: descriptor {D} | stride size 0x40 | range [0, 31] + * PORT="pif_tdm_desc_en_i[0]" + */ + +/* Register address for bitfield desc{D}_en */ +#define HW_ATL_TDM_DESC_DEN_ADR(descriptor) (0x00007C08 + (descriptor) * 0x40) +/* Bitmask for bitfield desc{D}_en */ +#define HW_ATL_TDM_DESC_DEN_MSK 0x80000000 +/* Inverted bitmask for bitfield desc{D}_en */ +#define HW_ATL_TDM_DESC_DEN_MSKN 0x7FFFFFFF +/* Lower bit position of bitfield desc{D}_en */ +#define HW_ATL_TDM_DESC_DEN_SHIFT 31 +/* Width of bitfield desc{D}_en */ +#define HW_ATL_TDM_DESC_DEN_WIDTH 1 +/* Default value of bitfield desc{D}_en */ +#define HW_ATL_TDM_DESC_DEN_DEFAULT 0x0 + +/* TX desc{D}_hd[C:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "desc{D}_hd[C:0]". + * Parameter: descriptor {D} | stride size 0x40 | range [0, 31] + * PORT="tdm_pif_desc0_hd_o[12:0]" + */ + +/* Register address for bitfield desc{D}_hd[C:0] */ +#define HW_ATL_TDM_DESC_DHD_ADR(descriptor) (0x00007C0C + (descriptor) * 0x40) +/* Bitmask for bitfield desc{D}_hd[C:0] */ +#define HW_ATL_TDM_DESC_DHD_MSK 0x00001FFF +/* Inverted bitmask for bitfield desc{D}_hd[C:0] */ +#define HW_ATL_TDM_DESC_DHD_MSKN 0xFFFFE000 +/* Lower bit position of bitfield desc{D}_hd[C:0] */ +#define HW_ATL_TDM_DESC_DHD_SHIFT 0 +/* Width of bitfield desc{D}_hd[C:0] */ +#define HW_ATL_TDM_DESC_DHD_WIDTH 13 + +/* TX desc{D}_len[9:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "desc{D}_len[9:0]". + * Parameter: descriptor {D} | stride size 0x40 | range [0, 31] + * PORT="pif_tdm_desc0_len_i[9:0]" + */ + +/* Register address for bitfield desc{D}_len[9:0] */ +#define HW_ATL_TDM_DESC_DLEN_ADR(descriptor) (0x00007C08 + (descriptor) * 0x40) +/* Bitmask for bitfield desc{D}_len[9:0] */ +#define HW_ATL_TDM_DESC_DLEN_MSK 0x00001FF8 +/* Inverted bitmask for bitfield desc{D}_len[9:0] */ +#define HW_ATL_TDM_DESC_DLEN_MSKN 0xFFFFE007 +/* Lower bit position of bitfield desc{D}_len[9:0] */ +#define HW_ATL_TDM_DESC_DLEN_SHIFT 3 +/* Width of bitfield desc{D}_len[9:0] */ +#define HW_ATL_TDM_DESC_DLEN_WIDTH 10 +/* Default value of bitfield desc{D}_len[9:0] */ +#define HW_ATL_TDM_DESC_DLEN_DEFAULT 0x0 + +/* TX desc{D}_wrb_thresh[6:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "desc{D}_wrb_thresh[6:0]". 
+ * Parameter: descriptor {D} | stride size 0x40 | range [0, 31] + * PORT="pif_tdm_desc0_wrb_thresh_i[6:0]" + */ + +/* Register address for bitfield desc{D}_wrb_thresh[6:0] */ +#define HW_ATL_TDM_DESC_DWRB_THRESH_ADR(descriptor) \ + (0x00007C18 + (descriptor) * 0x40) +/* Bitmask for bitfield desc{D}_wrb_thresh[6:0] */ +#define HW_ATL_TDM_DESC_DWRB_THRESH_MSK 0x00007F00 +/* Inverted bitmask for bitfield desc{D}_wrb_thresh[6:0] */ +#define HW_ATL_TDM_DESC_DWRB_THRESH_MSKN 0xFFFF80FF +/* Lower bit position of bitfield desc{D}_wrb_thresh[6:0] */ +#define HW_ATL_TDM_DESC_DWRB_THRESH_SHIFT 8 +/* Width of bitfield desc{D}_wrb_thresh[6:0] */ +#define HW_ATL_TDM_DESC_DWRB_THRESH_WIDTH 7 +/* Default value of bitfield desc{D}_wrb_thresh[6:0] */ +#define HW_ATL_TDM_DESC_DWRB_THRESH_DEFAULT 0x0 + +/* TX tdm_int_mod_en Bitfield Definitions + * Preprocessor definitions for the bitfield "tdm_int_mod_en". + * PORT="pif_tdm_int_mod_en_i" + */ + +/* Register address for bitfield tdm_int_mod_en */ +#define HW_ATL_TDM_INT_MOD_EN_ADR 0x00007B40 +/* Bitmask for bitfield tdm_int_mod_en */ +#define HW_ATL_TDM_INT_MOD_EN_MSK 0x00000010 +/* Inverted bitmask for bitfield tdm_int_mod_en */ +#define HW_ATL_TDM_INT_MOD_EN_MSKN 0xFFFFFFEF +/* Lower bit position of bitfield tdm_int_mod_en */ +#define HW_ATL_TDM_INT_MOD_EN_SHIFT 4 +/* Width of bitfield tdm_int_mod_en */ +#define HW_ATL_TDM_INT_MOD_EN_WIDTH 1 +/* Default value of bitfield tdm_int_mod_en */ +#define HW_ATL_TDM_INT_MOD_EN_DEFAULT 0x0 + +/* TX lso_tcp_flag_mid[B:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "lso_tcp_flag_mid[B:0]". + * PORT="pif_thm_lso_tcp_flag_mid_i[11:0]" + */ +/* register address for bitfield lso_tcp_flag_mid[b:0] */ +#define HW_ATL_THM_LSO_TCP_FLAG_MID_ADR 0x00007820 +/* bitmask for bitfield lso_tcp_flag_mid[b:0] */ +#define HW_ATL_THM_LSO_TCP_FLAG_MID_MSK 0x0fff0000 +/* inverted bitmask for bitfield lso_tcp_flag_mid[b:0] */ +#define HW_ATL_THM_LSO_TCP_FLAG_MID_MSKN 0xf000ffff +/* lower bit position of bitfield lso_tcp_flag_mid[b:0] */ +#define HW_ATL_THM_LSO_TCP_FLAG_MID_SHIFT 16 +/* width of bitfield lso_tcp_flag_mid[b:0] */ +#define HW_ATL_THM_LSO_TCP_FLAG_MID_WIDTH 12 +/* default value of bitfield lso_tcp_flag_mid[b:0] */ +#define HW_ATL_THM_LSO_TCP_FLAG_MID_DEFAULT 0x0 + +/* tx tx_buf_en bitfield definitions + * preprocessor definitions for the bitfield "tx_buf_en". 
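
lso_tcp_flag_mid (bits 27:16) shares register 0x7820 with lso_tcp_flag_first (bits 11:0) defined earlier, so it is natural to build both 12-bit TCP-flag masks into a single image before writing. This is a hedged sketch with an illustrative helper name, not the driver's own code:

#include <stdint.h>

/* illustrative only: pack the "first" and "mid" LSO TCP-flag masks, which
 * both live in register 0x7820 */
static inline uint32_t
atl_thm_lso_flags_word(uint32_t first_mask, uint32_t mid_mask)
{
	uint32_t v = 0;

	v |= (first_mask << HW_ATL_THM_LSO_TCP_FLAG_FIRST_SHIFT) &
	     HW_ATL_THM_LSO_TCP_FLAG_FIRST_MSK;
	v |= (mid_mask << HW_ATL_THM_LSO_TCP_FLAG_MID_SHIFT) &
	     HW_ATL_THM_LSO_TCP_FLAG_MID_MSK;
	return v;
}
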
+ * port="pif_tpb_tx_buf_en_i" + */ + +/* register address for bitfield tx_buf_en */ +#define HW_ATL_TPB_TX_BUF_EN_ADR 0x00007900 +/* bitmask for bitfield tx_buf_en */ +#define HW_ATL_TPB_TX_BUF_EN_MSK 0x00000001 +/* inverted bitmask for bitfield tx_buf_en */ +#define HW_ATL_TPB_TX_BUF_EN_MSKN 0xfffffffe +/* lower bit position of bitfield tx_buf_en */ +#define HW_ATL_TPB_TX_BUF_EN_SHIFT 0 +/* width of bitfield tx_buf_en */ +#define HW_ATL_TPB_TX_BUF_EN_WIDTH 1 +/* default value of bitfield tx_buf_en */ +#define HW_ATL_TPB_TX_BUF_EN_DEFAULT 0x0 + +/* register address for bitfield tx_tc_mode */ +#define HW_ATL_TPB_TX_TC_MODE_ADDR 0x00007900 +/* bitmask for bitfield tx_tc_mode */ +#define HW_ATL_TPB_TX_TC_MODE_MSK 0x00000100 +/* inverted bitmask for bitfield tx_tc_mode */ +#define HW_ATL_TPB_TX_TC_MODE_MSKN 0xFFFFFEFF +/* lower bit position of bitfield tx_tc_mode */ +#define HW_ATL_TPB_TX_TC_MODE_SHIFT 8 +/* width of bitfield tx_tc_mode */ +#define HW_ATL_TPB_TX_TC_MODE_WIDTH 1 +/* default value of bitfield tx_tc_mode */ +#define HW_ATL_TPB_TX_TC_MODE_DEFAULT 0x0 + +/* tx tx{b}_hi_thresh[c:0] bitfield definitions + * preprocessor definitions for the bitfield "tx{b}_hi_thresh[c:0]". + * parameter: buffer {b} | stride size 0x10 | range [0, 7] + * port="pif_tpb_tx0_hi_thresh_i[12:0]" + */ + +/* register address for bitfield tx{b}_hi_thresh[c:0] */ +#define HW_ATL_TPB_TXBHI_THRESH_ADR(buffer) (0x00007914 + (buffer) * 0x10) +/* bitmask for bitfield tx{b}_hi_thresh[c:0] */ +#define HW_ATL_TPB_TXBHI_THRESH_MSK 0x1fff0000 +/* inverted bitmask for bitfield tx{b}_hi_thresh[c:0] */ +#define HW_ATL_TPB_TXBHI_THRESH_MSKN 0xe000ffff +/* lower bit position of bitfield tx{b}_hi_thresh[c:0] */ +#define HW_ATL_TPB_TXBHI_THRESH_SHIFT 16 +/* width of bitfield tx{b}_hi_thresh[c:0] */ +#define HW_ATL_TPB_TXBHI_THRESH_WIDTH 13 +/* default value of bitfield tx{b}_hi_thresh[c:0] */ +#define HW_ATL_TPB_TXBHI_THRESH_DEFAULT 0x0 + +/* tx tx{b}_lo_thresh[c:0] bitfield definitions + * preprocessor definitions for the bitfield "tx{b}_lo_thresh[c:0]". + * parameter: buffer {b} | stride size 0x10 | range [0, 7] + * port="pif_tpb_tx0_lo_thresh_i[12:0]" + */ + +/* register address for bitfield tx{b}_lo_thresh[c:0] */ +#define HW_ATL_TPB_TXBLO_THRESH_ADR(buffer) (0x00007914 + (buffer) * 0x10) +/* bitmask for bitfield tx{b}_lo_thresh[c:0] */ +#define HW_ATL_TPB_TXBLO_THRESH_MSK 0x00001fff +/* inverted bitmask for bitfield tx{b}_lo_thresh[c:0] */ +#define HW_ATL_TPB_TXBLO_THRESH_MSKN 0xffffe000 +/* lower bit position of bitfield tx{b}_lo_thresh[c:0] */ +#define HW_ATL_TPB_TXBLO_THRESH_SHIFT 0 +/* width of bitfield tx{b}_lo_thresh[c:0] */ +#define HW_ATL_TPB_TXBLO_THRESH_WIDTH 13 +/* default value of bitfield tx{b}_lo_thresh[c:0] */ +#define HW_ATL_TPB_TXBLO_THRESH_DEFAULT 0x0 + +/* tx dma_sys_loopback bitfield definitions + * preprocessor definitions for the bitfield "dma_sys_loopback". 
+ * port="pif_tpb_dma_sys_lbk_i" + */ + +/* register address for bitfield dma_sys_loopback */ +#define HW_ATL_TPB_DMA_SYS_LBK_ADR 0x00007000 +/* bitmask for bitfield dma_sys_loopback */ +#define HW_ATL_TPB_DMA_SYS_LBK_MSK 0x00000040 +/* inverted bitmask for bitfield dma_sys_loopback */ +#define HW_ATL_TPB_DMA_SYS_LBK_MSKN 0xffffffbf +/* lower bit position of bitfield dma_sys_loopback */ +#define HW_ATL_TPB_DMA_SYS_LBK_SHIFT 6 +/* width of bitfield dma_sys_loopback */ +#define HW_ATL_TPB_DMA_SYS_LBK_WIDTH 1 +/* default value of bitfield dma_sys_loopback */ +#define HW_ATL_TPB_DMA_SYS_LBK_DEFAULT 0x0 + +/* tx tx{b}_buf_size[7:0] bitfield definitions + * preprocessor definitions for the bitfield "tx{b}_buf_size[7:0]". + * parameter: buffer {b} | stride size 0x10 | range [0, 7] + * port="pif_tpb_tx0_buf_size_i[7:0]" + */ + +/* register address for bitfield tx{b}_buf_size[7:0] */ +#define HW_ATL_TPB_TXBBUF_SIZE_ADR(buffer) (0x00007910 + (buffer) * 0x10) +/* bitmask for bitfield tx{b}_buf_size[7:0] */ +#define HW_ATL_TPB_TXBBUF_SIZE_MSK 0x000000ff +/* inverted bitmask for bitfield tx{b}_buf_size[7:0] */ +#define HW_ATL_TPB_TXBBUF_SIZE_MSKN 0xffffff00 +/* lower bit position of bitfield tx{b}_buf_size[7:0] */ +#define HW_ATL_TPB_TXBBUF_SIZE_SHIFT 0 +/* width of bitfield tx{b}_buf_size[7:0] */ +#define HW_ATL_TPB_TXBBUF_SIZE_WIDTH 8 +/* default value of bitfield tx{b}_buf_size[7:0] */ +#define HW_ATL_TPB_TXBBUF_SIZE_DEFAULT 0x0 + +/* tx tx_scp_ins_en bitfield definitions + * preprocessor definitions for the bitfield "tx_scp_ins_en". + * port="pif_tpb_scp_ins_en_i" + */ + +/* register address for bitfield tx_scp_ins_en */ +#define HW_ATL_TPB_TX_SCP_INS_EN_ADR 0x00007900 +/* bitmask for bitfield tx_scp_ins_en */ +#define HW_ATL_TPB_TX_SCP_INS_EN_MSK 0x00000004 +/* inverted bitmask for bitfield tx_scp_ins_en */ +#define HW_ATL_TPB_TX_SCP_INS_EN_MSKN 0xfffffffb +/* lower bit position of bitfield tx_scp_ins_en */ +#define HW_ATL_TPB_TX_SCP_INS_EN_SHIFT 2 +/* width of bitfield tx_scp_ins_en */ +#define HW_ATL_TPB_TX_SCP_INS_EN_WIDTH 1 +/* default value of bitfield tx_scp_ins_en */ +#define HW_ATL_TPB_TX_SCP_INS_EN_DEFAULT 0x0 + +/* tx ipv4_chk_en bitfield definitions + * preprocessor definitions for the bitfield "ipv4_chk_en". + * port="pif_tpo_ipv4_chk_en_i" + */ + +/* register address for bitfield ipv4_chk_en */ +#define HW_ATL_TPO_IPV4CHK_EN_ADR 0x00007800 +/* bitmask for bitfield ipv4_chk_en */ +#define HW_ATL_TPO_IPV4CHK_EN_MSK 0x00000002 +/* inverted bitmask for bitfield ipv4_chk_en */ +#define HW_ATL_TPO_IPV4CHK_EN_MSKN 0xfffffffd +/* lower bit position of bitfield ipv4_chk_en */ +#define HW_ATL_TPO_IPV4CHK_EN_SHIFT 1 +/* width of bitfield ipv4_chk_en */ +#define HW_ATL_TPO_IPV4CHK_EN_WIDTH 1 +/* default value of bitfield ipv4_chk_en */ +#define HW_ATL_TPO_IPV4CHK_EN_DEFAULT 0x0 + +/* tx l4_chk_en bitfield definitions + * preprocessor definitions for the bitfield "l4_chk_en". 
+ * port="pif_tpo_l4_chk_en_i" + */ + +/* register address for bitfield l4_chk_en */ +#define HW_ATL_TPOL4CHK_EN_ADR 0x00007800 +/* bitmask for bitfield l4_chk_en */ +#define HW_ATL_TPOL4CHK_EN_MSK 0x00000001 +/* inverted bitmask for bitfield l4_chk_en */ +#define HW_ATL_TPOL4CHK_EN_MSKN 0xfffffffe +/* lower bit position of bitfield l4_chk_en */ +#define HW_ATL_TPOL4CHK_EN_SHIFT 0 +/* width of bitfield l4_chk_en */ +#define HW_ATL_TPOL4CHK_EN_WIDTH 1 +/* default value of bitfield l4_chk_en */ +#define HW_ATL_TPOL4CHK_EN_DEFAULT 0x0 + +/* tx pkt_sys_loopback bitfield definitions + * preprocessor definitions for the bitfield "pkt_sys_loopback". + * port="pif_tpo_pkt_sys_lbk_i" + */ + +/* register address for bitfield pkt_sys_loopback */ +#define HW_ATL_TPO_PKT_SYS_LBK_ADR 0x00007000 +/* bitmask for bitfield pkt_sys_loopback */ +#define HW_ATL_TPO_PKT_SYS_LBK_MSK 0x00000080 +/* inverted bitmask for bitfield pkt_sys_loopback */ +#define HW_ATL_TPO_PKT_SYS_LBK_MSKN 0xffffff7f +/* lower bit position of bitfield pkt_sys_loopback */ +#define HW_ATL_TPO_PKT_SYS_LBK_SHIFT 7 +/* width of bitfield pkt_sys_loopback */ +#define HW_ATL_TPO_PKT_SYS_LBK_WIDTH 1 +/* default value of bitfield pkt_sys_loopback */ +#define HW_ATL_TPO_PKT_SYS_LBK_DEFAULT 0x0 + +/* tx data_tc_arb_mode bitfield definitions + * preprocessor definitions for the bitfield "data_tc_arb_mode". + * port="pif_tps_data_tc_arb_mode_i" + */ + +/* register address for bitfield data_tc_arb_mode */ +#define HW_ATL_TPS_DATA_TC_ARB_MODE_ADR 0x00007100 +/* bitmask for bitfield data_tc_arb_mode */ +#define HW_ATL_TPS_DATA_TC_ARB_MODE_MSK 0x00000001 +/* inverted bitmask for bitfield data_tc_arb_mode */ +#define HW_ATL_TPS_DATA_TC_ARB_MODE_MSKN 0xfffffffe +/* lower bit position of bitfield data_tc_arb_mode */ +#define HW_ATL_TPS_DATA_TC_ARB_MODE_SHIFT 0 +/* width of bitfield data_tc_arb_mode */ +#define HW_ATL_TPS_DATA_TC_ARB_MODE_WIDTH 1 +/* default value of bitfield data_tc_arb_mode */ +#define HW_ATL_TPS_DATA_TC_ARB_MODE_DEFAULT 0x0 + +/* tx desc_rate_ta_rst bitfield definitions + * preprocessor definitions for the bitfield "desc_rate_ta_rst". + * port="pif_tps_desc_rate_ta_rst_i" + */ + +/* register address for bitfield desc_rate_ta_rst */ +#define HW_ATL_TPS_DESC_RATE_TA_RST_ADR 0x00007310 +/* bitmask for bitfield desc_rate_ta_rst */ +#define HW_ATL_TPS_DESC_RATE_TA_RST_MSK 0x80000000 +/* inverted bitmask for bitfield desc_rate_ta_rst */ +#define HW_ATL_TPS_DESC_RATE_TA_RST_MSKN 0x7fffffff +/* lower bit position of bitfield desc_rate_ta_rst */ +#define HW_ATL_TPS_DESC_RATE_TA_RST_SHIFT 31 +/* width of bitfield desc_rate_ta_rst */ +#define HW_ATL_TPS_DESC_RATE_TA_RST_WIDTH 1 +/* default value of bitfield desc_rate_ta_rst */ +#define HW_ATL_TPS_DESC_RATE_TA_RST_DEFAULT 0x0 + +/* tx desc_rate_limit[a:0] bitfield definitions + * preprocessor definitions for the bitfield "desc_rate_limit[a:0]". 
+ * port="pif_tps_desc_rate_lim_i[10:0]" + */ + +/* register address for bitfield desc_rate_limit[a:0] */ +#define HW_ATL_TPS_DESC_RATE_LIM_ADR 0x00007310 +/* bitmask for bitfield desc_rate_limit[a:0] */ +#define HW_ATL_TPS_DESC_RATE_LIM_MSK 0x000007ff +/* inverted bitmask for bitfield desc_rate_limit[a:0] */ +#define HW_ATL_TPS_DESC_RATE_LIM_MSKN 0xfffff800 +/* lower bit position of bitfield desc_rate_limit[a:0] */ +#define HW_ATL_TPS_DESC_RATE_LIM_SHIFT 0 +/* width of bitfield desc_rate_limit[a:0] */ +#define HW_ATL_TPS_DESC_RATE_LIM_WIDTH 11 +/* default value of bitfield desc_rate_limit[a:0] */ +#define HW_ATL_TPS_DESC_RATE_LIM_DEFAULT 0x0 + +/* tx desc_tc_arb_mode[1:0] bitfield definitions + * preprocessor definitions for the bitfield "desc_tc_arb_mode[1:0]". + * port="pif_tps_desc_tc_arb_mode_i[1:0]" + */ + +/* register address for bitfield desc_tc_arb_mode[1:0] */ +#define HW_ATL_TPS_DESC_TC_ARB_MODE_ADR 0x00007200 +/* bitmask for bitfield desc_tc_arb_mode[1:0] */ +#define HW_ATL_TPS_DESC_TC_ARB_MODE_MSK 0x00000003 +/* inverted bitmask for bitfield desc_tc_arb_mode[1:0] */ +#define HW_ATL_TPS_DESC_TC_ARB_MODE_MSKN 0xfffffffc +/* lower bit position of bitfield desc_tc_arb_mode[1:0] */ +#define HW_ATL_TPS_DESC_TC_ARB_MODE_SHIFT 0 +/* width of bitfield desc_tc_arb_mode[1:0] */ +#define HW_ATL_TPS_DESC_TC_ARB_MODE_WIDTH 2 +/* default value of bitfield desc_tc_arb_mode[1:0] */ +#define HW_ATL_TPS_DESC_TC_ARB_MODE_DEFAULT 0x0 + +/* tx desc_tc{t}_credit_max[b:0] bitfield definitions + * preprocessor definitions for the bitfield "desc_tc{t}_credit_max[b:0]". + * parameter: tc {t} | stride size 0x4 | range [0, 7] + * port="pif_tps_desc_tc0_credit_max_i[11:0]" + */ + +/* register address for bitfield desc_tc{t}_credit_max[b:0] */ +#define HW_ATL_TPS_DESC_TCTCREDIT_MAX_ADR(tc) (0x00007210 + (tc) * 0x4) +/* bitmask for bitfield desc_tc{t}_credit_max[b:0] */ +#define HW_ATL_TPS_DESC_TCTCREDIT_MAX_MSK 0x0fff0000 +/* inverted bitmask for bitfield desc_tc{t}_credit_max[b:0] */ +#define HW_ATL_TPS_DESC_TCTCREDIT_MAX_MSKN 0xf000ffff +/* lower bit position of bitfield desc_tc{t}_credit_max[b:0] */ +#define HW_ATL_TPS_DESC_TCTCREDIT_MAX_SHIFT 16 +/* width of bitfield desc_tc{t}_credit_max[b:0] */ +#define HW_ATL_TPS_DESC_TCTCREDIT_MAX_WIDTH 12 +/* default value of bitfield desc_tc{t}_credit_max[b:0] */ +#define HW_ATL_TPS_DESC_TCTCREDIT_MAX_DEFAULT 0x0 + +/* tx desc_tc{t}_weight[8:0] bitfield definitions + * preprocessor definitions for the bitfield "desc_tc{t}_weight[8:0]". + * parameter: tc {t} | stride size 0x4 | range [0, 7] + * port="pif_tps_desc_tc0_weight_i[8:0]" + */ + +/* register address for bitfield desc_tc{t}_weight[8:0] */ +#define HW_ATL_TPS_DESC_TCTWEIGHT_ADR(tc) (0x00007210 + (tc) * 0x4) +/* bitmask for bitfield desc_tc{t}_weight[8:0] */ +#define HW_ATL_TPS_DESC_TCTWEIGHT_MSK 0x000001ff +/* inverted bitmask for bitfield desc_tc{t}_weight[8:0] */ +#define HW_ATL_TPS_DESC_TCTWEIGHT_MSKN 0xfffffe00 +/* lower bit position of bitfield desc_tc{t}_weight[8:0] */ +#define HW_ATL_TPS_DESC_TCTWEIGHT_SHIFT 0 +/* width of bitfield desc_tc{t}_weight[8:0] */ +#define HW_ATL_TPS_DESC_TCTWEIGHT_WIDTH 9 +/* default value of bitfield desc_tc{t}_weight[8:0] */ +#define HW_ATL_TPS_DESC_TCTWEIGHT_DEFAULT 0x0 + +/* tx desc_vm_arb_mode bitfield definitions + * preprocessor definitions for the bitfield "desc_vm_arb_mode". 
+ * port="pif_tps_desc_vm_arb_mode_i" + */ + +/* register address for bitfield desc_vm_arb_mode */ +#define HW_ATL_TPS_DESC_VM_ARB_MODE_ADR 0x00007300 +/* bitmask for bitfield desc_vm_arb_mode */ +#define HW_ATL_TPS_DESC_VM_ARB_MODE_MSK 0x00000001 +/* inverted bitmask for bitfield desc_vm_arb_mode */ +#define HW_ATL_TPS_DESC_VM_ARB_MODE_MSKN 0xfffffffe +/* lower bit position of bitfield desc_vm_arb_mode */ +#define HW_ATL_TPS_DESC_VM_ARB_MODE_SHIFT 0 +/* width of bitfield desc_vm_arb_mode */ +#define HW_ATL_TPS_DESC_VM_ARB_MODE_WIDTH 1 +/* default value of bitfield desc_vm_arb_mode */ +#define HW_ATL_TPS_DESC_VM_ARB_MODE_DEFAULT 0x0 + +/* tx data_tc{t}_credit_max[b:0] bitfield definitions + * preprocessor definitions for the bitfield "data_tc{t}_credit_max[b:0]". + * parameter: tc {t} | stride size 0x4 | range [0, 7] + * port="pif_tps_data_tc0_credit_max_i[11:0]" + */ + +/* register address for bitfield data_tc{t}_credit_max[b:0] */ +#define HW_ATL_TPS_DATA_TCTCREDIT_MAX_ADR(tc) (0x00007110 + (tc) * 0x4) +/* bitmask for bitfield data_tc{t}_credit_max[b:0] */ +#define HW_ATL_TPS_DATA_TCTCREDIT_MAX_MSK 0x0fff0000 +/* inverted bitmask for bitfield data_tc{t}_credit_max[b:0] */ +#define HW_ATL_TPS_DATA_TCTCREDIT_MAX_MSKN 0xf000ffff +/* lower bit position of bitfield data_tc{t}_credit_max[b:0] */ +#define HW_ATL_TPS_DATA_TCTCREDIT_MAX_SHIFT 16 +/* width of bitfield data_tc{t}_credit_max[b:0] */ +#define HW_ATL_TPS_DATA_TCTCREDIT_MAX_WIDTH 12 +/* default value of bitfield data_tc{t}_credit_max[b:0] */ +#define HW_ATL_TPS_DATA_TCTCREDIT_MAX_DEFAULT 0x0 + +/* tx data_tc{t}_weight[8:0] bitfield definitions + * preprocessor definitions for the bitfield "data_tc{t}_weight[8:0]". + * parameter: tc {t} | stride size 0x4 | range [0, 7] + * port="pif_tps_data_tc0_weight_i[8:0]" + */ + +/* register address for bitfield data_tc{t}_weight[8:0] */ +#define HW_ATL_TPS_DATA_TCTWEIGHT_ADR(tc) (0x00007110 + (tc) * 0x4) +/* bitmask for bitfield data_tc{t}_weight[8:0] */ +#define HW_ATL_TPS_DATA_TCTWEIGHT_MSK 0x000001ff +/* inverted bitmask for bitfield data_tc{t}_weight[8:0] */ +#define HW_ATL_TPS_DATA_TCTWEIGHT_MSKN 0xfffffe00 +/* lower bit position of bitfield data_tc{t}_weight[8:0] */ +#define HW_ATL_TPS_DATA_TCTWEIGHT_SHIFT 0 +/* width of bitfield data_tc{t}_weight[8:0] */ +#define HW_ATL_TPS_DATA_TCTWEIGHT_WIDTH 9 +/* default value of bitfield data_tc{t}_weight[8:0] */ +#define HW_ATL_TPS_DATA_TCTWEIGHT_DEFAULT 0x0 + +/* tx reg_res_dsbl bitfield definitions + * preprocessor definitions for the bitfield "reg_res_dsbl". + * port="pif_tx_reg_res_dsbl_i" + */ + +/* register address for bitfield reg_res_dsbl */ +#define HW_ATL_TX_REG_RES_DSBL_ADR 0x00007000 +/* bitmask for bitfield reg_res_dsbl */ +#define HW_ATL_TX_REG_RES_DSBL_MSK 0x20000000 +/* inverted bitmask for bitfield reg_res_dsbl */ +#define HW_ATL_TX_REG_RES_DSBL_MSKN 0xdfffffff +/* lower bit position of bitfield reg_res_dsbl */ +#define HW_ATL_TX_REG_RES_DSBL_SHIFT 29 +/* width of bitfield reg_res_dsbl */ +#define HW_ATL_TX_REG_RES_DSBL_WIDTH 1 +/* default value of bitfield reg_res_dsbl */ +#define HW_ATL_TX_REG_RES_DSBL_DEFAULT 0x1 + +/* mac_phy register access busy bitfield definitions + * preprocessor definitions for the bitfield "register access busy". 
+ * port="msm_pif_reg_busy_o" + */ + +/* register address for bitfield register access busy */ +#define HW_ATL_MSM_REG_ACCESS_BUSY_ADR 0x00004400 +/* bitmask for bitfield register access busy */ +#define HW_ATL_MSM_REG_ACCESS_BUSY_MSK 0x00001000 +/* inverted bitmask for bitfield register access busy */ +#define HW_ATL_MSM_REG_ACCESS_BUSY_MSKN 0xffffefff +/* lower bit position of bitfield register access busy */ +#define HW_ATL_MSM_REG_ACCESS_BUSY_SHIFT 12 +/* width of bitfield register access busy */ +#define HW_ATL_MSM_REG_ACCESS_BUSY_WIDTH 1 + +/* mac_phy msm register address[7:0] bitfield definitions + * preprocessor definitions for the bitfield "msm register address[7:0]". + * port="pif_msm_reg_addr_i[7:0]" + */ + +/* register address for bitfield msm register address[7:0] */ +#define HW_ATL_MSM_REG_ADDR_ADR 0x00004400 +/* bitmask for bitfield msm register address[7:0] */ +#define HW_ATL_MSM_REG_ADDR_MSK 0x000000ff +/* inverted bitmask for bitfield msm register address[7:0] */ +#define HW_ATL_MSM_REG_ADDR_MSKN 0xffffff00 +/* lower bit position of bitfield msm register address[7:0] */ +#define HW_ATL_MSM_REG_ADDR_SHIFT 0 +/* width of bitfield msm register address[7:0] */ +#define HW_ATL_MSM_REG_ADDR_WIDTH 8 +/* default value of bitfield msm register address[7:0] */ +#define HW_ATL_MSM_REG_ADDR_DEFAULT 0x0 + +/* mac_phy register read strobe bitfield definitions + * preprocessor definitions for the bitfield "register read strobe". + * port="pif_msm_reg_rden_i" + */ + +/* register address for bitfield register read strobe */ +#define HW_ATL_MSM_REG_RD_STROBE_ADR 0x00004400 +/* bitmask for bitfield register read strobe */ +#define HW_ATL_MSM_REG_RD_STROBE_MSK 0x00000200 +/* inverted bitmask for bitfield register read strobe */ +#define HW_ATL_MSM_REG_RD_STROBE_MSKN 0xfffffdff +/* lower bit position of bitfield register read strobe */ +#define HW_ATL_MSM_REG_RD_STROBE_SHIFT 9 +/* width of bitfield register read strobe */ +#define HW_ATL_MSM_REG_RD_STROBE_WIDTH 1 +/* default value of bitfield register read strobe */ +#define HW_ATL_MSM_REG_RD_STROBE_DEFAULT 0x0 + +/* mac_phy msm register read data[31:0] bitfield definitions + * preprocessor definitions for the bitfield "msm register read data[31:0]". + * port="msm_pif_reg_rd_data_o[31:0]" + */ + +/* register address for bitfield msm register read data[31:0] */ +#define HW_ATL_MSM_REG_RD_DATA_ADR 0x00004408 +/* bitmask for bitfield msm register read data[31:0] */ +#define HW_ATL_MSM_REG_RD_DATA_MSK 0xffffffff +/* inverted bitmask for bitfield msm register read data[31:0] */ +#define HW_ATL_MSM_REG_RD_DATA_MSKN 0x00000000 +/* lower bit position of bitfield msm register read data[31:0] */ +#define HW_ATL_MSM_REG_RD_DATA_SHIFT 0 +/* width of bitfield msm register read data[31:0] */ +#define HW_ATL_MSM_REG_RD_DATA_WIDTH 32 + +/* mac_phy msm register write data[31:0] bitfield definitions + * preprocessor definitions for the bitfield "msm register write data[31:0]". 
+ * port="pif_msm_reg_wr_data_i[31:0]" + */ + +/* register address for bitfield msm register write data[31:0] */ +#define HW_ATL_MSM_REG_WR_DATA_ADR 0x00004404 +/* bitmask for bitfield msm register write data[31:0] */ +#define HW_ATL_MSM_REG_WR_DATA_MSK 0xffffffff +/* inverted bitmask for bitfield msm register write data[31:0] */ +#define HW_ATL_MSM_REG_WR_DATA_MSKN 0x00000000 +/* lower bit position of bitfield msm register write data[31:0] */ +#define HW_ATL_MSM_REG_WR_DATA_SHIFT 0 +/* width of bitfield msm register write data[31:0] */ +#define HW_ATL_MSM_REG_WR_DATA_WIDTH 32 +/* default value of bitfield msm register write data[31:0] */ +#define HW_ATL_MSM_REG_WR_DATA_DEFAULT 0x0 + +/* mac_phy register write strobe bitfield definitions + * preprocessor definitions for the bitfield "register write strobe". + * port="pif_msm_reg_wren_i" + */ + +/* register address for bitfield register write strobe */ +#define HW_ATL_MSM_REG_WR_STROBE_ADR 0x00004400 +/* bitmask for bitfield register write strobe */ +#define HW_ATL_MSM_REG_WR_STROBE_MSK 0x00000100 +/* inverted bitmask for bitfield register write strobe */ +#define HW_ATL_MSM_REG_WR_STROBE_MSKN 0xfffffeff +/* lower bit position of bitfield register write strobe */ +#define HW_ATL_MSM_REG_WR_STROBE_SHIFT 8 +/* width of bitfield register write strobe */ +#define HW_ATL_MSM_REG_WR_STROBE_WIDTH 1 +/* default value of bitfield register write strobe */ +#define HW_ATL_MSM_REG_WR_STROBE_DEFAULT 0x0 + +/* mif soft reset bitfield definitions + * preprocessor definitions for the bitfield "soft reset". + * port="pif_glb_res_i" + */ + +/* register address for bitfield soft reset */ +#define HW_ATL_GLB_SOFT_RES_ADR 0x00000000 +/* bitmask for bitfield soft reset */ +#define HW_ATL_GLB_SOFT_RES_MSK 0x00008000 +/* inverted bitmask for bitfield soft reset */ +#define HW_ATL_GLB_SOFT_RES_MSKN 0xffff7fff +/* lower bit position of bitfield soft reset */ +#define HW_ATL_GLB_SOFT_RES_SHIFT 15 +/* width of bitfield soft reset */ +#define HW_ATL_GLB_SOFT_RES_WIDTH 1 +/* default value of bitfield soft reset */ +#define HW_ATL_GLB_SOFT_RES_DEFAULT 0x0 + +/* mif register reset disable bitfield definitions + * preprocessor definitions for the bitfield "register reset disable". 
+ * port="pif_glb_reg_res_dsbl_i" + */ + +/* register address for bitfield register reset disable */ +#define HW_ATL_GLB_REG_RES_DIS_ADR 0x00000000 +/* bitmask for bitfield register reset disable */ +#define HW_ATL_GLB_REG_RES_DIS_MSK 0x00004000 +/* inverted bitmask for bitfield register reset disable */ +#define HW_ATL_GLB_REG_RES_DIS_MSKN 0xffffbfff +/* lower bit position of bitfield register reset disable */ +#define HW_ATL_GLB_REG_RES_DIS_SHIFT 14 +/* width of bitfield register reset disable */ +#define HW_ATL_GLB_REG_RES_DIS_WIDTH 1 +/* default value of bitfield register reset disable */ +#define HW_ATL_GLB_REG_RES_DIS_DEFAULT 0x1 + +/* tx dma debug control definitions */ +#define HW_ATL_TX_DMA_DEBUG_CTL_ADR 0x00008920u + +/* tx dma descriptor base address msw definitions */ +#define HW_ATL_TX_DMA_DESC_BASE_ADDRMSW_ADR(descriptor) \ + (0x00007c04u + (descriptor) * 0x40) + +/* tx dma total request limit */ +#define HW_ATL_TX_DMA_TOTAL_REQ_LIMIT_ADR 0x00007b20u + +/* tx interrupt moderation control register definitions + * Preprocessor definitions for TX Interrupt Moderation Control Register + * Base Address: 0x00008980 + * Parameter: queue {Q} | stride size 0x4 | range [0, 31] + */ + +#define HW_ATL_TX_INTR_MODERATION_CTL_ADR(queue) (0x00008980u + (queue) * 0x4) + +/* pcie reg_res_dsbl bitfield definitions + * preprocessor definitions for the bitfield "reg_res_dsbl". + * port="pif_pci_reg_res_dsbl_i" + */ + +/* register address for bitfield reg_res_dsbl */ +#define HW_ATL_PCI_REG_RES_DSBL_ADR 0x00001000 +/* bitmask for bitfield reg_res_dsbl */ +#define HW_ATL_PCI_REG_RES_DSBL_MSK 0x20000000 +/* inverted bitmask for bitfield reg_res_dsbl */ +#define HW_ATL_PCI_REG_RES_DSBL_MSKN 0xdfffffff +/* lower bit position of bitfield reg_res_dsbl */ +#define HW_ATL_PCI_REG_RES_DSBL_SHIFT 29 +/* width of bitfield reg_res_dsbl */ +#define HW_ATL_PCI_REG_RES_DSBL_WIDTH 1 +/* default value of bitfield reg_res_dsbl */ +#define HW_ATL_PCI_REG_RES_DSBL_DEFAULT 0x1 + +/* PCI core control register */ +#define HW_ATL_PCI_REG_CONTROL6_ADR 0x1014u + +/* global microprocessor scratch pad definitions */ +#define HW_ATL_GLB_CPU_SCRATCH_SCP_ADR(scratch_scp) \ + (0x00000300u + (scratch_scp) * 0x4) + +/* register address for bitfield uP Force Interrupt */ +#define HW_ATL_MCP_UP_FORCE_INTERRUPT_ADR 0x00000404 +/* bitmask for bitfield uP Force Interrupt */ +#define HW_ATL_MCP_UP_FORCE_INTERRUPT_MSK 0x00000002 +/* inverted bitmask for bitfield uP Force Interrupt */ +#define HW_ATL_MCP_UP_FORCE_INTERRUPT_MSKN 0xFFFFFFFD +/* lower bit position of bitfield uP Force Interrupt */ +#define HW_ATL_MCP_UP_FORCE_INTERRUPT_SHIFT 1 +/* width of bitfield uP Force Interrupt */ +#define HW_ATL_MCP_UP_FORCE_INTERRUPT_WIDTH 1 +/* default value of bitfield uP Force Interrupt */ +#define HW_ATL_MCP_UP_FORCE_INTERRUPT_DEFAULT 0x0 + +#endif /* HW_ATL_LLH_INTERNAL_H */ diff --git a/src/spdk/dpdk/drivers/net/atlantic/hw_atl/hw_atl_utils.c b/src/spdk/dpdk/drivers/net/atlantic/hw_atl/hw_atl_utils.c new file mode 100644 index 000000000..84d11ab3a --- /dev/null +++ b/src/spdk/dpdk/drivers/net/atlantic/hw_atl/hw_atl_utils.c @@ -0,0 +1,944 @@ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) +/* Copyright (C) 2014-2017 aQuantia Corporation. */ + +/* File hw_atl_utils.c: Definition of common functions for Atlantic hardware + * abstraction layer. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "../atl_hw_regs.h" + +#include "hw_atl_llh.h" +#include "hw_atl_llh_internal.h" +#include "../atl_logs.h" + +#define HW_ATL_UCP_0X370_REG 0x0370U + +#define HW_ATL_MIF_CMD 0x0200U +#define HW_ATL_MIF_ADDR 0x0208U +#define HW_ATL_MIF_VAL 0x020CU + +#define HW_ATL_FW_SM_RAM 0x2U +#define HW_ATL_MPI_FW_VERSION 0x18 +#define HW_ATL_MPI_CONTROL_ADR 0x0368U +#define HW_ATL_MPI_STATE_ADR 0x036CU + +#define HW_ATL_MPI_STATE_MSK 0x00FFU +#define HW_ATL_MPI_STATE_SHIFT 0U +#define HW_ATL_MPI_SPEED_MSK 0x00FF0000U +#define HW_ATL_MPI_SPEED_SHIFT 16U +#define HW_ATL_MPI_DIRTY_WAKE_MSK 0x02000000U + +#define HW_ATL_MPI_DAISY_CHAIN_STATUS 0x704 +#define HW_ATL_MPI_BOOT_EXIT_CODE 0x388 + +#define HW_ATL_MAC_PHY_CONTROL 0x4000 +#define HW_ATL_MAC_PHY_MPI_RESET_BIT 0x1D + +#define HW_ATL_FW_VER_1X 0x01050006U +#define HW_ATL_FW_VER_2X 0x02000000U +#define HW_ATL_FW_VER_3X 0x03000000U + +#define FORCE_FLASHLESS 0 + +static int hw_atl_utils_ver_match(u32 ver_expected, u32 ver_actual); +static int hw_atl_utils_mpi_set_state(struct aq_hw_s *self, + enum hal_atl_utils_fw_state_e state); + + +int hw_atl_utils_initfw(struct aq_hw_s *self, const struct aq_fw_ops **fw_ops) +{ + int err = 0; + + err = hw_atl_utils_soft_reset(self); + if (err) + return err; + + hw_atl_utils_hw_chip_features_init(self, + &self->chip_features); + + hw_atl_utils_get_fw_version(self, &self->fw_ver_actual); + + if (hw_atl_utils_ver_match(HW_ATL_FW_VER_1X, + self->fw_ver_actual) == 0) { + *fw_ops = &aq_fw_1x_ops; + } else if (hw_atl_utils_ver_match(HW_ATL_FW_VER_2X, + self->fw_ver_actual) == 0) { + *fw_ops = &aq_fw_2x_ops; + } else if (hw_atl_utils_ver_match(HW_ATL_FW_VER_3X, + self->fw_ver_actual) == 0) { + *fw_ops = &aq_fw_2x_ops; + } else { + PMD_DRV_LOG(ERR, "Bad FW version detected: %x\n", + self->fw_ver_actual); + return -EOPNOTSUPP; + } + self->aq_fw_ops = *fw_ops; + err = self->aq_fw_ops->init(self); + return err; +} + +static int hw_atl_utils_soft_reset_flb(struct aq_hw_s *self) +{ + u32 gsr, val; + int k = 0; + + aq_hw_write_reg(self, 0x404, 0x40e1); + AQ_HW_SLEEP(50); + + /* Cleanup SPI */ + val = aq_hw_read_reg(self, 0x53C); + aq_hw_write_reg(self, 0x53C, val | 0x10); + + gsr = aq_hw_read_reg(self, HW_ATL_GLB_SOFT_RES_ADR); + aq_hw_write_reg(self, HW_ATL_GLB_SOFT_RES_ADR, (gsr & 0xBFFF) | 0x8000); + + /* Kickstart MAC */ + aq_hw_write_reg(self, 0x404, 0x80e0); + aq_hw_write_reg(self, 0x32a8, 0x0); + aq_hw_write_reg(self, 0x520, 0x1); + + /* Reset SPI again because of possible interrupted SPI burst */ + val = aq_hw_read_reg(self, 0x53C); + aq_hw_write_reg(self, 0x53C, val | 0x10); + AQ_HW_SLEEP(10); + /* Clear SPI reset state */ + aq_hw_write_reg(self, 0x53C, val & ~0x10); + + aq_hw_write_reg(self, 0x404, 0x180e0); + + for (k = 0; k < 1000; k++) { + u32 flb_status = aq_hw_read_reg(self, + HW_ATL_MPI_DAISY_CHAIN_STATUS); + + flb_status = flb_status & 0x10; + if (flb_status) + break; + AQ_HW_SLEEP(10); + } + if (k == 1000) { + PMD_DRV_LOG(ERR, "MAC kickstart failed\n"); + return -EIO; + } + + /* FW reset */ + aq_hw_write_reg(self, 0x404, 0x80e0); + AQ_HW_SLEEP(50); + aq_hw_write_reg(self, 0x3a0, 0x1); + + /* Kickstart PHY - skipped */ + + /* Global software reset*/ + hw_atl_rx_rx_reg_res_dis_set(self, 0U); + hw_atl_tx_tx_reg_res_dis_set(self, 0U); + aq_hw_write_reg_bit(self, HW_ATL_MAC_PHY_CONTROL, + BIT(HW_ATL_MAC_PHY_MPI_RESET_BIT), + HW_ATL_MAC_PHY_MPI_RESET_BIT, 0x0); + gsr = aq_hw_read_reg(self, HW_ATL_GLB_SOFT_RES_ADR); + 
aq_hw_write_reg(self, HW_ATL_GLB_SOFT_RES_ADR, (gsr & 0xBFFF) | 0x8000); + + for (k = 0; k < 1000; k++) { + u32 fw_state = aq_hw_read_reg(self, HW_ATL_MPI_FW_VERSION); + + if (fw_state) + break; + AQ_HW_SLEEP(10); + } + if (k == 1000) { + PMD_DRV_LOG(ERR, "FW kickstart failed\n"); + return -EIO; + } + /* Old FW requires fixed delay after init */ + AQ_HW_SLEEP(15); + + return 0; +} + +static int hw_atl_utils_soft_reset_rbl(struct aq_hw_s *self) +{ + u32 gsr, val, rbl_status; + int k; + + aq_hw_write_reg(self, 0x404, 0x40e1); + aq_hw_write_reg(self, 0x3a0, 0x1); + aq_hw_write_reg(self, 0x32a8, 0x0); + + /* Alter RBL status */ + aq_hw_write_reg(self, 0x388, 0xDEAD); + + /* Cleanup SPI */ + val = aq_hw_read_reg(self, 0x53C); + aq_hw_write_reg(self, 0x53C, val | 0x10); + + /* Global software reset*/ + hw_atl_rx_rx_reg_res_dis_set(self, 0U); + hw_atl_tx_tx_reg_res_dis_set(self, 0U); + aq_hw_write_reg_bit(self, HW_ATL_MAC_PHY_CONTROL, + BIT(HW_ATL_MAC_PHY_MPI_RESET_BIT), + HW_ATL_MAC_PHY_MPI_RESET_BIT, 0x0); + gsr = aq_hw_read_reg(self, HW_ATL_GLB_SOFT_RES_ADR); + aq_hw_write_reg(self, HW_ATL_GLB_SOFT_RES_ADR, + (gsr & 0xFFFFBFFF) | 0x8000); + + if (FORCE_FLASHLESS) + aq_hw_write_reg(self, 0x534, 0x0); + + aq_hw_write_reg(self, 0x404, 0x40e0); + + /* Wait for RBL boot */ + for (k = 0; k < 1000; k++) { + rbl_status = aq_hw_read_reg(self, 0x388) & 0xFFFF; + if (rbl_status && rbl_status != 0xDEAD) + break; + AQ_HW_SLEEP(10); + } + if (!rbl_status || rbl_status == 0xDEAD) { + PMD_DRV_LOG(ERR, "RBL Restart failed"); + return -EIO; + } + + /* Restore NVR */ + if (FORCE_FLASHLESS) + aq_hw_write_reg(self, 0x534, 0xA0); + + if (rbl_status == 0xF1A7) { + PMD_DRV_LOG(ERR, "No FW detected. Dynamic FW load not implemented\n"); + return -EOPNOTSUPP; + } + + for (k = 0; k < 1000; k++) { + u32 fw_state = aq_hw_read_reg(self, HW_ATL_MPI_FW_VERSION); + + if (fw_state) + break; + AQ_HW_SLEEP(10); + } + if (k == 1000) { + PMD_DRV_LOG(ERR, "FW kickstart failed\n"); + return -EIO; + } + /* Old FW requires fixed delay after init */ + AQ_HW_SLEEP(15); + + return 0; +} + +int hw_atl_utils_soft_reset(struct aq_hw_s *self) +{ + int err = 0; + int k; + u32 boot_exit_code = 0; + + for (k = 0; k < 1000; ++k) { + u32 flb_status = aq_hw_read_reg(self, + HW_ATL_MPI_DAISY_CHAIN_STATUS); + boot_exit_code = aq_hw_read_reg(self, + HW_ATL_MPI_BOOT_EXIT_CODE); + if (flb_status != 0x06000000 || boot_exit_code != 0) + break; + } + + if (k == 1000) { + PMD_DRV_LOG(ERR, "Neither RBL nor FLB firmware started\n"); + return -EOPNOTSUPP; + } + + self->rbl_enabled = (boot_exit_code != 0); + + /* FW 1.x may bootup in an invalid POWER state (WOL feature). 
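
The firmware-ops selection in hw_atl_utils_initfw() above hinges on hw_atl_utils_ver_match() (defined just below): the major byte must match exactly and the actual minor must not be older than the expected one. A worked example as a hypothetical, illustrative check function, verified against the implementation that follows:

#include <assert.h>

/* illustrative only: hw_atl_utils_ver_match() returns 0 on a match */
static void
atl_ver_match_example(void)
{
	/* same major (0x01), newer minor (0x05000A >= 0x050006): match */
	assert(hw_atl_utils_ver_match(HW_ATL_FW_VER_1X, 0x0105000AU) == 0);
	/* different major byte (0x02 vs 0x01): no match */
	assert(hw_atl_utils_ver_match(HW_ATL_FW_VER_1X, HW_ATL_FW_VER_2X) != 0);
}
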
+ * We should work around this by forcing its state back to DEINIT + */ + if (!hw_atl_utils_ver_match(HW_ATL_FW_VER_1X, + aq_hw_read_reg(self, + HW_ATL_MPI_FW_VERSION))) { + hw_atl_utils_mpi_set_state(self, MPI_DEINIT); + AQ_HW_WAIT_FOR((aq_hw_read_reg(self, HW_ATL_MPI_STATE_ADR) & + HW_ATL_MPI_STATE_MSK) == MPI_DEINIT, + 10, 1000U); + } + + if (self->rbl_enabled) + err = hw_atl_utils_soft_reset_rbl(self); + else + err = hw_atl_utils_soft_reset_flb(self); + + return err; +} + +int hw_atl_utils_fw_downld_dwords(struct aq_hw_s *self, u32 a, + u32 *p, u32 cnt) +{ + int err = 0; + + AQ_HW_WAIT_FOR(hw_atl_reg_glb_cpu_sem_get(self, + HW_ATL_FW_SM_RAM) == 1U, + 1U, 10000U); + + if (err < 0) { + bool is_locked; + + hw_atl_reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM); + is_locked = hw_atl_reg_glb_cpu_sem_get(self, HW_ATL_FW_SM_RAM); + if (!is_locked) { + err = -ETIMEDOUT; + goto err_exit; + } + } + + aq_hw_write_reg(self, HW_ATL_MIF_ADDR, a); + + for (++cnt; --cnt && !err;) { + aq_hw_write_reg(self, HW_ATL_MIF_CMD, 0x00008000U); + + if (IS_CHIP_FEATURE(REVISION_B1)) + AQ_HW_WAIT_FOR(a != aq_hw_read_reg(self, + HW_ATL_MIF_ADDR), + 1, 1000U); + else + AQ_HW_WAIT_FOR(!(0x100 & aq_hw_read_reg(self, + HW_ATL_MIF_CMD)), + 1, 1000U); + if (err) { + err = -ETIMEDOUT; + goto err_exit; + } + + *(p++) = aq_hw_read_reg(self, HW_ATL_MIF_VAL); + a += 4; + } + + hw_atl_reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM); + +err_exit: + return err; +} + +int hw_atl_utils_fw_upload_dwords(struct aq_hw_s *self, u32 a, u32 *p, + u32 cnt) +{ + int err = 0; + bool is_locked; + + is_locked = hw_atl_reg_glb_cpu_sem_get(self, HW_ATL_FW_SM_RAM); + if (!is_locked) { + err = -ETIMEDOUT; + goto err_exit; + } + if (IS_CHIP_FEATURE(REVISION_B1)) { + u32 mbox_offset = (a - self->rpc_addr) / sizeof(u32); + u32 data_offset = 0; + + for (; data_offset < cnt; ++mbox_offset, ++data_offset) { + aq_hw_write_reg(self, 0x328, p[data_offset]); + aq_hw_write_reg(self, 0x32C, + (0x80000000 | (0xFFFF & (mbox_offset * 4)))); + hw_atl_mcp_up_force_intr_set(self, 1); + /* 1000 times by 10us = 10ms */ + AQ_HW_WAIT_FOR((aq_hw_read_reg(self, + 0x32C) & 0xF0000000) != 0x80000000, + 10, 1000); + } + } else { + u32 offset = 0; + + aq_hw_write_reg(self, 0x208, a); + + for (; offset < cnt; ++offset) { + aq_hw_write_reg(self, 0x20C, p[offset]); + aq_hw_write_reg(self, 0x200, 0xC000); + + AQ_HW_WAIT_FOR((aq_hw_read_reg(self, 0x200U) + & 0x100) == 0, 10, 1000); + } + } + + hw_atl_reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM); + +err_exit: + return err; +} + +static int hw_atl_utils_ver_match(u32 ver_expected, u32 ver_actual) +{ + int err = 0; + const u32 dw_major_mask = 0xff000000U; + const u32 dw_minor_mask = 0x00ffffffU; + + err = (dw_major_mask & (ver_expected ^ ver_actual)) ? -EOPNOTSUPP : 0; + if (err < 0) + goto err_exit; + err = ((dw_minor_mask & ver_expected) > (dw_minor_mask & ver_actual)) ? 
+ -EOPNOTSUPP : 0; +err_exit: + return err; +} + +static int hw_atl_utils_init_ucp(struct aq_hw_s *self) +{ + int err = 0; + + if (!aq_hw_read_reg(self, 0x370U)) { + unsigned int rnd = (uint32_t)rte_rand(); + unsigned int ucp_0x370 = 0U; + + ucp_0x370 = 0x02020202U | (0xFEFEFEFEU & rnd); + aq_hw_write_reg(self, HW_ATL_UCP_0X370_REG, ucp_0x370); + } + + hw_atl_reg_glb_cpu_scratch_scp_set(self, 0x00000000U, 25U); + + /* check 10 times by 1ms */ + AQ_HW_WAIT_FOR(0U != (self->mbox_addr = + aq_hw_read_reg(self, 0x360U)), 1000U, 10U); + AQ_HW_WAIT_FOR(0U != (self->rpc_addr = + aq_hw_read_reg(self, 0x334U)), 1000U, 100U); + + return err; +} + +#define HW_ATL_RPC_CONTROL_ADR 0x0338U +#define HW_ATL_RPC_STATE_ADR 0x033CU + +struct aq_hw_atl_utils_fw_rpc_tid_s { + union { + u32 val; + struct { + u16 tid; + u16 len; + }; + }; +}; + +#define hw_atl_utils_fw_rpc_init(_H_) hw_atl_utils_fw_rpc_wait(_H_, NULL) + +int hw_atl_utils_fw_rpc_call(struct aq_hw_s *self, unsigned int rpc_size) +{ + int err = 0; + struct aq_hw_atl_utils_fw_rpc_tid_s sw; + + if (!IS_CHIP_FEATURE(MIPS)) { + err = -1; + goto err_exit; + } + err = hw_atl_utils_fw_upload_dwords(self, self->rpc_addr, + (u32 *)(void *)&self->rpc, + (rpc_size + sizeof(u32) - + sizeof(u8)) / sizeof(u32)); + if (err < 0) + goto err_exit; + + sw.tid = 0xFFFFU & (++self->rpc_tid); + sw.len = (u16)rpc_size; + aq_hw_write_reg(self, HW_ATL_RPC_CONTROL_ADR, sw.val); + +err_exit: + return err; +} + +int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self, + struct hw_aq_atl_utils_fw_rpc **rpc) +{ + int err = 0; + struct aq_hw_atl_utils_fw_rpc_tid_s sw; + struct aq_hw_atl_utils_fw_rpc_tid_s fw; + + do { + sw.val = aq_hw_read_reg(self, HW_ATL_RPC_CONTROL_ADR); + + self->rpc_tid = sw.tid; + + AQ_HW_WAIT_FOR(sw.tid == + (fw.val = + aq_hw_read_reg(self, HW_ATL_RPC_STATE_ADR), + fw.tid), 1000U, 100U); + if (err < 0) + goto err_exit; + + if (fw.len == 0xFFFFU) { + err = hw_atl_utils_fw_rpc_call(self, sw.len); + if (err < 0) + goto err_exit; + } + } while (sw.tid != fw.tid || 0xFFFFU == fw.len); + + if (rpc) { + if (fw.len) { + err = + hw_atl_utils_fw_downld_dwords(self, + self->rpc_addr, + (u32 *)(void *) + &self->rpc, + (fw.len + sizeof(u32) - + sizeof(u8)) / + sizeof(u32)); + if (err < 0) + goto err_exit; + } + + *rpc = &self->rpc; + } + +err_exit: + return err; +} + +static int hw_atl_utils_mpi_create(struct aq_hw_s *self) +{ + int err = 0; + + err = hw_atl_utils_init_ucp(self); + if (err < 0) + goto err_exit; + + err = hw_atl_utils_fw_rpc_init(self); + if (err < 0) + goto err_exit; + +err_exit: + return err; +} + +int hw_atl_utils_mpi_read_mbox(struct aq_hw_s *self, + struct hw_aq_atl_utils_mbox_header *pmbox) +{ + return hw_atl_utils_fw_downld_dwords(self, + self->mbox_addr, + (u32 *)(void *)pmbox, + sizeof(*pmbox) / sizeof(u32)); +} + +void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self, + struct hw_aq_atl_utils_mbox *pmbox) +{ + int err = 0; + + err = hw_atl_utils_fw_downld_dwords(self, + self->mbox_addr, + (u32 *)(void *)pmbox, + sizeof(*pmbox) / sizeof(u32)); + if (err < 0) + goto err_exit; + + if (IS_CHIP_FEATURE(REVISION_A0)) { + unsigned int mtu = 1514; + pmbox->stats.ubrc = pmbox->stats.uprc * mtu; + pmbox->stats.ubtc = pmbox->stats.uptc * mtu; + } else { + pmbox->stats.dpc = hw_atl_rpb_rx_dma_drop_pkt_cnt_get(self); + } + +err_exit:; +} + +static +int hw_atl_utils_mpi_set_speed(struct aq_hw_s *self, u32 speed) +{ + u32 val = aq_hw_read_reg(self, HW_ATL_MPI_CONTROL_ADR); + + val = val & ~HW_ATL_MPI_SPEED_MSK; + val |= speed << HW_ATL_MPI_SPEED_SHIFT; + 
aq_hw_write_reg(self, HW_ATL_MPI_CONTROL_ADR, val); + + return 0; +} + +int hw_atl_utils_mpi_set_state(struct aq_hw_s *self, + enum hal_atl_utils_fw_state_e state) +{ + int err = 0; + u32 transaction_id = 0; + struct hw_aq_atl_utils_mbox_header mbox; + u32 val = aq_hw_read_reg(self, HW_ATL_MPI_CONTROL_ADR); + + if (state == MPI_RESET) { + hw_atl_utils_mpi_read_mbox(self, &mbox); + + transaction_id = mbox.transaction_id; + + AQ_HW_WAIT_FOR(transaction_id != + (hw_atl_utils_mpi_read_mbox(self, &mbox), + mbox.transaction_id), + 1000U, 100U); + if (err < 0) + goto err_exit; + } + /* On interface DEINIT we disable DW (raise bit) + * Otherwise enable DW (clear bit) + */ + if (state == MPI_DEINIT || state == MPI_POWER) + val |= HW_ATL_MPI_DIRTY_WAKE_MSK; + else + val &= ~HW_ATL_MPI_DIRTY_WAKE_MSK; + + /* Set new state bits */ + val = val & ~HW_ATL_MPI_STATE_MSK; + val |= state & HW_ATL_MPI_STATE_MSK; + + aq_hw_write_reg(self, HW_ATL_MPI_CONTROL_ADR, val); +err_exit: + return err; +} + +int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self) +{ + u32 cp0x036C = aq_hw_read_reg(self, HW_ATL_MPI_STATE_ADR); + u32 link_speed_mask = cp0x036C >> HW_ATL_MPI_SPEED_SHIFT; + struct aq_hw_link_status_s *link_status = &self->aq_link_status; + + if (!link_speed_mask) { + link_status->mbps = 0U; + } else { + switch (link_speed_mask) { + case HAL_ATLANTIC_RATE_10G: + link_status->mbps = 10000U; + break; + + case HAL_ATLANTIC_RATE_5G: + case HAL_ATLANTIC_RATE_5GSR: + link_status->mbps = 5000U; + break; + + case HAL_ATLANTIC_RATE_2GS: + link_status->mbps = 2500U; + break; + + case HAL_ATLANTIC_RATE_1G: + link_status->mbps = 1000U; + break; + + case HAL_ATLANTIC_RATE_100M: + link_status->mbps = 100U; + break; + + default: + return -EBUSY; + } + } + + return 0; +} + +static int hw_atl_utils_get_mac_permanent(struct aq_hw_s *self, + u8 *mac) +{ + int err = 0; + u32 h = 0U; + u32 l = 0U; + u32 mac_addr[2]; + + if (!aq_hw_read_reg(self, HW_ATL_UCP_0X370_REG)) { + unsigned int rnd = (uint32_t)rte_rand(); + unsigned int ucp_0x370 = 0; + + //get_random_bytes(&rnd, sizeof(unsigned int)); + + ucp_0x370 = 0x02020202 | (0xFEFEFEFE & rnd); + aq_hw_write_reg(self, HW_ATL_UCP_0X370_REG, ucp_0x370); + } + + err = hw_atl_utils_fw_downld_dwords(self, + aq_hw_read_reg(self, 0x00000374U) + + (40U * 4U), + mac_addr, + ARRAY_SIZE(mac_addr)); + if (err < 0) { + mac_addr[0] = 0U; + mac_addr[1] = 0U; + err = 0; + } else { + mac_addr[0] = rte_constant_bswap32(mac_addr[0]); + mac_addr[1] = rte_constant_bswap32(mac_addr[1]); + } + + rte_ether_addr_copy((struct rte_ether_addr *)mac_addr, + (struct rte_ether_addr *)mac); + + if ((mac[0] & 0x01U) || ((mac[0] | mac[1] | mac[2]) == 0x00U)) { + /* chip revision */ + l = 0xE3000000U + | (0xFFFFU & aq_hw_read_reg(self, HW_ATL_UCP_0X370_REG)) + | (0x00 << 16); + h = 0x8001300EU; + + mac[5] = (u8)(0xFFU & l); + l >>= 8; + mac[4] = (u8)(0xFFU & l); + l >>= 8; + mac[3] = (u8)(0xFFU & l); + l >>= 8; + mac[2] = (u8)(0xFFU & l); + mac[1] = (u8)(0xFFU & h); + h >>= 8; + mac[0] = (u8)(0xFFU & h); + } + + return err; +} + +unsigned int hw_atl_utils_mbps_2_speed_index(unsigned int mbps) +{ + unsigned int ret = 0U; + + switch (mbps) { + case 100U: + ret = 5U; + break; + + case 1000U: + ret = 4U; + break; + + case 2500U: + ret = 3U; + break; + + case 5000U: + ret = 1U; + break; + + case 10000U: + ret = 0U; + break; + + default: + break; + } + return ret; +} + +void hw_atl_utils_hw_chip_features_init(struct aq_hw_s *self, u32 *p) +{ + u32 chip_features = 0U; + u32 val = hw_atl_reg_glb_mif_id_get(self); + u32 
mif_rev = val & 0xFFU; + + if ((0xFU & mif_rev) == 1U) { + chip_features |= HAL_ATLANTIC_UTILS_CHIP_REVISION_A0 | + HAL_ATLANTIC_UTILS_CHIP_MPI_AQ | + HAL_ATLANTIC_UTILS_CHIP_MIPS; + } else if ((0xFU & mif_rev) == 2U) { + chip_features |= HAL_ATLANTIC_UTILS_CHIP_REVISION_B0 | + HAL_ATLANTIC_UTILS_CHIP_MPI_AQ | + HAL_ATLANTIC_UTILS_CHIP_MIPS | + HAL_ATLANTIC_UTILS_CHIP_TPO2 | + HAL_ATLANTIC_UTILS_CHIP_RPF2; + } else if ((0xFU & mif_rev) == 0xAU) { + chip_features |= HAL_ATLANTIC_UTILS_CHIP_REVISION_B1 | + HAL_ATLANTIC_UTILS_CHIP_MPI_AQ | + HAL_ATLANTIC_UTILS_CHIP_MIPS | + HAL_ATLANTIC_UTILS_CHIP_TPO2 | + HAL_ATLANTIC_UTILS_CHIP_RPF2; + } + + *p = chip_features; +} + +static int hw_atl_fw1x_deinit(struct aq_hw_s *self) +{ + hw_atl_utils_mpi_set_speed(self, 0); + hw_atl_utils_mpi_set_state(self, MPI_DEINIT); + return 0; +} + +int hw_atl_utils_update_stats(struct aq_hw_s *self) +{ + struct hw_aq_atl_utils_mbox mbox; + + hw_atl_utils_mpi_read_stats(self, &mbox); + +#define AQ_SDELTA(_N_) (self->curr_stats._N_ += \ + mbox.stats._N_ - self->last_stats._N_) + + if (1) {//self->aq_link_status.mbps) { + AQ_SDELTA(uprc); + AQ_SDELTA(mprc); + AQ_SDELTA(bprc); + AQ_SDELTA(erpt); + + AQ_SDELTA(uptc); + AQ_SDELTA(mptc); + AQ_SDELTA(bptc); + AQ_SDELTA(erpr); + AQ_SDELTA(ubrc); + AQ_SDELTA(ubtc); + AQ_SDELTA(mbrc); + AQ_SDELTA(mbtc); + AQ_SDELTA(bbrc); + AQ_SDELTA(bbtc); + AQ_SDELTA(dpc); + } +#undef AQ_SDELTA + self->curr_stats.dma_pkt_rc = + hw_atl_stats_rx_dma_good_pkt_counterlsw_get(self) + + ((u64)hw_atl_stats_rx_dma_good_pkt_countermsw_get(self) << 32); + self->curr_stats.dma_pkt_tc = + hw_atl_stats_tx_dma_good_pkt_counterlsw_get(self) + + ((u64)hw_atl_stats_tx_dma_good_pkt_countermsw_get(self) << 32); + self->curr_stats.dma_oct_rc = + hw_atl_stats_rx_dma_good_octet_counterlsw_get(self) + + ((u64)hw_atl_stats_rx_dma_good_octet_countermsw_get(self) << 32); + self->curr_stats.dma_oct_tc = + hw_atl_stats_tx_dma_good_octet_counterlsw_get(self) + + ((u64)hw_atl_stats_tx_dma_good_octet_countermsw_get(self) << 32); + + self->curr_stats.dpc = hw_atl_rpb_rx_dma_drop_pkt_cnt_get(self); + + memcpy(&self->last_stats, &mbox.stats, sizeof(mbox.stats)); + + return 0; +} + +struct aq_stats_s *hw_atl_utils_get_hw_stats(struct aq_hw_s *self) +{ + return &self->curr_stats; +} + +static const u32 hw_atl_utils_hw_mac_regs[] = { + 0x00005580U, 0x00005590U, 0x000055B0U, 0x000055B4U, + 0x000055C0U, 0x00005B00U, 0x00005B04U, 0x00005B08U, + 0x00005B0CU, 0x00005B10U, 0x00005B14U, 0x00005B18U, + 0x00005B1CU, 0x00005B20U, 0x00005B24U, 0x00005B28U, + 0x00005B2CU, 0x00005B30U, 0x00005B34U, 0x00005B38U, + 0x00005B3CU, 0x00005B40U, 0x00005B44U, 0x00005B48U, + 0x00005B4CU, 0x00005B50U, 0x00005B54U, 0x00005B58U, + 0x00005B5CU, 0x00005B60U, 0x00005B64U, 0x00005B68U, + 0x00005B6CU, 0x00005B70U, 0x00005B74U, 0x00005B78U, + 0x00005B7CU, 0x00007C00U, 0x00007C04U, 0x00007C08U, + 0x00007C0CU, 0x00007C10U, 0x00007C14U, 0x00007C18U, + 0x00007C1CU, 0x00007C20U, 0x00007C40U, 0x00007C44U, + 0x00007C48U, 0x00007C4CU, 0x00007C50U, 0x00007C54U, + 0x00007C58U, 0x00007C5CU, 0x00007C60U, 0x00007C80U, + 0x00007C84U, 0x00007C88U, 0x00007C8CU, 0x00007C90U, + 0x00007C94U, 0x00007C98U, 0x00007C9CU, 0x00007CA0U, + 0x00007CC0U, 0x00007CC4U, 0x00007CC8U, 0x00007CCCU, + 0x00007CD0U, 0x00007CD4U, 0x00007CD8U, 0x00007CDCU, + 0x00007CE0U, 0x00000300U, 0x00000304U, 0x00000308U, + 0x0000030cU, 0x00000310U, 0x00000314U, 0x00000318U, + 0x0000031cU, 0x00000360U, 0x00000364U, 0x00000368U, + 0x0000036cU, 0x00000370U, 0x00000374U, 0x00006900U, +}; + +unsigned int 
hw_atl_utils_hw_get_reg_length(void) +{ + return ARRAY_SIZE(hw_atl_utils_hw_mac_regs); +} + +int hw_atl_utils_hw_get_regs(struct aq_hw_s *self, + u32 *regs_buff) +{ + unsigned int i = 0U; + unsigned int mac_regs_count = hw_atl_utils_hw_get_reg_length(); + + for (i = 0; i < mac_regs_count; i++) + regs_buff[i] = aq_hw_read_reg(self, + hw_atl_utils_hw_mac_regs[i]); + return 0; +} + +int hw_atl_utils_get_fw_version(struct aq_hw_s *self, u32 *fw_version) +{ + *fw_version = aq_hw_read_reg(self, 0x18U); + return 0; +} + +static int aq_fw1x_set_wol(struct aq_hw_s *self, bool wol_enabled, u8 *mac) +{ + struct hw_aq_atl_utils_fw_rpc *prpc = NULL; + unsigned int rpc_size = 0U; + int err = 0; + + err = hw_atl_utils_fw_rpc_wait(self, &prpc); + if (err < 0) + goto err_exit; + + memset(prpc, 0, sizeof(*prpc)); + + if (wol_enabled) { + rpc_size = sizeof(prpc->msg_id) + sizeof(prpc->msg_wol); + + prpc->msg_id = HAL_ATLANTIC_UTILS_FW_MSG_WOL_ADD; + prpc->msg_wol.priority = 0x10000000; /* normal priority */ + prpc->msg_wol.pattern_id = 1U; + prpc->msg_wol.wol_packet_type = 2U; /* Magic Packet */ + + rte_ether_addr_copy((struct rte_ether_addr *)mac, + (struct rte_ether_addr *)&prpc->msg_wol.wol_pattern); + } else { + rpc_size = sizeof(prpc->msg_id) + sizeof(prpc->msg_del_id); + + prpc->msg_id = HAL_ATLANTIC_UTILS_FW_MSG_WOL_DEL; + prpc->msg_wol.pattern_id = 1U; + } + + err = hw_atl_utils_fw_rpc_call(self, rpc_size); + +err_exit: + return err; +} + +static +int aq_fw1x_set_power(struct aq_hw_s *self, + unsigned int power_state __rte_unused, + u8 *mac) +{ + struct hw_aq_atl_utils_fw_rpc *prpc = NULL; + unsigned int rpc_size = 0U; + int err = 0; + if (self->aq_nic_cfg->wol & AQ_NIC_WOL_ENABLED) { + err = aq_fw1x_set_wol(self, 1, mac); + + if (err < 0) + goto err_exit; + + rpc_size = sizeof(prpc->msg_id) + + sizeof(prpc->msg_enable_wakeup); + + err = hw_atl_utils_fw_rpc_wait(self, &prpc); + + if (err < 0) + goto err_exit; + + memset(prpc, 0, rpc_size); + + prpc->msg_id = HAL_ATLANTIC_UTILS_FW_MSG_ENABLE_WAKEUP; + prpc->msg_enable_wakeup.pattern_mask = 0x00000002; + + err = hw_atl_utils_fw_rpc_call(self, rpc_size); + if (err < 0) + goto err_exit; + } + + hw_atl_utils_mpi_set_speed(self, 0); + hw_atl_utils_mpi_set_state(self, MPI_POWER); +err_exit: + return err; +} + + + +const struct aq_fw_ops aq_fw_1x_ops = { + .init = hw_atl_utils_mpi_create, + .deinit = hw_atl_fw1x_deinit, + .reset = NULL, + .get_mac_permanent = hw_atl_utils_get_mac_permanent, + .set_link_speed = hw_atl_utils_mpi_set_speed, + .set_state = hw_atl_utils_mpi_set_state, + .update_link_status = hw_atl_utils_mpi_get_link_status, + .update_stats = hw_atl_utils_update_stats, + .set_power = aq_fw1x_set_power, + .get_temp = NULL, + .get_cable_len = NULL, + .set_eee_rate = NULL, + .get_eee_rate = NULL, + .set_flow_control = NULL, + .led_control = NULL, + .get_eeprom = NULL, + .set_eeprom = NULL, +}; diff --git a/src/spdk/dpdk/drivers/net/atlantic/hw_atl/hw_atl_utils.h b/src/spdk/dpdk/drivers/net/atlantic/hw_atl/hw_atl_utils.h new file mode 100644 index 000000000..d8fab010c --- /dev/null +++ b/src/spdk/dpdk/drivers/net/atlantic/hw_atl/hw_atl_utils.h @@ -0,0 +1,654 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) */ +/* Copyright (C) 2014-2017 aQuantia Corporation. */ + +/* File hw_atl_utils.h: Declaration of common functions for Atlantic hardware + * abstraction layer. 
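+ *
+ * Editorial note: besides the helper prototypes at the bottom, this header
+ * carries the hardware descriptor layouts, the firmware mailbox/RPC
+ * structures and the exported aq_fw_1x_ops/aq_fw_2x_ops tables shared by
+ * the FW 1.x and FW 2.x back ends.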
+ */ + +#ifndef HW_ATL_UTILS_H +#define HW_ATL_UTILS_H + +#define BIT(x) (1UL << (x)) +#define HW_ATL_FLUSH() { (void)aq_hw_read_reg(self, 0x10); } + +/* Hardware tx descriptor */ +struct hw_atl_txd_s { + u64 buf_addr; + + union { + struct { + u32 type:3; + u32:1; + u32 len:16; + u32 dd:1; + u32 eop:1; + u32 cmd:8; + u32:14; + u32 ct_idx:1; + u32 ct_en:1; + u32 pay_len:18; + } __rte_packed; + u64 flags; + }; +} __rte_packed; + +/* Hardware tx context descriptor */ +union hw_atl_txc_s { + struct { + u64 flags1; + u64 flags2; + }; + + struct { + u64:40; + u32 tun_len:8; + u32 out_len:16; + u32 type:3; + u32 idx:1; + u32 vlan_tag:16; + u32 cmd:4; + u32 l2_len:7; + u32 l3_len:9; + u32 l4_len:8; + u32 mss_len:16; + } __rte_packed; +} __rte_packed; + +enum aq_tx_desc_type { + tx_desc_type_desc = 1, + tx_desc_type_ctx = 2, +}; + +enum aq_tx_desc_cmd { + tx_desc_cmd_vlan = 1, + tx_desc_cmd_fcs = 2, + tx_desc_cmd_ipv4 = 4, + tx_desc_cmd_l4cs = 8, + tx_desc_cmd_lso = 0x10, + tx_desc_cmd_wb = 0x20, +}; + + +/* Hardware rx descriptor */ +struct hw_atl_rxd_s { + u64 buf_addr; + u64 hdr_addr; +} __rte_packed; + +/* Hardware rx descriptor writeback */ +struct hw_atl_rxd_wb_s { + u32 rss_type:4; + u32 pkt_type:8; + u32 type:20; + u32 rss_hash; + u16 dd:1; + u16 eop:1; + u16 rx_stat:4; + u16 rx_estat:6; + u16 rsc_cnt:4; + u16 pkt_len; + u16 next_desc_ptr; + u16 vlan; +} __rte_packed; + +struct hw_atl_stats_s { + u32 uprc; + u32 mprc; + u32 bprc; + u32 erpt; + u32 uptc; + u32 mptc; + u32 bptc; + u32 erpr; + u32 mbtc; + u32 bbtc; + u32 mbrc; + u32 bbrc; + u32 ubrc; + u32 ubtc; + u32 dpc; +} __rte_packed; + +union ip_addr { + struct { + u8 addr[16]; + } v6; + struct { + u8 padding[12]; + u8 addr[4]; + } v4; +} __rte_packed; + +struct hw_aq_atl_utils_fw_rpc { + u32 msg_id; + + union { + struct { + u32 pong; + } msg_ping; + + struct { + u8 mac_addr[6]; + u32 ip_addr_cnt; + + struct { + union ip_addr addr; + union ip_addr mask; + } ip[1]; + } msg_arp; + + struct { + u32 len; + u8 packet[1514U]; + } msg_inject; + + struct { + u32 priority; + u32 wol_packet_type; + u32 pattern_id; + u32 next_wol_pattern_offset; + union { + struct { + u32 flags; + u8 ipv4_source_address[4]; + u8 ipv4_dest_address[4]; + u16 tcp_source_port_number; + u16 tcp_dest_port_number; + } ipv4_tcp_syn_parameters; + + struct { + u32 flags; + u8 ipv6_source_address[16]; + u8 ipv6_dest_address[16]; + u16 tcp_source_port_number; + u16 tcp_dest_port_number; + } ipv6_tcp_syn_parameters; + + struct { + u32 flags; + } eapol_request_id_message_parameters; + + struct { + u32 flags; + u32 mask_offset; + u32 mask_size; + u32 pattern_offset; + u32 pattern_size; + } wol_bit_map_pattern; + struct { + u8 mac_addr[6]; + } wol_magic_packet_pattern; + + } wol_pattern; + } msg_wol; + + struct { + u16 tc_quanta[8]; + u16 tc_threshold[8]; + } msg_msm_pfc_quantas; + + struct { + union { + u32 pattern_mask; + struct { + u32 aq_pm_wol_reason_arp_v4_pkt : 1; + u32 aq_pm_wol_reason_ipv4_ping_pkt : 1; + u32 aq_pm_wol_reason_ipv6_ns_pkt : 1; + u32 aq_pm_wol_reason_ipv6_ping_pkt : 1; + u32 aq_pm_wol_reason_link_up : 1; + u32 aq_pm_wol_reason_link_down : 1; + u32 aq_pm_wol_reason_maximum : 1; + }; + }; + union { + u32 offload_mask; + }; + } msg_enable_wakeup; + + struct { + u32 priority; + u32 protocol_offload_type; + u32 protocol_offload_id; + u32 next_protocol_offload_offset; + + union { + struct { + u32 flags; + u8 remote_ipv4_addr[4]; + u8 host_ipv4_addr[4]; + u8 mac_addr[6]; + } ipv4_arp_params; + }; + } msg_offload; + + struct { + u32 id; + } msg_del_id; + + }; +} 
__rte_packed; + +struct hw_aq_atl_utils_mbox_header { + u32 version; + u32 transaction_id; + u32 error; +} __rte_packed; + +struct hw_aq_info { + u8 reserved[6]; + u16 phy_fault_code; + u16 phy_temperature; + u8 cable_len; + u8 reserved1; + u32 cable_diag_data[4]; + u8 reserved2[32]; + u32 caps_lo; + u32 caps_hi; +} __rte_packed; + +struct hw_aq_atl_utils_mbox { + struct hw_aq_atl_utils_mbox_header header; + struct hw_atl_stats_s stats; + struct hw_aq_info info; +} __rte_packed; + +/* fw2x */ +typedef u16 in_port_t; +typedef u32 ip4_addr_t; +typedef int int32_t; +typedef short int16_t; +typedef u32 fw_offset_t; + +struct ip6_addr { + u32 addr[4]; +} __rte_packed; + +struct offload_ka_v4 { + u32 timeout; + in_port_t local_port; + in_port_t remote_port; + u8 remote_mac_addr[6]; + u16 win_size; + u32 seq_num; + u32 ack_num; + ip4_addr_t local_ip; + ip4_addr_t remote_ip; +} __rte_packed; + +struct offload_ka_v6 { + u32 timeout; + in_port_t local_port; + in_port_t remote_port; + u8 remote_mac_addr[6]; + u16 win_size; + u32 seq_num; + u32 ack_num; + struct ip6_addr local_ip; + struct ip6_addr remote_ip; +} __rte_packed; + +struct offload_ip_info { + u8 v4_local_addr_count; + u8 v4_addr_count; + u8 v6_local_addr_count; + u8 v6_addr_count; + fw_offset_t v4_addr; + fw_offset_t v4_prefix; + fw_offset_t v6_addr; + fw_offset_t v6_prefix; +} __rte_packed; + +struct offload_port_info { + u16 udp_port_count; + u16 tcp_port_count; + fw_offset_t udp_port; + fw_offset_t tcp_port; +} __rte_packed; + +struct offload_ka_info { + u16 v4_ka_count; + u16 v6_ka_count; + u32 retry_count; + u32 retry_interval; + fw_offset_t v4_ka; + fw_offset_t v6_ka; +} __rte_packed; + +struct offload_rr_info { + u32 rr_count; + u32 rr_buf_len; + fw_offset_t rr_id_x; + fw_offset_t rr_buf; +} __rte_packed; + +struct offload_info { + u32 version; // current version is 0x00000000 + u32 len; // The whole structure length + // including the variable-size buf + u8 mac_addr[6]; // 8 bytes to keep alignment. Only + // first 6 meaningful. 
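+	// Editorial note: the 6-byte address plus the 2-byte pad below is
+	// what makes up the 8-byte, alignment-keeping field mentioned above.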
+ + u8 reserved[2]; + + struct offload_ip_info ips; + struct offload_port_info ports; + struct offload_ka_info kas; + struct offload_rr_info rrs; + u8 buf[0]; +} __rte_packed; + +struct smbus_request { + u32 msg_id; /* not used */ + u32 device_id; + u32 address; + u32 length; +} __rte_packed; + +enum macsec_msg_type { + macsec_cfg_msg = 0, + macsec_add_rx_sc_msg, + macsec_add_tx_sc_msg, + macsec_add_rx_sa_msg, + macsec_add_tx_sa_msg, + macsec_get_stats_msg, +}; + +struct macsec_cfg { + uint32_t enabled; + uint32_t egress_threshold; + uint32_t ingress_threshold; + uint32_t interrupts_enabled; +} __rte_packed; + +struct add_rx_sc { + uint32_t index; + uint32_t pi; /* Port identifier */ + uint32_t sci[2]; /* Secure Channel identifier */ + uint32_t sci_mask; /* 1: enable comparison of SCI, 0: don't care */ + uint32_t tci; + uint32_t tci_mask; + uint32_t mac_sa[2]; + uint32_t sa_mask; /* 0: ignore mac_sa */ + uint32_t mac_da[2]; + uint32_t da_mask; /* 0: ignore mac_da */ + uint32_t validate_frames; /* 0: strict, 1:check, 2:disabled */ + uint32_t replay_protect; /* 1: enabled, 0:disabled */ + uint32_t anti_replay_window; /* default 0 */ + /* 1: auto_rollover enabled (when SA next_pn is saturated */ + uint32_t an_rol; +} __rte_packed; + +struct add_tx_sc { + uint32_t index; + uint32_t pi; /* Port identifier */ + uint32_t sci[2]; /* Secure Channel identifier */ + uint32_t sci_mask; /* 1: enable comparison of SCI, 0: don't care */ + uint32_t tci; /* TCI value, used if packet is not explicitly tagged */ + uint32_t tci_mask; + uint32_t mac_sa[2]; + uint32_t sa_mask; /* 0: ignore mac_sa */ + uint32_t mac_da[2]; + uint32_t da_mask; /* 0: ignore mac_da */ + uint32_t protect; + uint32_t curr_an; /* SA index which currently used */ +} __rte_packed; + +struct add_rx_sa { + uint32_t index; + uint32_t next_pn; + uint32_t key[4]; /* 128 bit key */ +} __rte_packed; + +struct add_tx_sa { + uint32_t index; + uint32_t next_pn; + uint32_t key[4]; /* 128 bit key */ +} __rte_packed; + +struct get_stats { + uint32_t version_only; + uint32_t ingress_sa_index; + uint32_t egress_sa_index; + uint32_t egress_sc_index; +} __rte_packed; + +struct macsec_stats { + uint32_t api_version; + /* Ingress Common Counters */ + uint64_t in_ctl_pkts; + uint64_t in_tagged_miss_pkts; + uint64_t in_untagged_miss_pkts; + uint64_t in_notag_pkts; + uint64_t in_untagged_pkts; + uint64_t in_bad_tag_pkts; + uint64_t in_no_sci_pkts; + uint64_t in_unknown_sci_pkts; + uint64_t in_ctrl_prt_pass_pkts; + uint64_t in_unctrl_prt_pass_pkts; + uint64_t in_ctrl_prt_fail_pkts; + uint64_t in_unctrl_prt_fail_pkts; + uint64_t in_too_long_pkts; + uint64_t in_igpoc_ctl_pkts; + uint64_t in_ecc_error_pkts; + uint64_t in_unctrl_hit_drop_redir; + + /* Egress Common Counters */ + uint64_t out_ctl_pkts; + uint64_t out_unknown_sa_pkts; + uint64_t out_untagged_pkts; + uint64_t out_too_long; + uint64_t out_ecc_error_pkts; + uint64_t out_unctrl_hit_drop_redir; + + /* Ingress SA Counters */ + uint64_t in_untagged_hit_pkts; + uint64_t in_ctrl_hit_drop_redir_pkts; + uint64_t in_not_using_sa; + uint64_t in_unused_sa; + uint64_t in_not_valid_pkts; + uint64_t in_invalid_pkts; + uint64_t in_ok_pkts; + uint64_t in_late_pkts; + uint64_t in_delayed_pkts; + uint64_t in_unchecked_pkts; + uint64_t in_validated_octets; + uint64_t in_decrypted_octets; + + /* Egress SA Counters */ + uint64_t out_sa_hit_drop_redirect; + uint64_t out_sa_protected2_pkts; + uint64_t out_sa_protected_pkts; + uint64_t out_sa_encrypted_pkts; + + /* Egress SC Counters */ + uint64_t out_sc_protected_pkts; + 
uint64_t out_sc_encrypted_pkts; + uint64_t out_sc_protected_octets; + uint64_t out_sc_encrypted_octets; + + /* SA Counters expiration info */ + uint32_t egress_threshold_expired; + uint32_t ingress_threshold_expired; + uint32_t egress_expired; + uint32_t ingress_expired; +} __rte_packed; + +struct macsec_msg_fw_request { + uint32_t offset; /* not used */ + uint32_t msg_type; + + union { + struct macsec_cfg cfg; + struct add_rx_sc rxsc; + struct add_tx_sc txsc; + struct add_rx_sa rxsa; + struct add_tx_sa txsa; + struct get_stats stats; + }; +} __rte_packed; + +struct macsec_msg_fw_response { + uint32_t result; + struct macsec_stats stats; +} __rte_packed; + +#define HAL_ATLANTIC_UTILS_CHIP_MIPS 0x00000001U +#define HAL_ATLANTIC_UTILS_CHIP_TPO2 0x00000002U +#define HAL_ATLANTIC_UTILS_CHIP_RPF2 0x00000004U +#define HAL_ATLANTIC_UTILS_CHIP_MPI_AQ 0x00000010U +#define HAL_ATLANTIC_UTILS_CHIP_REVISION_A0 0x01000000U +#define HAL_ATLANTIC_UTILS_CHIP_REVISION_B0 0x02000000U +#define HAL_ATLANTIC_UTILS_CHIP_REVISION_B1 0x04000000U + + +#define IS_CHIP_FEATURE(_F_) (HAL_ATLANTIC_UTILS_CHIP_##_F_ & \ + self->chip_features) + +enum hal_atl_utils_fw_state_e { + MPI_DEINIT = 0, + MPI_RESET = 1, + MPI_INIT = 2, + MPI_POWER = 4, +}; + +#define HAL_ATLANTIC_RATE_10G BIT(0) +#define HAL_ATLANTIC_RATE_5G BIT(1) +#define HAL_ATLANTIC_RATE_5GSR BIT(2) +#define HAL_ATLANTIC_RATE_2GS BIT(3) +#define HAL_ATLANTIC_RATE_1G BIT(4) +#define HAL_ATLANTIC_RATE_100M BIT(5) +#define HAL_ATLANTIC_RATE_INVALID BIT(6) + +#define HAL_ATLANTIC_UTILS_FW_MSG_PING 1U +#define HAL_ATLANTIC_UTILS_FW_MSG_ARP 2U +#define HAL_ATLANTIC_UTILS_FW_MSG_INJECT 3U +#define HAL_ATLANTIC_UTILS_FW_MSG_WOL_ADD 4U +#define HAL_ATLANTIC_UTILS_FW_MSG_WOL_DEL 5U +#define HAL_ATLANTIC_UTILS_FW_MSG_ENABLE_WAKEUP 6U +#define HAL_ATLANTIC_UTILS_FW_MSG_MSM_PFC 7U +#define HAL_ATLANTIC_UTILS_FW_MSG_PROVISIONING 8U +#define HAL_ATLANTIC_UTILS_FW_MSG_OFFLOAD_ADD 9U +#define HAL_ATLANTIC_UTILS_FW_MSG_OFFLOAD_DEL 10U +#define HAL_ATLANTIC_UTILS_FW_MSG_CABLE_DIAG 13U // 0xd + +#define SMBUS_DEVICE_ID 0x50 + +enum hw_atl_fw2x_caps_lo { + CAPS_LO_10BASET_HD = 0x00, + CAPS_LO_10BASET_FD, + CAPS_LO_100BASETX_HD, + CAPS_LO_100BASET4_HD, + CAPS_LO_100BASET2_HD, + CAPS_LO_100BASETX_FD, + CAPS_LO_100BASET2_FD, + CAPS_LO_1000BASET_HD, + CAPS_LO_1000BASET_FD, + CAPS_LO_2P5GBASET_FD, + CAPS_LO_5GBASET_FD, + CAPS_LO_10GBASET_FD, + CAPS_LO_AUTONEG, + CAPS_LO_SMBUS_READ, + CAPS_LO_SMBUS_WRITE, + CAPS_LO_MACSEC +}; + +enum hw_atl_fw2x_caps_hi { + CAPS_HI_RESERVED1 = 0x00, + CAPS_HI_10BASET_EEE, + CAPS_HI_RESERVED2, + CAPS_HI_PAUSE, + CAPS_HI_ASYMMETRIC_PAUSE, + CAPS_HI_100BASETX_EEE, + CAPS_HI_RESERVED3, + CAPS_HI_RESERVED4, + CAPS_HI_1000BASET_FD_EEE, + CAPS_HI_2P5GBASET_FD_EEE, + CAPS_HI_5GBASET_FD_EEE, + CAPS_HI_10GBASET_FD_EEE, + CAPS_HI_RESERVED5, + CAPS_HI_RESERVED6, + CAPS_HI_RESERVED7, + CAPS_HI_RESERVED8, + CAPS_HI_RESERVED9, + CAPS_HI_CABLE_DIAG, + CAPS_HI_TEMPERATURE, + CAPS_HI_DOWNSHIFT, + CAPS_HI_PTP_AVB_EN, + CAPS_HI_MEDIA_DETECT, + CAPS_HI_LINK_DROP, + CAPS_HI_SLEEP_PROXY, + CAPS_HI_WOL, + CAPS_HI_MAC_STOP, + CAPS_HI_EXT_LOOPBACK, + CAPS_HI_INT_LOOPBACK, + CAPS_HI_EFUSE_AGENT, + CAPS_HI_WOL_TIMER, + CAPS_HI_STATISTICS, + CAPS_HI_TRANSACTION_ID, +}; + +enum hw_atl_fw2x_rate { + FW2X_RATE_100M = BIT(CAPS_LO_100BASETX_FD), + FW2X_RATE_1G = BIT(CAPS_LO_1000BASET_FD), + FW2X_RATE_2G5 = BIT(CAPS_LO_2P5GBASET_FD), + FW2X_RATE_5G = BIT(CAPS_LO_5GBASET_FD), + FW2X_RATE_10G = BIT(CAPS_LO_10GBASET_FD), +}; + +struct aq_hw_s; +struct aq_fw_ops; +struct 
aq_hw_link_status_s; + +int hw_atl_utils_initfw(struct aq_hw_s *self, const struct aq_fw_ops **fw_ops); + +int hw_atl_utils_soft_reset(struct aq_hw_s *self); + +void hw_atl_utils_hw_chip_features_init(struct aq_hw_s *self, u32 *p); + +int hw_atl_utils_mpi_read_mbox(struct aq_hw_s *self, + struct hw_aq_atl_utils_mbox_header *pmbox); + +void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self, + struct hw_aq_atl_utils_mbox *pmbox); + +void hw_atl_utils_mpi_set(struct aq_hw_s *self, + enum hal_atl_utils_fw_state_e state, + u32 speed); + +int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self); + +unsigned int hw_atl_utils_mbps_2_speed_index(unsigned int mbps); + +unsigned int hw_atl_utils_hw_get_reg_length(void); + +int hw_atl_utils_hw_get_regs(struct aq_hw_s *self, + u32 *regs_buff); + +int hw_atl_utils_hw_set_power(struct aq_hw_s *self, + unsigned int power_state); + +int hw_atl_utils_hw_deinit(struct aq_hw_s *self); + +int hw_atl_utils_get_fw_version(struct aq_hw_s *self, u32 *fw_version); + +int hw_atl_utils_update_stats(struct aq_hw_s *self); + +struct aq_stats_s *hw_atl_utils_get_hw_stats(struct aq_hw_s *self); + +int hw_atl_utils_fw_downld_dwords(struct aq_hw_s *self, u32 a, + u32 *p, u32 cnt); + +int hw_atl_utils_fw_upload_dwords(struct aq_hw_s *self, u32 a, u32 *p, + u32 cnt); + +int hw_atl_utils_fw_set_wol(struct aq_hw_s *self, bool wol_enabled, u8 *mac); + +int hw_atl_utils_fw_rpc_call(struct aq_hw_s *self, unsigned int rpc_size); + +int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self, + struct hw_aq_atl_utils_fw_rpc **rpc); + +extern const struct aq_fw_ops aq_fw_1x_ops; +extern const struct aq_fw_ops aq_fw_2x_ops; + +#endif /* HW_ATL_UTILS_H */ diff --git a/src/spdk/dpdk/drivers/net/atlantic/hw_atl/hw_atl_utils_fw2x.c b/src/spdk/dpdk/drivers/net/atlantic/hw_atl/hw_atl_utils_fw2x.c new file mode 100644 index 000000000..3a7faf405 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/atlantic/hw_atl/hw_atl_utils_fw2x.c @@ -0,0 +1,770 @@ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) +/* Copyright (C) 2014-2017 aQuantia Corporation. */ + +/* File hw_atl_utils_fw2x.c: Definition of firmware 2.x functions for + * Atlantic hardware abstraction layer. 
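+ *
+ * Editorial note: most requests in this file follow the same pattern: a
+ * capability bit is toggled in the MPI control registers (0x368/0x36C) and
+ * the code then polls the matching state registers (0x370/0x374) with
+ * AQ_HW_WAIT_FOR() until the firmware mirrors the bit back.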
+ */ + +#include +#include +#include "../atl_hw_regs.h" + +#include "../atl_types.h" +#include "hw_atl_utils.h" +#include "hw_atl_llh.h" + +#define HW_ATL_FW2X_MPI_EFUSE_ADDR 0x364 +#define HW_ATL_FW2X_MPI_MBOX_ADDR 0x360 +#define HW_ATL_FW2X_MPI_RPC_ADDR 0x334 + +#define HW_ATL_FW2X_MPI_CONTROL_ADDR 0x368 +#define HW_ATL_FW2X_MPI_CONTROL2_ADDR 0x36C +#define HW_ATL_FW2X_MPI_LED_ADDR 0x31c + +#define HW_ATL_FW2X_MPI_STATE_ADDR 0x370 +#define HW_ATL_FW2X_MPI_STATE2_ADDR 0x374 + +#define HW_ATL_FW2X_CAP_SLEEP_PROXY BIT(CAPS_HI_SLEEP_PROXY) +#define HW_ATL_FW2X_CAP_WOL BIT(CAPS_HI_WOL) + +#define HW_ATL_FW2X_CAP_EEE_1G_MASK BIT(CAPS_HI_1000BASET_FD_EEE) +#define HW_ATL_FW2X_CAP_EEE_2G5_MASK BIT(CAPS_HI_2P5GBASET_FD_EEE) +#define HW_ATL_FW2X_CAP_EEE_5G_MASK BIT(CAPS_HI_5GBASET_FD_EEE) +#define HW_ATL_FW2X_CAP_EEE_10G_MASK BIT(CAPS_HI_10GBASET_FD_EEE) + +#define HAL_ATLANTIC_WOL_FILTERS_COUNT 8 +#define HAL_ATLANTIC_UTILS_FW2X_MSG_WOL 0x0E + +#define HW_ATL_FW_FEATURE_LED 0x03010026 + +struct fw2x_msg_wol_pattern { + u8 mask[16]; + u32 crc; +} __rte_packed; + +struct fw2x_msg_wol { + u32 msg_id; + u8 hw_addr[6]; + u8 magic_packet_enabled; + u8 filter_count; + struct fw2x_msg_wol_pattern filter[HAL_ATLANTIC_WOL_FILTERS_COUNT]; + u8 link_up_enabled; + u8 link_down_enabled; + u16 reserved; + u32 link_up_timeout; + u32 link_down_timeout; +} __rte_packed; + +static int aq_fw2x_set_link_speed(struct aq_hw_s *self, u32 speed); +static int aq_fw2x_set_state(struct aq_hw_s *self, + enum hal_atl_utils_fw_state_e state); + +static int aq_fw2x_init(struct aq_hw_s *self) +{ + int err = 0; + struct hw_aq_atl_utils_mbox mbox; + + /* check 10 times by 1ms */ + AQ_HW_WAIT_FOR(0U != (self->mbox_addr = + aq_hw_read_reg(self, HW_ATL_FW2X_MPI_MBOX_ADDR)), + 1000U, 10U); + AQ_HW_WAIT_FOR(0U != (self->rpc_addr = + aq_hw_read_reg(self, HW_ATL_FW2X_MPI_RPC_ADDR)), + 1000U, 100U); + + /* Read caps */ + hw_atl_utils_mpi_read_stats(self, &mbox); + + self->caps_lo = mbox.info.caps_lo; + + return err; +} + +static int aq_fw2x_deinit(struct aq_hw_s *self) +{ + int err = aq_fw2x_set_link_speed(self, 0); + + if (!err) + err = aq_fw2x_set_state(self, MPI_DEINIT); + + return err; +} + +static enum hw_atl_fw2x_rate link_speed_mask_2fw2x_ratemask(u32 speed) +{ + enum hw_atl_fw2x_rate rate = 0; + + if (speed & AQ_NIC_RATE_10G) + rate |= FW2X_RATE_10G; + + if (speed & AQ_NIC_RATE_5G) + rate |= FW2X_RATE_5G; + + if (speed & AQ_NIC_RATE_5G5R) + rate |= FW2X_RATE_5G; + + if (speed & AQ_NIC_RATE_2G5) + rate |= FW2X_RATE_2G5; + + if (speed & AQ_NIC_RATE_1G) + rate |= FW2X_RATE_1G; + + if (speed & AQ_NIC_RATE_100M) + rate |= FW2X_RATE_100M; + + return rate; +} + +static u32 fw2x_to_eee_mask(u32 speed) +{ + u32 rate = 0; + + if (speed & HW_ATL_FW2X_CAP_EEE_10G_MASK) + rate |= AQ_NIC_RATE_EEE_10G; + + if (speed & HW_ATL_FW2X_CAP_EEE_5G_MASK) + rate |= AQ_NIC_RATE_EEE_5G; + + if (speed & HW_ATL_FW2X_CAP_EEE_2G5_MASK) + rate |= AQ_NIC_RATE_EEE_2G5; + + if (speed & HW_ATL_FW2X_CAP_EEE_1G_MASK) + rate |= AQ_NIC_RATE_EEE_1G; + + return rate; +} + +static int aq_fw2x_set_link_speed(struct aq_hw_s *self, u32 speed) +{ + u32 rate_mask = link_speed_mask_2fw2x_ratemask(speed); + u32 reg_val = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL_ADDR); + u32 val = rate_mask | ((BIT(CAPS_LO_SMBUS_READ) | + BIT(CAPS_LO_SMBUS_WRITE) | + BIT(CAPS_LO_MACSEC)) & reg_val); + + aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL_ADDR, val); + + return 0; +} + +static void aq_fw2x_set_mpi_flow_control(struct aq_hw_s *self, u32 *mpi_state) +{ + if 
(self->aq_nic_cfg->flow_control & AQ_NIC_FC_RX) + *mpi_state |= BIT(CAPS_HI_PAUSE); + else + *mpi_state &= ~BIT(CAPS_HI_PAUSE); + + if (self->aq_nic_cfg->flow_control & AQ_NIC_FC_TX) + *mpi_state |= BIT(CAPS_HI_ASYMMETRIC_PAUSE); + else + *mpi_state &= ~BIT(CAPS_HI_ASYMMETRIC_PAUSE); +} + +static int aq_fw2x_set_state(struct aq_hw_s *self, + enum hal_atl_utils_fw_state_e state) +{ + u32 mpi_state = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR); + + switch (state) { + case MPI_INIT: + mpi_state &= ~BIT(CAPS_HI_LINK_DROP); + aq_fw2x_set_mpi_flow_control(self, &mpi_state); + break; + case MPI_DEINIT: + mpi_state |= BIT(CAPS_HI_LINK_DROP); + break; + case MPI_RESET: + case MPI_POWER: + /* No actions */ + break; + } + aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_state); + return 0; +} + +static int aq_fw2x_update_link_status(struct aq_hw_s *self) +{ + u32 mpi_state = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE_ADDR); + u32 speed = mpi_state & (FW2X_RATE_100M | FW2X_RATE_1G | + FW2X_RATE_2G5 | FW2X_RATE_5G | FW2X_RATE_10G); + struct aq_hw_link_status_s *link_status = &self->aq_link_status; + + if (speed) { + if (speed & FW2X_RATE_10G) + link_status->mbps = 10000; + else if (speed & FW2X_RATE_5G) + link_status->mbps = 5000; + else if (speed & FW2X_RATE_2G5) + link_status->mbps = 2500; + else if (speed & FW2X_RATE_1G) + link_status->mbps = 1000; + else if (speed & FW2X_RATE_100M) + link_status->mbps = 100; + else + link_status->mbps = 10000; + } else { + link_status->mbps = 0; + } + + return 0; +} + +static +int aq_fw2x_get_mac_permanent(struct aq_hw_s *self, u8 *mac) +{ + int err = 0; + u32 h = 0U; + u32 l = 0U; + u32 mac_addr[2] = { 0 }; + u32 efuse_addr = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_EFUSE_ADDR); + + pthread_mutex_lock(&self->mbox_mutex); + + if (efuse_addr != 0) { + err = hw_atl_utils_fw_downld_dwords(self, + efuse_addr + (40U * 4U), + mac_addr, + ARRAY_SIZE(mac_addr)); + if (err) + goto exit; + mac_addr[0] = rte_constant_bswap32(mac_addr[0]); + mac_addr[1] = rte_constant_bswap32(mac_addr[1]); + } + + rte_ether_addr_copy((struct rte_ether_addr *)mac_addr, + (struct rte_ether_addr *)mac); + + if ((mac[0] & 0x01U) || ((mac[0] | mac[1] | mac[2]) == 0x00U)) { + unsigned int rnd = (uint32_t)rte_rand(); + + //get_random_bytes(&rnd, sizeof(unsigned int)); + + l = 0xE3000000U + | (0xFFFFU & rnd) + | (0x00 << 16); + h = 0x8001300EU; + + mac[5] = (u8)(0xFFU & l); + l >>= 8; + mac[4] = (u8)(0xFFU & l); + l >>= 8; + mac[3] = (u8)(0xFFU & l); + l >>= 8; + mac[2] = (u8)(0xFFU & l); + mac[1] = (u8)(0xFFU & h); + h >>= 8; + mac[0] = (u8)(0xFFU & h); + } + +exit: + pthread_mutex_unlock(&self->mbox_mutex); + + return err; +} + +static int aq_fw2x_update_stats(struct aq_hw_s *self) +{ + int err = 0; + u32 mpi_opts = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR); + u32 orig_stats_val = mpi_opts & BIT(CAPS_HI_STATISTICS); + + + pthread_mutex_lock(&self->mbox_mutex); + + /* Toggle statistics bit for FW to update */ + mpi_opts = mpi_opts ^ BIT(CAPS_HI_STATISTICS); + aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts); + + /* Wait FW to report back */ + AQ_HW_WAIT_FOR(orig_stats_val != + (aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE2_ADDR) & + BIT(CAPS_HI_STATISTICS)), + 1U, 10000U); + if (err) + goto exit; + + err = hw_atl_utils_update_stats(self); + +exit: + pthread_mutex_unlock(&self->mbox_mutex); + + return err; + +} + +static int aq_fw2x_get_temp(struct aq_hw_s *self, int *temp) +{ + int err = 0; + u32 mpi_opts = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR); + u32 
temp_val = mpi_opts & BIT(CAPS_HI_TEMPERATURE); + u32 temp_res; + + pthread_mutex_lock(&self->mbox_mutex); + + /* Toggle statistics bit for FW to 0x36C.18 (CAPS_HI_TEMPERATURE) */ + mpi_opts = mpi_opts ^ BIT(CAPS_HI_TEMPERATURE); + aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts); + + /* Wait FW to report back */ + AQ_HW_WAIT_FOR(temp_val != + (aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE2_ADDR) & + BIT(CAPS_HI_TEMPERATURE)), 1U, 10000U); + err = hw_atl_utils_fw_downld_dwords(self, + self->mbox_addr + + offsetof(struct hw_aq_atl_utils_mbox, info) + + offsetof(struct hw_aq_info, phy_temperature), + &temp_res, + sizeof(temp_res) / sizeof(u32)); + + + pthread_mutex_unlock(&self->mbox_mutex); + + if (err) + return err; + + *temp = temp_res * 100 / 256; + return 0; +} + +static int aq_fw2x_get_cable_len(struct aq_hw_s *self, int *cable_len) +{ + int err = 0; + u32 cable_len_res; + + err = hw_atl_utils_fw_downld_dwords(self, + self->mbox_addr + + offsetof(struct hw_aq_atl_utils_mbox, info) + + offsetof(struct hw_aq_info, phy_temperature), + &cable_len_res, + sizeof(cable_len_res) / sizeof(u32)); + + if (err) + return err; + + *cable_len = (cable_len_res >> 16) & 0xFF; + return 0; +} + +#ifndef ETH_ALEN +#define ETH_ALEN 6 +#endif + +static int aq_fw2x_set_sleep_proxy(struct aq_hw_s *self, u8 *mac) +{ + int err = 0; + struct hw_aq_atl_utils_fw_rpc *rpc = NULL; + struct offload_info *cfg = NULL; + unsigned int rpc_size = 0U; + u32 mpi_opts; + + rpc_size = sizeof(rpc->msg_id) + sizeof(*cfg); + + err = hw_atl_utils_fw_rpc_wait(self, &rpc); + if (err < 0) + goto err_exit; + + memset(rpc, 0, rpc_size); + cfg = (struct offload_info *)(&rpc->msg_id + 1); + + memcpy(cfg->mac_addr, mac, ETH_ALEN); + cfg->len = sizeof(*cfg); + + /* Clear bit 0x36C.23 */ + mpi_opts = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR); + mpi_opts &= ~HW_ATL_FW2X_CAP_SLEEP_PROXY; + + aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts); + + err = hw_atl_utils_fw_rpc_call(self, rpc_size); + if (err < 0) + goto err_exit; + + /* Set bit 0x36C.23 */ + mpi_opts |= HW_ATL_FW2X_CAP_SLEEP_PROXY; + aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts); + + AQ_HW_WAIT_FOR((aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE2_ADDR) & + HW_ATL_FW2X_CAP_SLEEP_PROXY), 1U, 10000U); +err_exit: + return err; +} + +static int aq_fw2x_set_wol_params(struct aq_hw_s *self, u8 *mac) +{ + int err = 0; + struct fw2x_msg_wol *msg = NULL; + u32 mpi_opts; + + struct hw_aq_atl_utils_fw_rpc *rpc = NULL; + + err = hw_atl_utils_fw_rpc_wait(self, &rpc); + if (err < 0) + goto err_exit; + + msg = (struct fw2x_msg_wol *)rpc; + + msg->msg_id = HAL_ATLANTIC_UTILS_FW2X_MSG_WOL; + msg->magic_packet_enabled = true; + memcpy(msg->hw_addr, mac, ETH_ALEN); + + mpi_opts = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR); + mpi_opts &= ~(HW_ATL_FW2X_CAP_SLEEP_PROXY | HW_ATL_FW2X_CAP_WOL); + + aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts); + + err = hw_atl_utils_fw_rpc_call(self, sizeof(*msg)); + if (err < 0) + goto err_exit; + + /* Set bit 0x36C.24 */ + mpi_opts |= HW_ATL_FW2X_CAP_WOL; + aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts); + + AQ_HW_WAIT_FOR((aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE2_ADDR) & + HW_ATL_FW2X_CAP_WOL), 1U, 10000U); +err_exit: + return err; +} + +static int aq_fw2x_set_power(struct aq_hw_s *self, + unsigned int power_state __rte_unused, + u8 *mac) +{ + int err = 0; + + if (self->aq_nic_cfg->wol & AQ_NIC_WOL_ENABLED) { + err = aq_fw2x_set_sleep_proxy(self, mac); + if (err < 0) + goto 
err_exit; + err = aq_fw2x_set_wol_params(self, mac); + if (err < 0) + goto err_exit; + } +err_exit: + return err; +} + +static int aq_fw2x_set_eee_rate(struct aq_hw_s *self, u32 speed) +{ + u32 mpi_opts = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR); + mpi_opts &= ~(HW_ATL_FW2X_CAP_EEE_1G_MASK | + HW_ATL_FW2X_CAP_EEE_2G5_MASK | HW_ATL_FW2X_CAP_EEE_5G_MASK | + HW_ATL_FW2X_CAP_EEE_10G_MASK); + + if (speed & AQ_NIC_RATE_EEE_10G) + mpi_opts |= HW_ATL_FW2X_CAP_EEE_10G_MASK; + + if (speed & AQ_NIC_RATE_EEE_5G) + mpi_opts |= HW_ATL_FW2X_CAP_EEE_5G_MASK; + + if (speed & AQ_NIC_RATE_EEE_2G5) + mpi_opts |= HW_ATL_FW2X_CAP_EEE_2G5_MASK; + + if (speed & AQ_NIC_RATE_EEE_1G) + mpi_opts |= HW_ATL_FW2X_CAP_EEE_1G_MASK; + + aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts); + + return 0; +} + +static int aq_fw2x_get_eee_rate(struct aq_hw_s *self, u32 *rate, + u32 *supported_rates) +{ + int err = 0; + u32 caps_hi; + u32 mpi_state; + + err = hw_atl_utils_fw_downld_dwords(self, + self->mbox_addr + + offsetof(struct hw_aq_atl_utils_mbox, info) + + offsetof(struct hw_aq_info, caps_hi), + &caps_hi, + sizeof(caps_hi) / sizeof(u32)); + + if (err) + return err; + + *supported_rates = fw2x_to_eee_mask(caps_hi); + + mpi_state = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE2_ADDR); + *rate = fw2x_to_eee_mask(mpi_state); + + return err; +} + +static int aq_fw2x_get_flow_control(struct aq_hw_s *self, u32 *fc) +{ + u32 mpi_state = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR); + + *fc = ((mpi_state & BIT(CAPS_HI_PAUSE)) ? AQ_NIC_FC_RX : 0) | + ((mpi_state & BIT(CAPS_HI_ASYMMETRIC_PAUSE)) ? AQ_NIC_FC_TX : 0); + + return 0; +} + +static int aq_fw2x_set_flow_control(struct aq_hw_s *self) +{ + u32 mpi_state = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR); + + aq_fw2x_set_mpi_flow_control(self, &mpi_state); + + aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_state); + + return 0; +} + +static int aq_fw2x_led_control(struct aq_hw_s *self, u32 mode) +{ + if (self->fw_ver_actual < HW_ATL_FW_FEATURE_LED) + return -EOPNOTSUPP; + + aq_hw_write_reg(self, HW_ATL_FW2X_MPI_LED_ADDR, mode); + return 0; +} + +static int aq_fw2x_get_eeprom(struct aq_hw_s *self, int dev_addr, + u32 *data, u32 len, u32 offset) +{ + u32 bytes_remains = len % sizeof(u32); + u32 num_dwords = len / sizeof(u32); + struct smbus_request request; + u32 result = 0; + u32 mpi_opts; + int err = 0; + + if ((self->caps_lo & BIT(CAPS_LO_SMBUS_READ)) == 0) + return -EOPNOTSUPP; + + pthread_mutex_lock(&self->mbox_mutex); + + request.msg_id = 0; + request.device_id = dev_addr; + request.address = offset; + request.length = len; + + /* Write SMBUS request to cfg memory */ + err = hw_atl_utils_fw_upload_dwords(self, self->rpc_addr, + (u32 *)(void *)&request, + sizeof(request) / sizeof(u32)); + + if (err < 0) + goto exit; + + /* Toggle 0x368.CAPS_LO_SMBUS_READ bit */ + mpi_opts = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL_ADDR); + mpi_opts ^= BIT(CAPS_LO_SMBUS_READ); + + aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL_ADDR, mpi_opts); + + /* Wait until REQUEST_BIT matched in 0x370 */ + + AQ_HW_WAIT_FOR((aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE_ADDR) & + BIT(CAPS_LO_SMBUS_READ)) == (mpi_opts & BIT(CAPS_LO_SMBUS_READ)), + 10U, 10000U); + + if (err < 0) + goto exit; + + err = hw_atl_utils_fw_downld_dwords(self, self->rpc_addr + sizeof(u32), + &result, + sizeof(result) / sizeof(u32)); + + if (err < 0) + goto exit; + + if (result) { + err = -EIO; + goto exit; + } + + if (num_dwords) { + err = hw_atl_utils_fw_downld_dwords(self, + 
self->rpc_addr + sizeof(u32) * 2, + data, + num_dwords); + + if (err < 0) + goto exit; + } + + if (bytes_remains) { + u32 val = 0; + + err = hw_atl_utils_fw_downld_dwords(self, + self->rpc_addr + (sizeof(u32) * 2) + + (num_dwords * sizeof(u32)), + &val, + 1); + + if (err < 0) + goto exit; + + rte_memcpy((u8 *)data + len - bytes_remains, + &val, bytes_remains); + } + +exit: + pthread_mutex_unlock(&self->mbox_mutex); + + return err; +} + + +static int aq_fw2x_set_eeprom(struct aq_hw_s *self, int dev_addr, + u32 *data, u32 len, u32 offset) +{ + struct smbus_request request; + u32 mpi_opts, result = 0; + int err = 0; + + if ((self->caps_lo & BIT(CAPS_LO_SMBUS_WRITE)) == 0) + return -EOPNOTSUPP; + + request.msg_id = 0; + request.device_id = dev_addr; + request.address = offset; + request.length = len; + + pthread_mutex_lock(&self->mbox_mutex); + + /* Write SMBUS request to cfg memory */ + err = hw_atl_utils_fw_upload_dwords(self, self->rpc_addr, + (u32 *)(void *)&request, + sizeof(request) / sizeof(u32)); + + if (err < 0) + goto exit; + + /* Write SMBUS data to cfg memory */ + u32 num_dwords = len / sizeof(u32); + u32 bytes_remains = len % sizeof(u32); + + if (num_dwords) { + err = hw_atl_utils_fw_upload_dwords(self, + self->rpc_addr + sizeof(request), + (u32 *)(void *)data, + num_dwords); + + if (err < 0) + goto exit; + } + + if (bytes_remains) { + u32 val = 0; + + rte_memcpy(&val, (u8 *)data + (sizeof(u32) * num_dwords), + bytes_remains); + + err = hw_atl_utils_fw_upload_dwords(self, + self->rpc_addr + sizeof(request) + + (num_dwords * sizeof(u32)), + &val, + 1); + + if (err < 0) + goto exit; + } + + /* Toggle 0x368.CAPS_LO_SMBUS_WRITE bit */ + mpi_opts = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL_ADDR); + mpi_opts ^= BIT(CAPS_LO_SMBUS_WRITE); + + aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL_ADDR, mpi_opts); + + /* Wait until REQUEST_BIT matched in 0x370 */ + AQ_HW_WAIT_FOR((aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE_ADDR) & + BIT(CAPS_LO_SMBUS_WRITE)) == (mpi_opts & BIT(CAPS_LO_SMBUS_WRITE)), + 10U, 10000U); + + if (err < 0) + goto exit; + + /* Read status of write operation */ + err = hw_atl_utils_fw_downld_dwords(self, self->rpc_addr + sizeof(u32), + &result, + sizeof(result) / sizeof(u32)); + + if (err < 0) + goto exit; + + if (result) { + err = -EIO; + goto exit; + } + +exit: + pthread_mutex_unlock(&self->mbox_mutex); + + return err; +} + +static int aq_fw2x_send_macsec_request(struct aq_hw_s *self, + struct macsec_msg_fw_request *req, + struct macsec_msg_fw_response *response) +{ + int err = 0; + u32 mpi_opts = 0; + + if (!req || !response) + return 0; + + if ((self->caps_lo & BIT(CAPS_LO_MACSEC)) == 0) + return -EOPNOTSUPP; + + pthread_mutex_lock(&self->mbox_mutex); + + /* Write macsec request to cfg memory */ + err = hw_atl_utils_fw_upload_dwords(self, self->rpc_addr, + (u32 *)(void *)req, + RTE_ALIGN(sizeof(*req) / sizeof(u32), sizeof(u32))); + + if (err < 0) + goto exit; + + /* Toggle 0x368.CAPS_LO_MACSEC bit */ + mpi_opts = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL_ADDR); + mpi_opts ^= BIT(CAPS_LO_MACSEC); + + aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL_ADDR, mpi_opts); + + /* Wait until REQUEST_BIT matched in 0x370 */ + AQ_HW_WAIT_FOR((aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE_ADDR) & + BIT(CAPS_LO_MACSEC)) == (mpi_opts & BIT(CAPS_LO_MACSEC)), + 1000U, 10000U); + + if (err < 0) + goto exit; + + /* Read status of write operation */ + err = hw_atl_utils_fw_downld_dwords(self, self->rpc_addr + sizeof(u32), + (u32 *)(void *)response, + RTE_ALIGN(sizeof(*response) / 
sizeof(u32), sizeof(u32))); + +exit: + pthread_mutex_unlock(&self->mbox_mutex); + + return err; +} + +const struct aq_fw_ops aq_fw_2x_ops = { + .init = aq_fw2x_init, + .deinit = aq_fw2x_deinit, + .reset = NULL, + .get_mac_permanent = aq_fw2x_get_mac_permanent, + .set_link_speed = aq_fw2x_set_link_speed, + .set_state = aq_fw2x_set_state, + .update_link_status = aq_fw2x_update_link_status, + .update_stats = aq_fw2x_update_stats, + .set_power = aq_fw2x_set_power, + .get_temp = aq_fw2x_get_temp, + .get_cable_len = aq_fw2x_get_cable_len, + .set_eee_rate = aq_fw2x_set_eee_rate, + .get_eee_rate = aq_fw2x_get_eee_rate, + .get_flow_control = aq_fw2x_get_flow_control, + .set_flow_control = aq_fw2x_set_flow_control, + .led_control = aq_fw2x_led_control, + .get_eeprom = aq_fw2x_get_eeprom, + .set_eeprom = aq_fw2x_set_eeprom, + .send_macsec_req = aq_fw2x_send_macsec_request, +}; diff --git a/src/spdk/dpdk/drivers/net/atlantic/meson.build b/src/spdk/dpdk/drivers/net/atlantic/meson.build new file mode 100644 index 000000000..60b84684e --- /dev/null +++ b/src/spdk/dpdk/drivers/net/atlantic/meson.build @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2018 Aquantia Corporation + +sources = files( + 'atl_rxtx.c', + 'atl_ethdev.c', + 'atl_hw_regs.c', + 'hw_atl/hw_atl_b0.c', + 'hw_atl/hw_atl_llh.c', + 'hw_atl/hw_atl_utils_fw2x.c', + 'hw_atl/hw_atl_utils.c', + 'rte_pmd_atlantic.c', +) diff --git a/src/spdk/dpdk/drivers/net/atlantic/rte_pmd_atlantic.c b/src/spdk/dpdk/drivers/net/atlantic/rte_pmd_atlantic.c new file mode 100644 index 000000000..2962f5c6c --- /dev/null +++ b/src/spdk/dpdk/drivers/net/atlantic/rte_pmd_atlantic.c @@ -0,0 +1,102 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Aquantia Corporation + */ + +#include + +#include "rte_pmd_atlantic.h" +#include "atl_ethdev.h" + + +int +rte_pmd_atl_macsec_enable(uint16_t port, + uint8_t encr, uint8_t repl_prot) +{ + struct rte_eth_dev *dev; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + + if (!is_atlantic_supported(dev)) + return -ENOTSUP; + + return atl_macsec_enable(dev, encr, repl_prot); +} + +int +rte_pmd_atl_macsec_disable(uint16_t port) +{ + struct rte_eth_dev *dev; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + + if (!is_atlantic_supported(dev)) + return -ENOTSUP; + + return atl_macsec_disable(dev); +} + +int +rte_pmd_atl_macsec_config_txsc(uint16_t port, uint8_t *mac) +{ + struct rte_eth_dev *dev; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + + if (!is_atlantic_supported(dev)) + return -ENOTSUP; + + return atl_macsec_config_txsc(dev, mac); +} + +int +rte_pmd_atl_macsec_config_rxsc(uint16_t port, uint8_t *mac, uint16_t pi) +{ + struct rte_eth_dev *dev; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + + if (!is_atlantic_supported(dev)) + return -ENOTSUP; + + return atl_macsec_config_rxsc(dev, mac, pi); +} + +int +rte_pmd_atl_macsec_select_txsa(uint16_t port, uint8_t idx, uint8_t an, + uint32_t pn, uint8_t *key) +{ + struct rte_eth_dev *dev; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + + if (!is_atlantic_supported(dev)) + return -ENOTSUP; + + return atl_macsec_select_txsa(dev, idx, an, pn, key); +} + +int +rte_pmd_atl_macsec_select_rxsa(uint16_t port, uint8_t idx, uint8_t an, + uint32_t pn, uint8_t *key) +{ + struct rte_eth_dev *dev; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = 
&rte_eth_devices[port]; + + if (!is_atlantic_supported(dev)) + return -ENOTSUP; + + return atl_macsec_select_rxsa(dev, idx, an, pn, key); +} diff --git a/src/spdk/dpdk/drivers/net/atlantic/rte_pmd_atlantic.h b/src/spdk/dpdk/drivers/net/atlantic/rte_pmd_atlantic.h new file mode 100644 index 000000000..c0208569b --- /dev/null +++ b/src/spdk/dpdk/drivers/net/atlantic/rte_pmd_atlantic.h @@ -0,0 +1,144 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Aquantia Corporation + */ + +/** + * @file rte_pmd_atlantic.h + * atlantic PMD specific functions. + * + **/ + +#ifndef _PMD_ATLANTIC_H_ +#define _PMD_ATLANTIC_H_ + +#include + +/** + * @warning + * @b EXPERIMENTAL: this API may change, or be removed, without prior notice + * + * Enable MACsec offload. + * + * @param port + * The port identifier of the Ethernet device. + * @param encr + * 1 - Enable encryption (encrypt and add integrity signature). + * 0 - Disable encryption (only add integrity signature). + * @param repl_prot + * 1 - Enable replay protection. + * 0 - Disable replay protection. + * @return + * - (0) if successful. + * - (-ENODEV) if *port* invalid. + * - (-ENOTSUP) if hardware doesn't support this feature. + */ +__rte_experimental +int rte_pmd_atl_macsec_enable(uint16_t port, uint8_t encr, uint8_t repl_prot); + +/** + * @warning + * @b EXPERIMENTAL: this API may change, or be removed, without prior notice + * + * Disable MACsec offload. + * + * @param port + * The port identifier of the Ethernet device. + * @return + * - (0) if successful. + * - (-ENODEV) if *port* invalid. + * - (-ENOTSUP) if hardware doesn't support this feature. + */ +__rte_experimental +int rte_pmd_atl_macsec_disable(uint16_t port); + +/** + * @warning + * @b EXPERIMENTAL: this API may change, or be removed, without prior notice + * + * Configure Tx SC (Secure Connection). + * + * @param port + * The port identifier of the Ethernet device. + * @param mac + * The MAC address on the local side. + * @return + * - (0) if successful. + * - (-ENODEV) if *port* invalid. + * - (-ENOTSUP) if hardware doesn't support this feature. + */ +__rte_experimental +int rte_pmd_atl_macsec_config_txsc(uint16_t port, uint8_t *mac); + +/** + * @warning + * @b EXPERIMENTAL: this API may change, or be removed, without prior notice + * + * Configure Rx SC (Secure Connection). + * + * @param port + * The port identifier of the Ethernet device. + * @param mac + * The MAC address on the remote side. + * @param pi + * The PI (port identifier) on the remote side. + * @return + * - (0) if successful. + * - (-ENODEV) if *port* invalid. + * - (-ENOTSUP) if hardware doesn't support this feature. + */ +__rte_experimental +int rte_pmd_atl_macsec_config_rxsc(uint16_t port, uint8_t *mac, uint16_t pi); + +/** + * @warning + * @b EXPERIMENTAL: this API may change, or be removed, without prior notice + * + * Enable Tx SA (Secure Association). + * + * @param port + * The port identifier of the Ethernet device. + * @param idx + * The SA to be enabled (0 or 1). + * @param an + * The association number on the local side. + * @param pn + * The packet number on the local side. + * @param key + * The key on the local side. + * @return + * - (0) if successful. + * - (-ENODEV) if *port* invalid. + * - (-ENOTSUP) if hardware doesn't support this feature. + * - (-EINVAL) if bad parameter. 
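+ *
+ * A minimal usage sketch (editorial illustration, not part of the upstream
+ * API documentation; port_id, the MAC address and the all-zero 128-bit key
+ * below are placeholder values):
+ *
+ * @code
+ * uint8_t mac[6] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
+ * uint8_t key[16] = { 0 };
+ *
+ * if (rte_pmd_atl_macsec_enable(port_id, 1, 1) == 0 &&
+ *     rte_pmd_atl_macsec_config_txsc(port_id, mac) == 0)
+ *     rte_pmd_atl_macsec_select_txsa(port_id, 0, 0, 1, key);
+ * @endcode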
+ */ +__rte_experimental +int rte_pmd_atl_macsec_select_txsa(uint16_t port, uint8_t idx, uint8_t an, + uint32_t pn, uint8_t *key); + +/** + * @warning + * @b EXPERIMENTAL: this API may change, or be removed, without prior notice + * + * Enable Rx SA (Secure Association). + * + * @param port + * The port identifier of the Ethernet device. + * @param idx + * The SA to be enabled (0 or 1) + * @param an + * The association number on the remote side. + * @param pn + * The packet number on the remote side. + * @param key + * The key on the remote side. + * @return + * - (0) if successful. + * - (-ENODEV) if *port* invalid. + * - (-ENOTSUP) if hardware doesn't support this feature. + * - (-EINVAL) if bad parameter. + */ +__rte_experimental +int rte_pmd_atl_macsec_select_rxsa(uint16_t port, uint8_t idx, uint8_t an, + uint32_t pn, uint8_t *key); + +#endif /* _PMD_ATLANTIC_H_ */ diff --git a/src/spdk/dpdk/drivers/net/atlantic/rte_pmd_atlantic_version.map b/src/spdk/dpdk/drivers/net/atlantic/rte_pmd_atlantic_version.map new file mode 100644 index 000000000..9b04838d8 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/atlantic/rte_pmd_atlantic_version.map @@ -0,0 +1,14 @@ +DPDK_20.0 { + local: *; +}; + +EXPERIMENTAL { + global: + + rte_pmd_atl_macsec_enable; + rte_pmd_atl_macsec_disable; + rte_pmd_atl_macsec_config_txsc; + rte_pmd_atl_macsec_config_rxsc; + rte_pmd_atl_macsec_select_txsa; + rte_pmd_atl_macsec_select_rxsa; +}; diff --git a/src/spdk/dpdk/drivers/net/avp/Makefile b/src/spdk/dpdk/drivers/net/avp/Makefile new file mode 100644 index 000000000..075247b13 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/avp/Makefile @@ -0,0 +1,29 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2013-2017, Wind River Systems, Inc. + +include $(RTE_SDK)/mk/rte.vars.mk + +# +# library name +# +LIB = librte_pmd_avp.a + +CFLAGS += -O3 +CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) +LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring +LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs +LDLIBS += -lrte_bus_pci + +EXPORT_MAP := rte_pmd_avp_version.map + +# install public header files to enable compilation of the hypervisor level +# dpdk application +SYMLINK-$(CONFIG_RTE_LIBRTE_AVP_PMD)-include += rte_avp_common.h +SYMLINK-$(CONFIG_RTE_LIBRTE_AVP_PMD)-include += rte_avp_fifo.h + +# +# all source files are stored in SRCS-y +# +SRCS-$(CONFIG_RTE_LIBRTE_AVP_PMD) += avp_ethdev.c + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/src/spdk/dpdk/drivers/net/avp/avp_ethdev.c b/src/spdk/dpdk/drivers/net/avp/avp_ethdev.c new file mode 100644 index 000000000..1abe96ce5 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/avp/avp_ethdev.c @@ -0,0 +1,2315 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2013-2017 Wind River Systems, Inc. 
+ */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "rte_avp_common.h" +#include "rte_avp_fifo.h" + +#include "avp_logs.h" + +int avp_logtype_driver; + +static int avp_dev_create(struct rte_pci_device *pci_dev, + struct rte_eth_dev *eth_dev); + +static int avp_dev_configure(struct rte_eth_dev *dev); +static int avp_dev_start(struct rte_eth_dev *dev); +static void avp_dev_stop(struct rte_eth_dev *dev); +static void avp_dev_close(struct rte_eth_dev *dev); +static int avp_dev_info_get(struct rte_eth_dev *dev, + struct rte_eth_dev_info *dev_info); +static int avp_vlan_offload_set(struct rte_eth_dev *dev, int mask); +static int avp_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete); +static int avp_dev_promiscuous_enable(struct rte_eth_dev *dev); +static int avp_dev_promiscuous_disable(struct rte_eth_dev *dev); + +static int avp_dev_rx_queue_setup(struct rte_eth_dev *dev, + uint16_t rx_queue_id, + uint16_t nb_rx_desc, + unsigned int socket_id, + const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *pool); + +static int avp_dev_tx_queue_setup(struct rte_eth_dev *dev, + uint16_t tx_queue_id, + uint16_t nb_tx_desc, + unsigned int socket_id, + const struct rte_eth_txconf *tx_conf); + +static uint16_t avp_recv_scattered_pkts(void *rx_queue, + struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); + +static uint16_t avp_recv_pkts(void *rx_queue, + struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); + +static uint16_t avp_xmit_scattered_pkts(void *tx_queue, + struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); + +static uint16_t avp_xmit_pkts(void *tx_queue, + struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); + +static void avp_dev_rx_queue_release(void *rxq); +static void avp_dev_tx_queue_release(void *txq); + +static int avp_dev_stats_get(struct rte_eth_dev *dev, + struct rte_eth_stats *stats); +static int avp_dev_stats_reset(struct rte_eth_dev *dev); + + +#define AVP_MAX_RX_BURST 64 +#define AVP_MAX_TX_BURST 64 +#define AVP_MAX_MAC_ADDRS 1 +#define AVP_MIN_RX_BUFSIZE RTE_ETHER_MIN_LEN + + +/* + * Defines the number of microseconds to wait before checking the response + * queue for completion. + */ +#define AVP_REQUEST_DELAY_USECS (5000) + +/* + * Defines the number times to check the response queue for completion before + * declaring a timeout. 
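+ *
+ * Editorial note: combined with AVP_REQUEST_DELAY_USECS (5000) above, the
+ * retry count of 100 defined below bounds each control request to roughly
+ * 500 ms of waiting.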
+ */ +#define AVP_MAX_REQUEST_RETRY (100) + +/* Defines the current PCI driver version number */ +#define AVP_DPDK_DRIVER_VERSION RTE_AVP_CURRENT_GUEST_VERSION + +/* + * The set of PCI devices this driver supports + */ +static const struct rte_pci_id pci_id_avp_map[] = { + { .vendor_id = RTE_AVP_PCI_VENDOR_ID, + .device_id = RTE_AVP_PCI_DEVICE_ID, + .subsystem_vendor_id = RTE_AVP_PCI_SUB_VENDOR_ID, + .subsystem_device_id = RTE_AVP_PCI_SUB_DEVICE_ID, + .class_id = RTE_CLASS_ANY_ID, + }, + + { .vendor_id = 0, /* sentinel */ + }, +}; + +/* + * dev_ops for avp, bare necessities for basic operation + */ +static const struct eth_dev_ops avp_eth_dev_ops = { + .dev_configure = avp_dev_configure, + .dev_start = avp_dev_start, + .dev_stop = avp_dev_stop, + .dev_close = avp_dev_close, + .dev_infos_get = avp_dev_info_get, + .vlan_offload_set = avp_vlan_offload_set, + .stats_get = avp_dev_stats_get, + .stats_reset = avp_dev_stats_reset, + .link_update = avp_dev_link_update, + .promiscuous_enable = avp_dev_promiscuous_enable, + .promiscuous_disable = avp_dev_promiscuous_disable, + .rx_queue_setup = avp_dev_rx_queue_setup, + .rx_queue_release = avp_dev_rx_queue_release, + .tx_queue_setup = avp_dev_tx_queue_setup, + .tx_queue_release = avp_dev_tx_queue_release, +}; + +/**@{ AVP device flags */ +#define AVP_F_PROMISC (1 << 1) +#define AVP_F_CONFIGURED (1 << 2) +#define AVP_F_LINKUP (1 << 3) +#define AVP_F_DETACHED (1 << 4) +/**@} */ + +/* Ethernet device validation marker */ +#define AVP_ETHDEV_MAGIC 0x92972862 + +/* + * Defines the AVP device attributes which are attached to an RTE ethernet + * device + */ +struct avp_dev { + uint32_t magic; /**< Memory validation marker */ + uint64_t device_id; /**< Unique system identifier */ + struct rte_ether_addr ethaddr; /**< Host specified MAC address */ + struct rte_eth_dev_data *dev_data; + /**< Back pointer to ethernet device data */ + volatile uint32_t flags; /**< Device operational flags */ + uint16_t port_id; /**< Ethernet port identifier */ + struct rte_mempool *pool; /**< pkt mbuf mempool */ + unsigned int guest_mbuf_size; /**< local pool mbuf size */ + unsigned int host_mbuf_size; /**< host mbuf size */ + unsigned int max_rx_pkt_len; /**< maximum receive unit */ + uint32_t host_features; /**< Supported feature bitmap */ + uint32_t features; /**< Enabled feature bitmap */ + unsigned int num_tx_queues; /**< Negotiated number of transmit queues */ + unsigned int max_tx_queues; /**< Maximum number of transmit queues */ + unsigned int num_rx_queues; /**< Negotiated number of receive queues */ + unsigned int max_rx_queues; /**< Maximum number of receive queues */ + + struct rte_avp_fifo *tx_q[RTE_AVP_MAX_QUEUES]; /**< TX queue */ + struct rte_avp_fifo *rx_q[RTE_AVP_MAX_QUEUES]; /**< RX queue */ + struct rte_avp_fifo *alloc_q[RTE_AVP_MAX_QUEUES]; + /**< Allocated mbufs queue */ + struct rte_avp_fifo *free_q[RTE_AVP_MAX_QUEUES]; + /**< To be freed mbufs queue */ + + /* mutual exclusion over the 'flag' and 'resp_q/req_q' fields */ + rte_spinlock_t lock; + + /* For request & response */ + struct rte_avp_fifo *req_q; /**< Request queue */ + struct rte_avp_fifo *resp_q; /**< Response queue */ + void *host_sync_addr; /**< (host) Req/Resp Mem address */ + void *sync_addr; /**< Req/Resp Mem address */ + void *host_mbuf_addr; /**< (host) MBUF pool start address */ + void *mbuf_addr; /**< MBUF pool start address */ +} __rte_cache_aligned; + +/* RTE ethernet private data */ +struct avp_adapter { + struct avp_dev avp; +} __rte_cache_aligned; + + +/* 32-bit MMIO register 
write */ +#define AVP_WRITE32(_value, _addr) rte_write32_relaxed((_value), (_addr)) + +/* 32-bit MMIO register read */ +#define AVP_READ32(_addr) rte_read32_relaxed((_addr)) + +/* Macro to cast the ethernet device private data to a AVP object */ +#define AVP_DEV_PRIVATE_TO_HW(adapter) \ + (&((struct avp_adapter *)adapter)->avp) + +/* + * Defines the structure of a AVP device queue for the purpose of handling the + * receive and transmit burst callback functions + */ +struct avp_queue { + struct rte_eth_dev_data *dev_data; + /**< Backpointer to ethernet device data */ + struct avp_dev *avp; /**< Backpointer to AVP device */ + uint16_t queue_id; + /**< Queue identifier used for indexing current queue */ + uint16_t queue_base; + /**< Base queue identifier for queue servicing */ + uint16_t queue_limit; + /**< Maximum queue identifier for queue servicing */ + + uint64_t packets; + uint64_t bytes; + uint64_t errors; +}; + +/* send a request and wait for a response + * + * @warning must be called while holding the avp->lock spinlock. + */ +static int +avp_dev_process_request(struct avp_dev *avp, struct rte_avp_request *request) +{ + unsigned int retry = AVP_MAX_REQUEST_RETRY; + void *resp_addr = NULL; + unsigned int count; + int ret; + + PMD_DRV_LOG(DEBUG, "Sending request %u to host\n", request->req_id); + + request->result = -ENOTSUP; + + /* Discard any stale responses before starting a new request */ + while (avp_fifo_get(avp->resp_q, (void **)&resp_addr, 1)) + PMD_DRV_LOG(DEBUG, "Discarding stale response\n"); + + rte_memcpy(avp->sync_addr, request, sizeof(*request)); + count = avp_fifo_put(avp->req_q, &avp->host_sync_addr, 1); + if (count < 1) { + PMD_DRV_LOG(ERR, "Cannot send request %u to host\n", + request->req_id); + ret = -EBUSY; + goto done; + } + + while (retry--) { + /* wait for a response */ + usleep(AVP_REQUEST_DELAY_USECS); + + count = avp_fifo_count(avp->resp_q); + if (count >= 1) { + /* response received */ + break; + } + + if ((count < 1) && (retry == 0)) { + PMD_DRV_LOG(ERR, "Timeout while waiting for a response for %u\n", + request->req_id); + ret = -ETIME; + goto done; + } + } + + /* retrieve the response */ + count = avp_fifo_get(avp->resp_q, (void **)&resp_addr, 1); + if ((count != 1) || (resp_addr != avp->host_sync_addr)) { + PMD_DRV_LOG(ERR, "Invalid response from host, count=%u resp=%p host_sync_addr=%p\n", + count, resp_addr, avp->host_sync_addr); + ret = -ENODATA; + goto done; + } + + /* copy to user buffer */ + rte_memcpy(request, avp->sync_addr, sizeof(*request)); + ret = 0; + + PMD_DRV_LOG(DEBUG, "Result %d received for request %u\n", + request->result, request->req_id); + +done: + return ret; +} + +static int +avp_dev_ctrl_set_link_state(struct rte_eth_dev *eth_dev, unsigned int state) +{ + struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); + struct rte_avp_request request; + int ret; + + /* setup a link state change request */ + memset(&request, 0, sizeof(request)); + request.req_id = RTE_AVP_REQ_CFG_NETWORK_IF; + request.if_up = state; + + ret = avp_dev_process_request(avp, &request); + + return ret == 0 ? 
request.result : ret; +} + +static int +avp_dev_ctrl_set_config(struct rte_eth_dev *eth_dev, + struct rte_avp_device_config *config) +{ + struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); + struct rte_avp_request request; + int ret; + + /* setup a configure request */ + memset(&request, 0, sizeof(request)); + request.req_id = RTE_AVP_REQ_CFG_DEVICE; + memcpy(&request.config, config, sizeof(request.config)); + + ret = avp_dev_process_request(avp, &request); + + return ret == 0 ? request.result : ret; +} + +static int +avp_dev_ctrl_shutdown(struct rte_eth_dev *eth_dev) +{ + struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); + struct rte_avp_request request; + int ret; + + /* setup a shutdown request */ + memset(&request, 0, sizeof(request)); + request.req_id = RTE_AVP_REQ_SHUTDOWN_DEVICE; + + ret = avp_dev_process_request(avp, &request); + + return ret == 0 ? request.result : ret; +} + +/* translate from host mbuf virtual address to guest virtual address */ +static inline void * +avp_dev_translate_buffer(struct avp_dev *avp, void *host_mbuf_address) +{ + return RTE_PTR_ADD(RTE_PTR_SUB(host_mbuf_address, + (uintptr_t)avp->host_mbuf_addr), + (uintptr_t)avp->mbuf_addr); +} + +/* translate from host physical address to guest virtual address */ +static void * +avp_dev_translate_address(struct rte_eth_dev *eth_dev, + rte_iova_t host_phys_addr) +{ + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); + struct rte_mem_resource *resource; + struct rte_avp_memmap_info *info; + struct rte_avp_memmap *map; + off_t offset; + void *addr; + unsigned int i; + + addr = pci_dev->mem_resource[RTE_AVP_PCI_MEMORY_BAR].addr; + resource = &pci_dev->mem_resource[RTE_AVP_PCI_MEMMAP_BAR]; + info = (struct rte_avp_memmap_info *)resource->addr; + + offset = 0; + for (i = 0; i < info->nb_maps; i++) { + /* search all segments looking for a matching address */ + map = &info->maps[i]; + + if ((host_phys_addr >= map->phys_addr) && + (host_phys_addr < (map->phys_addr + map->length))) { + /* address is within this segment */ + offset += (host_phys_addr - map->phys_addr); + addr = RTE_PTR_ADD(addr, (uintptr_t)offset); + + PMD_DRV_LOG(DEBUG, "Translating host physical 0x%" PRIx64 " to guest virtual 0x%p\n", + host_phys_addr, addr); + + return addr; + } + offset += map->length; + } + + return NULL; +} + +/* verify that the incoming device version is compatible with our version */ +static int +avp_dev_version_check(uint32_t version) +{ + uint32_t driver = RTE_AVP_STRIP_MINOR_VERSION(AVP_DPDK_DRIVER_VERSION); + uint32_t device = RTE_AVP_STRIP_MINOR_VERSION(version); + + if (device <= driver) { + /* the host driver version is less than or equal to ours */ + return 0; + } + + return 1; +} + +/* verify that memory regions have expected version and validation markers */ +static int +avp_dev_check_regions(struct rte_eth_dev *eth_dev) +{ + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); + struct rte_avp_memmap_info *memmap; + struct rte_avp_device_info *info; + struct rte_mem_resource *resource; + unsigned int i; + + /* Dump resource info for debug */ + for (i = 0; i < PCI_MAX_RESOURCE; i++) { + resource = &pci_dev->mem_resource[i]; + if ((resource->phys_addr == 0) || (resource->len == 0)) + continue; + + PMD_DRV_LOG(DEBUG, "resource[%u]: phys=0x%" PRIx64 " len=%" PRIu64 " addr=%p\n", + i, resource->phys_addr, + resource->len, resource->addr); + + switch (i) { + case RTE_AVP_PCI_MEMMAP_BAR: + memmap = (struct rte_avp_memmap_info *)resource->addr; + if ((memmap->magic 
!= RTE_AVP_MEMMAP_MAGIC) || + (memmap->version != RTE_AVP_MEMMAP_VERSION)) { + PMD_DRV_LOG(ERR, "Invalid memmap magic 0x%08x and version %u\n", + memmap->magic, memmap->version); + return -EINVAL; + } + break; + + case RTE_AVP_PCI_DEVICE_BAR: + info = (struct rte_avp_device_info *)resource->addr; + if ((info->magic != RTE_AVP_DEVICE_MAGIC) || + avp_dev_version_check(info->version)) { + PMD_DRV_LOG(ERR, "Invalid device info magic 0x%08x or version 0x%08x > 0x%08x\n", + info->magic, info->version, + AVP_DPDK_DRIVER_VERSION); + return -EINVAL; + } + break; + + case RTE_AVP_PCI_MEMORY_BAR: + case RTE_AVP_PCI_MMIO_BAR: + if (resource->addr == NULL) { + PMD_DRV_LOG(ERR, "Missing address space for BAR%u\n", + i); + return -EINVAL; + } + break; + + case RTE_AVP_PCI_MSIX_BAR: + default: + /* no validation required */ + break; + } + } + + return 0; +} + +static int +avp_dev_detach(struct rte_eth_dev *eth_dev) +{ + struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); + int ret; + + PMD_DRV_LOG(NOTICE, "Detaching port %u from AVP device 0x%" PRIx64 "\n", + eth_dev->data->port_id, avp->device_id); + + rte_spinlock_lock(&avp->lock); + + if (avp->flags & AVP_F_DETACHED) { + PMD_DRV_LOG(NOTICE, "port %u already detached\n", + eth_dev->data->port_id); + ret = 0; + goto unlock; + } + + /* shutdown the device first so the host stops sending us packets. */ + ret = avp_dev_ctrl_shutdown(eth_dev); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Failed to send/recv shutdown to host, ret=%d\n", + ret); + avp->flags &= ~AVP_F_DETACHED; + goto unlock; + } + + avp->flags |= AVP_F_DETACHED; + rte_wmb(); + + /* wait for queues to acknowledge the presence of the detach flag */ + rte_delay_ms(1); + + ret = 0; + +unlock: + rte_spinlock_unlock(&avp->lock); + return ret; +} + +static void +_avp_set_rx_queue_mappings(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id) +{ + struct avp_dev *avp = + AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); + struct avp_queue *rxq; + uint16_t queue_count; + uint16_t remainder; + + rxq = (struct avp_queue *)eth_dev->data->rx_queues[rx_queue_id]; + + /* + * Must map all AVP fifos as evenly as possible between the configured + * device queues. Each device queue will service a subset of the AVP + * fifos. If there is an odd number of device queues the first set of + * device queues will get the extra AVP fifos. 
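+ * As an illustration, with 5 AVP fifos spread across 2 device queues,
+ * queue_count is 2 and remainder is 1, so device queue 0 services
+ * fifos 0-2 and device queue 1 services fifos 3-4.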
+ */ + queue_count = avp->num_rx_queues / eth_dev->data->nb_rx_queues; + remainder = avp->num_rx_queues % eth_dev->data->nb_rx_queues; + if (rx_queue_id < remainder) { + /* these queues must service one extra FIFO */ + rxq->queue_base = rx_queue_id * (queue_count + 1); + rxq->queue_limit = rxq->queue_base + (queue_count + 1) - 1; + } else { + /* these queues service the regular number of FIFO */ + rxq->queue_base = ((remainder * (queue_count + 1)) + + ((rx_queue_id - remainder) * queue_count)); + rxq->queue_limit = rxq->queue_base + queue_count - 1; + } + + PMD_DRV_LOG(DEBUG, "rxq %u at %p base %u limit %u\n", + rx_queue_id, rxq, rxq->queue_base, rxq->queue_limit); + + rxq->queue_id = rxq->queue_base; +} + +static void +_avp_set_queue_counts(struct rte_eth_dev *eth_dev) +{ + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); + struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); + struct rte_avp_device_info *host_info; + void *addr; + + addr = pci_dev->mem_resource[RTE_AVP_PCI_DEVICE_BAR].addr; + host_info = (struct rte_avp_device_info *)addr; + + /* + * the transmit direction is not negotiated beyond respecting the max + * number of queues because the host can handle arbitrary guest tx + * queues (host rx queues). + */ + avp->num_tx_queues = eth_dev->data->nb_tx_queues; + + /* + * the receive direction is more restrictive. The host requires a + * minimum number of guest rx queues (host tx queues) therefore + * negotiate a value that is at least as large as the host minimum + * requirement. If the host and guest values are not identical then a + * mapping will be established in the receive_queue_setup function. + */ + avp->num_rx_queues = RTE_MAX(host_info->min_rx_queues, + eth_dev->data->nb_rx_queues); + + PMD_DRV_LOG(DEBUG, "Requesting %u Tx and %u Rx queues from host\n", + avp->num_tx_queues, avp->num_rx_queues); +} + +static int +avp_dev_attach(struct rte_eth_dev *eth_dev) +{ + struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); + struct rte_avp_device_config config; + unsigned int i; + int ret; + + PMD_DRV_LOG(NOTICE, "Attaching port %u to AVP device 0x%" PRIx64 "\n", + eth_dev->data->port_id, avp->device_id); + + rte_spinlock_lock(&avp->lock); + + if (!(avp->flags & AVP_F_DETACHED)) { + PMD_DRV_LOG(NOTICE, "port %u already attached\n", + eth_dev->data->port_id); + ret = 0; + goto unlock; + } + + /* + * make sure that the detached flag is set prior to reconfiguring the + * queues. + */ + avp->flags |= AVP_F_DETACHED; + rte_wmb(); + + /* + * re-run the device create utility which will parse the new host info + * and setup the AVP device queue pointers. + */ + ret = avp_dev_create(RTE_ETH_DEV_TO_PCI(eth_dev), eth_dev); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Failed to re-create AVP device, ret=%d\n", + ret); + goto unlock; + } + + if (avp->flags & AVP_F_CONFIGURED) { + /* + * Update the receive queue mapping to handle cases where the + * source and destination hosts have different queue + * requirements. As long as the DETACHED flag is asserted the + * queue table should not be referenced so it should be safe to + * update it. + */ + _avp_set_queue_counts(eth_dev); + for (i = 0; i < eth_dev->data->nb_rx_queues; i++) + _avp_set_rx_queue_mappings(eth_dev, i); + + /* + * Update the host with our config details so that it knows the + * device is active. 
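+ * The same RTE_AVP_REQ_CFG_DEVICE request is also issued from
+ * avp_dev_configure() during a normal configuration sequence.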
+ */ + memset(&config, 0, sizeof(config)); + config.device_id = avp->device_id; + config.driver_type = RTE_AVP_DRIVER_TYPE_DPDK; + config.driver_version = AVP_DPDK_DRIVER_VERSION; + config.features = avp->features; + config.num_tx_queues = avp->num_tx_queues; + config.num_rx_queues = avp->num_rx_queues; + config.if_up = !!(avp->flags & AVP_F_LINKUP); + + ret = avp_dev_ctrl_set_config(eth_dev, &config); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Config request failed by host, ret=%d\n", + ret); + goto unlock; + } + } + + rte_wmb(); + avp->flags &= ~AVP_F_DETACHED; + + ret = 0; + +unlock: + rte_spinlock_unlock(&avp->lock); + return ret; +} + +static void +avp_dev_interrupt_handler(void *data) +{ + struct rte_eth_dev *eth_dev = data; + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); + void *registers = pci_dev->mem_resource[RTE_AVP_PCI_MMIO_BAR].addr; + uint32_t status, value; + int ret; + + if (registers == NULL) + rte_panic("no mapped MMIO register space\n"); + + /* read the interrupt status register + * note: this register clears on read so all raised interrupts must be + * handled or remembered for later processing + */ + status = AVP_READ32( + RTE_PTR_ADD(registers, + RTE_AVP_INTERRUPT_STATUS_OFFSET)); + + if (status & RTE_AVP_MIGRATION_INTERRUPT_MASK) { + /* handle interrupt based on current status */ + value = AVP_READ32( + RTE_PTR_ADD(registers, + RTE_AVP_MIGRATION_STATUS_OFFSET)); + switch (value) { + case RTE_AVP_MIGRATION_DETACHED: + ret = avp_dev_detach(eth_dev); + break; + case RTE_AVP_MIGRATION_ATTACHED: + ret = avp_dev_attach(eth_dev); + break; + default: + PMD_DRV_LOG(ERR, "unexpected migration status, status=%u\n", + value); + ret = -EINVAL; + } + + /* acknowledge the request by writing out our current status */ + value = (ret == 0 ? 
value : RTE_AVP_MIGRATION_ERROR); + AVP_WRITE32(value, + RTE_PTR_ADD(registers, + RTE_AVP_MIGRATION_ACK_OFFSET)); + + PMD_DRV_LOG(NOTICE, "AVP migration interrupt handled\n"); + } + + if (status & ~RTE_AVP_MIGRATION_INTERRUPT_MASK) + PMD_DRV_LOG(WARNING, "AVP unexpected interrupt, status=0x%08x\n", + status); + + /* re-enable UIO interrupt handling */ + ret = rte_intr_ack(&pci_dev->intr_handle); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Failed to re-enable UIO interrupts, ret=%d\n", + ret); + /* continue */ + } +} + +static int +avp_dev_enable_interrupts(struct rte_eth_dev *eth_dev) +{ + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); + void *registers = pci_dev->mem_resource[RTE_AVP_PCI_MMIO_BAR].addr; + int ret; + + if (registers == NULL) + return -EINVAL; + + /* enable UIO interrupt handling */ + ret = rte_intr_enable(&pci_dev->intr_handle); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Failed to enable UIO interrupts, ret=%d\n", + ret); + return ret; + } + + /* inform the device that all interrupts are enabled */ + AVP_WRITE32(RTE_AVP_APP_INTERRUPTS_MASK, + RTE_PTR_ADD(registers, RTE_AVP_INTERRUPT_MASK_OFFSET)); + + return 0; +} + +static int +avp_dev_disable_interrupts(struct rte_eth_dev *eth_dev) +{ + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); + void *registers = pci_dev->mem_resource[RTE_AVP_PCI_MMIO_BAR].addr; + int ret; + + if (registers == NULL) + return 0; + + /* inform the device that all interrupts are disabled */ + AVP_WRITE32(RTE_AVP_NO_INTERRUPTS_MASK, + RTE_PTR_ADD(registers, RTE_AVP_INTERRUPT_MASK_OFFSET)); + + /* enable UIO interrupt handling */ + ret = rte_intr_disable(&pci_dev->intr_handle); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Failed to disable UIO interrupts, ret=%d\n", + ret); + return ret; + } + + return 0; +} + +static int +avp_dev_setup_interrupts(struct rte_eth_dev *eth_dev) +{ + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); + int ret; + + /* register a callback handler with UIO for interrupt notifications */ + ret = rte_intr_callback_register(&pci_dev->intr_handle, + avp_dev_interrupt_handler, + (void *)eth_dev); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Failed to register UIO interrupt callback, ret=%d\n", + ret); + return ret; + } + + /* enable interrupt processing */ + return avp_dev_enable_interrupts(eth_dev); +} + +static int +avp_dev_migration_pending(struct rte_eth_dev *eth_dev) +{ + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); + void *registers = pci_dev->mem_resource[RTE_AVP_PCI_MMIO_BAR].addr; + uint32_t value; + + if (registers == NULL) + return 0; + + value = AVP_READ32(RTE_PTR_ADD(registers, + RTE_AVP_MIGRATION_STATUS_OFFSET)); + if (value == RTE_AVP_MIGRATION_DETACHED) { + /* migration is in progress; ack it if we have not already */ + AVP_WRITE32(value, + RTE_PTR_ADD(registers, + RTE_AVP_MIGRATION_ACK_OFFSET)); + return 1; + } + return 0; +} + +/* + * create a AVP device using the supplied device info by first translating it + * to guest address space(s). 
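+ * The host publishes its queue, sync and mbuf pool locations as host
+ * physical addresses in the device BAR; avp_dev_translate_address()
+ * walks the memmap BAR segments to convert each of them into a guest
+ * virtual pointer before the addresses are stored in the avp_dev.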
+ */ +static int +avp_dev_create(struct rte_pci_device *pci_dev, + struct rte_eth_dev *eth_dev) +{ + struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); + struct rte_avp_device_info *host_info; + struct rte_mem_resource *resource; + unsigned int i; + + resource = &pci_dev->mem_resource[RTE_AVP_PCI_DEVICE_BAR]; + if (resource->addr == NULL) { + PMD_DRV_LOG(ERR, "BAR%u is not mapped\n", + RTE_AVP_PCI_DEVICE_BAR); + return -EFAULT; + } + host_info = (struct rte_avp_device_info *)resource->addr; + + if ((host_info->magic != RTE_AVP_DEVICE_MAGIC) || + avp_dev_version_check(host_info->version)) { + PMD_DRV_LOG(ERR, "Invalid AVP PCI device, magic 0x%08x version 0x%08x > 0x%08x\n", + host_info->magic, host_info->version, + AVP_DPDK_DRIVER_VERSION); + return -EINVAL; + } + + PMD_DRV_LOG(DEBUG, "AVP host device is v%u.%u.%u\n", + RTE_AVP_GET_RELEASE_VERSION(host_info->version), + RTE_AVP_GET_MAJOR_VERSION(host_info->version), + RTE_AVP_GET_MINOR_VERSION(host_info->version)); + + PMD_DRV_LOG(DEBUG, "AVP host supports %u to %u TX queue(s)\n", + host_info->min_tx_queues, host_info->max_tx_queues); + PMD_DRV_LOG(DEBUG, "AVP host supports %u to %u RX queue(s)\n", + host_info->min_rx_queues, host_info->max_rx_queues); + PMD_DRV_LOG(DEBUG, "AVP host supports features 0x%08x\n", + host_info->features); + + if (avp->magic != AVP_ETHDEV_MAGIC) { + /* + * First time initialization (i.e., not during a VM + * migration) + */ + memset(avp, 0, sizeof(*avp)); + avp->magic = AVP_ETHDEV_MAGIC; + avp->dev_data = eth_dev->data; + avp->port_id = eth_dev->data->port_id; + avp->host_mbuf_size = host_info->mbuf_size; + avp->host_features = host_info->features; + rte_spinlock_init(&avp->lock); + memcpy(&avp->ethaddr.addr_bytes[0], + host_info->ethaddr, RTE_ETHER_ADDR_LEN); + /* adjust max values to not exceed our max */ + avp->max_tx_queues = + RTE_MIN(host_info->max_tx_queues, RTE_AVP_MAX_QUEUES); + avp->max_rx_queues = + RTE_MIN(host_info->max_rx_queues, RTE_AVP_MAX_QUEUES); + } else { + /* Re-attaching during migration */ + + /* TODO... 
requires validation of host values */ + if ((host_info->features & avp->features) != avp->features) { + PMD_DRV_LOG(ERR, "AVP host features mismatched; 0x%08x, host=0x%08x\n", + avp->features, host_info->features); + /* this should not be possible; continue for now */ + } + } + + /* the device id is allowed to change over migrations */ + avp->device_id = host_info->device_id; + + /* translate incoming host addresses to guest address space */ + PMD_DRV_LOG(DEBUG, "AVP first host tx queue at 0x%" PRIx64 "\n", + host_info->tx_phys); + PMD_DRV_LOG(DEBUG, "AVP first host alloc queue at 0x%" PRIx64 "\n", + host_info->alloc_phys); + for (i = 0; i < avp->max_tx_queues; i++) { + avp->tx_q[i] = avp_dev_translate_address(eth_dev, + host_info->tx_phys + (i * host_info->tx_size)); + + avp->alloc_q[i] = avp_dev_translate_address(eth_dev, + host_info->alloc_phys + (i * host_info->alloc_size)); + } + + PMD_DRV_LOG(DEBUG, "AVP first host rx queue at 0x%" PRIx64 "\n", + host_info->rx_phys); + PMD_DRV_LOG(DEBUG, "AVP first host free queue at 0x%" PRIx64 "\n", + host_info->free_phys); + for (i = 0; i < avp->max_rx_queues; i++) { + avp->rx_q[i] = avp_dev_translate_address(eth_dev, + host_info->rx_phys + (i * host_info->rx_size)); + avp->free_q[i] = avp_dev_translate_address(eth_dev, + host_info->free_phys + (i * host_info->free_size)); + } + + PMD_DRV_LOG(DEBUG, "AVP host request queue at 0x%" PRIx64 "\n", + host_info->req_phys); + PMD_DRV_LOG(DEBUG, "AVP host response queue at 0x%" PRIx64 "\n", + host_info->resp_phys); + PMD_DRV_LOG(DEBUG, "AVP host sync address at 0x%" PRIx64 "\n", + host_info->sync_phys); + PMD_DRV_LOG(DEBUG, "AVP host mbuf address at 0x%" PRIx64 "\n", + host_info->mbuf_phys); + avp->req_q = avp_dev_translate_address(eth_dev, host_info->req_phys); + avp->resp_q = avp_dev_translate_address(eth_dev, host_info->resp_phys); + avp->sync_addr = + avp_dev_translate_address(eth_dev, host_info->sync_phys); + avp->mbuf_addr = + avp_dev_translate_address(eth_dev, host_info->mbuf_phys); + + /* + * store the host mbuf virtual address so that we can calculate + * relative offsets for each mbuf as they are processed + */ + avp->host_mbuf_addr = host_info->mbuf_va; + avp->host_sync_addr = host_info->sync_va; + + /* + * store the maximum packet length that is supported by the host. + */ + avp->max_rx_pkt_len = host_info->max_rx_pkt_len; + PMD_DRV_LOG(DEBUG, "AVP host max receive packet length is %u\n", + host_info->max_rx_pkt_len); + + return 0; +} + +/* + * This function is based on probe() function in avp_pci.c + * It returns 0 on success. + */ +static int +eth_avp_dev_init(struct rte_eth_dev *eth_dev) +{ + struct avp_dev *avp = + AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); + struct rte_pci_device *pci_dev; + int ret; + + pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); + eth_dev->dev_ops = &avp_eth_dev_ops; + eth_dev->rx_pkt_burst = &avp_recv_pkts; + eth_dev->tx_pkt_burst = &avp_xmit_pkts; + /* Let rte_eth_dev_close() release the port resources */ + eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE; + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) { + /* + * no setup required on secondary processes. All data is saved + * in dev_private by the primary process. All resource should + * be mapped to the same virtual address so all pointers should + * be valid. 
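+ * The only per-process fixup required is selecting rx/tx burst
+ * handlers that match the scattered_rx mode already chosen by the
+ * primary process.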
+ */ + if (eth_dev->data->scattered_rx) { + PMD_DRV_LOG(NOTICE, "AVP device configured for chained mbufs\n"); + eth_dev->rx_pkt_burst = avp_recv_scattered_pkts; + eth_dev->tx_pkt_burst = avp_xmit_scattered_pkts; + } + return 0; + } + + rte_eth_copy_pci_info(eth_dev, pci_dev); + + /* Check current migration status */ + if (avp_dev_migration_pending(eth_dev)) { + PMD_DRV_LOG(ERR, "VM live migration operation in progress\n"); + return -EBUSY; + } + + /* Check BAR resources */ + ret = avp_dev_check_regions(eth_dev); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Failed to validate BAR resources, ret=%d\n", + ret); + return ret; + } + + /* Enable interrupts */ + ret = avp_dev_setup_interrupts(eth_dev); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Failed to enable interrupts, ret=%d\n", ret); + return ret; + } + + /* Handle each subtype */ + ret = avp_dev_create(pci_dev, eth_dev); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Failed to create device, ret=%d\n", ret); + return ret; + } + + /* Allocate memory for storing MAC addresses */ + eth_dev->data->mac_addrs = rte_zmalloc("avp_ethdev", + RTE_ETHER_ADDR_LEN, 0); + if (eth_dev->data->mac_addrs == NULL) { + PMD_DRV_LOG(ERR, "Failed to allocate %d bytes needed to store MAC addresses\n", + RTE_ETHER_ADDR_LEN); + return -ENOMEM; + } + + /* Get a mac from device config */ + rte_ether_addr_copy(&avp->ethaddr, ð_dev->data->mac_addrs[0]); + + return 0; +} + +static int +eth_avp_dev_uninit(struct rte_eth_dev *eth_dev) +{ + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return -EPERM; + + if (eth_dev->data == NULL) + return 0; + + avp_dev_close(eth_dev); + + return 0; +} + +static int +eth_avp_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, + struct rte_pci_device *pci_dev) +{ + return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct avp_adapter), + eth_avp_dev_init); +} + +static int +eth_avp_pci_remove(struct rte_pci_device *pci_dev) +{ + return rte_eth_dev_pci_generic_remove(pci_dev, + eth_avp_dev_uninit); +} + +static struct rte_pci_driver rte_avp_pmd = { + .id_table = pci_id_avp_map, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING, + .probe = eth_avp_pci_probe, + .remove = eth_avp_pci_remove, +}; + +static int +avp_dev_enable_scattered(struct rte_eth_dev *eth_dev, + struct avp_dev *avp) +{ + unsigned int max_rx_pkt_len; + + max_rx_pkt_len = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len; + + if ((max_rx_pkt_len > avp->guest_mbuf_size) || + (max_rx_pkt_len > avp->host_mbuf_size)) { + /* + * If the guest MTU is greater than either the host or guest + * buffers then chained mbufs have to be enabled in the TX + * direction. It is assumed that the application will not need + * to send packets larger than their max_rx_pkt_len (MRU). + */ + return 1; + } + + if ((avp->max_rx_pkt_len > avp->guest_mbuf_size) || + (avp->max_rx_pkt_len > avp->host_mbuf_size)) { + /* + * If the host MRU is greater than its own mbuf size or the + * guest mbuf size then chained mbufs have to be enabled in the + * RX direction. 
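+ * For example (illustrative numbers only), a host MRU of 9000 bytes
+ * with 2KB host and guest mbufs forces chained receive even if the
+ * guest keeps its own max_rx_pkt_len small.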
+ */ + return 1; + } + + return 0; +} + +static int +avp_dev_rx_queue_setup(struct rte_eth_dev *eth_dev, + uint16_t rx_queue_id, + uint16_t nb_rx_desc, + unsigned int socket_id, + const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *pool) +{ + struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); + struct rte_pktmbuf_pool_private *mbp_priv; + struct avp_queue *rxq; + + if (rx_queue_id >= eth_dev->data->nb_rx_queues) { + PMD_DRV_LOG(ERR, "RX queue id is out of range: rx_queue_id=%u, nb_rx_queues=%u\n", + rx_queue_id, eth_dev->data->nb_rx_queues); + return -EINVAL; + } + + /* Save mbuf pool pointer */ + avp->pool = pool; + + /* Save the local mbuf size */ + mbp_priv = rte_mempool_get_priv(pool); + avp->guest_mbuf_size = (uint16_t)(mbp_priv->mbuf_data_room_size); + avp->guest_mbuf_size -= RTE_PKTMBUF_HEADROOM; + + if (avp_dev_enable_scattered(eth_dev, avp)) { + if (!eth_dev->data->scattered_rx) { + PMD_DRV_LOG(NOTICE, "AVP device configured for chained mbufs\n"); + eth_dev->data->scattered_rx = 1; + eth_dev->rx_pkt_burst = avp_recv_scattered_pkts; + eth_dev->tx_pkt_burst = avp_xmit_scattered_pkts; + } + } + + PMD_DRV_LOG(DEBUG, "AVP max_rx_pkt_len=(%u,%u) mbuf_size=(%u,%u)\n", + avp->max_rx_pkt_len, + eth_dev->data->dev_conf.rxmode.max_rx_pkt_len, + avp->host_mbuf_size, + avp->guest_mbuf_size); + + /* allocate a queue object */ + rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct avp_queue), + RTE_CACHE_LINE_SIZE, socket_id); + if (rxq == NULL) { + PMD_DRV_LOG(ERR, "Failed to allocate new Rx queue object\n"); + return -ENOMEM; + } + + /* save back pointers to AVP and Ethernet devices */ + rxq->avp = avp; + rxq->dev_data = eth_dev->data; + eth_dev->data->rx_queues[rx_queue_id] = (void *)rxq; + + /* setup the queue receive mapping for the current queue. 
*/ + _avp_set_rx_queue_mappings(eth_dev, rx_queue_id); + + PMD_DRV_LOG(DEBUG, "Rx queue %u setup at %p\n", rx_queue_id, rxq); + + (void)nb_rx_desc; + (void)rx_conf; + return 0; +} + +static int +avp_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, + uint16_t tx_queue_id, + uint16_t nb_tx_desc, + unsigned int socket_id, + const struct rte_eth_txconf *tx_conf) +{ + struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); + struct avp_queue *txq; + + if (tx_queue_id >= eth_dev->data->nb_tx_queues) { + PMD_DRV_LOG(ERR, "TX queue id is out of range: tx_queue_id=%u, nb_tx_queues=%u\n", + tx_queue_id, eth_dev->data->nb_tx_queues); + return -EINVAL; + } + + /* allocate a queue object */ + txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct avp_queue), + RTE_CACHE_LINE_SIZE, socket_id); + if (txq == NULL) { + PMD_DRV_LOG(ERR, "Failed to allocate new Tx queue object\n"); + return -ENOMEM; + } + + /* only the configured set of transmit queues are used */ + txq->queue_id = tx_queue_id; + txq->queue_base = tx_queue_id; + txq->queue_limit = tx_queue_id; + + /* save back pointers to AVP and Ethernet devices */ + txq->avp = avp; + txq->dev_data = eth_dev->data; + eth_dev->data->tx_queues[tx_queue_id] = (void *)txq; + + PMD_DRV_LOG(DEBUG, "Tx queue %u setup at %p\n", tx_queue_id, txq); + + (void)nb_tx_desc; + (void)tx_conf; + return 0; +} + +static inline int +_avp_cmp_ether_addr(struct rte_ether_addr *a, struct rte_ether_addr *b) +{ + uint16_t *_a = (uint16_t *)&a->addr_bytes[0]; + uint16_t *_b = (uint16_t *)&b->addr_bytes[0]; + return (_a[0] ^ _b[0]) | (_a[1] ^ _b[1]) | (_a[2] ^ _b[2]); +} + +static inline int +_avp_mac_filter(struct avp_dev *avp, struct rte_mbuf *m) +{ + struct rte_ether_hdr *eth = rte_pktmbuf_mtod(m, struct rte_ether_hdr *); + + if (likely(_avp_cmp_ether_addr(&avp->ethaddr, ð->d_addr) == 0)) { + /* allow all packets destined to our address */ + return 0; + } + + if (likely(rte_is_broadcast_ether_addr(ð->d_addr))) { + /* allow all broadcast packets */ + return 0; + } + + if (likely(rte_is_multicast_ether_addr(ð->d_addr))) { + /* allow all multicast packets */ + return 0; + } + + if (avp->flags & AVP_F_PROMISC) { + /* allow all packets when in promiscuous mode */ + return 0; + } + + return -1; +} + +#ifdef RTE_LIBRTE_AVP_DEBUG_BUFFERS +static inline void +__avp_dev_buffer_sanity_check(struct avp_dev *avp, struct rte_avp_desc *buf) +{ + struct rte_avp_desc *first_buf; + struct rte_avp_desc *pkt_buf; + unsigned int pkt_len; + unsigned int nb_segs; + void *pkt_data; + unsigned int i; + + first_buf = avp_dev_translate_buffer(avp, buf); + + i = 0; + pkt_len = 0; + nb_segs = first_buf->nb_segs; + do { + /* Adjust pointers for guest addressing */ + pkt_buf = avp_dev_translate_buffer(avp, buf); + if (pkt_buf == NULL) + rte_panic("bad buffer: segment %u has an invalid address %p\n", + i, buf); + pkt_data = avp_dev_translate_buffer(avp, pkt_buf->data); + if (pkt_data == NULL) + rte_panic("bad buffer: segment %u has a NULL data pointer\n", + i); + if (pkt_buf->data_len == 0) + rte_panic("bad buffer: segment %u has 0 data length\n", + i); + pkt_len += pkt_buf->data_len; + nb_segs--; + i++; + + } while (nb_segs && (buf = pkt_buf->next) != NULL); + + if (nb_segs != 0) + rte_panic("bad buffer: expected %u segments found %u\n", + first_buf->nb_segs, (first_buf->nb_segs - nb_segs)); + if (pkt_len != first_buf->pkt_len) + rte_panic("bad buffer: expected length %u found %u\n", + first_buf->pkt_len, pkt_len); +} + +#define avp_dev_buffer_sanity_check(a, b) \ + 
__avp_dev_buffer_sanity_check((a), (b)) + +#else /* RTE_LIBRTE_AVP_DEBUG_BUFFERS */ + +#define avp_dev_buffer_sanity_check(a, b) do {} while (0) + +#endif + +/* + * Copy a host buffer chain to a set of mbufs. This function assumes that + * there exactly the required number of mbufs to copy all source bytes. + */ +static inline struct rte_mbuf * +avp_dev_copy_from_buffers(struct avp_dev *avp, + struct rte_avp_desc *buf, + struct rte_mbuf **mbufs, + unsigned int count) +{ + struct rte_mbuf *m_previous = NULL; + struct rte_avp_desc *pkt_buf; + unsigned int total_length = 0; + unsigned int copy_length; + unsigned int src_offset; + struct rte_mbuf *m; + uint16_t ol_flags; + uint16_t vlan_tci; + void *pkt_data; + unsigned int i; + + avp_dev_buffer_sanity_check(avp, buf); + + /* setup the first source buffer */ + pkt_buf = avp_dev_translate_buffer(avp, buf); + pkt_data = avp_dev_translate_buffer(avp, pkt_buf->data); + total_length = pkt_buf->pkt_len; + src_offset = 0; + + if (pkt_buf->ol_flags & RTE_AVP_RX_VLAN_PKT) { + ol_flags = PKT_RX_VLAN; + vlan_tci = pkt_buf->vlan_tci; + } else { + ol_flags = 0; + vlan_tci = 0; + } + + for (i = 0; (i < count) && (buf != NULL); i++) { + /* fill each destination buffer */ + m = mbufs[i]; + + if (m_previous != NULL) + m_previous->next = m; + + m_previous = m; + + do { + /* + * Copy as many source buffers as will fit in the + * destination buffer. + */ + copy_length = RTE_MIN((avp->guest_mbuf_size - + rte_pktmbuf_data_len(m)), + (pkt_buf->data_len - + src_offset)); + rte_memcpy(RTE_PTR_ADD(rte_pktmbuf_mtod(m, void *), + rte_pktmbuf_data_len(m)), + RTE_PTR_ADD(pkt_data, src_offset), + copy_length); + rte_pktmbuf_data_len(m) += copy_length; + src_offset += copy_length; + + if (likely(src_offset == pkt_buf->data_len)) { + /* need a new source buffer */ + buf = pkt_buf->next; + if (buf != NULL) { + pkt_buf = avp_dev_translate_buffer( + avp, buf); + pkt_data = avp_dev_translate_buffer( + avp, pkt_buf->data); + src_offset = 0; + } + } + + if (unlikely(rte_pktmbuf_data_len(m) == + avp->guest_mbuf_size)) { + /* need a new destination mbuf */ + break; + } + + } while (buf != NULL); + } + + m = mbufs[0]; + m->ol_flags = ol_flags; + m->nb_segs = count; + rte_pktmbuf_pkt_len(m) = total_length; + m->vlan_tci = vlan_tci; + + __rte_mbuf_sanity_check(m, 1); + + return m; +} + +static uint16_t +avp_recv_scattered_pkts(void *rx_queue, + struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + struct avp_queue *rxq = (struct avp_queue *)rx_queue; + struct rte_avp_desc *avp_bufs[AVP_MAX_RX_BURST]; + struct rte_mbuf *mbufs[RTE_AVP_MAX_MBUF_SEGMENTS]; + struct avp_dev *avp = rxq->avp; + struct rte_avp_desc *pkt_buf; + struct rte_avp_fifo *free_q; + struct rte_avp_fifo *rx_q; + struct rte_avp_desc *buf; + unsigned int count, avail, n; + unsigned int guest_mbuf_size; + struct rte_mbuf *m; + unsigned int required; + unsigned int buf_len; + unsigned int port_id; + unsigned int i; + + if (unlikely(avp->flags & AVP_F_DETACHED)) { + /* VM live migration in progress */ + return 0; + } + + guest_mbuf_size = avp->guest_mbuf_size; + port_id = avp->port_id; + rx_q = avp->rx_q[rxq->queue_id]; + free_q = avp->free_q[rxq->queue_id]; + + /* setup next queue to service */ + rxq->queue_id = (rxq->queue_id < rxq->queue_limit) ? 
+ (rxq->queue_id + 1) : rxq->queue_base; + + /* determine how many slots are available in the free queue */ + count = avp_fifo_free_count(free_q); + + /* determine how many packets are available in the rx queue */ + avail = avp_fifo_count(rx_q); + + /* determine how many packets can be received */ + count = RTE_MIN(count, avail); + count = RTE_MIN(count, nb_pkts); + count = RTE_MIN(count, (unsigned int)AVP_MAX_RX_BURST); + + if (unlikely(count == 0)) { + /* no free buffers, or no buffers on the rx queue */ + return 0; + } + + /* retrieve pending packets */ + n = avp_fifo_get(rx_q, (void **)&avp_bufs, count); + PMD_RX_LOG(DEBUG, "Receiving %u packets from Rx queue at %p\n", + count, rx_q); + + count = 0; + for (i = 0; i < n; i++) { + /* prefetch next entry while processing current one */ + if (i + 1 < n) { + pkt_buf = avp_dev_translate_buffer(avp, + avp_bufs[i + 1]); + rte_prefetch0(pkt_buf); + } + buf = avp_bufs[i]; + + /* Peek into the first buffer to determine the total length */ + pkt_buf = avp_dev_translate_buffer(avp, buf); + buf_len = pkt_buf->pkt_len; + + /* Allocate enough mbufs to receive the entire packet */ + required = (buf_len + guest_mbuf_size - 1) / guest_mbuf_size; + if (rte_pktmbuf_alloc_bulk(avp->pool, mbufs, required)) { + rxq->dev_data->rx_mbuf_alloc_failed++; + continue; + } + + /* Copy the data from the buffers to our mbufs */ + m = avp_dev_copy_from_buffers(avp, buf, mbufs, required); + + /* finalize mbuf */ + m->port = port_id; + + if (_avp_mac_filter(avp, m) != 0) { + /* silently discard packets not destined to our MAC */ + rte_pktmbuf_free(m); + continue; + } + + /* return new mbuf to caller */ + rx_pkts[count++] = m; + rxq->bytes += buf_len; + } + + rxq->packets += count; + + /* return the buffers to the free queue */ + avp_fifo_put(free_q, (void **)&avp_bufs[0], n); + + return count; +} + + +static uint16_t +avp_recv_pkts(void *rx_queue, + struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + struct avp_queue *rxq = (struct avp_queue *)rx_queue; + struct rte_avp_desc *avp_bufs[AVP_MAX_RX_BURST]; + struct avp_dev *avp = rxq->avp; + struct rte_avp_desc *pkt_buf; + struct rte_avp_fifo *free_q; + struct rte_avp_fifo *rx_q; + unsigned int count, avail, n; + unsigned int pkt_len; + struct rte_mbuf *m; + char *pkt_data; + unsigned int i; + + if (unlikely(avp->flags & AVP_F_DETACHED)) { + /* VM live migration in progress */ + return 0; + } + + rx_q = avp->rx_q[rxq->queue_id]; + free_q = avp->free_q[rxq->queue_id]; + + /* setup next queue to service */ + rxq->queue_id = (rxq->queue_id < rxq->queue_limit) ? 
+ (rxq->queue_id + 1) : rxq->queue_base; + + /* determine how many slots are available in the free queue */ + count = avp_fifo_free_count(free_q); + + /* determine how many packets are available in the rx queue */ + avail = avp_fifo_count(rx_q); + + /* determine how many packets can be received */ + count = RTE_MIN(count, avail); + count = RTE_MIN(count, nb_pkts); + count = RTE_MIN(count, (unsigned int)AVP_MAX_RX_BURST); + + if (unlikely(count == 0)) { + /* no free buffers, or no buffers on the rx queue */ + return 0; + } + + /* retrieve pending packets */ + n = avp_fifo_get(rx_q, (void **)&avp_bufs, count); + PMD_RX_LOG(DEBUG, "Receiving %u packets from Rx queue at %p\n", + count, rx_q); + + count = 0; + for (i = 0; i < n; i++) { + /* prefetch next entry while processing current one */ + if (i < n - 1) { + pkt_buf = avp_dev_translate_buffer(avp, + avp_bufs[i + 1]); + rte_prefetch0(pkt_buf); + } + + /* Adjust host pointers for guest addressing */ + pkt_buf = avp_dev_translate_buffer(avp, avp_bufs[i]); + pkt_data = avp_dev_translate_buffer(avp, pkt_buf->data); + pkt_len = pkt_buf->pkt_len; + + if (unlikely((pkt_len > avp->guest_mbuf_size) || + (pkt_buf->nb_segs > 1))) { + /* + * application should be using the scattered receive + * function + */ + rxq->errors++; + continue; + } + + /* process each packet to be transmitted */ + m = rte_pktmbuf_alloc(avp->pool); + if (unlikely(m == NULL)) { + rxq->dev_data->rx_mbuf_alloc_failed++; + continue; + } + + /* copy data out of the host buffer to our buffer */ + m->data_off = RTE_PKTMBUF_HEADROOM; + rte_memcpy(rte_pktmbuf_mtod(m, void *), pkt_data, pkt_len); + + /* initialize the local mbuf */ + rte_pktmbuf_data_len(m) = pkt_len; + rte_pktmbuf_pkt_len(m) = pkt_len; + m->port = avp->port_id; + + if (pkt_buf->ol_flags & RTE_AVP_RX_VLAN_PKT) { + m->ol_flags = PKT_RX_VLAN; + m->vlan_tci = pkt_buf->vlan_tci; + } + + if (_avp_mac_filter(avp, m) != 0) { + /* silently discard packets not destined to our MAC */ + rte_pktmbuf_free(m); + continue; + } + + /* return new mbuf to caller */ + rx_pkts[count++] = m; + rxq->bytes += pkt_len; + } + + rxq->packets += count; + + /* return the buffers to the free queue */ + avp_fifo_put(free_q, (void **)&avp_bufs[0], n); + + return count; +} + +/* + * Copy a chained mbuf to a set of host buffers. This function assumes that + * there are sufficient destination buffers to contain the entire source + * packet. 
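+ * The caller (avp_xmit_scattered_pkts) sizes 'count' as the ceiling of
+ * rte_pktmbuf_pkt_len(m) / avp->host_mbuf_size, so the assumption holds
+ * for well-formed packets.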
+ */ +static inline uint16_t +avp_dev_copy_to_buffers(struct avp_dev *avp, + struct rte_mbuf *mbuf, + struct rte_avp_desc **buffers, + unsigned int count) +{ + struct rte_avp_desc *previous_buf = NULL; + struct rte_avp_desc *first_buf = NULL; + struct rte_avp_desc *pkt_buf; + struct rte_avp_desc *buf; + size_t total_length; + struct rte_mbuf *m; + size_t copy_length; + size_t src_offset; + char *pkt_data; + unsigned int i; + + __rte_mbuf_sanity_check(mbuf, 1); + + m = mbuf; + src_offset = 0; + total_length = rte_pktmbuf_pkt_len(m); + for (i = 0; (i < count) && (m != NULL); i++) { + /* fill each destination buffer */ + buf = buffers[i]; + + if (i < count - 1) { + /* prefetch next entry while processing this one */ + pkt_buf = avp_dev_translate_buffer(avp, buffers[i + 1]); + rte_prefetch0(pkt_buf); + } + + /* Adjust pointers for guest addressing */ + pkt_buf = avp_dev_translate_buffer(avp, buf); + pkt_data = avp_dev_translate_buffer(avp, pkt_buf->data); + + /* setup the buffer chain */ + if (previous_buf != NULL) + previous_buf->next = buf; + else + first_buf = pkt_buf; + + previous_buf = pkt_buf; + + do { + /* + * copy as many source mbuf segments as will fit in the + * destination buffer. + */ + copy_length = RTE_MIN((avp->host_mbuf_size - + pkt_buf->data_len), + (rte_pktmbuf_data_len(m) - + src_offset)); + rte_memcpy(RTE_PTR_ADD(pkt_data, pkt_buf->data_len), + RTE_PTR_ADD(rte_pktmbuf_mtod(m, void *), + src_offset), + copy_length); + pkt_buf->data_len += copy_length; + src_offset += copy_length; + + if (likely(src_offset == rte_pktmbuf_data_len(m))) { + /* need a new source buffer */ + m = m->next; + src_offset = 0; + } + + if (unlikely(pkt_buf->data_len == + avp->host_mbuf_size)) { + /* need a new destination buffer */ + break; + } + + } while (m != NULL); + } + + first_buf->nb_segs = count; + first_buf->pkt_len = total_length; + + if (mbuf->ol_flags & PKT_TX_VLAN_PKT) { + first_buf->ol_flags |= RTE_AVP_TX_VLAN_PKT; + first_buf->vlan_tci = mbuf->vlan_tci; + } + + avp_dev_buffer_sanity_check(avp, buffers[0]); + + return total_length; +} + + +static uint16_t +avp_xmit_scattered_pkts(void *tx_queue, + struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + struct rte_avp_desc *avp_bufs[(AVP_MAX_TX_BURST * + RTE_AVP_MAX_MBUF_SEGMENTS)] = {}; + struct avp_queue *txq = (struct avp_queue *)tx_queue; + struct rte_avp_desc *tx_bufs[AVP_MAX_TX_BURST]; + struct avp_dev *avp = txq->avp; + struct rte_avp_fifo *alloc_q; + struct rte_avp_fifo *tx_q; + unsigned int count, avail, n; + unsigned int orig_nb_pkts; + struct rte_mbuf *m; + unsigned int required; + unsigned int segments; + unsigned int tx_bytes; + unsigned int i; + + orig_nb_pkts = nb_pkts; + if (unlikely(avp->flags & AVP_F_DETACHED)) { + /* VM live migration in progress */ + /* TODO ... buffer for X packets then drop? 
*/ + txq->errors += nb_pkts; + return 0; + } + + tx_q = avp->tx_q[txq->queue_id]; + alloc_q = avp->alloc_q[txq->queue_id]; + + /* limit the number of transmitted packets to the max burst size */ + if (unlikely(nb_pkts > AVP_MAX_TX_BURST)) + nb_pkts = AVP_MAX_TX_BURST; + + /* determine how many buffers are available to copy into */ + avail = avp_fifo_count(alloc_q); + if (unlikely(avail > (AVP_MAX_TX_BURST * + RTE_AVP_MAX_MBUF_SEGMENTS))) + avail = AVP_MAX_TX_BURST * RTE_AVP_MAX_MBUF_SEGMENTS; + + /* determine how many slots are available in the transmit queue */ + count = avp_fifo_free_count(tx_q); + + /* determine how many packets can be sent */ + nb_pkts = RTE_MIN(count, nb_pkts); + + /* determine how many packets will fit in the available buffers */ + count = 0; + segments = 0; + for (i = 0; i < nb_pkts; i++) { + m = tx_pkts[i]; + if (likely(i < (unsigned int)nb_pkts - 1)) { + /* prefetch next entry while processing this one */ + rte_prefetch0(tx_pkts[i + 1]); + } + required = (rte_pktmbuf_pkt_len(m) + avp->host_mbuf_size - 1) / + avp->host_mbuf_size; + + if (unlikely((required == 0) || + (required > RTE_AVP_MAX_MBUF_SEGMENTS))) + break; + else if (unlikely(required + segments > avail)) + break; + segments += required; + count++; + } + nb_pkts = count; + + if (unlikely(nb_pkts == 0)) { + /* no available buffers, or no space on the tx queue */ + txq->errors += orig_nb_pkts; + return 0; + } + + PMD_TX_LOG(DEBUG, "Sending %u packets on Tx queue at %p\n", + nb_pkts, tx_q); + + /* retrieve sufficient send buffers */ + n = avp_fifo_get(alloc_q, (void **)&avp_bufs, segments); + if (unlikely(n != segments)) { + PMD_TX_LOG(DEBUG, "Failed to allocate buffers " + "n=%u, segments=%u, orig=%u\n", + n, segments, orig_nb_pkts); + txq->errors += orig_nb_pkts; + return 0; + } + + tx_bytes = 0; + count = 0; + for (i = 0; i < nb_pkts; i++) { + /* process each packet to be transmitted */ + m = tx_pkts[i]; + + /* determine how many buffers are required for this packet */ + required = (rte_pktmbuf_pkt_len(m) + avp->host_mbuf_size - 1) / + avp->host_mbuf_size; + + tx_bytes += avp_dev_copy_to_buffers(avp, m, + &avp_bufs[count], required); + tx_bufs[i] = avp_bufs[count]; + count += required; + + /* free the original mbuf */ + rte_pktmbuf_free(m); + } + + txq->packets += nb_pkts; + txq->bytes += tx_bytes; + +#ifdef RTE_LIBRTE_AVP_DEBUG_BUFFERS + for (i = 0; i < nb_pkts; i++) + avp_dev_buffer_sanity_check(avp, tx_bufs[i]); +#endif + + /* send the packets */ + n = avp_fifo_put(tx_q, (void **)&tx_bufs[0], nb_pkts); + if (unlikely(n != orig_nb_pkts)) + txq->errors += (orig_nb_pkts - n); + + return n; +} + + +static uint16_t +avp_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) +{ + struct avp_queue *txq = (struct avp_queue *)tx_queue; + struct rte_avp_desc *avp_bufs[AVP_MAX_TX_BURST]; + struct avp_dev *avp = txq->avp; + struct rte_avp_desc *pkt_buf; + struct rte_avp_fifo *alloc_q; + struct rte_avp_fifo *tx_q; + unsigned int count, avail, n; + struct rte_mbuf *m; + unsigned int pkt_len; + unsigned int tx_bytes; + char *pkt_data; + unsigned int i; + + if (unlikely(avp->flags & AVP_F_DETACHED)) { + /* VM live migration in progress */ + /* TODO ... buffer for X packets then drop?! 
*/ + txq->errors++; + return 0; + } + + tx_q = avp->tx_q[txq->queue_id]; + alloc_q = avp->alloc_q[txq->queue_id]; + + /* limit the number of transmitted packets to the max burst size */ + if (unlikely(nb_pkts > AVP_MAX_TX_BURST)) + nb_pkts = AVP_MAX_TX_BURST; + + /* determine how many buffers are available to copy into */ + avail = avp_fifo_count(alloc_q); + + /* determine how many slots are available in the transmit queue */ + count = avp_fifo_free_count(tx_q); + + /* determine how many packets can be sent */ + count = RTE_MIN(count, avail); + count = RTE_MIN(count, nb_pkts); + + if (unlikely(count == 0)) { + /* no available buffers, or no space on the tx queue */ + txq->errors += nb_pkts; + return 0; + } + + PMD_TX_LOG(DEBUG, "Sending %u packets on Tx queue at %p\n", + count, tx_q); + + /* retrieve sufficient send buffers */ + n = avp_fifo_get(alloc_q, (void **)&avp_bufs, count); + if (unlikely(n != count)) { + txq->errors++; + return 0; + } + + tx_bytes = 0; + for (i = 0; i < count; i++) { + /* prefetch next entry while processing the current one */ + if (i < count - 1) { + pkt_buf = avp_dev_translate_buffer(avp, + avp_bufs[i + 1]); + rte_prefetch0(pkt_buf); + } + + /* process each packet to be transmitted */ + m = tx_pkts[i]; + + /* Adjust pointers for guest addressing */ + pkt_buf = avp_dev_translate_buffer(avp, avp_bufs[i]); + pkt_data = avp_dev_translate_buffer(avp, pkt_buf->data); + pkt_len = rte_pktmbuf_pkt_len(m); + + if (unlikely((pkt_len > avp->guest_mbuf_size) || + (pkt_len > avp->host_mbuf_size))) { + /* + * application should be using the scattered transmit + * function; send it truncated to avoid the performance + * hit of having to manage returning the already + * allocated buffer to the free list. This should not + * happen since the application should have set the + * max_rx_pkt_len based on its MTU and it should be + * policing its own packet sizes. 
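+ * The truncation is still counted via txq->errors below, so it
+ * remains visible in the port statistics (oerrors).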
+ */ + txq->errors++; + pkt_len = RTE_MIN(avp->guest_mbuf_size, + avp->host_mbuf_size); + } + + /* copy data out of our mbuf and into the AVP buffer */ + rte_memcpy(pkt_data, rte_pktmbuf_mtod(m, void *), pkt_len); + pkt_buf->pkt_len = pkt_len; + pkt_buf->data_len = pkt_len; + pkt_buf->nb_segs = 1; + pkt_buf->next = NULL; + + if (m->ol_flags & PKT_TX_VLAN_PKT) { + pkt_buf->ol_flags |= RTE_AVP_TX_VLAN_PKT; + pkt_buf->vlan_tci = m->vlan_tci; + } + + tx_bytes += pkt_len; + + /* free the original mbuf */ + rte_pktmbuf_free(m); + } + + txq->packets += count; + txq->bytes += tx_bytes; + + /* send the packets */ + n = avp_fifo_put(tx_q, (void **)&avp_bufs[0], count); + + return n; +} + +static void +avp_dev_rx_queue_release(void *rx_queue) +{ + struct avp_queue *rxq = (struct avp_queue *)rx_queue; + struct avp_dev *avp = rxq->avp; + struct rte_eth_dev_data *data = avp->dev_data; + unsigned int i; + + for (i = 0; i < avp->num_rx_queues; i++) { + if (data->rx_queues[i] == rxq) { + rte_free(data->rx_queues[i]); + data->rx_queues[i] = NULL; + } + } +} + +static void +avp_dev_rx_queue_release_all(struct rte_eth_dev *eth_dev) +{ + struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); + struct rte_eth_dev_data *data = avp->dev_data; + unsigned int i; + + for (i = 0; i < avp->num_rx_queues; i++) { + if (data->rx_queues[i]) { + rte_free(data->rx_queues[i]); + data->rx_queues[i] = NULL; + } + } +} + +static void +avp_dev_tx_queue_release(void *tx_queue) +{ + struct avp_queue *txq = (struct avp_queue *)tx_queue; + struct avp_dev *avp = txq->avp; + struct rte_eth_dev_data *data = avp->dev_data; + unsigned int i; + + for (i = 0; i < avp->num_tx_queues; i++) { + if (data->tx_queues[i] == txq) { + rte_free(data->tx_queues[i]); + data->tx_queues[i] = NULL; + } + } +} + +static void +avp_dev_tx_queue_release_all(struct rte_eth_dev *eth_dev) +{ + struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); + struct rte_eth_dev_data *data = avp->dev_data; + unsigned int i; + + for (i = 0; i < avp->num_tx_queues; i++) { + if (data->tx_queues[i]) { + rte_free(data->tx_queues[i]); + data->tx_queues[i] = NULL; + } + } +} + +static int +avp_dev_configure(struct rte_eth_dev *eth_dev) +{ + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); + struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); + struct rte_avp_device_info *host_info; + struct rte_avp_device_config config; + int mask = 0; + void *addr; + int ret; + + rte_spinlock_lock(&avp->lock); + if (avp->flags & AVP_F_DETACHED) { + PMD_DRV_LOG(ERR, "Operation not supported during VM live migration\n"); + ret = -ENOTSUP; + goto unlock; + } + + addr = pci_dev->mem_resource[RTE_AVP_PCI_DEVICE_BAR].addr; + host_info = (struct rte_avp_device_info *)addr; + + /* Setup required number of queues */ + _avp_set_queue_counts(eth_dev); + + mask = (ETH_VLAN_STRIP_MASK | + ETH_VLAN_FILTER_MASK | + ETH_VLAN_EXTEND_MASK); + ret = avp_vlan_offload_set(eth_dev, mask); + if (ret < 0) { + PMD_DRV_LOG(ERR, "VLAN offload set failed by host, ret=%d\n", + ret); + goto unlock; + } + + /* update device config */ + memset(&config, 0, sizeof(config)); + config.device_id = host_info->device_id; + config.driver_type = RTE_AVP_DRIVER_TYPE_DPDK; + config.driver_version = AVP_DPDK_DRIVER_VERSION; + config.features = avp->features; + config.num_tx_queues = avp->num_tx_queues; + config.num_rx_queues = avp->num_rx_queues; + + ret = avp_dev_ctrl_set_config(eth_dev, &config); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Config request failed by host, 
ret=%d\n", + ret); + goto unlock; + } + + avp->flags |= AVP_F_CONFIGURED; + ret = 0; + +unlock: + rte_spinlock_unlock(&avp->lock); + return ret; +} + +static int +avp_dev_start(struct rte_eth_dev *eth_dev) +{ + struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); + int ret; + + rte_spinlock_lock(&avp->lock); + if (avp->flags & AVP_F_DETACHED) { + PMD_DRV_LOG(ERR, "Operation not supported during VM live migration\n"); + ret = -ENOTSUP; + goto unlock; + } + + /* update link state */ + ret = avp_dev_ctrl_set_link_state(eth_dev, 1); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Link state change failed by host, ret=%d\n", + ret); + goto unlock; + } + + /* remember current link state */ + avp->flags |= AVP_F_LINKUP; + + ret = 0; + +unlock: + rte_spinlock_unlock(&avp->lock); + return ret; +} + +static void +avp_dev_stop(struct rte_eth_dev *eth_dev) +{ + struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); + int ret; + + rte_spinlock_lock(&avp->lock); + if (avp->flags & AVP_F_DETACHED) { + PMD_DRV_LOG(ERR, "Operation not supported during VM live migration\n"); + goto unlock; + } + + /* remember current link state */ + avp->flags &= ~AVP_F_LINKUP; + + /* update link state */ + ret = avp_dev_ctrl_set_link_state(eth_dev, 0); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Link state change failed by host, ret=%d\n", + ret); + } + +unlock: + rte_spinlock_unlock(&avp->lock); +} + +static void +avp_dev_close(struct rte_eth_dev *eth_dev) +{ + struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); + int ret; + + rte_spinlock_lock(&avp->lock); + if (avp->flags & AVP_F_DETACHED) { + PMD_DRV_LOG(ERR, "Operation not supported during VM live migration\n"); + goto unlock; + } + + /* remember current link state */ + avp->flags &= ~AVP_F_LINKUP; + avp->flags &= ~AVP_F_CONFIGURED; + + ret = avp_dev_disable_interrupts(eth_dev); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Failed to disable interrupts\n"); + /* continue */ + } + + /* update device state */ + ret = avp_dev_ctrl_shutdown(eth_dev); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Device shutdown failed by host, ret=%d\n", + ret); + /* continue */ + } + + /* release dynamic storage for rx/tx queues */ + avp_dev_rx_queue_release_all(eth_dev); + avp_dev_tx_queue_release_all(eth_dev); + +unlock: + rte_spinlock_unlock(&avp->lock); +} + +static int +avp_dev_link_update(struct rte_eth_dev *eth_dev, + __rte_unused int wait_to_complete) +{ + struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); + struct rte_eth_link *link = ð_dev->data->dev_link; + + link->link_speed = ETH_SPEED_NUM_10G; + link->link_duplex = ETH_LINK_FULL_DUPLEX; + link->link_status = !!(avp->flags & AVP_F_LINKUP); + + return -1; +} + +static int +avp_dev_promiscuous_enable(struct rte_eth_dev *eth_dev) +{ + struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); + + rte_spinlock_lock(&avp->lock); + if ((avp->flags & AVP_F_PROMISC) == 0) { + avp->flags |= AVP_F_PROMISC; + PMD_DRV_LOG(DEBUG, "Promiscuous mode enabled on %u\n", + eth_dev->data->port_id); + } + rte_spinlock_unlock(&avp->lock); + + return 0; +} + +static int +avp_dev_promiscuous_disable(struct rte_eth_dev *eth_dev) +{ + struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); + + rte_spinlock_lock(&avp->lock); + if ((avp->flags & AVP_F_PROMISC) != 0) { + avp->flags &= ~AVP_F_PROMISC; + PMD_DRV_LOG(DEBUG, "Promiscuous mode disabled on %u\n", + eth_dev->data->port_id); + } + rte_spinlock_unlock(&avp->lock); + + return 0; +} + +static int +avp_dev_info_get(struct 
rte_eth_dev *eth_dev, + struct rte_eth_dev_info *dev_info) +{ + struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); + + dev_info->max_rx_queues = avp->max_rx_queues; + dev_info->max_tx_queues = avp->max_tx_queues; + dev_info->min_rx_bufsize = AVP_MIN_RX_BUFSIZE; + dev_info->max_rx_pktlen = avp->max_rx_pkt_len; + dev_info->max_mac_addrs = AVP_MAX_MAC_ADDRS; + if (avp->host_features & RTE_AVP_FEATURE_VLAN_OFFLOAD) { + dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP; + dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT; + } + + return 0; +} + +static int +avp_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask) +{ + struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); + struct rte_eth_conf *dev_conf = ð_dev->data->dev_conf; + uint64_t offloads = dev_conf->rxmode.offloads; + + if (mask & ETH_VLAN_STRIP_MASK) { + if (avp->host_features & RTE_AVP_FEATURE_VLAN_OFFLOAD) { + if (offloads & DEV_RX_OFFLOAD_VLAN_STRIP) + avp->features |= RTE_AVP_FEATURE_VLAN_OFFLOAD; + else + avp->features &= ~RTE_AVP_FEATURE_VLAN_OFFLOAD; + } else { + PMD_DRV_LOG(ERR, "VLAN strip offload not supported\n"); + } + } + + if (mask & ETH_VLAN_FILTER_MASK) { + if (offloads & DEV_RX_OFFLOAD_VLAN_FILTER) + PMD_DRV_LOG(ERR, "VLAN filter offload not supported\n"); + } + + if (mask & ETH_VLAN_EXTEND_MASK) { + if (offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) + PMD_DRV_LOG(ERR, "VLAN extend offload not supported\n"); + } + + return 0; +} + +static int +avp_dev_stats_get(struct rte_eth_dev *eth_dev, struct rte_eth_stats *stats) +{ + struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); + unsigned int i; + + for (i = 0; i < avp->num_rx_queues; i++) { + struct avp_queue *rxq = avp->dev_data->rx_queues[i]; + + if (rxq) { + stats->ipackets += rxq->packets; + stats->ibytes += rxq->bytes; + stats->ierrors += rxq->errors; + + stats->q_ipackets[i] += rxq->packets; + stats->q_ibytes[i] += rxq->bytes; + stats->q_errors[i] += rxq->errors; + } + } + + for (i = 0; i < avp->num_tx_queues; i++) { + struct avp_queue *txq = avp->dev_data->tx_queues[i]; + + if (txq) { + stats->opackets += txq->packets; + stats->obytes += txq->bytes; + stats->oerrors += txq->errors; + + stats->q_opackets[i] += txq->packets; + stats->q_obytes[i] += txq->bytes; + } + } + + return 0; +} + +static int +avp_dev_stats_reset(struct rte_eth_dev *eth_dev) +{ + struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); + unsigned int i; + + for (i = 0; i < avp->num_rx_queues; i++) { + struct avp_queue *rxq = avp->dev_data->rx_queues[i]; + + if (rxq) { + rxq->bytes = 0; + rxq->packets = 0; + rxq->errors = 0; + } + } + + for (i = 0; i < avp->num_tx_queues; i++) { + struct avp_queue *txq = avp->dev_data->tx_queues[i]; + + if (txq) { + txq->bytes = 0; + txq->packets = 0; + txq->errors = 0; + } + } + + return 0; +} + +RTE_PMD_REGISTER_PCI(net_avp, rte_avp_pmd); +RTE_PMD_REGISTER_PCI_TABLE(net_avp, pci_id_avp_map); + +RTE_INIT(avp_init_log) +{ + avp_logtype_driver = rte_log_register("pmd.net.avp.driver"); + if (avp_logtype_driver >= 0) + rte_log_set_level(avp_logtype_driver, RTE_LOG_NOTICE); +} diff --git a/src/spdk/dpdk/drivers/net/avp/avp_logs.h b/src/spdk/dpdk/drivers/net/avp/avp_logs.h new file mode 100644 index 000000000..6e297c7a4 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/avp/avp_logs.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2013-2017 Wind River Systems, Inc. 
+ */ + +#ifndef _AVP_LOGS_H_ +#define _AVP_LOGS_H_ + +#include + +#ifdef RTE_LIBRTE_AVP_DEBUG_RX +#define PMD_RX_LOG(level, fmt, args...) \ + RTE_LOG(level, PMD, "%s() rx: " fmt, __func__, ## args) +#else +#define PMD_RX_LOG(level, fmt, args...) do { } while (0) +#endif + +#ifdef RTE_LIBRTE_AVP_DEBUG_TX +#define PMD_TX_LOG(level, fmt, args...) \ + RTE_LOG(level, PMD, "%s() tx: " fmt, __func__, ## args) +#else +#define PMD_TX_LOG(level, fmt, args...) do { } while (0) +#endif + +extern int avp_logtype_driver; + +#define PMD_DRV_LOG(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, avp_logtype_driver, \ + "%s(): " fmt, __func__, ## args) + +#endif /* _AVP_LOGS_H_ */ diff --git a/src/spdk/dpdk/drivers/net/avp/meson.build b/src/spdk/dpdk/drivers/net/avp/meson.build new file mode 100644 index 000000000..a5f63cdef --- /dev/null +++ b/src/spdk/dpdk/drivers/net/avp/meson.build @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2018 Intel Corporation + +if not is_linux + build = false + reason = 'only supported on linux' +endif +sources = files('avp_ethdev.c') +install_headers('rte_avp_common.h', 'rte_avp_fifo.h') diff --git a/src/spdk/dpdk/drivers/net/avp/rte_avp_common.h b/src/spdk/dpdk/drivers/net/avp/rte_avp_common.h new file mode 100644 index 000000000..4e82ec0b8 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/avp/rte_avp_common.h @@ -0,0 +1,382 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR LGPL-2.1) + * Copyright(c) 2010-2013 Intel Corporation. + * Copyright(c) 2014-2017 Wind River Systems, Inc. + */ + +#ifndef _RTE_AVP_COMMON_H_ +#define _RTE_AVP_COMMON_H_ + +#ifdef __KERNEL__ +#include +#define RTE_STD_C11 +#else +#include +#include +#include +#include +#include +#include +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * AVP name is part of network device name. + */ +#define RTE_AVP_NAMESIZE 32 + +/** + * AVP alias is a user-defined value used for lookups from secondary + * processes. Typically, this is a UUID. + */ +#define RTE_AVP_ALIASSIZE 128 + +/* + * Request id. + */ +enum rte_avp_req_id { + RTE_AVP_REQ_UNKNOWN = 0, + RTE_AVP_REQ_CHANGE_MTU, + RTE_AVP_REQ_CFG_NETWORK_IF, + RTE_AVP_REQ_CFG_DEVICE, + RTE_AVP_REQ_SHUTDOWN_DEVICE, + RTE_AVP_REQ_MAX, +}; + +/**@{ AVP device driver types */ +#define RTE_AVP_DRIVER_TYPE_UNKNOWN 0 +#define RTE_AVP_DRIVER_TYPE_DPDK 1 +#define RTE_AVP_DRIVER_TYPE_KERNEL 2 +#define RTE_AVP_DRIVER_TYPE_QEMU 3 +/**@} */ + +/**@{ AVP device operational modes */ +#define RTE_AVP_MODE_HOST 0 /**< AVP interface created in host */ +#define RTE_AVP_MODE_GUEST 1 /**< AVP interface created for export to guest */ +#define RTE_AVP_MODE_TRACE 2 /**< AVP interface created for packet tracing */ +/**@} */ + +/* + * Structure for AVP queue configuration query request/result + */ +struct rte_avp_device_config { + uint64_t device_id; /**< Unique system identifier */ + uint32_t driver_type; /**< Device Driver type */ + uint32_t driver_version; /**< Device Driver version */ + uint32_t features; /**< Negotiated features */ + uint16_t num_tx_queues; /**< Number of active transmit queues */ + uint16_t num_rx_queues; /**< Number of active receive queues */ + uint8_t if_up; /**< 1: interface up, 0: interface down */ +} __rte_packed; + +/* + * Structure for AVP request. 
+ */ +struct rte_avp_request { + uint32_t req_id; /**< Request id */ + RTE_STD_C11 + union { + uint32_t new_mtu; /**< New MTU */ + uint8_t if_up; /**< 1: interface up, 0: interface down */ + struct rte_avp_device_config config; /**< Queue configuration */ + }; + int32_t result; /**< Result for processing request */ +} __rte_packed; + +/* + * FIFO struct mapped in a shared memory. It describes a circular buffer FIFO + * Write and read should wrap around. FIFO is empty when write == read + * Writing should never overwrite the read position + */ +struct rte_avp_fifo { + volatile unsigned int write; /**< Next position to be written*/ + volatile unsigned int read; /**< Next position to be read */ + unsigned int len; /**< Circular buffer length */ + unsigned int elem_size; /**< Pointer size - for 32/64 bit OS */ + void *volatile buffer[]; /**< The buffer contains mbuf pointers */ +}; + + +/* + * AVP packet buffer header used to define the exchange of packet data. + */ +struct rte_avp_desc { + uint64_t pad0; + void *pkt_mbuf; /**< Reference to packet mbuf */ + uint8_t pad1[14]; + uint16_t ol_flags; /**< Offload features. */ + void *next; /**< Reference to next buffer in chain */ + void *data; /**< Start address of data in segment buffer. */ + uint16_t data_len; /**< Amount of data in segment buffer. */ + uint8_t nb_segs; /**< Number of segments */ + uint8_t pad2; + uint16_t pkt_len; /**< Total pkt len: sum of all segment data_len. */ + uint32_t pad3; + uint16_t vlan_tci; /**< VLAN Tag Control Identifier (CPU order). */ + uint32_t pad4; +} __rte_packed __rte_cache_aligned; + + +/**{ AVP device features */ +#define RTE_AVP_FEATURE_VLAN_OFFLOAD (1 << 0) /**< Emulated HW VLAN offload */ +/**@} */ + + +/**@{ Offload feature flags */ +#define RTE_AVP_TX_VLAN_PKT 0x0001 /**< TX packet is a 802.1q VLAN packet. */ +#define RTE_AVP_RX_VLAN_PKT 0x0800 /**< RX packet is a 802.1q VLAN packet. 
*/ +/**@} */ + + +/**@{ AVP PCI identifiers */ +#define RTE_AVP_PCI_VENDOR_ID 0x1af4 +#define RTE_AVP_PCI_DEVICE_ID 0x1110 +/**@} */ + +/**@{ AVP PCI subsystem identifiers */ +#define RTE_AVP_PCI_SUB_VENDOR_ID RTE_AVP_PCI_VENDOR_ID +#define RTE_AVP_PCI_SUB_DEVICE_ID 0x1104 +/**@} */ + +/**@{ AVP PCI BAR definitions */ +#define RTE_AVP_PCI_MMIO_BAR 0 +#define RTE_AVP_PCI_MSIX_BAR 1 +#define RTE_AVP_PCI_MEMORY_BAR 2 +#define RTE_AVP_PCI_MEMMAP_BAR 4 +#define RTE_AVP_PCI_DEVICE_BAR 5 +#define RTE_AVP_PCI_MAX_BAR 6 +/**@} */ + +/**@{ AVP PCI BAR name definitions */ +#define RTE_AVP_MMIO_BAR_NAME "avp-mmio" +#define RTE_AVP_MSIX_BAR_NAME "avp-msix" +#define RTE_AVP_MEMORY_BAR_NAME "avp-memory" +#define RTE_AVP_MEMMAP_BAR_NAME "avp-memmap" +#define RTE_AVP_DEVICE_BAR_NAME "avp-device" +/**@} */ + +/**@{ AVP PCI MSI-X vectors */ +#define RTE_AVP_MIGRATION_MSIX_VECTOR 0 /**< Migration interrupts */ +#define RTE_AVP_MAX_MSIX_VECTORS 1 +/**@} */ + +/**@} AVP Migration status/ack register values */ +#define RTE_AVP_MIGRATION_NONE 0 /**< Migration never executed */ +#define RTE_AVP_MIGRATION_DETACHED 1 /**< Device attached during migration */ +#define RTE_AVP_MIGRATION_ATTACHED 2 /**< Device reattached during migration */ +#define RTE_AVP_MIGRATION_ERROR 3 /**< Device failed to attach/detach */ +/**@} */ + +/**@} AVP MMIO Register Offsets */ +#define RTE_AVP_REGISTER_BASE 0 +#define RTE_AVP_INTERRUPT_MASK_OFFSET (RTE_AVP_REGISTER_BASE + 0) +#define RTE_AVP_INTERRUPT_STATUS_OFFSET (RTE_AVP_REGISTER_BASE + 4) +#define RTE_AVP_MIGRATION_STATUS_OFFSET (RTE_AVP_REGISTER_BASE + 8) +#define RTE_AVP_MIGRATION_ACK_OFFSET (RTE_AVP_REGISTER_BASE + 12) +/**@} */ + +/**@} AVP Interrupt Status Mask */ +#define RTE_AVP_MIGRATION_INTERRUPT_MASK (1 << 1) +#define RTE_AVP_APP_INTERRUPTS_MASK 0xFFFFFFFF +#define RTE_AVP_NO_INTERRUPTS_MASK 0 +/**@} */ + +/* + * Maximum number of memory regions to export + */ +#define RTE_AVP_MAX_MAPS 2048 + +/* + * Description of a single memory region + */ +struct rte_avp_memmap { + void *addr; + rte_iova_t phys_addr; + uint64_t length; +}; + +/* + * AVP memory mapping validation marker + */ +#define RTE_AVP_MEMMAP_MAGIC 0x20131969 + +/**@{ AVP memory map versions */ +#define RTE_AVP_MEMMAP_VERSION_1 1 +#define RTE_AVP_MEMMAP_VERSION RTE_AVP_MEMMAP_VERSION_1 +/**@} */ + +/* + * Defines a list of memory regions exported from the host to the guest + */ +struct rte_avp_memmap_info { + uint32_t magic; /**< Memory validation marker */ + uint32_t version; /**< Data format version */ + uint32_t nb_maps; + struct rte_avp_memmap maps[RTE_AVP_MAX_MAPS]; +}; + +/* + * AVP device memory validation marker + */ +#define RTE_AVP_DEVICE_MAGIC 0x20131975 + +/**@{ AVP device map versions + * WARNING: do not change the format or names of these variables. They are + * automatically parsed from the build system to generate the SDK package + * name. 
+ **/ +#define RTE_AVP_RELEASE_VERSION_1 1 +#define RTE_AVP_RELEASE_VERSION RTE_AVP_RELEASE_VERSION_1 +#define RTE_AVP_MAJOR_VERSION_0 0 +#define RTE_AVP_MAJOR_VERSION_1 1 +#define RTE_AVP_MAJOR_VERSION_2 2 +#define RTE_AVP_MAJOR_VERSION RTE_AVP_MAJOR_VERSION_2 +#define RTE_AVP_MINOR_VERSION_0 0 +#define RTE_AVP_MINOR_VERSION_1 1 +#define RTE_AVP_MINOR_VERSION_13 13 +#define RTE_AVP_MINOR_VERSION RTE_AVP_MINOR_VERSION_13 +/**@} */ + + +/** + * Generates a 32-bit version number from the specified version number + * components + */ +#define RTE_AVP_MAKE_VERSION(_release, _major, _minor) \ +((((_release) & 0xffff) << 16) | (((_major) & 0xff) << 8) | ((_minor) & 0xff)) + + +/** + * Represents the current version of the AVP host driver + * WARNING: in the current development branch the host and guest driver + * version should always be the same. When patching guest features back to + * GA releases the host version number should not be updated unless there was + * an actual change made to the host driver. + */ +#define RTE_AVP_CURRENT_HOST_VERSION \ +RTE_AVP_MAKE_VERSION(RTE_AVP_RELEASE_VERSION_1, \ + RTE_AVP_MAJOR_VERSION_0, \ + RTE_AVP_MINOR_VERSION_1) + + +/** + * Represents the current version of the AVP guest drivers + */ +#define RTE_AVP_CURRENT_GUEST_VERSION \ +RTE_AVP_MAKE_VERSION(RTE_AVP_RELEASE_VERSION_1, \ + RTE_AVP_MAJOR_VERSION_2, \ + RTE_AVP_MINOR_VERSION_13) + +/** + * Access AVP device version values + */ +#define RTE_AVP_GET_RELEASE_VERSION(_version) (((_version) >> 16) & 0xffff) +#define RTE_AVP_GET_MAJOR_VERSION(_version) (((_version) >> 8) & 0xff) +#define RTE_AVP_GET_MINOR_VERSION(_version) ((_version) & 0xff) +/**@}*/ + + +/** + * Remove the minor version number so that only the release and major versions + * are used for comparisons. + */ +#define RTE_AVP_STRIP_MINOR_VERSION(_version) ((_version) >> 8) + + +/** + * Defines the number of mbuf pools supported per device (1 per socket) + */ +#define RTE_AVP_MAX_MEMPOOLS 8 + +/* + * Defines address translation parameters for each support mbuf pool + */ +struct rte_avp_mempool_info { + void *addr; + rte_iova_t phys_addr; + uint64_t length; +}; + +/* + * Struct used to create a AVP device. Passed to the kernel in IOCTL call or + * via inter-VM shared memory when used in a guest. 
+ */ +struct rte_avp_device_info { + uint32_t magic; /**< Memory validation marker */ + uint32_t version; /**< Data format version */ + + char ifname[RTE_AVP_NAMESIZE]; /**< Network device name for AVP */ + + rte_iova_t tx_phys; + rte_iova_t rx_phys; + rte_iova_t alloc_phys; + rte_iova_t free_phys; + + uint32_t features; /**< Supported feature bitmap */ + uint8_t min_rx_queues; /**< Minimum supported receive/free queues */ + uint8_t num_rx_queues; /**< Recommended number of receive/free queues */ + uint8_t max_rx_queues; /**< Maximum supported receive/free queues */ + uint8_t min_tx_queues; /**< Minimum supported transmit/alloc queues */ + uint8_t num_tx_queues; + /**< Recommended number of transmit/alloc queues */ + uint8_t max_tx_queues; /**< Maximum supported transmit/alloc queues */ + + uint32_t tx_size; /**< Size of each transmit queue */ + uint32_t rx_size; /**< Size of each receive queue */ + uint32_t alloc_size; /**< Size of each alloc queue */ + uint32_t free_size; /**< Size of each free queue */ + + /* Used by Ethtool */ + rte_iova_t req_phys; + rte_iova_t resp_phys; + rte_iova_t sync_phys; + void *sync_va; + + /* mbuf mempool (used when a single memory area is supported) */ + void *mbuf_va; + rte_iova_t mbuf_phys; + + /* mbuf mempools */ + struct rte_avp_mempool_info pool[RTE_AVP_MAX_MEMPOOLS]; + +#ifdef __KERNEL__ + /* Ethernet info */ + char ethaddr[ETH_ALEN]; +#else + char ethaddr[RTE_ETHER_ADDR_LEN]; +#endif + + uint8_t mode; /**< device mode, i.e guest, host, trace */ + + /* mbuf size */ + unsigned int mbuf_size; + + /* + * unique id to differentiate between two instantiations of the same + * AVP device (i.e., the guest needs to know if the device has been + * deleted and recreated). + */ + uint64_t device_id; + + uint32_t max_rx_pkt_len; /**< Maximum receive unit size */ +}; + +#define RTE_AVP_MAX_QUEUES 8 /**< Maximum number of queues per device */ + +/** Maximum number of chained mbufs in a packet */ +#define RTE_AVP_MAX_MBUF_SEGMENTS 5 + +#define RTE_AVP_DEVICE "avp" + +#define RTE_AVP_IOCTL_TEST _IOWR(0, 1, int) +#define RTE_AVP_IOCTL_CREATE _IOWR(0, 2, struct rte_avp_device_info) +#define RTE_AVP_IOCTL_RELEASE _IOWR(0, 3, struct rte_avp_device_info) +#define RTE_AVP_IOCTL_QUERY _IOWR(0, 4, struct rte_avp_device_config) + +#ifdef __cplusplus +} +#endif + +#endif /* _RTE_AVP_COMMON_H_ */ diff --git a/src/spdk/dpdk/drivers/net/avp/rte_avp_fifo.h b/src/spdk/dpdk/drivers/net/avp/rte_avp_fifo.h new file mode 100644 index 000000000..c1658da68 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/avp/rte_avp_fifo.h @@ -0,0 +1,118 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR LGPL-2.1) + * Copyright(c) 2010-2013 Intel Corporation. + * Copyright(c) 2013-2017 Wind River Systems, Inc. 
+ */ + +#ifndef _RTE_AVP_FIFO_H_ +#define _RTE_AVP_FIFO_H_ + +#include "rte_avp_common.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef __KERNEL__ +/* Write memory barrier for kernel compiles */ +#define AVP_WMB() smp_wmb() +/* Read memory barrier for kernel compiles */ +#define AVP_RMB() smp_rmb() +#else +/* Write memory barrier for userspace compiles */ +#define AVP_WMB() rte_wmb() +/* Read memory barrier for userspace compiles */ +#define AVP_RMB() rte_rmb() +#endif + +#ifndef __KERNEL__ +#include + +/** + * Initializes the avp fifo structure + */ +static inline void +avp_fifo_init(struct rte_avp_fifo *fifo, unsigned int size) +{ + /* Ensure size is power of 2 */ + if (size & (size - 1)) + rte_panic("AVP fifo size must be power of 2\n"); + + fifo->write = 0; + fifo->read = 0; + fifo->len = size; + fifo->elem_size = sizeof(void *); +} +#endif + +/** + * Adds num elements into the fifo. Return the number actually written + */ +static inline unsigned +avp_fifo_put(struct rte_avp_fifo *fifo, void **data, unsigned int num) +{ + unsigned int i = 0; + unsigned int fifo_write = fifo->write; + unsigned int fifo_read = fifo->read; + unsigned int new_write = fifo_write; + + for (i = 0; i < num; i++) { + new_write = (new_write + 1) & (fifo->len - 1); + + if (new_write == fifo_read) + break; + fifo->buffer[fifo_write] = data[i]; + fifo_write = new_write; + } + AVP_WMB(); + fifo->write = fifo_write; + return i; +} + +/** + * Get up to num elements from the fifo. Return the number actually read + */ +static inline unsigned int +avp_fifo_get(struct rte_avp_fifo *fifo, void **data, unsigned int num) +{ + unsigned int i = 0; + unsigned int new_read = fifo->read; + unsigned int fifo_write = fifo->write; + + if (new_read == fifo_write) + return 0; /* empty */ + + for (i = 0; i < num; i++) { + if (new_read == fifo_write) + break; + + data[i] = fifo->buffer[new_read]; + new_read = (new_read + 1) & (fifo->len - 1); + } + AVP_RMB(); + fifo->read = new_read; + return i; +} + +/** + * Get the num of elements in the fifo + */ +static inline unsigned int +avp_fifo_count(struct rte_avp_fifo *fifo) +{ + return (fifo->len + fifo->write - fifo->read) & (fifo->len - 1); +} + +/** + * Get the num of available elements in the fifo + */ +static inline unsigned int +avp_fifo_free_count(struct rte_avp_fifo *fifo) +{ + return (fifo->read - fifo->write - 1) & (fifo->len - 1); +} + +#ifdef __cplusplus +} +#endif + +#endif /* _RTE_AVP_FIFO_H_ */ diff --git a/src/spdk/dpdk/drivers/net/avp/rte_pmd_avp_version.map b/src/spdk/dpdk/drivers/net/avp/rte_pmd_avp_version.map new file mode 100644 index 000000000..f9f17e4f6 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/avp/rte_pmd_avp_version.map @@ -0,0 +1,3 @@ +DPDK_20.0 { + local: *; +}; diff --git a/src/spdk/dpdk/drivers/net/axgbe/Makefile b/src/spdk/dpdk/drivers/net/axgbe/Makefile new file mode 100644 index 000000000..e421d0da1 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/axgbe/Makefile @@ -0,0 +1,33 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright (c) 2018 Advanced Micro Devices, Inc. All rights reserved. 
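As an illustration of the avp_fifo_* inline helpers added in rte_avp_fifo.h above: they implement a single-producer/single-consumer ring of void pointers whose size must be a power of two, and one slot is always left empty, so a FIFO of N slots holds at most N - 1 entries. Below is a minimal stand-alone sketch (not part of the upstream sources) of how the helpers fit together, assuming a DPDK build environment where rte_avp_fifo.h and its dependencies are on the include path; in the real driver the FIFO lives in memory shared between host and guest, while plain heap memory is used here only to exercise the API.

#include <stdio.h>
#include <stdlib.h>
#include <rte_atomic.h>		/* rte_wmb()/rte_rmb() used by AVP_WMB/AVP_RMB */
#include <rte_debug.h>		/* rte_panic() used by avp_fifo_init() */
#include "rte_avp_fifo.h"

#define NUM_SLOTS 8		/* must be a power of two for avp_fifo_init() */

int main(void)
{
	struct rte_avp_fifo *fifo;
	int payload[4] = { 10, 20, 30, 40 };
	void *in[4] = { &payload[0], &payload[1], &payload[2], &payload[3] };
	void *out[4];
	unsigned int n;

	/* The fifo header is followed in memory by NUM_SLOTS pointer slots. */
	fifo = calloc(1, sizeof(*fifo) + NUM_SLOTS * sizeof(void *));
	if (fifo == NULL)
		return 1;

	avp_fifo_init(fifo, NUM_SLOTS);

	/* Enqueue four entries; at most NUM_SLOTS - 1 would have fit. */
	n = avp_fifo_put(fifo, in, 4);
	printf("enqueued %u, pending %u\n", n, avp_fifo_count(fifo));

	/* Dequeue them again; the FIFO is empty when write == read. */
	n = avp_fifo_get(fifo, out, 4);
	printf("dequeued %u, free slots %u\n", n, avp_fifo_free_count(fifo));

	free(fifo);
	return 0;
}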
+ +include $(RTE_SDK)/mk/rte.vars.mk + +# +# library name +# +LIB = librte_pmd_axgbe.a + +CFLAGS += -O3 +CFLAGS += $(WERROR_FLAGS) + +EXPORT_MAP := rte_pmd_axgbe_version.map + +LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool +LDLIBS += -lrte_pci -lrte_bus_pci +LDLIBS += -lrte_ethdev -lrte_net + +# +# all source are stored in SRCS-y +# +SRCS-$(CONFIG_RTE_LIBRTE_AXGBE_PMD) += axgbe_ethdev.c +SRCS-$(CONFIG_RTE_LIBRTE_AXGBE_PMD) += axgbe_dev.c +SRCS-$(CONFIG_RTE_LIBRTE_AXGBE_PMD) += axgbe_mdio.c +SRCS-$(CONFIG_RTE_LIBRTE_AXGBE_PMD) += axgbe_phy_impl.c +SRCS-$(CONFIG_RTE_LIBRTE_AXGBE_PMD) += axgbe_i2c.c +SRCS-$(CONFIG_RTE_LIBRTE_AXGBE_PMD) += axgbe_rxtx.c +ifeq ($(CONFIG_RTE_ARCH_X86),y) +SRCS-$(CONFIG_RTE_LIBRTE_AXGBE_PMD) += axgbe_rxtx_vec_sse.c +endif + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/src/spdk/dpdk/drivers/net/axgbe/axgbe_common.h b/src/spdk/dpdk/drivers/net/axgbe/axgbe_common.h new file mode 100644 index 000000000..f48117180 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/axgbe/axgbe_common.h @@ -0,0 +1,1736 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved. + * Copyright(c) 2018 Synopsys, Inc. All rights reserved. + */ + +#ifndef __AXGBE_COMMON_H__ +#define __AXGBE_COMMON_H__ + +#include "axgbe_logs.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define BIT(nr) (1 << (nr)) +#ifndef ARRAY_SIZE +#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0])) +#endif + +#define AXGBE_HZ 250 + +/* DMA register offsets */ +#define DMA_MR 0x3000 +#define DMA_SBMR 0x3004 +#define DMA_ISR 0x3008 +#define DMA_AXIARCR 0x3010 +#define DMA_AXIAWCR 0x3018 +#define DMA_AXIAWRCR 0x301c +#define DMA_DSR0 0x3020 +#define DMA_DSR1 0x3024 +#define EDMA_TX_CONTROL 0x3040 +#define EDMA_RX_CONTROL 0x3044 + +/* DMA register entry bit positions and sizes */ +#define DMA_AXIARCR_DRC_INDEX 0 +#define DMA_AXIARCR_DRC_WIDTH 4 +#define DMA_AXIARCR_DRD_INDEX 4 +#define DMA_AXIARCR_DRD_WIDTH 2 +#define DMA_AXIARCR_TEC_INDEX 8 +#define DMA_AXIARCR_TEC_WIDTH 4 +#define DMA_AXIARCR_TED_INDEX 12 +#define DMA_AXIARCR_TED_WIDTH 2 +#define DMA_AXIARCR_THC_INDEX 16 +#define DMA_AXIARCR_THC_WIDTH 4 +#define DMA_AXIARCR_THD_INDEX 20 +#define DMA_AXIARCR_THD_WIDTH 2 +#define DMA_AXIAWCR_DWC_INDEX 0 +#define DMA_AXIAWCR_DWC_WIDTH 4 +#define DMA_AXIAWCR_DWD_INDEX 4 +#define DMA_AXIAWCR_DWD_WIDTH 2 +#define DMA_AXIAWCR_RPC_INDEX 8 +#define DMA_AXIAWCR_RPC_WIDTH 4 +#define DMA_AXIAWCR_RPD_INDEX 12 +#define DMA_AXIAWCR_RPD_WIDTH 2 +#define DMA_AXIAWCR_RHC_INDEX 16 +#define DMA_AXIAWCR_RHC_WIDTH 4 +#define DMA_AXIAWCR_RHD_INDEX 20 +#define DMA_AXIAWCR_RHD_WIDTH 2 +#define DMA_AXIAWCR_RDC_INDEX 24 +#define DMA_AXIAWCR_RDC_WIDTH 4 +#define DMA_AXIAWCR_RDD_INDEX 28 +#define DMA_AXIAWCR_RDD_WIDTH 2 +#define DMA_AXIAWRCR_TDWC_INDEX 0 +#define DMA_AXIAWRCR_TDWC_WIDTH 4 +#define DMA_AXIAWRCR_TDWD_INDEX 4 +#define DMA_AXIAWRCR_TDWD_WIDTH 4 +#define DMA_AXIAWRCR_RDRC_INDEX 8 +#define DMA_AXIAWRCR_RDRC_WIDTH 4 +#define DMA_ISR_MACIS_INDEX 17 +#define DMA_ISR_MACIS_WIDTH 1 +#define DMA_ISR_MTLIS_INDEX 16 +#define DMA_ISR_MTLIS_WIDTH 1 +#define DMA_MR_INTM_INDEX 12 +#define DMA_MR_INTM_WIDTH 2 +#define DMA_MR_SWR_INDEX 0 +#define DMA_MR_SWR_WIDTH 1 +#define DMA_SBMR_WR_OSR_INDEX 24 +#define DMA_SBMR_WR_OSR_WIDTH 6 +#define 
DMA_SBMR_RD_OSR_INDEX 16 +#define DMA_SBMR_RD_OSR_WIDTH 6 +#define DMA_SBMR_AAL_INDEX 12 +#define DMA_SBMR_AAL_WIDTH 1 +#define DMA_SBMR_EAME_INDEX 11 +#define DMA_SBMR_EAME_WIDTH 1 +#define DMA_SBMR_BLEN_256_INDEX 7 +#define DMA_SBMR_BLEN_256_WIDTH 1 +#define DMA_SBMR_BLEN_32_INDEX 4 +#define DMA_SBMR_BLEN_32_WIDTH 1 +#define DMA_SBMR_UNDEF_INDEX 0 +#define DMA_SBMR_UNDEF_WIDTH 1 + +/* DMA register values */ +#define DMA_DSR_RPS_WIDTH 4 +#define DMA_DSR_TPS_WIDTH 4 +#define DMA_DSR_Q_WIDTH (DMA_DSR_RPS_WIDTH + DMA_DSR_TPS_WIDTH) +#define DMA_DSR0_RPS_START 8 +#define DMA_DSR0_TPS_START 12 +#define DMA_DSRX_FIRST_QUEUE 3 +#define DMA_DSRX_INC 4 +#define DMA_DSRX_QPR 4 +#define DMA_DSRX_RPS_START 0 +#define DMA_DSRX_TPS_START 4 +#define DMA_TPS_STOPPED 0x00 +#define DMA_TPS_SUSPENDED 0x06 + +/* DMA channel register offsets + * Multiple channels can be active. The first channel has registers + * that begin at 0x3100. Each subsequent channel has registers that + * are accessed using an offset of 0x80 from the previous channel. + */ +#define DMA_CH_BASE 0x3100 +#define DMA_CH_INC 0x80 + +#define DMA_CH_CR 0x00 +#define DMA_CH_TCR 0x04 +#define DMA_CH_RCR 0x08 +#define DMA_CH_TDLR_HI 0x10 +#define DMA_CH_TDLR_LO 0x14 +#define DMA_CH_RDLR_HI 0x18 +#define DMA_CH_RDLR_LO 0x1c +#define DMA_CH_TDTR_LO 0x24 +#define DMA_CH_RDTR_LO 0x2c +#define DMA_CH_TDRLR 0x30 +#define DMA_CH_RDRLR 0x34 +#define DMA_CH_IER 0x38 +#define DMA_CH_RIWT 0x3c +#define DMA_CH_CATDR_LO 0x44 +#define DMA_CH_CARDR_LO 0x4c +#define DMA_CH_CATBR_HI 0x50 +#define DMA_CH_CATBR_LO 0x54 +#define DMA_CH_CARBR_HI 0x58 +#define DMA_CH_CARBR_LO 0x5c +#define DMA_CH_SR 0x60 + +/* DMA channel register entry bit positions and sizes */ +#define DMA_CH_CR_PBLX8_INDEX 16 +#define DMA_CH_CR_PBLX8_WIDTH 1 +#define DMA_CH_CR_SPH_INDEX 24 +#define DMA_CH_CR_SPH_WIDTH 1 +#define DMA_CH_IER_AIE_INDEX 14 +#define DMA_CH_IER_AIE_WIDTH 1 +#define DMA_CH_IER_FBEE_INDEX 12 +#define DMA_CH_IER_FBEE_WIDTH 1 +#define DMA_CH_IER_NIE_INDEX 15 +#define DMA_CH_IER_NIE_WIDTH 1 +#define DMA_CH_IER_RBUE_INDEX 7 +#define DMA_CH_IER_RBUE_WIDTH 1 +#define DMA_CH_IER_RIE_INDEX 6 +#define DMA_CH_IER_RIE_WIDTH 1 +#define DMA_CH_IER_RSE_INDEX 8 +#define DMA_CH_IER_RSE_WIDTH 1 +#define DMA_CH_IER_TBUE_INDEX 2 +#define DMA_CH_IER_TBUE_WIDTH 1 +#define DMA_CH_IER_TIE_INDEX 0 +#define DMA_CH_IER_TIE_WIDTH 1 +#define DMA_CH_IER_TXSE_INDEX 1 +#define DMA_CH_IER_TXSE_WIDTH 1 +#define DMA_CH_RCR_PBL_INDEX 16 +#define DMA_CH_RCR_PBL_WIDTH 6 +#define DMA_CH_RCR_RBSZ_INDEX 1 +#define DMA_CH_RCR_RBSZ_WIDTH 14 +#define DMA_CH_RCR_SR_INDEX 0 +#define DMA_CH_RCR_SR_WIDTH 1 +#define DMA_CH_RIWT_RWT_INDEX 0 +#define DMA_CH_RIWT_RWT_WIDTH 8 +#define DMA_CH_SR_FBE_INDEX 12 +#define DMA_CH_SR_FBE_WIDTH 1 +#define DMA_CH_SR_RBU_INDEX 7 +#define DMA_CH_SR_RBU_WIDTH 1 +#define DMA_CH_SR_RI_INDEX 6 +#define DMA_CH_SR_RI_WIDTH 1 +#define DMA_CH_SR_RPS_INDEX 8 +#define DMA_CH_SR_RPS_WIDTH 1 +#define DMA_CH_SR_TBU_INDEX 2 +#define DMA_CH_SR_TBU_WIDTH 1 +#define DMA_CH_SR_TI_INDEX 0 +#define DMA_CH_SR_TI_WIDTH 1 +#define DMA_CH_SR_TPS_INDEX 1 +#define DMA_CH_SR_TPS_WIDTH 1 +#define DMA_CH_TCR_OSP_INDEX 4 +#define DMA_CH_TCR_OSP_WIDTH 1 +#define DMA_CH_TCR_PBL_INDEX 16 +#define DMA_CH_TCR_PBL_WIDTH 6 +#define DMA_CH_TCR_ST_INDEX 0 +#define DMA_CH_TCR_ST_WIDTH 1 +#define DMA_CH_TCR_TSE_INDEX 12 +#define DMA_CH_TCR_TSE_WIDTH 1 + +/* DMA channel register values */ +#define DMA_OSP_DISABLE 0x00 +#define DMA_OSP_ENABLE 0x01 +#define DMA_PBL_1 1 +#define DMA_PBL_2 2 +#define DMA_PBL_4 4 +#define 
DMA_PBL_8 8 +#define DMA_PBL_16 16 +#define DMA_PBL_32 32 +#define DMA_PBL_64 64 /* 8 x 8 */ +#define DMA_PBL_128 128 /* 8 x 16 */ +#define DMA_PBL_256 256 /* 8 x 32 */ +#define DMA_PBL_X8_DISABLE 0x00 +#define DMA_PBL_X8_ENABLE 0x01 + +/* MAC register offsets */ +#define MAC_TCR 0x0000 +#define MAC_RCR 0x0004 +#define MAC_PFR 0x0008 +#define MAC_WTR 0x000c +#define MAC_HTR0 0x0010 +#define MAC_VLANTR 0x0050 +#define MAC_VLANHTR 0x0058 +#define MAC_VLANIR 0x0060 +#define MAC_IVLANIR 0x0064 +#define MAC_RETMR 0x006c +#define MAC_Q0TFCR 0x0070 +#define MAC_RFCR 0x0090 +#define MAC_RQC0R 0x00a0 +#define MAC_RQC1R 0x00a4 +#define MAC_RQC2R 0x00a8 +#define MAC_RQC3R 0x00ac +#define MAC_ISR 0x00b0 +#define MAC_IER 0x00b4 +#define MAC_RTSR 0x00b8 +#define MAC_PMTCSR 0x00c0 +#define MAC_RWKPFR 0x00c4 +#define MAC_LPICSR 0x00d0 +#define MAC_LPITCR 0x00d4 +#define MAC_VR 0x0110 +#define MAC_DR 0x0114 +#define MAC_HWF0R 0x011c +#define MAC_HWF1R 0x0120 +#define MAC_HWF2R 0x0124 +#define MAC_MDIOSCAR 0x0200 +#define MAC_MDIOSCCDR 0x0204 +#define MAC_MDIOISR 0x0214 +#define MAC_MDIOIER 0x0218 +#define MAC_MDIOCL22R 0x0220 +#define MAC_GPIOCR 0x0278 +#define MAC_GPIOSR 0x027c +#define MAC_MACA0HR 0x0300 +#define MAC_MACA0LR 0x0304 +#define MAC_MACA1HR 0x0308 +#define MAC_MACA1LR 0x030c +#define MAC_RSSCR 0x0c80 +#define MAC_RSSAR 0x0c88 +#define MAC_RSSDR 0x0c8c +#define MAC_TSCR 0x0d00 +#define MAC_SSIR 0x0d04 +#define MAC_STSR 0x0d08 +#define MAC_STNR 0x0d0c +#define MAC_STSUR 0x0d10 +#define MAC_STNUR 0x0d14 +#define MAC_TSAR 0x0d18 +#define MAC_TSSR 0x0d20 +#define MAC_TXSNR 0x0d30 +#define MAC_TXSSR 0x0d34 + +#define MAC_QTFCR_INC 4 +#define MAC_MACA_INC 4 +#define MAC_HTR_INC 4 + +#define MAC_RQC2_INC 4 +#define MAC_RQC2_Q_PER_REG 4 + +#define MAC_MACAHR(i) (MAC_MACA0HR + ((i) * 8)) +#define MAC_MACALR(i) (MAC_MACA0LR + ((i) * 8)) + +#define MAC_HTR(i) (MAC_HTR0 + ((i) * MAC_HTR_INC)) + +/* MAC register entry bit positions and sizes */ +#define MAC_HWF0R_ADDMACADRSEL_INDEX 18 +#define MAC_HWF0R_ADDMACADRSEL_WIDTH 5 +#define MAC_HWF0R_ARPOFFSEL_INDEX 9 +#define MAC_HWF0R_ARPOFFSEL_WIDTH 1 +#define MAC_HWF0R_EEESEL_INDEX 13 +#define MAC_HWF0R_EEESEL_WIDTH 1 +#define MAC_HWF0R_GMIISEL_INDEX 1 +#define MAC_HWF0R_GMIISEL_WIDTH 1 +#define MAC_HWF0R_MGKSEL_INDEX 7 +#define MAC_HWF0R_MGKSEL_WIDTH 1 +#define MAC_HWF0R_MMCSEL_INDEX 8 +#define MAC_HWF0R_MMCSEL_WIDTH 1 +#define MAC_HWF0R_RWKSEL_INDEX 6 +#define MAC_HWF0R_RWKSEL_WIDTH 1 +#define MAC_HWF0R_RXCOESEL_INDEX 16 +#define MAC_HWF0R_RXCOESEL_WIDTH 1 +#define MAC_HWF0R_SAVLANINS_INDEX 27 +#define MAC_HWF0R_SAVLANINS_WIDTH 1 +#define MAC_HWF0R_SMASEL_INDEX 5 +#define MAC_HWF0R_SMASEL_WIDTH 1 +#define MAC_HWF0R_TSSEL_INDEX 12 +#define MAC_HWF0R_TSSEL_WIDTH 1 +#define MAC_HWF0R_TSSTSSEL_INDEX 25 +#define MAC_HWF0R_TSSTSSEL_WIDTH 2 +#define MAC_HWF0R_TXCOESEL_INDEX 14 +#define MAC_HWF0R_TXCOESEL_WIDTH 1 +#define MAC_HWF0R_VLHASH_INDEX 4 +#define MAC_HWF0R_VLHASH_WIDTH 1 +#define MAC_HWF1R_ADDR64_INDEX 14 +#define MAC_HWF1R_ADDR64_WIDTH 2 +#define MAC_HWF1R_ADVTHWORD_INDEX 13 +#define MAC_HWF1R_ADVTHWORD_WIDTH 1 +#define MAC_HWF1R_DBGMEMA_INDEX 19 +#define MAC_HWF1R_DBGMEMA_WIDTH 1 +#define MAC_HWF1R_DCBEN_INDEX 16 +#define MAC_HWF1R_DCBEN_WIDTH 1 +#define MAC_HWF1R_HASHTBLSZ_INDEX 24 +#define MAC_HWF1R_HASHTBLSZ_WIDTH 3 +#define MAC_HWF1R_L3L4FNUM_INDEX 27 +#define MAC_HWF1R_L3L4FNUM_WIDTH 4 +#define MAC_HWF1R_NUMTC_INDEX 21 +#define MAC_HWF1R_NUMTC_WIDTH 3 +#define MAC_HWF1R_RSSEN_INDEX 20 +#define MAC_HWF1R_RSSEN_WIDTH 1 +#define 
MAC_HWF1R_RXFIFOSIZE_INDEX 0 +#define MAC_HWF1R_RXFIFOSIZE_WIDTH 5 +#define MAC_HWF1R_SPHEN_INDEX 17 +#define MAC_HWF1R_SPHEN_WIDTH 1 +#define MAC_HWF1R_TSOEN_INDEX 18 +#define MAC_HWF1R_TSOEN_WIDTH 1 +#define MAC_HWF1R_TXFIFOSIZE_INDEX 6 +#define MAC_HWF1R_TXFIFOSIZE_WIDTH 5 +#define MAC_HWF2R_AUXSNAPNUM_INDEX 28 +#define MAC_HWF2R_AUXSNAPNUM_WIDTH 3 +#define MAC_HWF2R_PPSOUTNUM_INDEX 24 +#define MAC_HWF2R_PPSOUTNUM_WIDTH 3 +#define MAC_HWF2R_RXCHCNT_INDEX 12 +#define MAC_HWF2R_RXCHCNT_WIDTH 4 +#define MAC_HWF2R_RXQCNT_INDEX 0 +#define MAC_HWF2R_RXQCNT_WIDTH 4 +#define MAC_HWF2R_TXCHCNT_INDEX 18 +#define MAC_HWF2R_TXCHCNT_WIDTH 4 +#define MAC_HWF2R_TXQCNT_INDEX 6 +#define MAC_HWF2R_TXQCNT_WIDTH 4 +#define MAC_IER_TSIE_INDEX 12 +#define MAC_IER_TSIE_WIDTH 1 +#define MAC_ISR_MMCRXIS_INDEX 9 +#define MAC_ISR_MMCRXIS_WIDTH 1 +#define MAC_ISR_MMCTXIS_INDEX 10 +#define MAC_ISR_MMCTXIS_WIDTH 1 +#define MAC_ISR_PMTIS_INDEX 4 +#define MAC_ISR_PMTIS_WIDTH 1 +#define MAC_ISR_SMI_INDEX 1 +#define MAC_ISR_SMI_WIDTH 1 +#define MAC_ISR_LSI_INDEX 0 +#define MAC_ISR_LSI_WIDTH 1 +#define MAC_ISR_LS_INDEX 24 +#define MAC_ISR_LS_WIDTH 2 +#define MAC_ISR_TSIS_INDEX 12 +#define MAC_ISR_TSIS_WIDTH 1 +#define MAC_MACA1HR_AE_INDEX 31 +#define MAC_MACA1HR_AE_WIDTH 1 +#define MAC_MDIOIER_SNGLCOMPIE_INDEX 12 +#define MAC_MDIOIER_SNGLCOMPIE_WIDTH 1 +#define MAC_MDIOISR_SNGLCOMPINT_INDEX 12 +#define MAC_MDIOISR_SNGLCOMPINT_WIDTH 1 +#define MAC_MDIOSCAR_DA_INDEX 21 +#define MAC_MDIOSCAR_DA_WIDTH 5 +#define MAC_MDIOSCAR_PA_INDEX 16 +#define MAC_MDIOSCAR_PA_WIDTH 5 +#define MAC_MDIOSCAR_RA_INDEX 0 +#define MAC_MDIOSCAR_RA_WIDTH 16 +#define MAC_MDIOSCAR_REG_INDEX 0 +#define MAC_MDIOSCAR_REG_WIDTH 21 +#define MAC_MDIOSCCDR_BUSY_INDEX 22 +#define MAC_MDIOSCCDR_BUSY_WIDTH 1 +#define MAC_MDIOSCCDR_CMD_INDEX 16 +#define MAC_MDIOSCCDR_CMD_WIDTH 2 +#define MAC_MDIOSCCDR_CR_INDEX 19 +#define MAC_MDIOSCCDR_CR_WIDTH 3 +#define MAC_MDIOSCCDR_DATA_INDEX 0 +#define MAC_MDIOSCCDR_DATA_WIDTH 16 +#define MAC_MDIOSCCDR_SADDR_INDEX 18 +#define MAC_MDIOSCCDR_SADDR_WIDTH 1 +#define MAC_PFR_HMC_INDEX 2 +#define MAC_PFR_HMC_WIDTH 1 +#define MAC_PFR_HPF_INDEX 10 +#define MAC_PFR_HPF_WIDTH 1 +#define MAC_PFR_HUC_INDEX 1 +#define MAC_PFR_HUC_WIDTH 1 +#define MAC_PFR_PM_INDEX 4 +#define MAC_PFR_PM_WIDTH 1 +#define MAC_PFR_PR_INDEX 0 +#define MAC_PFR_PR_WIDTH 1 +#define MAC_PFR_VTFE_INDEX 16 +#define MAC_PFR_VTFE_WIDTH 1 +#define MAC_PMTCSR_MGKPKTEN_INDEX 1 +#define MAC_PMTCSR_MGKPKTEN_WIDTH 1 +#define MAC_PMTCSR_PWRDWN_INDEX 0 +#define MAC_PMTCSR_PWRDWN_WIDTH 1 +#define MAC_PMTCSR_RWKFILTRST_INDEX 31 +#define MAC_PMTCSR_RWKFILTRST_WIDTH 1 +#define MAC_PMTCSR_RWKPKTEN_INDEX 2 +#define MAC_PMTCSR_RWKPKTEN_WIDTH 1 +#define MAC_Q0TFCR_PT_INDEX 16 +#define MAC_Q0TFCR_PT_WIDTH 16 +#define MAC_Q0TFCR_TFE_INDEX 1 +#define MAC_Q0TFCR_TFE_WIDTH 1 +#define MAC_RCR_ACS_INDEX 1 +#define MAC_RCR_ACS_WIDTH 1 +#define MAC_RCR_CST_INDEX 2 +#define MAC_RCR_CST_WIDTH 1 +#define MAC_RCR_DCRCC_INDEX 3 +#define MAC_RCR_DCRCC_WIDTH 1 +#define MAC_RCR_HDSMS_INDEX 12 +#define MAC_RCR_HDSMS_WIDTH 3 +#define MAC_RCR_IPC_INDEX 9 +#define MAC_RCR_IPC_WIDTH 1 +#define MAC_RCR_JE_INDEX 8 +#define MAC_RCR_JE_WIDTH 1 +#define MAC_RCR_LM_INDEX 10 +#define MAC_RCR_LM_WIDTH 1 +#define MAC_RCR_RE_INDEX 0 +#define MAC_RCR_RE_WIDTH 1 +#define MAC_RFCR_PFCE_INDEX 8 +#define MAC_RFCR_PFCE_WIDTH 1 +#define MAC_RFCR_RFE_INDEX 0 +#define MAC_RFCR_RFE_WIDTH 1 +#define MAC_RFCR_UP_INDEX 1 +#define MAC_RFCR_UP_WIDTH 1 +#define MAC_RQC0R_RXQ0EN_INDEX 0 +#define MAC_RQC0R_RXQ0EN_WIDTH 2 +#define 
MAC_RSSAR_ADDRT_INDEX 2 +#define MAC_RSSAR_ADDRT_WIDTH 1 +#define MAC_RSSAR_CT_INDEX 1 +#define MAC_RSSAR_CT_WIDTH 1 +#define MAC_RSSAR_OB_INDEX 0 +#define MAC_RSSAR_OB_WIDTH 1 +#define MAC_RSSAR_RSSIA_INDEX 8 +#define MAC_RSSAR_RSSIA_WIDTH 8 +#define MAC_RSSCR_IP2TE_INDEX 1 +#define MAC_RSSCR_IP2TE_WIDTH 1 +#define MAC_RSSCR_RSSE_INDEX 0 +#define MAC_RSSCR_RSSE_WIDTH 1 +#define MAC_RSSCR_TCP4TE_INDEX 2 +#define MAC_RSSCR_TCP4TE_WIDTH 1 +#define MAC_RSSCR_UDP4TE_INDEX 3 +#define MAC_RSSCR_UDP4TE_WIDTH 1 +#define MAC_RSSDR_DMCH_INDEX 0 +#define MAC_RSSDR_DMCH_WIDTH 4 +#define MAC_SSIR_SNSINC_INDEX 8 +#define MAC_SSIR_SNSINC_WIDTH 8 +#define MAC_SSIR_SSINC_INDEX 16 +#define MAC_SSIR_SSINC_WIDTH 8 +#define MAC_TCR_SS_INDEX 29 +#define MAC_TCR_SS_WIDTH 2 +#define MAC_TCR_TE_INDEX 0 +#define MAC_TCR_TE_WIDTH 1 +#define MAC_TSCR_AV8021ASMEN_INDEX 28 +#define MAC_TSCR_AV8021ASMEN_WIDTH 1 +#define MAC_TSCR_SNAPTYPSEL_INDEX 16 +#define MAC_TSCR_SNAPTYPSEL_WIDTH 2 +#define MAC_TSCR_TSADDREG_INDEX 5 +#define MAC_TSCR_TSADDREG_WIDTH 1 +#define MAC_TSCR_TSCFUPDT_INDEX 1 +#define MAC_TSCR_TSCFUPDT_WIDTH 1 +#define MAC_TSCR_TSCTRLSSR_INDEX 9 +#define MAC_TSCR_TSCTRLSSR_WIDTH 1 +#define MAC_TSCR_TSENA_INDEX 0 +#define MAC_TSCR_TSENA_WIDTH 1 +#define MAC_TSCR_TSENALL_INDEX 8 +#define MAC_TSCR_TSENALL_WIDTH 1 +#define MAC_TSCR_TSEVNTENA_INDEX 14 +#define MAC_TSCR_TSEVNTENA_WIDTH 1 +#define MAC_TSCR_TSINIT_INDEX 2 +#define MAC_TSCR_TSINIT_WIDTH 1 +#define MAC_TSCR_TSIPENA_INDEX 11 +#define MAC_TSCR_TSIPENA_WIDTH 1 +#define MAC_TSCR_TSIPV4ENA_INDEX 13 +#define MAC_TSCR_TSIPV4ENA_WIDTH 1 +#define MAC_TSCR_TSIPV6ENA_INDEX 12 +#define MAC_TSCR_TSIPV6ENA_WIDTH 1 +#define MAC_TSCR_TSMSTRENA_INDEX 15 +#define MAC_TSCR_TSMSTRENA_WIDTH 1 +#define MAC_TSCR_TSVER2ENA_INDEX 10 +#define MAC_TSCR_TSVER2ENA_WIDTH 1 +#define MAC_TSCR_TXTSSTSM_INDEX 24 +#define MAC_TSCR_TXTSSTSM_WIDTH 1 +#define MAC_TSSR_TXTSC_INDEX 15 +#define MAC_TSSR_TXTSC_WIDTH 1 +#define MAC_TXSNR_TXTSSTSMIS_INDEX 31 +#define MAC_TXSNR_TXTSSTSMIS_WIDTH 1 +#define MAC_VLANHTR_VLHT_INDEX 0 +#define MAC_VLANHTR_VLHT_WIDTH 16 +#define MAC_VLANIR_VLTI_INDEX 20 +#define MAC_VLANIR_VLTI_WIDTH 1 +#define MAC_VLANIR_CSVL_INDEX 19 +#define MAC_VLANIR_CSVL_WIDTH 1 +#define MAC_VLANTR_DOVLTC_INDEX 20 +#define MAC_VLANTR_DOVLTC_WIDTH 1 +#define MAC_VLANTR_ERSVLM_INDEX 19 +#define MAC_VLANTR_ERSVLM_WIDTH 1 +#define MAC_VLANTR_ESVL_INDEX 18 +#define MAC_VLANTR_ESVL_WIDTH 1 +#define MAC_VLANTR_ETV_INDEX 16 +#define MAC_VLANTR_ETV_WIDTH 1 +#define MAC_VLANTR_EVLS_INDEX 21 +#define MAC_VLANTR_EVLS_WIDTH 2 +#define MAC_VLANTR_EVLRXS_INDEX 24 +#define MAC_VLANTR_EVLRXS_WIDTH 1 +#define MAC_VLANTR_VL_INDEX 0 +#define MAC_VLANTR_VL_WIDTH 16 +#define MAC_VLANTR_VTHM_INDEX 25 +#define MAC_VLANTR_VTHM_WIDTH 1 +#define MAC_VLANTR_VTIM_INDEX 17 +#define MAC_VLANTR_VTIM_WIDTH 1 +#define MAC_VR_DEVID_INDEX 8 +#define MAC_VR_DEVID_WIDTH 8 +#define MAC_VR_SNPSVER_INDEX 0 +#define MAC_VR_SNPSVER_WIDTH 8 +#define MAC_VR_USERVER_INDEX 16 +#define MAC_VR_USERVER_WIDTH 8 + +/* MMC register offsets */ +#define MMC_CR 0x0800 +#define MMC_RISR 0x0804 +#define MMC_TISR 0x0808 +#define MMC_RIER 0x080c +#define MMC_TIER 0x0810 +#define MMC_TXOCTETCOUNT_GB_LO 0x0814 +#define MMC_TXOCTETCOUNT_GB_HI 0x0818 +#define MMC_TXFRAMECOUNT_GB_LO 0x081c +#define MMC_TXFRAMECOUNT_GB_HI 0x0820 +#define MMC_TXBROADCASTFRAMES_G_LO 0x0824 +#define MMC_TXBROADCASTFRAMES_G_HI 0x0828 +#define MMC_TXMULTICASTFRAMES_G_LO 0x082c +#define MMC_TXMULTICASTFRAMES_G_HI 0x0830 +#define MMC_TX64OCTETS_GB_LO 0x0834 +#define 
MMC_TX64OCTETS_GB_HI 0x0838 +#define MMC_TX65TO127OCTETS_GB_LO 0x083c +#define MMC_TX65TO127OCTETS_GB_HI 0x0840 +#define MMC_TX128TO255OCTETS_GB_LO 0x0844 +#define MMC_TX128TO255OCTETS_GB_HI 0x0848 +#define MMC_TX256TO511OCTETS_GB_LO 0x084c +#define MMC_TX256TO511OCTETS_GB_HI 0x0850 +#define MMC_TX512TO1023OCTETS_GB_LO 0x0854 +#define MMC_TX512TO1023OCTETS_GB_HI 0x0858 +#define MMC_TX1024TOMAXOCTETS_GB_LO 0x085c +#define MMC_TX1024TOMAXOCTETS_GB_HI 0x0860 +#define MMC_TXUNICASTFRAMES_GB_LO 0x0864 +#define MMC_TXUNICASTFRAMES_GB_HI 0x0868 +#define MMC_TXMULTICASTFRAMES_GB_LO 0x086c +#define MMC_TXMULTICASTFRAMES_GB_HI 0x0870 +#define MMC_TXBROADCASTFRAMES_GB_LO 0x0874 +#define MMC_TXBROADCASTFRAMES_GB_HI 0x0878 +#define MMC_TXUNDERFLOWERROR_LO 0x087c +#define MMC_TXUNDERFLOWERROR_HI 0x0880 +#define MMC_TXOCTETCOUNT_G_LO 0x0884 +#define MMC_TXOCTETCOUNT_G_HI 0x0888 +#define MMC_TXFRAMECOUNT_G_LO 0x088c +#define MMC_TXFRAMECOUNT_G_HI 0x0890 +#define MMC_TXPAUSEFRAMES_LO 0x0894 +#define MMC_TXPAUSEFRAMES_HI 0x0898 +#define MMC_TXVLANFRAMES_G_LO 0x089c +#define MMC_TXVLANFRAMES_G_HI 0x08a0 +#define MMC_RXFRAMECOUNT_GB_LO 0x0900 +#define MMC_RXFRAMECOUNT_GB_HI 0x0904 +#define MMC_RXOCTETCOUNT_GB_LO 0x0908 +#define MMC_RXOCTETCOUNT_GB_HI 0x090c +#define MMC_RXOCTETCOUNT_G_LO 0x0910 +#define MMC_RXOCTETCOUNT_G_HI 0x0914 +#define MMC_RXBROADCASTFRAMES_G_LO 0x0918 +#define MMC_RXBROADCASTFRAMES_G_HI 0x091c +#define MMC_RXMULTICASTFRAMES_G_LO 0x0920 +#define MMC_RXMULTICASTFRAMES_G_HI 0x0924 +#define MMC_RXCRCERROR_LO 0x0928 +#define MMC_RXCRCERROR_HI 0x092c +#define MMC_RXRUNTERROR 0x0930 +#define MMC_RXJABBERERROR 0x0934 +#define MMC_RXUNDERSIZE_G 0x0938 +#define MMC_RXOVERSIZE_G 0x093c +#define MMC_RX64OCTETS_GB_LO 0x0940 +#define MMC_RX64OCTETS_GB_HI 0x0944 +#define MMC_RX65TO127OCTETS_GB_LO 0x0948 +#define MMC_RX65TO127OCTETS_GB_HI 0x094c +#define MMC_RX128TO255OCTETS_GB_LO 0x0950 +#define MMC_RX128TO255OCTETS_GB_HI 0x0954 +#define MMC_RX256TO511OCTETS_GB_LO 0x0958 +#define MMC_RX256TO511OCTETS_GB_HI 0x095c +#define MMC_RX512TO1023OCTETS_GB_LO 0x0960 +#define MMC_RX512TO1023OCTETS_GB_HI 0x0964 +#define MMC_RX1024TOMAXOCTETS_GB_LO 0x0968 +#define MMC_RX1024TOMAXOCTETS_GB_HI 0x096c +#define MMC_RXUNICASTFRAMES_G_LO 0x0970 +#define MMC_RXUNICASTFRAMES_G_HI 0x0974 +#define MMC_RXLENGTHERROR_LO 0x0978 +#define MMC_RXLENGTHERROR_HI 0x097c +#define MMC_RXOUTOFRANGETYPE_LO 0x0980 +#define MMC_RXOUTOFRANGETYPE_HI 0x0984 +#define MMC_RXPAUSEFRAMES_LO 0x0988 +#define MMC_RXPAUSEFRAMES_HI 0x098c +#define MMC_RXFIFOOVERFLOW_LO 0x0990 +#define MMC_RXFIFOOVERFLOW_HI 0x0994 +#define MMC_RXVLANFRAMES_GB_LO 0x0998 +#define MMC_RXVLANFRAMES_GB_HI 0x099c +#define MMC_RXWATCHDOGERROR 0x09a0 + +/* MMC register entry bit positions and sizes */ +#define MMC_CR_CR_INDEX 0 +#define MMC_CR_CR_WIDTH 1 +#define MMC_CR_CSR_INDEX 1 +#define MMC_CR_CSR_WIDTH 1 +#define MMC_CR_ROR_INDEX 2 +#define MMC_CR_ROR_WIDTH 1 +#define MMC_CR_MCF_INDEX 3 +#define MMC_CR_MCF_WIDTH 1 +#define MMC_CR_MCT_INDEX 4 +#define MMC_CR_MCT_WIDTH 2 +#define MMC_RIER_ALL_INTERRUPTS_INDEX 0 +#define MMC_RIER_ALL_INTERRUPTS_WIDTH 23 +#define MMC_RISR_RXFRAMECOUNT_GB_INDEX 0 +#define MMC_RISR_RXFRAMECOUNT_GB_WIDTH 1 +#define MMC_RISR_RXOCTETCOUNT_GB_INDEX 1 +#define MMC_RISR_RXOCTETCOUNT_GB_WIDTH 1 +#define MMC_RISR_RXOCTETCOUNT_G_INDEX 2 +#define MMC_RISR_RXOCTETCOUNT_G_WIDTH 1 +#define MMC_RISR_RXBROADCASTFRAMES_G_INDEX 3 +#define MMC_RISR_RXBROADCASTFRAMES_G_WIDTH 1 +#define MMC_RISR_RXMULTICASTFRAMES_G_INDEX 4 +#define 
MMC_RISR_RXMULTICASTFRAMES_G_WIDTH 1 +#define MMC_RISR_RXCRCERROR_INDEX 5 +#define MMC_RISR_RXCRCERROR_WIDTH 1 +#define MMC_RISR_RXRUNTERROR_INDEX 6 +#define MMC_RISR_RXRUNTERROR_WIDTH 1 +#define MMC_RISR_RXJABBERERROR_INDEX 7 +#define MMC_RISR_RXJABBERERROR_WIDTH 1 +#define MMC_RISR_RXUNDERSIZE_G_INDEX 8 +#define MMC_RISR_RXUNDERSIZE_G_WIDTH 1 +#define MMC_RISR_RXOVERSIZE_G_INDEX 9 +#define MMC_RISR_RXOVERSIZE_G_WIDTH 1 +#define MMC_RISR_RX64OCTETS_GB_INDEX 10 +#define MMC_RISR_RX64OCTETS_GB_WIDTH 1 +#define MMC_RISR_RX65TO127OCTETS_GB_INDEX 11 +#define MMC_RISR_RX65TO127OCTETS_GB_WIDTH 1 +#define MMC_RISR_RX128TO255OCTETS_GB_INDEX 12 +#define MMC_RISR_RX128TO255OCTETS_GB_WIDTH 1 +#define MMC_RISR_RX256TO511OCTETS_GB_INDEX 13 +#define MMC_RISR_RX256TO511OCTETS_GB_WIDTH 1 +#define MMC_RISR_RX512TO1023OCTETS_GB_INDEX 14 +#define MMC_RISR_RX512TO1023OCTETS_GB_WIDTH 1 +#define MMC_RISR_RX1024TOMAXOCTETS_GB_INDEX 15 +#define MMC_RISR_RX1024TOMAXOCTETS_GB_WIDTH 1 +#define MMC_RISR_RXUNICASTFRAMES_G_INDEX 16 +#define MMC_RISR_RXUNICASTFRAMES_G_WIDTH 1 +#define MMC_RISR_RXLENGTHERROR_INDEX 17 +#define MMC_RISR_RXLENGTHERROR_WIDTH 1 +#define MMC_RISR_RXOUTOFRANGETYPE_INDEX 18 +#define MMC_RISR_RXOUTOFRANGETYPE_WIDTH 1 +#define MMC_RISR_RXPAUSEFRAMES_INDEX 19 +#define MMC_RISR_RXPAUSEFRAMES_WIDTH 1 +#define MMC_RISR_RXFIFOOVERFLOW_INDEX 20 +#define MMC_RISR_RXFIFOOVERFLOW_WIDTH 1 +#define MMC_RISR_RXVLANFRAMES_GB_INDEX 21 +#define MMC_RISR_RXVLANFRAMES_GB_WIDTH 1 +#define MMC_RISR_RXWATCHDOGERROR_INDEX 22 +#define MMC_RISR_RXWATCHDOGERROR_WIDTH 1 +#define MMC_TIER_ALL_INTERRUPTS_INDEX 0 +#define MMC_TIER_ALL_INTERRUPTS_WIDTH 18 +#define MMC_TISR_TXOCTETCOUNT_GB_INDEX 0 +#define MMC_TISR_TXOCTETCOUNT_GB_WIDTH 1 +#define MMC_TISR_TXFRAMECOUNT_GB_INDEX 1 +#define MMC_TISR_TXFRAMECOUNT_GB_WIDTH 1 +#define MMC_TISR_TXBROADCASTFRAMES_G_INDEX 2 +#define MMC_TISR_TXBROADCASTFRAMES_G_WIDTH 1 +#define MMC_TISR_TXMULTICASTFRAMES_G_INDEX 3 +#define MMC_TISR_TXMULTICASTFRAMES_G_WIDTH 1 +#define MMC_TISR_TX64OCTETS_GB_INDEX 4 +#define MMC_TISR_TX64OCTETS_GB_WIDTH 1 +#define MMC_TISR_TX65TO127OCTETS_GB_INDEX 5 +#define MMC_TISR_TX65TO127OCTETS_GB_WIDTH 1 +#define MMC_TISR_TX128TO255OCTETS_GB_INDEX 6 +#define MMC_TISR_TX128TO255OCTETS_GB_WIDTH 1 +#define MMC_TISR_TX256TO511OCTETS_GB_INDEX 7 +#define MMC_TISR_TX256TO511OCTETS_GB_WIDTH 1 +#define MMC_TISR_TX512TO1023OCTETS_GB_INDEX 8 +#define MMC_TISR_TX512TO1023OCTETS_GB_WIDTH 1 +#define MMC_TISR_TX1024TOMAXOCTETS_GB_INDEX 9 +#define MMC_TISR_TX1024TOMAXOCTETS_GB_WIDTH 1 +#define MMC_TISR_TXUNICASTFRAMES_GB_INDEX 10 +#define MMC_TISR_TXUNICASTFRAMES_GB_WIDTH 1 +#define MMC_TISR_TXMULTICASTFRAMES_GB_INDEX 11 +#define MMC_TISR_TXMULTICASTFRAMES_GB_WIDTH 1 +#define MMC_TISR_TXBROADCASTFRAMES_GB_INDEX 12 +#define MMC_TISR_TXBROADCASTFRAMES_GB_WIDTH 1 +#define MMC_TISR_TXUNDERFLOWERROR_INDEX 13 +#define MMC_TISR_TXUNDERFLOWERROR_WIDTH 1 +#define MMC_TISR_TXOCTETCOUNT_G_INDEX 14 +#define MMC_TISR_TXOCTETCOUNT_G_WIDTH 1 +#define MMC_TISR_TXFRAMECOUNT_G_INDEX 15 +#define MMC_TISR_TXFRAMECOUNT_G_WIDTH 1 +#define MMC_TISR_TXPAUSEFRAMES_INDEX 16 +#define MMC_TISR_TXPAUSEFRAMES_WIDTH 1 +#define MMC_TISR_TXVLANFRAMES_G_INDEX 17 +#define MMC_TISR_TXVLANFRAMES_G_WIDTH 1 + +/* MTL register offsets */ +#define MTL_OMR 0x1000 +#define MTL_FDCR 0x1008 +#define MTL_FDSR 0x100c +#define MTL_FDDR 0x1010 +#define MTL_ISR 0x1020 +#define MTL_RQDCM0R 0x1030 +#define MTL_TCPM0R 0x1040 +#define MTL_TCPM1R 0x1044 + +#define MTL_RQDCM_INC 4 +#define MTL_RQDCM_Q_PER_REG 4 +#define MTL_TCPM_INC 
4 +#define MTL_TCPM_TC_PER_REG 4 + +/* MTL register entry bit positions and sizes */ +#define MTL_OMR_ETSALG_INDEX 5 +#define MTL_OMR_ETSALG_WIDTH 2 +#define MTL_OMR_RAA_INDEX 2 +#define MTL_OMR_RAA_WIDTH 1 + +/* MTL queue register offsets + * Multiple queues can be active. The first queue has registers + * that begin at 0x1100. Each subsequent queue has registers that + * are accessed using an offset of 0x80 from the previous queue. + */ +#define MTL_Q_BASE 0x1100 +#define MTL_Q_INC 0x80 + +#define MTL_Q_TQOMR 0x00 +#define MTL_Q_TQUR 0x04 +#define MTL_Q_TQDR 0x08 +#define MTL_Q_RQOMR 0x40 +#define MTL_Q_RQMPOCR 0x44 +#define MTL_Q_RQDR 0x48 +#define MTL_Q_RQFCR 0x50 +#define MTL_Q_IER 0x70 +#define MTL_Q_ISR 0x74 + +/* MTL queue register entry bit positions and sizes */ +#define MTL_Q_RQDR_PRXQ_INDEX 16 +#define MTL_Q_RQDR_PRXQ_WIDTH 14 +#define MTL_Q_RQDR_RXQSTS_INDEX 4 +#define MTL_Q_RQDR_RXQSTS_WIDTH 2 +#define MTL_Q_RQFCR_RFA_INDEX 1 +#define MTL_Q_RQFCR_RFA_WIDTH 6 +#define MTL_Q_RQFCR_RFD_INDEX 17 +#define MTL_Q_RQFCR_RFD_WIDTH 6 +#define MTL_Q_RQOMR_EHFC_INDEX 7 +#define MTL_Q_RQOMR_EHFC_WIDTH 1 +#define MTL_Q_RQOMR_RQS_INDEX 16 +#define MTL_Q_RQOMR_RQS_WIDTH 9 +#define MTL_Q_RQOMR_RSF_INDEX 5 +#define MTL_Q_RQOMR_RSF_WIDTH 1 +#define MTL_Q_RQOMR_RTC_INDEX 0 +#define MTL_Q_RQOMR_RTC_WIDTH 2 +#define MTL_Q_TQDR_TRCSTS_INDEX 1 +#define MTL_Q_TQDR_TRCSTS_WIDTH 2 +#define MTL_Q_TQDR_TXQSTS_INDEX 4 +#define MTL_Q_TQDR_TXQSTS_WIDTH 1 +#define MTL_Q_TQOMR_FTQ_INDEX 0 +#define MTL_Q_TQOMR_FTQ_WIDTH 1 +#define MTL_Q_TQOMR_Q2TCMAP_INDEX 8 +#define MTL_Q_TQOMR_Q2TCMAP_WIDTH 3 +#define MTL_Q_TQOMR_TQS_INDEX 16 +#define MTL_Q_TQOMR_TQS_WIDTH 10 +#define MTL_Q_TQOMR_TSF_INDEX 1 +#define MTL_Q_TQOMR_TSF_WIDTH 1 +#define MTL_Q_TQOMR_TTC_INDEX 4 +#define MTL_Q_TQOMR_TTC_WIDTH 3 +#define MTL_Q_TQOMR_TXQEN_INDEX 2 +#define MTL_Q_TQOMR_TXQEN_WIDTH 2 + +/* MTL queue register value */ +#define MTL_RSF_DISABLE 0x00 +#define MTL_RSF_ENABLE 0x01 +#define MTL_TSF_DISABLE 0x00 +#define MTL_TSF_ENABLE 0x01 + +#define MTL_RX_THRESHOLD_64 0x00 +#define MTL_RX_THRESHOLD_96 0x02 +#define MTL_RX_THRESHOLD_128 0x03 +#define MTL_TX_THRESHOLD_32 0x01 +#define MTL_TX_THRESHOLD_64 0x00 +#define MTL_TX_THRESHOLD_96 0x02 +#define MTL_TX_THRESHOLD_128 0x03 +#define MTL_TX_THRESHOLD_192 0x04 +#define MTL_TX_THRESHOLD_256 0x05 +#define MTL_TX_THRESHOLD_384 0x06 +#define MTL_TX_THRESHOLD_512 0x07 + +#define MTL_ETSALG_WRR 0x00 +#define MTL_ETSALG_WFQ 0x01 +#define MTL_ETSALG_DWRR 0x02 +#define MTL_RAA_SP 0x00 +#define MTL_RAA_WSP 0x01 + +#define MTL_Q_DISABLED 0x00 +#define MTL_Q_ENABLED 0x02 + +/* MTL traffic class register offsets + * Multiple traffic classes can be active. The first class has registers + * that begin at 0x1100. Each subsequent queue has registers that + * are accessed using an offset of 0x80 from the previous queue. 
+ */ +#define MTL_TC_BASE MTL_Q_BASE +#define MTL_TC_INC MTL_Q_INC + +#define MTL_TC_ETSCR 0x10 +#define MTL_TC_ETSSR 0x14 +#define MTL_TC_QWR 0x18 + +/* MTL traffic class register entry bit positions and sizes */ +#define MTL_TC_ETSCR_TSA_INDEX 0 +#define MTL_TC_ETSCR_TSA_WIDTH 2 +#define MTL_TC_QWR_QW_INDEX 0 +#define MTL_TC_QWR_QW_WIDTH 21 +#define MTL_TCPM0R_PSTC0_INDEX 0 +#define MTL_TCPM0R_PSTC0_WIDTH 8 +#define MTL_TCPM0R_PSTC1_INDEX 8 +#define MTL_TCPM0R_PSTC1_WIDTH 8 +#define MTL_TCPM0R_PSTC2_INDEX 16 +#define MTL_TCPM0R_PSTC2_WIDTH 8 +#define MTL_TCPM0R_PSTC3_INDEX 24 +#define MTL_TCPM0R_PSTC3_WIDTH 8 +#define MTL_TCPM1R_PSTC4_INDEX 0 +#define MTL_TCPM1R_PSTC4_WIDTH 8 +#define MTL_TCPM1R_PSTC5_INDEX 8 +#define MTL_TCPM1R_PSTC5_WIDTH 8 +#define MTL_TCPM1R_PSTC6_INDEX 16 +#define MTL_TCPM1R_PSTC6_WIDTH 8 +#define MTL_TCPM1R_PSTC7_INDEX 24 +#define MTL_TCPM1R_PSTC7_WIDTH 8 + +/* MTL traffic class register value */ +#define MTL_TSA_SP 0x00 +#define MTL_TSA_ETS 0x02 + +/* PCS register offsets */ +#define PCS_V1_WINDOW_SELECT 0x03fc +#define PCS_V2_WINDOW_DEF 0x9060 +#define PCS_V2_WINDOW_SELECT 0x9064 +#define PCS_V2_RV_WINDOW_DEF 0x1060 +#define PCS_V2_RV_WINDOW_SELECT 0x1064 + +/* PCS register entry bit positions and sizes */ +#define PCS_V2_WINDOW_DEF_OFFSET_INDEX 6 +#define PCS_V2_WINDOW_DEF_OFFSET_WIDTH 14 +#define PCS_V2_WINDOW_DEF_SIZE_INDEX 2 +#define PCS_V2_WINDOW_DEF_SIZE_WIDTH 4 + +/* SerDes integration register offsets */ +#define SIR0_KR_RT_1 0x002c +#define SIR0_STATUS 0x0040 +#define SIR1_SPEED 0x0000 + +/* SerDes integration register entry bit positions and sizes */ +#define SIR0_KR_RT_1_RESET_INDEX 11 +#define SIR0_KR_RT_1_RESET_WIDTH 1 +#define SIR0_STATUS_RX_READY_INDEX 0 +#define SIR0_STATUS_RX_READY_WIDTH 1 +#define SIR0_STATUS_TX_READY_INDEX 8 +#define SIR0_STATUS_TX_READY_WIDTH 1 +#define SIR1_SPEED_CDR_RATE_INDEX 12 +#define SIR1_SPEED_CDR_RATE_WIDTH 4 +#define SIR1_SPEED_DATARATE_INDEX 4 +#define SIR1_SPEED_DATARATE_WIDTH 2 +#define SIR1_SPEED_PLLSEL_INDEX 3 +#define SIR1_SPEED_PLLSEL_WIDTH 1 +#define SIR1_SPEED_RATECHANGE_INDEX 6 +#define SIR1_SPEED_RATECHANGE_WIDTH 1 +#define SIR1_SPEED_TXAMP_INDEX 8 +#define SIR1_SPEED_TXAMP_WIDTH 4 +#define SIR1_SPEED_WORDMODE_INDEX 0 +#define SIR1_SPEED_WORDMODE_WIDTH 3 + +/* SerDes RxTx register offsets */ +#define RXTX_REG6 0x0018 +#define RXTX_REG20 0x0050 +#define RXTX_REG22 0x0058 +#define RXTX_REG114 0x01c8 +#define RXTX_REG129 0x0204 + +/* SerDes RxTx register entry bit positions and sizes */ +#define RXTX_REG6_RESETB_RXD_INDEX 8 +#define RXTX_REG6_RESETB_RXD_WIDTH 1 +#define RXTX_REG20_BLWC_ENA_INDEX 2 +#define RXTX_REG20_BLWC_ENA_WIDTH 1 +#define RXTX_REG114_PQ_REG_INDEX 9 +#define RXTX_REG114_PQ_REG_WIDTH 7 +#define RXTX_REG129_RXDFE_CONFIG_INDEX 14 +#define RXTX_REG129_RXDFE_CONFIG_WIDTH 2 + +/* MAC Control register offsets */ +#define XP_PROP_0 0x0000 +#define XP_PROP_1 0x0004 +#define XP_PROP_2 0x0008 +#define XP_PROP_3 0x000c +#define XP_PROP_4 0x0010 +#define XP_PROP_5 0x0014 +#define XP_MAC_ADDR_LO 0x0020 +#define XP_MAC_ADDR_HI 0x0024 +#define XP_ECC_ISR 0x0030 +#define XP_ECC_IER 0x0034 +#define XP_ECC_CNT0 0x003c +#define XP_ECC_CNT1 0x0040 +#define XP_DRIVER_INT_REQ 0x0060 +#define XP_DRIVER_INT_RO 0x0064 +#define XP_DRIVER_SCRATCH_0 0x0068 +#define XP_DRIVER_SCRATCH_1 0x006c +#define XP_INT_EN 0x0078 +#define XP_I2C_MUTEX 0x0080 +#define XP_MDIO_MUTEX 0x0084 + +/* MAC Control register entry bit positions and sizes */ +#define XP_DRIVER_INT_REQ_REQUEST_INDEX 0 +#define XP_DRIVER_INT_REQ_REQUEST_WIDTH 1 
+#define XP_DRIVER_INT_RO_STATUS_INDEX 0 +#define XP_DRIVER_INT_RO_STATUS_WIDTH 1 +#define XP_DRIVER_SCRATCH_0_COMMAND_INDEX 0 +#define XP_DRIVER_SCRATCH_0_COMMAND_WIDTH 8 +#define XP_DRIVER_SCRATCH_0_SUB_COMMAND_INDEX 8 +#define XP_DRIVER_SCRATCH_0_SUB_COMMAND_WIDTH 8 +#define XP_ECC_CNT0_RX_DED_INDEX 24 +#define XP_ECC_CNT0_RX_DED_WIDTH 8 +#define XP_ECC_CNT0_RX_SEC_INDEX 16 +#define XP_ECC_CNT0_RX_SEC_WIDTH 8 +#define XP_ECC_CNT0_TX_DED_INDEX 8 +#define XP_ECC_CNT0_TX_DED_WIDTH 8 +#define XP_ECC_CNT0_TX_SEC_INDEX 0 +#define XP_ECC_CNT0_TX_SEC_WIDTH 8 +#define XP_ECC_CNT1_DESC_DED_INDEX 8 +#define XP_ECC_CNT1_DESC_DED_WIDTH 8 +#define XP_ECC_CNT1_DESC_SEC_INDEX 0 +#define XP_ECC_CNT1_DESC_SEC_WIDTH 8 +#define XP_ECC_IER_DESC_DED_INDEX 0 +#define XP_ECC_IER_DESC_DED_WIDTH 1 +#define XP_ECC_IER_DESC_SEC_INDEX 1 +#define XP_ECC_IER_DESC_SEC_WIDTH 1 +#define XP_ECC_IER_RX_DED_INDEX 2 +#define XP_ECC_IER_RX_DED_WIDTH 1 +#define XP_ECC_IER_RX_SEC_INDEX 3 +#define XP_ECC_IER_RX_SEC_WIDTH 1 +#define XP_ECC_IER_TX_DED_INDEX 4 +#define XP_ECC_IER_TX_DED_WIDTH 1 +#define XP_ECC_IER_TX_SEC_INDEX 5 +#define XP_ECC_IER_TX_SEC_WIDTH 1 +#define XP_ECC_ISR_DESC_DED_INDEX 0 +#define XP_ECC_ISR_DESC_DED_WIDTH 1 +#define XP_ECC_ISR_DESC_SEC_INDEX 1 +#define XP_ECC_ISR_DESC_SEC_WIDTH 1 +#define XP_ECC_ISR_RX_DED_INDEX 2 +#define XP_ECC_ISR_RX_DED_WIDTH 1 +#define XP_ECC_ISR_RX_SEC_INDEX 3 +#define XP_ECC_ISR_RX_SEC_WIDTH 1 +#define XP_ECC_ISR_TX_DED_INDEX 4 +#define XP_ECC_ISR_TX_DED_WIDTH 1 +#define XP_ECC_ISR_TX_SEC_INDEX 5 +#define XP_ECC_ISR_TX_SEC_WIDTH 1 +#define XP_I2C_MUTEX_BUSY_INDEX 31 +#define XP_I2C_MUTEX_BUSY_WIDTH 1 +#define XP_I2C_MUTEX_ID_INDEX 29 +#define XP_I2C_MUTEX_ID_WIDTH 2 +#define XP_I2C_MUTEX_ACTIVE_INDEX 0 +#define XP_I2C_MUTEX_ACTIVE_WIDTH 1 +#define XP_MAC_ADDR_HI_VALID_INDEX 31 +#define XP_MAC_ADDR_HI_VALID_WIDTH 1 +#define XP_PROP_0_CONN_TYPE_INDEX 28 +#define XP_PROP_0_CONN_TYPE_WIDTH 3 +#define XP_PROP_0_MDIO_ADDR_INDEX 16 +#define XP_PROP_0_MDIO_ADDR_WIDTH 5 +#define XP_PROP_0_PORT_ID_INDEX 0 +#define XP_PROP_0_PORT_ID_WIDTH 8 +#define XP_PROP_0_PORT_MODE_INDEX 8 +#define XP_PROP_0_PORT_MODE_WIDTH 4 +#define XP_PROP_0_PORT_SPEEDS_INDEX 23 +#define XP_PROP_0_PORT_SPEEDS_WIDTH 4 +#define XP_PROP_1_MAX_RX_DMA_INDEX 24 +#define XP_PROP_1_MAX_RX_DMA_WIDTH 5 +#define XP_PROP_1_MAX_RX_QUEUES_INDEX 8 +#define XP_PROP_1_MAX_RX_QUEUES_WIDTH 5 +#define XP_PROP_1_MAX_TX_DMA_INDEX 16 +#define XP_PROP_1_MAX_TX_DMA_WIDTH 5 +#define XP_PROP_1_MAX_TX_QUEUES_INDEX 0 +#define XP_PROP_1_MAX_TX_QUEUES_WIDTH 5 +#define XP_PROP_2_RX_FIFO_SIZE_INDEX 16 +#define XP_PROP_2_RX_FIFO_SIZE_WIDTH 16 +#define XP_PROP_2_TX_FIFO_SIZE_INDEX 0 +#define XP_PROP_2_TX_FIFO_SIZE_WIDTH 16 +#define XP_PROP_3_GPIO_MASK_INDEX 28 +#define XP_PROP_3_GPIO_MASK_WIDTH 4 +#define XP_PROP_3_GPIO_MOD_ABS_INDEX 20 +#define XP_PROP_3_GPIO_MOD_ABS_WIDTH 4 +#define XP_PROP_3_GPIO_RATE_SELECT_INDEX 16 +#define XP_PROP_3_GPIO_RATE_SELECT_WIDTH 4 +#define XP_PROP_3_GPIO_RX_LOS_INDEX 24 +#define XP_PROP_3_GPIO_RX_LOS_WIDTH 4 +#define XP_PROP_3_GPIO_TX_FAULT_INDEX 12 +#define XP_PROP_3_GPIO_TX_FAULT_WIDTH 4 +#define XP_PROP_3_GPIO_ADDR_INDEX 8 +#define XP_PROP_3_GPIO_ADDR_WIDTH 3 +#define XP_PROP_3_MDIO_RESET_INDEX 0 +#define XP_PROP_3_MDIO_RESET_WIDTH 2 +#define XP_PROP_3_MDIO_RESET_I2C_ADDR_INDEX 8 +#define XP_PROP_3_MDIO_RESET_I2C_ADDR_WIDTH 3 +#define XP_PROP_3_MDIO_RESET_I2C_GPIO_INDEX 12 +#define XP_PROP_3_MDIO_RESET_I2C_GPIO_WIDTH 4 +#define XP_PROP_3_MDIO_RESET_INT_GPIO_INDEX 4 +#define XP_PROP_3_MDIO_RESET_INT_GPIO_WIDTH 2 
+#define XP_PROP_4_MUX_ADDR_HI_INDEX 8 +#define XP_PROP_4_MUX_ADDR_HI_WIDTH 5 +#define XP_PROP_4_MUX_ADDR_LO_INDEX 0 +#define XP_PROP_4_MUX_ADDR_LO_WIDTH 3 +#define XP_PROP_4_MUX_CHAN_INDEX 4 +#define XP_PROP_4_MUX_CHAN_WIDTH 3 +#define XP_PROP_4_REDRV_ADDR_INDEX 16 +#define XP_PROP_4_REDRV_ADDR_WIDTH 7 +#define XP_PROP_4_REDRV_IF_INDEX 23 +#define XP_PROP_4_REDRV_IF_WIDTH 1 +#define XP_PROP_4_REDRV_LANE_INDEX 24 +#define XP_PROP_4_REDRV_LANE_WIDTH 3 +#define XP_PROP_4_REDRV_MODEL_INDEX 28 +#define XP_PROP_4_REDRV_MODEL_WIDTH 3 +#define XP_PROP_4_REDRV_PRESENT_INDEX 31 +#define XP_PROP_4_REDRV_PRESENT_WIDTH 1 + +/* I2C Control register offsets */ +#define IC_CON 0x0000 +#define IC_TAR 0x0004 +#define IC_DATA_CMD 0x0010 +#define IC_INTR_STAT 0x002c +#define IC_INTR_MASK 0x0030 +#define IC_RAW_INTR_STAT 0x0034 +#define IC_CLR_INTR 0x0040 +#define IC_CLR_TX_ABRT 0x0054 +#define IC_CLR_STOP_DET 0x0060 +#define IC_ENABLE 0x006c +#define IC_TXFLR 0x0074 +#define IC_RXFLR 0x0078 +#define IC_TX_ABRT_SOURCE 0x0080 +#define IC_ENABLE_STATUS 0x009c +#define IC_COMP_PARAM_1 0x00f4 + +/* I2C Control register entry bit positions and sizes */ +#define IC_COMP_PARAM_1_MAX_SPEED_MODE_INDEX 2 +#define IC_COMP_PARAM_1_MAX_SPEED_MODE_WIDTH 2 +#define IC_COMP_PARAM_1_RX_BUFFER_DEPTH_INDEX 8 +#define IC_COMP_PARAM_1_RX_BUFFER_DEPTH_WIDTH 8 +#define IC_COMP_PARAM_1_TX_BUFFER_DEPTH_INDEX 16 +#define IC_COMP_PARAM_1_TX_BUFFER_DEPTH_WIDTH 8 +#define IC_CON_MASTER_MODE_INDEX 0 +#define IC_CON_MASTER_MODE_WIDTH 1 +#define IC_CON_RESTART_EN_INDEX 5 +#define IC_CON_RESTART_EN_WIDTH 1 +#define IC_CON_RX_FIFO_FULL_HOLD_INDEX 9 +#define IC_CON_RX_FIFO_FULL_HOLD_WIDTH 1 +#define IC_CON_SLAVE_DISABLE_INDEX 6 +#define IC_CON_SLAVE_DISABLE_WIDTH 1 +#define IC_CON_SPEED_INDEX 1 +#define IC_CON_SPEED_WIDTH 2 +#define IC_DATA_CMD_CMD_INDEX 8 +#define IC_DATA_CMD_CMD_WIDTH 1 +#define IC_DATA_CMD_STOP_INDEX 9 +#define IC_DATA_CMD_STOP_WIDTH 1 +#define IC_ENABLE_ABORT_INDEX 1 +#define IC_ENABLE_ABORT_WIDTH 1 +#define IC_ENABLE_EN_INDEX 0 +#define IC_ENABLE_EN_WIDTH 1 +#define IC_ENABLE_STATUS_EN_INDEX 0 +#define IC_ENABLE_STATUS_EN_WIDTH 1 +#define IC_INTR_MASK_TX_EMPTY_INDEX 4 +#define IC_INTR_MASK_TX_EMPTY_WIDTH 1 +#define IC_RAW_INTR_STAT_RX_FULL_INDEX 2 +#define IC_RAW_INTR_STAT_RX_FULL_WIDTH 1 +#define IC_RAW_INTR_STAT_STOP_DET_INDEX 9 +#define IC_RAW_INTR_STAT_STOP_DET_WIDTH 1 +#define IC_RAW_INTR_STAT_TX_ABRT_INDEX 6 +#define IC_RAW_INTR_STAT_TX_ABRT_WIDTH 1 +#define IC_RAW_INTR_STAT_TX_EMPTY_INDEX 4 +#define IC_RAW_INTR_STAT_TX_EMPTY_WIDTH 1 + +/* I2C Control register value */ +#define IC_TX_ABRT_7B_ADDR_NOACK 0x0001 +#define IC_TX_ABRT_ARB_LOST 0x1000 + +/* Descriptor/Packet entry bit positions and sizes */ +#define RX_PACKET_ERRORS_CRC_INDEX 2 +#define RX_PACKET_ERRORS_CRC_WIDTH 1 +#define RX_PACKET_ERRORS_FRAME_INDEX 3 +#define RX_PACKET_ERRORS_FRAME_WIDTH 1 +#define RX_PACKET_ERRORS_LENGTH_INDEX 0 +#define RX_PACKET_ERRORS_LENGTH_WIDTH 1 +#define RX_PACKET_ERRORS_OVERRUN_INDEX 1 +#define RX_PACKET_ERRORS_OVERRUN_WIDTH 1 + +#define RX_PACKET_ATTRIBUTES_CSUM_DONE_INDEX 0 +#define RX_PACKET_ATTRIBUTES_CSUM_DONE_WIDTH 1 +#define RX_PACKET_ATTRIBUTES_VLAN_CTAG_INDEX 1 +#define RX_PACKET_ATTRIBUTES_VLAN_CTAG_WIDTH 1 +#define RX_PACKET_ATTRIBUTES_INCOMPLETE_INDEX 2 +#define RX_PACKET_ATTRIBUTES_INCOMPLETE_WIDTH 1 +#define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_INDEX 3 +#define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_WIDTH 1 +#define RX_PACKET_ATTRIBUTES_CONTEXT_INDEX 4 +#define RX_PACKET_ATTRIBUTES_CONTEXT_WIDTH 1 +#define 
RX_PACKET_ATTRIBUTES_RX_TSTAMP_INDEX 5 +#define RX_PACKET_ATTRIBUTES_RX_TSTAMP_WIDTH 1 +#define RX_PACKET_ATTRIBUTES_RSS_HASH_INDEX 6 +#define RX_PACKET_ATTRIBUTES_RSS_HASH_WIDTH 1 + +#define RX_NORMAL_DESC0_OVT_INDEX 0 +#define RX_NORMAL_DESC0_OVT_WIDTH 16 +#define RX_NORMAL_DESC2_HL_INDEX 0 +#define RX_NORMAL_DESC2_HL_WIDTH 10 +#define RX_NORMAL_DESC3_CDA_INDEX 27 +#define RX_NORMAL_DESC3_CDA_WIDTH 1 +#define RX_NORMAL_DESC3_CTXT_INDEX 30 +#define RX_NORMAL_DESC3_CTXT_WIDTH 1 +#define RX_NORMAL_DESC3_ES_INDEX 15 +#define RX_NORMAL_DESC3_ES_WIDTH 1 +#define RX_NORMAL_DESC3_ETLT_INDEX 16 +#define RX_NORMAL_DESC3_ETLT_WIDTH 4 +#define RX_NORMAL_DESC3_FD_INDEX 29 +#define RX_NORMAL_DESC3_FD_WIDTH 1 +#define RX_NORMAL_DESC3_INTE_INDEX 30 +#define RX_NORMAL_DESC3_INTE_WIDTH 1 +#define RX_NORMAL_DESC3_L34T_INDEX 20 +#define RX_NORMAL_DESC3_L34T_WIDTH 4 +#define RX_NORMAL_DESC3_LD_INDEX 28 +#define RX_NORMAL_DESC3_LD_WIDTH 1 +#define RX_NORMAL_DESC3_OWN_INDEX 31 +#define RX_NORMAL_DESC3_OWN_WIDTH 1 +#define RX_NORMAL_DESC3_PL_INDEX 0 +#define RX_NORMAL_DESC3_PL_WIDTH 14 +#define RX_NORMAL_DESC3_RSV_INDEX 26 +#define RX_NORMAL_DESC3_RSV_WIDTH 1 +#define RX_NORMAL_DESC3_LD_INDEX 28 +#define RX_NORMAL_DESC3_LD_WIDTH 1 + +#define RX_DESC3_L34T_IPV4_TCP 1 +#define RX_DESC3_L34T_IPV4_UDP 2 +#define RX_DESC3_L34T_IPV4_ICMP 3 +#define RX_DESC3_L34T_IPV6_TCP 9 +#define RX_DESC3_L34T_IPV6_UDP 10 +#define RX_DESC3_L34T_IPV6_ICMP 11 + +#define RX_CONTEXT_DESC3_TSA_INDEX 4 +#define RX_CONTEXT_DESC3_TSA_WIDTH 1 +#define RX_CONTEXT_DESC3_TSD_INDEX 6 +#define RX_CONTEXT_DESC3_TSD_WIDTH 1 + +#define TX_PACKET_ATTRIBUTES_CSUM_ENABLE_INDEX 0 +#define TX_PACKET_ATTRIBUTES_CSUM_ENABLE_WIDTH 1 +#define TX_PACKET_ATTRIBUTES_TSO_ENABLE_INDEX 1 +#define TX_PACKET_ATTRIBUTES_TSO_ENABLE_WIDTH 1 +#define TX_PACKET_ATTRIBUTES_VLAN_CTAG_INDEX 2 +#define TX_PACKET_ATTRIBUTES_VLAN_CTAG_WIDTH 1 +#define TX_PACKET_ATTRIBUTES_PTP_INDEX 3 +#define TX_PACKET_ATTRIBUTES_PTP_WIDTH 1 + +#define TX_CONTEXT_DESC2_MSS_INDEX 0 +#define TX_CONTEXT_DESC2_MSS_WIDTH 15 +#define TX_CONTEXT_DESC3_CTXT_INDEX 30 +#define TX_CONTEXT_DESC3_CTXT_WIDTH 1 +#define TX_CONTEXT_DESC3_TCMSSV_INDEX 26 +#define TX_CONTEXT_DESC3_TCMSSV_WIDTH 1 +#define TX_CONTEXT_DESC3_VLTV_INDEX 16 +#define TX_CONTEXT_DESC3_VLTV_WIDTH 1 +#define TX_CONTEXT_DESC3_VT_INDEX 0 +#define TX_CONTEXT_DESC3_VT_WIDTH 16 + +#define TX_NORMAL_DESC2_HL_B1L_INDEX 0 +#define TX_NORMAL_DESC2_HL_B1L_WIDTH 14 +#define TX_NORMAL_DESC2_IC_INDEX 31 +#define TX_NORMAL_DESC2_IC_WIDTH 1 +#define TX_NORMAL_DESC2_TTSE_INDEX 30 +#define TX_NORMAL_DESC2_TTSE_WIDTH 1 +#define TX_NORMAL_DESC2_VTIR_INDEX 14 +#define TX_NORMAL_DESC2_VTIR_WIDTH 2 +#define TX_NORMAL_DESC3_CIC_INDEX 16 +#define TX_NORMAL_DESC3_CIC_WIDTH 2 +#define TX_NORMAL_DESC3_CPC_INDEX 26 +#define TX_NORMAL_DESC3_CPC_WIDTH 2 +#define TX_NORMAL_DESC3_CTXT_INDEX 30 +#define TX_NORMAL_DESC3_CTXT_WIDTH 1 +#define TX_NORMAL_DESC3_FD_INDEX 29 +#define TX_NORMAL_DESC3_FD_WIDTH 1 +#define TX_NORMAL_DESC3_FL_INDEX 0 +#define TX_NORMAL_DESC3_FL_WIDTH 15 +#define TX_NORMAL_DESC3_LD_INDEX 28 +#define TX_NORMAL_DESC3_LD_WIDTH 1 +#define TX_NORMAL_DESC3_OWN_INDEX 31 +#define TX_NORMAL_DESC3_OWN_WIDTH 1 +#define TX_NORMAL_DESC3_TCPHDRLEN_INDEX 19 +#define TX_NORMAL_DESC3_TCPHDRLEN_WIDTH 4 +#define TX_NORMAL_DESC3_TCPPL_INDEX 0 +#define TX_NORMAL_DESC3_TCPPL_WIDTH 18 +#define TX_NORMAL_DESC3_TSE_INDEX 18 +#define TX_NORMAL_DESC3_TSE_WIDTH 1 + +#define TX_NORMAL_DESC2_VLAN_INSERT 0x2 + +/* MDIO undefined or vendor specific registers */ +#ifndef 
MDIO_PMA_10GBR_PMD_CTRL +#define MDIO_PMA_10GBR_PMD_CTRL 0x0096 +#endif + +#ifndef MDIO_PMA_10GBR_FECCTRL +#define MDIO_PMA_10GBR_FECCTRL 0x00ab +#endif + +#ifndef MDIO_PCS_DIG_CTRL +#define MDIO_PCS_DIG_CTRL 0x8000 +#endif + +#ifndef MDIO_AN_XNP +#define MDIO_AN_XNP 0x0016 +#endif + +#ifndef MDIO_AN_LPX +#define MDIO_AN_LPX 0x0019 +#endif + +#ifndef MDIO_AN_COMP_STAT +#define MDIO_AN_COMP_STAT 0x0030 +#endif + +#ifndef MDIO_AN_INTMASK +#define MDIO_AN_INTMASK 0x8001 +#endif + +#ifndef MDIO_AN_INT +#define MDIO_AN_INT 0x8002 +#endif + +#ifndef MDIO_VEND2_AN_ADVERTISE +#define MDIO_VEND2_AN_ADVERTISE 0x0004 +#endif + +#ifndef MDIO_VEND2_AN_LP_ABILITY +#define MDIO_VEND2_AN_LP_ABILITY 0x0005 +#endif + +#ifndef MDIO_VEND2_AN_CTRL +#define MDIO_VEND2_AN_CTRL 0x8001 +#endif + +#ifndef MDIO_VEND2_AN_STAT +#define MDIO_VEND2_AN_STAT 0x8002 +#endif + +#ifndef MDIO_VEND2_PMA_CDR_CONTROL +#define MDIO_VEND2_PMA_CDR_CONTROL 0x8056 +#endif + +#ifndef MDIO_CTRL1_SPEED1G +#define MDIO_CTRL1_SPEED1G (MDIO_CTRL1_SPEED10G & ~BMCR_SPEED100) +#endif + +#ifndef MDIO_VEND2_CTRL1_AN_ENABLE +#define MDIO_VEND2_CTRL1_AN_ENABLE BIT(12) +#endif + +#ifndef MDIO_VEND2_CTRL1_AN_RESTART +#define MDIO_VEND2_CTRL1_AN_RESTART BIT(9) +#endif + +#ifndef MDIO_VEND2_CTRL1_SS6 +#define MDIO_VEND2_CTRL1_SS6 BIT(6) +#endif + +#ifndef MDIO_VEND2_CTRL1_SS13 +#define MDIO_VEND2_CTRL1_SS13 BIT(13) +#endif + +/* MDIO mask values */ +#define AXGBE_AN_CL73_INT_CMPLT BIT(0) +#define AXGBE_AN_CL73_INC_LINK BIT(1) +#define AXGBE_AN_CL73_PG_RCV BIT(2) +#define AXGBE_AN_CL73_INT_MASK 0x07 + +#define AXGBE_XNP_MCF_NULL_MESSAGE 0x001 +#define AXGBE_XNP_ACK_PROCESSED BIT(12) +#define AXGBE_XNP_MP_FORMATTED BIT(13) +#define AXGBE_XNP_NP_EXCHANGE BIT(15) + +#define AXGBE_KR_TRAINING_START BIT(0) +#define AXGBE_KR_TRAINING_ENABLE BIT(1) + +#define AXGBE_PCS_CL37_BP BIT(12) + +#define AXGBE_AN_CL37_INT_CMPLT BIT(0) +#define AXGBE_AN_CL37_INT_MASK 0x01 + +#define AXGBE_AN_CL37_HD_MASK 0x40 +#define AXGBE_AN_CL37_FD_MASK 0x20 + +#define AXGBE_AN_CL37_PCS_MODE_MASK 0x06 +#define AXGBE_AN_CL37_PCS_MODE_BASEX 0x00 +#define AXGBE_AN_CL37_PCS_MODE_SGMII 0x04 +#define AXGBE_AN_CL37_TX_CONFIG_MASK 0x08 +#define AXGBE_AN_CL37_MII_CTRL_8BIT 0x0100 + +#define AXGBE_PMA_CDR_TRACK_EN_MASK 0x01 +#define AXGBE_PMA_CDR_TRACK_EN_OFF 0x00 +#define AXGBE_PMA_CDR_TRACK_EN_ON 0x01 + +/*generic*/ +#define __iomem + +#define rmb() rte_rmb() /* dpdk rte provided rmb */ +#define wmb() rte_wmb() /* dpdk rte provided wmb */ + +#define __le16 u16 +#define __le32 u32 +#define __le64 u64 + +typedef unsigned char u8; +typedef unsigned short u16; +typedef unsigned int u32; +typedef unsigned long long u64; +typedef unsigned long long dma_addr_t; + +static inline uint32_t low32_value(uint64_t addr) +{ + return (addr) & 0x0ffffffff; +} + +static inline uint32_t high32_value(uint64_t addr) +{ + return (addr >> 32) & 0x0ffffffff; +} + +/*END*/ + +/* Bit setting and getting macros + * The get macro will extract the current bit field value from within + * the variable + * + * The set macro will clear the current bit field value within the + * variable and then set the bit field of the variable to the + * specified value + */ +#define GET_BITS(_var, _index, _width) \ + (((_var) >> (_index)) & ((0x1 << (_width)) - 1)) + +#define SET_BITS(_var, _index, _width, _val) \ +do { \ + (_var) &= ~(((0x1 << (_width)) - 1) << (_index)); \ + (_var) |= (((_val) & ((0x1 << (_width)) - 1)) << (_index)); \ +} while (0) + +#define GET_BITS_LE(_var, _index, _width) \ + ((rte_le_to_cpu_32((_var)) >> 
(_index)) & ((0x1 << (_width)) - 1)) + +#define SET_BITS_LE(_var, _index, _width, _val) \ +do { \ + (_var) &= rte_cpu_to_le_32(~(((0x1U << (_width)) - 1) << (_index)));\ + (_var) |= rte_cpu_to_le_32((((_val) & \ + ((0x1U << (_width)) - 1)) << (_index))); \ +} while (0) + +/* Bit setting and getting macros based on register fields + * The get macro uses the bit field definitions formed using the input + * names to extract the current bit field value from within the + * variable + * + * The set macro uses the bit field definitions formed using the input + * names to set the bit field of the variable to the specified value + */ +#define AXGMAC_GET_BITS(_var, _prefix, _field) \ + GET_BITS((_var), \ + _prefix##_##_field##_INDEX, \ + _prefix##_##_field##_WIDTH) + +#define AXGMAC_SET_BITS(_var, _prefix, _field, _val) \ + SET_BITS((_var), \ + _prefix##_##_field##_INDEX, \ + _prefix##_##_field##_WIDTH, (_val)) + +#define AXGMAC_GET_BITS_LE(_var, _prefix, _field) \ + GET_BITS_LE((_var), \ + _prefix##_##_field##_INDEX, \ + _prefix##_##_field##_WIDTH) + +#define AXGMAC_SET_BITS_LE(_var, _prefix, _field, _val) \ + SET_BITS_LE((_var), \ + _prefix##_##_field##_INDEX, \ + _prefix##_##_field##_WIDTH, (_val)) + +/* Macros for reading or writing registers + * The ioread macros will get bit fields or full values using the + * register definitions formed using the input names + * + * The iowrite macros will set bit fields or full values using the + * register definitions formed using the input names + */ +#define AXGMAC_IOREAD(_pdata, _reg) \ + rte_read32((uint8_t *)((_pdata)->xgmac_regs) + (_reg)) + +#define AXGMAC_IOREAD_BITS(_pdata, _reg, _field) \ + GET_BITS(AXGMAC_IOREAD((_pdata), _reg), \ + _reg##_##_field##_INDEX, \ + _reg##_##_field##_WIDTH) + +#define AXGMAC_IOWRITE(_pdata, _reg, _val) \ + rte_write32((_val), \ + (uint8_t *)((_pdata)->xgmac_regs) + (_reg)) + +#define AXGMAC_IOWRITE_BITS(_pdata, _reg, _field, _val) \ +do { \ + u32 reg_val = AXGMAC_IOREAD((_pdata), _reg); \ + SET_BITS(reg_val, \ + _reg##_##_field##_INDEX, \ + _reg##_##_field##_WIDTH, (_val)); \ + AXGMAC_IOWRITE((_pdata), _reg, reg_val); \ +} while (0) + +/* Macros for reading or writing MTL queue or traffic class registers + * Similar to the standard read and write macros except that the + * base register value is calculated by the queue or traffic class number + */ +#define AXGMAC_MTL_IOREAD(_pdata, _n, _reg) \ + rte_read32((uint8_t *)((_pdata)->xgmac_regs) + \ + MTL_Q_BASE + ((_n) * MTL_Q_INC) + (_reg)) + +#define AXGMAC_MTL_IOREAD_BITS(_pdata, _n, _reg, _field) \ + GET_BITS(AXGMAC_MTL_IOREAD((_pdata), (_n), (_reg)), \ + _reg##_##_field##_INDEX, \ + _reg##_##_field##_WIDTH) + +#define AXGMAC_MTL_IOWRITE(_pdata, _n, _reg, _val) \ + rte_write32((_val), (uint8_t *)((_pdata)->xgmac_regs) +\ + MTL_Q_BASE + ((_n) * MTL_Q_INC) + (_reg)) + +#define AXGMAC_MTL_IOWRITE_BITS(_pdata, _n, _reg, _field, _val) \ +do { \ + u32 reg_val = AXGMAC_MTL_IOREAD((_pdata), (_n), _reg); \ + SET_BITS(reg_val, \ + _reg##_##_field##_INDEX, \ + _reg##_##_field##_WIDTH, (_val)); \ + AXGMAC_MTL_IOWRITE((_pdata), (_n), _reg, reg_val); \ +} while (0) + +/* Macros for reading or writing DMA channel registers + * Similar to the standard read and write macros except that the + * base register value is obtained from the ring + */ +#define AXGMAC_DMA_IOREAD(_channel, _reg) \ + rte_read32((uint8_t *)((_channel)->dma_regs) + (_reg)) + +#define AXGMAC_DMA_IOREAD_BITS(_channel, _reg, _field) \ + GET_BITS(AXGMAC_DMA_IOREAD((_channel), _reg), \ + _reg##_##_field##_INDEX, \ + 
_reg##_##_field##_WIDTH) + +#define AXGMAC_DMA_IOWRITE(_channel, _reg, _val) \ + rte_write32((_val), \ + (uint8_t *)((_channel)->dma_regs) + (_reg)) + +#define AXGMAC_DMA_IOWRITE_BITS(_channel, _reg, _field, _val) \ +do { \ + u32 reg_val = AXGMAC_DMA_IOREAD((_channel), _reg); \ + SET_BITS(reg_val, \ + _reg##_##_field##_INDEX, \ + _reg##_##_field##_WIDTH, (_val)); \ + AXGMAC_DMA_IOWRITE((_channel), _reg, reg_val); \ +} while (0) + +/* Macros for building, reading or writing register values or bits + * within the register values of XPCS registers. + */ +#define XPCS_GET_BITS(_var, _prefix, _field) \ + GET_BITS((_var), \ + _prefix##_##_field##_INDEX, \ + _prefix##_##_field##_WIDTH) + +#define XPCS_SET_BITS(_var, _prefix, _field, _val) \ + SET_BITS((_var), \ + _prefix##_##_field##_INDEX, \ + _prefix##_##_field##_WIDTH, (_val)) + +#define XPCS32_IOWRITE(_pdata, _off, _val) \ + rte_write32(_val, \ + (uint8_t *)((_pdata)->xpcs_regs) + (_off)) + +#define XPCS32_IOREAD(_pdata, _off) \ + rte_read32((uint8_t *)((_pdata)->xpcs_regs) + (_off)) + +#define XPCS16_IOWRITE(_pdata, _off, _val) \ + rte_write16(_val, \ + (uint8_t *)((_pdata)->xpcs_regs) + (_off)) + +#define XPCS16_IOREAD(_pdata, _off) \ + rte_read16((uint8_t *)((_pdata)->xpcs_regs) + (_off)) + +/* Macros for building, reading or writing register values or bits + * within the register values of SerDes integration registers. + */ +#define XSIR_GET_BITS(_var, _prefix, _field) \ + GET_BITS((_var), \ + _prefix##_##_field##_INDEX, \ + _prefix##_##_field##_WIDTH) + +#define XSIR_SET_BITS(_var, _prefix, _field, _val) \ + SET_BITS((_var), \ + _prefix##_##_field##_INDEX, \ + _prefix##_##_field##_WIDTH, (_val)) + +#define XSIR0_IOREAD(_pdata, _reg) \ + rte_read16((uint8_t *)((_pdata)->sir0_regs) + (_reg)) + +#define XSIR0_IOREAD_BITS(_pdata, _reg, _field) \ + GET_BITS(XSIR0_IOREAD((_pdata), _reg), \ + _reg##_##_field##_INDEX, \ + _reg##_##_field##_WIDTH) + +#define XSIR0_IOWRITE(_pdata, _reg, _val) \ + rte_write16((_val), \ + (uint8_t *)((_pdata)->sir0_regs) + (_reg)) + +#define XSIR0_IOWRITE_BITS(_pdata, _reg, _field, _val) \ +do { \ + u16 reg_val = XSIR0_IOREAD((_pdata), _reg); \ + SET_BITS(reg_val, \ + _reg##_##_field##_INDEX, \ + _reg##_##_field##_WIDTH, (_val)); \ + XSIR0_IOWRITE((_pdata), _reg, reg_val); \ +} while (0) + +#define XSIR1_IOREAD(_pdata, _reg) \ + rte_read16((uint8_t *)((_pdata)->sir1_regs) + _reg) + +#define XSIR1_IOREAD_BITS(_pdata, _reg, _field) \ + GET_BITS(XSIR1_IOREAD((_pdata), _reg), \ + _reg##_##_field##_INDEX, \ + _reg##_##_field##_WIDTH) + +#define XSIR1_IOWRITE(_pdata, _reg, _val) \ + rte_write16((_val), \ + (uint8_t *)((_pdata)->sir1_regs) + (_reg)) + +#define XSIR1_IOWRITE_BITS(_pdata, _reg, _field, _val) \ +do { \ + u16 reg_val = XSIR1_IOREAD((_pdata), _reg); \ + SET_BITS(reg_val, \ + _reg##_##_field##_INDEX, \ + _reg##_##_field##_WIDTH, (_val)); \ + XSIR1_IOWRITE((_pdata), _reg, reg_val); \ +} while (0) + +/* Macros for building, reading or writing register values or bits + * within the register values of SerDes RxTx registers. 
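+ * These follow the same pattern as the SerDes integration helpers above:
+ * 16-bit reads and writes (plus read-modify-write for the _BITS variants)
+ * against the pdata->rxtx_regs region.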
+ */ +#define XRXTX_IOREAD(_pdata, _reg) \ + rte_read16((uint8_t *)((_pdata)->rxtx_regs) + (_reg)) + +#define XRXTX_IOREAD_BITS(_pdata, _reg, _field) \ + GET_BITS(XRXTX_IOREAD((_pdata), _reg), \ + _reg##_##_field##_INDEX, \ + _reg##_##_field##_WIDTH) + +#define XRXTX_IOWRITE(_pdata, _reg, _val) \ + rte_write16((_val), \ + (uint8_t *)((_pdata)->rxtx_regs) + (_reg)) + +#define XRXTX_IOWRITE_BITS(_pdata, _reg, _field, _val) \ +do { \ + u16 reg_val = XRXTX_IOREAD((_pdata), _reg); \ + SET_BITS(reg_val, \ + _reg##_##_field##_INDEX, \ + _reg##_##_field##_WIDTH, (_val)); \ + XRXTX_IOWRITE((_pdata), _reg, reg_val); \ +} while (0) + +/* Macros for building, reading or writing register values or bits + * within the register values of MAC Control registers. + */ +#define XP_GET_BITS(_var, _prefix, _field) \ + GET_BITS((_var), \ + _prefix##_##_field##_INDEX, \ + _prefix##_##_field##_WIDTH) + +#define XP_SET_BITS(_var, _prefix, _field, _val) \ + SET_BITS((_var), \ + _prefix##_##_field##_INDEX, \ + _prefix##_##_field##_WIDTH, (_val)) + +#define XP_IOREAD(_pdata, _reg) \ + rte_read32((uint8_t *)((_pdata)->xprop_regs) + (_reg)) + +#define XP_IOREAD_BITS(_pdata, _reg, _field) \ + GET_BITS(XP_IOREAD((_pdata), (_reg)), \ + _reg##_##_field##_INDEX, \ + _reg##_##_field##_WIDTH) + +#define XP_IOWRITE(_pdata, _reg, _val) \ + rte_write32((_val), \ + (uint8_t *)((_pdata)->xprop_regs) + (_reg)) + +#define XP_IOWRITE_BITS(_pdata, _reg, _field, _val) \ +do { \ + u32 reg_val = XP_IOREAD((_pdata), (_reg)); \ + SET_BITS(reg_val, \ + _reg##_##_field##_INDEX, \ + _reg##_##_field##_WIDTH, (_val)); \ + XP_IOWRITE((_pdata), (_reg), reg_val); \ +} while (0) + +/* Macros for building, reading or writing register values or bits + * within the register values of I2C Control registers. + */ +#define XI2C_GET_BITS(_var, _prefix, _field) \ + GET_BITS((_var), \ + _prefix##_##_field##_INDEX, \ + _prefix##_##_field##_WIDTH) + +#define XI2C_SET_BITS(_var, _prefix, _field, _val) \ + SET_BITS((_var), \ + _prefix##_##_field##_INDEX, \ + _prefix##_##_field##_WIDTH, (_val)) + +#define XI2C_IOREAD(_pdata, _reg) \ + rte_read32((uint8_t *)((_pdata)->xi2c_regs) + (_reg)) + +#define XI2C_IOREAD_BITS(_pdata, _reg, _field) \ + GET_BITS(XI2C_IOREAD((_pdata), (_reg)), \ + _reg##_##_field##_INDEX, \ + _reg##_##_field##_WIDTH) + +#define XI2C_IOWRITE(_pdata, _reg, _val) \ + rte_write32((_val), \ + (uint8_t *)((_pdata)->xi2c_regs) + (_reg)) + +#define XI2C_IOWRITE_BITS(_pdata, _reg, _field, _val) \ +do { \ + u32 reg_val = XI2C_IOREAD((_pdata), (_reg)); \ + SET_BITS(reg_val, \ + _reg##_##_field##_INDEX, \ + _reg##_##_field##_WIDTH, (_val)); \ + XI2C_IOWRITE((_pdata), (_reg), reg_val); \ +} while (0) + +/* Macros for building, reading or writing register values or bits + * using MDIO. Different from above because of the use of standardized + * Linux include values. No shifting is performed with the bit + * operations, everything works on mask values. 
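+ * For example, XMDIO_READ_BITS(pdata, mmd, MDIO_AN_INT, AXGBE_AN_CL73_INT_MASK)
+ * simply ANDs the register value with the given mask, so callers work with
+ * mask definitions such as the AXGBE_AN_CL73_* values above.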
+ */ +#define XMDIO_READ(_pdata, _mmd, _reg) \ + ((_pdata)->hw_if.read_mmd_regs((_pdata), 0, \ + MII_ADDR_C45 | ((_mmd) << 16) | ((_reg) & 0xffff))) + +#define XMDIO_READ_BITS(_pdata, _mmd, _reg, _mask) \ + (XMDIO_READ((_pdata), _mmd, _reg) & _mask) + +#define XMDIO_WRITE(_pdata, _mmd, _reg, _val) \ + ((_pdata)->hw_if.write_mmd_regs((_pdata), 0, \ + MII_ADDR_C45 | ((_mmd) << 16) | ((_reg) & 0xffff), (_val))) + +#define XMDIO_WRITE_BITS(_pdata, _mmd, _reg, _mask, _val) \ +do { \ + u32 mmd_val = XMDIO_READ((_pdata), (_mmd), (_reg)); \ + mmd_val &= ~(_mask); \ + mmd_val |= (_val); \ + XMDIO_WRITE((_pdata), (_mmd), (_reg), (mmd_val)); \ +} while (0) + +/* + * time_after(a,b) returns true if the time a is after time b. + * + * Do this with "<0" and ">=0" to only test the sign of the result. A + * good compiler would generate better code (and a really good compiler + * wouldn't care). Gcc is currently neither. + */ +#define time_after(a, b) ((long)((b) - (a)) < 0) +#define time_before(a, b) time_after(b, a) + +#define time_after_eq(a, b) ((long)((a) - (b)) >= 0) +#define time_before_eq(a, b) time_after_eq(b, a) + +/*---bitmap support apis---*/ +static inline int axgbe_test_bit(int nr, volatile unsigned long *addr) +{ + int res; + + rte_mb(); + res = ((*addr) & (1UL << nr)) != 0; + rte_mb(); + return res; +} + +static inline void axgbe_set_bit(unsigned int nr, volatile unsigned long *addr) +{ + __sync_fetch_and_or(addr, (1UL << nr)); +} + +static inline void axgbe_clear_bit(int nr, volatile unsigned long *addr) +{ + __sync_fetch_and_and(addr, ~(1UL << nr)); +} + +static inline int axgbe_test_and_clear_bit(int nr, volatile unsigned long *addr) +{ + unsigned long mask = (1UL << nr); + + return __sync_fetch_and_and(addr, ~mask) & mask; +} + +static inline unsigned long msecs_to_timer_cycles(unsigned int m) +{ + return rte_get_timer_hz() * (m / 1000); +} + +#endif /* __AXGBE_COMMON_H__ */ diff --git a/src/spdk/dpdk/drivers/net/axgbe/axgbe_dev.c b/src/spdk/dpdk/drivers/net/axgbe/axgbe_dev.c new file mode 100644 index 000000000..5f0f19592 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/axgbe/axgbe_dev.c @@ -0,0 +1,1220 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved. + * Copyright(c) 2018 Synopsys, Inc. All rights reserved. 
+ */ + +#include "axgbe_ethdev.h" +#include "axgbe_common.h" +#include "axgbe_phy.h" +#include "axgbe_rxtx.h" + +static inline unsigned int axgbe_get_max_frame(struct axgbe_port *pdata) +{ + return pdata->eth_dev->data->mtu + RTE_ETHER_HDR_LEN + + RTE_ETHER_CRC_LEN + VLAN_HLEN; +} + +/* query busy bit */ +static int mdio_complete(struct axgbe_port *pdata) +{ + if (!AXGMAC_IOREAD_BITS(pdata, MAC_MDIOSCCDR, BUSY)) + return 1; + + return 0; +} + +static int axgbe_write_ext_mii_regs(struct axgbe_port *pdata, int addr, + int reg, u16 val) +{ + unsigned int mdio_sca, mdio_sccd; + uint64_t timeout; + + mdio_sca = 0; + AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg); + AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr); + AXGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca); + + mdio_sccd = 0; + AXGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, DATA, val); + AXGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, CMD, 1); + AXGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, BUSY, 1); + AXGMAC_IOWRITE(pdata, MAC_MDIOSCCDR, mdio_sccd); + + timeout = rte_get_timer_cycles() + rte_get_timer_hz(); + while (time_before(rte_get_timer_cycles(), timeout)) { + rte_delay_us(100); + if (mdio_complete(pdata)) + return 0; + } + + PMD_DRV_LOG(ERR, "Mdio write operation timed out\n"); + return -ETIMEDOUT; +} + +static int axgbe_read_ext_mii_regs(struct axgbe_port *pdata, int addr, + int reg) +{ + unsigned int mdio_sca, mdio_sccd; + uint64_t timeout; + + mdio_sca = 0; + AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg); + AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr); + AXGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca); + + mdio_sccd = 0; + AXGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, CMD, 3); + AXGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, BUSY, 1); + AXGMAC_IOWRITE(pdata, MAC_MDIOSCCDR, mdio_sccd); + + timeout = rte_get_timer_cycles() + rte_get_timer_hz(); + + while (time_before(rte_get_timer_cycles(), timeout)) { + rte_delay_us(100); + if (mdio_complete(pdata)) + goto success; + } + + PMD_DRV_LOG(ERR, "Mdio read operation timed out\n"); + return -ETIMEDOUT; + +success: + return AXGMAC_IOREAD_BITS(pdata, MAC_MDIOSCCDR, DATA); +} + +static int axgbe_set_ext_mii_mode(struct axgbe_port *pdata, unsigned int port, + enum axgbe_mdio_mode mode) +{ + unsigned int reg_val = 0; + + switch (mode) { + case AXGBE_MDIO_MODE_CL22: + if (port > AXGMAC_MAX_C22_PORT) + return -EINVAL; + reg_val |= (1 << port); + break; + case AXGBE_MDIO_MODE_CL45: + break; + default: + return -EINVAL; + } + AXGMAC_IOWRITE(pdata, MAC_MDIOCL22R, reg_val); + + return 0; +} + +static int axgbe_read_mmd_regs_v2(struct axgbe_port *pdata, + int prtad __rte_unused, int mmd_reg) +{ + unsigned int mmd_address, index, offset; + int mmd_data; + + if (mmd_reg & MII_ADDR_C45) + mmd_address = mmd_reg & ~MII_ADDR_C45; + else + mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff); + + /* The PCS registers are accessed using mmio. The underlying + * management interface uses indirect addressing to access the MMD + * register sets. This requires accessing of the PCS register in two + * phases, an address phase and a data phase. + * + * The mmio interface is based on 16-bit offsets and values. All + * register offsets must therefore be adjusted by left shifting the + * offset 1 bit and reading 16 bits of data. 
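+ *
+ * The upper bits of the shifted address select the active window (written
+ * to the window-select register below), while the bits covered by
+ * xpcs_window_mask form the 16-bit access offset within that window.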
+ */ + mmd_address <<= 1; + index = mmd_address & ~pdata->xpcs_window_mask; + offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask); + + pthread_mutex_lock(&pdata->xpcs_mutex); + + XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index); + mmd_data = XPCS16_IOREAD(pdata, offset); + + pthread_mutex_unlock(&pdata->xpcs_mutex); + + return mmd_data; +} + +static void axgbe_write_mmd_regs_v2(struct axgbe_port *pdata, + int prtad __rte_unused, + int mmd_reg, int mmd_data) +{ + unsigned int mmd_address, index, offset; + + if (mmd_reg & MII_ADDR_C45) + mmd_address = mmd_reg & ~MII_ADDR_C45; + else + mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff); + + /* The PCS registers are accessed using mmio. The underlying + * management interface uses indirect addressing to access the MMD + * register sets. This requires accessing of the PCS register in two + * phases, an address phase and a data phase. + * + * The mmio interface is based on 16-bit offsets and values. All + * register offsets must therefore be adjusted by left shifting the + * offset 1 bit and writing 16 bits of data. + */ + mmd_address <<= 1; + index = mmd_address & ~pdata->xpcs_window_mask; + offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask); + + pthread_mutex_lock(&pdata->xpcs_mutex); + + XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index); + XPCS16_IOWRITE(pdata, offset, mmd_data); + + pthread_mutex_unlock(&pdata->xpcs_mutex); +} + +static int axgbe_read_mmd_regs(struct axgbe_port *pdata, int prtad, + int mmd_reg) +{ + switch (pdata->vdata->xpcs_access) { + case AXGBE_XPCS_ACCESS_V1: + PMD_DRV_LOG(ERR, "PHY_Version 1 is not supported\n"); + return -1; + case AXGBE_XPCS_ACCESS_V2: + default: + return axgbe_read_mmd_regs_v2(pdata, prtad, mmd_reg); + } +} + +static void axgbe_write_mmd_regs(struct axgbe_port *pdata, int prtad, + int mmd_reg, int mmd_data) +{ + switch (pdata->vdata->xpcs_access) { + case AXGBE_XPCS_ACCESS_V1: + PMD_DRV_LOG(ERR, "PHY_Version 1 is not supported\n"); + return; + case AXGBE_XPCS_ACCESS_V2: + default: + return axgbe_write_mmd_regs_v2(pdata, prtad, mmd_reg, mmd_data); + } +} + +static int axgbe_set_speed(struct axgbe_port *pdata, int speed) +{ + unsigned int ss; + + switch (speed) { + case SPEED_1000: + ss = 0x03; + break; + case SPEED_2500: + ss = 0x02; + break; + case SPEED_10000: + ss = 0x00; + break; + default: + return -EINVAL; + } + + if (AXGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) != ss) + AXGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, ss); + + return 0; +} + +static int axgbe_disable_tx_flow_control(struct axgbe_port *pdata) +{ + unsigned int max_q_count, q_count; + unsigned int reg, reg_val; + unsigned int i; + + /* Clear MTL flow control */ + for (i = 0; i < pdata->rx_q_count; i++) + AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 0); + + /* Clear MAC flow control */ + max_q_count = AXGMAC_MAX_FLOW_CONTROL_QUEUES; + q_count = RTE_MIN(pdata->tx_q_count, + max_q_count); + reg = MAC_Q0TFCR; + for (i = 0; i < q_count; i++) { + reg_val = AXGMAC_IOREAD(pdata, reg); + AXGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 0); + AXGMAC_IOWRITE(pdata, reg, reg_val); + + reg += MAC_QTFCR_INC; + } + + return 0; +} + +static int axgbe_enable_tx_flow_control(struct axgbe_port *pdata) +{ + unsigned int max_q_count, q_count; + unsigned int reg, reg_val; + unsigned int i; + + /* Set MTL flow control */ + for (i = 0; i < pdata->rx_q_count; i++) { + unsigned int ehfc = 0; + + /* Flow control thresholds are established */ + if (pdata->rx_rfd[i]) + ehfc = 1; + + 
AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, ehfc); + + PMD_DRV_LOG(DEBUG, "flow control %s for RXq%u\n", + ehfc ? "enabled" : "disabled", i); + } + + /* Set MAC flow control */ + max_q_count = AXGMAC_MAX_FLOW_CONTROL_QUEUES; + q_count = RTE_MIN(pdata->tx_q_count, + max_q_count); + reg = MAC_Q0TFCR; + for (i = 0; i < q_count; i++) { + reg_val = AXGMAC_IOREAD(pdata, reg); + + /* Enable transmit flow control */ + AXGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 1); + /* Set pause time */ + AXGMAC_SET_BITS(reg_val, MAC_Q0TFCR, PT, 0xffff); + + AXGMAC_IOWRITE(pdata, reg, reg_val); + + reg += MAC_QTFCR_INC; + } + + return 0; +} + +static int axgbe_disable_rx_flow_control(struct axgbe_port *pdata) +{ + AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 0); + + return 0; +} + +static int axgbe_enable_rx_flow_control(struct axgbe_port *pdata) +{ + AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 1); + + return 0; +} + +static int axgbe_config_tx_flow_control(struct axgbe_port *pdata) +{ + if (pdata->tx_pause) + axgbe_enable_tx_flow_control(pdata); + else + axgbe_disable_tx_flow_control(pdata); + + return 0; +} + +static int axgbe_config_rx_flow_control(struct axgbe_port *pdata) +{ + if (pdata->rx_pause) + axgbe_enable_rx_flow_control(pdata); + else + axgbe_disable_rx_flow_control(pdata); + + return 0; +} + +static void axgbe_config_flow_control(struct axgbe_port *pdata) +{ + axgbe_config_tx_flow_control(pdata); + axgbe_config_rx_flow_control(pdata); + + AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 0); +} + +static void axgbe_queue_flow_control_threshold(struct axgbe_port *pdata, + unsigned int queue, + unsigned int q_fifo_size) +{ + unsigned int frame_fifo_size; + unsigned int rfa, rfd; + + frame_fifo_size = AXGMAC_FLOW_CONTROL_ALIGN(axgbe_get_max_frame(pdata)); + + /* This path deals with just maximum frame sizes which are + * limited to a jumbo frame of 9,000 (plus headers, etc.) + * so we can never exceed the maximum allowable RFA/RFD + * values. + */ + if (q_fifo_size <= 2048) { + /* rx_rfd to zero to signal no flow control */ + pdata->rx_rfa[queue] = 0; + pdata->rx_rfd[queue] = 0; + return; + } + + if (q_fifo_size <= 4096) { + /* Between 2048 and 4096 */ + pdata->rx_rfa[queue] = 0; /* Full - 1024 bytes */ + pdata->rx_rfd[queue] = 1; /* Full - 1536 bytes */ + return; + } + + if (q_fifo_size <= frame_fifo_size) { + /* Between 4096 and max-frame */ + pdata->rx_rfa[queue] = 2; /* Full - 2048 bytes */ + pdata->rx_rfd[queue] = 5; /* Full - 3584 bytes */ + return; + } + + if (q_fifo_size <= (frame_fifo_size * 3)) { + /* Between max-frame and 3 max-frames, + * trigger if we get just over a frame of data and + * resume when we have just under half a frame left. 
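+ * rfa is the fill level at which flow control is asserted and rfd the
+ * level at which it is released; both are converted below to the units
+ * expected by the RFA/RFD register fields via AXGMAC_FLOW_CONTROL_VALUE().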
+ */ + rfa = q_fifo_size - frame_fifo_size; + rfd = rfa + (frame_fifo_size / 2); + } else { + /* Above 3 max-frames - trigger when just over + * 2 frames of space available + */ + rfa = frame_fifo_size * 2; + rfa += AXGMAC_FLOW_CONTROL_UNIT; + rfd = rfa + frame_fifo_size; + } + + pdata->rx_rfa[queue] = AXGMAC_FLOW_CONTROL_VALUE(rfa); + pdata->rx_rfd[queue] = AXGMAC_FLOW_CONTROL_VALUE(rfd); +} + +static void axgbe_calculate_flow_control_threshold(struct axgbe_port *pdata) +{ + unsigned int q_fifo_size; + unsigned int i; + + for (i = 0; i < pdata->rx_q_count; i++) { + q_fifo_size = (pdata->fifo + 1) * AXGMAC_FIFO_UNIT; + + axgbe_queue_flow_control_threshold(pdata, i, q_fifo_size); + } +} + +static void axgbe_config_flow_control_threshold(struct axgbe_port *pdata) +{ + unsigned int i; + + for (i = 0; i < pdata->rx_q_count; i++) { + AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFA, + pdata->rx_rfa[i]); + AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFD, + pdata->rx_rfd[i]); + } +} + +static int __axgbe_exit(struct axgbe_port *pdata) +{ + unsigned int count = 2000; + + /* Issue a software reset */ + AXGMAC_IOWRITE_BITS(pdata, DMA_MR, SWR, 1); + rte_delay_us(10); + + /* Poll Until Poll Condition */ + while (--count && AXGMAC_IOREAD_BITS(pdata, DMA_MR, SWR)) + rte_delay_us(500); + + if (!count) + return -EBUSY; + + return 0; +} + +static int axgbe_exit(struct axgbe_port *pdata) +{ + int ret; + + /* To guard against possible incorrectly generated interrupts, + * issue the software reset twice. + */ + ret = __axgbe_exit(pdata); + if (ret) + return ret; + + return __axgbe_exit(pdata); +} + +static int axgbe_flush_tx_queues(struct axgbe_port *pdata) +{ + unsigned int i, count; + + if (AXGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) < 0x21) + return 0; + + for (i = 0; i < pdata->tx_q_count; i++) + AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, FTQ, 1); + + /* Poll Until Poll Condition */ + for (i = 0; i < pdata->tx_q_count; i++) { + count = 2000; + while (--count && AXGMAC_MTL_IOREAD_BITS(pdata, i, + MTL_Q_TQOMR, FTQ)) + rte_delay_us(500); + + if (!count) + return -EBUSY; + } + + return 0; +} + +static void axgbe_config_dma_bus(struct axgbe_port *pdata) +{ + /* Set enhanced addressing mode */ + AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, EAME, 1); + + /* Out standing read/write requests*/ + AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, RD_OSR, 0x3f); + AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, WR_OSR, 0x3f); + + /* Set the System Bus mode */ + AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, UNDEF, 1); + AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, BLEN_32, 1); + AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, AAL, 1); +} + +static void axgbe_config_dma_cache(struct axgbe_port *pdata) +{ + unsigned int arcache, awcache, arwcache; + + arcache = 0; + AXGMAC_SET_BITS(arcache, DMA_AXIARCR, DRC, 0x3); + AXGMAC_IOWRITE(pdata, DMA_AXIARCR, arcache); + + awcache = 0; + AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWC, 0x3); + AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPC, 0x3); + AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPD, 0x1); + AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHC, 0x3); + AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHD, 0x1); + AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RDC, 0x3); + AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RDD, 0x1); + AXGMAC_IOWRITE(pdata, DMA_AXIAWCR, awcache); + + arwcache = 0; + AXGMAC_SET_BITS(arwcache, DMA_AXIAWRCR, TDWD, 0x1); + AXGMAC_SET_BITS(arwcache, DMA_AXIAWRCR, TDWC, 0x3); + AXGMAC_SET_BITS(arwcache, DMA_AXIAWRCR, RDRC, 0x3); + AXGMAC_IOWRITE(pdata, DMA_AXIAWRCR, arwcache); +} + +static void axgbe_config_edma_control(struct 
axgbe_port *pdata) +{ + AXGMAC_IOWRITE(pdata, EDMA_TX_CONTROL, 0x5); + AXGMAC_IOWRITE(pdata, EDMA_RX_CONTROL, 0x5); +} + +static int axgbe_config_osp_mode(struct axgbe_port *pdata) +{ + /* Force DMA to operate on second packet before closing descriptors + * of first packet + */ + struct axgbe_tx_queue *txq; + unsigned int i; + + for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) { + txq = pdata->eth_dev->data->tx_queues[i]; + AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, OSP, + pdata->tx_osp_mode); + } + + return 0; +} + +static int axgbe_config_pblx8(struct axgbe_port *pdata) +{ + struct axgbe_tx_queue *txq; + unsigned int i; + + for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) { + txq = pdata->eth_dev->data->tx_queues[i]; + AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_CR, PBLX8, + pdata->pblx8); + } + return 0; +} + +static int axgbe_config_tx_pbl_val(struct axgbe_port *pdata) +{ + struct axgbe_tx_queue *txq; + unsigned int i; + + for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) { + txq = pdata->eth_dev->data->tx_queues[i]; + AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, PBL, + pdata->tx_pbl); + } + + return 0; +} + +static int axgbe_config_rx_pbl_val(struct axgbe_port *pdata) +{ + struct axgbe_rx_queue *rxq; + unsigned int i; + + for (i = 0; i < pdata->eth_dev->data->nb_rx_queues; i++) { + rxq = pdata->eth_dev->data->rx_queues[i]; + AXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RCR, PBL, + pdata->rx_pbl); + } + + return 0; +} + +static void axgbe_config_rx_buffer_size(struct axgbe_port *pdata) +{ + struct axgbe_rx_queue *rxq; + unsigned int i; + + for (i = 0; i < pdata->eth_dev->data->nb_rx_queues; i++) { + rxq = pdata->eth_dev->data->rx_queues[i]; + + rxq->buf_size = rte_pktmbuf_data_room_size(rxq->mb_pool) - + RTE_PKTMBUF_HEADROOM; + rxq->buf_size = (rxq->buf_size + AXGBE_RX_BUF_ALIGN - 1) & + ~(AXGBE_RX_BUF_ALIGN - 1); + + if (rxq->buf_size > pdata->rx_buf_size) + pdata->rx_buf_size = rxq->buf_size; + + AXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RCR, RBSZ, + rxq->buf_size); + } +} + +static int axgbe_write_rss_reg(struct axgbe_port *pdata, unsigned int type, + unsigned int index, unsigned int val) +{ + unsigned int wait; + + if (AXGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB)) + return -EBUSY; + + AXGMAC_IOWRITE(pdata, MAC_RSSDR, val); + + AXGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, RSSIA, index); + AXGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, ADDRT, type); + AXGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, CT, 0); + AXGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, OB, 1); + + wait = 1000; + while (wait--) { + if (!AXGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB)) + return 0; + + rte_delay_us(1500); + } + + return -EBUSY; +} + +static int axgbe_write_rss_hash_key(struct axgbe_port *pdata) +{ + struct rte_eth_rss_conf *rss_conf; + unsigned int key_regs = sizeof(pdata->rss_key) / sizeof(u32); + unsigned int *key; + int ret; + + rss_conf = &pdata->eth_dev->data->dev_conf.rx_adv_conf.rss_conf; + + if (!rss_conf->rss_key) + key = (unsigned int *)&pdata->rss_key; + else + key = (unsigned int *)&rss_conf->rss_key; + + while (key_regs--) { + ret = axgbe_write_rss_reg(pdata, AXGBE_RSS_HASH_KEY_TYPE, + key_regs, *key++); + if (ret) + return ret; + } + + return 0; +} + +static int axgbe_write_rss_lookup_table(struct axgbe_port *pdata) +{ + unsigned int i; + int ret; + + for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) { + ret = axgbe_write_rss_reg(pdata, + AXGBE_RSS_LOOKUP_TABLE_TYPE, i, + pdata->rss_table[i]); + if (ret) + return ret; + } + + return 0; +} + +static int axgbe_enable_rss(struct axgbe_port *pdata) +{ + int ret; + + /* Program the hash key 
*/ + ret = axgbe_write_rss_hash_key(pdata); + if (ret) + return ret; + + /* Program the lookup table */ + ret = axgbe_write_rss_lookup_table(pdata); + if (ret) + return ret; + + /* Set the RSS options */ + AXGMAC_IOWRITE(pdata, MAC_RSSCR, pdata->rss_options); + + /* Enable RSS */ + AXGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 1); + + return 0; +} + +static void axgbe_rss_options(struct axgbe_port *pdata) +{ + struct rte_eth_rss_conf *rss_conf; + uint64_t rss_hf; + + rss_conf = &pdata->eth_dev->data->dev_conf.rx_adv_conf.rss_conf; + rss_hf = rss_conf->rss_hf; + + if (rss_hf & (ETH_RSS_IPV4 | ETH_RSS_IPV6)) + AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1); + if (rss_hf & (ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV6_TCP)) + AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1); + if (rss_hf & (ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV6_UDP)) + AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1); +} + +static int axgbe_config_rss(struct axgbe_port *pdata) +{ + uint32_t i; + + if (pdata->rss_enable) { + /* Initialize RSS hash key and lookup table */ + uint32_t *key = (uint32_t *)pdata->rss_key; + + for (i = 0; i < sizeof(pdata->rss_key) / 4; i++) + *key++ = (uint32_t)rte_rand(); + for (i = 0; i < AXGBE_RSS_MAX_TABLE_SIZE; i++) + AXGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH, + i % pdata->eth_dev->data->nb_rx_queues); + axgbe_rss_options(pdata); + if (axgbe_enable_rss(pdata)) { + PMD_DRV_LOG(ERR, "Error in enabling RSS support\n"); + return -1; + } + } else { + AXGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 0); + } + + return 0; +} + +static void axgbe_enable_dma_interrupts(struct axgbe_port *pdata) +{ + struct axgbe_tx_queue *txq; + unsigned int dma_ch_isr, dma_ch_ier; + unsigned int i; + + for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) { + txq = pdata->eth_dev->data->tx_queues[i]; + + /* Clear all the interrupts which are set */ + dma_ch_isr = AXGMAC_DMA_IOREAD(txq, DMA_CH_SR); + AXGMAC_DMA_IOWRITE(txq, DMA_CH_SR, dma_ch_isr); + + /* Clear all interrupt enable bits */ + dma_ch_ier = 0; + + /* Enable following interrupts + * NIE - Normal Interrupt Summary Enable + * AIE - Abnormal Interrupt Summary Enable + * FBEE - Fatal Bus Error Enable + */ + AXGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, NIE, 0); + AXGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, AIE, 1); + AXGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 1); + + /* Enable following Rx interrupts + * RBUE - Receive Buffer Unavailable Enable + * RIE - Receive Interrupt Enable (unless using + * per channel interrupts in edge triggered + * mode) + */ + AXGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 0); + + AXGMAC_DMA_IOWRITE(txq, DMA_CH_IER, dma_ch_ier); + } +} + +static void wrapper_tx_desc_init(struct axgbe_port *pdata) +{ + struct axgbe_tx_queue *txq; + unsigned int i; + + for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) { + txq = pdata->eth_dev->data->tx_queues[i]; + txq->cur = 0; + txq->dirty = 0; + /* Update the total number of Tx descriptors */ + AXGMAC_DMA_IOWRITE(txq, DMA_CH_TDRLR, txq->nb_desc - 1); + /* Update the starting address of descriptor ring */ + AXGMAC_DMA_IOWRITE(txq, DMA_CH_TDLR_HI, + high32_value(txq->ring_phys_addr)); + AXGMAC_DMA_IOWRITE(txq, DMA_CH_TDLR_LO, + low32_value(txq->ring_phys_addr)); + } +} + +static int wrapper_rx_desc_init(struct axgbe_port *pdata) +{ + struct axgbe_rx_queue *rxq; + struct rte_mbuf *mbuf; + volatile union axgbe_rx_desc *desc; + unsigned int i, j; + + for (i = 0; i < pdata->eth_dev->data->nb_rx_queues; i++) { + rxq = pdata->eth_dev->data->rx_queues[i]; + + /* 
Initialize software ring entries */ + rxq->mbuf_alloc = 0; + rxq->cur = 0; + rxq->dirty = 0; + desc = AXGBE_GET_DESC_PT(rxq, 0); + + for (j = 0; j < rxq->nb_desc; j++) { + mbuf = rte_mbuf_raw_alloc(rxq->mb_pool); + if (mbuf == NULL) { + PMD_DRV_LOG(ERR, "RX mbuf alloc failed queue_id = %u, idx = %d\n", + (unsigned int)rxq->queue_id, j); + axgbe_dev_rx_queue_release(rxq); + return -ENOMEM; + } + rxq->sw_ring[j] = mbuf; + /* Mbuf populate */ + mbuf->next = NULL; + mbuf->data_off = RTE_PKTMBUF_HEADROOM; + mbuf->nb_segs = 1; + mbuf->port = rxq->port_id; + desc->read.baddr = + rte_cpu_to_le_64( + rte_mbuf_data_iova_default(mbuf)); + rte_wmb(); + AXGMAC_SET_BITS_LE(desc->read.desc3, + RX_NORMAL_DESC3, OWN, 1); + rte_wmb(); + rxq->mbuf_alloc++; + desc++; + } + /* Update the total number of Rx descriptors */ + AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDRLR, + rxq->nb_desc - 1); + /* Update the starting address of descriptor ring */ + AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDLR_HI, + high32_value(rxq->ring_phys_addr)); + AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDLR_LO, + low32_value(rxq->ring_phys_addr)); + /* Update the Rx Descriptor Tail Pointer */ + AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDTR_LO, + low32_value(rxq->ring_phys_addr + + (rxq->nb_desc - 1) * + sizeof(union axgbe_rx_desc))); + } + return 0; +} + +static void axgbe_config_mtl_mode(struct axgbe_port *pdata) +{ + unsigned int i; + + /* Set Tx to weighted round robin scheduling algorithm */ + AXGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_WRR); + + /* Set Tx traffic classes to use WRR algorithm with equal weights */ + for (i = 0; i < pdata->hw_feat.tc_cnt; i++) { + AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA, + MTL_TSA_ETS); + AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW, 1); + } + + /* Set Rx to strict priority algorithm */ + AXGMAC_IOWRITE_BITS(pdata, MTL_OMR, RAA, MTL_RAA_SP); +} + +static int axgbe_config_tsf_mode(struct axgbe_port *pdata, unsigned int val) +{ + unsigned int i; + + for (i = 0; i < pdata->tx_q_count; i++) + AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TSF, val); + + return 0; +} + +static int axgbe_config_rsf_mode(struct axgbe_port *pdata, unsigned int val) +{ + unsigned int i; + + for (i = 0; i < pdata->rx_q_count; i++) + AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RSF, val); + + return 0; +} + +static int axgbe_config_tx_threshold(struct axgbe_port *pdata, + unsigned int val) +{ + unsigned int i; + + for (i = 0; i < pdata->tx_q_count; i++) + AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TTC, val); + + return 0; +} + +static int axgbe_config_rx_threshold(struct axgbe_port *pdata, + unsigned int val) +{ + unsigned int i; + + for (i = 0; i < pdata->rx_q_count; i++) + AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RTC, val); + + return 0; +} + +/*Distrubting fifo size */ +static void axgbe_config_rx_fifo_size(struct axgbe_port *pdata) +{ + unsigned int fifo_size; + unsigned int q_fifo_size; + unsigned int p_fifo, i; + + fifo_size = RTE_MIN(pdata->rx_max_fifo_size, + pdata->hw_feat.rx_fifo_size); + q_fifo_size = fifo_size / pdata->rx_q_count; + + /* Calculate the fifo setting by dividing the queue's fifo size + * by the fifo allocation increment (with 0 representing the + * base allocation increment so decrement the result + * by 1). 
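+ * For example, a queue fifo of 8 * AXGMAC_FIFO_UNIT bytes programs an
+ * RQS value of 7.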
+ */ + p_fifo = q_fifo_size / AXGMAC_FIFO_UNIT; + if (p_fifo) + p_fifo--; + + for (i = 0; i < pdata->rx_q_count; i++) + AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, p_fifo); + pdata->fifo = p_fifo; + + /*Calculate and config Flow control threshold*/ + axgbe_calculate_flow_control_threshold(pdata); + axgbe_config_flow_control_threshold(pdata); + + PMD_DRV_LOG(DEBUG, "%d Rx hardware queues, %d byte fifo per queue\n", + pdata->rx_q_count, q_fifo_size); +} + +static void axgbe_config_tx_fifo_size(struct axgbe_port *pdata) +{ + unsigned int fifo_size; + unsigned int q_fifo_size; + unsigned int p_fifo, i; + + fifo_size = RTE_MIN(pdata->tx_max_fifo_size, + pdata->hw_feat.tx_fifo_size); + q_fifo_size = fifo_size / pdata->tx_q_count; + + /* Calculate the fifo setting by dividing the queue's fifo size + * by the fifo allocation increment (with 0 representing the + * base allocation increment so decrement the result + * by 1). + */ + p_fifo = q_fifo_size / AXGMAC_FIFO_UNIT; + if (p_fifo) + p_fifo--; + + for (i = 0; i < pdata->tx_q_count; i++) + AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, p_fifo); + + PMD_DRV_LOG(DEBUG, "%d Tx hardware queues, %d byte fifo per queue\n", + pdata->tx_q_count, q_fifo_size); +} + +static void axgbe_config_queue_mapping(struct axgbe_port *pdata) +{ + unsigned int qptc, qptc_extra, queue; + unsigned int i, j, reg, reg_val; + + /* Map the MTL Tx Queues to Traffic Classes + * Note: Tx Queues >= Traffic Classes + */ + qptc = pdata->tx_q_count / pdata->hw_feat.tc_cnt; + qptc_extra = pdata->tx_q_count % pdata->hw_feat.tc_cnt; + + for (i = 0, queue = 0; i < pdata->hw_feat.tc_cnt; i++) { + for (j = 0; j < qptc; j++) { + PMD_DRV_LOG(DEBUG, "TXq%u mapped to TC%u\n", queue, i); + AXGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR, + Q2TCMAP, i); + } + if (i < qptc_extra) { + PMD_DRV_LOG(DEBUG, "TXq%u mapped to TC%u\n", queue, i); + AXGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR, + Q2TCMAP, i); + } + } + + if (pdata->rss_enable) { + /* Select dynamic mapping of MTL Rx queue to DMA Rx channel */ + reg = MTL_RQDCM0R; + reg_val = 0; + for (i = 0; i < pdata->rx_q_count;) { + reg_val |= (0x80 << ((i++ % MTL_RQDCM_Q_PER_REG) << 3)); + + if ((i % MTL_RQDCM_Q_PER_REG) && + (i != pdata->rx_q_count)) + continue; + + AXGMAC_IOWRITE(pdata, reg, reg_val); + + reg += MTL_RQDCM_INC; + reg_val = 0; + } + } +} + +static void axgbe_enable_mtl_interrupts(struct axgbe_port *pdata) +{ + unsigned int mtl_q_isr; + unsigned int q_count, i; + + q_count = RTE_MAX(pdata->hw_feat.tx_q_cnt, pdata->hw_feat.rx_q_cnt); + for (i = 0; i < q_count; i++) { + /* Clear all the interrupts which are set */ + mtl_q_isr = AXGMAC_MTL_IOREAD(pdata, i, MTL_Q_ISR); + AXGMAC_MTL_IOWRITE(pdata, i, MTL_Q_ISR, mtl_q_isr); + + /* No MTL interrupts to be enabled */ + AXGMAC_MTL_IOWRITE(pdata, i, MTL_Q_IER, 0); + } +} + +static uint32_t bitrev32(uint32_t x) +{ + x = (x >> 16) | (x << 16); + x = (((x & 0xff00ff00) >> 8) | ((x & 0x00ff00ff) << 8)); + x = (((x & 0xf0f0f0f0) >> 4) | ((x & 0x0f0f0f0f) << 4)); + x = (((x & 0xcccccccc) >> 2) | ((x & 0x33333333) << 2)); + x = (((x & 0xaaaaaaaa) >> 1) | ((x & 0x55555555) << 1)); + return x; +} + +static uint32_t crc32_le(uint32_t crc, uint8_t *p, uint32_t len) +{ + int i; + while (len--) { + crc ^= *p++; + for (i = 0; i < 8; i++) + crc = (crc >> 1) ^ ((crc & 1) ? 
0xedb88320 : 0); + } + return crc; +} + +void axgbe_set_mac_hash_table(struct axgbe_port *pdata, u8 *addr, bool add) +{ + uint32_t crc, htable_index, htable_bitmask; + + crc = bitrev32(~crc32_le(~0, addr, RTE_ETHER_ADDR_LEN)); + crc >>= pdata->hash_table_shift; + htable_index = crc >> 5; + htable_bitmask = 1 << (crc & 0x1f); + + if (add) { + pdata->uc_hash_table[htable_index] |= htable_bitmask; + pdata->uc_hash_mac_addr++; + } else { + pdata->uc_hash_table[htable_index] &= ~htable_bitmask; + pdata->uc_hash_mac_addr--; + } + PMD_DRV_LOG(DEBUG, "%s MAC hash table Bit %d at Index %#x\n", + add ? "set" : "clear", (crc & 0x1f), htable_index); + + AXGMAC_IOWRITE(pdata, MAC_HTR(htable_index), + pdata->uc_hash_table[htable_index]); +} + +void axgbe_set_mac_addn_addr(struct axgbe_port *pdata, u8 *addr, uint32_t index) +{ + unsigned int mac_addr_hi, mac_addr_lo; + u8 *mac_addr; + + mac_addr_lo = 0; + mac_addr_hi = 0; + + if (addr) { + mac_addr = (u8 *)&mac_addr_lo; + mac_addr[0] = addr[0]; + mac_addr[1] = addr[1]; + mac_addr[2] = addr[2]; + mac_addr[3] = addr[3]; + mac_addr = (u8 *)&mac_addr_hi; + mac_addr[0] = addr[4]; + mac_addr[1] = addr[5]; + + /*Address Enable: Use this Addr for Perfect Filtering */ + AXGMAC_SET_BITS(mac_addr_hi, MAC_MACA1HR, AE, 1); + } + + PMD_DRV_LOG(DEBUG, "%s mac address at %#x\n", + addr ? "set" : "clear", index); + + AXGMAC_IOWRITE(pdata, MAC_MACAHR(index), mac_addr_hi); + AXGMAC_IOWRITE(pdata, MAC_MACALR(index), mac_addr_lo); +} + +static int axgbe_set_mac_address(struct axgbe_port *pdata, u8 *addr) +{ + unsigned int mac_addr_hi, mac_addr_lo; + + mac_addr_hi = (addr[5] << 8) | (addr[4] << 0); + mac_addr_lo = (addr[3] << 24) | (addr[2] << 16) | + (addr[1] << 8) | (addr[0] << 0); + + AXGMAC_IOWRITE(pdata, MAC_MACA0HR, mac_addr_hi); + AXGMAC_IOWRITE(pdata, MAC_MACA0LR, mac_addr_lo); + + return 0; +} + +static void axgbe_config_mac_hash_table(struct axgbe_port *pdata) +{ + struct axgbe_hw_features *hw_feat = &pdata->hw_feat; + + pdata->hash_table_shift = 0; + pdata->hash_table_count = 0; + pdata->uc_hash_mac_addr = 0; + memset(pdata->uc_hash_table, 0, sizeof(pdata->uc_hash_table)); + + if (hw_feat->hash_table_size) { + pdata->hash_table_shift = 26 - (hw_feat->hash_table_size >> 7); + pdata->hash_table_count = hw_feat->hash_table_size / 32; + } +} + +static void axgbe_config_mac_address(struct axgbe_port *pdata) +{ + axgbe_set_mac_address(pdata, pdata->mac_addr.addr_bytes); +} + +static void axgbe_config_jumbo_enable(struct axgbe_port *pdata) +{ + unsigned int val; + + val = (pdata->rx_buf_size > AXGMAC_STD_PACKET_MTU) ? 
1 : 0; + + AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val); +} + +static void axgbe_config_mac_speed(struct axgbe_port *pdata) +{ + axgbe_set_speed(pdata, pdata->phy_speed); +} + +static void axgbe_config_checksum_offload(struct axgbe_port *pdata) +{ + if (pdata->rx_csum_enable) + AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 1); + else + AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 0); +} + +static void axgbe_config_mmc(struct axgbe_port *pdata) +{ + struct axgbe_mmc_stats *stats = &pdata->mmc_stats; + + /* Reset stats */ + memset(stats, 0, sizeof(*stats)); + + /* Set counters to reset on read */ + AXGMAC_IOWRITE_BITS(pdata, MMC_CR, ROR, 1); + + /* Reset the counters */ + AXGMAC_IOWRITE_BITS(pdata, MMC_CR, CR, 1); +} + +static int axgbe_init(struct axgbe_port *pdata) +{ + int ret; + + /* Flush Tx queues */ + ret = axgbe_flush_tx_queues(pdata); + if (ret) + return ret; + /* Initialize DMA related features */ + axgbe_config_dma_bus(pdata); + axgbe_config_dma_cache(pdata); + axgbe_config_edma_control(pdata); + axgbe_config_osp_mode(pdata); + axgbe_config_pblx8(pdata); + axgbe_config_tx_pbl_val(pdata); + axgbe_config_rx_pbl_val(pdata); + axgbe_config_rx_buffer_size(pdata); + axgbe_config_rss(pdata); + wrapper_tx_desc_init(pdata); + ret = wrapper_rx_desc_init(pdata); + if (ret) + return ret; + axgbe_enable_dma_interrupts(pdata); + + /* Initialize MTL related features */ + axgbe_config_mtl_mode(pdata); + axgbe_config_queue_mapping(pdata); + axgbe_config_tsf_mode(pdata, pdata->tx_sf_mode); + axgbe_config_rsf_mode(pdata, pdata->rx_sf_mode); + axgbe_config_tx_threshold(pdata, pdata->tx_threshold); + axgbe_config_rx_threshold(pdata, pdata->rx_threshold); + axgbe_config_tx_fifo_size(pdata); + axgbe_config_rx_fifo_size(pdata); + + axgbe_enable_mtl_interrupts(pdata); + + /* Initialize MAC related features */ + axgbe_config_mac_hash_table(pdata); + axgbe_config_mac_address(pdata); + axgbe_config_jumbo_enable(pdata); + axgbe_config_flow_control(pdata); + axgbe_config_mac_speed(pdata); + axgbe_config_checksum_offload(pdata); + axgbe_config_mmc(pdata); + + return 0; +} + +void axgbe_init_function_ptrs_dev(struct axgbe_hw_if *hw_if) +{ + hw_if->exit = axgbe_exit; + hw_if->config_flow_control = axgbe_config_flow_control; + + hw_if->init = axgbe_init; + + hw_if->read_mmd_regs = axgbe_read_mmd_regs; + hw_if->write_mmd_regs = axgbe_write_mmd_regs; + + hw_if->set_speed = axgbe_set_speed; + + hw_if->set_ext_mii_mode = axgbe_set_ext_mii_mode; + hw_if->read_ext_mii_regs = axgbe_read_ext_mii_regs; + hw_if->write_ext_mii_regs = axgbe_write_ext_mii_regs; + /* For FLOW ctrl */ + hw_if->config_tx_flow_control = axgbe_config_tx_flow_control; + hw_if->config_rx_flow_control = axgbe_config_rx_flow_control; +} diff --git a/src/spdk/dpdk/drivers/net/axgbe/axgbe_ethdev.c b/src/spdk/dpdk/drivers/net/axgbe/axgbe_ethdev.c new file mode 100644 index 000000000..867058845 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/axgbe/axgbe_ethdev.c @@ -0,0 +1,1680 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved. + * Copyright(c) 2018 Synopsys, Inc. All rights reserved. 
+ */ + +#include "axgbe_rxtx.h" +#include "axgbe_ethdev.h" +#include "axgbe_common.h" +#include "axgbe_phy.h" +#include "axgbe_regs.h" + +static int eth_axgbe_dev_init(struct rte_eth_dev *eth_dev); +static int eth_axgbe_dev_uninit(struct rte_eth_dev *eth_dev); +static int axgbe_dev_configure(struct rte_eth_dev *dev); +static int axgbe_dev_start(struct rte_eth_dev *dev); +static void axgbe_dev_stop(struct rte_eth_dev *dev); +static void axgbe_dev_interrupt_handler(void *param); +static void axgbe_dev_close(struct rte_eth_dev *dev); +static int axgbe_dev_promiscuous_enable(struct rte_eth_dev *dev); +static int axgbe_dev_promiscuous_disable(struct rte_eth_dev *dev); +static int axgbe_dev_allmulticast_enable(struct rte_eth_dev *dev); +static int axgbe_dev_allmulticast_disable(struct rte_eth_dev *dev); +static int axgbe_dev_mac_addr_set(struct rte_eth_dev *dev, + struct rte_ether_addr *mac_addr); +static int axgbe_dev_mac_addr_add(struct rte_eth_dev *dev, + struct rte_ether_addr *mac_addr, + uint32_t index, + uint32_t vmdq); +static void axgbe_dev_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index); +static int axgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev, + struct rte_ether_addr *mc_addr_set, + uint32_t nb_mc_addr); +static int axgbe_dev_uc_hash_table_set(struct rte_eth_dev *dev, + struct rte_ether_addr *mac_addr, + uint8_t add); +static int axgbe_dev_uc_all_hash_table_set(struct rte_eth_dev *dev, + uint8_t add); +static int axgbe_dev_link_update(struct rte_eth_dev *dev, + int wait_to_complete); +static int axgbe_dev_get_regs(struct rte_eth_dev *dev, + struct rte_dev_reg_info *regs); +static int axgbe_dev_stats_get(struct rte_eth_dev *dev, + struct rte_eth_stats *stats); +static int axgbe_dev_stats_reset(struct rte_eth_dev *dev); +static int axgbe_dev_xstats_get(struct rte_eth_dev *dev, + struct rte_eth_xstat *stats, + unsigned int n); +static int +axgbe_dev_xstats_get_names(struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, + unsigned int size); +static int +axgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, + const uint64_t *ids, + uint64_t *values, + unsigned int n); +static int +axgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, + const uint64_t *ids, + unsigned int size); +static int axgbe_dev_xstats_reset(struct rte_eth_dev *dev); +static int axgbe_dev_info_get(struct rte_eth_dev *dev, + struct rte_eth_dev_info *dev_info); +static int axgbe_flow_ctrl_get(struct rte_eth_dev *dev, + struct rte_eth_fc_conf *fc_conf); +static int axgbe_flow_ctrl_set(struct rte_eth_dev *dev, + struct rte_eth_fc_conf *fc_conf); +static int axgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, + struct rte_eth_pfc_conf *pfc_conf); +static void axgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, + struct rte_eth_rxq_info *qinfo); +static void axgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, + struct rte_eth_txq_info *qinfo); +const uint32_t *axgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev); + +struct axgbe_xstats { + char name[RTE_ETH_XSTATS_NAME_SIZE]; + int offset; +}; + +#define AXGMAC_MMC_STAT(_string, _var) \ + { _string, \ + offsetof(struct axgbe_mmc_stats, _var), \ + } + +static const struct axgbe_xstats axgbe_xstats_strings[] = { + AXGMAC_MMC_STAT("tx_bytes", txoctetcount_gb), + AXGMAC_MMC_STAT("tx_packets", txframecount_gb), + AXGMAC_MMC_STAT("tx_unicast_packets", txunicastframes_gb), + AXGMAC_MMC_STAT("tx_broadcast_packets", txbroadcastframes_gb), + AXGMAC_MMC_STAT("tx_multicast_packets", 
txmulticastframes_gb), + AXGMAC_MMC_STAT("tx_vlan_packets", txvlanframes_g), + AXGMAC_MMC_STAT("tx_64_byte_packets", tx64octets_gb), + AXGMAC_MMC_STAT("tx_65_to_127_byte_packets", tx65to127octets_gb), + AXGMAC_MMC_STAT("tx_128_to_255_byte_packets", tx128to255octets_gb), + AXGMAC_MMC_STAT("tx_256_to_511_byte_packets", tx256to511octets_gb), + AXGMAC_MMC_STAT("tx_512_to_1023_byte_packets", tx512to1023octets_gb), + AXGMAC_MMC_STAT("tx_1024_to_max_byte_packets", tx1024tomaxoctets_gb), + AXGMAC_MMC_STAT("tx_underflow_errors", txunderflowerror), + AXGMAC_MMC_STAT("tx_pause_frames", txpauseframes), + + AXGMAC_MMC_STAT("rx_bytes", rxoctetcount_gb), + AXGMAC_MMC_STAT("rx_packets", rxframecount_gb), + AXGMAC_MMC_STAT("rx_unicast_packets", rxunicastframes_g), + AXGMAC_MMC_STAT("rx_broadcast_packets", rxbroadcastframes_g), + AXGMAC_MMC_STAT("rx_multicast_packets", rxmulticastframes_g), + AXGMAC_MMC_STAT("rx_vlan_packets", rxvlanframes_gb), + AXGMAC_MMC_STAT("rx_64_byte_packets", rx64octets_gb), + AXGMAC_MMC_STAT("rx_65_to_127_byte_packets", rx65to127octets_gb), + AXGMAC_MMC_STAT("rx_128_to_255_byte_packets", rx128to255octets_gb), + AXGMAC_MMC_STAT("rx_256_to_511_byte_packets", rx256to511octets_gb), + AXGMAC_MMC_STAT("rx_512_to_1023_byte_packets", rx512to1023octets_gb), + AXGMAC_MMC_STAT("rx_1024_to_max_byte_packets", rx1024tomaxoctets_gb), + AXGMAC_MMC_STAT("rx_undersize_packets", rxundersize_g), + AXGMAC_MMC_STAT("rx_oversize_packets", rxoversize_g), + AXGMAC_MMC_STAT("rx_crc_errors", rxcrcerror), + AXGMAC_MMC_STAT("rx_crc_errors_small_packets", rxrunterror), + AXGMAC_MMC_STAT("rx_crc_errors_giant_packets", rxjabbererror), + AXGMAC_MMC_STAT("rx_length_errors", rxlengtherror), + AXGMAC_MMC_STAT("rx_out_of_range_errors", rxoutofrangetype), + AXGMAC_MMC_STAT("rx_fifo_overflow_errors", rxfifooverflow), + AXGMAC_MMC_STAT("rx_watchdog_errors", rxwatchdogerror), + AXGMAC_MMC_STAT("rx_pause_frames", rxpauseframes), +}; + +#define AXGBE_XSTATS_COUNT ARRAY_SIZE(axgbe_xstats_strings) + +/* The set of PCI devices this driver supports */ +#define AMD_PCI_VENDOR_ID 0x1022 +#define AMD_PCI_RV_ROOT_COMPLEX_ID 0x15d0 +#define AMD_PCI_AXGBE_DEVICE_V2A 0x1458 +#define AMD_PCI_AXGBE_DEVICE_V2B 0x1459 + +int axgbe_logtype_init; +int axgbe_logtype_driver; + +static const struct rte_pci_id pci_id_axgbe_map[] = { + {RTE_PCI_DEVICE(AMD_PCI_VENDOR_ID, AMD_PCI_AXGBE_DEVICE_V2A)}, + {RTE_PCI_DEVICE(AMD_PCI_VENDOR_ID, AMD_PCI_AXGBE_DEVICE_V2B)}, + { .vendor_id = 0, }, +}; + +static struct axgbe_version_data axgbe_v2a = { + .init_function_ptrs_phy_impl = axgbe_init_function_ptrs_phy_v2, + .xpcs_access = AXGBE_XPCS_ACCESS_V2, + .mmc_64bit = 1, + .tx_max_fifo_size = 229376, + .rx_max_fifo_size = 229376, + .tx_tstamp_workaround = 1, + .ecc_support = 1, + .i2c_support = 1, + .an_cdr_workaround = 1, +}; + +static struct axgbe_version_data axgbe_v2b = { + .init_function_ptrs_phy_impl = axgbe_init_function_ptrs_phy_v2, + .xpcs_access = AXGBE_XPCS_ACCESS_V2, + .mmc_64bit = 1, + .tx_max_fifo_size = 65536, + .rx_max_fifo_size = 65536, + .tx_tstamp_workaround = 1, + .ecc_support = 1, + .i2c_support = 1, + .an_cdr_workaround = 1, +}; + +static const struct rte_eth_desc_lim rx_desc_lim = { + .nb_max = AXGBE_MAX_RING_DESC, + .nb_min = AXGBE_MIN_RING_DESC, + .nb_align = 8, +}; + +static const struct rte_eth_desc_lim tx_desc_lim = { + .nb_max = AXGBE_MAX_RING_DESC, + .nb_min = AXGBE_MIN_RING_DESC, + .nb_align = 8, +}; + +static const struct eth_dev_ops axgbe_eth_dev_ops = { + .dev_configure = axgbe_dev_configure, + .dev_start = axgbe_dev_start, + 
.dev_stop = axgbe_dev_stop, + .dev_close = axgbe_dev_close, + .promiscuous_enable = axgbe_dev_promiscuous_enable, + .promiscuous_disable = axgbe_dev_promiscuous_disable, + .allmulticast_enable = axgbe_dev_allmulticast_enable, + .allmulticast_disable = axgbe_dev_allmulticast_disable, + .mac_addr_set = axgbe_dev_mac_addr_set, + .mac_addr_add = axgbe_dev_mac_addr_add, + .mac_addr_remove = axgbe_dev_mac_addr_remove, + .set_mc_addr_list = axgbe_dev_set_mc_addr_list, + .uc_hash_table_set = axgbe_dev_uc_hash_table_set, + .uc_all_hash_table_set = axgbe_dev_uc_all_hash_table_set, + .link_update = axgbe_dev_link_update, + .get_reg = axgbe_dev_get_regs, + .stats_get = axgbe_dev_stats_get, + .stats_reset = axgbe_dev_stats_reset, + .xstats_get = axgbe_dev_xstats_get, + .xstats_reset = axgbe_dev_xstats_reset, + .xstats_get_names = axgbe_dev_xstats_get_names, + .xstats_get_names_by_id = axgbe_dev_xstats_get_names_by_id, + .xstats_get_by_id = axgbe_dev_xstats_get_by_id, + .dev_infos_get = axgbe_dev_info_get, + .rx_queue_setup = axgbe_dev_rx_queue_setup, + .rx_queue_release = axgbe_dev_rx_queue_release, + .tx_queue_setup = axgbe_dev_tx_queue_setup, + .tx_queue_release = axgbe_dev_tx_queue_release, + .flow_ctrl_get = axgbe_flow_ctrl_get, + .flow_ctrl_set = axgbe_flow_ctrl_set, + .priority_flow_ctrl_set = axgbe_priority_flow_ctrl_set, + .rxq_info_get = axgbe_rxq_info_get, + .txq_info_get = axgbe_txq_info_get, + .dev_supported_ptypes_get = axgbe_dev_supported_ptypes_get, + .rx_descriptor_status = axgbe_dev_rx_descriptor_status, + .tx_descriptor_status = axgbe_dev_tx_descriptor_status, +}; + +static int axgbe_phy_reset(struct axgbe_port *pdata) +{ + pdata->phy_link = -1; + pdata->phy_speed = SPEED_UNKNOWN; + return pdata->phy_if.phy_reset(pdata); +} + +/* + * Interrupt handler triggered by NIC for handling + * specific interrupt. + * + * @param handle + * Pointer to interrupt handle. + * @param param + * The address of parameter (struct rte_eth_dev *) registered before. + * + * @return + * void + */ +static void +axgbe_dev_interrupt_handler(void *param) +{ + struct rte_eth_dev *dev = (struct rte_eth_dev *)param; + struct axgbe_port *pdata = dev->data->dev_private; + unsigned int dma_isr, dma_ch_isr; + + pdata->phy_if.an_isr(pdata); + /*DMA related interrupts*/ + dma_isr = AXGMAC_IOREAD(pdata, DMA_ISR); + PMD_DRV_LOG(DEBUG, "DMA_ISR=%#010x\n", dma_isr); + if (dma_isr) { + if (dma_isr & 1) { + dma_ch_isr = + AXGMAC_DMA_IOREAD((struct axgbe_rx_queue *) + pdata->rx_queues[0], + DMA_CH_SR); + PMD_DRV_LOG(DEBUG, "DMA_CH0_ISR=%#010x\n", dma_ch_isr); + AXGMAC_DMA_IOWRITE((struct axgbe_rx_queue *) + pdata->rx_queues[0], + DMA_CH_SR, dma_ch_isr); + } + } + /* Unmask interrupts since disabled after generation */ + rte_intr_ack(&pdata->pci_dev->intr_handle); +} + +/* + * Configure device link speed and setup link. + * It returns 0 on success.
+ */ +static int +axgbe_dev_configure(struct rte_eth_dev *dev) +{ + struct axgbe_port *pdata = dev->data->dev_private; + /* Checksum offload to hardware */ + pdata->rx_csum_enable = dev->data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_CHECKSUM; + return 0; +} + +static int +axgbe_dev_rx_mq_config(struct rte_eth_dev *dev) +{ + struct axgbe_port *pdata = dev->data->dev_private; + + if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) + pdata->rss_enable = 1; + else if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_NONE) + pdata->rss_enable = 0; + else + return -1; + return 0; +} + +static int +axgbe_dev_start(struct rte_eth_dev *dev) +{ + struct axgbe_port *pdata = dev->data->dev_private; + int ret; + struct rte_eth_dev_data *dev_data = dev->data; + uint16_t max_pkt_len = dev_data->dev_conf.rxmode.max_rx_pkt_len; + + dev->dev_ops = &axgbe_eth_dev_ops; + + PMD_INIT_FUNC_TRACE(); + + /* Multiqueue RSS */ + ret = axgbe_dev_rx_mq_config(dev); + if (ret) { + PMD_DRV_LOG(ERR, "Unable to config RX MQ\n"); + return ret; + } + ret = axgbe_phy_reset(pdata); + if (ret) { + PMD_DRV_LOG(ERR, "phy reset failed\n"); + return ret; + } + ret = pdata->hw_if.init(pdata); + if (ret) { + PMD_DRV_LOG(ERR, "dev_init failed\n"); + return ret; + } + + /* enable uio/vfio intr/eventfd mapping */ + rte_intr_enable(&pdata->pci_dev->intr_handle); + + /* phy start*/ + pdata->phy_if.phy_start(pdata); + axgbe_dev_enable_tx(dev); + axgbe_dev_enable_rx(dev); + + axgbe_clear_bit(AXGBE_STOPPED, &pdata->dev_state); + axgbe_clear_bit(AXGBE_DOWN, &pdata->dev_state); + if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) || + max_pkt_len > pdata->rx_buf_size) + dev_data->scattered_rx = 1; + + /* Scatter Rx handling */ + if (dev_data->scattered_rx) + dev->rx_pkt_burst = &eth_axgbe_recv_scattered_pkts; + else + dev->rx_pkt_burst = &axgbe_recv_pkts; + + return 0; +} + +/* Stop device: disable rx and tx functions to allow for reconfiguring. */ +static void +axgbe_dev_stop(struct rte_eth_dev *dev) +{ + struct axgbe_port *pdata = dev->data->dev_private; + + PMD_INIT_FUNC_TRACE(); + + rte_intr_disable(&pdata->pci_dev->intr_handle); + + if (axgbe_test_bit(AXGBE_STOPPED, &pdata->dev_state)) + return; + + axgbe_set_bit(AXGBE_STOPPED, &pdata->dev_state); + axgbe_dev_disable_tx(dev); + axgbe_dev_disable_rx(dev); + + pdata->phy_if.phy_stop(pdata); + pdata->hw_if.exit(pdata); + memset(&dev->data->dev_link, 0, sizeof(struct rte_eth_link)); + axgbe_set_bit(AXGBE_DOWN, &pdata->dev_state); +} + +/* Clear all resources like TX/RX queues.
*/ +static void +axgbe_dev_close(struct rte_eth_dev *dev) +{ + axgbe_dev_clear_queues(dev); +} + +static int +axgbe_dev_promiscuous_enable(struct rte_eth_dev *dev) +{ + struct axgbe_port *pdata = dev->data->dev_private; + + PMD_INIT_FUNC_TRACE(); + + AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, 1); + + return 0; +} + +static int +axgbe_dev_promiscuous_disable(struct rte_eth_dev *dev) +{ + struct axgbe_port *pdata = dev->data->dev_private; + + PMD_INIT_FUNC_TRACE(); + + AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, 0); + + return 0; +} + +static int +axgbe_dev_allmulticast_enable(struct rte_eth_dev *dev) +{ + struct axgbe_port *pdata = dev->data->dev_private; + + PMD_INIT_FUNC_TRACE(); + + if (AXGMAC_IOREAD_BITS(pdata, MAC_PFR, PM)) + return 0; + AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, 1); + + return 0; +} + +static int +axgbe_dev_allmulticast_disable(struct rte_eth_dev *dev) +{ + struct axgbe_port *pdata = dev->data->dev_private; + + PMD_INIT_FUNC_TRACE(); + + if (!AXGMAC_IOREAD_BITS(pdata, MAC_PFR, PM)) + return 0; + AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, 0); + + return 0; +} + +static int +axgbe_dev_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr) +{ + struct axgbe_port *pdata = dev->data->dev_private; + + /* Set Default MAC Addr */ + axgbe_set_mac_addn_addr(pdata, (u8 *)mac_addr, 0); + + return 0; +} + +static int +axgbe_dev_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr, + uint32_t index, uint32_t pool __rte_unused) +{ + struct axgbe_port *pdata = dev->data->dev_private; + struct axgbe_hw_features *hw_feat = &pdata->hw_feat; + + if (index > hw_feat->addn_mac) { + PMD_DRV_LOG(ERR, "Invalid Index %d\n", index); + return -EINVAL; + } + axgbe_set_mac_addn_addr(pdata, (u8 *)mac_addr, index); + return 0; +} + +static void +axgbe_dev_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index) +{ + struct axgbe_port *pdata = dev->data->dev_private; + struct axgbe_hw_features *hw_feat = &pdata->hw_feat; + + if (index > hw_feat->addn_mac) { + PMD_DRV_LOG(ERR, "Invalid Index %d\n", index); + return; + } + axgbe_set_mac_addn_addr(pdata, NULL, index); +} + +static int +axgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev, + struct rte_ether_addr *mc_addr_set, + uint32_t nb_mc_addr) +{ + struct axgbe_port *pdata = dev->data->dev_private; + struct axgbe_hw_features *hw_feat = &pdata->hw_feat; + uint32_t index = 1; /* 0 is always default mac */ + uint32_t i; + + if (nb_mc_addr > hw_feat->addn_mac) { + PMD_DRV_LOG(ERR, "Invalid Index %d\n", nb_mc_addr); + return -EINVAL; + } + + /* clear unicast addresses */ + for (i = 1; i < hw_feat->addn_mac; i++) { + if (rte_is_zero_ether_addr(&dev->data->mac_addrs[i])) + continue; + memset(&dev->data->mac_addrs[i], 0, + sizeof(struct rte_ether_addr)); + } + + while (nb_mc_addr--) + axgbe_set_mac_addn_addr(pdata, (u8 *)mc_addr_set++, index++); + + return 0; +} + +static int +axgbe_dev_uc_hash_table_set(struct rte_eth_dev *dev, + struct rte_ether_addr *mac_addr, uint8_t add) +{ + struct axgbe_port *pdata = dev->data->dev_private; + struct axgbe_hw_features *hw_feat = &pdata->hw_feat; + + if (!hw_feat->hash_table_size) { + PMD_DRV_LOG(ERR, "MAC Hash Table not supported\n"); + return -ENOTSUP; + } + + axgbe_set_mac_hash_table(pdata, (u8 *)mac_addr, add); + + if (pdata->uc_hash_mac_addr > 0) { + AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 1); + AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 1); + } else { + AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 0); + AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 0); + } + return 0; +} + +static int 
+axgbe_dev_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t add) +{ + struct axgbe_port *pdata = dev->data->dev_private; + struct axgbe_hw_features *hw_feat = &pdata->hw_feat; + uint32_t index; + + if (!hw_feat->hash_table_size) { + PMD_DRV_LOG(ERR, "MAC Hash Table not supported\n"); + return -ENOTSUP; + } + + for (index = 0; index < pdata->hash_table_count; index++) { + if (add) + pdata->uc_hash_table[index] = ~0; + else + pdata->uc_hash_table[index] = 0; + + PMD_DRV_LOG(DEBUG, "%s MAC hash table at Index %#x\n", + add ? "set" : "clear", index); + + AXGMAC_IOWRITE(pdata, MAC_HTR(index), + pdata->uc_hash_table[index]); + } + + if (add) { + AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 1); + AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 1); + } else { + AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 0); + AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 0); + } + return 0; +} + +/* return 0 means link status changed, -1 means not changed */ +static int +axgbe_dev_link_update(struct rte_eth_dev *dev, + int wait_to_complete __rte_unused) +{ + struct axgbe_port *pdata = dev->data->dev_private; + struct rte_eth_link link; + int ret = 0; + + PMD_INIT_FUNC_TRACE(); + rte_delay_ms(800); + + pdata->phy_if.phy_status(pdata); + + memset(&link, 0, sizeof(struct rte_eth_link)); + link.link_duplex = pdata->phy.duplex; + link.link_status = pdata->phy_link; + link.link_speed = pdata->phy_speed; + link.link_autoneg = !(dev->data->dev_conf.link_speeds & + ETH_LINK_SPEED_FIXED); + ret = rte_eth_linkstatus_set(dev, &link); + if (ret == -1) + PMD_DRV_LOG(ERR, "No change in link status\n"); + + return ret; +} + +static int +axgbe_dev_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs) +{ + struct axgbe_port *pdata = dev->data->dev_private; + + if (regs->data == NULL) { + regs->length = axgbe_regs_get_count(pdata); + regs->width = sizeof(uint32_t); + return 0; + } + + /* Only full register dump is supported */ + if (regs->length && + regs->length != (uint32_t)axgbe_regs_get_count(pdata)) + return -ENOTSUP; + + regs->version = pdata->pci_dev->id.vendor_id << 16 | + pdata->pci_dev->id.device_id; + axgbe_regs_dump(pdata, regs->data); + return 0; +} +static void axgbe_read_mmc_stats(struct axgbe_port *pdata) +{ + struct axgbe_mmc_stats *stats = &pdata->mmc_stats; + + /* Freeze counters */ + AXGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 1); + + /* Tx counters */ + stats->txoctetcount_gb += + AXGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_GB_LO); + stats->txoctetcount_gb += + ((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_GB_HI) << 32); + + stats->txframecount_gb += + AXGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_GB_LO); + stats->txframecount_gb += + ((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_GB_HI) << 32); + + stats->txbroadcastframes_g += + AXGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_G_LO); + stats->txbroadcastframes_g += + ((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_G_HI) << 32); + + stats->txmulticastframes_g += + AXGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_G_LO); + stats->txmulticastframes_g += + ((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_G_HI) << 32); + + stats->tx64octets_gb += + AXGMAC_IOREAD(pdata, MMC_TX64OCTETS_GB_LO); + stats->tx64octets_gb += + ((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX64OCTETS_GB_HI) << 32); + + stats->tx65to127octets_gb += + AXGMAC_IOREAD(pdata, MMC_TX65TO127OCTETS_GB_LO); + stats->tx65to127octets_gb += + ((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX65TO127OCTETS_GB_HI) << 32); + + stats->tx128to255octets_gb += + AXGMAC_IOREAD(pdata, MMC_TX128TO255OCTETS_GB_LO); + stats->tx128to255octets_gb += + 
((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX128TO255OCTETS_GB_HI) << 32); + + stats->tx256to511octets_gb += + AXGMAC_IOREAD(pdata, MMC_TX256TO511OCTETS_GB_LO); + stats->tx256to511octets_gb += + ((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX256TO511OCTETS_GB_HI) << 32); + + stats->tx512to1023octets_gb += + AXGMAC_IOREAD(pdata, MMC_TX512TO1023OCTETS_GB_LO); + stats->tx512to1023octets_gb += + ((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX512TO1023OCTETS_GB_HI) << 32); + + stats->tx1024tomaxoctets_gb += + AXGMAC_IOREAD(pdata, MMC_TX1024TOMAXOCTETS_GB_LO); + stats->tx1024tomaxoctets_gb += + ((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX1024TOMAXOCTETS_GB_HI) << 32); + + stats->txunicastframes_gb += + AXGMAC_IOREAD(pdata, MMC_TXUNICASTFRAMES_GB_LO); + stats->txunicastframes_gb += + ((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXUNICASTFRAMES_GB_HI) << 32); + + stats->txmulticastframes_gb += + AXGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_GB_LO); + stats->txmulticastframes_gb += + ((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_GB_HI) << 32); + + stats->txbroadcastframes_g += + AXGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_GB_LO); + stats->txbroadcastframes_g += + ((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_GB_HI) << 32); + + stats->txunderflowerror += + AXGMAC_IOREAD(pdata, MMC_TXUNDERFLOWERROR_LO); + stats->txunderflowerror += + ((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXUNDERFLOWERROR_HI) << 32); + + stats->txoctetcount_g += + AXGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_G_LO); + stats->txoctetcount_g += + ((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_G_HI) << 32); + + stats->txframecount_g += + AXGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_G_LO); + stats->txframecount_g += + ((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_G_HI) << 32); + + stats->txpauseframes += + AXGMAC_IOREAD(pdata, MMC_TXPAUSEFRAMES_LO); + stats->txpauseframes += + ((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXPAUSEFRAMES_HI) << 32); + + stats->txvlanframes_g += + AXGMAC_IOREAD(pdata, MMC_TXVLANFRAMES_G_LO); + stats->txvlanframes_g += + ((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXVLANFRAMES_G_HI) << 32); + + /* Rx counters */ + stats->rxframecount_gb += + AXGMAC_IOREAD(pdata, MMC_RXFRAMECOUNT_GB_LO); + stats->rxframecount_gb += + ((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXFRAMECOUNT_GB_HI) << 32); + + stats->rxoctetcount_gb += + AXGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_GB_LO); + stats->rxoctetcount_gb += + ((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_GB_HI) << 32); + + stats->rxoctetcount_g += + AXGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_G_LO); + stats->rxoctetcount_g += + ((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_G_HI) << 32); + + stats->rxbroadcastframes_g += + AXGMAC_IOREAD(pdata, MMC_RXBROADCASTFRAMES_G_LO); + stats->rxbroadcastframes_g += + ((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXBROADCASTFRAMES_G_HI) << 32); + + stats->rxmulticastframes_g += + AXGMAC_IOREAD(pdata, MMC_RXMULTICASTFRAMES_G_LO); + stats->rxmulticastframes_g += + ((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXMULTICASTFRAMES_G_HI) << 32); + + stats->rxcrcerror += + AXGMAC_IOREAD(pdata, MMC_RXCRCERROR_LO); + stats->rxcrcerror += + ((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXCRCERROR_HI) << 32); + + stats->rxrunterror += + AXGMAC_IOREAD(pdata, MMC_RXRUNTERROR); + + stats->rxjabbererror += + AXGMAC_IOREAD(pdata, MMC_RXJABBERERROR); + + stats->rxundersize_g += + AXGMAC_IOREAD(pdata, MMC_RXUNDERSIZE_G); + + stats->rxoversize_g += + AXGMAC_IOREAD(pdata, MMC_RXOVERSIZE_G); + + stats->rx64octets_gb += + AXGMAC_IOREAD(pdata, MMC_RX64OCTETS_GB_LO); + stats->rx64octets_gb += + ((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX64OCTETS_GB_HI) << 32); + 
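/* Editorial annotation, not part of the imported source file: each wide MMC
 * counter in this function is accumulated from two 32-bit register halves,
 * effectively value += lo; value += (uint64_t)hi << 32. The MMC_CR.MCF write
 * at the top of the function freezes the counters, presumably so each LO/HI
 * pair is sampled consistently before MCF is cleared at the end. The short
 * error counters read above and below (runt, jabber, undersize, oversize,
 * watchdog) have no _HI half and are added as plain 32-bit values.
 */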
+ stats->rx65to127octets_gb += + AXGMAC_IOREAD(pdata, MMC_RX65TO127OCTETS_GB_LO); + stats->rx65to127octets_gb += + ((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX65TO127OCTETS_GB_HI) << 32); + + stats->rx128to255octets_gb += + AXGMAC_IOREAD(pdata, MMC_RX128TO255OCTETS_GB_LO); + stats->rx128to255octets_gb += + ((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX128TO255OCTETS_GB_HI) << 32); + + stats->rx256to511octets_gb += + AXGMAC_IOREAD(pdata, MMC_RX256TO511OCTETS_GB_LO); + stats->rx256to511octets_gb += + ((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX256TO511OCTETS_GB_HI) << 32); + + stats->rx512to1023octets_gb += + AXGMAC_IOREAD(pdata, MMC_RX512TO1023OCTETS_GB_LO); + stats->rx512to1023octets_gb += + ((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX512TO1023OCTETS_GB_HI) << 32); + + stats->rx1024tomaxoctets_gb += + AXGMAC_IOREAD(pdata, MMC_RX1024TOMAXOCTETS_GB_LO); + stats->rx1024tomaxoctets_gb += + ((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX1024TOMAXOCTETS_GB_HI) << 32); + + stats->rxunicastframes_g += + AXGMAC_IOREAD(pdata, MMC_RXUNICASTFRAMES_G_LO); + stats->rxunicastframes_g += + ((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXUNICASTFRAMES_G_HI) << 32); + + stats->rxlengtherror += + AXGMAC_IOREAD(pdata, MMC_RXLENGTHERROR_LO); + stats->rxlengtherror += + ((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXLENGTHERROR_HI) << 32); + + stats->rxoutofrangetype += + AXGMAC_IOREAD(pdata, MMC_RXOUTOFRANGETYPE_LO); + stats->rxoutofrangetype += + ((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXOUTOFRANGETYPE_HI) << 32); + + stats->rxpauseframes += + AXGMAC_IOREAD(pdata, MMC_RXPAUSEFRAMES_LO); + stats->rxpauseframes += + ((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXPAUSEFRAMES_HI) << 32); + + stats->rxfifooverflow += + AXGMAC_IOREAD(pdata, MMC_RXFIFOOVERFLOW_LO); + stats->rxfifooverflow += + ((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXFIFOOVERFLOW_HI) << 32); + + stats->rxvlanframes_gb += + AXGMAC_IOREAD(pdata, MMC_RXVLANFRAMES_GB_LO); + stats->rxvlanframes_gb += + ((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXVLANFRAMES_GB_HI) << 32); + + stats->rxwatchdogerror += + AXGMAC_IOREAD(pdata, MMC_RXWATCHDOGERROR); + + /* Un-freeze counters */ + AXGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 0); +} + +static int +axgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats, + unsigned int n) +{ + struct axgbe_port *pdata = dev->data->dev_private; + unsigned int i; + + if (!stats) + return 0; + + axgbe_read_mmc_stats(pdata); + + for (i = 0; i < n && i < AXGBE_XSTATS_COUNT; i++) { + stats[i].id = i; + stats[i].value = *(u64 *)((uint8_t *)&pdata->mmc_stats + + axgbe_xstats_strings[i].offset); + } + + return i; +} + +static int +axgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, + unsigned int n) +{ + unsigned int i; + + if (n >= AXGBE_XSTATS_COUNT && xstats_names) { + for (i = 0; i < AXGBE_XSTATS_COUNT; ++i) { + snprintf(xstats_names[i].name, + RTE_ETH_XSTATS_NAME_SIZE, "%s", + axgbe_xstats_strings[i].name); + } + } + + return AXGBE_XSTATS_COUNT; +} + +static int +axgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids, + uint64_t *values, unsigned int n) +{ + unsigned int i; + uint64_t values_copy[AXGBE_XSTATS_COUNT]; + + if (!ids) { + struct axgbe_port *pdata = dev->data->dev_private; + + if (n < AXGBE_XSTATS_COUNT) + return AXGBE_XSTATS_COUNT; + + axgbe_read_mmc_stats(pdata); + + for (i = 0; i < AXGBE_XSTATS_COUNT; i++) { + values[i] = *(u64 *)((uint8_t *)&pdata->mmc_stats + + axgbe_xstats_strings[i].offset); + } + + return i; + } + + axgbe_dev_xstats_get_by_id(dev, NULL, values_copy, AXGBE_XSTATS_COUNT); + + for (i = 0; 
i < n; i++) { + if (ids[i] >= AXGBE_XSTATS_COUNT) { + PMD_DRV_LOG(ERR, "id value isn't valid\n"); + return -1; + } + values[i] = values_copy[ids[i]]; + } + return n; +} + +static int +axgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, + const uint64_t *ids, + unsigned int size) +{ + struct rte_eth_xstat_name xstats_names_copy[AXGBE_XSTATS_COUNT]; + unsigned int i; + + if (!ids) + return axgbe_dev_xstats_get_names(dev, xstats_names, size); + + axgbe_dev_xstats_get_names(dev, xstats_names_copy, size); + + for (i = 0; i < size; i++) { + if (ids[i] >= AXGBE_XSTATS_COUNT) { + PMD_DRV_LOG(ERR, "id value isn't valid\n"); + return -1; + } + strcpy(xstats_names[i].name, xstats_names_copy[ids[i]].name); + } + return size; +} + +static int +axgbe_dev_xstats_reset(struct rte_eth_dev *dev) +{ + struct axgbe_port *pdata = dev->data->dev_private; + struct axgbe_mmc_stats *stats = &pdata->mmc_stats; + + /* MMC registers are configured for reset on read */ + axgbe_read_mmc_stats(pdata); + + /* Reset stats */ + memset(stats, 0, sizeof(*stats)); + + return 0; +} + +static int +axgbe_dev_stats_get(struct rte_eth_dev *dev, + struct rte_eth_stats *stats) +{ + struct axgbe_rx_queue *rxq; + struct axgbe_tx_queue *txq; + struct axgbe_port *pdata = dev->data->dev_private; + struct axgbe_mmc_stats *mmc_stats = &pdata->mmc_stats; + unsigned int i; + + axgbe_read_mmc_stats(pdata); + + stats->imissed = mmc_stats->rxfifooverflow; + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = dev->data->rx_queues[i]; + stats->q_ipackets[i] = rxq->pkts; + stats->ipackets += rxq->pkts; + stats->q_ibytes[i] = rxq->bytes; + stats->ibytes += rxq->bytes; + stats->rx_nombuf += rxq->rx_mbuf_alloc_failed; + stats->q_errors[i] = rxq->errors + rxq->rx_mbuf_alloc_failed; + stats->ierrors += rxq->errors; + } + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + txq = dev->data->tx_queues[i]; + stats->q_opackets[i] = txq->pkts; + stats->opackets += txq->pkts; + stats->q_obytes[i] = txq->bytes; + stats->obytes += txq->bytes; + stats->oerrors += txq->errors; + } + + return 0; +} + +static int +axgbe_dev_stats_reset(struct rte_eth_dev *dev) +{ + struct axgbe_rx_queue *rxq; + struct axgbe_tx_queue *txq; + unsigned int i; + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = dev->data->rx_queues[i]; + rxq->pkts = 0; + rxq->bytes = 0; + rxq->errors = 0; + rxq->rx_mbuf_alloc_failed = 0; + } + for (i = 0; i < dev->data->nb_tx_queues; i++) { + txq = dev->data->tx_queues[i]; + txq->pkts = 0; + txq->bytes = 0; + txq->errors = 0; + } + + return 0; +} + +static int +axgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) +{ + struct axgbe_port *pdata = dev->data->dev_private; + + dev_info->max_rx_queues = pdata->rx_ring_count; + dev_info->max_tx_queues = pdata->tx_ring_count; + dev_info->min_rx_bufsize = AXGBE_RX_MIN_BUF_SIZE; + dev_info->max_rx_pktlen = AXGBE_RX_MAX_BUF_SIZE; + dev_info->max_mac_addrs = pdata->hw_feat.addn_mac + 1; + dev_info->max_hash_mac_addrs = pdata->hw_feat.hash_table_size; + dev_info->speed_capa = ETH_LINK_SPEED_10G; + + dev_info->rx_offload_capa = + DEV_RX_OFFLOAD_IPV4_CKSUM | + DEV_RX_OFFLOAD_UDP_CKSUM | + DEV_RX_OFFLOAD_TCP_CKSUM | + DEV_RX_OFFLOAD_JUMBO_FRAME | + DEV_RX_OFFLOAD_SCATTER | + DEV_RX_OFFLOAD_KEEP_CRC; + + dev_info->tx_offload_capa = + DEV_TX_OFFLOAD_IPV4_CKSUM | + DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_TCP_CKSUM; + + if (pdata->hw_feat.rss) { + dev_info->flow_type_rss_offloads = AXGBE_RSS_OFFLOAD; + dev_info->reta_size = 
pdata->hw_feat.hash_table_size; + dev_info->hash_key_size = AXGBE_RSS_HASH_KEY_SIZE; + } + + dev_info->rx_desc_lim = rx_desc_lim; + dev_info->tx_desc_lim = tx_desc_lim; + + dev_info->default_rxconf = (struct rte_eth_rxconf) { + .rx_free_thresh = AXGBE_RX_FREE_THRESH, + }; + + dev_info->default_txconf = (struct rte_eth_txconf) { + .tx_free_thresh = AXGBE_TX_FREE_THRESH, + }; + + return 0; +} + +static int +axgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) +{ + struct axgbe_port *pdata = dev->data->dev_private; + struct xgbe_fc_info fc = pdata->fc; + unsigned int reg, reg_val = 0; + + reg = MAC_Q0TFCR; + reg_val = AXGMAC_IOREAD(pdata, reg); + fc.low_water[0] = AXGMAC_MTL_IOREAD_BITS(pdata, 0, MTL_Q_RQFCR, RFA); + fc.high_water[0] = AXGMAC_MTL_IOREAD_BITS(pdata, 0, MTL_Q_RQFCR, RFD); + fc.pause_time[0] = AXGMAC_GET_BITS(reg_val, MAC_Q0TFCR, PT); + fc.autoneg = pdata->pause_autoneg; + + if (pdata->rx_pause && pdata->tx_pause) + fc.mode = RTE_FC_FULL; + else if (pdata->rx_pause) + fc.mode = RTE_FC_RX_PAUSE; + else if (pdata->tx_pause) + fc.mode = RTE_FC_TX_PAUSE; + else + fc.mode = RTE_FC_NONE; + + fc_conf->high_water = (1024 + (fc.low_water[0] << 9)) / 1024; + fc_conf->low_water = (1024 + (fc.high_water[0] << 9)) / 1024; + fc_conf->pause_time = fc.pause_time[0]; + fc_conf->send_xon = fc.send_xon; + fc_conf->mode = fc.mode; + + return 0; +} + +static int +axgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) +{ + struct axgbe_port *pdata = dev->data->dev_private; + struct xgbe_fc_info fc = pdata->fc; + unsigned int reg, reg_val = 0; + reg = MAC_Q0TFCR; + + pdata->pause_autoneg = fc_conf->autoneg; + pdata->phy.pause_autoneg = pdata->pause_autoneg; + fc.send_xon = fc_conf->send_xon; + AXGMAC_MTL_IOWRITE_BITS(pdata, 0, MTL_Q_RQFCR, RFA, + AXGMAC_FLOW_CONTROL_VALUE(1024 * fc_conf->high_water)); + AXGMAC_MTL_IOWRITE_BITS(pdata, 0, MTL_Q_RQFCR, RFD, + AXGMAC_FLOW_CONTROL_VALUE(1024 * fc_conf->low_water)); + AXGMAC_SET_BITS(reg_val, MAC_Q0TFCR, PT, fc_conf->pause_time); + AXGMAC_IOWRITE(pdata, reg, reg_val); + fc.mode = fc_conf->mode; + + if (fc.mode == RTE_FC_FULL) { + pdata->tx_pause = 1; + pdata->rx_pause = 1; + } else if (fc.mode == RTE_FC_RX_PAUSE) { + pdata->tx_pause = 0; + pdata->rx_pause = 1; + } else if (fc.mode == RTE_FC_TX_PAUSE) { + pdata->tx_pause = 1; + pdata->rx_pause = 0; + } else { + pdata->tx_pause = 0; + pdata->rx_pause = 0; + } + + if (pdata->tx_pause != (unsigned int)pdata->phy.tx_pause) + pdata->hw_if.config_tx_flow_control(pdata); + + if (pdata->rx_pause != (unsigned int)pdata->phy.rx_pause) + pdata->hw_if.config_rx_flow_control(pdata); + + pdata->hw_if.config_flow_control(pdata); + pdata->phy.tx_pause = pdata->tx_pause; + pdata->phy.rx_pause = pdata->rx_pause; + + return 0; +} + +static int +axgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, + struct rte_eth_pfc_conf *pfc_conf) +{ + struct axgbe_port *pdata = dev->data->dev_private; + struct xgbe_fc_info fc = pdata->fc; + uint8_t tc_num; + + tc_num = pdata->pfc_map[pfc_conf->priority]; + + if (pfc_conf->priority >= pdata->hw_feat.tc_cnt) { + PMD_INIT_LOG(ERR, "Max supported traffic class: %d\n", + pdata->hw_feat.tc_cnt); + return -EINVAL; + } + + pdata->pause_autoneg = pfc_conf->fc.autoneg; + pdata->phy.pause_autoneg = pdata->pause_autoneg; + fc.send_xon = pfc_conf->fc.send_xon; + AXGMAC_MTL_IOWRITE_BITS(pdata, tc_num, MTL_Q_RQFCR, RFA, + AXGMAC_FLOW_CONTROL_VALUE(1024 * pfc_conf->fc.high_water)); + AXGMAC_MTL_IOWRITE_BITS(pdata, tc_num, MTL_Q_RQFCR, RFD, + 
AXGMAC_FLOW_CONTROL_VALUE(1024 * pfc_conf->fc.low_water)); + + switch (tc_num) { + case 0: + AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM0R, + PSTC0, pfc_conf->fc.pause_time); + break; + case 1: + AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM0R, + PSTC1, pfc_conf->fc.pause_time); + break; + case 2: + AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM0R, + PSTC2, pfc_conf->fc.pause_time); + break; + case 3: + AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM0R, + PSTC3, pfc_conf->fc.pause_time); + break; + case 4: + AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM1R, + PSTC4, pfc_conf->fc.pause_time); + break; + case 5: + AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM1R, + PSTC5, pfc_conf->fc.pause_time); + break; + case 7: + AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM1R, + PSTC6, pfc_conf->fc.pause_time); + break; + case 6: + AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM1R, + PSTC7, pfc_conf->fc.pause_time); + break; + } + + fc.mode = pfc_conf->fc.mode; + + if (fc.mode == RTE_FC_FULL) { + pdata->tx_pause = 1; + pdata->rx_pause = 1; + AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 1); + } else if (fc.mode == RTE_FC_RX_PAUSE) { + pdata->tx_pause = 0; + pdata->rx_pause = 1; + AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 1); + } else if (fc.mode == RTE_FC_TX_PAUSE) { + pdata->tx_pause = 1; + pdata->rx_pause = 0; + AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 0); + } else { + pdata->tx_pause = 0; + pdata->rx_pause = 0; + AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 0); + } + + if (pdata->tx_pause != (unsigned int)pdata->phy.tx_pause) + pdata->hw_if.config_tx_flow_control(pdata); + + if (pdata->rx_pause != (unsigned int)pdata->phy.rx_pause) + pdata->hw_if.config_rx_flow_control(pdata); + pdata->hw_if.config_flow_control(pdata); + pdata->phy.tx_pause = pdata->tx_pause; + pdata->phy.rx_pause = pdata->rx_pause; + + return 0; +} + +void +axgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, + struct rte_eth_rxq_info *qinfo) +{ + struct axgbe_rx_queue *rxq; + + rxq = dev->data->rx_queues[queue_id]; + qinfo->mp = rxq->mb_pool; + qinfo->scattered_rx = dev->data->scattered_rx; + qinfo->nb_desc = rxq->nb_desc; + qinfo->conf.rx_free_thresh = rxq->free_thresh; +} + +void +axgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, + struct rte_eth_txq_info *qinfo) +{ + struct axgbe_tx_queue *txq; + + txq = dev->data->tx_queues[queue_id]; + qinfo->nb_desc = txq->nb_desc; + qinfo->conf.tx_free_thresh = txq->free_thresh; +} +const uint32_t * +axgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev) +{ + static const uint32_t ptypes[] = { + RTE_PTYPE_L2_ETHER, + RTE_PTYPE_L2_ETHER_TIMESYNC, + RTE_PTYPE_L2_ETHER_LLDP, + RTE_PTYPE_L2_ETHER_ARP, + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN, + RTE_PTYPE_L3_IPV6_EXT_UNKNOWN, + RTE_PTYPE_L4_FRAG, + RTE_PTYPE_L4_ICMP, + RTE_PTYPE_L4_NONFRAG, + RTE_PTYPE_L4_SCTP, + RTE_PTYPE_L4_TCP, + RTE_PTYPE_L4_UDP, + RTE_PTYPE_TUNNEL_GRENAT, + RTE_PTYPE_TUNNEL_IP, + RTE_PTYPE_INNER_L2_ETHER, + RTE_PTYPE_INNER_L2_ETHER_VLAN, + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN, + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN, + RTE_PTYPE_INNER_L4_FRAG, + RTE_PTYPE_INNER_L4_ICMP, + RTE_PTYPE_INNER_L4_NONFRAG, + RTE_PTYPE_INNER_L4_SCTP, + RTE_PTYPE_INNER_L4_TCP, + RTE_PTYPE_INNER_L4_UDP, + RTE_PTYPE_UNKNOWN + }; + + if (dev->rx_pkt_burst == axgbe_recv_pkts) + return ptypes; + return NULL; +} + +static void axgbe_get_all_hw_features(struct axgbe_port *pdata) +{ + unsigned int mac_hfr0, mac_hfr1, mac_hfr2; + struct axgbe_hw_features *hw_feat = &pdata->hw_feat; + + mac_hfr0 = AXGMAC_IOREAD(pdata, MAC_HWF0R); + mac_hfr1 = AXGMAC_IOREAD(pdata, MAC_HWF1R); + mac_hfr2 = AXGMAC_IOREAD(pdata, MAC_HWF2R); + + 
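/* Editorial annotation, not part of the imported source file: the three
 * MAC_HWFxR words read above are raw capability snapshots, and the fields
 * extracted below are encodings rather than final values. For example the
 * FIFO size fields are converted at the end of this function as
 * bytes = 1 << (encoded + 7), so an encoding of 9 means 64 KB, and the
 * queue/channel/TC counts are zero-based and incremented by one.
 */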
memset(hw_feat, 0, sizeof(*hw_feat)); + + hw_feat->version = AXGMAC_IOREAD(pdata, MAC_VR); + + /* Hardware feature register 0 */ + hw_feat->gmii = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL); + hw_feat->vlhash = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH); + hw_feat->sma = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SMASEL); + hw_feat->rwk = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RWKSEL); + hw_feat->mgk = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MGKSEL); + hw_feat->mmc = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MMCSEL); + hw_feat->aoe = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, ARPOFFSEL); + hw_feat->ts = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSEL); + hw_feat->eee = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, EEESEL); + hw_feat->tx_coe = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TXCOESEL); + hw_feat->rx_coe = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RXCOESEL); + hw_feat->addn_mac = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, + ADDMACADRSEL); + hw_feat->ts_src = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSTSSEL); + hw_feat->sa_vlan_ins = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SAVLANINS); + + /* Hardware feature register 1 */ + hw_feat->rx_fifo_size = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, + RXFIFOSIZE); + hw_feat->tx_fifo_size = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, + TXFIFOSIZE); + hw_feat->adv_ts_hi = AXGMAC_GET_BITS(mac_hfr1, + MAC_HWF1R, ADVTHWORD); + hw_feat->dma_width = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADDR64); + hw_feat->dcb = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN); + hw_feat->sph = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN); + hw_feat->tso = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN); + hw_feat->dma_debug = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA); + hw_feat->rss = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, RSSEN); + hw_feat->tc_cnt = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC); + hw_feat->hash_table_size = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, + HASHTBLSZ); + hw_feat->l3l4_filter_num = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, + L3L4FNUM); + + /* Hardware feature register 2 */ + hw_feat->rx_q_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXQCNT); + hw_feat->tx_q_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXQCNT); + hw_feat->rx_ch_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXCHCNT); + hw_feat->tx_ch_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXCHCNT); + hw_feat->pps_out_num = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, PPSOUTNUM); + hw_feat->aux_snap_num = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, + AUXSNAPNUM); + + /* Translate the Hash Table size into actual number */ + switch (hw_feat->hash_table_size) { + case 0: + break; + case 1: + hw_feat->hash_table_size = 64; + break; + case 2: + hw_feat->hash_table_size = 128; + break; + case 3: + hw_feat->hash_table_size = 256; + break; + } + + /* Translate the address width setting into actual number */ + switch (hw_feat->dma_width) { + case 0: + hw_feat->dma_width = 32; + break; + case 1: + hw_feat->dma_width = 40; + break; + case 2: + hw_feat->dma_width = 48; + break; + default: + hw_feat->dma_width = 32; + } + + /* The Queue, Channel and TC counts are zero based so increment them + * to get the actual number + */ + hw_feat->rx_q_cnt++; + hw_feat->tx_q_cnt++; + hw_feat->rx_ch_cnt++; + hw_feat->tx_ch_cnt++; + hw_feat->tc_cnt++; + + /* Translate the fifo sizes into actual numbers */ + hw_feat->rx_fifo_size = 1 << (hw_feat->rx_fifo_size + 7); + hw_feat->tx_fifo_size = 1 << (hw_feat->tx_fifo_size + 7); +} + +static void axgbe_init_all_fptrs(struct axgbe_port *pdata) +{ + axgbe_init_function_ptrs_dev(&pdata->hw_if); + axgbe_init_function_ptrs_phy(&pdata->phy_if); + 
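/* Editorial annotation, not part of the imported source file: hw_if, phy_if
 * and i2c_if are plain function-pointer tables. Once the generic tables are
 * populated here, the version-specific hook invoked below
 * (vdata->init_function_ptrs_phy_impl, i.e. axgbe_init_function_ptrs_phy_v2
 * for both the V2A and V2B parts) fills in the implementation-specific PHY
 * callbacks carried in phy_if.phy_impl.
 */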
axgbe_init_function_ptrs_i2c(&pdata->i2c_if); + pdata->vdata->init_function_ptrs_phy_impl(&pdata->phy_if); +} + +static void axgbe_set_counts(struct axgbe_port *pdata) +{ + /* Set all the function pointers */ + axgbe_init_all_fptrs(pdata); + + /* Populate the hardware features */ + axgbe_get_all_hw_features(pdata); + + /* Set default max values if not provided */ + if (!pdata->tx_max_channel_count) + pdata->tx_max_channel_count = pdata->hw_feat.tx_ch_cnt; + if (!pdata->rx_max_channel_count) + pdata->rx_max_channel_count = pdata->hw_feat.rx_ch_cnt; + + if (!pdata->tx_max_q_count) + pdata->tx_max_q_count = pdata->hw_feat.tx_q_cnt; + if (!pdata->rx_max_q_count) + pdata->rx_max_q_count = pdata->hw_feat.rx_q_cnt; + + /* Calculate the number of Tx and Rx rings to be created + * -Tx (DMA) Channels map 1-to-1 to Tx Queues so set + * the number of Tx queues to the number of Tx channels + * enabled + * -Rx (DMA) Channels do not map 1-to-1 so use the actual + * number of Rx queues or maximum allowed + */ + pdata->tx_ring_count = RTE_MIN(pdata->hw_feat.tx_ch_cnt, + pdata->tx_max_channel_count); + pdata->tx_ring_count = RTE_MIN(pdata->tx_ring_count, + pdata->tx_max_q_count); + + pdata->tx_q_count = pdata->tx_ring_count; + + pdata->rx_ring_count = RTE_MIN(pdata->hw_feat.rx_ch_cnt, + pdata->rx_max_channel_count); + + pdata->rx_q_count = RTE_MIN(pdata->hw_feat.rx_q_cnt, + pdata->rx_max_q_count); +} + +static void axgbe_default_config(struct axgbe_port *pdata) +{ + pdata->pblx8 = DMA_PBL_X8_ENABLE; + pdata->tx_sf_mode = MTL_TSF_ENABLE; + pdata->tx_threshold = MTL_TX_THRESHOLD_64; + pdata->tx_pbl = DMA_PBL_32; + pdata->tx_osp_mode = DMA_OSP_ENABLE; + pdata->rx_sf_mode = MTL_RSF_ENABLE; + pdata->rx_threshold = MTL_RX_THRESHOLD_64; + pdata->rx_pbl = DMA_PBL_32; + pdata->pause_autoneg = 1; + pdata->tx_pause = 0; + pdata->rx_pause = 0; + pdata->phy_speed = SPEED_UNKNOWN; + pdata->power_down = 0; +} + +static int +pci_device_cmp(const struct rte_device *dev, const void *_pci_id) +{ + const struct rte_pci_device *pdev = RTE_DEV_TO_PCI_CONST(dev); + const struct rte_pci_id *pcid = _pci_id; + + if (pdev->id.vendor_id == AMD_PCI_VENDOR_ID && + pdev->id.device_id == pcid->device_id) + return 0; + return 1; +} + +static bool +pci_search_device(int device_id) +{ + struct rte_bus *pci_bus; + struct rte_pci_id dev_id; + + dev_id.device_id = device_id; + pci_bus = rte_bus_find_by_name("pci"); + return (pci_bus != NULL) && + (pci_bus->find_device(NULL, pci_device_cmp, &dev_id) != NULL); +} + +/* + * It returns 0 on success. + */ +static int +eth_axgbe_dev_init(struct rte_eth_dev *eth_dev) +{ + PMD_INIT_FUNC_TRACE(); + struct axgbe_port *pdata; + struct rte_pci_device *pci_dev; + uint32_t reg, mac_lo, mac_hi; + uint32_t len; + int ret; + + eth_dev->dev_ops = &axgbe_eth_dev_ops; + + /* + * For secondary processes, we don't initialise any further as primary + * has already done this work. 
+ */ + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; + + pdata = eth_dev->data->dev_private; + /* initial state */ + axgbe_set_bit(AXGBE_DOWN, &pdata->dev_state); + axgbe_set_bit(AXGBE_STOPPED, &pdata->dev_state); + pdata->eth_dev = eth_dev; + + pci_dev = RTE_DEV_TO_PCI(eth_dev->device); + pdata->pci_dev = pci_dev; + + /* + * Use root complex device ID to differentiate RV AXGBE vs SNOWY AXGBE + */ + if (pci_search_device(AMD_PCI_RV_ROOT_COMPLEX_ID)) { + pdata->xpcs_window_def_reg = PCS_V2_RV_WINDOW_DEF; + pdata->xpcs_window_sel_reg = PCS_V2_RV_WINDOW_SELECT; + } else { + pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF; + pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT; + } + + pdata->xgmac_regs = + (void *)pci_dev->mem_resource[AXGBE_AXGMAC_BAR].addr; + pdata->xprop_regs = (void *)((uint8_t *)pdata->xgmac_regs + + AXGBE_MAC_PROP_OFFSET); + pdata->xi2c_regs = (void *)((uint8_t *)pdata->xgmac_regs + + AXGBE_I2C_CTRL_OFFSET); + pdata->xpcs_regs = (void *)pci_dev->mem_resource[AXGBE_XPCS_BAR].addr; + + /* version specific driver data*/ + if (pci_dev->id.device_id == AMD_PCI_AXGBE_DEVICE_V2A) + pdata->vdata = &axgbe_v2a; + else + pdata->vdata = &axgbe_v2b; + + /* Configure the PCS indirect addressing support */ + reg = XPCS32_IOREAD(pdata, pdata->xpcs_window_def_reg); + pdata->xpcs_window = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, OFFSET); + pdata->xpcs_window <<= 6; + pdata->xpcs_window_size = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, SIZE); + pdata->xpcs_window_size = 1 << (pdata->xpcs_window_size + 7); + pdata->xpcs_window_mask = pdata->xpcs_window_size - 1; + + PMD_INIT_LOG(DEBUG, + "xpcs window :%x, size :%x, mask :%x ", pdata->xpcs_window, + pdata->xpcs_window_size, pdata->xpcs_window_mask); + XP_IOWRITE(pdata, XP_INT_EN, 0x1fffff); + + /* Retrieve the MAC address */ + mac_lo = XP_IOREAD(pdata, XP_MAC_ADDR_LO); + mac_hi = XP_IOREAD(pdata, XP_MAC_ADDR_HI); + pdata->mac_addr.addr_bytes[0] = mac_lo & 0xff; + pdata->mac_addr.addr_bytes[1] = (mac_lo >> 8) & 0xff; + pdata->mac_addr.addr_bytes[2] = (mac_lo >> 16) & 0xff; + pdata->mac_addr.addr_bytes[3] = (mac_lo >> 24) & 0xff; + pdata->mac_addr.addr_bytes[4] = mac_hi & 0xff; + pdata->mac_addr.addr_bytes[5] = (mac_hi >> 8) & 0xff; + + len = RTE_ETHER_ADDR_LEN * AXGBE_MAX_MAC_ADDRS; + eth_dev->data->mac_addrs = rte_zmalloc("axgbe_mac_addr", len, 0); + + if (!eth_dev->data->mac_addrs) { + PMD_INIT_LOG(ERR, + "Failed to alloc %u bytes needed to " + "store MAC addresses", len); + return -ENOMEM; + } + + /* Allocate memory for storing hash filter MAC addresses */ + len = RTE_ETHER_ADDR_LEN * AXGBE_MAX_HASH_MAC_ADDRS; + eth_dev->data->hash_mac_addrs = rte_zmalloc("axgbe_hash_mac_addr", + len, 0); + + if (eth_dev->data->hash_mac_addrs == NULL) { + PMD_INIT_LOG(ERR, + "Failed to allocate %d bytes needed to " + "store MAC addresses", len); + return -ENOMEM; + } + + if (!rte_is_valid_assigned_ether_addr(&pdata->mac_addr)) + rte_eth_random_addr(pdata->mac_addr.addr_bytes); + + /* Copy the permanent MAC address */ + rte_ether_addr_copy(&pdata->mac_addr, &eth_dev->data->mac_addrs[0]); + + /* Clock settings */ + pdata->sysclk_rate = AXGBE_V2_DMA_CLOCK_FREQ; + pdata->ptpclk_rate = AXGBE_V2_PTP_CLOCK_FREQ; + + /* Set the DMA coherency values */ + pdata->coherent = 1; + pdata->axdomain = AXGBE_DMA_OS_AXDOMAIN; + pdata->arcache = AXGBE_DMA_OS_ARCACHE; + pdata->awcache = AXGBE_DMA_OS_AWCACHE; + + /* Set the maximum channels and queues */ + reg = XP_IOREAD(pdata, XP_PROP_1); + pdata->tx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_DMA); +
pdata->rx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_DMA); + pdata->tx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_QUEUES); + pdata->rx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_QUEUES); + + /* Set the hardware channel and queue counts */ + axgbe_set_counts(pdata); + + /* Set the maximum fifo amounts */ + reg = XP_IOREAD(pdata, XP_PROP_2); + pdata->tx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, TX_FIFO_SIZE); + pdata->tx_max_fifo_size *= 16384; + pdata->tx_max_fifo_size = RTE_MIN(pdata->tx_max_fifo_size, + pdata->vdata->tx_max_fifo_size); + pdata->rx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, RX_FIFO_SIZE); + pdata->rx_max_fifo_size *= 16384; + pdata->rx_max_fifo_size = RTE_MIN(pdata->rx_max_fifo_size, + pdata->vdata->rx_max_fifo_size); + /* Issue software reset to DMA */ + ret = pdata->hw_if.exit(pdata); + if (ret) + PMD_DRV_LOG(ERR, "hw_if->exit EBUSY error\n"); + + /* Set default configuration data */ + axgbe_default_config(pdata); + + /* Set default max values if not provided */ + if (!pdata->tx_max_fifo_size) + pdata->tx_max_fifo_size = pdata->hw_feat.tx_fifo_size; + if (!pdata->rx_max_fifo_size) + pdata->rx_max_fifo_size = pdata->hw_feat.rx_fifo_size; + + pdata->tx_desc_count = AXGBE_MAX_RING_DESC; + pdata->rx_desc_count = AXGBE_MAX_RING_DESC; + pthread_mutex_init(&pdata->xpcs_mutex, NULL); + pthread_mutex_init(&pdata->i2c_mutex, NULL); + pthread_mutex_init(&pdata->an_mutex, NULL); + pthread_mutex_init(&pdata->phy_mutex, NULL); + + ret = pdata->phy_if.phy_init(pdata); + if (ret) { + rte_free(eth_dev->data->mac_addrs); + eth_dev->data->mac_addrs = NULL; + return ret; + } + + rte_intr_callback_register(&pci_dev->intr_handle, + axgbe_dev_interrupt_handler, + (void *)eth_dev); + PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x", + eth_dev->data->port_id, pci_dev->id.vendor_id, + pci_dev->id.device_id); + + return 0; +} + +static int +eth_axgbe_dev_uninit(struct rte_eth_dev *eth_dev) +{ + struct rte_pci_device *pci_dev; + + PMD_INIT_FUNC_TRACE(); + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; + + pci_dev = RTE_DEV_TO_PCI(eth_dev->device); + eth_dev->dev_ops = NULL; + eth_dev->rx_pkt_burst = NULL; + eth_dev->tx_pkt_burst = NULL; + axgbe_dev_clear_queues(eth_dev); + + /* disable uio intr before callback unregister */ + rte_intr_disable(&pci_dev->intr_handle); + rte_intr_callback_unregister(&pci_dev->intr_handle, + axgbe_dev_interrupt_handler, + (void *)eth_dev); + + return 0; +} + +static int eth_axgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, + struct rte_pci_device *pci_dev) +{ + return rte_eth_dev_pci_generic_probe(pci_dev, + sizeof(struct axgbe_port), eth_axgbe_dev_init); +} + +static int eth_axgbe_pci_remove(struct rte_pci_device *pci_dev) +{ + return rte_eth_dev_pci_generic_remove(pci_dev, eth_axgbe_dev_uninit); +} + +static struct rte_pci_driver rte_axgbe_pmd = { + .id_table = pci_id_axgbe_map, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING, + .probe = eth_axgbe_pci_probe, + .remove = eth_axgbe_pci_remove, +}; + +RTE_PMD_REGISTER_PCI(net_axgbe, rte_axgbe_pmd); +RTE_PMD_REGISTER_PCI_TABLE(net_axgbe, pci_id_axgbe_map); +RTE_PMD_REGISTER_KMOD_DEP(net_axgbe, "* igb_uio | uio_pci_generic | vfio-pci"); + +RTE_INIT(axgbe_init_log) +{ + axgbe_logtype_init = rte_log_register("pmd.net.axgbe.init"); + if (axgbe_logtype_init >= 0) + rte_log_set_level(axgbe_logtype_init, RTE_LOG_NOTICE); + axgbe_logtype_driver = rte_log_register("pmd.net.axgbe.driver"); + if (axgbe_logtype_driver >= 0) + rte_log_set_level(axgbe_logtype_driver, 
RTE_LOG_NOTICE); +} diff --git a/src/spdk/dpdk/drivers/net/axgbe/axgbe_ethdev.h b/src/spdk/dpdk/drivers/net/axgbe/axgbe_ethdev.h new file mode 100644 index 000000000..f10ec4a40 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/axgbe/axgbe_ethdev.h @@ -0,0 +1,657 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved. + * Copyright(c) 2018 Synopsys, Inc. All rights reserved. + */ + +#ifndef RTE_ETH_AXGBE_H_ +#define RTE_ETH_AXGBE_H_ + +#include +#include +#include "axgbe_common.h" + +#define IRQ 0xff +#define VLAN_HLEN 4 + +#define AXGBE_TX_MAX_BUF_SIZE (0x3fff & ~(64 - 1)) +#define AXGBE_RX_MAX_BUF_SIZE (0x3fff & ~(64 - 1)) +#define AXGBE_RX_MIN_BUF_SIZE (RTE_ETHER_MAX_LEN + VLAN_HLEN) +#define AXGBE_MAX_MAC_ADDRS 32 +#define AXGBE_MAX_HASH_MAC_ADDRS 256 + +#define AXGBE_RX_BUF_ALIGN 64 + +#define AXGBE_MAX_DMA_CHANNELS 16 +#define AXGBE_MAX_QUEUES 16 +#define AXGBE_PRIORITY_QUEUES 8 +#define AXGBE_DMA_STOP_TIMEOUT 1 + +/* DMA cache settings - Outer sharable, write-back, write-allocate */ +#define AXGBE_DMA_OS_AXDOMAIN 0x2 +#define AXGBE_DMA_OS_ARCACHE 0xb +#define AXGBE_DMA_OS_AWCACHE 0xf + +/* DMA cache settings - System, no caches used */ +#define AXGBE_DMA_SYS_AXDOMAIN 0x3 +#define AXGBE_DMA_SYS_ARCACHE 0x0 +#define AXGBE_DMA_SYS_AWCACHE 0x0 + +/* DMA channel interrupt modes */ +#define AXGBE_IRQ_MODE_EDGE 0 +#define AXGBE_IRQ_MODE_LEVEL 1 + +#define AXGBE_DMA_INTERRUPT_MASK 0x31c7 + +#define AXGMAC_MIN_PACKET 60 +#define AXGMAC_STD_PACKET_MTU 1500 +#define AXGMAC_MAX_STD_PACKET 1518 +#define AXGMAC_JUMBO_PACKET_MTU 9000 +#define AXGMAC_MAX_JUMBO_PACKET 9018 +/* Inter-frame gap + preamble */ +#define AXGMAC_ETH_PREAMBLE (12 + 8) + +#define AXGMAC_PFC_DATA_LEN 46 +#define AXGMAC_PFC_DELAYS 14000 + +/* PCI BAR mapping */ +#define AXGBE_AXGMAC_BAR 0 +#define AXGBE_XPCS_BAR 1 +#define AXGBE_MAC_PROP_OFFSET 0x1d000 +#define AXGBE_I2C_CTRL_OFFSET 0x1e000 + +/* PCI clock frequencies */ +#define AXGBE_V2_DMA_CLOCK_FREQ 500000000 +#define AXGBE_V2_PTP_CLOCK_FREQ 125000000 + +#define AXGMAC_FIFO_MIN_ALLOC 2048 +#define AXGMAC_FIFO_UNIT 256 +#define AXGMAC_FIFO_ALIGN(_x) \ + (((_x) + AXGMAC_FIFO_UNIT - 1) & ~(XGMAC_FIFO_UNIT - 1)) +#define AXGMAC_FIFO_FC_OFF 2048 +#define AXGMAC_FIFO_FC_MIN 4096 + +#define AXGBE_TC_MIN_QUANTUM 10 + +/* Flow control queue count */ +#define AXGMAC_MAX_FLOW_CONTROL_QUEUES 8 + +/* Flow control threshold units */ +#define AXGMAC_FLOW_CONTROL_UNIT 512 +#define AXGMAC_FLOW_CONTROL_ALIGN(_x) \ + (((_x) + AXGMAC_FLOW_CONTROL_UNIT - 1) & \ + ~(AXGMAC_FLOW_CONTROL_UNIT - 1)) +#define AXGMAC_FLOW_CONTROL_VALUE(_x) \ + (((_x) < 1024) ? 
0 : ((_x) / AXGMAC_FLOW_CONTROL_UNIT) - 2) +#define AXGMAC_FLOW_CONTROL_MAX 33280 + +/* Maximum MAC address hash table size (256 bits = 8 dword) */ +#define AXGBE_MAC_HASH_TABLE_SIZE 8 + +/* Receive Side Scaling */ +#define AXGBE_RSS_OFFLOAD ( \ + ETH_RSS_IPV4 | \ + ETH_RSS_NONFRAG_IPV4_TCP | \ + ETH_RSS_NONFRAG_IPV4_UDP | \ + ETH_RSS_IPV6 | \ + ETH_RSS_NONFRAG_IPV6_TCP | \ + ETH_RSS_NONFRAG_IPV6_UDP) + +#define AXGBE_RSS_HASH_KEY_SIZE 40 +#define AXGBE_RSS_MAX_TABLE_SIZE 256 +#define AXGBE_RSS_LOOKUP_TABLE_TYPE 0 +#define AXGBE_RSS_HASH_KEY_TYPE 1 + +/* Auto-negotiation */ +#define AXGBE_AN_MS_TIMEOUT 500 +#define AXGBE_LINK_TIMEOUT 5 + +#define AXGBE_SGMII_AN_LINK_STATUS BIT(1) +#define AXGBE_SGMII_AN_LINK_SPEED (BIT(2) | BIT(3)) +#define AXGBE_SGMII_AN_LINK_SPEED_100 0x04 +#define AXGBE_SGMII_AN_LINK_SPEED_1000 0x08 +#define AXGBE_SGMII_AN_LINK_DUPLEX BIT(4) + +/* ECC correctable error notification window (seconds) */ +#define AXGBE_ECC_LIMIT 60 + +/* MDIO port types */ +#define AXGMAC_MAX_C22_PORT 3 + +/* Helper macro for descriptor handling + * Always use AXGBE_GET_DESC_DATA to access the descriptor data + * since the index is free-running and needs to be and-ed + * with the descriptor count value of the ring to index to + * the proper descriptor data. + */ +#define AXGBE_GET_DESC_DATA(_ring, _idx) \ + ((_ring)->rdata + \ + ((_idx) & ((_ring)->rdesc_count - 1))) + +struct axgbe_port; + +enum axgbe_state { + AXGBE_DOWN, + AXGBE_LINK_INIT, + AXGBE_LINK_ERR, + AXGBE_STOPPED, +}; + +enum axgbe_int { + AXGMAC_INT_DMA_CH_SR_TI, + AXGMAC_INT_DMA_CH_SR_TPS, + AXGMAC_INT_DMA_CH_SR_TBU, + AXGMAC_INT_DMA_CH_SR_RI, + AXGMAC_INT_DMA_CH_SR_RBU, + AXGMAC_INT_DMA_CH_SR_RPS, + AXGMAC_INT_DMA_CH_SR_TI_RI, + AXGMAC_INT_DMA_CH_SR_FBE, + AXGMAC_INT_DMA_ALL, +}; + +enum axgbe_int_state { + AXGMAC_INT_STATE_SAVE, + AXGMAC_INT_STATE_RESTORE, +}; + +enum axgbe_ecc_sec { + AXGBE_ECC_SEC_TX, + AXGBE_ECC_SEC_RX, + AXGBE_ECC_SEC_DESC, +}; + +enum axgbe_speed { + AXGBE_SPEED_1000 = 0, + AXGBE_SPEED_2500, + AXGBE_SPEED_10000, + AXGBE_SPEEDS, +}; + +enum axgbe_xpcs_access { + AXGBE_XPCS_ACCESS_V1 = 0, + AXGBE_XPCS_ACCESS_V2, +}; + +enum axgbe_an_mode { + AXGBE_AN_MODE_CL73 = 0, + AXGBE_AN_MODE_CL73_REDRV, + AXGBE_AN_MODE_CL37, + AXGBE_AN_MODE_CL37_SGMII, + AXGBE_AN_MODE_NONE, +}; + +enum axgbe_an { + AXGBE_AN_READY = 0, + AXGBE_AN_PAGE_RECEIVED, + AXGBE_AN_INCOMPAT_LINK, + AXGBE_AN_COMPLETE, + AXGBE_AN_NO_LINK, + AXGBE_AN_ERROR, +}; + +enum axgbe_rx { + AXGBE_RX_BPA = 0, + AXGBE_RX_XNP, + AXGBE_RX_COMPLETE, + AXGBE_RX_ERROR, +}; + +enum axgbe_mode { + AXGBE_MODE_KX_1000 = 0, + AXGBE_MODE_KX_2500, + AXGBE_MODE_KR, + AXGBE_MODE_X, + AXGBE_MODE_SGMII_100, + AXGBE_MODE_SGMII_1000, + AXGBE_MODE_SFI, + AXGBE_MODE_UNKNOWN, +}; + +enum axgbe_speedset { + AXGBE_SPEEDSET_1000_10000 = 0, + AXGBE_SPEEDSET_2500_10000, +}; + +enum axgbe_mdio_mode { + AXGBE_MDIO_MODE_NONE = 0, + AXGBE_MDIO_MODE_CL22, + AXGBE_MDIO_MODE_CL45, +}; + +struct axgbe_phy { + uint32_t supported; + uint32_t advertising; + uint32_t lp_advertising; + + int address; + + int autoneg; + int speed; + int duplex; + + int link; + + int pause_autoneg; + int tx_pause; + int rx_pause; +}; + +enum axgbe_i2c_cmd { + AXGBE_I2C_CMD_READ = 0, + AXGBE_I2C_CMD_WRITE, +}; + +struct axgbe_i2c_op { + enum axgbe_i2c_cmd cmd; + + unsigned int target; + + uint8_t *buf; + unsigned int len; +}; + +struct axgbe_i2c_op_state { + struct axgbe_i2c_op *op; + + unsigned int tx_len; + unsigned char *tx_buf; + + unsigned int rx_len; + unsigned char *rx_buf; + + unsigned int 
tx_abort_source; + + int ret; +}; + +struct axgbe_i2c { + unsigned int started; + unsigned int max_speed_mode; + unsigned int rx_fifo_size; + unsigned int tx_fifo_size; + + struct axgbe_i2c_op_state op_state; +}; + +struct axgbe_hw_if { + void (*config_flow_control)(struct axgbe_port *); + int (*config_rx_mode)(struct axgbe_port *); + + int (*init)(struct axgbe_port *); + + int (*read_mmd_regs)(struct axgbe_port *, int, int); + void (*write_mmd_regs)(struct axgbe_port *, int, int, int); + int (*set_speed)(struct axgbe_port *, int); + + int (*set_ext_mii_mode)(struct axgbe_port *, unsigned int, + enum axgbe_mdio_mode); + int (*read_ext_mii_regs)(struct axgbe_port *, int, int); + int (*write_ext_mii_regs)(struct axgbe_port *, int, int, uint16_t); + + /* For FLOW ctrl */ + int (*config_tx_flow_control)(struct axgbe_port *); + int (*config_rx_flow_control)(struct axgbe_port *); + + int (*exit)(struct axgbe_port *); +}; + +/* This structure represents implementation specific routines for an + * implementation of a PHY. All routines are required unless noted below. + * Optional routines: + * kr_training_pre, kr_training_post + */ +struct axgbe_phy_impl_if { + /* Perform Setup/teardown actions */ + int (*init)(struct axgbe_port *); + void (*exit)(struct axgbe_port *); + + /* Perform start/stop specific actions */ + int (*reset)(struct axgbe_port *); + int (*start)(struct axgbe_port *); + void (*stop)(struct axgbe_port *); + + /* Return the link status */ + int (*link_status)(struct axgbe_port *, int *); + + /* Indicate if a particular speed is valid */ + int (*valid_speed)(struct axgbe_port *, int); + + /* Check if the specified mode can/should be used */ + bool (*use_mode)(struct axgbe_port *, enum axgbe_mode); + /* Switch the PHY into various modes */ + void (*set_mode)(struct axgbe_port *, enum axgbe_mode); + /* Retrieve mode needed for a specific speed */ + enum axgbe_mode (*get_mode)(struct axgbe_port *, int); + /* Retrieve new/next mode when trying to auto-negotiate */ + enum axgbe_mode (*switch_mode)(struct axgbe_port *); + /* Retrieve current mode */ + enum axgbe_mode (*cur_mode)(struct axgbe_port *); + + /* Retrieve current auto-negotiation mode */ + enum axgbe_an_mode (*an_mode)(struct axgbe_port *); + + /* Configure auto-negotiation settings */ + int (*an_config)(struct axgbe_port *); + + /* Set/override auto-negotiation advertisement settings */ + unsigned int (*an_advertising)(struct axgbe_port *port); + + /* Process results of auto-negotiation */ + enum axgbe_mode (*an_outcome)(struct axgbe_port *); + + /* Pre/Post auto-negotiation support */ + void (*an_pre)(struct axgbe_port *port); + void (*an_post)(struct axgbe_port *port); + + /* Pre/Post KR training enablement support */ + void (*kr_training_pre)(struct axgbe_port *); + void (*kr_training_post)(struct axgbe_port *); +}; + +struct axgbe_phy_if { + /* For PHY setup/teardown */ + int (*phy_init)(struct axgbe_port *); + void (*phy_exit)(struct axgbe_port *); + + /* For PHY support when setting device up/down */ + int (*phy_reset)(struct axgbe_port *); + int (*phy_start)(struct axgbe_port *); + void (*phy_stop)(struct axgbe_port *); + + /* For PHY support while device is up */ + void (*phy_status)(struct axgbe_port *); + int (*phy_config_aneg)(struct axgbe_port *); + + /* For PHY settings validation */ + int (*phy_valid_speed)(struct axgbe_port *, int); + /* For single interrupt support */ + void (*an_isr)(struct axgbe_port *); + /* PHY implementation specific services */ + struct axgbe_phy_impl_if phy_impl; +}; + +struct 
axgbe_i2c_if { + /* For initial I2C setup */ + int (*i2c_init)(struct axgbe_port *); + + /* For I2C support when setting device up/down */ + int (*i2c_start)(struct axgbe_port *); + void (*i2c_stop)(struct axgbe_port *); + + /* For performing I2C operations */ + int (*i2c_xfer)(struct axgbe_port *, struct axgbe_i2c_op *); +}; + +/* This structure contains flags that indicate what hardware features + * or configurations are present in the device. + */ +struct axgbe_hw_features { + /* HW Version */ + unsigned int version; + + /* HW Feature Register0 */ + unsigned int gmii; /* 1000 Mbps support */ + unsigned int vlhash; /* VLAN Hash Filter */ + unsigned int sma; /* SMA(MDIO) Interface */ + unsigned int rwk; /* PMT remote wake-up packet */ + unsigned int mgk; /* PMT magic packet */ + unsigned int mmc; /* RMON module */ + unsigned int aoe; /* ARP Offload */ + unsigned int ts; /* IEEE 1588-2008 Advanced Timestamp */ + unsigned int eee; /* Energy Efficient Ethernet */ + unsigned int tx_coe; /* Tx Checksum Offload */ + unsigned int rx_coe; /* Rx Checksum Offload */ + unsigned int addn_mac; /* Additional MAC Addresses */ + unsigned int ts_src; /* Timestamp Source */ + unsigned int sa_vlan_ins; /* Source Address or VLAN Insertion */ + + /* HW Feature Register1 */ + unsigned int rx_fifo_size; /* MTL Receive FIFO Size */ + unsigned int tx_fifo_size; /* MTL Transmit FIFO Size */ + unsigned int adv_ts_hi; /* Advance Timestamping High Word */ + unsigned int dma_width; /* DMA width */ + unsigned int dcb; /* DCB Feature */ + unsigned int sph; /* Split Header Feature */ + unsigned int tso; /* TCP Segmentation Offload */ + unsigned int dma_debug; /* DMA Debug Registers */ + unsigned int rss; /* Receive Side Scaling */ + unsigned int tc_cnt; /* Number of Traffic Classes */ + unsigned int hash_table_size; /* Hash Table Size */ + unsigned int l3l4_filter_num; /* Number of L3-L4 Filters */ + + /* HW Feature Register2 */ + unsigned int rx_q_cnt; /* Number of MTL Receive Queues */ + unsigned int tx_q_cnt; /* Number of MTL Transmit Queues */ + unsigned int rx_ch_cnt; /* Number of DMA Receive Channels */ + unsigned int tx_ch_cnt; /* Number of DMA Transmit Channels */ + unsigned int pps_out_num; /* Number of PPS outputs */ + unsigned int aux_snap_num; /* Number of Aux snapshot inputs */ +}; + +struct axgbe_version_data { + void (*init_function_ptrs_phy_impl)(struct axgbe_phy_if *); + enum axgbe_xpcs_access xpcs_access; + unsigned int mmc_64bit; + unsigned int tx_max_fifo_size; + unsigned int rx_max_fifo_size; + unsigned int tx_tstamp_workaround; + unsigned int ecc_support; + unsigned int i2c_support; + unsigned int an_cdr_workaround; +}; + +struct axgbe_mmc_stats { + /* Tx Stats */ + uint64_t txoctetcount_gb; + uint64_t txframecount_gb; + uint64_t txbroadcastframes_g; + uint64_t txmulticastframes_g; + uint64_t tx64octets_gb; + uint64_t tx65to127octets_gb; + uint64_t tx128to255octets_gb; + uint64_t tx256to511octets_gb; + uint64_t tx512to1023octets_gb; + uint64_t tx1024tomaxoctets_gb; + uint64_t txunicastframes_gb; + uint64_t txmulticastframes_gb; + uint64_t txbroadcastframes_gb; + uint64_t txunderflowerror; + uint64_t txoctetcount_g; + uint64_t txframecount_g; + uint64_t txpauseframes; + uint64_t txvlanframes_g; + + /* Rx Stats */ + uint64_t rxframecount_gb; + uint64_t rxoctetcount_gb; + uint64_t rxoctetcount_g; + uint64_t rxbroadcastframes_g; + uint64_t rxmulticastframes_g; + uint64_t rxcrcerror; + uint64_t rxrunterror; + uint64_t rxjabbererror; + uint64_t rxundersize_g; + uint64_t rxoversize_g; + uint64_t 
rx64octets_gb;
+ uint64_t rx65to127octets_gb;
+ uint64_t rx128to255octets_gb;
+ uint64_t rx256to511octets_gb;
+ uint64_t rx512to1023octets_gb;
+ uint64_t rx1024tomaxoctets_gb;
+ uint64_t rxunicastframes_g;
+ uint64_t rxlengtherror;
+ uint64_t rxoutofrangetype;
+ uint64_t rxpauseframes;
+ uint64_t rxfifooverflow;
+ uint64_t rxvlanframes_gb;
+ uint64_t rxwatchdogerror;
+};
+
+/* Flow control parameters */
+struct xgbe_fc_info {
+ uint32_t high_water[AXGBE_PRIORITY_QUEUES];
+ uint32_t low_water[AXGBE_PRIORITY_QUEUES];
+ uint16_t pause_time[AXGBE_PRIORITY_QUEUES];
+ uint16_t send_xon;
+ enum rte_eth_fc_mode mode;
+ uint8_t autoneg;
+};
+
+/*
+ * Structure to store private data for each port.
+ */
+struct axgbe_port {
+ /* Ethdev where port belongs*/
+ struct rte_eth_dev *eth_dev;
+ /* Pci dev info */
+ const struct rte_pci_device *pci_dev;
+ /* Version related data */
+ struct axgbe_version_data *vdata;
+
+ /* AXGMAC/XPCS related mmio registers */
+ void *xgmac_regs; /* AXGMAC CSRs */
+ void *xpcs_regs; /* XPCS MMD registers */
+ void *xprop_regs; /* AXGBE property registers */
+ void *xi2c_regs; /* AXGBE I2C CSRs */
+
+ bool cdr_track_early;
+ /* XPCS indirect addressing lock */
+ unsigned int xpcs_window_def_reg;
+ unsigned int xpcs_window_sel_reg;
+ unsigned int xpcs_window;
+ unsigned int xpcs_window_size;
+ unsigned int xpcs_window_mask;
+
+ /* Flags representing axgbe_state */
+ unsigned long dev_state;
+
+ struct axgbe_hw_if hw_if;
+ struct axgbe_phy_if phy_if;
+ struct axgbe_i2c_if i2c_if;
+
+ /* AXI DMA settings */
+ unsigned int coherent;
+ unsigned int axdomain;
+ unsigned int arcache;
+ unsigned int awcache;
+
+ unsigned int tx_max_channel_count;
+ unsigned int rx_max_channel_count;
+ unsigned int channel_count;
+ unsigned int tx_ring_count;
+ unsigned int tx_desc_count;
+ unsigned int rx_ring_count;
+ unsigned int rx_desc_count;
+
+ unsigned int tx_max_q_count;
+ unsigned int rx_max_q_count;
+ unsigned int tx_q_count;
+ unsigned int rx_q_count;
+
+ /* Tx/Rx common settings */
+ unsigned int pblx8;
+
+ /* Tx settings */
+ unsigned int tx_sf_mode;
+ unsigned int tx_threshold;
+ unsigned int tx_pbl;
+ unsigned int tx_osp_mode;
+ unsigned int tx_max_fifo_size;
+
+ /* Rx settings */
+ unsigned int rx_sf_mode;
+ unsigned int rx_threshold;
+ unsigned int rx_pbl;
+ unsigned int rx_max_fifo_size;
+ unsigned int rx_buf_size;
+
+ /* Device clocks */
+ unsigned long sysclk_rate;
+ unsigned long ptpclk_rate;
+
+ /* Keeps track of power mode */
+ unsigned int power_down;
+
+ /* Current PHY settings */
+ int phy_link;
+ int phy_speed;
+
+ pthread_mutex_t xpcs_mutex;
+ pthread_mutex_t i2c_mutex;
+ pthread_mutex_t an_mutex;
+ pthread_mutex_t phy_mutex;
+
+ /* Flow control settings */
+ unsigned int pause_autoneg;
+ unsigned int tx_pause;
+ unsigned int rx_pause;
+ unsigned int rx_rfa[AXGBE_MAX_QUEUES];
+ unsigned int rx_rfd[AXGBE_MAX_QUEUES];
+ unsigned int fifo;
+ unsigned int pfc_map[AXGBE_MAX_QUEUES];
+
+ /* Receive Side Scaling settings */
+ u8 rss_key[AXGBE_RSS_HASH_KEY_SIZE];
+ uint32_t rss_table[AXGBE_RSS_MAX_TABLE_SIZE];
+ uint32_t rss_options;
+ int rss_enable;
+
+ /* Hardware features of the device */
+ struct axgbe_hw_features hw_feat;
+
+ struct rte_ether_addr mac_addr;
+
+ /* Software Tx/Rx structure pointers*/
+ void **rx_queues;
+ void **tx_queues;
+
+ /* MDIO/PHY related settings */
+ unsigned int phy_started;
+ void *phy_data;
+ struct axgbe_phy phy;
+ int mdio_mmd;
+ unsigned long link_check;
+ volatile int mdio_completion;
+
+ unsigned int kr_redrv;
+
+ /* Auto-negotiation state
machine support */ + unsigned int an_int; + unsigned int an_status; + enum axgbe_an an_result; + enum axgbe_an an_state; + enum axgbe_rx kr_state; + enum axgbe_rx kx_state; + unsigned int an_supported; + unsigned int parallel_detect; + unsigned int fec_ability; + unsigned long an_start; + enum axgbe_an_mode an_mode; + + /* I2C support */ + struct axgbe_i2c i2c; + volatile int i2c_complete; + + /* CRC stripping by H/w for Rx packet*/ + int crc_strip_enable; + /* csum enable to hardware */ + uint32_t rx_csum_enable; + + struct axgbe_mmc_stats mmc_stats; + struct xgbe_fc_info fc; + + /* Hash filtering */ + unsigned int hash_table_shift; + unsigned int hash_table_count; + unsigned int uc_hash_mac_addr; + unsigned int uc_hash_table[AXGBE_MAC_HASH_TABLE_SIZE]; +}; + +void axgbe_init_function_ptrs_dev(struct axgbe_hw_if *hw_if); +void axgbe_init_function_ptrs_phy(struct axgbe_phy_if *phy_if); +void axgbe_init_function_ptrs_phy_v2(struct axgbe_phy_if *phy_if); +void axgbe_init_function_ptrs_i2c(struct axgbe_i2c_if *i2c_if); +void axgbe_set_mac_addn_addr(struct axgbe_port *pdata, u8 *addr, + uint32_t index); +void axgbe_set_mac_hash_table(struct axgbe_port *pdata, u8 *addr, bool add); + +#endif /* RTE_ETH_AXGBE_H_ */ diff --git a/src/spdk/dpdk/drivers/net/axgbe/axgbe_i2c.c b/src/spdk/dpdk/drivers/net/axgbe/axgbe_i2c.c new file mode 100644 index 000000000..ab3738a12 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/axgbe/axgbe_i2c.c @@ -0,0 +1,341 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved. + * Copyright(c) 2018 Synopsys, Inc. All rights reserved. + */ + +#include "axgbe_ethdev.h" +#include "axgbe_common.h" + +#define AXGBE_ABORT_COUNT 500 +#define AXGBE_DISABLE_COUNT 1000 + +#define AXGBE_STD_SPEED 1 + +#define AXGBE_INTR_RX_FULL BIT(IC_RAW_INTR_STAT_RX_FULL_INDEX) +#define AXGBE_INTR_TX_EMPTY BIT(IC_RAW_INTR_STAT_TX_EMPTY_INDEX) +#define AXGBE_INTR_TX_ABRT BIT(IC_RAW_INTR_STAT_TX_ABRT_INDEX) +#define AXGBE_INTR_STOP_DET BIT(IC_RAW_INTR_STAT_STOP_DET_INDEX) +#define AXGBE_DEFAULT_INT_MASK (AXGBE_INTR_RX_FULL | \ + AXGBE_INTR_TX_EMPTY | \ + AXGBE_INTR_TX_ABRT | \ + AXGBE_INTR_STOP_DET) + +#define AXGBE_I2C_READ BIT(8) +#define AXGBE_I2C_STOP BIT(9) + +static int axgbe_i2c_abort(struct axgbe_port *pdata) +{ + unsigned int wait = AXGBE_ABORT_COUNT; + + /* Must be enabled to recognize the abort request */ + XI2C_IOWRITE_BITS(pdata, IC_ENABLE, EN, 1); + + /* Issue the abort */ + XI2C_IOWRITE_BITS(pdata, IC_ENABLE, ABORT, 1); + + while (wait--) { + if (!XI2C_IOREAD_BITS(pdata, IC_ENABLE, ABORT)) + return 0; + rte_delay_us(500); + } + + return -EBUSY; +} + +static int axgbe_i2c_set_enable(struct axgbe_port *pdata, bool enable) +{ + unsigned int wait = AXGBE_DISABLE_COUNT; + unsigned int mode = enable ? 
1 : 0; + + while (wait--) { + XI2C_IOWRITE_BITS(pdata, IC_ENABLE, EN, mode); + if (XI2C_IOREAD_BITS(pdata, IC_ENABLE_STATUS, EN) == mode) + return 0; + + rte_delay_us(100); + } + + return -EBUSY; +} + +static int axgbe_i2c_disable(struct axgbe_port *pdata) +{ + unsigned int ret; + + ret = axgbe_i2c_set_enable(pdata, false); + if (ret) { + /* Disable failed, try an abort */ + ret = axgbe_i2c_abort(pdata); + if (ret) + return ret; + + /* Abort succeeded, try to disable again */ + ret = axgbe_i2c_set_enable(pdata, false); + } + + return ret; +} + +static int axgbe_i2c_enable(struct axgbe_port *pdata) +{ + return axgbe_i2c_set_enable(pdata, true); +} + +static void axgbe_i2c_clear_all_interrupts(struct axgbe_port *pdata) +{ + XI2C_IOREAD(pdata, IC_CLR_INTR); +} + +static void axgbe_i2c_disable_interrupts(struct axgbe_port *pdata) +{ + XI2C_IOWRITE(pdata, IC_INTR_MASK, 0); +} + +static void axgbe_i2c_enable_interrupts(struct axgbe_port *pdata) +{ + XI2C_IOWRITE(pdata, IC_INTR_MASK, AXGBE_DEFAULT_INT_MASK); +} + +static void axgbe_i2c_write(struct axgbe_port *pdata) +{ + struct axgbe_i2c_op_state *state = &pdata->i2c.op_state; + unsigned int tx_slots; + unsigned int cmd; + + /* Configured to never receive Rx overflows, so fill up Tx fifo */ + tx_slots = pdata->i2c.tx_fifo_size - XI2C_IOREAD(pdata, IC_TXFLR); + while (tx_slots && state->tx_len) { + if (state->op->cmd == AXGBE_I2C_CMD_READ) + cmd = AXGBE_I2C_READ; + else + cmd = *state->tx_buf++; + + if (state->tx_len == 1) + XI2C_SET_BITS(cmd, IC_DATA_CMD, STOP, 1); + + XI2C_IOWRITE(pdata, IC_DATA_CMD, cmd); + + tx_slots--; + state->tx_len--; + } + + /* No more Tx operations, so ignore TX_EMPTY and return */ + if (!state->tx_len) + XI2C_IOWRITE_BITS(pdata, IC_INTR_MASK, TX_EMPTY, 0); +} + +static void axgbe_i2c_read(struct axgbe_port *pdata) +{ + struct axgbe_i2c_op_state *state = &pdata->i2c.op_state; + unsigned int rx_slots; + + /* Anything to be read? 
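+ * Only READ commands return data; for a write there is nothing to
+ * drain. Otherwise the Rx FIFO is emptied into the caller's buffer,
+ * one byte per IC_DATA_CMD read, up to the bytes still outstanding.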
*/ + if (state->op->cmd != AXGBE_I2C_CMD_READ) + return; + + rx_slots = XI2C_IOREAD(pdata, IC_RXFLR); + while (rx_slots && state->rx_len) { + *state->rx_buf++ = XI2C_IOREAD(pdata, IC_DATA_CMD); + state->rx_len--; + rx_slots--; + } +} + +static void axgbe_i2c_clear_isr_interrupts(struct axgbe_port *pdata, + unsigned int isr) +{ + struct axgbe_i2c_op_state *state = &pdata->i2c.op_state; + + if (isr & AXGBE_INTR_TX_ABRT) { + state->tx_abort_source = XI2C_IOREAD(pdata, IC_TX_ABRT_SOURCE); + XI2C_IOREAD(pdata, IC_CLR_TX_ABRT); + } + + if (isr & AXGBE_INTR_STOP_DET) + XI2C_IOREAD(pdata, IC_CLR_STOP_DET); +} + +static int axgbe_i2c_isr(struct axgbe_port *pdata) +{ + struct axgbe_i2c_op_state *state = &pdata->i2c.op_state; + unsigned int isr; + + isr = XI2C_IOREAD(pdata, IC_RAW_INTR_STAT); + + PMD_DRV_LOG(DEBUG, "I2C interrupt received: status=%#010x\n", isr); + + axgbe_i2c_clear_isr_interrupts(pdata, isr); + + if (isr & AXGBE_INTR_TX_ABRT) { + PMD_DRV_LOG(DEBUG, + "I2C TX_ABRT received (%#010x) for target %#04x\n", + state->tx_abort_source, state->op->target); + + axgbe_i2c_disable_interrupts(pdata); + + state->ret = -EIO; + goto out; + } + + /* Check for data in the Rx fifo */ + axgbe_i2c_read(pdata); + + /* Fill up the Tx fifo next */ + axgbe_i2c_write(pdata); + +out: + /* Complete on an error or STOP condition */ + if (state->ret || XI2C_GET_BITS(isr, IC_RAW_INTR_STAT, STOP_DET)) + return 1; + + return 0; +} + +static void axgbe_i2c_set_mode(struct axgbe_port *pdata) +{ + unsigned int reg; + + reg = XI2C_IOREAD(pdata, IC_CON); + XI2C_SET_BITS(reg, IC_CON, MASTER_MODE, 1); + XI2C_SET_BITS(reg, IC_CON, SLAVE_DISABLE, 1); + XI2C_SET_BITS(reg, IC_CON, RESTART_EN, 1); + XI2C_SET_BITS(reg, IC_CON, SPEED, AXGBE_STD_SPEED); + XI2C_SET_BITS(reg, IC_CON, RX_FIFO_FULL_HOLD, 1); + XI2C_IOWRITE(pdata, IC_CON, reg); +} + +static void axgbe_i2c_get_features(struct axgbe_port *pdata) +{ + struct axgbe_i2c *i2c = &pdata->i2c; + unsigned int reg; + + reg = XI2C_IOREAD(pdata, IC_COMP_PARAM_1); + i2c->max_speed_mode = XI2C_GET_BITS(reg, IC_COMP_PARAM_1, + MAX_SPEED_MODE); + i2c->rx_fifo_size = XI2C_GET_BITS(reg, IC_COMP_PARAM_1, + RX_BUFFER_DEPTH); + i2c->tx_fifo_size = XI2C_GET_BITS(reg, IC_COMP_PARAM_1, + TX_BUFFER_DEPTH); +} + +static void axgbe_i2c_set_target(struct axgbe_port *pdata, unsigned int addr) +{ + XI2C_IOWRITE(pdata, IC_TAR, addr); +} + +static int axgbe_i2c_xfer(struct axgbe_port *pdata, struct axgbe_i2c_op *op) +{ + struct axgbe_i2c_op_state *state = &pdata->i2c.op_state; + int ret; + uint64_t timeout; + + pthread_mutex_lock(&pdata->i2c_mutex); + ret = axgbe_i2c_disable(pdata); + if (ret) { + PMD_DRV_LOG(ERR, "failed to disable i2c master\n"); + return ret; + } + + axgbe_i2c_set_target(pdata, op->target); + + memset(state, 0, sizeof(*state)); + state->op = op; + state->tx_len = op->len; + state->tx_buf = (unsigned char *)op->buf; + state->rx_len = op->len; + state->rx_buf = (unsigned char *)op->buf; + + axgbe_i2c_clear_all_interrupts(pdata); + ret = axgbe_i2c_enable(pdata); + if (ret) { + PMD_DRV_LOG(ERR, "failed to enable i2c master\n"); + return ret; + } + + /* Enabling the interrupts will cause the TX FIFO empty interrupt to + * fire and begin to process the command via the ISR. 
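+ * Completion is then polled for roughly one second: whenever the raw
+ * interrupt status is non-zero the ISR is run, and it reports completion
+ * once a STOP condition or an abort has been seen. If the loop times
+ * out, interrupts are masked, the controller is disabled and -ETIMEDOUT
+ * is returned.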
+ */ + axgbe_i2c_enable_interrupts(pdata); + timeout = rte_get_timer_cycles() + rte_get_timer_hz(); + + while (time_before(rte_get_timer_cycles(), timeout)) { + rte_delay_us(100); + if (XI2C_IOREAD(pdata, IC_RAW_INTR_STAT)) { + if (axgbe_i2c_isr(pdata)) + goto success; + } + } + + PMD_DRV_LOG(ERR, "i2c operation timed out\n"); + axgbe_i2c_disable_interrupts(pdata); + axgbe_i2c_disable(pdata); + ret = -ETIMEDOUT; + goto unlock; + +success: + ret = state->ret; + if (ret) { + if (state->tx_abort_source & IC_TX_ABRT_7B_ADDR_NOACK) + ret = -ENOTCONN; + else if (state->tx_abort_source & IC_TX_ABRT_ARB_LOST) + ret = -EAGAIN; + } + +unlock: + pthread_mutex_unlock(&pdata->i2c_mutex); + return ret; +} + +static void axgbe_i2c_stop(struct axgbe_port *pdata) +{ + if (!pdata->i2c.started) + return; + + PMD_DRV_LOG(DEBUG, "stopping I2C\n"); + + pdata->i2c.started = 0; + axgbe_i2c_disable_interrupts(pdata); + axgbe_i2c_disable(pdata); + axgbe_i2c_clear_all_interrupts(pdata); +} + +static int axgbe_i2c_start(struct axgbe_port *pdata) +{ + if (pdata->i2c.started) + return 0; + + PMD_DRV_LOG(DEBUG, "starting I2C\n"); + + pdata->i2c.started = 1; + + return 0; +} + +static int axgbe_i2c_init(struct axgbe_port *pdata) +{ + int ret; + + axgbe_i2c_disable_interrupts(pdata); + + ret = axgbe_i2c_disable(pdata); + if (ret) { + PMD_DRV_LOG(ERR, "failed to disable i2c master\n"); + return ret; + } + + axgbe_i2c_get_features(pdata); + + axgbe_i2c_set_mode(pdata); + + axgbe_i2c_clear_all_interrupts(pdata); + + return 0; +} + +void axgbe_init_function_ptrs_i2c(struct axgbe_i2c_if *i2c_if) +{ + i2c_if->i2c_init = axgbe_i2c_init; + i2c_if->i2c_start = axgbe_i2c_start; + i2c_if->i2c_stop = axgbe_i2c_stop; + i2c_if->i2c_xfer = axgbe_i2c_xfer; +} diff --git a/src/spdk/dpdk/drivers/net/axgbe/axgbe_logs.h b/src/spdk/dpdk/drivers/net/axgbe/axgbe_logs.h new file mode 100644 index 000000000..d14870171 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/axgbe/axgbe_logs.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved. + */ + +#ifndef _AXGBE_LOGS_H_ +#define _AXGBE_LOGS_H_ + +#include + +extern int axgbe_logtype_init; +#define PMD_INIT_LOG(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, axgbe_logtype_init, "%s(): " fmt "\n", \ + __func__, ##args) + +#ifdef RTE_LIBRTE_AXGBE_PMD_DEBUG +#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>") +#else +#define PMD_INIT_FUNC_TRACE() do { } while (0) +#endif + +extern int axgbe_logtype_driver; +#define PMD_DRV_LOG(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, axgbe_logtype_driver, "%s(): " fmt, \ + __func__, ## args) + +#endif /* _AXGBE_LOGS_H_ */ diff --git a/src/spdk/dpdk/drivers/net/axgbe/axgbe_mdio.c b/src/spdk/dpdk/drivers/net/axgbe/axgbe_mdio.c new file mode 100644 index 000000000..0f226c3f2 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/axgbe/axgbe_mdio.c @@ -0,0 +1,1285 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved. + * Copyright(c) 2018 Synopsys, Inc. All rights reserved. 
+ */ + +#include "axgbe_ethdev.h" +#include "axgbe_common.h" +#include "axgbe_phy.h" + +static void axgbe_an37_clear_interrupts(struct axgbe_port *pdata) +{ + int reg; + + reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_STAT); + reg &= ~AXGBE_AN_CL37_INT_MASK; + XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_STAT, reg); +} + +static void axgbe_an37_disable_interrupts(struct axgbe_port *pdata) +{ + int reg; + + reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_CTRL); + reg &= ~AXGBE_AN_CL37_INT_MASK; + XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_CTRL, reg); + + reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_PCS_DIG_CTRL); + reg &= ~AXGBE_PCS_CL37_BP; + XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_PCS_DIG_CTRL, reg); +} + +static void axgbe_an37_enable_interrupts(struct axgbe_port *pdata) +{ + unsigned int reg; + + reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_PCS_DIG_CTRL); + reg |= AXGBE_PCS_CL37_BP; + XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_PCS_DIG_CTRL, reg); + + reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_CTRL); + reg |= AXGBE_AN_CL37_INT_MASK; + XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_CTRL, reg); +} + +static void axgbe_an73_clear_interrupts(struct axgbe_port *pdata) +{ + XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, 0); +} + +static void axgbe_an73_disable_interrupts(struct axgbe_port *pdata) +{ + XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, 0); +} + +static void axgbe_an73_enable_interrupts(struct axgbe_port *pdata) +{ + XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, + AXGBE_AN_CL73_INT_MASK); +} + +static void axgbe_an_enable_interrupts(struct axgbe_port *pdata) +{ + switch (pdata->an_mode) { + case AXGBE_AN_MODE_CL73: + case AXGBE_AN_MODE_CL73_REDRV: + axgbe_an73_enable_interrupts(pdata); + break; + case AXGBE_AN_MODE_CL37: + case AXGBE_AN_MODE_CL37_SGMII: + axgbe_an37_enable_interrupts(pdata); + break; + default: + break; + } +} + +static void axgbe_an_clear_interrupts_all(struct axgbe_port *pdata) +{ + axgbe_an73_clear_interrupts(pdata); + axgbe_an37_clear_interrupts(pdata); +} + +static void axgbe_an73_enable_kr_training(struct axgbe_port *pdata) +{ + unsigned int reg; + + reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL); + + reg |= AXGBE_KR_TRAINING_ENABLE; + XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg); +} + +static void axgbe_an73_disable_kr_training(struct axgbe_port *pdata) +{ + unsigned int reg; + + reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL); + + reg &= ~AXGBE_KR_TRAINING_ENABLE; + XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg); +} + +static void axgbe_kr_mode(struct axgbe_port *pdata) +{ + /* Enable KR training */ + axgbe_an73_enable_kr_training(pdata); + + /* Set MAC to 10G speed */ + pdata->hw_if.set_speed(pdata, SPEED_10000); + + /* Call PHY implementation support to complete rate change */ + pdata->phy_if.phy_impl.set_mode(pdata, AXGBE_MODE_KR); +} + +static void axgbe_kx_2500_mode(struct axgbe_port *pdata) +{ + /* Disable KR training */ + axgbe_an73_disable_kr_training(pdata); + + /* Set MAC to 2.5G speed */ + pdata->hw_if.set_speed(pdata, SPEED_2500); + + /* Call PHY implementation support to complete rate change */ + pdata->phy_if.phy_impl.set_mode(pdata, AXGBE_MODE_KX_2500); +} + +static void axgbe_kx_1000_mode(struct axgbe_port *pdata) +{ + /* Disable KR training */ + axgbe_an73_disable_kr_training(pdata); + + /* Set MAC to 1G speed */ + pdata->hw_if.set_speed(pdata, SPEED_1000); + + /* Call PHY implementation support to complete rate change */ + 
pdata->phy_if.phy_impl.set_mode(pdata, AXGBE_MODE_KX_1000); +} + +static void axgbe_sfi_mode(struct axgbe_port *pdata) +{ + /* If a KR re-driver is present, change to KR mode instead */ + if (pdata->kr_redrv) + return axgbe_kr_mode(pdata); + + /* Disable KR training */ + axgbe_an73_disable_kr_training(pdata); + + /* Set MAC to 10G speed */ + pdata->hw_if.set_speed(pdata, SPEED_10000); + + /* Call PHY implementation support to complete rate change */ + pdata->phy_if.phy_impl.set_mode(pdata, AXGBE_MODE_SFI); +} + +static void axgbe_x_mode(struct axgbe_port *pdata) +{ + /* Disable KR training */ + axgbe_an73_disable_kr_training(pdata); + + /* Set MAC to 1G speed */ + pdata->hw_if.set_speed(pdata, SPEED_1000); + + /* Call PHY implementation support to complete rate change */ + pdata->phy_if.phy_impl.set_mode(pdata, AXGBE_MODE_X); +} + +static void axgbe_sgmii_1000_mode(struct axgbe_port *pdata) +{ + /* Disable KR training */ + axgbe_an73_disable_kr_training(pdata); + + /* Set MAC to 1G speed */ + pdata->hw_if.set_speed(pdata, SPEED_1000); + + /* Call PHY implementation support to complete rate change */ + pdata->phy_if.phy_impl.set_mode(pdata, AXGBE_MODE_SGMII_1000); +} + +static void axgbe_sgmii_100_mode(struct axgbe_port *pdata) +{ + /* Disable KR training */ + axgbe_an73_disable_kr_training(pdata); + + /* Set MAC to 1G speed */ + pdata->hw_if.set_speed(pdata, SPEED_1000); + + /* Call PHY implementation support to complete rate change */ + pdata->phy_if.phy_impl.set_mode(pdata, AXGBE_MODE_SGMII_100); +} + +static enum axgbe_mode axgbe_cur_mode(struct axgbe_port *pdata) +{ + return pdata->phy_if.phy_impl.cur_mode(pdata); +} + +static bool axgbe_in_kr_mode(struct axgbe_port *pdata) +{ + return axgbe_cur_mode(pdata) == AXGBE_MODE_KR; +} + +static void axgbe_change_mode(struct axgbe_port *pdata, + enum axgbe_mode mode) +{ + switch (mode) { + case AXGBE_MODE_KX_1000: + axgbe_kx_1000_mode(pdata); + break; + case AXGBE_MODE_KX_2500: + axgbe_kx_2500_mode(pdata); + break; + case AXGBE_MODE_KR: + axgbe_kr_mode(pdata); + break; + case AXGBE_MODE_SGMII_100: + axgbe_sgmii_100_mode(pdata); + break; + case AXGBE_MODE_SGMII_1000: + axgbe_sgmii_1000_mode(pdata); + break; + case AXGBE_MODE_X: + axgbe_x_mode(pdata); + break; + case AXGBE_MODE_SFI: + axgbe_sfi_mode(pdata); + break; + case AXGBE_MODE_UNKNOWN: + break; + default: + PMD_DRV_LOG(ERR, "invalid operation mode requested (%u)\n", mode); + } +} + +static void axgbe_switch_mode(struct axgbe_port *pdata) +{ + axgbe_change_mode(pdata, pdata->phy_if.phy_impl.switch_mode(pdata)); +} + +static void axgbe_set_mode(struct axgbe_port *pdata, + enum axgbe_mode mode) +{ + if (mode == axgbe_cur_mode(pdata)) + return; + + axgbe_change_mode(pdata, mode); +} + +static bool axgbe_use_mode(struct axgbe_port *pdata, + enum axgbe_mode mode) +{ + return pdata->phy_if.phy_impl.use_mode(pdata, mode); +} + +static void axgbe_an37_set(struct axgbe_port *pdata, bool enable, + bool restart) +{ + unsigned int reg; + + reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_CTRL1); + reg &= ~MDIO_VEND2_CTRL1_AN_ENABLE; + + if (enable) + reg |= MDIO_VEND2_CTRL1_AN_ENABLE; + + if (restart) + reg |= MDIO_VEND2_CTRL1_AN_RESTART; + + XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_CTRL1, reg); +} + +static void axgbe_an37_restart(struct axgbe_port *pdata) +{ + axgbe_an37_enable_interrupts(pdata); + axgbe_an37_set(pdata, true, true); +} + +static void axgbe_an37_disable(struct axgbe_port *pdata) +{ + axgbe_an37_set(pdata, false, false); + axgbe_an37_disable_interrupts(pdata); +} + +static void 
axgbe_an73_set(struct axgbe_port *pdata, bool enable, + bool restart) +{ + unsigned int reg; + + reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_CTRL1); + reg &= ~MDIO_AN_CTRL1_ENABLE; + + if (enable) + reg |= MDIO_AN_CTRL1_ENABLE; + + if (restart) + reg |= MDIO_AN_CTRL1_RESTART; + + XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_CTRL1, reg); +} + +static void axgbe_an73_restart(struct axgbe_port *pdata) +{ + axgbe_an73_enable_interrupts(pdata); + axgbe_an73_set(pdata, true, true); + + PMD_DRV_LOG(DEBUG, "CL73 AN enabled/restarted\n"); +} + +static void axgbe_an73_disable(struct axgbe_port *pdata) +{ + axgbe_an73_set(pdata, false, false); + axgbe_an73_disable_interrupts(pdata); + pdata->an_start = 0; + + PMD_DRV_LOG(DEBUG, "CL73 AN disabled\n"); +} + +static void axgbe_an_restart(struct axgbe_port *pdata) +{ + if (pdata->phy_if.phy_impl.an_pre) + pdata->phy_if.phy_impl.an_pre(pdata); + + switch (pdata->an_mode) { + case AXGBE_AN_MODE_CL73: + case AXGBE_AN_MODE_CL73_REDRV: + axgbe_an73_restart(pdata); + break; + case AXGBE_AN_MODE_CL37: + case AXGBE_AN_MODE_CL37_SGMII: + axgbe_an37_restart(pdata); + break; + default: + break; + } +} + +static void axgbe_an_disable(struct axgbe_port *pdata) +{ + if (pdata->phy_if.phy_impl.an_post) + pdata->phy_if.phy_impl.an_post(pdata); + + switch (pdata->an_mode) { + case AXGBE_AN_MODE_CL73: + case AXGBE_AN_MODE_CL73_REDRV: + axgbe_an73_disable(pdata); + break; + case AXGBE_AN_MODE_CL37: + case AXGBE_AN_MODE_CL37_SGMII: + axgbe_an37_disable(pdata); + break; + default: + break; + } +} + +static void axgbe_an_disable_all(struct axgbe_port *pdata) +{ + axgbe_an73_disable(pdata); + axgbe_an37_disable(pdata); +} + +static enum axgbe_an axgbe_an73_tx_training(struct axgbe_port *pdata, + enum axgbe_rx *state) +{ + unsigned int ad_reg, lp_reg, reg; + + *state = AXGBE_RX_COMPLETE; + + /* If we're not in KR mode then we're done */ + if (!axgbe_in_kr_mode(pdata)) + return AXGBE_AN_PAGE_RECEIVED; + + /* Enable/Disable FEC */ + ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2); + lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 2); + + reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FECCTRL); + reg &= ~(MDIO_PMA_10GBR_FECABLE_ABLE | MDIO_PMA_10GBR_FECABLE_ERRABLE); + if ((ad_reg & 0xc000) && (lp_reg & 0xc000)) + reg |= pdata->fec_ability; + XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FECCTRL, reg); + + /* Start KR training */ + reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL); + if (reg & AXGBE_KR_TRAINING_ENABLE) { + if (pdata->phy_if.phy_impl.kr_training_pre) + pdata->phy_if.phy_impl.kr_training_pre(pdata); + + reg |= AXGBE_KR_TRAINING_START; + XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, + reg); + + PMD_DRV_LOG(DEBUG, "KR training initiated\n"); + + if (pdata->phy_if.phy_impl.kr_training_post) + pdata->phy_if.phy_impl.kr_training_post(pdata); + } + + return AXGBE_AN_PAGE_RECEIVED; +} + +static enum axgbe_an axgbe_an73_tx_xnp(struct axgbe_port *pdata, + enum axgbe_rx *state) +{ + u16 msg; + + *state = AXGBE_RX_XNP; + + msg = AXGBE_XNP_MCF_NULL_MESSAGE; + msg |= AXGBE_XNP_MP_FORMATTED; + + XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_XNP + 2, 0); + XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_XNP + 1, 0); + XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_XNP, msg); + + return AXGBE_AN_PAGE_RECEIVED; +} + +static enum axgbe_an axgbe_an73_rx_bpa(struct axgbe_port *pdata, + enum axgbe_rx *state) +{ + unsigned int link_support; + unsigned int reg, ad_reg, lp_reg; + + /* Read Base Ability register 2 first */ + reg = XMDIO_READ(pdata, 
MDIO_MMD_AN, MDIO_AN_LPA + 1); + + /* Check for a supported mode, otherwise restart in a different one */ + link_support = axgbe_in_kr_mode(pdata) ? 0x80 : 0x20; + if (!(reg & link_support)) + return AXGBE_AN_INCOMPAT_LINK; + + /* Check Extended Next Page support */ + ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE); + lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA); + + return ((ad_reg & AXGBE_XNP_NP_EXCHANGE) || + (lp_reg & AXGBE_XNP_NP_EXCHANGE)) + ? axgbe_an73_tx_xnp(pdata, state) + : axgbe_an73_tx_training(pdata, state); +} + +static enum axgbe_an axgbe_an73_rx_xnp(struct axgbe_port *pdata, + enum axgbe_rx *state) +{ + unsigned int ad_reg, lp_reg; + + /* Check Extended Next Page support */ + ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_XNP); + lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPX); + + return ((ad_reg & AXGBE_XNP_NP_EXCHANGE) || + (lp_reg & AXGBE_XNP_NP_EXCHANGE)) + ? axgbe_an73_tx_xnp(pdata, state) + : axgbe_an73_tx_training(pdata, state); +} + +static enum axgbe_an axgbe_an73_page_received(struct axgbe_port *pdata) +{ + enum axgbe_rx *state; + unsigned long an_timeout; + enum axgbe_an ret; + unsigned long ticks; + + if (!pdata->an_start) { + pdata->an_start = rte_get_timer_cycles(); + } else { + an_timeout = pdata->an_start + + msecs_to_timer_cycles(AXGBE_AN_MS_TIMEOUT); + ticks = rte_get_timer_cycles(); + if (time_after(ticks, an_timeout)) { + /* Auto-negotiation timed out, reset state */ + pdata->kr_state = AXGBE_RX_BPA; + pdata->kx_state = AXGBE_RX_BPA; + + pdata->an_start = rte_get_timer_cycles(); + + PMD_DRV_LOG(NOTICE, + "CL73 AN timed out, resetting state\n"); + } + } + + state = axgbe_in_kr_mode(pdata) ? &pdata->kr_state + : &pdata->kx_state; + + switch (*state) { + case AXGBE_RX_BPA: + ret = axgbe_an73_rx_bpa(pdata, state); + break; + case AXGBE_RX_XNP: + ret = axgbe_an73_rx_xnp(pdata, state); + break; + default: + ret = AXGBE_AN_ERROR; + } + + return ret; +} + +static enum axgbe_an axgbe_an73_incompat_link(struct axgbe_port *pdata) +{ + /* Be sure we aren't looping trying to negotiate */ + if (axgbe_in_kr_mode(pdata)) { + pdata->kr_state = AXGBE_RX_ERROR; + + if (!(pdata->phy.advertising & ADVERTISED_1000baseKX_Full) && + !(pdata->phy.advertising & ADVERTISED_2500baseX_Full)) + return AXGBE_AN_NO_LINK; + + if (pdata->kx_state != AXGBE_RX_BPA) + return AXGBE_AN_NO_LINK; + } else { + pdata->kx_state = AXGBE_RX_ERROR; + + if (!(pdata->phy.advertising & ADVERTISED_10000baseKR_Full)) + return AXGBE_AN_NO_LINK; + + if (pdata->kr_state != AXGBE_RX_BPA) + return AXGBE_AN_NO_LINK; + } + + axgbe_an_disable(pdata); + axgbe_switch_mode(pdata); + axgbe_an_restart(pdata); + + return AXGBE_AN_INCOMPAT_LINK; +} + +static const char *axgbe_state_as_string(enum axgbe_an state) +{ + switch (state) { + case AXGBE_AN_READY: + return "Ready"; + case AXGBE_AN_PAGE_RECEIVED: + return "Page-Received"; + case AXGBE_AN_INCOMPAT_LINK: + return "Incompatible-Link"; + case AXGBE_AN_COMPLETE: + return "Complete"; + case AXGBE_AN_NO_LINK: + return "No-Link"; + case AXGBE_AN_ERROR: + return "Error"; + default: + return "Undefined"; + } +} + +static void axgbe_an73_state_machine(struct axgbe_port *pdata) +{ + enum axgbe_an cur_state = pdata->an_state; + + if (!pdata->an_int) + return; + +next_int: + if (pdata->an_int & AXGBE_AN_CL73_PG_RCV) { + pdata->an_state = AXGBE_AN_PAGE_RECEIVED; + pdata->an_int &= ~AXGBE_AN_CL73_PG_RCV; + } else if (pdata->an_int & AXGBE_AN_CL73_INC_LINK) { + pdata->an_state = AXGBE_AN_INCOMPAT_LINK; + pdata->an_int &= ~AXGBE_AN_CL73_INC_LINK; + } 
else if (pdata->an_int & AXGBE_AN_CL73_INT_CMPLT) { + pdata->an_state = AXGBE_AN_COMPLETE; + pdata->an_int &= ~AXGBE_AN_CL73_INT_CMPLT; + } else { + pdata->an_state = AXGBE_AN_ERROR; + } + + PMD_DRV_LOG(DEBUG, "CL73 AN : %s\n", + axgbe_state_as_string(pdata->an_state)); + +again: + cur_state = pdata->an_state; + + switch (pdata->an_state) { + case AXGBE_AN_READY: + pdata->an_supported = 0; + break; + case AXGBE_AN_PAGE_RECEIVED: + pdata->an_state = axgbe_an73_page_received(pdata); + pdata->an_supported++; + break; + case AXGBE_AN_INCOMPAT_LINK: + pdata->an_supported = 0; + pdata->parallel_detect = 0; + pdata->an_state = axgbe_an73_incompat_link(pdata); + break; + case AXGBE_AN_COMPLETE: + pdata->parallel_detect = pdata->an_supported ? 0 : 1; + break; + case AXGBE_AN_NO_LINK: + break; + default: + pdata->an_state = AXGBE_AN_ERROR; + } + + if (pdata->an_state == AXGBE_AN_NO_LINK) { + pdata->an_int = 0; + axgbe_an73_clear_interrupts(pdata); + pdata->eth_dev->data->dev_link.link_status = + ETH_LINK_DOWN; + } else if (pdata->an_state == AXGBE_AN_ERROR) { + PMD_DRV_LOG(ERR, "error during auto-negotiation, state=%u\n", + cur_state); + pdata->an_int = 0; + axgbe_an73_clear_interrupts(pdata); + } + + if (pdata->an_state >= AXGBE_AN_COMPLETE) { + pdata->an_result = pdata->an_state; + pdata->an_state = AXGBE_AN_READY; + pdata->kr_state = AXGBE_RX_BPA; + pdata->kx_state = AXGBE_RX_BPA; + pdata->an_start = 0; + if (pdata->phy_if.phy_impl.an_post) + pdata->phy_if.phy_impl.an_post(pdata); + + PMD_DRV_LOG(DEBUG, "CL73 AN result: %s\n", + axgbe_state_as_string(pdata->an_result)); + } + + if (cur_state != pdata->an_state) + goto again; + + if (pdata->an_int) + goto next_int; + + axgbe_an73_enable_interrupts(pdata); +} + +static void axgbe_an37_state_machine(struct axgbe_port *pdata) +{ + enum axgbe_an cur_state = pdata->an_state; + + if (!pdata->an_int) + return; + if (pdata->an_int & AXGBE_AN_CL37_INT_CMPLT) { + pdata->an_state = AXGBE_AN_COMPLETE; + pdata->an_int &= ~AXGBE_AN_CL37_INT_CMPLT; + + /* If SGMII is enabled, check the link status */ + if (pdata->an_mode == AXGBE_AN_MODE_CL37_SGMII && + !(pdata->an_status & AXGBE_SGMII_AN_LINK_STATUS)) + pdata->an_state = AXGBE_AN_NO_LINK; + } + + cur_state = pdata->an_state; + + switch (pdata->an_state) { + case AXGBE_AN_READY: + break; + case AXGBE_AN_COMPLETE: + break; + case AXGBE_AN_NO_LINK: + break; + default: + pdata->an_state = AXGBE_AN_ERROR; + break; + } + + if (pdata->an_state == AXGBE_AN_ERROR) { + PMD_DRV_LOG(ERR, "error during auto-negotiation, state=%u\n", + cur_state); + pdata->an_int = 0; + axgbe_an37_clear_interrupts(pdata); + } + + if (pdata->an_state >= AXGBE_AN_COMPLETE) { + pdata->an_result = pdata->an_state; + pdata->an_state = AXGBE_AN_READY; + if (pdata->phy_if.phy_impl.an_post) + pdata->phy_if.phy_impl.an_post(pdata); + } + + axgbe_an37_enable_interrupts(pdata); +} + +static void axgbe_an73_isr(struct axgbe_port *pdata) +{ + /* Disable AN interrupts */ + axgbe_an73_disable_interrupts(pdata); + + /* Save the interrupt(s) that fired */ + pdata->an_int = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_INT); + axgbe_an73_clear_interrupts(pdata); + + if (pdata->an_int) { + /* Clear the interrupt(s) that fired and process them */ + XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, ~pdata->an_int); + pthread_mutex_lock(&pdata->an_mutex); + axgbe_an73_state_machine(pdata); + pthread_mutex_unlock(&pdata->an_mutex); + } else { + /* Enable AN interrupts */ + axgbe_an73_enable_interrupts(pdata); + } +} + +static void axgbe_an37_isr(struct axgbe_port *pdata) +{ + 
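+ /* Mask the CL37 AN interrupts, latch the VEND2 AN status word and
+ * split it into interrupt bits (an_int) and link-status bits
+ * (an_status). If the completion interrupt fired, acknowledge it and
+ * run the CL37 state machine; otherwise simply unmask the interrupts
+ * again.
+ */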
unsigned int reg = 0; + /* Disable AN interrupts */ + axgbe_an37_disable_interrupts(pdata); + + /* Save the interrupt(s) that fired */ + reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_STAT); + pdata->an_int = reg & AXGBE_AN_CL37_INT_MASK; + pdata->an_status = reg & ~AXGBE_AN_CL37_INT_MASK; + axgbe_an37_clear_interrupts(pdata); + + if (pdata->an_int & 0x01) { + /* Clear the interrupt(s) that fired and process them */ + reg &= ~AXGBE_AN_CL37_INT_MASK; + XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_STAT, reg); + axgbe_an37_state_machine(pdata); + } else { + /* Enable AN interrupts */ + axgbe_an37_enable_interrupts(pdata); + } +} + +static void axgbe_an_isr(struct axgbe_port *pdata) +{ + PMD_DRV_LOG(DEBUG, "AN interrupt received\n"); + + switch (pdata->an_mode) { + case AXGBE_AN_MODE_CL73: + case AXGBE_AN_MODE_CL73_REDRV: + axgbe_an73_isr(pdata); + break; + case AXGBE_AN_MODE_CL37: + case AXGBE_AN_MODE_CL37_SGMII: + axgbe_an37_isr(pdata); + break; + default: + break; + } +} + +static void axgbe_an_combined_isr(struct axgbe_port *pdata) +{ + axgbe_an_isr(pdata); +} + +static void axgbe_an37_init(struct axgbe_port *pdata) +{ + unsigned int advertising; + unsigned int reg = 0; + + advertising = pdata->phy_if.phy_impl.an_advertising(pdata); + + reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_ADVERTISE); + if (advertising & ADVERTISED_Pause) + reg |= 0x100; + else + reg &= ~0x100; + if (advertising & ADVERTISED_Asym_Pause) + reg |= 0x80; + else + reg &= ~0x80; + + /* Full duplex, but not half */ + reg |= AXGBE_AN_CL37_FD_MASK; + reg &= ~AXGBE_AN_CL37_HD_MASK; + + XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_ADVERTISE, reg); + + /* Set up the Control register */ + reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_CTRL); + reg &= ~AXGBE_AN_CL37_TX_CONFIG_MASK; + reg &= ~AXGBE_AN_CL37_PCS_MODE_MASK; + + switch (pdata->an_mode) { + case AXGBE_AN_MODE_CL37: + reg |= AXGBE_AN_CL37_PCS_MODE_BASEX; + break; + case AXGBE_AN_MODE_CL37_SGMII: + reg |= AXGBE_AN_CL37_PCS_MODE_SGMII; + break; + default: + break; + } + reg |= AXGBE_AN_CL37_MII_CTRL_8BIT; + XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_CTRL, reg); +} + +static void axgbe_an73_init(struct axgbe_port *pdata) +{ + unsigned int advertising, reg; + + advertising = pdata->phy_if.phy_impl.an_advertising(pdata); + + /* Set up Advertisement register 3 first */ + reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2); + if (advertising & ADVERTISED_10000baseR_FEC) + reg |= 0xc000; + else + reg &= ~0xc000; + + XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2, reg); + + /* Set up Advertisement register 2 next */ + reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1); + if (advertising & ADVERTISED_10000baseKR_Full) + reg |= 0x80; + else + reg &= ~0x80; + + if ((advertising & ADVERTISED_1000baseKX_Full) || + (advertising & ADVERTISED_2500baseX_Full)) + reg |= 0x20; + else + reg &= ~0x20; + + XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1, reg); + + /* Set up Advertisement register 1 last */ + reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE); + if (advertising & ADVERTISED_Pause) + reg |= 0x400; + else + reg &= ~0x400; + + if (advertising & ADVERTISED_Asym_Pause) + reg |= 0x800; + else + reg &= ~0x800; + + /* We don't intend to perform XNP */ + reg &= ~AXGBE_XNP_NP_EXCHANGE; + + XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE, reg); + + PMD_DRV_LOG(DEBUG, "CL73 AN initialized\n"); +} + +static void axgbe_an_init(struct axgbe_port *pdata) +{ + /* Set up advertisement registers based on current settings 
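+ * The active AN mode is queried from the PHY implementation first;
+ * clause-73 modes are then handled by axgbe_an73_init() and
+ * clause-37/SGMII modes by axgbe_an37_init().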
*/ + pdata->an_mode = pdata->phy_if.phy_impl.an_mode(pdata); + switch (pdata->an_mode) { + case AXGBE_AN_MODE_CL73: + case AXGBE_AN_MODE_CL73_REDRV: + axgbe_an73_init(pdata); + break; + case AXGBE_AN_MODE_CL37: + case AXGBE_AN_MODE_CL37_SGMII: + axgbe_an37_init(pdata); + break; + default: + break; + } +} + +static void axgbe_phy_adjust_link(struct axgbe_port *pdata) +{ + if (pdata->phy.link) { + /* Flow control support */ + pdata->pause_autoneg = pdata->phy.pause_autoneg; + + if (pdata->tx_pause != (unsigned int)pdata->phy.tx_pause) { + pdata->hw_if.config_tx_flow_control(pdata); + pdata->tx_pause = pdata->phy.tx_pause; + } + + if (pdata->rx_pause != (unsigned int)pdata->phy.rx_pause) { + pdata->hw_if.config_rx_flow_control(pdata); + pdata->rx_pause = pdata->phy.rx_pause; + } + + /* Speed support */ + if (pdata->phy_speed != pdata->phy.speed) + pdata->phy_speed = pdata->phy.speed; + if (pdata->phy_link != pdata->phy.link) + pdata->phy_link = pdata->phy.link; + } else if (pdata->phy_link) { + pdata->phy_link = 0; + pdata->phy_speed = SPEED_UNKNOWN; + } +} + +static int axgbe_phy_config_fixed(struct axgbe_port *pdata) +{ + enum axgbe_mode mode; + + PMD_DRV_LOG(DEBUG, "fixed PHY configuration\n"); + + /* Disable auto-negotiation */ + axgbe_an_disable(pdata); + + /* Set specified mode for specified speed */ + mode = pdata->phy_if.phy_impl.get_mode(pdata, pdata->phy.speed); + switch (mode) { + case AXGBE_MODE_KX_1000: + case AXGBE_MODE_KX_2500: + case AXGBE_MODE_KR: + case AXGBE_MODE_SGMII_100: + case AXGBE_MODE_SGMII_1000: + case AXGBE_MODE_X: + case AXGBE_MODE_SFI: + break; + case AXGBE_MODE_UNKNOWN: + default: + return -EINVAL; + } + + /* Validate duplex mode */ + if (pdata->phy.duplex != DUPLEX_FULL) + return -EINVAL; + + axgbe_set_mode(pdata, mode); + + return 0; +} + +static int __axgbe_phy_config_aneg(struct axgbe_port *pdata) +{ + int ret; + + axgbe_set_bit(AXGBE_LINK_INIT, &pdata->dev_state); + pdata->link_check = rte_get_timer_cycles(); + + ret = pdata->phy_if.phy_impl.an_config(pdata); + if (ret) + return ret; + + if (pdata->phy.autoneg != AUTONEG_ENABLE) { + ret = axgbe_phy_config_fixed(pdata); + if (ret || !pdata->kr_redrv) + return ret; + PMD_DRV_LOG(DEBUG, "AN redriver support\n"); + } else { + PMD_DRV_LOG(DEBUG, "AN PHY configuration\n"); + } + + /* Disable auto-negotiation interrupt */ + rte_intr_disable(&pdata->pci_dev->intr_handle); + + /* Start auto-negotiation in a supported mode */ + if (axgbe_use_mode(pdata, AXGBE_MODE_KR)) { + axgbe_set_mode(pdata, AXGBE_MODE_KR); + } else if (axgbe_use_mode(pdata, AXGBE_MODE_KX_2500)) { + axgbe_set_mode(pdata, AXGBE_MODE_KX_2500); + } else if (axgbe_use_mode(pdata, AXGBE_MODE_KX_1000)) { + axgbe_set_mode(pdata, AXGBE_MODE_KX_1000); + } else if (axgbe_use_mode(pdata, AXGBE_MODE_SFI)) { + axgbe_set_mode(pdata, AXGBE_MODE_SFI); + } else if (axgbe_use_mode(pdata, AXGBE_MODE_X)) { + axgbe_set_mode(pdata, AXGBE_MODE_X); + } else if (axgbe_use_mode(pdata, AXGBE_MODE_SGMII_1000)) { + axgbe_set_mode(pdata, AXGBE_MODE_SGMII_1000); + } else if (axgbe_use_mode(pdata, AXGBE_MODE_SGMII_100)) { + axgbe_set_mode(pdata, AXGBE_MODE_SGMII_100); + } else { + rte_intr_enable(&pdata->pci_dev->intr_handle); + return -EINVAL; + } + + /* Disable and stop any in progress auto-negotiation */ + axgbe_an_disable_all(pdata); + + pdata->an_result = AXGBE_AN_READY; + pdata->an_state = AXGBE_AN_READY; + pdata->kr_state = AXGBE_RX_BPA; + pdata->kx_state = AXGBE_RX_BPA; + + /* Re-enable auto-negotiation interrupt */ + rte_intr_enable(&pdata->pci_dev->intr_handle); + 
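+ /* With the device interrupt re-armed, also unmask the CL37 AN
+ * interrupts, then program the advertisement registers for the
+ * selected mode and enable/restart auto-negotiation.
+ */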
axgbe_an37_enable_interrupts(pdata); + + axgbe_an_init(pdata); + axgbe_an_restart(pdata); + + return 0; +} + +static int axgbe_phy_config_aneg(struct axgbe_port *pdata) +{ + int ret; + + pthread_mutex_lock(&pdata->an_mutex); + + ret = __axgbe_phy_config_aneg(pdata); + if (ret) + axgbe_set_bit(AXGBE_LINK_ERR, &pdata->dev_state); + else + axgbe_clear_bit(AXGBE_LINK_ERR, &pdata->dev_state); + + pthread_mutex_unlock(&pdata->an_mutex); + + return ret; +} + +static bool axgbe_phy_aneg_done(struct axgbe_port *pdata) +{ + return pdata->an_result == AXGBE_AN_COMPLETE; +} + +static void axgbe_check_link_timeout(struct axgbe_port *pdata) +{ + unsigned long link_timeout; + unsigned long ticks; + + link_timeout = pdata->link_check + (AXGBE_LINK_TIMEOUT * + 2 * rte_get_timer_hz()); + ticks = rte_get_timer_cycles(); + if (time_after(ticks, link_timeout)) { + PMD_DRV_LOG(NOTICE, "AN link timeout\n"); + axgbe_phy_config_aneg(pdata); + } +} + +static enum axgbe_mode axgbe_phy_status_aneg(struct axgbe_port *pdata) +{ + return pdata->phy_if.phy_impl.an_outcome(pdata); +} + +static void axgbe_phy_status_result(struct axgbe_port *pdata) +{ + enum axgbe_mode mode; + + pdata->phy.lp_advertising = 0; + + if ((pdata->phy.autoneg != AUTONEG_ENABLE) || pdata->parallel_detect) + mode = axgbe_cur_mode(pdata); + else + mode = axgbe_phy_status_aneg(pdata); + + switch (mode) { + case AXGBE_MODE_SGMII_100: + pdata->phy.speed = SPEED_100; + break; + case AXGBE_MODE_X: + case AXGBE_MODE_KX_1000: + case AXGBE_MODE_SGMII_1000: + pdata->phy.speed = SPEED_1000; + break; + case AXGBE_MODE_KX_2500: + pdata->phy.speed = SPEED_2500; + break; + case AXGBE_MODE_KR: + case AXGBE_MODE_SFI: + pdata->phy.speed = SPEED_10000; + break; + case AXGBE_MODE_UNKNOWN: + default: + pdata->phy.speed = SPEED_UNKNOWN; + } + + pdata->phy.duplex = DUPLEX_FULL; + + axgbe_set_mode(pdata, mode); +} + +static int autoneg_time_out(unsigned long autoneg_start_time) +{ + unsigned long autoneg_timeout; + unsigned long ticks; + + autoneg_timeout = autoneg_start_time + (AXGBE_LINK_TIMEOUT * + 2 * rte_get_timer_hz()); + ticks = rte_get_timer_cycles(); + if (time_after(ticks, autoneg_timeout)) + return 1; + else + return 0; +} + +static void axgbe_phy_status(struct axgbe_port *pdata) +{ + unsigned int link_aneg; + int an_restart, ret; + unsigned int reg = 0; + unsigned long autoneg_start_time; + + if (axgbe_test_bit(AXGBE_LINK_ERR, &pdata->dev_state)) { + pdata->phy.link = 0; + goto adjust_link; + } + + link_aneg = (pdata->phy.autoneg == AUTONEG_ENABLE); + + pdata->phy.link = pdata->phy_if.phy_impl.link_status(pdata, + &an_restart); + if (an_restart) { + axgbe_phy_config_aneg(pdata); + return; + } + + if (pdata->phy.link) { + if (link_aneg && !axgbe_phy_aneg_done(pdata)) { + if (axgbe_cur_mode(pdata) == AXGBE_MODE_SGMII_1000) { + /* autoneg not complete, so re-initializing */ + /* and restarting it */ + axgbe_an_init(pdata); + axgbe_an_restart(pdata); + reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, + MDIO_VEND2_AN_STAT); + autoneg_start_time = rte_get_timer_cycles(); + /* poll for autoneg to complete */ + while (!(reg & AXGBE_AN_CL37_INT_CMPLT)) { + ret = + autoneg_time_out(autoneg_start_time); + if (ret) + break; + reg = XMDIO_READ(pdata, + MDIO_MMD_VEND2, + MDIO_VEND2_AN_STAT); + if (reg & AXGBE_AN_CL37_INT_CMPLT) { + axgbe_an37_isr(pdata); + break; + } + } + } else { + axgbe_check_link_timeout(pdata); + return; + } + } + axgbe_phy_status_result(pdata); + if (axgbe_test_bit(AXGBE_LINK_INIT, &pdata->dev_state)) + axgbe_clear_bit(AXGBE_LINK_INIT, &pdata->dev_state); + } 
else { + if (axgbe_test_bit(AXGBE_LINK_INIT, &pdata->dev_state)) { + axgbe_check_link_timeout(pdata); + + if (link_aneg) + return; + } + axgbe_phy_status_result(pdata); + } + +adjust_link: + axgbe_phy_adjust_link(pdata); +} + +static void axgbe_phy_stop(struct axgbe_port *pdata) +{ + PMD_DRV_LOG(DEBUG, "stopping PHY\n"); + if (!pdata->phy_started) + return; + /* Indicate the PHY is down */ + pdata->phy_started = 0; + /* Disable auto-negotiation */ + axgbe_an_disable_all(pdata); + pdata->phy_if.phy_impl.stop(pdata); + pdata->phy.link = 0; + axgbe_phy_adjust_link(pdata); +} + +static int axgbe_phy_start(struct axgbe_port *pdata) +{ + int ret; + + PMD_DRV_LOG(DEBUG, "starting PHY\n"); + + ret = pdata->phy_if.phy_impl.start(pdata); + if (ret) + return ret; + /* Set initial mode - call the mode setting routines + * directly to insure we are properly configured + */ + if (axgbe_use_mode(pdata, AXGBE_MODE_KR)) { + axgbe_kr_mode(pdata); + } else if (axgbe_use_mode(pdata, AXGBE_MODE_KX_2500)) { + axgbe_kx_2500_mode(pdata); + } else if (axgbe_use_mode(pdata, AXGBE_MODE_KX_1000)) { + axgbe_kx_1000_mode(pdata); + } else if (axgbe_use_mode(pdata, AXGBE_MODE_SFI)) { + axgbe_sfi_mode(pdata); + } else if (axgbe_use_mode(pdata, AXGBE_MODE_X)) { + axgbe_x_mode(pdata); + } else if (axgbe_use_mode(pdata, AXGBE_MODE_SGMII_1000)) { + axgbe_sgmii_1000_mode(pdata); + } else if (axgbe_use_mode(pdata, AXGBE_MODE_SGMII_100)) { + axgbe_sgmii_100_mode(pdata); + } else { + ret = -EINVAL; + goto err_stop; + } + /* Indicate the PHY is up and running */ + pdata->phy_started = 1; + axgbe_an_init(pdata); + axgbe_an_enable_interrupts(pdata); + return axgbe_phy_config_aneg(pdata); + +err_stop: + pdata->phy_if.phy_impl.stop(pdata); + + return ret; +} + +static int axgbe_phy_reset(struct axgbe_port *pdata) +{ + int ret; + + ret = pdata->phy_if.phy_impl.reset(pdata); + if (ret) + return ret; + + /* Disable auto-negotiation for now */ + axgbe_an_disable_all(pdata); + + /* Clear auto-negotiation interrupts */ + axgbe_an_clear_interrupts_all(pdata); + + return 0; +} + +static int axgbe_phy_best_advertised_speed(struct axgbe_port *pdata) +{ + if (pdata->phy.advertising & ADVERTISED_10000baseKR_Full) + return SPEED_10000; + else if (pdata->phy.advertising & ADVERTISED_10000baseT_Full) + return SPEED_10000; + else if (pdata->phy.advertising & ADVERTISED_2500baseX_Full) + return SPEED_2500; + else if (pdata->phy.advertising & ADVERTISED_1000baseKX_Full) + return SPEED_1000; + else if (pdata->phy.advertising & ADVERTISED_1000baseT_Full) + return SPEED_1000; + else if (pdata->phy.advertising & ADVERTISED_100baseT_Full) + return SPEED_100; + + return SPEED_UNKNOWN; +} + +static int axgbe_phy_init(struct axgbe_port *pdata) +{ + int ret; + + pdata->mdio_mmd = MDIO_MMD_PCS; + + /* Check for FEC support */ + pdata->fec_ability = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, + MDIO_PMA_10GBR_FECABLE); + pdata->fec_ability &= (MDIO_PMA_10GBR_FECABLE_ABLE | + MDIO_PMA_10GBR_FECABLE_ERRABLE); + + /* Setup the phy (including supported features) */ + ret = pdata->phy_if.phy_impl.init(pdata); + if (ret) + return ret; + pdata->phy.advertising = pdata->phy.supported; + + pdata->phy.address = 0; + + if (pdata->phy.advertising & ADVERTISED_Autoneg) { + pdata->phy.autoneg = AUTONEG_ENABLE; + pdata->phy.speed = SPEED_UNKNOWN; + pdata->phy.duplex = DUPLEX_UNKNOWN; + } else { + pdata->phy.autoneg = AUTONEG_DISABLE; + pdata->phy.speed = axgbe_phy_best_advertised_speed(pdata); + pdata->phy.duplex = DUPLEX_FULL; + } + + pdata->phy.link = 0; + + pdata->phy.pause_autoneg = 
pdata->pause_autoneg; + pdata->phy.tx_pause = pdata->tx_pause; + pdata->phy.rx_pause = pdata->rx_pause; + + /* Fix up Flow Control advertising */ + pdata->phy.advertising &= ~ADVERTISED_Pause; + pdata->phy.advertising &= ~ADVERTISED_Asym_Pause; + + if (pdata->rx_pause) { + pdata->phy.advertising |= ADVERTISED_Pause; + pdata->phy.advertising |= ADVERTISED_Asym_Pause; + } + + if (pdata->tx_pause) + pdata->phy.advertising ^= ADVERTISED_Asym_Pause; + return 0; +} + +void axgbe_init_function_ptrs_phy(struct axgbe_phy_if *phy_if) +{ + phy_if->phy_init = axgbe_phy_init; + phy_if->phy_reset = axgbe_phy_reset; + phy_if->phy_start = axgbe_phy_start; + phy_if->phy_stop = axgbe_phy_stop; + phy_if->phy_status = axgbe_phy_status; + phy_if->phy_config_aneg = axgbe_phy_config_aneg; + phy_if->an_isr = axgbe_an_combined_isr; +} diff --git a/src/spdk/dpdk/drivers/net/axgbe/axgbe_phy.h b/src/spdk/dpdk/drivers/net/axgbe/axgbe_phy.h new file mode 100644 index 000000000..77ee20a31 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/axgbe/axgbe_phy.h @@ -0,0 +1,192 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved. + * Copyright(c) 2018 Synopsys, Inc. All rights reserved. + */ + +#ifndef __AXGBE_PHY_H__ +#define __AXGBE_PHY_H__ + +#define SPEED_10 10 +#define SPEED_100 100 +#define SPEED_1000 1000 +#define SPEED_2500 2500 +#define SPEED_10000 10000 + + +/* Or MII_ADDR_C45 into regnum for read/write on mii_bus to enable the 21 bit + * IEEE 802.3ae clause 45 addressing mode used by 10GIGE phy chips. + */ +#define MII_ADDR_C45 (1 << 30) + +/* Basic mode status register. */ +#define BMSR_LSTATUS 0x0004 /* Link status */ + +/* Status register 1. */ +#define MDIO_STAT1_LSTATUS BMSR_LSTATUS + +/* Generic MII registers. */ +#define MII_BMCR 0x00 /* Basic mode control register */ +#define MII_BMSR 0x01 /* Basic mode status register */ +#define MII_PHYSID1 0x02 /* PHYS ID 1 */ +#define MII_PHYSID2 0x03 /* PHYS ID 2 */ +#define MII_ADVERTISE 0x04 /* Advertisement control reg */ +#define MII_LPA 0x05 /* Link partner ability reg */ +#define MII_EXPANSION 0x06 /* Expansion register */ +#define MII_CTRL1000 0x09 /* 1000BASE-T control */ +#define MII_STAT1000 0x0a /* 1000BASE-T status */ +#define MII_MMD_CTRL 0x0d /* MMD Access Control Register */ +#define MII_MMD_DATA 0x0e /* MMD Access Data Register */ +#define MII_ESTATUS 0x0f /* Extended Status */ +#define MII_DCOUNTER 0x12 /* Disconnect counter */ +#define MII_FCSCOUNTER 0x13 /* False carrier counter */ +#define MII_NWAYTEST 0x14 /* N-way auto-neg test reg */ +#define MII_RERRCOUNTER 0x15 /* Receive error counter */ +#define MII_SREVISION 0x16 /* Silicon revision */ +#define MII_RESV1 0x17 /* Reserved... */ +#define MII_LBRERROR 0x18 /* Lpback, rx, bypass error */ +#define MII_PHYADDR 0x19 /* PHY address */ +#define MII_RESV2 0x1a /* Reserved... */ +#define MII_TPISTATUS 0x1b /* TPI status for 10mbps */ +#define MII_NCONFIG 0x1c /* Network interface config */ + +/* Basic mode control register. */ +#define BMCR_RESV 0x003f /* Unused... 
*/ +#define BMCR_SPEED1000 0x0040 /* MSB of Speed (1000) */ +#define BMCR_CTST 0x0080 /* Collision test */ +#define BMCR_FULLDPLX 0x0100 /* Full duplex */ +#define BMCR_ANRESTART 0x0200 /* Auto negotiation restart */ +#define BMCR_ISOLATE 0x0400 /* Isolate data paths from MII */ +#define BMCR_PDOWN 0x0800 /* Enable low power state */ +#define BMCR_ANENABLE 0x1000 /* Enable auto negotiation */ +#define BMCR_SPEED100 0x2000 /* Select 100Mbps */ +#define BMCR_LOOPBACK 0x4000 /* TXD loopback bits */ +#define BMCR_RESET 0x8000 /* Reset to default state */ +#define BMCR_SPEED10 0x0000 /* Select 10Mbps */ + + +/* MDIO Manageable Devices (MMDs). */ +#define MDIO_MMD_PMAPMD 1 /* Physical Medium Attachment + * Physical Medium Dependent + */ +#define MDIO_MMD_WIS 2 /* WAN Interface Sublayer */ +#define MDIO_MMD_PCS 3 /* Physical Coding Sublayer */ +#define MDIO_MMD_PHYXS 4 /* PHY Extender Sublayer */ +#define MDIO_MMD_DTEXS 5 /* DTE Extender Sublayer */ +#define MDIO_MMD_TC 6 /* Transmission Convergence */ +#define MDIO_MMD_AN 7 /* Auto-Negotiation */ +#define MDIO_MMD_C22EXT 29 /* Clause 22 extension */ +#define MDIO_MMD_VEND1 30 /* Vendor specific 1 */ +#define MDIO_MMD_VEND2 31 /* Vendor specific 2 */ + +/* Generic MDIO registers. */ +#define MDIO_CTRL1 MII_BMCR +#define MDIO_STAT1 MII_BMSR +#define MDIO_DEVID1 MII_PHYSID1 +#define MDIO_DEVID2 MII_PHYSID2 +#define MDIO_SPEED 4 /* Speed ability */ +#define MDIO_DEVS1 5 /* Devices in package */ +#define MDIO_DEVS2 6 +#define MDIO_CTRL2 7 /* 10G control 2 */ +#define MDIO_STAT2 8 /* 10G status 2 */ +#define MDIO_PMA_TXDIS 9 /* 10G PMA/PMD transmit disable */ +#define MDIO_PMA_RXDET 10 /* 10G PMA/PMD receive signal detect */ +#define MDIO_PMA_EXTABLE 11 /* 10G PMA/PMD extended ability */ +#define MDIO_PKGID1 14 /* Package identifier */ +#define MDIO_PKGID2 15 +#define MDIO_AN_ADVERTISE 16 /* AN advertising (base page) */ +#define MDIO_AN_LPA 19 /* AN LP abilities (base page) */ +#define MDIO_PCS_EEE_ABLE 20 /* EEE Capability register */ +#define MDIO_PCS_EEE_WK_ERR 22 /* EEE wake error counter */ +#define MDIO_PHYXS_LNSTAT 24 /* PHY XGXS lane state */ +#define MDIO_AN_EEE_ADV 60 /* EEE advertisement */ +#define MDIO_AN_EEE_LPABLE 61 /* EEE link partner ability */ + +/* Media-dependent registers. */ +#define MDIO_PMA_10GBT_SWAPPOL 130 /* 10GBASE-T pair swap & polarity */ +#define MDIO_PMA_10GBT_TXPWR 131 /* 10GBASE-T TX power control */ +#define MDIO_PMA_10GBT_SNR 133 /* 10GBASE-T SNR margin, lane A. + * Lanes B-D are numbered 134-136. + */ +#define MDIO_PMA_10GBR_FECABLE 170 /* 10GBASE-R FEC ability */ +#define MDIO_PCS_10GBX_STAT1 24 /* 10GBASE-X PCS status 1 */ +#define MDIO_PCS_10GBRT_STAT1 32 /* 10GBASE-R/-T PCS status 1 */ +#define MDIO_PCS_10GBRT_STAT2 33 /* 10GBASE-R/-T PCS status 2 */ +#define MDIO_AN_10GBT_CTRL 32 /* 10GBASE-T auto-negotiation control */ +#define MDIO_AN_10GBT_STAT 33 /* 10GBASE-T auto-negotiation status */ + +/* Control register 1. 
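+ * The clause-45 control register 1 keeps the clause-22 BMCR bit layout,
+ * so the MDIO_* control bits below are defined as aliases of the
+ * corresponding BMCR_* values.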
*/ +/* Enable extended speed selection */ +#define MDIO_CTRL1_SPEEDSELEXT (BMCR_SPEED1000 | BMCR_SPEED100) +/* All speed selection bits */ +#define MDIO_CTRL1_SPEEDSEL (MDIO_CTRL1_SPEEDSELEXT | 0x003c) +#define MDIO_CTRL1_FULLDPLX BMCR_FULLDPLX +#define MDIO_CTRL1_LPOWER BMCR_PDOWN +#define MDIO_CTRL1_RESET BMCR_RESET +#define MDIO_PMA_CTRL1_LOOPBACK 0x0001 +#define MDIO_PMA_CTRL1_SPEED1000 BMCR_SPEED1000 +#define MDIO_PMA_CTRL1_SPEED100 BMCR_SPEED100 +#define MDIO_PCS_CTRL1_LOOPBACK BMCR_LOOPBACK +#define MDIO_PHYXS_CTRL1_LOOPBACK BMCR_LOOPBACK +#define MDIO_AN_CTRL1_RESTART BMCR_ANRESTART +#define MDIO_AN_CTRL1_ENABLE BMCR_ANENABLE +#define MDIO_AN_CTRL1_XNP 0x2000 /* Enable extended next page */ +#define MDIO_PCS_CTRL1_CLKSTOP_EN 0x400 /* Stop the clock during LPI */ + + + + + +/* PMA 10GBASE-R FEC ability register. */ +#define MDIO_PMA_10GBR_FECABLE_ABLE 0x0001 /* FEC ability */ +#define MDIO_PMA_10GBR_FECABLE_ERRABLE 0x0002 /* FEC error indic. ability */ + + +/* Autoneg related */ +#define ADVERTISED_Autoneg (1 << 6) +#define SUPPORTED_Autoneg (1 << 6) +#define AUTONEG_DISABLE 0x00 +#define AUTONEG_ENABLE 0x01 + +#define ADVERTISED_Pause (1 << 13) +#define ADVERTISED_Asym_Pause (1 << 14) + +#define SUPPORTED_Pause (1 << 13) +#define SUPPORTED_Asym_Pause (1 << 14) + +#define SUPPORTED_Backplane (1 << 16) +#define SUPPORTED_TP (1 << 7) + +#define ADVERTISED_10000baseR_FEC (1 << 20) + +#define SUPPORTED_10000baseR_FEC (1 << 20) + +#define SUPPORTED_FIBRE (1 << 10) + +#define ADVERTISED_10000baseKR_Full (1 << 19) +#define ADVERTISED_10000baseT_Full (1 << 12) +#define ADVERTISED_2500baseX_Full (1 << 15) +#define ADVERTISED_1000baseKX_Full (1 << 17) +#define ADVERTISED_1000baseT_Full (1 << 5) +#define ADVERTISED_100baseT_Full (1 << 3) +#define ADVERTISED_TP (1 << 7) +#define ADVERTISED_FIBRE (1 << 10) +#define ADVERTISED_Backplane (1 << 16) + +#define SUPPORTED_1000baseKX_Full (1 << 17) +#define SUPPORTED_10000baseKR_Full (1 << 19) +#define SUPPORTED_2500baseX_Full (1 << 15) +#define SUPPORTED_100baseT_Full (1 << 2) +#define SUPPORTED_1000baseT_Full (1 << 5) +#define SUPPORTED_10000baseT_Full (1 << 12) +#define SUPPORTED_2500baseX_Full (1 << 15) + + +#define SPEED_UNKNOWN -1 + +/* Duplex, half or full. */ +#define DUPLEX_HALF 0x00 +#define DUPLEX_FULL 0x01 +#define DUPLEX_UNKNOWN 0xff + +#endif +/* PHY */ diff --git a/src/spdk/dpdk/drivers/net/axgbe/axgbe_phy_impl.c b/src/spdk/dpdk/drivers/net/axgbe/axgbe_phy_impl.c new file mode 100644 index 000000000..02236ec19 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/axgbe/axgbe_phy_impl.c @@ -0,0 +1,2315 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved. + * Copyright(c) 2018 Synopsys, Inc. All rights reserved. 
+ */ + +#include "axgbe_ethdev.h" +#include "axgbe_common.h" +#include "axgbe_phy.h" + +#define AXGBE_PHY_PORT_SPEED_100 BIT(0) +#define AXGBE_PHY_PORT_SPEED_1000 BIT(1) +#define AXGBE_PHY_PORT_SPEED_2500 BIT(2) +#define AXGBE_PHY_PORT_SPEED_10000 BIT(3) + +#define AXGBE_MUTEX_RELEASE 0x80000000 + +#define AXGBE_SFP_DIRECT 7 + +/* I2C target addresses */ +#define AXGBE_SFP_SERIAL_ID_ADDRESS 0x50 +#define AXGBE_SFP_DIAG_INFO_ADDRESS 0x51 +#define AXGBE_SFP_PHY_ADDRESS 0x56 +#define AXGBE_GPIO_ADDRESS_PCA9555 0x20 + +/* SFP sideband signal indicators */ +#define AXGBE_GPIO_NO_TX_FAULT BIT(0) +#define AXGBE_GPIO_NO_RATE_SELECT BIT(1) +#define AXGBE_GPIO_NO_MOD_ABSENT BIT(2) +#define AXGBE_GPIO_NO_RX_LOS BIT(3) + +/* Rate-change complete wait/retry count */ +#define AXGBE_RATECHANGE_COUNT 500 + +/* CDR delay values for KR support (in usec) */ +#define AXGBE_CDR_DELAY_INIT 10000 +#define AXGBE_CDR_DELAY_INC 10000 +#define AXGBE_CDR_DELAY_MAX 100000 + +enum axgbe_port_mode { + AXGBE_PORT_MODE_RSVD = 0, + AXGBE_PORT_MODE_BACKPLANE, + AXGBE_PORT_MODE_BACKPLANE_2500, + AXGBE_PORT_MODE_1000BASE_T, + AXGBE_PORT_MODE_1000BASE_X, + AXGBE_PORT_MODE_NBASE_T, + AXGBE_PORT_MODE_10GBASE_T, + AXGBE_PORT_MODE_10GBASE_R, + AXGBE_PORT_MODE_SFP, + AXGBE_PORT_MODE_MAX, +}; + +enum axgbe_conn_type { + AXGBE_CONN_TYPE_NONE = 0, + AXGBE_CONN_TYPE_SFP, + AXGBE_CONN_TYPE_MDIO, + AXGBE_CONN_TYPE_RSVD1, + AXGBE_CONN_TYPE_BACKPLANE, + AXGBE_CONN_TYPE_MAX, +}; + +/* SFP/SFP+ related definitions */ +enum axgbe_sfp_comm { + AXGBE_SFP_COMM_DIRECT = 0, + AXGBE_SFP_COMM_PCA9545, +}; + +enum axgbe_sfp_cable { + AXGBE_SFP_CABLE_UNKNOWN = 0, + AXGBE_SFP_CABLE_ACTIVE, + AXGBE_SFP_CABLE_PASSIVE, +}; + +enum axgbe_sfp_base { + AXGBE_SFP_BASE_UNKNOWN = 0, + AXGBE_SFP_BASE_1000_T, + AXGBE_SFP_BASE_1000_SX, + AXGBE_SFP_BASE_1000_LX, + AXGBE_SFP_BASE_1000_CX, + AXGBE_SFP_BASE_10000_SR, + AXGBE_SFP_BASE_10000_LR, + AXGBE_SFP_BASE_10000_LRM, + AXGBE_SFP_BASE_10000_ER, + AXGBE_SFP_BASE_10000_CR, +}; + +enum axgbe_sfp_speed { + AXGBE_SFP_SPEED_UNKNOWN = 0, + AXGBE_SFP_SPEED_100_1000, + AXGBE_SFP_SPEED_1000, + AXGBE_SFP_SPEED_10000, +}; + +/* SFP Serial ID Base ID values relative to an offset of 0 */ +#define AXGBE_SFP_BASE_ID 0 +#define AXGBE_SFP_ID_SFP 0x03 + +#define AXGBE_SFP_BASE_EXT_ID 1 +#define AXGBE_SFP_EXT_ID_SFP 0x04 + +#define AXGBE_SFP_BASE_10GBE_CC 3 +#define AXGBE_SFP_BASE_10GBE_CC_SR BIT(4) +#define AXGBE_SFP_BASE_10GBE_CC_LR BIT(5) +#define AXGBE_SFP_BASE_10GBE_CC_LRM BIT(6) +#define AXGBE_SFP_BASE_10GBE_CC_ER BIT(7) + +#define AXGBE_SFP_BASE_1GBE_CC 6 +#define AXGBE_SFP_BASE_1GBE_CC_SX BIT(0) +#define AXGBE_SFP_BASE_1GBE_CC_LX BIT(1) +#define AXGBE_SFP_BASE_1GBE_CC_CX BIT(2) +#define AXGBE_SFP_BASE_1GBE_CC_T BIT(3) + +#define AXGBE_SFP_BASE_CABLE 8 +#define AXGBE_SFP_BASE_CABLE_PASSIVE BIT(2) +#define AXGBE_SFP_BASE_CABLE_ACTIVE BIT(3) + +#define AXGBE_SFP_BASE_BR 12 +#define AXGBE_SFP_BASE_BR_1GBE_MIN 0x0a +#define AXGBE_SFP_BASE_BR_1GBE_MAX 0x0d +#define AXGBE_SFP_BASE_BR_10GBE_MIN 0x64 +#define AXGBE_SFP_BASE_BR_10GBE_MAX 0x68 + +#define AXGBE_SFP_BASE_CU_CABLE_LEN 18 + +#define AXGBE_SFP_BASE_VENDOR_NAME 20 +#define AXGBE_SFP_BASE_VENDOR_NAME_LEN 16 +#define AXGBE_SFP_BASE_VENDOR_PN 40 +#define AXGBE_SFP_BASE_VENDOR_PN_LEN 16 +#define AXGBE_SFP_BASE_VENDOR_REV 56 +#define AXGBE_SFP_BASE_VENDOR_REV_LEN 4 + +#define AXGBE_SFP_BASE_CC 63 + +/* SFP Serial ID Extended ID values relative to an offset of 64 */ +#define AXGBE_SFP_BASE_VENDOR_SN 4 +#define AXGBE_SFP_BASE_VENDOR_SN_LEN 16 + +#define AXGBE_SFP_EXTD_DIAG 28 
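+/* Extended byte 28 (byte 92 of the serial ID EEPROM) is the SFF-8472
+ * Diagnostic Monitoring Type field; bit 2 indicates that an address-change
+ * sequence is required before the diagnostic page can be read.
+ */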
+#define AXGBE_SFP_EXTD_DIAG_ADDR_CHANGE BIT(2) + +#define AXGBE_SFP_EXTD_SFF_8472 30 + +#define AXGBE_SFP_EXTD_CC 31 + +struct axgbe_sfp_eeprom { + u8 base[64]; + u8 extd[32]; + u8 vendor[32]; +}; + +#define AXGBE_BEL_FUSE_VENDOR "BEL-FUSE" +#define AXGBE_BEL_FUSE_PARTNO "1GBT-SFP06" + +struct axgbe_sfp_ascii { + union { + char vendor[AXGBE_SFP_BASE_VENDOR_NAME_LEN + 1]; + char partno[AXGBE_SFP_BASE_VENDOR_PN_LEN + 1]; + char rev[AXGBE_SFP_BASE_VENDOR_REV_LEN + 1]; + char serno[AXGBE_SFP_BASE_VENDOR_SN_LEN + 1]; + } u; +}; + +/* MDIO PHY reset types */ +enum axgbe_mdio_reset { + AXGBE_MDIO_RESET_NONE = 0, + AXGBE_MDIO_RESET_I2C_GPIO, + AXGBE_MDIO_RESET_INT_GPIO, + AXGBE_MDIO_RESET_MAX, +}; + +/* Re-driver related definitions */ +enum axgbe_phy_redrv_if { + AXGBE_PHY_REDRV_IF_MDIO = 0, + AXGBE_PHY_REDRV_IF_I2C, + AXGBE_PHY_REDRV_IF_MAX, +}; + +enum axgbe_phy_redrv_model { + AXGBE_PHY_REDRV_MODEL_4223 = 0, + AXGBE_PHY_REDRV_MODEL_4227, + AXGBE_PHY_REDRV_MODEL_MAX, +}; + +enum axgbe_phy_redrv_mode { + AXGBE_PHY_REDRV_MODE_CX = 5, + AXGBE_PHY_REDRV_MODE_SR = 9, +}; + +#define AXGBE_PHY_REDRV_MODE_REG 0x12b0 + +/* PHY related configuration information */ +struct axgbe_phy_data { + enum axgbe_port_mode port_mode; + + unsigned int port_id; + + unsigned int port_speeds; + + enum axgbe_conn_type conn_type; + + enum axgbe_mode cur_mode; + enum axgbe_mode start_mode; + + unsigned int rrc_count; + + unsigned int mdio_addr; + + unsigned int comm_owned; + + /* SFP Support */ + enum axgbe_sfp_comm sfp_comm; + unsigned int sfp_mux_address; + unsigned int sfp_mux_channel; + + unsigned int sfp_gpio_address; + unsigned int sfp_gpio_mask; + unsigned int sfp_gpio_rx_los; + unsigned int sfp_gpio_tx_fault; + unsigned int sfp_gpio_mod_absent; + unsigned int sfp_gpio_rate_select; + + unsigned int sfp_rx_los; + unsigned int sfp_tx_fault; + unsigned int sfp_mod_absent; + unsigned int sfp_diags; + unsigned int sfp_changed; + unsigned int sfp_phy_avail; + unsigned int sfp_cable_len; + enum axgbe_sfp_base sfp_base; + enum axgbe_sfp_cable sfp_cable; + enum axgbe_sfp_speed sfp_speed; + struct axgbe_sfp_eeprom sfp_eeprom; + + /* External PHY support */ + enum axgbe_mdio_mode phydev_mode; + enum axgbe_mdio_reset mdio_reset; + unsigned int mdio_reset_addr; + unsigned int mdio_reset_gpio; + + /* Re-driver support */ + unsigned int redrv; + unsigned int redrv_if; + unsigned int redrv_addr; + unsigned int redrv_lane; + unsigned int redrv_model; + + /* KR AN support */ + unsigned int phy_cdr_notrack; + unsigned int phy_cdr_delay; +}; + +static enum axgbe_an_mode axgbe_phy_an_mode(struct axgbe_port *pdata); + +static int axgbe_phy_i2c_xfer(struct axgbe_port *pdata, + struct axgbe_i2c_op *i2c_op) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + + /* Be sure we own the bus */ + if (!phy_data->comm_owned) + return -EIO; + + return pdata->i2c_if.i2c_xfer(pdata, i2c_op); +} + +static int axgbe_phy_redrv_write(struct axgbe_port *pdata, unsigned int reg, + unsigned int val) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + struct axgbe_i2c_op i2c_op; + uint16_t *redrv_val; + u8 redrv_data[5], csum; + unsigned int i, retry; + int ret; + + /* High byte of register contains read/write indicator */ + redrv_data[0] = ((reg >> 8) & 0xff) << 1; + redrv_data[1] = reg & 0xff; + redrv_val = (uint16_t *)&redrv_data[2]; + *redrv_val = rte_cpu_to_be_16(val); + + /* Calculate 1 byte checksum */ + csum = 0; + for (i = 0; i < 4; i++) { + csum += redrv_data[i]; + if (redrv_data[i] > csum) + csum++; + } + redrv_data[4] = ~csum; + + 
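+ /* Send the 5-byte command (register with read/write indicator, value and
+ * checksum) to the re-driver over I2C, then read back a single status
+ * byte; 0xff means the re-driver accepted the write. Each transfer is
+ * retried once on -EAGAIN.
+ */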
retry = 1;
+again1:
+ i2c_op.cmd = AXGBE_I2C_CMD_WRITE;
+ i2c_op.target = phy_data->redrv_addr;
+ i2c_op.len = sizeof(redrv_data);
+ i2c_op.buf = redrv_data;
+ ret = axgbe_phy_i2c_xfer(pdata, &i2c_op);
+ if (ret) {
+ if ((ret == -EAGAIN) && retry--)
+ goto again1;
+
+ return ret;
+ }
+
+ retry = 1;
+again2:
+ i2c_op.cmd = AXGBE_I2C_CMD_READ;
+ i2c_op.target = phy_data->redrv_addr;
+ i2c_op.len = 1;
+ i2c_op.buf = redrv_data;
+ ret = axgbe_phy_i2c_xfer(pdata, &i2c_op);
+ if (ret) {
+ if ((ret == -EAGAIN) && retry--)
+ goto again2;
+
+ return ret;
+ }
+
+ if (redrv_data[0] != 0xff) {
+ PMD_DRV_LOG(ERR, "Redriver write checksum error\n");
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
+static int axgbe_phy_i2c_read(struct axgbe_port *pdata, unsigned int target,
+ void *reg, unsigned int reg_len,
+ void *val, unsigned int val_len)
+{
+ struct axgbe_i2c_op i2c_op;
+ int retry, ret;
+
+ retry = 1;
+again1:
+ /* Set the specified register to read */
+ i2c_op.cmd = AXGBE_I2C_CMD_WRITE;
+ i2c_op.target = target;
+ i2c_op.len = reg_len;
+ i2c_op.buf = reg;
+ ret = axgbe_phy_i2c_xfer(pdata, &i2c_op);
+ if (ret) {
+ if ((ret == -EAGAIN) && retry--)
+ goto again1;
+
+ return ret;
+ }
+
+ retry = 1;
+again2:
+ /* Read the specified register */
+ i2c_op.cmd = AXGBE_I2C_CMD_READ;
+ i2c_op.target = target;
+ i2c_op.len = val_len;
+ i2c_op.buf = val;
+ ret = axgbe_phy_i2c_xfer(pdata, &i2c_op);
+ if ((ret == -EAGAIN) && retry--)
+ goto again2;
+
+ return ret;
+}
+
+static int axgbe_phy_sfp_put_mux(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+ struct axgbe_i2c_op i2c_op;
+ uint8_t mux_channel;
+
+ if (phy_data->sfp_comm == AXGBE_SFP_COMM_DIRECT)
+ return 0;
+
+ /* Select no mux channels */
+ mux_channel = 0;
+ i2c_op.cmd = AXGBE_I2C_CMD_WRITE;
+ i2c_op.target = phy_data->sfp_mux_address;
+ i2c_op.len = sizeof(mux_channel);
+ i2c_op.buf = &mux_channel;
+
+ return axgbe_phy_i2c_xfer(pdata, &i2c_op);
+}
+
+static int axgbe_phy_sfp_get_mux(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+ struct axgbe_i2c_op i2c_op;
+ u8 mux_channel;
+
+ if (phy_data->sfp_comm == AXGBE_SFP_COMM_DIRECT)
+ return 0;
+
+ /* Select desired mux channel */
+ mux_channel = 1 << phy_data->sfp_mux_channel;
+ i2c_op.cmd = AXGBE_I2C_CMD_WRITE;
+ i2c_op.target = phy_data->sfp_mux_address;
+ i2c_op.len = sizeof(mux_channel);
+ i2c_op.buf = &mux_channel;
+
+ return axgbe_phy_i2c_xfer(pdata, &i2c_op);
+}
+
+static void axgbe_phy_put_comm_ownership(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+
+ phy_data->comm_owned = 0;
+
+ pthread_mutex_unlock(&pdata->phy_mutex);
+}
+
+static int axgbe_phy_get_comm_ownership(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+ uint64_t timeout;
+ unsigned int mutex_id;
+
+ /* The I2C and MDIO/GPIO bus is multiplexed between multiple devices, so
+ * the driver needs to take the software mutex and then the hardware
+ * mutexes before being able to use the buses.
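+ * The pthread mutex serializes callers within this driver, while the
+ * hardware mutex registers (written with the port ID below) arbitrate
+ * the shared bus among the ports and devices that use it.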
+ */ + pthread_mutex_lock(&pdata->phy_mutex); + + if (phy_data->comm_owned) + return 0; + + /* Clear the mutexes */ + XP_IOWRITE(pdata, XP_I2C_MUTEX, AXGBE_MUTEX_RELEASE); + XP_IOWRITE(pdata, XP_MDIO_MUTEX, AXGBE_MUTEX_RELEASE); + + /* Mutex formats are the same for I2C and MDIO/GPIO */ + mutex_id = 0; + XP_SET_BITS(mutex_id, XP_I2C_MUTEX, ID, phy_data->port_id); + XP_SET_BITS(mutex_id, XP_I2C_MUTEX, ACTIVE, 1); + + timeout = rte_get_timer_cycles() + (rte_get_timer_hz() * 5); + while (time_before(rte_get_timer_cycles(), timeout)) { + /* Must be all zeroes in order to obtain the mutex */ + if (XP_IOREAD(pdata, XP_I2C_MUTEX) || + XP_IOREAD(pdata, XP_MDIO_MUTEX)) { + rte_delay_us(100); + continue; + } + + /* Obtain the mutex */ + XP_IOWRITE(pdata, XP_I2C_MUTEX, mutex_id); + XP_IOWRITE(pdata, XP_MDIO_MUTEX, mutex_id); + + phy_data->comm_owned = 1; + return 0; + } + + pthread_mutex_unlock(&pdata->phy_mutex); + + PMD_DRV_LOG(ERR, "unable to obtain hardware mutexes\n"); + + return -ETIMEDOUT; +} + +static void axgbe_phy_sfp_phy_settings(struct axgbe_port *pdata) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + + if (phy_data->sfp_mod_absent) { + pdata->phy.speed = SPEED_UNKNOWN; + pdata->phy.duplex = DUPLEX_UNKNOWN; + pdata->phy.autoneg = AUTONEG_ENABLE; + pdata->phy.advertising = pdata->phy.supported; + } + + pdata->phy.advertising &= ~ADVERTISED_Autoneg; + pdata->phy.advertising &= ~ADVERTISED_TP; + pdata->phy.advertising &= ~ADVERTISED_FIBRE; + pdata->phy.advertising &= ~ADVERTISED_100baseT_Full; + pdata->phy.advertising &= ~ADVERTISED_1000baseT_Full; + pdata->phy.advertising &= ~ADVERTISED_10000baseT_Full; + pdata->phy.advertising &= ~ADVERTISED_10000baseR_FEC; + + switch (phy_data->sfp_base) { + case AXGBE_SFP_BASE_1000_T: + case AXGBE_SFP_BASE_1000_SX: + case AXGBE_SFP_BASE_1000_LX: + case AXGBE_SFP_BASE_1000_CX: + pdata->phy.speed = SPEED_UNKNOWN; + pdata->phy.duplex = DUPLEX_UNKNOWN; + pdata->phy.autoneg = AUTONEG_ENABLE; + pdata->phy.advertising |= ADVERTISED_Autoneg; + break; + case AXGBE_SFP_BASE_10000_SR: + case AXGBE_SFP_BASE_10000_LR: + case AXGBE_SFP_BASE_10000_LRM: + case AXGBE_SFP_BASE_10000_ER: + case AXGBE_SFP_BASE_10000_CR: + default: + pdata->phy.speed = SPEED_10000; + pdata->phy.duplex = DUPLEX_FULL; + pdata->phy.autoneg = AUTONEG_DISABLE; + break; + } + + switch (phy_data->sfp_base) { + case AXGBE_SFP_BASE_1000_T: + case AXGBE_SFP_BASE_1000_CX: + case AXGBE_SFP_BASE_10000_CR: + pdata->phy.advertising |= ADVERTISED_TP; + break; + default: + pdata->phy.advertising |= ADVERTISED_FIBRE; + } + + switch (phy_data->sfp_speed) { + case AXGBE_SFP_SPEED_100_1000: + if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_100) + pdata->phy.advertising |= ADVERTISED_100baseT_Full; + if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000) + pdata->phy.advertising |= ADVERTISED_1000baseT_Full; + break; + case AXGBE_SFP_SPEED_1000: + if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000) + pdata->phy.advertising |= ADVERTISED_1000baseT_Full; + break; + case AXGBE_SFP_SPEED_10000: + if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_10000) + pdata->phy.advertising |= ADVERTISED_10000baseT_Full; + break; + default: + /* Choose the fastest supported speed */ + if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_10000) + pdata->phy.advertising |= ADVERTISED_10000baseT_Full; + else if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000) + pdata->phy.advertising |= ADVERTISED_1000baseT_Full; + else if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_100) + pdata->phy.advertising |= 
ADVERTISED_100baseT_Full; + } +} + +static bool axgbe_phy_sfp_bit_rate(struct axgbe_sfp_eeprom *sfp_eeprom, + enum axgbe_sfp_speed sfp_speed) +{ + u8 *sfp_base, min, max; + + sfp_base = sfp_eeprom->base; + + switch (sfp_speed) { + case AXGBE_SFP_SPEED_1000: + min = AXGBE_SFP_BASE_BR_1GBE_MIN; + max = AXGBE_SFP_BASE_BR_1GBE_MAX; + break; + case AXGBE_SFP_SPEED_10000: + min = AXGBE_SFP_BASE_BR_10GBE_MIN; + max = AXGBE_SFP_BASE_BR_10GBE_MAX; + break; + default: + return false; + } + + return ((sfp_base[AXGBE_SFP_BASE_BR] >= min) && + (sfp_base[AXGBE_SFP_BASE_BR] <= max)); +} + +static void axgbe_phy_sfp_external_phy(struct axgbe_port *pdata) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + + if (!phy_data->sfp_changed) + return; + + phy_data->sfp_phy_avail = 0; + + if (phy_data->sfp_base != AXGBE_SFP_BASE_1000_T) + return; +} + +static bool axgbe_phy_belfuse_parse_quirks(struct axgbe_port *pdata) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + struct axgbe_sfp_eeprom *sfp_eeprom = &phy_data->sfp_eeprom; + + if (memcmp(&sfp_eeprom->base[AXGBE_SFP_BASE_VENDOR_NAME], + AXGBE_BEL_FUSE_VENDOR, strlen(AXGBE_BEL_FUSE_VENDOR))) + return false; + + if (!memcmp(&sfp_eeprom->base[AXGBE_SFP_BASE_VENDOR_PN], + AXGBE_BEL_FUSE_PARTNO, strlen(AXGBE_BEL_FUSE_PARTNO))) { + phy_data->sfp_base = AXGBE_SFP_BASE_1000_SX; + phy_data->sfp_cable = AXGBE_SFP_CABLE_ACTIVE; + phy_data->sfp_speed = AXGBE_SFP_SPEED_1000; + return true; + } + + return false; +} + +static bool axgbe_phy_sfp_parse_quirks(struct axgbe_port *pdata) +{ + if (axgbe_phy_belfuse_parse_quirks(pdata)) + return true; + + return false; +} + +static void axgbe_phy_sfp_parse_eeprom(struct axgbe_port *pdata) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + struct axgbe_sfp_eeprom *sfp_eeprom = &phy_data->sfp_eeprom; + uint8_t *sfp_base; + + sfp_base = sfp_eeprom->base; + + if (sfp_base[AXGBE_SFP_BASE_ID] != AXGBE_SFP_ID_SFP) + return; + + if (sfp_base[AXGBE_SFP_BASE_EXT_ID] != AXGBE_SFP_EXT_ID_SFP) + return; + + axgbe_phy_sfp_parse_quirks(pdata); + + /* Assume ACTIVE cable unless told it is PASSIVE */ + if (sfp_base[AXGBE_SFP_BASE_CABLE] & AXGBE_SFP_BASE_CABLE_PASSIVE) { + phy_data->sfp_cable = AXGBE_SFP_CABLE_PASSIVE; + phy_data->sfp_cable_len = sfp_base[AXGBE_SFP_BASE_CU_CABLE_LEN]; + } else { + phy_data->sfp_cable = AXGBE_SFP_CABLE_ACTIVE; + } + + /* Determine the type of SFP */ + if (sfp_base[AXGBE_SFP_BASE_10GBE_CC] & AXGBE_SFP_BASE_10GBE_CC_SR) + phy_data->sfp_base = AXGBE_SFP_BASE_10000_SR; + else if (sfp_base[AXGBE_SFP_BASE_10GBE_CC] & AXGBE_SFP_BASE_10GBE_CC_LR) + phy_data->sfp_base = AXGBE_SFP_BASE_10000_LR; + else if (sfp_base[AXGBE_SFP_BASE_10GBE_CC] & + AXGBE_SFP_BASE_10GBE_CC_LRM) + phy_data->sfp_base = AXGBE_SFP_BASE_10000_LRM; + else if (sfp_base[AXGBE_SFP_BASE_10GBE_CC] & AXGBE_SFP_BASE_10GBE_CC_ER) + phy_data->sfp_base = AXGBE_SFP_BASE_10000_ER; + else if (sfp_base[AXGBE_SFP_BASE_1GBE_CC] & AXGBE_SFP_BASE_1GBE_CC_SX) + phy_data->sfp_base = AXGBE_SFP_BASE_1000_SX; + else if (sfp_base[AXGBE_SFP_BASE_1GBE_CC] & AXGBE_SFP_BASE_1GBE_CC_LX) + phy_data->sfp_base = AXGBE_SFP_BASE_1000_LX; + else if (sfp_base[AXGBE_SFP_BASE_1GBE_CC] & AXGBE_SFP_BASE_1GBE_CC_CX) + phy_data->sfp_base = AXGBE_SFP_BASE_1000_CX; + else if (sfp_base[AXGBE_SFP_BASE_1GBE_CC] & AXGBE_SFP_BASE_1GBE_CC_T) + phy_data->sfp_base = AXGBE_SFP_BASE_1000_T; + else if ((phy_data->sfp_cable == AXGBE_SFP_CABLE_PASSIVE) && + axgbe_phy_sfp_bit_rate(sfp_eeprom, AXGBE_SFP_SPEED_10000)) + phy_data->sfp_base = AXGBE_SFP_BASE_10000_CR; + + switch 
(phy_data->sfp_base) { + case AXGBE_SFP_BASE_1000_T: + phy_data->sfp_speed = AXGBE_SFP_SPEED_100_1000; + break; + case AXGBE_SFP_BASE_1000_SX: + case AXGBE_SFP_BASE_1000_LX: + case AXGBE_SFP_BASE_1000_CX: + phy_data->sfp_speed = AXGBE_SFP_SPEED_1000; + break; + case AXGBE_SFP_BASE_10000_SR: + case AXGBE_SFP_BASE_10000_LR: + case AXGBE_SFP_BASE_10000_LRM: + case AXGBE_SFP_BASE_10000_ER: + case AXGBE_SFP_BASE_10000_CR: + phy_data->sfp_speed = AXGBE_SFP_SPEED_10000; + break; + default: + break; + } +} + +static bool axgbe_phy_sfp_verify_eeprom(uint8_t cc_in, uint8_t *buf, + unsigned int len) +{ + uint8_t cc; + + for (cc = 0; len; buf++, len--) + cc += *buf; + + return (cc == cc_in) ? true : false; +} + +static int axgbe_phy_sfp_read_eeprom(struct axgbe_port *pdata) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + struct axgbe_sfp_eeprom sfp_eeprom; + uint8_t eeprom_addr; + int ret; + + ret = axgbe_phy_sfp_get_mux(pdata); + if (ret) { + PMD_DRV_LOG(ERR, "I2C error setting SFP MUX\n"); + return ret; + } + + /* Read the SFP serial ID eeprom */ + eeprom_addr = 0; + ret = axgbe_phy_i2c_read(pdata, AXGBE_SFP_SERIAL_ID_ADDRESS, + &eeprom_addr, sizeof(eeprom_addr), + &sfp_eeprom, sizeof(sfp_eeprom)); + if (ret) { + PMD_DRV_LOG(ERR, "I2C error reading SFP EEPROM\n"); + goto put; + } + + /* Validate the contents read */ + if (!axgbe_phy_sfp_verify_eeprom(sfp_eeprom.base[AXGBE_SFP_BASE_CC], + sfp_eeprom.base, + sizeof(sfp_eeprom.base) - 1)) { + ret = -EINVAL; + goto put; + } + + if (!axgbe_phy_sfp_verify_eeprom(sfp_eeprom.extd[AXGBE_SFP_EXTD_CC], + sfp_eeprom.extd, + sizeof(sfp_eeprom.extd) - 1)) { + ret = -EINVAL; + goto put; + } + + /* Check for an added or changed SFP */ + if (memcmp(&phy_data->sfp_eeprom, &sfp_eeprom, sizeof(sfp_eeprom))) { + phy_data->sfp_changed = 1; + memcpy(&phy_data->sfp_eeprom, &sfp_eeprom, sizeof(sfp_eeprom)); + + if (sfp_eeprom.extd[AXGBE_SFP_EXTD_SFF_8472]) { + uint8_t diag_type; + diag_type = sfp_eeprom.extd[AXGBE_SFP_EXTD_DIAG]; + + if (!(diag_type & AXGBE_SFP_EXTD_DIAG_ADDR_CHANGE)) + phy_data->sfp_diags = 1; + } + } else { + phy_data->sfp_changed = 0; + } + +put: + axgbe_phy_sfp_put_mux(pdata); + + return ret; +} + +static void axgbe_phy_sfp_signals(struct axgbe_port *pdata) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + unsigned int gpio_input; + u8 gpio_reg, gpio_ports[2]; + int ret; + + /* Read the input port registers */ + gpio_reg = 0; + ret = axgbe_phy_i2c_read(pdata, phy_data->sfp_gpio_address, + &gpio_reg, sizeof(gpio_reg), + gpio_ports, sizeof(gpio_ports)); + if (ret) { + PMD_DRV_LOG(ERR, "I2C error reading SFP GPIOs\n"); + return; + } + + gpio_input = (gpio_ports[1] << 8) | gpio_ports[0]; + + if (phy_data->sfp_gpio_mask & AXGBE_GPIO_NO_MOD_ABSENT) { + /* No GPIO, just assume the module is present for now */ + phy_data->sfp_mod_absent = 0; + } else { + if (!(gpio_input & (1 << phy_data->sfp_gpio_mod_absent))) + phy_data->sfp_mod_absent = 0; + } + + if (!(phy_data->sfp_gpio_mask & AXGBE_GPIO_NO_RX_LOS) && + (gpio_input & (1 << phy_data->sfp_gpio_rx_los))) + phy_data->sfp_rx_los = 1; + + if (!(phy_data->sfp_gpio_mask & AXGBE_GPIO_NO_TX_FAULT) && + (gpio_input & (1 << phy_data->sfp_gpio_tx_fault))) + phy_data->sfp_tx_fault = 1; +} + +static void axgbe_phy_sfp_mod_absent(struct axgbe_port *pdata) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + + phy_data->sfp_mod_absent = 1; + phy_data->sfp_phy_avail = 0; + memset(&phy_data->sfp_eeprom, 0, sizeof(phy_data->sfp_eeprom)); +} + +static void axgbe_phy_sfp_reset(struct axgbe_phy_data 
*phy_data) +{ + phy_data->sfp_rx_los = 0; + phy_data->sfp_tx_fault = 0; + phy_data->sfp_mod_absent = 1; + phy_data->sfp_diags = 0; + phy_data->sfp_base = AXGBE_SFP_BASE_UNKNOWN; + phy_data->sfp_cable = AXGBE_SFP_CABLE_UNKNOWN; + phy_data->sfp_speed = AXGBE_SFP_SPEED_UNKNOWN; +} + +static const char *axgbe_base_as_string(enum axgbe_sfp_base sfp_base) +{ + switch (sfp_base) { + case AXGBE_SFP_BASE_1000_T: + return "1G_T"; + case AXGBE_SFP_BASE_1000_SX: + return "1G_SX"; + case AXGBE_SFP_BASE_1000_LX: + return "1G_LX"; + case AXGBE_SFP_BASE_1000_CX: + return "1G_CX"; + case AXGBE_SFP_BASE_10000_SR: + return "10G_SR"; + case AXGBE_SFP_BASE_10000_LR: + return "10G_LR"; + case AXGBE_SFP_BASE_10000_LRM: + return "10G_LRM"; + case AXGBE_SFP_BASE_10000_ER: + return "10G_ER"; + case AXGBE_SFP_BASE_10000_CR: + return "10G_CR"; + default: + return "Unknown"; + } +} + +static void axgbe_phy_sfp_detect(struct axgbe_port *pdata) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + int ret; + + /* Reset the SFP signals and info */ + axgbe_phy_sfp_reset(phy_data); + + ret = axgbe_phy_get_comm_ownership(pdata); + if (ret) + return; + + /* Read the SFP signals and check for module presence */ + axgbe_phy_sfp_signals(pdata); + if (phy_data->sfp_mod_absent) { + axgbe_phy_sfp_mod_absent(pdata); + goto put; + } + + ret = axgbe_phy_sfp_read_eeprom(pdata); + if (ret) { + /* Treat any error as if there isn't an SFP plugged in */ + axgbe_phy_sfp_reset(phy_data); + axgbe_phy_sfp_mod_absent(pdata); + goto put; + } + + axgbe_phy_sfp_parse_eeprom(pdata); + axgbe_phy_sfp_external_phy(pdata); + + PMD_DRV_LOG(DEBUG, "SFP Base: %s\n", + axgbe_base_as_string(phy_data->sfp_base)); + +put: + axgbe_phy_sfp_phy_settings(pdata); + axgbe_phy_put_comm_ownership(pdata); +} + +static void axgbe_phy_phydev_flowctrl(struct axgbe_port *pdata) +{ + pdata->phy.tx_pause = 0; + pdata->phy.rx_pause = 0; +} + +static enum axgbe_mode axgbe_phy_an73_redrv_outcome(struct axgbe_port *pdata) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + enum axgbe_mode mode; + unsigned int ad_reg, lp_reg; + + pdata->phy.lp_advertising |= ADVERTISED_Autoneg; + pdata->phy.lp_advertising |= ADVERTISED_Backplane; + + /* Use external PHY to determine flow control */ + if (pdata->phy.pause_autoneg) + axgbe_phy_phydev_flowctrl(pdata); + + /* Compare Advertisement and Link Partner register 2 */ + ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1); + lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 1); + if (lp_reg & 0x80) + pdata->phy.lp_advertising |= ADVERTISED_10000baseKR_Full; + if (lp_reg & 0x20) + pdata->phy.lp_advertising |= ADVERTISED_1000baseKX_Full; + + ad_reg &= lp_reg; + if (ad_reg & 0x80) { + switch (phy_data->port_mode) { + case AXGBE_PORT_MODE_BACKPLANE: + mode = AXGBE_MODE_KR; + break; + default: + mode = AXGBE_MODE_SFI; + break; + } + } else if (ad_reg & 0x20) { + switch (phy_data->port_mode) { + case AXGBE_PORT_MODE_BACKPLANE: + mode = AXGBE_MODE_KX_1000; + break; + case AXGBE_PORT_MODE_1000BASE_X: + mode = AXGBE_MODE_X; + break; + case AXGBE_PORT_MODE_SFP: + switch (phy_data->sfp_base) { + case AXGBE_SFP_BASE_1000_T: + mode = AXGBE_MODE_SGMII_1000; + break; + case AXGBE_SFP_BASE_1000_SX: + case AXGBE_SFP_BASE_1000_LX: + case AXGBE_SFP_BASE_1000_CX: + default: + mode = AXGBE_MODE_X; + break; + } + break; + default: + mode = AXGBE_MODE_SGMII_1000; + break; + } + } else { + mode = AXGBE_MODE_UNKNOWN; + } + + /* Compare Advertisement and Link Partner register 3 */ + ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE 
+ 2); + lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 2); + if (lp_reg & 0xc000) + pdata->phy.lp_advertising |= ADVERTISED_10000baseR_FEC; + + return mode; +} + +static enum axgbe_mode axgbe_phy_an73_outcome(struct axgbe_port *pdata) +{ + enum axgbe_mode mode; + unsigned int ad_reg, lp_reg; + + pdata->phy.lp_advertising |= ADVERTISED_Autoneg; + pdata->phy.lp_advertising |= ADVERTISED_Backplane; + + /* Compare Advertisement and Link Partner register 1 */ + ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE); + lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA); + if (lp_reg & 0x400) + pdata->phy.lp_advertising |= ADVERTISED_Pause; + if (lp_reg & 0x800) + pdata->phy.lp_advertising |= ADVERTISED_Asym_Pause; + + if (pdata->phy.pause_autoneg) { + /* Set flow control based on auto-negotiation result */ + pdata->phy.tx_pause = 0; + pdata->phy.rx_pause = 0; + + if (ad_reg & lp_reg & 0x400) { + pdata->phy.tx_pause = 1; + pdata->phy.rx_pause = 1; + } else if (ad_reg & lp_reg & 0x800) { + if (ad_reg & 0x400) + pdata->phy.rx_pause = 1; + else if (lp_reg & 0x400) + pdata->phy.tx_pause = 1; + } + } + + /* Compare Advertisement and Link Partner register 2 */ + ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1); + lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 1); + if (lp_reg & 0x80) + pdata->phy.lp_advertising |= ADVERTISED_10000baseKR_Full; + if (lp_reg & 0x20) + pdata->phy.lp_advertising |= ADVERTISED_1000baseKX_Full; + + ad_reg &= lp_reg; + if (ad_reg & 0x80) + mode = AXGBE_MODE_KR; + else if (ad_reg & 0x20) + mode = AXGBE_MODE_KX_1000; + else + mode = AXGBE_MODE_UNKNOWN; + + /* Compare Advertisement and Link Partner register 3 */ + ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2); + lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 2); + if (lp_reg & 0xc000) + pdata->phy.lp_advertising |= ADVERTISED_10000baseR_FEC; + + return mode; +} + +static enum axgbe_mode axgbe_phy_an37_sgmii_outcome(struct axgbe_port *pdata) +{ + enum axgbe_mode mode; + + pdata->phy.lp_advertising |= ADVERTISED_Autoneg; + pdata->phy.lp_advertising |= ADVERTISED_1000baseT_Full; + + if (pdata->phy.pause_autoneg) + axgbe_phy_phydev_flowctrl(pdata); + + switch (pdata->an_status & AXGBE_SGMII_AN_LINK_SPEED) { + case AXGBE_SGMII_AN_LINK_SPEED_100: + if (pdata->an_status & AXGBE_SGMII_AN_LINK_DUPLEX) { + pdata->phy.lp_advertising |= ADVERTISED_100baseT_Full; + mode = AXGBE_MODE_SGMII_100; + } else { + mode = AXGBE_MODE_UNKNOWN; + } + break; + case AXGBE_SGMII_AN_LINK_SPEED_1000: + if (pdata->an_status & AXGBE_SGMII_AN_LINK_DUPLEX) { + pdata->phy.lp_advertising |= ADVERTISED_1000baseT_Full; + mode = AXGBE_MODE_SGMII_1000; + } else { + /* Half-duplex not supported */ + mode = AXGBE_MODE_UNKNOWN; + } + break; + default: + mode = AXGBE_MODE_UNKNOWN; + break; + } + return mode; +} + +static enum axgbe_mode axgbe_phy_an_outcome(struct axgbe_port *pdata) +{ + switch (pdata->an_mode) { + case AXGBE_AN_MODE_CL73: + return axgbe_phy_an73_outcome(pdata); + case AXGBE_AN_MODE_CL73_REDRV: + return axgbe_phy_an73_redrv_outcome(pdata); + case AXGBE_AN_MODE_CL37: + case AXGBE_AN_MODE_CL37_SGMII: + return axgbe_phy_an37_sgmii_outcome(pdata); + default: + return AXGBE_MODE_UNKNOWN; + } +} + +static unsigned int axgbe_phy_an_advertising(struct axgbe_port *pdata) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + unsigned int advertising; + + /* Without a re-driver, just return current advertising */ + if (!phy_data->redrv) + return pdata->phy.advertising; + + /* With the KR re-driver we need 
to advertise a single speed */
+ advertising = pdata->phy.advertising;
+ advertising &= ~ADVERTISED_1000baseKX_Full;
+ advertising &= ~ADVERTISED_10000baseKR_Full;
+
+ switch (phy_data->port_mode) {
+ case AXGBE_PORT_MODE_BACKPLANE:
+ advertising |= ADVERTISED_10000baseKR_Full;
+ break;
+ case AXGBE_PORT_MODE_BACKPLANE_2500:
+ advertising |= ADVERTISED_1000baseKX_Full;
+ break;
+ case AXGBE_PORT_MODE_1000BASE_T:
+ case AXGBE_PORT_MODE_1000BASE_X:
+ case AXGBE_PORT_MODE_NBASE_T:
+ advertising |= ADVERTISED_1000baseKX_Full;
+ break;
+ case AXGBE_PORT_MODE_10GBASE_T:
+ PMD_DRV_LOG(ERR, "10GBASE_T mode is not supported\n");
+ break;
+ case AXGBE_PORT_MODE_10GBASE_R:
+ advertising |= ADVERTISED_10000baseKR_Full;
+ break;
+ case AXGBE_PORT_MODE_SFP:
+ switch (phy_data->sfp_base) {
+ case AXGBE_SFP_BASE_1000_T:
+ case AXGBE_SFP_BASE_1000_SX:
+ case AXGBE_SFP_BASE_1000_LX:
+ case AXGBE_SFP_BASE_1000_CX:
+ advertising |= ADVERTISED_1000baseKX_Full;
+ break;
+ default:
+ advertising |= ADVERTISED_10000baseKR_Full;
+ break;
+ }
+ break;
+ default:
+ advertising |= ADVERTISED_10000baseKR_Full;
+ break;
+ }
+
+ return advertising;
+}
+
+static int axgbe_phy_an_config(struct axgbe_port *pdata __rte_unused)
+{
+ /* Dummy API since there is no case to support
+ * external PHY devices registered through kernel APIs
+ */
+ return 0;
+}
+
+static enum axgbe_an_mode axgbe_phy_an_sfp_mode(struct axgbe_phy_data *phy_data)
+{
+ switch (phy_data->sfp_base) {
+ case AXGBE_SFP_BASE_1000_T:
+ return AXGBE_AN_MODE_CL37_SGMII;
+ case AXGBE_SFP_BASE_1000_SX:
+ case AXGBE_SFP_BASE_1000_LX:
+ case AXGBE_SFP_BASE_1000_CX:
+ return AXGBE_AN_MODE_CL37;
+ default:
+ return AXGBE_AN_MODE_NONE;
+ }
+}
+
+static enum axgbe_an_mode axgbe_phy_an_mode(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+
+ /* A KR re-driver will always require CL73 AN */
+ if (phy_data->redrv)
+ return AXGBE_AN_MODE_CL73_REDRV;
+
+ switch (phy_data->port_mode) {
+ case AXGBE_PORT_MODE_BACKPLANE:
+ return AXGBE_AN_MODE_CL73;
+ case AXGBE_PORT_MODE_BACKPLANE_2500:
+ return AXGBE_AN_MODE_NONE;
+ case AXGBE_PORT_MODE_1000BASE_T:
+ return AXGBE_AN_MODE_CL37_SGMII;
+ case AXGBE_PORT_MODE_1000BASE_X:
+ return AXGBE_AN_MODE_CL37;
+ case AXGBE_PORT_MODE_NBASE_T:
+ return AXGBE_AN_MODE_CL37_SGMII;
+ case AXGBE_PORT_MODE_10GBASE_T:
+ return AXGBE_AN_MODE_CL73;
+ case AXGBE_PORT_MODE_10GBASE_R:
+ return AXGBE_AN_MODE_NONE;
+ case AXGBE_PORT_MODE_SFP:
+ return axgbe_phy_an_sfp_mode(phy_data);
+ default:
+ return AXGBE_AN_MODE_NONE;
+ }
+}
+
+static int axgbe_phy_set_redrv_mode_mdio(struct axgbe_port *pdata,
+ enum axgbe_phy_redrv_mode mode)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+ u16 redrv_reg, redrv_val;
+
+ redrv_reg = AXGBE_PHY_REDRV_MODE_REG + (phy_data->redrv_lane * 0x1000);
+ redrv_val = (u16)mode;
+
+ return pdata->hw_if.write_ext_mii_regs(pdata, phy_data->redrv_addr,
+ redrv_reg, redrv_val);
+}
+
+static int axgbe_phy_set_redrv_mode_i2c(struct axgbe_port *pdata,
+ enum axgbe_phy_redrv_mode mode)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+ unsigned int redrv_reg;
+ int ret;
+
+ /* Calculate the register to write */
+ redrv_reg = AXGBE_PHY_REDRV_MODE_REG + (phy_data->redrv_lane * 0x1000);
+
+ ret = axgbe_phy_redrv_write(pdata, redrv_reg, mode);
+
+ return ret;
+}
+
+static void axgbe_phy_set_redrv_mode(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+ enum axgbe_phy_redrv_mode mode;
+ int ret;
+
+ if (!phy_data->redrv)
+ return;
+
+ mode =
AXGBE_PHY_REDRV_MODE_CX; + if ((phy_data->port_mode == AXGBE_PORT_MODE_SFP) && + (phy_data->sfp_base != AXGBE_SFP_BASE_1000_CX) && + (phy_data->sfp_base != AXGBE_SFP_BASE_10000_CR)) + mode = AXGBE_PHY_REDRV_MODE_SR; + + ret = axgbe_phy_get_comm_ownership(pdata); + if (ret) + return; + + if (phy_data->redrv_if) + axgbe_phy_set_redrv_mode_i2c(pdata, mode); + else + axgbe_phy_set_redrv_mode_mdio(pdata, mode); + + axgbe_phy_put_comm_ownership(pdata); +} + +static void axgbe_phy_start_ratechange(struct axgbe_port *pdata) +{ + /* Log if a previous command did not complete */ + if (XP_IOREAD_BITS(pdata, XP_DRIVER_INT_RO, STATUS)) + PMD_DRV_LOG(NOTICE, "firmware mailbox not ready for command\n"); + else + return; +} + +static void axgbe_phy_complete_ratechange(struct axgbe_port *pdata) +{ + unsigned int wait; + + /* Wait for command to complete */ + wait = AXGBE_RATECHANGE_COUNT; + while (wait--) { + if (!XP_IOREAD_BITS(pdata, XP_DRIVER_INT_RO, STATUS)) + return; + + rte_delay_us(1500); + } + PMD_DRV_LOG(NOTICE, "firmware mailbox command did not complete\n"); +} + +static void axgbe_phy_rrc(struct axgbe_port *pdata) +{ + unsigned int s0; + + axgbe_phy_start_ratechange(pdata); + + /* Receiver Reset Cycle */ + s0 = 0; + XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, COMMAND, 5); + XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 0); + + /* Call FW to make the change */ + XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_0, s0); + XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_1, 0); + XP_IOWRITE_BITS(pdata, XP_DRIVER_INT_REQ, REQUEST, 1); + + axgbe_phy_complete_ratechange(pdata); + + PMD_DRV_LOG(DEBUG, "receiver reset complete\n"); +} + +static void axgbe_phy_power_off(struct axgbe_port *pdata) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + + axgbe_phy_start_ratechange(pdata); + + /* Call FW to make the change */ + XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_0, 0); + XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_1, 0); + XP_IOWRITE_BITS(pdata, XP_DRIVER_INT_REQ, REQUEST, 1); + axgbe_phy_complete_ratechange(pdata); + phy_data->cur_mode = AXGBE_MODE_UNKNOWN; + + PMD_DRV_LOG(DEBUG, "phy powered off\n"); +} + +static void axgbe_phy_sfi_mode(struct axgbe_port *pdata) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + unsigned int s0; + + axgbe_phy_set_redrv_mode(pdata); + + axgbe_phy_start_ratechange(pdata); + + /* 10G/SFI */ + s0 = 0; + XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, COMMAND, 3); + if (phy_data->sfp_cable != AXGBE_SFP_CABLE_PASSIVE) { + XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 0); + } else { + if (phy_data->sfp_cable_len <= 1) + XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 1); + else if (phy_data->sfp_cable_len <= 3) + XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 2); + else + XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 3); + } + + /* Call FW to make the change */ + XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_0, s0); + XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_1, 0); + XP_IOWRITE_BITS(pdata, XP_DRIVER_INT_REQ, REQUEST, 1); + axgbe_phy_complete_ratechange(pdata); + phy_data->cur_mode = AXGBE_MODE_SFI; + + PMD_DRV_LOG(DEBUG, "10GbE SFI mode set\n"); +} + +static void axgbe_phy_kr_mode(struct axgbe_port *pdata) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + unsigned int s0; + + axgbe_phy_set_redrv_mode(pdata); + + axgbe_phy_start_ratechange(pdata); + + /* 10G/KR */ + s0 = 0; + XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, COMMAND, 4); + XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 0); + + /* Call FW to make the change */ + XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_0, s0); + XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_1, 0); + 
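+ /* Setting the REQUEST bit hands the command in the scratch registers to
+ * the firmware; axgbe_phy_complete_ratechange() then polls for completion.
+ */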
XP_IOWRITE_BITS(pdata, XP_DRIVER_INT_REQ, REQUEST, 1); + axgbe_phy_complete_ratechange(pdata); + phy_data->cur_mode = AXGBE_MODE_KR; + + PMD_DRV_LOG(DEBUG, "10GbE KR mode set\n"); +} + +static void axgbe_phy_kx_2500_mode(struct axgbe_port *pdata) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + unsigned int s0; + + axgbe_phy_set_redrv_mode(pdata); + /* 2.5G/KX */ + axgbe_phy_start_ratechange(pdata); + s0 = 0; + XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, COMMAND, 2); + XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 0); + + XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_0, s0); + XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_1, 0); + + XP_IOWRITE_BITS(pdata, XP_DRIVER_INT_REQ, REQUEST, 1); + + phy_data->cur_mode = AXGBE_MODE_KX_2500; +} + +static void axgbe_phy_sgmii_1000_mode(struct axgbe_port *pdata) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + unsigned int s0; + + axgbe_phy_set_redrv_mode(pdata); + + /* 1G/SGMII */ + axgbe_phy_start_ratechange(pdata); + s0 = 0; + XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, COMMAND, 1); + XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 2); + + XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_0, s0); + XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_1, 0); + + XP_IOWRITE_BITS(pdata, XP_DRIVER_INT_REQ, REQUEST, 1); + + phy_data->cur_mode = AXGBE_MODE_SGMII_1000; +} + +static enum axgbe_mode axgbe_phy_cur_mode(struct axgbe_port *pdata) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + + return phy_data->cur_mode; +} + +static enum axgbe_mode axgbe_phy_switch_baset_mode(struct axgbe_port *pdata) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + + /* No switching if not 10GBase-T */ + if (phy_data->port_mode != AXGBE_PORT_MODE_10GBASE_T) + return axgbe_phy_cur_mode(pdata); + + switch (axgbe_phy_cur_mode(pdata)) { + case AXGBE_MODE_SGMII_100: + case AXGBE_MODE_SGMII_1000: + return AXGBE_MODE_KR; + case AXGBE_MODE_KR: + default: + return AXGBE_MODE_SGMII_1000; + } +} + +static enum axgbe_mode axgbe_phy_switch_bp_2500_mode(struct axgbe_port *pdata + __rte_unused) +{ + return AXGBE_MODE_KX_2500; +} + +static enum axgbe_mode axgbe_phy_switch_bp_mode(struct axgbe_port *pdata) +{ + /* If we are in KR switch to KX, and vice-versa */ + switch (axgbe_phy_cur_mode(pdata)) { + case AXGBE_MODE_KX_1000: + return AXGBE_MODE_KR; + case AXGBE_MODE_KR: + default: + return AXGBE_MODE_KX_1000; + } +} + +static enum axgbe_mode axgbe_phy_switch_mode(struct axgbe_port *pdata) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + + switch (phy_data->port_mode) { + case AXGBE_PORT_MODE_BACKPLANE: + return axgbe_phy_switch_bp_mode(pdata); + case AXGBE_PORT_MODE_BACKPLANE_2500: + return axgbe_phy_switch_bp_2500_mode(pdata); + case AXGBE_PORT_MODE_1000BASE_T: + case AXGBE_PORT_MODE_NBASE_T: + case AXGBE_PORT_MODE_10GBASE_T: + return axgbe_phy_switch_baset_mode(pdata); + case AXGBE_PORT_MODE_1000BASE_X: + case AXGBE_PORT_MODE_10GBASE_R: + case AXGBE_PORT_MODE_SFP: + /* No switching, so just return current mode */ + return axgbe_phy_cur_mode(pdata); + default: + return AXGBE_MODE_UNKNOWN; + } +} + +static enum axgbe_mode axgbe_phy_get_basex_mode(struct axgbe_phy_data *phy_data + __rte_unused, + int speed) +{ + switch (speed) { + case SPEED_1000: + return AXGBE_MODE_X; + case SPEED_10000: + return AXGBE_MODE_KR; + default: + return AXGBE_MODE_UNKNOWN; + } +} + +static enum axgbe_mode axgbe_phy_get_baset_mode(struct axgbe_phy_data *phy_data + __rte_unused, + int speed) +{ + switch (speed) { + case SPEED_100: + return AXGBE_MODE_SGMII_100; + case SPEED_1000: + return AXGBE_MODE_SGMII_1000; + case 
SPEED_10000: + return AXGBE_MODE_KR; + default: + return AXGBE_MODE_UNKNOWN; + } +} + +static enum axgbe_mode axgbe_phy_get_sfp_mode(struct axgbe_phy_data *phy_data, + int speed) +{ + switch (speed) { + case SPEED_100: + return AXGBE_MODE_SGMII_100; + case SPEED_1000: + if (phy_data->sfp_base == AXGBE_SFP_BASE_1000_T) + return AXGBE_MODE_SGMII_1000; + else + return AXGBE_MODE_X; + case SPEED_10000: + case SPEED_UNKNOWN: + return AXGBE_MODE_SFI; + default: + return AXGBE_MODE_UNKNOWN; + } +} + +static enum axgbe_mode axgbe_phy_get_bp_2500_mode(int speed) +{ + switch (speed) { + case SPEED_2500: + return AXGBE_MODE_KX_2500; + default: + return AXGBE_MODE_UNKNOWN; + } +} + +static enum axgbe_mode axgbe_phy_get_bp_mode(int speed) +{ + switch (speed) { + case SPEED_1000: + return AXGBE_MODE_KX_1000; + case SPEED_10000: + return AXGBE_MODE_KR; + default: + return AXGBE_MODE_UNKNOWN; + } +} + +static enum axgbe_mode axgbe_phy_get_mode(struct axgbe_port *pdata, + int speed) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + + switch (phy_data->port_mode) { + case AXGBE_PORT_MODE_BACKPLANE: + return axgbe_phy_get_bp_mode(speed); + case AXGBE_PORT_MODE_BACKPLANE_2500: + return axgbe_phy_get_bp_2500_mode(speed); + case AXGBE_PORT_MODE_1000BASE_T: + case AXGBE_PORT_MODE_NBASE_T: + case AXGBE_PORT_MODE_10GBASE_T: + return axgbe_phy_get_baset_mode(phy_data, speed); + case AXGBE_PORT_MODE_1000BASE_X: + case AXGBE_PORT_MODE_10GBASE_R: + return axgbe_phy_get_basex_mode(phy_data, speed); + case AXGBE_PORT_MODE_SFP: + return axgbe_phy_get_sfp_mode(phy_data, speed); + default: + return AXGBE_MODE_UNKNOWN; + } +} + +static void axgbe_phy_set_mode(struct axgbe_port *pdata, enum axgbe_mode mode) +{ + switch (mode) { + case AXGBE_MODE_KR: + axgbe_phy_kr_mode(pdata); + break; + case AXGBE_MODE_SFI: + axgbe_phy_sfi_mode(pdata); + break; + case AXGBE_MODE_KX_2500: + axgbe_phy_kx_2500_mode(pdata); + break; + case AXGBE_MODE_SGMII_1000: + axgbe_phy_sgmii_1000_mode(pdata); + break; + default: + break; + } +} + +static bool axgbe_phy_check_mode(struct axgbe_port *pdata, + enum axgbe_mode mode, u32 advert) +{ + if (pdata->phy.autoneg == AUTONEG_ENABLE) { + if (pdata->phy.advertising & advert) + return true; + } else { + enum axgbe_mode cur_mode; + + cur_mode = axgbe_phy_get_mode(pdata, pdata->phy.speed); + if (cur_mode == mode) + return true; + } + + return false; +} + +static bool axgbe_phy_use_basex_mode(struct axgbe_port *pdata, + enum axgbe_mode mode) +{ + switch (mode) { + case AXGBE_MODE_X: + return axgbe_phy_check_mode(pdata, mode, + ADVERTISED_1000baseT_Full); + case AXGBE_MODE_KR: + return axgbe_phy_check_mode(pdata, mode, + ADVERTISED_10000baseT_Full); + default: + return false; + } +} + +static bool axgbe_phy_use_baset_mode(struct axgbe_port *pdata, + enum axgbe_mode mode) +{ + switch (mode) { + case AXGBE_MODE_SGMII_100: + return axgbe_phy_check_mode(pdata, mode, + ADVERTISED_100baseT_Full); + case AXGBE_MODE_SGMII_1000: + return axgbe_phy_check_mode(pdata, mode, + ADVERTISED_1000baseT_Full); + case AXGBE_MODE_KR: + return axgbe_phy_check_mode(pdata, mode, + ADVERTISED_10000baseT_Full); + default: + return false; + } +} + +static bool axgbe_phy_use_sfp_mode(struct axgbe_port *pdata, + enum axgbe_mode mode) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + + switch (mode) { + case AXGBE_MODE_X: + if (phy_data->sfp_base == AXGBE_SFP_BASE_1000_T) + return false; + return axgbe_phy_check_mode(pdata, mode, + ADVERTISED_1000baseT_Full); + case AXGBE_MODE_SGMII_100: + if (phy_data->sfp_base != 
AXGBE_SFP_BASE_1000_T) + return false; + return axgbe_phy_check_mode(pdata, mode, + ADVERTISED_100baseT_Full); + case AXGBE_MODE_SGMII_1000: + if (phy_data->sfp_base != AXGBE_SFP_BASE_1000_T) + return false; + return axgbe_phy_check_mode(pdata, mode, + ADVERTISED_1000baseT_Full); + case AXGBE_MODE_SFI: + return axgbe_phy_check_mode(pdata, mode, + ADVERTISED_10000baseT_Full); + default: + return false; + } +} + +static bool axgbe_phy_use_bp_2500_mode(struct axgbe_port *pdata, + enum axgbe_mode mode) +{ + switch (mode) { + case AXGBE_MODE_KX_2500: + return axgbe_phy_check_mode(pdata, mode, + ADVERTISED_2500baseX_Full); + default: + return false; + } +} + +static bool axgbe_phy_use_bp_mode(struct axgbe_port *pdata, + enum axgbe_mode mode) +{ + switch (mode) { + case AXGBE_MODE_KX_1000: + return axgbe_phy_check_mode(pdata, mode, + ADVERTISED_1000baseKX_Full); + case AXGBE_MODE_KR: + return axgbe_phy_check_mode(pdata, mode, + ADVERTISED_10000baseKR_Full); + default: + return false; + } +} + +static bool axgbe_phy_use_mode(struct axgbe_port *pdata, enum axgbe_mode mode) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + + switch (phy_data->port_mode) { + case AXGBE_PORT_MODE_BACKPLANE: + return axgbe_phy_use_bp_mode(pdata, mode); + case AXGBE_PORT_MODE_BACKPLANE_2500: + return axgbe_phy_use_bp_2500_mode(pdata, mode); + case AXGBE_PORT_MODE_1000BASE_T: + case AXGBE_PORT_MODE_NBASE_T: + case AXGBE_PORT_MODE_10GBASE_T: + return axgbe_phy_use_baset_mode(pdata, mode); + case AXGBE_PORT_MODE_1000BASE_X: + case AXGBE_PORT_MODE_10GBASE_R: + return axgbe_phy_use_basex_mode(pdata, mode); + case AXGBE_PORT_MODE_SFP: + return axgbe_phy_use_sfp_mode(pdata, mode); + default: + return false; + } +} + +static int axgbe_phy_link_status(struct axgbe_port *pdata, int *an_restart) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + unsigned int reg; + + *an_restart = 0; + + if (phy_data->port_mode == AXGBE_PORT_MODE_SFP) { + /* Check SFP signals */ + axgbe_phy_sfp_detect(pdata); + + if (phy_data->sfp_changed) { + *an_restart = 1; + return 0; + } + + if (phy_data->sfp_mod_absent || phy_data->sfp_rx_los) + return 0; + } + + /* Link status is latched low, so read once to clear + * and then read again to get current state + */ + reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1); + reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1); + if (reg & MDIO_STAT1_LSTATUS) + return 1; + + /* No link, attempt a receiver reset cycle */ + if (phy_data->rrc_count++) { + phy_data->rrc_count = 0; + axgbe_phy_rrc(pdata); + } + + return 0; +} + +static void axgbe_phy_sfp_gpio_setup(struct axgbe_port *pdata) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + unsigned int reg; + + reg = XP_IOREAD(pdata, XP_PROP_3); + + phy_data->sfp_gpio_address = AXGBE_GPIO_ADDRESS_PCA9555 + + XP_GET_BITS(reg, XP_PROP_3, GPIO_ADDR); + + phy_data->sfp_gpio_mask = XP_GET_BITS(reg, XP_PROP_3, GPIO_MASK); + + phy_data->sfp_gpio_rx_los = XP_GET_BITS(reg, XP_PROP_3, + GPIO_RX_LOS); + phy_data->sfp_gpio_tx_fault = XP_GET_BITS(reg, XP_PROP_3, + GPIO_TX_FAULT); + phy_data->sfp_gpio_mod_absent = XP_GET_BITS(reg, XP_PROP_3, + GPIO_MOD_ABS); + phy_data->sfp_gpio_rate_select = XP_GET_BITS(reg, XP_PROP_3, + GPIO_RATE_SELECT); +} + +static void axgbe_phy_sfp_comm_setup(struct axgbe_port *pdata) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + unsigned int reg, mux_addr_hi, mux_addr_lo; + + reg = XP_IOREAD(pdata, XP_PROP_4); + + mux_addr_hi = XP_GET_BITS(reg, XP_PROP_4, MUX_ADDR_HI); + mux_addr_lo = XP_GET_BITS(reg, XP_PROP_4, MUX_ADDR_LO); + 
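+ /* A low mux address of AXGBE_SFP_DIRECT means the SFP is wired straight
+ * to the I2C bus, so no PCA9545 mux channel needs to be selected.
+ */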
if (mux_addr_lo == AXGBE_SFP_DIRECT) + return; + + phy_data->sfp_comm = AXGBE_SFP_COMM_PCA9545; + phy_data->sfp_mux_address = (mux_addr_hi << 2) + mux_addr_lo; + phy_data->sfp_mux_channel = XP_GET_BITS(reg, XP_PROP_4, MUX_CHAN); +} + +static void axgbe_phy_sfp_setup(struct axgbe_port *pdata) +{ + axgbe_phy_sfp_comm_setup(pdata); + axgbe_phy_sfp_gpio_setup(pdata); +} + +static bool axgbe_phy_redrv_error(struct axgbe_phy_data *phy_data) +{ + if (!phy_data->redrv) + return false; + + if (phy_data->redrv_if >= AXGBE_PHY_REDRV_IF_MAX) + return true; + + switch (phy_data->redrv_model) { + case AXGBE_PHY_REDRV_MODEL_4223: + if (phy_data->redrv_lane > 3) + return true; + break; + case AXGBE_PHY_REDRV_MODEL_4227: + if (phy_data->redrv_lane > 1) + return true; + break; + default: + return true; + } + + return false; +} + +static int axgbe_phy_mdio_reset_setup(struct axgbe_port *pdata) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + unsigned int reg; + + if (phy_data->conn_type != AXGBE_CONN_TYPE_MDIO) + return 0; + reg = XP_IOREAD(pdata, XP_PROP_3); + phy_data->mdio_reset = XP_GET_BITS(reg, XP_PROP_3, MDIO_RESET); + switch (phy_data->mdio_reset) { + case AXGBE_MDIO_RESET_NONE: + case AXGBE_MDIO_RESET_I2C_GPIO: + case AXGBE_MDIO_RESET_INT_GPIO: + break; + default: + PMD_DRV_LOG(ERR, "unsupported MDIO reset (%#x)\n", + phy_data->mdio_reset); + return -EINVAL; + } + if (phy_data->mdio_reset == AXGBE_MDIO_RESET_I2C_GPIO) { + phy_data->mdio_reset_addr = AXGBE_GPIO_ADDRESS_PCA9555 + + XP_GET_BITS(reg, XP_PROP_3, + MDIO_RESET_I2C_ADDR); + phy_data->mdio_reset_gpio = XP_GET_BITS(reg, XP_PROP_3, + MDIO_RESET_I2C_GPIO); + } else if (phy_data->mdio_reset == AXGBE_MDIO_RESET_INT_GPIO) { + phy_data->mdio_reset_gpio = XP_GET_BITS(reg, XP_PROP_3, + MDIO_RESET_INT_GPIO); + } + + return 0; +} + +static bool axgbe_phy_port_mode_mismatch(struct axgbe_port *pdata) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + + switch (phy_data->port_mode) { + case AXGBE_PORT_MODE_BACKPLANE: + if ((phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000) || + (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_10000)) + return false; + break; + case AXGBE_PORT_MODE_BACKPLANE_2500: + if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_2500) + return false; + break; + case AXGBE_PORT_MODE_1000BASE_T: + if ((phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_100) || + (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000)) + return false; + break; + case AXGBE_PORT_MODE_1000BASE_X: + if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000) + return false; + break; + case AXGBE_PORT_MODE_NBASE_T: + if ((phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_100) || + (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000) || + (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_2500)) + return false; + break; + case AXGBE_PORT_MODE_10GBASE_T: + if ((phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_100) || + (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000) || + (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_10000)) + return false; + break; + case AXGBE_PORT_MODE_10GBASE_R: + if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_10000) + return false; + break; + case AXGBE_PORT_MODE_SFP: + if ((phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_100) || + (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000) || + (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_10000)) + return false; + break; + default: + break; + } + + return true; +} + +static bool axgbe_phy_conn_type_mismatch(struct axgbe_port *pdata) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + + switch 
(phy_data->port_mode) { + case AXGBE_PORT_MODE_BACKPLANE: + case AXGBE_PORT_MODE_BACKPLANE_2500: + if (phy_data->conn_type == AXGBE_CONN_TYPE_BACKPLANE) + return false; + break; + case AXGBE_PORT_MODE_1000BASE_T: + case AXGBE_PORT_MODE_1000BASE_X: + case AXGBE_PORT_MODE_NBASE_T: + case AXGBE_PORT_MODE_10GBASE_T: + case AXGBE_PORT_MODE_10GBASE_R: + if (phy_data->conn_type == AXGBE_CONN_TYPE_MDIO) + return false; + break; + case AXGBE_PORT_MODE_SFP: + if (phy_data->conn_type == AXGBE_CONN_TYPE_SFP) + return false; + break; + default: + break; + } + + return true; +} + +static bool axgbe_phy_port_enabled(struct axgbe_port *pdata) +{ + unsigned int reg; + + reg = XP_IOREAD(pdata, XP_PROP_0); + if (!XP_GET_BITS(reg, XP_PROP_0, PORT_SPEEDS)) + return false; + if (!XP_GET_BITS(reg, XP_PROP_0, CONN_TYPE)) + return false; + + return true; +} + +static void axgbe_phy_cdr_track(struct axgbe_port *pdata) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + + if (!pdata->vdata->an_cdr_workaround) + return; + + if (!phy_data->phy_cdr_notrack) + return; + + rte_delay_us(phy_data->phy_cdr_delay + 400); + + XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_VEND2_PMA_CDR_CONTROL, + AXGBE_PMA_CDR_TRACK_EN_MASK, + AXGBE_PMA_CDR_TRACK_EN_ON); + + phy_data->phy_cdr_notrack = 0; +} + +static void axgbe_phy_cdr_notrack(struct axgbe_port *pdata) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + + if (!pdata->vdata->an_cdr_workaround) + return; + + if (phy_data->phy_cdr_notrack) + return; + + XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_VEND2_PMA_CDR_CONTROL, + AXGBE_PMA_CDR_TRACK_EN_MASK, + AXGBE_PMA_CDR_TRACK_EN_OFF); + + axgbe_phy_rrc(pdata); + + phy_data->phy_cdr_notrack = 1; +} + +static void axgbe_phy_kr_training_post(struct axgbe_port *pdata) +{ + if (!pdata->cdr_track_early) + axgbe_phy_cdr_track(pdata); +} + +static void axgbe_phy_kr_training_pre(struct axgbe_port *pdata) +{ + if (pdata->cdr_track_early) + axgbe_phy_cdr_track(pdata); +} + +static void axgbe_phy_an_post(struct axgbe_port *pdata) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + + switch (pdata->an_mode) { + case AXGBE_AN_MODE_CL73: + case AXGBE_AN_MODE_CL73_REDRV: + if (phy_data->cur_mode != AXGBE_MODE_KR) + break; + + axgbe_phy_cdr_track(pdata); + + switch (pdata->an_result) { + case AXGBE_AN_READY: + case AXGBE_AN_COMPLETE: + break; + default: + if (phy_data->phy_cdr_delay < AXGBE_CDR_DELAY_MAX) + phy_data->phy_cdr_delay += AXGBE_CDR_DELAY_INC; + break; + } + break; + default: + break; + } +} + +static void axgbe_phy_an_pre(struct axgbe_port *pdata) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + + switch (pdata->an_mode) { + case AXGBE_AN_MODE_CL73: + case AXGBE_AN_MODE_CL73_REDRV: + if (phy_data->cur_mode != AXGBE_MODE_KR) + break; + + axgbe_phy_cdr_notrack(pdata); + break; + default: + break; + } +} + +static void axgbe_phy_stop(struct axgbe_port *pdata) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + + /* Reset SFP data */ + axgbe_phy_sfp_reset(phy_data); + axgbe_phy_sfp_mod_absent(pdata); + + /* Reset CDR support */ + axgbe_phy_cdr_track(pdata); + + /* Power off the PHY */ + axgbe_phy_power_off(pdata); + + /* Stop the I2C controller */ + pdata->i2c_if.i2c_stop(pdata); +} + +static int axgbe_phy_start(struct axgbe_port *pdata) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + int ret; + + /* Start the I2C controller */ + ret = pdata->i2c_if.i2c_start(pdata); + if (ret) + return ret; + + /* Start in highest supported mode */ + axgbe_phy_set_mode(pdata, phy_data->start_mode); + + /* Reset 
CDR support */ + axgbe_phy_cdr_track(pdata); + + /* After starting the I2C controller, we can check for an SFP */ + switch (phy_data->port_mode) { + case AXGBE_PORT_MODE_SFP: + axgbe_phy_sfp_detect(pdata); + break; + default: + break; + } + pdata->phy.advertising &= axgbe_phy_an_advertising(pdata); + + return ret; +} + +static int axgbe_phy_reset(struct axgbe_port *pdata) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + enum axgbe_mode cur_mode; + + /* Reset by power cycling the PHY */ + cur_mode = phy_data->cur_mode; + axgbe_phy_power_off(pdata); + /* First time reset is done with passed unknown mode*/ + axgbe_phy_set_mode(pdata, cur_mode); + return 0; +} + +static int axgbe_phy_init(struct axgbe_port *pdata) +{ + struct axgbe_phy_data *phy_data; + unsigned int reg; + int ret; + + /* Check if enabled */ + if (!axgbe_phy_port_enabled(pdata)) { + PMD_DRV_LOG(ERR, "device is not enabled\n"); + return -ENODEV; + } + + /* Initialize the I2C controller */ + ret = pdata->i2c_if.i2c_init(pdata); + if (ret) + return ret; + + phy_data = rte_zmalloc("phy_data memory", sizeof(*phy_data), 0); + if (!phy_data) { + PMD_DRV_LOG(ERR, "phy_data allocation failed\n"); + return -ENOMEM; + } + pdata->phy_data = phy_data; + + reg = XP_IOREAD(pdata, XP_PROP_0); + phy_data->port_mode = XP_GET_BITS(reg, XP_PROP_0, PORT_MODE); + phy_data->port_id = XP_GET_BITS(reg, XP_PROP_0, PORT_ID); + phy_data->port_speeds = XP_GET_BITS(reg, XP_PROP_0, PORT_SPEEDS); + phy_data->conn_type = XP_GET_BITS(reg, XP_PROP_0, CONN_TYPE); + phy_data->mdio_addr = XP_GET_BITS(reg, XP_PROP_0, MDIO_ADDR); + + reg = XP_IOREAD(pdata, XP_PROP_4); + phy_data->redrv = XP_GET_BITS(reg, XP_PROP_4, REDRV_PRESENT); + phy_data->redrv_if = XP_GET_BITS(reg, XP_PROP_4, REDRV_IF); + phy_data->redrv_addr = XP_GET_BITS(reg, XP_PROP_4, REDRV_ADDR); + phy_data->redrv_lane = XP_GET_BITS(reg, XP_PROP_4, REDRV_LANE); + phy_data->redrv_model = XP_GET_BITS(reg, XP_PROP_4, REDRV_MODEL); + + /* Validate the connection requested */ + if (axgbe_phy_conn_type_mismatch(pdata)) { + PMD_DRV_LOG(ERR, "phy mode/connection mismatch (%#x/%#x)\n", + phy_data->port_mode, phy_data->conn_type); + return -EINVAL; + } + + /* Validate the mode requested */ + if (axgbe_phy_port_mode_mismatch(pdata)) { + PMD_DRV_LOG(ERR, "phy mode/speed mismatch (%#x/%#x)\n", + phy_data->port_mode, phy_data->port_speeds); + return -EINVAL; + } + + /* Check for and validate MDIO reset support */ + ret = axgbe_phy_mdio_reset_setup(pdata); + if (ret) + return ret; + + /* Validate the re-driver information */ + if (axgbe_phy_redrv_error(phy_data)) { + PMD_DRV_LOG(ERR, "phy re-driver settings error\n"); + return -EINVAL; + } + pdata->kr_redrv = phy_data->redrv; + + /* Indicate current mode is unknown */ + phy_data->cur_mode = AXGBE_MODE_UNKNOWN; + + /* Initialize supported features */ + pdata->phy.supported = 0; + + switch (phy_data->port_mode) { + /* Backplane support */ + case AXGBE_PORT_MODE_BACKPLANE: + pdata->phy.supported |= SUPPORTED_Autoneg; + pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; + pdata->phy.supported |= SUPPORTED_Backplane; + if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000) { + pdata->phy.supported |= SUPPORTED_1000baseKX_Full; + phy_data->start_mode = AXGBE_MODE_KX_1000; + } + if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_10000) { + pdata->phy.supported |= SUPPORTED_10000baseKR_Full; + if (pdata->fec_ability & MDIO_PMA_10GBR_FECABLE_ABLE) + pdata->phy.supported |= + SUPPORTED_10000baseR_FEC; + phy_data->start_mode = AXGBE_MODE_KR; + } + + 
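+ /* No external MDIO-managed PHY in backplane mode */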
phy_data->phydev_mode = AXGBE_MDIO_MODE_NONE; + break; + case AXGBE_PORT_MODE_BACKPLANE_2500: + pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; + pdata->phy.supported |= SUPPORTED_Backplane; + pdata->phy.supported |= SUPPORTED_2500baseX_Full; + phy_data->start_mode = AXGBE_MODE_KX_2500; + + phy_data->phydev_mode = AXGBE_MDIO_MODE_NONE; + break; + + /* MDIO 1GBase-T support */ + case AXGBE_PORT_MODE_1000BASE_T: + pdata->phy.supported |= SUPPORTED_Autoneg; + pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; + pdata->phy.supported |= SUPPORTED_TP; + if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_100) { + pdata->phy.supported |= SUPPORTED_100baseT_Full; + phy_data->start_mode = AXGBE_MODE_SGMII_100; + } + if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000) { + pdata->phy.supported |= SUPPORTED_1000baseT_Full; + phy_data->start_mode = AXGBE_MODE_SGMII_1000; + } + + phy_data->phydev_mode = AXGBE_MDIO_MODE_CL22; + break; + + /* MDIO Base-X support */ + case AXGBE_PORT_MODE_1000BASE_X: + pdata->phy.supported |= SUPPORTED_Autoneg; + pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; + pdata->phy.supported |= SUPPORTED_FIBRE; + pdata->phy.supported |= SUPPORTED_1000baseT_Full; + phy_data->start_mode = AXGBE_MODE_X; + + phy_data->phydev_mode = AXGBE_MDIO_MODE_CL22; + break; + + /* MDIO NBase-T support */ + case AXGBE_PORT_MODE_NBASE_T: + pdata->phy.supported |= SUPPORTED_Autoneg; + pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; + pdata->phy.supported |= SUPPORTED_TP; + if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_100) { + pdata->phy.supported |= SUPPORTED_100baseT_Full; + phy_data->start_mode = AXGBE_MODE_SGMII_100; + } + if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000) { + pdata->phy.supported |= SUPPORTED_1000baseT_Full; + phy_data->start_mode = AXGBE_MODE_SGMII_1000; + } + if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_2500) { + pdata->phy.supported |= SUPPORTED_2500baseX_Full; + phy_data->start_mode = AXGBE_MODE_KX_2500; + } + + phy_data->phydev_mode = AXGBE_MDIO_MODE_CL45; + break; + + /* 10GBase-T support */ + case AXGBE_PORT_MODE_10GBASE_T: + pdata->phy.supported |= SUPPORTED_Autoneg; + pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; + pdata->phy.supported |= SUPPORTED_TP; + if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_100) { + pdata->phy.supported |= SUPPORTED_100baseT_Full; + phy_data->start_mode = AXGBE_MODE_SGMII_100; + } + if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000) { + pdata->phy.supported |= SUPPORTED_1000baseT_Full; + phy_data->start_mode = AXGBE_MODE_SGMII_1000; + } + if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_10000) { + pdata->phy.supported |= SUPPORTED_10000baseT_Full; + phy_data->start_mode = AXGBE_MODE_KR; + } + + phy_data->phydev_mode = AXGBE_MDIO_MODE_NONE; + break; + + /* 10GBase-R support */ + case AXGBE_PORT_MODE_10GBASE_R: + pdata->phy.supported |= SUPPORTED_Autoneg; + pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; + pdata->phy.supported |= SUPPORTED_TP; + pdata->phy.supported |= SUPPORTED_10000baseT_Full; + if (pdata->fec_ability & MDIO_PMA_10GBR_FECABLE_ABLE) + pdata->phy.supported |= SUPPORTED_10000baseR_FEC; + phy_data->start_mode = AXGBE_MODE_SFI; + + phy_data->phydev_mode = AXGBE_MDIO_MODE_NONE; + break; + + /* SFP support */ + case AXGBE_PORT_MODE_SFP: + pdata->phy.supported |= SUPPORTED_Autoneg; + pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; + pdata->phy.supported |= SUPPORTED_TP; + pdata->phy.supported |= 
SUPPORTED_FIBRE; + if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_100) { + pdata->phy.supported |= SUPPORTED_100baseT_Full; + phy_data->start_mode = AXGBE_MODE_SGMII_100; + } + if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000) { + pdata->phy.supported |= SUPPORTED_1000baseT_Full; + phy_data->start_mode = AXGBE_MODE_SGMII_1000; + } + if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_10000) { + pdata->phy.supported |= SUPPORTED_10000baseT_Full; + phy_data->start_mode = AXGBE_MODE_SFI; + if (pdata->fec_ability & MDIO_PMA_10GBR_FECABLE_ABLE) + pdata->phy.supported |= + SUPPORTED_10000baseR_FEC; + } + + phy_data->phydev_mode = AXGBE_MDIO_MODE_CL22; + + axgbe_phy_sfp_setup(pdata); + break; + default: + return -EINVAL; + } + + if ((phy_data->conn_type & AXGBE_CONN_TYPE_MDIO) && + (phy_data->phydev_mode != AXGBE_MDIO_MODE_NONE)) { + ret = pdata->hw_if.set_ext_mii_mode(pdata, phy_data->mdio_addr, + phy_data->phydev_mode); + if (ret) { + PMD_DRV_LOG(ERR, "mdio port/clause not compatible (%d/%u)\n", + phy_data->mdio_addr, phy_data->phydev_mode); + return -EINVAL; + } + } + + if (phy_data->redrv && !phy_data->redrv_if) { + ret = pdata->hw_if.set_ext_mii_mode(pdata, phy_data->redrv_addr, + AXGBE_MDIO_MODE_CL22); + if (ret) { + PMD_DRV_LOG(ERR, "redriver mdio port not compatible (%u)\n", + phy_data->redrv_addr); + return -EINVAL; + } + } + + phy_data->phy_cdr_delay = AXGBE_CDR_DELAY_INIT; + return 0; +} +void axgbe_init_function_ptrs_phy_v2(struct axgbe_phy_if *phy_if) +{ + struct axgbe_phy_impl_if *phy_impl = &phy_if->phy_impl; + + phy_impl->init = axgbe_phy_init; + phy_impl->reset = axgbe_phy_reset; + phy_impl->start = axgbe_phy_start; + phy_impl->stop = axgbe_phy_stop; + phy_impl->link_status = axgbe_phy_link_status; + phy_impl->use_mode = axgbe_phy_use_mode; + phy_impl->set_mode = axgbe_phy_set_mode; + phy_impl->get_mode = axgbe_phy_get_mode; + phy_impl->switch_mode = axgbe_phy_switch_mode; + phy_impl->cur_mode = axgbe_phy_cur_mode; + phy_impl->an_mode = axgbe_phy_an_mode; + phy_impl->an_config = axgbe_phy_an_config; + phy_impl->an_advertising = axgbe_phy_an_advertising; + phy_impl->an_outcome = axgbe_phy_an_outcome; + + phy_impl->an_pre = axgbe_phy_an_pre; + phy_impl->an_post = axgbe_phy_an_post; + + phy_impl->kr_training_pre = axgbe_phy_kr_training_pre; + phy_impl->kr_training_post = axgbe_phy_kr_training_post; +} diff --git a/src/spdk/dpdk/drivers/net/axgbe/axgbe_regs.h b/src/spdk/dpdk/drivers/net/axgbe/axgbe_regs.h new file mode 100644 index 000000000..c7e032620 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/axgbe/axgbe_regs.h @@ -0,0 +1,229 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019 Advanced Micro Devices, Inc. All rights reserved. 
+ */ +#ifndef RTE_ETH_AXGBE_REGS_H_ +#define RTE_ETH_AXGBE_REGS_H_ + +#include "axgbe_common.h" + +static const uint32_t dma_reg_tbl[] = { + DMA_MR, /* DMA Mode */ + DMA_SBMR, /* DMA Sys Bus Mode */ + DMA_ISR, /* DMA Interrupt Status */ + DMA_AXIARCR, /* DMA AXI Tx AR ACE Ctrl */ + DMA_AXIAWCR, /* DMA AXI Rx AW ACE Ctrl */ + DMA_AXIAWRCR, /* DMA AXI TxRx AWR ACE Ctrl */ + DMA_DSR0, /* DMA Debug Status0 */ + DMA_DSR1, /* DMA Debug Status1 */ + EDMA_TX_CONTROL,/* DMA Tx EDMA Ctrl */ + EDMA_RX_CONTROL,/* DMA Rx EDMA Ctrl */ +}; + +static const uint32_t dma_txch_reg_tbl[] = { + DMA_CH_CR, /* DMA Channel Ctrl */ + DMA_CH_TCR, /* DMA Tx Ctrl */ + DMA_CH_TDLR_HI, /* DMA TxDescList HAddr */ + DMA_CH_TDLR_LO, /* DMA TxDescList LAddr */ + DMA_CH_TDTR_LO, /* DMA TxDescTail LAddr */ + DMA_CH_TDRLR, /* DMA TxDescRing Length */ + DMA_CH_IER, /* DMA Interrupt Enable */ + DMA_CH_CATDR_LO,/* DMA CurrApp TxDesc LAddr */ + DMA_CH_CATBR_HI,/* DMA CurrApp TxBuf HAddr */ + DMA_CH_CATBR_LO,/* DMA CurrApp TxBuf LAddr */ + DMA_CH_SR, /* DMA Channel Status */ +}; + +static const uint32_t dma_rxch_reg_tbl[] = { + DMA_CH_RCR, /* DMA Rx Ctrl */ + DMA_CH_RDLR_HI, /* DMA RxDescList HAddr */ + DMA_CH_RDLR_LO, /* DMA RxDescList LAddr */ + DMA_CH_RDTR_LO, /* DMA RxDescTail LAddr */ + DMA_CH_RDRLR, /* DMA RxDescRing Length */ + DMA_CH_RIWT, /* DMA Rx Interrupt WatchDog Timer */ + DMA_CH_CARDR_LO,/* DMA CurrApp RxDesc LAddr */ + DMA_CH_CARBR_HI,/* DMA CurrApp RxBuf HAddr */ + DMA_CH_CARBR_LO,/* DMA CurrApp RxBuf LAddr */ + +}; + +static const uint32_t mtl_reg_tbl[] = { + MTL_OMR, /* MTL Operation Mode */ + MTL_FDCR, /* MTL FIFO Debug Ctrl */ + MTL_FDSR, /* MTL FIFO Debug Status */ + MTL_FDDR, /* MTL FIFO Debug Data */ + MTL_ISR, /* MTL Interrupt Status */ + MTL_RQDCM0R, /* MTL RxQ DMA Map0 */ + MTL_TCPM0R, /* MTL TC Prty Map0 */ + MTL_TCPM1R, /* MTL TC Prty Map1 */ +}; + +static const uint32_t mtl_txq_reg_tbl[] = { + MTL_Q_TQOMR, /* MTL TxQ Operation Mode */ + MTL_Q_TQUR, /* MTL TxQ Underflow */ + MTL_Q_TQDR, /* MTL TxQ Debug */ + MTL_Q_IER, /* MTL Q Interrupt Enable */ + MTL_Q_ISR, /* MTL Q Interrupt Status */ +}; + +static const uint32_t mtl_rxq_reg_tbl[] = { + MTL_Q_RQOMR, /* MTL RxQ Operation Mode */ + MTL_Q_RQMPOCR, /* MTL RxQ Missed Pkt OverFlow Cnt */ + MTL_Q_RQDR, /* MTL RxQ Debug */ + MTL_Q_RQFCR, /* MTL RxQ Flow Control */ +}; + +static const uint32_t mac_reg_tbl[] = { + MAC_TCR, /* MAC Tx Config */ + MAC_RCR, /* MAC Rx Config */ + MAC_PFR, /* MAC Packet Filter */ + MAC_WTR, /* MAC WatchDog Timeout */ + MAC_HTR0, /* MAC Hash Table0 */ + MAC_VLANTR, /* MAC VLAN Tag Ctrl */ + MAC_VLANHTR, /* MAC VLAN Hash Table */ + MAC_VLANIR, /* MAC VLAN Incl */ + MAC_IVLANIR, /* MAC Inner VLAN Incl */ + MAC_RETMR, /* MAC Rx Eth Type Match */ + MAC_Q0TFCR, /* MAC Q0 Tx Flow Ctrl */ + MAC_RFCR, /* MAC Rx Flow Ctrl */ + MAC_RQC0R, /* MAC RxQ Ctrl0 */ + MAC_RQC1R, /* MAC RxQ Ctrl1 */ + MAC_RQC2R, /* MAC RxQ Ctrl2 */ + MAC_RQC3R, /* MAC RxQ Ctrl3 */ + MAC_ISR, /* MAC Interrupt Status */ + MAC_IER, /* MAC Interrupt Enable */ + MAC_RTSR, /* MAC Rx Tx Status */ + MAC_PMTCSR, /* MAC PMT Ctrl Status */ + MAC_RWKPFR, /* MAC RWK Packet Filter */ + MAC_LPICSR, /* MAC LPI Ctrl Status */ + MAC_LPITCR, /* MAC LPI Timers Ctrl */ + MAC_VR, /* MAC Version */ + MAC_DR, /* MAC Debug Status */ + MAC_HWF0R, /* MAC HW Feature0 */ + MAC_HWF1R, /* MAC HW Feature1 */ + MAC_HWF2R, /* MAC HW Feature2 */ + MAC_MDIOSCAR, /* MDIO Single Cmd Addr */ + MAC_MDIOSCCDR, /* MDIO Single Cmd/Data */ + MAC_MDIOISR, /* MDIO Interrupt Status */ + MAC_MDIOIER, /* MDIO 
Interrupt Enable */ + MAC_MDIOCL22R, /* MDIO Clause22 Port */ + MAC_GPIOCR, /* MAC GPIO Ctrl */ + MAC_GPIOSR, /* MAC GPIO Status */ + MAC_RSSCR, /* MAC RSS Ctrl */ + MAC_RSSAR, /* MAC RSS Addr */ +}; + +/* MAC Address Register Table */ +static const uint32_t mac_addr_reg_tbl[] = { + MAC_MACAHR(0), MAC_MACALR(0), MAC_MACAHR(1), MAC_MACALR(1), + MAC_MACAHR(2), MAC_MACALR(2), MAC_MACAHR(3), MAC_MACALR(3), + MAC_MACAHR(4), MAC_MACALR(4), MAC_MACAHR(5), MAC_MACALR(5), + MAC_MACAHR(6), MAC_MACALR(6), MAC_MACAHR(7), MAC_MACALR(7), + MAC_MACAHR(8), MAC_MACALR(8), MAC_MACAHR(9), MAC_MACALR(9), + MAC_MACAHR(10), MAC_MACALR(10), MAC_MACAHR(11), MAC_MACALR(11), + MAC_MACAHR(12), MAC_MACALR(12), MAC_MACAHR(13), MAC_MACALR(13), + MAC_MACAHR(14), MAC_MACALR(14), MAC_MACAHR(15), MAC_MACALR(15), + MAC_MACAHR(16), MAC_MACALR(16), MAC_MACAHR(17), MAC_MACALR(17), + MAC_MACAHR(18), MAC_MACALR(18), MAC_MACAHR(19), MAC_MACALR(19), + MAC_MACAHR(20), MAC_MACALR(20), MAC_MACAHR(21), MAC_MACALR(21), + MAC_MACAHR(22), MAC_MACALR(22), MAC_MACAHR(23), MAC_MACALR(23), + MAC_MACAHR(24), MAC_MACALR(24), MAC_MACAHR(25), MAC_MACALR(25), + MAC_MACAHR(26), MAC_MACALR(26), MAC_MACAHR(27), MAC_MACALR(27), + MAC_MACAHR(28), MAC_MACALR(28), MAC_MACAHR(29), MAC_MACALR(29), + MAC_MACAHR(30), MAC_MACALR(30), MAC_MACAHR(31), MAC_MACALR(31), + +}; + +static const uint32_t mac_ieee1558_reg_tbl[] = { + MAC_RSSDR, /* MAC RSS Data */ + MAC_TSCR, /* MAC TimeStamp Ctrl */ + MAC_SSIR, /* MAC Sub Second Incr */ + MAC_STSR, /* MAC Sys Time Secs */ + MAC_STNR, /* MAC Sys Time NSecs */ + MAC_STSUR, /* MAC Sys Time Secs Update */ + MAC_STNUR, /* MAC Sys Time NSecs Update */ + MAC_TSAR, /* MAC TimeStamp Addend */ + MAC_TSSR, /* MAC TimeStamp Status */ + MAC_TXSNR, /* MAC TxTS Status NSecs */ + MAC_TXSSR, /* MAC TxTS Status Secs */ +}; + +static inline int +axgbe_regs_get_count(struct axgbe_port *pdata) +{ + int count = 0; + unsigned int i = 0; + + count = ARRAY_SIZE(dma_reg_tbl); + for (i = 0; i < pdata->tx_ring_count; i++) + count += ARRAY_SIZE(dma_txch_reg_tbl); + for (i = 0; i < pdata->rx_ring_count; i++) + count += ARRAY_SIZE(dma_rxch_reg_tbl); + count += ARRAY_SIZE(mtl_reg_tbl); + for (i = 0; i < pdata->tx_q_count; i++) + count += ARRAY_SIZE(mtl_txq_reg_tbl); + for (i = 0; i < pdata->rx_q_count; i++) + count += ARRAY_SIZE(mtl_rxq_reg_tbl); + count += ARRAY_SIZE(mac_reg_tbl); + count += ARRAY_SIZE(mac_addr_reg_tbl); + count += ARRAY_SIZE(mac_ieee1558_reg_tbl); + + return count; +}; + +static inline int +axgbe_regs_dump(struct axgbe_port *pdata, uint32_t *data) +{ + unsigned int i = 0, j = 0; + unsigned int base_reg, reg; + + for (i = 0; i < ARRAY_SIZE(dma_reg_tbl); i++) + *data++ = AXGMAC_IOREAD(pdata, dma_reg_tbl[i]); + + for (j = 0; j < pdata->tx_ring_count; j++) { + base_reg = DMA_CH_BASE + (j * DMA_CH_INC); + for (i = 0; i < ARRAY_SIZE(dma_txch_reg_tbl); i++) { + reg = base_reg + dma_txch_reg_tbl[i]; + *data++ = AXGMAC_IOREAD(pdata, reg); + } + } + + for (j = 0; j < pdata->rx_ring_count; j++) { + base_reg = DMA_CH_BASE + (j * DMA_CH_INC); + for (i = 0; i < ARRAY_SIZE(dma_rxch_reg_tbl); i++) { + reg = base_reg + dma_rxch_reg_tbl[i]; + *data++ = AXGMAC_IOREAD(pdata, reg); + } + } + + for (i = 0; i < ARRAY_SIZE(mtl_reg_tbl); i++) + *data++ = AXGMAC_IOREAD(pdata, mtl_reg_tbl[i]); + + for (j = 0; j < pdata->tx_q_count; j++) { + base_reg = MTL_Q_BASE + (j * MTL_Q_INC); + for (i = 0; i < ARRAY_SIZE(mtl_txq_reg_tbl); i++) { + reg = base_reg + mtl_txq_reg_tbl[i]; + *data++ = AXGMAC_IOREAD(pdata, reg); + } + } + + for (j = 0; j < pdata->rx_q_count; 
j++) { + base_reg = MTL_Q_BASE + (j * MTL_Q_INC); + for (i = 0; i < ARRAY_SIZE(mtl_rxq_reg_tbl); i++) { + reg = base_reg + mtl_rxq_reg_tbl[i]; + *data++ = AXGMAC_IOREAD(pdata, reg); + } + } + + for (i = 0; i < ARRAY_SIZE(mac_reg_tbl); i++) + *data++ = AXGMAC_IOREAD(pdata, mac_reg_tbl[i]); + + for (i = 0; i < ARRAY_SIZE(mac_addr_reg_tbl); i++) + *data++ = AXGMAC_IOREAD(pdata, mac_addr_reg_tbl[i]); + + for (i = 0; i < ARRAY_SIZE(mac_ieee1558_reg_tbl); i++) + *data++ = AXGMAC_IOREAD(pdata, mac_ieee1558_reg_tbl[i]); + + return 0; +}; + +#endif /* RTE_ETH_AXGBE_REGS_H_ */ diff --git a/src/spdk/dpdk/drivers/net/axgbe/axgbe_rxtx.c b/src/spdk/dpdk/drivers/net/axgbe/axgbe_rxtx.c new file mode 100644 index 000000000..30c467db7 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/axgbe/axgbe_rxtx.c @@ -0,0 +1,867 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved. + * Copyright(c) 2018 Synopsys, Inc. All rights reserved. + */ + +#include "axgbe_ethdev.h" +#include "axgbe_rxtx.h" +#include "axgbe_phy.h" + +#include +#include +#include + +static void +axgbe_rx_queue_release(struct axgbe_rx_queue *rx_queue) +{ + uint16_t i; + struct rte_mbuf **sw_ring; + + if (rx_queue) { + sw_ring = rx_queue->sw_ring; + if (sw_ring) { + for (i = 0; i < rx_queue->nb_desc; i++) { + if (sw_ring[i]) + rte_pktmbuf_free(sw_ring[i]); + } + rte_free(sw_ring); + } + rte_free(rx_queue); + } +} + +void axgbe_dev_rx_queue_release(void *rxq) +{ + axgbe_rx_queue_release(rxq); +} + +int axgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, + uint16_t nb_desc, unsigned int socket_id, + const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *mp) +{ + PMD_INIT_FUNC_TRACE(); + uint32_t size; + const struct rte_memzone *dma; + struct axgbe_rx_queue *rxq; + uint32_t rx_desc = nb_desc; + struct axgbe_port *pdata = dev->data->dev_private; + + /* + * validate Rx descriptors count + * should be power of 2 and less than h/w supported + */ + if ((!rte_is_power_of_2(rx_desc)) || + rx_desc > pdata->rx_desc_count) + return -EINVAL; + /* First allocate the rx queue data structure */ + rxq = rte_zmalloc_socket("ethdev RX queue", + sizeof(struct axgbe_rx_queue), + RTE_CACHE_LINE_SIZE, socket_id); + if (!rxq) { + PMD_INIT_LOG(ERR, "rte_zmalloc for rxq failed!"); + return -ENOMEM; + } + + rxq->cur = 0; + rxq->dirty = 0; + rxq->pdata = pdata; + rxq->mb_pool = mp; + rxq->queue_id = queue_idx; + rxq->port_id = dev->data->port_id; + rxq->nb_desc = rx_desc; + rxq->dma_regs = (void *)((uint8_t *)pdata->xgmac_regs + DMA_CH_BASE + + (DMA_CH_INC * rxq->queue_id)); + rxq->dma_tail_reg = (volatile uint32_t *)((uint8_t *)rxq->dma_regs + + DMA_CH_RDTR_LO); + if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) + rxq->crc_len = RTE_ETHER_CRC_LEN; + else + rxq->crc_len = 0; + + /* CRC strip in AXGBE supports per port not per queue */ + pdata->crc_strip_enable = (rxq->crc_len == 0) ? 1 : 0; + rxq->free_thresh = rx_conf->rx_free_thresh ? 
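axgbe_regs_get_count() and axgbe_regs_dump() above size and fill the register snapshot by walking each flat table once and each per-channel table once per ring, reading every per-channel register at channel base + table offset. A self-contained sketch of that addressing arithmetic follows; the table contents, CH_BASE/CH_STRIDE values and fake_read() callback are made up for illustration and stand in for AXGMAC_IOREAD():

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

#define ARRAY_LEN(a) (sizeof(a) / sizeof((a)[0]))

/* Hypothetical channel layout: registers live at CH_BASE + ch * CH_STRIDE. */
#define CH_BASE   0x3000u
#define CH_STRIDE 0x80u

static const uint32_t chan_reg_tbl[] = { 0x00, 0x04, 0x2c, 0x60 };

/* Stand-in for a register read; a real driver would do MMIO here. */
static uint32_t fake_read(uint32_t addr)
{
        return addr ^ 0xdeadbeefu;
}

/* Dump every per-channel register for nb_channels rings into data[]. */
static size_t dump_channel_regs(uint32_t *data, unsigned int nb_channels)
{
        size_t n = 0;
        unsigned int ch, i;

        for (ch = 0; ch < nb_channels; ch++) {
                uint32_t base = CH_BASE + ch * CH_STRIDE;

                for (i = 0; i < ARRAY_LEN(chan_reg_tbl); i++)
                        data[n++] = fake_read(base + chan_reg_tbl[i]);
        }
        return n;
}

int main(void)
{
        uint32_t snapshot[2 * ARRAY_LEN(chan_reg_tbl)];
        size_t n = dump_channel_regs(snapshot, 2);

        printf("dumped %zu registers, first = 0x%08x\n", n, snapshot[0]);
        return 0;
}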
+ rx_conf->rx_free_thresh : AXGBE_RX_FREE_THRESH; + if (rxq->free_thresh > rxq->nb_desc) + rxq->free_thresh = rxq->nb_desc >> 3; + + /* Allocate RX ring hardware descriptors */ + size = rxq->nb_desc * sizeof(union axgbe_rx_desc); + dma = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx, size, 128, + socket_id); + if (!dma) { + PMD_DRV_LOG(ERR, "ring_dma_zone_reserve for rx_ring failed\n"); + axgbe_rx_queue_release(rxq); + return -ENOMEM; + } + rxq->ring_phys_addr = (uint64_t)dma->phys_addr; + rxq->desc = (volatile union axgbe_rx_desc *)dma->addr; + memset((void *)rxq->desc, 0, size); + /* Allocate software ring */ + size = rxq->nb_desc * sizeof(struct rte_mbuf *); + rxq->sw_ring = rte_zmalloc_socket("sw_ring", size, + RTE_CACHE_LINE_SIZE, + socket_id); + if (!rxq->sw_ring) { + PMD_DRV_LOG(ERR, "rte_zmalloc for sw_ring failed\n"); + axgbe_rx_queue_release(rxq); + return -ENOMEM; + } + dev->data->rx_queues[queue_idx] = rxq; + if (!pdata->rx_queues) + pdata->rx_queues = dev->data->rx_queues; + + return 0; +} + +static void axgbe_prepare_rx_stop(struct axgbe_port *pdata, + unsigned int queue) +{ + unsigned int rx_status; + unsigned long rx_timeout; + + /* The Rx engine cannot be stopped if it is actively processing + * packets. Wait for the Rx queue to empty the Rx fifo. Don't + * wait forever though... + */ + rx_timeout = rte_get_timer_cycles() + (AXGBE_DMA_STOP_TIMEOUT * + rte_get_timer_hz()); + + while (time_before(rte_get_timer_cycles(), rx_timeout)) { + rx_status = AXGMAC_MTL_IOREAD(pdata, queue, MTL_Q_RQDR); + if ((AXGMAC_GET_BITS(rx_status, MTL_Q_RQDR, PRXQ) == 0) && + (AXGMAC_GET_BITS(rx_status, MTL_Q_RQDR, RXQSTS) == 0)) + break; + + rte_delay_us(900); + } + + if (!time_before(rte_get_timer_cycles(), rx_timeout)) + PMD_DRV_LOG(ERR, + "timed out waiting for Rx queue %u to empty\n", + queue); +} + +void axgbe_dev_disable_rx(struct rte_eth_dev *dev) +{ + struct axgbe_rx_queue *rxq; + struct axgbe_port *pdata = dev->data->dev_private; + unsigned int i; + + /* Disable MAC Rx */ + AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 0); + AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 0); + AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 0); + AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 0); + + /* Prepare for Rx DMA channel stop */ + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = dev->data->rx_queues[i]; + axgbe_prepare_rx_stop(pdata, i); + } + /* Disable each Rx queue */ + AXGMAC_IOWRITE(pdata, MAC_RQC0R, 0); + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = dev->data->rx_queues[i]; + /* Disable Rx DMA channel */ + AXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RCR, SR, 0); + } +} + +void axgbe_dev_enable_rx(struct rte_eth_dev *dev) +{ + struct axgbe_rx_queue *rxq; + struct axgbe_port *pdata = dev->data->dev_private; + unsigned int i; + unsigned int reg_val = 0; + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = dev->data->rx_queues[i]; + /* Enable Rx DMA channel */ + AXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RCR, SR, 1); + } + + reg_val = 0; + for (i = 0; i < pdata->rx_q_count; i++) + reg_val |= (0x02 << (i << 1)); + AXGMAC_IOWRITE(pdata, MAC_RQC0R, reg_val); + + /* Enable MAC Rx */ + AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 1); + /* Frame is forwarded after stripping CRC to application*/ + if (pdata->crc_strip_enable) { + AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 1); + AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 1); + } + AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 1); +} + +/* Rx function one to one refresh */ +uint16_t +axgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + 
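axgbe_prepare_rx_stop() above polls a status register until the Rx FIFO drains, but bounds the wait with a deadline derived from the timer frequency so it cannot spin forever. Below is a simplified, standalone version of that deadline pattern; clock() replaces rte_get_timer_cycles()/rte_get_timer_hz() and queue_is_empty() is a dummy stand-in for the MTL_Q_RQDR status check:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Stand-in for reading a hardware queue status register. */
static bool queue_is_empty(void)
{
        static int polls;

        return ++polls > 3;   /* pretend the queue drains after a few polls */
}

/* Poll until the condition holds or the deadline passes. */
static bool wait_until_empty(double timeout_sec)
{
        clock_t deadline = clock() + (clock_t)(timeout_sec * CLOCKS_PER_SEC);

        while (clock() < deadline) {
                if (queue_is_empty())
                        return true;
                /* a real driver would delay here instead of busy-spinning */
        }
        return false;
}

int main(void)
{
        printf(wait_until_empty(1.0) ? "drained\n" : "timed out\n");
        return 0;
}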
PMD_INIT_FUNC_TRACE(); + uint16_t nb_rx = 0; + struct axgbe_rx_queue *rxq = rx_queue; + volatile union axgbe_rx_desc *desc; + uint64_t old_dirty = rxq->dirty; + struct rte_mbuf *mbuf, *tmbuf; + unsigned int err; + uint32_t error_status; + uint16_t idx, pidx, pkt_len; + + idx = AXGBE_GET_DESC_IDX(rxq, rxq->cur); + while (nb_rx < nb_pkts) { + if (unlikely(idx == rxq->nb_desc)) + idx = 0; + + desc = &rxq->desc[idx]; + + if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, OWN)) + break; + tmbuf = rte_mbuf_raw_alloc(rxq->mb_pool); + if (unlikely(!tmbuf)) { + PMD_DRV_LOG(ERR, "RX mbuf alloc failed port_id = %u" + " queue_id = %u\n", + (unsigned int)rxq->port_id, + (unsigned int)rxq->queue_id); + rte_eth_devices[ + rxq->port_id].data->rx_mbuf_alloc_failed++; + rxq->rx_mbuf_alloc_failed++; + break; + } + pidx = idx + 1; + if (unlikely(pidx == rxq->nb_desc)) + pidx = 0; + + rte_prefetch0(rxq->sw_ring[pidx]); + if ((pidx & 0x3) == 0) { + rte_prefetch0(&rxq->desc[pidx]); + rte_prefetch0(&rxq->sw_ring[pidx]); + } + + mbuf = rxq->sw_ring[idx]; + /* Check for any errors and free mbuf*/ + err = AXGMAC_GET_BITS_LE(desc->write.desc3, + RX_NORMAL_DESC3, ES); + error_status = 0; + if (unlikely(err)) { + error_status = desc->write.desc3 & AXGBE_ERR_STATUS; + if ((error_status != AXGBE_L3_CSUM_ERR) && + (error_status != AXGBE_L4_CSUM_ERR)) { + rxq->errors++; + rte_pktmbuf_free(mbuf); + goto err_set; + } + } + if (rxq->pdata->rx_csum_enable) { + mbuf->ol_flags = 0; + mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD; + mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD; + if (unlikely(error_status == AXGBE_L3_CSUM_ERR)) { + mbuf->ol_flags &= ~PKT_RX_IP_CKSUM_GOOD; + mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD; + mbuf->ol_flags &= ~PKT_RX_L4_CKSUM_GOOD; + mbuf->ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN; + } else if ( + unlikely(error_status == AXGBE_L4_CSUM_ERR)) { + mbuf->ol_flags &= ~PKT_RX_L4_CKSUM_GOOD; + mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD; + } + } + rte_prefetch1(rte_pktmbuf_mtod(mbuf, void *)); + /* Get the RSS hash */ + if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, RSV)) + mbuf->hash.rss = rte_le_to_cpu_32(desc->write.desc1); + pkt_len = AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, + PL) - rxq->crc_len; + /* Mbuf populate */ + mbuf->next = NULL; + mbuf->data_off = RTE_PKTMBUF_HEADROOM; + mbuf->nb_segs = 1; + mbuf->port = rxq->port_id; + mbuf->pkt_len = pkt_len; + mbuf->data_len = pkt_len; + rxq->bytes += pkt_len; + rx_pkts[nb_rx++] = mbuf; +err_set: + rxq->cur++; + rxq->sw_ring[idx++] = tmbuf; + desc->read.baddr = + rte_cpu_to_le_64(rte_mbuf_data_iova_default(tmbuf)); + memset((void *)(&desc->read.desc2), 0, 8); + AXGMAC_SET_BITS_LE(desc->read.desc3, RX_NORMAL_DESC3, OWN, 1); + rxq->dirty++; + } + rxq->pkts += nb_rx; + if (rxq->dirty != old_dirty) { + rte_wmb(); + idx = AXGBE_GET_DESC_IDX(rxq, rxq->dirty - 1); + AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDTR_LO, + low32_value(rxq->ring_phys_addr + + (idx * sizeof(union axgbe_rx_desc)))); + } + + return nb_rx; +} + + +uint16_t eth_axgbe_recv_scattered_pkts(void *rx_queue, + struct rte_mbuf **rx_pkts, uint16_t nb_pkts) +{ + PMD_INIT_FUNC_TRACE(); + uint16_t nb_rx = 0; + struct axgbe_rx_queue *rxq = rx_queue; + volatile union axgbe_rx_desc *desc; + + uint64_t old_dirty = rxq->dirty; + struct rte_mbuf *first_seg = NULL; + struct rte_mbuf *mbuf, *tmbuf; + unsigned int err; + uint32_t error_status; + uint16_t idx, pidx, data_len = 0, pkt_len = 0; + + idx = AXGBE_GET_DESC_IDX(rxq, rxq->cur); + while (nb_rx < nb_pkts) { + bool eop = 0; +next_desc: + if (unlikely(idx == 
rxq->nb_desc)) + idx = 0; + + desc = &rxq->desc[idx]; + + if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, OWN)) + break; + + tmbuf = rte_mbuf_raw_alloc(rxq->mb_pool); + if (unlikely(!tmbuf)) { + PMD_DRV_LOG(ERR, "RX mbuf alloc failed port_id = %u" + " queue_id = %u\n", + (unsigned int)rxq->port_id, + (unsigned int)rxq->queue_id); + rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++; + break; + } + + pidx = idx + 1; + if (unlikely(pidx == rxq->nb_desc)) + pidx = 0; + + rte_prefetch0(rxq->sw_ring[pidx]); + if ((pidx & 0x3) == 0) { + rte_prefetch0(&rxq->desc[pidx]); + rte_prefetch0(&rxq->sw_ring[pidx]); + } + + mbuf = rxq->sw_ring[idx]; + /* Check for any errors and free mbuf*/ + err = AXGMAC_GET_BITS_LE(desc->write.desc3, + RX_NORMAL_DESC3, ES); + error_status = 0; + if (unlikely(err)) { + error_status = desc->write.desc3 & AXGBE_ERR_STATUS; + if ((error_status != AXGBE_L3_CSUM_ERR) + && (error_status != AXGBE_L4_CSUM_ERR)) { + rxq->errors++; + rte_pktmbuf_free(mbuf); + goto err_set; + } + } + rte_prefetch1(rte_pktmbuf_mtod(mbuf, void *)); + + if (!AXGMAC_GET_BITS_LE(desc->write.desc3, + RX_NORMAL_DESC3, LD)) { + eop = 0; + pkt_len = rxq->buf_size; + data_len = pkt_len; + } else { + eop = 1; + pkt_len = AXGMAC_GET_BITS_LE(desc->write.desc3, + RX_NORMAL_DESC3, PL); + data_len = pkt_len - rxq->crc_len; + } + + if (first_seg != NULL) { + if (rte_pktmbuf_chain(first_seg, mbuf) != 0) + rte_mempool_put(rxq->mb_pool, + first_seg); + } else { + first_seg = mbuf; + } + + /* Get the RSS hash */ + if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, RSV)) + mbuf->hash.rss = rte_le_to_cpu_32(desc->write.desc1); + + /* Mbuf populate */ + mbuf->data_off = RTE_PKTMBUF_HEADROOM; + mbuf->data_len = data_len; + +err_set: + rxq->cur++; + rxq->sw_ring[idx++] = tmbuf; + desc->read.baddr = + rte_cpu_to_le_64(rte_mbuf_data_iova_default(tmbuf)); + memset((void *)(&desc->read.desc2), 0, 8); + AXGMAC_SET_BITS_LE(desc->read.desc3, RX_NORMAL_DESC3, OWN, 1); + rxq->dirty++; + + if (!eop) { + rte_pktmbuf_free(mbuf); + goto next_desc; + } + + first_seg->pkt_len = pkt_len; + rxq->bytes += pkt_len; + mbuf->next = NULL; + + first_seg->port = rxq->port_id; + if (rxq->pdata->rx_csum_enable) { + mbuf->ol_flags = 0; + mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD; + mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD; + if (unlikely(error_status == AXGBE_L3_CSUM_ERR)) { + mbuf->ol_flags &= ~PKT_RX_IP_CKSUM_GOOD; + mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD; + mbuf->ol_flags &= ~PKT_RX_L4_CKSUM_GOOD; + mbuf->ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN; + } else if (unlikely(error_status + == AXGBE_L4_CSUM_ERR)) { + mbuf->ol_flags &= ~PKT_RX_L4_CKSUM_GOOD; + mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD; + } + } + + rx_pkts[nb_rx++] = first_seg; + + /* Setup receipt context for a new packet.*/ + first_seg = NULL; + } + + /* Save receive context.*/ + rxq->pkts += nb_rx; + + if (rxq->dirty != old_dirty) { + rte_wmb(); + idx = AXGBE_GET_DESC_IDX(rxq, rxq->dirty - 1); + AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDTR_LO, + low32_value(rxq->ring_phys_addr + + (idx * sizeof(union axgbe_rx_desc)))); + } + return nb_rx; +} + +/* Tx Apis */ +static void axgbe_tx_queue_release(struct axgbe_tx_queue *tx_queue) +{ + uint16_t i; + struct rte_mbuf **sw_ring; + + if (tx_queue) { + sw_ring = tx_queue->sw_ring; + if (sw_ring) { + for (i = 0; i < tx_queue->nb_desc; i++) { + if (sw_ring[i]) + rte_pktmbuf_free(sw_ring[i]); + } + rte_free(sw_ring); + } + rte_free(tx_queue); + } +} + +void axgbe_dev_tx_queue_release(void *txq) +{ + axgbe_tx_queue_release(txq); +} + +int 
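Both receive paths above follow the same descriptor protocol: software may touch a slot only while its OWN bit is clear, it takes the completed mbuf, parks a freshly allocated buffer in the same slot, and hands the slot back to hardware by setting OWN last. A stripped-down, software-only sketch of that consume-and-refill loop over a power-of-two ring, using a plain struct instead of the real descriptor layout:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define RING_SIZE 8u                       /* must be a power of two */
#define IDX(x)    ((x) & (RING_SIZE - 1))  /* same masking idea as AXGBE_GET_DESC_IDX */

struct soft_desc {
        uint32_t own;     /* 1: hardware owns the slot, 0: software may consume */
        uint32_t length;  /* bytes written by "hardware" */
};

static struct soft_desc ring[RING_SIZE];
static uint64_t cur;      /* next slot to consume (free-running counter) */

/* Consume up to max completed slots; returns how many were taken. */
static unsigned int consume(uint32_t *lengths, unsigned int max)
{
        unsigned int n = 0;

        while (n < max) {
                struct soft_desc *d = &ring[IDX(cur)];

                if (d->own)            /* hardware still owns it: stop */
                        break;
                lengths[n++] = d->length;
                memset(d, 0, sizeof(*d));
                d->own = 1;            /* give the refreshed slot back last */
                cur++;
        }
        return n;
}

int main(void)
{
        uint32_t got[4];
        unsigned int i;

        for (i = 1; i < RING_SIZE; i++)
                ring[i].own = 1;       /* hardware still owns every other slot */
        ring[0].length = 64;           /* slot 0 completed with a 64-byte frame */

        printf("consumed %u slots\n", consume(got, 4));   /* prints 1 */
        return 0;
}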
axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, + uint16_t nb_desc, unsigned int socket_id, + const struct rte_eth_txconf *tx_conf) +{ + PMD_INIT_FUNC_TRACE(); + uint32_t tx_desc; + struct axgbe_port *pdata; + struct axgbe_tx_queue *txq; + unsigned int tsize; + const struct rte_memzone *tz; + + tx_desc = nb_desc; + pdata = dev->data->dev_private; + + /* + * validate tx descriptors count + * should be power of 2 and less than h/w supported + */ + if ((!rte_is_power_of_2(tx_desc)) || + tx_desc > pdata->tx_desc_count || + tx_desc < AXGBE_MIN_RING_DESC) + return -EINVAL; + + /* First allocate the tx queue data structure */ + txq = rte_zmalloc("ethdev TX queue", sizeof(struct axgbe_tx_queue), + RTE_CACHE_LINE_SIZE); + if (!txq) + return -ENOMEM; + txq->pdata = pdata; + + txq->nb_desc = tx_desc; + txq->free_thresh = tx_conf->tx_free_thresh ? + tx_conf->tx_free_thresh : AXGBE_TX_FREE_THRESH; + if (txq->free_thresh > txq->nb_desc) + txq->free_thresh = (txq->nb_desc >> 1); + txq->free_batch_cnt = txq->free_thresh; + + /* In vector_tx path threshold should be multiple of queue_size*/ + if (txq->nb_desc % txq->free_thresh != 0) + txq->vector_disable = 1; + + if (tx_conf->offloads != 0) + txq->vector_disable = 1; + + /* Allocate TX ring hardware descriptors */ + tsize = txq->nb_desc * sizeof(struct axgbe_tx_desc); + tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx, + tsize, AXGBE_DESC_ALIGN, socket_id); + if (!tz) { + axgbe_tx_queue_release(txq); + return -ENOMEM; + } + memset(tz->addr, 0, tsize); + txq->ring_phys_addr = (uint64_t)tz->phys_addr; + txq->desc = tz->addr; + txq->queue_id = queue_idx; + txq->port_id = dev->data->port_id; + txq->dma_regs = (void *)((uint8_t *)pdata->xgmac_regs + DMA_CH_BASE + + (DMA_CH_INC * txq->queue_id)); + txq->dma_tail_reg = (volatile uint32_t *)((uint8_t *)txq->dma_regs + + DMA_CH_TDTR_LO); + txq->cur = 0; + txq->dirty = 0; + txq->nb_desc_free = txq->nb_desc; + /* Allocate software ring */ + tsize = txq->nb_desc * sizeof(struct rte_mbuf *); + txq->sw_ring = rte_zmalloc("tx_sw_ring", tsize, + RTE_CACHE_LINE_SIZE); + if (!txq->sw_ring) { + axgbe_tx_queue_release(txq); + return -ENOMEM; + } + dev->data->tx_queues[queue_idx] = txq; + if (!pdata->tx_queues) + pdata->tx_queues = dev->data->tx_queues; + + if (txq->vector_disable) + dev->tx_pkt_burst = &axgbe_xmit_pkts; + else +#ifdef RTE_ARCH_X86 + dev->tx_pkt_burst = &axgbe_xmit_pkts_vec; +#else + dev->tx_pkt_burst = &axgbe_xmit_pkts; +#endif + + return 0; +} + +static void axgbe_txq_prepare_tx_stop(struct axgbe_port *pdata, + unsigned int queue) +{ + unsigned int tx_status; + unsigned long tx_timeout; + + /* The Tx engine cannot be stopped if it is actively processing + * packets. Wait for the Tx queue to empty the Tx fifo. Don't + * wait forever though... 
+ */ + tx_timeout = rte_get_timer_cycles() + (AXGBE_DMA_STOP_TIMEOUT * + rte_get_timer_hz()); + while (time_before(rte_get_timer_cycles(), tx_timeout)) { + tx_status = AXGMAC_MTL_IOREAD(pdata, queue, MTL_Q_TQDR); + if ((AXGMAC_GET_BITS(tx_status, MTL_Q_TQDR, TRCSTS) != 1) && + (AXGMAC_GET_BITS(tx_status, MTL_Q_TQDR, TXQSTS) == 0)) + break; + + rte_delay_us(900); + } + + if (!time_before(rte_get_timer_cycles(), tx_timeout)) + PMD_DRV_LOG(ERR, + "timed out waiting for Tx queue %u to empty\n", + queue); +} + +static void axgbe_prepare_tx_stop(struct axgbe_port *pdata, + unsigned int queue) +{ + unsigned int tx_dsr, tx_pos, tx_qidx; + unsigned int tx_status; + unsigned long tx_timeout; + + if (AXGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) > 0x20) + return axgbe_txq_prepare_tx_stop(pdata, queue); + + /* Calculate the status register to read and the position within */ + if (queue < DMA_DSRX_FIRST_QUEUE) { + tx_dsr = DMA_DSR0; + tx_pos = (queue * DMA_DSR_Q_WIDTH) + DMA_DSR0_TPS_START; + } else { + tx_qidx = queue - DMA_DSRX_FIRST_QUEUE; + + tx_dsr = DMA_DSR1 + ((tx_qidx / DMA_DSRX_QPR) * DMA_DSRX_INC); + tx_pos = ((tx_qidx % DMA_DSRX_QPR) * DMA_DSR_Q_WIDTH) + + DMA_DSRX_TPS_START; + } + + /* The Tx engine cannot be stopped if it is actively processing + * descriptors. Wait for the Tx engine to enter the stopped or + * suspended state. Don't wait forever though... + */ + tx_timeout = rte_get_timer_cycles() + (AXGBE_DMA_STOP_TIMEOUT * + rte_get_timer_hz()); + while (time_before(rte_get_timer_cycles(), tx_timeout)) { + tx_status = AXGMAC_IOREAD(pdata, tx_dsr); + tx_status = GET_BITS(tx_status, tx_pos, DMA_DSR_TPS_WIDTH); + if ((tx_status == DMA_TPS_STOPPED) || + (tx_status == DMA_TPS_SUSPENDED)) + break; + + rte_delay_us(900); + } + + if (!time_before(rte_get_timer_cycles(), tx_timeout)) + PMD_DRV_LOG(ERR, + "timed out waiting for Tx DMA channel %u to stop\n", + queue); +} + +void axgbe_dev_disable_tx(struct rte_eth_dev *dev) +{ + struct axgbe_tx_queue *txq; + struct axgbe_port *pdata = dev->data->dev_private; + unsigned int i; + + /* Prepare for stopping DMA channel */ + for (i = 0; i < pdata->tx_q_count; i++) { + txq = dev->data->tx_queues[i]; + axgbe_prepare_tx_stop(pdata, i); + } + /* Disable MAC Tx */ + AXGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0); + /* Disable each Tx queue*/ + for (i = 0; i < pdata->tx_q_count; i++) + AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN, + 0); + /* Disable each Tx DMA channel */ + for (i = 0; i < dev->data->nb_tx_queues; i++) { + txq = dev->data->tx_queues[i]; + AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, ST, 0); + } +} + +void axgbe_dev_enable_tx(struct rte_eth_dev *dev) +{ + struct axgbe_tx_queue *txq; + struct axgbe_port *pdata = dev->data->dev_private; + unsigned int i; + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + txq = dev->data->tx_queues[i]; + /* Enable Tx DMA channel */ + AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, ST, 1); + } + /* Enable Tx queue*/ + for (i = 0; i < pdata->tx_q_count; i++) + AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN, + MTL_Q_ENABLED); + /* Enable MAC Tx */ + AXGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1); +} + +/* Free Tx conformed mbufs */ +static void axgbe_xmit_cleanup(struct axgbe_tx_queue *txq) +{ + volatile struct axgbe_tx_desc *desc; + uint16_t idx; + + idx = AXGBE_GET_DESC_IDX(txq, txq->dirty); + while (txq->cur != txq->dirty) { + if (unlikely(idx == txq->nb_desc)) + idx = 0; + desc = &txq->desc[idx]; + /* Check for ownership */ + if (AXGMAC_GET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN)) + return; + 
memset((void *)&desc->desc2, 0, 8); + /* Free mbuf */ + rte_pktmbuf_free(txq->sw_ring[idx]); + txq->sw_ring[idx++] = NULL; + txq->dirty++; + } +} + +/* Tx Descriptor formation + * Considering each mbuf requires one desc + * mbuf is linear + */ +static int axgbe_xmit_hw(struct axgbe_tx_queue *txq, + struct rte_mbuf *mbuf) +{ + volatile struct axgbe_tx_desc *desc; + uint16_t idx; + uint64_t mask; + + idx = AXGBE_GET_DESC_IDX(txq, txq->cur); + desc = &txq->desc[idx]; + + /* Update buffer address and length */ + desc->baddr = rte_mbuf_data_iova(mbuf); + AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, HL_B1L, + mbuf->pkt_len); + /* Total msg length to transmit */ + AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, FL, + mbuf->pkt_len); + /* Mark it as First and Last Descriptor */ + AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, FD, 1); + AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, LD, 1); + /* Mark it as a NORMAL descriptor */ + AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CTXT, 0); + /* configure h/w Offload */ + mask = mbuf->ol_flags & PKT_TX_L4_MASK; + if ((mask == PKT_TX_TCP_CKSUM) || (mask == PKT_TX_UDP_CKSUM)) + AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CIC, 0x3); + else if (mbuf->ol_flags & PKT_TX_IP_CKSUM) + AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CIC, 0x1); + rte_wmb(); + + /* Set OWN bit */ + AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN, 1); + rte_wmb(); + + /* Save mbuf */ + txq->sw_ring[idx] = mbuf; + /* Update current index*/ + txq->cur++; + /* Update stats */ + txq->bytes += mbuf->pkt_len; + + return 0; +} + +/* Eal supported tx wrapper*/ +uint16_t +axgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + PMD_INIT_FUNC_TRACE(); + + if (unlikely(nb_pkts == 0)) + return nb_pkts; + + struct axgbe_tx_queue *txq; + uint16_t nb_desc_free; + uint16_t nb_pkt_sent = 0; + uint16_t idx; + uint32_t tail_addr; + struct rte_mbuf *mbuf; + + txq = (struct axgbe_tx_queue *)tx_queue; + nb_desc_free = txq->nb_desc - (txq->cur - txq->dirty); + + if (unlikely(nb_desc_free <= txq->free_thresh)) { + axgbe_xmit_cleanup(txq); + nb_desc_free = txq->nb_desc - (txq->cur - txq->dirty); + if (unlikely(nb_desc_free == 0)) + return 0; + } + nb_pkts = RTE_MIN(nb_desc_free, nb_pkts); + while (nb_pkts--) { + mbuf = *tx_pkts++; + if (axgbe_xmit_hw(txq, mbuf)) + goto out; + nb_pkt_sent++; + } +out: + /* Sync read and write */ + rte_mb(); + idx = AXGBE_GET_DESC_IDX(txq, txq->cur); + tail_addr = low32_value(txq->ring_phys_addr + + idx * sizeof(struct axgbe_tx_desc)); + /* Update tail reg with next immediate address to kick Tx DMA channel*/ + AXGMAC_DMA_IOWRITE(txq, DMA_CH_TDTR_LO, tail_addr); + txq->pkts += nb_pkt_sent; + return nb_pkt_sent; +} + +void axgbe_dev_clear_queues(struct rte_eth_dev *dev) +{ + PMD_INIT_FUNC_TRACE(); + uint8_t i; + struct axgbe_rx_queue *rxq; + struct axgbe_tx_queue *txq; + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = dev->data->rx_queues[i]; + + if (rxq) { + axgbe_rx_queue_release(rxq); + dev->data->rx_queues[i] = NULL; + } + } + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + txq = dev->data->tx_queues[i]; + + if (txq) { + axgbe_tx_queue_release(txq); + dev->data->tx_queues[i] = NULL; + } + } +} + +int +axgbe_dev_rx_descriptor_status(void *rx_queue, uint16_t offset) +{ + struct axgbe_rx_queue *rxq = rx_queue; + volatile union axgbe_rx_desc *desc; + uint16_t idx; + + + if (unlikely(offset >= rxq->nb_desc)) + return -EINVAL; + + if (offset >= rxq->nb_desc - rxq->dirty) + return RTE_ETH_RX_DESC_UNAVAIL; + + idx = 
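axgbe_xmit_hw() above fills the descriptor body (buffer address, length, FD/LD flags), issues a write barrier, and only then sets the OWN bit, so the DMA engine can never observe a half-written descriptor; axgbe_xmit_pkts() later kicks the tail register once per burst. The sketch below shows that ordering with a simplified descriptor and C11 fences in place of rte_wmb(); the flag encoding is hypothetical:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the hardware Tx descriptor layout. */
struct soft_tx_desc {
        uint64_t baddr;   /* buffer DMA address */
        uint32_t ctrl;    /* length + FD/LD flags, hypothetical encoding */
        uint32_t own;     /* ownership word, written last */
};

/* Fill every field except OWN, publish them, then flip OWN. */
static void post_tx_desc(struct soft_tx_desc *d, uint64_t addr, uint32_t len)
{
        d->baddr = addr;
        d->ctrl = len | (1u << 29) | (1u << 28);   /* pretend FD | LD */

        /* make the body visible before ownership transfers (rte_wmb() analogue) */
        atomic_thread_fence(memory_order_release);
        d->own = 1;
        atomic_thread_fence(memory_order_release);
        /* a real driver would now write the ring tail register to kick DMA */
}

int main(void)
{
        struct soft_tx_desc d = { 0 };

        post_tx_desc(&d, 0x1000, 128);
        printf("own=%u ctrl=0x%08x\n", d.own, d.ctrl);
        return 0;
}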
AXGBE_GET_DESC_IDX(rxq, rxq->cur); + desc = &rxq->desc[idx + offset]; + + if (!AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, OWN)) + return RTE_ETH_RX_DESC_DONE; + + return RTE_ETH_RX_DESC_AVAIL; +} + +int +axgbe_dev_tx_descriptor_status(void *tx_queue, uint16_t offset) +{ + struct axgbe_tx_queue *txq = tx_queue; + volatile struct axgbe_tx_desc *desc; + uint16_t idx; + + + if (unlikely(offset >= txq->nb_desc)) + return -EINVAL; + + if (offset >= txq->nb_desc - txq->dirty) + return RTE_ETH_TX_DESC_UNAVAIL; + + idx = AXGBE_GET_DESC_IDX(txq, txq->dirty + txq->free_batch_cnt - 1); + desc = &txq->desc[idx + offset]; + + if (!AXGMAC_GET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN)) + return RTE_ETH_TX_DESC_DONE; + + return RTE_ETH_TX_DESC_FULL; +} diff --git a/src/spdk/dpdk/drivers/net/axgbe/axgbe_rxtx.h b/src/spdk/dpdk/drivers/net/axgbe/axgbe_rxtx.h new file mode 100644 index 000000000..f2fbe9299 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/axgbe/axgbe_rxtx.h @@ -0,0 +1,191 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved. + * Copyright(c) 2018 Synopsys, Inc. All rights reserved. + */ + +#ifndef _AXGBE_RXTX_H_ +#define _AXGBE_RXTX_H_ + +/* to suppress gcc warnings related to descriptor casting*/ +#ifdef RTE_TOOLCHAIN_GCC +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif + +#ifdef RTE_TOOLCHAIN_CLANG +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif + +/* Descriptor related defines */ +#define AXGBE_MAX_RING_DESC 4096 /*should be power of 2*/ +#define AXGBE_TX_DESC_MIN_FREE (AXGBE_MAX_RING_DESC >> 3) +#define AXGBE_TX_DESC_MAX_PROC (AXGBE_MAX_RING_DESC >> 1) +#define AXGBE_MIN_RING_DESC 32 +#define RTE_AXGBE_DESCS_PER_LOOP 4 +#define RTE_AXGBE_MAX_RX_BURST 32 + +#define AXGBE_RX_FREE_THRESH 32 +#define AXGBE_TX_FREE_THRESH 32 + +#define AXGBE_DESC_ALIGN 128 +#define AXGBE_DESC_OWN 0x80000000 +#define AXGBE_ERR_STATUS 0x000f0000 +#define AXGBE_L3_CSUM_ERR 0x00050000 +#define AXGBE_L4_CSUM_ERR 0x00060000 + +#include "axgbe_common.h" + +#define AXGBE_GET_DESC_PT(_queue, _idx) \ + (((_queue)->desc) + \ + ((_idx) & ((_queue)->nb_desc - 1))) + +#define AXGBE_GET_DESC_IDX(_queue, _idx) \ + ((_idx) & ((_queue)->nb_desc - 1)) \ + +/* Rx desc format */ +union axgbe_rx_desc { + struct { + uint64_t baddr; + uint32_t desc2; + uint32_t desc3; + } read; + struct { + uint32_t desc0; + uint32_t desc1; + uint32_t desc2; + uint32_t desc3; + } write; +}; + +struct axgbe_rx_queue { + /* membuf pool for rx buffers */ + struct rte_mempool *mb_pool; + /* H/w Rx buffer size configured in DMA */ + unsigned int buf_size; + /* CRC h/w offload */ + uint16_t crc_len; + /* address of s/w rx buffers */ + struct rte_mbuf **sw_ring; + /* Port private data */ + struct axgbe_port *pdata; + /* Number of Rx descriptors in queue */ + uint16_t nb_desc; + /* max free RX desc to hold */ + uint16_t free_thresh; + /* Index of descriptor to check for packet availability */ + uint64_t cur; + /* Index of descriptor to check for buffer reallocation */ + uint64_t dirty; + /* Software Rx descriptor ring*/ + volatile union axgbe_rx_desc *desc; + /* Ring physical address */ + uint64_t ring_phys_addr; + /* Dma Channel register address */ + void *dma_regs; + /* Dma channel tail register address*/ + volatile uint32_t *dma_tail_reg; + /* DPDK queue index */ + uint16_t queue_id; + /* dpdk port id*/ + uint16_t port_id; + /* queue stats */ + uint64_t pkts; + uint64_t bytes; + uint64_t errors; + uint64_t rx_mbuf_alloc_failed; + /* Number of mbufs allocated from pool*/ + 
uint64_t mbuf_alloc; + +} __rte_cache_aligned; + +/*Tx descriptor format */ +struct axgbe_tx_desc { + phys_addr_t baddr; + uint32_t desc2; + uint32_t desc3; +}; + +struct axgbe_tx_queue { + /* Port private data reference */ + struct axgbe_port *pdata; + /* Number of Tx descriptors in queue*/ + uint16_t nb_desc; + /* Start freeing TX buffers if there are less free descriptors than + * this value + */ + uint16_t free_thresh; + /* Available descriptors for Tx processing*/ + uint16_t nb_desc_free; + /* Batch of mbufs/descs to release */ + uint16_t free_batch_cnt; + /* Flag for vector support */ + uint16_t vector_disable; + /* Index of descriptor to be used for current transfer */ + uint64_t cur; + /* Index of descriptor to check for transfer complete */ + uint64_t dirty; + /* Virtual address of ring */ + volatile struct axgbe_tx_desc *desc; + /* Physical address of ring */ + uint64_t ring_phys_addr; + /* Dma channel register space */ + void *dma_regs; + /* Dma tail register address of ring*/ + volatile uint32_t *dma_tail_reg; + /* Tx queue index/id*/ + uint16_t queue_id; + /* Reference to hold Tx mbufs mapped to Tx descriptors freed + * after transmission confirmation + */ + struct rte_mbuf **sw_ring; + /* dpdk port id*/ + uint16_t port_id; + /* queue stats */ + uint64_t pkts; + uint64_t bytes; + uint64_t errors; + +} __rte_cache_aligned; + +/*Queue related APIs */ + +/* + * RX/TX function prototypes + */ + + +void axgbe_dev_tx_queue_release(void *txq); +int axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id, + uint16_t nb_tx_desc, unsigned int socket_id, + const struct rte_eth_txconf *tx_conf); +void axgbe_dev_enable_tx(struct rte_eth_dev *dev); +void axgbe_dev_disable_tx(struct rte_eth_dev *dev); +int axgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id); +int axgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id); + +uint16_t axgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); +uint16_t axgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); + + +void axgbe_dev_rx_queue_release(void *rxq); +int axgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id, + uint16_t nb_rx_desc, unsigned int socket_id, + const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *mb_pool); +void axgbe_dev_enable_rx(struct rte_eth_dev *dev); +void axgbe_dev_disable_rx(struct rte_eth_dev *dev); +int axgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id); +int axgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id); +uint16_t axgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); +uint16_t eth_axgbe_recv_scattered_pkts(void *rx_queue, + struct rte_mbuf **rx_pkts, uint16_t nb_pkts); +uint16_t axgbe_recv_pkts_threshold_refresh(void *rx_queue, + struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); +void axgbe_dev_clear_queues(struct rte_eth_dev *dev); +int axgbe_dev_rx_descriptor_status(void *rx_queue, uint16_t offset); +int axgbe_dev_tx_descriptor_status(void *tx_queue, uint16_t offset); + +#endif /* _AXGBE_RXTX_H_ */ diff --git a/src/spdk/dpdk/drivers/net/axgbe/axgbe_rxtx_vec_sse.c b/src/spdk/dpdk/drivers/net/axgbe/axgbe_rxtx_vec_sse.c new file mode 100644 index 000000000..9be703713 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/axgbe/axgbe_rxtx_vec_sse.c @@ -0,0 +1,93 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved. + * Copyright(c) 2018 Synopsys, Inc. All rights reserved. 
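The queue structures above keep cur and dirty as free-running 64-bit counters and reduce them to a ring slot with AXGBE_GET_DESC_IDX(), i.e. a mask against nb_desc - 1 (which is why the setup code insists on a power-of-two descriptor count); the number of in-flight descriptors is then simply cur - dirty with no wrap handling. A tiny standalone illustration of that bookkeeping, with a hypothetical ring size:

#include <stdint.h>
#include <stdio.h>

#define NB_DESC 16u   /* power of two, as the setup code enforces */

static inline uint32_t desc_idx(uint64_t counter)
{
        return (uint32_t)(counter & (NB_DESC - 1));
}

int main(void)
{
        uint64_t cur = 40;     /* descriptors ever submitted */
        uint64_t dirty = 37;   /* descriptors ever reclaimed */

        printf("slot of cur   = %u\n", desc_idx(cur));           /* 40 mod 16 = 8 */
        printf("slot of dirty = %u\n", desc_idx(dirty));         /* 37 mod 16 = 5 */
        printf("in flight     = %u\n", (uint32_t)(cur - dirty)); /* 3 */
        return 0;
}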
+ */ + +#include "axgbe_ethdev.h" +#include "axgbe_rxtx.h" +#include "axgbe_phy.h" + +#include +#include +#include + +/* Useful to avoid shifting for every descriptor prepration*/ +#define TX_DESC_CTRL_FLAGS 0xb000000000000000 +#define TX_FREE_BULK 8 +#define TX_FREE_BULK_CHECK (TX_FREE_BULK - 1) + +static inline void +axgbe_vec_tx(volatile struct axgbe_tx_desc *desc, + struct rte_mbuf *mbuf) +{ + __m128i descriptor = _mm_set_epi64x((uint64_t)mbuf->pkt_len << 32 | + TX_DESC_CTRL_FLAGS | mbuf->data_len, + mbuf->buf_iova + + mbuf->data_off); + _mm_store_si128((__m128i *)desc, descriptor); +} + +static void +axgbe_xmit_cleanup_vec(struct axgbe_tx_queue *txq) +{ + volatile struct axgbe_tx_desc *desc; + int idx, i; + + idx = AXGBE_GET_DESC_IDX(txq, txq->dirty + txq->free_batch_cnt + - 1); + desc = &txq->desc[idx]; + if (desc->desc3 & AXGBE_DESC_OWN) + return; + /* memset avoided for desc ctrl fields since in vec_tx path + * all 128 bits are populated + */ + for (i = 0; i < txq->free_batch_cnt; i++, idx--) + rte_pktmbuf_free_seg(txq->sw_ring[idx]); + + + txq->dirty += txq->free_batch_cnt; + txq->nb_desc_free += txq->free_batch_cnt; +} + +uint16_t +axgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + PMD_INIT_FUNC_TRACE(); + + struct axgbe_tx_queue *txq; + uint16_t idx, nb_commit, loop, i; + uint32_t tail_addr; + + txq = (struct axgbe_tx_queue *)tx_queue; + if (txq->nb_desc_free < txq->free_thresh) { + axgbe_xmit_cleanup_vec(txq); + if (unlikely(txq->nb_desc_free == 0)) + return 0; + } + nb_pkts = RTE_MIN(txq->nb_desc_free, nb_pkts); + nb_commit = nb_pkts; + idx = AXGBE_GET_DESC_IDX(txq, txq->cur); + loop = txq->nb_desc - idx; + if (nb_commit >= loop) { + for (i = 0; i < loop; ++i, ++idx, ++tx_pkts) { + axgbe_vec_tx(&txq->desc[idx], *tx_pkts); + txq->sw_ring[idx] = *tx_pkts; + } + nb_commit -= loop; + idx = 0; + } + for (i = 0; i < nb_commit; ++i, ++idx, ++tx_pkts) { + axgbe_vec_tx(&txq->desc[idx], *tx_pkts); + txq->sw_ring[idx] = *tx_pkts; + } + txq->cur += nb_pkts; + tail_addr = (uint32_t)(txq->ring_phys_addr + + idx * sizeof(struct axgbe_tx_desc)); + /* Update tail reg with next immediate address to kick Tx DMA channel*/ + rte_write32(tail_addr, (void *)txq->dma_tail_reg); + txq->pkts += nb_pkts; + txq->nb_desc_free -= nb_pkts; + + return nb_pkts; +} diff --git a/src/spdk/dpdk/drivers/net/axgbe/meson.build b/src/spdk/dpdk/drivers/net/axgbe/meson.build new file mode 100644 index 000000000..86873b7ef --- /dev/null +++ b/src/spdk/dpdk/drivers/net/axgbe/meson.build @@ -0,0 +1,20 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright (c) 2018 Advanced Micro Devices, Inc. All rights reserved. 
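axgbe_vec_tx() above packs the whole 16-byte Tx descriptor into a single __m128i and writes it with one _mm_store_si128(), which is why the vector path can skip the scalar path's field-by-field updates. The snippet below is a compilable sketch of the same idea for x86/SSE2 only; the two-quadword descriptor layout and the len/flags encoding are hypothetical:

#include <emmintrin.h>   /* SSE2 intrinsics */
#include <stdint.h>
#include <stdio.h>

/* Hypothetical 16-byte descriptor: bytes 0..7 = buffer address, 8..15 = len+flags. */
struct vec_desc {
        uint64_t qw0;
        uint64_t qw1;
} __attribute__((aligned(16)));

static inline void vec_write_desc(struct vec_desc *d, uint64_t addr,
                                  uint64_t len_flags)
{
        /* _mm_set_epi64x() takes the high quadword first */
        __m128i v = _mm_set_epi64x((long long)len_flags, (long long)addr);

        _mm_store_si128((__m128i *)d, v);   /* one aligned 16-byte store */
}

int main(void)
{
        struct vec_desc d;

        vec_write_desc(&d, 0x2000, (128ull << 32) | 0x3ull);
        printf("qw0=0x%llx qw1=0x%llx\n",
               (unsigned long long)d.qw0, (unsigned long long)d.qw1);
        return 0;
}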
+ +if not is_linux + build = false + reason = 'only supported on linux' +endif + +sources = files('axgbe_ethdev.c', + 'axgbe_dev.c', + 'axgbe_mdio.c', + 'axgbe_phy_impl.c', + 'axgbe_i2c.c', + 'axgbe_rxtx.c') + +cflags += '-Wno-cast-qual' + +if arch_subdir == 'x86' + sources += files('axgbe_rxtx_vec_sse.c') +endif diff --git a/src/spdk/dpdk/drivers/net/axgbe/rte_pmd_axgbe_version.map b/src/spdk/dpdk/drivers/net/axgbe/rte_pmd_axgbe_version.map new file mode 100644 index 000000000..f9f17e4f6 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/axgbe/rte_pmd_axgbe_version.map @@ -0,0 +1,3 @@ +DPDK_20.0 { + local: *; +}; diff --git a/src/spdk/dpdk/drivers/net/bnx2x/Makefile b/src/spdk/dpdk/drivers/net/bnx2x/Makefile new file mode 100644 index 000000000..451434cc1 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnx2x/Makefile @@ -0,0 +1,33 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright (c) 2014 - 2018 Cavium Inc. +# All rights reserved. +# www.cavium.com +include $(RTE_SDK)/mk/rte.vars.mk + +# +# library name +# +LIB = librte_pmd_bnx2x.a + +CFLAGS += -O3 +CFLAGS += $(WERROR_FLAGS) +CFLAGS += -DZLIB_CONST +LDLIBS += -lz +LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring +LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs +LDLIBS += -lrte_bus_pci + +EXPORT_MAP := rte_pmd_bnx2x_version.map + +# +# all source are stored in SRCS-y +# +SRCS-$(CONFIG_RTE_LIBRTE_BNX2X_PMD) += bnx2x.c +SRCS-$(CONFIG_RTE_LIBRTE_BNX2X_PMD) += bnx2x_rxtx.c +SRCS-$(CONFIG_RTE_LIBRTE_BNX2X_PMD) += bnx2x_stats.c +SRCS-$(CONFIG_RTE_LIBRTE_BNX2X_PMD) += bnx2x_ethdev.c +SRCS-$(CONFIG_RTE_LIBRTE_BNX2X_PMD) += ecore_sp.c +SRCS-$(CONFIG_RTE_LIBRTE_BNX2X_PMD) += elink.c +SRCS-$(CONFIG_RTE_LIBRTE_BNX2X_PMD) += bnx2x_vfpf.c + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/src/spdk/dpdk/drivers/net/bnx2x/bnx2x.c b/src/spdk/dpdk/drivers/net/bnx2x/bnx2x.c new file mode 100644 index 000000000..ff7646b25 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnx2x/bnx2x.c @@ -0,0 +1,11953 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2007-2013 Broadcom Corporation. + * + * Eric Davis + * David Christensen + * Gary Zambrano + * + * Copyright (c) 2013-2015 Brocade Communications Systems, Inc. + * Copyright (c) 2015-2018 Cavium Inc. + * All rights reserved. + * www.cavium.com + */ + +#define BNX2X_DRIVER_VERSION "1.78.18" + +#include "bnx2x.h" +#include "bnx2x_vfpf.h" +#include "ecore_sp.h" +#include "ecore_init.h" +#include "ecore_init_ops.h" + +#include "rte_version.h" + +#include +#include +#include +#include +#include + +#define BNX2X_PMD_VER_PREFIX "BNX2X PMD" +#define BNX2X_PMD_VERSION_MAJOR 1 +#define BNX2X_PMD_VERSION_MINOR 1 +#define BNX2X_PMD_VERSION_REVISION 0 +#define BNX2X_PMD_VERSION_PATCH 1 + +static inline const char * +bnx2x_pmd_version(void) +{ + static char version[32]; + + snprintf(version, sizeof(version), "%s %s_%d.%d.%d.%d", + BNX2X_PMD_VER_PREFIX, + BNX2X_DRIVER_VERSION, + BNX2X_PMD_VERSION_MAJOR, + BNX2X_PMD_VERSION_MINOR, + BNX2X_PMD_VERSION_REVISION, + BNX2X_PMD_VERSION_PATCH); + + return version; +} + +static z_stream zlib_stream; + +#define EVL_VLID_MASK 0x0FFF + +#define BNX2X_DEF_SB_ATT_IDX 0x0001 +#define BNX2X_DEF_SB_IDX 0x0002 + +/* + * FLR Support - bnx2x_pf_flr_clnup() is called during nic_load in the per + * function HW initialization. 
+ */ +#define FLR_WAIT_USEC 10000 /* 10 msecs */ +#define FLR_WAIT_INTERVAL 50 /* usecs */ +#define FLR_POLL_CNT (FLR_WAIT_USEC / FLR_WAIT_INTERVAL) /* 200 */ + +struct pbf_pN_buf_regs { + int pN; + uint32_t init_crd; + uint32_t crd; + uint32_t crd_freed; +}; + +struct pbf_pN_cmd_regs { + int pN; + uint32_t lines_occup; + uint32_t lines_freed; +}; + +/* resources needed for unloading a previously loaded device */ + +#define BNX2X_PREV_WAIT_NEEDED 1 +rte_spinlock_t bnx2x_prev_mtx; +struct bnx2x_prev_list_node { + LIST_ENTRY(bnx2x_prev_list_node) node; + uint8_t bus; + uint8_t slot; + uint8_t path; + uint8_t aer; + uint8_t undi; +}; + +static LIST_HEAD(, bnx2x_prev_list_node) bnx2x_prev_list + = LIST_HEAD_INITIALIZER(bnx2x_prev_list); + +static int load_count[2][3] = { { 0 } }; + /* per-path: 0-common, 1-port0, 2-port1 */ + +static void bnx2x_cmng_fns_init(struct bnx2x_softc *sc, uint8_t read_cfg, + uint8_t cmng_type); +static int bnx2x_get_cmng_fns_mode(struct bnx2x_softc *sc); +static void storm_memset_cmng(struct bnx2x_softc *sc, struct cmng_init *cmng, + uint8_t port); +static void bnx2x_set_reset_global(struct bnx2x_softc *sc); +static void bnx2x_set_reset_in_progress(struct bnx2x_softc *sc); +static uint8_t bnx2x_reset_is_done(struct bnx2x_softc *sc, int engine); +static uint8_t bnx2x_clear_pf_load(struct bnx2x_softc *sc); +static uint8_t bnx2x_chk_parity_attn(struct bnx2x_softc *sc, uint8_t * global, + uint8_t print); +static void bnx2x_int_disable(struct bnx2x_softc *sc); +static int bnx2x_release_leader_lock(struct bnx2x_softc *sc); +static void bnx2x_pf_disable(struct bnx2x_softc *sc); +static void bnx2x_update_rx_prod(struct bnx2x_softc *sc, + struct bnx2x_fastpath *fp, + uint16_t rx_bd_prod, uint16_t rx_cq_prod); +static void bnx2x_link_report_locked(struct bnx2x_softc *sc); +static void bnx2x_link_report(struct bnx2x_softc *sc); +void bnx2x_link_status_update(struct bnx2x_softc *sc); +static int bnx2x_alloc_mem(struct bnx2x_softc *sc); +static void bnx2x_free_mem(struct bnx2x_softc *sc); +static int bnx2x_alloc_fw_stats_mem(struct bnx2x_softc *sc); +static void bnx2x_free_fw_stats_mem(struct bnx2x_softc *sc); +static __rte_noinline +int bnx2x_nic_load(struct bnx2x_softc *sc); + +static int bnx2x_handle_sp_tq(struct bnx2x_softc *sc); +static void bnx2x_handle_fp_tq(struct bnx2x_fastpath *fp); +static void bnx2x_ack_sb(struct bnx2x_softc *sc, uint8_t igu_sb_id, + uint8_t storm, uint16_t index, uint8_t op, + uint8_t update); + +int bnx2x_test_bit(int nr, volatile unsigned long *addr) +{ + int res; + + mb(); + res = ((*addr) & (1UL << nr)) != 0; + mb(); + return res; +} + +void bnx2x_set_bit(unsigned int nr, volatile unsigned long *addr) +{ + __sync_fetch_and_or(addr, (1UL << nr)); +} + +void bnx2x_clear_bit(int nr, volatile unsigned long *addr) +{ + __sync_fetch_and_and(addr, ~(1UL << nr)); +} + +int bnx2x_test_and_clear_bit(int nr, volatile unsigned long *addr) +{ + unsigned long mask = (1UL << nr); + return __sync_fetch_and_and(addr, ~mask) & mask; +} + +int bnx2x_cmpxchg(volatile int *addr, int old, int new) +{ + return __sync_val_compare_and_swap(addr, old, new); +} + +int +bnx2x_dma_alloc(struct bnx2x_softc *sc, size_t size, struct bnx2x_dma *dma, + const char *msg, uint32_t align) +{ + char mz_name[RTE_MEMZONE_NAMESIZE]; + const struct rte_memzone *z; + + dma->sc = sc; + if (IS_PF(sc)) + snprintf(mz_name, sizeof(mz_name), "bnx2x%d_%s_%" PRIx64, SC_ABS_FUNC(sc), msg, + rte_get_timer_cycles()); + else + snprintf(mz_name, sizeof(mz_name), "bnx2x%d_%s_%" PRIx64, sc->pcie_device, 
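bnx2x_set_bit(), bnx2x_clear_bit() and bnx2x_test_and_clear_bit() above wrap the GCC/clang __sync atomic builtins to get Linux-style atomic bit operations on an unsigned long word. A standalone sketch of that same pattern (the builtins are compiler extensions, which matches this driver's gcc/clang-only build):

#include <stdio.h>

static volatile unsigned long flags;

static void set_bit(unsigned int nr, volatile unsigned long *addr)
{
        __sync_fetch_and_or(addr, 1UL << nr);
}

static void clear_bit(unsigned int nr, volatile unsigned long *addr)
{
        __sync_fetch_and_and(addr, ~(1UL << nr));
}

/* Returns non-zero if the bit was set before it was cleared. */
static int test_and_clear_bit(unsigned int nr, volatile unsigned long *addr)
{
        unsigned long mask = 1UL << nr;

        return (__sync_fetch_and_and(addr, ~mask) & mask) != 0;
}

int main(void)
{
        set_bit(3, &flags);
        printf("was set: %d, now 0x%lx\n", test_and_clear_bit(3, &flags), flags);
        clear_bit(0, &flags);   /* harmless on an already-clear bit */
        return 0;
}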
msg, + rte_get_timer_cycles()); + + /* Caller must take care that strlen(mz_name) < RTE_MEMZONE_NAMESIZE */ + z = rte_memzone_reserve_aligned(mz_name, (uint64_t)size, + SOCKET_ID_ANY, + RTE_MEMZONE_IOVA_CONTIG, align); + if (z == NULL) { + PMD_DRV_LOG(ERR, sc, "DMA alloc failed for %s", msg); + return -ENOMEM; + } + dma->paddr = (uint64_t) z->iova; + dma->vaddr = z->addr; + dma->mzone = (const void *)z; + + PMD_DRV_LOG(DEBUG, sc, + "%s: virt=%p phys=%" PRIx64, msg, dma->vaddr, dma->paddr); + + return 0; +} + +void bnx2x_dma_free(struct bnx2x_dma *dma) +{ + if (dma->mzone == NULL) + return; + + rte_memzone_free((const struct rte_memzone *)dma->mzone); + dma->sc = NULL; + dma->paddr = 0; + dma->vaddr = NULL; + dma->nseg = 0; + dma->mzone = NULL; +} + +static int bnx2x_acquire_hw_lock(struct bnx2x_softc *sc, uint32_t resource) +{ + uint32_t lock_status; + uint32_t resource_bit = (1 << resource); + int func = SC_FUNC(sc); + uint32_t hw_lock_control_reg; + int cnt; + +#ifndef RTE_LIBRTE_BNX2X_DEBUG_PERIODIC + if (resource) + PMD_INIT_FUNC_TRACE(sc); +#else + PMD_INIT_FUNC_TRACE(sc); +#endif + + /* validate the resource is within range */ + if (resource > HW_LOCK_MAX_RESOURCE_VALUE) { + PMD_DRV_LOG(NOTICE, sc, + "resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE", + resource); + return -1; + } + + if (func <= 5) { + hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + (func * 8)); + } else { + hw_lock_control_reg = + (MISC_REG_DRIVER_CONTROL_7 + ((func - 6) * 8)); + } + + /* validate the resource is not already taken */ + lock_status = REG_RD(sc, hw_lock_control_reg); + if (lock_status & resource_bit) { + PMD_DRV_LOG(NOTICE, sc, + "resource in use (status 0x%x bit 0x%x)", + lock_status, resource_bit); + return -1; + } + + /* try every 5ms for 5 seconds */ + for (cnt = 0; cnt < 1000; cnt++) { + REG_WR(sc, (hw_lock_control_reg + 4), resource_bit); + lock_status = REG_RD(sc, hw_lock_control_reg); + if (lock_status & resource_bit) { + return 0; + } + DELAY(5000); + } + + PMD_DRV_LOG(NOTICE, sc, "Resource 0x%x resource_bit 0x%x lock timeout!", + resource, resource_bit); + return -1; +} + +static int bnx2x_release_hw_lock(struct bnx2x_softc *sc, uint32_t resource) +{ + uint32_t lock_status; + uint32_t resource_bit = (1 << resource); + int func = SC_FUNC(sc); + uint32_t hw_lock_control_reg; + +#ifndef RTE_LIBRTE_BNX2X_DEBUG_PERIODIC + if (resource) + PMD_INIT_FUNC_TRACE(sc); +#else + PMD_INIT_FUNC_TRACE(sc); +#endif + + /* validate the resource is within range */ + if (resource > HW_LOCK_MAX_RESOURCE_VALUE) { + PMD_DRV_LOG(NOTICE, sc, + "(resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE)" + " resource_bit 0x%x", resource, resource_bit); + return -1; + } + + if (func <= 5) { + hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + (func * 8)); + } else { + hw_lock_control_reg = + (MISC_REG_DRIVER_CONTROL_7 + ((func - 6) * 8)); + } + + /* validate the resource is currently taken */ + lock_status = REG_RD(sc, hw_lock_control_reg); + if (!(lock_status & resource_bit)) { + PMD_DRV_LOG(NOTICE, sc, + "resource not in use (status 0x%x bit 0x%x)", + lock_status, resource_bit); + return -1; + } + + REG_WR(sc, hw_lock_control_reg, resource_bit); + return 0; +} + +static void bnx2x_acquire_phy_lock(struct bnx2x_softc *sc) +{ + BNX2X_PHY_LOCK(sc); + bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_MDIO); +} + +static void bnx2x_release_phy_lock(struct bnx2x_softc *sc) +{ + bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_MDIO); + BNX2X_PHY_UNLOCK(sc); +} + +/* copy command into DMAE command memory and set DMAE command Go */ +void 
bnx2x_post_dmae(struct bnx2x_softc *sc, struct dmae_command *dmae, int idx) +{ + uint32_t cmd_offset; + uint32_t i; + + cmd_offset = (DMAE_REG_CMD_MEM + (sizeof(struct dmae_command) * idx)); + for (i = 0; i < ((sizeof(struct dmae_command) / 4)); i++) { + REG_WR(sc, (cmd_offset + (i * 4)), *(((uint32_t *) dmae) + i)); + } + + REG_WR(sc, dmae_reg_go_c[idx], 1); +} + +uint32_t bnx2x_dmae_opcode_add_comp(uint32_t opcode, uint8_t comp_type) +{ + return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) | + DMAE_COMMAND_C_TYPE_ENABLE); +} + +uint32_t bnx2x_dmae_opcode_clr_src_reset(uint32_t opcode) +{ + return opcode & ~DMAE_COMMAND_SRC_RESET; +} + +uint32_t +bnx2x_dmae_opcode(struct bnx2x_softc * sc, uint8_t src_type, uint8_t dst_type, + uint8_t with_comp, uint8_t comp_type) +{ + uint32_t opcode = 0; + + opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) | + (dst_type << DMAE_COMMAND_DST_SHIFT)); + + opcode |= (DMAE_COMMAND_SRC_RESET | DMAE_COMMAND_DST_RESET); + + opcode |= (SC_PORT(sc) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0); + + opcode |= ((SC_VN(sc) << DMAE_COMMAND_E1HVN_SHIFT) | + (SC_VN(sc) << DMAE_COMMAND_DST_VN_SHIFT)); + + opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT); + +#ifdef __BIG_ENDIAN + opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP; +#else + opcode |= DMAE_CMD_ENDIANITY_DW_SWAP; +#endif + + if (with_comp) { + opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type); + } + + return opcode; +} + +static void +bnx2x_prep_dmae_with_comp(struct bnx2x_softc *sc, struct dmae_command *dmae, + uint8_t src_type, uint8_t dst_type) +{ + memset(dmae, 0, sizeof(struct dmae_command)); + + /* set the opcode */ + dmae->opcode = bnx2x_dmae_opcode(sc, src_type, dst_type, + TRUE, DMAE_COMP_PCI); + + /* fill in the completion parameters */ + dmae->comp_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, wb_comp)); + dmae->comp_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, wb_comp)); + dmae->comp_val = DMAE_COMP_VAL; +} + +/* issue a DMAE command over the init channel and wait for completion */ +static int +bnx2x_issue_dmae_with_comp(struct bnx2x_softc *sc, struct dmae_command *dmae) +{ + uint32_t *wb_comp = BNX2X_SP(sc, wb_comp); + int timeout = CHIP_REV_IS_SLOW(sc) ? 
400000 : 4000; + + /* reset completion */ + *wb_comp = 0; + + /* post the command on the channel used for initializations */ + bnx2x_post_dmae(sc, dmae, INIT_DMAE_C(sc)); + + /* wait for completion */ + DELAY(500); + + while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) { + if (!timeout || + (sc->recovery_state != BNX2X_RECOVERY_DONE && + sc->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) { + PMD_DRV_LOG(INFO, sc, "DMAE timeout!"); + return DMAE_TIMEOUT; + } + + timeout--; + DELAY(50); + } + + if (*wb_comp & DMAE_PCI_ERR_FLAG) { + PMD_DRV_LOG(INFO, sc, "DMAE PCI error!"); + return DMAE_PCI_ERROR; + } + + return 0; +} + +void bnx2x_read_dmae(struct bnx2x_softc *sc, uint32_t src_addr, uint32_t len32) +{ + struct dmae_command dmae; + uint32_t *data; + uint32_t i; + int rc; + + if (!sc->dmae_ready) { + data = BNX2X_SP(sc, wb_data[0]); + + for (i = 0; i < len32; i++) { + data[i] = REG_RD(sc, (src_addr + (i * 4))); + } + + return; + } + + /* set opcode and fixed command fields */ + bnx2x_prep_dmae_with_comp(sc, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI); + + /* fill in addresses and len */ + dmae.src_addr_lo = (src_addr >> 2); /* GRC addr has dword resolution */ + dmae.src_addr_hi = 0; + dmae.dst_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, wb_data)); + dmae.dst_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, wb_data)); + dmae.len = len32; + + /* issue the command and wait for completion */ + if ((rc = bnx2x_issue_dmae_with_comp(sc, &dmae)) != 0) { + rte_panic("DMAE failed (%d)", rc); + }; +} + +void +bnx2x_write_dmae(struct bnx2x_softc *sc, rte_iova_t dma_addr, uint32_t dst_addr, + uint32_t len32) +{ + struct dmae_command dmae; + int rc; + + if (!sc->dmae_ready) { + ecore_init_str_wr(sc, dst_addr, BNX2X_SP(sc, wb_data[0]), len32); + return; + } + + /* set opcode and fixed command fields */ + bnx2x_prep_dmae_with_comp(sc, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC); + + /* fill in addresses and len */ + dmae.src_addr_lo = U64_LO(dma_addr); + dmae.src_addr_hi = U64_HI(dma_addr); + dmae.dst_addr_lo = (dst_addr >> 2); /* GRC addr has dword resolution */ + dmae.dst_addr_hi = 0; + dmae.len = len32; + + /* issue the command and wait for completion */ + if ((rc = bnx2x_issue_dmae_with_comp(sc, &dmae)) != 0) { + rte_panic("DMAE failed (%d)", rc); + } +} + +static void +bnx2x_write_dmae_phys_len(struct bnx2x_softc *sc, rte_iova_t phys_addr, + uint32_t addr, uint32_t len) +{ + uint32_t dmae_wr_max = DMAE_LEN32_WR_MAX(sc); + uint32_t offset = 0; + + while (len > dmae_wr_max) { + bnx2x_write_dmae(sc, (phys_addr + offset), /* src DMA address */ + (addr + offset), /* dst GRC address */ + dmae_wr_max); + offset += (dmae_wr_max * 4); + len -= dmae_wr_max; + } + + bnx2x_write_dmae(sc, (phys_addr + offset), /* src DMA address */ + (addr + offset), /* dst GRC address */ + len); +} + +void +bnx2x_set_ctx_validation(struct bnx2x_softc *sc, struct eth_context *cxt, + uint32_t cid) +{ + /* ustorm cxt validation */ + cxt->ustorm_ag_context.cdu_usage = + CDU_RSRVD_VALUE_TYPE_A(HW_CID(sc, cid), + CDU_REGION_NUMBER_UCM_AG, + ETH_CONNECTION_TYPE); + /* xcontext validation */ + cxt->xstorm_ag_context.cdu_reserved = + CDU_RSRVD_VALUE_TYPE_A(HW_CID(sc, cid), + CDU_REGION_NUMBER_XCM_AG, + ETH_CONNECTION_TYPE); +} + +static void +bnx2x_storm_memset_hc_timeout(struct bnx2x_softc *sc, uint8_t fw_sb_id, + uint8_t sb_index, uint8_t ticks) +{ + uint32_t addr = + (BAR_CSTRORM_INTMEM + + CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index)); + + REG_WR8(sc, addr, ticks); +} + +static void +bnx2x_storm_memset_hc_disable(struct bnx2x_softc *sc, uint16_t 
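bnx2x_write_dmae_phys_len() above splits a long transfer into DMAE_LEN32_WR_MAX-sized pieces, advancing both the DMA source and the GRC destination by the byte offset already written before issuing the final short chunk. A small standalone sketch of that chunking arithmetic; CHUNK_MAX_DWORDS and issue_chunk() are illustrative stand-ins, not the chip's real limit or the driver's DMAE call:

#include <stdint.h>
#include <stdio.h>

#define CHUNK_MAX_DWORDS 0x400u   /* hypothetical per-command limit */

/* Stand-in for issuing one DMAE command of len32 dwords. */
static void issue_chunk(uint64_t phys, uint32_t addr, uint32_t len32)
{
        printf("chunk: src=0x%llx dst=0x%x len=%u dwords\n",
               (unsigned long long)phys, addr, len32);
}

static void write_chunked(uint64_t phys, uint32_t addr, uint32_t len32)
{
        uint32_t offset = 0;   /* byte offset, advanced 4 bytes per dword written */

        while (len32 > CHUNK_MAX_DWORDS) {
                issue_chunk(phys + offset, addr + offset, CHUNK_MAX_DWORDS);
                offset += CHUNK_MAX_DWORDS * 4;
                len32 -= CHUNK_MAX_DWORDS;
        }
        issue_chunk(phys + offset, addr + offset, len32);
}

int main(void)
{
        write_chunked(0x10000, 0x5000, 0x900);   /* 0x400 + 0x400 + 0x100 dwords */
        return 0;
}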
fw_sb_id, + uint8_t sb_index, uint8_t disable) +{ + uint32_t enable_flag = + (disable) ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT); + uint32_t addr = + (BAR_CSTRORM_INTMEM + + CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index)); + uint8_t flags; + + /* clear and set */ + flags = REG_RD8(sc, addr); + flags &= ~HC_INDEX_DATA_HC_ENABLED; + flags |= enable_flag; + REG_WR8(sc, addr, flags); +} + +void +bnx2x_update_coalesce_sb_index(struct bnx2x_softc *sc, uint8_t fw_sb_id, + uint8_t sb_index, uint8_t disable, uint16_t usec) +{ + uint8_t ticks = (usec / 4); + + bnx2x_storm_memset_hc_timeout(sc, fw_sb_id, sb_index, ticks); + + disable = (disable) ? 1 : ((usec) ? 0 : 1); + bnx2x_storm_memset_hc_disable(sc, fw_sb_id, sb_index, disable); +} + +uint32_t elink_cb_reg_read(struct bnx2x_softc *sc, uint32_t reg_addr) +{ + return REG_RD(sc, reg_addr); +} + +void elink_cb_reg_write(struct bnx2x_softc *sc, uint32_t reg_addr, uint32_t val) +{ + REG_WR(sc, reg_addr, val); +} + +void +elink_cb_event_log(__rte_unused struct bnx2x_softc *sc, + __rte_unused const elink_log_id_t elink_log_id, ...) +{ + PMD_DRV_LOG(DEBUG, sc, "ELINK EVENT LOG (%d)", elink_log_id); +} + +static int bnx2x_set_spio(struct bnx2x_softc *sc, int spio, uint32_t mode) +{ + uint32_t spio_reg; + + /* Only 2 SPIOs are configurable */ + if ((spio != MISC_SPIO_SPIO4) && (spio != MISC_SPIO_SPIO5)) { + PMD_DRV_LOG(NOTICE, sc, "Invalid SPIO 0x%x", spio); + return -1; + } + + bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_SPIO); + + /* read SPIO and mask except the float bits */ + spio_reg = (REG_RD(sc, MISC_REG_SPIO) & MISC_SPIO_FLOAT); + + switch (mode) { + case MISC_SPIO_OUTPUT_LOW: + /* clear FLOAT and set CLR */ + spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS); + spio_reg |= (spio << MISC_SPIO_CLR_POS); + break; + + case MISC_SPIO_OUTPUT_HIGH: + /* clear FLOAT and set SET */ + spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS); + spio_reg |= (spio << MISC_SPIO_SET_POS); + break; + + case MISC_SPIO_INPUT_HI_Z: + /* set FLOAT */ + spio_reg |= (spio << MISC_SPIO_FLOAT_POS); + break; + + default: + break; + } + + REG_WR(sc, MISC_REG_SPIO, spio_reg); + bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_SPIO); + + return 0; +} + +static int bnx2x_gpio_read(struct bnx2x_softc *sc, int gpio_num, uint8_t port) +{ + /* The GPIO should be swapped if swap register is set and active */ + int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) && + REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port); + int gpio_shift = gpio_num; + if (gpio_port) + gpio_shift += MISC_REGISTERS_GPIO_PORT_SHIFT; + + uint32_t gpio_mask = (1 << gpio_shift); + uint32_t gpio_reg; + + if (gpio_num > MISC_REGISTERS_GPIO_3) { + PMD_DRV_LOG(NOTICE, sc, "Invalid GPIO %d", gpio_num); + return -1; + } + + /* read GPIO value */ + gpio_reg = REG_RD(sc, MISC_REG_GPIO); + + /* get the requested pin value */ + return ((gpio_reg & gpio_mask) == gpio_mask) ? 
1 : 0; +} + +static int +bnx2x_gpio_write(struct bnx2x_softc *sc, int gpio_num, uint32_t mode, uint8_t port) +{ + /* The GPIO should be swapped if swap register is set and active */ + int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) && + REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port); + int gpio_shift = gpio_num; + if (gpio_port) + gpio_shift += MISC_REGISTERS_GPIO_PORT_SHIFT; + + uint32_t gpio_mask = (1 << gpio_shift); + uint32_t gpio_reg; + + if (gpio_num > MISC_REGISTERS_GPIO_3) { + PMD_DRV_LOG(NOTICE, sc, "Invalid GPIO %d", gpio_num); + return -1; + } + + bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); + + /* read GPIO and mask except the float bits */ + gpio_reg = (REG_RD(sc, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT); + + switch (mode) { + case MISC_REGISTERS_GPIO_OUTPUT_LOW: + /* clear FLOAT and set CLR */ + gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); + gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS); + break; + + case MISC_REGISTERS_GPIO_OUTPUT_HIGH: + /* clear FLOAT and set SET */ + gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); + gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS); + break; + + case MISC_REGISTERS_GPIO_INPUT_HI_Z: + /* set FLOAT */ + gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); + break; + + default: + break; + } + + REG_WR(sc, MISC_REG_GPIO, gpio_reg); + bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); + + return 0; +} + +static int +bnx2x_gpio_mult_write(struct bnx2x_softc *sc, uint8_t pins, uint32_t mode) +{ + uint32_t gpio_reg; + + /* any port swapping should be handled by caller */ + + bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); + + /* read GPIO and mask except the float bits */ + gpio_reg = REG_RD(sc, MISC_REG_GPIO); + gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS); + gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS); + gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS); + + switch (mode) { + case MISC_REGISTERS_GPIO_OUTPUT_LOW: + /* set CLR */ + gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS); + break; + + case MISC_REGISTERS_GPIO_OUTPUT_HIGH: + /* set SET */ + gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS); + break; + + case MISC_REGISTERS_GPIO_INPUT_HI_Z: + /* set FLOAT */ + gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS); + break; + + default: + PMD_DRV_LOG(NOTICE, sc, + "Invalid GPIO mode assignment %d", mode); + bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); + return -1; + } + + REG_WR(sc, MISC_REG_GPIO, gpio_reg); + bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); + + return 0; +} + +static int +bnx2x_gpio_int_write(struct bnx2x_softc *sc, int gpio_num, uint32_t mode, + uint8_t port) +{ + /* The GPIO should be swapped if swap register is set and active */ + int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) && + REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port); + int gpio_shift = gpio_num; + if (gpio_port) + gpio_shift += MISC_REGISTERS_GPIO_PORT_SHIFT; + + uint32_t gpio_mask = (1 << gpio_shift); + uint32_t gpio_reg; + + if (gpio_num > MISC_REGISTERS_GPIO_3) { + PMD_DRV_LOG(NOTICE, sc, "Invalid GPIO %d", gpio_num); + return -1; + } + + bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); + + /* read GPIO int */ + gpio_reg = REG_RD(sc, MISC_REG_GPIO_INT); + + switch (mode) { + case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR: + /* clear SET and set CLR */ + gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS); + gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS); + break; + + case MISC_REGISTERS_GPIO_INT_OUTPUT_SET: + /* clear CLR and set SET */ + gpio_reg &= ~(gpio_mask << 
MISC_REGISTERS_GPIO_INT_CLR_POS); + gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS); + break; + + default: + break; + } + + REG_WR(sc, MISC_REG_GPIO_INT, gpio_reg); + bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); + + return 0; +} + +uint32_t +elink_cb_gpio_read(struct bnx2x_softc * sc, uint16_t gpio_num, uint8_t port) +{ + return bnx2x_gpio_read(sc, gpio_num, port); +} + +uint8_t elink_cb_gpio_write(struct bnx2x_softc * sc, uint16_t gpio_num, uint8_t mode, /* 0=low 1=high */ + uint8_t port) +{ + return bnx2x_gpio_write(sc, gpio_num, mode, port); +} + +uint8_t +elink_cb_gpio_mult_write(struct bnx2x_softc * sc, uint8_t pins, + uint8_t mode /* 0=low 1=high */ ) +{ + return bnx2x_gpio_mult_write(sc, pins, mode); +} + +uint8_t elink_cb_gpio_int_write(struct bnx2x_softc * sc, uint16_t gpio_num, uint8_t mode, /* 0=low 1=high */ + uint8_t port) +{ + return bnx2x_gpio_int_write(sc, gpio_num, mode, port); +} + +void elink_cb_notify_link_changed(struct bnx2x_softc *sc) +{ + REG_WR(sc, (MISC_REG_AEU_GENERAL_ATTN_12 + + (SC_FUNC(sc) * sizeof(uint32_t))), 1); +} + +/* send the MCP a request, block until there is a reply */ +uint32_t +elink_cb_fw_command(struct bnx2x_softc *sc, uint32_t command, uint32_t param) +{ + int mb_idx = SC_FW_MB_IDX(sc); + uint32_t seq; + uint32_t rc = 0; + uint32_t cnt = 1; + uint8_t delay = CHIP_REV_IS_SLOW(sc) ? 100 : 10; + + seq = ++sc->fw_seq; + SHMEM_WR(sc, func_mb[mb_idx].drv_mb_param, param); + SHMEM_WR(sc, func_mb[mb_idx].drv_mb_header, (command | seq)); + + PMD_DRV_LOG(DEBUG, sc, + "wrote command 0x%08x to FW MB param 0x%08x", + (command | seq), param); + + /* Let the FW do it's magic. GIve it up to 5 seconds... */ + do { + DELAY(delay * 1000); + rc = SHMEM_RD(sc, func_mb[mb_idx].fw_mb_header); + } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500)); + + /* is this a reply to our command? */ + if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) { + rc &= FW_MSG_CODE_MASK; + } else { + /* Ruh-roh! 
*/ + PMD_DRV_LOG(NOTICE, sc, "FW failed to respond!"); + rc = 0; + } + + return rc; +} + +static uint32_t +bnx2x_fw_command(struct bnx2x_softc *sc, uint32_t command, uint32_t param) +{ + return elink_cb_fw_command(sc, command, param); +} + +static void +__storm_memset_dma_mapping(struct bnx2x_softc *sc, uint32_t addr, + rte_iova_t mapping) +{ + REG_WR(sc, addr, U64_LO(mapping)); + REG_WR(sc, (addr + 4), U64_HI(mapping)); +} + +static void +storm_memset_spq_addr(struct bnx2x_softc *sc, rte_iova_t mapping, + uint16_t abs_fid) +{ + uint32_t addr = (XSEM_REG_FAST_MEMORY + + XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid)); + __storm_memset_dma_mapping(sc, addr, mapping); +} + +static void +storm_memset_vf_to_pf(struct bnx2x_softc *sc, uint16_t abs_fid, uint16_t pf_id) +{ + REG_WR8(sc, (BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid)), + pf_id); + REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid)), + pf_id); + REG_WR8(sc, (BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid)), + pf_id); + REG_WR8(sc, (BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid)), + pf_id); +} + +static void +storm_memset_func_en(struct bnx2x_softc *sc, uint16_t abs_fid, uint8_t enable) +{ + REG_WR8(sc, (BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid)), + enable); + REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid)), + enable); + REG_WR8(sc, (BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid)), + enable); + REG_WR8(sc, (BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid)), + enable); +} + +static void +storm_memset_eq_data(struct bnx2x_softc *sc, struct event_ring_data *eq_data, + uint16_t pfid) +{ + uint32_t addr; + size_t size; + + addr = (BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid)); + size = sizeof(struct event_ring_data); + ecore_storm_memset_struct(sc, addr, size, (uint32_t *) eq_data); +} + +static void +storm_memset_eq_prod(struct bnx2x_softc *sc, uint16_t eq_prod, uint16_t pfid) +{ + uint32_t addr = (BAR_CSTRORM_INTMEM + + CSTORM_EVENT_RING_PROD_OFFSET(pfid)); + REG_WR16(sc, addr, eq_prod); +} + +/* + * Post a slowpath command. + * + * A slowpath command is used to propagate a configuration change through + * the controller in a controlled manner, allowing each STORM processor and + * other H/W blocks to phase in the change. The commands sent on the + * slowpath are referred to as ramrods. Depending on the ramrod used the + * completion of the ramrod will occur in different ways. Here's a + * breakdown of ramrods and how they complete: + * + * RAMROD_CMD_ID_ETH_PORT_SETUP + * Used to setup the leading connection on a port. Completes on the + * Receive Completion Queue (RCQ) of that port (typically fp[0]). + * + * RAMROD_CMD_ID_ETH_CLIENT_SETUP + * Used to setup an additional connection on a port. Completes on the + * RCQ of the multi-queue/RSS connection being initialized. + * + * RAMROD_CMD_ID_ETH_STAT_QUERY + * Used to force the storm processors to update the statistics database + * in host memory. This ramrod is send on the leading connection CID and + * completes as an index increment of the CSTORM on the default status + * block. + * + * RAMROD_CMD_ID_ETH_UPDATE + * Used to update the state of the leading connection, usually to udpate + * the RSS indirection table. Completes on the RCQ of the leading + * connection. (Not currently used under FreeBSD until OS support becomes + * available.) + * + * RAMROD_CMD_ID_ETH_HALT + * Used when tearing down a connection prior to driver unload. Completes + * on the RCQ of the multi-queue/RSS connection being torn down. 
Don't + * use this on the leading connection. + * + * RAMROD_CMD_ID_ETH_SET_MAC + * Sets the Unicast/Broadcast/Multicast used by the port. Completes on + * the RCQ of the leading connection. + * + * RAMROD_CMD_ID_ETH_CFC_DEL + * Used when tearing down a conneciton prior to driver unload. Completes + * on the RCQ of the leading connection (since the current connection + * has been completely removed from controller memory). + * + * RAMROD_CMD_ID_ETH_PORT_DEL + * Used to tear down the leading connection prior to driver unload, + * typically fp[0]. Completes as an index increment of the CSTORM on the + * default status block. + * + * RAMROD_CMD_ID_ETH_FORWARD_SETUP + * Used for connection offload. Completes on the RCQ of the multi-queue + * RSS connection that is being offloaded. (Not currently used under + * FreeBSD.) + * + * There can only be one command pending per function. + * + * Returns: + * 0 = Success, !0 = Failure. + */ + +/* must be called under the spq lock */ +static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x_softc *sc) +{ + struct eth_spe *next_spe = sc->spq_prod_bd; + + if (sc->spq_prod_bd == sc->spq_last_bd) { + /* wrap back to the first eth_spq */ + sc->spq_prod_bd = sc->spq; + sc->spq_prod_idx = 0; + } else { + sc->spq_prod_bd++; + sc->spq_prod_idx++; + } + + return next_spe; +} + +/* must be called under the spq lock */ +static void bnx2x_sp_prod_update(struct bnx2x_softc *sc) +{ + int func = SC_FUNC(sc); + + /* + * Make sure that BD data is updated before writing the producer. + * BD data is written to the memory, the producer is read from the + * memory, thus we need a full memory barrier to ensure the ordering. + */ + mb(); + + REG_WR16(sc, (BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func)), + sc->spq_prod_idx); + + mb(); +} + +/** + * bnx2x_is_contextless_ramrod - check if the current command ends on EQ + * + * @cmd: command to check + * @cmd_type: command type + */ +static int bnx2x_is_contextless_ramrod(int cmd, int cmd_type) +{ + if ((cmd_type == NONE_CONNECTION_TYPE) || + (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) || + (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) || + (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) || + (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) || + (cmd == RAMROD_CMD_ID_ETH_SET_MAC) || + (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE)) { + return TRUE; + } else { + return FALSE; + } +} + +/** + * bnx2x_sp_post - place a single command on an SP ring + * + * @sc: driver handle + * @command: command to place (e.g. SETUP, FILTER_RULES, etc.) + * @cid: SW CID the command is related to + * @data_hi: command private data address (high 32 bits) + * @data_lo: command private data address (low 32 bits) + * @cmd_type: command type (e.g. NONE, ETH) + * + * SP data is handled as if it's always an address pair, thus data fields are + * not swapped to little endian in upper functions. Instead this function swaps + * data as if it's two uint32 fields. 
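+ *
+ * A minimal usage sketch, assuming the ramrod data sits in a DMA-able
+ * slowpath buffer ("q_rdata" and "rdata_mapping" are illustrative names,
+ * not fields of this driver):
+ *
+ *   rte_iova_t rdata_mapping = BNX2X_SP_MAPPING(sc, q_rdata);
+ *   bnx2x_sp_post(sc, RAMROD_CMD_ID_ETH_CLIENT_SETUP, cid,
+ *                 U64_HI(rdata_mapping), U64_LO(rdata_mapping),
+ *                 ETH_CONNECTION_TYPE);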
+ */ +int +bnx2x_sp_post(struct bnx2x_softc *sc, int command, int cid, uint32_t data_hi, + uint32_t data_lo, int cmd_type) +{ + struct eth_spe *spe; + uint16_t type; + int common; + + common = bnx2x_is_contextless_ramrod(command, cmd_type); + + if (common) { + if (!atomic_load_acq_long(&sc->eq_spq_left)) { + PMD_DRV_LOG(INFO, sc, "EQ ring is full!"); + return -1; + } + } else { + if (!atomic_load_acq_long(&sc->cq_spq_left)) { + PMD_DRV_LOG(INFO, sc, "SPQ ring is full!"); + return -1; + } + } + + spe = bnx2x_sp_get_next(sc); + + /* CID needs port number to be encoded int it */ + spe->hdr.conn_and_cmd_data = + htole32((command << SPE_HDR_CMD_ID_SHIFT) | HW_CID(sc, cid)); + + type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE; + + /* TBD: Check if it works for VFs */ + type |= ((SC_FUNC(sc) << SPE_HDR_FUNCTION_ID_SHIFT) & + SPE_HDR_FUNCTION_ID); + + spe->hdr.type = htole16(type); + + spe->data.update_data_addr.hi = htole32(data_hi); + spe->data.update_data_addr.lo = htole32(data_lo); + + /* + * It's ok if the actual decrement is issued towards the memory + * somewhere between the lock and unlock. Thus no more explict + * memory barrier is needed. + */ + if (common) { + atomic_subtract_acq_long(&sc->eq_spq_left, 1); + } else { + atomic_subtract_acq_long(&sc->cq_spq_left, 1); + } + + PMD_DRV_LOG(DEBUG, sc, + "SPQE[%x] (%x:%x) (cmd, common?) (%d,%d) hw_cid %x" + "data (%x:%x) type(0x%x) left (CQ, EQ) (%lx,%lx)", + sc->spq_prod_idx, + (uint32_t) U64_HI(sc->spq_dma.paddr), + (uint32_t) (U64_LO(sc->spq_dma.paddr) + + (uint8_t *) sc->spq_prod_bd - + (uint8_t *) sc->spq), command, common, + HW_CID(sc, cid), data_hi, data_lo, type, + atomic_load_acq_long(&sc->cq_spq_left), + atomic_load_acq_long(&sc->eq_spq_left)); + + /* RAMROD completion is processed in bnx2x_intr_legacy() + * which can run from different contexts. + * Ask bnx2x_intr_intr() to process RAMROD + * completion whenever it gets scheduled. 
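+ *
+ * Concretely, the poller is expected to notice sc->scan_fp, walk the
+ * fastpath rings (see bnx2x_rxeof() below) and hand any slowpath CQEs to
+ * bnx2x_sp_event() for completion handling.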
+ */ + rte_atomic32_set(&sc->scan_fp, 1); + bnx2x_sp_prod_update(sc); + + return 0; +} + +static void bnx2x_drv_pulse(struct bnx2x_softc *sc) +{ + SHMEM_WR(sc, func_mb[SC_FW_MB_IDX(sc)].drv_pulse_mb, + sc->fw_drv_pulse_wr_seq); +} + +static int bnx2x_tx_queue_has_work(const struct bnx2x_fastpath *fp) +{ + uint16_t hw_cons; + struct bnx2x_tx_queue *txq = fp->sc->tx_queues[fp->index]; + + if (unlikely(!txq)) { + PMD_TX_LOG(ERR, "ERROR: TX queue is NULL"); + return 0; + } + + mb(); /* status block fields can change */ + hw_cons = le16toh(*fp->tx_cons_sb); + return hw_cons != txq->tx_pkt_head; +} + +static uint8_t bnx2x_has_tx_work(struct bnx2x_fastpath *fp) +{ + /* expand this for multi-cos if ever supported */ + return bnx2x_tx_queue_has_work(fp); +} + +static int bnx2x_has_rx_work(struct bnx2x_fastpath *fp) +{ + uint16_t rx_cq_cons_sb; + struct bnx2x_rx_queue *rxq; + rxq = fp->sc->rx_queues[fp->index]; + if (unlikely(!rxq)) { + PMD_RX_LOG(ERR, "ERROR: RX queue is NULL"); + return 0; + } + + mb(); /* status block fields can change */ + rx_cq_cons_sb = le16toh(*fp->rx_cq_cons_sb); + if (unlikely((rx_cq_cons_sb & MAX_RCQ_ENTRIES(rxq)) == + MAX_RCQ_ENTRIES(rxq))) + rx_cq_cons_sb++; + + PMD_RX_LOG(DEBUG, "hw CQ cons = %d, sw CQ cons = %d", + rx_cq_cons_sb, rxq->rx_cq_head); + + return rxq->rx_cq_head != rx_cq_cons_sb; +} + +static void +bnx2x_sp_event(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp, + union eth_rx_cqe *rr_cqe) +{ + int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data); + int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data); + enum ecore_queue_cmd drv_cmd = ECORE_Q_CMD_MAX; + struct ecore_queue_sp_obj *q_obj = &BNX2X_SP_OBJ(sc, fp).q_obj; + + PMD_DRV_LOG(DEBUG, sc, + "fp=%d cid=%d got ramrod #%d state is %x type is %d", + fp->index, cid, command, sc->state, + rr_cqe->ramrod_cqe.ramrod_type); + + switch (command) { + case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE): + PMD_DRV_LOG(DEBUG, sc, "got UPDATE ramrod. CID %d", cid); + drv_cmd = ECORE_Q_CMD_UPDATE; + break; + + case (RAMROD_CMD_ID_ETH_CLIENT_SETUP): + PMD_DRV_LOG(DEBUG, sc, "got MULTI[%d] setup ramrod", cid); + drv_cmd = ECORE_Q_CMD_SETUP; + break; + + case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP): + PMD_DRV_LOG(DEBUG, sc, + "got MULTI[%d] tx-only setup ramrod", cid); + drv_cmd = ECORE_Q_CMD_SETUP_TX_ONLY; + break; + + case (RAMROD_CMD_ID_ETH_HALT): + PMD_DRV_LOG(DEBUG, sc, "got MULTI[%d] halt ramrod", cid); + drv_cmd = ECORE_Q_CMD_HALT; + break; + + case (RAMROD_CMD_ID_ETH_TERMINATE): + PMD_DRV_LOG(DEBUG, sc, "got MULTI[%d] teminate ramrod", cid); + drv_cmd = ECORE_Q_CMD_TERMINATE; + break; + + case (RAMROD_CMD_ID_ETH_EMPTY): + PMD_DRV_LOG(DEBUG, sc, "got MULTI[%d] empty ramrod", cid); + drv_cmd = ECORE_Q_CMD_EMPTY; + break; + + default: + PMD_DRV_LOG(DEBUG, sc, + "ERROR: unexpected MC reply (%d)" + "on fp[%d]", command, fp->index); + return; + } + + if ((drv_cmd != ECORE_Q_CMD_MAX) && + q_obj->complete_cmd(sc, q_obj, drv_cmd)) { + /* + * q_obj->complete_cmd() failure means that this was + * an unexpected completion. + * + * In this case we don't want to increase the sc->spq_left + * because apparently we haven't sent this command the first + * place. 
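+ *
+ * The commented-out rte_panic() below is the stricter alternative; as
+ * written, the driver just returns without crediting cq_spq_left.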
+ */ + // rte_panic("Unexpected SP completion"); + return; + } + + atomic_add_acq_long(&sc->cq_spq_left, 1); + + PMD_DRV_LOG(DEBUG, sc, "sc->cq_spq_left 0x%lx", + atomic_load_acq_long(&sc->cq_spq_left)); +} + +static uint8_t bnx2x_rxeof(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp) +{ + struct bnx2x_rx_queue *rxq; + uint16_t bd_cons, bd_prod, bd_prod_fw, comp_ring_cons; + uint16_t hw_cq_cons, sw_cq_cons, sw_cq_prod; + + rte_spinlock_lock(&(fp)->rx_mtx); + + rxq = sc->rx_queues[fp->index]; + if (!rxq) { + PMD_RX_LOG(ERR, "RX queue %d is NULL", fp->index); + rte_spinlock_unlock(&(fp)->rx_mtx); + return 0; + } + + /* CQ "next element" is of the size of the regular element */ + hw_cq_cons = le16toh(*fp->rx_cq_cons_sb); + if (unlikely((hw_cq_cons & USABLE_RCQ_ENTRIES_PER_PAGE) == + USABLE_RCQ_ENTRIES_PER_PAGE)) { + hw_cq_cons++; + } + + bd_cons = rxq->rx_bd_head; + bd_prod = rxq->rx_bd_tail; + bd_prod_fw = bd_prod; + sw_cq_cons = rxq->rx_cq_head; + sw_cq_prod = rxq->rx_cq_tail; + + /* + * Memory barrier necessary as speculative reads of the rx + * buffer can be ahead of the index in the status block + */ + rmb(); + + while (sw_cq_cons != hw_cq_cons) { + union eth_rx_cqe *cqe; + struct eth_fast_path_rx_cqe *cqe_fp; + uint8_t cqe_fp_flags; + enum eth_rx_cqe_type cqe_fp_type; + + comp_ring_cons = RCQ_ENTRY(sw_cq_cons, rxq); + bd_prod = RX_BD(bd_prod, rxq); + bd_cons = RX_BD(bd_cons, rxq); + + cqe = &rxq->cq_ring[comp_ring_cons]; + cqe_fp = &cqe->fast_path_cqe; + cqe_fp_flags = cqe_fp->type_error_flags; + cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE; + + /* is this a slowpath msg? */ + if (CQE_TYPE_SLOW(cqe_fp_type)) { + bnx2x_sp_event(sc, fp, cqe); + goto next_cqe; + } + + /* is this an error packet? */ + if (unlikely(cqe_fp_flags & + ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG)) { + PMD_RX_LOG(DEBUG, "flags 0x%x rx packet %u", + cqe_fp_flags, sw_cq_cons); + goto next_rx; + } + + PMD_RX_LOG(DEBUG, "Dropping fastpath called from attn poller!"); + +next_rx: + bd_cons = NEXT_RX_BD(bd_cons); + bd_prod = NEXT_RX_BD(bd_prod); + bd_prod_fw = NEXT_RX_BD(bd_prod_fw); + +next_cqe: + sw_cq_prod = NEXT_RCQ_IDX(sw_cq_prod); + sw_cq_cons = NEXT_RCQ_IDX(sw_cq_cons); + + } /* while work to do */ + + rxq->rx_bd_head = bd_cons; + rxq->rx_bd_tail = bd_prod_fw; + rxq->rx_cq_head = sw_cq_cons; + rxq->rx_cq_tail = sw_cq_prod; + + PMD_RX_LOG(DEBUG, "BD prod = %d, sw CQ prod = %d", + bd_prod_fw, sw_cq_prod); + + /* Update producers */ + bnx2x_update_rx_prod(sc, fp, bd_prod_fw, sw_cq_prod); + + rte_spinlock_unlock(&(fp)->rx_mtx); + + return sw_cq_cons != hw_cq_cons; +} + +static uint16_t +bnx2x_free_tx_pkt(__rte_unused struct bnx2x_fastpath *fp, struct bnx2x_tx_queue *txq, + uint16_t pkt_idx, uint16_t bd_idx) +{ + struct eth_tx_start_bd *tx_start_bd = + &txq->tx_ring[TX_BD(bd_idx, txq)].start_bd; + uint16_t nbd = rte_le_to_cpu_16(tx_start_bd->nbd); + struct rte_mbuf *tx_mbuf = txq->sw_ring[TX_BD(pkt_idx, txq)]; + + if (likely(tx_mbuf != NULL)) { + rte_pktmbuf_free_seg(tx_mbuf); + } else { + PMD_RX_LOG(ERR, "fp[%02d] lost mbuf %lu", + fp->index, (unsigned long)TX_BD(pkt_idx, txq)); + } + + txq->sw_ring[TX_BD(pkt_idx, txq)] = NULL; + txq->nb_tx_avail += nbd; + + while (nbd--) + bd_idx = NEXT_TX_BD(bd_idx); + + return bd_idx; +} + +/* processes transmit completions */ +uint8_t bnx2x_txeof(__rte_unused struct bnx2x_softc * sc, struct bnx2x_fastpath * fp) +{ + uint16_t bd_cons, hw_cons, sw_cons; + __rte_unused uint16_t tx_bd_avail; + + struct bnx2x_tx_queue *txq = fp->sc->tx_queues[fp->index]; + + if 
(unlikely(!txq)) { + PMD_TX_LOG(ERR, "ERROR: TX queue is NULL"); + return 0; + } + + bd_cons = txq->tx_bd_head; + hw_cons = rte_le_to_cpu_16(*fp->tx_cons_sb); + sw_cons = txq->tx_pkt_head; + + while (sw_cons != hw_cons) { + bd_cons = bnx2x_free_tx_pkt(fp, txq, sw_cons, bd_cons); + sw_cons++; + } + + txq->tx_pkt_head = sw_cons; + txq->tx_bd_head = bd_cons; + + tx_bd_avail = txq->nb_tx_avail; + + PMD_TX_LOG(DEBUG, "fp[%02d] avail=%u cons_sb=%u, " + "pkt_head=%u pkt_tail=%u bd_head=%u bd_tail=%u", + fp->index, tx_bd_avail, hw_cons, + txq->tx_pkt_head, txq->tx_pkt_tail, + txq->tx_bd_head, txq->tx_bd_tail); + return TRUE; +} + +static void bnx2x_drain_tx_queues(struct bnx2x_softc *sc) +{ + struct bnx2x_fastpath *fp; + int i, count; + + /* wait until all TX fastpath tasks have completed */ + for (i = 0; i < sc->num_queues; i++) { + fp = &sc->fp[i]; + + count = 1000; + + while (bnx2x_has_tx_work(fp)) { + bnx2x_txeof(sc, fp); + + if (count == 0) { + PMD_TX_LOG(ERR, + "Timeout waiting for fp[%d] " + "transmits to complete!", i); + rte_panic("tx drain failure"); + return; + } + + count--; + DELAY(1000); + rmb(); + } + } + + return; +} + +static int +bnx2x_del_all_macs(struct bnx2x_softc *sc, struct ecore_vlan_mac_obj *mac_obj, + int mac_type, uint8_t wait_for_comp) +{ + unsigned long ramrod_flags = 0, vlan_mac_flags = 0; + int rc; + + /* wait for completion of requested */ + if (wait_for_comp) { + bnx2x_set_bit(RAMROD_COMP_WAIT, &ramrod_flags); + } + + /* Set the mac type of addresses we want to clear */ + bnx2x_set_bit(mac_type, &vlan_mac_flags); + + rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags, &ramrod_flags); + if (rc < 0) + PMD_DRV_LOG(ERR, sc, "Failed to delete MACs (%d)", rc); + + return rc; +} + +static int +bnx2x_fill_accept_flags(struct bnx2x_softc *sc, uint32_t rx_mode, + unsigned long *rx_accept_flags, + unsigned long *tx_accept_flags) +{ + /* Clear the flags first */ + *rx_accept_flags = 0; + *tx_accept_flags = 0; + + switch (rx_mode) { + case BNX2X_RX_MODE_NONE: + /* + * 'drop all' supersedes any accept flags that may have been + * passed to the function. + */ + break; + + case BNX2X_RX_MODE_NORMAL: + bnx2x_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags); + bnx2x_set_bit(ECORE_ACCEPT_MULTICAST, rx_accept_flags); + bnx2x_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags); + + /* internal switching mode */ + bnx2x_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags); + bnx2x_set_bit(ECORE_ACCEPT_MULTICAST, tx_accept_flags); + bnx2x_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags); + + break; + + case BNX2X_RX_MODE_ALLMULTI: + bnx2x_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags); + bnx2x_set_bit(ECORE_ACCEPT_ALL_MULTICAST, rx_accept_flags); + bnx2x_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags); + + /* internal switching mode */ + bnx2x_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags); + bnx2x_set_bit(ECORE_ACCEPT_ALL_MULTICAST, tx_accept_flags); + bnx2x_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags); + + break; + + case BNX2X_RX_MODE_ALLMULTI_PROMISC: + case BNX2X_RX_MODE_PROMISC: + /* + * According to deffinition of SI mode, iface in promisc mode + * should receive matched and unmatched (in resolution of port) + * unicast packets. 
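+ *
+ * This is why ECORE_ACCEPT_UNMATCHED is added to the RX flags below, and
+ * why the TX (internal switching) side accepts all unicast only when
+ * IS_MF_SI(sc) is true.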
+ */ + bnx2x_set_bit(ECORE_ACCEPT_UNMATCHED, rx_accept_flags); + bnx2x_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags); + bnx2x_set_bit(ECORE_ACCEPT_ALL_MULTICAST, rx_accept_flags); + bnx2x_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags); + + /* internal switching mode */ + bnx2x_set_bit(ECORE_ACCEPT_ALL_MULTICAST, tx_accept_flags); + bnx2x_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags); + + if (IS_MF_SI(sc)) { + bnx2x_set_bit(ECORE_ACCEPT_ALL_UNICAST, tx_accept_flags); + } else { + bnx2x_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags); + } + + break; + + default: + PMD_RX_LOG(ERR, "Unknown rx_mode (%d)", rx_mode); + return -1; + } + + /* Set ACCEPT_ANY_VLAN as we do not enable filtering by VLAN */ + if (rx_mode != BNX2X_RX_MODE_NONE) { + bnx2x_set_bit(ECORE_ACCEPT_ANY_VLAN, rx_accept_flags); + bnx2x_set_bit(ECORE_ACCEPT_ANY_VLAN, tx_accept_flags); + } + + return 0; +} + +static int +bnx2x_set_q_rx_mode(struct bnx2x_softc *sc, uint8_t cl_id, + unsigned long rx_mode_flags, + unsigned long rx_accept_flags, + unsigned long tx_accept_flags, unsigned long ramrod_flags) +{ + struct ecore_rx_mode_ramrod_params ramrod_param; + int rc; + + memset(&ramrod_param, 0, sizeof(ramrod_param)); + + /* Prepare ramrod parameters */ + ramrod_param.cid = 0; + ramrod_param.cl_id = cl_id; + ramrod_param.rx_mode_obj = &sc->rx_mode_obj; + ramrod_param.func_id = SC_FUNC(sc); + + ramrod_param.pstate = &sc->sp_state; + ramrod_param.state = ECORE_FILTER_RX_MODE_PENDING; + + ramrod_param.rdata = BNX2X_SP(sc, rx_mode_rdata); + ramrod_param.rdata_mapping = + (rte_iova_t)BNX2X_SP_MAPPING(sc, rx_mode_rdata), + bnx2x_set_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state); + + ramrod_param.ramrod_flags = ramrod_flags; + ramrod_param.rx_mode_flags = rx_mode_flags; + + ramrod_param.rx_accept_flags = rx_accept_flags; + ramrod_param.tx_accept_flags = tx_accept_flags; + + rc = ecore_config_rx_mode(sc, &ramrod_param); + if (rc < 0) { + PMD_RX_LOG(ERR, "Set rx_mode %d failed", sc->rx_mode); + return rc; + } + + return 0; +} + +int bnx2x_set_storm_rx_mode(struct bnx2x_softc *sc) +{ + unsigned long rx_mode_flags = 0, ramrod_flags = 0; + unsigned long rx_accept_flags = 0, tx_accept_flags = 0; + int rc; + + rc = bnx2x_fill_accept_flags(sc, sc->rx_mode, &rx_accept_flags, + &tx_accept_flags); + if (rc) { + return rc; + } + + bnx2x_set_bit(RAMROD_RX, &ramrod_flags); + bnx2x_set_bit(RAMROD_TX, &ramrod_flags); + bnx2x_set_bit(RAMROD_COMP_WAIT, &ramrod_flags); + + return bnx2x_set_q_rx_mode(sc, sc->fp[0].cl_id, rx_mode_flags, + rx_accept_flags, tx_accept_flags, + ramrod_flags); +} + +/* returns the "mcp load_code" according to global load_count array */ +static int bnx2x_nic_load_no_mcp(struct bnx2x_softc *sc) +{ + int path = SC_PATH(sc); + int port = SC_PORT(sc); + + PMD_DRV_LOG(INFO, sc, "NO MCP - load counts[%d] %d, %d, %d", + path, load_count[path][0], load_count[path][1], + load_count[path][2]); + + load_count[path][0]++; + load_count[path][1 + port]++; + PMD_DRV_LOG(INFO, sc, "NO MCP - new load counts[%d] %d, %d, %d", + path, load_count[path][0], load_count[path][1], + load_count[path][2]); + if (load_count[path][0] == 1) + return FW_MSG_CODE_DRV_LOAD_COMMON; + else if (load_count[path][1 + port] == 1) + return FW_MSG_CODE_DRV_LOAD_PORT; + else + return FW_MSG_CODE_DRV_LOAD_FUNCTION; +} + +/* returns the "mcp load_code" according to global load_count array */ +static int bnx2x_nic_unload_no_mcp(struct bnx2x_softc *sc) +{ + int port = SC_PORT(sc); + int path = SC_PATH(sc); + + PMD_DRV_LOG(INFO, sc, "NO MCP - load counts[%d] %d, %d, %d", 
+ path, load_count[path][0], load_count[path][1], + load_count[path][2]); + load_count[path][0]--; + load_count[path][1 + port]--; + PMD_DRV_LOG(INFO, sc, "NO MCP - new load counts[%d] %d, %d, %d", + path, load_count[path][0], load_count[path][1], + load_count[path][2]); + if (load_count[path][0] == 0) { + return FW_MSG_CODE_DRV_UNLOAD_COMMON; + } else if (load_count[path][1 + port] == 0) { + return FW_MSG_CODE_DRV_UNLOAD_PORT; + } else { + return FW_MSG_CODE_DRV_UNLOAD_FUNCTION; + } +} + +/* request unload mode from the MCP: COMMON, PORT or FUNCTION */ +static uint32_t bnx2x_send_unload_req(struct bnx2x_softc *sc, int unload_mode) +{ + uint32_t reset_code = 0; + + /* Select the UNLOAD request mode */ + if (unload_mode == UNLOAD_NORMAL) { + reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; + } else { + reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; + } + + /* Send the request to the MCP */ + if (!BNX2X_NOMCP(sc)) { + reset_code = bnx2x_fw_command(sc, reset_code, 0); + } else { + reset_code = bnx2x_nic_unload_no_mcp(sc); + } + + return reset_code; +} + +/* send UNLOAD_DONE command to the MCP */ +static void bnx2x_send_unload_done(struct bnx2x_softc *sc, uint8_t keep_link) +{ + uint32_t reset_param = + keep_link ? DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET : 0; + + /* Report UNLOAD_DONE to MCP */ + if (!BNX2X_NOMCP(sc)) { + bnx2x_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, reset_param); + } +} + +static int bnx2x_func_wait_started(struct bnx2x_softc *sc) +{ + int tout = 50; + + if (!sc->port.pmf) { + return 0; + } + + /* + * (assumption: No Attention from MCP at this stage) + * PMF probably in the middle of TX disable/enable transaction + * 1. Sync IRS for default SB + * 2. Sync SP queue - this guarantees us that attention handling started + * 3. Wait, that TX disable/enable transaction completes + * + * 1+2 guarantee that if DCBX attention was scheduled it already changed + * pending bit of transaction from STARTED-->TX_STOPPED, if we already + * received completion for the transaction the state is TX_STOPPED. + * State will return to STARTED after completion of TX_STOPPED-->STARTED + * transaction. + */ + + while (ecore_func_get_state(sc, &sc->func_obj) != + ECORE_F_STATE_STARTED && tout--) { + DELAY(20000); + } + + if (ecore_func_get_state(sc, &sc->func_obj) != ECORE_F_STATE_STARTED) { + /* + * Failed to complete the transaction in a "good way" + * Force both transactions with CLR bit. + */ + struct ecore_func_state_params func_params = { NULL }; + + PMD_DRV_LOG(NOTICE, sc, "Unexpected function state! 
" + "Forcing STARTED-->TX_STOPPED-->STARTED"); + + func_params.f_obj = &sc->func_obj; + bnx2x_set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags); + + /* STARTED-->TX_STOPPED */ + func_params.cmd = ECORE_F_CMD_TX_STOP; + ecore_func_state_change(sc, &func_params); + + /* TX_STOPPED-->STARTED */ + func_params.cmd = ECORE_F_CMD_TX_START; + return ecore_func_state_change(sc, &func_params); + } + + return 0; +} + +static int bnx2x_stop_queue(struct bnx2x_softc *sc, int index) +{ + struct bnx2x_fastpath *fp = &sc->fp[index]; + struct ecore_queue_state_params q_params = { NULL }; + int rc; + + PMD_DRV_LOG(DEBUG, sc, "stopping queue %d cid %d", index, fp->index); + + q_params.q_obj = &sc->sp_objs[fp->index].q_obj; + /* We want to wait for completion in this context */ + bnx2x_set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); + + /* Stop the primary connection: */ + + /* ...halt the connection */ + q_params.cmd = ECORE_Q_CMD_HALT; + rc = ecore_queue_state_change(sc, &q_params); + if (rc) { + return rc; + } + + /* ...terminate the connection */ + q_params.cmd = ECORE_Q_CMD_TERMINATE; + memset(&q_params.params.terminate, 0, + sizeof(q_params.params.terminate)); + q_params.params.terminate.cid_index = FIRST_TX_COS_INDEX; + rc = ecore_queue_state_change(sc, &q_params); + if (rc) { + return rc; + } + + /* ...delete cfc entry */ + q_params.cmd = ECORE_Q_CMD_CFC_DEL; + memset(&q_params.params.cfc_del, 0, sizeof(q_params.params.cfc_del)); + q_params.params.cfc_del.cid_index = FIRST_TX_COS_INDEX; + return ecore_queue_state_change(sc, &q_params); +} + +/* wait for the outstanding SP commands */ +static uint8_t bnx2x_wait_sp_comp(struct bnx2x_softc *sc, unsigned long mask) +{ + unsigned long tmp; + int tout = 5000; /* wait for 5 secs tops */ + + while (tout--) { + mb(); + if (!(atomic_load_acq_long(&sc->sp_state) & mask)) { + return TRUE; + } + + DELAY(1000); + } + + mb(); + + tmp = atomic_load_acq_long(&sc->sp_state); + if (tmp & mask) { + PMD_DRV_LOG(INFO, sc, "Filtering completion timed out: " + "sp_state 0x%lx, mask 0x%lx", tmp, mask); + return FALSE; + } + + return FALSE; +} + +static int bnx2x_func_stop(struct bnx2x_softc *sc) +{ + struct ecore_func_state_params func_params = { NULL }; + int rc; + + /* prepare parameters for function state transitions */ + bnx2x_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); + func_params.f_obj = &sc->func_obj; + func_params.cmd = ECORE_F_CMD_STOP; + + /* + * Try to stop the function the 'good way'. If it fails (in case + * of a parity error during bnx2x_chip_cleanup()) and we are + * not in a debug mode, perform a state transaction in order to + * enable further HW_RESET transaction. + */ + rc = ecore_func_state_change(sc, &func_params); + if (rc) { + PMD_DRV_LOG(NOTICE, sc, "FUNC_STOP ramrod failed. 
" + "Running a dry transaction"); + bnx2x_set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags); + return ecore_func_state_change(sc, &func_params); + } + + return 0; +} + +static int bnx2x_reset_hw(struct bnx2x_softc *sc, uint32_t load_code) +{ + struct ecore_func_state_params func_params = { NULL }; + + /* Prepare parameters for function state transitions */ + bnx2x_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); + + func_params.f_obj = &sc->func_obj; + func_params.cmd = ECORE_F_CMD_HW_RESET; + + func_params.params.hw_init.load_phase = load_code; + + return ecore_func_state_change(sc, &func_params); +} + +static void bnx2x_int_disable_sync(struct bnx2x_softc *sc, int disable_hw) +{ + if (disable_hw) { + /* prevent the HW from sending interrupts */ + bnx2x_int_disable(sc); + } +} + +static void +bnx2x_chip_cleanup(struct bnx2x_softc *sc, uint32_t unload_mode, uint8_t keep_link) +{ + int port = SC_PORT(sc); + struct ecore_mcast_ramrod_params rparam = { NULL }; + uint32_t reset_code; + int i, rc = 0; + + bnx2x_drain_tx_queues(sc); + + /* give HW time to discard old tx messages */ + DELAY(1000); + + /* Clean all ETH MACs */ + rc = bnx2x_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_ETH_MAC, + FALSE); + if (rc < 0) { + PMD_DRV_LOG(NOTICE, sc, + "Failed to delete all ETH MACs (%d)", rc); + } + + /* Clean up UC list */ + rc = bnx2x_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_UC_LIST_MAC, + TRUE); + if (rc < 0) { + PMD_DRV_LOG(NOTICE, sc, + "Failed to delete UC MACs list (%d)", rc); + } + + /* Disable LLH */ + REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port * 8, 0); + + /* Set "drop all" to stop Rx */ + + /* + * We need to take the if_maddr_lock() here in order to prevent + * a race between the completion code and this code. + */ + + if (bnx2x_test_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state)) { + bnx2x_set_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state); + } else { + bnx2x_set_storm_rx_mode(sc); + } + + /* Clean up multicast configuration */ + rparam.mcast_obj = &sc->mcast_obj; + rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL); + if (rc < 0) { + PMD_DRV_LOG(NOTICE, sc, + "Failed to send DEL MCAST command (%d)", rc); + } + + /* + * Send the UNLOAD_REQUEST to the MCP. This will return if + * this function should perform FUNCTION, PORT, or COMMON HW + * reset. + */ + reset_code = bnx2x_send_unload_req(sc, unload_mode); + + /* + * (assumption: No Attention from MCP at this stage) + * PMF probably in the middle of TX disable/enable transaction + */ + rc = bnx2x_func_wait_started(sc); + if (rc) { + PMD_DRV_LOG(NOTICE, sc, "bnx2x_func_wait_started failed"); + } + + /* + * Close multi and leading connections + * Completions for ramrods are collected in a synchronous way + */ + for (i = 0; i < sc->num_queues; i++) { + if (bnx2x_stop_queue(sc, i)) { + goto unload_error; + } + } + + /* + * If SP settings didn't get completed so far - something + * very wrong has happen. 
+ */ + if (!bnx2x_wait_sp_comp(sc, ~0x0UL)) { + PMD_DRV_LOG(NOTICE, sc, "Common slow path ramrods got stuck!"); + } + +unload_error: + + rc = bnx2x_func_stop(sc); + if (rc) { + PMD_DRV_LOG(NOTICE, sc, "Function stop failed!"); + } + + /* disable HW interrupts */ + bnx2x_int_disable_sync(sc, TRUE); + + /* Reset the chip */ + rc = bnx2x_reset_hw(sc, reset_code); + if (rc) { + PMD_DRV_LOG(NOTICE, sc, "Hardware reset failed"); + } + + /* Report UNLOAD_DONE to MCP */ + bnx2x_send_unload_done(sc, keep_link); +} + +static void bnx2x_disable_close_the_gate(struct bnx2x_softc *sc) +{ + uint32_t val; + + PMD_DRV_LOG(DEBUG, sc, "Disabling 'close the gates'"); + + val = REG_RD(sc, MISC_REG_AEU_GENERAL_MASK); + val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK | + MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK); + REG_WR(sc, MISC_REG_AEU_GENERAL_MASK, val); +} + +/* + * Cleans the object that have internal lists without sending + * ramrods. Should be run when interrutps are disabled. + */ +static void bnx2x_squeeze_objects(struct bnx2x_softc *sc) +{ + unsigned long ramrod_flags = 0, vlan_mac_flags = 0; + struct ecore_mcast_ramrod_params rparam = { NULL }; + struct ecore_vlan_mac_obj *mac_obj = &sc->sp_objs->mac_obj; + int rc; + + /* Cleanup MACs' object first... */ + + /* Wait for completion of requested */ + bnx2x_set_bit(RAMROD_COMP_WAIT, &ramrod_flags); + /* Perform a dry cleanup */ + bnx2x_set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags); + + /* Clean ETH primary MAC */ + bnx2x_set_bit(ECORE_ETH_MAC, &vlan_mac_flags); + rc = mac_obj->delete_all(sc, &sc->sp_objs->mac_obj, &vlan_mac_flags, + &ramrod_flags); + if (rc != 0) { + PMD_DRV_LOG(NOTICE, sc, "Failed to clean ETH MACs (%d)", rc); + } + + /* Cleanup UC list */ + vlan_mac_flags = 0; + bnx2x_set_bit(ECORE_UC_LIST_MAC, &vlan_mac_flags); + rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags, &ramrod_flags); + if (rc != 0) { + PMD_DRV_LOG(NOTICE, sc, + "Failed to clean UC list MACs (%d)", rc); + } + + /* Now clean mcast object... */ + + rparam.mcast_obj = &sc->mcast_obj; + bnx2x_set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags); + + /* Add a DEL command... */ + rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL); + if (rc < 0) { + PMD_DRV_LOG(NOTICE, sc, + "Failed to send DEL MCAST command (%d)", rc); + } + + /* now wait until all pending commands are cleared */ + + rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT); + while (rc != 0) { + if (rc < 0) { + PMD_DRV_LOG(NOTICE, sc, + "Failed to clean MCAST object (%d)", rc); + return; + } + + rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT); + } +} + +/* stop the controller */ +__rte_noinline +int +bnx2x_nic_unload(struct bnx2x_softc *sc, uint32_t unload_mode, uint8_t keep_link) +{ + uint8_t global = FALSE; + uint32_t val; + + PMD_INIT_FUNC_TRACE(sc); + + PMD_DRV_LOG(DEBUG, sc, "Starting NIC unload..."); + + /* mark driver as unloaded in shmem2 */ + if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) { + val = SHMEM2_RD(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)]); + SHMEM2_WR(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)], + val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2); + } + + if (IS_PF(sc) && sc->recovery_state != BNX2X_RECOVERY_DONE && + (sc->state == BNX2X_STATE_CLOSED || sc->state == BNX2X_STATE_ERROR)) { + /* + * We can get here if the driver has been unloaded + * during parity error recovery and is either waiting for a + * leader to complete or for other functions to unload and + * then ifconfig down has been issued. 
In this case we want to + * unload and let other functions to complete a recovery + * process. + */ + sc->recovery_state = BNX2X_RECOVERY_DONE; + sc->is_leader = 0; + bnx2x_release_leader_lock(sc); + mb(); + + PMD_DRV_LOG(NOTICE, sc, "Can't unload in closed or error state"); + return -1; + } + + /* + * Nothing to do during unload if previous bnx2x_nic_load() + * did not completed successfully - all resourses are released. + */ + if ((sc->state == BNX2X_STATE_CLOSED) || (sc->state == BNX2X_STATE_ERROR)) { + return 0; + } + + sc->state = BNX2X_STATE_CLOSING_WAITING_HALT; + mb(); + + sc->rx_mode = BNX2X_RX_MODE_NONE; + bnx2x_set_rx_mode(sc); + mb(); + + if (IS_PF(sc)) { + /* set ALWAYS_ALIVE bit in shmem */ + sc->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE; + + bnx2x_drv_pulse(sc); + + bnx2x_stats_handle(sc, STATS_EVENT_STOP); + bnx2x_save_statistics(sc); + } + + /* wait till consumers catch up with producers in all queues */ + bnx2x_drain_tx_queues(sc); + + /* if VF indicate to PF this function is going down (PF will delete sp + * elements and clear initializations + */ + if (IS_VF(sc)) { + bnx2x_vf_unload(sc); + } else if (unload_mode != UNLOAD_RECOVERY) { + /* if this is a normal/close unload need to clean up chip */ + bnx2x_chip_cleanup(sc, unload_mode, keep_link); + } else { + /* Send the UNLOAD_REQUEST to the MCP */ + bnx2x_send_unload_req(sc, unload_mode); + + /* + * Prevent transactions to host from the functions on the + * engine that doesn't reset global blocks in case of global + * attention once gloabl blocks are reset and gates are opened + * (the engine which leader will perform the recovery + * last). + */ + if (!CHIP_IS_E1x(sc)) { + bnx2x_pf_disable(sc); + } + + /* disable HW interrupts */ + bnx2x_int_disable_sync(sc, TRUE); + + /* Report UNLOAD_DONE to MCP */ + bnx2x_send_unload_done(sc, FALSE); + } + + /* + * At this stage no more interrupts will arrive so we may safely clean + * the queue'able objects here in case they failed to get cleaned so far. + */ + if (IS_PF(sc)) { + bnx2x_squeeze_objects(sc); + } + + /* There should be no more pending SP commands at this stage */ + sc->sp_state = 0; + + sc->port.pmf = 0; + + if (IS_PF(sc)) { + bnx2x_free_mem(sc); + } + + /* free the host hardware/software hsi structures */ + bnx2x_free_hsi_mem(sc); + + bnx2x_free_fw_stats_mem(sc); + + sc->state = BNX2X_STATE_CLOSED; + + /* + * Check if there are pending parity attentions. If there are - set + * RECOVERY_IN_PROGRESS. + */ + if (IS_PF(sc) && bnx2x_chk_parity_attn(sc, &global, FALSE)) { + bnx2x_set_reset_in_progress(sc); + + /* Set RESET_IS_GLOBAL if needed */ + if (global) { + bnx2x_set_reset_global(sc); + } + } + + /* + * The last driver must disable a "close the gate" if there is no + * parity attention or "process kill" pending. + */ + if (IS_PF(sc) && !bnx2x_clear_pf_load(sc) && + bnx2x_reset_is_done(sc, SC_PATH(sc))) { + bnx2x_disable_close_the_gate(sc); + } + + PMD_DRV_LOG(DEBUG, sc, "Ended NIC unload"); + + return 0; +} + +/* + * Encapsulte an mbuf cluster into the tx bd chain and makes the memory + * visible to the controller. + * + * If an mbuf is submitted to this routine and cannot be given to the + * controller (e.g. it has too many fragments) then the function may free + * the mbuf and return to the caller. + * + * Returns: + * int: Number of TX BDs used for the mbuf + * + * Note the side effect that an mbuf may be freed if it causes a problem. 
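+ *
+ * A minimal usage sketch, assuming the caller keeps a reserve of free BDs
+ * ("tx_free_thresh" is an illustrative threshold) and rings the TX
+ * doorbell once per burst:
+ *
+ *   if (txq->nb_tx_avail > tx_free_thresh)
+ *           nbds = bnx2x_tx_encap(txq, m);
+ *   // ...advance the BD producer for the whole burst afterwards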
+ */ +int bnx2x_tx_encap(struct bnx2x_tx_queue *txq, struct rte_mbuf *m0) +{ + struct eth_tx_start_bd *tx_start_bd; + uint16_t bd_prod, pkt_prod; + struct bnx2x_softc *sc; + uint32_t nbds = 0; + + sc = txq->sc; + bd_prod = txq->tx_bd_tail; + pkt_prod = txq->tx_pkt_tail; + + txq->sw_ring[TX_BD(pkt_prod, txq)] = m0; + + tx_start_bd = &txq->tx_ring[TX_BD(bd_prod, txq)].start_bd; + + tx_start_bd->addr_lo = + rte_cpu_to_le_32(U64_LO(rte_mbuf_data_iova(m0))); + tx_start_bd->addr_hi = + rte_cpu_to_le_32(U64_HI(rte_mbuf_data_iova(m0))); + tx_start_bd->nbytes = rte_cpu_to_le_16(m0->data_len); + tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; + tx_start_bd->general_data = + (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT); + + tx_start_bd->nbd = rte_cpu_to_le_16(2); + + if (m0->ol_flags & PKT_TX_VLAN_PKT) { + tx_start_bd->vlan_or_ethertype = + rte_cpu_to_le_16(m0->vlan_tci); + tx_start_bd->bd_flags.as_bitfield |= + (X_ETH_OUTBAND_VLAN << + ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT); + } else { + if (IS_PF(sc)) + tx_start_bd->vlan_or_ethertype = + rte_cpu_to_le_16(pkt_prod); + else { + /* when transmitting in a vf, start bd + * must hold the ethertype for fw to enforce it + */ + struct rte_ether_hdr *eh = + rte_pktmbuf_mtod(m0, struct rte_ether_hdr *); + + /* Still need to consider inband vlan for enforced */ + if (eh->ether_type == + rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN)) { + struct rte_vlan_hdr *vh = + (struct rte_vlan_hdr *)(eh + 1); + tx_start_bd->bd_flags.as_bitfield |= + (X_ETH_INBAND_VLAN << + ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT); + tx_start_bd->vlan_or_ethertype = + rte_cpu_to_le_16(ntohs(vh->vlan_tci)); + } else { + tx_start_bd->vlan_or_ethertype = + (rte_cpu_to_le_16 + (rte_be_to_cpu_16(eh->ether_type))); + } + } + } + + bd_prod = NEXT_TX_BD(bd_prod); + if (IS_VF(sc)) { + struct eth_tx_parse_bd_e2 *tx_parse_bd; + const struct rte_ether_hdr *eh = + rte_pktmbuf_mtod(m0, struct rte_ether_hdr *); + uint8_t mac_type = UNICAST_ADDRESS; + + tx_parse_bd = + &txq->tx_ring[TX_BD(bd_prod, txq)].parse_bd_e2; + if (rte_is_multicast_ether_addr(&eh->d_addr)) { + if (rte_is_broadcast_ether_addr(&eh->d_addr)) + mac_type = BROADCAST_ADDRESS; + else + mac_type = MULTICAST_ADDRESS; + } + tx_parse_bd->parsing_data = + (mac_type << ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE_SHIFT); + + rte_memcpy(&tx_parse_bd->data.mac_addr.dst_hi, + &eh->d_addr.addr_bytes[0], 2); + rte_memcpy(&tx_parse_bd->data.mac_addr.dst_mid, + &eh->d_addr.addr_bytes[2], 2); + rte_memcpy(&tx_parse_bd->data.mac_addr.dst_lo, + &eh->d_addr.addr_bytes[4], 2); + rte_memcpy(&tx_parse_bd->data.mac_addr.src_hi, + &eh->s_addr.addr_bytes[0], 2); + rte_memcpy(&tx_parse_bd->data.mac_addr.src_mid, + &eh->s_addr.addr_bytes[2], 2); + rte_memcpy(&tx_parse_bd->data.mac_addr.src_lo, + &eh->s_addr.addr_bytes[4], 2); + + tx_parse_bd->data.mac_addr.dst_hi = + rte_cpu_to_be_16(tx_parse_bd->data.mac_addr.dst_hi); + tx_parse_bd->data.mac_addr.dst_mid = + rte_cpu_to_be_16(tx_parse_bd->data. + mac_addr.dst_mid); + tx_parse_bd->data.mac_addr.dst_lo = + rte_cpu_to_be_16(tx_parse_bd->data.mac_addr.dst_lo); + tx_parse_bd->data.mac_addr.src_hi = + rte_cpu_to_be_16(tx_parse_bd->data.mac_addr.src_hi); + tx_parse_bd->data.mac_addr.src_mid = + rte_cpu_to_be_16(tx_parse_bd->data. 
+ mac_addr.src_mid); + tx_parse_bd->data.mac_addr.src_lo = + rte_cpu_to_be_16(tx_parse_bd->data.mac_addr.src_lo); + + PMD_TX_LOG(DEBUG, + "PBD dst %x %x %x src %x %x %x p_data %x", + tx_parse_bd->data.mac_addr.dst_hi, + tx_parse_bd->data.mac_addr.dst_mid, + tx_parse_bd->data.mac_addr.dst_lo, + tx_parse_bd->data.mac_addr.src_hi, + tx_parse_bd->data.mac_addr.src_mid, + tx_parse_bd->data.mac_addr.src_lo, + tx_parse_bd->parsing_data); + } + + PMD_TX_LOG(DEBUG, + "start bd: nbytes %d flags %x vlan %x", + tx_start_bd->nbytes, + tx_start_bd->bd_flags.as_bitfield, + tx_start_bd->vlan_or_ethertype); + + bd_prod = NEXT_TX_BD(bd_prod); + pkt_prod++; + + if (TX_IDX(bd_prod) < 2) + nbds++; + + txq->nb_tx_avail -= 2; + txq->tx_bd_tail = bd_prod; + txq->tx_pkt_tail = pkt_prod; + + return nbds + 2; +} + +static uint16_t bnx2x_cid_ilt_lines(struct bnx2x_softc *sc) +{ + return L2_ILT_LINES(sc); +} + +static void bnx2x_ilt_set_info(struct bnx2x_softc *sc) +{ + struct ilt_client_info *ilt_client; + struct ecore_ilt *ilt = sc->ilt; + uint16_t line = 0; + + PMD_INIT_FUNC_TRACE(sc); + + ilt->start_line = FUNC_ILT_BASE(SC_FUNC(sc)); + + /* CDU */ + ilt_client = &ilt->clients[ILT_CLIENT_CDU]; + ilt_client->client_num = ILT_CLIENT_CDU; + ilt_client->page_size = CDU_ILT_PAGE_SZ; + ilt_client->flags = ILT_CLIENT_SKIP_MEM; + ilt_client->start = line; + line += bnx2x_cid_ilt_lines(sc); + + if (CNIC_SUPPORT(sc)) { + line += CNIC_ILT_LINES; + } + + ilt_client->end = (line - 1); + + /* QM */ + if (QM_INIT(sc->qm_cid_count)) { + ilt_client = &ilt->clients[ILT_CLIENT_QM]; + ilt_client->client_num = ILT_CLIENT_QM; + ilt_client->page_size = QM_ILT_PAGE_SZ; + ilt_client->flags = 0; + ilt_client->start = line; + + /* 4 bytes for each cid */ + line += DIV_ROUND_UP(sc->qm_cid_count * QM_QUEUES_PER_FUNC * 4, + QM_ILT_PAGE_SZ); + + ilt_client->end = (line - 1); + } + + if (CNIC_SUPPORT(sc)) { + /* SRC */ + ilt_client = &ilt->clients[ILT_CLIENT_SRC]; + ilt_client->client_num = ILT_CLIENT_SRC; + ilt_client->page_size = SRC_ILT_PAGE_SZ; + ilt_client->flags = 0; + ilt_client->start = line; + line += SRC_ILT_LINES; + ilt_client->end = (line - 1); + + /* TM */ + ilt_client = &ilt->clients[ILT_CLIENT_TM]; + ilt_client->client_num = ILT_CLIENT_TM; + ilt_client->page_size = TM_ILT_PAGE_SZ; + ilt_client->flags = 0; + ilt_client->start = line; + line += TM_ILT_LINES; + ilt_client->end = (line - 1); + } + + assert((line <= ILT_MAX_LINES)); +} + +static void bnx2x_set_fp_rx_buf_size(struct bnx2x_softc *sc) +{ + int i; + + for (i = 0; i < sc->num_queues; i++) { + /* get the Rx buffer size for RX frames */ + sc->fp[i].rx_buf_size = + (IP_HEADER_ALIGNMENT_PADDING + ETH_OVERHEAD + sc->mtu); + } +} + +int bnx2x_alloc_ilt_mem(struct bnx2x_softc *sc) +{ + + sc->ilt = rte_malloc("", sizeof(struct ecore_ilt), RTE_CACHE_LINE_SIZE); + + return sc->ilt == NULL; +} + +static int bnx2x_alloc_ilt_lines_mem(struct bnx2x_softc *sc) +{ + sc->ilt->lines = rte_calloc("", + sizeof(struct ilt_line), ILT_MAX_LINES, + RTE_CACHE_LINE_SIZE); + return sc->ilt->lines == NULL; +} + +void bnx2x_free_ilt_mem(struct bnx2x_softc *sc) +{ + rte_free(sc->ilt); + sc->ilt = NULL; +} + +static void bnx2x_free_ilt_lines_mem(struct bnx2x_softc *sc) +{ + if (sc->ilt->lines != NULL) { + rte_free(sc->ilt->lines); + sc->ilt->lines = NULL; + } +} + +static void bnx2x_free_mem(struct bnx2x_softc *sc) +{ + uint32_t i; + + for (i = 0; i < L2_ILT_LINES(sc); i++) { + sc->context[i].vcxt = NULL; + sc->context[i].size = 0; + } + + ecore_ilt_mem_op(sc, ILT_MEMOP_FREE); + + 
bnx2x_free_ilt_lines_mem(sc); +} + +static int bnx2x_alloc_mem(struct bnx2x_softc *sc) +{ + int context_size; + int allocated; + int i; + char cdu_name[RTE_MEMZONE_NAMESIZE]; + + /* + * Allocate memory for CDU context: + * This memory is allocated separately and not in the generic ILT + * functions because CDU differs in few aspects: + * 1. There can be multiple entities allocating memory for context - + * regular L2, CNIC, and SRIOV drivers. Each separately controls + * its own ILT lines. + * 2. Since CDU page-size is not a single 4KB page (which is the case + * for the other ILT clients), to be efficient we want to support + * allocation of sub-page-size in the last entry. + * 3. Context pointers are used by the driver to pass to FW / update + * the context (for the other ILT clients the pointers are used just to + * free the memory during unload). + */ + context_size = (sizeof(union cdu_context) * BNX2X_L2_CID_COUNT(sc)); + for (i = 0, allocated = 0; allocated < context_size; i++) { + sc->context[i].size = min(CDU_ILT_PAGE_SZ, + (context_size - allocated)); + + snprintf(cdu_name, sizeof(cdu_name), "cdu_%d", i); + if (bnx2x_dma_alloc(sc, sc->context[i].size, + &sc->context[i].vcxt_dma, + cdu_name, BNX2X_PAGE_SIZE) != 0) { + bnx2x_free_mem(sc); + return -1; + } + + sc->context[i].vcxt = + (union cdu_context *)sc->context[i].vcxt_dma.vaddr; + + allocated += sc->context[i].size; + } + + bnx2x_alloc_ilt_lines_mem(sc); + + if (ecore_ilt_mem_op(sc, ILT_MEMOP_ALLOC)) { + PMD_DRV_LOG(NOTICE, sc, "ecore_ilt_mem_op ILT_MEMOP_ALLOC failed"); + bnx2x_free_mem(sc); + return -1; + } + + return 0; +} + +static void bnx2x_free_fw_stats_mem(struct bnx2x_softc *sc) +{ + bnx2x_dma_free(&sc->fw_stats_dma); + sc->fw_stats_num = 0; + + sc->fw_stats_req_size = 0; + sc->fw_stats_req = NULL; + sc->fw_stats_req_mapping = 0; + + sc->fw_stats_data_size = 0; + sc->fw_stats_data = NULL; + sc->fw_stats_data_mapping = 0; +} + +static int bnx2x_alloc_fw_stats_mem(struct bnx2x_softc *sc) +{ + uint8_t num_queue_stats; + int num_groups, vf_headroom = 0; + + /* number of queues for statistics is number of eth queues */ + num_queue_stats = BNX2X_NUM_ETH_QUEUES(sc); + + /* + * Total number of FW statistics requests = + * 1 for port stats + 1 for PF stats + num of queues + */ + sc->fw_stats_num = (2 + num_queue_stats); + + /* + * Request is built from stats_query_header and an array of + * stats_query_cmd_group each of which contains STATS_QUERY_CMD_COUNT + * rules. The real number or requests is configured in the + * stats_query_header. + */ + num_groups = (sc->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT; + if ((sc->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) + num_groups++; + + sc->fw_stats_req_size = + (sizeof(struct stats_query_header) + + (num_groups * sizeof(struct stats_query_cmd_group))); + + /* + * Data for statistics requests + stats_counter. + * stats_counter holds per-STORM counters that are incremented when + * STORM has finished with the current request. Memory for FCoE + * offloaded statistics are counted anyway, even if they will not be sent. + * VF stats are not accounted for here as the data of VF stats is stored + * in memory allocated by the VF, not here. 
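+ *
+ * The resulting DMA area is therefore laid out as
+ *
+ *   [stats_query_header + cmd groups]                  <- fw_stats_req
+ *   [stats_counter][per_port][per_pf][per_queue x N]   <- fw_stats_data
+ *
+ * which is why fw_stats_data is set up below to point fw_stats_req_size
+ * bytes past the start of fw_stats_dma.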
+ */ + sc->fw_stats_data_size = + (sizeof(struct stats_counter) + + sizeof(struct per_port_stats) + sizeof(struct per_pf_stats) + + /* sizeof(struct fcoe_statistics_params) + */ + (sizeof(struct per_queue_stats) * num_queue_stats)); + + if (bnx2x_dma_alloc(sc, (sc->fw_stats_req_size + sc->fw_stats_data_size), + &sc->fw_stats_dma, "fw_stats", + RTE_CACHE_LINE_SIZE) != 0) { + bnx2x_free_fw_stats_mem(sc); + return -1; + } + + /* set up the shortcuts */ + + sc->fw_stats_req = (struct bnx2x_fw_stats_req *)sc->fw_stats_dma.vaddr; + sc->fw_stats_req_mapping = sc->fw_stats_dma.paddr; + + sc->fw_stats_data = + (struct bnx2x_fw_stats_data *)((uint8_t *) sc->fw_stats_dma.vaddr + + sc->fw_stats_req_size); + sc->fw_stats_data_mapping = (sc->fw_stats_dma.paddr + + sc->fw_stats_req_size); + + return 0; +} + +/* + * Bits map: + * 0-7 - Engine0 load counter. + * 8-15 - Engine1 load counter. + * 16 - Engine0 RESET_IN_PROGRESS bit. + * 17 - Engine1 RESET_IN_PROGRESS bit. + * 18 - Engine0 ONE_IS_LOADED. Set when there is at least one active + * function on the engine + * 19 - Engine1 ONE_IS_LOADED. + * 20 - Chip reset flow bit. When set none-leader must wait for both engines + * leader to complete (check for both RESET_IN_PROGRESS bits and not + * for just the one belonging to its engine). + */ +#define BNX2X_RECOVERY_GLOB_REG MISC_REG_GENERIC_POR_1 +#define BNX2X_PATH0_LOAD_CNT_MASK 0x000000ff +#define BNX2X_PATH0_LOAD_CNT_SHIFT 0 +#define BNX2X_PATH1_LOAD_CNT_MASK 0x0000ff00 +#define BNX2X_PATH1_LOAD_CNT_SHIFT 8 +#define BNX2X_PATH0_RST_IN_PROG_BIT 0x00010000 +#define BNX2X_PATH1_RST_IN_PROG_BIT 0x00020000 +#define BNX2X_GLOBAL_RESET_BIT 0x00040000 + +/* set the GLOBAL_RESET bit, should be run under rtnl lock */ +static void bnx2x_set_reset_global(struct bnx2x_softc *sc) +{ + uint32_t val; + bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); + val = REG_RD(sc, BNX2X_RECOVERY_GLOB_REG); + REG_WR(sc, BNX2X_RECOVERY_GLOB_REG, val | BNX2X_GLOBAL_RESET_BIT); + bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); +} + +/* clear the GLOBAL_RESET bit, should be run under rtnl lock */ +static void bnx2x_clear_reset_global(struct bnx2x_softc *sc) +{ + uint32_t val; + bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); + val = REG_RD(sc, BNX2X_RECOVERY_GLOB_REG); + REG_WR(sc, BNX2X_RECOVERY_GLOB_REG, val & (~BNX2X_GLOBAL_RESET_BIT)); + bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); +} + +/* checks the GLOBAL_RESET bit, should be run under rtnl lock */ +static uint8_t bnx2x_reset_is_global(struct bnx2x_softc *sc) +{ + return REG_RD(sc, BNX2X_RECOVERY_GLOB_REG) & BNX2X_GLOBAL_RESET_BIT; +} + +/* clear RESET_IN_PROGRESS bit for the engine, should be run under rtnl lock */ +static void bnx2x_set_reset_done(struct bnx2x_softc *sc) +{ + uint32_t val; + uint32_t bit = SC_PATH(sc) ? BNX2X_PATH1_RST_IN_PROG_BIT : + BNX2X_PATH0_RST_IN_PROG_BIT; + + bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); + + val = REG_RD(sc, BNX2X_RECOVERY_GLOB_REG); + /* Clear the bit */ + val &= ~bit; + REG_WR(sc, BNX2X_RECOVERY_GLOB_REG, val); + + bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); +} + +/* set RESET_IN_PROGRESS for the engine, should be run under rtnl lock */ +static void bnx2x_set_reset_in_progress(struct bnx2x_softc *sc) +{ + uint32_t val; + uint32_t bit = SC_PATH(sc) ? 
BNX2X_PATH1_RST_IN_PROG_BIT : + BNX2X_PATH0_RST_IN_PROG_BIT; + + bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); + + val = REG_RD(sc, BNX2X_RECOVERY_GLOB_REG); + /* Set the bit */ + val |= bit; + REG_WR(sc, BNX2X_RECOVERY_GLOB_REG, val); + + bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); +} + +/* check RESET_IN_PROGRESS bit for an engine, should be run under rtnl lock */ +static uint8_t bnx2x_reset_is_done(struct bnx2x_softc *sc, int engine) +{ + uint32_t val = REG_RD(sc, BNX2X_RECOVERY_GLOB_REG); + uint32_t bit = engine ? BNX2X_PATH1_RST_IN_PROG_BIT : + BNX2X_PATH0_RST_IN_PROG_BIT; + + /* return false if bit is set */ + return (val & bit) ? FALSE : TRUE; +} + +/* get the load status for an engine, should be run under rtnl lock */ +static uint8_t bnx2x_get_load_status(struct bnx2x_softc *sc, int engine) +{ + uint32_t mask = engine ? BNX2X_PATH1_LOAD_CNT_MASK : + BNX2X_PATH0_LOAD_CNT_MASK; + uint32_t shift = engine ? BNX2X_PATH1_LOAD_CNT_SHIFT : + BNX2X_PATH0_LOAD_CNT_SHIFT; + uint32_t val = REG_RD(sc, BNX2X_RECOVERY_GLOB_REG); + + val = ((val & mask) >> shift); + + return val != 0; +} + +/* set pf load mark */ +static void bnx2x_set_pf_load(struct bnx2x_softc *sc) +{ + uint32_t val; + uint32_t val1; + uint32_t mask = SC_PATH(sc) ? BNX2X_PATH1_LOAD_CNT_MASK : + BNX2X_PATH0_LOAD_CNT_MASK; + uint32_t shift = SC_PATH(sc) ? BNX2X_PATH1_LOAD_CNT_SHIFT : + BNX2X_PATH0_LOAD_CNT_SHIFT; + + bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); + + PMD_INIT_FUNC_TRACE(sc); + + val = REG_RD(sc, BNX2X_RECOVERY_GLOB_REG); + + /* get the current counter value */ + val1 = ((val & mask) >> shift); + + /* set bit of this PF */ + val1 |= (1 << SC_ABS_FUNC(sc)); + + /* clear the old value */ + val &= ~mask; + + /* set the new one */ + val |= ((val1 << shift) & mask); + + REG_WR(sc, BNX2X_RECOVERY_GLOB_REG, val); + + bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); +} + +/* clear pf load mark */ +static uint8_t bnx2x_clear_pf_load(struct bnx2x_softc *sc) +{ + uint32_t val1, val; + uint32_t mask = SC_PATH(sc) ? BNX2X_PATH1_LOAD_CNT_MASK : + BNX2X_PATH0_LOAD_CNT_MASK; + uint32_t shift = SC_PATH(sc) ? 
BNX2X_PATH1_LOAD_CNT_SHIFT : + BNX2X_PATH0_LOAD_CNT_SHIFT; + + bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); + val = REG_RD(sc, BNX2X_RECOVERY_GLOB_REG); + + /* get the current counter value */ + val1 = (val & mask) >> shift; + + /* clear bit of that PF */ + val1 &= ~(1 << SC_ABS_FUNC(sc)); + + /* clear the old value */ + val &= ~mask; + + /* set the new one */ + val |= ((val1 << shift) & mask); + + REG_WR(sc, BNX2X_RECOVERY_GLOB_REG, val); + bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); + return val1 != 0; +} + +/* send load requrest to mcp and analyze response */ +static int bnx2x_nic_load_request(struct bnx2x_softc *sc, uint32_t * load_code) +{ + PMD_INIT_FUNC_TRACE(sc); + + /* init fw_seq */ + sc->fw_seq = + (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_mb_header) & + DRV_MSG_SEQ_NUMBER_MASK); + + PMD_DRV_LOG(DEBUG, sc, "initial fw_seq 0x%04x", sc->fw_seq); + +#ifdef BNX2X_PULSE + /* get the current FW pulse sequence */ + sc->fw_drv_pulse_wr_seq = + (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_pulse_mb) & + DRV_PULSE_SEQ_MASK); +#else + /* set ALWAYS_ALIVE bit in shmem */ + sc->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE; + bnx2x_drv_pulse(sc); +#endif + + /* load request */ + (*load_code) = bnx2x_fw_command(sc, DRV_MSG_CODE_LOAD_REQ, + DRV_MSG_CODE_LOAD_REQ_WITH_LFA); + + /* if the MCP fails to respond we must abort */ + if (!(*load_code)) { + PMD_DRV_LOG(NOTICE, sc, "MCP response failure!"); + return -1; + } + + /* if MCP refused then must abort */ + if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) { + PMD_DRV_LOG(NOTICE, sc, "MCP refused load request"); + return -1; + } + + return 0; +} + +/* + * Check whether another PF has already loaded FW to chip. In virtualized + * environments a pf from anoth VM may have already initialized the device + * including loading FW. + */ +static int bnx2x_nic_load_analyze_req(struct bnx2x_softc *sc, uint32_t load_code) +{ + uint32_t my_fw, loaded_fw; + + /* is another pf loaded on this engine? */ + if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) && + (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) { + /* build my FW version dword */ + my_fw = (BNX2X_5710_FW_MAJOR_VERSION + + (BNX2X_5710_FW_MINOR_VERSION << 8) + + (BNX2X_5710_FW_REVISION_VERSION << 16) + + (BNX2X_5710_FW_ENGINEERING_VERSION << 24)); + + /* read loaded FW from chip */ + loaded_fw = REG_RD(sc, XSEM_REG_PRAM); + PMD_DRV_LOG(DEBUG, sc, "loaded FW 0x%08x / my FW 0x%08x", + loaded_fw, my_fw); + + /* abort nic load if version mismatch */ + if (my_fw != loaded_fw) { + PMD_DRV_LOG(NOTICE, sc, + "FW 0x%08x already loaded (mine is 0x%08x)", + loaded_fw, my_fw); + return -1; + } + } + + return 0; +} + +/* mark PMF if applicable */ +static void bnx2x_nic_load_pmf(struct bnx2x_softc *sc, uint32_t load_code) +{ + uint32_t ncsi_oem_data_addr; + + PMD_INIT_FUNC_TRACE(sc); + + if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) || + (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) || + (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) { + /* + * Barrier here for ordering between the writing to sc->port.pmf here + * and reading it from the periodic task. 
+ */ + sc->port.pmf = 1; + mb(); + } else { + sc->port.pmf = 0; + } + + PMD_DRV_LOG(DEBUG, sc, "pmf %d", sc->port.pmf); + + if (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) { + if (SHMEM2_HAS(sc, ncsi_oem_data_addr)) { + ncsi_oem_data_addr = SHMEM2_RD(sc, ncsi_oem_data_addr); + if (ncsi_oem_data_addr) { + REG_WR(sc, + (ncsi_oem_data_addr + + offsetof(struct glob_ncsi_oem_data, + driver_version)), 0); + } + } + } +} + +static void bnx2x_read_mf_cfg(struct bnx2x_softc *sc) +{ + int n = (CHIP_IS_MODE_4_PORT(sc) ? 2 : 1); + int abs_func; + int vn; + + if (BNX2X_NOMCP(sc)) { + return; /* what should be the default bvalue in this case */ + } + + /* + * The formula for computing the absolute function number is... + * For 2 port configuration (4 functions per port): + * abs_func = 2 * vn + SC_PORT + SC_PATH + * For 4 port configuration (2 functions per port): + * abs_func = 4 * vn + 2 * SC_PORT + SC_PATH + */ + for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) { + abs_func = (n * (2 * vn + SC_PORT(sc)) + SC_PATH(sc)); + if (abs_func >= E1H_FUNC_MAX) { + break; + } + sc->devinfo.mf_info.mf_config[vn] = + MFCFG_RD(sc, func_mf_config[abs_func].config); + } + + if (sc->devinfo.mf_info.mf_config[SC_VN(sc)] & + FUNC_MF_CFG_FUNC_DISABLED) { + PMD_DRV_LOG(DEBUG, sc, "mf_cfg function disabled"); + sc->flags |= BNX2X_MF_FUNC_DIS; + } else { + PMD_DRV_LOG(DEBUG, sc, "mf_cfg function enabled"); + sc->flags &= ~BNX2X_MF_FUNC_DIS; + } +} + +/* acquire split MCP access lock register */ +static int bnx2x_acquire_alr(struct bnx2x_softc *sc) +{ + uint32_t j, val; + + for (j = 0; j < 1000; j++) { + val = (1UL << 31); + REG_WR(sc, GRCBASE_MCP + 0x9c, val); + val = REG_RD(sc, GRCBASE_MCP + 0x9c); + if (val & (1L << 31)) + break; + + DELAY(5000); + } + + if (!(val & (1L << 31))) { + PMD_DRV_LOG(NOTICE, sc, "Cannot acquire MCP access lock register"); + return -1; + } + + return 0; +} + +/* release split MCP access lock register */ +static void bnx2x_release_alr(struct bnx2x_softc *sc) +{ + REG_WR(sc, GRCBASE_MCP + 0x9c, 0); +} + +static void bnx2x_fan_failure(struct bnx2x_softc *sc) +{ + int port = SC_PORT(sc); + uint32_t ext_phy_config; + + /* mark the failure */ + ext_phy_config = + SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config); + + ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK; + ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE; + SHMEM_WR(sc, dev_info.port_hw_config[port].external_phy_config, + ext_phy_config); + + /* log the failure */ + PMD_DRV_LOG(INFO, sc, + "Fan Failure has caused the driver to shutdown " + "the card to prevent permanent damage. 
" + "Please contact OEM Support for assistance"); + + rte_panic("Schedule task to handle fan failure"); +} + +/* this function is called upon a link interrupt */ +static void bnx2x_link_attn(struct bnx2x_softc *sc) +{ + uint32_t pause_enabled = 0; + struct host_port_stats *pstats; + int cmng_fns; + + /* Make sure that we are synced with the current statistics */ + bnx2x_stats_handle(sc, STATS_EVENT_STOP); + + elink_link_update(&sc->link_params, &sc->link_vars); + + if (sc->link_vars.link_up) { + + /* dropless flow control */ + if (sc->dropless_fc) { + pause_enabled = 0; + + if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_TX) { + pause_enabled = 1; + } + + REG_WR(sc, + (BAR_USTRORM_INTMEM + + USTORM_ETH_PAUSE_ENABLED_OFFSET(SC_PORT(sc))), + pause_enabled); + } + + if (sc->link_vars.mac_type != ELINK_MAC_TYPE_EMAC) { + pstats = BNX2X_SP(sc, port_stats); + /* reset old mac stats */ + memset(&(pstats->mac_stx[0]), 0, + sizeof(struct mac_stx)); + } + + if (sc->state == BNX2X_STATE_OPEN) { + bnx2x_stats_handle(sc, STATS_EVENT_LINK_UP); + } + } + + if (sc->link_vars.link_up && sc->link_vars.line_speed) { + cmng_fns = bnx2x_get_cmng_fns_mode(sc); + + if (cmng_fns != CMNG_FNS_NONE) { + bnx2x_cmng_fns_init(sc, FALSE, cmng_fns); + storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc)); + } + } + + bnx2x_link_report_locked(sc); + + if (IS_MF(sc)) { + bnx2x_link_sync_notify(sc); + } +} + +static void bnx2x_attn_int_asserted(struct bnx2x_softc *sc, uint32_t asserted) +{ + int port = SC_PORT(sc); + uint32_t aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 : + MISC_REG_AEU_MASK_ATTN_FUNC_0; + uint32_t nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 : + NIG_REG_MASK_INTERRUPT_PORT0; + uint32_t aeu_mask; + uint32_t nig_mask = 0; + uint32_t reg_addr; + uint32_t igu_acked; + uint32_t cnt; + + if (sc->attn_state & asserted) { + PMD_DRV_LOG(ERR, sc, "IGU ERROR attn=0x%08x", asserted); + } + + bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); + + aeu_mask = REG_RD(sc, aeu_addr); + + aeu_mask &= ~(asserted & 0x3ff); + + REG_WR(sc, aeu_addr, aeu_mask); + + bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); + + sc->attn_state |= asserted; + + if (asserted & ATTN_HARD_WIRED_MASK) { + if (asserted & ATTN_NIG_FOR_FUNC) { + + bnx2x_acquire_phy_lock(sc); + /* save nig interrupt mask */ + nig_mask = REG_RD(sc, nig_int_mask_addr); + + /* If nig_mask is not set, no need to call the update function */ + if (nig_mask) { + REG_WR(sc, nig_int_mask_addr, 0); + + bnx2x_link_attn(sc); + } + + /* handle unicore attn? 
*/ + } + + if (asserted & ATTN_SW_TIMER_4_FUNC) { + PMD_DRV_LOG(DEBUG, sc, "ATTN_SW_TIMER_4_FUNC!"); + } + + if (asserted & GPIO_2_FUNC) { + PMD_DRV_LOG(DEBUG, sc, "GPIO_2_FUNC!"); + } + + if (asserted & GPIO_3_FUNC) { + PMD_DRV_LOG(DEBUG, sc, "GPIO_3_FUNC!"); + } + + if (asserted & GPIO_4_FUNC) { + PMD_DRV_LOG(DEBUG, sc, "GPIO_4_FUNC!"); + } + + if (port == 0) { + if (asserted & ATTN_GENERAL_ATTN_1) { + PMD_DRV_LOG(DEBUG, sc, "ATTN_GENERAL_ATTN_1!"); + REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_1, 0x0); + } + if (asserted & ATTN_GENERAL_ATTN_2) { + PMD_DRV_LOG(DEBUG, sc, "ATTN_GENERAL_ATTN_2!"); + REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_2, 0x0); + } + if (asserted & ATTN_GENERAL_ATTN_3) { + PMD_DRV_LOG(DEBUG, sc, "ATTN_GENERAL_ATTN_3!"); + REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_3, 0x0); + } + } else { + if (asserted & ATTN_GENERAL_ATTN_4) { + PMD_DRV_LOG(DEBUG, sc, "ATTN_GENERAL_ATTN_4!"); + REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_4, 0x0); + } + if (asserted & ATTN_GENERAL_ATTN_5) { + PMD_DRV_LOG(DEBUG, sc, "ATTN_GENERAL_ATTN_5!"); + REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_5, 0x0); + } + if (asserted & ATTN_GENERAL_ATTN_6) { + PMD_DRV_LOG(DEBUG, sc, "ATTN_GENERAL_ATTN_6!"); + REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_6, 0x0); + } + } + } + /* hardwired */ + if (sc->devinfo.int_block == INT_BLOCK_HC) { + reg_addr = + (HC_REG_COMMAND_REG + port * 32 + + COMMAND_REG_ATTN_BITS_SET); + } else { + reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER * 8); + } + + PMD_DRV_LOG(DEBUG, sc, "about to mask 0x%08x at %s addr 0x%08x", + asserted, + (sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU", + reg_addr); + REG_WR(sc, reg_addr, asserted); + + /* now set back the mask */ + if (asserted & ATTN_NIG_FOR_FUNC) { + /* + * Verify that IGU ack through BAR was written before restoring + * NIG mask. This loop should exit after 2-3 iterations max. + */ + if (sc->devinfo.int_block != INT_BLOCK_HC) { + cnt = 0; + + do { + igu_acked = + REG_RD(sc, IGU_REG_ATTENTION_ACK_BITS); + } while (((igu_acked & ATTN_NIG_FOR_FUNC) == 0) + && (++cnt < MAX_IGU_ATTN_ACK_TO)); + + if (!igu_acked) { + PMD_DRV_LOG(ERR, sc, + "Failed to verify IGU ack on time"); + } + + mb(); + } + + REG_WR(sc, nig_int_mask_addr, nig_mask); + + bnx2x_release_phy_lock(sc); + } +} + +static void +bnx2x_print_next_block(__rte_unused struct bnx2x_softc *sc, __rte_unused int idx, + __rte_unused const char *blk) +{ + PMD_DRV_LOG(INFO, sc, "%s%s", idx ? 
", " : "", blk); +} + +static int +bnx2x_check_blocks_with_parity0(struct bnx2x_softc *sc, uint32_t sig, int par_num, + uint8_t print) +{ + uint32_t cur_bit = 0; + int i = 0; + + for (i = 0; sig; i++) { + cur_bit = ((uint32_t) 0x1 << i); + if (sig & cur_bit) { + switch (cur_bit) { + case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR: + if (print) + bnx2x_print_next_block(sc, par_num++, + "BRB"); + break; + case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR: + if (print) + bnx2x_print_next_block(sc, par_num++, + "PARSER"); + break; + case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR: + if (print) + bnx2x_print_next_block(sc, par_num++, + "TSDM"); + break; + case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR: + if (print) + bnx2x_print_next_block(sc, par_num++, + "SEARCHER"); + break; + case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR: + if (print) + bnx2x_print_next_block(sc, par_num++, + "TCM"); + break; + case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR: + if (print) + bnx2x_print_next_block(sc, par_num++, + "TSEMI"); + break; + case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR: + if (print) + bnx2x_print_next_block(sc, par_num++, + "XPB"); + break; + } + + /* Clear the bit */ + sig &= ~cur_bit; + } + } + + return par_num; +} + +static int +bnx2x_check_blocks_with_parity1(struct bnx2x_softc *sc, uint32_t sig, int par_num, + uint8_t * global, uint8_t print) +{ + int i = 0; + uint32_t cur_bit = 0; + for (i = 0; sig; i++) { + cur_bit = ((uint32_t) 0x1 << i); + if (sig & cur_bit) { + switch (cur_bit) { + case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR: + if (print) + bnx2x_print_next_block(sc, par_num++, + "PBF"); + break; + case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR: + if (print) + bnx2x_print_next_block(sc, par_num++, + "QM"); + break; + case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR: + if (print) + bnx2x_print_next_block(sc, par_num++, + "TM"); + break; + case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR: + if (print) + bnx2x_print_next_block(sc, par_num++, + "XSDM"); + break; + case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR: + if (print) + bnx2x_print_next_block(sc, par_num++, + "XCM"); + break; + case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR: + if (print) + bnx2x_print_next_block(sc, par_num++, + "XSEMI"); + break; + case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR: + if (print) + bnx2x_print_next_block(sc, par_num++, + "DOORBELLQ"); + break; + case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR: + if (print) + bnx2x_print_next_block(sc, par_num++, + "NIG"); + break; + case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR: + if (print) + bnx2x_print_next_block(sc, par_num++, + "VAUX PCI CORE"); + *global = TRUE; + break; + case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR: + if (print) + bnx2x_print_next_block(sc, par_num++, + "DEBUG"); + break; + case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR: + if (print) + bnx2x_print_next_block(sc, par_num++, + "USDM"); + break; + case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR: + if (print) + bnx2x_print_next_block(sc, par_num++, + "UCM"); + break; + case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR: + if (print) + bnx2x_print_next_block(sc, par_num++, + "USEMI"); + break; + case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR: + if (print) + bnx2x_print_next_block(sc, par_num++, + "UPB"); + break; + case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR: + if (print) + bnx2x_print_next_block(sc, par_num++, + "CSDM"); + break; + case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR: + if (print) + bnx2x_print_next_block(sc, par_num++, + "CCM"); + break; + } + + /* Clear the bit */ + sig &= ~cur_bit; + } + } + + return par_num; +} + +static int 
+bnx2x_check_blocks_with_parity2(struct bnx2x_softc *sc, uint32_t sig, int par_num, + uint8_t print) +{ + uint32_t cur_bit = 0; + int i = 0; + + for (i = 0; sig; i++) { + cur_bit = ((uint32_t) 0x1 << i); + if (sig & cur_bit) { + switch (cur_bit) { + case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR: + if (print) + bnx2x_print_next_block(sc, par_num++, + "CSEMI"); + break; + case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR: + if (print) + bnx2x_print_next_block(sc, par_num++, + "PXP"); + break; + case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR: + if (print) + bnx2x_print_next_block(sc, par_num++, + "PXPPCICLOCKCLIENT"); + break; + case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR: + if (print) + bnx2x_print_next_block(sc, par_num++, + "CFC"); + break; + case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR: + if (print) + bnx2x_print_next_block(sc, par_num++, + "CDU"); + break; + case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR: + if (print) + bnx2x_print_next_block(sc, par_num++, + "DMAE"); + break; + case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR: + if (print) + bnx2x_print_next_block(sc, par_num++, + "IGU"); + break; + case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR: + if (print) + bnx2x_print_next_block(sc, par_num++, + "MISC"); + break; + } + + /* Clear the bit */ + sig &= ~cur_bit; + } + } + + return par_num; +} + +static int +bnx2x_check_blocks_with_parity3(struct bnx2x_softc *sc, uint32_t sig, int par_num, + uint8_t * global, uint8_t print) +{ + uint32_t cur_bit = 0; + int i = 0; + + for (i = 0; sig; i++) { + cur_bit = ((uint32_t) 0x1 << i); + if (sig & cur_bit) { + switch (cur_bit) { + case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY: + if (print) + bnx2x_print_next_block(sc, par_num++, + "MCP ROM"); + *global = TRUE; + break; + case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY: + if (print) + bnx2x_print_next_block(sc, par_num++, + "MCP UMP RX"); + *global = TRUE; + break; + case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY: + if (print) + bnx2x_print_next_block(sc, par_num++, + "MCP UMP TX"); + *global = TRUE; + break; + case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY: + if (print) + bnx2x_print_next_block(sc, par_num++, + "MCP SCPAD"); + *global = TRUE; + break; + } + + /* Clear the bit */ + sig &= ~cur_bit; + } + } + + return par_num; +} + +static int +bnx2x_check_blocks_with_parity4(struct bnx2x_softc *sc, uint32_t sig, int par_num, + uint8_t print) +{ + uint32_t cur_bit = 0; + int i = 0; + + for (i = 0; sig; i++) { + cur_bit = ((uint32_t) 0x1 << i); + if (sig & cur_bit) { + switch (cur_bit) { + case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR: + if (print) + bnx2x_print_next_block(sc, par_num++, + "PGLUE_B"); + break; + case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR: + if (print) + bnx2x_print_next_block(sc, par_num++, + "ATC"); + break; + } + + /* Clear the bit */ + sig &= ~cur_bit; + } + } + + return par_num; +} + +static uint8_t +bnx2x_parity_attn(struct bnx2x_softc *sc, uint8_t * global, uint8_t print, + uint32_t * sig) +{ + int par_num = 0; + + if ((sig[0] & HW_PRTY_ASSERT_SET_0) || + (sig[1] & HW_PRTY_ASSERT_SET_1) || + (sig[2] & HW_PRTY_ASSERT_SET_2) || + (sig[3] & HW_PRTY_ASSERT_SET_3) || + (sig[4] & HW_PRTY_ASSERT_SET_4)) { + PMD_DRV_LOG(ERR, sc, + "Parity error: HW block parity attention:" + "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x", + (uint32_t) (sig[0] & HW_PRTY_ASSERT_SET_0), + (uint32_t) (sig[1] & HW_PRTY_ASSERT_SET_1), + (uint32_t) (sig[2] & HW_PRTY_ASSERT_SET_2), + (uint32_t) (sig[3] & HW_PRTY_ASSERT_SET_3), + (uint32_t) (sig[4] & HW_PRTY_ASSERT_SET_4)); + + if (print) + 
PMD_DRV_LOG(INFO, sc, "Parity errors detected in blocks: "); + + par_num = + bnx2x_check_blocks_with_parity0(sc, sig[0] & + HW_PRTY_ASSERT_SET_0, + par_num, print); + par_num = + bnx2x_check_blocks_with_parity1(sc, sig[1] & + HW_PRTY_ASSERT_SET_1, + par_num, global, print); + par_num = + bnx2x_check_blocks_with_parity2(sc, sig[2] & + HW_PRTY_ASSERT_SET_2, + par_num, print); + par_num = + bnx2x_check_blocks_with_parity3(sc, sig[3] & + HW_PRTY_ASSERT_SET_3, + par_num, global, print); + par_num = + bnx2x_check_blocks_with_parity4(sc, sig[4] & + HW_PRTY_ASSERT_SET_4, + par_num, print); + + if (print) + PMD_DRV_LOG(INFO, sc, ""); + + return TRUE; + } + + return FALSE; +} + +static uint8_t +bnx2x_chk_parity_attn(struct bnx2x_softc *sc, uint8_t * global, uint8_t print) +{ + struct attn_route attn = { {0} }; + int port = SC_PORT(sc); + + attn.sig[0] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port * 4); + attn.sig[1] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port * 4); + attn.sig[2] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port * 4); + attn.sig[3] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port * 4); + + if (!CHIP_IS_E1x(sc)) + attn.sig[4] = + REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port * 4); + + return bnx2x_parity_attn(sc, global, print, attn.sig); +} + +static void bnx2x_attn_int_deasserted4(struct bnx2x_softc *sc, uint32_t attn) +{ + uint32_t val; + + if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) { + val = REG_RD(sc, PGLUE_B_REG_PGLUE_B_INT_STS_CLR); + PMD_DRV_LOG(INFO, sc, "ERROR: PGLUE hw attention 0x%08x", val); + if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR) + PMD_DRV_LOG(INFO, sc, + "ERROR: PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR"); + if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR) + PMD_DRV_LOG(INFO, sc, + "ERROR: PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR"); + if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) + PMD_DRV_LOG(INFO, sc, + "ERROR: PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN"); + if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN) + PMD_DRV_LOG(INFO, sc, + "ERROR: PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN"); + if (val & + PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN) + PMD_DRV_LOG(INFO, sc, + "ERROR: PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN"); + if (val & + PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN) + PMD_DRV_LOG(INFO, sc, + "ERROR: PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN"); + if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN) + PMD_DRV_LOG(INFO, sc, + "ERROR: PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN"); + if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN) + PMD_DRV_LOG(INFO, sc, + "ERROR: PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN"); + if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW) + PMD_DRV_LOG(INFO, sc, + "ERROR: PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW"); + } + + if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) { + val = REG_RD(sc, ATC_REG_ATC_INT_STS_CLR); + PMD_DRV_LOG(INFO, sc, "ERROR: ATC hw attention 0x%08x", val); + if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR) + PMD_DRV_LOG(INFO, sc, + "ERROR: ATC_ATC_INT_STS_REG_ADDRESS_ERROR"); + if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND) + PMD_DRV_LOG(INFO, sc, + "ERROR: ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND"); + if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS) + PMD_DRV_LOG(INFO, sc, + "ERROR: ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS"); + if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT) + PMD_DRV_LOG(INFO, 
sc, + "ERROR: ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT"); + if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR) + PMD_DRV_LOG(INFO, sc, + "ERROR: ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR"); + if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU) + PMD_DRV_LOG(INFO, sc, + "ERROR: ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU"); + } + + if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR | + AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) { + PMD_DRV_LOG(INFO, sc, + "ERROR: FATAL parity attention set4 0x%08x", + (uint32_t) (attn & + (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR + | + AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR))); + } +} + +static void bnx2x_e1h_disable(struct bnx2x_softc *sc) +{ + int port = SC_PORT(sc); + + REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port * 8, 0); +} + +static void bnx2x_e1h_enable(struct bnx2x_softc *sc) +{ + int port = SC_PORT(sc); + + REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port * 8, 1); +} + +/* + * called due to MCP event (on pmf): + * reread new bandwidth configuration + * configure FW + * notify others function about the change + */ +static void bnx2x_config_mf_bw(struct bnx2x_softc *sc) +{ + if (sc->link_vars.link_up) { + bnx2x_cmng_fns_init(sc, TRUE, CMNG_FNS_MINMAX); + bnx2x_link_sync_notify(sc); + } + + storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc)); +} + +static void bnx2x_set_mf_bw(struct bnx2x_softc *sc) +{ + bnx2x_config_mf_bw(sc); + bnx2x_fw_command(sc, DRV_MSG_CODE_SET_MF_BW_ACK, 0); +} + +static void bnx2x_handle_eee_event(struct bnx2x_softc *sc) +{ + bnx2x_fw_command(sc, DRV_MSG_CODE_EEE_RESULTS_ACK, 0); +} + +#define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED 3 + +static void bnx2x_drv_info_ether_stat(struct bnx2x_softc *sc) +{ + struct eth_stats_info *ether_stat = &sc->sp->drv_info_to_mcp.ether_stat; + + strncpy(ether_stat->version, BNX2X_DRIVER_VERSION, + ETH_STAT_INFO_VERSION_LEN); + + sc->sp_objs[0].mac_obj.get_n_elements(sc, &sc->sp_objs[0].mac_obj, + DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED, + ether_stat->mac_local + MAC_PAD, + MAC_PAD, ETH_ALEN); + + ether_stat->mtu_size = sc->mtu; + + ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK; + ether_stat->promiscuous_mode = 0; // (flags & PROMISC) ? 
1 : 0; + + ether_stat->txq_size = sc->tx_ring_size; + ether_stat->rxq_size = sc->rx_ring_size; +} + +static void bnx2x_handle_drv_info_req(struct bnx2x_softc *sc) +{ + enum drv_info_opcode op_code; + uint32_t drv_info_ctl = SHMEM2_RD(sc, drv_info_control); + + /* if drv_info version supported by MFW doesn't match - send NACK */ + if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) { + bnx2x_fw_command(sc, DRV_MSG_CODE_DRV_INFO_NACK, 0); + return; + } + + op_code = ((drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >> + DRV_INFO_CONTROL_OP_CODE_SHIFT); + + memset(&sc->sp->drv_info_to_mcp, 0, sizeof(union drv_info_to_mcp)); + + switch (op_code) { + case ETH_STATS_OPCODE: + bnx2x_drv_info_ether_stat(sc); + break; + case FCOE_STATS_OPCODE: + case ISCSI_STATS_OPCODE: + default: + /* if op code isn't supported - send NACK */ + bnx2x_fw_command(sc, DRV_MSG_CODE_DRV_INFO_NACK, 0); + return; + } + + /* + * If we got drv_info attn from MFW then these fields are defined in + * shmem2 for sure + */ + SHMEM2_WR(sc, drv_info_host_addr_lo, + U64_LO(BNX2X_SP_MAPPING(sc, drv_info_to_mcp))); + SHMEM2_WR(sc, drv_info_host_addr_hi, + U64_HI(BNX2X_SP_MAPPING(sc, drv_info_to_mcp))); + + bnx2x_fw_command(sc, DRV_MSG_CODE_DRV_INFO_ACK, 0); +} + +static void bnx2x_dcc_event(struct bnx2x_softc *sc, uint32_t dcc_event) +{ + if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) { +/* + * This is the only place besides the function initialization + * where the sc->flags can change so it is done without any + * locks + */ + if (sc->devinfo. + mf_info.mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_DISABLED) { + PMD_DRV_LOG(DEBUG, sc, "mf_cfg function disabled"); + sc->flags |= BNX2X_MF_FUNC_DIS; + bnx2x_e1h_disable(sc); + } else { + PMD_DRV_LOG(DEBUG, sc, "mf_cfg function enabled"); + sc->flags &= ~BNX2X_MF_FUNC_DIS; + bnx2x_e1h_enable(sc); + } + dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF; + } + + if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) { + bnx2x_config_mf_bw(sc); + dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION; + } + + /* Report results to MCP */ + if (dcc_event) + bnx2x_fw_command(sc, DRV_MSG_CODE_DCC_FAILURE, 0); + else + bnx2x_fw_command(sc, DRV_MSG_CODE_DCC_OK, 0); +} + +static void bnx2x_pmf_update(struct bnx2x_softc *sc) +{ + int port = SC_PORT(sc); + uint32_t val; + + sc->port.pmf = 1; + + /* + * We need the mb() to ensure the ordering between the writing to + * sc->port.pmf here and reading it from the bnx2x_periodic_task(). 
+ */ + mb(); + + /* enable nig attention */ + val = (0xff0f | (1 << (SC_VN(sc) + 4))); + if (sc->devinfo.int_block == INT_BLOCK_HC) { + REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port * 8, val); + REG_WR(sc, HC_REG_LEADING_EDGE_0 + port * 8, val); + } else if (!CHIP_IS_E1x(sc)) { + REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, val); + REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, val); + } + + bnx2x_stats_handle(sc, STATS_EVENT_PMF); +} + +static int bnx2x_mc_assert(struct bnx2x_softc *sc) +{ + char last_idx; + int i, rc = 0; + __rte_unused uint32_t row0, row1, row2, row3; + + /* XSTORM */ + last_idx = + REG_RD8(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_INDEX_OFFSET); + if (last_idx) + PMD_DRV_LOG(ERR, sc, "XSTORM_ASSERT_LIST_INDEX 0x%x", last_idx); + + /* print the asserts */ + for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) { + + row0 = + REG_RD(sc, + BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i)); + row1 = + REG_RD(sc, + BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + + 4); + row2 = + REG_RD(sc, + BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + + 8); + row3 = + REG_RD(sc, + BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + + 12); + + if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { + PMD_DRV_LOG(ERR, sc, + "XSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x", + i, row3, row2, row1, row0); + rc++; + } else { + break; + } + } + + /* TSTORM */ + last_idx = + REG_RD8(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_INDEX_OFFSET); + if (last_idx) { + PMD_DRV_LOG(ERR, sc, "TSTORM_ASSERT_LIST_INDEX 0x%x", last_idx); + } + + /* print the asserts */ + for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) { + + row0 = + REG_RD(sc, + BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i)); + row1 = + REG_RD(sc, + BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + + 4); + row2 = + REG_RD(sc, + BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + + 8); + row3 = + REG_RD(sc, + BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + + 12); + + if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { + PMD_DRV_LOG(ERR, sc, + "TSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x", + i, row3, row2, row1, row0); + rc++; + } else { + break; + } + } + + /* CSTORM */ + last_idx = + REG_RD8(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_INDEX_OFFSET); + if (last_idx) { + PMD_DRV_LOG(ERR, sc, "CSTORM_ASSERT_LIST_INDEX 0x%x", last_idx); + } + + /* print the asserts */ + for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) { + + row0 = + REG_RD(sc, + BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i)); + row1 = + REG_RD(sc, + BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + + 4); + row2 = + REG_RD(sc, + BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + + 8); + row3 = + REG_RD(sc, + BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + + 12); + + if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { + PMD_DRV_LOG(ERR, sc, + "CSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x", + i, row3, row2, row1, row0); + rc++; + } else { + break; + } + } + + /* USTORM */ + last_idx = + REG_RD8(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_INDEX_OFFSET); + if (last_idx) { + PMD_DRV_LOG(ERR, sc, "USTORM_ASSERT_LIST_INDEX 0x%x", last_idx); + } + + /* print the asserts */ + for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) { + + row0 = + REG_RD(sc, + BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i)); + row1 = + REG_RD(sc, + BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + + 4); + row2 = + REG_RD(sc, + BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + + 8); + row3 = + REG_RD(sc, + BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + + 12); + + if (row0 != 
COMMON_ASM_INVALID_ASSERT_OPCODE) { + PMD_DRV_LOG(ERR, sc, + "USTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x", + i, row3, row2, row1, row0); + rc++; + } else { + break; + } + } + + return rc; +} + +static void bnx2x_attn_int_deasserted3(struct bnx2x_softc *sc, uint32_t attn) +{ + int func = SC_FUNC(sc); + uint32_t val; + + if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) { + + if (attn & BNX2X_PMF_LINK_ASSERT(sc)) { + + REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func * 4, 0); + bnx2x_read_mf_cfg(sc); + sc->devinfo.mf_info.mf_config[SC_VN(sc)] = + MFCFG_RD(sc, + func_mf_config[SC_ABS_FUNC(sc)].config); + val = + SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_status); + + if (val & DRV_STATUS_DCC_EVENT_MASK) + bnx2x_dcc_event(sc, + (val & + DRV_STATUS_DCC_EVENT_MASK)); + + if (val & DRV_STATUS_SET_MF_BW) + bnx2x_set_mf_bw(sc); + + if (val & DRV_STATUS_DRV_INFO_REQ) + bnx2x_handle_drv_info_req(sc); + + if ((sc->port.pmf == 0) && (val & DRV_STATUS_PMF)) + bnx2x_pmf_update(sc); + + if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS) + bnx2x_handle_eee_event(sc); + + if (sc->link_vars.periodic_flags & + ELINK_PERIODIC_FLAGS_LINK_EVENT) { + /* sync with link */ + bnx2x_acquire_phy_lock(sc); + sc->link_vars.periodic_flags &= + ~ELINK_PERIODIC_FLAGS_LINK_EVENT; + bnx2x_release_phy_lock(sc); + if (IS_MF(sc)) { + bnx2x_link_sync_notify(sc); + } + bnx2x_link_report(sc); + } + + /* + * Always call it here: bnx2x_link_report() will + * prevent the link indication duplication. + */ + bnx2x_link_status_update(sc); + + } else if (attn & BNX2X_MC_ASSERT_BITS) { + + PMD_DRV_LOG(ERR, sc, "MC assert!"); + bnx2x_mc_assert(sc); + REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_10, 0); + REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_9, 0); + REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_8, 0); + REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_7, 0); + rte_panic("MC assert!"); + + } else if (attn & BNX2X_MCP_ASSERT) { + + PMD_DRV_LOG(ERR, sc, "MCP assert!"); + REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_11, 0); + + } else { + PMD_DRV_LOG(ERR, sc, + "Unknown HW assert! 
(attn 0x%08x)", attn); + } + } + + if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) { + PMD_DRV_LOG(ERR, sc, "LATCHED attention 0x%08x (masked)", attn); + if (attn & BNX2X_GRC_TIMEOUT) { + val = REG_RD(sc, MISC_REG_GRC_TIMEOUT_ATTN); + PMD_DRV_LOG(ERR, sc, "GRC time-out 0x%08x", val); + } + if (attn & BNX2X_GRC_RSV) { + val = REG_RD(sc, MISC_REG_GRC_RSV_ATTN); + PMD_DRV_LOG(ERR, sc, "GRC reserved 0x%08x", val); + } + REG_WR(sc, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff); + } +} + +static void bnx2x_attn_int_deasserted2(struct bnx2x_softc *sc, uint32_t attn) +{ + int port = SC_PORT(sc); + int reg_offset; + uint32_t val0, mask0, val1, mask1; + uint32_t val; + + if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) { + val = REG_RD(sc, CFC_REG_CFC_INT_STS_CLR); + PMD_DRV_LOG(ERR, sc, "CFC hw attention 0x%08x", val); +/* CFC error attention */ + if (val & 0x2) { + PMD_DRV_LOG(ERR, sc, "FATAL error from CFC"); + } + } + + if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) { + val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_0); + PMD_DRV_LOG(ERR, sc, "PXP hw attention-0 0x%08x", val); +/* RQ_USDMDP_FIFO_OVERFLOW */ + if (val & 0x18000) { + PMD_DRV_LOG(ERR, sc, "FATAL error from PXP"); + } + + if (!CHIP_IS_E1x(sc)) { + val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_1); + PMD_DRV_LOG(ERR, sc, "PXP hw attention-1 0x%08x", val); + } + } +#define PXP2_EOP_ERROR_BIT PXP2_PXP2_INT_STS_CLR_0_REG_WR_PGLUE_EOP_ERROR +#define AEU_PXP2_HW_INT_BIT AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT + + if (attn & AEU_PXP2_HW_INT_BIT) { +/* CQ47854 workaround do not panic on + * PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR + */ + if (!CHIP_IS_E1x(sc)) { + mask0 = REG_RD(sc, PXP2_REG_PXP2_INT_MASK_0); + val1 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_1); + mask1 = REG_RD(sc, PXP2_REG_PXP2_INT_MASK_1); + val0 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_0); + /* + * If the only PXP2_EOP_ERROR_BIT is set in + * STS0 and STS1 - clear it + * + * probably we lose additional attentions between + * STS0 and STS_CLR0, in this case user will not + * be notified about them + */ + if (val0 & mask0 & PXP2_EOP_ERROR_BIT && + !(val1 & mask1)) + val0 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_CLR_0); + + /* print the register, since no one can restore it */ + PMD_DRV_LOG(ERR, sc, + "PXP2_REG_PXP2_INT_STS_CLR_0 0x%08x", val0); + + /* + * if PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR + * then notify + */ + if (val0 & PXP2_EOP_ERROR_BIT) { + PMD_DRV_LOG(ERR, sc, "PXP2_WR_PGLUE_EOP_ERROR"); + + /* + * if only PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR is + * set then clear attention from PXP2 block without panic + */ + if (((val0 & mask0) == PXP2_EOP_ERROR_BIT) && + ((val1 & mask1) == 0)) + attn &= ~AEU_PXP2_HW_INT_BIT; + } + } + } + + if (attn & HW_INTERRUT_ASSERT_SET_2) { + reg_offset = (port ? 
MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 : + MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2); + + val = REG_RD(sc, reg_offset); + val &= ~(attn & HW_INTERRUT_ASSERT_SET_2); + REG_WR(sc, reg_offset, val); + + PMD_DRV_LOG(ERR, sc, + "FATAL HW block attention set2 0x%x", + (uint32_t) (attn & HW_INTERRUT_ASSERT_SET_2)); + rte_panic("HW block attention set2"); + } +} + +static void bnx2x_attn_int_deasserted1(struct bnx2x_softc *sc, uint32_t attn) +{ + int port = SC_PORT(sc); + int reg_offset; + uint32_t val; + + if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) { + val = REG_RD(sc, DORQ_REG_DORQ_INT_STS_CLR); + PMD_DRV_LOG(ERR, sc, "DB hw attention 0x%08x", val); +/* DORQ discard attention */ + if (val & 0x2) { + PMD_DRV_LOG(ERR, sc, "FATAL error from DORQ"); + } + } + + if (attn & HW_INTERRUT_ASSERT_SET_1) { + reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 : + MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1); + + val = REG_RD(sc, reg_offset); + val &= ~(attn & HW_INTERRUT_ASSERT_SET_1); + REG_WR(sc, reg_offset, val); + + PMD_DRV_LOG(ERR, sc, + "FATAL HW block attention set1 0x%08x", + (uint32_t) (attn & HW_INTERRUT_ASSERT_SET_1)); + rte_panic("HW block attention set1"); + } +} + +static void bnx2x_attn_int_deasserted0(struct bnx2x_softc *sc, uint32_t attn) +{ + int port = SC_PORT(sc); + int reg_offset; + uint32_t val; + + reg_offset = (port) ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : + MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0; + + if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) { + val = REG_RD(sc, reg_offset); + val &= ~AEU_INPUTS_ATTN_BITS_SPIO5; + REG_WR(sc, reg_offset, val); + + PMD_DRV_LOG(WARNING, sc, "SPIO5 hw attention"); + +/* Fan failure attention */ + elink_hw_reset_phy(&sc->link_params); + bnx2x_fan_failure(sc); + } + + if ((attn & sc->link_vars.aeu_int_mask) && sc->port.pmf) { + bnx2x_acquire_phy_lock(sc); + elink_handle_module_detect_int(&sc->link_params); + bnx2x_release_phy_lock(sc); + } + + if (attn & HW_INTERRUT_ASSERT_SET_0) { + val = REG_RD(sc, reg_offset); + val &= ~(attn & HW_INTERRUT_ASSERT_SET_0); + REG_WR(sc, reg_offset, val); + + rte_panic("FATAL HW block attention set0 0x%lx", + (attn & (unsigned long)HW_INTERRUT_ASSERT_SET_0)); + } +} + +static void bnx2x_attn_int_deasserted(struct bnx2x_softc *sc, uint32_t deasserted) +{ + struct attn_route attn; + struct attn_route *group_mask; + int port = SC_PORT(sc); + int index; + uint32_t reg_addr; + uint32_t val; + uint32_t aeu_mask; + uint8_t global = FALSE; + + /* + * Need to take HW lock because MCP or other port might also + * try to handle this event. + */ + bnx2x_acquire_alr(sc); + + if (bnx2x_chk_parity_attn(sc, &global, TRUE)) { + sc->recovery_state = BNX2X_RECOVERY_INIT; + +/* disable HW interrupts */ + bnx2x_int_disable(sc); + bnx2x_release_alr(sc); + return; + } + + attn.sig[0] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port * 4); + attn.sig[1] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port * 4); + attn.sig[2] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port * 4); + attn.sig[3] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port * 4); + if (!CHIP_IS_E1x(sc)) { + attn.sig[4] = + REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port * 4); + } else { + attn.sig[4] = 0; + } + + for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { + if (deasserted & (1 << index)) { + group_mask = &sc->attn_group[index]; + + bnx2x_attn_int_deasserted4(sc, + attn. + sig[4] & group_mask->sig[4]); + bnx2x_attn_int_deasserted3(sc, + attn. + sig[3] & group_mask->sig[3]); + bnx2x_attn_int_deasserted1(sc, + attn. 
+ sig[1] & group_mask->sig[1]); + bnx2x_attn_int_deasserted2(sc, + attn. + sig[2] & group_mask->sig[2]); + bnx2x_attn_int_deasserted0(sc, + attn. + sig[0] & group_mask->sig[0]); + } + } + + bnx2x_release_alr(sc); + + if (sc->devinfo.int_block == INT_BLOCK_HC) { + reg_addr = (HC_REG_COMMAND_REG + port * 32 + + COMMAND_REG_ATTN_BITS_CLR); + } else { + reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER * 8); + } + + val = ~deasserted; + PMD_DRV_LOG(DEBUG, sc, + "about to mask 0x%08x at %s addr 0x%08x", val, + (sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU", + reg_addr); + REG_WR(sc, reg_addr, val); + + if (~sc->attn_state & deasserted) { + PMD_DRV_LOG(ERR, sc, "IGU error"); + } + + reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 : + MISC_REG_AEU_MASK_ATTN_FUNC_0; + + bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); + + aeu_mask = REG_RD(sc, reg_addr); + + aeu_mask |= (deasserted & 0x3ff); + + REG_WR(sc, reg_addr, aeu_mask); + bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); + + sc->attn_state &= ~deasserted; +} + +static void bnx2x_attn_int(struct bnx2x_softc *sc) +{ + /* read local copy of bits */ + uint32_t attn_bits = le32toh(sc->def_sb->atten_status_block.attn_bits); + uint32_t attn_ack = + le32toh(sc->def_sb->atten_status_block.attn_bits_ack); + uint32_t attn_state = sc->attn_state; + + /* look for changed bits */ + uint32_t asserted = attn_bits & ~attn_ack & ~attn_state; + uint32_t deasserted = ~attn_bits & attn_ack & attn_state; + + PMD_DRV_LOG(DEBUG, sc, + "attn_bits 0x%08x attn_ack 0x%08x asserted 0x%08x deasserted 0x%08x", + attn_bits, attn_ack, asserted, deasserted); + + if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state)) { + PMD_DRV_LOG(ERR, sc, "BAD attention state"); + } + + /* handle bits that were raised */ + if (asserted) { + bnx2x_attn_int_asserted(sc, asserted); + } + + if (deasserted) { + bnx2x_attn_int_deasserted(sc, deasserted); + } +} + +static uint16_t bnx2x_update_dsb_idx(struct bnx2x_softc *sc) +{ + struct host_sp_status_block *def_sb = sc->def_sb; + uint16_t rc = 0; + + if (!def_sb) + return 0; + + mb(); /* status block is written to by the chip */ + + if (sc->def_att_idx != def_sb->atten_status_block.attn_bits_index) { + sc->def_att_idx = def_sb->atten_status_block.attn_bits_index; + rc |= BNX2X_DEF_SB_ATT_IDX; + } + + if (sc->def_idx != def_sb->sp_sb.running_index) { + sc->def_idx = def_sb->sp_sb.running_index; + rc |= BNX2X_DEF_SB_IDX; + } + + mb(); + + return rc; +} + +static struct ecore_queue_sp_obj *bnx2x_cid_to_q_obj(struct bnx2x_softc *sc, + uint32_t cid) +{ + return &sc->sp_objs[CID_TO_FP(cid, sc)].q_obj; +} + +static void bnx2x_handle_mcast_eqe(struct bnx2x_softc *sc) +{ + struct ecore_mcast_ramrod_params rparam; + int rc; + + memset(&rparam, 0, sizeof(rparam)); + + rparam.mcast_obj = &sc->mcast_obj; + + /* clear pending state for the last command */ + sc->mcast_obj.raw.clear_pending(&sc->mcast_obj.raw); + + /* if there are pending mcast commands - send them */ + if (sc->mcast_obj.check_pending(&sc->mcast_obj)) { + rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT); + if (rc < 0) { + PMD_DRV_LOG(INFO, sc, + "Failed to send pending mcast commands (%d)", + rc); + } + } +} + +static void +bnx2x_handle_classification_eqe(struct bnx2x_softc *sc, union event_ring_elem *elem) +{ + unsigned long ramrod_flags = 0; + int rc = 0; + uint32_t cid = elem->message.data.eth_event.echo & BNX2X_SWCID_MASK; + struct ecore_vlan_mac_obj *vlan_mac_obj; + + /* always push next commands out, don't wait here */ + 
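+	/*
+	 * Note on the decode below: the completion's 'echo' field packs two
+	 * values.  The low bits (BNX2X_SWCID_MASK) carry the software CID used
+	 * to select the per-queue MAC object, while the bits above
+	 * BNX2X_SWCID_SHIFT identify which pending classification command
+	 * (MAC vs. MCAST) this ramrod completed; RAMROD_CONT is set so that
+	 * ->complete() pushes any further queued commands right away.
+	 */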
bnx2x_set_bit(RAMROD_CONT, &ramrod_flags); + + switch (le32toh(elem->message.data.eth_event.echo) >> BNX2X_SWCID_SHIFT) { + case ECORE_FILTER_MAC_PENDING: + PMD_DRV_LOG(DEBUG, sc, "Got SETUP_MAC completions"); + vlan_mac_obj = &sc->sp_objs[cid].mac_obj; + break; + + case ECORE_FILTER_MCAST_PENDING: + PMD_DRV_LOG(DEBUG, sc, "Got SETUP_MCAST completions"); + bnx2x_handle_mcast_eqe(sc); + return; + + default: + PMD_DRV_LOG(NOTICE, sc, "Unsupported classification command: %d", + elem->message.data.eth_event.echo); + return; + } + + rc = vlan_mac_obj->complete(sc, vlan_mac_obj, elem, &ramrod_flags); + + if (rc < 0) { + PMD_DRV_LOG(NOTICE, sc, + "Failed to schedule new commands (%d)", rc); + } else if (rc > 0) { + PMD_DRV_LOG(DEBUG, sc, "Scheduled next pending commands..."); + } +} + +static void bnx2x_handle_rx_mode_eqe(struct bnx2x_softc *sc) +{ + bnx2x_clear_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state); + + /* send rx_mode command again if was requested */ + if (bnx2x_test_and_clear_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state)) { + bnx2x_set_storm_rx_mode(sc); + } +} + +static void bnx2x_update_eq_prod(struct bnx2x_softc *sc, uint16_t prod) +{ + storm_memset_eq_prod(sc, prod, SC_FUNC(sc)); + wmb(); /* keep prod updates ordered */ +} + +static void bnx2x_eq_int(struct bnx2x_softc *sc) +{ + uint16_t hw_cons, sw_cons, sw_prod; + union event_ring_elem *elem; + uint8_t echo; + uint32_t cid; + uint8_t opcode; + int spqe_cnt = 0; + struct ecore_queue_sp_obj *q_obj; + struct ecore_func_sp_obj *f_obj = &sc->func_obj; + struct ecore_raw_obj *rss_raw = &sc->rss_conf_obj.raw; + + hw_cons = le16toh(*sc->eq_cons_sb); + + /* + * The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256. + * when we get to the next-page we need to adjust so the loop + * condition below will be met. The next element is the size of a + * regular element and hence incrementing by 1 + */ + if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE) { + hw_cons++; + } + + /* + * This function may never run in parallel with itself for a + * specific sc and no need for a read memory barrier here. 
+ */ + sw_cons = sc->eq_cons; + sw_prod = sc->eq_prod; + + for (; + sw_cons != hw_cons; + sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) { + + elem = &sc->eq[EQ_DESC(sw_cons)]; + +/* elem CID originates from FW, actually LE */ + cid = SW_CID(elem->message.data.cfc_del_event.cid); + opcode = elem->message.opcode; + +/* handle eq element */ + switch (opcode) { + case EVENT_RING_OPCODE_STAT_QUERY: + PMD_DEBUG_PERIODIC_LOG(DEBUG, sc, "got statistics completion event %d", + sc->stats_comp++); + /* nothing to do with stats comp */ + goto next_spqe; + + case EVENT_RING_OPCODE_CFC_DEL: + /* handle according to cid range */ + /* we may want to verify here that the sc state is HALTING */ + PMD_DRV_LOG(DEBUG, sc, "got delete ramrod for MULTI[%d]", + cid); + q_obj = bnx2x_cid_to_q_obj(sc, cid); + if (q_obj->complete_cmd(sc, q_obj, ECORE_Q_CMD_CFC_DEL)) { + break; + } + goto next_spqe; + + case EVENT_RING_OPCODE_STOP_TRAFFIC: + PMD_DRV_LOG(DEBUG, sc, "got STOP TRAFFIC"); + if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_TX_STOP)) { + break; + } + goto next_spqe; + + case EVENT_RING_OPCODE_START_TRAFFIC: + PMD_DRV_LOG(DEBUG, sc, "got START TRAFFIC"); + if (f_obj->complete_cmd + (sc, f_obj, ECORE_F_CMD_TX_START)) { + break; + } + goto next_spqe; + + case EVENT_RING_OPCODE_FUNCTION_UPDATE: + echo = elem->message.data.function_update_event.echo; + if (echo == SWITCH_UPDATE) { + PMD_DRV_LOG(DEBUG, sc, + "got FUNC_SWITCH_UPDATE ramrod"); + if (f_obj->complete_cmd(sc, f_obj, + ECORE_F_CMD_SWITCH_UPDATE)) + { + break; + } + } else { + PMD_DRV_LOG(DEBUG, sc, + "AFEX: ramrod completed FUNCTION_UPDATE"); + f_obj->complete_cmd(sc, f_obj, + ECORE_F_CMD_AFEX_UPDATE); + } + goto next_spqe; + + case EVENT_RING_OPCODE_FORWARD_SETUP: + q_obj = &bnx2x_fwd_sp_obj(sc, q_obj); + if (q_obj->complete_cmd(sc, q_obj, + ECORE_Q_CMD_SETUP_TX_ONLY)) { + break; + } + goto next_spqe; + + case EVENT_RING_OPCODE_FUNCTION_START: + PMD_DRV_LOG(DEBUG, sc, "got FUNC_START ramrod"); + if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_START)) { + break; + } + goto next_spqe; + + case EVENT_RING_OPCODE_FUNCTION_STOP: + PMD_DRV_LOG(DEBUG, sc, "got FUNC_STOP ramrod"); + if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_STOP)) { + break; + } + goto next_spqe; + } + + switch (opcode | sc->state) { + case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BNX2X_STATE_OPEN): + case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BNX2X_STATE_OPENING_WAITING_PORT): + cid = + elem->message.data.eth_event.echo & BNX2X_SWCID_MASK; + PMD_DRV_LOG(DEBUG, sc, "got RSS_UPDATE ramrod. 
CID %d", + cid); + rss_raw->clear_pending(rss_raw); + break; + + case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN): + case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG): + case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_CLOSING_WAITING_HALT): + case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BNX2X_STATE_OPEN): + case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BNX2X_STATE_DIAG): + case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BNX2X_STATE_CLOSING_WAITING_HALT): + PMD_DRV_LOG(DEBUG, sc, + "got (un)set mac ramrod"); + bnx2x_handle_classification_eqe(sc, elem); + break; + + case (EVENT_RING_OPCODE_MULTICAST_RULES | BNX2X_STATE_OPEN): + case (EVENT_RING_OPCODE_MULTICAST_RULES | BNX2X_STATE_DIAG): + case (EVENT_RING_OPCODE_MULTICAST_RULES | BNX2X_STATE_CLOSING_WAITING_HALT): + PMD_DRV_LOG(DEBUG, sc, + "got mcast ramrod"); + bnx2x_handle_mcast_eqe(sc); + break; + + case (EVENT_RING_OPCODE_FILTERS_RULES | BNX2X_STATE_OPEN): + case (EVENT_RING_OPCODE_FILTERS_RULES | BNX2X_STATE_DIAG): + case (EVENT_RING_OPCODE_FILTERS_RULES | BNX2X_STATE_CLOSING_WAITING_HALT): + PMD_DRV_LOG(DEBUG, sc, + "got rx_mode ramrod"); + bnx2x_handle_rx_mode_eqe(sc); + break; + + default: + /* unknown event log error and continue */ + PMD_DRV_LOG(INFO, sc, "Unknown EQ event %d, sc->state 0x%x", + elem->message.opcode, sc->state); + } + +next_spqe: + spqe_cnt++; + } /* for */ + + mb(); + atomic_add_acq_long(&sc->eq_spq_left, spqe_cnt); + + sc->eq_cons = sw_cons; + sc->eq_prod = sw_prod; + + /* make sure that above mem writes were issued towards the memory */ + wmb(); + + /* update producer */ + bnx2x_update_eq_prod(sc, sc->eq_prod); +} + +static int bnx2x_handle_sp_tq(struct bnx2x_softc *sc) +{ + uint16_t status; + int rc = 0; + + PMD_DRV_LOG(DEBUG, sc, "---> SP TASK <---"); + + /* what work needs to be performed? */ + status = bnx2x_update_dsb_idx(sc); + + PMD_DRV_LOG(DEBUG, sc, "dsb status 0x%04x", status); + + /* HW attentions */ + if (status & BNX2X_DEF_SB_ATT_IDX) { + PMD_DRV_LOG(DEBUG, sc, "---> ATTN INTR <---"); + bnx2x_attn_int(sc); + status &= ~BNX2X_DEF_SB_ATT_IDX; + rc = 1; + } + + /* SP events: STAT_QUERY and others */ + if (status & BNX2X_DEF_SB_IDX) { +/* handle EQ completions */ + PMD_DRV_LOG(DEBUG, sc, "---> EQ INTR <---"); + bnx2x_eq_int(sc); + bnx2x_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, + le16toh(sc->def_idx), IGU_INT_NOP, 1); + status &= ~BNX2X_DEF_SB_IDX; + } + + /* if status is non zero then something went wrong */ + if (unlikely(status)) { + PMD_DRV_LOG(INFO, sc, + "Got an unknown SP interrupt! (0x%04x)", status); + } + + /* ack status block only if something was actually handled */ + bnx2x_ack_sb(sc, sc->igu_dsb_id, ATTENTION_ID, + le16toh(sc->def_att_idx), IGU_INT_ENABLE, 1); + + return rc; +} + +static void bnx2x_handle_fp_tq(struct bnx2x_fastpath *fp) +{ + struct bnx2x_softc *sc = fp->sc; + uint8_t more_rx = FALSE; + + /* Make sure FP is initialized */ + if (!fp->sb_running_index) + return; + + PMD_DEBUG_PERIODIC_LOG(DEBUG, sc, + "---> FP TASK QUEUE (%d) <--", fp->index); + + /* update the fastpath index */ + bnx2x_update_fp_sb_idx(fp); + + if (rte_atomic32_read(&sc->scan_fp) == 1) { + if (bnx2x_has_rx_work(fp)) { + more_rx = bnx2x_rxeof(sc, fp); + } + + if (more_rx) { + /* still more work to do */ + bnx2x_handle_fp_tq(fp); + return; + } + /* We have completed slow path completion, clear the flag */ + rte_atomic32_set(&sc->scan_fp, 0); + } + + bnx2x_ack_sb(sc, fp->igu_sb_id, USTORM_ID, + le16toh(fp->fp_hc_idx), IGU_INT_ENABLE, 1); +} + +/* + * Legacy interrupt entry point. 
+ * + * Verifies that the controller generated the interrupt and + * then calls a separate routine to handle the various + * interrupt causes: link, RX, and TX. + */ +int bnx2x_intr_legacy(struct bnx2x_softc *sc) +{ + struct bnx2x_fastpath *fp; + uint32_t status, mask; + int i, rc = 0; + + /* + * 0 for ustorm, 1 for cstorm + * the bits returned from ack_int() are 0-15 + * bit 0 = attention status block + * bit 1 = fast path status block + * a mask of 0x2 or more = tx/rx event + * a mask of 1 = slow path event + */ + + status = bnx2x_ack_int(sc); + + /* the interrupt is not for us */ + if (unlikely(status == 0)) { + return 0; + } + + PMD_DEBUG_PERIODIC_LOG(DEBUG, sc, "Interrupt status 0x%04x", status); + //bnx2x_dump_status_block(sc); + + FOR_EACH_ETH_QUEUE(sc, i) { + fp = &sc->fp[i]; + mask = (0x2 << (fp->index + CNIC_SUPPORT(sc))); + if (status & mask) { + /* acknowledge and disable further fastpath interrupts */ + bnx2x_ack_sb(sc, fp->igu_sb_id, USTORM_ID, + 0, IGU_INT_DISABLE, 0); + bnx2x_handle_fp_tq(fp); + status &= ~mask; + } + } + + if (unlikely(status & 0x1)) { + /* acknowledge and disable further slowpath interrupts */ + bnx2x_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, + 0, IGU_INT_DISABLE, 0); + rc = bnx2x_handle_sp_tq(sc); + status &= ~0x1; + } + + if (unlikely(status)) { + PMD_DRV_LOG(WARNING, sc, + "Unexpected fastpath status (0x%08x)!", status); + } + + return rc; +} + +static int bnx2x_init_hw_common_chip(struct bnx2x_softc *sc); +static int bnx2x_init_hw_common(struct bnx2x_softc *sc); +static int bnx2x_init_hw_port(struct bnx2x_softc *sc); +static int bnx2x_init_hw_func(struct bnx2x_softc *sc); +static void bnx2x_reset_common(struct bnx2x_softc *sc); +static void bnx2x_reset_port(struct bnx2x_softc *sc); +static void bnx2x_reset_func(struct bnx2x_softc *sc); +static int bnx2x_init_firmware(struct bnx2x_softc *sc); +static void bnx2x_release_firmware(struct bnx2x_softc *sc); + +static struct +ecore_func_sp_drv_ops bnx2x_func_sp_drv = { + .init_hw_cmn_chip = bnx2x_init_hw_common_chip, + .init_hw_cmn = bnx2x_init_hw_common, + .init_hw_port = bnx2x_init_hw_port, + .init_hw_func = bnx2x_init_hw_func, + + .reset_hw_cmn = bnx2x_reset_common, + .reset_hw_port = bnx2x_reset_port, + .reset_hw_func = bnx2x_reset_func, + + .init_fw = bnx2x_init_firmware, + .release_fw = bnx2x_release_firmware, +}; + +static void bnx2x_init_func_obj(struct bnx2x_softc *sc) +{ + sc->dmae_ready = 0; + + PMD_INIT_FUNC_TRACE(sc); + + ecore_init_func_obj(sc, + &sc->func_obj, + BNX2X_SP(sc, func_rdata), + (rte_iova_t)BNX2X_SP_MAPPING(sc, func_rdata), + BNX2X_SP(sc, func_afex_rdata), + (rte_iova_t)BNX2X_SP_MAPPING(sc, func_afex_rdata), + &bnx2x_func_sp_drv); +} + +static int bnx2x_init_hw(struct bnx2x_softc *sc, uint32_t load_code) +{ + struct ecore_func_state_params func_params = { NULL }; + int rc; + + PMD_INIT_FUNC_TRACE(sc); + + /* prepare the parameters for function state transitions */ + bnx2x_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); + + func_params.f_obj = &sc->func_obj; + func_params.cmd = ECORE_F_CMD_HW_INIT; + + func_params.params.hw_init.load_phase = load_code; + + /* + * Via a plethora of function pointers, we will eventually reach + * bnx2x_init_hw_common(), bnx2x_init_hw_port(), or bnx2x_init_hw_func(). 
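+	 * (The dispatch goes through the bnx2x_func_sp_drv ops table that
+	 * bnx2x_init_func_obj() registered with the ecore function object.)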
+ */ + rc = ecore_func_state_change(sc, &func_params); + + return rc; +} + +static void +bnx2x_fill(struct bnx2x_softc *sc, uint32_t addr, int fill, uint32_t len) +{ + uint32_t i; + + if (!(len % 4) && !(addr % 4)) { + for (i = 0; i < len; i += 4) { + REG_WR(sc, (addr + i), fill); + } + } else { + for (i = 0; i < len; i++) { + REG_WR8(sc, (addr + i), fill); + } + } +} + +/* writes FP SP data to FW - data_size in dwords */ +static void +bnx2x_wr_fp_sb_data(struct bnx2x_softc *sc, int fw_sb_id, uint32_t * sb_data_p, + uint32_t data_size) +{ + uint32_t index; + + for (index = 0; index < data_size; index++) { + REG_WR(sc, + (BAR_CSTRORM_INTMEM + + CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) + + (sizeof(uint32_t) * index)), *(sb_data_p + index)); + } +} + +static void bnx2x_zero_fp_sb(struct bnx2x_softc *sc, int fw_sb_id) +{ + struct hc_status_block_data_e2 sb_data_e2; + struct hc_status_block_data_e1x sb_data_e1x; + uint32_t *sb_data_p; + uint32_t data_size = 0; + + if (!CHIP_IS_E1x(sc)) { + memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2)); + sb_data_e2.common.state = SB_DISABLED; + sb_data_e2.common.p_func.vf_valid = FALSE; + sb_data_p = (uint32_t *) & sb_data_e2; + data_size = (sizeof(struct hc_status_block_data_e2) / + sizeof(uint32_t)); + } else { + memset(&sb_data_e1x, 0, + sizeof(struct hc_status_block_data_e1x)); + sb_data_e1x.common.state = SB_DISABLED; + sb_data_e1x.common.p_func.vf_valid = FALSE; + sb_data_p = (uint32_t *) & sb_data_e1x; + data_size = (sizeof(struct hc_status_block_data_e1x) / + sizeof(uint32_t)); + } + + bnx2x_wr_fp_sb_data(sc, fw_sb_id, sb_data_p, data_size); + + bnx2x_fill(sc, + (BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id)), 0, + CSTORM_STATUS_BLOCK_SIZE); + bnx2x_fill(sc, (BAR_CSTRORM_INTMEM + CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id)), + 0, CSTORM_SYNC_BLOCK_SIZE); +} + +static void +bnx2x_wr_sp_sb_data(struct bnx2x_softc *sc, + struct hc_sp_status_block_data *sp_sb_data) +{ + uint32_t i; + + for (i = 0; + i < (sizeof(struct hc_sp_status_block_data) / sizeof(uint32_t)); + i++) { + REG_WR(sc, + (BAR_CSTRORM_INTMEM + + CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(SC_FUNC(sc)) + + (i * sizeof(uint32_t))), + *((uint32_t *) sp_sb_data + i)); + } +} + +static void bnx2x_zero_sp_sb(struct bnx2x_softc *sc) +{ + struct hc_sp_status_block_data sp_sb_data; + + memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data)); + + sp_sb_data.state = SB_DISABLED; + sp_sb_data.p_func.vf_valid = FALSE; + + bnx2x_wr_sp_sb_data(sc, &sp_sb_data); + + bnx2x_fill(sc, + (BAR_CSTRORM_INTMEM + + CSTORM_SP_STATUS_BLOCK_OFFSET(SC_FUNC(sc))), + 0, CSTORM_SP_STATUS_BLOCK_SIZE); + bnx2x_fill(sc, + (BAR_CSTRORM_INTMEM + + CSTORM_SP_SYNC_BLOCK_OFFSET(SC_FUNC(sc))), + 0, CSTORM_SP_SYNC_BLOCK_SIZE); +} + +static void +bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm, int igu_sb_id, + int igu_seg_id) +{ + hc_sm->igu_sb_id = igu_sb_id; + hc_sm->igu_seg_id = igu_seg_id; + hc_sm->timer_value = 0xFF; + hc_sm->time_to_expire = 0xFFFFFFFF; +} + +static void bnx2x_map_sb_state_machines(struct hc_index_data *index_data) +{ + /* zero out state machine indices */ + + /* rx indices */ + index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID; + + /* tx indices */ + index_data[HC_INDEX_OOO_TX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID; + index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID; + index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID; + index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID; + + /* map indices */ + + 
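+	/*
+	 * Each index is bound to the state machine that services it: the RX
+	 * CQ consumer index to SM_RX_ID and the TX CQ consumer indices
+	 * (including the OOO queue) to SM_TX_ID, via the HC_INDEX_DATA_SM_ID
+	 * field cleared just above.
+	 */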
/* rx indices */ + index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |= + (SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT); + + /* tx indices */ + index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |= + (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT); + index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |= + (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT); + index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |= + (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT); + index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |= + (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT); +} + +static void +bnx2x_init_sb(struct bnx2x_softc *sc, rte_iova_t busaddr, int vfid, + uint8_t vf_valid, int fw_sb_id, int igu_sb_id) +{ + struct hc_status_block_data_e2 sb_data_e2; + struct hc_status_block_data_e1x sb_data_e1x; + struct hc_status_block_sm *hc_sm_p; + uint32_t *sb_data_p; + int igu_seg_id; + int data_size; + + if (CHIP_INT_MODE_IS_BC(sc)) { + igu_seg_id = HC_SEG_ACCESS_NORM; + } else { + igu_seg_id = IGU_SEG_ACCESS_NORM; + } + + bnx2x_zero_fp_sb(sc, fw_sb_id); + + if (!CHIP_IS_E1x(sc)) { + memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2)); + sb_data_e2.common.state = SB_ENABLED; + sb_data_e2.common.p_func.pf_id = SC_FUNC(sc); + sb_data_e2.common.p_func.vf_id = vfid; + sb_data_e2.common.p_func.vf_valid = vf_valid; + sb_data_e2.common.p_func.vnic_id = SC_VN(sc); + sb_data_e2.common.same_igu_sb_1b = TRUE; + sb_data_e2.common.host_sb_addr.hi = U64_HI(busaddr); + sb_data_e2.common.host_sb_addr.lo = U64_LO(busaddr); + hc_sm_p = sb_data_e2.common.state_machine; + sb_data_p = (uint32_t *) & sb_data_e2; + data_size = (sizeof(struct hc_status_block_data_e2) / + sizeof(uint32_t)); + bnx2x_map_sb_state_machines(sb_data_e2.index_data); + } else { + memset(&sb_data_e1x, 0, + sizeof(struct hc_status_block_data_e1x)); + sb_data_e1x.common.state = SB_ENABLED; + sb_data_e1x.common.p_func.pf_id = SC_FUNC(sc); + sb_data_e1x.common.p_func.vf_id = 0xff; + sb_data_e1x.common.p_func.vf_valid = FALSE; + sb_data_e1x.common.p_func.vnic_id = SC_VN(sc); + sb_data_e1x.common.same_igu_sb_1b = TRUE; + sb_data_e1x.common.host_sb_addr.hi = U64_HI(busaddr); + sb_data_e1x.common.host_sb_addr.lo = U64_LO(busaddr); + hc_sm_p = sb_data_e1x.common.state_machine; + sb_data_p = (uint32_t *) & sb_data_e1x; + data_size = (sizeof(struct hc_status_block_data_e1x) / + sizeof(uint32_t)); + bnx2x_map_sb_state_machines(sb_data_e1x.index_data); + } + + bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID], igu_sb_id, igu_seg_id); + bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID], igu_sb_id, igu_seg_id); + + /* write indices to HW - PCI guarantees endianity of regpairs */ + bnx2x_wr_fp_sb_data(sc, fw_sb_id, sb_data_p, data_size); +} + +static uint8_t bnx2x_fp_qzone_id(struct bnx2x_fastpath *fp) +{ + if (CHIP_IS_E1x(fp->sc)) { + return fp->cl_id + SC_PORT(fp->sc) * ETH_MAX_RX_CLIENTS_E1H; + } else { + return fp->cl_id; + } +} + +static uint32_t +bnx2x_rx_ustorm_prods_offset(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp) +{ + uint32_t offset = BAR_USTRORM_INTMEM; + + if (IS_VF(sc)) { + return PXP_VF_ADDR_USDM_QUEUES_START + + (sc->acquire_resp.resc.hw_qid[fp->index] * + sizeof(struct ustorm_queue_zone_data)); + } else if (!CHIP_IS_E1x(sc)) { + offset += USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id); + } else { + offset += USTORM_RX_PRODS_E1X_OFFSET(SC_PORT(sc), fp->cl_id); + } + + return offset; +} + +static void bnx2x_init_eth_fp(struct bnx2x_softc *sc, int idx) +{ + struct bnx2x_fastpath *fp = &sc->fp[idx]; + uint32_t cids[ECORE_MULTI_TX_COS] = { 0 }; + unsigned long q_type = 0; + int cos; + + fp->sc = sc; + 
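+	/*
+	 * Each fastpath gets its own FW and IGU status block below; the
+	 * + CNIC_SUPPORT(sc) term accounts for any status block reserved
+	 * for a CNIC client.
+	 */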
fp->index = idx; + + fp->igu_sb_id = (sc->igu_base_sb + idx + CNIC_SUPPORT(sc)); + fp->fw_sb_id = (sc->base_fw_ndsb + idx + CNIC_SUPPORT(sc)); + + if (CHIP_IS_E1x(sc)) + fp->cl_id = SC_L_ID(sc) + idx; + else +/* want client ID same as IGU SB ID for non-E1 */ + fp->cl_id = fp->igu_sb_id; + fp->cl_qzone_id = bnx2x_fp_qzone_id(fp); + + /* setup sb indices */ + if (!CHIP_IS_E1x(sc)) { + fp->sb_index_values = fp->status_block.e2_sb->sb.index_values; + fp->sb_running_index = fp->status_block.e2_sb->sb.running_index; + } else { + fp->sb_index_values = fp->status_block.e1x_sb->sb.index_values; + fp->sb_running_index = + fp->status_block.e1x_sb->sb.running_index; + } + + /* init shortcut */ + fp->ustorm_rx_prods_offset = bnx2x_rx_ustorm_prods_offset(sc, fp); + + fp->rx_cq_cons_sb = &fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS]; + + for (cos = 0; cos < sc->max_cos; cos++) { + cids[cos] = idx; + } + fp->tx_cons_sb = &fp->sb_index_values[HC_INDEX_ETH_TX_CQ_CONS_COS0]; + + /* nothing more for a VF to do */ + if (IS_VF(sc)) { + return; + } + + bnx2x_init_sb(sc, fp->sb_dma.paddr, BNX2X_VF_ID_INVALID, FALSE, + fp->fw_sb_id, fp->igu_sb_id); + + bnx2x_update_fp_sb_idx(fp); + + /* Configure Queue State object */ + bnx2x_set_bit(ECORE_Q_TYPE_HAS_RX, &q_type); + bnx2x_set_bit(ECORE_Q_TYPE_HAS_TX, &q_type); + + ecore_init_queue_obj(sc, + &sc->sp_objs[idx].q_obj, + fp->cl_id, + cids, + sc->max_cos, + SC_FUNC(sc), + BNX2X_SP(sc, q_rdata), + (rte_iova_t)BNX2X_SP_MAPPING(sc, q_rdata), + q_type); + + /* configure classification DBs */ + ecore_init_mac_obj(sc, + &sc->sp_objs[idx].mac_obj, + fp->cl_id, + idx, + SC_FUNC(sc), + BNX2X_SP(sc, mac_rdata), + (rte_iova_t)BNX2X_SP_MAPPING(sc, mac_rdata), + ECORE_FILTER_MAC_PENDING, &sc->sp_state, + ECORE_OBJ_TYPE_RX_TX, &sc->macs_pool); +} + +static void +bnx2x_update_rx_prod(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp, + uint16_t rx_bd_prod, uint16_t rx_cq_prod) +{ + struct ustorm_eth_rx_producers rx_prods; + uint32_t i; + + memset(&rx_prods, 0, sizeof(rx_prods)); + + /* update producers */ + rx_prods.bd_prod = rx_bd_prod; + rx_prods.cqe_prod = rx_cq_prod; + + /* + * Make sure that the BD and SGE data is updated before updating the + * producers since FW might read the BD/SGE right after the producer + * is updated. + * This is only applicable for weak-ordered memory model archs such + * as IA-64. The following barrier is also mandatory since FW will + * assumes BDs must have buffers. + */ + wmb(); + + for (i = 0; i < (sizeof(rx_prods) / 4); i++) { + REG_WR(sc, (fp->ustorm_rx_prods_offset + (i * 4)), + ((uint32_t *)&rx_prods)[i]); + } + + wmb(); /* keep prod updates ordered */ +} + +static void bnx2x_init_rx_rings(struct bnx2x_softc *sc) +{ + struct bnx2x_fastpath *fp; + int i; + struct bnx2x_rx_queue *rxq; + + for (i = 0; i < sc->num_queues; i++) { + fp = &sc->fp[i]; + rxq = sc->rx_queues[fp->index]; + if (!rxq) { + PMD_RX_LOG(ERR, "RX queue is NULL"); + return; + } + + rxq->rx_bd_head = 0; + rxq->rx_bd_tail = rxq->nb_rx_desc; + rxq->rx_cq_head = 0; + rxq->rx_cq_tail = TOTAL_RCQ_ENTRIES(rxq); + *fp->rx_cq_cons_sb = 0; + + /* + * Activate the BD ring... 
+ * Warning, this will generate an interrupt (to the TSTORM) + * so this can only be done after the chip is initialized + */ + bnx2x_update_rx_prod(sc, fp, rxq->rx_bd_tail, rxq->rx_cq_tail); + + if (i != 0) { + continue; + } + } +} + +static void bnx2x_init_tx_ring_one(struct bnx2x_fastpath *fp) +{ + struct bnx2x_tx_queue *txq = fp->sc->tx_queues[fp->index]; + + fp->tx_db.data.header.header = 1 << DOORBELL_HDR_DB_TYPE_SHIFT; + fp->tx_db.data.zero_fill1 = 0; + fp->tx_db.data.prod = 0; + + if (!txq) { + PMD_TX_LOG(ERR, "ERROR: TX queue is NULL"); + return; + } + + txq->tx_pkt_tail = 0; + txq->tx_pkt_head = 0; + txq->tx_bd_tail = 0; + txq->tx_bd_head = 0; +} + +static void bnx2x_init_tx_rings(struct bnx2x_softc *sc) +{ + int i; + + for (i = 0; i < sc->num_queues; i++) { + bnx2x_init_tx_ring_one(&sc->fp[i]); + } +} + +static void bnx2x_init_def_sb(struct bnx2x_softc *sc) +{ + struct host_sp_status_block *def_sb = sc->def_sb; + rte_iova_t mapping = sc->def_sb_dma.paddr; + int igu_sp_sb_index; + int igu_seg_id; + int port = SC_PORT(sc); + int func = SC_FUNC(sc); + int reg_offset, reg_offset_en5; + uint64_t section; + int index, sindex; + struct hc_sp_status_block_data sp_sb_data; + + memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data)); + + if (CHIP_INT_MODE_IS_BC(sc)) { + igu_sp_sb_index = DEF_SB_IGU_ID; + igu_seg_id = HC_SEG_ACCESS_DEF; + } else { + igu_sp_sb_index = sc->igu_dsb_id; + igu_seg_id = IGU_SEG_ACCESS_DEF; + } + + /* attentions */ + section = ((uint64_t) mapping + + offsetof(struct host_sp_status_block, atten_status_block)); + def_sb->atten_status_block.status_block_id = igu_sp_sb_index; + sc->attn_state = 0; + + reg_offset = (port) ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : + MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0; + + reg_offset_en5 = (port) ? MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 : + MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0; + + for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { +/* take care of sig[0]..sig[4] */ + for (sindex = 0; sindex < 4; sindex++) { + sc->attn_group[index].sig[sindex] = + REG_RD(sc, + (reg_offset + (sindex * 0x4) + + (0x10 * index))); + } + + if (!CHIP_IS_E1x(sc)) { + /* + * enable5 is separate from the rest of the registers, + * and the address skip is 4 and not 16 between the + * different groups + */ + sc->attn_group[index].sig[4] = + REG_RD(sc, (reg_offset_en5 + (0x4 * index))); + } else { + sc->attn_group[index].sig[4] = 0; + } + } + + if (sc->devinfo.int_block == INT_BLOCK_HC) { + reg_offset = + port ? 
HC_REG_ATTN_MSG1_ADDR_L : HC_REG_ATTN_MSG0_ADDR_L; + REG_WR(sc, reg_offset, U64_LO(section)); + REG_WR(sc, (reg_offset + 4), U64_HI(section)); + } else if (!CHIP_IS_E1x(sc)) { + REG_WR(sc, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section)); + REG_WR(sc, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section)); + } + + section = ((uint64_t) mapping + + offsetof(struct host_sp_status_block, sp_sb)); + + bnx2x_zero_sp_sb(sc); + + /* PCI guarantees endianity of regpair */ + sp_sb_data.state = SB_ENABLED; + sp_sb_data.host_sb_addr.lo = U64_LO(section); + sp_sb_data.host_sb_addr.hi = U64_HI(section); + sp_sb_data.igu_sb_id = igu_sp_sb_index; + sp_sb_data.igu_seg_id = igu_seg_id; + sp_sb_data.p_func.pf_id = func; + sp_sb_data.p_func.vnic_id = SC_VN(sc); + sp_sb_data.p_func.vf_id = 0xff; + + bnx2x_wr_sp_sb_data(sc, &sp_sb_data); + + bnx2x_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0); +} + +static void bnx2x_init_sp_ring(struct bnx2x_softc *sc) +{ + atomic_store_rel_long(&sc->cq_spq_left, MAX_SPQ_PENDING); + sc->spq_prod_idx = 0; + sc->dsb_sp_prod = + &sc->def_sb->sp_sb.index_values[HC_SP_INDEX_ETH_DEF_CONS]; + sc->spq_prod_bd = sc->spq; + sc->spq_last_bd = (sc->spq_prod_bd + MAX_SP_DESC_CNT); +} + +static void bnx2x_init_eq_ring(struct bnx2x_softc *sc) +{ + union event_ring_elem *elem; + int i; + + for (i = 1; i <= NUM_EQ_PAGES; i++) { + elem = &sc->eq[EQ_DESC_CNT_PAGE * i - 1]; + + elem->next_page.addr.hi = htole32(U64_HI(sc->eq_dma.paddr + + BNX2X_PAGE_SIZE * + (i % NUM_EQ_PAGES))); + elem->next_page.addr.lo = htole32(U64_LO(sc->eq_dma.paddr + + BNX2X_PAGE_SIZE * + (i % NUM_EQ_PAGES))); + } + + sc->eq_cons = 0; + sc->eq_prod = NUM_EQ_DESC; + sc->eq_cons_sb = &sc->def_sb->sp_sb.index_values[HC_SP_INDEX_EQ_CONS]; + + atomic_store_rel_long(&sc->eq_spq_left, + (min((MAX_SP_DESC_CNT - MAX_SPQ_PENDING), + NUM_EQ_DESC) - 1)); +} + +static void bnx2x_init_internal_common(struct bnx2x_softc *sc) +{ + int i; + + /* + * Zero this manually as its initialization is currently missing + * in the initTool. + */ + for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++) { + REG_WR(sc, + (BAR_USTRORM_INTMEM + USTORM_AGG_DATA_OFFSET + (i * 4)), + 0); + } + + if (!CHIP_IS_E1x(sc)) { + REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET), + CHIP_INT_MODE_IS_BC(sc) ? 
HC_IGU_BC_MODE : + HC_IGU_NBC_MODE); + } +} + +static void bnx2x_init_internal(struct bnx2x_softc *sc, uint32_t load_code) +{ + switch (load_code) { + case FW_MSG_CODE_DRV_LOAD_COMMON: + case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: + bnx2x_init_internal_common(sc); + /* no break */ + + case FW_MSG_CODE_DRV_LOAD_PORT: + /* nothing to do */ + /* no break */ + + case FW_MSG_CODE_DRV_LOAD_FUNCTION: + /* internal memory per function is initialized inside bnx2x_pf_init */ + break; + + default: + PMD_DRV_LOG(NOTICE, sc, "Unknown load_code (0x%x) from MCP", + load_code); + break; + } +} + +static void +storm_memset_func_cfg(struct bnx2x_softc *sc, + struct tstorm_eth_function_common_config *tcfg, + uint16_t abs_fid) +{ + uint32_t addr; + size_t size; + + addr = (BAR_TSTRORM_INTMEM + + TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid)); + size = sizeof(struct tstorm_eth_function_common_config); + ecore_storm_memset_struct(sc, addr, size, (uint32_t *) tcfg); +} + +static void bnx2x_func_init(struct bnx2x_softc *sc, struct bnx2x_func_init_params *p) +{ + struct tstorm_eth_function_common_config tcfg = { 0 }; + + if (CHIP_IS_E1x(sc)) { + storm_memset_func_cfg(sc, &tcfg, p->func_id); + } + + /* Enable the function in the FW */ + storm_memset_vf_to_pf(sc, p->func_id, p->pf_id); + storm_memset_func_en(sc, p->func_id, 1); + + /* spq */ + if (p->func_flgs & FUNC_FLG_SPQ) { + storm_memset_spq_addr(sc, p->spq_map, p->func_id); + REG_WR(sc, + (XSEM_REG_FAST_MEMORY + + XSTORM_SPQ_PROD_OFFSET(p->func_id)), p->spq_prod); + } +} + +/* + * Calculates the sum of vn_min_rates. + * It's needed for further normalizing of the min_rates. + * Returns: + * sum of vn_min_rates. + * or + * 0 - if all the min_rates are 0. + * In the later case fainess algorithm should be deactivated. + * If all min rates are not zero then those that are zeroes will be set to 1. 
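+ * Illustrative example (hypothetical MIN_BW fields): {0, 25, 75, 0} with no
+ * hidden VNs yields vnic_min_rate = {DEF_MIN_RATE, 2500, 7500, DEF_MIN_RATE}
+ * and fairness stays enabled, while an all-zero configuration leaves every
+ * VN at DEF_MIN_RATE but clears CMNG_FLAGS_PER_PORT_FAIRNESS_VN.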
+ */ +static void bnx2x_calc_vn_min(struct bnx2x_softc *sc, struct cmng_init_input *input) +{ + uint32_t vn_cfg; + uint32_t vn_min_rate; + int all_zero = 1; + int vn; + + for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) { + vn_cfg = sc->devinfo.mf_info.mf_config[vn]; + vn_min_rate = (((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >> + FUNC_MF_CFG_MIN_BW_SHIFT) * 100); + + if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) { + /* skip hidden VNs */ + vn_min_rate = 0; + } else if (!vn_min_rate) { + /* If min rate is zero - set it to 100 */ + vn_min_rate = DEF_MIN_RATE; + } else { + all_zero = 0; + } + + input->vnic_min_rate[vn] = vn_min_rate; + } + + /* if ETS or all min rates are zeros - disable fairness */ + if (all_zero) { + input->flags.cmng_enables &= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN; + } else { + input->flags.cmng_enables |= CMNG_FLAGS_PER_PORT_FAIRNESS_VN; + } +} + +static uint16_t +bnx2x_extract_max_cfg(__rte_unused struct bnx2x_softc *sc, uint32_t mf_cfg) +{ + uint16_t max_cfg = ((mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >> + FUNC_MF_CFG_MAX_BW_SHIFT); + + if (!max_cfg) { + PMD_DRV_LOG(DEBUG, sc, + "Max BW configured to 0 - using 100 instead"); + max_cfg = 100; + } + + return max_cfg; +} + +static void +bnx2x_calc_vn_max(struct bnx2x_softc *sc, int vn, struct cmng_init_input *input) +{ + uint16_t vn_max_rate; + uint32_t vn_cfg = sc->devinfo.mf_info.mf_config[vn]; + uint32_t max_cfg; + + if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) { + vn_max_rate = 0; + } else { + max_cfg = bnx2x_extract_max_cfg(sc, vn_cfg); + + if (IS_MF_SI(sc)) { + /* max_cfg in percents of linkspeed */ + vn_max_rate = + ((sc->link_vars.line_speed * max_cfg) / 100); + } else { /* SD modes */ + /* max_cfg is absolute in 100Mb units */ + vn_max_rate = (max_cfg * 100); + } + } + + input->vnic_max_rate[vn] = vn_max_rate; +} + +static void +bnx2x_cmng_fns_init(struct bnx2x_softc *sc, uint8_t read_cfg, uint8_t cmng_type) +{ + struct cmng_init_input input; + int vn; + + memset(&input, 0, sizeof(struct cmng_init_input)); + + input.port_rate = sc->link_vars.line_speed; + + if (cmng_type == CMNG_FNS_MINMAX) { +/* read mf conf from shmem */ + if (read_cfg) { + bnx2x_read_mf_cfg(sc); + } + +/* get VN min rate and enable fairness if not 0 */ + bnx2x_calc_vn_min(sc, &input); + +/* get VN max rate */ + if (sc->port.pmf) { + for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) { + bnx2x_calc_vn_max(sc, vn, &input); + } + } + +/* always enable rate shaping and fairness */ + input.flags.cmng_enables |= CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN; + + ecore_init_cmng(&input, &sc->cmng); + return; + } +} + +static int bnx2x_get_cmng_fns_mode(struct bnx2x_softc *sc) +{ + if (CHIP_REV_IS_SLOW(sc)) { + return CMNG_FNS_NONE; + } + + if (IS_MF(sc)) { + return CMNG_FNS_MINMAX; + } + + return CMNG_FNS_NONE; +} + +static void +storm_memset_cmng(struct bnx2x_softc *sc, struct cmng_init *cmng, uint8_t port) +{ + int vn; + int func; + uint32_t addr; + size_t size; + + addr = (BAR_XSTRORM_INTMEM + XSTORM_CMNG_PER_PORT_VARS_OFFSET(port)); + size = sizeof(struct cmng_struct_per_port); + ecore_storm_memset_struct(sc, addr, size, (uint32_t *) & cmng->port); + + for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) { + func = func_by_vn(sc, vn); + + addr = (BAR_XSTRORM_INTMEM + + XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func)); + size = sizeof(struct rate_shaping_vars_per_vn); + ecore_storm_memset_struct(sc, addr, size, + (uint32_t *) & cmng-> + vnic.vnic_max_rate[vn]); + + addr = (BAR_XSTRORM_INTMEM + + XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func)); + size = sizeof(struct fairness_vars_per_vn); + 
ecore_storm_memset_struct(sc, addr, size, + (uint32_t *) & cmng-> + vnic.vnic_min_rate[vn]); + } +} + +static void bnx2x_pf_init(struct bnx2x_softc *sc) +{ + struct bnx2x_func_init_params func_init; + struct event_ring_data eq_data; + uint16_t flags; + + memset(&eq_data, 0, sizeof(struct event_ring_data)); + memset(&func_init, 0, sizeof(struct bnx2x_func_init_params)); + + if (!CHIP_IS_E1x(sc)) { +/* reset IGU PF statistics: MSIX + ATTN */ +/* PF */ + REG_WR(sc, + (IGU_REG_STATISTIC_NUM_MESSAGE_SENT + + (BNX2X_IGU_STAS_MSG_VF_CNT * 4) + + ((CHIP_IS_MODE_4_PORT(sc) ? SC_FUNC(sc) : SC_VN(sc)) * + 4)), 0); +/* ATTN */ + REG_WR(sc, + (IGU_REG_STATISTIC_NUM_MESSAGE_SENT + + (BNX2X_IGU_STAS_MSG_VF_CNT * 4) + + (BNX2X_IGU_STAS_MSG_PF_CNT * 4) + + ((CHIP_IS_MODE_4_PORT(sc) ? SC_FUNC(sc) : SC_VN(sc)) * + 4)), 0); + } + + /* function setup flags */ + flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ); + + func_init.func_flgs = flags; + func_init.pf_id = SC_FUNC(sc); + func_init.func_id = SC_FUNC(sc); + func_init.spq_map = sc->spq_dma.paddr; + func_init.spq_prod = sc->spq_prod_idx; + + bnx2x_func_init(sc, &func_init); + + memset(&sc->cmng, 0, sizeof(struct cmng_struct_per_port)); + + /* + * Congestion management values depend on the link rate. + * There is no active link so initial link rate is set to 10Gbps. + * When the link comes up the congestion management values are + * re-calculated according to the actual link rate. + */ + sc->link_vars.line_speed = SPEED_10000; + bnx2x_cmng_fns_init(sc, TRUE, bnx2x_get_cmng_fns_mode(sc)); + + /* Only the PMF sets the HW */ + if (sc->port.pmf) { + storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc)); + } + + /* init Event Queue - PCI bus guarantees correct endainity */ + eq_data.base_addr.hi = U64_HI(sc->eq_dma.paddr); + eq_data.base_addr.lo = U64_LO(sc->eq_dma.paddr); + eq_data.producer = sc->eq_prod; + eq_data.index_id = HC_SP_INDEX_EQ_CONS; + eq_data.sb_id = DEF_SB_ID; + storm_memset_eq_data(sc, &eq_data, SC_FUNC(sc)); +} + +static void bnx2x_hc_int_enable(struct bnx2x_softc *sc) +{ + int port = SC_PORT(sc); + uint32_t addr = (port) ? 
HC_REG_CONFIG_1 : HC_REG_CONFIG_0; + uint32_t val = REG_RD(sc, addr); + uint8_t msix = (sc->interrupt_mode == INTR_MODE_MSIX) + || (sc->interrupt_mode == INTR_MODE_SINGLE_MSIX); + uint8_t single_msix = (sc->interrupt_mode == INTR_MODE_SINGLE_MSIX); + uint8_t msi = (sc->interrupt_mode == INTR_MODE_MSI); + + if (msix) { + val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | + HC_CONFIG_0_REG_INT_LINE_EN_0); + val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | + HC_CONFIG_0_REG_ATTN_BIT_EN_0); + if (single_msix) { + val |= HC_CONFIG_0_REG_SINGLE_ISR_EN_0; + } + } else if (msi) { + val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0; + val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | + HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | + HC_CONFIG_0_REG_ATTN_BIT_EN_0); + } else { + val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | + HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | + HC_CONFIG_0_REG_INT_LINE_EN_0 | + HC_CONFIG_0_REG_ATTN_BIT_EN_0); + + REG_WR(sc, addr, val); + + val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0; + } + + REG_WR(sc, addr, val); + + /* ensure that HC_CONFIG is written before leading/trailing edge config */ + mb(); + + /* init leading/trailing edge */ + if (IS_MF(sc)) { + val = (0xee0f | (1 << (SC_VN(sc) + 4))); + if (sc->port.pmf) { + /* enable nig and gpio3 attention */ + val |= 0x1100; + } + } else { + val = 0xffff; + } + + REG_WR(sc, (HC_REG_TRAILING_EDGE_0 + port * 8), val); + REG_WR(sc, (HC_REG_LEADING_EDGE_0 + port * 8), val); + + /* make sure that interrupts are indeed enabled from here on */ + mb(); +} + +static void bnx2x_igu_int_enable(struct bnx2x_softc *sc) +{ + uint32_t val; + uint8_t msix = (sc->interrupt_mode == INTR_MODE_MSIX) + || (sc->interrupt_mode == INTR_MODE_SINGLE_MSIX); + uint8_t single_msix = (sc->interrupt_mode == INTR_MODE_SINGLE_MSIX); + uint8_t msi = (sc->interrupt_mode == INTR_MODE_MSI); + + val = REG_RD(sc, IGU_REG_PF_CONFIGURATION); + + if (msix) { + val &= ~(IGU_PF_CONF_INT_LINE_EN | IGU_PF_CONF_SINGLE_ISR_EN); + val |= (IGU_PF_CONF_MSI_MSIX_EN | IGU_PF_CONF_ATTN_BIT_EN); + if (single_msix) { + val |= IGU_PF_CONF_SINGLE_ISR_EN; + } + } else if (msi) { + val &= ~IGU_PF_CONF_INT_LINE_EN; + val |= (IGU_PF_CONF_MSI_MSIX_EN | + IGU_PF_CONF_ATTN_BIT_EN | IGU_PF_CONF_SINGLE_ISR_EN); + } else { + val &= ~IGU_PF_CONF_MSI_MSIX_EN; + val |= (IGU_PF_CONF_INT_LINE_EN | + IGU_PF_CONF_ATTN_BIT_EN | IGU_PF_CONF_SINGLE_ISR_EN); + } + + /* clean previous status - need to configure igu prior to ack */ + if ((!msix) || single_msix) { + REG_WR(sc, IGU_REG_PF_CONFIGURATION, val); + bnx2x_ack_int(sc); + } + + val |= IGU_PF_CONF_FUNC_EN; + + PMD_DRV_LOG(DEBUG, sc, "write 0x%x to IGU mode %s", + val, ((msix) ? "MSI-X" : ((msi) ? "MSI" : "INTx"))); + + REG_WR(sc, IGU_REG_PF_CONFIGURATION, val); + + mb(); + + /* init leading/trailing edge */ + if (IS_MF(sc)) { + val = (0xee0f | (1 << (SC_VN(sc) + 4))); + if (sc->port.pmf) { + /* enable nig and gpio3 attention */ + val |= 0x1100; + } + } else { + val = 0xffff; + } + + REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, val); + REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, val); + + /* make sure that interrupts are indeed enabled from here on */ + mb(); +} + +static void bnx2x_int_enable(struct bnx2x_softc *sc) +{ + if (sc->devinfo.int_block == INT_BLOCK_HC) { + bnx2x_hc_int_enable(sc); + } else { + bnx2x_igu_int_enable(sc); + } +} + +static void bnx2x_hc_int_disable(struct bnx2x_softc *sc) +{ + int port = SC_PORT(sc); + uint32_t addr = (port) ? 
HC_REG_CONFIG_1 : HC_REG_CONFIG_0; + uint32_t val = REG_RD(sc, addr); + + val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | + HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | + HC_CONFIG_0_REG_INT_LINE_EN_0 | HC_CONFIG_0_REG_ATTN_BIT_EN_0); + /* flush all outstanding writes */ + mb(); + + REG_WR(sc, addr, val); + if (REG_RD(sc, addr) != val) { + PMD_DRV_LOG(ERR, sc, "proper val not read from HC IGU!"); + } +} + +static void bnx2x_igu_int_disable(struct bnx2x_softc *sc) +{ + uint32_t val = REG_RD(sc, IGU_REG_PF_CONFIGURATION); + + val &= ~(IGU_PF_CONF_MSI_MSIX_EN | + IGU_PF_CONF_INT_LINE_EN | IGU_PF_CONF_ATTN_BIT_EN); + + PMD_DRV_LOG(DEBUG, sc, "write %x to IGU", val); + + /* flush all outstanding writes */ + mb(); + + REG_WR(sc, IGU_REG_PF_CONFIGURATION, val); + if (REG_RD(sc, IGU_REG_PF_CONFIGURATION) != val) { + PMD_DRV_LOG(ERR, sc, "proper val not read from IGU!"); + } +} + +static void bnx2x_int_disable(struct bnx2x_softc *sc) +{ + if (sc->devinfo.int_block == INT_BLOCK_HC) { + bnx2x_hc_int_disable(sc); + } else { + bnx2x_igu_int_disable(sc); + } +} + +static void bnx2x_nic_init(struct bnx2x_softc *sc, int load_code) +{ + int i; + + PMD_INIT_FUNC_TRACE(sc); + + for (i = 0; i < sc->num_queues; i++) { + bnx2x_init_eth_fp(sc, i); + } + + rmb(); /* ensure status block indices were read */ + + bnx2x_init_rx_rings(sc); + bnx2x_init_tx_rings(sc); + + if (IS_VF(sc)) { + bnx2x_memset_stats(sc); + return; + } + + /* initialize MOD_ABS interrupts */ + elink_init_mod_abs_int(sc, &sc->link_vars, + sc->devinfo.chip_id, + sc->devinfo.shmem_base, + sc->devinfo.shmem2_base, SC_PORT(sc)); + + bnx2x_init_def_sb(sc); + bnx2x_update_dsb_idx(sc); + bnx2x_init_sp_ring(sc); + bnx2x_init_eq_ring(sc); + bnx2x_init_internal(sc, load_code); + bnx2x_pf_init(sc); + bnx2x_stats_init(sc); + + /* flush all before enabling interrupts */ + mb(); + + bnx2x_int_enable(sc); + + /* check for SPIO5 */ + bnx2x_attn_int_deasserted0(sc, + REG_RD(sc, + (MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + + SC_PORT(sc) * 4)) & + AEU_INPUTS_ATTN_BITS_SPIO5); +} + +static void bnx2x_init_objs(struct bnx2x_softc *sc) +{ + /* mcast rules must be added to tx if tx switching is enabled */ + ecore_obj_type o_type; + if (sc->flags & BNX2X_TX_SWITCHING) + o_type = ECORE_OBJ_TYPE_RX_TX; + else + o_type = ECORE_OBJ_TYPE_RX; + + /* RX_MODE controlling object */ + ecore_init_rx_mode_obj(sc, &sc->rx_mode_obj); + + /* multicast configuration controlling object */ + ecore_init_mcast_obj(sc, + &sc->mcast_obj, + sc->fp[0].cl_id, + sc->fp[0].index, + SC_FUNC(sc), + SC_FUNC(sc), + BNX2X_SP(sc, mcast_rdata), + (rte_iova_t)BNX2X_SP_MAPPING(sc, mcast_rdata), + ECORE_FILTER_MCAST_PENDING, + &sc->sp_state, o_type); + + /* Setup CAM credit pools */ + ecore_init_mac_credit_pool(sc, + &sc->macs_pool, + SC_FUNC(sc), + CHIP_IS_E1x(sc) ? VNICS_PER_PORT(sc) : + VNICS_PER_PATH(sc)); + + ecore_init_vlan_credit_pool(sc, + &sc->vlans_pool, + SC_ABS_FUNC(sc) >> 1, + CHIP_IS_E1x(sc) ? VNICS_PER_PORT(sc) : + VNICS_PER_PATH(sc)); + + /* RSS configuration object */ + ecore_init_rss_config_obj(sc, &sc->rss_conf_obj, sc->fp->cl_id, + sc->fp->index, SC_FUNC(sc), SC_FUNC(sc), + BNX2X_SP(sc, rss_rdata), + (rte_iova_t)BNX2X_SP_MAPPING(sc, rss_rdata), + ECORE_FILTER_RSS_CONF_PENDING, &sc->sp_state, + ECORE_OBJ_TYPE_RX); +} + +/* + * Initialize the function. This must be called before sending CLIENT_SETUP + * for the first client. 
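+ * In this driver bnx2x_nic_load() below issues the START ramrod through
+ * this helper before the leading queue SETUP (bnx2x_setup_leading()).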
+ */ +static int bnx2x_func_start(struct bnx2x_softc *sc) +{ + struct ecore_func_state_params func_params = { NULL }; + struct ecore_func_start_params *start_params = + &func_params.params.start; + + /* Prepare parameters for function state transitions */ + bnx2x_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); + + func_params.f_obj = &sc->func_obj; + func_params.cmd = ECORE_F_CMD_START; + + /* Function parameters */ + start_params->mf_mode = sc->devinfo.mf_info.mf_mode; + start_params->sd_vlan_tag = OVLAN(sc); + + if (CHIP_IS_E2(sc) || CHIP_IS_E3(sc)) { + start_params->network_cos_mode = STATIC_COS; + } else { /* CHIP_IS_E1X */ + start_params->network_cos_mode = FW_WRR; + } + + return ecore_func_state_change(sc, &func_params); +} + +static int bnx2x_set_power_state(struct bnx2x_softc *sc, uint8_t state) +{ + uint16_t pmcsr; + + /* If there is no power capability, silently succeed */ + if (!(sc->devinfo.pcie_cap_flags & BNX2X_PM_CAPABLE_FLAG)) { + PMD_DRV_LOG(INFO, sc, "No power capability"); + return 0; + } + + pci_read(sc, (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS), &pmcsr, + 2); + + switch (state) { + case PCI_PM_D0: + pci_write_word(sc, + (sc->devinfo.pcie_pm_cap_reg + + PCIR_POWER_STATUS), + ((pmcsr & ~PCIM_PSTAT_DMASK) | PCIM_PSTAT_PME)); + + if (pmcsr & PCIM_PSTAT_DMASK) { + /* delay required during transition out of D3hot */ + DELAY(20000); + } + + break; + + case PCI_PM_D3hot: + /* don't shut down the power for emulation and FPGA */ + if (CHIP_REV_IS_SLOW(sc)) { + return 0; + } + + pmcsr &= ~PCIM_PSTAT_DMASK; + pmcsr |= PCIM_PSTAT_D3; + + if (sc->wol) { + pmcsr |= PCIM_PSTAT_PMEENABLE; + } + + pci_write_long(sc, + (sc->devinfo.pcie_pm_cap_reg + + PCIR_POWER_STATUS), pmcsr); + + /* + * No more memory access after this point until device is brought back + * to D0 state. + */ + break; + + default: + PMD_DRV_LOG(NOTICE, sc, "Can't support PCI power state = %d", + state); + return -1; + } + + return 0; +} + +/* return true if succeeded to acquire the lock */ +static uint8_t bnx2x_trylock_hw_lock(struct bnx2x_softc *sc, uint32_t resource) +{ + uint32_t lock_status; + uint32_t resource_bit = (1 << resource); + int func = SC_FUNC(sc); + uint32_t hw_lock_control_reg; + + /* Validating that the resource is within range */ + if (resource > HW_LOCK_MAX_RESOURCE_VALUE) { + PMD_DRV_LOG(INFO, sc, + "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)", + resource, HW_LOCK_MAX_RESOURCE_VALUE); + return FALSE; + } + + if (func <= 5) { + hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func * 8); + } else { + hw_lock_control_reg = + (MISC_REG_DRIVER_CONTROL_7 + (func - 6) * 8); + } + + /* try to acquire the lock */ + REG_WR(sc, hw_lock_control_reg + 4, resource_bit); + lock_status = REG_RD(sc, hw_lock_control_reg); + if (lock_status & resource_bit) { + return TRUE; + } + + PMD_DRV_LOG(NOTICE, sc, "Failed to get a resource lock 0x%x", resource); + + return FALSE; +} + +/* + * Get the recovery leader resource id according to the engine this function + * belongs to. Currently only only 2 engines is supported. 
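+ * (Path/engine 0 maps to HW_LOCK_RESOURCE_RECOVERY_LEADER_0 and path/engine
+ * 1 to HW_LOCK_RESOURCE_RECOVERY_LEADER_1, as implemented just below.)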
+ */ +static int bnx2x_get_leader_lock_resource(struct bnx2x_softc *sc) +{ + if (SC_PATH(sc)) { + return HW_LOCK_RESOURCE_RECOVERY_LEADER_1; + } else { + return HW_LOCK_RESOURCE_RECOVERY_LEADER_0; + } +} + +/* try to acquire a leader lock for current engine */ +static uint8_t bnx2x_trylock_leader_lock(struct bnx2x_softc *sc) +{ + return bnx2x_trylock_hw_lock(sc, bnx2x_get_leader_lock_resource(sc)); +} + +static int bnx2x_release_leader_lock(struct bnx2x_softc *sc) +{ + return bnx2x_release_hw_lock(sc, bnx2x_get_leader_lock_resource(sc)); +} + +/* close gates #2, #3 and #4 */ +static void bnx2x_set_234_gates(struct bnx2x_softc *sc, uint8_t close) +{ + uint32_t val; + + /* gates #2 and #4a are closed/opened */ + /* #4 */ + REG_WR(sc, PXP_REG_HST_DISCARD_DOORBELLS, ! !close); + /* #2 */ + REG_WR(sc, PXP_REG_HST_DISCARD_INTERNAL_WRITES, ! !close); + + /* #3 */ + if (CHIP_IS_E1x(sc)) { +/* prevent interrupts from HC on both ports */ + val = REG_RD(sc, HC_REG_CONFIG_1); + if (close) + REG_WR(sc, HC_REG_CONFIG_1, (val & ~(uint32_t) + HC_CONFIG_1_REG_BLOCK_DISABLE_1)); + else + REG_WR(sc, HC_REG_CONFIG_1, + (val | HC_CONFIG_1_REG_BLOCK_DISABLE_1)); + + val = REG_RD(sc, HC_REG_CONFIG_0); + if (close) + REG_WR(sc, HC_REG_CONFIG_0, (val & ~(uint32_t) + HC_CONFIG_0_REG_BLOCK_DISABLE_0)); + else + REG_WR(sc, HC_REG_CONFIG_0, + (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0)); + + } else { +/* Prevent incoming interrupts in IGU */ + val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION); + + if (close) + REG_WR(sc, IGU_REG_BLOCK_CONFIGURATION, + (val & ~(uint32_t) + IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE)); + else + REG_WR(sc, IGU_REG_BLOCK_CONFIGURATION, + (val | + IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE)); + } + + wmb(); +} + +/* poll for pending writes bit, it should get cleared in no more than 1s */ +static int bnx2x_er_poll_igu_vq(struct bnx2x_softc *sc) +{ + uint32_t cnt = 1000; + uint32_t pend_bits = 0; + + do { + pend_bits = REG_RD(sc, IGU_REG_PENDING_BITS_STATUS); + + if (pend_bits == 0) { + break; + } + + DELAY(1000); + } while (cnt-- > 0); + + if (cnt <= 0) { + PMD_DRV_LOG(NOTICE, sc, "Still pending IGU requests bits=0x%08x!", + pend_bits); + return -1; + } + + return 0; +} + +#define SHARED_MF_CLP_MAGIC 0x80000000 /* 'magic' bit */ + +static void bnx2x_clp_reset_prep(struct bnx2x_softc *sc, uint32_t * magic_val) +{ + /* Do some magic... */ + uint32_t val = MFCFG_RD(sc, shared_mf_config.clp_mb); + *magic_val = val & SHARED_MF_CLP_MAGIC; + MFCFG_WR(sc, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC); +} + +/* restore the value of the 'magic' bit */ +static void bnx2x_clp_reset_done(struct bnx2x_softc *sc, uint32_t magic_val) +{ + /* Restore the 'magic' bit value... 
*/ + uint32_t val = MFCFG_RD(sc, shared_mf_config.clp_mb); + MFCFG_WR(sc, shared_mf_config.clp_mb, + (val & (~SHARED_MF_CLP_MAGIC)) | magic_val); +} + +/* prepare for MCP reset, takes care of CLP configurations */ +static void bnx2x_reset_mcp_prep(struct bnx2x_softc *sc, uint32_t * magic_val) +{ + uint32_t shmem; + uint32_t validity_offset; + + /* set `magic' bit in order to save MF config */ + bnx2x_clp_reset_prep(sc, magic_val); + + /* get shmem offset */ + shmem = REG_RD(sc, MISC_REG_SHARED_MEM_ADDR); + validity_offset = + offsetof(struct shmem_region, validity_map[SC_PORT(sc)]); + + /* Clear validity map flags */ + if (shmem > 0) { + REG_WR(sc, shmem + validity_offset, 0); + } +} + +#define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */ +#define MCP_ONE_TIMEOUT 100 /* 100 ms */ + +static void bnx2x_mcp_wait_one(struct bnx2x_softc *sc) +{ + /* special handling for emulation and FPGA (10 times longer) */ + if (CHIP_REV_IS_SLOW(sc)) { + DELAY((MCP_ONE_TIMEOUT * 10) * 1000); + } else { + DELAY((MCP_ONE_TIMEOUT) * 1000); + } +} + +/* initialize shmem_base and waits for validity signature to appear */ +static int bnx2x_init_shmem(struct bnx2x_softc *sc) +{ + int cnt = 0; + uint32_t val = 0; + + do { + sc->devinfo.shmem_base = + sc->link_params.shmem_base = + REG_RD(sc, MISC_REG_SHARED_MEM_ADDR); + + if (sc->devinfo.shmem_base) { + val = SHMEM_RD(sc, validity_map[SC_PORT(sc)]); + if (val & SHR_MEM_VALIDITY_MB) + return 0; + } + + bnx2x_mcp_wait_one(sc); + + } while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT)); + + PMD_DRV_LOG(NOTICE, sc, "BAD MCP validity signature"); + + return -1; +} + +static int bnx2x_reset_mcp_comp(struct bnx2x_softc *sc, uint32_t magic_val) +{ + int rc = bnx2x_init_shmem(sc); + + /* Restore the `magic' bit value */ + bnx2x_clp_reset_done(sc, magic_val); + + return rc; +} + +static void bnx2x_pxp_prep(struct bnx2x_softc *sc) +{ + REG_WR(sc, PXP2_REG_RD_START_INIT, 0); + REG_WR(sc, PXP2_REG_RQ_RBC_DONE, 0); + wmb(); +} + +/* + * Reset the whole chip except for: + * - PCIE core + * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by one reset bit) + * - IGU + * - MISC (including AEU) + * - GRC + * - RBCN, RBCP + */ +static void bnx2x_process_kill_chip_reset(struct bnx2x_softc *sc, uint8_t global) +{ + uint32_t not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2; + uint32_t global_bits2, stay_reset2; + + /* + * Bits that have to be set in reset_mask2 if we want to reset 'global' + * (per chip) blocks. + */ + global_bits2 = + MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU | + MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE; + + /* + * Don't reset the following blocks. + * Important: per port blocks (such as EMAC, BMAC, UMAC) can't be + * reset, as in 4 port device they might still be owned + * by the MCP (there is only one leader per path). 
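+ * (That is why the EMAC0/EMAC1, BMAC0/BMAC1 and UMAC0/UMAC1 reset bits are
+ * collected into not_reset_mask2 below rather than left in the reset mask.)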
+ */ + not_reset_mask1 = + MISC_REGISTERS_RESET_REG_1_RST_HC | + MISC_REGISTERS_RESET_REG_1_RST_PXPV | + MISC_REGISTERS_RESET_REG_1_RST_PXP; + + not_reset_mask2 = + MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO | + MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE | + MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE | + MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE | + MISC_REGISTERS_RESET_REG_2_RST_RBCN | + MISC_REGISTERS_RESET_REG_2_RST_GRC | + MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE | + MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B | + MISC_REGISTERS_RESET_REG_2_RST_ATC | + MISC_REGISTERS_RESET_REG_2_PGLC | + MISC_REGISTERS_RESET_REG_2_RST_BMAC0 | + MISC_REGISTERS_RESET_REG_2_RST_BMAC1 | + MISC_REGISTERS_RESET_REG_2_RST_EMAC0 | + MISC_REGISTERS_RESET_REG_2_RST_EMAC1 | + MISC_REGISTERS_RESET_REG_2_UMAC0 | MISC_REGISTERS_RESET_REG_2_UMAC1; + + /* + * Keep the following blocks in reset: + * - all xxMACs are handled by the elink code. + */ + stay_reset2 = + MISC_REGISTERS_RESET_REG_2_XMAC | + MISC_REGISTERS_RESET_REG_2_XMAC_SOFT; + + /* Full reset masks according to the chip */ + reset_mask1 = 0xffffffff; + + if (CHIP_IS_E1H(sc)) + reset_mask2 = 0x1ffff; + else if (CHIP_IS_E2(sc)) + reset_mask2 = 0xfffff; + else /* CHIP_IS_E3 */ + reset_mask2 = 0x3ffffff; + + /* Don't reset global blocks unless we need to */ + if (!global) + reset_mask2 &= ~global_bits2; + + /* + * In case of attention in the QM, we need to reset PXP + * (MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR) before QM + * because otherwise QM reset would release 'close the gates' shortly + * before resetting the PXP, then the PSWRQ would send a write + * request to PGLUE. Then when PXP is reset, PGLUE would try to + * read the payload data from PSWWR, but PSWWR would not + * respond. The write queue in PGLUE would stuck, dmae commands + * would not return. Therefore it's important to reset the second + * reset register (containing the + * MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR bit) before the + * first one (containing the MISC_REGISTERS_RESET_REG_1_RST_QM + * bit). + */ + REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, + reset_mask2 & (~not_reset_mask2)); + + REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, + reset_mask1 & (~not_reset_mask1)); + + mb(); + wmb(); + + REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, + reset_mask2 & (~stay_reset2)); + + mb(); + wmb(); + + REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1); + wmb(); +} + +static int bnx2x_process_kill(struct bnx2x_softc *sc, uint8_t global) +{ + int cnt = 1000; + uint32_t val = 0; + uint32_t sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2; + uint32_t tags_63_32 = 0; + + /* Empty the Tetris buffer, wait for 1s */ + do { + sr_cnt = REG_RD(sc, PXP2_REG_RD_SR_CNT); + blk_cnt = REG_RD(sc, PXP2_REG_RD_BLK_CNT); + port_is_idle_0 = REG_RD(sc, PXP2_REG_RD_PORT_IS_IDLE_0); + port_is_idle_1 = REG_RD(sc, PXP2_REG_RD_PORT_IS_IDLE_1); + pgl_exp_rom2 = REG_RD(sc, PXP2_REG_PGL_EXP_ROM2); + if (CHIP_IS_E3(sc)) { + tags_63_32 = REG_RD(sc, PGLUE_B_REG_TAGS_63_32); + } + + if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) && + ((port_is_idle_0 & 0x1) == 0x1) && + ((port_is_idle_1 & 0x1) == 0x1) && + (pgl_exp_rom2 == 0xffffffff) && + (!CHIP_IS_E3(sc) || (tags_63_32 == 0xffffffff))) + break; + DELAY(1000); + } while (cnt-- > 0); + + if (cnt <= 0) { + PMD_DRV_LOG(NOTICE, sc, + "ERROR: Tetris buffer didn't get empty or there " + "are still outstanding read requests after 1s! 
" + "sr_cnt=0x%08x, blk_cnt=0x%08x, port_is_idle_0=0x%08x, " + "port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x", + sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, + pgl_exp_rom2); + return -1; + } + + mb(); + + /* Close gates #2, #3 and #4 */ + bnx2x_set_234_gates(sc, TRUE); + + /* Poll for IGU VQs for 57712 and newer chips */ + if (!CHIP_IS_E1x(sc) && bnx2x_er_poll_igu_vq(sc)) { + return -1; + } + + /* clear "unprepared" bit */ + REG_WR(sc, MISC_REG_UNPREPARED, 0); + mb(); + + /* Make sure all is written to the chip before the reset */ + wmb(); + + /* + * Wait for 1ms to empty GLUE and PCI-E core queues, + * PSWHST, GRC and PSWRD Tetris buffer. + */ + DELAY(1000); + + /* Prepare to chip reset: */ + /* MCP */ + if (global) { + bnx2x_reset_mcp_prep(sc, &val); + } + + /* PXP */ + bnx2x_pxp_prep(sc); + mb(); + + /* reset the chip */ + bnx2x_process_kill_chip_reset(sc, global); + mb(); + + /* Recover after reset: */ + /* MCP */ + if (global && bnx2x_reset_mcp_comp(sc, val)) { + return -1; + } + + /* Open the gates #2, #3 and #4 */ + bnx2x_set_234_gates(sc, FALSE); + + return 0; +} + +static int bnx2x_leader_reset(struct bnx2x_softc *sc) +{ + int rc = 0; + uint8_t global = bnx2x_reset_is_global(sc); + uint32_t load_code; + + /* + * If not going to reset MCP, load "fake" driver to reset HW while + * driver is owner of the HW. + */ + if (!global && !BNX2X_NOMCP(sc)) { + load_code = bnx2x_fw_command(sc, DRV_MSG_CODE_LOAD_REQ, + DRV_MSG_CODE_LOAD_REQ_WITH_LFA); + if (!load_code) { + PMD_DRV_LOG(NOTICE, sc, "MCP response failure, aborting"); + rc = -1; + goto exit_leader_reset; + } + + if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) && + (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) { + PMD_DRV_LOG(NOTICE, sc, + "MCP unexpected response, aborting"); + rc = -1; + goto exit_leader_reset2; + } + + load_code = bnx2x_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0); + if (!load_code) { + PMD_DRV_LOG(NOTICE, sc, "MCP response failure, aborting"); + rc = -1; + goto exit_leader_reset2; + } + } + + /* try to recover after the failure */ + if (bnx2x_process_kill(sc, global)) { + PMD_DRV_LOG(NOTICE, sc, "Something bad occurred on engine %d!", + SC_PATH(sc)); + rc = -1; + goto exit_leader_reset2; + } + + /* + * Clear the RESET_IN_PROGRESS and RESET_GLOBAL bits and update the driver + * state. + */ + bnx2x_set_reset_done(sc); + if (global) { + bnx2x_clear_reset_global(sc); + } + +exit_leader_reset2: + + /* unload "fake driver" if it was loaded */ + if (!global &&!BNX2X_NOMCP(sc)) { + bnx2x_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0); + bnx2x_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 0); + } + +exit_leader_reset: + + sc->is_leader = 0; + bnx2x_release_leader_lock(sc); + + mb(); + return rc; +} + +/* + * prepare INIT transition, parameters configured: + * - HC configuration + * - Queue's CDU context + */ +static void +bnx2x_pf_q_prep_init(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp, + struct ecore_queue_init_params *init_params) +{ + uint8_t cos; + int cxt_index, cxt_offset; + + bnx2x_set_bit(ECORE_Q_FLG_HC, &init_params->rx.flags); + bnx2x_set_bit(ECORE_Q_FLG_HC, &init_params->tx.flags); + + bnx2x_set_bit(ECORE_Q_FLG_HC_EN, &init_params->rx.flags); + bnx2x_set_bit(ECORE_Q_FLG_HC_EN, &init_params->tx.flags); + + /* HC rate */ + init_params->rx.hc_rate = + sc->hc_rx_ticks ? (1000000 / sc->hc_rx_ticks) : 0; + init_params->tx.hc_rate = + sc->hc_tx_ticks ? 
(1000000 / sc->hc_tx_ticks) : 0; + + /* FW SB ID */ + init_params->rx.fw_sb_id = init_params->tx.fw_sb_id = fp->fw_sb_id; + + /* CQ index among the SB indices */ + init_params->rx.sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS; + init_params->tx.sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS; + + /* set maximum number of COSs supported by this queue */ + init_params->max_cos = sc->max_cos; + + /* set the context pointers queue object */ + for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) { + cxt_index = fp->index / ILT_PAGE_CIDS; + cxt_offset = fp->index - (cxt_index * ILT_PAGE_CIDS); + init_params->cxts[cos] = + &sc->context[cxt_index].vcxt[cxt_offset].eth; + } +} + +/* set flags that are common for the Tx-only and not normal connections */ +static unsigned long +bnx2x_get_common_flags(struct bnx2x_softc *sc, uint8_t zero_stats) +{ + unsigned long flags = 0; + + /* PF driver will always initialize the Queue to an ACTIVE state */ + bnx2x_set_bit(ECORE_Q_FLG_ACTIVE, &flags); + + /* + * tx only connections collect statistics (on the same index as the + * parent connection). The statistics are zeroed when the parent + * connection is initialized. + */ + + bnx2x_set_bit(ECORE_Q_FLG_STATS, &flags); + if (zero_stats) { + bnx2x_set_bit(ECORE_Q_FLG_ZERO_STATS, &flags); + } + + /* + * tx only connections can support tx-switching, though their + * CoS-ness doesn't survive the loopback + */ + if (sc->flags & BNX2X_TX_SWITCHING) { + bnx2x_set_bit(ECORE_Q_FLG_TX_SWITCH, &flags); + } + + bnx2x_set_bit(ECORE_Q_FLG_PCSUM_ON_PKT, &flags); + + return flags; +} + +static unsigned long bnx2x_get_q_flags(struct bnx2x_softc *sc, uint8_t leading) +{ + unsigned long flags = 0; + + if (IS_MF_SD(sc)) { + bnx2x_set_bit(ECORE_Q_FLG_OV, &flags); + } + + if (leading) { + bnx2x_set_bit(ECORE_Q_FLG_LEADING_RSS, &flags); + bnx2x_set_bit(ECORE_Q_FLG_MCAST, &flags); + } + + bnx2x_set_bit(ECORE_Q_FLG_VLAN, &flags); + + /* merge with common flags */ + return flags | bnx2x_get_common_flags(sc, TRUE); +} + +static void +bnx2x_pf_q_prep_general(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp, + struct ecore_general_setup_params *gen_init, uint8_t cos) +{ + gen_init->stat_id = bnx2x_stats_id(fp); + gen_init->spcl_id = fp->cl_id; + gen_init->mtu = sc->mtu; + gen_init->cos = cos; +} + +static void +bnx2x_pf_rx_q_prep(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp, + struct rxq_pause_params *pause, + struct ecore_rxq_setup_params *rxq_init) +{ + struct bnx2x_rx_queue *rxq; + + rxq = sc->rx_queues[fp->index]; + if (!rxq) { + PMD_RX_LOG(ERR, "RX queue is NULL"); + return; + } + /* pause */ + pause->bd_th_lo = BD_TH_LO(sc); + pause->bd_th_hi = BD_TH_HI(sc); + + pause->rcq_th_lo = RCQ_TH_LO(sc); + pause->rcq_th_hi = RCQ_TH_HI(sc); + + /* validate rings have enough entries to cross high thresholds */ + if (sc->dropless_fc && + pause->bd_th_hi + FW_PREFETCH_CNT > sc->rx_ring_size) { + PMD_DRV_LOG(WARNING, sc, "rx bd ring threshold limit"); + } + + if (sc->dropless_fc && + pause->rcq_th_hi + FW_PREFETCH_CNT > USABLE_RCQ_ENTRIES(rxq)) { + PMD_DRV_LOG(WARNING, sc, "rcq ring threshold limit"); + } + + pause->pri_map = 1; + + /* rxq setup */ + rxq_init->dscr_map = (rte_iova_t)rxq->rx_ring_phys_addr; + rxq_init->rcq_map = (rte_iova_t)rxq->cq_ring_phys_addr; + rxq_init->rcq_np_map = (rte_iova_t)(rxq->cq_ring_phys_addr + + BNX2X_PAGE_SIZE); + + /* + * This should be a maximum number of data bytes that may be + * placed on the BD (not including paddings). 
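+ * For illustration only: with a hypothetical 2048-byte rx_buf_size this
+ * works out to 2048 - IP_HEADER_ALIGNMENT_PADDING.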
+ */ + rxq_init->buf_sz = (fp->rx_buf_size - IP_HEADER_ALIGNMENT_PADDING); + + rxq_init->cl_qzone_id = fp->cl_qzone_id; + rxq_init->rss_engine_id = SC_FUNC(sc); + rxq_init->mcast_engine_id = SC_FUNC(sc); + + rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT; + rxq_init->fw_sb_id = fp->fw_sb_id; + + rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS; + + /* + * configure silent vlan removal + * if multi function mode is afex, then mask default vlan + */ + if (IS_MF_AFEX(sc)) { + rxq_init->silent_removal_value = + sc->devinfo.mf_info.afex_def_vlan_tag; + rxq_init->silent_removal_mask = EVL_VLID_MASK; + } +} + +static void +bnx2x_pf_tx_q_prep(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp, + struct ecore_txq_setup_params *txq_init, uint8_t cos) +{ + struct bnx2x_tx_queue *txq = fp->sc->tx_queues[fp->index]; + + if (!txq) { + PMD_TX_LOG(ERR, "ERROR: TX queue is NULL"); + return; + } + txq_init->dscr_map = (rte_iova_t)txq->tx_ring_phys_addr; + txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos; + txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW; + txq_init->fw_sb_id = fp->fw_sb_id; + + /* + * set the TSS leading client id for TX classfication to the + * leading RSS client id + */ + txq_init->tss_leading_cl_id = BNX2X_FP(sc, 0, cl_id); +} + +/* + * This function performs 2 steps in a queue state machine: + * 1) RESET->INIT + * 2) INIT->SETUP + */ +static int +bnx2x_setup_queue(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp, uint8_t leading) +{ + struct ecore_queue_state_params q_params = { NULL }; + struct ecore_queue_setup_params *setup_params = &q_params.params.setup; + int rc; + + PMD_DRV_LOG(DEBUG, sc, "setting up queue %d", fp->index); + + bnx2x_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0); + + q_params.q_obj = &BNX2X_SP_OBJ(sc, fp).q_obj; + + /* we want to wait for completion in this context */ + bnx2x_set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); + + /* prepare the INIT parameters */ + bnx2x_pf_q_prep_init(sc, fp, &q_params.params.init); + + /* Set the command */ + q_params.cmd = ECORE_Q_CMD_INIT; + + /* Change the state to INIT */ + rc = ecore_queue_state_change(sc, &q_params); + if (rc) { + PMD_DRV_LOG(NOTICE, sc, "Queue(%d) INIT failed", fp->index); + return rc; + } + + PMD_DRV_LOG(DEBUG, sc, "init complete"); + + /* now move the Queue to the SETUP state */ + memset(setup_params, 0, sizeof(*setup_params)); + + /* set Queue flags */ + setup_params->flags = bnx2x_get_q_flags(sc, leading); + + /* set general SETUP parameters */ + bnx2x_pf_q_prep_general(sc, fp, &setup_params->gen_params, + FIRST_TX_COS_INDEX); + + bnx2x_pf_rx_q_prep(sc, fp, + &setup_params->pause_params, + &setup_params->rxq_params); + + bnx2x_pf_tx_q_prep(sc, fp, &setup_params->txq_params, FIRST_TX_COS_INDEX); + + /* Set the command */ + q_params.cmd = ECORE_Q_CMD_SETUP; + + /* change the state to SETUP */ + rc = ecore_queue_state_change(sc, &q_params); + if (rc) { + PMD_DRV_LOG(NOTICE, sc, "Queue(%d) SETUP failed", fp->index); + return rc; + } + + return rc; +} + +static int bnx2x_setup_leading(struct bnx2x_softc *sc) +{ + if (IS_PF(sc)) + return bnx2x_setup_queue(sc, &sc->fp[0], TRUE); + else /* VF */ + return bnx2x_vf_setup_queue(sc, &sc->fp[0], TRUE); +} + +static int +bnx2x_config_rss_pf(struct bnx2x_softc *sc, struct ecore_rss_config_obj *rss_obj, + uint8_t config_hash) +{ + struct ecore_config_rss_params params = { NULL }; + uint32_t i; + + /* + * Although RSS is meaningless when there is a single HW queue we + * still need it enabled in order to have HW Rx hash generated. 
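+ * With a single ETH queue every indirection table entry built in
+ * bnx2x_init_rss_pf() points at fp[0]'s client id, yet the configuration
+ * sent here still programs the hash types (and optionally the keys), so
+ * the HW keeps generating an Rx hash.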
+ */ + + params.rss_obj = rss_obj; + + bnx2x_set_bit(RAMROD_COMP_WAIT, ¶ms.ramrod_flags); + + bnx2x_set_bit(ECORE_RSS_MODE_REGULAR, ¶ms.rss_flags); + + /* RSS configuration */ + bnx2x_set_bit(ECORE_RSS_IPV4, ¶ms.rss_flags); + bnx2x_set_bit(ECORE_RSS_IPV4_TCP, ¶ms.rss_flags); + bnx2x_set_bit(ECORE_RSS_IPV6, ¶ms.rss_flags); + bnx2x_set_bit(ECORE_RSS_IPV6_TCP, ¶ms.rss_flags); + if (rss_obj->udp_rss_v4) { + bnx2x_set_bit(ECORE_RSS_IPV4_UDP, ¶ms.rss_flags); + } + if (rss_obj->udp_rss_v6) { + bnx2x_set_bit(ECORE_RSS_IPV6_UDP, ¶ms.rss_flags); + } + + /* Hash bits */ + params.rss_result_mask = MULTI_MASK; + + rte_memcpy(params.ind_table, rss_obj->ind_table, + sizeof(params.ind_table)); + + if (config_hash) { +/* RSS keys */ + for (i = 0; i < sizeof(params.rss_key) / 4; i++) { + params.rss_key[i] = (uint32_t) rte_rand(); + } + + bnx2x_set_bit(ECORE_RSS_SET_SRCH, ¶ms.rss_flags); + } + + if (IS_PF(sc)) + return ecore_config_rss(sc, ¶ms); + else + return bnx2x_vf_config_rss(sc, ¶ms); +} + +static int bnx2x_config_rss_eth(struct bnx2x_softc *sc, uint8_t config_hash) +{ + return bnx2x_config_rss_pf(sc, &sc->rss_conf_obj, config_hash); +} + +static int bnx2x_init_rss_pf(struct bnx2x_softc *sc) +{ + uint8_t num_eth_queues = BNX2X_NUM_ETH_QUEUES(sc); + uint32_t i; + + /* + * Prepare the initial contents of the indirection table if + * RSS is enabled + */ + for (i = 0; i < sizeof(sc->rss_conf_obj.ind_table); i++) { + sc->rss_conf_obj.ind_table[i] = + (sc->fp->cl_id + (i % num_eth_queues)); + } + + if (sc->udp_rss) { + sc->rss_conf_obj.udp_rss_v4 = sc->rss_conf_obj.udp_rss_v6 = 1; + } + + /* + * For 57711 SEARCHER configuration (rss_keys) is + * per-port, so if explicit configuration is needed, do it only + * for a PMF. + * + * For 57712 and newer it's a per-function configuration. + */ + return bnx2x_config_rss_eth(sc, sc->port.pmf || !CHIP_IS_E1x(sc)); +} + +static int +bnx2x_set_mac_one(struct bnx2x_softc *sc, uint8_t * mac, + struct ecore_vlan_mac_obj *obj, uint8_t set, int mac_type, + unsigned long *ramrod_flags) +{ + struct ecore_vlan_mac_ramrod_params ramrod_param; + int rc; + + memset(&ramrod_param, 0, sizeof(ramrod_param)); + + /* fill in general parameters */ + ramrod_param.vlan_mac_obj = obj; + ramrod_param.ramrod_flags = *ramrod_flags; + + /* fill a user request section if needed */ + if (!bnx2x_test_bit(RAMROD_CONT, ramrod_flags)) { + rte_memcpy(ramrod_param.user_req.u.mac.mac, mac, + ETH_ALEN); + + bnx2x_set_bit(mac_type, &ramrod_param.user_req.vlan_mac_flags); + +/* Set the command: ADD or DEL */ + ramrod_param.user_req.cmd = (set) ? ECORE_VLAN_MAC_ADD : + ECORE_VLAN_MAC_DEL; + } + + rc = ecore_config_vlan_mac(sc, &ramrod_param); + + if (rc == ECORE_EXISTS) { + PMD_DRV_LOG(INFO, sc, "Failed to schedule ADD operations (EEXIST)"); +/* do not treat adding same MAC as error */ + rc = 0; + } else if (rc < 0) { + PMD_DRV_LOG(ERR, sc, + "%s MAC failed (%d)", (set ? 
"Set" : "Delete"), rc); + } + + return rc; +} + +static int bnx2x_set_eth_mac(struct bnx2x_softc *sc, uint8_t set) +{ + unsigned long ramrod_flags = 0; + + PMD_DRV_LOG(DEBUG, sc, "Adding Ethernet MAC"); + + bnx2x_set_bit(RAMROD_COMP_WAIT, &ramrod_flags); + + /* Eth MAC is set on RSS leading client (fp[0]) */ + return bnx2x_set_mac_one(sc, sc->link_params.mac_addr, + &sc->sp_objs->mac_obj, + set, ECORE_ETH_MAC, &ramrod_flags); +} + +static int bnx2x_get_cur_phy_idx(struct bnx2x_softc *sc) +{ + uint32_t sel_phy_idx = 0; + + if (sc->link_params.num_phys <= 1) { + return ELINK_INT_PHY; + } + + if (sc->link_vars.link_up) { + sel_phy_idx = ELINK_EXT_PHY1; +/* In case link is SERDES, check if the ELINK_EXT_PHY2 is the one */ + if ((sc->link_vars.link_status & LINK_STATUS_SERDES_LINK) && + (sc->link_params.phy[ELINK_EXT_PHY2].supported & + ELINK_SUPPORTED_FIBRE)) + sel_phy_idx = ELINK_EXT_PHY2; + } else { + switch (elink_phy_selection(&sc->link_params)) { + case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT: + case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY: + case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY: + sel_phy_idx = ELINK_EXT_PHY1; + break; + case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY: + case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY: + sel_phy_idx = ELINK_EXT_PHY2; + break; + } + } + + return sel_phy_idx; +} + +static int bnx2x_get_link_cfg_idx(struct bnx2x_softc *sc) +{ + uint32_t sel_phy_idx = bnx2x_get_cur_phy_idx(sc); + + /* + * The selected activated PHY is always after swapping (in case PHY + * swapping is enabled). So when swapping is enabled, we need to reverse + * the configuration + */ + + if (sc->link_params.multi_phy_config & PORT_HW_CFG_PHY_SWAPPED_ENABLED) { + if (sel_phy_idx == ELINK_EXT_PHY1) + sel_phy_idx = ELINK_EXT_PHY2; + else if (sel_phy_idx == ELINK_EXT_PHY2) + sel_phy_idx = ELINK_EXT_PHY1; + } + + return ELINK_LINK_CONFIG_IDX(sel_phy_idx); +} + +static void bnx2x_set_requested_fc(struct bnx2x_softc *sc) +{ + /* + * Initialize link parameters structure variables + * It is recommended to turn off RX FC for jumbo frames + * for better performance + */ + if (CHIP_IS_E1x(sc) && (sc->mtu > 5000)) { + sc->link_params.req_fc_auto_adv = ELINK_FLOW_CTRL_TX; + } else { + sc->link_params.req_fc_auto_adv = ELINK_FLOW_CTRL_BOTH; + } +} + +static void bnx2x_calc_fc_adv(struct bnx2x_softc *sc) +{ + uint8_t cfg_idx = bnx2x_get_link_cfg_idx(sc); + switch (sc->link_vars.ieee_fc & + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) { + case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE: + default: + sc->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause | + ADVERTISED_Pause); + break; + + case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH: + sc->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause | + ADVERTISED_Pause); + break; + + case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC: + sc->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause; + break; + } +} + +static uint16_t bnx2x_get_mf_speed(struct bnx2x_softc *sc) +{ + uint16_t line_speed = sc->link_vars.line_speed; + if (IS_MF(sc)) { + uint16_t maxCfg = bnx2x_extract_max_cfg(sc, + sc->devinfo. 
+ mf_info.mf_config[SC_VN + (sc)]); + +/* calculate the current MAX line speed limit for the MF devices */ + if (IS_MF_SI(sc)) { + line_speed = (line_speed * maxCfg) / 100; + } else { /* SD mode */ + uint16_t vn_max_rate = maxCfg * 100; + + if (vn_max_rate < line_speed) { + line_speed = vn_max_rate; + } + } + } + + return line_speed; +} + +static void +bnx2x_fill_report_data(struct bnx2x_softc *sc, struct bnx2x_link_report_data *data) +{ + uint16_t line_speed = bnx2x_get_mf_speed(sc); + + memset(data, 0, sizeof(*data)); + + /* fill the report data with the effective line speed */ + data->line_speed = line_speed; + + /* Link is down */ + if (!sc->link_vars.link_up || (sc->flags & BNX2X_MF_FUNC_DIS)) { + bnx2x_set_bit(BNX2X_LINK_REPORT_LINK_DOWN, + &data->link_report_flags); + } + + /* Full DUPLEX */ + if (sc->link_vars.duplex == DUPLEX_FULL) { + bnx2x_set_bit(BNX2X_LINK_REPORT_FULL_DUPLEX, + &data->link_report_flags); + } + + /* Rx Flow Control is ON */ + if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_RX) { + bnx2x_set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags); + } + + /* Tx Flow Control is ON */ + if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_TX) { + bnx2x_set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags); + } +} + +/* report link status to OS, should be called under phy_lock */ +static void bnx2x_link_report_locked(struct bnx2x_softc *sc) +{ + struct bnx2x_link_report_data cur_data; + + /* reread mf_cfg */ + if (IS_PF(sc)) { + bnx2x_read_mf_cfg(sc); + } + + /* Read the current link report info */ + bnx2x_fill_report_data(sc, &cur_data); + + /* Don't report link down or exactly the same link status twice */ + if (!memcmp(&cur_data, &sc->last_reported_link, sizeof(cur_data)) || + (bnx2x_test_bit(BNX2X_LINK_REPORT_LINK_DOWN, + &sc->last_reported_link.link_report_flags) && + bnx2x_test_bit(BNX2X_LINK_REPORT_LINK_DOWN, + &cur_data.link_report_flags))) { + return; + } + + ELINK_DEBUG_P2(sc, "Change in link status : cur_data = %lx, last_reported_link = %lx", + cur_data.link_report_flags, + sc->last_reported_link.link_report_flags); + + sc->link_cnt++; + + ELINK_DEBUG_P1(sc, "link status change count = %x", sc->link_cnt); + /* report new link params and remember the state for the next time */ + rte_memcpy(&sc->last_reported_link, &cur_data, sizeof(cur_data)); + + if (bnx2x_test_bit(BNX2X_LINK_REPORT_LINK_DOWN, + &cur_data.link_report_flags)) { + ELINK_DEBUG_P0(sc, "NIC Link is Down"); + } else { + __rte_unused const char *duplex; + __rte_unused const char *flow; + + if (bnx2x_test_and_clear_bit(BNX2X_LINK_REPORT_FULL_DUPLEX, + &cur_data.link_report_flags)) { + duplex = "full"; + ELINK_DEBUG_P0(sc, "link set to full duplex"); + } else { + duplex = "half"; + ELINK_DEBUG_P0(sc, "link set to half duplex"); + } + +/* + * Handle the FC at the end so that only these flags would be + * possibly set. This way we may easily check if there is no FC + * enabled. 
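+ * At this point LINK_DOWN is ruled out and FULL_DUPLEX has been cleared by
+ * the test_and_clear above, so a non-zero link_report_flags means some
+ * flow control is active.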
+ */ + if (cur_data.link_report_flags) { + if (bnx2x_test_bit(BNX2X_LINK_REPORT_RX_FC_ON, + &cur_data.link_report_flags) && + bnx2x_test_bit(BNX2X_LINK_REPORT_TX_FC_ON, + &cur_data.link_report_flags)) { + flow = "ON - receive & transmit"; + } else if (bnx2x_test_bit(BNX2X_LINK_REPORT_RX_FC_ON, + &cur_data.link_report_flags) && + !bnx2x_test_bit(BNX2X_LINK_REPORT_TX_FC_ON, + &cur_data.link_report_flags)) { + flow = "ON - receive"; + } else if (!bnx2x_test_bit(BNX2X_LINK_REPORT_RX_FC_ON, + &cur_data.link_report_flags) && + bnx2x_test_bit(BNX2X_LINK_REPORT_TX_FC_ON, + &cur_data.link_report_flags)) { + flow = "ON - transmit"; + } else { + flow = "none"; /* possible? */ + } + } else { + flow = "none"; + } + + PMD_DRV_LOG(INFO, sc, + "NIC Link is Up, %d Mbps %s duplex, Flow control: %s", + cur_data.line_speed, duplex, flow); + } +} + +static void +bnx2x_link_report(struct bnx2x_softc *sc) +{ + bnx2x_acquire_phy_lock(sc); + bnx2x_link_report_locked(sc); + bnx2x_release_phy_lock(sc); +} + +void bnx2x_link_status_update(struct bnx2x_softc *sc) +{ + if (sc->state != BNX2X_STATE_OPEN) { + return; + } + + if (IS_PF(sc) && !CHIP_REV_IS_SLOW(sc)) { + elink_link_status_update(&sc->link_params, &sc->link_vars); + } else { + sc->port.supported[0] |= (ELINK_SUPPORTED_10baseT_Half | + ELINK_SUPPORTED_10baseT_Full | + ELINK_SUPPORTED_100baseT_Half | + ELINK_SUPPORTED_100baseT_Full | + ELINK_SUPPORTED_1000baseT_Full | + ELINK_SUPPORTED_2500baseX_Full | + ELINK_SUPPORTED_10000baseT_Full | + ELINK_SUPPORTED_TP | + ELINK_SUPPORTED_FIBRE | + ELINK_SUPPORTED_Autoneg | + ELINK_SUPPORTED_Pause | + ELINK_SUPPORTED_Asym_Pause); + sc->port.advertising[0] = sc->port.supported[0]; + + sc->link_params.sc = sc; + sc->link_params.port = SC_PORT(sc); + sc->link_params.req_duplex[0] = DUPLEX_FULL; + sc->link_params.req_flow_ctrl[0] = ELINK_FLOW_CTRL_NONE; + sc->link_params.req_line_speed[0] = SPEED_10000; + sc->link_params.speed_cap_mask[0] = 0x7f0000; + sc->link_params.switch_cfg = ELINK_SWITCH_CFG_10G; + + if (CHIP_REV_IS_FPGA(sc)) { + sc->link_vars.mac_type = ELINK_MAC_TYPE_EMAC; + sc->link_vars.line_speed = ELINK_SPEED_1000; + sc->link_vars.link_status = (LINK_STATUS_LINK_UP | + LINK_STATUS_SPEED_AND_DUPLEX_1000TFD); + } else { + sc->link_vars.mac_type = ELINK_MAC_TYPE_BMAC; + sc->link_vars.line_speed = ELINK_SPEED_10000; + sc->link_vars.link_status = (LINK_STATUS_LINK_UP | + LINK_STATUS_SPEED_AND_DUPLEX_10GTFD); + } + + sc->link_vars.link_up = 1; + + sc->link_vars.duplex = DUPLEX_FULL; + sc->link_vars.flow_ctrl = ELINK_FLOW_CTRL_NONE; + + if (IS_PF(sc)) { + REG_WR(sc, + NIG_REG_EGRESS_DRAIN0_MODE + + sc->link_params.port * 4, 0); + bnx2x_stats_handle(sc, STATS_EVENT_LINK_UP); + bnx2x_link_report(sc); + } + } + + if (IS_PF(sc)) { + if (sc->link_vars.link_up) { + bnx2x_stats_handle(sc, STATS_EVENT_LINK_UP); + } else { + bnx2x_stats_handle(sc, STATS_EVENT_STOP); + } + bnx2x_link_report(sc); + } else { + bnx2x_link_report_locked(sc); + bnx2x_stats_handle(sc, STATS_EVENT_LINK_UP); + } +} + +static int bnx2x_initial_phy_init(struct bnx2x_softc *sc, int load_mode) +{ + int rc, cfg_idx = bnx2x_get_link_cfg_idx(sc); + uint16_t req_line_speed = sc->link_params.req_line_speed[cfg_idx]; + struct elink_params *lp = &sc->link_params; + + bnx2x_set_requested_fc(sc); + + bnx2x_acquire_phy_lock(sc); + + if (load_mode == LOAD_DIAG) { + lp->loopback_mode = ELINK_LOOPBACK_XGXS; +/* Prefer doing PHY loopback at 10G speed, if possible */ + if (lp->req_line_speed[cfg_idx] < ELINK_SPEED_10000) { + if (lp->speed_cap_mask[cfg_idx] & + 
PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) { + lp->req_line_speed[cfg_idx] = ELINK_SPEED_10000; + } else { + lp->req_line_speed[cfg_idx] = ELINK_SPEED_1000; + } + } + } + + if (load_mode == LOAD_LOOPBACK_EXT) { + lp->loopback_mode = ELINK_LOOPBACK_EXT; + } + + rc = elink_phy_init(&sc->link_params, &sc->link_vars); + + bnx2x_release_phy_lock(sc); + + bnx2x_calc_fc_adv(sc); + + if (sc->link_vars.link_up) { + bnx2x_stats_handle(sc, STATS_EVENT_LINK_UP); + bnx2x_link_report(sc); + } + + sc->link_params.req_line_speed[cfg_idx] = req_line_speed; + return rc; +} + +/* update flags in shmem */ +static void +bnx2x_update_drv_flags(struct bnx2x_softc *sc, uint32_t flags, uint32_t set) +{ + uint32_t drv_flags; + + if (SHMEM2_HAS(sc, drv_flags)) { + bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_DRV_FLAGS); + drv_flags = SHMEM2_RD(sc, drv_flags); + + if (set) { + drv_flags |= flags; + } else { + drv_flags &= ~flags; + } + + SHMEM2_WR(sc, drv_flags, drv_flags); + + bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_DRV_FLAGS); + } +} + +/* periodic timer callout routine, only runs when the interface is up */ +void bnx2x_periodic_callout(struct bnx2x_softc *sc) +{ + if ((sc->state != BNX2X_STATE_OPEN) || + (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_STOP)) { + PMD_DRV_LOG(DEBUG, sc, "periodic callout exit (state=0x%x)", + sc->state); + return; + } + if (!CHIP_REV_IS_SLOW(sc)) { +/* + * This barrier is needed to ensure the ordering between the writing + * to the sc->port.pmf in the bnx2x_nic_load() or bnx2x_pmf_update() and + * the reading here. + */ + mb(); + if (sc->port.pmf) { + bnx2x_acquire_phy_lock(sc); + elink_period_func(&sc->link_params, &sc->link_vars); + bnx2x_release_phy_lock(sc); + } + } +#ifdef BNX2X_PULSE + if (IS_PF(sc) && !BNX2X_NOMCP(sc)) { + int mb_idx = SC_FW_MB_IDX(sc); + uint32_t drv_pulse; + uint32_t mcp_pulse; + + ++sc->fw_drv_pulse_wr_seq; + sc->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK; + + drv_pulse = sc->fw_drv_pulse_wr_seq; + bnx2x_drv_pulse(sc); + + mcp_pulse = (SHMEM_RD(sc, func_mb[mb_idx].mcp_pulse_mb) & + MCP_PULSE_SEQ_MASK); + +/* + * The delta between driver pulse and mcp response should + * be 1 (before mcp response) or 0 (after mcp response). + */ + if ((drv_pulse != mcp_pulse) && + (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) { + /* someone lost a heartbeat... 
*/ + PMD_DRV_LOG(ERR, sc, + "drv_pulse (0x%x) != mcp_pulse (0x%x)", + drv_pulse, mcp_pulse); + } + } +#endif +} + +/* start the controller */ +static __rte_noinline +int bnx2x_nic_load(struct bnx2x_softc *sc) +{ + uint32_t val; + uint32_t load_code = 0; + int i, rc = 0; + + PMD_INIT_FUNC_TRACE(sc); + + sc->state = BNX2X_STATE_OPENING_WAITING_LOAD; + + if (IS_PF(sc)) { +/* must be called before memory allocation and HW init */ + bnx2x_ilt_set_info(sc); + } + + bnx2x_set_fp_rx_buf_size(sc); + + if (IS_PF(sc)) { + if (bnx2x_alloc_mem(sc) != 0) { + sc->state = BNX2X_STATE_CLOSED; + rc = -ENOMEM; + goto bnx2x_nic_load_error0; + } + } + + /* allocate the host hardware/software hsi structures */ + if (bnx2x_alloc_hsi_mem(sc) != 0) { + PMD_DRV_LOG(ERR, sc, "bnx2x_alloc_hsi_mem was failed"); + sc->state = BNX2X_STATE_CLOSED; + rc = -ENOMEM; + goto bnx2x_nic_load_error0; + } + + if (bnx2x_alloc_fw_stats_mem(sc) != 0) { + sc->state = BNX2X_STATE_CLOSED; + rc = -ENOMEM; + goto bnx2x_nic_load_error0; + } + + if (IS_VF(sc)) { + rc = bnx2x_vf_init(sc); + if (rc) { + sc->state = BNX2X_STATE_ERROR; + goto bnx2x_nic_load_error0; + } + } + + if (IS_PF(sc)) { +/* set pf load just before approaching the MCP */ + bnx2x_set_pf_load(sc); + +/* if MCP exists send load request and analyze response */ + if (!BNX2X_NOMCP(sc)) { + /* attempt to load pf */ + if (bnx2x_nic_load_request(sc, &load_code) != 0) { + sc->state = BNX2X_STATE_CLOSED; + rc = -ENXIO; + goto bnx2x_nic_load_error1; + } + + /* what did the MCP say? */ + if (bnx2x_nic_load_analyze_req(sc, load_code) != 0) { + bnx2x_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0); + sc->state = BNX2X_STATE_CLOSED; + rc = -ENXIO; + goto bnx2x_nic_load_error2; + } + } else { + PMD_DRV_LOG(INFO, sc, "Device has no MCP!"); + load_code = bnx2x_nic_load_no_mcp(sc); + } + +/* mark PMF if applicable */ + bnx2x_nic_load_pmf(sc, load_code); + +/* Init Function state controlling object */ + bnx2x_init_func_obj(sc); + +/* Initialize HW */ + if (bnx2x_init_hw(sc, load_code) != 0) { + PMD_DRV_LOG(NOTICE, sc, "HW init failed"); + bnx2x_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0); + sc->state = BNX2X_STATE_CLOSED; + rc = -ENXIO; + goto bnx2x_nic_load_error2; + } + } + + bnx2x_nic_init(sc, load_code); + + /* Init per-function objects */ + if (IS_PF(sc)) { + bnx2x_init_objs(sc); + +/* set AFEX default VLAN tag to an invalid value */ + sc->devinfo.mf_info.afex_def_vlan_tag = -1; + + sc->state = BNX2X_STATE_OPENING_WAITING_PORT; + rc = bnx2x_func_start(sc); + if (rc) { + PMD_DRV_LOG(NOTICE, sc, "Function start failed!"); + bnx2x_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0); + sc->state = BNX2X_STATE_ERROR; + goto bnx2x_nic_load_error3; + } + +/* send LOAD_DONE command to MCP */ + if (!BNX2X_NOMCP(sc)) { + load_code = + bnx2x_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0); + if (!load_code) { + PMD_DRV_LOG(NOTICE, sc, + "MCP response failure, aborting"); + sc->state = BNX2X_STATE_ERROR; + rc = -ENXIO; + goto bnx2x_nic_load_error3; + } + } + } + + rc = bnx2x_setup_leading(sc); + if (rc) { + PMD_DRV_LOG(NOTICE, sc, "Setup leading failed!"); + sc->state = BNX2X_STATE_ERROR; + goto bnx2x_nic_load_error3; + } + + FOR_EACH_NONDEFAULT_ETH_QUEUE(sc, i) { + if (IS_PF(sc)) + rc = bnx2x_setup_queue(sc, &sc->fp[i], FALSE); + else /* IS_VF(sc) */ + rc = bnx2x_vf_setup_queue(sc, &sc->fp[i], FALSE); + + if (rc) { + PMD_DRV_LOG(NOTICE, sc, "Queue(%d) setup failed", i); + sc->state = BNX2X_STATE_ERROR; + goto bnx2x_nic_load_error3; + } + } + + rc = bnx2x_init_rss_pf(sc); + if (rc) { + PMD_DRV_LOG(NOTICE, sc, "PF RSS init 
failed"); + sc->state = BNX2X_STATE_ERROR; + goto bnx2x_nic_load_error3; + } + + /* now when Clients are configured we are ready to work */ + sc->state = BNX2X_STATE_OPEN; + + /* Configure a ucast MAC */ + if (IS_PF(sc)) { + rc = bnx2x_set_eth_mac(sc, TRUE); + } else { /* IS_VF(sc) */ + rc = bnx2x_vf_set_mac(sc, TRUE); + } + + if (rc) { + PMD_DRV_LOG(NOTICE, sc, "Setting Ethernet MAC failed"); + sc->state = BNX2X_STATE_ERROR; + goto bnx2x_nic_load_error3; + } + + if (sc->port.pmf) { + rc = bnx2x_initial_phy_init(sc, LOAD_OPEN); + if (rc) { + sc->state = BNX2X_STATE_ERROR; + goto bnx2x_nic_load_error3; + } + } + + sc->link_params.feature_config_flags &= + ~ELINK_FEATURE_CONFIG_BOOT_FROM_SAN; + + /* start the Tx */ + switch (LOAD_OPEN) { + case LOAD_NORMAL: + case LOAD_OPEN: + break; + + case LOAD_DIAG: + case LOAD_LOOPBACK_EXT: + sc->state = BNX2X_STATE_DIAG; + break; + + default: + break; + } + + if (sc->port.pmf) { + bnx2x_update_drv_flags(sc, 1 << DRV_FLAGS_PORT_MASK, 0); + } else { + bnx2x_link_status_update(sc); + } + + if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) { +/* mark driver is loaded in shmem2 */ + val = SHMEM2_RD(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)]); + SHMEM2_WR(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)], + (val | + DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED | + DRV_FLAGS_CAPABILITIES_LOADED_L2)); + } + + /* start fast path */ + /* Initialize Rx filter */ + bnx2x_set_rx_mode(sc); + + /* wait for all pending SP commands to complete */ + if (IS_PF(sc) && !bnx2x_wait_sp_comp(sc, ~0x0UL)) { + PMD_DRV_LOG(NOTICE, sc, "Timeout waiting for all SPs to complete!"); + bnx2x_periodic_stop(sc); + bnx2x_nic_unload(sc, UNLOAD_CLOSE, FALSE); + return -ENXIO; + } + + PMD_DRV_LOG(DEBUG, sc, "NIC successfully loaded"); + + return 0; + +bnx2x_nic_load_error3: + + if (IS_PF(sc)) { + bnx2x_int_disable_sync(sc, 1); + +/* clean out queued objects */ + bnx2x_squeeze_objects(sc); + } + +bnx2x_nic_load_error2: + + if (IS_PF(sc) && !BNX2X_NOMCP(sc)) { + bnx2x_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0); + bnx2x_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 0); + } + + sc->port.pmf = 0; + +bnx2x_nic_load_error1: + + /* clear pf_load status, as it was already set */ + if (IS_PF(sc)) { + bnx2x_clear_pf_load(sc); + } + +bnx2x_nic_load_error0: + + bnx2x_free_fw_stats_mem(sc); + bnx2x_free_hsi_mem(sc); + bnx2x_free_mem(sc); + + return rc; +} + +/* +* Handles controller initialization. +*/ +int bnx2x_init(struct bnx2x_softc *sc) +{ + int other_engine = SC_PATH(sc) ? 0 : 1; + uint8_t other_load_status, load_status; + uint8_t global = FALSE; + int rc; + + /* Check if the driver is still running and bail out if it is. */ + if (sc->state != BNX2X_STATE_CLOSED) { + PMD_DRV_LOG(DEBUG, sc, "Init called while driver is running!"); + rc = 0; + goto bnx2x_init_done; + } + + bnx2x_set_power_state(sc, PCI_PM_D0); + + /* + * If parity occurred during the unload, then attentions and/or + * RECOVERY_IN_PROGRESS may still be set. If so we want the first function + * loaded on the current engine to complete the recovery. Parity recovery + * is only relevant for PF driver. + */ + if (IS_PF(sc)) { + other_load_status = bnx2x_get_load_status(sc, other_engine); + load_status = bnx2x_get_load_status(sc, SC_PATH(sc)); + + if (!bnx2x_reset_is_done(sc, SC_PATH(sc)) || + bnx2x_chk_parity_attn(sc, &global, TRUE)) { + do { + /* + * If there are attentions and they are in global blocks, set + * the GLOBAL_RESET bit regardless whether it will be this + * function that will complete the recovery or not. 
+ */ + if (global) { + bnx2x_set_reset_global(sc); + } + + /* + * Only the first function on the current engine should try + * to recover in open. In case of attentions in global blocks + * only the first in the chip should try to recover. + */ + if ((!load_status + && (!global ||!other_load_status)) + && bnx2x_trylock_leader_lock(sc) + && !bnx2x_leader_reset(sc)) { + PMD_DRV_LOG(INFO, sc, + "Recovered during init"); + break; + } + + /* recovery has failed... */ + bnx2x_set_power_state(sc, PCI_PM_D3hot); + + sc->recovery_state = BNX2X_RECOVERY_FAILED; + + PMD_DRV_LOG(NOTICE, sc, + "Recovery flow hasn't properly " + "completed yet, try again later. " + "If you still see this message after a " + "few retries then power cycle is required."); + + rc = -ENXIO; + goto bnx2x_init_done; + } while (0); + } + } + + sc->recovery_state = BNX2X_RECOVERY_DONE; + + rc = bnx2x_nic_load(sc); + +bnx2x_init_done: + + if (rc) { + PMD_DRV_LOG(NOTICE, sc, "Initialization failed, " + "stack notified driver is NOT running!"); + } + + return rc; +} + +static void bnx2x_get_function_num(struct bnx2x_softc *sc) +{ + uint32_t val = 0; + + /* + * Read the ME register to get the function number. The ME register + * holds the relative-function number and absolute-function number. The + * absolute-function number appears only in E2 and above. Before that + * these bits always contained zero, therefore we cannot blindly use them. + */ + + val = REG_RD(sc, BAR_ME_REGISTER); + + sc->pfunc_rel = + (uint8_t) ((val & ME_REG_PF_NUM) >> ME_REG_PF_NUM_SHIFT); + sc->path_id = + (uint8_t) ((val & ME_REG_ABS_PF_NUM) >> ME_REG_ABS_PF_NUM_SHIFT) & + 1; + + if (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) { + sc->pfunc_abs = ((sc->pfunc_rel << 1) | sc->path_id); + } else { + sc->pfunc_abs = (sc->pfunc_rel | sc->path_id); + } + + PMD_DRV_LOG(DEBUG, sc, + "Relative function %d, Absolute function %d, Path %d", + sc->pfunc_rel, sc->pfunc_abs, sc->path_id); +} + +static uint32_t bnx2x_get_shmem_mf_cfg_base(struct bnx2x_softc *sc) +{ + uint32_t shmem2_size; + uint32_t offset; + uint32_t mf_cfg_offset_value; + + /* Non 57712 */ + offset = (SHMEM_ADDR(sc, func_mb) + + (MAX_FUNC_NUM * sizeof(struct drv_func_mb))); + + /* 57712 plus */ + if (sc->devinfo.shmem2_base != 0) { + shmem2_size = SHMEM2_RD(sc, size); + if (shmem2_size > offsetof(struct shmem2_region, mf_cfg_addr)) { + mf_cfg_offset_value = SHMEM2_RD(sc, mf_cfg_addr); + if (SHMEM_MF_CFG_ADDR_NONE != mf_cfg_offset_value) { + offset = mf_cfg_offset_value; + } + } + } + + return offset; +} + +static uint32_t bnx2x_pcie_capability_read(struct bnx2x_softc *sc, int reg) +{ + uint32_t ret; + struct bnx2x_pci_cap *caps; + + /* ensure PCIe capability is enabled */ + caps = pci_find_cap(sc, PCIY_EXPRESS, BNX2X_PCI_CAP); + if (NULL != caps) { + PMD_DRV_LOG(DEBUG, sc, "Found PCIe capability: " + "id=0x%04X type=0x%04X addr=0x%08X", + caps->id, caps->type, caps->addr); + pci_read(sc, (caps->addr + reg), &ret, 2); + return ret; + } + + PMD_DRV_LOG(WARNING, sc, "PCIe capability NOT FOUND!!!"); + + return 0; +} + +static uint8_t bnx2x_is_pcie_pending(struct bnx2x_softc *sc) +{ + return bnx2x_pcie_capability_read(sc, PCIR_EXPRESS_DEVICE_STA) & + PCIM_EXP_STA_TRANSACTION_PND; +} + +/* +* Walk the PCI capabiites list for the device to find what features are +* supported. These capabilites may be enabled/disabled by firmware so it's +* best to walk the list rather than make assumptions. 
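+* The results are cached in sc->devinfo (pcie_cap_flags and the
+* per-capability register offsets) for later use.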
+*/ +static void bnx2x_probe_pci_caps(struct bnx2x_softc *sc) +{ + PMD_INIT_FUNC_TRACE(sc); + + struct bnx2x_pci_cap *caps; + uint16_t link_status; + int reg = 0; + + /* check if PCI Power Management is enabled */ + caps = pci_find_cap(sc, PCIY_PMG, BNX2X_PCI_CAP); + if (NULL != caps) { + PMD_DRV_LOG(DEBUG, sc, "Found PM capability: " + "id=0x%04X type=0x%04X addr=0x%08X", + caps->id, caps->type, caps->addr); + + sc->devinfo.pcie_cap_flags |= BNX2X_PM_CAPABLE_FLAG; + sc->devinfo.pcie_pm_cap_reg = caps->addr; + } + + link_status = bnx2x_pcie_capability_read(sc, PCIR_EXPRESS_LINK_STA); + + sc->devinfo.pcie_link_speed = (link_status & PCIM_LINK_STA_SPEED); + sc->devinfo.pcie_link_width = + ((link_status & PCIM_LINK_STA_WIDTH) >> 4); + + PMD_DRV_LOG(DEBUG, sc, "PCIe link speed=%d width=%d", + sc->devinfo.pcie_link_speed, sc->devinfo.pcie_link_width); + + sc->devinfo.pcie_cap_flags |= BNX2X_PCIE_CAPABLE_FLAG; + + /* check if MSI capability is enabled */ + caps = pci_find_cap(sc, PCIY_MSI, BNX2X_PCI_CAP); + if (NULL != caps) { + PMD_DRV_LOG(DEBUG, sc, "Found MSI capability at 0x%04x", reg); + + sc->devinfo.pcie_cap_flags |= BNX2X_MSI_CAPABLE_FLAG; + sc->devinfo.pcie_msi_cap_reg = caps->addr; + } + + /* check if MSI-X capability is enabled */ + caps = pci_find_cap(sc, PCIY_MSIX, BNX2X_PCI_CAP); + if (NULL != caps) { + PMD_DRV_LOG(DEBUG, sc, "Found MSI-X capability at 0x%04x", reg); + + sc->devinfo.pcie_cap_flags |= BNX2X_MSIX_CAPABLE_FLAG; + sc->devinfo.pcie_msix_cap_reg = caps->addr; + } +} + +static int bnx2x_get_shmem_mf_cfg_info_sd(struct bnx2x_softc *sc) +{ + struct bnx2x_mf_info *mf_info = &sc->devinfo.mf_info; + uint32_t val; + + /* get the outer vlan if we're in switch-dependent mode */ + + val = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag); + mf_info->ext_id = (uint16_t) val; + + mf_info->multi_vnics_mode = 1; + + if (!VALID_OVLAN(mf_info->ext_id)) { + PMD_DRV_LOG(NOTICE, sc, "Invalid VLAN (%d)", mf_info->ext_id); + return 1; + } + + /* get the capabilities */ + if ((mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_PROTOCOL_MASK) == + FUNC_MF_CFG_PROTOCOL_ISCSI) { + mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_ISCSI; + } else if ((mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_PROTOCOL_MASK) + == FUNC_MF_CFG_PROTOCOL_FCOE) { + mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_FCOE; + } else { + mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_ETHERNET; + } + + mf_info->vnics_per_port = + (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4; + + return 0; +} + +static uint32_t bnx2x_get_shmem_ext_proto_support_flags(struct bnx2x_softc *sc) +{ + uint32_t retval = 0; + uint32_t val; + + val = MFCFG_RD(sc, func_ext_config[SC_ABS_FUNC(sc)].func_cfg); + + if (val & MACP_FUNC_CFG_FLAGS_ENABLED) { + if (val & MACP_FUNC_CFG_FLAGS_ETHERNET) { + retval |= MF_PROTO_SUPPORT_ETHERNET; + } + if (val & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) { + retval |= MF_PROTO_SUPPORT_ISCSI; + } + if (val & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) { + retval |= MF_PROTO_SUPPORT_FCOE; + } + } + + return retval; +} + +static int bnx2x_get_shmem_mf_cfg_info_si(struct bnx2x_softc *sc) +{ + struct bnx2x_mf_info *mf_info = &sc->devinfo.mf_info; + uint32_t val; + + /* + * There is no outer vlan if we're in switch-independent mode. + * If the mac is valid then assume multi-function. 
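+ * (The check below uses the MACP_FUNC_CFG_FLAGS bits of
+ * func_ext_config as that validity indication.)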
+ */ + + val = MFCFG_RD(sc, func_ext_config[SC_ABS_FUNC(sc)].func_cfg); + + mf_info->multi_vnics_mode = ((val & MACP_FUNC_CFG_FLAGS_MASK) != 0); + + mf_info->mf_protos_supported = + bnx2x_get_shmem_ext_proto_support_flags(sc); + + mf_info->vnics_per_port = + (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4; + + return 0; +} + +static int bnx2x_get_shmem_mf_cfg_info_niv(struct bnx2x_softc *sc) +{ + struct bnx2x_mf_info *mf_info = &sc->devinfo.mf_info; + uint32_t e1hov_tag; + uint32_t func_config; + uint32_t niv_config; + + mf_info->multi_vnics_mode = 1; + + e1hov_tag = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag); + func_config = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config); + niv_config = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].afex_config); + + mf_info->ext_id = + (uint16_t) ((e1hov_tag & FUNC_MF_CFG_E1HOV_TAG_MASK) >> + FUNC_MF_CFG_E1HOV_TAG_SHIFT); + + mf_info->default_vlan = + (uint16_t) ((e1hov_tag & FUNC_MF_CFG_AFEX_VLAN_MASK) >> + FUNC_MF_CFG_AFEX_VLAN_SHIFT); + + mf_info->niv_allowed_priorities = + (uint8_t) ((niv_config & FUNC_MF_CFG_AFEX_COS_FILTER_MASK) >> + FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT); + + mf_info->niv_default_cos = + (uint8_t) ((func_config & FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK) >> + FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT); + + mf_info->afex_vlan_mode = + ((niv_config & FUNC_MF_CFG_AFEX_VLAN_MODE_MASK) >> + FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT); + + mf_info->niv_mba_enabled = + ((niv_config & FUNC_MF_CFG_AFEX_MBA_ENABLED_MASK) >> + FUNC_MF_CFG_AFEX_MBA_ENABLED_SHIFT); + + mf_info->mf_protos_supported = + bnx2x_get_shmem_ext_proto_support_flags(sc); + + mf_info->vnics_per_port = + (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4; + + return 0; +} + +static int bnx2x_check_valid_mf_cfg(struct bnx2x_softc *sc) +{ + struct bnx2x_mf_info *mf_info = &sc->devinfo.mf_info; + uint32_t mf_cfg1; + uint32_t mf_cfg2; + uint32_t ovlan1; + uint32_t ovlan2; + uint8_t i, j; + + /* various MF mode sanity checks... */ + + if (mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_HIDE) { + PMD_DRV_LOG(NOTICE, sc, + "Enumerated function %d is marked as hidden", + SC_PORT(sc)); + return 1; + } + + if ((mf_info->vnics_per_port > 1) && !mf_info->multi_vnics_mode) { + PMD_DRV_LOG(NOTICE, sc, "vnics_per_port=%d multi_vnics_mode=%d", + mf_info->vnics_per_port, mf_info->multi_vnics_mode); + return 1; + } + + if (mf_info->mf_mode == MULTI_FUNCTION_SD) { +/* vnic id > 0 must have valid ovlan in switch-dependent mode */ + if ((SC_VN(sc) > 0) && !VALID_OVLAN(OVLAN(sc))) { + PMD_DRV_LOG(NOTICE, sc, "mf_mode=SD vnic_id=%d ovlan=%d", + SC_VN(sc), OVLAN(sc)); + return 1; + } + + if (!VALID_OVLAN(OVLAN(sc)) && mf_info->multi_vnics_mode) { + PMD_DRV_LOG(NOTICE, sc, + "mf_mode=SD multi_vnics_mode=%d ovlan=%d", + mf_info->multi_vnics_mode, OVLAN(sc)); + return 1; + } + +/* + * Verify all functions are either MF or SF mode. If MF, make sure + * sure that all non-hidden functions have a valid ovlan. If SF, + * make sure that all non-hidden functions have an invalid ovlan. 
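+ * Any violation found below is logged and reported as an invalid
+ * MF configuration.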
+ */ + FOREACH_ABS_FUNC_IN_PORT(sc, i) { + mf_cfg1 = MFCFG_RD(sc, func_mf_config[i].config); + ovlan1 = MFCFG_RD(sc, func_mf_config[i].e1hov_tag); + if (!(mf_cfg1 & FUNC_MF_CFG_FUNC_HIDE) && + (((mf_info->multi_vnics_mode) + && !VALID_OVLAN(ovlan1)) + || ((!mf_info->multi_vnics_mode) + && VALID_OVLAN(ovlan1)))) { + PMD_DRV_LOG(NOTICE, sc, + "mf_mode=SD function %d MF config " + "mismatch, multi_vnics_mode=%d ovlan=%d", + i, mf_info->multi_vnics_mode, + ovlan1); + return 1; + } + } + +/* Verify all funcs on the same port each have a different ovlan. */ + FOREACH_ABS_FUNC_IN_PORT(sc, i) { + mf_cfg1 = MFCFG_RD(sc, func_mf_config[i].config); + ovlan1 = MFCFG_RD(sc, func_mf_config[i].e1hov_tag); + /* iterate from the next function on the port to the max func */ + for (j = i + 2; j < MAX_FUNC_NUM; j += 2) { + mf_cfg2 = + MFCFG_RD(sc, func_mf_config[j].config); + ovlan2 = + MFCFG_RD(sc, func_mf_config[j].e1hov_tag); + if (!(mf_cfg1 & FUNC_MF_CFG_FUNC_HIDE) + && VALID_OVLAN(ovlan1) + && !(mf_cfg2 & FUNC_MF_CFG_FUNC_HIDE) + && VALID_OVLAN(ovlan2) + && (ovlan1 == ovlan2)) { + PMD_DRV_LOG(NOTICE, sc, + "mf_mode=SD functions %d and %d " + "have the same ovlan (%d)", + i, j, ovlan1); + return 1; + } + } + } + } + /* MULTI_FUNCTION_SD */ + return 0; +} + +static int bnx2x_get_mf_cfg_info(struct bnx2x_softc *sc) +{ + struct bnx2x_mf_info *mf_info = &sc->devinfo.mf_info; + uint32_t val, mac_upper; + uint8_t i, vnic; + + /* initialize mf_info defaults */ + mf_info->vnics_per_port = 1; + mf_info->multi_vnics_mode = FALSE; + mf_info->path_has_ovlan = FALSE; + mf_info->mf_mode = SINGLE_FUNCTION; + + if (!CHIP_IS_MF_CAP(sc)) { + return 0; + } + + if (sc->devinfo.mf_cfg_base == SHMEM_MF_CFG_ADDR_NONE) { + PMD_DRV_LOG(NOTICE, sc, "Invalid mf_cfg_base!"); + return 1; + } + + /* get the MF mode (switch dependent / independent / single-function) */ + + val = SHMEM_RD(sc, dev_info.shared_feature_config.config); + + switch (val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK) { + case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT: + + mac_upper = + MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper); + + /* check for legal upper mac bytes */ + if (mac_upper != FUNC_MF_CFG_UPPERMAC_DEFAULT) { + mf_info->mf_mode = MULTI_FUNCTION_SI; + } else { + PMD_DRV_LOG(NOTICE, sc, + "Invalid config for Switch Independent mode"); + } + + break; + + case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED: + case SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4: + + /* get outer vlan configuration */ + val = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag); + + if ((val & FUNC_MF_CFG_E1HOV_TAG_MASK) != + FUNC_MF_CFG_E1HOV_TAG_DEFAULT) { + mf_info->mf_mode = MULTI_FUNCTION_SD; + } else { + PMD_DRV_LOG(NOTICE, sc, + "Invalid config for Switch Dependent mode"); + } + + break; + + case SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF: + + /* not in MF mode, vnics_per_port=1 and multi_vnics_mode=FALSE */ + return 0; + + case SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE: + + /* + * Mark MF mode as NIV if MCP version includes NPAR-SD support + * and the MAC address is valid. 
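+ * (NPAR-SD support is inferred from the shmem2 afex_driver_support
+ * field checked below.)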
+ */ + mac_upper = + MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper); + + if ((SHMEM2_HAS(sc, afex_driver_support)) && + (mac_upper != FUNC_MF_CFG_UPPERMAC_DEFAULT)) { + mf_info->mf_mode = MULTI_FUNCTION_AFEX; + } else { + PMD_DRV_LOG(NOTICE, sc, "Invalid config for AFEX mode"); + } + + break; + + default: + + PMD_DRV_LOG(NOTICE, sc, "Unknown MF mode (0x%08x)", + (val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK)); + + return 1; + } + + /* set path mf_mode (which could be different than function mf_mode) */ + if (mf_info->mf_mode == MULTI_FUNCTION_SD) { + mf_info->path_has_ovlan = TRUE; + } else if (mf_info->mf_mode == SINGLE_FUNCTION) { +/* + * Decide on path multi vnics mode. If we're not in MF mode and in + * 4-port mode, this is good enough to check vnic-0 of the other port + * on the same path + */ + if (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) { + uint8_t other_port = !(PORT_ID(sc) & 1); + uint8_t abs_func_other_port = + (SC_PATH(sc) + (2 * other_port)); + + val = + MFCFG_RD(sc, + func_mf_config + [abs_func_other_port].e1hov_tag); + + mf_info->path_has_ovlan = VALID_OVLAN((uint16_t) val); + } + } + + if (mf_info->mf_mode == SINGLE_FUNCTION) { +/* invalid MF config */ + if (SC_VN(sc) >= 1) { + PMD_DRV_LOG(NOTICE, sc, "VNIC ID >= 1 in SF mode"); + return 1; + } + + return 0; + } + + /* get the MF configuration */ + mf_info->mf_config[SC_VN(sc)] = + MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config); + + switch (mf_info->mf_mode) { + case MULTI_FUNCTION_SD: + + bnx2x_get_shmem_mf_cfg_info_sd(sc); + break; + + case MULTI_FUNCTION_SI: + + bnx2x_get_shmem_mf_cfg_info_si(sc); + break; + + case MULTI_FUNCTION_AFEX: + + bnx2x_get_shmem_mf_cfg_info_niv(sc); + break; + + default: + + PMD_DRV_LOG(NOTICE, sc, "Get MF config failed (mf_mode=0x%08x)", + mf_info->mf_mode); + return 1; + } + + /* get the congestion management parameters */ + + vnic = 0; + FOREACH_ABS_FUNC_IN_PORT(sc, i) { +/* get min/max bw */ + val = MFCFG_RD(sc, func_mf_config[i].config); + mf_info->min_bw[vnic] = + ((val & FUNC_MF_CFG_MIN_BW_MASK) >> + FUNC_MF_CFG_MIN_BW_SHIFT); + mf_info->max_bw[vnic] = + ((val & FUNC_MF_CFG_MAX_BW_MASK) >> + FUNC_MF_CFG_MAX_BW_SHIFT); + vnic++; + } + + return bnx2x_check_valid_mf_cfg(sc); +} + +static int bnx2x_get_shmem_info(struct bnx2x_softc *sc) +{ + int port; + uint32_t mac_hi, mac_lo, val; + + PMD_INIT_FUNC_TRACE(sc); + + port = SC_PORT(sc); + mac_hi = mac_lo = 0; + + sc->link_params.sc = sc; + sc->link_params.port = port; + + /* get the hardware config info */ + sc->devinfo.hw_config = SHMEM_RD(sc, dev_info.shared_hw_config.config); + sc->devinfo.hw_config2 = + SHMEM_RD(sc, dev_info.shared_hw_config.config2); + + sc->link_params.hw_led_mode = + ((sc->devinfo.hw_config & SHARED_HW_CFG_LED_MODE_MASK) >> + SHARED_HW_CFG_LED_MODE_SHIFT); + + /* get the port feature config */ + sc->port.config = + SHMEM_RD(sc, dev_info.port_feature_config[port].config); + + /* get the link params */ + sc->link_params.speed_cap_mask[ELINK_INT_PHY] = + SHMEM_RD(sc, dev_info.port_hw_config[port].speed_capability_mask) + & PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK; + sc->link_params.speed_cap_mask[ELINK_EXT_PHY1] = + SHMEM_RD(sc, dev_info.port_hw_config[port].speed_capability_mask2) + & PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK; + + /* get the lane config */ + sc->link_params.lane_config = + SHMEM_RD(sc, dev_info.port_hw_config[port].lane_config); + + /* get the link config */ + val = SHMEM_RD(sc, dev_info.port_feature_config[port].link_config); + sc->port.link_config[ELINK_INT_PHY] = val; + sc->link_params.switch_cfg = 
(val & PORT_FEATURE_CONNECTED_SWITCH_MASK); + sc->port.link_config[ELINK_EXT_PHY1] = + SHMEM_RD(sc, dev_info.port_feature_config[port].link_config2); + + /* get the override preemphasis flag and enable it or turn it off */ + val = SHMEM_RD(sc, dev_info.shared_feature_config.config); + if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED) { + sc->link_params.feature_config_flags |= + ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED; + } else { + sc->link_params.feature_config_flags &= + ~ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED; + } + + val = sc->devinfo.bc_ver >> 8; + if (val < BNX2X_BC_VER) { + /* for now only warn later we might need to enforce this */ + PMD_DRV_LOG(NOTICE, sc, "This driver needs bc_ver %X but found %X, please upgrade BC\n", + BNX2X_BC_VER, val); + } + sc->link_params.feature_config_flags |= + (val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ? + ELINK_FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : + 0; + + sc->link_params.feature_config_flags |= + (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ? + ELINK_FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0; + sc->link_params.feature_config_flags |= + (val >= REQ_BC_VER_4_VRFY_AFEX_SUPPORTED) ? + ELINK_FEATURE_CONFIG_BC_SUPPORTS_AFEX : 0; + sc->link_params.feature_config_flags |= + (val >= REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED) ? + ELINK_FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED : 0; + + /* get the initial value of the link params */ + sc->link_params.multi_phy_config = + SHMEM_RD(sc, dev_info.port_hw_config[port].multi_phy_config); + + /* get external phy info */ + sc->port.ext_phy_config = + SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config); + + /* get the multifunction configuration */ + bnx2x_get_mf_cfg_info(sc); + + /* get the mac address */ + if (IS_MF(sc)) { + mac_hi = + MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper); + mac_lo = + MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_lower); + } else { + mac_hi = SHMEM_RD(sc, dev_info.port_hw_config[port].mac_upper); + mac_lo = SHMEM_RD(sc, dev_info.port_hw_config[port].mac_lower); + } + + if ((mac_lo == 0) && (mac_hi == 0)) { + *sc->mac_addr_str = 0; + PMD_DRV_LOG(NOTICE, sc, "No Ethernet address programmed!"); + } else { + sc->link_params.mac_addr[0] = (uint8_t) (mac_hi >> 8); + sc->link_params.mac_addr[1] = (uint8_t) (mac_hi); + sc->link_params.mac_addr[2] = (uint8_t) (mac_lo >> 24); + sc->link_params.mac_addr[3] = (uint8_t) (mac_lo >> 16); + sc->link_params.mac_addr[4] = (uint8_t) (mac_lo >> 8); + sc->link_params.mac_addr[5] = (uint8_t) (mac_lo); + snprintf(sc->mac_addr_str, sizeof(sc->mac_addr_str), + "%02x:%02x:%02x:%02x:%02x:%02x", + sc->link_params.mac_addr[0], + sc->link_params.mac_addr[1], + sc->link_params.mac_addr[2], + sc->link_params.mac_addr[3], + sc->link_params.mac_addr[4], + sc->link_params.mac_addr[5]); + PMD_DRV_LOG(DEBUG, sc, + "Ethernet address: %s", sc->mac_addr_str); + } + + return 0; +} + +static void bnx2x_media_detect(struct bnx2x_softc *sc) +{ + uint32_t phy_idx = bnx2x_get_cur_phy_idx(sc); + switch (sc->link_params.phy[phy_idx].media_type) { + case ELINK_ETH_PHY_SFPP_10G_FIBER: + case ELINK_ETH_PHY_SFP_1G_FIBER: + case ELINK_ETH_PHY_XFP_FIBER: + case ELINK_ETH_PHY_KR: + case ELINK_ETH_PHY_CX4: + PMD_DRV_LOG(INFO, sc, "Found 10GBase-CX4 media."); + sc->media = IFM_10G_CX4; + break; + case ELINK_ETH_PHY_DA_TWINAX: + PMD_DRV_LOG(INFO, sc, "Found 10Gb Twinax media."); + sc->media = IFM_10G_TWINAX; + break; + case ELINK_ETH_PHY_BASE_T: + PMD_DRV_LOG(INFO, sc, "Found 10GBase-T media."); + sc->media = IFM_10G_T; + 
break; + case ELINK_ETH_PHY_NOT_PRESENT: + PMD_DRV_LOG(INFO, sc, "Media not present."); + sc->media = 0; + break; + case ELINK_ETH_PHY_UNSPECIFIED: + default: + PMD_DRV_LOG(INFO, sc, "Unknown media!"); + sc->media = 0; + break; + } +} + +#define GET_FIELD(value, fname) \ +(((value) & (fname##_MASK)) >> (fname##_SHIFT)) +#define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID) +#define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR) + +static int bnx2x_get_igu_cam_info(struct bnx2x_softc *sc) +{ + int pfid = SC_FUNC(sc); + int igu_sb_id; + uint32_t val; + uint8_t fid, igu_sb_cnt = 0; + + sc->igu_base_sb = 0xff; + + if (CHIP_INT_MODE_IS_BC(sc)) { + int vn = SC_VN(sc); + igu_sb_cnt = sc->igu_sb_cnt; + sc->igu_base_sb = ((CHIP_IS_MODE_4_PORT(sc) ? pfid : vn) * + FP_SB_MAX_E1x); + sc->igu_dsb_id = (E1HVN_MAX * FP_SB_MAX_E1x + + (CHIP_IS_MODE_4_PORT(sc) ? pfid : vn)); + return 0; + } + + /* IGU in normal mode - read CAM */ + for (igu_sb_id = 0; + igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE; igu_sb_id++) { + val = REG_RD(sc, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4); + if (!(val & IGU_REG_MAPPING_MEMORY_VALID)) { + continue; + } + fid = IGU_FID(val); + if (fid & IGU_FID_ENCODE_IS_PF) { + if ((fid & IGU_FID_PF_NUM_MASK) != pfid) { + continue; + } + if (IGU_VEC(val) == 0) { + /* default status block */ + sc->igu_dsb_id = igu_sb_id; + } else { + if (sc->igu_base_sb == 0xff) { + sc->igu_base_sb = igu_sb_id; + } + igu_sb_cnt++; + } + } + } + + /* + * Due to new PF resource allocation by MFW T7.4 and above, it's optional + * that number of CAM entries will not be equal to the value advertised in + * PCI. Driver should use the minimal value of both as the actual status + * block count + */ + sc->igu_sb_cnt = min(sc->igu_sb_cnt, igu_sb_cnt); + + if (igu_sb_cnt == 0) { + PMD_DRV_LOG(ERR, sc, "CAM configuration error"); + return -1; + } + + return 0; +} + +/* +* Gather various information from the device config space, the device itself, +* shmem, and the user input. 
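+* This fills in sc->devinfo (chip id, port mode, shmem bases, IGU
+* layout, flash size) before the PHY is probed.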
+*/ +static int bnx2x_get_device_info(struct bnx2x_softc *sc) +{ + uint32_t val; + int rc; + + /* get the chip revision (chip metal comes from pci config space) */ + sc->devinfo.chip_id = sc->link_params.chip_id = + (((REG_RD(sc, MISC_REG_CHIP_NUM) & 0xffff) << 16) | + ((REG_RD(sc, MISC_REG_CHIP_REV) & 0xf) << 12) | + (((REG_RD(sc, PCICFG_OFFSET + PCI_ID_VAL3) >> 24) & 0xf) << 4) | + ((REG_RD(sc, MISC_REG_BOND_ID) & 0xf) << 0)); + + /* force 57811 according to MISC register */ + if (REG_RD(sc, MISC_REG_CHIP_TYPE) & MISC_REG_CHIP_TYPE_57811_MASK) { + if (CHIP_IS_57810(sc)) { + sc->devinfo.chip_id = ((CHIP_NUM_57811 << 16) | + (sc-> + devinfo.chip_id & 0x0000ffff)); + } else if (CHIP_IS_57810_MF(sc)) { + sc->devinfo.chip_id = ((CHIP_NUM_57811_MF << 16) | + (sc-> + devinfo.chip_id & 0x0000ffff)); + } + sc->devinfo.chip_id |= 0x1; + } + + PMD_DRV_LOG(DEBUG, sc, + "chip_id=0x%08x (num=0x%04x rev=0x%01x metal=0x%02x bond=0x%01x)", + sc->devinfo.chip_id, + ((sc->devinfo.chip_id >> 16) & 0xffff), + ((sc->devinfo.chip_id >> 12) & 0xf), + ((sc->devinfo.chip_id >> 4) & 0xff), + ((sc->devinfo.chip_id >> 0) & 0xf)); + + val = (REG_RD(sc, 0x2874) & 0x55); + if ((sc->devinfo.chip_id & 0x1) || (CHIP_IS_E1H(sc) && (val == 0x55))) { + sc->flags |= BNX2X_ONE_PORT_FLAG; + PMD_DRV_LOG(DEBUG, sc, "single port device"); + } + + /* set the doorbell size */ + sc->doorbell_size = (1 << BNX2X_DB_SHIFT); + + /* determine whether the device is in 2 port or 4 port mode */ + sc->devinfo.chip_port_mode = CHIP_PORT_MODE_NONE; /* E1h */ + if (CHIP_IS_E2E3(sc)) { +/* + * Read port4mode_en_ovwr[0]: + * If 1, four port mode is in port4mode_en_ovwr[1]. + * If 0, four port mode is in port4mode_en[0]. + */ + val = REG_RD(sc, MISC_REG_PORT4MODE_EN_OVWR); + if (val & 1) { + val = ((val >> 1) & 1); + } else { + val = REG_RD(sc, MISC_REG_PORT4MODE_EN); + } + + sc->devinfo.chip_port_mode = + (val) ? CHIP_4_PORT_MODE : CHIP_2_PORT_MODE; + + PMD_DRV_LOG(DEBUG, sc, "Port mode = %s", (val) ? "4" : "2"); + } + + /* get the function and path info for the device */ + bnx2x_get_function_num(sc); + + /* get the shared memory base address */ + sc->devinfo.shmem_base = + sc->link_params.shmem_base = REG_RD(sc, MISC_REG_SHARED_MEM_ADDR); + sc->devinfo.shmem2_base = + REG_RD(sc, (SC_PATH(sc) ? 
MISC_REG_GENERIC_CR_1 : + MISC_REG_GENERIC_CR_0)); + + if (!sc->devinfo.shmem_base) { +/* this should ONLY prevent upcoming shmem reads */ + PMD_DRV_LOG(INFO, sc, "MCP not active"); + sc->flags |= BNX2X_NO_MCP_FLAG; + return 0; + } + + /* make sure the shared memory contents are valid */ + val = SHMEM_RD(sc, validity_map[SC_PORT(sc)]); + if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) != + (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) { + PMD_DRV_LOG(NOTICE, sc, "Invalid SHMEM validity signature: 0x%08x", + val); + return 0; + } + + /* get the bootcode version */ + sc->devinfo.bc_ver = SHMEM_RD(sc, dev_info.bc_rev); + snprintf(sc->devinfo.bc_ver_str, + sizeof(sc->devinfo.bc_ver_str), + "%d.%d.%d", + ((sc->devinfo.bc_ver >> 24) & 0xff), + ((sc->devinfo.bc_ver >> 16) & 0xff), + ((sc->devinfo.bc_ver >> 8) & 0xff)); + PMD_DRV_LOG(DEBUG, sc, "Bootcode version: %s", sc->devinfo.bc_ver_str); + + /* get the bootcode shmem address */ + sc->devinfo.mf_cfg_base = bnx2x_get_shmem_mf_cfg_base(sc); + + /* clean indirect addresses as they're not used */ + pci_write_long(sc, PCICFG_GRC_ADDRESS, 0); + if (IS_PF(sc)) { + REG_WR(sc, PXP2_REG_PGL_ADDR_88_F0, 0); + REG_WR(sc, PXP2_REG_PGL_ADDR_8C_F0, 0); + REG_WR(sc, PXP2_REG_PGL_ADDR_90_F0, 0); + REG_WR(sc, PXP2_REG_PGL_ADDR_94_F0, 0); + if (CHIP_IS_E1x(sc)) { + REG_WR(sc, PXP2_REG_PGL_ADDR_88_F1, 0); + REG_WR(sc, PXP2_REG_PGL_ADDR_8C_F1, 0); + REG_WR(sc, PXP2_REG_PGL_ADDR_90_F1, 0); + REG_WR(sc, PXP2_REG_PGL_ADDR_94_F1, 0); + } + } + + /* get the nvram size */ + val = REG_RD(sc, MCP_REG_MCPR_NVM_CFG4); + sc->devinfo.flash_size = + (NVRAM_1MB_SIZE << (val & MCPR_NVM_CFG4_FLASH_SIZE)); + + bnx2x_set_power_state(sc, PCI_PM_D0); + /* get various configuration parameters from shmem */ + bnx2x_get_shmem_info(sc); + + /* initialize IGU parameters */ + if (CHIP_IS_E1x(sc)) { + sc->devinfo.int_block = INT_BLOCK_HC; + sc->igu_dsb_id = DEF_SB_IGU_ID; + sc->igu_base_sb = 0; + } else { + sc->devinfo.int_block = INT_BLOCK_IGU; + +/* do not allow device reset during IGU info preocessing */ + bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RESET); + + val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION); + + if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) { + int tout = 5000; + + val &= ~(IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN); + REG_WR(sc, IGU_REG_BLOCK_CONFIGURATION, val); + REG_WR(sc, IGU_REG_RESET_MEMORIES, 0x7f); + + while (tout && REG_RD(sc, IGU_REG_RESET_MEMORIES)) { + tout--; + DELAY(1000); + } + + if (REG_RD(sc, IGU_REG_RESET_MEMORIES)) { + PMD_DRV_LOG(NOTICE, sc, + "FORCING IGU Normal Mode failed!!!"); + bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET); + return -1; + } + } + + if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) { + PMD_DRV_LOG(DEBUG, sc, "IGU Backward Compatible Mode"); + sc->devinfo.int_block |= INT_BLOCK_MODE_BW_COMP; + } else { + PMD_DRV_LOG(DEBUG, sc, "IGU Normal Mode"); + } + + rc = bnx2x_get_igu_cam_info(sc); + + bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET); + + if (rc) { + return rc; + } + } + + /* + * Get base FW non-default (fast path) status block ID. This value is + * used to initialize the fw_sb_id saved on the fp/queue structure to + * determine the id used by the FW. + */ + if (CHIP_IS_E1x(sc)) { + sc->base_fw_ndsb = + ((SC_PORT(sc) * FP_SB_MAX_E1x) + SC_L_ID(sc)); + } else { +/* + * 57712+ - We currently use one FW SB per IGU SB (Rx and Tx of + * the same queue are indicated on the same IGU SB). So we prefer + * FW and IGU SBs to be the same value. 
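+ * (hence base_fw_ndsb simply mirrors igu_base_sb below)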
+ */ + sc->base_fw_ndsb = sc->igu_base_sb; + } + + elink_phy_probe(&sc->link_params); + + return 0; +} + +static void +bnx2x_link_settings_supported(struct bnx2x_softc *sc, uint32_t switch_cfg) +{ + uint32_t cfg_size = 0; + uint32_t idx; + uint8_t port = SC_PORT(sc); + + /* aggregation of supported attributes of all external phys */ + sc->port.supported[0] = 0; + sc->port.supported[1] = 0; + + switch (sc->link_params.num_phys) { + case 1: + sc->port.supported[0] = + sc->link_params.phy[ELINK_INT_PHY].supported; + cfg_size = 1; + break; + case 2: + sc->port.supported[0] = + sc->link_params.phy[ELINK_EXT_PHY1].supported; + cfg_size = 1; + break; + case 3: + if (sc->link_params.multi_phy_config & + PORT_HW_CFG_PHY_SWAPPED_ENABLED) { + sc->port.supported[1] = + sc->link_params.phy[ELINK_EXT_PHY1].supported; + sc->port.supported[0] = + sc->link_params.phy[ELINK_EXT_PHY2].supported; + } else { + sc->port.supported[0] = + sc->link_params.phy[ELINK_EXT_PHY1].supported; + sc->port.supported[1] = + sc->link_params.phy[ELINK_EXT_PHY2].supported; + } + cfg_size = 2; + break; + } + + if (!(sc->port.supported[0] || sc->port.supported[1])) { + PMD_DRV_LOG(ERR, sc, + "Invalid phy config in NVRAM (PHY1=0x%08x PHY2=0x%08x)", + SHMEM_RD(sc, + dev_info.port_hw_config + [port].external_phy_config), + SHMEM_RD(sc, + dev_info.port_hw_config + [port].external_phy_config2)); + return; + } + + if (CHIP_IS_E3(sc)) + sc->port.phy_addr = REG_RD(sc, MISC_REG_WC0_CTRL_PHY_ADDR); + else { + switch (switch_cfg) { + case ELINK_SWITCH_CFG_1G: + sc->port.phy_addr = + REG_RD(sc, + NIG_REG_SERDES0_CTRL_PHY_ADDR + port * 0x10); + break; + case ELINK_SWITCH_CFG_10G: + sc->port.phy_addr = + REG_RD(sc, + NIG_REG_XGXS0_CTRL_PHY_ADDR + port * 0x18); + break; + default: + PMD_DRV_LOG(ERR, sc, + "Invalid switch config in" + "link_config=0x%08x", + sc->port.link_config[0]); + return; + } + } + + PMD_DRV_LOG(INFO, sc, "PHY addr 0x%08x", sc->port.phy_addr); + + /* mask what we support according to speed_cap_mask per configuration */ + for (idx = 0; idx < cfg_size; idx++) { + if (!(sc->link_params.speed_cap_mask[idx] & + PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) { + sc->port.supported[idx] &= + ~ELINK_SUPPORTED_10baseT_Half; + } + + if (!(sc->link_params.speed_cap_mask[idx] & + PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL)) { + sc->port.supported[idx] &= + ~ELINK_SUPPORTED_10baseT_Full; + } + + if (!(sc->link_params.speed_cap_mask[idx] & + PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)) { + sc->port.supported[idx] &= + ~ELINK_SUPPORTED_100baseT_Half; + } + + if (!(sc->link_params.speed_cap_mask[idx] & + PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL)) { + sc->port.supported[idx] &= + ~ELINK_SUPPORTED_100baseT_Full; + } + + if (!(sc->link_params.speed_cap_mask[idx] & + PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) { + sc->port.supported[idx] &= + ~ELINK_SUPPORTED_1000baseT_Full; + } + + if (!(sc->link_params.speed_cap_mask[idx] & + PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)) { + sc->port.supported[idx] &= + ~ELINK_SUPPORTED_2500baseX_Full; + } + + if (!(sc->link_params.speed_cap_mask[idx] & + PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) { + sc->port.supported[idx] &= + ~ELINK_SUPPORTED_10000baseT_Full; + } + + if (!(sc->link_params.speed_cap_mask[idx] & + PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)) { + sc->port.supported[idx] &= + ~ELINK_SUPPORTED_20000baseKR2_Full; + } + } + + PMD_DRV_LOG(INFO, sc, "PHY supported 0=0x%08x 1=0x%08x", + sc->port.supported[0], sc->port.supported[1]); +} + +static void bnx2x_link_settings_requested(struct bnx2x_softc *sc) +{ + uint32_t 
link_config; + uint32_t idx; + uint32_t cfg_size = 0; + + sc->port.advertising[0] = 0; + sc->port.advertising[1] = 0; + + switch (sc->link_params.num_phys) { + case 1: + case 2: + cfg_size = 1; + break; + case 3: + cfg_size = 2; + break; + } + + for (idx = 0; idx < cfg_size; idx++) { + sc->link_params.req_duplex[idx] = DUPLEX_FULL; + link_config = sc->port.link_config[idx]; + + switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) { + case PORT_FEATURE_LINK_SPEED_AUTO: + if (sc->port.supported[idx] & ELINK_SUPPORTED_Autoneg) { + sc->link_params.req_line_speed[idx] = + ELINK_SPEED_AUTO_NEG; + sc->port.advertising[idx] |= + sc->port.supported[idx]; + if (sc->link_params.phy[ELINK_EXT_PHY1].type == + PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84833) + sc->port.advertising[idx] |= + (ELINK_SUPPORTED_100baseT_Half | + ELINK_SUPPORTED_100baseT_Full); + } else { + /* force 10G, no AN */ + sc->link_params.req_line_speed[idx] = + ELINK_SPEED_10000; + sc->port.advertising[idx] |= + (ADVERTISED_10000baseT_Full | + ADVERTISED_FIBRE); + continue; + } + break; + + case PORT_FEATURE_LINK_SPEED_10M_FULL: + if (sc-> + port.supported[idx] & ELINK_SUPPORTED_10baseT_Full) + { + sc->link_params.req_line_speed[idx] = + ELINK_SPEED_10; + sc->port.advertising[idx] |= + (ADVERTISED_10baseT_Full | ADVERTISED_TP); + } else { + PMD_DRV_LOG(ERR, sc, + "Invalid NVRAM config link_config=0x%08x " + "speed_cap_mask=0x%08x", + link_config, + sc-> + link_params.speed_cap_mask[idx]); + return; + } + break; + + case PORT_FEATURE_LINK_SPEED_10M_HALF: + if (sc-> + port.supported[idx] & ELINK_SUPPORTED_10baseT_Half) + { + sc->link_params.req_line_speed[idx] = + ELINK_SPEED_10; + sc->link_params.req_duplex[idx] = DUPLEX_HALF; + sc->port.advertising[idx] |= + (ADVERTISED_10baseT_Half | ADVERTISED_TP); + } else { + PMD_DRV_LOG(ERR, sc, + "Invalid NVRAM config link_config=0x%08x " + "speed_cap_mask=0x%08x", + link_config, + sc-> + link_params.speed_cap_mask[idx]); + return; + } + break; + + case PORT_FEATURE_LINK_SPEED_100M_FULL: + if (sc-> + port.supported[idx] & ELINK_SUPPORTED_100baseT_Full) + { + sc->link_params.req_line_speed[idx] = + ELINK_SPEED_100; + sc->port.advertising[idx] |= + (ADVERTISED_100baseT_Full | ADVERTISED_TP); + } else { + PMD_DRV_LOG(ERR, sc, + "Invalid NVRAM config link_config=0x%08x " + "speed_cap_mask=0x%08x", + link_config, + sc-> + link_params.speed_cap_mask[idx]); + return; + } + break; + + case PORT_FEATURE_LINK_SPEED_100M_HALF: + if (sc-> + port.supported[idx] & ELINK_SUPPORTED_100baseT_Half) + { + sc->link_params.req_line_speed[idx] = + ELINK_SPEED_100; + sc->link_params.req_duplex[idx] = DUPLEX_HALF; + sc->port.advertising[idx] |= + (ADVERTISED_100baseT_Half | ADVERTISED_TP); + } else { + PMD_DRV_LOG(ERR, sc, + "Invalid NVRAM config link_config=0x%08x " + "speed_cap_mask=0x%08x", + link_config, + sc-> + link_params.speed_cap_mask[idx]); + return; + } + break; + + case PORT_FEATURE_LINK_SPEED_1G: + if (sc->port.supported[idx] & + ELINK_SUPPORTED_1000baseT_Full) { + sc->link_params.req_line_speed[idx] = + ELINK_SPEED_1000; + sc->port.advertising[idx] |= + (ADVERTISED_1000baseT_Full | ADVERTISED_TP); + } else { + PMD_DRV_LOG(ERR, sc, + "Invalid NVRAM config link_config=0x%08x " + "speed_cap_mask=0x%08x", + link_config, + sc-> + link_params.speed_cap_mask[idx]); + return; + } + break; + + case PORT_FEATURE_LINK_SPEED_2_5G: + if (sc->port.supported[idx] & + ELINK_SUPPORTED_2500baseX_Full) { + sc->link_params.req_line_speed[idx] = + ELINK_SPEED_2500; + sc->port.advertising[idx] |= + (ADVERTISED_2500baseX_Full | 
ADVERTISED_TP); + } else { + PMD_DRV_LOG(ERR, sc, + "Invalid NVRAM config link_config=0x%08x " + "speed_cap_mask=0x%08x", + link_config, + sc-> + link_params.speed_cap_mask[idx]); + return; + } + break; + + case PORT_FEATURE_LINK_SPEED_10G_CX4: + if (sc->port.supported[idx] & + ELINK_SUPPORTED_10000baseT_Full) { + sc->link_params.req_line_speed[idx] = + ELINK_SPEED_10000; + sc->port.advertising[idx] |= + (ADVERTISED_10000baseT_Full | + ADVERTISED_FIBRE); + } else { + PMD_DRV_LOG(ERR, sc, + "Invalid NVRAM config link_config=0x%08x " + "speed_cap_mask=0x%08x", + link_config, + sc-> + link_params.speed_cap_mask[idx]); + return; + } + break; + + case PORT_FEATURE_LINK_SPEED_20G: + sc->link_params.req_line_speed[idx] = ELINK_SPEED_20000; + break; + + default: + PMD_DRV_LOG(ERR, sc, + "Invalid NVRAM config link_config=0x%08x " + "speed_cap_mask=0x%08x", link_config, + sc->link_params.speed_cap_mask[idx]); + sc->link_params.req_line_speed[idx] = + ELINK_SPEED_AUTO_NEG; + sc->port.advertising[idx] = sc->port.supported[idx]; + break; + } + + sc->link_params.req_flow_ctrl[idx] = + (link_config & PORT_FEATURE_FLOW_CONTROL_MASK); + + if (sc->link_params.req_flow_ctrl[idx] == ELINK_FLOW_CTRL_AUTO) { + if (! + (sc-> + port.supported[idx] & ELINK_SUPPORTED_Autoneg)) { + sc->link_params.req_flow_ctrl[idx] = + ELINK_FLOW_CTRL_NONE; + } else { + bnx2x_set_requested_fc(sc); + } + } + } +} + +static void bnx2x_get_phy_info(struct bnx2x_softc *sc) +{ + uint8_t port = SC_PORT(sc); + uint32_t eee_mode; + + PMD_INIT_FUNC_TRACE(sc); + + /* shmem data already read in bnx2x_get_shmem_info() */ + + bnx2x_link_settings_supported(sc, sc->link_params.switch_cfg); + bnx2x_link_settings_requested(sc); + + /* configure link feature according to nvram value */ + eee_mode = + (((SHMEM_RD(sc, dev_info.port_feature_config[port].eee_power_mode)) + & PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >> + PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT); + if (eee_mode != PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED) { + sc->link_params.eee_mode = (ELINK_EEE_MODE_ADV_LPI | + ELINK_EEE_MODE_ENABLE_LPI | + ELINK_EEE_MODE_OUTPUT_TIME); + } else { + sc->link_params.eee_mode = 0; + } + + /* get the media type */ + bnx2x_media_detect(sc); +} + +static void bnx2x_set_modes_bitmap(struct bnx2x_softc *sc) +{ + uint32_t flags = MODE_ASIC | MODE_PORT2; + + if (CHIP_IS_E2(sc)) { + flags |= MODE_E2; + } else if (CHIP_IS_E3(sc)) { + flags |= MODE_E3; + if (CHIP_REV(sc) == CHIP_REV_Ax) { + flags |= MODE_E3_A0; + } else { /*if (CHIP_REV(sc) == CHIP_REV_Bx) */ + + flags |= MODE_E3_B0 | MODE_COS3; + } + } + + if (IS_MF(sc)) { + flags |= MODE_MF; + switch (sc->devinfo.mf_info.mf_mode) { + case MULTI_FUNCTION_SD: + flags |= MODE_MF_SD; + break; + case MULTI_FUNCTION_SI: + flags |= MODE_MF_SI; + break; + case MULTI_FUNCTION_AFEX: + flags |= MODE_MF_AFEX; + break; + } + } else { + flags |= MODE_SF; + } + +#if defined(__LITTLE_ENDIAN) + flags |= MODE_LITTLE_ENDIAN; +#else /* __BIG_ENDIAN */ + flags |= MODE_BIG_ENDIAN; +#endif + + INIT_MODE_FLAGS(sc) = flags; +} + +int bnx2x_alloc_hsi_mem(struct bnx2x_softc *sc) +{ + struct bnx2x_fastpath *fp; + char buf[32]; + uint32_t i; + + if (IS_PF(sc)) { + /************************/ + /* DEFAULT STATUS BLOCK */ + /************************/ + + if (bnx2x_dma_alloc(sc, sizeof(struct host_sp_status_block), + &sc->def_sb_dma, "def_sb", + RTE_CACHE_LINE_SIZE) != 0) { + return -1; + } + + sc->def_sb = + (struct host_sp_status_block *)sc->def_sb_dma.vaddr; + /***************/ + /* EVENT QUEUE */ + /***************/ + + if (bnx2x_dma_alloc(sc, 
BNX2X_PAGE_SIZE, + &sc->eq_dma, "ev_queue", + RTE_CACHE_LINE_SIZE) != 0) { + sc->def_sb = NULL; + return -1; + } + + sc->eq = (union event_ring_elem *)sc->eq_dma.vaddr; + + /*************/ + /* SLOW PATH */ + /*************/ + + if (bnx2x_dma_alloc(sc, sizeof(struct bnx2x_slowpath), + &sc->sp_dma, "sp", + RTE_CACHE_LINE_SIZE) != 0) { + sc->eq = NULL; + sc->def_sb = NULL; + return -1; + } + + sc->sp = (struct bnx2x_slowpath *)sc->sp_dma.vaddr; + + /*******************/ + /* SLOW PATH QUEUE */ + /*******************/ + + if (bnx2x_dma_alloc(sc, BNX2X_PAGE_SIZE, + &sc->spq_dma, "sp_queue", + RTE_CACHE_LINE_SIZE) != 0) { + sc->sp = NULL; + sc->eq = NULL; + sc->def_sb = NULL; + return -1; + } + + sc->spq = (struct eth_spe *)sc->spq_dma.vaddr; + + /***************************/ + /* FW DECOMPRESSION BUFFER */ + /***************************/ + + if (bnx2x_dma_alloc(sc, FW_BUF_SIZE, &sc->gz_buf_dma, + "fw_buf", RTE_CACHE_LINE_SIZE) != 0) { + sc->spq = NULL; + sc->sp = NULL; + sc->eq = NULL; + sc->def_sb = NULL; + return -1; + } + + sc->gz_buf = (void *)sc->gz_buf_dma.vaddr; + } + + /*************/ + /* FASTPATHS */ + /*************/ + + /* allocate DMA memory for each fastpath structure */ + for (i = 0; i < sc->num_queues; i++) { + fp = &sc->fp[i]; + fp->sc = sc; + fp->index = i; + + /*******************/ + /* FP STATUS BLOCK */ + /*******************/ + + snprintf(buf, sizeof(buf), "fp_%d_sb", i); + if (bnx2x_dma_alloc(sc, sizeof(union bnx2x_host_hc_status_block), + &fp->sb_dma, buf, RTE_CACHE_LINE_SIZE) != 0) { + PMD_DRV_LOG(NOTICE, sc, "Failed to alloc %s", buf); + return -1; + } else { + if (CHIP_IS_E2E3(sc)) { + fp->status_block.e2_sb = + (struct host_hc_status_block_e2 *) + fp->sb_dma.vaddr; + } else { + fp->status_block.e1x_sb = + (struct host_hc_status_block_e1x *) + fp->sb_dma.vaddr; + } + } + } + + return 0; +} + +void bnx2x_free_hsi_mem(struct bnx2x_softc *sc) +{ + struct bnx2x_fastpath *fp; + int i; + + for (i = 0; i < sc->num_queues; i++) { + fp = &sc->fp[i]; + + /*******************/ + /* FP STATUS BLOCK */ + /*******************/ + + memset(&fp->status_block, 0, sizeof(fp->status_block)); + bnx2x_dma_free(&fp->sb_dma); + } + + if (IS_PF(sc)) { + /***************************/ + /* FW DECOMPRESSION BUFFER */ + /***************************/ + + bnx2x_dma_free(&sc->gz_buf_dma); + sc->gz_buf = NULL; + + /*******************/ + /* SLOW PATH QUEUE */ + /*******************/ + + bnx2x_dma_free(&sc->spq_dma); + sc->spq = NULL; + + /*************/ + /* SLOW PATH */ + /*************/ + + bnx2x_dma_free(&sc->sp_dma); + sc->sp = NULL; + + /***************/ + /* EVENT QUEUE */ + /***************/ + + bnx2x_dma_free(&sc->eq_dma); + sc->eq = NULL; + + /************************/ + /* DEFAULT STATUS BLOCK */ + /************************/ + + bnx2x_dma_free(&sc->def_sb_dma); + sc->def_sb = NULL; + } +} + +/* +* Previous driver DMAE transaction may have occurred when pre-boot stage +* ended and boot began. This would invalidate the addresses of the +* transaction, resulting in was-error bit set in the PCI causing all +* hw-to-host PCIe transactions to timeout. 
If this happened we want to clear +* the interrupt which detected this from the pglueb and the was-done bit +*/ +static void bnx2x_prev_interrupted_dmae(struct bnx2x_softc *sc) +{ + uint32_t val; + + if (!CHIP_IS_E1x(sc)) { + val = REG_RD(sc, PGLUE_B_REG_PGLUE_B_INT_STS); + if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) { + REG_WR(sc, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, + 1 << SC_FUNC(sc)); + } + } +} + +static int bnx2x_prev_mcp_done(struct bnx2x_softc *sc) +{ + uint32_t rc = bnx2x_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, + DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET); + if (!rc) { + PMD_DRV_LOG(NOTICE, sc, "MCP response failure, aborting"); + return -1; + } + + return 0; +} + +static struct bnx2x_prev_list_node *bnx2x_prev_path_get_entry(struct bnx2x_softc *sc) +{ + struct bnx2x_prev_list_node *tmp; + + LIST_FOREACH(tmp, &bnx2x_prev_list, node) { + if ((sc->pcie_bus == tmp->bus) && + (sc->pcie_device == tmp->slot) && + (SC_PATH(sc) == tmp->path)) { + return tmp; + } + } + + return NULL; +} + +static uint8_t bnx2x_prev_is_path_marked(struct bnx2x_softc *sc) +{ + struct bnx2x_prev_list_node *tmp; + int rc = FALSE; + + rte_spinlock_lock(&bnx2x_prev_mtx); + + tmp = bnx2x_prev_path_get_entry(sc); + if (tmp) { + if (tmp->aer) { + PMD_DRV_LOG(DEBUG, sc, + "Path %d/%d/%d was marked by AER", + sc->pcie_bus, sc->pcie_device, SC_PATH(sc)); + } else { + rc = TRUE; + PMD_DRV_LOG(DEBUG, sc, + "Path %d/%d/%d was already cleaned from previous drivers", + sc->pcie_bus, sc->pcie_device, SC_PATH(sc)); + } + } + + rte_spinlock_unlock(&bnx2x_prev_mtx); + + return rc; +} + +static int bnx2x_prev_mark_path(struct bnx2x_softc *sc, uint8_t after_undi) +{ + struct bnx2x_prev_list_node *tmp; + + rte_spinlock_lock(&bnx2x_prev_mtx); + + /* Check whether the entry for this path already exists */ + tmp = bnx2x_prev_path_get_entry(sc); + if (tmp) { + if (!tmp->aer) { + PMD_DRV_LOG(DEBUG, sc, + "Re-marking AER in path %d/%d/%d", + sc->pcie_bus, sc->pcie_device, SC_PATH(sc)); + } else { + PMD_DRV_LOG(DEBUG, sc, + "Removing AER indication from path %d/%d/%d", + sc->pcie_bus, sc->pcie_device, SC_PATH(sc)); + tmp->aer = 0; + } + + rte_spinlock_unlock(&bnx2x_prev_mtx); + return 0; + } + + rte_spinlock_unlock(&bnx2x_prev_mtx); + + /* Create an entry for this path and add it */ + tmp = rte_malloc("", sizeof(struct bnx2x_prev_list_node), + RTE_CACHE_LINE_SIZE); + if (!tmp) { + PMD_DRV_LOG(NOTICE, sc, "Failed to allocate 'bnx2x_prev_list_node'"); + return -1; + } + + tmp->bus = sc->pcie_bus; + tmp->slot = sc->pcie_device; + tmp->path = SC_PATH(sc); + tmp->aer = 0; + tmp->undi = after_undi ? 
(1 << SC_PORT(sc)) : 0; + + rte_spinlock_lock(&bnx2x_prev_mtx); + + LIST_INSERT_HEAD(&bnx2x_prev_list, tmp, node); + + rte_spinlock_unlock(&bnx2x_prev_mtx); + + return 0; +} + +static int bnx2x_do_flr(struct bnx2x_softc *sc) +{ + int i; + + /* only E2 and onwards support FLR */ + if (CHIP_IS_E1x(sc)) { + PMD_DRV_LOG(WARNING, sc, "FLR not supported in E1H"); + return -1; + } + + /* only bootcode REQ_BC_VER_4_INITIATE_FLR and onwards support flr */ + if (sc->devinfo.bc_ver < REQ_BC_VER_4_INITIATE_FLR) { + PMD_DRV_LOG(WARNING, sc, + "FLR not supported by BC_VER: 0x%08x", + sc->devinfo.bc_ver); + return -1; + } + + /* Wait for Transaction Pending bit clean */ + for (i = 0; i < 4; i++) { + if (i) { + DELAY(((1 << (i - 1)) * 100) * 1000); + } + + if (!bnx2x_is_pcie_pending(sc)) { + goto clear; + } + } + + PMD_DRV_LOG(NOTICE, sc, "PCIE transaction is not cleared, " + "proceeding with reset anyway"); + +clear: + bnx2x_fw_command(sc, DRV_MSG_CODE_INITIATE_FLR, 0); + + return 0; +} + +struct bnx2x_mac_vals { + uint32_t xmac_addr; + uint32_t xmac_val; + uint32_t emac_addr; + uint32_t emac_val; + uint32_t umac_addr; + uint32_t umac_val; + uint32_t bmac_addr; + uint32_t bmac_val[2]; +}; + +static void +bnx2x_prev_unload_close_mac(struct bnx2x_softc *sc, struct bnx2x_mac_vals *vals) +{ + uint32_t val, base_addr, offset, mask, reset_reg; + uint8_t mac_stopped = FALSE; + uint8_t port = SC_PORT(sc); + uint32_t wb_data[2]; + + /* reset addresses as they also mark which values were changed */ + vals->bmac_addr = 0; + vals->umac_addr = 0; + vals->xmac_addr = 0; + vals->emac_addr = 0; + + reset_reg = REG_RD(sc, MISC_REG_RESET_REG_2); + + if (!CHIP_IS_E3(sc)) { + val = REG_RD(sc, NIG_REG_BMAC0_REGS_OUT_EN + port * 4); + mask = MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port; + if ((mask & reset_reg) && val) { + base_addr = SC_PORT(sc) ? NIG_REG_INGRESS_BMAC1_MEM + : NIG_REG_INGRESS_BMAC0_MEM; + offset = CHIP_IS_E2(sc) ? BIGMAC2_REGISTER_BMAC_CONTROL + : BIGMAC_REGISTER_BMAC_CONTROL; + + /* + * use rd/wr since we cannot use dmae. This is safe + * since MCP won't access the bus due to the request + * to unload, and no function on the path can be + * loaded at this time. + */ + wb_data[0] = REG_RD(sc, base_addr + offset); + wb_data[1] = REG_RD(sc, base_addr + offset + 0x4); + vals->bmac_addr = base_addr + offset; + vals->bmac_val[0] = wb_data[0]; + vals->bmac_val[1] = wb_data[1]; + wb_data[0] &= ~ELINK_BMAC_CONTROL_RX_ENABLE; + REG_WR(sc, vals->bmac_addr, wb_data[0]); + REG_WR(sc, vals->bmac_addr + 0x4, wb_data[1]); + } + + vals->emac_addr = NIG_REG_NIG_EMAC0_EN + SC_PORT(sc) * 4; + vals->emac_val = REG_RD(sc, vals->emac_addr); + REG_WR(sc, vals->emac_addr, 0); + mac_stopped = TRUE; + } else { + if (reset_reg & MISC_REGISTERS_RESET_REG_2_XMAC) { + base_addr = SC_PORT(sc) ? GRCBASE_XMAC1 : GRCBASE_XMAC0; + val = REG_RD(sc, base_addr + XMAC_REG_PFC_CTRL_HI); + REG_WR(sc, base_addr + XMAC_REG_PFC_CTRL_HI, + val & ~(1 << 1)); + REG_WR(sc, base_addr + XMAC_REG_PFC_CTRL_HI, + val | (1 << 1)); + vals->xmac_addr = base_addr + XMAC_REG_CTRL; + vals->xmac_val = REG_RD(sc, vals->xmac_addr); + REG_WR(sc, vals->xmac_addr, 0); + mac_stopped = TRUE; + } + + mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port; + if (mask & reset_reg) { + base_addr = SC_PORT(sc) ? 
GRCBASE_UMAC1 : GRCBASE_UMAC0; + vals->umac_addr = base_addr + UMAC_REG_COMMAND_CONFIG; + vals->umac_val = REG_RD(sc, vals->umac_addr); + REG_WR(sc, vals->umac_addr, 0); + mac_stopped = TRUE; + } + } + + if (mac_stopped) { + DELAY(20000); + } +} + +#define BNX2X_PREV_UNDI_PROD_ADDR(p) (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4)) +#define BNX2X_PREV_UNDI_RCQ(val) ((val) & 0xffff) +#define BNX2X_PREV_UNDI_BD(val) ((val) >> 16 & 0xffff) +#define BNX2X_PREV_UNDI_PROD(rcq, bd) ((bd) << 16 | (rcq)) + +static void +bnx2x_prev_unload_undi_inc(struct bnx2x_softc *sc, uint8_t port, uint8_t inc) +{ + uint16_t rcq, bd; + uint32_t tmp_reg = REG_RD(sc, BNX2X_PREV_UNDI_PROD_ADDR(port)); + + rcq = BNX2X_PREV_UNDI_RCQ(tmp_reg) + inc; + bd = BNX2X_PREV_UNDI_BD(tmp_reg) + inc; + + tmp_reg = BNX2X_PREV_UNDI_PROD(rcq, bd); + REG_WR(sc, BNX2X_PREV_UNDI_PROD_ADDR(port), tmp_reg); +} + +static int bnx2x_prev_unload_common(struct bnx2x_softc *sc) +{ + uint32_t reset_reg, tmp_reg = 0, rc; + uint8_t prev_undi = FALSE; + struct bnx2x_mac_vals mac_vals; + uint32_t timer_count = 1000; + uint32_t prev_brb; + + /* + * It is possible a previous function received 'common' answer, + * but hasn't loaded yet, therefore creating a scenario of + * multiple functions receiving 'common' on the same path. + */ + memset(&mac_vals, 0, sizeof(mac_vals)); + + if (bnx2x_prev_is_path_marked(sc)) { + return bnx2x_prev_mcp_done(sc); + } + + reset_reg = REG_RD(sc, MISC_REG_RESET_REG_1); + + /* Reset should be performed after BRB is emptied */ + if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) { + /* Close the MAC Rx to prevent BRB from filling up */ + bnx2x_prev_unload_close_mac(sc, &mac_vals); + + /* close LLH filters towards the BRB */ + elink_set_rx_filter(&sc->link_params, 0); + + /* + * Check if the UNDI driver was previously loaded. 
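+ * (UNDI here means the legacy pre-boot/PXE network driver.)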
+ * UNDI driver initializes CID offset for normal bell to 0x7 + */ + if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_DORQ) { + tmp_reg = REG_RD(sc, DORQ_REG_NORM_CID_OFST); + if (tmp_reg == 0x7) { + PMD_DRV_LOG(DEBUG, sc, "UNDI previously loaded"); + prev_undi = TRUE; + /* clear the UNDI indication */ + REG_WR(sc, DORQ_REG_NORM_CID_OFST, 0); + /* clear possible idle check errors */ + REG_RD(sc, NIG_REG_NIG_INT_STS_CLR_0); + } + } + + /* wait until BRB is empty */ + tmp_reg = REG_RD(sc, BRB1_REG_NUM_OF_FULL_BLOCKS); + while (timer_count) { + prev_brb = tmp_reg; + + tmp_reg = REG_RD(sc, BRB1_REG_NUM_OF_FULL_BLOCKS); + if (!tmp_reg) { + break; + } + + PMD_DRV_LOG(DEBUG, sc, "BRB still has 0x%08x", tmp_reg); + + /* reset timer as long as BRB actually gets emptied */ + if (prev_brb > tmp_reg) { + timer_count = 1000; + } else { + timer_count--; + } + + /* If UNDI resides in memory, manually increment it */ + if (prev_undi) { + bnx2x_prev_unload_undi_inc(sc, SC_PORT(sc), 1); + } + + DELAY(10); + } + + if (!timer_count) { + PMD_DRV_LOG(NOTICE, sc, "Failed to empty BRB"); + } + } + + /* No packets are in the pipeline, path is ready for reset */ + bnx2x_reset_common(sc); + + if (mac_vals.xmac_addr) { + REG_WR(sc, mac_vals.xmac_addr, mac_vals.xmac_val); + } + if (mac_vals.umac_addr) { + REG_WR(sc, mac_vals.umac_addr, mac_vals.umac_val); + } + if (mac_vals.emac_addr) { + REG_WR(sc, mac_vals.emac_addr, mac_vals.emac_val); + } + if (mac_vals.bmac_addr) { + REG_WR(sc, mac_vals.bmac_addr, mac_vals.bmac_val[0]); + REG_WR(sc, mac_vals.bmac_addr + 4, mac_vals.bmac_val[1]); + } + + rc = bnx2x_prev_mark_path(sc, prev_undi); + if (rc) { + bnx2x_prev_mcp_done(sc); + return rc; + } + + return bnx2x_prev_mcp_done(sc); +} + +static int bnx2x_prev_unload_uncommon(struct bnx2x_softc *sc) +{ + int rc; + + /* Test if previous unload process was already finished for this path */ + if (bnx2x_prev_is_path_marked(sc)) { + return bnx2x_prev_mcp_done(sc); + } + + /* + * If function has FLR capabilities, and existing FW version matches + * the one required, then FLR will be sufficient to clean any residue + * left by previous driver + */ + rc = bnx2x_nic_load_analyze_req(sc, FW_MSG_CODE_DRV_LOAD_FUNCTION); + if (!rc) { + /* fw version is good */ + rc = bnx2x_do_flr(sc); + } + + if (!rc) { + /* FLR was performed */ + return 0; + } + + PMD_DRV_LOG(INFO, sc, "Could not FLR"); + + /* Close the MCP request, return failure */ + rc = bnx2x_prev_mcp_done(sc); + if (!rc) { + rc = BNX2X_PREV_WAIT_NEEDED; + } + + return rc; +} + +static int bnx2x_prev_unload(struct bnx2x_softc *sc) +{ + int time_counter = 10; + uint32_t fw, hw_lock_reg, hw_lock_val; + uint32_t rc = 0; + + PMD_INIT_FUNC_TRACE(sc); + + /* + * Clear HW from errors which may have resulted from an interrupted + * DMAE transaction. + */ + bnx2x_prev_interrupted_dmae(sc); + + /* Release previously held locks */ + hw_lock_reg = (SC_FUNC(sc) <= 5) ? 
+ (MISC_REG_DRIVER_CONTROL_1 + SC_FUNC(sc) * 8) : + (MISC_REG_DRIVER_CONTROL_7 + (SC_FUNC(sc) - 6) * 8); + + hw_lock_val = (REG_RD(sc, hw_lock_reg)); + if (hw_lock_val) { + if (hw_lock_val & HW_LOCK_RESOURCE_NVRAM) { + PMD_DRV_LOG(DEBUG, sc, "Releasing previously held NVRAM lock\n"); + REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB, + (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << SC_PORT(sc))); + } + PMD_DRV_LOG(DEBUG, sc, "Releasing previously held HW lock\n"); + REG_WR(sc, hw_lock_reg, 0xffffffff); + } + + if (MCPR_ACCESS_LOCK_LOCK & REG_RD(sc, MCP_REG_MCPR_ACCESS_LOCK)) { + PMD_DRV_LOG(DEBUG, sc, "Releasing previously held ALR\n"); + REG_WR(sc, MCP_REG_MCPR_ACCESS_LOCK, 0); + } + + do { + /* Lock MCP using an unload request */ + fw = bnx2x_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0); + if (!fw) { + PMD_DRV_LOG(NOTICE, sc, "MCP response failure, aborting"); + rc = -1; + break; + } + + if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON) { + rc = bnx2x_prev_unload_common(sc); + break; + } + + /* non-common reply from MCP might require looping */ + rc = bnx2x_prev_unload_uncommon(sc); + if (rc != BNX2X_PREV_WAIT_NEEDED) { + break; + } + + DELAY(20000); + } while (--time_counter); + + if (!time_counter || rc) { + PMD_DRV_LOG(NOTICE, sc, "Failed to unload previous driver!"); + rc = -1; + } + + return rc; +} + +static void +bnx2x_dcbx_set_state(struct bnx2x_softc *sc, uint8_t dcb_on, uint32_t dcbx_enabled) +{ + if (!CHIP_IS_E1x(sc)) { + sc->dcb_state = dcb_on; + sc->dcbx_enabled = dcbx_enabled; + } else { + sc->dcb_state = FALSE; + sc->dcbx_enabled = BNX2X_DCBX_ENABLED_INVALID; + } + PMD_DRV_LOG(DEBUG, sc, + "DCB state [%s:%s]", + dcb_on ? "ON" : "OFF", + (dcbx_enabled == BNX2X_DCBX_ENABLED_OFF) ? "user-mode" : + (dcbx_enabled == + BNX2X_DCBX_ENABLED_ON_NEG_OFF) ? "on-chip static" + : (dcbx_enabled == + BNX2X_DCBX_ENABLED_ON_NEG_ON) ? 
+ "on-chip with negotiation" : "invalid"); +} + +static int bnx2x_set_qm_cid_count(struct bnx2x_softc *sc) +{ + int cid_count = BNX2X_L2_MAX_CID(sc); + + if (CNIC_SUPPORT(sc)) { + cid_count += CNIC_CID_MAX; + } + + return roundup(cid_count, QM_CID_ROUND); +} + +static void bnx2x_init_multi_cos(struct bnx2x_softc *sc) +{ + int pri, cos; + + uint32_t pri_map = 0; + + for (pri = 0; pri < BNX2X_MAX_PRIORITY; pri++) { + cos = ((pri_map & (0xf << (pri * 4))) >> (pri * 4)); + if (cos < sc->max_cos) { + sc->prio_to_cos[pri] = cos; + } else { + PMD_DRV_LOG(WARNING, sc, + "Invalid COS %d for priority %d " + "(max COS is %d), setting to 0", cos, pri, + (sc->max_cos - 1)); + sc->prio_to_cos[pri] = 0; + } + } +} + +static int bnx2x_pci_get_caps(struct bnx2x_softc *sc) +{ + struct { + uint8_t id; + uint8_t next; + } pci_cap; + uint16_t status; + struct bnx2x_pci_cap *cap; + + cap = sc->pci_caps = rte_zmalloc("caps", sizeof(struct bnx2x_pci_cap), + RTE_CACHE_LINE_SIZE); + if (!cap) { + PMD_DRV_LOG(NOTICE, sc, "Failed to allocate memory"); + return -ENOMEM; + } + +#ifndef RTE_EXEC_ENV_FREEBSD + pci_read(sc, PCI_STATUS, &status, 2); + if (!(status & PCI_STATUS_CAP_LIST)) { +#else + pci_read(sc, PCIR_STATUS, &status, 2); + if (!(status & PCIM_STATUS_CAPPRESENT)) { +#endif + PMD_DRV_LOG(NOTICE, sc, "PCIe capability reading failed"); + return -1; + } + +#ifndef RTE_EXEC_ENV_FREEBSD + pci_read(sc, PCI_CAPABILITY_LIST, &pci_cap.next, 1); +#else + pci_read(sc, PCIR_CAP_PTR, &pci_cap.next, 1); +#endif + while (pci_cap.next) { + cap->addr = pci_cap.next & ~3; + pci_read(sc, pci_cap.next & ~3, &pci_cap, 2); + if (pci_cap.id == 0xff) + break; + cap->id = pci_cap.id; + cap->type = BNX2X_PCI_CAP; + cap->next = rte_zmalloc("pci_cap", + sizeof(struct bnx2x_pci_cap), + RTE_CACHE_LINE_SIZE); + if (!cap->next) { + PMD_DRV_LOG(NOTICE, sc, "Failed to allocate memory"); + return -ENOMEM; + } + cap = cap->next; + } + + return 0; +} + +static void bnx2x_init_rte(struct bnx2x_softc *sc) +{ + if (IS_VF(sc)) { + sc->max_tx_queues = min(BNX2X_VF_MAX_QUEUES_PER_VF, + sc->igu_sb_cnt); + sc->max_rx_queues = min(BNX2X_VF_MAX_QUEUES_PER_VF, + sc->igu_sb_cnt); + } else { + sc->max_rx_queues = BNX2X_MAX_RSS_COUNT(sc); + sc->max_tx_queues = sc->max_rx_queues; + } +} + +#define FW_HEADER_LEN 104 +#define FW_NAME_57711 "/lib/firmware/bnx2x/bnx2x-e1h-7.13.11.0.fw" +#define FW_NAME_57810 "/lib/firmware/bnx2x/bnx2x-e2-7.13.11.0.fw" + +void bnx2x_load_firmware(struct bnx2x_softc *sc) +{ + const char *fwname; + int f; + struct stat st; + + fwname = sc->devinfo.device_id == CHIP_NUM_57711 + ? 
FW_NAME_57711 : FW_NAME_57810; + f = open(fwname, O_RDONLY); + if (f < 0) { + PMD_DRV_LOG(NOTICE, sc, "Can't open firmware file"); + return; + } + + if (fstat(f, &st) < 0) { + PMD_DRV_LOG(NOTICE, sc, "Can't stat firmware file"); + close(f); + return; + } + + sc->firmware = rte_zmalloc("bnx2x_fw", st.st_size, RTE_CACHE_LINE_SIZE); + if (!sc->firmware) { + PMD_DRV_LOG(NOTICE, sc, "Can't allocate memory for firmware"); + close(f); + return; + } + + if (read(f, sc->firmware, st.st_size) != st.st_size) { + PMD_DRV_LOG(NOTICE, sc, "Can't read firmware data"); + close(f); + return; + } + close(f); + + sc->fw_len = st.st_size; + if (sc->fw_len < FW_HEADER_LEN) { + PMD_DRV_LOG(NOTICE, sc, + "Invalid fw size: %" PRIu64, sc->fw_len); + return; + } + PMD_DRV_LOG(DEBUG, sc, "fw_len = %" PRIu64, sc->fw_len); +} + +static void +bnx2x_data_to_init_ops(uint8_t * data, struct raw_op *dst, uint32_t len) +{ + uint32_t *src = (uint32_t *) data; + uint32_t i, j, tmp; + + for (i = 0, j = 0; i < len / 8; ++i, j += 2) { + tmp = rte_be_to_cpu_32(src[j]); + dst[i].op = (tmp >> 24) & 0xFF; + dst[i].offset = tmp & 0xFFFFFF; + dst[i].raw_data = rte_be_to_cpu_32(src[j + 1]); + } +} + +static void +bnx2x_data_to_init_offsets(uint8_t * data, uint16_t * dst, uint32_t len) +{ + uint16_t *src = (uint16_t *) data; + uint32_t i; + + for (i = 0; i < len / 2; ++i) + dst[i] = rte_be_to_cpu_16(src[i]); +} + +static void bnx2x_data_to_init_data(uint8_t * data, uint32_t * dst, uint32_t len) +{ + uint32_t *src = (uint32_t *) data; + uint32_t i; + + for (i = 0; i < len / 4; ++i) + dst[i] = rte_be_to_cpu_32(src[i]); +} + +static void bnx2x_data_to_iro_array(uint8_t * data, struct iro *dst, uint32_t len) +{ + uint32_t *src = (uint32_t *) data; + uint32_t i, j, tmp; + + for (i = 0, j = 0; i < len / sizeof(struct iro); ++i, ++j) { + dst[i].base = rte_be_to_cpu_32(src[j++]); + tmp = rte_be_to_cpu_32(src[j]); + dst[i].m1 = (tmp >> 16) & 0xFFFF; + dst[i].m2 = tmp & 0xFFFF; + ++j; + tmp = rte_be_to_cpu_32(src[j]); + dst[i].m3 = (tmp >> 16) & 0xFFFF; + dst[i].size = tmp & 0xFFFF; + } +} + +/* +* Device attach function. +* +* Allocates device resources, performs secondary chip identification, and +* initializes driver instance variables. This function is called from driver +* load after a successful probe. +* +* Returns: +* 0 = Success, >0 = Failure +*/ +int bnx2x_attach(struct bnx2x_softc *sc) +{ + int rc; + + PMD_DRV_LOG(DEBUG, sc, "Starting attach..."); + + rc = bnx2x_pci_get_caps(sc); + if (rc) { + PMD_DRV_LOG(NOTICE, sc, "PCIe caps reading was failed"); + return rc; + } + + sc->state = BNX2X_STATE_CLOSED; + + pci_write_long(sc, PCICFG_GRC_ADDRESS, PCICFG_VENDOR_ID_OFFSET); + + sc->igu_base_addr = IS_VF(sc) ? PXP_VF_ADDR_IGU_START : BAR_IGU_INTMEM; + + /* get PCI capabilites */ + bnx2x_probe_pci_caps(sc); + + if (sc->devinfo.pcie_msix_cap_reg != 0) { + uint32_t val; + pci_read(sc, + (sc->devinfo.pcie_msix_cap_reg + PCIR_MSIX_CTRL), &val, + 2); + sc->igu_sb_cnt = (val & PCIM_MSIXCTRL_TABLE_SIZE) + 1; + } else { + sc->igu_sb_cnt = 1; + } + + /* Init RTE stuff */ + bnx2x_init_rte(sc); + + if (IS_PF(sc)) { + /* Enable internal target-read (in case we are probed after PF + * FLR). Must be done prior to any BAR read access. 
Only for + * 57712 and up + */ + if (!CHIP_IS_E1x(sc)) { + REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, + 1); + DELAY(200000); + } + + /* get device info and set params */ + if (bnx2x_get_device_info(sc) != 0) { + PMD_DRV_LOG(NOTICE, sc, "getting device info"); + return -ENXIO; + } + +/* get phy settings from shmem and 'and' against admin settings */ + bnx2x_get_phy_info(sc); + } else { + /* Left mac of VF unfilled, PF should set it for VF */ + memset(sc->link_params.mac_addr, 0, RTE_ETHER_ADDR_LEN); + } + + sc->wol = 0; + + /* set the default MTU (changed via ifconfig) */ + sc->mtu = RTE_ETHER_MTU; + + bnx2x_set_modes_bitmap(sc); + + /* need to reset chip if UNDI was active */ + if (IS_PF(sc) && !BNX2X_NOMCP(sc)) { +/* init fw_seq */ + sc->fw_seq = + (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_mb_header) & + DRV_MSG_SEQ_NUMBER_MASK); + PMD_DRV_LOG(DEBUG, sc, "prev unload fw_seq 0x%04x", + sc->fw_seq); + bnx2x_prev_unload(sc); + } + + bnx2x_dcbx_set_state(sc, FALSE, BNX2X_DCBX_ENABLED_OFF); + + /* calculate qm_cid_count */ + sc->qm_cid_count = bnx2x_set_qm_cid_count(sc); + + sc->max_cos = 1; + bnx2x_init_multi_cos(sc); + + return 0; +} + +static void +bnx2x_igu_ack_sb(struct bnx2x_softc *sc, uint8_t igu_sb_id, uint8_t segment, + uint16_t index, uint8_t op, uint8_t update) +{ + uint32_t igu_addr = sc->igu_base_addr; + igu_addr += (IGU_CMD_INT_ACK_BASE + igu_sb_id) * 8; + bnx2x_igu_ack_sb_gen(sc, segment, index, op, update, igu_addr); +} + +static void +bnx2x_ack_sb(struct bnx2x_softc *sc, uint8_t igu_sb_id, uint8_t storm, + uint16_t index, uint8_t op, uint8_t update) +{ + if (unlikely(sc->devinfo.int_block == INT_BLOCK_HC)) + bnx2x_hc_ack_sb(sc, igu_sb_id, storm, index, op, update); + else { + uint8_t segment; + if (CHIP_INT_MODE_IS_BC(sc)) { + segment = storm; + } else if (igu_sb_id != sc->igu_dsb_id) { + segment = IGU_SEG_ACCESS_DEF; + } else if (storm == ATTENTION_ID) { + segment = IGU_SEG_ACCESS_ATTN; + } else { + segment = IGU_SEG_ACCESS_DEF; + } + bnx2x_igu_ack_sb(sc, igu_sb_id, segment, index, op, update); + } +} + +static void +bnx2x_igu_clear_sb_gen(struct bnx2x_softc *sc, uint8_t func, uint8_t idu_sb_id, + uint8_t is_pf) +{ + uint32_t data, ctl, cnt = 100; + uint32_t igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA; + uint32_t igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL; + uint32_t igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + + (idu_sb_id / 32) * 4; + uint32_t sb_bit = 1 << (idu_sb_id % 32); + uint32_t func_encode = func | + (is_pf ? 
1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT; + uint32_t addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id; + + /* Not supported in BC mode */ + if (CHIP_INT_MODE_IS_BC(sc)) { + return; + } + + data = ((IGU_USE_REGISTER_cstorm_type_0_sb_cleanup << + IGU_REGULAR_CLEANUP_TYPE_SHIFT) | + IGU_REGULAR_CLEANUP_SET | IGU_REGULAR_BCLEANUP); + + ctl = ((addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT) | + (func_encode << IGU_CTRL_REG_FID_SHIFT) | + (IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT)); + + REG_WR(sc, igu_addr_data, data); + + mb(); + + PMD_DRV_LOG(DEBUG, sc, "write 0x%08x to IGU(via GRC) addr 0x%x", + ctl, igu_addr_ctl); + REG_WR(sc, igu_addr_ctl, ctl); + + mb(); + + /* wait for clean up to finish */ + while (!(REG_RD(sc, igu_addr_ack) & sb_bit) && --cnt) { + DELAY(20000); + } + + if (!(REG_RD(sc, igu_addr_ack) & sb_bit)) { + PMD_DRV_LOG(DEBUG, sc, + "Unable to finish IGU cleanup: " + "idu_sb_id %d offset %d bit %d (cnt %d)", + idu_sb_id, idu_sb_id / 32, idu_sb_id % 32, cnt); + } +} + +static void bnx2x_igu_clear_sb(struct bnx2x_softc *sc, uint8_t idu_sb_id) +{ + bnx2x_igu_clear_sb_gen(sc, SC_FUNC(sc), idu_sb_id, TRUE /*PF*/); +} + +/*******************/ +/* ECORE CALLBACKS */ +/*******************/ + +static void bnx2x_reset_common(struct bnx2x_softc *sc) +{ + uint32_t val = 0x1400; + + PMD_INIT_FUNC_TRACE(sc); + + /* reset_common */ + REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR), + 0xd3ffff7f); + + if (CHIP_IS_E3(sc)) { + val |= MISC_REGISTERS_RESET_REG_2_MSTAT0; + val |= MISC_REGISTERS_RESET_REG_2_MSTAT1; + } + + REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR), val); +} + +static void bnx2x_common_init_phy(struct bnx2x_softc *sc) +{ + uint32_t shmem_base[2]; + uint32_t shmem2_base[2]; + + /* Avoid common init in case MFW supports LFA */ + if (SHMEM2_RD(sc, size) > + (uint32_t) offsetof(struct shmem2_region, + lfa_host_addr[SC_PORT(sc)])) { + return; + } + + shmem_base[0] = sc->devinfo.shmem_base; + shmem2_base[0] = sc->devinfo.shmem2_base; + + if (!CHIP_IS_E1x(sc)) { + shmem_base[1] = SHMEM2_RD(sc, other_shmem_base_addr); + shmem2_base[1] = SHMEM2_RD(sc, other_shmem2_base_addr); + } + + bnx2x_acquire_phy_lock(sc); + elink_common_init_phy(sc, shmem_base, shmem2_base, + sc->devinfo.chip_id, 0); + bnx2x_release_phy_lock(sc); +} + +static void bnx2x_pf_disable(struct bnx2x_softc *sc) +{ + uint32_t val = REG_RD(sc, IGU_REG_PF_CONFIGURATION); + + val &= ~IGU_PF_CONF_FUNC_EN; + + REG_WR(sc, IGU_REG_PF_CONFIGURATION, val); + REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0); + REG_WR(sc, CFC_REG_WEAK_ENABLE_PF, 0); +} + +static void bnx2x_init_pxp(struct bnx2x_softc *sc) +{ + uint16_t devctl; + int r_order, w_order; + + devctl = bnx2x_pcie_capability_read(sc, PCIR_EXPRESS_DEVICE_CTL); + + w_order = ((devctl & PCIM_EXP_CTL_MAX_PAYLOAD) >> 5); + r_order = ((devctl & PCIM_EXP_CTL_MAX_READ_REQUEST) >> 12); + + ecore_init_pxp_arb(sc, r_order, w_order); +} + +static uint32_t bnx2x_get_pretend_reg(struct bnx2x_softc *sc) +{ + uint32_t base = PXP2_REG_PGL_PRETEND_FUNC_F0; + uint32_t stride = (PXP2_REG_PGL_PRETEND_FUNC_F1 - base); + return base + (SC_ABS_FUNC(sc)) * stride; +} + +/* + * Called only on E1H or E2. + * When pretending to be PF, the pretend value is the function number 0..7. + * When pretending to be VF, the pretend val is the PF-num:VF-valid:ABS-VFID + * combination. 
+ */ +static int bnx2x_pretend_func(struct bnx2x_softc *sc, uint16_t pretend_func_val) +{ + uint32_t pretend_reg; + + if (CHIP_IS_E1H(sc) && (pretend_func_val > E1H_FUNC_MAX)) + return -1; + + /* get my own pretend register */ + pretend_reg = bnx2x_get_pretend_reg(sc); + REG_WR(sc, pretend_reg, pretend_func_val); + REG_RD(sc, pretend_reg); + return 0; +} + +static void bnx2x_setup_fan_failure_detection(struct bnx2x_softc *sc) +{ + int is_required; + uint32_t val; + int port; + + is_required = 0; + val = (SHMEM_RD(sc, dev_info.shared_hw_config.config2) & + SHARED_HW_CFG_FAN_FAILURE_MASK); + + if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED) { + is_required = 1; + } + /* + * The fan failure mechanism is usually related to the PHY type since + * the power consumption of the board is affected by the PHY. Currently, + * fan is required for most designs with SFX7101, BNX2X8727 and BNX2X8481. + */ + else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE) { + for (port = PORT_0; port < PORT_MAX; port++) { + is_required |= elink_fan_failure_det_req(sc, + sc-> + devinfo.shmem_base, + sc-> + devinfo.shmem2_base, + port); + } + } + + if (is_required == 0) { + return; + } + + /* Fan failure is indicated by SPIO 5 */ + bnx2x_set_spio(sc, MISC_SPIO_SPIO5, MISC_SPIO_INPUT_HI_Z); + + /* set to active low mode */ + val = REG_RD(sc, MISC_REG_SPIO_INT); + val |= (MISC_SPIO_SPIO5 << MISC_SPIO_INT_OLD_SET_POS); + REG_WR(sc, MISC_REG_SPIO_INT, val); + + /* enable interrupt to signal the IGU */ + val = REG_RD(sc, MISC_REG_SPIO_EVENT_EN); + val |= MISC_SPIO_SPIO5; + REG_WR(sc, MISC_REG_SPIO_EVENT_EN, val); +} + +static void bnx2x_enable_blocks_attention(struct bnx2x_softc *sc) +{ + uint32_t val; + + REG_WR(sc, PXP_REG_PXP_INT_MASK_0, 0); + if (!CHIP_IS_E1x(sc)) { + REG_WR(sc, PXP_REG_PXP_INT_MASK_1, 0x40); + } else { + REG_WR(sc, PXP_REG_PXP_INT_MASK_1, 0); + } + REG_WR(sc, DORQ_REG_DORQ_INT_MASK, 0); + REG_WR(sc, CFC_REG_CFC_INT_MASK, 0); + /* + * mask read length error interrupts in brb for parser + * (parsing unit and 'checksum and crc' unit) + * these errors are legal (PU reads fixed length and CAC can cause + * read length error on truncated packets) + */ + REG_WR(sc, BRB1_REG_BRB1_INT_MASK, 0xFC00); + REG_WR(sc, QM_REG_QM_INT_MASK, 0); + REG_WR(sc, TM_REG_TM_INT_MASK, 0); + REG_WR(sc, XSDM_REG_XSDM_INT_MASK_0, 0); + REG_WR(sc, XSDM_REG_XSDM_INT_MASK_1, 0); + REG_WR(sc, XCM_REG_XCM_INT_MASK, 0); + /* REG_WR(sc, XSEM_REG_XSEM_INT_MASK_0, 0); */ + /* REG_WR(sc, XSEM_REG_XSEM_INT_MASK_1, 0); */ + REG_WR(sc, USDM_REG_USDM_INT_MASK_0, 0); + REG_WR(sc, USDM_REG_USDM_INT_MASK_1, 0); + REG_WR(sc, UCM_REG_UCM_INT_MASK, 0); + /* REG_WR(sc, USEM_REG_USEM_INT_MASK_0, 0); */ + /* REG_WR(sc, USEM_REG_USEM_INT_MASK_1, 0); */ + REG_WR(sc, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0); + REG_WR(sc, CSDM_REG_CSDM_INT_MASK_0, 0); + REG_WR(sc, CSDM_REG_CSDM_INT_MASK_1, 0); + REG_WR(sc, CCM_REG_CCM_INT_MASK, 0); + /* REG_WR(sc, CSEM_REG_CSEM_INT_MASK_0, 0); */ + /* REG_WR(sc, CSEM_REG_CSEM_INT_MASK_1, 0); */ + + val = (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT | + PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF | + PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN); + if (!CHIP_IS_E1x(sc)) { + val |= (PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED | + PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED); + } + REG_WR(sc, PXP2_REG_PXP2_INT_MASK_0, val); + + REG_WR(sc, TSDM_REG_TSDM_INT_MASK_0, 0); + REG_WR(sc, TSDM_REG_TSDM_INT_MASK_1, 0); + REG_WR(sc, TCM_REG_TCM_INT_MASK, 0); + /* REG_WR(sc, TSEM_REG_TSEM_INT_MASK_0, 0); */ + + if (!CHIP_IS_E1x(sc)) { +/* enable VFC attentions: bits 11 
and 12, bits 31:13 reserved */ + REG_WR(sc, TSEM_REG_TSEM_INT_MASK_1, 0x07ff); + } + + REG_WR(sc, CDU_REG_CDU_INT_MASK, 0); + REG_WR(sc, DMAE_REG_DMAE_INT_MASK, 0); + /* REG_WR(sc, MISC_REG_MISC_INT_MASK, 0); */ + REG_WR(sc, PBF_REG_PBF_INT_MASK, 0x18); /* bit 3,4 masked */ +} + +/** + * bnx2x_init_hw_common - initialize the HW at the COMMON phase. + * + * @sc: driver handle + */ +static int bnx2x_init_hw_common(struct bnx2x_softc *sc) +{ + uint8_t abs_func_id; + uint32_t val; + + PMD_DRV_LOG(DEBUG, sc, + "starting common init for func %d", SC_ABS_FUNC(sc)); + + /* + * take the RESET lock to protect undi_unload flow from accessing + * registers while we are resetting the chip + */ + bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RESET); + + bnx2x_reset_common(sc); + + REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET), 0xffffffff); + + val = 0xfffc; + if (CHIP_IS_E3(sc)) { + val |= MISC_REGISTERS_RESET_REG_2_MSTAT0; + val |= MISC_REGISTERS_RESET_REG_2_MSTAT1; + } + + REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET), val); + + bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET); + + ecore_init_block(sc, BLOCK_MISC, PHASE_COMMON); + + if (!CHIP_IS_E1x(sc)) { +/* + * 4-port mode or 2-port mode we need to turn off master-enable for + * everyone. After that we turn it back on for self. So, we disregard + * multi-function, and always disable all functions on the given path, + * this means 0,2,4,6 for path 0 and 1,3,5,7 for path 1 + */ + for (abs_func_id = SC_PATH(sc); + abs_func_id < (E2_FUNC_MAX * 2); abs_func_id += 2) { + if (abs_func_id == SC_ABS_FUNC(sc)) { + REG_WR(sc, + PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, + 1); + continue; + } + + bnx2x_pretend_func(sc, abs_func_id); + + /* clear pf enable */ + bnx2x_pf_disable(sc); + + bnx2x_pretend_func(sc, SC_ABS_FUNC(sc)); + } + } + + ecore_init_block(sc, BLOCK_PXP, PHASE_COMMON); + + ecore_init_block(sc, BLOCK_PXP2, PHASE_COMMON); + bnx2x_init_pxp(sc); + +#ifdef __BIG_ENDIAN + REG_WR(sc, PXP2_REG_RQ_QM_ENDIAN_M, 1); + REG_WR(sc, PXP2_REG_RQ_TM_ENDIAN_M, 1); + REG_WR(sc, PXP2_REG_RQ_SRC_ENDIAN_M, 1); + REG_WR(sc, PXP2_REG_RQ_CDU_ENDIAN_M, 1); + REG_WR(sc, PXP2_REG_RQ_DBG_ENDIAN_M, 1); + /* make sure this value is 0 */ + REG_WR(sc, PXP2_REG_RQ_HC_ENDIAN_M, 0); + + //REG_WR(sc, PXP2_REG_RD_PBF_SWAP_MODE, 1); + REG_WR(sc, PXP2_REG_RD_QM_SWAP_MODE, 1); + REG_WR(sc, PXP2_REG_RD_TM_SWAP_MODE, 1); + REG_WR(sc, PXP2_REG_RD_SRC_SWAP_MODE, 1); + REG_WR(sc, PXP2_REG_RD_CDURD_SWAP_MODE, 1); +#endif + + ecore_ilt_init_page_size(sc, INITOP_SET); + + if (CHIP_REV_IS_FPGA(sc) && CHIP_IS_E1H(sc)) { + REG_WR(sc, PXP2_REG_PGL_TAGS_LIMIT, 0x1); + } + + /* let the HW do it's magic... */ + DELAY(100000); + + /* finish PXP init */ + + val = REG_RD(sc, PXP2_REG_RQ_CFG_DONE); + if (val != 1) { + PMD_DRV_LOG(NOTICE, sc, "PXP2 CFG failed"); + return -1; + } + val = REG_RD(sc, PXP2_REG_RD_INIT_DONE); + if (val != 1) { + PMD_DRV_LOG(NOTICE, sc, "PXP2 RD_INIT failed"); + return -1; + } + + /* + * Timer bug workaround for E2 only. We need to set the entire ILT to have + * entries with value "0" and valid bit on. This needs to be done by the + * first PF that is loaded in a path (i.e. common phase) + */ + if (!CHIP_IS_E1x(sc)) { +/* + * In E2 there is a bug in the timers block that can cause function 6 / 7 + * (i.e. vnic3) to start even if it is marked as "scan-off". + * This occurs when a different function (func2,3) is being marked + * as "scan-off". Real-life scenario for example: if a driver is being + * load-unloaded while func6,7 are down. 
This will cause the timer to access + * the ilt, translate to a logical address and send a request to read/write. + * Since the ilt for the function that is down is not valid, this will cause + * a translation error which is unrecoverable. + * The Workaround is intended to make sure that when this happens nothing + * fatal will occur. The workaround: + * 1. First PF driver which loads on a path will: + * a. After taking the chip out of reset, by using pretend, + * it will write "0" to the following registers of + * the other vnics. + * REG_WR(pdev, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0); + * REG_WR(pdev, CFC_REG_WEAK_ENABLE_PF,0); + * REG_WR(pdev, CFC_REG_STRONG_ENABLE_PF,0); + * And for itself it will write '1' to + * PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER to enable + * dmae-operations (writing to pram for example.) + * note: can be done for only function 6,7 but cleaner this + * way. + * b. Write zero+valid to the entire ILT. + * c. Init the first_timers_ilt_entry, last_timers_ilt_entry of + * VNIC3 (of that port). The range allocated will be the + * entire ILT. This is needed to prevent ILT range error. + * 2. Any PF driver load flow: + * a. ILT update with the physical addresses of the allocated + * logical pages. + * b. Wait 20msec. - note that this timeout is needed to make + * sure there are no requests in one of the PXP internal + * queues with "old" ILT addresses. + * c. PF enable in the PGLC. + * d. Clear the was_error of the PF in the PGLC. (could have + * occurred while driver was down) + * e. PF enable in the CFC (WEAK + STRONG) + * f. Timers scan enable + * 3. PF driver unload flow: + * a. Clear the Timers scan_en. + * b. Polling for scan_on=0 for that PF. + * c. Clear the PF enable bit in the PXP. + * d. Clear the PF enable in the CFC (WEAK + STRONG) + * e. Write zero+valid to all ILT entries (The valid bit must + * stay set) + * f. If this is VNIC 3 of a port then also init + * first_timers_ilt_entry to zero and last_timers_ilt_entry + * to the last entry in the ILT. + * + * Notes: + * Currently the PF error in the PGLC is non recoverable. + * In the future there will be a recovery routine for this error. + * Currently attention is masked. + * Having an MCP lock on the load/unload process does not guarantee that + * there is no Timer disable during Func6/7 enable. This is because the + * Timers scan is currently being cleared by the MCP on FLR. + * Step 2.d can be done only for PF6/7 and the driver can also check if + * there is an error before clearing it. But the flow above is simpler and + * more general. + * All ILT entries are written by zero+valid and not just PF6/7 + * ILT entries since in the future the ILT entries allocation for + * PF-s might be dynamic.
+ */ + struct ilt_client_info ilt_cli; + struct ecore_ilt ilt; + + memset(&ilt_cli, 0, sizeof(struct ilt_client_info)); + memset(&ilt, 0, sizeof(struct ecore_ilt)); + +/* initialize dummy TM client */ + ilt_cli.start = 0; + ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1; + ilt_cli.client_num = ILT_CLIENT_TM; + +/* + * Step 1: set zeroes to all ilt page entries with valid bit on + * Step 2: set the timers first/last ilt entry to point + * to the entire range to prevent ILT range error for 3rd/4th + * vnic (this code assumes existence of the vnic) + * + * both steps performed by call to ecore_ilt_client_init_op() + * with dummy TM client + * + * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT + * and his brother are split registers + */ + + bnx2x_pretend_func(sc, (SC_PATH(sc) + 6)); + ecore_ilt_client_init_op_ilt(sc, &ilt, &ilt_cli, INITOP_CLEAR); + bnx2x_pretend_func(sc, SC_ABS_FUNC(sc)); + + REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN); + REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN); + REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1); + } + + REG_WR(sc, PXP2_REG_RQ_DISABLE_INPUTS, 0); + REG_WR(sc, PXP2_REG_RD_DISABLE_INPUTS, 0); + + if (!CHIP_IS_E1x(sc)) { + int factor = 0; + + ecore_init_block(sc, BLOCK_PGLUE_B, PHASE_COMMON); + ecore_init_block(sc, BLOCK_ATC, PHASE_COMMON); + +/* let the HW do it's magic... */ + do { + DELAY(200000); + val = REG_RD(sc, ATC_REG_ATC_INIT_DONE); + } while (factor-- && (val != 1)); + + if (val != 1) { + PMD_DRV_LOG(NOTICE, sc, "ATC_INIT failed"); + return -1; + } + } + + ecore_init_block(sc, BLOCK_DMAE, PHASE_COMMON); + + /* clean the DMAE memory */ + sc->dmae_ready = 1; + ecore_init_fill(sc, TSEM_REG_PRAM, 0, 8, 1); + + ecore_init_block(sc, BLOCK_TCM, PHASE_COMMON); + + ecore_init_block(sc, BLOCK_UCM, PHASE_COMMON); + + ecore_init_block(sc, BLOCK_CCM, PHASE_COMMON); + + ecore_init_block(sc, BLOCK_XCM, PHASE_COMMON); + + bnx2x_read_dmae(sc, XSEM_REG_PASSIVE_BUFFER, 3); + bnx2x_read_dmae(sc, CSEM_REG_PASSIVE_BUFFER, 3); + bnx2x_read_dmae(sc, TSEM_REG_PASSIVE_BUFFER, 3); + bnx2x_read_dmae(sc, USEM_REG_PASSIVE_BUFFER, 3); + + ecore_init_block(sc, BLOCK_QM, PHASE_COMMON); + + /* QM queues pointers table */ + ecore_qm_init_ptr_table(sc, sc->qm_cid_count, INITOP_SET); + + /* soft reset pulse */ + REG_WR(sc, QM_REG_SOFT_RESET, 1); + REG_WR(sc, QM_REG_SOFT_RESET, 0); + + if (CNIC_SUPPORT(sc)) + ecore_init_block(sc, BLOCK_TM, PHASE_COMMON); + + ecore_init_block(sc, BLOCK_DORQ, PHASE_COMMON); + + if (!CHIP_REV_IS_SLOW(sc)) { +/* enable hw interrupt from doorbell Q */ + REG_WR(sc, DORQ_REG_DORQ_INT_MASK, 0); + } + + ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON); + + ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON); + REG_WR(sc, PRS_REG_A_PRSU_20, 0xf); + REG_WR(sc, PRS_REG_E1HOV_MODE, sc->devinfo.mf_info.path_has_ovlan); + + if (!CHIP_IS_E1x(sc) && !CHIP_IS_E3B0(sc)) { + if (IS_MF_AFEX(sc)) { + /* + * configure that AFEX and VLAN headers must be + * received in AFEX mode + */ + REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC, 0xE); + REG_WR(sc, PRS_REG_MUST_HAVE_HDRS, 0xA); + REG_WR(sc, PRS_REG_HDRS_AFTER_TAG_0, 0x6); + REG_WR(sc, PRS_REG_TAG_ETHERTYPE_0, 0x8926); + REG_WR(sc, PRS_REG_TAG_LEN_0, 0x4); + } else { + /* + * Bit-map indicating which L2 hdrs may appear + * after the basic Ethernet header + */ + REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC, + sc->devinfo.mf_info.path_has_ovlan ? 
7 : 6); + } + } + + ecore_init_block(sc, BLOCK_TSDM, PHASE_COMMON); + ecore_init_block(sc, BLOCK_CSDM, PHASE_COMMON); + ecore_init_block(sc, BLOCK_USDM, PHASE_COMMON); + ecore_init_block(sc, BLOCK_XSDM, PHASE_COMMON); + + if (!CHIP_IS_E1x(sc)) { +/* reset VFC memories */ + REG_WR(sc, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST, + VFC_MEMORIES_RST_REG_CAM_RST | + VFC_MEMORIES_RST_REG_RAM_RST); + REG_WR(sc, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST, + VFC_MEMORIES_RST_REG_CAM_RST | + VFC_MEMORIES_RST_REG_RAM_RST); + + DELAY(20000); + } + + ecore_init_block(sc, BLOCK_TSEM, PHASE_COMMON); + ecore_init_block(sc, BLOCK_USEM, PHASE_COMMON); + ecore_init_block(sc, BLOCK_CSEM, PHASE_COMMON); + ecore_init_block(sc, BLOCK_XSEM, PHASE_COMMON); + + /* sync semi rtc */ + REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x80000000); + REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x80000000); + + ecore_init_block(sc, BLOCK_UPB, PHASE_COMMON); + ecore_init_block(sc, BLOCK_XPB, PHASE_COMMON); + ecore_init_block(sc, BLOCK_PBF, PHASE_COMMON); + + if (!CHIP_IS_E1x(sc)) { + if (IS_MF_AFEX(sc)) { + /* + * configure that AFEX and VLAN headers must be + * sent in AFEX mode + */ + REG_WR(sc, PBF_REG_HDRS_AFTER_BASIC, 0xE); + REG_WR(sc, PBF_REG_MUST_HAVE_HDRS, 0xA); + REG_WR(sc, PBF_REG_HDRS_AFTER_TAG_0, 0x6); + REG_WR(sc, PBF_REG_TAG_ETHERTYPE_0, 0x8926); + REG_WR(sc, PBF_REG_TAG_LEN_0, 0x4); + } else { + REG_WR(sc, PBF_REG_HDRS_AFTER_BASIC, + sc->devinfo.mf_info.path_has_ovlan ? 7 : 6); + } + } + + REG_WR(sc, SRC_REG_SOFT_RST, 1); + + ecore_init_block(sc, BLOCK_SRC, PHASE_COMMON); + + if (CNIC_SUPPORT(sc)) { + REG_WR(sc, SRC_REG_KEYSEARCH_0, 0x63285672); + REG_WR(sc, SRC_REG_KEYSEARCH_1, 0x24b8f2cc); + REG_WR(sc, SRC_REG_KEYSEARCH_2, 0x223aef9b); + REG_WR(sc, SRC_REG_KEYSEARCH_3, 0x26001e3a); + REG_WR(sc, SRC_REG_KEYSEARCH_4, 0x7ae91116); + REG_WR(sc, SRC_REG_KEYSEARCH_5, 0x5ce5230b); + REG_WR(sc, SRC_REG_KEYSEARCH_6, 0x298d8adf); + REG_WR(sc, SRC_REG_KEYSEARCH_7, 0x6eb0ff09); + REG_WR(sc, SRC_REG_KEYSEARCH_8, 0x1830f82f); + REG_WR(sc, SRC_REG_KEYSEARCH_9, 0x01e46be7); + } + REG_WR(sc, SRC_REG_SOFT_RST, 0); + + if (sizeof(union cdu_context) != 1024) { +/* we currently assume that a context is 1024 bytes */ + PMD_DRV_LOG(NOTICE, sc, + "please adjust the size of cdu_context(%ld)", + (long)sizeof(union cdu_context)); + } + + ecore_init_block(sc, BLOCK_CDU, PHASE_COMMON); + val = (4 << 24) + (0 << 12) + 1024; + REG_WR(sc, CDU_REG_CDU_GLOBAL_PARAMS, val); + + ecore_init_block(sc, BLOCK_CFC, PHASE_COMMON); + + REG_WR(sc, CFC_REG_INIT_REG, 0x7FF); + /* enable context validation interrupt from CFC */ + REG_WR(sc, CFC_REG_CFC_INT_MASK, 0); + + /* set the thresholds to prevent CFC/CDU race */ + REG_WR(sc, CFC_REG_DEBUG0, 0x20020000); + ecore_init_block(sc, BLOCK_HC, PHASE_COMMON); + + if (!CHIP_IS_E1x(sc) && BNX2X_NOMCP(sc)) { + REG_WR(sc, IGU_REG_RESET_MEMORIES, 0x36); + } + + ecore_init_block(sc, BLOCK_IGU, PHASE_COMMON); + ecore_init_block(sc, BLOCK_MISC_AEU, PHASE_COMMON); + + /* Reset PCIE errors for debug */ + REG_WR(sc, 0x2814, 0xffffffff); + REG_WR(sc, 0x3820, 0xffffffff); + + if (!CHIP_IS_E1x(sc)) { + REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_CONTROL_5, + (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 | + PXPCS_TL_CONTROL_5_ERR_UNSPPORT)); + REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT, + (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 | + PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 | + PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2)); + REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT, + (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 | + 
PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 | + PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5)); + } + + ecore_init_block(sc, BLOCK_NIG, PHASE_COMMON); + + /* in E3 this done in per-port section */ + if (!CHIP_IS_E3(sc)) + REG_WR(sc, NIG_REG_LLH_MF_MODE, IS_MF(sc)); + + if (CHIP_IS_E1H(sc)) { +/* not applicable for E2 (and above ...) */ + REG_WR(sc, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(sc)); + } + + if (CHIP_REV_IS_SLOW(sc)) { + DELAY(200000); + } + + /* finish CFC init */ + val = reg_poll(sc, CFC_REG_LL_INIT_DONE, 1, 100, 10); + if (val != 1) { + PMD_DRV_LOG(NOTICE, sc, "CFC LL_INIT failed"); + return -1; + } + val = reg_poll(sc, CFC_REG_AC_INIT_DONE, 1, 100, 10); + if (val != 1) { + PMD_DRV_LOG(NOTICE, sc, "CFC AC_INIT failed"); + return -1; + } + val = reg_poll(sc, CFC_REG_CAM_INIT_DONE, 1, 100, 10); + if (val != 1) { + PMD_DRV_LOG(NOTICE, sc, "CFC CAM_INIT failed"); + return -1; + } + REG_WR(sc, CFC_REG_DEBUG0, 0); + + bnx2x_setup_fan_failure_detection(sc); + + /* clear PXP2 attentions */ + REG_RD(sc, PXP2_REG_PXP2_INT_STS_CLR_0); + + bnx2x_enable_blocks_attention(sc); + + if (!CHIP_REV_IS_SLOW(sc)) { + ecore_enable_blocks_parity(sc); + } + + if (!BNX2X_NOMCP(sc)) { + if (CHIP_IS_E1x(sc)) { + bnx2x_common_init_phy(sc); + } + } + + return 0; +} + +/** + * bnx2x_init_hw_common_chip - init HW at the COMMON_CHIP phase. + * + * @sc: driver handle + */ +static int bnx2x_init_hw_common_chip(struct bnx2x_softc *sc) +{ + int rc = bnx2x_init_hw_common(sc); + + if (rc) { + return rc; + } + + /* In E2 2-PORT mode, same ext phy is used for the two paths */ + if (!BNX2X_NOMCP(sc)) { + bnx2x_common_init_phy(sc); + } + + return 0; +} + +static int bnx2x_init_hw_port(struct bnx2x_softc *sc) +{ + int port = SC_PORT(sc); + int init_phase = port ? PHASE_PORT1 : PHASE_PORT0; + uint32_t low, high; + uint32_t val; + + PMD_DRV_LOG(DEBUG, sc, "starting port init for port %d", port); + + REG_WR(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port * 4, 0); + + ecore_init_block(sc, BLOCK_MISC, init_phase); + ecore_init_block(sc, BLOCK_PXP, init_phase); + ecore_init_block(sc, BLOCK_PXP2, init_phase); + + /* + * Timers bug workaround: disables the pf_master bit in pglue at + * common phase, we need to enable it here before any dmae access are + * attempted. Therefore we manually added the enable-master to the + * port phase (it also happens in the function phase) + */ + if (!CHIP_IS_E1x(sc)) { + REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); + } + + ecore_init_block(sc, BLOCK_ATC, init_phase); + ecore_init_block(sc, BLOCK_DMAE, init_phase); + ecore_init_block(sc, BLOCK_PGLUE_B, init_phase); + ecore_init_block(sc, BLOCK_QM, init_phase); + + ecore_init_block(sc, BLOCK_TCM, init_phase); + ecore_init_block(sc, BLOCK_UCM, init_phase); + ecore_init_block(sc, BLOCK_CCM, init_phase); + ecore_init_block(sc, BLOCK_XCM, init_phase); + + /* QM cid (connection) count */ + ecore_qm_init_cid_count(sc, sc->qm_cid_count, INITOP_SET); + + if (CNIC_SUPPORT(sc)) { + ecore_init_block(sc, BLOCK_TM, init_phase); + REG_WR(sc, TM_REG_LIN0_SCAN_TIME + port * 4, 20); + REG_WR(sc, TM_REG_LIN0_MAX_ACTIVE_CID + port * 4, 31); + } + + ecore_init_block(sc, BLOCK_DORQ, init_phase); + + ecore_init_block(sc, BLOCK_BRB1, init_phase); + + if (CHIP_IS_E1H(sc)) { + if (IS_MF(sc)) { + low = (BNX2X_ONE_PORT(sc) ? 160 : 246); + } else if (sc->mtu > 4096) { + if (BNX2X_ONE_PORT(sc)) { + low = 160; + } else { + val = sc->mtu; + /* (24*1024 + val*4)/256 */ + low = (96 + (val / 64) + ((val % 64) ? 1 : 0)); + } + } else { + low = (BNX2X_ONE_PORT(sc) ? 
80 : 160); + } + high = (low + 56); /* 14*1024/256 */ + REG_WR(sc, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port * 4, low); + REG_WR(sc, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port * 4, high); + } + + if (CHIP_IS_MODE_4_PORT(sc)) { + REG_WR(sc, SC_PORT(sc) ? + BRB1_REG_MAC_GUARANTIED_1 : + BRB1_REG_MAC_GUARANTIED_0, 40); + } + + ecore_init_block(sc, BLOCK_PRS, init_phase); + if (CHIP_IS_E3B0(sc)) { + if (IS_MF_AFEX(sc)) { + /* configure headers for AFEX mode */ + if (SC_PORT(sc)) { + REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC_PORT_1, + 0xE); + REG_WR(sc, PRS_REG_HDRS_AFTER_TAG_0_PORT_1, + 0x6); + REG_WR(sc, PRS_REG_MUST_HAVE_HDRS_PORT_1, 0xA); + } else { + REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC_PORT_0, + 0xE); + REG_WR(sc, PRS_REG_HDRS_AFTER_TAG_0_PORT_0, + 0x6); + REG_WR(sc, PRS_REG_MUST_HAVE_HDRS_PORT_0, 0xA); + } + } else { + /* Ovlan exists only if we are in multi-function + + * switch-dependent mode, in switch-independent there + * is no ovlan headers + */ + REG_WR(sc, SC_PORT(sc) ? + PRS_REG_HDRS_AFTER_BASIC_PORT_1 : + PRS_REG_HDRS_AFTER_BASIC_PORT_0, + (sc->devinfo.mf_info.path_has_ovlan ? 7 : 6)); + } + } + + ecore_init_block(sc, BLOCK_TSDM, init_phase); + ecore_init_block(sc, BLOCK_CSDM, init_phase); + ecore_init_block(sc, BLOCK_USDM, init_phase); + ecore_init_block(sc, BLOCK_XSDM, init_phase); + + ecore_init_block(sc, BLOCK_TSEM, init_phase); + ecore_init_block(sc, BLOCK_USEM, init_phase); + ecore_init_block(sc, BLOCK_CSEM, init_phase); + ecore_init_block(sc, BLOCK_XSEM, init_phase); + + ecore_init_block(sc, BLOCK_UPB, init_phase); + ecore_init_block(sc, BLOCK_XPB, init_phase); + + ecore_init_block(sc, BLOCK_PBF, init_phase); + + if (CHIP_IS_E1x(sc)) { +/* configure PBF to work without PAUSE mtu 9000 */ + REG_WR(sc, PBF_REG_P0_PAUSE_ENABLE + port * 4, 0); + +/* update threshold */ + REG_WR(sc, PBF_REG_P0_ARB_THRSH + port * 4, (9040 / 16)); +/* update init credit */ + REG_WR(sc, PBF_REG_P0_INIT_CRD + port * 4, + (9040 / 16) + 553 - 22); + +/* probe changes */ + REG_WR(sc, PBF_REG_INIT_P0 + port * 4, 1); + DELAY(50); + REG_WR(sc, PBF_REG_INIT_P0 + port * 4, 0); + } + + if (CNIC_SUPPORT(sc)) { + ecore_init_block(sc, BLOCK_SRC, init_phase); + } + + ecore_init_block(sc, BLOCK_CDU, init_phase); + ecore_init_block(sc, BLOCK_CFC, init_phase); + ecore_init_block(sc, BLOCK_HC, init_phase); + ecore_init_block(sc, BLOCK_IGU, init_phase); + ecore_init_block(sc, BLOCK_MISC_AEU, init_phase); + /* init aeu_mask_attn_func_0/1: + * - SF mode: bits 3-7 are masked. only bits 0-2 are in use + * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF + * bits 4-7 are used for "per vn group attention" */ + val = IS_MF(sc) ? 0xF7 : 0x7; + val |= 0x10; + REG_WR(sc, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port * 4, val); + + ecore_init_block(sc, BLOCK_NIG, init_phase); + + if (!CHIP_IS_E1x(sc)) { +/* Bit-map indicating which L2 hdrs may appear after the + * basic Ethernet header + */ + if (IS_MF_AFEX(sc)) { + REG_WR(sc, SC_PORT(sc) ? + NIG_REG_P1_HDRS_AFTER_BASIC : + NIG_REG_P0_HDRS_AFTER_BASIC, 0xE); + } else { + REG_WR(sc, SC_PORT(sc) ? + NIG_REG_P1_HDRS_AFTER_BASIC : + NIG_REG_P0_HDRS_AFTER_BASIC, + IS_MF_SD(sc) ? 7 : 6); + } + + if (CHIP_IS_E3(sc)) { + REG_WR(sc, SC_PORT(sc) ? + NIG_REG_LLH1_MF_MODE : + NIG_REG_LLH_MF_MODE, IS_MF(sc)); + } + } + if (!CHIP_IS_E3(sc)) { + REG_WR(sc, NIG_REG_XGXS_SERDES0_MODE_SEL + port * 4, 1); + } + + /* 0x2 disable mf_ov, 0x1 enable */ + REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port * 4, + (IS_MF_SD(sc) ? 
0x1 : 0x2)); + + if (!CHIP_IS_E1x(sc)) { + val = 0; + switch (sc->devinfo.mf_info.mf_mode) { + case MULTI_FUNCTION_SD: + val = 1; + break; + case MULTI_FUNCTION_SI: + case MULTI_FUNCTION_AFEX: + val = 2; + break; + } + + REG_WR(sc, (SC_PORT(sc) ? NIG_REG_LLH1_CLS_TYPE : + NIG_REG_LLH0_CLS_TYPE), val); + } + REG_WR(sc, NIG_REG_LLFC_ENABLE_0 + port * 4, 0); + REG_WR(sc, NIG_REG_LLFC_OUT_EN_0 + port * 4, 0); + REG_WR(sc, NIG_REG_PAUSE_ENABLE_0 + port * 4, 1); + + /* If SPIO5 is set to generate interrupts, enable it for this port */ + val = REG_RD(sc, MISC_REG_SPIO_EVENT_EN); + if (val & MISC_SPIO_SPIO5) { + uint32_t reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : + MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); + val = REG_RD(sc, reg_addr); + val |= AEU_INPUTS_ATTN_BITS_SPIO5; + REG_WR(sc, reg_addr, val); + } + + return 0; +} + +static uint32_t +bnx2x_flr_clnup_reg_poll(struct bnx2x_softc *sc, uint32_t reg, + uint32_t expected, uint32_t poll_count) +{ + uint32_t cur_cnt = poll_count; + uint32_t val; + + while ((val = REG_RD(sc, reg)) != expected && cur_cnt--) { + DELAY(FLR_WAIT_INTERVAL); + } + + return val; +} + +static int +bnx2x_flr_clnup_poll_hw_counter(struct bnx2x_softc *sc, uint32_t reg, + __rte_unused const char *msg, uint32_t poll_cnt) +{ + uint32_t val = bnx2x_flr_clnup_reg_poll(sc, reg, 0, poll_cnt); + + if (val != 0) { + PMD_DRV_LOG(NOTICE, sc, "%s usage count=%d", msg, val); + return -1; + } + + return 0; +} + +/* Common routines with VF FLR cleanup */ +static uint32_t bnx2x_flr_clnup_poll_count(struct bnx2x_softc *sc) +{ + /* adjust polling timeout */ + if (CHIP_REV_IS_EMUL(sc)) { + return FLR_POLL_CNT * 2000; + } + + if (CHIP_REV_IS_FPGA(sc)) { + return FLR_POLL_CNT * 120; + } + + return FLR_POLL_CNT; +} + +static int bnx2x_poll_hw_usage_counters(struct bnx2x_softc *sc, uint32_t poll_cnt) +{ + /* wait for CFC PF usage-counter to zero (includes all the VFs) */ + if (bnx2x_flr_clnup_poll_hw_counter(sc, + CFC_REG_NUM_LCIDS_INSIDE_PF, + "CFC PF usage counter timed out", + poll_cnt)) { + return -1; + } + + /* Wait for DQ PF usage-counter to zero (until DQ cleanup) */ + if (bnx2x_flr_clnup_poll_hw_counter(sc, + DORQ_REG_PF_USAGE_CNT, + "DQ PF usage counter timed out", + poll_cnt)) { + return -1; + } + + /* Wait for QM PF usage-counter to zero (until DQ cleanup) */ + if (bnx2x_flr_clnup_poll_hw_counter(sc, + QM_REG_PF_USG_CNT_0 + 4 * SC_FUNC(sc), + "QM PF usage counter timed out", + poll_cnt)) { + return -1; + } + + /* Wait for Timer PF usage-counters to zero (until DQ cleanup) */ + if (bnx2x_flr_clnup_poll_hw_counter(sc, + TM_REG_LIN0_VNIC_UC + 4 * SC_PORT(sc), + "Timers VNIC usage counter timed out", + poll_cnt)) { + return -1; + } + + if (bnx2x_flr_clnup_poll_hw_counter(sc, + TM_REG_LIN0_NUM_SCANS + + 4 * SC_PORT(sc), + "Timers NUM_SCANS usage counter timed out", + poll_cnt)) { + return -1; + } + + /* Wait DMAE PF usage counter to zero */ + if (bnx2x_flr_clnup_poll_hw_counter(sc, + dmae_reg_go_c[INIT_DMAE_C(sc)], + "DMAE dommand register timed out", + poll_cnt)) { + return -1; + } + + return 0; +} + +#define OP_GEN_PARAM(param) \ + (((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM) +#define OP_GEN_TYPE(type) \ + (((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE) +#define OP_GEN_AGG_VECT(index) \ + (((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX) + +static int +bnx2x_send_final_clnup(struct bnx2x_softc *sc, uint8_t clnup_func, + uint32_t poll_cnt) +{ + uint32_t op_gen_command = 0; + uint32_t comp_addr = (BAR_CSTRORM_INTMEM + + 
CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func)); + int ret = 0; + + if (REG_RD(sc, comp_addr)) { + PMD_DRV_LOG(NOTICE, sc, + "Cleanup complete was not 0 before sending"); + return -1; + } + + op_gen_command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX); + op_gen_command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE); + op_gen_command |= OP_GEN_AGG_VECT(clnup_func); + op_gen_command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT; + + REG_WR(sc, XSDM_REG_OPERATION_GEN, op_gen_command); + + if (bnx2x_flr_clnup_reg_poll(sc, comp_addr, 1, poll_cnt) != 1) { + PMD_DRV_LOG(NOTICE, sc, "FW final cleanup did not succeed"); + PMD_DRV_LOG(DEBUG, sc, "At timeout completion address contained %x", + (REG_RD(sc, comp_addr))); + rte_panic("FLR cleanup failed"); + return -1; + } + + /* Zero completion for nxt FLR */ + REG_WR(sc, comp_addr, 0); + + return ret; +} + +static void +bnx2x_pbf_pN_buf_flushed(struct bnx2x_softc *sc, struct pbf_pN_buf_regs *regs, + uint32_t poll_count) +{ + uint32_t init_crd, crd, crd_start, crd_freed, crd_freed_start; + uint32_t cur_cnt = poll_count; + + crd_freed = crd_freed_start = REG_RD(sc, regs->crd_freed); + crd = crd_start = REG_RD(sc, regs->crd); + init_crd = REG_RD(sc, regs->init_crd); + + while ((crd != init_crd) && + ((uint32_t) ((int32_t) crd_freed - (int32_t) crd_freed_start) < + (init_crd - crd_start))) { + if (cur_cnt--) { + DELAY(FLR_WAIT_INTERVAL); + crd = REG_RD(sc, regs->crd); + crd_freed = REG_RD(sc, regs->crd_freed); + } else { + break; + } + } +} + +static void +bnx2x_pbf_pN_cmd_flushed(struct bnx2x_softc *sc, struct pbf_pN_cmd_regs *regs, + uint32_t poll_count) +{ + uint32_t occup, to_free, freed, freed_start; + uint32_t cur_cnt = poll_count; + + occup = to_free = REG_RD(sc, regs->lines_occup); + freed = freed_start = REG_RD(sc, regs->lines_freed); + + while (occup && + ((uint32_t) ((int32_t) freed - (int32_t) freed_start) < + to_free)) { + if (cur_cnt--) { + DELAY(FLR_WAIT_INTERVAL); + occup = REG_RD(sc, regs->lines_occup); + freed = REG_RD(sc, regs->lines_freed); + } else { + break; + } + } +} + +static void bnx2x_tx_hw_flushed(struct bnx2x_softc *sc, uint32_t poll_count) +{ + struct pbf_pN_cmd_regs cmd_regs[] = { + {0, (CHIP_IS_E3B0(sc)) ? + PBF_REG_TQ_OCCUPANCY_Q0 : PBF_REG_P0_TQ_OCCUPANCY, + (CHIP_IS_E3B0(sc)) ? + PBF_REG_TQ_LINES_FREED_CNT_Q0 : PBF_REG_P0_TQ_LINES_FREED_CNT}, + {1, (CHIP_IS_E3B0(sc)) ? + PBF_REG_TQ_OCCUPANCY_Q1 : PBF_REG_P1_TQ_OCCUPANCY, + (CHIP_IS_E3B0(sc)) ? + PBF_REG_TQ_LINES_FREED_CNT_Q1 : PBF_REG_P1_TQ_LINES_FREED_CNT}, + {4, (CHIP_IS_E3B0(sc)) ? + PBF_REG_TQ_OCCUPANCY_LB_Q : PBF_REG_P4_TQ_OCCUPANCY, + (CHIP_IS_E3B0(sc)) ? + PBF_REG_TQ_LINES_FREED_CNT_LB_Q : + PBF_REG_P4_TQ_LINES_FREED_CNT} + }; + + struct pbf_pN_buf_regs buf_regs[] = { + {0, (CHIP_IS_E3B0(sc)) ? + PBF_REG_INIT_CRD_Q0 : PBF_REG_P0_INIT_CRD, + (CHIP_IS_E3B0(sc)) ? PBF_REG_CREDIT_Q0 : PBF_REG_P0_CREDIT, + (CHIP_IS_E3B0(sc)) ? + PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 : + PBF_REG_P0_INTERNAL_CRD_FREED_CNT}, + {1, (CHIP_IS_E3B0(sc)) ? + PBF_REG_INIT_CRD_Q1 : PBF_REG_P1_INIT_CRD, + (CHIP_IS_E3B0(sc)) ? PBF_REG_CREDIT_Q1 : PBF_REG_P1_CREDIT, + (CHIP_IS_E3B0(sc)) ? + PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 : + PBF_REG_P1_INTERNAL_CRD_FREED_CNT}, + {4, (CHIP_IS_E3B0(sc)) ? + PBF_REG_INIT_CRD_LB_Q : PBF_REG_P4_INIT_CRD, + (CHIP_IS_E3B0(sc)) ? PBF_REG_CREDIT_LB_Q : PBF_REG_P4_CREDIT, + (CHIP_IS_E3B0(sc)) ? 
+ PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q : + PBF_REG_P4_INTERNAL_CRD_FREED_CNT}, + }; + + uint32_t i; + + /* Verify the command queues are flushed P0, P1, P4 */ + for (i = 0; i < ARRAY_SIZE(cmd_regs); i++) { + bnx2x_pbf_pN_cmd_flushed(sc, &cmd_regs[i], poll_count); + } + + /* Verify the transmission buffers are flushed P0, P1, P4 */ + for (i = 0; i < ARRAY_SIZE(buf_regs); i++) { + bnx2x_pbf_pN_buf_flushed(sc, &buf_regs[i], poll_count); + } +} + +static void bnx2x_hw_enable_status(struct bnx2x_softc *sc) +{ + __rte_unused uint32_t val; + + val = REG_RD(sc, CFC_REG_WEAK_ENABLE_PF); + PMD_DRV_LOG(DEBUG, sc, "CFC_REG_WEAK_ENABLE_PF is 0x%x", val); + + val = REG_RD(sc, PBF_REG_DISABLE_PF); + PMD_DRV_LOG(DEBUG, sc, "PBF_REG_DISABLE_PF is 0x%x", val); + + val = REG_RD(sc, IGU_REG_PCI_PF_MSI_EN); + PMD_DRV_LOG(DEBUG, sc, "IGU_REG_PCI_PF_MSI_EN is 0x%x", val); + + val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_EN); + PMD_DRV_LOG(DEBUG, sc, "IGU_REG_PCI_PF_MSIX_EN is 0x%x", val); + + val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_FUNC_MASK); + PMD_DRV_LOG(DEBUG, sc, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x", val); + + val = REG_RD(sc, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR); + PMD_DRV_LOG(DEBUG, sc, + "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x", val); + + val = REG_RD(sc, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR); + PMD_DRV_LOG(DEBUG, sc, + "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x", val); + + val = REG_RD(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER); + PMD_DRV_LOG(DEBUG, sc, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x", + val); +} + +/** + * bnx2x_pf_flr_clnup + * a. re-enable target read on the PF + * b. poll cfc per function usage counter + * c. poll the qm per function usage counter + * d. poll the tm per function usage counter + * e. poll the tm per function scan-done indication + * f. clear the dmae channel associated with the PF + * g. zero the igu 'trailing edge' and 'leading edge' regs (attentions) + * h.
call the common flr cleanup code with -1 (pf indication) + */ +static int bnx2x_pf_flr_clnup(struct bnx2x_softc *sc) +{ + uint32_t poll_cnt = bnx2x_flr_clnup_poll_count(sc); + + /* Re-enable PF target read access */ + REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1); + + /* Poll HW usage counters */ + if (bnx2x_poll_hw_usage_counters(sc, poll_cnt)) { + return -1; + } + + /* Zero the igu 'trailing edge' and 'leading edge' */ + + /* Send the FW cleanup command */ + if (bnx2x_send_final_clnup(sc, (uint8_t) SC_FUNC(sc), poll_cnt)) { + return -1; + } + + /* ATC cleanup */ + + /* Verify TX hw is flushed */ + bnx2x_tx_hw_flushed(sc, poll_cnt); + + /* Wait 100ms (not adjusted according to platform) */ + DELAY(100000); + + /* Verify no pending pci transactions */ + if (bnx2x_is_pcie_pending(sc)) { + PMD_DRV_LOG(NOTICE, sc, "PCIE Transactions still pending"); + } + + /* Debug */ + bnx2x_hw_enable_status(sc); + + /* + * Master enable - Due to WB DMAE writes performed before this + * register is re-initialized as part of the regular function init + */ + REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); + + return 0; +} + +static int bnx2x_init_hw_func(struct bnx2x_softc *sc) +{ + int port = SC_PORT(sc); + int func = SC_FUNC(sc); + int init_phase = PHASE_PF0 + func; + struct ecore_ilt *ilt = sc->ilt; + uint16_t cdu_ilt_start; + uint32_t addr, val; + uint32_t main_mem_base, main_mem_size, main_mem_prty_clr; + int main_mem_width, rc; + uint32_t i; + + PMD_DRV_LOG(DEBUG, sc, "starting func init for func %d", func); + + /* FLR cleanup */ + if (!CHIP_IS_E1x(sc)) { + rc = bnx2x_pf_flr_clnup(sc); + if (rc) { + PMD_DRV_LOG(NOTICE, sc, "FLR cleanup failed!"); + return rc; + } + } + + /* set MSI reconfigure capability */ + if (sc->devinfo.int_block == INT_BLOCK_HC) { + addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0); + val = REG_RD(sc, addr); + val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0; + REG_WR(sc, addr, val); + } + + ecore_init_block(sc, BLOCK_PXP, init_phase); + ecore_init_block(sc, BLOCK_PXP2, init_phase); + + ilt = sc->ilt; + cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start; + + for (i = 0; i < L2_ILT_LINES(sc); i++) { + ilt->lines[cdu_ilt_start + i].page = sc->context[i].vcxt; + ilt->lines[cdu_ilt_start + i].page_mapping = + (rte_iova_t)sc->context[i].vcxt_dma.paddr; + ilt->lines[cdu_ilt_start + i].size = sc->context[i].size; + } + ecore_ilt_init_op(sc, INITOP_SET); + + REG_WR(sc, PRS_REG_NIC_MODE, 1); + + if (!CHIP_IS_E1x(sc)) { + uint32_t pf_conf = IGU_PF_CONF_FUNC_EN; + +/* Turn on a single ISR mode in IGU if driver is going to use + * INT#x or MSI + */ + if ((sc->interrupt_mode != INTR_MODE_MSIX) + || (sc->interrupt_mode != INTR_MODE_SINGLE_MSIX)) { + pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN; + } + +/* + * Timers workaround bug: function init part. 
+ * Need to wait 20msec after initializing ILT, + * needed to make sure there are no requests in + * one of the PXP internal queues with "old" ILT addresses + */ + DELAY(20000); + +/* + * Master enable - Due to WB DMAE writes performed before this + * register is re-initialized as part of the regular function + * init + */ + REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); +/* Enable the function in IGU */ + REG_WR(sc, IGU_REG_PF_CONFIGURATION, pf_conf); + } + + sc->dmae_ready = 1; + + ecore_init_block(sc, BLOCK_PGLUE_B, init_phase); + + if (!CHIP_IS_E1x(sc)) + REG_WR(sc, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func); + + ecore_init_block(sc, BLOCK_ATC, init_phase); + ecore_init_block(sc, BLOCK_DMAE, init_phase); + ecore_init_block(sc, BLOCK_NIG, init_phase); + ecore_init_block(sc, BLOCK_SRC, init_phase); + ecore_init_block(sc, BLOCK_MISC, init_phase); + ecore_init_block(sc, BLOCK_TCM, init_phase); + ecore_init_block(sc, BLOCK_UCM, init_phase); + ecore_init_block(sc, BLOCK_CCM, init_phase); + ecore_init_block(sc, BLOCK_XCM, init_phase); + ecore_init_block(sc, BLOCK_TSEM, init_phase); + ecore_init_block(sc, BLOCK_USEM, init_phase); + ecore_init_block(sc, BLOCK_CSEM, init_phase); + ecore_init_block(sc, BLOCK_XSEM, init_phase); + + if (!CHIP_IS_E1x(sc)) + REG_WR(sc, QM_REG_PF_EN, 1); + + if (!CHIP_IS_E1x(sc)) { + REG_WR(sc, TSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func); + REG_WR(sc, USEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func); + REG_WR(sc, CSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func); + REG_WR(sc, XSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func); + } + ecore_init_block(sc, BLOCK_QM, init_phase); + + ecore_init_block(sc, BLOCK_TM, init_phase); + ecore_init_block(sc, BLOCK_DORQ, init_phase); + + ecore_init_block(sc, BLOCK_BRB1, init_phase); + ecore_init_block(sc, BLOCK_PRS, init_phase); + ecore_init_block(sc, BLOCK_TSDM, init_phase); + ecore_init_block(sc, BLOCK_CSDM, init_phase); + ecore_init_block(sc, BLOCK_USDM, init_phase); + ecore_init_block(sc, BLOCK_XSDM, init_phase); + ecore_init_block(sc, BLOCK_UPB, init_phase); + ecore_init_block(sc, BLOCK_XPB, init_phase); + ecore_init_block(sc, BLOCK_PBF, init_phase); + if (!CHIP_IS_E1x(sc)) + REG_WR(sc, PBF_REG_DISABLE_PF, 0); + + ecore_init_block(sc, BLOCK_CDU, init_phase); + + ecore_init_block(sc, BLOCK_CFC, init_phase); + + if (!CHIP_IS_E1x(sc)) + REG_WR(sc, CFC_REG_WEAK_ENABLE_PF, 1); + + if (IS_MF(sc)) { + REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port * 8, 1); + REG_WR(sc, NIG_REG_LLH0_FUNC_VLAN_ID + port * 8, OVLAN(sc)); + } + + ecore_init_block(sc, BLOCK_MISC_AEU, init_phase); + + /* HC init per function */ + if (sc->devinfo.int_block == INT_BLOCK_HC) { + if (CHIP_IS_E1H(sc)) { + REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func * 4, 0); + + REG_WR(sc, HC_REG_LEADING_EDGE_0 + port * 8, 0); + REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port * 8, 0); + } + ecore_init_block(sc, BLOCK_HC, init_phase); + + } else { + uint32_t num_segs, sb_idx, prod_offset; + + REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func * 4, 0); + + if (!CHIP_IS_E1x(sc)) { + REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, 0); + REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, 0); + } + + ecore_init_block(sc, BLOCK_IGU, init_phase); + + if (!CHIP_IS_E1x(sc)) { + int dsb_idx = 0; + /** + * Producer memory: + * E2 mode: address 0-135 match to the mapping memory; + * 136 - PF0 default prod; 137 - PF1 default prod; + * 138 - PF2 default prod; 139 - PF3 default prod; + * 140 - PF0 attn prod; 141 - PF1 attn prod; + * 142 - PF2 attn prod; 143 - PF3 attn prod; + * 144-147 reserved. 
+ * + * E1.5 mode - In backward compatible mode; + * for non default SB; each even line in the memory + * holds the U producer and each odd line hold + * the C producer. The first 128 producers are for + * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20 + * producers are for the DSB for each PF. + * Each PF has five segments: (the order inside each + * segment is PF0; PF1; PF2; PF3) - 128-131 U prods; + * 132-135 C prods; 136-139 X prods; 140-143 T prods; + * 144-147 attn prods; + */ + /* non-default-status-blocks */ + num_segs = CHIP_INT_MODE_IS_BC(sc) ? + IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS; + for (sb_idx = 0; sb_idx < sc->igu_sb_cnt; sb_idx++) { + prod_offset = (sc->igu_base_sb + sb_idx) * + num_segs; + + for (i = 0; i < num_segs; i++) { + addr = IGU_REG_PROD_CONS_MEMORY + + (prod_offset + i) * 4; + REG_WR(sc, addr, 0); + } + /* send consumer update with value 0 */ + bnx2x_ack_sb(sc, sc->igu_base_sb + sb_idx, + USTORM_ID, 0, IGU_INT_NOP, 1); + bnx2x_igu_clear_sb(sc, sc->igu_base_sb + sb_idx); + } + + /* default-status-blocks */ + num_segs = CHIP_INT_MODE_IS_BC(sc) ? + IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS; + + if (CHIP_IS_MODE_4_PORT(sc)) + dsb_idx = SC_FUNC(sc); + else + dsb_idx = SC_VN(sc); + + prod_offset = (CHIP_INT_MODE_IS_BC(sc) ? + IGU_BC_BASE_DSB_PROD + dsb_idx : + IGU_NORM_BASE_DSB_PROD + dsb_idx); + + /* + * igu prods come in chunks of E1HVN_MAX (4) - + * does not matters what is the current chip mode + */ + for (i = 0; i < (num_segs * E1HVN_MAX); i += E1HVN_MAX) { + addr = IGU_REG_PROD_CONS_MEMORY + + (prod_offset + i) * 4; + REG_WR(sc, addr, 0); + } + /* send consumer update with 0 */ + if (CHIP_INT_MODE_IS_BC(sc)) { + bnx2x_ack_sb(sc, sc->igu_dsb_id, + USTORM_ID, 0, IGU_INT_NOP, 1); + bnx2x_ack_sb(sc, sc->igu_dsb_id, + CSTORM_ID, 0, IGU_INT_NOP, 1); + bnx2x_ack_sb(sc, sc->igu_dsb_id, + XSTORM_ID, 0, IGU_INT_NOP, 1); + bnx2x_ack_sb(sc, sc->igu_dsb_id, + TSTORM_ID, 0, IGU_INT_NOP, 1); + bnx2x_ack_sb(sc, sc->igu_dsb_id, + ATTENTION_ID, 0, IGU_INT_NOP, 1); + } else { + bnx2x_ack_sb(sc, sc->igu_dsb_id, + USTORM_ID, 0, IGU_INT_NOP, 1); + bnx2x_ack_sb(sc, sc->igu_dsb_id, + ATTENTION_ID, 0, IGU_INT_NOP, 1); + } + bnx2x_igu_clear_sb(sc, sc->igu_dsb_id); + + /* !!! 
these should become driver const once + rf-tool supports split-68 const */ + REG_WR(sc, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0); + REG_WR(sc, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0); + REG_WR(sc, IGU_REG_SB_MASK_LSB, 0); + REG_WR(sc, IGU_REG_SB_MASK_MSB, 0); + REG_WR(sc, IGU_REG_PBA_STATUS_LSB, 0); + REG_WR(sc, IGU_REG_PBA_STATUS_MSB, 0); + } + } + + /* Reset PCIE errors for debug */ + REG_WR(sc, 0x2114, 0xffffffff); + REG_WR(sc, 0x2120, 0xffffffff); + + if (CHIP_IS_E1x(sc)) { + main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords */ + main_mem_base = HC_REG_MAIN_MEMORY + + SC_PORT(sc) * (main_mem_size * 4); + main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR; + main_mem_width = 8; + + val = REG_RD(sc, main_mem_prty_clr); + if (val) { + PMD_DRV_LOG(DEBUG, sc, + "Parity errors in HC block during function init (0x%x)!", + val); + } + +/* Clear "false" parity errors in MSI-X table */ + for (i = main_mem_base; + i < main_mem_base + main_mem_size * 4; + i += main_mem_width) { + bnx2x_read_dmae(sc, i, main_mem_width / 4); + bnx2x_write_dmae(sc, BNX2X_SP_MAPPING(sc, wb_data), + i, main_mem_width / 4); + } +/* Clear HC parity attention */ + REG_RD(sc, main_mem_prty_clr); + } + + /* Enable STORMs SP logging */ + REG_WR8(sc, BAR_USTRORM_INTMEM + + USTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1); + REG_WR8(sc, BAR_TSTRORM_INTMEM + + TSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1); + REG_WR8(sc, BAR_CSTRORM_INTMEM + + CSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1); + REG_WR8(sc, BAR_XSTRORM_INTMEM + + XSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1); + + elink_phy_probe(&sc->link_params); + + return 0; +} + +static void bnx2x_link_reset(struct bnx2x_softc *sc) +{ + if (!BNX2X_NOMCP(sc)) { + bnx2x_acquire_phy_lock(sc); + elink_lfa_reset(&sc->link_params, &sc->link_vars); + bnx2x_release_phy_lock(sc); + } else { + if (!CHIP_REV_IS_SLOW(sc)) { + PMD_DRV_LOG(WARNING, sc, + "Bootcode is missing - cannot reset link"); + } + } +} + +static void bnx2x_reset_port(struct bnx2x_softc *sc) +{ + int port = SC_PORT(sc); + uint32_t val; + + /* reset physical Link */ + bnx2x_link_reset(sc); + + REG_WR(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port * 4, 0); + + /* Do not rcv packets to BRB */ + REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK + port * 4, 0x0); + /* Do not direct rcv packets that are not for MCP to the BRB */ + REG_WR(sc, (port ? 
NIG_REG_LLH1_BRB1_NOT_MCP : + NIG_REG_LLH0_BRB1_NOT_MCP), 0x0); + + /* Configure AEU */ + REG_WR(sc, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port * 4, 0); + + DELAY(100000); + + /* Check for BRB port occupancy */ + val = REG_RD(sc, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port * 4); + if (val) { + PMD_DRV_LOG(DEBUG, sc, + "BRB1 is not empty, %d blocks are occupied", val); + } +} + +static void bnx2x_ilt_wr(struct bnx2x_softc *sc, uint32_t index, rte_iova_t addr) +{ + int reg; + uint32_t wb_write[2]; + + reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index * 8; + + wb_write[0] = ONCHIP_ADDR1(addr); + wb_write[1] = ONCHIP_ADDR2(addr); + REG_WR_DMAE(sc, reg, wb_write, 2); +} + +static void bnx2x_clear_func_ilt(struct bnx2x_softc *sc, uint32_t func) +{ + uint32_t i, base = FUNC_ILT_BASE(func); + for (i = base; i < base + ILT_PER_FUNC; i++) { + bnx2x_ilt_wr(sc, i, 0); + } +} + +static void bnx2x_reset_func(struct bnx2x_softc *sc) +{ + struct bnx2x_fastpath *fp; + int port = SC_PORT(sc); + int func = SC_FUNC(sc); + int i; + + /* Disable the function in the FW */ + REG_WR8(sc, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0); + REG_WR8(sc, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0); + REG_WR8(sc, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0); + REG_WR8(sc, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0); + + /* FP SBs */ + FOR_EACH_ETH_QUEUE(sc, i) { + fp = &sc->fp[i]; + REG_WR8(sc, BAR_CSTRORM_INTMEM + + CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id), + SB_DISABLED); + } + + /* SP SB */ + REG_WR8(sc, BAR_CSTRORM_INTMEM + + CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func), SB_DISABLED); + + for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++) { + REG_WR(sc, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func), + 0); + } + + /* Configure IGU */ + if (sc->devinfo.int_block == INT_BLOCK_HC) { + REG_WR(sc, HC_REG_LEADING_EDGE_0 + port * 8, 0); + REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port * 8, 0); + } else { + REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, 0); + REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, 0); + } + + if (CNIC_LOADED(sc)) { +/* Disable Timer scan */ + REG_WR(sc, TM_REG_EN_LINEAR0_TIMER + port * 4, 0); +/* + * Wait for at least 10ms and up to 2 second for the timers + * scan to complete + */ + for (i = 0; i < 200; i++) { + DELAY(10000); + if (!REG_RD(sc, TM_REG_LIN0_SCAN_ON + port * 4)) + break; + } + } + + /* Clear ILT */ + bnx2x_clear_func_ilt(sc, func); + + /* + * Timers workaround bug for E2: if this is vnic-3, + * we need to set the entire ilt range for this timers. 
+ */ + if (!CHIP_IS_E1x(sc) && SC_VN(sc) == 3) { + struct ilt_client_info ilt_cli; +/* use dummy TM client */ + memset(&ilt_cli, 0, sizeof(struct ilt_client_info)); + ilt_cli.start = 0; + ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1; + ilt_cli.client_num = ILT_CLIENT_TM; + + ecore_ilt_boundary_init_op(sc, &ilt_cli, 0, INITOP_CLEAR); + } + + /* this assumes that reset_port() called before reset_func() */ + if (!CHIP_IS_E1x(sc)) { + bnx2x_pf_disable(sc); + } + + sc->dmae_ready = 0; +} + +static void bnx2x_release_firmware(struct bnx2x_softc *sc) +{ + rte_free(sc->init_ops); + rte_free(sc->init_ops_offsets); + rte_free(sc->init_data); + rte_free(sc->iro_array); +} + +static int bnx2x_init_firmware(struct bnx2x_softc *sc) +{ + uint32_t len, i; + uint8_t *p = sc->firmware; + uint32_t off[24]; + + for (i = 0; i < 24; ++i) + off[i] = rte_be_to_cpu_32(*((uint32_t *) sc->firmware + i)); + + len = off[0]; + sc->init_ops = rte_zmalloc("", len, RTE_CACHE_LINE_SIZE); + if (!sc->init_ops) + goto alloc_failed; + bnx2x_data_to_init_ops(p + off[1], sc->init_ops, len); + + len = off[2]; + sc->init_ops_offsets = rte_zmalloc("", len, RTE_CACHE_LINE_SIZE); + if (!sc->init_ops_offsets) + goto alloc_failed; + bnx2x_data_to_init_offsets(p + off[3], sc->init_ops_offsets, len); + + len = off[4]; + sc->init_data = rte_zmalloc("", len, RTE_CACHE_LINE_SIZE); + if (!sc->init_data) + goto alloc_failed; + bnx2x_data_to_init_data(p + off[5], sc->init_data, len); + + sc->tsem_int_table_data = p + off[7]; + sc->tsem_pram_data = p + off[9]; + sc->usem_int_table_data = p + off[11]; + sc->usem_pram_data = p + off[13]; + sc->csem_int_table_data = p + off[15]; + sc->csem_pram_data = p + off[17]; + sc->xsem_int_table_data = p + off[19]; + sc->xsem_pram_data = p + off[21]; + + len = off[22]; + sc->iro_array = rte_zmalloc("", len, RTE_CACHE_LINE_SIZE); + if (!sc->iro_array) + goto alloc_failed; + bnx2x_data_to_iro_array(p + off[23], sc->iro_array, len); + + return 0; + +alloc_failed: + bnx2x_release_firmware(sc); + return -1; +} + +static int cut_gzip_prefix(const uint8_t * zbuf, int len) +{ +#define MIN_PREFIX_SIZE (10) + + int n = MIN_PREFIX_SIZE; + uint16_t xlen; + + if (!(zbuf[0] == 0x1f && zbuf[1] == 0x8b && zbuf[2] == Z_DEFLATED) || + len <= MIN_PREFIX_SIZE) { + return -1; + } + + /* optional extra fields are present */ + if (zbuf[3] & 0x4) { + xlen = zbuf[13]; + xlen <<= 8; + xlen += zbuf[12]; + + n += xlen; + } + /* file name is present */ + if (zbuf[3] & 0x8) { + while ((zbuf[n++] != 0) && (n < len)) ; + } + + return n; +} + +static int ecore_gunzip(struct bnx2x_softc *sc, const uint8_t * zbuf, int len) +{ + int ret; + int data_begin = cut_gzip_prefix(zbuf, len); + + PMD_DRV_LOG(DEBUG, sc, "ecore_gunzip %d", len); + + if (data_begin <= 0) { + PMD_DRV_LOG(NOTICE, sc, "bad gzip prefix"); + return -1; + } + + memset(&zlib_stream, 0, sizeof(zlib_stream)); + zlib_stream.next_in = zbuf + data_begin; + zlib_stream.avail_in = len - data_begin; + zlib_stream.next_out = sc->gz_buf; + zlib_stream.avail_out = FW_BUF_SIZE; + + ret = inflateInit2(&zlib_stream, -MAX_WBITS); + if (ret != Z_OK) { + PMD_DRV_LOG(NOTICE, sc, "zlib inflateInit2 error"); + return ret; + } + + ret = inflate(&zlib_stream, Z_FINISH); + if ((ret != Z_STREAM_END) && (ret != Z_OK)) { + PMD_DRV_LOG(NOTICE, sc, "zlib inflate error: %d %s", ret, + zlib_stream.msg); + } + + sc->gz_outlen = zlib_stream.total_out; + if (sc->gz_outlen & 0x3) { + PMD_DRV_LOG(NOTICE, sc, "firmware is not aligned. 
gz_outlen == %d", + sc->gz_outlen); + } + sc->gz_outlen >>= 2; + + inflateEnd(&zlib_stream); + + if (ret == Z_STREAM_END) + return 0; + + return ret; +} + +static void +ecore_write_dmae_phys_len(struct bnx2x_softc *sc, rte_iova_t phys_addr, + uint32_t addr, uint32_t len) +{ + bnx2x_write_dmae_phys_len(sc, phys_addr, addr, len); +} + +void +ecore_storm_memset_struct(struct bnx2x_softc *sc, uint32_t addr, size_t size, + uint32_t * data) +{ + uint8_t i; + for (i = 0; i < size / 4; i++) { + REG_WR(sc, addr + (i * 4), data[i]); + } +} + +static const char *get_ext_phy_type(uint32_t ext_phy_type) +{ + uint32_t phy_type_idx = ext_phy_type >> 8; + static const char *types[] = + { "DIRECT", "BNX2X-8071", "BNX2X-8072", "BNX2X-8073", + "BNX2X-8705", "BNX2X-8706", "BNX2X-8726", "BNX2X-8481", "SFX-7101", + "BNX2X-8727", + "BNX2X-8727-NOC", "BNX2X-84823", "NOT_CONN", "FAILURE" + }; + + if (phy_type_idx < 12) + return types[phy_type_idx]; + else if (PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN == ext_phy_type) + return types[12]; + else + return types[13]; +} + +static const char *get_state(uint32_t state) +{ + uint32_t state_idx = state >> 12; + static const char *states[] = { "CLOSED", "OPENING_WAIT4_LOAD", + "OPENING_WAIT4_PORT", "OPEN", "CLOSING_WAIT4_HALT", + "CLOSING_WAIT4_DELETE", "CLOSING_WAIT4_UNLOAD", + "UNKNOWN", "UNKNOWN", "UNKNOWN", "UNKNOWN", "UNKNOWN", + "UNKNOWN", "DISABLED", "DIAG", "ERROR", "UNDEFINED" + }; + + if (state_idx <= 0xF) + return states[state_idx]; + else + return states[0x10]; +} + +static const char *get_recovery_state(uint32_t state) +{ + static const char *states[] = { "NONE", "DONE", "INIT", + "WAIT", "FAILED", "NIC_LOADING" + }; + return states[state]; +} + +static const char *get_rx_mode(uint32_t mode) +{ + static const char *modes[] = { "NONE", "NORMAL", "ALLMULTI", + "PROMISC", "MAX_MULTICAST", "ERROR" + }; + + if (mode < 0x4) + return modes[mode]; + else if (BNX2X_MAX_MULTICAST == mode) + return modes[4]; + else + return modes[5]; +} + +#define BNX2X_INFO_STR_MAX 256 +static const char *get_bnx2x_flags(uint32_t flags) +{ + int i; + static const char *flag[] = { "ONE_PORT ", "NO_ISCSI ", + "NO_FCOE ", "NO_WOL ", "USING_DAC ", "USING_MSIX ", + "USING_MSI ", "DISABLE_MSI ", "UNKNOWN ", "NO_MCP ", + "SAFC_TX_FLAG ", "MF_FUNC_DIS ", "TX_SWITCHING " + }; + static char flag_str[BNX2X_INFO_STR_MAX]; + memset(flag_str, 0, BNX2X_INFO_STR_MAX); + + for (i = 0; i < 5; i++) + if (flags & (1 << i)) { + strlcat(flag_str, flag[i], sizeof(flag_str)); + flags ^= (1 << i); + } + if (flags) { + static char unknown[BNX2X_INFO_STR_MAX]; + snprintf(unknown, 32, "Unknown flag mask %x", flags); + strlcat(flag_str, unknown, sizeof(flag_str)); + } + return flag_str; +} + +/* Prints useful adapter info. */ +void bnx2x_print_adapter_info(struct bnx2x_softc *sc) +{ + int i = 0; + + PMD_DRV_LOG(INFO, sc, "========================================"); + /* DPDK and Driver versions */ + PMD_DRV_LOG(INFO, sc, "%12s : %s", "DPDK", + rte_version()); + PMD_DRV_LOG(INFO, sc, "%12s : %s", "Driver", + bnx2x_pmd_version()); + /* Firmware versions. */ + PMD_DRV_LOG(INFO, sc, "%12s : %d.%d.%d", + "Firmware", + BNX2X_5710_FW_MAJOR_VERSION, + BNX2X_5710_FW_MINOR_VERSION, + BNX2X_5710_FW_REVISION_VERSION); + PMD_DRV_LOG(INFO, sc, "%12s : %s", + "Bootcode", sc->devinfo.bc_ver_str); + /* Hardware chip info. */ + PMD_DRV_LOG(INFO, sc, "%12s : %#08x", "ASIC", sc->devinfo.chip_id); + PMD_DRV_LOG(INFO, sc, "%12s : %c%d", "Rev", (CHIP_REV(sc) >> 12) + 'A', + (CHIP_METAL(sc) >> 4)); + /* Bus PCIe info. 
*/ + PMD_DRV_LOG(INFO, sc, "%12s : 0x%x", "Vendor Id", + sc->devinfo.vendor_id); + PMD_DRV_LOG(INFO, sc, "%12s : 0x%x", "Device Id", + sc->devinfo.device_id); + PMD_DRV_LOG(INFO, sc, "%12s : width x%d, ", "Bus PCIe", + sc->devinfo.pcie_link_width); + switch (sc->devinfo.pcie_link_speed) { + case 1: + PMD_DRV_LOG(INFO, sc, "%23s", "2.5 Gbps"); + break; + case 2: + PMD_DRV_LOG(INFO, sc, "%21s", "5 Gbps"); + break; + case 4: + PMD_DRV_LOG(INFO, sc, "%21s", "8 Gbps"); + break; + default: + PMD_DRV_LOG(INFO, sc, "%33s", "Unknown link speed"); + } + /* Device features. */ + PMD_DRV_LOG(INFO, sc, "%12s : ", "Flags"); + /* Miscellaneous flags. */ + if (sc->devinfo.pcie_cap_flags & BNX2X_MSI_CAPABLE_FLAG) { + PMD_DRV_LOG(INFO, sc, "%18s", "MSI"); + i++; + } + if (sc->devinfo.pcie_cap_flags & BNX2X_MSIX_CAPABLE_FLAG) { + if (i > 0) + PMD_DRV_LOG(INFO, sc, "|"); + PMD_DRV_LOG(INFO, sc, "%20s", "MSI-X"); + i++; + } + PMD_DRV_LOG(INFO, sc, "%12s : %s", "OVLAN", (OVLAN(sc) ? "YES" : "NO")); + PMD_DRV_LOG(INFO, sc, "%12s : %s", "MF", (IS_MF(sc) ? "YES" : "NO")); + PMD_DRV_LOG(INFO, sc, "========================================"); +} + +/* Prints useful device info. */ +void bnx2x_print_device_info(struct bnx2x_softc *sc) +{ + __rte_unused uint32_t ext_phy_type; + uint32_t offset, reg_val; + + PMD_INIT_FUNC_TRACE(sc); + offset = offsetof(struct shmem_region, + dev_info.port_hw_config[0].external_phy_config); + reg_val = REG_RD(sc, sc->devinfo.shmem_base + offset); + if (sc->link_vars.phy_flags & PHY_XGXS_FLAG) + ext_phy_type = ELINK_XGXS_EXT_PHY_TYPE(reg_val); + else + ext_phy_type = ELINK_SERDES_EXT_PHY_TYPE(reg_val); + + /* Device features. */ + PMD_DRV_LOG(INFO, sc, "%12s : %u", "Bnx2x Func", sc->pcie_func); + PMD_DRV_LOG(INFO, sc, + "%12s : %s", "Bnx2x Flags", get_bnx2x_flags(sc->flags)); + PMD_DRV_LOG(INFO, sc, "%12s : %s", "DMAE Is", + (sc->dmae_ready ? "Ready" : "Not Ready")); + PMD_DRV_LOG(INFO, sc, "%12s : %u", "MTU", sc->mtu); + PMD_DRV_LOG(INFO, sc, + "%12s : %s", "PHY Type", get_ext_phy_type(ext_phy_type)); + PMD_DRV_LOG(INFO, sc, "%12s : %x:%x:%x:%x:%x:%x", "MAC Addr", + sc->link_params.mac_addr[0], + sc->link_params.mac_addr[1], + sc->link_params.mac_addr[2], + sc->link_params.mac_addr[3], + sc->link_params.mac_addr[4], + sc->link_params.mac_addr[5]); + PMD_DRV_LOG(INFO, sc, "%12s : %s", "RX Mode", get_rx_mode(sc->rx_mode)); + PMD_DRV_LOG(INFO, sc, "%12s : %s", "State", get_state(sc->state)); + if (sc->recovery_state) + PMD_DRV_LOG(INFO, sc, "%12s : %s", "Recovery", + get_recovery_state(sc->recovery_state)); + /* Queue info. 
*/ + if (IS_PF(sc)) { + switch (sc->sp->rss_rdata.rss_mode) { + case ETH_RSS_MODE_DISABLED: + PMD_DRV_LOG(INFO, sc, "%12s : %s", "Queues", "RSS mode - None"); + break; + case ETH_RSS_MODE_REGULAR: + PMD_DRV_LOG(INFO, sc, "%12s : %s,", "Queues", "RSS mode - Regular"); + PMD_DRV_LOG(INFO, sc, "%16d", sc->num_queues); + break; + default: + PMD_DRV_LOG(INFO, sc, "%12s : %s", "Queues", "RSS mode - Unknown"); + break; + } + } + PMD_DRV_LOG(INFO, sc, "%12s : CQ = %lx, EQ = %lx", "SPQ Left", + sc->cq_spq_left, sc->eq_spq_left); + + PMD_DRV_LOG(INFO, sc, + "%12s : %x", "Switch", sc->link_params.switch_cfg); + PMD_DRV_LOG(INFO, sc, "pcie_bus=%d, pcie_device=%d", + sc->pcie_bus, sc->pcie_device); + PMD_DRV_LOG(INFO, sc, "bar0.addr=%p, bar1.addr=%p", + sc->bar[BAR0].base_addr, sc->bar[BAR1].base_addr); + PMD_DRV_LOG(INFO, sc, "port=%d, path=%d, vnic=%d, func=%d", + PORT_ID(sc), PATH_ID(sc), VNIC_ID(sc), FUNC_ID(sc)); +} diff --git a/src/spdk/dpdk/drivers/net/bnx2x/bnx2x.h b/src/spdk/dpdk/drivers/net/bnx2x/bnx2x.h new file mode 100644 index 000000000..3cadb5d82 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnx2x/bnx2x.h @@ -0,0 +1,2101 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2007-2013 Broadcom Corporation. + * + * Eric Davis + * David Christensen + * Gary Zambrano + * + * Copyright (c) 2013-2015 Brocade Communications Systems, Inc. + * Copyright (c) 2015-2018 Cavium Inc. + * All rights reserved. + * www.cavium.com + */ + +#ifndef __BNX2X_H__ +#define __BNX2X_H__ + +#include +#include +#include +#include + +#include "bnx2x_osal.h" +#include "bnx2x_ethdev.h" +#include "ecore_mfw_req.h" +#include "ecore_fw_defs.h" +#include "ecore_hsi.h" +#include "ecore_reg.h" +#include "bnx2x_stats.h" +#include "bnx2x_vfpf.h" + +#include "elink.h" + +#ifndef RTE_EXEC_ENV_FREEBSD +#include + +#define PCIY_PMG PCI_CAP_ID_PM +#define PCIY_MSI PCI_CAP_ID_MSI +#define PCIY_EXPRESS PCI_CAP_ID_EXP +#define PCIY_MSIX PCI_CAP_ID_MSIX +#define PCIR_EXPRESS_DEVICE_STA PCI_EXP_TYPE_RC_EC +#define PCIM_EXP_STA_TRANSACTION_PND PCI_EXP_DEVSTA_TRPND +#define PCIR_EXPRESS_LINK_STA PCI_EXP_LNKSTA +#define PCIM_LINK_STA_WIDTH PCI_EXP_LNKSTA_NLW +#define PCIM_LINK_STA_SPEED PCI_EXP_LNKSTA_CLS +#define PCIR_EXPRESS_DEVICE_CTL PCI_EXP_DEVCTL +#define PCIM_EXP_CTL_MAX_PAYLOAD PCI_EXP_DEVCTL_PAYLOAD +#define PCIM_EXP_CTL_MAX_READ_REQUEST PCI_EXP_DEVCTL_READRQ +#define PCIR_POWER_STATUS PCI_PM_CTRL +#define PCIM_PSTAT_DMASK PCI_PM_CTRL_STATE_MASK +#define PCIM_PSTAT_PME PCI_PM_CTRL_PME_STATUS +#define PCIM_PSTAT_D3 0x3 +#define PCIM_PSTAT_PMEENABLE PCI_PM_CTRL_PME_ENABLE +#define PCIR_MSIX_CTRL PCI_MSIX_FLAGS +#define PCIM_MSIXCTRL_TABLE_SIZE PCI_MSIX_FLAGS_QSIZE +#else +#include +#endif + +#define IFM_10G_CX4 20 /* 10GBase CX4 copper */ +#define IFM_10G_TWINAX 22 /* 10GBase Twinax copper */ +#define IFM_10G_T 26 /* 10GBase-T - RJ45 */ + +#ifndef RTE_EXEC_ENV_FREEBSD +#define PCIR_EXPRESS_DEVICE_STA PCI_EXP_TYPE_RC_EC +#define PCIM_EXP_STA_TRANSACTION_PND PCI_EXP_DEVSTA_TRPND +#define PCIR_EXPRESS_LINK_STA PCI_EXP_LNKSTA +#define PCIM_LINK_STA_WIDTH PCI_EXP_LNKSTA_NLW +#define PCIM_LINK_STA_SPEED PCI_EXP_LNKSTA_CLS +#define PCIR_EXPRESS_DEVICE_CTL PCI_EXP_DEVCTL +#define PCIM_EXP_CTL_MAX_PAYLOAD PCI_EXP_DEVCTL_PAYLOAD +#define PCIM_EXP_CTL_MAX_READ_REQUEST PCI_EXP_DEVCTL_READRQ +#else +#define PCIR_EXPRESS_DEVICE_STA PCIER_DEVICE_STA +#define PCIM_EXP_STA_TRANSACTION_PND PCIEM_STA_TRANSACTION_PND +#define PCIR_EXPRESS_LINK_STA PCIER_LINK_STA +#define PCIM_LINK_STA_WIDTH PCIEM_LINK_STA_WIDTH +#define PCIM_LINK_STA_SPEED 
PCIEM_LINK_STA_SPEED +#define PCIR_EXPRESS_DEVICE_CTL PCIER_DEVICE_CTL +#define PCIM_EXP_CTL_MAX_PAYLOAD PCIEM_CTL_MAX_PAYLOAD +#define PCIM_EXP_CTL_MAX_READ_REQUEST PCIEM_CTL_MAX_READ_REQUEST +#endif + +#ifndef ARRAY_SIZE +#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0])) +#endif +#ifndef DIV_ROUND_UP +#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d)) +#endif +#ifndef roundup +#define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y)) +#endif +#ifndef ilog2 +static inline +int bnx2x_ilog2(int x) +{ + int log = 0; + x >>= 1; + + while(x) { + log++; + x >>= 1; + } + return log; +} +#define ilog2(x) bnx2x_ilog2(x) +#endif + +#define BNX2X_BC_VER 0x040200 + +#include "ecore_sp.h" + +struct bnx2x_device_type { + uint16_t bnx2x_vid; + uint16_t bnx2x_did; + uint16_t bnx2x_svid; + uint16_t bnx2x_sdid; + char *bnx2x_name; +}; + +#define BNX2X_PAGE_SHIFT 12 +#define BNX2X_PAGE_SIZE (1 << BNX2X_PAGE_SHIFT) +#define BNX2X_PAGE_MASK (~(BNX2X_PAGE_SIZE - 1)) +#define BNX2X_PAGE_ALIGN(addr) ((addr + BNX2X_PAGE_SIZE - 1) & BNX2X_PAGE_MASK) + +#if BNX2X_PAGE_SIZE != 4096 +#error Page sizes other than 4KB are unsupported! +#endif + +#define U64_LO(addr) ((uint32_t)(((uint64_t)(addr)) & 0xFFFFFFFF)) +#define U64_HI(addr) ((uint32_t)(((uint64_t)(addr)) >> 32)) +#define HILO_U64(hi, lo) ((((uint64_t)(hi)) << 32) + (lo)) + +/* dropless fc FW/HW related params */ +#define BRB_SIZE(sc) (CHIP_IS_E3(sc) ? 1024 : 512) +#define MAX_AGG_QS(sc) ETH_MAX_AGGREGATION_QUEUES_E1H_E2 +#define FW_DROP_LEVEL(sc) (3 + MAX_SPQ_PENDING + MAX_AGG_QS(sc)) +#define FW_PREFETCH_CNT 16U +#define DROPLESS_FC_HEADROOM 100 + +/* + * Transmit Buffer Descriptor (tx_bd) definitions* + */ +/* NUM_TX_PAGES must be a power of 2. */ +#define NUM_TX_PAGES 16 +#define TOTAL_TX_BD_PER_PAGE (BNX2X_PAGE_SIZE / sizeof(union eth_tx_bd_types)) /* 256 */ +#define USABLE_TX_BD_PER_PAGE (TOTAL_TX_BD_PER_PAGE - 1) /* 255 */ + +#define TOTAL_TX_BD(q) (TOTAL_TX_BD_PER_PAGE * q->nb_tx_pages) /* 512 */ +#define USABLE_TX_BD(q) (USABLE_TX_BD_PER_PAGE * q->nb_tx_pages) /* 510 */ +#define MAX_TX_BD(q) (TOTAL_TX_BD(q) - 1) /* 511 */ +#define MAX_TX_AVAIL (USABLE_TX_BD_PER_PAGE * NUM_TX_PAGES - 2) +#define NEXT_TX_BD(x) \ + ((((x) & USABLE_TX_BD_PER_PAGE) == \ + (USABLE_TX_BD_PER_PAGE - 1)) ? (x) + 2 : (x) + 1) + +#define TX_BD(x, q) ((x) & MAX_TX_BD(q)) +#define TX_PAGE(x) (((x) & ~USABLE_TX_BD_PER_PAGE) >> 8) +#define TX_IDX(x) ((x) & USABLE_TX_BD_PER_PAGE) + +#define BDS_PER_TX_PKT (3) + +/* + * Trigger pending transmits when the number of available BDs is greater + * than 1/8 of the total number of usable BDs. + */ +#define BNX2X_TX_CLEANUP_THRESHOLD(q) (USABLE_TX_BD(q) / 8) +#define BNX2X_TX_TIMEOUT 5 + +/* + * Receive Buffer Descriptor (rx_bd) definitions* + */ +#define MAX_RX_PAGES 8 +#define TOTAL_RX_BD_PER_PAGE (BNX2X_PAGE_SIZE / sizeof(struct eth_rx_bd)) /* 512 */ +#define USABLE_RX_BD_PER_PAGE (TOTAL_RX_BD_PER_PAGE - 2) /* 510 */ +#define RX_BD_PER_PAGE_MASK (TOTAL_RX_BD_PER_PAGE - 1) /* 511 */ +#define TOTAL_RX_BD(q) (TOTAL_RX_BD_PER_PAGE * q->nb_rx_pages) /* 512 */ +#define USABLE_RX_BD(q) (USABLE_RX_BD_PER_PAGE * q->nb_rx_pages) /* 510 */ +#define MAX_RX_BD(q) (TOTAL_RX_BD(q) - 1) /* 511 */ +#define MAX_RX_AVAIL (USABLE_RX_BD_PER_PAGE * MAX_RX_PAGES - 2) +#define RX_BD_NEXT_PAGE_DESC_CNT 2 + +#define NEXT_RX_BD(x) \ + ((((x) & RX_BD_PER_PAGE_MASK) == \ + (USABLE_RX_BD_PER_PAGE - 1)) ? 
(x) + 3 : (x) + 1) + +/* x & 0x3ff */ +#define RX_BD(x, q) ((x) & MAX_RX_BD(q)) +#define RX_PAGE(x) (((x) & ~RX_BD_PER_PAGE_MASK) >> 9) +#define RX_IDX(x) ((x) & RX_BD_PER_PAGE_MASK) + +/* + * Receive Completion Queue definitions* + */ +//#define NUM_RCQ_PAGES (NUM_RX_PAGES * 4) +#define TOTAL_RCQ_ENTRIES_PER_PAGE (BNX2X_PAGE_SIZE / sizeof(union eth_rx_cqe)) /* 128 */ +#define USABLE_RCQ_ENTRIES_PER_PAGE (TOTAL_RCQ_ENTRIES_PER_PAGE - 1) /* 127 */ +#define TOTAL_RCQ_ENTRIES(q) (TOTAL_RCQ_ENTRIES_PER_PAGE * q->nb_cq_pages) /* 512 */ +#define USABLE_RCQ_ENTRIES(q) (USABLE_RCQ_ENTRIES_PER_PAGE * q->nb_cq_pages) /* 508 */ +#define MAX_RCQ_ENTRIES(q) (TOTAL_RCQ_ENTRIES(q) - 1) /* 511 */ +#define RCQ_NEXT_PAGE_DESC_CNT 1 + +#define NEXT_RCQ_IDX(x) \ + ((((x) & USABLE_RCQ_ENTRIES_PER_PAGE) == \ + (USABLE_RCQ_ENTRIES_PER_PAGE - 1)) ? (x) + 2 : (x) + 1) + +#define CQE_BD_REL \ + (sizeof(union eth_rx_cqe) / sizeof(struct eth_rx_bd)) + +#define RCQ_BD_PAGES(q) \ + (q->nb_rx_pages * CQE_BD_REL) + +#define RCQ_ENTRY(x, q) ((x) & MAX_RCQ_ENTRIES(q)) +#define RCQ_PAGE(x) (((x) & ~USABLE_RCQ_ENTRIES_PER_PAGE) >> 7) +#define RCQ_IDX(x) ((x) & USABLE_RCQ_ENTRIES_PER_PAGE) + +/* + * dropless fc calculations for BDs + * Number of BDs should be as number of buffers in BRB: + * Low threshold takes into account RX_BD_NEXT_PAGE_DESC_CNT + * "next" elements on each page + */ +#define NUM_BD_REQ(sc) \ + BRB_SIZE(sc) +#define NUM_BD_PG_REQ(sc) \ + ((NUM_BD_REQ(sc) + USABLE_RX_BD_PER_PAGE - 1) / USABLE_RX_BD_PER_PAGE) +#define BD_TH_LO(sc) \ + (NUM_BD_REQ(sc) + \ + NUM_BD_PG_REQ(sc) * RX_BD_NEXT_PAGE_DESC_CNT + \ + FW_DROP_LEVEL(sc)) +#define BD_TH_HI(sc) \ + (BD_TH_LO(sc) + DROPLESS_FC_HEADROOM) +#define MIN_RX_AVAIL(sc) \ + ((sc)->dropless_fc ? BD_TH_HI(sc) + 128 : 128) + +#define MIN_RX_SIZE_NONTPA_HW ETH_MIN_RX_CQES_WITHOUT_TPA +#define MIN_RX_SIZE_NONTPA (RTE_MAX((uint32_t)MIN_RX_SIZE_NONTPA_HW,\ + (uint32_t)MIN_RX_AVAIL(sc))) + +/* + * dropless fc calculations for RCQs + * Number of RCQs should be as number of buffers in BRB: + * Low threshold takes into account RCQ_NEXT_PAGE_DESC_CNT + * "next" elements on each page + */ +#define NUM_RCQ_REQ(sc) \ + BRB_SIZE(sc) +#define NUM_RCQ_PG_REQ(sc) \ + ((NUM_RCQ_REQ(sc) + USABLE_RCQ_ENTRIES_PER_PAGE - 1) / USABLE_RCQ_ENTRIES_PER_PAGE) +#define RCQ_TH_LO(sc) \ + (NUM_RCQ_REQ(sc) + \ + NUM_RCQ_PG_REQ(sc) * RCQ_NEXT_PAGE_DESC_CNT + \ + FW_DROP_LEVEL(sc)) +#define RCQ_TH_HI(sc) \ + (RCQ_TH_LO(sc) + DROPLESS_FC_HEADROOM) + +/* Load / Unload modes */ +#define LOAD_NORMAL 0 +#define LOAD_OPEN 1 +#define LOAD_DIAG 2 +#define LOAD_LOOPBACK_EXT 3 +#define UNLOAD_NORMAL 0 +#define UNLOAD_CLOSE 1 +#define UNLOAD_RECOVERY 2 + +/* Some constants... */ +//#define MAX_PATH_NUM 2 +//#define E2_MAX_NUM_OF_VFS 64 +//#define E1H_FUNC_MAX 8 +//#define E2_FUNC_MAX 4 /* per path */ +#define MAX_VNIC_NUM 4 +#define MAX_FUNC_NUM 8 /* common to all chips */ +//#define MAX_NDSB HC_SB_MAX_SB_E2 /* max non-default status block */ +#define MAX_RSS_CHAINS 16 /* a constant for HW limit */ +#define MAX_MSI_VECTOR 8 /* a constant for HW limit */ + +#define ILT_NUM_PAGE_ENTRIES 3072 +/* + * 57711 we use whole table since we have 8 functions. + * 57712 we have only 4 functions, but use same size per func, so only half + * of the table is used. 
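+ * + * For example (worked numbers, derived from the macros below): with + * ILT_NUM_PAGE_ENTRIES = 3072, ILT_PER_FUNC comes out to 3072 / 8 = 384 + * entries per function, so FUNC_ILT_BASE(0) = 0, FUNC_ILT_BASE(1) = 384, ... + * FUNC_ILT_BASE(7) = 2688, and bnx2x_clear_func_ilt() in bnx2x.c zeroes + * exactly one such 384-entry window.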
+ */ +#define ILT_PER_FUNC (ILT_NUM_PAGE_ENTRIES / 8) +#define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC) +/* + * the phys address is shifted right 12 bits and has an added + * 1=valid bit added to the 53rd bit + * then since this is a wide register(TM) + * we split it into two 32 bit writes + */ +#define ONCHIP_ADDR1(x) ((uint32_t)(((uint64_t)x >> 12) & 0xFFFFFFFF)) +#define ONCHIP_ADDR2(x) ((uint32_t)((1 << 20) | ((uint64_t)x >> 44))) + +/* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */ +#define ETH_HLEN 14 +#define ETH_OVERHEAD (ETH_HLEN + 8 + 8) +#define ETH_MIN_PACKET_SIZE 60 +#define ETH_MAX_PACKET_SIZE ETHERMTU /* 1500 */ +#define ETH_MAX_JUMBO_PACKET_SIZE 9600 +/* TCP with Timestamp Option (32) + IPv6 (40) */ + +/* max supported alignment is 256 (8 shift) */ +#define BNX2X_RX_ALIGN_SHIFT RTE_MAX(6, min(8, RTE_CACHE_LINE_SIZE_LOG2)) + +#define BNX2X_PXP_DRAM_ALIGN (BNX2X_RX_ALIGN_SHIFT - 5) + +struct bnx2x_bar { + void *base_addr; +}; + +/* Used to manage DMA allocations. */ +struct bnx2x_dma { + struct bnx2x_softc *sc; + rte_iova_t paddr; + void *vaddr; + int nseg; + const void *mzone; + char msg[RTE_MEMZONE_NAMESIZE - 6]; +}; + +/* attn group wiring */ +#define MAX_DYNAMIC_ATTN_GRPS 8 + +struct attn_route { + uint32_t sig[5]; +}; + +struct iro { + uint32_t base; + uint16_t m1; + uint16_t m2; + uint16_t m3; + uint16_t size; +}; + +union bnx2x_host_hc_status_block { + /* pointer to fp status block e2 */ + struct host_hc_status_block_e2 *e2_sb; + /* pointer to fp status block e1x */ + struct host_hc_status_block_e1x *e1x_sb; +}; + +union bnx2x_db_prod { + struct doorbell_set_prod data; + uint32_t raw; +}; + +struct bnx2x_sw_tx_bd { + struct mbuf *m; + uint16_t first_bd; + uint8_t flags; +/* set on the first BD descriptor when there is a split BD */ +#define BNX2X_TSO_SPLIT_BD (1 << 0) +}; + +/* + * This is the HSI fastpath data structure. There can be up to MAX_RSS_CHAIN + * instances of the fastpath structure when using multiple queues. 
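+ * (Illustration: queue i is served by &sc->fp[i]; fp->index doubles as the + * connection id ('cid') and fp->cl_id is the ethernet client id, as noted on + * the fields below.)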
+ */ +struct bnx2x_fastpath { + /* pointer back to parent structure */ + struct bnx2x_softc *sc; + + /* Used to synchronize fastpath Rx access */ + rte_spinlock_t rx_mtx; + + /* status block */ + struct bnx2x_dma sb_dma; + union bnx2x_host_hc_status_block status_block; + + rte_iova_t tx_desc_mapping; + + rte_iova_t rx_desc_mapping; + rte_iova_t rx_comp_mapping; + + uint16_t *sb_index_values; + uint16_t *sb_running_index; + uint32_t ustorm_rx_prods_offset; + + uint8_t igu_sb_id; /* status block number in HW */ + uint8_t fw_sb_id; /* status block number in FW */ + + uint32_t rx_buf_size; + + int state; +#define BNX2X_FP_STATE_CLOSED 0x01 +#define BNX2X_FP_STATE_IRQ 0x02 +#define BNX2X_FP_STATE_OPENING 0x04 +#define BNX2X_FP_STATE_OPEN 0x08 +#define BNX2X_FP_STATE_HALTING 0x10 +#define BNX2X_FP_STATE_HALTED 0x20 + + /* reference back to this fastpath queue number */ + uint8_t index; /* this is also the 'cid' */ +#define FP_IDX(fp) (fp->index) + + /* ethernet client ID (each fastpath set of RX/TX/CQE is a client) */ + uint8_t cl_id; +#define FP_CL_ID(fp) (fp->cl_id) + uint8_t cl_qzone_id; + + uint16_t fp_hc_idx; + + union bnx2x_db_prod tx_db; + + struct tstorm_per_queue_stats old_tclient; + struct ustorm_per_queue_stats old_uclient; + struct xstorm_per_queue_stats old_xclient; + struct bnx2x_eth_q_stats eth_q_stats; + struct bnx2x_eth_q_stats_old eth_q_stats_old; + + /* Pointer to the receive consumer in the status block */ + uint16_t *rx_cq_cons_sb; + + /* Pointer to the transmit consumer in the status block */ + uint16_t *tx_cons_sb; + + /* transmit timeout until chip reset */ + int watchdog_timer; + +}; /* struct bnx2x_fastpath */ + +#define BNX2X_MAX_NUM_OF_VFS 64 +#define BNX2X_VF_ID_INVALID 0xFF + +/* maximum number of fast-path interrupt contexts */ +#define FP_SB_MAX_E1x 16 +#define FP_SB_MAX_E2 HC_SB_MAX_SB_E2 + +union cdu_context { + struct eth_context eth; + char pad[1024]; +}; + +/* CDU host DB constants */ +#define CDU_ILT_PAGE_SZ_HW 2 +#define CDU_ILT_PAGE_SZ (8192 << CDU_ILT_PAGE_SZ_HW) /* 32K */ +#define ILT_PAGE_CIDS (CDU_ILT_PAGE_SZ / sizeof(union cdu_context)) + +#define CNIC_ISCSI_CID_MAX 256 +#define CNIC_FCOE_CID_MAX 2048 +#define CNIC_CID_MAX (CNIC_ISCSI_CID_MAX + CNIC_FCOE_CID_MAX) +#define CNIC_ILT_LINES DIV_ROUND_UP(CNIC_CID_MAX, ILT_PAGE_CIDS) + +#define QM_ILT_PAGE_SZ_HW 0 +#define QM_ILT_PAGE_SZ (4096 << QM_ILT_PAGE_SZ_HW) /* 4K */ +#define QM_CID_ROUND 1024 + +/* TM (timers) host DB constants */ +#define TM_ILT_PAGE_SZ_HW 0 +#define TM_ILT_PAGE_SZ (4096 << TM_ILT_PAGE_SZ_HW) /* 4K */ +/*#define TM_CONN_NUM (CNIC_STARTING_CID+CNIC_ISCSI_CXT_MAX) */ +#define TM_CONN_NUM 1024 +#define TM_ILT_SZ (8 * TM_CONN_NUM) +#define TM_ILT_LINES DIV_ROUND_UP(TM_ILT_SZ, TM_ILT_PAGE_SZ) + +/* SRC (Searcher) host DB constants */ +#define SRC_ILT_PAGE_SZ_HW 0 +#define SRC_ILT_PAGE_SZ (4096 << SRC_ILT_PAGE_SZ_HW) /* 4K */ +#define SRC_HASH_BITS 10 +#define SRC_CONN_NUM (1 << SRC_HASH_BITS) /* 1024 */ +#define SRC_ILT_SZ (sizeof(struct src_ent) * SRC_CONN_NUM) +#define SRC_T2_SZ SRC_ILT_SZ +#define SRC_ILT_LINES DIV_ROUND_UP(SRC_ILT_SZ, SRC_ILT_PAGE_SZ) + +struct hw_context { + struct bnx2x_dma vcxt_dma; + union cdu_context *vcxt; + //rte_iova_t cxt_mapping; + size_t size; +}; + +#define SM_RX_ID 0 +#define SM_TX_ID 1 + +/* defines for multiple tx priority indices */ +#define FIRST_TX_ONLY_COS_INDEX 1 +#define FIRST_TX_COS_INDEX 0 + +#define CID_TO_FP(cid, sc) ((cid) % BNX2X_NUM_NON_CNIC_QUEUES(sc)) + +#define HC_INDEX_ETH_RX_CQ_CONS 1 +#define HC_INDEX_OOO_TX_CQ_CONS 4 +#define 
HC_INDEX_ETH_TX_CQ_CONS_COS0 5 +#define HC_INDEX_ETH_TX_CQ_CONS_COS1 6 +#define HC_INDEX_ETH_TX_CQ_CONS_COS2 7 +#define HC_INDEX_ETH_FIRST_TX_CQ_CONS HC_INDEX_ETH_TX_CQ_CONS_COS0 + +/* congestion management fairness mode */ +#define CMNG_FNS_NONE 0 +#define CMNG_FNS_MINMAX 1 + +/* CMNG constants, as derived from system spec calculations */ +/* default MIN rate in case VNIC min rate is configured to zero - 100Mbps */ +#define DEF_MIN_RATE 100 +/* resolution of the rate shaping timer - 400 usec */ +#define RS_PERIODIC_TIMEOUT_USEC 400 +/* number of bytes in single QM arbitration cycle - + * coefficient for calculating the fairness timer */ +#define QM_ARB_BYTES 160000 +/* resolution of Min algorithm 1:100 */ +#define MIN_RES 100 +/* how many bytes above threshold for the minimal credit of Min algorithm*/ +#define MIN_ABOVE_THRESH 32768 +/* fairness algorithm integration time coefficient - + * for calculating the actual Tfair */ +#define T_FAIR_COEF ((MIN_ABOVE_THRESH + QM_ARB_BYTES) * 8 * MIN_RES) +/* memory of fairness algorithm - 2 cycles */ +#define FAIR_MEM 2 + +#define HC_SEG_ACCESS_DEF 0 /* Driver decision 0-3 */ +#define HC_SEG_ACCESS_ATTN 4 +#define HC_SEG_ACCESS_NORM 0 /* Driver decision 0-1 */ + +/* + * The total number of L2 queues, MSIX vectors and HW contexts (CIDs) is + * controlled by the number of fast-path status blocks supported by the + * device (HW/FW). Each fast-path status block (FP-SB) aka non-default + * status block represents an independent interrupt context that can + * serve a regular L2 networking queue. However, special L2 queues such + * as the FCoE queue do not require a FP-SB and other components like + * the CNIC may consume an FP-SB, reducing the number of possible L2 queues. + * + * If the maximum number of FP-SB available is X then: + * a. If CNIC is supported it consumes 1 FP-SB, thus the max number of + * regular L2 queues is Y=X-1 + * b. In MF mode the actual number of L2 queues is Y= (X-1/MF_factor) + * c. If the FCoE L2 queue is supported the actual number of L2 queues + * is Y+1 + * d. The number of irqs (MSIX vectors) is either Y+1 (one extra for + * slow-path interrupts) or Y+2 if CNIC is supported (one additional + * FP interrupt context for the CNIC). + * e. The number of HW context (CID count) is always X or X+1 if FCoE + * L2 queue is supported. The CID for the FCoE L2 queue is always X. + * + * So this is quite simple for now as no ULPs are supported yet.
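+ * + * As a worked illustration of a-e (numbers only, not part of the original + * description): with X = 16 FP-SBs and CNIC supported, up to Y = 15 regular + * L2 queues are possible, served by Y + 2 = 17 MSI-X vectors (one slow-path + * plus one CNIC vector), and the CID count is 16, or 17 when the FCoE L2 + * queue is used, in which case FCoE takes CID 16.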
:-) + */ +#define BNX2X_NUM_QUEUES(sc) ((sc)->num_queues) +#define BNX2X_NUM_ETH_QUEUES(sc) BNX2X_NUM_QUEUES(sc) +#define BNX2X_NUM_NON_CNIC_QUEUES(sc) BNX2X_NUM_QUEUES(sc) +#define BNX2X_NUM_RX_QUEUES(sc) BNX2X_NUM_QUEUES(sc) + +#define FOR_EACH_QUEUE(sc, var) \ + for ((var) = 0; (var) < BNX2X_NUM_QUEUES(sc); (var)++) + +#define FOR_EACH_NONDEFAULT_QUEUE(sc, var) \ + for ((var) = 1; (var) < BNX2X_NUM_QUEUES(sc); (var)++) + +#define FOR_EACH_ETH_QUEUE(sc, var) \ + for ((var) = 0; (var) < BNX2X_NUM_ETH_QUEUES(sc); (var)++) + +#define FOR_EACH_NONDEFAULT_ETH_QUEUE(sc, var) \ + for ((var) = 1; (var) < BNX2X_NUM_ETH_QUEUES(sc); (var)++) + +#define FOR_EACH_COS_IN_TX_QUEUE(sc, var) \ + for ((var) = 0; (var) < (sc)->max_cos; (var)++) + +#define FOR_EACH_CNIC_QUEUE(sc, var) \ + for ((var) = BNX2X_NUM_ETH_QUEUES(sc); \ + (var) < BNX2X_NUM_QUEUES(sc); \ + (var)++) + +enum { + OOO_IDX_OFFSET, + FCOE_IDX_OFFSET, + FWD_IDX_OFFSET, +}; + +#define FCOE_IDX(sc) (BNX2X_NUM_NON_CNIC_QUEUES(sc) + FCOE_IDX_OFFSET) +#define bnx2x_fcoe_fp(sc) (&sc->fp[FCOE_IDX(sc)]) +#define bnx2x_fcoe(sc, var) (bnx2x_fcoe_fp(sc)->var) +#define bnx2x_fcoe_inner_sp_obj(sc) (&sc->sp_objs[FCOE_IDX(sc)]) +#define bnx2x_fcoe_sp_obj(sc, var) (bnx2x_fcoe_inner_sp_obj(sc)->var) +#define bnx2x_fcoe_tx(sc, var) (bnx2x_fcoe_fp(sc)->txdata_ptr[FIRST_TX_COS_INDEX]->var) + +#define OOO_IDX(sc) (BNX2X_NUM_NON_CNIC_QUEUES(sc) + OOO_IDX_OFFSET) +#define bnx2x_ooo_fp(sc) (&sc->fp[OOO_IDX(sc)]) +#define bnx2x_ooo(sc, var) (bnx2x_ooo_fp(sc)->var) +#define bnx2x_ooo_inner_sp_obj(sc) (&sc->sp_objs[OOO_IDX(sc)]) +#define bnx2x_ooo_sp_obj(sc, var) (bnx2x_ooo_inner_sp_obj(sc)->var) + +#define FWD_IDX(sc) (BNX2X_NUM_NON_CNIC_QUEUES(sc) + FWD_IDX_OFFSET) +#define bnx2x_fwd_fp(sc) (&sc->fp[FWD_IDX(sc)]) +#define bnx2x_fwd(sc, var) (bnx2x_fwd_fp(sc)->var) +#define bnx2x_fwd_inner_sp_obj(sc) (&sc->sp_objs[FWD_IDX(sc)]) +#define bnx2x_fwd_sp_obj(sc, var) (bnx2x_fwd_inner_sp_obj(sc)->var) +#define bnx2x_fwd_txdata(fp) (fp->txdata_ptr[FIRST_TX_COS_INDEX]) + +#define IS_ETH_FP(fp) ((fp)->index < BNX2X_NUM_ETH_QUEUES((fp)->sc)) +#define IS_FCOE_FP(fp) ((fp)->index == FCOE_IDX((fp)->sc)) +#define IS_FCOE_IDX(idx) ((idx) == FCOE_IDX(sc)) +#define IS_FWD_FP(fp) ((fp)->index == FWD_IDX((fp)->sc)) +#define IS_FWD_IDX(idx) ((idx) == FWD_IDX(sc)) +#define IS_OOO_FP(fp) ((fp)->index == OOO_IDX((fp)->sc)) +#define IS_OOO_IDX(idx) ((idx) == OOO_IDX(sc)) + +enum { + BNX2X_PORT_QUERY_IDX, + BNX2X_PF_QUERY_IDX, + BNX2X_FCOE_QUERY_IDX, + BNX2X_FIRST_QUEUE_QUERY_IDX, +}; + +struct bnx2x_fw_stats_req { + struct stats_query_header hdr; + struct stats_query_entry query[FP_SB_MAX_E1x + + BNX2X_FIRST_QUEUE_QUERY_IDX]; +}; + +struct bnx2x_fw_stats_data { + struct stats_counter storm_counters; + struct per_port_stats port; + struct per_pf_stats pf; + struct per_queue_stats queue_stats[1]; +}; + +/* IGU MSIX STATISTICS on 57712: 64 for VFs; 4 for PFs; 4 for Attentions */ +#define BNX2X_IGU_STAS_MSG_VF_CNT 64 +#define BNX2X_IGU_STAS_MSG_PF_CNT 4 + +#define MAX_DMAE_C 8 + +/* + * This is the slowpath data structure. It is mapped into non-paged memory + * so that the hardware can access it's contents directly and must be page + * aligned. 
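+ * (For instance, the DMAE completion value wb_comp below is written by the + * chip and watched by the driver, and ramrod payloads such as rss_rdata are + * passed to the chip by physical address, see BNX2X_SP_MAPPING(sc, wb_data) + * earlier in bnx2x.c, hence the need for one physically contiguous, page + * aligned allocation.)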
+ */ +struct bnx2x_slowpath { + + /* used by the DMAE command executor */ + struct dmae_command dmae[MAX_DMAE_C]; + + /* statistics completion */ + uint32_t stats_comp; + + /* firmware defined statistics blocks */ + union mac_stats mac_stats; + struct nig_stats nig_stats; + struct host_port_stats port_stats; + struct host_func_stats func_stats; + + /* DMAE completion value and data source/sink */ + uint32_t wb_comp; + uint32_t wb_data[4]; + + union { + struct mac_configuration_cmd e1x; + struct eth_classify_rules_ramrod_data e2; + } mac_rdata; + + union { + struct tstorm_eth_mac_filter_config e1x; + struct eth_filter_rules_ramrod_data e2; + } rx_mode_rdata; + + struct eth_rss_update_ramrod_data rss_rdata; + + union { + struct mac_configuration_cmd e1; + struct eth_multicast_rules_ramrod_data e2; + } mcast_rdata; + + union { + struct function_start_data func_start; + struct flow_control_configuration pfc_config; /* for DCBX ramrod */ + } func_rdata; + + /* Queue State related ramrods */ + union { + struct client_init_ramrod_data init_data; + struct client_update_ramrod_data update_data; + } q_rdata; + + /* + * AFEX ramrod cannot be a part of the func_rdata union because these + * events might arrive in parallel to other events from func_rdata. + * If they were defined in the same union the data can get corrupted. + */ + struct afex_vif_list_ramrod_data func_afex_rdata; + + union drv_info_to_mcp drv_info_to_mcp; +}; /* struct bnx2x_slowpath */ + +/* + * Port specific data structure. + */ +struct bnx2x_port { + /* + * Port Management Function (for 57711E only). + * When this field is set, the driver instance is + * responsible for managing port specific + * configurations such as handling link attentions. + */ + uint32_t pmf; + + /* Ethernet maximum transmission unit. */ + uint16_t ether_mtu; + + uint32_t link_config[ELINK_LINK_CONFIG_SIZE]; + + uint32_t ext_phy_config; + + /* Port feature config. */ + uint32_t config; + + /* Defines the features supported by the PHY. */ + uint32_t supported[ELINK_LINK_CONFIG_SIZE]; + + /* Defines the features advertised by the PHY. */ + uint32_t advertising[ELINK_LINK_CONFIG_SIZE]; +#define ADVERTISED_10baseT_Half (1 << 1) +#define ADVERTISED_10baseT_Full (1 << 2) +#define ADVERTISED_100baseT_Half (1 << 3) +#define ADVERTISED_100baseT_Full (1 << 4) +#define ADVERTISED_1000baseT_Half (1 << 5) +#define ADVERTISED_1000baseT_Full (1 << 6) +#define ADVERTISED_TP (1 << 7) +#define ADVERTISED_FIBRE (1 << 8) +#define ADVERTISED_Autoneg (1 << 9) +#define ADVERTISED_Asym_Pause (1 << 10) +#define ADVERTISED_Pause (1 << 11) +#define ADVERTISED_2500baseX_Full (1 << 15) +#define ADVERTISED_10000baseT_Full (1 << 16) + + uint32_t phy_addr; + + /* Used to synchronize phy accesses. */ + rte_spinlock_t phy_mtx; + char phy_mtx_name[32]; + +#define BNX2X_PHY_LOCK(sc) rte_spinlock_lock(&sc->port.phy_mtx) +#define BNX2X_PHY_UNLOCK(sc) rte_spinlock_unlock(&sc->port.phy_mtx) + + /* + * MCP scratchpad address for port specific statistics. + * The device is responsible for writing statistics + * back to the MCP for use with management firmware such + * as UMP/NC-SI.
+ */ + uint32_t port_stx; + + struct nig_stats old_nig_stats; +}; /* struct bnx2x_port */ + +struct bnx2x_mf_info { + uint32_t mf_config[E1HVN_MAX]; + + uint32_t vnics_per_port; /* 1, 2 or 4 */ + uint32_t multi_vnics_mode; /* can be set even if vnics_per_port = 1 */ + uint32_t path_has_ovlan; /* MF mode in the path (can be different than the MF mode of the function */ + +#define IS_MULTI_VNIC(sc) ((sc)->devinfo.mf_info.multi_vnics_mode) +#define VNICS_PER_PORT(sc) ((sc)->devinfo.mf_info.vnics_per_port) +#define VNICS_PER_PATH(sc) \ + ((sc)->devinfo.mf_info.vnics_per_port * \ + ((CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 1 )) + + uint8_t min_bw[MAX_VNIC_NUM]; + uint8_t max_bw[MAX_VNIC_NUM]; + + uint16_t ext_id; /* vnic outer vlan or VIF ID */ +#define VALID_OVLAN(ovlan) ((ovlan) <= 4096) +#define INVALID_VIF_ID 0xFFFF +#define OVLAN(sc) ((sc)->devinfo.mf_info.ext_id) +#define VIF_ID(sc) ((sc)->devinfo.mf_info.ext_id) + + uint16_t default_vlan; +#define NIV_DEFAULT_VLAN(sc) ((sc)->devinfo.mf_info.default_vlan) + + uint8_t niv_allowed_priorities; +#define NIV_ALLOWED_PRIORITIES(sc) ((sc)->devinfo.mf_info.niv_allowed_priorities) + + uint8_t niv_default_cos; +#define NIV_DEFAULT_COS(sc) ((sc)->devinfo.mf_info.niv_default_cos) + + uint8_t niv_mba_enabled; + + enum mf_cfg_afex_vlan_mode afex_vlan_mode; +#define AFEX_VLAN_MODE(sc) ((sc)->devinfo.mf_info.afex_vlan_mode) + int afex_def_vlan_tag; + uint32_t pending_max; + + uint16_t flags; +#define MF_INFO_VALID_MAC 0x0001 + + uint16_t mf_ov; + uint8_t mf_mode; /* Switch-Dependent or Switch-Independent */ +#define IS_MF(sc) \ + (IS_MULTI_VNIC(sc) && \ + ((sc)->devinfo.mf_info.mf_mode != 0)) +#define IS_MF_SD(sc) \ + (IS_MULTI_VNIC(sc) && \ + ((sc)->devinfo.mf_info.mf_mode == MULTI_FUNCTION_SD)) +#define IS_MF_SI(sc) \ + (IS_MULTI_VNIC(sc) && \ + ((sc)->devinfo.mf_info.mf_mode == MULTI_FUNCTION_SI)) +#define IS_MF_AFEX(sc) \ + (IS_MULTI_VNIC(sc) && \ + ((sc)->devinfo.mf_info.mf_mode == MULTI_FUNCTION_AFEX)) +#define IS_MF_SD_MODE(sc) IS_MF_SD(sc) +#define IS_MF_SI_MODE(sc) IS_MF_SI(sc) +#define IS_MF_AFEX_MODE(sc) IS_MF_AFEX(sc) + + uint32_t mf_protos_supported; + #define MF_PROTO_SUPPORT_ETHERNET 0x1 + #define MF_PROTO_SUPPORT_ISCSI 0x2 + #define MF_PROTO_SUPPORT_FCOE 0x4 +}; /* struct bnx2x_mf_info */ + +/* Device information data structure. 
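+ * For example (decoded with the CHIP_* macros below): a chip_id of + * 0x168e1000 corresponds to chip number 0x168e (57810), revision B0 + * (CHIP_REV = CHIP_REV_Bx), metal 0 and bond id 0.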
*/ +struct bnx2x_devinfo { +#if 1 +#define NAME_SIZE 128 + char name[NAME_SIZE]; +#endif + /* PCIe info */ + uint16_t vendor_id; + uint16_t device_id; + uint16_t subvendor_id; + uint16_t subdevice_id; + + /* + * chip_id = 0b'CCCCCCCCCCCCCCCCRRRRMMMMMMMMBBBB' + * C = Chip Number (bits 16-31) + * R = Chip Revision (bits 12-15) + * M = Chip Metal (bits 4-11) + * B = Chip Bond ID (bits 0-3) + */ + uint32_t chip_id; +#define CHIP_ID(sc) ((sc)->devinfo.chip_id & 0xffff0000) +#define CHIP_NUM(sc) ((sc)->devinfo.chip_id >> 16) +/* device ids */ +#define CHIP_NUM_57710 0x164e +#define CHIP_NUM_57711 0x164f +#define CHIP_NUM_57711E 0x1650 +#define CHIP_NUM_57712 0x1662 +#define CHIP_NUM_57712_MF 0x1663 +#define CHIP_NUM_57712_VF 0x166f +#define CHIP_NUM_57800 0x168a +#define CHIP_NUM_57800_MF 0x16a5 +#define CHIP_NUM_57800_VF 0x16a9 +#define CHIP_NUM_57810 0x168e +#define CHIP_NUM_57810_MF 0x16ae +#define CHIP_NUM_57810_VF 0x16af +#define CHIP_NUM_57811 0x163d +#define CHIP_NUM_57811_MF 0x163e +#define CHIP_NUM_57811_VF 0x163f +#define CHIP_NUM_57840_OBS 0x168d +#define CHIP_NUM_57840_OBS_MF 0x16ab +#define CHIP_NUM_57840_4_10 0x16a1 +#define CHIP_NUM_57840_2_20 0x16a2 +#define CHIP_NUM_57840_MF 0x16a4 +#define CHIP_NUM_57840_VF 0x16ad + +#define CHIP_REV_SHIFT 12 +#define CHIP_REV_MASK (0xF << CHIP_REV_SHIFT) +#define CHIP_REV(sc) ((sc)->devinfo.chip_id & CHIP_REV_MASK) + +#define CHIP_REV_Ax (0x0 << CHIP_REV_SHIFT) +#define CHIP_REV_Bx (0x1 << CHIP_REV_SHIFT) +#define CHIP_REV_Cx (0x2 << CHIP_REV_SHIFT) + +#define CHIP_REV_IS_SLOW(sc) \ + (CHIP_REV(sc) > 0x00005000) +#define CHIP_REV_IS_FPGA(sc) \ + (CHIP_REV_IS_SLOW(sc) && (CHIP_REV(sc) & 0x00001000)) +#define CHIP_REV_IS_EMUL(sc) \ + (CHIP_REV_IS_SLOW(sc) && !(CHIP_REV(sc) & 0x00001000)) +#define CHIP_REV_IS_ASIC(sc) \ + (!CHIP_REV_IS_SLOW(sc)) + +#define CHIP_METAL(sc) ((sc->devinfo.chip_id) & 0x00000ff0) +#define CHIP_BOND_ID(sc) ((sc->devinfo.chip_id) & 0x0000000f) + +#define CHIP_IS_E1(sc) (CHIP_NUM(sc) == CHIP_NUM_57710) +#define CHIP_IS_57710(sc) (CHIP_NUM(sc) == CHIP_NUM_57710) +#define CHIP_IS_57711(sc) (CHIP_NUM(sc) == CHIP_NUM_57711) +#define CHIP_IS_57711E(sc) (CHIP_NUM(sc) == CHIP_NUM_57711E) +#define CHIP_IS_E1H(sc) ((CHIP_IS_57711(sc)) || \ + (CHIP_IS_57711E(sc))) +#define CHIP_IS_E1x(sc) CHIP_IS_E1H(sc) + +#define CHIP_IS_57712(sc) (CHIP_NUM(sc) == CHIP_NUM_57712) +#define CHIP_IS_57712_MF(sc) (CHIP_NUM(sc) == CHIP_NUM_57712_MF) +#define CHIP_IS_57712_VF(sc) (CHIP_NUM(sc) == CHIP_NUM_57712_VF) +#define CHIP_IS_E2(sc) (CHIP_IS_57712(sc) || \ + CHIP_IS_57712_MF(sc)) + +#define CHIP_IS_57800(sc) (CHIP_NUM(sc) == CHIP_NUM_57800) +#define CHIP_IS_57800_MF(sc) (CHIP_NUM(sc) == CHIP_NUM_57800_MF) +#define CHIP_IS_57800_VF(sc) (CHIP_NUM(sc) == CHIP_NUM_57800_VF) +#define CHIP_IS_57810(sc) (CHIP_NUM(sc) == CHIP_NUM_57810) +#define CHIP_IS_57810_MF(sc) (CHIP_NUM(sc) == CHIP_NUM_57810_MF) +#define CHIP_IS_57810_VF(sc) (CHIP_NUM(sc) == CHIP_NUM_57810_VF) +#define CHIP_IS_57811(sc) (CHIP_NUM(sc) == CHIP_NUM_57811) +#define CHIP_IS_57811_MF(sc) (CHIP_NUM(sc) == CHIP_NUM_57811_MF) +#define CHIP_IS_57811_VF(sc) (CHIP_NUM(sc) == CHIP_NUM_57811_VF) +#define CHIP_IS_57840(sc) ((CHIP_NUM(sc) == CHIP_NUM_57840_OBS) || \ + (CHIP_NUM(sc) == CHIP_NUM_57840_4_10) || \ + (CHIP_NUM(sc) == CHIP_NUM_57840_2_20)) +#define CHIP_IS_57840_MF(sc) ((CHIP_NUM(sc) == CHIP_NUM_57840_OBS_MF) || \ + (CHIP_NUM(sc) == CHIP_NUM_57840_MF)) +#define CHIP_IS_57840_VF(sc) (CHIP_NUM(sc) == CHIP_NUM_57840_VF) + +#define CHIP_IS_E3(sc) (CHIP_IS_57800(sc) || \ + 
CHIP_IS_57800_MF(sc) || \ + CHIP_IS_57800_VF(sc) || \ + CHIP_IS_57810(sc) || \ + CHIP_IS_57810_MF(sc) || \ + CHIP_IS_57810_VF(sc) || \ + CHIP_IS_57811(sc) || \ + CHIP_IS_57811_MF(sc) || \ + CHIP_IS_57811_VF(sc) || \ + CHIP_IS_57840(sc) || \ + CHIP_IS_57840_MF(sc) || \ + CHIP_IS_57840_VF(sc)) +#define CHIP_IS_E3A0(sc) (CHIP_IS_E3(sc) && \ + (CHIP_REV(sc) == CHIP_REV_Ax)) +#define CHIP_IS_E3B0(sc) (CHIP_IS_E3(sc) && \ + (CHIP_REV(sc) == CHIP_REV_Bx)) + +#define USES_WARPCORE(sc) (CHIP_IS_E3(sc)) +#define CHIP_IS_E2E3(sc) (CHIP_IS_E2(sc) || \ + CHIP_IS_E3(sc)) + +#define CHIP_IS_MF_CAP(sc) (CHIP_IS_57711E(sc) || \ + CHIP_IS_57712_MF(sc) || \ + CHIP_IS_E3(sc)) + +#define IS_VF(sc) ((sc)->flags & BNX2X_IS_VF_FLAG) +#define IS_PF(sc) (!IS_VF(sc)) + +/* + * This define is used in two main places: + * 1. In the early stages of nic_load, to know if to configure Parser/Searcher + * to nic-only mode or to offload mode. Offload mode is configured if either + * the chip is E1x (where NIC_MODE register is not applicable), or if cnic + * already registered for this port (which means that the user wants storage + * services). + * 2. During cnic-related load, to know if offload mode is already configured + * in the HW or needs to be configrued. Since the transition from nic-mode to + * offload-mode in HW causes traffic coruption, nic-mode is configured only + * in ports on which storage services where never requested. + */ +#define CONFIGURE_NIC_MODE(sc) (!CHIP_IS_E1x(sc) && !CNIC_ENABLED(sc)) + + uint8_t chip_port_mode; +#define CHIP_4_PORT_MODE 0x0 +#define CHIP_2_PORT_MODE 0x1 +#define CHIP_PORT_MODE_NONE 0x2 +#define CHIP_PORT_MODE(sc) ((sc)->devinfo.chip_port_mode) +#define CHIP_IS_MODE_4_PORT(sc) (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) + + uint8_t int_block; +#define INT_BLOCK_HC 0 +#define INT_BLOCK_IGU 1 +#define INT_BLOCK_MODE_NORMAL 0 +#define INT_BLOCK_MODE_BW_COMP 2 +#define CHIP_INT_MODE_IS_NBC(sc) \ + (!CHIP_IS_E1x(sc) && \ + !((sc)->devinfo.int_block & INT_BLOCK_MODE_BW_COMP)) +#define CHIP_INT_MODE_IS_BC(sc) (!CHIP_INT_MODE_IS_NBC(sc)) + + uint32_t shmem_base; + uint32_t shmem2_base; + uint32_t bc_ver; + char bc_ver_str[32]; + uint32_t mf_cfg_base; /* bootcode shmem address in BAR memory */ + struct bnx2x_mf_info mf_info; + + uint32_t flash_size; +#define NVRAM_1MB_SIZE 0x20000 +#define NVRAM_TIMEOUT_COUNT 30000 +#define NVRAM_PAGE_SIZE 256 + + /* PCIe capability information */ + uint32_t pcie_cap_flags; +#define BNX2X_PM_CAPABLE_FLAG 0x00000001 +#define BNX2X_PCIE_CAPABLE_FLAG 0x00000002 +#define BNX2X_MSI_CAPABLE_FLAG 0x00000004 +#define BNX2X_MSIX_CAPABLE_FLAG 0x00000008 + uint16_t pcie_pm_cap_reg; + uint16_t pcie_link_width; + uint16_t pcie_link_speed; + uint16_t pcie_msi_cap_reg; + uint16_t pcie_msix_cap_reg; + + /* device configuration read from bootcode shared memory */ + uint32_t hw_config; + uint32_t hw_config2; +}; /* struct bnx2x_devinfo */ + +struct bnx2x_sp_objs { + struct ecore_vlan_mac_obj mac_obj; /* MACs object */ + struct ecore_queue_sp_obj q_obj; /* Queue State object */ +}; /* struct bnx2x_sp_objs */ + +/* + * Data that will be used to create a link report message. We will keep the + * data used for the last link report in order to prevent reporting the same + * link parameters twice. 
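+ * (A sketch of the intended flow, not a statement of the exact code: the + * reporting path fills a fresh bnx2x_link_report_data, compares it against + * sc->last_reported_link, returns early when nothing changed, and refreshes + * the cached copy only when a report is actually sent.)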
+ */ +struct bnx2x_link_report_data { + uint16_t line_speed; /* Effective line speed */ + unsigned long link_report_flags; /* BNX2X_LINK_REPORT_XXX flags */ +}; + +enum { + BNX2X_LINK_REPORT_FULL_DUPLEX, + BNX2X_LINK_REPORT_LINK_DOWN, + BNX2X_LINK_REPORT_RX_FC_ON, + BNX2X_LINK_REPORT_TX_FC_ON +}; + +#define BNX2X_RX_CHAIN_PAGE_SZ BNX2X_PAGE_SIZE + +struct bnx2x_pci_cap { + struct bnx2x_pci_cap *next; + uint16_t id; + uint16_t type; + uint16_t addr; +}; + +struct ecore_ilt; + +struct bnx2x_vfdb; + +/* Top level device private data structure. */ +struct bnx2x_softc { + + void **rx_queues; + void **tx_queues; + uint32_t max_tx_queues; + uint32_t max_rx_queues; + const struct rte_pci_device *pci_dev; + uint32_t pci_val; + struct bnx2x_pci_cap *pci_caps; +#define BNX2X_INTRS_POLL_PERIOD 1 + + void *firmware; + uint64_t fw_len; + + /* MAC address operations */ + struct bnx2x_mac_ops mac_ops; + + /* structures for VF mbox/response/bulletin */ + struct bnx2x_vf_mbx_msg *vf2pf_mbox; + struct bnx2x_dma vf2pf_mbox_mapping; + struct vf_acquire_resp_tlv acquire_resp; + struct bnx2x_vf_bulletin *pf2vf_bulletin; + struct bnx2x_dma pf2vf_bulletin_mapping; + struct bnx2x_vf_bulletin old_bulletin; + rte_spinlock_t vf2pf_lock; + + int media; + + int state; /* device state */ +#define BNX2X_STATE_CLOSED 0x0000 +#define BNX2X_STATE_OPENING_WAITING_LOAD 0x1000 +#define BNX2X_STATE_OPENING_WAITING_PORT 0x2000 +#define BNX2X_STATE_OPEN 0x3000 +#define BNX2X_STATE_CLOSING_WAITING_HALT 0x4000 +#define BNX2X_STATE_CLOSING_WAITING_DELETE 0x5000 +#define BNX2X_STATE_CLOSING_WAITING_UNLOAD 0x6000 +#define BNX2X_STATE_DISABLED 0xD000 +#define BNX2X_STATE_DIAG 0xE000 +#define BNX2X_STATE_ERROR 0xF000 + + int flags; +#define BNX2X_ONE_PORT_FLAG 0x1 +#define BNX2X_NO_FCOE_FLAG 0x2 +#define BNX2X_NO_WOL_FLAG 0x4 +#define BNX2X_NO_MCP_FLAG 0x8 +#define BNX2X_NO_ISCSI_OOO_FLAG 0x10 +#define BNX2X_NO_ISCSI_FLAG 0x20 +#define BNX2X_MF_FUNC_DIS 0x40 +#define BNX2X_TX_SWITCHING 0x80 +#define BNX2X_IS_VF_FLAG 0x100 + +#define BNX2X_ONE_PORT(sc) (sc->flags & BNX2X_ONE_PORT_FLAG) +#define BNX2X_NOFCOE(sc) (sc->flags & BNX2X_NO_FCOE_FLAG) +#define BNX2X_NOMCP(sc) (sc->flags & BNX2X_NO_MCP_FLAG) + +#define MAX_BARS 5 + struct bnx2x_bar bar[MAX_BARS]; /* map BARs 0, 2, 4 */ + + uint16_t doorbell_size; + + /* periodic timer callout */ +#define PERIODIC_STOP 0 +#define PERIODIC_GO 1 + volatile unsigned long periodic_flags; + rte_atomic32_t scan_fp; + struct bnx2x_fastpath fp[MAX_RSS_CHAINS]; + struct bnx2x_sp_objs sp_objs[MAX_RSS_CHAINS]; + + uint8_t unit; /* driver instance number */ + + int pcie_bus; /* PCIe bus number */ + int pcie_device; /* PCIe device/slot number */ + int pcie_func; /* PCIe function number */ + + uint8_t pfunc_rel; /* function relative */ + uint8_t pfunc_abs; /* function absolute */ + uint8_t path_id; /* function absolute */ +#define SC_PATH(sc) (sc->path_id) +#define SC_PORT(sc) (sc->pfunc_rel & 1) +#define SC_FUNC(sc) (sc->pfunc_rel) +#define SC_ABS_FUNC(sc) (sc->pfunc_abs) +#define SC_VN(sc) (sc->pfunc_rel >> 1) +#define SC_L_ID(sc) (SC_VN(sc) << 2) +#define PORT_ID(sc) SC_PORT(sc) +#define PATH_ID(sc) SC_PATH(sc) +#define VNIC_ID(sc) SC_VN(sc) +#define FUNC_ID(sc) SC_FUNC(sc) +#define ABS_FUNC_ID(sc) SC_ABS_FUNC(sc) +#define SC_FW_MB_IDX_VN(sc, vn) \ + (SC_PORT(sc) + (vn) * \ + ((CHIP_IS_E1x(sc) || (CHIP_IS_MODE_4_PORT(sc))) ? 
2 : 1)) +#define SC_FW_MB_IDX(sc) SC_FW_MB_IDX_VN(sc, SC_VN(sc)) + + int if_capen; /* enabled interface capabilities */ + + struct bnx2x_devinfo devinfo; + char fw_ver_str[32]; + char mf_mode_str[32]; + char pci_link_str[32]; + + struct iro *iro_array; + + int dmae_ready; +#define DMAE_READY(sc) (sc->dmae_ready) + + struct ecore_credit_pool_obj vlans_pool; + struct ecore_credit_pool_obj macs_pool; + struct ecore_rx_mode_obj rx_mode_obj; + struct ecore_mcast_obj mcast_obj; + struct ecore_rss_config_obj rss_conf_obj; + struct ecore_func_sp_obj func_obj; + + uint16_t fw_seq; + uint16_t fw_drv_pulse_wr_seq; + uint32_t func_stx; + + struct elink_params link_params; + struct elink_vars link_vars; + uint32_t link_cnt; + struct bnx2x_link_report_data last_reported_link; + char mac_addr_str[32]; + + uint32_t tx_ring_size; + uint32_t rx_ring_size; + int wol; + + int is_leader; + int recovery_state; +#define BNX2X_RECOVERY_DONE 1 +#define BNX2X_RECOVERY_INIT 2 +#define BNX2X_RECOVERY_WAIT 3 +#define BNX2X_RECOVERY_FAILED 4 +#define BNX2X_RECOVERY_NIC_LOADING 5 + + uint32_t rx_mode; +#define BNX2X_RX_MODE_NONE 0 +#define BNX2X_RX_MODE_NORMAL 1 +#define BNX2X_RX_MODE_ALLMULTI 2 +#define BNX2X_RX_MODE_ALLMULTI_PROMISC 3 +#define BNX2X_RX_MODE_PROMISC 4 +#define BNX2X_MAX_MULTICAST 64 + + struct bnx2x_port port; + + struct cmng_init cmng; + + /* user configs */ + uint8_t num_queues; + int hc_rx_ticks; + int hc_tx_ticks; + uint32_t rx_budget; + int interrupt_mode; +#define INTR_MODE_INTX 0 +#define INTR_MODE_MSI 1 +#define INTR_MODE_MSIX 2 +#define INTR_MODE_SINGLE_MSIX 3 + int udp_rss; + + uint8_t igu_dsb_id; + uint8_t igu_base_sb; + uint8_t igu_sb_cnt; + uint32_t igu_base_addr; + uint8_t base_fw_ndsb; +#define DEF_SB_IGU_ID 16 +#define DEF_SB_ID HC_SP_SB_ID + + /* default status block */ + struct bnx2x_dma def_sb_dma; + struct host_sp_status_block *def_sb; + uint16_t def_idx; + uint16_t def_att_idx; + uint32_t attn_state; + struct attn_route attn_group[MAX_DYNAMIC_ATTN_GRPS]; + + /* general SP events - stats query, cfc delete, etc */ +#define HC_SP_INDEX_ETH_DEF_CONS 3 + /* EQ completions */ +#define HC_SP_INDEX_EQ_CONS 7 + /* FCoE L2 connection completions */ +#define HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS 6 +#define HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS 4 + /* iSCSI L2 */ +#define HC_SP_INDEX_ETH_ISCSI_CQ_CONS 5 +#define HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS 1 + + /* event queue */ + struct bnx2x_dma eq_dma; + union event_ring_elem *eq; + uint16_t eq_prod; + uint16_t eq_cons; + uint16_t *eq_cons_sb; +#define NUM_EQ_PAGES 1 /* must be a power of 2 */ +#define EQ_DESC_CNT_PAGE (BNX2X_PAGE_SIZE / sizeof(union event_ring_elem)) +#define EQ_DESC_MAX_PAGE (EQ_DESC_CNT_PAGE - 1) +#define NUM_EQ_DESC (EQ_DESC_CNT_PAGE * NUM_EQ_PAGES) +#define EQ_DESC_MASK (NUM_EQ_DESC - 1) +#define MAX_EQ_AVAIL (EQ_DESC_MAX_PAGE * NUM_EQ_PAGES - 2) + /* depends on EQ_DESC_CNT_PAGE being a power of 2 */ +#define NEXT_EQ_IDX(x) \ + ((((x) & EQ_DESC_MAX_PAGE) == (EQ_DESC_MAX_PAGE - 1)) ? 
\ + ((x) + 2) : ((x) + 1)) + /* depends on the above and on NUM_EQ_PAGES being a power of 2 */ +#define EQ_DESC(x) ((x) & EQ_DESC_MASK) + + /* slow path */ + struct bnx2x_dma sp_dma; + struct bnx2x_slowpath *sp; + unsigned long sp_state; + + /* slow path queue */ + struct bnx2x_dma spq_dma; + struct eth_spe *spq; +#define SP_DESC_CNT (BNX2X_PAGE_SIZE / sizeof(struct eth_spe)) +#define MAX_SP_DESC_CNT (SP_DESC_CNT - 1) +#define MAX_SPQ_PENDING 8 + + uint16_t spq_prod_idx; + struct eth_spe *spq_prod_bd; + struct eth_spe *spq_last_bd; + uint16_t *dsb_sp_prod; + + volatile unsigned long eq_spq_left; /* COMMON_xxx ramrod credit */ + volatile unsigned long cq_spq_left; /* ETH_xxx ramrod credit */ + + /* fw decompression buffer */ + struct bnx2x_dma gz_buf_dma; + void *gz_buf; + uint32_t gz_outlen; +#define GUNZIP_BUF(sc) (sc->gz_buf) +#define GUNZIP_OUTLEN(sc) (sc->gz_outlen) +#define GUNZIP_PHYS(sc) (rte_iova_t)(sc->gz_buf_dma.paddr) +#define FW_BUF_SIZE 0x40000 + + struct raw_op *init_ops; + uint16_t *init_ops_offsets; /* init block offsets inside init_ops */ + uint32_t *init_data; /* data blob, 32 bit granularity */ + uint32_t init_mode_flags; +#define INIT_MODE_FLAGS(sc) (sc->init_mode_flags) + /* PRAM blobs - raw data */ + const uint8_t *tsem_int_table_data; + const uint8_t *tsem_pram_data; + const uint8_t *usem_int_table_data; + const uint8_t *usem_pram_data; + const uint8_t *xsem_int_table_data; + const uint8_t *xsem_pram_data; + const uint8_t *csem_int_table_data; + const uint8_t *csem_pram_data; +#define INIT_OPS(sc) (sc->init_ops) +#define INIT_OPS_OFFSETS(sc) (sc->init_ops_offsets) +#define INIT_DATA(sc) (sc->init_data) +#define INIT_TSEM_INT_TABLE_DATA(sc) (sc->tsem_int_table_data) +#define INIT_TSEM_PRAM_DATA(sc) (sc->tsem_pram_data) +#define INIT_USEM_INT_TABLE_DATA(sc) (sc->usem_int_table_data) +#define INIT_USEM_PRAM_DATA(sc) (sc->usem_pram_data) +#define INIT_XSEM_INT_TABLE_DATA(sc) (sc->xsem_int_table_data) +#define INIT_XSEM_PRAM_DATA(sc) (sc->xsem_pram_data) +#define INIT_CSEM_INT_TABLE_DATA(sc) (sc->csem_int_table_data) +#define INIT_CSEM_PRAM_DATA(sc) (sc->csem_pram_data) + +#define PHY_FW_VER_LEN 20 + char fw_ver[32]; + + /* ILT + * For max 196 cids (64*3 + non-eth), 32KB ILT page size and 1KB + * context size we need 8 ILT entries. + */ +#define ILT_MAX_L2_LINES 8 + struct hw_context context[ILT_MAX_L2_LINES]; + struct ecore_ilt *ilt; +#define ILT_MAX_LINES 256 + + /* max supported number of RSS queues: IGU SBs minus one for CNIC */ +#define BNX2X_MAX_RSS_COUNT(sc) ((sc)->igu_sb_cnt - CNIC_SUPPORT(sc)) + /* max CID count: Max RSS * Max_Tx_Multi_Cos + FCoE + iSCSI */ +#define BNX2X_L2_MAX_CID(sc) \ + (BNX2X_MAX_RSS_COUNT(sc) * ECORE_MULTI_TX_COS + 2 * CNIC_SUPPORT(sc)) +#define BNX2X_L2_CID_COUNT(sc) \ + (BNX2X_NUM_ETH_QUEUES(sc) * ECORE_MULTI_TX_COS + 2 * CNIC_SUPPORT(sc)) +#define L2_ILT_LINES(sc) \ + (DIV_ROUND_UP(BNX2X_L2_CID_COUNT(sc), ILT_PAGE_CIDS)) + + int qm_cid_count; + + uint8_t dropless_fc; + + /* total number of FW statistics requests */ + uint8_t fw_stats_num; + /* + * This is a memory buffer that will contain both statistics ramrod + * request and data. + */ + struct bnx2x_dma fw_stats_dma; + /* + * FW statistics request shortcut (points at the beginning of fw_stats + * buffer). + */ + int fw_stats_req_size; + struct bnx2x_fw_stats_req *fw_stats_req; + rte_iova_t fw_stats_req_mapping; + /* + * FW statistics data shortcut (points at the beginning of fw_stats + * buffer + fw_stats_req_size). 
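+ * Layout sketch (as implied by the two shortcut comments): the single + * fw_stats_dma allocation carries the request first and the data right + * after it, i.e. fw_stats_req = vaddr, fw_stats_req_mapping = paddr, + * fw_stats_data = vaddr + fw_stats_req_size and fw_stats_data_mapping = + * paddr + fw_stats_req_size.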
+ */ + int fw_stats_data_size; + struct bnx2x_fw_stats_data *fw_stats_data; + rte_iova_t fw_stats_data_mapping; + + /* tracking a pending STAT_QUERY ramrod */ + uint16_t stats_pending; + /* number of completed statistics ramrods */ + uint16_t stats_comp; + uint16_t stats_counter; + uint8_t stats_init; + int stats_state; + + struct bnx2x_eth_stats eth_stats; + struct host_func_stats func_stats; + struct bnx2x_eth_stats_old eth_stats_old; + struct bnx2x_net_stats_old net_stats_old; + struct bnx2x_fw_port_stats_old fw_stats_old; + + struct dmae_command stats_dmae; /* used by dmae command loader */ + int executer_idx; + + int mtu; + + /* DCB support on/off */ + int dcb_state; +#define BNX2X_DCB_STATE_OFF 0 +#define BNX2X_DCB_STATE_ON 1 + /* DCBX engine mode */ + int dcbx_enabled; +#define BNX2X_DCBX_ENABLED_OFF 0 +#define BNX2X_DCBX_ENABLED_ON_NEG_OFF 1 +#define BNX2X_DCBX_ENABLED_ON_NEG_ON 2 +#define BNX2X_DCBX_ENABLED_INVALID -1 + + uint8_t cnic_support; + uint8_t cnic_enabled; + uint8_t cnic_loaded; +#define CNIC_SUPPORT(sc) 0 /* ((sc)->cnic_support) */ +#define CNIC_ENABLED(sc) 0 /* ((sc)->cnic_enabled) */ +#define CNIC_LOADED(sc) 0 /* ((sc)->cnic_loaded) */ + + /* multiple tx classes of service */ + uint8_t max_cos; +#define BNX2X_MAX_PRIORITY 8 + /* priority to cos mapping */ + uint8_t prio_to_cos[BNX2X_MAX_PRIORITY]; + + int panic; + /* Array of Multicast addrs */ + struct rte_ether_addr mc_addrs[VF_MAX_MULTICAST_PER_VF]; + /* Multicast mac addresses number */ + uint16_t mc_addrs_num; +}; /* struct bnx2x_softc */ + +/* IOCTL sub-commands for edebug and firmware upgrade */ +#define BNX2X_IOC_RD_NVRAM 1 +#define BNX2X_IOC_WR_NVRAM 2 +#define BNX2X_IOC_STATS_SHOW_NUM 3 +#define BNX2X_IOC_STATS_SHOW_STR 4 +#define BNX2X_IOC_STATS_SHOW_CNT 5 + +struct bnx2x_nvram_data { + uint32_t op; /* ioctl sub-command */ + uint32_t offset; + uint32_t len; + uint32_t value[1]; /* variable */ +}; + +union bnx2x_stats_show_data { + uint32_t op; /* ioctl sub-command */ + + struct { + uint32_t num; /* return number of stats */ + uint32_t len; /* length of each string item */ + } desc; + + /* variable length... */ + char str[1]; /* holds names of desc.num stats, each desc.len in length */ + + /* variable length... 
*/ + uint64_t stats[1]; /* holds all stats */ +}; + +/* function init flags */ +#define FUNC_FLG_RSS 0x0001 +#define FUNC_FLG_STATS 0x0002 +/* FUNC_FLG_UNMATCHED 0x0004 */ +#define FUNC_FLG_SPQ 0x0010 +#define FUNC_FLG_LEADING 0x0020 /* PF only */ + +struct bnx2x_func_init_params { + rte_iova_t fw_stat_map; /* (dma) valid if FUNC_FLG_STATS */ + rte_iova_t spq_map; /* (dma) valid if FUNC_FLG_SPQ */ + uint16_t func_flgs; + uint16_t func_id; /* abs function id */ + uint16_t pf_id; + uint16_t spq_prod; /* valid if FUNC_FLG_SPQ */ +}; + +/* memory resources reside at BARs 0, 2, 4 */ +/* Run `pciconf -lb` to see mappings */ +#define BAR0 0 +#define BAR1 2 +#define BAR2 4 + +static inline void +bnx2x_reg_write8(struct bnx2x_softc *sc, size_t offset, uint8_t val) +{ + PMD_DEBUG_PERIODIC_LOG(DEBUG, sc, "offset=0x%08lx val=0x%02x", + (unsigned long)offset, val); + rte_write8(val, ((uint8_t *)sc->bar[BAR0].base_addr + offset)); +} + +static inline void +bnx2x_reg_write16(struct bnx2x_softc *sc, size_t offset, uint16_t val) +{ +#ifdef RTE_LIBRTE_BNX2X_DEBUG_PERIODIC + if ((offset % 2) != 0) + PMD_DRV_LOG(NOTICE, sc, "Unaligned 16-bit write to 0x%08lx", + (unsigned long)offset); +#endif + PMD_DEBUG_PERIODIC_LOG(DEBUG, sc, "offset=0x%08lx val=0x%04x", + (unsigned long)offset, val); + rte_write16(val, ((uint8_t *)sc->bar[BAR0].base_addr + offset)); + +} + +static inline void +bnx2x_reg_write32(struct bnx2x_softc *sc, size_t offset, uint32_t val) +{ +#ifdef RTE_LIBRTE_BNX2X_DEBUG_PERIODIC + if ((offset % 4) != 0) + PMD_DRV_LOG(NOTICE, sc, "Unaligned 32-bit write to 0x%08lx", + (unsigned long)offset); +#endif + + PMD_DEBUG_PERIODIC_LOG(DEBUG, sc, "offset=0x%08lx val=0x%08x", + (unsigned long)offset, val); + rte_write32(val, ((uint8_t *)sc->bar[BAR0].base_addr + offset)); +} + +static inline uint8_t +bnx2x_reg_read8(struct bnx2x_softc *sc, size_t offset) +{ + uint8_t val; + + val = rte_read8((uint8_t *)sc->bar[BAR0].base_addr + offset); + PMD_DEBUG_PERIODIC_LOG(DEBUG, sc, "offset=0x%08lx val=0x%02x", + (unsigned long)offset, val); + + return val; +} + +static inline uint16_t +bnx2x_reg_read16(struct bnx2x_softc *sc, size_t offset) +{ + uint16_t val; + +#ifdef RTE_LIBRTE_BNX2X_DEBUG_PERIODIC + if ((offset % 2) != 0) + PMD_DRV_LOG(NOTICE, sc, "Unaligned 16-bit read from 0x%08lx", + (unsigned long)offset); +#endif + + val = rte_read16(((uint8_t *)sc->bar[BAR0].base_addr + offset)); + PMD_DEBUG_PERIODIC_LOG(DEBUG, sc, "offset=0x%08lx val=0x%08x", + (unsigned long)offset, val); + + return val; +} + +static inline uint32_t +bnx2x_reg_read32(struct bnx2x_softc *sc, size_t offset) +{ + uint32_t val; + +#ifdef RTE_LIBRTE_BNX2X_DEBUG_PERIODIC + if ((offset % 4) != 0) + PMD_DRV_LOG(NOTICE, sc, "Unaligned 32-bit read from 0x%08lx", + (unsigned long)offset); +#endif + + val = rte_read32(((uint8_t *)sc->bar[BAR0].base_addr + offset)); + PMD_DEBUG_PERIODIC_LOG(DEBUG, sc, "offset=0x%08lx val=0x%08x", + (unsigned long)offset, val); + + return val; +} + +#define REG_ADDR(sc, offset) (((uint64_t)sc->bar[BAR0].base_addr) + (offset)) + +#define REG_RD8(sc, offset) bnx2x_reg_read8(sc, (offset)) +#define REG_RD16(sc, offset) bnx2x_reg_read16(sc, (offset)) +#define REG_RD32(sc, offset) bnx2x_reg_read32(sc, (offset)) + +#define REG_WR8(sc, offset, val) bnx2x_reg_write8(sc, (offset), val) +#define REG_WR16(sc, offset, val) bnx2x_reg_write16(sc, (offset), val) +#define REG_WR32(sc, offset, val) bnx2x_reg_write32(sc, (offset), val) + +#define REG_RD(sc, offset) REG_RD32(sc, offset) +#define REG_WR(sc, offset, val) REG_WR32(sc, offset, 
val) + +#define BNX2X_SP(sc, var) (&(sc)->sp->var) +#define BNX2X_SP_MAPPING(sc, var) \ + (sc->sp_dma.paddr + offsetof(struct bnx2x_slowpath, var)) + +#define BNX2X_FP(sc, nr, var) ((sc)->fp[(nr)].var) +#define BNX2X_SP_OBJ(sc, fp) ((sc)->sp_objs[(fp)->index]) + +#define bnx2x_fp(sc, nr, var) ((sc)->fp[nr].var) + +#define REG_RD_DMAE(sc, offset, valp, len32) \ + do { \ + (void)bnx2x_read_dmae(sc, offset, len32); \ + rte_memcpy(valp, BNX2X_SP(sc, wb_data[0]), (len32) * 4); \ + } while (0) + +#define REG_WR_DMAE(sc, offset, valp, len32) \ + do { \ + rte_memcpy(BNX2X_SP(sc, wb_data[0]), valp, (len32) * 4); \ + (void)bnx2x_write_dmae(sc, BNX2X_SP_MAPPING(sc, wb_data), offset, len32); \ + } while (0) + +#define REG_WR_DMAE_LEN(sc, offset, valp, len32) \ + REG_WR_DMAE(sc, offset, valp, len32) + +#define REG_RD_DMAE_LEN(sc, offset, valp, len32) \ + REG_RD_DMAE(sc, offset, valp, len32) + +#define VIRT_WR_DMAE_LEN(sc, data, addr, len32, le32_swap) \ + do { \ + /* if (le32_swap) { */ \ + /* PMD_PWARN_LOG(sc, "VIRT_WR_DMAE_LEN with le32_swap=1"); */ \ + /* } */ \ + rte_memcpy(GUNZIP_BUF(sc), data, len32 * 4); \ + ecore_write_big_buf_wb(sc, addr, len32); \ + } while (0) + +#define BNX2X_DB_MIN_SHIFT 3 /* 8 bytes */ +#define BNX2X_DB_SHIFT 7 /* 128 bytes */ +#if (BNX2X_DB_SHIFT < BNX2X_DB_MIN_SHIFT) +#error "Minimum DB doorbell stride is 8" +#endif +#define DPM_TRIGGER_TYPE 0x40 + +/* Doorbell macro */ +#define BNX2X_DB_WRITE(db_bar, val) rte_write32_relaxed((val), (db_bar)) + +#define BNX2X_DB_READ(db_bar) rte_read32_relaxed(db_bar) + +#define DOORBELL_ADDR(sc, offset) \ + (volatile uint32_t *)(((char *)(sc)->bar[BAR1].base_addr + (offset))) + +#define DOORBELL(sc, cid, val) \ + if (IS_PF(sc)) \ + BNX2X_DB_WRITE((DOORBELL_ADDR(sc, sc->doorbell_size * (cid) + DPM_TRIGGER_TYPE)), (val)); \ + else \ + BNX2X_DB_WRITE((DOORBELL_ADDR(sc, sc->doorbell_size * (cid))), (val)) \ + +#define SHMEM_ADDR(sc, field) \ + (sc->devinfo.shmem_base + offsetof(struct shmem_region, field)) +#define SHMEM_RD(sc, field) REG_RD(sc, SHMEM_ADDR(sc, field)) +#define SHMEM_RD16(sc, field) REG_RD16(sc, SHMEM_ADDR(sc, field)) +#define SHMEM_WR(sc, field, val) REG_WR(sc, SHMEM_ADDR(sc, field), val) + +#define SHMEM2_ADDR(sc, field) \ + (sc->devinfo.shmem2_base + offsetof(struct shmem2_region, field)) +#define SHMEM2_HAS(sc, field) \ + (sc->devinfo.shmem2_base && (REG_RD(sc, SHMEM2_ADDR(sc, size)) > \ + offsetof(struct shmem2_region, field))) +#define SHMEM2_RD(sc, field) REG_RD(sc, SHMEM2_ADDR(sc, field)) +#define SHMEM2_WR(sc, field, val) REG_WR(sc, SHMEM2_ADDR(sc, field), val) + +#define MFCFG_ADDR(sc, field) \ + (sc->devinfo.mf_cfg_base + offsetof(struct mf_cfg, field)) +#define MFCFG_RD(sc, field) REG_RD(sc, MFCFG_ADDR(sc, field)) +#define MFCFG_RD16(sc, field) REG_RD16(sc, MFCFG_ADDR(sc, field)) +#define MFCFG_WR(sc, field, val) REG_WR(sc, MFCFG_ADDR(sc, field), val) + +/* DMAE command defines */ + +#define DMAE_TIMEOUT -1 +#define DMAE_PCI_ERROR -2 /* E2 and onward */ +#define DMAE_NOT_RDY -3 +#define DMAE_PCI_ERR_FLAG 0x80000000 + +#define DMAE_SRC_PCI 0 +#define DMAE_SRC_GRC 1 + +#define DMAE_DST_NONE 0 +#define DMAE_DST_PCI 1 +#define DMAE_DST_GRC 2 + +#define DMAE_COMP_PCI 0 +#define DMAE_COMP_GRC 1 + +#define DMAE_COMP_REGULAR 0 +#define DMAE_COM_SET_ERR 1 + +#define DMAE_CMD_SRC_PCI (DMAE_SRC_PCI << DMAE_COMMAND_SRC_SHIFT) +#define DMAE_CMD_SRC_GRC (DMAE_SRC_GRC << DMAE_COMMAND_SRC_SHIFT) +#define DMAE_CMD_DST_PCI (DMAE_DST_PCI << DMAE_COMMAND_DST_SHIFT) +#define DMAE_CMD_DST_GRC (DMAE_DST_GRC << DMAE_COMMAND_DST_SHIFT) 
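+/*
+ * How these fields combine (illustrative): a DMAE opcode is built by OR-ing
+ * the shifted source/destination selectors above with the completion,
+ * endianness and port fields that follow, e.g. a PCI-to-GRC copy completing
+ * to PCI would use roughly DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
+ * DMAE_CMD_C_DST_PCI.  The bnx2x_dmae_opcode() helper declared later in this
+ * header is what actually assembles the opcode used by the driver.
+ */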
+ +#define DMAE_CMD_C_DST_PCI (DMAE_COMP_PCI << DMAE_COMMAND_C_DST_SHIFT) +#define DMAE_CMD_C_DST_GRC (DMAE_COMP_GRC << DMAE_COMMAND_C_DST_SHIFT) + +#define DMAE_CMD_ENDIANITY_NO_SWAP (0 << DMAE_COMMAND_ENDIANITY_SHIFT) +#define DMAE_CMD_ENDIANITY_B_SWAP (1 << DMAE_COMMAND_ENDIANITY_SHIFT) +#define DMAE_CMD_ENDIANITY_DW_SWAP (2 << DMAE_COMMAND_ENDIANITY_SHIFT) +#define DMAE_CMD_ENDIANITY_B_DW_SWAP (3 << DMAE_COMMAND_ENDIANITY_SHIFT) + +#define DMAE_CMD_PORT_0 0 +#define DMAE_CMD_PORT_1 DMAE_COMMAND_PORT + +#define DMAE_SRC_PF 0 +#define DMAE_SRC_VF 1 + +#define DMAE_DST_PF 0 +#define DMAE_DST_VF 1 + +#define DMAE_C_SRC 0 +#define DMAE_C_DST 1 + +#define DMAE_LEN32_RD_MAX 0x80 +#define DMAE_LEN32_WR_MAX(sc) 0x2000 + +#define DMAE_COMP_VAL 0x60d0d0ae /* E2 and beyond, upper bit indicates error */ + +#define MAX_DMAE_C_PER_PORT 8 +#define INIT_DMAE_C(sc) ((SC_PORT(sc) * MAX_DMAE_C_PER_PORT) + SC_VN(sc)) +#define PMF_DMAE_C(sc) ((SC_PORT(sc) * MAX_DMAE_C_PER_PORT) + E1HVN_MAX) + +static const uint32_t dmae_reg_go_c[] = { + DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3, + DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7, + DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11, + DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15 +}; + +#define ATTN_NIG_FOR_FUNC (1L << 8) +#define ATTN_SW_TIMER_4_FUNC (1L << 9) +#define GPIO_2_FUNC (1L << 10) +#define GPIO_3_FUNC (1L << 11) +#define GPIO_4_FUNC (1L << 12) +#define ATTN_GENERAL_ATTN_1 (1L << 13) +#define ATTN_GENERAL_ATTN_2 (1L << 14) +#define ATTN_GENERAL_ATTN_3 (1L << 15) +#define ATTN_GENERAL_ATTN_4 (1L << 13) +#define ATTN_GENERAL_ATTN_5 (1L << 14) +#define ATTN_GENERAL_ATTN_6 (1L << 15) +#define ATTN_HARD_WIRED_MASK 0xff00 +#define ATTENTION_ID 4 + +#define AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR \ + AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR + +#define MAX_IGU_ATTN_ACK_TO 100 + +#define STORM_ASSERT_ARRAY_SIZE 50 + +#define BNX2X_PMF_LINK_ASSERT(sc) \ + GENERAL_ATTEN_OFFSET(LINK_SYNC_ATTENTION_BIT_FUNC_0 + SC_FUNC(sc)) + +#define BNX2X_MC_ASSERT_BITS \ + (GENERAL_ATTEN_OFFSET(TSTORM_FATAL_ASSERT_ATTENTION_BIT) | \ + GENERAL_ATTEN_OFFSET(USTORM_FATAL_ASSERT_ATTENTION_BIT) | \ + GENERAL_ATTEN_OFFSET(CSTORM_FATAL_ASSERT_ATTENTION_BIT) | \ + GENERAL_ATTEN_OFFSET(XSTORM_FATAL_ASSERT_ATTENTION_BIT)) + +#define BNX2X_MCP_ASSERT \ + GENERAL_ATTEN_OFFSET(MCP_FATAL_ASSERT_ATTENTION_BIT) + +#define BNX2X_GRC_TIMEOUT GENERAL_ATTEN_OFFSET(LATCHED_ATTN_TIMEOUT_GRC) +#define BNX2X_GRC_RSV (GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCR) | \ + GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCT) | \ + GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCN) | \ + GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCU) | \ + GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCP) | \ + GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RSVD_GRC)) + +#define HW_INTERRUT_ASSERT_SET_0 \ + (AEU_INPUTS_ATTN_BITS_TSDM_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_TCM_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_TSEMI_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_BRB_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_PBCLIENT_HW_INTERRUPT) +#define HW_PRTY_ASSERT_SET_0 (AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR) +#define HW_INTERRUT_ASSERT_SET_1 \ + (AEU_INPUTS_ATTN_BITS_QM_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_TIMERS_HW_INTERRUPT | 
\ + AEU_INPUTS_ATTN_BITS_XSDM_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_XCM_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_XSEMI_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_USDM_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_UCM_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_USEMI_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_UPB_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_CSDM_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_CCM_HW_INTERRUPT) +#define HW_PRTY_ASSERT_SET_1 (AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR) +#define HW_INTERRUT_ASSERT_SET_2 \ + (AEU_INPUTS_ATTN_BITS_CSEMI_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_CDU_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_DMAE_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT |\ + AEU_INPUTS_ATTN_BITS_MISC_HW_INTERRUPT) +#define HW_PRTY_ASSERT_SET_2 (AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR) + +#define HW_PRTY_ASSERT_SET_3_WITHOUT_SCPAD \ + (AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \ + AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \ + AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY) + +#define HW_PRTY_ASSERT_SET_3 (HW_PRTY_ASSERT_SET_3_WITHOUT_SCPAD | \ + AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY) + +#define HW_PRTY_ASSERT_SET_4 (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR) + +#define MULTI_MASK 0x7f + +#define PFS_PER_PORT(sc) \ + ((CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4) +#define SC_MAX_VN_NUM(sc) PFS_PER_PORT(sc) + +#define FIRST_ABS_FUNC_IN_PORT(sc) \ + ((CHIP_PORT_MODE(sc) == CHIP_PORT_MODE_NONE) ? 
\ + PORT_ID(sc) : (PATH_ID(sc) + (2 * PORT_ID(sc)))) + +#define FOREACH_ABS_FUNC_IN_PORT(sc, i) \ + for ((i) = FIRST_ABS_FUNC_IN_PORT(sc); \ + (i) < MAX_FUNC_NUM; \ + (i) += (MAX_FUNC_NUM / PFS_PER_PORT(sc))) + +#define BNX2X_SWCID_SHIFT 17 +#define BNX2X_SWCID_MASK ((0x1 << BNX2X_SWCID_SHIFT) - 1) + +#define SW_CID(x) (le32toh(x) & BNX2X_SWCID_MASK) +#define CQE_CMD(x) (le32toh(x) >> COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT) + +#define CQE_TYPE(cqe_fp_flags) ((cqe_fp_flags) & ETH_FAST_PATH_RX_CQE_TYPE) +#define CQE_TYPE_START(cqe_type) ((cqe_type) == RX_ETH_CQE_TYPE_ETH_START_AGG) +#define CQE_TYPE_STOP(cqe_type) ((cqe_type) == RX_ETH_CQE_TYPE_ETH_STOP_AGG) +#define CQE_TYPE_SLOW(cqe_type) ((cqe_type) == RX_ETH_CQE_TYPE_ETH_RAMROD) +#define CQE_TYPE_FAST(cqe_type) ((cqe_type) == RX_ETH_CQE_TYPE_ETH_FASTPATH) + +/* must be used on a CID before placing it on a HW ring */ +#define HW_CID(sc, x) \ + ((SC_PORT(sc) << 23) | (SC_VN(sc) << BNX2X_SWCID_SHIFT) | (x)) + +#define SPEED_10 10 +#define SPEED_100 100 +#define SPEED_1000 1000 +#define SPEED_2500 2500 +#define SPEED_10000 10000 + +#define PCI_PM_D0 1 +#define PCI_PM_D3hot 2 + +int bnx2x_test_bit(int nr, volatile unsigned long * addr); +void bnx2x_set_bit(unsigned int nr, volatile unsigned long * addr); +void bnx2x_clear_bit(int nr, volatile unsigned long * addr); +int bnx2x_test_and_clear_bit(int nr, volatile unsigned long * addr); +int bnx2x_cmpxchg(volatile int *addr, int old, int new); + +int bnx2x_dma_alloc(struct bnx2x_softc *sc, size_t size, + struct bnx2x_dma *dma, const char *msg, uint32_t align); +void bnx2x_dma_free(struct bnx2x_dma *dma); +uint32_t bnx2x_dmae_opcode_add_comp(uint32_t opcode, uint8_t comp_type); +uint32_t bnx2x_dmae_opcode_clr_src_reset(uint32_t opcode); +uint32_t bnx2x_dmae_opcode(struct bnx2x_softc *sc, uint8_t src_type, + uint8_t dst_type, uint8_t with_comp, + uint8_t comp_type); +void bnx2x_post_dmae(struct bnx2x_softc *sc, struct dmae_command *dmae, int idx); +void bnx2x_read_dmae(struct bnx2x_softc *sc, uint32_t src_addr, uint32_t len32); +void bnx2x_write_dmae(struct bnx2x_softc *sc, rte_iova_t dma_addr, + uint32_t dst_addr, uint32_t len32); +void bnx2x_set_ctx_validation(struct bnx2x_softc *sc, struct eth_context *cxt, + uint32_t cid); +void bnx2x_update_coalesce_sb_index(struct bnx2x_softc *sc, uint8_t fw_sb_id, + uint8_t sb_index, uint8_t disable, + uint16_t usec); + +int bnx2x_sp_post(struct bnx2x_softc *sc, int command, int cid, + uint32_t data_hi, uint32_t data_lo, int cmd_type); + +void ecore_init_e1h_firmware(struct bnx2x_softc *sc); +void ecore_init_e2_firmware(struct bnx2x_softc *sc); + +void ecore_storm_memset_struct(struct bnx2x_softc *sc, uint32_t addr, + size_t size, uint32_t *data); + +#define CATC_TRIGGER(sc, data) REG_WR((sc), 0x2000, (data)); +#define CATC_TRIGGER_START(sc) CATC_TRIGGER((sc), 0xcafecafe) + +#define BNX2X_MAC_FMT "%pM" +#define BNX2X_MAC_PRN_LIST(mac) (mac) + +/***********/ +/* INLINES */ +/***********/ + +static inline uint32_t +reg_poll(struct bnx2x_softc *sc, uint32_t reg, uint32_t expected, int ms, int wait) +{ + uint32_t val; + do { + val = REG_RD(sc, reg); + if (val == expected) { + break; + } + ms -= wait; + DELAY(wait * 1000); + } while (ms > 0); + + return val; +} + +static inline void +bnx2x_update_fp_sb_idx(struct bnx2x_fastpath *fp) +{ + mb(); /* status block is written to by the chip */ + fp->fp_hc_idx = fp->sb_running_index[SM_RX_ID]; +} + +static inline void +bnx2x_igu_ack_sb_gen(struct bnx2x_softc *sc, uint8_t segment, + uint16_t index, uint8_t op, uint8_t 
update, uint32_t igu_addr) +{ + struct igu_regular cmd_data = {0}; + + cmd_data.sb_id_and_flags = + ((index << IGU_REGULAR_SB_INDEX_SHIFT) | + (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) | + (update << IGU_REGULAR_BUPDATE_SHIFT) | + (op << IGU_REGULAR_ENABLE_INT_SHIFT)); + + REG_WR(sc, igu_addr, cmd_data.sb_id_and_flags); + + /* Make sure that ACK is written */ + mb(); +} + +static inline void +bnx2x_hc_ack_sb(struct bnx2x_softc *sc, uint8_t sb_id, uint8_t storm, + uint16_t index, uint8_t op, uint8_t update) +{ + uint32_t hc_addr = (HC_REG_COMMAND_REG + SC_PORT(sc) * 32 + + COMMAND_REG_INT_ACK); + struct igu_ack_register igu_ack; + uint32_t *val = NULL; + + igu_ack.status_block_index = index; + igu_ack.sb_id_and_flags = + ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) | + (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) | + (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) | + (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT)); + + val = (uint32_t *)&igu_ack; + REG_WR(sc, hc_addr, *val); + + /* Make sure that ACK is written */ + mb(); +} + +static inline uint32_t +bnx2x_hc_ack_int(struct bnx2x_softc *sc) +{ + uint32_t hc_addr = (HC_REG_COMMAND_REG + SC_PORT(sc) * 32 + + COMMAND_REG_SIMD_MASK); + uint32_t result = REG_RD(sc, hc_addr); + + mb(); + return result; +} + +static inline uint32_t +bnx2x_igu_ack_int(struct bnx2x_softc *sc) +{ + uint32_t igu_addr = (BAR_IGU_INTMEM + IGU_REG_SISR_MDPC_WMASK_LSB_UPPER * 8); + uint32_t result = REG_RD(sc, igu_addr); + + /* PMD_PDEBUG_LOG(sc, DBG_INTR, "read 0x%08x from IGU addr 0x%x", + result, igu_addr); */ + + mb(); + return result; +} + +static inline uint32_t +bnx2x_ack_int(struct bnx2x_softc *sc) +{ + mb(); + if (sc->devinfo.int_block == INT_BLOCK_HC) { + return bnx2x_hc_ack_int(sc); + } else { + return bnx2x_igu_ack_int(sc); + } +} + +static inline int +func_by_vn(struct bnx2x_softc *sc, int vn) +{ + return 2 * vn + SC_PORT(sc); +} + +/* + * send notification to other functions. + */ +static inline void +bnx2x_link_sync_notify(struct bnx2x_softc *sc) +{ + int func, vn; + + /* Set the attention towards other drivers on the same port */ + for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) { + if (vn == SC_VN(sc)) + continue; + + func = func_by_vn(sc, vn); + REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_0 + + (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func) * 4, 1); + } +} + +/* + * Statistics ID are global per chip/path, while Client IDs for E1x + * are per port. 
+ */ +static inline uint8_t +bnx2x_stats_id(struct bnx2x_fastpath *fp) +{ + struct bnx2x_softc *sc = fp->sc; + + if (!CHIP_IS_E1x(sc)) { + return fp->cl_id; + } + + return fp->cl_id + SC_PORT(sc) * FP_SB_MAX_E1x; +} + +int bnx2x_init(struct bnx2x_softc *sc); +void bnx2x_load_firmware(struct bnx2x_softc *sc); +int bnx2x_attach(struct bnx2x_softc *sc); +int bnx2x_nic_unload(struct bnx2x_softc *sc, uint32_t unload_mode, uint8_t keep_link); +int bnx2x_alloc_hsi_mem(struct bnx2x_softc *sc); +int bnx2x_alloc_ilt_mem(struct bnx2x_softc *sc); +void bnx2x_free_ilt_mem(struct bnx2x_softc *sc); +void bnx2x_dump_tx_chain(struct bnx2x_fastpath * fp, int bd_prod, int count); +int bnx2x_tx_encap(struct bnx2x_tx_queue *txq, struct rte_mbuf *m0); +uint8_t bnx2x_txeof(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp); +void bnx2x_print_adapter_info(struct bnx2x_softc *sc); +void bnx2x_print_device_info(struct bnx2x_softc *sc); +int bnx2x_intr_legacy(struct bnx2x_softc *sc); +void bnx2x_link_status_update(struct bnx2x_softc *sc); +int bnx2x_complete_sp(struct bnx2x_softc *sc); +int bnx2x_set_storm_rx_mode(struct bnx2x_softc *sc); +void bnx2x_periodic_callout(struct bnx2x_softc *sc); +void bnx2x_periodic_stop(void *param); + +int bnx2x_vf_get_resources(struct bnx2x_softc *sc, uint8_t tx_count, uint8_t rx_count); +void bnx2x_vf_close(struct bnx2x_softc *sc); +int bnx2x_vf_init(struct bnx2x_softc *sc); +void bnx2x_vf_unload(struct bnx2x_softc *sc); +int bnx2x_vf_setup_queue(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp, + int leading); +void bnx2x_free_hsi_mem(struct bnx2x_softc *sc); +int bnx2x_vf_set_rx_mode(struct bnx2x_softc *sc); +int bnx2x_check_bull(struct bnx2x_softc *sc); + +//#define BNX2X_PULSE + +#define BNX2X_PCI_CAP 1 +#define BNX2X_PCI_ECAP 2 + +static inline struct bnx2x_pci_cap* +pci_find_cap(struct bnx2x_softc *sc, uint8_t id, uint8_t type) +{ + struct bnx2x_pci_cap *cap = sc->pci_caps; + + while (cap) { + if (cap->id == id && cap->type == type) + return cap; + cap = cap->next; + } + + return NULL; +} + +static inline void +bnx2x_set_rx_mode(struct bnx2x_softc *sc) +{ + if (sc->state == BNX2X_STATE_OPEN) { + if (IS_PF(sc)) { + bnx2x_set_storm_rx_mode(sc); + } else { + sc->rx_mode = BNX2X_RX_MODE_PROMISC; + bnx2x_vf_set_rx_mode(sc); + } + } else { + PMD_DRV_LOG(INFO, sc, "Card is not ready to change mode"); + } +} + +static inline int pci_read(struct bnx2x_softc *sc, size_t addr, + void *val, uint8_t size) +{ + if (rte_pci_read_config(sc->pci_dev, val, size, addr) <= 0) { + PMD_DRV_LOG(ERR, sc, "Can't read from PCI config space"); + return ENXIO; + } + + return 0; +} + +static inline int pci_write_word(struct bnx2x_softc *sc, size_t addr, off_t val) +{ + uint16_t val16 = val; + + if (rte_pci_write_config(sc->pci_dev, &val16, + sizeof(val16), addr) <= 0) { + PMD_DRV_LOG(ERR, sc, "Can't write to PCI config space"); + return ENXIO; + } + + return 0; +} + +static inline int pci_write_long(struct bnx2x_softc *sc, size_t addr, off_t val) +{ + uint32_t val32 = val; + if (rte_pci_write_config(sc->pci_dev, &val32, + sizeof(val32), addr) <= 0) { + PMD_DRV_LOG(ERR, sc, "Can't write to PCI config space"); + return ENXIO; + } + + return 0; +} + +#endif /* __BNX2X_H__ */ diff --git a/src/spdk/dpdk/drivers/net/bnx2x/bnx2x_ethdev.c b/src/spdk/dpdk/drivers/net/bnx2x/bnx2x_ethdev.c new file mode 100644 index 000000000..adc3690fc --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnx2x/bnx2x_ethdev.c @@ -0,0 +1,817 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2013-2015 Brocade Communications 
Systems, Inc. + * Copyright (c) 2015-2018 Cavium Inc. + * All rights reserved. + * www.cavium.com + */ + +#include "bnx2x.h" +#include "bnx2x_rxtx.h" + +#include +#include +#include +#include + +int bnx2x_logtype_init; +int bnx2x_logtype_driver; + +/* + * The set of PCI devices this driver supports + */ +#define BROADCOM_PCI_VENDOR_ID 0x14E4 +static const struct rte_pci_id pci_id_bnx2x_map[] = { + { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57800) }, + { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57711) }, + { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57810) }, + { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57811) }, + { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57840_OBS) }, + { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57840_4_10) }, + { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57840_2_20) }, +#ifdef RTE_LIBRTE_BNX2X_MF_SUPPORT + { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57810_MF) }, + { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57811_MF) }, + { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57840_MF) }, +#endif + { .vendor_id = 0, } +}; + +static const struct rte_pci_id pci_id_bnx2xvf_map[] = { + { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57800_VF) }, + { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57810_VF) }, + { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57811_VF) }, + { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57840_VF) }, + { .vendor_id = 0, } +}; + +struct rte_bnx2x_xstats_name_off { + char name[RTE_ETH_XSTATS_NAME_SIZE]; + uint32_t offset_hi; + uint32_t offset_lo; +}; + +static const struct rte_bnx2x_xstats_name_off bnx2x_xstats_strings[] = { + {"rx_buffer_drops", + offsetof(struct bnx2x_eth_stats, brb_drop_hi), + offsetof(struct bnx2x_eth_stats, brb_drop_lo)}, + {"rx_buffer_truncates", + offsetof(struct bnx2x_eth_stats, brb_truncate_hi), + offsetof(struct bnx2x_eth_stats, brb_truncate_lo)}, + {"rx_buffer_truncate_discard", + offsetof(struct bnx2x_eth_stats, brb_truncate_discard), + offsetof(struct bnx2x_eth_stats, brb_truncate_discard)}, + {"mac_filter_discard", + offsetof(struct bnx2x_eth_stats, mac_filter_discard), + offsetof(struct bnx2x_eth_stats, mac_filter_discard)}, + {"no_match_vlan_tag_discard", + offsetof(struct bnx2x_eth_stats, mf_tag_discard), + offsetof(struct bnx2x_eth_stats, mf_tag_discard)}, + {"tx_pause", + offsetof(struct bnx2x_eth_stats, pause_frames_sent_hi), + offsetof(struct bnx2x_eth_stats, pause_frames_sent_lo)}, + {"rx_pause", + offsetof(struct bnx2x_eth_stats, pause_frames_received_hi), + offsetof(struct bnx2x_eth_stats, pause_frames_received_lo)}, + {"tx_priority_flow_control", + offsetof(struct bnx2x_eth_stats, pfc_frames_sent_hi), + offsetof(struct bnx2x_eth_stats, pfc_frames_sent_lo)}, + {"rx_priority_flow_control", + offsetof(struct bnx2x_eth_stats, pfc_frames_received_hi), + offsetof(struct bnx2x_eth_stats, pfc_frames_received_lo)} +}; + +static int +bnx2x_link_update(struct rte_eth_dev *dev) +{ + struct bnx2x_softc *sc = dev->data->dev_private; + struct rte_eth_link link; + + PMD_INIT_FUNC_TRACE(sc); + + memset(&link, 0, sizeof(link)); + mb(); + link.link_speed = sc->link_vars.line_speed; + switch (sc->link_vars.duplex) { + case DUPLEX_FULL: + link.link_duplex = ETH_LINK_FULL_DUPLEX; + break; + case DUPLEX_HALF: + link.link_duplex = ETH_LINK_HALF_DUPLEX; + break; + } + link.link_autoneg = !(dev->data->dev_conf.link_speeds & + ETH_LINK_SPEED_FIXED); + link.link_status = sc->link_vars.link_up; + + return rte_eth_linkstatus_set(dev, &link); +} + +static void 
+bnx2x_interrupt_action(struct rte_eth_dev *dev, int intr_cxt) +{ + struct bnx2x_softc *sc = dev->data->dev_private; + uint32_t link_status; + + bnx2x_intr_legacy(sc); + + if ((atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_GO) && + !intr_cxt) + bnx2x_periodic_callout(sc); + link_status = REG_RD(sc, sc->link_params.shmem_base + + offsetof(struct shmem_region, + port_mb[sc->link_params.port].link_status)); + if ((link_status & LINK_STATUS_LINK_UP) != dev->data->dev_link.link_status) + bnx2x_link_update(dev); +} + +static void +bnx2x_interrupt_handler(void *param) +{ + struct rte_eth_dev *dev = (struct rte_eth_dev *)param; + struct bnx2x_softc *sc = dev->data->dev_private; + + PMD_DEBUG_PERIODIC_LOG(INFO, sc, "Interrupt handled"); + + bnx2x_interrupt_action(dev, 1); + rte_intr_ack(&sc->pci_dev->intr_handle); +} + +static void bnx2x_periodic_start(void *param) +{ + struct rte_eth_dev *dev = (struct rte_eth_dev *)param; + struct bnx2x_softc *sc = dev->data->dev_private; + int ret = 0; + + atomic_store_rel_long(&sc->periodic_flags, PERIODIC_GO); + bnx2x_interrupt_action(dev, 0); + if (IS_PF(sc)) { + ret = rte_eal_alarm_set(BNX2X_SP_TIMER_PERIOD, + bnx2x_periodic_start, (void *)dev); + if (ret) { + PMD_DRV_LOG(ERR, sc, "Unable to start periodic" + " timer rc %d", ret); + } + } +} + +void bnx2x_periodic_stop(void *param) +{ + struct rte_eth_dev *dev = (struct rte_eth_dev *)param; + struct bnx2x_softc *sc = dev->data->dev_private; + + atomic_store_rel_long(&sc->periodic_flags, PERIODIC_STOP); + + rte_eal_alarm_cancel(bnx2x_periodic_start, (void *)dev); + + PMD_DRV_LOG(DEBUG, sc, "Periodic poll stopped"); +} + +/* + * Devops - helper functions can be called from user application + */ + +static int +bnx2x_dev_configure(struct rte_eth_dev *dev) +{ + struct bnx2x_softc *sc = dev->data->dev_private; + struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode; + + int mp_ncpus = sysconf(_SC_NPROCESSORS_CONF); + + PMD_INIT_FUNC_TRACE(sc); + + if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) { + sc->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len; + dev->data->mtu = sc->mtu; + } + + if (dev->data->nb_tx_queues > dev->data->nb_rx_queues) { + PMD_DRV_LOG(ERR, sc, "The number of TX queues is greater than number of RX queues"); + return -EINVAL; + } + + sc->num_queues = MAX(dev->data->nb_rx_queues, dev->data->nb_tx_queues); + if (sc->num_queues > mp_ncpus) { + PMD_DRV_LOG(ERR, sc, "The number of queues is more than number of CPUs"); + return -EINVAL; + } + + PMD_DRV_LOG(DEBUG, sc, "num_queues=%d, mtu=%d", + sc->num_queues, sc->mtu); + + /* allocate ilt */ + if (bnx2x_alloc_ilt_mem(sc) != 0) { + PMD_DRV_LOG(ERR, sc, "bnx2x_alloc_ilt_mem was failed"); + return -ENXIO; + } + + bnx2x_dev_rxtx_init_dummy(dev); + return 0; +} + +static int +bnx2x_dev_start(struct rte_eth_dev *dev) +{ + struct bnx2x_softc *sc = dev->data->dev_private; + int ret = 0; + + PMD_INIT_FUNC_TRACE(sc); + + /* start the periodic callout */ + if (IS_PF(sc)) { + if (atomic_load_acq_long(&sc->periodic_flags) == + PERIODIC_STOP) { + bnx2x_periodic_start(dev); + PMD_DRV_LOG(DEBUG, sc, "Periodic poll re-started"); + } + } + + ret = bnx2x_init(sc); + if (ret) { + PMD_DRV_LOG(DEBUG, sc, "bnx2x_init failed (%d)", ret); + return -1; + } + + if (IS_PF(sc)) { + rte_intr_callback_register(&sc->pci_dev->intr_handle, + bnx2x_interrupt_handler, (void *)dev); + + if (rte_intr_enable(&sc->pci_dev->intr_handle)) + PMD_DRV_LOG(ERR, sc, "rte_intr_enable failed"); + } + + /* Configure the previously stored Multicast address list */ + if (IS_VF(sc)) + 
bnx2x_vfpf_set_mcast(sc, sc->mc_addrs, sc->mc_addrs_num); + bnx2x_dev_rxtx_init(dev); + + bnx2x_print_device_info(sc); + + return ret; +} + +static void +bnx2x_dev_stop(struct rte_eth_dev *dev) +{ + struct bnx2x_softc *sc = dev->data->dev_private; + int ret = 0; + + PMD_INIT_FUNC_TRACE(sc); + + bnx2x_dev_rxtx_init_dummy(dev); + + if (IS_PF(sc)) { + rte_intr_disable(&sc->pci_dev->intr_handle); + rte_intr_callback_unregister(&sc->pci_dev->intr_handle, + bnx2x_interrupt_handler, (void *)dev); + + /* stop the periodic callout */ + bnx2x_periodic_stop(dev); + } + /* Remove the configured Multicast list + * Sending NULL for the list of address and the + * Number is set to 0 denoting DEL_CMD + */ + if (IS_VF(sc)) + bnx2x_vfpf_set_mcast(sc, NULL, 0); + ret = bnx2x_nic_unload(sc, UNLOAD_NORMAL, FALSE); + if (ret) { + PMD_DRV_LOG(DEBUG, sc, "bnx2x_nic_unload failed (%d)", ret); + return; + } + + return; +} + +static void +bnx2x_dev_close(struct rte_eth_dev *dev) +{ + struct bnx2x_softc *sc = dev->data->dev_private; + + PMD_INIT_FUNC_TRACE(sc); + + if (IS_VF(sc)) + bnx2x_vf_close(sc); + + bnx2x_dev_clear_queues(dev); + memset(&(dev->data->dev_link), 0 , sizeof(struct rte_eth_link)); + + /* free ilt */ + bnx2x_free_ilt_mem(sc); +} + +static int +bnx2x_promisc_enable(struct rte_eth_dev *dev) +{ + struct bnx2x_softc *sc = dev->data->dev_private; + + PMD_INIT_FUNC_TRACE(sc); + sc->rx_mode = BNX2X_RX_MODE_PROMISC; + if (rte_eth_allmulticast_get(dev->data->port_id) == 1) + sc->rx_mode = BNX2X_RX_MODE_ALLMULTI_PROMISC; + bnx2x_set_rx_mode(sc); + + return 0; +} + +static int +bnx2x_promisc_disable(struct rte_eth_dev *dev) +{ + struct bnx2x_softc *sc = dev->data->dev_private; + + PMD_INIT_FUNC_TRACE(sc); + sc->rx_mode = BNX2X_RX_MODE_NORMAL; + if (rte_eth_allmulticast_get(dev->data->port_id) == 1) + sc->rx_mode = BNX2X_RX_MODE_ALLMULTI; + bnx2x_set_rx_mode(sc); + + return 0; +} + +static int +bnx2x_dev_allmulticast_enable(struct rte_eth_dev *dev) +{ + struct bnx2x_softc *sc = dev->data->dev_private; + + PMD_INIT_FUNC_TRACE(sc); + sc->rx_mode = BNX2X_RX_MODE_ALLMULTI; + if (rte_eth_promiscuous_get(dev->data->port_id) == 1) + sc->rx_mode = BNX2X_RX_MODE_ALLMULTI_PROMISC; + bnx2x_set_rx_mode(sc); + + return 0; +} + +static int +bnx2x_dev_allmulticast_disable(struct rte_eth_dev *dev) +{ + struct bnx2x_softc *sc = dev->data->dev_private; + + PMD_INIT_FUNC_TRACE(sc); + sc->rx_mode = BNX2X_RX_MODE_NORMAL; + if (rte_eth_promiscuous_get(dev->data->port_id) == 1) + sc->rx_mode = BNX2X_RX_MODE_PROMISC; + bnx2x_set_rx_mode(sc); + + return 0; +} + +static int +bnx2x_dev_set_mc_addr_list(struct rte_eth_dev *dev, + struct rte_ether_addr *mc_addrs, uint32_t mc_addrs_num) +{ + struct bnx2x_softc *sc = dev->data->dev_private; + int err; + PMD_INIT_FUNC_TRACE(sc); + /* flush previous addresses */ + err = bnx2x_vfpf_set_mcast(sc, NULL, 0); + if (err) + return err; + sc->mc_addrs_num = 0; + + /* Add new ones */ + err = bnx2x_vfpf_set_mcast(sc, mc_addrs, mc_addrs_num); + if (err) + return err; + + sc->mc_addrs_num = mc_addrs_num; + memcpy(sc->mc_addrs, mc_addrs, mc_addrs_num * sizeof(*mc_addrs)); + + return 0; +} + +static int +bnx2x_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete) +{ + struct bnx2x_softc *sc = dev->data->dev_private; + + PMD_INIT_FUNC_TRACE(sc); + + return bnx2x_link_update(dev); +} + +static int +bnx2xvf_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete) +{ + struct bnx2x_softc *sc = dev->data->dev_private; + int ret = 0; + + ret = bnx2x_link_update(dev); + + 
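+	/*
+	 * Also poll the PF-to-VF bulletin board: if the PF has flagged
+	 * CHANNEL_DOWN, force the reported link status down, since the VF
+	 * is no longer operational.
+	 */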
bnx2x_check_bull(sc); + if (sc->old_bulletin.valid_bitmap & (1 << CHANNEL_DOWN)) { + PMD_DRV_LOG(ERR, sc, "PF indicated channel is down." + "VF device is no longer operational"); + dev->data->dev_link.link_status = ETH_LINK_DOWN; + } + + return ret; +} + +static int +bnx2x_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) +{ + struct bnx2x_softc *sc = dev->data->dev_private; + uint32_t brb_truncate_discard; + uint64_t brb_drops; + uint64_t brb_truncates; + + PMD_INIT_FUNC_TRACE(sc); + + bnx2x_stats_handle(sc, STATS_EVENT_UPDATE); + + memset(stats, 0, sizeof (struct rte_eth_stats)); + + stats->ipackets = + HILO_U64(sc->eth_stats.total_unicast_packets_received_hi, + sc->eth_stats.total_unicast_packets_received_lo) + + HILO_U64(sc->eth_stats.total_multicast_packets_received_hi, + sc->eth_stats.total_multicast_packets_received_lo) + + HILO_U64(sc->eth_stats.total_broadcast_packets_received_hi, + sc->eth_stats.total_broadcast_packets_received_lo); + + stats->opackets = + HILO_U64(sc->eth_stats.total_unicast_packets_transmitted_hi, + sc->eth_stats.total_unicast_packets_transmitted_lo) + + HILO_U64(sc->eth_stats.total_multicast_packets_transmitted_hi, + sc->eth_stats.total_multicast_packets_transmitted_lo) + + HILO_U64(sc->eth_stats.total_broadcast_packets_transmitted_hi, + sc->eth_stats.total_broadcast_packets_transmitted_lo); + + stats->ibytes = + HILO_U64(sc->eth_stats.total_bytes_received_hi, + sc->eth_stats.total_bytes_received_lo); + + stats->obytes = + HILO_U64(sc->eth_stats.total_bytes_transmitted_hi, + sc->eth_stats.total_bytes_transmitted_lo); + + stats->ierrors = + HILO_U64(sc->eth_stats.error_bytes_received_hi, + sc->eth_stats.error_bytes_received_lo); + + stats->oerrors = 0; + + stats->rx_nombuf = + HILO_U64(sc->eth_stats.no_buff_discard_hi, + sc->eth_stats.no_buff_discard_lo); + + brb_drops = + HILO_U64(sc->eth_stats.brb_drop_hi, + sc->eth_stats.brb_drop_lo); + + brb_truncates = + HILO_U64(sc->eth_stats.brb_truncate_hi, + sc->eth_stats.brb_truncate_lo); + + brb_truncate_discard = sc->eth_stats.brb_truncate_discard; + + stats->imissed = brb_drops + brb_truncates + + brb_truncate_discard + stats->rx_nombuf; + + return 0; +} + +static int +bnx2x_get_xstats_names(__rte_unused struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, + __rte_unused unsigned limit) +{ + unsigned int i, stat_cnt = RTE_DIM(bnx2x_xstats_strings); + + if (xstats_names != NULL) + for (i = 0; i < stat_cnt; i++) + strlcpy(xstats_names[i].name, + bnx2x_xstats_strings[i].name, + sizeof(xstats_names[i].name)); + + return stat_cnt; +} + +static int +bnx2x_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, + unsigned int n) +{ + struct bnx2x_softc *sc = dev->data->dev_private; + unsigned int num = RTE_DIM(bnx2x_xstats_strings); + + if (n < num) + return num; + + bnx2x_stats_handle(sc, STATS_EVENT_UPDATE); + + for (num = 0; num < n; num++) { + if (bnx2x_xstats_strings[num].offset_hi != + bnx2x_xstats_strings[num].offset_lo) + xstats[num].value = HILO_U64( + *(uint32_t *)((char *)&sc->eth_stats + + bnx2x_xstats_strings[num].offset_hi), + *(uint32_t *)((char *)&sc->eth_stats + + bnx2x_xstats_strings[num].offset_lo)); + else + xstats[num].value = + *(uint64_t *)((char *)&sc->eth_stats + + bnx2x_xstats_strings[num].offset_lo); + xstats[num].id = num; + } + + return num; +} + +static int +bnx2x_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) +{ + struct bnx2x_softc *sc = dev->data->dev_private; + + dev_info->max_rx_queues = sc->max_rx_queues; + 
dev_info->max_tx_queues = sc->max_tx_queues; + dev_info->min_rx_bufsize = BNX2X_MIN_RX_BUF_SIZE; + dev_info->max_rx_pktlen = BNX2X_MAX_RX_PKT_LEN; + dev_info->max_mac_addrs = BNX2X_MAX_MAC_ADDRS; + dev_info->speed_capa = ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G; + dev_info->rx_offload_capa = DEV_RX_OFFLOAD_JUMBO_FRAME; + + dev_info->rx_desc_lim.nb_max = MAX_RX_AVAIL; + dev_info->rx_desc_lim.nb_min = MIN_RX_SIZE_NONTPA; + dev_info->rx_desc_lim.nb_mtu_seg_max = 1; + dev_info->tx_desc_lim.nb_max = MAX_TX_AVAIL; + + return 0; +} + +static int +bnx2x_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr, + uint32_t index, uint32_t pool) +{ + struct bnx2x_softc *sc = dev->data->dev_private; + + if (sc->mac_ops.mac_addr_add) { + sc->mac_ops.mac_addr_add(dev, mac_addr, index, pool); + return 0; + } + return -ENOTSUP; +} + +static void +bnx2x_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index) +{ + struct bnx2x_softc *sc = dev->data->dev_private; + + if (sc->mac_ops.mac_addr_remove) + sc->mac_ops.mac_addr_remove(dev, index); +} + +static const struct eth_dev_ops bnx2x_eth_dev_ops = { + .dev_configure = bnx2x_dev_configure, + .dev_start = bnx2x_dev_start, + .dev_stop = bnx2x_dev_stop, + .dev_close = bnx2x_dev_close, + .promiscuous_enable = bnx2x_promisc_enable, + .promiscuous_disable = bnx2x_promisc_disable, + .allmulticast_enable = bnx2x_dev_allmulticast_enable, + .allmulticast_disable = bnx2x_dev_allmulticast_disable, + .link_update = bnx2x_dev_link_update, + .stats_get = bnx2x_dev_stats_get, + .xstats_get = bnx2x_dev_xstats_get, + .xstats_get_names = bnx2x_get_xstats_names, + .dev_infos_get = bnx2x_dev_infos_get, + .rx_queue_setup = bnx2x_dev_rx_queue_setup, + .rx_queue_release = bnx2x_dev_rx_queue_release, + .tx_queue_setup = bnx2x_dev_tx_queue_setup, + .tx_queue_release = bnx2x_dev_tx_queue_release, + .mac_addr_add = bnx2x_mac_addr_add, + .mac_addr_remove = bnx2x_mac_addr_remove, +}; + +/* + * dev_ops for virtual function + */ +static const struct eth_dev_ops bnx2xvf_eth_dev_ops = { + .dev_configure = bnx2x_dev_configure, + .dev_start = bnx2x_dev_start, + .dev_stop = bnx2x_dev_stop, + .dev_close = bnx2x_dev_close, + .promiscuous_enable = bnx2x_promisc_enable, + .promiscuous_disable = bnx2x_promisc_disable, + .allmulticast_enable = bnx2x_dev_allmulticast_enable, + .allmulticast_disable = bnx2x_dev_allmulticast_disable, + .set_mc_addr_list = bnx2x_dev_set_mc_addr_list, + .link_update = bnx2xvf_dev_link_update, + .stats_get = bnx2x_dev_stats_get, + .xstats_get = bnx2x_dev_xstats_get, + .xstats_get_names = bnx2x_get_xstats_names, + .dev_infos_get = bnx2x_dev_infos_get, + .rx_queue_setup = bnx2x_dev_rx_queue_setup, + .rx_queue_release = bnx2x_dev_rx_queue_release, + .tx_queue_setup = bnx2x_dev_tx_queue_setup, + .tx_queue_release = bnx2x_dev_tx_queue_release, + .mac_addr_add = bnx2x_mac_addr_add, + .mac_addr_remove = bnx2x_mac_addr_remove, +}; + + +static int +bnx2x_common_dev_init(struct rte_eth_dev *eth_dev, int is_vf) +{ + int ret = 0; + struct rte_pci_device *pci_dev; + struct rte_pci_addr pci_addr; + struct bnx2x_softc *sc; + static bool adapter_info = true; + + /* Extract key data structures */ + sc = eth_dev->data->dev_private; + pci_dev = RTE_DEV_TO_PCI(eth_dev->device); + pci_addr = pci_dev->addr; + + snprintf(sc->devinfo.name, NAME_SIZE, PCI_SHORT_PRI_FMT ":dpdk-port-%u", + pci_addr.bus, pci_addr.devid, pci_addr.function, + eth_dev->data->port_id); + + PMD_INIT_FUNC_TRACE(sc); + + eth_dev->dev_ops = is_vf ? 
&bnx2xvf_eth_dev_ops : &bnx2x_eth_dev_ops; + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) { + PMD_DRV_LOG(ERR, sc, "Skipping device init from secondary process"); + return 0; + } + + rte_eth_copy_pci_info(eth_dev, pci_dev); + + sc->pcie_bus = pci_dev->addr.bus; + sc->pcie_device = pci_dev->addr.devid; + + sc->devinfo.vendor_id = pci_dev->id.vendor_id; + sc->devinfo.device_id = pci_dev->id.device_id; + sc->devinfo.subvendor_id = pci_dev->id.subsystem_vendor_id; + sc->devinfo.subdevice_id = pci_dev->id.subsystem_device_id; + + if (is_vf) + sc->flags = BNX2X_IS_VF_FLAG; + + sc->pcie_func = pci_dev->addr.function; + sc->bar[BAR0].base_addr = (void *)pci_dev->mem_resource[0].addr; + if (is_vf) + sc->bar[BAR1].base_addr = (void *) + ((uintptr_t)pci_dev->mem_resource[0].addr + PXP_VF_ADDR_DB_START); + else + sc->bar[BAR1].base_addr = pci_dev->mem_resource[2].addr; + + assert(sc->bar[BAR0].base_addr); + assert(sc->bar[BAR1].base_addr); + + bnx2x_load_firmware(sc); + assert(sc->firmware); + + if (eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) + sc->udp_rss = 1; + + sc->rx_budget = BNX2X_RX_BUDGET; + sc->hc_rx_ticks = BNX2X_RX_TICKS; + sc->hc_tx_ticks = BNX2X_TX_TICKS; + + sc->interrupt_mode = INTR_MODE_SINGLE_MSIX; + sc->rx_mode = BNX2X_RX_MODE_NORMAL; + + sc->pci_dev = pci_dev; + ret = bnx2x_attach(sc); + if (ret) { + PMD_DRV_LOG(ERR, sc, "bnx2x_attach failed (%d)", ret); + return ret; + } + + /* Print important adapter info for the user. */ + if (adapter_info) { + bnx2x_print_adapter_info(sc); + adapter_info = false; + } + + /* schedule periodic poll for slowpath link events */ + if (IS_PF(sc)) { + PMD_DRV_LOG(DEBUG, sc, "Scheduling periodic poll for slowpath link events"); + ret = rte_eal_alarm_set(BNX2X_SP_TIMER_PERIOD, + bnx2x_periodic_start, (void *)eth_dev); + if (ret) { + PMD_DRV_LOG(ERR, sc, "Unable to start periodic" + " timer rc %d", ret); + return -EINVAL; + } + } + + eth_dev->data->mac_addrs = + (struct rte_ether_addr *)sc->link_params.mac_addr; + + if (IS_VF(sc)) { + rte_spinlock_init(&sc->vf2pf_lock); + + ret = bnx2x_dma_alloc(sc, sizeof(struct bnx2x_vf_mbx_msg), + &sc->vf2pf_mbox_mapping, "vf2pf_mbox", + RTE_CACHE_LINE_SIZE); + if (ret) + goto out; + + sc->vf2pf_mbox = (struct bnx2x_vf_mbx_msg *) + sc->vf2pf_mbox_mapping.vaddr; + + ret = bnx2x_dma_alloc(sc, sizeof(struct bnx2x_vf_bulletin), + &sc->pf2vf_bulletin_mapping, "vf2pf_bull", + RTE_CACHE_LINE_SIZE); + if (ret) + goto out; + + sc->pf2vf_bulletin = (struct bnx2x_vf_bulletin *) + sc->pf2vf_bulletin_mapping.vaddr; + + ret = bnx2x_vf_get_resources(sc, sc->max_tx_queues, + sc->max_rx_queues); + if (ret) + goto out; + } + + return 0; + +out: + if (IS_PF(sc)) + bnx2x_periodic_stop(eth_dev); + + return ret; +} + +static int +eth_bnx2x_dev_init(struct rte_eth_dev *eth_dev) +{ + struct bnx2x_softc *sc = eth_dev->data->dev_private; + PMD_INIT_FUNC_TRACE(sc); + return bnx2x_common_dev_init(eth_dev, 0); +} + +static int +eth_bnx2xvf_dev_init(struct rte_eth_dev *eth_dev) +{ + struct bnx2x_softc *sc = eth_dev->data->dev_private; + PMD_INIT_FUNC_TRACE(sc); + return bnx2x_common_dev_init(eth_dev, 1); +} + +static int eth_bnx2x_dev_uninit(struct rte_eth_dev *eth_dev) +{ + /* mac_addrs must not be freed alone because part of dev_private */ + eth_dev->data->mac_addrs = NULL; + return 0; +} + +static struct rte_pci_driver rte_bnx2x_pmd; +static struct rte_pci_driver rte_bnx2xvf_pmd; + +static int eth_bnx2x_pci_probe(struct rte_pci_driver *pci_drv, + struct rte_pci_device *pci_dev) +{ + if (pci_drv == 
&rte_bnx2x_pmd) + return rte_eth_dev_pci_generic_probe(pci_dev, + sizeof(struct bnx2x_softc), eth_bnx2x_dev_init); + else if (pci_drv == &rte_bnx2xvf_pmd) + return rte_eth_dev_pci_generic_probe(pci_dev, + sizeof(struct bnx2x_softc), eth_bnx2xvf_dev_init); + else + return -EINVAL; +} + +static int eth_bnx2x_pci_remove(struct rte_pci_device *pci_dev) +{ + return rte_eth_dev_pci_generic_remove(pci_dev, eth_bnx2x_dev_uninit); +} + +static struct rte_pci_driver rte_bnx2x_pmd = { + .id_table = pci_id_bnx2x_map, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, + .probe = eth_bnx2x_pci_probe, + .remove = eth_bnx2x_pci_remove, +}; + +/* + * virtual function driver struct + */ +static struct rte_pci_driver rte_bnx2xvf_pmd = { + .id_table = pci_id_bnx2xvf_map, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING, + .probe = eth_bnx2x_pci_probe, + .remove = eth_bnx2x_pci_remove, +}; + +RTE_PMD_REGISTER_PCI(net_bnx2x, rte_bnx2x_pmd); +RTE_PMD_REGISTER_PCI_TABLE(net_bnx2x, pci_id_bnx2x_map); +RTE_PMD_REGISTER_KMOD_DEP(net_bnx2x, "* igb_uio | uio_pci_generic | vfio-pci"); +RTE_PMD_REGISTER_PCI(net_bnx2xvf, rte_bnx2xvf_pmd); +RTE_PMD_REGISTER_PCI_TABLE(net_bnx2xvf, pci_id_bnx2xvf_map); +RTE_PMD_REGISTER_KMOD_DEP(net_bnx2xvf, "* igb_uio | vfio-pci"); + +RTE_INIT(bnx2x_init_log) +{ + bnx2x_logtype_init = rte_log_register("pmd.net.bnx2x.init"); + if (bnx2x_logtype_init >= 0) + rte_log_set_level(bnx2x_logtype_init, RTE_LOG_NOTICE); + bnx2x_logtype_driver = rte_log_register("pmd.net.bnx2x.driver"); + if (bnx2x_logtype_driver >= 0) + rte_log_set_level(bnx2x_logtype_driver, RTE_LOG_NOTICE); +} diff --git a/src/spdk/dpdk/drivers/net/bnx2x/bnx2x_ethdev.h b/src/spdk/dpdk/drivers/net/bnx2x/bnx2x_ethdev.h new file mode 100644 index 000000000..f712bb3e8 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnx2x/bnx2x_ethdev.h @@ -0,0 +1,80 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2013-2015 Brocade Communications Systems, Inc. + * Copyright (c) 2015-2018 Cavium Inc. + * All rights reserved. 
+ * www.cavium.com + */ + +#ifndef PMD_BNX2X_ETHDEV_H +#define PMD_BNX2X_ETHDEV_H + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "bnx2x_rxtx.h" +#include "bnx2x_logs.h" + +#define DELAY(x) rte_delay_us(x) +#define DELAY_MS(x) rte_delay_ms(x) +#define usec_delay(x) DELAY(x) +#define msec_delay(x) DELAY(1000*(x)) + +#define FALSE 0 +#define TRUE 1 + +#define min(a,b) RTE_MIN(a,b) + +#define mb() rte_mb() +#define wmb() rte_wmb() +#define rmb() rte_rmb() + +#define MAX_QUEUES sysconf(_SC_NPROCESSORS_CONF) + +#define BNX2X_MIN_RX_BUF_SIZE 1024 +#define BNX2X_MAX_RX_PKT_LEN 15872 +#define BNX2X_MAX_MAC_ADDRS 1 + +/* Hardware RX tick timer (usecs) */ +#define BNX2X_RX_TICKS 25 +/* Hardware TX tick timer (usecs) */ +#define BNX2X_TX_TICKS 50 +/* Maximum number of Rx packets to process at a time */ +#define BNX2X_RX_BUDGET 0xffffffff + +#define BNX2X_SP_TIMER_PERIOD US_PER_S /* 1 second */ + +#endif + +/* MAC address operations */ +struct bnx2x_mac_ops { + void (*mac_addr_add)(struct rte_eth_dev *dev, + struct rte_ether_addr *addr, + uint16_t index, uint32_t pool); /* not implemented yet */ + void (*mac_addr_remove)(struct rte_eth_dev *dev, uint16_t index); /* not implemented yet */ +}; diff --git a/src/spdk/dpdk/drivers/net/bnx2x/bnx2x_logs.h b/src/spdk/dpdk/drivers/net/bnx2x/bnx2x_logs.h new file mode 100644 index 000000000..f0cf69c16 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnx2x/bnx2x_logs.h @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2013-2015 Brocade Communications Systems, Inc. + * Copyright (c) 2015-2018 Cavium Inc. + * All rights reserved. + * www.cavium.com + */ + +#ifndef _PMD_LOGS_H_ +#define _PMD_LOGS_H_ + +extern int bnx2x_logtype_init; +#define PMD_INIT_LOG(level, sc, fmt, args...) \ + rte_log(RTE_LOG_ ## level, bnx2x_logtype_init, \ + "[bnx2x_pmd: %s] %s() " fmt "\n", (sc)->devinfo.name, __func__, ##args) + +#define PMD_INIT_FUNC_TRACE(sc) PMD_INIT_LOG(DEBUG, sc, " >>") + +extern int bnx2x_logtype_driver; +#define PMD_DRV_LOG_RAW(level, sc, fmt, args...) \ + rte_log(RTE_LOG_ ## level, bnx2x_logtype_driver, \ + "[%s:%d(%s)] " fmt, __func__, __LINE__, \ + (sc)->devinfo.name ? (sc)->devinfo.name : "", ## args) + +#define PMD_DRV_LOG(level, sc, fmt, args...) \ + PMD_DRV_LOG_RAW(level, sc, fmt "\n", ## args) + +#ifdef RTE_LIBRTE_BNX2X_DEBUG_RX +#define PMD_RX_LOG(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, bnx2x_logtype_driver, \ + "%s(): " fmt "\n", __func__, ## args) +#else +#define PMD_RX_LOG(level, fmt, args...) do { } while(0) +#endif + +#ifdef RTE_LIBRTE_BNX2X_DEBUG_TX +#define PMD_TX_LOG(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, bnx2x_logtype_driver, \ + "%s(): " fmt "\n", __func__, ## args) +#else +#define PMD_TX_LOG(level, fmt, args...) do { } while(0) +#endif + +#ifdef RTE_LIBRTE_BNX2X_DEBUG_PERIODIC +#define PMD_DEBUG_PERIODIC_LOG(level, sc, fmt, args...) \ + rte_log(RTE_LOG_ ## level, bnx2x_logtype_driver, \ + "%s(%s): " fmt "\n", __func__, \ + (sc)->devinfo.name ? (sc)->devinfo.name : "", ## args) +#else +#define PMD_DEBUG_PERIODIC_LOG(level, sc, fmt, args...) 
do { } while (0) +#endif + +#endif /* _PMD_LOGS_H_ */ diff --git a/src/spdk/dpdk/drivers/net/bnx2x/bnx2x_osal.h b/src/spdk/dpdk/drivers/net/bnx2x/bnx2x_osal.h new file mode 100644 index 000000000..c4818bb22 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnx2x/bnx2x_osal.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2019 Cavium Inc. + * + * All rights reserved. + * www.cavium.com + */ + +#ifndef BNX2X_OSAL_H +#define BNX2X_OSAL_H + +#ifdef RTE_EXEC_ENV_FREEBSD +#include +#else +#include +#endif + +#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN +#ifndef __LITTLE_ENDIAN +#define __LITTLE_ENDIAN RTE_LITTLE_ENDIAN +#endif +#undef __BIG_ENDIAN +#elif RTE_BYTE_ORDER == RTE_BIG_ENDIAN +#ifndef __BIG_ENDIAN +#define __BIG_ENDIAN RTE_BIG_ENDIAN +#endif +#undef __LITTLE_ENDIAN +#endif + +#ifdef RTE_EXEC_ENV_FREEBSD +#define __le16 uint16_t +#define __le32 uint32_t +#define __le64 uint64_t +#endif + +#endif /* BNX2X_OSAL_H */ diff --git a/src/spdk/dpdk/drivers/net/bnx2x/bnx2x_rxtx.c b/src/spdk/dpdk/drivers/net/bnx2x/bnx2x_rxtx.c new file mode 100644 index 000000000..57e2ce504 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnx2x/bnx2x_rxtx.c @@ -0,0 +1,510 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2013-2015 Brocade Communications Systems, Inc. + * Copyright (c) 2015-2018 Cavium Inc. + * All rights reserved. + * www.cavium.com + */ + +#include "bnx2x.h" +#include "bnx2x_rxtx.h" + +static const struct rte_memzone * +ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name, + uint16_t queue_id, uint32_t ring_size, int socket_id) +{ + return rte_eth_dma_zone_reserve(dev, ring_name, queue_id, + ring_size, BNX2X_PAGE_SIZE, socket_id); +} + +static void +bnx2x_rx_queue_release(struct bnx2x_rx_queue *rx_queue) +{ + uint16_t i; + struct rte_mbuf **sw_ring; + + if (NULL != rx_queue) { + + sw_ring = rx_queue->sw_ring; + if (NULL != sw_ring) { + for (i = 0; i < rx_queue->nb_rx_desc; i++) { + if (NULL != sw_ring[i]) + rte_pktmbuf_free(sw_ring[i]); + } + rte_free(sw_ring); + } + rte_free(rx_queue); + } +} + +void +bnx2x_dev_rx_queue_release(void *rxq) +{ + bnx2x_rx_queue_release(rxq); +} + +int +bnx2x_dev_rx_queue_setup(struct rte_eth_dev *dev, + uint16_t queue_idx, + uint16_t nb_desc, + unsigned int socket_id, + __rte_unused const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *mp) +{ + uint16_t j, idx; + const struct rte_memzone *dma; + struct bnx2x_rx_queue *rxq; + uint32_t dma_size; + struct rte_mbuf *mbuf; + struct bnx2x_softc *sc = dev->data->dev_private; + struct bnx2x_fastpath *fp = &sc->fp[queue_idx]; + struct eth_rx_cqe_next_page *nextpg; + rte_iova_t *rx_bd; + rte_iova_t busaddr; + + /* First allocate the rx queue data structure */ + rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct bnx2x_rx_queue), + RTE_CACHE_LINE_SIZE, socket_id); + if (NULL == rxq) { + PMD_DRV_LOG(ERR, sc, "rte_zmalloc for rxq failed!"); + return -ENOMEM; + } + rxq->sc = sc; + rxq->mb_pool = mp; + rxq->queue_id = queue_idx; + rxq->port_id = dev->data->port_id; + + rxq->nb_rx_pages = 1; + while (USABLE_RX_BD(rxq) < nb_desc) + rxq->nb_rx_pages <<= 1; + + rxq->nb_rx_desc = TOTAL_RX_BD(rxq); + sc->rx_ring_size = USABLE_RX_BD(rxq); + rxq->nb_cq_pages = RCQ_BD_PAGES(rxq); + + PMD_DRV_LOG(DEBUG, sc, "fp[%02d] req_bd=%u, usable_bd=%lu, " + "total_bd=%lu, rx_pages=%u, cq_pages=%u", + queue_idx, nb_desc, (unsigned long)USABLE_RX_BD(rxq), + (unsigned long)TOTAL_RX_BD(rxq), rxq->nb_rx_pages, + rxq->nb_cq_pages); + + /* Allocate RX ring hardware descriptors */ + dma_size = 
rxq->nb_rx_desc * sizeof(struct eth_rx_bd); + dma = ring_dma_zone_reserve(dev, "hw_ring", queue_idx, dma_size, socket_id); + if (NULL == dma) { + PMD_RX_LOG(ERR, "ring_dma_zone_reserve for rx_ring failed!"); + bnx2x_rx_queue_release(rxq); + return -ENOMEM; + } + fp->rx_desc_mapping = rxq->rx_ring_phys_addr = (uint64_t)dma->iova; + rxq->rx_ring = (uint64_t*)dma->addr; + memset((void *)rxq->rx_ring, 0, dma_size); + + /* Link the RX chain pages. */ + for (j = 1; j <= rxq->nb_rx_pages; j++) { + rx_bd = &rxq->rx_ring[TOTAL_RX_BD_PER_PAGE * j - 2]; + busaddr = rxq->rx_ring_phys_addr + BNX2X_PAGE_SIZE * (j % rxq->nb_rx_pages); + *rx_bd = busaddr; + } + + /* Allocate software ring */ + dma_size = rxq->nb_rx_desc * sizeof(struct bnx2x_rx_entry); + rxq->sw_ring = rte_zmalloc_socket("sw_ring", dma_size, + RTE_CACHE_LINE_SIZE, + socket_id); + if (NULL == rxq->sw_ring) { + PMD_RX_LOG(ERR, "rte_zmalloc for sw_ring failed!"); + bnx2x_rx_queue_release(rxq); + return -ENOMEM; + } + + /* Initialize software ring entries */ + for (idx = 0; idx < rxq->nb_rx_desc; idx = NEXT_RX_BD(idx)) { + mbuf = rte_mbuf_raw_alloc(mp); + if (NULL == mbuf) { + PMD_RX_LOG(ERR, "RX mbuf alloc failed queue_id=%u, idx=%d", + (unsigned)rxq->queue_id, idx); + bnx2x_rx_queue_release(rxq); + return -ENOMEM; + } + rxq->sw_ring[idx] = mbuf; + rxq->rx_ring[idx] = + rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf)); + } + rxq->pkt_first_seg = NULL; + rxq->pkt_last_seg = NULL; + rxq->rx_bd_head = 0; + rxq->rx_bd_tail = rxq->nb_rx_desc; + + /* Allocate CQ chain. */ + dma_size = BNX2X_RX_CHAIN_PAGE_SZ * rxq->nb_cq_pages; + dma = ring_dma_zone_reserve(dev, "bnx2x_rcq", queue_idx, dma_size, socket_id); + if (NULL == dma) { + PMD_RX_LOG(ERR, "RCQ alloc failed"); + return -ENOMEM; + } + fp->rx_comp_mapping = rxq->cq_ring_phys_addr = (uint64_t)dma->iova; + rxq->cq_ring = (union eth_rx_cqe*)dma->addr; + + /* Link the CQ chain pages. 
*/ + for (j = 1; j <= rxq->nb_cq_pages; j++) { + nextpg = &rxq->cq_ring[TOTAL_RCQ_ENTRIES_PER_PAGE * j - 1].next_page_cqe; + busaddr = rxq->cq_ring_phys_addr + BNX2X_PAGE_SIZE * (j % rxq->nb_cq_pages); + nextpg->addr_hi = rte_cpu_to_le_32(U64_HI(busaddr)); + nextpg->addr_lo = rte_cpu_to_le_32(U64_LO(busaddr)); + } + rxq->rx_cq_head = 0; + rxq->rx_cq_tail = TOTAL_RCQ_ENTRIES(rxq); + + dev->data->rx_queues[queue_idx] = rxq; + if (!sc->rx_queues) sc->rx_queues = dev->data->rx_queues; + + return 0; +} + +static void +bnx2x_tx_queue_release(struct bnx2x_tx_queue *tx_queue) +{ + uint16_t i; + struct rte_mbuf **sw_ring; + + if (NULL != tx_queue) { + + sw_ring = tx_queue->sw_ring; + if (NULL != sw_ring) { + for (i = 0; i < tx_queue->nb_tx_desc; i++) { + if (NULL != sw_ring[i]) + rte_pktmbuf_free(sw_ring[i]); + } + rte_free(sw_ring); + } + rte_free(tx_queue); + } +} + +void +bnx2x_dev_tx_queue_release(void *txq) +{ + bnx2x_tx_queue_release(txq); +} + +static uint16_t +bnx2x_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) +{ + struct bnx2x_tx_queue *txq; + struct bnx2x_softc *sc; + struct bnx2x_fastpath *fp; + uint16_t nb_tx_pkts; + uint16_t nb_pkt_sent = 0; + uint32_t ret; + + txq = p_txq; + sc = txq->sc; + fp = &sc->fp[txq->queue_id]; + + if ((unlikely((txq->nb_tx_desc - txq->nb_tx_avail) > + txq->tx_free_thresh))) + bnx2x_txeof(sc, fp); + + nb_tx_pkts = RTE_MIN(nb_pkts, txq->nb_tx_avail / BDS_PER_TX_PKT); + if (unlikely(nb_tx_pkts == 0)) + return 0; + + while (nb_tx_pkts--) { + struct rte_mbuf *m = *tx_pkts++; + assert(m != NULL); + ret = bnx2x_tx_encap(txq, m); + fp->tx_db.data.prod += ret; + nb_pkt_sent++; + } + + bnx2x_update_fp_sb_idx(fp); + mb(); + DOORBELL(sc, txq->queue_id, fp->tx_db.raw); + mb(); + + if ((txq->nb_tx_desc - txq->nb_tx_avail) > + txq->tx_free_thresh) + bnx2x_txeof(sc, fp); + + return nb_pkt_sent; +} + +int +bnx2x_dev_tx_queue_setup(struct rte_eth_dev *dev, + uint16_t queue_idx, + uint16_t nb_desc, + unsigned int socket_id, + const struct rte_eth_txconf *tx_conf) +{ + uint16_t i; + unsigned int tsize; + const struct rte_memzone *tz; + struct bnx2x_tx_queue *txq; + struct eth_tx_next_bd *tx_n_bd; + uint64_t busaddr; + struct bnx2x_softc *sc = dev->data->dev_private; + struct bnx2x_fastpath *fp = &sc->fp[queue_idx]; + + /* First allocate the tx queue data structure */ + txq = rte_zmalloc("ethdev TX queue", sizeof(struct bnx2x_tx_queue), + RTE_CACHE_LINE_SIZE); + if (txq == NULL) + return -ENOMEM; + txq->sc = sc; + + txq->nb_tx_pages = 1; + while (USABLE_TX_BD(txq) < nb_desc) + txq->nb_tx_pages <<= 1; + + txq->nb_tx_desc = TOTAL_TX_BD(txq); + sc->tx_ring_size = TOTAL_TX_BD(txq); + + txq->tx_free_thresh = tx_conf->tx_free_thresh ? 
+ tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH; + txq->tx_free_thresh = min(txq->tx_free_thresh, + txq->nb_tx_desc - BDS_PER_TX_PKT); + + PMD_DRV_LOG(DEBUG, sc, "fp[%02d] req_bd=%u, thresh=%u, usable_bd=%lu, " + "total_bd=%lu, tx_pages=%u", + queue_idx, nb_desc, txq->tx_free_thresh, + (unsigned long)USABLE_TX_BD(txq), + (unsigned long)TOTAL_TX_BD(txq), txq->nb_tx_pages); + + /* Allocate TX ring hardware descriptors */ + tsize = txq->nb_tx_desc * sizeof(union eth_tx_bd_types); + tz = ring_dma_zone_reserve(dev, "tx_hw_ring", queue_idx, tsize, socket_id); + if (tz == NULL) { + bnx2x_tx_queue_release(txq); + return -ENOMEM; + } + fp->tx_desc_mapping = txq->tx_ring_phys_addr = (uint64_t)tz->iova; + txq->tx_ring = (union eth_tx_bd_types *) tz->addr; + memset(txq->tx_ring, 0, tsize); + + /* Allocate software ring */ + tsize = txq->nb_tx_desc * sizeof(struct rte_mbuf *); + txq->sw_ring = rte_zmalloc("tx_sw_ring", tsize, + RTE_CACHE_LINE_SIZE); + if (txq->sw_ring == NULL) { + bnx2x_tx_queue_release(txq); + return -ENOMEM; + } + + /* PMD_DRV_LOG(DEBUG, sc, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64, + txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr); */ + + /* Link TX pages */ + for (i = 1; i <= txq->nb_tx_pages; i++) { + tx_n_bd = &txq->tx_ring[TOTAL_TX_BD_PER_PAGE * i - 1].next_bd; + busaddr = txq->tx_ring_phys_addr + BNX2X_PAGE_SIZE * (i % txq->nb_tx_pages); + tx_n_bd->addr_hi = rte_cpu_to_le_32(U64_HI(busaddr)); + tx_n_bd->addr_lo = rte_cpu_to_le_32(U64_LO(busaddr)); + /* PMD_DRV_LOG(DEBUG, sc, "link tx page %lu", + * (TOTAL_TX_BD_PER_PAGE * i - 1)); + */ + } + + txq->queue_id = queue_idx; + txq->port_id = dev->data->port_id; + txq->tx_pkt_tail = 0; + txq->tx_pkt_head = 0; + txq->tx_bd_tail = 0; + txq->tx_bd_head = 0; + txq->nb_tx_avail = txq->nb_tx_desc; + dev->data->tx_queues[queue_idx] = txq; + if (!sc->tx_queues) sc->tx_queues = dev->data->tx_queues; + + return 0; +} + +static inline void +bnx2x_upd_rx_prod_fast(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp, + uint16_t rx_bd_prod, uint16_t rx_cq_prod) +{ + struct ustorm_eth_rx_producers rx_prods = { 0 }; + uint32_t *val = NULL; + + rx_prods.bd_prod = rx_bd_prod; + rx_prods.cqe_prod = rx_cq_prod; + + val = (uint32_t *)&rx_prods; + REG_WR(sc, fp->ustorm_rx_prods_offset, val[0]); +} + +static uint16_t +bnx2x_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) +{ + struct bnx2x_rx_queue *rxq = p_rxq; + struct bnx2x_softc *sc = rxq->sc; + struct bnx2x_fastpath *fp = &sc->fp[rxq->queue_id]; + uint32_t nb_rx = 0; + uint16_t hw_cq_cons, sw_cq_cons, sw_cq_prod; + uint16_t bd_cons, bd_prod; + struct rte_mbuf *new_mb; + uint16_t rx_pref; + struct eth_fast_path_rx_cqe *cqe_fp; + uint16_t len, pad, bd_len, buf_len; + struct rte_mbuf *rx_mb = NULL; + static bool log_once = true; + + rte_spinlock_lock(&(fp)->rx_mtx); + + hw_cq_cons = le16toh(*fp->rx_cq_cons_sb); + if ((hw_cq_cons & USABLE_RCQ_ENTRIES_PER_PAGE) == + USABLE_RCQ_ENTRIES_PER_PAGE) { + ++hw_cq_cons; + } + + bd_cons = rxq->rx_bd_head; + bd_prod = rxq->rx_bd_tail; + sw_cq_cons = rxq->rx_cq_head; + sw_cq_prod = rxq->rx_cq_tail; + + if (sw_cq_cons == hw_cq_cons) { + rte_spinlock_unlock(&(fp)->rx_mtx); + return 0; + } + + while (nb_rx < nb_pkts && sw_cq_cons != hw_cq_cons) { + + bd_prod &= MAX_RX_BD(rxq); + bd_cons &= MAX_RX_BD(rxq); + + cqe_fp = &rxq->cq_ring[sw_cq_cons & MAX_RX_BD(rxq)].fast_path_cqe; + + if (unlikely(CQE_TYPE_SLOW(cqe_fp->type_error_flags & ETH_FAST_PATH_RX_CQE_TYPE))) { + PMD_RX_LOG(ERR, "slowpath event during traffic processing"); + break; + } + + if 
(unlikely(cqe_fp->type_error_flags & ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG)) { + PMD_RX_LOG(ERR, "flags 0x%x rx packet %u", + cqe_fp->type_error_flags, sw_cq_cons); + goto next_rx; + } + + len = cqe_fp->pkt_len_or_gro_seg_len; + pad = cqe_fp->placement_offset; + bd_len = cqe_fp->len_on_bd; + buf_len = rxq->sw_ring[bd_cons]->buf_len; + + /* Check for sufficient buffer length */ + if (unlikely(buf_len < len + (pad + RTE_PKTMBUF_HEADROOM))) { + if (unlikely(log_once)) { + PMD_DRV_LOG(ERR, sc, "mbuf size %d is not enough to hold Rx packet length more than %d", + buf_len - RTE_PKTMBUF_HEADROOM, + buf_len - + (pad + RTE_PKTMBUF_HEADROOM)); + log_once = false; + } + goto next_rx; + } + + new_mb = rte_mbuf_raw_alloc(rxq->mb_pool); + if (unlikely(!new_mb)) { + PMD_RX_LOG(ERR, "mbuf alloc fail fp[%02d]", fp->index); + rte_eth_devices[rxq->port_id].data-> + rx_mbuf_alloc_failed++; + goto next_rx; + } + + rx_mb = rxq->sw_ring[bd_cons]; + rxq->sw_ring[bd_cons] = new_mb; + rxq->rx_ring[bd_prod] = + rte_cpu_to_le_64(rte_mbuf_data_iova_default(new_mb)); + + rx_pref = NEXT_RX_BD(bd_cons) & MAX_RX_BD(rxq); + rte_prefetch0(rxq->sw_ring[rx_pref]); + if ((rx_pref & 0x3) == 0) { + rte_prefetch0(&rxq->rx_ring[rx_pref]); + rte_prefetch0(&rxq->sw_ring[rx_pref]); + } + + rx_mb->data_off = pad + RTE_PKTMBUF_HEADROOM; + rx_mb->nb_segs = 1; + rx_mb->next = NULL; + rx_mb->pkt_len = len; + rx_mb->data_len = bd_len; + rx_mb->port = rxq->port_id; + rte_prefetch1(rte_pktmbuf_mtod(rx_mb, void *)); + + /* + * If we received a packet with a vlan tag, + * attach that information to the packet. + */ + if (cqe_fp->pars_flags.flags & PARSING_FLAGS_VLAN) { + rx_mb->vlan_tci = cqe_fp->vlan_tag; + rx_mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED; + } + + rx_pkts[nb_rx] = rx_mb; + nb_rx++; + + /* limit spinning on the queue */ + if (unlikely(nb_rx == sc->rx_budget)) { + PMD_RX_LOG(ERR, "Limit spinning on the queue"); + break; + } + +next_rx: + bd_cons = NEXT_RX_BD(bd_cons); + bd_prod = NEXT_RX_BD(bd_prod); + sw_cq_prod = NEXT_RCQ_IDX(sw_cq_prod); + sw_cq_cons = NEXT_RCQ_IDX(sw_cq_cons); + } + rxq->rx_bd_head = bd_cons; + rxq->rx_bd_tail = bd_prod; + rxq->rx_cq_head = sw_cq_cons; + rxq->rx_cq_tail = sw_cq_prod; + + bnx2x_upd_rx_prod_fast(sc, fp, bd_prod, sw_cq_prod); + + rte_spinlock_unlock(&(fp)->rx_mtx); + + return nb_rx; +} + +static uint16_t +bnx2x_rxtx_pkts_dummy(__rte_unused void *p_rxq, + __rte_unused struct rte_mbuf **rx_pkts, + __rte_unused uint16_t nb_pkts) +{ + return 0; +} + +void bnx2x_dev_rxtx_init_dummy(struct rte_eth_dev *dev) +{ + dev->rx_pkt_burst = bnx2x_rxtx_pkts_dummy; + dev->tx_pkt_burst = bnx2x_rxtx_pkts_dummy; +} + +void bnx2x_dev_rxtx_init(struct rte_eth_dev *dev) +{ + dev->rx_pkt_burst = bnx2x_recv_pkts; + dev->tx_pkt_burst = bnx2x_xmit_pkts; +} + +void +bnx2x_dev_clear_queues(struct rte_eth_dev *dev) +{ + struct bnx2x_softc *sc = dev->data->dev_private; + uint8_t i; + + PMD_INIT_FUNC_TRACE(sc); + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + struct bnx2x_tx_queue *txq = dev->data->tx_queues[i]; + if (txq != NULL) { + bnx2x_tx_queue_release(txq); + dev->data->tx_queues[i] = NULL; + } + } + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + struct bnx2x_rx_queue *rxq = dev->data->rx_queues[i]; + if (rxq != NULL) { + bnx2x_rx_queue_release(rxq); + dev->data->rx_queues[i] = NULL; + } + } +} diff --git a/src/spdk/dpdk/drivers/net/bnx2x/bnx2x_rxtx.h b/src/spdk/dpdk/drivers/net/bnx2x/bnx2x_rxtx.h new file mode 100644 index 000000000..3f4692b47 --- /dev/null +++ 
b/src/spdk/dpdk/drivers/net/bnx2x/bnx2x_rxtx.h @@ -0,0 +1,81 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2013-2015 Brocade Communications Systems, Inc. + * Copyright (c) 2015-2018 Cavium Inc. + * All rights reserved. + * www.cavium.com + */ + +#ifndef _BNX2X_RXTX_H_ +#define _BNX2X_RXTX_H_ + +#define DEFAULT_TX_FREE_THRESH 64 +#define RTE_PMD_BNX2X_TX_MAX_BURST 1 + +/** + * Structure associated with each descriptor of the RX ring of a RX queue. + */ +struct bnx2x_rx_entry { + struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */ +}; + +/** + * Structure associated with each RX queue. + */ +struct bnx2x_rx_queue { + struct rte_mempool *mb_pool; /**< mbuf pool to populate RX ring. */ + union eth_rx_cqe *cq_ring; /**< RCQ ring virtual address. */ + uint64_t cq_ring_phys_addr; /**< RCQ ring DMA address. */ + uint64_t *rx_ring; /**< RX ring virtual address. */ + uint64_t rx_ring_phys_addr; /**< RX ring DMA address. */ + struct rte_mbuf **sw_ring; /**< address of RX software ring. */ + struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */ + struct rte_mbuf *pkt_last_seg; /**< Last segment of current packet. */ + uint16_t nb_cq_pages; /**< number of RCQ pages. */ + uint16_t nb_rx_desc; /**< number of RX descriptors. */ + uint16_t nb_rx_pages; /**< number of RX pages. */ + uint16_t rx_bd_head; /**< Index of current rx bd. */ + uint16_t rx_bd_tail; /**< Index of last rx bd. */ + uint16_t rx_cq_head; /**< Index of current rcq bd. */ + uint16_t rx_cq_tail; /**< Index of last rcq bd. */ + uint16_t queue_id; /**< RX queue index. */ + uint16_t port_id; /**< Device port identifier. */ + struct bnx2x_softc *sc; /**< Ptr to dev_private data. */ +}; + +/** + * Structure associated with each TX queue. + */ +struct bnx2x_tx_queue { + /** TX ring virtual address. */ + union eth_tx_bd_types *tx_ring; /**< TX ring virtual address. */ + uint64_t tx_ring_phys_addr; /**< TX ring DMA address. */ + struct rte_mbuf **sw_ring; /**< virtual address of SW ring. */ + uint16_t tx_pkt_tail; /**< Index of current tx pkt. */ + uint16_t tx_pkt_head; /**< Index of last pkt counted by txeof. */ + uint16_t tx_bd_tail; /**< Index of current tx bd. */ + uint16_t tx_bd_head; /**< Index of last bd counted by txeof. */ + uint16_t nb_tx_desc; /**< number of TX descriptors. */ + uint16_t tx_free_thresh; /**< minimum TX before freeing. */ + uint16_t nb_tx_avail; /**< Number of TX descriptors available. */ + uint16_t nb_tx_pages; /**< number of TX pages */ + uint16_t queue_id; /**< TX queue index. */ + uint16_t port_id; /**< Device port identifier. 
*/ + struct bnx2x_softc *sc; /**< Ptr to dev_private data */ +}; + +int bnx2x_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id, + uint16_t nb_rx_desc, unsigned int socket_id, + const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *mb_pool); + +int bnx2x_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id, + uint16_t nb_tx_desc, unsigned int socket_id, + const struct rte_eth_txconf *tx_conf); + +void bnx2x_dev_rx_queue_release(void *rxq); +void bnx2x_dev_tx_queue_release(void *txq); +void bnx2x_dev_rxtx_init(struct rte_eth_dev *dev); +void bnx2x_dev_rxtx_init_dummy(struct rte_eth_dev *dev); +void bnx2x_dev_clear_queues(struct rte_eth_dev *dev); + +#endif /* _BNX2X_RXTX_H_ */ diff --git a/src/spdk/dpdk/drivers/net/bnx2x/bnx2x_stats.c b/src/spdk/dpdk/drivers/net/bnx2x/bnx2x_stats.c new file mode 100644 index 000000000..1cd972591 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnx2x/bnx2x_stats.c @@ -0,0 +1,1585 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2007-2013 Broadcom Corporation. + * + * Eric Davis + * David Christensen + * Gary Zambrano + * + * Copyright (c) 2013-2015 Brocade Communications Systems, Inc. + * Copyright (c) 2015-2018 Cavium Inc. + * All rights reserved. + * www.cavium.com + */ + +#include "bnx2x.h" +#include "bnx2x_stats.h" + +#ifdef __i386__ +#define BITS_PER_LONG 32 +#else +#define BITS_PER_LONG 64 +#endif + +static inline uint16_t +bnx2x_get_port_stats_dma_len(struct bnx2x_softc *sc) +{ + uint16_t res = 0; + uint32_t size; + + /* 'newest' convention - shmem2 contains the size of the port stats */ + if (SHMEM2_HAS(sc, sizeof_port_stats)) { + size = SHMEM2_RD(sc, sizeof_port_stats); + if (size) { + res = size; + } + + /* prevent newer BC from causing buffer overflow */ + if (res > sizeof(struct host_port_stats)) { + res = sizeof(struct host_port_stats); + } + } + + /* + * Older convention - all BCs support the port stats fields up until + * the 'not_used' field + */ + if (!res) { + res = (offsetof(struct host_port_stats, not_used) + 4); + + /* if PFC stats are supported by the MFW, DMA them as well */ + if (sc->devinfo.bc_ver >= REQ_BC_VER_4_PFC_STATS_SUPPORTED) { + res += (offsetof(struct host_port_stats, pfc_frames_rx_lo) - + offsetof(struct host_port_stats, pfc_frames_tx_hi) + 4); + } + } + + res >>= 2; + + return res; +} + +/* + * Init service functions + */ + +/* + * Post the next statistics ramrod. Protect it with the lock in + * order to ensure the strict order between statistics ramrods + * (each ramrod has a sequence number passed in a + * sc->fw_stats_req->hdr.drv_stats_counter and ramrods must be + * sent in order). 
+ */ +static void +bnx2x_storm_stats_post(struct bnx2x_softc *sc) +{ + int rc; + + if (!sc->stats_pending) { + if (sc->stats_pending) { + return; + } + + sc->fw_stats_req->hdr.drv_stats_counter = + htole16(sc->stats_counter++); + + PMD_DEBUG_PERIODIC_LOG(DEBUG, sc, + "sending statistics ramrod %d", + le16toh(sc->fw_stats_req->hdr.drv_stats_counter)); + + /* adjust the ramrod to include VF queues statistics */ + + /* send FW stats ramrod */ + rc = bnx2x_sp_post(sc, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0, + U64_HI(sc->fw_stats_req_mapping), + U64_LO(sc->fw_stats_req_mapping), + NONE_CONNECTION_TYPE); + if (rc == 0) { + sc->stats_pending = 1; + } + } +} + +static void +bnx2x_hw_stats_post(struct bnx2x_softc *sc) +{ + struct dmae_command *dmae = &sc->stats_dmae; + uint32_t *stats_comp = BNX2X_SP(sc, stats_comp); + int loader_idx; + uint32_t opcode; + + *stats_comp = DMAE_COMP_VAL; + if (CHIP_REV_IS_SLOW(sc)) { + return; + } + + /* Update MCP's statistics if possible */ + if (sc->func_stx) { + rte_memcpy(BNX2X_SP(sc, func_stats), &sc->func_stats, + sizeof(sc->func_stats)); + } + + /* loader */ + if (sc->executer_idx) { + loader_idx = PMF_DMAE_C(sc); + opcode = bnx2x_dmae_opcode(sc, DMAE_SRC_PCI, DMAE_DST_GRC, + TRUE, DMAE_COMP_GRC); + opcode = bnx2x_dmae_opcode_clr_src_reset(opcode); + + memset(dmae, 0, sizeof(struct dmae_command)); + dmae->opcode = opcode; + dmae->src_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, dmae[0])); + dmae->src_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, dmae[0])); + dmae->dst_addr_lo = ((DMAE_REG_CMD_MEM + + sizeof(struct dmae_command) * + (loader_idx + 1)) >> 2); + dmae->dst_addr_hi = 0; + dmae->len = sizeof(struct dmae_command) >> 2; + dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx + 1] >> 2); + dmae->comp_addr_hi = 0; + dmae->comp_val = 1; + + *stats_comp = 0; + bnx2x_post_dmae(sc, dmae, loader_idx); + } else if (sc->func_stx) { + *stats_comp = 0; + bnx2x_post_dmae(sc, dmae, INIT_DMAE_C(sc)); + } +} + +static int +bnx2x_stats_comp(struct bnx2x_softc *sc) +{ + uint32_t *stats_comp = BNX2X_SP(sc, stats_comp); + int cnt = 10; + + while (*stats_comp != DMAE_COMP_VAL) { + if (!cnt) { + PMD_DRV_LOG(ERR, sc, "Timeout waiting for stats finished"); + break; + } + + cnt--; + DELAY(1000); + } + + return 1; +} + +/* + * Statistics service functions + */ + +static void +bnx2x_stats_pmf_update(struct bnx2x_softc *sc) +{ + struct dmae_command *dmae; + uint32_t opcode; + int loader_idx = PMF_DMAE_C(sc); + uint32_t *stats_comp = BNX2X_SP(sc, stats_comp); + + if (sc->devinfo.bc_ver <= 0x06001400) { + /* + * Bootcode v6.0.21 fixed a GRC timeout that occurs when accessing + * BRB registers while the BRB block is in reset. The DMA transfer + * below triggers this issue resulting in the DMAE to stop + * functioning. Skip this initial stats transfer for old bootcode + * versions <= 6.0.20. 
+ */ + return; + } + /* sanity */ + if (!sc->port.pmf || !sc->port.port_stx) { + PMD_DRV_LOG(ERR, sc, "BUG!"); + return; + } + + sc->executer_idx = 0; + + opcode = bnx2x_dmae_opcode(sc, DMAE_SRC_GRC, DMAE_DST_PCI, FALSE, 0); + + dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]); + dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_GRC); + dmae->src_addr_lo = (sc->port.port_stx >> 2); + dmae->src_addr_hi = 0; + dmae->dst_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, port_stats)); + dmae->dst_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, port_stats)); + dmae->len = DMAE_LEN32_RD_MAX; + dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2); + dmae->comp_addr_hi = 0; + dmae->comp_val = 1; + + dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]); + dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI); + dmae->src_addr_lo = ((sc->port.port_stx >> 2) + DMAE_LEN32_RD_MAX); + dmae->src_addr_hi = 0; + dmae->dst_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, port_stats) + + DMAE_LEN32_RD_MAX * 4); + dmae->dst_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, port_stats) + + DMAE_LEN32_RD_MAX * 4); + dmae->len = (bnx2x_get_port_stats_dma_len(sc) - DMAE_LEN32_RD_MAX); + + dmae->comp_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, stats_comp)); + dmae->comp_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, stats_comp)); + dmae->comp_val = DMAE_COMP_VAL; + + *stats_comp = 0; + bnx2x_hw_stats_post(sc); + bnx2x_stats_comp(sc); +} + +static void +bnx2x_port_stats_init(struct bnx2x_softc *sc) +{ + struct dmae_command *dmae; + int port = SC_PORT(sc); + uint32_t opcode; + int loader_idx = PMF_DMAE_C(sc); + uint32_t mac_addr; + uint32_t *stats_comp = BNX2X_SP(sc, stats_comp); + + /* sanity */ + if (!sc->link_vars.link_up || !sc->port.pmf) { + PMD_DRV_LOG(ERR, sc, "BUG!"); + return; + } + + sc->executer_idx = 0; + + /* MCP */ + opcode = bnx2x_dmae_opcode(sc, DMAE_SRC_PCI, DMAE_DST_GRC, + TRUE, DMAE_COMP_GRC); + + if (sc->port.port_stx) { + dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]); + dmae->opcode = opcode; + dmae->src_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, port_stats)); + dmae->src_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, port_stats)); + dmae->dst_addr_lo = sc->port.port_stx >> 2; + dmae->dst_addr_hi = 0; + dmae->len = bnx2x_get_port_stats_dma_len(sc); + dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; + dmae->comp_addr_hi = 0; + dmae->comp_val = 1; + } + + if (sc->func_stx) { + dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]); + dmae->opcode = opcode; + dmae->src_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, func_stats)); + dmae->src_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, func_stats)); + dmae->dst_addr_lo = (sc->func_stx >> 2); + dmae->dst_addr_hi = 0; + dmae->len = (sizeof(struct host_func_stats) >> 2); + dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2); + dmae->comp_addr_hi = 0; + dmae->comp_val = 1; + } + + /* MAC */ + opcode = bnx2x_dmae_opcode(sc, DMAE_SRC_GRC, DMAE_DST_PCI, + TRUE, DMAE_COMP_GRC); + + /* EMAC is special */ + if (sc->link_vars.mac_type == ELINK_MAC_TYPE_EMAC) { + mac_addr = (port ? 
GRCBASE_EMAC1 : GRCBASE_EMAC0); + + /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/ + dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]); + dmae->opcode = opcode; + dmae->src_addr_lo = (mac_addr + EMAC_REG_EMAC_RX_STAT_AC) >> 2; + dmae->src_addr_hi = 0; + dmae->dst_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, mac_stats)); + dmae->dst_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, mac_stats)); + dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT; + dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2); + dmae->comp_addr_hi = 0; + dmae->comp_val = 1; + + /* EMAC_REG_EMAC_RX_STAT_AC_28 */ + dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]); + dmae->opcode = opcode; + dmae->src_addr_lo = ((mac_addr + EMAC_REG_EMAC_RX_STAT_AC_28) >> 2); + dmae->src_addr_hi = 0; + dmae->dst_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, mac_stats) + + offsetof(struct emac_stats, + rx_stat_falsecarriererrors)); + dmae->dst_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, mac_stats) + + offsetof(struct emac_stats, + rx_stat_falsecarriererrors)); + dmae->len = 1; + dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2); + dmae->comp_addr_hi = 0; + dmae->comp_val = 1; + + /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/ + dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]); + dmae->opcode = opcode; + dmae->src_addr_lo = ((mac_addr + EMAC_REG_EMAC_TX_STAT_AC) >> 2); + dmae->src_addr_hi = 0; + dmae->dst_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, mac_stats) + + offsetof(struct emac_stats, + tx_stat_ifhcoutoctets)); + dmae->dst_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, mac_stats) + + offsetof(struct emac_stats, + tx_stat_ifhcoutoctets)); + dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT; + dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2); + dmae->comp_addr_hi = 0; + dmae->comp_val = 1; + } else { + uint32_t tx_src_addr_lo, rx_src_addr_lo; + uint16_t rx_len, tx_len; + + /* configure the params according to MAC type */ + switch (sc->link_vars.mac_type) { + case ELINK_MAC_TYPE_BMAC: + mac_addr = (port) ? NIG_REG_INGRESS_BMAC1_MEM : + NIG_REG_INGRESS_BMAC0_MEM; + + /* BIGMAC_REGISTER_TX_STAT_GTPKT .. + BIGMAC_REGISTER_TX_STAT_GTBYT */ + if (CHIP_IS_E1x(sc)) { + tx_src_addr_lo = + ((mac_addr + BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2); + tx_len = ((8 + BIGMAC_REGISTER_TX_STAT_GTBYT - + BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2); + rx_src_addr_lo = + ((mac_addr + BIGMAC_REGISTER_RX_STAT_GR64) >> 2); + rx_len = ((8 + BIGMAC_REGISTER_RX_STAT_GRIPJ - + BIGMAC_REGISTER_RX_STAT_GR64) >> 2); + } else { + tx_src_addr_lo = + ((mac_addr + BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2); + tx_len = ((8 + BIGMAC2_REGISTER_TX_STAT_GTBYT - + BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2); + rx_src_addr_lo = + ((mac_addr + BIGMAC2_REGISTER_RX_STAT_GR64) >> 2); + rx_len = ((8 + BIGMAC2_REGISTER_RX_STAT_GRIPJ - + BIGMAC2_REGISTER_RX_STAT_GR64) >> 2); + } + + break; + + case ELINK_MAC_TYPE_UMAC: /* handled by MSTAT */ + case ELINK_MAC_TYPE_XMAC: /* handled by MSTAT */ + default: + mac_addr = (port) ? 
GRCBASE_MSTAT1 : GRCBASE_MSTAT0; + tx_src_addr_lo = ((mac_addr + MSTAT_REG_TX_STAT_GTXPOK_LO) >> 2); + rx_src_addr_lo = ((mac_addr + MSTAT_REG_RX_STAT_GR64_LO) >> 2); + tx_len = + (sizeof(sc->sp->mac_stats.mstat_stats.stats_tx) >> 2); + rx_len = + (sizeof(sc->sp->mac_stats.mstat_stats.stats_rx) >> 2); + break; + } + + /* TX stats */ + dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]); + dmae->opcode = opcode; + dmae->src_addr_lo = tx_src_addr_lo; + dmae->src_addr_hi = 0; + dmae->len = tx_len; + dmae->dst_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, mac_stats)); + dmae->dst_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, mac_stats)); + dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; + dmae->comp_addr_hi = 0; + dmae->comp_val = 1; + + /* RX stats */ + dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]); + dmae->opcode = opcode; + dmae->src_addr_hi = 0; + dmae->src_addr_lo = rx_src_addr_lo; + dmae->dst_addr_lo = + U64_LO(BNX2X_SP_MAPPING(sc, mac_stats) + (tx_len << 2)); + dmae->dst_addr_hi = + U64_HI(BNX2X_SP_MAPPING(sc, mac_stats) + (tx_len << 2)); + dmae->len = rx_len; + dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; + dmae->comp_addr_hi = 0; + dmae->comp_val = 1; + } + + /* NIG */ + if (!CHIP_IS_E3(sc)) { + dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]); + dmae->opcode = opcode; + dmae->src_addr_lo = + (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 : + NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2; + dmae->src_addr_hi = 0; + dmae->dst_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, nig_stats) + + offsetof(struct nig_stats, + egress_mac_pkt0_lo)); + dmae->dst_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, nig_stats) + + offsetof(struct nig_stats, + egress_mac_pkt0_lo)); + dmae->len = ((2 * sizeof(uint32_t)) >> 2); + dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2); + dmae->comp_addr_hi = 0; + dmae->comp_val = 1; + + dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]); + dmae->opcode = opcode; + dmae->src_addr_lo = + (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 : + NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2; + dmae->src_addr_hi = 0; + dmae->dst_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, nig_stats) + + offsetof(struct nig_stats, + egress_mac_pkt1_lo)); + dmae->dst_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, nig_stats) + + offsetof(struct nig_stats, + egress_mac_pkt1_lo)); + dmae->len = ((2 * sizeof(uint32_t)) >> 2); + dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2); + dmae->comp_addr_hi = 0; + dmae->comp_val = 1; + } + + dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]); + dmae->opcode = bnx2x_dmae_opcode(sc, DMAE_SRC_GRC, DMAE_DST_PCI, + TRUE, DMAE_COMP_PCI); + dmae->src_addr_lo = + (port ? 
NIG_REG_STAT1_BRB_DISCARD : + NIG_REG_STAT0_BRB_DISCARD) >> 2; + dmae->src_addr_hi = 0; + dmae->dst_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, nig_stats)); + dmae->dst_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, nig_stats)); + dmae->len = (sizeof(struct nig_stats) - 4*sizeof(uint32_t)) >> 2; + + dmae->comp_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, stats_comp)); + dmae->comp_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, stats_comp)); + dmae->comp_val = DMAE_COMP_VAL; + + *stats_comp = 0; +} + +static void +bnx2x_func_stats_init(struct bnx2x_softc *sc) +{ + struct dmae_command *dmae = &sc->stats_dmae; + uint32_t *stats_comp = BNX2X_SP(sc, stats_comp); + + /* sanity */ + if (!sc->func_stx) { + PMD_DRV_LOG(ERR, sc, "BUG!"); + return; + } + + sc->executer_idx = 0; + memset(dmae, 0, sizeof(struct dmae_command)); + + dmae->opcode = bnx2x_dmae_opcode(sc, DMAE_SRC_PCI, DMAE_DST_GRC, + TRUE, DMAE_COMP_PCI); + dmae->src_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, func_stats)); + dmae->src_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, func_stats)); + dmae->dst_addr_lo = (sc->func_stx >> 2); + dmae->dst_addr_hi = 0; + dmae->len = (sizeof(struct host_func_stats) >> 2); + dmae->comp_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, stats_comp)); + dmae->comp_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, stats_comp)); + dmae->comp_val = DMAE_COMP_VAL; + + *stats_comp = 0; +} + +static void +bnx2x_stats_start(struct bnx2x_softc *sc) +{ + /* + * VFs travel through here as part of the statistics FSM, but no action + * is required + */ + if (IS_VF(sc)) { + return; + } + + if (sc->port.pmf) { + bnx2x_port_stats_init(sc); + } + + else if (sc->func_stx) { + bnx2x_func_stats_init(sc); + } + + bnx2x_hw_stats_post(sc); + bnx2x_storm_stats_post(sc); +} + +static void +bnx2x_stats_pmf_start(struct bnx2x_softc *sc) +{ + bnx2x_stats_comp(sc); + bnx2x_stats_pmf_update(sc); + bnx2x_stats_start(sc); +} + +static void +bnx2x_stats_restart(struct bnx2x_softc *sc) +{ + /* + * VFs travel through here as part of the statistics FSM, but no action + * is required + */ + if (IS_VF(sc)) { + return; + } + + bnx2x_stats_comp(sc); + bnx2x_stats_start(sc); +} + +static void +bnx2x_bmac_stats_update(struct bnx2x_softc *sc) +{ + struct host_port_stats *pstats = BNX2X_SP(sc, port_stats); + struct bnx2x_eth_stats *estats = &sc->eth_stats; + struct { + uint32_t lo; + uint32_t hi; + } diff; + + if (CHIP_IS_E1x(sc)) { + struct bmac1_stats *new = BNX2X_SP(sc, mac_stats.bmac1_stats); + + /* the macros below will use "bmac1_stats" type */ + UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets); + UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors); + UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts); + UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong); + UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments); + UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers); + UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived); + UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered); + UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf); + + UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent); + UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone); + UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets); + UPDATE_STAT64(tx_stat_gt127, + tx_stat_etherstatspkts65octetsto127octets); + UPDATE_STAT64(tx_stat_gt255, + tx_stat_etherstatspkts128octetsto255octets); + UPDATE_STAT64(tx_stat_gt511, + tx_stat_etherstatspkts256octetsto511octets); + UPDATE_STAT64(tx_stat_gt1023, + tx_stat_etherstatspkts512octetsto1023octets); + UPDATE_STAT64(tx_stat_gt1518, + 
tx_stat_etherstatspkts1024octetsto1522octets); + UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047); + UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095); + UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216); + UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383); + UPDATE_STAT64(tx_stat_gterr, + tx_stat_dot3statsinternalmactransmiterrors); + UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl); + } else { + struct bmac2_stats *new = BNX2X_SP(sc, mac_stats.bmac2_stats); + struct bnx2x_fw_port_stats_old *fwstats = &sc->fw_stats_old; + + /* the macros below will use "bmac2_stats" type */ + UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets); + UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors); + UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts); + UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong); + UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments); + UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers); + UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived); + UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered); + UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf); + UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent); + UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone); + UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets); + UPDATE_STAT64(tx_stat_gt127, + tx_stat_etherstatspkts65octetsto127octets); + UPDATE_STAT64(tx_stat_gt255, + tx_stat_etherstatspkts128octetsto255octets); + UPDATE_STAT64(tx_stat_gt511, + tx_stat_etherstatspkts256octetsto511octets); + UPDATE_STAT64(tx_stat_gt1023, + tx_stat_etherstatspkts512octetsto1023octets); + UPDATE_STAT64(tx_stat_gt1518, + tx_stat_etherstatspkts1024octetsto1522octets); + UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047); + UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095); + UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216); + UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383); + UPDATE_STAT64(tx_stat_gterr, + tx_stat_dot3statsinternalmactransmiterrors); + UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl); + + /* collect PFC stats */ + pstats->pfc_frames_tx_hi = new->tx_stat_gtpp_hi; + pstats->pfc_frames_tx_lo = new->tx_stat_gtpp_lo; + ADD_64(pstats->pfc_frames_tx_hi, fwstats->pfc_frames_tx_hi, + pstats->pfc_frames_tx_lo, fwstats->pfc_frames_tx_lo); + + pstats->pfc_frames_rx_hi = new->rx_stat_grpp_hi; + pstats->pfc_frames_rx_lo = new->rx_stat_grpp_lo; + ADD_64(pstats->pfc_frames_rx_hi, fwstats->pfc_frames_rx_hi, + pstats->pfc_frames_rx_lo, fwstats->pfc_frames_rx_lo); + } + + estats->pause_frames_received_hi = pstats->mac_stx[1].rx_stat_mac_xpf_hi; + estats->pause_frames_received_lo = pstats->mac_stx[1].rx_stat_mac_xpf_lo; + + estats->pause_frames_sent_hi = pstats->mac_stx[1].tx_stat_outxoffsent_hi; + estats->pause_frames_sent_lo = pstats->mac_stx[1].tx_stat_outxoffsent_lo; + + estats->pfc_frames_received_hi = pstats->pfc_frames_rx_hi; + estats->pfc_frames_received_lo = pstats->pfc_frames_rx_lo; + estats->pfc_frames_sent_hi = pstats->pfc_frames_tx_hi; + estats->pfc_frames_sent_lo = pstats->pfc_frames_tx_lo; +} + +static void +bnx2x_mstat_stats_update(struct bnx2x_softc *sc) +{ + struct host_port_stats *pstats = BNX2X_SP(sc, port_stats); + struct bnx2x_eth_stats *estats = &sc->eth_stats; + struct mstat_stats *new = BNX2X_SP(sc, mac_stats.mstat_stats); + + ADD_STAT64(stats_rx.rx_grerb, rx_stat_ifhcinbadoctets); + ADD_STAT64(stats_rx.rx_grfcs, rx_stat_dot3statsfcserrors); + ADD_STAT64(stats_rx.rx_grund, rx_stat_etherstatsundersizepkts); + ADD_STAT64(stats_rx.rx_grovr, rx_stat_dot3statsframestoolong); + 
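+ /*
+  * Note on the hi/lo pairs used throughout this file: each 64-bit counter
+  * is kept as two 32-bit words.  The ADD_64/ADD_STAT64/UPDATE_STAT64
+  * helpers are defined elsewhere in the driver; a minimal sketch of the
+  * arithmetic they are assumed to perform (carry propagated from the low
+  * word by hand) is:
+  *
+  *   s_lo += a_lo;
+  *   s_hi += a_hi + (s_lo < a_lo);
+  */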
ADD_STAT64(stats_rx.rx_grfrg, rx_stat_etherstatsfragments); + ADD_STAT64(stats_rx.rx_grxcf, rx_stat_maccontrolframesreceived); + ADD_STAT64(stats_rx.rx_grxpf, rx_stat_xoffstateentered); + ADD_STAT64(stats_rx.rx_grxpf, rx_stat_mac_xpf); + ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_outxoffsent); + ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_flowcontroldone); + + /* collect pfc stats */ + ADD_64(pstats->pfc_frames_tx_hi, new->stats_tx.tx_gtxpp_hi, + pstats->pfc_frames_tx_lo, new->stats_tx.tx_gtxpp_lo); + ADD_64(pstats->pfc_frames_rx_hi, new->stats_rx.rx_grxpp_hi, + pstats->pfc_frames_rx_lo, new->stats_rx.rx_grxpp_lo); + + ADD_STAT64(stats_tx.tx_gt64, tx_stat_etherstatspkts64octets); + ADD_STAT64(stats_tx.tx_gt127, tx_stat_etherstatspkts65octetsto127octets); + ADD_STAT64(stats_tx.tx_gt255, tx_stat_etherstatspkts128octetsto255octets); + ADD_STAT64(stats_tx.tx_gt511, tx_stat_etherstatspkts256octetsto511octets); + ADD_STAT64(stats_tx.tx_gt1023, + tx_stat_etherstatspkts512octetsto1023octets); + ADD_STAT64(stats_tx.tx_gt1518, + tx_stat_etherstatspkts1024octetsto1522octets); + ADD_STAT64(stats_tx.tx_gt2047, tx_stat_mac_2047); + + ADD_STAT64(stats_tx.tx_gt4095, tx_stat_mac_4095); + ADD_STAT64(stats_tx.tx_gt9216, tx_stat_mac_9216); + ADD_STAT64(stats_tx.tx_gt16383, tx_stat_mac_16383); + + ADD_STAT64(stats_tx.tx_gterr, tx_stat_dot3statsinternalmactransmiterrors); + ADD_STAT64(stats_tx.tx_gtufl, tx_stat_mac_ufl); + + estats->etherstatspkts1024octetsto1522octets_hi = + pstats->mac_stx[1].tx_stat_etherstatspkts1024octetsto1522octets_hi; + estats->etherstatspkts1024octetsto1522octets_lo = + pstats->mac_stx[1].tx_stat_etherstatspkts1024octetsto1522octets_lo; + + estats->etherstatspktsover1522octets_hi = + pstats->mac_stx[1].tx_stat_mac_2047_hi; + estats->etherstatspktsover1522octets_lo = + pstats->mac_stx[1].tx_stat_mac_2047_lo; + + ADD_64(estats->etherstatspktsover1522octets_hi, + pstats->mac_stx[1].tx_stat_mac_4095_hi, + estats->etherstatspktsover1522octets_lo, + pstats->mac_stx[1].tx_stat_mac_4095_lo); + + ADD_64(estats->etherstatspktsover1522octets_hi, + pstats->mac_stx[1].tx_stat_mac_9216_hi, + estats->etherstatspktsover1522octets_lo, + pstats->mac_stx[1].tx_stat_mac_9216_lo); + + ADD_64(estats->etherstatspktsover1522octets_hi, + pstats->mac_stx[1].tx_stat_mac_16383_hi, + estats->etherstatspktsover1522octets_lo, + pstats->mac_stx[1].tx_stat_mac_16383_lo); + + estats->pause_frames_received_hi = pstats->mac_stx[1].rx_stat_mac_xpf_hi; + estats->pause_frames_received_lo = pstats->mac_stx[1].rx_stat_mac_xpf_lo; + + estats->pause_frames_sent_hi = pstats->mac_stx[1].tx_stat_outxoffsent_hi; + estats->pause_frames_sent_lo = pstats->mac_stx[1].tx_stat_outxoffsent_lo; + + estats->pfc_frames_received_hi = pstats->pfc_frames_rx_hi; + estats->pfc_frames_received_lo = pstats->pfc_frames_rx_lo; + estats->pfc_frames_sent_hi = pstats->pfc_frames_tx_hi; + estats->pfc_frames_sent_lo = pstats->pfc_frames_tx_lo; +} + +static void +bnx2x_emac_stats_update(struct bnx2x_softc *sc) +{ + struct emac_stats *new = BNX2X_SP(sc, mac_stats.emac_stats); + struct host_port_stats *pstats = BNX2X_SP(sc, port_stats); + struct bnx2x_eth_stats *estats = &sc->eth_stats; + + UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets); + UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets); + UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors); + UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors); + UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors); + UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors); + UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts); + 
UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong); + UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments); + UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers); + UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived); + UPDATE_EXTEND_STAT(rx_stat_xoffstateentered); + UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived); + UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived); + UPDATE_EXTEND_STAT(tx_stat_outxonsent); + UPDATE_EXTEND_STAT(tx_stat_outxoffsent); + UPDATE_EXTEND_STAT(tx_stat_flowcontroldone); + UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions); + UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes); + UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes); + UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions); + UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions); + UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions); + UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets); + UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets); + UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets); + UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets); + UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets); + UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets); + UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets); + UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors); + + estats->pause_frames_received_hi = + pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi; + estats->pause_frames_received_lo = + pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo; + ADD_64(estats->pause_frames_received_hi, + pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi, + estats->pause_frames_received_lo, + pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo); + + estats->pause_frames_sent_hi = + pstats->mac_stx[1].tx_stat_outxonsent_hi; + estats->pause_frames_sent_lo = + pstats->mac_stx[1].tx_stat_outxonsent_lo; + ADD_64(estats->pause_frames_sent_hi, + pstats->mac_stx[1].tx_stat_outxoffsent_hi, + estats->pause_frames_sent_lo, + pstats->mac_stx[1].tx_stat_outxoffsent_lo); +} + +static int +bnx2x_hw_stats_update(struct bnx2x_softc *sc) +{ + struct nig_stats *new = BNX2X_SP(sc, nig_stats); + struct nig_stats *old = &(sc->port.old_nig_stats); + struct host_port_stats *pstats = BNX2X_SP(sc, port_stats); + struct bnx2x_eth_stats *estats = &sc->eth_stats; + uint32_t lpi_reg, nig_timer_max; + struct { + uint32_t lo; + uint32_t hi; + } diff; + + switch (sc->link_vars.mac_type) { + case ELINK_MAC_TYPE_BMAC: + bnx2x_bmac_stats_update(sc); + break; + + case ELINK_MAC_TYPE_EMAC: + bnx2x_emac_stats_update(sc); + break; + + case ELINK_MAC_TYPE_UMAC: + case ELINK_MAC_TYPE_XMAC: + bnx2x_mstat_stats_update(sc); + break; + + case ELINK_MAC_TYPE_NONE: /* unreached */ + PMD_DRV_LOG(DEBUG, sc, + "stats updated by DMAE but no MAC active"); + return -1; + + default: /* unreached */ + PMD_DRV_LOG(ERR, sc, "stats update failed, unknown MAC type"); + } + + ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo, + new->brb_discard - old->brb_discard); + ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo, + new->brb_truncate - old->brb_truncate); + + if (!CHIP_IS_E3(sc)) { + UPDATE_STAT64_NIG(egress_mac_pkt0, + etherstatspkts1024octetsto1522octets); + UPDATE_STAT64_NIG(egress_mac_pkt1, + etherstatspktsover1522octets); + } + + rte_memcpy(old, new, sizeof(struct nig_stats)); + + rte_memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]), + sizeof(struct mac_stx)); + estats->brb_drop_hi = pstats->brb_drop_hi; + estats->brb_drop_lo = 
pstats->brb_drop_lo; + + pstats->host_port_stats_counter++; + + if (CHIP_IS_E3(sc)) { + lpi_reg = (SC_PORT(sc)) ? + MISC_REG_CPMU_LP_SM_ENT_CNT_P1 : + MISC_REG_CPMU_LP_SM_ENT_CNT_P0; + estats->eee_tx_lpi += REG_RD(sc, lpi_reg); + } + + if (!BNX2X_NOMCP(sc)) { + nig_timer_max = SHMEM_RD(sc, port_mb[SC_PORT(sc)].stat_nig_timer); + if (nig_timer_max != estats->nig_timer_max) { + estats->nig_timer_max = nig_timer_max; + PMD_DRV_LOG(ERR, sc, "invalid NIG timer max (%u)", + estats->nig_timer_max); + } + } + + return 0; +} + +static int +bnx2x_storm_stats_validate_counters(struct bnx2x_softc *sc) +{ + struct stats_counter *counters = &sc->fw_stats_data->storm_counters; + uint16_t cur_stats_counter; + + /* + * Make sure we use the value of the counter + * used for sending the last stats ramrod. + */ + cur_stats_counter = (sc->stats_counter - 1); + + /* are storm stats valid? */ + if (le16toh(counters->xstats_counter) != cur_stats_counter) { + PMD_DRV_LOG(DEBUG, sc, + "stats not updated by xstorm, " + "counter 0x%x != stats_counter 0x%x", + le16toh(counters->xstats_counter), sc->stats_counter); + return -EAGAIN; + } + + if (le16toh(counters->ustats_counter) != cur_stats_counter) { + PMD_DRV_LOG(DEBUG, sc, + "stats not updated by ustorm, " + "counter 0x%x != stats_counter 0x%x", + le16toh(counters->ustats_counter), sc->stats_counter); + return -EAGAIN; + } + + if (le16toh(counters->cstats_counter) != cur_stats_counter) { + PMD_DRV_LOG(DEBUG, sc, + "stats not updated by cstorm, " + "counter 0x%x != stats_counter 0x%x", + le16toh(counters->cstats_counter), sc->stats_counter); + return -EAGAIN; + } + + if (le16toh(counters->tstats_counter) != cur_stats_counter) { + PMD_DRV_LOG(DEBUG, sc, + "stats not updated by tstorm, " + "counter 0x%x != stats_counter 0x%x", + le16toh(counters->tstats_counter), sc->stats_counter); + return -EAGAIN; + } + + return 0; +} + +static int +bnx2x_storm_stats_update(struct bnx2x_softc *sc) +{ + struct tstorm_per_port_stats *tport = + &sc->fw_stats_data->port.tstorm_port_statistics; + struct tstorm_per_pf_stats *tfunc = + &sc->fw_stats_data->pf.tstorm_pf_statistics; + struct host_func_stats *fstats = &sc->func_stats; + struct bnx2x_eth_stats *estats = &sc->eth_stats; + struct bnx2x_eth_stats_old *estats_old = &sc->eth_stats_old; + int i; + + /* vfs stat counter is managed by pf */ + if (IS_PF(sc) && bnx2x_storm_stats_validate_counters(sc)) { + return -EAGAIN; + } + + estats->error_bytes_received_hi = 0; + estats->error_bytes_received_lo = 0; + + for (i = 0; i < sc->num_queues; i++) { + struct bnx2x_fastpath *fp = &sc->fp[i]; + struct tstorm_per_queue_stats *tclient = + &sc->fw_stats_data->queue_stats[i].tstorm_queue_statistics; + struct tstorm_per_queue_stats *old_tclient = &fp->old_tclient; + struct ustorm_per_queue_stats *uclient = + &sc->fw_stats_data->queue_stats[i].ustorm_queue_statistics; + struct ustorm_per_queue_stats *old_uclient = &fp->old_uclient; + struct xstorm_per_queue_stats *xclient = + &sc->fw_stats_data->queue_stats[i].xstorm_queue_statistics; + struct xstorm_per_queue_stats *old_xclient = &fp->old_xclient; + struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats; + struct bnx2x_eth_q_stats_old *qstats_old = &fp->eth_q_stats_old; + + uint32_t diff; + + /* PMD_DRV_LOG(DEBUG, sc, + "queue[%d]: ucast_sent 0x%x bcast_sent 0x%x mcast_sent 0x%x", + i, xclient->ucast_pkts_sent, xclient->bcast_pkts_sent, + xclient->mcast_pkts_sent); + + PMD_DRV_LOG(DEBUG, sc, "---------------"); + */ + + UPDATE_QSTAT(tclient->rcv_bcast_bytes, + total_broadcast_bytes_received); + 
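+ /*
+  * The per-queue counters come from three firmware storm blocks: the
+  * tstorm counters (tclient) supply receive byte/packet counts and RX
+  * discards, the ustorm counters (uclient) supply no-buffer drops that are
+  * subtracted from the receive totals below, and the xstorm counters
+  * (xclient) supply transmit byte/packet counts.
+  */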
UPDATE_QSTAT(tclient->rcv_mcast_bytes, + total_multicast_bytes_received); + UPDATE_QSTAT(tclient->rcv_ucast_bytes, + total_unicast_bytes_received); + + /* + * sum to total_bytes_received all + * unicast/multicast/broadcast + */ + qstats->total_bytes_received_hi = + qstats->total_broadcast_bytes_received_hi; + qstats->total_bytes_received_lo = + qstats->total_broadcast_bytes_received_lo; + + ADD_64(qstats->total_bytes_received_hi, + qstats->total_multicast_bytes_received_hi, + qstats->total_bytes_received_lo, + qstats->total_multicast_bytes_received_lo); + + ADD_64(qstats->total_bytes_received_hi, + qstats->total_unicast_bytes_received_hi, + qstats->total_bytes_received_lo, + qstats->total_unicast_bytes_received_lo); + + qstats->valid_bytes_received_hi = qstats->total_bytes_received_hi; + qstats->valid_bytes_received_lo = qstats->total_bytes_received_lo; + + UPDATE_EXTEND_TSTAT(rcv_ucast_pkts, total_unicast_packets_received); + UPDATE_EXTEND_TSTAT(rcv_mcast_pkts, total_multicast_packets_received); + UPDATE_EXTEND_TSTAT(rcv_bcast_pkts, total_broadcast_packets_received); + UPDATE_EXTEND_E_TSTAT(pkts_too_big_discard, + etherstatsoverrsizepkts, 32); + UPDATE_EXTEND_E_TSTAT(no_buff_discard, no_buff_discard, 16); + + SUB_EXTEND_USTAT(ucast_no_buff_pkts, total_unicast_packets_received); + SUB_EXTEND_USTAT(mcast_no_buff_pkts, + total_multicast_packets_received); + SUB_EXTEND_USTAT(bcast_no_buff_pkts, + total_broadcast_packets_received); + UPDATE_EXTEND_E_USTAT(ucast_no_buff_pkts, no_buff_discard); + UPDATE_EXTEND_E_USTAT(mcast_no_buff_pkts, no_buff_discard); + UPDATE_EXTEND_E_USTAT(bcast_no_buff_pkts, no_buff_discard); + + UPDATE_QSTAT(xclient->bcast_bytes_sent, + total_broadcast_bytes_transmitted); + UPDATE_QSTAT(xclient->mcast_bytes_sent, + total_multicast_bytes_transmitted); + UPDATE_QSTAT(xclient->ucast_bytes_sent, + total_unicast_bytes_transmitted); + + /* + * sum to total_bytes_transmitted all + * unicast/multicast/broadcast + */ + qstats->total_bytes_transmitted_hi = + qstats->total_unicast_bytes_transmitted_hi; + qstats->total_bytes_transmitted_lo = + qstats->total_unicast_bytes_transmitted_lo; + + ADD_64(qstats->total_bytes_transmitted_hi, + qstats->total_broadcast_bytes_transmitted_hi, + qstats->total_bytes_transmitted_lo, + qstats->total_broadcast_bytes_transmitted_lo); + + ADD_64(qstats->total_bytes_transmitted_hi, + qstats->total_multicast_bytes_transmitted_hi, + qstats->total_bytes_transmitted_lo, + qstats->total_multicast_bytes_transmitted_lo); + + UPDATE_EXTEND_XSTAT(ucast_pkts_sent, + total_unicast_packets_transmitted); + UPDATE_EXTEND_XSTAT(mcast_pkts_sent, + total_multicast_packets_transmitted); + UPDATE_EXTEND_XSTAT(bcast_pkts_sent, + total_broadcast_packets_transmitted); + + UPDATE_EXTEND_TSTAT(checksum_discard, + total_packets_received_checksum_discarded); + UPDATE_EXTEND_TSTAT(ttl0_discard, + total_packets_received_ttl0_discarded); + + UPDATE_EXTEND_XSTAT(error_drop_pkts, + total_transmitted_dropped_packets_error); + + UPDATE_FSTAT_QSTAT(total_bytes_received); + UPDATE_FSTAT_QSTAT(total_bytes_transmitted); + UPDATE_FSTAT_QSTAT(total_unicast_packets_received); + UPDATE_FSTAT_QSTAT(total_multicast_packets_received); + UPDATE_FSTAT_QSTAT(total_broadcast_packets_received); + UPDATE_FSTAT_QSTAT(total_unicast_packets_transmitted); + UPDATE_FSTAT_QSTAT(total_multicast_packets_transmitted); + UPDATE_FSTAT_QSTAT(total_broadcast_packets_transmitted); + UPDATE_FSTAT_QSTAT(valid_bytes_received); + } + + ADD_64(estats->total_bytes_received_hi, + estats->rx_stat_ifhcinbadoctets_hi, + 
estats->total_bytes_received_lo, + estats->rx_stat_ifhcinbadoctets_lo); + + ADD_64_LE(estats->total_bytes_received_hi, + tfunc->rcv_error_bytes.hi, + estats->total_bytes_received_lo, + tfunc->rcv_error_bytes.lo); + + ADD_64_LE(estats->error_bytes_received_hi, + tfunc->rcv_error_bytes.hi, + estats->error_bytes_received_lo, + tfunc->rcv_error_bytes.lo); + + UPDATE_ESTAT(etherstatsoverrsizepkts, rx_stat_dot3statsframestoolong); + + ADD_64(estats->error_bytes_received_hi, + estats->rx_stat_ifhcinbadoctets_hi, + estats->error_bytes_received_lo, + estats->rx_stat_ifhcinbadoctets_lo); + + if (sc->port.pmf) { + struct bnx2x_fw_port_stats_old *fwstats = &sc->fw_stats_old; + UPDATE_FW_STAT(mac_filter_discard); + UPDATE_FW_STAT(mf_tag_discard); + UPDATE_FW_STAT(brb_truncate_discard); + UPDATE_FW_STAT(mac_discard); + } + + fstats->host_func_stats_start = ++fstats->host_func_stats_end; + + sc->stats_pending = 0; + + return 0; +} + +static void +bnx2x_drv_stats_update(struct bnx2x_softc *sc) +{ + struct bnx2x_eth_stats *estats = &sc->eth_stats; + int i; + + for (i = 0; i < sc->num_queues; i++) { + struct bnx2x_eth_q_stats *qstats = &sc->fp[i].eth_q_stats; + struct bnx2x_eth_q_stats_old *qstats_old = &sc->fp[i].eth_q_stats_old; + + UPDATE_ESTAT_QSTAT(rx_calls); + UPDATE_ESTAT_QSTAT(rx_pkts); + UPDATE_ESTAT_QSTAT(rx_soft_errors); + UPDATE_ESTAT_QSTAT(rx_hw_csum_errors); + UPDATE_ESTAT_QSTAT(rx_ofld_frames_csum_ip); + UPDATE_ESTAT_QSTAT(rx_ofld_frames_csum_tcp_udp); + UPDATE_ESTAT_QSTAT(rx_budget_reached); + UPDATE_ESTAT_QSTAT(tx_pkts); + UPDATE_ESTAT_QSTAT(tx_soft_errors); + UPDATE_ESTAT_QSTAT(tx_ofld_frames_csum_ip); + UPDATE_ESTAT_QSTAT(tx_ofld_frames_csum_tcp); + UPDATE_ESTAT_QSTAT(tx_ofld_frames_csum_udp); + UPDATE_ESTAT_QSTAT(tx_encap_failures); + UPDATE_ESTAT_QSTAT(tx_hw_queue_full); + UPDATE_ESTAT_QSTAT(tx_hw_max_queue_depth); + UPDATE_ESTAT_QSTAT(tx_dma_mapping_failure); + UPDATE_ESTAT_QSTAT(tx_max_drbr_queue_depth); + UPDATE_ESTAT_QSTAT(tx_window_violation_std); + UPDATE_ESTAT_QSTAT(tx_chain_lost_mbuf); + UPDATE_ESTAT_QSTAT(tx_frames_deferred); + UPDATE_ESTAT_QSTAT(tx_queue_xoff); + + /* mbuf driver statistics */ + UPDATE_ESTAT_QSTAT(mbuf_defrag_attempts); + UPDATE_ESTAT_QSTAT(mbuf_defrag_failures); + UPDATE_ESTAT_QSTAT(mbuf_rx_bd_alloc_failed); + UPDATE_ESTAT_QSTAT(mbuf_rx_bd_mapping_failed); + + /* track the number of allocated mbufs */ + UPDATE_ESTAT_QSTAT(mbuf_alloc_tx); + UPDATE_ESTAT_QSTAT(mbuf_alloc_rx); + } +} + +static uint8_t +bnx2x_edebug_stats_stopped(struct bnx2x_softc *sc) +{ + uint32_t val; + + if (SHMEM2_HAS(sc, edebug_driver_if[1])) { + val = SHMEM2_RD(sc, edebug_driver_if[1]); + + if (val == EDEBUG_DRIVER_IF_OP_CODE_DISABLE_STAT) { + return TRUE; + } + } + + return FALSE; +} + +static void +bnx2x_stats_update(struct bnx2x_softc *sc) +{ + uint32_t *stats_comp = BNX2X_SP(sc, stats_comp); + + if (bnx2x_edebug_stats_stopped(sc)) { + return; + } + + if (IS_PF(sc)) { + + bnx2x_storm_stats_update(sc); + bnx2x_hw_stats_post(sc); + bnx2x_storm_stats_post(sc); + DELAY_MS(5); + + if (*stats_comp != DMAE_COMP_VAL) { + return; + } + + if (sc->port.pmf) { + bnx2x_hw_stats_update(sc); + } + + if (bnx2x_storm_stats_update(sc)) { + if (sc->stats_pending++ == 3) { + rte_panic("storm stats not updated for 3 times"); + } + return; + } + } else { + /* + * VF doesn't collect HW statistics, and doesn't get completions, + * performs only update. 
+ */ + bnx2x_storm_stats_update(sc); + } + + bnx2x_drv_stats_update(sc); +} + +static void +bnx2x_port_stats_stop(struct bnx2x_softc *sc) +{ + struct dmae_command *dmae; + uint32_t opcode; + int loader_idx = PMF_DMAE_C(sc); + uint32_t *stats_comp = BNX2X_SP(sc, stats_comp); + + sc->executer_idx = 0; + + opcode = bnx2x_dmae_opcode(sc, DMAE_SRC_PCI, DMAE_DST_GRC, FALSE, 0); + + if (sc->port.port_stx) { + dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]); + + if (sc->func_stx) { + dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_GRC); + } else { + dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI); + } + + dmae->src_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, port_stats)); + dmae->src_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, port_stats)); + dmae->dst_addr_lo = sc->port.port_stx >> 2; + dmae->dst_addr_hi = 0; + dmae->len = bnx2x_get_port_stats_dma_len(sc); + if (sc->func_stx) { + dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2); + dmae->comp_addr_hi = 0; + dmae->comp_val = 1; + } else { + dmae->comp_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, stats_comp)); + dmae->comp_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, stats_comp)); + dmae->comp_val = DMAE_COMP_VAL; + + *stats_comp = 0; + } + } + + if (sc->func_stx) { + dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]); + dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI); + dmae->src_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, func_stats)); + dmae->src_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, func_stats)); + dmae->dst_addr_lo = (sc->func_stx >> 2); + dmae->dst_addr_hi = 0; + dmae->len = (sizeof(struct host_func_stats) >> 2); + dmae->comp_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, stats_comp)); + dmae->comp_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, stats_comp)); + dmae->comp_val = DMAE_COMP_VAL; + + *stats_comp = 0; + } +} + +static void +bnx2x_stats_stop(struct bnx2x_softc *sc) +{ + uint8_t update = FALSE; + + bnx2x_stats_comp(sc); + + if (sc->port.pmf) { + update = bnx2x_hw_stats_update(sc) == 0; + } + + update |= bnx2x_storm_stats_update(sc) == 0; + + if (update) { + + if (sc->port.pmf) { + bnx2x_port_stats_stop(sc); + } + + bnx2x_hw_stats_post(sc); + bnx2x_stats_comp(sc); + } +} + +static void +bnx2x_stats_do_nothing(__rte_unused struct bnx2x_softc *sc) +{ + return; +} + +static const struct { + void (*action)(struct bnx2x_softc *sc); + enum bnx2x_stats_state next_state; +} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = { + { + /* DISABLED PMF */ { bnx2x_stats_pmf_update, STATS_STATE_DISABLED }, + /* LINK_UP */ { bnx2x_stats_start, STATS_STATE_ENABLED }, + /* UPDATE */ { bnx2x_stats_do_nothing, STATS_STATE_DISABLED }, + /* STOP */ { bnx2x_stats_do_nothing, STATS_STATE_DISABLED } + }, + { + /* ENABLED PMF */ { bnx2x_stats_pmf_start, STATS_STATE_ENABLED }, + /* LINK_UP */ { bnx2x_stats_restart, STATS_STATE_ENABLED }, + /* UPDATE */ { bnx2x_stats_update, STATS_STATE_ENABLED }, + /* STOP */ { bnx2x_stats_stop, STATS_STATE_DISABLED } + } +}; + +void bnx2x_stats_handle(struct bnx2x_softc *sc, enum bnx2x_stats_event event) +{ + enum bnx2x_stats_state state; + + if (unlikely(sc->panic)) { + return; + } + + state = sc->stats_state; + sc->stats_state = bnx2x_stats_stm[state][event].next_state; + + bnx2x_stats_stm[state][event].action(sc); + + if (event != STATS_EVENT_UPDATE) { + PMD_DRV_LOG(DEBUG, sc, + "state %d -> event %d -> state %d", + state, event, sc->stats_state); + } +} + +static void +bnx2x_port_stats_base_init(struct bnx2x_softc *sc) +{ + struct dmae_command *dmae; + uint32_t *stats_comp = BNX2X_SP(sc, stats_comp); + + /* sanity */ + if 
(!sc->port.pmf || !sc->port.port_stx) { + PMD_DRV_LOG(ERR, sc, "BUG!"); + return; + } + + sc->executer_idx = 0; + + dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]); + dmae->opcode = bnx2x_dmae_opcode(sc, DMAE_SRC_PCI, DMAE_DST_GRC, + TRUE, DMAE_COMP_PCI); + dmae->src_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, port_stats)); + dmae->src_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, port_stats)); + dmae->dst_addr_lo = (sc->port.port_stx >> 2); + dmae->dst_addr_hi = 0; + dmae->len = bnx2x_get_port_stats_dma_len(sc); + dmae->comp_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, stats_comp)); + dmae->comp_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, stats_comp)); + dmae->comp_val = DMAE_COMP_VAL; + + *stats_comp = 0; + bnx2x_hw_stats_post(sc); + bnx2x_stats_comp(sc); +} + +/* + * This function will prepare the statistics ramrod data so that + * we will only have to increment the statistics counter and + * send the ramrod each time we have to. + */ +static void +bnx2x_prep_fw_stats_req(struct bnx2x_softc *sc) +{ + int i; + int first_queue_query_index; + struct stats_query_header *stats_hdr = &sc->fw_stats_req->hdr; + rte_iova_t cur_data_offset; + struct stats_query_entry *cur_query_entry; + + stats_hdr->cmd_num = sc->fw_stats_num; + stats_hdr->drv_stats_counter = 0; + + /* + * The storm_counters struct contains the counters of completed + * statistics requests per storm which are incremented by FW + * each time it completes handling a statistics ramrod. We will + * check these counters in the timer handler and discard a + * (statistics) ramrod completion. + */ + cur_data_offset = (sc->fw_stats_data_mapping + + offsetof(struct bnx2x_fw_stats_data, storm_counters)); + + stats_hdr->stats_counters_addrs.hi = htole32(U64_HI(cur_data_offset)); + stats_hdr->stats_counters_addrs.lo = htole32(U64_LO(cur_data_offset)); + + /* + * Prepare the first stats ramrod (will be completed with + * the counters equal to zero) - init counters to something different. 
+ */ + memset(&sc->fw_stats_data->storm_counters, 0xff, + sizeof(struct stats_counter)); + + /**** Port FW statistics data ****/ + cur_data_offset = (sc->fw_stats_data_mapping + + offsetof(struct bnx2x_fw_stats_data, port)); + + cur_query_entry = &sc->fw_stats_req->query[BNX2X_PORT_QUERY_IDX]; + + cur_query_entry->kind = STATS_TYPE_PORT; + /* For port query index is a DON'T CARE */ + cur_query_entry->index = SC_PORT(sc); + /* For port query funcID is a DON'T CARE */ + cur_query_entry->funcID = htole16(SC_FUNC(sc)); + cur_query_entry->address.hi = htole32(U64_HI(cur_data_offset)); + cur_query_entry->address.lo = htole32(U64_LO(cur_data_offset)); + + /**** PF FW statistics data ****/ + cur_data_offset = (sc->fw_stats_data_mapping + + offsetof(struct bnx2x_fw_stats_data, pf)); + + cur_query_entry = &sc->fw_stats_req->query[BNX2X_PF_QUERY_IDX]; + + cur_query_entry->kind = STATS_TYPE_PF; + /* For PF query index is a DON'T CARE */ + cur_query_entry->index = SC_PORT(sc); + cur_query_entry->funcID = htole16(SC_FUNC(sc)); + cur_query_entry->address.hi = htole32(U64_HI(cur_data_offset)); + cur_query_entry->address.lo = htole32(U64_LO(cur_data_offset)); + + /**** Clients' queries ****/ + cur_data_offset = (sc->fw_stats_data_mapping + + offsetof(struct bnx2x_fw_stats_data, queue_stats)); + + /* + * First queue query index depends whether FCoE offloaded request will + * be included in the ramrod + */ + first_queue_query_index = (BNX2X_FIRST_QUEUE_QUERY_IDX - 1); + + for (i = 0; i < sc->num_queues; i++) { + cur_query_entry = + &sc->fw_stats_req->query[first_queue_query_index + i]; + + cur_query_entry->kind = STATS_TYPE_QUEUE; + cur_query_entry->index = bnx2x_stats_id(&sc->fp[i]); + cur_query_entry->funcID = htole16(SC_FUNC(sc)); + cur_query_entry->address.hi = htole32(U64_HI(cur_data_offset)); + cur_query_entry->address.lo = htole32(U64_LO(cur_data_offset)); + + cur_data_offset += sizeof(struct per_queue_stats); + } +} + +void bnx2x_memset_stats(struct bnx2x_softc *sc) +{ + int i; + + /* function stats */ + for (i = 0; i < sc->num_queues; i++) { + struct bnx2x_fastpath *fp = &sc->fp[i]; + + memset(&fp->old_tclient, 0, + sizeof(fp->old_tclient)); + memset(&fp->old_uclient, 0, + sizeof(fp->old_uclient)); + memset(&fp->old_xclient, 0, + sizeof(fp->old_xclient)); + if (sc->stats_init) { + memset(&fp->eth_q_stats, 0, + sizeof(fp->eth_q_stats)); + memset(&fp->eth_q_stats_old, 0, + sizeof(fp->eth_q_stats_old)); + } + } + + if (sc->stats_init) { + memset(&sc->net_stats_old, 0, sizeof(sc->net_stats_old)); + memset(&sc->fw_stats_old, 0, sizeof(sc->fw_stats_old)); + memset(&sc->eth_stats_old, 0, sizeof(sc->eth_stats_old)); + memset(&sc->eth_stats, 0, sizeof(sc->eth_stats)); + memset(&sc->func_stats, 0, sizeof(sc->func_stats)); + } + + sc->stats_state = STATS_STATE_DISABLED; + + if (sc->port.pmf && sc->port.port_stx) + bnx2x_port_stats_base_init(sc); + + /* mark the end of statistics initialization */ + sc->stats_init = false; +} + +void +bnx2x_stats_init(struct bnx2x_softc *sc) +{ + int /*abs*/port = SC_PORT(sc); + int mb_idx = SC_FW_MB_IDX(sc); + int i; + + sc->stats_pending = 0; + sc->executer_idx = 0; + sc->stats_counter = 0; + + sc->stats_init = TRUE; + + /* port and func stats for management */ + if (!BNX2X_NOMCP(sc)) { + sc->port.port_stx = SHMEM_RD(sc, port_mb[port].port_stx); + sc->func_stx = SHMEM_RD(sc, func_mb[mb_idx].fw_mb_param); + } else { + sc->port.port_stx = 0; + sc->func_stx = 0; + } + + PMD_DRV_LOG(DEBUG, sc, "port_stx 0x%x func_stx 0x%x", + sc->port.port_stx, sc->func_stx); + + /* pmf should 
retrieve port statistics from SP on a non-init*/ + if (!sc->stats_init && sc->port.pmf && sc->port.port_stx) { + bnx2x_stats_handle(sc, STATS_EVENT_PMF); + } + + port = SC_PORT(sc); + /* port stats */ + memset(&(sc->port.old_nig_stats), 0, sizeof(struct nig_stats)); + sc->port.old_nig_stats.brb_discard = + REG_RD(sc, NIG_REG_STAT0_BRB_DISCARD + port*0x38); + sc->port.old_nig_stats.brb_truncate = + REG_RD(sc, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38); + if (!CHIP_IS_E3(sc)) { + REG_RD_DMAE(sc, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50, + &(sc->port.old_nig_stats.egress_mac_pkt0_lo), 2); + REG_RD_DMAE(sc, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50, + &(sc->port.old_nig_stats.egress_mac_pkt1_lo), 2); + } + + /* function stats */ + for (i = 0; i < sc->num_queues; i++) { + memset(&sc->fp[i].old_tclient, 0, sizeof(sc->fp[i].old_tclient)); + memset(&sc->fp[i].old_uclient, 0, sizeof(sc->fp[i].old_uclient)); + memset(&sc->fp[i].old_xclient, 0, sizeof(sc->fp[i].old_xclient)); + if (sc->stats_init) { + memset(&sc->fp[i].eth_q_stats, 0, + sizeof(sc->fp[i].eth_q_stats)); + memset(&sc->fp[i].eth_q_stats_old, 0, + sizeof(sc->fp[i].eth_q_stats_old)); + } + } + + /* prepare statistics ramrod data */ + bnx2x_prep_fw_stats_req(sc); + + if (sc->stats_init) { + memset(&sc->net_stats_old, 0, sizeof(sc->net_stats_old)); + memset(&sc->fw_stats_old, 0, sizeof(sc->fw_stats_old)); + memset(&sc->eth_stats_old, 0, sizeof(sc->eth_stats_old)); + memset(&sc->eth_stats, 0, sizeof(sc->eth_stats)); + memset(&sc->func_stats, 0, sizeof(sc->func_stats)); + + /* Clean SP from previous statistics */ + if (sc->func_stx) { + memset(BNX2X_SP(sc, func_stats), 0, sizeof(struct host_func_stats)); + bnx2x_func_stats_init(sc); + bnx2x_hw_stats_post(sc); + bnx2x_stats_comp(sc); + } + } + + sc->stats_state = STATS_STATE_DISABLED; + + if (sc->port.pmf && sc->port.port_stx) { + bnx2x_port_stats_base_init(sc); + } + + /* mark the end of statistics initialization */ + sc->stats_init = FALSE; +} + +void +bnx2x_save_statistics(struct bnx2x_softc *sc) +{ + int i; + + /* save queue statistics */ + for (i = 0; i < sc->num_queues; i++) { + struct bnx2x_fastpath *fp = &sc->fp[i]; + struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats; + struct bnx2x_eth_q_stats_old *qstats_old = &fp->eth_q_stats_old; + + UPDATE_QSTAT_OLD(total_unicast_bytes_received_hi); + UPDATE_QSTAT_OLD(total_unicast_bytes_received_lo); + UPDATE_QSTAT_OLD(total_broadcast_bytes_received_hi); + UPDATE_QSTAT_OLD(total_broadcast_bytes_received_lo); + UPDATE_QSTAT_OLD(total_multicast_bytes_received_hi); + UPDATE_QSTAT_OLD(total_multicast_bytes_received_lo); + UPDATE_QSTAT_OLD(total_unicast_bytes_transmitted_hi); + UPDATE_QSTAT_OLD(total_unicast_bytes_transmitted_lo); + UPDATE_QSTAT_OLD(total_broadcast_bytes_transmitted_hi); + UPDATE_QSTAT_OLD(total_broadcast_bytes_transmitted_lo); + UPDATE_QSTAT_OLD(total_multicast_bytes_transmitted_hi); + UPDATE_QSTAT_OLD(total_multicast_bytes_transmitted_lo); + } + + /* store port firmware statistics */ + if (sc->port.pmf) { + struct bnx2x_eth_stats *estats = &sc->eth_stats; + struct bnx2x_fw_port_stats_old *fwstats = &sc->fw_stats_old; + struct host_port_stats *pstats = BNX2X_SP(sc, port_stats); + + fwstats->pfc_frames_rx_hi = pstats->pfc_frames_rx_hi; + fwstats->pfc_frames_rx_lo = pstats->pfc_frames_rx_lo; + fwstats->pfc_frames_tx_hi = pstats->pfc_frames_tx_hi; + fwstats->pfc_frames_tx_lo = pstats->pfc_frames_tx_lo; + + if (IS_MF(sc)) { + UPDATE_FW_STAT_OLD(mac_filter_discard); + UPDATE_FW_STAT_OLD(mf_tag_discard); + 
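/* these snapshots are consumed by UPDATE_FW_STAT() in bnx2x_stats.h, which adds them back on top of the counters reported by the firmware */ +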
UPDATE_FW_STAT_OLD(brb_truncate_discard); + UPDATE_FW_STAT_OLD(mac_discard); + } + } +} diff --git a/src/spdk/dpdk/drivers/net/bnx2x/bnx2x_stats.h b/src/spdk/dpdk/drivers/net/bnx2x/bnx2x_stats.h new file mode 100644 index 000000000..635412bdd --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnx2x/bnx2x_stats.h @@ -0,0 +1,609 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2007-2013 Broadcom Corporation. + * + * Eric Davis + * David Christensen + * Gary Zambrano + * + * Copyright (c) 2013-2015 Brocade Communications Systems, Inc. + * Copyright (c) 2015-2018 Cavium Inc. + * All rights reserved. + * www.cavium.com + */ + +#ifndef BNX2X_STATS_H +#define BNX2X_STATS_H + +#include + +struct nig_stats { + uint32_t brb_discard; + uint32_t brb_packet; + uint32_t brb_truncate; + uint32_t flow_ctrl_discard; + uint32_t flow_ctrl_octets; + uint32_t flow_ctrl_packet; + uint32_t mng_discard; + uint32_t mng_octet_inp; + uint32_t mng_octet_out; + uint32_t mng_packet_inp; + uint32_t mng_packet_out; + uint32_t pbf_octets; + uint32_t pbf_packet; + uint32_t safc_inp; + uint32_t egress_mac_pkt0_lo; + uint32_t egress_mac_pkt0_hi; + uint32_t egress_mac_pkt1_lo; + uint32_t egress_mac_pkt1_hi; +}; + + +enum bnx2x_stats_event { + STATS_EVENT_PMF = 0, + STATS_EVENT_LINK_UP, + STATS_EVENT_UPDATE, + STATS_EVENT_STOP, + STATS_EVENT_MAX +}; + +enum bnx2x_stats_state { + STATS_STATE_DISABLED = 0, + STATS_STATE_ENABLED, + STATS_STATE_MAX +}; + +struct bnx2x_eth_stats { + uint32_t total_bytes_received_hi; + uint32_t total_bytes_received_lo; + uint32_t total_bytes_transmitted_hi; + uint32_t total_bytes_transmitted_lo; + uint32_t total_unicast_packets_received_hi; + uint32_t total_unicast_packets_received_lo; + uint32_t total_multicast_packets_received_hi; + uint32_t total_multicast_packets_received_lo; + uint32_t total_broadcast_packets_received_hi; + uint32_t total_broadcast_packets_received_lo; + uint32_t total_unicast_packets_transmitted_hi; + uint32_t total_unicast_packets_transmitted_lo; + uint32_t total_multicast_packets_transmitted_hi; + uint32_t total_multicast_packets_transmitted_lo; + uint32_t total_broadcast_packets_transmitted_hi; + uint32_t total_broadcast_packets_transmitted_lo; + uint32_t valid_bytes_received_hi; + uint32_t valid_bytes_received_lo; + + uint32_t error_bytes_received_hi; + uint32_t error_bytes_received_lo; + uint32_t etherstatsoverrsizepkts_hi; + uint32_t etherstatsoverrsizepkts_lo; + uint32_t no_buff_discard_hi; + uint32_t no_buff_discard_lo; + + uint32_t rx_stat_ifhcinbadoctets_hi; + uint32_t rx_stat_ifhcinbadoctets_lo; + uint32_t tx_stat_ifhcoutbadoctets_hi; + uint32_t tx_stat_ifhcoutbadoctets_lo; + uint32_t rx_stat_dot3statsfcserrors_hi; + uint32_t rx_stat_dot3statsfcserrors_lo; + uint32_t rx_stat_dot3statsalignmenterrors_hi; + uint32_t rx_stat_dot3statsalignmenterrors_lo; + uint32_t rx_stat_dot3statscarriersenseerrors_hi; + uint32_t rx_stat_dot3statscarriersenseerrors_lo; + uint32_t rx_stat_falsecarriererrors_hi; + uint32_t rx_stat_falsecarriererrors_lo; + uint32_t rx_stat_etherstatsundersizepkts_hi; + uint32_t rx_stat_etherstatsundersizepkts_lo; + uint32_t rx_stat_dot3statsframestoolong_hi; + uint32_t rx_stat_dot3statsframestoolong_lo; + uint32_t rx_stat_etherstatsfragments_hi; + uint32_t rx_stat_etherstatsfragments_lo; + uint32_t rx_stat_etherstatsjabbers_hi; + uint32_t rx_stat_etherstatsjabbers_lo; + uint32_t rx_stat_maccontrolframesreceived_hi; + uint32_t rx_stat_maccontrolframesreceived_lo; + uint32_t rx_stat_bmac_xpf_hi; + uint32_t rx_stat_bmac_xpf_lo; + uint32_t 
rx_stat_bmac_xcf_hi; + uint32_t rx_stat_bmac_xcf_lo; + uint32_t rx_stat_xoffstateentered_hi; + uint32_t rx_stat_xoffstateentered_lo; + uint32_t rx_stat_xonpauseframesreceived_hi; + uint32_t rx_stat_xonpauseframesreceived_lo; + uint32_t rx_stat_xoffpauseframesreceived_hi; + uint32_t rx_stat_xoffpauseframesreceived_lo; + uint32_t tx_stat_outxonsent_hi; + uint32_t tx_stat_outxonsent_lo; + uint32_t tx_stat_outxoffsent_hi; + uint32_t tx_stat_outxoffsent_lo; + uint32_t tx_stat_flowcontroldone_hi; + uint32_t tx_stat_flowcontroldone_lo; + uint32_t tx_stat_etherstatscollisions_hi; + uint32_t tx_stat_etherstatscollisions_lo; + uint32_t tx_stat_dot3statssinglecollisionframes_hi; + uint32_t tx_stat_dot3statssinglecollisionframes_lo; + uint32_t tx_stat_dot3statsmultiplecollisionframes_hi; + uint32_t tx_stat_dot3statsmultiplecollisionframes_lo; + uint32_t tx_stat_dot3statsdeferredtransmissions_hi; + uint32_t tx_stat_dot3statsdeferredtransmissions_lo; + uint32_t tx_stat_dot3statsexcessivecollisions_hi; + uint32_t tx_stat_dot3statsexcessivecollisions_lo; + uint32_t tx_stat_dot3statslatecollisions_hi; + uint32_t tx_stat_dot3statslatecollisions_lo; + uint32_t tx_stat_etherstatspkts64octets_hi; + uint32_t tx_stat_etherstatspkts64octets_lo; + uint32_t tx_stat_etherstatspkts65octetsto127octets_hi; + uint32_t tx_stat_etherstatspkts65octetsto127octets_lo; + uint32_t tx_stat_etherstatspkts128octetsto255octets_hi; + uint32_t tx_stat_etherstatspkts128octetsto255octets_lo; + uint32_t tx_stat_etherstatspkts256octetsto511octets_hi; + uint32_t tx_stat_etherstatspkts256octetsto511octets_lo; + uint32_t tx_stat_etherstatspkts512octetsto1023octets_hi; + uint32_t tx_stat_etherstatspkts512octetsto1023octets_lo; + uint32_t tx_stat_etherstatspkts1024octetsto1522octets_hi; + uint32_t tx_stat_etherstatspkts1024octetsto1522octets_lo; + uint32_t tx_stat_etherstatspktsover1522octets_hi; + uint32_t tx_stat_etherstatspktsover1522octets_lo; + uint32_t tx_stat_bmac_2047_hi; + uint32_t tx_stat_bmac_2047_lo; + uint32_t tx_stat_bmac_4095_hi; + uint32_t tx_stat_bmac_4095_lo; + uint32_t tx_stat_bmac_9216_hi; + uint32_t tx_stat_bmac_9216_lo; + uint32_t tx_stat_bmac_16383_hi; + uint32_t tx_stat_bmac_16383_lo; + uint32_t tx_stat_dot3statsinternalmactransmiterrors_hi; + uint32_t tx_stat_dot3statsinternalmactransmiterrors_lo; + uint32_t tx_stat_bmac_ufl_hi; + uint32_t tx_stat_bmac_ufl_lo; + + uint32_t pause_frames_received_hi; + uint32_t pause_frames_received_lo; + uint32_t pause_frames_sent_hi; + uint32_t pause_frames_sent_lo; + + uint32_t etherstatspkts1024octetsto1522octets_hi; + uint32_t etherstatspkts1024octetsto1522octets_lo; + uint32_t etherstatspktsover1522octets_hi; + uint32_t etherstatspktsover1522octets_lo; + + uint32_t brb_drop_hi; + uint32_t brb_drop_lo; + uint32_t brb_truncate_hi; + uint32_t brb_truncate_lo; + + uint32_t mac_filter_discard; + uint32_t mf_tag_discard; + uint32_t brb_truncate_discard; + uint32_t mac_discard; + + uint32_t nig_timer_max; + + /* PFC */ + uint32_t pfc_frames_received_hi; + uint32_t pfc_frames_received_lo; + uint32_t pfc_frames_sent_hi; + uint32_t pfc_frames_sent_lo; + + /* Recovery */ + uint32_t recoverable_error; + uint32_t unrecoverable_error; + + /* src: Clear-on-Read register; Will not survive PMF Migration */ + uint32_t eee_tx_lpi; + + /* receive path driver statistics */ + uint32_t rx_calls; + uint32_t rx_pkts; + uint32_t rx_soft_errors; + uint32_t rx_hw_csum_errors; + uint32_t rx_ofld_frames_csum_ip; + uint32_t rx_ofld_frames_csum_tcp_udp; + uint32_t rx_budget_reached; + + /* tx path driver 
statistics */ + uint32_t tx_pkts; + uint32_t tx_soft_errors; + uint32_t tx_ofld_frames_csum_ip; + uint32_t tx_ofld_frames_csum_tcp; + uint32_t tx_ofld_frames_csum_udp; + uint32_t tx_encap_failures; + uint32_t tx_hw_queue_full; + uint32_t tx_hw_max_queue_depth; + uint32_t tx_dma_mapping_failure; + uint32_t tx_max_drbr_queue_depth; + uint32_t tx_window_violation_std; + uint32_t tx_chain_lost_mbuf; + uint32_t tx_frames_deferred; + uint32_t tx_queue_xoff; + + /* mbuf driver statistics */ + uint32_t mbuf_defrag_attempts; + uint32_t mbuf_defrag_failures; + uint32_t mbuf_rx_bd_alloc_failed; + uint32_t mbuf_rx_bd_mapping_failed; + + /* track the number of allocated mbufs */ + uint32_t mbuf_alloc_tx; + uint32_t mbuf_alloc_rx; +}; + + +struct bnx2x_eth_q_stats { + uint32_t total_unicast_bytes_received_hi; + uint32_t total_unicast_bytes_received_lo; + uint32_t total_broadcast_bytes_received_hi; + uint32_t total_broadcast_bytes_received_lo; + uint32_t total_multicast_bytes_received_hi; + uint32_t total_multicast_bytes_received_lo; + uint32_t total_bytes_received_hi; + uint32_t total_bytes_received_lo; + uint32_t total_unicast_bytes_transmitted_hi; + uint32_t total_unicast_bytes_transmitted_lo; + uint32_t total_broadcast_bytes_transmitted_hi; + uint32_t total_broadcast_bytes_transmitted_lo; + uint32_t total_multicast_bytes_transmitted_hi; + uint32_t total_multicast_bytes_transmitted_lo; + uint32_t total_bytes_transmitted_hi; + uint32_t total_bytes_transmitted_lo; + uint32_t total_unicast_packets_received_hi; + uint32_t total_unicast_packets_received_lo; + uint32_t total_multicast_packets_received_hi; + uint32_t total_multicast_packets_received_lo; + uint32_t total_broadcast_packets_received_hi; + uint32_t total_broadcast_packets_received_lo; + uint32_t total_unicast_packets_transmitted_hi; + uint32_t total_unicast_packets_transmitted_lo; + uint32_t total_multicast_packets_transmitted_hi; + uint32_t total_multicast_packets_transmitted_lo; + uint32_t total_broadcast_packets_transmitted_hi; + uint32_t total_broadcast_packets_transmitted_lo; + uint32_t valid_bytes_received_hi; + uint32_t valid_bytes_received_lo; + + uint32_t etherstatsoverrsizepkts_hi; + uint32_t etherstatsoverrsizepkts_lo; + uint32_t no_buff_discard_hi; + uint32_t no_buff_discard_lo; + + uint32_t total_packets_received_checksum_discarded_hi; + uint32_t total_packets_received_checksum_discarded_lo; + uint32_t total_packets_received_ttl0_discarded_hi; + uint32_t total_packets_received_ttl0_discarded_lo; + uint32_t total_transmitted_dropped_packets_error_hi; + uint32_t total_transmitted_dropped_packets_error_lo; + + /* receive path driver statistics */ + uint32_t rx_calls; + uint32_t rx_pkts; + uint32_t rx_soft_errors; + uint32_t rx_hw_csum_errors; + uint32_t rx_ofld_frames_csum_ip; + uint32_t rx_ofld_frames_csum_tcp_udp; + uint32_t rx_budget_reached; + + /* tx path driver statistics */ + uint32_t tx_pkts; + uint32_t tx_soft_errors; + uint32_t tx_ofld_frames_csum_ip; + uint32_t tx_ofld_frames_csum_tcp; + uint32_t tx_ofld_frames_csum_udp; + uint32_t tx_encap_failures; + uint32_t tx_hw_queue_full; + uint32_t tx_hw_max_queue_depth; + uint32_t tx_dma_mapping_failure; + uint32_t tx_max_drbr_queue_depth; + uint32_t tx_window_violation_std; + uint32_t tx_chain_lost_mbuf; + uint32_t tx_frames_deferred; + uint32_t tx_queue_xoff; + + /* mbuf driver statistics */ + uint32_t mbuf_defrag_attempts; + uint32_t mbuf_defrag_failures; + uint32_t mbuf_rx_bd_alloc_failed; + uint32_t mbuf_rx_bd_mapping_failed; + + /* track the number of allocated mbufs */ + 
uint32_t mbuf_alloc_tx; + uint32_t mbuf_alloc_rx; +}; + +struct bnx2x_eth_stats_old { + uint32_t rx_stat_dot3statsframestoolong_hi; + uint32_t rx_stat_dot3statsframestoolong_lo; +}; + +struct bnx2x_eth_q_stats_old { + /* Fields to preserve over fw reset */ + uint32_t total_unicast_bytes_received_hi; + uint32_t total_unicast_bytes_received_lo; + uint32_t total_broadcast_bytes_received_hi; + uint32_t total_broadcast_bytes_received_lo; + uint32_t total_multicast_bytes_received_hi; + uint32_t total_multicast_bytes_received_lo; + uint32_t total_unicast_bytes_transmitted_hi; + uint32_t total_unicast_bytes_transmitted_lo; + uint32_t total_broadcast_bytes_transmitted_hi; + uint32_t total_broadcast_bytes_transmitted_lo; + uint32_t total_multicast_bytes_transmitted_hi; + uint32_t total_multicast_bytes_transmitted_lo; + + /* Fields to preserve the last value of */ + uint32_t total_bytes_received_hi; + uint32_t total_bytes_received_lo; + uint32_t total_bytes_transmitted_hi; + uint32_t total_bytes_transmitted_lo; + uint32_t total_unicast_packets_received_hi; + uint32_t total_unicast_packets_received_lo; + uint32_t total_multicast_packets_received_hi; + uint32_t total_multicast_packets_received_lo; + uint32_t total_broadcast_packets_received_hi; + uint32_t total_broadcast_packets_received_lo; + uint32_t total_unicast_packets_transmitted_hi; + uint32_t total_unicast_packets_transmitted_lo; + uint32_t total_multicast_packets_transmitted_hi; + uint32_t total_multicast_packets_transmitted_lo; + uint32_t total_broadcast_packets_transmitted_hi; + uint32_t total_broadcast_packets_transmitted_lo; + uint32_t valid_bytes_received_hi; + uint32_t valid_bytes_received_lo; + + /* receive path driver statistics */ + uint32_t rx_calls_old; + uint32_t rx_pkts_old; + uint32_t rx_soft_errors_old; + uint32_t rx_hw_csum_errors_old; + uint32_t rx_ofld_frames_csum_ip_old; + uint32_t rx_ofld_frames_csum_tcp_udp_old; + uint32_t rx_budget_reached_old; + + /* tx path driver statistics */ + uint32_t tx_pkts_old; + uint32_t tx_soft_errors_old; + uint32_t tx_ofld_frames_csum_ip_old; + uint32_t tx_ofld_frames_csum_tcp_old; + uint32_t tx_ofld_frames_csum_udp_old; + uint32_t tx_encap_failures_old; + uint32_t tx_hw_queue_full_old; + uint32_t tx_hw_max_queue_depth_old; + uint32_t tx_dma_mapping_failure_old; + uint32_t tx_max_drbr_queue_depth_old; + uint32_t tx_window_violation_std_old; + uint32_t tx_chain_lost_mbuf_old; + uint32_t tx_frames_deferred_old; + uint32_t tx_queue_xoff_old; + + /* mbuf driver statistics */ + uint32_t mbuf_defrag_attempts_old; + uint32_t mbuf_defrag_failures_old; + uint32_t mbuf_rx_bd_alloc_failed_old; + uint32_t mbuf_rx_bd_mapping_failed_old; + + /* track the number of allocated mbufs */ + int mbuf_alloc_tx_old; + int mbuf_alloc_rx_old; +}; + +struct bnx2x_net_stats_old { + uint32_t rx_dropped; +}; + +struct bnx2x_fw_port_stats_old { + uint32_t pfc_frames_tx_hi; + uint32_t pfc_frames_tx_lo; + uint32_t pfc_frames_rx_hi; + uint32_t pfc_frames_rx_lo; + + uint32_t mac_filter_discard; + uint32_t mf_tag_discard; + uint32_t brb_truncate_discard; + uint32_t mac_discard; +}; + +/* sum[hi:lo] += add[hi:lo] */ +#define ADD_64(s_hi, a_hi, s_lo, a_lo) \ + do { \ + s_lo += a_lo; \ + s_hi += a_hi + ((s_lo < a_lo) ?
1 : 0); \ + } while (0) + +#define LE32_0 ((uint32_t) 0) +#define LE16_0 ((uint16_t) 0) + +/* The _force is for cases where high value is 0 */ +#define ADD_64_LE(s_hi, a_hi_le, s_lo, a_lo_le) \ + ADD_64(s_hi, le32toh(a_hi_le), \ + s_lo, le32toh(a_lo_le)) + +#define ADD_64_LE16(s_hi, a_hi_le, s_lo, a_lo_le) \ + ADD_64(s_hi, le16toh(a_hi_le), \ + s_lo, le16toh(a_lo_le)) + +/* difference = minuend - subtrahend */ +#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \ + do { \ + if (m_lo < s_lo) { \ + /* underflow */ \ + d_hi = m_hi - s_hi; \ + if (d_hi > 0) { \ + /* we can 'loan' 1 */ \ + d_hi--; \ + d_lo = m_lo + (UINT_MAX - s_lo) + 1; \ + } else { \ + /* m_hi <= s_hi */ \ + d_hi = 0; \ + d_lo = 0; \ + } \ + } else { \ + /* m_lo >= s_lo */ \ + if (m_hi < s_hi) { \ + d_hi = 0; \ + d_lo = 0; \ + } else { \ + /* m_hi >= s_hi */ \ + d_hi = m_hi - s_hi; \ + d_lo = m_lo - s_lo; \ + } \ + } \ + } while (0) + +#define UPDATE_STAT64(s, t) \ + do { \ + DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \ + diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \ + pstats->mac_stx[0].t##_hi = new->s##_hi; \ + pstats->mac_stx[0].t##_lo = new->s##_lo; \ + ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \ + pstats->mac_stx[1].t##_lo, diff.lo); \ + } while (0) + +#define UPDATE_STAT64_NIG(s, t) \ + do { \ + DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \ + diff.lo, new->s##_lo, old->s##_lo); \ + ADD_64(estats->t##_hi, diff.hi, \ + estats->t##_lo, diff.lo); \ + } while (0) + +/* sum[hi:lo] += add */ +#define ADD_EXTEND_64(s_hi, s_lo, a) \ + do { \ + s_lo += a; \ + s_hi += (s_lo < a) ? 1 : 0; \ + } while (0) + +#define ADD_STAT64(diff, t) \ + do { \ + ADD_64(pstats->mac_stx[1].t##_hi, new->diff##_hi, \ + pstats->mac_stx[1].t##_lo, new->diff##_lo); \ + } while (0) + +#define UPDATE_EXTEND_STAT(s) \ + do { \ + ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \ + pstats->mac_stx[1].s##_lo, \ + new->s); \ + } while (0) + +#define UPDATE_EXTEND_TSTAT_X(s, t, size) \ + do { \ + diff = le##size##toh(tclient->s) - \ + le##size##toh(old_tclient->s); \ + old_tclient->s = tclient->s; \ + ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \ + } while (0) + +#define UPDATE_EXTEND_TSTAT(s, t) UPDATE_EXTEND_TSTAT_X(s, t, 32) + +#define UPDATE_EXTEND_E_TSTAT(s, t, size) \ + do { \ + UPDATE_EXTEND_TSTAT_X(s, t, size); \ + ADD_EXTEND_64(estats->t##_hi, estats->t##_lo, diff); \ + } while (0) + +#define UPDATE_EXTEND_USTAT(s, t) \ + do { \ + diff = le32toh(uclient->s) - le32toh(old_uclient->s); \ + old_uclient->s = uclient->s; \ + ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \ + } while (0) + +#define UPDATE_EXTEND_E_USTAT(s, t) \ + do { \ + UPDATE_EXTEND_USTAT(s, t); \ + ADD_EXTEND_64(estats->t##_hi, estats->t##_lo, diff); \ + } while (0) + +#define UPDATE_EXTEND_XSTAT(s, t) \ + do { \ + diff = le32toh(xclient->s) - le32toh(old_xclient->s); \ + old_xclient->s = xclient->s; \ + ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \ + } while (0) + +#define UPDATE_QSTAT(s, t) \ + do { \ + qstats->t##_hi = qstats_old->t##_hi + le32toh(s.hi); \ + qstats->t##_lo = qstats_old->t##_lo + le32toh(s.lo); \ + } while (0) + +#define UPDATE_QSTAT_OLD(f) \ + do { \ + qstats_old->f = qstats->f; \ + } while (0) + +#define UPDATE_ESTAT_QSTAT_64(s) \ + do { \ + ADD_64(estats->s##_hi, qstats->s##_hi, \ + estats->s##_lo, qstats->s##_lo); \ + SUB_64(estats->s##_hi, qstats_old->s##_hi_old, \ + estats->s##_lo, qstats_old->s##_lo_old); \ + qstats_old->s##_hi_old = qstats->s##_hi; \ + qstats_old->s##_lo_old = qstats->s##_lo; \ + } while (0) + +#define 
UPDATE_ESTAT_QSTAT(s) \ + do { \ + estats->s += qstats->s; \ + estats->s -= qstats_old->s##_old; \ + qstats_old->s##_old = qstats->s; \ + } while (0) + +#define UPDATE_FSTAT_QSTAT(s) \ + do { \ + ADD_64(fstats->s##_hi, qstats->s##_hi, \ + fstats->s##_lo, qstats->s##_lo); \ + SUB_64(fstats->s##_hi, qstats_old->s##_hi, \ + fstats->s##_lo, qstats_old->s##_lo); \ + estats->s##_hi = fstats->s##_hi; \ + estats->s##_lo = fstats->s##_lo; \ + qstats_old->s##_hi = qstats->s##_hi; \ + qstats_old->s##_lo = qstats->s##_lo; \ + } while (0) + +#define UPDATE_FW_STAT(s) \ + do { \ + estats->s = le32toh(tport->s) + fwstats->s; \ + } while (0) + +#define UPDATE_FW_STAT_OLD(f) \ + do { \ + fwstats->f = estats->f; \ + } while (0) + +#define UPDATE_ESTAT(s, t) \ + do { \ + SUB_64(estats->s##_hi, estats_old->t##_hi, \ + estats->s##_lo, estats_old->t##_lo); \ + ADD_64(estats->s##_hi, estats->t##_hi, \ + estats->s##_lo, estats->t##_lo); \ + estats_old->t##_hi = estats->t##_hi; \ + estats_old->t##_lo = estats->t##_lo; \ + } while (0) + +/* minuend -= subtrahend */ +#define SUB_64(m_hi, s_hi, m_lo, s_lo) \ + do { \ + DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \ + } while (0) + +/* minuend[hi:lo] -= subtrahend */ +#define SUB_EXTEND_64(m_hi, m_lo, s) \ + do { \ + uint32_t s_hi = 0; \ + SUB_64(m_hi, s_hi, m_lo, s); \ + } while (0) + +#define SUB_EXTEND_USTAT(s, t) \ + do { \ + diff = le32toh(uclient->s) - le32toh(old_uclient->s); \ + SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \ + } while (0) + +struct bnx2x_softc; +void bnx2x_stats_init(struct bnx2x_softc *sc); +void bnx2x_stats_handle(struct bnx2x_softc *sc, enum bnx2x_stats_event event); +void bnx2x_save_statistics(struct bnx2x_softc *sc); +void bnx2x_memset_stats(struct bnx2x_softc *sc); + +#endif /* BNX2X_STATS_H */ diff --git a/src/spdk/dpdk/drivers/net/bnx2x/bnx2x_vfpf.c b/src/spdk/dpdk/drivers/net/bnx2x/bnx2x_vfpf.c new file mode 100644 index 000000000..097ccfee1 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnx2x/bnx2x_vfpf.c @@ -0,0 +1,763 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2013-2015 Brocade Communications Systems, Inc. + * Copyright (c) 2015-2018 Cavium Inc. + * All rights reserved. + * www.cavium.com + */ + +#include "bnx2x.h" + +/* calculate the crc in the bulletin board */ +static inline uint32_t +bnx2x_vf_crc(struct bnx2x_vf_bulletin *bull) +{ + uint32_t crc_sz = sizeof(bull->crc), length = bull->length - crc_sz; + + return ECORE_CRC32_LE(0, (uint8_t *)bull + crc_sz, length); +} + +/* Checks whether there are MAC/channel updates for the VF + * returns TRUE if something was updated + */ +int +bnx2x_check_bull(struct bnx2x_softc *sc) +{ + struct bnx2x_vf_bulletin *bull; + uint8_t tries = 0; + uint16_t old_version = sc->old_bulletin.version; + uint64_t valid_bitmap; + + bull = sc->pf2vf_bulletin; + if (old_version == bull->version) { + return FALSE; + } else { + /* Check the crc until we get the correct data */ + while (tries < BNX2X_VF_BULLETIN_TRIES) { + bull = sc->pf2vf_bulletin; + if (bull->crc == bnx2x_vf_crc(bull)) + break; + + PMD_DRV_LOG(ERR, sc, "bad crc on bulletin board. contained %x computed %x", + bull->crc, bnx2x_vf_crc(bull)); + ++tries; + } + if (tries == BNX2X_VF_BULLETIN_TRIES) { + PMD_DRV_LOG(ERR, sc, "pf to vf bulletin board crc was wrong %d consecutive times. 
Aborting", + tries); + return FALSE; + } + } + + valid_bitmap = bull->valid_bitmap; + + /* check the mac address and VLAN and allocate memory if valid */ + if (valid_bitmap & (1 << MAC_ADDR_VALID) && memcmp(bull->mac, sc->old_bulletin.mac, ETH_ALEN)) + rte_memcpy(&sc->link_params.mac_addr, bull->mac, ETH_ALEN); + if (valid_bitmap & (1 << VLAN_VALID)) + rte_memcpy(&bull->vlan, &sc->old_bulletin.vlan, VLAN_HLEN); + + sc->old_bulletin = *bull; + + return TRUE; +} + +/* place a given tlv on the tlv buffer at a given offset */ +static void +bnx2x_add_tlv(__rte_unused struct bnx2x_softc *sc, void *tlvs_list, + uint16_t offset, uint16_t type, uint16_t length) +{ + struct channel_tlv *tl = (struct channel_tlv *) + ((unsigned long)tlvs_list + offset); + + tl->type = type; + tl->length = length; +} + +/* Initiliaze header of the first tlv and clear mailbox*/ +static void +bnx2x_vf_prep(struct bnx2x_softc *sc, struct vf_first_tlv *first_tlv, + uint16_t type, uint16_t length) +{ + struct bnx2x_vf_mbx_msg *mbox = sc->vf2pf_mbox; + + rte_spinlock_lock(&sc->vf2pf_lock); + + PMD_DRV_LOG(DEBUG, sc, "Preparing %d tlv for sending", type); + + memset(mbox, 0, sizeof(struct bnx2x_vf_mbx_msg)); + + bnx2x_add_tlv(sc, &first_tlv->tl, 0, type, length); + + /* Initialize header of the first tlv */ + first_tlv->reply_offset = sizeof(mbox->query); +} + +/* releases the mailbox */ +static void +bnx2x_vf_finalize(struct bnx2x_softc *sc, + __rte_unused struct vf_first_tlv *first_tlv) +{ + PMD_DRV_LOG(DEBUG, sc, "done sending [%d] tlv over vf pf channel", + first_tlv->tl.type); + + rte_spinlock_unlock(&sc->vf2pf_lock); +} + +#define BNX2X_VF_CMD_ADDR_LO PXP_VF_ADDR_CSDM_GLOBAL_START +#define BNX2X_VF_CMD_ADDR_HI BNX2X_VF_CMD_ADDR_LO + 4 +#define BNX2X_VF_CMD_TRIGGER BNX2X_VF_CMD_ADDR_HI + 4 +#define BNX2X_VF_CHANNEL_DELAY 100 +#define BNX2X_VF_CHANNEL_TRIES 100 + +static int +bnx2x_do_req4pf(struct bnx2x_softc *sc, rte_iova_t phys_addr) +{ + uint8_t *status = &sc->vf2pf_mbox->resp.common_reply.status; + uint8_t i; + + if (*status) { + PMD_DRV_LOG(ERR, sc, "status should be zero before message" + " to pf was sent"); + return -EINVAL; + } + + bnx2x_check_bull(sc); + if (sc->old_bulletin.valid_bitmap & (1 << CHANNEL_DOWN)) { + PMD_DRV_LOG(ERR, sc, "channel is down. 
Aborting message sending"); + return -EINVAL; + } + + REG_WR(sc, BNX2X_VF_CMD_ADDR_LO, U64_LO(phys_addr)); + REG_WR(sc, BNX2X_VF_CMD_ADDR_HI, U64_HI(phys_addr)); + + /* memory barrier to ensure that FW can read phys_addr */ + wmb(); + + REG_WR8(sc, BNX2X_VF_CMD_TRIGGER, 1); + + /* Do several attempts until PF completes */ + for (i = 0; i < BNX2X_VF_CHANNEL_TRIES; i++) { + DELAY_MS(BNX2X_VF_CHANNEL_DELAY); + if (*status) + break; + } + + if (!*status) { + PMD_DRV_LOG(ERR, sc, "Response from PF timed out"); + return -EAGAIN; + } + + PMD_DRV_LOG(DEBUG, sc, "Response from PF was received"); + return 0; +} + +static inline uint16_t bnx2x_check_me_flags(uint32_t val) +{ + if (((val) & ME_REG_VF_VALID) && (!((val) & ME_REG_VF_ERR))) + return ME_REG_VF_VALID; + else + return 0; +} + +#define BNX2X_ME_ANSWER_DELAY 100 +#define BNX2X_ME_ANSWER_TRIES 10 + +static inline int bnx2x_read_vf_id(struct bnx2x_softc *sc, uint32_t *vf_id) +{ + uint32_t val; + uint8_t i = 0; + + while (i <= BNX2X_ME_ANSWER_TRIES) { + val = BNX2X_DB_READ(DOORBELL_ADDR(sc, 0)); + if (bnx2x_check_me_flags(val)) { + PMD_DRV_LOG(DEBUG, sc, + "valid register value: 0x%08x", val); + *vf_id = VF_ID(val); + return 0; + } + + DELAY_MS(BNX2X_ME_ANSWER_DELAY); + i++; + } + + PMD_DRV_LOG(ERR, sc, "Invalid register value: 0x%08x", val); + + return -EINVAL; +} + +#define BNX2X_VF_OBTAIN_MAX_TRIES 3 +#define BNX2X_VF_OBTAIN_MAC_FILTERS 1 +#define BNX2X_VF_OBTAIN_MC_FILTERS 10 + +static +int bnx2x_loop_obtain_resources(struct bnx2x_softc *sc) +{ + struct vf_acquire_resp_tlv *resp = &sc->vf2pf_mbox->resp.acquire_resp, + *sc_resp = &sc->acquire_resp; + struct vf_resource_query *res_query; + struct vf_resc *resc; + int res_obtained = false; + int tries = 0; + int rc; + + do { + PMD_DRV_LOG(DEBUG, sc, "trying to get resources"); + + rc = bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr); + if (rc) + return rc; + + memcpy(sc_resp, resp, sizeof(sc->acquire_resp)); + + tries++; + + /* check PF to request acceptance */ + if (sc_resp->status == BNX2X_VF_STATUS_SUCCESS) { + PMD_DRV_LOG(DEBUG, sc, "resources obtained successfully"); + res_obtained = true; + } else if (sc_resp->status == BNX2X_VF_STATUS_NO_RESOURCES && + tries < BNX2X_VF_OBTAIN_MAX_TRIES) { + PMD_DRV_LOG(DEBUG, sc, + "PF cannot allocate requested amount of resources"); + + res_query = &sc->vf2pf_mbox->query[0].acquire.res_query; + resc = &sc_resp->resc; + + /* PF refused our request. 
Try to decrease request params */ + res_query->num_txqs = min(res_query->num_txqs, resc->num_txqs); + res_query->num_rxqs = min(res_query->num_rxqs, resc->num_rxqs); + res_query->num_sbs = min(res_query->num_sbs, resc->num_sbs); + res_query->num_mac_filters = min(res_query->num_mac_filters, resc->num_mac_filters); + res_query->num_vlan_filters = min(res_query->num_vlan_filters, resc->num_vlan_filters); + res_query->num_mc_filters = min(res_query->num_mc_filters, resc->num_mc_filters); + + memset(&sc->vf2pf_mbox->resp, 0, sizeof(union resp_tlvs)); + } else { + PMD_DRV_LOG(ERR, sc, "Failed to get the requested " + "amount of resources: %d.", + sc_resp->status); + return -EINVAL; + } + } while (!res_obtained); + + return 0; +} + +int bnx2x_vf_get_resources(struct bnx2x_softc *sc, uint8_t tx_count, uint8_t rx_count) +{ + struct vf_acquire_tlv *acq = &sc->vf2pf_mbox->query[0].acquire; + uint32_t vf_id; + int rc; + + bnx2x_vf_close(sc); + bnx2x_vf_prep(sc, &acq->first_tlv, BNX2X_VF_TLV_ACQUIRE, sizeof(*acq)); + + if (bnx2x_read_vf_id(sc, &vf_id)) { + rc = -EAGAIN; + goto out; + } + + acq->vf_id = vf_id; + + acq->res_query.num_rxqs = rx_count; + acq->res_query.num_txqs = tx_count; + acq->res_query.num_sbs = sc->igu_sb_cnt; + acq->res_query.num_mac_filters = BNX2X_VF_OBTAIN_MAC_FILTERS; + acq->res_query.num_mc_filters = BNX2X_VF_OBTAIN_MC_FILTERS; + + acq->bulletin_addr = sc->pf2vf_bulletin_mapping.paddr; + + /* Request physical port identifier */ + bnx2x_add_tlv(sc, acq, acq->first_tlv.tl.length, + BNX2X_VF_TLV_PHYS_PORT_ID, + sizeof(struct channel_tlv)); + + bnx2x_add_tlv(sc, acq, + (acq->first_tlv.tl.length + sizeof(struct channel_tlv)), + BNX2X_VF_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + /* requesting the resources in loop */ + rc = bnx2x_loop_obtain_resources(sc); + if (rc) + goto out; + + struct vf_acquire_resp_tlv sc_resp = sc->acquire_resp; + + sc->devinfo.chip_id |= (sc_resp.chip_num & 0xFFFF); + sc->devinfo.int_block = INT_BLOCK_IGU; + sc->devinfo.chip_port_mode = CHIP_2_PORT_MODE; + sc->devinfo.mf_info.mf_ov = 0; + sc->devinfo.mf_info.mf_mode = 0; + sc->devinfo.flash_size = 0; + + sc->igu_sb_cnt = sc_resp.resc.num_sbs; + sc->igu_base_sb = sc_resp.resc.hw_sbs[0] & 0xFF; + sc->igu_dsb_id = -1; + sc->max_tx_queues = sc_resp.resc.num_txqs; + sc->max_rx_queues = sc_resp.resc.num_rxqs; + + sc->link_params.chip_id = sc->devinfo.chip_id; + sc->doorbell_size = sc_resp.db_size; + sc->flags |= BNX2X_NO_WOL_FLAG | BNX2X_NO_ISCSI_OOO_FLAG | BNX2X_NO_ISCSI_FLAG | BNX2X_NO_FCOE_FLAG; + + PMD_DRV_LOG(DEBUG, sc, "status block count = %d, base status block = %x", + sc->igu_sb_cnt, sc->igu_base_sb); + strncpy(sc->fw_ver, sc_resp.fw_ver, sizeof(sc->fw_ver)); + + if (rte_is_valid_assigned_ether_addr(&sc_resp.resc.current_mac_addr)) + rte_ether_addr_copy(&sc_resp.resc.current_mac_addr, + (struct rte_ether_addr *)sc->link_params.mac_addr); + else + rte_eth_random_addr(sc->link_params.mac_addr); + +out: + bnx2x_vf_finalize(sc, &acq->first_tlv); + + return rc; +} + +/* Ask PF to release VF's resources */ +void +bnx2x_vf_close(struct bnx2x_softc *sc) +{ + struct vf_release_tlv *query; + struct vf_common_reply_tlv *reply = &sc->vf2pf_mbox->resp.common_reply; + uint32_t vf_id; + int rc; + + query = &sc->vf2pf_mbox->query[0].release; + bnx2x_vf_prep(sc, &query->first_tlv, BNX2X_VF_TLV_RELEASE, + sizeof(*query)); + + if (bnx2x_read_vf_id(sc, &vf_id)) { + rc = -EAGAIN; + goto out; + } + + query->vf_id = vf_id; + + bnx2x_add_tlv(sc, query, query->first_tlv.tl.length, + BNX2X_VF_TLV_LIST_END, + 
sizeof(struct channel_list_end_tlv)); + + rc = bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr); + if (rc || reply->status != BNX2X_VF_STATUS_SUCCESS) + PMD_DRV_LOG(ERR, sc, "Failed to release VF"); + +out: + bnx2x_vf_finalize(sc, &query->first_tlv); +} + +/* Let PF know the VF status blocks phys_addrs */ +int +bnx2x_vf_init(struct bnx2x_softc *sc) +{ + struct vf_init_tlv *query; + struct vf_common_reply_tlv *reply = &sc->vf2pf_mbox->resp.common_reply; + int i, rc; + + PMD_INIT_FUNC_TRACE(sc); + + query = &sc->vf2pf_mbox->query[0].init; + bnx2x_vf_prep(sc, &query->first_tlv, BNX2X_VF_TLV_INIT, + sizeof(*query)); + + FOR_EACH_QUEUE(sc, i) { + query->sb_addr[i] = (unsigned long)(sc->fp[i].sb_dma.paddr); + } + + query->stats_step = sizeof(struct per_queue_stats); + query->stats_addr = sc->fw_stats_data_mapping + + offsetof(struct bnx2x_fw_stats_data, queue_stats); + + bnx2x_add_tlv(sc, query, query->first_tlv.tl.length, + BNX2X_VF_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + rc = bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr); + if (rc) + goto out; + if (reply->status != BNX2X_VF_STATUS_SUCCESS) { + PMD_DRV_LOG(ERR, sc, "Failed to init VF"); + rc = -EINVAL; + goto out; + } + + PMD_DRV_LOG(DEBUG, sc, "VF was initialized"); +out: + bnx2x_vf_finalize(sc, &query->first_tlv); + return rc; +} + +void +bnx2x_vf_unload(struct bnx2x_softc *sc) +{ + struct vf_close_tlv *query; + struct vf_common_reply_tlv *reply = &sc->vf2pf_mbox->resp.common_reply; + uint32_t vf_id; + int i, rc; + + PMD_INIT_FUNC_TRACE(sc); + + FOR_EACH_QUEUE(sc, i) + bnx2x_vf_teardown_queue(sc, i); + + bnx2x_vf_set_mac(sc, false); + + query = &sc->vf2pf_mbox->query[0].close; + bnx2x_vf_prep(sc, &query->first_tlv, BNX2X_VF_TLV_CLOSE, + sizeof(*query)); + + if (bnx2x_read_vf_id(sc, &vf_id)) { + rc = -EAGAIN; + goto out; + } + + query->vf_id = vf_id; + + bnx2x_add_tlv(sc, query, query->first_tlv.tl.length, + BNX2X_VF_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + rc = bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr); + if (rc || reply->status != BNX2X_VF_STATUS_SUCCESS) + PMD_DRV_LOG(ERR, sc, + "Bad reply from PF for close message"); + +out: + bnx2x_vf_finalize(sc, &query->first_tlv); +} + +static inline uint16_t +bnx2x_vf_q_flags(uint8_t leading) +{ + uint16_t flags = leading ? 
BNX2X_VF_Q_FLAG_LEADING_RSS : 0; + + flags |= BNX2X_VF_Q_FLAG_CACHE_ALIGN; + flags |= BNX2X_VF_Q_FLAG_STATS; + flags |= BNX2X_VF_Q_FLAG_VLAN; + + return flags; +} + +static void +bnx2x_vf_rx_q_prep(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp, + struct vf_rxq_params *rxq_init, uint16_t flags) +{ + struct bnx2x_rx_queue *rxq; + + rxq = sc->rx_queues[fp->index]; + if (!rxq) { + PMD_DRV_LOG(ERR, sc, "RX queue %d is NULL", fp->index); + return; + } + + rxq_init->rcq_addr = rxq->cq_ring_phys_addr; + rxq_init->rcq_np_addr = rxq->cq_ring_phys_addr + BNX2X_PAGE_SIZE; + rxq_init->rxq_addr = rxq->rx_ring_phys_addr; + rxq_init->vf_sb_id = fp->index; + rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS; + rxq_init->mtu = sc->mtu; + rxq_init->buf_sz = fp->rx_buf_size; + rxq_init->flags = flags; + rxq_init->stat_id = -1; + rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT; +} + +static void +bnx2x_vf_tx_q_prep(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp, + struct vf_txq_params *txq_init, uint16_t flags) +{ + struct bnx2x_tx_queue *txq; + + txq = sc->tx_queues[fp->index]; + if (!txq) { + PMD_DRV_LOG(ERR, sc, "TX queue %d is NULL", fp->index); + return; + } + + txq_init->txq_addr = txq->tx_ring_phys_addr; + txq_init->sb_index = HC_INDEX_ETH_TX_CQ_CONS_COS0; + txq_init->flags = flags; + txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW; + txq_init->vf_sb_id = fp->index; +} + +int +bnx2x_vf_setup_queue(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp, int leading) +{ + struct vf_setup_q_tlv *query; + struct vf_common_reply_tlv *reply = &sc->vf2pf_mbox->resp.common_reply; + uint16_t flags = bnx2x_vf_q_flags(leading); + int rc; + + query = &sc->vf2pf_mbox->query[0].setup_q; + bnx2x_vf_prep(sc, &query->first_tlv, BNX2X_VF_TLV_SETUP_Q, + sizeof(*query)); + + query->vf_qid = fp->index; + query->param_valid = VF_RXQ_VALID | VF_TXQ_VALID; + + bnx2x_vf_rx_q_prep(sc, fp, &query->rxq, flags); + bnx2x_vf_tx_q_prep(sc, fp, &query->txq, flags); + + bnx2x_add_tlv(sc, query, query->first_tlv.tl.length, + BNX2X_VF_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + rc = bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr); + if (rc) + goto out; + if (reply->status != BNX2X_VF_STATUS_SUCCESS) { + PMD_DRV_LOG(ERR, sc, "Failed to setup VF queue[%d]", + fp->index); + rc = -EINVAL; + } +out: + bnx2x_vf_finalize(sc, &query->first_tlv); + + return rc; +} + +int +bnx2x_vf_teardown_queue(struct bnx2x_softc *sc, int qid) +{ + struct vf_q_op_tlv *query_op; + struct vf_common_reply_tlv *reply = &sc->vf2pf_mbox->resp.common_reply; + int rc; + + query_op = &sc->vf2pf_mbox->query[0].q_op; + bnx2x_vf_prep(sc, &query_op->first_tlv, + BNX2X_VF_TLV_TEARDOWN_Q, + sizeof(*query_op)); + + query_op->vf_qid = qid; + + bnx2x_add_tlv(sc, query_op, + query_op->first_tlv.tl.length, + BNX2X_VF_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + rc = bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr); + if (rc || reply->status != BNX2X_VF_STATUS_SUCCESS) + PMD_DRV_LOG(ERR, sc, + "Bad reply for vf_q %d teardown", qid); + + bnx2x_vf_finalize(sc, &query_op->first_tlv); + + return rc; +} + +int +bnx2x_vf_set_mac(struct bnx2x_softc *sc, int set) +{ + struct vf_set_q_filters_tlv *query; + struct vf_common_reply_tlv *reply; + int rc; + + query = &sc->vf2pf_mbox->query[0].set_q_filters; + bnx2x_vf_prep(sc, &query->first_tlv, BNX2X_VF_TLV_SET_Q_FILTERS, + sizeof(*query)); + + query->vf_qid = sc->fp->index; + query->mac_filters_cnt = 1; + query->flags = BNX2X_VF_MAC_VLAN_CHANGED; + + query->filters[0].flags = (set ? 
BNX2X_VF_Q_FILTER_SET_MAC : 0) | + BNX2X_VF_Q_FILTER_DEST_MAC_VALID; + + bnx2x_check_bull(sc); + + rte_memcpy(query->filters[0].mac, sc->link_params.mac_addr, ETH_ALEN); + + bnx2x_add_tlv(sc, query, query->first_tlv.tl.length, + BNX2X_VF_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + rc = bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr); + if (rc) + goto out; + reply = &sc->vf2pf_mbox->resp.common_reply; + + while (BNX2X_VF_STATUS_FAILURE == reply->status && + bnx2x_check_bull(sc)) { + /* A new mac was configured by PF for us */ + rte_memcpy(sc->link_params.mac_addr, sc->pf2vf_bulletin->mac, + ETH_ALEN); + rte_memcpy(query->filters[0].mac, sc->pf2vf_bulletin->mac, + ETH_ALEN); + + rc = bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr); + if (rc) + goto out; + } + + if (BNX2X_VF_STATUS_SUCCESS != reply->status) { + PMD_DRV_LOG(ERR, sc, "Bad reply from PF for SET MAC message: %d", + reply->status); + rc = -EINVAL; + } +out: + bnx2x_vf_finalize(sc, &query->first_tlv); + + return rc; +} + +int +bnx2x_vf_config_rss(struct bnx2x_softc *sc, + struct ecore_config_rss_params *params) +{ + struct vf_rss_tlv *query; + struct vf_common_reply_tlv *reply = &sc->vf2pf_mbox->resp.common_reply; + int rc; + + query = &sc->vf2pf_mbox->query[0].update_rss; + + bnx2x_vf_prep(sc, &query->first_tlv, BNX2X_VF_TLV_UPDATE_RSS, + sizeof(*query)); + + /* add list termination tlv */ + bnx2x_add_tlv(sc, query, query->first_tlv.tl.length, + BNX2X_VF_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + rte_memcpy(query->rss_key, params->rss_key, sizeof(params->rss_key)); + query->rss_key_size = T_ETH_RSS_KEY; + + rte_memcpy(query->ind_table, params->ind_table, T_ETH_INDIRECTION_TABLE_SIZE); + query->ind_table_size = T_ETH_INDIRECTION_TABLE_SIZE; + + query->rss_result_mask = params->rss_result_mask; + query->rss_flags = params->rss_flags; + + rc = bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr); + if (rc) + goto out; + + if (reply->status != BNX2X_VF_STATUS_SUCCESS) { + PMD_DRV_LOG(ERR, sc, "Failed to configure RSS"); + rc = -EINVAL; + } +out: + bnx2x_vf_finalize(sc, &query->first_tlv); + + return rc; +} + +int +bnx2x_vf_set_rx_mode(struct bnx2x_softc *sc) +{ + struct vf_set_q_filters_tlv *query; + struct vf_common_reply_tlv *reply = &sc->vf2pf_mbox->resp.common_reply; + int rc; + + query = &sc->vf2pf_mbox->query[0].set_q_filters; + bnx2x_vf_prep(sc, &query->first_tlv, BNX2X_VF_TLV_SET_Q_FILTERS, + sizeof(*query)); + + query->vf_qid = 0; + query->flags = BNX2X_VF_RX_MASK_CHANGED; + + switch (sc->rx_mode) { + case BNX2X_RX_MODE_NONE: /* no Rx */ + query->rx_mask = VFPF_RX_MASK_ACCEPT_NONE; + break; + case BNX2X_RX_MODE_NORMAL: + query->rx_mask = VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST; + query->rx_mask |= VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST; + query->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST; + break; + case BNX2X_RX_MODE_ALLMULTI: + query->rx_mask = VFPF_RX_MASK_ACCEPT_ALL_MULTICAST; + query->rx_mask |= VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST; + query->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST; + break; + case BNX2X_RX_MODE_ALLMULTI_PROMISC: + case BNX2X_RX_MODE_PROMISC: + query->rx_mask = VFPF_RX_MASK_ACCEPT_ALL_UNICAST; + query->rx_mask |= VFPF_RX_MASK_ACCEPT_ALL_MULTICAST; + query->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST; + break; + default: + PMD_DRV_LOG(ERR, sc, "BAD rx mode (%d)", sc->rx_mode); + rc = -EINVAL; + goto out; + } + + bnx2x_add_tlv(sc, query, query->first_tlv.tl.length, + BNX2X_VF_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + rc = bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr); 
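+ /* bnx2x_do_req4pf() hands the mailbox physical address to the PF and polls the reply status; a non-zero rc means the request could not be posted or the PF did not answer in time */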
+ if (rc) + goto out; + + if (reply->status != BNX2X_VF_STATUS_SUCCESS) { + PMD_DRV_LOG(ERR, sc, "Failed to set RX mode"); + rc = -EINVAL; + } + +out: + bnx2x_vf_finalize(sc, &query->first_tlv); + + return rc; +} + +int +bnx2x_vfpf_set_mcast(struct bnx2x_softc *sc, + struct rte_ether_addr *mc_addrs, + uint32_t mc_addrs_num) +{ + struct vf_set_q_filters_tlv *query; + struct vf_common_reply_tlv *reply = + &sc->vf2pf_mbox->resp.common_reply; + int rc = 0; + uint32_t i = 0; + query = &sc->vf2pf_mbox->query[0].set_q_filters; + bnx2x_vf_prep(sc, &query->first_tlv, BNX2X_VF_TLV_SET_Q_FILTERS, + sizeof(*query)); + /* We support at most VF_MAX_MULTICAST_PER_VF multicast addresses */ + if (mc_addrs_num > VF_MAX_MULTICAST_PER_VF) { + PMD_DRV_LOG(ERR, sc, + "VF supports not more than %d multicast MAC addresses", + VF_MAX_MULTICAST_PER_VF); + + rc = -EINVAL; + goto out; + } + + for (i = 0; i < mc_addrs_num; i++) { + PMD_DRV_LOG(DEBUG, sc, "Adding mcast MAC:%x:%x:%x:%x:%x:%x", + mc_addrs[i].addr_bytes[0], + mc_addrs[i].addr_bytes[1], + mc_addrs[i].addr_bytes[2], + mc_addrs[i].addr_bytes[3], + mc_addrs[i].addr_bytes[4], + mc_addrs[i].addr_bytes[5]); + memcpy(query->multicast[i], mc_addrs[i].addr_bytes, ETH_ALEN); + } + + query->vf_qid = 0; + query->flags = BNX2X_VF_MULTICAST_CHANGED; + query->multicast_cnt = i; + + /* add list termination tlv */ + bnx2x_add_tlv(sc, query, query->first_tlv.tl.length, + BNX2X_VF_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + rc = bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr); + if (rc) + goto out; + + if (reply->status != BNX2X_VF_STATUS_SUCCESS) { + PMD_DRV_LOG(ERR, sc, "Set Rx mode/multicast failed: %d", + reply->status); + rc = -EINVAL; + } + +out: + bnx2x_vf_finalize(sc, &query->first_tlv); + + return rc; +} diff --git a/src/spdk/dpdk/drivers/net/bnx2x/bnx2x_vfpf.h b/src/spdk/dpdk/drivers/net/bnx2x/bnx2x_vfpf.h new file mode 100644 index 000000000..7aab8b101 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnx2x/bnx2x_vfpf.h @@ -0,0 +1,338 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2013-2015 Brocade Communications Systems, Inc. + * Copyright (c) 2015-2018 Cavium Inc. + * All rights reserved. 
+ * www.cavium.com + */ + +#ifndef BNX2X_VFPF_H +#define BNX2X_VFPF_H + +#include "ecore_sp.h" + +#define VLAN_HLEN 4 + +struct vf_resource_query { + uint8_t num_rxqs; + uint8_t num_txqs; + uint8_t num_sbs; + uint8_t num_mac_filters; + uint8_t num_vlan_filters; + uint8_t num_mc_filters; +}; + +#define BNX2X_VF_STATUS_SUCCESS 1 +#define BNX2X_VF_STATUS_FAILURE 2 +#define BNX2X_VF_STATUS_NO_RESOURCES 4 +#define BNX2X_VF_BULLETIN_TRIES 5 + +#define BNX2X_VF_Q_FLAG_CACHE_ALIGN 0x0008 +#define BNX2X_VF_Q_FLAG_STATS 0x0010 +#define BNX2X_VF_Q_FLAG_OV 0x0020 +#define BNX2X_VF_Q_FLAG_VLAN 0x0040 +#define BNX2X_VF_Q_FLAG_COS 0x0080 +#define BNX2X_VF_Q_FLAG_HC 0x0100 +#define BNX2X_VF_Q_FLAG_DHC 0x0200 +#define BNX2X_VF_Q_FLAG_LEADING_RSS 0x0400 + +#define TLV_BUFFER_SIZE 1024 + +#define VFPF_RX_MASK_ACCEPT_NONE 0x00000000 +#define VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST 0x00000001 +#define VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST 0x00000002 +#define VFPF_RX_MASK_ACCEPT_ALL_UNICAST 0x00000004 +#define VFPF_RX_MASK_ACCEPT_ALL_MULTICAST 0x00000008 +#define VFPF_RX_MASK_ACCEPT_BROADCAST 0x00000010 + +/* general tlv header (used for both vf->pf request and pf->vf response) */ +struct channel_tlv { + uint16_t type; + uint16_t length; +}; + +struct vf_first_tlv { + struct channel_tlv tl; + uint32_t reply_offset; +}; + +struct tlv_buffer_size { + uint8_t tlv_buffer[TLV_BUFFER_SIZE]; +}; + +/* tlv struct for all PF replies except acquire */ +struct vf_common_reply_tlv { + struct channel_tlv tl; + uint8_t status; + uint8_t pad[3]; +}; + +/* used to terminate and pad a tlv list */ +struct channel_list_end_tlv { + struct channel_tlv tl; + uint32_t pad; +}; + +/* Acquire */ +struct vf_acquire_tlv { + struct vf_first_tlv first_tlv; + + uint8_t vf_id; + uint8_t pad[3]; + + struct vf_resource_query res_query; + + uint64_t bulletin_addr; +}; + +/* simple operation request on queue */ +struct vf_q_op_tlv { + struct vf_first_tlv first_tlv; + uint8_t vf_qid; + uint8_t pad[3]; +}; + +/* receive side scaling tlv */ +struct vf_rss_tlv { + struct vf_first_tlv first_tlv; + uint32_t rss_flags; + uint8_t rss_result_mask; + uint8_t ind_table_size; + uint8_t rss_key_size; + uint8_t pad; + uint8_t ind_table[T_ETH_INDIRECTION_TABLE_SIZE]; + uint32_t rss_key[T_ETH_RSS_KEY]; /* hash values */ +}; + +struct vf_resc { +#define BNX2X_VF_MAX_QUEUES_PER_VF 16 +#define BNX2X_VF_MAX_SBS_PER_VF 16 + uint16_t hw_sbs[BNX2X_VF_MAX_SBS_PER_VF]; + uint8_t hw_qid[BNX2X_VF_MAX_QUEUES_PER_VF]; + uint8_t num_rxqs; + uint8_t num_txqs; + uint8_t num_sbs; + uint8_t num_mac_filters; + uint8_t num_vlan_filters; + uint8_t num_mc_filters; + uint8_t permanent_mac_addr[ETH_ALEN]; + struct rte_ether_addr current_mac_addr; + uint16_t pf_link_speed; + uint32_t pf_link_supported; +}; + +/* tlv struct holding reply for acquire */ +struct vf_acquire_resp_tlv { + uint16_t type; + uint16_t length; + uint8_t status; + uint8_t pad1[3]; + uint32_t chip_num; + uint8_t pad2[4]; + char fw_ver[32]; + uint16_t db_size; + uint8_t pad3[2]; + struct vf_resc resc; +}; + +/* Init VF */ +struct vf_init_tlv { + struct vf_first_tlv first_tlv; + uint64_t sb_addr[BNX2X_VF_MAX_SBS_PER_VF]; + uint64_t spq_addr; + uint64_t stats_addr; + uint16_t stats_step; + uint32_t flags; + uint32_t pad[2]; +}; + +struct vf_rxq_params { + /* physical addresses */ + uint64_t rcq_addr; + uint64_t rcq_np_addr; + uint64_t rxq_addr; + uint64_t pad1; + + /* sb + hc info */ + uint8_t vf_sb_id; + uint8_t sb_cq_index; + uint16_t hc_rate; /* desired interrupts per sec. 
*/ + /* rx buffer info */ + uint16_t mtu; + uint16_t buf_sz; + uint16_t flags; /* for BNX2X_VF_Q_FLAG_X flags */ + uint16_t stat_id; /* valid if BNX2X_VF_Q_FLAG_STATS */ + + uint8_t pad2[5]; + + uint8_t drop_flags; + uint8_t cache_line_log; /* BNX2X_VF_Q_FLAG_CACHE_ALIGN */ + uint8_t pad3; +}; + +struct vf_txq_params { + /* physical addresses */ + uint64_t txq_addr; + + /* sb + hc info */ + uint8_t vf_sb_id; /* index in hw_sbs[] */ + uint8_t sb_index; /* Index in the SB */ + uint16_t hc_rate; /* desired interrupts per sec. */ + uint32_t flags; /* for BNX2X_VF_Q_FLAG_X flags */ + uint16_t stat_id; /* valid if BNX2X_VF_Q_FLAG_STATS */ + uint8_t traffic_type; /* see in setup_context() */ + uint8_t pad; +}; + +/* Setup Queue */ +struct vf_setup_q_tlv { + struct vf_first_tlv first_tlv; + + struct vf_rxq_params rxq; + struct vf_txq_params txq; + + uint8_t vf_qid; /* index in hw_qid[] */ + uint8_t param_valid; + #define VF_RXQ_VALID 0x01 + #define VF_TXQ_VALID 0x02 + uint8_t pad[2]; +}; + +/* Set Queue Filters */ +struct vf_q_mac_vlan_filter { + uint32_t flags; + #define BNX2X_VF_Q_FILTER_DEST_MAC_VALID 0x01 + #define BNX2X_VF_Q_FILTER_VLAN_TAG_VALID 0x02 + #define BNX2X_VF_Q_FILTER_SET_MAC 0x100 /* set/clear */ + uint8_t mac[ETH_ALEN]; + uint16_t vlan_tag; +}; + + +#define _UP_ETH_ALEN (6) + +/* configure queue filters */ +struct vf_set_q_filters_tlv { + struct vf_first_tlv first_tlv; + + uint32_t flags; + #define BNX2X_VF_MAC_VLAN_CHANGED 0x01 + #define BNX2X_VF_MULTICAST_CHANGED 0x02 + #define BNX2X_VF_RX_MASK_CHANGED 0x04 + + uint8_t vf_qid; /* index in hw_qid[] */ + uint8_t mac_filters_cnt; + uint8_t multicast_cnt; + uint8_t pad; + + #define VF_MAX_MAC_FILTERS 16 + #define VF_MAX_VLAN_FILTERS 16 + #define VF_MAX_FILTERS (VF_MAX_MAC_FILTERS +\ + VF_MAX_VLAN_FILTERS) + struct vf_q_mac_vlan_filter filters[VF_MAX_FILTERS]; + + #define VF_MAX_MULTICAST_PER_VF 32 + uint8_t multicast[VF_MAX_MULTICAST_PER_VF][_UP_ETH_ALEN]; + unsigned long rx_mask; +}; + + +/* close VF (disable VF) */ +struct vf_close_tlv { + struct vf_first_tlv first_tlv; + uint16_t vf_id; /* for debug */ + uint8_t pad[2]; +}; + +/* release the VF's acquired resources */ +struct vf_release_tlv { + struct vf_first_tlv first_tlv; + uint16_t vf_id; /* for debug */ + uint8_t pad[2]; +}; + +union query_tlvs { + struct vf_first_tlv first_tlv; + struct vf_acquire_tlv acquire; + struct vf_init_tlv init; + struct vf_close_tlv close; + struct vf_q_op_tlv q_op; + struct vf_setup_q_tlv setup_q; + struct vf_set_q_filters_tlv set_q_filters; + struct vf_release_tlv release; + struct vf_rss_tlv update_rss; + struct channel_list_end_tlv list_end; + struct tlv_buffer_size tlv_buf_size; +}; + +union resp_tlvs { + struct vf_common_reply_tlv common_reply; + struct vf_acquire_resp_tlv acquire_resp; + struct channel_list_end_tlv list_end; + struct tlv_buffer_size tlv_buf_size; +}; + +/* struct allocated by VF driver, PF sends updates to VF via bulletin */ +struct bnx2x_vf_bulletin { + uint32_t crc; /* crc of structure to ensure is not in + * mid-update + */ + uint16_t version; + uint16_t length; + + uint64_t valid_bitmap; /* bitmap indicating which fields + * hold valid values + */ + +#define MAC_ADDR_VALID 0 /* alert the vf that a new mac address + * is available for it + */ +#define VLAN_VALID 1 /* when set, the vf should not access the + * vf channel + */ +#define CHANNEL_DOWN 2 /* vf channel is disabled. VFs are not 
VFs are not + * to attempt to send messages on the + * channel after this bit is set + */ + uint8_t mac[ETH_ALEN]; + uint8_t mac_pad[2]; + + uint16_t vlan; + uint8_t vlan_pad[6]; +}; + +#define MAX_TLVS_IN_LIST 50 +enum channel_tlvs { + BNX2X_VF_TLV_NONE, /* ends tlv sequence */ + BNX2X_VF_TLV_ACQUIRE, + BNX2X_VF_TLV_INIT, + BNX2X_VF_TLV_SETUP_Q, + BNX2X_VF_TLV_SET_Q_FILTERS, + BNX2X_VF_TLV_ACTIVATE_Q, + BNX2X_VF_TLV_DEACTIVATE_Q, + BNX2X_VF_TLV_TEARDOWN_Q, + BNX2X_VF_TLV_CLOSE, + BNX2X_VF_TLV_RELEASE, + BNX2X_VF_TLV_UPDATE_RSS_OLD, + BNX2X_VF_TLV_PF_RELEASE_VF, + BNX2X_VF_TLV_LIST_END, + BNX2X_VF_TLV_FLR, + BNX2X_VF_TLV_PF_SET_MAC, + BNX2X_VF_TLV_PF_SET_VLAN, + BNX2X_VF_TLV_UPDATE_RSS, + BNX2X_VF_TLV_PHYS_PORT_ID, + BNX2X_VF_TLV_MAX +}; + +struct bnx2x_vf_mbx_msg { + union query_tlvs query[BNX2X_VF_MAX_QUEUES_PER_VF]; + union resp_tlvs resp; +}; + +int bnx2x_vf_teardown_queue(struct bnx2x_softc *sc, int qid); +int bnx2x_vf_set_mac(struct bnx2x_softc *sc, int set); +int bnx2x_vf_config_rss(struct bnx2x_softc *sc, struct ecore_config_rss_params *params); +int bnx2x_vfpf_set_mcast(struct bnx2x_softc *sc, + struct rte_ether_addr *mc_addrs, + uint32_t mc_addrs_num); + +#endif /* BNX2X_VFPF_H */ diff --git a/src/spdk/dpdk/drivers/net/bnx2x/ecore_fw_defs.h b/src/spdk/dpdk/drivers/net/bnx2x/ecore_fw_defs.h new file mode 100644 index 000000000..5397a701a --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnx2x/ecore_fw_defs.h @@ -0,0 +1,419 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2007-2013 Broadcom Corporation. + * + * Eric Davis + * David Christensen + * Gary Zambrano + * + * Copyright (c) 2014-2018 Cavium Inc. + * All rights reserved. + * www.cavium.com + */ + +#ifndef ECORE_FW_DEFS_H +#define ECORE_FW_DEFS_H + +#define CSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[152].base) +#define CSTORM_ASSERT_LIST_OFFSET(assertListEntry) \ + (IRO[151].base + ((assertListEntry) * IRO[151].m1)) +#define CSTORM_EVENT_RING_DATA_OFFSET(pfId) \ + (IRO[157].base + (((pfId)>>1) * IRO[157].m1) + (((pfId)&1) * \ + IRO[157].m2)) +#define CSTORM_EVENT_RING_PROD_OFFSET(pfId) \ + (IRO[158].base + (((pfId)>>1) * IRO[158].m1) + (((pfId)&1) * \ + IRO[158].m2)) +#define CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(funcId) \ + (IRO[163].base + ((funcId) * IRO[163].m1)) +#define CSTORM_FUNC_EN_OFFSET(funcId) \ + (IRO[153].base + ((funcId) * IRO[153].m1)) +#define CSTORM_HC_SYNC_LINE_INDEX_E1X_OFFSET(hcIndex, sbId) \ + (IRO[143].base + ((hcIndex) * IRO[143].m1) + ((sbId) * IRO[143].m2)) +#define CSTORM_HC_SYNC_LINE_INDEX_E2_OFFSET(hcIndex, sbId) \ + (IRO[142].base + (((hcIndex)>>2) * IRO[142].m1) + (((hcIndex)&3) \ + * IRO[142].m2) + ((sbId) * IRO[142].m3)) +#define CSTORM_IGU_MODE_OFFSET (IRO[161].base) +#define CSTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \ + (IRO[323].base + ((pfId) * IRO[323].m1)) +#define CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \ + (IRO[324].base + ((pfId) * IRO[324].m1)) +#define CSTORM_ISCSI_EQ_CONS_OFFSET(pfId, iscsiEqId) \ + (IRO[316].base + ((pfId) * IRO[316].m1) + ((iscsiEqId) * IRO[316].m2)) +#define CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfId, iscsiEqId) \ + (IRO[318].base + ((pfId) * IRO[318].m1) + ((iscsiEqId) * IRO[318].m2)) +#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfId, iscsiEqId) \ + (IRO[317].base + ((pfId) * IRO[317].m1) + ((iscsiEqId) * IRO[317].m2)) +#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfId, iscsiEqId) \ + (IRO[319].base + ((pfId) * IRO[319].m1) + ((iscsiEqId) * IRO[319].m2)) +#define CSTORM_ISCSI_EQ_PROD_OFFSET(pfId, iscsiEqId) \ + (IRO[315].base + ((pfId) * IRO[315].m1) + 
((iscsiEqId) * IRO[315].m2)) +#define CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfId, iscsiEqId) \ + (IRO[321].base + ((pfId) * IRO[321].m1) + ((iscsiEqId) * IRO[321].m2)) +#define CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfId, iscsiEqId) \ + (IRO[320].base + ((pfId) * IRO[320].m1) + ((iscsiEqId) * IRO[320].m2)) +#define CSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \ + (IRO[322].base + ((pfId) * IRO[322].m1)) +#define CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \ + (IRO[314].base + ((pfId) * IRO[314].m1)) +#define CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \ + (IRO[313].base + ((pfId) * IRO[313].m1)) +#define CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \ + (IRO[312].base + ((pfId) * IRO[312].m1)) +#define CSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \ + (IRO[155].base + ((funcId) * IRO[155].m1)) +#define CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(pfId) \ + (IRO[146].base + ((pfId) * IRO[146].m1)) +#define CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(pfId) \ + (IRO[147].base + ((pfId) * IRO[147].m1)) +#define CSTORM_SP_STATUS_BLOCK_OFFSET(pfId) \ + (IRO[145].base + ((pfId) * IRO[145].m1)) +#define CSTORM_SP_STATUS_BLOCK_SIZE (IRO[145].size) +#define CSTORM_SP_SYNC_BLOCK_OFFSET(pfId) \ + (IRO[148].base + ((pfId) * IRO[148].m1)) +#define CSTORM_SP_SYNC_BLOCK_SIZE (IRO[148].size) +#define CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(sbId, hcIndex) \ + (IRO[140].base + ((sbId) * IRO[140].m1) + ((hcIndex) * IRO[140].m2)) +#define CSTORM_STATUS_BLOCK_DATA_OFFSET(sbId) \ + (IRO[137].base + ((sbId) * IRO[137].m1)) +#define CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(sbId) \ + (IRO[138].base + ((sbId) * IRO[138].m1)) +#define CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(sbId, hcIndex) \ + (IRO[139].base + ((sbId) * IRO[139].m1) + ((hcIndex) * IRO[139].m2)) +#define CSTORM_STATUS_BLOCK_OFFSET(sbId) \ + (IRO[136].base + ((sbId) * IRO[136].m1)) +#define CSTORM_STATUS_BLOCK_SIZE (IRO[136].size) +#define CSTORM_SYNC_BLOCK_OFFSET(sbId) \ + (IRO[141].base + ((sbId) * IRO[141].m1)) +#define CSTORM_SYNC_BLOCK_SIZE (IRO[141].size) +#define CSTORM_VF_PF_CHANNEL_STATE_OFFSET(vfId) \ + (IRO[159].base + ((vfId) * IRO[159].m1)) +#define CSTORM_VF_PF_CHANNEL_VALID_OFFSET(vfId) \ + (IRO[160].base + ((vfId) * IRO[160].m1)) +#define CSTORM_VF_TO_PF_OFFSET(funcId) \ + (IRO[154].base + ((funcId) * IRO[154].m1)) +#define TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(pfId) \ + (IRO[207].base + ((pfId) * IRO[207].m1)) +#define TSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[102].base) +#define TSTORM_ASSERT_LIST_OFFSET(assertListEntry) \ + (IRO[101].base + ((assertListEntry) * IRO[101].m1)) +#define TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(pfId) \ + (IRO[205].base + ((pfId) * IRO[205].m1)) +#define TSTORM_FUNC_EN_OFFSET(funcId) \ + (IRO[107].base + ((funcId) * IRO[107].m1)) +#define TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \ + (IRO[278].base + ((pfId) * IRO[278].m1)) +#define TSTORM_ISCSI_L2_ISCSI_OOO_CID_TABLE_OFFSET(pfId) \ + (IRO[279].base + ((pfId) * IRO[279].m1)) +#define TSTORM_ISCSI_L2_ISCSI_OOO_CLIENT_ID_TABLE_OFFSET(pfId) \ + (IRO[280].base + ((pfId) * IRO[280].m1)) +#define TSTORM_ISCSI_L2_ISCSI_OOO_PROD_OFFSET(pfId) \ + (IRO[281].base + ((pfId) * IRO[281].m1)) +#define TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \ + (IRO[277].base + ((pfId) * IRO[277].m1)) +#define TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \ + (IRO[276].base + ((pfId) * IRO[276].m1)) +#define TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \ + (IRO[275].base + ((pfId) * IRO[275].m1)) +#define TSTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \ + (IRO[274].base + ((pfId) * IRO[274].m1)) +#define TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfId) \ + (IRO[284].base + ((pfId) * 
IRO[284].m1)) +#define TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \ + (IRO[270].base + ((pfId) * IRO[270].m1)) +#define TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfId) \ + (IRO[271].base + ((pfId) * IRO[271].m1)) +#define TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfId) \ + (IRO[272].base + ((pfId) * IRO[272].m1)) +#define TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfId) \ + (IRO[273].base + ((pfId) * IRO[273].m1)) +#define TSTORM_MAC_FILTER_CONFIG_OFFSET(pfId) \ + (IRO[206].base + ((pfId) * IRO[206].m1)) +#define TSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \ + (IRO[109].base + ((funcId) * IRO[109].m1)) +#define TSTORM_TCP_MAX_CWND_OFFSET(pfId) \ + (IRO[223].base + ((pfId) * IRO[223].m1)) +#define TSTORM_VF_TO_PF_OFFSET(funcId) \ + (IRO[108].base + ((funcId) * IRO[108].m1)) +#define USTORM_AGG_DATA_OFFSET (IRO[212].base) +#define USTORM_AGG_DATA_SIZE (IRO[212].size) +#define USTORM_ASSERT_LIST_INDEX_OFFSET (IRO[181].base) +#define USTORM_ASSERT_LIST_OFFSET(assertListEntry) \ + (IRO[180].base + ((assertListEntry) * IRO[180].m1)) +#define USTORM_ETH_PAUSE_ENABLED_OFFSET(portId) \ + (IRO[187].base + ((portId) * IRO[187].m1)) +#define USTORM_FCOE_EQ_PROD_OFFSET(pfId) \ + (IRO[325].base + ((pfId) * IRO[325].m1)) +#define USTORM_FUNC_EN_OFFSET(funcId) \ + (IRO[182].base + ((funcId) * IRO[182].m1)) +#define USTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \ + (IRO[289].base + ((pfId) * IRO[289].m1)) +#define USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \ + (IRO[290].base + ((pfId) * IRO[290].m1)) +#define USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \ + (IRO[294].base + ((pfId) * IRO[294].m1)) +#define USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfId) \ + (IRO[291].base + ((pfId) * IRO[291].m1)) +#define USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \ + (IRO[287].base + ((pfId) * IRO[287].m1)) +#define USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \ + (IRO[286].base + ((pfId) * IRO[286].m1)) +#define USTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \ + (IRO[285].base + ((pfId) * IRO[285].m1)) +#define USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \ + (IRO[288].base + ((pfId) * IRO[288].m1)) +#define USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfId) \ + (IRO[292].base + ((pfId) * IRO[292].m1)) +#define USTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \ + (IRO[293].base + ((pfId) * IRO[293].m1)) +#define USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(pfId) \ + (IRO[186].base + ((pfId) * IRO[186].m1)) +#define USTORM_RECORD_SLOW_PATH_OFFSET(funcId) \ + (IRO[184].base + ((funcId) * IRO[184].m1)) +#define USTORM_RX_PRODS_E1X_OFFSET(portId, clientId) \ + (IRO[215].base + ((portId) * IRO[215].m1) + ((clientId) * \ + IRO[215].m2)) +#define USTORM_RX_PRODS_E2_OFFSET(qzoneId) \ + (IRO[216].base + ((qzoneId) * IRO[216].m1)) +#define USTORM_TPA_BTR_OFFSET (IRO[213].base) +#define USTORM_TPA_BTR_SIZE (IRO[213].size) +#define USTORM_VF_TO_PF_OFFSET(funcId) \ + (IRO[183].base + ((funcId) * IRO[183].m1)) +#define XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE (IRO[67].base) +#define XSTORM_AGG_INT_FINAL_CLEANUP_INDEX (IRO[66].base) +#define XSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[51].base) +#define XSTORM_ASSERT_LIST_OFFSET(assertListEntry) \ + (IRO[50].base + ((assertListEntry) * IRO[50].m1)) +#define XSTORM_CMNG_PER_PORT_VARS_OFFSET(portId) \ + (IRO[43].base + ((portId) * IRO[43].m1)) +#define XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(pfId) \ + (IRO[45].base + ((pfId) * IRO[45].m1)) +#define XSTORM_FUNC_EN_OFFSET(funcId) \ + (IRO[47].base + ((funcId) * IRO[47].m1)) +#define XSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \ + (IRO[302].base + ((pfId) * IRO[302].m1)) +#define XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfId) \ + 
(IRO[305].base + ((pfId) * IRO[305].m1)) +#define XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfId) \ + (IRO[306].base + ((pfId) * IRO[306].m1)) +#define XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfId) \ + (IRO[307].base + ((pfId) * IRO[307].m1)) +#define XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfId) \ + (IRO[308].base + ((pfId) * IRO[308].m1)) +#define XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfId) \ + (IRO[309].base + ((pfId) * IRO[309].m1)) +#define XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfId) \ + (IRO[310].base + ((pfId) * IRO[310].m1)) +#define XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfId) \ + (IRO[311].base + ((pfId) * IRO[311].m1)) +#define XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \ + (IRO[301].base + ((pfId) * IRO[301].m1)) +#define XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \ + (IRO[300].base + ((pfId) * IRO[300].m1)) +#define XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \ + (IRO[299].base + ((pfId) * IRO[299].m1)) +#define XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \ + (IRO[304].base + ((pfId) * IRO[304].m1)) +#define XSTORM_ISCSI_SQ_SIZE_OFFSET(pfId) \ + (IRO[303].base + ((pfId) * IRO[303].m1)) +#define XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfId) \ + (IRO[298].base + ((pfId) * IRO[298].m1)) +#define XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \ + (IRO[297].base + ((pfId) * IRO[297].m1)) +#define XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfId) \ + (IRO[296].base + ((pfId) * IRO[296].m1)) +#define XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfId) \ + (IRO[295].base + ((pfId) * IRO[295].m1)) +#define XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(pfId) \ + (IRO[44].base + ((pfId) * IRO[44].m1)) +#define XSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \ + (IRO[49].base + ((funcId) * IRO[49].m1)) +#define XSTORM_SPQ_DATA_OFFSET(funcId) \ + (IRO[32].base + ((funcId) * IRO[32].m1)) +#define XSTORM_SPQ_DATA_SIZE (IRO[32].size) +#define XSTORM_SPQ_PAGE_BASE_OFFSET(funcId) \ + (IRO[30].base + ((funcId) * IRO[30].m1)) +#define XSTORM_SPQ_PROD_OFFSET(funcId) \ + (IRO[31].base + ((funcId) * IRO[31].m1)) +#define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(portId) \ + (IRO[217].base + ((portId) * IRO[217].m1)) +#define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(portId) \ + (IRO[218].base + ((portId) * IRO[218].m1)) +#define XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(pfId) \ + (IRO[220].base + (((pfId)>>1) * IRO[220].m1) + (((pfId)&1) * \ + IRO[220].m2)) +#define XSTORM_VF_TO_PF_OFFSET(funcId) \ + (IRO[48].base + ((funcId) * IRO[48].m1)) +#define COMMON_ASM_INVALID_ASSERT_OPCODE 0x0 + +/* eth hsi version */ +#define ETH_FP_HSI_VERSION (ETH_FP_HSI_VER_2) + + +/* Ethernet Ring parameters */ +#define X_ETH_LOCAL_RING_SIZE 13 +#define FIRST_BD_IN_PKT 0 +#define PARSE_BD_INDEX 1 +#define NUM_OF_ETH_BDS_IN_PAGE ((PAGE_SIZE)/(STRUCT_SIZE(eth_tx_bd)/8)) +#define U_ETH_NUM_OF_SGES_TO_FETCH 8 +#define U_ETH_MAX_SGES_FOR_PACKET 3 + +/* Rx ring params */ +#define U_ETH_LOCAL_BD_RING_SIZE 8 +#define U_ETH_LOCAL_SGE_RING_SIZE 10 +#define U_ETH_SGL_SIZE 8 + /* The fw will padd the buffer with this value, so the IP header \ + will be align to 4 Byte */ +#define IP_HEADER_ALIGNMENT_PADDING 2 + +#define U_ETH_SGES_PER_PAGE_INVERSE_MASK \ + (0xFFFF - ((PAGE_SIZE/((STRUCT_SIZE(eth_rx_sge))/8))-1)) + +#define TU_ETH_CQES_PER_PAGE (PAGE_SIZE/(STRUCT_SIZE(eth_rx_cqe)/8)) +#define U_ETH_BDS_PER_PAGE (PAGE_SIZE/(STRUCT_SIZE(eth_rx_bd)/8)) +#define U_ETH_SGES_PER_PAGE (PAGE_SIZE/(STRUCT_SIZE(eth_rx_sge)/8)) + +#define U_ETH_BDS_PER_PAGE_MASK (U_ETH_BDS_PER_PAGE-1) +#define U_ETH_CQE_PER_PAGE_MASK (TU_ETH_CQES_PER_PAGE-1) +#define U_ETH_SGES_PER_PAGE_MASK (U_ETH_SGES_PER_PAGE-1) + +#define 
U_ETH_UNDEFINED_Q 0xFF + +#define T_ETH_INDIRECTION_TABLE_SIZE 128 +#define T_ETH_RSS_KEY 10 +#define ETH_NUM_OF_RSS_ENGINES_E2 72 + +#define FILTER_RULES_COUNT 16 +#define MULTICAST_RULES_COUNT 16 +#define CLASSIFY_RULES_COUNT 16 + +/*The CRC32 seed, that is used for the hash(reduction) multicast address */ +#define ETH_CRC32_HASH_SEED 0x00000000 + +#define ETH_CRC32_HASH_BIT_SIZE (8) +#define ETH_CRC32_HASH_MASK EVAL((1< + * David Christensen + * Gary Zambrano + * + * Copyright (c) 2014-2018 Cavium Inc. + * All rights reserved. + * www.cavium.com + */ + +#ifndef ECORE_HSI_H +#define ECORE_HSI_H + +#include "ecore_fw_defs.h" +#include "ecore_mfw_req.h" +#include "bnx2x_osal.h" + +#define FW_ENCODE_32BIT_PATTERN 0x1e1e1e1e + +struct license_key { + uint32_t reserved[6]; + + uint32_t max_iscsi_conn; +#define ECORE_MAX_ISCSI_TRGT_CONN_MASK 0xFFFF +#define ECORE_MAX_ISCSI_TRGT_CONN_SHIFT 0 +#define ECORE_MAX_ISCSI_INIT_CONN_MASK 0xFFFF0000 +#define ECORE_MAX_ISCSI_INIT_CONN_SHIFT 16 + + uint32_t reserved_a; + + uint32_t max_fcoe_conn; +#define ECORE_MAX_FCOE_TRGT_CONN_MASK 0xFFFF +#define ECORE_MAX_FCOE_TRGT_CONN_SHIFT 0 +#define ECORE_MAX_FCOE_INIT_CONN_MASK 0xFFFF0000 +#define ECORE_MAX_FCOE_INIT_CONN_SHIFT 16 + + uint32_t reserved_b[4]; +}; + + + +/**************************************************************************** + * Shared HW configuration * + ****************************************************************************/ +#define PIN_CFG_NA 0x00000000 +#define PIN_CFG_GPIO0_P0 0x00000001 +#define PIN_CFG_GPIO1_P0 0x00000002 +#define PIN_CFG_GPIO2_P0 0x00000003 +#define PIN_CFG_GPIO3_P0 0x00000004 +#define PIN_CFG_GPIO0_P1 0x00000005 +#define PIN_CFG_GPIO1_P1 0x00000006 +#define PIN_CFG_GPIO2_P1 0x00000007 +#define PIN_CFG_GPIO3_P1 0x00000008 +#define PIN_CFG_EPIO0 0x00000009 +#define PIN_CFG_EPIO1 0x0000000a +#define PIN_CFG_EPIO2 0x0000000b +#define PIN_CFG_EPIO3 0x0000000c +#define PIN_CFG_EPIO4 0x0000000d +#define PIN_CFG_EPIO5 0x0000000e +#define PIN_CFG_EPIO6 0x0000000f +#define PIN_CFG_EPIO7 0x00000010 +#define PIN_CFG_EPIO8 0x00000011 +#define PIN_CFG_EPIO9 0x00000012 +#define PIN_CFG_EPIO10 0x00000013 +#define PIN_CFG_EPIO11 0x00000014 +#define PIN_CFG_EPIO12 0x00000015 +#define PIN_CFG_EPIO13 0x00000016 +#define PIN_CFG_EPIO14 0x00000017 +#define PIN_CFG_EPIO15 0x00000018 +#define PIN_CFG_EPIO16 0x00000019 +#define PIN_CFG_EPIO17 0x0000001a +#define PIN_CFG_EPIO18 0x0000001b +#define PIN_CFG_EPIO19 0x0000001c +#define PIN_CFG_EPIO20 0x0000001d +#define PIN_CFG_EPIO21 0x0000001e +#define PIN_CFG_EPIO22 0x0000001f +#define PIN_CFG_EPIO23 0x00000020 +#define PIN_CFG_EPIO24 0x00000021 +#define PIN_CFG_EPIO25 0x00000022 +#define PIN_CFG_EPIO26 0x00000023 +#define PIN_CFG_EPIO27 0x00000024 +#define PIN_CFG_EPIO28 0x00000025 +#define PIN_CFG_EPIO29 0x00000026 +#define PIN_CFG_EPIO30 0x00000027 +#define PIN_CFG_EPIO31 0x00000028 + +/* EPIO definition */ +#define EPIO_CFG_NA 0x00000000 +#define EPIO_CFG_EPIO0 0x00000001 +#define EPIO_CFG_EPIO1 0x00000002 +#define EPIO_CFG_EPIO2 0x00000003 +#define EPIO_CFG_EPIO3 0x00000004 +#define EPIO_CFG_EPIO4 0x00000005 +#define EPIO_CFG_EPIO5 0x00000006 +#define EPIO_CFG_EPIO6 0x00000007 +#define EPIO_CFG_EPIO7 0x00000008 +#define EPIO_CFG_EPIO8 0x00000009 +#define EPIO_CFG_EPIO9 0x0000000a +#define EPIO_CFG_EPIO10 0x0000000b +#define EPIO_CFG_EPIO11 0x0000000c +#define EPIO_CFG_EPIO12 0x0000000d +#define EPIO_CFG_EPIO13 0x0000000e +#define EPIO_CFG_EPIO14 0x0000000f +#define EPIO_CFG_EPIO15 0x00000010 +#define EPIO_CFG_EPIO16 0x00000011 
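/*
 * Editorial note, not part of the upstream header: the shared_hw_cfg,
 * port_hw_cfg and port_feat_cfg words defined further below are packed
 * bit fields, each described by a ..._MASK / ..._SHIFT pair.  The sketch
 * below shows the usual read idiom under these assumptions: the helper
 * name bnx2x_cfg_field is hypothetical (not used elsewhere in this
 * driver), and uint32_t is already in scope at this point in the header
 * via the includes above.
 */
static inline uint32_t bnx2x_cfg_field(uint32_t word, uint32_t mask,
				       uint32_t shift)
{
	/* Isolate the field bits, then right-justify them. */
	return (word & mask) >> shift;
}
/*
 * Example usage with a field defined later in this file:
 *   led_mode = bnx2x_cfg_field(cfg, SHARED_HW_CFG_LED_MODE_MASK,
 *                              SHARED_HW_CFG_LED_MODE_SHIFT);
 */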
+#define EPIO_CFG_EPIO17 0x00000012 +#define EPIO_CFG_EPIO18 0x00000013 +#define EPIO_CFG_EPIO19 0x00000014 +#define EPIO_CFG_EPIO20 0x00000015 +#define EPIO_CFG_EPIO21 0x00000016 +#define EPIO_CFG_EPIO22 0x00000017 +#define EPIO_CFG_EPIO23 0x00000018 +#define EPIO_CFG_EPIO24 0x00000019 +#define EPIO_CFG_EPIO25 0x0000001a +#define EPIO_CFG_EPIO26 0x0000001b +#define EPIO_CFG_EPIO27 0x0000001c +#define EPIO_CFG_EPIO28 0x0000001d +#define EPIO_CFG_EPIO29 0x0000001e +#define EPIO_CFG_EPIO30 0x0000001f +#define EPIO_CFG_EPIO31 0x00000020 + +struct mac_addr { + uint32_t upper; + uint32_t lower; +}; + + +struct shared_hw_cfg { /* NVRAM Offset */ + /* Up to 16 bytes of NULL-terminated string */ + uint8_t part_num[16]; /* 0x104 */ + + uint32_t config; /* 0x114 */ + #define SHARED_HW_CFG_MDIO_VOLTAGE_MASK 0x00000001 + #define SHARED_HW_CFG_MDIO_VOLTAGE_SHIFT 0 + #define SHARED_HW_CFG_MDIO_VOLTAGE_1_2V 0x00000000 + #define SHARED_HW_CFG_MDIO_VOLTAGE_2_5V 0x00000001 + + #define SHARED_HW_CFG_PORT_SWAP 0x00000004 + + #define SHARED_HW_CFG_BEACON_WOL_EN 0x00000008 + + #define SHARED_HW_CFG_PCIE_GEN3_DISABLED 0x00000000 + #define SHARED_HW_CFG_PCIE_GEN3_ENABLED 0x00000010 + + #define SHARED_HW_CFG_MFW_SELECT_MASK 0x00000700 + #define SHARED_HW_CFG_MFW_SELECT_SHIFT 8 + /* Whatever MFW found in NVM + (if multiple found, priority order is: NC-SI, UMP, IPMI) */ + #define SHARED_HW_CFG_MFW_SELECT_DEFAULT 0x00000000 + #define SHARED_HW_CFG_MFW_SELECT_NC_SI 0x00000100 + #define SHARED_HW_CFG_MFW_SELECT_UMP 0x00000200 + #define SHARED_HW_CFG_MFW_SELECT_IPMI 0x00000300 + /* Use SPIO4 as an arbiter between: 0-NC_SI, 1-IPMI + (can only be used when an add-in board, not BMC, pulls-down SPIO4) */ + #define SHARED_HW_CFG_MFW_SELECT_SPIO4_NC_SI_IPMI 0x00000400 + /* Use SPIO4 as an arbiter between: 0-UMP, 1-IPMI + (can only be used when an add-in board, not BMC, pulls-down SPIO4) */ + #define SHARED_HW_CFG_MFW_SELECT_SPIO4_UMP_IPMI 0x00000500 + /* Use SPIO4 as an arbiter between: 0-NC-SI, 1-UMP + (can only be used when an add-in board, not BMC, pulls-down SPIO4) */ + #define SHARED_HW_CFG_MFW_SELECT_SPIO4_NC_SI_UMP 0x00000600 + + /* Adjust the PCIe G2 Tx amplitude driver for all Tx lanes. For + backwards compatibility, value of 0 is disabling this feature. + That means that though 0 is a valid value, it cannot be + configured. 
*/ + #define SHARED_HW_CFG_G2_TX_DRIVE_MASK 0x0000F000 + #define SHARED_HW_CFG_G2_TX_DRIVE_SHIFT 12 + + #define SHARED_HW_CFG_LED_MODE_MASK 0x000F0000 + #define SHARED_HW_CFG_LED_MODE_SHIFT 16 + #define SHARED_HW_CFG_LED_MAC1 0x00000000 + #define SHARED_HW_CFG_LED_PHY1 0x00010000 + #define SHARED_HW_CFG_LED_PHY2 0x00020000 + #define SHARED_HW_CFG_LED_PHY3 0x00030000 + #define SHARED_HW_CFG_LED_MAC2 0x00040000 + #define SHARED_HW_CFG_LED_PHY4 0x00050000 + #define SHARED_HW_CFG_LED_PHY5 0x00060000 + #define SHARED_HW_CFG_LED_PHY6 0x00070000 + #define SHARED_HW_CFG_LED_MAC3 0x00080000 + #define SHARED_HW_CFG_LED_PHY7 0x00090000 + #define SHARED_HW_CFG_LED_PHY9 0x000a0000 + #define SHARED_HW_CFG_LED_PHY11 0x000b0000 + #define SHARED_HW_CFG_LED_MAC4 0x000c0000 + #define SHARED_HW_CFG_LED_PHY8 0x000d0000 + #define SHARED_HW_CFG_LED_EXTPHY1 0x000e0000 + #define SHARED_HW_CFG_LED_EXTPHY2 0x000f0000 + + #define SHARED_HW_CFG_SRIOV_MASK 0x40000000 + #define SHARED_HW_CFG_SRIOV_DISABLED 0x00000000 + #define SHARED_HW_CFG_SRIOV_ENABLED 0x40000000 + + #define SHARED_HW_CFG_ATC_MASK 0x80000000 + #define SHARED_HW_CFG_ATC_DISABLED 0x00000000 + #define SHARED_HW_CFG_ATC_ENABLED 0x80000000 + + uint32_t config2; /* 0x118 */ + + #define SHARED_HW_CFG_PCIE_GEN2_MASK 0x00000100 + #define SHARED_HW_CFG_PCIE_GEN2_SHIFT 8 + #define SHARED_HW_CFG_PCIE_GEN2_DISABLED 0x00000000 + #define SHARED_HW_CFG_PCIE_GEN2_ENABLED 0x00000100 + + #define SHARED_HW_CFG_SMBUS_TIMING_MASK 0x00001000 + #define SHARED_HW_CFG_SMBUS_TIMING_100KHZ 0x00000000 + #define SHARED_HW_CFG_SMBUS_TIMING_400KHZ 0x00001000 + + #define SHARED_HW_CFG_HIDE_PORT1 0x00002000 + + + /* Output low when PERST is asserted */ + #define SHARED_HW_CFG_SPIO4_FOLLOW_PERST_MASK 0x00008000 + #define SHARED_HW_CFG_SPIO4_FOLLOW_PERST_DISABLED 0x00000000 + #define SHARED_HW_CFG_SPIO4_FOLLOW_PERST_ENABLED 0x00008000 + + #define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_MASK 0x00070000 + #define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_SHIFT 16 + #define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_HW 0x00000000 + #define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_0DB 0x00010000 + #define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_3_5DB 0x00020000 + #define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_6_0DB 0x00030000 + + /* The fan failure mechanism is usually related to the PHY type + since the power consumption of the board is determined by the PHY. + Currently, fan is required for most designs with SFX7101, BNX2X8727 + and BNX2X8481. If a fan is not required for a board which uses one + of those PHYs, this field should be set to "Disabled". If a fan is + required for a different PHY type, this option should be set to + "Enabled". 
The fan failure indication is expected on SPIO5 */ + #define SHARED_HW_CFG_FAN_FAILURE_MASK 0x00180000 + #define SHARED_HW_CFG_FAN_FAILURE_SHIFT 19 + #define SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE 0x00000000 + #define SHARED_HW_CFG_FAN_FAILURE_DISABLED 0x00080000 + #define SHARED_HW_CFG_FAN_FAILURE_ENABLED 0x00100000 + + /* ASPM Power Management support */ + #define SHARED_HW_CFG_ASPM_SUPPORT_MASK 0x00600000 + #define SHARED_HW_CFG_ASPM_SUPPORT_SHIFT 21 + #define SHARED_HW_CFG_ASPM_SUPPORT_L0S_L1_ENABLED 0x00000000 + #define SHARED_HW_CFG_ASPM_SUPPORT_L0S_DISABLED 0x00200000 + #define SHARED_HW_CFG_ASPM_SUPPORT_L1_DISABLED 0x00400000 + #define SHARED_HW_CFG_ASPM_SUPPORT_L0S_L1_DISABLED 0x00600000 + + /* The value of PM_TL_IGNORE_REQS (bit0) in PCI register + tl_control_0 (register 0x2800) */ + #define SHARED_HW_CFG_PREVENT_L1_ENTRY_MASK 0x00800000 + #define SHARED_HW_CFG_PREVENT_L1_ENTRY_DISABLED 0x00000000 + #define SHARED_HW_CFG_PREVENT_L1_ENTRY_ENABLED 0x00800000 + + + /* Set the MDC/MDIO access for the first external phy */ + #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK 0x1C000000 + #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_SHIFT 26 + #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_PHY_TYPE 0x00000000 + #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC0 0x04000000 + #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1 0x08000000 + #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH 0x0c000000 + #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_SWAPPED 0x10000000 + + /* Set the MDC/MDIO access for the second external phy */ + #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_MASK 0xE0000000 + #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_SHIFT 29 + #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_PHY_TYPE 0x00000000 + #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_EMAC0 0x20000000 + #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_EMAC1 0x40000000 + #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_BOTH 0x60000000 + #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_SWAPPED 0x80000000 + + /* Max number of PF MSIX vectors */ + uint32_t config_3; /* 0x11C */ + #define SHARED_HW_CFG_PF_MSIX_MAX_NUM_MASK 0x0000007F + #define SHARED_HW_CFG_PF_MSIX_MAX_NUM_SHIFT 0 + + /* This field extends the mf mode chosen in nvm cfg #73 (as we ran + * out of bits) + */ + #define SHARED_HW_CFG_EXTENDED_MF_MODE_MASK 0x00000F00 + #define SHARED_HW_CFG_EXTENDED_MF_MODE_SHIFT 8 + #define SHARED_HW_CFG_EXTENDED_MF_MODE_NPAR1_DOT_5 0x00000000 + #define SHARED_HW_CFG_EXTENDED_MF_MODE_NPAR2_DOT_0 0x00000100 + + uint32_t ump_nc_si_config; /* 0x120 */ + #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_MASK 0x00000003 + #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_SHIFT 0 + #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_MAC 0x00000000 + #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_PHY 0x00000001 + #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_MII 0x00000000 + #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_RMII 0x00000002 + + /* Reserved bits: 226-230 */ + + /* The output pin template BSC_SEL which selects the I2C for this + port in the I2C Mux */ + uint32_t board; /* 0x124 */ + #define SHARED_HW_CFG_E3_I2C_MUX0_MASK 0x0000003F + #define SHARED_HW_CFG_E3_I2C_MUX0_SHIFT 0 + + #define SHARED_HW_CFG_E3_I2C_MUX1_MASK 0x00000FC0 + #define SHARED_HW_CFG_E3_I2C_MUX1_SHIFT 6 + /* Use the PIN_CFG_XXX defines on top */ + #define SHARED_HW_CFG_BOARD_REV_MASK 0x00FF0000 + #define SHARED_HW_CFG_BOARD_REV_SHIFT 16 + + #define SHARED_HW_CFG_BOARD_MAJOR_VER_MASK 0x0F000000 + #define SHARED_HW_CFG_BOARD_MAJOR_VER_SHIFT 24 + + #define SHARED_HW_CFG_BOARD_MINOR_VER_MASK 0xF0000000 + #define SHARED_HW_CFG_BOARD_MINOR_VER_SHIFT 28 + + uint32_t wc_lane_config; /* 0x128 */ + #define 
SHARED_HW_CFG_LANE_SWAP_CFG_MASK 0x0000FFFF + #define SHARED_HW_CFG_LANE_SWAP_CFG_SHIFT 0 + #define SHARED_HW_CFG_LANE_SWAP_CFG_32103210 0x00001b1b + #define SHARED_HW_CFG_LANE_SWAP_CFG_32100123 0x00001be4 + #define SHARED_HW_CFG_LANE_SWAP_CFG_31200213 0x000027d8 + #define SHARED_HW_CFG_LANE_SWAP_CFG_02133120 0x0000d827 + #define SHARED_HW_CFG_LANE_SWAP_CFG_01233210 0x0000e41b + #define SHARED_HW_CFG_LANE_SWAP_CFG_01230123 0x0000e4e4 + #define SHARED_HW_CFG_LANE_SWAP_CFG_TX_MASK 0x000000FF + #define SHARED_HW_CFG_LANE_SWAP_CFG_TX_SHIFT 0 + #define SHARED_HW_CFG_LANE_SWAP_CFG_RX_MASK 0x0000FF00 + #define SHARED_HW_CFG_LANE_SWAP_CFG_RX_SHIFT 8 + + /* TX lane Polarity swap */ + #define SHARED_HW_CFG_TX_LANE0_POL_FLIP_ENABLED 0x00010000 + #define SHARED_HW_CFG_TX_LANE1_POL_FLIP_ENABLED 0x00020000 + #define SHARED_HW_CFG_TX_LANE2_POL_FLIP_ENABLED 0x00040000 + #define SHARED_HW_CFG_TX_LANE3_POL_FLIP_ENABLED 0x00080000 + /* TX lane Polarity swap */ + #define SHARED_HW_CFG_RX_LANE0_POL_FLIP_ENABLED 0x00100000 + #define SHARED_HW_CFG_RX_LANE1_POL_FLIP_ENABLED 0x00200000 + #define SHARED_HW_CFG_RX_LANE2_POL_FLIP_ENABLED 0x00400000 + #define SHARED_HW_CFG_RX_LANE3_POL_FLIP_ENABLED 0x00800000 + + /* Selects the port layout of the board */ + #define SHARED_HW_CFG_E3_PORT_LAYOUT_MASK 0x0F000000 + #define SHARED_HW_CFG_E3_PORT_LAYOUT_SHIFT 24 + #define SHARED_HW_CFG_E3_PORT_LAYOUT_2P_01 0x00000000 + #define SHARED_HW_CFG_E3_PORT_LAYOUT_2P_10 0x01000000 + #define SHARED_HW_CFG_E3_PORT_LAYOUT_4P_0123 0x02000000 + #define SHARED_HW_CFG_E3_PORT_LAYOUT_4P_1032 0x03000000 + #define SHARED_HW_CFG_E3_PORT_LAYOUT_4P_2301 0x04000000 + #define SHARED_HW_CFG_E3_PORT_LAYOUT_4P_3210 0x05000000 + #define SHARED_HW_CFG_E3_PORT_LAYOUT_2P_01_SIG 0x06000000 +}; + + +/**************************************************************************** + * Port HW configuration * + ****************************************************************************/ +struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */ + + uint32_t pci_id; + #define PORT_HW_CFG_PCI_DEVICE_ID_MASK 0x0000FFFF + #define PORT_HW_CFG_PCI_DEVICE_ID_SHIFT 0 + + #define PORT_HW_CFG_PCI_VENDOR_ID_MASK 0xFFFF0000 + #define PORT_HW_CFG_PCI_VENDOR_ID_SHIFT 16 + + uint32_t pci_sub_id; + #define PORT_HW_CFG_PCI_SUBSYS_VENDOR_ID_MASK 0x0000FFFF + #define PORT_HW_CFG_PCI_SUBSYS_VENDOR_ID_SHIFT 0 + + #define PORT_HW_CFG_PCI_SUBSYS_DEVICE_ID_MASK 0xFFFF0000 + #define PORT_HW_CFG_PCI_SUBSYS_DEVICE_ID_SHIFT 16 + + uint32_t power_dissipated; + #define PORT_HW_CFG_POWER_DIS_D0_MASK 0x000000FF + #define PORT_HW_CFG_POWER_DIS_D0_SHIFT 0 + #define PORT_HW_CFG_POWER_DIS_D1_MASK 0x0000FF00 + #define PORT_HW_CFG_POWER_DIS_D1_SHIFT 8 + #define PORT_HW_CFG_POWER_DIS_D2_MASK 0x00FF0000 + #define PORT_HW_CFG_POWER_DIS_D2_SHIFT 16 + #define PORT_HW_CFG_POWER_DIS_D3_MASK 0xFF000000 + #define PORT_HW_CFG_POWER_DIS_D3_SHIFT 24 + + uint32_t power_consumed; + #define PORT_HW_CFG_POWER_CONS_D0_MASK 0x000000FF + #define PORT_HW_CFG_POWER_CONS_D0_SHIFT 0 + #define PORT_HW_CFG_POWER_CONS_D1_MASK 0x0000FF00 + #define PORT_HW_CFG_POWER_CONS_D1_SHIFT 8 + #define PORT_HW_CFG_POWER_CONS_D2_MASK 0x00FF0000 + #define PORT_HW_CFG_POWER_CONS_D2_SHIFT 16 + #define PORT_HW_CFG_POWER_CONS_D3_MASK 0xFF000000 + #define PORT_HW_CFG_POWER_CONS_D3_SHIFT 24 + + uint32_t mac_upper; + uint32_t mac_lower; /* 0x140 */ + #define PORT_HW_CFG_UPPERMAC_MASK 0x0000FFFF + #define PORT_HW_CFG_UPPERMAC_SHIFT 0 + + + uint32_t iscsi_mac_upper; /* Upper 16 bits are always zeroes */ + uint32_t iscsi_mac_lower; + + uint32_t 
rdma_mac_upper; /* Upper 16 bits are always zeroes */ + uint32_t rdma_mac_lower; + + uint32_t serdes_config; + #define PORT_HW_CFG_SERDES_TX_DRV_PRE_EMPHASIS_MASK 0x0000FFFF + #define PORT_HW_CFG_SERDES_TX_DRV_PRE_EMPHASIS_SHIFT 0 + + #define PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_MASK 0xFFFF0000 + #define PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_SHIFT 16 + + + /* Default values: 2P-64, 4P-32 */ + uint32_t reserved; + + uint32_t vf_config; /* 0x15C */ + #define PORT_HW_CFG_VF_PCI_DEVICE_ID_MASK 0xFFFF0000 + #define PORT_HW_CFG_VF_PCI_DEVICE_ID_SHIFT 16 + + uint32_t mf_pci_id; /* 0x160 */ + #define PORT_HW_CFG_MF_PCI_DEVICE_ID_MASK 0x0000FFFF + #define PORT_HW_CFG_MF_PCI_DEVICE_ID_SHIFT 0 + + /* Controls the TX laser of the SFP+ module */ + uint32_t sfp_ctrl; /* 0x164 */ + #define PORT_HW_CFG_TX_LASER_MASK 0x000000FF + #define PORT_HW_CFG_TX_LASER_SHIFT 0 + #define PORT_HW_CFG_TX_LASER_MDIO 0x00000000 + #define PORT_HW_CFG_TX_LASER_GPIO0 0x00000001 + #define PORT_HW_CFG_TX_LASER_GPIO1 0x00000002 + #define PORT_HW_CFG_TX_LASER_GPIO2 0x00000003 + #define PORT_HW_CFG_TX_LASER_GPIO3 0x00000004 + + /* Controls the fault module LED of the SFP+ */ + #define PORT_HW_CFG_FAULT_MODULE_LED_MASK 0x0000FF00 + #define PORT_HW_CFG_FAULT_MODULE_LED_SHIFT 8 + #define PORT_HW_CFG_FAULT_MODULE_LED_GPIO0 0x00000000 + #define PORT_HW_CFG_FAULT_MODULE_LED_GPIO1 0x00000100 + #define PORT_HW_CFG_FAULT_MODULE_LED_GPIO2 0x00000200 + #define PORT_HW_CFG_FAULT_MODULE_LED_GPIO3 0x00000300 + #define PORT_HW_CFG_FAULT_MODULE_LED_DISABLED 0x00000400 + + /* The output pin TX_DIS that controls the TX laser of the SFP+ + module. Use the PIN_CFG_XXX defines on top */ + uint32_t e3_sfp_ctrl; /* 0x168 */ + #define PORT_HW_CFG_E3_TX_LASER_MASK 0x000000FF + #define PORT_HW_CFG_E3_TX_LASER_SHIFT 0 + + /* The output pin for SFPP_TYPE which turns on the Fault module LED */ + #define PORT_HW_CFG_E3_FAULT_MDL_LED_MASK 0x0000FF00 + #define PORT_HW_CFG_E3_FAULT_MDL_LED_SHIFT 8 + + /* The input pin MOD_ABS that indicates whether SFP+ module is + present or not. Use the PIN_CFG_XXX defines on top */ + #define PORT_HW_CFG_E3_MOD_ABS_MASK 0x00FF0000 + #define PORT_HW_CFG_E3_MOD_ABS_SHIFT 16 + + /* The output pin PWRDIS_SFP_X which disable the power of the SFP+ + module. Use the PIN_CFG_XXX defines on top */ + #define PORT_HW_CFG_E3_PWR_DIS_MASK 0xFF000000 + #define PORT_HW_CFG_E3_PWR_DIS_SHIFT 24 + + /* + * The input pin which signals module transmit fault. Use the + * PIN_CFG_XXX defines on top + */ + uint32_t e3_cmn_pin_cfg; /* 0x16C */ + #define PORT_HW_CFG_E3_TX_FAULT_MASK 0x000000FF + #define PORT_HW_CFG_E3_TX_FAULT_SHIFT 0 + + /* The output pin which reset the PHY. Use the PIN_CFG_XXX defines on + top */ + #define PORT_HW_CFG_E3_PHY_RESET_MASK 0x0000FF00 + #define PORT_HW_CFG_E3_PHY_RESET_SHIFT 8 + + /* + * The output pin which powers down the PHY. Use the PIN_CFG_XXX + * defines on top + */ + #define PORT_HW_CFG_E3_PWR_DOWN_MASK 0x00FF0000 + #define PORT_HW_CFG_E3_PWR_DOWN_SHIFT 16 + + /* The output pin values BSC_SEL which selects the I2C for this port + in the I2C Mux */ + #define PORT_HW_CFG_E3_I2C_MUX0_MASK 0x01000000 + #define PORT_HW_CFG_E3_I2C_MUX1_MASK 0x02000000 + + + /* + * The input pin I_FAULT which indicate over-current has occurred. 
+ * Use the PIN_CFG_XXX defines on top + */ + uint32_t e3_cmn_pin_cfg1; /* 0x170 */ + #define PORT_HW_CFG_E3_OVER_CURRENT_MASK 0x000000FF + #define PORT_HW_CFG_E3_OVER_CURRENT_SHIFT 0 + + /* pause on host ring */ + uint32_t generic_features; /* 0x174 */ + #define PORT_HW_CFG_PAUSE_ON_HOST_RING_MASK 0x00000001 + #define PORT_HW_CFG_PAUSE_ON_HOST_RING_SHIFT 0 + #define PORT_HW_CFG_PAUSE_ON_HOST_RING_DISABLED 0x00000000 + #define PORT_HW_CFG_PAUSE_ON_HOST_RING_ENABLED 0x00000001 + + /* SFP+ Tx Equalization: NIC recommended and tested value is 0xBEB2 + * LOM recommended and tested value is 0xBEB2. Using a different + * value means using a value not tested by BRCM + */ + uint32_t sfi_tap_values; /* 0x178 */ + #define PORT_HW_CFG_TX_EQUALIZATION_MASK 0x0000FFFF + #define PORT_HW_CFG_TX_EQUALIZATION_SHIFT 0 + + /* SFP+ Tx driver broadcast IDRIVER: NIC recommended and tested + * value is 0x2. LOM recommended and tested value is 0x2. Using a + * different value means using a value not tested by BRCM + */ + #define PORT_HW_CFG_TX_DRV_BROADCAST_MASK 0x000F0000 + #define PORT_HW_CFG_TX_DRV_BROADCAST_SHIFT 16 + /* Set non-default values for TXFIR in SFP mode. */ + #define PORT_HW_CFG_TX_DRV_IFIR_MASK 0x00F00000 + #define PORT_HW_CFG_TX_DRV_IFIR_SHIFT 20 + + /* Set non-default values for IPREDRIVER in SFP mode. */ + #define PORT_HW_CFG_TX_DRV_IPREDRIVER_MASK 0x0F000000 + #define PORT_HW_CFG_TX_DRV_IPREDRIVER_SHIFT 24 + + /* Set non-default values for POST2 in SFP mode. */ + #define PORT_HW_CFG_TX_DRV_POST2_MASK 0xF0000000 + #define PORT_HW_CFG_TX_DRV_POST2_SHIFT 28 + + uint32_t reserved0[5]; /* 0x17c */ + + uint32_t aeu_int_mask; /* 0x190 */ + + uint32_t media_type; /* 0x194 */ + #define PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK 0x000000FF + #define PORT_HW_CFG_MEDIA_TYPE_PHY0_SHIFT 0 + + #define PORT_HW_CFG_MEDIA_TYPE_PHY1_MASK 0x0000FF00 + #define PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT 8 + + #define PORT_HW_CFG_MEDIA_TYPE_PHY2_MASK 0x00FF0000 + #define PORT_HW_CFG_MEDIA_TYPE_PHY2_SHIFT 16 + + /* 4 times 16 bits for all 4 lanes. In case external PHY is present + (not direct mode), those values will not take effect on the 4 XGXS + lanes. For some external PHYs (such as 8706 and 8726) the values + will be used to configure the external PHY in those cases, not + all 4 values are needed. 
*/ + uint16_t xgxs_config_rx[4]; /* 0x198 */ + uint16_t xgxs_config_tx[4]; /* 0x1A0 */ + + + /* For storing FCOE mac on shared memory */ + uint32_t fcoe_fip_mac_upper; + #define PORT_HW_CFG_FCOE_UPPERMAC_MASK 0x0000ffff + #define PORT_HW_CFG_FCOE_UPPERMAC_SHIFT 0 + uint32_t fcoe_fip_mac_lower; + + uint32_t fcoe_wwn_port_name_upper; + uint32_t fcoe_wwn_port_name_lower; + + uint32_t fcoe_wwn_node_name_upper; + uint32_t fcoe_wwn_node_name_lower; + + /* wwpn for npiv enabled */ + uint32_t wwpn_for_npiv_config; /* 0x1C0 */ + #define PORT_HW_CFG_WWPN_FOR_NPIV_ENABLED_MASK 0x00000001 + #define PORT_HW_CFG_WWPN_FOR_NPIV_ENABLED_SHIFT 0 + #define PORT_HW_CFG_WWPN_FOR_NPIV_ENABLED_DISABLED 0x00000000 + #define PORT_HW_CFG_WWPN_FOR_NPIV_ENABLED_ENABLED 0x00000001 + + /* wwpn for npiv valid addresses */ + uint32_t wwpn_for_npiv_valid_addresses; /* 0x1C4 */ + #define PORT_HW_CFG_WWPN_FOR_NPIV_ADDRESS_BITMAP_MASK 0x0000FFFF + #define PORT_HW_CFG_WWPN_FOR_NPIV_ADDRESS_BITMAP_SHIFT 0 + + struct mac_addr wwpn_for_niv_macs[16]; + + /* Reserved bits: 2272-2336 For storing FCOE mac on shared memory */ + uint32_t Reserved1[14]; + + uint32_t pf_allocation; /* 0x280 */ + /* number of vfs per PF, if 0 - sriov disabled */ + #define PORT_HW_CFG_NUMBER_OF_VFS_MASK 0x000000FF + #define PORT_HW_CFG_NUMBER_OF_VFS_SHIFT 0 + + /* Enable RJ45 magjack pair swapping on 10GBase-T PHY (0=default), + 84833 only */ + uint32_t xgbt_phy_cfg; /* 0x284 */ + #define PORT_HW_CFG_RJ45_PAIR_SWAP_MASK 0x000000FF + #define PORT_HW_CFG_RJ45_PAIR_SWAP_SHIFT 0 + + uint32_t default_cfg; /* 0x288 */ + #define PORT_HW_CFG_GPIO0_CONFIG_MASK 0x00000003 + #define PORT_HW_CFG_GPIO0_CONFIG_SHIFT 0 + #define PORT_HW_CFG_GPIO0_CONFIG_NA 0x00000000 + #define PORT_HW_CFG_GPIO0_CONFIG_LOW 0x00000001 + #define PORT_HW_CFG_GPIO0_CONFIG_HIGH 0x00000002 + #define PORT_HW_CFG_GPIO0_CONFIG_INPUT 0x00000003 + + #define PORT_HW_CFG_GPIO1_CONFIG_MASK 0x0000000C + #define PORT_HW_CFG_GPIO1_CONFIG_SHIFT 2 + #define PORT_HW_CFG_GPIO1_CONFIG_NA 0x00000000 + #define PORT_HW_CFG_GPIO1_CONFIG_LOW 0x00000004 + #define PORT_HW_CFG_GPIO1_CONFIG_HIGH 0x00000008 + #define PORT_HW_CFG_GPIO1_CONFIG_INPUT 0x0000000c + + #define PORT_HW_CFG_GPIO2_CONFIG_MASK 0x00000030 + #define PORT_HW_CFG_GPIO2_CONFIG_SHIFT 4 + #define PORT_HW_CFG_GPIO2_CONFIG_NA 0x00000000 + #define PORT_HW_CFG_GPIO2_CONFIG_LOW 0x00000010 + #define PORT_HW_CFG_GPIO2_CONFIG_HIGH 0x00000020 + #define PORT_HW_CFG_GPIO2_CONFIG_INPUT 0x00000030 + + #define PORT_HW_CFG_GPIO3_CONFIG_MASK 0x000000C0 + #define PORT_HW_CFG_GPIO3_CONFIG_SHIFT 6 + #define PORT_HW_CFG_GPIO3_CONFIG_NA 0x00000000 + #define PORT_HW_CFG_GPIO3_CONFIG_LOW 0x00000040 + #define PORT_HW_CFG_GPIO3_CONFIG_HIGH 0x00000080 + #define PORT_HW_CFG_GPIO3_CONFIG_INPUT 0x000000c0 + + /* When KR link is required to be set to force which is not + KR-compliant, this parameter determine what is the trigger for it. + When GPIO is selected, low input will force the speed. Currently + default speed is 1G. In the future, it may be widen to select the + forced speed in with another parameter. Note when force-1G is + enabled, it override option 56: Link Speed option. 
*/ + #define PORT_HW_CFG_FORCE_KR_ENABLER_MASK 0x00000F00 + #define PORT_HW_CFG_FORCE_KR_ENABLER_SHIFT 8 + #define PORT_HW_CFG_FORCE_KR_ENABLER_NOT_FORCED 0x00000000 + #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO0_P0 0x00000100 + #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO1_P0 0x00000200 + #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO2_P0 0x00000300 + #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO3_P0 0x00000400 + #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO0_P1 0x00000500 + #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO1_P1 0x00000600 + #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO2_P1 0x00000700 + #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO3_P1 0x00000800 + #define PORT_HW_CFG_FORCE_KR_ENABLER_FORCED 0x00000900 + /* Enable to determine with which GPIO to reset the external phy */ + #define PORT_HW_CFG_EXT_PHY_GPIO_RST_MASK 0x000F0000 + #define PORT_HW_CFG_EXT_PHY_GPIO_RST_SHIFT 16 + #define PORT_HW_CFG_EXT_PHY_GPIO_RST_PHY_TYPE 0x00000000 + #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P0 0x00010000 + #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P0 0x00020000 + #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P0 0x00030000 + #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P0 0x00040000 + #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P1 0x00050000 + #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P1 0x00060000 + #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P1 0x00070000 + #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P1 0x00080000 + + /* Enable BAM on KR */ + #define PORT_HW_CFG_ENABLE_BAM_ON_KR_MASK 0x00100000 + #define PORT_HW_CFG_ENABLE_BAM_ON_KR_SHIFT 20 + #define PORT_HW_CFG_ENABLE_BAM_ON_KR_DISABLED 0x00000000 + #define PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED 0x00100000 + + /* Enable Common Mode Sense */ + #define PORT_HW_CFG_ENABLE_CMS_MASK 0x00200000 + #define PORT_HW_CFG_ENABLE_CMS_SHIFT 21 + #define PORT_HW_CFG_ENABLE_CMS_DISABLED 0x00000000 + #define PORT_HW_CFG_ENABLE_CMS_ENABLED 0x00200000 + + /* Determine the Serdes electrical interface */ + #define PORT_HW_CFG_NET_SERDES_IF_MASK 0x0F000000 + #define PORT_HW_CFG_NET_SERDES_IF_SHIFT 24 + #define PORT_HW_CFG_NET_SERDES_IF_SGMII 0x00000000 + #define PORT_HW_CFG_NET_SERDES_IF_XFI 0x01000000 + #define PORT_HW_CFG_NET_SERDES_IF_SFI 0x02000000 + #define PORT_HW_CFG_NET_SERDES_IF_KR 0x03000000 + #define PORT_HW_CFG_NET_SERDES_IF_DXGXS 0x04000000 + #define PORT_HW_CFG_NET_SERDES_IF_KR2 0x05000000 + + /* SFP+ main TAP and post TAP volumes */ + #define PORT_HW_CFG_TAP_LEVELS_MASK 0x70000000 + #define PORT_HW_CFG_TAP_LEVELS_SHIFT 28 + #define PORT_HW_CFG_TAP_LEVELS_POST_15_MAIN_43 0x00000000 + #define PORT_HW_CFG_TAP_LEVELS_POST_14_MAIN_44 0x10000000 + #define PORT_HW_CFG_TAP_LEVELS_POST_13_MAIN_45 0x20000000 + #define PORT_HW_CFG_TAP_LEVELS_POST_12_MAIN_46 0x30000000 + #define PORT_HW_CFG_TAP_LEVELS_POST_11_MAIN_47 0x40000000 + #define PORT_HW_CFG_TAP_LEVELS_POST_10_MAIN_48 0x50000000 + + uint32_t speed_capability_mask2; /* 0x28C */ + #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_MASK 0x0000FFFF + #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_SHIFT 0 + #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_10M_FULL 0x00000001 + #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_10M_HALF 0x00000002 + #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_100M_HALF 0x00000004 + #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_100M_FULL 0x00000008 + #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_1G 0x00000010 + #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_2_5G 0x00000020 + #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_10G 0x00000040 + #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_20G 0x00000080 + + #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_MASK 0xFFFF0000 + #define 
PORT_HW_CFG_SPEED_CAPABILITY2_D0_SHIFT 16 + #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_10M_FULL 0x00010000 + #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_10M_HALF 0x00020000 + #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_100M_HALF 0x00040000 + #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_100M_FULL 0x00080000 + #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_1G 0x00100000 + #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_2_5G 0x00200000 + #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_10G 0x00400000 + #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_20G 0x00800000 + + + /* In the case where two media types (e.g. copper and fiber) are + present and electrically active at the same time, PHY Selection + will determine which of the two PHYs will be designated as the + Active PHY and used for a connection to the network. */ + uint32_t multi_phy_config; /* 0x290 */ + #define PORT_HW_CFG_PHY_SELECTION_MASK 0x00000007 + #define PORT_HW_CFG_PHY_SELECTION_SHIFT 0 + #define PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT 0x00000000 + #define PORT_HW_CFG_PHY_SELECTION_FIRST_PHY 0x00000001 + #define PORT_HW_CFG_PHY_SELECTION_SECOND_PHY 0x00000002 + #define PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY 0x00000003 + #define PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY 0x00000004 + + /* When enabled, all second phy nvram parameters will be swapped + with the first phy parameters */ + #define PORT_HW_CFG_PHY_SWAPPED_MASK 0x00000008 + #define PORT_HW_CFG_PHY_SWAPPED_SHIFT 3 + #define PORT_HW_CFG_PHY_SWAPPED_DISABLED 0x00000000 + #define PORT_HW_CFG_PHY_SWAPPED_ENABLED 0x00000008 + + + /* Address of the second external phy */ + uint32_t external_phy_config2; /* 0x294 */ + #define PORT_HW_CFG_XGXS_EXT_PHY2_ADDR_MASK 0x000000FF + #define PORT_HW_CFG_XGXS_EXT_PHY2_ADDR_SHIFT 0 + + /* The second XGXS external PHY type */ + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_MASK 0x0000FF00 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_SHIFT 8 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_DIRECT 0x00000000 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BNX2X8071 0x00000100 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BNX2X8072 0x00000200 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BNX2X8073 0x00000300 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BNX2X8705 0x00000400 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BNX2X8706 0x00000500 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BNX2X8726 0x00000600 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BNX2X8481 0x00000700 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_SFX7101 0x00000800 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BNX2X8727 0x00000900 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BNX2X8727_NOC 0x00000a00 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BNX2X84823 0x00000b00 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BNX2X54640 0x00000c00 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BNX2X84833 0x00000d00 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BNX2X54618SE 0x00000e00 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BNX2X8722 0x00000f00 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BNX2X54616 0x00001000 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BNX2X84834 0x00001100 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BNX2X84858 0x00001200 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_FAILURE 0x0000fd00 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_NOT_CONN 0x0000ff00 + + + /* 4 times 16 bits for all 4 lanes. For some external PHYs (such as + 8706, 8726 and 8727) not all 4 values are needed. 
*/ + uint16_t xgxs_config2_rx[4]; /* 0x296 */ + uint16_t xgxs_config2_tx[4]; /* 0x2A0 */ + + uint32_t lane_config; + #define PORT_HW_CFG_LANE_SWAP_CFG_MASK 0x0000FFFF + #define PORT_HW_CFG_LANE_SWAP_CFG_SHIFT 0 + /* AN and forced */ + #define PORT_HW_CFG_LANE_SWAP_CFG_01230123 0x00001b1b + /* forced only */ + #define PORT_HW_CFG_LANE_SWAP_CFG_01233210 0x00001be4 + /* forced only */ + #define PORT_HW_CFG_LANE_SWAP_CFG_31203120 0x0000d8d8 + /* forced only */ + #define PORT_HW_CFG_LANE_SWAP_CFG_32103210 0x0000e4e4 + #define PORT_HW_CFG_LANE_SWAP_CFG_TX_MASK 0x000000FF + #define PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT 0 + #define PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK 0x0000FF00 + #define PORT_HW_CFG_LANE_SWAP_CFG_RX_SHIFT 8 + #define PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK 0x0000C000 + #define PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT 14 + + /* Indicate whether to swap the external phy polarity */ + #define PORT_HW_CFG_SWAP_PHY_POLARITY_MASK 0x00010000 + #define PORT_HW_CFG_SWAP_PHY_POLARITY_DISABLED 0x00000000 + #define PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED 0x00010000 + + + uint32_t external_phy_config; + #define PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK 0x000000FF + #define PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT 0 + + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK 0x0000FF00 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SHIFT 8 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT 0x00000000 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8071 0x00000100 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8072 0x00000200 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8073 0x00000300 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8705 0x00000400 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8706 0x00000500 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8726 0x00000600 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8481 0x00000700 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101 0x00000800 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8727 0x00000900 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8727_NOC 0x00000a00 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84823 0x00000b00 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X54640 0x00000c00 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84833 0x00000d00 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X54618SE 0x00000e00 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8722 0x00000f00 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X54616 0x00001000 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84834 0x00001100 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84858 0x00001200 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT_WC 0x0000fc00 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE 0x0000fd00 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN 0x0000ff00 + + #define PORT_HW_CFG_SERDES_EXT_PHY_ADDR_MASK 0x00FF0000 + #define PORT_HW_CFG_SERDES_EXT_PHY_ADDR_SHIFT 16 + + #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_MASK 0xFF000000 + #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_SHIFT 24 + #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT 0x00000000 + #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BNX2X5482 0x01000000 + #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT_SD 0x02000000 + #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_NOT_CONN 0xff000000 + + uint32_t speed_capability_mask; + #define PORT_HW_CFG_SPEED_CAPABILITY_D3_MASK 0x0000FFFF + #define PORT_HW_CFG_SPEED_CAPABILITY_D3_SHIFT 0 + #define PORT_HW_CFG_SPEED_CAPABILITY_D3_10M_FULL 0x00000001 + #define PORT_HW_CFG_SPEED_CAPABILITY_D3_10M_HALF 0x00000002 + #define PORT_HW_CFG_SPEED_CAPABILITY_D3_100M_HALF 0x00000004 + #define PORT_HW_CFG_SPEED_CAPABILITY_D3_100M_FULL 0x00000008 + #define 
PORT_HW_CFG_SPEED_CAPABILITY_D3_1G 0x00000010 + #define PORT_HW_CFG_SPEED_CAPABILITY_D3_2_5G 0x00000020 + #define PORT_HW_CFG_SPEED_CAPABILITY_D3_10G 0x00000040 + #define PORT_HW_CFG_SPEED_CAPABILITY_D3_20G 0x00000080 + #define PORT_HW_CFG_SPEED_CAPABILITY_D3_RESERVED 0x0000f000 + + #define PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK 0xFFFF0000 + #define PORT_HW_CFG_SPEED_CAPABILITY_D0_SHIFT 16 + #define PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL 0x00010000 + #define PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF 0x00020000 + #define PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF 0x00040000 + #define PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL 0x00080000 + #define PORT_HW_CFG_SPEED_CAPABILITY_D0_1G 0x00100000 + #define PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G 0x00200000 + #define PORT_HW_CFG_SPEED_CAPABILITY_D0_10G 0x00400000 + #define PORT_HW_CFG_SPEED_CAPABILITY_D0_20G 0x00800000 + #define PORT_HW_CFG_SPEED_CAPABILITY_D0_RESERVED 0xf0000000 + + /* A place to hold the original MAC address as a backup */ + uint32_t backup_mac_upper; /* 0x2B4 */ + uint32_t backup_mac_lower; /* 0x2B8 */ + +}; + + +/**************************************************************************** + * Shared Feature configuration * + ****************************************************************************/ +struct shared_feat_cfg { /* NVRAM Offset */ + + uint32_t config; /* 0x450 */ + #define SHARED_FEATURE_BMC_ECHO_MODE_EN 0x00000001 + + /* Use NVRAM values instead of HW default values */ + #define SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_MASK \ + 0x00000002 + #define SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_DISABLED \ + 0x00000000 + #define SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED \ + 0x00000002 + + #define SHARED_FEAT_CFG_NCSI_ID_METHOD_MASK 0x00000008 + #define SHARED_FEAT_CFG_NCSI_ID_METHOD_SPIO 0x00000000 + #define SHARED_FEAT_CFG_NCSI_ID_METHOD_NVRAM 0x00000008 + + #define SHARED_FEAT_CFG_NCSI_ID_MASK 0x00000030 + #define SHARED_FEAT_CFG_NCSI_ID_SHIFT 4 + + /* Override the OTP back to single function mode. When using GPIO, + high means only SF, 0 is according to CLP configuration */ + #define SHARED_FEAT_CFG_FORCE_SF_MODE_MASK 0x00000700 + #define SHARED_FEAT_CFG_FORCE_SF_MODE_SHIFT 8 + #define SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED 0x00000000 + #define SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF 0x00000100 + #define SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4 0x00000200 + #define SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT 0x00000300 + #define SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE 0x00000400 + #define SHARED_FEAT_CFG_FORCE_SF_MODE_BD_MODE 0x00000500 + #define SHARED_FEAT_CFG_FORCE_SF_MODE_UFP_MODE 0x00000600 + #define SHARED_FEAT_CFG_FORCE_SF_MODE_EXTENDED_MODE 0x00000700 + + /* Act as if the FCoE license is invalid */ + #define SHARED_FEAT_CFG_PREVENT_FCOE 0x00001000 + + /* Force FLR capability to all ports */ + #define SHARED_FEAT_CFG_FORCE_FLR_CAPABILITY 0x00002000 + + /* Act as if the iSCSI license is invalid */ + #define SHARED_FEAT_CFG_PREVENT_ISCSI_MASK 0x00004000 + #define SHARED_FEAT_CFG_PREVENT_ISCSI_SHIFT 14 + #define SHARED_FEAT_CFG_PREVENT_ISCSI_DISABLED 0x00000000 + #define SHARED_FEAT_CFG_PREVENT_ISCSI_ENABLED 0x00004000 + + /* The interval in seconds between sending LLDP packets. 
Set to zero + to disable the feature */ + #define SHARED_FEAT_CFG_LLDP_XMIT_INTERVAL_MASK 0x00FF0000 + #define SHARED_FEAT_CFG_LLDP_XMIT_INTERVAL_SHIFT 16 + + /* The assigned device type ID for LLDP usage */ + #define SHARED_FEAT_CFG_LLDP_DEVICE_TYPE_ID_MASK 0xFF000000 + #define SHARED_FEAT_CFG_LLDP_DEVICE_TYPE_ID_SHIFT 24 + +}; + + +/**************************************************************************** + * Port Feature configuration * + ****************************************************************************/ +struct port_feat_cfg { /* port 0: 0x454 port 1: 0x4c8 */ + + uint32_t config; + #define PORT_FEAT_CFG_BAR1_SIZE_MASK 0x0000000F + #define PORT_FEAT_CFG_BAR1_SIZE_SHIFT 0 + #define PORT_FEAT_CFG_BAR1_SIZE_DISABLED 0x00000000 + #define PORT_FEAT_CFG_BAR1_SIZE_64K 0x00000001 + #define PORT_FEAT_CFG_BAR1_SIZE_128K 0x00000002 + #define PORT_FEAT_CFG_BAR1_SIZE_256K 0x00000003 + #define PORT_FEAT_CFG_BAR1_SIZE_512K 0x00000004 + #define PORT_FEAT_CFG_BAR1_SIZE_1M 0x00000005 + #define PORT_FEAT_CFG_BAR1_SIZE_2M 0x00000006 + #define PORT_FEAT_CFG_BAR1_SIZE_4M 0x00000007 + #define PORT_FEAT_CFG_BAR1_SIZE_8M 0x00000008 + #define PORT_FEAT_CFG_BAR1_SIZE_16M 0x00000009 + #define PORT_FEAT_CFG_BAR1_SIZE_32M 0x0000000a + #define PORT_FEAT_CFG_BAR1_SIZE_64M 0x0000000b + #define PORT_FEAT_CFG_BAR1_SIZE_128M 0x0000000c + #define PORT_FEAT_CFG_BAR1_SIZE_256M 0x0000000d + #define PORT_FEAT_CFG_BAR1_SIZE_512M 0x0000000e + #define PORT_FEAT_CFG_BAR1_SIZE_1G 0x0000000f + #define PORT_FEAT_CFG_BAR2_SIZE_MASK 0x000000F0 + #define PORT_FEAT_CFG_BAR2_SIZE_SHIFT 4 + #define PORT_FEAT_CFG_BAR2_SIZE_DISABLED 0x00000000 + #define PORT_FEAT_CFG_BAR2_SIZE_64K 0x00000010 + #define PORT_FEAT_CFG_BAR2_SIZE_128K 0x00000020 + #define PORT_FEAT_CFG_BAR2_SIZE_256K 0x00000030 + #define PORT_FEAT_CFG_BAR2_SIZE_512K 0x00000040 + #define PORT_FEAT_CFG_BAR2_SIZE_1M 0x00000050 + #define PORT_FEAT_CFG_BAR2_SIZE_2M 0x00000060 + #define PORT_FEAT_CFG_BAR2_SIZE_4M 0x00000070 + #define PORT_FEAT_CFG_BAR2_SIZE_8M 0x00000080 + #define PORT_FEAT_CFG_BAR2_SIZE_16M 0x00000090 + #define PORT_FEAT_CFG_BAR2_SIZE_32M 0x000000a0 + #define PORT_FEAT_CFG_BAR2_SIZE_64M 0x000000b0 + #define PORT_FEAT_CFG_BAR2_SIZE_128M 0x000000c0 + #define PORT_FEAT_CFG_BAR2_SIZE_256M 0x000000d0 + #define PORT_FEAT_CFG_BAR2_SIZE_512M 0x000000e0 + #define PORT_FEAT_CFG_BAR2_SIZE_1G 0x000000f0 + + #define PORT_FEAT_CFG_DCBX_MASK 0x00000100 + #define PORT_FEAT_CFG_DCBX_DISABLED 0x00000000 + #define PORT_FEAT_CFG_DCBX_ENABLED 0x00000100 + + #define PORT_FEAT_CFG_AUTOGREEEN_MASK 0x00000200 + #define PORT_FEAT_CFG_AUTOGREEEN_SHIFT 9 + #define PORT_FEAT_CFG_AUTOGREEEN_DISABLED 0x00000000 + #define PORT_FEAT_CFG_AUTOGREEEN_ENABLED 0x00000200 + + #define PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK 0x00000C00 + #define PORT_FEAT_CFG_STORAGE_PERSONALITY_SHIFT 10 + #define PORT_FEAT_CFG_STORAGE_PERSONALITY_DEFAULT 0x00000000 + #define PORT_FEAT_CFG_STORAGE_PERSONALITY_FCOE 0x00000400 + #define PORT_FEAT_CFG_STORAGE_PERSONALITY_ISCSI 0x00000800 + #define PORT_FEAT_CFG_STORAGE_PERSONALITY_BOTH 0x00000c00 + + #define PORT_FEAT_CFG_DCBX_SEL_MASK 0x00003000 + #define PORT_FEAT_CFG_DCBX_SEL_SHIFT 12 + #define PORT_FEAT_CFG_DCBX_SEL_CEE 0x00000000 + #define PORT_FEAT_CFG_DCBX_SEL_IEEE 0x00001000 + #define PORT_FEAT_CFG_DCBX_SEL_AUTO 0x00002000 + + #define PORT_FEATURE_EN_SIZE_MASK 0x0f000000 + #define PORT_FEATURE_EN_SIZE_SHIFT 24 + #define PORT_FEATURE_WOL_ENABLED 0x01000000 + #define PORT_FEATURE_MBA_ENABLED 0x02000000 + #define PORT_FEATURE_MFW_ENABLED 0x04000000 + + /* 
Advertise expansion ROM even if MBA is disabled */ + #define PORT_FEAT_CFG_FORCE_EXP_ROM_ADV_MASK 0x08000000 + #define PORT_FEAT_CFG_FORCE_EXP_ROM_ADV_DISABLED 0x00000000 + #define PORT_FEAT_CFG_FORCE_EXP_ROM_ADV_ENABLED 0x08000000 + + /* Check the optic vendor via i2c against a list of approved modules + in a separate nvram image */ + #define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK 0xE0000000 + #define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_SHIFT 29 + #define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_NO_ENFORCEMENT \ + 0x00000000 + #define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER \ + 0x20000000 + #define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_WARNING_MSG 0x40000000 + #define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_POWER_DOWN 0x60000000 + + uint32_t wol_config; + /* Default is used when driver sets to "auto" mode */ + #define PORT_FEATURE_WOL_ACPI_UPON_MGMT 0x00000010 + + uint32_t mba_config; + #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK 0x00000007 + #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_SHIFT 0 + #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_PXE 0x00000000 + #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_RPL 0x00000001 + #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_BOOTP 0x00000002 + #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_ISCSIB 0x00000003 + #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_FCOE_BOOT 0x00000004 + #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_NONE 0x00000007 + + #define PORT_FEATURE_MBA_BOOT_RETRY_MASK 0x00000038 + #define PORT_FEATURE_MBA_BOOT_RETRY_SHIFT 3 + + #define PORT_FEATURE_MBA_SETUP_PROMPT_ENABLE 0x00000400 + #define PORT_FEATURE_MBA_HOTKEY_MASK 0x00000800 + #define PORT_FEATURE_MBA_HOTKEY_CTRL_S 0x00000000 + #define PORT_FEATURE_MBA_HOTKEY_CTRL_B 0x00000800 + + #define PORT_FEATURE_MBA_EXP_ROM_SIZE_MASK 0x000FF000 + #define PORT_FEATURE_MBA_EXP_ROM_SIZE_SHIFT 12 + #define PORT_FEATURE_MBA_EXP_ROM_SIZE_DISABLED 0x00000000 + #define PORT_FEATURE_MBA_EXP_ROM_SIZE_2K 0x00001000 + #define PORT_FEATURE_MBA_EXP_ROM_SIZE_4K 0x00002000 + #define PORT_FEATURE_MBA_EXP_ROM_SIZE_8K 0x00003000 + #define PORT_FEATURE_MBA_EXP_ROM_SIZE_16K 0x00004000 + #define PORT_FEATURE_MBA_EXP_ROM_SIZE_32K 0x00005000 + #define PORT_FEATURE_MBA_EXP_ROM_SIZE_64K 0x00006000 + #define PORT_FEATURE_MBA_EXP_ROM_SIZE_128K 0x00007000 + #define PORT_FEATURE_MBA_EXP_ROM_SIZE_256K 0x00008000 + #define PORT_FEATURE_MBA_EXP_ROM_SIZE_512K 0x00009000 + #define PORT_FEATURE_MBA_EXP_ROM_SIZE_1M 0x0000a000 + #define PORT_FEATURE_MBA_EXP_ROM_SIZE_2M 0x0000b000 + #define PORT_FEATURE_MBA_EXP_ROM_SIZE_4M 0x0000c000 + #define PORT_FEATURE_MBA_EXP_ROM_SIZE_8M 0x0000d000 + #define PORT_FEATURE_MBA_EXP_ROM_SIZE_16M 0x0000e000 + #define PORT_FEATURE_MBA_EXP_ROM_SIZE_32M 0x0000f000 + #define PORT_FEATURE_MBA_MSG_TIMEOUT_MASK 0x00F00000 + #define PORT_FEATURE_MBA_MSG_TIMEOUT_SHIFT 20 + #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_MASK 0x03000000 + #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_SHIFT 24 + #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_AUTO 0x00000000 + #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_BBS 0x01000000 + #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_INT18H 0x02000000 + #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_INT19H 0x03000000 + #define PORT_FEATURE_MBA_LINK_SPEED_MASK 0x3C000000 + #define PORT_FEATURE_MBA_LINK_SPEED_SHIFT 26 + #define PORT_FEATURE_MBA_LINK_SPEED_AUTO 0x00000000 + #define PORT_FEATURE_MBA_LINK_SPEED_10M_HALF 0x04000000 + #define PORT_FEATURE_MBA_LINK_SPEED_10M_FULL 0x08000000 + #define PORT_FEATURE_MBA_LINK_SPEED_100M_HALF 0x0c000000 + #define PORT_FEATURE_MBA_LINK_SPEED_100M_FULL 0x10000000 + #define PORT_FEATURE_MBA_LINK_SPEED_1G 0x14000000 + #define 
PORT_FEATURE_MBA_LINK_SPEED_2_5G 0x18000000 + #define PORT_FEATURE_MBA_LINK_SPEED_10G 0x1c000000 + #define PORT_FEATURE_MBA_LINK_SPEED_20G 0x20000000 + + /* Secondary MBA configuration, + * see mba_config for the fileds defination. + */ + uint32_t mba_config2; + + uint32_t mba_vlan_cfg; + #define PORT_FEATURE_MBA_VLAN_TAG_MASK 0x0000FFFF + #define PORT_FEATURE_MBA_VLAN_TAG_SHIFT 0 + #define PORT_FEATURE_MBA_VLAN_EN 0x00010000 + #define PORT_FEATUTE_BOFM_CFGD_EN 0x00020000 + #define PORT_FEATURE_BOFM_CFGD_FTGT 0x00040000 + #define PORT_FEATURE_BOFM_CFGD_VEN 0x00080000 + + /* Secondary MBA configuration, + * see mba_vlan_cfg for the fileds defination. + */ + uint32_t mba_vlan_cfg2; + + uint32_t smbus_config; + #define PORT_FEATURE_SMBUS_ADDR_MASK 0x000000fe + #define PORT_FEATURE_SMBUS_ADDR_SHIFT 1 + + uint32_t vf_config; + #define PORT_FEAT_CFG_VF_BAR2_SIZE_MASK 0x0000000F + #define PORT_FEAT_CFG_VF_BAR2_SIZE_SHIFT 0 + #define PORT_FEAT_CFG_VF_BAR2_SIZE_DISABLED 0x00000000 + #define PORT_FEAT_CFG_VF_BAR2_SIZE_4K 0x00000001 + #define PORT_FEAT_CFG_VF_BAR2_SIZE_8K 0x00000002 + #define PORT_FEAT_CFG_VF_BAR2_SIZE_16K 0x00000003 + #define PORT_FEAT_CFG_VF_BAR2_SIZE_32K 0x00000004 + #define PORT_FEAT_CFG_VF_BAR2_SIZE_64K 0x00000005 + #define PORT_FEAT_CFG_VF_BAR2_SIZE_128K 0x00000006 + #define PORT_FEAT_CFG_VF_BAR2_SIZE_256K 0x00000007 + #define PORT_FEAT_CFG_VF_BAR2_SIZE_512K 0x00000008 + #define PORT_FEAT_CFG_VF_BAR2_SIZE_1M 0x00000009 + #define PORT_FEAT_CFG_VF_BAR2_SIZE_2M 0x0000000a + #define PORT_FEAT_CFG_VF_BAR2_SIZE_4M 0x0000000b + #define PORT_FEAT_CFG_VF_BAR2_SIZE_8M 0x0000000c + #define PORT_FEAT_CFG_VF_BAR2_SIZE_16M 0x0000000d + #define PORT_FEAT_CFG_VF_BAR2_SIZE_32M 0x0000000e + #define PORT_FEAT_CFG_VF_BAR2_SIZE_64M 0x0000000f + + uint32_t link_config; /* Used as HW defaults for the driver */ + + #define PORT_FEATURE_FLOW_CONTROL_MASK 0x00000700 + #define PORT_FEATURE_FLOW_CONTROL_SHIFT 8 + #define PORT_FEATURE_FLOW_CONTROL_AUTO 0x00000000 + #define PORT_FEATURE_FLOW_CONTROL_TX 0x00000100 + #define PORT_FEATURE_FLOW_CONTROL_RX 0x00000200 + #define PORT_FEATURE_FLOW_CONTROL_BOTH 0x00000300 + #define PORT_FEATURE_FLOW_CONTROL_NONE 0x00000400 + #define PORT_FEATURE_FLOW_CONTROL_SAFC_RX 0x00000500 + #define PORT_FEATURE_FLOW_CONTROL_SAFC_TX 0x00000600 + #define PORT_FEATURE_FLOW_CONTROL_SAFC_BOTH 0x00000700 + + #define PORT_FEATURE_LINK_SPEED_MASK 0x000F0000 + #define PORT_FEATURE_LINK_SPEED_SHIFT 16 + #define PORT_FEATURE_LINK_SPEED_AUTO 0x00000000 + #define PORT_FEATURE_LINK_SPEED_10M_HALF 0x00010000 + #define PORT_FEATURE_LINK_SPEED_10M_FULL 0x00020000 + #define PORT_FEATURE_LINK_SPEED_100M_HALF 0x00030000 + #define PORT_FEATURE_LINK_SPEED_100M_FULL 0x00040000 + #define PORT_FEATURE_LINK_SPEED_1G 0x00050000 + #define PORT_FEATURE_LINK_SPEED_2_5G 0x00060000 + #define PORT_FEATURE_LINK_SPEED_10G_CX4 0x00070000 + #define PORT_FEATURE_LINK_SPEED_20G 0x00080000 + + #define PORT_FEATURE_CONNECTED_SWITCH_MASK 0x03000000 + #define PORT_FEATURE_CONNECTED_SWITCH_SHIFT 24 + /* (forced) low speed switch (< 10G) */ + #define PORT_FEATURE_CON_SWITCH_1G_SWITCH 0x00000000 + /* (forced) high speed switch (>= 10G) */ + #define PORT_FEATURE_CON_SWITCH_10G_SWITCH 0x01000000 + #define PORT_FEATURE_CON_SWITCH_AUTO_DETECT 0x02000000 + #define PORT_FEATURE_CON_SWITCH_ONE_TIME_DETECT 0x03000000 + + + /* The default for MCP link configuration, + uses the same defines as link_config */ + uint32_t mfw_wol_link_cfg; + + /* The default for the driver of the second external phy, + uses the same defines as 
link_config */ + uint32_t link_config2; /* 0x47C */ + + /* The default for MCP of the second external phy, + uses the same defines as link_config */ + uint32_t mfw_wol_link_cfg2; /* 0x480 */ + + + /* EEE power saving mode */ + uint32_t eee_power_mode; /* 0x484 */ + #define PORT_FEAT_CFG_EEE_POWER_MODE_MASK 0x000000FF + #define PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT 0 + #define PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED 0x00000000 + #define PORT_FEAT_CFG_EEE_POWER_MODE_BALANCED 0x00000001 + #define PORT_FEAT_CFG_EEE_POWER_MODE_AGGRESSIVE 0x00000002 + #define PORT_FEAT_CFG_EEE_POWER_MODE_LOW_LATENCY 0x00000003 + + + uint32_t Reserved2[16]; /* 0x48C */ +}; + +/**************************************************************************** + * Device Information * + ****************************************************************************/ +struct shm_dev_info { /* size */ + + uint32_t bc_rev; /* 8 bits each: major, minor, build */ /* 4 */ + + struct shared_hw_cfg shared_hw_config; /* 40 */ + + struct port_hw_cfg port_hw_config[PORT_MAX]; /* 400*2=800 */ + + struct shared_feat_cfg shared_feature_config; /* 4 */ + + struct port_feat_cfg port_feature_config[PORT_MAX];/* 116*2=232 */ + +}; + +struct extended_dev_info_shared_cfg { /* NVRAM OFFSET */ + + /* Threshold in celcius to start using the fan */ + uint32_t temperature_monitor1; /* 0x4000 */ + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_THRESH_MASK 0x0000007F + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_THRESH_SHIFT 0 + + /* Threshold in celcius to shut down the board */ + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_THRESH_MASK 0x00007F00 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_THRESH_SHIFT 8 + + /* EPIO of fan temperature status */ + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_MASK 0x00FF0000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_SHIFT 16 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_NA 0x00000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO0 0x00010000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO1 0x00020000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO2 0x00030000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO3 0x00040000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO4 0x00050000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO5 0x00060000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO6 0x00070000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO7 0x00080000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO8 0x00090000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO9 0x000a0000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO10 0x000b0000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO11 0x000c0000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO12 0x000d0000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO13 0x000e0000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO14 0x000f0000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO15 0x00100000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO16 0x00110000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO17 0x00120000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO18 0x00130000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO19 0x00140000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO20 0x00150000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO21 0x00160000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO22 0x00170000 + #define 
EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO23 0x00180000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO24 0x00190000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO25 0x001a0000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO26 0x001b0000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO27 0x001c0000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO28 0x001d0000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO29 0x001e0000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO30 0x001f0000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO31 0x00200000 + + /* EPIO of shut down temperature status */ + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_MASK 0xFF000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_SHIFT 24 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_NA 0x00000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO0 0x01000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO1 0x02000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO2 0x03000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO3 0x04000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO4 0x05000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO5 0x06000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO6 0x07000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO7 0x08000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO8 0x09000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO9 0x0a000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO10 0x0b000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO11 0x0c000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO12 0x0d000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO13 0x0e000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO14 0x0f000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO15 0x10000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO16 0x11000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO17 0x12000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO18 0x13000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO19 0x14000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO20 0x15000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO21 0x16000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO22 0x17000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO23 0x18000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO24 0x19000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO25 0x1a000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO26 0x1b000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO27 0x1c000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO28 0x1d000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO29 0x1e000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO30 0x1f000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO31 0x20000000 + + + /* EPIO of shut down temperature status */ + uint32_t temperature_monitor2; /* 0x4004 */ + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_PERIOD_MASK 0x0000FFFF + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_PERIOD_SHIFT 0 + + /* Sensor interface - Disabled / BSC / In the future - SMBUS */ + #define EXTENDED_DEV_INFO_SHARED_CFG_SENSOR_INTERFACE_MASK 0x00030000 + #define 
EXTENDED_DEV_INFO_SHARED_CFG_SENSOR_INTERFACE_SHIFT 16 + #define EXTENDED_DEV_INFO_SHARED_CFG_SENSOR_INTERFACE_DISABLED \ + 0x00000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_SENSOR_INTERFACE_BSC 0x00010000 + + /* On Board Sensor Address */ + #define EXTENDED_DEV_INFO_SHARED_CFG_SENSOR_ADDR_MASK 0x03FC0000 + #define EXTENDED_DEV_INFO_SHARED_CFG_SENSOR_ADDR_SHIFT 18 + + /* MFW flavor to be used */ + uint32_t mfw_cfg; /* 0x4008 */ + #define EXTENDED_DEV_INFO_SHARED_CFG_MFW_FLAVOR_MASK 0x000000FF + #define EXTENDED_DEV_INFO_SHARED_CFG_MFW_FLAVOR_SHIFT 0 + #define EXTENDED_DEV_INFO_SHARED_CFG_MFW_FLAVOR_NA 0x00000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_MFW_FLAVOR_A 0x00000001 + + /* Should NIC data query remain enabled upon last drv unload */ + #define EXTENDED_DEV_INFO_SHARED_CFG_OCBB_EN_LAST_DRV_MASK 0x00000100 + #define EXTENDED_DEV_INFO_SHARED_CFG_OCBB_EN_LAST_DRV_SHIFT 8 + #define EXTENDED_DEV_INFO_SHARED_CFG_OCBB_EN_LAST_DRV_DISABLED 0x00000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_OCBB_EN_LAST_DRV_ENABLED 0x00000100 + + /* Prevent OCBB feature */ + #define EXTENDED_DEV_INFO_SHARED_CFG_OCBB_PREVENT_MASK 0x00000200 + #define EXTENDED_DEV_INFO_SHARED_CFG_OCBB_PREVENT_SHIFT 9 + #define EXTENDED_DEV_INFO_SHARED_CFG_OCBB_PREVENT_DISABLED 0x00000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_OCBB_PREVENT_ENABLED 0x00000200 + + /* Enable DCi support */ + #define EXTENDED_DEV_INFO_SHARED_CFG_DCI_SUPPORT_MASK 0x00000400 + #define EXTENDED_DEV_INFO_SHARED_CFG_DCI_SUPPORT_SHIFT 10 + #define EXTENDED_DEV_INFO_SHARED_CFG_DCI_SUPPORT_DISABLED 0x00000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_DCI_SUPPORT_ENABLED 0x00000400 + + /* Reserved bits: 75 */ + + /* PLDM support over MCTP */ + #define EXTENDED_DEV_INFO_SHARED_CFG_PLDM_ENABLE_MASK 0x00001000 + #define EXTENDED_DEV_INFO_SHARED_CFG_PLDM_ENABLE_SHIFT 12 + #define EXTENDED_DEV_INFO_SHARED_CFG_PLDM_ENABLE_DISABLED 0x00000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_PLDM_ENABLE_ENABLED 0x00001000 + + /* Option to Disable embedded LLDP, 0 - Off, 1 - On */ + #define EXTENDED_DEV_INFO_SHARED_CFG_LLDP_DISABLE_MASK 0x00002000 + #define EXTENDED_DEV_INFO_SHARED_CFG_LLDP_DISABLE_SHIFT 13 + #define EXTENDED_DEV_INFO_SHARED_CFG_LLDP_DISABLE_OFF 0x00000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_LLDP_DISABLE_ON 0x00002000 + + /* Hide DCBX feature in CCM/BACS menus */ + #define EXTENDED_DEV_INFO_SHARED_CFG_HIDE_DCBX_FEAT_MASK 0x00010000 + #define EXTENDED_DEV_INFO_SHARED_CFG_HIDE_DCBX_FEAT_SHIFT 16 + #define EXTENDED_DEV_INFO_SHARED_CFG_HIDE_DCBX_FEAT_DISABLED 0x00000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_HIDE_DCBX_FEAT_ENABLED 0x00010000 + + uint32_t smbus_config; /* 0x400C */ + #define EXTENDED_DEV_INFO_SHARED_CFG_SMBUS_ADDR_MASK 0x000000FF + #define EXTENDED_DEV_INFO_SHARED_CFG_SMBUS_ADDR_SHIFT 0 + + /* Switching regulator loop gain */ + uint32_t board_cfg; /* 0x4010 */ + #define EXTENDED_DEV_INFO_SHARED_CFG_LOOP_GAIN_MASK 0x0000000F + #define EXTENDED_DEV_INFO_SHARED_CFG_LOOP_GAIN_SHIFT 0 + #define EXTENDED_DEV_INFO_SHARED_CFG_LOOP_GAIN_HW_DEFAULT 0x00000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_LOOP_GAIN_X2 0x00000008 + #define EXTENDED_DEV_INFO_SHARED_CFG_LOOP_GAIN_X4 0x00000009 + #define EXTENDED_DEV_INFO_SHARED_CFG_LOOP_GAIN_X8 0x0000000a + #define EXTENDED_DEV_INFO_SHARED_CFG_LOOP_GAIN_X16 0x0000000b + #define EXTENDED_DEV_INFO_SHARED_CFG_LOOP_GAIN_DIV8 0x0000000c + #define EXTENDED_DEV_INFO_SHARED_CFG_LOOP_GAIN_DIV4 0x0000000d + #define EXTENDED_DEV_INFO_SHARED_CFG_LOOP_GAIN_DIV2 0x0000000e + #define EXTENDED_DEV_INFO_SHARED_CFG_LOOP_GAIN_X1 
0x0000000f + + /* whether shadow swim feature is supported */ + #define EXTENDED_DEV_INFO_SHARED_CFG_SHADOW_SWIM_MASK 0x00000100 + #define EXTENDED_DEV_INFO_SHARED_CFG_SHADOW_SWIM_SHIFT 8 + #define EXTENDED_DEV_INFO_SHARED_CFG_SHADOW_SWIM_DISABLED 0x00000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_SHADOW_SWIM_ENABLED 0x00000100 + + /* whether to show/hide SRIOV menu in CCM */ + #define EXTENDED_DEV_INFO_SHARED_CFG_SRIOV_SHOW_MENU_MASK 0x00000200 + #define EXTENDED_DEV_INFO_SHARED_CFG_SRIOV_SHOW_MENU_SHIFT 9 + #define EXTENDED_DEV_INFO_SHARED_CFG_SRIOV_SHOW_MENU 0x00000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_SRIOV_HIDE_MENU 0x00000200 + + /* Override PCIE revision ID when enabled the, + * revision ID will set to B1=='0x11' + */ + #define EXTENDED_DEV_INFO_SHARED_CFG_OVR_REV_ID_MASK 0x00000400 + #define EXTENDED_DEV_INFO_SHARED_CFG_OVR_REV_ID_SHIFT 10 + #define EXTENDED_DEV_INFO_SHARED_CFG_OVR_REV_ID_DISABLED 0x00000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_OVR_REV_ID_ENABLED 0x00000400 + + /* Bypass slicer offset tuning */ + #define EXTENDED_DEV_INFO_SHARED_CFG_BYPASS_SLICER_MASK 0x00000800 + #define EXTENDED_DEV_INFO_SHARED_CFG_BYPASS_SLICER_SHIFT 11 + #define EXTENDED_DEV_INFO_SHARED_CFG_BYPASS_SLICER_DISABLED 0x00000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_BYPASS_SLICER_ENABLED 0x00000800 + /* Control Revision ID */ + #define EXTENDED_DEV_INFO_SHARED_CFG_REV_ID_CTRL_MASK 0x00003000 + #define EXTENDED_DEV_INFO_SHARED_CFG_REV_ID_CTRL_SHIFT 12 + #define EXTENDED_DEV_INFO_SHARED_CFG_REV_ID_CTRL_PRESERVE 0x00000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_REV_ID_CTRL_ACTUAL 0x00001000 + #define EXTENDED_DEV_INFO_SHARED_CFG_REV_ID_CTRL_FORCE_B0 0x00002000 + #define EXTENDED_DEV_INFO_SHARED_CFG_REV_ID_CTRL_FORCE_B1 0x00003000 + /* Threshold in celcius for max continuous operation */ + uint32_t temperature_report; /* 0x4014 */ + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_MCOT_MASK 0x0000007F + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_MCOT_SHIFT 0 + + /* Threshold in celcius for sensor caution */ + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SCT_MASK 0x00007F00 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SCT_SHIFT 8 + + /* wwn node prefix to be used (unless value is 0) */ + uint32_t wwn_prefix; /* 0x4018 */ + #define EXTENDED_DEV_INFO_SHARED_CFG_WWN_NODE_PREFIX0_MASK 0x000000FF + #define EXTENDED_DEV_INFO_SHARED_CFG_WWN_NODE_PREFIX0_SHIFT 0 + + #define EXTENDED_DEV_INFO_SHARED_CFG_WWN_NODE_PREFIX1_MASK 0x0000FF00 + #define EXTENDED_DEV_INFO_SHARED_CFG_WWN_NODE_PREFIX1_SHIFT 8 + + /* wwn port prefix to be used (unless value is 0) */ + #define EXTENDED_DEV_INFO_SHARED_CFG_WWN_PORT_PREFIX0_MASK 0x00FF0000 + #define EXTENDED_DEV_INFO_SHARED_CFG_WWN_PORT_PREFIX0_SHIFT 16 + + /* wwn port prefix to be used (unless value is 0) */ + #define EXTENDED_DEV_INFO_SHARED_CFG_WWN_PORT_PREFIX1_MASK 0xFF000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_WWN_PORT_PREFIX1_SHIFT 24 + + /* General debug nvm cfg */ + uint32_t dbg_cfg_flags; /* 0x401C */ + #define EXTENDED_DEV_INFO_SHARED_CFG_DBG_MASK 0x000FFFFF + #define EXTENDED_DEV_INFO_SHARED_CFG_DBG_SHIFT 0 + #define EXTENDED_DEV_INFO_SHARED_CFG_DBG_ENABLE 0x00000001 + #define EXTENDED_DEV_INFO_SHARED_CFG_DBG_EN_SIGDET_FILTER 0x00000002 + #define EXTENDED_DEV_INFO_SHARED_CFG_DBG_SET_LP_TX_PRESET7 0x00000004 + #define EXTENDED_DEV_INFO_SHARED_CFG_DBG_SET_TX_ANA_DEFAULT 0x00000008 + #define EXTENDED_DEV_INFO_SHARED_CFG_DBG_SET_PLL_ANA_DEFAULT 0x00000010 + #define EXTENDED_DEV_INFO_SHARED_CFG_DBG_FORCE_G1PLL_RETUNE 0x00000020 + #define 
EXTENDED_DEV_INFO_SHARED_CFG_DBG_SET_RX_ANA_DEFAULT 0x00000040 + #define EXTENDED_DEV_INFO_SHARED_CFG_DBG_FORCE_SERDES_RX_CLK 0x00000080 + #define EXTENDED_DEV_INFO_SHARED_CFG_DBG_DIS_RX_LP_EIEOS 0x00000100 + #define EXTENDED_DEV_INFO_SHARED_CFG_DBG_FINALIZE_UCODE 0x00000200 + #define EXTENDED_DEV_INFO_SHARED_CFG_DBG_HOLDOFF_REQ 0x00000400 + #define EXTENDED_DEV_INFO_SHARED_CFG_DBG_RX_SIGDET_OVERRIDE 0x00000800 + #define EXTENDED_DEV_INFO_SHARED_CFG_DBG_GP_PORG_UC_RESET 0x00001000 + #define EXTENDED_DEV_INFO_SHARED_CFG_DBG_SUPPRESS_COMPEN_EVT 0x00002000 + #define EXTENDED_DEV_INFO_SHARED_CFG_DBG_ADJ_TXEQ_P0_P1 0x00004000 + #define EXTENDED_DEV_INFO_SHARED_CFG_DBG_G3_PLL_RETUNE 0x00008000 + #define EXTENDED_DEV_INFO_SHARED_CFG_DBG_SET_MAC_PHY_CTL8 0x00010000 + #define EXTENDED_DEV_INFO_SHARED_CFG_DBG_DIS_MAC_G3_FRM_ERR 0x00020000 + #define EXTENDED_DEV_INFO_SHARED_CFG_DBG_INFERRED_EI 0x00040000 + #define EXTENDED_DEV_INFO_SHARED_CFG_DBG_GEN3_COMPLI_ENA 0x00080000 + + /* Override Rx signal detect threshold when enabled the threshold + * will be set staticaly + */ + #define EXTENDED_DEV_INFO_SHARED_CFG_OVERRIDE_RX_SIG_MASK 0x00100000 + #define EXTENDED_DEV_INFO_SHARED_CFG_OVERRIDE_RX_SIG_SHIFT 20 + #define EXTENDED_DEV_INFO_SHARED_CFG_OVERRIDE_RX_SIG_DISABLED 0x00000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_OVERRIDE_RX_SIG_ENABLED 0x00100000 + + /* Debug signet rx threshold */ + uint32_t dbg_rx_sigdet_threshold; /* 0x4020 */ + #define EXTENDED_DEV_INFO_SHARED_CFG_DBG_RX_SIGDET_MASK 0x00000007 + #define EXTENDED_DEV_INFO_SHARED_CFG_DBG_RX_SIGDET_SHIFT 0 + + /* Enable IFFE feature */ + uint32_t iffe_features; /* 0x4024 */ + #define EXTENDED_DEV_INFO_SHARED_CFG_ENABLE_IFFE_MASK 0x00000001 + #define EXTENDED_DEV_INFO_SHARED_CFG_ENABLE_IFFE_SHIFT 0 + #define EXTENDED_DEV_INFO_SHARED_CFG_ENABLE_IFFE_DISABLED 0x00000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_ENABLE_IFFE_ENABLED 0x00000001 + + /* Allowable port enablement (bitmask for ports 3-1) */ + #define EXTENDED_DEV_INFO_SHARED_CFG_OVERRIDE_PORT_MASK 0x0000000E + #define EXTENDED_DEV_INFO_SHARED_CFG_OVERRIDE_PORT_SHIFT 1 + + /* Allow iSCSI offload override */ + #define EXTENDED_DEV_INFO_SHARED_CFG_OVERRIDE_ISCSI_MASK 0x00000010 + #define EXTENDED_DEV_INFO_SHARED_CFG_OVERRIDE_ISCSI_SHIFT 4 + #define EXTENDED_DEV_INFO_SHARED_CFG_OVERRIDE_ISCSI_DISABLED 0x00000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_OVERRIDE_ISCSI_ENABLED 0x00000010 + + /* Allow FCoE offload override */ + #define EXTENDED_DEV_INFO_SHARED_CFG_OVERRIDE_FCOE_MASK 0x00000020 + #define EXTENDED_DEV_INFO_SHARED_CFG_OVERRIDE_FCOE_SHIFT 5 + #define EXTENDED_DEV_INFO_SHARED_CFG_OVERRIDE_FCOE_DISABLED 0x00000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_OVERRIDE_FCOE_ENABLED 0x00000020 + + /* Tie to adaptor */ + #define EXTENDED_DEV_INFO_SHARED_CFG_TIE_ADAPTOR_MASK 0x00008000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TIE_ADAPTOR_SHIFT 15 + #define EXTENDED_DEV_INFO_SHARED_CFG_TIE_ADAPTOR_DISABLED 0x00000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TIE_ADAPTOR_ENABLED 0x00008000 + + /* Currently enabled port(s) (bitmask for ports 3-1) */ + uint32_t current_iffe_mask; /* 0x4028 */ + #define EXTENDED_DEV_INFO_SHARED_CFG_CURRENT_CFG_MASK 0x0000000E + #define EXTENDED_DEV_INFO_SHARED_CFG_CURRENT_CFG_SHIFT 1 + + /* Current iSCSI offload */ + #define EXTENDED_DEV_INFO_SHARED_CFG_CURRENT_ISCSI_MASK 0x00000010 + #define EXTENDED_DEV_INFO_SHARED_CFG_CURRENT_ISCSI_SHIFT 4 + #define EXTENDED_DEV_INFO_SHARED_CFG_CURRENT_ISCSI_DISABLED 0x00000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_CURRENT_ISCSI_ENABLED 
0x00000010 + + /* Current FCoE offload */ + #define EXTENDED_DEV_INFO_SHARED_CFG_CURRENT_FCOE_MASK 0x00000020 + #define EXTENDED_DEV_INFO_SHARED_CFG_CURRENT_FCOE_SHIFT 5 + #define EXTENDED_DEV_INFO_SHARED_CFG_CURRENT_FCOE_DISABLED 0x00000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_CURRENT_FCOE_ENABLED 0x00000020 + + /* FW set this pin to "0" (assert) these signal if either of its MAC + * or PHY specific threshold values is exceeded. + * Values are standard GPIO/EPIO pins. + */ + uint32_t threshold_pin; /* 0x402C */ + #define EXTENDED_DEV_INFO_SHARED_CFG_TCONTROL_PIN_MASK 0x000000FF + #define EXTENDED_DEV_INFO_SHARED_CFG_TCONTROL_PIN_SHIFT 0 + #define EXTENDED_DEV_INFO_SHARED_CFG_TWARNING_PIN_MASK 0x0000FF00 + #define EXTENDED_DEV_INFO_SHARED_CFG_TWARNING_PIN_SHIFT 8 + #define EXTENDED_DEV_INFO_SHARED_CFG_TCRITICAL_PIN_MASK 0x00FF0000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TCRITICAL_PIN_SHIFT 16 + + /* MAC die temperature threshold in Celsius. */ + uint32_t mac_threshold_val; /* 0x4030 */ + #define EXTENDED_DEV_INFO_SHARED_CFG_CONTROL_MAC_THRESH_MASK 0x000000FF + #define EXTENDED_DEV_INFO_SHARED_CFG_CONTROL_MAC_THRESH_SHIFT 0 + #define EXTENDED_DEV_INFO_SHARED_CFG_WARNING_MAC_THRESH_MASK 0x0000FF00 + #define EXTENDED_DEV_INFO_SHARED_CFG_WARNING_MAC_THRESH_SHIFT 8 + #define EXTENDED_DEV_INFO_SHARED_CFG_CRITICAL_MAC_THRESH_MASK 0x00FF0000 + #define EXTENDED_DEV_INFO_SHARED_CFG_CRITICAL_MAC_THRESH_SHIFT 16 + + /* PHY die temperature threshold in Celsius. */ + uint32_t phy_threshold_val; /* 0x4034 */ + #define EXTENDED_DEV_INFO_SHARED_CFG_CONTROL_PHY_THRESH_MASK 0x000000FF + #define EXTENDED_DEV_INFO_SHARED_CFG_CONTROL_PHY_THRESH_SHIFT 0 + #define EXTENDED_DEV_INFO_SHARED_CFG_WARNING_PHY_THRESH_MASK 0x0000FF00 + #define EXTENDED_DEV_INFO_SHARED_CFG_WARNING_PHY_THRESH_SHIFT 8 + #define EXTENDED_DEV_INFO_SHARED_CFG_CRITICAL_PHY_THRESH_MASK 0x00FF0000 + #define EXTENDED_DEV_INFO_SHARED_CFG_CRITICAL_PHY_THRESH_SHIFT 16 + + /* External pins to communicate with host. + * Values are standard GPIO/EPIO pins. 
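The *_MASK / *_SHIFT pairs used throughout port_feat_cfg and extended_dev_info_shared_cfg all follow the same decode rule: mask the dword, then shift right. A minimal sketch, assuming this header is included; the helper and struct names here are illustrative only and not part of the patch, using the three one-byte MAC die temperature thresholds packed into mac_threshold_val as the example:

#include <stdint.h>

/* Illustrative only: unpack the control/warning/critical MAC die
 * temperature thresholds (in Celsius) from mac_threshold_val.
 * The generic rule for every packed field here is (reg & MASK) >> SHIFT.
 */
struct mac_temp_thresholds {
	uint8_t control;	/* exceeds this -> control pin asserted */
	uint8_t warning;	/* exceeds this -> warning pin asserted */
	uint8_t critical;	/* exceeds this -> critical pin asserted */
};

static inline struct mac_temp_thresholds
decode_mac_thresholds(uint32_t mac_threshold_val)
{
	struct mac_temp_thresholds t;

	t.control = (mac_threshold_val &
		     EXTENDED_DEV_INFO_SHARED_CFG_CONTROL_MAC_THRESH_MASK) >>
		    EXTENDED_DEV_INFO_SHARED_CFG_CONTROL_MAC_THRESH_SHIFT;
	t.warning = (mac_threshold_val &
		     EXTENDED_DEV_INFO_SHARED_CFG_WARNING_MAC_THRESH_MASK) >>
		    EXTENDED_DEV_INFO_SHARED_CFG_WARNING_MAC_THRESH_SHIFT;
	t.critical = (mac_threshold_val &
		      EXTENDED_DEV_INFO_SHARED_CFG_CRITICAL_MAC_THRESH_MASK) >>
		     EXTENDED_DEV_INFO_SHARED_CFG_CRITICAL_MAC_THRESH_SHIFT;
	return t;
}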
+ */ + uint32_t host_pin; /* 0x4038 */ + #define EXTENDED_DEV_INFO_SHARED_CFG_I2C_ISOLATE_MASK 0x000000FF + #define EXTENDED_DEV_INFO_SHARED_CFG_I2C_ISOLATE_SHIFT 0 + #define EXTENDED_DEV_INFO_SHARED_CFG_MEZZ_FAULT_MASK 0x0000FF00 + #define EXTENDED_DEV_INFO_SHARED_CFG_MEZZ_FAULT_SHIFT 8 + #define EXTENDED_DEV_INFO_SHARED_CFG_MEZZ_VPD_UPDATE_MASK 0x00FF0000 + #define EXTENDED_DEV_INFO_SHARED_CFG_MEZZ_VPD_UPDATE_SHIFT 16 + #define EXTENDED_DEV_INFO_SHARED_CFG_VPD_CACHE_COMP_MASK 0xFF000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_VPD_CACHE_COMP_SHIFT 24 + + /* Manufacture kit version */ + uint32_t manufacture_ver; /* 0x403C */ + + /* Manufacture timestamp */ + uint32_t manufacture_data; /* 0x4040 */ + + /* Number of ISCSI/FCOE cfg images */ + #define EXTENDED_DEV_INFO_SHARED_CFG_NUM_ISCSI_FCOE_CFGS_MASK 0x00040000 + #define EXTENDED_DEV_INFO_SHARED_CFG_NUM_ISCSI_FCOE_CFGS_SHIFT18 + #define EXTENDED_DEV_INFO_SHARED_CFG_NUM_ISCSI_FCOE_CFGS_2 0x00000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_NUM_ISCSI_FCOE_CFGS_4 0x00040000 + + /* MCP crash dump trigger */ + uint32_t mcp_crash_dump; /* 0x4044 */ + #define EXTENDED_DEV_INFO_SHARED_CFG_CRASH_DUMP_MASK 0x7FFFFFFF + #define EXTENDED_DEV_INFO_SHARED_CFG_CRASH_DUMP_SHIFT 0 + #define EXTENDED_DEV_INFO_SHARED_CFG_CRASH_DUMP_DISABLED 0x00000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_CRASH_DUMP_ENABLED 0x00000001 + + /* MBI version */ + uint32_t mbi_version; /* 0x4048 */ + + /* MBI date */ + uint32_t mbi_date; /* 0x404C */ +}; + + +#if !defined(__LITTLE_ENDIAN) && !defined(__BIG_ENDIAN) + #error "Missing either LITTLE_ENDIAN or BIG_ENDIAN definition." +#endif + +#define FUNC_0 0 +#define FUNC_1 1 +#define FUNC_2 2 +#define FUNC_3 3 +#define FUNC_4 4 +#define FUNC_5 5 +#define FUNC_6 6 +#define FUNC_7 7 +#define E1_FUNC_MAX 2 +#define E1H_FUNC_MAX 8 +#define E2_FUNC_MAX 4 /* per path */ + +#define VN_0 0 +#define VN_1 1 +#define VN_2 2 +#define VN_3 3 +#define E1VN_MAX 1 +#define E1HVN_MAX 4 + +#define E2_VF_MAX 64 /* HC_REG_VF_CONFIGURATION_SIZE */ +/* This value (in milliseconds) determines the frequency of the driver + * issuing the PULSE message code. The firmware monitors this periodic + * pulse to determine when to switch to an OS-absent mode. */ +#define DRV_PULSE_PERIOD_MS 250 + +/* This value (in milliseconds) determines how long the driver should + * wait for an acknowledgement from the firmware before timing out. Once + * the firmware has timed out, the driver will assume there is no firmware + * running and there won't be any firmware-driver synchronization during a + * driver reset. 
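A minimal sketch of how the acknowledgement constants defined immediately below are typically combined into a bounded wait: poll the firmware mailbox once per FW_ACK_POLL_TIME_MS until the response carrying the expected sequence number shows up, or FW_ACK_NUM_OF_POLL attempts have elapsed. The read_fw_mb_header() and msleep_1ms() helpers are hypothetical placeholders, not part of this header:

#include <stdbool.h>
#include <stdint.h>

uint32_t read_fw_mb_header(void);	/* hypothetical: read fw_mb_header from shmem */
void msleep_1ms(void);			/* hypothetical: sleep FW_ACK_POLL_TIME_MS */

/* Wait for the firmware to acknowledge the mailbox command carrying
 * sequence number 'seq'. Gives up after FW_ACK_NUM_OF_POLL polls, i.e.
 * FW_ACK_TIME_OUT_MS / FW_ACK_POLL_TIME_MS iterations.
 */
bool wait_for_fw_ack(uint16_t seq, uint32_t *resp)
{
	uint32_t i;

	for (i = 0; i < FW_ACK_NUM_OF_POLL; i++) {
		*resp = read_fw_mb_header();
		if ((*resp & FW_MSG_SEQ_NUMBER_MASK) == seq)
			return true;	/* firmware answered this command */
		msleep_1ms();
	}
	return false;	/* timed out: no firmware-driver synchronization */
}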
*/ +#define FW_ACK_TIME_OUT_MS 5000 + +#define FW_ACK_POLL_TIME_MS 1 + +#define FW_ACK_NUM_OF_POLL (FW_ACK_TIME_OUT_MS/FW_ACK_POLL_TIME_MS) + +#define MFW_TRACE_SIGNATURE 0x54524342 + +/**************************************************************************** + * Driver <-> FW Mailbox * + ****************************************************************************/ +struct drv_port_mb { + + uint32_t link_status; + /* Driver should update this field on any link change event */ + + #define LINK_STATUS_NONE (0<<0) + #define LINK_STATUS_LINK_FLAG_MASK 0x00000001 + #define LINK_STATUS_LINK_UP 0x00000001 + #define LINK_STATUS_SPEED_AND_DUPLEX_MASK 0x0000001E + #define LINK_STATUS_SPEED_AND_DUPLEX_AN_NOT_COMPLETE (0<<1) + #define LINK_STATUS_SPEED_AND_DUPLEX_10THD (1<<1) + #define LINK_STATUS_SPEED_AND_DUPLEX_10TFD (2<<1) + #define LINK_STATUS_SPEED_AND_DUPLEX_100TXHD (3<<1) + #define LINK_STATUS_SPEED_AND_DUPLEX_100T4 (4<<1) + #define LINK_STATUS_SPEED_AND_DUPLEX_100TXFD (5<<1) + #define LINK_STATUS_SPEED_AND_DUPLEX_1000THD (6<<1) + #define LINK_STATUS_SPEED_AND_DUPLEX_1000TFD (7<<1) + #define LINK_STATUS_SPEED_AND_DUPLEX_1000XFD (7<<1) + #define LINK_STATUS_SPEED_AND_DUPLEX_2500THD (8<<1) + #define LINK_STATUS_SPEED_AND_DUPLEX_2500TFD (9<<1) + #define LINK_STATUS_SPEED_AND_DUPLEX_2500XFD (9<<1) + #define LINK_STATUS_SPEED_AND_DUPLEX_10GTFD (10<<1) + #define LINK_STATUS_SPEED_AND_DUPLEX_10GXFD (10<<1) + #define LINK_STATUS_SPEED_AND_DUPLEX_20GTFD (11<<1) + #define LINK_STATUS_SPEED_AND_DUPLEX_20GXFD (11<<1) + + #define LINK_STATUS_AUTO_NEGOTIATE_FLAG_MASK 0x00000020 + #define LINK_STATUS_AUTO_NEGOTIATE_ENABLED 0x00000020 + + #define LINK_STATUS_AUTO_NEGOTIATE_COMPLETE 0x00000040 + #define LINK_STATUS_PARALLEL_DETECTION_FLAG_MASK 0x00000080 + #define LINK_STATUS_PARALLEL_DETECTION_USED 0x00000080 + + #define LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE 0x00000200 + #define LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE 0x00000400 + #define LINK_STATUS_LINK_PARTNER_100T4_CAPABLE 0x00000800 + #define LINK_STATUS_LINK_PARTNER_100TXFD_CAPABLE 0x00001000 + #define LINK_STATUS_LINK_PARTNER_100TXHD_CAPABLE 0x00002000 + #define LINK_STATUS_LINK_PARTNER_10TFD_CAPABLE 0x00004000 + #define LINK_STATUS_LINK_PARTNER_10THD_CAPABLE 0x00008000 + + #define LINK_STATUS_TX_FLOW_CONTROL_FLAG_MASK 0x00010000 + #define LINK_STATUS_TX_FLOW_CONTROL_ENABLED 0x00010000 + + #define LINK_STATUS_RX_FLOW_CONTROL_FLAG_MASK 0x00020000 + #define LINK_STATUS_RX_FLOW_CONTROL_ENABLED 0x00020000 + + #define LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK 0x000C0000 + #define LINK_STATUS_LINK_PARTNER_NOT_PAUSE_CAPABLE (0<<18) + #define LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE (1<<18) + #define LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE (2<<18) + #define LINK_STATUS_LINK_PARTNER_BOTH_PAUSE (3<<18) + + #define LINK_STATUS_SERDES_LINK 0x00100000 + + #define LINK_STATUS_LINK_PARTNER_2500XFD_CAPABLE 0x00200000 + #define LINK_STATUS_LINK_PARTNER_2500XHD_CAPABLE 0x00400000 + #define LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE 0x00800000 + #define LINK_STATUS_LINK_PARTNER_20GXFD_CAPABLE 0x10000000 + + #define LINK_STATUS_PFC_ENABLED 0x20000000 + + #define LINK_STATUS_PHYSICAL_LINK_FLAG 0x40000000 + #define LINK_STATUS_SFP_TX_FAULT 0x80000000 + + uint32_t port_stx; + + uint32_t stat_nig_timer; + + /* MCP firmware does not use this field */ + uint32_t ext_phy_fw_version; + +}; + + +struct drv_func_mb { + + uint32_t drv_mb_header; + #define DRV_MSG_CODE_MASK 0xffff0000 + #define DRV_MSG_CODE_LOAD_REQ 0x10000000 + #define DRV_MSG_CODE_LOAD_DONE 0x11000000 + 
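The drv_mb_header / fw_mb_header masks around this point split each mailbox dword into a 16-bit command or response code (DRV_MSG_CODE_MASK, FW_MSG_CODE_MASK) and a 16-bit sequence number that the firmware echoes back. A minimal sketch of the handshake under that assumption; write_drv_mb_header() is a hypothetical register write and wait_for_fw_ack() is the polling helper sketched earlier:

#include <stdbool.h>
#include <stdint.h>

void write_drv_mb_header(uint32_t val);		/* hypothetical mailbox write */
bool wait_for_fw_ack(uint16_t seq, uint32_t *resp);

/* Issue one mailbox command: upper 16 bits carry a DRV_MSG_CODE_* opcode,
 * lower 16 bits a rolling sequence number. Returns the FW_MSG_CODE_*
 * response code, or 0 on timeout.
 */
uint32_t drv_fw_command(uint32_t command, uint16_t seq)
{
	uint32_t resp;

	write_drv_mb_header((command & DRV_MSG_CODE_MASK) |
			    (seq & DRV_MSG_SEQ_NUMBER_MASK));

	if (!wait_for_fw_ack(seq, &resp))
		return 0;

	return resp & FW_MSG_CODE_MASK;
}

/* Example: a load request is granted with one of the accept codes below,
 * or refused with FW_MSG_CODE_DRV_LOAD_REFUSED.
 */
bool request_load(uint16_t seq)
{
	uint32_t rc = drv_fw_command(DRV_MSG_CODE_LOAD_REQ, seq);

	return rc == FW_MSG_CODE_DRV_LOAD_COMMON ||
	       rc == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP ||
	       rc == FW_MSG_CODE_DRV_LOAD_PORT ||
	       rc == FW_MSG_CODE_DRV_LOAD_FUNCTION;
}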
#define DRV_MSG_CODE_UNLOAD_REQ_WOL_EN 0x20000000 + #define DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS 0x20010000 + #define DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP 0x20020000 + #define DRV_MSG_CODE_UNLOAD_DONE 0x21000000 + #define DRV_MSG_CODE_DCC_OK 0x30000000 + #define DRV_MSG_CODE_DCC_FAILURE 0x31000000 + #define DRV_MSG_CODE_DIAG_ENTER_REQ 0x50000000 + #define DRV_MSG_CODE_DIAG_EXIT_REQ 0x60000000 + #define DRV_MSG_CODE_VALIDATE_KEY 0x70000000 + #define DRV_MSG_CODE_GET_CURR_KEY 0x80000000 + #define DRV_MSG_CODE_GET_UPGRADE_KEY 0x81000000 + #define DRV_MSG_CODE_GET_MANUF_KEY 0x82000000 + #define DRV_MSG_CODE_LOAD_L2B_PRAM 0x90000000 + #define DRV_MSG_CODE_OEM_OK 0x00010000 + #define DRV_MSG_CODE_OEM_FAILURE 0x00020000 + #define DRV_MSG_CODE_OEM_UPDATE_SVID_OK 0x00030000 + #define DRV_MSG_CODE_OEM_UPDATE_SVID_FAILURE 0x00040000 + + /* + * The optic module verification command requires bootcode + * v5.0.6 or later, te specific optic module verification command + * requires bootcode v5.2.12 or later + */ + #define DRV_MSG_CODE_VRFY_FIRST_PHY_OPT_MDL 0xa0000000 + #define REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL 0x00050006 + #define DRV_MSG_CODE_VRFY_SPECIFIC_PHY_OPT_MDL 0xa1000000 + #define REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL 0x00050234 + #define DRV_MSG_CODE_VRFY_AFEX_SUPPORTED 0xa2000000 + #define REQ_BC_VER_4_VRFY_AFEX_SUPPORTED 0x00070002 + #define REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED 0x00070014 + #define REQ_BC_VER_4_MT_SUPPORTED 0x00070201 + #define REQ_BC_VER_4_PFC_STATS_SUPPORTED 0x00070201 + #define REQ_BC_VER_4_FCOE_FEATURES 0x00070209 + + #define DRV_MSG_CODE_DCBX_ADMIN_PMF_MSG 0xb0000000 + #define DRV_MSG_CODE_DCBX_PMF_DRV_OK 0xb2000000 + #define REQ_BC_VER_4_DCBX_ADMIN_MSG_NON_PMF 0x00070401 + + #define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000 + + #define DRV_MSG_CODE_AFEX_DRIVER_SETMAC 0xd0000000 + #define DRV_MSG_CODE_AFEX_LISTGET_ACK 0xd1000000 + #define DRV_MSG_CODE_AFEX_LISTSET_ACK 0xd2000000 + #define DRV_MSG_CODE_AFEX_STATSGET_ACK 0xd3000000 + #define DRV_MSG_CODE_AFEX_VIFSET_ACK 0xd4000000 + + #define DRV_MSG_CODE_DRV_INFO_ACK 0xd8000000 + #define DRV_MSG_CODE_DRV_INFO_NACK 0xd9000000 + + #define DRV_MSG_CODE_EEE_RESULTS_ACK 0xda000000 + + #define DRV_MSG_CODE_RMMOD 0xdb000000 + #define REQ_BC_VER_4_RMMOD_CMD 0x0007080f + + #define DRV_MSG_CODE_SET_MF_BW 0xe0000000 + #define REQ_BC_VER_4_SET_MF_BW 0x00060202 + #define DRV_MSG_CODE_SET_MF_BW_ACK 0xe1000000 + + #define DRV_MSG_CODE_LINK_STATUS_CHANGED 0x01000000 + + #define DRV_MSG_CODE_INITIATE_FLR 0x02000000 + #define REQ_BC_VER_4_INITIATE_FLR 0x00070213 + + #define BIOS_MSG_CODE_LIC_CHALLENGE 0xff010000 + #define BIOS_MSG_CODE_LIC_RESPONSE 0xff020000 + #define BIOS_MSG_CODE_VIRT_MAC_PRIM 0xff030000 + #define BIOS_MSG_CODE_VIRT_MAC_ISCSI 0xff040000 + + #define DRV_MSG_CODE_IMG_OFFSET_REQ 0xe2000000 + #define DRV_MSG_CODE_IMG_SIZE_REQ 0xe3000000 + + #define DRV_MSG_CODE_UFP_CONFIG_ACK 0xe4000000 + + #define DRV_MSG_SEQ_NUMBER_MASK 0x0000ffff + + #define DRV_MSG_CODE_CONFIG_CHANGE 0xC1000000 + + #define DRV_MSG_CODE_UPDATE_DRIVER_STATE 0xC2000000 + #define REQ_BC_VER_4_UPDATE_DRIVER_STATE 0x00070f35 + + uint32_t drv_mb_param; + #define DRV_MSG_CODE_SET_MF_BW_MIN_MASK 0x00ff0000 + #define DRV_MSG_CODE_SET_MF_BW_MAX_MASK 0xff000000 + + #define DRV_MSG_CODE_UNLOAD_NON_D3_POWER 0x00000001 + #define DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET 0x00000002 + + #define DRV_MSG_CODE_LOAD_REQ_WITH_LFA 0x0000100a + #define DRV_MSG_CODE_LOAD_REQ_FORCE_LFA 0x00002000 + + #define DRV_MSG_CODE_USR_BLK_IMAGE_REQ 0x00000001 + #define 
DRV_MSG_CODE_ISCSI_PERS_IMAGE_REQ 0x00000002 + #define DRV_MSG_CODE_VPD_IMAGE_REQ 0x00000003 + #define DRV_MSG_CODE_VLAN_TABLE_IMAGE_REQ 0x00000004 + + #define DRV_MSG_CODE_CONFIG_CHANGE_MTU_SIZE 0x00000001 + #define DRV_MSG_CODE_CONFIG_CHANGE_MAC_ADD 0x00000002 + #define DRV_MSG_CODE_CONFIG_CHANGE_WOL_ENA 0x00000003 + #define DRV_MSG_CODE_CONFIG_CHANGE_ISCI_BOOT 0x00000004 + #define DRV_MSG_CODE_CONFIG_CHANGE_FCOE_BOOT 0x00000005 + #define DRV_MSG_CODE_CONFIG_CHANGE_RST2DFT 0x00000006 + + #define DRV_MSG_CODE_DRIVER_STATE_UNKNOWN 0x00000001 + #define DRV_MSG_CODE_DRIVER_STATE_NOT_LOADED 0x00000002 + #define DRV_MSG_CODE_DRIVER_STATE_LOADING 0x00000003 + #define DRV_MSG_CODE_DRIVER_STATE_DISABLED 0x00000004 + #define DRV_MSG_CODE_DRIVER_STATE_ACTIVE 0x00000005 + + uint32_t fw_mb_header; + #define FW_MSG_CODE_MASK 0xffff0000 + #define FW_MSG_CODE_DRV_LOAD_COMMON 0x10100000 + #define FW_MSG_CODE_DRV_LOAD_PORT 0x10110000 + #define FW_MSG_CODE_DRV_LOAD_FUNCTION 0x10120000 + /* Load common chip is supported from bc 6.0.0 */ + #define REQ_BC_VER_4_DRV_LOAD_COMMON_CHIP 0x00060000 + #define FW_MSG_CODE_DRV_LOAD_COMMON_CHIP 0x10130000 + + #define FW_MSG_CODE_DRV_LOAD_REFUSED 0x10200000 + #define FW_MSG_CODE_DRV_LOAD_DONE 0x11100000 + #define FW_MSG_CODE_DRV_UNLOAD_COMMON 0x20100000 + #define FW_MSG_CODE_DRV_UNLOAD_PORT 0x20110000 + #define FW_MSG_CODE_DRV_UNLOAD_FUNCTION 0x20120000 + #define FW_MSG_CODE_DRV_UNLOAD_DONE 0x21100000 + #define FW_MSG_CODE_DCC_DONE 0x30100000 + #define FW_MSG_CODE_LLDP_DONE 0x40100000 + #define FW_MSG_CODE_DIAG_ENTER_DONE 0x50100000 + #define FW_MSG_CODE_DIAG_REFUSE 0x50200000 + #define FW_MSG_CODE_DIAG_EXIT_DONE 0x60100000 + #define FW_MSG_CODE_VALIDATE_KEY_SUCCESS 0x70100000 + #define FW_MSG_CODE_VALIDATE_KEY_FAILURE 0x70200000 + #define FW_MSG_CODE_GET_KEY_DONE 0x80100000 + #define FW_MSG_CODE_NO_KEY 0x80f00000 + #define FW_MSG_CODE_LIC_INFO_NOT_READY 0x80f80000 + #define FW_MSG_CODE_L2B_PRAM_LOADED 0x90100000 + #define FW_MSG_CODE_L2B_PRAM_T_LOAD_FAILURE 0x90210000 + #define FW_MSG_CODE_L2B_PRAM_C_LOAD_FAILURE 0x90220000 + #define FW_MSG_CODE_L2B_PRAM_X_LOAD_FAILURE 0x90230000 + #define FW_MSG_CODE_L2B_PRAM_U_LOAD_FAILURE 0x90240000 + #define FW_MSG_CODE_VRFY_OPT_MDL_SUCCESS 0xa0100000 + #define FW_MSG_CODE_VRFY_OPT_MDL_INVLD_IMG 0xa0200000 + #define FW_MSG_CODE_VRFY_OPT_MDL_UNAPPROVED 0xa0300000 + #define FW_MSG_CODE_VF_DISABLED_DONE 0xb0000000 + #define FW_MSG_CODE_HW_SET_INVALID_IMAGE 0xb0100000 + + #define FW_MSG_CODE_AFEX_DRIVER_SETMAC_DONE 0xd0100000 + #define FW_MSG_CODE_AFEX_LISTGET_ACK 0xd1100000 + #define FW_MSG_CODE_AFEX_LISTSET_ACK 0xd2100000 + #define FW_MSG_CODE_AFEX_STATSGET_ACK 0xd3100000 + #define FW_MSG_CODE_AFEX_VIFSET_ACK 0xd4100000 + + #define FW_MSG_CODE_DRV_INFO_ACK 0xd8100000 + #define FW_MSG_CODE_DRV_INFO_NACK 0xd9100000 + + #define FW_MSG_CODE_EEE_RESULS_ACK 0xda100000 + + #define FW_MSG_CODE_RMMOD_ACK 0xdb100000 + + #define FW_MSG_CODE_SET_MF_BW_SENT 0xe0000000 + #define FW_MSG_CODE_SET_MF_BW_DONE 0xe1000000 + + #define FW_MSG_CODE_LINK_CHANGED_ACK 0x01100000 + + #define FW_MSG_CODE_FLR_ACK 0x02000000 + #define FW_MSG_CODE_FLR_NACK 0x02100000 + + #define FW_MSG_CODE_LIC_CHALLENGE 0xff010000 + #define FW_MSG_CODE_LIC_RESPONSE 0xff020000 + #define FW_MSG_CODE_VIRT_MAC_PRIM 0xff030000 + #define FW_MSG_CODE_VIRT_MAC_ISCSI 0xff040000 + + #define FW_MSG_CODE_IMG_OFFSET_RESPONSE 0xe2100000 + #define FW_MSG_CODE_IMG_SIZE_RESPONSE 0xe3100000 + + #define FW_MSG_CODE_OEM_ACK 0x00010000 + #define DRV_MSG_CODE_OEM_UPDATE_SVID_ACK 0x00020000 + + #define 
FW_MSG_CODE_CONFIG_CHANGE_DONE 0xC2000000 + + #define FW_MSG_CODE_UPDATE_DRIVER_STATE_DONE 0xC3000000 + + #define FW_MSG_SEQ_NUMBER_MASK 0x0000ffff + + uint32_t fw_mb_param; + + #define FW_PARAM_INVALID_IMG 0xffffffff + + uint32_t drv_pulse_mb; + #define DRV_PULSE_SEQ_MASK 0x00007fff + #define DRV_PULSE_SYSTEM_TIME_MASK 0xffff0000 + /* + * The system time is in the format of + * (year-2001)*12*32 + month*32 + day. + */ + #define DRV_PULSE_ALWAYS_ALIVE 0x00008000 + /* + * Indicate to the firmware not to go into the + * OS-absent when it is not getting driver pulse. + * This is used for debugging as well for PXE(MBA). + */ + + uint32_t mcp_pulse_mb; + #define MCP_PULSE_SEQ_MASK 0x00007fff + #define MCP_PULSE_ALWAYS_ALIVE 0x00008000 + /* Indicates to the driver not to assert due to lack + * of MCP response */ + #define MCP_EVENT_MASK 0xffff0000 + #define MCP_EVENT_OTHER_DRIVER_RESET_REQ 0x00010000 + + uint32_t iscsi_boot_signature; + uint32_t iscsi_boot_block_offset; + + uint32_t drv_status; + #define DRV_STATUS_PMF 0x00000001 + #define DRV_STATUS_VF_DISABLED 0x00000002 + #define DRV_STATUS_SET_MF_BW 0x00000004 + #define DRV_STATUS_LINK_EVENT 0x00000008 + + #define DRV_STATUS_OEM_EVENT_MASK 0x00000070 + #define DRV_STATUS_OEM_DISABLE_ENABLE_PF 0x00000010 + #define DRV_STATUS_OEM_BANDWIDTH_ALLOCATION 0x00000020 + #define DRV_STATUS_OEM_FC_NPIV_UPDATE 0x00000040 + + #define DRV_STATUS_OEM_UPDATE_SVID 0x00000080 + + #define DRV_STATUS_DCC_EVENT_MASK 0x0000ff00 + #define DRV_STATUS_DCC_DISABLE_ENABLE_PF 0x00000100 + #define DRV_STATUS_DCC_BANDWIDTH_ALLOCATION 0x00000200 + #define DRV_STATUS_DCC_CHANGE_MAC_ADDRESS 0x00000400 + #define DRV_STATUS_DCC_RESERVED1 0x00000800 + #define DRV_STATUS_DCC_SET_PROTOCOL 0x00001000 + #define DRV_STATUS_DCC_SET_PRIORITY 0x00002000 + + #define DRV_STATUS_DCBX_EVENT_MASK 0x000f0000 + #define DRV_STATUS_DCBX_NEGOTIATION_RESULTS 0x00010000 + #define DRV_STATUS_AFEX_EVENT_MASK 0x03f00000 + #define DRV_STATUS_AFEX_LISTGET_REQ 0x00100000 + #define DRV_STATUS_AFEX_LISTSET_REQ 0x00200000 + #define DRV_STATUS_AFEX_STATSGET_REQ 0x00400000 + #define DRV_STATUS_AFEX_VIFSET_REQ 0x00800000 + + #define DRV_STATUS_DRV_INFO_REQ 0x04000000 + + #define DRV_STATUS_EEE_NEGOTIATION_RESULTS 0x08000000 + + uint32_t virt_mac_upper; + #define VIRT_MAC_SIGN_MASK 0xffff0000 + #define VIRT_MAC_SIGNATURE 0x564d0000 + uint32_t virt_mac_lower; + +}; + + +/**************************************************************************** + * Management firmware state * + ****************************************************************************/ +/* Allocate 440 bytes for management firmware */ +#define MGMTFW_STATE_WORD_SIZE 110 + +struct mgmtfw_state { + uint32_t opaque[MGMTFW_STATE_WORD_SIZE]; +}; + + +/**************************************************************************** + * Multi-Function configuration * + ****************************************************************************/ +struct shared_mf_cfg { + + uint32_t clp_mb; + #define SHARED_MF_CLP_SET_DEFAULT 0x00000000 + /* set by CLP */ + #define SHARED_MF_CLP_EXIT 0x00000001 + /* set by MCP */ + #define SHARED_MF_CLP_EXIT_DONE 0x00010000 + +}; + +struct port_mf_cfg { + + uint32_t dynamic_cfg; /* device control channel */ + #define PORT_MF_CFG_E1HOV_TAG_MASK 0x0000ffff + #define PORT_MF_CFG_E1HOV_TAG_SHIFT 0 + #define PORT_MF_CFG_E1HOV_TAG_DEFAULT PORT_MF_CFG_E1HOV_TAG_MASK + + uint32_t reserved[1]; + +}; + +struct func_mf_cfg { + + uint32_t config; + /* E/R/I/D */ + /* function 0 of each port cannot be hidden */ + #define 
FUNC_MF_CFG_FUNC_HIDE 0x00000001 + + #define FUNC_MF_CFG_PROTOCOL_MASK 0x00000006 + #define FUNC_MF_CFG_PROTOCOL_FCOE 0x00000000 + #define FUNC_MF_CFG_PROTOCOL_ETHERNET 0x00000002 + #define FUNC_MF_CFG_PROTOCOL_ETHERNET_WITH_RDMA 0x00000004 + #define FUNC_MF_CFG_PROTOCOL_ISCSI 0x00000006 + #define FUNC_MF_CFG_PROTOCOL_DEFAULT \ + FUNC_MF_CFG_PROTOCOL_ETHERNET_WITH_RDMA + + #define FUNC_MF_CFG_FUNC_DISABLED 0x00000008 + #define FUNC_MF_CFG_FUNC_DELETED 0x00000010 + + #define FUNC_MF_CFG_FUNC_BOOT_MASK 0x00000060 + #define FUNC_MF_CFG_FUNC_BOOT_BIOS_CTRL 0x00000000 + #define FUNC_MF_CFG_FUNC_BOOT_VCM_DISABLED 0x00000020 + #define FUNC_MF_CFG_FUNC_BOOT_VCM_ENABLED 0x00000040 + + /* PRI */ + /* 0 - low priority, 3 - high priority */ + #define FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK 0x00000300 + #define FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT 8 + #define FUNC_MF_CFG_TRANSMIT_PRIORITY_DEFAULT 0x00000000 + + /* MINBW, MAXBW */ + /* value range - 0..100, increments in 100Mbps */ + #define FUNC_MF_CFG_MIN_BW_MASK 0x00ff0000 + #define FUNC_MF_CFG_MIN_BW_SHIFT 16 + #define FUNC_MF_CFG_MIN_BW_DEFAULT 0x00000000 + #define FUNC_MF_CFG_MAX_BW_MASK 0xff000000 + #define FUNC_MF_CFG_MAX_BW_SHIFT 24 + #define FUNC_MF_CFG_MAX_BW_DEFAULT 0x64000000 + + uint32_t mac_upper; /* MAC */ + #define FUNC_MF_CFG_UPPERMAC_MASK 0x0000ffff + #define FUNC_MF_CFG_UPPERMAC_SHIFT 0 + #define FUNC_MF_CFG_UPPERMAC_DEFAULT FUNC_MF_CFG_UPPERMAC_MASK + uint32_t mac_lower; + #define FUNC_MF_CFG_LOWERMAC_DEFAULT 0xffffffff + + uint32_t e1hov_tag; /* VNI */ + #define FUNC_MF_CFG_E1HOV_TAG_MASK 0x0000ffff + #define FUNC_MF_CFG_E1HOV_TAG_SHIFT 0 + #define FUNC_MF_CFG_E1HOV_TAG_DEFAULT FUNC_MF_CFG_E1HOV_TAG_MASK + + /* afex default VLAN ID - 12 bits */ + #define FUNC_MF_CFG_AFEX_VLAN_MASK 0x0fff0000 + #define FUNC_MF_CFG_AFEX_VLAN_SHIFT 16 + + uint32_t afex_config; + #define FUNC_MF_CFG_AFEX_COS_FILTER_MASK 0x000000ff + #define FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT 0 + #define FUNC_MF_CFG_AFEX_MBA_ENABLED_MASK 0x0000ff00 + #define FUNC_MF_CFG_AFEX_MBA_ENABLED_SHIFT 8 + #define FUNC_MF_CFG_AFEX_MBA_ENABLED_VAL 0x00000100 + #define FUNC_MF_CFG_AFEX_VLAN_MODE_MASK 0x000f0000 + #define FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT 16 + + uint32_t pf_allocation; + /* number of vfs in function, if 0 - sriov disabled */ + #define FUNC_MF_CFG_NUMBER_OF_VFS_MASK 0x000000FF + #define FUNC_MF_CFG_NUMBER_OF_VFS_SHIFT 0 +}; + +enum mf_cfg_afex_vlan_mode { + FUNC_MF_CFG_AFEX_VLAN_TRUNK_MODE = 0, + FUNC_MF_CFG_AFEX_VLAN_ACCESS_MODE, + FUNC_MF_CFG_AFEX_VLAN_TRUNK_TAG_NATIVE_MODE +}; + +/* This structure is not applicable and should not be accessed on 57711 */ +struct func_ext_cfg { + uint32_t func_cfg; + #define MACP_FUNC_CFG_FLAGS_MASK 0x0000007F + #define MACP_FUNC_CFG_FLAGS_SHIFT 0 + #define MACP_FUNC_CFG_FLAGS_ENABLED 0x00000001 + #define MACP_FUNC_CFG_FLAGS_ETHERNET 0x00000002 + #define MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD 0x00000004 + #define MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD 0x00000008 + #define MACP_FUNC_CFG_PAUSE_ON_HOST_RING 0x00000080 + + uint32_t iscsi_mac_addr_upper; + uint32_t iscsi_mac_addr_lower; + + uint32_t fcoe_mac_addr_upper; + uint32_t fcoe_mac_addr_lower; + + uint32_t fcoe_wwn_port_name_upper; + uint32_t fcoe_wwn_port_name_lower; + + uint32_t fcoe_wwn_node_name_upper; + uint32_t fcoe_wwn_node_name_lower; + + uint32_t preserve_data; + #define MF_FUNC_CFG_PRESERVE_L2_MAC (1<<0) + #define MF_FUNC_CFG_PRESERVE_ISCSI_MAC (1<<1) + #define MF_FUNC_CFG_PRESERVE_FCOE_MAC (1<<2) + #define MF_FUNC_CFG_PRESERVE_FCOE_WWN_P (1<<3) + #define MF_FUNC_CFG_PRESERVE_FCOE_WWN_N 
(1<<4) + #define MF_FUNC_CFG_PRESERVE_TX_BW (1<<5) +}; + +struct mf_cfg { + + struct shared_mf_cfg shared_mf_config; /* 0x4 */ + struct port_mf_cfg port_mf_config[NVM_PATH_MAX][PORT_MAX]; + /* 0x10*2=0x20 */ + /* for all chips, there are 8 mf functions */ + struct func_mf_cfg func_mf_config[E1H_FUNC_MAX]; /* 0x18 * 8 = 0xc0 */ + /* + * Extended configuration per function - this array does not exist and + * should not be accessed on 57711 + */ + struct func_ext_cfg func_ext_config[E1H_FUNC_MAX]; /* 0x28 * 8 = 0x140*/ +}; /* 0x224 */ + +/**************************************************************************** + * Shared Memory Region * + ****************************************************************************/ +struct shmem_region { /* SharedMem Offset (size) */ + + uint32_t validity_map[PORT_MAX]; /* 0x0 (4*2 = 0x8) */ + #define SHR_MEM_FORMAT_REV_MASK 0xff000000 + #define SHR_MEM_FORMAT_REV_ID ('A'<<24) + /* validity bits */ + #define SHR_MEM_VALIDITY_PCI_CFG 0x00100000 + #define SHR_MEM_VALIDITY_MB 0x00200000 + #define SHR_MEM_VALIDITY_DEV_INFO 0x00400000 + #define SHR_MEM_VALIDITY_RESERVED 0x00000007 + /* One licensing bit should be set */ + #define SHR_MEM_VALIDITY_LIC_KEY_IN_EFFECT_MASK 0x00000038 + #define SHR_MEM_VALIDITY_LIC_MANUF_KEY_IN_EFFECT 0x00000008 + #define SHR_MEM_VALIDITY_LIC_UPGRADE_KEY_IN_EFFECT 0x00000010 + #define SHR_MEM_VALIDITY_LIC_NO_KEY_IN_EFFECT 0x00000020 + /* Active MFW */ + #define SHR_MEM_VALIDITY_ACTIVE_MFW_UNKNOWN 0x00000000 + #define SHR_MEM_VALIDITY_ACTIVE_MFW_MASK 0x000001c0 + #define SHR_MEM_VALIDITY_ACTIVE_MFW_IPMI 0x00000040 + #define SHR_MEM_VALIDITY_ACTIVE_MFW_UMP 0x00000080 + #define SHR_MEM_VALIDITY_ACTIVE_MFW_NCSI 0x000000c0 + #define SHR_MEM_VALIDITY_ACTIVE_MFW_NONE 0x000001c0 + + struct shm_dev_info dev_info; /* 0x8 (0x438) */ + + struct license_key drv_lic_key[PORT_MAX]; /* 0x440 (52 * 2 = 0x68) */ + + /* FW information (for internal FW use) */ + uint32_t fw_info_fio_offset; /* 0x4a8 (0x4) */ + struct mgmtfw_state mgmtfw_state; /* 0x4ac (0x1b8) */ + + struct drv_port_mb port_mb[PORT_MAX]; /* 0x664 (16*2=0x20) */ + + +#ifdef BMAPI + /* This is a variable length array */ + /* the number of function depends on the chip type */ + struct drv_func_mb func_mb[1]; /* 0x684 (44*2/4/8=0x58/0xb0/0x160) */ +#else + /* the number of function depends on the chip type */ + struct drv_func_mb func_mb[]; /* 0x684 (44*2/4/8=0x58/0xb0/0x160) */ +#endif /* BMAPI */ + +}; /* 57710 = 0x6dc | 57711 = 0x7E4 | 57712 = 0x734 */ + +/**************************************************************************** + * Shared Memory 2 Region * + ****************************************************************************/ +/* The fw_flr_ack is actually built in the following way: */ +/* 8 bit: PF ack */ +/* 64 bit: VF ack */ +/* 8 bit: ios_dis_ack */ +/* In order to maintain endianity in the mailbox hsi, we want to keep using */ +/* uint32_t. The fw must have the VF right after the PF since this is how it */ +/* access arrays(it expects always the VF to reside after the PF, and that */ +/* makes the calculation much easier for it. 
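Before trusting anything in shmem_region, drivers for this family typically sanity-check the validity_map defined above: the top byte must carry the expected format revision and the PCI_CFG / MB / DEV_INFO bits must all be set. A minimal sketch under that assumption (not the patch's own code):

#include <stdbool.h>
#include <stdint.h>

/* Illustrative check of one port's validity_map word. */
bool shmem_looks_valid(uint32_t validity)
{
	const uint32_t required = SHR_MEM_VALIDITY_PCI_CFG |
				  SHR_MEM_VALIDITY_MB |
				  SHR_MEM_VALIDITY_DEV_INFO;

	/* Top byte holds the shared-memory format revision ('A'). */
	if ((validity & SHR_MEM_FORMAT_REV_MASK) != SHR_MEM_FORMAT_REV_ID)
		return false;

	/* All three content-valid bits must be set. */
	return (validity & required) == required;
}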
) */ +/* In order to answer both limitations, and keep the struct small, the code */ +/* will abuse the structure defined here to achieve the actual partition */ +/* above */ +/****************************************************************************/ +struct fw_flr_ack { + uint32_t pf_ack; + uint32_t vf_ack; + uint32_t iov_dis_ack; +}; + +struct fw_flr_mb { + uint32_t aggint; + uint32_t opgen_addr; + struct fw_flr_ack ack; +}; + +struct eee_remote_vals { + uint32_t tx_tw; + uint32_t rx_tw; +}; + +/**** SUPPORT FOR SHMEM ARRRAYS *** + * The SHMEM HSI is aligned on 32 bit boundaries which makes it difficult to + * define arrays with storage types smaller then unsigned dwords. + * The macros below add generic support for SHMEM arrays with numeric elements + * that can span 2,4,8 or 16 bits. The array underlying type is a 32 bit dword + * array with individual bit-filed elements accessed using shifts and masks. + * + */ + +/* eb is the bitwidth of a single element */ +#define SHMEM_ARRAY_MASK(eb) ((1<<(eb))-1) +#define SHMEM_ARRAY_ENTRY(i, eb) ((i)/(32/(eb))) + +/* the bit-position macro allows the used to flip the order of the arrays + * elements on a per byte or word boundary. + * + * example: an array with 8 entries each 4 bit wide. This array will fit into + * a single dword. The diagrmas below show the array order of the nibbles. + * + * SHMEM_ARRAY_BITPOS(i, 4, 4) defines the stadard ordering: + * + * | | | | + * 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | + * | | | | + * + * SHMEM_ARRAY_BITPOS(i, 4, 8) defines a flip ordering per byte: + * + * | | | | + * 1 | 0 | 3 | 2 | 5 | 4 | 7 | 6 | + * | | | | + * + * SHMEM_ARRAY_BITPOS(i, 4, 16) defines a flip ordering per word: + * + * | | | | + * 3 | 2 | 1 | 0 | 7 | 6 | 5 | 4 | + * | | | | + */ +#define SHMEM_ARRAY_BITPOS(i, eb, fb) \ + ((((32/(fb)) - 1 - ((i)/((fb)/(eb))) % (32/(fb))) * (fb)) + \ + (((i)%((fb)/(eb))) * (eb))) + +#define SHMEM_ARRAY_GET(a, i, eb, fb) \ + ((a[SHMEM_ARRAY_ENTRY(i, eb)] >> SHMEM_ARRAY_BITPOS(i, eb, fb)) & \ + SHMEM_ARRAY_MASK(eb)) + +#define SHMEM_ARRAY_SET(a, i, eb, fb, val) \ +do { \ + a[SHMEM_ARRAY_ENTRY(i, eb)] &= ~(SHMEM_ARRAY_MASK(eb) << \ + SHMEM_ARRAY_BITPOS(i, eb, fb)); \ + a[SHMEM_ARRAY_ENTRY(i, eb)] |= (((val) & SHMEM_ARRAY_MASK(eb)) << \ + SHMEM_ARRAY_BITPOS(i, eb, fb)); \ +} while (0) + + +/****START OF DCBX STRUCTURES DECLARATIONS****/ +#define DCBX_MAX_NUM_PRI_PG_ENTRIES 8 +#define DCBX_PRI_PG_BITWIDTH 4 +#define DCBX_PRI_PG_FBITS 8 +#define DCBX_PRI_PG_GET(a, i) \ + SHMEM_ARRAY_GET(a, i, DCBX_PRI_PG_BITWIDTH, DCBX_PRI_PG_FBITS) +#define DCBX_PRI_PG_SET(a, i, val) \ + SHMEM_ARRAY_SET(a, i, DCBX_PRI_PG_BITWIDTH, DCBX_PRI_PG_FBITS, val) +#define DCBX_MAX_NUM_PG_BW_ENTRIES 8 +#define DCBX_BW_PG_BITWIDTH 8 +#define DCBX_PG_BW_GET(a, i) \ + SHMEM_ARRAY_GET(a, i, DCBX_BW_PG_BITWIDTH, DCBX_BW_PG_BITWIDTH) +#define DCBX_PG_BW_SET(a, i, val) \ + SHMEM_ARRAY_SET(a, i, DCBX_BW_PG_BITWIDTH, DCBX_BW_PG_BITWIDTH, val) +#define DCBX_STRICT_PRI_PG 15 +#define DCBX_MAX_APP_PROTOCOL 16 +#define DCBX_MAX_APP_LOCAL 32 +#define FCOE_APP_IDX 0 +#define ISCSI_APP_IDX 1 +#define PREDEFINED_APP_IDX_MAX 2 + + +/* Big/Little endian have the same representation. */ +struct dcbx_ets_feature { + /* + * For Admin MIB - is this feature supported by the + * driver | For Local MIB - should this feature be enabled. 
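The DCBX_PRI_PG_* wrappers above instantiate the SHMEM array macros with 4-bit elements flipped per byte (eb = 4, fb = 8), i.e. the second diagram in the comment. A small self-test sketch, assuming those macros are in scope; the final assertion shows where entries 0 and 1 land in the underlying dword:

#include <assert.h>
#include <stdint.h>

/* Demonstrates SHMEM_ARRAY_SET/GET through the priority-to-PG wrappers:
 * 8 entries of 4 bits each packed into one dword with per-byte flipping.
 */
void dcbx_pri_pg_selftest(void)
{
	uint32_t pri_pg_tbl[1] = { 0 };
	int pri;

	/* Map every priority to its own priority-group number. */
	for (pri = 0; pri < DCBX_MAX_NUM_PRI_PG_ENTRIES; pri++)
		DCBX_PRI_PG_SET(pri_pg_tbl, pri, pri);

	/* Values round-trip cleanly through GET... */
	for (pri = 0; pri < DCBX_MAX_NUM_PRI_PG_ENTRIES; pri++)
		assert(DCBX_PRI_PG_GET(pri_pg_tbl, pri) == (uint32_t)pri);

	/* ...and because SHMEM_ARRAY_BITPOS(1, 4, 8) == 28, entry 1 sits in
	 * the top nibble (bits 31:28) with entry 0 just below it (27:24),
	 * matching the "1 | 0 | 3 | 2 | ..." ordering drawn above.
	 */
	assert(((pri_pg_tbl[0] >> 28) & 0xF) == 1);
}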
+ */ + uint32_t enabled; + uint32_t pg_bw_tbl[2]; + uint32_t pri_pg_tbl[1]; +}; + +/* Driver structure in LE */ +struct dcbx_pfc_feature { +#ifdef __BIG_ENDIAN + uint8_t pri_en_bitmap; + #define DCBX_PFC_PRI_0 0x01 + #define DCBX_PFC_PRI_1 0x02 + #define DCBX_PFC_PRI_2 0x04 + #define DCBX_PFC_PRI_3 0x08 + #define DCBX_PFC_PRI_4 0x10 + #define DCBX_PFC_PRI_5 0x20 + #define DCBX_PFC_PRI_6 0x40 + #define DCBX_PFC_PRI_7 0x80 + uint8_t pfc_caps; + uint8_t reserved; + uint8_t enabled; +#elif defined(__LITTLE_ENDIAN) + uint8_t enabled; + uint8_t reserved; + uint8_t pfc_caps; + uint8_t pri_en_bitmap; + #define DCBX_PFC_PRI_0 0x01 + #define DCBX_PFC_PRI_1 0x02 + #define DCBX_PFC_PRI_2 0x04 + #define DCBX_PFC_PRI_3 0x08 + #define DCBX_PFC_PRI_4 0x10 + #define DCBX_PFC_PRI_5 0x20 + #define DCBX_PFC_PRI_6 0x40 + #define DCBX_PFC_PRI_7 0x80 +#endif +}; + +struct dcbx_app_priority_entry { +#ifdef __BIG_ENDIAN + uint16_t app_id; + uint8_t pri_bitmap; + uint8_t appBitfield; + #define DCBX_APP_ENTRY_VALID 0x01 + #define DCBX_APP_ENTRY_SF_MASK 0xF0 + #define DCBX_APP_ENTRY_SF_SHIFT 4 + #define DCBX_APP_SF_ETH_TYPE 0x10 + #define DCBX_APP_SF_PORT 0x20 /* TCP */ + #define DCBX_APP_SF_UDP 0x40 /* UDP */ + #define DCBX_APP_SF_DEFAULT 0x80 + #define DCBX_APP_PRI_0 0x01 + #define DCBX_APP_PRI_1 0x02 + #define DCBX_APP_PRI_2 0x04 + #define DCBX_APP_PRI_3 0x08 + #define DCBX_APP_PRI_4 0x10 + #define DCBX_APP_PRI_5 0x20 + #define DCBX_APP_PRI_6 0x40 + #define DCBX_APP_PRI_7 0x80 +#elif defined(__LITTLE_ENDIAN) + uint8_t appBitfield; + #define DCBX_APP_ENTRY_VALID 0x01 + #define DCBX_APP_ENTRY_SF_MASK 0xF0 + #define DCBX_APP_ENTRY_SF_SHIFT 4 + #define DCBX_APP_ENTRY_VALID 0x01 + #define DCBX_APP_SF_ETH_TYPE 0x10 + #define DCBX_APP_SF_PORT 0x20 /* TCP */ + #define DCBX_APP_SF_UDP 0x40 /* UDP */ + #define DCBX_APP_SF_DEFAULT 0x80 + uint8_t pri_bitmap; + uint16_t app_id; +#endif +}; + + +/* FW structure in BE */ +struct dcbx_app_priority_feature { +#ifdef __BIG_ENDIAN + uint8_t reserved; + uint8_t default_pri; + uint8_t tc_supported; + uint8_t enabled; +#elif defined(__LITTLE_ENDIAN) + uint8_t enabled; + uint8_t tc_supported; + uint8_t default_pri; + uint8_t reserved; +#endif + struct dcbx_app_priority_entry app_pri_tbl[DCBX_MAX_APP_PROTOCOL]; +}; + +/* FW structure in BE */ +struct dcbx_features { + /* PG feature */ + struct dcbx_ets_feature ets; + /* PFC feature */ + struct dcbx_pfc_feature pfc; + /* APP feature */ + struct dcbx_app_priority_feature app; +}; + +/* LLDP protocol parameters */ +/* FW structure in BE */ +struct lldp_params { +#ifdef __BIG_ENDIAN + uint8_t msg_fast_tx_interval; + uint8_t msg_tx_hold; + uint8_t msg_tx_interval; + uint8_t admin_status; + #define LLDP_TX_ONLY 0x01 + #define LLDP_RX_ONLY 0x02 + #define LLDP_TX_RX 0x03 + #define LLDP_DISABLED 0x04 + uint8_t reserved1; + uint8_t tx_fast; + uint8_t tx_crd_max; + uint8_t tx_crd; +#elif defined(__LITTLE_ENDIAN) + uint8_t admin_status; + #define LLDP_TX_ONLY 0x01 + #define LLDP_RX_ONLY 0x02 + #define LLDP_TX_RX 0x03 + #define LLDP_DISABLED 0x04 + uint8_t msg_tx_interval; + uint8_t msg_tx_hold; + uint8_t msg_fast_tx_interval; + uint8_t tx_crd; + uint8_t tx_crd_max; + uint8_t tx_fast; + uint8_t reserved1; +#endif + #define REM_CHASSIS_ID_STAT_LEN 4 + #define REM_PORT_ID_STAT_LEN 4 + /* Holds remote Chassis ID TLV header, subtype and 9B of payload. */ + uint32_t peer_chassis_id[REM_CHASSIS_ID_STAT_LEN]; + /* Holds remote Port ID TLV header, subtype and 9B of payload. 
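Each dcbx_app_priority_entry above packs a validity bit and a traffic selector (Ethertype, TCP port, UDP port or default) into appBitfield, plus a per-priority bitmap. A minimal decode sketch, assuming the structure and defines above are in scope; the helper names are illustrative:

#include <stdbool.h>
#include <stdint.h>

/* True if 'entry' is valid and classifies traffic by TCP port. */
bool app_entry_is_tcp_port(const struct dcbx_app_priority_entry *entry)
{
	if (!(entry->appBitfield & DCBX_APP_ENTRY_VALID))
		return false;
	return (entry->appBitfield & DCBX_APP_SF_PORT) != 0;
}

/* Highest priority (0..7) enabled in the entry's bitmap, or -1 if none. */
int app_entry_max_priority(const struct dcbx_app_priority_entry *entry)
{
	int pri;

	for (pri = 7; pri >= 0; pri--)
		if (entry->pri_bitmap & (1 << pri))
			return pri;
	return -1;
}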
*/ + uint32_t peer_port_id[REM_PORT_ID_STAT_LEN]; +}; + +struct lldp_dcbx_stat { + #define LOCAL_CHASSIS_ID_STAT_LEN 2 + #define LOCAL_PORT_ID_STAT_LEN 2 + /* Holds local Chassis ID 8B payload of constant subtype 4. */ + uint32_t local_chassis_id[LOCAL_CHASSIS_ID_STAT_LEN]; + /* Holds local Port ID 8B payload of constant subtype 3. */ + uint32_t local_port_id[LOCAL_PORT_ID_STAT_LEN]; + /* Number of DCBX frames transmitted. */ + uint32_t num_tx_dcbx_pkts; + /* Number of DCBX frames received. */ + uint32_t num_rx_dcbx_pkts; +}; + +/* ADMIN MIB - DCBX local machine default configuration. */ +struct lldp_admin_mib { + uint32_t ver_cfg_flags; + #define DCBX_ETS_CONFIG_TX_ENABLED 0x00000001 + #define DCBX_PFC_CONFIG_TX_ENABLED 0x00000002 + #define DCBX_APP_CONFIG_TX_ENABLED 0x00000004 + #define DCBX_ETS_RECO_TX_ENABLED 0x00000008 + #define DCBX_ETS_RECO_VALID 0x00000010 + #define DCBX_ETS_WILLING 0x00000020 + #define DCBX_PFC_WILLING 0x00000040 + #define DCBX_APP_WILLING 0x00000080 + #define DCBX_VERSION_CEE 0x00000100 + #define DCBX_VERSION_IEEE 0x00000200 + #define DCBX_DCBX_ENABLED 0x00000400 + #define DCBX_CEE_VERSION_MASK 0x0000f000 + #define DCBX_CEE_VERSION_SHIFT 12 + #define DCBX_CEE_MAX_VERSION_MASK 0x000f0000 + #define DCBX_CEE_MAX_VERSION_SHIFT 16 + struct dcbx_features features; +}; + +/* REMOTE MIB - remote machine DCBX configuration. */ +struct lldp_remote_mib { + uint32_t prefix_seq_num; + uint32_t flags; + #define DCBX_ETS_TLV_RX 0x00000001 + #define DCBX_PFC_TLV_RX 0x00000002 + #define DCBX_APP_TLV_RX 0x00000004 + #define DCBX_ETS_RX_ERROR 0x00000010 + #define DCBX_PFC_RX_ERROR 0x00000020 + #define DCBX_APP_RX_ERROR 0x00000040 + #define DCBX_ETS_REM_WILLING 0x00000100 + #define DCBX_PFC_REM_WILLING 0x00000200 + #define DCBX_APP_REM_WILLING 0x00000400 + #define DCBX_REMOTE_ETS_RECO_VALID 0x00001000 + #define DCBX_REMOTE_MIB_VALID 0x00002000 + struct dcbx_features features; + uint32_t suffix_seq_num; +}; + +/* LOCAL MIB - operational DCBX configuration - transmitted on Tx LLDPDU. */ +struct lldp_local_mib { + uint32_t prefix_seq_num; + /* Indicates if there is mismatch with negotiation results. 
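The admin MIB flags defined above select the DCBX flavour and the per-feature willing bits, with the CEE version carried in a small masked field. A minimal read-back sketch, assuming this header is in scope; nothing here is part of the patch itself:

#include <stdbool.h>
#include <stdint.h>

/* Is DCBX enabled at all in the admin (default) configuration? */
bool admin_dcbx_enabled(const struct lldp_admin_mib *mib)
{
	return (mib->ver_cfg_flags & DCBX_DCBX_ENABLED) != 0;
}

/* True if the CEE flavour is selected (as opposed to IEEE). */
bool admin_dcbx_is_cee(const struct lldp_admin_mib *mib)
{
	return (mib->ver_cfg_flags & DCBX_VERSION_CEE) != 0;
}

/* Currently configured CEE version number (0..15). */
uint32_t admin_cee_version(const struct lldp_admin_mib *mib)
{
	return (mib->ver_cfg_flags & DCBX_CEE_VERSION_MASK) >>
	       DCBX_CEE_VERSION_SHIFT;
}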
*/ + uint32_t error; + #define DCBX_LOCAL_ETS_ERROR 0x00000001 + #define DCBX_LOCAL_PFC_ERROR 0x00000002 + #define DCBX_LOCAL_APP_ERROR 0x00000004 + #define DCBX_LOCAL_PFC_MISMATCH 0x00000010 + #define DCBX_LOCAL_APP_MISMATCH 0x00000020 + #define DCBX_REMOTE_MIB_ERROR 0x00000040 + #define DCBX_REMOTE_ETS_TLV_NOT_FOUND 0x00000080 + #define DCBX_REMOTE_PFC_TLV_NOT_FOUND 0x00000100 + #define DCBX_REMOTE_APP_TLV_NOT_FOUND 0x00000200 + struct dcbx_features features; + uint32_t suffix_seq_num; +}; + +struct lldp_local_mib_ext { + uint32_t prefix_seq_num; + /* APP TLV extension - 16 more entries for negotiation results*/ + struct dcbx_app_priority_entry app_pri_tbl_ext[DCBX_MAX_APP_PROTOCOL]; + uint32_t suffix_seq_num; +}; +/***END OF DCBX STRUCTURES DECLARATIONS***/ + +/***********************************************************/ +/* Elink section */ +/***********************************************************/ +#define SHMEM_LINK_CONFIG_SIZE 2 +struct shmem_lfa { + uint32_t req_duplex; + #define REQ_DUPLEX_PHY0_MASK 0x0000ffff + #define REQ_DUPLEX_PHY0_SHIFT 0 + #define REQ_DUPLEX_PHY1_MASK 0xffff0000 + #define REQ_DUPLEX_PHY1_SHIFT 16 + uint32_t req_flow_ctrl; + #define REQ_FLOW_CTRL_PHY0_MASK 0x0000ffff + #define REQ_FLOW_CTRL_PHY0_SHIFT 0 + #define REQ_FLOW_CTRL_PHY1_MASK 0xffff0000 + #define REQ_FLOW_CTRL_PHY1_SHIFT 16 + uint32_t req_line_speed; /* Also determine AutoNeg */ + #define REQ_LINE_SPD_PHY0_MASK 0x0000ffff + #define REQ_LINE_SPD_PHY0_SHIFT 0 + #define REQ_LINE_SPD_PHY1_MASK 0xffff0000 + #define REQ_LINE_SPD_PHY1_SHIFT 16 + uint32_t speed_cap_mask[SHMEM_LINK_CONFIG_SIZE]; + uint32_t additional_config; + #define REQ_FC_AUTO_ADV_MASK 0x0000ffff + #define REQ_FC_AUTO_ADV0_SHIFT 0 + #define NO_LFA_DUE_TO_DCC_MASK 0x00010000 + uint32_t lfa_sts; + #define LFA_LINK_FLAP_REASON_OFFSET 0 + #define LFA_LINK_FLAP_REASON_MASK 0x000000ff + #define LFA_LINK_DOWN 0x1 + #define LFA_LOOPBACK_ENABLED 0x2 + #define LFA_DUPLEX_MISMATCH 0x3 + #define LFA_MFW_IS_TOO_OLD 0x4 + #define LFA_LINK_SPEED_MISMATCH 0x5 + #define LFA_FLOW_CTRL_MISMATCH 0x6 + #define LFA_SPEED_CAP_MISMATCH 0x7 + #define LFA_DCC_LFA_DISABLED 0x8 + #define LFA_EEE_MISMATCH 0x9 + + #define LINK_FLAP_AVOIDANCE_COUNT_OFFSET 8 + #define LINK_FLAP_AVOIDANCE_COUNT_MASK 0x0000ff00 + + #define LINK_FLAP_COUNT_OFFSET 16 + #define LINK_FLAP_COUNT_MASK 0x00ff0000 + + #define LFA_FLAGS_MASK 0xff000000 + #define SHMEM_LFA_DONT_CLEAR_STAT (1<<24) + +}; + +/* + * Used to suppoert NSCI get OS driver version + * On driver load the version value will be set + * On driver unload driver value of 0x0 will be set + */ +struct os_drv_ver { + #define DRV_VER_NOT_LOADED 0 + /*personalites orrder is importent */ + #define DRV_PERS_ETHERNET 0 + #define DRV_PERS_ISCSI 1 + #define DRV_PERS_FCOE 2 + /*shmem2 struct is constatnt can't add more personalites here*/ + #define MAX_DRV_PERS 3 + uint32_t versions[MAX_DRV_PERS]; +}; + +#define OEM_I2C_UUID_STR_ADDR 0x9f +#define OEM_I2C_CARD_SKU_STR_ADDR 0x3c +#define OEM_I2C_CARD_FN_STR_ADDR 0x48 +#define OEM_I2C_CARD_NAME_STR_ADDR 0x10e + +#define OEM_I2C_UUID_STR_LEN 16 +#define OEM_I2C_CARD_SKU_STR_LEN 12 +#define OEM_I2C_CARD_FN_STR_LEN 12 +#define OEM_I2C_CARD_NAME_STR_LEN 128 +#define OEM_I2C_CARD_VERSION_STR_LEN 36 + +struct oem_i2c_data_t { + uint32_t size; + uint8_t uuid[OEM_I2C_UUID_STR_LEN]; + uint8_t card_sku[OEM_I2C_CARD_SKU_STR_LEN]; + uint8_t card_name[OEM_I2C_CARD_NAME_STR_LEN]; + uint8_t card_ver[OEM_I2C_CARD_VERSION_STR_LEN]; + uint8_t card_fn[OEM_I2C_CARD_FN_STR_LEN]; +}; + +enum curr_cfg_method_e 
{ + CURR_CFG_MET_NONE = 0, /* default config */ + CURR_CFG_MET_OS = 1, + CURR_CFG_MET_VENDOR_SPEC = 2,/* e.g. Option ROM, NPAR, O/S Cfg Utils */ + CURR_CFG_MET_HP_OTHER = 3, + CURR_CFG_MET_VC_CLP = 4, /* C-Class SM-CLP */ + CURR_CFG_MET_HP_CNU = 5, /* Converged Network Utility */ + CURR_CFG_MET_HP_DCI = 6, /* DCi (BD) changes */ +}; + +#define FC_NPIV_WWPN_SIZE 8 +#define FC_NPIV_WWNN_SIZE 8 +struct bdn_npiv_settings { + uint8_t npiv_wwpn[FC_NPIV_WWPN_SIZE]; + uint8_t npiv_wwnn[FC_NPIV_WWNN_SIZE]; +}; + +struct bdn_fc_npiv_cfg { + /* hdr used internally by the MFW */ + uint32_t hdr; + uint32_t num_of_npiv; +}; + +#define MAX_NUMBER_NPIV 64 +struct bdn_fc_npiv_tbl { + struct bdn_fc_npiv_cfg fc_npiv_cfg; + struct bdn_npiv_settings settings[MAX_NUMBER_NPIV]; +}; + +struct mdump_driver_info { + uint32_t epoc; + uint32_t drv_ver; + uint32_t fw_ver; + + uint32_t valid_dump; + #define FIRST_DUMP_VALID (1 << 0) + #define SECOND_DUMP_VALID (1 << 1) + + uint32_t flags; + #define ENABLE_ALL_TRIGGERS (0x7fffffff) + #define TRIGGER_MDUMP_ONCE (1 << 31) +}; + +struct shmem2_region { + + uint32_t size; /* 0x0000 */ + + uint32_t dcc_support; /* 0x0004 */ + #define SHMEM_DCC_SUPPORT_NONE 0x00000000 + #define SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV 0x00000001 + #define SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV 0x00000004 + #define SHMEM_DCC_SUPPORT_CHANGE_MAC_ADDRESS_TLV 0x00000008 + #define SHMEM_DCC_SUPPORT_SET_PROTOCOL_TLV 0x00000040 + #define SHMEM_DCC_SUPPORT_SET_PRIORITY_TLV 0x00000080 + + uint32_t ext_phy_fw_version2[PORT_MAX]; /* 0x0008 */ + /* + * For backwards compatibility, if the mf_cfg_addr does not exist + * (the size filed is smaller than 0xc) the mf_cfg resides at the + * end of struct shmem_region + */ + uint32_t mf_cfg_addr; /* 0x0010 */ + #define SHMEM_MF_CFG_ADDR_NONE 0x00000000 + + struct fw_flr_mb flr_mb; /* 0x0014 */ + uint32_t dcbx_lldp_params_offset; /* 0x0028 */ + #define SHMEM_LLDP_DCBX_PARAMS_NONE 0x00000000 + uint32_t dcbx_neg_res_offset; /* 0x002c */ + #define SHMEM_DCBX_NEG_RES_NONE 0x00000000 + uint32_t dcbx_remote_mib_offset; /* 0x0030 */ + #define SHMEM_DCBX_REMOTE_MIB_NONE 0x00000000 + /* + * The other shmemX_base_addr holds the other path's shmem address + * required for example in case of common phy init, or for path1 to know + * the address of mcp debug trace which is located in offset from shmem + * of path0 + */ + uint32_t other_shmem_base_addr; /* 0x0034 */ + uint32_t other_shmem2_base_addr; /* 0x0038 */ + /* + * mcp_vf_disabled is set by the MCP to indicate the driver about VFs + * which were disabled/flred + */ + uint32_t mcp_vf_disabled[E2_VF_MAX / 32]; /* 0x003c */ + + /* + * drv_ack_vf_disabled is set by the PF driver to ack handled disabled + * VFs + */ + uint32_t drv_ack_vf_disabled[E2_FUNC_MAX][E2_VF_MAX / 32]; /* 0x0044 */ + + uint32_t dcbx_lldp_dcbx_stat_offset; /* 0x0064 */ + #define SHMEM_LLDP_DCBX_STAT_NONE 0x00000000 + + /* + * edebug_driver_if field is used to transfer messages between edebug + * app to the driver through shmem2. + * + * message format: + * bits 0-2 - function number / instance of driver to perform request + * bits 3-5 - op code / is_ack? 
+ * bits 6-63 - data + */ + uint32_t edebug_driver_if[2]; /* 0x0068 */ + #define EDEBUG_DRIVER_IF_OP_CODE_GET_PHYS_ADDR 1 + #define EDEBUG_DRIVER_IF_OP_CODE_GET_BUS_ADDR 2 + #define EDEBUG_DRIVER_IF_OP_CODE_DISABLE_STAT 3 + + uint32_t nvm_retain_bitmap_addr; /* 0x0070 */ + + /* afex support of that driver */ + uint32_t afex_driver_support; /* 0x0074 */ + #define SHMEM_AFEX_VERSION_MASK 0x100f + #define SHMEM_AFEX_SUPPORTED_VERSION_ONE 0x1001 + #define SHMEM_AFEX_REDUCED_DRV_LOADED 0x8000 + + /* driver receives addr in scratchpad to which it should respond */ + uint32_t afex_scratchpad_addr_to_write[E2_FUNC_MAX]; + + /* + * generic params from MCP to driver (value depends on the msg sent + * to driver + */ + uint32_t afex_param1_to_driver[E2_FUNC_MAX]; /* 0x0088 */ + uint32_t afex_param2_to_driver[E2_FUNC_MAX]; /* 0x0098 */ + + uint32_t swim_base_addr; /* 0x00a8 */ + uint32_t swim_funcs; /* 0x00ac */ + uint32_t swim_main_cb; /* 0x00b0 */ + + /* + * bitmap notifying which VIF profiles stored in nvram are enabled by + * switch + */ + uint32_t afex_profiles_enabled[2]; /* 0x00b4 */ + + /* generic flags controlled by the driver */ + uint32_t drv_flags; /* 0x00bc */ + #define DRV_FLAGS_DCB_CONFIGURED 0x0 + #define DRV_FLAGS_DCB_CONFIGURATION_ABORTED 0x1 + #define DRV_FLAGS_DCB_MFW_CONFIGURED 0x2 + + #define DRV_FLAGS_PORT_MASK ((1 << DRV_FLAGS_DCB_CONFIGURED) | \ + (1 << DRV_FLAGS_DCB_CONFIGURATION_ABORTED) | \ + (1 << DRV_FLAGS_DCB_MFW_CONFIGURED)) + /* Port offset*/ + #define DRV_FLAGS_P0_OFFSET 0 + #define DRV_FLAGS_P1_OFFSET 16 + #define DRV_FLAGS_GET_PORT_OFFSET(_port) ((0 == _port) ? \ + DRV_FLAGS_P0_OFFSET : \ + DRV_FLAGS_P1_OFFSET) + + #define DRV_FLAGS_GET_PORT_MASK(_port) (DRV_FLAGS_PORT_MASK << \ + DRV_FLAGS_GET_PORT_OFFSET(_port)) + + #define DRV_FLAGS_FILED_BY_PORT(_field_bit, _port) (1 << ( \ + (_field_bit) + DRV_FLAGS_GET_PORT_OFFSET(_port))) + + /* pointer to extended dev_info shared data copied from nvm image */ + uint32_t extended_dev_info_shared_addr; /* 0x00c0 */ + uint32_t ncsi_oem_data_addr; /* 0x00c4 */ + + uint32_t sensor_data_addr; /* 0x00c8 */ + uint32_t buffer_block_addr; /* 0x00cc */ + uint32_t sensor_data_req_update_interval; /* 0x00d0 */ + uint32_t temperature_in_half_celsius; /* 0x00d4 */ + uint32_t glob_struct_in_host; /* 0x00d8 */ + + uint32_t dcbx_neg_res_ext_offset; /* 0x00dc */ + #define SHMEM_DCBX_NEG_RES_EXT_NONE 0x00000000 + + uint32_t drv_capabilities_flag[E2_FUNC_MAX]; /* 0x00e0 */ + #define DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED 0x00000001 + #define DRV_FLAGS_CAPABILITIES_LOADED_L2 0x00000002 + #define DRV_FLAGS_CAPABILITIES_LOADED_FCOE 0x00000004 + #define DRV_FLAGS_CAPABILITIES_LOADED_ISCSI 0x00000008 + #define DRV_FLAGS_MTU_MASK 0xffff0000 + #define DRV_FLAGS_MTU_SHIFT 16 + + uint32_t extended_dev_info_shared_cfg_size; /* 0x00f0 */ + + uint32_t dcbx_en[PORT_MAX]; /* 0x00f4 */ + + /* The offset points to the multi threaded meta structure */ + uint32_t multi_thread_data_offset; /* 0x00fc */ + + /* address of DMAable host address holding values from the drivers */ + uint32_t drv_info_host_addr_lo; /* 0x0100 */ + uint32_t drv_info_host_addr_hi; /* 0x0104 */ + + /* general values written by the MFW (such as current version) */ + uint32_t drv_info_control; /* 0x0108 */ + #define DRV_INFO_CONTROL_VER_MASK 0x000000ff + #define DRV_INFO_CONTROL_VER_SHIFT 0 + #define DRV_INFO_CONTROL_OP_CODE_MASK 0x0000ff00 + #define DRV_INFO_CONTROL_OP_CODE_SHIFT 8 + uint32_t ibft_host_addr; /* initialized by option ROM */ /* 0x010c */ + + struct eee_remote_vals 
eee_remote_vals[PORT_MAX]; /* 0x0110 */ + uint32_t pf_allocation[E2_FUNC_MAX]; /* 0x0120 */ + #define PF_ALLOACTION_MSIX_VECTORS_MASK 0x000000ff /* real value, as PCI config space can show only maximum of 64 vectors */ + #define PF_ALLOACTION_MSIX_VECTORS_SHIFT 0 + + /* the status of EEE auto-negotiation + * bits 15:0 the configured tx-lpi entry timer value. Depends on bit 31. + * bits 19:16 the supported modes for EEE. + * bits 23:20 the speeds advertised for EEE. + * bits 27:24 the speeds the Link partner advertised for EEE. + * The supported/adv. modes in bits 27:19 originate from the + * SHMEM_EEE_XXX_ADV definitions (where XXX is replaced by speed). + * bit 28 when 1'b1 EEE was requested. + * bit 29 when 1'b1 tx lpi was requested. + * bit 30 when 1'b1 EEE was negotiated. Tx lpi will be asserted if + * 30:29 are 2'b11. + * bit 31 when 1'b0 bits 15:0 contain a PORT_FEAT_CFG_EEE_ define as + * value. When 1'b1 those bits contains a value times 16 microseconds. + */ + uint32_t eee_status[PORT_MAX]; /* 0x0130 */ + #define SHMEM_EEE_TIMER_MASK 0x0000ffff + #define SHMEM_EEE_SUPPORTED_MASK 0x000f0000 + #define SHMEM_EEE_SUPPORTED_SHIFT 16 + #define SHMEM_EEE_ADV_STATUS_MASK 0x00f00000 + #define SHMEM_EEE_100M_ADV (1<<0) + #define SHMEM_EEE_1G_ADV (1 << 1) + #define SHMEM_EEE_10G_ADV (1<<2) + #define SHMEM_EEE_ADV_STATUS_SHIFT 20 + #define SHMEM_EEE_LP_ADV_STATUS_MASK 0x0f000000 + #define SHMEM_EEE_LP_ADV_STATUS_SHIFT 24 + #define SHMEM_EEE_REQUESTED_BIT 0x10000000 + #define SHMEM_EEE_LPI_REQUESTED_BIT 0x20000000 + #define SHMEM_EEE_ACTIVE_BIT 0x40000000 + #define SHMEM_EEE_TIME_OUTPUT_BIT 0x80000000 + + uint32_t sizeof_port_stats; /* 0x0138 */ + + /* Link Flap Avoidance */ + uint32_t lfa_host_addr[PORT_MAX]; /* 0x013c */ + + /* External PHY temperature in deg C. 
*/ + uint32_t extphy_temps_in_celsius; /* 0x0144 */ + #define EXTPHY1_TEMP_MASK 0x0000ffff + #define EXTPHY1_TEMP_SHIFT 0 + #define ON_BOARD_TEMP_MASK 0xffff0000 + #define ON_BOARD_TEMP_SHIFT 16 + + uint32_t ocdata_info_addr; /* Offset 0x148 */ + uint32_t drv_func_info_addr; /* Offset 0x14C */ + uint32_t drv_func_info_size; /* Offset 0x150 */ + uint32_t link_attr_sync[PORT_MAX]; /* Offset 0x154 */ + #define LINK_ATTR_SYNC_KR2_ENABLE 0x00000001 + #define LINK_ATTR_84858 0x00000002 + #define LINK_SFP_EEPROM_COMP_CODE_MASK 0x0000ff00 + #define LINK_SFP_EEPROM_COMP_CODE_SHIFT 8 + #define LINK_SFP_EEPROM_COMP_CODE_SR 0x00001000 + #define LINK_SFP_EEPROM_COMP_CODE_LR 0x00002000 + #define LINK_SFP_EEPROM_COMP_CODE_LRM 0x00004000 + + uint32_t ibft_host_addr_hi; /* Initialize by uEFI ROM Offset 0x158 */ + uint32_t fcode_ver; /* Offset 0x15c */ + uint32_t link_change_count[PORT_MAX]; /* Offset 0x160-0x164 */ + #define LINK_CHANGE_COUNT_MASK 0xff /* Offset 0x168 */ + /* driver version for each personality*/ + struct os_drv_ver func_os_drv_ver[E2_FUNC_MAX]; /* Offset 0x16c */ + + /* Flag to the driver that PF's drv_info_host_addr buffer was read */ + uint32_t mfw_drv_indication; /* Offset 0x19c */ + + /* We use inidcation for each PF (0..3) */ + #define MFW_DRV_IND_READ_DONE_OFFSET(_pf_) (1 << (_pf_)) + + union { /* For various OEMs */ /* Offset 0x1a0 */ + uint8_t storage_boot_prog[E2_FUNC_MAX]; + #define STORAGE_BOOT_PROG_MASK 0x000000FF + #define STORAGE_BOOT_PROG_NONE 0x00000000 + #define STORAGE_BOOT_PROG_ISCSI_IP_ACQUIRED 0x00000002 + #define STORAGE_BOOT_PROG_FCOE_FABRIC_LOGIN_SUCCESS 0x00000002 + #define STORAGE_BOOT_PROG_TARGET_FOUND 0x00000004 + #define STORAGE_BOOT_PROG_ISCSI_CHAP_SUCCESS 0x00000008 + #define STORAGE_BOOT_PROG_FCOE_LUN_FOUND 0x00000008 + #define STORAGE_BOOT_PROG_LOGGED_INTO_TGT 0x00000010 + #define STORAGE_BOOT_PROG_IMG_DOWNLOADED 0x00000020 + #define STORAGE_BOOT_PROG_OS_HANDOFF 0x00000040 + #define STORAGE_BOOT_PROG_COMPLETED 0x00000080 + + uint32_t oem_i2c_data_addr; + }; + + /* 9 entries for the C2S PCP map for each inner VLAN PCP + 1 default */ + /* For PCP values 0-3 use the map lower */ + /* 0xFF000000 - PCP 0, 0x00FF0000 - PCP 1, + * 0x0000FF00 - PCP 2, 0x000000FF PCP 3 + */ + uint32_t c2s_pcp_map_lower[E2_FUNC_MAX]; /* 0x1a4 */ + + /* For PCP values 4-7 use the map upper */ + /* 0xFF000000 - PCP 4, 0x00FF0000 - PCP 5, + * 0x0000FF00 - PCP 6, 0x000000FF PCP 7 + */ + uint32_t c2s_pcp_map_upper[E2_FUNC_MAX]; /* 0x1b4 */ + + /* For PCP default value get the MSB byte of the map default */ + uint32_t c2s_pcp_map_default[E2_FUNC_MAX]; /* 0x1c4 */ + + /* FC_NPIV table offset in NVRAM */ + uint32_t fc_npiv_nvram_tbl_addr[PORT_MAX]; /* 0x1d4 */ + + /* Shows last method that changed configuration of this device */ + enum curr_cfg_method_e curr_cfg; /* 0x1dc */ + + /* Storm FW version, shold be kept in the format 0xMMmmbbdd: + * MM - Major, mm - Minor, bb - Build ,dd - Drop + */ + uint32_t netproc_fw_ver; /* 0x1e0 */ + + /* Option ROM SMASH CLP version */ + uint32_t clp_ver; /* 0x1e4 */ + + uint32_t pcie_bus_num; /* 0x1e8 */ + + uint32_t sriov_switch_mode; /* 0x1ec */ + #define SRIOV_SWITCH_MODE_NONE 0x0 + #define SRIOV_SWITCH_MODE_VEB 0x1 + #define SRIOV_SWITCH_MODE_VEPA 0x2 + + uint8_t rsrv2[E2_FUNC_MAX]; /* 0x1f0 */ + + uint32_t img_inv_table_addr; /* Address to INV_TABLE_P */ /* 0x1f4 */ + + uint32_t mtu_size[E2_FUNC_MAX]; /* 0x1f8 */ + + uint32_t os_driver_state[E2_FUNC_MAX]; /* 0x208 */ + #define OS_DRIVER_STATE_NOT_LOADED 0 /* not installed */ + #define 
OS_DRIVER_STATE_LOADING 1 /* transition state */ + #define OS_DRIVER_STATE_DISABLED 2 /* installed but disabled */ + #define OS_DRIVER_STATE_ACTIVE 3 /* installed and active */ + + /* mini dump driver info */ + struct mdump_driver_info drv_info; /* 0x218 */ + + /* written by mfw, read by driver, eg. feature capability support */ + uint32_t mfw_flags; /* 0x22c */ + #define DISABLE_EMBEDDED_LLDP_SUPPORT 0x00000001 +}; + +#define VLAN_BITMAP_SIZE 512 +#define VLAN_PF_NUM_MAX 8 + +struct pf_vlan_table { + uint16_t pvid; + uint8_t pcp; + uint8_t rsvd; + uint8_t trunk_vlan_bitmap[VLAN_BITMAP_SIZE]; + uint32_t rsvd1[4]; +}; + +struct vlan_table_s { + uint32_t version; + #define VLAN_TABLE_IMAGE_VERSION_1 1 + uint8_t vlan_mode[NVM_PATH_MAX][PORT_MAX]; + #define VLAN_MODE_NORMAL 0 + #define VLAN_MODE_FILTER 1 + #define VLAN_MODE_QINQ 2 + struct pf_vlan_table pf_vlans[VLAN_PF_NUM_MAX]; + uint32_t rsvd2[8]; +}; + +/* The VLAN table Image is stored in Big Endian format */ +struct nvm_vlan_table_image { + struct vlan_table_s vlan_table; + uint32_t crc; +}; + + +struct emac_stats { + uint32_t rx_stat_ifhcinoctets; + uint32_t rx_stat_ifhcinbadoctets; + uint32_t rx_stat_etherstatsfragments; + uint32_t rx_stat_ifhcinucastpkts; + uint32_t rx_stat_ifhcinmulticastpkts; + uint32_t rx_stat_ifhcinbroadcastpkts; + uint32_t rx_stat_dot3statsfcserrors; + uint32_t rx_stat_dot3statsalignmenterrors; + uint32_t rx_stat_dot3statscarriersenseerrors; + uint32_t rx_stat_xonpauseframesreceived; + uint32_t rx_stat_xoffpauseframesreceived; + uint32_t rx_stat_maccontrolframesreceived; + uint32_t rx_stat_xoffstateentered; + uint32_t rx_stat_dot3statsframestoolong; + uint32_t rx_stat_etherstatsjabbers; + uint32_t rx_stat_etherstatsundersizepkts; + uint32_t rx_stat_etherstatspkts64octets; + uint32_t rx_stat_etherstatspkts65octetsto127octets; + uint32_t rx_stat_etherstatspkts128octetsto255octets; + uint32_t rx_stat_etherstatspkts256octetsto511octets; + uint32_t rx_stat_etherstatspkts512octetsto1023octets; + uint32_t rx_stat_etherstatspkts1024octetsto1522octets; + uint32_t rx_stat_etherstatspktsover1522octets; + + uint32_t rx_stat_falsecarriererrors; + + uint32_t tx_stat_ifhcoutoctets; + uint32_t tx_stat_ifhcoutbadoctets; + uint32_t tx_stat_etherstatscollisions; + uint32_t tx_stat_outxonsent; + uint32_t tx_stat_outxoffsent; + uint32_t tx_stat_flowcontroldone; + uint32_t tx_stat_dot3statssinglecollisionframes; + uint32_t tx_stat_dot3statsmultiplecollisionframes; + uint32_t tx_stat_dot3statsdeferredtransmissions; + uint32_t tx_stat_dot3statsexcessivecollisions; + uint32_t tx_stat_dot3statslatecollisions; + uint32_t tx_stat_ifhcoutucastpkts; + uint32_t tx_stat_ifhcoutmulticastpkts; + uint32_t tx_stat_ifhcoutbroadcastpkts; + uint32_t tx_stat_etherstatspkts64octets; + uint32_t tx_stat_etherstatspkts65octetsto127octets; + uint32_t tx_stat_etherstatspkts128octetsto255octets; + uint32_t tx_stat_etherstatspkts256octetsto511octets; + uint32_t tx_stat_etherstatspkts512octetsto1023octets; + uint32_t tx_stat_etherstatspkts1024octetsto1522octets; + uint32_t tx_stat_etherstatspktsover1522octets; + uint32_t tx_stat_dot3statsinternalmactransmiterrors; +}; + + +struct bmac1_stats { + uint32_t tx_stat_gtpkt_lo; + uint32_t tx_stat_gtpkt_hi; + uint32_t tx_stat_gtxpf_lo; + uint32_t tx_stat_gtxpf_hi; + uint32_t tx_stat_gtfcs_lo; + uint32_t tx_stat_gtfcs_hi; + uint32_t tx_stat_gtmca_lo; + uint32_t tx_stat_gtmca_hi; + uint32_t tx_stat_gtbca_lo; + uint32_t tx_stat_gtbca_hi; + uint32_t tx_stat_gtfrg_lo; + uint32_t tx_stat_gtfrg_hi; + uint32_t 
tx_stat_gtovr_lo; + uint32_t tx_stat_gtovr_hi; + uint32_t tx_stat_gt64_lo; + uint32_t tx_stat_gt64_hi; + uint32_t tx_stat_gt127_lo; + uint32_t tx_stat_gt127_hi; + uint32_t tx_stat_gt255_lo; + uint32_t tx_stat_gt255_hi; + uint32_t tx_stat_gt511_lo; + uint32_t tx_stat_gt511_hi; + uint32_t tx_stat_gt1023_lo; + uint32_t tx_stat_gt1023_hi; + uint32_t tx_stat_gt1518_lo; + uint32_t tx_stat_gt1518_hi; + uint32_t tx_stat_gt2047_lo; + uint32_t tx_stat_gt2047_hi; + uint32_t tx_stat_gt4095_lo; + uint32_t tx_stat_gt4095_hi; + uint32_t tx_stat_gt9216_lo; + uint32_t tx_stat_gt9216_hi; + uint32_t tx_stat_gt16383_lo; + uint32_t tx_stat_gt16383_hi; + uint32_t tx_stat_gtmax_lo; + uint32_t tx_stat_gtmax_hi; + uint32_t tx_stat_gtufl_lo; + uint32_t tx_stat_gtufl_hi; + uint32_t tx_stat_gterr_lo; + uint32_t tx_stat_gterr_hi; + uint32_t tx_stat_gtbyt_lo; + uint32_t tx_stat_gtbyt_hi; + + uint32_t rx_stat_gr64_lo; + uint32_t rx_stat_gr64_hi; + uint32_t rx_stat_gr127_lo; + uint32_t rx_stat_gr127_hi; + uint32_t rx_stat_gr255_lo; + uint32_t rx_stat_gr255_hi; + uint32_t rx_stat_gr511_lo; + uint32_t rx_stat_gr511_hi; + uint32_t rx_stat_gr1023_lo; + uint32_t rx_stat_gr1023_hi; + uint32_t rx_stat_gr1518_lo; + uint32_t rx_stat_gr1518_hi; + uint32_t rx_stat_gr2047_lo; + uint32_t rx_stat_gr2047_hi; + uint32_t rx_stat_gr4095_lo; + uint32_t rx_stat_gr4095_hi; + uint32_t rx_stat_gr9216_lo; + uint32_t rx_stat_gr9216_hi; + uint32_t rx_stat_gr16383_lo; + uint32_t rx_stat_gr16383_hi; + uint32_t rx_stat_grmax_lo; + uint32_t rx_stat_grmax_hi; + uint32_t rx_stat_grpkt_lo; + uint32_t rx_stat_grpkt_hi; + uint32_t rx_stat_grfcs_lo; + uint32_t rx_stat_grfcs_hi; + uint32_t rx_stat_grmca_lo; + uint32_t rx_stat_grmca_hi; + uint32_t rx_stat_grbca_lo; + uint32_t rx_stat_grbca_hi; + uint32_t rx_stat_grxcf_lo; + uint32_t rx_stat_grxcf_hi; + uint32_t rx_stat_grxpf_lo; + uint32_t rx_stat_grxpf_hi; + uint32_t rx_stat_grxuo_lo; + uint32_t rx_stat_grxuo_hi; + uint32_t rx_stat_grjbr_lo; + uint32_t rx_stat_grjbr_hi; + uint32_t rx_stat_grovr_lo; + uint32_t rx_stat_grovr_hi; + uint32_t rx_stat_grflr_lo; + uint32_t rx_stat_grflr_hi; + uint32_t rx_stat_grmeg_lo; + uint32_t rx_stat_grmeg_hi; + uint32_t rx_stat_grmeb_lo; + uint32_t rx_stat_grmeb_hi; + uint32_t rx_stat_grbyt_lo; + uint32_t rx_stat_grbyt_hi; + uint32_t rx_stat_grund_lo; + uint32_t rx_stat_grund_hi; + uint32_t rx_stat_grfrg_lo; + uint32_t rx_stat_grfrg_hi; + uint32_t rx_stat_grerb_lo; + uint32_t rx_stat_grerb_hi; + uint32_t rx_stat_grfre_lo; + uint32_t rx_stat_grfre_hi; + uint32_t rx_stat_gripj_lo; + uint32_t rx_stat_gripj_hi; +}; + +struct bmac2_stats { + uint32_t tx_stat_gtpk_lo; /* gtpok */ + uint32_t tx_stat_gtpk_hi; /* gtpok */ + uint32_t tx_stat_gtxpf_lo; /* gtpf */ + uint32_t tx_stat_gtxpf_hi; /* gtpf */ + uint32_t tx_stat_gtpp_lo; /* NEW BMAC2 */ + uint32_t tx_stat_gtpp_hi; /* NEW BMAC2 */ + uint32_t tx_stat_gtfcs_lo; + uint32_t tx_stat_gtfcs_hi; + uint32_t tx_stat_gtuca_lo; /* NEW BMAC2 */ + uint32_t tx_stat_gtuca_hi; /* NEW BMAC2 */ + uint32_t tx_stat_gtmca_lo; + uint32_t tx_stat_gtmca_hi; + uint32_t tx_stat_gtbca_lo; + uint32_t tx_stat_gtbca_hi; + uint32_t tx_stat_gtovr_lo; + uint32_t tx_stat_gtovr_hi; + uint32_t tx_stat_gtfrg_lo; + uint32_t tx_stat_gtfrg_hi; + uint32_t tx_stat_gtpkt1_lo; /* gtpkt */ + uint32_t tx_stat_gtpkt1_hi; /* gtpkt */ + uint32_t tx_stat_gt64_lo; + uint32_t tx_stat_gt64_hi; + uint32_t tx_stat_gt127_lo; + uint32_t tx_stat_gt127_hi; + uint32_t tx_stat_gt255_lo; + uint32_t tx_stat_gt255_hi; + uint32_t tx_stat_gt511_lo; + uint32_t tx_stat_gt511_hi; + uint32_t 
tx_stat_gt1023_lo; + uint32_t tx_stat_gt1023_hi; + uint32_t tx_stat_gt1518_lo; + uint32_t tx_stat_gt1518_hi; + uint32_t tx_stat_gt2047_lo; + uint32_t tx_stat_gt2047_hi; + uint32_t tx_stat_gt4095_lo; + uint32_t tx_stat_gt4095_hi; + uint32_t tx_stat_gt9216_lo; + uint32_t tx_stat_gt9216_hi; + uint32_t tx_stat_gt16383_lo; + uint32_t tx_stat_gt16383_hi; + uint32_t tx_stat_gtmax_lo; + uint32_t tx_stat_gtmax_hi; + uint32_t tx_stat_gtufl_lo; + uint32_t tx_stat_gtufl_hi; + uint32_t tx_stat_gterr_lo; + uint32_t tx_stat_gterr_hi; + uint32_t tx_stat_gtbyt_lo; + uint32_t tx_stat_gtbyt_hi; + + uint32_t rx_stat_gr64_lo; + uint32_t rx_stat_gr64_hi; + uint32_t rx_stat_gr127_lo; + uint32_t rx_stat_gr127_hi; + uint32_t rx_stat_gr255_lo; + uint32_t rx_stat_gr255_hi; + uint32_t rx_stat_gr511_lo; + uint32_t rx_stat_gr511_hi; + uint32_t rx_stat_gr1023_lo; + uint32_t rx_stat_gr1023_hi; + uint32_t rx_stat_gr1518_lo; + uint32_t rx_stat_gr1518_hi; + uint32_t rx_stat_gr2047_lo; + uint32_t rx_stat_gr2047_hi; + uint32_t rx_stat_gr4095_lo; + uint32_t rx_stat_gr4095_hi; + uint32_t rx_stat_gr9216_lo; + uint32_t rx_stat_gr9216_hi; + uint32_t rx_stat_gr16383_lo; + uint32_t rx_stat_gr16383_hi; + uint32_t rx_stat_grmax_lo; + uint32_t rx_stat_grmax_hi; + uint32_t rx_stat_grpkt_lo; + uint32_t rx_stat_grpkt_hi; + uint32_t rx_stat_grfcs_lo; + uint32_t rx_stat_grfcs_hi; + uint32_t rx_stat_gruca_lo; + uint32_t rx_stat_gruca_hi; + uint32_t rx_stat_grmca_lo; + uint32_t rx_stat_grmca_hi; + uint32_t rx_stat_grbca_lo; + uint32_t rx_stat_grbca_hi; + uint32_t rx_stat_grxpf_lo; /* grpf */ + uint32_t rx_stat_grxpf_hi; /* grpf */ + uint32_t rx_stat_grpp_lo; + uint32_t rx_stat_grpp_hi; + uint32_t rx_stat_grxuo_lo; /* gruo */ + uint32_t rx_stat_grxuo_hi; /* gruo */ + uint32_t rx_stat_grjbr_lo; + uint32_t rx_stat_grjbr_hi; + uint32_t rx_stat_grovr_lo; + uint32_t rx_stat_grovr_hi; + uint32_t rx_stat_grxcf_lo; /* grcf */ + uint32_t rx_stat_grxcf_hi; /* grcf */ + uint32_t rx_stat_grflr_lo; + uint32_t rx_stat_grflr_hi; + uint32_t rx_stat_grpok_lo; + uint32_t rx_stat_grpok_hi; + uint32_t rx_stat_grmeg_lo; + uint32_t rx_stat_grmeg_hi; + uint32_t rx_stat_grmeb_lo; + uint32_t rx_stat_grmeb_hi; + uint32_t rx_stat_grbyt_lo; + uint32_t rx_stat_grbyt_hi; + uint32_t rx_stat_grund_lo; + uint32_t rx_stat_grund_hi; + uint32_t rx_stat_grfrg_lo; + uint32_t rx_stat_grfrg_hi; + uint32_t rx_stat_grerb_lo; /* grerrbyt */ + uint32_t rx_stat_grerb_hi; /* grerrbyt */ + uint32_t rx_stat_grfre_lo; /* grfrerr */ + uint32_t rx_stat_grfre_hi; /* grfrerr */ + uint32_t rx_stat_gripj_lo; + uint32_t rx_stat_gripj_hi; +}; + +struct mstat_stats { + struct { + /* OTE MSTAT on E3 has a bug where this register's contents are + * actually tx_gtxpok + tx_gtxpf + (possibly)tx_gtxpp + */ + uint32_t tx_gtxpok_lo; + uint32_t tx_gtxpok_hi; + uint32_t tx_gtxpf_lo; + uint32_t tx_gtxpf_hi; + uint32_t tx_gtxpp_lo; + uint32_t tx_gtxpp_hi; + uint32_t tx_gtfcs_lo; + uint32_t tx_gtfcs_hi; + uint32_t tx_gtuca_lo; + uint32_t tx_gtuca_hi; + uint32_t tx_gtmca_lo; + uint32_t tx_gtmca_hi; + uint32_t tx_gtgca_lo; + uint32_t tx_gtgca_hi; + uint32_t tx_gtpkt_lo; + uint32_t tx_gtpkt_hi; + uint32_t tx_gt64_lo; + uint32_t tx_gt64_hi; + uint32_t tx_gt127_lo; + uint32_t tx_gt127_hi; + uint32_t tx_gt255_lo; + uint32_t tx_gt255_hi; + uint32_t tx_gt511_lo; + uint32_t tx_gt511_hi; + uint32_t tx_gt1023_lo; + uint32_t tx_gt1023_hi; + uint32_t tx_gt1518_lo; + uint32_t tx_gt1518_hi; + uint32_t tx_gt2047_lo; + uint32_t tx_gt2047_hi; + uint32_t tx_gt4095_lo; + uint32_t tx_gt4095_hi; + uint32_t tx_gt9216_lo; + uint32_t 
tx_gt9216_hi; + uint32_t tx_gt16383_lo; + uint32_t tx_gt16383_hi; + uint32_t tx_gtufl_lo; + uint32_t tx_gtufl_hi; + uint32_t tx_gterr_lo; + uint32_t tx_gterr_hi; + uint32_t tx_gtbyt_lo; + uint32_t tx_gtbyt_hi; + uint32_t tx_collisions_lo; + uint32_t tx_collisions_hi; + uint32_t tx_singlecollision_lo; + uint32_t tx_singlecollision_hi; + uint32_t tx_multiplecollisions_lo; + uint32_t tx_multiplecollisions_hi; + uint32_t tx_deferred_lo; + uint32_t tx_deferred_hi; + uint32_t tx_excessivecollisions_lo; + uint32_t tx_excessivecollisions_hi; + uint32_t tx_latecollisions_lo; + uint32_t tx_latecollisions_hi; + } stats_tx; + + struct { + uint32_t rx_gr64_lo; + uint32_t rx_gr64_hi; + uint32_t rx_gr127_lo; + uint32_t rx_gr127_hi; + uint32_t rx_gr255_lo; + uint32_t rx_gr255_hi; + uint32_t rx_gr511_lo; + uint32_t rx_gr511_hi; + uint32_t rx_gr1023_lo; + uint32_t rx_gr1023_hi; + uint32_t rx_gr1518_lo; + uint32_t rx_gr1518_hi; + uint32_t rx_gr2047_lo; + uint32_t rx_gr2047_hi; + uint32_t rx_gr4095_lo; + uint32_t rx_gr4095_hi; + uint32_t rx_gr9216_lo; + uint32_t rx_gr9216_hi; + uint32_t rx_gr16383_lo; + uint32_t rx_gr16383_hi; + uint32_t rx_grpkt_lo; + uint32_t rx_grpkt_hi; + uint32_t rx_grfcs_lo; + uint32_t rx_grfcs_hi; + uint32_t rx_gruca_lo; + uint32_t rx_gruca_hi; + uint32_t rx_grmca_lo; + uint32_t rx_grmca_hi; + uint32_t rx_grbca_lo; + uint32_t rx_grbca_hi; + uint32_t rx_grxpf_lo; + uint32_t rx_grxpf_hi; + uint32_t rx_grxpp_lo; + uint32_t rx_grxpp_hi; + uint32_t rx_grxuo_lo; + uint32_t rx_grxuo_hi; + uint32_t rx_grovr_lo; + uint32_t rx_grovr_hi; + uint32_t rx_grxcf_lo; + uint32_t rx_grxcf_hi; + uint32_t rx_grflr_lo; + uint32_t rx_grflr_hi; + uint32_t rx_grpok_lo; + uint32_t rx_grpok_hi; + uint32_t rx_grbyt_lo; + uint32_t rx_grbyt_hi; + uint32_t rx_grund_lo; + uint32_t rx_grund_hi; + uint32_t rx_grfrg_lo; + uint32_t rx_grfrg_hi; + uint32_t rx_grerb_lo; + uint32_t rx_grerb_hi; + uint32_t rx_grfre_lo; + uint32_t rx_grfre_hi; + + uint32_t rx_alignmenterrors_lo; + uint32_t rx_alignmenterrors_hi; + uint32_t rx_falsecarrier_lo; + uint32_t rx_falsecarrier_hi; + uint32_t rx_llfcmsgcnt_lo; + uint32_t rx_llfcmsgcnt_hi; + } stats_rx; +}; + +union mac_stats { + struct emac_stats emac_stats; + struct bmac1_stats bmac1_stats; + struct bmac2_stats bmac2_stats; + struct mstat_stats mstat_stats; +}; + + +struct mac_stx { + /* in_bad_octets */ + uint32_t rx_stat_ifhcinbadoctets_hi; + uint32_t rx_stat_ifhcinbadoctets_lo; + + /* out_bad_octets */ + uint32_t tx_stat_ifhcoutbadoctets_hi; + uint32_t tx_stat_ifhcoutbadoctets_lo; + + /* crc_receive_errors */ + uint32_t rx_stat_dot3statsfcserrors_hi; + uint32_t rx_stat_dot3statsfcserrors_lo; + /* alignment_errors */ + uint32_t rx_stat_dot3statsalignmenterrors_hi; + uint32_t rx_stat_dot3statsalignmenterrors_lo; + /* carrier_sense_errors */ + uint32_t rx_stat_dot3statscarriersenseerrors_hi; + uint32_t rx_stat_dot3statscarriersenseerrors_lo; + /* false_carrier_detections */ + uint32_t rx_stat_falsecarriererrors_hi; + uint32_t rx_stat_falsecarriererrors_lo; + + /* runt_packets_received */ + uint32_t rx_stat_etherstatsundersizepkts_hi; + uint32_t rx_stat_etherstatsundersizepkts_lo; + /* jabber_packets_received */ + uint32_t rx_stat_dot3statsframestoolong_hi; + uint32_t rx_stat_dot3statsframestoolong_lo; + + /* error_runt_packets_received */ + uint32_t rx_stat_etherstatsfragments_hi; + uint32_t rx_stat_etherstatsfragments_lo; + /* error_jabber_packets_received */ + uint32_t rx_stat_etherstatsjabbers_hi; + uint32_t rx_stat_etherstatsjabbers_lo; + + /* control_frames_received */ + 
uint32_t rx_stat_maccontrolframesreceived_hi; + uint32_t rx_stat_maccontrolframesreceived_lo; + uint32_t rx_stat_mac_xpf_hi; + uint32_t rx_stat_mac_xpf_lo; + uint32_t rx_stat_mac_xcf_hi; + uint32_t rx_stat_mac_xcf_lo; + + /* xoff_state_entered */ + uint32_t rx_stat_xoffstateentered_hi; + uint32_t rx_stat_xoffstateentered_lo; + /* pause_xon_frames_received */ + uint32_t rx_stat_xonpauseframesreceived_hi; + uint32_t rx_stat_xonpauseframesreceived_lo; + /* pause_xoff_frames_received */ + uint32_t rx_stat_xoffpauseframesreceived_hi; + uint32_t rx_stat_xoffpauseframesreceived_lo; + /* pause_xon_frames_transmitted */ + uint32_t tx_stat_outxonsent_hi; + uint32_t tx_stat_outxonsent_lo; + /* pause_xoff_frames_transmitted */ + uint32_t tx_stat_outxoffsent_hi; + uint32_t tx_stat_outxoffsent_lo; + /* flow_control_done */ + uint32_t tx_stat_flowcontroldone_hi; + uint32_t tx_stat_flowcontroldone_lo; + + /* ether_stats_collisions */ + uint32_t tx_stat_etherstatscollisions_hi; + uint32_t tx_stat_etherstatscollisions_lo; + /* single_collision_transmit_frames */ + uint32_t tx_stat_dot3statssinglecollisionframes_hi; + uint32_t tx_stat_dot3statssinglecollisionframes_lo; + /* multiple_collision_transmit_frames */ + uint32_t tx_stat_dot3statsmultiplecollisionframes_hi; + uint32_t tx_stat_dot3statsmultiplecollisionframes_lo; + /* deferred_transmissions */ + uint32_t tx_stat_dot3statsdeferredtransmissions_hi; + uint32_t tx_stat_dot3statsdeferredtransmissions_lo; + /* excessive_collision_frames */ + uint32_t tx_stat_dot3statsexcessivecollisions_hi; + uint32_t tx_stat_dot3statsexcessivecollisions_lo; + /* late_collision_frames */ + uint32_t tx_stat_dot3statslatecollisions_hi; + uint32_t tx_stat_dot3statslatecollisions_lo; + + /* frames_transmitted_64_bytes */ + uint32_t tx_stat_etherstatspkts64octets_hi; + uint32_t tx_stat_etherstatspkts64octets_lo; + /* frames_transmitted_65_127_bytes */ + uint32_t tx_stat_etherstatspkts65octetsto127octets_hi; + uint32_t tx_stat_etherstatspkts65octetsto127octets_lo; + /* frames_transmitted_128_255_bytes */ + uint32_t tx_stat_etherstatspkts128octetsto255octets_hi; + uint32_t tx_stat_etherstatspkts128octetsto255octets_lo; + /* frames_transmitted_256_511_bytes */ + uint32_t tx_stat_etherstatspkts256octetsto511octets_hi; + uint32_t tx_stat_etherstatspkts256octetsto511octets_lo; + /* frames_transmitted_512_1023_bytes */ + uint32_t tx_stat_etherstatspkts512octetsto1023octets_hi; + uint32_t tx_stat_etherstatspkts512octetsto1023octets_lo; + /* frames_transmitted_1024_1522_bytes */ + uint32_t tx_stat_etherstatspkts1024octetsto1522octets_hi; + uint32_t tx_stat_etherstatspkts1024octetsto1522octets_lo; + /* frames_transmitted_1523_9022_bytes */ + uint32_t tx_stat_etherstatspktsover1522octets_hi; + uint32_t tx_stat_etherstatspktsover1522octets_lo; + uint32_t tx_stat_mac_2047_hi; + uint32_t tx_stat_mac_2047_lo; + uint32_t tx_stat_mac_4095_hi; + uint32_t tx_stat_mac_4095_lo; + uint32_t tx_stat_mac_9216_hi; + uint32_t tx_stat_mac_9216_lo; + uint32_t tx_stat_mac_16383_hi; + uint32_t tx_stat_mac_16383_lo; + + /* internal_mac_transmit_errors */ + uint32_t tx_stat_dot3statsinternalmactransmiterrors_hi; + uint32_t tx_stat_dot3statsinternalmactransmiterrors_lo; + + /* if_out_discards */ + uint32_t tx_stat_mac_ufl_hi; + uint32_t tx_stat_mac_ufl_lo; +}; + + +#define MAC_STX_IDX_MAX 2 + +struct host_port_stats { + uint32_t host_port_stats_counter; + + struct mac_stx mac_stx[MAC_STX_IDX_MAX]; + + uint32_t brb_drop_hi; + uint32_t brb_drop_lo; + + uint32_t not_used; /* obsolete as of MFW 7.2.1 */ + + 
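The statistics structures above and below split every 64-bit counter into a _hi/_lo pair of 32-bit words. As a minimal editorial sketch (not part of this patch; the helper name is hypothetical), a consumer could reassemble such a pair as follows, assuming only <stdint.h>:

#include <stdint.h>

/* Combine a _hi/_lo counter pair into one 64-bit value;
 * _hi carries the upper 32 bits, _lo the lower 32 bits. */
static inline uint64_t stats_hilo_to_u64(uint32_t hi, uint32_t lo)
{
	return ((uint64_t)hi << 32) | (uint64_t)lo;
}

/* e.g. pfc_frames_tx = stats_hilo_to_u64(port_stats->pfc_frames_tx_hi,
 *                                        port_stats->pfc_frames_tx_lo); */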
uint32_t pfc_frames_tx_hi; + uint32_t pfc_frames_tx_lo; + uint32_t pfc_frames_rx_hi; + uint32_t pfc_frames_rx_lo; + + uint32_t eee_lpi_count_hi; + uint32_t eee_lpi_count_lo; +}; + + +struct host_func_stats { + uint32_t host_func_stats_start; + + uint32_t total_bytes_received_hi; + uint32_t total_bytes_received_lo; + + uint32_t total_bytes_transmitted_hi; + uint32_t total_bytes_transmitted_lo; + + uint32_t total_unicast_packets_received_hi; + uint32_t total_unicast_packets_received_lo; + + uint32_t total_multicast_packets_received_hi; + uint32_t total_multicast_packets_received_lo; + + uint32_t total_broadcast_packets_received_hi; + uint32_t total_broadcast_packets_received_lo; + + uint32_t total_unicast_packets_transmitted_hi; + uint32_t total_unicast_packets_transmitted_lo; + + uint32_t total_multicast_packets_transmitted_hi; + uint32_t total_multicast_packets_transmitted_lo; + + uint32_t total_broadcast_packets_transmitted_hi; + uint32_t total_broadcast_packets_transmitted_lo; + + uint32_t valid_bytes_received_hi; + uint32_t valid_bytes_received_lo; + + uint32_t host_func_stats_end; +}; + +/* VIC definitions */ +#define VICSTATST_UIF_INDEX 2 + +/* + * stats collected for afex. + * NOTE: structure is exactly as expected to be received by the switch. + * order must remain exactly as is unless protocol changes ! + */ +struct afex_stats { + uint32_t tx_unicast_frames_hi; + uint32_t tx_unicast_frames_lo; + uint32_t tx_unicast_bytes_hi; + uint32_t tx_unicast_bytes_lo; + uint32_t tx_multicast_frames_hi; + uint32_t tx_multicast_frames_lo; + uint32_t tx_multicast_bytes_hi; + uint32_t tx_multicast_bytes_lo; + uint32_t tx_broadcast_frames_hi; + uint32_t tx_broadcast_frames_lo; + uint32_t tx_broadcast_bytes_hi; + uint32_t tx_broadcast_bytes_lo; + uint32_t tx_frames_discarded_hi; + uint32_t tx_frames_discarded_lo; + uint32_t tx_frames_dropped_hi; + uint32_t tx_frames_dropped_lo; + + uint32_t rx_unicast_frames_hi; + uint32_t rx_unicast_frames_lo; + uint32_t rx_unicast_bytes_hi; + uint32_t rx_unicast_bytes_lo; + uint32_t rx_multicast_frames_hi; + uint32_t rx_multicast_frames_lo; + uint32_t rx_multicast_bytes_hi; + uint32_t rx_multicast_bytes_lo; + uint32_t rx_broadcast_frames_hi; + uint32_t rx_broadcast_frames_lo; + uint32_t rx_broadcast_bytes_hi; + uint32_t rx_broadcast_bytes_lo; + uint32_t rx_frames_discarded_hi; + uint32_t rx_frames_discarded_lo; + uint32_t rx_frames_dropped_hi; + uint32_t rx_frames_dropped_lo; +}; + +/* To maintain backward compatibility between FW and drivers, new elements */ +/* should be added to the end of the structure. */ + +/* Per Port Statistics */ +struct port_info { + uint32_t size; /* size of this structure (i.e. sizeof(port_info)) */ + uint32_t enabled; /* 0 =Disabled, 1= Enabled */ + uint32_t link_speed; /* multiplier of 100Mb */ + uint32_t wol_support; /* WoL Support (i.e. Non-Zero if WOL supported ) */ + uint32_t flow_control; /* 802.3X Flow Ctrl. 0=off 1=RX 2=TX 3=RX&TX.*/ + uint32_t flex10; /* Flex10 mode enabled. non zero = yes */ + uint32_t rx_drops; /* RX Discards. Counters roll over, never reset */ + uint32_t rx_errors; /* RX Errors. Physical Port Stats L95, All PFs and NC-SI. + This is flagged by Consumer as an error. */ + uint32_t rx_uncast_lo; /* RX Unicast Packets. Free running counters: */ + uint32_t rx_uncast_hi; /* RX Unicast Packets. 
Free running counters: */ + uint32_t rx_mcast_lo; /* RX Multicast Packets */ + uint32_t rx_mcast_hi; /* RX Multicast Packets */ + uint32_t rx_bcast_lo; /* RX Broadcast Packets */ + uint32_t rx_bcast_hi; /* RX Broadcast Packets */ + uint32_t tx_uncast_lo; /* TX Unicast Packets */ + uint32_t tx_uncast_hi; /* TX Unicast Packets */ + uint32_t tx_mcast_lo; /* TX Multicast Packets */ + uint32_t tx_mcast_hi; /* TX Multicast Packets */ + uint32_t tx_bcast_lo; /* TX Broadcast Packets */ + uint32_t tx_bcast_hi; /* TX Broadcast Packets */ + uint32_t tx_errors; /* TX Errors */ + uint32_t tx_discards; /* TX Discards */ + uint32_t rx_frames_lo; /* RX Frames received */ + uint32_t rx_frames_hi; /* RX Frames received */ + uint32_t rx_bytes_lo; /* RX Bytes received */ + uint32_t rx_bytes_hi; /* RX Bytes received */ + uint32_t tx_frames_lo; /* TX Frames sent */ + uint32_t tx_frames_hi; /* TX Frames sent */ + uint32_t tx_bytes_lo; /* TX Bytes sent */ + uint32_t tx_bytes_hi; /* TX Bytes sent */ + uint32_t link_status; /* Port P Link Status. 1:0 bit for port enabled. + 1:1 bit for link good, + 2:1 Set if link changed between last poll. */ + uint32_t tx_pfc_frames_lo; /* PFC Frames sent. */ + uint32_t tx_pfc_frames_hi; /* PFC Frames sent. */ + uint32_t rx_pfc_frames_lo; /* PFC Frames Received. */ + uint32_t rx_pfc_frames_hi; /* PFC Frames Received. */ +}; + + +#define BNX2X_5710_FW_MAJOR_VERSION 7 +#define BNX2X_5710_FW_MINOR_VERSION 13 +#define BNX2X_5710_FW_REVISION_VERSION 11 +#define BNX2X_5710_FW_ENGINEERING_VERSION 0 +#define BNX2X_5710_FW_COMPILE_FLAGS 1 + + +/* + * attention bits + */ +struct atten_sp_status_block { + __le32 attn_bits; + __le32 attn_bits_ack; + uint8_t status_block_id; + uint8_t reserved0; + __le16 attn_bits_index; + __le32 reserved1; +}; + + +/* + * The eth aggregative context of Cstorm + */ +struct cstorm_eth_ag_context { + uint32_t __reserved0[10]; +}; + + +/* + * dmae command structure + */ +struct dmae_command { + uint32_t opcode; +#define DMAE_COMMAND_SRC (0x1 << 0) +#define DMAE_COMMAND_SRC_SHIFT 0 +#define DMAE_COMMAND_DST (0x3 << 1) +#define DMAE_COMMAND_DST_SHIFT 1 +#define DMAE_COMMAND_C_DST (0x1 << 3) +#define DMAE_COMMAND_C_DST_SHIFT 3 +#define DMAE_COMMAND_C_TYPE_ENABLE (0x1 << 4) +#define DMAE_COMMAND_C_TYPE_ENABLE_SHIFT 4 +#define DMAE_COMMAND_C_TYPE_CRC_ENABLE (0x1 << 5) +#define DMAE_COMMAND_C_TYPE_CRC_ENABLE_SHIFT 5 +#define DMAE_COMMAND_C_TYPE_CRC_OFFSET (0x7 << 6) +#define DMAE_COMMAND_C_TYPE_CRC_OFFSET_SHIFT 6 +#define DMAE_COMMAND_ENDIANITY (0x3 << 9) +#define DMAE_COMMAND_ENDIANITY_SHIFT 9 +#define DMAE_COMMAND_PORT (0x1 << 11) +#define DMAE_COMMAND_PORT_SHIFT 11 +#define DMAE_COMMAND_CRC_RESET (0x1 << 12) +#define DMAE_COMMAND_CRC_RESET_SHIFT 12 +#define DMAE_COMMAND_SRC_RESET (0x1 << 13) +#define DMAE_COMMAND_SRC_RESET_SHIFT 13 +#define DMAE_COMMAND_DST_RESET (0x1 << 14) +#define DMAE_COMMAND_DST_RESET_SHIFT 14 +#define DMAE_COMMAND_E1HVN (0x3 << 15) +#define DMAE_COMMAND_E1HVN_SHIFT 15 +#define DMAE_COMMAND_DST_VN (0x3 << 17) +#define DMAE_COMMAND_DST_VN_SHIFT 17 +#define DMAE_COMMAND_C_FUNC (0x1 << 19) +#define DMAE_COMMAND_C_FUNC_SHIFT 19 +#define DMAE_COMMAND_ERR_POLICY (0x3 << 20) +#define DMAE_COMMAND_ERR_POLICY_SHIFT 20 +#define DMAE_COMMAND_RESERVED0 (0x3FF << 22) +#define DMAE_COMMAND_RESERVED0_SHIFT 22 + uint32_t src_addr_lo; + uint32_t src_addr_hi; + uint32_t dst_addr_lo; + uint32_t dst_addr_hi; +#if defined(__BIG_ENDIAN) + uint16_t opcode_iov; +#define DMAE_COMMAND_SRC_VFID (0x3F << 0) +#define DMAE_COMMAND_SRC_VFID_SHIFT 0 +#define 
DMAE_COMMAND_SRC_VFPF (0x1 << 6) +#define DMAE_COMMAND_SRC_VFPF_SHIFT 6 +#define DMAE_COMMAND_RESERVED1 (0x1 << 7) +#define DMAE_COMMAND_RESERVED1_SHIFT 7 +#define DMAE_COMMAND_DST_VFID (0x3F << 8) +#define DMAE_COMMAND_DST_VFID_SHIFT 8 +#define DMAE_COMMAND_DST_VFPF (0x1 << 14) +#define DMAE_COMMAND_DST_VFPF_SHIFT 14 +#define DMAE_COMMAND_RESERVED2 (0x1 << 15) +#define DMAE_COMMAND_RESERVED2_SHIFT 15 + uint16_t len; +#elif defined(__LITTLE_ENDIAN) + uint16_t len; + uint16_t opcode_iov; +#define DMAE_COMMAND_SRC_VFID (0x3F << 0) +#define DMAE_COMMAND_SRC_VFID_SHIFT 0 +#define DMAE_COMMAND_SRC_VFPF (0x1 << 6) +#define DMAE_COMMAND_SRC_VFPF_SHIFT 6 +#define DMAE_COMMAND_RESERVED1 (0x1 << 7) +#define DMAE_COMMAND_RESERVED1_SHIFT 7 +#define DMAE_COMMAND_DST_VFID (0x3F << 8) +#define DMAE_COMMAND_DST_VFID_SHIFT 8 +#define DMAE_COMMAND_DST_VFPF (0x1 << 14) +#define DMAE_COMMAND_DST_VFPF_SHIFT 14 +#define DMAE_COMMAND_RESERVED2 (0x1 << 15) +#define DMAE_COMMAND_RESERVED2_SHIFT 15 +#endif + uint32_t comp_addr_lo; + uint32_t comp_addr_hi; + uint32_t comp_val; + uint32_t crc32; + uint32_t crc32_c; +#if defined(__BIG_ENDIAN) + uint16_t crc16_c; + uint16_t crc16; +#elif defined(__LITTLE_ENDIAN) + uint16_t crc16; + uint16_t crc16_c; +#endif +#if defined(__BIG_ENDIAN) + uint16_t reserved3; + uint16_t crc_t10; +#elif defined(__LITTLE_ENDIAN) + uint16_t crc_t10; + uint16_t reserved3; +#endif +#if defined(__BIG_ENDIAN) + uint16_t xsum8; + uint16_t xsum16; +#elif defined(__LITTLE_ENDIAN) + uint16_t xsum16; + uint16_t xsum8; +#endif +}; + + +/* + * common data for all protocols + */ +struct doorbell_hdr { + uint8_t header; +#define DOORBELL_HDR_RX (0x1 << 0) +#define DOORBELL_HDR_RX_SHIFT 0 +#define DOORBELL_HDR_DB_TYPE (0x1 << 1) +#define DOORBELL_HDR_DB_TYPE_SHIFT 1 +#define DOORBELL_HDR_DPM_SIZE (0x3 << 2) +#define DOORBELL_HDR_DPM_SIZE_SHIFT 2 +#define DOORBELL_HDR_CONN_TYPE (0xF << 4) +#define DOORBELL_HDR_CONN_TYPE_SHIFT 4 +}; + +/* + * Ethernet doorbell + */ +struct eth_tx_doorbell { +#if defined(__BIG_ENDIAN) + uint16_t npackets; + uint8_t params; +#define ETH_TX_DOORBELL_NUM_BDS (0x3F << 0) +#define ETH_TX_DOORBELL_NUM_BDS_SHIFT 0 +#define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG (0x1 << 6) +#define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG_SHIFT 6 +#define ETH_TX_DOORBELL_SPARE (0x1 << 7) +#define ETH_TX_DOORBELL_SPARE_SHIFT 7 + struct doorbell_hdr hdr; +#elif defined(__LITTLE_ENDIAN) + struct doorbell_hdr hdr; + uint8_t params; +#define ETH_TX_DOORBELL_NUM_BDS (0x3F << 0) +#define ETH_TX_DOORBELL_NUM_BDS_SHIFT 0 +#define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG (0x1 << 6) +#define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG_SHIFT 6 +#define ETH_TX_DOORBELL_SPARE (0x1 << 7) +#define ETH_TX_DOORBELL_SPARE_SHIFT 7 + uint16_t npackets; +#endif +}; + + +/* + * 3 lines. status block + */ +struct hc_status_block_e1x { + __le16 index_values[HC_SB_MAX_INDICES_E1X]; + __le16 running_index[HC_SB_MAX_SM]; + __le32 rsrv[11]; +}; + +/* + * host status block + */ +struct host_hc_status_block_e1x { + struct hc_status_block_e1x sb; +}; + + +/* + * 3 lines. status block + */ +struct hc_status_block_e2 { + __le16 index_values[HC_SB_MAX_INDICES_E2]; + __le16 running_index[HC_SB_MAX_SM]; + __le32 reserved[11]; +}; + +/* + * host status block + */ +struct host_hc_status_block_e2 { + struct hc_status_block_e2 sb; +}; + + +/* + * 5 lines. 
slow-path status block + */ +struct hc_sp_status_block { + __le16 index_values[HC_SP_SB_MAX_INDICES]; + __le16 running_index; + __le16 rsrv; + uint32_t rsrv1; +}; + +/* + * host status block + */ +struct host_sp_status_block { + struct atten_sp_status_block atten_status_block; + struct hc_sp_status_block sp_sb; +}; + + +/* + * IGU driver acknowledgment register + */ +struct igu_ack_register { +#if defined(__BIG_ENDIAN) + uint16_t sb_id_and_flags; +#define IGU_ACK_REGISTER_STATUS_BLOCK_ID (0x1F << 0) +#define IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT 0 +#define IGU_ACK_REGISTER_STORM_ID (0x7 << 5) +#define IGU_ACK_REGISTER_STORM_ID_SHIFT 5 +#define IGU_ACK_REGISTER_UPDATE_INDEX (0x1 << 8) +#define IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT 8 +#define IGU_ACK_REGISTER_INTERRUPT_MODE (0x3 << 9) +#define IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT 9 +#define IGU_ACK_REGISTER_RESERVED (0x1F << 11) +#define IGU_ACK_REGISTER_RESERVED_SHIFT 11 + uint16_t status_block_index; +#elif defined(__LITTLE_ENDIAN) + uint16_t status_block_index; + uint16_t sb_id_and_flags; +#define IGU_ACK_REGISTER_STATUS_BLOCK_ID (0x1F << 0) +#define IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT 0 +#define IGU_ACK_REGISTER_STORM_ID (0x7 << 5) +#define IGU_ACK_REGISTER_STORM_ID_SHIFT 5 +#define IGU_ACK_REGISTER_UPDATE_INDEX (0x1 << 8) +#define IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT 8 +#define IGU_ACK_REGISTER_INTERRUPT_MODE (0x3 << 9) +#define IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT 9 +#define IGU_ACK_REGISTER_RESERVED (0x1F << 11) +#define IGU_ACK_REGISTER_RESERVED_SHIFT 11 +#endif +}; + + +/* + * IGU driver acknowledgement register + */ +struct igu_backward_compatible { + uint32_t sb_id_and_flags; +#define IGU_BACKWARD_COMPATIBLE_SB_INDEX (0xFFFF << 0) +#define IGU_BACKWARD_COMPATIBLE_SB_INDEX_SHIFT 0 +#define IGU_BACKWARD_COMPATIBLE_SB_SELECT (0x1F << 16) +#define IGU_BACKWARD_COMPATIBLE_SB_SELECT_SHIFT 16 +#define IGU_BACKWARD_COMPATIBLE_SEGMENT_ACCESS (0x7 << 21) +#define IGU_BACKWARD_COMPATIBLE_SEGMENT_ACCESS_SHIFT 21 +#define IGU_BACKWARD_COMPATIBLE_BUPDATE (0x1 << 24) +#define IGU_BACKWARD_COMPATIBLE_BUPDATE_SHIFT 24 +#define IGU_BACKWARD_COMPATIBLE_ENABLE_INT (0x3 << 25) +#define IGU_BACKWARD_COMPATIBLE_ENABLE_INT_SHIFT 25 +#define IGU_BACKWARD_COMPATIBLE_RESERVED_0 (0x1F << 27) +#define IGU_BACKWARD_COMPATIBLE_RESERVED_0_SHIFT 27 + uint32_t reserved_2; +}; + + +/* + * IGU driver acknowledgement register + */ +struct igu_regular { + uint32_t sb_id_and_flags; +#define IGU_REGULAR_SB_INDEX (0xFFFFF << 0) +#define IGU_REGULAR_SB_INDEX_SHIFT 0 +#define IGU_REGULAR_RESERVED0 (0x1 << 20) +#define IGU_REGULAR_RESERVED0_SHIFT 20 +#define IGU_REGULAR_SEGMENT_ACCESS (0x7 << 21) +#define IGU_REGULAR_SEGMENT_ACCESS_SHIFT 21 +#define IGU_REGULAR_BUPDATE (0x1 << 24) +#define IGU_REGULAR_BUPDATE_SHIFT 24 +#define IGU_REGULAR_ENABLE_INT (0x3 << 25) +#define IGU_REGULAR_ENABLE_INT_SHIFT 25 +#define IGU_REGULAR_RESERVED_1 (0x1 << 27) +#define IGU_REGULAR_RESERVED_1_SHIFT 27 +#define IGU_REGULAR_CLEANUP_TYPE (0x3 << 28) +#define IGU_REGULAR_CLEANUP_TYPE_SHIFT 28 +#define IGU_REGULAR_CLEANUP_SET (0x1 << 30) +#define IGU_REGULAR_CLEANUP_SET_SHIFT 30 +#define IGU_REGULAR_BCLEANUP (0x1 << 31) +#define IGU_REGULAR_BCLEANUP_SHIFT 31 + uint32_t reserved_2; +}; + +/* + * IGU driver acknowledgement register + */ +union igu_consprod_reg { + struct igu_regular regular; + struct igu_backward_compatible backward_compatible; +}; + + +/* + * Igu control commands + */ +enum igu_ctrl_cmd { + IGU_CTRL_CMD_TYPE_RD, + IGU_CTRL_CMD_TYPE_WR, + MAX_IGU_CTRL_CMD}; + + +/* + * Control 
register for the IGU command register + */ +struct igu_ctrl_reg { + uint32_t ctrl_data; +#define IGU_CTRL_REG_ADDRESS (0xFFF << 0) +#define IGU_CTRL_REG_ADDRESS_SHIFT 0 +#define IGU_CTRL_REG_FID (0x7F << 12) +#define IGU_CTRL_REG_FID_SHIFT 12 +#define IGU_CTRL_REG_RESERVED (0x1 << 19) +#define IGU_CTRL_REG_RESERVED_SHIFT 19 +#define IGU_CTRL_REG_TYPE (0x1 << 20) +#define IGU_CTRL_REG_TYPE_SHIFT 20 +#define IGU_CTRL_REG_UNUSED (0x7FF << 21) +#define IGU_CTRL_REG_UNUSED_SHIFT 21 +}; + + +/* + * Igu interrupt command + */ +enum igu_int_cmd { + IGU_INT_ENABLE, + IGU_INT_DISABLE, + IGU_INT_NOP, + IGU_INT_NOP2, + MAX_IGU_INT_CMD}; + + +/* + * Igu segments + */ +enum igu_seg_access { + IGU_SEG_ACCESS_NORM, + IGU_SEG_ACCESS_DEF, + IGU_SEG_ACCESS_ATTN, + MAX_IGU_SEG_ACCESS}; + + +/* + * Parser parsing flags field + */ +struct parsing_flags { + __le16 flags; +#define PARSING_FLAGS_ETHERNET_ADDRESS_TYPE (0x1 << 0) +#define PARSING_FLAGS_ETHERNET_ADDRESS_TYPE_SHIFT 0 +#define PARSING_FLAGS_VLAN (0x1 << 1) +#define PARSING_FLAGS_VLAN_SHIFT 1 +#define PARSING_FLAGS_EXTRA_VLAN (0x1 << 2) +#define PARSING_FLAGS_EXTRA_VLAN_SHIFT 2 +#define PARSING_FLAGS_OVER_ETHERNET_PROTOCOL (0x3 << 3) +#define PARSING_FLAGS_OVER_ETHERNET_PROTOCOL_SHIFT 3 +#define PARSING_FLAGS_IP_OPTIONS (0x1 << 5) +#define PARSING_FLAGS_IP_OPTIONS_SHIFT 5 +#define PARSING_FLAGS_FRAGMENTATION_STATUS (0x1 << 6) +#define PARSING_FLAGS_FRAGMENTATION_STATUS_SHIFT 6 +#define PARSING_FLAGS_OVER_IP_PROTOCOL (0x3 << 7) +#define PARSING_FLAGS_OVER_IP_PROTOCOL_SHIFT 7 +#define PARSING_FLAGS_PURE_ACK_INDICATION (0x1 << 9) +#define PARSING_FLAGS_PURE_ACK_INDICATION_SHIFT 9 +#define PARSING_FLAGS_TCP_OPTIONS_EXIST (0x1 << 10) +#define PARSING_FLAGS_TCP_OPTIONS_EXIST_SHIFT 10 +#define PARSING_FLAGS_TIME_STAMP_EXIST_FLAG (0x1 << 11) +#define PARSING_FLAGS_TIME_STAMP_EXIST_FLAG_SHIFT 11 +#define PARSING_FLAGS_CONNECTION_MATCH (0x1 << 12) +#define PARSING_FLAGS_CONNECTION_MATCH_SHIFT 12 +#define PARSING_FLAGS_LLC_SNAP (0x1 << 13) +#define PARSING_FLAGS_LLC_SNAP_SHIFT 13 +#define PARSING_FLAGS_RESERVED0 (0x3 << 14) +#define PARSING_FLAGS_RESERVED0_SHIFT 14 +}; + + +/* + * Parsing flags for TCP ACK type + */ +enum prs_flags_ack_type { + PRS_FLAG_PUREACK_PIGGY, + PRS_FLAG_PUREACK_PURE, + MAX_PRS_FLAGS_ACK_TYPE}; + + +/* + * Parsing flags for Ethernet address type + */ +enum prs_flags_eth_addr_type { + PRS_FLAG_ETHTYPE_NON_UNICAST, + PRS_FLAG_ETHTYPE_UNICAST, + MAX_PRS_FLAGS_ETH_ADDR_TYPE}; + + +/* + * Parsing flags for over-ethernet protocol + */ +enum prs_flags_over_eth { + PRS_FLAG_OVERETH_UNKNOWN, + PRS_FLAG_OVERETH_IPV4, + PRS_FLAG_OVERETH_IPV6, + PRS_FLAG_OVERETH_LLCSNAP_UNKNOWN, + MAX_PRS_FLAGS_OVER_ETH}; + + +/* + * Parsing flags for over-IP protocol + */ +enum prs_flags_over_ip { + PRS_FLAG_OVERIP_UNKNOWN, + PRS_FLAG_OVERIP_TCP, + PRS_FLAG_OVERIP_UDP, + MAX_PRS_FLAGS_OVER_IP}; + + +/* + * SDM operation gen command (generate aggregative interrupt) + */ +struct sdm_op_gen { + __le32 command; +#define SDM_OP_GEN_COMP_PARAM (0x1F << 0) +#define SDM_OP_GEN_COMP_PARAM_SHIFT 0 +#define SDM_OP_GEN_COMP_TYPE (0x7 << 5) +#define SDM_OP_GEN_COMP_TYPE_SHIFT 5 +#define SDM_OP_GEN_AGG_VECT_IDX (0xFF << 8) +#define SDM_OP_GEN_AGG_VECT_IDX_SHIFT 8 +#define SDM_OP_GEN_AGG_VECT_IDX_VALID (0x1 << 16) +#define SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT 16 +#define SDM_OP_GEN_RESERVED (0x7FFF << 17) +#define SDM_OP_GEN_RESERVED_SHIFT 17 +}; + + +/* + * Timers connection context + */ +struct timers_block_context { + uint32_t __reserved_0; + uint32_t __reserved_1; + uint32_t 
__reserved_2; + uint32_t flags; +#define __TIMERS_BLOCK_CONTEXT_NUM_OF_ACTIVE_TIMERS (0x3 << 0) +#define __TIMERS_BLOCK_CONTEXT_NUM_OF_ACTIVE_TIMERS_SHIFT 0 +#define TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG (0x1 << 2) +#define TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG_SHIFT 2 +#define __TIMERS_BLOCK_CONTEXT_RESERVED0 (0x1FFFFFFF << 3) +#define __TIMERS_BLOCK_CONTEXT_RESERVED0_SHIFT 3 +}; + + +/* + * The eth aggregative context of Tstorm + */ +struct tstorm_eth_ag_context { + uint32_t __reserved0[14]; +}; + + +/* + * The eth aggregative context of Ustorm + */ +struct ustorm_eth_ag_context { + uint32_t __reserved0; +#if defined(__BIG_ENDIAN) + uint8_t cdu_usage; + uint8_t __reserved2; + uint16_t __reserved1; +#elif defined(__LITTLE_ENDIAN) + uint16_t __reserved1; + uint8_t __reserved2; + uint8_t cdu_usage; +#endif + uint32_t __reserved3[6]; +}; + + +/* + * The eth aggregative context of Xstorm + */ +struct xstorm_eth_ag_context { + uint32_t reserved0; +#if defined(__BIG_ENDIAN) + uint8_t cdu_reserved; + uint8_t reserved2; + uint16_t reserved1; +#elif defined(__LITTLE_ENDIAN) + uint16_t reserved1; + uint8_t reserved2; + uint8_t cdu_reserved; +#endif + uint32_t reserved3[30]; +}; + + +/* + * doorbell message sent to the chip + */ +struct doorbell { +#if defined(__BIG_ENDIAN) + uint16_t zero_fill2; + uint8_t zero_fill1; + struct doorbell_hdr header; +#elif defined(__LITTLE_ENDIAN) + struct doorbell_hdr header; + uint8_t zero_fill1; + uint16_t zero_fill2; +#endif +}; + + +/* + * doorbell message sent to the chip + */ +struct doorbell_set_prod { +#if defined(__BIG_ENDIAN) + uint16_t prod; + uint8_t zero_fill1; + struct doorbell_hdr header; +#elif defined(__LITTLE_ENDIAN) + struct doorbell_hdr header; + uint8_t zero_fill1; + uint16_t prod; +#endif +}; + + +struct regpair { + __le32 lo; + __le32 hi; +}; + + +struct regpair_native { + uint32_t lo; + uint32_t hi; +}; + + +/* + * Classify rule opcodes in E2/E3 + */ +enum classify_rule { + CLASSIFY_RULE_OPCODE_MAC, + CLASSIFY_RULE_OPCODE_VLAN, + CLASSIFY_RULE_OPCODE_PAIR, + CLASSIFY_RULE_OPCODE_IMAC_VNI, + MAX_CLASSIFY_RULE}; + + +/* + * Classify rule types in E2/E3 + */ +enum classify_rule_action_type { + CLASSIFY_RULE_REMOVE, + CLASSIFY_RULE_ADD, + MAX_CLASSIFY_RULE_ACTION_TYPE}; + + +/* + * client init ramrod data + */ +struct client_init_general_data { + uint8_t client_id; + uint8_t statistics_counter_id; + uint8_t statistics_en_flg; + uint8_t is_fcoe_flg; + uint8_t activate_flg; + uint8_t sp_client_id; + __le16 mtu; + uint8_t statistics_zero_flg; + uint8_t func_id; + uint8_t cos; + uint8_t traffic_type; + uint8_t fp_hsi_ver; + uint8_t reserved0[3]; +}; + + +/* + * client init rx data + */ +struct client_init_rx_data { + uint8_t tpa_en; +#define CLIENT_INIT_RX_DATA_TPA_EN_IPV4 (0x1 << 0) +#define CLIENT_INIT_RX_DATA_TPA_EN_IPV4_SHIFT 0 +#define CLIENT_INIT_RX_DATA_TPA_EN_IPV6 (0x1 << 1) +#define CLIENT_INIT_RX_DATA_TPA_EN_IPV6_SHIFT 1 +#define CLIENT_INIT_RX_DATA_TPA_MODE (0x1 << 2) +#define CLIENT_INIT_RX_DATA_TPA_MODE_SHIFT 2 +#define CLIENT_INIT_RX_DATA_TPA_OVER_VLAN_DISABLE (0x1 << 3) +#define CLIENT_INIT_RX_DATA_TPA_OVER_VLAN_DISABLE_SHIFT 3 +#define CLIENT_INIT_RX_DATA_RESERVED5 (0xF << 4) +#define CLIENT_INIT_RX_DATA_RESERVED5_SHIFT 4 + uint8_t vmqueue_mode_en_flg; + uint8_t extra_data_over_sgl_en_flg; + uint8_t cache_line_alignment_log_size; + uint8_t enable_dynamic_hc; + uint8_t max_sges_for_packet; + uint8_t client_qzone_id; + uint8_t drop_ip_cs_err_flg; + uint8_t drop_tcp_cs_err_flg; + uint8_t drop_ttl0_flg; + uint8_t drop_udp_cs_err_flg; + uint8_t 
inner_vlan_removal_enable_flg; + uint8_t outer_vlan_removal_enable_flg; + uint8_t status_block_id; + uint8_t rx_sb_index_number; + uint8_t dont_verify_rings_pause_thr_flg; + uint8_t max_tpa_queues; + uint8_t silent_vlan_removal_flg; + __le16 max_bytes_on_bd; + __le16 sge_buff_size; + uint8_t approx_mcast_engine_id; + uint8_t rss_engine_id; + struct regpair bd_page_base; + struct regpair sge_page_base; + struct regpair cqe_page_base; + uint8_t is_leading_rss; + uint8_t is_approx_mcast; + __le16 max_agg_size; + __le16 state; +#define CLIENT_INIT_RX_DATA_UCAST_DROP_ALL (0x1 << 0) +#define CLIENT_INIT_RX_DATA_UCAST_DROP_ALL_SHIFT 0 +#define CLIENT_INIT_RX_DATA_UCAST_ACCEPT_ALL (0x1 << 1) +#define CLIENT_INIT_RX_DATA_UCAST_ACCEPT_ALL_SHIFT 1 +#define CLIENT_INIT_RX_DATA_UCAST_ACCEPT_UNMATCHED (0x1 << 2) +#define CLIENT_INIT_RX_DATA_UCAST_ACCEPT_UNMATCHED_SHIFT 2 +#define CLIENT_INIT_RX_DATA_MCAST_DROP_ALL (0x1 << 3) +#define CLIENT_INIT_RX_DATA_MCAST_DROP_ALL_SHIFT 3 +#define CLIENT_INIT_RX_DATA_MCAST_ACCEPT_ALL (0x1 << 4) +#define CLIENT_INIT_RX_DATA_MCAST_ACCEPT_ALL_SHIFT 4 +#define CLIENT_INIT_RX_DATA_BCAST_ACCEPT_ALL (0x1 << 5) +#define CLIENT_INIT_RX_DATA_BCAST_ACCEPT_ALL_SHIFT 5 +#define CLIENT_INIT_RX_DATA_ACCEPT_ANY_VLAN (0x1 << 6) +#define CLIENT_INIT_RX_DATA_ACCEPT_ANY_VLAN_SHIFT 6 +#define CLIENT_INIT_RX_DATA_RESERVED2 (0x1FF << 7) +#define CLIENT_INIT_RX_DATA_RESERVED2_SHIFT 7 + __le16 cqe_pause_thr_low; + __le16 cqe_pause_thr_high; + __le16 bd_pause_thr_low; + __le16 bd_pause_thr_high; + __le16 sge_pause_thr_low; + __le16 sge_pause_thr_high; + __le16 rx_cos_mask; + __le16 silent_vlan_value; + __le16 silent_vlan_mask; + uint8_t handle_ptp_pkts_flg; + uint8_t reserved6[3]; + __le32 reserved7; +}; + +/* + * client init tx data + */ +struct client_init_tx_data { + uint8_t enforce_security_flg; + uint8_t tx_status_block_id; + uint8_t tx_sb_index_number; + uint8_t tss_leading_client_id; + uint8_t tx_switching_flg; + uint8_t anti_spoofing_flg; + __le16 default_vlan; + struct regpair tx_bd_page_base; + __le16 state; +#define CLIENT_INIT_TX_DATA_UCAST_ACCEPT_ALL (0x1 << 0) +#define CLIENT_INIT_TX_DATA_UCAST_ACCEPT_ALL_SHIFT 0 +#define CLIENT_INIT_TX_DATA_MCAST_ACCEPT_ALL (0x1 << 1) +#define CLIENT_INIT_TX_DATA_MCAST_ACCEPT_ALL_SHIFT 1 +#define CLIENT_INIT_TX_DATA_BCAST_ACCEPT_ALL (0x1 << 2) +#define CLIENT_INIT_TX_DATA_BCAST_ACCEPT_ALL_SHIFT 2 +#define CLIENT_INIT_TX_DATA_ACCEPT_ANY_VLAN (0x1 << 3) +#define CLIENT_INIT_TX_DATA_ACCEPT_ANY_VLAN_SHIFT 3 +#define CLIENT_INIT_TX_DATA_RESERVED0 (0xFFF << 4) +#define CLIENT_INIT_TX_DATA_RESERVED0_SHIFT 4 + uint8_t default_vlan_flg; + uint8_t force_default_pri_flg; + uint8_t tunnel_lso_inc_ip_id; + uint8_t refuse_outband_vlan_flg; + uint8_t tunnel_non_lso_pcsum_location; + uint8_t tunnel_non_lso_outer_ip_csum_location; +}; + +/* + * client init ramrod data + */ +struct client_init_ramrod_data { + struct client_init_general_data general; + struct client_init_rx_data rx; + struct client_init_tx_data tx; +}; + + +/* + * client update ramrod data + */ +struct client_update_ramrod_data { + uint8_t client_id; + uint8_t func_id; + uint8_t inner_vlan_removal_enable_flg; + uint8_t inner_vlan_removal_change_flg; + uint8_t outer_vlan_removal_enable_flg; + uint8_t outer_vlan_removal_change_flg; + uint8_t anti_spoofing_enable_flg; + uint8_t anti_spoofing_change_flg; + uint8_t activate_flg; + uint8_t activate_change_flg; + __le16 default_vlan; + uint8_t default_vlan_enable_flg; + uint8_t default_vlan_change_flg; + __le16 silent_vlan_value; + __le16 
silent_vlan_mask; + uint8_t silent_vlan_removal_flg; + uint8_t silent_vlan_change_flg; + uint8_t refuse_outband_vlan_flg; + uint8_t refuse_outband_vlan_change_flg; + uint8_t tx_switching_flg; + uint8_t tx_switching_change_flg; + uint8_t handle_ptp_pkts_flg; + uint8_t handle_ptp_pkts_change_flg; + __le16 reserved1; + __le32 echo; +}; + + +/* + * The eth storm context of Cstorm + */ +struct cstorm_eth_st_context { + uint32_t __reserved0[4]; +}; + + +struct double_regpair { + uint32_t regpair0_lo; + uint32_t regpair0_hi; + uint32_t regpair1_lo; + uint32_t regpair1_hi; +}; + + +/* + * 2nd parse bd type used in ethernet tx BDs + */ +enum eth_2nd_parse_bd_type { + ETH_2ND_PARSE_BD_TYPE_LSO_TUNNEL, + MAX_ETH_2ND_PARSE_BD_TYPE}; + + +/* + * Ethernet address typesm used in ethernet tx BDs + */ +enum eth_addr_type { + UNKNOWN_ADDRESS, + UNICAST_ADDRESS, + MULTICAST_ADDRESS, + BROADCAST_ADDRESS, + MAX_ETH_ADDR_TYPE}; + + +/* + * + */ +struct eth_classify_cmd_header { + uint8_t cmd_general_data; +#define ETH_CLASSIFY_CMD_HEADER_RX_CMD (0x1 << 0) +#define ETH_CLASSIFY_CMD_HEADER_RX_CMD_SHIFT 0 +#define ETH_CLASSIFY_CMD_HEADER_TX_CMD (0x1 << 1) +#define ETH_CLASSIFY_CMD_HEADER_TX_CMD_SHIFT 1 +#define ETH_CLASSIFY_CMD_HEADER_OPCODE (0x3 << 2) +#define ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT 2 +#define ETH_CLASSIFY_CMD_HEADER_IS_ADD (0x1 << 4) +#define ETH_CLASSIFY_CMD_HEADER_IS_ADD_SHIFT 4 +#define ETH_CLASSIFY_CMD_HEADER_RESERVED0 (0x7 << 5) +#define ETH_CLASSIFY_CMD_HEADER_RESERVED0_SHIFT 5 + uint8_t func_id; + uint8_t client_id; + uint8_t reserved1; +}; + + +/* + * header for eth classification config ramrod + */ +struct eth_classify_header { + uint8_t rule_cnt; + uint8_t warning_on_error; + __le16 reserved1; + __le32 echo; +}; + + +/* + * Command for adding/removing a Inner-MAC/VNI classification rule + */ +struct eth_classify_imac_vni_cmd { + struct eth_classify_cmd_header header; + __le32 vni; + __le16 imac_lsb; + __le16 imac_mid; + __le16 imac_msb; + __le16 reserved1; +}; + + +/* + * Command for adding/removing a MAC classification rule + */ +struct eth_classify_mac_cmd { + struct eth_classify_cmd_header header; + __le16 reserved0; + __le16 inner_mac; + __le16 mac_lsb; + __le16 mac_mid; + __le16 mac_msb; + __le16 reserved1; +}; + + +/* + * Command for adding/removing a MAC-VLAN pair classification rule + */ +struct eth_classify_pair_cmd { + struct eth_classify_cmd_header header; + __le16 reserved0; + __le16 inner_mac; + __le16 mac_lsb; + __le16 mac_mid; + __le16 mac_msb; + __le16 vlan; +}; + + +/* + * Command for adding/removing a VLAN classification rule + */ +struct eth_classify_vlan_cmd { + struct eth_classify_cmd_header header; + __le32 reserved0; + __le32 reserved1; + __le16 reserved2; + __le16 vlan; +}; + +/* + * union for eth classification rule + */ +union eth_classify_rule_cmd { + struct eth_classify_mac_cmd mac; + struct eth_classify_vlan_cmd vlan; + struct eth_classify_pair_cmd pair; + struct eth_classify_imac_vni_cmd imac_vni; +}; + +/* + * parameters for eth classification configuration ramrod + */ +struct eth_classify_rules_ramrod_data { + struct eth_classify_header header; + union eth_classify_rule_cmd rules[CLASSIFY_RULES_COUNT]; +}; + + +/* + * The data contain client ID need to the ramrod + */ +struct eth_common_ramrod_data { + __le32 client_id; + __le32 reserved1; +}; + + +/* + * The eth storm context of Ustorm + */ +struct ustorm_eth_st_context { + uint32_t reserved0[52]; +}; + +/* + * The eth storm context of Tstorm + */ +struct tstorm_eth_st_context { + uint32_t __reserved0[28]; 
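For orientation, a minimal editorial sketch (not part of this patch) of how the classification definitions above fit together: the cmd_general_data byte of an eth_classify_cmd_header is composed from the ETH_CLASSIFY_CMD_HEADER_* fields, with the opcode taken from enum classify_rule. The helper name below is hypothetical and assumes only the definitions in this header:

/* Compose cmd_general_data for an "add MAC rule on the RX path" command:
 * RX_CMD bit set, OPCODE = CLASSIFY_RULE_OPCODE_MAC, IS_ADD bit set. */
static inline uint8_t classify_hdr_add_rx_mac(void)
{
	return (uint8_t)((1 << ETH_CLASSIFY_CMD_HEADER_RX_CMD_SHIFT) |
			 (CLASSIFY_RULE_OPCODE_MAC << ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT) |
			 (1 << ETH_CLASSIFY_CMD_HEADER_IS_ADD_SHIFT));
}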
+}; + +/* + * The eth storm context of Xstorm + */ +struct xstorm_eth_st_context { + uint32_t reserved0[60]; +}; + +/* + * Ethernet connection context + */ +struct eth_context { + struct ustorm_eth_st_context ustorm_st_context; + struct tstorm_eth_st_context tstorm_st_context; + struct xstorm_eth_ag_context xstorm_ag_context; + struct tstorm_eth_ag_context tstorm_ag_context; + struct cstorm_eth_ag_context cstorm_ag_context; + struct ustorm_eth_ag_context ustorm_ag_context; + struct timers_block_context timers_context; + struct xstorm_eth_st_context xstorm_st_context; + struct cstorm_eth_st_context cstorm_st_context; +}; + + +/* + * union for sgl and raw data. + */ +union eth_sgl_or_raw_data { + __le16 sgl[8]; + uint32_t raw_data[4]; +}; + +/* + * eth FP end aggregation CQE parameters struct + */ +struct eth_end_agg_rx_cqe { + uint8_t type_error_flags; +#define ETH_END_AGG_RX_CQE_TYPE (0x3 << 0) +#define ETH_END_AGG_RX_CQE_TYPE_SHIFT 0 +#define ETH_END_AGG_RX_CQE_SGL_RAW_SEL (0x1 << 2) +#define ETH_END_AGG_RX_CQE_SGL_RAW_SEL_SHIFT 2 +#define ETH_END_AGG_RX_CQE_RESERVED0 (0x1F << 3) +#define ETH_END_AGG_RX_CQE_RESERVED0_SHIFT 3 + uint8_t reserved1; + uint8_t queue_index; + uint8_t reserved2; + __le32 timestamp_delta; + __le16 num_of_coalesced_segs; + __le16 pkt_len; + uint8_t pure_ack_count; + uint8_t reserved3; + __le16 reserved4; + union eth_sgl_or_raw_data sgl_or_raw_data; + __le32 padding[8]; +}; + +/* + * Ethernet error code + */ +enum eth_error_code { + ETH_OK = 0x00, + ETH_RAMROD_DATA_READ_ERROR = 0x01, + ETH_FILTERS_FUNC_NOT_ENABLED, + ETH_FILTERS_MAC_ADD_FAIL_CAM_FULL, + ETH_FILTERS_MAC_DEL_FAIL_NOF, + ETH_FILTERS_PAIR_ADD_FAIL_CAM_FULL, + ETH_FILTERS_PAIR_DEL_FAIL_NOF, + ETH_FILTERS_VLAN_ADD_FAIL_CAM_FULL, + ETH_FILTERS_VLAN_ADD_FAIL_DUP_TT, + ETH_FILTERS_VLAN_DEL_FAIL_NOF, + ETH_FILTERS_VLAN_DEL_FAIL_NOF_TT, + ETH_FILTERS_VLAN_DEL_FAIL_NO_VLAN, + ETH_FILTERS_IMAC_VNI_ADD_UNALLOWED_IN_TX, + ETH_FILTERS_IMAC_VNI_DEL_UNALLOWED_IN_TX, + ETH_FILTERS_IMAC_VNI_ADD_FAIL_CAM_FULL, + ETH_FILTERS_IMAC_VNI_DEL_FAIL_NOF, + MAX_ETH_ERROR_CODE}; + +/* + * regular eth FP CQE parameters struct + */ +struct eth_fast_path_rx_cqe { + uint8_t type_error_flags; +#define ETH_FAST_PATH_RX_CQE_TYPE (0x3 << 0) +#define ETH_FAST_PATH_RX_CQE_TYPE_SHIFT 0 +#define ETH_FAST_PATH_RX_CQE_SGL_RAW_SEL (0x1 << 2) +#define ETH_FAST_PATH_RX_CQE_SGL_RAW_SEL_SHIFT 2 +#define ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG (0x1 << 3) +#define ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG_SHIFT 3 +#define ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG (0x1 << 4) +#define ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG_SHIFT 4 +#define ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG (0x1 << 5) +#define ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG_SHIFT 5 +#define ETH_FAST_PATH_RX_CQE_PTP_PKT (0x1 << 6) +#define ETH_FAST_PATH_RX_CQE_PTP_PKT_SHIFT 6 +#define ETH_FAST_PATH_RX_CQE_RESERVED0 (0x1 << 7) +#define ETH_FAST_PATH_RX_CQE_RESERVED0_SHIFT 7 + uint8_t status_flags; +#define ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE (0x7 << 0) +#define ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE_SHIFT 0 +#define ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG (0x1 << 3) +#define ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG_SHIFT 3 +#define ETH_FAST_PATH_RX_CQE_BROADCAST_FLG (0x1 << 4) +#define ETH_FAST_PATH_RX_CQE_BROADCAST_FLG_SHIFT 4 +#define ETH_FAST_PATH_RX_CQE_MAC_MATCH_FLG (0x1 << 5) +#define ETH_FAST_PATH_RX_CQE_MAC_MATCH_FLG_SHIFT 5 +#define ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG (0x1 << 6) +#define ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG_SHIFT 6 +#define ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG (0x1 
<< 7) +#define ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG_SHIFT 7 + uint8_t queue_index; + uint8_t placement_offset; + __le32 rss_hash_result; + __le16 vlan_tag; + __le16 pkt_len_or_gro_seg_len; + __le16 len_on_bd; + struct parsing_flags pars_flags; + union eth_sgl_or_raw_data sgl_or_raw_data; + uint8_t tunn_type; + uint8_t tunn_inner_hdrs_offset; + __le16 reserved1; + __le32 tunn_tenant_id; + __le32 padding[5]; + __le32 marker; +}; + + +/* + * Command for setting classification flags for a client + */ +struct eth_filter_rules_cmd { + uint8_t cmd_general_data; +#define ETH_FILTER_RULES_CMD_RX_CMD (0x1 << 0) +#define ETH_FILTER_RULES_CMD_RX_CMD_SHIFT 0 +#define ETH_FILTER_RULES_CMD_TX_CMD (0x1 << 1) +#define ETH_FILTER_RULES_CMD_TX_CMD_SHIFT 1 +#define ETH_FILTER_RULES_CMD_RESERVED0 (0x3F << 2) +#define ETH_FILTER_RULES_CMD_RESERVED0_SHIFT 2 + uint8_t func_id; + uint8_t client_id; + uint8_t reserved1; + __le16 state; +#define ETH_FILTER_RULES_CMD_UCAST_DROP_ALL (0x1 << 0) +#define ETH_FILTER_RULES_CMD_UCAST_DROP_ALL_SHIFT 0 +#define ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL (0x1 << 1) +#define ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL_SHIFT 1 +#define ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED (0x1 << 2) +#define ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED_SHIFT 2 +#define ETH_FILTER_RULES_CMD_MCAST_DROP_ALL (0x1 << 3) +#define ETH_FILTER_RULES_CMD_MCAST_DROP_ALL_SHIFT 3 +#define ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL (0x1 << 4) +#define ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL_SHIFT 4 +#define ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL (0x1 << 5) +#define ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL_SHIFT 5 +#define ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN (0x1 << 6) +#define ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN_SHIFT 6 +#define ETH_FILTER_RULES_CMD_RESERVED2 (0x1FF << 7) +#define ETH_FILTER_RULES_CMD_RESERVED2_SHIFT 7 + __le16 reserved3; + struct regpair reserved4; +}; + + +/* + * parameters for eth classification filters ramrod + */ +struct eth_filter_rules_ramrod_data { + struct eth_classify_header header; + struct eth_filter_rules_cmd rules[FILTER_RULES_COUNT]; +}; + + +/* + * Hsi version + */ +enum eth_fp_hsi_ver { + ETH_FP_HSI_VER_0, + ETH_FP_HSI_VER_1, + ETH_FP_HSI_VER_2, + MAX_ETH_FP_HSI_VER}; + + +/* + * parameters for eth classification configuration ramrod + */ +struct eth_general_rules_ramrod_data { + struct eth_classify_header header; + union eth_classify_rule_cmd rules[CLASSIFY_RULES_COUNT]; +}; + + +/* + * The data for Halt ramrod + */ +struct eth_halt_ramrod_data { + __le32 client_id; + __le32 reserved0; +}; + + +/* + * destination and source mac address. + */ +struct eth_mac_addresses { +#if defined(__BIG_ENDIAN) + __le16 dst_mid; + __le16 dst_lo; +#elif defined(__LITTLE_ENDIAN) + __le16 dst_lo; + __le16 dst_mid; +#endif +#if defined(__BIG_ENDIAN) + __le16 src_lo; + __le16 dst_hi; +#elif defined(__LITTLE_ENDIAN) + __le16 dst_hi; + __le16 src_lo; +#endif +#if defined(__BIG_ENDIAN) + __le16 src_hi; + __le16 src_mid; +#elif defined(__LITTLE_ENDIAN) + __le16 src_mid; + __le16 src_hi; +#endif +}; + + +/* + * tunneling related data. + */ +struct eth_tunnel_data { + __le16 dst_lo; + __le16 dst_mid; + __le16 dst_hi; + __le16 fw_ip_hdr_csum; + __le16 pseudo_csum; + uint8_t ip_hdr_start_inner_w; + uint8_t flags; +#define ETH_TUNNEL_DATA_IPV6_OUTER (0x1 << 0) +#define ETH_TUNNEL_DATA_IPV6_OUTER_SHIFT 0 +#define ETH_TUNNEL_DATA_RESERVED (0x7F << 1) +#define ETH_TUNNEL_DATA_RESERVED_SHIFT 1 +}; + +/* + * union for mac addresses and for tunneling data. considered as tunneling data only if (tunnel_exist == 1). 
+ */ +union eth_mac_addr_or_tunnel_data { + struct eth_mac_addresses mac_addr; + struct eth_tunnel_data tunnel_data; +}; + + +/* + * Command for setting multicast classification for a client + */ +struct eth_multicast_rules_cmd { + uint8_t cmd_general_data; +#define ETH_MULTICAST_RULES_CMD_RX_CMD (0x1 << 0) +#define ETH_MULTICAST_RULES_CMD_RX_CMD_SHIFT 0 +#define ETH_MULTICAST_RULES_CMD_TX_CMD (0x1 << 1) +#define ETH_MULTICAST_RULES_CMD_TX_CMD_SHIFT 1 +#define ETH_MULTICAST_RULES_CMD_IS_ADD (0x1 << 2) +#define ETH_MULTICAST_RULES_CMD_IS_ADD_SHIFT 2 +#define ETH_MULTICAST_RULES_CMD_RESERVED0 (0x1F << 3) +#define ETH_MULTICAST_RULES_CMD_RESERVED0_SHIFT 3 + uint8_t func_id; + uint8_t bin_id; + uint8_t engine_id; + __le32 reserved2; + struct regpair reserved3; +}; + + +/* + * parameters for multicast classification ramrod + */ +struct eth_multicast_rules_ramrod_data { + struct eth_classify_header header; + struct eth_multicast_rules_cmd rules[MULTICAST_RULES_COUNT]; +}; + + +/* + * Place holder for ramrods protocol specific data + */ +struct ramrod_data { + __le32 data_lo; + __le32 data_hi; +}; + +/* + * union for ramrod data for Ethernet protocol (CQE) (force size of 16 bits) + */ +union eth_ramrod_data { + struct ramrod_data general; +}; + + +/* + * RSS toeplitz hash type, as reported in CQE + */ +enum eth_rss_hash_type { + DEFAULT_HASH_TYPE, + IPV4_HASH_TYPE, + TCP_IPV4_HASH_TYPE, + IPV6_HASH_TYPE, + TCP_IPV6_HASH_TYPE, + VLAN_PRI_HASH_TYPE, + E1HOV_PRI_HASH_TYPE, + DSCP_HASH_TYPE, + MAX_ETH_RSS_HASH_TYPE}; + + +/* + * Ethernet RSS mode + */ +enum eth_rss_mode { + ETH_RSS_MODE_DISABLED, + ETH_RSS_MODE_REGULAR, + ETH_RSS_MODE_ESX51, + ETH_RSS_MODE_VLAN_PRI, + ETH_RSS_MODE_E1HOV_PRI, + ETH_RSS_MODE_IP_DSCP, + MAX_ETH_RSS_MODE}; + + +/* + * parameters for RSS update ramrod (E2) + */ +struct eth_rss_update_ramrod_data { + uint8_t rss_engine_id; + uint8_t rss_mode; + __le16 capabilities; +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY (0x1 << 0) +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY_SHIFT 0 +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY (0x1 << 1) +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY_SHIFT 1 +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY (0x1 << 2) +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY_SHIFT 2 +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_VXLAN_CAPABILITY (0x1 << 3) +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_VXLAN_CAPABILITY_SHIFT 3 +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY (0x1 << 4) +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY_SHIFT 4 +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY (0x1 << 5) +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY_SHIFT 5 +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY (0x1 << 6) +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY_SHIFT 6 +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_VXLAN_CAPABILITY (0x1 << 7) +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_VXLAN_CAPABILITY_SHIFT 7 +#define ETH_RSS_UPDATE_RAMROD_DATA_TUNN_INNER_HDRS_CAPABILITY (0x1 << 8) +#define ETH_RSS_UPDATE_RAMROD_DATA_TUNN_INNER_HDRS_CAPABILITY_SHIFT 8 +#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY (0x1 << 9) +#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY_SHIFT 9 +#define ETH_RSS_UPDATE_RAMROD_DATA_RESERVED (0x3F << 10) +#define ETH_RSS_UPDATE_RAMROD_DATA_RESERVED_SHIFT 10 + uint8_t rss_result_mask; + uint8_t reserved3; + __le16 reserved4; + uint8_t indirection_table[T_ETH_INDIRECTION_TABLE_SIZE]; + __le32 rss_key[T_ETH_RSS_KEY]; + __le32 echo; + __le32 reserved5; +}; + + +/* + * The eth Rx 
Buffer Descriptor + */ +struct eth_rx_bd { + __le32 addr_lo; + __le32 addr_hi; +}; + + +/* + * Eth Rx Cqe structure- general structure for ramrods + */ +struct common_ramrod_eth_rx_cqe { + uint8_t ramrod_type; +#define COMMON_RAMROD_ETH_RX_CQE_TYPE (0x3 << 0) +#define COMMON_RAMROD_ETH_RX_CQE_TYPE_SHIFT 0 +#define COMMON_RAMROD_ETH_RX_CQE_ERROR (0x1 << 2) +#define COMMON_RAMROD_ETH_RX_CQE_ERROR_SHIFT 2 +#define COMMON_RAMROD_ETH_RX_CQE_RESERVED0 (0x1F << 3) +#define COMMON_RAMROD_ETH_RX_CQE_RESERVED0_SHIFT 3 + uint8_t conn_type; + __le16 reserved1; + __le32 conn_and_cmd_data; +#define COMMON_RAMROD_ETH_RX_CQE_CID (0xFFFFFF << 0) +#define COMMON_RAMROD_ETH_RX_CQE_CID_SHIFT 0 +#define COMMON_RAMROD_ETH_RX_CQE_CMD_ID (0xFF << 24) +#define COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT 24 + struct ramrod_data protocol_data; + __le32 echo; + __le32 reserved2[11]; +}; + +/* + * Rx Last CQE in page (in ETH) + */ +struct eth_rx_cqe_next_page { + __le32 addr_lo; + __le32 addr_hi; + __le32 reserved[14]; +}; + +/* + * union for all eth rx cqe types (fix their sizes) + */ +union eth_rx_cqe { + struct eth_fast_path_rx_cqe fast_path_cqe; + struct common_ramrod_eth_rx_cqe ramrod_cqe; + struct eth_rx_cqe_next_page next_page_cqe; + struct eth_end_agg_rx_cqe end_agg_cqe; +}; + + +/* + * Values for RX ETH CQE type field + */ +enum eth_rx_cqe_type { + RX_ETH_CQE_TYPE_ETH_FASTPATH, + RX_ETH_CQE_TYPE_ETH_RAMROD, + RX_ETH_CQE_TYPE_ETH_START_AGG, + RX_ETH_CQE_TYPE_ETH_STOP_AGG, + MAX_ETH_RX_CQE_TYPE}; + + +/* + * Type of SGL/Raw field in ETH RX fast path CQE + */ +enum eth_rx_fp_sel { + ETH_FP_CQE_REGULAR, + ETH_FP_CQE_RAW, + MAX_ETH_RX_FP_SEL}; + + +/* + * The eth Rx SGE Descriptor + */ +struct eth_rx_sge { + __le32 addr_lo; + __le32 addr_hi; +}; + + +/* + * common data for all protocols + */ +struct spe_hdr { + __le32 conn_and_cmd_data; +#define SPE_HDR_CID (0xFFFFFF << 0) +#define SPE_HDR_CID_SHIFT 0 +#define SPE_HDR_CMD_ID (0xFF << 24) +#define SPE_HDR_CMD_ID_SHIFT 24 + __le16 type; +#define SPE_HDR_CONN_TYPE (0xFF << 0) +#define SPE_HDR_CONN_TYPE_SHIFT 0 +#define SPE_HDR_FUNCTION_ID (0xFF << 8) +#define SPE_HDR_FUNCTION_ID_SHIFT 8 + __le16 reserved1; +}; + +/* + * specific data for ethernet slow path element + */ +union eth_specific_data { + uint8_t protocol_data[8]; + struct regpair client_update_ramrod_data; + struct regpair client_init_ramrod_init_data; + struct eth_halt_ramrod_data halt_ramrod_data; + struct regpair update_data_addr; + struct eth_common_ramrod_data common_ramrod_data; + struct regpair classify_cfg_addr; + struct regpair filter_cfg_addr; + struct regpair mcast_cfg_addr; +}; + +/* + * Ethernet slow path element + */ +struct eth_spe { + struct spe_hdr hdr; + union eth_specific_data data; +}; + + +/* + * Ethernet command ID for slow path elements + */ +enum eth_spqe_cmd_id { + RAMROD_CMD_ID_ETH_UNUSED, + RAMROD_CMD_ID_ETH_CLIENT_SETUP, + RAMROD_CMD_ID_ETH_HALT, + RAMROD_CMD_ID_ETH_FORWARD_SETUP, + RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP, + RAMROD_CMD_ID_ETH_CLIENT_UPDATE, + RAMROD_CMD_ID_ETH_EMPTY, + RAMROD_CMD_ID_ETH_TERMINATE, + RAMROD_CMD_ID_ETH_TPA_UPDATE, + RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES, + RAMROD_CMD_ID_ETH_FILTER_RULES, + RAMROD_CMD_ID_ETH_MULTICAST_RULES, + RAMROD_CMD_ID_ETH_RSS_UPDATE, + RAMROD_CMD_ID_ETH_SET_MAC, + MAX_ETH_SPQE_CMD_ID}; + + +/* + * eth tpa update command + */ +enum eth_tpa_update_command { + TPA_UPDATE_NONE_COMMAND, + TPA_UPDATE_ENABLE_COMMAND, + TPA_UPDATE_DISABLE_COMMAND, + MAX_ETH_TPA_UPDATE_COMMAND}; + + +/* + * In case of LSO over IPv4 tunnel, whether to increment IP 
ID on external IP header or internal IP header + */ +enum eth_tunnel_lso_inc_ip_id { + EXT_HEADER, + INT_HEADER, + MAX_ETH_TUNNEL_LSO_INC_IP_ID}; + + +/* + * In case tunnel exist and L4 checksum offload (or outer ip header checksum), the pseudo checksum location, on packet or on BD. + */ +enum eth_tunnel_non_lso_csum_location { + CSUM_ON_PKT, + CSUM_ON_BD, + MAX_ETH_TUNNEL_NON_LSO_CSUM_LOCATION}; + + +/* + * Packet Tunneling Type + */ +enum eth_tunn_type { + TUNN_TYPE_NONE, + TUNN_TYPE_VXLAN, + TUNN_TYPE_L2_GRE, + TUNN_TYPE_IPV4_GRE, + TUNN_TYPE_IPV6_GRE, + TUNN_TYPE_L2_GENEVE, + TUNN_TYPE_IPV4_GENEVE, + TUNN_TYPE_IPV6_GENEVE, + MAX_ETH_TUNN_TYPE}; + + +/* + * Tx regular BD structure + */ +struct eth_tx_bd { + __le32 addr_lo; + __le32 addr_hi; + __le16 total_pkt_bytes; + __le16 nbytes; + uint8_t reserved[4]; +}; + + +/* + * structure for easy accessibility to assembler + */ +struct eth_tx_bd_flags { + uint8_t as_bitfield; +#define ETH_TX_BD_FLAGS_IP_CSUM (0x1 << 0) +#define ETH_TX_BD_FLAGS_IP_CSUM_SHIFT 0 +#define ETH_TX_BD_FLAGS_L4_CSUM (0x1 << 1) +#define ETH_TX_BD_FLAGS_L4_CSUM_SHIFT 1 +#define ETH_TX_BD_FLAGS_VLAN_MODE (0x3 << 2) +#define ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT 2 +#define ETH_TX_BD_FLAGS_START_BD (0x1 << 4) +#define ETH_TX_BD_FLAGS_START_BD_SHIFT 4 +#define ETH_TX_BD_FLAGS_IS_UDP (0x1 << 5) +#define ETH_TX_BD_FLAGS_IS_UDP_SHIFT 5 +#define ETH_TX_BD_FLAGS_SW_LSO (0x1 << 6) +#define ETH_TX_BD_FLAGS_SW_LSO_SHIFT 6 +#define ETH_TX_BD_FLAGS_IPV6 (0x1 << 7) +#define ETH_TX_BD_FLAGS_IPV6_SHIFT 7 +}; + +/* + * The eth Tx Buffer Descriptor + */ +struct eth_tx_start_bd { + __le32 addr_lo; + __le32 addr_hi; + __le16 nbd; + __le16 nbytes; + __le16 vlan_or_ethertype; + struct eth_tx_bd_flags bd_flags; + uint8_t general_data; +#define ETH_TX_START_BD_HDR_NBDS (0x7 << 0) +#define ETH_TX_START_BD_HDR_NBDS_SHIFT 0 +#define ETH_TX_START_BD_NO_ADDED_TAGS (0x1 << 3) +#define ETH_TX_START_BD_NO_ADDED_TAGS_SHIFT 3 +#define ETH_TX_START_BD_FORCE_VLAN_MODE (0x1 << 4) +#define ETH_TX_START_BD_FORCE_VLAN_MODE_SHIFT 4 +#define ETH_TX_START_BD_PARSE_NBDS (0x3 << 5) +#define ETH_TX_START_BD_PARSE_NBDS_SHIFT 5 +#define ETH_TX_START_BD_TUNNEL_EXIST (0x1 << 7) +#define ETH_TX_START_BD_TUNNEL_EXIST_SHIFT 7 +}; + +/* + * Tx parsing BD structure for ETH E1/E1h + */ +struct eth_tx_parse_bd_e1x { + __le16 global_data; +#define ETH_TX_PARSE_BD_E1X_IP_HDR_START_OFFSET_W (0xF << 0) +#define ETH_TX_PARSE_BD_E1X_IP_HDR_START_OFFSET_W_SHIFT 0 +#define ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE (0x3 << 4) +#define ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE_SHIFT 4 +#define ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN (0x1 << 6) +#define ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN_SHIFT 6 +#define ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN (0x1 << 7) +#define ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT 7 +#define ETH_TX_PARSE_BD_E1X_NS_FLG (0x1 << 8) +#define ETH_TX_PARSE_BD_E1X_NS_FLG_SHIFT 8 +#define ETH_TX_PARSE_BD_E1X_RESERVED0 (0x7F << 9) +#define ETH_TX_PARSE_BD_E1X_RESERVED0_SHIFT 9 + uint8_t tcp_flags; +#define ETH_TX_PARSE_BD_E1X_FIN_FLG (0x1 << 0) +#define ETH_TX_PARSE_BD_E1X_FIN_FLG_SHIFT 0 +#define ETH_TX_PARSE_BD_E1X_SYN_FLG (0x1 << 1) +#define ETH_TX_PARSE_BD_E1X_SYN_FLG_SHIFT 1 +#define ETH_TX_PARSE_BD_E1X_RST_FLG (0x1 << 2) +#define ETH_TX_PARSE_BD_E1X_RST_FLG_SHIFT 2 +#define ETH_TX_PARSE_BD_E1X_PSH_FLG (0x1 << 3) +#define ETH_TX_PARSE_BD_E1X_PSH_FLG_SHIFT 3 +#define ETH_TX_PARSE_BD_E1X_ACK_FLG (0x1 << 4) +#define ETH_TX_PARSE_BD_E1X_ACK_FLG_SHIFT 4 +#define ETH_TX_PARSE_BD_E1X_URG_FLG (0x1 << 5) +#define ETH_TX_PARSE_BD_E1X_URG_FLG_SHIFT 5 
+#define ETH_TX_PARSE_BD_E1X_ECE_FLG (0x1 << 6) +#define ETH_TX_PARSE_BD_E1X_ECE_FLG_SHIFT 6 +#define ETH_TX_PARSE_BD_E1X_CWR_FLG (0x1 << 7) +#define ETH_TX_PARSE_BD_E1X_CWR_FLG_SHIFT 7 + uint8_t ip_hlen_w; + __le16 total_hlen_w; + __le16 tcp_pseudo_csum; + __le16 lso_mss; + __le16 ip_id; + __le32 tcp_send_seq; +}; + +/* + * Tx parsing BD structure for ETH E2 + */ +struct eth_tx_parse_bd_e2 { + union eth_mac_addr_or_tunnel_data data; + __le32 parsing_data; +#define ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W (0x7FF << 0) +#define ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT 0 +#define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW (0xF << 11) +#define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT 11 +#define ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR (0x1 << 15) +#define ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR_SHIFT 15 +#define ETH_TX_PARSE_BD_E2_LSO_MSS (0x3FFF << 16) +#define ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT 16 +#define ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE (0x3 << 30) +#define ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE_SHIFT 30 +}; + +/* + * Tx 2nd parsing BD structure for ETH packet + */ +struct eth_tx_parse_2nd_bd { + __le16 global_data; +#define ETH_TX_PARSE_2ND_BD_IP_HDR_START_OUTER_W (0xF << 0) +#define ETH_TX_PARSE_2ND_BD_IP_HDR_START_OUTER_W_SHIFT 0 +#define ETH_TX_PARSE_2ND_BD_RESERVED0 (0x1 << 4) +#define ETH_TX_PARSE_2ND_BD_RESERVED0_SHIFT 4 +#define ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN (0x1 << 5) +#define ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT 5 +#define ETH_TX_PARSE_2ND_BD_NS_FLG (0x1 << 6) +#define ETH_TX_PARSE_2ND_BD_NS_FLG_SHIFT 6 +#define ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST (0x1 << 7) +#define ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST_SHIFT 7 +#define ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W (0x1F << 8) +#define ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT 8 +#define ETH_TX_PARSE_2ND_BD_RESERVED1 (0x7 << 13) +#define ETH_TX_PARSE_2ND_BD_RESERVED1_SHIFT 13 + uint8_t bd_type; +#define ETH_TX_PARSE_2ND_BD_TYPE (0xF << 0) +#define ETH_TX_PARSE_2ND_BD_TYPE_SHIFT 0 +#define ETH_TX_PARSE_2ND_BD_RESERVED2 (0xF << 4) +#define ETH_TX_PARSE_2ND_BD_RESERVED2_SHIFT 4 + uint8_t reserved3; + uint8_t tcp_flags; +#define ETH_TX_PARSE_2ND_BD_FIN_FLG (0x1 << 0) +#define ETH_TX_PARSE_2ND_BD_FIN_FLG_SHIFT 0 +#define ETH_TX_PARSE_2ND_BD_SYN_FLG (0x1 << 1) +#define ETH_TX_PARSE_2ND_BD_SYN_FLG_SHIFT 1 +#define ETH_TX_PARSE_2ND_BD_RST_FLG (0x1 << 2) +#define ETH_TX_PARSE_2ND_BD_RST_FLG_SHIFT 2 +#define ETH_TX_PARSE_2ND_BD_PSH_FLG (0x1 << 3) +#define ETH_TX_PARSE_2ND_BD_PSH_FLG_SHIFT 3 +#define ETH_TX_PARSE_2ND_BD_ACK_FLG (0x1 << 4) +#define ETH_TX_PARSE_2ND_BD_ACK_FLG_SHIFT 4 +#define ETH_TX_PARSE_2ND_BD_URG_FLG (0x1 << 5) +#define ETH_TX_PARSE_2ND_BD_URG_FLG_SHIFT 5 +#define ETH_TX_PARSE_2ND_BD_ECE_FLG (0x1 << 6) +#define ETH_TX_PARSE_2ND_BD_ECE_FLG_SHIFT 6 +#define ETH_TX_PARSE_2ND_BD_CWR_FLG (0x1 << 7) +#define ETH_TX_PARSE_2ND_BD_CWR_FLG_SHIFT 7 + uint8_t reserved4; + uint8_t tunnel_udp_hdr_start_w; + uint8_t fw_ip_hdr_to_payload_w; + __le16 fw_ip_csum_wo_len_flags_frag; + __le16 hw_ip_id; + __le32 tcp_send_seq; +}; + +/* + * The last BD in the BD memory will hold a pointer to the next BD memory + */ +struct eth_tx_next_bd { + __le32 addr_lo; + __le32 addr_hi; + uint8_t reserved[8]; +}; + +/* + * union for 4 Bd types + */ +union eth_tx_bd_types { + struct eth_tx_start_bd start_bd; + struct eth_tx_bd reg_bd; + struct eth_tx_parse_bd_e1x parse_bd_e1x; + struct eth_tx_parse_bd_e2 parse_bd_e2; + struct eth_tx_parse_2nd_bd parse_2nd_bd; + struct eth_tx_next_bd next_bd; +}; + +/* + * array of 13 bds as appears in the eth xstorm context + */ 
+struct eth_tx_bds_array { + union eth_tx_bd_types bds[13]; +}; + + +/* + * VLAN mode on TX BDs + */ +enum eth_tx_vlan_type { + X_ETH_NO_VLAN, + X_ETH_OUTBAND_VLAN, + X_ETH_INBAND_VLAN, + X_ETH_FW_ADDED_VLAN, + MAX_ETH_TX_VLAN_TYPE}; + + +/* + * Ethernet VLAN filtering mode in E1x + */ +enum eth_vlan_filter_mode { + ETH_VLAN_FILTER_ANY_VLAN, + ETH_VLAN_FILTER_SPECIFIC_VLAN, + ETH_VLAN_FILTER_CLASSIFY, + MAX_ETH_VLAN_FILTER_MODE}; + + +/* + * MAC filtering configuration command header + */ +struct mac_configuration_hdr { + uint8_t length; + uint8_t offset; + __le16 client_id; + __le32 echo; +}; + +/* + * MAC address in list for ramrod + */ +struct mac_configuration_entry { + __le16 lsb_mac_addr; + __le16 middle_mac_addr; + __le16 msb_mac_addr; + __le16 vlan_id; + uint8_t pf_id; + uint8_t flags; +#define MAC_CONFIGURATION_ENTRY_ACTION_TYPE (0x1 << 0) +#define MAC_CONFIGURATION_ENTRY_ACTION_TYPE_SHIFT 0 +#define MAC_CONFIGURATION_ENTRY_RDMA_MAC (0x1 << 1) +#define MAC_CONFIGURATION_ENTRY_RDMA_MAC_SHIFT 1 +#define MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE (0x3 << 2) +#define MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE_SHIFT 2 +#define MAC_CONFIGURATION_ENTRY_OVERRIDE_VLAN_REMOVAL (0x1 << 4) +#define MAC_CONFIGURATION_ENTRY_OVERRIDE_VLAN_REMOVAL_SHIFT 4 +#define MAC_CONFIGURATION_ENTRY_BROADCAST (0x1 << 5) +#define MAC_CONFIGURATION_ENTRY_BROADCAST_SHIFT 5 +#define MAC_CONFIGURATION_ENTRY_RESERVED1 (0x3 << 6) +#define MAC_CONFIGURATION_ENTRY_RESERVED1_SHIFT 6 + __le16 reserved0; + __le32 clients_bit_vector; +}; + +/* + * MAC filtering configuration command + */ +struct mac_configuration_cmd { + struct mac_configuration_hdr hdr; + struct mac_configuration_entry config_table[64]; +}; + + +/* + * Set-MAC command type (in E1x) + */ +enum set_mac_action_type { + T_ETH_MAC_COMMAND_INVALIDATE, + T_ETH_MAC_COMMAND_SET, + MAX_SET_MAC_ACTION_TYPE}; + + +/* + * Ethernet TPA Modes + */ +enum tpa_mode { + TPA_LRO, + TPA_GRO, + MAX_TPA_MODE}; + + +/* + * tpa update ramrod data + */ +struct tpa_update_ramrod_data { + uint8_t update_ipv4; + uint8_t update_ipv6; + uint8_t client_id; + uint8_t max_tpa_queues; + uint8_t max_sges_for_packet; + uint8_t complete_on_both_clients; + uint8_t dont_verify_rings_pause_thr_flg; + uint8_t tpa_mode; + __le16 sge_buff_size; + __le16 max_agg_size; + __le32 sge_page_base_lo; + __le32 sge_page_base_hi; + __le16 sge_pause_thr_low; + __le16 sge_pause_thr_high; + uint8_t tpa_over_vlan_disable; + uint8_t reserved[7]; +}; + + +/* + * approximate-match multicast filtering for E1H per function in Tstorm + */ +struct tstorm_eth_approximate_match_multicast_filtering { + uint32_t mcast_add_hash_bit_array[8]; +}; + + +/* + * Common configuration parameters per function in Tstorm + */ +struct tstorm_eth_function_common_config { + __le16 config_flags; +#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY (0x1 << 0) +#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY_SHIFT 0 +#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY (0x1 << 1) +#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY_SHIFT 1 +#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY (0x1 << 2) +#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY_SHIFT 2 +#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY (0x1 << 3) +#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY_SHIFT 3 +#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE (0x7 << 4) +#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT 4 +#define 
TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_FILTERING_ENABLE (0x1 << 7) +#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_FILTERING_ENABLE_SHIFT 7 +#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0 (0xFF << 8) +#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0_SHIFT 8 + uint8_t rss_result_mask; + uint8_t reserved1; + __le16 vlan_id[2]; +}; + + +/* + * MAC filtering configuration parameters per port in Tstorm + */ +struct tstorm_eth_mac_filter_config { + uint32_t ucast_drop_all; + uint32_t ucast_accept_all; + uint32_t mcast_drop_all; + uint32_t mcast_accept_all; + uint32_t bcast_accept_all; + uint32_t vlan_filter[2]; + uint32_t unmatched_unicast; +}; + + +/* + * tx only queue init ramrod data + */ +struct tx_queue_init_ramrod_data { + struct client_init_general_data general; + struct client_init_tx_data tx; +}; + + +/* + * Three RX producers for ETH + */ +struct ustorm_eth_rx_producers { +#if defined(__BIG_ENDIAN) + uint16_t bd_prod; + uint16_t cqe_prod; +#elif defined(__LITTLE_ENDIAN) + uint16_t cqe_prod; + uint16_t bd_prod; +#endif +#if defined(__BIG_ENDIAN) + uint16_t reserved; + uint16_t sge_prod; +#elif defined(__LITTLE_ENDIAN) + uint16_t sge_prod; + uint16_t reserved; +#endif +}; + + +/* + * FCoE RX statistics parameters section#0 + */ +struct fcoe_rx_stat_params_section0 { + __le32 fcoe_rx_pkt_cnt; + __le32 fcoe_rx_byte_cnt; +}; + + +/* + * FCoE RX statistics parameters section#1 + */ +struct fcoe_rx_stat_params_section1 { + __le32 fcoe_ver_cnt; + __le32 fcoe_rx_drop_pkt_cnt; +}; + + +/* + * FCoE RX statistics parameters section#2 + */ +struct fcoe_rx_stat_params_section2 { + __le32 fc_crc_cnt; + __le32 eofa_del_cnt; + __le32 miss_frame_cnt; + __le32 seq_timeout_cnt; + __le32 drop_seq_cnt; + __le32 fcoe_rx_drop_pkt_cnt; + __le32 fcp_rx_pkt_cnt; + __le32 reserved0; +}; + + +/* + * FCoE TX statistics parameters + */ +struct fcoe_tx_stat_params { + __le32 fcoe_tx_pkt_cnt; + __le32 fcoe_tx_byte_cnt; + __le32 fcp_tx_pkt_cnt; + __le32 reserved0; +}; + +/* + * FCoE statistics parameters + */ +struct fcoe_statistics_params { + struct fcoe_tx_stat_params tx_stat; + struct fcoe_rx_stat_params_section0 rx_stat0; + struct fcoe_rx_stat_params_section1 rx_stat1; + struct fcoe_rx_stat_params_section2 rx_stat2; +}; + + +/* + * The data afex vif list ramrod need + */ +struct afex_vif_list_ramrod_data { + uint8_t afex_vif_list_command; + uint8_t func_bit_map; + __le16 vif_list_index; + uint8_t func_to_clear; + uint8_t echo; + __le16 reserved1; +}; + + +/* + * + */ +struct c2s_pri_trans_table_entry { + uint8_t val[MAX_VLAN_PRIORITIES]; +}; + + +/* + * cfc delete event data + */ +struct cfc_del_event_data { + __le32 cid; + __le32 reserved0; + __le32 reserved1; +}; + + +/* + * per-port SAFC demo variables + */ +struct cmng_flags_per_port { + uint32_t cmng_enables; +#define CMNG_FLAGS_PER_PORT_FAIRNESS_VN (0x1 << 0) +#define CMNG_FLAGS_PER_PORT_FAIRNESS_VN_SHIFT 0 +#define CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN (0x1 << 1) +#define CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN_SHIFT 1 +#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS (0x1 << 2) +#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS_SHIFT 2 +#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS_MODE (0x1 << 3) +#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS_MODE_SHIFT 3 +#define __CMNG_FLAGS_PER_PORT_RESERVED0 (0xFFFFFFF << 4) +#define __CMNG_FLAGS_PER_PORT_RESERVED0_SHIFT 4 + uint32_t __reserved1; +}; + + +/* + * per-port rate shaping variables + */ +struct rate_shaping_vars_per_port { + uint32_t rs_periodic_timeout; + uint32_t rs_threshold; +}; + +/* + * per-port fairness variables + */ 
+struct fairness_vars_per_port { + uint32_t upper_bound; + uint32_t fair_threshold; + uint32_t fairness_timeout; + uint32_t size_thr; +}; + +/* + * per-port SAFC variables + */ +struct safc_struct_per_port { +#if defined(__BIG_ENDIAN) + uint16_t __reserved1; + uint8_t __reserved0; + uint8_t safc_timeout_usec; +#elif defined(__LITTLE_ENDIAN) + uint8_t safc_timeout_usec; + uint8_t __reserved0; + uint16_t __reserved1; +#endif + uint8_t cos_to_traffic_types[MAX_COS_NUMBER]; + uint16_t cos_to_pause_mask[NUM_OF_SAFC_BITS]; +}; + +/* + * Per-port congestion management variables + */ +struct cmng_struct_per_port { + struct rate_shaping_vars_per_port rs_vars; + struct fairness_vars_per_port fair_vars; + struct safc_struct_per_port safc_vars; + struct cmng_flags_per_port flags; +}; + +/* + * a single rate shaping counter. can be used as protocol or vnic counter + */ +struct rate_shaping_counter { + uint32_t quota; +#if defined(__BIG_ENDIAN) + uint16_t __reserved0; + uint16_t rate; +#elif defined(__LITTLE_ENDIAN) + uint16_t rate; + uint16_t __reserved0; +#endif +}; + +/* + * per-vnic rate shaping variables + */ +struct rate_shaping_vars_per_vn { + struct rate_shaping_counter vn_counter; +}; + +/* + * per-vnic fairness variables + */ +struct fairness_vars_per_vn { + uint32_t cos_credit_delta[MAX_COS_NUMBER]; + uint32_t vn_credit_delta; + uint32_t __reserved0; +}; + +/* + * cmng port init state + */ +struct cmng_vnic { + struct rate_shaping_vars_per_vn vnic_max_rate[4]; + struct fairness_vars_per_vn vnic_min_rate[4]; +}; + +/* + * cmng port init state + */ +struct cmng_init { + struct cmng_struct_per_port port; + struct cmng_vnic vnic; +}; + + +/* + * driver parameters for congestion management init, all rates are in Mbps + */ +struct cmng_init_input { + uint32_t port_rate; + uint32_t size_thr; + uint32_t fairness_thr; + uint16_t vnic_min_rate[4]; + uint16_t vnic_max_rate[4]; + uint16_t cos_min_rate[MAX_COS_NUMBER]; + uint16_t cos_to_pause_mask[MAX_COS_NUMBER]; + struct cmng_flags_per_port flags; +}; + + +/* + * Protocol-common command ID for slow path elements + */ +enum common_spqe_cmd_id { + RAMROD_CMD_ID_COMMON_UNUSED, + RAMROD_CMD_ID_COMMON_FUNCTION_START, + RAMROD_CMD_ID_COMMON_FUNCTION_STOP, + RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, + RAMROD_CMD_ID_COMMON_CFC_DEL, + RAMROD_CMD_ID_COMMON_CFC_DEL_WB, + RAMROD_CMD_ID_COMMON_STAT_QUERY, + RAMROD_CMD_ID_COMMON_STOP_TRAFFIC, + RAMROD_CMD_ID_COMMON_START_TRAFFIC, + RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS, + RAMROD_CMD_ID_COMMON_SET_TIMESYNC, + MAX_COMMON_SPQE_CMD_ID}; + + +/* + * Per-protocol connection types + */ +enum connection_type { + ETH_CONNECTION_TYPE, + TOE_CONNECTION_TYPE, + RDMA_CONNECTION_TYPE, + ISCSI_CONNECTION_TYPE, + FCOE_CONNECTION_TYPE, + RESERVED_CONNECTION_TYPE_0, + RESERVED_CONNECTION_TYPE_1, + RESERVED_CONNECTION_TYPE_2, + NONE_CONNECTION_TYPE, + MAX_CONNECTION_TYPE}; + + +/* + * Cos modes + */ +enum cos_mode { + OVERRIDE_COS, + STATIC_COS, + FW_WRR, + MAX_COS_MODE}; + + +/* + * Dynamic HC counters set by the driver + */ +struct hc_dynamic_drv_counter { + uint32_t val[HC_SB_MAX_DYNAMIC_INDICES]; +}; + +/* + * zone A per-queue data + */ +struct cstorm_queue_zone_data { + struct hc_dynamic_drv_counter hc_dyn_drv_cnt; + struct regpair reserved[2]; +}; + + +/* + * Vf-PF channel data in cstorm ram (non-triggered zone) + */ +struct vf_pf_channel_zone_data { + uint32_t msg_addr_lo; + uint32_t msg_addr_hi; +}; + +/* + * zone for VF non-triggered data + */ +struct non_trigger_vf_zone { + struct vf_pf_channel_zone_data vf_pf_channel; +}; + +/* + 
* Vf-PF channel trigger zone in cstorm ram + */ +struct vf_pf_channel_zone_trigger { + uint8_t addr_valid; +}; + +/* + * zone that triggers the in-bound interrupt + */ +struct trigger_vf_zone { + struct vf_pf_channel_zone_trigger vf_pf_channel; + uint8_t reserved0; + uint16_t reserved1; + uint32_t reserved2; +}; + +/* + * zone B per-VF data + */ +struct cstorm_vf_zone_data { + struct non_trigger_vf_zone non_trigger; + struct trigger_vf_zone trigger; +}; + + +/* + * Dynamic host coalescing init parameters, per state machine + */ +struct dynamic_hc_sm_config { + uint32_t threshold[3]; + uint8_t shift_per_protocol[HC_SB_MAX_DYNAMIC_INDICES]; + uint8_t hc_timeout0[HC_SB_MAX_DYNAMIC_INDICES]; + uint8_t hc_timeout1[HC_SB_MAX_DYNAMIC_INDICES]; + uint8_t hc_timeout2[HC_SB_MAX_DYNAMIC_INDICES]; + uint8_t hc_timeout3[HC_SB_MAX_DYNAMIC_INDICES]; +}; + +/* + * Dynamic host coalescing init parameters + */ +struct dynamic_hc_config { + struct dynamic_hc_sm_config sm_config[HC_SB_MAX_SM]; +}; + + +struct e2_integ_data { +#if defined(__BIG_ENDIAN) + uint8_t flags; +#define E2_INTEG_DATA_TESTING_EN (0x1 << 0) +#define E2_INTEG_DATA_TESTING_EN_SHIFT 0 +#define E2_INTEG_DATA_LB_TX (0x1 << 1) +#define E2_INTEG_DATA_LB_TX_SHIFT 1 +#define E2_INTEG_DATA_COS_TX (0x1 << 2) +#define E2_INTEG_DATA_COS_TX_SHIFT 2 +#define E2_INTEG_DATA_OPPORTUNISTICQM (0x1 << 3) +#define E2_INTEG_DATA_OPPORTUNISTICQM_SHIFT 3 +#define E2_INTEG_DATA_DPMTESTRELEASEDQ (0x1 << 4) +#define E2_INTEG_DATA_DPMTESTRELEASEDQ_SHIFT 4 +#define E2_INTEG_DATA_RESERVED (0x7 << 5) +#define E2_INTEG_DATA_RESERVED_SHIFT 5 + uint8_t cos; + uint8_t voq; + uint8_t pbf_queue; +#elif defined(__LITTLE_ENDIAN) + uint8_t pbf_queue; + uint8_t voq; + uint8_t cos; + uint8_t flags; +#define E2_INTEG_DATA_TESTING_EN (0x1 << 0) +#define E2_INTEG_DATA_TESTING_EN_SHIFT 0 +#define E2_INTEG_DATA_LB_TX (0x1 << 1) +#define E2_INTEG_DATA_LB_TX_SHIFT 1 +#define E2_INTEG_DATA_COS_TX (0x1 << 2) +#define E2_INTEG_DATA_COS_TX_SHIFT 2 +#define E2_INTEG_DATA_OPPORTUNISTICQM (0x1 << 3) +#define E2_INTEG_DATA_OPPORTUNISTICQM_SHIFT 3 +#define E2_INTEG_DATA_DPMTESTRELEASEDQ (0x1 << 4) +#define E2_INTEG_DATA_DPMTESTRELEASEDQ_SHIFT 4 +#define E2_INTEG_DATA_RESERVED (0x7 << 5) +#define E2_INTEG_DATA_RESERVED_SHIFT 5 +#endif +#if defined(__BIG_ENDIAN) + uint16_t reserved3; + uint8_t reserved2; + uint8_t ramEn; +#elif defined(__LITTLE_ENDIAN) + uint8_t ramEn; + uint8_t reserved2; + uint16_t reserved3; +#endif +}; + + +/* + * set mac event data + */ +struct eth_event_data { + __le32 echo; + __le32 reserved0; + __le32 reserved1; +}; + + +/* + * pf-vf event data + */ +struct vf_pf_event_data { + uint8_t vf_id; + uint8_t reserved0; + __le16 reserved1; + __le32 msg_addr_lo; + __le32 msg_addr_hi; +}; + +/* + * VF FLR event data + */ +struct vf_flr_event_data { + uint8_t vf_id; + uint8_t reserved0; + __le16 reserved1; + __le32 reserved2; + __le32 reserved3; +}; + +/* + * malicious VF event data + */ +struct malicious_vf_event_data { + uint8_t vf_id; + uint8_t err_id; + __le16 reserved1; + __le32 reserved2; + __le32 reserved3; +}; + +/* + * vif list event data + */ +struct vif_list_event_data { + uint8_t func_bit_map; + uint8_t echo; + __le16 reserved0; + __le32 reserved1; + __le32 reserved2; +}; + +/* + * function update event data + */ +struct function_update_event_data { + uint8_t echo; + uint8_t reserved; + __le16 reserved0; + __le32 reserved1; + __le32 reserved2; +}; + +/* + * union for all event ring message types + */ +union event_data { + struct vf_pf_event_data vf_pf_event; + struct 
eth_event_data eth_event; + struct cfc_del_event_data cfc_del_event; + struct vf_flr_event_data vf_flr_event; + struct malicious_vf_event_data malicious_vf_event; + struct vif_list_event_data vif_list_event; + struct function_update_event_data function_update_event; +}; + + +/* + * per PF event ring data + */ +struct event_ring_data { + struct regpair_native base_addr; +#if defined(__BIG_ENDIAN) + uint8_t index_id; + uint8_t sb_id; + uint16_t producer; +#elif defined(__LITTLE_ENDIAN) + uint16_t producer; + uint8_t sb_id; + uint8_t index_id; +#endif + uint32_t reserved0; +}; + + +/* + * event ring message element (each element is 128 bits) + */ +struct event_ring_msg { + uint8_t opcode; + uint8_t error; + uint16_t reserved1; + union event_data data; +}; + +/* + * event ring next page element (128 bits) + */ +struct event_ring_next { + struct regpair addr; + uint32_t reserved[2]; +}; + +/* + * union for event ring element types (each element is 128 bits) + */ +union event_ring_elem { + struct event_ring_msg message; + struct event_ring_next next_page; +}; + + +/* + * Common event ring opcodes + */ +enum event_ring_opcode { + EVENT_RING_OPCODE_VF_PF_CHANNEL, + EVENT_RING_OPCODE_FUNCTION_START, + EVENT_RING_OPCODE_FUNCTION_STOP, + EVENT_RING_OPCODE_CFC_DEL, + EVENT_RING_OPCODE_CFC_DEL_WB, + EVENT_RING_OPCODE_STAT_QUERY, + EVENT_RING_OPCODE_STOP_TRAFFIC, + EVENT_RING_OPCODE_START_TRAFFIC, + EVENT_RING_OPCODE_VF_FLR, + EVENT_RING_OPCODE_MALICIOUS_VF, + EVENT_RING_OPCODE_FORWARD_SETUP, + EVENT_RING_OPCODE_RSS_UPDATE_RULES, + EVENT_RING_OPCODE_FUNCTION_UPDATE, + EVENT_RING_OPCODE_AFEX_VIF_LISTS, + EVENT_RING_OPCODE_SET_MAC, + EVENT_RING_OPCODE_CLASSIFICATION_RULES, + EVENT_RING_OPCODE_FILTERS_RULES, + EVENT_RING_OPCODE_MULTICAST_RULES, + EVENT_RING_OPCODE_SET_TIMESYNC, + MAX_EVENT_RING_OPCODE}; + + +/* + * Modes for fairness algorithm + */ +enum fairness_mode { + FAIRNESS_COS_WRR_MODE, + FAIRNESS_COS_ETS_MODE, + MAX_FAIRNESS_MODE}; + + +/* + * Priority and cos + */ +struct priority_cos { + uint8_t priority; + uint8_t cos; + __le16 reserved1; +}; + +/* + * The data for flow control configuration + */ +struct flow_control_configuration { + struct priority_cos traffic_type_to_priority_cos[MAX_TRAFFIC_TYPES]; + uint8_t dcb_enabled; + uint8_t dcb_version; + uint8_t dont_add_pri_0_en; + uint8_t reserved1; + __le32 reserved2; + uint8_t dcb_outer_pri[MAX_TRAFFIC_TYPES]; +}; + + +/* + * + */ +struct function_start_data { + uint8_t function_mode; + uint8_t allow_npar_tx_switching; + __le16 sd_vlan_tag; + __le16 vif_id; + uint8_t path_id; + uint8_t network_cos_mode; + uint8_t dmae_cmd_id; + uint8_t no_added_tags; + __le16 reserved0; + __le32 reserved1; + uint8_t inner_clss_vxlan; + uint8_t inner_clss_l2gre; + uint8_t inner_clss_l2geneve; + uint8_t inner_rss; + __le16 vxlan_dst_port; + __le16 geneve_dst_port; + uint8_t sd_accept_mf_clss_fail; + uint8_t sd_accept_mf_clss_fail_match_ethtype; + __le16 sd_accept_mf_clss_fail_ethtype; + __le16 sd_vlan_eth_type; + uint8_t sd_vlan_force_pri_flg; + uint8_t sd_vlan_force_pri_val; + uint8_t c2s_pri_tt_valid; + uint8_t c2s_pri_default; + uint8_t tx_vlan_filtering_enable; + uint8_t tx_vlan_filtering_use_pvid; + uint8_t reserved2[4]; + struct c2s_pri_trans_table_entry c2s_pri_trans_table; +}; + + +/* + * + */ +struct function_update_data { + uint8_t vif_id_change_flg; + uint8_t afex_default_vlan_change_flg; + uint8_t allowed_priorities_change_flg; + uint8_t network_cos_mode_change_flg; + __le16 vif_id; + __le16 afex_default_vlan; + uint8_t allowed_priorities; + uint8_t 
network_cos_mode; + uint8_t lb_mode_en_change_flg; + uint8_t lb_mode_en; + uint8_t tx_switch_suspend_change_flg; + uint8_t tx_switch_suspend; + uint8_t echo; + uint8_t update_tunn_cfg_flg; + uint8_t inner_clss_vxlan; + uint8_t inner_clss_l2gre; + uint8_t inner_clss_l2geneve; + uint8_t inner_rss; + __le16 vxlan_dst_port; + __le16 geneve_dst_port; + uint8_t sd_vlan_force_pri_change_flg; + uint8_t sd_vlan_force_pri_flg; + uint8_t sd_vlan_force_pri_val; + uint8_t sd_vlan_tag_change_flg; + uint8_t sd_vlan_eth_type_change_flg; + uint8_t reserved1; + __le16 sd_vlan_tag; + __le16 sd_vlan_eth_type; + uint8_t tx_vlan_filtering_pvid_change_flg; + uint8_t reserved0; + __le32 reserved2; +}; + + +/* + * FW version stored in the Xstorm RAM + */ +struct fw_version { +#if defined(__BIG_ENDIAN) + uint8_t engineering; + uint8_t revision; + uint8_t minor; + uint8_t major; +#elif defined(__LITTLE_ENDIAN) + uint8_t major; + uint8_t minor; + uint8_t revision; + uint8_t engineering; +#endif + uint32_t flags; +#define FW_VERSION_OPTIMIZED (0x1 << 0) +#define FW_VERSION_OPTIMIZED_SHIFT 0 +#define FW_VERSION_BIG_ENDIEN (0x1 << 1) +#define FW_VERSION_BIG_ENDIEN_SHIFT 1 +#define FW_VERSION_CHIP_VERSION (0x3 << 2) +#define FW_VERSION_CHIP_VERSION_SHIFT 2 +#define __FW_VERSION_RESERVED (0xFFFFFFF << 4) +#define __FW_VERSION_RESERVED_SHIFT 4 +}; + + +/* + * Dynamic Host-Coalescing - Driver(host) counters + */ +struct hc_dynamic_sb_drv_counters { + uint32_t dynamic_hc_drv_counter[HC_SB_MAX_DYNAMIC_INDICES]; +}; + + +/* + * 2 bytes. configuration/state parameters for a single protocol index + */ +struct hc_index_data { +#if defined(__BIG_ENDIAN) + uint8_t flags; +#define HC_INDEX_DATA_SM_ID (0x1 << 0) +#define HC_INDEX_DATA_SM_ID_SHIFT 0 +#define HC_INDEX_DATA_HC_ENABLED (0x1 << 1) +#define HC_INDEX_DATA_HC_ENABLED_SHIFT 1 +#define HC_INDEX_DATA_DYNAMIC_HC_ENABLED (0x1 << 2) +#define HC_INDEX_DATA_DYNAMIC_HC_ENABLED_SHIFT 2 +#define HC_INDEX_DATA_RESERVE (0x1F << 3) +#define HC_INDEX_DATA_RESERVE_SHIFT 3 + uint8_t timeout; +#elif defined(__LITTLE_ENDIAN) + uint8_t timeout; + uint8_t flags; +#define HC_INDEX_DATA_SM_ID (0x1 << 0) +#define HC_INDEX_DATA_SM_ID_SHIFT 0 +#define HC_INDEX_DATA_HC_ENABLED (0x1 << 1) +#define HC_INDEX_DATA_HC_ENABLED_SHIFT 1 +#define HC_INDEX_DATA_DYNAMIC_HC_ENABLED (0x1 << 2) +#define HC_INDEX_DATA_DYNAMIC_HC_ENABLED_SHIFT 2 +#define HC_INDEX_DATA_RESERVE (0x1F << 3) +#define HC_INDEX_DATA_RESERVE_SHIFT 3 +#endif +}; + + +/* + * HC state-machine + */ +struct hc_status_block_sm { +#if defined(__BIG_ENDIAN) + uint8_t igu_seg_id; + uint8_t igu_sb_id; + uint8_t timer_value; + uint8_t __flags; +#elif defined(__LITTLE_ENDIAN) + uint8_t __flags; + uint8_t timer_value; + uint8_t igu_sb_id; + uint8_t igu_seg_id; +#endif + uint32_t time_to_expire; +}; + +/* + * hold PCI identification variables- used in various places in firmware + */ +struct pci_entity { +#if defined(__BIG_ENDIAN) + uint8_t vf_valid; + uint8_t vf_id; + uint8_t vnic_id; + uint8_t pf_id; +#elif defined(__LITTLE_ENDIAN) + uint8_t pf_id; + uint8_t vnic_id; + uint8_t vf_id; + uint8_t vf_valid; +#endif +}; + +/* + * The fast-path status block meta-data, common to all chips + */ +struct hc_sb_data { + struct regpair_native host_sb_addr; + struct hc_status_block_sm state_machine[HC_SB_MAX_SM]; + struct pci_entity p_func; +#if defined(__BIG_ENDIAN) + uint8_t rsrv0; + uint8_t state; + uint8_t dhc_qzone_id; + uint8_t same_igu_sb_1b; +#elif defined(__LITTLE_ENDIAN) + uint8_t same_igu_sb_1b; + uint8_t dhc_qzone_id; + uint8_t state; + uint8_t rsrv0; 
+#endif + struct regpair_native rsrv1[2]; +}; + + +/* + * Segment types for host coaslescing + */ +enum hc_segment { + HC_REGULAR_SEGMENT, + HC_DEFAULT_SEGMENT, + MAX_HC_SEGMENT}; + + +/* + * The fast-path status block meta-data + */ +struct hc_sp_status_block_data { + struct regpair_native host_sb_addr; +#if defined(__BIG_ENDIAN) + uint8_t rsrv1; + uint8_t state; + uint8_t igu_seg_id; + uint8_t igu_sb_id; +#elif defined(__LITTLE_ENDIAN) + uint8_t igu_sb_id; + uint8_t igu_seg_id; + uint8_t state; + uint8_t rsrv1; +#endif + struct pci_entity p_func; +}; + + +/* + * The fast-path status block meta-data + */ +struct hc_status_block_data_e1x { + struct hc_index_data index_data[HC_SB_MAX_INDICES_E1X]; + struct hc_sb_data common; +}; + + +/* + * The fast-path status block meta-data + */ +struct hc_status_block_data_e2 { + struct hc_index_data index_data[HC_SB_MAX_INDICES_E2]; + struct hc_sb_data common; +}; + + +/* + * IGU block operartion modes (in Everest2) + */ +enum igu_mode { + HC_IGU_BC_MODE, + HC_IGU_NBC_MODE, + MAX_IGU_MODE}; + + +/* + * Inner Headers Classification Type + */ +enum inner_clss_type { + INNER_CLSS_DISABLED, + INNER_CLSS_USE_VLAN, + INNER_CLSS_USE_VNI, + MAX_INNER_CLSS_TYPE}; + + +/* + * IP versions + */ +enum ip_ver { + IP_V4, + IP_V6, + MAX_IP_VER}; + + +/* + * Malicious VF error ID + */ +enum malicious_vf_error_id { + MALICIOUS_VF_NO_ERROR, + VF_PF_CHANNEL_NOT_READY, + ETH_ILLEGAL_BD_LENGTHS, + ETH_PACKET_TOO_SHORT, + ETH_PAYLOAD_TOO_BIG, + ETH_ILLEGAL_ETH_TYPE, + ETH_ILLEGAL_LSO_HDR_LEN, + ETH_TOO_MANY_BDS, + ETH_ZERO_HDR_NBDS, + ETH_START_BD_NOT_SET, + ETH_ILLEGAL_PARSE_NBDS, + ETH_IPV6_AND_CHECKSUM, + ETH_VLAN_FLG_INCORRECT, + ETH_ILLEGAL_LSO_MSS, + ETH_TUNNEL_NOT_SUPPORTED, + MAX_MALICIOUS_VF_ERROR_ID}; + + +/* + * Multi-function modes + */ +enum mf_mode { + SINGLE_FUNCTION, + MULTI_FUNCTION_SD, + MULTI_FUNCTION_SI, + MULTI_FUNCTION_AFEX, + MAX_MF_MODE}; + + +/* + * Protocol-common statistics collected by the Tstorm (per pf) + */ +struct tstorm_per_pf_stats { + struct regpair rcv_error_bytes; +}; + +/* + * + */ +struct per_pf_stats { + struct tstorm_per_pf_stats tstorm_pf_statistics; +}; + + +/* + * Protocol-common statistics collected by the Tstorm (per port) + */ +struct tstorm_per_port_stats { + __le32 mac_discard; + __le32 mac_filter_discard; + __le32 brb_truncate_discard; + __le32 mf_tag_discard; + __le32 packet_drop; + __le32 reserved; +}; + +/* + * + */ +struct per_port_stats { + struct tstorm_per_port_stats tstorm_port_statistics; +}; + + +/* + * Protocol-common statistics collected by the Tstorm (per client) + */ +struct tstorm_per_queue_stats { + struct regpair rcv_ucast_bytes; + __le32 rcv_ucast_pkts; + __le32 checksum_discard; + struct regpair rcv_bcast_bytes; + __le32 rcv_bcast_pkts; + __le32 pkts_too_big_discard; + struct regpair rcv_mcast_bytes; + __le32 rcv_mcast_pkts; + __le32 ttl0_discard; + __le16 no_buff_discard; + __le16 reserved0; + __le32 reserved1; +}; + +/* + * Protocol-common statistics collected by the Ustorm (per client) + */ +struct ustorm_per_queue_stats { + struct regpair ucast_no_buff_bytes; + struct regpair mcast_no_buff_bytes; + struct regpair bcast_no_buff_bytes; + __le32 ucast_no_buff_pkts; + __le32 mcast_no_buff_pkts; + __le32 bcast_no_buff_pkts; + __le32 coalesced_pkts; + struct regpair coalesced_bytes; + __le32 coalesced_events; + __le32 coalesced_aborts; +}; + +/* + * Protocol-common statistics collected by the Xstorm (per client) + */ +struct xstorm_per_queue_stats { + struct regpair ucast_bytes_sent; + struct regpair 
mcast_bytes_sent; + struct regpair bcast_bytes_sent; + __le32 ucast_pkts_sent; + __le32 mcast_pkts_sent; + __le32 bcast_pkts_sent; + __le32 error_drop_pkts; +}; + +/* + * + */ +struct per_queue_stats { + struct tstorm_per_queue_stats tstorm_queue_statistics; + struct ustorm_per_queue_stats ustorm_queue_statistics; + struct xstorm_per_queue_stats xstorm_queue_statistics; +}; + + +/* + * FW version stored in first line of pram + */ +struct pram_fw_version { + uint8_t major; + uint8_t minor; + uint8_t revision; + uint8_t engineering; + uint8_t flags; +#define PRAM_FW_VERSION_OPTIMIZED (0x1 << 0) +#define PRAM_FW_VERSION_OPTIMIZED_SHIFT 0 +#define PRAM_FW_VERSION_STORM_ID (0x3 << 1) +#define PRAM_FW_VERSION_STORM_ID_SHIFT 1 +#define PRAM_FW_VERSION_BIG_ENDIEN (0x1 << 3) +#define PRAM_FW_VERSION_BIG_ENDIEN_SHIFT 3 +#define PRAM_FW_VERSION_CHIP_VERSION (0x3 << 4) +#define PRAM_FW_VERSION_CHIP_VERSION_SHIFT 4 +#define __PRAM_FW_VERSION_RESERVED0 (0x3 << 6) +#define __PRAM_FW_VERSION_RESERVED0_SHIFT 6 +}; + + +/* + * Ethernet slow path element + */ +union protocol_common_specific_data { + uint8_t protocol_data[8]; + struct regpair phy_address; + struct regpair mac_config_addr; + struct afex_vif_list_ramrod_data afex_vif_list_data; +}; + +/* + * The send queue element + */ +struct protocol_common_spe { + struct spe_hdr hdr; + union protocol_common_specific_data data; +}; + + +/* + * The data for the Set Timesync Ramrod + */ +struct set_timesync_ramrod_data { + uint8_t drift_adjust_cmd; + uint8_t offset_cmd; + uint8_t add_sub_drift_adjust_value; + uint8_t drift_adjust_value; + uint32_t drift_adjust_period; + struct regpair offset_delta; +}; + + +/* + * The send queue element + */ +struct slow_path_element { + struct spe_hdr hdr; + struct regpair protocol_data; +}; + + +/* + * Protocol-common statistics counter + */ +struct stats_counter { + __le16 xstats_counter; + __le16 reserved0; + __le32 reserved1; + __le16 tstats_counter; + __le16 reserved2; + __le32 reserved3; + __le16 ustats_counter; + __le16 reserved4; + __le32 reserved5; + __le16 cstats_counter; + __le16 reserved6; + __le32 reserved7; +}; + + +/* + * + */ +struct stats_query_entry { + uint8_t kind; + uint8_t index; + __le16 funcID; + __le32 reserved; + struct regpair address; +}; + +/* + * statistic command + */ +struct stats_query_cmd_group { + struct stats_query_entry query[STATS_QUERY_CMD_COUNT]; +}; + + +/* + * statistic command header + */ +struct stats_query_header { + uint8_t cmd_num; + uint8_t reserved0; + __le16 drv_stats_counter; + __le32 reserved1; + struct regpair stats_counters_addrs; +}; + + +/* + * Types of statistcis query entry + */ +enum stats_query_type { + STATS_TYPE_QUEUE, + STATS_TYPE_PORT, + STATS_TYPE_PF, + STATS_TYPE_TOE, + STATS_TYPE_FCOE, + MAX_STATS_QUERY_TYPE}; + + +/* + * Indicate of the function status block state + */ +enum status_block_state { + SB_DISABLED, + SB_ENABLED, + SB_CLEANED, + MAX_STATUS_BLOCK_STATE}; + + +/* + * Storm IDs (including attentions for IGU related enums) + */ +enum storm_id { + USTORM_ID, + CSTORM_ID, + XSTORM_ID, + TSTORM_ID, + ATTENTION_ID, + MAX_STORM_ID}; + + +/* + * Taffic types used in ETS and flow control algorithms + */ +enum traffic_type { + LLFC_TRAFFIC_TYPE_NW, + LLFC_TRAFFIC_TYPE_FCOE, + LLFC_TRAFFIC_TYPE_ISCSI, + MAX_TRAFFIC_TYPE}; + + +/* + * zone A per-queue data + */ +struct tstorm_queue_zone_data { + struct regpair reserved[4]; +}; + + +/* + * zone B per-VF data + */ +struct tstorm_vf_zone_data { + struct regpair reserved; +}; + + +/* + * Add or Subtract Value for Set 
Timesync Ramrod + */ +enum ts_add_sub_value { + TS_SUB_VALUE, + TS_ADD_VALUE, + MAX_TS_ADD_SUB_VALUE}; + + +/* + * Drift-Adjust Commands for Set Timesync Ramrod + */ +enum ts_drift_adjust_cmd { + TS_DRIFT_ADJUST_KEEP, + TS_DRIFT_ADJUST_SET, + TS_DRIFT_ADJUST_RESET, + MAX_TS_DRIFT_ADJUST_CMD}; + + +/* + * Offset Commands for Set Timesync Ramrod + */ +enum ts_offset_cmd { + TS_OFFSET_KEEP, + TS_OFFSET_INC, + TS_OFFSET_DEC, + MAX_TS_OFFSET_CMD}; + + +/* + * Input for measuring Pci Latency + */ +struct t_measure_pci_latency_ctrl { + struct regpair read_addr; +#if defined(__BIG_ENDIAN) + uint8_t sleep; + uint8_t enable; + uint8_t func_id; + uint8_t read_size; +#elif defined(__LITTLE_ENDIAN) + uint8_t read_size; + uint8_t func_id; + uint8_t enable; + uint8_t sleep; +#endif +#if defined(__BIG_ENDIAN) + uint16_t num_meas; + uint8_t reserved; + uint8_t period_10us; +#elif defined(__LITTLE_ENDIAN) + uint8_t period_10us; + uint8_t reserved; + uint16_t num_meas; +#endif +}; + + +/* + * Input for measuring Pci Latency + */ +struct t_measure_pci_latency_data { +#if defined(__BIG_ENDIAN) + uint16_t max_time_ns; + uint16_t min_time_ns; +#elif defined(__LITTLE_ENDIAN) + uint16_t min_time_ns; + uint16_t max_time_ns; +#endif +#if defined(__BIG_ENDIAN) + uint16_t reserved; + uint16_t num_reads; +#elif defined(__LITTLE_ENDIAN) + uint16_t num_reads; + uint16_t reserved; +#endif + struct regpair sum_time_ns; +}; + + +/* + * zone A per-queue data + */ +struct ustorm_queue_zone_data { + struct ustorm_eth_rx_producers eth_rx_producers; + struct regpair reserved[3]; +}; + + +/* + * zone B per-VF data + */ +struct ustorm_vf_zone_data { + struct regpair reserved; +}; + + +/* + * data per VF-PF channel + */ +struct vf_pf_channel_data { +#if defined(__BIG_ENDIAN) + uint16_t reserved0; + uint8_t valid; + uint8_t state; +#elif defined(__LITTLE_ENDIAN) + uint8_t state; + uint8_t valid; + uint16_t reserved0; +#endif + uint32_t reserved1; +}; + + +/* + * State of VF-PF channel + */ +enum vf_pf_channel_state { + VF_PF_CHANNEL_STATE_READY, + VF_PF_CHANNEL_STATE_WAITING_FOR_ACK, + MAX_VF_PF_CHANNEL_STATE}; + + +/* + * vif_list_rule_kind + */ +enum vif_list_rule_kind { + VIF_LIST_RULE_SET, + VIF_LIST_RULE_GET, + VIF_LIST_RULE_CLEAR_ALL, + VIF_LIST_RULE_CLEAR_FUNC, + MAX_VIF_LIST_RULE_KIND}; + + +/* + * zone A per-queue data + */ +struct xstorm_queue_zone_data { + struct regpair reserved[4]; +}; + + +/* + * zone B per-VF data + */ +struct xstorm_vf_zone_data { + struct regpair reserved; +}; + +#endif /* ECORE_HSI_H */ diff --git a/src/spdk/dpdk/drivers/net/bnx2x/ecore_init.h b/src/spdk/dpdk/drivers/net/bnx2x/ecore_init.h new file mode 100644 index 000000000..4e348612a --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnx2x/ecore_init.h @@ -0,0 +1,821 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2007-2013 Broadcom Corporation. + * + * Eric Davis + * David Christensen + * Gary Zambrano + * + * Copyright (c) 2013-2015 Brocade Communications Systems, Inc. + * Copyright (c) 2015-2018 Cavium Inc. + * All rights reserved. 
+ * www.cavium.com + */ + +#ifndef ECORE_INIT_H +#define ECORE_INIT_H + +/* Init operation types and structures */ +enum { + OP_RD = 0x1, /* read a single register */ + OP_WR, /* write a single register */ + OP_SW, /* copy a string to the device */ + OP_ZR, /* clear memory */ + OP_ZP, /* unzip then copy with DMAE */ + OP_WR_64, /* write 64 bit pattern */ + OP_WB, /* copy a string using DMAE */ + OP_WB_ZR, /* Clear a string using DMAE or indirect-wr */ + OP_IF_MODE_OR, /* Skip the following ops if all init modes don't match */ + OP_IF_MODE_AND, /* Skip the following ops if any init modes don't match */ + OP_MAX +}; + +enum { + STAGE_START, + STAGE_END, +}; + +/* Returns the index of start or end of a specific block stage in ops array*/ +#define BLOCK_OPS_IDX(block, stage, end) \ + (2*(((block)*NUM_OF_INIT_PHASES) + (stage)) + (end)) + + +/* structs for the various opcodes */ +struct raw_op { + uint32_t op:8; + uint32_t offset:24; + uint32_t raw_data; +}; + +struct op_read { + uint32_t op:8; + uint32_t offset:24; + uint32_t val; +}; + +struct op_write { + uint32_t op:8; + uint32_t offset:24; + uint32_t val; +}; + +struct op_arr_write { + uint32_t op:8; + uint32_t offset:24; +#ifdef __BIG_ENDIAN + uint16_t data_len; + uint16_t data_off; +#else /* __LITTLE_ENDIAN */ + uint16_t data_off; + uint16_t data_len; +#endif +}; + +struct op_zero { + uint32_t op:8; + uint32_t offset:24; + uint32_t len; +}; + +struct op_if_mode { + uint32_t op:8; + uint32_t cmd_offset:24; + uint32_t mode_bit_map; +}; + + +union init_op { + struct op_read read; + struct op_write write; + struct op_arr_write arr_wr; + struct op_zero zero; + struct raw_op raw; + struct op_if_mode if_mode; +}; + + +/* Init Phases */ +enum { + PHASE_COMMON, + PHASE_PORT0, + PHASE_PORT1, + PHASE_PF0, + PHASE_PF1, + PHASE_PF2, + PHASE_PF3, + PHASE_PF4, + PHASE_PF5, + PHASE_PF6, + PHASE_PF7, + NUM_OF_INIT_PHASES +}; + +/* Init Modes */ +enum { + MODE_ASIC = 0x00000001, + MODE_FPGA = 0x00000002, + MODE_EMUL = 0x00000004, + MODE_E2 = 0x00000008, + MODE_E3 = 0x00000010, + MODE_PORT2 = 0x00000020, + MODE_PORT4 = 0x00000040, + MODE_SF = 0x00000080, + MODE_MF = 0x00000100, + MODE_MF_SD = 0x00000200, + MODE_MF_SI = 0x00000400, + MODE_MF_AFEX = 0x00000800, + MODE_E3_A0 = 0x00001000, + MODE_E3_B0 = 0x00002000, + MODE_COS3 = 0x00004000, + MODE_COS6 = 0x00008000, + MODE_LITTLE_ENDIAN = 0x00010000, + MODE_BIG_ENDIAN = 0x00020000, +}; + +/* Init Blocks */ +enum { + BLOCK_ATC, + BLOCK_BRB1, + BLOCK_CCM, + BLOCK_CDU, + BLOCK_CFC, + BLOCK_CSDM, + BLOCK_CSEM, + BLOCK_DBG, + BLOCK_DMAE, + BLOCK_DORQ, + BLOCK_HC, + BLOCK_IGU, + BLOCK_MISC, + BLOCK_NIG, + BLOCK_PBF, + BLOCK_PGLUE_B, + BLOCK_PRS, + BLOCK_PXP2, + BLOCK_PXP, + BLOCK_QM, + BLOCK_SRC, + BLOCK_TCM, + BLOCK_TM, + BLOCK_TSDM, + BLOCK_TSEM, + BLOCK_UCM, + BLOCK_UPB, + BLOCK_USDM, + BLOCK_USEM, + BLOCK_XCM, + BLOCK_XPB, + BLOCK_XSDM, + BLOCK_XSEM, + BLOCK_MISC_AEU, + NUM_OF_INIT_BLOCKS +}; + +#include "bnx2x.h" + +/* Vnics per mode */ +#define ECORE_PORT2_MODE_NUM_VNICS 4 + + +/* QM queue numbers */ +#define ECORE_ETH_Q 0 +#define ECORE_TOE_Q 3 +#define ECORE_TOE_ACK_Q 6 +#define ECORE_ISCSI_Q 9 +#define ECORE_ISCSI_ACK_Q 11 +#define ECORE_FCOE_Q 10 + +/* Vnics per mode */ +#define ECORE_PORT4_MODE_NUM_VNICS 2 + +/* COS offset for port1 in E3 B0 4port mode */ +#define ECORE_E3B0_PORT1_COS_OFFSET 3 + +/* QM Register addresses */ +#define ECORE_Q_VOQ_REG_ADDR(pf_q_num)\ + (QM_REG_QVOQIDX_0 + 4 * (pf_q_num)) +#define ECORE_VOQ_Q_REG_ADDR(cos, pf_q_num)\ + (QM_REG_VOQQMASK_0_LSB + 4 * ((cos) * 2 + ((pf_q_num) >> 
5))) +#define ECORE_Q_CMDQ_REG_ADDR(pf_q_num)\ + (QM_REG_BYTECRDCMDQ_0 + 4 * ((pf_q_num) >> 4)) + +/* extracts the QM queue number for the specified port and vnic */ +#define ECORE_PF_Q_NUM(q_num, port, vnic)\ + ((((port) << 1) | (vnic)) * 16 + (q_num)) + + +/* Maps the specified queue to the specified COS */ +static inline void ecore_map_q_cos(struct bnx2x_softc *sc, uint32_t q_num, uint32_t new_cos) +{ + /* find current COS mapping */ + uint32_t curr_cos = REG_RD(sc, QM_REG_QVOQIDX_0 + q_num * 4); + + /* check if queue->COS mapping has changed */ + if (curr_cos != new_cos) { + uint32_t num_vnics = ECORE_PORT2_MODE_NUM_VNICS; + uint32_t reg_addr, reg_bit_map, vnic; + + /* update parameters for 4port mode */ + if (INIT_MODE_FLAGS(sc) & MODE_PORT4) { + num_vnics = ECORE_PORT4_MODE_NUM_VNICS; + if (SC_PORT(sc)) { + curr_cos += ECORE_E3B0_PORT1_COS_OFFSET; + new_cos += ECORE_E3B0_PORT1_COS_OFFSET; + } + } + + /* change queue mapping for each VNIC */ + for (vnic = 0; vnic < num_vnics; vnic++) { + uint32_t pf_q_num = + ECORE_PF_Q_NUM(q_num, SC_PORT(sc), vnic); + uint32_t q_bit_map = 1 << (pf_q_num & 0x1f); + + /* overwrite queue->VOQ mapping */ + REG_WR(sc, ECORE_Q_VOQ_REG_ADDR(pf_q_num), new_cos); + + /* clear queue bit from current COS bit map */ + reg_addr = ECORE_VOQ_Q_REG_ADDR(curr_cos, pf_q_num); + reg_bit_map = REG_RD(sc, reg_addr); + REG_WR(sc, reg_addr, reg_bit_map & (~q_bit_map)); + + /* set queue bit in new COS bit map */ + reg_addr = ECORE_VOQ_Q_REG_ADDR(new_cos, pf_q_num); + reg_bit_map = REG_RD(sc, reg_addr); + REG_WR(sc, reg_addr, reg_bit_map | q_bit_map); + + /* set/clear queue bit in command-queue bit map + (E2/E3A0 only, valid COS values are 0/1) */ + if (!(INIT_MODE_FLAGS(sc) & MODE_E3_B0)) { + reg_addr = ECORE_Q_CMDQ_REG_ADDR(pf_q_num); + reg_bit_map = REG_RD(sc, reg_addr); + q_bit_map = 1 << (2 * (pf_q_num & 0xf)); + reg_bit_map = new_cos ? + (reg_bit_map | q_bit_map) : + (reg_bit_map & (~q_bit_map)); + REG_WR(sc, reg_addr, reg_bit_map); + } + } + } +} + +/* Configures the QM according to the specified per-traffic-type COSes */ +static inline void ecore_dcb_config_qm(struct bnx2x_softc *sc, enum cos_mode mode, + struct priority_cos *traffic_cos) +{ + ecore_map_q_cos(sc, ECORE_FCOE_Q, + traffic_cos[LLFC_TRAFFIC_TYPE_FCOE].cos); + ecore_map_q_cos(sc, ECORE_ISCSI_Q, + traffic_cos[LLFC_TRAFFIC_TYPE_ISCSI].cos); + ecore_map_q_cos(sc, ECORE_ISCSI_ACK_Q, + traffic_cos[LLFC_TRAFFIC_TYPE_ISCSI].cos); + if (mode != STATIC_COS) { + /* required only in OVERRIDE_COS mode */ + ecore_map_q_cos(sc, ECORE_ETH_Q, + traffic_cos[LLFC_TRAFFIC_TYPE_NW].cos); + ecore_map_q_cos(sc, ECORE_TOE_Q, + traffic_cos[LLFC_TRAFFIC_TYPE_NW].cos); + ecore_map_q_cos(sc, ECORE_TOE_ACK_Q, + traffic_cos[LLFC_TRAFFIC_TYPE_NW].cos); + } +} + + +/* + * congestion management port init api description + * the api works as follows: + * the driver should pass the cmng_init_input struct, the port_init function + * will prepare the required internal ram structure which will be passed back + * to the driver (cmng_init) that will write it into the internal ram. + * + * IMPORTANT REMARKS: + * 1. the cmng_init struct does not represent the contiguous internal ram + * structure. the driver should use the XSTORM_CMNG_PERPORT_VARS_OFFSET + * offset in order to write the port sub struct and the + * PFID_FROM_PORT_AND_VNIC offset for writing the vnic sub struct (in other + * words - don't use memcpy!). + * 2. 
although the cmng_init struct is filled for the maximal vnic number + * possible, the driver should only write the valid vnics into the internal + * ram according to the appropriate port mode. + */ +#define BITS_TO_BYTES(x) ((x)/8) + +/* CMNG constants, as derived from system spec calculations */ + +/* default MIN rate in case VNIC min rate is configured to zero- 100Mbps */ +#define DEF_MIN_RATE 100 + +/* resolution of the rate shaping timer - 400 usec */ +#define RS_PERIODIC_TIMEOUT_USEC 400 + +/* + * number of bytes in single QM arbitration cycle - + * coefficient for calculating the fairness timer + */ +#define QM_ARB_BYTES 160000 + +/* resolution of Min algorithm 1:100 */ +#define MIN_RES 100 + +/* + * how many bytes above threshold for + * the minimal credit of Min algorithm + */ +#define MIN_ABOVE_THRESH 32768 + +/* + * Fairness algorithm integration time coefficient - + * for calculating the actual Tfair + */ +#define T_FAIR_COEF ((MIN_ABOVE_THRESH + QM_ARB_BYTES) * 8 * MIN_RES) + +/* Memory of fairness algorithm - 2 cycles */ +#define FAIR_MEM 2 +#define SAFC_TIMEOUT_USEC 52 + +#define SDM_TICKS 4 + + +static inline void ecore_init_max(const struct cmng_init_input *input_data, + uint32_t r_param, struct cmng_init *ram_data) +{ + uint32_t vnic; + struct cmng_vnic *vdata = &ram_data->vnic; + struct cmng_struct_per_port *pdata = &ram_data->port; + /* + * rate shaping per-port variables + * 100 micro seconds in SDM ticks = 25 + * since each tick is 4 microSeconds + */ + + pdata->rs_vars.rs_periodic_timeout = + RS_PERIODIC_TIMEOUT_USEC / SDM_TICKS; + + /* this is the threshold below which no timer arming will occur. + * 1.25 coefficient is for the threshold to be a little bigger + * then the real time to compensate for timer in-accuracy + */ + pdata->rs_vars.rs_threshold = + (5 * RS_PERIODIC_TIMEOUT_USEC * r_param)/4; + + /* rate shaping per-vnic variables */ + for (vnic = 0; vnic < ECORE_PORT2_MODE_NUM_VNICS; vnic++) { + /* global vnic counter */ + vdata->vnic_max_rate[vnic].vn_counter.rate = + input_data->vnic_max_rate[vnic]; + /* + * maximal Mbps for this vnic + * the quota in each timer period - number of bytes + * transmitted in this period + */ + vdata->vnic_max_rate[vnic].vn_counter.quota = + RS_PERIODIC_TIMEOUT_USEC * + (uint32_t)vdata->vnic_max_rate[vnic].vn_counter.rate / 8; + } + +} + +static inline void ecore_init_max_per_vn(uint16_t vnic_max_rate, + struct rate_shaping_vars_per_vn *ram_data) +{ + /* global vnic counter */ + ram_data->vn_counter.rate = vnic_max_rate; + + /* + * maximal Mbps for this vnic + * the quota in each timer period - number of bytes + * transmitted in this period + */ + ram_data->vn_counter.quota = + RS_PERIODIC_TIMEOUT_USEC * (uint32_t)vnic_max_rate / 8; +} + +static inline void ecore_init_min(const struct cmng_init_input *input_data, + uint32_t r_param, struct cmng_init *ram_data) +{ + uint32_t vnic, fair_periodic_timeout_usec, vnicWeightSum, tFair; + struct cmng_vnic *vdata = &ram_data->vnic; + struct cmng_struct_per_port *pdata = &ram_data->port; + + /* this is the resolution of the fairness timer */ + fair_periodic_timeout_usec = QM_ARB_BYTES / r_param; + + /* + * fairness per-port variables + * for 10G it is 1000usec. for 1G it is 10000usec. 
+ */ + tFair = T_FAIR_COEF / input_data->port_rate; + + /* this is the threshold below which we won't arm the timer anymore */ + pdata->fair_vars.fair_threshold = QM_ARB_BYTES + + input_data->fairness_thr; + + /*New limitation - minimal packet size to cause timeout to be armed */ + pdata->fair_vars.size_thr = input_data->size_thr; + + /* + * we multiply by 1e3/8 to get bytes/msec. We don't want the credits + * to pass a credit of the T_FAIR*FAIR_MEM (algorithm resolution) + */ + pdata->fair_vars.upper_bound = r_param * tFair * FAIR_MEM; + + /* since each tick is 4 microSeconds */ + pdata->fair_vars.fairness_timeout = + fair_periodic_timeout_usec / SDM_TICKS; + + /* calculate sum of weights */ + vnicWeightSum = 0; + + for (vnic = 0; vnic < ECORE_PORT2_MODE_NUM_VNICS; vnic++) + vnicWeightSum += input_data->vnic_min_rate[vnic]; + + /* global vnic counter */ + if (vnicWeightSum > 0) { + /* fairness per-vnic variables */ + for (vnic = 0; vnic < ECORE_PORT2_MODE_NUM_VNICS; vnic++) { + /* + * this is the credit for each period of the fairness + * algorithm - number of bytes in T_FAIR (this vnic + * share of the port rate) + */ + vdata->vnic_min_rate[vnic].vn_credit_delta = + ((uint32_t)(input_data->vnic_min_rate[vnic]) * 100 * + (T_FAIR_COEF / (8 * 100 * vnicWeightSum))); + if (vdata->vnic_min_rate[vnic].vn_credit_delta < + pdata->fair_vars.fair_threshold + + MIN_ABOVE_THRESH) { + vdata->vnic_min_rate[vnic].vn_credit_delta = + pdata->fair_vars.fair_threshold + + MIN_ABOVE_THRESH; + } + } + } +} + +static inline void ecore_init_fw_wrr(const struct cmng_init_input *input_data, + uint32_t r_param __rte_unused, + struct cmng_init *ram_data) +{ + uint32_t vnic, cos; + uint32_t cosWeightSum = 0; + struct cmng_vnic *vdata = &ram_data->vnic; + struct cmng_struct_per_port *pdata = &ram_data->port; + + for (cos = 0; cos < MAX_COS_NUMBER; cos++) + cosWeightSum += input_data->cos_min_rate[cos]; + + if (cosWeightSum > 0) { + + for (vnic = 0; vnic < ECORE_PORT2_MODE_NUM_VNICS; vnic++) { + /* + * Since cos and vnic shouldn't work together the rate + * to divide between the coses is the port rate. + */ + uint32_t *ccd = vdata->vnic_min_rate[vnic].cos_credit_delta; + for (cos = 0; cos < MAX_COS_NUMBER; cos++) { + /* + * this is the credit for each period of + * the fairness algorithm - number of bytes + * in T_FAIR (this cos share of the vnic rate) + */ + ccd[cos] = + ((uint32_t)input_data->cos_min_rate[cos] * 100 * + (T_FAIR_COEF / (8 * 100 * cosWeightSum))); + if (ccd[cos] < pdata->fair_vars.fair_threshold + + MIN_ABOVE_THRESH) { + ccd[cos] = + pdata->fair_vars.fair_threshold + + MIN_ABOVE_THRESH; + } + } + } + } +} + +static inline void +ecore_init_safc(const struct cmng_init_input *input_data __rte_unused, + struct cmng_init *ram_data) +{ + /* in microSeconds */ + ram_data->port.safc_vars.safc_timeout_usec = SAFC_TIMEOUT_USEC; +} + +/* Congestion management port init */ +static inline void ecore_init_cmng(const struct cmng_init_input *input_data, + struct cmng_init *ram_data) +{ + uint32_t r_param; + ECORE_MEMSET(ram_data, 0, sizeof(struct cmng_init)); + + ram_data->port.flags = input_data->flags; + + /* + * number of bytes transmitted in a rate of 10Gbps + * in one usec = 1.25KB. 
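+ * (worked example, for illustration: with port_rate = 10000 Mbps,
+ * r_param = BITS_TO_BYTES(10000) = 1250 bytes per usec, i.e. the
+ * 1.25KB-per-usec figure above)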
+ */ + r_param = BITS_TO_BYTES(input_data->port_rate); + ecore_init_max(input_data, r_param, ram_data); + ecore_init_min(input_data, r_param, ram_data); + ecore_init_fw_wrr(input_data, r_param, ram_data); + ecore_init_safc(input_data, ram_data); +} + + + + +/* Returns the index of start or end of a specific block stage in ops array*/ +#define BLOCK_OPS_IDX(block, stage, end) \ + (2*(((block)*NUM_OF_INIT_PHASES) + (stage)) + (end)) + + +#define INITOP_SET 0 /* set the HW directly */ +#define INITOP_CLEAR 1 /* clear the HW directly */ +#define INITOP_INIT 2 /* set the init-value array */ + +/**************************************************************************** +* ILT management +****************************************************************************/ +struct ilt_line { + ecore_dma_addr_t page_mapping; + void *page; + uint32_t size; +}; + +struct ilt_client_info { + uint32_t page_size; + uint16_t start; + uint16_t end; + uint16_t client_num; + uint16_t flags; +#define ILT_CLIENT_SKIP_INIT 0x1 +#define ILT_CLIENT_SKIP_MEM 0x2 +}; + +struct ecore_ilt { + uint32_t start_line; + struct ilt_line *lines; + struct ilt_client_info clients[4]; +#define ILT_CLIENT_CDU 0 +#define ILT_CLIENT_QM 1 +#define ILT_CLIENT_SRC 2 +#define ILT_CLIENT_TM 3 +}; + +/**************************************************************************** +* SRC configuration +****************************************************************************/ +struct src_ent { + uint8_t opaque[56]; + uint64_t next; +}; + +/**************************************************************************** +* Parity configuration +****************************************************************************/ +#define BLOCK_PRTY_INFO(block, en_mask, m1, m1h, m2, m3) \ +{ \ + block##_REG_##block##_PRTY_MASK, \ + block##_REG_##block##_PRTY_STS_CLR, \ + en_mask, {m1, m1h, m2, m3}, #block \ +} + +#define BLOCK_PRTY_INFO_0(block, en_mask, m1, m1h, m2, m3) \ +{ \ + block##_REG_##block##_PRTY_MASK_0, \ + block##_REG_##block##_PRTY_STS_CLR_0, \ + en_mask, {m1, m1h, m2, m3}, #block "_0" \ +} + +#define BLOCK_PRTY_INFO_1(block, en_mask, m1, m1h, m2, m3) \ +{ \ + block##_REG_##block##_PRTY_MASK_1, \ + block##_REG_##block##_PRTY_STS_CLR_1, \ + en_mask, {m1, m1h, m2, m3}, #block "_1" \ +} + +static const struct { + uint32_t mask_addr; + uint32_t sts_clr_addr; + uint32_t en_mask; /* Mask to enable parity attentions */ + struct { + uint32_t e1; /* 57710 */ + uint32_t e1h; /* 57711 */ + uint32_t e2; /* 57712 */ + uint32_t e3; /* 578xx */ + } reg_mask; /* Register mask (all valid bits) */ + char name[8]; /* Block's longest name is 7 characters long + * (name + suffix) + */ +} ecore_blocks_parity_data[] = { + /* bit 19 masked */ + /* REG_WR(sc, PXP_REG_PXP_PRTY_MASK, 0x80000); */ + /* bit 5,18,20-31 */ + /* REG_WR(sc, PXP2_REG_PXP2_PRTY_MASK_0, 0xfff40020); */ + /* bit 5 */ + /* REG_WR(sc, PXP2_REG_PXP2_PRTY_MASK_1, 0x20); */ + /* REG_WR(sc, HC_REG_HC_PRTY_MASK, 0x0); */ + /* REG_WR(sc, MISC_REG_MISC_PRTY_MASK, 0x0); */ + + /* Block IGU, MISC, PXP and PXP2 parity errors as long as we don't + * want to handle "system kill" flow at the moment. 
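+ * In the table below, en_mask (ANDed with the per-chip reg_mask) is the
+ * value ecore_enable_blocks_parity() writes to mask_addr, and reg_mask
+ * lists the bits that are valid on each chip family (E1/E1H/E2/E3).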
+ */ + BLOCK_PRTY_INFO(PXP, 0x7ffffff, 0x3ffffff, 0x3ffffff, 0x7ffffff, + 0x7ffffff), + BLOCK_PRTY_INFO_0(PXP2, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0xffffffff), + BLOCK_PRTY_INFO_1(PXP2, 0x1ffffff, 0x7f, 0x7f, 0x7ff, 0x1ffffff), + BLOCK_PRTY_INFO(HC, 0x7, 0x7, 0x7, 0, 0), + BLOCK_PRTY_INFO(NIG, 0xffffffff, 0x3fffffff, 0xffffffff, 0, 0), + BLOCK_PRTY_INFO_0(NIG, 0xffffffff, 0, 0, 0xffffffff, 0xffffffff), + BLOCK_PRTY_INFO_1(NIG, 0xffff, 0, 0, 0xff, 0xffff), + BLOCK_PRTY_INFO(IGU, 0x7ff, 0, 0, 0x7ff, 0x7ff), + BLOCK_PRTY_INFO(MISC, 0x1, 0x1, 0x1, 0x1, 0x1), + BLOCK_PRTY_INFO(QM, 0, 0x1ff, 0xfff, 0xfff, 0xfff), + BLOCK_PRTY_INFO(ATC, 0x1f, 0, 0, 0x1f, 0x1f), + BLOCK_PRTY_INFO(PGLUE_B, 0x3, 0, 0, 0x3, 0x3), + BLOCK_PRTY_INFO(DORQ, 0, 0x3, 0x3, 0x3, 0x3), + {GRCBASE_UPB + PB_REG_PB_PRTY_MASK, + GRCBASE_UPB + PB_REG_PB_PRTY_STS_CLR, 0xf, + {0xf, 0xf, 0xf, 0xf}, "UPB"}, + {GRCBASE_XPB + PB_REG_PB_PRTY_MASK, + GRCBASE_XPB + PB_REG_PB_PRTY_STS_CLR, 0, + {0xf, 0xf, 0xf, 0xf}, "XPB"}, + BLOCK_PRTY_INFO(SRC, 0x4, 0x7, 0x7, 0x7, 0x7), + BLOCK_PRTY_INFO(CDU, 0, 0x1f, 0x1f, 0x1f, 0x1f), + BLOCK_PRTY_INFO(CFC, 0, 0xf, 0xf, 0xf, 0x3f), + BLOCK_PRTY_INFO(DBG, 0, 0x1, 0x1, 0x1, 0x1), + BLOCK_PRTY_INFO(DMAE, 0, 0xf, 0xf, 0xf, 0xf), + BLOCK_PRTY_INFO(BRB1, 0, 0xf, 0xf, 0xf, 0xf), + BLOCK_PRTY_INFO(PRS, (1 << 6), 0xff, 0xff, 0xff, 0xff), + BLOCK_PRTY_INFO(PBF, 0, 0, 0x3ffff, 0xfffff, 0xfffffff), + BLOCK_PRTY_INFO(TM, 0, 0, 0x7f, 0x7f, 0x7f), + BLOCK_PRTY_INFO(TSDM, 0x18, 0x7ff, 0x7ff, 0x7ff, 0x7ff), + BLOCK_PRTY_INFO(CSDM, 0x8, 0x7ff, 0x7ff, 0x7ff, 0x7ff), + BLOCK_PRTY_INFO(USDM, 0x38, 0x7ff, 0x7ff, 0x7ff, 0x7ff), + BLOCK_PRTY_INFO(XSDM, 0x8, 0x7ff, 0x7ff, 0x7ff, 0x7ff), + BLOCK_PRTY_INFO(TCM, 0, 0, 0x7ffffff, 0x7ffffff, 0x7ffffff), + BLOCK_PRTY_INFO(CCM, 0, 0, 0x7ffffff, 0x7ffffff, 0x7ffffff), + BLOCK_PRTY_INFO(UCM, 0, 0, 0x7ffffff, 0x7ffffff, 0x7ffffff), + BLOCK_PRTY_INFO(XCM, 0, 0, 0x3fffffff, 0x3fffffff, 0x3fffffff), + BLOCK_PRTY_INFO_0(TSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff, + 0xffffffff), + BLOCK_PRTY_INFO_1(TSEM, 0, 0x3, 0x1f, 0x3f, 0x3f), + BLOCK_PRTY_INFO_0(USEM, 0, 0xffffffff, 0xffffffff, 0xffffffff, + 0xffffffff), + BLOCK_PRTY_INFO_1(USEM, 0, 0x3, 0x1f, 0x1f, 0x1f), + BLOCK_PRTY_INFO_0(CSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff, + 0xffffffff), + BLOCK_PRTY_INFO_1(CSEM, 0, 0x3, 0x1f, 0x1f, 0x1f), + BLOCK_PRTY_INFO_0(XSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff, + 0xffffffff), + BLOCK_PRTY_INFO_1(XSEM, 0, 0x3, 0x1f, 0x3f, 0x3f), +}; + + +/* [28] MCP Latched rom_parity + * [29] MCP Latched ump_rx_parity + * [30] MCP Latched ump_tx_parity + * [31] MCP Latched scpad_parity + */ +#define MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS \ + (AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \ + AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \ + AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY) + +#define MISC_AEU_ENABLE_MCP_PRTY_BITS \ + (MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS | \ + AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY) + +/* Below registers control the MCP parity attention output. When + * MISC_AEU_ENABLE_MCP_PRTY_BITS are set - attentions are + * enabled, when cleared - disabled. 
+ */ +static const struct { + uint32_t addr; + uint32_t bits; +} mcp_attn_ctl_regs[] = { + { MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0, + MISC_AEU_ENABLE_MCP_PRTY_BITS }, + { MISC_REG_AEU_ENABLE4_NIG_0, + MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS }, + { MISC_REG_AEU_ENABLE4_PXP_0, + MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS }, + { MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0, + MISC_AEU_ENABLE_MCP_PRTY_BITS }, + { MISC_REG_AEU_ENABLE4_NIG_1, + MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS }, + { MISC_REG_AEU_ENABLE4_PXP_1, + MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS } +}; + +static inline void ecore_set_mcp_parity(struct bnx2x_softc *sc, uint8_t enable) +{ + unsigned int i; + uint32_t reg_val; + + for (i = 0; i < ARRAY_SIZE(mcp_attn_ctl_regs); i++) { + reg_val = REG_RD(sc, mcp_attn_ctl_regs[i].addr); + + if (enable) + reg_val |= mcp_attn_ctl_regs[i].bits; + else + reg_val &= ~mcp_attn_ctl_regs[i].bits; + + REG_WR(sc, mcp_attn_ctl_regs[i].addr, reg_val); + } +} + +static inline uint32_t ecore_parity_reg_mask(struct bnx2x_softc *sc, int idx) +{ + if (CHIP_IS_E1(sc)) + return ecore_blocks_parity_data[idx].reg_mask.e1; + else if (CHIP_IS_E1H(sc)) + return ecore_blocks_parity_data[idx].reg_mask.e1h; + else if (CHIP_IS_E2(sc)) + return ecore_blocks_parity_data[idx].reg_mask.e2; + else /* CHIP_IS_E3 */ + return ecore_blocks_parity_data[idx].reg_mask.e3; +} + +static inline void ecore_disable_blocks_parity(struct bnx2x_softc *sc) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(ecore_blocks_parity_data); i++) { + uint32_t dis_mask = ecore_parity_reg_mask(sc, i); + + if (dis_mask) { + REG_WR(sc, ecore_blocks_parity_data[i].mask_addr, + dis_mask); + ECORE_MSG(sc, "Setting parity mask " + "for %s to\t\t0x%x", + ecore_blocks_parity_data[i].name, dis_mask); + } + } + + /* Disable MCP parity attentions */ + ecore_set_mcp_parity(sc, false); +} + +/** + * Clear the parity error status registers. + */ +static inline void ecore_clear_blocks_parity(struct bnx2x_softc *sc) +{ + unsigned int i; + uint32_t reg_val, mcp_aeu_bits = + AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | + AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY | + AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | + AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY; + + /* Clear SEM_FAST parities */ + REG_WR(sc, XSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1); + REG_WR(sc, TSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1); + REG_WR(sc, USEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1); + REG_WR(sc, CSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1); + + for (i = 0; i < ARRAY_SIZE(ecore_blocks_parity_data); i++) { + uint32_t reg_mask = ecore_parity_reg_mask(sc, i); + + if (reg_mask) { + reg_val = REG_RD(sc, ecore_blocks_parity_data[i]. 
+ sts_clr_addr); + if (reg_val & reg_mask) + ECORE_MSG(sc, "Parity errors in %s: 0x%x", + ecore_blocks_parity_data[i].name, + reg_val & reg_mask); + } + } + + /* Check if there were parity attentions in MCP */ + reg_val = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_MCP); + if (reg_val & mcp_aeu_bits) + ECORE_MSG(sc, "Parity error in MCP: 0x%x", + reg_val & mcp_aeu_bits); + + /* Clear parity attentions in MCP: + * [7] clears Latched rom_parity + * [8] clears Latched ump_rx_parity + * [9] clears Latched ump_tx_parity + * [10] clears Latched scpad_parity (both ports) + */ + REG_WR(sc, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x780); +} + +static inline void ecore_enable_blocks_parity(struct bnx2x_softc *sc) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(ecore_blocks_parity_data); i++) { + uint32_t reg_mask = ecore_parity_reg_mask(sc, i); + + if (reg_mask) + REG_WR(sc, ecore_blocks_parity_data[i].mask_addr, + ecore_blocks_parity_data[i].en_mask & reg_mask); + } + + /* Enable MCP parity attentions */ + ecore_set_mcp_parity(sc, true); +} + + +#endif /* ECORE_INIT_H */ diff --git a/src/spdk/dpdk/drivers/net/bnx2x/ecore_init_ops.h b/src/spdk/dpdk/drivers/net/bnx2x/ecore_init_ops.h new file mode 100644 index 000000000..0945e7999 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnx2x/ecore_init_ops.h @@ -0,0 +1,845 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2007-2013 Broadcom Corporation. + * + * Eric Davis + * David Christensen + * Gary Zambrano + * + * Copyright (c) 2013-2015 Brocade Communications Systems, Inc. + * Copyright (c) 2015-2018 Cavium Inc. + * All rights reserved. + * www.cavium.com + */ + +#ifndef ECORE_INIT_OPS_H +#define ECORE_INIT_OPS_H + +static int ecore_gunzip(struct bnx2x_softc *sc, const uint8_t *zbuf, int len); +static void ecore_write_dmae_phys_len(struct bnx2x_softc *sc, + ecore_dma_addr_t phys_addr, uint32_t addr, + uint32_t len); + +static void ecore_init_str_wr(struct bnx2x_softc *sc, uint32_t addr, + const uint32_t *data, uint32_t len) +{ + uint32_t i; + + for (i = 0; i < len; i++) + REG_WR(sc, addr + i*4, data[i]); +} + +static void ecore_write_big_buf(struct bnx2x_softc *sc, uint32_t addr, + uint32_t len, uint8_t wb __rte_unused) +{ + if (DMAE_READY(sc)) + ecore_write_dmae_phys_len(sc, GUNZIP_PHYS(sc), addr, len); + + /* in later chips PXP root complex handles BIOS ZLR w/o interrupting */ + else + ecore_init_str_wr(sc, addr, GUNZIP_BUF(sc), len); +} + +static void ecore_init_fill(struct bnx2x_softc *sc, uint32_t addr, int fill, + uint32_t len, uint8_t wb) +{ + uint32_t buf_len = (((len*4) > FW_BUF_SIZE) ? 
FW_BUF_SIZE : (len*4)); + uint32_t buf_len32 = buf_len/4; + uint32_t i; + + ECORE_MEMSET(GUNZIP_BUF(sc), (uint8_t)fill, buf_len); + + for (i = 0; i < len; i += buf_len32) { + uint32_t cur_len = min(buf_len32, len - i); + + ecore_write_big_buf(sc, addr + i * 4, cur_len, wb); + } +} + +static void ecore_write_big_buf_wb(struct bnx2x_softc *sc, uint32_t addr, uint32_t len) +{ + if (DMAE_READY(sc)) + ecore_write_dmae_phys_len(sc, GUNZIP_PHYS(sc), addr, len); + + /* in later chips PXP root complex handles BIOS ZLR w/o interrupting */ + else + ecore_init_str_wr(sc, addr, GUNZIP_BUF(sc), len); +} + +static void ecore_init_wr_64(struct bnx2x_softc *sc, uint32_t addr, + const uint32_t *data, uint32_t len64) +{ + uint32_t buf_len32 = FW_BUF_SIZE/4; + uint32_t len = len64*2; + uint64_t data64 = 0; + uint32_t i; + + /* 64 bit value is in a blob: first low DWORD, then high DWORD */ + data64 = HILO_U64((*(data + 1)), (*data)); + + len64 = min((uint32_t)(FW_BUF_SIZE/8), len64); + for (i = 0; i < len64; i++) { + uint64_t *pdata = ((uint64_t *)(GUNZIP_BUF(sc))) + i; + + *pdata = data64; + } + + for (i = 0; i < len; i += buf_len32) { + uint32_t cur_len = min(buf_len32, len - i); + + ecore_write_big_buf_wb(sc, addr + i*4, cur_len); + } +} + +/********************************************************* + There are different blobs for each PRAM section. + In addition, each blob write operation is divided into a few operations + in order to decrease the amount of phys. contiguous buffer needed. + Thus, when we select a blob the address may be with some offset + from the beginning of PRAM section. + The same holds for the INT_TABLE sections. +**********************************************************/ +#define IF_IS_INT_TABLE_ADDR(base, addr) \ + if (((base) <= (addr)) && ((base) + 0x400 >= (addr))) + +#define IF_IS_PRAM_ADDR(base, addr) \ + if (((base) <= (addr)) && ((base) + 0x40000 >= (addr))) + +static const uint8_t *ecore_sel_blob(struct bnx2x_softc *sc, uint32_t addr, + const uint8_t *data) +{ + IF_IS_INT_TABLE_ADDR(TSEM_REG_INT_TABLE, addr) + data = INIT_TSEM_INT_TABLE_DATA(sc); + else + IF_IS_INT_TABLE_ADDR(CSEM_REG_INT_TABLE, addr) + data = INIT_CSEM_INT_TABLE_DATA(sc); + else + IF_IS_INT_TABLE_ADDR(USEM_REG_INT_TABLE, addr) + data = INIT_USEM_INT_TABLE_DATA(sc); + else + IF_IS_INT_TABLE_ADDR(XSEM_REG_INT_TABLE, addr) + data = INIT_XSEM_INT_TABLE_DATA(sc); + else + IF_IS_PRAM_ADDR(TSEM_REG_PRAM, addr) + data = INIT_TSEM_PRAM_DATA(sc); + else + IF_IS_PRAM_ADDR(CSEM_REG_PRAM, addr) + data = INIT_CSEM_PRAM_DATA(sc); + else + IF_IS_PRAM_ADDR(USEM_REG_PRAM, addr) + data = INIT_USEM_PRAM_DATA(sc); + else + IF_IS_PRAM_ADDR(XSEM_REG_PRAM, addr) + data = INIT_XSEM_PRAM_DATA(sc); + + return data; +} + +static void ecore_init_wr_wb(struct bnx2x_softc *sc, uint32_t addr, + const uint32_t *data, uint32_t len) +{ + if (DMAE_READY(sc)) + VIRT_WR_DMAE_LEN(sc, data, addr, len, 0); + + /* in later chips PXP root complex handles BIOS ZLR w/o interrupting */ + else + ecore_init_str_wr(sc, addr, data, len); +} + + +static void ecore_wr_64(struct bnx2x_softc *sc, uint32_t reg, uint32_t val_lo, + uint32_t val_hi) +{ + uint32_t wb_write[2]; + + wb_write[0] = val_lo; + wb_write[1] = val_hi; + REG_WR_DMAE_LEN(sc, reg, wb_write, 2); +} + +static void ecore_init_wr_zp(struct bnx2x_softc *sc, uint32_t addr, uint32_t len, + uint32_t blob_off) +{ + const uint8_t *data = NULL; + int rc; + uint32_t i; + + data = ecore_sel_blob(sc, addr, data) + blob_off*4; + + rc = ecore_gunzip(sc, data, len); + if (rc) + return; + + /* gunzip_outlen is 
in dwords */ + len = GUNZIP_OUTLEN(sc); + for (i = 0; i < len; i++) + ((uint32_t *)GUNZIP_BUF(sc))[i] = (uint32_t) + ECORE_CPU_TO_LE32(((uint32_t *)GUNZIP_BUF(sc))[i]); + + ecore_write_big_buf_wb(sc, addr, len); +} + +static void ecore_init_block(struct bnx2x_softc *sc, uint32_t block, uint32_t stage) +{ + uint16_t op_start = + INIT_OPS_OFFSETS(sc)[BLOCK_OPS_IDX(block, stage, + STAGE_START)]; + uint16_t op_end = + INIT_OPS_OFFSETS(sc)[BLOCK_OPS_IDX(block, stage, + STAGE_END)]; + const union init_op *op; + uint32_t op_idx, op_type, addr, len; + const uint32_t *data, *data_base; + + /* If empty block */ + if (op_start == op_end) + return; + + data_base = INIT_DATA(sc); + + for (op_idx = op_start; op_idx < op_end; op_idx++) { + + op = (const union init_op *)&(INIT_OPS(sc)[op_idx]); + /* Get generic data */ + op_type = op->raw.op; + addr = op->raw.offset; + /* Get data that's used for OP_SW, OP_WB, OP_FW, OP_ZP and + * OP_WR64 (we assume that op_arr_write and op_write have the + * same structure). + */ + len = op->arr_wr.data_len; + data = data_base + op->arr_wr.data_off; + + switch (op_type) { + case OP_RD: + REG_RD(sc, addr); + break; + case OP_WR: + REG_WR(sc, addr, op->write.val); + break; + case OP_SW: + ecore_init_str_wr(sc, addr, data, len); + break; + case OP_WB: + ecore_init_wr_wb(sc, addr, data, len); + break; + case OP_ZR: + ecore_init_fill(sc, addr, 0, op->zero.len, 0); + break; + case OP_WB_ZR: + ecore_init_fill(sc, addr, 0, op->zero.len, 1); + break; + case OP_ZP: + ecore_init_wr_zp(sc, addr, len, + op->arr_wr.data_off); + break; + case OP_WR_64: + ecore_init_wr_64(sc, addr, data, len); + break; + case OP_IF_MODE_AND: + /* if any of the flags doesn't match, skip the + * conditional block. + */ + if ((INIT_MODE_FLAGS(sc) & + op->if_mode.mode_bit_map) != + op->if_mode.mode_bit_map) + op_idx += op->if_mode.cmd_offset; + break; + case OP_IF_MODE_OR: + /* if all the flags don't match, skip the conditional + * block. + */ + if ((INIT_MODE_FLAGS(sc) & + op->if_mode.mode_bit_map) == 0) + op_idx += op->if_mode.cmd_offset; + break; + default: + /* Should never get here! */ + + break; + } + } +} + + +/**************************************************************************** +* PXP Arbiter +****************************************************************************/ +/* + * This code configures the PCI read/write arbiter + * which implements a weighted round robin + * between the virtual queues in the chip. + * + * The values were derived for each PCI max payload and max request size. + * since max payload and max request size are only known at run time, + * this is done as a separate init stage. 
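+ *
+ * The r_order/w_order arguments of ecore_init_pxp_arb() index these
+ * tables; going by the MPS table further down, order n appears to
+ * correspond to a request/payload size of 128 << n bytes
+ * (0 = 128B, 1 = 256B, 2 = 512B).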
+ */ + +#define NUM_WR_Q 13 +#define NUM_RD_Q 29 +#define MAX_RD_ORD 3 +#define MAX_WR_ORD 2 + +/* configuration for one arbiter queue */ +struct arb_line { + int l; + int add; + int ubound; +}; + +/* derived configuration for each read queue for each max request size */ +static const struct arb_line read_arb_data[NUM_RD_Q][MAX_RD_ORD + 1] = { +/* 1 */ { {8, 64, 25}, {16, 64, 25}, {32, 64, 25}, {64, 64, 41} }, + { {4, 8, 4}, {4, 8, 4}, {4, 8, 4}, {4, 8, 4} }, + { {4, 3, 3}, {4, 3, 3}, {4, 3, 3}, {4, 3, 3} }, + { {8, 3, 6}, {16, 3, 11}, {16, 3, 11}, {16, 3, 11} }, + { {8, 64, 25}, {16, 64, 25}, {32, 64, 25}, {64, 64, 41} }, + { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {64, 3, 41} }, + { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {64, 3, 41} }, + { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {64, 3, 41} }, + { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {64, 3, 41} }, +/* 10 */{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, + { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, + { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, + { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, + { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, + { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, + { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, + { {8, 64, 6}, {16, 64, 11}, {32, 64, 21}, {32, 64, 21} }, + { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, + { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, +/* 20 */{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, + { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, + { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, + { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, + { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, + { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, + { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, + { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, + { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, + { {8, 64, 25}, {16, 64, 41}, {32, 64, 81}, {64, 64, 120} } +}; + +/* derived configuration for each write queue for each max request size */ +static const struct arb_line write_arb_data[NUM_WR_Q][MAX_WR_ORD + 1] = { +/* 1 */ { {4, 6, 3}, {4, 6, 3}, {4, 6, 3} }, + { {4, 2, 3}, {4, 2, 3}, {4, 2, 3} }, + { {8, 2, 6}, {16, 2, 11}, {16, 2, 11} }, + { {8, 2, 6}, {16, 2, 11}, {32, 2, 21} }, + { {8, 2, 6}, {16, 2, 11}, {32, 2, 21} }, + { {8, 2, 6}, {16, 2, 11}, {32, 2, 21} }, + { {8, 64, 25}, {16, 64, 25}, {32, 64, 25} }, + { {8, 2, 6}, {16, 2, 11}, {16, 2, 11} }, + { {8, 2, 6}, {16, 2, 11}, {16, 2, 11} }, +/* 10 */{ {8, 9, 6}, {16, 9, 11}, {32, 9, 21} }, + { {8, 47, 19}, {16, 47, 19}, {32, 47, 21} }, + { {8, 9, 6}, {16, 9, 11}, {16, 9, 11} }, + { {8, 64, 25}, {16, 64, 41}, {32, 64, 81} } +}; + +/* register addresses for read queues */ +static const struct arb_line read_arb_addr[NUM_RD_Q-1] = { +/* 1 */ {PXP2_REG_RQ_BW_RD_L0, PXP2_REG_RQ_BW_RD_ADD0, + PXP2_REG_RQ_BW_RD_UBOUND0}, + {PXP2_REG_PSWRQ_BW_L1, PXP2_REG_PSWRQ_BW_ADD1, + PXP2_REG_PSWRQ_BW_UB1}, + {PXP2_REG_PSWRQ_BW_L2, PXP2_REG_PSWRQ_BW_ADD2, + PXP2_REG_PSWRQ_BW_UB2}, + {PXP2_REG_PSWRQ_BW_L3, PXP2_REG_PSWRQ_BW_ADD3, + PXP2_REG_PSWRQ_BW_UB3}, + {PXP2_REG_RQ_BW_RD_L4, PXP2_REG_RQ_BW_RD_ADD4, + PXP2_REG_RQ_BW_RD_UBOUND4}, + {PXP2_REG_RQ_BW_RD_L5, PXP2_REG_RQ_BW_RD_ADD5, + PXP2_REG_RQ_BW_RD_UBOUND5}, + {PXP2_REG_PSWRQ_BW_L6, PXP2_REG_PSWRQ_BW_ADD6, + PXP2_REG_PSWRQ_BW_UB6}, + {PXP2_REG_PSWRQ_BW_L7, PXP2_REG_PSWRQ_BW_ADD7, + PXP2_REG_PSWRQ_BW_UB7}, + {PXP2_REG_PSWRQ_BW_L8, PXP2_REG_PSWRQ_BW_ADD8, + PXP2_REG_PSWRQ_BW_UB8}, +/* 10 
*/{PXP2_REG_PSWRQ_BW_L9, PXP2_REG_PSWRQ_BW_ADD9, + PXP2_REG_PSWRQ_BW_UB9}, + {PXP2_REG_PSWRQ_BW_L10, PXP2_REG_PSWRQ_BW_ADD10, + PXP2_REG_PSWRQ_BW_UB10}, + {PXP2_REG_PSWRQ_BW_L11, PXP2_REG_PSWRQ_BW_ADD11, + PXP2_REG_PSWRQ_BW_UB11}, + {PXP2_REG_RQ_BW_RD_L12, PXP2_REG_RQ_BW_RD_ADD12, + PXP2_REG_RQ_BW_RD_UBOUND12}, + {PXP2_REG_RQ_BW_RD_L13, PXP2_REG_RQ_BW_RD_ADD13, + PXP2_REG_RQ_BW_RD_UBOUND13}, + {PXP2_REG_RQ_BW_RD_L14, PXP2_REG_RQ_BW_RD_ADD14, + PXP2_REG_RQ_BW_RD_UBOUND14}, + {PXP2_REG_RQ_BW_RD_L15, PXP2_REG_RQ_BW_RD_ADD15, + PXP2_REG_RQ_BW_RD_UBOUND15}, + {PXP2_REG_RQ_BW_RD_L16, PXP2_REG_RQ_BW_RD_ADD16, + PXP2_REG_RQ_BW_RD_UBOUND16}, + {PXP2_REG_RQ_BW_RD_L17, PXP2_REG_RQ_BW_RD_ADD17, + PXP2_REG_RQ_BW_RD_UBOUND17}, + {PXP2_REG_RQ_BW_RD_L18, PXP2_REG_RQ_BW_RD_ADD18, + PXP2_REG_RQ_BW_RD_UBOUND18}, +/* 20 */{PXP2_REG_RQ_BW_RD_L19, PXP2_REG_RQ_BW_RD_ADD19, + PXP2_REG_RQ_BW_RD_UBOUND19}, + {PXP2_REG_RQ_BW_RD_L20, PXP2_REG_RQ_BW_RD_ADD20, + PXP2_REG_RQ_BW_RD_UBOUND20}, + {PXP2_REG_RQ_BW_RD_L22, PXP2_REG_RQ_BW_RD_ADD22, + PXP2_REG_RQ_BW_RD_UBOUND22}, + {PXP2_REG_RQ_BW_RD_L23, PXP2_REG_RQ_BW_RD_ADD23, + PXP2_REG_RQ_BW_RD_UBOUND23}, + {PXP2_REG_RQ_BW_RD_L24, PXP2_REG_RQ_BW_RD_ADD24, + PXP2_REG_RQ_BW_RD_UBOUND24}, + {PXP2_REG_RQ_BW_RD_L25, PXP2_REG_RQ_BW_RD_ADD25, + PXP2_REG_RQ_BW_RD_UBOUND25}, + {PXP2_REG_RQ_BW_RD_L26, PXP2_REG_RQ_BW_RD_ADD26, + PXP2_REG_RQ_BW_RD_UBOUND26}, + {PXP2_REG_RQ_BW_RD_L27, PXP2_REG_RQ_BW_RD_ADD27, + PXP2_REG_RQ_BW_RD_UBOUND27}, + {PXP2_REG_PSWRQ_BW_L28, PXP2_REG_PSWRQ_BW_ADD28, + PXP2_REG_PSWRQ_BW_UB28} +}; + +/* register addresses for write queues */ +static const struct arb_line write_arb_addr[NUM_WR_Q-1] = { +/* 1 */ {PXP2_REG_PSWRQ_BW_L1, PXP2_REG_PSWRQ_BW_ADD1, + PXP2_REG_PSWRQ_BW_UB1}, + {PXP2_REG_PSWRQ_BW_L2, PXP2_REG_PSWRQ_BW_ADD2, + PXP2_REG_PSWRQ_BW_UB2}, + {PXP2_REG_PSWRQ_BW_L3, PXP2_REG_PSWRQ_BW_ADD3, + PXP2_REG_PSWRQ_BW_UB3}, + {PXP2_REG_PSWRQ_BW_L6, PXP2_REG_PSWRQ_BW_ADD6, + PXP2_REG_PSWRQ_BW_UB6}, + {PXP2_REG_PSWRQ_BW_L7, PXP2_REG_PSWRQ_BW_ADD7, + PXP2_REG_PSWRQ_BW_UB7}, + {PXP2_REG_PSWRQ_BW_L8, PXP2_REG_PSWRQ_BW_ADD8, + PXP2_REG_PSWRQ_BW_UB8}, + {PXP2_REG_PSWRQ_BW_L9, PXP2_REG_PSWRQ_BW_ADD9, + PXP2_REG_PSWRQ_BW_UB9}, + {PXP2_REG_PSWRQ_BW_L10, PXP2_REG_PSWRQ_BW_ADD10, + PXP2_REG_PSWRQ_BW_UB10}, + {PXP2_REG_PSWRQ_BW_L11, PXP2_REG_PSWRQ_BW_ADD11, + PXP2_REG_PSWRQ_BW_UB11}, +/* 10 */{PXP2_REG_PSWRQ_BW_L28, PXP2_REG_PSWRQ_BW_ADD28, + PXP2_REG_PSWRQ_BW_UB28}, + {PXP2_REG_RQ_BW_WR_L29, PXP2_REG_RQ_BW_WR_ADD29, + PXP2_REG_RQ_BW_WR_UBOUND29}, + {PXP2_REG_RQ_BW_WR_L30, PXP2_REG_RQ_BW_WR_ADD30, + PXP2_REG_RQ_BW_WR_UBOUND30} +}; + +static void ecore_init_pxp_arb(struct bnx2x_softc *sc, int r_order, + int w_order) +{ + uint32_t val, i; + + if (r_order > MAX_RD_ORD) { + ECORE_MSG(sc, "read order of %d order adjusted to %d", + r_order, MAX_RD_ORD); + r_order = MAX_RD_ORD; + } + if (w_order > MAX_WR_ORD) { + ECORE_MSG(sc, "write order of %d order adjusted to %d", + w_order, MAX_WR_ORD); + w_order = MAX_WR_ORD; + } + if (CHIP_REV_IS_FPGA(sc)) { + ECORE_MSG(sc, "write order adjusted to 1 for FPGA"); + w_order = 0; + } + ECORE_MSG(sc, "read order %d write order %d", r_order, w_order); + + for (i = 0; i < NUM_RD_Q-1; i++) { + REG_WR(sc, read_arb_addr[i].l, read_arb_data[i][r_order].l); + REG_WR(sc, read_arb_addr[i].add, + read_arb_data[i][r_order].add); + REG_WR(sc, read_arb_addr[i].ubound, + read_arb_data[i][r_order].ubound); + } + + for (i = 0; i < NUM_WR_Q-1; i++) { + if ((write_arb_addr[i].l == PXP2_REG_RQ_BW_WR_L29) || + (write_arb_addr[i].l == PXP2_REG_RQ_BW_WR_L30)) { + 
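+ /* Queues 29 and 30 have their own RQ_BW_WR registers, so the
+ * values can be written directly; the other write queues share
+ * PSWRQ_BW registers with the read queues, hence the
+ * read-modify-write with shifted fields in the else branch below.
+ */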
+ REG_WR(sc, write_arb_addr[i].l, + write_arb_data[i][w_order].l); + + REG_WR(sc, write_arb_addr[i].add, + write_arb_data[i][w_order].add); + + REG_WR(sc, write_arb_addr[i].ubound, + write_arb_data[i][w_order].ubound); + } else { + + val = REG_RD(sc, write_arb_addr[i].l); + REG_WR(sc, write_arb_addr[i].l, + val | (write_arb_data[i][w_order].l << 10)); + + val = REG_RD(sc, write_arb_addr[i].add); + REG_WR(sc, write_arb_addr[i].add, + val | (write_arb_data[i][w_order].add << 10)); + + val = REG_RD(sc, write_arb_addr[i].ubound); + REG_WR(sc, write_arb_addr[i].ubound, + val | (write_arb_data[i][w_order].ubound << 7)); + } + } + + val = write_arb_data[NUM_WR_Q-1][w_order].add; + val += write_arb_data[NUM_WR_Q-1][w_order].ubound << 10; + val += write_arb_data[NUM_WR_Q-1][w_order].l << 17; + REG_WR(sc, PXP2_REG_PSWRQ_BW_RD, val); + + val = read_arb_data[NUM_RD_Q-1][r_order].add; + val += read_arb_data[NUM_RD_Q-1][r_order].ubound << 10; + val += read_arb_data[NUM_RD_Q-1][r_order].l << 17; + REG_WR(sc, PXP2_REG_PSWRQ_BW_WR, val); + + REG_WR(sc, PXP2_REG_RQ_WR_MBS0, w_order); + REG_WR(sc, PXP2_REG_RQ_WR_MBS1, w_order); + REG_WR(sc, PXP2_REG_RQ_RD_MBS0, r_order); + REG_WR(sc, PXP2_REG_RQ_RD_MBS1, r_order); + + if ((CHIP_IS_E1(sc) || CHIP_IS_E1H(sc)) && (r_order == MAX_RD_ORD)) + REG_WR(sc, PXP2_REG_RQ_PDR_LIMIT, 0xe00); + + if (CHIP_IS_E3(sc)) + REG_WR(sc, PXP2_REG_WR_USDMDP_TH, (0x4 << w_order)); + else if (CHIP_IS_E2(sc)) + REG_WR(sc, PXP2_REG_WR_USDMDP_TH, (0x8 << w_order)); + else + REG_WR(sc, PXP2_REG_WR_USDMDP_TH, (0x18 << w_order)); + + if (!CHIP_IS_E1(sc)) { + /* MPS w_order optimal TH presently TH + * 128 0 0 2 + * 256 1 1 3 + * >=512 2 2 3 + */ + /* DMAE is special */ + if (!CHIP_IS_E1H(sc)) { + /* E2 can use optimal TH */ + val = w_order; + REG_WR(sc, PXP2_REG_WR_DMAE_MPS, val); + } else { + val = ((w_order == 0) ? 2 : 3); + REG_WR(sc, PXP2_REG_WR_DMAE_MPS, 2); + } + + REG_WR(sc, PXP2_REG_WR_HC_MPS, val); + REG_WR(sc, PXP2_REG_WR_USDM_MPS, val); + REG_WR(sc, PXP2_REG_WR_CSDM_MPS, val); + REG_WR(sc, PXP2_REG_WR_TSDM_MPS, val); + REG_WR(sc, PXP2_REG_WR_XSDM_MPS, val); + REG_WR(sc, PXP2_REG_WR_QM_MPS, val); + REG_WR(sc, PXP2_REG_WR_TM_MPS, val); + REG_WR(sc, PXP2_REG_WR_SRC_MPS, val); + REG_WR(sc, PXP2_REG_WR_DBG_MPS, val); + REG_WR(sc, PXP2_REG_WR_CDU_MPS, val); + } + + /* Validate number of tags suppoted by device */ +#define PCIE_REG_PCIER_TL_HDR_FC_ST 0x2980 + val = REG_RD(sc, PCIE_REG_PCIER_TL_HDR_FC_ST); + val &= 0xFF; + if (val <= 0x20) + REG_WR(sc, PXP2_REG_PGL_TAGS_LIMIT, 0x20); +} + +/**************************************************************************** +* ILT management +****************************************************************************/ +/* + * This codes hides the low level HW interaction for ILT management and + * configuration. The API consists of a shadow ILT table which is set by the + * driver and a set of routines to use it to configure the HW. 
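+ *
+ * Each ILT line maps one host page into the on-chip translation table.
+ * ILT_ADDR1()/ILT_ADDR2() below split the page-aligned physical address
+ * into the two 32-bit halves of the wide register, e.g. a page at
+ * 0x123456000 becomes ILT_ADDR1 = 0x123456 and ILT_ADDR2 = 0x100000
+ * (the valid bit).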
+ * + */ + +/* ILT HW init operations */ + +/* ILT memory management operations */ +#define ILT_MEMOP_ALLOC 0 +#define ILT_MEMOP_FREE 1 + +/* the phys address is shifted right 12 bits and has an added + * 1=valid bit added to the 53rd bit + * then since this is a wide register(TM) + * we split it into two 32 bit writes + */ +#define ILT_ADDR1(x) ((uint32_t)(((uint64_t)x >> 12) & 0xFFFFFFFF)) +#define ILT_ADDR2(x) ((uint32_t)((1 << 20) | ((uint64_t)x >> 44))) +#define ILT_RANGE(f, l) (((l) << 10) | f) + +static int ecore_ilt_line_mem_op(struct bnx2x_softc *sc __rte_unused, + struct ilt_line *line, uint32_t size, + uint8_t memop) +{ + if (memop == ILT_MEMOP_FREE) { + ECORE_ILT_FREE(line->page, line->page_mapping, line->size); + return 0; + } + ECORE_ILT_ZALLOC(line->page, &line->page_mapping, size); + if (!line->page) + return -1; + line->size = size; + return 0; +} + + +static int ecore_ilt_client_mem_op(struct bnx2x_softc *sc, int cli_num, + uint8_t memop) +{ + int i, rc; + struct ecore_ilt *ilt = SC_ILT(sc); + struct ilt_client_info *ilt_cli = &ilt->clients[cli_num]; + + if (!ilt || !ilt->lines) + return -1; + + if (ilt_cli->flags & (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM)) + return 0; + + for (rc = 0, i = ilt_cli->start; i <= ilt_cli->end && !rc; i++) { + rc = ecore_ilt_line_mem_op(sc, &ilt->lines[i], + ilt_cli->page_size, memop); + } + return rc; +} + +static int ecore_ilt_mem_op(struct bnx2x_softc *sc, uint8_t memop) +{ + int rc = ecore_ilt_client_mem_op(sc, ILT_CLIENT_CDU, memop); + if (!rc) + rc = ecore_ilt_client_mem_op(sc, ILT_CLIENT_QM, memop); + if (!rc && CNIC_SUPPORT(sc) && !CONFIGURE_NIC_MODE(sc)) + rc = ecore_ilt_client_mem_op(sc, ILT_CLIENT_SRC, memop); + + return rc; +} + +static void ecore_ilt_line_wr(struct bnx2x_softc *sc, int abs_idx, + ecore_dma_addr_t page_mapping) +{ + uint32_t reg; + + if (CHIP_IS_E1(sc)) + reg = PXP2_REG_RQ_ONCHIP_AT + abs_idx * 8; + else + reg = PXP2_REG_RQ_ONCHIP_AT_B0 + abs_idx * 8; + + ecore_wr_64(sc, reg, ILT_ADDR1(page_mapping), ILT_ADDR2(page_mapping)); +} + +static void ecore_ilt_line_init_op(struct bnx2x_softc *sc, + struct ecore_ilt *ilt, int idx, uint8_t initop) +{ + ecore_dma_addr_t null_mapping; + int abs_idx = ilt->start_line + idx; + + + switch (initop) { + case INITOP_INIT: + /* set in the init-value array */ + case INITOP_SET: + ecore_ilt_line_wr(sc, abs_idx, ilt->lines[idx].page_mapping); + break; + case INITOP_CLEAR: + null_mapping = 0; + ecore_ilt_line_wr(sc, abs_idx, null_mapping); + break; + } +} + +static void ecore_ilt_boundary_init_op(struct bnx2x_softc *sc, + struct ilt_client_info *ilt_cli, + uint32_t ilt_start, + uint8_t initop __rte_unused) +{ + uint32_t start_reg = 0; + uint32_t end_reg = 0; + + /* The boundary is either SET or INIT, + CLEAR => SET and for now SET ~~ INIT */ + + /* find the appropriate regs */ + if (CHIP_IS_E1(sc)) { + switch (ilt_cli->client_num) { + case ILT_CLIENT_CDU: + start_reg = PXP2_REG_PSWRQ_CDU0_L2P; + break; + case ILT_CLIENT_QM: + start_reg = PXP2_REG_PSWRQ_QM0_L2P; + break; + case ILT_CLIENT_SRC: + start_reg = PXP2_REG_PSWRQ_SRC0_L2P; + break; + case ILT_CLIENT_TM: + start_reg = PXP2_REG_PSWRQ_TM0_L2P; + break; + } + REG_WR(sc, start_reg + SC_FUNC(sc) * 4, + ILT_RANGE((ilt_start + ilt_cli->start), + (ilt_start + ilt_cli->end))); + } else { + switch (ilt_cli->client_num) { + case ILT_CLIENT_CDU: + start_reg = PXP2_REG_RQ_CDU_FIRST_ILT; + end_reg = PXP2_REG_RQ_CDU_LAST_ILT; + break; + case ILT_CLIENT_QM: + start_reg = PXP2_REG_RQ_QM_FIRST_ILT; + end_reg = PXP2_REG_RQ_QM_LAST_ILT; + 
break; + case ILT_CLIENT_SRC: + start_reg = PXP2_REG_RQ_SRC_FIRST_ILT; + end_reg = PXP2_REG_RQ_SRC_LAST_ILT; + break; + case ILT_CLIENT_TM: + start_reg = PXP2_REG_RQ_TM_FIRST_ILT; + end_reg = PXP2_REG_RQ_TM_LAST_ILT; + break; + } + REG_WR(sc, start_reg, (ilt_start + ilt_cli->start)); + REG_WR(sc, end_reg, (ilt_start + ilt_cli->end)); + } +} + +static void ecore_ilt_client_init_op_ilt(struct bnx2x_softc *sc, + struct ecore_ilt *ilt, + struct ilt_client_info *ilt_cli, + uint8_t initop) +{ + int i; + + if (ilt_cli->flags & ILT_CLIENT_SKIP_INIT) + return; + + for (i = ilt_cli->start; i <= ilt_cli->end; i++) + ecore_ilt_line_init_op(sc, ilt, i, initop); + + /* init/clear the ILT boundries */ + ecore_ilt_boundary_init_op(sc, ilt_cli, ilt->start_line, initop); +} + +static void ecore_ilt_client_init_op(struct bnx2x_softc *sc, + struct ilt_client_info *ilt_cli, uint8_t initop) +{ + struct ecore_ilt *ilt = SC_ILT(sc); + + ecore_ilt_client_init_op_ilt(sc, ilt, ilt_cli, initop); +} + +static void ecore_ilt_client_id_init_op(struct bnx2x_softc *sc, + int cli_num, uint8_t initop) +{ + struct ecore_ilt *ilt = SC_ILT(sc); + struct ilt_client_info *ilt_cli = &ilt->clients[cli_num]; + + ecore_ilt_client_init_op(sc, ilt_cli, initop); +} + +static void ecore_ilt_init_op(struct bnx2x_softc *sc, uint8_t initop) +{ + ecore_ilt_client_id_init_op(sc, ILT_CLIENT_CDU, initop); + ecore_ilt_client_id_init_op(sc, ILT_CLIENT_QM, initop); + if (CNIC_SUPPORT(sc) && !CONFIGURE_NIC_MODE(sc)) + ecore_ilt_client_id_init_op(sc, ILT_CLIENT_SRC, initop); +} + +static void ecore_ilt_init_client_psz(struct bnx2x_softc *sc, int cli_num, + uint32_t psz_reg, uint8_t initop) +{ + struct ecore_ilt *ilt = SC_ILT(sc); + struct ilt_client_info *ilt_cli = &ilt->clients[cli_num]; + + if (ilt_cli->flags & ILT_CLIENT_SKIP_INIT) + return; + + switch (initop) { + case INITOP_INIT: + /* set in the init-value array */ + case INITOP_SET: + REG_WR(sc, psz_reg, ILOG2(ilt_cli->page_size >> 12)); + break; + case INITOP_CLEAR: + break; + } +} + +/* + * called during init common stage, ilt clients should be initialized + * prioir to calling this function + */ +static void ecore_ilt_init_page_size(struct bnx2x_softc *sc, uint8_t initop) +{ + ecore_ilt_init_client_psz(sc, ILT_CLIENT_CDU, + PXP2_REG_RQ_CDU_P_SIZE, initop); + ecore_ilt_init_client_psz(sc, ILT_CLIENT_QM, + PXP2_REG_RQ_QM_P_SIZE, initop); + ecore_ilt_init_client_psz(sc, ILT_CLIENT_SRC, + PXP2_REG_RQ_SRC_P_SIZE, initop); + ecore_ilt_init_client_psz(sc, ILT_CLIENT_TM, + PXP2_REG_RQ_TM_P_SIZE, initop); +} + +/**************************************************************************** +* QM initializations +****************************************************************************/ +#define QM_QUEUES_PER_FUNC 16 /* E1 has 32, but only 16 are used */ +#define QM_INIT_MIN_CID_COUNT 31 +#define QM_INIT(cid_cnt) (cid_cnt > QM_INIT_MIN_CID_COUNT) + +/* called during init port stage */ +static void ecore_qm_init_cid_count(struct bnx2x_softc *sc, int qm_cid_count, + uint8_t initop) +{ + int port = SC_PORT(sc); + + if (QM_INIT(qm_cid_count)) { + switch (initop) { + case INITOP_INIT: + /* set in the init-value array */ + case INITOP_SET: + REG_WR(sc, QM_REG_CONNNUM_0 + port*4, + qm_cid_count/16 - 1); + break; + case INITOP_CLEAR: + break; + } + } +} + +static void ecore_qm_set_ptr_table(struct bnx2x_softc *sc, int qm_cid_count, + uint32_t base_reg, uint32_t reg) +{ + int i; + uint32_t wb_data[2] = {0, 0}; + for (i = 0; i < 4 * QM_QUEUES_PER_FUNC; i++) { + REG_WR(sc, base_reg + i*4, + qm_cid_count * 4 
* (i % QM_QUEUES_PER_FUNC)); + ecore_init_wr_wb(sc, reg + i*8, + wb_data, 2); + } +} + +/* called during init common stage */ +static void ecore_qm_init_ptr_table(struct bnx2x_softc *sc, int qm_cid_count, + uint8_t initop) +{ + if (!QM_INIT(qm_cid_count)) + return; + + switch (initop) { + case INITOP_INIT: + /* set in the init-value array */ + case INITOP_SET: + ecore_qm_set_ptr_table(sc, qm_cid_count, + QM_REG_BASEADDR, QM_REG_PTRTBL); + if (CHIP_IS_E1H(sc)) + ecore_qm_set_ptr_table(sc, qm_cid_count, + QM_REG_BASEADDR_EXT_A, + QM_REG_PTRTBL_EXT_A); + break; + case INITOP_CLEAR: + break; + } +} + +#endif /* ECORE_INIT_OPS_H */ diff --git a/src/spdk/dpdk/drivers/net/bnx2x/ecore_mfw_req.h b/src/spdk/dpdk/drivers/net/bnx2x/ecore_mfw_req.h new file mode 100644 index 000000000..4ffd9daf7 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnx2x/ecore_mfw_req.h @@ -0,0 +1,192 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2007-2013 Broadcom Corporation. + * + * Eric Davis + * David Christensen + * Gary Zambrano + * + * Copyright (c) 2014-2018 Cavium Inc. + * All rights reserved. + * www.cavium.com + */ + +#ifndef ECORE_MFW_REQ_H +#define ECORE_MFW_REQ_H + + +#define PORT_0 0 +#define PORT_1 1 +#define PORT_MAX 2 +#define NVM_PATH_MAX 2 + +/* FCoE capabilities required from the driver */ +struct fcoe_capabilities { + uint32_t capability1; + /* Maximum number of I/Os per connection */ + #define FCOE_IOS_PER_CONNECTION_MASK 0x0000ffff + #define FCOE_IOS_PER_CONNECTION_SHIFT 0 + /* Maximum number of Logins per port */ + #define FCOE_LOGINS_PER_PORT_MASK 0xffff0000 + #define FCOE_LOGINS_PER_PORT_SHIFT 16 + + uint32_t capability2; + /* Maximum number of exchanges */ + #define FCOE_NUMBER_OF_EXCHANGES_MASK 0x0000ffff + #define FCOE_NUMBER_OF_EXCHANGES_SHIFT 0 + /* Maximum NPIV WWN per port */ + #define FCOE_NPIV_WWN_PER_PORT_MASK 0xffff0000 + #define FCOE_NPIV_WWN_PER_PORT_SHIFT 16 + + uint32_t capability3; + /* Maximum number of targets supported */ + #define FCOE_TARGETS_SUPPORTED_MASK 0x0000ffff + #define FCOE_TARGETS_SUPPORTED_SHIFT 0 + /* Maximum number of outstanding commands across all connections */ + #define FCOE_OUTSTANDING_COMMANDS_MASK 0xffff0000 + #define FCOE_OUTSTANDING_COMMANDS_SHIFT 16 + + uint32_t capability4; + #define FCOE_CAPABILITY4_STATEFUL 0x00000001 + #define FCOE_CAPABILITY4_STATELESS 0x00000002 + #define FCOE_CAPABILITY4_CAPABILITIES_REPORTED_VALID 0x00000004 +}; + +struct glob_ncsi_oem_data +{ + uint32_t driver_version; + uint32_t unused[3]; + struct fcoe_capabilities fcoe_features[NVM_PATH_MAX][PORT_MAX]; +}; + +/* current drv_info version */ +#define DRV_INFO_CUR_VER 2 + +/* drv_info op codes supported */ +enum drv_info_opcode { + ETH_STATS_OPCODE, + FCOE_STATS_OPCODE, + ISCSI_STATS_OPCODE +}; + +#define ETH_STAT_INFO_VERSION_LEN 12 +/* Per PCI Function Ethernet Statistics required from the driver */ +struct eth_stats_info { + /* Function's Driver Version. padded to 12 */ + char version[ETH_STAT_INFO_VERSION_LEN]; + /* Locally Admin Addr. BigEndian EIU48. Actual size is 6 bytes */ + uint8_t mac_local[8]; + uint8_t mac_add1[8]; /* Additional Programmed MAC Addr 1. */ + uint8_t mac_add2[8]; /* Additional Programmed MAC Addr 2. */ + uint32_t mtu_size; /* MTU Size. Note : Negotiated MTU */ + uint32_t feature_flags; /* Feature_Flags. 
*/ +#define FEATURE_ETH_CHKSUM_OFFLOAD_MASK 0x01 +#define FEATURE_ETH_LSO_MASK 0x02 +#define FEATURE_ETH_BOOTMODE_MASK 0x1C +#define FEATURE_ETH_BOOTMODE_SHIFT 2 +#define FEATURE_ETH_BOOTMODE_NONE (0x0 << 2) +#define FEATURE_ETH_BOOTMODE_PXE (0x1 << 2) +#define FEATURE_ETH_BOOTMODE_ISCSI (0x2 << 2) +#define FEATURE_ETH_BOOTMODE_FCOE (0x3 << 2) +#define FEATURE_ETH_TOE_MASK 0x20 + uint32_t lso_max_size; /* LSO MaxOffloadSize. */ + uint32_t lso_min_seg_cnt; /* LSO MinSegmentCount. */ + /* Num Offloaded Connections TCP_IPv4. */ + uint32_t ipv4_ofld_cnt; + /* Num Offloaded Connections TCP_IPv6. */ + uint32_t ipv6_ofld_cnt; + uint32_t promiscuous_mode; /* Promiscuous Mode. non-zero true */ + uint32_t txq_size; /* TX Descriptors Queue Size */ + uint32_t rxq_size; /* RX Descriptors Queue Size */ + /* TX Descriptor Queue Avg Depth. % Avg Queue Depth since last poll */ + uint32_t txq_avg_depth; + /* RX Descriptors Queue Avg Depth. % Avg Queue Depth since last poll */ + uint32_t rxq_avg_depth; + /* IOV_Offload. 0=none; 1=MultiQueue, 2=VEB 3= VEPA*/ + uint32_t iov_offload; + /* Number of NetQueue/VMQ Config'd. */ + uint32_t netq_cnt; + uint32_t vf_cnt; /* Num VF assigned to this PF. */ +}; + +/* Per PCI Function FCOE Statistics required from the driver */ +struct fcoe_stats_info { + uint8_t version[12]; /* Function's Driver Version. */ + uint8_t mac_local[8]; /* Locally Admin Addr. */ + uint8_t mac_add1[8]; /* Additional Programmed MAC Addr 1. */ + uint8_t mac_add2[8]; /* Additional Programmed MAC Addr 2. */ + /* QoS Priority (per 802.1p). 0-7255 */ + uint32_t qos_priority; + uint32_t txq_size; /* FCoE TX Descriptors Queue Size. */ + uint32_t rxq_size; /* FCoE RX Descriptors Queue Size. */ + /* FCoE TX Descriptor Queue Avg Depth. */ + uint32_t txq_avg_depth; + /* FCoE RX Descriptors Queue Avg Depth. */ + uint32_t rxq_avg_depth; + uint32_t rx_frames_lo; /* FCoE RX Frames received. */ + uint32_t rx_frames_hi; /* FCoE RX Frames received. */ + uint32_t rx_bytes_lo; /* FCoE RX Bytes received. */ + uint32_t rx_bytes_hi; /* FCoE RX Bytes received. */ + uint32_t tx_frames_lo; /* FCoE TX Frames sent. */ + uint32_t tx_frames_hi; /* FCoE TX Frames sent. */ + uint32_t tx_bytes_lo; /* FCoE TX Bytes sent. */ + uint32_t tx_bytes_hi; /* FCoE TX Bytes sent. */ + uint32_t rx_fcs_errors; /* number of receive packets with FCS errors */ + uint32_t rx_fc_crc_errors; /* number of FC frames with CRC errors*/ + uint32_t fip_login_failures; /* number of FCoE/FIP Login failures */ +}; + +/* Per PCI Function iSCSI Statistics required from the driver*/ +struct iscsi_stats_info { + uint8_t version[12]; /* Function's Driver Version. */ + uint8_t mac_local[8]; /* Locally Admin iSCSI MAC Addr. */ + uint8_t mac_add1[8]; /* Additional Programmed MAC Addr 1. */ + /* QoS Priority (per 802.1p). 0-7255 */ + uint32_t qos_priority; +#define ISCSI_QOS_PRIORITY_OFFSET 0 +#define ISCSI_QOS_PRIORITY_MASK (0xffff) + +#define ISCSI_IP_ADDRESS_TYPE_OFFSET 30 +#define ISCSI_IP_ADDRESS_TYPE_MASK (3 << 30) +/* Driver does not have the IP address and type populated */ +#define ISCSI_IP_ADDRESS_TYPE_NOT_SET (0 << 30) +#define ISCSI_IP_ADDRESS_TYPE_IPV4 (1 << 30) /* IPV4 IP address set */ +#define ISCSI_IP_ADDRESS_TYPE_IPV6 (2 << 30) /* IPV6 IP address set */ + + uint8_t initiator_name[64]; /* iSCSI Boot Initiator Node name. */ + + uint8_t ww_port_name[64]; /* iSCSI World wide port name */ + + uint8_t boot_target_name[64];/* iSCSI Boot Target Name. */ + + uint8_t boot_target_ip[16]; /* iSCSI Boot Target IP. 
*/
+ uint32_t boot_target_portal; /* iSCSI Boot Target Portal. */
+ uint8_t boot_init_ip[16]; /* iSCSI Boot Initiator IP Address. */
+ uint32_t max_frame_size; /* Max Frame Size. bytes */
+ uint32_t txq_size; /* PDU TX Descriptors Queue Size. */
+ uint32_t rxq_size; /* PDU RX Descriptors Queue Size. */
+
+ uint32_t txq_avg_depth; /* PDU TX Descriptor Queue Avg Depth. */
+ uint32_t rxq_avg_depth; /* PDU RX Descriptors Queue Avg Depth. */
+ uint32_t rx_pdus_lo; /* iSCSI PDUs received. */
+ uint32_t rx_pdus_hi; /* iSCSI PDUs received. */
+
+ uint32_t rx_bytes_lo; /* iSCSI RX Bytes received. */
+ uint32_t rx_bytes_hi; /* iSCSI RX Bytes received. */
+ uint32_t tx_pdus_lo; /* iSCSI PDUs sent. */
+ uint32_t tx_pdus_hi; /* iSCSI PDUs sent. */
+
+ uint32_t tx_bytes_lo; /* iSCSI PDU TX Bytes sent. */
+ uint32_t tx_bytes_hi; /* iSCSI PDU TX Bytes sent. */
+ uint32_t pcp_prior_map_tbl; /* C-PCP to S-PCP Priority Map Table.
+ 9 nibbles, the position of each nibble
+ represents the C-PCP value, the value
+ of the nibble = S-PCP value. */
+};
+
+union drv_info_to_mcp {
+ struct eth_stats_info ether_stat;
+ struct fcoe_stats_info fcoe_stat;
+ struct iscsi_stats_info iscsi_stat;
+};
+
+#endif /* ECORE_MFW_REQ_H */
diff --git a/src/spdk/dpdk/drivers/net/bnx2x/ecore_reg.h b/src/spdk/dpdk/drivers/net/bnx2x/ecore_reg.h
new file mode 100644
index 000000000..bb92d131f
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/bnx2x/ecore_reg.h
@@ -0,0 +1,5996 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2007-2013 Broadcom Corporation.
+ *
+ * Eric Davis
+ * David Christensen
+ * Gary Zambrano
+ *
+ * Copyright (c) 2014-2018 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ */
+
+#ifndef ECORE_REG_H
+#define ECORE_REG_H
+
+#define ATC_ATC_INT_STS_REG_ADDRESS_ERROR (0x1 << 0)
+#define ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS (0x1 << 2)
+#define ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU (0x1 << 5)
+#define ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT (0x1 << 3)
+#define ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR (0x1 << 4)
+#define ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND (0x1 << 1)
+/* [R 1] ATC initialization done */
+#define ATC_REG_ATC_INIT_DONE 0x1100bc
+/* [RW 6] Interrupt mask register #0 read/write */
+#define ATC_REG_ATC_INT_MASK 0x1101c8
+/* [R 6] Interrupt register #0 read */
+#define ATC_REG_ATC_INT_STS 0x1101bc
+/* [RC 6] Interrupt register #0 read clear */
+#define ATC_REG_ATC_INT_STS_CLR 0x1101c0
+/* [RW 5] Parity mask register #0 read/write */
+#define ATC_REG_ATC_PRTY_MASK 0x1101d8
+/* [R 5] Parity register #0 read */
+#define ATC_REG_ATC_PRTY_STS 0x1101cc
+/* [RC 5] Parity register #0 read clear */
+#define ATC_REG_ATC_PRTY_STS_CLR 0x1101d0
+/* [RW 19] Interrupt mask register #0 read/write */
+#define BRB1_REG_BRB1_INT_MASK 0x60128
+/* [R 19] Interrupt register #0 read */
+#define BRB1_REG_BRB1_INT_STS 0x6011c
+/* [RC 19] Interrupt register #0 read clear */
+#define BRB1_REG_BRB1_INT_STS_CLR 0x60120
+/* [RW 4] Parity mask register #0 read/write */
+#define BRB1_REG_BRB1_PRTY_MASK 0x60138
+/* [R 4] Parity register #0 read */
+#define BRB1_REG_BRB1_PRTY_STS 0x6012c
+/* [RC 4] Parity register #0 read clear */
+#define BRB1_REG_BRB1_PRTY_STS_CLR 0x60130
+/* [RW 11] The number of blocks guaranteed for the MAC port. The register is
+ * applicable only when per_class_guaranty_mode is reset.
+ */
+#define BRB1_REG_MAC_GUARANTIED_0 0x601e8
+#define BRB1_REG_MAC_GUARANTIED_1 0x60240
+/* [R 24] The number of full blocks.
*/
+#define BRB1_REG_NUM_OF_FULL_BLOCKS 0x60090
+/* [RW 10] Write client 0: De-assert pause threshold. Not Functional */
+#define BRB1_REG_PAUSE_HIGH_THRESHOLD_0 0x60078
+/* [RW 10] Write client 0: Assert pause threshold. Not Functional */
+#define BRB1_REG_PAUSE_LOW_THRESHOLD_0 0x60068
+/* [R 24] The number of full blocks occupied by port. */
+#define BRB1_REG_PORT_NUM_OCC_BLOCKS_0 0x60094
+/* [R 5] Used to read the value of the XX protection CAM occupancy counter. */
+#define CCM_REG_CAM_OCCUP 0xd0188
+/* [RW 11] Interrupt mask register #0 read/write */
+#define CCM_REG_CCM_INT_MASK 0xd01e4
+/* [R 11] Interrupt register #0 read */
+#define CCM_REG_CCM_INT_STS 0xd01d8
+/* [RC 11] Interrupt register #0 read clear */
+#define CCM_REG_CCM_INT_STS_CLR 0xd01dc
+/* [RW 27] Parity mask register #0 read/write */
+#define CCM_REG_CCM_PRTY_MASK 0xd01f4
+/* [R 27] Parity register #0 read */
+#define CCM_REG_CCM_PRTY_STS 0xd01e8
+/* [RC 27] Parity register #0 read clear */
+#define CCM_REG_CCM_PRTY_STS_CLR 0xd01ec
+/* [RW 4] CFC output initial credit. Max credit available - 15. Write writes
+ * the initial credit value; read returns the current value of the credit
+ * counter. Must be initialized to 1 at start-up.
+ */
+#define CCM_REG_CFC_INIT_CRD 0xd0204
+/* [RW 6] QM output initial credit. Max credit available - 32. Write writes
+ * the initial credit value; read returns the current value of the credit
+ * counter. Must be initialized to 32 at start-up.
+ */
+#define CCM_REG_CQM_INIT_CRD 0xd020c
+/* [RC 1] Set when the message length mismatch (relative to last indication)
+ * at the SDM interface is detected.
+ */
+#define CCM_REG_CSDM_LENGTH_MIS 0xd0170
+/* [RW 8] FIC0 output initial credit. Max credit available - 255. Write
+ * writes the initial credit value; read returns the current value of the
+ * credit counter. Must be initialized to 64 at start-up.
+ */
+#define CCM_REG_FIC0_INIT_CRD 0xd0210
+/* [RW 8] FIC1 output initial credit. Max credit available - 255. Write
+ * writes the initial credit value; read returns the current value of the
+ * credit counter. Must be initialized to 64 at start-up.
+ */
+#define CCM_REG_FIC1_INIT_CRD 0xd0214
+/* [RC 1] Set when the message length mismatch (relative to last indication)
+ * at the pbf interface is detected.
+ */
+#define CCM_REG_PBF_LENGTH_MIS 0xd0180
+/* [RC 1] Set when the message length mismatch (relative to last indication)
+ * at the STORM interface is detected.
+ */
+#define CCM_REG_STORM_LENGTH_MIS 0xd016c
+/* [RC 1] Set when the message length mismatch (relative to last indication)
+ * at the tsem interface is detected.
+ */
+#define CCM_REG_TSEM_LENGTH_MIS 0xd0174
+/* [RC 1] Set when message length mismatch (relative to last indication) at
+ * the usem interface is detected.
+ */
+#define CCM_REG_USEM_LENGTH_MIS 0xd017c
+/* [RC 1] Set when the message length mismatch (relative to last indication)
+ * at the xsem interface is detected.
+ */
+#define CCM_REG_XSEM_LENGTH_MIS 0xd0178
+/* [RW 19] Indirect access to the descriptor table of the XX protection
+ * mechanism. The fields are: [5:0] - message length; [12:6] - message
+ * pointer; [18:13] - next pointer.
+ */
+#define CCM_REG_XX_DESCR_TABLE 0xd0300
+#define CCM_REG_XX_DESCR_TABLE_SIZE 24
+/* [R 7] Used to read the value of XX protection Free counter.
*/ +#define CCM_REG_XX_FREE 0xd0184 +#define CDU_REG_CDU_GLOBAL_PARAMS 0x101020 +/* [RW 7] Interrupt mask register #0 read/write */ +#define CDU_REG_CDU_INT_MASK 0x10103c +/* [R 7] Interrupt register #0 read */ +#define CDU_REG_CDU_INT_STS 0x101030 +/* [RC 7] Interrupt register #0 read clear */ +#define CDU_REG_CDU_INT_STS_CLR 0x101034 +/* [RW 5] Parity mask register #0 read/write */ +#define CDU_REG_CDU_PRTY_MASK 0x10104c +/* [R 5] Parity register #0 read */ +#define CDU_REG_CDU_PRTY_STS 0x101040 +/* [RC 5] Parity register #0 read clear */ +#define CDU_REG_CDU_PRTY_STS_CLR 0x101044 +/* [RW 32] logging of error data in case of a CDU load error: + * {expected_cid[15:0]; xpected_type[2:0]; xpected_region[2:0]; ctive_error; + * ype_error; ctual_active; ctual_compressed_context}; + */ +#define CDU_REG_ERROR_DATA 0x101014 +/* [RW 13] activity counter ram access */ +#define CFC_REG_ACTIVITY_COUNTER 0x104400 +#define CFC_REG_ACTIVITY_COUNTER_SIZE 256 +/* [R 1] indication the initializing the activity counter by the hardware + * was done. + */ +#define CFC_REG_AC_INIT_DONE 0x104078 +/* [R 1] indication the initializing the cams by the hardware was done. */ +#define CFC_REG_CAM_INIT_DONE 0x10407c +/* [RW 2] Interrupt mask register #0 read/write */ +#define CFC_REG_CFC_INT_MASK 0x104108 +/* [R 2] Interrupt register #0 read */ +#define CFC_REG_CFC_INT_STS 0x1040fc +/* [RC 2] Interrupt register #0 read clear */ +#define CFC_REG_CFC_INT_STS_CLR 0x104100 +/* [RW 6] Parity mask register #0 read/write */ +#define CFC_REG_CFC_PRTY_MASK 0x104118 +/* [R 6] Parity register #0 read */ +#define CFC_REG_CFC_PRTY_STS 0x10410c +/* [RC 6] Parity register #0 read clear */ +#define CFC_REG_CFC_PRTY_STS_CLR 0x104110 +/* [RW 21] CID cam access (21:1 - Data; alid - 0) */ +#define CFC_REG_CID_CAM 0x104800 +#define CFC_REG_DEBUG0 0x104050 +/* [R 16] CFC error vector. when the CFC detects an internal error it will + * set one of these bits. the bit description can be found in CFC + * specifications + */ +#define CFC_REG_ERROR_VECTOR 0x10403c +/* [WB 97] LCID info ram access = {96-vpf; 5:93-pfid; 2:89-type; + * 8:85-action; 4-paddrv; 3:20-paddr; 9:4-rstates; -lsf; :0-lstate} + */ +#define CFC_REG_INFO_RAM 0x105000 +#define CFC_REG_INFO_RAM_SIZE 1024 +#define CFC_REG_INIT_REG 0x10404c +/* [RW 22] Link List ram access; data = {prev_pfid; rev_lcid; ext_pfid; + * ext_lcid} + */ +#define CFC_REG_LINK_LIST 0x104c00 +#define CFC_REG_LINK_LIST_SIZE 256 +/* [R 1] indication the initializing the link list by the hardware was done. 
*/ +#define CFC_REG_LL_INIT_DONE 0x104074 +/* [R 9] Number of allocated LCIDs which are at empty state */ +#define CFC_REG_NUM_LCIDS_ALLOC 0x104020 +/* [R 9] Number of Arriving LCIDs in Link List Block */ +#define CFC_REG_NUM_LCIDS_ARRIVING 0x104004 +#define CFC_REG_NUM_LCIDS_INSIDE_PF 0x104120 +/* [R 9] Number of Leaving LCIDs in Link List Block */ +#define CFC_REG_NUM_LCIDS_LEAVING 0x104018 +#define CFC_REG_STRONG_ENABLE_PF 0x104128 +#define CFC_REG_WEAK_ENABLE_PF 0x104124 +/* [RW 32] Interrupt mask register #0 read/write */ +#define CSDM_REG_CSDM_INT_MASK_0 0xc229c +#define CSDM_REG_CSDM_INT_MASK_1 0xc22ac +/* [R 32] Interrupt register #0 read */ +#define CSDM_REG_CSDM_INT_STS_0 0xc2290 +#define CSDM_REG_CSDM_INT_STS_1 0xc22a0 +/* [RC 32] Interrupt register #0 read clear */ +#define CSDM_REG_CSDM_INT_STS_CLR_0 0xc2294 +#define CSDM_REG_CSDM_INT_STS_CLR_1 0xc22a4 +/* [RW 11] Parity mask register #0 read/write */ +#define CSDM_REG_CSDM_PRTY_MASK 0xc22bc +/* [R 11] Parity register #0 read */ +#define CSDM_REG_CSDM_PRTY_STS 0xc22b0 +/* [RC 11] Parity register #0 read clear */ +#define CSDM_REG_CSDM_PRTY_STS_CLR 0xc22b4 +/* [R 1] pxp_ctrl rd_data fifo empty in sdm_dma_rsp block */ +#define CSDM_REG_RSP_PXP_CTRL_RDATA_EMPTY 0xc2548 +/* [R 1] parser fifo empty in sdm_sync block */ +#define CSDM_REG_SYNC_PARSER_EMPTY 0xc2550 +/* [R 1] parser serial fifo empty in sdm_sync block */ +#define CSDM_REG_SYNC_SYNC_EMPTY 0xc2558 +/* [RW 32] Interrupt mask register #0 read/write */ +#define CSEM_REG_CSEM_INT_MASK_0 0x200110 +#define CSEM_REG_CSEM_INT_MASK_1 0x200120 +/* [R 32] Interrupt register #0 read */ +#define CSEM_REG_CSEM_INT_STS_0 0x200104 +#define CSEM_REG_CSEM_INT_STS_1 0x200114 +/* [RC 32] Interrupt register #0 read clear */ +#define CSEM_REG_CSEM_INT_STS_CLR_0 0x200108 +#define CSEM_REG_CSEM_INT_STS_CLR_1 0x200118 +/* [RW 32] Parity mask register #0 read/write */ +#define CSEM_REG_CSEM_PRTY_MASK_0 0x200130 +#define CSEM_REG_CSEM_PRTY_MASK_1 0x200140 +/* [R 32] Parity register #0 read */ +#define CSEM_REG_CSEM_PRTY_STS_0 0x200124 +#define CSEM_REG_CSEM_PRTY_STS_1 0x200134 +/* [RC 32] Parity register #0 read clear */ +#define CSEM_REG_CSEM_PRTY_STS_CLR_0 0x200128 +#define CSEM_REG_CSEM_PRTY_STS_CLR_1 0x200138 +/* [RW 32] This address space contains all registers and memories that are + * placed in SEM_FAST block. The SEM_FAST registers are described in + * appendix B. In order to access the SEM_FAST registers the base address + * CSEM_REGISTERS_FAST_MEMORY (Offset: 0x220000) should be added to each + * SEM_FAST register offset. + */ +#define CSEM_REG_FAST_MEMORY 0x220000 +/* [RW 15] Interrupt table Read and write access to it is not possible in + * the middle of the work + */ +#define CSEM_REG_INT_TABLE 0x200400 +/* [WB 128] Debug only. Passive buffer memory */ +#define CSEM_REG_PASSIVE_BUFFER 0x202000 +/* [WB 46] pram memory. B45 is parity; b[44:0] - data. */ +#define CSEM_REG_PRAM 0x240000 +/* [R 20] Valid sleeping threads indication have bit per thread */ +#define CSEM_REG_SLEEP_THREADS_VALID 0x20026c +/* [R 1] EXT_STORE FIFO is empty in sem_slow_ls_ext */ +#define CSEM_REG_SLOW_EXT_STORE_EMPTY 0x2002a0 +/* [W 7] VF or PF ID for reset error bit. Values 0-63 reset error bit for 64 + * VF; values 64-67 reset error for 4 PF; values 68-127 are not valid. 
+ */ +#define CSEM_REG_VFPF_ERR_NUM 0x200380 +/* [RW 2] Interrupt mask register #0 read/write */ +#define DBG_REG_DBG_INT_MASK 0xc098 +/* [R 2] Interrupt register #0 read */ +#define DBG_REG_DBG_INT_STS 0xc08c +/* [RC 2] Interrupt register #0 read clear */ +#define DBG_REG_DBG_INT_STS_CLR 0xc090 +/* [RW 1] Parity mask register #0 read/write */ +#define DBG_REG_DBG_PRTY_MASK 0xc0a8 +/* [R 1] Parity register #0 read */ +#define DBG_REG_DBG_PRTY_STS 0xc09c +/* [RC 1] Parity register #0 read clear */ +#define DBG_REG_DBG_PRTY_STS_CLR 0xc0a0 +/* [RW 1] When set the DMAE will process the commands as in E1.5. 1.The + * function that is used is always SRC-PCI; 2.VF_Valid = 0; 3.VFID = 0; + * 4.Completion function=0; 5.Error handling = 0 + */ +#define DMAE_REG_BACKWARD_COMP_EN 0x10207c +/* [RW 32] Commands memory. The address to command X; row Y is to calculated + * as 14 * X+Y. + */ +#define DMAE_REG_CMD_MEM 0x102400 +#define DMAE_REG_CMD_MEM_SIZE 224 +/* [RW 2] Interrupt mask register #0 read/write */ +#define DMAE_REG_DMAE_INT_MASK 0x102054 +/* [R 2] Interrupt register #0 read */ +#define DMAE_REG_DMAE_INT_STS 0x102048 +/* [RC 2] Interrupt register #0 read clear */ +#define DMAE_REG_DMAE_INT_STS_CLR 0x10204c +/* [RW 4] Parity mask register #0 read/write */ +#define DMAE_REG_DMAE_PRTY_MASK 0x102064 +/* [R 4] Parity register #0 read */ +#define DMAE_REG_DMAE_PRTY_STS 0x102058 +/* [RC 4] Parity register #0 read clear */ +#define DMAE_REG_DMAE_PRTY_STS_CLR 0x10205c +/* [RW 1] Command 0 go. */ +#define DMAE_REG_GO_C0 0x102080 +/* [RW 1] Command 1 go. */ +#define DMAE_REG_GO_C1 0x102084 +/* [RW 1] Command 10 go. */ +#define DMAE_REG_GO_C10 0x102088 +/* [RW 1] Command 11 go. */ +#define DMAE_REG_GO_C11 0x10208c +/* [RW 1] Command 12 go. */ +#define DMAE_REG_GO_C12 0x102090 +/* [RW 1] Command 13 go. */ +#define DMAE_REG_GO_C13 0x102094 +/* [RW 1] Command 14 go. */ +#define DMAE_REG_GO_C14 0x102098 +/* [RW 1] Command 15 go. */ +#define DMAE_REG_GO_C15 0x10209c +/* [RW 1] Command 2 go. */ +#define DMAE_REG_GO_C2 0x1020a0 +/* [RW 1] Command 3 go. */ +#define DMAE_REG_GO_C3 0x1020a4 +/* [RW 1] Command 4 go. */ +#define DMAE_REG_GO_C4 0x1020a8 +/* [RW 1] Command 5 go. */ +#define DMAE_REG_GO_C5 0x1020ac +/* [RW 1] Command 6 go. */ +#define DMAE_REG_GO_C6 0x1020b0 +/* [RW 1] Command 7 go. */ +#define DMAE_REG_GO_C7 0x1020b4 +/* [RW 1] Command 8 go. */ +#define DMAE_REG_GO_C8 0x1020b8 +/* [RW 1] Command 9 go. */ +#define DMAE_REG_GO_C9 0x1020bc +/* [RW 32] Doorbell address for RBC doorbells (function 0). */ +#define DORQ_REG_DB_ADDR0 0x17008c +/* [RW 6] Interrupt mask register #0 read/write */ +#define DORQ_REG_DORQ_INT_MASK 0x170180 +/* [R 6] Interrupt register #0 read */ +#define DORQ_REG_DORQ_INT_STS 0x170174 +/* [RC 6] Interrupt register #0 read clear */ +#define DORQ_REG_DORQ_INT_STS_CLR 0x170178 +/* [RW 2] Parity mask register #0 read/write */ +#define DORQ_REG_DORQ_PRTY_MASK 0x170190 +/* [R 2] Parity register #0 read */ +#define DORQ_REG_DORQ_PRTY_STS 0x170184 +/* [RC 2] Parity register #0 read clear */ +#define DORQ_REG_DORQ_PRTY_STS_CLR 0x170188 +/* [R 13] Current value of the DQ FIFO fill level according to following + * pointer. The range is 0 - 256 FIFO rows; where each row stands for the + * doorbell. + */ +#define DORQ_REG_DQ_FILL_LVLF 0x1700a4 +/* [R 1] DQ FIFO full status. Is set; when FIFO filling level is more or + * equal to full threshold; reset on full clear. 
+ */ +#define DORQ_REG_DQ_FULL_ST 0x1700c0 +#define DORQ_REG_MAX_RVFID_SIZE 0x1701ec +#define DORQ_REG_MODE_ACT 0x170008 +/* [RW 5] The normal mode CID extraction offset. */ +#define DORQ_REG_NORM_CID_OFST 0x17002c +#define DORQ_REG_PF_USAGE_CNT 0x1701d0 +/* [R 4] Current value of response A counter credit. Initial credit is + * configured through write to ~dorq_registers_rsp_init_crd.rsp_init_crd + * register. + */ +#define DORQ_REG_RSPA_CRD_CNT 0x1700ac +/* [R 4] Current value of response B counter credit. Initial credit is + * configured through write to ~dorq_registers_rsp_init_crd.rsp_init_crd + * register. + */ +#define DORQ_REG_RSPB_CRD_CNT 0x1700b0 +#define DORQ_REG_VF_NORM_CID_BASE 0x1701a0 +#define DORQ_REG_VF_NORM_CID_OFST 0x1701f4 +#define DORQ_REG_VF_NORM_CID_WND_SIZE 0x1701a4 +#define DORQ_REG_VF_NORM_VF_BASE 0x1701a8 +/* [RW 10] VF type validation mask value */ +#define DORQ_REG_VF_TYPE_MASK_0 0x170218 +/* [RW 17] VF type validation Min MCID value */ +#define DORQ_REG_VF_TYPE_MAX_MCID_0 0x1702d8 +/* [RW 17] VF type validation Max MCID value */ +#define DORQ_REG_VF_TYPE_MIN_MCID_0 0x170298 +/* [RW 10] VF type validation comp value */ +#define DORQ_REG_VF_TYPE_VALUE_0 0x170258 +#define DORQ_REG_VF_USAGE_CNT 0x170320 +#define DORQ_REG_VF_USAGE_CT_LIMIT 0x170340 +#define HC_CONFIG_0_REG_ATTN_BIT_EN_0 (0x1 << 4) +#define HC_CONFIG_0_REG_BLOCK_DISABLE_0 (0x1 << 0) +#define HC_CONFIG_0_REG_INT_LINE_EN_0 (0x1 << 3) +#define HC_CONFIG_0_REG_MSI_ATTN_EN_0 (0x1 << 7) +#define HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 (0x1 << 2) +#define HC_CONFIG_0_REG_SINGLE_ISR_EN_0 (0x1 << 1) +#define HC_CONFIG_1_REG_BLOCK_DISABLE_1 (0x1 << 0) +#define HC_REG_AGG_INT_0 0x108050 +#define HC_REG_ATTN_MSG0_ADDR_L 0x108018 +#define HC_REG_ATTN_MSG1_ADDR_L 0x108020 +#define HC_REG_COMMAND_REG 0x108180 +#define HC_REG_CONFIG_0 0x108000 +#define HC_REG_CONFIG_1 0x108004 +/* [RW 7] Interrupt mask register #0 read/write */ +#define HC_REG_HC_INT_MASK 0x108090 +/* [R 7] Interrupt register #0 read */ +#define HC_REG_HC_INT_STS 0x108084 +/* [RC 7] Interrupt register #0 read clear */ +#define HC_REG_HC_INT_STS_CLR 0x108088 +/* [RW 3] Parity mask register #0 read/write */ +#define HC_REG_HC_PRTY_MASK 0x1080a0 +/* [R 3] Parity register #0 read */ +#define HC_REG_HC_PRTY_STS 0x108094 +/* [RC 3] Parity register #0 read clear */ +#define HC_REG_HC_PRTY_STS_CLR 0x108098 +#define HC_REG_INT_MASK 0x108108 +#define HC_REG_LEADING_EDGE_0 0x108040 +#define HC_REG_MAIN_MEMORY 0x108800 +#define HC_REG_MAIN_MEMORY_SIZE 152 +#define HC_REG_TRAILING_EDGE_0 0x108044 +#define IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN (0x1 << 1) +#define IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE (0x1 << 0) +#define IGU_REG_ATTENTION_ACK_BITS 0x130108 +/* [R 4] Debug: attn_fsm */ +#define IGU_REG_ATTN_FSM 0x130054 +#define IGU_REG_ATTN_MSG_ADDR_H 0x13011c +#define IGU_REG_ATTN_MSG_ADDR_L 0x130120 +/* [R 4] Debug: [3] - attention write done message is pending (0-no pending; + * 1-pending). [2:0] = PFID. Pending means attention message was sent; but + * write done didn't receive. + */ +#define IGU_REG_ATTN_WRITE_DONE_PENDING 0x130030 +#define IGU_REG_BLOCK_CONFIGURATION 0x130000 +#define IGU_REG_COMMAND_REG_32LSB_DATA 0x130124 +#define IGU_REG_COMMAND_REG_CTRL 0x13012c +/* [WB_R 32] Cleanup bit status per SB. 1 = cleanup is set. 0 = cleanup bit + * is clear. The bits in this registers are set and clear via the producer + * command. Data valid only in addresses 0-4. all the rest are zero. 
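+ * For example; the cleanup bit of status block N would be expected at bit + * (N % 32) of 32-bit word (N / 32) of this array; only words 0-4 carry + * valid data (illustration; layout inferred from the description above).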
+ */ +#define IGU_REG_CSTORM_TYPE_0_SB_CLEANUP 0x130200 +/* [R 5] Debug: ctrl_fsm */ +#define IGU_REG_CTRL_FSM 0x130064 +/* [R 1] data available for error memory. If this bit is clear do not read + * from error_handling_memory. + */ +#define IGU_REG_ERROR_HANDLING_DATA_VALID 0x130130 +/* [RW 11] Interrupt mask register #0 read/write */ +#define IGU_REG_IGU_INT_MASK 0x130098 +/* [R 11] Interrupt register #0 read */ +#define IGU_REG_IGU_INT_STS 0x13008c +/* [RC 11] Interrupt register #0 read clear */ +#define IGU_REG_IGU_INT_STS_CLR 0x130090 +/* [RW 11] Parity mask register #0 read/write */ +#define IGU_REG_IGU_PRTY_MASK 0x1300a8 +/* [R 11] Parity register #0 read */ +#define IGU_REG_IGU_PRTY_STS 0x13009c +/* [RC 11] Parity register #0 read clear */ +#define IGU_REG_IGU_PRTY_STS_CLR 0x1300a0 +/* [R 4] Debug: int_handle_fsm */ +#define IGU_REG_INT_HANDLE_FSM 0x130050 +#define IGU_REG_LEADING_EDGE_LATCH 0x130134 +/* [RW 14] mapping CAM; relevant for E2 operating mode only. [0] - valid. + * [6:1] - vector number; [13:7] - FID (if VF - [13] = 0; [12:7] = VF + * number; if PF - [13] = 1; [12:10] = 0; [9:7] = PF number); + */ +#define IGU_REG_MAPPING_MEMORY 0x131000 +#define IGU_REG_MAPPING_MEMORY_SIZE 136 +#define IGU_REG_PBA_STATUS_LSB 0x130138 +#define IGU_REG_PBA_STATUS_MSB 0x13013c +#define IGU_REG_PCI_PF_MSIX_EN 0x130144 +#define IGU_REG_PCI_PF_MSIX_FUNC_MASK 0x130148 +#define IGU_REG_PCI_PF_MSI_EN 0x130140 +/* [WB_R 32] Each bit represents the pending bits status for that SB. 0 = no + * pending; 1 = pending. Pending means interrupt was asserted; and write + * done was not received. Data valid only in addresses 0-4. all the rest are + * zero. + */ +#define IGU_REG_PENDING_BITS_STATUS 0x130300 +#define IGU_REG_PF_CONFIGURATION 0x130154 +/* [RW 20] producers only. E2 mode: address 0-135 match to the mapping + * memory; 136 - PF0 default prod; 137 PF1 default prod; 138 - PF2 default + * prod; 139 PF3 default prod; 140 - PF0 - ATTN prod; 141 - PF1 - ATTN prod; + * 142 - PF2 - ATTN prod; 143 - PF3 - ATTN prod; 144-147 reserved. E1.5 mode + * - In backward compatible mode; for non default SB; each even line in the + * memory holds the U producer and each odd line holds the C producer. The + * first 128 producers are for NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The + * last 20 producers are for the DSB for each PF. Each PF has five segments + * (the order inside each segment is PF0; PF1; PF2; PF3) - 128-131 U prods; + * 132-135 C prods; 136-139 X prods; 140-143 T prods; 144-147 ATTN prods; + */ +#define IGU_REG_PROD_CONS_MEMORY 0x132000 +/* [R 3] Debug: pxp_arb_fsm */ +#define IGU_REG_PXP_ARB_FSM 0x130068 +/* [RW 6] Writing one to each bit will reset the appropriate memory. When the + * memory reset is finished the appropriate bit will be clear. 
Bit 0 - mapping + * memory; Bit 1 - SB memory; Bit 2 - SB interrupt and mask register; Bit 3 + * - MSIX memory; Bit 4 - PBA memory; Bit 5 - statistics; + */ +#define IGU_REG_RESET_MEMORIES 0x130158 +/* [R 4] Debug: sb_ctrl_fsm */ +#define IGU_REG_SB_CTRL_FSM 0x13004c +#define IGU_REG_SB_INT_BEFORE_MASK_LSB 0x13015c +#define IGU_REG_SB_INT_BEFORE_MASK_MSB 0x130160 +#define IGU_REG_SB_MASK_LSB 0x130164 +#define IGU_REG_SB_MASK_MSB 0x130168 +/* [RW 16] Number of command that were dropped without causing an interrupt + * due to: read access for WO BAR address; or write access for RO BAR + * address or any access for reserved address or PCI function error is set + * and address is not MSIX; PBA or cleanup + */ +#define IGU_REG_SILENT_DROP 0x13016c +/* [RW 10] Number of MSI/MSIX/ATTN messages sent for the function: 0-63 - + * number of MSIX messages per VF; 64-67 - number of MSI/MSIX messages per + * PF; 68-71 number of ATTN messages per PF + */ +#define IGU_REG_STATISTIC_NUM_MESSAGE_SENT 0x130800 +#define IGU_REG_TRAILING_EDGE_LATCH 0x130104 +#define IGU_REG_VF_CONFIGURATION 0x130170 +/* [WB_R 32] Each bit represent write done pending bits status for that SB + * (MSI/MSIX message was sent and write done was not received yet). 0 = + * clear; 1 = set. Data valid only in addresses 0-4. all the rest are zero. + */ +#define IGU_REG_WRITE_DONE_PENDING 0x130480 +#define MCP_A_REG_MCPR_SCRATCH 0x3a0000 +#define MCP_REG_MCPR_ACCESS_LOCK 0x8009c +#define MCP_REG_MCPR_CPU_PROGRAM_COUNTER 0x8501c +#define MCP_REG_MCPR_GP_INPUTS 0x800c0 +#define MCP_REG_MCPR_GP_OENABLE 0x800c8 +#define MCP_REG_MCPR_GP_OUTPUTS 0x800c4 +#define MCP_REG_MCPR_IMC_COMMAND 0x85900 +#define MCP_REG_MCPR_IMC_DATAREG0 0x85920 +#define MCP_REG_MCPR_IMC_SLAVE_CONTROL 0x85904 +#define MCP_REG_MCPR_NVM_ACCESS_ENABLE 0x86424 +#define MCP_REG_MCPR_NVM_ADDR 0x8640c +#define MCP_REG_MCPR_NVM_CFG4 0x8642c +#define MCP_REG_MCPR_NVM_COMMAND 0x86400 +#define MCP_REG_MCPR_NVM_READ 0x86410 +#define MCP_REG_MCPR_NVM_SW_ARB 0x86420 +#define MCP_REG_MCPR_NVM_WRITE 0x86408 +#define MCP_REG_MCPR_SCRATCH 0xa0000 +#define MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK (0x1 << 1) +#define MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK (0x1 << 0) +/* [R 32] read first 32 bit after inversion of function 0. mapped as + * follows: [0] NIG attention for function0; [1] NIG attention for + * function1; [2] GPIO1 mcp; [3] GPIO2 mcp; [4] GPIO3 mcp; [5] GPIO4 mcp; + * [6] GPIO1 function 1; [7] GPIO2 function 1; [8] GPIO3 function 1; [9] + * GPIO4 function 1; [10] PCIE glue/PXP VPD event function0; [11] PCIE + * glue/PXP VPD event function1; [12] PCIE glue/PXP Expansion ROM event0; + * [13] PCIE glue/PXP Expansion ROM event1; [14] SPIO4; [15] SPIO5; [16] + * MSI/X indication for mcp; [17] MSI/X indication for function 1; [18] BRB + * Parity error; [19] BRB Hw interrupt; [20] PRS Parity error; [21] PRS Hw + * interrupt; [22] SRC Parity error; [23] SRC Hw interrupt; [24] TSDM Parity + * error; [25] TSDM Hw interrupt; [26] TCM Parity error; [27] TCM Hw + * interrupt; [28] TSEMI Parity error; [29] TSEMI Hw interrupt; [30] + * PBClient Parity error; [31] PBClient Hw interrupt; + */ +#define MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 0xa42c +#define MISC_REG_AEU_AFTER_INVERT_1_FUNC_1 0xa430 +/* [R 32] read first 32 bit after inversion of mcp. 
mapped as follows: [0] + * NIG attention for function0; [1] NIG attention for function1; [2] GPIO1 + * mcp; [3] GPIO2 mcp; [4] GPIO3 mcp; [5] GPIO4 mcp; [6] GPIO1 function 1; + * [7] GPIO2 function 1; [8] GPIO3 function 1; [9] GPIO4 function 1; [10] + * PCIE glue/PXP VPD event function0; [11] PCIE glue/PXP VPD event + * function1; [12] PCIE glue/PXP Expansion ROM event0; [13] PCIE glue/PXP + * Expansion ROM event1; [14] SPIO4; [15] SPIO5; [16] MSI/X indication for + * mcp; [17] MSI/X indication for function 1; [18] BRB Parity error; [19] + * BRB Hw interrupt; [20] PRS Parity error; [21] PRS Hw interrupt; [22] SRC + * Parity error; [23] SRC Hw interrupt; [24] TSDM Parity error; [25] TSDM Hw + * interrupt; [26] TCM Parity error; [27] TCM Hw interrupt; [28] TSEMI + * Parity error; [29] TSEMI Hw interrupt; [30] PBClient Parity error; [31] + * PBClient Hw interrupt; + */ +#define MISC_REG_AEU_AFTER_INVERT_1_MCP 0xa434 +/* [R 32] read second 32 bit after inversion of function 0. mapped as + * follows: [0] PBF Parity error; [1] PBF Hw interrupt; [2] QM Parity error; + * [3] QM Hw interrupt; [4] Timers Parity error; [5] Timers Hw interrupt; + * [6] XSDM Parity error; [7] XSDM Hw interrupt; [8] XCM Parity error; [9] + * XCM Hw interrupt; [10] XSEMI Parity error; [11] XSEMI Hw interrupt; [12] + * DoorbellQ Parity error; [13] DoorbellQ Hw interrupt; [14] NIG Parity + * error; [15] NIG Hw interrupt; [16] Vaux PCI core Parity error; [17] Vaux + * PCI core Hw interrupt; [18] Debug Parity error; [19] Debug Hw interrupt; + * [20] USDM Parity error; [21] USDM Hw interrupt; [22] UCM Parity error; + * [23] UCM Hw interrupt; [24] USEMI Parity error; [25] USEMI Hw interrupt; + * [26] UPB Parity error; [27] UPB Hw interrupt; [28] CSDM Parity error; + * [29] CSDM Hw interrupt; [30] CCM Parity error; [31] CCM Hw interrupt; + */ +#define MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 0xa438 +#define MISC_REG_AEU_AFTER_INVERT_2_FUNC_1 0xa43c +/* [R 32] read second 32 bit after inversion of mcp. mapped as follows: [0] + * PBF Parity error; [1] PBF Hw interrupt; [2] QM Parity error; [3] QM Hw + * interrupt; [4] Timers Parity error; [5] Timers Hw interrupt; [6] XSDM + * Parity error; [7] XSDM Hw interrupt; [8] XCM Parity error; [9] XCM Hw + * interrupt; [10] XSEMI Parity error; [11] XSEMI Hw interrupt; [12] + * DoorbellQ Parity error; [13] DoorbellQ Hw interrupt; [14] NIG Parity + * error; [15] NIG Hw interrupt; [16] Vaux PCI core Parity error; [17] Vaux + * PCI core Hw interrupt; [18] Debug Parity error; [19] Debug Hw interrupt; + * [20] USDM Parity error; [21] USDM Hw interrupt; [22] UCM Parity error; + * [23] UCM Hw interrupt; [24] USEMI Parity error; [25] USEMI Hw interrupt; + * [26] UPB Parity error; [27] UPB Hw interrupt; [28] CSDM Parity error; + * [29] CSDM Hw interrupt; [30] CCM Parity error; [31] CCM Hw interrupt; + */ +#define MISC_REG_AEU_AFTER_INVERT_2_MCP 0xa440 +/* [R 32] read third 32 bit after inversion of function 0. 
mapped as + * follows: [0] CSEMI Parity error; [1] CSEMI Hw interrupt; [2] PXP Parity + * error; [3] PXP Hw interrupt; [4] PXPpciClockClient Parity error; [5] + * PXPpciClockClient Hw interrupt; [6] CFC Parity error; [7] CFC Hw + * interrupt; [8] CDU Parity error; [9] CDU Hw interrupt; [10] DMAE Parity + * error; [11] DMAE Hw interrupt; [12] IGU (HC) Parity error; [13] IGU (HC) + * Hw interrupt; [14] MISC Parity error; [15] MISC Hw interrupt; [16] + * pxp_misc_mps_attn; [17] Flash event; [18] SMB event; [19] MCP attn0; [20] + * MCP attn1; [21] SW timers attn_1 func0; [22] SW timers attn_2 func0; [23] + * SW timers attn_3 func0; [24] SW timers attn_4 func0; [25] PERST; [26] SW + * timers attn_1 func1; [27] SW timers attn_2 func1; [28] SW timers attn_3 + * func1; [29] SW timers attn_4 func1; [30] General attn0; [31] General + * attn1; + */ +#define MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 0xa444 +#define MISC_REG_AEU_AFTER_INVERT_3_FUNC_1 0xa448 +/* [R 32] read third 32 bit after inversion of mcp. mapped as follows: [0] + * CSEMI Parity error; [1] CSEMI Hw interrupt; [2] PXP Parity error; [3] PXP + * Hw interrupt; [4] PXPpciClockClient Parity error; [5] PXPpciClockClient + * Hw interrupt; [6] CFC Parity error; [7] CFC Hw interrupt; [8] CDU Parity + * error; [9] CDU Hw interrupt; [10] DMAE Parity error; [11] DMAE Hw + * interrupt; [12] IGU (HC) Parity error; [13] IGU (HC) Hw interrupt; [14] + * MISC Parity error; [15] MISC Hw interrupt; [16] pxp_misc_mps_attn; [17] + * Flash event; [18] SMB event; [19] MCP attn0; [20] MCP attn1; [21] SW + * timers attn_1 func0; [22] SW timers attn_2 func0; [23] SW timers attn_3 + * func0; [24] SW timers attn_4 func0; [25] PERST; [26] SW timers attn_1 + * func1; [27] SW timers attn_2 func1; [28] SW timers attn_3 func1; [29] SW + * timers attn_4 func1; [30] General attn0; [31] General attn1; + */ +#define MISC_REG_AEU_AFTER_INVERT_3_MCP 0xa44c +/* [R 32] read fourth 32 bit after inversion of function 0. mapped as + * follows: [0] General attn2; [1] General attn3; [2] General attn4; [3] + * General attn5; [4] General attn6; [5] General attn7; [6] General attn8; + * [7] General attn9; [8] General attn10; [9] General attn11; [10] General + * attn12; [11] General attn13; [12] General attn14; [13] General attn15; + * [14] General attn16; [15] General attn17; [16] General attn18; [17] + * General attn19; [18] General attn20; [19] General attn21; [20] Main power + * interrupt; [21] RBCR Latched attn; [22] RBCT Latched attn; [23] RBCN + * Latched attn; [24] RBCU Latched attn; [25] RBCP Latched attn; [26] GRC + * Latched timeout attention; [27] GRC Latched reserved access attention; + * [28] MCP Latched rom_parity; [29] MCP Latched ump_rx_parity; [30] MCP + * Latched ump_tx_parity; [31] MCP Latched scpad_parity; + */ +#define MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 0xa450 +#define MISC_REG_AEU_AFTER_INVERT_4_FUNC_1 0xa454 +/* [R 32] read fourth 32 bit after inversion of mcp. 
mapped as follows: [0] + * General attn2; [1] General attn3; [2] General attn4; [3] General attn5; + * [4] General attn6; [5] General attn7; [6] General attn8; [7] General + * attn9; [8] General attn10; [9] General attn11; [10] General attn12; [11] + * General attn13; [12] General attn14; [13] General attn15; [14] General + * attn16; [15] General attn17; [16] General attn18; [17] General attn19; + * [18] General attn20; [19] General attn21; [20] Main power interrupt; [21] + * RBCR Latched attn; [22] RBCT Latched attn; [23] RBCN Latched attn; [24] + * RBCU Latched attn; [25] RBCP Latched attn; [26] GRC Latched timeout + * attention; [27] GRC Latched reserved access attention; [28] MCP Latched + * rom_parity; [29] MCP Latched ump_rx_parity; [30] MCP Latched + * ump_tx_parity; [31] MCP Latched scpad_parity; + */ +#define MISC_REG_AEU_AFTER_INVERT_4_MCP 0xa458 +/* [R 32] Read fifth 32 bit after inversion of function 0. Mapped as + * follows: [0] PGLUE config_space; [1] PGLUE misc_flr; [2] PGLUE B RBC + * attention [3] PGLUE B RBC parity; [4] ATC attention; [5] ATC parity; [6] + * mstat0 attention; [7] mstat0 parity; [8] mstat1 attention; [9] mstat1 + * parity; [31-10] Reserved; + */ +#define MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 0xa700 +/* [W 14] write to this register results with the clear of the latched + * signals; one in d0 clears RBCR latch; one in d1 clears RBCT latch; one in + * d2 clears RBCN latch; one in d3 clears RBCU latch; one in d4 clears RBCP + * latch; one in d5 clears GRC Latched timeout attention; one in d6 clears + * GRC Latched reserved access attention; one in d7 clears Latched + * rom_parity; one in d8 clears Latched ump_rx_parity; one in d9 clears + * Latched ump_tx_parity; one in d10 clears Latched scpad_parity (both + * ports); one in d11 clears pxpv_misc_mps_attn; one in d12 clears + * pxp_misc_exp_rom_attn0; one in d13 clears pxp_misc_exp_rom_attn1; read + * from this register return zero + */ +#define MISC_REG_AEU_CLR_LATCH_SIGNAL 0xa45c +/* [RW 32] first 32b for enabling the output for function 0 output0. mapped + * as follows: [0] NIG attention for function0; [1] NIG attention for + * function1; [2] GPIO1 function 0; [3] GPIO2 function 0; [4] GPIO3 function + * 0; [5] GPIO4 function 0; [6] GPIO1 function 1; [7] GPIO2 function 1; [8] + * GPIO3 function 1; [9] GPIO4 function 1; [10] PCIE glue/PXP VPD event + * function0; [11] PCIE glue/PXP VPD event function1; [12] PCIE glue/PXP + * Expansion ROM event0; [13] PCIE glue/PXP Expansion ROM event1; [14] + * SPIO4; [15] SPIO5; [16] MSI/X indication for function 0; [17] MSI/X + * indication for function 1; [18] BRB Parity error; [19] BRB Hw interrupt; + * [20] PRS Parity error; [21] PRS Hw interrupt; [22] SRC Parity error; [23] + * SRC Hw interrupt; [24] TSDM Parity error; [25] TSDM Hw interrupt; [26] + * TCM Parity error; [27] TCM Hw interrupt; [28] TSEMI Parity error; [29] + * TSEMI Hw interrupt; [30] PBClient Parity error; [31] PBClient Hw + * interrupt; + */ +#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0 0xa06c +#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1 0xa07c +#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2 0xa08c +/* [RW 32] first 32b for enabling the output for function 1 output0. 
mapped + * as follows: [0] NIG attention for function0; [1] NIG attention for + * function1; [2] GPIO1 function 1; [3] GPIO2 function 1; [4] GPIO3 function + * 1; [5] GPIO4 function 1; [6] GPIO1 function 1; [7] GPIO2 function 1; [8] + * GPIO3 function 1; [9] GPIO4 function 1; [10] PCIE glue/PXP VPD event + * function0; [11] PCIE glue/PXP VPD event function1; [12] PCIE glue/PXP + * Expansion ROM event0; [13] PCIE glue/PXP Expansion ROM event1; [14] + * SPIO4; [15] SPIO5; [16] MSI/X indication for function 1; [17] MSI/X + * indication for function 1; [18] BRB Parity error; [19] BRB Hw interrupt; + * [20] PRS Parity error; [21] PRS Hw interrupt; [22] SRC Parity error; [23] + * SRC Hw interrupt; [24] TSDM Parity error; [25] TSDM Hw interrupt; [26] + * TCM Parity error; [27] TCM Hw interrupt; [28] TSEMI Parity error; [29] + * TSEMI Hw interrupt; [30] PBClient Parity error; [31] PBClient Hw + * interrupt; + */ +#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 0xa10c +#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 0xa11c +#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 0xa12c +/* [RW 32] fourth 32b for enabling the output for function 0 output0.mapped + * as follows: [0] General attn2; [1] General attn3; [2] General attn4; [3] + * General attn5; [4] General attn6; [5] General attn7; [6] General attn8; + * [7] General attn9; [8] General attn10; [9] General attn11; [10] General + * attn12; [11] General attn13; [12] General attn14; [13] General attn15; + * [14] General attn16; [15] General attn17; [16] General attn18; [17] + * General attn19; [18] General attn20; [19] General attn21; [20] Main power + * interrupt; [21] RBCR Latched attn; [22] RBCT Latched attn; [23] RBCN + * Latched attn; [24] RBCU Latched attn; [25] RBCP Latched attn; [26] GRC + * Latched timeout attention; [27] GRC Latched reserved access attention; + * [28] MCP Latched rom_parity; [29] MCP Latched ump_rx_parity; [30] MCP + * Latched ump_tx_parity; [31] MCP Latched scpad_parity; + */ +#define MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0 0xa078 +/* [RW 32] fourth 32b for enabling the output for function 1 output0.mapped + * as follows: [0] General attn2; [1] General attn3; [2] General attn4; [3] + * General attn5; [4] General attn6; [5] General attn7; [6] General attn8; + * [7] General attn9; [8] General attn10; [9] General attn11; [10] General + * attn12; [11] General attn13; [12] General attn14; [13] General attn15; + * [14] General attn16; [15] General attn17; [16] General attn18; [17] + * General attn19; [18] General attn20; [19] General attn21; [20] Main power + * interrupt; [21] RBCR Latched attn; [22] RBCT Latched attn; [23] RBCN + * Latched attn; [24] RBCU Latched attn; [25] RBCP Latched attn; [26] GRC + * Latched timeout attention; [27] GRC Latched reserved access attention; + * [28] MCP Latched rom_parity; [29] MCP Latched ump_rx_parity; [30] MCP + * Latched ump_tx_parity; [31] MCP Latched scpad_parity; + */ +#define MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0 0xa118 +/* [RW 32] fourth 32b for enabling the output for close the gate nig. 
Mapped + * as follows: [0] General attn2; [1] General attn3; [2] General attn4; [3] + * General attn5; [4] General attn6; [5] General attn7; [6] General attn8; + * [7] General attn9; [8] General attn10; [9] General attn11; [10] General + * attn12; [11] General attn13; [12] General attn14; [13] General attn15; + * [14] General attn16; [15] General attn17; [16] General attn18; [17] + * General attn19; [18] General attn20; [19] General attn21; [20] Main power + * interrupt; [21] RBCR Latched attn; [22] RBCT Latched attn; [23] RBCN + * Latched attn; [24] RBCU Latched attn; [25] RBCP Latched attn; [26] GRC + * Latched timeout attention; [27] GRC Latched reserved access attention; + * [28] MCP Latched rom_parity; [29] MCP Latched ump_rx_parity; [30] MCP + * Latched ump_tx_parity; [31] MCP Latched scpad_parity; + */ +#define MISC_REG_AEU_ENABLE4_NIG_0 0xa0f8 +#define MISC_REG_AEU_ENABLE4_NIG_1 0xa198 +/* [RW 32] fourth 32b for enabling the output for close the gate pxp. Mapped + * as follows: [0] General attn2; [1] General attn3; [2] General attn4; [3] + * General attn5; [4] General attn6; [5] General attn7; [6] General attn8; + * [7] General attn9; [8] General attn10; [9] General attn11; [10] General + * attn12; [11] General attn13; [12] General attn14; [13] General attn15; + * [14] General attn16; [15] General attn17; [16] General attn18; [17] + * General attn19; [18] General attn20; [19] General attn21; [20] Main power + * interrupt; [21] RBCR Latched attn; [22] RBCT Latched attn; [23] RBCN + * Latched attn; [24] RBCU Latched attn; [25] RBCP Latched attn; [26] GRC + * Latched timeout attention; [27] GRC Latched reserved access attention; + * [28] MCP Latched rom_parity; [29] MCP Latched ump_rx_parity; [30] MCP + * Latched ump_tx_parity; [31] MCP Latched scpad_parity; + */ +#define MISC_REG_AEU_ENABLE4_PXP_0 0xa108 +#define MISC_REG_AEU_ENABLE4_PXP_1 0xa1a8 +/* [RW 32] fifth 32b for enabling the output for function 0 output0. Mapped + * as follows: [0] PGLUE config_space; [1] PGLUE misc_flr; [2] PGLUE B RBC + * attention [3] PGLUE B RBC parity; [4] ATC attention; [5] ATC parity; [6] + * mstat0 attention; [7] mstat0 parity; [8] mstat1 attention; [9] mstat1 + * parity; [31-10] Reserved; + */ +#define MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0 0xa688 +/* [RW 32] Fifth 32b for enabling the output for function 1 output0. Mapped + * as follows: [0] PGLUE config_space; [1] PGLUE misc_flr; [2] PGLUE B RBC + * attention [3] PGLUE B RBC parity; [4] ATC attention; [5] ATC parity; [6] + * mstat0 attention; [7] mstat0 parity; [8] mstat1 attention; [9] mstat1 + * parity; [31-10] Reserved; + */ +#define MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 0xa6b0 +/* [RW 1] set/clr general attention 0; this will set/clr bit 94 in the aeu + * 128 bit vector + */ +#define MISC_REG_AEU_GENERAL_ATTN_0 0xa000 +#define MISC_REG_AEU_GENERAL_ATTN_1 0xa004 +#define MISC_REG_AEU_GENERAL_ATTN_10 0xa028 +#define MISC_REG_AEU_GENERAL_ATTN_11 0xa02c +#define MISC_REG_AEU_GENERAL_ATTN_12 0xa030 +#define MISC_REG_AEU_GENERAL_ATTN_2 0xa008 +#define MISC_REG_AEU_GENERAL_ATTN_3 0xa00c +#define MISC_REG_AEU_GENERAL_ATTN_4 0xa010 +#define MISC_REG_AEU_GENERAL_ATTN_5 0xa014 +#define MISC_REG_AEU_GENERAL_ATTN_6 0xa018 +#define MISC_REG_AEU_GENERAL_ATTN_7 0xa01c +#define MISC_REG_AEU_GENERAL_ATTN_8 0xa020 +#define MISC_REG_AEU_GENERAL_ATTN_9 0xa024 +#define MISC_REG_AEU_GENERAL_MASK 0xa61c +/* [RW 10] [7:0] = mask 8 attention output signals toward IGU function0; + * [9:8] = reserved. 
0 = mask; 1 = unmask + */ +#define MISC_REG_AEU_MASK_ATTN_FUNC_0 0xa060 +#define MISC_REG_AEU_MASK_ATTN_FUNC_1 0xa064 +/* [RW 1] If set a system kill occurred. Reset on POR reset. */ +#define MISC_REG_AEU_SYS_KILL_OCCURRED 0xa610 +/* [RW 32] Represent the status of the input vector to the AEU when a system + * kill occurred. The register is reset in por reset. Mapped as follows: [0] + * NIG attention for function0; [1] NIG attention for function1; [2] GPIO1 + * mcp; [3] GPIO2 mcp; [4] GPIO3 mcp; [5] GPIO4 mcp; [6] GPIO1 function 1; + * [7] GPIO2 function 1; [8] GPIO3 function 1; [9] GPIO4 function 1; [10] + * PCIE glue/PXP VPD event function0; [11] PCIE glue/PXP VPD event + * function1; [12] PCIE glue/PXP Expansion ROM event0; [13] PCIE glue/PXP + * Expansion ROM event1; [14] SPIO4; [15] SPIO5; [16] MSI/X indication for + * mcp; [17] MSI/X indication for function 1; [18] BRB Parity error; [19] + * BRB Hw interrupt; [20] PRS Parity error; [21] PRS Hw interrupt; [22] SRC + * Parity error; [23] SRC Hw interrupt; [24] TSDM Parity error; [25] TSDM Hw + * interrupt; [26] TCM Parity error; [27] TCM Hw interrupt; [28] TSEMI + * Parity error; [29] TSEMI Hw interrupt; [30] PBClient Parity error; [31] + * PBClient Hw interrupt. Reset on POR reset. + */ +#define MISC_REG_AEU_SYS_KILL_STATUS_0 0xa600 +#define MISC_REG_AEU_SYS_KILL_STATUS_1 0xa604 +#define MISC_REG_AEU_SYS_KILL_STATUS_2 0xa608 +#define MISC_REG_AEU_SYS_KILL_STATUS_3 0xa60c +/* [R 32] This field indicates the type of the device. '0' - 2 Ports; '1' - + * 1 Port. Global register. + */ +#define MISC_REG_BOND_ID 0xa400 +/* [R 16] These bits indicate the part number for the chip. Global register. */ +#define MISC_REG_CHIP_NUM 0xa408 +/* [R 4] These bits indicate the base revision of the chip. This value + * starts at 0x0 for the A0 tape-out and increments by one for each + * all-layer tape-out. Global register. + */ +#define MISC_REG_CHIP_REV 0xa40c +/* [R 14] otp_misc_do[100:0] spare bits collection: 13:11- + * otp_misc_do[100:98]; 10:7 - otp_misc_do[87:84]; 6:3 - otp_misc_do[75:72]; + * 2:1 - otp_misc_do[51:50]; 0 - otp_misc_do[1]. + */ +#define MISC_REG_CHIP_TYPE 0xac60 +#define MISC_REG_CHIP_TYPE_57811_MASK (1 << 1) +#define MISC_REG_CPMU_LP_DR_ENABLE 0xa858 +/* [RW 1] FW EEE LPI Enable. When 1 indicates that EEE LPI mode is enabled + * by FW. When 0 indicates that the EEE LPI mode is disabled by FW. Clk + * 25MHz. Reset on hard reset. + */ +#define MISC_REG_CPMU_LP_FW_ENABLE_P0 0xa84c +/* [RW 32] EEE LPI Idle Threshold. The threshold value for the idle EEE LPI + * counter. Timer tick is 1 us. Clock 25MHz. Reset on hard reset. + */ +#define MISC_REG_CPMU_LP_IDLE_THR_P0 0xa8a0 +/* [RW 18] LPI entry events mask. [0] - Vmain SM Mask. When 1 indicates that + * the Vmain SM end state is disabled. When 0 indicates that the Vmain SM + * end state is enabled. [1] - FW Queues Empty Mask. When 1 indicates that + * the FW command that all Queues are empty is disabled. When 0 indicates + * that the FW command that all Queues are empty is enabled. [2] - FW Early + * Exit Mask / Reserved (Entry mask). When 1 indicates that the FW Early + * Exit command is disabled. When 0 indicates that the FW Early Exit command + * is enabled. This bit applicable only in the EXIT Events Mask registers. + * [3] - PBF Request Mask. When 1 indicates that the PBF Request indication + * is disabled. When 0 indicates that the PBF Request indication is enabled. + * [4] - Tx Request Mask. When =1 indicates that the Tx other Than PBF + * Request indication is disabled. 
When 0 indicates that the Tx Other Than + * PBF Request indication is enabled. [5] - Rx EEE LPI Status Mask. When 1 + * indicates that the RX EEE LPI Status indication is disabled. When 0 + * indicates that the RX EEE LPI Status indication is enabled. In the EXIT + * Events Masks registers; this bit masks the falling edge detect of the LPI + * Status (Rx LPI is on - off). [6] - Tx Pause Mask. When 1 indicates that + * the Tx Pause indication is disabled. When 0 indicates that the Tx Pause + * indication is enabled. [7] - BRB1 Empty Mask. When 1 indicates that the + * BRB1 EMPTY indication is disabled. When 0 indicates that the BRB1 EMPTY + * indication is enabled. [8] - QM Idle Mask. When 1 indicates that the QM + * IDLE indication is disabled. When 0 indicates that the QM IDLE indication + * is enabled. (One bit for both VOQ0 and VOQ1). [9] - QM LB Idle Mask. When + * 1 indicates that the QM IDLE indication for LOOPBACK is disabled. When 0 + * indicates that the QM IDLE indication for LOOPBACK is enabled. [10] - L1 + * Status Mask. When 1 indicates that the L1 Status indication from the PCIE + * CORE is disabled. When 0 indicates that the RX EEE LPI Status indication + * from the PCIE CORE is enabled. In the EXIT Events Masks registers; this + * bit masks the falling edge detect of the L1 status (L1 is on - off). [11] + * - P0 E0 EEE EEE LPI REQ Mask. When =1 indicates that the P0 E0 EEE EEE + * LPI REQ indication is disabled. When =0 indicates that the P0 E0 EEE LPI + * REQ indication is enabled. [12] - P1 E0 EEE LPI REQ Mask. When =1 + * indicates that the P0 EEE LPI REQ indication is disabled. When =0 + * indicates that the P0 EEE LPI REQ indication is enabled. [13] - P0 E1 EEE + * LPI REQ Mask. When =1 indicates that the P0 EEE LPI REQ indication is + * disabled. When =0 indicates that the P0 EEE LPI REQ indication is + * enabled. [14] - P1 E1 EEE LPI REQ Mask. When =1 indicates that the P0 EEE + * LPI REQ indication is disabled. When =0 indicates that the P0 EEE LPI REQ + * indication is enabled. [15] - L1 REQ Mask. When =1 indicates that the L1 + * REQ indication is disabled. When =0 indicates that the L1 indication is + * enabled. [16] - Rx EEE LPI Status Edge Detect Mask. When =1 indicates + * that the RX EEE LPI Status Falling Edge Detect indication is disabled (Rx + * EEE LPI is on - off). When =0 indicates that the RX EEE LPI Status + * Falling Edge Detec indication is enabled (Rx EEE LPI is on - off). This + * bit is applicable only in the EXIT Events Masks registers. [17] - L1 + * Status Edge Detect Mask. When =1 indicates that the L1 Status Falling + * Edge Detect indication from the PCIE CORE is disabled (L1 is on - off). + * When =0 indicates that the L1 Status Falling Edge Detect indication from + * the PCIE CORE is enabled (L1 is on - off). This bit is applicable only in + * the EXIT Events Masks registers. Clock 25MHz. Reset on hard reset. + */ +#define MISC_REG_CPMU_LP_MASK_ENT_P0 0xa880 +/* [RW 18] EEE LPI exit events mask. [0] - Vmain SM Mask. When 1 indicates + * that the Vmain SM end state is disabled. When 0 indicates that the Vmain + * SM end state is enabled. [1] - FW Queues Empty Mask. When 1 indicates + * that the FW command that all Queues are empty is disabled. When 0 + * indicates that the FW command that all Queues are empty is enabled. [2] - + * FW Early Exit Mask / Reserved (Entry mask). When 1 indicates that the FW + * Early Exit command is disabled. When 0 indicates that the FW Early Exit + * command is enabled. 
This bit applicable only in the EXIT Events Mask + * registers. [3] - PBF Request Mask. When 1 indicates that the PBF Request + * indication is disabled. When 0 indicates that the PBF Request indication + * is enabled. [4] - Tx Request Mask. When =1 indicates that the Tx other + * Than PBF Request indication is disabled. When 0 indicates that the Tx + * Other Than PBF Request indication is enabled. [5] - Rx EEE LPI Status + * Mask. When 1 indicates that the RX EEE LPI Status indication is disabled. + * When 0 indicates that the RX LPI Status indication is enabled. In the + * EXIT Events Masks registers; this bit masks the falling edge detect of + * the EEE LPI Status (Rx EEE LPI is on - off). [6] - Tx Pause Mask. When 1 + * indicates that the Tx Pause indication is disabled. When 0 indicates that + * the Tx Pause indication is enabled. [7] - BRB1 Empty Mask. When 1 + * indicates that the BRB1 EMPTY indication is disabled. When 0 indicates + * that the BRB1 EMPTY indication is enabled. [8] - QM Idle Mask. When 1 + * indicates that the QM IDLE indication is disabled. When 0 indicates that + * the QM IDLE indication is enabled. (One bit for both VOQ0 and VOQ1). [9] + * - QM LB Idle Mask. When 1 indicates that the QM IDLE indication for + * LOOPBACK is disabled. When 0 indicates that the QM IDLE indication for + * LOOPBACK is enabled. [10] - L1 Status Mask. When 1 indicates that the L1 + * Status indication from the PCIE CORE is disabled. When 0 indicates that + * the RX EEE LPI Status indication from the PCIE CORE is enabled. In the + * EXIT Events Masks registers; this bit masks the falling edge detect of + * the L1 status (L1 is on - off). [11] - P0 E0 EEE EEE LPI REQ Mask. When + * =1 indicates that the P0 E0 EEE EEE LPI REQ indication is disabled. When + * =0 indicates that the P0 E0 EEE LPI REQ indication is enabled. [12] - P1 + * E0 EEE LPI REQ Mask. When =1 indicates that the P0 EEE LPI REQ indication + * is disabled. When =0 indicates that the P0 EEE LPI REQ indication is + * enabled. [13] - P0 E1 EEE LPI REQ Mask. When =1 indicates that the P0 EEE + * LPI REQ indication is disabled. When =0 indicates that the P0 EEE LPI REQ + * indication is enabled. [14] - P1 E1 EEE LPI REQ Mask. When =1 indicates + * that the P0 EEE LPI REQ indication is disabled. When =0 indicates that + * the P0 EEE LPI REQ indication is enabled. [15] - L1 REQ Mask. When =1 + * indicates that the L1 REQ indication is disabled. When =0 indicates that + * the L1 indication is enabled. [16] - Rx EEE LPI Status Edge Detect Mask. + * When =1 indicates that the RX EEE LPI Status Falling Edge Detect + * indication is disabled (Rx EEE LPI is on - off). When =0 indicates that + * the RX EEE LPI Status Falling Edge Detec indication is enabled (Rx EEE + * LPI is on - off). This bit is applicable only in the EXIT Events Masks + * registers. [17] - L1 Status Edge Detect Mask. When =1 indicates that the + * L1 Status Falling Edge Detect indication from the PCIE CORE is disabled + * (L1 is on - off). When =0 indicates that the L1 Status Falling Edge + * Detect indication from the PCIE CORE is enabled (L1 is on - off). This + * bit is applicable only in the EXIT Events Masks registers.Clock 25MHz. + * Reset on hard reset. + */ +#define MISC_REG_CPMU_LP_MASK_EXT_P0 0xa888 +/* [RW 16] EEE LPI Entry Events Counter. A statistic counter with the number + * of counts that the SM entered the EEE LPI state. Clock 25MHz. Read only + * register. Reset on hard reset. 
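+ * For example; a read value of 10 would indicate that the state machine + * entered EEE LPI ten times since the last hard reset; the _P1 copy below + * counts the same events for the second port.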
+ */ +#define MISC_REG_CPMU_LP_SM_ENT_CNT_P0 0xa8b8 +/* [RW 16] EEE LPI Entry Events Counter. A statistic counter with the number + * of counts that the SM entered the EEE LPI state. Clock 25MHz. Read only + * register. Reset on hard reset. + */ +#define MISC_REG_CPMU_LP_SM_ENT_CNT_P1 0xa8bc +/* [RW 32] The following driver registers(1...16) represent 16 drivers and + * 32 clients. Each client can be controlled by one driver only. One in each + * bit represent that this driver control the appropriate client (Ex: bit 5 + * is set means this driver control client number 5). addr1 = set; addr0 = + * clear; read from both addresses will give the same result = status. write + * to address 1 will set a request to control all the clients that their + * appropriate bit (in the write command) is set. if the client is free (the + * appropriate bit in all the other drivers is clear) one will be written to + * that driver register; if the client isn't free the bit will remain zero. + * if the appropriate bit is set (the driver request to gain control on a + * client it already controls the ~MISC_REGISTERS_INT_STS.GENERIC_SW + * interrupt will be asserted). write to address 0 will set a request to + * free all the clients that their appropriate bit (in the write command) is + * set. if the appropriate bit is clear (the driver request to free a client + * it doesn't controls the ~MISC_REGISTERS_INT_STS.GENERIC_SW interrupt will + * be asserted). + */ +#define MISC_REG_DRIVER_CONTROL_1 0xa510 +#define MISC_REG_DRIVER_CONTROL_7 0xa3c8 +/* [R 1] Status of four port mode path swap input pin. */ +#define MISC_REG_FOUR_PORT_PATH_SWAP 0xa75c +/* [RW 2] 4 port path swap overwrite.[0] - Overwrite control; if it is 0 - + * the path_swap output is equal to 4 port mode path swap input pin; if it + * is 1 - the path_swap output is equal to bit[1] of this register; [1] - + * Overwrite value. If bit[0] of this register is 1 this is the value that + * receives the path_swap output. Reset on Hard reset. + */ +#define MISC_REG_FOUR_PORT_PATH_SWAP_OVWR 0xa738 +/* [R 1] Status of 4 port mode port swap input pin. */ +#define MISC_REG_FOUR_PORT_PORT_SWAP 0xa754 +/* [RW 2] 4 port port swap overwrite.[0] - Overwrite control; if it is 0 - + * the port_swap output is equal to 4 port mode port swap input pin; if it + * is 1 - the port_swap output is equal to bit[1] of this register; [1] - + * Overwrite value. If bit[0] of this register is 1 this is the value that + * receives the port_swap output. Reset on Hard reset. + */ +#define MISC_REG_FOUR_PORT_PORT_SWAP_OVWR 0xa734 +/* [RW 32] Debug only: spare RW register reset by core reset. Global + * register. Reset on core reset. + */ +#define MISC_REG_GENERIC_CR_0 0xa460 +#define MISC_REG_GENERIC_CR_1 0xa464 +/* [RW 32] Debug only: spare RW register reset by por reset. Global + * register. Reset on POR reset. + */ +#define MISC_REG_GENERIC_POR_1 0xa474 +/* [RW 32] Bit[0]: EPIO MODE SEL: Setting this bit to 1 will allow SW/FW to + * use all of the 32 Extended GPIO pins. Without setting this bit; an EPIO + * can not be configured as an output. Each output has its output enable in + * the MCP register space; but this bit needs to be set to make use of that. + * Bit[3:1] spare. Bit[4]: WCVTMON_PWRDN: Powerdown for Warpcore VTMON. When + * set to 1 - Powerdown. Bit[5]: WCVTMON_RESETB: Reset for Warpcore VTMON. + * When set to 0 - vTMON is in reset. Bit[6]: setting this bit will change + * the i/o to an output and will drive the TimeSync output. Bit[31:7]: + * spare. Global register. 
Reset by hard reset. + */ +#define MISC_REG_GEN_PURP_HWG 0xa9a0 +/* [RW 32] GPIO. [31-28] FLOAT port 1; [27-24] FLOAT port 0; When any of + * these bits is written as a '1'; the corresponding GPIO bit will turn off + * its drivers and become an input. This is the reset state of all GPIO + * pins. The read value of these bits will be a '1' if that last command + * (#SET; #CLR; or #FLOAT) for this bit was a #FLOAT. (reset value 0xff). + * [23-20] CLR port 1; [19-16] CLR port 0; When any of these bits is written + * as a '1'; the corresponding GPIO bit will drive low. The read value of + * these bits will be a '1' if that last command (#SET; #CLR; or #FLOAT) for + * this bit was a #CLR. (reset value 0). [15-12] SET port 1; [11-8] SET port 0; + * When any of these bits is written as a '1'; the corresponding GPIO + * bit will drive high (if it has that capability). The read value of these + * bits will be a '1' if that last command (#SET; #CLR; or #FLOAT) for this + * bit was a #SET. (reset value 0). [7-4] VALUE port 1; [3-0] VALUE port 0; + * RO; These bits indicate the read value of each of the eight GPIO pins. + * This is the result value of the pin; not the drive value. Writing these + * bits will have no effect. Global register. + */ +#define MISC_REG_GPIO 0xa490 +/* [RW 8] These bits enable the GPIO_INTs to signal events to the + * IGU/MCP according to the following map: [0] p0_gpio_0; [1] p0_gpio_1; [2] + * p0_gpio_2; [3] p0_gpio_3; [4] p1_gpio_0; [5] p1_gpio_1; [6] p1_gpio_2; + * [7] p1_gpio_3; Global register. + */ +#define MISC_REG_GPIO_EVENT_EN 0xa2bc +/* [RW 32] GPIO INT. [31-28] OLD_CLR port1; [27-24] OLD_CLR port0; Writing a + * '1' to these bits clears the corresponding bit in the #OLD_VALUE register. + * This will acknowledge an interrupt on the falling edge of corresponding + * GPIO input (reset value 0). [23-20] OLD_SET port1; [19-16] OLD_SET port0; + * Writing a '1' to these bits sets the corresponding bit in the #OLD_VALUE + * register. This will acknowledge an interrupt on the rising edge of + * corresponding GPIO input (reset value 0). [15-12] OLD_VALUE port1; [11-8] + * OLD_VALUE port0; RO; These bits indicate the old value of the GPIO input + * value. When the ~INT_STATE bit is set; this bit indicates the OLD value + * of the pin such that if ~INT_STATE is set and this bit is '0'; then the + * interrupt is due to a low to high edge. If ~INT_STATE is set and this bit + * is '1'; then the interrupt is due to a high to low edge (reset value 0). + * [7-4] INT_STATE port1; [3-0] INT_STATE port0; RO; These bits indicate the + * current GPIO interrupt state for each GPIO pin. This bit is cleared when + * the appropriate #OLD_SET or #OLD_CLR command bit is written. This bit is + * set when the GPIO input does not match the current value in #OLD_VALUE + * (reset value 0). Global register. + */ +#define MISC_REG_GPIO_INT 0xa494 +/* [R 28] this field holds the last information that caused reserved + * attention. bits [19:0] - address; [22:20] function; [23] reserved; + * [27:24] the master that caused the attention - according to the following + * encoding: 1 = pxp; 2 = mcp; 3 = usdm; 4 = tsdm; 5 = xsdm; 6 = csdm; 7 = + * dbu; 8 = dmae + */ +#define MISC_REG_GRC_RSV_ATTN 0xa3c0 +/* [R 28] this field holds the last information that caused timeout + * attention. 
bits [19:0] - address; [22:20] function; [23] reserved; + * [27:24] the master that caused the attention - according to the following + * encoding: 1 = pxp; 2 = mcp; 3 = usdm; 4 = tsdm; 5 = xsdm; 6 = csdm; 7 = + * dbu; 8 = dmae + */ +#define MISC_REG_GRC_TIMEOUT_ATTN 0xa3c4 +/* [R 10] Holds the last FID that caused timeout attention. Needs to be used + * in conjunction with ~misc_registers_timeout_attn; where 3 bits of + * function (3 lsb) are also represented. Bit[2:0] - PFID; bit[3] - VFID + * valid; bit[9:4] - VFID. Global register. + */ +#define MISC_REG_GRC_TIMEOUT_ATTN_FULL_FID 0xa714 +/* [RW 1] LCPLL power down. Global register. Active High. Reset on POR + * reset. + */ +#define MISC_REG_LCPLL_E40_PWRDWN 0xaa74 +/* [RW 1] LCPLL VCO reset. Global register. Active Low. Reset on POR reset. */ +#define MISC_REG_LCPLL_E40_RESETB_ANA 0xaa78 +/* [RW 1] LCPLL post-divider reset. Global register. Active Low. Reset on POR + * reset. + */ +#define MISC_REG_LCPLL_E40_RESETB_DIG 0xaa7c +/* [RW 8] Interrupt mask register #0 read/write */ +#define MISC_REG_MISC_INT_MASK 0xa388 +/* [R 8] Interrupt register #0 read */ +#define MISC_REG_MISC_INT_STS 0xa37c +/* [RC 8] Interrupt register #0 read clear */ +#define MISC_REG_MISC_INT_STS_CLR 0xa380 +/* [RW 1] Parity mask register #0 read/write */ +#define MISC_REG_MISC_PRTY_MASK 0xa398 +/* [R 1] Parity register #0 read */ +#define MISC_REG_MISC_PRTY_STS 0xa38c +/* [RC 1] Parity register #0 read clear */ +#define MISC_REG_MISC_PRTY_STS_CLR 0xa390 +/* [R 1] If set indicates that the pcie_rst_b was asserted without perst + * assertion. Global register. + */ +#define MISC_REG_PCIE_HOT_RESET 0xa618 +/* [R 1] Status of 4 port mode enable input pin. */ +#define MISC_REG_PORT4MODE_EN 0xa750 +/* [RW 2] 4 port mode enable overwrite. [0] - Overwrite control; if it is 0 - + * the port4mode_en output is equal to 4 port mode input pin; if it is 1 - + * the port4mode_en output is equal to bit[1] of this register; [1] - + * Overwrite value. If bit[0] of this register is 1 this is the value that + * receives the port4mode_en output. Reset on Hard reset. + */ +#define MISC_REG_PORT4MODE_EN_OVWR 0xa720 +/* [RW 32] reset reg#1; write/read one = the specific block is out of reset; + * write/read zero = the specific block is in reset; addr 0-wr- the write + * value will be written to the register; addr 1-set - one will be written + * to all the bits that have the value of one in the data written (bits that + * have the value of zero will not be changed); addr 2-clear - zero will be + * written to all the bits that have the value of one in the data written + * (bits that have the value of zero will not be changed); addr 3-ignore; + * read ignore from all addr except addr 00; inside order of the bits is: + * [0] rst_brb1; [1] rst_prs; [2] rst_src; [3] rst_tsdm; [4] rst_tsem; [5] + * rst_tcm; [6] rst_rbcr; [7] rst_nig; [8] rst_usdm; [9] rst_ucm; [10] + * rst_usem; [11] rst_upb; [12] rst_ccm; [13] rst_csem; [14] rst_csdm; [15] + * rst_rbcu; [16] rst_pbf; [17] rst_qm; [18] rst_tm; [19] rst_dorq; [20] + * rst_xcm; [21] rst_xsdm; [22] rst_xsem; [23] rst_rbct; [24] rst_cdu; [25] + * rst_cfc; [26] rst_pxp_hst; [27] rst_pxpv (global register); [28] + * rst_rbcp; [29] rst_hc; [30] rst_dmae; [31] rst_semi_rtc; + */ +#define MISC_REG_RESET_REG_1 0xa580 +#define MISC_REG_RESET_REG_2 0xa590 +/* [RW 22] 22 bit GRC address where the scratch-pad of the MCP that is + * shared with the driver resides + */ +#define MISC_REG_SHARED_MEM_ADDR 0xa2b4 +/* [RW 32] SPIO. 
[31-24] FLOAT When any of these bits is written as a '1'; + * the corresponding SPIO bit will turn off its drivers and become an + * input. This is the reset state of all SPIO pins. The read value of these + * bits will be a '1' if that last command (#SET; #CLR; or #FLOAT) for this + * bit was a #FLOAT. (reset value 0xff). [23-16] CLR When any of these bits + * is written as a '1'; the corresponding SPIO bit will drive low. The read + * value of these bits will be a '1' if that last command (#SET; #CLR; or + * #FLOAT) for this bit was a #CLR. (reset value 0). [15-8] SET When any of + * these bits is written as a '1'; the corresponding SPIO bit will drive + * high (if it has that capability). The read value of these bits will be a + * '1' if that last command (#SET; #CLR; or #FLOAT) for this bit was a #SET. + * (reset value 0). [7-0] VALUE RO; These bits indicate the read value of + * each of the eight SPIO pins. This is the result value of the pin; not the + * drive value. Writing these bits will have no effect. Each 8 bit field + * is divided as follows: [0] VAUX Enable; when pulsed low; enables supply + * from VAUX. (This is an output pin only; the FLOAT field is not applicable + * for this pin); [1] VAUX Disable; when pulsed low; disables supply from + * VAUX. (This is an output pin only; FLOAT field is not applicable for this + * pin); [2] SEL_VAUX_B - Control to power switching logic. Drive low to + * select VAUX supply. (This is an output pin only; it is not controlled by + * the SET and CLR fields; it is controlled by the Main Power SM; the FLOAT + * field is not applicable for this pin; only the VALUE field is relevant - + * it reflects the output value); [3] port swap; [4] spio_4; [5] spio_5; [6] + * Bit 0 of UMP device ID select; read by UMP firmware; [7] Bit 1 of UMP + * device ID select; read by UMP firmware. Global register. + */ +#define MISC_REG_SPIO 0xa4fc +/* [RW 8] These bits enable the SPIO_INTs to signal events to the IGU/MCP + * according to the following map: [3:0] reserved; [4] spio_4; [5] spio_5; + * [7:6] reserved. Global register. + */ +#define MISC_REG_SPIO_EVENT_EN 0xa2b8 +/* [RW 32] SPIO INT. [31-24] OLD_CLR Writing a '1' to these bits clears the + * corresponding bit in the #OLD_VALUE register. This will acknowledge an + * interrupt on the falling edge of corresponding SPIO input (reset value + * 0). [23-16] OLD_SET Writing a '1' to these bits sets the corresponding bit + * in the #OLD_VALUE register. This will acknowledge an interrupt on the + * rising edge of corresponding SPIO input (reset value 0). [15-8] OLD_VALUE + * RO; These bits indicate the old value of the SPIO input value. When the + * ~INT_STATE bit is set; this bit indicates the OLD value of the pin such + * that if ~INT_STATE is set and this bit is '0'; then the interrupt is due + * to a low to high edge. If ~INT_STATE is set and this bit is '1'; then the + * interrupt is due to a high to low edge (reset value 0). [7-0] INT_STATE + * RO; These bits indicate the current SPIO interrupt state for each SPIO + * pin. This bit is cleared when the appropriate #OLD_SET or #OLD_CLR + * command bit is written. This bit is set when the SPIO input does not + * match the current value in #OLD_VALUE (reset value 0). Global register. + */ +#define MISC_REG_SPIO_INT 0xa500 +/* [R 1] Status of two port mode path swap input pin. 
*/ +#define MISC_REG_TWO_PORT_PATH_SWAP 0xa758 +/* [RW 2] 2 port swap overwrite. [0] - Overwrite control; if it is 0 - the + * path_swap output is equal to 2 port mode path swap input pin; if it is 1 + * - the path_swap output is equal to bit[1] of this register; [1] - + * Overwrite value. If bit[0] of this register is 1 this is the value that + * receives the path_swap output. Reset on Hard reset. + */ +#define MISC_REG_TWO_PORT_PATH_SWAP_OVWR 0xa72c +/* [RW 1] Set by the MCP to remember if one or more of the drivers is/are + * loaded; 0-prepare; 1-unprepare. Global register. Reset on hard reset. + */ +#define MISC_REG_UNPREPARED 0xa424 +/* [RW 5] MDIO PHY Address. The WC uses this address to determine whether or + * not it is the recipient of the message on the MDIO interface. The value + * is compared to the value on ctrl_md_devad. Drives output + * misc_xgxs0_phy_addr. Global register. + */ +#define MISC_REG_WC0_CTRL_PHY_ADDR 0xa9cc +/* [RW 10] reset reg#3; write/read one = the specific block is out of reset; + * write/read zero = the specific block is in reset; addr 0-wr- the write + * value will be written to the register; addr 1-set - one will be written + * to all the bits that have the value of one in the data written (bits that + * have the value of zero will not be changed); addr 2-clear - zero will be + * written to all the bits that have the value of one in the data written + * (bits that have the value of zero will not be changed); addr 3-ignore; + * read ignore from all addr except addr 00. [0]: rstb_hw: Active low reset + * which when asserted drives entire WC into the reset state. All flops + * within WC are driven into an initial state; as well as the analog + * core. Output clocks txck_out; rxck0_10g; and clk_25 will be driven to 0 + * upon its assertion. [1]: iddq. Enables iddq testing where the supply + * current (Idd) is measured in the quiescent state. [2]: pwrdwn: Active + * high control which forces the analog core of the WC into power-down mode; + * and forces digital logic of the WC into reset. Output clock (refclk) + * remains active. [3]: pwrdwn_sd: Power down signal detect. [4]: + * txd10g_fifo_rstb: Transmit 10Gbps FIFO reset; active low. Used to reset + * the transmit FIFO used in xlgmii operation. [8:5]: txd1g_fifo_rstb: + * Transmit 1Gbps FIFO reset; active low. Used to reset the per-lane + * transmit FIFOs used in the mii/gmii operation. [9]: + * txd10g_fifo_rstb_dxgxs1: Transmit 10Gbps DXGXS FIFO reset; active low. + * Used to reset the transmit FIFO used in the DXGXS logic in xlgmii + * operation. Global register. + */ +#define MISC_REG_WC0_RESET 0xac30 +/* [RW 2] XMAC Core port mode. Indicates the number of ports on the system + * side. This should be less than or equal to phy_port_mode; if some of the + * ports are not used. This enables reduction of frequency on the core side. + * This is a strap input for the XMAC_MP core. 00 - Single Port Mode; 01 - + * Dual Port Mode; 10 - Tri Port Mode; 11 - Quad Port Mode. This is a strap + * input for the XMAC_MP core; and should be changed only while reset is + * held low. Reset on Hard reset. + */ +#define MISC_REG_XMAC_CORE_PORT_MODE 0xa964 +/* [RW 2] XMAC PHY port mode. Indicates the number of ports on the Warp + * Core. This is a strap input for the XMAC_MP core. 00 - Single Port Mode; + * 01 - Dual Port Mode; 1x - Quad Port Mode; This is a strap input for the + * XMAC_MP core; and should be changed only while reset is held low. Reset + * on Hard reset. 
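+ * For example; a dual-port Warp Core design would be expected to strap this + * field to 01 before the core reset is released.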
+ */ +#define MISC_REG_XMAC_PHY_PORT_MODE 0xa960 +/* [RW 1] Interrupt mask register #0 read/write */ +#define MSTAT_REG_MSTAT_INT_MASK 0x7fc +/* [R 1] Interrupt register #0 read */ +#define MSTAT_REG_MSTAT_INT_STS 0x7f0 +/* [RC 1] Interrupt register #0 read clear */ +#define MSTAT_REG_MSTAT_INT_STS_CLR 0x7f4 +/* [RW 32] 1 [47] Packet Size = 64 Write to this register write bits 31:0. + * Reads from this register will clear bits 31:0. + */ +#define MSTAT_REG_RX_STAT_GR64_LO 0x200 +/* [RW 32] 1 [00] Tx Good Packet Count Write to this register write bits + * 31:0. Reads from this register will clear bits 31:0. + */ +#define MSTAT_REG_TX_STAT_GTXPOK_LO 0 +#define NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN (0x1 << 0) +#define NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN (0x1 << 0) +#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_EMAC0_MISC_MI_INT (0x1 << 0) +#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_SERDES0_LINK_STATUS (0x1 << 9) +#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_XGXS0_LINK10G (0x1 << 15) +#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_XGXS0_LINK_STATUS (0xf << 18) +/* [R 1] Input enable for RX_BMAC0 IF */ +#define NIG_REG_BMAC0_IN_EN 0x100ac +/* [R 1] output enable for TX_BMAC0 IF */ +#define NIG_REG_BMAC0_OUT_EN 0x100e0 +/* [R 1] output enable for TX BMAC pause port 0 IF */ +#define NIG_REG_BMAC0_PAUSE_OUT_EN 0x10110 +/* [R 1] output enable for RX_BMAC0_REGS IF */ +#define NIG_REG_BMAC0_REGS_OUT_EN 0x100e8 +/* [RW 1] output enable for RX BRB1 port0 IF */ +#define NIG_REG_BRB0_OUT_EN 0x100f8 +/* [RW 1] Input enable for TX BRB1 pause port 0 IF */ +#define NIG_REG_BRB0_PAUSE_IN_EN 0x100c4 +/* [RW 1] Input enable for TX BRB1 pause port 1 IF */ +#define NIG_REG_BRB1_PAUSE_IN_EN 0x100c8 +/* [WB_W 90] Debug packet to LP from RBC; Data spelling:[63:0] data; 64] + * error; [67:65]eop_bvalid; [68]eop; [69]sop; [70]port_id; 71]flush; + * 72:73]-vnic_num; 89:74]-sideband_info + */ +#define NIG_REG_DEBUG_PACKET_LB 0x10800 +/* [R 1] FIFO empty in DEBUG_FIFO in NIG_TX_DBG */ +#define NIG_REG_EGRESS_DEBUG_FIFO_EMPTY 0x10418 +/* [R 1] FIFO empty in DELAY_PBF_FIFO in NIG_RX_PORT0 */ +#define NIG_REG_EGRESS_DELAY0_EMPTY 0x10420 +/* [R 1] FIFO empty in DELAY_PBF_FIFO in NIG_RX_PORT1 */ +#define NIG_REG_EGRESS_DELAY1_EMPTY 0x10428 +/* [R 1] PBF FIFO empty flag. */ +#define NIG_REG_EGRESS_DELAY2_EMPTY 0x1862c +/* [R 1] PBF FIFO empty flag. */ +#define NIG_REG_EGRESS_DELAY3_EMPTY 0x18630 +/* [R 1] PBF FIFO empty flag. */ +#define NIG_REG_EGRESS_DELAY4_EMPTY 0x18634 +/* [R 1] PBF FIFO empty flag. */ +#define NIG_REG_EGRESS_DELAY5_EMPTY 0x18638 +/* [RW 1] If 1 - egress drain mode for port0 is active. In this mode all + * packets from PBFare not forwarded to the MAC and just deleted from FIFO. + * First packet may be deleted from the middle. And last packet will be + * always deleted till the end. + */ +#define NIG_REG_EGRESS_DRAIN0_MODE 0x10060 +/* [R 1] Output enable to EMAC0 */ +#define NIG_REG_EGRESS_EMAC0_OUT_EN 0x10120 +/* [RW 1] MAC configuration for packets of port0. 
If 1 - all packet outputs + * to emac for port0; other way to bmac for port0 + */ +#define NIG_REG_EGRESS_EMAC0_PORT 0x10058 +/* [R 1] FIFO empty in MNG_FIFO in NIG_TX_PORT0 */ +#define NIG_REG_EGRESS_MNG0_FIFO_EMPTY 0x10460 +/* [R 1] FIFO empty in MNG_FIFO in NIG_TX_PORT1 */ +#define NIG_REG_EGRESS_MNG1_FIFO_EMPTY 0x10474 +/* [RW 1] Input enable for TX UMP management packet port0 IF */ +#define NIG_REG_EGRESS_UMP0_IN_EN 0x100d4 +/* [R 1] Input enable for RX_EMAC0 IF */ +#define NIG_REG_EMAC0_IN_EN 0x100a4 +/* [R 1] output enable for TX EMAC pause port 0 IF */ +#define NIG_REG_EMAC0_PAUSE_OUT_EN 0x10118 +/* [R 1] status from emac0. This bit is set when MDINT from either the + * EXT_MDINT pin or from the Copper PHY is driven low. This condition must + * be cleared in the attached PHY device that is driving the MINT pin. + */ +#define NIG_REG_EMAC0_STATUS_MISC_MI_INT 0x10494 +/* [R 48] This address space contains BMAC0 registers. The BMAC registers + * are described in appendix A. In order to access the BMAC0 registers; the + * base address; NIG_REGISTERS_INGRESS_BMAC0_MEM; Offset: 0x10c00; should be + * added to each BMAC register offset + */ +#define NIG_REG_INGRESS_BMAC0_MEM 0x10c00 +/* [R 48] This address space contains BMAC1 registers. The BMAC registers + * are described in appendix A. In order to access the BMAC0 registers; the + * base address; NIG_REGISTERS_INGRESS_BMAC1_MEM; Offset: 0x11000; should be + * added to each BMAC register offset + */ +#define NIG_REG_INGRESS_BMAC1_MEM 0x11000 +/* [R 1] FIFO empty in EOP descriptor FIFO of LP in NIG_RX_EOP */ +#define NIG_REG_INGRESS_EOP_LB_EMPTY 0x104e0 +/* [RW 17] Debug only. RX_EOP_DSCR_lb_FIFO in NIG_RX_EOP. Data + * packet_length[13:0]; mac_error[14]; trunc_error[15]; parity[16] + */ +#define NIG_REG_INGRESS_EOP_LB_FIFO 0x104e4 +/* [R 1] FIFO empty in EOP descriptor FIFO of port 0 in NIG_RX_EOP */ +#define NIG_REG_INGRESS_EOP_PORT0_EMPTY 0x104ec +/* [R 1] FIFO empty in EOP descriptor FIFO of port 1 in NIG_RX_EOP */ +#define NIG_REG_INGRESS_EOP_PORT1_EMPTY 0x104f8 +/* [R 1] FIFO empty in PBF_DELAY_lb_FIFO in NIG_RX_lb */ +#define NIG_REG_INGRESS_LB_PBF_DELAY_EMPTY 0x10508 +/* [R 1] FIFO empty in dscr_fifo in NIG_RX_RMP block */ +#define NIG_REG_INGRESS_RMP0_DSCR_EMPTY 0x10530 +/* [R 1] FIFO empty in dscr_fifo in NIG_RX_RMP block */ +#define NIG_REG_INGRESS_RMP1_DSCR_EMPTY 0x10538 +/* [RW 27] 0 - must be active for Everest A0; 1- for Everest B0 when latch + * logic for interrupts must be used. 
Enable per bit of interrupt of + * ~latch_status.latch_status + */ +#define NIG_REG_LATCH_BC_0 0x16210 +/* [RW 27] Latch for each interrupt from Unicore.b[0] + * status_emac0_misc_mi_int; b[1] status_emac0_misc_mi_complete; + * b[2]status_emac0_misc_cfg_change; b[3]status_emac0_misc_link_status; + * b[4]status_emac0_misc_link_change; b[5]status_emac0_misc_attn; + * b[6]status_serdes0_mac_crs; b[7]status_serdes0_autoneg_complete; + * b[8]status_serdes0_fiber_rxact; b[9]status_serdes0_link_status; + * b[10]status_serdes0_mr_page_rx; b[11]status_serdes0_cl73_an_complete; + * b[12]status_serdes0_cl73_mr_page_rx; b[13]status_serdes0_rx_sigdet; + * b[14]status_xgxs0_remotemdioreq; b[15]status_xgxs0_link10g; + * b[16]status_xgxs0_autoneg_complete; b[17]status_xgxs0_fiber_rxact; + * b[21:18]status_xgxs0_link_status; b[22]status_xgxs0_mr_page_rx; + * b[23]status_xgxs0_cl73_an_complete; b[24]status_xgxs0_cl73_mr_page_rx; + * b[25]status_xgxs0_rx_sigdet; b[26]status_xgxs0_mac_crs + */ +#define NIG_REG_LATCH_STATUS_0 0x18000 +/* [RW 1] led 10g for port 0 */ +#define NIG_REG_LED_10G_P0 0x10320 +/* [RW 1] Port0: This bit is set to enable the use of the + * ~nig_registers_led_control_blink_rate_p0.led_control_blink_rate_p0 field + * defined below. If this bit is cleared; then the blink rate will be about + * 8Hz. + */ +#define NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 0x10318 +/* [RW 12] Port0: Specifies the period of each blink cycle (on + off) for + * Traffic LED in milliseconds. Must be a non-zero value. This 12-bit field + * is reset to 0x080; giving a default blink period of approximately 8Hz. + */ +#define NIG_REG_LED_CONTROL_BLINK_RATE_P0 0x10310 +/* [RW 1] Port0: If set along with the + * s_led_control_override_traffic_p0.led_control_override_traffic_p0 + * bit and ~nig_registers_led_control_traffic_p0.led_control_traffic_p0 LED + * bit; the Traffic LED will blink with the blink rate specified in + * ~nig_registers_led_control_blink_rate_p0.led_control_blink_rate_p0 and + * ~nig_registers_led_control_blink_rate_ena_p0.led_control_blink_rate_ena_p0 + * fields. + */ +#define NIG_REG_LED_CONTROL_BLINK_TRAFFIC_P0 0x10308 +/* [RW 1] Port0: If set overrides hardware control of the Traffic LED. The + * Traffic LED will then be controlled via bit ~nig_registers_ + * led_control_traffic_p0.led_control_traffic_p0 and bit + * ~nig_registers_led_control_blink_traffic_p0.led_control_blink_traffic_p0 + */ +#define NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 0x102f8 +/* [RW 1] Port0: If set along with the led_control_override_trafic_p0 bit; + * turns on the Traffic LED. If the led_control_blink_traffic_p0 bit is also + * set; the LED will blink with blink rate specified in + * ~nig_registers_led_control_blink_rate_p0.led_control_blink_rate_p0 and + * ~nig_regsters_led_control_blink_rate_ena_p0.led_control_blink_rate_ena_p0 + * fields. + */ +#define NIG_REG_LED_CONTROL_TRAFFIC_P0 0x10300 +/* [RW 4] led mode for port0: 0 MAC; 1-3 PHY1; 4 MAC2; 5-7 PHY4; 8-MAC3; + * 9-11PHY7; 12 MAC4; 13-15 PHY10; + */ +#define NIG_REG_LED_MODE_P0 0x102f0 +/* [RW 3] for port0 enable for llfc ppp and pause. b0 - brb1 enable; b1- + * tsdm enable; b2- usdm enable + */ +#define NIG_REG_LLFC_EGRESS_SRC_ENABLE_0 0x16070 +#define NIG_REG_LLFC_EGRESS_SRC_ENABLE_1 0x16074 +/* [RW 1] SAFC enable for port0. 
This register may get 1 only when + * ~ppp_enable.ppp_enable = 0 and pause_enable.pause_enable =0 for the same + * port + */ +#define NIG_REG_LLFC_ENABLE_0 0x16208 +#define NIG_REG_LLFC_ENABLE_1 0x1620c +/* [RW 16] classes are high-priority for port0 */ +#define NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_0 0x16058 +#define NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_1 0x1605c +/* [RW 16] classes are low-priority for port0 */ +#define NIG_REG_LLFC_LOW_PRIORITY_CLASSES_0 0x16060 +#define NIG_REG_LLFC_LOW_PRIORITY_CLASSES_1 0x16064 +/* [RW 1] Output enable of message to LLFC BMAC IF for port0 */ +#define NIG_REG_LLFC_OUT_EN_0 0x160c8 +#define NIG_REG_LLFC_OUT_EN_1 0x160cc +#define NIG_REG_LLH0_ACPI_PAT_0_CRC 0x1015c +#define NIG_REG_LLH0_ACPI_PAT_6_LEN 0x10154 +#define NIG_REG_LLH0_BRB1_DRV_MASK 0x10244 +#define NIG_REG_LLH0_BRB1_DRV_MASK_MF 0x16048 +/* [RW 1] send to BRB1 if no match on any of RMP rules. */ +#define NIG_REG_LLH0_BRB1_NOT_MCP 0x1025c +/* [RW 2] Determine the classification participants. 0: no classification.1: + * classification upon VLAN id. 2: classification upon MAC address. 3: + * classification upon both VLAN id & MAC addr. + */ +#define NIG_REG_LLH0_CLS_TYPE 0x16080 +#define NIG_REG_LLH0_DEST_IP_0_1 0x101dc +#define NIG_REG_LLH0_DEST_MAC_0_0 0x101c0 +/* [RW 16] destination TCP address 1. The LLH will look for this address in + * all incoming packets. + */ +#define NIG_REG_LLH0_DEST_TCP_0 0x10220 +/* [RW 16] destination UDP address 1 The LLH will look for this address in + * all incoming packets. + */ +#define NIG_REG_LLH0_DEST_UDP_0 0x10214 +/* [R 1] FIFO empty in LLH port0 */ +#define NIG_REG_LLH0_FIFO_EMPTY 0x10548 +#define NIG_REG_LLH0_FUNC_EN 0x160fc +#define NIG_REG_LLH0_FUNC_MEM 0x16180 +#define NIG_REG_LLH0_FUNC_MEM_ENABLE 0x16140 +#define NIG_REG_LLH0_FUNC_VLAN_ID 0x16100 +/* [RW 1] Determine the IP version to look for in + * ~nig_registers_llh0_dest_ip_0.llh0_dest_ip_0. 0 - IPv6; 1-IPv4 + */ +#define NIG_REG_LLH0_IPV4_IPV6_0 0x10208 +/* [RW 1] t bit for llh0 */ +#define NIG_REG_LLH0_T_BIT 0x10074 +/* [RW 12] VLAN ID 1. In case of VLAN packet the LLH will look for this ID. */ +#define NIG_REG_LLH0_VLAN_ID_0 0x1022c +#define NIG_REG_LLH0_XCM_MASK 0x10130 +#define NIG_REG_LLH1_BRB1_DRV_MASK_MF 0x1604c +/* [RW 1] send to BRB1 if no match on any of RMP rules. */ +#define NIG_REG_LLH1_BRB1_NOT_MCP 0x102dc +/* [RW 2] Determine the classification participants. 0: no classification.1: + * classification upon VLAN id. 2: classification upon MAC address. 3: + * classification upon both VLAN id & MAC addr. + */ +#define NIG_REG_LLH1_CLS_TYPE 0x16084 +/* [R 1] FIFO empty in LLH port1 */ +#define NIG_REG_LLH1_FIFO_EMPTY 0x10558 +#define NIG_REG_LLH1_FUNC_EN 0x16104 +#define NIG_REG_LLH1_FUNC_MEM 0x161c0 +#define NIG_REG_LLH1_FUNC_MEM_ENABLE 0x16160 +#define NIG_REG_LLH1_FUNC_MEM_SIZE 16 +/* [RW 1] When this bit is set; the LLH will classify the packet before + * sending it to the BRB or calculating WoL on it. This bit controls port 1 + * only. The legacy llh_multi_function_mode bit controls port 0. + */ +#define NIG_REG_LLH1_MF_MODE 0x18614 +#define NIG_REG_LLH1_XCM_MASK 0x10134 +/* [RW 1] When this bit is set; the LLH will expect all packets to be with + * outer VLAN. This is not applicable to E2. + */ +#define NIG_REG_LLH_E1HOV_MODE 0x160d8 +/* [RW 16] Outer VLAN type identifier for multi-function mode. In non + * multi-function mode; it will hold the inner VLAN type. Typically 0x8100. 
+ */ +#define NIG_REG_LLH_E1HOV_TYPE_1 0x16028 +/* [RW 1] When this bit is set; the LLH will classify the packet before + * sending it to the BRB or calculating WoL on it. This bit is applicable to + * both ports 0 and 1 for E2. This bit only controls port 0 in E3. + */ +#define NIG_REG_LLH_MF_MODE 0x16024 +#define NIG_REG_MASK_INTERRUPT_PORT0 0x10330 +#define NIG_REG_MASK_INTERRUPT_PORT1 0x10334 +/* [R 1] Output signal from NIG to EMAC0. When set enables the EMAC0 block. */ +#define NIG_REG_NIG_EMAC0_EN 0x1003c +/* [R 1] Output signal from NIG to TX_EMAC0. When set indicates to the EMAC0 + * to strip the CRC from the ingress packets. + */ +#define NIG_REG_NIG_INGRESS_EMAC0_NO_CRC 0x10044 +/* [RW 32] Interrupt mask register #0 read/write */ +#define NIG_REG_NIG_INT_MASK_0 0x103bc +#define NIG_REG_NIG_INT_MASK_1 0x103cc +/* [R 32] Interrupt register #0 read */ +#define NIG_REG_NIG_INT_STS_0 0x103b0 +#define NIG_REG_NIG_INT_STS_1 0x103c0 +/* [RC 32] Interrupt register #0 read clear */ +#define NIG_REG_NIG_INT_STS_CLR_0 0x103b4 +#define NIG_REG_NIG_INT_STS_CLR_1 0x103c4 +/* [R 32] Legacy E1 and E1H location for parity error mask register. */ +#define NIG_REG_NIG_PRTY_MASK 0x103dc +/* [RW 32] Parity mask register #0 read/write */ +#define NIG_REG_NIG_PRTY_MASK_0 0x183c8 +#define NIG_REG_NIG_PRTY_MASK_1 0x183d8 +/* [R 32] Legacy E1 and E1H location for parity error status register. */ +#define NIG_REG_NIG_PRTY_STS 0x103d0 +/* [R 32] Parity register #0 read */ +#define NIG_REG_NIG_PRTY_STS_0 0x183bc +#define NIG_REG_NIG_PRTY_STS_1 0x183cc +/* [R 32] Legacy E1 and E1H location for parity error status clear register. */ +#define NIG_REG_NIG_PRTY_STS_CLR 0x103d4 +/* [RC 32] Parity register #0 read clear */ +#define NIG_REG_NIG_PRTY_STS_CLR_0 0x183c0 +#define NIG_REG_NIG_PRTY_STS_CLR_1 0x183d0 +/* [R 1] Indication that HBUF descriptor FIFO is empty. */ +#define NIG_REG_P0_HBUF_DSCR_EMPTY 0x18318 +/* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic + * Ethernet header. + */ +#define NIG_REG_P0_HDRS_AFTER_BASIC 0x18038 +/* [RW 1] HW PFC enable bit. Set this bit to enable the PFC functionality in + * the NIG. Other flow control modes such as PAUSE and SAFC/LLFC should be + * disabled when this bit is set. + */ +#define NIG_REG_P0_HWPFC_ENABLE 0x18078 +#define NIG_REG_P0_LLH_FUNC_MEM2 0x18480 +/* [RW 17] Packet TimeSync information that is buffered in 1-deep FIFOs for + * the host. Bits [15:0] return the sequence ID of the packet. Bit 16 + * indicates the validity of the data in the buffer. Writing a 1 to bit 16 + * will clear the buffer. + */ +#define NIG_REG_P0_LLH_PTP_HOST_BUF_SEQID 0x1875c +/* [R 32] Packet TimeSync information that is buffered in 1-deep FIFOs for + * the host. This location returns the lower 32 bits of timestamp value. + */ +#define NIG_REG_P0_LLH_PTP_HOST_BUF_TS_LSB 0x18754 +/* [R 32] Packet TimeSync information that is buffered in 1-deep FIFOs for + * the host. This location returns the upper 32 bits of timestamp value. + */ +#define NIG_REG_P0_LLH_PTP_HOST_BUF_TS_MSB 0x18758 +/* [RW 11] Mask register for the various parameters used in determining PTP + * packet presence. Set each bit to 1 to mask out the particular parameter. + * 0-IPv4 DA 0 of 224.0.1.129. 1-IPv4 DA 1 of 224.0.0.107. 2-IPv6 DA 0 of + * 0xFF0*:0:0:0:0:0:0:181. 3-IPv6 DA 1 of 0xFF02:0:0:0:0:0:0:6B. 4-UDP + * destination port 0 of 319. 5-UDP destination port 1 of 320. 6-MAC + * Ethertype 0 of 0x88F7. 7-configurable MAC Ethertype 1. 8-MAC DA 0 of + * 0x01-1B-19-00-00-00. 
9-MAC DA 1 of 0x01-80-C2-00-00-0E. 10-configurable + * MAC DA 2. The reset default is set to mask out all parameters. + */ +#define NIG_REG_P0_LLH_PTP_PARAM_MASK 0x187a0 +/* [RW 14] Mask regiser for the rules used in detecting PTP packets. Set + * each bit to 1 to mask out that particular rule. 0-{IPv4 DA 0; UDP DP 0} . + * 1-{IPv4 DA 0; UDP DP 1} . 2-{IPv4 DA 1; UDP DP 0} . 3-{IPv4 DA 1; UDP DP + * 1} . 4-{IPv6 DA 0; UDP DP 0} . 5-{IPv6 DA 0; UDP DP 1} . 6-{IPv6 DA 1; + * UDP DP 0} . 7-{IPv6 DA 1; UDP DP 1} . 8-{MAC DA 0; Ethertype 0} . 9-{MAC + * DA 1; Ethertype 0} . 10-{MAC DA 0; Ethertype 1} . 11-{MAC DA 1; Ethertype + * 1} . 12-{MAC DA 2; Ethertype 0} . 13-{MAC DA 2; Ethertype 1} . The reset + * default is to mask out all of the rules. Note that rules 0-3 are for IPv4 + * packets only and require that the packet is IPv4 for the rules to match. + * Note that rules 4-7 are for IPv6 packets only and require that the packet + * is IPv6 for the rules to match. + */ +#define NIG_REG_P0_LLH_PTP_RULE_MASK 0x187a4 +/* [RW 1] Set to 1 to enable PTP packets to be forwarded to the host. */ +#define NIG_REG_P0_LLH_PTP_TO_HOST 0x187ac +/* [RW 1] Input enable for RX MAC interface. */ +#define NIG_REG_P0_MAC_IN_EN 0x185ac +/* [RW 1] Output enable for TX MAC interface */ +#define NIG_REG_P0_MAC_OUT_EN 0x185b0 +/* [RW 1] Output enable for TX PAUSE signal to the MAC. */ +#define NIG_REG_P0_MAC_PAUSE_OUT_EN 0x185b4 +/* [RW 32] Eight 4-bit configurations for specifying which COS (0-15 for + * future expansion) each priority is to be mapped to. Bits 3:0 specify the + * COS for priority 0. Bits 31:28 specify the COS for priority 7. The 3-bit + * priority field is extracted from the outer-most VLAN in receive packet. + * Only COS 0 and COS 1 are supported in E2. + */ +#define NIG_REG_P0_PKT_PRIORITY_TO_COS 0x18054 +/* [RW 6] Enable for TimeSync feature. Bits [2:0] are for RX side. Bits + * [5:3] are for TX side. Bit 0 enables TimeSync on RX side. Bit 1 enables + * V1 frame format in timesync event detection on RX side. Bit 2 enables V2 + * frame format in timesync event detection on RX side. Bit 3 enables + * TimeSync on TX side. Bit 4 enables V1 frame format in timesync event + * detection on TX side. Bit 5 enables V2 frame format in timesync event + * detection on TX side. Note that for HW to detect PTP packet and extract + * data from the packet, at least one of the version bits of that traffic + * direction has to be enabled. + */ +#define NIG_REG_P0_PTP_EN 0x18788 +/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 0. A + * priority is mapped to COS 0 when the corresponding mask bit is 1. More + * than one bit may be set; allowing multiple priorities to be mapped to one + * COS. + */ +#define NIG_REG_P0_RX_COS0_PRIORITY_MASK 0x18058 +/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 1. A + * priority is mapped to COS 1 when the corresponding mask bit is 1. More + * than one bit may be set; allowing multiple priorities to be mapped to one + * COS. + */ +#define NIG_REG_P0_RX_COS1_PRIORITY_MASK 0x1805c +/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 2. A + * priority is mapped to COS 2 when the corresponding mask bit is 1. More + * than one bit may be set; allowing multiple priorities to be mapped to one + * COS. + */ +#define NIG_REG_P0_RX_COS2_PRIORITY_MASK 0x186b0 +/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 3. A + * priority is mapped to COS 3 when the corresponding mask bit is 1. 
More + * than one bit may be set; allowing multiple priorities to be mapped to one + * COS. + */ +#define NIG_REG_P0_RX_COS3_PRIORITY_MASK 0x186b4 +/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 4. A + * priority is mapped to COS 4 when the corresponding mask bit is 1. More + * than one bit may be set; allowing multiple priorities to be mapped to one + * COS. + */ +#define NIG_REG_P0_RX_COS4_PRIORITY_MASK 0x186b8 +/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 5. A + * priority is mapped to COS 5 when the corresponding mask bit is 1. More + * than one bit may be set; allowing multiple priorities to be mapped to one + * COS. + */ +#define NIG_REG_P0_RX_COS5_PRIORITY_MASK 0x186bc +/* [R 1] RX FIFO for receiving data from MAC is empty. */ +#define NIG_REG_P0_RX_MACFIFO_EMPTY 0x18570 +/* [R 1] TLLH FIFO is empty. */ +#define NIG_REG_P0_TLLH_FIFO_EMPTY 0x18308 +/* [RW 19] Packet TimeSync information that is buffered in 1-deep FIFOs for + * TX side. Bits [15:0] reflect the sequence ID of the packet. Bit 16 + * indicates the validity of the data in the buffer. Bit 17 indicates that + * the sequence ID is valid and it is waiting for the TX timestamp value. + * Bit 18 indicates whether the timestamp is from a SW request (value of 1) + * or HW request (value of 0). Writing a 1 to bit 16 will clear the buffer. + */ +#define NIG_REG_P0_TLLH_PTP_BUF_SEQID 0x187e0 +/* [R 32] Packet TimeSync information that is buffered in 1-deep FIFOs for + * MCP. This location returns the lower 32 bits of timestamp value. + */ +#define NIG_REG_P0_TLLH_PTP_BUF_TS_LSB 0x187d8 +/* [R 32] Packet TimeSync information that is buffered in 1-deep FIFOs for + * MCP. This location returns the upper 32 bits of timestamp value. + */ +#define NIG_REG_P0_TLLH_PTP_BUF_TS_MSB 0x187dc +/* [RW 11] Mask register for the various parameters used in determining PTP + * packet presence. Set each bit to 1 to mask out the particular parameter. + * 0-IPv4 DA 0 of 224.0.1.129. 1-IPv4 DA 1 of 224.0.0.107. 2-IPv6 DA 0 of + * 0xFF0*:0:0:0:0:0:0:181. 3-IPv6 DA 1 of 0xFF02:0:0:0:0:0:0:6B. 4-UDP + * destination port 0 of 319. 5-UDP destination port 1 of 320. 6-MAC + * Ethertype 0 of 0x88F7. 7-configurable MAC Ethertype 1. 8-MAC DA 0 of + * 0x01-1B-19-00-00-00. 9-MAC DA 1 of 0x01-80-C2-00-00-0E. 10-configurable + * MAC DA 2. The reset default is set to mask out all parameters. + */ +#define NIG_REG_P0_TLLH_PTP_PARAM_MASK 0x187f0 +/* [RW 14] Mask regiser for the rules used in detecting PTP packets. Set + * each bit to 1 to mask out that particular rule. 0-{IPv4 DA 0; UDP DP 0} . + * 1-{IPv4 DA 0; UDP DP 1} . 2-{IPv4 DA 1; UDP DP 0} . 3-{IPv4 DA 1; UDP DP + * 1} . 4-{IPv6 DA 0; UDP DP 0} . 5-{IPv6 DA 0; UDP DP 1} . 6-{IPv6 DA 1; + * UDP DP 0} . 7-{IPv6 DA 1; UDP DP 1} . 8-{MAC DA 0; Ethertype 0} . 9-{MAC + * DA 1; Ethertype 0} . 10-{MAC DA 0; Ethertype 1} . 11-{MAC DA 1; Ethertype + * 1} . 12-{MAC DA 2; Ethertype 0} . 13-{MAC DA 2; Ethertype 1} . The reset + * default is to mask out all of the rules. + */ +#define NIG_REG_P0_TLLH_PTP_RULE_MASK 0x187f4 +/* [R 15] Specify which of the credit registers the client is to be mapped + * to. Bits[2:0] are for client 0; bits [14:12] are for client 4. For + * clients that are not subject to WFQ credit blocking - their + * specifications here are not used. + */ +#define NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP 0x180f0 +/* [RW 32] Specify which of the credit registers the client is to be mapped + * to. This register specifies bits 31:0 of the 36-bit value. 
Bits[3:0] are + * for client 0; bits [35:32] are for client 8. For clients that are not + * subject to WFQ credit blocking - their specifications here are not used. + * This is a new register (with 2_) added in E3 B0 to accommodate the 9 + * input clients to ETS arbiter. The reset default is set for management and + * debug to use credit registers 6, 7, and 8, respectively, and COSes 0-5 to + * use credit registers 0-5 respectively (0x543210876). Note that credit + * registers can not be shared between clients. + */ +#define NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_LSB 0x18688 +/* [RW 4] Specify which of the credit registers the client is to be mapped + * to. This register specifies bits 35:32 of the 36-bit value. Bits[3:0] are + * for client 0; bits [35:32] are for client 8. For clients that are not + * subject to WFQ credit blocking - their specifications here are not used. + * This is a new register (with 2_) added in E3 B0 to accommodate the 9 + * input clients to ETS arbiter. The reset default is set for management and + * debug to use credit registers 6, 7, and 8, respectively, and COSes 0-5 to + * use credit registers 0-5 respectively (0x543210876). Note that credit + * registers can not be shared between clients. + */ +#define NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_MSB 0x1868c +/* [RW 9] Specify whether the client competes directly in the strict + * priority arbiter. The bits are mapped according to client ID (client IDs + * are defined in tx_arb_priority_client2): 0-management; 1-debug traffic + * from this port; 2-debug traffic from other port; 3-COS0 traffic; 4-COS1 + * traffic; 5-COS2 traffic; 6-COS3 traffic; 7-COS4 traffic; 8-COS5 traffic. + * Default value is set to enable strict priorities for all clients. + */ +#define NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT 0x180e8 +/* [RW 9] Specify whether the client is subject to WFQ credit blocking. The + * bits are mapped according to client ID (client IDs are defined in + * tx_arb_priority_client2): 0-management; 1-debug traffic from this port; + * 2-debug traffic from other port; 3-COS0 traffic; 4-COS1 traffic; 5-COS2 + * traffic; 6-COS3 traffic; 7-COS4 traffic; 8-COS5 traffic. Default value is + * 0 for not using WFQ credit blocking. + */ +#define NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ 0x180ec +/* [RW 32] Specify the upper bound that credit register 0 is allowed to + * reach. + */ +#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0 0x1810c +#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_1 0x18110 +#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_2 0x18114 +#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_3 0x18118 +#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_4 0x1811c +#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_5 0x186a0 +#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_6 0x186a4 +#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_7 0x186a8 +#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_8 0x186ac +/* [RW 32] Specify the weight (in bytes) to be added to credit register 0 + * when it is time to increment. 
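+ * Only the ratio between the WFQ clients' weights matters; e.g.
+ * (illustrative numbers only) weights of 200 and 100 bytes in credit
+ * registers 0 and 1 would give the mapped COSes roughly a 2:1 split of the
+ * bandwidth left over by the strict-priority clients under sustained load;
+ * with NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0/1 capping how much unused
+ * credit either register may accumulate.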
+ */ +#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0 0x180f8 +#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1 0x180fc +#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_2 0x18100 +#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_3 0x18104 +#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_4 0x18108 +#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_5 0x18690 +#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_6 0x18694 +#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_7 0x18698 +#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_8 0x1869c +/* [RW 12] Specify the number of strict priority arbitration slots between + * two round-robin arbitration slots to avoid starvation. A value of 0 means + * no strict priority cycles - the strict priority with anti-starvation + * arbiter becomes a round-robin arbiter. + */ +#define NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS 0x180f4 +/* [R 15] Specify the client number to be assigned to each priority of the + * strict priority arbiter. Priority 0 is the highest priority. Bits [2:0] + * are for priority 0 client; bits [14:12] are for priority 4 client. The + * clients are assigned the following IDs: 0-management; 1-debug traffic + * from this port; 2-debug traffic from other port; 3-COS0 traffic; 4-COS1 + * traffic. The reset value[14:0] is set to 0x4688 (15'b100_011_010_001_000) + * for management at priority 0; debug traffic at priorities 1 and 2; COS0 + * traffic at priority 3; and COS1 traffic at priority 4. + */ +#define NIG_REG_P0_TX_ARB_PRIORITY_CLIENT 0x180e4 +/* [RW 32] Specify the client number to be assigned to each priority of the + * strict priority arbiter. This register specifies bits 31:0 of the 36-bit + * value. Priority 0 is the highest priority. Bits [3:0] are for priority 0 + * client; bits [35-32] are for priority 8 client. The clients are assigned + * the following IDs: 0-management; 1-debug traffic from this port; 2-debug + * traffic from other port; 3-COS0 traffic; 4-COS1 traffic; 5-COS2 traffic; + * 6-COS3 traffic; 7-COS4 traffic; 8-COS5 traffic. The reset value[35:0] is + * set to 0x345678021. This is a new register (with 2_) added in E3 B0 to + * accommodate the 9 input clients to ETS arbiter. + */ +#define NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_LSB 0x18680 +/* [RW 4] Specify the client number to be assigned to each priority of the + * strict priority arbiter. This register specifies bits 35:32 of the 36-bit + * value. Priority 0 is the highest priority. Bits [3:0] are for priority 0 + * client; bits [35-32] are for priority 8 client. The clients are assigned + * the following IDs: 0-management; 1-debug traffic from this port; 2-debug + * traffic from other port; 3-COS0 traffic; 4-COS1 traffic; 5-COS2 traffic; + * 6-COS3 traffic; 7-COS4 traffic; 8-COS5 traffic. The reset value[35:0] is + * set to 0x345678021. This is a new register (with 2_) added in E3 B0 to + * accommodate the 9 input clients to ETS arbiter. + */ +#define NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_MSB 0x18684 +/* [R 1] TX FIFO for transmitting data to MAC is empty. */ +#define NIG_REG_P0_TX_MACFIFO_EMPTY 0x18578 +/* [RW 1] MCP-to-host path enable. Set this bit to enable the routing of MCP + * packets to BRB LB interface to forward the packet to the host. All + * packets from MCP are forwarded to the network when this bit is cleared - + * regardless of the configured destination in tx_mng_destination register. + * When MCP-to-host paths for both ports 0 and 1 are disabled - the arbiter + * for BRB LB interface is bypassed and PBF LB traffic is always selected to + * send to BRB LB. 
+ */ +#define NIG_REG_P0_TX_MNG_HOST_ENABLE 0x182f4 +/* [R 1] FIFO empty status of the MCP TX FIFO used for storing MCP packets + * forwarded to the host. + */ +#define NIG_REG_P0_TX_MNG_HOST_FIFO_EMPTY 0x182a8 +/* [R 1] Indication that HBUF descriptor FIFO is empty. */ +#define NIG_REG_P1_HBUF_DSCR_EMPTY 0x18348 +/* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic + * Ethernet header. + */ +#define NIG_REG_P1_HDRS_AFTER_BASIC 0x1818c +/* [RW 1] HW PFC enable bit. Set this bit to enable the PFC functionality in + * the NIG. Other flow control modes such as PAUSE and SAFC/LLFC should be + * disabled when this bit is set. + */ +#define NIG_REG_P1_HWPFC_ENABLE 0x181d0 +#define NIG_REG_P1_LLH_FUNC_MEM2 0x184c0 +/* [RW 17] Packet TimeSync information that is buffered in 1-deep FIFOs for + * the host. Bits [15:0] return the sequence ID of the packet. Bit 16 + * indicates the validity of the data in the buffer. Writing a 1 to bit 16 + * will clear the buffer. + */ +#define NIG_REG_P1_LLH_PTP_HOST_BUF_SEQID 0x18774 +/* [R 32] Packet TimeSync information that is buffered in 1-deep FIFOs for + * the host. This location returns the lower 32 bits of timestamp value. + */ +#define NIG_REG_P1_LLH_PTP_HOST_BUF_TS_LSB 0x1876c +/* [R 32] Packet TimeSync information that is buffered in 1-deep FIFOs for + * the host. This location returns the upper 32 bits of timestamp value. + */ +#define NIG_REG_P1_LLH_PTP_HOST_BUF_TS_MSB 0x18770 +/* [RW 11] Mask register for the various parameters used in determining PTP + * packet presence. Set each bit to 1 to mask out the particular parameter. + * 0-IPv4 DA 0 of 224.0.1.129. 1-IPv4 DA 1 of 224.0.0.107. 2-IPv6 DA 0 of + * 0xFF0*:0:0:0:0:0:0:181. 3-IPv6 DA 1 of 0xFF02:0:0:0:0:0:0:6B. 4-UDP + * destination port 0 of 319. 5-UDP destination port 1 of 320. 6-MAC + * Ethertype 0 of 0x88F7. 7-configurable MAC Ethertype 1. 8-MAC DA 0 of + * 0x01-1B-19-00-00-00. 9-MAC DA 1 of 0x01-80-C2-00-00-0E. 10-configurable + * MAC DA 2. The reset default is set to mask out all parameters. + */ +#define NIG_REG_P1_LLH_PTP_PARAM_MASK 0x187c8 +/* [RW 14] Mask regiser for the rules used in detecting PTP packets. Set + * each bit to 1 to mask out that particular rule. 0-{IPv4 DA 0; UDP DP 0} . + * 1-{IPv4 DA 0; UDP DP 1} . 2-{IPv4 DA 1; UDP DP 0} . 3-{IPv4 DA 1; UDP DP + * 1} . 4-{IPv6 DA 0; UDP DP 0} . 5-{IPv6 DA 0; UDP DP 1} . 6-{IPv6 DA 1; + * UDP DP 0} . 7-{IPv6 DA 1; UDP DP 1} . 8-{MAC DA 0; Ethertype 0} . 9-{MAC + * DA 1; Ethertype 0} . 10-{MAC DA 0; Ethertype 1} . 11-{MAC DA 1; Ethertype + * 1} . 12-{MAC DA 2; Ethertype 0} . 13-{MAC DA 2; Ethertype 1} . The reset + * default is to mask out all of the rules. Note that rules 0-3 are for IPv4 + * packets only and require that the packet is IPv4 for the rules to match. + * Note that rules 4-7 are for IPv6 packets only and require that the packet + * is IPv6 for the rules to match. + */ +#define NIG_REG_P1_LLH_PTP_RULE_MASK 0x187cc +/* [RW 1] Set to 1 to enable PTP packets to be forwarded to the host. */ +#define NIG_REG_P1_LLH_PTP_TO_HOST 0x187d4 +/* [RW 1] Input enable for RX MAC interface. */ +#define NIG_REG_P1_MAC_IN_EN 0x185c0 +/* [RW 1] Output enable for TX MAC interface */ +#define NIG_REG_P1_MAC_OUT_EN 0x185c4 +/* [RW 1] Output enable for TX PAUSE signal to the MAC. */ +#define NIG_REG_P1_MAC_PAUSE_OUT_EN 0x185c8 +/* [RW 32] Eight 4-bit configurations for specifying which COS (0-15 for + * future expansion) each priority is to be mapped to. Bits 3:0 specify the + * COS for priority 0. 
Bits 31:28 specify the COS for priority 7. The 3-bit + * priority field is extracted from the outer-most VLAN in receive packet. + * Only COS 0 and COS 1 are supported in E2. + */ +#define NIG_REG_P1_PKT_PRIORITY_TO_COS 0x181a8 +/* [RW 6] Enable for TimeSync feature. Bits [2:0] are for RX side. Bits + * [5:3] are for TX side. Bit 0 enables TimeSync on RX side. Bit 1 enables + * V1 frame format in timesync event detection on RX side. Bit 2 enables V2 + * frame format in timesync event detection on RX side. Bit 3 enables + * TimeSync on TX side. Bit 4 enables V1 frame format in timesync event + * detection on TX side. Bit 5 enables V2 frame format in timesync event + * detection on TX side. Note that for HW to detect PTP packet and extract + * data from the packet, at least one of the version bits of that traffic + * direction has to be enabled. + */ +#define NIG_REG_P1_PTP_EN 0x187b0 +/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 0. A + * priority is mapped to COS 0 when the corresponding mask bit is 1. More + * than one bit may be set; allowing multiple priorities to be mapped to one + * COS. + */ +#define NIG_REG_P1_RX_COS0_PRIORITY_MASK 0x181ac +/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 1. A + * priority is mapped to COS 1 when the corresponding mask bit is 1. More + * than one bit may be set; allowing multiple priorities to be mapped to one + * COS. + */ +#define NIG_REG_P1_RX_COS1_PRIORITY_MASK 0x181b0 +/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 2. A + * priority is mapped to COS 2 when the corresponding mask bit is 1. More + * than one bit may be set; allowing multiple priorities to be mapped to one + * COS. + */ +#define NIG_REG_P1_RX_COS2_PRIORITY_MASK 0x186f8 +/* [R 1] RX FIFO for receiving data from MAC is empty. */ +#define NIG_REG_P1_RX_MACFIFO_EMPTY 0x1858c +/* [R 1] TLLH FIFO is empty. */ +#define NIG_REG_P1_TLLH_FIFO_EMPTY 0x18338 +/* [RW 19] Packet TimeSync information that is buffered in 1-deep FIFOs for + * TX side. Bits [15:0] reflect the sequence ID of the packet. Bit 16 + * indicates the validity of the data in the buffer. Bit 17 indicates that + * the sequence ID is valid and it is waiting for the TX timestamp value. + * Bit 18 indicates whether the timestamp is from a SW request (value of 1) + * or HW request (value of 0). Writing a 1 to bit 16 will clear the buffer. + */ +#define NIG_REG_P1_TLLH_PTP_BUF_SEQID 0x187ec +/* [R 32] Packet TimeSync information that is buffered in 1-deep FIFOs for + * MCP. This location returns the lower 32 bits of timestamp value. + */ +#define NIG_REG_P1_TLLH_PTP_BUF_TS_LSB 0x187e4 +/* [R 32] Packet TimeSync information that is buffered in 1-deep FIFOs for + * MCP. This location returns the upper 32 bits of timestamp value. + */ +#define NIG_REG_P1_TLLH_PTP_BUF_TS_MSB 0x187e8 +/* [RW 11] Mask register for the various parameters used in determining PTP + * packet presence. Set each bit to 1 to mask out the particular parameter. + * 0-IPv4 DA 0 of 224.0.1.129. 1-IPv4 DA 1 of 224.0.0.107. 2-IPv6 DA 0 of + * 0xFF0*:0:0:0:0:0:0:181. 3-IPv6 DA 1 of 0xFF02:0:0:0:0:0:0:6B. 4-UDP + * destination port 0 of 319. 5-UDP destination port 1 of 320. 6-MAC + * Ethertype 0 of 0x88F7. 7-configurable MAC Ethertype 1. 8-MAC DA 0 of + * 0x01-1B-19-00-00-00. 9-MAC DA 1 of 0x01-80-C2-00-00-0E. 10-configurable + * MAC DA 2. The reset default is set to mask out all parameters. 
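+ * As a purely illustrative sketch (assuming the REG_WR(sc, offset, value)
+ * register-write helper this driver uses elsewhere); restricting detection
+ * to IPv4/UDP PTP messages sent to 224.0.1.129 on ports 319/320 could look
+ * roughly like:
+ *
+ *   REG_WR(sc, NIG_REG_P1_TLLH_PTP_PARAM_MASK, 0x7ce);
+ *   REG_WR(sc, NIG_REG_P1_TLLH_PTP_RULE_MASK, 0x3ffc);
+ *
+ * 0x7ce leaves only parameter bits 0; 4 and 5 unmasked; 0x3ffc leaves only
+ * rules 0 and 1 unmasked; everything else stays masked as at reset.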
+ */ +#define NIG_REG_P1_TLLH_PTP_PARAM_MASK 0x187f8 +/* [RW 14] Mask regiser for the rules used in detecting PTP packets. Set + * each bit to 1 to mask out that particular rule. 0-{IPv4 DA 0; UDP DP 0} . + * 1-{IPv4 DA 0; UDP DP 1} . 2-{IPv4 DA 1; UDP DP 0} . 3-{IPv4 DA 1; UDP DP + * 1} . 4-{IPv6 DA 0; UDP DP 0} . 5-{IPv6 DA 0; UDP DP 1} . 6-{IPv6 DA 1; + * UDP DP 0} . 7-{IPv6 DA 1; UDP DP 1} . 8-{MAC DA 0; Ethertype 0} . 9-{MAC + * DA 1; Ethertype 0} . 10-{MAC DA 0; Ethertype 1} . 11-{MAC DA 1; Ethertype + * 1} . 12-{MAC DA 2; Ethertype 0} . 13-{MAC DA 2; Ethertype 1} . The reset + * default is to mask out all of the rules. + */ +#define NIG_REG_P1_TLLH_PTP_RULE_MASK 0x187fc +/* [RW 32] Specify which of the credit registers the client is to be mapped + * to. This register specifies bits 31:0 of the 36-bit value. Bits[3:0] are + * for client 0; bits [35:32] are for client 8. For clients that are not + * subject to WFQ credit blocking - their specifications here are not used. + * This is a new register (with 2_) added in E3 B0 to accommodate the 9 + * input clients to ETS arbiter. The reset default is set for management and + * debug to use credit registers 6, 7, and 8, respectively, and COSes 0-5 to + * use credit registers 0-5 respectively (0x543210876). Note that credit + * registers can not be shared between clients. Note also that there are + * only COS0-2 in port 1- there is a total of 6 clients in port 1. Only + * credit registers 0-5 are valid. This register should be configured + * appropriately before enabling WFQ. + */ +#define NIG_REG_P1_TX_ARB_CLIENT_CREDIT_MAP2_LSB 0x186e8 +/* [RW 4] Specify which of the credit registers the client is to be mapped + * to. This register specifies bits 35:32 of the 36-bit value. Bits[3:0] are + * for client 0; bits [35:32] are for client 8. For clients that are not + * subject to WFQ credit blocking - their specifications here are not used. + * This is a new register (with 2_) added in E3 B0 to accommodate the 9 + * input clients to ETS arbiter. The reset default is set for management and + * debug to use credit registers 6, 7, and 8, respectively, and COSes 0-5 to + * use credit registers 0-5 respectively (0x543210876). Note that credit + * registers can not be shared between clients. Note also that there are + * only COS0-2 in port 1- there is a total of 6 clients in port 1. Only + * credit registers 0-5 are valid. This register should be configured + * appropriately before enabling WFQ. + */ +#define NIG_REG_P1_TX_ARB_CLIENT_CREDIT_MAP2_MSB 0x186ec +/* [RW 9] Specify whether the client competes directly in the strict + * priority arbiter. The bits are mapped according to client ID (client IDs + * are defined in tx_arb_priority_client2): 0-management; 1-debug traffic + * from this port; 2-debug traffic from other port; 3-COS0 traffic; 4-COS1 + * traffic; 5-COS2 traffic; 6-COS3 traffic; 7-COS4 traffic; 8-COS5 traffic. + * Default value is set to enable strict priorities for all clients. + */ +#define NIG_REG_P1_TX_ARB_CLIENT_IS_STRICT 0x18234 +/* [RW 9] Specify whether the client is subject to WFQ credit blocking. The + * bits are mapped according to client ID (client IDs are defined in + * tx_arb_priority_client2): 0-management; 1-debug traffic from this port; + * 2-debug traffic from other port; 3-COS0 traffic; 4-COS1 traffic; 5-COS2 + * traffic; 6-COS3 traffic; 7-COS4 traffic; 8-COS5 traffic. Default value is + * 0 for not using WFQ credit blocking. 
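+ * For example (illustrative values only): to subject the COS0-COS2 clients
+ * (client IDs 3; 4 and 5) to WFQ credit blocking while management and debug
+ * traffic stay strict; this register would be set to
+ * (1 << 3) | (1 << 4) | (1 << 5) = 0x38 and the same three bits would
+ * typically be cleared in NIG_REG_P1_TX_ARB_CLIENT_IS_STRICT.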
+ */ +#define NIG_REG_P1_TX_ARB_CLIENT_IS_SUBJECT2WFQ 0x18238 +/* [RW 32] Specify the upper bound that credit register 0 is allowed to + * reach. + */ +#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_0 0x18258 +#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_1 0x1825c +#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_2 0x18260 +#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_3 0x18264 +#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_4 0x18268 +#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_5 0x186f4 +/* [RW 32] Specify the weight (in bytes) to be added to credit register 0 + * when it is time to increment. + */ +#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_0 0x18244 +#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_1 0x18248 +#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_2 0x1824c +#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_3 0x18250 +#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_4 0x18254 +#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_5 0x186f0 +/* [RW 12] Specify the number of strict priority arbitration slots between + * two round-robin arbitration slots to avoid starvation. A value of 0 means + * no strict priority cycles - the strict priority with anti-starvation + * arbiter becomes a round-robin arbiter. + */ +#define NIG_REG_P1_TX_ARB_NUM_STRICT_ARB_SLOTS 0x18240 +/* [RW 32] Specify the client number to be assigned to each priority of the + * strict priority arbiter. This register specifies bits 31:0 of the 36-bit + * value. Priority 0 is the highest priority. Bits [3:0] are for priority 0 + * client; bits [35-32] are for priority 8 client. The clients are assigned + * the following IDs: 0-management; 1-debug traffic from this port; 2-debug + * traffic from other port; 3-COS0 traffic; 4-COS1 traffic; 5-COS2 traffic; + * 6-COS3 traffic; 7-COS4 traffic; 8-COS5 traffic. The reset value[35:0] is + * set to 0x345678021. This is a new register (with 2_) added in E3 B0 to + * accommodate the 9 input clients to ETS arbiter. Note that this register + * is the same as the one for port 0, except that port 1 only has COS 0-2 + * traffic. There is no traffic for COS 3-5 of port 1. + */ +#define NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_LSB 0x186e0 +/* [RW 4] Specify the client number to be assigned to each priority of the + * strict priority arbiter. This register specifies bits 35:32 of the 36-bit + * value. Priority 0 is the highest priority. Bits [3:0] are for priority 0 + * client; bits [35-32] are for priority 8 client. The clients are assigned + * the following IDs: 0-management; 1-debug traffic from this port; 2-debug + * traffic from other port; 3-COS0 traffic; 4-COS1 traffic; 5-COS2 traffic; + * 6-COS3 traffic; 7-COS4 traffic; 8-COS5 traffic. The reset value[35:0] is + * set to 0x345678021. This is a new register (with 2_) added in E3 B0 to + * accommodate the 9 input clients to ETS arbiter. Note that this register + * is the same as the one for port 0, except that port 1 only has COS 0-2 + * traffic. There is no traffic for COS 3-5 of port 1. + */ +#define NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_MSB 0x186e4 +/* [R 1] TX FIFO for transmitting data to MAC is empty. */ +#define NIG_REG_P1_TX_MACFIFO_EMPTY 0x18594 +/* [RW 1] MCP-to-host path enable. Set this bit to enable the routing of MCP + * packets to BRB LB interface to forward the packet to the host. All + * packets from MCP are forwarded to the network when this bit is cleared - + * regardless of the configured destination in tx_mng_destination register. 
+ */ +#define NIG_REG_P1_TX_MNG_HOST_ENABLE 0x182f8 +/* [R 1] FIFO empty status of the MCP TX FIFO used for storing MCP packets + * forwarded to the host. + */ +#define NIG_REG_P1_TX_MNG_HOST_FIFO_EMPTY 0x182b8 +/* [RW 1] Pause enable for port0. This register may get 1 only when + * ~safc_enable.safc_enable = 0 and ppp_enable.ppp_enable =0 for the same + * port + */ +#define NIG_REG_PAUSE_ENABLE_0 0x160c0 +#define NIG_REG_PAUSE_ENABLE_1 0x160c4 +/* [RW 1] Value of this register will be transmitted to port swap when + * ~nig_registers_strap_override.strap_override =1 + */ +#define NIG_REG_PORT_SWAP 0x10394 +/* [RW 1] PPP enable for port0. This register may get 1 only when + * ~safc_enable.safc_enable = 0 and pause_enable.pause_enable =0 for the + * same port + */ +#define NIG_REG_PPP_ENABLE_0 0x160b0 +#define NIG_REG_PPP_ENABLE_1 0x160b4 +/* [RW 1] Input enable for RX parser request IF */ +#define NIG_REG_PRS_REQ_IN_EN 0x100b8 +/* [R 5] control to serdes - CL45 DEVAD */ +#define NIG_REG_SERDES0_CTRL_MD_DEVAD 0x10370 +/* [R 1] control to serdes; 0 - clause 45; 1 - clause 22 */ +#define NIG_REG_SERDES0_CTRL_MD_ST 0x1036c +/* [R 5] control to serdes - CL22 PHY_ADD and CL45 PRTAD */ +#define NIG_REG_SERDES0_CTRL_PHY_ADDR 0x10374 +/* [R 1] status from serdes0 that inputs to interrupt logic of link status */ +#define NIG_REG_SERDES0_STATUS_LINK_STATUS 0x10578 +/* [R 32] Rx statistics : In user packets discarded due to BRB backpressure + * for port 0 COS0 + */ +#define NIG_REG_STAT0_BRB_DISCARD 0x105f0 +/* [R 32] Rx statistics : In user packets truncated due to BRB backpressure + * for port 0 COS0 + */ +#define NIG_REG_STAT0_BRB_TRUNCATE 0x105f8 +/* [WB_R 36] Tx statistics : Number of packets from emac0 or bmac0 that + * between 1024 and 1522 bytes for port0 + */ +#define NIG_REG_STAT0_EGRESS_MAC_PKT0 0x10750 +/* [WB_R 36] Tx statistics : Number of packets from emac0 or bmac0 that + * between 1523 bytes and above for port0 + */ +#define NIG_REG_STAT0_EGRESS_MAC_PKT1 0x10760 +/* [R 32] Rx statistics : In user packets discarded due to BRB backpressure + * for port 1 COS0 + */ +#define NIG_REG_STAT1_BRB_DISCARD 0x10628 +/* [WB_R 36] Tx statistics : Number of packets from emac1 or bmac1 that + * between 1024 and 1522 bytes for port1 + */ +#define NIG_REG_STAT1_EGRESS_MAC_PKT0 0x107a0 +/* [WB_R 36] Tx statistics : Number of packets from emac1 or bmac1 that + * between 1523 bytes and above for port1 + */ +#define NIG_REG_STAT1_EGRESS_MAC_PKT1 0x107b0 +/* [WB_R 64] Rx statistics : User octets received for LP */ +#define NIG_REG_STAT2_BRB_OCTET 0x107e0 +#define NIG_REG_STATUS_INTERRUPT_PORT0 0x10328 +/* [RW 1] port swap mux selection. If this register equal to 0 then port + * swap is equal to SPIO pin that inputs from ifmux_serdes_swap. If 1 then + * ort swap is equal to ~nig_registers_port_swap.port_swap + */ +#define NIG_REG_STRAP_OVERRIDE 0x10398 +/* [WB 64] Addresses for TimeSync related registers in the timesync + * generator sub-module. 
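+ * ([WB 64] marks this as a wide-bus range wider than 32 bits; such ranges
+ * are typically accessed as consecutive 32-bit reads/writes - or through
+ * the driver's DMAE helpers - rather than as a single register access.)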
+ */ +#define NIG_REG_TIMESYNC_GEN_REG 0x18800 +/* [RW 1] output enable for RX_XCM0 IF */ +#define NIG_REG_XCM0_OUT_EN 0x100f0 +/* [RW 1] output enable for RX_XCM1 IF */ +#define NIG_REG_XCM1_OUT_EN 0x100f4 +/* [R 1] control to xgxs - remote PHY in-band MDIO */ +#define NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST 0x10348 +/* [R 5] control to xgxs - CL45 DEVAD */ +#define NIG_REG_XGXS0_CTRL_MD_DEVAD 0x1033c +/* [R 1] control to xgxs; 0 - clause 45; 1 - clause 22 */ +#define NIG_REG_XGXS0_CTRL_MD_ST 0x10338 +/* [R 5] control to xgxs - CL22 PHY_ADD and CL45 PRTAD */ +#define NIG_REG_XGXS0_CTRL_PHY_ADDR 0x10340 +/* [R 1] status from xgxs0 that inputs to interrupt logic of link10g. */ +#define NIG_REG_XGXS0_STATUS_LINK10G 0x10680 +/* [R 4] status from xgxs0 that inputs to interrupt logic of link status */ +#define NIG_REG_XGXS0_STATUS_LINK_STATUS 0x10684 +/* [R 2] selection for XGXS lane of port 0 in NIG_MUX block */ +#define NIG_REG_XGXS_LANE_SEL_P0 0x102e8 +/* [R 1] selection for port0 for NIG_MUX block : 0 = SerDes; 1 = XGXS */ +#define NIG_REG_XGXS_SERDES0_MODE_SEL 0x102e0 +#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_EMAC0_MISC_MI_INT (0x1 << 0) +#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_SERDES0_LINK_STATUS (0x1 << 9) +#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK10G (0x1 << 15) +#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS (0xf << 18) +#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS_SIZE 18 +/* [RW 1] Interrupt mask register #0 read/write */ +#define NIG_TSGEN_REG_NIG_TSGEN_INT_MASK 0xcc +/* [R 1] Interrupt register #0 read */ +#define NIG_TSGEN_REG_NIG_TSGEN_INT_STS 0xc0 +/* [RC 1] Interrupt register #0 read clear */ +#define NIG_TSGEN_REG_NIG_TSGEN_INT_STS_CLR 0xc4 +/* [R 31] Removed for E3 B0 -The upper bound of the weight of COS0 in the + * ETS command arbiter. + */ +#define PBF_REG_COS0_UPPER_BOUND 0x15c05c +/* [RW 31] The upper bound of the weight of COS0 in the ETS command arbiter + * of port 0. + */ +#define PBF_REG_COS0_UPPER_BOUND_P0 0x15c2cc +/* [RW 31] The upper bound of the weight of COS0 in the ETS command arbiter + * of port 1. + */ +#define PBF_REG_COS0_UPPER_BOUND_P1 0x15c2e4 +/* [R 31] Removed for E3 B0 - The weight of COS0 in the ETS command arbiter. */ +#define PBF_REG_COS0_WEIGHT 0x15c054 +/* [RW 31] The weight of COS0 in port 0 ETS command arbiter. */ +#define PBF_REG_COS0_WEIGHT_P0 0x15c2a8 +/* [RW 31] The weight of COS0 in port 1 ETS command arbiter. */ +#define PBF_REG_COS0_WEIGHT_P1 0x15c2c0 +/* [R 31] Removed for E3 B0 -The upper bound of the weight of COS1 in the + * ETS command arbiter. + */ +#define PBF_REG_COS1_UPPER_BOUND 0x15c060 +/* [R 31] Removed for E3 B0 - The weight of COS1 in the ETS command arbiter. */ +#define PBF_REG_COS1_WEIGHT 0x15c058 +/* [RW 31] The weight of COS1 in port 0 ETS command arbiter. */ +#define PBF_REG_COS1_WEIGHT_P0 0x15c2ac +/* [RW 31] The weight of COS1 in port 1 ETS command arbiter. */ +#define PBF_REG_COS1_WEIGHT_P1 0x15c2c4 +/* [RW 31] The weight of COS2 in port 0 ETS command arbiter. */ +#define PBF_REG_COS2_WEIGHT_P0 0x15c2b0 +/* [RW 31] The weight of COS2 in port 1 ETS command arbiter. */ +#define PBF_REG_COS2_WEIGHT_P1 0x15c2c8 +/* [RW 31] The weight of COS3 in port 0 ETS command arbiter. */ +#define PBF_REG_COS3_WEIGHT_P0 0x15c2b4 +/* [RW 31] The weight of COS4 in port 0 ETS command arbiter. */ +#define PBF_REG_COS4_WEIGHT_P0 0x15c2b8 +/* [RW 31] The weight of COS5 in port 0 ETS command arbiter. 
*/ +#define PBF_REG_COS5_WEIGHT_P0 0x15c2bc +/* [R 11] Current credit for the LB queue in the tx port buffers in 16 byte + * lines. + */ +#define PBF_REG_CREDIT_LB_Q 0x140338 +/* [R 11] Current credit for queue 0 in the tx port buffers in 16 byte + * lines. + */ +#define PBF_REG_CREDIT_Q0 0x14033c +/* [R 11] Current credit for queue 1 in the tx port buffers in 16 byte + * lines. + */ +#define PBF_REG_CREDIT_Q1 0x140340 +/* [R 11] Current credit for queue 2 in the tx port buffers in 16 byte + * lines. + */ +#define PBF_REG_CREDIT_Q2 0x140344 +/* [R 11] Current credit for queue 3 in the tx port buffers in 16 byte + * lines. + */ +#define PBF_REG_CREDIT_Q3 0x140348 +/* [R 11] Current credit for queue 4 in the tx port buffers in 16 byte + * lines. + */ +#define PBF_REG_CREDIT_Q4 0x14034c +/* [R 11] Current credit for queue 5 in the tx port buffers in 16 byte + * lines. + */ +#define PBF_REG_CREDIT_Q5 0x140350 +/* [R 1] Removed for E3 B0 - Disable processing further tasks from port 0 + * (after ending the current task in process). + */ +#define PBF_REG_DISABLE_NEW_TASK_PROC_P0 0x14005c +/* [R 1] Removed for E3 B0 - Disable processing further tasks from port 1 + * (after ending the current task in process). + */ +#define PBF_REG_DISABLE_NEW_TASK_PROC_P1 0x140060 +/* [RW 1] Disable processing further tasks from port 0 (after ending the + * current task in process). + */ +#define PBF_REG_DISABLE_NEW_TASK_PROC_Q0 0x15c1bc +/* [RW 1] Disable processing further tasks from port 0 (after ending the + * current task in process). + */ +#define PBF_REG_DISABLE_NEW_TASK_PROC_Q1 0x15c1c0 +/* [RW 1] Disable processing further tasks from port 0 (after ending the + * current task in process). + */ +#define PBF_REG_DISABLE_NEW_TASK_PROC_Q2 0x15c1c4 +/* [RW 1] Disable processing further tasks from port 0 (after ending the + * current task in process). + */ +#define PBF_REG_DISABLE_NEW_TASK_PROC_Q3 0x15c1c8 +/* [RW 1] Disable processing further tasks from port 0 (after ending the + * current task in process). + */ +#define PBF_REG_DISABLE_NEW_TASK_PROC_Q4 0x15c1cc +/* [RW 1] Disable processing further tasks from port 0 (after ending the + * current task in process). + */ +#define PBF_REG_DISABLE_NEW_TASK_PROC_Q5 0x15c1d0 +#define PBF_REG_DISABLE_PF 0x1402e8 +#define PBF_REG_DISABLE_VF 0x1402ec +/* [RW 18] For port 0: For each client that is subject to WFQ (the + * corresponding bit is 1); indicates to which of the credit registers this + * client is mapped. For clients which are not credit blocked; their mapping + * is dont care. + */ +#define PBF_REG_ETS_ARB_CLIENT_CREDIT_MAP_P0 0x15c288 +/* [RW 9] For port 1: For each client that is subject to WFQ (the + * corresponding bit is 1); indicates to which of the credit registers this + * client is mapped. For clients which are not credit blocked; their mapping + * is dont care. + */ +#define PBF_REG_ETS_ARB_CLIENT_CREDIT_MAP_P1 0x15c28c +/* [RW 6] For port 0: Bit per client to indicate if the client competes in + * the strict priority arbiter directly (corresponding bit = 1); or first + * goes to the RR arbiter (corresponding bit = 0); and then competes in the + * lowest priority in the strict-priority arbiter. + */ +#define PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P0 0x15c278 +/* [RW 3] For port 1: Bit per client to indicate if the client competes in + * the strict priority arbiter directly (corresponding bit = 1); or first + * goes to the RR arbiter (corresponding bit = 0); and then competes in the + * lowest priority in the strict-priority arbiter. 
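+ * (With all bits cleared every client goes through the RR stage; so the
+ * scheme degenerates to plain round-robin. Setting a client's bit lets it
+ * bypass the RR stage and compete at whatever strict priority
+ * PBF_REG_ETS_ARB_PRIORITY_CLIENT_P1 assigns to it.)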
+ */ +#define PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P1 0x15c27c +/* [RW 6] For port 0: Bit per client to indicate if the client is subject to + * WFQ credit blocking (corresponding bit = 1). + */ +#define PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P0 0x15c280 +/* [RW 3] For port 0: Bit per client to indicate if the client is subject to + * WFQ credit blocking (corresponding bit = 1). + */ +#define PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P1 0x15c284 +/* [RW 16] For port 0: The number of strict priority arbitration slots + * between 2 RR arbitration slots. A value of 0 means no strict priority + * cycles; i.e. the strict-priority w/ anti-starvation arbiter is a RR + * arbiter. + */ +#define PBF_REG_ETS_ARB_NUM_STRICT_ARB_SLOTS_P0 0x15c2a0 +/* [RW 16] For port 1: The number of strict priority arbitration slots + * between 2 RR arbitration slots. A value of 0 means no strict priority + * cycles; i.e. the strict-priority w/ anti-starvation arbiter is a RR + * arbiter. + */ +#define PBF_REG_ETS_ARB_NUM_STRICT_ARB_SLOTS_P1 0x15c2a4 +/* [RW 18] For port 0: Indicates which client is connected to each priority + * in the strict-priority arbiter. Priority 0 is the highest priority, and + * priority 5 is the lowest; to which the RR output is connected to (this is + * not configurable). + */ +#define PBF_REG_ETS_ARB_PRIORITY_CLIENT_P0 0x15c270 +/* [RW 9] For port 1: Indicates which client is connected to each priority + * in the strict-priority arbiter. Priority 0 is the highest priority, and + * priority 5 is the lowest; to which the RR output is connected to (this is + * not configurable). + */ +#define PBF_REG_ETS_ARB_PRIORITY_CLIENT_P1 0x15c274 +/* [R 1] Removed for E3 B0 - Indicates that ETS is performed between the + * COSes in the command arbiter. If reset strict priority w/ anti-starvation + * will be performed w/o WFQ. + */ +#define PBF_REG_ETS_ENABLED 0x15c050 +/* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic + * Ethernet header. + */ +#define PBF_REG_HDRS_AFTER_BASIC 0x15c0a8 +/* [RW 6] Bit-map indicating which L2 hdrs may appear after L2 tag 0 */ +#define PBF_REG_HDRS_AFTER_TAG_0 0x15c0b8 +/* [R 1] Removed for E3 B0 - Indicates which COS is conncted to the highest + * priority in the command arbiter. + */ +#define PBF_REG_HIGH_PRIORITY_COS_NUM 0x15c04c +/* [RW 11] Initial credit for the LB queue in the tx port buffers in 16 byte + * lines. + */ +#define PBF_REG_INIT_CRD_LB_Q 0x15c248 +/* [RW 11] Initial credit for queue 0 in the tx port buffers in 16 byte + * lines. + */ +#define PBF_REG_INIT_CRD_Q0 0x15c230 +/* [RW 11] Initial credit for queue 1 in the tx port buffers in 16 byte + * lines. + */ +#define PBF_REG_INIT_CRD_Q1 0x15c234 +/* [RW 11] Initial credit for queue 2 in the tx port buffers in 16 byte + * lines. + */ +#define PBF_REG_INIT_CRD_Q2 0x15c238 +/* [RW 11] Initial credit for queue 3 in the tx port buffers in 16 byte + * lines. + */ +#define PBF_REG_INIT_CRD_Q3 0x15c23c +/* [RW 11] Initial credit for queue 4 in the tx port buffers in 16 byte + * lines. + */ +#define PBF_REG_INIT_CRD_Q4 0x15c240 +/* [RW 11] Initial credit for queue 5 in the tx port buffers in 16 byte + * lines. + */ +#define PBF_REG_INIT_CRD_Q5 0x15c244 +/* [R 1] Removed for E3 B0 - Init bit for port 0. When set the initial + * credit of port 0 is copied to the credit register. Should be set and then + * reset after the configuration of the port has ended. + */ +#define PBF_REG_INIT_P0 0x140004 +/* [R 32] Cyclic counter for the amount credits in 16 bytes lines added for + * the LB queue. Reset upon init. 
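+ * (Credit here is counted in 16-byte lines; a 1514-byte frame; for
+ * instance; spans ceil(1514 / 16) = 95 such lines. Since the counter is
+ * cyclic; successive reads are normally compared with unsigned 32-bit
+ * wrap-around arithmetic rather than taken as absolute values.)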
+ */ +#define PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q 0x140354 +/* [R 32] Cyclic counter for the amount credits in 16 bytes lines added for + * queue 0. Reset upon init. + */ +#define PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 0x140358 +/* [R 32] Cyclic counter for the amount credits in 16 bytes lines added for + * queue 1. Reset upon init. + */ +#define PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 0x14035c +/* [RW 1] Enable for mac interface 0. */ +#define PBF_REG_MAC_IF0_ENABLE 0x140030 +/* [RW 6] Bit-map indicating which headers must appear in the packet */ +#define PBF_REG_MUST_HAVE_HDRS 0x15c0c4 +/* [R 16] Removed for E3 B0 - The number of strict priority arbitration + * slots between 2 RR arbitration slots. A value of 0 means no strict + * priority cycles; i.e. the strict-priority w/ anti-starvation arbiter is a + * RR arbiter. + */ +#define PBF_REG_NUM_STRICT_ARB_SLOTS 0x15c064 +/* [R 11] Removed for E3 B0 - Port 0 threshold used by arbiter in 16 byte + * lines used when pause not suppoterd. + */ +#define PBF_REG_P0_ARB_THRSH 0x1400e4 +/* [R 11] Removed for E3 B0 - Current credit for port 0 in the tx port + * buffers in 16 byte lines. + */ +#define PBF_REG_P0_CREDIT 0x140200 +/* [R 11] Removed for E3 B0 - Initial credit for port 0 in the tx port + * buffers in 16 byte lines. + */ +#define PBF_REG_P0_INIT_CRD 0x1400d0 +/* [R 32] Cyclic counter for the amount credits in 16 bytes lines added for + * port 0. Reset upon init. + */ +#define PBF_REG_P0_INTERNAL_CRD_FREED_CNT 0x140308 +/* [R 1] Removed for E3 B0 - Indication that pause is enabled for port 0. */ +#define PBF_REG_P0_PAUSE_ENABLE 0x140014 +/* [R 8] Removed for E3 B0 - Number of tasks in port 0 task queue. */ +#define PBF_REG_P0_TASK_CNT 0x140204 +/* [R 32] Removed for E3 B0 - Cyclic counter for number of 8 byte lines + * freed from the task queue of port 0. Reset upon init. + */ +#define PBF_REG_P0_TQ_LINES_FREED_CNT 0x1402f0 +/* [R 12] Number of 8 bytes lines occupied in the task queue of port 0. */ +#define PBF_REG_P0_TQ_OCCUPANCY 0x1402fc +/* [R 11] Removed for E3 B0 - Current credit for port 1 in the tx port + * buffers in 16 byte lines. + */ +#define PBF_REG_P1_CREDIT 0x140208 +/* [R 11] Removed for E3 B0 - Initial credit for port 0 in the tx port + * buffers in 16 byte lines. + */ +#define PBF_REG_P1_INIT_CRD 0x1400d4 +/* [R 32] Cyclic counter for the amount credits in 16 bytes lines added for + * port 1. Reset upon init. + */ +#define PBF_REG_P1_INTERNAL_CRD_FREED_CNT 0x14030c +/* [R 8] Removed for E3 B0 - Number of tasks in port 1 task queue. */ +#define PBF_REG_P1_TASK_CNT 0x14020c +/* [R 32] Removed for E3 B0 - Cyclic counter for number of 8 byte lines + * freed from the task queue of port 1. Reset upon init. + */ +#define PBF_REG_P1_TQ_LINES_FREED_CNT 0x1402f4 +/* [R 12] Number of 8 bytes lines occupied in the task queue of port 1. */ +#define PBF_REG_P1_TQ_OCCUPANCY 0x140300 +/* [R 11] Removed for E3 B0 - Current credit for port 4 in the tx port + * buffers in 16 byte lines. + */ +#define PBF_REG_P4_CREDIT 0x140210 +/* [R 11] Removed for E3 B0 - Initial credit for port 0 in the tx port + * buffers in 16 byte lines. + */ +#define PBF_REG_P4_INIT_CRD 0x1400e0 +/* [R 32] Cyclic counter for the amount credits in 16 bytes lines added for + * port 4. Reset upon init. + */ +#define PBF_REG_P4_INTERNAL_CRD_FREED_CNT 0x140310 +/* [R 8] Removed for E3 B0 - Number of tasks in port 4 task queue. */ +#define PBF_REG_P4_TASK_CNT 0x140214 +/* [R 32] Removed for E3 B0 - Cyclic counter for number of 8 byte lines + * freed from the task queue of port 4. 
Reset upon init. + */ +#define PBF_REG_P4_TQ_LINES_FREED_CNT 0x1402f8 +/* [R 12] Number of 8 bytes lines occupied in the task queue of port 4. */ +#define PBF_REG_P4_TQ_OCCUPANCY 0x140304 +/* [RW 7] Interrupt mask register #0 read/write */ +#define PBF_REG_PBF_INT_MASK 0x1401d4 +/* [R 7] Interrupt register #0 read */ +#define PBF_REG_PBF_INT_STS 0x1401c8 +/* [RC 7] Interrupt register #0 read clear */ +#define PBF_REG_PBF_INT_STS_CLR 0x1401cc +/* [RW 28] Parity mask register #0 read/write */ +#define PBF_REG_PBF_PRTY_MASK 0x1401e4 +/* [R 28] Parity register #0 read */ +#define PBF_REG_PBF_PRTY_STS 0x1401d8 +/* [RC 28] Parity register #0 read clear */ +#define PBF_REG_PBF_PRTY_STS_CLR 0x1401dc +/* [RW 16] The Ethernet type value for L2 tag 0 */ +#define PBF_REG_TAG_ETHERTYPE_0 0x15c090 +/* [RW 4] The length of the info field for L2 tag 0. The length is between + * 2B and 14B; in 2B granularity + */ +#define PBF_REG_TAG_LEN_0 0x15c09c +/* [R 8] Number of tasks in queue 0 task queue. */ +#define PBF_REG_TASK_CNT_LB_Q 0x140370 +/* [R 8] Number of tasks in queue 0 task queue. */ +#define PBF_REG_TASK_CNT_Q0 0x140374 +/* [R 8] Number of tasks in queue 0 task queue. */ +#define PBF_REG_TASK_CNT_Q1 0x140378 +/* [R 8] Number of tasks in queue 0 task queue. */ +#define PBF_REG_TASK_CNT_Q2 0x14037c +/* [R 8] Number of tasks in queue 0 task queue. */ +#define PBF_REG_TASK_CNT_Q3 0x140380 +/* [R 8] Number of tasks in queue 0 task queue. */ +#define PBF_REG_TASK_CNT_Q4 0x140384 +/* [R 8] Number of tasks in queue 0 task queue. */ +#define PBF_REG_TASK_CNT_Q5 0x140388 +/* [R 32] Cyclic counter for number of 8 byte lines freed from the LB task + * queue. Reset upon init. + */ +#define PBF_REG_TQ_LINES_FREED_CNT_LB_Q 0x14038c +/* [R 32] Cyclic counter for number of 8 byte lines freed from the task + * queue 0. Reset upon init. + */ +#define PBF_REG_TQ_LINES_FREED_CNT_Q0 0x140390 +/* [R 32] Cyclic counter for number of 8 byte lines freed from task queue 1. + * Reset upon init. + */ +#define PBF_REG_TQ_LINES_FREED_CNT_Q1 0x140394 +/* [R 13] Number of 8 bytes lines occupied in the task queue of the LB + * queue. + */ +#define PBF_REG_TQ_OCCUPANCY_LB_Q 0x1403a8 +/* [R 13] Number of 8 bytes lines occupied in the task queue of queue 0. */ +#define PBF_REG_TQ_OCCUPANCY_Q0 0x1403ac +/* [R 13] Number of 8 bytes lines occupied in the task queue of queue 1. */ +#define PBF_REG_TQ_OCCUPANCY_Q1 0x1403b0 +/* [RW 16] One of 8 values that should be compared to type in Ethernet + * parsing. If there is a match; the field after Ethernet is the first VLAN. + * Reset value is 0x8100 which is the standard VLAN type. Note that when + * checking second VLAN; type is compared only to 0x8100. 
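+ * For instance; a deployment using 802.1ad S-tags as the outer tag would
+ * program one of the eight values to 0x88a8 so that frames whose Ethernet
+ * type is 0x88a8 are parsed as carrying a VLAN immediately after the
+ * Ethernet header.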
+ */ +#define PBF_REG_VLAN_TYPE_0 0x15c06c +/* [RW 2] Interrupt mask register #0 read/write */ +#define PB_REG_PB_INT_MASK 0x28 +/* [R 2] Interrupt register #0 read */ +#define PB_REG_PB_INT_STS 0x1c +/* [RC 2] Interrupt register #0 read clear */ +#define PB_REG_PB_INT_STS_CLR 0x20 +/* [RW 4] Parity mask register #0 read/write */ +#define PB_REG_PB_PRTY_MASK 0x38 +/* [R 4] Parity register #0 read */ +#define PB_REG_PB_PRTY_STS 0x2c +/* [RC 4] Parity register #0 read clear */ +#define PB_REG_PB_PRTY_STS_CLR 0x30 +#define PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR (0x1 << 0) +#define PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW (0x1 << 8) +#define PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR (0x1 << 1) +#define PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN (0x1 << 6) +#define PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN (0x1 << 7) +#define PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN (0x1 << 4) +#define PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN (0x1 << 3) +#define PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN (0x1 << 5) +#define PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN (0x1 << 2) +/* [R 8] Config space A attention dirty bits. Each bit indicates that the + * corresponding PF generates config space A attention. Set by PXP. Reset by + * MCP writing 1 to icfg_space_a_request_clr. Note: register contains bits + * from both paths. + */ +#define PGLUE_B_REG_CFG_SPACE_A_REQUEST 0x9010 +/* [R 8] Config space B attention dirty bits. Each bit indicates that the + * corresponding PF generates config space B attention. Set by PXP. Reset by + * MCP writing 1 to icfg_space_b_request_clr. Note: register contains bits + * from both paths. + */ +#define PGLUE_B_REG_CFG_SPACE_B_REQUEST 0x9014 +/* [R 8] FLR request attention dirty bits for PFs 0 to 7. Each bit indicates + * that the FLR register of the corresponding PF was set. Set by PXP. Reset + * by MCP writing 1 to flr_request_pf_7_0_clr. Note: register contains bits + * from both paths. + */ +#define PGLUE_B_REG_FLR_REQUEST_PF_7_0 0x9028 +/* [W 8] FLR request attention dirty bits clear for PFs 0 to 7. MCP writes 1 + * to a bit in this register in order to clear the corresponding bit in + * flr_request_pf_7_0 register. Note: register contains bits from both + * paths. + */ +#define PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR 0x9418 +/* [R 32] FLR request attention dirty bits for VFs 96 to 127. Each bit + * indicates that the FLR register of the corresponding VF was set. Set by + * PXP. Reset by MCP writing 1 to flr_request_vf_127_96_clr. + */ +#define PGLUE_B_REG_FLR_REQUEST_VF_127_96 0x9024 +/* [R 32] FLR request attention dirty bits for VFs 0 to 31. Each bit + * indicates that the FLR register of the corresponding VF was set. Set by + * PXP. Reset by MCP writing 1 to flr_request_vf_31_0_clr. + */ +#define PGLUE_B_REG_FLR_REQUEST_VF_31_0 0x9018 +/* [R 32] FLR request attention dirty bits for VFs 32 to 63. Each bit + * indicates that the FLR register of the corresponding VF was set. Set by + * PXP. Reset by MCP writing 1 to flr_request_vf_63_32_clr. + */ +#define PGLUE_B_REG_FLR_REQUEST_VF_63_32 0x901c +/* [R 32] FLR request attention dirty bits for VFs 64 to 95. Each bit + * indicates that the FLR register of the corresponding VF was set. Set by + * PXP. Reset by MCP writing 1 to flr_request_vf_95_64_clr. + */ +#define PGLUE_B_REG_FLR_REQUEST_VF_95_64 0x9020 +/* [R 8] Each bit indicates an incorrect behavior in user RX interface. Bit + * 0 - Target memory read arrived with a correctable error. 
Bit 1 - Target + * memory read arrived with an uncorrectable error. Bit 2 - Configuration RW + * arrived with a correctable error. Bit 3 - Configuration RW arrived with + * an uncorrectable error. Bit 4 - Completion with Configuration Request + * Retry Status. Bit 5 - Expansion ROM access received with a write request. + * Bit 6 - Completion with pcie_rx_err of 0000; CMPL_STATUS of non-zero; and + * pcie_rx_last not asserted. Bit 7 - Completion with pcie_rx_err of 1010; + * and pcie_rx_last not asserted. + */ +#define PGLUE_B_REG_INCORRECT_RCV_DETAILS 0x9068 +#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER 0x942c +#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ 0x9430 +#define PGLUE_B_REG_INTERNAL_VFID_ENABLE 0x9438 +/* [W 7] Writing 1 to each bit in this register clears a corresponding error + * details register and enables logging new error details. Bit 0 - clears + * INCORRECT_RCV_DETAILS; Bit 1 - clears RX_ERR_DETAILS; Bit 2 - clears + * TX_ERR_WR_ADD_31_0 TX_ERR_WR_ADD_63_32 TX_ERR_WR_DETAILS + * TX_ERR_WR_DETAILS2 TX_ERR_RD_ADD_31_0 TX_ERR_RD_ADD_63_32 + * TX_ERR_RD_DETAILS TX_ERR_RD_DETAILS2 TX_ERR_WR_DETAILS_ICPL; Bit 3 - + * clears VF_LENGTH_VIOLATION_DETAILS. Bit 4 - clears + * VF_GRC_SPACE_VIOLATION_DETAILS. Bit 5 - clears RX_TCPL_ERR_DETAILS. Bit 6 + * - clears TCPL_IN_TWO_RCBS_DETAILS. + */ +#define PGLUE_B_REG_LATCHED_ERRORS_CLR 0x943c +/* [RW 11] Interrupt mask register #0 read/write */ +#define PGLUE_B_REG_PGLUE_B_INT_MASK 0x92a4 +/* [R 11] Interrupt register #0 read */ +#define PGLUE_B_REG_PGLUE_B_INT_STS 0x9298 +/* [RC 11] Interrupt register #0 read clear */ +#define PGLUE_B_REG_PGLUE_B_INT_STS_CLR 0x929c +/* [RW 2] Parity mask register #0 read/write */ +#define PGLUE_B_REG_PGLUE_B_PRTY_MASK 0x92b4 +/* [R 2] Parity register #0 read */ +#define PGLUE_B_REG_PGLUE_B_PRTY_STS 0x92a8 +/* [RC 2] Parity register #0 read clear */ +#define PGLUE_B_REG_PGLUE_B_PRTY_STS_CLR 0x92ac +/* [R 13] Details of first request received with error. [2:0] - PFID. [3] - + * VF_VALID. [9:4] - VFID. [11:10] - Error Code - 0 - Indicates Completion + * Timeout of a User Tx non-posted request. 1 - unsupported request. 2 - + * completer abort. 3 - Illegal value for this field. [12] valid - indicates + * if there was a completion error since the last time this register was + * cleared. + */ +#define PGLUE_B_REG_RX_ERR_DETAILS 0x9080 +/* [R 18] Details of first ATS Translation Completion request received with + * error. [2:0] - PFID. [3] - VF_VALID. [9:4] - VFID. [11:10] - Error Code - + * 0 - Indicates Completion Timeout of a User Tx non-posted request. 1 - + * unsupported request. 2 - completer abort. 3 - Illegal value for this + * field. [16:12] - ATC OTB EntryID. [17] valid - indicates if there was a + * completion error since the last time this register was cleared. + */ +#define PGLUE_B_REG_RX_TCPL_ERR_DETAILS 0x9084 +/* [W 8] Debug only - Shadow BME bits clear for PFs 0 to 7. MCP writes 1 to + * a bit in this register in order to clear the corresponding bit in + * shadow_bme_pf_7_0 register. MCP should never use this unless a + * work-around is needed. Note: register contains bits from both paths. + */ +#define PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR 0x9458 +/* [R 8] SR IOV disabled attention dirty bits. Each bit indicates that the + * VF enable register of the corresponding PF is written to 0 and was + * previously 1. Set by PXP. Reset by MCP writing 1 to + * sr_iov_disabled_request_clr. Note: register contains bits from both + * paths. 
+ */ +#define PGLUE_B_REG_SR_IOV_DISABLED_REQUEST 0x9030 +/* [R 32] Indicates the status of tags 32-63. 0 - tags is used - read + * completion did not return yet. 1 - tag is unused. Same functionality as + * pxp2_registers_pgl_exp_rom_data2 for tags 0-31. + */ +#define PGLUE_B_REG_TAGS_63_32 0x9244 +/* [R 32] Address [31:0] of first read request not submitted due to error */ +#define PGLUE_B_REG_TX_ERR_RD_ADD_31_0 0x9098 +/* [R 32] Address [63:32] of first read request not submitted due to error */ +#define PGLUE_B_REG_TX_ERR_RD_ADD_63_32 0x909c +/* [R 31] Details of first read request not submitted due to error. [4:0] + * VQID. [5] TREQ. 1 - Indicates the request is a Translation Request. + * [20:8] - Length in bytes. [23:21] - PFID. [24] - VF_VALID. [30:25] - + * VFID. + */ +#define PGLUE_B_REG_TX_ERR_RD_DETAILS 0x90a0 +/* [R 26] Details of first read request not submitted due to error. [15:0] + * Request ID. [19:16] client ID. [20] - last SR. [24:21] - Error type - + * [21] - Indicates was_error was set; [22] - Indicates BME was cleared; + * [23] - Indicates FID_enable was cleared; [24] - Indicates VF with parent + * PF FLR_request or IOV_disable_request dirty bit is set. [25] valid - + * indicates if there was a request not submitted due to error since the + * last time this register was cleared. + */ +#define PGLUE_B_REG_TX_ERR_RD_DETAILS2 0x90a4 +/* [R 32] Address [31:0] of first write request not submitted due to error */ +#define PGLUE_B_REG_TX_ERR_WR_ADD_31_0 0x9088 +/* [R 32] Address [63:32] of first write request not submitted due to error */ +#define PGLUE_B_REG_TX_ERR_WR_ADD_63_32 0x908c +/* [R 31] Details of first write request not submitted due to error. [4:0] + * VQID. [20:8] - Length in bytes. [23:21] - PFID. [24] - VF_VALID. [30:25] + * - VFID. + */ +#define PGLUE_B_REG_TX_ERR_WR_DETAILS 0x9090 +/* [R 26] Details of first write request not submitted due to error. [15:0] + * Request ID. [19:16] client ID. [20] - last SR. [24:21] - Error type - + * [21] - Indicates was_error was set; [22] - Indicates BME was cleared; + * [23] - Indicates FID_enable was cleared; [24] - Indicates VF with parent + * PF FLR_request or IOV_disable_request dirty bit is set. [25] valid - + * indicates if there was a request not submitted due to error since the + * last time this register was cleared. + */ +#define PGLUE_B_REG_TX_ERR_WR_DETAILS2 0x9094 +/* [R 26] Details of first target VF request accessing VF GRC space that + * failed permission check. [14:0] Address. [15] w_nr: 0 - Read; 1 - Write. + * [21:16] VFID. [24:22] - PFID. [25] valid - indicates if there was a + * request accessing VF GRC space that failed permission check since the + * last time this register was cleared. Permission checks are: function + * permission; R/W permission; address range permission. + */ +#define PGLUE_B_REG_VF_GRC_SPACE_VIOLATION_DETAILS 0x9234 +/* [R 31] Details of first target VF request with length violation (too many + * DWs) accessing BAR0. [12:0] Address in DWs (bits [14:2] of byte address). + * [14:13] BAR. [20:15] VFID. [23:21] - PFID. [29:24] - Length in DWs. [30] + * valid - indicates if there was a request with length violation since the + * last time this register was cleared. Length violations: length of more + * than 2DWs; length of 2DWs and address not QW aligned; window is GRC and + * length is more than 1 DW. + */ +#define PGLUE_B_REG_VF_LENGTH_VIOLATION_DETAILS 0x9230 +/* [R 8] Was_error indication dirty bits for PFs 0 to 7. 
Each bit indicates + * that there was a completion with uncorrectable error for the + * corresponding PF. Set by PXP. Reset by MCP writing 1 to + * was_error_pf_7_0_clr. + */ +#define PGLUE_B_REG_WAS_ERROR_PF_7_0 0x907c +/* [W 8] Was_error indication dirty bits clear for PFs 0 to 7. MCP writes 1 + * to a bit in this register in order to clear the corresponding bit in + * flr_request_pf_7_0 register. + */ +#define PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR 0x9470 +/* [R 32] Was_error indication dirty bits for VFs 96 to 127. Each bit + * indicates that there was a completion with uncorrectable error for the + * corresponding VF. Set by PXP. Reset by MCP writing 1 to + * was_error_vf_127_96_clr. + */ +#define PGLUE_B_REG_WAS_ERROR_VF_127_96 0x9078 +/* [W 32] Was_error indication dirty bits clear for VFs 96 to 127. MCP + * writes 1 to a bit in this register in order to clear the corresponding + * bit in was_error_vf_127_96 register. + */ +#define PGLUE_B_REG_WAS_ERROR_VF_127_96_CLR 0x9474 +/* [R 32] Was_error indication dirty bits for VFs 0 to 31. Each bit + * indicates that there was a completion with uncorrectable error for the + * corresponding VF. Set by PXP. Reset by MCP writing 1 to + * was_error_vf_31_0_clr. + */ +#define PGLUE_B_REG_WAS_ERROR_VF_31_0 0x906c +/* [W 32] Was_error indication dirty bits clear for VFs 0 to 31. MCP writes + * 1 to a bit in this register in order to clear the corresponding bit in + * was_error_vf_31_0 register. + */ +#define PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR 0x9478 +/* [R 32] Was_error indication dirty bits for VFs 32 to 63. Each bit + * indicates that there was a completion with uncorrectable error for the + * corresponding VF. Set by PXP. Reset by MCP writing 1 to + * was_error_vf_63_32_clr. + */ +#define PGLUE_B_REG_WAS_ERROR_VF_63_32 0x9070 +/* [W 32] Was_error indication dirty bits clear for VFs 32 to 63. MCP writes + * 1 to a bit in this register in order to clear the corresponding bit in + * was_error_vf_63_32 register. + */ +#define PGLUE_B_REG_WAS_ERROR_VF_63_32_CLR 0x947c +/* [R 32] Was_error indication dirty bits for VFs 64 to 95. Each bit + * indicates that there was a completion with uncorrectable error for the + * corresponding VF. Set by PXP. Reset by MCP writing 1 to + * was_error_vf_95_64_clr. + */ +#define PGLUE_B_REG_WAS_ERROR_VF_95_64 0x9074 +/* [W 32] Was_error indication dirty bits clear for VFs 64 to 95. MCP writes + * 1 to a bit in this register in order to clear the corresponding bit in + * was_error_vf_95_64 register. + */ +#define PGLUE_B_REG_WAS_ERROR_VF_95_64_CLR 0x9480 +#define PRS_REG_A_PRSU_20 0x40134 +/* [R 8] debug only: CFC load request current credit. Transaction based. */ +#define PRS_REG_CFC_LD_CURRENT_CREDIT 0x40164 +/* [R 8] debug only: CFC search request current credit. Transaction based. */ +#define PRS_REG_CFC_SEARCH_CURRENT_CREDIT 0x40168 +/* [RW 6] The initial credit for the search message to the CFC interface. + * Credit is transaction based. + */ +#define PRS_REG_CFC_SEARCH_INITIAL_CREDIT 0x4011c +/* [RW 24] CID for port 0 if no match */ +#define PRS_REG_CID_PORT_0 0x400fc +/* [RW 1] Indicates if in outer vlan mode. 0=non-outer-vlan mode; 1 = outer + * vlan mode. + */ +#define PRS_REG_E1HOV_MODE 0x401c8 +/* [R 6] Bit-map indicating which L2 hdrs may appear after the basic + * Ethernet header. + */ +#define PRS_REG_HDRS_AFTER_BASIC 0x40238 +/* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic + * Ethernet header for port 0 packets. 
+ */ +#define PRS_REG_HDRS_AFTER_BASIC_PORT_0 0x40270 +#define PRS_REG_HDRS_AFTER_BASIC_PORT_1 0x40290 +/* [R 6] Bit-map indicating which L2 hdrs may appear after L2 tag 0 */ +#define PRS_REG_HDRS_AFTER_TAG_0 0x40248 +/* [RW 6] Bit-map indicating which L2 hdrs may appear after L2 tag 0 for + * port 0 packets + */ +#define PRS_REG_HDRS_AFTER_TAG_0_PORT_0 0x40280 +#define PRS_REG_HDRS_AFTER_TAG_0_PORT_1 0x402a0 +/* [R 6] Bit-map indicating which headers must appear in the packet */ +#define PRS_REG_MUST_HAVE_HDRS 0x40254 +/* [RW 6] Bit-map indicating which headers must appear in the packet for + * port 0 packets + */ +#define PRS_REG_MUST_HAVE_HDRS_PORT_0 0x4028c +#define PRS_REG_MUST_HAVE_HDRS_PORT_1 0x402ac +#define PRS_REG_NIC_MODE 0x40138 +/* [ST 24] The number of input packets */ +#define PRS_REG_NUM_OF_PACKETS 0x40124 +/* [R 2] debug only: Number of pending requests for CAC on port 0. */ +#define PRS_REG_PENDING_BRB_CAC0_RQ 0x40174 +/* [R 2] debug only: Number of pending requests for header parsing. */ +#define PRS_REG_PENDING_BRB_PRS_RQ 0x40170 +/* [RW 1] Interrupt mask register #0 read/write */ +#define PRS_REG_PRS_INT_MASK 0x40194 +/* [R 1] Interrupt register #0 read */ +#define PRS_REG_PRS_INT_STS 0x40188 +/* [RC 1] Interrupt register #0 read clear */ +#define PRS_REG_PRS_INT_STS_CLR 0x4018c +/* [RW 8] Parity mask register #0 read/write */ +#define PRS_REG_PRS_PRTY_MASK 0x401a4 +/* [R 8] Parity register #0 read */ +#define PRS_REG_PRS_PRTY_STS 0x40198 +/* [RC 8] Parity register #0 read clear */ +#define PRS_REG_PRS_PRTY_STS_CLR 0x4019c +/* [R 32] debug only: Serial number status lsb 32 bits. '1' indicates this + * serail number was released by SDM but cannot be used because a previous + * serial number was not released. + */ +#define PRS_REG_SERIAL_NUM_STATUS_LSB 0x40154 +/* [R 32] debug only: Serial number status msb 32 bits. '1' indicates this + * serail number was released by SDM but cannot be used because a previous + * serial number was not released. + */ +#define PRS_REG_SERIAL_NUM_STATUS_MSB 0x40158 +/* [R 4] debug only: SRC current credit. Transaction based. */ +#define PRS_REG_SRC_CURRENT_CREDIT 0x4016c +/* [RW 16] The Ethernet type value for L2 tag 0 */ +#define PRS_REG_TAG_ETHERTYPE_0 0x401d4 +/* [RW 4] The length of the info field for L2 tag 0. The length is between + * 2B and 14B; in 2B granularity + */ +#define PRS_REG_TAG_LEN_0 0x4022c +/* [R 8] debug only: TCM current credit. Cycle based. */ +#define PRS_REG_TCM_CURRENT_CREDIT 0x40160 +/* [R 8] debug only: TSDM current credit. Transaction based. */ +#define PRS_REG_TSDM_CURRENT_CREDIT 0x4015c +/* [RW 16] One of 8 values that should be compared to type in Ethernet + * parsing. If there is a match; the field after Ethernet is the first VLAN. + * Reset value is 0x8100 which is the standard VLAN type. Note that when + * checking second VLAN; type is compared only to 0x8100. 
+ */ +#define PRS_REG_VLAN_TYPE_0 0x401a8 +#define PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT (0x1 << 19) +#define PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF (0x1 << 20) +#define PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN (0x1 << 22) +#define PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED (0x1 << 23) +#define PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED (0x1 << 24) +#define PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR (0x1 << 7) +#define PXP2_PXP2_INT_STS_CLR_0_REG_WR_PGLUE_EOP_ERROR (0x1 << 7) +/* [R 7] Debug only: Number of used entries in the data FIFO */ +#define PXP2_REG_HST_DATA_FIFO_STATUS 0x12047c +/* [R 7] Debug only: Number of used entries in the header FIFO */ +#define PXP2_REG_HST_HEADER_FIFO_STATUS 0x120478 +#define PXP2_REG_PGL_ADDR_88_F0 0x120534 +/* [R 32] GRC address for configuration access to PCIE config address 0x88. + * any write to this PCIE address will cause a GRC write access to the + * address that's in this register + */ +#define PXP2_REG_PGL_ADDR_88_F1 0x120544 +#define PXP2_REG_PGL_ADDR_8C_F0 0x120538 +/* [R 32] GRC address for configuration access to PCIE config address 0x8c. + * any write to this PCIE address will cause a GRC write access to the + * address that's in this register + */ +#define PXP2_REG_PGL_ADDR_8C_F1 0x120548 +#define PXP2_REG_PGL_ADDR_90_F0 0x12053c +/* [R 32] GRC address for configuration access to PCIE config address 0x90. + * any write to this PCIE address will cause a GRC write access to the + * address that's in this register + */ +#define PXP2_REG_PGL_ADDR_90_F1 0x12054c +#define PXP2_REG_PGL_ADDR_94_F0 0x120540 +/* [R 32] GRC address for configuration access to PCIE config address 0x94. + * any write to this PCIE address will cause a GRC write access to the + * address that's in this register + */ +#define PXP2_REG_PGL_ADDR_94_F1 0x120550 +/* [RW 32] third dword data of expansion rom request. this register is + * special. reading from it provides a vector of outstanding read requests. if + * a bit is zero it means that a read request on the corresponding tag did + * not finish yet (not all completions have arrived for it) + */ +#define PXP2_REG_PGL_EXP_ROM2 0x120808 +/* [RW 16] this field allows one function to pretend to be another function + * when accessing any BAR mapped resource within the device. the value of + * the field is the number of the function that will be accessed + * effectively. after software writes to this field it must read it in order to + * know that the new value is updated. Bits [15] - force. Bits [14] - path + * ID. Bits [13:10] - Reserved. Bits [9:4] - VFID. Bits [3] - VF valid. Bits + * [2:0] - PFID. + */ +#define PXP2_REG_PGL_PRETEND_FUNC_F0 0x120674 +/* [RW 16] this field allows one function to pretend to be another function + * when accessing any BAR mapped resource within the device. the value of + * the field is the number of the function that will be accessed + * effectively. after software writes to this field it must read it in order to + * know that the new value is updated. Bits [15] - force. Bits [14] - path + * ID. Bits [13:10] - Reserved. Bits [9:4] - VFID. Bits [3] - VF valid. Bits + * [2:0] - PFID. 
+ */ +#define PXP2_REG_PGL_PRETEND_FUNC_F1 0x120678 +/* [R 1] this bit indicates that a read request was blocked because of + * bus_master_en was deasserted + */ +#define PXP2_REG_PGL_READ_BLOCKED 0x120568 +#define PXP2_REG_PGL_TAGS_LIMIT 0x1205a8 +/* [R 21] debug only */ +#define PXP2_REG_PGL_TXW_CDTS 0x12052c +/* [R 1] this bit indicates that a write request was blocked because of + * bus_master_en was deasserted + */ +#define PXP2_REG_PGL_WRITE_BLOCKED 0x120564 +#define PXP2_REG_PSWRQ_BW_ADD1 0x1201c0 +#define PXP2_REG_PSWRQ_BW_ADD10 0x1201e4 +#define PXP2_REG_PSWRQ_BW_ADD11 0x1201e8 +#define PXP2_REG_PSWRQ_BW_ADD2 0x1201c4 +#define PXP2_REG_PSWRQ_BW_ADD28 0x120228 +#define PXP2_REG_PSWRQ_BW_ADD3 0x1201c8 +#define PXP2_REG_PSWRQ_BW_ADD6 0x1201d4 +#define PXP2_REG_PSWRQ_BW_ADD7 0x1201d8 +#define PXP2_REG_PSWRQ_BW_ADD8 0x1201dc +#define PXP2_REG_PSWRQ_BW_ADD9 0x1201e0 +#define PXP2_REG_PSWRQ_BW_CREDIT 0x12032c +#define PXP2_REG_PSWRQ_BW_L1 0x1202b0 +#define PXP2_REG_PSWRQ_BW_L10 0x1202d4 +#define PXP2_REG_PSWRQ_BW_L11 0x1202d8 +#define PXP2_REG_PSWRQ_BW_L2 0x1202b4 +#define PXP2_REG_PSWRQ_BW_L28 0x120318 +#define PXP2_REG_PSWRQ_BW_L3 0x1202b8 +#define PXP2_REG_PSWRQ_BW_L6 0x1202c4 +#define PXP2_REG_PSWRQ_BW_L7 0x1202c8 +#define PXP2_REG_PSWRQ_BW_L8 0x1202cc +#define PXP2_REG_PSWRQ_BW_L9 0x1202d0 +#define PXP2_REG_PSWRQ_BW_RD 0x120324 +#define PXP2_REG_PSWRQ_BW_UB1 0x120238 +#define PXP2_REG_PSWRQ_BW_UB10 0x12025c +#define PXP2_REG_PSWRQ_BW_UB11 0x120260 +#define PXP2_REG_PSWRQ_BW_UB2 0x12023c +#define PXP2_REG_PSWRQ_BW_UB28 0x1202a0 +#define PXP2_REG_PSWRQ_BW_UB3 0x120240 +#define PXP2_REG_PSWRQ_BW_UB6 0x12024c +#define PXP2_REG_PSWRQ_BW_UB7 0x120250 +#define PXP2_REG_PSWRQ_BW_UB8 0x120254 +#define PXP2_REG_PSWRQ_BW_UB9 0x120258 +#define PXP2_REG_PSWRQ_BW_WR 0x120328 +#define PXP2_REG_PSWRQ_CDU0_L2P 0x120000 +#define PXP2_REG_PSWRQ_QM0_L2P 0x120038 +#define PXP2_REG_PSWRQ_SRC0_L2P 0x120054 +#define PXP2_REG_PSWRQ_TM0_L2P 0x12001c +#define PXP2_REG_PSWRQ_TSDM0_L2P 0x1200e0 +/* [RW 32] Interrupt mask register #0 read/write */ +#define PXP2_REG_PXP2_INT_MASK_0 0x120578 +#define PXP2_REG_PXP2_INT_MASK_1 0x120614 +/* [R 32] Interrupt register #0 read */ +#define PXP2_REG_PXP2_INT_STS_0 0x12056c +#define PXP2_REG_PXP2_INT_STS_1 0x120608 +/* [RC 32] Interrupt register #0 read clear */ +#define PXP2_REG_PXP2_INT_STS_CLR_0 0x120570 +#define PXP2_REG_PXP2_INT_STS_CLR_1 0x12060c +/* [RW 32] Parity mask register #0 read/write */ +#define PXP2_REG_PXP2_PRTY_MASK_0 0x120588 +#define PXP2_REG_PXP2_PRTY_MASK_1 0x120598 +/* [R 32] Parity register #0 read */ +#define PXP2_REG_PXP2_PRTY_STS_0 0x12057c +#define PXP2_REG_PXP2_PRTY_STS_1 0x12058c +/* [RC 32] Parity register #0 read clear */ +#define PXP2_REG_PXP2_PRTY_STS_CLR_0 0x120580 +#define PXP2_REG_PXP2_PRTY_STS_CLR_1 0x120590 +/* [R 1] Debug only: The 'almost full' indication from each fifo (gives + * indication about backpressure) + */ +#define PXP2_REG_RD_ALMOST_FULL_0 0x120424 +/* [R 8] Debug only: The blocks counter - number of unused block ids */ +#define PXP2_REG_RD_BLK_CNT 0x120418 +/* [RW 8] Debug only: Total number of available blocks in Tetris Buffer. + * Must be bigger than 6. Normally should not be changed. + */ +#define PXP2_REG_RD_BLK_NUM_CFG 0x12040c +/* [RW 2] CDU byte swapping mode configuration for master read requests */ +#define PXP2_REG_RD_CDURD_SWAP_MODE 0x120404 +/* [R 29] Details of first request with error on receive side: [15:0] - Echo + * ID. [28:16] - sub-request length plus start_offset_2_0 minus 1. 
+ */ +#define PXP2_REG_RD_CPL_ERR_DETAILS 0x120778 +/* [R 10] Details of first request with error on receive side: [4:0] - VQ + * ID. [8:5] - client ID. [9] - valid - indicates if there was a completion + * error since the last time this register was read. + */ +#define PXP2_REG_RD_CPL_ERR_DETAILS2 0x12077c +/* [RW 1] When '1'; inputs to the PSWRD block are ignored */ +#define PXP2_REG_RD_DISABLE_INPUTS 0x120374 +/* [R 1] PSWRD internal memories initialization is done */ +#define PXP2_REG_RD_INIT_DONE 0x120370 +/* [R 1] Debug only: Indication if delivery ports are idle */ +#define PXP2_REG_RD_PORT_IS_IDLE_0 0x12041c +#define PXP2_REG_RD_PORT_IS_IDLE_1 0x120420 +/* [RW 2] QM byte swapping mode configuration for master read requests */ +#define PXP2_REG_RD_QM_SWAP_MODE 0x1203f8 +/* [RW 2] SRC byte swapping mode configuration for master read requests */ +#define PXP2_REG_RD_SRC_SWAP_MODE 0x120400 +/* [R 7] Debug only: The SR counter - number of unused sub request ids */ +#define PXP2_REG_RD_SR_CNT 0x120414 +/* [RW 7] Debug only: Total number of available PCI read sub-requests. Must + * be bigger than 1. Normally should not be changed. + */ +#define PXP2_REG_RD_SR_NUM_CFG 0x120408 +/* [RW 1] Signals the PSWRD block to start initializing internal memories */ +#define PXP2_REG_RD_START_INIT 0x12036c +/* [RW 2] TM byte swapping mode configuration for master read requests */ +#define PXP2_REG_RD_TM_SWAP_MODE 0x1203fc +/* [RW 10] Bandwidth addition to VQ0 write requests */ +#define PXP2_REG_RQ_BW_RD_ADD0 0x1201bc +/* [RW 10] Bandwidth addition to VQ12 read requests */ +#define PXP2_REG_RQ_BW_RD_ADD12 0x1201ec +/* [RW 10] Bandwidth addition to VQ13 read requests */ +#define PXP2_REG_RQ_BW_RD_ADD13 0x1201f0 +/* [RW 10] Bandwidth addition to VQ14 read requests */ +#define PXP2_REG_RQ_BW_RD_ADD14 0x1201f4 +/* [RW 10] Bandwidth addition to VQ15 read requests */ +#define PXP2_REG_RQ_BW_RD_ADD15 0x1201f8 +/* [RW 10] Bandwidth addition to VQ16 read requests */ +#define PXP2_REG_RQ_BW_RD_ADD16 0x1201fc +/* [RW 10] Bandwidth addition to VQ17 read requests */ +#define PXP2_REG_RQ_BW_RD_ADD17 0x120200 +/* [RW 10] Bandwidth addition to VQ18 read requests */ +#define PXP2_REG_RQ_BW_RD_ADD18 0x120204 +/* [RW 10] Bandwidth addition to VQ19 read requests */ +#define PXP2_REG_RQ_BW_RD_ADD19 0x120208 +/* [RW 10] Bandwidth addition to VQ20 read requests */ +#define PXP2_REG_RQ_BW_RD_ADD20 0x12020c +/* [RW 10] Bandwidth addition to VQ22 read requests */ +#define PXP2_REG_RQ_BW_RD_ADD22 0x120210 +/* [RW 10] Bandwidth addition to VQ23 read requests */ +#define PXP2_REG_RQ_BW_RD_ADD23 0x120214 +/* [RW 10] Bandwidth addition to VQ24 read requests */ +#define PXP2_REG_RQ_BW_RD_ADD24 0x120218 +/* [RW 10] Bandwidth addition to VQ25 read requests */ +#define PXP2_REG_RQ_BW_RD_ADD25 0x12021c +/* [RW 10] Bandwidth addition to VQ26 read requests */ +#define PXP2_REG_RQ_BW_RD_ADD26 0x120220 +/* [RW 10] Bandwidth addition to VQ27 read requests */ +#define PXP2_REG_RQ_BW_RD_ADD27 0x120224 +/* [RW 10] Bandwidth addition to VQ4 read requests */ +#define PXP2_REG_RQ_BW_RD_ADD4 0x1201cc +/* [RW 10] Bandwidth addition to VQ5 read requests */ +#define PXP2_REG_RQ_BW_RD_ADD5 0x1201d0 +/* [RW 10] Bandwidth Typical L for VQ0 Read requests */ +#define PXP2_REG_RQ_BW_RD_L0 0x1202ac +/* [RW 10] Bandwidth Typical L for VQ12 Read requests */ +#define PXP2_REG_RQ_BW_RD_L12 0x1202dc +/* [RW 10] Bandwidth Typical L for VQ13 Read requests */ +#define PXP2_REG_RQ_BW_RD_L13 0x1202e0 +/* [RW 10] Bandwidth Typical L for VQ14 Read requests */ +#define 
PXP2_REG_RQ_BW_RD_L14 0x1202e4 +/* [RW 10] Bandwidth Typical L for VQ15 Read requests */ +#define PXP2_REG_RQ_BW_RD_L15 0x1202e8 +/* [RW 10] Bandwidth Typical L for VQ16 Read requests */ +#define PXP2_REG_RQ_BW_RD_L16 0x1202ec +/* [RW 10] Bandwidth Typical L for VQ17 Read requests */ +#define PXP2_REG_RQ_BW_RD_L17 0x1202f0 +/* [RW 10] Bandwidth Typical L for VQ18 Read requests */ +#define PXP2_REG_RQ_BW_RD_L18 0x1202f4 +/* [RW 10] Bandwidth Typical L for VQ19 Read requests */ +#define PXP2_REG_RQ_BW_RD_L19 0x1202f8 +/* [RW 10] Bandwidth Typical L for VQ20 Read requests */ +#define PXP2_REG_RQ_BW_RD_L20 0x1202fc +/* [RW 10] Bandwidth Typical L for VQ22 Read requests */ +#define PXP2_REG_RQ_BW_RD_L22 0x120300 +/* [RW 10] Bandwidth Typical L for VQ23 Read requests */ +#define PXP2_REG_RQ_BW_RD_L23 0x120304 +/* [RW 10] Bandwidth Typical L for VQ24 Read requests */ +#define PXP2_REG_RQ_BW_RD_L24 0x120308 +/* [RW 10] Bandwidth Typical L for VQ25 Read requests */ +#define PXP2_REG_RQ_BW_RD_L25 0x12030c +/* [RW 10] Bandwidth Typical L for VQ26 Read requests */ +#define PXP2_REG_RQ_BW_RD_L26 0x120310 +/* [RW 10] Bandwidth Typical L for VQ27 Read requests */ +#define PXP2_REG_RQ_BW_RD_L27 0x120314 +/* [RW 10] Bandwidth Typical L for VQ4 Read requests */ +#define PXP2_REG_RQ_BW_RD_L4 0x1202bc +/* [RW 10] Bandwidth Typical L for VQ5 Read- currently not used */ +#define PXP2_REG_RQ_BW_RD_L5 0x1202c0 +/* [RW 7] Bandwidth upper bound for VQ0 read requests */ +#define PXP2_REG_RQ_BW_RD_UBOUND0 0x120234 +/* [RW 7] Bandwidth upper bound for VQ12 read requests */ +#define PXP2_REG_RQ_BW_RD_UBOUND12 0x120264 +/* [RW 7] Bandwidth upper bound for VQ13 read requests */ +#define PXP2_REG_RQ_BW_RD_UBOUND13 0x120268 +/* [RW 7] Bandwidth upper bound for VQ14 read requests */ +#define PXP2_REG_RQ_BW_RD_UBOUND14 0x12026c +/* [RW 7] Bandwidth upper bound for VQ15 read requests */ +#define PXP2_REG_RQ_BW_RD_UBOUND15 0x120270 +/* [RW 7] Bandwidth upper bound for VQ16 read requests */ +#define PXP2_REG_RQ_BW_RD_UBOUND16 0x120274 +/* [RW 7] Bandwidth upper bound for VQ17 read requests */ +#define PXP2_REG_RQ_BW_RD_UBOUND17 0x120278 +/* [RW 7] Bandwidth upper bound for VQ18 read requests */ +#define PXP2_REG_RQ_BW_RD_UBOUND18 0x12027c +/* [RW 7] Bandwidth upper bound for VQ19 read requests */ +#define PXP2_REG_RQ_BW_RD_UBOUND19 0x120280 +/* [RW 7] Bandwidth upper bound for VQ20 read requests */ +#define PXP2_REG_RQ_BW_RD_UBOUND20 0x120284 +/* [RW 7] Bandwidth upper bound for VQ22 read requests */ +#define PXP2_REG_RQ_BW_RD_UBOUND22 0x120288 +/* [RW 7] Bandwidth upper bound for VQ23 read requests */ +#define PXP2_REG_RQ_BW_RD_UBOUND23 0x12028c +/* [RW 7] Bandwidth upper bound for VQ24 read requests */ +#define PXP2_REG_RQ_BW_RD_UBOUND24 0x120290 +/* [RW 7] Bandwidth upper bound for VQ25 read requests */ +#define PXP2_REG_RQ_BW_RD_UBOUND25 0x120294 +/* [RW 7] Bandwidth upper bound for VQ26 read requests */ +#define PXP2_REG_RQ_BW_RD_UBOUND26 0x120298 +/* [RW 7] Bandwidth upper bound for VQ27 read requests */ +#define PXP2_REG_RQ_BW_RD_UBOUND27 0x12029c +/* [RW 7] Bandwidth upper bound for VQ4 read requests */ +#define PXP2_REG_RQ_BW_RD_UBOUND4 0x120244 +/* [RW 7] Bandwidth upper bound for VQ5 read requests */ +#define PXP2_REG_RQ_BW_RD_UBOUND5 0x120248 +/* [RW 10] Bandwidth addition to VQ29 write requests */ +#define PXP2_REG_RQ_BW_WR_ADD29 0x12022c +/* [RW 10] Bandwidth addition to VQ30 write requests */ +#define PXP2_REG_RQ_BW_WR_ADD30 0x120230 +/* [RW 10] Bandwidth Typical L for VQ29 Write requests */ +#define 
PXP2_REG_RQ_BW_WR_L29 0x12031c +/* [RW 10] Bandwidth Typical L for VQ30 Write requests */ +#define PXP2_REG_RQ_BW_WR_L30 0x120320 +/* [RW 7] Bandwidth upper bound for VQ29 */ +#define PXP2_REG_RQ_BW_WR_UBOUND29 0x1202a4 +/* [RW 7] Bandwidth upper bound for VQ30 */ +#define PXP2_REG_RQ_BW_WR_UBOUND30 0x1202a8 +/* [RW 18] external first_mem_addr field in L2P table for CDU module port 0 */ +#define PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR 0x120008 +/* [RW 2] Endian mode for cdu */ +#define PXP2_REG_RQ_CDU_ENDIAN_M 0x1201a0 +#define PXP2_REG_RQ_CDU_FIRST_ILT 0x12061c +#define PXP2_REG_RQ_CDU_LAST_ILT 0x120620 +/* [RW 4] page size in L2P table for CDU module; -4k; -8k; -16k; -32k; -64k; + * -128k; -256k; -512k; -1M; -2M; 0-4M + */ +#define PXP2_REG_RQ_CDU_P_SIZE 0x120018 +/* [R 1] '1' indicates that the requester has finished its internal + * configuration + */ +#define PXP2_REG_RQ_CFG_DONE 0x1201b4 +/* [RW 2] Endian mode for debug */ +#define PXP2_REG_RQ_DBG_ENDIAN_M 0x1201a4 +/* [RW 1] When '1'; requests will enter input buffers but won't get out + * towards the glue + */ +#define PXP2_REG_RQ_DISABLE_INPUTS 0x120330 +/* [RW 4] Determines alignment of write SRs when a request is split into + * several SRs. 0 - 8B aligned. 1 - 64B aligned. 2 - 128B aligned. 3 - 256B + * aligned. 4 - 512B aligned. + */ +#define PXP2_REG_RQ_DRAM_ALIGN 0x1205b0 +/* [RW 4] Determines alignment of read SRs when a request is split into + * several SRs. 0 - 8B aligned. 1 - 64B aligned. 2 - 128B aligned. 3 - 256B + * aligned. 4 - 512B aligned. + */ +#define PXP2_REG_RQ_DRAM_ALIGN_RD 0x12092c +/* [RW 1] when set the new alignment method (E2) will be applied; when reset + * the original alignment method (E1 E1H) will be applied + */ +#define PXP2_REG_RQ_DRAM_ALIGN_SEL 0x120930 +/* [R 32] Status signals in pswrq_garb module */ +#define PXP2_REG_RQ_GARB 0x120748 +/* [RW 2] Endian mode for hc */ +#define PXP2_REG_RQ_HC_ENDIAN_M 0x1201a8 +/* [WB 53] Onchip address table */ +#define PXP2_REG_RQ_ONCHIP_AT 0x122000 +/* [WB 53] Onchip address table - B0 */ +#define PXP2_REG_RQ_ONCHIP_AT_B0 0x128000 +/* [RW 13] Pending read limiter threshold; in Dwords */ +#define PXP2_REG_RQ_PDR_LIMIT 0x12033c +/* [RW 2] Endian mode for qm */ +#define PXP2_REG_RQ_QM_ENDIAN_M 0x120194 +#define PXP2_REG_RQ_QM_FIRST_ILT 0x120634 +#define PXP2_REG_RQ_QM_LAST_ILT 0x120638 +/* [RW 4] page size in L2P table for QM module; -4k; -8k; -16k; -32k; -64k; + * -128k; -256k; -512k; -1M; -2M; 0-4M + */ +#define PXP2_REG_RQ_QM_P_SIZE 0x120050 +/* [RW 1] '1' indicates that the RBC has finished configuring the PSWRQ */ +#define PXP2_REG_RQ_RBC_DONE 0x1201b0 +/* [RW 3] Max burst size field for read requests port 0; 000 - 128B; + * 001 - 256B; 010 - 512B; 011 - 1K; 100 - 2K; 101 - 4K + */ +#define PXP2_REG_RQ_RD_MBS0 0x120160 +/* [RW 3] Max burst size field for read requests port 1; 000 - 128B; + * 001 - 256B; 010 - 512B; 011 - 1K; 100 - 2K; 101 - 4K + */ +#define PXP2_REG_RQ_RD_MBS1 0x120168 +/* [RW 2] Endian mode for src */ +#define PXP2_REG_RQ_SRC_ENDIAN_M 0x12019c +#define PXP2_REG_RQ_SRC_FIRST_ILT 0x12063c +#define PXP2_REG_RQ_SRC_LAST_ILT 0x120640 +/* [RW 4] page size in L2P table for SRC module; -4k; -8k; -16k; -32k; -64k; + * -128k; -256k; -512k; -1M; -2M; 0-4M + */ +#define PXP2_REG_RQ_SRC_P_SIZE 0x12006c +/* [RW 2] Endian mode for tm */ +#define PXP2_REG_RQ_TM_ENDIAN_M 0x120198 +#define PXP2_REG_RQ_TM_FIRST_ILT 0x120644 +#define PXP2_REG_RQ_TM_LAST_ILT 0x120648 +/* [RW 4] page size in L2P table for TM module; -4k; -8k; -16k; -32k; -64k; + * -128k; -256k; -512k; -1M; -2M; 0-4M + */ +#define 
PXP2_REG_RQ_TM_P_SIZE 0x120034 +/* [R 5] Number of entries in the ufifo; his fifo has l2p completions */ +#define PXP2_REG_RQ_UFIFO_NUM_OF_ENTRY 0x12080c +/* [RW 18] external first_mem_addr field in L2P table for USDM module port 0 */ +#define PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR 0x120094 +/* [R 8] Number of entries occupied by vq 0 in pswrq memory */ +#define PXP2_REG_RQ_VQ0_ENTRY_CNT 0x120810 +/* [R 8] Number of entries occupied by vq 10 in pswrq memory */ +#define PXP2_REG_RQ_VQ10_ENTRY_CNT 0x120818 +/* [R 8] Number of entries occupied by vq 11 in pswrq memory */ +#define PXP2_REG_RQ_VQ11_ENTRY_CNT 0x120820 +/* [R 8] Number of entries occupied by vq 12 in pswrq memory */ +#define PXP2_REG_RQ_VQ12_ENTRY_CNT 0x120828 +/* [R 8] Number of entries occupied by vq 13 in pswrq memory */ +#define PXP2_REG_RQ_VQ13_ENTRY_CNT 0x120830 +/* [R 8] Number of entries occupied by vq 14 in pswrq memory */ +#define PXP2_REG_RQ_VQ14_ENTRY_CNT 0x120838 +/* [R 8] Number of entries occupied by vq 15 in pswrq memory */ +#define PXP2_REG_RQ_VQ15_ENTRY_CNT 0x120840 +/* [R 8] Number of entries occupied by vq 16 in pswrq memory */ +#define PXP2_REG_RQ_VQ16_ENTRY_CNT 0x120848 +/* [R 8] Number of entries occupied by vq 17 in pswrq memory */ +#define PXP2_REG_RQ_VQ17_ENTRY_CNT 0x120850 +/* [R 8] Number of entries occupied by vq 18 in pswrq memory */ +#define PXP2_REG_RQ_VQ18_ENTRY_CNT 0x120858 +/* [R 8] Number of entries occupied by vq 19 in pswrq memory */ +#define PXP2_REG_RQ_VQ19_ENTRY_CNT 0x120860 +/* [R 8] Number of entries occupied by vq 1 in pswrq memory */ +#define PXP2_REG_RQ_VQ1_ENTRY_CNT 0x120868 +/* [R 8] Number of entries occupied by vq 20 in pswrq memory */ +#define PXP2_REG_RQ_VQ20_ENTRY_CNT 0x120870 +/* [R 8] Number of entries occupied by vq 21 in pswrq memory */ +#define PXP2_REG_RQ_VQ21_ENTRY_CNT 0x120878 +/* [R 8] Number of entries occupied by vq 22 in pswrq memory */ +#define PXP2_REG_RQ_VQ22_ENTRY_CNT 0x120880 +/* [R 8] Number of entries occupied by vq 23 in pswrq memory */ +#define PXP2_REG_RQ_VQ23_ENTRY_CNT 0x120888 +/* [R 8] Number of entries occupied by vq 24 in pswrq memory */ +#define PXP2_REG_RQ_VQ24_ENTRY_CNT 0x120890 +/* [R 8] Number of entries occupied by vq 25 in pswrq memory */ +#define PXP2_REG_RQ_VQ25_ENTRY_CNT 0x120898 +/* [R 8] Number of entries occupied by vq 26 in pswrq memory */ +#define PXP2_REG_RQ_VQ26_ENTRY_CNT 0x1208a0 +/* [R 8] Number of entries occupied by vq 27 in pswrq memory */ +#define PXP2_REG_RQ_VQ27_ENTRY_CNT 0x1208a8 +/* [R 8] Number of entries occupied by vq 28 in pswrq memory */ +#define PXP2_REG_RQ_VQ28_ENTRY_CNT 0x1208b0 +/* [R 8] Number of entries occupied by vq 29 in pswrq memory */ +#define PXP2_REG_RQ_VQ29_ENTRY_CNT 0x1208b8 +/* [R 8] Number of entries occupied by vq 2 in pswrq memory */ +#define PXP2_REG_RQ_VQ2_ENTRY_CNT 0x1208c0 +/* [R 8] Number of entries occupied by vq 30 in pswrq memory */ +#define PXP2_REG_RQ_VQ30_ENTRY_CNT 0x1208c8 +/* [R 8] Number of entries occupied by vq 31 in pswrq memory */ +#define PXP2_REG_RQ_VQ31_ENTRY_CNT 0x1208d0 +/* [R 8] Number of entries occupied by vq 3 in pswrq memory */ +#define PXP2_REG_RQ_VQ3_ENTRY_CNT 0x1208d8 +/* [R 8] Number of entries occupied by vq 4 in pswrq memory */ +#define PXP2_REG_RQ_VQ4_ENTRY_CNT 0x1208e0 +/* [R 8] Number of entries occupied by vq 5 in pswrq memory */ +#define PXP2_REG_RQ_VQ5_ENTRY_CNT 0x1208e8 +/* [R 8] Number of entries occupied by vq 6 in pswrq memory */ +#define PXP2_REG_RQ_VQ6_ENTRY_CNT 0x1208f0 +/* [R 8] Number of entries occupied by vq 7 in pswrq memory */ +#define 
PXP2_REG_RQ_VQ7_ENTRY_CNT 0x1208f8 +/* [R 8] Number of entries occupied by vq 8 in pswrq memory */ +#define PXP2_REG_RQ_VQ8_ENTRY_CNT 0x120900 +/* [R 8] Number of entries occupied by vq 9 in pswrq memory */ +#define PXP2_REG_RQ_VQ9_ENTRY_CNT 0x120908 +/* [RW 3] Max burst size field for write requests port 0; 000 - 128B; + * 001 - 256B; 010 - 512B; + */ +#define PXP2_REG_RQ_WR_MBS0 0x12015c +/* [RW 3] Max burst size field for write requests port 1; 000 - 128B; + * 001 - 256B; 010 - 512B; + */ +#define PXP2_REG_RQ_WR_MBS1 0x120164 +/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the + * buffer reaches this number has_payload will be asserted. 1024B is not a + * real MPS; it is a way of indicating that the client needs to wait for EOP + * before asserting has_payload. Register should be initialized according to + * has_payload value. + */ +#define PXP2_REG_WR_CDU_MPS 0x1205f0 +/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the + * buffer reaches this number has_payload will be asserted. 1024B is not a + * real MPS; it is a way of indicating that the client needs to wait for EOP + * before asserting has_payload. Register should be initialized according to + * has_payload value. + */ +#define PXP2_REG_WR_CSDM_MPS 0x1205d0 +/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the + * buffer reaches this number has_payload will be asserted. 1024B is not a + * real MPS; it is a way of indicating that the client needs to wait for EOP + * before asserting has_payload. Register should be initialized according to + * has_payload value. + */ +#define PXP2_REG_WR_DBG_MPS 0x1205e8 +/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the + * buffer reaches this number has_payload will be asserted. 1024B is not a + * real MPS; it is a way of indicating that the client needs to wait for EOP + * before asserting has_payload. Register should be initialized according to + * has_payload value. + */ +#define PXP2_REG_WR_DMAE_MPS 0x1205ec +/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the + * buffer reaches this number has_payload will be asserted. 1024B is not a + * real MPS; it is a way of indicating that the client needs to wait for EOP + * before asserting has_payload. Register should be initialized according to + * has_payload value. + */ +#define PXP2_REG_WR_HC_MPS 0x1205c8 +/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the + * buffer reaches this number has_payload will be asserted. 1024B is not a + * real MPS; it is a way of indicating that the client needs to wait for EOP + * before asserting has_payload. Register should be initialized according to + * has_payload value. + */ +#define PXP2_REG_WR_QM_MPS 0x1205dc +/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the + * buffer reaches this number has_payload will be asserted. 1024B is not a + * real MPS; it is a way of indicating that the client needs to wait for EOP + * before asserting has_payload. Register should be initialized according to + * has_payload value. + */ +#define PXP2_REG_WR_SRC_MPS 0x1205e4 +/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the + * buffer reaches this number has_payload will be asserted. 1024B is not a + * real MPS; it is a way of indicating that the client needs to wait for EOP + * before asserting has_payload. Register should be initialized according to + * has_payload value. 
+ */ +#define PXP2_REG_WR_TM_MPS 0x1205e0 +/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the + * buffer reaches this number has_payload will be asserted. 1024B is not a + * real MPS; it is a way of indicating that the client needs to wait for EOP + * before asserting has_payload. Register should be initialized according to + * has_payload value. + */ +#define PXP2_REG_WR_TSDM_MPS 0x1205d4 +/* [RW 9] a. When pxp2.wr_th_mode_usdmdp = 0 (E1.5-65 mode) should be + * initialized to (MPS/32); b. When pxp2.wr_th_mode_usdmdp = 1 (E1.5-90; + * enhanced mode) and pxp2.wr_usdmdp_outst_req is different than default (3) + * should be initialized to (pxp2.wr_usdmdp_outst_req x MPS/32); when + * pxp2.wr_usdmdp_outst_req is 3 the reset value is the correct + * configuration + */ +#define PXP2_REG_WR_USDMDP_TH 0x120348 +/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the + * buffer reaches this number has_payload will be asserted. 1024B is not a + * real MPS; it is a way of indicating that the client needs to wait for EOP + * before asserting has_payload. Register should be initialized according to + * has_payload value. + */ +#define PXP2_REG_WR_USDM_MPS 0x1205cc +/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the + * buffer reaches this number has_payload will be asserted. 1024B is not a + * real MPS; it is a way of indicating that the client needs to wait for EOP + * before asserting has_payload. Register should be initialized according to + * has_payload value. + */ +#define PXP2_REG_WR_XSDM_MPS 0x1205d8 +/* [R 1] debug only: Indication if PSWHST arbiter is idle */ +#define PXP_REG_HST_ARB_IS_IDLE 0x103004 +/* [R 8] debug only: A bit mask for all PSWHST arbiter clients. '1' means + * this client is waiting for the arbiter. + */ +#define PXP_REG_HST_CLIENTS_WAITING_TO_ARB 0x103008 +/* [RW 1] When 1; doorbells are discarded and not passed to doorbell queue + * block. Should be used for closing the gates. + */ +#define PXP_REG_HST_DISCARD_DOORBELLS 0x1030a4 +/* [R 1] debug only: '1' means this PSWHST is discarding doorbells. This bit + * should update according to 'hst_discard_doorbells' register when the state + * machine is idle + */ +#define PXP_REG_HST_DISCARD_DOORBELLS_STATUS 0x1030a0 +/* [RW 1] When 1; new internal writes arriving to the block are discarded. + * Should be used for closing the gates. + */ +#define PXP_REG_HST_DISCARD_INTERNAL_WRITES 0x1030a8 +/* [R 6] debug only: A bit mask for all PSWHST internal write clients. '1' + * means this PSWHST is discarding inputs from this client. Each bit should + * update according to 'hst_discard_internal_writes' register when the state + * machine is idle. + */ +#define PXP_REG_HST_DISCARD_INTERNAL_WRITES_STATUS 0x10309c +/* [R 1] 1 - An incorrect access is logged. The valid bit is reset when the + * relevant interrupt register is read (PXP_REG_INT_STS_CLR_1) + */ +#define PXP_REG_HST_INCORRECT_ACCESS_VALID 0x1030cc +/* [R 1] 1 - permission violation data is logged. The valid bit is reset when + * the relevant interrupt register is read (PXP_REG_INT_STS_CLR_1) + */ +#define PXP_REG_HST_PER_VIOLATION_VALID 0x1030e0 +/* [R 15] The FID of the first access to a disabled VF; the format is + * [14:12] - pfid; [11:6] - vfid; [5] - vf_valid; [4:1] - client (0 USDM; 1 + * CSDM; 2 XSDM; 3 TSDM; 4 HC; 5 GRC; 6 DQ; 7 RESERVED SPACE; 8 ATC); [0] - + * w_nr(0-read req; 1- write req). The data is written only when the valid + * bit is reset. 
and it is stays stable until it is reset by the read from + * interrupt_clr register + */ +#define PXP_REG_HST_VF_DISABLED_ERROR_DATA 0x1030b8 +/* [R 1] 1 - An error request is logged and wasn't handled yet. The valid + * bit is reset when the relevant interrupt register is read + * (PXP_REG_INT_STS_CLR_1) + */ +#define PXP_REG_HST_VF_DISABLED_ERROR_VALID 0x1030bc +/* [RW 7] Indirect access to the permission table. The fields are : {Valid; + * VFID[5:0]} + */ +#define PXP_REG_HST_ZONE_PERMISSION_TABLE 0x103400 +/* [RW 32] Interrupt mask register #0 read/write */ +#define PXP_REG_PXP_INT_MASK_0 0x103074 +#define PXP_REG_PXP_INT_MASK_1 0x103084 +/* [R 32] Interrupt register #0 read */ +#define PXP_REG_PXP_INT_STS_0 0x103068 +#define PXP_REG_PXP_INT_STS_1 0x103078 +/* [RC 32] Interrupt register #0 read clear */ +#define PXP_REG_PXP_INT_STS_CLR_0 0x10306c +#define PXP_REG_PXP_INT_STS_CLR_1 0x10307c +/* [RW 27] Parity mask register #0 read/write */ +#define PXP_REG_PXP_PRTY_MASK 0x103094 +/* [R 27] Parity register #0 read */ +#define PXP_REG_PXP_PRTY_STS 0x103088 +/* [RC 27] Parity register #0 read clear */ +#define PXP_REG_PXP_PRTY_STS_CLR 0x10308c +/* [RW 32] The base logical address (in bytes) of each physical queue. The + * index I represents the physical queue number. The 12 lsbs are ignore and + * considered zero so practically there are only 20 bits in this register; + * queues 63-0 + */ +#define QM_REG_BASEADDR 0x168900 +/* [R 32] NOT USED */ +#define QM_REG_BASEADDR_EXT_A 0x16e100 +/* [R 18] The credit value for byte credit 0. The value is 2s complement + * value (i.e. msb is used for the sign). + */ +#define QM_REG_BYTECRD0 0x16e6fc +/* [R 18] The credit value for byte credit 1. The value is 2s complement + * value (i.e. msb is used for the sign). + */ +#define QM_REG_BYTECRD1 0x16e700 +/* [R 18] The credit value for byte credit 2. The value is 2s complement + * value (i.e. msb is used for the sign). + */ +#define QM_REG_BYTECRD2 0x16e704 +/* [R 18] The credit value for byte credit 3. The value is 2s complement + * value (i.e. msb is used for the sign). + */ +#define QM_REG_BYTECRD3 0x16e7ac +/* [R 18] The credit value for byte credit 4. The value is 2s complement + * value (i.e. msb is used for the sign). + */ +#define QM_REG_BYTECRD4 0x16e7b0 +/* [R 18] The credit value for byte credit 5. The value is 2s complement + * value (i.e. msb is used for the sign). + */ +#define QM_REG_BYTECRD5 0x16e7b4 +/* [R 18] The credit value for byte credit 6. The value is 2s complement + * value (i.e. msb is used for the sign). + */ +#define QM_REG_BYTECRD6 0x16e7b8 +/* [R 32] NOT USED - removed for E3 B0 */ +#define QM_REG_BYTECRDCMDQ_0 0x16e6e8 +/* [RC 32] byte credit update error register; b2-b0: byte credit id (pbf + * error); b3 - reserved (zero filled); b6-b4: byte credit id (storm + * increment error); b7 - reserved (zero filled); b10-b8: byte credit id + * (storm decrement error); b11 - reserved (zero filled); b12: pbf error + * valid; b13: storm increment error valid; b14: storm decrement error + * valid; b15: reserved; b22-b16: byte credit warning (warning = decremented + * below zero). mask bit per voq counter; b31-b23: reserved; NOTE: VOQ id-s + * represent HW + */ +#define QM_REG_BYTECRDERRREG 0x16e708 +/* [RW 17] The initial byte credit value for all counters */ +#define QM_REG_BYTECRDINITVAL 0x168238 +/* [RW 20] The number of connections divided by 16 which dictates the size + * of each queue which belongs to even function number. 
+ */ +#define QM_REG_CONNNUM_0 0x168020 +/* [R 6] Keep the fill level of the fifo from write client 4 */ +#define QM_REG_CQM_WRC_FIFOLVL 0x168018 +/* [RW 3] Describes the HW (real) VOQ id (id-s 0-6 used for HW TX VOQ-s) of + * FW (virtual) VOQ0 + */ +#define QM_REG_FWVOQ0TOHWVOQ 0x16e7bc +/* [RW 3] Describes the HW (real) VOQ id (id-s 0-6 used for HW TX VOQ-s) of + * FW (virtual) VOQ1 + */ +#define QM_REG_FWVOQ1TOHWVOQ 0x16e7c0 +/* [RW 3] Describes the HW (real) VOQ id (id-s 0-6 used for HW TX VOQ-s) of + * FW (virtual) VOQ2 + */ +#define QM_REG_FWVOQ2TOHWVOQ 0x16e7c4 +/* [RW 3] Describes the HW (real) VOQ id (id-s 0-6 used for HW TX VOQ-s) of + * FW (virtual) VOQ3 + */ +#define QM_REG_FWVOQ3TOHWVOQ 0x16e7c8 +/* [RW 3] Describes the HW (real) VOQ id (id-s 0-6 used for HW TX VOQ-s) of + * FW (virtual) VOQ4 + */ +#define QM_REG_FWVOQ4TOHWVOQ 0x16e7cc +/* [RW 3] Describes the HW (real) VOQ id (id-s 0-6 used for HW TX VOQ-s) of + * FW (virtual) VOQ5 + */ +#define QM_REG_FWVOQ5TOHWVOQ 0x16e7d0 +/* [RW 3] Describes the HW (real) VOQ id (id-s 0-6 used for HW TX VOQ-s) of + * FW (virtual) VOQ6 + */ +#define QM_REG_FWVOQ6TOHWVOQ 0x16e7d4 +/* [RW 3] Describes the HW (real) VOQ id (id-s 0-6 used for HW TX VOQ-s) of + * FW (virtual) VOQ7 + */ +#define QM_REG_FWVOQ7TOHWVOQ 0x16e7d8 +/* [RC 1] A flag to indicate that overflow error occurred in one of the + * queues. + */ +#define QM_REG_OVFERROR 0x16805c +/* [RC 6] the Q were the qverflow occurs */ +#define QM_REG_OVFQNUM 0x168058 +/* [R 16] Pause state for physical queues 15-0 */ +#define QM_REG_PAUSESTATE0 0x168410 +/* [R 16] Pause state for physical queues 31-16 */ +#define QM_REG_PAUSESTATE1 0x168414 +/* [R 16] Pause state for physical queues 47-32 */ +#define QM_REG_PAUSESTATE2 0x16e684 +/* [R 16] Pause state for physical queues 63-48 */ +#define QM_REG_PAUSESTATE3 0x16e688 +/* [R 16] NOT USED */ +#define QM_REG_PAUSESTATE4 0x16e68c +/* [R 16] NOT USED */ +#define QM_REG_PAUSESTATE5 0x16e690 +/* [R 16] NOT USED */ +#define QM_REG_PAUSESTATE6 0x16e694 +/* [R 16] NOT USED */ +#define QM_REG_PAUSESTATE7 0x16e698 +#define QM_REG_PF_EN 0x16e70c +/* [R 24] The number of tasks stored in the QM for the PF. 
only even + * functions are valid in E2 (odd I registers will be hard wired to 0) + */ +#define QM_REG_PF_USG_CNT_0 0x16e040 +/* [R 16] NOT USED */ +#define QM_REG_PORT0BYTECRD 0x168300 +/* [R 16] NOT USED */ +#define QM_REG_PORT1BYTECRD 0x168304 +/* [WB 54] Pointer Table Memory for queues 63-0; The mapping is as follow: + * ptrtbl[53:30] read pointer; ptrtbl[29:6] write pointer; ptrtbl[5:4] read + * bank0; ptrtbl[3:2] read bank 1; ptrtbl[1:0] write bank; + */ +#define QM_REG_PTRTBL 0x168a00 +/* [R 54] NOT USED */ +#define QM_REG_PTRTBL_EXT_A 0x16e200 +/* [RW 14] Interrupt mask register #0 read/write */ +#define QM_REG_QM_INT_MASK 0x168444 +/* [R 14] Interrupt register #0 read */ +#define QM_REG_QM_INT_STS 0x168438 +/* [RC 14] Interrupt register #0 read clear */ +#define QM_REG_QM_INT_STS_CLR 0x16843c +/* [RW 12] Parity mask register #0 read/write */ +#define QM_REG_QM_PRTY_MASK 0x168454 +/* [R 12] Parity register #0 read */ +#define QM_REG_QM_PRTY_STS 0x168448 +/* [RC 12] Parity register #0 read clear */ +#define QM_REG_QM_PRTY_STS_CLR 0x16844c +/* [R 32] Current queues in pipeline: Queues from 32 to 63 */ +#define QM_REG_QSTATUS_HIGH 0x16802c +/* [R 32] NOT USED */ +#define QM_REG_QSTATUS_HIGH_EXT_A 0x16e408 +/* [R 32] Current queues in pipeline: Queues from 0 to 31 */ +#define QM_REG_QSTATUS_LOW 0x168028 +/* [R 32] NOT USED */ +#define QM_REG_QSTATUS_LOW_EXT_A 0x16e404 +/* [R 24] The number of tasks queued for each queue; queues 63-0 */ +#define QM_REG_QTASKCTR_0 0x168308 +/* [R 24] NOT USED */ +#define QM_REG_QTASKCTR_EXT_A_0 0x16e584 +/* [RW 4] Queue tied to VOQ */ +#define QM_REG_QVOQIDX_0 0x1680f4 +/* [RW 1] Initialization bit command */ +#define QM_REG_SOFT_RESET 0x168428 +/* [R 6] Keep the fill level of the fifo from write client 3 */ +#define QM_REG_TQM_WRC_FIFOLVL 0x168010 +/* [R 6] Keep the fill level of the fifo from write client 2 */ +#define QM_REG_UQM_WRC_FIFOLVL 0x168008 +/* [RC 32] VOQ credit update error register; b3-b0: voq id (pbf error); + * b7-b4: voq id (storm increment error); b11-b8: voq id (storm decrement + * error); b12: pbf error valid; b13: storm increment error valid; b14: + * storm decrement error valid; b15: reserved; b27-b16: voq warning + * (warning = decremented below zero). mask bit per voq counter; b31-b28: + * reserved; NOTE: VOQ id-s represent HW VOQ id + */ +#define QM_REG_VOQCRDERRREG 0x168408 +/* [R 17] The credit value for each VOQ. The value is 2s complement value + * (i.e. msb is used for the sign). 
+ */ +#define QM_REG_VOQCREDIT_0 0x1682d0 +#define QM_REG_VOQCREDIT_1 0x1682d4 +#define QM_REG_VOQCREDIT_2 0x1682d8 +#define QM_REG_VOQCREDIT_3 0x1682dc +#define QM_REG_VOQCREDIT_4 0x1682e0 +#define QM_REG_VOQCREDIT_5 0x1682e4 +#define QM_REG_VOQCREDIT_6 0x1682e8 +/* [RW 16] The init and maximum credit for each VoQ */ +#define QM_REG_VOQINITCREDIT_0 0x168060 +#define QM_REG_VOQINITCREDIT_1 0x168064 +#define QM_REG_VOQINITCREDIT_2 0x168068 +#define QM_REG_VOQINITCREDIT_3 0x16806c +#define QM_REG_VOQINITCREDIT_4 0x168070 +#define QM_REG_VOQINITCREDIT_5 0x168074 +#define QM_REG_VOQINITCREDIT_6 0x168078 +/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */ +#define QM_REG_VOQQMASK_0_LSB 0x168240 +/* [R 6] Keep the fill level of the fifo from write client 1 */ +#define QM_REG_XQM_WRC_FIFOLVL 0x168000 +/* [W 1] reset to parity interrupt */ +#define SEM_FAST_REG_PARITY_RST 0x18840 +/* [RW 1] Interrupt mask register #0 read/write */ +#define SEM_FAST_REG_SEM_FAST_INT_MASK 0x1fff0 +/* [R 1] Interrupt register #0 read */ +#define SEM_FAST_REG_SEM_FAST_INT_STS 0x1fffc +/* [RC 1] Interrupt register #0 read clear */ +#define SEM_FAST_REG_SEM_FAST_INT_STS_CLR 0x1fff8 +/* [RW 1] Parity mask register #0 read/write */ +#define SEM_FAST_REG_SEM_FAST_PRTY_MASK 0x1ffe0 +/* [R 1] Parity register #0 read */ +#define SEM_FAST_REG_SEM_FAST_PRTY_STS 0x1ffec +/* [RC 1] Parity register #0 read clear */ +#define SEM_FAST_REG_SEM_FAST_PRTY_STS_CLR 0x1ffe8 +#define SRC_REG_COUNTFREE0 0x40500 +#define SRC_REG_FIRSTFREE0 0x40510 +#define SRC_REG_KEYRSS0_0 0x40408 +#define SRC_REG_KEYRSS0_7 0x40424 +#define SRC_REG_KEYSEARCH_0 0x40458 +#define SRC_REG_KEYSEARCH_1 0x4045c +#define SRC_REG_KEYSEARCH_2 0x40460 +#define SRC_REG_KEYSEARCH_3 0x40464 +#define SRC_REG_KEYSEARCH_4 0x40468 +#define SRC_REG_KEYSEARCH_5 0x4046c +#define SRC_REG_KEYSEARCH_6 0x40470 +#define SRC_REG_KEYSEARCH_7 0x40474 +#define SRC_REG_KEYSEARCH_8 0x40478 +#define SRC_REG_KEYSEARCH_9 0x4047c +#define SRC_REG_LASTFREE0 0x40530 +#define SRC_REG_NUMBER_HASH_BITS0 0x40400 +/* [RW 1] Reset internal state machines. */ +#define SRC_REG_SOFT_RST 0x4049c +/* [RW 3] Interrupt mask register #0 read/write */ +#define SRC_REG_SRC_INT_MASK 0x404b8 +/* [R 3] Interrupt register #0 read */ +#define SRC_REG_SRC_INT_STS 0x404ac +/* [RC 3] Interrupt register #0 read clear */ +#define SRC_REG_SRC_INT_STS_CLR 0x404b0 +/* [RW 3] Parity mask register #0 read/write */ +#define SRC_REG_SRC_PRTY_MASK 0x404c8 +/* [R 3] Parity register #0 read */ +#define SRC_REG_SRC_PRTY_STS 0x404bc +/* [RC 3] Parity register #0 read clear */ +#define SRC_REG_SRC_PRTY_STS_CLR 0x404c0 +/* [R 4] Used to read the value of the XX protection CAM occupancy counter. */ +#define TCM_REG_CAM_OCCUP 0x5017c +/* [RW 4] CFC output initial credit. Max credit available - 15.Write writes + * the initial credit value; read returns the current value of the credit + * counter. Must be initialized to 1 at start-up. + */ +#define TCM_REG_CFC_INIT_CRD 0x50204 +/* [RC 1] Message length mismatch (relative to last indication) at the In#9 + * interface. + */ +#define TCM_REG_CSEM_LENGTH_MIS 0x50174 +/* [RW 8] FIC0 output initial credit. Max credit available - 255.Write + * writes the initial credit value; read returns the current value of the + * credit counter. Must be initialized to 64 at start-up. + */ +#define TCM_REG_FIC0_INIT_CRD 0x5020c +/* [RW 8] FIC1 output initial credit. 
Max credit available - 255.Write + * writes the initial credit value; read returns the current value of the + * credit counter. Must be initialized to 64 at start-up. + */ +#define TCM_REG_FIC1_INIT_CRD 0x50210 +/* [RC 1] Message length mismatch (relative to last indication) at the In#7 + * interface. + */ +#define TCM_REG_PBF_LENGTH_MIS 0x5016c +/* [RW 1] Input prs Interface enable. If 0 - the valid input is disregarded; + * acknowledge output is deasserted; all other signals are treated as usual; + * if 1 - normal activity. + */ +#define TCM_REG_PRS_IFEN 0x50020 +/* [RC 1] Message length mismatch (relative to last indication) at the In#6 + * interface. + */ +#define TCM_REG_PRS_LENGTH_MIS 0x50168 +/* [RC 1] Message length mismatch (relative to last indication) at the STORM + * interface. + */ +#define TCM_REG_STORM_LENGTH_MIS 0x50160 +/* [RW 11] Interrupt mask register #0 read/write */ +#define TCM_REG_TCM_INT_MASK 0x501dc +/* [R 11] Interrupt register #0 read */ +#define TCM_REG_TCM_INT_STS 0x501d0 +/* [RC 11] Interrupt register #0 read clear */ +#define TCM_REG_TCM_INT_STS_CLR 0x501d4 +/* [RW 27] Parity mask register #0 read/write */ +#define TCM_REG_TCM_PRTY_MASK 0x501ec +/* [R 27] Parity register #0 read */ +#define TCM_REG_TCM_PRTY_STS 0x501e0 +/* [RC 27] Parity register #0 read clear */ +#define TCM_REG_TCM_PRTY_STS_CLR 0x501e4 +/* [RW 6] QM output initial credit. Max credit available - 32.Write writes + * the initial credit value; read returns the current value of the credit + * counter. Must be initialized to 32 at start-up. + */ +#define TCM_REG_TQM_INIT_CRD 0x5021c +/* [RC 1] Message length mismatch (relative to last indication) at the SDM + * interface. + */ +#define TCM_REG_TSDM_LENGTH_MIS 0x50164 +/* [RC 1] Message length mismatch (relative to last indication) at the In#8 + * interface. + */ +#define TCM_REG_USEM_LENGTH_MIS 0x50170 +/* [RW 21] Indirect access to the descriptor table of the XX protection + * mechanism. The fields are: [5:0] - length of the message; 15:6] - message + * pointer; 20:16] - next pointer. + */ +#define TCM_REG_XX_DESCR_TABLE 0x50280 +#define TCM_REG_XX_DESCR_TABLE_SIZE 29 +/* [R 6] Use to read the value of XX protection Free counter. */ +#define TCM_REG_XX_FREE 0x50178 +#define TM_REG_EN_LINEAR0_TIMER 0x164014 +/* [RW 18] Linear0 Max active cid (in banks of 32 entries). */ +#define TM_REG_LIN0_MAX_ACTIVE_CID 0x164048 +/* [ST 16] Linear0 Number of scans counter. */ +#define TM_REG_LIN0_NUM_SCANS 0x1640a0 +#define TM_REG_LIN0_SCAN_ON 0x1640d0 +/* [RW 24] Linear0 array scan timeout. 
*/ +#define TM_REG_LIN0_SCAN_TIME 0x16403c +#define TM_REG_LIN0_VNIC_UC 0x164128 +/* [RW 1] Interrupt mask register #0 read/write */ +#define TM_REG_TM_INT_MASK 0x1640fc +/* [R 1] Interrupt register #0 read */ +#define TM_REG_TM_INT_STS 0x1640f0 +/* [RC 1] Interrupt register #0 read clear */ +#define TM_REG_TM_INT_STS_CLR 0x1640f4 +/* [RW 7] Parity mask register #0 read/write */ +#define TM_REG_TM_PRTY_MASK 0x16410c +/* [R 7] Parity register #0 read */ +#define TM_REG_TM_PRTY_STS 0x164100 +/* [RC 7] Parity register #0 read clear */ +#define TM_REG_TM_PRTY_STS_CLR 0x164104 +#define TSDM_REG_ENABLE_IN1 0x42238 +/* [R 1] pxp_ctrl rd_data fifo empty in sdm_dma_rsp block */ +#define TSDM_REG_RSP_PXP_CTRL_RDATA_EMPTY 0x42548 +/* [R 1] parser fifo empty in sdm_sync block */ +#define TSDM_REG_SYNC_PARSER_EMPTY 0x42550 +/* [R 1] parser serial fifo empty in sdm_sync block */ +#define TSDM_REG_SYNC_SYNC_EMPTY 0x42558 +/* [RW 32] Interrupt mask register #0 read/write */ +#define TSDM_REG_TSDM_INT_MASK_0 0x4229c +#define TSDM_REG_TSDM_INT_MASK_1 0x422ac +/* [R 32] Interrupt register #0 read */ +#define TSDM_REG_TSDM_INT_STS_0 0x42290 +#define TSDM_REG_TSDM_INT_STS_1 0x422a0 +/* [RC 32] Interrupt register #0 read clear */ +#define TSDM_REG_TSDM_INT_STS_CLR_0 0x42294 +#define TSDM_REG_TSDM_INT_STS_CLR_1 0x422a4 +/* [RW 11] Parity mask register #0 read/write */ +#define TSDM_REG_TSDM_PRTY_MASK 0x422bc +/* [R 11] Parity register #0 read */ +#define TSDM_REG_TSDM_PRTY_STS 0x422b0 +/* [RC 11] Parity register #0 read clear */ +#define TSDM_REG_TSDM_PRTY_STS_CLR 0x422b4 +/* [RW 32] This address space contains all registers and memories that are + * placed in SEM_FAST block. The SEM_FAST registers are described in + * appendix B. In order to access the SEM_FAST registers the base address + * TSEM_REGISTERS_FAST_MEMORY (Offset: 0x1a0000) should be added to each + * SEM_FAST register offset. + */ +#define TSEM_REG_FAST_MEMORY 0x1a0000 +/* [RW 15] Interrupt table Read and write access to it is not possible in + * the middle of the work + */ +#define TSEM_REG_INT_TABLE 0x180400 +/* [WB 128] Debug only. Passive buffer memory */ +#define TSEM_REG_PASSIVE_BUFFER 0x181000 +/* [WB 46] pram memory. B45 is parity; b[44:0] - data. */ +#define TSEM_REG_PRAM 0x1c0000 +/* [R 20] Valid sleeping threads indication have bit per thread */ +#define TSEM_REG_SLEEP_THREADS_VALID 0x18026c +/* [R 1] EXT_STORE FIFO is empty in sem_slow_ls_ext */ +#define TSEM_REG_SLOW_EXT_STORE_EMPTY 0x1802a0 +/* [RW 32] Interrupt mask register #0 read/write */ +#define TSEM_REG_TSEM_INT_MASK_0 0x180100 +#define TSEM_REG_TSEM_INT_MASK_1 0x180110 +/* [R 32] Interrupt register #0 read */ +#define TSEM_REG_TSEM_INT_STS_0 0x1800f4 +#define TSEM_REG_TSEM_INT_STS_1 0x180104 +/* [RC 32] Interrupt register #0 read clear */ +#define TSEM_REG_TSEM_INT_STS_CLR_0 0x1800f8 +#define TSEM_REG_TSEM_INT_STS_CLR_1 0x180108 +/* [RW 32] Parity mask register #0 read/write */ +#define TSEM_REG_TSEM_PRTY_MASK_0 0x180120 +#define TSEM_REG_TSEM_PRTY_MASK_1 0x180130 +/* [R 32] Parity register #0 read */ +#define TSEM_REG_TSEM_PRTY_STS_0 0x180114 +#define TSEM_REG_TSEM_PRTY_STS_1 0x180124 +/* [RC 32] Parity register #0 read clear */ +#define TSEM_REG_TSEM_PRTY_STS_CLR_0 0x180118 +#define TSEM_REG_TSEM_PRTY_STS_CLR_1 0x180128 +/* [W 7] VF or PF ID for reset error bit. Values 0-63 reset error bit for 64 + * VF; values 64-67 reset error for 4 PF; values 68-127 are not valid. + */ +#define TSEM_REG_VFPF_ERR_NUM 0x180380 +/* [R 5] Used to read the XX protection CAM occupancy counter. 
 */
+#define UCM_REG_CAM_OCCUP 0xe0170
+/* [RW 4] CFC output initial credit. Max credit available - 15. Write writes
+ * the initial credit value; read returns the current value of the credit
+ * counter. Must be initialized to 1 at start-up.
+ */
+#define UCM_REG_CFC_INIT_CRD 0xe0204
+/* [RC 1] Set when the message length mismatch (relative to last indication)
+ * at the csem interface is detected.
+ */
+#define UCM_REG_CSEM_LENGTH_MIS 0xe0160
+/* [RC 1] Set when the message length mismatch (relative to last indication)
+ * at the dorq interface is detected.
+ */
+#define UCM_REG_DORQ_LENGTH_MIS 0xe0168
+/* [RW 8] FIC0 output initial credit. Max credit available - 255. Write
+ * writes the initial credit value; read returns the current value of the
+ * credit counter. Must be initialized to 64 at start-up.
+ */
+#define UCM_REG_FIC0_INIT_CRD 0xe020c
+/* [RW 8] FIC1 output initial credit. Max credit available - 255. Write
+ * writes the initial credit value; read returns the current value of the
+ * credit counter. Must be initialized to 64 at start-up.
+ */
+#define UCM_REG_FIC1_INIT_CRD 0xe0210
+/* [RC 1] Set when the message length mismatch (relative to last indication)
+ * at the STORM interface is detected.
+ */
+#define UCM_REG_STORM_LENGTH_MIS 0xe0154
+/* [RW 4] Timers output initial credit. Max credit available - 15. Write
+ * writes the initial credit value; read returns the current value of the
+ * credit counter. Must be initialized to 4 at start-up.
+ */
+#define UCM_REG_TM_INIT_CRD 0xe021c
+/* [RC 1] Set when the message length mismatch (relative to last indication)
+ * at the tsem interface is detected.
+ */
+#define UCM_REG_TSEM_LENGTH_MIS 0xe015c
+/* [RW 11] Interrupt mask register #0 read/write */
+#define UCM_REG_UCM_INT_MASK 0xe01d4
+/* [R 11] Interrupt register #0 read */
+#define UCM_REG_UCM_INT_STS 0xe01c8
+/* [RC 11] Interrupt register #0 read clear */
+#define UCM_REG_UCM_INT_STS_CLR 0xe01cc
+/* [RW 27] Parity mask register #0 read/write */
+#define UCM_REG_UCM_PRTY_MASK 0xe01e4
+/* [R 27] Parity register #0 read */
+#define UCM_REG_UCM_PRTY_STS 0xe01d8
+/* [RC 27] Parity register #0 read clear */
+#define UCM_REG_UCM_PRTY_STS_CLR 0xe01dc
+/* [RW 6] QM output initial credit. Max credit available - 32. Write writes
+ * the initial credit value; read returns the current value of the credit
+ * counter. Must be initialized to 32 at start-up.
+ */
+#define UCM_REG_UQM_INIT_CRD 0xe0220
+/* [RC 1] Set when the message length mismatch (relative to last indication)
+ * at the SDM interface is detected.
+ */
+#define UCM_REG_USDM_LENGTH_MIS 0xe0158
+/* [RC 1] Set when the message length mismatch (relative to last indication)
+ * at the xsem interface is detected.
+ */
+#define UCM_REG_XSEM_LENGTH_MIS 0xe0164
+/* [RW 20] Indirect access to the descriptor table of the XX protection
+ * mechanism. The fields are: [5:0] - message length; [14:6] - message
+ * pointer; [19:15] - next pointer.
+ */
+#define UCM_REG_XX_DESCR_TABLE 0xe0280
+#define UCM_REG_XX_DESCR_TABLE_SIZE 27
+/* [R 6] Used to read the XX protection Free counter.
*/ +#define UCM_REG_XX_FREE 0xe016c +#define UMAC_COMMAND_CONFIG_REG_HD_ENA (0x1 << 10) +#define UMAC_COMMAND_CONFIG_REG_IGNORE_TX_PAUSE (0x1 << 28) +#define UMAC_COMMAND_CONFIG_REG_LOOP_ENA (0x1 << 15) +#define UMAC_COMMAND_CONFIG_REG_NO_LGTH_CHECK (0x1 << 24) +#define UMAC_COMMAND_CONFIG_REG_PAD_EN (0x1 << 5) +#define UMAC_COMMAND_CONFIG_REG_PAUSE_IGNORE (0x1 << 8) +#define UMAC_COMMAND_CONFIG_REG_PROMIS_EN (0x1 << 4) +#define UMAC_COMMAND_CONFIG_REG_RX_ENA (0x1 << 1) +#define UMAC_COMMAND_CONFIG_REG_SW_RESET (0x1 << 13) +#define UMAC_COMMAND_CONFIG_REG_TX_ENA (0x1 << 0) +#define UMAC_REG_COMMAND_CONFIG 0x8 +/* [RW 16] This is the duration for which MAC must wait to go back to ACTIVE + * state from LPI state when it receives packet for transmission. The + * decrement unit is 1 micro-second. + */ +#define UMAC_REG_EEE_WAKE_TIMER 0x6c +/* [RW 32] Register Bit 0 refers to Bit 16 of the MAC address; Bit 1 refers + * to bit 17 of the MAC address etc. + */ +#define UMAC_REG_MAC_ADDR0 0xc +/* [RW 16] Register Bit 0 refers to Bit 0 of the MAC address; Register Bit 1 + * refers to Bit 1 of the MAC address etc. Bits 16 to 31 are reserved. + */ +#define UMAC_REG_MAC_ADDR1 0x10 +/* [RW 14] Defines a 14-Bit maximum frame length used by the MAC receive + * logic to check frames. + */ +#define UMAC_REG_MAXFR 0x14 +#define UMAC_REG_UMAC_EEE_CTRL 0x64 +#define UMAC_UMAC_EEE_CTRL_REG_EEE_EN (0x1 << 3) +/* [R 1] pxp_ctrl rd_data fifo empty in sdm_dma_rsp block */ +#define USDM_REG_RSP_PXP_CTRL_RDATA_EMPTY 0xc4550 +/* [R 1] parser fifo empty in sdm_sync block */ +#define USDM_REG_SYNC_PARSER_EMPTY 0xc4558 +/* [R 1] parser serial fifo empty in sdm_sync block */ +#define USDM_REG_SYNC_SYNC_EMPTY 0xc4560 +/* [RW 32] Interrupt mask register #0 read/write */ +#define USDM_REG_USDM_INT_MASK_0 0xc42a0 +#define USDM_REG_USDM_INT_MASK_1 0xc42b0 +/* [R 32] Interrupt register #0 read */ +#define USDM_REG_USDM_INT_STS_0 0xc4294 +#define USDM_REG_USDM_INT_STS_1 0xc42a4 +/* [RC 32] Interrupt register #0 read clear */ +#define USDM_REG_USDM_INT_STS_CLR_0 0xc4298 +#define USDM_REG_USDM_INT_STS_CLR_1 0xc42a8 +/* [RW 11] Parity mask register #0 read/write */ +#define USDM_REG_USDM_PRTY_MASK 0xc42c0 +/* [R 11] Parity register #0 read */ +#define USDM_REG_USDM_PRTY_STS 0xc42b4 +/* [RC 11] Parity register #0 read clear */ +#define USDM_REG_USDM_PRTY_STS_CLR 0xc42b8 +/* [RW 32] This address space contains all registers and memories that are + * placed in SEM_FAST block. The SEM_FAST registers are described in + * appendix B. In order to access the SEM_FAST registers the base address + * USEM_REGISTERS_FAST_MEMORY (Offset: 0x320000) should be added to each + * SEM_FAST register offset. + */ +#define USEM_REG_FAST_MEMORY 0x320000 +/* [RW 15] Interrupt table Read and write access to it is not possible in + * the middle of the work + */ +#define USEM_REG_INT_TABLE 0x300400 +/* [WB 128] Debug only. Passive buffer memory */ +#define USEM_REG_PASSIVE_BUFFER 0x302000 +/* [WB 46] pram memory. B45 is parity; b[44:0] - data. 
*/ +#define USEM_REG_PRAM 0x340000 +/* [R 20] Valid sleeping threads indication have bit per thread */ +#define USEM_REG_SLEEP_THREADS_VALID 0x30026c +/* [R 1] EXT_STORE FIFO is empty in sem_slow_ls_ext */ +#define USEM_REG_SLOW_EXT_STORE_EMPTY 0x3002a0 +/* [RW 32] Interrupt mask register #0 read/write */ +#define USEM_REG_USEM_INT_MASK_0 0x300110 +#define USEM_REG_USEM_INT_MASK_1 0x300120 +/* [R 32] Interrupt register #0 read */ +#define USEM_REG_USEM_INT_STS_0 0x300104 +#define USEM_REG_USEM_INT_STS_1 0x300114 +/* [RC 32] Interrupt register #0 read clear */ +#define USEM_REG_USEM_INT_STS_CLR_0 0x300108 +#define USEM_REG_USEM_INT_STS_CLR_1 0x300118 +/* [RW 32] Parity mask register #0 read/write */ +#define USEM_REG_USEM_PRTY_MASK_0 0x300130 +#define USEM_REG_USEM_PRTY_MASK_1 0x300140 +/* [R 32] Parity register #0 read */ +#define USEM_REG_USEM_PRTY_STS_0 0x300124 +#define USEM_REG_USEM_PRTY_STS_1 0x300134 +/* [RC 32] Parity register #0 read clear */ +#define USEM_REG_USEM_PRTY_STS_CLR_0 0x300128 +#define USEM_REG_USEM_PRTY_STS_CLR_1 0x300138 +/* [W 7] VF or PF ID for reset error bit. Values 0-63 reset error bit for 64 + * VF; values 64-67 reset error for 4 PF; values 68-127 are not valid. + */ +#define USEM_REG_VFPF_ERR_NUM 0x300380 +#define VFC_MEMORIES_RST_REG_CAM_RST (0x1 << 0) +#define VFC_MEMORIES_RST_REG_RAM_RST (0x1 << 1) +#define VFC_REG_MEMORIES_RST 0x1943c +/* [RW 1] Interrupt mask register #0 read/write */ +#define VFC_REG_VFC_INT_MASK 0x194f0 +/* [R 1] Interrupt register #0 read */ +#define VFC_REG_VFC_INT_STS 0x194fc +/* [RC 1] Interrupt register #0 read clear */ +#define VFC_REG_VFC_INT_STS_CLR 0x194f8 +/* [RW 1] Parity mask register #0 read/write */ +#define VFC_REG_VFC_PRTY_MASK 0x194e0 +/* [R 1] Parity register #0 read */ +#define VFC_REG_VFC_PRTY_STS 0x194ec +/* [RC 1] Parity register #0 read clear */ +#define VFC_REG_VFC_PRTY_STS_CLR 0x194e8 +/* [R 5] Used to read the XX protection CAM occupancy counter. */ +#define XCM_REG_CAM_OCCUP 0x20244 +/* [RW 4] CFC output initial credit. Max credit available - 15.Write writes + * the initial credit value; read returns the current value of the credit + * counter. Must be initialized to 1 at start-up. + */ +#define XCM_REG_CFC_INIT_CRD 0x20404 +/* [RC 1] Set at message length mismatch (relative to last indication) at + * the csem interface. + */ +#define XCM_REG_CSEM_LENGTH_MIS 0x20228 +/* [RC 1] Set at message length mismatch (relative to last indication) at + * the dorq interface. + */ +#define XCM_REG_DORQ_LENGTH_MIS 0x20230 +/* [RW 8] FIC0 output initial credit. Max credit available - 255.Write + * writes the initial credit value; read returns the current value of the + * credit counter. Must be initialized to 64 at start-up. + */ +#define XCM_REG_FIC0_INIT_CRD 0x2040c +/* [RW 8] FIC1 output initial credit. Max credit available - 255.Write + * writes the initial credit value; read returns the current value of the + * credit counter. Must be initialized to 64 at start-up. + */ +#define XCM_REG_FIC1_INIT_CRD 0x20410 +#define XCM_REG_GLB_DEL_ACK_MAX_CNT_0 0x20118 +/* [RC 1] Set at message length mismatch (relative to last indication) at + * the nig0 interface. + */ +#define XCM_REG_NIG0_LENGTH_MIS 0x20238 +/* [RC 1] Set at message length mismatch (relative to last indication) at + * the nig1 interface. + */ +#define XCM_REG_NIG1_LENGTH_MIS 0x2023c +/* [RC 1] Set at message length mismatch (relative to last indication) at + * the pbf interface. 
+ */ +#define XCM_REG_PBF_LENGTH_MIS 0x20234 +/* [RC 1] Set at message length mismatch (relative to last indication) at + * the STORM interface. + */ +#define XCM_REG_STORM_LENGTH_MIS 0x2021c +/* [RW 4] Timers output initial credit. Max credit available - 15.Write + * writes the initial credit value; read returns the current value of the + * credit counter. Must be initialized to 4 at start-up. + */ +#define XCM_REG_TM_INIT_CRD 0x2041c +/* [RC 1] Set at message length mismatch (relative to last indication) at + * the tsem interface. + */ +#define XCM_REG_TSEM_LENGTH_MIS 0x20224 +/* [RC 1] Message length mismatch (relative to last indication) at the usem + * interface. + */ +#define XCM_REG_USEM_LENGTH_MIS 0x2022c +#define XCM_REG_WU_DA_CNT_CMD00 0x201d4 +#define XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00 0x201c4 +/* [RW 14] Interrupt mask register #0 read/write */ +#define XCM_REG_XCM_INT_MASK 0x202b4 +/* [R 14] Interrupt register #0 read */ +#define XCM_REG_XCM_INT_STS 0x202a8 +/* [RC 14] Interrupt register #0 read clear */ +#define XCM_REG_XCM_INT_STS_CLR 0x202ac +/* [RW 30] Parity mask register #0 read/write */ +#define XCM_REG_XCM_PRTY_MASK 0x202c4 +/* [R 30] Parity register #0 read */ +#define XCM_REG_XCM_PRTY_STS 0x202b8 +/* [RC 30] Parity register #0 read clear */ +#define XCM_REG_XCM_PRTY_STS_CLR 0x202bc +/* [RW 6] QM output initial credit. Max credit available - 32.Write writes + * the initial credit value; read returns the current value of the credit + * counter. Must be initialized to 32 at start-up. + */ +#define XCM_REG_XQM_INIT_CRD 0x20420 +/* [RC 1] Set at message length mismatch (relative to last indication) at + * the SDM interface. + */ +#define XCM_REG_XSDM_LENGTH_MIS 0x20220 +/* [RW 17] Indirect access to the descriptor table of the XX protection + * mechanism. The fields are: [5:0] - message length; 11:6] - message + * pointer; 16:12] - next pointer. + */ +#define XCM_REG_XX_DESCR_TABLE 0x20480 +#define XCM_REG_XX_DESCR_TABLE_SIZE 32 +/* [R 6] Used to read the XX protection Free counter. */ +#define XCM_REG_XX_FREE 0x20240 +#define XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_LOCAL_FAULT_STATUS (0x1 << 0) +#define XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_REMOTE_FAULT_STATUS (0x1 << 1) +#define XMAC_CTRL_REG_LINE_LOCAL_LPBK (0x1 << 2) +#define XMAC_CTRL_REG_RX_EN (0x1 << 1) +#define XMAC_CTRL_REG_SOFT_RESET (0x1 << 6) +#define XMAC_CTRL_REG_TX_EN (0x1 << 0) +#define XMAC_CTRL_REG_XLGMII_ALIGN_ENB (0x1 << 7) +#define XMAC_PAUSE_CTRL_REG_RX_PAUSE_EN (0x1 << 18) +#define XMAC_PAUSE_CTRL_REG_TX_PAUSE_EN (0x1 << 17) +#define XMAC_PFC_CTRL_HI_REG_FORCE_PFC_XON (0x1 << 1) +#define XMAC_PFC_CTRL_HI_REG_PFC_REFRESH_EN (0x1 << 0) +#define XMAC_PFC_CTRL_HI_REG_PFC_STATS_EN (0x1 << 3) +#define XMAC_PFC_CTRL_HI_REG_RX_PFC_EN (0x1 << 4) +#define XMAC_PFC_CTRL_HI_REG_TX_PFC_EN (0x1 << 5) +#define XMAC_REG_CLEAR_RX_LSS_STATUS 0x60 +#define XMAC_REG_CTRL 0 +/* [RW 16] Upper 48 bits of ctrl_sa register. Used as the SA in PAUSE/PFC + * packets transmitted by the MAC + */ +#define XMAC_REG_CTRL_SA_HI 0x2c +/* [RW 32] Lower 48 bits of ctrl_sa register. 
Used as the SA in PAUSE/PFC + * packets transmitted by the MAC + */ +#define XMAC_REG_CTRL_SA_LO 0x28 +#define XMAC_REG_EEE_CTRL 0xd8 +#define XMAC_REG_EEE_TIMERS_HI 0xe4 +#define XMAC_REG_PAUSE_CTRL 0x68 +#define XMAC_REG_PFC_CTRL 0x70 +#define XMAC_REG_PFC_CTRL_HI 0x74 +#define XMAC_REG_RX_LSS_CTRL 0x50 +#define XMAC_REG_RX_LSS_STATUS 0x58 +/* [RW 14] Maximum packet size in receive direction; exclusive of preamble & + * CRC in strip mode + */ +#define XMAC_REG_RX_MAX_SIZE 0x40 +#define XMAC_REG_TX_CTRL 0x20 +#define XMAC_RX_LSS_CTRL_REG_LOCAL_FAULT_DISABLE (0x1 << 0) +#define XMAC_RX_LSS_CTRL_REG_REMOTE_FAULT_DISABLE (0x1 << 1) +/* [W 17] Generate an operation after completion; bit-16 is + * AggVectIdx_valid; bits 15:8 are AggVectIdx; bits 7:5 are the TRIG and + * bits 4:0 are the T124Param[4:0] + */ +#define XSDM_REG_OPERATION_GEN 0x1664c4 +/* [R 1] pxp_ctrl rd_data fifo empty in sdm_dma_rsp block */ +#define XSDM_REG_RSP_PXP_CTRL_RDATA_EMPTY 0x166548 +/* [R 1] parser fifo empty in sdm_sync block */ +#define XSDM_REG_SYNC_PARSER_EMPTY 0x166550 +/* [R 1] parser serial fifo empty in sdm_sync block */ +#define XSDM_REG_SYNC_SYNC_EMPTY 0x166558 +/* [RW 32] Interrupt mask register #0 read/write */ +#define XSDM_REG_XSDM_INT_MASK_0 0x16629c +#define XSDM_REG_XSDM_INT_MASK_1 0x1662ac +/* [R 32] Interrupt register #0 read */ +#define XSDM_REG_XSDM_INT_STS_0 0x166290 +#define XSDM_REG_XSDM_INT_STS_1 0x1662a0 +/* [RC 32] Interrupt register #0 read clear */ +#define XSDM_REG_XSDM_INT_STS_CLR_0 0x166294 +#define XSDM_REG_XSDM_INT_STS_CLR_1 0x1662a4 +/* [RW 11] Parity mask register #0 read/write */ +#define XSDM_REG_XSDM_PRTY_MASK 0x1662bc +/* [R 11] Parity register #0 read */ +#define XSDM_REG_XSDM_PRTY_STS 0x1662b0 +/* [RC 11] Parity register #0 read clear */ +#define XSDM_REG_XSDM_PRTY_STS_CLR 0x1662b4 +/* [RW 32] This address space contains all registers and memories that are + * placed in SEM_FAST block. The SEM_FAST registers are described in + * appendix B. In order to access the SEM_FAST registers the base address + * XSEM_REGISTERS_FAST_MEMORY (Offset: 0x2a0000) should be added to each + * SEM_FAST register offset. + */ +#define XSEM_REG_FAST_MEMORY 0x2a0000 +/* [RW 15] Interrupt table Read and write access to it is not possible in + * the middle of the work + */ +#define XSEM_REG_INT_TABLE 0x280400 +/* [WB 128] Debug only. Passive buffer memory */ +#define XSEM_REG_PASSIVE_BUFFER 0x282000 +/* [WB 46] pram memory. B45 is parity; b[44:0] - data. */ +#define XSEM_REG_PRAM 0x2c0000 +/* [R 20] Valid sleeping threads indication have bit per thread */ +#define XSEM_REG_SLEEP_THREADS_VALID 0x28026c +/* [R 1] EXT_STORE FIFO is empty in sem_slow_ls_ext */ +#define XSEM_REG_SLOW_EXT_STORE_EMPTY 0x2802a0 +/* [W 7] VF or PF ID for reset error bit. Values 0-63 reset error bit for 64 + * VF; values 64-67 reset error for 4 PF; values 68-127 are not valid. 
+ */ +#define XSEM_REG_VFPF_ERR_NUM 0x280380 +/* [RW 32] Interrupt mask register #0 read/write */ +#define XSEM_REG_XSEM_INT_MASK_0 0x280110 +#define XSEM_REG_XSEM_INT_MASK_1 0x280120 +/* [R 32] Interrupt register #0 read */ +#define XSEM_REG_XSEM_INT_STS_0 0x280104 +#define XSEM_REG_XSEM_INT_STS_1 0x280114 +/* [RC 32] Interrupt register #0 read clear */ +#define XSEM_REG_XSEM_INT_STS_CLR_0 0x280108 +#define XSEM_REG_XSEM_INT_STS_CLR_1 0x280118 +/* [RW 32] Parity mask register #0 read/write */ +#define XSEM_REG_XSEM_PRTY_MASK_0 0x280130 +#define XSEM_REG_XSEM_PRTY_MASK_1 0x280140 +/* [R 32] Parity register #0 read */ +#define XSEM_REG_XSEM_PRTY_STS_0 0x280124 +#define XSEM_REG_XSEM_PRTY_STS_1 0x280134 +/* [RC 32] Parity register #0 read clear */ +#define XSEM_REG_XSEM_PRTY_STS_CLR_0 0x280128 +#define XSEM_REG_XSEM_PRTY_STS_CLR_1 0x280138 +#define MCPR_ACCESS_LOCK_LOCK (1L << 31) +#define MCPR_IMC_COMMAND_ENABLE (1L << 31) +#define MCPR_IMC_COMMAND_IMC_STATUS_BITSHIFT 16 +#define MCPR_IMC_COMMAND_OPERATION_BITSHIFT 28 +#define MCPR_IMC_COMMAND_TRANSFER_ADDRESS_BITSHIFT 8 +#define MCPR_NVM_ACCESS_ENABLE_EN (1L << 0) +#define MCPR_NVM_ACCESS_ENABLE_WR_EN (1L << 1) +#define MCPR_NVM_ADDR_NVM_ADDR_VALUE (0xffffffL << 0) +#define MCPR_NVM_CFG4_FLASH_SIZE (0x7L << 0) +#define MCPR_NVM_COMMAND_DOIT (1L << 4) +#define MCPR_NVM_COMMAND_DONE (1L << 3) +#define MCPR_NVM_COMMAND_FIRST (1L << 7) +#define MCPR_NVM_COMMAND_LAST (1L << 8) +#define MCPR_NVM_COMMAND_WR (1L << 5) +#define MCPR_NVM_SW_ARB_ARB_ARB1 (1L << 9) +#define MCPR_NVM_SW_ARB_ARB_REQ_CLR1 (1L << 5) +#define MCPR_NVM_SW_ARB_ARB_REQ_SET1 (1L << 1) +#define BIGMAC_REGISTER_BMAC_CONTROL (0x00 << 3) +#define BIGMAC_REGISTER_BMAC_XGXS_CONTROL (0x01 << 3) +#define BIGMAC_REGISTER_CNT_MAX_SIZE (0x05 << 3) +#define BIGMAC_REGISTER_RX_CONTROL (0x21 << 3) +#define BIGMAC_REGISTER_RX_LLFC_MSG_FLDS (0x46 << 3) +#define BIGMAC_REGISTER_RX_LSS_STATUS (0x43 << 3) +#define BIGMAC_REGISTER_RX_MAX_SIZE (0x23 << 3) +#define BIGMAC_REGISTER_RX_STAT_GR64 (0x26 << 3) +#define BIGMAC_REGISTER_RX_STAT_GRIPJ (0x42 << 3) +#define BIGMAC_REGISTER_TX_CONTROL (0x07 << 3) +#define BIGMAC_REGISTER_TX_MAX_SIZE (0x09 << 3) +#define BIGMAC_REGISTER_TX_PAUSE_THRESHOLD (0x0A << 3) +#define BIGMAC_REGISTER_TX_SOURCE_ADDR (0x08 << 3) +#define BIGMAC_REGISTER_TX_STAT_GTBYT (0x20 << 3) +#define BIGMAC_REGISTER_TX_STAT_GTPKT (0x0C << 3) +#define BIGMAC2_REGISTER_BMAC_CONTROL (0x00 << 3) +#define BIGMAC2_REGISTER_BMAC_XGXS_CONTROL (0x01 << 3) +#define BIGMAC2_REGISTER_CNT_MAX_SIZE (0x05 << 3) +#define BIGMAC2_REGISTER_PFC_CONTROL (0x06 << 3) +#define BIGMAC2_REGISTER_RX_CONTROL (0x3A << 3) +#define BIGMAC2_REGISTER_RX_LLFC_MSG_FLDS (0x62 << 3) +#define BIGMAC2_REGISTER_RX_LSS_STAT (0x3E << 3) +#define BIGMAC2_REGISTER_RX_MAX_SIZE (0x3C << 3) +#define BIGMAC2_REGISTER_RX_STAT_GR64 (0x40 << 3) +#define BIGMAC2_REGISTER_RX_STAT_GRIPJ (0x5f << 3) +#define BIGMAC2_REGISTER_TX_CONTROL (0x1C << 3) +#define BIGMAC2_REGISTER_TX_MAX_SIZE (0x1E << 3) +#define BIGMAC2_REGISTER_TX_PAUSE_CONTROL (0x20 << 3) +#define BIGMAC2_REGISTER_TX_SOURCE_ADDR (0x1D << 3) +#define BIGMAC2_REGISTER_TX_STAT_GTBYT (0x39 << 3) +#define BIGMAC2_REGISTER_TX_STAT_GTPOK (0x22 << 3) +#define EMAC_LED_1000MB_OVERRIDE (1L << 1) +#define EMAC_LED_100MB_OVERRIDE (1L << 2) +#define EMAC_LED_10MB_OVERRIDE (1L << 3) +#define EMAC_LED_OVERRIDE (1L << 0) +#define EMAC_MDIO_COMM_COMMAND_ADDRESS (0L << 26) +#define EMAC_MDIO_COMM_COMMAND_READ_22 (2L << 26) +#define EMAC_MDIO_COMM_COMMAND_READ_45 (3L << 26) +#define 
EMAC_MDIO_COMM_COMMAND_WRITE_22 (1L << 26) +#define EMAC_MDIO_COMM_COMMAND_WRITE_45 (1L << 26) +#define EMAC_MDIO_COMM_DATA (0xffffL << 0) +#define EMAC_MDIO_COMM_START_BUSY (1L << 29) +#define EMAC_MDIO_MODE_AUTO_POLL (1L << 4) +#define EMAC_MDIO_MODE_CLAUSE_45 (1L << 31) +#define EMAC_MDIO_MODE_CLOCK_CNT (0x3ffL << 16) +#define EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT 16 +#define EMAC_MDIO_STATUS_10MB (1L << 1) +#define EMAC_MODE_25G_MODE (1L << 5) +#define EMAC_MODE_HALF_DUPLEX (1L << 1) +#define EMAC_MODE_PORT_GMII (2L << 2) +#define EMAC_MODE_PORT_MII (1L << 2) +#define EMAC_MODE_PORT_MII_10M (3L << 2) +#define EMAC_MODE_RESET (1L << 0) +#define EMAC_REG_EMAC_LED 0xc +#define EMAC_REG_EMAC_MAC_MATCH 0x10 +#define EMAC_REG_EMAC_MDIO_COMM 0xac +#define EMAC_REG_EMAC_MDIO_MODE 0xb4 +#define EMAC_REG_EMAC_MDIO_STATUS 0xb0 +#define EMAC_REG_EMAC_MODE 0x0 +#define EMAC_REG_EMAC_RX_MODE 0xc8 +#define EMAC_REG_EMAC_RX_MTU_SIZE 0x9c +#define EMAC_REG_EMAC_RX_STAT_AC 0x180 +#define EMAC_REG_EMAC_RX_STAT_AC_28 0x1f4 +#define EMAC_REG_EMAC_RX_STAT_AC_COUNT 23 +#define EMAC_REG_EMAC_TX_MODE 0xbc +#define EMAC_REG_EMAC_TX_STAT_AC 0x280 +#define EMAC_REG_EMAC_TX_STAT_AC_COUNT 22 +#define EMAC_REG_RX_PFC_MODE 0x320 +#define EMAC_REG_RX_PFC_MODE_PRIORITIES (1L << 2) +#define EMAC_REG_RX_PFC_MODE_RX_EN (1L << 1) +#define EMAC_REG_RX_PFC_MODE_TX_EN (1L << 0) +#define EMAC_REG_RX_PFC_PARAM 0x324 +#define EMAC_REG_RX_PFC_PARAM_OPCODE_BITSHIFT 0 +#define EMAC_REG_RX_PFC_PARAM_PRIORITY_EN_BITSHIFT 16 +#define EMAC_REG_RX_PFC_STATS_XOFF_RCVD 0x328 +#define EMAC_REG_RX_PFC_STATS_XOFF_RCVD_COUNT (0xffff << 0) +#define EMAC_REG_RX_PFC_STATS_XOFF_SENT 0x330 +#define EMAC_REG_RX_PFC_STATS_XOFF_SENT_COUNT (0xffff << 0) +#define EMAC_REG_RX_PFC_STATS_XON_RCVD 0x32c +#define EMAC_REG_RX_PFC_STATS_XON_RCVD_COUNT (0xffff << 0) +#define EMAC_REG_RX_PFC_STATS_XON_SENT 0x334 +#define EMAC_REG_RX_PFC_STATS_XON_SENT_COUNT (0xffff << 0) +#define EMAC_RX_MODE_FLOW_EN (1L << 2) +#define EMAC_RX_MODE_KEEP_MAC_CONTROL (1L << 3) +#define EMAC_RX_MODE_KEEP_VLAN_TAG (1L << 10) +#define EMAC_RX_MODE_PROMISCUOUS (1L << 8) +#define EMAC_RX_MODE_RESET (1L << 0) +#define EMAC_RX_MTU_SIZE_JUMBO_ENA (1L << 31) +#define EMAC_TX_MODE_EXT_PAUSE_EN (1L << 3) +#define EMAC_TX_MODE_FLOW_EN (1L << 4) +#define EMAC_TX_MODE_RESET (1L << 0) +#define MISC_REGISTERS_GPIO_0 0 +#define MISC_REGISTERS_GPIO_1 1 +#define MISC_REGISTERS_GPIO_2 2 +#define MISC_REGISTERS_GPIO_3 3 +#define MISC_REGISTERS_GPIO_CLR_POS 16 +#define MISC_REGISTERS_GPIO_FLOAT (0xffL << 24) +#define MISC_REGISTERS_GPIO_FLOAT_POS 24 +#define MISC_REGISTERS_GPIO_HIGH 1 +#define MISC_REGISTERS_GPIO_INPUT_HI_Z 2 +#define MISC_REGISTERS_GPIO_INT_CLR_POS 24 +#define MISC_REGISTERS_GPIO_INT_OUTPUT_CLR 0 +#define MISC_REGISTERS_GPIO_INT_OUTPUT_SET 1 +#define MISC_REGISTERS_GPIO_INT_SET_POS 16 +#define MISC_REGISTERS_GPIO_LOW 0 +#define MISC_REGISTERS_GPIO_OUTPUT_HIGH 1 +#define MISC_REGISTERS_GPIO_OUTPUT_LOW 0 +#define MISC_REGISTERS_GPIO_PORT_SHIFT 4 +#define MISC_REGISTERS_GPIO_SET_POS 8 +#define MISC_REGISTERS_RESET_REG_1_CLEAR 0x588 +#define MISC_REGISTERS_RESET_REG_1_RST_BRB1 (0x1 << 0) +#define MISC_REGISTERS_RESET_REG_1_RST_DORQ (0x1 << 19) +#define MISC_REGISTERS_RESET_REG_1_RST_HC (0x1 << 29) +#define MISC_REGISTERS_RESET_REG_1_RST_PXP (0x1 << 26) +#define MISC_REGISTERS_RESET_REG_1_RST_PXPV (0x1 << 27) +#define MISC_REGISTERS_RESET_REG_1_RST_QM (0x1 << 17) +#define MISC_REGISTERS_RESET_REG_1_SET 0x584 +#define MISC_REGISTERS_RESET_REG_2_CLEAR 0x598 +#define 
MISC_REGISTERS_RESET_REG_2_MSTAT0 (0x1 << 24) +#define MISC_REGISTERS_RESET_REG_2_MSTAT1 (0x1 << 25) +#define MISC_REGISTERS_RESET_REG_2_PGLC (0x1 << 19) +#define MISC_REGISTERS_RESET_REG_2_RST_ATC (0x1 << 17) +#define MISC_REGISTERS_RESET_REG_2_RST_BMAC0 (0x1 << 0) +#define MISC_REGISTERS_RESET_REG_2_RST_BMAC1 (0x1 << 1) +#define MISC_REGISTERS_RESET_REG_2_RST_EMAC0 (0x1 << 2) +#define MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE (0x1 << 14) +#define MISC_REGISTERS_RESET_REG_2_RST_EMAC1 (0x1 << 3) +#define MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE (0x1 << 15) +#define MISC_REGISTERS_RESET_REG_2_RST_GRC (0x1 << 4) +#define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B (0x1 << 6) +#define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE (0x1 << 8) +#define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU (0x1 << 7) +#define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE (0x1 << 5) +#define MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE (0x1 << 11) +#define MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO (0x1 << 13) +#define MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR (0x1 << 16) +#define MISC_REGISTERS_RESET_REG_2_RST_RBCN (0x1 << 9) +#define MISC_REGISTERS_RESET_REG_2_SET 0x594 +#define MISC_REGISTERS_RESET_REG_2_UMAC0 (0x1 << 20) +#define MISC_REGISTERS_RESET_REG_2_UMAC1 (0x1 << 21) +#define MISC_REGISTERS_RESET_REG_2_XMAC (0x1 << 22) +#define MISC_REGISTERS_RESET_REG_2_XMAC_SOFT (0x1 << 23) +#define MISC_REGISTERS_RESET_REG_3_CLEAR 0x5a8 +#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_IDDQ (0x1 << 1) +#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_PWRDWN (0x1 << 2) +#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_PWRDWN_SD (0x1 << 3) +#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_RSTB_HW (0x1 << 0) +#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_IDDQ (0x1 << 5) +#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_PWRDWN (0x1 << 6) +#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_PWRDWN_SD (0x1 << 7) +#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_RSTB_HW (0x1 << 4) +#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_TXD_FIFO_RSTB (0x1 << 8) +#define MISC_REGISTERS_RESET_REG_3_SET 0x5a4 +#define MISC_SPIO_CLR_POS 16 +#define MISC_SPIO_FLOAT (0xffL << 24) +#define MISC_SPIO_FLOAT_POS 24 +#define MISC_SPIO_INPUT_HI_Z 2 +#define MISC_SPIO_INT_OLD_SET_POS 16 +#define MISC_SPIO_OUTPUT_HIGH 1 +#define MISC_SPIO_OUTPUT_LOW 0 +#define MISC_SPIO_SET_POS 8 +#define MISC_SPIO_SPIO4 0x10 +#define MISC_SPIO_SPIO5 0x20 +#define HW_LOCK_MAX_RESOURCE_VALUE 31 +#define HW_LOCK_RESOURCE_DCBX_ADMIN_MIB 13 +#define HW_LOCK_RESOURCE_DRV_FLAGS 10 +#define HW_LOCK_RESOURCE_GPIO 1 +#define HW_LOCK_RESOURCE_MDIO 0 +#define HW_LOCK_RESOURCE_NVRAM 12 +#define HW_LOCK_RESOURCE_PORT0_ATT_MASK 3 +#define HW_LOCK_RESOURCE_RECOVERY_LEADER_0 8 +#define HW_LOCK_RESOURCE_RECOVERY_LEADER_1 9 +#define HW_LOCK_RESOURCE_RECOVERY_REG 11 +#define HW_LOCK_RESOURCE_RESET 5 +#define HW_LOCK_RESOURCE_SPIO 2 +#define AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT (0x1 << 4) +#define AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR (0x1 << 5) +#define AEU_INPUTS_ATTN_BITS_BRB_HW_INTERRUPT (0x1 << 19) +#define AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR (0x1 << 18) +#define AEU_INPUTS_ATTN_BITS_CCM_HW_INTERRUPT (0x1 << 31) +#define AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR (0x1 << 30) +#define AEU_INPUTS_ATTN_BITS_CDU_HW_INTERRUPT (0x1 << 9) +#define AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR (0x1 << 8) +#define AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT (0x1 << 7) +#define 
AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR (0x1 << 6) +#define AEU_INPUTS_ATTN_BITS_CSDM_HW_INTERRUPT (0x1 << 29) +#define AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR (0x1 << 28) +#define AEU_INPUTS_ATTN_BITS_CSEMI_HW_INTERRUPT (0x1 << 1) +#define AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR (0x1 << 0) +#define AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR (0x1 << 18) +#define AEU_INPUTS_ATTN_BITS_DMAE_HW_INTERRUPT (0x1 << 11) +#define AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR (0x1 << 10) +#define AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT (0x1 << 13) +#define AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR (0x1 << 12) +#define AEU_INPUTS_ATTN_BITS_GPIO0_FUNCTION_0 (0x1 << 2) +#define AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR (0x1 << 12) +#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY (0x1 << 28) +#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY (0x1 << 31) +#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY (0x1 << 29) +#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY (0x1 << 30) +#define AEU_INPUTS_ATTN_BITS_MISC_HW_INTERRUPT (0x1 << 15) +#define AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR (0x1 << 14) +#define AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR (0x1 << 14) +#define AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR (0x1 << 20) +#define AEU_INPUTS_ATTN_BITS_PBCLIENT_HW_INTERRUPT (0x1 << 31) +#define AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR (0x1 << 30) +#define AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR (0x1 << 0) +#define AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT (0x1 << 2) +#define AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR (0x1 << 3) +#define AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT (0x1 << 5) +#define AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR (0x1 << 4) +#define AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT (0x1 << 3) +#define AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR (0x1 << 2) +#define AEU_INPUTS_ATTN_BITS_QM_HW_INTERRUPT (0x1 << 3) +#define AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR (0x1 << 2) +#define AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR (0x1 << 22) +#define AEU_INPUTS_ATTN_BITS_SPIO5 (0x1 << 15) +#define AEU_INPUTS_ATTN_BITS_TCM_HW_INTERRUPT (0x1 << 27) +#define AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR (0x1 << 26) +#define AEU_INPUTS_ATTN_BITS_TIMERS_HW_INTERRUPT (0x1 << 5) +#define AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR (0x1 << 4) +#define AEU_INPUTS_ATTN_BITS_TSDM_HW_INTERRUPT (0x1 << 25) +#define AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR (0x1 << 24) +#define AEU_INPUTS_ATTN_BITS_TSEMI_HW_INTERRUPT (0x1 << 29) +#define AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR (0x1 << 28) +#define AEU_INPUTS_ATTN_BITS_UCM_HW_INTERRUPT (0x1 << 23) +#define AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR (0x1 << 22) +#define AEU_INPUTS_ATTN_BITS_UPB_HW_INTERRUPT (0x1 << 27) +#define AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR (0x1 << 26) +#define AEU_INPUTS_ATTN_BITS_USDM_HW_INTERRUPT (0x1 << 21) +#define AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR (0x1 << 20) +#define AEU_INPUTS_ATTN_BITS_USEMI_HW_INTERRUPT (0x1 << 25) +#define AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR (0x1 << 24) +#define AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR (0x1 << 16) +#define AEU_INPUTS_ATTN_BITS_XCM_HW_INTERRUPT (0x1 << 9) +#define AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR (0x1 << 8) +#define AEU_INPUTS_ATTN_BITS_XSDM_HW_INTERRUPT (0x1 << 7) +#define AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR (0x1 << 6) +#define AEU_INPUTS_ATTN_BITS_XSEMI_HW_INTERRUPT (0x1 << 11) +#define AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR (0x1 << 10) +#define RESERVED_GENERAL_ATTENTION_BIT_0 0 + +#define EVEREST_GEN_ATTN_IN_USE_MASK 0x7ffe0 +#define EVEREST_LATCHED_ATTN_IN_USE_MASK 0xffe00000 + 
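The AEU_INPUTS_ATTN_BITS_* values above are single-bit masks that are tested against the 32-bit attention status word of their own AEU group; the same bit position carries a different meaning in a different group, so a mask is only meaningful against the status read for the group it belongs to. A minimal sketch of that usage pattern follows, assuming attn_status already holds the status word read for the matching group; the helper name is illustrative only and uses just the BRB masks defined above.

#include <stdbool.h>
#include <stdint.h>

/* Illustrative helper (not from the driver): report whether the BRB block
 * has raised either of its two attention inputs. Bit 19 flags a BRB
 * hardware interrupt and bit 18 a BRB parity error, per the masks above.
 */
static inline bool example_brb_attn_asserted(uint32_t attn_status)
{
	return (attn_status & (AEU_INPUTS_ATTN_BITS_BRB_HW_INTERRUPT |
			       AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR)) != 0;
}
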
+#define RESERVED_GENERAL_ATTENTION_BIT_6 6
+#define RESERVED_GENERAL_ATTENTION_BIT_7 7
+#define RESERVED_GENERAL_ATTENTION_BIT_8 8
+#define RESERVED_GENERAL_ATTENTION_BIT_9 9
+#define RESERVED_GENERAL_ATTENTION_BIT_10 10
+#define RESERVED_GENERAL_ATTENTION_BIT_11 11
+#define RESERVED_GENERAL_ATTENTION_BIT_12 12
+#define RESERVED_GENERAL_ATTENTION_BIT_13 13
+#define RESERVED_GENERAL_ATTENTION_BIT_14 14
+#define RESERVED_GENERAL_ATTENTION_BIT_15 15
+#define RESERVED_GENERAL_ATTENTION_BIT_16 16
+#define RESERVED_GENERAL_ATTENTION_BIT_17 17
+#define RESERVED_GENERAL_ATTENTION_BIT_18 18
+#define RESERVED_GENERAL_ATTENTION_BIT_19 19
+#define RESERVED_GENERAL_ATTENTION_BIT_20 20
+#define RESERVED_GENERAL_ATTENTION_BIT_21 21
+
+/* storm asserts attention bits */
+#define TSTORM_FATAL_ASSERT_ATTENTION_BIT RESERVED_GENERAL_ATTENTION_BIT_7
+#define USTORM_FATAL_ASSERT_ATTENTION_BIT RESERVED_GENERAL_ATTENTION_BIT_8
+#define CSTORM_FATAL_ASSERT_ATTENTION_BIT RESERVED_GENERAL_ATTENTION_BIT_9
+#define XSTORM_FATAL_ASSERT_ATTENTION_BIT RESERVED_GENERAL_ATTENTION_BIT_10
+
+/* mcp error attention bit */
+#define MCP_FATAL_ASSERT_ATTENTION_BIT RESERVED_GENERAL_ATTENTION_BIT_11
+
+/* E1H NIG status sync attention mapped to groups 4-7 */
+#define LINK_SYNC_ATTENTION_BIT_FUNC_0 RESERVED_GENERAL_ATTENTION_BIT_12
+#define LINK_SYNC_ATTENTION_BIT_FUNC_1 RESERVED_GENERAL_ATTENTION_BIT_13
+#define LINK_SYNC_ATTENTION_BIT_FUNC_2 RESERVED_GENERAL_ATTENTION_BIT_14
+#define LINK_SYNC_ATTENTION_BIT_FUNC_3 RESERVED_GENERAL_ATTENTION_BIT_15
+#define LINK_SYNC_ATTENTION_BIT_FUNC_4 RESERVED_GENERAL_ATTENTION_BIT_16
+#define LINK_SYNC_ATTENTION_BIT_FUNC_5 RESERVED_GENERAL_ATTENTION_BIT_17
+#define LINK_SYNC_ATTENTION_BIT_FUNC_6 RESERVED_GENERAL_ATTENTION_BIT_18
+#define LINK_SYNC_ATTENTION_BIT_FUNC_7 RESERVED_GENERAL_ATTENTION_BIT_19
+
+/* Used for error recovery: changing this will require further changes in
+ * code that assumes error recovery uses general attention bit 20.
+ */
+#define ERROR_RECOVERY_ATTENTION_BIT \
+	RESERVED_GENERAL_ATTENTION_BIT_20
+#define RESERVED_ATTENTION_BIT \
+	RESERVED_GENERAL_ATTENTION_BIT_21
+
+#define LATCHED_ATTN_RBCR 23
+#define LATCHED_ATTN_RBCT 24
+#define LATCHED_ATTN_RBCN 25
+#define LATCHED_ATTN_RBCU 26
+#define LATCHED_ATTN_RBCP 27
+#define LATCHED_ATTN_TIMEOUT_GRC 28
+#define LATCHED_ATTN_RSVD_GRC 29
+#define LATCHED_ATTN_ROM_PARITY_MCP 30
+#define LATCHED_ATTN_UM_RX_PARITY_MCP 31
+#define LATCHED_ATTN_UM_TX_PARITY_MCP 32
+#define LATCHED_ATTN_SCPAD_PARITY_MCP 33
+
+#define GENERAL_ATTEN_WORD(atten_name) ((94 + atten_name) / 32)
+#define GENERAL_ATTEN_OFFSET(atten_name) (1UL << ((94 + atten_name) % 32))
+/*
+ * This file defines the GRC base address for every block.
+ * This file is included by chipsim, asm microcode and cpp microcode.
+ * These values are used in Design.xml in the regBase attribute.
+ * Use the base with the generated offsets of specific registers.
+ */ + +#define GRCBASE_PXPCS 0x000000 +#define GRCBASE_PCICONFIG 0x002000 +#define GRCBASE_PCIREG 0x002400 +#define GRCBASE_EMAC0 0x008000 +#define GRCBASE_EMAC1 0x008400 +#define GRCBASE_DBU 0x008800 +#define GRCBASE_PGLUE_B 0x009000 +#define GRCBASE_MISC 0x00A000 +#define GRCBASE_DBG 0x00C000 +#define GRCBASE_NIG 0x010000 +#define GRCBASE_XCM 0x020000 +#define GRCBASE_PRS 0x040000 +#define GRCBASE_SRCH 0x040400 +#define GRCBASE_TSDM 0x042000 +#define GRCBASE_TCM 0x050000 +#define GRCBASE_BRB1 0x060000 +#define GRCBASE_MCP 0x080000 +#define GRCBASE_UPB 0x0C1000 +#define GRCBASE_CSDM 0x0C2000 +#define GRCBASE_USDM 0x0C4000 +#define GRCBASE_CCM 0x0D0000 +#define GRCBASE_UCM 0x0E0000 +#define GRCBASE_CDU 0x101000 +#define GRCBASE_DMAE 0x102000 +#define GRCBASE_PXP 0x103000 +#define GRCBASE_CFC 0x104000 +#define GRCBASE_HC 0x108000 +#define GRCBASE_ATC 0x110000 +#define GRCBASE_PXP2 0x120000 +#define GRCBASE_IGU 0x130000 +#define GRCBASE_PBF 0x140000 +#define GRCBASE_UMAC0 0x160000 +#define GRCBASE_UMAC1 0x160400 +#define GRCBASE_XPB 0x161000 +#define GRCBASE_MSTAT0 0x162000 +#define GRCBASE_MSTAT1 0x162800 +#define GRCBASE_XMAC0 0x163000 +#define GRCBASE_XMAC1 0x163800 +#define GRCBASE_TIMERS 0x164000 +#define GRCBASE_XSDM 0x166000 +#define GRCBASE_QM 0x168000 +#define GRCBASE_QM_4PORT 0x168000 +#define GRCBASE_DQ 0x170000 +#define GRCBASE_TSEM 0x180000 +#define GRCBASE_CSEM 0x200000 +#define GRCBASE_XSEM 0x280000 +#define GRCBASE_XSEM_4PORT 0x280000 +#define GRCBASE_USEM 0x300000 +#define GRCBASE_MCP_A 0x380000 +#define GRCBASE_MISC_AEU GRCBASE_MISC +#define GRCBASE_Tstorm GRCBASE_TSEM +#define GRCBASE_Cstorm GRCBASE_CSEM +#define GRCBASE_Xstorm GRCBASE_XSEM +#define GRCBASE_Ustorm GRCBASE_USEM + + +/* offset of configuration space in the pci core register */ +#define PCICFG_OFFSET 0x2000 +#define PCICFG_VENDOR_ID_OFFSET 0x00 +#define PCICFG_DEVICE_ID_OFFSET 0x02 +#define PCICFG_COMMAND_OFFSET 0x04 +#define PCICFG_COMMAND_IO_SPACE (1<<0) +#define PCICFG_COMMAND_MEM_SPACE (1<<1) +#define PCICFG_COMMAND_BUS_MASTER (1<<2) +#define PCICFG_COMMAND_SPECIAL_CYCLES (1<<3) +#define PCICFG_COMMAND_MWI_CYCLES (1<<4) +#define PCICFG_COMMAND_VGA_SNOOP (1<<5) +#define PCICFG_COMMAND_PERR_ENA (1<<6) +#define PCICFG_COMMAND_STEPPING (1<<7) +#define PCICFG_COMMAND_SERR_ENA (1<<8) +#define PCICFG_COMMAND_FAST_B2B (1<<9) +#define PCICFG_COMMAND_INT_DISABLE (1<<10) +#define PCICFG_COMMAND_RESERVED (0x1f<<11) +#define PCICFG_STATUS_OFFSET 0x06 +#define PCICFG_REVISION_ID_OFFSET 0x08 +#define PCICFG_REVESION_ID_MASK 0xff +#define PCICFG_REVESION_ID_ERROR_VAL 0xff +#define PCICFG_CACHE_LINE_SIZE 0x0c +#define PCICFG_LATENCY_TIMER 0x0d +#define PCICFG_HEADER_TYPE 0x0e +#define PCICFG_HEADER_TYPE_NORMAL 0 +#define PCICFG_HEADER_TYPE_BRIDGE 1 +#define PCICFG_HEADER_TYPE_CARDBUS 2 +#define PCICFG_BAR_1_LOW 0x10 +#define PCICFG_BAR_1_HIGH 0x14 +#define PCICFG_BAR_2_LOW 0x18 +#define PCICFG_BAR_2_HIGH 0x1c +#define PCICFG_BAR_3_LOW 0x20 +#define PCICFG_BAR_3_HIGH 0x24 +#define PCICFG_SUBSYSTEM_VENDOR_ID_OFFSET 0x2c +#define PCICFG_SUBSYSTEM_ID_OFFSET 0x2e +#define PCICFG_INT_LINE 0x3c +#define PCICFG_INT_PIN 0x3d +#define PCICFG_PM_CAPABILITY 0x48 +#define PCICFG_PM_CAPABILITY_VERSION (0x3<<16) +#define PCICFG_PM_CAPABILITY_CLOCK (1<<19) +#define PCICFG_PM_CAPABILITY_RESERVED (1<<20) +#define PCICFG_PM_CAPABILITY_DSI (1<<21) +#define PCICFG_PM_CAPABILITY_AUX_CURRENT (0x7<<22) +#define PCICFG_PM_CAPABILITY_D1_SUPPORT (1<<25) +#define PCICFG_PM_CAPABILITY_D2_SUPPORT (1<<26) +#define PCICFG_PM_CAPABILITY_PME_IN_D0 
(1<<27) +#define PCICFG_PM_CAPABILITY_PME_IN_D1 (1<<28) +#define PCICFG_PM_CAPABILITY_PME_IN_D2 (1<<29) +#define PCICFG_PM_CAPABILITY_PME_IN_D3_HOT (1<<30) +#define PCICFG_PM_CAPABILITY_PME_IN_D3_COLD (1<<31) +#define PCICFG_PM_CSR_OFFSET 0x4c +#define PCICFG_PM_CSR_STATE (0x3<<0) +#define PCICFG_PM_CSR_PME_ENABLE (1<<8) +#define PCICFG_PM_CSR_PME_STATUS (1<<15) +#define PCICFG_VPD_FLAG_ADDR_OFFSET 0x50 +#define PCICFG_VPD_DATA_OFFSET 0x54 +#define PCICFG_MSI_CAP_ID_OFFSET 0x58 +#define PCICFG_MSI_CONTROL_ENABLE (0x1<<16) +#define PCICFG_MSI_CONTROL_MCAP (0x7<<17) +#define PCICFG_MSI_CONTROL_MENA (0x7<<20) +#define PCICFG_MSI_CONTROL_64_BIT_ADDR_CAP (0x1<<23) +#define PCICFG_MSI_CONTROL_MSI_PVMASK_CAPABLE (0x1<<24) +#define PCICFG_MSI_ADDR_LOW_OFFSET 0x5c +#define PCICFG_MSI_ADDR_HIGH_OFFSET 0x60 +#define PCICFG_MSI_DATA_OFFSET 0x64 +#define PCICFG_GRC_ADDRESS 0x78 +#define PCICFG_GRC_DATA 0x80 +#define PCICFG_ME_REGISTER 0x98 +#define PCICFG_MSIX_CAP_ID_OFFSET 0xa0 +#define PCICFG_MSIX_CONTROL_TABLE_SIZE (0x7ff<<16) +#define PCICFG_MSIX_CONTROL_RESERVED (0x7<<27) +#define PCICFG_MSIX_CONTROL_FUNC_MASK (0x1<<30) +#define PCICFG_MSIX_CONTROL_MSIX_ENABLE (0x1<<31) + +#define PCICFG_DEVICE_CONTROL 0xb4 +#define PCICFG_DEVICE_CONTROL_NP_TRANSACTION_PEND (1<<21) +#define PCICFG_DEVICE_STATUS 0xb6 +#define PCICFG_DEVICE_STATUS_CORR_ERR_DET (1<<0) +#define PCICFG_DEVICE_STATUS_NON_FATAL_ERR_DET (1<<1) +#define PCICFG_DEVICE_STATUS_FATAL_ERR_DET (1<<2) +#define PCICFG_DEVICE_STATUS_UNSUP_REQ_DET (1<<3) +#define PCICFG_DEVICE_STATUS_AUX_PWR_DET (1<<4) +#define PCICFG_DEVICE_STATUS_NO_PEND (1<<5) +#define PCICFG_LINK_CONTROL 0xbc + + +/* config_2 offset */ +#define GRC_CONFIG_2_SIZE_REG 0x408 +#define PCI_CONFIG_2_BAR1_SIZE (0xfL<<0) +#define PCI_CONFIG_2_BAR1_SIZE_DISABLED (0L<<0) +#define PCI_CONFIG_2_BAR1_SIZE_64K (1L<<0) +#define PCI_CONFIG_2_BAR1_SIZE_128K (2L<<0) +#define PCI_CONFIG_2_BAR1_SIZE_256K (3L<<0) +#define PCI_CONFIG_2_BAR1_SIZE_512K (4L<<0) +#define PCI_CONFIG_2_BAR1_SIZE_1M (5L<<0) +#define PCI_CONFIG_2_BAR1_SIZE_2M (6L<<0) +#define PCI_CONFIG_2_BAR1_SIZE_4M (7L<<0) +#define PCI_CONFIG_2_BAR1_SIZE_8M (8L<<0) +#define PCI_CONFIG_2_BAR1_SIZE_16M (9L<<0) +#define PCI_CONFIG_2_BAR1_SIZE_32M (10L<<0) +#define PCI_CONFIG_2_BAR1_SIZE_64M (11L<<0) +#define PCI_CONFIG_2_BAR1_SIZE_128M (12L<<0) +#define PCI_CONFIG_2_BAR1_SIZE_256M (13L<<0) +#define PCI_CONFIG_2_BAR1_SIZE_512M (14L<<0) +#define PCI_CONFIG_2_BAR1_SIZE_1G (15L<<0) +#define PCI_CONFIG_2_BAR1_64ENA (1L<<4) +#define PCI_CONFIG_2_EXP_ROM_RETRY (1L<<5) +#define PCI_CONFIG_2_CFG_CYCLE_RETRY (1L<<6) +#define PCI_CONFIG_2_FIRST_CFG_DONE (1L<<7) +#define PCI_CONFIG_2_EXP_ROM_SIZE (0xffL<<8) +#define PCI_CONFIG_2_EXP_ROM_SIZE_DISABLED (0L<<8) +#define PCI_CONFIG_2_EXP_ROM_SIZE_2K (1L<<8) +#define PCI_CONFIG_2_EXP_ROM_SIZE_4K (2L<<8) +#define PCI_CONFIG_2_EXP_ROM_SIZE_8K (3L<<8) +#define PCI_CONFIG_2_EXP_ROM_SIZE_16K (4L<<8) +#define PCI_CONFIG_2_EXP_ROM_SIZE_32K (5L<<8) +#define PCI_CONFIG_2_EXP_ROM_SIZE_64K (6L<<8) +#define PCI_CONFIG_2_EXP_ROM_SIZE_128K (7L<<8) +#define PCI_CONFIG_2_EXP_ROM_SIZE_256K (8L<<8) +#define PCI_CONFIG_2_EXP_ROM_SIZE_512K (9L<<8) +#define PCI_CONFIG_2_EXP_ROM_SIZE_1M (10L<<8) +#define PCI_CONFIG_2_EXP_ROM_SIZE_2M (11L<<8) +#define PCI_CONFIG_2_EXP_ROM_SIZE_4M (12L<<8) +#define PCI_CONFIG_2_EXP_ROM_SIZE_8M (13L<<8) +#define PCI_CONFIG_2_EXP_ROM_SIZE_16M (14L<<8) +#define PCI_CONFIG_2_EXP_ROM_SIZE_32M (15L<<8) +#define PCI_CONFIG_2_BAR_PREFETCH (1L<<16) +#define PCI_CONFIG_2_RESERVED0 (0x7fffL<<17) + +/* 
config_3 offset */ +#define GRC_CONFIG_3_SIZE_REG 0x40c +#define PCI_CONFIG_3_STICKY_BYTE (0xffL<<0) +#define PCI_CONFIG_3_FORCE_PME (1L<<24) +#define PCI_CONFIG_3_PME_STATUS (1L<<25) +#define PCI_CONFIG_3_PME_ENABLE (1L<<26) +#define PCI_CONFIG_3_PM_STATE (0x3L<<27) +#define PCI_CONFIG_3_VAUX_PRESET (1L<<30) +#define PCI_CONFIG_3_PCI_POWER (1L<<31) + +#define GRC_REG_DEVICE_CONTROL 0x4d8 +#define PCIE_SRIOV_DISABLE_IN_PROGRESS \ + (1 << 29) /*When VF Enable is cleared(after it was previously set), + this register will read a value of 1, indicating that all the + VFs that belong to this PF should be flushed. + Software should clear this bit within 1 second of VF Enable + being set by writing a 1 to it, so that VFs are visible to the system again. + WC */ +#define PCIE_FLR_IN_PROGRESS \ + (1 << 27) /*When FLR is initiated, this register will read a \ + value of 1 indicating that the + Function is in FLR state. Func can be brought out of FLR state either by + writing 1 to this register (at least 50 ms after FLR was initiated), + or it can also be cleared automatically after 55 ms if auto_clear bit + in private reg space is set. This bit also exists in VF register space + WC */ + +#define GRC_BAR2_CONFIG 0x4e0 +#define PCI_CONFIG_2_BAR2_SIZE (0xfL<<0) +#define PCI_CONFIG_2_BAR2_SIZE_DISABLED (0L<<0) +#define PCI_CONFIG_2_BAR2_SIZE_64K (1L<<0) +#define PCI_CONFIG_2_BAR2_SIZE_128K (2L<<0) +#define PCI_CONFIG_2_BAR2_SIZE_256K (3L<<0) +#define PCI_CONFIG_2_BAR2_SIZE_512K (4L<<0) +#define PCI_CONFIG_2_BAR2_SIZE_1M (5L<<0) +#define PCI_CONFIG_2_BAR2_SIZE_2M (6L<<0) +#define PCI_CONFIG_2_BAR2_SIZE_4M (7L<<0) +#define PCI_CONFIG_2_BAR2_SIZE_8M (8L<<0) +#define PCI_CONFIG_2_BAR2_SIZE_16M (9L<<0) +#define PCI_CONFIG_2_BAR2_SIZE_32M (10L<<0) +#define PCI_CONFIG_2_BAR2_SIZE_64M (11L<<0) +#define PCI_CONFIG_2_BAR2_SIZE_128M (12L<<0) +#define PCI_CONFIG_2_BAR2_SIZE_256M (13L<<0) +#define PCI_CONFIG_2_BAR2_SIZE_512M (14L<<0) +#define PCI_CONFIG_2_BAR2_SIZE_1G (15L<<0) +#define PCI_CONFIG_2_BAR2_64ENA (1L<<4) + +#define GRC_BAR3_CONFIG 0x4f4 +#define PCI_CONFIG_2_BAR3_SIZE (0xfL<<0) +#define PCI_CONFIG_2_BAR3_SIZE_DISABLED (0L<<0) +#define PCI_CONFIG_2_BAR3_SIZE_64K (1L<<0) +#define PCI_CONFIG_2_BAR3_SIZE_128K (2L<<0) +#define PCI_CONFIG_2_BAR3_SIZE_256K (3L<<0) +#define PCI_CONFIG_2_BAR3_SIZE_512K (4L<<0) +#define PCI_CONFIG_2_BAR3_SIZE_1M (5L<<0) +#define PCI_CONFIG_2_BAR3_SIZE_2M (6L<<0) +#define PCI_CONFIG_2_BAR3_SIZE_4M (7L<<0) +#define PCI_CONFIG_2_BAR3_SIZE_8M (8L<<0) +#define PCI_CONFIG_2_BAR3_SIZE_16M (9L<<0) +#define PCI_CONFIG_2_BAR3_SIZE_32M (10L<<0) +#define PCI_CONFIG_2_BAR3_SIZE_64M (11L<<0) +#define PCI_CONFIG_2_BAR3_SIZE_128M (12L<<0) +#define PCI_CONFIG_2_BAR3_SIZE_256M (13L<<0) +#define PCI_CONFIG_2_BAR3_SIZE_512M (14L<<0) +#define PCI_CONFIG_2_BAR3_SIZE_1G (15L<<0) +#define PCI_CONFIG_2_BAR3_64ENA (1L<<4) + +#define PCI_PM_DATA_A 0x410 +#define PCI_PM_DATA_B 0x414 +#define PCI_ID_VAL1 0x434 +#define PCI_ID_VAL2 0x438 +#define PCI_ID_VAL3 0x43c +#define PCI_ID_VAL3_REVISION_ID_ERROR (0xffL<<24) + + +#define GRC_CONFIG_REG_VF_BAR_REG_1 0x608 +#define GRC_CONFIG_REG_VF_BAR_REG_BAR0_SIZE 0xf + +#define GRC_CONFIG_REG_VF_MSIX_CONTROL 0x61C +#define GRC_CR_VF_MSIX_CTRL_VF_MSIX_TBL_SIZE_MASK \ + 0x3F /*This field resides in VF only and does not exist in PF. + This register controls the read value of the MSIX_CONTROL[10:0] register + in the VF configuration space. A value of "00000000011" indicates + a table size of 4. 
The value is controlled by IOV_MSIX_TBL_SIZ + define in version.v */ + +#define GRC_CONFIG_REG_PF_INIT_VF 0x624 +#define GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK \ + 0xf /*First VF_NUM for PF is encoded in this register. + The number of VFs assigned to a PF is assumed to be a multiple of 8. + Software should program these bits based on Total Number of VFs \ + programmed for each PF. + Since registers from 0x000-0x7ff are spilt across functions, each PF will have + the same location for the same 4 bits*/ + +#define PXPCS_TL_CONTROL_5 0x814 +#define PXPCS_TL_CONTROL_5_UNKNOWNTYPE_ERR_ATTN (1 << 29) /*WC*/ +#define PXPCS_TL_CONTROL_5_BOUNDARY4K_ERR_ATTN (1 << 28) /*WC*/ +#define PXPCS_TL_CONTROL_5_MRRS_ERR_ATTN (1 << 27) /*WC*/ +#define PXPCS_TL_CONTROL_5_MPS_ERR_ATTN (1 << 26) /*WC*/ +#define PXPCS_TL_CONTROL_5_TTX_BRIDGE_FORWARD_ERR (1 << 25) /*WC*/ +#define PXPCS_TL_CONTROL_5_TTX_TXINTF_OVERFLOW (1 << 24) /*WC*/ +#define PXPCS_TL_CONTROL_5_PHY_ERR_ATTN (1 << 23) /*RO*/ +#define PXPCS_TL_CONTROL_5_DL_ERR_ATTN (1 << 22) /*RO*/ +#define PXPCS_TL_CONTROL_5_TTX_ERR_NP_TAG_IN_USE (1 << 21) /*WC*/ +#define PXPCS_TL_CONTROL_5_TRX_ERR_UNEXP_RTAG (1 << 20) /*WC*/ +#define PXPCS_TL_CONTROL_5_PRI_SIG_TARGET_ABORT1 (1 << 19) /*WC*/ +#define PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 (1 << 18) /*WC*/ +#define PXPCS_TL_CONTROL_5_ERR_ECRC1 (1 << 17) /*WC*/ +#define PXPCS_TL_CONTROL_5_ERR_MALF_TLP1 (1 << 16) /*WC*/ +#define PXPCS_TL_CONTROL_5_ERR_RX_OFLOW1 (1 << 15) /*WC*/ +#define PXPCS_TL_CONTROL_5_ERR_UNEXP_CPL1 (1 << 14) /*WC*/ +#define PXPCS_TL_CONTROL_5_ERR_MASTER_ABRT1 (1 << 13) /*WC*/ +#define PXPCS_TL_CONTROL_5_ERR_CPL_TIMEOUT1 (1 << 12) /*WC*/ +#define PXPCS_TL_CONTROL_5_ERR_FC_PRTL1 (1 << 11) /*WC*/ +#define PXPCS_TL_CONTROL_5_ERR_PSND_TLP1 (1 << 10) /*WC*/ +#define PXPCS_TL_CONTROL_5_PRI_SIG_TARGET_ABORT (1 << 9) /*WC*/ +#define PXPCS_TL_CONTROL_5_ERR_UNSPPORT (1 << 8) /*WC*/ +#define PXPCS_TL_CONTROL_5_ERR_ECRC (1 << 7) /*WC*/ +#define PXPCS_TL_CONTROL_5_ERR_MALF_TLP (1 << 6) /*WC*/ +#define PXPCS_TL_CONTROL_5_ERR_RX_OFLOW (1 << 5) /*WC*/ +#define PXPCS_TL_CONTROL_5_ERR_UNEXP_CPL (1 << 4) /*WC*/ +#define PXPCS_TL_CONTROL_5_ERR_MASTER_ABRT (1 << 3) /*WC*/ +#define PXPCS_TL_CONTROL_5_ERR_CPL_TIMEOUT (1 << 2) /*WC*/ +#define PXPCS_TL_CONTROL_5_ERR_FC_PRTL (1 << 1) /*WC*/ +#define PXPCS_TL_CONTROL_5_ERR_PSND_TLP (1 << 0) /*WC*/ + + +#define PXPCS_TL_FUNC345_STAT 0x854 +#define PXPCS_TL_FUNC345_STAT_PRI_SIG_TARGET_ABORT4 (1 << 29) /* WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 \ + (1 << 28) /* Unsupported Request Error Status in function4, if \ + set, generate pcie_err_attn output when this error is seen. WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_ECRC4 \ + (1 << 27) /* ECRC Error TLP Status Status in function 4, if set, \ + generate pcie_err_attn output when this error is seen.. WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_MALF_TLP4 \ + (1 << 26) /* Malformed TLP Status Status in function 4, if set, \ + generate pcie_err_attn output when this error is seen.. WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_RX_OFLOW4 \ + (1 << 25) /* Receiver Overflow Status Status in function 4, if \ + set, generate pcie_err_attn output when this error is seen.. WC \ + */ +#define PXPCS_TL_FUNC345_STAT_ERR_UNEXP_CPL4 \ + (1 << 24) /* Unexpected Completion Status Status in function 4, \ + if set, generate pcie_err_attn output when this error is seen. WC \ + */ +#define PXPCS_TL_FUNC345_STAT_ERR_MASTER_ABRT4 \ + (1 << 23) /* Receive UR Statusin function 4. If set, generate \ + pcie_err_attn output when this error is seen. 
WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_CPL_TIMEOUT4 \ + (1 << 22) /* Completer Timeout Status Status in function 4, if \ + set, generate pcie_err_attn output when this error is seen. WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_FC_PRTL4 \ + (1 << 21) /* Flow Control Protocol Error Status Status in \ + function 4, if set, generate pcie_err_attn output when this error \ + is seen. WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_PSND_TLP4 \ + (1 << 20) /* Poisoned Error Status Status in function 4, if set, \ + generate pcie_err_attn output when this error is seen.. WC */ +#define PXPCS_TL_FUNC345_STAT_PRI_SIG_TARGET_ABORT3 (1 << 19) /* WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 \ + (1 << 18) /* Unsupported Request Error Status in function3, if \ + set, generate pcie_err_attn output when this error is seen. WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_ECRC3 \ + (1 << 17) /* ECRC Error TLP Status Status in function 3, if set, \ + generate pcie_err_attn output when this error is seen.. WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_MALF_TLP3 \ + (1 << 16) /* Malformed TLP Status Status in function 3, if set, \ + generate pcie_err_attn output when this error is seen.. WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_RX_OFLOW3 \ + (1 << 15) /* Receiver Overflow Status Status in function 3, if \ + set, generate pcie_err_attn output when this error is seen.. WC \ + */ +#define PXPCS_TL_FUNC345_STAT_ERR_UNEXP_CPL3 \ + (1 << 14) /* Unexpected Completion Status Status in function 3, \ + if set, generate pcie_err_attn output when this error is seen. WC \ + */ +#define PXPCS_TL_FUNC345_STAT_ERR_MASTER_ABRT3 \ + (1 << 13) /* Receive UR Statusin function 3. If set, generate \ + pcie_err_attn output when this error is seen. WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_CPL_TIMEOUT3 \ + (1 << 12) /* Completer Timeout Status Status in function 3, if \ + set, generate pcie_err_attn output when this error is seen. WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_FC_PRTL3 \ + (1 << 11) /* Flow Control Protocol Error Status Status in \ + function 3, if set, generate pcie_err_attn output when this error \ + is seen. WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_PSND_TLP3 \ + (1 << 10) /* Poisoned Error Status Status in function 3, if set, \ + generate pcie_err_attn output when this error is seen.. WC */ +#define PXPCS_TL_FUNC345_STAT_PRI_SIG_TARGET_ABORT2 (1 << 9) /* WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2 \ + (1 << 8) /* Unsupported Request Error Status for Function 2, if \ + set, generate pcie_err_attn output when this error is seen. WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_ECRC2 \ + (1 << 7) /* ECRC Error TLP Status Status for Function 2, if set, \ + generate pcie_err_attn output when this error is seen.. WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_MALF_TLP2 \ + (1 << 6) /* Malformed TLP Status Status for Function 2, if set, \ + generate pcie_err_attn output when this error is seen.. WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_RX_OFLOW2 \ + (1 << 5) /* Receiver Overflow Status Status for Function 2, if \ + set, generate pcie_err_attn output when this error is seen.. WC \ + */ +#define PXPCS_TL_FUNC345_STAT_ERR_UNEXP_CPL2 \ + (1 << 4) /* Unexpected Completion Status Status for Function 2, \ + if set, generate pcie_err_attn output when this error is seen. WC \ + */ +#define PXPCS_TL_FUNC345_STAT_ERR_MASTER_ABRT2 \ + (1 << 3) /* Receive UR Statusfor Function 2. If set, generate \ + pcie_err_attn output when this error is seen. 
WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_CPL_TIMEOUT2 \ + (1 << 2) /* Completer Timeout Status Status for Function 2, if \ + set, generate pcie_err_attn output when this error is seen. WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_FC_PRTL2 \ + (1 << 1) /* Flow Control Protocol Error Status Status for \ + Function 2, if set, generate pcie_err_attn output when this error \ + is seen. WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_PSND_TLP2 \ + (1 << 0) /* Poisoned Error Status Status for Function 2, if set, \ + generate pcie_err_attn output when this error is seen.. WC */ + + +#define PXPCS_TL_FUNC678_STAT 0x85C +#define PXPCS_TL_FUNC678_STAT_PRI_SIG_TARGET_ABORT7 (1 << 29) /* WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 \ + (1 << 28) /* Unsupported Request Error Status in function7, if \ + set, generate pcie_err_attn output when this error is seen. WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_ECRC7 \ + (1 << 27) /* ECRC Error TLP Status Status in function 7, if set, \ + generate pcie_err_attn output when this error is seen.. WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_MALF_TLP7 \ + (1 << 26) /* Malformed TLP Status Status in function 7, if set, \ + generate pcie_err_attn output when this error is seen.. WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_RX_OFLOW7 \ + (1 << 25) /* Receiver Overflow Status Status in function 7, if \ + set, generate pcie_err_attn output when this error is seen.. WC \ + */ +#define PXPCS_TL_FUNC678_STAT_ERR_UNEXP_CPL7 \ + (1 << 24) /* Unexpected Completion Status Status in function 7, \ + if set, generate pcie_err_attn output when this error is seen. WC \ + */ +#define PXPCS_TL_FUNC678_STAT_ERR_MASTER_ABRT7 \ + (1 << 23) /* Receive UR Statusin function 7. If set, generate \ + pcie_err_attn output when this error is seen. WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_CPL_TIMEOUT7 \ + (1 << 22) /* Completer Timeout Status Status in function 7, if \ + set, generate pcie_err_attn output when this error is seen. WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_FC_PRTL7 \ + (1 << 21) /* Flow Control Protocol Error Status Status in \ + function 7, if set, generate pcie_err_attn output when this error \ + is seen. WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_PSND_TLP7 \ + (1 << 20) /* Poisoned Error Status Status in function 7, if set, \ + generate pcie_err_attn output when this error is seen.. WC */ +#define PXPCS_TL_FUNC678_STAT_PRI_SIG_TARGET_ABORT6 (1 << 19) /* WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 \ + (1 << 18) /* Unsupported Request Error Status in function6, if \ + set, generate pcie_err_attn output when this error is seen. WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_ECRC6 \ + (1 << 17) /* ECRC Error TLP Status Status in function 6, if set, \ + generate pcie_err_attn output when this error is seen.. WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_MALF_TLP6 \ + (1 << 16) /* Malformed TLP Status Status in function 6, if set, \ + generate pcie_err_attn output when this error is seen.. WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_RX_OFLOW6 \ + (1 << 15) /* Receiver Overflow Status Status in function 6, if \ + set, generate pcie_err_attn output when this error is seen.. WC \ + */ +#define PXPCS_TL_FUNC678_STAT_ERR_UNEXP_CPL6 \ + (1 << 14) /* Unexpected Completion Status Status in function 6, \ + if set, generate pcie_err_attn output when this error is seen. WC \ + */ +#define PXPCS_TL_FUNC678_STAT_ERR_MASTER_ABRT6 \ + (1 << 13) /* Receive UR Statusin function 6. If set, generate \ + pcie_err_attn output when this error is seen. 
WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_CPL_TIMEOUT6 \ + (1 << 12) /* Completer Timeout Status Status in function 6, if \ + set, generate pcie_err_attn output when this error is seen. WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_FC_PRTL6 \ + (1 << 11) /* Flow Control Protocol Error Status Status in \ + function 6, if set, generate pcie_err_attn output when this error \ + is seen. WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_PSND_TLP6 \ + (1 << 10) /* Poisoned Error Status Status in function 6, if set, \ + generate pcie_err_attn output when this error is seen.. WC */ +#define PXPCS_TL_FUNC678_STAT_PRI_SIG_TARGET_ABORT5 (1 << 9) /* WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5 \ + (1 << 8) /* Unsupported Request Error Status for Function 5, if \ + set, generate pcie_err_attn output when this error is seen. WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_ECRC5 \ + (1 << 7) /* ECRC Error TLP Status Status for Function 5, if set, \ + generate pcie_err_attn output when this error is seen.. WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_MALF_TLP5 \ + (1 << 6) /* Malformed TLP Status Status for Function 5, if set, \ + generate pcie_err_attn output when this error is seen.. WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_RX_OFLOW5 \ + (1 << 5) /* Receiver Overflow Status Status for Function 5, if \ + set, generate pcie_err_attn output when this error is seen.. WC \ + */ +#define PXPCS_TL_FUNC678_STAT_ERR_UNEXP_CPL5 \ + (1 << 4) /* Unexpected Completion Status Status for Function 5, \ + if set, generate pcie_err_attn output when this error is seen. WC \ + */ +#define PXPCS_TL_FUNC678_STAT_ERR_MASTER_ABRT5 \ + (1 << 3) /* Receive UR Statusfor Function 5. If set, generate \ + pcie_err_attn output when this error is seen. WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_CPL_TIMEOUT5 \ + (1 << 2) /* Completer Timeout Status Status for Function 5, if \ + set, generate pcie_err_attn output when this error is seen. WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_FC_PRTL5 \ + (1 << 1) /* Flow Control Protocol Error Status Status for \ + Function 5, if set, generate pcie_err_attn output when this error \ + is seen. WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_PSND_TLP5 \ + (1 << 0) /* Poisoned Error Status Status for Function 5, if set, \ + generate pcie_err_attn output when this error is seen.. WC */ + + +#define BAR_USTRORM_INTMEM 0x400000 +#define BAR_CSTRORM_INTMEM 0x410000 +#define BAR_XSTRORM_INTMEM 0x420000 +#define BAR_TSTRORM_INTMEM 0x430000 + +/* for accessing the IGU in case of status block ACK */ +#define BAR_IGU_INTMEM 0x440000 + +#define BAR_DOORBELL_OFFSET 0x800000 + +#define BAR_ME_REGISTER 0x450000 +#define ME_REG_PF_NUM_SHIFT 0 +#define ME_REG_PF_NUM \ + (7L< + * David Christensen + * Gary Zambrano + * + * Copyright (c) 2013-2015 Brocade Communications Systems, Inc. + * Copyright (c) 2015-2018 Cavium Inc. + * All rights reserved. 
+ * www.cavium.com + */ + +#include "bnx2x.h" +#include "ecore_init.h" + +/**** Exe Queue interfaces ****/ + +/** + * ecore_exe_queue_init - init the Exe Queue object + * + * @o: pointer to the object + * @exe_len: length + * @owner: pointer to the owner + * @validate: validate function pointer + * @optimize: optimize function pointer + * @exec: execute function pointer + * @get: get function pointer + */ +static void +ecore_exe_queue_init(struct bnx2x_softc *sc __rte_unused, + struct ecore_exe_queue_obj *o, + int exe_len, + union ecore_qable_obj *owner, + exe_q_validate validate, + exe_q_remove remove, + exe_q_optimize optimize, exe_q_execute exec, exe_q_get get) +{ + ECORE_MEMSET(o, 0, sizeof(*o)); + + ECORE_LIST_INIT(&o->exe_queue); + ECORE_LIST_INIT(&o->pending_comp); + + ECORE_SPIN_LOCK_INIT(&o->lock, sc); + + o->exe_chunk_len = exe_len; + o->owner = owner; + + /* Owner specific callbacks */ + o->validate = validate; + o->remove = remove; + o->optimize = optimize; + o->execute = exec; + o->get = get; + + ECORE_MSG(sc, "Setup the execution queue with the chunk length of %d", + exe_len); +} + +static void ecore_exe_queue_free_elem(struct bnx2x_softc *sc __rte_unused, + struct ecore_exeq_elem *elem) +{ + ECORE_MSG(sc, "Deleting an exe_queue element"); + ECORE_FREE(sc, elem, sizeof(*elem)); +} + +static inline int ecore_exe_queue_length(struct ecore_exe_queue_obj *o) +{ + struct ecore_exeq_elem *elem; + int cnt = 0; + + ECORE_SPIN_LOCK_BH(&o->lock); + + ECORE_LIST_FOR_EACH_ENTRY(elem, &o->exe_queue, link, + struct ecore_exeq_elem) cnt++; + + ECORE_SPIN_UNLOCK_BH(&o->lock); + + return cnt; +} + +/** + * ecore_exe_queue_add - add a new element to the execution queue + * + * @sc: driver handle + * @o: queue + * @cmd: new command to add + * @restore: true - do not optimize the command + * + * If the element is optimized or is illegal, frees it. + */ +static int ecore_exe_queue_add(struct bnx2x_softc *sc, + struct ecore_exe_queue_obj *o, + struct ecore_exeq_elem *elem, int restore) +{ + int rc; + + ECORE_SPIN_LOCK_BH(&o->lock); + + if (!restore) { + /* Try to cancel this element queue */ + rc = o->optimize(sc, o->owner, elem); + if (rc) + goto free_and_exit; + + /* Check if this request is ok */ + rc = o->validate(sc, o->owner, elem); + if (rc) { + ECORE_MSG(sc, "Preamble failed: %d", rc); + goto free_and_exit; + } + } + + /* If so, add it to the execution queue */ + ECORE_LIST_PUSH_TAIL(&elem->link, &o->exe_queue); + + ECORE_SPIN_UNLOCK_BH(&o->lock); + + return ECORE_SUCCESS; + +free_and_exit: + ecore_exe_queue_free_elem(sc, elem); + + ECORE_SPIN_UNLOCK_BH(&o->lock); + + return rc; +} + +static void __ecore_exe_queue_reset_pending(struct bnx2x_softc *sc, struct ecore_exe_queue_obj + *o) +{ + struct ecore_exeq_elem *elem; + + while (!ECORE_LIST_IS_EMPTY(&o->pending_comp)) { + elem = ECORE_LIST_FIRST_ENTRY(&o->pending_comp, + struct ecore_exeq_elem, link); + + ECORE_LIST_REMOVE_ENTRY(&elem->link, &o->pending_comp); + ecore_exe_queue_free_elem(sc, elem); + } +} + +static inline void ecore_exe_queue_reset_pending(struct bnx2x_softc *sc, + struct ecore_exe_queue_obj *o) +{ + ECORE_SPIN_LOCK_BH(&o->lock); + + __ecore_exe_queue_reset_pending(sc, o); + + ECORE_SPIN_UNLOCK_BH(&o->lock); +} + +/** + * ecore_exe_queue_step - execute one execution chunk atomically + * + * @sc: driver handle + * @o: queue + * @ramrod_flags: flags + * + * (Should be called while holding the exe_queue->lock). 
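/*
 * Illustrative sketch (not driver code, hypothetical names ex_cmd/ex_queue,
 * assumes <stdlib.h> and <errno.h>): ecore_exe_queue_add() above follows an
 * "optimize, then validate, then queue" shape.  The standalone fragment
 * below reduces that shape to a plain singly linked list; the real
 * optimize/validate callbacks are the vlan_mac routines later in this file.
 */
enum ex_op { EX_ADD, EX_DEL };

struct ex_cmd {
	struct ex_cmd *next;
	enum ex_op op;
	int key;			/* stands in for a MAC/VLAN key */
};

struct ex_queue {
	struct ex_cmd *head;
};

static int ex_queue_add(struct ex_queue *q, struct ex_cmd *cmd)
{
	struct ex_cmd **pp;

	for (pp = &q->head; *pp; pp = &(*pp)->next) {
		/* "optimize": a new command that is the exact opposite of a
		 * still-queued one cancels both instead of reaching the FW.
		 */
		if ((*pp)->key == cmd->key && (*pp)->op != cmd->op) {
			struct ex_cmd *victim = *pp;

			*pp = victim->next;
			free(victim);
			free(cmd);
			return 0;
		}
		/* "validate": reject a duplicate of a pending command */
		if ((*pp)->key == cmd->key && (*pp)->op == cmd->op) {
			free(cmd);
			return -EEXIST;
		}
	}

	/* passed both checks: append to the pending list */
	cmd->next = NULL;
	*pp = cmd;
	return 0;
}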
+ */ +static int ecore_exe_queue_step(struct bnx2x_softc *sc, + struct ecore_exe_queue_obj *o, + unsigned long *ramrod_flags) +{ + struct ecore_exeq_elem *elem, spacer; + int cur_len = 0, rc; + + ECORE_MEMSET(&spacer, 0, sizeof(spacer)); + + /* Next step should not be performed until the current is finished, + * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to + * properly clear object internals without sending any command to the FW + * which also implies there won't be any completion to clear the + * 'pending' list. + */ + if (!ECORE_LIST_IS_EMPTY(&o->pending_comp)) { + if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ramrod_flags)) { + ECORE_MSG(sc, + "RAMROD_DRV_CLR_ONLY requested: resetting a pending_comp list"); + __ecore_exe_queue_reset_pending(sc, o); + } else { + return ECORE_PENDING; + } + } + + /* Run through the pending commands list and create a next + * execution chunk. + */ + while (!ECORE_LIST_IS_EMPTY(&o->exe_queue)) { + elem = ECORE_LIST_FIRST_ENTRY(&o->exe_queue, + struct ecore_exeq_elem, link); + ECORE_DBG_BREAK_IF(!elem->cmd_len); + + if (cur_len + elem->cmd_len <= o->exe_chunk_len) { + cur_len += elem->cmd_len; + /* Prevent from both lists being empty when moving an + * element. This will allow the call of + * ecore_exe_queue_empty() without locking. + */ + ECORE_LIST_PUSH_TAIL(&spacer.link, &o->pending_comp); + mb(); + ECORE_LIST_REMOVE_ENTRY(&elem->link, &o->exe_queue); + ECORE_LIST_PUSH_TAIL(&elem->link, &o->pending_comp); + ECORE_LIST_REMOVE_ENTRY(&spacer.link, &o->pending_comp); + } else + break; + } + + /* Sanity check */ + if (!cur_len) + return ECORE_SUCCESS; + + rc = o->execute(sc, o->owner, &o->pending_comp, ramrod_flags); + if (rc < 0) + /* In case of an error return the commands back to the queue + * and reset the pending_comp. + */ + ECORE_LIST_SPLICE_INIT(&o->pending_comp, &o->exe_queue); + else if (!rc) + /* If zero is returned, means there are no outstanding pending + * completions and we may dismiss the pending list. + */ + __ecore_exe_queue_reset_pending(sc, o); + + return rc; +} + +static inline int ecore_exe_queue_empty(struct ecore_exe_queue_obj *o) +{ + int empty = ECORE_LIST_IS_EMPTY(&o->exe_queue); + + /* Don't reorder!!! */ + mb(); + + return empty && ECORE_LIST_IS_EMPTY(&o->pending_comp); +} + +static struct ecore_exeq_elem *ecore_exe_queue_alloc_elem(struct + bnx2x_softc *sc + __rte_unused) +{ + ECORE_MSG(sc, "Allocating a new exe_queue element"); + return ECORE_ZALLOC(sizeof(struct ecore_exeq_elem), GFP_ATOMIC, sc); +} + +/************************ raw_obj functions ***********************************/ +static bool ecore_raw_check_pending(struct ecore_raw_obj *o) +{ + /* + * !! converts the value returned by ECORE_TEST_BIT such that it + * is guaranteed not to be truncated regardless of int definition. + * + * Note we cannot simply define the function's return value type + * to match the type returned by ECORE_TEST_BIT, as it varies by + * platform/implementation. + */ + + return ! 
!ECORE_TEST_BIT(o->state, o->pstate); +} + +static void ecore_raw_clear_pending(struct ecore_raw_obj *o) +{ + ECORE_SMP_MB_BEFORE_CLEAR_BIT(); + ECORE_CLEAR_BIT(o->state, o->pstate); + ECORE_SMP_MB_AFTER_CLEAR_BIT(); +} + +static void ecore_raw_set_pending(struct ecore_raw_obj *o) +{ + ECORE_SMP_MB_BEFORE_CLEAR_BIT(); + ECORE_SET_BIT(o->state, o->pstate); + ECORE_SMP_MB_AFTER_CLEAR_BIT(); +} + +/** + * ecore_state_wait - wait until the given bit(state) is cleared + * + * @sc: device handle + * @state: state which is to be cleared + * @state_p: state buffer + * + */ +static int ecore_state_wait(struct bnx2x_softc *sc, int state, + unsigned long *pstate) +{ + /* can take a while if any port is running */ + int cnt = 5000; + + if (CHIP_REV_IS_EMUL(sc)) + cnt *= 20; + + ECORE_MSG(sc, "waiting for state to become %d", state); + + ECORE_MIGHT_SLEEP(); + while (cnt--) { + bnx2x_intr_legacy(sc); + if (!ECORE_TEST_BIT(state, pstate)) { +#ifdef ECORE_STOP_ON_ERROR + ECORE_MSG(sc, "exit (cnt %d)", 5000 - cnt); +#endif + rte_atomic32_set(&sc->scan_fp, 0); + return ECORE_SUCCESS; + } + + ECORE_WAIT(sc, delay_us); + + if (sc->panic) { + rte_atomic32_set(&sc->scan_fp, 0); + return ECORE_IO; + } + } + + /* timeout! */ + PMD_DRV_LOG(ERR, sc, "timeout waiting for state %d", state); + rte_atomic32_set(&sc->scan_fp, 0); +#ifdef ECORE_STOP_ON_ERROR + ecore_panic(); +#endif + + return ECORE_TIMEOUT; +} + +static int ecore_raw_wait(struct bnx2x_softc *sc, struct ecore_raw_obj *raw) +{ + return ecore_state_wait(sc, raw->state, raw->pstate); +} + +/***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/ +/* credit handling callbacks */ +static bool ecore_get_cam_offset_mac(struct ecore_vlan_mac_obj *o, int *offset) +{ + struct ecore_credit_pool_obj *mp = o->macs_pool; + + ECORE_DBG_BREAK_IF(!mp); + + return mp->get_entry(mp, offset); +} + +static bool ecore_get_credit_mac(struct ecore_vlan_mac_obj *o) +{ + struct ecore_credit_pool_obj *mp = o->macs_pool; + + ECORE_DBG_BREAK_IF(!mp); + + return mp->get(mp, 1); +} + +static bool ecore_put_cam_offset_mac(struct ecore_vlan_mac_obj *o, int offset) +{ + struct ecore_credit_pool_obj *mp = o->macs_pool; + + return mp->put_entry(mp, offset); +} + +static bool ecore_put_credit_mac(struct ecore_vlan_mac_obj *o) +{ + struct ecore_credit_pool_obj *mp = o->macs_pool; + + return mp->put(mp, 1); +} + +/** + * __ecore_vlan_mac_h_write_trylock - try getting the writer lock on vlan mac + * head list. + * + * @sc: device handle + * @o: vlan_mac object + * + * @details: Non-blocking implementation; should be called under execution + * queue lock. + */ +static int __ecore_vlan_mac_h_write_trylock(struct bnx2x_softc *sc __rte_unused, + struct ecore_vlan_mac_obj *o) +{ + if (o->head_reader) { + ECORE_MSG(sc, "vlan_mac_lock writer - There are readers; Busy"); + return ECORE_BUSY; + } + + ECORE_MSG(sc, "vlan_mac_lock writer - Taken"); + return ECORE_SUCCESS; +} + +/** + * __ecore_vlan_mac_h_exec_pending - execute step instead of a previous step + * which wasn't able to run due to a taken lock on vlan mac head list. + * + * @sc: device handle + * @o: vlan_mac object + * + * @details Should be called under execution queue lock; notice it might release + * and reclaim it during its run. 
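/*
 * Illustrative sketch (hypothetical names, assumes <stdbool.h>): the MAC
 * credit pool behind ecore_get_credit_mac()/ecore_put_credit_mac() and the
 * CAM offset callbacks above is conceptually a counter plus a small offset
 * allocator.  A minimal version, with the pool size assumed to fit in one
 * unsigned long bitmap:
 */
struct cam_pool {
	int credits;		/* free CAM entries left */
	int size;		/* total entries, <= 8 * sizeof(unsigned long) */
	unsigned long used;	/* bitmap of handed-out offsets */
};

static bool cam_pool_get(struct cam_pool *p, int n)
{
	if (p->credits < n)
		return false;
	p->credits -= n;
	return true;
}

static bool cam_pool_put(struct cam_pool *p, int n)
{
	if (p->credits + n > p->size)
		return false;
	p->credits += n;
	return true;
}

static bool cam_pool_get_entry(struct cam_pool *p, int *offset)
{
	int i;

	for (i = 0; i < p->size; i++) {
		if (!(p->used & (1UL << i))) {
			p->used |= 1UL << i;
			*offset = i;
			return true;
		}
	}
	return false;
}

static bool cam_pool_put_entry(struct cam_pool *p, int offset)
{
	if (offset < 0 || offset >= p->size || !(p->used & (1UL << offset)))
		return false;
	p->used &= ~(1UL << offset);
	return true;
}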
+ */ +static void __ecore_vlan_mac_h_exec_pending(struct bnx2x_softc *sc, + struct ecore_vlan_mac_obj *o) +{ + int rc; + unsigned long ramrod_flags = o->saved_ramrod_flags; + + ECORE_MSG(sc, "vlan_mac_lock execute pending command with ramrod flags %lu", + ramrod_flags); + o->head_exe_request = FALSE; + o->saved_ramrod_flags = 0; + rc = ecore_exe_queue_step(sc, &o->exe_queue, &ramrod_flags); + if (rc != ECORE_SUCCESS) { + PMD_DRV_LOG(ERR, sc, + "execution of pending commands failed with rc %d", + rc); +#ifdef ECORE_STOP_ON_ERROR + ecore_panic(); +#endif + } +} + +/** + * __ecore_vlan_mac_h_pend - Pend an execution step which couldn't have been + * called due to vlan mac head list lock being taken. + * + * @sc: device handle + * @o: vlan_mac object + * @ramrod_flags: ramrod flags of missed execution + * + * @details Should be called under execution queue lock. + */ +static void __ecore_vlan_mac_h_pend(struct bnx2x_softc *sc __rte_unused, + struct ecore_vlan_mac_obj *o, + unsigned long ramrod_flags) +{ + o->head_exe_request = TRUE; + o->saved_ramrod_flags = ramrod_flags; + ECORE_MSG(sc, "Placing pending execution with ramrod flags %lu", + ramrod_flags); +} + +/** + * __ecore_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock + * + * @sc: device handle + * @o: vlan_mac object + * + * @details Should be called under execution queue lock. Notice if a pending + * execution exists, it would perform it - possibly releasing and + * reclaiming the execution queue lock. + */ +static void __ecore_vlan_mac_h_write_unlock(struct bnx2x_softc *sc, + struct ecore_vlan_mac_obj *o) +{ + /* It's possible a new pending execution was added since this writer + * executed. If so, execute again. [Ad infinitum] + */ + while (o->head_exe_request) { + ECORE_MSG(sc, + "vlan_mac_lock - writer release encountered a pending request"); + __ecore_vlan_mac_h_exec_pending(sc, o); + } +} + +/** + * ecore_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock + * + * @sc: device handle + * @o: vlan_mac object + * + * @details Notice if a pending execution exists, it would perform it - + * possibly releasing and reclaiming the execution queue lock. + */ +void ecore_vlan_mac_h_write_unlock(struct bnx2x_softc *sc, + struct ecore_vlan_mac_obj *o) +{ + ECORE_SPIN_LOCK_BH(&o->exe_queue.lock); + __ecore_vlan_mac_h_write_unlock(sc, o); + ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock); +} + +/** + * __ecore_vlan_mac_h_read_lock - lock the vlan mac head list reader lock + * + * @sc: device handle + * @o: vlan_mac object + * + * @details Should be called under the execution queue lock. May sleep. May + * release and reclaim execution queue lock during its run. + */ +static int __ecore_vlan_mac_h_read_lock(struct bnx2x_softc *sc __rte_unused, + struct ecore_vlan_mac_obj *o) +{ + /* If we got here, we're holding lock --> no WRITER exists */ + o->head_reader++; + ECORE_MSG(sc, + "vlan_mac_lock - locked reader - number %d", o->head_reader); + + return ECORE_SUCCESS; +} + +/** + * ecore_vlan_mac_h_read_lock - lock the vlan mac head list reader lock + * + * @sc: device handle + * @o: vlan_mac object + * + * @details May sleep. Claims and releases execution queue lock during its run. 
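/*
 * Illustrative sketch (hypothetical names, assumes <stdbool.h>): the
 * vlan_mac head-list locking scheme implemented by the
 * __ecore_vlan_mac_h_* helpers in this file.  Readers only bump a count;
 * a writer that finds readers present records a pending request instead of
 * blocking, and the last reader to leave replays it.  The exe_queue
 * spinlock that protects these fields in the driver is omitted here.
 */
struct head_lock {
	int readers;		/* mirrors o->head_reader      */
	bool exe_pending;	/* mirrors o->head_exe_request */
};

static bool head_write_trylock(struct head_lock *hl)
{
	if (hl->readers) {
		/* combines the trylock failure with the "pend" step */
		hl->exe_pending = true;
		return false;
	}
	return true;
}

static void head_read_lock(struct head_lock *hl)
{
	/* no writer can be active while the (omitted) spinlock is held */
	hl->readers++;
}

static void head_read_unlock(struct head_lock *hl, void (*run_pending)(void))
{
	if (--hl->readers == 0 && hl->exe_pending) {
		hl->exe_pending = false;
		run_pending();	/* last reader replays the deferred step */
	}
}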
+ */ +int ecore_vlan_mac_h_read_lock(struct bnx2x_softc *sc, + struct ecore_vlan_mac_obj *o) +{ + int rc; + + ECORE_SPIN_LOCK_BH(&o->exe_queue.lock); + rc = __ecore_vlan_mac_h_read_lock(sc, o); + ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock); + + return rc; +} + +/** + * __ecore_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock + * + * @sc: device handle + * @o: vlan_mac object + * + * @details Should be called under execution queue lock. Notice if a pending + * execution exists, it would be performed if this was the last + * reader. possibly releasing and reclaiming the execution queue lock. + */ +static void __ecore_vlan_mac_h_read_unlock(struct bnx2x_softc *sc, + struct ecore_vlan_mac_obj *o) +{ + if (!o->head_reader) { + PMD_DRV_LOG(ERR, sc, + "Need to release vlan mac reader lock, but lock isn't taken"); +#ifdef ECORE_STOP_ON_ERROR + ecore_panic(); +#endif + } else { + o->head_reader--; + ECORE_MSG(sc, "vlan_mac_lock - decreased readers to %d", + o->head_reader); + } + + /* It's possible a new pending execution was added, and that this reader + * was last - if so we need to execute the command. + */ + if (!o->head_reader && o->head_exe_request) { + ECORE_MSG(sc, "vlan_mac_lock - reader release encountered a pending request"); + + /* Writer release will do the trick */ + __ecore_vlan_mac_h_write_unlock(sc, o); + } +} + +/** + * ecore_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock + * + * @sc: device handle + * @o: vlan_mac object + * + * @details Notice if a pending execution exists, it would be performed if this + * was the last reader. Claims and releases the execution queue lock + * during its run. + */ +void ecore_vlan_mac_h_read_unlock(struct bnx2x_softc *sc, + struct ecore_vlan_mac_obj *o) +{ + ECORE_SPIN_LOCK_BH(&o->exe_queue.lock); + __ecore_vlan_mac_h_read_unlock(sc, o); + ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock); +} + +/** + * ecore_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock + * + * @sc: device handle + * @o: vlan_mac object + * @n: number of elements to get + * @base: base address for element placement + * @stride: stride between elements (in bytes) + */ +static int ecore_get_n_elements(struct bnx2x_softc *sc, + struct ecore_vlan_mac_obj *o, int n, + uint8_t * base, uint8_t stride, uint8_t size) +{ + struct ecore_vlan_mac_registry_elem *pos; + uint8_t *next = base; + int counter = 0, read_lock; + + ECORE_MSG(sc, "get_n_elements - taking vlan_mac_lock (reader)"); + read_lock = ecore_vlan_mac_h_read_lock(sc, o); + if (read_lock != ECORE_SUCCESS) + PMD_DRV_LOG(ERR, sc, + "get_n_elements failed to get vlan mac reader lock; Access without lock"); + + /* traverse list */ + ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link, + struct ecore_vlan_mac_registry_elem) { + if (counter < n) { + ECORE_MEMCPY(next, &pos->u, size); + counter++; + ECORE_MSG + (sc, "copied element number %d to address %p element was:", + counter, next); + next += stride + size; + } + } + + if (read_lock == ECORE_SUCCESS) { + ECORE_MSG(sc, "get_n_elements - releasing vlan_mac_lock (reader)"); + ecore_vlan_mac_h_read_unlock(sc, o); + } + + return counter * ETH_ALEN; +} + +/* check_add() callbacks */ +static int ecore_check_mac_add(struct bnx2x_softc *sc __rte_unused, + struct ecore_vlan_mac_obj *o, + union ecore_classification_ramrod_data *data) +{ + struct ecore_vlan_mac_registry_elem *pos; + + ECORE_MSG(sc, "Checking MAC %02x:%02x:%02x:%02x:%02x:%02x for ADD command", + data->mac.mac[0], data->mac.mac[1], data->mac.mac[2], + data->mac.mac[3], data->mac.mac[4], 
data->mac.mac[5]); + + if (!ECORE_IS_VALID_ETHER_ADDR(data->mac.mac)) + return ECORE_INVAL; + + /* Check if a requested MAC already exists */ + ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link, + struct ecore_vlan_mac_registry_elem) + if (!ECORE_MEMCMP(data->mac.mac, pos->u.mac.mac, ETH_ALEN) && + (data->mac.is_inner_mac == pos->u.mac.is_inner_mac)) + return ECORE_EXISTS; + + return ECORE_SUCCESS; +} + +/* check_del() callbacks */ +static struct ecore_vlan_mac_registry_elem *ecore_check_mac_del(struct bnx2x_softc + *sc + __rte_unused, + struct + ecore_vlan_mac_obj + *o, union + ecore_classification_ramrod_data + *data) +{ + struct ecore_vlan_mac_registry_elem *pos; + + ECORE_MSG(sc, "Checking MAC %02x:%02x:%02x:%02x:%02x:%02x for DEL command", + data->mac.mac[0], data->mac.mac[1], data->mac.mac[2], + data->mac.mac[3], data->mac.mac[4], data->mac.mac[5]); + + ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link, + struct ecore_vlan_mac_registry_elem) + if ((!ECORE_MEMCMP(data->mac.mac, pos->u.mac.mac, ETH_ALEN)) && + (data->mac.is_inner_mac == pos->u.mac.is_inner_mac)) + return pos; + + return NULL; +} + +/* check_move() callback */ +static bool ecore_check_move(struct bnx2x_softc *sc, + struct ecore_vlan_mac_obj *src_o, + struct ecore_vlan_mac_obj *dst_o, + union ecore_classification_ramrod_data *data) +{ + struct ecore_vlan_mac_registry_elem *pos; + int rc; + + /* Check if we can delete the requested configuration from the first + * object. + */ + pos = src_o->check_del(sc, src_o, data); + + /* check if configuration can be added */ + rc = dst_o->check_add(sc, dst_o, data); + + /* If this classification can not be added (is already set) + * or can't be deleted - return an error. + */ + if (rc || !pos) + return FALSE; + + return TRUE; +} + +static bool ecore_check_move_always_err(__rte_unused struct bnx2x_softc *sc, + __rte_unused struct ecore_vlan_mac_obj + *src_o, __rte_unused struct ecore_vlan_mac_obj + *dst_o, __rte_unused union + ecore_classification_ramrod_data *data) +{ + return FALSE; +} + +static uint8_t ecore_vlan_mac_get_rx_tx_flag(struct ecore_vlan_mac_obj + *o) +{ + struct ecore_raw_obj *raw = &o->raw; + uint8_t rx_tx_flag = 0; + + if ((raw->obj_type == ECORE_OBJ_TYPE_TX) || + (raw->obj_type == ECORE_OBJ_TYPE_RX_TX)) + rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_TX_CMD; + + if ((raw->obj_type == ECORE_OBJ_TYPE_RX) || + (raw->obj_type == ECORE_OBJ_TYPE_RX_TX)) + rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_RX_CMD; + + return rx_tx_flag; +} + +void ecore_set_mac_in_nig(struct bnx2x_softc *sc, + bool add, unsigned char *dev_addr, int index) +{ + uint32_t wb_data[2]; + uint32_t reg_offset = ECORE_PORT_ID(sc) ? NIG_REG_LLH1_FUNC_MEM : + NIG_REG_LLH0_FUNC_MEM; + + if (!ECORE_IS_MF_SI_MODE(sc) && !IS_MF_AFEX(sc)) + return; + + if (index > ECORE_LLH_CAM_MAX_PF_LINE) + return; + + ECORE_MSG(sc, "Going to %s LLH configuration at entry %d", + (add ? "ADD" : "DELETE"), index); + + if (add) { + /* LLH_FUNC_MEM is a uint64_t WB register */ + reg_offset += 8 * index; + + wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) | + (dev_addr[4] << 8) | dev_addr[5]); + wb_data[1] = ((dev_addr[0] << 8) | dev_addr[1]); + + ECORE_REG_WR_DMAE_LEN(sc, reg_offset, wb_data, 2); + } + + REG_WR(sc, (ECORE_PORT_ID(sc) ? 
NIG_REG_LLH1_FUNC_MEM_ENABLE : + NIG_REG_LLH0_FUNC_MEM_ENABLE) + 4 * index, add); +} + +/** + * ecore_vlan_mac_set_cmd_hdr_e2 - set a header in a single classify ramrod + * + * @sc: device handle + * @o: queue for which we want to configure this rule + * @add: if TRUE the command is an ADD command, DEL otherwise + * @opcode: CLASSIFY_RULE_OPCODE_XXX + * @hdr: pointer to a header to setup + * + */ +static void ecore_vlan_mac_set_cmd_hdr_e2(struct ecore_vlan_mac_obj *o, + bool add, int opcode, + struct eth_classify_cmd_header + *hdr) +{ + struct ecore_raw_obj *raw = &o->raw; + + hdr->client_id = raw->cl_id; + hdr->func_id = raw->func_id; + + /* Rx or/and Tx (internal switching) configuration ? */ + hdr->cmd_general_data |= ecore_vlan_mac_get_rx_tx_flag(o); + + if (add) + hdr->cmd_general_data |= ETH_CLASSIFY_CMD_HEADER_IS_ADD; + + hdr->cmd_general_data |= + (opcode << ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT); +} + +/** + * ecore_vlan_mac_set_rdata_hdr_e2 - set the classify ramrod data header + * + * @cid: connection id + * @type: ECORE_FILTER_XXX_PENDING + * @hdr: pointer to header to setup + * @rule_cnt: + * + * currently we always configure one rule and echo field to contain a CID and an + * opcode type. + */ +static void ecore_vlan_mac_set_rdata_hdr_e2(uint32_t cid, int type, struct eth_classify_header + *hdr, int rule_cnt) +{ + hdr->echo = ECORE_CPU_TO_LE32((cid & ECORE_SWCID_MASK) | + (type << ECORE_SWCID_SHIFT)); + hdr->rule_cnt = (uint8_t) rule_cnt; +} + +/* hw_config() callbacks */ +static void ecore_set_one_mac_e2(struct bnx2x_softc *sc, + struct ecore_vlan_mac_obj *o, + struct ecore_exeq_elem *elem, int rule_idx, + __rte_unused int cam_offset) +{ + struct ecore_raw_obj *raw = &o->raw; + struct eth_classify_rules_ramrod_data *data = + (struct eth_classify_rules_ramrod_data *)(raw->rdata); + int rule_cnt = rule_idx + 1, cmd = elem->cmd_data.vlan_mac.cmd; + union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx]; + bool add = (cmd == ECORE_VLAN_MAC_ADD) ? TRUE : FALSE; + unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags; + uint8_t *mac = elem->cmd_data.vlan_mac.u.mac.mac; + + /* Set LLH CAM entry: currently only iSCSI and ETH macs are + * relevant. In addition, current implementation is tuned for a + * single ETH MAC. + * + * When multiple unicast ETH MACs PF configuration in switch + * independent mode is required (NetQ, multiple netdev MACs, + * etc.), consider better utilisation of 8 per function MAC + * entries in the LLH register. There is also + * NIG_REG_P[01]_LLH_FUNC_MEM2 registers that complete the + * total number of CAM entries to 16. + * + * Currently we won't configure NIG for MACs other than a primary ETH + * MAC and iSCSI L2 MAC. + * + * If this MAC is moving from one Queue to another, no need to change + * NIG configuration. + */ + if (cmd != ECORE_VLAN_MAC_MOVE) { + if (ECORE_TEST_BIT(ECORE_ISCSI_ETH_MAC, vlan_mac_flags)) + ecore_set_mac_in_nig(sc, add, mac, + ECORE_LLH_CAM_ISCSI_ETH_LINE); + else if (ECORE_TEST_BIT(ECORE_ETH_MAC, vlan_mac_flags)) + ecore_set_mac_in_nig(sc, add, mac, + ECORE_LLH_CAM_ETH_LINE); + } + + /* Reset the ramrod data buffer for the first rule */ + if (rule_idx == 0) + ECORE_MEMSET(data, 0, sizeof(*data)); + + /* Setup a command header */ + ecore_vlan_mac_set_cmd_hdr_e2(o, add, CLASSIFY_RULE_OPCODE_MAC, + &rule_entry->mac.header); + + ECORE_MSG(sc, "About to %s MAC %02x:%02x:%02x:%02x:%02x:%02x for Queue %d", + (add ? 
"add" : "delete"), mac[0], mac[1], mac[2], mac[3], + mac[4], mac[5], raw->cl_id); + + /* Set a MAC itself */ + ecore_set_fw_mac_addr(&rule_entry->mac.mac_msb, + &rule_entry->mac.mac_mid, + &rule_entry->mac.mac_lsb, mac); + rule_entry->mac.inner_mac = elem->cmd_data.vlan_mac.u.mac.is_inner_mac; + + /* MOVE: Add a rule that will add this MAC to the target Queue */ + if (cmd == ECORE_VLAN_MAC_MOVE) { + rule_entry++; + rule_cnt++; + + /* Setup ramrod data */ + ecore_vlan_mac_set_cmd_hdr_e2(elem->cmd_data. + vlan_mac.target_obj, TRUE, + CLASSIFY_RULE_OPCODE_MAC, + &rule_entry->mac.header); + + /* Set a MAC itself */ + ecore_set_fw_mac_addr(&rule_entry->mac.mac_msb, + &rule_entry->mac.mac_mid, + &rule_entry->mac.mac_lsb, mac); + rule_entry->mac.inner_mac = + elem->cmd_data.vlan_mac.u.mac.is_inner_mac; + } + + /* Set the ramrod data header */ + ecore_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header, + rule_cnt); +} + +/** + * ecore_vlan_mac_set_rdata_hdr_e1x - set a header in a single classify ramrod + * + * @sc: device handle + * @o: queue + * @type: + * @cam_offset: offset in cam memory + * @hdr: pointer to a header to setup + * + * E1H + */ +static void ecore_vlan_mac_set_rdata_hdr_e1x(struct ecore_vlan_mac_obj + *o, int type, int cam_offset, struct mac_configuration_hdr + *hdr) +{ + struct ecore_raw_obj *r = &o->raw; + + hdr->length = 1; + hdr->offset = (uint8_t) cam_offset; + hdr->client_id = ECORE_CPU_TO_LE16(0xff); + hdr->echo = ECORE_CPU_TO_LE32((r->cid & ECORE_SWCID_MASK) | + (type << ECORE_SWCID_SHIFT)); +} + +static void ecore_vlan_mac_set_cfg_entry_e1x(struct ecore_vlan_mac_obj + *o, int add, int opcode, + uint8_t * mac, + uint16_t vlan_id, struct + mac_configuration_entry + *cfg_entry) +{ + struct ecore_raw_obj *r = &o->raw; + uint32_t cl_bit_vec = (1 << r->cl_id); + + cfg_entry->clients_bit_vector = ECORE_CPU_TO_LE32(cl_bit_vec); + cfg_entry->pf_id = r->func_id; + cfg_entry->vlan_id = ECORE_CPU_TO_LE16(vlan_id); + + if (add) { + ECORE_SET_FLAG(cfg_entry->flags, + MAC_CONFIGURATION_ENTRY_ACTION_TYPE, + T_ETH_MAC_COMMAND_SET); + ECORE_SET_FLAG(cfg_entry->flags, + MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE, + opcode); + + /* Set a MAC in a ramrod data */ + ecore_set_fw_mac_addr(&cfg_entry->msb_mac_addr, + &cfg_entry->middle_mac_addr, + &cfg_entry->lsb_mac_addr, mac); + } else + ECORE_SET_FLAG(cfg_entry->flags, + MAC_CONFIGURATION_ENTRY_ACTION_TYPE, + T_ETH_MAC_COMMAND_INVALIDATE); +} + +static void ecore_vlan_mac_set_rdata_e1x(struct bnx2x_softc *sc + __rte_unused, + struct ecore_vlan_mac_obj *o, + int type, int cam_offset, + int add, uint8_t * mac, + uint16_t vlan_id, int opcode, + struct mac_configuration_cmd + *config) +{ + struct mac_configuration_entry *cfg_entry = &config->config_table[0]; + + ecore_vlan_mac_set_rdata_hdr_e1x(o, type, cam_offset, &config->hdr); + ecore_vlan_mac_set_cfg_entry_e1x(o, add, opcode, mac, vlan_id, + cfg_entry); + + ECORE_MSG(sc, "%s MAC %02x:%02x:%02x:%02x:%02x:%02x CLID %d CAM offset %d", + (add ? 
"setting" : "clearing"), + mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], + o->raw.cl_id, cam_offset); +} + +/** + * ecore_set_one_mac_e1x - fill a single MAC rule ramrod data + * + * @sc: device handle + * @o: ecore_vlan_mac_obj + * @elem: ecore_exeq_elem + * @rule_idx: rule_idx + * @cam_offset: cam_offset + */ +static void ecore_set_one_mac_e1x(struct bnx2x_softc *sc, + struct ecore_vlan_mac_obj *o, + struct ecore_exeq_elem *elem, + __rte_unused int rule_idx, int cam_offset) +{ + struct ecore_raw_obj *raw = &o->raw; + struct mac_configuration_cmd *config = + (struct mac_configuration_cmd *)(raw->rdata); + /* 57711 do not support MOVE command, + * so it's either ADD or DEL + */ + int add = (elem->cmd_data.vlan_mac.cmd == ECORE_VLAN_MAC_ADD) ? + TRUE : FALSE; + + /* Reset the ramrod data buffer */ + ECORE_MEMSET(config, 0, sizeof(*config)); + + ecore_vlan_mac_set_rdata_e1x(sc, o, raw->state, + cam_offset, add, + elem->cmd_data.vlan_mac.u.mac.mac, 0, + ETH_VLAN_FILTER_ANY_VLAN, config); +} + +/** + * ecore_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element + * + * @sc: device handle + * @p: command parameters + * @ppos: pointer to the cookie + * + * reconfigure next MAC/VLAN/VLAN-MAC element from the + * previously configured elements list. + * + * from command parameters only RAMROD_COMP_WAIT bit in ramrod_flags is taken + * into an account + * + * pointer to the cookie - that should be given back in the next call to make + * function handle the next element. If *ppos is set to NULL it will restart the + * iterator. If returned *ppos == NULL this means that the last element has been + * handled. + * + */ +static int ecore_vlan_mac_restore(struct bnx2x_softc *sc, + struct ecore_vlan_mac_ramrod_params *p, + struct ecore_vlan_mac_registry_elem **ppos) +{ + struct ecore_vlan_mac_registry_elem *pos; + struct ecore_vlan_mac_obj *o = p->vlan_mac_obj; + + /* If list is empty - there is nothing to do here */ + if (ECORE_LIST_IS_EMPTY(&o->head)) { + *ppos = NULL; + return 0; + } + + /* make a step... */ + if (*ppos == NULL) + *ppos = ECORE_LIST_FIRST_ENTRY(&o->head, struct + ecore_vlan_mac_registry_elem, + link); + else + *ppos = ECORE_LIST_NEXT(*ppos, link, + struct ecore_vlan_mac_registry_elem); + + pos = *ppos; + + /* If it's the last step - return NULL */ + if (ECORE_LIST_IS_LAST(&pos->link, &o->head)) + *ppos = NULL; + + /* Prepare a 'user_req' */ + ECORE_MEMCPY(&p->user_req.u, &pos->u, sizeof(pos->u)); + + /* Set the command */ + p->user_req.cmd = ECORE_VLAN_MAC_ADD; + + /* Set vlan_mac_flags */ + p->user_req.vlan_mac_flags = pos->vlan_mac_flags; + + /* Set a restore bit */ + ECORE_SET_BIT_NA(RAMROD_RESTORE, &p->ramrod_flags); + + return ecore_config_vlan_mac(sc, p); +} + +/* ecore_exeq_get_mac/ecore_exeq_get_vlan/ecore_exeq_get_vlan_mac return a + * pointer to an element with a specific criteria and NULL if such an element + * hasn't been found. 
+ */ +static struct ecore_exeq_elem *ecore_exeq_get_mac(struct ecore_exe_queue_obj *o, + struct ecore_exeq_elem *elem) +{ + struct ecore_exeq_elem *pos; + struct ecore_mac_ramrod_data *data = &elem->cmd_data.vlan_mac.u.mac; + + /* Check pending for execution commands */ + ECORE_LIST_FOR_EACH_ENTRY(pos, &o->exe_queue, link, + struct ecore_exeq_elem) + if (!ECORE_MEMCMP(&pos->cmd_data.vlan_mac.u.mac, data, + sizeof(*data)) && + (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd)) + return pos; + + return NULL; +} + +/** + * ecore_validate_vlan_mac_add - check if an ADD command can be executed + * + * @sc: device handle + * @qo: ecore_qable_obj + * @elem: ecore_exeq_elem + * + * Checks that the requested configuration can be added. If yes and if + * requested, consume CAM credit. + * + * The 'validate' is run after the 'optimize'. + * + */ +static int ecore_validate_vlan_mac_add(struct bnx2x_softc *sc, + union ecore_qable_obj *qo, + struct ecore_exeq_elem *elem) +{ + struct ecore_vlan_mac_obj *o = &qo->vlan_mac; + struct ecore_exe_queue_obj *exeq = &o->exe_queue; + int rc; + + /* Check the registry */ + rc = o->check_add(sc, o, &elem->cmd_data.vlan_mac.u); + if (rc) { + ECORE_MSG(sc, + "ADD command is not allowed considering current registry state."); + return rc; + } + + /* Check if there is a pending ADD command for this + * MAC/VLAN/VLAN-MAC. Return an error if there is. + */ + if (exeq->get(exeq, elem)) { + ECORE_MSG(sc, "There is a pending ADD command already"); + return ECORE_EXISTS; + } + + /* Consume the credit if not requested not to */ + if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT, + &elem->cmd_data.vlan_mac.vlan_mac_flags) || + o->get_credit(o))) + return ECORE_INVAL; + + return ECORE_SUCCESS; +} + +/** + * ecore_validate_vlan_mac_del - check if the DEL command can be executed + * + * @sc: device handle + * @qo: quable object to check + * @elem: element that needs to be deleted + * + * Checks that the requested configuration can be deleted. If yes and if + * requested, returns a CAM credit. + * + * The 'validate' is run after the 'optimize'. + */ +static int ecore_validate_vlan_mac_del(struct bnx2x_softc *sc, + union ecore_qable_obj *qo, + struct ecore_exeq_elem *elem) +{ + struct ecore_vlan_mac_obj *o = &qo->vlan_mac; + struct ecore_vlan_mac_registry_elem *pos; + struct ecore_exe_queue_obj *exeq = &o->exe_queue; + struct ecore_exeq_elem query_elem; + + /* If this classification can not be deleted (doesn't exist) + * - return a ECORE_EXIST. + */ + pos = o->check_del(sc, o, &elem->cmd_data.vlan_mac.u); + if (!pos) { + ECORE_MSG(sc, + "DEL command is not allowed considering current registry state"); + return ECORE_EXISTS; + } + + /* Check if there are pending DEL or MOVE commands for this + * MAC/VLAN/VLAN-MAC. Return an error if so. 
+ */ + ECORE_MEMCPY(&query_elem, elem, sizeof(query_elem)); + + /* Check for MOVE commands */ + query_elem.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_MOVE; + if (exeq->get(exeq, &query_elem)) { + PMD_DRV_LOG(ERR, sc, "There is a pending MOVE command already"); + return ECORE_INVAL; + } + + /* Check for DEL commands */ + if (exeq->get(exeq, elem)) { + ECORE_MSG(sc, "There is a pending DEL command already"); + return ECORE_EXISTS; + } + + /* Return the credit to the credit pool if not requested not to */ + if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT, + &elem->cmd_data.vlan_mac.vlan_mac_flags) || + o->put_credit(o))) { + PMD_DRV_LOG(ERR, sc, "Failed to return a credit"); + return ECORE_INVAL; + } + + return ECORE_SUCCESS; +} + +/** + * ecore_validate_vlan_mac_move - check if the MOVE command can be executed + * + * @sc: device handle + * @qo: quable object to check (source) + * @elem: element that needs to be moved + * + * Checks that the requested configuration can be moved. If yes and if + * requested, returns a CAM credit. + * + * The 'validate' is run after the 'optimize'. + */ +static int ecore_validate_vlan_mac_move(struct bnx2x_softc *sc, + union ecore_qable_obj *qo, + struct ecore_exeq_elem *elem) +{ + struct ecore_vlan_mac_obj *src_o = &qo->vlan_mac; + struct ecore_vlan_mac_obj *dest_o = elem->cmd_data.vlan_mac.target_obj; + struct ecore_exeq_elem query_elem; + struct ecore_exe_queue_obj *src_exeq = &src_o->exe_queue; + struct ecore_exe_queue_obj *dest_exeq = &dest_o->exe_queue; + + /* Check if we can perform this operation based on the current registry + * state. + */ + if (!src_o->check_move(sc, src_o, dest_o, &elem->cmd_data.vlan_mac.u)) { + ECORE_MSG(sc, + "MOVE command is not allowed considering current registry state"); + return ECORE_INVAL; + } + + /* Check if there is an already pending DEL or MOVE command for the + * source object or ADD command for a destination object. Return an + * error if so. + */ + ECORE_MEMCPY(&query_elem, elem, sizeof(query_elem)); + + /* Check DEL on source */ + query_elem.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_DEL; + if (src_exeq->get(src_exeq, &query_elem)) { + PMD_DRV_LOG(ERR, sc, + "There is a pending DEL command on the source queue already"); + return ECORE_INVAL; + } + + /* Check MOVE on source */ + if (src_exeq->get(src_exeq, elem)) { + ECORE_MSG(sc, "There is a pending MOVE command already"); + return ECORE_EXISTS; + } + + /* Check ADD on destination */ + query_elem.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_ADD; + if (dest_exeq->get(dest_exeq, &query_elem)) { + PMD_DRV_LOG(ERR, sc, + "There is a pending ADD command on the destination queue already"); + return ECORE_INVAL; + } + + /* Consume the credit if not requested not to */ + if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT_DEST, + &elem->cmd_data.vlan_mac.vlan_mac_flags) || + dest_o->get_credit(dest_o))) + return ECORE_INVAL; + + if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT, + &elem->cmd_data.vlan_mac.vlan_mac_flags) || + src_o->put_credit(src_o))) { + /* return the credit taken from dest... 
*/ + dest_o->put_credit(dest_o); + return ECORE_INVAL; + } + + return ECORE_SUCCESS; +} + +static int ecore_validate_vlan_mac(struct bnx2x_softc *sc, + union ecore_qable_obj *qo, + struct ecore_exeq_elem *elem) +{ + switch (elem->cmd_data.vlan_mac.cmd) { + case ECORE_VLAN_MAC_ADD: + return ecore_validate_vlan_mac_add(sc, qo, elem); + case ECORE_VLAN_MAC_DEL: + return ecore_validate_vlan_mac_del(sc, qo, elem); + case ECORE_VLAN_MAC_MOVE: + return ecore_validate_vlan_mac_move(sc, qo, elem); + default: + return ECORE_INVAL; + } +} + +static int ecore_remove_vlan_mac(__rte_unused struct bnx2x_softc *sc, + union ecore_qable_obj *qo, + struct ecore_exeq_elem *elem) +{ + int rc = 0; + + /* If consumption wasn't required, nothing to do */ + if (ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT, + &elem->cmd_data.vlan_mac.vlan_mac_flags)) + return ECORE_SUCCESS; + + switch (elem->cmd_data.vlan_mac.cmd) { + case ECORE_VLAN_MAC_ADD: + case ECORE_VLAN_MAC_MOVE: + rc = qo->vlan_mac.put_credit(&qo->vlan_mac); + break; + case ECORE_VLAN_MAC_DEL: + rc = qo->vlan_mac.get_credit(&qo->vlan_mac); + break; + default: + return ECORE_INVAL; + } + + if (rc != TRUE) + return ECORE_INVAL; + + return ECORE_SUCCESS; +} + +/** + * ecore_wait_vlan_mac - passively wait for 5 seconds until all work completes. + * + * @sc: device handle + * @o: ecore_vlan_mac_obj + * + */ +static int ecore_wait_vlan_mac(struct bnx2x_softc *sc, + struct ecore_vlan_mac_obj *o) +{ + int cnt = 5000, rc; + struct ecore_exe_queue_obj *exeq = &o->exe_queue; + struct ecore_raw_obj *raw = &o->raw; + + while (cnt--) { + /* Wait for the current command to complete */ + rc = raw->wait_comp(sc, raw); + if (rc) + return rc; + + /* Wait until there are no pending commands */ + if (!ecore_exe_queue_empty(exeq)) + ECORE_WAIT(sc, 1000); + else + return ECORE_SUCCESS; + } + + return ECORE_TIMEOUT; +} + +static int __ecore_vlan_mac_execute_step(struct bnx2x_softc *sc, + struct ecore_vlan_mac_obj *o, + unsigned long *ramrod_flags) +{ + int rc = ECORE_SUCCESS; + + ECORE_SPIN_LOCK_BH(&o->exe_queue.lock); + + ECORE_MSG(sc, "vlan_mac_execute_step - trying to take writer lock"); + rc = __ecore_vlan_mac_h_write_trylock(sc, o); + + if (rc != ECORE_SUCCESS) { + __ecore_vlan_mac_h_pend(sc, o, *ramrod_flags); + + /** Calling function should not diffrentiate between this case + * and the case in which there is already a pending ramrod + */ + rc = ECORE_PENDING; + } else { + rc = ecore_exe_queue_step(sc, &o->exe_queue, ramrod_flags); + } + ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock); + + return rc; +} + +/** + * ecore_complete_vlan_mac - complete one VLAN-MAC ramrod + * + * @sc: device handle + * @o: ecore_vlan_mac_obj + * @cqe: + * @cont: if TRUE schedule next execution chunk + * + */ +static int ecore_complete_vlan_mac(struct bnx2x_softc *sc, + struct ecore_vlan_mac_obj *o, + union event_ring_elem *cqe, + unsigned long *ramrod_flags) +{ + struct ecore_raw_obj *r = &o->raw; + int rc; + + /* Reset pending list */ + ecore_exe_queue_reset_pending(sc, &o->exe_queue); + + /* Clear pending */ + r->clear_pending(r); + + /* If ramrod failed this is most likely a SW bug */ + if (cqe->message.error) + return ECORE_INVAL; + + /* Run the next bulk of pending commands if requested */ + if (ECORE_TEST_BIT(RAMROD_CONT, ramrod_flags)) { + rc = __ecore_vlan_mac_execute_step(sc, o, ramrod_flags); + if (rc < 0) + return rc; + } + + /* If there is more work to do return PENDING */ + if (!ecore_exe_queue_empty(&o->exe_queue)) + return ECORE_PENDING; + + return ECORE_SUCCESS; +} + +/** + * 
ecore_optimize_vlan_mac - optimize ADD and DEL commands. + * + * @sc: device handle + * @o: ecore_qable_obj + * @elem: ecore_exeq_elem + */ +static int ecore_optimize_vlan_mac(struct bnx2x_softc *sc, + union ecore_qable_obj *qo, + struct ecore_exeq_elem *elem) +{ + struct ecore_exeq_elem query, *pos; + struct ecore_vlan_mac_obj *o = &qo->vlan_mac; + struct ecore_exe_queue_obj *exeq = &o->exe_queue; + + ECORE_MEMCPY(&query, elem, sizeof(query)); + + switch (elem->cmd_data.vlan_mac.cmd) { + case ECORE_VLAN_MAC_ADD: + query.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_DEL; + break; + case ECORE_VLAN_MAC_DEL: + query.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_ADD; + break; + default: + /* Don't handle anything other than ADD or DEL */ + return 0; + } + + /* If we found the appropriate element - delete it */ + pos = exeq->get(exeq, &query); + if (pos) { + + /* Return the credit of the optimized command */ + if (!ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT, + &pos->cmd_data.vlan_mac.vlan_mac_flags)) { + if ((query.cmd_data.vlan_mac.cmd == + ECORE_VLAN_MAC_ADD) && !o->put_credit(o)) { + PMD_DRV_LOG(ERR, sc, + "Failed to return the credit for the optimized ADD command"); + return ECORE_INVAL; + } else if (!o->get_credit(o)) { /* VLAN_MAC_DEL */ + PMD_DRV_LOG(ERR, sc, + "Failed to recover the credit from the optimized DEL command"); + return ECORE_INVAL; + } + } + + ECORE_MSG(sc, "Optimizing %s command", + (elem->cmd_data.vlan_mac.cmd == ECORE_VLAN_MAC_ADD) ? + "ADD" : "DEL"); + + ECORE_LIST_REMOVE_ENTRY(&pos->link, &exeq->exe_queue); + ecore_exe_queue_free_elem(sc, pos); + return 1; + } + + return 0; +} + +/** + * ecore_vlan_mac_get_registry_elem - prepare a registry element + * + * @sc: device handle + * @o: + * @elem: + * @restore: + * @re: + * + * prepare a registry element according to the current command request. + */ +static int ecore_vlan_mac_get_registry_elem(struct bnx2x_softc *sc, + struct ecore_vlan_mac_obj *o, + struct ecore_exeq_elem *elem, + int restore, struct + ecore_vlan_mac_registry_elem + **re) +{ + enum ecore_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd; + struct ecore_vlan_mac_registry_elem *reg_elem; + + /* Allocate a new registry element if needed. */ + if (!restore && + ((cmd == ECORE_VLAN_MAC_ADD) || (cmd == ECORE_VLAN_MAC_MOVE))) { + reg_elem = ECORE_ZALLOC(sizeof(*reg_elem), GFP_ATOMIC, sc); + if (!reg_elem) + return ECORE_NOMEM; + + /* Get a new CAM offset */ + if (!o->get_cam_offset(o, ®_elem->cam_offset)) { + /* This shall never happen, because we have checked the + * CAM availability in the 'validate'. + */ + ECORE_DBG_BREAK_IF(1); + ECORE_FREE(sc, reg_elem, sizeof(*reg_elem)); + return ECORE_INVAL; + } + + ECORE_MSG(sc, "Got cam offset %d", reg_elem->cam_offset); + + /* Set a VLAN-MAC data */ + ECORE_MEMCPY(®_elem->u, &elem->cmd_data.vlan_mac.u, + sizeof(reg_elem->u)); + + /* Copy the flags (needed for DEL and RESTORE flows) */ + reg_elem->vlan_mac_flags = + elem->cmd_data.vlan_mac.vlan_mac_flags; + } else /* DEL, RESTORE */ + reg_elem = o->check_del(sc, o, &elem->cmd_data.vlan_mac.u); + + *re = reg_elem; + return ECORE_SUCCESS; +} + +/** + * ecore_execute_vlan_mac - execute vlan mac command + * + * @sc: device handle + * @qo: + * @exe_chunk: + * @ramrod_flags: + * + * go and send a ramrod! 
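/*
 * Illustrative sketch (hypothetical names, assumes <stdlib.h>, <string.h>
 * and <errno.h>): the registry bookkeeping that
 * ecore_vlan_mac_get_registry_elem() above prepares.  Every ADD records the
 * CAM offset it consumed so that the matching DEL can hand that offset back
 * to the pool, which is what the cleanup pass in the execute path below
 * relies on.
 */
struct reg_entry {
	struct reg_entry *next;
	unsigned char mac[6];
	int cam_offset;
};

static int registry_add(struct reg_entry **head, const unsigned char *mac,
			int cam_offset)
{
	struct reg_entry *e = calloc(1, sizeof(*e));

	if (e == NULL)
		return -ENOMEM;

	memcpy(e->mac, mac, sizeof(e->mac));
	e->cam_offset = cam_offset;
	e->next = *head;
	*head = e;
	return 0;
}

/* returns the recorded CAM offset, or -ENOENT if the MAC was never added */
static int registry_del(struct reg_entry **head, const unsigned char *mac)
{
	struct reg_entry **pp;

	for (pp = head; *pp; pp = &(*pp)->next) {
		if (memcmp((*pp)->mac, mac, 6) == 0) {
			struct reg_entry *e = *pp;
			int offset = e->cam_offset;

			*pp = e->next;
			free(e);
			return offset;
		}
	}
	return -ENOENT;
}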
+ */ +static int ecore_execute_vlan_mac(struct bnx2x_softc *sc, + union ecore_qable_obj *qo, + ecore_list_t * exe_chunk, + unsigned long *ramrod_flags) +{ + struct ecore_exeq_elem *elem; + struct ecore_vlan_mac_obj *o = &qo->vlan_mac, *cam_obj; + struct ecore_raw_obj *r = &o->raw; + int rc, idx = 0; + int restore = ECORE_TEST_BIT(RAMROD_RESTORE, ramrod_flags); + int drv_only = ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ramrod_flags); + struct ecore_vlan_mac_registry_elem *reg_elem; + enum ecore_vlan_mac_cmd cmd; + + /* If DRIVER_ONLY execution is requested, cleanup a registry + * and exit. Otherwise send a ramrod to FW. + */ + if (!drv_only) { + + /* Set pending */ + r->set_pending(r); + + /* Fill the ramrod data */ + ECORE_LIST_FOR_EACH_ENTRY(elem, exe_chunk, link, + struct ecore_exeq_elem) { + cmd = elem->cmd_data.vlan_mac.cmd; + /* We will add to the target object in MOVE command, so + * change the object for a CAM search. + */ + if (cmd == ECORE_VLAN_MAC_MOVE) + cam_obj = elem->cmd_data.vlan_mac.target_obj; + else + cam_obj = o; + + rc = ecore_vlan_mac_get_registry_elem(sc, cam_obj, + elem, restore, + ®_elem); + if (rc) + goto error_exit; + + ECORE_DBG_BREAK_IF(!reg_elem); + + /* Push a new entry into the registry */ + if (!restore && + ((cmd == ECORE_VLAN_MAC_ADD) || + (cmd == ECORE_VLAN_MAC_MOVE))) + ECORE_LIST_PUSH_HEAD(®_elem->link, + &cam_obj->head); + + /* Configure a single command in a ramrod data buffer */ + o->set_one_rule(sc, o, elem, idx, reg_elem->cam_offset); + + /* MOVE command consumes 2 entries in the ramrod data */ + if (cmd == ECORE_VLAN_MAC_MOVE) + idx += 2; + else + idx++; + } + + /* + * No need for an explicit memory barrier here as long we would + * need to ensure the ordering of writing to the SPQ element + * and updating of the SPQ producer which involves a memory + * read and we will have to put a full memory barrier there + * (inside ecore_sp_post()). 
+ */ + + rc = ecore_sp_post(sc, o->ramrod_cmd, r->cid, + r->rdata_mapping, ETH_CONNECTION_TYPE); + if (rc) + goto error_exit; + } + + /* Now, when we are done with the ramrod - clean up the registry */ + ECORE_LIST_FOR_EACH_ENTRY(elem, exe_chunk, link, struct ecore_exeq_elem) { + cmd = elem->cmd_data.vlan_mac.cmd; + if ((cmd == ECORE_VLAN_MAC_DEL) || (cmd == ECORE_VLAN_MAC_MOVE)) { + reg_elem = o->check_del(sc, o, + &elem->cmd_data.vlan_mac.u); + + ECORE_DBG_BREAK_IF(!reg_elem); + + o->put_cam_offset(o, reg_elem->cam_offset); + ECORE_LIST_REMOVE_ENTRY(®_elem->link, &o->head); + ECORE_FREE(sc, reg_elem, sizeof(*reg_elem)); + } + } + + if (!drv_only) + return ECORE_PENDING; + else + return ECORE_SUCCESS; + +error_exit: + r->clear_pending(r); + + /* Cleanup a registry in case of a failure */ + ECORE_LIST_FOR_EACH_ENTRY(elem, exe_chunk, link, struct ecore_exeq_elem) { + cmd = elem->cmd_data.vlan_mac.cmd; + + if (cmd == ECORE_VLAN_MAC_MOVE) + cam_obj = elem->cmd_data.vlan_mac.target_obj; + else + cam_obj = o; + + /* Delete all newly added above entries */ + if (!restore && + ((cmd == ECORE_VLAN_MAC_ADD) || + (cmd == ECORE_VLAN_MAC_MOVE))) { + reg_elem = o->check_del(sc, cam_obj, + &elem->cmd_data.vlan_mac.u); + if (reg_elem) { + ECORE_LIST_REMOVE_ENTRY(®_elem->link, + &cam_obj->head); + ECORE_FREE(sc, reg_elem, sizeof(*reg_elem)); + } + } + } + + return rc; +} + +static int ecore_vlan_mac_push_new_cmd(struct bnx2x_softc *sc, struct + ecore_vlan_mac_ramrod_params *p) +{ + struct ecore_exeq_elem *elem; + struct ecore_vlan_mac_obj *o = p->vlan_mac_obj; + int restore = ECORE_TEST_BIT(RAMROD_RESTORE, &p->ramrod_flags); + + /* Allocate the execution queue element */ + elem = ecore_exe_queue_alloc_elem(sc); + if (!elem) + return ECORE_NOMEM; + + /* Set the command 'length' */ + switch (p->user_req.cmd) { + case ECORE_VLAN_MAC_MOVE: + elem->cmd_len = 2; + break; + default: + elem->cmd_len = 1; + } + + /* Fill the object specific info */ + ECORE_MEMCPY(&elem->cmd_data.vlan_mac, &p->user_req, + sizeof(p->user_req)); + + /* Try to add a new command to the pending list */ + return ecore_exe_queue_add(sc, &o->exe_queue, elem, restore); +} + +/** + * ecore_config_vlan_mac - configure VLAN/MAC/VLAN_MAC filtering rules. + * + * @sc: device handle + * @p: + * + */ +int ecore_config_vlan_mac(struct bnx2x_softc *sc, + struct ecore_vlan_mac_ramrod_params *p) +{ + int rc = ECORE_SUCCESS; + struct ecore_vlan_mac_obj *o = p->vlan_mac_obj; + unsigned long *ramrod_flags = &p->ramrod_flags; + int cont = ECORE_TEST_BIT(RAMROD_CONT, ramrod_flags); + struct ecore_raw_obj *raw = &o->raw; + + /* + * Add new elements to the execution list for commands that require it. + */ + if (!cont) { + rc = ecore_vlan_mac_push_new_cmd(sc, p); + if (rc) + return rc; + } + + /* If nothing will be executed further in this iteration we want to + * return PENDING if there are pending commands + */ + if (!ecore_exe_queue_empty(&o->exe_queue)) + rc = ECORE_PENDING; + + if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ramrod_flags)) { + ECORE_MSG(sc, + "RAMROD_DRV_CLR_ONLY requested: clearing a pending bit."); + raw->clear_pending(raw); + } + + /* Execute commands if required */ + if (cont || ECORE_TEST_BIT(RAMROD_EXEC, ramrod_flags) || + ECORE_TEST_BIT(RAMROD_COMP_WAIT, ramrod_flags)) { + rc = __ecore_vlan_mac_execute_step(sc, p->vlan_mac_obj, + &p->ramrod_flags); + if (rc < 0) + return rc; + } + + /* RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set + * then user want to wait until the last command is done. 
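/*
 * Illustrative sketch (hypothetical names): the bounded wait-and-step loop
 * that the RAMROD_COMP_WAIT branch of ecore_config_vlan_mac() below runs -
 * at most queue_len + 1 iterations, each waiting for the outstanding
 * completion before kicking the next execution chunk.  The callbacks stand
 * in for raw->wait_comp(), __ecore_vlan_mac_execute_step() and
 * ecore_exe_queue_empty().
 */
static int drain_exe_queue(int queue_len,
			   int (*wait_comp)(void),
			   int (*exec_step)(void),
			   int (*queue_empty)(void))
{
	int max_iterations = queue_len + 1;
	int rc;

	while (!queue_empty() && max_iterations--) {
		rc = wait_comp();	/* wait for the pending ramrod */
		if (rc)
			return rc;

		rc = exec_step();	/* issue the next chunk */
		if (rc < 0)
			return rc;
	}

	return 0;
}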
+ */ + if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags)) { + /* Wait maximum for the current exe_queue length iterations plus + * one (for the current pending command). + */ + int max_iterations = ecore_exe_queue_length(&o->exe_queue) + 1; + + while (!ecore_exe_queue_empty(&o->exe_queue) && + max_iterations--) { + + /* Wait for the current command to complete */ + rc = raw->wait_comp(sc, raw); + if (rc) + return rc; + + /* Make a next step */ + rc = __ecore_vlan_mac_execute_step(sc, + p->vlan_mac_obj, + &p->ramrod_flags); + if (rc < 0) + return rc; + } + + return ECORE_SUCCESS; + } + + return rc; +} + +/** + * ecore_vlan_mac_del_all - delete elements with given vlan_mac_flags spec + * + * @sc: device handle + * @o: + * @vlan_mac_flags: + * @ramrod_flags: execution flags to be used for this deletion + * + * if the last operation has completed successfully and there are no + * more elements left, positive value if the last operation has completed + * successfully and there are more previously configured elements, negative + * value is current operation has failed. + */ +static int ecore_vlan_mac_del_all(struct bnx2x_softc *sc, + struct ecore_vlan_mac_obj *o, + unsigned long *vlan_mac_flags, + unsigned long *ramrod_flags) +{ + struct ecore_vlan_mac_registry_elem *pos = NULL; + int rc = 0, read_lock; + struct ecore_vlan_mac_ramrod_params p; + struct ecore_exe_queue_obj *exeq = &o->exe_queue; + struct ecore_exeq_elem *exeq_pos, *exeq_pos_n; + + /* Clear pending commands first */ + + ECORE_SPIN_LOCK_BH(&exeq->lock); + + ECORE_LIST_FOR_EACH_ENTRY_SAFE(exeq_pos, exeq_pos_n, + &exeq->exe_queue, link, + struct ecore_exeq_elem) { + if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags == + *vlan_mac_flags) { + rc = exeq->remove(sc, exeq->owner, exeq_pos); + if (rc) { + PMD_DRV_LOG(ERR, sc, "Failed to remove command"); + ECORE_SPIN_UNLOCK_BH(&exeq->lock); + return rc; + } + ECORE_LIST_REMOVE_ENTRY(&exeq_pos->link, + &exeq->exe_queue); + ecore_exe_queue_free_elem(sc, exeq_pos); + } + } + + ECORE_SPIN_UNLOCK_BH(&exeq->lock); + + /* Prepare a command request */ + ECORE_MEMSET(&p, 0, sizeof(p)); + p.vlan_mac_obj = o; + p.ramrod_flags = *ramrod_flags; + p.user_req.cmd = ECORE_VLAN_MAC_DEL; + + /* Add all but the last VLAN-MAC to the execution queue without actually + * execution anything. 
+ */ + ECORE_CLEAR_BIT_NA(RAMROD_COMP_WAIT, &p.ramrod_flags); + ECORE_CLEAR_BIT_NA(RAMROD_EXEC, &p.ramrod_flags); + ECORE_CLEAR_BIT_NA(RAMROD_CONT, &p.ramrod_flags); + + ECORE_MSG(sc, "vlan_mac_del_all -- taking vlan_mac_lock (reader)"); + read_lock = ecore_vlan_mac_h_read_lock(sc, o); + if (read_lock != ECORE_SUCCESS) + return read_lock; + + ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link, + struct ecore_vlan_mac_registry_elem) { + if (pos->vlan_mac_flags == *vlan_mac_flags) { + p.user_req.vlan_mac_flags = pos->vlan_mac_flags; + ECORE_MEMCPY(&p.user_req.u, &pos->u, sizeof(pos->u)); + rc = ecore_config_vlan_mac(sc, &p); + if (rc < 0) { + PMD_DRV_LOG(ERR, sc, + "Failed to add a new DEL command"); + ecore_vlan_mac_h_read_unlock(sc, o); + return rc; + } + } + } + + ECORE_MSG(sc, "vlan_mac_del_all -- releasing vlan_mac_lock (reader)"); + ecore_vlan_mac_h_read_unlock(sc, o); + + p.ramrod_flags = *ramrod_flags; + ECORE_SET_BIT_NA(RAMROD_CONT, &p.ramrod_flags); + + return ecore_config_vlan_mac(sc, &p); +} + +static void ecore_init_raw_obj(struct ecore_raw_obj *raw, uint8_t cl_id, + uint32_t cid, uint8_t func_id, + void *rdata, + ecore_dma_addr_t rdata_mapping, int state, + unsigned long *pstate, ecore_obj_type type) +{ + raw->func_id = func_id; + raw->cid = cid; + raw->cl_id = cl_id; + raw->rdata = rdata; + raw->rdata_mapping = rdata_mapping; + raw->state = state; + raw->pstate = pstate; + raw->obj_type = type; + raw->check_pending = ecore_raw_check_pending; + raw->clear_pending = ecore_raw_clear_pending; + raw->set_pending = ecore_raw_set_pending; + raw->wait_comp = ecore_raw_wait; +} + +static void ecore_init_vlan_mac_common(struct ecore_vlan_mac_obj *o, + uint8_t cl_id, uint32_t cid, + uint8_t func_id, void *rdata, + ecore_dma_addr_t rdata_mapping, + int state, unsigned long *pstate, + ecore_obj_type type, + struct ecore_credit_pool_obj + *macs_pool, struct ecore_credit_pool_obj + *vlans_pool) +{ + ECORE_LIST_INIT(&o->head); + o->head_reader = 0; + o->head_exe_request = FALSE; + o->saved_ramrod_flags = 0; + + o->macs_pool = macs_pool; + o->vlans_pool = vlans_pool; + + o->delete_all = ecore_vlan_mac_del_all; + o->restore = ecore_vlan_mac_restore; + o->complete = ecore_complete_vlan_mac; + o->wait = ecore_wait_vlan_mac; + + ecore_init_raw_obj(&o->raw, cl_id, cid, func_id, rdata, rdata_mapping, + state, pstate, type); +} + +void ecore_init_mac_obj(struct bnx2x_softc *sc, + struct ecore_vlan_mac_obj *mac_obj, + uint8_t cl_id, uint32_t cid, uint8_t func_id, + void *rdata, ecore_dma_addr_t rdata_mapping, int state, + unsigned long *pstate, ecore_obj_type type, + struct ecore_credit_pool_obj *macs_pool) +{ + union ecore_qable_obj *qable_obj = (union ecore_qable_obj *)mac_obj; + + ecore_init_vlan_mac_common(mac_obj, cl_id, cid, func_id, rdata, + rdata_mapping, state, pstate, type, + macs_pool, NULL); + + /* CAM credit pool handling */ + mac_obj->get_credit = ecore_get_credit_mac; + mac_obj->put_credit = ecore_put_credit_mac; + mac_obj->get_cam_offset = ecore_get_cam_offset_mac; + mac_obj->put_cam_offset = ecore_put_cam_offset_mac; + + if (CHIP_IS_E1x(sc)) { + mac_obj->set_one_rule = ecore_set_one_mac_e1x; + mac_obj->check_del = ecore_check_mac_del; + mac_obj->check_add = ecore_check_mac_add; + mac_obj->check_move = ecore_check_move_always_err; + mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC; + + /* Exe Queue */ + ecore_exe_queue_init(sc, + &mac_obj->exe_queue, 1, qable_obj, + ecore_validate_vlan_mac, + ecore_remove_vlan_mac, + ecore_optimize_vlan_mac, + ecore_execute_vlan_mac, + ecore_exeq_get_mac); + 
} else { + mac_obj->set_one_rule = ecore_set_one_mac_e2; + mac_obj->check_del = ecore_check_mac_del; + mac_obj->check_add = ecore_check_mac_add; + mac_obj->check_move = ecore_check_move; + mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES; + mac_obj->get_n_elements = ecore_get_n_elements; + + /* Exe Queue */ + ecore_exe_queue_init(sc, + &mac_obj->exe_queue, CLASSIFY_RULES_COUNT, + qable_obj, ecore_validate_vlan_mac, + ecore_remove_vlan_mac, + ecore_optimize_vlan_mac, + ecore_execute_vlan_mac, + ecore_exeq_get_mac); + } +} + +/* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */ +static void __storm_memset_mac_filters(struct bnx2x_softc *sc, struct + tstorm_eth_mac_filter_config + *mac_filters, uint16_t pf_id) +{ + size_t size = sizeof(struct tstorm_eth_mac_filter_config); + + uint32_t addr = BAR_TSTRORM_INTMEM + + TSTORM_MAC_FILTER_CONFIG_OFFSET(pf_id); + + ecore_storm_memset_struct(sc, addr, size, (uint32_t *) mac_filters); +} + +static int ecore_set_rx_mode_e1x(struct bnx2x_softc *sc, + struct ecore_rx_mode_ramrod_params *p) +{ + /* update the sc MAC filter structure */ + uint32_t mask = (1 << p->cl_id); + + struct tstorm_eth_mac_filter_config *mac_filters = + (struct tstorm_eth_mac_filter_config *)p->rdata; + + /* initial setting is drop-all */ + uint8_t drop_all_ucast = 1, drop_all_mcast = 1; + uint8_t accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0; + uint8_t unmatched_unicast = 0; + + /* In e1x there we only take into account rx accept flag since tx switching + * isn't enabled. */ + if (ECORE_TEST_BIT(ECORE_ACCEPT_UNICAST, &p->rx_accept_flags)) + /* accept matched ucast */ + drop_all_ucast = 0; + + if (ECORE_TEST_BIT(ECORE_ACCEPT_MULTICAST, &p->rx_accept_flags)) + /* accept matched mcast */ + drop_all_mcast = 0; + + if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_UNICAST, &p->rx_accept_flags)) { + /* accept all mcast */ + drop_all_ucast = 0; + accp_all_ucast = 1; + } + if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_MULTICAST, &p->rx_accept_flags)) { + /* accept all mcast */ + drop_all_mcast = 0; + accp_all_mcast = 1; + } + if (ECORE_TEST_BIT(ECORE_ACCEPT_BROADCAST, &p->rx_accept_flags)) + /* accept (all) bcast */ + accp_all_bcast = 1; + if (ECORE_TEST_BIT(ECORE_ACCEPT_UNMATCHED, &p->rx_accept_flags)) + /* accept unmatched unicasts */ + unmatched_unicast = 1; + + mac_filters->ucast_drop_all = drop_all_ucast ? + mac_filters->ucast_drop_all | mask : + mac_filters->ucast_drop_all & ~mask; + + mac_filters->mcast_drop_all = drop_all_mcast ? + mac_filters->mcast_drop_all | mask : + mac_filters->mcast_drop_all & ~mask; + + mac_filters->ucast_accept_all = accp_all_ucast ? + mac_filters->ucast_accept_all | mask : + mac_filters->ucast_accept_all & ~mask; + + mac_filters->mcast_accept_all = accp_all_mcast ? + mac_filters->mcast_accept_all | mask : + mac_filters->mcast_accept_all & ~mask; + + mac_filters->bcast_accept_all = accp_all_bcast ? + mac_filters->bcast_accept_all | mask : + mac_filters->bcast_accept_all & ~mask; + + mac_filters->unmatched_unicast = unmatched_unicast ? 
+	    mac_filters->unmatched_unicast | mask :
+	    mac_filters->unmatched_unicast & ~mask;
+
+	ECORE_MSG(sc, "drop_ucast 0x%x drop_mcast 0x%x accp_ucast 0x%x"
+		  " accp_mcast 0x%x accp_bcast 0x%x",
+		  mac_filters->ucast_drop_all, mac_filters->mcast_drop_all,
+		  mac_filters->ucast_accept_all, mac_filters->mcast_accept_all,
+		  mac_filters->bcast_accept_all);
+
+	/* write the MAC filter structure */
+	__storm_memset_mac_filters(sc, mac_filters, p->func_id);
+
+	/* The operation is completed */
+	ECORE_CLEAR_BIT(p->state, p->pstate);
+	ECORE_SMP_MB_AFTER_CLEAR_BIT();
+
+	return ECORE_SUCCESS;
+}
+
+/* Setup ramrod data */
+static void ecore_rx_mode_set_rdata_hdr_e2(uint32_t cid, struct eth_classify_header
+					   *hdr, uint8_t rule_cnt)
+{
+	hdr->echo = ECORE_CPU_TO_LE32(cid);
+	hdr->rule_cnt = rule_cnt;
+}
+
+static void ecore_rx_mode_set_cmd_state_e2(unsigned long *accept_flags, struct eth_filter_rules_cmd
+					   *cmd, int clear_accept_all)
+{
+	uint16_t state;
+
+	/* start with 'drop-all' */
+	state = ETH_FILTER_RULES_CMD_UCAST_DROP_ALL |
+	    ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
+
+	if (ECORE_TEST_BIT(ECORE_ACCEPT_UNICAST, accept_flags))
+		state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
+
+	if (ECORE_TEST_BIT(ECORE_ACCEPT_MULTICAST, accept_flags))
+		state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
+
+	if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_UNICAST, accept_flags)) {
+		state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
+		state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
+	}
+
+	if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_MULTICAST, accept_flags)) {
+		state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
+		state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
+	}
+	if (ECORE_TEST_BIT(ECORE_ACCEPT_BROADCAST, accept_flags))
+		state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
+
+	if (ECORE_TEST_BIT(ECORE_ACCEPT_UNMATCHED, accept_flags)) {
+		state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
+		state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
+	}
+	if (ECORE_TEST_BIT(ECORE_ACCEPT_ANY_VLAN, accept_flags))
+		state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN;
+
+	/* Clear ACCEPT_ALL_XXX flags for FCoE L2 Queue */
+	if (clear_accept_all) {
+		state &= ~ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
+		state &= ~ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
+		state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
+		state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
+	}
+
+	cmd->state = ECORE_CPU_TO_LE16(state);
+}
+
+static int ecore_set_rx_mode_e2(struct bnx2x_softc *sc,
+				struct ecore_rx_mode_ramrod_params *p)
+{
+	struct eth_filter_rules_ramrod_data *data = p->rdata;
+	int rc;
+	uint8_t rule_idx = 0;
+
+	/* Reset the ramrod data buffer */
+	ECORE_MEMSET(data, 0, sizeof(*data));
+
+	/* Setup ramrod data */
+
+	/* Tx (internal switching) */
+	if (ECORE_TEST_BIT(RAMROD_TX, &p->ramrod_flags)) {
+		data->rules[rule_idx].client_id = p->cl_id;
+		data->rules[rule_idx].func_id = p->func_id;
+
+		data->rules[rule_idx].cmd_general_data =
+		    ETH_FILTER_RULES_CMD_TX_CMD;
+
+		ecore_rx_mode_set_cmd_state_e2(&p->tx_accept_flags,
+					       &(data->rules[rule_idx++]),
+					       FALSE);
+	}
+
+	/* Rx */
+	if (ECORE_TEST_BIT(RAMROD_RX, &p->ramrod_flags)) {
+		data->rules[rule_idx].client_id = p->cl_id;
+		data->rules[rule_idx].func_id = p->func_id;
+
+		data->rules[rule_idx].cmd_general_data =
+		    ETH_FILTER_RULES_CMD_RX_CMD;
+
+		ecore_rx_mode_set_cmd_state_e2(&p->rx_accept_flags,
+					       &(data->rules[rule_idx++]),
+					       FALSE);
+	}
+
+	/* If FCoE Queue configuration has been requested, configure the Rx and
+	 * internal switching modes for this queue in separate rules.
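+	 * Each mode becomes its own entry in data->rules[], so up to two
+	 * extra rules may be added for the FCoE client.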
+ * + * FCoE queue shell never be set to ACCEPT_ALL packets of any sort: + * MCAST_ALL, UCAST_ALL, BCAST_ALL and UNMATCHED. + */ + if (ECORE_TEST_BIT(ECORE_RX_MODE_FCOE_ETH, &p->rx_mode_flags)) { + /* Tx (internal switching) */ + if (ECORE_TEST_BIT(RAMROD_TX, &p->ramrod_flags)) { + data->rules[rule_idx].client_id = ECORE_FCOE_CID(sc); + data->rules[rule_idx].func_id = p->func_id; + + data->rules[rule_idx].cmd_general_data = + ETH_FILTER_RULES_CMD_TX_CMD; + + ecore_rx_mode_set_cmd_state_e2(&p->tx_accept_flags, + &(data->rules + [rule_idx++]), TRUE); + } + + /* Rx */ + if (ECORE_TEST_BIT(RAMROD_RX, &p->ramrod_flags)) { + data->rules[rule_idx].client_id = ECORE_FCOE_CID(sc); + data->rules[rule_idx].func_id = p->func_id; + + data->rules[rule_idx].cmd_general_data = + ETH_FILTER_RULES_CMD_RX_CMD; + + ecore_rx_mode_set_cmd_state_e2(&p->rx_accept_flags, + &(data->rules + [rule_idx++]), TRUE); + } + } + + /* Set the ramrod header (most importantly - number of rules to + * configure). + */ + ecore_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx); + + ECORE_MSG + (sc, "About to configure %d rules, rx_accept_flags 0x%lx, tx_accept_flags 0x%lx", + data->header.rule_cnt, p->rx_accept_flags, p->tx_accept_flags); + + /* No need for an explicit memory barrier here as long we would + * need to ensure the ordering of writing to the SPQ element + * and updating of the SPQ producer which involves a memory + * read and we will have to put a full memory barrier there + * (inside ecore_sp_post()). + */ + + /* Send a ramrod */ + rc = ecore_sp_post(sc, + RAMROD_CMD_ID_ETH_FILTER_RULES, + p->cid, p->rdata_mapping, ETH_CONNECTION_TYPE); + if (rc) + return rc; + + /* Ramrod completion is pending */ + return ECORE_PENDING; +} + +static int ecore_wait_rx_mode_comp_e2(struct bnx2x_softc *sc, + struct ecore_rx_mode_ramrod_params *p) +{ + return ecore_state_wait(sc, p->state, p->pstate); +} + +static int ecore_empty_rx_mode_wait(__rte_unused struct bnx2x_softc *sc, + __rte_unused struct + ecore_rx_mode_ramrod_params *p) +{ + /* Do nothing */ + return ECORE_SUCCESS; +} + +int ecore_config_rx_mode(struct bnx2x_softc *sc, + struct ecore_rx_mode_ramrod_params *p) +{ + int rc; + + /* Configure the new classification in the chip */ + if (p->rx_mode_obj->config_rx_mode) { + rc = p->rx_mode_obj->config_rx_mode(sc, p); + if (rc < 0) + return rc; + + /* Wait for a ramrod completion if was requested */ + if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags)) { + rc = p->rx_mode_obj->wait_comp(sc, p); + if (rc) + return rc; + } + } else { + ECORE_MSG(sc, "ERROR: config_rx_mode is NULL"); + return -1; + } + + return rc; +} + +void ecore_init_rx_mode_obj(struct bnx2x_softc *sc, struct ecore_rx_mode_obj *o) +{ + if (CHIP_IS_E1x(sc)) { + o->wait_comp = ecore_empty_rx_mode_wait; + o->config_rx_mode = ecore_set_rx_mode_e1x; + } else { + o->wait_comp = ecore_wait_rx_mode_comp_e2; + o->config_rx_mode = ecore_set_rx_mode_e2; + } +} + +/********************* Multicast verbs: SET, CLEAR ****************************/ +static uint8_t ecore_mcast_bin_from_mac(uint8_t * mac) +{ + return (ECORE_CRC32_LE(0, mac, ETH_ALEN) >> 24) & 0xff; +} + +struct ecore_mcast_mac_elem { + ecore_list_entry_t link; + uint8_t mac[ETH_ALEN]; + uint8_t pad[2]; /* For a natural alignment of the following buffer */ +}; + +struct ecore_pending_mcast_cmd { + ecore_list_entry_t link; + int type; /* ECORE_MCAST_CMD_X */ + union { + ecore_list_t macs_head; + uint32_t macs_num; /* Needed for DEL command */ + int next_bin; /* Needed for RESTORE flow with aprox match */ 
+ } data; + + int done; /* set to TRUE, when the command has been handled, + * practically used in 57712 handling only, where one pending + * command may be handled in a few operations. As long as for + * other chips every operation handling is completed in a + * single ramrod, there is no need to utilize this field. + */ +}; + +static int ecore_mcast_wait(struct bnx2x_softc *sc, struct ecore_mcast_obj *o) +{ + if (ecore_state_wait(sc, o->sched_state, o->raw.pstate) || + o->raw.wait_comp(sc, &o->raw)) + return ECORE_TIMEOUT; + + return ECORE_SUCCESS; +} + +static int ecore_mcast_enqueue_cmd(struct bnx2x_softc *sc __rte_unused, + struct ecore_mcast_obj *o, + struct ecore_mcast_ramrod_params *p, + enum ecore_mcast_cmd cmd) +{ + int total_sz; + struct ecore_pending_mcast_cmd *new_cmd; + struct ecore_mcast_mac_elem *cur_mac = NULL; + struct ecore_mcast_list_elem *pos; + int macs_list_len = ((cmd == ECORE_MCAST_CMD_ADD) ? + p->mcast_list_len : 0); + + /* If the command is empty ("handle pending commands only"), break */ + if (!p->mcast_list_len) + return ECORE_SUCCESS; + + total_sz = sizeof(*new_cmd) + + macs_list_len * sizeof(struct ecore_mcast_mac_elem); + + /* Add mcast is called under spin_lock, thus calling with GFP_ATOMIC */ + new_cmd = ECORE_ZALLOC(total_sz, GFP_ATOMIC, sc); + + if (!new_cmd) + return ECORE_NOMEM; + + ECORE_MSG(sc, "About to enqueue a new %d command. macs_list_len=%d", + cmd, macs_list_len); + + ECORE_LIST_INIT(&new_cmd->data.macs_head); + + new_cmd->type = cmd; + new_cmd->done = FALSE; + + switch (cmd) { + case ECORE_MCAST_CMD_ADD: + cur_mac = (struct ecore_mcast_mac_elem *) + ((uint8_t *) new_cmd + sizeof(*new_cmd)); + + /* Push the MACs of the current command into the pending command + * MACs list: FIFO + */ + ECORE_LIST_FOR_EACH_ENTRY(pos, &p->mcast_list, link, + struct ecore_mcast_list_elem) { + ECORE_MEMCPY(cur_mac->mac, pos->mac, ETH_ALEN); + ECORE_LIST_PUSH_TAIL(&cur_mac->link, + &new_cmd->data.macs_head); + cur_mac++; + } + + break; + + case ECORE_MCAST_CMD_DEL: + new_cmd->data.macs_num = p->mcast_list_len; + break; + + case ECORE_MCAST_CMD_RESTORE: + new_cmd->data.next_bin = 0; + break; + + default: + ECORE_FREE(sc, new_cmd, total_sz); + PMD_DRV_LOG(ERR, sc, "Unknown command: %d", cmd); + return ECORE_INVAL; + } + + /* Push the new pending command to the tail of the pending list: FIFO */ + ECORE_LIST_PUSH_TAIL(&new_cmd->link, &o->pending_cmds_head); + + o->set_sched(o); + + return ECORE_PENDING; +} + +/** + * ecore_mcast_get_next_bin - get the next set bin (index) + * + * @o: + * @last: index to start looking from (including) + * + * returns the next found (set) bin or a negative value if none is found. 
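+ * The registry is scanned one 64-bit vector element at a time.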
+ */ +static int ecore_mcast_get_next_bin(struct ecore_mcast_obj *o, int last) +{ + int i, j, inner_start = last % BIT_VEC64_ELEM_SZ; + + for (i = last / BIT_VEC64_ELEM_SZ; i < ECORE_MCAST_VEC_SZ; i++) { + if (o->registry.aprox_match.vec[i]) + for (j = inner_start; j < BIT_VEC64_ELEM_SZ; j++) { + int cur_bit = j + BIT_VEC64_ELEM_SZ * i; + if (BIT_VEC64_TEST_BIT + (o->registry.aprox_match.vec, cur_bit)) { + return cur_bit; + } + } + inner_start = 0; + } + + /* None found */ + return -1; +} + +/** + * ecore_mcast_clear_first_bin - find the first set bin and clear it + * + * @o: + * + * returns the index of the found bin or -1 if none is found + */ +static int ecore_mcast_clear_first_bin(struct ecore_mcast_obj *o) +{ + int cur_bit = ecore_mcast_get_next_bin(o, 0); + + if (cur_bit >= 0) + BIT_VEC64_CLEAR_BIT(o->registry.aprox_match.vec, cur_bit); + + return cur_bit; +} + +static uint8_t ecore_mcast_get_rx_tx_flag(struct ecore_mcast_obj *o) +{ + struct ecore_raw_obj *raw = &o->raw; + uint8_t rx_tx_flag = 0; + + if ((raw->obj_type == ECORE_OBJ_TYPE_TX) || + (raw->obj_type == ECORE_OBJ_TYPE_RX_TX)) + rx_tx_flag |= ETH_MULTICAST_RULES_CMD_TX_CMD; + + if ((raw->obj_type == ECORE_OBJ_TYPE_RX) || + (raw->obj_type == ECORE_OBJ_TYPE_RX_TX)) + rx_tx_flag |= ETH_MULTICAST_RULES_CMD_RX_CMD; + + return rx_tx_flag; +} + +static void ecore_mcast_set_one_rule_e2(struct bnx2x_softc *sc __rte_unused, + struct ecore_mcast_obj *o, int idx, + union ecore_mcast_config_data *cfg_data, + enum ecore_mcast_cmd cmd) +{ + struct ecore_raw_obj *r = &o->raw; + struct eth_multicast_rules_ramrod_data *data = + (struct eth_multicast_rules_ramrod_data *)(r->rdata); + uint8_t func_id = r->func_id; + uint8_t rx_tx_add_flag = ecore_mcast_get_rx_tx_flag(o); + int bin; + + if ((cmd == ECORE_MCAST_CMD_ADD) || (cmd == ECORE_MCAST_CMD_RESTORE)) + rx_tx_add_flag |= ETH_MULTICAST_RULES_CMD_IS_ADD; + + data->rules[idx].cmd_general_data |= rx_tx_add_flag; + + /* Get a bin and update a bins' vector */ + switch (cmd) { + case ECORE_MCAST_CMD_ADD: + bin = ecore_mcast_bin_from_mac(cfg_data->mac); + BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bin); + break; + + case ECORE_MCAST_CMD_DEL: + /* If there were no more bins to clear + * (ecore_mcast_clear_first_bin() returns -1) then we would + * clear any (0xff) bin. + * See ecore_mcast_validate_e2() for explanation when it may + * happen. + */ + bin = ecore_mcast_clear_first_bin(o); + break; + + case ECORE_MCAST_CMD_RESTORE: + bin = cfg_data->bin; + break; + + default: + PMD_DRV_LOG(ERR, sc, "Unknown command: %d", cmd); + return; + } + + ECORE_MSG(sc, "%s bin %d", + ((rx_tx_add_flag & ETH_MULTICAST_RULES_CMD_IS_ADD) ? 
+ "Setting" : "Clearing"), bin); + + data->rules[idx].bin_id = (uint8_t) bin; + data->rules[idx].func_id = func_id; + data->rules[idx].engine_id = o->engine_id; +} + +/** + * ecore_mcast_handle_restore_cmd_e2 - restore configuration from the registry + * + * @sc: device handle + * @o: + * @start_bin: index in the registry to start from (including) + * @rdata_idx: index in the ramrod data to start from + * + * returns last handled bin index or -1 if all bins have been handled + */ +static int ecore_mcast_handle_restore_cmd_e2(struct bnx2x_softc *sc, + struct ecore_mcast_obj *o, + int start_bin, int *rdata_idx) +{ + int cur_bin, cnt = *rdata_idx; + union ecore_mcast_config_data cfg_data = { NULL }; + + /* go through the registry and configure the bins from it */ + for (cur_bin = ecore_mcast_get_next_bin(o, start_bin); cur_bin >= 0; + cur_bin = ecore_mcast_get_next_bin(o, cur_bin + 1)) { + + cfg_data.bin = (uint8_t) cur_bin; + o->set_one_rule(sc, o, cnt, &cfg_data, ECORE_MCAST_CMD_RESTORE); + + cnt++; + + ECORE_MSG(sc, "About to configure a bin %d", cur_bin); + + /* Break if we reached the maximum number + * of rules. + */ + if (cnt >= o->max_cmd_len) + break; + } + + *rdata_idx = cnt; + + return cur_bin; +} + +static void ecore_mcast_hdl_pending_add_e2(struct bnx2x_softc *sc, + struct ecore_mcast_obj *o, + struct ecore_pending_mcast_cmd + *cmd_pos, int *line_idx) +{ + struct ecore_mcast_mac_elem *pmac_pos, *pmac_pos_n; + int cnt = *line_idx; + union ecore_mcast_config_data cfg_data = { NULL }; + + ECORE_LIST_FOR_EACH_ENTRY_SAFE(pmac_pos, pmac_pos_n, + &cmd_pos->data.macs_head, link, + struct ecore_mcast_mac_elem) { + + cfg_data.mac = &pmac_pos->mac[0]; + o->set_one_rule(sc, o, cnt, &cfg_data, cmd_pos->type); + + cnt++; + + ECORE_MSG + (sc, "About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC", + pmac_pos->mac[0], pmac_pos->mac[1], pmac_pos->mac[2], + pmac_pos->mac[3], pmac_pos->mac[4], pmac_pos->mac[5]); + + ECORE_LIST_REMOVE_ENTRY(&pmac_pos->link, + &cmd_pos->data.macs_head); + + /* Break if we reached the maximum number + * of rules. + */ + if (cnt >= o->max_cmd_len) + break; + } + + *line_idx = cnt; + + /* if no more MACs to configure - we are done */ + if (ECORE_LIST_IS_EMPTY(&cmd_pos->data.macs_head)) + cmd_pos->done = TRUE; +} + +static void ecore_mcast_hdl_pending_del_e2(struct bnx2x_softc *sc, + struct ecore_mcast_obj *o, + struct ecore_pending_mcast_cmd + *cmd_pos, int *line_idx) +{ + int cnt = *line_idx; + + while (cmd_pos->data.macs_num) { + o->set_one_rule(sc, o, cnt, NULL, cmd_pos->type); + + cnt++; + + cmd_pos->data.macs_num--; + + ECORE_MSG(sc, "Deleting MAC. %d left,cnt is %d", + cmd_pos->data.macs_num, cnt); + + /* Break if we reached the maximum + * number of rules. 
+ */ + if (cnt >= o->max_cmd_len) + break; + } + + *line_idx = cnt; + + /* If we cleared all bins - we are done */ + if (!cmd_pos->data.macs_num) + cmd_pos->done = TRUE; +} + +static void ecore_mcast_hdl_pending_restore_e2(struct bnx2x_softc *sc, + struct ecore_mcast_obj *o, struct + ecore_pending_mcast_cmd + *cmd_pos, int *line_idx) +{ + cmd_pos->data.next_bin = o->hdl_restore(sc, o, cmd_pos->data.next_bin, + line_idx); + + if (cmd_pos->data.next_bin < 0) + /* If o->set_restore returned -1 we are done */ + cmd_pos->done = TRUE; + else + /* Start from the next bin next time */ + cmd_pos->data.next_bin++; +} + +static int ecore_mcast_handle_pending_cmds_e2(struct bnx2x_softc *sc, struct + ecore_mcast_ramrod_params + *p) +{ + struct ecore_pending_mcast_cmd *cmd_pos, *cmd_pos_n; + int cnt = 0; + struct ecore_mcast_obj *o = p->mcast_obj; + + ECORE_LIST_FOR_EACH_ENTRY_SAFE(cmd_pos, cmd_pos_n, + &o->pending_cmds_head, link, + struct ecore_pending_mcast_cmd) { + switch (cmd_pos->type) { + case ECORE_MCAST_CMD_ADD: + ecore_mcast_hdl_pending_add_e2(sc, o, cmd_pos, &cnt); + break; + + case ECORE_MCAST_CMD_DEL: + ecore_mcast_hdl_pending_del_e2(sc, o, cmd_pos, &cnt); + break; + + case ECORE_MCAST_CMD_RESTORE: + ecore_mcast_hdl_pending_restore_e2(sc, o, cmd_pos, + &cnt); + break; + + default: + PMD_DRV_LOG(ERR, sc, + "Unknown command: %d", cmd_pos->type); + return ECORE_INVAL; + } + + /* If the command has been completed - remove it from the list + * and free the memory + */ + if (cmd_pos->done) { + ECORE_LIST_REMOVE_ENTRY(&cmd_pos->link, + &o->pending_cmds_head); + ECORE_FREE(sc, cmd_pos, cmd_pos->alloc_len); + } + + /* Break if we reached the maximum number of rules */ + if (cnt >= o->max_cmd_len) + break; + } + + return cnt; +} + +static void ecore_mcast_hdl_add(struct bnx2x_softc *sc, + struct ecore_mcast_obj *o, + struct ecore_mcast_ramrod_params *p, + int *line_idx) +{ + struct ecore_mcast_list_elem *mlist_pos; + union ecore_mcast_config_data cfg_data = { NULL }; + int cnt = *line_idx; + + ECORE_LIST_FOR_EACH_ENTRY(mlist_pos, &p->mcast_list, link, + struct ecore_mcast_list_elem) { + cfg_data.mac = mlist_pos->mac; + o->set_one_rule(sc, o, cnt, &cfg_data, ECORE_MCAST_CMD_ADD); + + cnt++; + + ECORE_MSG + (sc, "About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC", + mlist_pos->mac[0], mlist_pos->mac[1], mlist_pos->mac[2], + mlist_pos->mac[3], mlist_pos->mac[4], mlist_pos->mac[5]); + } + + *line_idx = cnt; +} + +static void ecore_mcast_hdl_del(struct bnx2x_softc *sc, + struct ecore_mcast_obj *o, + struct ecore_mcast_ramrod_params *p, + int *line_idx) +{ + int cnt = *line_idx, i; + + for (i = 0; i < p->mcast_list_len; i++) { + o->set_one_rule(sc, o, cnt, NULL, ECORE_MCAST_CMD_DEL); + + cnt++; + + ECORE_MSG(sc, + "Deleting MAC. %d left", p->mcast_list_len - i - 1); + } + + *line_idx = cnt; +} + +/** + * ecore_mcast_handle_current_cmd - + * + * @sc: device handle + * @p: + * @cmd: + * @start_cnt: first line in the ramrod data that may be used + * + * This function is called if there is enough place for the current command in + * the ramrod data. + * Returns number of lines filled in the ramrod data in total. 
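+ * Once handled, p->mcast_list_len is zeroed so the command is not
+ * processed again.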
+ */ +static int ecore_mcast_handle_current_cmd(struct bnx2x_softc *sc, struct + ecore_mcast_ramrod_params *p, + enum ecore_mcast_cmd cmd, + int start_cnt) +{ + struct ecore_mcast_obj *o = p->mcast_obj; + int cnt = start_cnt; + + ECORE_MSG(sc, "p->mcast_list_len=%d", p->mcast_list_len); + + switch (cmd) { + case ECORE_MCAST_CMD_ADD: + ecore_mcast_hdl_add(sc, o, p, &cnt); + break; + + case ECORE_MCAST_CMD_DEL: + ecore_mcast_hdl_del(sc, o, p, &cnt); + break; + + case ECORE_MCAST_CMD_RESTORE: + o->hdl_restore(sc, o, 0, &cnt); + break; + + default: + PMD_DRV_LOG(ERR, sc, "Unknown command: %d", cmd); + return ECORE_INVAL; + } + + /* The current command has been handled */ + p->mcast_list_len = 0; + + return cnt; +} + +static int ecore_mcast_validate_e2(__rte_unused struct bnx2x_softc *sc, + struct ecore_mcast_ramrod_params *p, + enum ecore_mcast_cmd cmd) +{ + struct ecore_mcast_obj *o = p->mcast_obj; + int reg_sz = o->get_registry_size(o); + + switch (cmd) { + /* DEL command deletes all currently configured MACs */ + case ECORE_MCAST_CMD_DEL: + o->set_registry_size(o, 0); + /* fall-through */ + + /* RESTORE command will restore the entire multicast configuration */ + case ECORE_MCAST_CMD_RESTORE: + /* Here we set the approximate amount of work to do, which in + * fact may be only less as some MACs in postponed ADD + * command(s) scheduled before this command may fall into + * the same bin and the actual number of bins set in the + * registry would be less than we estimated here. See + * ecore_mcast_set_one_rule_e2() for further details. + */ + p->mcast_list_len = reg_sz; + break; + + case ECORE_MCAST_CMD_ADD: + case ECORE_MCAST_CMD_CONT: + /* Here we assume that all new MACs will fall into new bins. + * However we will correct the real registry size after we + * handle all pending commands. + */ + o->set_registry_size(o, reg_sz + p->mcast_list_len); + break; + + default: + PMD_DRV_LOG(ERR, sc, "Unknown command: %d", cmd); + return ECORE_INVAL; + } + + /* Increase the total number of MACs pending to be configured */ + o->total_pending_num += p->mcast_list_len; + + return ECORE_SUCCESS; +} + +static void ecore_mcast_revert_e2(__rte_unused struct bnx2x_softc *sc, + struct ecore_mcast_ramrod_params *p, + int old_num_bins, + enum ecore_mcast_cmd cmd) +{ + struct ecore_mcast_obj *o = p->mcast_obj; + + o->set_registry_size(o, old_num_bins); + o->total_pending_num -= p->mcast_list_len; + + if (cmd == ECORE_MCAST_CMD_SET) + o->total_pending_num -= o->max_cmd_len; +} + +/** + * ecore_mcast_set_rdata_hdr_e2 - sets a header values + * + * @sc: device handle + * @p: + * @len: number of rules to handle + */ +static void ecore_mcast_set_rdata_hdr_e2(__rte_unused struct bnx2x_softc + *sc, struct ecore_mcast_ramrod_params + *p, uint8_t len) +{ + struct ecore_raw_obj *r = &p->mcast_obj->raw; + struct eth_multicast_rules_ramrod_data *data = + (struct eth_multicast_rules_ramrod_data *)(r->rdata); + + data->header.echo = ECORE_CPU_TO_LE32((r->cid & ECORE_SWCID_MASK) | + (ECORE_FILTER_MCAST_PENDING << + ECORE_SWCID_SHIFT)); + data->header.rule_cnt = len; +} + +/** + * ecore_mcast_refresh_registry_e2 - recalculate the actual number of set bins + * + * @sc: device handle + * @o: + * + * Recalculate the actual number of set bins in the registry using Brian + * Kernighan's algorithm: it's execution complexity is as a number of set bins. 
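+ * (each "elem &= elem - 1" step clears the lowest set bit of an element).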
+ */ +static int ecore_mcast_refresh_registry_e2(struct ecore_mcast_obj *o) +{ + int i, cnt = 0; + uint64_t elem; + + for (i = 0; i < ECORE_MCAST_VEC_SZ; i++) { + elem = o->registry.aprox_match.vec[i]; + for (; elem; cnt++) + elem &= elem - 1; + } + + o->set_registry_size(o, cnt); + + return ECORE_SUCCESS; +} + +static int ecore_mcast_setup_e2(struct bnx2x_softc *sc, + struct ecore_mcast_ramrod_params *p, + enum ecore_mcast_cmd cmd) +{ + struct ecore_raw_obj *raw = &p->mcast_obj->raw; + struct ecore_mcast_obj *o = p->mcast_obj; + struct eth_multicast_rules_ramrod_data *data = + (struct eth_multicast_rules_ramrod_data *)(raw->rdata); + int cnt = 0, rc; + + /* Reset the ramrod data buffer */ + ECORE_MEMSET(data, 0, sizeof(*data)); + + cnt = ecore_mcast_handle_pending_cmds_e2(sc, p); + + /* If there are no more pending commands - clear SCHEDULED state */ + if (ECORE_LIST_IS_EMPTY(&o->pending_cmds_head)) + o->clear_sched(o); + + /* The below may be TRUE if there was enough room in ramrod + * data for all pending commands and for the current + * command. Otherwise the current command would have been added + * to the pending commands and p->mcast_list_len would have been + * zeroed. + */ + if (p->mcast_list_len > 0) + cnt = ecore_mcast_handle_current_cmd(sc, p, cmd, cnt); + + /* We've pulled out some MACs - update the total number of + * outstanding. + */ + o->total_pending_num -= cnt; + + /* send a ramrod */ + ECORE_DBG_BREAK_IF(o->total_pending_num < 0); + ECORE_DBG_BREAK_IF(cnt > o->max_cmd_len); + + ecore_mcast_set_rdata_hdr_e2(sc, p, (uint8_t) cnt); + + /* Update a registry size if there are no more pending operations. + * + * We don't want to change the value of the registry size if there are + * pending operations because we want it to always be equal to the + * exact or the approximate number (see ecore_mcast_validate_e2()) of + * set bins after the last requested operation in order to properly + * evaluate the size of the next DEL/RESTORE operation. + * + * Note that we update the registry itself during command(s) handling + * - see ecore_mcast_set_one_rule_e2(). That's because for 57712 we + * aggregate multiple commands (ADD/DEL/RESTORE) into one ramrod but + * with a limited amount of update commands (per MAC/bin) and we don't + * know in this scope what the actual state of bins configuration is + * going to be after this ramrod. + */ + if (!o->total_pending_num) + ecore_mcast_refresh_registry_e2(o); + + /* If CLEAR_ONLY was requested - don't send a ramrod and clear + * RAMROD_PENDING status immediately. + */ + if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) { + raw->clear_pending(raw); + return ECORE_SUCCESS; + } else { + /* No need for an explicit memory barrier here as long we would + * need to ensure the ordering of writing to the SPQ element + * and updating of the SPQ producer which involves a memory + * read and we will have to put a full memory barrier there + * (inside ecore_sp_post()). 
+ */ + + /* Send a ramrod */ + rc = ecore_sp_post(sc, + RAMROD_CMD_ID_ETH_MULTICAST_RULES, + raw->cid, + raw->rdata_mapping, ETH_CONNECTION_TYPE); + if (rc) + return rc; + + /* Ramrod completion is pending */ + return ECORE_PENDING; + } +} + +static int ecore_mcast_validate_e1h(__rte_unused struct bnx2x_softc *sc, + struct ecore_mcast_ramrod_params *p, + enum ecore_mcast_cmd cmd) +{ + /* Mark, that there is a work to do */ + if ((cmd == ECORE_MCAST_CMD_DEL) || (cmd == ECORE_MCAST_CMD_RESTORE)) + p->mcast_list_len = 1; + + return ECORE_SUCCESS; +} + +static void ecore_mcast_revert_e1h(__rte_unused struct bnx2x_softc *sc, + __rte_unused struct ecore_mcast_ramrod_params + *p, __rte_unused int old_num_bins, + __rte_unused enum ecore_mcast_cmd cmd) +{ + /* Do nothing */ +} + +#define ECORE_57711_SET_MC_FILTER(filter, bit) \ +do { \ + (filter)[(bit) >> 5] |= (1 << ((bit) & 0x1f)); \ +} while (0) + +static void ecore_mcast_hdl_add_e1h(struct bnx2x_softc *sc __rte_unused, + struct ecore_mcast_obj *o, + struct ecore_mcast_ramrod_params *p, + uint32_t * mc_filter) +{ + struct ecore_mcast_list_elem *mlist_pos; + int bit; + + ECORE_LIST_FOR_EACH_ENTRY(mlist_pos, &p->mcast_list, link, + struct ecore_mcast_list_elem) { + bit = ecore_mcast_bin_from_mac(mlist_pos->mac); + ECORE_57711_SET_MC_FILTER(mc_filter, bit); + + ECORE_MSG + (sc, "About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC, bin %d", + mlist_pos->mac[0], mlist_pos->mac[1], mlist_pos->mac[2], + mlist_pos->mac[3], mlist_pos->mac[4], mlist_pos->mac[5], + bit); + + /* bookkeeping... */ + BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bit); + } +} + +static void ecore_mcast_hdl_restore_e1h(struct bnx2x_softc *sc + __rte_unused, + struct ecore_mcast_obj *o, + uint32_t * mc_filter) +{ + int bit; + + for (bit = ecore_mcast_get_next_bin(o, 0); + bit >= 0; bit = ecore_mcast_get_next_bin(o, bit + 1)) { + ECORE_57711_SET_MC_FILTER(mc_filter, bit); + ECORE_MSG(sc, "About to set bin %d", bit); + } +} + +/* On 57711 we write the multicast MACs' approximate match + * table by directly into the TSTORM's internal RAM. So we don't + * really need to handle any tricks to make it work. + */ +static int ecore_mcast_setup_e1h(struct bnx2x_softc *sc, + struct ecore_mcast_ramrod_params *p, + enum ecore_mcast_cmd cmd) +{ + int i; + struct ecore_mcast_obj *o = p->mcast_obj; + struct ecore_raw_obj *r = &o->raw; + + /* If CLEAR_ONLY has been requested - clear the registry + * and clear a pending bit. + */ + if (!ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) { + uint32_t mc_filter[ECORE_MC_HASH_SIZE] = { 0 }; + + /* Set the multicast filter bits before writing it into + * the internal memory. 
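+		 * One bit is set per approximate-match hash bin, see
+		 * ecore_mcast_bin_from_mac().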
+ */ + switch (cmd) { + case ECORE_MCAST_CMD_ADD: + ecore_mcast_hdl_add_e1h(sc, o, p, mc_filter); + break; + + case ECORE_MCAST_CMD_DEL: + ECORE_MSG(sc, "Invalidating multicast MACs configuration"); + + /* clear the registry */ + ECORE_MEMSET(o->registry.aprox_match.vec, 0, + sizeof(o->registry.aprox_match.vec)); + break; + + case ECORE_MCAST_CMD_RESTORE: + ecore_mcast_hdl_restore_e1h(sc, o, mc_filter); + break; + + default: + PMD_DRV_LOG(ERR, sc, "Unknown command: %d", cmd); + return ECORE_INVAL; + } + + /* Set the mcast filter in the internal memory */ + for (i = 0; i < ECORE_MC_HASH_SIZE; i++) + REG_WR(sc, ECORE_MC_HASH_OFFSET(sc, i), mc_filter[i]); + } else + /* clear the registry */ + ECORE_MEMSET(o->registry.aprox_match.vec, 0, + sizeof(o->registry.aprox_match.vec)); + + /* We are done */ + r->clear_pending(r); + + return ECORE_SUCCESS; +} + +static int ecore_mcast_get_registry_size_aprox(struct ecore_mcast_obj *o) +{ + return o->registry.aprox_match.num_bins_set; +} + +static void ecore_mcast_set_registry_size_aprox(struct ecore_mcast_obj *o, + int n) +{ + o->registry.aprox_match.num_bins_set = n; +} + +int ecore_config_mcast(struct bnx2x_softc *sc, + struct ecore_mcast_ramrod_params *p, + enum ecore_mcast_cmd cmd) +{ + struct ecore_mcast_obj *o = p->mcast_obj; + struct ecore_raw_obj *r = &o->raw; + int rc = 0, old_reg_size; + + /* This is needed to recover number of currently configured mcast macs + * in case of failure. + */ + old_reg_size = o->get_registry_size(o); + + /* Do some calculations and checks */ + rc = o->validate(sc, p, cmd); + if (rc) + return rc; + + /* Return if there is no work to do */ + if ((!p->mcast_list_len) && (!o->check_sched(o))) + return ECORE_SUCCESS; + + ECORE_MSG + (sc, "o->total_pending_num=%d p->mcast_list_len=%d o->max_cmd_len=%d", + o->total_pending_num, p->mcast_list_len, o->max_cmd_len); + + /* Enqueue the current command to the pending list if we can't complete + * it in the current iteration + */ + if (r->check_pending(r) || + ((o->max_cmd_len > 0) && (o->total_pending_num > o->max_cmd_len))) { + rc = o->enqueue_cmd(sc, p->mcast_obj, p, cmd); + if (rc < 0) + goto error_exit1; + + /* As long as the current command is in a command list we + * don't need to handle it separately. + */ + p->mcast_list_len = 0; + } + + if (!r->check_pending(r)) { + + /* Set 'pending' state */ + r->set_pending(r); + + /* Configure the new classification in the chip */ + rc = o->config_mcast(sc, p, cmd); + if (rc < 0) + goto error_exit2; + + /* Wait for a ramrod completion if was requested */ + if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags)) + rc = o->wait_comp(sc, o); + } + + return rc; + +error_exit2: + r->clear_pending(r); + +error_exit1: + o->revert(sc, p, old_reg_size, cmd); + + return rc; +} + +static void ecore_mcast_clear_sched(struct ecore_mcast_obj *o) +{ + ECORE_SMP_MB_BEFORE_CLEAR_BIT(); + ECORE_CLEAR_BIT(o->sched_state, o->raw.pstate); + ECORE_SMP_MB_AFTER_CLEAR_BIT(); +} + +static void ecore_mcast_set_sched(struct ecore_mcast_obj *o) +{ + ECORE_SMP_MB_BEFORE_CLEAR_BIT(); + ECORE_SET_BIT(o->sched_state, o->raw.pstate); + ECORE_SMP_MB_AFTER_CLEAR_BIT(); +} + +static bool ecore_mcast_check_sched(struct ecore_mcast_obj *o) +{ + return ! 
!ECORE_TEST_BIT(o->sched_state, o->raw.pstate); +} + +static bool ecore_mcast_check_pending(struct ecore_mcast_obj *o) +{ + return o->raw.check_pending(&o->raw) || o->check_sched(o); +} + +void ecore_init_mcast_obj(struct bnx2x_softc *sc, + struct ecore_mcast_obj *mcast_obj, + uint8_t mcast_cl_id, uint32_t mcast_cid, + uint8_t func_id, uint8_t engine_id, void *rdata, + ecore_dma_addr_t rdata_mapping, int state, + unsigned long *pstate, ecore_obj_type type) +{ + ECORE_MEMSET(mcast_obj, 0, sizeof(*mcast_obj)); + + ecore_init_raw_obj(&mcast_obj->raw, mcast_cl_id, mcast_cid, func_id, + rdata, rdata_mapping, state, pstate, type); + + mcast_obj->engine_id = engine_id; + + ECORE_LIST_INIT(&mcast_obj->pending_cmds_head); + + mcast_obj->sched_state = ECORE_FILTER_MCAST_SCHED; + mcast_obj->check_sched = ecore_mcast_check_sched; + mcast_obj->set_sched = ecore_mcast_set_sched; + mcast_obj->clear_sched = ecore_mcast_clear_sched; + + if (CHIP_IS_E1H(sc)) { + mcast_obj->config_mcast = ecore_mcast_setup_e1h; + mcast_obj->enqueue_cmd = NULL; + mcast_obj->hdl_restore = NULL; + mcast_obj->check_pending = ecore_mcast_check_pending; + + /* 57711 doesn't send a ramrod, so it has unlimited credit + * for one command. + */ + mcast_obj->max_cmd_len = -1; + mcast_obj->wait_comp = ecore_mcast_wait; + mcast_obj->set_one_rule = NULL; + mcast_obj->validate = ecore_mcast_validate_e1h; + mcast_obj->revert = ecore_mcast_revert_e1h; + mcast_obj->get_registry_size = + ecore_mcast_get_registry_size_aprox; + mcast_obj->set_registry_size = + ecore_mcast_set_registry_size_aprox; + } else { + mcast_obj->config_mcast = ecore_mcast_setup_e2; + mcast_obj->enqueue_cmd = ecore_mcast_enqueue_cmd; + mcast_obj->hdl_restore = ecore_mcast_handle_restore_cmd_e2; + mcast_obj->check_pending = ecore_mcast_check_pending; + mcast_obj->max_cmd_len = 16; + mcast_obj->wait_comp = ecore_mcast_wait; + mcast_obj->set_one_rule = ecore_mcast_set_one_rule_e2; + mcast_obj->validate = ecore_mcast_validate_e2; + mcast_obj->revert = ecore_mcast_revert_e2; + mcast_obj->get_registry_size = + ecore_mcast_get_registry_size_aprox; + mcast_obj->set_registry_size = + ecore_mcast_set_registry_size_aprox; + } +} + +/*************************** Credit handling **********************************/ + +/** + * atomic_add_ifless - add if the result is less than a given value. + * + * @v: pointer of type ecore_atomic_t + * @a: the amount to add to v... + * @u: ...if (v + a) is less than u. + * + * returns TRUE if (v + a) was less than u, and FALSE otherwise. + * + */ +static bool __atomic_add_ifless(ecore_atomic_t *v, int a, int u) +{ + int c, old; + + c = ECORE_ATOMIC_READ(v); + for (;;) { + if (ECORE_UNLIKELY(c + a >= u)) + return FALSE; + + old = ECORE_ATOMIC_CMPXCHG((v), c, c + a); + if (ECORE_LIKELY(old == c)) + break; + c = old; + } + + return TRUE; +} + +/** + * atomic_dec_ifmoe - dec if the result is more or equal than a given value. + * + * @v: pointer of type ecore_atomic_t + * @a: the amount to dec from v... + * @u: ...if (v - a) is more or equal than u. + * + * returns TRUE if (v - a) was more or equal than u, and FALSE + * otherwise. 
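+ * Like __atomic_add_ifless(), this is implemented as a cmpxchg retry loop.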
+ */ +static bool __atomic_dec_ifmoe(ecore_atomic_t *v, int a, int u) +{ + int c, old; + + c = ECORE_ATOMIC_READ(v); + for (;;) { + if (ECORE_UNLIKELY(c - a < u)) + return FALSE; + + old = ECORE_ATOMIC_CMPXCHG((v), c, c - a); + if (ECORE_LIKELY(old == c)) + break; + c = old; + } + + return TRUE; +} + +static bool ecore_credit_pool_get(struct ecore_credit_pool_obj *o, int cnt) +{ + bool rc; + + ECORE_SMP_MB(); + rc = __atomic_dec_ifmoe(&o->credit, cnt, 0); + ECORE_SMP_MB(); + + return rc; +} + +static bool ecore_credit_pool_put(struct ecore_credit_pool_obj *o, int cnt) +{ + bool rc; + + ECORE_SMP_MB(); + + /* Don't let to refill if credit + cnt > pool_sz */ + rc = __atomic_add_ifless(&o->credit, cnt, o->pool_sz + 1); + + ECORE_SMP_MB(); + + return rc; +} + +static int ecore_credit_pool_check(struct ecore_credit_pool_obj *o) +{ + int cur_credit; + + ECORE_SMP_MB(); + cur_credit = ECORE_ATOMIC_READ(&o->credit); + + return cur_credit; +} + +static bool ecore_credit_pool_always_TRUE(__rte_unused struct + ecore_credit_pool_obj *o, + __rte_unused int cnt) +{ + return TRUE; +} + +static bool ecore_credit_pool_get_entry(struct ecore_credit_pool_obj *o, + int *offset) +{ + int idx, vec, i; + + *offset = -1; + + /* Find "internal cam-offset" then add to base for this object... */ + for (vec = 0; vec < ECORE_POOL_VEC_SIZE; vec++) { + + /* Skip the current vector if there are no free entries in it */ + if (!o->pool_mirror[vec]) + continue; + + /* If we've got here we are going to find a free entry */ + for (idx = vec * BIT_VEC64_ELEM_SZ, i = 0; + i < BIT_VEC64_ELEM_SZ; idx++, i++) + + if (BIT_VEC64_TEST_BIT(o->pool_mirror, idx)) { + /* Got one!! */ + BIT_VEC64_CLEAR_BIT(o->pool_mirror, idx); + *offset = o->base_pool_offset + idx; + return TRUE; + } + } + + return FALSE; +} + +static bool ecore_credit_pool_put_entry(struct ecore_credit_pool_obj *o, + int offset) +{ + if (offset < o->base_pool_offset) + return FALSE; + + offset -= o->base_pool_offset; + + if (offset >= o->pool_sz) + return FALSE; + + /* Return the entry to the pool */ + BIT_VEC64_SET_BIT(o->pool_mirror, offset); + + return TRUE; +} + +static bool ecore_credit_pool_put_entry_always_TRUE(__rte_unused struct + ecore_credit_pool_obj *o, + __rte_unused int offset) +{ + return TRUE; +} + +static bool ecore_credit_pool_get_entry_always_TRUE(__rte_unused struct + ecore_credit_pool_obj *o, + __rte_unused int *offset) +{ + *offset = -1; + return TRUE; +} + +/** + * ecore_init_credit_pool - initialize credit pool internals. + * + * @p: + * @base: Base entry in the CAM to use. + * @credit: pool size. + * + * If base is negative no CAM entries handling will be performed. + * If credit is negative pool operations will always succeed (unlimited pool). 
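+ * See ecore_init_mac_credit_pool() and ecore_init_vlan_credit_pool() below
+ * for how base and credit are chosen per function.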
+ *
+ */
+void ecore_init_credit_pool(struct ecore_credit_pool_obj *p,
+			    int base, int credit)
+{
+	/* Zero the object first */
+	ECORE_MEMSET(p, 0, sizeof(*p));
+
+	/* Set the table to all 1s */
+	ECORE_MEMSET(&p->pool_mirror, 0xff, sizeof(p->pool_mirror));
+
+	/* Init a pool as full */
+	ECORE_ATOMIC_SET(&p->credit, credit);
+
+	/* The total pool size */
+	p->pool_sz = credit;
+
+	p->base_pool_offset = base;
+
+	/* Commit the change */
+	ECORE_SMP_MB();
+
+	p->check = ecore_credit_pool_check;
+
+	/* if pool credit is negative - disable the checks */
+	if (credit >= 0) {
+		p->put = ecore_credit_pool_put;
+		p->get = ecore_credit_pool_get;
+		p->put_entry = ecore_credit_pool_put_entry;
+		p->get_entry = ecore_credit_pool_get_entry;
+	} else {
+		p->put = ecore_credit_pool_always_TRUE;
+		p->get = ecore_credit_pool_always_TRUE;
+		p->put_entry = ecore_credit_pool_put_entry_always_TRUE;
+		p->get_entry = ecore_credit_pool_get_entry_always_TRUE;
+	}
+
+	/* If base is negative - disable entries handling */
+	if (base < 0) {
+		p->put_entry = ecore_credit_pool_put_entry_always_TRUE;
+		p->get_entry = ecore_credit_pool_get_entry_always_TRUE;
+	}
+}
+
+void ecore_init_mac_credit_pool(struct bnx2x_softc *sc,
+				struct ecore_credit_pool_obj *p,
+				uint8_t func_id, uint8_t func_num)
+{
+
+#define ECORE_CAM_SIZE_EMUL 5
+
+	int cam_sz;
+
+	if (CHIP_IS_E1H(sc)) {
+		/* CAM credit is equally divided between all active functions
+		 * on the PORT.
+		 */
+		if (func_num > 0) {
+			if (!CHIP_REV_IS_SLOW(sc))
+				cam_sz = (MAX_MAC_CREDIT_E1H / (2 * func_num));
+			else
+				cam_sz = ECORE_CAM_SIZE_EMUL;
+			ecore_init_credit_pool(p, func_id * cam_sz, cam_sz);
+		} else {
+			/* this should never happen! Block MAC operations. */
+			ecore_init_credit_pool(p, 0, 0);
+		}
+
+	} else {
+
+		/*
+		 * CAM credit is equally divided between all active functions
+		 * on the PATH.
+		 */
+		if (func_num > 0) {
+			if (!CHIP_REV_IS_SLOW(sc))
+				cam_sz = (MAX_MAC_CREDIT_E2 / func_num);
+			else
+				cam_sz = ECORE_CAM_SIZE_EMUL;
+
+			/* No need for CAM entries handling for 57712 and
+			 * newer.
+			 */
+			ecore_init_credit_pool(p, -1, cam_sz);
+		} else {
+			/* this should never happen! Block MAC operations. */
+			ecore_init_credit_pool(p, 0, 0);
+		}
+	}
+}
+
+void ecore_init_vlan_credit_pool(struct bnx2x_softc *sc,
+				 struct ecore_credit_pool_obj *p,
+				 uint8_t func_id, uint8_t func_num)
+{
+	if (CHIP_IS_E1x(sc)) {
+		/* There is no VLAN credit in HW on 57711; only
+		 * MAC / MAC-VLAN can be set
+		 */
+		ecore_init_credit_pool(p, 0, -1);
+	} else {
+		/* CAM credit is equally divided between all active functions
+		 * on the PATH.
+		 */
+		if (func_num > 0) {
+			int credit = MAX_VLAN_CREDIT_E2 / func_num;
+			ecore_init_credit_pool(p, func_id * credit, credit);
+		} else
+			/* this should never happen! Block VLAN operations. */
+			ecore_init_credit_pool(p, 0, 0);
+	}
+}
+
+/****************** RSS Configuration ******************/
+
+/**
+ * ecore_setup_rss - configure RSS
+ *
+ * @sc: device handle
+ * @p: rss configuration
+ *
+ * sends an UPDATE ramrod for that matter.
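+ * Completion is reported through the raw object's pending bit, see
+ * ecore_config_rss().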
+ */ +static int ecore_setup_rss(struct bnx2x_softc *sc, + struct ecore_config_rss_params *p) +{ + struct ecore_rss_config_obj *o = p->rss_obj; + struct ecore_raw_obj *r = &o->raw; + struct eth_rss_update_ramrod_data *data = + (struct eth_rss_update_ramrod_data *)(r->rdata); + uint8_t rss_mode = 0; + int rc; + + ECORE_MEMSET(data, 0, sizeof(*data)); + + ECORE_MSG(sc, "Configuring RSS"); + + /* Set an echo field */ + data->echo = ECORE_CPU_TO_LE32((r->cid & ECORE_SWCID_MASK) | + (r->state << ECORE_SWCID_SHIFT)); + + /* RSS mode */ + if (ECORE_TEST_BIT(ECORE_RSS_MODE_DISABLED, &p->rss_flags)) + rss_mode = ETH_RSS_MODE_DISABLED; + else if (ECORE_TEST_BIT(ECORE_RSS_MODE_REGULAR, &p->rss_flags)) + rss_mode = ETH_RSS_MODE_REGULAR; + + data->rss_mode = rss_mode; + + ECORE_MSG(sc, "rss_mode=%d", rss_mode); + + /* RSS capabilities */ + if (ECORE_TEST_BIT(ECORE_RSS_IPV4, &p->rss_flags)) + data->capabilities |= + ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY; + + if (ECORE_TEST_BIT(ECORE_RSS_IPV4_TCP, &p->rss_flags)) + data->capabilities |= + ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY; + + if (ECORE_TEST_BIT(ECORE_RSS_IPV4_UDP, &p->rss_flags)) + data->capabilities |= + ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY; + + if (ECORE_TEST_BIT(ECORE_RSS_IPV6, &p->rss_flags)) + data->capabilities |= + ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY; + + if (ECORE_TEST_BIT(ECORE_RSS_IPV6_TCP, &p->rss_flags)) + data->capabilities |= + ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY; + + if (ECORE_TEST_BIT(ECORE_RSS_IPV6_UDP, &p->rss_flags)) + data->capabilities |= + ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY; + + /* Hashing mask */ + data->rss_result_mask = p->rss_result_mask; + + /* RSS engine ID */ + data->rss_engine_id = o->engine_id; + + ECORE_MSG(sc, "rss_engine_id=%d", data->rss_engine_id); + + /* Indirection table */ + ECORE_MEMCPY(data->indirection_table, p->ind_table, + T_ETH_INDIRECTION_TABLE_SIZE); + + /* Remember the last configuration */ + ECORE_MEMCPY(o->ind_table, p->ind_table, T_ETH_INDIRECTION_TABLE_SIZE); + + /* RSS keys */ + if (ECORE_TEST_BIT(ECORE_RSS_SET_SRCH, &p->rss_flags)) { + ECORE_MEMCPY(&data->rss_key[0], &p->rss_key[0], + sizeof(data->rss_key)); + data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY; + } + + /* No need for an explicit memory barrier here as long we would + * need to ensure the ordering of writing to the SPQ element + * and updating of the SPQ producer which involves a memory + * read and we will have to put a full memory barrier there + * (inside ecore_sp_post()). 
+ */ + + /* Send a ramrod */ + rc = ecore_sp_post(sc, + RAMROD_CMD_ID_ETH_RSS_UPDATE, + r->cid, r->rdata_mapping, ETH_CONNECTION_TYPE); + + if (rc < 0) + return rc; + + return ECORE_PENDING; +} + +int ecore_config_rss(struct bnx2x_softc *sc, struct ecore_config_rss_params *p) +{ + int rc; + struct ecore_rss_config_obj *o = p->rss_obj; + struct ecore_raw_obj *r = &o->raw; + + /* Do nothing if only driver cleanup was requested */ + if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) + return ECORE_SUCCESS; + + r->set_pending(r); + + rc = o->config_rss(sc, p); + if (rc < 0) { + r->clear_pending(r); + return rc; + } + + if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags)) + rc = r->wait_comp(sc, r); + + return rc; +} + +void ecore_init_rss_config_obj(struct bnx2x_softc *sc __rte_unused, + struct ecore_rss_config_obj *rss_obj, + uint8_t cl_id, uint32_t cid, uint8_t func_id, + uint8_t engine_id, + void *rdata, ecore_dma_addr_t rdata_mapping, + int state, unsigned long *pstate, + ecore_obj_type type) +{ + ecore_init_raw_obj(&rss_obj->raw, cl_id, cid, func_id, rdata, + rdata_mapping, state, pstate, type); + + rss_obj->engine_id = engine_id; + rss_obj->config_rss = ecore_setup_rss; +} + +/********************** Queue state object ***********************************/ + +/** + * ecore_queue_state_change - perform Queue state change transition + * + * @sc: device handle + * @params: parameters to perform the transition + * + * returns 0 in case of successfully completed transition, negative error + * code in case of failure, positive (EBUSY) value if there is a completion + * to that is still pending (possible only if RAMROD_COMP_WAIT is + * not set in params->ramrod_flags for asynchronous commands). + * + */ +int ecore_queue_state_change(struct bnx2x_softc *sc, + struct ecore_queue_state_params *params) +{ + struct ecore_queue_sp_obj *o = params->q_obj; + int rc, pending_bit; + unsigned long *pending = &o->pending; + + /* Check that the requested transition is legal */ + rc = o->check_transition(sc, o, params); + if (rc) { + PMD_DRV_LOG(ERR, sc, "check transition returned an error. rc %d", + rc); + return ECORE_INVAL; + } + + /* Set "pending" bit */ + ECORE_MSG(sc, "pending bit was=%lx", o->pending); + pending_bit = o->set_pending(o, params); + ECORE_MSG(sc, "pending bit now=%lx", o->pending); + + /* Don't send a command if only driver cleanup was requested */ + if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ¶ms->ramrod_flags)) + o->complete_cmd(sc, o, pending_bit); + else { + /* Send a ramrod */ + rc = o->send_cmd(sc, params); + if (rc) { + o->next_state = ECORE_Q_STATE_MAX; + ECORE_CLEAR_BIT(pending_bit, pending); + ECORE_SMP_MB_AFTER_CLEAR_BIT(); + return rc; + } + + if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, ¶ms->ramrod_flags)) { + rc = o->wait_comp(sc, o, pending_bit); + if (rc) + return rc; + + return ECORE_SUCCESS; + } + } + + return ECORE_RET_PENDING(pending_bit, pending); +} + +static int ecore_queue_set_pending(struct ecore_queue_sp_obj *obj, + struct ecore_queue_state_params *params) +{ + enum ecore_queue_cmd cmd = params->cmd, bit; + + /* ACTIVATE and DEACTIVATE commands are implemented on top of + * UPDATE command. 
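+	 * They share the ECORE_Q_CMD_UPDATE pending bit.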
+ */ + if ((cmd == ECORE_Q_CMD_ACTIVATE) || (cmd == ECORE_Q_CMD_DEACTIVATE)) + bit = ECORE_Q_CMD_UPDATE; + else + bit = cmd; + + ECORE_SET_BIT(bit, &obj->pending); + return bit; +} + +static int ecore_queue_wait_comp(struct bnx2x_softc *sc, + struct ecore_queue_sp_obj *o, + enum ecore_queue_cmd cmd) +{ + return ecore_state_wait(sc, cmd, &o->pending); +} + +/** + * ecore_queue_comp_cmd - complete the state change command. + * + * @sc: device handle + * @o: + * @cmd: + * + * Checks that the arrived completion is expected. + */ +static int ecore_queue_comp_cmd(struct bnx2x_softc *sc __rte_unused, + struct ecore_queue_sp_obj *o, + enum ecore_queue_cmd cmd) +{ + unsigned long cur_pending = o->pending; + + if (!ECORE_TEST_AND_CLEAR_BIT(cmd, &cur_pending)) { + PMD_DRV_LOG(ERR, sc, + "Bad MC reply %d for queue %d in state %d pending 0x%lx, next_state %d", + cmd, o->cids[ECORE_PRIMARY_CID_INDEX], o->state, + cur_pending, o->next_state); + return ECORE_INVAL; + } + + if (o->next_tx_only >= o->max_cos) + /* >= because tx only must always be smaller than cos since the + * primary connection supports COS 0 + */ + PMD_DRV_LOG(ERR, sc, + "illegal value for next tx_only: %d. max cos was %d", + o->next_tx_only, o->max_cos); + + ECORE_MSG(sc, "Completing command %d for queue %d, setting state to %d", + cmd, o->cids[ECORE_PRIMARY_CID_INDEX], o->next_state); + + if (o->next_tx_only) /* print num tx-only if any exist */ + ECORE_MSG(sc, "primary cid %d: num tx-only cons %d", + o->cids[ECORE_PRIMARY_CID_INDEX], o->next_tx_only); + + o->state = o->next_state; + o->num_tx_only = o->next_tx_only; + o->next_state = ECORE_Q_STATE_MAX; + + /* It's important that o->state and o->next_state are + * updated before o->pending. + */ + wmb(); + + ECORE_CLEAR_BIT(cmd, &o->pending); + ECORE_SMP_MB_AFTER_CLEAR_BIT(); + + return ECORE_SUCCESS; +} + +static void ecore_q_fill_setup_data_e2(struct ecore_queue_state_params + *cmd_params, + struct client_init_ramrod_data *data) +{ + struct ecore_queue_setup_params *params = &cmd_params->params.setup; + + /* Rx data */ + + /* IPv6 TPA supported for E2 and above only */ + data->rx.tpa_en |= ECORE_TEST_BIT(ECORE_Q_FLG_TPA_IPV6, + ¶ms->flags) * + CLIENT_INIT_RX_DATA_TPA_EN_IPV6; +} + +static void ecore_q_fill_init_general_data(struct bnx2x_softc *sc __rte_unused, + struct ecore_queue_sp_obj *o, + struct ecore_general_setup_params + *params, struct client_init_general_data + *gen_data, unsigned long *flags) +{ + gen_data->client_id = o->cl_id; + + if (ECORE_TEST_BIT(ECORE_Q_FLG_STATS, flags)) { + gen_data->statistics_counter_id = params->stat_id; + gen_data->statistics_en_flg = 1; + gen_data->statistics_zero_flg = + ECORE_TEST_BIT(ECORE_Q_FLG_ZERO_STATS, flags); + } else + gen_data->statistics_counter_id = + DISABLE_STATISTIC_COUNTER_ID_VALUE; + + gen_data->is_fcoe_flg = ECORE_TEST_BIT(ECORE_Q_FLG_FCOE, flags); + gen_data->activate_flg = ECORE_TEST_BIT(ECORE_Q_FLG_ACTIVE, flags); + gen_data->sp_client_id = params->spcl_id; + gen_data->mtu = ECORE_CPU_TO_LE16(params->mtu); + gen_data->func_id = o->func_id; + + gen_data->cos = params->cos; + + gen_data->traffic_type = + ECORE_TEST_BIT(ECORE_Q_FLG_FCOE, flags) ? 
+ LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW; + + ECORE_MSG(sc, "flags: active %d, cos %d, stats en %d", + gen_data->activate_flg, gen_data->cos, + gen_data->statistics_en_flg); +} + +static void ecore_q_fill_init_tx_data(struct ecore_txq_setup_params *params, + struct client_init_tx_data *tx_data, + unsigned long *flags) +{ + tx_data->enforce_security_flg = + ECORE_TEST_BIT(ECORE_Q_FLG_TX_SEC, flags); + tx_data->default_vlan = ECORE_CPU_TO_LE16(params->default_vlan); + tx_data->default_vlan_flg = ECORE_TEST_BIT(ECORE_Q_FLG_DEF_VLAN, flags); + tx_data->tx_switching_flg = + ECORE_TEST_BIT(ECORE_Q_FLG_TX_SWITCH, flags); + tx_data->anti_spoofing_flg = + ECORE_TEST_BIT(ECORE_Q_FLG_ANTI_SPOOF, flags); + tx_data->force_default_pri_flg = + ECORE_TEST_BIT(ECORE_Q_FLG_FORCE_DEFAULT_PRI, flags); + tx_data->refuse_outband_vlan_flg = + ECORE_TEST_BIT(ECORE_Q_FLG_REFUSE_OUTBAND_VLAN, flags); + tx_data->tunnel_non_lso_pcsum_location = + ECORE_TEST_BIT(ECORE_Q_FLG_PCSUM_ON_PKT, flags) ? CSUM_ON_PKT : + CSUM_ON_BD; + + tx_data->tx_status_block_id = params->fw_sb_id; + tx_data->tx_sb_index_number = params->sb_cq_index; + tx_data->tss_leading_client_id = params->tss_leading_cl_id; + + tx_data->tx_bd_page_base.lo = + ECORE_CPU_TO_LE32(U64_LO(params->dscr_map)); + tx_data->tx_bd_page_base.hi = + ECORE_CPU_TO_LE32(U64_HI(params->dscr_map)); + + /* Don't configure any Tx switching mode during queue SETUP */ + tx_data->state = 0; +} + +static void ecore_q_fill_init_pause_data(struct rxq_pause_params *params, + struct client_init_rx_data *rx_data) +{ + /* flow control data */ + rx_data->cqe_pause_thr_low = ECORE_CPU_TO_LE16(params->rcq_th_lo); + rx_data->cqe_pause_thr_high = ECORE_CPU_TO_LE16(params->rcq_th_hi); + rx_data->bd_pause_thr_low = ECORE_CPU_TO_LE16(params->bd_th_lo); + rx_data->bd_pause_thr_high = ECORE_CPU_TO_LE16(params->bd_th_hi); + rx_data->sge_pause_thr_low = ECORE_CPU_TO_LE16(params->sge_th_lo); + rx_data->sge_pause_thr_high = ECORE_CPU_TO_LE16(params->sge_th_hi); + rx_data->rx_cos_mask = ECORE_CPU_TO_LE16(params->pri_map); +} + +static void ecore_q_fill_init_rx_data(struct ecore_rxq_setup_params *params, + struct client_init_rx_data *rx_data, + unsigned long *flags) +{ + rx_data->tpa_en = ECORE_TEST_BIT(ECORE_Q_FLG_TPA, flags) * + CLIENT_INIT_RX_DATA_TPA_EN_IPV4; + rx_data->tpa_en |= ECORE_TEST_BIT(ECORE_Q_FLG_TPA_GRO, flags) * + CLIENT_INIT_RX_DATA_TPA_MODE; + rx_data->vmqueue_mode_en_flg = 0; + + rx_data->extra_data_over_sgl_en_flg = + ECORE_TEST_BIT(ECORE_Q_FLG_OOO, flags); + rx_data->cache_line_alignment_log_size = params->cache_line_log; + rx_data->enable_dynamic_hc = ECORE_TEST_BIT(ECORE_Q_FLG_DHC, flags); + rx_data->client_qzone_id = params->cl_qzone_id; + rx_data->max_agg_size = ECORE_CPU_TO_LE16(params->tpa_agg_sz); + + /* Always start in DROP_ALL mode */ + rx_data->state = ECORE_CPU_TO_LE16(CLIENT_INIT_RX_DATA_UCAST_DROP_ALL | + CLIENT_INIT_RX_DATA_MCAST_DROP_ALL); + + /* We don't set drop flags */ + rx_data->drop_ip_cs_err_flg = 0; + rx_data->drop_tcp_cs_err_flg = 0; + rx_data->drop_ttl0_flg = 0; + rx_data->drop_udp_cs_err_flg = 0; + rx_data->inner_vlan_removal_enable_flg = + ECORE_TEST_BIT(ECORE_Q_FLG_VLAN, flags); + rx_data->outer_vlan_removal_enable_flg = + ECORE_TEST_BIT(ECORE_Q_FLG_OV, flags); + rx_data->status_block_id = params->fw_sb_id; + rx_data->rx_sb_index_number = params->sb_cq_index; + rx_data->max_tpa_queues = params->max_tpa_queues; + rx_data->max_bytes_on_bd = ECORE_CPU_TO_LE16(params->buf_sz); + rx_data->bd_page_base.lo = 
ECORE_CPU_TO_LE32(U64_LO(params->dscr_map)); + rx_data->bd_page_base.hi = ECORE_CPU_TO_LE32(U64_HI(params->dscr_map)); + rx_data->cqe_page_base.lo = ECORE_CPU_TO_LE32(U64_LO(params->rcq_map)); + rx_data->cqe_page_base.hi = ECORE_CPU_TO_LE32(U64_HI(params->rcq_map)); + rx_data->is_leading_rss = ECORE_TEST_BIT(ECORE_Q_FLG_LEADING_RSS, + flags); + + if (ECORE_TEST_BIT(ECORE_Q_FLG_MCAST, flags)) { + rx_data->approx_mcast_engine_id = params->mcast_engine_id; + rx_data->is_approx_mcast = 1; + } + + rx_data->rss_engine_id = params->rss_engine_id; + + /* silent vlan removal */ + rx_data->silent_vlan_removal_flg = + ECORE_TEST_BIT(ECORE_Q_FLG_SILENT_VLAN_REM, flags); + rx_data->silent_vlan_value = + ECORE_CPU_TO_LE16(params->silent_removal_value); + rx_data->silent_vlan_mask = + ECORE_CPU_TO_LE16(params->silent_removal_mask); +} + +/* initialize the general, tx and rx parts of a queue object */ +static void ecore_q_fill_setup_data_cmn(struct bnx2x_softc *sc, struct ecore_queue_state_params + *cmd_params, + struct client_init_ramrod_data *data) +{ + ecore_q_fill_init_general_data(sc, cmd_params->q_obj, + &cmd_params->params.setup.gen_params, + &data->general, + &cmd_params->params.setup.flags); + + ecore_q_fill_init_tx_data(&cmd_params->params.setup.txq_params, + &data->tx, &cmd_params->params.setup.flags); + + ecore_q_fill_init_rx_data(&cmd_params->params.setup.rxq_params, + &data->rx, &cmd_params->params.setup.flags); + + ecore_q_fill_init_pause_data(&cmd_params->params.setup.pause_params, + &data->rx); +} + +/* initialize the general and tx parts of a tx-only queue object */ +static void ecore_q_fill_setup_tx_only(struct bnx2x_softc *sc, struct ecore_queue_state_params + *cmd_params, + struct tx_queue_init_ramrod_data *data) +{ + ecore_q_fill_init_general_data(sc, cmd_params->q_obj, + &cmd_params->params.tx_only.gen_params, + &data->general, + &cmd_params->params.tx_only.flags); + + ecore_q_fill_init_tx_data(&cmd_params->params.tx_only.txq_params, + &data->tx, &cmd_params->params.tx_only.flags); + + ECORE_MSG(sc, "cid %d, tx bd page lo %x hi %x", + cmd_params->q_obj->cids[0], + data->tx.tx_bd_page_base.lo, data->tx.tx_bd_page_base.hi); +} + +/** + * ecore_q_init - init HW/FW queue + * + * @sc: device handle + * @params: + * + * HW/FW initial Queue configuration: + * - HC: Rx and Tx + * - CDU context validation + * + */ +static int ecore_q_init(struct bnx2x_softc *sc, + struct ecore_queue_state_params *params) +{ + struct ecore_queue_sp_obj *o = params->q_obj; + struct ecore_queue_init_params *init = ¶ms->params.init; + uint16_t hc_usec; + uint8_t cos; + + /* Tx HC configuration */ + if (ECORE_TEST_BIT(ECORE_Q_TYPE_HAS_TX, &o->type) && + ECORE_TEST_BIT(ECORE_Q_FLG_HC, &init->tx.flags)) { + hc_usec = init->tx.hc_rate ? 1000000 / init->tx.hc_rate : 0; + + ECORE_UPDATE_COALESCE_SB_INDEX(sc, init->tx.fw_sb_id, + init->tx.sb_cq_index, + !ECORE_TEST_BIT + (ECORE_Q_FLG_HC_EN, + &init->tx.flags), hc_usec); + } + + /* Rx HC configuration */ + if (ECORE_TEST_BIT(ECORE_Q_TYPE_HAS_RX, &o->type) && + ECORE_TEST_BIT(ECORE_Q_FLG_HC, &init->rx.flags)) { + hc_usec = init->rx.hc_rate ? 1000000 / init->rx.hc_rate : 0; + + ECORE_UPDATE_COALESCE_SB_INDEX(sc, init->rx.fw_sb_id, + init->rx.sb_cq_index, + !ECORE_TEST_BIT + (ECORE_Q_FLG_HC_EN, + &init->rx.flags), hc_usec); + } + + /* Set CDU context validation values */ + for (cos = 0; cos < o->max_cos; cos++) { + ECORE_MSG(sc, "setting context validation. 
cid %d, cos %d", + o->cids[cos], cos); + ECORE_MSG(sc, "context pointer %p", init->cxts[cos]); + ECORE_SET_CTX_VALIDATION(sc, init->cxts[cos], o->cids[cos]); + } + + /* As no ramrod is sent, complete the command immediately */ + o->complete_cmd(sc, o, ECORE_Q_CMD_INIT); + + ECORE_MMIOWB(); + ECORE_SMP_MB(); + + return ECORE_SUCCESS; +} + +static int ecore_q_send_setup_e1x(struct bnx2x_softc *sc, struct ecore_queue_state_params + *params) +{ + struct ecore_queue_sp_obj *o = params->q_obj; + struct client_init_ramrod_data *rdata = + (struct client_init_ramrod_data *)o->rdata; + ecore_dma_addr_t data_mapping = o->rdata_mapping; + int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP; + + /* Clear the ramrod data */ + ECORE_MEMSET(rdata, 0, sizeof(*rdata)); + + /* Fill the ramrod data */ + ecore_q_fill_setup_data_cmn(sc, params, rdata); + + /* No need for an explicit memory barrier here as long we would + * need to ensure the ordering of writing to the SPQ element + * and updating of the SPQ producer which involves a memory + * read and we will have to put a full memory barrier there + * (inside ecore_sp_post()). + */ + + return ecore_sp_post(sc, + ramrod, + o->cids[ECORE_PRIMARY_CID_INDEX], + data_mapping, ETH_CONNECTION_TYPE); +} + +static int ecore_q_send_setup_e2(struct bnx2x_softc *sc, + struct ecore_queue_state_params *params) +{ + struct ecore_queue_sp_obj *o = params->q_obj; + struct client_init_ramrod_data *rdata = + (struct client_init_ramrod_data *)o->rdata; + ecore_dma_addr_t data_mapping = o->rdata_mapping; + int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP; + + /* Clear the ramrod data */ + ECORE_MEMSET(rdata, 0, sizeof(*rdata)); + + /* Fill the ramrod data */ + ecore_q_fill_setup_data_cmn(sc, params, rdata); + ecore_q_fill_setup_data_e2(params, rdata); + + /* No need for an explicit memory barrier here as long we would + * need to ensure the ordering of writing to the SPQ element + * and updating of the SPQ producer which involves a memory + * read and we will have to put a full memory barrier there + * (inside ecore_sp_post()). 
+ */ + + return ecore_sp_post(sc, + ramrod, + o->cids[ECORE_PRIMARY_CID_INDEX], + data_mapping, ETH_CONNECTION_TYPE); +} + +static int ecore_q_send_setup_tx_only(struct bnx2x_softc *sc, struct ecore_queue_state_params + *params) +{ + struct ecore_queue_sp_obj *o = params->q_obj; + struct tx_queue_init_ramrod_data *rdata = + (struct tx_queue_init_ramrod_data *)o->rdata; + ecore_dma_addr_t data_mapping = o->rdata_mapping; + int ramrod = RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP; + struct ecore_queue_setup_tx_only_params *tx_only_params = + ¶ms->params.tx_only; + uint8_t cid_index = tx_only_params->cid_index; + + if (ECORE_TEST_BIT(ECORE_Q_TYPE_FWD, &o->type)) + ramrod = RAMROD_CMD_ID_ETH_FORWARD_SETUP; + ECORE_MSG(sc, "sending forward tx-only ramrod"); + + if (cid_index >= o->max_cos) { + PMD_DRV_LOG(ERR, sc, "queue[%d]: cid_index (%d) is out of range", + o->cl_id, cid_index); + return ECORE_INVAL; + } + + ECORE_MSG(sc, "parameters received: cos: %d sp-id: %d", + tx_only_params->gen_params.cos, + tx_only_params->gen_params.spcl_id); + + /* Clear the ramrod data */ + ECORE_MEMSET(rdata, 0, sizeof(*rdata)); + + /* Fill the ramrod data */ + ecore_q_fill_setup_tx_only(sc, params, rdata); + + ECORE_MSG + (sc, "sending tx-only ramrod: cid %d, client-id %d, sp-client id %d, cos %d", + o->cids[cid_index], rdata->general.client_id, + rdata->general.sp_client_id, rdata->general.cos); + + /* No need for an explicit memory barrier here as long we would + * need to ensure the ordering of writing to the SPQ element + * and updating of the SPQ producer which involves a memory + * read and we will have to put a full memory barrier there + * (inside ecore_sp_post()). + */ + + return ecore_sp_post(sc, ramrod, o->cids[cid_index], + data_mapping, ETH_CONNECTION_TYPE); +} + +static void ecore_q_fill_update_data(struct ecore_queue_sp_obj *obj, + struct ecore_queue_update_params *params, + struct client_update_ramrod_data *data) +{ + /* Client ID of the client to update */ + data->client_id = obj->cl_id; + + /* Function ID of the client to update */ + data->func_id = obj->func_id; + + /* Default VLAN value */ + data->default_vlan = ECORE_CPU_TO_LE16(params->def_vlan); + + /* Inner VLAN stripping */ + data->inner_vlan_removal_enable_flg = + ECORE_TEST_BIT(ECORE_Q_UPDATE_IN_VLAN_REM, ¶ms->update_flags); + data->inner_vlan_removal_change_flg = + ECORE_TEST_BIT(ECORE_Q_UPDATE_IN_VLAN_REM_CHNG, + ¶ms->update_flags); + + /* Outer VLAN stripping */ + data->outer_vlan_removal_enable_flg = + ECORE_TEST_BIT(ECORE_Q_UPDATE_OUT_VLAN_REM, ¶ms->update_flags); + data->outer_vlan_removal_change_flg = + ECORE_TEST_BIT(ECORE_Q_UPDATE_OUT_VLAN_REM_CHNG, + ¶ms->update_flags); + + /* Drop packets that have source MAC that doesn't belong to this + * Queue. 
+ */ + data->anti_spoofing_enable_flg = + ECORE_TEST_BIT(ECORE_Q_UPDATE_ANTI_SPOOF, &params->update_flags); + data->anti_spoofing_change_flg = + ECORE_TEST_BIT(ECORE_Q_UPDATE_ANTI_SPOOF_CHNG, + &params->update_flags); + + /* Activate/Deactivate */ + data->activate_flg = + ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE, &params->update_flags); + data->activate_change_flg = + ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG, &params->update_flags); + + /* Enable default VLAN */ + data->default_vlan_enable_flg = + ECORE_TEST_BIT(ECORE_Q_UPDATE_DEF_VLAN_EN, &params->update_flags); + data->default_vlan_change_flg = + ECORE_TEST_BIT(ECORE_Q_UPDATE_DEF_VLAN_EN_CHNG, + &params->update_flags); + + /* silent vlan removal */ + data->silent_vlan_change_flg = + ECORE_TEST_BIT(ECORE_Q_UPDATE_SILENT_VLAN_REM_CHNG, + &params->update_flags); + data->silent_vlan_removal_flg = + ECORE_TEST_BIT(ECORE_Q_UPDATE_SILENT_VLAN_REM, + &params->update_flags); + data->silent_vlan_value = + ECORE_CPU_TO_LE16(params->silent_removal_value); + data->silent_vlan_mask = ECORE_CPU_TO_LE16(params->silent_removal_mask); + + /* tx switching */ + data->tx_switching_flg = + ECORE_TEST_BIT(ECORE_Q_UPDATE_TX_SWITCHING, &params->update_flags); + data->tx_switching_change_flg = + ECORE_TEST_BIT(ECORE_Q_UPDATE_TX_SWITCHING_CHNG, + &params->update_flags); +} + +static int ecore_q_send_update(struct bnx2x_softc *sc, + struct ecore_queue_state_params *params) +{ + struct ecore_queue_sp_obj *o = params->q_obj; + struct client_update_ramrod_data *rdata = + (struct client_update_ramrod_data *)o->rdata; + ecore_dma_addr_t data_mapping = o->rdata_mapping; + struct ecore_queue_update_params *update_params = + &params->params.update; + uint8_t cid_index = update_params->cid_index; + + if (cid_index >= o->max_cos) { + PMD_DRV_LOG(ERR, sc, "queue[%d]: cid_index (%d) is out of range", + o->cl_id, cid_index); + return ECORE_INVAL; + } + + /* Clear the ramrod data */ + ECORE_MEMSET(rdata, 0, sizeof(*rdata)); + + /* Fill the ramrod data */ + ecore_q_fill_update_data(o, update_params, rdata); + + /* No need for an explicit memory barrier here as long we would + * need to ensure the ordering of writing to the SPQ element + * and updating of the SPQ producer which involves a memory + * read and we will have to put a full memory barrier there + * (inside ecore_sp_post()). + */ + + return ecore_sp_post(sc, RAMROD_CMD_ID_ETH_CLIENT_UPDATE, + o->cids[cid_index], data_mapping, + ETH_CONNECTION_TYPE); +} + +/** + * ecore_q_send_deactivate - send DEACTIVATE command + * + * @sc: device handle + * @params: + * + * implemented using the UPDATE command. + */ +static int ecore_q_send_deactivate(struct bnx2x_softc *sc, struct ecore_queue_state_params + *params) +{ + struct ecore_queue_update_params *update = &params->params.update; + + ECORE_MEMSET(update, 0, sizeof(*update)); + + ECORE_SET_BIT_NA(ECORE_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags); + + return ecore_q_send_update(sc, params); +} + +/** + * ecore_q_send_activate - send ACTIVATE command + * + * @sc: device handle + * @params: + * + * implemented using the UPDATE command. 
+ */ +static int ecore_q_send_activate(struct bnx2x_softc *sc, + struct ecore_queue_state_params *params) +{ + struct ecore_queue_update_params *update = ¶ms->params.update; + + ECORE_MEMSET(update, 0, sizeof(*update)); + + ECORE_SET_BIT_NA(ECORE_Q_UPDATE_ACTIVATE, &update->update_flags); + ECORE_SET_BIT_NA(ECORE_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags); + + return ecore_q_send_update(sc, params); +} + +static int ecore_q_send_update_tpa(__rte_unused struct bnx2x_softc *sc, + __rte_unused struct + ecore_queue_state_params *params) +{ + /* Not implemented yet. */ + return -1; +} + +static int ecore_q_send_halt(struct bnx2x_softc *sc, + struct ecore_queue_state_params *params) +{ + struct ecore_queue_sp_obj *o = params->q_obj; + + /* build eth_halt_ramrod_data.client_id in a big-endian friendly way */ + ecore_dma_addr_t data_mapping = 0; + data_mapping = (ecore_dma_addr_t) o->cl_id; + + return ecore_sp_post(sc, + RAMROD_CMD_ID_ETH_HALT, + o->cids[ECORE_PRIMARY_CID_INDEX], + data_mapping, ETH_CONNECTION_TYPE); +} + +static int ecore_q_send_cfc_del(struct bnx2x_softc *sc, + struct ecore_queue_state_params *params) +{ + struct ecore_queue_sp_obj *o = params->q_obj; + uint8_t cid_idx = params->params.cfc_del.cid_index; + + if (cid_idx >= o->max_cos) { + PMD_DRV_LOG(ERR, sc, "queue[%d]: cid_index (%d) is out of range", + o->cl_id, cid_idx); + return ECORE_INVAL; + } + + return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_CFC_DEL, + o->cids[cid_idx], 0, NONE_CONNECTION_TYPE); +} + +static int ecore_q_send_terminate(struct bnx2x_softc *sc, struct ecore_queue_state_params + *params) +{ + struct ecore_queue_sp_obj *o = params->q_obj; + uint8_t cid_index = params->params.terminate.cid_index; + + if (cid_index >= o->max_cos) { + PMD_DRV_LOG(ERR, sc, "queue[%d]: cid_index (%d) is out of range", + o->cl_id, cid_index); + return ECORE_INVAL; + } + + return ecore_sp_post(sc, RAMROD_CMD_ID_ETH_TERMINATE, + o->cids[cid_index], 0, ETH_CONNECTION_TYPE); +} + +static int ecore_q_send_empty(struct bnx2x_softc *sc, + struct ecore_queue_state_params *params) +{ + struct ecore_queue_sp_obj *o = params->q_obj; + + return ecore_sp_post(sc, RAMROD_CMD_ID_ETH_EMPTY, + o->cids[ECORE_PRIMARY_CID_INDEX], 0, + ETH_CONNECTION_TYPE); +} + +static int ecore_queue_send_cmd_cmn(struct bnx2x_softc *sc, struct ecore_queue_state_params + *params) +{ + switch (params->cmd) { + case ECORE_Q_CMD_INIT: + return ecore_q_init(sc, params); + case ECORE_Q_CMD_SETUP_TX_ONLY: + return ecore_q_send_setup_tx_only(sc, params); + case ECORE_Q_CMD_DEACTIVATE: + return ecore_q_send_deactivate(sc, params); + case ECORE_Q_CMD_ACTIVATE: + return ecore_q_send_activate(sc, params); + case ECORE_Q_CMD_UPDATE: + return ecore_q_send_update(sc, params); + case ECORE_Q_CMD_UPDATE_TPA: + return ecore_q_send_update_tpa(sc, params); + case ECORE_Q_CMD_HALT: + return ecore_q_send_halt(sc, params); + case ECORE_Q_CMD_CFC_DEL: + return ecore_q_send_cfc_del(sc, params); + case ECORE_Q_CMD_TERMINATE: + return ecore_q_send_terminate(sc, params); + case ECORE_Q_CMD_EMPTY: + return ecore_q_send_empty(sc, params); + default: + PMD_DRV_LOG(ERR, sc, "Unknown command: %d", params->cmd); + return ECORE_INVAL; + } +} + +static int ecore_queue_send_cmd_e1x(struct bnx2x_softc *sc, + struct ecore_queue_state_params *params) +{ + switch (params->cmd) { + case ECORE_Q_CMD_SETUP: + return ecore_q_send_setup_e1x(sc, params); + case ECORE_Q_CMD_INIT: + case ECORE_Q_CMD_SETUP_TX_ONLY: + case ECORE_Q_CMD_DEACTIVATE: + case ECORE_Q_CMD_ACTIVATE: + case ECORE_Q_CMD_UPDATE: + case 
ECORE_Q_CMD_UPDATE_TPA: + case ECORE_Q_CMD_HALT: + case ECORE_Q_CMD_CFC_DEL: + case ECORE_Q_CMD_TERMINATE: + case ECORE_Q_CMD_EMPTY: + return ecore_queue_send_cmd_cmn(sc, params); + default: + PMD_DRV_LOG(ERR, sc, "Unknown command: %d", params->cmd); + return ECORE_INVAL; + } +} + +static int ecore_queue_send_cmd_e2(struct bnx2x_softc *sc, + struct ecore_queue_state_params *params) +{ + switch (params->cmd) { + case ECORE_Q_CMD_SETUP: + return ecore_q_send_setup_e2(sc, params); + case ECORE_Q_CMD_INIT: + case ECORE_Q_CMD_SETUP_TX_ONLY: + case ECORE_Q_CMD_DEACTIVATE: + case ECORE_Q_CMD_ACTIVATE: + case ECORE_Q_CMD_UPDATE: + case ECORE_Q_CMD_UPDATE_TPA: + case ECORE_Q_CMD_HALT: + case ECORE_Q_CMD_CFC_DEL: + case ECORE_Q_CMD_TERMINATE: + case ECORE_Q_CMD_EMPTY: + return ecore_queue_send_cmd_cmn(sc, params); + default: + PMD_DRV_LOG(ERR, sc, "Unknown command: %d", params->cmd); + return ECORE_INVAL; + } +} + +/** + * ecore_queue_chk_transition - check state machine of a regular Queue + * + * @sc: device handle + * @o: + * @params: + * + * (not Forwarding) + * It both checks if the requested command is legal in a current + * state and, if it's legal, sets a `next_state' in the object + * that will be used in the completion flow to set the `state' + * of the object. + * + * returns 0 if a requested command is a legal transition, + * ECORE_INVAL otherwise. + */ +static int ecore_queue_chk_transition(struct bnx2x_softc *sc __rte_unused, + struct ecore_queue_sp_obj *o, + struct ecore_queue_state_params *params) +{ + enum ecore_q_state state = o->state, next_state = ECORE_Q_STATE_MAX; + enum ecore_queue_cmd cmd = params->cmd; + struct ecore_queue_update_params *update_params = + &params->params.update; + uint8_t next_tx_only = o->num_tx_only; + + /* Forget all pending for completion commands if a driver only state + * transition has been requested. + */ + if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) { + o->pending = 0; + o->next_state = ECORE_Q_STATE_MAX; + } + + /* Don't allow a next state transition if we are in the middle of + * the previous one. + */ + if (o->pending) { + PMD_DRV_LOG(ERR, sc, "Blocking transition since pending was %lx", + o->pending); + return ECORE_BUSY; + } + + switch (state) { + case ECORE_Q_STATE_RESET: + if (cmd == ECORE_Q_CMD_INIT) + next_state = ECORE_Q_STATE_INITIALIZED; + + break; + case ECORE_Q_STATE_INITIALIZED: + if (cmd == ECORE_Q_CMD_SETUP) { + if (ECORE_TEST_BIT(ECORE_Q_FLG_ACTIVE, + &params->params.setup.flags)) + next_state = ECORE_Q_STATE_ACTIVE; + else + next_state = ECORE_Q_STATE_INACTIVE; + } + + break; + case ECORE_Q_STATE_ACTIVE: + if (cmd == ECORE_Q_CMD_DEACTIVATE) + next_state = ECORE_Q_STATE_INACTIVE; + + else if ((cmd == ECORE_Q_CMD_EMPTY) || + (cmd == ECORE_Q_CMD_UPDATE_TPA)) + next_state = ECORE_Q_STATE_ACTIVE; + + else if (cmd == ECORE_Q_CMD_SETUP_TX_ONLY) { + next_state = ECORE_Q_STATE_MULTI_COS; + next_tx_only = 1; + } + + else if (cmd == ECORE_Q_CMD_HALT) + next_state = ECORE_Q_STATE_STOPPED; + + else if (cmd == ECORE_Q_CMD_UPDATE) { + /* If "active" state change is requested, update the + * state accordingly. 
+ */ + if (ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG, + &update_params->update_flags) && + !ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE, + &update_params->update_flags)) + next_state = ECORE_Q_STATE_INACTIVE; + else + next_state = ECORE_Q_STATE_ACTIVE; + } + + break; + case ECORE_Q_STATE_MULTI_COS: + if (cmd == ECORE_Q_CMD_TERMINATE) + next_state = ECORE_Q_STATE_MCOS_TERMINATED; + + else if (cmd == ECORE_Q_CMD_SETUP_TX_ONLY) { + next_state = ECORE_Q_STATE_MULTI_COS; + next_tx_only = o->num_tx_only + 1; + } + + else if ((cmd == ECORE_Q_CMD_EMPTY) || + (cmd == ECORE_Q_CMD_UPDATE_TPA)) + next_state = ECORE_Q_STATE_MULTI_COS; + + else if (cmd == ECORE_Q_CMD_UPDATE) { + /* If "active" state change is requested, update the + * state accordingly. + */ + if (ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG, + &update_params->update_flags) && + !ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE, + &update_params->update_flags)) + next_state = ECORE_Q_STATE_INACTIVE; + else + next_state = ECORE_Q_STATE_MULTI_COS; + } + + break; + case ECORE_Q_STATE_MCOS_TERMINATED: + if (cmd == ECORE_Q_CMD_CFC_DEL) { + next_tx_only = o->num_tx_only - 1; + if (next_tx_only == 0) + next_state = ECORE_Q_STATE_ACTIVE; + else + next_state = ECORE_Q_STATE_MULTI_COS; + } + + break; + case ECORE_Q_STATE_INACTIVE: + if (cmd == ECORE_Q_CMD_ACTIVATE) + next_state = ECORE_Q_STATE_ACTIVE; + + else if ((cmd == ECORE_Q_CMD_EMPTY) || + (cmd == ECORE_Q_CMD_UPDATE_TPA)) + next_state = ECORE_Q_STATE_INACTIVE; + + else if (cmd == ECORE_Q_CMD_HALT) + next_state = ECORE_Q_STATE_STOPPED; + + else if (cmd == ECORE_Q_CMD_UPDATE) { + /* If "active" state change is requested, update the + * state accordingly. + */ + if (ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG, + &update_params->update_flags) && + ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE, + &update_params->update_flags)) { + if (o->num_tx_only == 0) + next_state = ECORE_Q_STATE_ACTIVE; + else /* tx only queues exist for this queue */ + next_state = ECORE_Q_STATE_MULTI_COS; + } else + next_state = ECORE_Q_STATE_INACTIVE; + } + + break; + case ECORE_Q_STATE_STOPPED: + if (cmd == ECORE_Q_CMD_TERMINATE) + next_state = ECORE_Q_STATE_TERMINATED; + + break; + case ECORE_Q_STATE_TERMINATED: + if (cmd == ECORE_Q_CMD_CFC_DEL) + next_state = ECORE_Q_STATE_RESET; + + break; + default: + PMD_DRV_LOG(ERR, sc, "Illegal state: %d", state); + } + + /* Transition is assured */ + if (next_state != ECORE_Q_STATE_MAX) { + ECORE_MSG(sc, "Good state transition: %d(%d)->%d", + state, cmd, next_state); + o->next_state = next_state; + o->next_tx_only = next_tx_only; + return ECORE_SUCCESS; + } + + ECORE_MSG(sc, "Bad state transition request: %d %d", state, cmd); + + return ECORE_INVAL; +} + +/** + * ecore_queue_chk_fwd_transition - check state machine of a Forwarding Queue. + * + * @sc: device handle + * @o: + * @params: + * + * It both checks if the requested command is legal in a current + * state and, if it's legal, sets a `next_state' in the object + * that will be used in the completion flow to set the `state' + * of the object. + * + * returns 0 if a requested command is a legal transition, + * ECORE_INVAL otherwise. 
+ */ +static int ecore_queue_chk_fwd_transition(struct bnx2x_softc *sc __rte_unused, + struct ecore_queue_sp_obj *o, + struct ecore_queue_state_params + *params) +{ + enum ecore_q_state state = o->state, next_state = ECORE_Q_STATE_MAX; + enum ecore_queue_cmd cmd = params->cmd; + + switch (state) { + case ECORE_Q_STATE_RESET: + if (cmd == ECORE_Q_CMD_INIT) + next_state = ECORE_Q_STATE_INITIALIZED; + + break; + case ECORE_Q_STATE_INITIALIZED: + if (cmd == ECORE_Q_CMD_SETUP_TX_ONLY) { + if (ECORE_TEST_BIT(ECORE_Q_FLG_ACTIVE, + &params->params.tx_only.flags)) + next_state = ECORE_Q_STATE_ACTIVE; + else + next_state = ECORE_Q_STATE_INACTIVE; + } + + break; + case ECORE_Q_STATE_ACTIVE: + case ECORE_Q_STATE_INACTIVE: + if (cmd == ECORE_Q_CMD_CFC_DEL) + next_state = ECORE_Q_STATE_RESET; + + break; + default: + PMD_DRV_LOG(ERR, sc, "Illegal state: %d", state); + } + + /* Transition is assured */ + if (next_state != ECORE_Q_STATE_MAX) { + ECORE_MSG(sc, "Good state transition: %d(%d)->%d", + state, cmd, next_state); + o->next_state = next_state; + return ECORE_SUCCESS; + } + + ECORE_MSG(sc, "Bad state transition request: %d %d", state, cmd); + return ECORE_INVAL; +} + +void ecore_init_queue_obj(struct bnx2x_softc *sc, + struct ecore_queue_sp_obj *obj, + uint8_t cl_id, uint32_t * cids, uint8_t cid_cnt, + uint8_t func_id, void *rdata, + ecore_dma_addr_t rdata_mapping, unsigned long type) +{ + ECORE_MEMSET(obj, 0, sizeof(*obj)); + + /* We support only ECORE_MULTI_TX_COS Tx CoS at the moment */ + ECORE_BUG_ON(ECORE_MULTI_TX_COS < cid_cnt); + + rte_memcpy(obj->cids, cids, sizeof(obj->cids[0]) * cid_cnt); + obj->max_cos = cid_cnt; + obj->cl_id = cl_id; + obj->func_id = func_id; + obj->rdata = rdata; + obj->rdata_mapping = rdata_mapping; + obj->type = type; + obj->next_state = ECORE_Q_STATE_MAX; + + if (CHIP_IS_E1x(sc)) + obj->send_cmd = ecore_queue_send_cmd_e1x; + else + obj->send_cmd = ecore_queue_send_cmd_e2; + + if (ECORE_TEST_BIT(ECORE_Q_TYPE_FWD, &type)) + obj->check_transition = ecore_queue_chk_fwd_transition; + else + obj->check_transition = ecore_queue_chk_transition; + + obj->complete_cmd = ecore_queue_comp_cmd; + obj->wait_comp = ecore_queue_wait_comp; + obj->set_pending = ecore_queue_set_pending; +} + +/********************** Function state object *********************************/ +enum ecore_func_state ecore_func_get_state(__rte_unused struct bnx2x_softc *sc, + struct ecore_func_sp_obj *o) +{ + /* in the middle of transaction - return INVALID state */ + if (o->pending) + return ECORE_F_STATE_MAX; + + /* ensure the order of reading of o->pending and o->state + * o->pending should be read first + */ + rmb(); + + return o->state; +} + +static int ecore_func_wait_comp(struct bnx2x_softc *sc, + struct ecore_func_sp_obj *o, + enum ecore_func_cmd cmd) +{ + return ecore_state_wait(sc, cmd, &o->pending); +} + +/** + * ecore_func_state_change_comp - complete the state machine transition + * + * @sc: device handle + * @o: + * @cmd: + * + * Called on state change transition. Completes the state + * machine transition only - no HW interaction. 
+ */ +static int +ecore_func_state_change_comp(struct bnx2x_softc *sc __rte_unused, + struct ecore_func_sp_obj *o, + enum ecore_func_cmd cmd) +{ + unsigned long cur_pending = o->pending; + + if (!ECORE_TEST_AND_CLEAR_BIT(cmd, &cur_pending)) { + PMD_DRV_LOG(ERR, sc, + "Bad MC reply %d for func %d in state %d pending 0x%lx, next_state %d", + cmd, ECORE_FUNC_ID(sc), o->state, cur_pending, + o->next_state); + return ECORE_INVAL; + } + + ECORE_MSG(sc, "Completing command %d for func %d, setting state to %d", + cmd, ECORE_FUNC_ID(sc), o->next_state); + + o->state = o->next_state; + o->next_state = ECORE_F_STATE_MAX; + + /* It's important that o->state and o->next_state are + * updated before o->pending. + */ + wmb(); + + ECORE_CLEAR_BIT(cmd, &o->pending); + ECORE_SMP_MB_AFTER_CLEAR_BIT(); + + return ECORE_SUCCESS; +} + +/** + * ecore_func_comp_cmd - complete the state change command + * + * @sc: device handle + * @o: + * @cmd: + * + * Checks that the arrived completion is expected. + */ +static int ecore_func_comp_cmd(struct bnx2x_softc *sc, + struct ecore_func_sp_obj *o, + enum ecore_func_cmd cmd) +{ + /* Complete the state machine part first, check if it's a + * legal completion. + */ + int rc = ecore_func_state_change_comp(sc, o, cmd); + return rc; +} + +/** + * ecore_func_chk_transition - perform function state machine transition + * + * @sc: device handle + * @o: + * @params: + * + * It both checks if the requested command is legal in a current + * state and, if it's legal, sets a `next_state' in the object + * that will be used in the completion flow to set the `state' + * of the object. + * + * returns 0 if a requested command is a legal transition, + * ECORE_INVAL otherwise. + */ +static int ecore_func_chk_transition(struct bnx2x_softc *sc __rte_unused, + struct ecore_func_sp_obj *o, + struct ecore_func_state_params *params) +{ + enum ecore_func_state state = o->state, next_state = ECORE_F_STATE_MAX; + enum ecore_func_cmd cmd = params->cmd; + + /* Forget all pending for completion commands if a driver only state + * transition has been requested. + */ + if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ¶ms->ramrod_flags)) { + o->pending = 0; + o->next_state = ECORE_F_STATE_MAX; + } + + /* Don't allow a next state transition if we are in the middle of + * the previous one. + */ + if (o->pending) + return ECORE_BUSY; + + switch (state) { + case ECORE_F_STATE_RESET: + if (cmd == ECORE_F_CMD_HW_INIT) + next_state = ECORE_F_STATE_INITIALIZED; + + break; + case ECORE_F_STATE_INITIALIZED: + if (cmd == ECORE_F_CMD_START) + next_state = ECORE_F_STATE_STARTED; + + else if (cmd == ECORE_F_CMD_HW_RESET) + next_state = ECORE_F_STATE_RESET; + + break; + case ECORE_F_STATE_STARTED: + if (cmd == ECORE_F_CMD_STOP) + next_state = ECORE_F_STATE_INITIALIZED; + /* afex ramrods can be sent only in started mode, and only + * if not pending for function_stop ramrod completion + * for these events - next state remained STARTED. + */ + else if ((cmd == ECORE_F_CMD_AFEX_UPDATE) && + (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending))) + next_state = ECORE_F_STATE_STARTED; + + else if ((cmd == ECORE_F_CMD_AFEX_VIFLISTS) && + (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending))) + next_state = ECORE_F_STATE_STARTED; + + /* Switch_update ramrod can be sent in either started or + * tx_stopped state, and it doesn't change the state. 
+ */ + else if ((cmd == ECORE_F_CMD_SWITCH_UPDATE) && + (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending))) + next_state = ECORE_F_STATE_STARTED; + + else if (cmd == ECORE_F_CMD_TX_STOP) + next_state = ECORE_F_STATE_TX_STOPPED; + + break; + case ECORE_F_STATE_TX_STOPPED: + if ((cmd == ECORE_F_CMD_SWITCH_UPDATE) && + (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending))) + next_state = ECORE_F_STATE_TX_STOPPED; + + else if (cmd == ECORE_F_CMD_TX_START) + next_state = ECORE_F_STATE_STARTED; + + break; + default: + PMD_DRV_LOG(ERR, sc, "Unknown state: %d", state); + } + + /* Transition is assured */ + if (next_state != ECORE_F_STATE_MAX) { + ECORE_MSG(sc, "Good function state transition: %d(%d)->%d", + state, cmd, next_state); + o->next_state = next_state; + return ECORE_SUCCESS; + } + + ECORE_MSG(sc, + "Bad function state transition request: %d %d", state, cmd); + + return ECORE_INVAL; +} + +/** + * ecore_func_init_func - performs HW init at function stage + * + * @sc: device handle + * @drv: + * + * Init HW when the current phase is + * FW_MSG_CODE_DRV_LOAD_FUNCTION: initialize only FUNCTION-only + * HW blocks. + */ +static int ecore_func_init_func(struct bnx2x_softc *sc, + const struct ecore_func_sp_drv_ops *drv) +{ + return drv->init_hw_func(sc); +} + +/** + * ecore_func_init_port - performs HW init at port stage + * + * @sc: device handle + * @drv: + * + * Init HW when the current phase is + * FW_MSG_CODE_DRV_LOAD_PORT: initialize PORT-only and + * FUNCTION-only HW blocks. + * + */ +static int ecore_func_init_port(struct bnx2x_softc *sc, + const struct ecore_func_sp_drv_ops *drv) +{ + int rc = drv->init_hw_port(sc); + if (rc) + return rc; + + return ecore_func_init_func(sc, drv); +} + +/** + * ecore_func_init_cmn_chip - performs HW init at chip-common stage + * + * @sc: device handle + * @drv: + * + * Init HW when the current phase is + * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON_CHIP, + * PORT-only and FUNCTION-only HW blocks. + */ +static int ecore_func_init_cmn_chip(struct bnx2x_softc *sc, const struct ecore_func_sp_drv_ops + *drv) +{ + int rc = drv->init_hw_cmn_chip(sc); + if (rc) + return rc; + + return ecore_func_init_port(sc, drv); +} + +/** + * ecore_func_init_cmn - performs HW init at common stage + * + * @sc: device handle + * @drv: + * + * Init HW when the current phase is + * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON, + * PORT-only and FUNCTION-only HW blocks. + */ +static int ecore_func_init_cmn(struct bnx2x_softc *sc, + const struct ecore_func_sp_drv_ops *drv) +{ + int rc = drv->init_hw_cmn(sc); + if (rc) + return rc; + + return ecore_func_init_port(sc, drv); +} + +static int ecore_func_hw_init(struct bnx2x_softc *sc, + struct ecore_func_state_params *params) +{ + uint32_t load_code = params->params.hw_init.load_phase; + struct ecore_func_sp_obj *o = params->f_obj; + const struct ecore_func_sp_drv_ops *drv = o->drv; + int rc = 0; + + ECORE_MSG(sc, "function %d load_code %x", + ECORE_ABS_FUNC_ID(sc), load_code); + + /* Prepare FW */ + rc = drv->init_fw(sc); + if (rc) { + PMD_DRV_LOG(ERR, sc, "Error loading firmware"); + goto init_err; + } + + /* Handle the beginning of COMMON_XXX pases separately... 
*/ + switch (load_code) { + case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: + rc = ecore_func_init_cmn_chip(sc, drv); + if (rc) + goto init_err; + + break; + case FW_MSG_CODE_DRV_LOAD_COMMON: + rc = ecore_func_init_cmn(sc, drv); + if (rc) + goto init_err; + + break; + case FW_MSG_CODE_DRV_LOAD_PORT: + rc = ecore_func_init_port(sc, drv); + if (rc) + goto init_err; + + break; + case FW_MSG_CODE_DRV_LOAD_FUNCTION: + rc = ecore_func_init_func(sc, drv); + if (rc) + goto init_err; + + break; + default: + PMD_DRV_LOG(ERR, sc, "Unknown load_code (0x%x) from MCP", + load_code); + rc = ECORE_INVAL; + } + +init_err: + /* In case of success, complete the command immediately: no ramrods + * have been sent. + */ + if (!rc) + o->complete_cmd(sc, o, ECORE_F_CMD_HW_INIT); + + return rc; +} + +/** + * ecore_func_reset_func - reset HW at function stage + * + * @sc: device handle + * @drv: + * + * Reset HW at FW_MSG_CODE_DRV_UNLOAD_FUNCTION stage: reset only + * FUNCTION-only HW blocks. + */ +static void ecore_func_reset_func(struct bnx2x_softc *sc, const struct ecore_func_sp_drv_ops + *drv) +{ + drv->reset_hw_func(sc); +} + +/** + * ecore_func_reset_port - reser HW at port stage + * + * @sc: device handle + * @drv: + * + * Reset HW at FW_MSG_CODE_DRV_UNLOAD_PORT stage: reset + * FUNCTION-only and PORT-only HW blocks. + * + * !!!IMPORTANT!!! + * + * It's important to call reset_port before reset_func() as the last thing + * reset_func does is pf_disable() thus disabling PGLUE_B, which + * makes impossible any DMAE transactions. + */ +static void ecore_func_reset_port(struct bnx2x_softc *sc, const struct ecore_func_sp_drv_ops + *drv) +{ + drv->reset_hw_port(sc); + ecore_func_reset_func(sc, drv); +} + +/** + * ecore_func_reset_cmn - reser HW at common stage + * + * @sc: device handle + * @drv: + * + * Reset HW at FW_MSG_CODE_DRV_UNLOAD_COMMON and + * FW_MSG_CODE_DRV_UNLOAD_COMMON_CHIP stages: reset COMMON, + * COMMON_CHIP, FUNCTION-only and PORT-only HW blocks. + */ +static void ecore_func_reset_cmn(struct bnx2x_softc *sc, + const struct ecore_func_sp_drv_ops *drv) +{ + ecore_func_reset_port(sc, drv); + drv->reset_hw_cmn(sc); +} + +static int ecore_func_hw_reset(struct bnx2x_softc *sc, + struct ecore_func_state_params *params) +{ + uint32_t reset_phase = params->params.hw_reset.reset_phase; + struct ecore_func_sp_obj *o = params->f_obj; + const struct ecore_func_sp_drv_ops *drv = o->drv; + + ECORE_MSG(sc, "function %d reset_phase %x", ECORE_ABS_FUNC_ID(sc), + reset_phase); + + switch (reset_phase) { + case FW_MSG_CODE_DRV_UNLOAD_COMMON: + ecore_func_reset_cmn(sc, drv); + break; + case FW_MSG_CODE_DRV_UNLOAD_PORT: + ecore_func_reset_port(sc, drv); + break; + case FW_MSG_CODE_DRV_UNLOAD_FUNCTION: + ecore_func_reset_func(sc, drv); + break; + default: + PMD_DRV_LOG(ERR, sc, "Unknown reset_phase (0x%x) from MCP", + reset_phase); + break; + } + + /* Complete the command immediately: no ramrods have been sent. 
*/ + o->complete_cmd(sc, o, ECORE_F_CMD_HW_RESET); + + return ECORE_SUCCESS; +} + +static int ecore_func_send_start(struct bnx2x_softc *sc, + struct ecore_func_state_params *params) +{ + struct ecore_func_sp_obj *o = params->f_obj; + struct function_start_data *rdata = + (struct function_start_data *)o->rdata; + ecore_dma_addr_t data_mapping = o->rdata_mapping; + struct ecore_func_start_params *start_params = &params->params.start; + + ECORE_MEMSET(rdata, 0, sizeof(*rdata)); + + /* Fill the ramrod data with provided parameters */ + rdata->function_mode = (uint8_t) start_params->mf_mode; + rdata->sd_vlan_tag = ECORE_CPU_TO_LE16(start_params->sd_vlan_tag); + rdata->path_id = ECORE_PATH_ID(sc); + rdata->network_cos_mode = start_params->network_cos_mode; + + /* + * No need for an explicit memory barrier here as long we would + * need to ensure the ordering of writing to the SPQ element + * and updating of the SPQ producer which involves a memory + * read and we will have to put a full memory barrier there + * (inside ecore_sp_post()). + */ + + return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0, + data_mapping, NONE_CONNECTION_TYPE); +} + +static int ecore_func_send_switch_update(struct bnx2x_softc *sc, struct ecore_func_state_params + *params) +{ + struct ecore_func_sp_obj *o = params->f_obj; + struct function_update_data *rdata = + (struct function_update_data *)o->rdata; + ecore_dma_addr_t data_mapping = o->rdata_mapping; + struct ecore_func_switch_update_params *switch_update_params = + &params->params.switch_update; + + ECORE_MEMSET(rdata, 0, sizeof(*rdata)); + + /* Fill the ramrod data with provided parameters */ + if (ECORE_TEST_BIT(ECORE_F_UPDATE_TX_SWITCH_SUSPEND_CHNG, + &switch_update_params->changes)) { + rdata->tx_switch_suspend_change_flg = 1; + rdata->tx_switch_suspend = + ECORE_TEST_BIT(ECORE_F_UPDATE_TX_SWITCH_SUSPEND, + &switch_update_params->changes); + } + + rdata->echo = SWITCH_UPDATE; + + return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0, + data_mapping, NONE_CONNECTION_TYPE); +} + +static int ecore_func_send_afex_update(struct bnx2x_softc *sc, struct ecore_func_state_params + *params) +{ + struct ecore_func_sp_obj *o = params->f_obj; + struct function_update_data *rdata = + (struct function_update_data *)o->afex_rdata; + ecore_dma_addr_t data_mapping = o->afex_rdata_mapping; + struct ecore_func_afex_update_params *afex_update_params = + &params->params.afex_update; + + ECORE_MEMSET(rdata, 0, sizeof(*rdata)); + + /* Fill the ramrod data with provided parameters */ + rdata->vif_id_change_flg = 1; + rdata->vif_id = ECORE_CPU_TO_LE16(afex_update_params->vif_id); + rdata->afex_default_vlan_change_flg = 1; + rdata->afex_default_vlan = + ECORE_CPU_TO_LE16(afex_update_params->afex_default_vlan); + rdata->allowed_priorities_change_flg = 1; + rdata->allowed_priorities = afex_update_params->allowed_priorities; + rdata->echo = AFEX_UPDATE; + + /* No need for an explicit memory barrier here as long we would + * need to ensure the ordering of writing to the SPQ element + * and updating of the SPQ producer which involves a memory + * read and we will have to put a full memory barrier there + * (inside ecore_sp_post()). 
+ */ + ECORE_MSG(sc, "afex: sending func_update vif_id 0x%x dvlan 0x%x prio 0x%x", + rdata->vif_id, + rdata->afex_default_vlan, rdata->allowed_priorities); + + return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0, + data_mapping, NONE_CONNECTION_TYPE); +} + +static +inline int ecore_func_send_afex_viflists(struct bnx2x_softc *sc, + struct ecore_func_state_params *params) +{ + struct ecore_func_sp_obj *o = params->f_obj; + struct afex_vif_list_ramrod_data *rdata = + (struct afex_vif_list_ramrod_data *)o->afex_rdata; + struct ecore_func_afex_viflists_params *afex_vif_params = + ¶ms->params.afex_viflists; + uint64_t *p_rdata = (uint64_t *) rdata; + + ECORE_MEMSET(rdata, 0, sizeof(*rdata)); + + /* Fill the ramrod data with provided parameters */ + rdata->vif_list_index = + ECORE_CPU_TO_LE16(afex_vif_params->vif_list_index); + rdata->func_bit_map = afex_vif_params->func_bit_map; + rdata->afex_vif_list_command = afex_vif_params->afex_vif_list_command; + rdata->func_to_clear = afex_vif_params->func_to_clear; + + /* send in echo type of sub command */ + rdata->echo = afex_vif_params->afex_vif_list_command; + + /* No need for an explicit memory barrier here as long we would + * need to ensure the ordering of writing to the SPQ element + * and updating of the SPQ producer which involves a memory + * read and we will have to put a full memory barrier there + * (inside ecore_sp_post()). + */ + + ECORE_MSG + (sc, "afex: ramrod lists, cmd 0x%x index 0x%x func_bit_map 0x%x func_to_clr 0x%x", + rdata->afex_vif_list_command, rdata->vif_list_index, + rdata->func_bit_map, rdata->func_to_clear); + + /* this ramrod sends data directly and not through DMA mapping */ + return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS, 0, + *p_rdata, NONE_CONNECTION_TYPE); +} + +static int ecore_func_send_stop(struct bnx2x_softc *sc, __rte_unused struct + ecore_func_state_params *params) +{ + return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, + NONE_CONNECTION_TYPE); +} + +static int ecore_func_send_tx_stop(struct bnx2x_softc *sc, __rte_unused struct + ecore_func_state_params *params) +{ + return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_STOP_TRAFFIC, 0, 0, + NONE_CONNECTION_TYPE); +} + +static int ecore_func_send_tx_start(struct bnx2x_softc *sc, struct ecore_func_state_params + *params) +{ + struct ecore_func_sp_obj *o = params->f_obj; + struct flow_control_configuration *rdata = + (struct flow_control_configuration *)o->rdata; + ecore_dma_addr_t data_mapping = o->rdata_mapping; + struct ecore_func_tx_start_params *tx_start_params = + ¶ms->params.tx_start; + uint32_t i; + + ECORE_MEMSET(rdata, 0, sizeof(*rdata)); + + rdata->dcb_enabled = tx_start_params->dcb_enabled; + rdata->dcb_version = tx_start_params->dcb_version; + rdata->dont_add_pri_0_en = tx_start_params->dont_add_pri_0_en; + + for (i = 0; i < ARRAY_SIZE(rdata->traffic_type_to_priority_cos); i++) + rdata->traffic_type_to_priority_cos[i] = + tx_start_params->traffic_type_to_priority_cos[i]; + + return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0, + data_mapping, NONE_CONNECTION_TYPE); +} + +static int ecore_func_send_cmd(struct bnx2x_softc *sc, + struct ecore_func_state_params *params) +{ + switch (params->cmd) { + case ECORE_F_CMD_HW_INIT: + return ecore_func_hw_init(sc, params); + case ECORE_F_CMD_START: + return ecore_func_send_start(sc, params); + case ECORE_F_CMD_STOP: + return ecore_func_send_stop(sc, params); + case ECORE_F_CMD_HW_RESET: + return ecore_func_hw_reset(sc, params); + case ECORE_F_CMD_AFEX_UPDATE: + 
return ecore_func_send_afex_update(sc, params); + case ECORE_F_CMD_AFEX_VIFLISTS: + return ecore_func_send_afex_viflists(sc, params); + case ECORE_F_CMD_TX_STOP: + return ecore_func_send_tx_stop(sc, params); + case ECORE_F_CMD_TX_START: + return ecore_func_send_tx_start(sc, params); + case ECORE_F_CMD_SWITCH_UPDATE: + return ecore_func_send_switch_update(sc, params); + default: + PMD_DRV_LOG(ERR, sc, "Unknown command: %d", params->cmd); + return ECORE_INVAL; + } +} + +void ecore_init_func_obj(__rte_unused struct bnx2x_softc *sc, + struct ecore_func_sp_obj *obj, + void *rdata, ecore_dma_addr_t rdata_mapping, + void *afex_rdata, ecore_dma_addr_t afex_rdata_mapping, + struct ecore_func_sp_drv_ops *drv_iface) +{ + ECORE_MEMSET(obj, 0, sizeof(*obj)); + + ECORE_MUTEX_INIT(&obj->one_pending_mutex); + + obj->rdata = rdata; + obj->rdata_mapping = rdata_mapping; + obj->afex_rdata = afex_rdata; + obj->afex_rdata_mapping = afex_rdata_mapping; + obj->send_cmd = ecore_func_send_cmd; + obj->check_transition = ecore_func_chk_transition; + obj->complete_cmd = ecore_func_comp_cmd; + obj->wait_comp = ecore_func_wait_comp; + obj->drv = drv_iface; +} + +/** + * ecore_func_state_change - perform Function state change transition + * + * @sc: device handle + * @params: parameters to perform the transaction + * + * returns 0 in case of successfully completed transition, + * negative error code in case of failure, positive + * (EBUSY) value if there is a completion to that is + * still pending (possible only if RAMROD_COMP_WAIT is + * not set in params->ramrod_flags for asynchronous + * commands). + */ +int ecore_func_state_change(struct bnx2x_softc *sc, + struct ecore_func_state_params *params) +{ + struct ecore_func_sp_obj *o = params->f_obj; + int rc, cnt = 300; + enum ecore_func_cmd cmd = params->cmd; + unsigned long *pending = &o->pending; + + ECORE_MUTEX_LOCK(&o->one_pending_mutex); + + /* Check that the requested transition is legal */ + rc = o->check_transition(sc, o, params); + if ((rc == ECORE_BUSY) && + (ECORE_TEST_BIT(RAMROD_RETRY, &params->ramrod_flags))) { + while ((rc == ECORE_BUSY) && (--cnt > 0)) { + ECORE_MUTEX_UNLOCK(&o->one_pending_mutex); + ECORE_MSLEEP(10); + ECORE_MUTEX_LOCK(&o->one_pending_mutex); + rc = o->check_transition(sc, o, params); + } + if (rc == ECORE_BUSY) { + ECORE_MUTEX_UNLOCK(&o->one_pending_mutex); + PMD_DRV_LOG(ERR, sc, + "timeout waiting for previous ramrod completion"); + return rc; + } + } else if (rc) { + ECORE_MUTEX_UNLOCK(&o->one_pending_mutex); + return rc; + } + + /* Set "pending" bit */ + ECORE_SET_BIT(cmd, pending); + + /* Don't send a command if only driver cleanup was requested */ + if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) { + ecore_func_state_change_comp(sc, o, cmd); + ECORE_MUTEX_UNLOCK(&o->one_pending_mutex); + } else { + /* Send a ramrod */ + rc = o->send_cmd(sc, params); + + ECORE_MUTEX_UNLOCK(&o->one_pending_mutex); + + if (rc) { + o->next_state = ECORE_F_STATE_MAX; + ECORE_CLEAR_BIT(cmd, pending); + ECORE_SMP_MB_AFTER_CLEAR_BIT(); + return rc; + } + + if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &params->ramrod_flags)) { + rc = o->wait_comp(sc, o, cmd); + if (rc) + return rc; + + return ECORE_SUCCESS; + } + } + + return ECORE_RET_PENDING(cmd, pending); +} + +/****************************************************************************** + * Description: + * Calculates crc 8 on a word value: polynomial 0-1-2-8 + * Code was translated from Verilog. 
+ * Return: + *****************************************************************************/ +uint8_t ecore_calc_crc8(uint32_t data, uint8_t crc) +{ + uint8_t D[32]; + uint8_t NewCRC[8]; + uint8_t C[8]; + uint8_t crc_res; + uint8_t i; + + /* split the data into 31 bits */ + for (i = 0; i < 32; i++) { + D[i] = (uint8_t) (data & 1); + data = data >> 1; + } + + /* split the crc into 8 bits */ + for (i = 0; i < 8; i++) { + C[i] = crc & 1; + crc = crc >> 1; + } + + NewCRC[0] = D[31] ^ D[30] ^ D[28] ^ D[23] ^ D[21] ^ D[19] ^ D[18] ^ + D[16] ^ D[14] ^ D[12] ^ D[8] ^ D[7] ^ D[6] ^ D[0] ^ C[4] ^ + C[6] ^ C[7]; + NewCRC[1] = D[30] ^ D[29] ^ D[28] ^ D[24] ^ D[23] ^ D[22] ^ D[21] ^ + D[20] ^ D[18] ^ D[17] ^ D[16] ^ D[15] ^ D[14] ^ D[13] ^ + D[12] ^ D[9] ^ D[6] ^ D[1] ^ D[0] ^ C[0] ^ C[4] ^ C[5] ^ C[6]; + NewCRC[2] = D[29] ^ D[28] ^ D[25] ^ D[24] ^ D[22] ^ D[17] ^ D[15] ^ + D[13] ^ D[12] ^ D[10] ^ D[8] ^ D[6] ^ D[2] ^ D[1] ^ D[0] ^ + C[0] ^ C[1] ^ C[4] ^ C[5]; + NewCRC[3] = D[30] ^ D[29] ^ D[26] ^ D[25] ^ D[23] ^ D[18] ^ D[16] ^ + D[14] ^ D[13] ^ D[11] ^ D[9] ^ D[7] ^ D[3] ^ D[2] ^ D[1] ^ + C[1] ^ C[2] ^ C[5] ^ C[6]; + NewCRC[4] = D[31] ^ D[30] ^ D[27] ^ D[26] ^ D[24] ^ D[19] ^ D[17] ^ + D[15] ^ D[14] ^ D[12] ^ D[10] ^ D[8] ^ D[4] ^ D[3] ^ D[2] ^ + C[0] ^ C[2] ^ C[3] ^ C[6] ^ C[7]; + NewCRC[5] = D[31] ^ D[28] ^ D[27] ^ D[25] ^ D[20] ^ D[18] ^ D[16] ^ + D[15] ^ D[13] ^ D[11] ^ D[9] ^ D[5] ^ D[4] ^ D[3] ^ C[1] ^ + C[3] ^ C[4] ^ C[7]; + NewCRC[6] = D[29] ^ D[28] ^ D[26] ^ D[21] ^ D[19] ^ D[17] ^ D[16] ^ + D[14] ^ D[12] ^ D[10] ^ D[6] ^ D[5] ^ D[4] ^ C[2] ^ C[4] ^ C[5]; + NewCRC[7] = D[30] ^ D[29] ^ D[27] ^ D[22] ^ D[20] ^ D[18] ^ D[17] ^ + D[15] ^ D[13] ^ D[11] ^ D[7] ^ D[6] ^ D[5] ^ C[3] ^ C[5] ^ C[6]; + + crc_res = 0; + for (i = 0; i < 8; i++) { + crc_res |= (NewCRC[i] << i); + } + + return crc_res; +} + +uint32_t +ecore_calc_crc32(uint32_t crc, uint8_t const *p, uint32_t len, uint32_t magic) +{ + int i; + while (len--) { + crc ^= *p++; + for (i = 0; i < 8; i++) + crc = (crc >> 1) ^ ((crc & 1) ? magic : 0); + } + return crc; +} diff --git a/src/spdk/dpdk/drivers/net/bnx2x/ecore_sp.h b/src/spdk/dpdk/drivers/net/bnx2x/ecore_sp.h new file mode 100644 index 000000000..cc1db377a --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnx2x/ecore_sp.h @@ -0,0 +1,1977 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2007-2013 Broadcom Corporation. + * + * Eric Davis + * David Christensen + * Gary Zambrano + * + * Copyright (c) 2013-2015 Brocade Communications Systems, Inc. + * Copyright (c) 2015-2018 Cavium Inc. + * All rights reserved. 
+ * www.cavium.com + */ + +#ifndef ECORE_SP_H +#define ECORE_SP_H + +#include + +#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN +#ifndef __LITTLE_ENDIAN +#define __LITTLE_ENDIAN RTE_LITTLE_ENDIAN +#endif +#undef __BIG_ENDIAN +#elif RTE_BYTE_ORDER == RTE_BIG_ENDIAN +#ifndef __BIG_ENDIAN +#define __BIG_ENDIAN RTE_BIG_ENDIAN +#endif +#undef __LITTLE_ENDIAN +#endif + +#include "ecore_mfw_req.h" +#include "ecore_fw_defs.h" +#include "ecore_hsi.h" +#include "ecore_reg.h" + +struct bnx2x_softc; +typedef rte_iova_t ecore_dma_addr_t; /* expected to be 64 bit wide */ +typedef volatile int ecore_atomic_t; + + +#define ETH_ALEN RTE_ETHER_ADDR_LEN /* 6 */ + +#define ECORE_SWCID_SHIFT 17 +#define ECORE_SWCID_MASK ((0x1 << ECORE_SWCID_SHIFT) - 1) + +#define ECORE_MC_HASH_SIZE 8 +#define ECORE_MC_HASH_OFFSET(sc, i) \ + (BAR_TSTRORM_INTMEM + \ + TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(FUNC_ID(sc)) + i*4) + +#define ECORE_MAX_MULTICAST 64 +#define ECORE_MAX_EMUL_MULTI 1 + +#define IRO sc->iro_array + +typedef rte_spinlock_t ECORE_MUTEX; +#define ECORE_MUTEX_INIT(_mutex) rte_spinlock_init(_mutex) +#define ECORE_MUTEX_LOCK(_mutex) rte_spinlock_lock(_mutex) +#define ECORE_MUTEX_UNLOCK(_mutex) rte_spinlock_unlock(_mutex) + +typedef rte_spinlock_t ECORE_MUTEX_SPIN; +#define ECORE_SPIN_LOCK_INIT(_spin, _sc) rte_spinlock_init(_spin) +#define ECORE_SPIN_LOCK_BH(_spin) rte_spinlock_lock(_spin) /* bh = bottom-half */ +#define ECORE_SPIN_UNLOCK_BH(_spin) rte_spinlock_unlock(_spin) /* bh = bottom-half */ + +#define ECORE_SMP_MB_AFTER_CLEAR_BIT() mb() +#define ECORE_SMP_MB_BEFORE_CLEAR_BIT() mb() +#define ECORE_SMP_MB() mb() +#define ECORE_SMP_RMB() rmb() +#define ECORE_SMP_WMB() wmb() +#define ECORE_MMIOWB() wmb() + +#define ECORE_SET_BIT_NA(bit, var) (*var |= (1 << bit)) +#define ECORE_CLEAR_BIT_NA(bit, var) (*var &= ~(1 << bit)) + +#define ECORE_TEST_BIT(bit, var) bnx2x_test_bit(bit, var) +#define ECORE_SET_BIT(bit, var) bnx2x_set_bit(bit, var) +#define ECORE_CLEAR_BIT(bit, var) bnx2x_clear_bit(bit, var) +#define ECORE_TEST_AND_CLEAR_BIT(bit, var) bnx2x_test_and_clear_bit(bit, var) + +#define atomic_load_acq_int (int)* +#define atomic_store_rel_int(a, v) (*a = v) +#define atomic_cmpset_acq_int(a, o, n) ((*a = (o & (n)) | (n)) ^ o) + +#define atomic_load_acq_long (long)* +#define atomic_store_rel_long(a, v) (*a = v) +#define atomic_set_acq_long(a, v) (*a |= v) +#define atomic_clear_acq_long(a, v) (*a &= ~v) +#define atomic_cmpset_acq_long(a, o, n) ((*a = (o & (n)) | (n)) ^ o) +#define atomic_subtract_acq_long(a, v) (*a -= v) +#define atomic_add_acq_long(a, v) (*a += v) + +#define ECORE_ATOMIC_READ(a) atomic_load_acq_int((volatile int *)a) +#define ECORE_ATOMIC_SET(a, v) atomic_store_rel_int((volatile int *)a, v) +#define ECORE_ATOMIC_CMPXCHG(a, o, n) bnx2x_cmpxchg((volatile int *)a, o, n) + +#define ECORE_RET_PENDING(pending_bit, pending) \ + (ECORE_TEST_BIT(pending_bit, pending) ? 
ECORE_PENDING : ECORE_SUCCESS) + +#define ECORE_SET_FLAG(value, mask, flag) \ + do { \ + (value) &= ~(mask); \ + (value) |= ((flag) << (mask##_SHIFT)); \ + } while (0) + +#define ECORE_GET_FLAG(value, mask) \ + (((value) &= (mask)) >> (mask##_SHIFT)) + +#define ECORE_MIGHT_SLEEP() + +#define ECORE_FCOE_CID(sc) ((sc)->fp[FCOE_IDX(sc)].cl_id) + +#define ECORE_MEMCMP(_a, _b, _s) memcmp(_a, _b, _s) +#define ECORE_MEMCPY(_a, _b, _s) rte_memcpy(_a, _b, _s) +#define ECORE_MEMSET(_a, _c, _s) memset(_a, _c, _s) + +#define ECORE_CPU_TO_LE16(x) htole16(x) +#define ECORE_CPU_TO_LE32(x) htole32(x) + +#define ECORE_WAIT(_s, _t) DELAY(1000) +#define ECORE_MSLEEP(_t) DELAY((_t) * 1000) + +#define ECORE_LIKELY(x) likely(x) +#define ECORE_UNLIKELY(x) unlikely(x) + +#define ECORE_ZALLOC(_size, _flags, _sc) \ + rte_zmalloc("", _size, RTE_CACHE_LINE_SIZE) + +#define ECORE_CALLOC(_len, _size, _flags, _sc) \ + rte_calloc("", _len, _size, RTE_CACHE_LINE_SIZE) + +#define ECORE_FREE(_s, _buf, _size) \ + rte_free(_buf) + +#define SC_ILT(sc) ((sc)->ilt) +#define ILOG2(x) bnx2x_ilog2(x) + +#define ECORE_ILT_ZALLOC(x, y, size) \ + do { \ + x = rte_malloc("", sizeof(struct bnx2x_dma), RTE_CACHE_LINE_SIZE); \ + if (x) { \ + if (bnx2x_dma_alloc((struct bnx2x_softc *)sc, \ + size, (struct bnx2x_dma *)x, \ + "ILT", RTE_CACHE_LINE_SIZE) != 0) { \ + rte_free(x); \ + x = NULL; \ + *(y) = 0; \ + } else { \ + *y = ((struct bnx2x_dma *)x)->paddr; \ + } \ + } \ + } while (0) + +#define ECORE_ILT_FREE(x, y, size) \ + do { \ + if (x) { \ + bnx2x_dma_free((struct bnx2x_dma *)x); \ + rte_free(x); \ + x = NULL; \ + y = 0; \ + } \ + } while (0) + +#define ECORE_IS_VALID_ETHER_ADDR(_mac) true + +#define ECORE_IS_MF_SD_MODE IS_MF_SD_MODE +#define ECORE_IS_MF_SI_MODE IS_MF_SI_MODE +#define ECORE_IS_MF_AFEX_MODE IS_MF_AFEX_MODE + +#define ECORE_SET_CTX_VALIDATION bnx2x_set_ctx_validation + +#define ECORE_UPDATE_COALESCE_SB_INDEX bnx2x_update_coalesce_sb_index + +#define ECORE_ALIGN(x, a) ((((x) + (a) - 1) / (a)) * (a)) + +#define ECORE_REG_WR_DMAE_LEN REG_WR_DMAE_LEN + +#define ECORE_PATH_ID SC_PATH +#define ECORE_PORT_ID SC_PORT +#define ECORE_FUNC_ID SC_FUNC +#define ECORE_ABS_FUNC_ID SC_ABS_FUNC + +#define CRCPOLY_LE 0xedb88320 +uint32_t ecore_calc_crc32(uint32_t crc, uint8_t const *p, + uint32_t len, uint32_t magic); + +uint8_t ecore_calc_crc8(uint32_t data, uint8_t crc); + + +static inline uint32_t +ECORE_CRC32_LE(uint32_t seed, uint8_t *mac, uint32_t len) +{ + return ecore_calc_crc32(seed, mac, len, CRCPOLY_LE); +} + +#define ecore_sp_post(_sc, _a, _b, _c, _d) \ + bnx2x_sp_post(_sc, _a, _b, U64_HI(_c), U64_LO(_c), _d) + +#define ECORE_DBG_BREAK_IF(exp) \ + do { \ + if (unlikely(exp)) { \ + rte_panic("ECORE"); \ + } \ + } while (0) + +#define ECORE_BUG() \ + do { \ + rte_panic("BUG (%s:%d)", __FILE__, __LINE__); \ + } while(0); + +#define ECORE_BUG_ON(exp) \ + do { \ + if (likely(exp)) { \ + rte_panic("BUG_ON (%s:%d)", __FILE__, __LINE__); \ + } \ + } while (0) + + +#define ECORE_MSG(sc, m, ...) 
\ + PMD_DRV_LOG(DEBUG, sc, m, ##__VA_ARGS__) + +typedef struct _ecore_list_entry_t +{ + struct _ecore_list_entry_t *next, *prev; +} ecore_list_entry_t; + +typedef struct ecore_list_t +{ + ecore_list_entry_t *head, *tail; + unsigned long cnt; +} ecore_list_t; + +/* initialize the list */ +#define ECORE_LIST_INIT(_list) \ + do { \ + (_list)->head = NULL; \ + (_list)->tail = NULL; \ + (_list)->cnt = 0; \ + } while (0) + +/* return true if the element is the last on the list */ +#define ECORE_LIST_IS_LAST(_elem, _list) \ + (_elem == (_list)->tail) + +/* return true if the list is empty */ +#define ECORE_LIST_IS_EMPTY(_list) \ + ((_list)->cnt == 0) + +/* return the first element */ +#define ECORE_LIST_FIRST_ENTRY(_list, cast, _link) \ + (cast *)((_list)->head) + +/* return the next element */ +#define ECORE_LIST_NEXT(_elem, _link, cast) \ + (cast *)((&((_elem)->_link))->next) + +/* push an element on the head of the list */ +#define ECORE_LIST_PUSH_HEAD(_elem, _list) \ + do { \ + (_elem)->prev = (ecore_list_entry_t *)0; \ + (_elem)->next = (_list)->head; \ + if ((_list)->tail == (ecore_list_entry_t *)0) { \ + (_list)->tail = (_elem); \ + } else { \ + (_list)->head->prev = (_elem); \ + } \ + (_list)->head = (_elem); \ + (_list)->cnt++; \ + } while (0) + +/* push an element on the tail of the list */ +#define ECORE_LIST_PUSH_TAIL(_elem, _list) \ + do { \ + (_elem)->next = (ecore_list_entry_t *)0; \ + (_elem)->prev = (_list)->tail; \ + if ((_list)->tail) { \ + (_list)->tail->next = (_elem); \ + } else { \ + (_list)->head = (_elem); \ + } \ + (_list)->tail = (_elem); \ + (_list)->cnt++; \ + } while (0) + +/* push list1 on the head of list2 and return with list1 as empty */ +#define ECORE_LIST_SPLICE_INIT(_list1, _list2) \ + do { \ + (_list1)->tail->next = (_list2)->head; \ + if ((_list2)->head) { \ + (_list2)->head->prev = (_list1)->tail; \ + } else { \ + (_list2)->tail = (_list1)->tail; \ + } \ + (_list2)->head = (_list1)->head; \ + (_list2)->cnt += (_list1)->cnt; \ + (_list1)->head = NULL; \ + (_list1)->tail = NULL; \ + (_list1)->cnt = 0; \ + } while (0) + +/* remove an element from the list */ +#define ECORE_LIST_REMOVE_ENTRY(_elem, _list) \ + do { \ + if ((_list)->head == (_elem)) { \ + if ((_list)->head) { \ + (_list)->head = (_list)->head->next; \ + if ((_list)->head) { \ + (_list)->head->prev = (ecore_list_entry_t *)0; \ + } else { \ + (_list)->tail = (ecore_list_entry_t *)0; \ + } \ + (_list)->cnt--; \ + } \ + } else if ((_list)->tail == (_elem)) { \ + if ((_list)->tail) { \ + (_list)->tail = (_list)->tail->prev; \ + if ((_list)->tail) { \ + (_list)->tail->next = (ecore_list_entry_t *)0; \ + } else { \ + (_list)->head = (ecore_list_entry_t *)0; \ + } \ + (_list)->cnt--; \ + } \ + } else { \ + (_elem)->prev->next = (_elem)->next; \ + (_elem)->next->prev = (_elem)->prev; \ + (_list)->cnt--; \ + } \ + } while (0) + +/* walk the list */ +#define ECORE_LIST_FOR_EACH_ENTRY(pos, _list, _link, cast) \ + for (pos = ECORE_LIST_FIRST_ENTRY(_list, cast, _link); \ + pos; \ + pos = ECORE_LIST_NEXT(pos, _link, cast)) + +/* walk the list (safely) */ +#define ECORE_LIST_FOR_EACH_ENTRY_SAFE(pos, n, _list, _link, cast) \ + for (pos = ECORE_LIST_FIRST_ENTRY(_list, cast, _lint), \ + n = (pos) ? ECORE_LIST_NEXT(pos, _link, cast) : NULL; \ + pos != NULL; \ + pos = (cast *)n, \ + n = (pos) ? 
ECORE_LIST_NEXT(pos, _link, cast) : NULL) + + +/* Manipulate a bit vector defined as an array of uint64_t */ + +/* Number of bits in one sge_mask array element */ +#define BIT_VEC64_ELEM_SZ 64 +#define BIT_VEC64_ELEM_SHIFT 6 +#define BIT_VEC64_ELEM_MASK ((uint64_t)BIT_VEC64_ELEM_SZ - 1) + +#define __BIT_VEC64_SET_BIT(el, bit) \ + do { \ + el = ((el) | ((uint64_t)0x1 << (bit))); \ + } while (0) + +#define __BIT_VEC64_CLEAR_BIT(el, bit) \ + do { \ + el = ((el) & (~((uint64_t)0x1 << (bit)))); \ + } while (0) + +#define BIT_VEC64_SET_BIT(vec64, idx) \ + __BIT_VEC64_SET_BIT((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT], \ + (idx) & BIT_VEC64_ELEM_MASK) + +#define BIT_VEC64_CLEAR_BIT(vec64, idx) \ + __BIT_VEC64_CLEAR_BIT((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT], \ + (idx) & BIT_VEC64_ELEM_MASK) + +#define BIT_VEC64_TEST_BIT(vec64, idx) \ + (((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT] >> \ + ((idx) & BIT_VEC64_ELEM_MASK)) & 0x1) + +/* + * Creates a bitmask of all ones in less significant bits. + * idx - index of the most significant bit in the created mask + */ +#define BIT_VEC64_ONES_MASK(idx) \ + (((uint64_t)0x1 << (((idx) & BIT_VEC64_ELEM_MASK) + 1)) - 1) +#define BIT_VEC64_ELEM_ONE_MASK ((uint64_t)(~0)) + +/* fill in a MAC address the way the FW likes it */ +static inline void +ecore_set_fw_mac_addr(uint16_t *fw_hi, + uint16_t *fw_mid, + uint16_t *fw_lo, + uint8_t *mac) +{ + ((uint8_t *)fw_hi)[0] = mac[1]; + ((uint8_t *)fw_hi)[1] = mac[0]; + ((uint8_t *)fw_mid)[0] = mac[3]; + ((uint8_t *)fw_mid)[1] = mac[2]; + ((uint8_t *)fw_lo)[0] = mac[5]; + ((uint8_t *)fw_lo)[1] = mac[4]; +} + + +enum ecore_status_t { + ECORE_EXISTS = -6, + ECORE_IO = -5, + ECORE_TIMEOUT = -4, + ECORE_INVAL = -3, + ECORE_BUSY = -2, + ECORE_NOMEM = -1, + ECORE_SUCCESS = 0, + /* PENDING is not an error and should be positive */ + ECORE_PENDING = 1, +}; + +enum { + SWITCH_UPDATE, + AFEX_UPDATE, +}; + +struct bnx2x_softc; +struct eth_context; + +/* Bits representing general command's configuration */ +enum { + RAMROD_TX, + RAMROD_RX, + /* Wait until all pending commands complete */ + RAMROD_COMP_WAIT, + /* Don't send a ramrod, only update a registry */ + RAMROD_DRV_CLR_ONLY, + /* Configure HW according to the current object state */ + RAMROD_RESTORE, + /* Execute the next command now */ + RAMROD_EXEC, + /* Don't add a new command and continue execution of posponed + * commands. If not set a new command will be added to the + * pending commands list. + */ + RAMROD_CONT, + /* If there is another pending ramrod, wait until it finishes and + * re-try to submit this one. This flag can be set only in sleepable + * context, and should not be set from the context that completes the + * ramrods as deadlock will occur. 
+ */ + RAMROD_RETRY, +}; + +typedef enum { + ECORE_OBJ_TYPE_RX, + ECORE_OBJ_TYPE_TX, + ECORE_OBJ_TYPE_RX_TX, +} ecore_obj_type; + +/* Public slow path states */ +enum { + ECORE_FILTER_MAC_PENDING, + ECORE_FILTER_VLAN_PENDING, + ECORE_FILTER_VLAN_MAC_PENDING, + ECORE_FILTER_RX_MODE_PENDING, + ECORE_FILTER_RX_MODE_SCHED, + ECORE_FILTER_ISCSI_ETH_START_SCHED, + ECORE_FILTER_ISCSI_ETH_STOP_SCHED, + ECORE_FILTER_FCOE_ETH_START_SCHED, + ECORE_FILTER_FCOE_ETH_STOP_SCHED, +#ifdef ECORE_CHAR_DEV + ECORE_FILTER_BYPASS_RX_MODE_PENDING, + ECORE_FILTER_BYPASS_MAC_PENDING, + ECORE_FILTER_BYPASS_RSS_CONF_PENDING, +#endif + ECORE_FILTER_MCAST_PENDING, + ECORE_FILTER_MCAST_SCHED, + ECORE_FILTER_RSS_CONF_PENDING, + ECORE_AFEX_FCOE_Q_UPDATE_PENDING, + ECORE_AFEX_PENDING_VIFSET_MCP_ACK, + ECORE_FILTER_VXLAN_PENDING, + ECORE_FILTER_PVLAN_PENDING +}; + +struct ecore_raw_obj { + uint8_t func_id; + + /* Queue params */ + uint8_t cl_id; + uint32_t cid; + + /* Ramrod data buffer params */ + void *rdata; + ecore_dma_addr_t rdata_mapping; + + /* Ramrod state params */ + int state; /* "ramrod is pending" state bit */ + unsigned long *pstate; /* pointer to state buffer */ + + ecore_obj_type obj_type; + + int (*wait_comp)(struct bnx2x_softc *sc, + struct ecore_raw_obj *o); + + bool (*check_pending)(struct ecore_raw_obj *o); + void (*clear_pending)(struct ecore_raw_obj *o); + void (*set_pending)(struct ecore_raw_obj *o); +}; + +/************************* VLAN-MAC commands related parameters ***************/ +struct ecore_mac_ramrod_data { + uint8_t mac[ETH_ALEN]; + uint8_t is_inner_mac; +}; + +struct ecore_vlan_ramrod_data { + uint16_t vlan; +}; + +struct ecore_vlan_mac_ramrod_data { + uint8_t mac[ETH_ALEN]; + uint8_t is_inner_mac; + uint16_t vlan; +}; + +struct ecore_vxlan_fltr_ramrod_data { + uint8_t innermac[ETH_ALEN]; + uint32_t vni; +}; + +union ecore_classification_ramrod_data { + struct ecore_mac_ramrod_data mac; + struct ecore_vlan_ramrod_data vlan; + struct ecore_vlan_mac_ramrod_data vlan_mac; + struct ecore_vxlan_fltr_ramrod_data vxlan_fltr; +}; + +/* VLAN_MAC commands */ +enum ecore_vlan_mac_cmd { + ECORE_VLAN_MAC_ADD, + ECORE_VLAN_MAC_DEL, + ECORE_VLAN_MAC_MOVE, +}; + +struct ecore_vlan_mac_data { + /* Requested command: ECORE_VLAN_MAC_XX */ + enum ecore_vlan_mac_cmd cmd; + /* used to contain the data related vlan_mac_flags bits from + * ramrod parameters. + */ + unsigned long vlan_mac_flags; + + /* Needed for MOVE command */ + struct ecore_vlan_mac_obj *target_obj; + + union ecore_classification_ramrod_data u; +}; + +/*************************** Exe Queue obj ************************************/ +union ecore_exe_queue_cmd_data { + struct ecore_vlan_mac_data vlan_mac; + + struct { + /* TODO */ + } mcast; +}; + +struct ecore_exeq_elem { + ecore_list_entry_t link; + + /* Length of this element in the exe_chunk. */ + int cmd_len; + + union ecore_exe_queue_cmd_data cmd_data; +}; + +union ecore_qable_obj; + +union ecore_exeq_comp_elem { + union event_ring_elem *elem; +}; + +struct ecore_exe_queue_obj; + +typedef int (*exe_q_validate)(struct bnx2x_softc *sc, + union ecore_qable_obj *o, + struct ecore_exeq_elem *elem); + +typedef int (*exe_q_remove)(struct bnx2x_softc *sc, + union ecore_qable_obj *o, + struct ecore_exeq_elem *elem); + +/* Return positive if entry was optimized, 0 - if not, negative + * in case of an error. 
+ */ +typedef int (*exe_q_optimize)(struct bnx2x_softc *sc, + union ecore_qable_obj *o, + struct ecore_exeq_elem *elem); +typedef int (*exe_q_execute)(struct bnx2x_softc *sc, + union ecore_qable_obj *o, + ecore_list_t *exe_chunk, + unsigned long *ramrod_flags); +typedef struct ecore_exeq_elem * + (*exe_q_get)(struct ecore_exe_queue_obj *o, + struct ecore_exeq_elem *elem); + +struct ecore_exe_queue_obj { + /* Commands pending for an execution. */ + ecore_list_t exe_queue; + + /* Commands pending for an completion. */ + ecore_list_t pending_comp; + + ECORE_MUTEX_SPIN lock; + + /* Maximum length of commands' list for one execution */ + int exe_chunk_len; + + union ecore_qable_obj *owner; + + /****** Virtual functions ******/ + /** + * Called before commands execution for commands that are really + * going to be executed (after 'optimize'). + * + * Must run under exe_queue->lock + */ + exe_q_validate validate; + + /** + * Called before removing pending commands, cleaning allocated + * resources (e.g., credits from validate) + */ + exe_q_remove remove; + + /** + * This will try to cancel the current pending commands list + * considering the new command. + * + * Returns the number of optimized commands or a negative error code + * + * Must run under exe_queue->lock + */ + exe_q_optimize optimize; + + /** + * Run the next commands chunk (owner specific). + */ + exe_q_execute execute; + + /** + * Return the exe_queue element containing the specific command + * if any. Otherwise return NULL. + */ + exe_q_get get; +}; +/***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/ +/* + * Element in the VLAN_MAC registry list having all current configured + * rules. + */ +struct ecore_vlan_mac_registry_elem { + ecore_list_entry_t link; + + /* Used to store the cam offset used for the mac/vlan/vlan-mac. + * Relevant for 57710 and 57711 only. VLANs and MACs share the + * same CAM for these chips. + */ + int cam_offset; + + /* Needed for DEL and RESTORE flows */ + unsigned long vlan_mac_flags; + + union ecore_classification_ramrod_data u; +}; + +/* Bits representing VLAN_MAC commands specific flags */ +enum { + ECORE_UC_LIST_MAC, + ECORE_ETH_MAC, + ECORE_ISCSI_ETH_MAC, + ECORE_NETQ_ETH_MAC, + ECORE_VLAN, + ECORE_DONT_CONSUME_CAM_CREDIT, + ECORE_DONT_CONSUME_CAM_CREDIT_DEST, +}; +/* When looking for matching filters, some flags are not interesting */ +#define ECORE_VLAN_MAC_CMP_MASK (1 << ECORE_UC_LIST_MAC | \ + 1 << ECORE_ETH_MAC | \ + 1 << ECORE_ISCSI_ETH_MAC | \ + 1 << ECORE_NETQ_ETH_MAC | \ + 1 << ECORE_VLAN) +#define ECORE_VLAN_MAC_CMP_FLAGS(flags) \ + ((flags) & ECORE_VLAN_MAC_CMP_MASK) + +struct ecore_vlan_mac_ramrod_params { + /* Object to run the command from */ + struct ecore_vlan_mac_obj *vlan_mac_obj; + + /* General command flags: COMP_WAIT, etc. */ + unsigned long ramrod_flags; + + /* Command specific configuration request */ + struct ecore_vlan_mac_data user_req; +}; + +struct ecore_vlan_mac_obj { + struct ecore_raw_obj raw; + + /* Bookkeeping list: will prevent the addition of already existing + * entries. + */ + ecore_list_t head; + /* Implement a simple reader/writer lock on the head list. + * all these fields should only be accessed under the exe_queue lock + */ + uint8_t head_reader; /* Num. of readers accessing head list */ + bool head_exe_request; /* Pending execution request. 
*/ + unsigned long saved_ramrod_flags; /* Ramrods of pending execution */ + + /* Execution queue interface instance */ + struct ecore_exe_queue_obj exe_queue; + + /* MACs credit pool */ + struct ecore_credit_pool_obj *macs_pool; + + /* VLANs credit pool */ + struct ecore_credit_pool_obj *vlans_pool; + + /* RAMROD command to be used */ + int ramrod_cmd; + + /* copy first n elements onto preallocated buffer + * + * @param n number of elements to get + * @param base buffer preallocated by the caller into which elements + * will be copied. Note elements are 4-byte aligned + * so buffer size must be able to accommodate the + * aligned elements. + * + * @return number of copied bytes + */ + + int (*get_n_elements)(struct bnx2x_softc *sc, + struct ecore_vlan_mac_obj *o, int n, uint8_t *base, + uint8_t stride, uint8_t size); + + /** + * Checks if ADD-ramrod with the given params may be performed. + * + * @return zero if the element may be added + */ + + int (*check_add)(struct bnx2x_softc *sc, + struct ecore_vlan_mac_obj *o, + union ecore_classification_ramrod_data *data); + + /** + * Checks if DEL-ramrod with the given params may be performed. + * + * @return true if the element may be deleted + */ + struct ecore_vlan_mac_registry_elem * + (*check_del)(struct bnx2x_softc *sc, + struct ecore_vlan_mac_obj *o, + union ecore_classification_ramrod_data *data); + + /** + * Checks if a MOVE-ramrod (from src_o to dst_o) with the given params + * may be performed. + * + * @return true if the element may be moved + */ + bool (*check_move)(struct bnx2x_softc *sc, + struct ecore_vlan_mac_obj *src_o, + struct ecore_vlan_mac_obj *dst_o, + union ecore_classification_ramrod_data *data); + + /** + * Update the relevant credit object(s) (consume/return + * correspondingly). + */ + bool (*get_credit)(struct ecore_vlan_mac_obj *o); + bool (*put_credit)(struct ecore_vlan_mac_obj *o); + bool (*get_cam_offset)(struct ecore_vlan_mac_obj *o, int *offset); + bool (*put_cam_offset)(struct ecore_vlan_mac_obj *o, int offset); + + /** + * Configures one rule in the ramrod data buffer. + */ + void (*set_one_rule)(struct bnx2x_softc *sc, + struct ecore_vlan_mac_obj *o, + struct ecore_exeq_elem *elem, int rule_idx, + int cam_offset); + + /** + * Delete all configured elements having the given + * vlan_mac_flags specification. Assumes there are no commands + * pending for execution. Will schedule all currently + * configured MACs/VLANs/VLAN-MACs matching the vlan_mac_flags + * specification for deletion and will use the given + * ramrod_flags for the last DEL operation. + * + * @param sc + * @param o + * @param ramrod_flags RAMROD_XX flags + * + * @return 0 if the last operation has completed successfully + * and there are no more elements left, positive value + * if there are pending for completion commands, + * negative value in case of failure. + */ + int (*delete_all)(struct bnx2x_softc *sc, + struct ecore_vlan_mac_obj *o, + unsigned long *vlan_mac_flags, + unsigned long *ramrod_flags); + + /** + * Reconfigures the next MAC/VLAN/VLAN-MAC element from the previously + * configured elements list. + * + * @param sc + * @param p Command parameters (RAMROD_COMP_WAIT bit in + * ramrod_flags is the only bit taken into account) + * @param ppos a pointer to the cookie that should be given back in the + * next call to make the function handle the next element. If + * *ppos is set to NULL it will restart the iterator. + * If returned *ppos == NULL this means that the last + * element has been handled.
+ * + * @return int + */ + int (*restore)(struct bnx2x_softc *sc, + struct ecore_vlan_mac_ramrod_params *p, + struct ecore_vlan_mac_registry_elem **ppos); + + /** + * Should be called on a completion arrival. + * + * @param sc + * @param o + * @param cqe Completion element we are handling + * @param ramrod_flags if RAMROD_CONT is set the next bulk of + * pending commands will be executed. + * RAMROD_DRV_CLR_ONLY and RAMROD_RESTORE + * may also be set if needed. + * + * @return 0 if there are neither pending nor waiting for + * completion commands. Positive value if there are + * pending for execution or for completion commands. + * Negative value in case of an error (including an + * error in the cqe). + */ + int (*complete)(struct bnx2x_softc *sc, struct ecore_vlan_mac_obj *o, + union event_ring_elem *cqe, + unsigned long *ramrod_flags); + + /** + * Wait for completion of all commands. Don't schedule new ones, + * just wait. It assumes that the completion code will schedule + * for new commands. + */ + int (*wait)(struct bnx2x_softc *sc, struct ecore_vlan_mac_obj *o); +}; + +enum { + ECORE_LLH_CAM_ISCSI_ETH_LINE = 0, + ECORE_LLH_CAM_ETH_LINE, + ECORE_LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE / 2 +}; + +void ecore_set_mac_in_nig(struct bnx2x_softc *sc, + bool add, unsigned char *dev_addr, int index); + +/** RX_MODE verbs:DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */ + +/* RX_MODE ramrod special flags: set in rx_mode_flags field in + * a ecore_rx_mode_ramrod_params. + */ +enum { + ECORE_RX_MODE_FCOE_ETH, + ECORE_RX_MODE_ISCSI_ETH, +}; + +enum { + ECORE_ACCEPT_UNICAST, + ECORE_ACCEPT_MULTICAST, + ECORE_ACCEPT_ALL_UNICAST, + ECORE_ACCEPT_ALL_MULTICAST, + ECORE_ACCEPT_BROADCAST, + ECORE_ACCEPT_UNMATCHED, + ECORE_ACCEPT_ANY_VLAN +}; + +struct ecore_rx_mode_ramrod_params { + struct ecore_rx_mode_obj *rx_mode_obj; + unsigned long *pstate; + int state; + uint8_t cl_id; + uint32_t cid; + uint8_t func_id; + unsigned long ramrod_flags; + unsigned long rx_mode_flags; + + /* rdata is either a pointer to eth_filter_rules_ramrod_data(e2) or to + * a tstorm_eth_mac_filter_config (e1x). + */ + void *rdata; + ecore_dma_addr_t rdata_mapping; + + /* Rx mode settings */ + unsigned long rx_accept_flags; + + /* internal switching settings */ + unsigned long tx_accept_flags; +}; + +struct ecore_rx_mode_obj { + int (*config_rx_mode)(struct bnx2x_softc *sc, + struct ecore_rx_mode_ramrod_params *p); + + int (*wait_comp)(struct bnx2x_softc *sc, + struct ecore_rx_mode_ramrod_params *p); +}; + +/********************** Set multicast group ***********************************/ + +struct ecore_mcast_list_elem { + ecore_list_entry_t link; + uint8_t *mac; +}; + +union ecore_mcast_config_data { + uint8_t *mac; + uint8_t bin; /* used in a RESTORE/SET flows */ +}; + +struct ecore_mcast_ramrod_params { + struct ecore_mcast_obj *mcast_obj; + + /* Relevant options are RAMROD_COMP_WAIT and RAMROD_DRV_CLR_ONLY */ + unsigned long ramrod_flags; + + ecore_list_t mcast_list; /* list of struct ecore_mcast_list_elem */ + /** TODO: + * - rename it to macs_num. + * - Add a new command type for handling pending commands + * (remove "zero semantics"). + * + * Length of mcast_list. If zero and ADD_CONT command - post + * pending commands. + */ + int mcast_list_len; +}; + +enum ecore_mcast_cmd { + ECORE_MCAST_CMD_ADD, + ECORE_MCAST_CMD_CONT, + ECORE_MCAST_CMD_DEL, + ECORE_MCAST_CMD_RESTORE, + + /* Following this, multicast configuration should equal to approx + * the set of MACs provided [i.e., remove all else]. 
+ * The two sub-commands are used internally to decide whether a given + * bin is to be added or removed + */ + ECORE_MCAST_CMD_SET, + ECORE_MCAST_CMD_SET_ADD, + ECORE_MCAST_CMD_SET_DEL, +}; + +struct ecore_mcast_obj { + struct ecore_raw_obj raw; + + union { + struct { + #define ECORE_MCAST_BINS_NUM 256 + #define ECORE_MCAST_VEC_SZ (ECORE_MCAST_BINS_NUM / 64) + uint64_t vec[ECORE_MCAST_VEC_SZ]; + + /** Number of BINs to clear. Should be updated + * immediately when a command arrives in order to + * properly create DEL commands. + */ + int num_bins_set; + } aprox_match; + + struct { + ecore_list_t macs; + int num_macs_set; + } exact_match; + } registry; + + /* Pending commands */ + ecore_list_t pending_cmds_head; + + /* A state that is set in raw.pstate, when there are pending commands */ + int sched_state; + + /* Maximal number of mcast MACs configured in one command */ + int max_cmd_len; + + /* Total number of currently pending MACs to configure: both + * in the pending commands list and in the current command. + */ + int total_pending_num; + + uint8_t engine_id; + + /** + * @param cmd command to execute (ECORE_MCAST_CMD_X, see above) + */ + int (*config_mcast)(struct bnx2x_softc *sc, + struct ecore_mcast_ramrod_params *p, + enum ecore_mcast_cmd cmd); + + /** + * Fills the ramrod data during the RESTORE flow. + * + * @param sc + * @param o + * @param start_idx Registry index to start from + * @param rdata_idx Index in the ramrod data to start from + * + * @return -1 if we handled the whole registry or index of the last + * handled registry element. + */ + int (*hdl_restore)(struct bnx2x_softc *sc, struct ecore_mcast_obj *o, + int start_bin, int *rdata_idx); + + int (*enqueue_cmd)(struct bnx2x_softc *sc, struct ecore_mcast_obj *o, + struct ecore_mcast_ramrod_params *p, + enum ecore_mcast_cmd cmd); + + void (*set_one_rule)(struct bnx2x_softc *sc, + struct ecore_mcast_obj *o, int idx, + union ecore_mcast_config_data *cfg_data, + enum ecore_mcast_cmd cmd); + + /** Checks if there are more mcast MACs to be set or a previous + * command is still pending. + */ + bool (*check_pending)(struct ecore_mcast_obj *o); + + /** + * Set/Clear/Check SCHEDULED state of the object + */ + void (*set_sched)(struct ecore_mcast_obj *o); + void (*clear_sched)(struct ecore_mcast_obj *o); + bool (*check_sched)(struct ecore_mcast_obj *o); + + /* Wait until all pending commands complete */ + int (*wait_comp)(struct bnx2x_softc *sc, struct ecore_mcast_obj *o); + + /** + * Handle the internal object counters needed for proper + * commands handling. Checks that the provided parameters are + * feasible. + */ + int (*validate)(struct bnx2x_softc *sc, + struct ecore_mcast_ramrod_params *p, + enum ecore_mcast_cmd cmd); + + /** + * Restore the values of internal counters in case of a failure. + */ + void (*revert)(struct bnx2x_softc *sc, + struct ecore_mcast_ramrod_params *p, + int old_num_bins, + enum ecore_mcast_cmd cmd); + + int (*get_registry_size)(struct ecore_mcast_obj *o); + void (*set_registry_size)(struct ecore_mcast_obj *o, int n); +}; + +/*************************** Credit handling **********************************/ +struct ecore_credit_pool_obj { + + /* Current amount of credit in the pool */ + ecore_atomic_t credit; + + /* Maximum allowed credit. put() will check against it. */ + int pool_sz; + + /* Allocate a pool table statically. + * + * Currently the maximum allowed size is MAX_MAC_CREDIT_E2(272) + * + * The set bit in the table will mean that the entry is available. 
+ */ +#define ECORE_POOL_VEC_SIZE (MAX_MAC_CREDIT_E2 / 64) + uint64_t pool_mirror[ECORE_POOL_VEC_SIZE]; + + /* Base pool offset (initialized differently */ + int base_pool_offset; + + /** + * Get the next free pool entry. + * + * @return true if there was a free entry in the pool + */ + bool (*get_entry)(struct ecore_credit_pool_obj *o, int *entry); + + /** + * Return the entry back to the pool. + * + * @return true if entry is legal and has been successfully + * returned to the pool. + */ + bool (*put_entry)(struct ecore_credit_pool_obj *o, int entry); + + /** + * Get the requested amount of credit from the pool. + * + * @param cnt Amount of requested credit + * @return true if the operation is successful + */ + bool (*get)(struct ecore_credit_pool_obj *o, int cnt); + + /** + * Returns the credit to the pool. + * + * @param cnt Amount of credit to return + * @return true if the operation is successful + */ + bool (*put)(struct ecore_credit_pool_obj *o, int cnt); + + /** + * Reads the current amount of credit. + */ + int (*check)(struct ecore_credit_pool_obj *o); +}; + +/*************************** RSS configuration ********************************/ +enum { + /* RSS_MODE bits are mutually exclusive */ + ECORE_RSS_MODE_DISABLED, + ECORE_RSS_MODE_REGULAR, + + ECORE_RSS_SET_SRCH, /* Setup searcher, E1x specific flag */ + + ECORE_RSS_IPV4, + ECORE_RSS_IPV4_TCP, + ECORE_RSS_IPV4_UDP, + ECORE_RSS_IPV6, + ECORE_RSS_IPV6_TCP, + ECORE_RSS_IPV6_UDP, + + ECORE_RSS_IPV4_VXLAN, + ECORE_RSS_IPV6_VXLAN, + ECORE_RSS_TUNN_INNER_HDRS, +}; + +struct ecore_config_rss_params { + struct ecore_rss_config_obj *rss_obj; + + /* may have RAMROD_COMP_WAIT set only */ + unsigned long ramrod_flags; + + /* ECORE_RSS_X bits */ + unsigned long rss_flags; + + /* Number hash bits to take into an account */ + uint8_t rss_result_mask; + + /* Indirection table */ + uint8_t ind_table[T_ETH_INDIRECTION_TABLE_SIZE]; + + /* RSS hash values */ + uint32_t rss_key[10]; + + /* valid only if ECORE_RSS_UPDATE_TOE is set */ + uint16_t toe_rss_bitmap; +}; + +struct ecore_rss_config_obj { + struct ecore_raw_obj raw; + + /* RSS engine to use */ + uint8_t engine_id; + + /* Last configured indirection table */ + uint8_t ind_table[T_ETH_INDIRECTION_TABLE_SIZE]; + + /* flags for enabling 4-tupple hash on UDP */ + uint8_t udp_rss_v4; + uint8_t udp_rss_v6; + + int (*config_rss)(struct bnx2x_softc *sc, + struct ecore_config_rss_params *p); +}; + +/*********************** Queue state update ***********************************/ + +/* UPDATE command options */ +enum { + ECORE_Q_UPDATE_IN_VLAN_REM, + ECORE_Q_UPDATE_IN_VLAN_REM_CHNG, + ECORE_Q_UPDATE_OUT_VLAN_REM, + ECORE_Q_UPDATE_OUT_VLAN_REM_CHNG, + ECORE_Q_UPDATE_ANTI_SPOOF, + ECORE_Q_UPDATE_ANTI_SPOOF_CHNG, + ECORE_Q_UPDATE_ACTIVATE, + ECORE_Q_UPDATE_ACTIVATE_CHNG, + ECORE_Q_UPDATE_DEF_VLAN_EN, + ECORE_Q_UPDATE_DEF_VLAN_EN_CHNG, + ECORE_Q_UPDATE_SILENT_VLAN_REM_CHNG, + ECORE_Q_UPDATE_SILENT_VLAN_REM, + ECORE_Q_UPDATE_TX_SWITCHING_CHNG, + ECORE_Q_UPDATE_TX_SWITCHING, + ECORE_Q_UPDATE_PTP_PKTS_CHNG, + ECORE_Q_UPDATE_PTP_PKTS, +}; + +/* Allowed Queue states */ +enum ecore_q_state { + ECORE_Q_STATE_RESET, + ECORE_Q_STATE_INITIALIZED, + ECORE_Q_STATE_ACTIVE, + ECORE_Q_STATE_MULTI_COS, + ECORE_Q_STATE_MCOS_TERMINATED, + ECORE_Q_STATE_INACTIVE, + ECORE_Q_STATE_STOPPED, + ECORE_Q_STATE_TERMINATED, + ECORE_Q_STATE_FLRED, + ECORE_Q_STATE_MAX, +}; + +/* Allowed Queue states */ +enum ecore_q_logical_state { + ECORE_Q_LOGICAL_STATE_ACTIVE, + ECORE_Q_LOGICAL_STATE_STOPPED, +}; + +/* Allowed commands */ +enum 
ecore_queue_cmd { + ECORE_Q_CMD_INIT, + ECORE_Q_CMD_SETUP, + ECORE_Q_CMD_SETUP_TX_ONLY, + ECORE_Q_CMD_DEACTIVATE, + ECORE_Q_CMD_ACTIVATE, + ECORE_Q_CMD_UPDATE, + ECORE_Q_CMD_UPDATE_TPA, + ECORE_Q_CMD_HALT, + ECORE_Q_CMD_CFC_DEL, + ECORE_Q_CMD_TERMINATE, + ECORE_Q_CMD_EMPTY, + ECORE_Q_CMD_MAX, +}; + +/* queue SETUP + INIT flags */ +enum { + ECORE_Q_FLG_TPA, + ECORE_Q_FLG_TPA_IPV6, + ECORE_Q_FLG_TPA_GRO, + ECORE_Q_FLG_STATS, + ECORE_Q_FLG_ZERO_STATS, + ECORE_Q_FLG_ACTIVE, + ECORE_Q_FLG_OV, + ECORE_Q_FLG_VLAN, + ECORE_Q_FLG_COS, + ECORE_Q_FLG_HC, + ECORE_Q_FLG_HC_EN, + ECORE_Q_FLG_DHC, + ECORE_Q_FLG_OOO, + ECORE_Q_FLG_FCOE, + ECORE_Q_FLG_LEADING_RSS, + ECORE_Q_FLG_MCAST, + ECORE_Q_FLG_DEF_VLAN, + ECORE_Q_FLG_TX_SWITCH, + ECORE_Q_FLG_TX_SEC, + ECORE_Q_FLG_ANTI_SPOOF, + ECORE_Q_FLG_SILENT_VLAN_REM, + ECORE_Q_FLG_FORCE_DEFAULT_PRI, + ECORE_Q_FLG_REFUSE_OUTBAND_VLAN, + ECORE_Q_FLG_PCSUM_ON_PKT, + ECORE_Q_FLG_TUN_INC_INNER_IP_ID, + ECORE_Q_FLG_TPA_VLAN_DIS, +}; + +/* Queue type options: queue type may be a combination of below. */ +enum ecore_q_type { + ECORE_Q_TYPE_FWD, + /** TODO: Consider moving both these flags into the init() + * ramrod params. + */ + ECORE_Q_TYPE_HAS_RX, + ECORE_Q_TYPE_HAS_TX, +}; + +#define ECORE_PRIMARY_CID_INDEX 0 +#define ECORE_MULTI_TX_COS_E1X 3 /* QM only */ +#define ECORE_MULTI_TX_COS_E2_E3A0 2 +#define ECORE_MULTI_TX_COS_E3B0 3 +#define ECORE_MULTI_TX_COS 3 /* Maximum possible */ +#define MAC_PAD (ECORE_ALIGN(ETH_ALEN, sizeof(uint32_t)) - ETH_ALEN) +/* DMAE channel to be used by FW for timesync workaroun. A driver that sends + * timesync-related ramrods must not use this DMAE command ID. + */ +#define FW_DMAE_CMD_ID 6 + +struct ecore_queue_init_params { + struct { + unsigned long flags; + uint16_t hc_rate; + uint8_t fw_sb_id; + uint8_t sb_cq_index; + } tx; + + struct { + unsigned long flags; + uint16_t hc_rate; + uint8_t fw_sb_id; + uint8_t sb_cq_index; + } rx; + + /* CID context in the host memory */ + struct eth_context *cxts[ECORE_MULTI_TX_COS]; + + /* maximum number of cos supported by hardware */ + uint8_t max_cos; +}; + +struct ecore_queue_terminate_params { + /* index within the tx_only cids of this queue object */ + uint8_t cid_index; +}; + +struct ecore_queue_cfc_del_params { + /* index within the tx_only cids of this queue object */ + uint8_t cid_index; +}; + +struct ecore_queue_update_params { + unsigned long update_flags; /* ECORE_Q_UPDATE_XX bits */ + uint16_t def_vlan; + uint16_t silent_removal_value; + uint16_t silent_removal_mask; +/* index within the tx_only cids of this queue object */ + uint8_t cid_index; +}; + +struct ecore_queue_update_tpa_params { + ecore_dma_addr_t sge_map; + uint8_t update_ipv4; + uint8_t update_ipv6; + uint8_t max_tpa_queues; + uint8_t max_sges_pkt; + uint8_t complete_on_both_clients; + uint8_t dont_verify_thr; + uint8_t tpa_mode; + uint8_t _pad; + + uint16_t sge_buff_sz; + uint16_t max_agg_sz; + + uint16_t sge_pause_thr_low; + uint16_t sge_pause_thr_high; + + uint8_t disable_tpa_over_vlan; +}; + +struct rxq_pause_params { + uint16_t bd_th_lo; + uint16_t bd_th_hi; + uint16_t rcq_th_lo; + uint16_t rcq_th_hi; + uint16_t sge_th_lo; /* valid if ECORE_Q_FLG_TPA */ + uint16_t sge_th_hi; /* valid if ECORE_Q_FLG_TPA */ + uint16_t pri_map; +}; + +/* general */ +struct ecore_general_setup_params { + /* valid if ECORE_Q_FLG_STATS */ + uint8_t stat_id; + + uint8_t spcl_id; + uint16_t mtu; + uint8_t cos; + + uint8_t fp_hsi; +}; + +struct ecore_rxq_setup_params { + /* dma */ + ecore_dma_addr_t dscr_map; + ecore_dma_addr_t sge_map; + 
ecore_dma_addr_t rcq_map; + ecore_dma_addr_t rcq_np_map; + + uint16_t drop_flags; + uint16_t buf_sz; + uint8_t fw_sb_id; + uint8_t cl_qzone_id; + + /* valid if ECORE_Q_FLG_TPA */ + uint16_t tpa_agg_sz; + uint16_t sge_buf_sz; + uint8_t max_sges_pkt; + uint8_t max_tpa_queues; + uint8_t rss_engine_id; + + /* valid if ECORE_Q_FLG_MCAST */ + uint8_t mcast_engine_id; + + uint8_t cache_line_log; + + uint8_t sb_cq_index; + + /* valid if ECORE_Q_FLG_SILENT_VLAN_REM */ + uint16_t silent_removal_value; + uint16_t silent_removal_mask; +}; + +struct ecore_txq_setup_params { + /* dma */ + ecore_dma_addr_t dscr_map; + + uint8_t fw_sb_id; + uint8_t sb_cq_index; + uint8_t cos; /* valid if ECORE_Q_FLG_COS */ + uint16_t traffic_type; + /* equals to the leading rss client id, used for TX classification*/ + uint8_t tss_leading_cl_id; + + /* valid if ECORE_Q_FLG_DEF_VLAN */ + uint16_t default_vlan; +}; + +struct ecore_queue_setup_params { + struct ecore_general_setup_params gen_params; + struct ecore_txq_setup_params txq_params; + struct ecore_rxq_setup_params rxq_params; + struct rxq_pause_params pause_params; + unsigned long flags; +}; + +struct ecore_queue_setup_tx_only_params { + struct ecore_general_setup_params gen_params; + struct ecore_txq_setup_params txq_params; + unsigned long flags; + /* index within the tx_only cids of this queue object */ + uint8_t cid_index; +}; + +struct ecore_queue_state_params { + struct ecore_queue_sp_obj *q_obj; + + /* Current command */ + enum ecore_queue_cmd cmd; + + /* may have RAMROD_COMP_WAIT set only */ + unsigned long ramrod_flags; + + /* Params according to the current command */ + union { + struct ecore_queue_update_params update; + struct ecore_queue_update_tpa_params update_tpa; + struct ecore_queue_setup_params setup; + struct ecore_queue_init_params init; + struct ecore_queue_setup_tx_only_params tx_only; + struct ecore_queue_terminate_params terminate; + struct ecore_queue_cfc_del_params cfc_del; + } params; +}; + +struct ecore_viflist_params { + uint8_t echo_res; + uint8_t func_bit_map_res; +}; + +struct ecore_queue_sp_obj { + uint32_t cids[ECORE_MULTI_TX_COS]; + uint8_t cl_id; + uint8_t func_id; + + /* number of traffic classes supported by queue. + * The primary connection of the queue supports the first traffic + * class. Any further traffic class is supported by a tx-only + * connection. + * + * Therefore max_cos is also a number of valid entries in the cids + * array. + */ + uint8_t max_cos; + uint8_t num_tx_only, next_tx_only; + + enum ecore_q_state state, next_state; + + /* bits from enum ecore_q_type */ + unsigned long type; + + /* ECORE_Q_CMD_XX bits. This object implements "one + * pending" paradigm but for debug and tracing purposes it's + * more convenient to have different bits for different + * commands. + */ + unsigned long pending; + + /* Buffer to use as a ramrod data and its mapping */ + void *rdata; + ecore_dma_addr_t rdata_mapping; + + /** + * Performs one state change according to the given parameters. + * + * @return 0 in case of success and negative value otherwise. + */ + int (*send_cmd)(struct bnx2x_softc *sc, + struct ecore_queue_state_params *params); + + /** + * Sets the pending bit according to the requested transition. + */ + int (*set_pending)(struct ecore_queue_sp_obj *o, + struct ecore_queue_state_params *params); + + /** + * Checks that the requested state transition is legal. 
+ */ + int (*check_transition)(struct bnx2x_softc *sc, + struct ecore_queue_sp_obj *o, + struct ecore_queue_state_params *params); + + /** + * Completes the pending command. + */ + int (*complete_cmd)(struct bnx2x_softc *sc, + struct ecore_queue_sp_obj *o, + enum ecore_queue_cmd); + + int (*wait_comp)(struct bnx2x_softc *sc, + struct ecore_queue_sp_obj *o, + enum ecore_queue_cmd cmd); +}; + +/********************** Function state update *********************************/ + +/* UPDATE command options */ +enum { + ECORE_F_UPDATE_TX_SWITCH_SUSPEND_CHNG, + ECORE_F_UPDATE_TX_SWITCH_SUSPEND, + ECORE_F_UPDATE_SD_VLAN_TAG_CHNG, + ECORE_F_UPDATE_SD_VLAN_ETH_TYPE_CHNG, + ECORE_F_UPDATE_VLAN_FORCE_PRIO_CHNG, + ECORE_F_UPDATE_VLAN_FORCE_PRIO_FLAG, + ECORE_F_UPDATE_TUNNEL_CFG_CHNG, + ECORE_F_UPDATE_TUNNEL_INNER_CLSS_L2GRE, + ECORE_F_UPDATE_TUNNEL_INNER_CLSS_VXLAN, + ECORE_F_UPDATE_TUNNEL_INNER_CLSS_L2GENEVE, + ECORE_F_UPDATE_TUNNEL_INNER_RSS, + ECORE_F_UPDATE_TUNNEL_INNER_CLSS_VXLAN_INNER_VNI, + ECORE_F_UPDATE_VLAN_FILTERING_PVID_CHNG, +}; + +/* Allowed Function states */ +enum ecore_func_state { + ECORE_F_STATE_RESET, + ECORE_F_STATE_INITIALIZED, + ECORE_F_STATE_STARTED, + ECORE_F_STATE_TX_STOPPED, + ECORE_F_STATE_MAX, +}; + +/* Allowed Function commands */ +enum ecore_func_cmd { + ECORE_F_CMD_HW_INIT, + ECORE_F_CMD_START, + ECORE_F_CMD_STOP, + ECORE_F_CMD_HW_RESET, + ECORE_F_CMD_AFEX_UPDATE, + ECORE_F_CMD_AFEX_VIFLISTS, + ECORE_F_CMD_TX_STOP, + ECORE_F_CMD_TX_START, + ECORE_F_CMD_SWITCH_UPDATE, + ECORE_F_CMD_SET_TIMESYNC, + ECORE_F_CMD_MAX, +}; + +struct ecore_func_hw_init_params { + /* A load phase returned by MCP. + * + * May be: + * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP + * FW_MSG_CODE_DRV_LOAD_COMMON + * FW_MSG_CODE_DRV_LOAD_PORT + * FW_MSG_CODE_DRV_LOAD_FUNCTION + */ + uint32_t load_phase; +}; + +struct ecore_func_hw_reset_params { + /* A load phase returned by MCP. 
+ * + * May be: + * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP + * FW_MSG_CODE_DRV_LOAD_COMMON + * FW_MSG_CODE_DRV_LOAD_PORT + * FW_MSG_CODE_DRV_LOAD_FUNCTION + */ + uint32_t reset_phase; +}; + +struct ecore_func_start_params { + /* Multi Function mode: + * - Single Function + * - Switch Dependent + * - Switch Independent + */ + uint16_t mf_mode; + + /* Switch Dependent mode outer VLAN tag */ + uint16_t sd_vlan_tag; + + /* Function cos mode */ + uint8_t network_cos_mode; + + /* DMAE command id to be used for FW DMAE transactions */ + uint8_t dmae_cmd_id; + + /* UDP dest port for VXLAN */ + uint16_t vxlan_dst_port; + + /* UDP dest port for Geneve */ + uint16_t geneve_dst_port; + + /* Enable inner Rx classifications for L2GRE packets */ + uint8_t inner_clss_l2gre; + + /* Enable inner Rx classifications for L2-Geneve packets */ + uint8_t inner_clss_l2geneve; + + /* Enable inner Rx classification for vxlan packets */ + uint8_t inner_clss_vxlan; + + /* Enable RSS according to inner header */ + uint8_t inner_rss; + + /** Allows accepting of packets failing MF classification, possibly + * only matching a given ethertype + */ + uint8_t class_fail; + uint16_t class_fail_ethtype; + + /* Override priority of output packets */ + uint8_t sd_vlan_force_pri; + uint8_t sd_vlan_force_pri_val; + + /* Replace vlan's ethertype */ + uint16_t sd_vlan_eth_type; + + /* Prevent inner vlans from being added by FW */ + uint8_t no_added_tags; + + /* Inner-to-Outer vlan priority mapping */ + uint8_t c2s_pri[MAX_VLAN_PRIORITIES]; + uint8_t c2s_pri_default; + uint8_t c2s_pri_valid; + + /* TX Vlan filtering configuration */ + uint8_t tx_vlan_filtering_enable; + uint8_t tx_vlan_filtering_use_pvid; +}; + +struct ecore_func_switch_update_params { + unsigned long changes; /* ECORE_F_UPDATE_XX bits */ + uint16_t vlan; + uint16_t vlan_eth_type; + uint8_t vlan_force_prio; + uint16_t vxlan_dst_port; + uint16_t geneve_dst_port; +}; + +struct ecore_func_afex_update_params { + uint16_t vif_id; + uint16_t afex_default_vlan; + uint8_t allowed_priorities; +}; + +struct ecore_func_afex_viflists_params { + uint16_t vif_list_index; + uint8_t func_bit_map; + uint8_t afex_vif_list_command; + uint8_t func_to_clear; +}; + +struct ecore_func_tx_start_params { + struct priority_cos traffic_type_to_priority_cos[MAX_TRAFFIC_TYPES]; + uint8_t dcb_enabled; + uint8_t dcb_version; + uint8_t dont_add_pri_0_en; + uint8_t dcb_outer_pri[MAX_TRAFFIC_TYPES]; +}; + +struct ecore_func_set_timesync_params { + /* Reset, set or keep the current drift value */ + uint8_t drift_adjust_cmd; + /* Dec, inc or keep the current offset */ + uint8_t offset_cmd; + /* Drift value direction */ + uint8_t add_sub_drift_adjust_value; + /* Drift, period and offset values to be used according to the commands + * above. 
+ */ + uint8_t drift_adjust_value; + uint32_t drift_adjust_period; + uint64_t offset_delta; +}; + +struct ecore_func_state_params { + struct ecore_func_sp_obj *f_obj; + + /* Current command */ + enum ecore_func_cmd cmd; + + /* may have RAMROD_COMP_WAIT set only */ + unsigned long ramrod_flags; + + /* Params according to the current command */ + union { + struct ecore_func_hw_init_params hw_init; + struct ecore_func_hw_reset_params hw_reset; + struct ecore_func_start_params start; + struct ecore_func_switch_update_params switch_update; + struct ecore_func_afex_update_params afex_update; + struct ecore_func_afex_viflists_params afex_viflists; + struct ecore_func_tx_start_params tx_start; + struct ecore_func_set_timesync_params set_timesync; + } params; +}; + +struct ecore_func_sp_drv_ops { + /* Init tool + runtime initialization: + * - Common Chip + * - Common (per Path) + * - Port + * - Function phases + */ + int (*init_hw_cmn_chip)(struct bnx2x_softc *sc); + int (*init_hw_cmn)(struct bnx2x_softc *sc); + int (*init_hw_port)(struct bnx2x_softc *sc); + int (*init_hw_func)(struct bnx2x_softc *sc); + + /* Reset Function HW: Common, Port, Function phases. */ + void (*reset_hw_cmn)(struct bnx2x_softc *sc); + void (*reset_hw_port)(struct bnx2x_softc *sc); + void (*reset_hw_func)(struct bnx2x_softc *sc); + + /* Init/Free GUNZIP resources */ + int (*gunzip_init)(struct bnx2x_softc *sc); + void (*gunzip_end)(struct bnx2x_softc *sc); + + /* Prepare/Release FW resources */ + int (*init_fw)(struct bnx2x_softc *sc); + void (*release_fw)(struct bnx2x_softc *sc); +}; + +struct ecore_func_sp_obj { + enum ecore_func_state state, next_state; + + /* ECORE_FUNC_CMD_XX bits. This object implements "one + * pending" paradigm but for debug and tracing purposes it's + * more convenient to have different bits for different + * commands. + */ + unsigned long pending; + + /* Buffer to use as a ramrod data and its mapping */ + void *rdata; + ecore_dma_addr_t rdata_mapping; + + /* Buffer to use as a afex ramrod data and its mapping. + * This can't be same rdata as above because afex ramrod requests + * can arrive to the object in parallel to other ramrod requests. + */ + void *afex_rdata; + ecore_dma_addr_t afex_rdata_mapping; + + /* this mutex validates that when pending flag is taken, the next + * ramrod to be sent will be the one set the pending bit + */ + ECORE_MUTEX one_pending_mutex; + + /* Driver interface */ + struct ecore_func_sp_drv_ops *drv; + + /** + * Performs one state change according to the given parameters. + * + * @return 0 in case of success and negative value otherwise. + */ + int (*send_cmd)(struct bnx2x_softc *sc, + struct ecore_func_state_params *params); + + /** + * Checks that the requested state transition is legal. + */ + int (*check_transition)(struct bnx2x_softc *sc, + struct ecore_func_sp_obj *o, + struct ecore_func_state_params *params); + + /** + * Completes the pending command. 
+ */ + int (*complete_cmd)(struct bnx2x_softc *sc, + struct ecore_func_sp_obj *o, + enum ecore_func_cmd cmd); + + int (*wait_comp)(struct bnx2x_softc *sc, struct ecore_func_sp_obj *o, + enum ecore_func_cmd cmd); +}; + +/********************** Interfaces ********************************************/ +/* Queueable objects set */ +union ecore_qable_obj { + struct ecore_vlan_mac_obj vlan_mac; +}; +/************** Function state update *********/ +void ecore_init_func_obj(struct bnx2x_softc *sc, + struct ecore_func_sp_obj *obj, + void *rdata, ecore_dma_addr_t rdata_mapping, + void *afex_rdata, ecore_dma_addr_t afex_rdata_mapping, + struct ecore_func_sp_drv_ops *drv_iface); + +int ecore_func_state_change(struct bnx2x_softc *sc, + struct ecore_func_state_params *params); + +enum ecore_func_state ecore_func_get_state(struct bnx2x_softc *sc, + struct ecore_func_sp_obj *o); +/******************* Queue State **************/ +void ecore_init_queue_obj(struct bnx2x_softc *sc, + struct ecore_queue_sp_obj *obj, uint8_t cl_id, uint32_t *cids, + uint8_t cid_cnt, uint8_t func_id, void *rdata, + ecore_dma_addr_t rdata_mapping, unsigned long type); + +int ecore_queue_state_change(struct bnx2x_softc *sc, + struct ecore_queue_state_params *params); + +int ecore_get_q_logical_state(struct bnx2x_softc *sc, + struct ecore_queue_sp_obj *obj); + +/********************* VLAN-MAC ****************/ +void ecore_init_mac_obj(struct bnx2x_softc *sc, + struct ecore_vlan_mac_obj *mac_obj, + uint8_t cl_id, uint32_t cid, uint8_t func_id, void *rdata, + ecore_dma_addr_t rdata_mapping, int state, + unsigned long *pstate, ecore_obj_type type, + struct ecore_credit_pool_obj *macs_pool); + +void ecore_init_vlan_obj(struct bnx2x_softc *sc, + struct ecore_vlan_mac_obj *vlan_obj, + uint8_t cl_id, uint32_t cid, uint8_t func_id, + void *rdata, + ecore_dma_addr_t rdata_mapping, int state, + unsigned long *pstate, ecore_obj_type type, + struct ecore_credit_pool_obj *vlans_pool); + +void ecore_init_vlan_mac_obj(struct bnx2x_softc *sc, + struct ecore_vlan_mac_obj *vlan_mac_obj, + uint8_t cl_id, uint32_t cid, uint8_t func_id, + void *rdata, + ecore_dma_addr_t rdata_mapping, int state, + unsigned long *pstate, ecore_obj_type type, + struct ecore_credit_pool_obj *macs_pool, + struct ecore_credit_pool_obj *vlans_pool); + +void ecore_init_vxlan_fltr_obj(struct bnx2x_softc *sc, + struct ecore_vlan_mac_obj *vlan_mac_obj, + uint8_t cl_id, uint32_t cid, uint8_t func_id, + void *rdata, + ecore_dma_addr_t rdata_mapping, int state, + unsigned long *pstate, ecore_obj_type type, + struct ecore_credit_pool_obj *macs_pool, + struct ecore_credit_pool_obj *vlans_pool); + +int ecore_vlan_mac_h_read_lock(struct bnx2x_softc *sc, + struct ecore_vlan_mac_obj *o); +void ecore_vlan_mac_h_read_unlock(struct bnx2x_softc *sc, + struct ecore_vlan_mac_obj *o); +int ecore_vlan_mac_h_write_lock(struct bnx2x_softc *sc, + struct ecore_vlan_mac_obj *o); +void ecore_vlan_mac_h_write_unlock(struct bnx2x_softc *sc, + struct ecore_vlan_mac_obj *o); +int ecore_config_vlan_mac(struct bnx2x_softc *sc, + struct ecore_vlan_mac_ramrod_params *p); + +int ecore_vlan_mac_move(struct bnx2x_softc *sc, + struct ecore_vlan_mac_ramrod_params *p, + struct ecore_vlan_mac_obj *dest_o); + +/********************* RX MODE ****************/ + +void ecore_init_rx_mode_obj(struct bnx2x_softc *sc, + struct ecore_rx_mode_obj *o); + +/** + * ecore_config_rx_mode - Send and RX_MODE ramrod according to the provided parameters. 
+ * + * @p: Command parameters + * + * Return: 0 - if the operation was successful and there are no pending completions, + * positive number - if there are pending completions, + * negative - if there were errors + */ +int ecore_config_rx_mode(struct bnx2x_softc *sc, + struct ecore_rx_mode_ramrod_params *p); + +/****************** MULTICASTS ****************/ + +void ecore_init_mcast_obj(struct bnx2x_softc *sc, + struct ecore_mcast_obj *mcast_obj, + uint8_t mcast_cl_id, uint32_t mcast_cid, uint8_t func_id, + uint8_t engine_id, void *rdata, ecore_dma_addr_t rdata_mapping, + int state, unsigned long *pstate, + ecore_obj_type type); + +/** + * ecore_config_mcast - Configure multicast MACs list. + * + * @cmd: command to execute: ECORE_MCAST_CMD_X + * + * May configure a new list + * provided in p->mcast_list (ECORE_MCAST_CMD_ADD), clean up + * (ECORE_MCAST_CMD_DEL) or restore (ECORE_MCAST_CMD_RESTORE) a current + * configuration, or continue executing the pending commands + * (ECORE_MCAST_CMD_CONT). + * + * If the previous command is still pending or if the number of MACs to + * configure is more than the maximum number of MACs in one command, + * the current command will be enqueued at the tail of the + * pending commands list. + * + * Return: 0 if the operation was successful and there are no pending completions, + * negative if there were errors, positive if there are pending + * completions. + */ +int ecore_config_mcast(struct bnx2x_softc *sc, + struct ecore_mcast_ramrod_params *p, + enum ecore_mcast_cmd cmd); + +/****************** CREDIT POOL ****************/ +void ecore_init_mac_credit_pool(struct bnx2x_softc *sc, + struct ecore_credit_pool_obj *p, uint8_t func_id, + uint8_t func_num); +void ecore_init_vlan_credit_pool(struct bnx2x_softc *sc, + struct ecore_credit_pool_obj *p, uint8_t func_id, + uint8_t func_num); +void ecore_init_credit_pool(struct ecore_credit_pool_obj *p, + int base, int credit); + +/****************** RSS CONFIGURATION ****************/ +void ecore_init_rss_config_obj(struct bnx2x_softc *sc, + struct ecore_rss_config_obj *rss_obj, + uint8_t cl_id, uint32_t cid, uint8_t func_id, uint8_t engine_id, + void *rdata, ecore_dma_addr_t rdata_mapping, + int state, unsigned long *pstate, + ecore_obj_type type); + +/** + * ecore_config_rss - Updates RSS configuration according to the provided parameters + * + * Return: 0 in case of success + */ +int ecore_config_rss(struct bnx2x_softc *sc, + struct ecore_config_rss_params *p); + +/** + * ecore_get_rss_ind_table - Return the current ind_table configuration. + * + * @ind_table: buffer to fill with the current indirection + * table content. Should be at least + * T_ETH_INDIRECTION_TABLE_SIZE bytes long. + */ +void ecore_get_rss_ind_table(struct ecore_rss_config_obj *rss_obj, + uint8_t *ind_table); + +#define PF_MAC_CREDIT_E2(sc, func_num) \ + ((MAX_MAC_CREDIT_E2 - GET_NUM_VFS_PER_PATH(sc) * VF_MAC_CREDIT_CNT) / \ + (func_num) + GET_NUM_VFS_PER_PF(sc) * VF_MAC_CREDIT_CNT) + +#define PF_VLAN_CREDIT_E2(sc, func_num) \ + ((MAX_MAC_CREDIT_E2 - GET_NUM_VFS_PER_PATH(sc) * VF_VLAN_CREDIT_CNT) / \ + (func_num) + GET_NUM_VFS_PER_PF(sc) * VF_VLAN_CREDIT_CNT) + +#define ECORE_PF_VLAN_CREDIT_VLAN_FILTERING 256 + +#endif /* ECORE_SP_H */ diff --git a/src/spdk/dpdk/drivers/net/bnx2x/elink.c b/src/spdk/dpdk/drivers/net/bnx2x/elink.c new file mode 100644 index 000000000..b65126d71 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnx2x/elink.c @@ -0,0 +1,15236 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2007-2013 Broadcom Corporation.
+ * + * Eric Davis + * David Christensen + * Gary Zambrano + * + * Copyright (c) 2013-2015 Brocade Communications Systems, Inc. + * Copyright (c) 2015-2018 Cavium Inc. + * All rights reserved. + * www.cavium.com + */ + +#include "bnx2x.h" +#include "elink.h" +#include "ecore_mfw_req.h" +#include "ecore_fw_defs.h" +#include "ecore_hsi.h" +#include "ecore_reg.h" + + +#define MDIO_REG_BANK_CL73_IEEEB0 0x0 + #define MDIO_CL73_IEEEB0_CL73_AN_CONTROL 0x0 + #define MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN 0x0200 + #define MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN 0x1000 + #define MDIO_CL73_IEEEB0_CL73_AN_CONTROL_MAIN_RST 0x8000 + +#define MDIO_REG_BANK_CL73_IEEEB1 0x10 + #define MDIO_CL73_IEEEB1_AN_ADV1 0x00 + #define MDIO_CL73_IEEEB1_AN_ADV1_PAUSE 0x0400 + #define MDIO_CL73_IEEEB1_AN_ADV1_ASYMMETRIC 0x0800 + #define MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_BOTH 0x0C00 + #define MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK 0x0C00 + #define MDIO_CL73_IEEEB1_AN_ADV2 0x01 + #define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M 0x0000 + #define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX 0x0020 + #define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4 0x0040 + #define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KR 0x0080 + #define MDIO_CL73_IEEEB1_AN_LP_ADV1 0x03 + #define MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE 0x0400 + #define MDIO_CL73_IEEEB1_AN_LP_ADV1_ASYMMETRIC 0x0800 + #define MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE_BOTH 0x0C00 + #define MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE_MASK 0x0C00 + #define MDIO_CL73_IEEEB1_AN_LP_ADV2 0x04 + +#define MDIO_REG_BANK_RX0 0x80b0 + #define MDIO_RX0_RX_STATUS 0x10 + #define MDIO_RX0_RX_STATUS_SIGDET 0x8000 + #define MDIO_RX0_RX_STATUS_RX_SEQ_DONE 0x1000 + #define MDIO_RX0_RX_EQ_BOOST 0x1c + #define MDIO_RX0_RX_EQ_BOOST_EQUALIZER_CTRL_MASK 0x7 + #define MDIO_RX0_RX_EQ_BOOST_OFFSET_CTRL 0x10 + +#define MDIO_REG_BANK_RX1 0x80c0 + #define MDIO_RX1_RX_EQ_BOOST 0x1c + #define MDIO_RX1_RX_EQ_BOOST_EQUALIZER_CTRL_MASK 0x7 + #define MDIO_RX1_RX_EQ_BOOST_OFFSET_CTRL 0x10 + +#define MDIO_REG_BANK_RX2 0x80d0 + #define MDIO_RX2_RX_EQ_BOOST 0x1c + #define MDIO_RX2_RX_EQ_BOOST_EQUALIZER_CTRL_MASK 0x7 + #define MDIO_RX2_RX_EQ_BOOST_OFFSET_CTRL 0x10 + +#define MDIO_REG_BANK_RX3 0x80e0 + #define MDIO_RX3_RX_EQ_BOOST 0x1c + #define MDIO_RX3_RX_EQ_BOOST_EQUALIZER_CTRL_MASK 0x7 + #define MDIO_RX3_RX_EQ_BOOST_OFFSET_CTRL 0x10 + +#define MDIO_REG_BANK_RX_ALL 0x80f0 + #define MDIO_RX_ALL_RX_EQ_BOOST 0x1c + #define MDIO_RX_ALL_RX_EQ_BOOST_EQUALIZER_CTRL_MASK 0x7 + #define MDIO_RX_ALL_RX_EQ_BOOST_OFFSET_CTRL 0x10 + +#define MDIO_REG_BANK_TX0 0x8060 + #define MDIO_TX0_TX_DRIVER 0x17 + #define MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK 0xf000 + #define MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT 12 + #define MDIO_TX0_TX_DRIVER_IDRIVER_MASK 0x0f00 + #define MDIO_TX0_TX_DRIVER_IDRIVER_SHIFT 8 + #define MDIO_TX0_TX_DRIVER_IPREDRIVER_MASK 0x00f0 + #define MDIO_TX0_TX_DRIVER_IPREDRIVER_SHIFT 4 + #define MDIO_TX0_TX_DRIVER_IFULLSPD_MASK 0x000e + #define MDIO_TX0_TX_DRIVER_IFULLSPD_SHIFT 1 + #define MDIO_TX0_TX_DRIVER_ICBUF1T 1 + +#define MDIO_REG_BANK_TX1 0x8070 + #define MDIO_TX1_TX_DRIVER 0x17 + #define MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK 0xf000 + #define MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT 12 + #define MDIO_TX0_TX_DRIVER_IDRIVER_MASK 0x0f00 + #define MDIO_TX0_TX_DRIVER_IDRIVER_SHIFT 8 + #define MDIO_TX0_TX_DRIVER_IPREDRIVER_MASK 0x00f0 + #define MDIO_TX0_TX_DRIVER_IPREDRIVER_SHIFT 4 + #define MDIO_TX0_TX_DRIVER_IFULLSPD_MASK 0x000e + #define MDIO_TX0_TX_DRIVER_IFULLSPD_SHIFT 1 + #define MDIO_TX0_TX_DRIVER_ICBUF1T 1 + +#define MDIO_REG_BANK_TX2 0x8080 + #define 
MDIO_TX2_TX_DRIVER 0x17 + #define MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK 0xf000 + #define MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT 12 + #define MDIO_TX0_TX_DRIVER_IDRIVER_MASK 0x0f00 + #define MDIO_TX0_TX_DRIVER_IDRIVER_SHIFT 8 + #define MDIO_TX0_TX_DRIVER_IPREDRIVER_MASK 0x00f0 + #define MDIO_TX0_TX_DRIVER_IPREDRIVER_SHIFT 4 + #define MDIO_TX0_TX_DRIVER_IFULLSPD_MASK 0x000e + #define MDIO_TX0_TX_DRIVER_IFULLSPD_SHIFT 1 + #define MDIO_TX0_TX_DRIVER_ICBUF1T 1 + +#define MDIO_REG_BANK_TX3 0x8090 + #define MDIO_TX3_TX_DRIVER 0x17 + #define MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK 0xf000 + #define MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT 12 + #define MDIO_TX0_TX_DRIVER_IDRIVER_MASK 0x0f00 + #define MDIO_TX0_TX_DRIVER_IDRIVER_SHIFT 8 + #define MDIO_TX0_TX_DRIVER_IPREDRIVER_MASK 0x00f0 + #define MDIO_TX0_TX_DRIVER_IPREDRIVER_SHIFT 4 + #define MDIO_TX0_TX_DRIVER_IFULLSPD_MASK 0x000e + #define MDIO_TX0_TX_DRIVER_IFULLSPD_SHIFT 1 + #define MDIO_TX0_TX_DRIVER_ICBUF1T 1 + +#define MDIO_REG_BANK_XGXS_BLOCK0 0x8000 + #define MDIO_BLOCK0_XGXS_CONTROL 0x10 + +#define MDIO_REG_BANK_XGXS_BLOCK1 0x8010 + #define MDIO_BLOCK1_LANE_CTRL0 0x15 + #define MDIO_BLOCK1_LANE_CTRL1 0x16 + #define MDIO_BLOCK1_LANE_CTRL2 0x17 + #define MDIO_BLOCK1_LANE_PRBS 0x19 + +#define MDIO_REG_BANK_XGXS_BLOCK2 0x8100 + #define MDIO_XGXS_BLOCK2_RX_LN_SWAP 0x10 + #define MDIO_XGXS_BLOCK2_RX_LN_SWAP_ENABLE 0x8000 + #define MDIO_XGXS_BLOCK2_RX_LN_SWAP_FORCE_ENABLE 0x4000 + #define MDIO_XGXS_BLOCK2_TX_LN_SWAP 0x11 + #define MDIO_XGXS_BLOCK2_TX_LN_SWAP_ENABLE 0x8000 + #define MDIO_XGXS_BLOCK2_UNICORE_MODE_10G 0x14 + #define MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_CX4_XGXS 0x0001 + #define MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_HIGIG_XGXS 0x0010 + #define MDIO_XGXS_BLOCK2_TEST_MODE_LANE 0x15 + +#define MDIO_REG_BANK_GP_STATUS 0x8120 +#define MDIO_GP_STATUS_TOP_AN_STATUS1 0x1B + #define MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE 0x0001 + #define MDIO_GP_STATUS_TOP_AN_STATUS1_CL37_AUTONEG_COMPLETE 0x0002 + #define MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS 0x0004 + #define MDIO_GP_STATUS_TOP_AN_STATUS1_DUPLEX_STATUS 0x0008 + #define MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE 0x0010 + #define MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_LP_NP_BAM_ABLE 0x0020 + #define MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_TXSIDE 0x0040 + #define MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_RXSIDE 0x0080 + #define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_MASK 0x3f00 + #define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10M 0x0000 + #define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_100M 0x0100 + #define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_1G 0x0200 + #define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_2_5G 0x0300 + #define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_5G 0x0400 + #define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_6G 0x0500 + #define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_HIG 0x0600 + #define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_CX4 0x0700 + #define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_12G_HIG 0x0800 + #define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_12_5G 0x0900 + #define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_13G 0x0A00 + #define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_15G 0x0B00 + #define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_16G 0x0C00 + #define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_1G_KX 0x0D00 + #define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KX4 0x0E00 + #define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KR 0x0F00 + #define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_XFI 0x1B00 + 
#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_20G_DXGXS 0x1E00 + #define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_SFI 0x1F00 + #define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_20G_KR2 0x3900 + + +#define MDIO_REG_BANK_10G_PARALLEL_DETECT 0x8130 +#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS 0x10 +#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS_PD_LINK 0x8000 +#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL 0x11 +#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN 0x1 +#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK 0x13 +#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK_CNT (0xb71<<1) + +#define MDIO_REG_BANK_SERDES_DIGITAL 0x8300 +#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1 0x10 +#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE 0x0001 +#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_TBI_IF 0x0002 +#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_SIGNAL_DETECT_EN 0x0004 +#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT 0x0008 +#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET 0x0010 +#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_MSTR_MODE 0x0020 +#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL2 0x11 +#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN 0x0001 +#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_AN_FST_TMR 0x0040 +#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1 0x14 +#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SGMII 0x0001 +#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_LINK 0x0002 +#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_DUPLEX 0x0004 +#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_MASK 0x0018 +#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_SHIFT 3 +#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_2_5G 0x0018 +#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_1G 0x0010 +#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_100M 0x0008 +#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_10M 0x0000 +#define MDIO_SERDES_DIGITAL_A_1000X_STATUS2 0x15 +#define MDIO_SERDES_DIGITAL_A_1000X_STATUS2_AN_DISABLED 0x0002 +#define MDIO_SERDES_DIGITAL_MISC1 0x18 +#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_MASK 0xE000 +#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_25M 0x0000 +#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_100M 0x2000 +#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_125M 0x4000 +#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_156_25M 0x6000 +#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_187_5M 0x8000 +#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_SEL 0x0010 +#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_MASK 0x000f +#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_2_5G 0x0000 +#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_5G 0x0001 +#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_6G 0x0002 +#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_10G_HIG 0x0003 +#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_10G_CX4 0x0004 +#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_12G 0x0005 +#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_12_5G 0x0006 +#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_13G 0x0007 +#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_15G 0x0008 +#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_16G 0x0009 + +#define MDIO_REG_BANK_OVER_1G 0x8320 +#define MDIO_OVER_1G_DIGCTL_3_4 0x14 +#define MDIO_OVER_1G_DIGCTL_3_4_MP_ID_MASK 0xffe0 +#define MDIO_OVER_1G_DIGCTL_3_4_MP_ID_SHIFT 5 +#define MDIO_OVER_1G_UP1 0x19 +#define MDIO_OVER_1G_UP1_2_5G 0x0001 +#define MDIO_OVER_1G_UP1_5G 0x0002 +#define MDIO_OVER_1G_UP1_6G 0x0004 +#define MDIO_OVER_1G_UP1_10G 0x0010 +#define MDIO_OVER_1G_UP1_10GH 0x0008 +#define MDIO_OVER_1G_UP1_12G 0x0020 +#define 
MDIO_OVER_1G_UP1_12_5G 0x0040 +#define MDIO_OVER_1G_UP1_13G 0x0080 +#define MDIO_OVER_1G_UP1_15G 0x0100 +#define MDIO_OVER_1G_UP1_16G 0x0200 +#define MDIO_OVER_1G_UP2 0x1A +#define MDIO_OVER_1G_UP2_IPREDRIVER_MASK 0x0007 +#define MDIO_OVER_1G_UP2_IDRIVER_MASK 0x0038 +#define MDIO_OVER_1G_UP2_PREEMPHASIS_MASK 0x03C0 +#define MDIO_OVER_1G_UP3 0x1B +#define MDIO_OVER_1G_UP3_HIGIG2 0x0001 +#define MDIO_OVER_1G_LP_UP1 0x1C +#define MDIO_OVER_1G_LP_UP2 0x1D +#define MDIO_OVER_1G_LP_UP2_MR_ADV_OVER_1G_MASK 0x03ff +#define MDIO_OVER_1G_LP_UP2_PREEMPHASIS_MASK 0x0780 +#define MDIO_OVER_1G_LP_UP2_PREEMPHASIS_SHIFT 7 +#define MDIO_OVER_1G_LP_UP3 0x1E + +#define MDIO_REG_BANK_REMOTE_PHY 0x8330 +#define MDIO_REMOTE_PHY_MISC_RX_STATUS 0x10 +#define MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_OVER1G_MSG 0x0010 +#define MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_BRCM_OUI_MSG 0x0600 + +#define MDIO_REG_BANK_BAM_NEXT_PAGE 0x8350 +#define MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL 0x10 +#define MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE 0x0001 +#define MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN 0x0002 + +#define MDIO_REG_BANK_CL73_USERB0 0x8370 +#define MDIO_CL73_USERB0_CL73_UCTRL 0x10 +#define MDIO_CL73_USERB0_CL73_UCTRL_USTAT1_MUXSEL 0x0002 +#define MDIO_CL73_USERB0_CL73_USTAT1 0x11 +#define MDIO_CL73_USERB0_CL73_USTAT1_LINK_STATUS_CHECK 0x0100 +#define MDIO_CL73_USERB0_CL73_USTAT1_AN_GOOD_CHECK_BAM37 0x0400 +#define MDIO_CL73_USERB0_CL73_BAM_CTRL1 0x12 +#define MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_EN 0x8000 +#define MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_STATION_MNGR_EN 0x4000 +#define MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_NP_AFTER_BP_EN 0x2000 +#define MDIO_CL73_USERB0_CL73_BAM_CTRL3 0x14 +#define MDIO_CL73_USERB0_CL73_BAM_CTRL3_USE_CL73_HCD_MR 0x0001 + +#define MDIO_REG_BANK_AER_BLOCK 0xFFD0 +#define MDIO_AER_BLOCK_AER_REG 0x1E + +#define MDIO_REG_BANK_COMBO_IEEE0 0xFFE0 +#define MDIO_COMBO_IEEE0_MII_CONTROL 0x10 +#define MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK 0x2040 +#define MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_10 0x0000 +#define MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_100 0x2000 +#define MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_1000 0x0040 +#define MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX 0x0100 +#define MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN 0x0200 +#define MDIO_COMBO_IEEO_MII_CONTROL_AN_EN 0x1000 +#define MDIO_COMBO_IEEO_MII_CONTROL_LOOPBACK 0x4000 +#define MDIO_COMBO_IEEO_MII_CONTROL_RESET 0x8000 +#define MDIO_COMBO_IEEE0_MII_STATUS 0x11 +#define MDIO_COMBO_IEEE0_MII_STATUS_LINK_PASS 0x0004 +#define MDIO_COMBO_IEEE0_MII_STATUS_AUTONEG_COMPLETE 0x0020 +#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV 0x14 +#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX 0x0020 +#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_HALF_DUPLEX 0x0040 +#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK 0x0180 +#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE 0x0000 +#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC 0x0080 +#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC 0x0100 +#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH 0x0180 +#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_NEXT_PAGE 0x8000 +#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1 0x15 +#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_NEXT_PAGE 0x8000 +#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_ACK 0x4000 +#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_PAUSE_MASK 0x0180 +#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_PAUSE_NONE 0x0000 +#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_PAUSE_BOTH 0x0180 
+#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_HALF_DUP_CAP 0x0040 +#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_FULL_DUP_CAP 0x0020 +/*WhenthelinkpartnerisinSGMIImode(bit0=1),then +bit15=link,bit12=duplex,bits11:10=speed,bit14=acknowledge. +Theotherbitsarereservedandshouldbezero*/ +#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_SGMII_MODE 0x0001 + + +#define MDIO_PMA_DEVAD 0x1 +/*ieee*/ +#define MDIO_PMA_REG_CTRL 0x0 +#define MDIO_PMA_REG_STATUS 0x1 +#define MDIO_PMA_REG_10G_CTRL2 0x7 +#define MDIO_PMA_REG_TX_DISABLE 0x0009 +#define MDIO_PMA_REG_RX_SD 0xa +/*bnx2x*/ +#define MDIO_PMA_REG_BCM_CTRL 0x0096 +#define MDIO_PMA_REG_FEC_CTRL 0x00ab +#define MDIO_PMA_LASI_RXCTRL 0x9000 +#define MDIO_PMA_LASI_TXCTRL 0x9001 +#define MDIO_PMA_LASI_CTRL 0x9002 +#define MDIO_PMA_LASI_RXSTAT 0x9003 +#define MDIO_PMA_LASI_TXSTAT 0x9004 +#define MDIO_PMA_LASI_STAT 0x9005 +#define MDIO_PMA_REG_PHY_IDENTIFIER 0xc800 +#define MDIO_PMA_REG_DIGITAL_CTRL 0xc808 +#define MDIO_PMA_REG_DIGITAL_STATUS 0xc809 +#define MDIO_PMA_REG_TX_POWER_DOWN 0xca02 +#define MDIO_PMA_REG_CMU_PLL_BYPASS 0xca09 +#define MDIO_PMA_REG_MISC_CTRL 0xca0a +#define MDIO_PMA_REG_GEN_CTRL 0xca10 + #define MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP 0x0188 + #define MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET 0x018a +#define MDIO_PMA_REG_M8051_MSGIN_REG 0xca12 +#define MDIO_PMA_REG_M8051_MSGOUT_REG 0xca13 +#define MDIO_PMA_REG_ROM_VER1 0xca19 +#define MDIO_PMA_REG_ROM_VER2 0xca1a +#define MDIO_PMA_REG_EDC_FFE_MAIN 0xca1b +#define MDIO_PMA_REG_PLL_BANDWIDTH 0xca1d +#define MDIO_PMA_REG_PLL_CTRL 0xca1e +#define MDIO_PMA_REG_MISC_CTRL0 0xca23 +#define MDIO_PMA_REG_LRM_MODE 0xca3f +#define MDIO_PMA_REG_CDR_BANDWIDTH 0xca46 +#define MDIO_PMA_REG_MISC_CTRL1 0xca85 + +#define MDIO_PMA_REG_SFP_TWO_WIRE_CTRL 0x8000 +#define MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK 0x000c +#define MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE 0x0000 +#define MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE 0x0004 +#define MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IN_PROGRESS 0x0008 +#define MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_FAILED 0x000c +#define MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT 0x8002 +#define MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR 0x8003 +#define MDIO_PMA_REG_8726_TWO_WIRE_DATA_BUF 0xc820 + #define MDIO_PMA_REG_8726_TWO_WIRE_DATA_MASK 0xff +#define MDIO_PMA_REG_8726_TX_CTRL1 0xca01 +#define MDIO_PMA_REG_8726_TX_CTRL2 0xca05 + +#define MDIO_PMA_REG_8727_TWO_WIRE_SLAVE_ADDR 0x8005 +#define MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF 0x8007 + #define MDIO_PMA_REG_8727_TWO_WIRE_DATA_MASK 0xff +#define MDIO_PMA_REG_8727_MISC_CTRL 0x8309 +#define MDIO_PMA_REG_8727_TX_CTRL1 0xca02 +#define MDIO_PMA_REG_8727_TX_CTRL2 0xca05 +#define MDIO_PMA_REG_8727_PCS_OPT_CTRL 0xc808 +#define MDIO_PMA_REG_8727_GPIO_CTRL 0xc80e +#define MDIO_PMA_REG_8727_PCS_GP 0xc842 +#define MDIO_PMA_REG_8727_OPT_CFG_REG 0xc8e4 + +#define MDIO_AN_REG_8727_MISC_CTRL 0x8309 +#define MDIO_PMA_REG_8073_CHIP_REV 0xc801 +#define MDIO_PMA_REG_8073_SPEED_LINK_STATUS 0xc820 +#define MDIO_PMA_REG_8073_XAUI_WA 0xc841 +#define MDIO_PMA_REG_8073_OPT_DIGITAL_CTRL 0xcd08 + +#define MDIO_PMA_REG_7101_RESET 0xc000 +#define MDIO_PMA_REG_7107_LED_CNTL 0xc007 +#define MDIO_PMA_REG_7107_LINK_LED_CNTL 0xc009 +#define MDIO_PMA_REG_7101_VER1 0xc026 +#define MDIO_PMA_REG_7101_VER2 0xc027 + +#define MDIO_PMA_REG_8481_PMD_SIGNAL 0xa811 +#define MDIO_PMA_REG_8481_LED1_MASK 0xa82c +#define MDIO_PMA_REG_8481_LED2_MASK 0xa82f +#define MDIO_PMA_REG_8481_LED3_MASK 0xa832 +#define MDIO_PMA_REG_8481_LED3_BLINK 0xa834 +#define 
MDIO_PMA_REG_8481_LED5_MASK 0xa838 +#define MDIO_PMA_REG_8481_SIGNAL_MASK 0xa835 +#define MDIO_PMA_REG_8481_LINK_SIGNAL 0xa83b +#define MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_MASK 0x800 +#define MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_SHIFT 11 + + + +#define MDIO_WIS_DEVAD 0x2 +/*bnx2x*/ +#define MDIO_WIS_REG_LASI_CNTL 0x9002 +#define MDIO_WIS_REG_LASI_STATUS 0x9005 + +#define MDIO_PCS_DEVAD 0x3 +#define MDIO_PCS_REG_STATUS 0x0020 +#define MDIO_PCS_REG_LASI_STATUS 0x9005 +#define MDIO_PCS_REG_7101_DSP_ACCESS 0xD000 +#define MDIO_PCS_REG_7101_SPI_MUX 0xD008 +#define MDIO_PCS_REG_7101_SPI_CTRL_ADDR 0xE12A + #define MDIO_PCS_REG_7101_SPI_RESET_BIT (5) +#define MDIO_PCS_REG_7101_SPI_FIFO_ADDR 0xE02A + #define MDIO_PCS_REG_7101_SPI_FIFO_ADDR_WRITE_ENABLE_CMD (6) + #define MDIO_PCS_REG_7101_SPI_FIFO_ADDR_BULK_ERASE_CMD (0xC7) + #define MDIO_PCS_REG_7101_SPI_FIFO_ADDR_PAGE_PROGRAM_CMD (2) +#define MDIO_PCS_REG_7101_SPI_BYTES_TO_TRANSFER_ADDR 0xE028 + + + +#define MDIO_XS_DEVAD 0x4 +#define MDIO_XS_REG_STATUS 0x0001 +#define MDIO_XS_PLL_SEQUENCER 0x8000 +#define MDIO_XS_SFX7101_XGXS_TEST1 0xc00a + +#define MDIO_XS_8706_REG_BANK_RX0 0x80bc +#define MDIO_XS_8706_REG_BANK_RX1 0x80cc +#define MDIO_XS_8706_REG_BANK_RX2 0x80dc +#define MDIO_XS_8706_REG_BANK_RX3 0x80ec +#define MDIO_XS_8706_REG_BANK_RXA 0x80fc + +#define MDIO_XS_REG_8073_RX_CTRL_PCIE 0x80FA + +#define MDIO_AN_DEVAD 0x7 +/*ieee*/ +#define MDIO_AN_REG_CTRL 0x0000 +#define MDIO_AN_REG_STATUS 0x0001 + #define MDIO_AN_REG_STATUS_AN_COMPLETE 0x0020 +#define MDIO_AN_REG_ADV_PAUSE 0x0010 + #define MDIO_AN_REG_ADV_PAUSE_PAUSE 0x0400 + #define MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC 0x0800 + #define MDIO_AN_REG_ADV_PAUSE_BOTH 0x0C00 + #define MDIO_AN_REG_ADV_PAUSE_MASK 0x0C00 +#define MDIO_AN_REG_ADV 0x0011 +#define MDIO_AN_REG_ADV2 0x0012 +#define MDIO_AN_REG_LP_AUTO_NEG 0x0013 +#define MDIO_AN_REG_LP_AUTO_NEG2 0x0014 +#define MDIO_AN_REG_MASTER_STATUS 0x0021 +#define MDIO_AN_REG_EEE_ADV 0x003c +#define MDIO_AN_REG_LP_EEE_ADV 0x003d +/*bnx2x*/ +#define MDIO_AN_REG_LINK_STATUS 0x8304 +#define MDIO_AN_REG_CL37_CL73 0x8370 +#define MDIO_AN_REG_CL37_AN 0xffe0 +#define MDIO_AN_REG_CL37_FC_LD 0xffe4 +#define MDIO_AN_REG_CL37_FC_LP 0xffe5 +#define MDIO_AN_REG_1000T_STATUS 0xffea + +#define MDIO_AN_REG_8073_2_5G 0x8329 +#define MDIO_AN_REG_8073_BAM 0x8350 + +#define MDIO_AN_REG_8481_10GBASE_T_AN_CTRL 0x0020 +#define MDIO_AN_REG_8481_LEGACY_MII_CTRL 0xffe0 + #define MDIO_AN_REG_8481_MII_CTRL_FORCE_1G 0x40 +#define MDIO_AN_REG_8481_LEGACY_MII_STATUS 0xffe1 +#define MDIO_AN_REG_848xx_ID_MSB 0xffe2 + #define BNX2X84858_PHY_ID 0x600d +#define MDIO_AN_REG_848xx_ID_LSB 0xffe3 +#define MDIO_AN_REG_8481_LEGACY_AN_ADV 0xffe4 +#define MDIO_AN_REG_8481_LEGACY_AN_EXPANSION 0xffe6 +#define MDIO_AN_REG_8481_1000T_CTRL 0xffe9 +#define MDIO_AN_REG_8481_1G_100T_EXT_CTRL 0xfff0 + #define MIDO_AN_REG_8481_EXT_CTRL_FORCE_LEDS_OFF 0x0008 +#define MDIO_AN_REG_8481_EXPANSION_REG_RD_RW 0xfff5 +#define MDIO_AN_REG_8481_EXPANSION_REG_ACCESS 0xfff7 +#define MDIO_AN_REG_8481_AUX_CTRL 0xfff8 +#define MDIO_AN_REG_8481_LEGACY_SHADOW 0xfffc + +/* BNX2X84823 only */ +#define MDIO_CTL_DEVAD 0x1e +#define MDIO_CTL_REG_84823_MEDIA 0x401a + #define MDIO_CTL_REG_84823_MEDIA_MAC_MASK 0x0018 + /* These pins configure the BNX2X84823 interface to MAC after reset. */ + #define MDIO_CTL_REG_84823_CTRL_MAC_XFI 0x0008 + #define MDIO_CTL_REG_84823_MEDIA_MAC_XAUI_M 0x0010 + /* These pins configure the BNX2X84823 interface to Line after reset. 
*/ + #define MDIO_CTL_REG_84823_MEDIA_LINE_MASK 0x0060 + #define MDIO_CTL_REG_84823_MEDIA_LINE_XAUI_L 0x0020 + #define MDIO_CTL_REG_84823_MEDIA_LINE_XFI 0x0040 + /* When this pin is active high during reset, 10GBASE-T core is power + * down, When it is active low the 10GBASE-T is power up + */ + #define MDIO_CTL_REG_84823_MEDIA_COPPER_CORE_DOWN 0x0080 + #define MDIO_CTL_REG_84823_MEDIA_PRIORITY_MASK 0x0100 + #define MDIO_CTL_REG_84823_MEDIA_PRIORITY_COPPER 0x0000 + #define MDIO_CTL_REG_84823_MEDIA_PRIORITY_FIBER 0x0100 + #define MDIO_CTL_REG_84823_MEDIA_FIBER_1G 0x1000 +#define MDIO_CTL_REG_84823_USER_CTRL_REG 0x4005 + #define MDIO_CTL_REG_84823_USER_CTRL_CMS 0x0080 +#define MDIO_PMA_REG_84823_CTL_SLOW_CLK_CNT_HIGH 0xa82b + #define MDIO_PMA_REG_84823_BLINK_RATE_VAL_15P9HZ 0x2f +#define MDIO_PMA_REG_84823_CTL_LED_CTL_1 0xa8e3 +#define MDIO_PMA_REG_84833_CTL_LED_CTL_1 0xa8ec + #define MDIO_PMA_REG_84823_LED3_STRETCH_EN 0x0080 + +/* BNX2X84833 only */ +#define MDIO_84833_TOP_CFG_FW_REV 0x400f +#define MDIO_84833_TOP_CFG_FW_EEE 0x10b1 +#define MDIO_84833_TOP_CFG_FW_NO_EEE 0x1f81 +#define MDIO_84833_TOP_CFG_XGPHY_STRAP1 0x401a +#define MDIO_84833_SUPER_ISOLATE 0x8000 +/* These are mailbox register set used by 84833/84858. */ +#define MDIO_848xx_TOP_CFG_SCRATCH_REG0 0x4005 +#define MDIO_848xx_TOP_CFG_SCRATCH_REG1 0x4006 +#define MDIO_848xx_TOP_CFG_SCRATCH_REG2 0x4007 +#define MDIO_848xx_TOP_CFG_SCRATCH_REG3 0x4008 +#define MDIO_848xx_TOP_CFG_SCRATCH_REG4 0x4009 +#define MDIO_848xx_TOP_CFG_SCRATCH_REG26 0x4037 +#define MDIO_848xx_TOP_CFG_SCRATCH_REG27 0x4038 +#define MDIO_848xx_TOP_CFG_SCRATCH_REG28 0x4039 +#define MDIO_848xx_TOP_CFG_SCRATCH_REG29 0x403a +#define MDIO_848xx_TOP_CFG_SCRATCH_REG30 0x403b +#define MDIO_848xx_TOP_CFG_SCRATCH_REG31 0x403c +#define MDIO_848xx_CMD_HDLR_COMMAND (MDIO_848xx_TOP_CFG_SCRATCH_REG0) +#define MDIO_848xx_CMD_HDLR_STATUS (MDIO_848xx_TOP_CFG_SCRATCH_REG26) +#define MDIO_848xx_CMD_HDLR_DATA1 (MDIO_848xx_TOP_CFG_SCRATCH_REG27) +#define MDIO_848xx_CMD_HDLR_DATA2 (MDIO_848xx_TOP_CFG_SCRATCH_REG28) +#define MDIO_848xx_CMD_HDLR_DATA3 (MDIO_848xx_TOP_CFG_SCRATCH_REG29) +#define MDIO_848xx_CMD_HDLR_DATA4 (MDIO_848xx_TOP_CFG_SCRATCH_REG30) +#define MDIO_848xx_CMD_HDLR_DATA5 (MDIO_848xx_TOP_CFG_SCRATCH_REG31) + +/* Mailbox command set used by 84833/84858 */ +#define PHY848xx_CMD_SET_PAIR_SWAP 0x8001 +#define PHY848xx_CMD_GET_EEE_MODE 0x8008 +#define PHY848xx_CMD_SET_EEE_MODE 0x8009 +#define PHY848xx_CMD_GET_CURRENT_TEMP 0x8031 +/* Mailbox status set used by 84833 only */ +#define PHY84833_STATUS_CMD_RECEIVED 0x0001 +#define PHY84833_STATUS_CMD_IN_PROGRESS 0x0002 +#define PHY84833_STATUS_CMD_COMPLETE_PASS 0x0004 +#define PHY84833_STATUS_CMD_COMPLETE_ERROR 0x0008 +#define PHY84833_STATUS_CMD_OPEN_FOR_CMDS 0x0010 +#define PHY84833_STATUS_CMD_SYSTEM_BOOT 0x0020 +#define PHY84833_STATUS_CMD_NOT_OPEN_FOR_CMDS 0x0040 +#define PHY84833_STATUS_CMD_CLEAR_COMPLETE 0x0080 +#define PHY84833_STATUS_CMD_OPEN_OVERRIDE 0xa5a5 +/* Mailbox Process */ +#define PHY84833_MB_PROCESS1 1 +#define PHY84833_MB_PROCESS2 2 +#define PHY84833_MB_PROCESS3 3 + + +/* Mailbox status set used by 84858 only */ +#define PHY84858_STATUS_CMD_RECEIVED 0x0001 +#define PHY84858_STATUS_CMD_IN_PROGRESS 0x0002 +#define PHY84858_STATUS_CMD_COMPLETE_PASS 0x0004 +#define PHY84858_STATUS_CMD_COMPLETE_ERROR 0x0008 +#define PHY84858_STATUS_CMD_SYSTEM_BUSY 0xbbbb + + +/* Warpcore clause 45 addressing */ +#define MDIO_WC_DEVAD 0x3 +#define MDIO_WC_REG_IEEE0BLK_MIICNTL 0x0 +#define MDIO_WC_REG_IEEE0BLK_AUTONEGNP 0x7 +#define 
MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT0 0x10 +#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1 0x11 +#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT2 0x12 + #define MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_ABILITY 0x4000 + #define MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_REQ 0x8000 +#define MDIO_WC_REG_PCS_STATUS2 0x0021 +#define MDIO_WC_REG_PMD_KR_CONTROL 0x0096 +#define MDIO_WC_REG_XGXSBLK0_XGXSCONTROL 0x8000 +#define MDIO_WC_REG_XGXSBLK0_MISCCONTROL1 0x800e +#define MDIO_WC_REG_XGXSBLK1_DESKEW 0x8010 +#define MDIO_WC_REG_XGXSBLK1_LANECTRL0 0x8015 +#define MDIO_WC_REG_XGXSBLK1_LANECTRL1 0x8016 +#define MDIO_WC_REG_XGXSBLK1_LANECTRL2 0x8017 +#define MDIO_WC_REG_XGXSBLK1_LANECTRL3 0x8018 +#define MDIO_WC_REG_XGXSBLK1_LANETEST0 0x801a +#define MDIO_WC_REG_TX0_ANA_CTRL0 0x8061 +#define MDIO_WC_REG_TX1_ANA_CTRL0 0x8071 +#define MDIO_WC_REG_TX2_ANA_CTRL0 0x8081 +#define MDIO_WC_REG_TX3_ANA_CTRL0 0x8091 +#define MDIO_WC_REG_TX0_TX_DRIVER 0x8067 +#define MDIO_WC_REG_TX0_TX_DRIVER_IFIR_OFFSET 0x01 +#define MDIO_WC_REG_TX0_TX_DRIVER_IFIR_MASK 0x000e +#define MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET 0x04 +#define MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_MASK 0x00f0 +#define MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET 0x08 +#define MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_MASK 0x0f00 +#define MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET 0x0c +#define MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_MASK 0x7000 +#define MDIO_WC_REG_TX1_TX_DRIVER 0x8077 +#define MDIO_WC_REG_TX2_TX_DRIVER 0x8087 +#define MDIO_WC_REG_TX3_TX_DRIVER 0x8097 +#define MDIO_WC_REG_RX0_ANARXCONTROL1G 0x80b9 +#define MDIO_WC_REG_RX2_ANARXCONTROL1G 0x80d9 +#define MDIO_WC_REG_RX0_PCI_CTRL 0x80ba +#define MDIO_WC_REG_RX1_PCI_CTRL 0x80ca +#define MDIO_WC_REG_RX2_PCI_CTRL 0x80da +#define MDIO_WC_REG_RX3_PCI_CTRL 0x80ea +#define MDIO_WC_REG_RXB_ANA_RX_CONTROL_PCI 0x80fa +#define MDIO_WC_REG_XGXSBLK2_UNICORE_MODE_10G 0x8104 +#define MDIO_WC_REG_XGXSBLK2_LANE_RESET 0x810a +#define MDIO_WC_REG_XGXS_STATUS3 0x8129 +#define MDIO_WC_REG_PAR_DET_10G_STATUS 0x8130 +#define MDIO_WC_REG_PAR_DET_10G_CTRL 0x8131 +#define MDIO_WC_REG_XGXS_STATUS4 0x813c +#define MDIO_WC_REG_XGXS_X2_CONTROL2 0x8141 +#define MDIO_WC_REG_XGXS_X2_CONTROL3 0x8142 +#define MDIO_WC_REG_XGXS_RX_LN_SWAP1 0x816B +#define MDIO_WC_REG_XGXS_TX_LN_SWAP1 0x8169 +#define MDIO_WC_REG_GP2_STATUS_GP_2_0 0x81d0 +#define MDIO_WC_REG_GP2_STATUS_GP_2_1 0x81d1 +#define MDIO_WC_REG_GP2_STATUS_GP_2_2 0x81d2 +#define MDIO_WC_REG_GP2_STATUS_GP_2_3 0x81d3 +#define MDIO_WC_REG_GP2_STATUS_GP_2_4 0x81d4 + #define MDIO_WC_REG_GP2_STATUS_GP_2_4_CL73_AN_CMPL 0x1000 + #define MDIO_WC_REG_GP2_STATUS_GP_2_4_CL37_AN_CMPL 0x0100 + #define MDIO_WC_REG_GP2_STATUS_GP_2_4_CL37_LP_AN_CAP 0x0010 + #define MDIO_WC_REG_GP2_STATUS_GP_2_4_CL37_AN_CAP 0x1 +#define MDIO_WC_REG_UC_INFO_B0_DEAD_TRAP 0x81EE +#define MDIO_WC_REG_UC_INFO_B1_VERSION 0x81F0 +#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE 0x81F2 + #define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_LANE0_OFFSET 0x0 + #define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_DEFAULT 0x0 + #define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_OPT_LR 0x1 + #define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_DAC 0x2 + #define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_XLAUI 0x3 + #define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_LONG_CH_6G 0x4 + #define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_LANE1_OFFSET 0x4 + #define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_LANE2_OFFSET 0x8 + #define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_LANE3_OFFSET 0xc +#define MDIO_WC_REG_UC_INFO_B1_CRC 0x81FE +#define MDIO_WC_REG_DSC1B0_UC_CTRL 0x820e +#define 
MDIO_WC_REG_DSC1B0_UC_CTRL_RDY4CMD (1<<7) +#define MDIO_WC_REG_DSC_SMC 0x8213 +#define MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0 0x821e +#define MDIO_WC_REG_TX_FIR_TAP 0x82e2 + #define MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET 0x00 + #define MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_MASK 0x000f + #define MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET 0x04 + #define MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_MASK 0x03f0 + #define MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET 0x0a + #define MDIO_WC_REG_TX_FIR_TAP_POST_TAP_MASK 0x7c00 + #define MDIO_WC_REG_TX_FIR_TAP_ENABLE 0x8000 +#define MDIO_WC_REG_CL72_USERB0_CL72_TX_FIR_TAP 0x82e2 +#define MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL 0x82e3 +#define MDIO_WC_REG_CL72_USERB0_CL72_OS_DEF_CTRL 0x82e6 +#define MDIO_WC_REG_CL72_USERB0_CL72_BR_DEF_CTRL 0x82e7 +#define MDIO_WC_REG_CL72_USERB0_CL72_2P5_DEF_CTRL 0x82e8 +#define MDIO_WC_REG_CL72_USERB0_CL72_MISC4_CONTROL 0x82ec +#define MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1 0x8300 +#define MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2 0x8301 +#define MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3 0x8302 +#define MDIO_WC_REG_SERDESDIGITAL_STATUS1000X1 0x8304 +#define MDIO_WC_REG_SERDESDIGITAL_MISC1 0x8308 +#define MDIO_WC_REG_SERDESDIGITAL_MISC2 0x8309 +#define MDIO_WC_REG_DIGITAL3_UP1 0x8329 +#define MDIO_WC_REG_DIGITAL3_LP_UP1 0x832c +#define MDIO_WC_REG_DIGITAL4_MISC3 0x833c +#define MDIO_WC_REG_DIGITAL4_MISC5 0x833e +#define MDIO_WC_REG_DIGITAL5_MISC6 0x8345 +#define MDIO_WC_REG_DIGITAL5_MISC7 0x8349 +#define MDIO_WC_REG_DIGITAL5_LINK_STATUS 0x834d +#define MDIO_WC_REG_DIGITAL5_ACTUAL_SPEED 0x834e +#define MDIO_WC_REG_DIGITAL6_MP5_NEXTPAGECTRL 0x8350 +#define MDIO_WC_REG_CL49_USERB0_CTRL 0x8368 +#define MDIO_WC_REG_CL73_USERB0_CTRL 0x8370 +#define MDIO_WC_REG_CL73_USERB0_USTAT 0x8371 +#define MDIO_WC_REG_CL73_BAM_CTRL1 0x8372 +#define MDIO_WC_REG_CL73_BAM_CTRL2 0x8373 +#define MDIO_WC_REG_CL73_BAM_CTRL3 0x8374 +#define MDIO_WC_REG_CL73_BAM_CODE_FIELD 0x837b +#define MDIO_WC_REG_EEE_COMBO_CONTROL0 0x8390 +#define MDIO_WC_REG_TX66_CONTROL 0x83b0 +#define MDIO_WC_REG_RX66_CONTROL 0x83c0 +#define MDIO_WC_REG_RX66_SCW0 0x83c2 +#define MDIO_WC_REG_RX66_SCW1 0x83c3 +#define MDIO_WC_REG_RX66_SCW2 0x83c4 +#define MDIO_WC_REG_RX66_SCW3 0x83c5 +#define MDIO_WC_REG_RX66_SCW0_MASK 0x83c6 +#define MDIO_WC_REG_RX66_SCW1_MASK 0x83c7 +#define MDIO_WC_REG_RX66_SCW2_MASK 0x83c8 +#define MDIO_WC_REG_RX66_SCW3_MASK 0x83c9 +#define MDIO_WC_REG_FX100_CTRL1 0x8400 +#define MDIO_WC_REG_FX100_CTRL3 0x8402 +#define MDIO_WC_REG_CL82_USERB1_TX_CTRL5 0x8436 +#define MDIO_WC_REG_CL82_USERB1_TX_CTRL6 0x8437 +#define MDIO_WC_REG_CL82_USERB1_TX_CTRL7 0x8438 +#define MDIO_WC_REG_CL82_USERB1_TX_CTRL9 0x8439 +#define MDIO_WC_REG_CL82_USERB1_RX_CTRL10 0x843a +#define MDIO_WC_REG_CL82_USERB1_RX_CTRL11 0x843b +#define MDIO_WC_REG_ETA_CL73_OUI1 0x8453 +#define MDIO_WC_REG_ETA_CL73_OUI2 0x8454 +#define MDIO_WC_REG_ETA_CL73_OUI3 0x8455 +#define MDIO_WC_REG_ETA_CL73_LD_BAM_CODE 0x8456 +#define MDIO_WC_REG_ETA_CL73_LD_UD_CODE 0x8457 +#define MDIO_WC_REG_MICROBLK_CMD 0xffc2 +#define MDIO_WC_REG_MICROBLK_DL_STATUS 0xffc5 +#define MDIO_WC_REG_MICROBLK_CMD3 0xffcc + +#define MDIO_WC_REG_AERBLK_AER 0xffde +#define MDIO_WC_REG_COMBO_IEEE0_MIICTRL 0xffe0 +#define MDIO_WC_REG_COMBO_IEEE0_MIIISTAT 0xffe1 + +#define MDIO_WC0_XGXS_BLK2_LANE_RESET 0x810A +#define MDIO_WC0_XGXS_BLK2_LANE_RESET_RX_BITSHIFT 0 +#define MDIO_WC0_XGXS_BLK2_LANE_RESET_TX_BITSHIFT 4 + +#define MDIO_WC0_XGXS_BLK6_XGXS_X2_CONTROL2 0x8141 + +#define DIGITAL5_ACTUAL_SPEED_TX_MASK 0x003f + +/* 54618se */ +#define MDIO_REG_GPHY_MII_STATUS 0x1 
+#define MDIO_REG_GPHY_PHYID_LSB 0x3 +#define MDIO_REG_GPHY_CL45_ADDR_REG 0xd + #define MDIO_REG_GPHY_CL45_REG_WRITE 0x4000 + #define MDIO_REG_GPHY_CL45_REG_READ 0xc000 +#define MDIO_REG_GPHY_CL45_DATA_REG 0xe + #define MDIO_REG_GPHY_EEE_RESOLVED 0x803e +#define MDIO_REG_GPHY_EXP_ACCESS_GATE 0x15 +#define MDIO_REG_GPHY_EXP_ACCESS 0x17 + #define MDIO_REG_GPHY_EXP_ACCESS_TOP 0xd00 + #define MDIO_REG_GPHY_EXP_TOP_2K_BUF 0x40 +#define MDIO_REG_GPHY_AUX_STATUS 0x19 +#define MDIO_REG_INTR_STATUS 0x1a +#define MDIO_REG_INTR_MASK 0x1b + #define MDIO_REG_INTR_MASK_LINK_STATUS (0x1 << 1) +#define MDIO_REG_GPHY_SHADOW 0x1c + #define MDIO_REG_GPHY_SHADOW_LED_SEL1 (0x0d << 10) + #define MDIO_REG_GPHY_SHADOW_LED_SEL2 (0x0e << 10) + #define MDIO_REG_GPHY_SHADOW_WR_ENA (0x1 << 15) + #define MDIO_REG_GPHY_SHADOW_AUTO_DET_MED (0x1e << 10) + #define MDIO_REG_GPHY_SHADOW_INVERT_FIB_SD (0x1 << 8) + + +typedef elink_status_t (*read_sfp_module_eeprom_func_p)(struct elink_phy *phy, + struct elink_params *params, + uint8_t dev_addr, uint16_t addr, + uint8_t byte_cnt, + uint8_t *o_buf, uint8_t); +/********************************************************/ +#define ELINK_ETH_HLEN 14 +/* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */ +#define ELINK_ETH_OVREHEAD (ELINK_ETH_HLEN + 8 + 8) +#define ELINK_ETH_MIN_PACKET_SIZE 60 +#define ELINK_ETH_MAX_PACKET_SIZE 1500 +#define ELINK_ETH_MAX_JUMBO_PACKET_SIZE 9600 +#define ELINK_MDIO_ACCESS_TIMEOUT 1000 +#define WC_LANE_MAX 4 +#define I2C_SWITCH_WIDTH 2 +#define I2C_BSC0 0 +#define I2C_BSC1 1 +#define I2C_WA_RETRY_CNT 3 +#define I2C_WA_PWR_ITER (I2C_WA_RETRY_CNT - 1) +#define MCPR_IMC_COMMAND_READ_OP 1 +#define MCPR_IMC_COMMAND_WRITE_OP 2 + +/* LED Blink rate that will achieve ~15.9Hz */ +#define LED_BLINK_RATE_VAL_E3 354 +#define LED_BLINK_RATE_VAL_E1X_E2 480 +/***********************************************************/ +/* Shortcut definitions */ +/***********************************************************/ + +#define ELINK_NIG_LATCH_BC_ENABLE_MI_INT 0 + +#define ELINK_NIG_STATUS_EMAC0_MI_INT \ + NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_EMAC0_MISC_MI_INT +#define ELINK_NIG_STATUS_XGXS0_LINK10G \ + NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK10G +#define ELINK_NIG_STATUS_XGXS0_LINK_STATUS \ + NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS +#define ELINK_NIG_STATUS_XGXS0_LINK_STATUS_SIZE \ + NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS_SIZE +#define ELINK_NIG_STATUS_SERDES0_LINK_STATUS \ + NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_SERDES0_LINK_STATUS +#define ELINK_NIG_MASK_MI_INT \ + NIG_MASK_INTERRUPT_PORT0_REG_MASK_EMAC0_MISC_MI_INT +#define ELINK_NIG_MASK_XGXS0_LINK10G \ + NIG_MASK_INTERRUPT_PORT0_REG_MASK_XGXS0_LINK10G +#define ELINK_NIG_MASK_XGXS0_LINK_STATUS \ + NIG_MASK_INTERRUPT_PORT0_REG_MASK_XGXS0_LINK_STATUS +#define ELINK_NIG_MASK_SERDES0_LINK_STATUS \ + NIG_MASK_INTERRUPT_PORT0_REG_MASK_SERDES0_LINK_STATUS + +#define ELINK_MDIO_AN_CL73_OR_37_COMPLETE \ + (MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE | \ + MDIO_GP_STATUS_TOP_AN_STATUS1_CL37_AUTONEG_COMPLETE) + +#define ELINK_XGXS_RESET_BITS \ + (MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_RSTB_HW | \ + MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_IDDQ | \ + MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_PWRDWN | \ + MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_PWRDWN_SD | \ + MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_TXD_FIFO_RSTB) + +#define ELINK_SERDES_RESET_BITS \ + (MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_RSTB_HW | \ + 
MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_IDDQ | \ + MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_PWRDWN | \ + MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_PWRDWN_SD) + +#define ELINK_AUTONEG_CL37 SHARED_HW_CFG_AN_ENABLE_CL37 +#define ELINK_AUTONEG_CL73 SHARED_HW_CFG_AN_ENABLE_CL73 +#define ELINK_AUTONEG_BAM SHARED_HW_CFG_AN_ENABLE_BAM +#define ELINK_AUTONEG_PARALLEL \ + SHARED_HW_CFG_AN_ENABLE_PARALLEL_DETECTION +#define ELINK_AUTONEG_SGMII_FIBER_AUTODET \ + SHARED_HW_CFG_AN_EN_SGMII_FIBER_AUTO_DETECT +#define ELINK_AUTONEG_REMOTE_PHY SHARED_HW_CFG_AN_ENABLE_REMOTE_PHY + +#define ELINK_GP_STATUS_PAUSE_RSOLUTION_TXSIDE \ + MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_TXSIDE +#define ELINK_GP_STATUS_PAUSE_RSOLUTION_RXSIDE \ + MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_RXSIDE +#define ELINK_GP_STATUS_SPEED_MASK \ + MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_MASK +#define ELINK_GP_STATUS_10M MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10M +#define ELINK_GP_STATUS_100M MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_100M +#define ELINK_GP_STATUS_1G MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_1G +#define ELINK_GP_STATUS_2_5G MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_2_5G +#define ELINK_GP_STATUS_5G MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_5G +#define ELINK_GP_STATUS_6G MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_6G +#define ELINK_GP_STATUS_10G_HIG \ + MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_HIG +#define ELINK_GP_STATUS_10G_CX4 \ + MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_CX4 +#define ELINK_GP_STATUS_1G_KX MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_1G_KX +#define ELINK_GP_STATUS_10G_KX4 \ + MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KX4 +#define ELINK_GP_STATUS_10G_KR MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KR +#define ELINK_GP_STATUS_10G_XFI MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_XFI +#define ELINK_GP_STATUS_20G_DXGXS MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_20G_DXGXS +#define ELINK_GP_STATUS_10G_SFI MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_SFI +#define ELINK_GP_STATUS_20G_KR2 MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_20G_KR2 +#define ELINK_LINK_10THD LINK_STATUS_SPEED_AND_DUPLEX_10THD +#define ELINK_LINK_10TFD LINK_STATUS_SPEED_AND_DUPLEX_10TFD +#define ELINK_LINK_100TXHD LINK_STATUS_SPEED_AND_DUPLEX_100TXHD +#define ELINK_LINK_100T4 LINK_STATUS_SPEED_AND_DUPLEX_100T4 +#define ELINK_LINK_100TXFD LINK_STATUS_SPEED_AND_DUPLEX_100TXFD +#define ELINK_LINK_1000THD LINK_STATUS_SPEED_AND_DUPLEX_1000THD +#define ELINK_LINK_1000TFD LINK_STATUS_SPEED_AND_DUPLEX_1000TFD +#define ELINK_LINK_1000XFD LINK_STATUS_SPEED_AND_DUPLEX_1000XFD +#define ELINK_LINK_2500THD LINK_STATUS_SPEED_AND_DUPLEX_2500THD +#define ELINK_LINK_2500TFD LINK_STATUS_SPEED_AND_DUPLEX_2500TFD +#define ELINK_LINK_2500XFD LINK_STATUS_SPEED_AND_DUPLEX_2500XFD +#define ELINK_LINK_10GTFD LINK_STATUS_SPEED_AND_DUPLEX_10GTFD +#define ELINK_LINK_10GXFD LINK_STATUS_SPEED_AND_DUPLEX_10GXFD +#define ELINK_LINK_20GTFD LINK_STATUS_SPEED_AND_DUPLEX_20GTFD +#define ELINK_LINK_20GXFD LINK_STATUS_SPEED_AND_DUPLEX_20GXFD + +#define ELINK_LINK_UPDATE_MASK \ + (LINK_STATUS_SPEED_AND_DUPLEX_MASK | \ + LINK_STATUS_LINK_UP | \ + LINK_STATUS_PHYSICAL_LINK_FLAG | \ + LINK_STATUS_AUTO_NEGOTIATE_COMPLETE | \ + LINK_STATUS_RX_FLOW_CONTROL_FLAG_MASK | \ + LINK_STATUS_TX_FLOW_CONTROL_FLAG_MASK | \ + LINK_STATUS_PARALLEL_DETECTION_FLAG_MASK | \ + LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE | \ + LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE) + +#define ELINK_SFP_EEPROM_CON_TYPE_ADDR 0x2 + #define 
ELINK_SFP_EEPROM_CON_TYPE_VAL_UNKNOWN 0x0 + #define ELINK_SFP_EEPROM_CON_TYPE_VAL_LC 0x7 + #define ELINK_SFP_EEPROM_CON_TYPE_VAL_COPPER 0x21 + #define ELINK_SFP_EEPROM_CON_TYPE_VAL_RJ45 0x22 + + +#define ELINK_SFP_EEPROM_10G_COMP_CODE_ADDR 0x3 + #define ELINK_SFP_EEPROM_10G_COMP_CODE_SR_MASK (1 << 4) + #define ELINK_SFP_EEPROM_10G_COMP_CODE_LR_MASK (1 << 5) + #define ELINK_SFP_EEPROM_10G_COMP_CODE_LRM_MASK (1 << 6) + +#define ELINK_SFP_EEPROM_1G_COMP_CODE_ADDR 0x6 + #define ELINK_SFP_EEPROM_1G_COMP_CODE_SX (1 << 0) + #define ELINK_SFP_EEPROM_1G_COMP_CODE_LX (1 << 1) + #define ELINK_SFP_EEPROM_1G_COMP_CODE_CX (1 << 2) + #define ELINK_SFP_EEPROM_1G_COMP_CODE_BASE_T (1 << 3) + +#define ELINK_SFP_EEPROM_FC_TX_TECH_ADDR 0x8 + #define ELINK_SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE 0x4 + #define ELINK_SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE 0x8 + +#define ELINK_SFP_EEPROM_OPTIONS_ADDR 0x40 + #define ELINK_SFP_EEPROM_OPTIONS_LINEAR_RX_OUT_MASK 0x1 +#define ELINK_SFP_EEPROM_OPTIONS_SIZE 2 + +#define ELINK_EDC_MODE_LINEAR 0x0022 +#define ELINK_EDC_MODE_LIMITING 0x0044 +#define ELINK_EDC_MODE_PASSIVE_DAC 0x0055 +#define ELINK_EDC_MODE_ACTIVE_DAC 0x0066 + +/* ETS defines*/ +#define DCBX_INVALID_COS (0xFF) + +#define ELINK_ETS_BW_LIMIT_CREDIT_UPPER_BOUND (0x5000) +#define ELINK_ETS_BW_LIMIT_CREDIT_WEIGHT (0x5000) +#define ELINK_ETS_E3B0_NIG_MIN_W_VAL_UP_TO_10GBPS (1360) +#define ELINK_ETS_E3B0_NIG_MIN_W_VAL_20GBPS (2720) +#define ELINK_ETS_E3B0_PBF_MIN_W_VAL (10000) + +#define ELINK_MAX_PACKET_SIZE (9700) +#define MAX_KR_LINK_RETRY 4 +#define DEFAULT_TX_DRV_BRDCT 2 +#define DEFAULT_TX_DRV_IFIR 0 +#define DEFAULT_TX_DRV_POST2 3 +#define DEFAULT_TX_DRV_IPRE_DRIVER 6 + +/**********************************************************/ +/* INTERFACE */ +/**********************************************************/ + +#define CL22_WR_OVER_CL45(_sc, _phy, _bank, _addr, _val) \ + elink_cl45_write(_sc, _phy, \ + (_phy)->def_md_devad, \ + (_bank + (_addr & 0xf)), \ + _val) + +#define CL22_RD_OVER_CL45(_sc, _phy, _bank, _addr, _val) \ + elink_cl45_read(_sc, _phy, \ + (_phy)->def_md_devad, \ + (_bank + (_addr & 0xf)), \ + _val) + +static elink_status_t elink_check_half_open_conn(struct elink_params *params, + struct elink_vars *vars, uint8_t notify); +static elink_status_t elink_sfp_module_detection(struct elink_phy *phy, + struct elink_params *params); + +static uint32_t elink_bits_en(struct bnx2x_softc *sc, uint32_t reg, uint32_t bits) +{ + uint32_t val = REG_RD(sc, reg); + + val |= bits; + REG_WR(sc, reg, val); + return val; +} + +static uint32_t elink_bits_dis(struct bnx2x_softc *sc, uint32_t reg, + uint32_t bits) +{ + uint32_t val = REG_RD(sc, reg); + + val &= ~bits; + REG_WR(sc, reg, val); + return val; +} + +/* + * elink_check_lfa - This function checks if link reinitialization is required, + * or link flap can be avoided. + * + * @params: link parameters + * Returns 0 if Link Flap Avoidance conditions are met otherwise, the failed + * condition code. + */ +static int elink_check_lfa(struct elink_params *params) +{ + uint32_t link_status, cfg_idx, lfa_mask, cfg_size; + uint32_t cur_speed_cap_mask, cur_req_fc_auto_adv, additional_config; + uint32_t saved_val, req_val, eee_status; + struct bnx2x_softc *sc = params->sc; + + additional_config = + REG_RD(sc, params->lfa_base + + offsetof(struct shmem_lfa, additional_config)); + + /* NOTE: must be first condition checked - + * to verify DCC bit is cleared in any case! 
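+	 * The checks that follow compare the parameters saved in shmem_lfa
+	 * against the currently requested ones: link up, boot-from-SAN,
+	 * loopback mode, MFW LFA support, then duplex, flow control, line
+	 * speed, speed capability mask, requested flow-control autoneg and
+	 * EEE mode. Any mismatch returns the corresponding LFA_* condition
+	 * code, meaning Link Flap Avoidance cannot be used.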
+ */ + if (additional_config & NO_LFA_DUE_TO_DCC_MASK) { + ELINK_DEBUG_P0(sc, "No LFA due to DCC flap after clp exit"); + REG_WR(sc, params->lfa_base + + offsetof(struct shmem_lfa, additional_config), + additional_config & ~NO_LFA_DUE_TO_DCC_MASK); + return LFA_DCC_LFA_DISABLED; + } + + /* Verify that link is up */ + link_status = REG_RD(sc, params->shmem_base + + offsetof(struct shmem_region, + port_mb[params->port].link_status)); + if (!(link_status & LINK_STATUS_LINK_UP)) + return LFA_LINK_DOWN; + + /* if loaded after BOOT from SAN, don't flap the link in any case and + * rely on link set by preboot driver + */ + if (params->feature_config_flags & ELINK_FEATURE_CONFIG_BOOT_FROM_SAN) + return 0; + + /* Verify that loopback mode is not set */ + if (params->loopback_mode) + return LFA_LOOPBACK_ENABLED; + + /* Verify that MFW supports LFA */ + if (!params->lfa_base) + return LFA_MFW_IS_TOO_OLD; + + if (params->num_phys == 3) { + cfg_size = 2; + lfa_mask = 0xffffffff; + } else { + cfg_size = 1; + lfa_mask = 0xffff; + } + + /* Compare Duplex */ + saved_val = REG_RD(sc, params->lfa_base + + offsetof(struct shmem_lfa, req_duplex)); + req_val = params->req_duplex[0] | (params->req_duplex[1] << 16); + if ((saved_val & lfa_mask) != (req_val & lfa_mask)) { + ELINK_DEBUG_P2(sc, "Duplex mismatch %x vs. %x", + (saved_val & lfa_mask), (req_val & lfa_mask)); + return LFA_DUPLEX_MISMATCH; + } + /* Compare Flow Control */ + saved_val = REG_RD(sc, params->lfa_base + + offsetof(struct shmem_lfa, req_flow_ctrl)); + req_val = params->req_flow_ctrl[0] | (params->req_flow_ctrl[1] << 16); + if ((saved_val & lfa_mask) != (req_val & lfa_mask)) { + ELINK_DEBUG_P2(sc, "Flow control mismatch %x vs. %x", + (saved_val & lfa_mask), (req_val & lfa_mask)); + return LFA_FLOW_CTRL_MISMATCH; + } + /* Compare Link Speed */ + saved_val = REG_RD(sc, params->lfa_base + + offsetof(struct shmem_lfa, req_line_speed)); + req_val = params->req_line_speed[0] | (params->req_line_speed[1] << 16); + if ((saved_val & lfa_mask) != (req_val & lfa_mask)) { + ELINK_DEBUG_P2(sc, "Link speed mismatch %x vs. %x", + (saved_val & lfa_mask), (req_val & lfa_mask)); + return LFA_LINK_SPEED_MISMATCH; + } + + for (cfg_idx = 0; cfg_idx < cfg_size; cfg_idx++) { + cur_speed_cap_mask = REG_RD(sc, params->lfa_base + + offsetof(struct shmem_lfa, + speed_cap_mask[cfg_idx])); + + if (cur_speed_cap_mask != params->speed_cap_mask[cfg_idx]) { + ELINK_DEBUG_P2(sc, "Speed Cap mismatch %x vs. %x", + cur_speed_cap_mask, + params->speed_cap_mask[cfg_idx]); + return LFA_SPEED_CAP_MISMATCH; + } + } + + cur_req_fc_auto_adv = + REG_RD(sc, params->lfa_base + + offsetof(struct shmem_lfa, additional_config)) & + REQ_FC_AUTO_ADV_MASK; + + if ((uint16_t)cur_req_fc_auto_adv != params->req_fc_auto_adv) { + ELINK_DEBUG_P2(sc, "Flow Ctrl AN mismatch %x vs. %x", + cur_req_fc_auto_adv, params->req_fc_auto_adv); + return LFA_FLOW_CTRL_MISMATCH; + } + + eee_status = REG_RD(sc, params->shmem2_base + + offsetof(struct shmem2_region, + eee_status[params->port])); + + if (((eee_status & SHMEM_EEE_LPI_REQUESTED_BIT) ^ + (params->eee_mode & ELINK_EEE_MODE_ENABLE_LPI)) || + ((eee_status & SHMEM_EEE_REQUESTED_BIT) ^ + (params->eee_mode & ELINK_EEE_MODE_ADV_LPI))) { + ELINK_DEBUG_P2(sc, "EEE mismatch %x vs. 
%x", params->eee_mode, + eee_status); + return LFA_EEE_MISMATCH; + } + + /* LFA conditions are met */ + return 0; +} +/******************************************************************/ +/* EPIO/GPIO section */ +/******************************************************************/ +static void elink_get_epio(struct bnx2x_softc *sc, uint32_t epio_pin, + uint32_t *en) +{ + uint32_t epio_mask, gp_oenable; + *en = 0; + /* Sanity check */ + if (epio_pin > 31) { + ELINK_DEBUG_P1(sc, "Invalid EPIO pin %d to get", epio_pin); + return; + } + + epio_mask = 1 << epio_pin; + /* Set this EPIO to output */ + gp_oenable = REG_RD(sc, MCP_REG_MCPR_GP_OENABLE); + REG_WR(sc, MCP_REG_MCPR_GP_OENABLE, gp_oenable & ~epio_mask); + + *en = (REG_RD(sc, MCP_REG_MCPR_GP_INPUTS) & epio_mask) >> epio_pin; +} +static void elink_set_epio(struct bnx2x_softc *sc, uint32_t epio_pin, uint32_t en) +{ + uint32_t epio_mask, gp_output, gp_oenable; + + /* Sanity check */ + if (epio_pin > 31) { + ELINK_DEBUG_P1(sc, "Invalid EPIO pin %d to set", epio_pin); + return; + } + ELINK_DEBUG_P2(sc, "Setting EPIO pin %d to %d", epio_pin, en); + epio_mask = 1 << epio_pin; + /* Set this EPIO to output */ + gp_output = REG_RD(sc, MCP_REG_MCPR_GP_OUTPUTS); + if (en) + gp_output |= epio_mask; + else + gp_output &= ~epio_mask; + + REG_WR(sc, MCP_REG_MCPR_GP_OUTPUTS, gp_output); + + /* Set the value for this EPIO */ + gp_oenable = REG_RD(sc, MCP_REG_MCPR_GP_OENABLE); + REG_WR(sc, MCP_REG_MCPR_GP_OENABLE, gp_oenable | epio_mask); +} + +static void elink_set_cfg_pin(struct bnx2x_softc *sc, uint32_t pin_cfg, + uint32_t val) +{ + if (pin_cfg == PIN_CFG_NA) + return; + if (pin_cfg >= PIN_CFG_EPIO0) { + elink_set_epio(sc, pin_cfg - PIN_CFG_EPIO0, val); + } else { + uint8_t gpio_num = (pin_cfg - PIN_CFG_GPIO0_P0) & 0x3; + uint8_t gpio_port = (pin_cfg - PIN_CFG_GPIO0_P0) >> 2; + elink_cb_gpio_write(sc, gpio_num, (uint8_t)val, gpio_port); + } +} + +static uint32_t elink_get_cfg_pin(struct bnx2x_softc *sc, uint32_t pin_cfg, + uint32_t *val) +{ + if (pin_cfg == PIN_CFG_NA) + return ELINK_STATUS_ERROR; + if (pin_cfg >= PIN_CFG_EPIO0) { + elink_get_epio(sc, pin_cfg - PIN_CFG_EPIO0, val); + } else { + uint8_t gpio_num = (pin_cfg - PIN_CFG_GPIO0_P0) & 0x3; + uint8_t gpio_port = (pin_cfg - PIN_CFG_GPIO0_P0) >> 2; + *val = elink_cb_gpio_read(sc, gpio_num, gpio_port); + } + return ELINK_STATUS_OK; +} + +/******************************************************************/ +/* ETS section */ +/******************************************************************/ +static void elink_ets_e2e3a0_disabled(struct elink_params *params) +{ + /* ETS disabled configuration*/ + struct bnx2x_softc *sc = params->sc; + + ELINK_DEBUG_P0(sc, "ETS E2E3 disabled configuration"); + + /* mapping between entry priority to client number (0,1,2 -debug and + * management clients, 3 - COS0 client, 4 - COS client)(HIGHEST) + * 3bits client num. + * PRI4 | PRI3 | PRI2 | PRI1 | PRI0 + * cos1-100 cos0-011 dbg1-010 dbg0-001 MCP-000 + */ + + REG_WR(sc, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, 0x4688); + /* Bitmap of 5bits length. Each bit specifies whether the entry behaves + * as strict. Bits 0,1,2 - debug and management entries, 3 - + * COS0 entry, 4 - COS1 entry. 
+ * COS1 | COS0 | DEBUG1 | DEBUG0 | MGMT + * bit4 bit3 bit2 bit1 bit0 + * MCP and debug are strict + */ + + REG_WR(sc, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x7); + /* defines which entries (clients) are subjected to WFQ arbitration */ + REG_WR(sc, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0); + /* For strict priority entries defines the number of consecutive + * slots for the highest priority. + */ + REG_WR(sc, NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100); + /* mapping between the CREDIT_WEIGHT registers and actual client + * numbers + */ + REG_WR(sc, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP, 0); + REG_WR(sc, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0, 0); + REG_WR(sc, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1, 0); + + REG_WR(sc, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0, 0); + REG_WR(sc, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_1, 0); + REG_WR(sc, PBF_REG_HIGH_PRIORITY_COS_NUM, 0); + /* ETS mode disable */ + REG_WR(sc, PBF_REG_ETS_ENABLED, 0); + /* If ETS mode is enabled (there is no strict priority) defines a WFQ + * weight for COS0/COS1. + */ + REG_WR(sc, PBF_REG_COS0_WEIGHT, 0x2710); + REG_WR(sc, PBF_REG_COS1_WEIGHT, 0x2710); + /* Upper bound that COS0_WEIGHT can reach in the WFQ arbiter */ + REG_WR(sc, PBF_REG_COS0_UPPER_BOUND, 0x989680); + REG_WR(sc, PBF_REG_COS1_UPPER_BOUND, 0x989680); + /* Defines the number of consecutive slots for the strict priority */ + REG_WR(sc, PBF_REG_NUM_STRICT_ARB_SLOTS, 0); +} +/****************************************************************************** + * Description: + * Getting min_w_val will be set according to line speed . + *. + ******************************************************************************/ +static uint32_t elink_ets_get_min_w_val_nig(const struct elink_vars *vars) +{ + uint32_t min_w_val = 0; + /* Calculate min_w_val.*/ + if (vars->link_up) { + if (vars->line_speed == ELINK_SPEED_20000) + min_w_val = ELINK_ETS_E3B0_NIG_MIN_W_VAL_20GBPS; + else + min_w_val = ELINK_ETS_E3B0_NIG_MIN_W_VAL_UP_TO_10GBPS; + } else { + min_w_val = ELINK_ETS_E3B0_NIG_MIN_W_VAL_20GBPS; + } + /* If the link isn't up (static configuration for example ) The + * link will be according to 20GBPS. + */ + return min_w_val; +} +/****************************************************************************** + * Description: + * Getting credit upper bound form min_w_val. + *. + ******************************************************************************/ +static uint32_t elink_ets_get_credit_upper_bound(const uint32_t min_w_val) +{ + const uint32_t credit_upper_bound = (uint32_t) + ELINK_MAXVAL((150 * min_w_val), + ELINK_MAX_PACKET_SIZE); + return credit_upper_bound; +} +/****************************************************************************** + * Description: + * Set credit upper bound for NIG. + *. + ******************************************************************************/ +static void elink_ets_e3b0_set_credit_upper_bound_nig( + const struct elink_params *params, + const uint32_t min_w_val) +{ + struct bnx2x_softc *sc = params->sc; + const uint8_t port = params->port; + const uint32_t credit_upper_bound = + elink_ets_get_credit_upper_bound(min_w_val); + + REG_WR(sc, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_0 : + NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0, credit_upper_bound); + REG_WR(sc, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_1 : + NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_1, credit_upper_bound); + REG_WR(sc, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_2 : + NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_2, credit_upper_bound); + REG_WR(sc, (port) ? 
NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_3 :
+	       NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_3, credit_upper_bound);
+	REG_WR(sc, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_4 :
+	       NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_4, credit_upper_bound);
+	REG_WR(sc, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_5 :
+	       NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_5, credit_upper_bound);
+
+	if (!port) {
+		REG_WR(sc, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_6,
+		       credit_upper_bound);
+		REG_WR(sc, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_7,
+		       credit_upper_bound);
+		REG_WR(sc, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_8,
+		       credit_upper_bound);
+	}
+}
+/******************************************************************************
+ * Description:
+ *	Will return the NIG ETS registers to init values, except
+ *	credit_upper_bound.
+ *	That isn't used in this configuration (no WFQ is enabled) and will be
+ *	configured according to spec.
+ *
+ ******************************************************************************/
+static void elink_ets_e3b0_nig_disabled(const struct elink_params *params,
+					const struct elink_vars *vars)
+{
+	struct bnx2x_softc *sc = params->sc;
+	const uint8_t port = params->port;
+	const uint32_t min_w_val = elink_ets_get_min_w_val_nig(vars);
+	/* Mapping between entry priority to client number (0,1,2 - debug and
+	 * management clients, 3 - COS0 client, 4 - COS1, ... 8 -
+	 * COS5)(HIGHEST) 4 bits client num. TODO_ETS - Should be done by
+	 * reset value or init tool
+	 */
+	if (port) {
+		REG_WR(sc, NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_LSB, 0x543210);
+		REG_WR(sc, NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_MSB, 0x0);
+	} else {
+		REG_WR(sc, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_LSB, 0x76543210);
+		REG_WR(sc, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_MSB, 0x8);
+	}
+	/* For strict priority entries defines the number of consecutive
+	 * slots for the highest priority.
+	 */
+	REG_WR(sc, (port) ? NIG_REG_P1_TX_ARB_NUM_STRICT_ARB_SLOTS :
+	       NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100);
+	/* Mapping between the CREDIT_WEIGHT registers and actual client
+	 * numbers
+	 */
+	if (port) {
+		/* Port 1 has 6 COS */
+		REG_WR(sc, NIG_REG_P1_TX_ARB_CLIENT_CREDIT_MAP2_LSB, 0x210543);
+		REG_WR(sc, NIG_REG_P1_TX_ARB_CLIENT_CREDIT_MAP2_MSB, 0x0);
+	} else {
+		/* Port 0 has 9 COS */
+		REG_WR(sc, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_LSB,
+		       0x43210876);
+		REG_WR(sc, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_MSB, 0x5);
+	}
+
+	/* Bitmap of 5bits length. Each bit specifies whether the entry behaves
+	 * as strict. Bits 0,1,2 - debug and management entries, 3 -
+	 * COS0 entry, 4 - COS1 entry.
+	 * COS1 | COS0 | DEBUG1 | DEBUG0 | MGMT
+	 * bit4   bit3   bit2     bit1     bit0
+	 * MCP and debug are strict
+	 */
+	if (port)
+		REG_WR(sc, NIG_REG_P1_TX_ARB_CLIENT_IS_STRICT, 0x3f);
+	else
+		REG_WR(sc, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x1ff);
+	/* defines which entries (clients) are subjected to WFQ arbitration */
+	REG_WR(sc, (port) ? NIG_REG_P1_TX_ARB_CLIENT_IS_SUBJECT2WFQ :
+	       NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0);
+
+	/* Please note that the register addresses are not contiguous, so a
+	 * for loop here is not appropriate. In 2 port mode port0 only COS0-5
+	 * can be used; DEBUG0, DEBUG1 and MGMT are never used for WFQ. In 4
+	 * port mode port1 only COS0-2 can be used; DEBUG0, DEBUG1 and MGMT
+	 * are never used for WFQ.
+	 */
+	REG_WR(sc, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_0 :
+	       NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0, 0x0);
+	REG_WR(sc, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_1 :
+	       NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1, 0x0);
+	REG_WR(sc, (port) ?
NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_2 : + NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_2, 0x0); + REG_WR(sc, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_3 : + NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_3, 0x0); + REG_WR(sc, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_4 : + NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_4, 0x0); + REG_WR(sc, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_5 : + NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_5, 0x0); + if (!port) { + REG_WR(sc, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_6, 0x0); + REG_WR(sc, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_7, 0x0); + REG_WR(sc, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_8, 0x0); + } + + elink_ets_e3b0_set_credit_upper_bound_nig(params, min_w_val); +} +/****************************************************************************** + * Description: + * Set credit upper bound for PBF. + *. + ******************************************************************************/ +static void elink_ets_e3b0_set_credit_upper_bound_pbf( + const struct elink_params *params, + const uint32_t min_w_val) +{ + struct bnx2x_softc *sc = params->sc; + const uint32_t credit_upper_bound = + elink_ets_get_credit_upper_bound(min_w_val); + const uint8_t port = params->port; + uint32_t base_upper_bound = 0; + uint8_t max_cos = 0; + uint8_t i = 0; + /* In 2 port mode port0 has COS0-5 that can be used for WFQ.In 4 + * port mode port1 has COS0-2 that can be used for WFQ. + */ + if (!port) { + base_upper_bound = PBF_REG_COS0_UPPER_BOUND_P0; + max_cos = ELINK_DCBX_E3B0_MAX_NUM_COS_PORT0; + } else { + base_upper_bound = PBF_REG_COS0_UPPER_BOUND_P1; + max_cos = ELINK_DCBX_E3B0_MAX_NUM_COS_PORT1; + } + + for (i = 0; i < max_cos; i++) + REG_WR(sc, base_upper_bound + (i << 2), credit_upper_bound); +} + +/****************************************************************************** + * Description: + * Will return the PBF ETS registers to init values.Except + * credit_upper_bound. + * That isn't used in this configuration (No WFQ is enabled) and will be + * configured according to spec + *. + ******************************************************************************/ +static void elink_ets_e3b0_pbf_disabled(const struct elink_params *params) +{ + struct bnx2x_softc *sc = params->sc; + const uint8_t port = params->port; + const uint32_t min_w_val_pbf = ELINK_ETS_E3B0_PBF_MIN_W_VAL; + uint8_t i = 0; + uint32_t base_weight = 0; + uint8_t max_cos = 0; + + /* Mapping between entry priority to client number 0 - COS0 + * client, 2 - COS1, ... 5 - COS5)(HIGHEST) 4bits client num. + * TODO_ETS - Should be done by reset value or init tool + */ + if (port) + /* 0x688 (|011|0 10|00 1|000) */ + REG_WR(sc, PBF_REG_ETS_ARB_PRIORITY_CLIENT_P1, 0x688); + else + /* (10 1|100 |011|0 10|00 1|000) */ + REG_WR(sc, PBF_REG_ETS_ARB_PRIORITY_CLIENT_P0, 0x2C688); + + /* TODO_ETS - Should be done by reset value or init tool */ + if (port) + /* 0x688 (|011|0 10|00 1|000)*/ + REG_WR(sc, PBF_REG_ETS_ARB_CLIENT_CREDIT_MAP_P1, 0x688); + else + /* 0x2C688 (10 1|100 |011|0 10|00 1|000) */ + REG_WR(sc, PBF_REG_ETS_ARB_CLIENT_CREDIT_MAP_P0, 0x2C688); + + REG_WR(sc, (port) ? PBF_REG_ETS_ARB_NUM_STRICT_ARB_SLOTS_P1 : + PBF_REG_ETS_ARB_NUM_STRICT_ARB_SLOTS_P0, 0x100); + + + REG_WR(sc, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P1 : + PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P0, 0); + + REG_WR(sc, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P1 : + PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P0, 0); + /* In 2 port mode port0 has COS0-5 that can be used for WFQ. + * In 4 port mode port1 has COS0-2 that can be used for WFQ. 
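+	 * Accordingly, the loop below only clears the COS0..COS(max_cos - 1)
+	 * weight registers of this port (base_weight + 4 * i), and the common
+	 * credit upper bound is then programmed through
+	 * elink_ets_e3b0_set_credit_upper_bound_pbf().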
+ */
+	if (!port) {
+		base_weight = PBF_REG_COS0_WEIGHT_P0;
+		max_cos = ELINK_DCBX_E3B0_MAX_NUM_COS_PORT0;
+	} else {
+		base_weight = PBF_REG_COS0_WEIGHT_P1;
+		max_cos = ELINK_DCBX_E3B0_MAX_NUM_COS_PORT1;
+	}
+
+	for (i = 0; i < max_cos; i++)
+		REG_WR(sc, base_weight + (0x4 * i), 0);
+
+	elink_ets_e3b0_set_credit_upper_bound_pbf(params, min_w_val_pbf);
+}
+/******************************************************************************
+ * Description:
+ *	E3B0 disable will basically return the values to init values.
+ *
+ ******************************************************************************/
+static elink_status_t elink_ets_e3b0_disabled(const struct elink_params *params,
+					      const struct elink_vars *vars)
+{
+	struct bnx2x_softc *sc = params->sc;
+
+	if (!CHIP_IS_E3B0(sc)) {
+		ELINK_DEBUG_P0(sc,
+			       "elink_ets_e3b0_disabled the chip isn't E3B0");
+		return ELINK_STATUS_ERROR;
+	}
+
+	elink_ets_e3b0_nig_disabled(params, vars);
+
+	elink_ets_e3b0_pbf_disabled(params);
+
+	return ELINK_STATUS_OK;
+}
+
+/******************************************************************************
+ * Description:
+ *	Disable will basically return the values to init values.
+ *
+ ******************************************************************************/
+elink_status_t elink_ets_disabled(struct elink_params *params,
+				  struct elink_vars *vars)
+{
+	struct bnx2x_softc *sc = params->sc;
+	elink_status_t elink_status = ELINK_STATUS_OK;
+
+	if ((CHIP_IS_E2(sc)) || (CHIP_IS_E3A0(sc))) {
+		elink_ets_e2e3a0_disabled(params);
+	} else if (CHIP_IS_E3B0(sc)) {
+		elink_status = elink_ets_e3b0_disabled(params, vars);
+	} else {
+		ELINK_DEBUG_P0(sc, "elink_ets_disabled - chip not supported");
+		return ELINK_STATUS_ERROR;
+	}
+
+	return elink_status;
+}
+
+/******************************************************************************
+ * Description:
+ *	Set the COS mapping to SP and BW; up to this point none of the COS
+ *	are set as SP or BW.
+ ******************************************************************************/
+static elink_status_t elink_ets_e3b0_cli_map(const struct elink_params *params,
+			__rte_unused const struct elink_ets_params *ets_params,
+			const uint8_t cos_sp_bitmap,
+			const uint8_t cos_bw_bitmap)
+{
+	struct bnx2x_softc *sc = params->sc;
+	const uint8_t port = params->port;
+	const uint8_t nig_cli_sp_bitmap = 0x7 | (cos_sp_bitmap << 3);
+	const uint8_t pbf_cli_sp_bitmap = cos_sp_bitmap;
+	const uint8_t nig_cli_subject2wfq_bitmap = cos_bw_bitmap << 3;
+	const uint8_t pbf_cli_subject2wfq_bitmap = cos_bw_bitmap;
+
+	REG_WR(sc, (port) ? NIG_REG_P1_TX_ARB_CLIENT_IS_STRICT :
+	       NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, nig_cli_sp_bitmap);
+
+	REG_WR(sc, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P1 :
+	       PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P0, pbf_cli_sp_bitmap);
+
+	REG_WR(sc, (port) ? NIG_REG_P1_TX_ARB_CLIENT_IS_SUBJECT2WFQ :
+	       NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ,
+	       nig_cli_subject2wfq_bitmap);
+
+	REG_WR(sc, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P1 :
+	       PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P0,
+	       pbf_cli_subject2wfq_bitmap);
+
+	return ELINK_STATUS_OK;
+}
+
+/******************************************************************************
+ * Description:
+ *	This function is needed because the NIG ARB_CREDIT_WEIGHT_X registers
+ *	are not contiguous, so ARB_CREDIT_WEIGHT_0 + offset is not suitable.
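+ *	The per-COS weight written by this function is
+ *	    cos_bw_nig = ((bw ? bw : 1) * min_w_val_nig) / total_bw
+ *	(and likewise for the PBF weight with min_w_val_pbf). For example,
+ *	with bw = 25, total_bw = 100 and a link of up to 10G
+ *	(min_w_val_nig = 1360), the NIG credit weight is
+ *	(25 * 1360) / 100 = 340.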
+ ******************************************************************************/ +static elink_status_t elink_ets_e3b0_set_cos_bw(struct bnx2x_softc *sc, + const uint8_t cos_entry, + const uint32_t min_w_val_nig, + const uint32_t min_w_val_pbf, + const uint16_t total_bw, + const uint8_t bw, + const uint8_t port) +{ + uint32_t nig_reg_address_crd_weight = 0; + uint32_t pbf_reg_address_crd_weight = 0; + /* Calculate and set BW for this COS - use 1 instead of 0 for BW */ + const uint32_t cos_bw_nig = ((bw ? bw : 1) * min_w_val_nig) / total_bw; + const uint32_t cos_bw_pbf = ((bw ? bw : 1) * min_w_val_pbf) / total_bw; + + switch (cos_entry) { + case 0: + nig_reg_address_crd_weight = + (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_0 : + NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0; + pbf_reg_address_crd_weight = (port) ? + PBF_REG_COS0_WEIGHT_P1 : PBF_REG_COS0_WEIGHT_P0; + break; + case 1: + nig_reg_address_crd_weight = (port) ? + NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_1 : + NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1; + pbf_reg_address_crd_weight = (port) ? + PBF_REG_COS1_WEIGHT_P1 : PBF_REG_COS1_WEIGHT_P0; + break; + case 2: + nig_reg_address_crd_weight = (port) ? + NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_2 : + NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_2; + + pbf_reg_address_crd_weight = (port) ? + PBF_REG_COS2_WEIGHT_P1 : PBF_REG_COS2_WEIGHT_P0; + break; + case 3: + if (port) + return ELINK_STATUS_ERROR; + nig_reg_address_crd_weight = + NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_3; + pbf_reg_address_crd_weight = + PBF_REG_COS3_WEIGHT_P0; + break; + case 4: + if (port) + return ELINK_STATUS_ERROR; + nig_reg_address_crd_weight = + NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_4; + pbf_reg_address_crd_weight = PBF_REG_COS4_WEIGHT_P0; + break; + case 5: + if (port) + return ELINK_STATUS_ERROR; + nig_reg_address_crd_weight = + NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_5; + pbf_reg_address_crd_weight = PBF_REG_COS5_WEIGHT_P0; + break; + } + + REG_WR(sc, nig_reg_address_crd_weight, cos_bw_nig); + + REG_WR(sc, pbf_reg_address_crd_weight, cos_bw_pbf); + + return ELINK_STATUS_OK; +} +/****************************************************************************** + * Description: + * Calculate the total BW.A value of 0 isn't legal. + * + ******************************************************************************/ +static elink_status_t elink_ets_e3b0_get_total_bw( + const struct elink_params *params, + struct elink_ets_params *ets_params, + uint16_t *total_bw) +{ + struct bnx2x_softc *sc = params->sc; + uint8_t cos_idx = 0; + uint8_t is_bw_cos_exist = 0; + + *total_bw = 0; + /* Calculate total BW requested */ + for (cos_idx = 0; cos_idx < ets_params->num_of_cos; cos_idx++) { + if (ets_params->cos[cos_idx].state == elink_cos_state_bw) { + is_bw_cos_exist = 1; + if (!ets_params->cos[cos_idx].params.bw_params.bw) { + ELINK_DEBUG_P0(sc, "elink_ets_E3B0_config BW" + " was set to 0"); + /* This is to prevent a state when ramrods + * can't be sent + */ + ets_params->cos[cos_idx].params.bw_params.bw + = 1; + } + *total_bw += + ets_params->cos[cos_idx].params.bw_params.bw; + } + } + + /* Check total BW is valid */ + if ((is_bw_cos_exist == 1) && (*total_bw != 100)) { + if (*total_bw == 0) { + ELINK_DEBUG_P0(sc, + "elink_ets_E3B0_config total BW shouldn't be 0"); + return ELINK_STATUS_ERROR; + } + ELINK_DEBUG_P0(sc, + "elink_ets_E3B0_config total BW should be 100"); + /* We can handle a case whre the BW isn't 100 this can happen + * if the TC are joined. 
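+	 * In that case the total is only reported in the debug log and the
+	 * configuration proceeds; only a total of 0 (with at least one
+	 * BW-mode COS configured) is rejected. A single COS requesting 0%
+	 * was already bumped to 1 above so that ramrods can still be sent.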
+ */ + } + return ELINK_STATUS_OK; +} + +/****************************************************************************** + * Description: + * Invalidate all the sp_pri_to_cos. + * + ******************************************************************************/ +static void elink_ets_e3b0_sp_pri_to_cos_init(uint8_t *sp_pri_to_cos) +{ + uint8_t pri = 0; + for (pri = 0; pri < ELINK_DCBX_MAX_NUM_COS; pri++) + sp_pri_to_cos[pri] = DCBX_INVALID_COS; +} +/****************************************************************************** + * Description: + * Calculate and set the SP (ARB_PRIORITY_CLIENT) NIG and PBF registers + * according to sp_pri_to_cos. + * + ******************************************************************************/ +static elink_status_t elink_ets_e3b0_sp_pri_to_cos_set( + const struct elink_params *params, + uint8_t *sp_pri_to_cos, + const uint8_t pri, + const uint8_t cos_entry) +{ + struct bnx2x_softc *sc = params->sc; + const uint8_t port = params->port; + const uint8_t max_num_of_cos = (port) ? + ELINK_DCBX_E3B0_MAX_NUM_COS_PORT1 : + ELINK_DCBX_E3B0_MAX_NUM_COS_PORT0; + + if (pri >= max_num_of_cos) { + ELINK_DEBUG_P0(sc, "elink_ets_e3b0_sp_pri_to_cos_set invalid " + "parameter Illegal strict priority"); + return ELINK_STATUS_ERROR; + } + + if (sp_pri_to_cos[pri] != DCBX_INVALID_COS) { + ELINK_DEBUG_P0(sc, "elink_ets_e3b0_sp_pri_to_cos_set invalid " + "parameter There can't be two COS's with " + "the same strict pri"); + return ELINK_STATUS_ERROR; + } + + sp_pri_to_cos[pri] = cos_entry; + return ELINK_STATUS_OK; +} + +/****************************************************************************** + * Description: + * Returns the correct value according to COS and priority in + * the sp_pri_cli register. + * + ******************************************************************************/ +static uint64_t elink_e3b0_sp_get_pri_cli_reg(const uint8_t cos, + const uint8_t cos_offset, + const uint8_t pri_set, + const uint8_t pri_offset, + const uint8_t entry_size) +{ + uint64_t pri_cli_nig = 0; + pri_cli_nig = ((uint64_t)(cos + cos_offset)) << (entry_size * + (pri_set + pri_offset)); + + return pri_cli_nig; +} +/****************************************************************************** + * Description: + * Returns the correct value according to COS and priority in the + * sp_pri_cli register for NIG. + * + ******************************************************************************/ +static uint64_t elink_e3b0_sp_get_pri_cli_reg_nig(const uint8_t cos, + const uint8_t pri_set) +{ + /* MCP Dbg0 and dbg1 are always with higher strict pri*/ + const uint8_t nig_cos_offset = 3; + const uint8_t nig_pri_offset = 3; + + return elink_e3b0_sp_get_pri_cli_reg(cos, nig_cos_offset, pri_set, + nig_pri_offset, 4); +} + +/****************************************************************************** + * Description: + * Returns the correct value according to COS and priority in the + * sp_pri_cli register for PBF. 
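+ *	Unlike the NIG variant, the PBF register packs plain 3-bit entries
+ *	with no client or priority offset; the NIG layout uses 4-bit entries
+ *	offset by 3 because MCP, DBG0 and DBG1 always occupy the highest
+ *	strict-priority slots there.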
+ * + ******************************************************************************/ +static uint64_t elink_e3b0_sp_get_pri_cli_reg_pbf(const uint8_t cos, + const uint8_t pri_set) +{ + const uint8_t pbf_cos_offset = 0; + const uint8_t pbf_pri_offset = 0; + + return elink_e3b0_sp_get_pri_cli_reg(cos, pbf_cos_offset, pri_set, + pbf_pri_offset, 3); +} + +/****************************************************************************** + * Description: + * Calculate and set the SP (ARB_PRIORITY_CLIENT) NIG and PBF registers + * according to sp_pri_to_cos.(which COS has higher priority) + * + ******************************************************************************/ +static elink_status_t elink_ets_e3b0_sp_set_pri_cli_reg( + const struct elink_params *params, + uint8_t *sp_pri_to_cos) +{ + struct bnx2x_softc *sc = params->sc; + uint8_t i = 0; + const uint8_t port = params->port; + /* MCP Dbg0 and dbg1 are always with higher strict pri*/ + uint64_t pri_cli_nig = 0x210; + uint32_t pri_cli_pbf = 0x0; + uint8_t pri_set = 0; + uint8_t pri_bitmask = 0; + const uint8_t max_num_of_cos = (port) ? + ELINK_DCBX_E3B0_MAX_NUM_COS_PORT1 : + ELINK_DCBX_E3B0_MAX_NUM_COS_PORT0; + + uint8_t cos_bit_to_set = (1 << max_num_of_cos) - 1; + + /* Set all the strict priority first */ + for (i = 0; i < max_num_of_cos; i++) { + if (sp_pri_to_cos[i] != DCBX_INVALID_COS) { + if (sp_pri_to_cos[i] >= ELINK_DCBX_MAX_NUM_COS) { + ELINK_DEBUG_P0(sc, + "elink_ets_e3b0_sp_set_pri_cli_reg " + "invalid cos entry"); + return ELINK_STATUS_ERROR; + } + + pri_cli_nig |= elink_e3b0_sp_get_pri_cli_reg_nig( + sp_pri_to_cos[i], pri_set); + + pri_cli_pbf |= elink_e3b0_sp_get_pri_cli_reg_pbf( + sp_pri_to_cos[i], pri_set); + pri_bitmask = 1 << sp_pri_to_cos[i]; + /* COS is used remove it from bitmap.*/ + if (!(pri_bitmask & cos_bit_to_set)) { + ELINK_DEBUG_P0(sc, + "elink_ets_e3b0_sp_set_pri_cli_reg " + "invalid There can't be two COS's with" + " the same strict pri"); + return ELINK_STATUS_ERROR; + } + cos_bit_to_set &= ~pri_bitmask; + pri_set++; + } + } + + /* Set all the Non strict priority i= COS*/ + for (i = 0; i < max_num_of_cos; i++) { + pri_bitmask = 1 << i; + /* Check if COS was already used for SP */ + if (pri_bitmask & cos_bit_to_set) { + /* COS wasn't used for SP */ + pri_cli_nig |= elink_e3b0_sp_get_pri_cli_reg_nig( + i, pri_set); + + pri_cli_pbf |= elink_e3b0_sp_get_pri_cli_reg_pbf( + i, pri_set); + /* COS is used remove it from bitmap.*/ + cos_bit_to_set &= ~pri_bitmask; + pri_set++; + } + } + + if (pri_set != max_num_of_cos) { + ELINK_DEBUG_P0(sc, "elink_ets_e3b0_sp_set_pri_cli_reg not all " + "entries were set"); + return ELINK_STATUS_ERROR; + } + + if (port) { + /* Only 6 usable clients*/ + REG_WR(sc, NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_LSB, + (uint32_t)pri_cli_nig); + + REG_WR(sc, PBF_REG_ETS_ARB_PRIORITY_CLIENT_P1, pri_cli_pbf); + } else { + /* Only 9 usable clients*/ + const uint32_t pri_cli_nig_lsb = (uint32_t)(pri_cli_nig); + const uint32_t pri_cli_nig_msb = (uint32_t) + ((pri_cli_nig >> 32) & 0xF); + + REG_WR(sc, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_LSB, + pri_cli_nig_lsb); + REG_WR(sc, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_MSB, + pri_cli_nig_msb); + + REG_WR(sc, PBF_REG_ETS_ARB_PRIORITY_CLIENT_P0, pri_cli_pbf); + } + return ELINK_STATUS_OK; +} + +/****************************************************************************** + * Description: + * Configure the COS to ETS according to BW and SP settings. 
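+ *	The sequence is: validate the chip and the requested number of COS,
+ *	reset the strict-priority map, sum up the requested bandwidth,
+ *	program the NIG/PBF credit upper bounds for the current link speed,
+ *	walk the COS entries setting either a BW weight or a strict-priority
+ *	slot, and finally program the ARB_PRIORITY_CLIENT and
+ *	IS_STRICT / IS_SUBJECT2WFQ client maps.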
+ ******************************************************************************/ +elink_status_t elink_ets_e3b0_config(const struct elink_params *params, + const struct elink_vars *vars, + struct elink_ets_params *ets_params) +{ + struct bnx2x_softc *sc = params->sc; + elink_status_t elink_status = ELINK_STATUS_OK; + const uint8_t port = params->port; + uint16_t total_bw = 0; + const uint32_t min_w_val_nig = elink_ets_get_min_w_val_nig(vars); + const uint32_t min_w_val_pbf = ELINK_ETS_E3B0_PBF_MIN_W_VAL; + uint8_t cos_bw_bitmap = 0; + uint8_t cos_sp_bitmap = 0; + uint8_t sp_pri_to_cos[ELINK_DCBX_MAX_NUM_COS] = {0}; + const uint8_t max_num_of_cos = (port) ? + ELINK_DCBX_E3B0_MAX_NUM_COS_PORT1 : + ELINK_DCBX_E3B0_MAX_NUM_COS_PORT0; + uint8_t cos_entry = 0; + + if (!CHIP_IS_E3B0(sc)) { + ELINK_DEBUG_P0(sc, + "elink_ets_e3b0_disabled the chip isn't E3B0"); + return ELINK_STATUS_ERROR; + } + + if (ets_params->num_of_cos > max_num_of_cos) { + ELINK_DEBUG_P0(sc, "elink_ets_E3B0_config the number of COS " + "isn't supported"); + return ELINK_STATUS_ERROR; + } + + /* Prepare sp strict priority parameters*/ + elink_ets_e3b0_sp_pri_to_cos_init(sp_pri_to_cos); + + /* Prepare BW parameters*/ + elink_status = elink_ets_e3b0_get_total_bw(params, ets_params, + &total_bw); + if (elink_status != ELINK_STATUS_OK) { + ELINK_DEBUG_P0(sc, + "elink_ets_E3B0_config get_total_bw failed"); + return ELINK_STATUS_ERROR; + } + + /* Upper bound is set according to current link speed (min_w_val + * should be the same for upper bound and COS credit val). + */ + elink_ets_e3b0_set_credit_upper_bound_nig(params, min_w_val_nig); + elink_ets_e3b0_set_credit_upper_bound_pbf(params, min_w_val_pbf); + + + for (cos_entry = 0; cos_entry < ets_params->num_of_cos; cos_entry++) { + if (elink_cos_state_bw == ets_params->cos[cos_entry].state) { + cos_bw_bitmap |= (1 << cos_entry); + /* The function also sets the BW in HW(not the mappin + * yet) + */ + elink_status = elink_ets_e3b0_set_cos_bw( + sc, cos_entry, min_w_val_nig, min_w_val_pbf, + total_bw, + ets_params->cos[cos_entry].params.bw_params.bw, + port); + } else if (elink_cos_state_strict == + ets_params->cos[cos_entry].state){ + cos_sp_bitmap |= (1 << cos_entry); + + elink_status = elink_ets_e3b0_sp_pri_to_cos_set( + params, + sp_pri_to_cos, + ets_params->cos[cos_entry].params.sp_params.pri, + cos_entry); + + } else { + ELINK_DEBUG_P0(sc, + "elink_ets_e3b0_config cos state not valid"); + return ELINK_STATUS_ERROR; + } + if (elink_status != ELINK_STATUS_OK) { + ELINK_DEBUG_P0(sc, + "elink_ets_e3b0_config set cos bw failed"); + return elink_status; + } + } + + /* Set SP register (which COS has higher priority) */ + elink_status = elink_ets_e3b0_sp_set_pri_cli_reg(params, + sp_pri_to_cos); + + if (elink_status != ELINK_STATUS_OK) { + ELINK_DEBUG_P0(sc, + "elink_ets_E3B0_config set_pri_cli_reg failed"); + return elink_status; + } + + /* Set client mapping of BW and strict */ + elink_status = elink_ets_e3b0_cli_map(params, ets_params, + cos_sp_bitmap, + cos_bw_bitmap); + + if (elink_status != ELINK_STATUS_OK) { + ELINK_DEBUG_P0(sc, "elink_ets_E3B0_config SP failed"); + return elink_status; + } + return ELINK_STATUS_OK; +} +static void elink_ets_bw_limit_common(const struct elink_params *params) +{ + /* ETS disabled configuration */ + struct bnx2x_softc *sc = params->sc; + ELINK_DEBUG_P0(sc, "ETS enabled BW limit configuration"); + /* Defines which entries (clients) are subjected to WFQ arbitration + * COS0 0x8 + * COS1 0x10 + */ + REG_WR(sc, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 
0x18); + /* Mapping between the ARB_CREDIT_WEIGHT registers and actual + * client numbers (WEIGHT_0 does not actually have to represent + * client 0) + * PRI4 | PRI3 | PRI2 | PRI1 | PRI0 + * cos1-001 cos0-000 dbg1-100 dbg0-011 MCP-010 + */ + REG_WR(sc, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP, 0x111A); + + REG_WR(sc, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0, + ELINK_ETS_BW_LIMIT_CREDIT_UPPER_BOUND); + REG_WR(sc, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_1, + ELINK_ETS_BW_LIMIT_CREDIT_UPPER_BOUND); + + /* ETS mode enabled*/ + REG_WR(sc, PBF_REG_ETS_ENABLED, 1); + + /* Defines the number of consecutive slots for the strict priority */ + REG_WR(sc, PBF_REG_NUM_STRICT_ARB_SLOTS, 0); + /* Bitmap of 5bits length. Each bit specifies whether the entry behaves + * as strict. Bits 0,1,2 - debug and management entries, 3 - COS0 + * entry, 4 - COS1 entry. + * COS1 | COS0 | DEBUG21 | DEBUG0 | MGMT + * bit4 bit3 bit2 bit1 bit0 + * MCP and debug are strict + */ + REG_WR(sc, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x7); + + /* Upper bound that COS0_WEIGHT can reach in the WFQ arbiter.*/ + REG_WR(sc, PBF_REG_COS0_UPPER_BOUND, + ELINK_ETS_BW_LIMIT_CREDIT_UPPER_BOUND); + REG_WR(sc, PBF_REG_COS1_UPPER_BOUND, + ELINK_ETS_BW_LIMIT_CREDIT_UPPER_BOUND); +} + +void elink_ets_bw_limit(const struct elink_params *params, + const uint32_t cos0_bw, + const uint32_t cos1_bw) +{ + /* ETS disabled configuration*/ + struct bnx2x_softc *sc = params->sc; + const uint32_t total_bw = cos0_bw + cos1_bw; + uint32_t cos0_credit_weight = 0; + uint32_t cos1_credit_weight = 0; + + ELINK_DEBUG_P0(sc, "ETS enabled BW limit configuration"); + + if ((!total_bw) || + (!cos0_bw) || + (!cos1_bw)) { + ELINK_DEBUG_P0(sc, "Total BW can't be zero"); + return; + } + + cos0_credit_weight = (cos0_bw * ELINK_ETS_BW_LIMIT_CREDIT_WEIGHT) / + total_bw; + cos1_credit_weight = (cos1_bw * ELINK_ETS_BW_LIMIT_CREDIT_WEIGHT) / + total_bw; + + elink_ets_bw_limit_common(params); + + REG_WR(sc, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0, cos0_credit_weight); + REG_WR(sc, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1, cos1_credit_weight); + + REG_WR(sc, PBF_REG_COS0_WEIGHT, cos0_credit_weight); + REG_WR(sc, PBF_REG_COS1_WEIGHT, cos1_credit_weight); +} + +elink_status_t elink_ets_strict(const struct elink_params *params, + const uint8_t strict_cos) +{ + /* ETS disabled configuration*/ + struct bnx2x_softc *sc = params->sc; + uint32_t val = 0; + + ELINK_DEBUG_P0(sc, "ETS enabled strict configuration"); + /* Bitmap of 5bits length. Each bit specifies whether the entry behaves + * as strict. Bits 0,1,2 - debug and management entries, + * 3 - COS0 entry, 4 - COS1 entry. + * COS1 | COS0 | DEBUG21 | DEBUG0 | MGMT + * bit4 bit3 bit2 bit1 bit0 + * MCP and debug are strict + */ + REG_WR(sc, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x1F); + /* For strict priority entries defines the number of consecutive slots + * for the highest priority. + */ + REG_WR(sc, NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100); + /* ETS mode disable */ + REG_WR(sc, PBF_REG_ETS_ENABLED, 0); + /* Defines the number of consecutive slots for the strict priority */ + REG_WR(sc, PBF_REG_NUM_STRICT_ARB_SLOTS, 0x100); + + /* Defines the number of consecutive slots for the strict priority */ + REG_WR(sc, PBF_REG_HIGH_PRIORITY_COS_NUM, strict_cos); + + /* Mapping between entry priority to client number (0,1,2 -debug and + * management clients, 3 - COS0 client, 4 - COS client)(HIGHEST) + * 3bits client num. 
+ * PRI4 | PRI3 | PRI2 | PRI1 | PRI0 + * dbg0-010 dbg1-001 cos1-100 cos0-011 MCP-000 + * dbg0-010 dbg1-001 cos0-011 cos1-100 MCP-000 + */ + val = (!strict_cos) ? 0x2318 : 0x22E0; + REG_WR(sc, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, val); + + return ELINK_STATUS_OK; +} + +/******************************************************************/ +/* PFC section */ +/******************************************************************/ +static void elink_update_pfc_xmac(struct elink_params *params, + struct elink_vars *vars, + __rte_unused uint8_t is_lb) +{ + struct bnx2x_softc *sc = params->sc; + uint32_t xmac_base; + uint32_t pause_val, pfc0_val, pfc1_val; + + /* XMAC base adrr */ + xmac_base = (params->port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0; + + /* Initialize pause and pfc registers */ + pause_val = 0x18000; + pfc0_val = 0xFFFF8000; + pfc1_val = 0x2; + + /* No PFC support */ + if (!(params->feature_config_flags & + ELINK_FEATURE_CONFIG_PFC_ENABLED)) { + + /* RX flow control - Process pause frame in receive direction + */ + if (vars->flow_ctrl & ELINK_FLOW_CTRL_RX) + pause_val |= XMAC_PAUSE_CTRL_REG_RX_PAUSE_EN; + + /* TX flow control - Send pause packet when buffer is full */ + if (vars->flow_ctrl & ELINK_FLOW_CTRL_TX) + pause_val |= XMAC_PAUSE_CTRL_REG_TX_PAUSE_EN; + } else {/* PFC support */ + pfc1_val |= XMAC_PFC_CTRL_HI_REG_PFC_REFRESH_EN | + XMAC_PFC_CTRL_HI_REG_PFC_STATS_EN | + XMAC_PFC_CTRL_HI_REG_RX_PFC_EN | + XMAC_PFC_CTRL_HI_REG_TX_PFC_EN | + XMAC_PFC_CTRL_HI_REG_FORCE_PFC_XON; + /* Write pause and PFC registers */ + REG_WR(sc, xmac_base + XMAC_REG_PAUSE_CTRL, pause_val); + REG_WR(sc, xmac_base + XMAC_REG_PFC_CTRL, pfc0_val); + REG_WR(sc, xmac_base + XMAC_REG_PFC_CTRL_HI, pfc1_val); + pfc1_val &= ~XMAC_PFC_CTRL_HI_REG_FORCE_PFC_XON; + + } + + /* Write pause and PFC registers */ + REG_WR(sc, xmac_base + XMAC_REG_PAUSE_CTRL, pause_val); + REG_WR(sc, xmac_base + XMAC_REG_PFC_CTRL, pfc0_val); + REG_WR(sc, xmac_base + XMAC_REG_PFC_CTRL_HI, pfc1_val); + + + /* Set MAC address for source TX Pause/PFC frames */ + REG_WR(sc, xmac_base + XMAC_REG_CTRL_SA_LO, + ((params->mac_addr[2] << 24) | + (params->mac_addr[3] << 16) | + (params->mac_addr[4] << 8) | + (params->mac_addr[5]))); + REG_WR(sc, xmac_base + XMAC_REG_CTRL_SA_HI, + ((params->mac_addr[0] << 8) | + (params->mac_addr[1]))); + + DELAY(30); +} + +static void elink_emac_get_pfc_stat(struct elink_params *params, + uint32_t pfc_frames_sent[2], + uint32_t pfc_frames_received[2]) +{ + /* Read pfc statistic */ + struct bnx2x_softc *sc = params->sc; + uint32_t emac_base = params->port ? 
GRCBASE_EMAC1 : GRCBASE_EMAC0; + uint32_t val_xon = 0; + uint32_t val_xoff = 0; + + ELINK_DEBUG_P0(sc, "pfc statistic read from EMAC"); + + /* PFC received frames */ + val_xoff = REG_RD(sc, emac_base + + EMAC_REG_RX_PFC_STATS_XOFF_RCVD); + val_xoff &= EMAC_REG_RX_PFC_STATS_XOFF_RCVD_COUNT; + val_xon = REG_RD(sc, emac_base + EMAC_REG_RX_PFC_STATS_XON_RCVD); + val_xon &= EMAC_REG_RX_PFC_STATS_XON_RCVD_COUNT; + + pfc_frames_received[0] = val_xon + val_xoff; + + /* PFC received sent */ + val_xoff = REG_RD(sc, emac_base + + EMAC_REG_RX_PFC_STATS_XOFF_SENT); + val_xoff &= EMAC_REG_RX_PFC_STATS_XOFF_SENT_COUNT; + val_xon = REG_RD(sc, emac_base + EMAC_REG_RX_PFC_STATS_XON_SENT); + val_xon &= EMAC_REG_RX_PFC_STATS_XON_SENT_COUNT; + + pfc_frames_sent[0] = val_xon + val_xoff; +} + +/* Read pfc statistic*/ +void elink_pfc_statistic(struct elink_params *params, struct elink_vars *vars, + uint32_t pfc_frames_sent[2], + uint32_t pfc_frames_received[2]) +{ + /* Read pfc statistic */ + struct bnx2x_softc *sc = params->sc; + + ELINK_DEBUG_P0(sc, "pfc statistic"); + + if (!vars->link_up) + return; + + if (vars->mac_type == ELINK_MAC_TYPE_EMAC) { + ELINK_DEBUG_P0(sc, "About to read PFC stats from EMAC"); + elink_emac_get_pfc_stat(params, pfc_frames_sent, + pfc_frames_received); + } +} +/******************************************************************/ +/* MAC/PBF section */ +/******************************************************************/ +static void elink_set_mdio_clk(struct bnx2x_softc *sc, + __rte_unused uint32_t chip_id, + uint32_t emac_base) +{ + uint32_t new_mode, cur_mode; + uint32_t clc_cnt; + /* Set clause 45 mode, slow down the MDIO clock to 2.5MHz + * (a value of 49==0x31) and make sure that the AUTO poll is off + */ + cur_mode = REG_RD(sc, emac_base + EMAC_REG_EMAC_MDIO_MODE); + + if (USES_WARPCORE(sc)) + clc_cnt = 74L << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT; + else + clc_cnt = 49L << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT; + + if (((cur_mode & EMAC_MDIO_MODE_CLOCK_CNT) == clc_cnt) && + (cur_mode & (EMAC_MDIO_MODE_CLAUSE_45))) + return; + + new_mode = cur_mode & + ~(EMAC_MDIO_MODE_AUTO_POLL | EMAC_MDIO_MODE_CLOCK_CNT); + new_mode |= clc_cnt; + new_mode |= (EMAC_MDIO_MODE_CLAUSE_45); + + ELINK_DEBUG_P2(sc, "Changing emac_mode from 0x%x to 0x%x", + cur_mode, new_mode); + REG_WR(sc, emac_base + EMAC_REG_EMAC_MDIO_MODE, new_mode); + DELAY(40); +} + +static uint8_t elink_is_4_port_mode(struct bnx2x_softc *sc) +{ + uint32_t port4mode_ovwr_val; + /* Check 4-port override enabled */ + port4mode_ovwr_val = REG_RD(sc, MISC_REG_PORT4MODE_EN_OVWR); + if (port4mode_ovwr_val & (1 << 0)) { + /* Return 4-port mode override value */ + return ((port4mode_ovwr_val & (1 << 1)) == (1 << 1)); + } + /* Return 4-port mode from input pin */ + return (uint8_t)REG_RD(sc, MISC_REG_PORT4MODE_EN); +} + +static void elink_set_mdio_emac_per_phy(struct bnx2x_softc *sc, + struct elink_params *params) +{ + uint8_t phy_index; + + /* Set mdio clock per phy */ + for (phy_index = ELINK_INT_PHY; phy_index < params->num_phys; + phy_index++) + elink_set_mdio_clk(sc, params->chip_id, + params->phy[phy_index].mdio_ctrl); +} + +static void elink_emac_init(struct elink_params *params, + __rte_unused struct elink_vars *vars) +{ + /* reset and unreset the emac core */ + struct bnx2x_softc *sc = params->sc; + uint8_t port = params->port; + uint32_t emac_base = port ? 
GRCBASE_EMAC1 : GRCBASE_EMAC0; + uint32_t val; + uint16_t timeout; + + REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, + (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port)); + DELAY(5); + REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, + (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port)); + + /* init emac - use read-modify-write */ + /* self clear reset */ + val = REG_RD(sc, emac_base + EMAC_REG_EMAC_MODE); + elink_cb_reg_write(sc, emac_base + EMAC_REG_EMAC_MODE, + (val | EMAC_MODE_RESET)); + + timeout = 200; + do { + val = REG_RD(sc, emac_base + EMAC_REG_EMAC_MODE); + ELINK_DEBUG_P1(sc, "EMAC reset reg is %u", val); + if (!timeout) { + ELINK_DEBUG_P0(sc, "EMAC timeout!"); + return; + } + timeout--; + } while (val & EMAC_MODE_RESET); + + elink_set_mdio_emac_per_phy(sc, params); + /* Set mac address */ + val = ((params->mac_addr[0] << 8) | + params->mac_addr[1]); + elink_cb_reg_write(sc, emac_base + EMAC_REG_EMAC_MAC_MATCH, val); + + val = ((params->mac_addr[2] << 24) | + (params->mac_addr[3] << 16) | + (params->mac_addr[4] << 8) | + params->mac_addr[5]); + elink_cb_reg_write(sc, emac_base + EMAC_REG_EMAC_MAC_MATCH + 4, val); +} + +static void elink_set_xumac_nig(struct elink_params *params, + uint16_t tx_pause_en, + uint8_t enable) +{ + struct bnx2x_softc *sc = params->sc; + + REG_WR(sc, params->port ? NIG_REG_P1_MAC_IN_EN : NIG_REG_P0_MAC_IN_EN, + enable); + REG_WR(sc, params->port ? NIG_REG_P1_MAC_OUT_EN : NIG_REG_P0_MAC_OUT_EN, + enable); + REG_WR(sc, params->port ? NIG_REG_P1_MAC_PAUSE_OUT_EN : + NIG_REG_P0_MAC_PAUSE_OUT_EN, tx_pause_en); +} + +static void elink_set_umac_rxtx(struct elink_params *params, uint8_t en) +{ + uint32_t umac_base = params->port ? GRCBASE_UMAC1 : GRCBASE_UMAC0; + uint32_t val; + struct bnx2x_softc *sc = params->sc; + if (!(REG_RD(sc, MISC_REG_RESET_REG_2) & + (MISC_REGISTERS_RESET_REG_2_UMAC0 << params->port))) + return; + val = REG_RD(sc, umac_base + UMAC_REG_COMMAND_CONFIG); + if (en) + val |= (UMAC_COMMAND_CONFIG_REG_TX_ENA | + UMAC_COMMAND_CONFIG_REG_RX_ENA); + else + val &= ~(UMAC_COMMAND_CONFIG_REG_TX_ENA | + UMAC_COMMAND_CONFIG_REG_RX_ENA); + /* Disable RX and TX */ + REG_WR(sc, umac_base + UMAC_REG_COMMAND_CONFIG, val); +} + +static void elink_umac_enable(struct elink_params *params, + struct elink_vars *vars, uint8_t lb) +{ + uint32_t val; + uint32_t umac_base = params->port ? 
GRCBASE_UMAC1 : GRCBASE_UMAC0; + struct bnx2x_softc *sc = params->sc; + /* Reset UMAC */ + REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, + (MISC_REGISTERS_RESET_REG_2_UMAC0 << params->port)); + DELAY(1000 * 1); + + REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, + (MISC_REGISTERS_RESET_REG_2_UMAC0 << params->port)); + + ELINK_DEBUG_P0(sc, "enabling UMAC"); + + /* This register opens the gate for the UMAC despite its name */ + REG_WR(sc, NIG_REG_EGRESS_EMAC0_PORT + params->port * 4, 1); + + val = UMAC_COMMAND_CONFIG_REG_PROMIS_EN | + UMAC_COMMAND_CONFIG_REG_PAD_EN | + UMAC_COMMAND_CONFIG_REG_SW_RESET | + UMAC_COMMAND_CONFIG_REG_NO_LGTH_CHECK; + switch (vars->line_speed) { + case ELINK_SPEED_10: + val |= (0 << 2); + break; + case ELINK_SPEED_100: + val |= (1 << 2); + break; + case ELINK_SPEED_1000: + val |= (2 << 2); + break; + case ELINK_SPEED_2500: + val |= (3 << 2); + break; + default: + ELINK_DEBUG_P1(sc, "Invalid speed for UMAC %d", + vars->line_speed); + break; + } + if (!(vars->flow_ctrl & ELINK_FLOW_CTRL_TX)) + val |= UMAC_COMMAND_CONFIG_REG_IGNORE_TX_PAUSE; + + if (!(vars->flow_ctrl & ELINK_FLOW_CTRL_RX)) + val |= UMAC_COMMAND_CONFIG_REG_PAUSE_IGNORE; + + if (vars->duplex == DUPLEX_HALF) + val |= UMAC_COMMAND_CONFIG_REG_HD_ENA; + + REG_WR(sc, umac_base + UMAC_REG_COMMAND_CONFIG, val); + DELAY(50); + + /* Configure UMAC for EEE */ + if (vars->eee_status & SHMEM_EEE_ADV_STATUS_MASK) { + ELINK_DEBUG_P0(sc, "configured UMAC for EEE"); + REG_WR(sc, umac_base + UMAC_REG_UMAC_EEE_CTRL, + UMAC_UMAC_EEE_CTRL_REG_EEE_EN); + REG_WR(sc, umac_base + UMAC_REG_EEE_WAKE_TIMER, 0x11); + } else { + REG_WR(sc, umac_base + UMAC_REG_UMAC_EEE_CTRL, 0x0); + } + + /* Set MAC address for source TX Pause/PFC frames (under SW reset) */ + REG_WR(sc, umac_base + UMAC_REG_MAC_ADDR0, + ((params->mac_addr[2] << 24) | + (params->mac_addr[3] << 16) | + (params->mac_addr[4] << 8) | + (params->mac_addr[5]))); + REG_WR(sc, umac_base + UMAC_REG_MAC_ADDR1, + ((params->mac_addr[0] << 8) | + (params->mac_addr[1]))); + + /* Enable RX and TX */ + val &= ~UMAC_COMMAND_CONFIG_REG_PAD_EN; + val |= UMAC_COMMAND_CONFIG_REG_TX_ENA | + UMAC_COMMAND_CONFIG_REG_RX_ENA; + REG_WR(sc, umac_base + UMAC_REG_COMMAND_CONFIG, val); + DELAY(50); + + /* Remove SW Reset */ + val &= ~UMAC_COMMAND_CONFIG_REG_SW_RESET; + + /* Check loopback mode */ + if (lb) + val |= UMAC_COMMAND_CONFIG_REG_LOOP_ENA; + REG_WR(sc, umac_base + UMAC_REG_COMMAND_CONFIG, val); + + /* Maximum Frame Length (RW). Defines a 14-Bit maximum frame + * length used by the MAC receive logic to check frames. 
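+ * The value written below, 0x2710, is 10000 decimal, large enough for a jumbo frame plus Ethernet overhead.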
+ */ + REG_WR(sc, umac_base + UMAC_REG_MAXFR, 0x2710); + elink_set_xumac_nig(params, + ((vars->flow_ctrl & ELINK_FLOW_CTRL_TX) != 0), 1); + vars->mac_type = ELINK_MAC_TYPE_UMAC; + +} + +/* Define the XMAC mode */ +static void elink_xmac_init(struct elink_params *params, uint32_t max_speed) +{ + struct bnx2x_softc *sc = params->sc; + uint32_t is_port4mode = elink_is_4_port_mode(sc); + + /* In 4-port mode, need to set the mode only once, so if XMAC is + * already out of reset, it means the mode has already been set, + * and it must not* reset the XMAC again, since it controls both + * ports of the path + */ + + if (((CHIP_NUM(sc) == CHIP_NUM_57840_4_10) || + (CHIP_NUM(sc) == CHIP_NUM_57840_2_20) || + (CHIP_NUM(sc) == CHIP_NUM_57840_OBS)) && + is_port4mode && + (REG_RD(sc, MISC_REG_RESET_REG_2) & + MISC_REGISTERS_RESET_REG_2_XMAC)) { + ELINK_DEBUG_P0(sc, + "XMAC already out of reset in 4-port mode"); + return; + } + + /* Hard reset */ + REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, + MISC_REGISTERS_RESET_REG_2_XMAC); + DELAY(1000 * 1); + + REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, + MISC_REGISTERS_RESET_REG_2_XMAC); + if (is_port4mode) { + ELINK_DEBUG_P0(sc, "Init XMAC to 2 ports x 10G per path"); + + /* Set the number of ports on the system side to up to 2 */ + REG_WR(sc, MISC_REG_XMAC_CORE_PORT_MODE, 1); + + /* Set the number of ports on the Warp Core to 10G */ + REG_WR(sc, MISC_REG_XMAC_PHY_PORT_MODE, 3); + } else { + /* Set the number of ports on the system side to 1 */ + REG_WR(sc, MISC_REG_XMAC_CORE_PORT_MODE, 0); + if (max_speed == ELINK_SPEED_10000) { + ELINK_DEBUG_P0(sc, + "Init XMAC to 10G x 1 port per path"); + /* Set the number of ports on the Warp Core to 10G */ + REG_WR(sc, MISC_REG_XMAC_PHY_PORT_MODE, 3); + } else { + ELINK_DEBUG_P0(sc, + "Init XMAC to 20G x 2 ports per path"); + /* Set the number of ports on the Warp Core to 20G */ + REG_WR(sc, MISC_REG_XMAC_PHY_PORT_MODE, 1); + } + } + /* Soft reset */ + REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, + MISC_REGISTERS_RESET_REG_2_XMAC_SOFT); + DELAY(1000 * 1); + + REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, + MISC_REGISTERS_RESET_REG_2_XMAC_SOFT); + +} + +static void elink_set_xmac_rxtx(struct elink_params *params, uint8_t en) +{ + uint8_t port = params->port; + struct bnx2x_softc *sc = params->sc; + uint32_t pfc_ctrl, xmac_base = (port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0; + uint32_t val; + + if (REG_RD(sc, MISC_REG_RESET_REG_2) & + MISC_REGISTERS_RESET_REG_2_XMAC) { + /* Send an indication to change the state in the NIG back to XON + * Clearing this bit enables the next set of this bit to get + * rising edge + */ + pfc_ctrl = REG_RD(sc, xmac_base + XMAC_REG_PFC_CTRL_HI); + REG_WR(sc, xmac_base + XMAC_REG_PFC_CTRL_HI, + (pfc_ctrl & ~(1 << 1))); + REG_WR(sc, xmac_base + XMAC_REG_PFC_CTRL_HI, + (pfc_ctrl | (1 << 1))); + ELINK_DEBUG_P1(sc, "Disable XMAC on port %x", port); + val = REG_RD(sc, xmac_base + XMAC_REG_CTRL); + if (en) + val |= (XMAC_CTRL_REG_TX_EN | XMAC_CTRL_REG_RX_EN); + else + val &= ~(XMAC_CTRL_REG_TX_EN | XMAC_CTRL_REG_RX_EN); + REG_WR(sc, xmac_base + XMAC_REG_CTRL, val); + } +} + +static elink_status_t elink_xmac_enable(struct elink_params *params, + struct elink_vars *vars, uint8_t lb) +{ + uint32_t val, xmac_base; + struct bnx2x_softc *sc = params->sc; + ELINK_DEBUG_P0(sc, "enabling XMAC"); + + xmac_base = (params->port) ? 
GRCBASE_XMAC1 : GRCBASE_XMAC0; + + elink_xmac_init(params, vars->line_speed); + + /* This register determines on which events the MAC will assert + * error on the i/f to the NIG along w/ EOP. + */ + + /* This register tells the NIG whether to send traffic to UMAC + * or XMAC + */ + REG_WR(sc, NIG_REG_EGRESS_EMAC0_PORT + params->port * 4, 0); + + /* When XMAC is in XLGMII mode, disable sending idles for fault + * detection. + */ + if (!(params->phy[ELINK_INT_PHY].flags & ELINK_FLAGS_TX_ERROR_CHECK)) { + REG_WR(sc, xmac_base + XMAC_REG_RX_LSS_CTRL, + (XMAC_RX_LSS_CTRL_REG_LOCAL_FAULT_DISABLE | + XMAC_RX_LSS_CTRL_REG_REMOTE_FAULT_DISABLE)); + REG_WR(sc, xmac_base + XMAC_REG_CLEAR_RX_LSS_STATUS, 0); + REG_WR(sc, xmac_base + XMAC_REG_CLEAR_RX_LSS_STATUS, + XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_LOCAL_FAULT_STATUS | + XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_REMOTE_FAULT_STATUS); + } + /* Set Max packet size */ + REG_WR(sc, xmac_base + XMAC_REG_RX_MAX_SIZE, 0x2710); + + /* CRC append for Tx packets */ + REG_WR(sc, xmac_base + XMAC_REG_TX_CTRL, 0xC800); + + /* update PFC */ + elink_update_pfc_xmac(params, vars, 0); + + if (vars->eee_status & SHMEM_EEE_ADV_STATUS_MASK) { + ELINK_DEBUG_P0(sc, "Setting XMAC for EEE"); + REG_WR(sc, xmac_base + XMAC_REG_EEE_TIMERS_HI, 0x1380008); + REG_WR(sc, xmac_base + XMAC_REG_EEE_CTRL, 0x1); + } else { + REG_WR(sc, xmac_base + XMAC_REG_EEE_CTRL, 0x0); + } + + /* Enable TX and RX */ + val = XMAC_CTRL_REG_TX_EN | XMAC_CTRL_REG_RX_EN; + + /* Set MAC in XLGMII mode for dual-mode */ + if ((vars->line_speed == ELINK_SPEED_20000) && + (params->phy[ELINK_INT_PHY].supported & + ELINK_SUPPORTED_20000baseKR2_Full)) + val |= XMAC_CTRL_REG_XLGMII_ALIGN_ENB; + + /* Check loopback mode */ + if (lb) + val |= XMAC_CTRL_REG_LINE_LOCAL_LPBK; + REG_WR(sc, xmac_base + XMAC_REG_CTRL, val); + elink_set_xumac_nig(params, + ((vars->flow_ctrl & ELINK_FLOW_CTRL_TX) != 0), 1); + + vars->mac_type = ELINK_MAC_TYPE_XMAC; + + return ELINK_STATUS_OK; +} + +static elink_status_t elink_emac_enable(struct elink_params *params, + struct elink_vars *vars, uint8_t lb) +{ + struct bnx2x_softc *sc = params->sc; + uint8_t port = params->port; + uint32_t emac_base = port ? 
GRCBASE_EMAC1 : GRCBASE_EMAC0; + uint32_t val; + + ELINK_DEBUG_P0(sc, "enabling EMAC"); + + /* Disable BMAC */ + REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, + (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); + + /* enable emac and not bmac */ + REG_WR(sc, NIG_REG_EGRESS_EMAC0_PORT + port * 4, 1); + +#ifdef ELINK_INCLUDE_EMUL + /* for paladium */ + if (CHIP_REV_IS_EMUL(sc)) { + /* Use lane 1 (of lanes 0-3) */ + REG_WR(sc, NIG_REG_XGXS_LANE_SEL_P0 + port * 4, 1); + REG_WR(sc, NIG_REG_XGXS_SERDES0_MODE_SEL + port * 4, 1); + } + /* for fpga */ + else +#endif +#ifdef ELINK_INCLUDE_FPGA + if (CHIP_REV_IS_FPGA(sc)) { + /* Use lane 1 (of lanes 0-3) */ + ELINK_DEBUG_P0(sc, "elink_emac_enable: Setting FPGA"); + + REG_WR(sc, NIG_REG_XGXS_LANE_SEL_P0 + port * 4, 1); + REG_WR(sc, NIG_REG_XGXS_SERDES0_MODE_SEL + port * 4, 0); + } else +#endif + /* ASIC */ + if (vars->phy_flags & PHY_XGXS_FLAG) { + uint32_t ser_lane = ((params->lane_config & + PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >> + PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT); + + ELINK_DEBUG_P0(sc, "XGXS"); + /* select the master lanes (out of 0-3) */ + REG_WR(sc, NIG_REG_XGXS_LANE_SEL_P0 + port * 4, ser_lane); + /* select XGXS */ + REG_WR(sc, NIG_REG_XGXS_SERDES0_MODE_SEL + port * 4, 1); + + } else { /* SerDes */ + ELINK_DEBUG_P0(sc, "SerDes"); + /* select SerDes */ + REG_WR(sc, NIG_REG_XGXS_SERDES0_MODE_SEL + port * 4, 0); + } + + elink_bits_en(sc, emac_base + EMAC_REG_EMAC_RX_MODE, + EMAC_RX_MODE_RESET); + elink_bits_en(sc, emac_base + EMAC_REG_EMAC_TX_MODE, + EMAC_TX_MODE_RESET); + +#if defined(ELINK_INCLUDE_EMUL) || defined(ELINK_INCLUDE_FPGA) + if (CHIP_REV_IS_SLOW(sc)) { + /* config GMII mode */ + val = REG_RD(sc, emac_base + EMAC_REG_EMAC_MODE); + elink_cb_reg_write(sc, emac_base + EMAC_REG_EMAC_MODE, + (val | EMAC_MODE_PORT_GMII)); + } else { /* ASIC */ +#endif + /* pause enable/disable */ + elink_bits_dis(sc, emac_base + EMAC_REG_EMAC_RX_MODE, + EMAC_RX_MODE_FLOW_EN); + + elink_bits_dis(sc, emac_base + EMAC_REG_EMAC_TX_MODE, + (EMAC_TX_MODE_EXT_PAUSE_EN | + EMAC_TX_MODE_FLOW_EN)); + if (!(params->feature_config_flags & + ELINK_FEATURE_CONFIG_PFC_ENABLED)) { + if (vars->flow_ctrl & ELINK_FLOW_CTRL_RX) + elink_bits_en(sc, emac_base + + EMAC_REG_EMAC_RX_MODE, + EMAC_RX_MODE_FLOW_EN); + + if (vars->flow_ctrl & ELINK_FLOW_CTRL_TX) + elink_bits_en(sc, emac_base + + EMAC_REG_EMAC_TX_MODE, + (EMAC_TX_MODE_EXT_PAUSE_EN | + EMAC_TX_MODE_FLOW_EN)); + } else + elink_bits_en(sc, emac_base + EMAC_REG_EMAC_TX_MODE, + EMAC_TX_MODE_FLOW_EN); +#if defined(ELINK_INCLUDE_EMUL) || defined(ELINK_INCLUDE_FPGA) + } +#endif + + /* KEEP_VLAN_TAG, promiscuous */ + val = REG_RD(sc, emac_base + EMAC_REG_EMAC_RX_MODE); + val |= EMAC_RX_MODE_KEEP_VLAN_TAG | EMAC_RX_MODE_PROMISCUOUS; + + /* Setting this bit causes MAC control frames (except for pause + * frames) to be passed on for processing. This setting has no + * affect on the operation of the pause frames. This bit effects + * all packets regardless of RX Parser packet sorting logic. + * Turn the PFC off to make sure we are in Xon state before + * enabling it. 
+ */ + elink_cb_reg_write(sc, emac_base + EMAC_REG_RX_PFC_MODE, 0); + if (params->feature_config_flags & ELINK_FEATURE_CONFIG_PFC_ENABLED) { + ELINK_DEBUG_P0(sc, "PFC is enabled"); + /* Enable PFC again */ + elink_cb_reg_write(sc, emac_base + EMAC_REG_RX_PFC_MODE, + EMAC_REG_RX_PFC_MODE_RX_EN | + EMAC_REG_RX_PFC_MODE_TX_EN | + EMAC_REG_RX_PFC_MODE_PRIORITIES); + + elink_cb_reg_write(sc, emac_base + EMAC_REG_RX_PFC_PARAM, + ((0x0101 << + EMAC_REG_RX_PFC_PARAM_OPCODE_BITSHIFT) | + (0x00ff << + EMAC_REG_RX_PFC_PARAM_PRIORITY_EN_BITSHIFT))); + val |= EMAC_RX_MODE_KEEP_MAC_CONTROL; + } + elink_cb_reg_write(sc, emac_base + EMAC_REG_EMAC_RX_MODE, val); + + /* Set Loopback */ + val = REG_RD(sc, emac_base + EMAC_REG_EMAC_MODE); + if (lb) + val |= 0x810; + else + val &= ~0x810; + elink_cb_reg_write(sc, emac_base + EMAC_REG_EMAC_MODE, val); + + /* Enable emac */ + REG_WR(sc, NIG_REG_NIG_EMAC0_EN + port * 4, 1); + + /* Enable emac for jumbo packets */ + elink_cb_reg_write(sc, emac_base + EMAC_REG_EMAC_RX_MTU_SIZE, + (EMAC_RX_MTU_SIZE_JUMBO_ENA | + (ELINK_ETH_MAX_JUMBO_PACKET_SIZE + ELINK_ETH_OVREHEAD))); + + /* Strip CRC */ + REG_WR(sc, NIG_REG_NIG_INGRESS_EMAC0_NO_CRC + port * 4, 0x1); + + /* Disable the NIG in/out to the bmac */ + REG_WR(sc, NIG_REG_BMAC0_IN_EN + port * 4, 0x0); + REG_WR(sc, NIG_REG_BMAC0_PAUSE_OUT_EN + port * 4, 0x0); + REG_WR(sc, NIG_REG_BMAC0_OUT_EN + port * 4, 0x0); + + /* Enable the NIG in/out to the emac */ + REG_WR(sc, NIG_REG_EMAC0_IN_EN + port * 4, 0x1); + val = 0; + if ((params->feature_config_flags & + ELINK_FEATURE_CONFIG_PFC_ENABLED) || + (vars->flow_ctrl & ELINK_FLOW_CTRL_TX)) + val = 1; + + REG_WR(sc, NIG_REG_EMAC0_PAUSE_OUT_EN + port * 4, val); + REG_WR(sc, NIG_REG_EGRESS_EMAC0_OUT_EN + port * 4, 0x1); + +#ifdef ELINK_INCLUDE_EMUL + if (CHIP_REV_IS_EMUL(sc)) { + /* Take the BigMac out of reset */ + REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, + (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); + + /* Enable access for bmac registers */ + REG_WR(sc, NIG_REG_BMAC0_REGS_OUT_EN + port * 4, 0x1); + } else +#endif + REG_WR(sc, NIG_REG_BMAC0_REGS_OUT_EN + port * 4, 0x0); + + vars->mac_type = ELINK_MAC_TYPE_EMAC; + return ELINK_STATUS_OK; +} + +static void elink_update_pfc_bmac1(struct elink_params *params, + struct elink_vars *vars) +{ + uint32_t wb_data[2]; + struct bnx2x_softc *sc = params->sc; + uint32_t bmac_addr = params->port ? NIG_REG_INGRESS_BMAC1_MEM : + NIG_REG_INGRESS_BMAC0_MEM; + + uint32_t val = 0x14; + if ((!(params->feature_config_flags & + ELINK_FEATURE_CONFIG_PFC_ENABLED)) && + (vars->flow_ctrl & ELINK_FLOW_CTRL_RX)) + /* Enable BigMAC to react on received Pause packets */ + val |= (1 << 5); + wb_data[0] = val; + wb_data[1] = 0; + REG_WR_DMAE(sc, bmac_addr + BIGMAC_REGISTER_RX_CONTROL, wb_data, 2); + + /* TX control */ + val = 0xc0; + if (!(params->feature_config_flags & + ELINK_FEATURE_CONFIG_PFC_ENABLED) && + (vars->flow_ctrl & ELINK_FLOW_CTRL_TX)) + val |= 0x800000; + wb_data[0] = val; + wb_data[1] = 0; + REG_WR_DMAE(sc, bmac_addr + BIGMAC_REGISTER_TX_CONTROL, wb_data, 2); +} + +static void elink_update_pfc_bmac2(struct elink_params *params, + struct elink_vars *vars, + uint8_t is_lb) +{ + /* Set rx control: Strip CRC and enable BigMAC to relay + * control packets to the system as well + */ + uint32_t wb_data[2]; + struct bnx2x_softc *sc = params->sc; + uint32_t bmac_addr = params->port ? 
NIG_REG_INGRESS_BMAC1_MEM : + NIG_REG_INGRESS_BMAC0_MEM; + uint32_t val = 0x14; + + if ((!(params->feature_config_flags & + ELINK_FEATURE_CONFIG_PFC_ENABLED)) && + (vars->flow_ctrl & ELINK_FLOW_CTRL_RX)) + /* Enable BigMAC to react on received Pause packets */ + val |= (1 << 5); + wb_data[0] = val; + wb_data[1] = 0; + REG_WR_DMAE(sc, bmac_addr + BIGMAC2_REGISTER_RX_CONTROL, wb_data, 2); + DELAY(30); + + /* Tx control */ + val = 0xc0; + if (!(params->feature_config_flags & + ELINK_FEATURE_CONFIG_PFC_ENABLED) && + (vars->flow_ctrl & ELINK_FLOW_CTRL_TX)) + val |= 0x800000; + wb_data[0] = val; + wb_data[1] = 0; + REG_WR_DMAE(sc, bmac_addr + BIGMAC2_REGISTER_TX_CONTROL, wb_data, 2); + + if (params->feature_config_flags & ELINK_FEATURE_CONFIG_PFC_ENABLED) { + ELINK_DEBUG_P0(sc, "PFC is enabled"); + /* Enable PFC RX & TX & STATS and set 8 COS */ + wb_data[0] = 0x0; + wb_data[0] |= (1 << 0); /* RX */ + wb_data[0] |= (1 << 1); /* TX */ + wb_data[0] |= (1 << 2); /* Force initial Xon */ + wb_data[0] |= (1 << 3); /* 8 cos */ + wb_data[0] |= (1 << 5); /* STATS */ + wb_data[1] = 0; + REG_WR_DMAE(sc, bmac_addr + BIGMAC2_REGISTER_PFC_CONTROL, + wb_data, 2); + /* Clear the force Xon */ + wb_data[0] &= ~(1 << 2); + } else { + ELINK_DEBUG_P0(sc, "PFC is disabled"); + /* Disable PFC RX & TX & STATS and set 8 COS */ + wb_data[0] = 0x8; + wb_data[1] = 0; + } + + REG_WR_DMAE(sc, bmac_addr + BIGMAC2_REGISTER_PFC_CONTROL, wb_data, 2); + + /* Set the time (base unit is 512 bit times) between automatic + * re-sending of PP packets and enable automatic re-send of + * Per-Priority Packets as long as pp_gen is asserted and + * pp_disable is low. + */ + val = 0x8000; + if (params->feature_config_flags & ELINK_FEATURE_CONFIG_PFC_ENABLED) + val |= (1 << 16); /* enable automatic re-send */ + + wb_data[0] = val; + wb_data[1] = 0; + REG_WR_DMAE(sc, bmac_addr + BIGMAC2_REGISTER_TX_PAUSE_CONTROL, + wb_data, 2); + + /* MAC control */ + val = 0x3; /* Enable RX and TX */ + if (is_lb) { + val |= 0x4; /* Local loopback */ + ELINK_DEBUG_P0(sc, "enable bmac loopback"); + } + /* When PFC is enabled, pass pause frames towards the NIG. */ + if (params->feature_config_flags & ELINK_FEATURE_CONFIG_PFC_ENABLED) + val |= ((1 << 6) | (1 << 5)); + + wb_data[0] = val; + wb_data[1] = 0; + REG_WR_DMAE(sc, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, wb_data, 2); +} + +/****************************************************************************** + * Description: + * This function is needed because the per-COS NIG RX priority mask + * registers are not at contiguous addresses, so a base register plus + * offset cannot be used; each COS entry is mapped to its register + * explicitly. + ******************************************************************************/ +static elink_status_t elink_pfc_nig_rx_priority_mask(struct bnx2x_softc *sc, + uint8_t cos_entry, + uint32_t priority_mask, uint8_t port) +{ + uint32_t nig_reg_rx_priority_mask_add = 0; + + switch (cos_entry) { + case 0: + nig_reg_rx_priority_mask_add = (port) ? + NIG_REG_P1_RX_COS0_PRIORITY_MASK : + NIG_REG_P0_RX_COS0_PRIORITY_MASK; + break; + case 1: + nig_reg_rx_priority_mask_add = (port) ? + NIG_REG_P1_RX_COS1_PRIORITY_MASK : + NIG_REG_P0_RX_COS1_PRIORITY_MASK; + break; + case 2: + nig_reg_rx_priority_mask_add = (port) ?
+ NIG_REG_P1_RX_COS2_PRIORITY_MASK : + NIG_REG_P0_RX_COS2_PRIORITY_MASK; + break; + case 3: + if (port) + return ELINK_STATUS_ERROR; + nig_reg_rx_priority_mask_add = NIG_REG_P0_RX_COS3_PRIORITY_MASK; + break; + case 4: + if (port) + return ELINK_STATUS_ERROR; + nig_reg_rx_priority_mask_add = NIG_REG_P0_RX_COS4_PRIORITY_MASK; + break; + case 5: + if (port) + return ELINK_STATUS_ERROR; + nig_reg_rx_priority_mask_add = NIG_REG_P0_RX_COS5_PRIORITY_MASK; + break; + } + + REG_WR(sc, nig_reg_rx_priority_mask_add, priority_mask); + + return ELINK_STATUS_OK; +} +static void elink_update_mng(struct elink_params *params, uint32_t link_status) +{ + struct bnx2x_softc *sc = params->sc; + + REG_WR(sc, params->shmem_base + + offsetof(struct shmem_region, + port_mb[params->port].link_status), link_status); +} + +static void elink_update_pfc_nig(struct elink_params *params, + __rte_unused struct elink_vars *vars, + struct elink_nig_brb_pfc_port_params *nig_params) +{ + uint32_t xcm_mask = 0, ppp_enable = 0, pause_enable = 0; + uint32_t llfc_out_en = 0; + uint32_t llfc_enable = 0, xcm_out_en = 0, hwpfc_enable = 0; + uint32_t pkt_priority_to_cos = 0; + struct bnx2x_softc *sc = params->sc; + uint8_t port = params->port; + + int set_pfc = params->feature_config_flags & + ELINK_FEATURE_CONFIG_PFC_ENABLED; + ELINK_DEBUG_P0(sc, "updating pfc nig parameters"); + + /* When NIG_LLH0_XCM_MASK_REG_LLHX_XCM_MASK_BCN bit is set + * MAC control frames (that are not pause packets) + * will be forwarded to the XCM. + */ + xcm_mask = REG_RD(sc, port ? NIG_REG_LLH1_XCM_MASK : + NIG_REG_LLH0_XCM_MASK); + /* NIG params will override non PFC params, since it's possible to + * do transition from PFC to SAFC + */ + if (set_pfc) { + pause_enable = 0; + llfc_out_en = 0; + llfc_enable = 0; + if (CHIP_IS_E3(sc)) + ppp_enable = 0; + else + ppp_enable = 1; + xcm_mask &= ~(port ? NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN : + NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN); + xcm_out_en = 0; + hwpfc_enable = 1; + } else { + if (nig_params) { + llfc_out_en = nig_params->llfc_out_en; + llfc_enable = nig_params->llfc_enable; + pause_enable = nig_params->pause_enable; + } else /* Default non PFC mode - PAUSE */ + pause_enable = 1; + + xcm_mask |= (port ? NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN : + NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN); + xcm_out_en = 1; + } + + if (CHIP_IS_E3(sc)) + REG_WR(sc, port ? NIG_REG_BRB1_PAUSE_IN_EN : + NIG_REG_BRB0_PAUSE_IN_EN, pause_enable); + REG_WR(sc, port ? NIG_REG_LLFC_OUT_EN_1 : + NIG_REG_LLFC_OUT_EN_0, llfc_out_en); + REG_WR(sc, port ? NIG_REG_LLFC_ENABLE_1 : + NIG_REG_LLFC_ENABLE_0, llfc_enable); + REG_WR(sc, port ? NIG_REG_PAUSE_ENABLE_1 : + NIG_REG_PAUSE_ENABLE_0, pause_enable); + + REG_WR(sc, port ? NIG_REG_PPP_ENABLE_1 : + NIG_REG_PPP_ENABLE_0, ppp_enable); + + REG_WR(sc, port ? NIG_REG_LLH1_XCM_MASK : + NIG_REG_LLH0_XCM_MASK, xcm_mask); + + REG_WR(sc, port ? NIG_REG_LLFC_EGRESS_SRC_ENABLE_1 : + NIG_REG_LLFC_EGRESS_SRC_ENABLE_0, 0x7); + + /* Output enable for RX_XCM # IF */ + REG_WR(sc, port ? NIG_REG_XCM1_OUT_EN : + NIG_REG_XCM0_OUT_EN, xcm_out_en); + + /* HW PFC TX enable */ + REG_WR(sc, port ? NIG_REG_P1_HWPFC_ENABLE : + NIG_REG_P0_HWPFC_ENABLE, hwpfc_enable); + + if (nig_params) { + uint8_t i = 0; + pkt_priority_to_cos = nig_params->pkt_priority_to_cos; + + for (i = 0; i < nig_params->num_of_rx_cos_priority_mask; i++) + elink_pfc_nig_rx_priority_mask(sc, i, + nig_params->rx_cos_priority_mask[i], port); + + REG_WR(sc, port ? 
NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_1 : + NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_0, + nig_params->llfc_high_priority_classes); + + REG_WR(sc, port ? NIG_REG_LLFC_LOW_PRIORITY_CLASSES_1 : + NIG_REG_LLFC_LOW_PRIORITY_CLASSES_0, + nig_params->llfc_low_priority_classes); + } + REG_WR(sc, port ? NIG_REG_P1_PKT_PRIORITY_TO_COS : + NIG_REG_P0_PKT_PRIORITY_TO_COS, + pkt_priority_to_cos); +} + +elink_status_t elink_update_pfc(struct elink_params *params, + struct elink_vars *vars, + struct elink_nig_brb_pfc_port_params *pfc_params) +{ + /* The PFC and pause are orthogonal to one another, meaning when + * PFC is enabled, the pause are disabled, and when PFC is + * disabled, pause are set according to the pause result. + */ + uint32_t val; + struct bnx2x_softc *sc = params->sc; + uint8_t bmac_loopback = (params->loopback_mode == ELINK_LOOPBACK_BMAC); + + if (params->feature_config_flags & ELINK_FEATURE_CONFIG_PFC_ENABLED) + vars->link_status |= LINK_STATUS_PFC_ENABLED; + else + vars->link_status &= ~LINK_STATUS_PFC_ENABLED; + + elink_update_mng(params, vars->link_status); + + /* Update NIG params */ + elink_update_pfc_nig(params, vars, pfc_params); + + if (!vars->link_up) + return ELINK_STATUS_OK; + + ELINK_DEBUG_P0(sc, "About to update PFC in BMAC"); + + if (CHIP_IS_E3(sc)) { + if (vars->mac_type == ELINK_MAC_TYPE_XMAC) + elink_update_pfc_xmac(params, vars, 0); + } else { + val = REG_RD(sc, MISC_REG_RESET_REG_2); + if ((val & + (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << params->port)) + == 0) { + ELINK_DEBUG_P0(sc, "About to update PFC in EMAC"); + elink_emac_enable(params, vars, 0); + return ELINK_STATUS_OK; + } + if (CHIP_IS_E2(sc)) + elink_update_pfc_bmac2(params, vars, bmac_loopback); + else + elink_update_pfc_bmac1(params, vars); + + val = 0; + if ((params->feature_config_flags & + ELINK_FEATURE_CONFIG_PFC_ENABLED) || + (vars->flow_ctrl & ELINK_FLOW_CTRL_TX)) + val = 1; + REG_WR(sc, NIG_REG_BMAC0_PAUSE_OUT_EN + params->port * 4, val); + } + return ELINK_STATUS_OK; +} + +static elink_status_t elink_bmac1_enable(struct elink_params *params, + struct elink_vars *vars, + uint8_t is_lb) +{ + struct bnx2x_softc *sc = params->sc; + uint8_t port = params->port; + uint32_t bmac_addr = port ? 
NIG_REG_INGRESS_BMAC1_MEM : + NIG_REG_INGRESS_BMAC0_MEM; + uint32_t wb_data[2]; + uint32_t val; + + ELINK_DEBUG_P0(sc, "Enabling BigMAC1"); + + /* XGXS control */ + wb_data[0] = 0x3c; + wb_data[1] = 0; + REG_WR_DMAE(sc, bmac_addr + BIGMAC_REGISTER_BMAC_XGXS_CONTROL, + wb_data, 2); + + /* TX MAC SA */ + wb_data[0] = ((params->mac_addr[2] << 24) | + (params->mac_addr[3] << 16) | + (params->mac_addr[4] << 8) | + params->mac_addr[5]); + wb_data[1] = ((params->mac_addr[0] << 8) | + params->mac_addr[1]); + REG_WR_DMAE(sc, bmac_addr + BIGMAC_REGISTER_TX_SOURCE_ADDR, wb_data, 2); + + /* MAC control */ + val = 0x3; + if (is_lb) { + val |= 0x4; + ELINK_DEBUG_P0(sc, "enable bmac loopback"); + } + wb_data[0] = val; + wb_data[1] = 0; + REG_WR_DMAE(sc, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL, wb_data, 2); + + /* Set rx mtu */ + wb_data[0] = ELINK_ETH_MAX_JUMBO_PACKET_SIZE + ELINK_ETH_OVREHEAD; + wb_data[1] = 0; + REG_WR_DMAE(sc, bmac_addr + BIGMAC_REGISTER_RX_MAX_SIZE, wb_data, 2); + + elink_update_pfc_bmac1(params, vars); + + /* Set tx mtu */ + wb_data[0] = ELINK_ETH_MAX_JUMBO_PACKET_SIZE + ELINK_ETH_OVREHEAD; + wb_data[1] = 0; + REG_WR_DMAE(sc, bmac_addr + BIGMAC_REGISTER_TX_MAX_SIZE, wb_data, 2); + + /* Set cnt max size */ + wb_data[0] = ELINK_ETH_MAX_JUMBO_PACKET_SIZE + ELINK_ETH_OVREHEAD; + wb_data[1] = 0; + REG_WR_DMAE(sc, bmac_addr + BIGMAC_REGISTER_CNT_MAX_SIZE, wb_data, 2); + + /* Configure SAFC */ + wb_data[0] = 0x1000200; + wb_data[1] = 0; + REG_WR_DMAE(sc, bmac_addr + BIGMAC_REGISTER_RX_LLFC_MSG_FLDS, + wb_data, 2); +#ifdef ELINK_INCLUDE_EMUL + /* Fix for emulation */ + if (CHIP_REV_IS_EMUL(sc)) { + wb_data[0] = 0xf000; + wb_data[1] = 0; + REG_WR_DMAE(sc, bmac_addr + BIGMAC_REGISTER_TX_PAUSE_THRESHOLD, + wb_data, 2); + } +#endif + + return ELINK_STATUS_OK; +} + +static elink_status_t elink_bmac2_enable(struct elink_params *params, + struct elink_vars *vars, + uint8_t is_lb) +{ + struct bnx2x_softc *sc = params->sc; + uint8_t port = params->port; + uint32_t bmac_addr = port ? 
NIG_REG_INGRESS_BMAC1_MEM : + NIG_REG_INGRESS_BMAC0_MEM; + uint32_t wb_data[2]; + + ELINK_DEBUG_P0(sc, "Enabling BigMAC2"); + + wb_data[0] = 0; + wb_data[1] = 0; + REG_WR_DMAE(sc, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, wb_data, 2); + DELAY(30); + + /* XGXS control: Reset phy HW, MDIO registers, PHY PLL and BMAC */ + wb_data[0] = 0x3c; + wb_data[1] = 0; + REG_WR_DMAE(sc, bmac_addr + BIGMAC2_REGISTER_BMAC_XGXS_CONTROL, + wb_data, 2); + + DELAY(30); + + /* TX MAC SA */ + wb_data[0] = ((params->mac_addr[2] << 24) | + (params->mac_addr[3] << 16) | + (params->mac_addr[4] << 8) | + params->mac_addr[5]); + wb_data[1] = ((params->mac_addr[0] << 8) | + params->mac_addr[1]); + REG_WR_DMAE(sc, bmac_addr + BIGMAC2_REGISTER_TX_SOURCE_ADDR, + wb_data, 2); + + DELAY(30); + + /* Configure SAFC */ + wb_data[0] = 0x1000200; + wb_data[1] = 0; + REG_WR_DMAE(sc, bmac_addr + BIGMAC2_REGISTER_RX_LLFC_MSG_FLDS, + wb_data, 2); + DELAY(30); + + /* Set RX MTU */ + wb_data[0] = ELINK_ETH_MAX_JUMBO_PACKET_SIZE + ELINK_ETH_OVREHEAD; + wb_data[1] = 0; + REG_WR_DMAE(sc, bmac_addr + BIGMAC2_REGISTER_RX_MAX_SIZE, wb_data, 2); + DELAY(30); + + /* Set TX MTU */ + wb_data[0] = ELINK_ETH_MAX_JUMBO_PACKET_SIZE + ELINK_ETH_OVREHEAD; + wb_data[1] = 0; + REG_WR_DMAE(sc, bmac_addr + BIGMAC2_REGISTER_TX_MAX_SIZE, wb_data, 2); + DELAY(30); + /* Set cnt max size */ + wb_data[0] = ELINK_ETH_MAX_JUMBO_PACKET_SIZE + ELINK_ETH_OVREHEAD - 2; + wb_data[1] = 0; + REG_WR_DMAE(sc, bmac_addr + BIGMAC2_REGISTER_CNT_MAX_SIZE, wb_data, 2); + DELAY(30); + elink_update_pfc_bmac2(params, vars, is_lb); + + return ELINK_STATUS_OK; +} + +static elink_status_t elink_bmac_enable(struct elink_params *params, + struct elink_vars *vars, + uint8_t is_lb, uint8_t reset_bmac) +{ + elink_status_t rc = ELINK_STATUS_OK; + uint8_t port = params->port; + struct bnx2x_softc *sc = params->sc; + uint32_t val; + /* Reset and unreset the BigMac */ + if (reset_bmac) { + REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, + (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); + DELAY(1000 * 1); + } + + REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, + (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); + + /* Enable access for bmac registers */ + REG_WR(sc, NIG_REG_BMAC0_REGS_OUT_EN + port * 4, 0x1); + + /* Enable BMAC according to BMAC type*/ + if (CHIP_IS_E2(sc)) + rc = elink_bmac2_enable(params, vars, is_lb); + else + rc = elink_bmac1_enable(params, vars, is_lb); + REG_WR(sc, NIG_REG_XGXS_SERDES0_MODE_SEL + port * 4, 0x1); + REG_WR(sc, NIG_REG_XGXS_LANE_SEL_P0 + port * 4, 0x0); + REG_WR(sc, NIG_REG_EGRESS_EMAC0_PORT + port * 4, 0x0); + val = 0; + if ((params->feature_config_flags & + ELINK_FEATURE_CONFIG_PFC_ENABLED) || + (vars->flow_ctrl & ELINK_FLOW_CTRL_TX)) + val = 1; + REG_WR(sc, NIG_REG_BMAC0_PAUSE_OUT_EN + port * 4, val); + REG_WR(sc, NIG_REG_EGRESS_EMAC0_OUT_EN + port * 4, 0x0); + REG_WR(sc, NIG_REG_EMAC0_IN_EN + port * 4, 0x0); + REG_WR(sc, NIG_REG_EMAC0_PAUSE_OUT_EN + port * 4, 0x0); + REG_WR(sc, NIG_REG_BMAC0_IN_EN + port * 4, 0x1); + REG_WR(sc, NIG_REG_BMAC0_OUT_EN + port * 4, 0x1); + + vars->mac_type = ELINK_MAC_TYPE_BMAC; + return rc; +} + +static void elink_set_bmac_rx(struct bnx2x_softc *sc, + __rte_unused uint32_t chip_id, + uint8_t port, uint8_t en) +{ + uint32_t bmac_addr = port ? 
NIG_REG_INGRESS_BMAC1_MEM : + NIG_REG_INGRESS_BMAC0_MEM; + uint32_t wb_data[2]; + uint32_t nig_bmac_enable = REG_RD(sc, NIG_REG_BMAC0_REGS_OUT_EN + + port * 4); + + if (CHIP_IS_E2(sc)) + bmac_addr += BIGMAC2_REGISTER_BMAC_CONTROL; + else + bmac_addr += BIGMAC_REGISTER_BMAC_CONTROL; + /* Only if the bmac is out of reset */ + if (REG_RD(sc, MISC_REG_RESET_REG_2) & + (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port) && + nig_bmac_enable) { + /* Clear Rx Enable bit in BMAC_CONTROL register */ + REG_RD_DMAE(sc, bmac_addr, wb_data, 2); + if (en) + wb_data[0] |= ELINK_BMAC_CONTROL_RX_ENABLE; + else + wb_data[0] &= ~ELINK_BMAC_CONTROL_RX_ENABLE; + REG_WR_DMAE(sc, bmac_addr, wb_data, 2); + DELAY(1000 * 1); + } +} + +static elink_status_t elink_pbf_update(struct elink_params *params, + uint32_t flow_ctrl, + uint32_t line_speed) +{ + struct bnx2x_softc *sc = params->sc; + uint8_t port = params->port; + uint32_t init_crd, crd; + uint32_t count = 1000; + + /* Disable port */ + REG_WR(sc, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port * 4, 0x1); + + /* Wait for init credit */ + init_crd = REG_RD(sc, PBF_REG_P0_INIT_CRD + port * 4); + crd = REG_RD(sc, PBF_REG_P0_CREDIT + port * 8); + ELINK_DEBUG_P2(sc, "init_crd 0x%x crd 0x%x", init_crd, crd); + + while ((init_crd != crd) && count) { + DELAY(1000 * 5); + crd = REG_RD(sc, PBF_REG_P0_CREDIT + port * 8); + count--; + } + crd = REG_RD(sc, PBF_REG_P0_CREDIT + port * 8); + if (init_crd != crd) { + ELINK_DEBUG_P2(sc, "BUG! init_crd 0x%x != crd 0x%x", + init_crd, crd); + return ELINK_STATUS_ERROR; + } + + if (flow_ctrl & ELINK_FLOW_CTRL_RX || + line_speed == ELINK_SPEED_10 || + line_speed == ELINK_SPEED_100 || + line_speed == ELINK_SPEED_1000 || + line_speed == ELINK_SPEED_2500) { + REG_WR(sc, PBF_REG_P0_PAUSE_ENABLE + port * 4, 1); + /* Update threshold */ + REG_WR(sc, PBF_REG_P0_ARB_THRSH + port * 4, 0); + /* Update init credit */ + init_crd = 778; /* (800-18-4) */ + + } else { + uint32_t thresh = (ELINK_ETH_MAX_JUMBO_PACKET_SIZE + + ELINK_ETH_OVREHEAD) / 16; + REG_WR(sc, PBF_REG_P0_PAUSE_ENABLE + port * 4, 0); + /* Update threshold */ + REG_WR(sc, PBF_REG_P0_ARB_THRSH + port * 4, thresh); + /* Update init credit */ + switch (line_speed) { + case ELINK_SPEED_10000: + init_crd = thresh + 553 - 22; + break; + default: + ELINK_DEBUG_P1(sc, "Invalid line_speed 0x%x", + line_speed); + return ELINK_STATUS_ERROR; + } + } + REG_WR(sc, PBF_REG_P0_INIT_CRD + port * 4, init_crd); + ELINK_DEBUG_P2(sc, "PBF updated to speed %d credit %d", + line_speed, init_crd); + + /* Probe the credit changes */ + REG_WR(sc, PBF_REG_INIT_P0 + port * 4, 0x1); + DELAY(1000 * 5); + REG_WR(sc, PBF_REG_INIT_P0 + port * 4, 0x0); + + /* Enable port */ + REG_WR(sc, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port * 4, 0x0); + return ELINK_STATUS_OK; +} + +/** + * elink_get_emac_base - retrive emac base address + * + * @bp: driver handle + * @mdc_mdio_access: access type + * @port: port id + * + * This function selects the MDC/MDIO access (through emac0 or + * emac1) depend on the mdc_mdio_access, port, port swapped. Each + * phy has a default access mode, which could also be overridden + * by nvram configuration. 
This parameter, whether this is the + * default phy configuration, or the nvram overrun + * configuration, is passed here as mdc_mdio_access and selects + * the emac_base for the CL45 read/writes operations + */ +static uint32_t elink_get_emac_base(struct bnx2x_softc *sc, + uint32_t mdc_mdio_access, uint8_t port) +{ + uint32_t emac_base = 0; + switch (mdc_mdio_access) { + case SHARED_HW_CFG_MDC_MDIO_ACCESS1_PHY_TYPE: + break; + case SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC0: + if (REG_RD(sc, NIG_REG_PORT_SWAP)) + emac_base = GRCBASE_EMAC1; + else + emac_base = GRCBASE_EMAC0; + break; + case SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1: + if (REG_RD(sc, NIG_REG_PORT_SWAP)) + emac_base = GRCBASE_EMAC0; + else + emac_base = GRCBASE_EMAC1; + break; + case SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH: + emac_base = (port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0; + break; + case SHARED_HW_CFG_MDC_MDIO_ACCESS1_SWAPPED: + emac_base = (port) ? GRCBASE_EMAC0 : GRCBASE_EMAC1; + break; + default: + break; + } + return emac_base; + +} + +/******************************************************************/ +/* CL22 access functions */ +/******************************************************************/ +static elink_status_t elink_cl22_write(struct bnx2x_softc *sc, + struct elink_phy *phy, + uint16_t reg, uint16_t val) +{ + uint32_t tmp, mode; + uint8_t i; + elink_status_t rc = ELINK_STATUS_OK; + /* Switch to CL22 */ + mode = REG_RD(sc, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE); + REG_WR(sc, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, + mode & ~EMAC_MDIO_MODE_CLAUSE_45); + + /* Address */ + tmp = ((phy->addr << 21) | (reg << 16) | val | + EMAC_MDIO_COMM_COMMAND_WRITE_22 | + EMAC_MDIO_COMM_START_BUSY); + REG_WR(sc, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp); + + for (i = 0; i < 50; i++) { + DELAY(10); + + tmp = REG_RD(sc, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM); + if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) { + DELAY(5); + break; + } + } + if (tmp & EMAC_MDIO_COMM_START_BUSY) { + ELINK_DEBUG_P0(sc, "write phy register failed"); + rc = ELINK_STATUS_TIMEOUT; + } + REG_WR(sc, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, mode); + return rc; +} + +static elink_status_t elink_cl22_read(struct bnx2x_softc *sc, + struct elink_phy *phy, + uint16_t reg, uint16_t *ret_val) +{ + uint32_t val, mode; + uint16_t i; + elink_status_t rc = ELINK_STATUS_OK; + + /* Switch to CL22 */ + mode = REG_RD(sc, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE); + REG_WR(sc, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, + mode & ~EMAC_MDIO_MODE_CLAUSE_45); + + /* Address */ + val = ((phy->addr << 21) | (reg << 16) | + EMAC_MDIO_COMM_COMMAND_READ_22 | + EMAC_MDIO_COMM_START_BUSY); + REG_WR(sc, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val); + + for (i = 0; i < 50; i++) { + DELAY(10); + + val = REG_RD(sc, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM); + if (!(val & EMAC_MDIO_COMM_START_BUSY)) { + *ret_val = (uint16_t)(val & EMAC_MDIO_COMM_DATA); + DELAY(5); + break; + } + } + if (val & EMAC_MDIO_COMM_START_BUSY) { + ELINK_DEBUG_P0(sc, "read phy register failed"); + + *ret_val = 0; + rc = ELINK_STATUS_TIMEOUT; + } + REG_WR(sc, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, mode); + return rc; +} + +/******************************************************************/ +/* CL45 access functions */ +/******************************************************************/ +static elink_status_t elink_cl45_read(struct bnx2x_softc *sc, + struct elink_phy *phy, + uint8_t devad, uint16_t reg, uint16_t *ret_val) +{ + uint32_t val; + uint16_t i; + elink_status_t rc = ELINK_STATUS_OK; + uint32_t chip_id; 
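+ /* Clause 45 access below is a two-step transaction: the register address is first latched with an ADDRESS command, then a READ_45 command is issued and START_BUSY is polled until the MDIO engine completes or the 50-iteration poll expires. */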
+ if (phy->flags & ELINK_FLAGS_MDC_MDIO_WA_G) { + chip_id = (REG_RD(sc, MISC_REG_CHIP_NUM) << 16) | + ((REG_RD(sc, MISC_REG_CHIP_REV) & 0xf) << 12); + elink_set_mdio_clk(sc, chip_id, phy->mdio_ctrl); + } + + if (phy->flags & ELINK_FLAGS_MDC_MDIO_WA_B0) + elink_bits_en(sc, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS, + EMAC_MDIO_STATUS_10MB); + /* Address */ + val = ((phy->addr << 21) | (devad << 16) | reg | + EMAC_MDIO_COMM_COMMAND_ADDRESS | + EMAC_MDIO_COMM_START_BUSY); + REG_WR(sc, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val); + + for (i = 0; i < 50; i++) { + DELAY(10); + + val = REG_RD(sc, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM); + if (!(val & EMAC_MDIO_COMM_START_BUSY)) { + DELAY(5); + break; + } + } + if (val & EMAC_MDIO_COMM_START_BUSY) { + ELINK_DEBUG_P0(sc, "read phy register failed"); + elink_cb_event_log(sc, ELINK_LOG_ID_MDIO_ACCESS_TIMEOUT); + /* "MDC/MDIO access timeout" */ + + *ret_val = 0; + rc = ELINK_STATUS_TIMEOUT; + } else { + /* Data */ + val = ((phy->addr << 21) | (devad << 16) | + EMAC_MDIO_COMM_COMMAND_READ_45 | + EMAC_MDIO_COMM_START_BUSY); + REG_WR(sc, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val); + + for (i = 0; i < 50; i++) { + DELAY(10); + + val = REG_RD(sc, phy->mdio_ctrl + + EMAC_REG_EMAC_MDIO_COMM); + if (!(val & EMAC_MDIO_COMM_START_BUSY)) { + *ret_val = (uint16_t) + (val & EMAC_MDIO_COMM_DATA); + break; + } + } + if (val & EMAC_MDIO_COMM_START_BUSY) { + ELINK_DEBUG_P0(sc, "read phy register failed"); + elink_cb_event_log(sc, + ELINK_LOG_ID_MDIO_ACCESS_TIMEOUT); + /* "MDC/MDIO access timeout" */ + + *ret_val = 0; + rc = ELINK_STATUS_TIMEOUT; + } + } + /* Work around for E3 A0 */ + if (phy->flags & ELINK_FLAGS_MDC_MDIO_WA) { + phy->flags ^= ELINK_FLAGS_DUMMY_READ; + if (phy->flags & ELINK_FLAGS_DUMMY_READ) { + uint16_t temp_val; + elink_cl45_read(sc, phy, devad, 0xf, &temp_val); + } + } + + if (phy->flags & ELINK_FLAGS_MDC_MDIO_WA_B0) + elink_bits_dis(sc, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS, + EMAC_MDIO_STATUS_10MB); + return rc; +} + +static elink_status_t elink_cl45_write(struct bnx2x_softc *sc, + struct elink_phy *phy, + uint8_t devad, uint16_t reg, uint16_t val) +{ + uint32_t tmp; + uint8_t i; + elink_status_t rc = ELINK_STATUS_OK; + uint32_t chip_id; + if (phy->flags & ELINK_FLAGS_MDC_MDIO_WA_G) { + chip_id = (REG_RD(sc, MISC_REG_CHIP_NUM) << 16) | + ((REG_RD(sc, MISC_REG_CHIP_REV) & 0xf) << 12); + elink_set_mdio_clk(sc, chip_id, phy->mdio_ctrl); + } + + if (phy->flags & ELINK_FLAGS_MDC_MDIO_WA_B0) + elink_bits_en(sc, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS, + EMAC_MDIO_STATUS_10MB); + + /* Address */ + tmp = ((phy->addr << 21) | (devad << 16) | reg | + EMAC_MDIO_COMM_COMMAND_ADDRESS | + EMAC_MDIO_COMM_START_BUSY); + REG_WR(sc, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp); + + for (i = 0; i < 50; i++) { + DELAY(10); + + tmp = REG_RD(sc, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM); + if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) { + DELAY(5); + break; + } + } + if (tmp & EMAC_MDIO_COMM_START_BUSY) { + ELINK_DEBUG_P0(sc, "write phy register failed"); + elink_cb_event_log(sc, ELINK_LOG_ID_MDIO_ACCESS_TIMEOUT); + /* "MDC/MDIO access timeout" */ + + rc = ELINK_STATUS_TIMEOUT; + } else { + /* Data */ + tmp = ((phy->addr << 21) | (devad << 16) | val | + EMAC_MDIO_COMM_COMMAND_WRITE_45 | + EMAC_MDIO_COMM_START_BUSY); + REG_WR(sc, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp); + + for (i = 0; i < 50; i++) { + DELAY(10); + + tmp = REG_RD(sc, phy->mdio_ctrl + + EMAC_REG_EMAC_MDIO_COMM); + if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) { + DELAY(5); + break; + } + 
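+ /* Either START_BUSY cleared and the loop exited early above, or all 50 polls elapsed and the timeout path below logs an MDIO access timeout. */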
} + if (tmp & EMAC_MDIO_COMM_START_BUSY) { + ELINK_DEBUG_P0(sc, "write phy register failed"); + elink_cb_event_log(sc, + ELINK_LOG_ID_MDIO_ACCESS_TIMEOUT); + /* "MDC/MDIO access timeout" */ + + rc = ELINK_STATUS_TIMEOUT; + } + } + /* Work around for E3 A0 */ + if (phy->flags & ELINK_FLAGS_MDC_MDIO_WA) { + phy->flags ^= ELINK_FLAGS_DUMMY_READ; + if (phy->flags & ELINK_FLAGS_DUMMY_READ) { + uint16_t temp_val; + elink_cl45_read(sc, phy, devad, 0xf, &temp_val); + } + } + if (phy->flags & ELINK_FLAGS_MDC_MDIO_WA_B0) + elink_bits_dis(sc, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS, + EMAC_MDIO_STATUS_10MB); + return rc; +} + +/******************************************************************/ +/* EEE section */ +/******************************************************************/ +static uint8_t elink_eee_has_cap(struct elink_params *params) +{ + struct bnx2x_softc *sc = params->sc; + + if (REG_RD(sc, params->shmem2_base) <= + offsetof(struct shmem2_region, eee_status[params->port])) + return 0; + + return 1; +} + +static elink_status_t elink_eee_nvram_to_time(uint32_t nvram_mode, + uint32_t *idle_timer) +{ + switch (nvram_mode) { + case PORT_FEAT_CFG_EEE_POWER_MODE_BALANCED: + *idle_timer = ELINK_EEE_MODE_NVRAM_BALANCED_TIME; + break; + case PORT_FEAT_CFG_EEE_POWER_MODE_AGGRESSIVE: + *idle_timer = ELINK_EEE_MODE_NVRAM_AGGRESSIVE_TIME; + break; + case PORT_FEAT_CFG_EEE_POWER_MODE_LOW_LATENCY: + *idle_timer = ELINK_EEE_MODE_NVRAM_LATENCY_TIME; + break; + default: + *idle_timer = 0; + break; + } + + return ELINK_STATUS_OK; +} + +static elink_status_t elink_eee_time_to_nvram(uint32_t idle_timer, + uint32_t *nvram_mode) +{ + switch (idle_timer) { + case ELINK_EEE_MODE_NVRAM_BALANCED_TIME: + *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_BALANCED; + break; + case ELINK_EEE_MODE_NVRAM_AGGRESSIVE_TIME: + *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_AGGRESSIVE; + break; + case ELINK_EEE_MODE_NVRAM_LATENCY_TIME: + *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_LOW_LATENCY; + break; + default: + *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED; + break; + } + + return ELINK_STATUS_OK; +} + +static uint32_t elink_eee_calc_timer(struct elink_params *params) +{ + uint32_t eee_mode, eee_idle; + struct bnx2x_softc *sc = params->sc; + + if (params->eee_mode & ELINK_EEE_MODE_OVERRIDE_NVRAM) { + if (params->eee_mode & ELINK_EEE_MODE_OUTPUT_TIME) { + /* time value in eee_mode --> used directly*/ + eee_idle = params->eee_mode & ELINK_EEE_MODE_TIMER_MASK; + } else { + /* hsi value in eee_mode --> time */ + if (elink_eee_nvram_to_time(params->eee_mode & + ELINK_EEE_MODE_NVRAM_MASK, + &eee_idle)) + return 0; + } + } else { + /* hsi values in nvram --> time*/ + eee_mode = ((REG_RD(sc, params->shmem_base + + offsetof(struct shmem_region, dev_info. + port_feature_config[params->port]. 
+ eee_power_mode)) & + PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >> + PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT); + + if (elink_eee_nvram_to_time(eee_mode, &eee_idle)) + return 0; + } + + return eee_idle; +} + +static elink_status_t elink_eee_set_timers(struct elink_params *params, + struct elink_vars *vars) +{ + uint32_t eee_idle = 0, eee_mode; + struct bnx2x_softc *sc = params->sc; + + eee_idle = elink_eee_calc_timer(params); + + if (eee_idle) { + REG_WR(sc, MISC_REG_CPMU_LP_IDLE_THR_P0 + (params->port << 2), + eee_idle); + } else if ((params->eee_mode & ELINK_EEE_MODE_ENABLE_LPI) && + (params->eee_mode & ELINK_EEE_MODE_OVERRIDE_NVRAM) && + (params->eee_mode & ELINK_EEE_MODE_OUTPUT_TIME)) { + ELINK_DEBUG_P0(sc, "Error: Tx LPI is enabled with timer 0"); + return ELINK_STATUS_ERROR; + } + + vars->eee_status &= ~(SHMEM_EEE_TIMER_MASK | SHMEM_EEE_TIME_OUTPUT_BIT); + if (params->eee_mode & ELINK_EEE_MODE_OUTPUT_TIME) { + /* eee_idle in 1u --> eee_status in 16u */ + eee_idle >>= 4; + vars->eee_status |= (eee_idle & SHMEM_EEE_TIMER_MASK) | + SHMEM_EEE_TIME_OUTPUT_BIT; + } else { + if (elink_eee_time_to_nvram(eee_idle, &eee_mode)) + return ELINK_STATUS_ERROR; + vars->eee_status |= eee_mode; + } + + return ELINK_STATUS_OK; +} + +static elink_status_t elink_eee_initial_config(struct elink_params *params, + struct elink_vars *vars, uint8_t mode) +{ + vars->eee_status |= ((uint32_t) mode) << SHMEM_EEE_SUPPORTED_SHIFT; + + /* Propagate params' bits --> vars (for migration exposure) */ + if (params->eee_mode & ELINK_EEE_MODE_ENABLE_LPI) + vars->eee_status |= SHMEM_EEE_LPI_REQUESTED_BIT; + else + vars->eee_status &= ~SHMEM_EEE_LPI_REQUESTED_BIT; + + if (params->eee_mode & ELINK_EEE_MODE_ADV_LPI) + vars->eee_status |= SHMEM_EEE_REQUESTED_BIT; + else + vars->eee_status &= ~SHMEM_EEE_REQUESTED_BIT; + + return elink_eee_set_timers(params, vars); +} + +static elink_status_t elink_eee_disable(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars) +{ + struct bnx2x_softc *sc = params->sc; + + /* Make Certain LPI is disabled */ + REG_WR(sc, MISC_REG_CPMU_LP_FW_ENABLE_P0 + (params->port << 2), 0); + + elink_cl45_write(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_EEE_ADV, 0x0); + + vars->eee_status &= ~SHMEM_EEE_ADV_STATUS_MASK; + + return ELINK_STATUS_OK; +} + +static elink_status_t elink_eee_advertise(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars, uint8_t modes) +{ + struct bnx2x_softc *sc = params->sc; + uint16_t val = 0; + + /* Mask events preventing LPI generation */ + REG_WR(sc, MISC_REG_CPMU_LP_MASK_EXT_P0 + (params->port << 2), 0xfc20); + + if (modes & SHMEM_EEE_10G_ADV) { + ELINK_DEBUG_P0(sc, "Advertise 10GBase-T EEE"); + val |= 0x8; + } + if (modes & SHMEM_EEE_1G_ADV) { + ELINK_DEBUG_P0(sc, "Advertise 1GBase-T EEE"); + val |= 0x4; + } + + elink_cl45_write(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_EEE_ADV, val); + + vars->eee_status &= ~SHMEM_EEE_ADV_STATUS_MASK; + vars->eee_status |= (modes << SHMEM_EEE_ADV_STATUS_SHIFT); + + return ELINK_STATUS_OK; +} + +static void elink_update_mng_eee(struct elink_params *params, + uint32_t eee_status) +{ + struct bnx2x_softc *sc = params->sc; + + if (elink_eee_has_cap(params)) + REG_WR(sc, params->shmem2_base + + offsetof(struct shmem2_region, + eee_status[params->port]), eee_status); +} + +static void elink_eee_an_resolve(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars) +{ + struct bnx2x_softc *sc = params->sc; + uint16_t adv = 0, lp = 0; + uint32_t lp_adv = 0; + uint8_t neg = 0; + + 
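+ /* Compare the locally advertised EEE abilities with what the link partner advertises; EEE is reported active only when both sides advertise the ability that matches the resolved line speed. */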
elink_cl45_read(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_EEE_ADV, &adv); + elink_cl45_read(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_LP_EEE_ADV, &lp); + + if (lp & 0x2) { + lp_adv |= SHMEM_EEE_100M_ADV; + if (adv & 0x2) { + if (vars->line_speed == ELINK_SPEED_100) + neg = 1; + ELINK_DEBUG_P0(sc, "EEE negotiated - 100M"); + } + } + if (lp & 0x14) { + lp_adv |= SHMEM_EEE_1G_ADV; + if (adv & 0x14) { + if (vars->line_speed == ELINK_SPEED_1000) + neg = 1; + ELINK_DEBUG_P0(sc, "EEE negotiated - 1G"); + } + } + if (lp & 0x68) { + lp_adv |= SHMEM_EEE_10G_ADV; + if (adv & 0x68) { + if (vars->line_speed == ELINK_SPEED_10000) + neg = 1; + ELINK_DEBUG_P0(sc, "EEE negotiated - 10G"); + } + } + + vars->eee_status &= ~SHMEM_EEE_LP_ADV_STATUS_MASK; + vars->eee_status |= (lp_adv << SHMEM_EEE_LP_ADV_STATUS_SHIFT); + + if (neg) { + ELINK_DEBUG_P0(sc, "EEE is active"); + vars->eee_status |= SHMEM_EEE_ACTIVE_BIT; + } +} + +/******************************************************************/ +/* BSC access functions from E3 */ +/******************************************************************/ +static void elink_bsc_module_sel(struct elink_params *params) +{ + int idx; + uint32_t board_cfg, sfp_ctrl; + uint32_t i2c_pins[I2C_SWITCH_WIDTH], i2c_val[I2C_SWITCH_WIDTH]; + struct bnx2x_softc *sc = params->sc; + uint8_t port = params->port; + /* Read I2C output PINs */ + board_cfg = REG_RD(sc, params->shmem_base + + offsetof(struct shmem_region, + dev_info.shared_hw_config.board)); + i2c_pins[I2C_BSC0] = board_cfg & SHARED_HW_CFG_E3_I2C_MUX0_MASK; + i2c_pins[I2C_BSC1] = (board_cfg & SHARED_HW_CFG_E3_I2C_MUX1_MASK) >> + SHARED_HW_CFG_E3_I2C_MUX1_SHIFT; + + /* Read I2C output value */ + sfp_ctrl = REG_RD(sc, params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[port].e3_cmn_pin_cfg)); + i2c_val[I2C_BSC0] = (sfp_ctrl & PORT_HW_CFG_E3_I2C_MUX0_MASK) > 0; + i2c_val[I2C_BSC1] = (sfp_ctrl & PORT_HW_CFG_E3_I2C_MUX1_MASK) > 0; + ELINK_DEBUG_P0(sc, "Setting BSC switch"); + for (idx = 0; idx < I2C_SWITCH_WIDTH; idx++) + elink_set_cfg_pin(sc, i2c_pins[idx], i2c_val[idx]); +} + +static elink_status_t elink_bsc_read(struct bnx2x_softc *sc, + uint8_t sl_devid, + uint16_t sl_addr, + uint8_t lc_addr, + uint8_t xfer_cnt, + uint32_t *data_array) +{ + uint32_t val, i; + elink_status_t rc = ELINK_STATUS_OK; + + if (xfer_cnt > 16) { + ELINK_DEBUG_P1(sc, "invalid xfer_cnt %d. 
Max is 16 bytes", + xfer_cnt); + return ELINK_STATUS_ERROR; + } + + xfer_cnt = 16 - lc_addr; + + /* Enable the engine */ + val = REG_RD(sc, MCP_REG_MCPR_IMC_COMMAND); + val |= MCPR_IMC_COMMAND_ENABLE; + REG_WR(sc, MCP_REG_MCPR_IMC_COMMAND, val); + + /* Program slave device ID */ + val = (sl_devid << 16) | sl_addr; + REG_WR(sc, MCP_REG_MCPR_IMC_SLAVE_CONTROL, val); + + /* Start xfer with 0 byte to update the address pointer ???*/ + val = (MCPR_IMC_COMMAND_ENABLE) | + (MCPR_IMC_COMMAND_WRITE_OP << + MCPR_IMC_COMMAND_OPERATION_BITSHIFT) | + (lc_addr << MCPR_IMC_COMMAND_TRANSFER_ADDRESS_BITSHIFT) | (0); + REG_WR(sc, MCP_REG_MCPR_IMC_COMMAND, val); + + /* Poll for completion */ + i = 0; + val = REG_RD(sc, MCP_REG_MCPR_IMC_COMMAND); + while (((val >> MCPR_IMC_COMMAND_IMC_STATUS_BITSHIFT) & 0x3) != 1) { + DELAY(10); + val = REG_RD(sc, MCP_REG_MCPR_IMC_COMMAND); + if (i++ > 1000) { + ELINK_DEBUG_P1(sc, "wr 0 byte timed out after %d try", + i); + rc = ELINK_STATUS_TIMEOUT; + break; + } + } + if (rc == ELINK_STATUS_TIMEOUT) + return rc; + + /* Start xfer with read op */ + val = (MCPR_IMC_COMMAND_ENABLE) | + (MCPR_IMC_COMMAND_READ_OP << + MCPR_IMC_COMMAND_OPERATION_BITSHIFT) | + (lc_addr << MCPR_IMC_COMMAND_TRANSFER_ADDRESS_BITSHIFT) | + (xfer_cnt); + REG_WR(sc, MCP_REG_MCPR_IMC_COMMAND, val); + + /* Poll for completion */ + i = 0; + val = REG_RD(sc, MCP_REG_MCPR_IMC_COMMAND); + while (((val >> MCPR_IMC_COMMAND_IMC_STATUS_BITSHIFT) & 0x3) != 1) { + DELAY(10); + val = REG_RD(sc, MCP_REG_MCPR_IMC_COMMAND); + if (i++ > 1000) { + ELINK_DEBUG_P1(sc, "rd op timed out after %d try", i); + rc = ELINK_STATUS_TIMEOUT; + break; + } + } + if (rc == ELINK_STATUS_TIMEOUT) + return rc; + + for (i = (lc_addr >> 2); i < 4; i++) { + data_array[i] = REG_RD(sc, (MCP_REG_MCPR_IMC_DATAREG0 + i * 4)); +#ifdef __BIG_ENDIAN + data_array[i] = ((data_array[i] & 0x000000ff) << 24) | + ((data_array[i] & 0x0000ff00) << 8) | + ((data_array[i] & 0x00ff0000) >> 8) | + ((data_array[i] & 0xff000000) >> 24); +#endif + } + return rc; +} + +static void elink_cl45_read_or_write(struct bnx2x_softc *sc, + struct elink_phy *phy, + uint8_t devad, uint16_t reg, + uint16_t or_val) +{ + uint16_t val; + elink_cl45_read(sc, phy, devad, reg, &val); + elink_cl45_write(sc, phy, devad, reg, val | or_val); +} + +static void elink_cl45_read_and_write(struct bnx2x_softc *sc, + struct elink_phy *phy, + uint8_t devad, uint16_t reg, + uint16_t and_val) +{ + uint16_t val; + elink_cl45_read(sc, phy, devad, reg, &val); + elink_cl45_write(sc, phy, devad, reg, val & and_val); +} + +elink_status_t elink_phy_read(struct elink_params *params, uint8_t phy_addr, + uint8_t devad, uint16_t reg, uint16_t *ret_val) +{ + uint8_t phy_index; + /* Probe for the phy according to the given phy_addr, and execute + * the read request on it + */ + for (phy_index = 0; phy_index < params->num_phys; phy_index++) { + if (params->phy[phy_index].addr == phy_addr) { + return elink_cl45_read(params->sc, + ¶ms->phy[phy_index], devad, + reg, ret_val); + } + } + return ELINK_STATUS_ERROR; +} + +elink_status_t elink_phy_write(struct elink_params *params, uint8_t phy_addr, + uint8_t devad, uint16_t reg, uint16_t val) +{ + uint8_t phy_index; + /* Probe for the phy according to the given phy_addr, and execute + * the write request on it + */ + for (phy_index = 0; phy_index < params->num_phys; phy_index++) { + if (params->phy[phy_index].addr == phy_addr) { + return elink_cl45_write(params->sc, + ¶ms->phy[phy_index], devad, + reg, val); + } + } + return ELINK_STATUS_ERROR; +} + +static uint8_t 
elink_get_warpcore_lane(__rte_unused struct elink_phy *phy, + struct elink_params *params) +{ + uint8_t lane = 0; + struct bnx2x_softc *sc = params->sc; + uint32_t path_swap, path_swap_ovr; + uint8_t path, port; + + path = SC_PATH(sc); + port = params->port; + + if (elink_is_4_port_mode(sc)) { + uint32_t port_swap, port_swap_ovr; + + /* Figure out path swap value */ + path_swap_ovr = REG_RD(sc, MISC_REG_FOUR_PORT_PATH_SWAP_OVWR); + if (path_swap_ovr & 0x1) + path_swap = (path_swap_ovr & 0x2); + else + path_swap = REG_RD(sc, MISC_REG_FOUR_PORT_PATH_SWAP); + + if (path_swap) + path = path ^ 1; + + /* Figure out port swap value */ + port_swap_ovr = REG_RD(sc, MISC_REG_FOUR_PORT_PORT_SWAP_OVWR); + if (port_swap_ovr & 0x1) + port_swap = (port_swap_ovr & 0x2); + else + port_swap = REG_RD(sc, MISC_REG_FOUR_PORT_PORT_SWAP); + + if (port_swap) + port = port ^ 1; + + lane = (port << 1) + path; + } else { /* Two port mode - no port swap */ + + /* Figure out path swap value */ + path_swap_ovr = + REG_RD(sc, MISC_REG_TWO_PORT_PATH_SWAP_OVWR); + if (path_swap_ovr & 0x1) { + path_swap = (path_swap_ovr & 0x2); + } else { + path_swap = + REG_RD(sc, MISC_REG_TWO_PORT_PATH_SWAP); + } + if (path_swap) + path = path ^ 1; + + lane = path << 1; + } + return lane; +} + + +static void elink_set_aer_mmd(struct elink_params *params, + struct elink_phy *phy) +{ + uint32_t ser_lane; + uint16_t offset, aer_val; + struct bnx2x_softc *sc = params->sc; + ser_lane = ((params->lane_config & + PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >> + PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT); + + offset = (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) ? + (phy->addr + ser_lane) : 0; + + if (USES_WARPCORE(sc)) { + aer_val = elink_get_warpcore_lane(phy, params); + /* In Dual-lane mode, two lanes are joined together, + * so in order to configure them, the AER broadcast method is + * used here. + * 0x200 is the broadcast address for lanes 0,1 + * 0x201 is the broadcast address for lanes 2,3 + */ + if (phy->flags & ELINK_FLAGS_WC_DUAL_MODE) + aer_val = (aer_val >> 1) | 0x200; + } else if (CHIP_IS_E2(sc)) + aer_val = 0x3800 + offset - 1; + else + aer_val = 0x3800 + offset; + + CL22_WR_OVER_CL45(sc, phy, MDIO_REG_BANK_AER_BLOCK, + MDIO_AER_BLOCK_AER_REG, aer_val); + +} + +/******************************************************************/ +/* Internal phy section */ +/******************************************************************/ + +static void elink_set_serdes_access(struct bnx2x_softc *sc, uint8_t port) +{ + uint32_t emac_base = (port) ? 
GRCBASE_EMAC1 : GRCBASE_EMAC0; + + /* Set Clause 22 */ + REG_WR(sc, NIG_REG_SERDES0_CTRL_MD_ST + port * 0x10, 1); + REG_WR(sc, emac_base + EMAC_REG_EMAC_MDIO_COMM, 0x245f8000); + DELAY(500); + REG_WR(sc, emac_base + EMAC_REG_EMAC_MDIO_COMM, 0x245d000f); + DELAY(500); + /* Set Clause 45 */ + REG_WR(sc, NIG_REG_SERDES0_CTRL_MD_ST + port * 0x10, 0); +} + +static void elink_serdes_deassert(struct bnx2x_softc *sc, uint8_t port) +{ + uint32_t val; + + ELINK_DEBUG_P0(sc, "elink_serdes_deassert"); + + val = ELINK_SERDES_RESET_BITS << (port * 16); + + /* Reset and unreset the SerDes/XGXS */ + REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, val); + DELAY(500); + REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val); + + elink_set_serdes_access(sc, port); + + REG_WR(sc, NIG_REG_SERDES0_CTRL_MD_DEVAD + port * 0x10, + ELINK_DEFAULT_PHY_DEV_ADDR); +} + +static void elink_xgxs_specific_func(struct elink_phy *phy, + struct elink_params *params, + uint32_t action) +{ + struct bnx2x_softc *sc = params->sc; + switch (action) { + case ELINK_PHY_INIT: + /* Set correct devad */ + REG_WR(sc, NIG_REG_XGXS0_CTRL_MD_ST + params->port * 0x18, 0); + REG_WR(sc, NIG_REG_XGXS0_CTRL_MD_DEVAD + params->port * 0x18, + phy->def_md_devad); + break; + } +} + +static void elink_xgxs_deassert(struct elink_params *params) +{ + struct bnx2x_softc *sc = params->sc; + uint8_t port; + uint32_t val; + ELINK_DEBUG_P0(sc, "elink_xgxs_deassert"); + port = params->port; + + val = ELINK_XGXS_RESET_BITS << (port * 16); + + /* Reset and unreset the SerDes/XGXS */ + REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, val); + DELAY(500); + REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val); + elink_xgxs_specific_func(¶ms->phy[ELINK_INT_PHY], params, + ELINK_PHY_INIT); +} + +static void elink_calc_ieee_aneg_adv(struct elink_phy *phy, + struct elink_params *params, + uint16_t *ieee_fc) +{ + struct bnx2x_softc *sc = params->sc; + *ieee_fc = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX; + /* Resolve pause mode and advertisement Please refer to Table + * 28B-3 of the 802.3ab-1999 spec + */ + + switch (phy->req_flow_ctrl) { + case ELINK_FLOW_CTRL_AUTO: + switch (params->req_fc_auto_adv) { + case ELINK_FLOW_CTRL_BOTH: + case ELINK_FLOW_CTRL_RX: + *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH; + break; + case ELINK_FLOW_CTRL_TX: + *ieee_fc |= + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC; + break; + default: + break; + } + break; + case ELINK_FLOW_CTRL_TX: + *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC; + break; + + case ELINK_FLOW_CTRL_RX: + case ELINK_FLOW_CTRL_BOTH: + *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH; + break; + + case ELINK_FLOW_CTRL_NONE: + default: + *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE; + break; + } + ELINK_DEBUG_P1(sc, "ieee_fc = 0x%x", *ieee_fc); +} + +static void set_phy_vars(struct elink_params *params, + struct elink_vars *vars) +{ + struct bnx2x_softc *sc = params->sc; + uint8_t actual_phy_idx, phy_index, link_cfg_idx; + uint8_t phy_config_swapped = params->multi_phy_config & + PORT_HW_CFG_PHY_SWAPPED_ENABLED; + for (phy_index = ELINK_INT_PHY; phy_index < params->num_phys; + phy_index++) { + link_cfg_idx = ELINK_LINK_CONFIG_IDX(phy_index); + actual_phy_idx = phy_index; + if (phy_config_swapped) { + if (phy_index == ELINK_EXT_PHY1) + actual_phy_idx = ELINK_EXT_PHY2; + else if (phy_index == ELINK_EXT_PHY2) + actual_phy_idx = ELINK_EXT_PHY1; + } + params->phy[actual_phy_idx].req_flow_ctrl = + params->req_flow_ctrl[link_cfg_idx]; + + 
params->phy[actual_phy_idx].req_line_speed = + params->req_line_speed[link_cfg_idx]; + + params->phy[actual_phy_idx].speed_cap_mask = + params->speed_cap_mask[link_cfg_idx]; + + params->phy[actual_phy_idx].req_duplex = + params->req_duplex[link_cfg_idx]; + + if (params->req_line_speed[link_cfg_idx] == + ELINK_SPEED_AUTO_NEG) + vars->link_status |= LINK_STATUS_AUTO_NEGOTIATE_ENABLED; + + ELINK_DEBUG_P3(sc, "req_flow_ctrl %x, req_line_speed %x," + " speed_cap_mask %x", + params->phy[actual_phy_idx].req_flow_ctrl, + params->phy[actual_phy_idx].req_line_speed, + params->phy[actual_phy_idx].speed_cap_mask); + } +} + +static void elink_ext_phy_set_pause(struct elink_params *params, + struct elink_phy *phy, + struct elink_vars *vars) +{ + uint16_t val; + struct bnx2x_softc *sc = params->sc; + /* Read modify write pause advertising */ + elink_cl45_read(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV_PAUSE, &val); + + val &= ~MDIO_AN_REG_ADV_PAUSE_BOTH; + + /* Please refer to Table 28B-3 of 802.3ab-1999 spec. */ + elink_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc); + if ((vars->ieee_fc & + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) == + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) { + val |= MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC; + } + if ((vars->ieee_fc & + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) == + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) { + val |= MDIO_AN_REG_ADV_PAUSE_PAUSE; + } + ELINK_DEBUG_P1(sc, "Ext phy AN advertise 0x%x", val); + elink_cl45_write(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV_PAUSE, val); +} + +static void elink_pause_resolve(__rte_unused struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars, + uint32_t pause_result) +{ + struct bnx2x_softc *sc = params->sc; + /* LD LP */ + switch (pause_result) { /* ASYM P ASYM P */ + case 0xb: /* 1 0 1 1 */ + ELINK_DEBUG_P0(sc, "Flow Control: TX only"); + vars->flow_ctrl = ELINK_FLOW_CTRL_TX; + break; + + case 0xe: /* 1 1 1 0 */ + ELINK_DEBUG_P0(sc, "Flow Control: RX only"); + vars->flow_ctrl = ELINK_FLOW_CTRL_RX; + break; + + case 0x5: /* 0 1 0 1 */ + case 0x7: /* 0 1 1 1 */ + case 0xd: /* 1 1 0 1 */ + case 0xf: /* 1 1 1 1 */ + /* If the user selected to advertise RX ONLY, + * although we advertised both, need to enable + * RX only.
+ */ + + if (params->req_fc_auto_adv == ELINK_FLOW_CTRL_BOTH) { + ELINK_DEBUG_P0(sc, "Flow Control: RX & TX"); + vars->flow_ctrl = ELINK_FLOW_CTRL_BOTH; + } else { + ELINK_DEBUG_P0(sc, "Flow Control: RX only"); + vars->flow_ctrl = ELINK_FLOW_CTRL_RX; + } + break; + default: + ELINK_DEBUG_P0(sc, "Flow Control: None"); + vars->flow_ctrl = ELINK_FLOW_CTRL_NONE; + break; + } + if (pause_result & (1 << 0)) + vars->link_status |= LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE; + if (pause_result & (1 << 1)) + vars->link_status |= LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE; + +} + +static void elink_ext_phy_update_adv_fc(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars) +{ + uint16_t ld_pause; /* local */ + uint16_t lp_pause; /* link partner */ + uint16_t pause_result; + struct bnx2x_softc *sc = params->sc; + if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BNX2X54618SE) { + elink_cl22_read(sc, phy, 0x4, &ld_pause); + elink_cl22_read(sc, phy, 0x5, &lp_pause); + } else if (CHIP_IS_E3(sc) && + ELINK_SINGLE_MEDIA_DIRECT(params)) { + uint8_t lane = elink_get_warpcore_lane(phy, params); + uint16_t gp_status, gp_mask; + elink_cl45_read(sc, phy, + MDIO_AN_DEVAD, MDIO_WC_REG_GP2_STATUS_GP_2_4, + &gp_status); + gp_mask = (MDIO_WC_REG_GP2_STATUS_GP_2_4_CL73_AN_CMPL | + MDIO_WC_REG_GP2_STATUS_GP_2_4_CL37_LP_AN_CAP) << + lane; + if ((gp_status & gp_mask) == gp_mask) { + elink_cl45_read(sc, phy, MDIO_AN_DEVAD, + MDIO_AN_REG_ADV_PAUSE, &ld_pause); + elink_cl45_read(sc, phy, MDIO_AN_DEVAD, + MDIO_AN_REG_LP_AUTO_NEG, &lp_pause); + } else { + elink_cl45_read(sc, phy, MDIO_AN_DEVAD, + MDIO_AN_REG_CL37_FC_LD, &ld_pause); + elink_cl45_read(sc, phy, MDIO_AN_DEVAD, + MDIO_AN_REG_CL37_FC_LP, &lp_pause); + ld_pause = ((ld_pause & + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) + << 3); + lp_pause = ((lp_pause & + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) + << 3); + } + } else { + elink_cl45_read(sc, phy, + MDIO_AN_DEVAD, + MDIO_AN_REG_ADV_PAUSE, &ld_pause); + elink_cl45_read(sc, phy, + MDIO_AN_DEVAD, + MDIO_AN_REG_LP_AUTO_NEG, &lp_pause); + } + pause_result = (ld_pause & + MDIO_AN_REG_ADV_PAUSE_MASK) >> 8; + pause_result |= (lp_pause & + MDIO_AN_REG_ADV_PAUSE_MASK) >> 10; + ELINK_DEBUG_P1(sc, "Ext PHY pause result 0x%x", pause_result); + elink_pause_resolve(phy, params, vars, pause_result); + +} + +static uint8_t elink_ext_phy_resolve_fc(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars) +{ + uint8_t ret = 0; + vars->flow_ctrl = ELINK_FLOW_CTRL_NONE; + if (phy->req_flow_ctrl != ELINK_FLOW_CTRL_AUTO) { + /* Update the advertised flow-control of LD/LP in AN */ + if (phy->req_line_speed == ELINK_SPEED_AUTO_NEG) + elink_ext_phy_update_adv_fc(phy, params, vars); + /* But set the flow-control result as the requested one */ + vars->flow_ctrl = phy->req_flow_ctrl; + } else if (phy->req_line_speed != ELINK_SPEED_AUTO_NEG) + vars->flow_ctrl = params->req_fc_auto_adv; + else if (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) { + ret = 1; + elink_ext_phy_update_adv_fc(phy, params, vars); + } + return ret; +} +/******************************************************************/ +/* Warpcore section */ +/******************************************************************/ +/* The init_internal_warpcore should mirror the xgxs, + * i.e. reset the lane (if needed), set aer for the + * init configuration, and set/clear SGMII flag. Internal + * phy init is done purely in phy_init stage.
+ */ +#define WC_TX_DRIVER(post2, idriver, ipre, ifir) \ + ((post2 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) | \ + (idriver << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) | \ + (ipre << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET) | \ + (ifir << MDIO_WC_REG_TX0_TX_DRIVER_IFIR_OFFSET)) + +#define WC_TX_FIR(post, main, pre) \ + ((post << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) | \ + (main << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) | \ + (pre << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET)) + +static void elink_update_link_attr(struct elink_params *params, + uint32_t link_attr) +{ + struct bnx2x_softc *sc = params->sc; + + if (SHMEM2_HAS(sc, link_attr_sync)) + REG_WR(sc, params->shmem2_base + + offsetof(struct shmem2_region, + link_attr_sync[params->port]), link_attr); +} + +static void elink_warpcore_enable_AN_KR2(struct elink_phy *phy, + struct elink_params *params, + __rte_unused struct elink_vars *vars) +{ + struct bnx2x_softc *sc = params->sc; + uint16_t i; + static struct elink_reg_set reg_set[] = { + /* Step 1 - Program the TX/RX alignment markers */ + {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL5, 0xa157}, + {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL7, 0xcbe2}, + {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL6, 0x7537}, + {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL9, 0xa157}, + {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL11, 0xcbe2}, + {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL10, 0x7537}, + /* Step 2 - Configure the NP registers */ + {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_USERB0_CTRL, 0x000a}, + {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL1, 0x6400}, + {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL3, 0x0620}, + {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CODE_FIELD, 0x0157}, + {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI1, 0x6464}, + {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI2, 0x3150}, + {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI3, 0x3150}, + {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_BAM_CODE, 0x0157}, + {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_UD_CODE, 0x0620} + }; + ELINK_DEBUG_P0(sc, "Enabling 20G-KR2"); + + elink_cl45_read_or_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_CL49_USERB0_CTRL, (3 << 6)); + + for (i = 0; i < ARRAY_SIZE(reg_set); i++) + elink_cl45_write(sc, phy, reg_set[i].devad, reg_set[i].reg, + reg_set[i].val); + + /* Start KR2 work-around timer which handles BNX2X8073 link-partner */ + params->link_attr_sync |= LINK_ATTR_SYNC_KR2_ENABLE; + elink_update_link_attr(params, params->link_attr_sync); +} + +static void elink_disable_kr2(struct elink_params *params, + struct elink_vars *vars, + struct elink_phy *phy) +{ + struct bnx2x_softc *sc = params->sc; + int i; + static struct elink_reg_set reg_set[] = { + /* Step 1 - Program the TX/RX alignment markers */ + {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL5, 0x7690}, + {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL7, 0xe647}, + {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL6, 0xc4f0}, + {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL9, 0x7690}, + {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL11, 0xe647}, + {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL10, 0xc4f0}, + {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_USERB0_CTRL, 0x000c}, + {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL1, 0x6000}, + {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL3, 0x0000}, + {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CODE_FIELD, 0x0002}, + {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI1, 0x0000}, + {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI2, 0x0af7}, + {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI3, 0x0af7}, + {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_BAM_CODE, 0x0002}, + {MDIO_WC_DEVAD,
MDIO_WC_REG_ETA_CL73_LD_UD_CODE, 0x0000} + }; + ELINK_DEBUG_P0(sc, "Disabling 20G-KR2"); + + for (i = 0; i < (int)ARRAY_SIZE(reg_set); i++) + elink_cl45_write(sc, phy, reg_set[i].devad, reg_set[i].reg, + reg_set[i].val); + params->link_attr_sync &= ~LINK_ATTR_SYNC_KR2_ENABLE; + elink_update_link_attr(params, params->link_attr_sync); + + vars->check_kr2_recovery_cnt = ELINK_CHECK_KR2_RECOVERY_CNT; +} + +static void elink_warpcore_set_lpi_passthrough(struct elink_phy *phy, + struct elink_params *params) +{ + struct bnx2x_softc *sc = params->sc; + + ELINK_DEBUG_P0(sc, "Configure WC for LPI pass through"); + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_EEE_COMBO_CONTROL0, 0x7c); + elink_cl45_read_or_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL4_MISC5, 0xc000); +} + +static void elink_warpcore_restart_AN_KR(struct elink_phy *phy, + struct elink_params *params) +{ + /* Restart autoneg on the leading lane only */ + struct bnx2x_softc *sc = params->sc; + uint16_t lane = elink_get_warpcore_lane(phy, params); + CL22_WR_OVER_CL45(sc, phy, MDIO_REG_BANK_AER_BLOCK, + MDIO_AER_BLOCK_AER_REG, lane); + elink_cl45_write(sc, phy, MDIO_AN_DEVAD, + MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x1200); + + /* Restore AER */ + elink_set_aer_mmd(params, phy); +} + +static void elink_warpcore_enable_AN_KR(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars) { + uint16_t lane, i, cl72_ctrl, an_adv = 0, val; + uint32_t wc_lane_config; + struct bnx2x_softc *sc = params->sc; + static struct elink_reg_set reg_set[] = { + {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7}, + {MDIO_PMA_DEVAD, MDIO_WC_REG_IEEE0BLK_AUTONEGNP, 0x0}, + {MDIO_WC_DEVAD, MDIO_WC_REG_RX66_CONTROL, 0x7415}, + {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_MISC2, 0x6190}, + /* Disable Autoneg: re-enable it after adv is done. 
*/ + {MDIO_AN_DEVAD, MDIO_WC_REG_IEEE0BLK_MIICNTL, 0}, + {MDIO_PMA_DEVAD, MDIO_WC_REG_PMD_KR_CONTROL, 0x2}, + {MDIO_WC_DEVAD, MDIO_WC_REG_CL72_USERB0_CL72_TX_FIR_TAP, 0}, + }; + ELINK_DEBUG_P0(sc, "Enable Auto Negotiation for KR"); + /* Set to default registers that may be overridden by 10G force */ + for (i = 0; i < ARRAY_SIZE(reg_set); i++) + elink_cl45_write(sc, phy, reg_set[i].devad, reg_set[i].reg, + reg_set[i].val); + + elink_cl45_read(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, &cl72_ctrl); + cl72_ctrl &= 0x08ff; + cl72_ctrl |= 0x3800; + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, cl72_ctrl); + + /* Check adding advertisement for 1G KX */ + if (((vars->line_speed == ELINK_SPEED_AUTO_NEG) && + (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) || + (vars->line_speed == ELINK_SPEED_1000)) { + uint16_t addr = MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2; + an_adv |= (1 << 5); + + /* Enable CL37 1G Parallel Detect */ + elink_cl45_read_or_write(sc, phy, MDIO_WC_DEVAD, addr, 0x1); + ELINK_DEBUG_P0(sc, "Advertise 1G"); + } + if (((vars->line_speed == ELINK_SPEED_AUTO_NEG) && + (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) || + (vars->line_speed == ELINK_SPEED_10000)) { + /* Check adding advertisement for 10G KR */ + an_adv |= (1 << 7); + /* Enable 10G Parallel Detect */ + CL22_WR_OVER_CL45(sc, phy, MDIO_REG_BANK_AER_BLOCK, + MDIO_AER_BLOCK_AER_REG, 0); + + elink_cl45_write(sc, phy, MDIO_AN_DEVAD, + MDIO_WC_REG_PAR_DET_10G_CTRL, 1); + elink_set_aer_mmd(params, phy); + ELINK_DEBUG_P0(sc, "Advertise 10G"); + } + + /* Set Transmit PMD settings */ + lane = elink_get_warpcore_lane(phy, params); + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_TX0_TX_DRIVER + 0x10 * lane, + WC_TX_DRIVER(0x02, 0x06, 0x09, 0)); + /* Configure the next lane if dual mode */ + if (phy->flags & ELINK_FLAGS_WC_DUAL_MODE) + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_TX0_TX_DRIVER + 0x10 * (lane + 1), + WC_TX_DRIVER(0x02, 0x06, 0x09, 0)); + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_CL72_USERB0_CL72_OS_DEF_CTRL, + 0x03f0); + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_CL72_USERB0_CL72_2P5_DEF_CTRL, + 0x03f0); + + /* Advertised speeds */ + elink_cl45_write(sc, phy, MDIO_AN_DEVAD, + MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, an_adv); + + /* Advertise and set FEC (Forward Error Correction) */ + elink_cl45_write(sc, phy, MDIO_AN_DEVAD, + MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT2, + (MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_ABILITY | + MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_REQ)); + + /* Enable CL37 BAM */ + if (REG_RD(sc, params->shmem_base + + offsetof(struct shmem_region, dev_info.
+ port_hw_config[params->port].default_cfg)) & + PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED) { + elink_cl45_read_or_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL6_MP5_NEXTPAGECTRL, + 1); + ELINK_DEBUG_P0(sc, "Enable CL37 BAM on KR"); + } + + /* Advertise pause */ + elink_ext_phy_set_pause(params, phy, vars); + vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY; + elink_cl45_read_or_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL5_MISC7, 0x100); + + /* Over 1G - AN local device user page 1 */ + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL3_UP1, 0x1f); + + if (((phy->req_line_speed == ELINK_SPEED_AUTO_NEG) && + (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)) || + (phy->req_line_speed == ELINK_SPEED_20000)) { + + CL22_WR_OVER_CL45(sc, phy, MDIO_REG_BANK_AER_BLOCK, + MDIO_AER_BLOCK_AER_REG, lane); + + elink_cl45_read_or_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_RX1_PCI_CTRL + + (0x10 * lane), + (1 << 11)); + + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXS_X2_CONTROL3, 0x7); + elink_set_aer_mmd(params, phy); + + elink_warpcore_enable_AN_KR2(phy, params, vars); + } else { + /* Enable Auto-Detect to support 1G over CL37 as well */ + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, 0x10); + wc_lane_config = REG_RD(sc, params->shmem_base + + offsetof(struct shmem_region, dev_info. + shared_hw_config.wc_lane_config)); + elink_cl45_read(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_RX0_PCI_CTRL + (lane << 4), &val); + /* Force cl48 sync_status LOW to avoid getting stuck in CL73 + * parallel-detect loop when CL73 and CL37 are enabled. + */ + val |= 1 << 11; + + /* Restore Polarity settings in case it was run over by + * previous link owner + */ + if (wc_lane_config & + (SHARED_HW_CFG_RX_LANE0_POL_FLIP_ENABLED << lane)) + val |= 3 << 2; + else + val &= ~(3 << 2); + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_RX0_PCI_CTRL + (lane << 4), + val); + + elink_disable_kr2(params, vars, phy); + } + + /* Enable Autoneg: only on the main lane */ + elink_warpcore_restart_AN_KR(phy, params); +} + +static void elink_warpcore_set_10G_KR(struct elink_phy *phy, + struct elink_params *params, + __rte_unused struct elink_vars *vars) +{ + struct bnx2x_softc *sc = params->sc; + uint16_t val16, i, lane; + static struct elink_reg_set reg_set[] = { + /* Disable Autoneg */ + {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7}, + {MDIO_WC_DEVAD, MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, + 0x3f00}, + {MDIO_AN_DEVAD, MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, 0}, + {MDIO_AN_DEVAD, MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x0}, + {MDIO_WC_DEVAD, MDIO_WC_REG_DIGITAL3_UP1, 0x1}, + {MDIO_WC_DEVAD, MDIO_WC_REG_DIGITAL5_MISC7, 0xa}, + /* Leave cl72 training enable, needed for KR */ + {MDIO_PMA_DEVAD, MDIO_WC_REG_PMD_KR_CONTROL, 0x2} + }; + + for (i = 0; i < ARRAY_SIZE(reg_set); i++) + elink_cl45_write(sc, phy, reg_set[i].devad, reg_set[i].reg, + reg_set[i].val); + + lane = elink_get_warpcore_lane(phy, params); + /* Global registers */ + CL22_WR_OVER_CL45(sc, phy, MDIO_REG_BANK_AER_BLOCK, + MDIO_AER_BLOCK_AER_REG, 0); + /* Disable CL36 PCS Tx */ + elink_cl45_read(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXSBLK1_LANECTRL0, &val16); + val16 &= ~(0x0011 << lane); + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXSBLK1_LANECTRL0, val16); + + elink_cl45_read(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXSBLK1_LANECTRL1, &val16); + val16 |= (0x0303 << (lane << 1)); + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXSBLK1_LANECTRL1, val16); + /* 
Restore AER */ + elink_set_aer_mmd(params, phy); + /* Set speed via PMA/PMD register */ + elink_cl45_write(sc, phy, MDIO_PMA_DEVAD, + MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x2040); + + elink_cl45_write(sc, phy, MDIO_PMA_DEVAD, + MDIO_WC_REG_IEEE0BLK_AUTONEGNP, 0xB); + + /* Enable encoded forced speed */ + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_MISC2, 0x30); + + /* Turn TX scramble payload only the 64/66 scrambler */ + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_TX66_CONTROL, 0x9); + + /* Turn RX scramble payload only the 64/66 scrambler */ + elink_cl45_read_or_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_RX66_CONTROL, 0xF9); + + /* Set and clear loopback to cause a reset to 64/66 decoder */ + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x4000); + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x0); + +} + +static void elink_warpcore_set_10G_XFI(struct elink_phy *phy, + struct elink_params *params, + uint8_t is_xfi) +{ + struct bnx2x_softc *sc = params->sc; + uint16_t misc1_val, tap_val, tx_driver_val, lane, val; + uint32_t cfg_tap_val, tx_drv_brdct, tx_equal; + uint32_t ifir_val, ipost2_val, ipre_driver_val; + /* Hold rxSeqStart */ + elink_cl45_read_or_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, 0x8000); + + /* Hold tx_fifo_reset */ + elink_cl45_read_or_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, 0x1); + + /* Disable CL73 AN */ + elink_cl45_write(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0); + + /* Disable 100FX Enable and Auto-Detect */ + elink_cl45_read_and_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_FX100_CTRL1, 0xFFFA); + + /* Disable 100FX Idle detect */ + elink_cl45_read_or_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_FX100_CTRL3, 0x0080); + + /* Set Block address to Remote PHY & Clear forced_speed[5] */ + elink_cl45_read_and_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL4_MISC3, 0xFF7F); + + /* Turn off auto-detect & fiber mode */ + elink_cl45_read_and_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, + 0xFFEE); + + /* Set filter_force_link, disable_false_link and parallel_detect */ + elink_cl45_read(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, &val); + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, + ((val | 0x0006) & 0xFFFE)); + + /* Set XFI / SFI */ + elink_cl45_read(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_MISC1, &misc1_val); + + misc1_val &= ~(0x1f); + + if (is_xfi) { + misc1_val |= 0x5; + tap_val = WC_TX_FIR(0x08, 0x37, 0x00); + tx_driver_val = WC_TX_DRIVER(0x00, 0x02, 0x03, 0); + } else { + cfg_tap_val = REG_RD(sc, params->shmem_base + + offsetof(struct shmem_region, dev_info. + port_hw_config[params->port]. + sfi_tap_values)); + + tx_equal = cfg_tap_val & PORT_HW_CFG_TX_EQUALIZATION_MASK; + + misc1_val |= 0x9; + + /* TAP values are controlled by nvram, if value there isn't 0 */ + if (tx_equal) + tap_val = (uint16_t)tx_equal; + else + tap_val = WC_TX_FIR(0x0f, 0x2b, 0x02); + + ifir_val = DEFAULT_TX_DRV_IFIR; + ipost2_val = DEFAULT_TX_DRV_POST2; + ipre_driver_val = DEFAULT_TX_DRV_IPRE_DRIVER; + tx_drv_brdct = DEFAULT_TX_DRV_BRDCT; + + /* If any of the IFIR/IPRE_DRIVER/POST@ is set, apply all + * configuration. 
+ */ + if (cfg_tap_val & (PORT_HW_CFG_TX_DRV_IFIR_MASK | + PORT_HW_CFG_TX_DRV_IPREDRIVER_MASK | + PORT_HW_CFG_TX_DRV_POST2_MASK)) { + ifir_val = (cfg_tap_val & + PORT_HW_CFG_TX_DRV_IFIR_MASK) >> + PORT_HW_CFG_TX_DRV_IFIR_SHIFT; + ipre_driver_val = (cfg_tap_val & + PORT_HW_CFG_TX_DRV_IPREDRIVER_MASK) + >> PORT_HW_CFG_TX_DRV_IPREDRIVER_SHIFT; + ipost2_val = (cfg_tap_val & + PORT_HW_CFG_TX_DRV_POST2_MASK) >> + PORT_HW_CFG_TX_DRV_POST2_SHIFT; + } + + if (cfg_tap_val & PORT_HW_CFG_TX_DRV_BROADCAST_MASK) { + tx_drv_brdct = (cfg_tap_val & + PORT_HW_CFG_TX_DRV_BROADCAST_MASK) >> + PORT_HW_CFG_TX_DRV_BROADCAST_SHIFT; + } + + tx_driver_val = WC_TX_DRIVER(ipost2_val, tx_drv_brdct, + ipre_driver_val, ifir_val); + } + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_MISC1, misc1_val); + + /* Set Transmit PMD settings */ + lane = elink_get_warpcore_lane(phy, params); + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_TX_FIR_TAP, + tap_val | MDIO_WC_REG_TX_FIR_TAP_ENABLE); + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_TX0_TX_DRIVER + 0x10 * lane, + tx_driver_val); + + /* Enable fiber mode, enable and invert sig_det */ + elink_cl45_read_or_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, 0xd); + + /* Set Block address to Remote PHY & Set forced_speed[5], 40bit mode */ + elink_cl45_read_or_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL4_MISC3, 0x8080); + + elink_warpcore_set_lpi_passthrough(phy, params); + + /* 10G XFI Full Duplex */ + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x100); + + /* Release tx_fifo_reset */ + elink_cl45_read_and_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, + 0xFFFE); + /* Release rxSeqStart */ + elink_cl45_read_and_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, 0x7FFF); +} + +static void elink_warpcore_set_20G_force_KR2(struct elink_phy *phy, + struct elink_params *params) +{ + uint16_t val; + struct bnx2x_softc *sc = params->sc; + /* Set global registers, so set AER lane to 0 */ + CL22_WR_OVER_CL45(sc, phy, MDIO_REG_BANK_AER_BLOCK, + MDIO_AER_BLOCK_AER_REG, 0); + + /* Disable sequencer */ + elink_cl45_read_and_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXSBLK0_XGXSCONTROL, ~(1 << 13)); + + elink_set_aer_mmd(params, phy); + + elink_cl45_read_and_write(sc, phy, MDIO_PMA_DEVAD, + MDIO_WC_REG_PMD_KR_CONTROL, ~(1 << 1)); + elink_cl45_write(sc, phy, MDIO_AN_DEVAD, + MDIO_AN_REG_CTRL, 0); + /* Turn off CL73 */ + elink_cl45_read(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_CL73_USERB0_CTRL, &val); + val &= ~(1 << 5); + val |= (1 << 6); + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_CL73_USERB0_CTRL, val); + + /* Set 20G KR2 force speed */ + elink_cl45_read_or_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_MISC1, 0x1f); + + elink_cl45_read_or_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL4_MISC3, (1 << 7)); + + elink_cl45_read(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, &val); + val &= ~(3 << 14); + val |= (1 << 15); + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, val); + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_CL72_USERB0_CL72_TX_FIR_TAP, 0x835A); + + /* Enable sequencer (over lane 0) */ + CL22_WR_OVER_CL45(sc, phy, MDIO_REG_BANK_AER_BLOCK, + MDIO_AER_BLOCK_AER_REG, 0); + + elink_cl45_read_or_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXSBLK0_XGXSCONTROL, (1 << 13)); + + elink_set_aer_mmd(params, phy); +} + +static void 
elink_warpcore_set_20G_DXGXS(struct bnx2x_softc *sc, + struct elink_phy *phy, + uint16_t lane) +{ + /* Rx0 anaRxControl1G */ + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_RX0_ANARXCONTROL1G, 0x90); + + /* Rx2 anaRxControl1G */ + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_RX2_ANARXCONTROL1G, 0x90); + + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_RX66_SCW0, 0xE070); + + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_RX66_SCW1, 0xC0D0); + + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_RX66_SCW2, 0xA0B0); + + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_RX66_SCW3, 0x8090); + + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_RX66_SCW0_MASK, 0xF0F0); + + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_RX66_SCW1_MASK, 0xF0F0); + + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_RX66_SCW2_MASK, 0xF0F0); + + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_RX66_SCW3_MASK, 0xF0F0); + + /* Serdes Digital Misc1 */ + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_MISC1, 0x6008); + + /* Serdes Digital4 Misc3 */ + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL4_MISC3, 0x8088); + + /* Set Transmit PMD settings */ + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_TX_FIR_TAP, + (WC_TX_FIR(0x12, 0x2d, 0x00) | + MDIO_WC_REG_TX_FIR_TAP_ENABLE)); + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_TX0_TX_DRIVER + 0x10 * lane, + WC_TX_DRIVER(0x02, 0x02, 0x02, 0)); +} + +static void elink_warpcore_set_sgmii_speed(struct elink_phy *phy, + struct elink_params *params, + uint8_t fiber_mode, + uint8_t always_autoneg) +{ + struct bnx2x_softc *sc = params->sc; + uint16_t val16, digctrl_kx1, digctrl_kx2; + + /* Clear XFI clock comp in non-10G single lane mode. 
*/ + elink_cl45_read_and_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_RX66_CONTROL, ~(3 << 13)); + + elink_warpcore_set_lpi_passthrough(phy, params); + + if (always_autoneg || phy->req_line_speed == ELINK_SPEED_AUTO_NEG) { + /* SGMII Autoneg */ + elink_cl45_read_or_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_COMBO_IEEE0_MIICTRL, + 0x1000); + ELINK_DEBUG_P0(sc, "set SGMII AUTONEG"); + } else { + elink_cl45_read(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16); + val16 &= 0xcebf; + switch (phy->req_line_speed) { + case ELINK_SPEED_10: + break; + case ELINK_SPEED_100: + val16 |= 0x2000; + break; + case ELINK_SPEED_1000: + val16 |= 0x0040; + break; + default: + ELINK_DEBUG_P1(sc, + "Speed not supported: 0x%x", phy->req_line_speed); + return; + } + + if (phy->req_duplex == DUPLEX_FULL) + val16 |= 0x0100; + + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_COMBO_IEEE0_MIICTRL, val16); + + ELINK_DEBUG_P1(sc, "set SGMII force speed %d", + phy->req_line_speed); + elink_cl45_read(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16); + ELINK_DEBUG_P1(sc, " (readback) %x", val16); + } + + /* SGMII Slave mode and disable signal detect */ + elink_cl45_read(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, &digctrl_kx1); + if (fiber_mode) + digctrl_kx1 = 1; + else + digctrl_kx1 &= 0xff4a; + + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, + digctrl_kx1); + + /* Turn off parallel detect */ + elink_cl45_read(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, &digctrl_kx2); + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, + (digctrl_kx2 & ~(1 << 2))); + + /* Re-enable parallel detect */ + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, + (digctrl_kx2 | (1 << 2))); + + /* Enable autodet */ + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, + (digctrl_kx1 | 0x10)); +} + + +static void elink_warpcore_reset_lane(struct bnx2x_softc *sc, + struct elink_phy *phy, + uint8_t reset) +{ + uint16_t val; + /* Take lane out of reset after configuration is finished */ + elink_cl45_read(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL5_MISC6, &val); + if (reset) + val |= 0xC000; + else + val &= 0x3FFF; + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL5_MISC6, val); + elink_cl45_read(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL5_MISC6, &val); +} + +/* Clear SFI/XFI link settings registers */ +static void elink_warpcore_clear_regs(struct elink_phy *phy, + struct elink_params *params, + uint16_t lane) +{ + struct bnx2x_softc *sc = params->sc; + uint16_t i; + static struct elink_reg_set wc_regs[] = { + {MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0}, + {MDIO_WC_DEVAD, MDIO_WC_REG_FX100_CTRL1, 0x014a}, + {MDIO_WC_DEVAD, MDIO_WC_REG_FX100_CTRL3, 0x0800}, + {MDIO_WC_DEVAD, MDIO_WC_REG_DIGITAL4_MISC3, 0x8008}, + {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, + 0x0195}, + {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, + 0x0007}, + {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, + 0x0002}, + {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_MISC1, 0x6000}, + {MDIO_WC_DEVAD, MDIO_WC_REG_TX_FIR_TAP, 0x0000}, + {MDIO_WC_DEVAD, MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x2040}, + {MDIO_WC_DEVAD, MDIO_WC_REG_COMBO_IEEE0_MIICTRL, 0x0140} + }; + /* Set XFI clock comp as default. 
*/ + elink_cl45_read_or_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_RX66_CONTROL, (3 << 13)); + + for (i = 0; i < ARRAY_SIZE(wc_regs); i++) + elink_cl45_write(sc, phy, wc_regs[i].devad, wc_regs[i].reg, + wc_regs[i].val); + + lane = elink_get_warpcore_lane(phy, params); + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_TX0_TX_DRIVER + 0x10 * lane, 0x0990); + +} + +static elink_status_t elink_get_mod_abs_int_cfg(struct bnx2x_softc *sc, + __rte_unused uint32_t chip_id, + uint32_t shmem_base, + uint8_t port, + uint8_t *gpio_num, + uint8_t *gpio_port) +{ + uint32_t cfg_pin; + *gpio_num = 0; + *gpio_port = 0; + if (CHIP_IS_E3(sc)) { + cfg_pin = (REG_RD(sc, shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[port].e3_sfp_ctrl)) & + PORT_HW_CFG_E3_MOD_ABS_MASK) >> + PORT_HW_CFG_E3_MOD_ABS_SHIFT; + + /* + * This should not happen since this function is called + * from interrupt triggered by GPIO (since EPIO can only + * generate interrupts to MCP). + * So if this function was called and none of the GPIOs was set, + * it means something disastrous has already happened. + */ + if ((cfg_pin < PIN_CFG_GPIO0_P0) || + (cfg_pin > PIN_CFG_GPIO3_P1)) { + ELINK_DEBUG_P1(sc, + "No cfg pin %x for module detect indication", + cfg_pin); + return ELINK_STATUS_ERROR; + } + + *gpio_num = (cfg_pin - PIN_CFG_GPIO0_P0) & 0x3; + *gpio_port = (cfg_pin - PIN_CFG_GPIO0_P0) >> 2; + } else { + *gpio_num = MISC_REGISTERS_GPIO_3; + *gpio_port = port; + } + + return ELINK_STATUS_OK; +} + +static int elink_is_sfp_module_plugged(__rte_unused struct elink_phy *phy, + struct elink_params *params) +{ + struct bnx2x_softc *sc = params->sc; + uint8_t gpio_num, gpio_port; + uint32_t gpio_val; + if (elink_get_mod_abs_int_cfg(sc, params->chip_id, + params->shmem_base, params->port, + &gpio_num, &gpio_port) != ELINK_STATUS_OK) + return 0; + gpio_val = elink_cb_gpio_read(sc, gpio_num, gpio_port); + + /* Call the handling function in case module is detected */ + if (gpio_val == 0) + return 1; + else + return 0; +} +static int elink_warpcore_get_sigdet(struct elink_phy *phy, + struct elink_params *params) +{ + uint16_t gp2_status_reg0, lane; + struct bnx2x_softc *sc = params->sc; + + lane = elink_get_warpcore_lane(phy, params); + + elink_cl45_read(sc, phy, MDIO_WC_DEVAD, MDIO_WC_REG_GP2_STATUS_GP_2_0, + &gp2_status_reg0); + + return (gp2_status_reg0 >> (8 + lane)) & 0x1; +} + +static void elink_warpcore_config_runtime(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars) +{ + struct bnx2x_softc *sc = params->sc; + uint32_t serdes_net_if; + uint16_t gp_status1 = 0, lnkup = 0, lnkup_kr = 0; + + vars->turn_to_run_wc_rt = vars->turn_to_run_wc_rt ? 0 : 1; + + if (!vars->turn_to_run_wc_rt) + return; + + if (vars->rx_tx_asic_rst) { + uint16_t lane = elink_get_warpcore_lane(phy, params); + serdes_net_if = (REG_RD(sc, params->shmem_base + + offsetof(struct shmem_region, dev_info. + port_hw_config[params->port].default_cfg)) & + PORT_HW_CFG_NET_SERDES_IF_MASK); + + switch (serdes_net_if) { + case PORT_HW_CFG_NET_SERDES_IF_KR: + /* Do we get link yet? 
*/ + elink_cl45_read(sc, phy, MDIO_WC_DEVAD, 0x81d1, + &gp_status1); + lnkup = (gp_status1 >> (8 + lane)) & 0x1;/* 1G */ + /*10G KR*/ + lnkup_kr = (gp_status1 >> (12 + lane)) & 0x1; + + if (lnkup_kr || lnkup) { + vars->rx_tx_asic_rst = 0; + } else { + /* Reset the lane to see if link comes up.*/ + elink_warpcore_reset_lane(sc, phy, 1); + elink_warpcore_reset_lane(sc, phy, 0); + + /* Restart Autoneg */ + elink_cl45_write(sc, phy, MDIO_AN_DEVAD, + MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x1200); + + vars->rx_tx_asic_rst--; + ELINK_DEBUG_P1(sc, "0x%x retry left", + vars->rx_tx_asic_rst); + } + break; + + default: + break; + } + + } /*params->rx_tx_asic_rst*/ +} + +static void elink_warpcore_config_sfi(struct elink_phy *phy, + struct elink_params *params) +{ + uint16_t lane = elink_get_warpcore_lane(phy, params); + struct bnx2x_softc *sc = params->sc; + elink_warpcore_clear_regs(phy, params, lane); + if ((params->req_line_speed[ELINK_LINK_CONFIG_IDX(ELINK_INT_PHY)] == + ELINK_SPEED_10000) && + (phy->media_type != ELINK_ETH_PHY_SFP_1G_FIBER)) { + ELINK_DEBUG_P0(sc, "Setting 10G SFI"); + elink_warpcore_set_10G_XFI(phy, params, 0); + } else { + ELINK_DEBUG_P0(sc, "Setting 1G Fiber"); + elink_warpcore_set_sgmii_speed(phy, params, 1, 0); + } +} + +static void elink_sfp_e3_set_transmitter(struct elink_params *params, + struct elink_phy *phy, + uint8_t tx_en) +{ + struct bnx2x_softc *sc = params->sc; + uint32_t cfg_pin; + uint8_t port = params->port; + + cfg_pin = REG_RD(sc, params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[port].e3_sfp_ctrl)) & + PORT_HW_CFG_E3_TX_LASER_MASK; + /* Set the !tx_en since this pin is DISABLE_TX_LASER */ + ELINK_DEBUG_P1(sc, "Setting WC TX to %d", tx_en); + + /* For 20G, the expected pin to be used is 3 pins after the current */ + elink_set_cfg_pin(sc, cfg_pin, tx_en ^ 1); + if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_20G) + elink_set_cfg_pin(sc, cfg_pin + 3, tx_en ^ 1); +} + +static uint8_t elink_warpcore_config_init(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars) +{ + struct bnx2x_softc *sc = params->sc; + uint32_t serdes_net_if; + uint8_t fiber_mode; + uint16_t lane = elink_get_warpcore_lane(phy, params); + serdes_net_if = (REG_RD(sc, params->shmem_base + + offsetof(struct shmem_region, dev_info. 
+ port_hw_config[params->port].default_cfg)) & + PORT_HW_CFG_NET_SERDES_IF_MASK); + ELINK_DEBUG_P2(sc, "Begin Warpcore init, link_speed %d, " + "serdes_net_if = 0x%x", + vars->line_speed, serdes_net_if); + elink_set_aer_mmd(params, phy); + elink_warpcore_reset_lane(sc, phy, 1); + vars->phy_flags |= PHY_XGXS_FLAG; + if ((serdes_net_if == PORT_HW_CFG_NET_SERDES_IF_SGMII) || + (phy->req_line_speed && + ((phy->req_line_speed == ELINK_SPEED_100) || + (phy->req_line_speed == ELINK_SPEED_10)))) { + vars->phy_flags |= PHY_SGMII_FLAG; + ELINK_DEBUG_P0(sc, "Setting SGMII mode"); + elink_warpcore_clear_regs(phy, params, lane); + elink_warpcore_set_sgmii_speed(phy, params, 0, 1); + } else { + switch (serdes_net_if) { + case PORT_HW_CFG_NET_SERDES_IF_KR: + /* Enable KR Auto Neg */ + if (params->loopback_mode != ELINK_LOOPBACK_EXT) + elink_warpcore_enable_AN_KR(phy, params, vars); + else { + ELINK_DEBUG_P0(sc, "Setting KR 10G-Force"); + elink_warpcore_set_10G_KR(phy, params, vars); + } + break; + + case PORT_HW_CFG_NET_SERDES_IF_XFI: + elink_warpcore_clear_regs(phy, params, lane); + if (vars->line_speed == ELINK_SPEED_10000) { + ELINK_DEBUG_P0(sc, "Setting 10G XFI"); + elink_warpcore_set_10G_XFI(phy, params, 1); + } else { + if (ELINK_SINGLE_MEDIA_DIRECT(params)) { + ELINK_DEBUG_P0(sc, "1G Fiber"); + fiber_mode = 1; + } else { + ELINK_DEBUG_P0(sc, "10/100/1G SGMII"); + fiber_mode = 0; + } + elink_warpcore_set_sgmii_speed(phy, + params, + fiber_mode, + 0); + } + + break; + + case PORT_HW_CFG_NET_SERDES_IF_SFI: + /* Issue Module detection if module is plugged, or + * enabled transmitter to avoid current leakage in case + * no module is connected + */ + if ((params->loopback_mode == ELINK_LOOPBACK_NONE) || + (params->loopback_mode == ELINK_LOOPBACK_EXT)) { + if (elink_is_sfp_module_plugged(phy, params)) + elink_sfp_module_detection(phy, params); + else + elink_sfp_e3_set_transmitter(params, + phy, 1); + } + + elink_warpcore_config_sfi(phy, params); + break; + + case PORT_HW_CFG_NET_SERDES_IF_DXGXS: + if (vars->line_speed != ELINK_SPEED_20000) { + ELINK_DEBUG_P0(sc, "Speed not supported yet"); + return 0; + } + ELINK_DEBUG_P0(sc, "Setting 20G DXGXS"); + elink_warpcore_set_20G_DXGXS(sc, phy, lane); + /* Issue Module detection */ + + elink_sfp_module_detection(phy, params); + break; + case PORT_HW_CFG_NET_SERDES_IF_KR2: + if (!params->loopback_mode) { + elink_warpcore_enable_AN_KR(phy, params, vars); + } else { + ELINK_DEBUG_P0(sc, "Setting KR 20G-Force"); + elink_warpcore_set_20G_force_KR2(phy, params); + } + break; + default: + ELINK_DEBUG_P1(sc, + "Unsupported Serdes Net Interface 0x%x", + serdes_net_if); + return 0; + } + } + + /* Take lane out of reset after configuration is finished */ + elink_warpcore_reset_lane(sc, phy, 0); + ELINK_DEBUG_P0(sc, "Exit config init"); + + return 0; +} + +static void elink_warpcore_link_reset(struct elink_phy *phy, + struct elink_params *params) +{ + struct bnx2x_softc *sc = params->sc; + uint16_t val16, lane; + elink_sfp_e3_set_transmitter(params, phy, 0); + elink_set_mdio_emac_per_phy(sc, params); + elink_set_aer_mmd(params, phy); + /* Global register */ + elink_warpcore_reset_lane(sc, phy, 1); + + /* Clear loopback settings (if any) */ + /* 10G & 20G */ + elink_cl45_read_and_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_COMBO_IEEE0_MIICTRL, 0xBFFF); + + elink_cl45_read_and_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_IEEE0BLK_MIICNTL, 0xfffe); + + /* Update those 1-copy registers */ + CL22_WR_OVER_CL45(sc, phy, MDIO_REG_BANK_AER_BLOCK, + MDIO_AER_BLOCK_AER_REG, 0); + /* 
Enable 1G MDIO (1-copy) */ + elink_cl45_read_and_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXSBLK0_XGXSCONTROL, + ~0x10); + + elink_cl45_read_and_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXSBLK1_LANECTRL2, 0xff00); + lane = elink_get_warpcore_lane(phy, params); + /* Disable CL36 PCS Tx */ + elink_cl45_read(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXSBLK1_LANECTRL0, &val16); + val16 |= (0x11 << lane); + if (phy->flags & ELINK_FLAGS_WC_DUAL_MODE) + val16 |= (0x22 << lane); + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXSBLK1_LANECTRL0, val16); + + elink_cl45_read(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXSBLK1_LANECTRL1, &val16); + val16 &= ~(0x0303 << (lane << 1)); + val16 |= (0x0101 << (lane << 1)); + if (phy->flags & ELINK_FLAGS_WC_DUAL_MODE) { + val16 &= ~(0x0c0c << (lane << 1)); + val16 |= (0x0404 << (lane << 1)); + } + + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXSBLK1_LANECTRL1, val16); + /* Restore AER */ + elink_set_aer_mmd(params, phy); + +} + +static void elink_set_warpcore_loopback(struct elink_phy *phy, + struct elink_params *params) +{ + struct bnx2x_softc *sc = params->sc; + uint16_t val16; + uint32_t lane; + ELINK_DEBUG_P2(sc, "Setting Warpcore loopback type %x, speed %d", + params->loopback_mode, phy->req_line_speed); + + if (phy->req_line_speed < ELINK_SPEED_10000 || + phy->supported & ELINK_SUPPORTED_20000baseKR2_Full) { + /* 10/100/1000/20G-KR2 */ + + /* Update those 1-copy registers */ + CL22_WR_OVER_CL45(sc, phy, MDIO_REG_BANK_AER_BLOCK, + MDIO_AER_BLOCK_AER_REG, 0); + /* Enable 1G MDIO (1-copy) */ + elink_cl45_read_or_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXSBLK0_XGXSCONTROL, + 0x10); + /* Set 1G loopback based on lane (1-copy) */ + lane = elink_get_warpcore_lane(phy, params); + elink_cl45_read(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXSBLK1_LANECTRL2, &val16); + val16 |= (1 << lane); + if (phy->flags & ELINK_FLAGS_WC_DUAL_MODE) + val16 |= (2 << lane); + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXSBLK1_LANECTRL2, + val16); + + /* Switch back to 4-copy registers */ + elink_set_aer_mmd(params, phy); + } else { + /* 10G / 20G-DXGXS */ + elink_cl45_read_or_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_COMBO_IEEE0_MIICTRL, + 0x4000); + elink_cl45_read_or_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x1); + } +} + + + +static void elink_sync_link(struct elink_params *params, + struct elink_vars *vars) +{ + struct bnx2x_softc *sc = params->sc; + uint8_t link_10g_plus; + if (vars->link_status & LINK_STATUS_PHYSICAL_LINK_FLAG) + vars->phy_flags |= PHY_PHYSICAL_LINK_FLAG; + vars->link_up = (vars->link_status & LINK_STATUS_LINK_UP); + if (vars->link_up) { + ELINK_DEBUG_P0(sc, "phy link up"); + ELINK_DEBUG_P1(sc, "link status = %x", vars->link_status); + + vars->phy_link_up = 1; + vars->duplex = DUPLEX_FULL; + switch (vars->link_status & + LINK_STATUS_SPEED_AND_DUPLEX_MASK) { + case ELINK_LINK_10THD: + vars->duplex = DUPLEX_HALF; + /* Fall thru */ + case ELINK_LINK_10TFD: + vars->line_speed = ELINK_SPEED_10; + break; + + case ELINK_LINK_100TXHD: + vars->duplex = DUPLEX_HALF; + /* Fall thru */ + case ELINK_LINK_100T4: + case ELINK_LINK_100TXFD: + vars->line_speed = ELINK_SPEED_100; + break; + + case ELINK_LINK_1000THD: + vars->duplex = DUPLEX_HALF; + /* Fall thru */ + case ELINK_LINK_1000TFD: + vars->line_speed = ELINK_SPEED_1000; + break; + + case ELINK_LINK_2500THD: + vars->duplex = DUPLEX_HALF; + /* Fall thru */ + case ELINK_LINK_2500TFD: + vars->line_speed = ELINK_SPEED_2500; + break; + + case 
ELINK_LINK_10GTFD: + vars->line_speed = ELINK_SPEED_10000; + break; + case ELINK_LINK_20GTFD: + vars->line_speed = ELINK_SPEED_20000; + break; + default: + break; + } + vars->flow_ctrl = 0; + if (vars->link_status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED) + vars->flow_ctrl |= ELINK_FLOW_CTRL_TX; + + if (vars->link_status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED) + vars->flow_ctrl |= ELINK_FLOW_CTRL_RX; + + if (!vars->flow_ctrl) + vars->flow_ctrl = ELINK_FLOW_CTRL_NONE; + + if (vars->line_speed && + ((vars->line_speed == ELINK_SPEED_10) || + (vars->line_speed == ELINK_SPEED_100))) { + vars->phy_flags |= PHY_SGMII_FLAG; + } else { + vars->phy_flags &= ~PHY_SGMII_FLAG; + } + if (vars->line_speed && + USES_WARPCORE(sc) && + (vars->line_speed == ELINK_SPEED_1000)) + vars->phy_flags |= PHY_SGMII_FLAG; + /* Anything 10 and over uses the bmac */ + link_10g_plus = (vars->line_speed >= ELINK_SPEED_10000); + + if (link_10g_plus) { + if (USES_WARPCORE(sc)) + vars->mac_type = ELINK_MAC_TYPE_XMAC; + else + vars->mac_type = ELINK_MAC_TYPE_BMAC; + } else { + if (USES_WARPCORE(sc)) + vars->mac_type = ELINK_MAC_TYPE_UMAC; + else + vars->mac_type = ELINK_MAC_TYPE_EMAC; + } + } else { /* Link down */ + ELINK_DEBUG_P0(sc, "phy link down"); + + vars->phy_link_up = 0; + + vars->line_speed = 0; + vars->duplex = DUPLEX_FULL; + vars->flow_ctrl = ELINK_FLOW_CTRL_NONE; + + /* Indicate no mac active */ + vars->mac_type = ELINK_MAC_TYPE_NONE; + if (vars->link_status & LINK_STATUS_PHYSICAL_LINK_FLAG) + vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG; + if (vars->link_status & LINK_STATUS_SFP_TX_FAULT) + vars->phy_flags |= PHY_SFP_TX_FAULT_FLAG; + } +} + +void elink_link_status_update(struct elink_params *params, + struct elink_vars *vars) +{ + struct bnx2x_softc *sc = params->sc; + uint8_t port = params->port; + uint32_t sync_offset, media_types; + /* Update PHY configuration */ + set_phy_vars(params, vars); + + vars->link_status = REG_RD(sc, params->shmem_base + + offsetof(struct shmem_region, + port_mb[port].link_status)); + + /* Force link UP in non LOOPBACK_EXT loopback mode(s) */ + if (params->loopback_mode != ELINK_LOOPBACK_NONE && + params->loopback_mode != ELINK_LOOPBACK_EXT) + vars->link_status |= LINK_STATUS_LINK_UP; + + if (elink_eee_has_cap(params)) + vars->eee_status = REG_RD(sc, params->shmem2_base + + offsetof(struct shmem2_region, + eee_status[params->port])); + + vars->phy_flags = PHY_XGXS_FLAG; + elink_sync_link(params, vars); + /* Sync media type */ + sync_offset = params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[port].media_type); + media_types = REG_RD(sc, sync_offset); + + params->phy[ELINK_INT_PHY].media_type = + (media_types & PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK) >> + PORT_HW_CFG_MEDIA_TYPE_PHY0_SHIFT; + params->phy[ELINK_EXT_PHY1].media_type = + (media_types & PORT_HW_CFG_MEDIA_TYPE_PHY1_MASK) >> + PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT; + params->phy[ELINK_EXT_PHY2].media_type = + (media_types & PORT_HW_CFG_MEDIA_TYPE_PHY2_MASK) >> + PORT_HW_CFG_MEDIA_TYPE_PHY2_SHIFT; + ELINK_DEBUG_P1(sc, "media_types = 0x%x", media_types); + + /* Sync AEU offset */ + sync_offset = params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[port].aeu_int_mask); + + vars->aeu_int_mask = REG_RD(sc, sync_offset); + + /* Sync PFC status */ + if (vars->link_status & LINK_STATUS_PFC_ENABLED) + params->feature_config_flags |= + ELINK_FEATURE_CONFIG_PFC_ENABLED; + else + params->feature_config_flags &= + ~ELINK_FEATURE_CONFIG_PFC_ENABLED; + + if (SHMEM2_HAS(sc, link_attr_sync)) + 
params->link_attr_sync = SHMEM2_RD(sc, + link_attr_sync[params->port]); + + ELINK_DEBUG_P3(sc, "link_status 0x%x phy_link_up %x int_mask 0x%x", + vars->link_status, vars->phy_link_up, vars->aeu_int_mask); + ELINK_DEBUG_P3(sc, "line_speed %x duplex %x flow_ctrl 0x%x", + vars->line_speed, vars->duplex, vars->flow_ctrl); +} + +static void elink_set_master_ln(struct elink_params *params, + struct elink_phy *phy) +{ + struct bnx2x_softc *sc = params->sc; + uint16_t new_master_ln, ser_lane; + ser_lane = ((params->lane_config & + PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >> + PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT); + + /* Set the master_ln for AN */ + CL22_RD_OVER_CL45(sc, phy, + MDIO_REG_BANK_XGXS_BLOCK2, + MDIO_XGXS_BLOCK2_TEST_MODE_LANE, + &new_master_ln); + + CL22_WR_OVER_CL45(sc, phy, + MDIO_REG_BANK_XGXS_BLOCK2, + MDIO_XGXS_BLOCK2_TEST_MODE_LANE, + (new_master_ln | ser_lane)); +} + +static elink_status_t elink_reset_unicore(struct elink_params *params, + struct elink_phy *phy, + uint8_t set_serdes) +{ + struct bnx2x_softc *sc = params->sc; + uint16_t mii_control; + uint16_t i; + CL22_RD_OVER_CL45(sc, phy, + MDIO_REG_BANK_COMBO_IEEE0, + MDIO_COMBO_IEEE0_MII_CONTROL, &mii_control); + + /* Reset the unicore */ + CL22_WR_OVER_CL45(sc, phy, + MDIO_REG_BANK_COMBO_IEEE0, + MDIO_COMBO_IEEE0_MII_CONTROL, + (mii_control | + MDIO_COMBO_IEEO_MII_CONTROL_RESET)); + if (set_serdes) + elink_set_serdes_access(sc, params->port); + + /* Wait for the reset to self clear */ + for (i = 0; i < ELINK_MDIO_ACCESS_TIMEOUT; i++) { + DELAY(5); + + /* The reset erased the previous bank value */ + CL22_RD_OVER_CL45(sc, phy, + MDIO_REG_BANK_COMBO_IEEE0, + MDIO_COMBO_IEEE0_MII_CONTROL, + &mii_control); + + if (!(mii_control & MDIO_COMBO_IEEO_MII_CONTROL_RESET)) { + DELAY(5); + return ELINK_STATUS_OK; + } + } + + elink_cb_event_log(sc, ELINK_LOG_ID_PHY_UNINITIALIZED, params->port); + /* "Warning: PHY was not initialized," + * " Port %d", + */ + + ELINK_DEBUG_P0(sc, "BUG! 
XGXS is still in reset!"); + return ELINK_STATUS_ERROR; + +} + +static void elink_set_swap_lanes(struct elink_params *params, + struct elink_phy *phy) +{ + struct bnx2x_softc *sc = params->sc; + /* Each two bits represents a lane number: + * No swap is 0123 => 0x1b no need to enable the swap + */ + uint16_t rx_lane_swap, tx_lane_swap; + + rx_lane_swap = ((params->lane_config & + PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK) >> + PORT_HW_CFG_LANE_SWAP_CFG_RX_SHIFT); + tx_lane_swap = ((params->lane_config & + PORT_HW_CFG_LANE_SWAP_CFG_TX_MASK) >> + PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT); + + if (rx_lane_swap != 0x1b) { + CL22_WR_OVER_CL45(sc, phy, + MDIO_REG_BANK_XGXS_BLOCK2, + MDIO_XGXS_BLOCK2_RX_LN_SWAP, + (rx_lane_swap | + MDIO_XGXS_BLOCK2_RX_LN_SWAP_ENABLE | + MDIO_XGXS_BLOCK2_RX_LN_SWAP_FORCE_ENABLE)); + } else { + CL22_WR_OVER_CL45(sc, phy, + MDIO_REG_BANK_XGXS_BLOCK2, + MDIO_XGXS_BLOCK2_RX_LN_SWAP, 0); + } + + if (tx_lane_swap != 0x1b) { + CL22_WR_OVER_CL45(sc, phy, + MDIO_REG_BANK_XGXS_BLOCK2, + MDIO_XGXS_BLOCK2_TX_LN_SWAP, + (tx_lane_swap | + MDIO_XGXS_BLOCK2_TX_LN_SWAP_ENABLE)); + } else { + CL22_WR_OVER_CL45(sc, phy, + MDIO_REG_BANK_XGXS_BLOCK2, + MDIO_XGXS_BLOCK2_TX_LN_SWAP, 0); + } +} + +static void elink_set_parallel_detection(struct elink_phy *phy, + struct elink_params *params) +{ + struct bnx2x_softc *sc = params->sc; + uint16_t control2; + CL22_RD_OVER_CL45(sc, phy, + MDIO_REG_BANK_SERDES_DIGITAL, + MDIO_SERDES_DIGITAL_A_1000X_CONTROL2, + &control2); + if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G) + control2 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN; + else + control2 &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN; + ELINK_DEBUG_P2(sc, "phy->speed_cap_mask = 0x%x, control2 = 0x%x", + phy->speed_cap_mask, control2); + CL22_WR_OVER_CL45(sc, phy, + MDIO_REG_BANK_SERDES_DIGITAL, + MDIO_SERDES_DIGITAL_A_1000X_CONTROL2, + control2); + + if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) && + (phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) { + ELINK_DEBUG_P0(sc, "XGXS"); + + CL22_WR_OVER_CL45(sc, phy, + MDIO_REG_BANK_10G_PARALLEL_DETECT, + MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK, + MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK_CNT); + + CL22_RD_OVER_CL45(sc, phy, + MDIO_REG_BANK_10G_PARALLEL_DETECT, + MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL, + &control2); + + + control2 |= + MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN; + + CL22_WR_OVER_CL45(sc, phy, + MDIO_REG_BANK_10G_PARALLEL_DETECT, + MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL, + control2); + + /* Disable parallel detection of HiG */ + CL22_WR_OVER_CL45(sc, phy, + MDIO_REG_BANK_XGXS_BLOCK2, + MDIO_XGXS_BLOCK2_UNICORE_MODE_10G, + MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_CX4_XGXS | + MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_HIGIG_XGXS); + } +} + +static void elink_set_autoneg(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars, + uint8_t enable_cl73) +{ + struct bnx2x_softc *sc = params->sc; + uint16_t reg_val; + + /* CL37 Autoneg */ + CL22_RD_OVER_CL45(sc, phy, + MDIO_REG_BANK_COMBO_IEEE0, + MDIO_COMBO_IEEE0_MII_CONTROL, ®_val); + + /* CL37 Autoneg Enabled */ + if (vars->line_speed == ELINK_SPEED_AUTO_NEG) + reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_AN_EN; + else /* CL37 Autoneg Disabled */ + reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN | + MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN); + + CL22_WR_OVER_CL45(sc, phy, + MDIO_REG_BANK_COMBO_IEEE0, + MDIO_COMBO_IEEE0_MII_CONTROL, reg_val); + + /* Enable/Disable Autodetection */ + + CL22_RD_OVER_CL45(sc, phy, + 
MDIO_REG_BANK_SERDES_DIGITAL, + MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, &reg_val); + reg_val &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_SIGNAL_DETECT_EN | + MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT); + reg_val |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE; + if (vars->line_speed == ELINK_SPEED_AUTO_NEG) + reg_val |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET; + else + reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET; + + CL22_WR_OVER_CL45(sc, phy, + MDIO_REG_BANK_SERDES_DIGITAL, + MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, reg_val); + + /* Enable TetonII and BAM autoneg */ + CL22_RD_OVER_CL45(sc, phy, + MDIO_REG_BANK_BAM_NEXT_PAGE, + MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL, + &reg_val); + if (vars->line_speed == ELINK_SPEED_AUTO_NEG) { + /* Enable BAM aneg Mode and TetonII aneg Mode */ + reg_val |= (MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE | + MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN); + } else { + /* TetonII and BAM Autoneg Disabled */ + reg_val &= ~(MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE | + MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN); + } + CL22_WR_OVER_CL45(sc, phy, + MDIO_REG_BANK_BAM_NEXT_PAGE, + MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL, + reg_val); + + if (enable_cl73) { + /* Enable Cl73 FSM status bits */ + CL22_WR_OVER_CL45(sc, phy, + MDIO_REG_BANK_CL73_USERB0, + MDIO_CL73_USERB0_CL73_UCTRL, + 0xe); + + /* Enable BAM Station Manager*/ + CL22_WR_OVER_CL45(sc, phy, + MDIO_REG_BANK_CL73_USERB0, + MDIO_CL73_USERB0_CL73_BAM_CTRL1, + MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_EN | + MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_STATION_MNGR_EN | + MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_NP_AFTER_BP_EN); + + /* Advertise CL73 link speeds */ + CL22_RD_OVER_CL45(sc, phy, + MDIO_REG_BANK_CL73_IEEEB1, + MDIO_CL73_IEEEB1_AN_ADV2, + &reg_val); + if (phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) + reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4; + if (phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_1G) + reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX; + + CL22_WR_OVER_CL45(sc, phy, + MDIO_REG_BANK_CL73_IEEEB1, + MDIO_CL73_IEEEB1_AN_ADV2, + reg_val); + + /* CL73 Autoneg Enabled */ + reg_val = MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN; + + } else /* CL73 Autoneg Disabled */ + reg_val = 0; + + CL22_WR_OVER_CL45(sc, phy, + MDIO_REG_BANK_CL73_IEEEB0, + MDIO_CL73_IEEEB0_CL73_AN_CONTROL, reg_val); +} + +/* Program SerDes, forced speed */ +static void elink_program_serdes(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars) +{ + struct bnx2x_softc *sc = params->sc; + uint16_t reg_val; + + /* Program duplex, disable autoneg and sgmii*/ + CL22_RD_OVER_CL45(sc, phy, + MDIO_REG_BANK_COMBO_IEEE0, + MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val); + reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX | + MDIO_COMBO_IEEO_MII_CONTROL_AN_EN | + MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK); + if (phy->req_duplex == DUPLEX_FULL) + reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX; + CL22_WR_OVER_CL45(sc, phy, + MDIO_REG_BANK_COMBO_IEEE0, + MDIO_COMBO_IEEE0_MII_CONTROL, reg_val); + + /* Program speed + * - needed only if the speed is greater than 1G (2.5G or 10G) + */ + CL22_RD_OVER_CL45(sc, phy, + MDIO_REG_BANK_SERDES_DIGITAL, + MDIO_SERDES_DIGITAL_MISC1, &reg_val); + /* Clearing the speed value before setting the right speed */ + ELINK_DEBUG_P1(sc, "MDIO_REG_BANK_SERDES_DIGITAL = 0x%x", reg_val); + + reg_val &= ~(MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_MASK | + MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_SEL); + + if (!((vars->line_speed == ELINK_SPEED_1000) || + 
(vars->line_speed == ELINK_SPEED_100) || + (vars->line_speed == ELINK_SPEED_10))) { + + reg_val |= (MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_156_25M | + MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_SEL); + if (vars->line_speed == ELINK_SPEED_10000) + reg_val |= + MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_10G_CX4; + } + + CL22_WR_OVER_CL45(sc, phy, + MDIO_REG_BANK_SERDES_DIGITAL, + MDIO_SERDES_DIGITAL_MISC1, reg_val); + +} + +static void elink_set_brcm_cl37_advertisement(struct elink_phy *phy, + struct elink_params *params) +{ + struct bnx2x_softc *sc = params->sc; + uint16_t val = 0; + + /* Set extended capabilities */ + if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G) + val |= MDIO_OVER_1G_UP1_2_5G; + if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) + val |= MDIO_OVER_1G_UP1_10G; + CL22_WR_OVER_CL45(sc, phy, + MDIO_REG_BANK_OVER_1G, + MDIO_OVER_1G_UP1, val); + + CL22_WR_OVER_CL45(sc, phy, + MDIO_REG_BANK_OVER_1G, + MDIO_OVER_1G_UP3, 0x400); +} + +static void elink_set_ieee_aneg_advertisement(struct elink_phy *phy, + struct elink_params *params, + uint16_t ieee_fc) +{ + struct bnx2x_softc *sc = params->sc; + uint16_t val; + /* For AN, we are always publishing full duplex */ + + CL22_WR_OVER_CL45(sc, phy, + MDIO_REG_BANK_COMBO_IEEE0, + MDIO_COMBO_IEEE0_AUTO_NEG_ADV, ieee_fc); + CL22_RD_OVER_CL45(sc, phy, + MDIO_REG_BANK_CL73_IEEEB1, + MDIO_CL73_IEEEB1_AN_ADV1, &val); + val &= ~MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_BOTH; + val |= ((ieee_fc << 3) & MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK); + CL22_WR_OVER_CL45(sc, phy, + MDIO_REG_BANK_CL73_IEEEB1, + MDIO_CL73_IEEEB1_AN_ADV1, val); +} + +static void elink_restart_autoneg(struct elink_phy *phy, + struct elink_params *params, + uint8_t enable_cl73) +{ + struct bnx2x_softc *sc = params->sc; + uint16_t mii_control; + + ELINK_DEBUG_P0(sc, "elink_restart_autoneg"); + /* Enable and restart BAM/CL37 aneg */ + + if (enable_cl73) { + CL22_RD_OVER_CL45(sc, phy, + MDIO_REG_BANK_CL73_IEEEB0, + MDIO_CL73_IEEEB0_CL73_AN_CONTROL, + &mii_control); + + CL22_WR_OVER_CL45(sc, phy, + MDIO_REG_BANK_CL73_IEEEB0, + MDIO_CL73_IEEEB0_CL73_AN_CONTROL, + (mii_control | + MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN | + MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN)); + } else { + + CL22_RD_OVER_CL45(sc, phy, + MDIO_REG_BANK_COMBO_IEEE0, + MDIO_COMBO_IEEE0_MII_CONTROL, + &mii_control); + ELINK_DEBUG_P1(sc, + "elink_restart_autoneg mii_control before = 0x%x", + mii_control); + CL22_WR_OVER_CL45(sc, phy, + MDIO_REG_BANK_COMBO_IEEE0, + MDIO_COMBO_IEEE0_MII_CONTROL, + (mii_control | + MDIO_COMBO_IEEO_MII_CONTROL_AN_EN | + MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN)); + } +} + +static void elink_initialize_sgmii_process(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars) +{ + struct bnx2x_softc *sc = params->sc; + uint16_t control1; + + /* In SGMII mode, the unicore is always slave */ + + CL22_RD_OVER_CL45(sc, phy, + MDIO_REG_BANK_SERDES_DIGITAL, + MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, + &control1); + control1 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT; + /* Set sgmii mode (and not fiber) */ + control1 &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE | + MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET | + MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_MSTR_MODE); + CL22_WR_OVER_CL45(sc, phy, + MDIO_REG_BANK_SERDES_DIGITAL, + MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, + control1); + + /* If forced speed */ + if (!(vars->line_speed == ELINK_SPEED_AUTO_NEG)) { + /* Set speed, disable autoneg */ + uint16_t mii_control; + + CL22_RD_OVER_CL45(sc, phy, + 
MDIO_REG_BANK_COMBO_IEEE0, + MDIO_COMBO_IEEE0_MII_CONTROL, + &mii_control); + mii_control &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN | + MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK | + MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX); + + switch (vars->line_speed) { + case ELINK_SPEED_100: + mii_control |= + MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_100; + break; + case ELINK_SPEED_1000: + mii_control |= + MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_1000; + break; + case ELINK_SPEED_10: + /* There is nothing to set for 10M */ + break; + default: + /* Invalid speed for SGMII */ + ELINK_DEBUG_P1(sc, "Invalid line_speed 0x%x", + vars->line_speed); + break; + } + + /* Setting the full duplex */ + if (phy->req_duplex == DUPLEX_FULL) + mii_control |= + MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX; + CL22_WR_OVER_CL45(sc, phy, + MDIO_REG_BANK_COMBO_IEEE0, + MDIO_COMBO_IEEE0_MII_CONTROL, + mii_control); + + } else { /* AN mode */ + /* Enable and restart AN */ + elink_restart_autoneg(phy, params, 0); + } +} + +/* Link management + */ +static elink_status_t elink_direct_parallel_detect_used(struct elink_phy *phy, + struct elink_params *params) +{ + struct bnx2x_softc *sc = params->sc; + uint16_t pd_10g, status2_1000x; + if (phy->req_line_speed != ELINK_SPEED_AUTO_NEG) + return ELINK_STATUS_OK; + CL22_RD_OVER_CL45(sc, phy, + MDIO_REG_BANK_SERDES_DIGITAL, + MDIO_SERDES_DIGITAL_A_1000X_STATUS2, + &status2_1000x); + CL22_RD_OVER_CL45(sc, phy, + MDIO_REG_BANK_SERDES_DIGITAL, + MDIO_SERDES_DIGITAL_A_1000X_STATUS2, + &status2_1000x); + if (status2_1000x & MDIO_SERDES_DIGITAL_A_1000X_STATUS2_AN_DISABLED) { + ELINK_DEBUG_P1(sc, "1G parallel detect link on port %d", + params->port); + return 1; + } + + CL22_RD_OVER_CL45(sc, phy, + MDIO_REG_BANK_10G_PARALLEL_DETECT, + MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS, + &pd_10g); + + if (pd_10g & MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS_PD_LINK) { + ELINK_DEBUG_P1(sc, "10G parallel detect link on port %d", + params->port); + return 1; + } + return ELINK_STATUS_OK; +} + +static void elink_update_adv_fc(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars, + uint32_t gp_status) +{ + uint16_t ld_pause; /* local driver */ + uint16_t lp_pause; /* link partner */ + uint16_t pause_result; + struct bnx2x_softc *sc = params->sc; + if ((gp_status & + (MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE | + MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE)) == + (MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE | + MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE)) { + + CL22_RD_OVER_CL45(sc, phy, + MDIO_REG_BANK_CL73_IEEEB1, + MDIO_CL73_IEEEB1_AN_ADV1, + &ld_pause); + CL22_RD_OVER_CL45(sc, phy, + MDIO_REG_BANK_CL73_IEEEB1, + MDIO_CL73_IEEEB1_AN_LP_ADV1, + &lp_pause); + pause_result = (ld_pause & + MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK) >> 8; + pause_result |= (lp_pause & + MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE_MASK) >> 10; + ELINK_DEBUG_P1(sc, "pause_result CL73 0x%x", pause_result); + } else { + CL22_RD_OVER_CL45(sc, phy, + MDIO_REG_BANK_COMBO_IEEE0, + MDIO_COMBO_IEEE0_AUTO_NEG_ADV, + &ld_pause); + CL22_RD_OVER_CL45(sc, phy, + MDIO_REG_BANK_COMBO_IEEE0, + MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1, + &lp_pause); + pause_result = (ld_pause & + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) >> 5; + pause_result |= (lp_pause & + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) >> 7; + ELINK_DEBUG_P1(sc, "pause_result CL37 0x%x", pause_result); + } + elink_pause_resolve(phy, params, vars, pause_result); + +} + +static void elink_flow_ctrl_resolve(struct elink_phy *phy, + 
struct elink_params *params, + struct elink_vars *vars, + uint32_t gp_status) +{ + struct bnx2x_softc *sc = params->sc; + vars->flow_ctrl = ELINK_FLOW_CTRL_NONE; + + /* Resolve from gp_status in case of AN complete and not sgmii */ + if (phy->req_flow_ctrl != ELINK_FLOW_CTRL_AUTO) { + /* Update the advertised flow-controled of LD/LP in AN */ + if (phy->req_line_speed == ELINK_SPEED_AUTO_NEG) + elink_update_adv_fc(phy, params, vars, gp_status); + /* But set the flow-control result as the requested one */ + vars->flow_ctrl = phy->req_flow_ctrl; + } else if (phy->req_line_speed != ELINK_SPEED_AUTO_NEG) + vars->flow_ctrl = params->req_fc_auto_adv; + else if ((gp_status & ELINK_MDIO_AN_CL73_OR_37_COMPLETE) && + (!(vars->phy_flags & PHY_SGMII_FLAG))) { + if (elink_direct_parallel_detect_used(phy, params)) { + vars->flow_ctrl = params->req_fc_auto_adv; + return; + } + elink_update_adv_fc(phy, params, vars, gp_status); + } + ELINK_DEBUG_P1(sc, "flow_ctrl 0x%x", vars->flow_ctrl); +} + +static void elink_check_fallback_to_cl37(struct elink_phy *phy, + struct elink_params *params) +{ + struct bnx2x_softc *sc = params->sc; + uint16_t rx_status, ustat_val, cl37_fsm_received; + ELINK_DEBUG_P0(sc, "elink_check_fallback_to_cl37"); + /* Step 1: Make sure signal is detected */ + CL22_RD_OVER_CL45(sc, phy, + MDIO_REG_BANK_RX0, + MDIO_RX0_RX_STATUS, + &rx_status); + if ((rx_status & MDIO_RX0_RX_STATUS_SIGDET) != + (MDIO_RX0_RX_STATUS_SIGDET)) { + ELINK_DEBUG_P1(sc, "Signal is not detected. Restoring CL73." + "rx_status(0x80b0) = 0x%x", rx_status); + CL22_WR_OVER_CL45(sc, phy, + MDIO_REG_BANK_CL73_IEEEB0, + MDIO_CL73_IEEEB0_CL73_AN_CONTROL, + MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN); + return; + } + /* Step 2: Check CL73 state machine */ + CL22_RD_OVER_CL45(sc, phy, + MDIO_REG_BANK_CL73_USERB0, + MDIO_CL73_USERB0_CL73_USTAT1, + &ustat_val); + if ((ustat_val & + (MDIO_CL73_USERB0_CL73_USTAT1_LINK_STATUS_CHECK | + MDIO_CL73_USERB0_CL73_USTAT1_AN_GOOD_CHECK_BAM37)) != + (MDIO_CL73_USERB0_CL73_USTAT1_LINK_STATUS_CHECK | + MDIO_CL73_USERB0_CL73_USTAT1_AN_GOOD_CHECK_BAM37)) { + ELINK_DEBUG_P1(sc, "CL73 state-machine is not stable. " + "ustat_val(0x8371) = 0x%x", ustat_val); + return; + } + /* Step 3: Check CL37 Message Pages received to indicate LP + * supports only CL37 + */ + CL22_RD_OVER_CL45(sc, phy, + MDIO_REG_BANK_REMOTE_PHY, + MDIO_REMOTE_PHY_MISC_RX_STATUS, + &cl37_fsm_received); + if ((cl37_fsm_received & + (MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_OVER1G_MSG | + MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_BRCM_OUI_MSG)) != + (MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_OVER1G_MSG | + MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_BRCM_OUI_MSG)) { + ELINK_DEBUG_P1(sc, "No CL37 FSM were received. " + "misc_rx_status(0x8330) = 0x%x", + cl37_fsm_received); + return; + } + /* The combined cl37/cl73 fsm state information indicating that + * we are connected to a device which does not support cl73, but + * does support cl37 BAM. 
In this case we disable cl73 and + * restart cl37 auto-neg + */ + + /* Disable CL73 */ + CL22_WR_OVER_CL45(sc, phy, + MDIO_REG_BANK_CL73_IEEEB0, + MDIO_CL73_IEEEB0_CL73_AN_CONTROL, + 0); + /* Restart CL37 autoneg */ + elink_restart_autoneg(phy, params, 0); + ELINK_DEBUG_P0(sc, "Disabling CL73, and restarting CL37 autoneg"); +} + +static void elink_xgxs_an_resolve(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars, + uint32_t gp_status) +{ + if (gp_status & ELINK_MDIO_AN_CL73_OR_37_COMPLETE) + vars->link_status |= + LINK_STATUS_AUTO_NEGOTIATE_COMPLETE; + + if (elink_direct_parallel_detect_used(phy, params)) + vars->link_status |= + LINK_STATUS_PARALLEL_DETECTION_USED; +} +static elink_status_t elink_get_link_speed_duplex(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars, + uint16_t is_link_up, + uint16_t speed_mask, + uint16_t is_duplex) +{ + struct bnx2x_softc *sc = params->sc; + if (phy->req_line_speed == ELINK_SPEED_AUTO_NEG) + vars->link_status |= LINK_STATUS_AUTO_NEGOTIATE_ENABLED; + if (is_link_up) { + ELINK_DEBUG_P0(sc, "phy link up"); + + vars->phy_link_up = 1; + vars->link_status |= LINK_STATUS_LINK_UP; + + switch (speed_mask) { + case ELINK_GP_STATUS_10M: + vars->line_speed = ELINK_SPEED_10; + if (is_duplex == DUPLEX_FULL) + vars->link_status |= ELINK_LINK_10TFD; + else + vars->link_status |= ELINK_LINK_10THD; + break; + + case ELINK_GP_STATUS_100M: + vars->line_speed = ELINK_SPEED_100; + if (is_duplex == DUPLEX_FULL) + vars->link_status |= ELINK_LINK_100TXFD; + else + vars->link_status |= ELINK_LINK_100TXHD; + break; + + case ELINK_GP_STATUS_1G: + case ELINK_GP_STATUS_1G_KX: + vars->line_speed = ELINK_SPEED_1000; + if (is_duplex == DUPLEX_FULL) + vars->link_status |= ELINK_LINK_1000TFD; + else + vars->link_status |= ELINK_LINK_1000THD; + break; + + case ELINK_GP_STATUS_2_5G: + vars->line_speed = ELINK_SPEED_2500; + if (is_duplex == DUPLEX_FULL) + vars->link_status |= ELINK_LINK_2500TFD; + else + vars->link_status |= ELINK_LINK_2500THD; + break; + + case ELINK_GP_STATUS_5G: + case ELINK_GP_STATUS_6G: + ELINK_DEBUG_P1(sc, + "link speed unsupported gp_status 0x%x", + speed_mask); + return ELINK_STATUS_ERROR; + + case ELINK_GP_STATUS_10G_KX4: + case ELINK_GP_STATUS_10G_HIG: + case ELINK_GP_STATUS_10G_CX4: + case ELINK_GP_STATUS_10G_KR: + case ELINK_GP_STATUS_10G_SFI: + case ELINK_GP_STATUS_10G_XFI: + vars->line_speed = ELINK_SPEED_10000; + vars->link_status |= ELINK_LINK_10GTFD; + break; + case ELINK_GP_STATUS_20G_DXGXS: + case ELINK_GP_STATUS_20G_KR2: + vars->line_speed = ELINK_SPEED_20000; + vars->link_status |= ELINK_LINK_20GTFD; + break; + default: + ELINK_DEBUG_P1(sc, + "link speed unsupported gp_status 0x%x", + speed_mask); + return ELINK_STATUS_ERROR; + } + } else { /* link_down */ + ELINK_DEBUG_P0(sc, "phy link down"); + + vars->phy_link_up = 0; + + vars->duplex = DUPLEX_FULL; + vars->flow_ctrl = ELINK_FLOW_CTRL_NONE; + vars->mac_type = ELINK_MAC_TYPE_NONE; + } + ELINK_DEBUG_P2(sc, " in elink_get_link_speed_duplex vars->link_status = %x, vars->duplex = %x", + vars->link_status, vars->duplex); + ELINK_DEBUG_P2(sc, " phy_link_up %x line_speed %d", + vars->phy_link_up, vars->line_speed); + return ELINK_STATUS_OK; +} + +static uint8_t elink_link_settings_status(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars) +{ + struct bnx2x_softc *sc = params->sc; + + uint16_t gp_status, duplex = DUPLEX_HALF, link_up = 0, speed_mask; + elink_status_t rc = ELINK_STATUS_OK; + + /* Read gp_status */ + 
CL22_RD_OVER_CL45(sc, phy, + MDIO_REG_BANK_GP_STATUS, + MDIO_GP_STATUS_TOP_AN_STATUS1, + &gp_status); + if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_DUPLEX_STATUS) { + duplex = DUPLEX_FULL; + ELINK_DEBUG_P1(sc, "duplex status read from phy is = %x", + duplex); + } else { + ELINK_DEBUG_P1(sc, "phy status does not allow interface to be FULL_DUPLEX : %x", + gp_status); + } + + + if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS) + link_up = 1; + speed_mask = gp_status & ELINK_GP_STATUS_SPEED_MASK; + ELINK_DEBUG_P3(sc, "gp_status 0x%x, is_link_up %d, speed_mask 0x%x", + gp_status, link_up, speed_mask); + rc = elink_get_link_speed_duplex(phy, params, vars, link_up, speed_mask, + duplex); + if (rc == ELINK_STATUS_ERROR) + return rc; + + if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS) { + if (ELINK_SINGLE_MEDIA_DIRECT(params)) { + vars->duplex = duplex; + elink_flow_ctrl_resolve(phy, params, vars, gp_status); + if (phy->req_line_speed == ELINK_SPEED_AUTO_NEG) + elink_xgxs_an_resolve(phy, params, vars, + gp_status); + } + } else { /* Link_down */ + if ((phy->req_line_speed == ELINK_SPEED_AUTO_NEG) && + ELINK_SINGLE_MEDIA_DIRECT(params)) { + /* Check signal is detected */ + elink_check_fallback_to_cl37(phy, params); + } + } + + /* Read LP advertised speeds*/ + if (ELINK_SINGLE_MEDIA_DIRECT(params) && + (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE)) { + uint16_t val; + + CL22_RD_OVER_CL45(sc, phy, MDIO_REG_BANK_CL73_IEEEB1, + MDIO_CL73_IEEEB1_AN_LP_ADV2, &val); + + if (val & MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE; + if (val & (MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4 | + MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KR)) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE; + + CL22_RD_OVER_CL45(sc, phy, MDIO_REG_BANK_OVER_1G, + MDIO_OVER_1G_LP_UP1, &val); + + if (val & MDIO_OVER_1G_UP1_2_5G) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_2500XFD_CAPABLE; + if (val & (MDIO_OVER_1G_UP1_10G | MDIO_OVER_1G_UP1_10GH)) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE; + } + + ELINK_DEBUG_P3(sc, "duplex %x flow_ctrl 0x%x link_status 0x%x", + vars->duplex, vars->flow_ctrl, vars->link_status); + return rc; +} + +static uint8_t elink_warpcore_read_status(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars) +{ + struct bnx2x_softc *sc = params->sc; + uint8_t lane; + uint16_t gp_status1, gp_speed, link_up, duplex = DUPLEX_FULL; + elink_status_t rc = ELINK_STATUS_OK; + lane = elink_get_warpcore_lane(phy, params); + /* Read gp_status */ + if ((params->loopback_mode) && + (phy->flags & ELINK_FLAGS_WC_DUAL_MODE)) { + elink_cl45_read(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL5_LINK_STATUS, &link_up); + elink_cl45_read(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL5_LINK_STATUS, &link_up); + link_up &= 0x1; + ELINK_DEBUG_P1(sc, "params->loopback_mode link_up read = %x", + link_up); + } else if ((phy->req_line_speed > ELINK_SPEED_10000) && + (phy->supported & ELINK_SUPPORTED_20000baseMLD2_Full)) { + uint16_t temp_link_up; + elink_cl45_read(sc, phy, MDIO_WC_DEVAD, + 1, &temp_link_up); + elink_cl45_read(sc, phy, MDIO_WC_DEVAD, + 1, &link_up); + ELINK_DEBUG_P2(sc, "PCS RX link status = 0x%x-->0x%x", + temp_link_up, link_up); + link_up &= (1 << 2); + if (link_up) + elink_ext_phy_resolve_fc(phy, params, vars); + } else { + elink_cl45_read(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_GP2_STATUS_GP_2_1, + &gp_status1); + ELINK_DEBUG_P1(sc, "0x81d1 = 0x%x", gp_status1); + /* Check for 
either KR, 1G, or AN up. */ + link_up = ((gp_status1 >> 8) | + (gp_status1 >> 12) | + (gp_status1)) & + (1 << lane); + if (phy->supported & ELINK_SUPPORTED_20000baseKR2_Full) { + uint16_t an_link; + elink_cl45_read(sc, phy, MDIO_AN_DEVAD, + MDIO_AN_REG_STATUS, &an_link); + elink_cl45_read(sc, phy, MDIO_AN_DEVAD, + MDIO_AN_REG_STATUS, &an_link); + link_up |= (an_link & (1 << 2)); + ELINK_DEBUG_P2(sc, "an_link = %x, link_up = %x", + an_link, link_up); + } + if (link_up && ELINK_SINGLE_MEDIA_DIRECT(params)) { + uint16_t pd, gp_status4; + if (phy->req_line_speed == ELINK_SPEED_AUTO_NEG) { + /* Check Autoneg complete */ + elink_cl45_read(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_GP2_STATUS_GP_2_4, + &gp_status4); + if (gp_status4 & ((1 << 12) << lane)) + vars->link_status |= + LINK_STATUS_AUTO_NEGOTIATE_COMPLETE; + + /* Check parallel detect used */ + elink_cl45_read(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_PAR_DET_10G_STATUS, + &pd); + if (pd & (1 << 15)) + vars->link_status |= + LINK_STATUS_PARALLEL_DETECTION_USED; + ELINK_DEBUG_P2(sc, "pd = %x, link_status = %x", + pd, vars->link_status); + } + elink_ext_phy_resolve_fc(phy, params, vars); + vars->duplex = duplex; + ELINK_DEBUG_P3(sc, " ELINK_SINGLE_MEDIA_DIRECT duplex %x flow_ctrl 0x%x link_status 0x%x", + vars->duplex, vars->flow_ctrl, + vars->link_status); + } + } + ELINK_DEBUG_P3(sc, "duplex %x flow_ctrl 0x%x link_status 0x%x", + vars->duplex, vars->flow_ctrl, vars->link_status); + if ((vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) && + ELINK_SINGLE_MEDIA_DIRECT(params)) { + uint16_t val; + + elink_cl45_read(sc, phy, MDIO_AN_DEVAD, + MDIO_AN_REG_LP_AUTO_NEG2, &val); + + if (val & MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE; + if (val & (MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4 | + MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KR)) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE; + ELINK_DEBUG_P2(sc, "val = %x, link_status = %x", + val, vars->link_status); + elink_cl45_read(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL3_LP_UP1, &val); + + if (val & MDIO_OVER_1G_UP1_2_5G) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_2500XFD_CAPABLE; + if (val & (MDIO_OVER_1G_UP1_10G | MDIO_OVER_1G_UP1_10GH)) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE; + ELINK_DEBUG_P2(sc, "val = %x, link_status = %x", + val, vars->link_status); + + } + + + if (lane < 2) { + elink_cl45_read(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_GP2_STATUS_GP_2_2, &gp_speed); + } else { + elink_cl45_read(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_GP2_STATUS_GP_2_3, &gp_speed); + } + ELINK_DEBUG_P2(sc, "lane %d gp_speed 0x%x", lane, gp_speed); + + if ((lane & 1) == 0) + gp_speed <<= 8; + gp_speed &= 0x3f00; + link_up = !!link_up; + + /* Reset the TX FIFO to fix SGMII issue */ + rc = elink_get_link_speed_duplex(phy, params, vars, link_up, gp_speed, + duplex); + + /* In case of KR link down, start up the recovering procedure */ + if ((!link_up) && (phy->media_type == ELINK_ETH_PHY_KR) && + (!(phy->flags & ELINK_FLAGS_WC_DUAL_MODE))) + vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY; + + ELINK_DEBUG_P3(sc, "duplex %x flow_ctrl 0x%x link_status 0x%x", + vars->duplex, vars->flow_ctrl, vars->link_status); + return rc; +} +static void elink_set_gmii_tx_driver(struct elink_params *params) +{ + struct bnx2x_softc *sc = params->sc; + struct elink_phy *phy = ¶ms->phy[ELINK_INT_PHY]; + uint16_t lp_up2; + uint16_t tx_driver; + uint16_t bank; + + /* Read precomp */ + CL22_RD_OVER_CL45(sc, phy, + MDIO_REG_BANK_OVER_1G, + 
MDIO_OVER_1G_LP_UP2, &lp_up2); + + /* Bits [10:7] at lp_up2, positioned at [15:12] */ + lp_up2 = (((lp_up2 & MDIO_OVER_1G_LP_UP2_PREEMPHASIS_MASK) >> + MDIO_OVER_1G_LP_UP2_PREEMPHASIS_SHIFT) << + MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT); + + if (lp_up2 == 0) + return; + + for (bank = MDIO_REG_BANK_TX0; bank <= MDIO_REG_BANK_TX3; + bank += (MDIO_REG_BANK_TX1 - MDIO_REG_BANK_TX0)) { + CL22_RD_OVER_CL45(sc, phy, + bank, + MDIO_TX0_TX_DRIVER, &tx_driver); + + /* Replace tx_driver bits [15:12] */ + if (lp_up2 != + (tx_driver & MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK)) { + tx_driver &= ~MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK; + tx_driver |= lp_up2; + CL22_WR_OVER_CL45(sc, phy, + bank, + MDIO_TX0_TX_DRIVER, tx_driver); + } + } +} + +static elink_status_t elink_emac_program(struct elink_params *params, + struct elink_vars *vars) +{ + struct bnx2x_softc *sc = params->sc; + uint8_t port = params->port; + uint16_t mode = 0; + + ELINK_DEBUG_P0(sc, "setting link speed & duplex"); + elink_bits_dis(sc, GRCBASE_EMAC0 + port * 0x400 + + EMAC_REG_EMAC_MODE, + (EMAC_MODE_25G_MODE | + EMAC_MODE_PORT_MII_10M | + EMAC_MODE_HALF_DUPLEX)); + switch (vars->line_speed) { + case ELINK_SPEED_10: + mode |= EMAC_MODE_PORT_MII_10M; + break; + + case ELINK_SPEED_100: + mode |= EMAC_MODE_PORT_MII; + break; + + case ELINK_SPEED_1000: + mode |= EMAC_MODE_PORT_GMII; + break; + + case ELINK_SPEED_2500: + mode |= (EMAC_MODE_25G_MODE | EMAC_MODE_PORT_GMII); + break; + + default: + /* 10G not valid for EMAC */ + ELINK_DEBUG_P1(sc, "Invalid line_speed 0x%x", + vars->line_speed); + return ELINK_STATUS_ERROR; + } + + if (vars->duplex == DUPLEX_HALF) + mode |= EMAC_MODE_HALF_DUPLEX; + elink_bits_en(sc, + GRCBASE_EMAC0 + port * 0x400 + EMAC_REG_EMAC_MODE, + mode); + + elink_set_led(params, vars, ELINK_LED_MODE_OPER, vars->line_speed); + return ELINK_STATUS_OK; +} + +static void elink_set_preemphasis(struct elink_phy *phy, + struct elink_params *params) +{ + + uint16_t bank, i = 0; + struct bnx2x_softc *sc = params->sc; + + for (bank = MDIO_REG_BANK_RX0, i = 0; bank <= MDIO_REG_BANK_RX3; + bank += (MDIO_REG_BANK_RX1 - MDIO_REG_BANK_RX0), i++) { + CL22_WR_OVER_CL45(sc, phy, + bank, + MDIO_RX0_RX_EQ_BOOST, + phy->rx_preemphasis[i]); + } + + for (bank = MDIO_REG_BANK_TX0, i = 0; bank <= MDIO_REG_BANK_TX3; + bank += (MDIO_REG_BANK_TX1 - MDIO_REG_BANK_TX0), i++) { + CL22_WR_OVER_CL45(sc, phy, + bank, + MDIO_TX0_TX_DRIVER, + phy->tx_preemphasis[i]); + } +} + +static uint8_t elink_xgxs_config_init(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars) +{ + struct bnx2x_softc *sc = params->sc; + uint8_t enable_cl73 = (ELINK_SINGLE_MEDIA_DIRECT(params) || + (params->loopback_mode == ELINK_LOOPBACK_XGXS)); + if (!(vars->phy_flags & PHY_SGMII_FLAG)) { + if (ELINK_SINGLE_MEDIA_DIRECT(params) && + (params->feature_config_flags & + ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) + elink_set_preemphasis(phy, params); + + /* Forced speed requested? 
*/ + if (vars->line_speed != ELINK_SPEED_AUTO_NEG || + (ELINK_SINGLE_MEDIA_DIRECT(params) && + params->loopback_mode == ELINK_LOOPBACK_EXT)) { + ELINK_DEBUG_P0(sc, "not SGMII, no AN"); + + /* Disable autoneg */ + elink_set_autoneg(phy, params, vars, 0); + + /* Program speed and duplex */ + elink_program_serdes(phy, params, vars); + + } else { /* AN_mode */ + ELINK_DEBUG_P0(sc, "not SGMII, AN"); + + /* AN enabled */ + elink_set_brcm_cl37_advertisement(phy, params); + + /* Program duplex & pause advertisement (for aneg) */ + elink_set_ieee_aneg_advertisement(phy, params, + vars->ieee_fc); + + /* Enable autoneg */ + elink_set_autoneg(phy, params, vars, enable_cl73); + + /* Enable and restart AN */ + elink_restart_autoneg(phy, params, enable_cl73); + } + + } else { /* SGMII mode */ + ELINK_DEBUG_P0(sc, "SGMII"); + + elink_initialize_sgmii_process(phy, params, vars); + } + + return 0; +} + +static elink_status_t elink_prepare_xgxs(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars) +{ + elink_status_t rc; + vars->phy_flags |= PHY_XGXS_FLAG; + if ((phy->req_line_speed && + ((phy->req_line_speed == ELINK_SPEED_100) || + (phy->req_line_speed == ELINK_SPEED_10))) || + (!phy->req_line_speed && + (phy->speed_cap_mask >= + PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL) && + (phy->speed_cap_mask < + PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) || + (phy->type == PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT_SD)) + vars->phy_flags |= PHY_SGMII_FLAG; + else + vars->phy_flags &= ~PHY_SGMII_FLAG; + + elink_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc); + elink_set_aer_mmd(params, phy); + if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) + elink_set_master_ln(params, phy); + + rc = elink_reset_unicore(params, phy, 0); + /* Reset the SerDes and wait for reset bit return low */ + if (rc != ELINK_STATUS_OK) + return rc; + + elink_set_aer_mmd(params, phy); + /* Setting the masterLn_def again after the reset */ + if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) { + elink_set_master_ln(params, phy); + elink_set_swap_lanes(params, phy); + } + + return rc; +} + +static uint16_t elink_wait_reset_complete(struct bnx2x_softc *sc, + struct elink_phy *phy, + struct elink_params *params) +{ + uint16_t cnt, ctrl; + /* Wait for soft reset to get cleared up to 1 sec */ + for (cnt = 0; cnt < 1000; cnt++) { + if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BNX2X54618SE) + elink_cl22_read(sc, phy, + MDIO_PMA_REG_CTRL, &ctrl); + else + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_CTRL, &ctrl); + if (!(ctrl & (1 << 15))) + break; + DELAY(1000 * 1); + } + + if (cnt == 1000) + elink_cb_event_log(sc, ELINK_LOG_ID_PHY_UNINITIALIZED, + params->port); + /* "Warning: PHY was not initialized," + * " Port %d", + */ + + ELINK_DEBUG_P2(sc, "control reg 0x%x (after %d ms)", ctrl, cnt); + return cnt; +} + +static void elink_link_int_enable(struct elink_params *params) +{ + uint8_t port = params->port; + uint32_t mask; + struct bnx2x_softc *sc = params->sc; + + /* Setting the status to report on link up for either XGXS or SerDes */ + if (CHIP_IS_E3(sc)) { + mask = ELINK_NIG_MASK_XGXS0_LINK_STATUS; + if (!(ELINK_SINGLE_MEDIA_DIRECT(params))) + mask |= ELINK_NIG_MASK_MI_INT; + } else if (params->switch_cfg == ELINK_SWITCH_CFG_10G) { + mask = (ELINK_NIG_MASK_XGXS0_LINK10G | + ELINK_NIG_MASK_XGXS0_LINK_STATUS); + ELINK_DEBUG_P0(sc, "enabled XGXS interrupt"); + if (!(ELINK_SINGLE_MEDIA_DIRECT(params)) && + params->phy[ELINK_INT_PHY].type != + PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) { + mask |= 
ELINK_NIG_MASK_MI_INT; + ELINK_DEBUG_P0(sc, "enabled external phy int"); + } + + } else { /* SerDes */ + mask = ELINK_NIG_MASK_SERDES0_LINK_STATUS; + ELINK_DEBUG_P0(sc, "enabled SerDes interrupt"); + if (!(ELINK_SINGLE_MEDIA_DIRECT(params)) && + params->phy[ELINK_INT_PHY].type != + PORT_HW_CFG_SERDES_EXT_PHY_TYPE_NOT_CONN) { + mask |= ELINK_NIG_MASK_MI_INT; + ELINK_DEBUG_P0(sc, "enabled external phy int"); + } + } + elink_bits_en(sc, + NIG_REG_MASK_INTERRUPT_PORT0 + port * 4, + mask); + + ELINK_DEBUG_P3(sc, "port %x, is_xgxs %x, int_status 0x%x", port, + (params->switch_cfg == ELINK_SWITCH_CFG_10G), + REG_RD(sc, NIG_REG_STATUS_INTERRUPT_PORT0 + port * 4)); + ELINK_DEBUG_P3(sc, " int_mask 0x%x, MI_INT %x, SERDES_LINK %x", + REG_RD(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port * 4), + REG_RD(sc, NIG_REG_EMAC0_STATUS_MISC_MI_INT + port * 0x18), + REG_RD(sc, NIG_REG_SERDES0_STATUS_LINK_STATUS + port * 0x3c)); + ELINK_DEBUG_P2(sc, " 10G %x, XGXS_LINK %x", + REG_RD(sc, NIG_REG_XGXS0_STATUS_LINK10G + port * 0x68), + REG_RD(sc, NIG_REG_XGXS0_STATUS_LINK_STATUS + port * 0x68)); +} + +static void elink_rearm_latch_signal(struct bnx2x_softc *sc, uint8_t port, + uint8_t exp_mi_int) +{ + uint32_t latch_status = 0; + + /* Disable the MI INT ( external phy int ) by writing 1 to the + * status register. Link down indication is high-active-signal, + * so in this case we need to write the status to clear the XOR + */ + /* Read Latched signals */ + latch_status = REG_RD(sc, + NIG_REG_LATCH_STATUS_0 + port * 8); + ELINK_DEBUG_P1(sc, "latch_status = 0x%x", latch_status); + /* Handle only those with latched-signal=up.*/ + if (exp_mi_int) + elink_bits_en(sc, + NIG_REG_STATUS_INTERRUPT_PORT0 + + port * 4, + ELINK_NIG_STATUS_EMAC0_MI_INT); + else + elink_bits_dis(sc, + NIG_REG_STATUS_INTERRUPT_PORT0 + + port * 4, + ELINK_NIG_STATUS_EMAC0_MI_INT); + + if (latch_status & 1) { + + /* For all latched-signal=up : Re-Arm Latch signals */ + REG_WR(sc, NIG_REG_LATCH_STATUS_0 + port * 8, + (latch_status & 0xfffe) | (latch_status & 1)); + } + /* For all latched-signal=up,Write original_signal to status */ +} + +static void elink_link_int_ack(struct elink_params *params, + struct elink_vars *vars, uint8_t is_10g_plus) +{ + struct bnx2x_softc *sc = params->sc; + uint8_t port = params->port; + uint32_t mask; + /* First reset all status we assume only one line will be + * change at a time + */ + elink_bits_dis(sc, NIG_REG_STATUS_INTERRUPT_PORT0 + port * 4, + (ELINK_NIG_STATUS_XGXS0_LINK10G | + ELINK_NIG_STATUS_XGXS0_LINK_STATUS | + ELINK_NIG_STATUS_SERDES0_LINK_STATUS)); + if (vars->phy_link_up) { + if (USES_WARPCORE(sc)) + mask = ELINK_NIG_STATUS_XGXS0_LINK_STATUS; + else { + if (is_10g_plus) + mask = ELINK_NIG_STATUS_XGXS0_LINK10G; + else if (params->switch_cfg == ELINK_SWITCH_CFG_10G) { + /* Disable the link interrupt by writing 1 to + * the relevant lane in the status register + */ + uint32_t ser_lane = + ((params->lane_config & + PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >> + PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT); + mask = ((1 << ser_lane) << + ELINK_NIG_STATUS_XGXS0_LINK_STATUS_SIZE); + } else + mask = ELINK_NIG_STATUS_SERDES0_LINK_STATUS; + } + ELINK_DEBUG_P1(sc, "Ack link up interrupt with mask 0x%x", + mask); + elink_bits_en(sc, + NIG_REG_STATUS_INTERRUPT_PORT0 + port * 4, + mask); + } +} + +static elink_status_t elink_format_ver(uint32_t num, uint8_t *str, + uint16_t *len) +{ + uint8_t *str_ptr = str; + uint32_t mask = 0xf0000000; + uint8_t shift = 8 * 4; + uint8_t digit; + uint8_t remove_leading_zeros = 1; + if (*len < 10) 
{ + /* Need more than 10chars for this format */ + *str_ptr = '\0'; + (*len)--; + return ELINK_STATUS_ERROR; + } + while (shift > 0) { + + shift -= 4; + digit = ((num & mask) >> shift); + if (digit == 0 && remove_leading_zeros) { + mask = mask >> 4; + continue; + } else if (digit < 0xa) + *str_ptr = digit + '0'; + else + *str_ptr = digit - 0xa + 'a'; + remove_leading_zeros = 0; + str_ptr++; + (*len)--; + mask = mask >> 4; + if (shift == 4 * 4) { + *str_ptr = '.'; + str_ptr++; + (*len)--; + remove_leading_zeros = 1; + } + } + return ELINK_STATUS_OK; +} + + +static elink_status_t elink_null_format_ver(__rte_unused uint32_t spirom_ver, + uint8_t *str, + uint16_t *len) +{ + str[0] = '\0'; + (*len)--; + return ELINK_STATUS_OK; +} + +elink_status_t elink_get_ext_phy_fw_version(struct elink_params *params, + uint8_t *version, + uint16_t len) +{ + struct bnx2x_softc *sc; + uint32_t spirom_ver = 0; + elink_status_t status = ELINK_STATUS_OK; + uint8_t *ver_p = version; + uint16_t remain_len = len; + if (version == NULL || params == NULL) + return ELINK_STATUS_ERROR; + sc = params->sc; + + /* Extract first external phy*/ + version[0] = '\0'; + spirom_ver = REG_RD(sc, params->phy[ELINK_EXT_PHY1].ver_addr); + + if (params->phy[ELINK_EXT_PHY1].format_fw_ver) { + status |= params->phy[ELINK_EXT_PHY1].format_fw_ver(spirom_ver, + ver_p, + &remain_len); + ver_p += (len - remain_len); + } + if ((params->num_phys == ELINK_MAX_PHYS) && + (params->phy[ELINK_EXT_PHY2].ver_addr != 0)) { + spirom_ver = REG_RD(sc, params->phy[ELINK_EXT_PHY2].ver_addr); + if (params->phy[ELINK_EXT_PHY2].format_fw_ver) { + *ver_p = '/'; + ver_p++; + remain_len--; + status |= params->phy[ELINK_EXT_PHY2].format_fw_ver( + spirom_ver, + ver_p, + &remain_len); + ver_p = version + (len - remain_len); + } + } + *ver_p = '\0'; + return status; +} + +static void elink_set_xgxs_loopback(struct elink_phy *phy, + struct elink_params *params) +{ + uint8_t port = params->port; + struct bnx2x_softc *sc = params->sc; + + if (phy->req_line_speed != ELINK_SPEED_1000) { + uint32_t md_devad = 0; + + ELINK_DEBUG_P0(sc, "XGXS 10G loopback enable"); + + if (!CHIP_IS_E3(sc)) { + /* Change the uni_phy_addr in the nig */ + md_devad = REG_RD(sc, (NIG_REG_XGXS0_CTRL_MD_DEVAD + + port * 0x18)); + + REG_WR(sc, NIG_REG_XGXS0_CTRL_MD_DEVAD + port * 0x18, + 0x5); + } + + elink_cl45_write(sc, phy, + 5, + (MDIO_REG_BANK_AER_BLOCK + + (MDIO_AER_BLOCK_AER_REG & 0xf)), + 0x2800); + + elink_cl45_write(sc, phy, + 5, + (MDIO_REG_BANK_CL73_IEEEB0 + + (MDIO_CL73_IEEEB0_CL73_AN_CONTROL & 0xf)), + 0x6041); + DELAY(1000 * 200); + /* Set aer mmd back */ + elink_set_aer_mmd(params, phy); + + if (!CHIP_IS_E3(sc)) { + /* And md_devad */ + REG_WR(sc, NIG_REG_XGXS0_CTRL_MD_DEVAD + port * 0x18, + md_devad); + } + } else { + uint16_t mii_ctrl; + ELINK_DEBUG_P0(sc, "XGXS 1G loopback enable"); + elink_cl45_read(sc, phy, 5, + (MDIO_REG_BANK_COMBO_IEEE0 + + (MDIO_COMBO_IEEE0_MII_CONTROL & 0xf)), + &mii_ctrl); + elink_cl45_write(sc, phy, 5, + (MDIO_REG_BANK_COMBO_IEEE0 + + (MDIO_COMBO_IEEE0_MII_CONTROL & 0xf)), + mii_ctrl | + MDIO_COMBO_IEEO_MII_CONTROL_LOOPBACK); + } +} + +elink_status_t elink_set_led(struct elink_params *params, + struct elink_vars *vars, uint8_t mode, uint32_t speed) +{ + uint8_t port = params->port; + uint16_t hw_led_mode = params->hw_led_mode; + elink_status_t rc = ELINK_STATUS_OK; + uint8_t phy_idx; + uint32_t tmp; + uint32_t emac_base = port ? 
GRCBASE_EMAC1 : GRCBASE_EMAC0; + struct bnx2x_softc *sc = params->sc; + ELINK_DEBUG_P2(sc, "elink_set_led: port %x, mode %d", port, mode); + ELINK_DEBUG_P2(sc, "speed 0x%x, hw_led_mode 0x%x", + speed, hw_led_mode); + /* In case */ + for (phy_idx = ELINK_EXT_PHY1; phy_idx < ELINK_MAX_PHYS; phy_idx++) { + if (params->phy[phy_idx].set_link_led) { + params->phy[phy_idx].set_link_led( + ¶ms->phy[phy_idx], params, mode); + } + } +#ifdef ELINK_INCLUDE_EMUL + if (params->feature_config_flags & + ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC) + return rc; +#endif + + switch (mode) { + case ELINK_LED_MODE_FRONT_PANEL_OFF: + case ELINK_LED_MODE_OFF: + REG_WR(sc, NIG_REG_LED_10G_P0 + port * 4, 0); + REG_WR(sc, NIG_REG_LED_MODE_P0 + port * 4, + SHARED_HW_CFG_LED_MAC1); + + tmp = elink_cb_reg_read(sc, emac_base + EMAC_REG_EMAC_LED); + if (params->phy[ELINK_EXT_PHY1].type == + PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BNX2X54618SE) + tmp &= ~(EMAC_LED_1000MB_OVERRIDE | + EMAC_LED_100MB_OVERRIDE | + EMAC_LED_10MB_OVERRIDE); + else + tmp |= EMAC_LED_OVERRIDE; + + elink_cb_reg_write(sc, emac_base + EMAC_REG_EMAC_LED, tmp); + break; + + case ELINK_LED_MODE_OPER: + /* For all other phys, OPER mode is same as ON, so in case + * link is down, do nothing + */ + if (!vars->link_up) + break; + /* fallthrough */ + case ELINK_LED_MODE_ON: + if (((params->phy[ELINK_EXT_PHY1].type == + PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8727) || + (params->phy[ELINK_EXT_PHY1].type == + PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8722)) && + CHIP_IS_E2(sc) && params->num_phys == 2) { + /* This is a work-around for E2 + 8727 Configurations */ + if (mode == ELINK_LED_MODE_ON || + speed == ELINK_SPEED_10000){ + REG_WR(sc, NIG_REG_LED_MODE_P0 + port * 4, 0); + REG_WR(sc, NIG_REG_LED_10G_P0 + port * 4, 1); + + tmp = elink_cb_reg_read(sc, emac_base + + EMAC_REG_EMAC_LED); + elink_cb_reg_write(sc, emac_base + + EMAC_REG_EMAC_LED, + (tmp | EMAC_LED_OVERRIDE)); + /* Return here without enabling traffic + * LED blink and setting rate in ON mode. + * In oper mode, enabling LED blink + * and setting rate is needed. + */ + if (mode == ELINK_LED_MODE_ON) + return rc; + } + } else if (ELINK_SINGLE_MEDIA_DIRECT(params)) { + /* This is a work-around for HW issue found when link + * is up in CL73 + */ + if ((!CHIP_IS_E3(sc)) || + (CHIP_IS_E3(sc) && + mode == ELINK_LED_MODE_ON)) + REG_WR(sc, NIG_REG_LED_10G_P0 + port * 4, 1); + + if (CHIP_IS_E1x(sc) || + CHIP_IS_E2(sc) || + (mode == ELINK_LED_MODE_ON)) + REG_WR(sc, NIG_REG_LED_MODE_P0 + port * 4, 0); + else + REG_WR(sc, NIG_REG_LED_MODE_P0 + port * 4, + hw_led_mode); + } else if ((params->phy[ELINK_EXT_PHY1].type == + PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BNX2X54618SE) && + (mode == ELINK_LED_MODE_ON)) { + REG_WR(sc, NIG_REG_LED_MODE_P0 + port * 4, 0); + tmp = elink_cb_reg_read(sc, emac_base + + EMAC_REG_EMAC_LED); + elink_cb_reg_write(sc, emac_base + EMAC_REG_EMAC_LED, + tmp | EMAC_LED_OVERRIDE | + EMAC_LED_1000MB_OVERRIDE); + /* Break here; otherwise, it'll disable the + * intended override. + */ + break; + } else { + uint32_t nig_led_mode = ((params->hw_led_mode << + SHARED_HW_CFG_LED_MODE_SHIFT) == + SHARED_HW_CFG_LED_EXTPHY2) ? 
+ (SHARED_HW_CFG_LED_PHY1 >> + SHARED_HW_CFG_LED_MODE_SHIFT) : hw_led_mode; + REG_WR(sc, NIG_REG_LED_MODE_P0 + port * 4, + nig_led_mode); + } + + REG_WR(sc, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + port * 4, + 0); + /* Set blinking rate to ~15.9Hz */ + if (CHIP_IS_E3(sc)) + REG_WR(sc, NIG_REG_LED_CONTROL_BLINK_RATE_P0 + port * 4, + LED_BLINK_RATE_VAL_E3); + else + REG_WR(sc, NIG_REG_LED_CONTROL_BLINK_RATE_P0 + port * 4, + LED_BLINK_RATE_VAL_E1X_E2); + REG_WR(sc, NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 + + port * 4, 1); + tmp = elink_cb_reg_read(sc, emac_base + EMAC_REG_EMAC_LED); + elink_cb_reg_write(sc, emac_base + EMAC_REG_EMAC_LED, + (tmp & (~EMAC_LED_OVERRIDE))); + + if (CHIP_IS_E1(sc) && + ((speed == ELINK_SPEED_2500) || + (speed == ELINK_SPEED_1000) || + (speed == ELINK_SPEED_100) || + (speed == ELINK_SPEED_10))) { + /* For speeds less than 10G LED scheme is different */ + REG_WR(sc, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + + port * 4, 1); + REG_WR(sc, NIG_REG_LED_CONTROL_TRAFFIC_P0 + + port * 4, 0); + REG_WR(sc, NIG_REG_LED_CONTROL_BLINK_TRAFFIC_P0 + + port * 4, 1); + } + break; + + default: + rc = ELINK_STATUS_ERROR; + ELINK_DEBUG_P1(sc, "elink_set_led: Invalid led mode %d", + mode); + break; + } + return rc; + +} + +/* This function comes to reflect the actual link state read DIRECTLY from the + * HW + */ +elink_status_t elink_test_link(struct elink_params *params, + __rte_unused struct elink_vars *vars, + uint8_t is_serdes) +{ + struct bnx2x_softc *sc = params->sc; + uint16_t gp_status = 0, phy_index = 0; + uint8_t ext_phy_link_up = 0, serdes_phy_type; + struct elink_vars temp_vars; + struct elink_phy *int_phy = ¶ms->phy[ELINK_INT_PHY]; +#ifdef ELINK_INCLUDE_FPGA + if (CHIP_REV_IS_FPGA(sc)) + return ELINK_STATUS_OK; +#endif +#ifdef ELINK_INCLUDE_EMUL + if (CHIP_REV_IS_EMUL(sc)) + return ELINK_STATUS_OK; +#endif + + if (CHIP_IS_E3(sc)) { + uint16_t link_up; + if (params->req_line_speed[ELINK_LINK_CONFIG_IDX(ELINK_INT_PHY)] + > ELINK_SPEED_10000) { + /* Check 20G link */ + elink_cl45_read(sc, int_phy, MDIO_WC_DEVAD, + 1, &link_up); + elink_cl45_read(sc, int_phy, MDIO_WC_DEVAD, + 1, &link_up); + link_up &= (1 << 2); + } else { + /* Check 10G link and below*/ + uint8_t lane = elink_get_warpcore_lane(int_phy, params); + elink_cl45_read(sc, int_phy, MDIO_WC_DEVAD, + MDIO_WC_REG_GP2_STATUS_GP_2_1, + &gp_status); + gp_status = ((gp_status >> 8) & 0xf) | + ((gp_status >> 12) & 0xf); + link_up = gp_status & (1 << lane); + } + if (!link_up) + return ELINK_STATUS_NO_LINK; + } else { + CL22_RD_OVER_CL45(sc, int_phy, + MDIO_REG_BANK_GP_STATUS, + MDIO_GP_STATUS_TOP_AN_STATUS1, + &gp_status); + /* Link is up only if both local phy and external phy are up */ + if (!(gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS)) + return ELINK_STATUS_NO_LINK; + } + /* In XGXS loopback mode, do not check external PHY */ + if (params->loopback_mode == ELINK_LOOPBACK_XGXS) + return ELINK_STATUS_OK; + + switch (params->num_phys) { + case 1: + /* No external PHY */ + return ELINK_STATUS_OK; + case 2: + ext_phy_link_up = params->phy[ELINK_EXT_PHY1].read_status( + ¶ms->phy[ELINK_EXT_PHY1], + params, &temp_vars); + break; + case 3: /* Dual Media */ + for (phy_index = ELINK_EXT_PHY1; phy_index < params->num_phys; + phy_index++) { + serdes_phy_type = ((params->phy[phy_index].media_type == + ELINK_ETH_PHY_SFPP_10G_FIBER) || + (params->phy[phy_index].media_type == + ELINK_ETH_PHY_SFP_1G_FIBER) || + (params->phy[phy_index].media_type == + ELINK_ETH_PHY_XFP_FIBER) || + (params->phy[phy_index].media_type == + 
ELINK_ETH_PHY_DA_TWINAX)); + + if (is_serdes != serdes_phy_type) + continue; + if (params->phy[phy_index].read_status) { + ext_phy_link_up |= + params->phy[phy_index].read_status( + ¶ms->phy[phy_index], + params, &temp_vars); + } + } + break; + } + if (ext_phy_link_up) + return ELINK_STATUS_OK; + return ELINK_STATUS_NO_LINK; +} + +static elink_status_t elink_link_initialize(struct elink_params *params, + struct elink_vars *vars) +{ + uint8_t phy_index, non_ext_phy; + struct bnx2x_softc *sc = params->sc; + /* In case of external phy existence, the line speed would be the + * line speed linked up by the external phy. In case it is direct + * only, then the line_speed during initialization will be + * equal to the req_line_speed + */ + vars->line_speed = params->phy[ELINK_INT_PHY].req_line_speed; + + /* Initialize the internal phy in case this is a direct board + * (no external phys), or this board has external phy which requires + * to first. + */ + if (!USES_WARPCORE(sc)) + elink_prepare_xgxs(¶ms->phy[ELINK_INT_PHY], params, vars); + /* init ext phy and enable link state int */ + non_ext_phy = (ELINK_SINGLE_MEDIA_DIRECT(params) || + (params->loopback_mode == ELINK_LOOPBACK_XGXS)); + + if (non_ext_phy || + (params->phy[ELINK_EXT_PHY1].flags & ELINK_FLAGS_INIT_XGXS_FIRST) || + (params->loopback_mode == ELINK_LOOPBACK_EXT_PHY)) { + struct elink_phy *phy = ¶ms->phy[ELINK_INT_PHY]; + if (vars->line_speed == ELINK_SPEED_AUTO_NEG && + (CHIP_IS_E1x(sc) || + CHIP_IS_E2(sc))) + elink_set_parallel_detection(phy, params); + if (params->phy[ELINK_INT_PHY].config_init) + params->phy[ELINK_INT_PHY].config_init(phy, params, + vars); + } + + /* Re-read this value in case it was changed inside config_init due to + * limitations of optic module + */ + vars->line_speed = params->phy[ELINK_INT_PHY].req_line_speed; + + /* Init external phy*/ + if (non_ext_phy) { + if (params->phy[ELINK_INT_PHY].supported & + ELINK_SUPPORTED_FIBRE) + vars->link_status |= LINK_STATUS_SERDES_LINK; + } else { + for (phy_index = ELINK_EXT_PHY1; phy_index < params->num_phys; + phy_index++) { + /* No need to initialize second phy in case of first + * phy only selection. In case of second phy, we do + * need to initialize the first phy, since they are + * connected. 
+ */ + if (params->phy[phy_index].supported & + ELINK_SUPPORTED_FIBRE) + vars->link_status |= LINK_STATUS_SERDES_LINK; + + if (phy_index == ELINK_EXT_PHY2 && + (elink_phy_selection(params) == + PORT_HW_CFG_PHY_SELECTION_FIRST_PHY)) { + ELINK_DEBUG_P0(sc, + "Not initializing second phy"); + continue; + } + params->phy[phy_index].config_init( + ¶ms->phy[phy_index], + params, vars); + } + } + /* Reset the interrupt indication after phy was initialized */ + elink_bits_dis(sc, NIG_REG_STATUS_INTERRUPT_PORT0 + + params->port * 4, + (ELINK_NIG_STATUS_XGXS0_LINK10G | + ELINK_NIG_STATUS_XGXS0_LINK_STATUS | + ELINK_NIG_STATUS_SERDES0_LINK_STATUS | + ELINK_NIG_MASK_MI_INT)); + return ELINK_STATUS_OK; +} + +static void elink_int_link_reset(__rte_unused struct elink_phy *phy, + struct elink_params *params) +{ + /* Reset the SerDes/XGXS */ + REG_WR(params->sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, + (0x1ff << (params->port * 16))); +} + +static void elink_common_ext_link_reset(__rte_unused struct elink_phy *phy, + struct elink_params *params) +{ + struct bnx2x_softc *sc = params->sc; + uint8_t gpio_port; + /* HW reset */ + if (CHIP_IS_E2(sc)) + gpio_port = SC_PATH(sc); + else + gpio_port = params->port; + elink_cb_gpio_write(sc, MISC_REGISTERS_GPIO_1, + MISC_REGISTERS_GPIO_OUTPUT_LOW, + gpio_port); + elink_cb_gpio_write(sc, MISC_REGISTERS_GPIO_2, + MISC_REGISTERS_GPIO_OUTPUT_LOW, + gpio_port); + ELINK_DEBUG_P0(sc, "reset external PHY"); +} + +static elink_status_t elink_update_link_down(struct elink_params *params, + struct elink_vars *vars) +{ + struct bnx2x_softc *sc = params->sc; + uint8_t port = params->port; + + ELINK_DEBUG_P1(sc, "Port %x: Link is down", port); + elink_set_led(params, vars, ELINK_LED_MODE_OFF, 0); + vars->phy_flags &= ~PHY_PHYSICAL_LINK_FLAG; + /* Indicate no mac active */ + vars->mac_type = ELINK_MAC_TYPE_NONE; + + /* Update shared memory */ + vars->link_status &= ~ELINK_LINK_UPDATE_MASK; + vars->line_speed = 0; + elink_update_mng(params, vars->link_status); + + /* Activate nig drain */ + REG_WR(sc, NIG_REG_EGRESS_DRAIN0_MODE + port * 4, 1); + + /* Disable emac */ + if (!CHIP_IS_E3(sc)) + REG_WR(sc, NIG_REG_NIG_EMAC0_EN + port * 4, 0); + + DELAY(1000 * 10); + /* Reset BigMac/Xmac */ + if (CHIP_IS_E1x(sc) || + CHIP_IS_E2(sc)) + elink_set_bmac_rx(sc, params->chip_id, params->port, 0); + + if (CHIP_IS_E3(sc)) { + /* Prevent LPI Generation by chip */ + REG_WR(sc, MISC_REG_CPMU_LP_FW_ENABLE_P0 + (params->port << 2), + 0); + REG_WR(sc, MISC_REG_CPMU_LP_MASK_ENT_P0 + (params->port << 2), + 0); + vars->eee_status &= ~(SHMEM_EEE_LP_ADV_STATUS_MASK | + SHMEM_EEE_ACTIVE_BIT); + + elink_update_mng_eee(params, vars->eee_status); + elink_set_xmac_rxtx(params, 0); + elink_set_umac_rxtx(params, 0); + } + + return ELINK_STATUS_OK; +} + +static elink_status_t elink_update_link_up(struct elink_params *params, + struct elink_vars *vars, + uint8_t link_10g) +{ + struct bnx2x_softc *sc = params->sc; + uint8_t phy_idx, port = params->port; + elink_status_t rc = ELINK_STATUS_OK; + + vars->link_status |= (LINK_STATUS_LINK_UP | + LINK_STATUS_PHYSICAL_LINK_FLAG); + vars->phy_flags |= PHY_PHYSICAL_LINK_FLAG; + + if (vars->flow_ctrl & ELINK_FLOW_CTRL_TX) + vars->link_status |= + LINK_STATUS_TX_FLOW_CONTROL_ENABLED; + + if (vars->flow_ctrl & ELINK_FLOW_CTRL_RX) + vars->link_status |= + LINK_STATUS_RX_FLOW_CONTROL_ENABLED; + if (USES_WARPCORE(sc)) { + if (link_10g) { + if (elink_xmac_enable(params, vars, 0) == + ELINK_STATUS_NO_LINK) { + ELINK_DEBUG_P0(sc, "Found errors on XMAC"); + vars->link_up = 
0; + vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG; + vars->link_status &= ~LINK_STATUS_LINK_UP; + } + } else + elink_umac_enable(params, vars, 0); + elink_set_led(params, vars, + ELINK_LED_MODE_OPER, vars->line_speed); + + if ((vars->eee_status & SHMEM_EEE_ACTIVE_BIT) && + (vars->eee_status & SHMEM_EEE_LPI_REQUESTED_BIT)) { + ELINK_DEBUG_P0(sc, "Enabling LPI assertion"); + REG_WR(sc, MISC_REG_CPMU_LP_FW_ENABLE_P0 + + (params->port << 2), 1); + REG_WR(sc, MISC_REG_CPMU_LP_DR_ENABLE, 1); + REG_WR(sc, MISC_REG_CPMU_LP_MASK_ENT_P0 + + (params->port << 2), 0xfc20); + } + } + if ((CHIP_IS_E1x(sc) || + CHIP_IS_E2(sc))) { + if (link_10g) { + if (elink_bmac_enable(params, vars, 0, 1) == + ELINK_STATUS_NO_LINK) { + ELINK_DEBUG_P0(sc, "Found errors on BMAC"); + vars->link_up = 0; + vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG; + vars->link_status &= ~LINK_STATUS_LINK_UP; + } + + elink_set_led(params, vars, + ELINK_LED_MODE_OPER, ELINK_SPEED_10000); + } else { + rc = elink_emac_program(params, vars); + elink_emac_enable(params, vars, 0); + + /* AN complete? */ + if ((vars->link_status & + LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) + && (!(vars->phy_flags & PHY_SGMII_FLAG)) && + ELINK_SINGLE_MEDIA_DIRECT(params)) + elink_set_gmii_tx_driver(params); + } + } + + /* PBF - link up */ + if (CHIP_IS_E1x(sc)) + rc |= elink_pbf_update(params, vars->flow_ctrl, + vars->line_speed); + + /* Disable drain */ + REG_WR(sc, NIG_REG_EGRESS_DRAIN0_MODE + port * 4, 0); + + /* Update shared memory */ + elink_update_mng(params, vars->link_status); + elink_update_mng_eee(params, vars->eee_status); + /* Check remote fault */ + for (phy_idx = ELINK_INT_PHY; phy_idx < ELINK_MAX_PHYS; phy_idx++) { + if (params->phy[phy_idx].flags & ELINK_FLAGS_TX_ERROR_CHECK) { + elink_check_half_open_conn(params, vars, 0); + break; + } + } + DELAY(1000 * 20); + return rc; +} + +static void elink_chng_link_count(struct elink_params *params, uint8_t clear) +{ + struct bnx2x_softc *sc = params->sc; + uint32_t addr, val; + + /* Verify the link_change_count is supported by the MFW */ + if (!(SHMEM2_HAS(sc, link_change_count))) + return; + + addr = params->shmem2_base + + offsetof(struct shmem2_region, link_change_count[params->port]); + if (clear) + val = 0; + else + val = REG_RD(sc, addr) + 1; + REG_WR(sc, addr, val); +} + +/* The elink_link_update function should be called upon link + * interrupt. + * Link is considered up as follows: + * - DIRECT_SINGLE_MEDIA - Only XGXS link (internal link) needs + * to be up + * - SINGLE_MEDIA - The link between the 577xx and the external + * phy (XGXS) need to up as well as the external link of the + * phy (PHY_EXT1) + * - DUAL_MEDIA - The link between the 577xx and the first + * external phy needs to be up, and at least one of the 2 + * external phy link must be up. 
+ */ +elink_status_t elink_link_update(struct elink_params *params, + struct elink_vars *vars) +{ + struct bnx2x_softc *sc = params->sc; + struct elink_vars phy_vars[ELINK_MAX_PHYS]; + uint8_t port = params->port; + uint8_t link_10g_plus, phy_index; + uint32_t prev_link_status = vars->link_status; + uint8_t ext_phy_link_up = 0, cur_link_up; + elink_status_t rc = ELINK_STATUS_OK; + uint16_t ext_phy_line_speed = 0, prev_line_speed = vars->line_speed; + uint8_t active_external_phy = ELINK_INT_PHY; + vars->phy_flags &= ~PHY_HALF_OPEN_CONN_FLAG; + vars->link_status &= ~ELINK_LINK_UPDATE_MASK; + for (phy_index = ELINK_INT_PHY; phy_index < params->num_phys; + phy_index++) { + phy_vars[phy_index].flow_ctrl = 0; + phy_vars[phy_index].link_status = 0; + phy_vars[phy_index].line_speed = 0; + phy_vars[phy_index].duplex = DUPLEX_FULL; + phy_vars[phy_index].phy_link_up = 0; + phy_vars[phy_index].link_up = 0; + phy_vars[phy_index].fault_detected = 0; + /* different consideration, since vars holds inner state */ + phy_vars[phy_index].eee_status = vars->eee_status; + } + + if (USES_WARPCORE(sc)) + elink_set_aer_mmd(params, ¶ms->phy[ELINK_INT_PHY]); + + ELINK_DEBUG_P3(sc, "port %x, XGXS?%x, int_status 0x%x", + port, (vars->phy_flags & PHY_XGXS_FLAG), + REG_RD(sc, NIG_REG_STATUS_INTERRUPT_PORT0 + port * 4)); + + ELINK_DEBUG_P3(sc, "int_mask 0x%x MI_INT %x, SERDES_LINK %x", + REG_RD(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port * 4), + REG_RD(sc, NIG_REG_EMAC0_STATUS_MISC_MI_INT + port * 0x18) > 0, + REG_RD(sc, NIG_REG_SERDES0_STATUS_LINK_STATUS + port * 0x3c)); + + ELINK_DEBUG_P2(sc, " 10G %x, XGXS_LINK %x", + REG_RD(sc, NIG_REG_XGXS0_STATUS_LINK10G + port * 0x68), + REG_RD(sc, NIG_REG_XGXS0_STATUS_LINK_STATUS + port * 0x68)); + + /* Disable emac */ + if (!CHIP_IS_E3(sc)) + REG_WR(sc, NIG_REG_NIG_EMAC0_EN + port * 4, 0); + + /* Step 1: + * Check external link change only for external phys, and apply + * priority selection between them in case the link on both phys + * is up. Note that instead of the common vars, a temporary + * vars argument is used since each phy may have different link/ + * speed/duplex result + */ + for (phy_index = ELINK_EXT_PHY1; phy_index < params->num_phys; + phy_index++) { + struct elink_phy *phy = ¶ms->phy[phy_index]; + if (!phy->read_status) + continue; + /* Read link status and params of this ext phy */ + cur_link_up = phy->read_status(phy, params, + &phy_vars[phy_index]); + if (cur_link_up) { + ELINK_DEBUG_P1(sc, "phy in index %d link is up", + phy_index); + } else { + ELINK_DEBUG_P1(sc, "phy in index %d link is down", + phy_index); + continue; + } + + if (!ext_phy_link_up) { + ext_phy_link_up = 1; + active_external_phy = phy_index; + } else { + switch (elink_phy_selection(params)) { + case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT: + case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY: + /* In this option, the first PHY makes sure to pass the + * traffic through itself only. + * Its not clear how to reset the link on the second phy + */ + active_external_phy = ELINK_EXT_PHY1; + break; + case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY: + /* In this option, the first PHY makes sure to pass the + * traffic through the second PHY. 
+ */ + active_external_phy = ELINK_EXT_PHY2; + break; + default: + /* Link indication on both PHYs with the following cases + * is invalid: + * - FIRST_PHY means that second phy wasn't initialized, + * hence its link is expected to be down + * - SECOND_PHY means that first phy should not be able + * to link up by itself (using configuration) + * - DEFAULT should be overridden during initialiazation + */ + ELINK_DEBUG_P1(sc, "Invalid link indication" + " mpc=0x%x. DISABLING LINK !!!", + params->multi_phy_config); + ext_phy_link_up = 0; + break; + } + } + } + prev_line_speed = vars->line_speed; + /* Step 2: + * Read the status of the internal phy. In case of + * DIRECT_SINGLE_MEDIA board, this link is the external link, + * otherwise this is the link between the 577xx and the first + * external phy + */ + if (params->phy[ELINK_INT_PHY].read_status) + params->phy[ELINK_INT_PHY].read_status( + ¶ms->phy[ELINK_INT_PHY], + params, vars); + /* The INT_PHY flow control reside in the vars. This include the + * case where the speed or flow control are not set to AUTO. + * Otherwise, the active external phy flow control result is set + * to the vars. The ext_phy_line_speed is needed to check if the + * speed is different between the internal phy and external phy. + * This case may be result of intermediate link speed change. + */ + if (active_external_phy > ELINK_INT_PHY) { + vars->flow_ctrl = phy_vars[active_external_phy].flow_ctrl; + /* Link speed is taken from the XGXS. AN and FC result from + * the external phy. + */ + vars->link_status |= phy_vars[active_external_phy].link_status; + + /* if active_external_phy is first PHY and link is up - disable + * disable TX on second external PHY + */ + if (active_external_phy == ELINK_EXT_PHY1) { + if (params->phy[ELINK_EXT_PHY2].phy_specific_func) { + ELINK_DEBUG_P0(sc, + "Disabling TX on EXT_PHY2"); + params->phy[ELINK_EXT_PHY2].phy_specific_func( + ¶ms->phy[ELINK_EXT_PHY2], + params, ELINK_DISABLE_TX); + } + } + + ext_phy_line_speed = phy_vars[active_external_phy].line_speed; + vars->duplex = phy_vars[active_external_phy].duplex; + if (params->phy[active_external_phy].supported & + ELINK_SUPPORTED_FIBRE) + vars->link_status |= LINK_STATUS_SERDES_LINK; + else + vars->link_status &= ~LINK_STATUS_SERDES_LINK; + + vars->eee_status = phy_vars[active_external_phy].eee_status; + + ELINK_DEBUG_P1(sc, "Active external phy selected: %x", + active_external_phy); + } + + ELINK_DEBUG_P3(sc, "vars : phy_flags = %x, mac_type = %x, phy_link_up = %x", + vars->phy_flags, vars->mac_type, vars->phy_link_up); + ELINK_DEBUG_P3(sc, "vars : link_up = %x, line_speed = %x, duplex = %x", + vars->link_up, vars->line_speed, vars->duplex); + ELINK_DEBUG_P3(sc, "vars : flow_ctrl = %x, ieee_fc = %x, link_status = %x", + vars->flow_ctrl, vars->ieee_fc, vars->link_status); + ELINK_DEBUG_P3(sc, "vars : eee_status = %x, fault_detected = %x, check_kr2_recovery_cnt = %x", + vars->eee_status, vars->fault_detected, + vars->check_kr2_recovery_cnt); + ELINK_DEBUG_P3(sc, "vars : periodic_flags = %x, aeu_int_mask = %x, rx_tx_asic_rst = %x", + vars->periodic_flags, vars->aeu_int_mask, + vars->rx_tx_asic_rst); + ELINK_DEBUG_P2(sc, "vars : turn_to_run_wc_rt = %x, rsrv2 = %x", + vars->turn_to_run_wc_rt, vars->rsrv2); + + for (phy_index = ELINK_EXT_PHY1; phy_index < params->num_phys; + phy_index++) { + if (params->phy[phy_index].flags & + ELINK_FLAGS_REARM_LATCH_SIGNAL) { + elink_rearm_latch_signal(sc, port, + phy_index == + active_external_phy); + break; + } + } + ELINK_DEBUG_P3(sc, "vars->flow_ctrl = 
0x%x, vars->link_status = 0x%x," + " ext_phy_line_speed = %d", vars->flow_ctrl, + vars->link_status, ext_phy_line_speed); + /* Upon link speed change set the NIG into drain mode. Comes to + * deals with possible FIFO glitch due to clk change when speed + * is decreased without link down indicator + */ + + if (vars->phy_link_up) { + if (!(ELINK_SINGLE_MEDIA_DIRECT(params)) && ext_phy_link_up && + (ext_phy_line_speed != vars->line_speed)) { + ELINK_DEBUG_P2(sc, "Internal link speed %d is" + " different than the external" + " link speed %d", vars->line_speed, + ext_phy_line_speed); + vars->phy_link_up = 0; + ELINK_DEBUG_P0(sc, "phy_link_up set to 0"); + } else if (prev_line_speed != vars->line_speed) { + REG_WR(sc, NIG_REG_EGRESS_DRAIN0_MODE + + params->port * 4, 0); + DELAY(1000 * 1); + } + } + + /* Anything 10 and over uses the bmac */ + link_10g_plus = (vars->line_speed >= ELINK_SPEED_10000); + + elink_link_int_ack(params, vars, link_10g_plus); + + /* In case external phy link is up, and internal link is down + * (not initialized yet probably after link initialization, it + * needs to be initialized. + * Note that after link down-up as result of cable plug, the xgxs + * link would probably become up again without the need + * initialize it + */ + if (!(ELINK_SINGLE_MEDIA_DIRECT(params))) { + ELINK_DEBUG_P3(sc, "ext_phy_link_up = %d, int_link_up = %d," + " init_preceding = %d", ext_phy_link_up, + vars->phy_link_up, + params->phy[ELINK_EXT_PHY1].flags & + ELINK_FLAGS_INIT_XGXS_FIRST); + if (!(params->phy[ELINK_EXT_PHY1].flags & + ELINK_FLAGS_INIT_XGXS_FIRST) + && ext_phy_link_up && !vars->phy_link_up) { + vars->line_speed = ext_phy_line_speed; + if (vars->line_speed < ELINK_SPEED_1000) + vars->phy_flags |= PHY_SGMII_FLAG; + else + vars->phy_flags &= ~PHY_SGMII_FLAG; + + if (params->phy[ELINK_INT_PHY].config_init) + params->phy[ELINK_INT_PHY].config_init( + ¶ms->phy[ELINK_INT_PHY], params, + vars); + } + } + /* Link is up only if both local phy and external phy (in case of + * non-direct board) are up and no fault detected on active PHY. 
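The link-up decision made at the start of the next hunk combines three independent conditions. A minimal standalone restatement of that predicate, using plain int flags in place of the driver's params/vars fields (the names below are illustrative, not the elink API):

#include <stdio.h>

/* Illustrative only: returns 1 if the overall link should be reported up.
 * single_media_direct stands in for ELINK_SINGLE_MEDIA_DIRECT(params), the
 * case where the internal PHY drives the media and the external PHY state
 * is irrelevant.
 */
static int link_is_up(int phy_link_up, int ext_phy_link_up,
                      int single_media_direct, int fault_detected)
{
        return phy_link_up &&
               (ext_phy_link_up || single_media_direct) &&
               !fault_detected;
}

int main(void)
{
        /* Direct-media board: external PHY state does not matter. */
        printf("%d\n", link_is_up(1, 0, 1, 0));  /* prints 1 */
        /* Dual-media board with a fault detected on the active PHY. */
        printf("%d\n", link_is_up(1, 1, 0, 1));  /* prints 0 */
        return 0;
}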
+ */ + vars->link_up = (vars->phy_link_up && + (ext_phy_link_up || + ELINK_SINGLE_MEDIA_DIRECT(params)) && + (phy_vars[active_external_phy].fault_detected == 0)); + + if (vars->link_up) + ELINK_DEBUG_P0(sc, "local phy and external phy are up"); + else + ELINK_DEBUG_P0(sc, "either local phy or external phy or both are down"); + + /* Update the PFC configuration in case it was changed */ + if (params->feature_config_flags & ELINK_FEATURE_CONFIG_PFC_ENABLED) + vars->link_status |= LINK_STATUS_PFC_ENABLED; + else + vars->link_status &= ~LINK_STATUS_PFC_ENABLED; + + if (vars->link_up) + rc = elink_update_link_up(params, vars, link_10g_plus); + else + rc = elink_update_link_down(params, vars); + + if ((prev_link_status ^ vars->link_status) & LINK_STATUS_LINK_UP) + elink_chng_link_count(params, 0); + + /* Update MCP link status was changed */ + if (params->feature_config_flags & + ELINK_FEATURE_CONFIG_BC_SUPPORTS_AFEX) + elink_cb_fw_command(sc, DRV_MSG_CODE_LINK_STATUS_CHANGED, 0); + + return rc; +} + +/*****************************************************************************/ +/* External Phy section */ +/*****************************************************************************/ +void elink_ext_phy_hw_reset(struct bnx2x_softc *sc, uint8_t port) +{ + elink_cb_gpio_write(sc, MISC_REGISTERS_GPIO_1, + MISC_REGISTERS_GPIO_OUTPUT_LOW, port); + DELAY(1000 * 1); + elink_cb_gpio_write(sc, MISC_REGISTERS_GPIO_1, + MISC_REGISTERS_GPIO_OUTPUT_HIGH, port); +} + +static void elink_save_spirom_version(struct bnx2x_softc *sc, uint8_t port, + uint32_t spirom_ver, uint32_t ver_addr) +{ + ELINK_DEBUG_P3(sc, "FW version 0x%x:0x%x for port %d", + (uint16_t)(spirom_ver >> 16), (uint16_t)spirom_ver, port); + + if (ver_addr) + REG_WR(sc, ver_addr, spirom_ver); +} + +static void elink_save_bnx2x_spirom_ver(struct bnx2x_softc *sc, + struct elink_phy *phy, + uint8_t port) +{ + uint16_t fw_ver1, fw_ver2; + + elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, + MDIO_PMA_REG_ROM_VER1, &fw_ver1); + elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, + MDIO_PMA_REG_ROM_VER2, &fw_ver2); + elink_save_spirom_version(sc, port, (uint32_t)(fw_ver1 << 16 | fw_ver2), + phy->ver_addr); +} + +static void elink_ext_phy_10G_an_resolve(struct bnx2x_softc *sc, + struct elink_phy *phy, + struct elink_vars *vars) +{ + uint16_t val; + elink_cl45_read(sc, phy, + MDIO_AN_DEVAD, + MDIO_AN_REG_STATUS, &val); + elink_cl45_read(sc, phy, + MDIO_AN_DEVAD, + MDIO_AN_REG_STATUS, &val); + if (val & (1 << 5)) + vars->link_status |= LINK_STATUS_AUTO_NEGOTIATE_COMPLETE; + if ((val & (1 << 0)) == 0) + vars->link_status |= LINK_STATUS_PARALLEL_DETECTION_USED; +} + +/******************************************************************/ +/* common BNX2X8073/BNX2X8727 PHY SECTION */ +/******************************************************************/ +static void elink_8073_resolve_fc(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars) +{ + struct bnx2x_softc *sc = params->sc; + if (phy->req_line_speed == ELINK_SPEED_10 || + phy->req_line_speed == ELINK_SPEED_100) { + vars->flow_ctrl = phy->req_flow_ctrl; + return; + } + + if (elink_ext_phy_resolve_fc(phy, params, vars) && + (vars->flow_ctrl == ELINK_FLOW_CTRL_NONE)) { + uint16_t pause_result; + uint16_t ld_pause; /* local */ + uint16_t lp_pause; /* link partner */ + elink_cl45_read(sc, phy, + MDIO_AN_DEVAD, + MDIO_AN_REG_CL37_FC_LD, &ld_pause); + + elink_cl45_read(sc, phy, + MDIO_AN_DEVAD, + MDIO_AN_REG_CL37_FC_LP, &lp_pause); + pause_result = (ld_pause & + 
MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) >> 5; + pause_result |= (lp_pause & + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) >> 7; + + elink_pause_resolve(phy, params, vars, pause_result); + ELINK_DEBUG_P1(sc, "Ext PHY CL37 pause result 0x%x", + pause_result); + } +} +static elink_status_t elink_8073_8727_external_rom_boot(struct bnx2x_softc *sc, + struct elink_phy *phy, + uint8_t port) +{ + uint32_t count = 0; + uint16_t fw_ver1 = 0, fw_msgout; + elink_status_t rc = ELINK_STATUS_OK; + + /* Boot port from external ROM */ + /* EDC grst */ + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_GEN_CTRL, + 0x0001); + + /* Ucode reboot and rst */ + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_GEN_CTRL, + 0x008c); + + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_MISC_CTRL1, 0x0001); + + /* Reset internal microprocessor */ + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_GEN_CTRL, + MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET); + + /* Release srst bit */ + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_GEN_CTRL, + MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP); + + /* Delay 100ms per the PHY specifications */ + DELAY(1000 * 100); + + /* 8073 sometimes taking longer to download */ + do { + count++; + if (count > 300) { + ELINK_DEBUG_P2(sc, + "elink_8073_8727_external_rom_boot port %x:" + "Download failed. fw version = 0x%x", + port, fw_ver1); + rc = ELINK_STATUS_ERROR; + break; + } + + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_ROM_VER1, &fw_ver1); + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_M8051_MSGOUT_REG, &fw_msgout); + + DELAY(1000 * 1); + } while (fw_ver1 == 0 || fw_ver1 == 0x4321 || + ((fw_msgout & 0xff) != 0x03 && (phy->type == + PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8073))); + + /* Clear ser_boot_ctl bit */ + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_MISC_CTRL1, 0x0000); + elink_save_bnx2x_spirom_ver(sc, phy, port); + + ELINK_DEBUG_P2(sc, + "elink_8073_8727_external_rom_boot port %x:" + "Download complete. fw version = 0x%x", + port, fw_ver1); + + return rc; +} + +/******************************************************************/ +/* BNX2X8073 PHY SECTION */ +/******************************************************************/ +static elink_status_t elink_8073_is_snr_needed(struct bnx2x_softc *sc, + struct elink_phy *phy) +{ + /* This is only required for 8073A1, version 102 only */ + uint16_t val; + + /* Read 8073 HW revision*/ + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8073_CHIP_REV, &val); + + if (val != 1) { + /* No need to workaround in 8073 A1 */ + return ELINK_STATUS_OK; + } + + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_ROM_VER2, &val); + + /* SNR should be applied only for version 0x102 */ + if (val != 0x102) + return ELINK_STATUS_OK; + + return 1; +} + +static elink_status_t elink_8073_xaui_wa(struct bnx2x_softc *sc, + struct elink_phy *phy) +{ + uint16_t val, cnt, cnt1; + + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8073_CHIP_REV, &val); + + if (val > 0) { + /* No need to workaround in 8073 A1 */ + return ELINK_STATUS_OK; + } + /* XAUI workaround in 8073 A0: */ + + /* After loading the boot ROM and restarting Autoneg, poll + * Dev1, Reg $C820: + */ + + for (cnt = 0; cnt < 1000; cnt++) { + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8073_SPEED_LINK_STATUS, + &val); + /* If bit [14] = 0 or bit [13] = 0, continue on with + * system initialization (XAUI work-around not required, as + * these bits indicate 2.5G or 1G link up). 
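Both the external ROM-boot wait above and the XAUI workaround below follow the same bounded-poll pattern: read a status word, test a condition, delay, and give up after a fixed number of iterations. A generic sketch of that pattern under stated assumptions (the function pointers and the usleep-based delay are stand-ins, not the driver's MDIO clause-45 reads or DELAY macro):

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

/* Illustrative only: poll until ready(read_status(ctx)) holds, or give up
 * after max_iter reads.  Returns 0 on success, -1 on timeout.
 */
static int poll_until(uint16_t (*read_status)(void *ctx),
                      int (*ready)(uint16_t val), void *ctx,
                      unsigned int max_iter, unsigned int delay_us)
{
        for (unsigned int i = 0; i < max_iter; i++) {
                if (ready(read_status(ctx)))
                        return 0;
                usleep(delay_us);       /* stand-in for DELAY() */
        }
        return -1;                      /* caller logs the timeout */
}

/* Fake status source: reports "done" after a few reads. */
static uint16_t fake_read(void *ctx)
{
        int *reads = ctx;
        return ++(*reads) >= 5 ? 0x8000 : 0x0000;
}

static int msb_set(uint16_t v) { return (v & 0x8000) != 0; }

int main(void)
{
        int reads = 0;
        printf("result=%d\n", poll_until(fake_read, msb_set, &reads, 1000, 10));
        return 0;
}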
+ */ + if (!(val & (1 << 14)) || !(val & (1 << 13))) { + ELINK_DEBUG_P0(sc, "XAUI work-around not required"); + return ELINK_STATUS_OK; + } else if (!(val & (1 << 15))) { + ELINK_DEBUG_P0(sc, "bit 15 went off"); + /* If bit 15 is 0, then poll Dev1, Reg $C841 until it's + * MSB (bit15) goes to 1 (indicating that the XAUI + * workaround has completed), then continue on with + * system initialization. + */ + for (cnt1 = 0; cnt1 < 1000; cnt1++) { + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8073_XAUI_WA, &val); + if (val & (1 << 15)) { + ELINK_DEBUG_P0(sc, + "XAUI workaround has completed"); + return ELINK_STATUS_OK; + } + DELAY(1000 * 3); + } + break; + } + DELAY(1000 * 3); + } + ELINK_DEBUG_P0(sc, "Warning: XAUI work-around timeout !!!"); + return ELINK_STATUS_ERROR; +} + +static void elink_807x_force_10G(struct bnx2x_softc *sc, struct elink_phy *phy) +{ + /* Force KR or KX */ + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x2040); + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, 0x000b); + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_BCM_CTRL, 0x0000); + elink_cl45_write(sc, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x0000); +} + +static void elink_8073_set_pause_cl37(struct elink_params *params, + struct elink_phy *phy, + struct elink_vars *vars) +{ + uint16_t cl37_val; + struct bnx2x_softc *sc = params->sc; + elink_cl45_read(sc, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD, &cl37_val); + + cl37_val &= ~MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH; + /* Please refer to Table 28B-3 of 802.3ab-1999 spec. */ + elink_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc); + if ((vars->ieee_fc & + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC) == + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC) { + cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC; + } + if ((vars->ieee_fc & + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) == + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) { + cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC; + } + if ((vars->ieee_fc & + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) == + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) { + cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH; + } + ELINK_DEBUG_P1(sc, + "Ext phy AN advertize cl37 0x%x", cl37_val); + + elink_cl45_write(sc, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD, cl37_val); + DELAY(1000 * 500); +} + +static void elink_8073_specific_func(struct elink_phy *phy, + struct elink_params *params, + uint32_t action) +{ + struct bnx2x_softc *sc = params->sc; + switch (action) { + case ELINK_PHY_INIT: + /* Enable LASI */ + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, + (1 << 2)); + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x0004); + break; + } +} + +static uint8_t elink_8073_config_init(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars) +{ + struct bnx2x_softc *sc = params->sc; + uint16_t val = 0, tmp1; + uint8_t gpio_port; + ELINK_DEBUG_P0(sc, "Init 8073"); + + if (CHIP_IS_E2(sc)) + gpio_port = SC_PATH(sc); + else + gpio_port = params->port; + /* Restore normal power mode*/ + elink_cb_gpio_write(sc, MISC_REGISTERS_GPIO_2, + MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port); + + elink_cb_gpio_write(sc, MISC_REGISTERS_GPIO_1, + MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port); + + elink_8073_specific_func(phy, params, ELINK_PHY_INIT); + elink_8073_set_pause_cl37(params, phy, vars); + + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &tmp1); + + elink_cl45_read(sc, phy, 
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &tmp1); + + ELINK_DEBUG_P1(sc, "Before rom RX_ALARM(port1): 0x%x", tmp1); + + /* Swap polarity if required - Must be done only in non-1G mode */ + if (params->lane_config & PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED) { + /* Configure the 8073 to swap _P and _N of the KR lines */ + ELINK_DEBUG_P0(sc, "Swapping polarity for the 8073"); + /* 10G Rx/Tx and 1G Tx signal polarity swap */ + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8073_OPT_DIGITAL_CTRL, &val); + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8073_OPT_DIGITAL_CTRL, + (val | (3 << 9))); + } + + + /* Enable CL37 BAM */ + if (REG_RD(sc, params->shmem_base + + offsetof(struct shmem_region, dev_info. + port_hw_config[params->port].default_cfg)) & + PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED) { + + elink_cl45_read(sc, phy, + MDIO_AN_DEVAD, + MDIO_AN_REG_8073_BAM, &val); + elink_cl45_write(sc, phy, + MDIO_AN_DEVAD, + MDIO_AN_REG_8073_BAM, val | 1); + ELINK_DEBUG_P0(sc, "Enable CL37 BAM on KR"); + } + if (params->loopback_mode == ELINK_LOOPBACK_EXT) { + elink_807x_force_10G(sc, phy); + ELINK_DEBUG_P0(sc, "Forced speed 10G on 807X"); + return ELINK_STATUS_OK; + } else { + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_BCM_CTRL, 0x0002); + } + if (phy->req_line_speed != ELINK_SPEED_AUTO_NEG) { + if (phy->req_line_speed == ELINK_SPEED_10000) { + val = (1 << 7); + } else if (phy->req_line_speed == ELINK_SPEED_2500) { + val = (1 << 5); + /* Note that 2.5G works only when used with 1G + * advertisement + */ + } else + val = (1 << 5); + } else { + val = 0; + if (phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) + val |= (1 << 7); + + /* Note that 2.5G works only when used with 1G advertisement */ + if (phy->speed_cap_mask & + (PORT_HW_CFG_SPEED_CAPABILITY_D0_1G | + PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)) + val |= (1 << 5); + ELINK_DEBUG_P1(sc, "807x autoneg val = 0x%x", val); + } + + elink_cl45_write(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV, val); + elink_cl45_read(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_8073_2_5G, &tmp1); + + if (((phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G) && + (phy->req_line_speed == ELINK_SPEED_AUTO_NEG)) || + (phy->req_line_speed == ELINK_SPEED_2500)) { + uint16_t phy_ver; + /* Allow 2.5G for A1 and above */ + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_8073_CHIP_REV, + &phy_ver); + ELINK_DEBUG_P0(sc, "Add 2.5G"); + if (phy_ver > 0) + tmp1 |= 1; + else + tmp1 &= 0xfffe; + } else { + ELINK_DEBUG_P0(sc, "Disable 2.5G"); + tmp1 &= 0xfffe; + } + + elink_cl45_write(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_8073_2_5G, tmp1); + /* Add support for CL37 (passive mode) II */ + + elink_cl45_read(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD, &tmp1); + elink_cl45_write(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD, + (tmp1 | ((phy->req_duplex == DUPLEX_FULL) ? + 0x20 : 0x40))); + + /* Add support for CL37 (passive mode) III */ + elink_cl45_write(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000); + + /* The SNR will improve about 2db by changing BW and FEE main + * tap. 
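The advertisement word assembled a few lines above encodes 10G in bit 7 and 1G in bit 5, with 2.5G riding on the 1G advertisement. A compact restatement of that mapping, with locally defined capability flags standing in for the PORT_HW_CFG_SPEED_CAPABILITY_* masks (only the two bit positions are taken from the code above; everything else is illustrative):

#include <stdint.h>
#include <stdio.h>

/* Illustrative capability flags, not the shmem definitions. */
#define CAP_1G    (1u << 0)
#define CAP_2_5G  (1u << 1)
#define CAP_10G   (1u << 2)

/* Build the autoneg advertisement word: bit 7 advertises 10G,
 * bit 5 advertises 1G (2.5G is only usable together with the 1G bit).
 */
static uint16_t build_adv(uint32_t speed_cap_mask)
{
        uint16_t adv = 0;

        if (speed_cap_mask & CAP_10G)
                adv |= (1 << 7);
        if (speed_cap_mask & (CAP_1G | CAP_2_5G))
                adv |= (1 << 5);
        return adv;
}

int main(void)
{
        printf("0x%x\n", build_adv(CAP_1G | CAP_10G));  /* prints 0xa0 */
        return 0;
}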
Rest commands are executed after link is up + * Change FFE main cursor to 5 in EDC register + */ + if (elink_8073_is_snr_needed(sc, phy)) + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_EDC_FFE_MAIN, + 0xFB0C); + + /* Enable FEC (Forware Error Correction) Request in the AN */ + elink_cl45_read(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV2, &tmp1); + tmp1 |= (1 << 15); + elink_cl45_write(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV2, tmp1); + + elink_ext_phy_set_pause(params, phy, vars); + + /* Restart autoneg */ + DELAY(1000 * 500); + elink_cl45_write(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200); + ELINK_DEBUG_P2(sc, "807x Autoneg Restart: Advertise 1G=%x, 10G=%x", + ((val & (1 << 5)) > 0), ((val & (1 << 7)) > 0)); + return ELINK_STATUS_OK; +} + +static uint8_t elink_8073_read_status(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars) +{ + struct bnx2x_softc *sc = params->sc; + uint8_t link_up = 0; + uint16_t val1, val2; + uint16_t link_status = 0; + uint16_t an1000_status = 0; + + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1); + + ELINK_DEBUG_P1(sc, "8703 LASI status 0x%x", val1); + + /* Clear the interrupt LASI status register */ + elink_cl45_read(sc, phy, + MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &val2); + elink_cl45_read(sc, phy, + MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &val1); + ELINK_DEBUG_P2(sc, "807x PCS status 0x%x->0x%x", val2, val1); + /* Clear MSG-OUT */ + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &val1); + + /* Check the LASI */ + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &val2); + + ELINK_DEBUG_P1(sc, "KR 0x9003 0x%x", val2); + + /* Check the link status */ + elink_cl45_read(sc, phy, + MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &val2); + ELINK_DEBUG_P1(sc, "KR PCS status 0x%x", val2); + + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val2); + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val1); + link_up = ((val1 & 4) == 4); + ELINK_DEBUG_P1(sc, "PMA_REG_STATUS=0x%x", val1); + + if (link_up && + ((phy->req_line_speed != ELINK_SPEED_10000))) { + if (elink_8073_xaui_wa(sc, phy) != 0) + return 0; + } + elink_cl45_read(sc, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS, &an1000_status); + elink_cl45_read(sc, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS, &an1000_status); + + /* Check the link status on 1.1.2 */ + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val2); + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val1); + ELINK_DEBUG_P3(sc, "KR PMA status 0x%x->0x%x," + "an_link_status=0x%x", val2, val1, an1000_status); + + link_up = (((val1 & 4) == 4) || (an1000_status & (1 << 1))); + if (link_up && elink_8073_is_snr_needed(sc, phy)) { + /* The SNR will improve about 2dbby changing the BW and FEE main + * tap. The 1st write to change FFE main tap is set before + * restart AN. 
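Several status registers in this read-status path (LASI status, PCS status, PMA status, AN link status) are read twice in a row because they latch events: the first read returns and clears the latched value, the second returns the live state. A minimal sketch of that convention, with a callback standing in for the clause-45 read:

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: for a latching status register the first read reports
 * (and clears) anything captured since the last read; the second read then
 * reflects the current state, which is usually what the caller wants.
 */
static uint16_t read_latched_current(uint16_t (*rd)(void *ctx, uint16_t reg),
                                     void *ctx, uint16_t reg)
{
        (void)rd(ctx, reg);     /* discard: clears latched event bits */
        return rd(ctx, reg);    /* live value */
}

/* Fake register: a stale event bit (bit 0) that is clear-on-read. */
static uint16_t fake_rd(void *ctx, uint16_t reg)
{
        uint16_t *latch = ctx;
        uint16_t v = *latch;
        (void)reg;
        *latch &= ~0x0001;
        return v;
}

int main(void)
{
        uint16_t latch = 0x0005;        /* stale event + current state bit */
        printf("0x%x\n", read_latched_current(fake_rd, &latch, 0)); /* 0x4 */
        return 0;
}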
Change PLL Bandwidth in EDC register + */ + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_PLL_BANDWIDTH, + 0x26BC); + + /* Change CDR Bandwidth in EDC register */ + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_CDR_BANDWIDTH, + 0x0333); + } + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_8073_SPEED_LINK_STATUS, + &link_status); + + /* Bits 0..2 --> speed detected, bits 13..15--> link is down */ + if ((link_status & (1 << 2)) && (!(link_status & (1 << 15)))) { + link_up = 1; + vars->line_speed = ELINK_SPEED_10000; + ELINK_DEBUG_P1(sc, "port %x: External link up in 10G", + params->port); + } else if ((link_status & (1 << 1)) && (!(link_status & (1 << 14)))) { + link_up = 1; + vars->line_speed = ELINK_SPEED_2500; + ELINK_DEBUG_P1(sc, "port %x: External link up in 2.5G", + params->port); + } else if ((link_status & (1 << 0)) && (!(link_status & (1 << 13)))) { + link_up = 1; + vars->line_speed = ELINK_SPEED_1000; + ELINK_DEBUG_P1(sc, "port %x: External link up in 1G", + params->port); + } else { + link_up = 0; + ELINK_DEBUG_P1(sc, "port %x: External link is down", + params->port); + } + + if (link_up) { + /* Swap polarity if required */ + if (params->lane_config & + PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED) { + /* Configure the 8073 to swap P and N of the KR lines */ + elink_cl45_read(sc, phy, + MDIO_XS_DEVAD, + MDIO_XS_REG_8073_RX_CTRL_PCIE, &val1); + /* Set bit 3 to invert Rx in 1G mode and clear this bit + * when it`s in 10G mode. + */ + if (vars->line_speed == ELINK_SPEED_1000) { + ELINK_DEBUG_P0(sc, "Swapping 1G polarity for" + " the 8073"); + val1 |= (1 << 3); + } else + val1 &= ~(1 << 3); + + elink_cl45_write(sc, phy, + MDIO_XS_DEVAD, + MDIO_XS_REG_8073_RX_CTRL_PCIE, + val1); + } + elink_ext_phy_10G_an_resolve(sc, phy, vars); + elink_8073_resolve_fc(phy, params, vars); + vars->duplex = DUPLEX_FULL; + } + + if (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) { + elink_cl45_read(sc, phy, MDIO_AN_DEVAD, + MDIO_AN_REG_LP_AUTO_NEG2, &val1); + + if (val1 & (1 << 5)) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE; + if (val1 & (1 << 7)) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE; + } + + return link_up; +} + +static void elink_8073_link_reset(__rte_unused struct elink_phy *phy, + struct elink_params *params) +{ + struct bnx2x_softc *sc = params->sc; + uint8_t gpio_port; + if (CHIP_IS_E2(sc)) + gpio_port = SC_PATH(sc); + else + gpio_port = params->port; + ELINK_DEBUG_P1(sc, "Setting 8073 port %d into low power mode", + gpio_port); + elink_cb_gpio_write(sc, MISC_REGISTERS_GPIO_2, + MISC_REGISTERS_GPIO_OUTPUT_LOW, + gpio_port); +} + +/******************************************************************/ +/* BNX2X8705 PHY SECTION */ +/******************************************************************/ +static uint8_t elink_8705_config_init(struct elink_phy *phy, + struct elink_params *params, + __rte_unused struct elink_vars *vars) +{ + struct bnx2x_softc *sc = params->sc; + ELINK_DEBUG_P0(sc, "init 8705"); + /* Restore normal power mode*/ + elink_cb_gpio_write(sc, MISC_REGISTERS_GPIO_2, + MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port); + /* HW reset */ + elink_ext_phy_hw_reset(sc, params->port); + elink_cl45_write(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0xa040); + elink_wait_reset_complete(sc, phy, params); + + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_MISC_CTRL, 0x8288); + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, 0x7fbf); + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, 
MDIO_PMA_REG_CMU_PLL_BYPASS, 0x0100); + elink_cl45_write(sc, phy, + MDIO_WIS_DEVAD, MDIO_WIS_REG_LASI_CNTL, 0x1); + /* BNX2X8705 doesn't have microcode, hence the 0 */ + elink_save_spirom_version(sc, params->port, params->shmem_base, 0); + return ELINK_STATUS_OK; +} + +static uint8_t elink_8705_read_status(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars) +{ + uint8_t link_up = 0; + uint16_t val1, rx_sd; + struct bnx2x_softc *sc = params->sc; + ELINK_DEBUG_P0(sc, "read status 8705"); + elink_cl45_read(sc, phy, + MDIO_WIS_DEVAD, MDIO_WIS_REG_LASI_STATUS, &val1); + ELINK_DEBUG_P1(sc, "8705 LASI status 0x%x", val1); + + elink_cl45_read(sc, phy, + MDIO_WIS_DEVAD, MDIO_WIS_REG_LASI_STATUS, &val1); + ELINK_DEBUG_P1(sc, "8705 LASI status 0x%x", val1); + + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_SD, &rx_sd); + + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, 0xc809, &val1); + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, 0xc809, &val1); + + ELINK_DEBUG_P1(sc, "8705 1.c809 val=0x%x", val1); + link_up = ((rx_sd & 0x1) && (val1 & (1 << 9)) && + ((val1 & (1 << 8)) == 0)); + if (link_up) { + vars->line_speed = ELINK_SPEED_10000; + elink_ext_phy_resolve_fc(phy, params, vars); + } + return link_up; +} + +/******************************************************************/ +/* SFP+ module Section */ +/******************************************************************/ +static void elink_set_disable_pmd_transmit(struct elink_params *params, + struct elink_phy *phy, + uint8_t pmd_dis) +{ + struct bnx2x_softc *sc = params->sc; + /* Disable transmitter only for bootcodes which can enable it afterwards + * (for D3 link) + */ + if (pmd_dis) { + if (params->feature_config_flags & + ELINK_FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED) { + ELINK_DEBUG_P0(sc, "Disabling PMD transmitter"); + } else { + ELINK_DEBUG_P0(sc, "NOT disabling PMD transmitter"); + return; + } + } else + ELINK_DEBUG_P0(sc, "Enabling PMD transmitter"); + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_TX_DISABLE, pmd_dis); +} + +static uint8_t elink_get_gpio_port(struct elink_params *params) +{ + uint8_t gpio_port; + uint32_t swap_val, swap_override; + struct bnx2x_softc *sc = params->sc; + if (CHIP_IS_E2(sc)) + gpio_port = SC_PATH(sc); + else + gpio_port = params->port; + swap_val = REG_RD(sc, NIG_REG_PORT_SWAP); + swap_override = REG_RD(sc, NIG_REG_STRAP_OVERRIDE); + return gpio_port ^ (swap_val && swap_override); +} + +static void elink_sfp_e1e2_set_transmitter(struct elink_params *params, + struct elink_phy *phy, + uint8_t tx_en) +{ + uint16_t val; + uint8_t port = params->port; + struct bnx2x_softc *sc = params->sc; + uint32_t tx_en_mode; + + /* Disable/Enable transmitter ( TX laser of the SFP+ module.)*/ + tx_en_mode = REG_RD(sc, params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[port].sfp_ctrl)) & + PORT_HW_CFG_TX_LASER_MASK; + ELINK_DEBUG_P3(sc, "Setting transmitter tx_en=%x for port %x " + "mode = %x", tx_en, port, tx_en_mode); + switch (tx_en_mode) { + case PORT_HW_CFG_TX_LASER_MDIO: + + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_PHY_IDENTIFIER, + &val); + + if (tx_en) + val &= ~(1 << 15); + else + val |= (1 << 15); + + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_PHY_IDENTIFIER, + val); + break; + case PORT_HW_CFG_TX_LASER_GPIO0: + case PORT_HW_CFG_TX_LASER_GPIO1: + case PORT_HW_CFG_TX_LASER_GPIO2: + case PORT_HW_CFG_TX_LASER_GPIO3: + { + uint16_t gpio_pin; + uint8_t gpio_port, gpio_mode; + if (tx_en) + gpio_mode = 
MISC_REGISTERS_GPIO_OUTPUT_HIGH; + else + gpio_mode = MISC_REGISTERS_GPIO_OUTPUT_LOW; + + gpio_pin = tx_en_mode - PORT_HW_CFG_TX_LASER_GPIO0; + gpio_port = elink_get_gpio_port(params); + elink_cb_gpio_write(sc, gpio_pin, gpio_mode, gpio_port); + break; + } + default: + ELINK_DEBUG_P1(sc, "Invalid TX_LASER_MDIO 0x%x", tx_en_mode); + break; + } +} + +static void elink_sfp_set_transmitter(struct elink_params *params, + struct elink_phy *phy, + uint8_t tx_en) +{ + struct bnx2x_softc *sc = params->sc; + ELINK_DEBUG_P1(sc, "Setting SFP+ transmitter to %d", tx_en); + if (CHIP_IS_E3(sc)) + elink_sfp_e3_set_transmitter(params, phy, tx_en); + else + elink_sfp_e1e2_set_transmitter(params, phy, tx_en); +} + +static elink_status_t elink_8726_read_sfp_module_eeprom(struct elink_phy *phy, + struct elink_params *params, + uint8_t dev_addr, uint16_t addr, + uint8_t byte_cnt, + uint8_t *o_buf, __rte_unused uint8_t is_init) +{ + struct bnx2x_softc *sc = params->sc; + uint16_t val = 0; + uint16_t i; + if (byte_cnt > ELINK_SFP_EEPROM_PAGE_SIZE) { + ELINK_DEBUG_P0(sc, + "Reading from eeprom is limited to 0xf"); + return ELINK_STATUS_ERROR; + } + /* Set the read command byte count */ + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT, + (byte_cnt | (dev_addr << 8))); + + /* Set the read command address */ + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR, + addr); + + /* Activate read command */ + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, + 0x2c0f); + + /* Wait up to 500us for command complete status */ + for (i = 0; i < 100; i++) { + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val); + if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) == + MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE) + break; + DELAY(5); + } + + if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) != + MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE) { + ELINK_DEBUG_P1(sc, + "Got bad status 0x%x when reading from SFP+ EEPROM", + (val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK)); + return ELINK_STATUS_ERROR; + } + + /* Read the buffer */ + for (i = 0; i < byte_cnt; i++) { + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8726_TWO_WIRE_DATA_BUF + i, &val); + o_buf[i] = (uint8_t) + (val & MDIO_PMA_REG_8726_TWO_WIRE_DATA_MASK); + } + + for (i = 0; i < 100; i++) { + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val); + if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) == + MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE) + return ELINK_STATUS_OK; + DELAY(1000 * 1); + } + return ELINK_STATUS_ERROR; +} + +static void elink_warpcore_power_module(struct elink_params *params, + uint8_t power) +{ + uint32_t pin_cfg; + struct bnx2x_softc *sc = params->sc; + + pin_cfg = (REG_RD(sc, params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[params->port].e3_sfp_ctrl)) & + PORT_HW_CFG_E3_PWR_DIS_MASK) >> + PORT_HW_CFG_E3_PWR_DIS_SHIFT; + + if (pin_cfg == PIN_CFG_NA) + return; + ELINK_DEBUG_P2(sc, "Setting SFP+ module power to %d using pin cfg %d", + power, pin_cfg); + /* Low ==> corresponding SFP+ module is powered + * high ==> the SFP+ module is powered down + */ + elink_set_cfg_pin(sc, pin_cfg, power ^ 1); +} +static elink_status_t elink_warpcore_read_sfp_module_eeprom( + __rte_unused struct elink_phy *phy, + struct elink_params *params, + uint8_t dev_addr, + uint16_t addr, + uint8_t byte_cnt, + uint8_t *o_buf, + uint8_t is_init) +{ + elink_status_t rc = ELINK_STATUS_OK; + 
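The warpcore EEPROM read continues below by rounding the requested address down to a 32-bit boundary, reading whole words, and then copying the requested bytes starting at the intra-word offset. A standalone sketch of just that extraction step (the word buffer here is pre-filled test data rather than an actual BSC read, and the byte order simply follows however the words were packed by the reader):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative only: copy byte_cnt bytes that begin at unaligned address
 * 'addr' out of a buffer of 32-bit words that was read starting from the
 * rounded-down address (addr & ~0x3).
 */
static void extract_bytes(const uint32_t *words, uint16_t addr,
                          uint8_t byte_cnt, uint8_t *out)
{
        uint16_t offset = addr & 0x3;   /* same as addr - (addr & ~0x3) */

        memcpy(out, (const uint8_t *)words + offset, byte_cnt);
}

int main(void)
{
        /* Pretend these words were read starting at aligned offset 0x14. */
        uint32_t words[4] = { 0x44332211, 0x88776655, 0, 0 };
        uint8_t buf[3];

        extract_bytes(words, 0x15, sizeof(buf), buf);   /* bytes 0x15..0x17 */
        printf("%02x %02x %02x\n", buf[0], buf[1], buf[2]);
        return 0;
}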
uint8_t i, j = 0, cnt = 0; + uint32_t data_array[4]; + uint16_t addr32; + struct bnx2x_softc *sc = params->sc; + + if (byte_cnt > ELINK_SFP_EEPROM_PAGE_SIZE) { + ELINK_DEBUG_P0(sc, + "Reading from eeprom is limited to 16 bytes"); + return ELINK_STATUS_ERROR; + } + + /* 4 byte aligned address */ + addr32 = addr & (~0x3); + do { + if ((!is_init) && (cnt == I2C_WA_PWR_ITER)) { + elink_warpcore_power_module(params, 0); + /* Note that 100us are not enough here */ + DELAY(1000 * 1); + elink_warpcore_power_module(params, 1); + } + + elink_bsc_module_sel(params); + rc = elink_bsc_read(sc, dev_addr, addr32, 0, byte_cnt, + data_array); + } while ((rc != ELINK_STATUS_OK) && (++cnt < I2C_WA_RETRY_CNT)); + + if (rc == ELINK_STATUS_OK) { + for (i = (addr - addr32); i < byte_cnt + (addr - addr32); i++) { + o_buf[j] = *((uint8_t *)data_array + i); + j++; + } + } + + return rc; +} + +static elink_status_t elink_8727_read_sfp_module_eeprom(struct elink_phy *phy, + struct elink_params *params, + uint8_t dev_addr, uint16_t addr, + uint8_t byte_cnt, + uint8_t *o_buf, + __rte_unused uint8_t is_init) +{ + struct bnx2x_softc *sc = params->sc; + uint16_t val, i; + + if (byte_cnt > ELINK_SFP_EEPROM_PAGE_SIZE) { + ELINK_DEBUG_P0(sc, + "Reading from eeprom is limited to 0xf"); + return ELINK_STATUS_ERROR; + } + + /* Set 2-wire transfer rate of SFP+ module EEPROM + * to 100Khz since some DACs(direct attached cables) do + * not work at 400Khz. + */ + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8727_TWO_WIRE_SLAVE_ADDR, + ((dev_addr << 8) | 1)); + + /* Need to read from 1.8000 to clear it */ + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, + &val); + + /* Set the read command byte count */ + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT, + ((byte_cnt < 2) ? 
2 : byte_cnt)); + + /* Set the read command address */ + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR, + addr); + /* Set the destination address */ + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + 0x8004, + MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF); + + /* Activate read command */ + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, + 0x8002); + /* Wait appropriate time for two-wire command to finish before + * polling the status register + */ + DELAY(1000 * 1); + + /* Wait up to 500us for command complete status */ + for (i = 0; i < 100; i++) { + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val); + if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) == + MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE) + break; + DELAY(5); + } + + if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) != + MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE) { + ELINK_DEBUG_P1(sc, + "Got bad status 0x%x when reading from SFP+ EEPROM", + (val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK)); + return ELINK_STATUS_TIMEOUT; + } + + /* Read the buffer */ + for (i = 0; i < byte_cnt; i++) { + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF + i, &val); + o_buf[i] = (uint8_t) + (val & MDIO_PMA_REG_8727_TWO_WIRE_DATA_MASK); + } + + for (i = 0; i < 100; i++) { + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val); + if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) == + MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE) + return ELINK_STATUS_OK; + DELAY(1000 * 1); + } + + return ELINK_STATUS_ERROR; +} +elink_status_t elink_read_sfp_module_eeprom(struct elink_phy *phy, + struct elink_params *params, uint8_t dev_addr, + uint16_t addr, uint16_t byte_cnt, + uint8_t *o_buf) +{ + elink_status_t rc = 0; + struct bnx2x_softc *sc = params->sc; + uint8_t xfer_size; + uint8_t *user_data = o_buf; + read_sfp_module_eeprom_func_p read_func; + if ((dev_addr != 0xa0) && (dev_addr != 0xa2)) { + ELINK_DEBUG_P1(sc, "invalid dev_addr 0x%x", dev_addr); + return ELINK_STATUS_ERROR; + } + + switch (phy->type) { + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8726: + read_func = elink_8726_read_sfp_module_eeprom; + break; + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8727: + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8722: + read_func = elink_8727_read_sfp_module_eeprom; + break; + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT: + read_func = elink_warpcore_read_sfp_module_eeprom; + break; + default: + return ELINK_OP_NOT_SUPPORTED; + } + + while (!rc && (byte_cnt > 0)) { + xfer_size = (byte_cnt > ELINK_SFP_EEPROM_PAGE_SIZE) ? 
+ ELINK_SFP_EEPROM_PAGE_SIZE : byte_cnt; + rc = read_func(phy, params, dev_addr, addr, xfer_size, + user_data, 0); + byte_cnt -= xfer_size; + user_data += xfer_size; + addr += xfer_size; + } + return rc; +} + +static elink_status_t elink_get_edc_mode(struct elink_phy *phy, + struct elink_params *params, + uint16_t *edc_mode) +{ + struct bnx2x_softc *sc = params->sc; + uint32_t sync_offset = 0, phy_idx, media_types; + uint8_t val[ELINK_SFP_EEPROM_FC_TX_TECH_ADDR + 1]; + uint8_t check_limiting_mode = 0; + *edc_mode = ELINK_EDC_MODE_LIMITING; + phy->media_type = ELINK_ETH_PHY_UNSPECIFIED; + /* First check for copper cable */ + if (elink_read_sfp_module_eeprom(phy, + params, + ELINK_I2C_DEV_ADDR_A0, + 0, + ELINK_SFP_EEPROM_FC_TX_TECH_ADDR + 1, + (uint8_t *)val) != 0) { + ELINK_DEBUG_P0(sc, "Failed to read from SFP+ module EEPROM"); + return ELINK_STATUS_ERROR; + } + params->link_attr_sync &= ~LINK_SFP_EEPROM_COMP_CODE_MASK; + params->link_attr_sync |= val[ELINK_SFP_EEPROM_10G_COMP_CODE_ADDR] << + LINK_SFP_EEPROM_COMP_CODE_SHIFT; + elink_update_link_attr(params, params->link_attr_sync); + switch (val[ELINK_SFP_EEPROM_CON_TYPE_ADDR]) { + case ELINK_SFP_EEPROM_CON_TYPE_VAL_COPPER: + { + uint8_t copper_module_type; + phy->media_type = ELINK_ETH_PHY_DA_TWINAX; + /* Check if its active cable (includes SFP+ module) + * of passive cable + */ + copper_module_type = val[ELINK_SFP_EEPROM_FC_TX_TECH_ADDR]; + if (copper_module_type & + ELINK_SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE) { + ELINK_DEBUG_P0(sc, "Active Copper cable detected"); + if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) + *edc_mode = ELINK_EDC_MODE_ACTIVE_DAC; + else + check_limiting_mode = 1; + } else { + *edc_mode = ELINK_EDC_MODE_PASSIVE_DAC; + /* Even in case PASSIVE_DAC indication is not set, + * treat it as a passive DAC cable, since some cables + * don't have this indication. + */ + if (copper_module_type & + ELINK_SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE) { + ELINK_DEBUG_P0(sc, + "Passive Copper cable detected"); + } else { + ELINK_DEBUG_P0(sc, + "Unknown copper-cable-type"); + } + } + break; + } + case ELINK_SFP_EEPROM_CON_TYPE_VAL_UNKNOWN: + case ELINK_SFP_EEPROM_CON_TYPE_VAL_LC: + case ELINK_SFP_EEPROM_CON_TYPE_VAL_RJ45: + check_limiting_mode = 1; + /* Module is considered as 1G in case it's NOT compliant with + * any 10G ethernet protocol, but is 1G Ethernet compliant. + */ + if (((val[ELINK_SFP_EEPROM_10G_COMP_CODE_ADDR] & + (ELINK_SFP_EEPROM_10G_COMP_CODE_SR_MASK | + ELINK_SFP_EEPROM_10G_COMP_CODE_LR_MASK | + ELINK_SFP_EEPROM_10G_COMP_CODE_LRM_MASK)) == 0) && + (val[ELINK_SFP_EEPROM_1G_COMP_CODE_ADDR] != 0)) { + ELINK_DEBUG_P0(sc, "1G SFP module detected"); + phy->media_type = ELINK_ETH_PHY_SFP_1G_FIBER; + if (phy->req_line_speed != ELINK_SPEED_1000) { + uint8_t gport = params->port; + phy->req_line_speed = ELINK_SPEED_1000; + if (!CHIP_IS_E1x(sc)) { + gport = SC_PATH(sc) + + (params->port << 1); + } + elink_cb_event_log(sc, + ELINK_LOG_ID_NON_10G_MODULE, + gport); + /*"Warning: Link speed was forced to 1000Mbps." + *" Current SFP module in port %d is not" + *" compliant with 10G Ethernet", + */ + } + + if (val[ELINK_SFP_EEPROM_1G_COMP_CODE_ADDR] & + ELINK_SFP_EEPROM_1G_COMP_CODE_BASE_T) { + /* Some 1G-baseT modules will not link up, + * unless TX_EN is toggled with long delay in + * between. 
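The classification above treats a module as 1G only when every 10G compliance bit (SR/LR/LRM) is clear and the 1G compliance byte is non-zero; anything else in this branch is handled as a 10G optic. A small sketch of that decision, with illustrative mask names in place of the ELINK_SFP_EEPROM_* constants:

#include <stdint.h>
#include <stdio.h>

/* Illustrative 10G Ethernet compliance bits of the EEPROM compliance byte. */
#define COMP_10G_SR     (1u << 4)
#define COMP_10G_LR     (1u << 5)
#define COMP_10G_LRM    (1u << 6)

/* Returns 1 when the module claims no 10G compliance at all but does claim
 * some 1G Ethernet compliance, mirroring the test above.
 */
static int is_1g_only(uint8_t comp_10g, uint8_t comp_1g)
{
        return ((comp_10g & (COMP_10G_SR | COMP_10G_LR | COMP_10G_LRM)) == 0) &&
               (comp_1g != 0);
}

int main(void)
{
        printf("%d\n", is_1g_only(COMP_10G_SR, 0x00));  /* 0: 10G optic */
        printf("%d\n", is_1g_only(0x00, 0x01));         /* 1: 1G module */
        return 0;
}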
+ */ + elink_sfp_set_transmitter(params, phy, 0); + DELAY(1000 * 40); + elink_sfp_set_transmitter(params, phy, 1); + } + } else { + int idx, cfg_idx = 0; + ELINK_DEBUG_P0(sc, "10G Optic module detected"); + for (idx = ELINK_INT_PHY; idx < ELINK_MAX_PHYS; idx++) { + if (params->phy[idx].type == phy->type) { + cfg_idx = ELINK_LINK_CONFIG_IDX(idx); + break; + } + } + phy->media_type = ELINK_ETH_PHY_SFPP_10G_FIBER; + phy->req_line_speed = params->req_line_speed[cfg_idx]; + } + break; + default: + ELINK_DEBUG_P1(sc, "Unable to determine module type 0x%x !!!", + val[ELINK_SFP_EEPROM_CON_TYPE_ADDR]); + return ELINK_STATUS_ERROR; + } + sync_offset = params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[params->port].media_type); + media_types = REG_RD(sc, sync_offset); + /* Update media type for non-PMF sync */ + for (phy_idx = ELINK_INT_PHY; phy_idx < ELINK_MAX_PHYS; phy_idx++) { + if (&(params->phy[phy_idx]) == phy) { + media_types &= ~(PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK << + (PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT * phy_idx)); + media_types |= ((phy->media_type & + PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK) << + (PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT * phy_idx)); + break; + } + } + REG_WR(sc, sync_offset, media_types); + if (check_limiting_mode) { + uint8_t options[ELINK_SFP_EEPROM_OPTIONS_SIZE]; + if (elink_read_sfp_module_eeprom(phy, + params, + ELINK_I2C_DEV_ADDR_A0, + ELINK_SFP_EEPROM_OPTIONS_ADDR, + ELINK_SFP_EEPROM_OPTIONS_SIZE, + options) != 0) { + ELINK_DEBUG_P0(sc, + "Failed to read Option field from module EEPROM"); + return ELINK_STATUS_ERROR; + } + if ((options[0] & ELINK_SFP_EEPROM_OPTIONS_LINEAR_RX_OUT_MASK)) + *edc_mode = ELINK_EDC_MODE_LINEAR; + else + *edc_mode = ELINK_EDC_MODE_LIMITING; + } + ELINK_DEBUG_P1(sc, "EDC mode is set to 0x%x", *edc_mode); + return ELINK_STATUS_OK; +} +/* This function read the relevant field from the module (SFP+), and verify it + * is compliant with this board + */ +static elink_status_t elink_verify_sfp_module(struct elink_phy *phy, + struct elink_params *params) +{ + struct bnx2x_softc *sc = params->sc; + uint32_t val, cmd; + uint32_t fw_resp, fw_cmd_param; + char vendor_name[ELINK_SFP_EEPROM_VENDOR_NAME_SIZE + 1]; + char vendor_pn[ELINK_SFP_EEPROM_PART_NO_SIZE + 1]; + phy->flags &= ~ELINK_FLAGS_SFP_NOT_APPROVED; + val = REG_RD(sc, params->shmem_base + + offsetof(struct shmem_region, dev_info. 
+ port_feature_config[params->port].config)); + if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) == + PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_NO_ENFORCEMENT) { + ELINK_DEBUG_P0(sc, "NOT enforcing module verification"); + return ELINK_STATUS_OK; + } + + if (params->feature_config_flags & + ELINK_FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY) { + /* Use specific phy request */ + cmd = DRV_MSG_CODE_VRFY_SPECIFIC_PHY_OPT_MDL; + } else if (params->feature_config_flags & + ELINK_FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY) { + /* Use first phy request only in case of non-dual media*/ + if (ELINK_DUAL_MEDIA(params)) { + ELINK_DEBUG_P0(sc, + "FW does not support OPT MDL verification"); + return ELINK_STATUS_ERROR; + } + cmd = DRV_MSG_CODE_VRFY_FIRST_PHY_OPT_MDL; + } else { + /* No support in OPT MDL detection */ + ELINK_DEBUG_P0(sc, + "FW does not support OPT MDL verification"); + return ELINK_STATUS_ERROR; + } + + fw_cmd_param = ELINK_FW_PARAM_SET(phy->addr, phy->type, phy->mdio_ctrl); + fw_resp = elink_cb_fw_command(sc, cmd, fw_cmd_param); + if (fw_resp == FW_MSG_CODE_VRFY_OPT_MDL_SUCCESS) { + ELINK_DEBUG_P0(sc, "Approved module"); + return ELINK_STATUS_OK; + } + + /* Format the warning message */ + if (elink_read_sfp_module_eeprom(phy, + params, + ELINK_I2C_DEV_ADDR_A0, + ELINK_SFP_EEPROM_VENDOR_NAME_ADDR, + ELINK_SFP_EEPROM_VENDOR_NAME_SIZE, + (uint8_t *)vendor_name)) + vendor_name[0] = '\0'; + else + vendor_name[ELINK_SFP_EEPROM_VENDOR_NAME_SIZE] = '\0'; + if (elink_read_sfp_module_eeprom(phy, + params, + ELINK_I2C_DEV_ADDR_A0, + ELINK_SFP_EEPROM_PART_NO_ADDR, + ELINK_SFP_EEPROM_PART_NO_SIZE, + (uint8_t *)vendor_pn)) + vendor_pn[0] = '\0'; + else + vendor_pn[ELINK_SFP_EEPROM_PART_NO_SIZE] = '\0'; + + elink_cb_event_log(sc, ELINK_LOG_ID_UNQUAL_IO_MODULE, params->port, + vendor_name, vendor_pn); + /* "Warning: Unqualified SFP+ module detected," + * " Port %d from %s part number %s", + */ + + if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) != + PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_WARNING_MSG) + phy->flags |= ELINK_FLAGS_SFP_NOT_APPROVED; + return ELINK_STATUS_ERROR; +} + +static elink_status_t elink_wait_for_sfp_module_initialized( + struct elink_phy *phy, + struct elink_params *params) + +{ + uint8_t val; + elink_status_t rc; + struct bnx2x_softc *sc = params->sc; + uint16_t timeout; + /* Initialization time after hot-plug may take up to 300ms for + * some phys type ( e.g. JDSU ) + */ + + for (timeout = 0; timeout < 60; timeout++) { + if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) + rc = elink_warpcore_read_sfp_module_eeprom( + phy, params, ELINK_I2C_DEV_ADDR_A0, 1, 1, &val, + 1); + else + rc = elink_read_sfp_module_eeprom(phy, params, + ELINK_I2C_DEV_ADDR_A0, + 1, 1, &val); + if (rc == 0) { + ELINK_DEBUG_P1(sc, + "SFP+ module initialization took %d ms", + timeout * 5); + return ELINK_STATUS_OK; + } + DELAY(1000 * 5); + } + rc = elink_read_sfp_module_eeprom(phy, params, ELINK_I2C_DEV_ADDR_A0, + 1, 1, &val); + return rc; +} + +static void elink_8727_power_module(struct bnx2x_softc *sc, + struct elink_phy *phy, + uint8_t is_power_up) { + /* Make sure GPIOs are not using for LED mode */ + uint16_t val; + /* In the GPIO register, bit 4 is use to determine if the GPIOs are + * operating as INPUT or as OUTPUT. 
Bit 1 is for input, and 0 for + * output + * Bits 0-1 determine the GPIOs value for OUTPUT in case bit 4 val is 0 + * Bits 8-9 determine the GPIOs value for INPUT in case bit 4 val is 1 + * where the 1st bit is the over-current(only input), and 2nd bit is + * for power( only output ) + * + * In case of NOC feature is disabled and power is up, set GPIO control + * as input to enable listening of over-current indication + */ + if (phy->flags & ELINK_FLAGS_NOC) + return; + if (is_power_up) + val = (1 << 4); + else + /* Set GPIO control to OUTPUT, and set the power bit + * to according to the is_power_up + */ + val = (1 << 1); + + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8727_GPIO_CTRL, + val); +} + +static elink_status_t elink_8726_set_limiting_mode(struct bnx2x_softc *sc, + struct elink_phy *phy, + uint16_t edc_mode) +{ + uint16_t cur_limiting_mode; + + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_ROM_VER2, + &cur_limiting_mode); + ELINK_DEBUG_P1(sc, "Current Limiting mode is 0x%x", + cur_limiting_mode); + + if (edc_mode == ELINK_EDC_MODE_LIMITING) { + ELINK_DEBUG_P0(sc, "Setting LIMITING MODE"); + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_ROM_VER2, + ELINK_EDC_MODE_LIMITING); + } else { /* LRM mode ( default )*/ + + ELINK_DEBUG_P0(sc, "Setting LRM MODE"); + + /* Changing to LRM mode takes quite few seconds. So do it only + * if current mode is limiting (default is LRM) + */ + if (cur_limiting_mode != ELINK_EDC_MODE_LIMITING) + return ELINK_STATUS_OK; + + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_LRM_MODE, + 0); + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_ROM_VER2, + 0x128); + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_MISC_CTRL0, + 0x4008); + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_LRM_MODE, + 0xaaaa); + } + return ELINK_STATUS_OK; +} + +static elink_status_t elink_8727_set_limiting_mode(struct bnx2x_softc *sc, + struct elink_phy *phy, + uint16_t edc_mode) +{ + uint16_t phy_identifier; + uint16_t rom_ver2_val; + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_PHY_IDENTIFIER, + &phy_identifier); + + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_PHY_IDENTIFIER, + (phy_identifier & ~(1 << 9))); + + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_ROM_VER2, + &rom_ver2_val); + /* Keep the MSB 8-bits, and set the LSB 8-bits with the edc_mode */ + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_ROM_VER2, + (rom_ver2_val & 0xff00) | (edc_mode & 0x00ff)); + + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_PHY_IDENTIFIER, + (phy_identifier | (1 << 9))); + + return ELINK_STATUS_OK; +} + +static void elink_8727_specific_func(struct elink_phy *phy, + struct elink_params *params, + uint32_t action) +{ + struct bnx2x_softc *sc = params->sc; + uint16_t val; + switch (action) { + case ELINK_DISABLE_TX: + elink_sfp_set_transmitter(params, phy, 0); + break; + case ELINK_ENABLE_TX: + if (!(phy->flags & ELINK_FLAGS_SFP_NOT_APPROVED)) + elink_sfp_set_transmitter(params, phy, 1); + break; + case ELINK_PHY_INIT: + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, + (1 << 2) | (1 << 5)); + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_TXCTRL, + 0); + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x0006); + /* Make MOD_ABS give interrupt on change */ + elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, + MDIO_PMA_REG_8727_PCS_OPT_CTRL, + &val); + val |= (1 << 12); + if (phy->flags & ELINK_FLAGS_NOC) + val |= (3 
<< 5); + /* Set 8727 GPIOs to input to allow reading from the 8727 GPIO0 + * status which reflect SFP+ module over-current + */ + if (!(phy->flags & ELINK_FLAGS_NOC)) + val &= 0xff8f; /* Reset bits 4-6 */ + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_PCS_OPT_CTRL, + val); + break; + default: + ELINK_DEBUG_P1(sc, "Function 0x%x not supported by 8727", + action); + return; + } +} + +static void elink_set_e1e2_module_fault_led(struct elink_params *params, + uint8_t gpio_mode) +{ + struct bnx2x_softc *sc = params->sc; + + uint32_t fault_led_gpio = REG_RD(sc, params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[params->port].sfp_ctrl)) & + PORT_HW_CFG_FAULT_MODULE_LED_MASK; + switch (fault_led_gpio) { + case PORT_HW_CFG_FAULT_MODULE_LED_DISABLED: + return; + case PORT_HW_CFG_FAULT_MODULE_LED_GPIO0: + case PORT_HW_CFG_FAULT_MODULE_LED_GPIO1: + case PORT_HW_CFG_FAULT_MODULE_LED_GPIO2: + case PORT_HW_CFG_FAULT_MODULE_LED_GPIO3: + { + uint8_t gpio_port = elink_get_gpio_port(params); + uint16_t gpio_pin = fault_led_gpio - + PORT_HW_CFG_FAULT_MODULE_LED_GPIO0; + ELINK_DEBUG_P3(sc, "Set fault module-detected led " + "pin %x port %x mode %x", + gpio_pin, gpio_port, gpio_mode); + elink_cb_gpio_write(sc, gpio_pin, gpio_mode, gpio_port); + } + break; + default: + ELINK_DEBUG_P1(sc, "Error: Invalid fault led mode 0x%x", + fault_led_gpio); + } +} + +static void elink_set_e3_module_fault_led(struct elink_params *params, + uint8_t gpio_mode) +{ + uint32_t pin_cfg; + uint8_t port = params->port; + struct bnx2x_softc *sc = params->sc; + pin_cfg = (REG_RD(sc, params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[port].e3_sfp_ctrl)) & + PORT_HW_CFG_E3_FAULT_MDL_LED_MASK) >> + PORT_HW_CFG_E3_FAULT_MDL_LED_SHIFT; + ELINK_DEBUG_P2(sc, "Setting Fault LED to %d using pin cfg %d", + gpio_mode, pin_cfg); + elink_set_cfg_pin(sc, pin_cfg, gpio_mode); +} + +static void elink_set_sfp_module_fault_led(struct elink_params *params, + uint8_t gpio_mode) +{ + struct bnx2x_softc *sc = params->sc; + ELINK_DEBUG_P1(sc, "Setting SFP+ module fault LED to %d", gpio_mode); + if (CHIP_IS_E3(sc)) { + /* Low ==> if SFP+ module is supported otherwise + * High ==> if SFP+ module is not on the approved vendor list + */ + elink_set_e3_module_fault_led(params, gpio_mode); + } else + elink_set_e1e2_module_fault_led(params, gpio_mode); +} + +static void elink_warpcore_hw_reset(__rte_unused struct elink_phy *phy, + struct elink_params *params) +{ + struct bnx2x_softc *sc = params->sc; + elink_warpcore_power_module(params, 0); + /* Put Warpcore in low power mode */ + REG_WR(sc, MISC_REG_WC0_RESET, 0x0c0e); + + /* Put LCPLL in low power mode */ + REG_WR(sc, MISC_REG_LCPLL_E40_PWRDWN, 1); + REG_WR(sc, MISC_REG_LCPLL_E40_RESETB_ANA, 0); + REG_WR(sc, MISC_REG_LCPLL_E40_RESETB_DIG, 0); +} + +static void elink_power_sfp_module(struct elink_params *params, + struct elink_phy *phy, + uint8_t power) +{ + struct bnx2x_softc *sc = params->sc; + ELINK_DEBUG_P1(sc, "Setting SFP+ power to %x", power); + + switch (phy->type) { + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8727: + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8722: + elink_8727_power_module(params->sc, phy, power); + break; + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT: + elink_warpcore_power_module(params, power); + break; + default: + break; + } +} +static void elink_warpcore_set_limiting_mode(struct elink_params *params, + struct elink_phy *phy, + uint16_t edc_mode) +{ + uint16_t val = 0; + uint16_t mode = 
MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_DEFAULT; + struct bnx2x_softc *sc = params->sc; + + uint8_t lane = elink_get_warpcore_lane(phy, params); + /* This is a global register which controls all lanes */ + elink_cl45_read(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE, &val); + val &= ~(0xf << (lane << 2)); + + switch (edc_mode) { + case ELINK_EDC_MODE_LINEAR: + case ELINK_EDC_MODE_LIMITING: + mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_DEFAULT; + break; + case ELINK_EDC_MODE_PASSIVE_DAC: + case ELINK_EDC_MODE_ACTIVE_DAC: + mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_DAC; + break; + default: + break; + } + + val |= (mode << (lane << 2)); + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE, val); + /* A must read */ + elink_cl45_read(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE, &val); + + /* Restart microcode to re-read the new mode */ + elink_warpcore_reset_lane(sc, phy, 1); + elink_warpcore_reset_lane(sc, phy, 0); + +} + +static void elink_set_limiting_mode(struct elink_params *params, + struct elink_phy *phy, + uint16_t edc_mode) +{ + switch (phy->type) { + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8726: + elink_8726_set_limiting_mode(params->sc, phy, edc_mode); + break; + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8727: + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8722: + elink_8727_set_limiting_mode(params->sc, phy, edc_mode); + break; + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT: + elink_warpcore_set_limiting_mode(params, phy, edc_mode); + break; + } +} + +elink_status_t elink_sfp_module_detection(struct elink_phy *phy, + struct elink_params *params) +{ + struct bnx2x_softc *sc = params->sc; + uint16_t edc_mode; + elink_status_t rc = ELINK_STATUS_OK; + + uint32_t val = REG_RD(sc, params->shmem_base + + offsetof(struct shmem_region, dev_info. + port_feature_config[params->port].config)); + /* Enabled transmitter by default */ + elink_sfp_set_transmitter(params, phy, 1); + ELINK_DEBUG_P1(sc, "SFP+ module plugged in/out detected on port %d", + params->port); + /* Power up module */ + elink_power_sfp_module(params, phy, 1); + if (elink_get_edc_mode(phy, params, &edc_mode) != 0) { + ELINK_DEBUG_P0(sc, "Failed to get valid module type"); + return ELINK_STATUS_ERROR; + } else if (elink_verify_sfp_module(phy, params) != 0) { + /* Check SFP+ module compatibility */ + ELINK_DEBUG_P0(sc, "Module verification failed!!"); + rc = ELINK_STATUS_ERROR; + /* Turn on fault module-detected led */ + elink_set_sfp_module_fault_led(params, + MISC_REGISTERS_GPIO_HIGH); + + /* Check if need to power down the SFP+ module */ + if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) == + PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_POWER_DOWN) { + ELINK_DEBUG_P0(sc, "Shutdown SFP+ module!!"); + elink_power_sfp_module(params, phy, 0); + return rc; + } + } else { + /* Turn off fault module-detected led */ + elink_set_sfp_module_fault_led(params, MISC_REGISTERS_GPIO_LOW); + } + + /* Check and set limiting mode / LRM mode on 8726. On 8727 it + * is done automatically + */ + elink_set_limiting_mode(params, phy, edc_mode); + + /* Disable transmit for this module if the module is not approved, and + * laser needs to be disabled. 
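elink_warpcore_set_limiting_mode() above updates one lane's 4-bit firmware-mode field inside a register shared by all lanes: clear that lane's nibble, then OR in the new mode. The same read-modify-write pattern in isolation, on plain integers with no MDIO access:

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: each lane owns a 4-bit field at bit position lane*4
 * of a shared 16-bit register.  Replace that field without touching the
 * other lanes.
 */
static uint16_t set_lane_nibble(uint16_t reg, unsigned int lane, uint8_t mode)
{
        unsigned int shift = lane << 2;                 /* lane * 4 */

        reg &= ~(0xf << shift);                         /* clear this lane */
        reg |= (uint16_t)(mode & 0xf) << shift;         /* install new mode */
        return reg;
}

int main(void)
{
        uint16_t reg = 0x3210;                          /* lanes 0..3 = 0,1,2,3 */

        printf("0x%04x\n", set_lane_nibble(reg, 2, 0x9));       /* 0x3910 */
        return 0;
}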
+ */ + if ((rc != 0) && + ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) == + PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER)) + elink_sfp_set_transmitter(params, phy, 0); + + return rc; +} + +void elink_handle_module_detect_int(struct elink_params *params) +{ + struct bnx2x_softc *sc = params->sc; + struct elink_phy *phy; + uint32_t gpio_val; + uint8_t gpio_num, gpio_port; + if (CHIP_IS_E3(sc)) { + phy = ¶ms->phy[ELINK_INT_PHY]; + /* Always enable TX laser, will be disabled in case of fault */ + elink_sfp_set_transmitter(params, phy, 1); + } else { + phy = ¶ms->phy[ELINK_EXT_PHY1]; + } + if (elink_get_mod_abs_int_cfg(sc, params->chip_id, params->shmem_base, + params->port, &gpio_num, &gpio_port) == + ELINK_STATUS_ERROR) { + ELINK_DEBUG_P0(sc, "Failed to get MOD_ABS interrupt config"); + return; + } + + /* Set valid module led off */ + elink_set_sfp_module_fault_led(params, MISC_REGISTERS_GPIO_HIGH); + + /* Get current gpio val reflecting module plugged in / out*/ + gpio_val = elink_cb_gpio_read(sc, gpio_num, gpio_port); + + /* Call the handling function in case module is detected */ + if (gpio_val == 0) { + elink_set_mdio_emac_per_phy(sc, params); + elink_set_aer_mmd(params, phy); + + elink_power_sfp_module(params, phy, 1); + elink_cb_gpio_int_write(sc, gpio_num, + MISC_REGISTERS_GPIO_INT_OUTPUT_CLR, + gpio_port); + if (elink_wait_for_sfp_module_initialized(phy, params) == 0) { + elink_sfp_module_detection(phy, params); + if (CHIP_IS_E3(sc)) { + uint16_t rx_tx_in_reset; + /* In case WC is out of reset, reconfigure the + * link speed while taking into account 1G + * module limitation. + */ + elink_cl45_read(sc, phy, + MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL5_MISC6, + &rx_tx_in_reset); + if ((!rx_tx_in_reset) && + (params->link_flags & + ELINK_PHY_INITIALIZED)) { + elink_warpcore_reset_lane(sc, phy, 1); + elink_warpcore_config_sfi(phy, params); + elink_warpcore_reset_lane(sc, phy, 0); + } + } + } else { + ELINK_DEBUG_P0(sc, "SFP+ module is not initialized"); + } + } else { + elink_cb_gpio_int_write(sc, gpio_num, + MISC_REGISTERS_GPIO_INT_OUTPUT_SET, + gpio_port); + /* Module was plugged out. + * Disable transmit for this module + */ + phy->media_type = ELINK_ETH_PHY_NOT_PRESENT; + } +} + +/******************************************************************/ +/* Used by 8706 and 8727 */ +/******************************************************************/ +static void elink_sfp_mask_fault(struct bnx2x_softc *sc, + struct elink_phy *phy, + uint16_t alarm_status_offset, + uint16_t alarm_ctrl_offset) +{ + uint16_t alarm_status, val; + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, alarm_status_offset, + &alarm_status); + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, alarm_status_offset, + &alarm_status); + /* Mask or enable the fault event. 
*/ + elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, alarm_ctrl_offset, &val); + if (alarm_status & (1 << 0)) + val &= ~(1 << 0); + else + val |= (1 << 0); + elink_cl45_write(sc, phy, MDIO_PMA_DEVAD, alarm_ctrl_offset, val); +} +/******************************************************************/ +/* common BNX2X8706/BNX2X8726 PHY SECTION */ +/******************************************************************/ +static uint8_t elink_8706_8726_read_status(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars) +{ + uint8_t link_up = 0; + uint16_t val1, val2, rx_sd, pcs_status; + struct bnx2x_softc *sc = params->sc; + ELINK_DEBUG_P0(sc, "XGXS 8706/8726"); + /* Clear RX Alarm*/ + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &val2); + + elink_sfp_mask_fault(sc, phy, MDIO_PMA_LASI_TXSTAT, + MDIO_PMA_LASI_TXCTRL); + + /* Clear LASI indication*/ + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1); + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val2); + ELINK_DEBUG_P2(sc, "8706/8726 LASI status 0x%x--> 0x%x", val1, val2); + + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_SD, &rx_sd); + elink_cl45_read(sc, phy, + MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &pcs_status); + elink_cl45_read(sc, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS, &val2); + elink_cl45_read(sc, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS, &val2); + + ELINK_DEBUG_P3(sc, "8706/8726 rx_sd 0x%x pcs_status 0x%x 1Gbps" + " link_status 0x%x", rx_sd, pcs_status, val2); + /* Link is up if both bit 0 of pmd_rx_sd and bit 0 of pcs_status + * are set, or if the autoneg bit 1 is set + */ + link_up = ((rx_sd & pcs_status & 0x1) || (val2 & (1 << 1))); + if (link_up) { + if (val2 & (1 << 1)) + vars->line_speed = ELINK_SPEED_1000; + else + vars->line_speed = ELINK_SPEED_10000; + elink_ext_phy_resolve_fc(phy, params, vars); + vars->duplex = DUPLEX_FULL; + } + + /* Capture 10G link fault. Read twice to clear stale value. 
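In elink_8706_8726_read_status() above, the link is declared up either when both the PMD signal-detect and the PCS status report bit 0, or when the autoneg status reports bit 1; the autoneg case resolves to 1G, the other to 10G. A condensed sketch of that decode, taking plain register values and returning a speed (0 meaning no link); the bit positions are taken from the code above, the rest is illustrative:

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: rx_sd/pcs_status bit 0 -> 10G path up,
 * an_status bit 1 -> 1G autoneg link up.
 */
static unsigned int decode_8706_link(uint16_t rx_sd, uint16_t pcs_status,
                                     uint16_t an_status)
{
        if (an_status & (1 << 1))
                return 1000;
        if (rx_sd & pcs_status & 0x1)
                return 10000;
        return 0;
}

int main(void)
{
        printf("%u\n", decode_8706_link(0x1, 0x1, 0x0));        /* 10000 */
        printf("%u\n", decode_8706_link(0x0, 0x0, 0x2));        /* 1000 */
        return 0;
}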
*/ + if (vars->line_speed == ELINK_SPEED_10000) { + elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, + MDIO_PMA_LASI_TXSTAT, &val1); + elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, + MDIO_PMA_LASI_TXSTAT, &val1); + if (val1 & (1 << 0)) + vars->fault_detected = 1; + } + + return link_up; +} + +/******************************************************************/ +/* BNX2X8706 PHY SECTION */ +/******************************************************************/ +static uint8_t elink_8706_config_init(struct elink_phy *phy, + struct elink_params *params, + __rte_unused struct elink_vars *vars) +{ + uint32_t tx_en_mode; + uint16_t cnt, val, tmp1; + struct bnx2x_softc *sc = params->sc; + + elink_cb_gpio_write(sc, MISC_REGISTERS_GPIO_2, + MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port); + /* HW reset */ + elink_ext_phy_hw_reset(sc, params->port); + elink_cl45_write(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0xa040); + elink_wait_reset_complete(sc, phy, params); + + /* Wait until fw is loaded */ + for (cnt = 0; cnt < 100; cnt++) { + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_ROM_VER1, &val); + if (val) + break; + DELAY(1000 * 10); + } + ELINK_DEBUG_P1(sc, "XGXS 8706 is initialized after %d ms", cnt); + if ((params->feature_config_flags & + ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) { + uint8_t i; + uint16_t reg; + for (i = 0; i < 4; i++) { + reg = MDIO_XS_8706_REG_BANK_RX0 + + i * (MDIO_XS_8706_REG_BANK_RX1 - + MDIO_XS_8706_REG_BANK_RX0); + elink_cl45_read(sc, phy, MDIO_XS_DEVAD, reg, &val); + /* Clear first 3 bits of the control */ + val &= ~0x7; + /* Set control bits according to configuration */ + val |= (phy->rx_preemphasis[i] & 0x7); + ELINK_DEBUG_P2(sc, "Setting RX Equalizer to BNX2X8706" + " reg 0x%x <-- val 0x%x", reg, val); + elink_cl45_write(sc, phy, MDIO_XS_DEVAD, reg, val); + } + } + /* Force speed */ + if (phy->req_line_speed == ELINK_SPEED_10000) { + ELINK_DEBUG_P0(sc, "XGXS 8706 force 10Gbps"); + + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_DIGITAL_CTRL, 0x400); + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_TXCTRL, + 0); + /* Arm LASI for link and Tx fault. 
*/ + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 3); + } else { + /* Force 1Gbps using autoneg with 1G advertisement */ + + /* Allow CL37 through CL73 */ + ELINK_DEBUG_P0(sc, "XGXS 8706 AutoNeg"); + elink_cl45_write(sc, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_CL37_CL73, 0x040c); + + /* Enable Full-Duplex advertisement on CL37 */ + elink_cl45_write(sc, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LP, 0x0020); + /* Enable CL37 AN */ + elink_cl45_write(sc, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000); + /* 1G support */ + elink_cl45_write(sc, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_ADV, (1 << 5)); + + /* Enable clause 73 AN */ + elink_cl45_write(sc, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200); + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, + 0x0400); + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, + 0x0004); + } + elink_save_bnx2x_spirom_ver(sc, phy, params->port); + + /* If TX Laser is controlled by GPIO_0, do not let PHY go into low + * power mode, if TX Laser is disabled + */ + + tx_en_mode = REG_RD(sc, params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[params->port].sfp_ctrl)) + & PORT_HW_CFG_TX_LASER_MASK; + + if (tx_en_mode == PORT_HW_CFG_TX_LASER_GPIO0) { + ELINK_DEBUG_P0(sc, "Enabling TXONOFF_PWRDN_DIS"); + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_DIGITAL_CTRL, &tmp1); + tmp1 |= 0x1; + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_DIGITAL_CTRL, tmp1); + } + + return ELINK_STATUS_OK; +} + +static uint8_t elink_8706_read_status(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars) +{ + return elink_8706_8726_read_status(phy, params, vars); +} + +/******************************************************************/ +/* BNX2X8726 PHY SECTION */ +/******************************************************************/ +static void elink_8726_config_loopback(struct elink_phy *phy, + struct elink_params *params) +{ + struct bnx2x_softc *sc = params->sc; + ELINK_DEBUG_P0(sc, "PMA/PMD ext_phy_loopback: 8726"); + elink_cl45_write(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x0001); +} + +static void elink_8726_external_rom_boot(struct elink_phy *phy, + struct elink_params *params) +{ + struct bnx2x_softc *sc = params->sc; + /* Need to wait 100ms after reset */ + DELAY(1000 * 100); + + /* Micro controller re-boot */ + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_GEN_CTRL, 0x018B); + + /* Set soft reset */ + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_GEN_CTRL, + MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET); + + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_MISC_CTRL1, 0x0001); + + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_GEN_CTRL, + MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP); + + /* Wait for 150ms for microcode load */ + DELAY(1000 * 150); + + /* Disable serial boot control, tristates pins SS_N, SCK, MOSI, MISO */ + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_MISC_CTRL1, 0x0000); + + DELAY(1000 * 200); + elink_save_bnx2x_spirom_ver(sc, phy, params->port); +} + +static uint8_t elink_8726_read_status(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars) +{ + struct bnx2x_softc *sc = params->sc; + uint16_t val1; + uint8_t link_up = elink_8706_8726_read_status(phy, params, vars); + if (link_up) { + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, + &val1); + if (val1 & (1 << 15)) { + ELINK_DEBUG_P0(sc, "Tx is disabled"); + link_up = 0; + vars->line_speed 
= 0; + } + } + return link_up; +} + + +static uint8_t elink_8726_config_init(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars) +{ + struct bnx2x_softc *sc = params->sc; + ELINK_DEBUG_P0(sc, "Initializing BNX2X8726"); + + elink_cl45_write(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1 << 15); + elink_wait_reset_complete(sc, phy, params); + + elink_8726_external_rom_boot(phy, params); + + /* Need to call module detected on initialization since the module + * detection triggered by actual module insertion might occur before + * driver is loaded, and when driver is loaded, it reset all + * registers, including the transmitter + */ + elink_sfp_module_detection(phy, params); + + if (phy->req_line_speed == ELINK_SPEED_1000) { + ELINK_DEBUG_P0(sc, "Setting 1G force"); + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x40); + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, 0xD); + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x5); + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, + 0x400); + } else if ((phy->req_line_speed == ELINK_SPEED_AUTO_NEG) && + (phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_1G) && + ((phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) != + PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) { + ELINK_DEBUG_P0(sc, "Setting 1G clause37"); + /* Set Flow control */ + elink_ext_phy_set_pause(params, phy, vars); + elink_cl45_write(sc, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_ADV, 0x20); + elink_cl45_write(sc, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_CL37_CL73, 0x040c); + elink_cl45_write(sc, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD, 0x0020); + elink_cl45_write(sc, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000); + elink_cl45_write(sc, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200); + /* Enable RX-ALARM control to receive interrupt for 1G speed + * change + */ + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x4); + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, + 0x400); + + } else { /* Default 10G. 
Set only LASI control */ + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 1); + } + + /* Set TX PreEmphasis if needed */ + if ((params->feature_config_flags & + ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) { + ELINK_DEBUG_P2(sc, + "Setting TX_CTRL1 0x%x, TX_CTRL2 0x%x", + phy->tx_preemphasis[0], + phy->tx_preemphasis[1]); + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8726_TX_CTRL1, + phy->tx_preemphasis[0]); + + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8726_TX_CTRL2, + phy->tx_preemphasis[1]); + } + + return ELINK_STATUS_OK; + +} + +static void elink_8726_link_reset(struct elink_phy *phy, + struct elink_params *params) +{ + struct bnx2x_softc *sc = params->sc; + ELINK_DEBUG_P1(sc, "elink_8726_link_reset port %d", params->port); + /* Set serial boot control for external load */ + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_GEN_CTRL, 0x0001); +} + +/******************************************************************/ +/* BNX2X8727 PHY SECTION */ +/******************************************************************/ + +static void elink_8727_set_link_led(struct elink_phy *phy, + struct elink_params *params, uint8_t mode) +{ + struct bnx2x_softc *sc = params->sc; + uint16_t led_mode_bitmask = 0; + uint16_t gpio_pins_bitmask = 0; + uint16_t val; + /* Only NOC flavor requires to set the LED specifically */ + if (!(phy->flags & ELINK_FLAGS_NOC)) + return; + switch (mode) { + case ELINK_LED_MODE_FRONT_PANEL_OFF: + case ELINK_LED_MODE_OFF: + led_mode_bitmask = 0; + gpio_pins_bitmask = 0x03; + break; + case ELINK_LED_MODE_ON: + led_mode_bitmask = 0; + gpio_pins_bitmask = 0x02; + break; + case ELINK_LED_MODE_OPER: + led_mode_bitmask = 0x60; + gpio_pins_bitmask = 0x11; + break; + } + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8727_PCS_OPT_CTRL, + &val); + val &= 0xff8f; + val |= led_mode_bitmask; + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8727_PCS_OPT_CTRL, + val); + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8727_GPIO_CTRL, + &val); + val &= 0xffe0; + val |= gpio_pins_bitmask; + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8727_GPIO_CTRL, + val); +} +static void elink_8727_hw_reset(__rte_unused struct elink_phy *phy, + struct elink_params *params) { + uint32_t swap_val, swap_override; + uint8_t port; + /* The PHY reset is controlled by GPIO 1. 
Fake the port number + * to cancel the swap done in set_gpio() + */ + struct bnx2x_softc *sc = params->sc; + swap_val = REG_RD(sc, NIG_REG_PORT_SWAP); + swap_override = REG_RD(sc, NIG_REG_STRAP_OVERRIDE); + port = (swap_val && swap_override) ^ 1; + elink_cb_gpio_write(sc, MISC_REGISTERS_GPIO_1, + MISC_REGISTERS_GPIO_OUTPUT_LOW, port); +} + +static void elink_8727_config_speed(struct elink_phy *phy, + struct elink_params *params) +{ + struct bnx2x_softc *sc = params->sc; + uint16_t tmp1, val; + /* Set option 1G speed */ + if ((phy->req_line_speed == ELINK_SPEED_1000) || + (phy->media_type == ELINK_ETH_PHY_SFP_1G_FIBER)) { + ELINK_DEBUG_P0(sc, "Setting 1G force"); + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x40); + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, 0xD); + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, &tmp1); + ELINK_DEBUG_P1(sc, "1.7 = 0x%x", tmp1); + /* Power down the XAUI until link is up in case of dual-media + * and 1G + */ + if (ELINK_DUAL_MEDIA(params)) { + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8727_PCS_GP, &val); + val |= (3 << 10); + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8727_PCS_GP, val); + } + } else if ((phy->req_line_speed == ELINK_SPEED_AUTO_NEG) && + ((phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) && + ((phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) != + PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) { + + ELINK_DEBUG_P0(sc, "Setting 1G clause37"); + elink_cl45_write(sc, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_8727_MISC_CTRL, 0); + elink_cl45_write(sc, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1300); + } else { + /* Since the 8727 has only single reset pin, need to set the 10G + * registers although it is default + */ + elink_cl45_write(sc, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_8727_MISC_CTRL, + 0x0020); + elink_cl45_write(sc, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x0100); + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x2040); + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, + 0x0008); + } +} + +static uint8_t elink_8727_config_init(struct elink_phy *phy, + struct elink_params *params, + __rte_unused struct elink_vars *vars) +{ + uint32_t tx_en_mode; + uint16_t tmp1, mod_abs, tmp2; + struct bnx2x_softc *sc = params->sc; + /* Enable PMD link, MOD_ABS_FLT, and 1G link alarm */ + + elink_wait_reset_complete(sc, phy, params); + + ELINK_DEBUG_P0(sc, "Initializing BNX2X8727"); + + elink_8727_specific_func(phy, params, ELINK_PHY_INIT); + /* Initially configure MOD_ABS to interrupt when module is + * presence( bit 8) + */ + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs); + /* Set EDC off by setting OPTXLOS signal input to low (bit 9). 
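+ * As used here and in elink_8727_handle_mod_abs() below, the two
+ * MOD_ABS-related bits of MDIO_PMA_REG_PHY_IDENTIFIER work as follows
+ * (a summary of this file's usage, not a datasheet listing): bit 8
+ * cleared arms the interrupt for module presence and bit 8 set arms it
+ * for module absence; bit 9, touched only on non-NOC boards, is
+ * cleared to force the OPTXLOS input low so the EDC stays off and set
+ * to restore the normal OPRXLOS polarity.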
+ * When the EDC is off it locks onto a reference clock and avoids + * becoming 'lost' + */ + mod_abs &= ~(1 << 8); + if (!(phy->flags & ELINK_FLAGS_NOC)) + mod_abs &= ~(1 << 9); + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs); + + /* Enable/Disable PHY transmitter output */ + elink_set_disable_pmd_transmit(params, phy, 0); + + elink_8727_power_module(sc, phy, 1); + + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &tmp1); + + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &tmp1); + + elink_8727_config_speed(phy, params); + + + /* Set TX PreEmphasis if needed */ + if ((params->feature_config_flags & + ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) { + ELINK_DEBUG_P2(sc, "Setting TX_CTRL1 0x%x, TX_CTRL2 0x%x", + phy->tx_preemphasis[0], + phy->tx_preemphasis[1]); + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_TX_CTRL1, + phy->tx_preemphasis[0]); + + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_TX_CTRL2, + phy->tx_preemphasis[1]); + } + + /* If TX Laser is controlled by GPIO_0, do not let PHY go into low + * power mode, if TX Laser is disabled + */ + tx_en_mode = REG_RD(sc, params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[params->port].sfp_ctrl)) + & PORT_HW_CFG_TX_LASER_MASK; + + if (tx_en_mode == PORT_HW_CFG_TX_LASER_GPIO0) { + + ELINK_DEBUG_P0(sc, "Enabling TXONOFF_PWRDN_DIS"); + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_OPT_CFG_REG, &tmp2); + tmp2 |= 0x1000; + tmp2 &= 0xFFEF; + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_OPT_CFG_REG, tmp2); + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, + &tmp2); + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, + (tmp2 & 0x7fff)); + } + + return ELINK_STATUS_OK; +} + +static void elink_8727_handle_mod_abs(struct elink_phy *phy, + struct elink_params *params) +{ + struct bnx2x_softc *sc = params->sc; + uint16_t mod_abs, rx_alarm_status; + uint32_t val = REG_RD(sc, params->shmem_base + + offsetof(struct shmem_region, dev_info. + port_feature_config[params->port]. + config)); + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs); + if (mod_abs & (1 << 8)) { + + /* Module is absent */ + ELINK_DEBUG_P0(sc, + "MOD_ABS indication show module is absent"); + phy->media_type = ELINK_ETH_PHY_NOT_PRESENT; + /* 1. Set mod_abs to detect next module + * presence event + * 2. Set EDC off by setting OPTXLOS signal input to low + * (bit 9). + * When the EDC is off it locks onto a reference clock and + * avoids becoming 'lost'. + */ + mod_abs &= ~(1 << 8); + if (!(phy->flags & ELINK_FLAGS_NOC)) + mod_abs &= ~(1 << 9); + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs); + + /* Clear RX alarm since it stays up as long as + * the mod_abs wasn't changed + */ + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_LASI_RXSTAT, &rx_alarm_status); + + } else { + /* Module is present */ + ELINK_DEBUG_P0(sc, + "MOD_ABS indication show module is present"); + /* First disable transmitter, and if the module is ok, the + * module_detection will enable it + * 1. Set mod_abs to detect next module absent event ( bit 8) + * 2. Restore the default polarity of the OPRXLOS signal and + * this signal will then correctly indicate the presence or + * absence of the Rx signal. 
(bit 9) + */ + mod_abs |= (1 << 8); + if (!(phy->flags & ELINK_FLAGS_NOC)) + mod_abs |= (1 << 9); + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs); + + /* Clear RX alarm since it stays up as long as the mod_abs + * wasn't changed. This is need to be done before calling the + * module detection, otherwise it will clear* the link update + * alarm + */ + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_LASI_RXSTAT, &rx_alarm_status); + + + if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) == + PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER) + elink_sfp_set_transmitter(params, phy, 0); + + if (elink_wait_for_sfp_module_initialized(phy, params) == 0) + elink_sfp_module_detection(phy, params); + else + ELINK_DEBUG_P0(sc, "SFP+ module is not initialized"); + + /* Reconfigure link speed based on module type limitations */ + elink_8727_config_speed(phy, params); + } + + ELINK_DEBUG_P1(sc, "8727 RX_ALARM_STATUS 0x%x", + rx_alarm_status); + /* No need to check link status in case of module plugged in/out */ +} + +static uint8_t elink_8727_read_status(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars) + +{ + struct bnx2x_softc *sc = params->sc; + uint8_t link_up = 0; + uint16_t link_status = 0; + uint16_t rx_alarm_status, lasi_ctrl, val1; + + /* If PHY is not initialized, do not check link status */ + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, + &lasi_ctrl); + if (!lasi_ctrl) + return 0; + + /* Check the LASI on Rx */ + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, + &rx_alarm_status); + vars->line_speed = 0; + ELINK_DEBUG_P1(sc, "8727 RX_ALARM_STATUS 0x%x", rx_alarm_status); + + elink_sfp_mask_fault(sc, phy, MDIO_PMA_LASI_TXSTAT, + MDIO_PMA_LASI_TXCTRL); + + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1); + + ELINK_DEBUG_P1(sc, "8727 LASI status 0x%x", val1); + + /* Clear MSG-OUT */ + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &val1); + + /* If a module is present and there is need to check + * for over current + */ + if (!(phy->flags & ELINK_FLAGS_NOC) && !(rx_alarm_status & (1 << 5))) { + /* Check over-current using 8727 GPIO0 input*/ + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_GPIO_CTRL, + &val1); + + if ((val1 & (1 << 8)) == 0) { + uint8_t oc_port = params->port; + if (!CHIP_IS_E1x(sc)) + oc_port = SC_PATH(sc) + (params->port << 1); + ELINK_DEBUG_P1(sc, + "8727 Power fault has been detected on port %d", + oc_port); + elink_cb_event_log(sc, ELINK_LOG_ID_OVER_CURRENT, + oc_port); + /* "Error: Power fault on Port %d has " + * "been detected and the power to " + * "that SFP+ module has been removed " + * "to prevent failure of the card. 
" + * "Please remove the SFP+ module and " + * "restart the system to clear this " + * "error.", + */ + /* Disable all RX_ALARMs except for mod_abs */ + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_LASI_RXCTRL, (1 << 5)); + + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_PHY_IDENTIFIER, &val1); + /* Wait for module_absent_event */ + val1 |= (1 << 8); + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_PHY_IDENTIFIER, val1); + /* Clear RX alarm */ + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_LASI_RXSTAT, &rx_alarm_status); + elink_8727_power_module(params->sc, phy, 0); + return 0; + } + } /* Over current check */ + + /* When module absent bit is set, check module */ + if (rx_alarm_status & (1 << 5)) { + elink_8727_handle_mod_abs(phy, params); + /* Enable all mod_abs and link detection bits */ + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, + ((1 << 5) | (1 << 2))); + } + + if (!(phy->flags & ELINK_FLAGS_SFP_NOT_APPROVED)) { + ELINK_DEBUG_P0(sc, "Enabling 8727 TX laser"); + elink_sfp_set_transmitter(params, phy, 1); + } else { + ELINK_DEBUG_P0(sc, "Tx is disabled"); + return 0; + } + + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8073_SPEED_LINK_STATUS, &link_status); + + /* Bits 0..2 --> speed detected, + * Bits 13..15--> link is down + */ + if ((link_status & (1 << 2)) && (!(link_status & (1 << 15)))) { + link_up = 1; + vars->line_speed = ELINK_SPEED_10000; + ELINK_DEBUG_P1(sc, "port %x: External link up in 10G", + params->port); + } else if ((link_status & (1 << 0)) && (!(link_status & (1 << 13)))) { + link_up = 1; + vars->line_speed = ELINK_SPEED_1000; + ELINK_DEBUG_P1(sc, "port %x: External link up in 1G", + params->port); + } else { + link_up = 0; + ELINK_DEBUG_P1(sc, "port %x: External link is down", + params->port); + } + + /* Capture 10G link fault. */ + if (vars->line_speed == ELINK_SPEED_10000) { + elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, + MDIO_PMA_LASI_TXSTAT, &val1); + + elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, + MDIO_PMA_LASI_TXSTAT, &val1); + + if (val1 & (1 << 0)) { + vars->fault_detected = 1; + } + } + + if (link_up) { + elink_ext_phy_resolve_fc(phy, params, vars); + vars->duplex = DUPLEX_FULL; + ELINK_DEBUG_P1(sc, "duplex = 0x%x", vars->duplex); + } + + if ((ELINK_DUAL_MEDIA(params)) && + (phy->req_line_speed == ELINK_SPEED_1000)) { + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8727_PCS_GP, &val1); + /* In case of dual-media board and 1G, power up the XAUI side, + * otherwise power it down. 
For 10G it is done automatically + */ + if (link_up) + val1 &= ~(3 << 10); + else + val1 |= (3 << 10); + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8727_PCS_GP, val1); + } + return link_up; +} + +static void elink_8727_link_reset(struct elink_phy *phy, + struct elink_params *params) +{ + struct bnx2x_softc *sc = params->sc; + + /* Enable/Disable PHY transmitter output */ + elink_set_disable_pmd_transmit(params, phy, 1); + + /* Disable Transmitter */ + elink_sfp_set_transmitter(params, phy, 0); + /* Clear LASI */ + elink_cl45_write(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0); + +} + +/******************************************************************/ +/* BNX2X8481/BNX2X84823/BNX2X84833 PHY SECTION */ +/******************************************************************/ +static int elink_is_8483x_8485x(struct elink_phy *phy) +{ + return ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84833) || + (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84834) || + (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84858)); +} + +static void elink_save_848xx_spirom_version(struct elink_phy *phy, + struct bnx2x_softc *sc, + uint8_t port) +{ + uint16_t val, fw_ver2, cnt, i; + static struct elink_reg_set reg_set[] = { + {MDIO_PMA_DEVAD, 0xA819, 0x0014}, + {MDIO_PMA_DEVAD, 0xA81A, 0xc200}, + {MDIO_PMA_DEVAD, 0xA81B, 0x0000}, + {MDIO_PMA_DEVAD, 0xA81C, 0x0300}, + {MDIO_PMA_DEVAD, 0xA817, 0x0009} + }; + uint16_t fw_ver1; + + if (elink_is_8483x_8485x(phy)) { + elink_cl45_read(sc, phy, MDIO_CTL_DEVAD, 0x400f, &fw_ver1); + elink_save_spirom_version(sc, port, fw_ver1 & 0xfff, + phy->ver_addr); + } else { + /* For 32-bit registers in 848xx, access via MDIO2ARM i/f. */ + /* (1) set reg 0xc200_0014(SPI_BRIDGE_CTRL_2) to 0x03000000 */ + for (i = 0; i < ARRAY_SIZE(reg_set); i++) + elink_cl45_write(sc, phy, reg_set[i].devad, + reg_set[i].reg, reg_set[i].val); + + for (cnt = 0; cnt < 100; cnt++) { + elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, 0xA818, &val); + if (val & 1) + break; + DELAY(5); + } + if (cnt == 100) { + ELINK_DEBUG_P0(sc, "Unable to read 848xx " + "phy fw version(1)"); + elink_save_spirom_version(sc, port, 0, + phy->ver_addr); + return; + } + + + /* 2) read register 0xc200_0000 (SPI_FW_STATUS) */ + elink_cl45_write(sc, phy, MDIO_PMA_DEVAD, 0xA819, 0x0000); + elink_cl45_write(sc, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200); + elink_cl45_write(sc, phy, MDIO_PMA_DEVAD, 0xA817, 0x000A); + for (cnt = 0; cnt < 100; cnt++) { + elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, 0xA818, &val); + if (val & 1) + break; + DELAY(5); + } + if (cnt == 100) { + ELINK_DEBUG_P0(sc, "Unable to read 848xx phy fw " + "version(2)"); + elink_save_spirom_version(sc, port, 0, + phy->ver_addr); + return; + } + + /* lower 16 bits of the register SPI_FW_STATUS */ + elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, 0xA81B, &fw_ver1); + /* upper 16 bits of register SPI_FW_STATUS */ + elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, 0xA81C, &fw_ver2); + + elink_save_spirom_version(sc, port, (fw_ver2 << 16) | fw_ver1, + phy->ver_addr); + } + +} +static void elink_848xx_set_led(struct bnx2x_softc *sc, + struct elink_phy *phy) +{ + uint16_t val, offset, i; + static struct elink_reg_set reg_set[] = { + {MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED1_MASK, 0x0080}, + {MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED2_MASK, 0x0018}, + {MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED3_MASK, 0x0006}, + {MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED3_BLINK, 0x0000}, + {MDIO_PMA_DEVAD, MDIO_PMA_REG_84823_CTL_SLOW_CLK_CNT_HIGH, + MDIO_PMA_REG_84823_BLINK_RATE_VAL_15P9HZ}, + {MDIO_AN_DEVAD, 
0xFFFB, 0xFFFD} + }; + /* PHYC_CTL_LED_CTL */ + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LINK_SIGNAL, &val); + val &= 0xFE00; + val |= 0x0092; + + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LINK_SIGNAL, val); + + for (i = 0; i < ARRAY_SIZE(reg_set); i++) + elink_cl45_write(sc, phy, reg_set[i].devad, reg_set[i].reg, + reg_set[i].val); + + if (elink_is_8483x_8485x(phy)) + offset = MDIO_PMA_REG_84833_CTL_LED_CTL_1; + else + offset = MDIO_PMA_REG_84823_CTL_LED_CTL_1; + + /* stretch_en for LED3*/ + elink_cl45_read_or_write(sc, phy, + MDIO_PMA_DEVAD, offset, + MDIO_PMA_REG_84823_LED3_STRETCH_EN); +} + +static void elink_848xx_specific_func(struct elink_phy *phy, + struct elink_params *params, + uint32_t action) +{ + struct bnx2x_softc *sc = params->sc; + switch (action) { + case ELINK_PHY_INIT: + if (!elink_is_8483x_8485x(phy)) { + /* Save spirom version */ + elink_save_848xx_spirom_version(phy, sc, params->port); + } + /* This phy uses the NIG latch mechanism since link indication + * arrives through its LED4 and not via its LASI signal, so we + * get steady signal instead of clear on read + */ + elink_bits_en(sc, NIG_REG_LATCH_BC_0 + params->port * 4, + 1 << ELINK_NIG_LATCH_BC_ENABLE_MI_INT); + + elink_848xx_set_led(sc, phy); + break; + } +} + +static elink_status_t elink_848xx_cmn_config_init(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars) +{ + struct bnx2x_softc *sc = params->sc; + uint16_t autoneg_val, an_1000_val, an_10_100_val; + + elink_848xx_specific_func(phy, params, ELINK_PHY_INIT); + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x0000); + + /* set 1000 speed advertisement */ + elink_cl45_read(sc, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_8481_1000T_CTRL, + &an_1000_val); + + elink_ext_phy_set_pause(params, phy, vars); + elink_cl45_read(sc, phy, + MDIO_AN_DEVAD, + MDIO_AN_REG_8481_LEGACY_AN_ADV, + &an_10_100_val); + elink_cl45_read(sc, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_8481_LEGACY_MII_CTRL, + &autoneg_val); + /* Disable forced speed */ + autoneg_val &= ~((1 << 6) | (1 << 8) | (1 << 9) | (1 << 12) | + (1 << 13)); + an_10_100_val &= ~((1 << 5) | (1 << 6) | (1 << 7) | (1 << 8)); + + if (((phy->req_line_speed == ELINK_SPEED_AUTO_NEG) && + (phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) || + (phy->req_line_speed == ELINK_SPEED_1000)) { + an_1000_val |= (1 << 8); + autoneg_val |= (1 << 9 | 1 << 12); + if (phy->req_duplex == DUPLEX_FULL) + an_1000_val |= (1 << 9); + ELINK_DEBUG_P0(sc, "Advertising 1G"); + } else + an_1000_val &= ~((1 << 8) | (1 << 9)); + + elink_cl45_write(sc, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_8481_1000T_CTRL, + an_1000_val); + + /* Set 10/100 speed advertisement */ + if (phy->req_line_speed == ELINK_SPEED_AUTO_NEG) { + if (phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL) { + /* Enable autoneg and restart autoneg for legacy speeds + */ + autoneg_val |= (1 << 9 | 1 << 12); + an_10_100_val |= (1 << 8); + ELINK_DEBUG_P0(sc, "Advertising 100M-FD"); + } + + if (phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF) { + /* Enable autoneg and restart autoneg for legacy speeds + */ + autoneg_val |= (1 << 9 | 1 << 12); + an_10_100_val |= (1 << 7); + ELINK_DEBUG_P0(sc, "Advertising 100M-HD"); + } + + if ((phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL) && + (phy->supported & ELINK_SUPPORTED_10baseT_Full)) { + an_10_100_val |= (1 << 6); + autoneg_val |= (1 << 9 | 1 << 12); + ELINK_DEBUG_P0(sc, "Advertising 10M-FD"); + } + + if 
((phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF) && + (phy->supported & ELINK_SUPPORTED_10baseT_Half)) { + an_10_100_val |= (1 << 5); + autoneg_val |= (1 << 9 | 1 << 12); + ELINK_DEBUG_P0(sc, "Advertising 10M-HD"); + } + } + + /* Only 10/100 are allowed to work in FORCE mode */ + if ((phy->req_line_speed == ELINK_SPEED_100) && + (phy->supported & + (ELINK_SUPPORTED_100baseT_Half | + ELINK_SUPPORTED_100baseT_Full))) { + autoneg_val |= (1 << 13); + /* Enabled AUTO-MDIX when autoneg is disabled */ + elink_cl45_write(sc, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_8481_AUX_CTRL, + (1 << 15 | 1 << 9 | 7 << 0)); + /* The PHY needs this set even for forced link. */ + an_10_100_val |= (1 << 8) | (1 << 7); + ELINK_DEBUG_P0(sc, "Setting 100M force"); + } + if ((phy->req_line_speed == ELINK_SPEED_10) && + (phy->supported & + (ELINK_SUPPORTED_10baseT_Half | + ELINK_SUPPORTED_10baseT_Full))) { + /* Enabled AUTO-MDIX when autoneg is disabled */ + elink_cl45_write(sc, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_8481_AUX_CTRL, + (1 << 15 | 1 << 9 | 7 << 0)); + ELINK_DEBUG_P0(sc, "Setting 10M force"); + } + + elink_cl45_write(sc, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_8481_LEGACY_AN_ADV, + an_10_100_val); + + if (phy->req_duplex == DUPLEX_FULL) + autoneg_val |= (1 << 8); + + /* Always write this if this is not 84833/4. + * For 84833/4, write it only when it's a forced speed. + */ + if (!elink_is_8483x_8485x(phy) || + ((autoneg_val & (1 << 12)) == 0)) + elink_cl45_write(sc, phy, + MDIO_AN_DEVAD, + MDIO_AN_REG_8481_LEGACY_MII_CTRL, autoneg_val); + + if (((phy->req_line_speed == ELINK_SPEED_AUTO_NEG) && + (phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) || + (phy->req_line_speed == ELINK_SPEED_10000)) { + ELINK_DEBUG_P0(sc, "Advertising 10G"); + /* Restart autoneg for 10G*/ + + elink_cl45_read_or_write( + sc, phy, + MDIO_AN_DEVAD, + MDIO_AN_REG_8481_10GBASE_T_AN_CTRL, + 0x1000); + elink_cl45_write(sc, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, + 0x3200); + } else + elink_cl45_write(sc, phy, + MDIO_AN_DEVAD, + MDIO_AN_REG_8481_10GBASE_T_AN_CTRL, + 1); + + return ELINK_STATUS_OK; +} + +static uint8_t elink_8481_config_init(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars) +{ + struct bnx2x_softc *sc = params->sc; + /* Restore normal power mode*/ + elink_cb_gpio_write(sc, MISC_REGISTERS_GPIO_2, + MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port); + + /* HW reset */ + elink_ext_phy_hw_reset(sc, params->port); + elink_wait_reset_complete(sc, phy, params); + + elink_cl45_write(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1 << 15); + return elink_848xx_cmn_config_init(phy, params, vars); +} + +#define PHY848xx_CMDHDLR_WAIT 300 +#define PHY848xx_CMDHDLR_MAX_ARGS 5 + +static elink_status_t elink_84858_cmd_hdlr(struct elink_phy *phy, + struct elink_params *params, + uint16_t fw_cmd, + uint16_t cmd_args[], int argc) +{ + int idx; + uint16_t val; + struct bnx2x_softc *sc = params->sc; + + /* Step 1: Poll the STATUS register to see whether the previous command + * is in progress or the system is busy (CMD_IN_PROGRESS or + * SYSTEM_BUSY). 
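+ * In short, the mailbox exchange implemented by this function and by
+ * elink_84833_cmd_hdlr() below is: poll MDIO_848xx_CMD_HDLR_STATUS
+ * until the handler is idle, write any arguments to the registers
+ * starting at MDIO_848xx_CMD_HDLR_DATA1, write the command code to
+ * MDIO_848xx_CMD_HDLR_COMMAND, poll the STATUS register again until it
+ * reports a PASS or ERROR completion, then read the DATA registers
+ * back for any returned results.  Each poll loop gives up after
+ * PHY848xx_CMDHDLR_WAIT iterations of the 1 ms delay used below,
+ * i.e. roughly 300 ms.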
If previous command is in progress or system is busy, + * check again until the previous command finishes execution and the + * system is available for taking command + */ + + for (idx = 0; idx < PHY848xx_CMDHDLR_WAIT; idx++) { + elink_cl45_read(sc, phy, MDIO_CTL_DEVAD, + MDIO_848xx_CMD_HDLR_STATUS, &val); + if ((val != PHY84858_STATUS_CMD_IN_PROGRESS) && + (val != PHY84858_STATUS_CMD_SYSTEM_BUSY)) + break; + DELAY(1000 * 1); + } + if (idx >= PHY848xx_CMDHDLR_WAIT) { + ELINK_DEBUG_P0(sc, "FW cmd: FW not ready."); + return ELINK_STATUS_ERROR; + } + + /* Step2: If any parameters are required for the function, write them + * to the required DATA registers + */ + + for (idx = 0; idx < argc; idx++) { + elink_cl45_write(sc, phy, MDIO_CTL_DEVAD, + MDIO_848xx_CMD_HDLR_DATA1 + idx, + cmd_args[idx]); + } + + /* Step3: When the firmware is ready for commands, write the 'Command + * code' to the CMD register + */ + elink_cl45_write(sc, phy, MDIO_CTL_DEVAD, + MDIO_848xx_CMD_HDLR_COMMAND, fw_cmd); + + /* Step4: Once the command has been written, poll the STATUS register + * to check whether the command has completed (CMD_COMPLETED_PASS/ + * CMD_FOR_CMDS or CMD_COMPLETED_ERROR). + */ + + for (idx = 0; idx < PHY848xx_CMDHDLR_WAIT; idx++) { + elink_cl45_read(sc, phy, MDIO_CTL_DEVAD, + MDIO_848xx_CMD_HDLR_STATUS, &val); + if ((val == PHY84858_STATUS_CMD_COMPLETE_PASS) || + (val == PHY84858_STATUS_CMD_COMPLETE_ERROR)) + break; + DELAY(1000 * 1); + } + if ((idx >= PHY848xx_CMDHDLR_WAIT) || + (val == PHY84858_STATUS_CMD_COMPLETE_ERROR)) { + ELINK_DEBUG_P0(sc, "FW cmd failed."); + return ELINK_STATUS_ERROR; + } + /* Step5: Once the command has completed, read the specficied DATA + * registers for any saved results for the command, if applicable + */ + + /* Gather returning data */ + for (idx = 0; idx < argc; idx++) { + elink_cl45_read(sc, phy, MDIO_CTL_DEVAD, + MDIO_848xx_CMD_HDLR_DATA1 + idx, + &cmd_args[idx]); + } + + return ELINK_STATUS_OK; +} + +static elink_status_t elink_84833_cmd_hdlr(struct elink_phy *phy, + struct elink_params *params, uint16_t fw_cmd, + uint16_t cmd_args[], int argc, int process) +{ + int idx; + uint16_t val; + struct bnx2x_softc *sc = params->sc; + elink_status_t rc = ELINK_STATUS_OK; + + if (process == PHY84833_MB_PROCESS2) { + /* Write CMD_OPEN_OVERRIDE to STATUS reg */ + elink_cl45_write(sc, phy, MDIO_CTL_DEVAD, + MDIO_848xx_CMD_HDLR_STATUS, + PHY84833_STATUS_CMD_OPEN_OVERRIDE); + } + + for (idx = 0; idx < PHY848xx_CMDHDLR_WAIT; idx++) { + elink_cl45_read(sc, phy, MDIO_CTL_DEVAD, + MDIO_848xx_CMD_HDLR_STATUS, &val); + if (val == PHY84833_STATUS_CMD_OPEN_FOR_CMDS) + break; + DELAY(1000 * 1); + } + if (idx >= PHY848xx_CMDHDLR_WAIT) { + ELINK_DEBUG_P0(sc, "FW cmd: FW not ready."); + /* if the status is CMD_COMPLETE_PASS or CMD_COMPLETE_ERROR + * clear the status to CMD_CLEAR_COMPLETE + */ + if (val == PHY84833_STATUS_CMD_COMPLETE_PASS || + val == PHY84833_STATUS_CMD_COMPLETE_ERROR) { + elink_cl45_write(sc, phy, MDIO_CTL_DEVAD, + MDIO_848xx_CMD_HDLR_STATUS, + PHY84833_STATUS_CMD_CLEAR_COMPLETE); + } + return ELINK_STATUS_ERROR; + } + if (process == PHY84833_MB_PROCESS1 || + process == PHY84833_MB_PROCESS2) { + /* Prepare argument(s) */ + for (idx = 0; idx < argc; idx++) { + elink_cl45_write(sc, phy, MDIO_CTL_DEVAD, + MDIO_848xx_CMD_HDLR_DATA1 + idx, + cmd_args[idx]); + } + } + + /* Issue command */ + elink_cl45_write(sc, phy, MDIO_CTL_DEVAD, + MDIO_848xx_CMD_HDLR_COMMAND, fw_cmd); + for (idx = 0; idx < PHY848xx_CMDHDLR_WAIT; idx++) { + elink_cl45_read(sc, phy, MDIO_CTL_DEVAD, + 
MDIO_848xx_CMD_HDLR_STATUS, &val); + if ((val == PHY84833_STATUS_CMD_COMPLETE_PASS) || + (val == PHY84833_STATUS_CMD_COMPLETE_ERROR)) + break; + DELAY(1000 * 1); + } + if ((idx >= PHY848xx_CMDHDLR_WAIT) || + (val == PHY84833_STATUS_CMD_COMPLETE_ERROR)) { + ELINK_DEBUG_P0(sc, "FW cmd failed."); + rc = ELINK_STATUS_ERROR; + } + if (process == PHY84833_MB_PROCESS3 && rc == ELINK_STATUS_OK) { + /* Gather returning data */ + for (idx = 0; idx < argc; idx++) { + elink_cl45_read(sc, phy, MDIO_CTL_DEVAD, + MDIO_848xx_CMD_HDLR_DATA1 + idx, + &cmd_args[idx]); + } + } + if (val == PHY84833_STATUS_CMD_COMPLETE_ERROR || + val == PHY84833_STATUS_CMD_COMPLETE_PASS) { + elink_cl45_write(sc, phy, MDIO_CTL_DEVAD, + MDIO_848xx_CMD_HDLR_STATUS, + PHY84833_STATUS_CMD_CLEAR_COMPLETE); + } + return rc; +} + +static elink_status_t elink_848xx_cmd_hdlr(struct elink_phy *phy, + struct elink_params *params, + uint16_t fw_cmd, + uint16_t cmd_args[], int argc, + int process) +{ + struct bnx2x_softc *sc = params->sc; + + if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84858) || + (REG_RD(sc, params->shmem2_base + + offsetof(struct shmem2_region, + link_attr_sync[params->port])) & + LINK_ATTR_84858)) { + return elink_84858_cmd_hdlr(phy, params, fw_cmd, cmd_args, + argc); + } else { + return elink_84833_cmd_hdlr(phy, params, fw_cmd, cmd_args, + argc, process); + } +} + +static elink_status_t elink_848xx_pair_swap_cfg(struct elink_phy *phy, + struct elink_params *params, + __rte_unused struct elink_vars *vars) +{ + uint32_t pair_swap; + uint16_t data[PHY848xx_CMDHDLR_MAX_ARGS]; + elink_status_t status; + struct bnx2x_softc *sc = params->sc; + + /* Check for configuration. */ + pair_swap = REG_RD(sc, params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[params->port].xgbt_phy_cfg)) & + PORT_HW_CFG_RJ45_PAIR_SWAP_MASK; + + if (pair_swap == 0) + return ELINK_STATUS_OK; + + /* Only the second argument is used for this command */ + data[1] = (uint16_t)pair_swap; + + status = elink_848xx_cmd_hdlr(phy, params, + PHY848xx_CMD_SET_PAIR_SWAP, data, + 2, PHY84833_MB_PROCESS2); + if (status == ELINK_STATUS_OK) + ELINK_DEBUG_P1(sc, "Pairswap OK, val=0x%x", data[1]); + + return status; +} + +static uint8_t elink_84833_get_reset_gpios(struct bnx2x_softc *sc, + uint32_t shmem_base_path[], + __rte_unused uint32_t chip_id) +{ + uint32_t reset_pin[2]; + uint32_t idx; + uint8_t reset_gpios; + if (CHIP_IS_E3(sc)) { + /* Assume that these will be GPIOs, not EPIOs. */ + for (idx = 0; idx < 2; idx++) { + /* Map config param to register bit. */ + reset_pin[idx] = REG_RD(sc, shmem_base_path[idx] + + offsetof(struct shmem_region, + dev_info.port_hw_config[0].e3_cmn_pin_cfg)); + reset_pin[idx] = (reset_pin[idx] & + PORT_HW_CFG_E3_PHY_RESET_MASK) >> + PORT_HW_CFG_E3_PHY_RESET_SHIFT; + reset_pin[idx] -= PIN_CFG_GPIO0_P0; + reset_pin[idx] = (1 << reset_pin[idx]); + } + reset_gpios = (uint8_t)(reset_pin[0] | reset_pin[1]); + } else { + /* E2, look from diff place of shmem. 
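+ * Both the E3 branch above and the E2 branch below turn the per-path
+ * reset-pin selection from shmem into a one-hot GPIO mask: the
+ * configured value is reduced to a plain GPIO index and then shifted,
+ * so (illustratively) a path whose reset pin is GPIO2 contributes
+ * (1 << 2), and the two paths are OR-ed together into reset_gpios.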
*/ + for (idx = 0; idx < 2; idx++) { + reset_pin[idx] = REG_RD(sc, shmem_base_path[idx] + + offsetof(struct shmem_region, + dev_info.port_hw_config[0].default_cfg)); + reset_pin[idx] &= PORT_HW_CFG_EXT_PHY_GPIO_RST_MASK; + reset_pin[idx] -= PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P0; + reset_pin[idx] >>= PORT_HW_CFG_EXT_PHY_GPIO_RST_SHIFT; + reset_pin[idx] = (1 << reset_pin[idx]); + } + reset_gpios = (uint8_t)(reset_pin[0] | reset_pin[1]); + } + + return reset_gpios; +} + +static void elink_84833_hw_reset_phy(struct elink_phy *phy, + struct elink_params *params) +{ + struct bnx2x_softc *sc = params->sc; + uint8_t reset_gpios; + uint32_t other_shmem_base_addr = REG_RD(sc, params->shmem2_base + + offsetof(struct shmem2_region, + other_shmem_base_addr)); + + uint32_t shmem_base_path[2]; + + /* Work around for 84833 LED failure inside RESET status */ + elink_cl45_write(sc, phy, MDIO_AN_DEVAD, + MDIO_AN_REG_8481_LEGACY_MII_CTRL, + MDIO_AN_REG_8481_MII_CTRL_FORCE_1G); + elink_cl45_write(sc, phy, MDIO_AN_DEVAD, + MDIO_AN_REG_8481_1G_100T_EXT_CTRL, + MIDO_AN_REG_8481_EXT_CTRL_FORCE_LEDS_OFF); + + shmem_base_path[0] = params->shmem_base; + shmem_base_path[1] = other_shmem_base_addr; + + reset_gpios = elink_84833_get_reset_gpios(sc, shmem_base_path, + params->chip_id); + + elink_cb_gpio_mult_write(sc, reset_gpios, + MISC_REGISTERS_GPIO_OUTPUT_LOW); + DELAY(10); + ELINK_DEBUG_P1(sc, "84833 hw reset on pin values 0x%x", + reset_gpios); +} + +static elink_status_t elink_8483x_disable_eee(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars) +{ + elink_status_t rc; + struct bnx2x_softc *sc = params->sc; + uint16_t cmd_args = 0; + + ELINK_DEBUG_P0(sc, "Don't Advertise 10GBase-T EEE"); + + /* Prevent Phy from working in EEE and advertising it */ + rc = elink_848xx_cmd_hdlr(phy, params, PHY848xx_CMD_SET_EEE_MODE, + &cmd_args, 1, PHY84833_MB_PROCESS1); + if (rc != ELINK_STATUS_OK) { + ELINK_DEBUG_P0(sc, "EEE disable failed."); + return rc; + } + + return elink_eee_disable(phy, params, vars); +} + +static elink_status_t elink_8483x_enable_eee(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars) +{ + elink_status_t rc; + struct bnx2x_softc *sc = params->sc; + uint16_t cmd_args = 1; + + rc = elink_848xx_cmd_hdlr(phy, params, PHY848xx_CMD_SET_EEE_MODE, + &cmd_args, 1, PHY84833_MB_PROCESS1); + if (rc != ELINK_STATUS_OK) { + ELINK_DEBUG_P0(sc, "EEE enable failed."); + return rc; + } + + return elink_eee_advertise(phy, params, vars, SHMEM_EEE_10G_ADV); +} + +#define PHY84833_CONSTANT_LATENCY 1193 +static uint8_t elink_848x3_config_init(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars) +{ + struct bnx2x_softc *sc = params->sc; + uint8_t port, initialize = 1; + uint16_t val; + uint32_t actual_phy_selection; + uint16_t cmd_args[PHY848xx_CMDHDLR_MAX_ARGS]; + elink_status_t rc = ELINK_STATUS_OK; + + DELAY(1000 * 1); + + if (!(CHIP_IS_E1x(sc))) + port = SC_PATH(sc); + else + port = params->port; + + if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84823) { + elink_cb_gpio_write(sc, MISC_REGISTERS_GPIO_3, + MISC_REGISTERS_GPIO_OUTPUT_HIGH, + port); + } else { + /* MDIO reset */ + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_CTRL, 0x8000); + } + + elink_wait_reset_complete(sc, phy, params); + + /* Wait for GPHY to come out of reset */ + DELAY(1000 * 50); + if (!elink_is_8483x_8485x(phy)) { + /* BNX2X84823 requires that XGXS links up first @ 10G for normal + * behavior. 
+ */ + uint16_t temp; + temp = vars->line_speed; + vars->line_speed = ELINK_SPEED_10000; + elink_set_autoneg(¶ms->phy[ELINK_INT_PHY], params, vars, 0); + elink_program_serdes(¶ms->phy[ELINK_INT_PHY], params, vars); + vars->line_speed = temp; + } + /* Check if this is actually BNX2X84858 */ + if (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84858) { + uint16_t hw_rev; + + elink_cl45_read(sc, phy, MDIO_AN_DEVAD, + MDIO_AN_REG_848xx_ID_MSB, &hw_rev); + if (hw_rev == BNX2X84858_PHY_ID) { + params->link_attr_sync |= LINK_ATTR_84858; + elink_update_link_attr(params, params->link_attr_sync); + } + } + + /* Set dual-media configuration according to configuration */ + elink_cl45_read(sc, phy, MDIO_CTL_DEVAD, + MDIO_CTL_REG_84823_MEDIA, &val); + val &= ~(MDIO_CTL_REG_84823_MEDIA_MAC_MASK | + MDIO_CTL_REG_84823_MEDIA_LINE_MASK | + MDIO_CTL_REG_84823_MEDIA_COPPER_CORE_DOWN | + MDIO_CTL_REG_84823_MEDIA_PRIORITY_MASK | + MDIO_CTL_REG_84823_MEDIA_FIBER_1G); + + if (CHIP_IS_E3(sc)) { + val &= ~(MDIO_CTL_REG_84823_MEDIA_MAC_MASK | + MDIO_CTL_REG_84823_MEDIA_LINE_MASK); + } else { + val |= (MDIO_CTL_REG_84823_CTRL_MAC_XFI | + MDIO_CTL_REG_84823_MEDIA_LINE_XAUI_L); + } + + actual_phy_selection = elink_phy_selection(params); + + switch (actual_phy_selection) { + case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT: + /* Do nothing. Essentially this is like the priority copper */ + break; + case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY: + val |= MDIO_CTL_REG_84823_MEDIA_PRIORITY_COPPER; + break; + case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY: + val |= MDIO_CTL_REG_84823_MEDIA_PRIORITY_FIBER; + break; + case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY: + /* Do nothing here. The first PHY won't be initialized at all */ + break; + case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY: + val |= MDIO_CTL_REG_84823_MEDIA_COPPER_CORE_DOWN; + initialize = 0; + break; + } + if (params->phy[ELINK_EXT_PHY2].req_line_speed == ELINK_SPEED_1000) + val |= MDIO_CTL_REG_84823_MEDIA_FIBER_1G; + + elink_cl45_write(sc, phy, MDIO_CTL_DEVAD, + MDIO_CTL_REG_84823_MEDIA, val); + ELINK_DEBUG_P2(sc, "Multi_phy config = 0x%x, Media control = 0x%x", + params->multi_phy_config, val); + + if (elink_is_8483x_8485x(phy)) { + elink_848xx_pair_swap_cfg(phy, params, vars); + + /* Keep AutogrEEEn disabled. */ + cmd_args[0] = 0x0; + cmd_args[1] = 0x0; + cmd_args[2] = PHY84833_CONSTANT_LATENCY + 1; + cmd_args[3] = PHY84833_CONSTANT_LATENCY; + rc = elink_848xx_cmd_hdlr(phy, params, + PHY848xx_CMD_SET_EEE_MODE, cmd_args, + 4, PHY84833_MB_PROCESS1); + if (rc != ELINK_STATUS_OK) + ELINK_DEBUG_P0(sc, "Cfg AutogrEEEn failed."); + } + if (initialize) + rc = elink_848xx_cmn_config_init(phy, params, vars); + else + elink_save_848xx_spirom_version(phy, sc, params->port); + /* 84833 PHY has a better feature and doesn't need to support this. 
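+ * ("this" being the CMS control: only the BNX2X84823 path below
+ * mirrors the PORT_HW_CFG_ENABLE_CMS option from shmem into the CMS
+ * bit of MDIO_CTL_REG_84823_USER_CTRL_REG.)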
*/ + if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84823) { + uint32_t cms_enable = REG_RD(sc, params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[params->port].default_cfg)) & + PORT_HW_CFG_ENABLE_CMS_MASK; + + elink_cl45_read(sc, phy, MDIO_CTL_DEVAD, + MDIO_CTL_REG_84823_USER_CTRL_REG, &val); + if (cms_enable) + val |= MDIO_CTL_REG_84823_USER_CTRL_CMS; + else + val &= ~MDIO_CTL_REG_84823_USER_CTRL_CMS; + elink_cl45_write(sc, phy, MDIO_CTL_DEVAD, + MDIO_CTL_REG_84823_USER_CTRL_REG, val); + } + + elink_cl45_read(sc, phy, MDIO_CTL_DEVAD, + MDIO_84833_TOP_CFG_FW_REV, &val); + + /* Configure EEE support */ + if ((val >= MDIO_84833_TOP_CFG_FW_EEE) && + (val != MDIO_84833_TOP_CFG_FW_NO_EEE) && + elink_eee_has_cap(params)) { + rc = elink_eee_initial_config(params, vars, SHMEM_EEE_10G_ADV); + if (rc != ELINK_STATUS_OK) { + ELINK_DEBUG_P0(sc, "Failed to configure EEE timers"); + elink_8483x_disable_eee(phy, params, vars); + return rc; + } + + if ((phy->req_duplex == DUPLEX_FULL) && + (params->eee_mode & ELINK_EEE_MODE_ADV_LPI) && + (elink_eee_calc_timer(params) || + !(params->eee_mode & ELINK_EEE_MODE_ENABLE_LPI))) + rc = elink_8483x_enable_eee(phy, params, vars); + else + rc = elink_8483x_disable_eee(phy, params, vars); + if (rc != ELINK_STATUS_OK) { + ELINK_DEBUG_P0(sc, "Failed to set EEE advertisement"); + return rc; + } + } else { + vars->eee_status &= ~SHMEM_EEE_SUPPORTED_MASK; + } + + if (elink_is_8483x_8485x(phy)) { + /* Bring PHY out of super isolate mode as the final step. */ + elink_cl45_read_and_write(sc, phy, + MDIO_CTL_DEVAD, + MDIO_84833_TOP_CFG_XGPHY_STRAP1, + (uint16_t)~MDIO_84833_SUPER_ISOLATE); + } + return rc; +} + +static uint8_t elink_848xx_read_status(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars) +{ + struct bnx2x_softc *sc = params->sc; + uint16_t val, val1, val2; + uint8_t link_up = 0; + + + /* Check 10G-BaseT link status */ + /* Check PMD signal ok */ + elink_cl45_read(sc, phy, + MDIO_AN_DEVAD, 0xFFFA, &val1); + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_PMD_SIGNAL, + &val2); + ELINK_DEBUG_P1(sc, "BNX2X848xx: PMD_SIGNAL 1.a811 = 0x%x", val2); + + /* Check link 10G */ + if (val2 & (1 << 11)) { + vars->line_speed = ELINK_SPEED_10000; + vars->duplex = DUPLEX_FULL; + link_up = 1; + elink_ext_phy_10G_an_resolve(sc, phy, vars); + } else { /* Check Legacy speed link */ + uint16_t legacy_status, legacy_speed; + + /* Enable expansion register 0x42 (Operation mode status) */ + elink_cl45_write(sc, phy, + MDIO_AN_DEVAD, + MDIO_AN_REG_8481_EXPANSION_REG_ACCESS, 0xf42); + + /* Get legacy speed operation status */ + elink_cl45_read(sc, phy, + MDIO_AN_DEVAD, + MDIO_AN_REG_8481_EXPANSION_REG_RD_RW, + &legacy_status); + + ELINK_DEBUG_P1(sc, "Legacy speed status = 0x%x", + legacy_status); + link_up = ((legacy_status & (1 << 11)) == (1 << 11)); + legacy_speed = (legacy_status & (3 << 9)); + if (legacy_speed == (0 << 9)) + vars->line_speed = ELINK_SPEED_10; + else if (legacy_speed == (1 << 9)) + vars->line_speed = ELINK_SPEED_100; + else if (legacy_speed == (2 << 9)) + vars->line_speed = ELINK_SPEED_1000; + else { /* Should not happen: Treat as link down */ + vars->line_speed = 0; + link_up = 0; + } + + if (params->feature_config_flags & + ELINK_FEATURE_CONFIG_IEEE_PHY_TEST) { + uint16_t mii_ctrl; + + elink_cl45_read(sc, phy, + MDIO_AN_DEVAD, + MDIO_AN_REG_8481_LEGACY_MII_CTRL, + &mii_ctrl); + /* For IEEE testing, check for a fake link. 
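+ * For reference, the legacy status word read above is decoded in this
+ * block as: bit 11 = copper link up, bits 10:9 = 00 for 10M, 01 for
+ * 100M, 10 for 1000M (any other value is treated as link down), and
+ * bit 8 = full duplex.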
*/ + link_up |= ((mii_ctrl & 0x3040) == 0x40); + } + + if (link_up) { + if (legacy_status & (1 << 8)) + vars->duplex = DUPLEX_FULL; + else + vars->duplex = DUPLEX_HALF; + + ELINK_DEBUG_P2(sc, + "Link is up in %dMbps, is_duplex_full= %d", + vars->line_speed, + (vars->duplex == DUPLEX_FULL)); + /* Check legacy speed AN resolution */ + elink_cl45_read(sc, phy, + MDIO_AN_DEVAD, + MDIO_AN_REG_8481_LEGACY_MII_STATUS, + &val); + if (val & (1 << 5)) + vars->link_status |= + LINK_STATUS_AUTO_NEGOTIATE_COMPLETE; + elink_cl45_read(sc, phy, + MDIO_AN_DEVAD, + MDIO_AN_REG_8481_LEGACY_AN_EXPANSION, + &val); + if ((val & (1 << 0)) == 0) + vars->link_status |= + LINK_STATUS_PARALLEL_DETECTION_USED; + } + } + if (link_up) { + ELINK_DEBUG_P1(sc, "BNX2X848x3: link speed is %d", + vars->line_speed); + elink_ext_phy_resolve_fc(phy, params, vars); + + /* Read LP advertised speeds */ + elink_cl45_read(sc, phy, MDIO_AN_DEVAD, + MDIO_AN_REG_CL37_FC_LP, &val); + if (val & (1 << 5)) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_10THD_CAPABLE; + if (val & (1 << 6)) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_10TFD_CAPABLE; + if (val & (1 << 7)) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_100TXHD_CAPABLE; + if (val & (1 << 8)) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_100TXFD_CAPABLE; + if (val & (1 << 9)) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_100T4_CAPABLE; + + elink_cl45_read(sc, phy, MDIO_AN_DEVAD, + MDIO_AN_REG_1000T_STATUS, &val); + + if (val & (1 << 10)) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE; + if (val & (1 << 11)) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE; + + elink_cl45_read(sc, phy, MDIO_AN_DEVAD, + MDIO_AN_REG_MASTER_STATUS, &val); + + if (val & (1 << 11)) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE; + + /* Determine if EEE was negotiated */ + if (elink_is_8483x_8485x(phy)) + elink_eee_an_resolve(phy, params, vars); + } + + return link_up; +} + +static elink_status_t elink_848xx_format_ver(uint32_t raw_ver, uint8_t *str, + uint16_t *len) +{ + elink_status_t status = ELINK_STATUS_OK; + uint32_t spirom_ver; + spirom_ver = ((raw_ver & 0xF80) >> 7) << 16 | (raw_ver & 0x7F); + status = elink_format_ver(spirom_ver, str, len); + return status; +} + +static void elink_8481_hw_reset(__rte_unused struct elink_phy *phy, + struct elink_params *params) +{ + elink_cb_gpio_write(params->sc, MISC_REGISTERS_GPIO_1, + MISC_REGISTERS_GPIO_OUTPUT_LOW, 0); + elink_cb_gpio_write(params->sc, MISC_REGISTERS_GPIO_1, + MISC_REGISTERS_GPIO_OUTPUT_LOW, 1); +} + +static void elink_8481_link_reset(struct elink_phy *phy, + struct elink_params *params) +{ + elink_cl45_write(params->sc, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x0000); + elink_cl45_write(params->sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1); +} + +static void elink_848x3_link_reset(struct elink_phy *phy, + struct elink_params *params) +{ + struct bnx2x_softc *sc = params->sc; + uint8_t port; + uint16_t val16; + + if (!(CHIP_IS_E1x(sc))) + port = SC_PATH(sc); + else + port = params->port; + + if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84823) { + elink_cb_gpio_write(sc, MISC_REGISTERS_GPIO_3, + MISC_REGISTERS_GPIO_OUTPUT_LOW, + port); + } else { + elink_cl45_read(sc, phy, + MDIO_CTL_DEVAD, + MDIO_84833_TOP_CFG_XGPHY_STRAP1, &val16); + val16 |= MDIO_84833_SUPER_ISOLATE; + elink_cl45_write(sc, phy, + MDIO_CTL_DEVAD, + MDIO_84833_TOP_CFG_XGPHY_STRAP1, val16); + } +} + +static void elink_848xx_set_link_led(struct elink_phy *phy, + struct elink_params *params, 
uint8_t mode) +{ + struct bnx2x_softc *sc = params->sc; + uint16_t val; + uint8_t port; + + if (!(CHIP_IS_E1x(sc))) + port = SC_PATH(sc); + else + port = params->port; + switch (mode) { + case ELINK_LED_MODE_OFF: + + ELINK_DEBUG_P1(sc, "Port 0x%x: LED MODE OFF", port); + + if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) == + SHARED_HW_CFG_LED_EXTPHY1) { + + /* Set LED masks */ + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED1_MASK, + 0x0); + + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED2_MASK, + 0x0); + + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED3_MASK, + 0x0); + + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED5_MASK, + 0x0); + + } else { + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED1_MASK, + 0x0); + } + break; + case ELINK_LED_MODE_FRONT_PANEL_OFF: + + ELINK_DEBUG_P1(sc, "Port 0x%x: LED MODE FRONT PANEL OFF", + port); + + if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) == + SHARED_HW_CFG_LED_EXTPHY1) { + + /* Set LED masks */ + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED1_MASK, + 0x0); + + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED2_MASK, + 0x0); + + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED3_MASK, + 0x0); + + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED5_MASK, + 0x20); + + } else { + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED1_MASK, + 0x0); + if (phy->type == + PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84834) { + /* Disable MI_INT interrupt before setting LED4 + * source to constant off. + */ + if (REG_RD(sc, NIG_REG_MASK_INTERRUPT_PORT0 + + params->port * 4) & + ELINK_NIG_MASK_MI_INT) { + params->link_flags |= + ELINK_LINK_FLAGS_INT_DISABLED; + + elink_bits_dis( + sc, + NIG_REG_MASK_INTERRUPT_PORT0 + + params->port * 4, + ELINK_NIG_MASK_MI_INT); + } + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_SIGNAL_MASK, + 0x0); + } + } + break; + case ELINK_LED_MODE_ON: + + ELINK_DEBUG_P1(sc, "Port 0x%x: LED MODE ON", port); + + if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) == + SHARED_HW_CFG_LED_EXTPHY1) { + /* Set control reg */ + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LINK_SIGNAL, + &val); + val &= 0x8000; + val |= 0x2492; + + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LINK_SIGNAL, + val); + + /* Set LED masks */ + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED1_MASK, + 0x0); + + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED2_MASK, + 0x20); + + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED3_MASK, + 0x20); + + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED5_MASK, + 0x0); + } else { + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED1_MASK, + 0x20); + if (phy->type == + PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84834) { + /* Disable MI_INT interrupt before setting LED4 + * source to constant on. 
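+ * The ELINK_LINK_FLAGS_INT_DISABLED flag set here (and in the
+ * FRONT_PANEL_OFF case above) is what the OPER case below checks so
+ * that the MI_INT interrupt is re-enabled once LED4 is switched back
+ * to following the external link.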
+ */ + if (REG_RD(sc, NIG_REG_MASK_INTERRUPT_PORT0 + + params->port * 4) & + ELINK_NIG_MASK_MI_INT) { + params->link_flags |= + ELINK_LINK_FLAGS_INT_DISABLED; + + elink_bits_dis( + sc, + NIG_REG_MASK_INTERRUPT_PORT0 + + params->port * 4, + ELINK_NIG_MASK_MI_INT); + } + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_SIGNAL_MASK, + 0x20); + } + } + break; + + case ELINK_LED_MODE_OPER: + + ELINK_DEBUG_P1(sc, "Port 0x%x: LED MODE OPER", port); + + if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) == + SHARED_HW_CFG_LED_EXTPHY1) { + + /* Set control reg */ + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LINK_SIGNAL, + &val); + + if (!((val & + MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_MASK) + >> MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_SHIFT)) { + ELINK_DEBUG_P0(sc, "Setting LINK_SIGNAL"); + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LINK_SIGNAL, + 0xa492); + } + + /* Set LED masks */ + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED1_MASK, + 0x10); + + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED2_MASK, + 0x80); + + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED3_MASK, + 0x98); + + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED5_MASK, + 0x40); + + } else { + /* EXTPHY2 LED mode indicate that the 100M/1G/10G LED + * sources are all wired through LED1, rather than only + * 10G in other modes. + */ + val = ((params->hw_led_mode << + SHARED_HW_CFG_LED_MODE_SHIFT) == + SHARED_HW_CFG_LED_EXTPHY2) ? 0x98 : 0x80; + + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED1_MASK, + val); + + /* Tell LED3 to blink on source */ + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LINK_SIGNAL, + &val); + val &= ~(7 << 6); + val |= (1 << 6); /* A83B[8:6]= 1 */ + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LINK_SIGNAL, + val); + if (phy->type == + PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84834) { + /* Restore LED4 source to external link, + * and re-enable interrupts. + */ + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_SIGNAL_MASK, + 0x40); + if (params->link_flags & + ELINK_LINK_FLAGS_INT_DISABLED) { + elink_link_int_enable(params); + params->link_flags &= + ~ELINK_LINK_FLAGS_INT_DISABLED; + } + } + } + break; + } + + /* This is a workaround for E3 + 84833 until autoneg + * restart is fixed in f/w + */ + if (CHIP_IS_E3(sc)) { + elink_cl45_read(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_GP2_STATUS_GP_2_1, &val); + } +} + +/******************************************************************/ +/* 54618SE PHY SECTION */ +/******************************************************************/ +static void elink_54618se_specific_func(struct elink_phy *phy, + struct elink_params *params, + uint32_t action) +{ + struct bnx2x_softc *sc = params->sc; + uint16_t temp; + switch (action) { + case ELINK_PHY_INIT: + /* Configure LED4: set to INTR (0x6). */ + /* Accessing shadow register 0xe. */ + elink_cl22_write(sc, phy, + MDIO_REG_GPHY_SHADOW, + MDIO_REG_GPHY_SHADOW_LED_SEL2); + elink_cl22_read(sc, phy, + MDIO_REG_GPHY_SHADOW, + &temp); + temp &= ~(0xf << 4); + temp |= (0x6 << 4); + elink_cl22_write(sc, phy, + MDIO_REG_GPHY_SHADOW, + MDIO_REG_GPHY_SHADOW_WR_ENA | temp); + /* Configure INTR based on link status change. 
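+ * The LED4 update above follows the GPHY shadow-register convention
+ * used throughout this 54618SE section, sketched here with the file's
+ * own CL22 helpers (the selector value and the field update depend on
+ * the shadow register being accessed):
+ *
+ *   elink_cl22_write(sc, phy, MDIO_REG_GPHY_SHADOW, shadow_selector);
+ *   elink_cl22_read(sc, phy, MDIO_REG_GPHY_SHADOW, &temp);
+ *   ... update the field of interest in temp ...
+ *   elink_cl22_write(sc, phy, MDIO_REG_GPHY_SHADOW,
+ *                    MDIO_REG_GPHY_SHADOW_WR_ENA | temp);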
*/ + elink_cl22_write(sc, phy, + MDIO_REG_INTR_MASK, + ~MDIO_REG_INTR_MASK_LINK_STATUS); + break; + } +} + +static uint8_t elink_54618se_config_init(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars) +{ + struct bnx2x_softc *sc = params->sc; + uint8_t port; + uint16_t autoneg_val, an_1000_val, an_10_100_val, fc_val, temp; + uint32_t cfg_pin; + + ELINK_DEBUG_P0(sc, "54618SE cfg init"); + DELAY(1000 * 1); + + /* This works with E3 only, no need to check the chip + * before determining the port. + */ + port = params->port; + + cfg_pin = (REG_RD(sc, params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[port].e3_cmn_pin_cfg)) & + PORT_HW_CFG_E3_PHY_RESET_MASK) >> + PORT_HW_CFG_E3_PHY_RESET_SHIFT; + + /* Drive pin high to bring the GPHY out of reset. */ + elink_set_cfg_pin(sc, cfg_pin, 1); + + /* wait for GPHY to reset */ + DELAY(1000 * 50); + + /* reset phy */ + elink_cl22_write(sc, phy, + MDIO_PMA_REG_CTRL, 0x8000); + elink_wait_reset_complete(sc, phy, params); + + /* Wait for GPHY to reset */ + DELAY(1000 * 50); + + + elink_54618se_specific_func(phy, params, ELINK_PHY_INIT); + /* Flip the signal detect polarity (set 0x1c.0x1e[8]). */ + elink_cl22_write(sc, phy, + MDIO_REG_GPHY_SHADOW, + MDIO_REG_GPHY_SHADOW_AUTO_DET_MED); + elink_cl22_read(sc, phy, + MDIO_REG_GPHY_SHADOW, + &temp); + temp |= MDIO_REG_GPHY_SHADOW_INVERT_FIB_SD; + elink_cl22_write(sc, phy, + MDIO_REG_GPHY_SHADOW, + MDIO_REG_GPHY_SHADOW_WR_ENA | temp); + + /* Set up fc */ + /* Please refer to Table 28B-3 of 802.3ab-1999 spec. */ + elink_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc); + fc_val = 0; + if ((vars->ieee_fc & MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) == + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) + fc_val |= MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC; + + if ((vars->ieee_fc & MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) == + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) + fc_val |= MDIO_AN_REG_ADV_PAUSE_PAUSE; + + /* Read all advertisement */ + elink_cl22_read(sc, phy, + 0x09, + &an_1000_val); + + elink_cl22_read(sc, phy, + 0x04, + &an_10_100_val); + + elink_cl22_read(sc, phy, + MDIO_PMA_REG_CTRL, + &autoneg_val); + + /* Disable forced speed */ + autoneg_val &= ~((1 << 6) | (1 << 8) | (1 << 9) | (1 << 12) | + (1 << 13)); + an_10_100_val &= ~((1 << 5) | (1 << 6) | (1 << 7) | (1 << 8) | + (1 << 10) | (1 << 11)); + + if (((phy->req_line_speed == ELINK_SPEED_AUTO_NEG) && + (phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) || + (phy->req_line_speed == ELINK_SPEED_1000)) { + an_1000_val |= (1 << 8); + autoneg_val |= (1 << 9 | 1 << 12); + if (phy->req_duplex == DUPLEX_FULL) + an_1000_val |= (1 << 9); + ELINK_DEBUG_P0(sc, "Advertising 1G"); + } else + an_1000_val &= ~((1 << 8) | (1 << 9)); + + elink_cl22_write(sc, phy, + 0x09, + an_1000_val); + elink_cl22_read(sc, phy, + 0x09, + &an_1000_val); + + /* Advertise 10/100 link speed */ + if (phy->req_line_speed == ELINK_SPEED_AUTO_NEG) { + if (phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF) { + an_10_100_val |= (1 << 5); + autoneg_val |= (1 << 9 | 1 << 12); + ELINK_DEBUG_P0(sc, "Advertising 10M-HD"); + } + if (phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL) { + an_10_100_val |= (1 << 6); + autoneg_val |= (1 << 9 | 1 << 12); + ELINK_DEBUG_P0(sc, "Advertising 10M-FD"); + } + if (phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF) { + an_10_100_val |= (1 << 7); + autoneg_val |= (1 << 9 | 1 << 12); + ELINK_DEBUG_P0(sc, "Advertising 100M-HD"); + } + if (phy->speed_cap_mask 
& + PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL) { + an_10_100_val |= (1 << 8); + autoneg_val |= (1 << 9 | 1 << 12); + ELINK_DEBUG_P0(sc, "Advertising 100M-FD"); + } + } + + /* Only 10/100 are allowed to work in FORCE mode */ + if (phy->req_line_speed == ELINK_SPEED_100) { + autoneg_val |= (1 << 13); + /* Enabled AUTO-MDIX when autoneg is disabled */ + elink_cl22_write(sc, phy, + 0x18, + (1 << 15 | 1 << 9 | 7 << 0)); + ELINK_DEBUG_P0(sc, "Setting 100M force"); + } + if (phy->req_line_speed == ELINK_SPEED_10) { + /* Enabled AUTO-MDIX when autoneg is disabled */ + elink_cl22_write(sc, phy, + 0x18, + (1 << 15 | 1 << 9 | 7 << 0)); + ELINK_DEBUG_P0(sc, "Setting 10M force"); + } + + if ((phy->flags & ELINK_FLAGS_EEE) && elink_eee_has_cap(params)) { + elink_status_t rc; + + elink_cl22_write(sc, phy, MDIO_REG_GPHY_EXP_ACCESS, + MDIO_REG_GPHY_EXP_ACCESS_TOP | + MDIO_REG_GPHY_EXP_TOP_2K_BUF); + elink_cl22_read(sc, phy, MDIO_REG_GPHY_EXP_ACCESS_GATE, &temp); + temp &= 0xfffe; + elink_cl22_write(sc, phy, MDIO_REG_GPHY_EXP_ACCESS_GATE, temp); + + rc = elink_eee_initial_config(params, vars, SHMEM_EEE_1G_ADV); + if (rc != ELINK_STATUS_OK) { + ELINK_DEBUG_P0(sc, "Failed to configure EEE timers"); + elink_eee_disable(phy, params, vars); + } else if ((params->eee_mode & ELINK_EEE_MODE_ADV_LPI) && + (phy->req_duplex == DUPLEX_FULL) && + (elink_eee_calc_timer(params) || + !(params->eee_mode & ELINK_EEE_MODE_ENABLE_LPI))) { + /* Need to advertise EEE only when requested, + * and either no LPI assertion was requested, + * or it was requested and a valid timer was set. + * Also notice full duplex is required for EEE. + */ + elink_eee_advertise(phy, params, vars, + SHMEM_EEE_1G_ADV); + } else { + ELINK_DEBUG_P0(sc, "Don't Advertise 1GBase-T EEE"); + elink_eee_disable(phy, params, vars); + } + } else { + vars->eee_status &= ((uint32_t)(~SHMEM_EEE_1G_ADV) << + SHMEM_EEE_SUPPORTED_SHIFT); + + if (phy->flags & ELINK_FLAGS_EEE) { + /* Handle legacy auto-grEEEn */ + if (params->feature_config_flags & + ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED) { + temp = 6; + ELINK_DEBUG_P0(sc, "Enabling Auto-GrEEEn"); + } else { + temp = 0; + ELINK_DEBUG_P0(sc, "Don't Adv. EEE"); + } + elink_cl45_write(sc, phy, MDIO_AN_DEVAD, + MDIO_AN_REG_EEE_ADV, temp); + } + } + + elink_cl22_write(sc, phy, + 0x04, + an_10_100_val | fc_val); + + if (phy->req_duplex == DUPLEX_FULL) + autoneg_val |= (1 << 8); + + elink_cl22_write(sc, phy, + MDIO_PMA_REG_CTRL, autoneg_val); + + return ELINK_STATUS_OK; +} + + +static void elink_5461x_set_link_led(struct elink_phy *phy, + struct elink_params *params, uint8_t mode) +{ + struct bnx2x_softc *sc = params->sc; + uint16_t temp; + + elink_cl22_write(sc, phy, + MDIO_REG_GPHY_SHADOW, + MDIO_REG_GPHY_SHADOW_LED_SEL1); + elink_cl22_read(sc, phy, + MDIO_REG_GPHY_SHADOW, + &temp); + temp &= 0xff00; + + ELINK_DEBUG_P1(sc, "54618x set link led (mode=%x)", mode); + switch (mode) { + case ELINK_LED_MODE_FRONT_PANEL_OFF: + case ELINK_LED_MODE_OFF: + temp |= 0x00ee; + break; + case ELINK_LED_MODE_OPER: + temp |= 0x0001; + break; + case ELINK_LED_MODE_ON: + temp |= 0x00ff; + break; + default: + break; + } + elink_cl22_write(sc, phy, + MDIO_REG_GPHY_SHADOW, + MDIO_REG_GPHY_SHADOW_WR_ENA | temp); + return; +} + + +static void elink_54618se_link_reset(struct elink_phy *phy, + struct elink_params *params) +{ + struct bnx2x_softc *sc = params->sc; + uint32_t cfg_pin; + uint8_t port; + + /* In case of no EPIO routed to reset the GPHY, put it + * in low power mode. 
+ */ + elink_cl22_write(sc, phy, MDIO_PMA_REG_CTRL, 0x800); + /* This works with E3 only, no need to check the chip + * before determining the port. + */ + port = params->port; + cfg_pin = (REG_RD(sc, params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[port].e3_cmn_pin_cfg)) & + PORT_HW_CFG_E3_PHY_RESET_MASK) >> + PORT_HW_CFG_E3_PHY_RESET_SHIFT; + + /* Drive pin low to put GPHY in reset. */ + elink_set_cfg_pin(sc, cfg_pin, 0); +} + +static uint8_t elink_54618se_read_status(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars) +{ + struct bnx2x_softc *sc = params->sc; + uint16_t val; + uint8_t link_up = 0; + uint16_t legacy_status, legacy_speed; + + /* Get speed operation status */ + elink_cl22_read(sc, phy, + MDIO_REG_GPHY_AUX_STATUS, + &legacy_status); + ELINK_DEBUG_P1(sc, "54618SE read_status: 0x%x", legacy_status); + + /* Read status to clear the PHY interrupt. */ + elink_cl22_read(sc, phy, + MDIO_REG_INTR_STATUS, + &val); + + link_up = ((legacy_status & (1 << 2)) == (1 << 2)); + + if (link_up) { + legacy_speed = (legacy_status & (7 << 8)); + if (legacy_speed == (7 << 8)) { + vars->line_speed = ELINK_SPEED_1000; + vars->duplex = DUPLEX_FULL; + } else if (legacy_speed == (6 << 8)) { + vars->line_speed = ELINK_SPEED_1000; + vars->duplex = DUPLEX_HALF; + } else if (legacy_speed == (5 << 8)) { + vars->line_speed = ELINK_SPEED_100; + vars->duplex = DUPLEX_FULL; + } + /* Omitting 100Base-T4 for now */ + else if (legacy_speed == (3 << 8)) { + vars->line_speed = ELINK_SPEED_100; + vars->duplex = DUPLEX_HALF; + } else if (legacy_speed == (2 << 8)) { + vars->line_speed = ELINK_SPEED_10; + vars->duplex = DUPLEX_FULL; + } else if (legacy_speed == (1 << 8)) { + vars->line_speed = ELINK_SPEED_10; + vars->duplex = DUPLEX_HALF; + } else /* Should not happen */ + vars->line_speed = 0; + + ELINK_DEBUG_P2(sc, + "Link is up in %dMbps, is_duplex_full= %d", + vars->line_speed, + (vars->duplex == DUPLEX_FULL)); + + /* Check legacy speed AN resolution */ + elink_cl22_read(sc, phy, + 0x01, + &val); + if (val & (1 << 5)) + vars->link_status |= + LINK_STATUS_AUTO_NEGOTIATE_COMPLETE; + elink_cl22_read(sc, phy, + 0x06, + &val); + if ((val & (1 << 0)) == 0) + vars->link_status |= + LINK_STATUS_PARALLEL_DETECTION_USED; + + ELINK_DEBUG_P1(sc, "BNX2X4618SE: link speed is %d", + vars->line_speed); + + elink_ext_phy_resolve_fc(phy, params, vars); + + if (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) { + /* Report LP advertised speeds */ + elink_cl22_read(sc, phy, 0x5, &val); + + if (val & (1 << 5)) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_10THD_CAPABLE; + if (val & (1 << 6)) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_10TFD_CAPABLE; + if (val & (1 << 7)) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_100TXHD_CAPABLE; + if (val & (1 << 8)) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_100TXFD_CAPABLE; + if (val & (1 << 9)) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_100T4_CAPABLE; + + elink_cl22_read(sc, phy, 0xa, &val); + if (val & (1 << 10)) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE; + if (val & (1 << 11)) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE; + + if ((phy->flags & ELINK_FLAGS_EEE) && + elink_eee_has_cap(params)) + elink_eee_an_resolve(phy, params, vars); + } + } + return link_up; +} + +static void elink_54618se_config_loopback(struct elink_phy *phy, + struct elink_params *params) +{ + struct bnx2x_softc *sc = params->sc; + uint16_t val; + uint32_t umac_base = params->port ? 
GRCBASE_UMAC1 : GRCBASE_UMAC0; + + ELINK_DEBUG_P0(sc, "2PMA/PMD ext_phy_loopback: 54618se"); + + /* Enable master/slave manual mmode and set to master */ + /* mii write 9 [bits set 11 12] */ + elink_cl22_write(sc, phy, 0x09, 3 << 11); + + /* forced 1G and disable autoneg */ + /* set val [mii read 0] */ + /* set val [expr $val & [bits clear 6 12 13]] */ + /* set val [expr $val | [bits set 6 8]] */ + /* mii write 0 $val */ + elink_cl22_read(sc, phy, 0x00, &val); + val &= ~((1 << 6) | (1 << 12) | (1 << 13)); + val |= (1 << 6) | (1 << 8); + elink_cl22_write(sc, phy, 0x00, val); + + /* Set external loopback and Tx using 6dB coding */ + /* mii write 0x18 7 */ + /* set val [mii read 0x18] */ + /* mii write 0x18 [expr $val | [bits set 10 15]] */ + elink_cl22_write(sc, phy, 0x18, 7); + elink_cl22_read(sc, phy, 0x18, &val); + elink_cl22_write(sc, phy, 0x18, val | (1 << 10) | (1 << 15)); + + /* This register opens the gate for the UMAC despite its name */ + REG_WR(sc, NIG_REG_EGRESS_EMAC0_PORT + params->port * 4, 1); + + /* Maximum Frame Length (RW). Defines a 14-Bit maximum frame + * length used by the MAC receive logic to check frames. + */ + REG_WR(sc, umac_base + UMAC_REG_MAXFR, 0x2710); +} + +/******************************************************************/ +/* SFX7101 PHY SECTION */ +/******************************************************************/ +static void elink_7101_config_loopback(struct elink_phy *phy, + struct elink_params *params) +{ + struct bnx2x_softc *sc = params->sc; + /* SFX7101_XGXS_TEST1 */ + elink_cl45_write(sc, phy, + MDIO_XS_DEVAD, MDIO_XS_SFX7101_XGXS_TEST1, 0x100); +} + +static uint8_t elink_7101_config_init(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars) +{ + uint16_t fw_ver1, fw_ver2, val; + struct bnx2x_softc *sc = params->sc; + ELINK_DEBUG_P0(sc, "Setting the SFX7101 LASI indication"); + + /* Restore normal power mode*/ + elink_cb_gpio_write(sc, MISC_REGISTERS_GPIO_2, + MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port); + /* HW reset */ + elink_ext_phy_hw_reset(sc, params->port); + elink_wait_reset_complete(sc, phy, params); + + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x1); + ELINK_DEBUG_P0(sc, "Setting the SFX7101 LED to blink on traffic"); + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_7107_LED_CNTL, (1 << 3)); + + elink_ext_phy_set_pause(params, phy, vars); + /* Restart autoneg */ + elink_cl45_read(sc, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, &val); + val |= 0x200; + elink_cl45_write(sc, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, val); + + /* Save spirom version */ + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_7101_VER1, &fw_ver1); + + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_7101_VER2, &fw_ver2); + elink_save_spirom_version(sc, params->port, + (uint32_t)(fw_ver1 << 16 | fw_ver2), + phy->ver_addr); + return ELINK_STATUS_OK; +} + +static uint8_t elink_7101_read_status(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars) +{ + struct bnx2x_softc *sc = params->sc; + uint8_t link_up; + uint16_t val1, val2; + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val2); + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1); + ELINK_DEBUG_P2(sc, "10G-base-T LASI status 0x%x->0x%x", + val2, val1); + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val2); + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val1); + ELINK_DEBUG_P2(sc, "10G-base-T PMA status 0x%x->0x%x", + val2, val1); + link_up = 
((val1 & 4) == 4); + /* If link is up print the AN outcome of the SFX7101 PHY */ + if (link_up) { + elink_cl45_read(sc, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_MASTER_STATUS, + &val2); + vars->line_speed = ELINK_SPEED_10000; + vars->duplex = DUPLEX_FULL; + ELINK_DEBUG_P2(sc, "SFX7101 AN status 0x%x->Master=%x", + val2, (val2 & (1 << 14))); + elink_ext_phy_10G_an_resolve(sc, phy, vars); + elink_ext_phy_resolve_fc(phy, params, vars); + + /* Read LP advertised speeds */ + if (val2 & (1 << 11)) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE; + } + return link_up; +} + +static elink_status_t elink_7101_format_ver(uint32_t spirom_ver, uint8_t *str, + uint16_t *len) +{ + if (*len < 5) + return ELINK_STATUS_ERROR; + str[0] = (spirom_ver & 0xFF); + str[1] = (spirom_ver & 0xFF00) >> 8; + str[2] = (spirom_ver & 0xFF0000) >> 16; + str[3] = (spirom_ver & 0xFF000000) >> 24; + str[4] = '\0'; + *len -= 5; + return ELINK_STATUS_OK; +} + +void elink_sfx7101_sp_sw_reset(struct bnx2x_softc *sc, struct elink_phy *phy) +{ + uint16_t val, cnt; + + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_7101_RESET, &val); + + for (cnt = 0; cnt < 10; cnt++) { + DELAY(1000 * 50); + /* Writes a self-clearing reset */ + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_7101_RESET, + (val | (1 << 15))); + /* Wait for clear */ + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_7101_RESET, &val); + + if ((val & (1 << 15)) == 0) + break; + } +} + +static void elink_7101_hw_reset(__rte_unused struct elink_phy *phy, + struct elink_params *params) { + /* Low power mode is controlled by GPIO 2 */ + elink_cb_gpio_write(params->sc, MISC_REGISTERS_GPIO_2, + MISC_REGISTERS_GPIO_OUTPUT_LOW, params->port); + /* The PHY reset is controlled by GPIO 1 */ + elink_cb_gpio_write(params->sc, MISC_REGISTERS_GPIO_1, + MISC_REGISTERS_GPIO_OUTPUT_LOW, params->port); +} + +static void elink_7101_set_link_led(struct elink_phy *phy, + struct elink_params *params, uint8_t mode) +{ + uint16_t val = 0; + struct bnx2x_softc *sc = params->sc; + switch (mode) { + case ELINK_LED_MODE_FRONT_PANEL_OFF: + case ELINK_LED_MODE_OFF: + val = 2; + break; + case ELINK_LED_MODE_ON: + val = 1; + break; + case ELINK_LED_MODE_OPER: + val = 0; + break; + } + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_7107_LINK_LED_CNTL, + val); +} + +/******************************************************************/ +/* STATIC PHY DECLARATION */ +/******************************************************************/ + +static const struct elink_phy phy_null = { + .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN, + .addr = 0, + .def_md_devad = 0, + .flags = ELINK_FLAGS_INIT_XGXS_FIRST, + .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .mdio_ctrl = 0, + .supported = 0, + .media_type = ELINK_ETH_PHY_NOT_PRESENT, + .ver_addr = 0, + .req_flow_ctrl = 0, + .req_line_speed = 0, + .speed_cap_mask = 0, + .req_duplex = 0, + .rsrv = 0, + .config_init = (config_init_t)NULL, + .read_status = (read_status_t)NULL, + .link_reset = (link_reset_t)NULL, + .config_loopback = (config_loopback_t)NULL, + .format_fw_ver = (format_fw_ver_t)NULL, + .hw_reset = (hw_reset_t)NULL, + .set_link_led = (set_link_led_t)NULL, + .phy_specific_func = (phy_specific_func_t)NULL +}; + +static const struct elink_phy phy_serdes = { + .type = PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT, + .addr = 0xff, + .def_md_devad = 0, + .flags = 0, + .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .tx_preemphasis = {0xffff, 0xffff, 
0xffff, 0xffff}, + .mdio_ctrl = 0, + .supported = (ELINK_SUPPORTED_10baseT_Half | + ELINK_SUPPORTED_10baseT_Full | + ELINK_SUPPORTED_100baseT_Half | + ELINK_SUPPORTED_100baseT_Full | + ELINK_SUPPORTED_1000baseT_Full | + ELINK_SUPPORTED_2500baseX_Full | + ELINK_SUPPORTED_TP | + ELINK_SUPPORTED_Autoneg | + ELINK_SUPPORTED_Pause | + ELINK_SUPPORTED_Asym_Pause), + .media_type = ELINK_ETH_PHY_BASE_T, + .ver_addr = 0, + .req_flow_ctrl = 0, + .req_line_speed = 0, + .speed_cap_mask = 0, + .req_duplex = 0, + .rsrv = 0, + .config_init = (config_init_t)elink_xgxs_config_init, + .read_status = (read_status_t)elink_link_settings_status, + .link_reset = (link_reset_t)elink_int_link_reset, + .config_loopback = (config_loopback_t)NULL, + .format_fw_ver = (format_fw_ver_t)NULL, + .hw_reset = (hw_reset_t)NULL, + .set_link_led = (set_link_led_t)NULL, + .phy_specific_func = (phy_specific_func_t)NULL +}; + +static const struct elink_phy phy_xgxs = { + .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT, + .addr = 0xff, + .def_md_devad = 0, + .flags = 0, + .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .mdio_ctrl = 0, + .supported = (ELINK_SUPPORTED_10baseT_Half | + ELINK_SUPPORTED_10baseT_Full | + ELINK_SUPPORTED_100baseT_Half | + ELINK_SUPPORTED_100baseT_Full | + ELINK_SUPPORTED_1000baseT_Full | + ELINK_SUPPORTED_2500baseX_Full | + ELINK_SUPPORTED_10000baseT_Full | + ELINK_SUPPORTED_FIBRE | + ELINK_SUPPORTED_Autoneg | + ELINK_SUPPORTED_Pause | + ELINK_SUPPORTED_Asym_Pause), + .media_type = ELINK_ETH_PHY_CX4, + .ver_addr = 0, + .req_flow_ctrl = 0, + .req_line_speed = 0, + .speed_cap_mask = 0, + .req_duplex = 0, + .rsrv = 0, + .config_init = (config_init_t)elink_xgxs_config_init, + .read_status = (read_status_t)elink_link_settings_status, + .link_reset = (link_reset_t)elink_int_link_reset, + .config_loopback = (config_loopback_t)elink_set_xgxs_loopback, + .format_fw_ver = (format_fw_ver_t)NULL, + .hw_reset = (hw_reset_t)NULL, + .set_link_led = (set_link_led_t)NULL, + .phy_specific_func = (phy_specific_func_t)elink_xgxs_specific_func +}; +static const struct elink_phy phy_warpcore = { + .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT, + .addr = 0xff, + .def_md_devad = 0, + .flags = ELINK_FLAGS_TX_ERROR_CHECK, + .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .mdio_ctrl = 0, + .supported = (ELINK_SUPPORTED_10baseT_Half | + ELINK_SUPPORTED_10baseT_Full | + ELINK_SUPPORTED_100baseT_Half | + ELINK_SUPPORTED_100baseT_Full | + ELINK_SUPPORTED_1000baseT_Full | + ELINK_SUPPORTED_1000baseKX_Full | + ELINK_SUPPORTED_10000baseT_Full | + ELINK_SUPPORTED_10000baseKR_Full | + ELINK_SUPPORTED_20000baseKR2_Full | + ELINK_SUPPORTED_20000baseMLD2_Full | + ELINK_SUPPORTED_FIBRE | + ELINK_SUPPORTED_Autoneg | + ELINK_SUPPORTED_Pause | + ELINK_SUPPORTED_Asym_Pause), + .media_type = ELINK_ETH_PHY_UNSPECIFIED, + .ver_addr = 0, + .req_flow_ctrl = 0, + .req_line_speed = 0, + .speed_cap_mask = 0, + /* req_duplex = */0, + /* rsrv = */0, + .config_init = (config_init_t)elink_warpcore_config_init, + .read_status = (read_status_t)elink_warpcore_read_status, + .link_reset = (link_reset_t)elink_warpcore_link_reset, + .config_loopback = (config_loopback_t)elink_set_warpcore_loopback, + .format_fw_ver = (format_fw_ver_t)NULL, + .hw_reset = (hw_reset_t)elink_warpcore_hw_reset, + .set_link_led = (set_link_led_t)NULL, + .phy_specific_func = (phy_specific_func_t)NULL +}; + + +static const struct elink_phy phy_7101 = { + .type = 
PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101, + .addr = 0xff, + .def_md_devad = 0, + .flags = ELINK_FLAGS_FAN_FAILURE_DET_REQ, + .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .mdio_ctrl = 0, + .supported = (ELINK_SUPPORTED_10000baseT_Full | + ELINK_SUPPORTED_TP | + ELINK_SUPPORTED_Autoneg | + ELINK_SUPPORTED_Pause | + ELINK_SUPPORTED_Asym_Pause), + .media_type = ELINK_ETH_PHY_BASE_T, + .ver_addr = 0, + .req_flow_ctrl = 0, + .req_line_speed = 0, + .speed_cap_mask = 0, + .req_duplex = 0, + .rsrv = 0, + .config_init = (config_init_t)elink_7101_config_init, + .read_status = (read_status_t)elink_7101_read_status, + .link_reset = (link_reset_t)elink_common_ext_link_reset, + .config_loopback = (config_loopback_t)elink_7101_config_loopback, + .format_fw_ver = (format_fw_ver_t)elink_7101_format_ver, + .hw_reset = (hw_reset_t)elink_7101_hw_reset, + .set_link_led = (set_link_led_t)elink_7101_set_link_led, + .phy_specific_func = (phy_specific_func_t)NULL +}; +static const struct elink_phy phy_8073 = { + .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8073, + .addr = 0xff, + .def_md_devad = 0, + .flags = 0, + .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .mdio_ctrl = 0, + .supported = (ELINK_SUPPORTED_10000baseT_Full | + ELINK_SUPPORTED_2500baseX_Full | + ELINK_SUPPORTED_1000baseT_Full | + ELINK_SUPPORTED_FIBRE | + ELINK_SUPPORTED_Autoneg | + ELINK_SUPPORTED_Pause | + ELINK_SUPPORTED_Asym_Pause), + .media_type = ELINK_ETH_PHY_KR, + .ver_addr = 0, + .req_flow_ctrl = 0, + .req_line_speed = 0, + .speed_cap_mask = 0, + .req_duplex = 0, + .rsrv = 0, + .config_init = (config_init_t)elink_8073_config_init, + .read_status = (read_status_t)elink_8073_read_status, + .link_reset = (link_reset_t)elink_8073_link_reset, + .config_loopback = (config_loopback_t)NULL, + .format_fw_ver = (format_fw_ver_t)elink_format_ver, + .hw_reset = (hw_reset_t)NULL, + .set_link_led = (set_link_led_t)NULL, + .phy_specific_func = (phy_specific_func_t)elink_8073_specific_func +}; +static const struct elink_phy phy_8705 = { + .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8705, + .addr = 0xff, + .def_md_devad = 0, + .flags = ELINK_FLAGS_INIT_XGXS_FIRST, + .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .mdio_ctrl = 0, + .supported = (ELINK_SUPPORTED_10000baseT_Full | + ELINK_SUPPORTED_FIBRE | + ELINK_SUPPORTED_Pause | + ELINK_SUPPORTED_Asym_Pause), + .media_type = ELINK_ETH_PHY_XFP_FIBER, + .ver_addr = 0, + .req_flow_ctrl = 0, + .req_line_speed = 0, + .speed_cap_mask = 0, + .req_duplex = 0, + .rsrv = 0, + .config_init = (config_init_t)elink_8705_config_init, + .read_status = (read_status_t)elink_8705_read_status, + .link_reset = (link_reset_t)elink_common_ext_link_reset, + .config_loopback = (config_loopback_t)NULL, + .format_fw_ver = (format_fw_ver_t)elink_null_format_ver, + .hw_reset = (hw_reset_t)NULL, + .set_link_led = (set_link_led_t)NULL, + .phy_specific_func = (phy_specific_func_t)NULL +}; +static const struct elink_phy phy_8706 = { + .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8706, + .addr = 0xff, + .def_md_devad = 0, + .flags = ELINK_FLAGS_INIT_XGXS_FIRST, + .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .mdio_ctrl = 0, + .supported = (ELINK_SUPPORTED_10000baseT_Full | + ELINK_SUPPORTED_1000baseT_Full | + ELINK_SUPPORTED_FIBRE | + ELINK_SUPPORTED_Pause | + ELINK_SUPPORTED_Asym_Pause), + .media_type = 
ELINK_ETH_PHY_SFPP_10G_FIBER, + .ver_addr = 0, + .req_flow_ctrl = 0, + .req_line_speed = 0, + .speed_cap_mask = 0, + .req_duplex = 0, + .rsrv = 0, + .config_init = (config_init_t)elink_8706_config_init, + .read_status = (read_status_t)elink_8706_read_status, + .link_reset = (link_reset_t)elink_common_ext_link_reset, + .config_loopback = (config_loopback_t)NULL, + .format_fw_ver = (format_fw_ver_t)elink_format_ver, + .hw_reset = (hw_reset_t)NULL, + .set_link_led = (set_link_led_t)NULL, + .phy_specific_func = (phy_specific_func_t)NULL +}; + +static const struct elink_phy phy_8726 = { + .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8726, + .addr = 0xff, + .def_md_devad = 0, + .flags = (ELINK_FLAGS_INIT_XGXS_FIRST | + ELINK_FLAGS_TX_ERROR_CHECK), + .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .mdio_ctrl = 0, + .supported = (ELINK_SUPPORTED_10000baseT_Full | + ELINK_SUPPORTED_1000baseT_Full | + ELINK_SUPPORTED_Autoneg | + ELINK_SUPPORTED_FIBRE | + ELINK_SUPPORTED_Pause | + ELINK_SUPPORTED_Asym_Pause), + .media_type = ELINK_ETH_PHY_NOT_PRESENT, + .ver_addr = 0, + .req_flow_ctrl = 0, + .req_line_speed = 0, + .speed_cap_mask = 0, + .req_duplex = 0, + .rsrv = 0, + .config_init = (config_init_t)elink_8726_config_init, + .read_status = (read_status_t)elink_8726_read_status, + .link_reset = (link_reset_t)elink_8726_link_reset, + .config_loopback = (config_loopback_t)elink_8726_config_loopback, + .format_fw_ver = (format_fw_ver_t)elink_format_ver, + .hw_reset = (hw_reset_t)NULL, + .set_link_led = (set_link_led_t)NULL, + .phy_specific_func = (phy_specific_func_t)NULL +}; + +static const struct elink_phy phy_8727 = { + .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8727, + .addr = 0xff, + .def_md_devad = 0, + .flags = (ELINK_FLAGS_FAN_FAILURE_DET_REQ | + ELINK_FLAGS_TX_ERROR_CHECK), + .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .mdio_ctrl = 0, + .supported = (ELINK_SUPPORTED_10000baseT_Full | + ELINK_SUPPORTED_1000baseT_Full | + ELINK_SUPPORTED_FIBRE | + ELINK_SUPPORTED_Pause | + ELINK_SUPPORTED_Asym_Pause), + .media_type = ELINK_ETH_PHY_NOT_PRESENT, + .ver_addr = 0, + .req_flow_ctrl = 0, + .req_line_speed = 0, + .speed_cap_mask = 0, + .req_duplex = 0, + .rsrv = 0, + .config_init = (config_init_t)elink_8727_config_init, + .read_status = (read_status_t)elink_8727_read_status, + .link_reset = (link_reset_t)elink_8727_link_reset, + .config_loopback = (config_loopback_t)NULL, + .format_fw_ver = (format_fw_ver_t)elink_format_ver, + .hw_reset = (hw_reset_t)elink_8727_hw_reset, + .set_link_led = (set_link_led_t)elink_8727_set_link_led, + .phy_specific_func = (phy_specific_func_t)elink_8727_specific_func +}; +static const struct elink_phy phy_8481 = { + .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8481, + .addr = 0xff, + .def_md_devad = 0, + .flags = ELINK_FLAGS_FAN_FAILURE_DET_REQ | + ELINK_FLAGS_REARM_LATCH_SIGNAL, + .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .mdio_ctrl = 0, + .supported = (ELINK_SUPPORTED_10baseT_Half | + ELINK_SUPPORTED_10baseT_Full | + ELINK_SUPPORTED_100baseT_Half | + ELINK_SUPPORTED_100baseT_Full | + ELINK_SUPPORTED_1000baseT_Full | + ELINK_SUPPORTED_10000baseT_Full | + ELINK_SUPPORTED_TP | + ELINK_SUPPORTED_Autoneg | + ELINK_SUPPORTED_Pause | + ELINK_SUPPORTED_Asym_Pause), + .media_type = ELINK_ETH_PHY_BASE_T, + .ver_addr = 0, + .req_flow_ctrl = 0, + .req_line_speed = 0, + .speed_cap_mask = 0, + .req_duplex 
= 0, + .rsrv = 0, + .config_init = (config_init_t)elink_8481_config_init, + .read_status = (read_status_t)elink_848xx_read_status, + .link_reset = (link_reset_t)elink_8481_link_reset, + .config_loopback = (config_loopback_t)NULL, + .format_fw_ver = (format_fw_ver_t)elink_848xx_format_ver, + .hw_reset = (hw_reset_t)elink_8481_hw_reset, + .set_link_led = (set_link_led_t)elink_848xx_set_link_led, + .phy_specific_func = (phy_specific_func_t)NULL +}; + +static const struct elink_phy phy_84823 = { + .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84823, + .addr = 0xff, + .def_md_devad = 0, + .flags = (ELINK_FLAGS_FAN_FAILURE_DET_REQ | + ELINK_FLAGS_REARM_LATCH_SIGNAL | + ELINK_FLAGS_TX_ERROR_CHECK), + .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .mdio_ctrl = 0, + .supported = (ELINK_SUPPORTED_10baseT_Half | + ELINK_SUPPORTED_10baseT_Full | + ELINK_SUPPORTED_100baseT_Half | + ELINK_SUPPORTED_100baseT_Full | + ELINK_SUPPORTED_1000baseT_Full | + ELINK_SUPPORTED_10000baseT_Full | + ELINK_SUPPORTED_TP | + ELINK_SUPPORTED_Autoneg | + ELINK_SUPPORTED_Pause | + ELINK_SUPPORTED_Asym_Pause), + .media_type = ELINK_ETH_PHY_BASE_T, + .ver_addr = 0, + .req_flow_ctrl = 0, + .req_line_speed = 0, + .speed_cap_mask = 0, + .req_duplex = 0, + .rsrv = 0, + .config_init = (config_init_t)elink_848x3_config_init, + .read_status = (read_status_t)elink_848xx_read_status, + .link_reset = (link_reset_t)elink_848x3_link_reset, + .config_loopback = (config_loopback_t)NULL, + .format_fw_ver = (format_fw_ver_t)elink_848xx_format_ver, + .hw_reset = (hw_reset_t)NULL, + .set_link_led = (set_link_led_t)elink_848xx_set_link_led, + .phy_specific_func = (phy_specific_func_t)elink_848xx_specific_func +}; + +static const struct elink_phy phy_84833 = { + .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84833, + .addr = 0xff, + .def_md_devad = 0, + .flags = (ELINK_FLAGS_FAN_FAILURE_DET_REQ | + ELINK_FLAGS_REARM_LATCH_SIGNAL | + ELINK_FLAGS_TX_ERROR_CHECK | + ELINK_FLAGS_TEMPERATURE), + .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .mdio_ctrl = 0, + .supported = (ELINK_SUPPORTED_100baseT_Half | + ELINK_SUPPORTED_100baseT_Full | + ELINK_SUPPORTED_1000baseT_Full | + ELINK_SUPPORTED_10000baseT_Full | + ELINK_SUPPORTED_TP | + ELINK_SUPPORTED_Autoneg | + ELINK_SUPPORTED_Pause | + ELINK_SUPPORTED_Asym_Pause), + .media_type = ELINK_ETH_PHY_BASE_T, + .ver_addr = 0, + .req_flow_ctrl = 0, + .req_line_speed = 0, + .speed_cap_mask = 0, + .req_duplex = 0, + .rsrv = 0, + .config_init = (config_init_t)elink_848x3_config_init, + .read_status = (read_status_t)elink_848xx_read_status, + .link_reset = (link_reset_t)elink_848x3_link_reset, + .config_loopback = (config_loopback_t)NULL, + .format_fw_ver = (format_fw_ver_t)elink_848xx_format_ver, + .hw_reset = (hw_reset_t)elink_84833_hw_reset_phy, + .set_link_led = (set_link_led_t)elink_848xx_set_link_led, + .phy_specific_func = (phy_specific_func_t)elink_848xx_specific_func +}; + +static const struct elink_phy phy_84834 = { + .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84834, + .addr = 0xff, + .def_md_devad = 0, + .flags = ELINK_FLAGS_FAN_FAILURE_DET_REQ | + ELINK_FLAGS_REARM_LATCH_SIGNAL, + .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .mdio_ctrl = 0, + .supported = (ELINK_SUPPORTED_100baseT_Half | + ELINK_SUPPORTED_100baseT_Full | + ELINK_SUPPORTED_1000baseT_Full | + ELINK_SUPPORTED_10000baseT_Full | + ELINK_SUPPORTED_TP | + 
ELINK_SUPPORTED_Autoneg | + ELINK_SUPPORTED_Pause | + ELINK_SUPPORTED_Asym_Pause), + .media_type = ELINK_ETH_PHY_BASE_T, + .ver_addr = 0, + .req_flow_ctrl = 0, + .req_line_speed = 0, + .speed_cap_mask = 0, + .req_duplex = 0, + .rsrv = 0, + .config_init = (config_init_t)elink_848x3_config_init, + .read_status = (read_status_t)elink_848xx_read_status, + .link_reset = (link_reset_t)elink_848x3_link_reset, + .config_loopback = (config_loopback_t)NULL, + .format_fw_ver = (format_fw_ver_t)elink_848xx_format_ver, + .hw_reset = (hw_reset_t)elink_84833_hw_reset_phy, + .set_link_led = (set_link_led_t)elink_848xx_set_link_led, + .phy_specific_func = (phy_specific_func_t)elink_848xx_specific_func +}; + +static const struct elink_phy phy_84858 = { + .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84858, + .addr = 0xff, + .def_md_devad = 0, + .flags = ELINK_FLAGS_FAN_FAILURE_DET_REQ | + ELINK_FLAGS_REARM_LATCH_SIGNAL, + .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .mdio_ctrl = 0, + .supported = (ELINK_SUPPORTED_100baseT_Half | + ELINK_SUPPORTED_100baseT_Full | + ELINK_SUPPORTED_1000baseT_Full | + ELINK_SUPPORTED_10000baseT_Full | + ELINK_SUPPORTED_TP | + ELINK_SUPPORTED_Autoneg | + ELINK_SUPPORTED_Pause | + ELINK_SUPPORTED_Asym_Pause), + .media_type = ELINK_ETH_PHY_BASE_T, + .ver_addr = 0, + .req_flow_ctrl = 0, + .req_line_speed = 0, + .speed_cap_mask = 0, + .req_duplex = 0, + .rsrv = 0, + .config_init = (config_init_t)elink_848x3_config_init, + .read_status = (read_status_t)elink_848xx_read_status, + .link_reset = (link_reset_t)elink_848x3_link_reset, + .config_loopback = (config_loopback_t)NULL, + .format_fw_ver = (format_fw_ver_t)elink_848xx_format_ver, + .hw_reset = (hw_reset_t)elink_84833_hw_reset_phy, + .set_link_led = (set_link_led_t)elink_848xx_set_link_led, + .phy_specific_func = (phy_specific_func_t)elink_848xx_specific_func +}; + + +static const struct elink_phy phy_54618se = { + .type = PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BNX2X54618SE, + .addr = 0xff, + .def_md_devad = 0, + .flags = ELINK_FLAGS_INIT_XGXS_FIRST, + .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .mdio_ctrl = 0, + .supported = (ELINK_SUPPORTED_10baseT_Half | + ELINK_SUPPORTED_10baseT_Full | + ELINK_SUPPORTED_100baseT_Half | + ELINK_SUPPORTED_100baseT_Full | + ELINK_SUPPORTED_1000baseT_Full | + ELINK_SUPPORTED_TP | + ELINK_SUPPORTED_Autoneg | + ELINK_SUPPORTED_Pause | + ELINK_SUPPORTED_Asym_Pause), + .media_type = ELINK_ETH_PHY_BASE_T, + .ver_addr = 0, + .req_flow_ctrl = 0, + .req_line_speed = 0, + .speed_cap_mask = 0, + /* req_duplex = */0, + /* rsrv = */0, + .config_init = (config_init_t)elink_54618se_config_init, + .read_status = (read_status_t)elink_54618se_read_status, + .link_reset = (link_reset_t)elink_54618se_link_reset, + .config_loopback = (config_loopback_t)elink_54618se_config_loopback, + .format_fw_ver = (format_fw_ver_t)NULL, + .hw_reset = (hw_reset_t)NULL, + .set_link_led = (set_link_led_t)elink_5461x_set_link_led, + .phy_specific_func = (phy_specific_func_t)elink_54618se_specific_func +}; +/*****************************************************************/ +/* */ +/* Populate the phy according. 
Main function: elink_populate_phy */ +/* */ +/*****************************************************************/ + +static void elink_populate_preemphasis(struct bnx2x_softc *sc, + uint32_t shmem_base, + struct elink_phy *phy, uint8_t port, + uint8_t phy_index) +{ + /* Get the 4 lanes xgxs config rx and tx */ + uint32_t rx = 0, tx = 0, i; + for (i = 0; i < 2; i++) { + /* INT_PHY and ELINK_EXT_PHY1 share the same value location in + * the shmem. When num_phys is greater than 1, than this value + * applies only to ELINK_EXT_PHY1 + */ + if (phy_index == ELINK_INT_PHY || phy_index == ELINK_EXT_PHY1) { + rx = REG_RD(sc, shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[port].xgxs_config_rx[i << 1])); + + tx = REG_RD(sc, shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[port].xgxs_config_tx[i << 1])); + } else { + rx = REG_RD(sc, shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[port].xgxs_config2_rx[i << 1])); + + tx = REG_RD(sc, shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[port].xgxs_config2_rx[i << 1])); + } + + phy->rx_preemphasis[i << 1] = ((rx >> 16) & 0xffff); + phy->rx_preemphasis[(i << 1) + 1] = (rx & 0xffff); + + phy->tx_preemphasis[i << 1] = ((tx >> 16) & 0xffff); + phy->tx_preemphasis[(i << 1) + 1] = (tx & 0xffff); + ELINK_DEBUG_P2(sc, "phy->rx_preemphasis = %x, phy->tx_preemphasis = %x", + phy->rx_preemphasis[i << 1], + phy->tx_preemphasis[i << 1]); + } +} + +static uint32_t elink_get_ext_phy_config(struct bnx2x_softc *sc, + uint32_t shmem_base, + uint8_t phy_index, uint8_t port) +{ + uint32_t ext_phy_config = 0; + switch (phy_index) { + case ELINK_EXT_PHY1: + ext_phy_config = REG_RD(sc, shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[port].external_phy_config)); + break; + case ELINK_EXT_PHY2: + ext_phy_config = REG_RD(sc, shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[port].external_phy_config2)); + break; + default: + ELINK_DEBUG_P1(sc, "Invalid phy_index %d", phy_index); + return ELINK_STATUS_ERROR; + } + + return ext_phy_config; +} +static elink_status_t elink_populate_int_phy(struct bnx2x_softc *sc, + uint32_t shmem_base, uint8_t port, + struct elink_phy *phy) +{ + uint32_t phy_addr; + uint32_t chip_id; + uint32_t switch_cfg = (REG_RD(sc, shmem_base + + offsetof(struct shmem_region, + dev_info.port_feature_config[port].link_config)) & + PORT_FEATURE_CONNECTED_SWITCH_MASK); + chip_id = (REG_RD(sc, MISC_REG_CHIP_NUM) << 16) | + ((REG_RD(sc, MISC_REG_CHIP_REV) & 0xf) << 12); + + ELINK_DEBUG_P1(sc, ":chip_id = 0x%x", chip_id); + if (USES_WARPCORE(sc)) { + uint32_t serdes_net_if; + phy_addr = REG_RD(sc, + MISC_REG_WC0_CTRL_PHY_ADDR); + *phy = phy_warpcore; + if (REG_RD(sc, MISC_REG_PORT4MODE_EN_OVWR) == 0x3) + phy->flags |= ELINK_FLAGS_4_PORT_MODE; + else + phy->flags &= ~ELINK_FLAGS_4_PORT_MODE; + /* Check Dual mode */ + serdes_net_if = (REG_RD(sc, shmem_base + + offsetof(struct shmem_region, dev_info. 
+ port_hw_config[port].default_cfg)) & + PORT_HW_CFG_NET_SERDES_IF_MASK); + /* Set the appropriate supported and flags indications per + * interface type of the chip + */ + switch (serdes_net_if) { + case PORT_HW_CFG_NET_SERDES_IF_SGMII: + phy->supported &= (ELINK_SUPPORTED_10baseT_Half | + ELINK_SUPPORTED_10baseT_Full | + ELINK_SUPPORTED_100baseT_Half | + ELINK_SUPPORTED_100baseT_Full | + ELINK_SUPPORTED_1000baseT_Full | + ELINK_SUPPORTED_FIBRE | + ELINK_SUPPORTED_Autoneg | + ELINK_SUPPORTED_Pause | + ELINK_SUPPORTED_Asym_Pause); + phy->media_type = ELINK_ETH_PHY_BASE_T; + break; + case PORT_HW_CFG_NET_SERDES_IF_XFI: + phy->supported &= (ELINK_SUPPORTED_1000baseT_Full | + ELINK_SUPPORTED_10000baseT_Full | + ELINK_SUPPORTED_FIBRE | + ELINK_SUPPORTED_Pause | + ELINK_SUPPORTED_Asym_Pause); + phy->media_type = ELINK_ETH_PHY_XFP_FIBER; + break; + case PORT_HW_CFG_NET_SERDES_IF_SFI: + phy->supported &= (ELINK_SUPPORTED_1000baseT_Full | + ELINK_SUPPORTED_10000baseT_Full | + ELINK_SUPPORTED_FIBRE | + ELINK_SUPPORTED_Pause | + ELINK_SUPPORTED_Asym_Pause); + phy->media_type = ELINK_ETH_PHY_SFPP_10G_FIBER; + break; + case PORT_HW_CFG_NET_SERDES_IF_KR: + phy->media_type = ELINK_ETH_PHY_KR; + phy->supported &= (ELINK_SUPPORTED_1000baseKX_Full | + ELINK_SUPPORTED_10000baseKR_Full | + ELINK_SUPPORTED_FIBRE | + ELINK_SUPPORTED_Autoneg | + ELINK_SUPPORTED_Pause | + ELINK_SUPPORTED_Asym_Pause); + break; + case PORT_HW_CFG_NET_SERDES_IF_DXGXS: + phy->media_type = ELINK_ETH_PHY_KR; + phy->flags |= ELINK_FLAGS_WC_DUAL_MODE; + phy->supported &= (ELINK_SUPPORTED_20000baseMLD2_Full | + ELINK_SUPPORTED_FIBRE | + ELINK_SUPPORTED_Pause | + ELINK_SUPPORTED_Asym_Pause); + break; + case PORT_HW_CFG_NET_SERDES_IF_KR2: + phy->media_type = ELINK_ETH_PHY_KR; + phy->flags |= ELINK_FLAGS_WC_DUAL_MODE; + phy->supported &= (ELINK_SUPPORTED_20000baseKR2_Full | + ELINK_SUPPORTED_10000baseKR_Full | + ELINK_SUPPORTED_1000baseKX_Full | + ELINK_SUPPORTED_Autoneg | + ELINK_SUPPORTED_FIBRE | + ELINK_SUPPORTED_Pause | + ELINK_SUPPORTED_Asym_Pause); + phy->flags &= ~ELINK_FLAGS_TX_ERROR_CHECK; + break; + default: + ELINK_DEBUG_P1(sc, "Unknown WC interface type 0x%x", + serdes_net_if); + break; + } + + /* Enable MDC/MDIO work-around for E3 A0 since free running MDC + * was not set as expected. 
For B0, ECO will be enabled so there + * won't be an issue there + */ + if (CHIP_REV(sc) == CHIP_REV_Ax) + phy->flags |= ELINK_FLAGS_MDC_MDIO_WA; + else + phy->flags |= ELINK_FLAGS_MDC_MDIO_WA_B0; + ELINK_DEBUG_P3(sc, "media_type = %x, flags = %x, supported = %x", + phy->media_type, phy->flags, phy->supported); + } else { + switch (switch_cfg) { + case ELINK_SWITCH_CFG_1G: + phy_addr = REG_RD(sc, + NIG_REG_SERDES0_CTRL_PHY_ADDR + + port * 0x10); + *phy = phy_serdes; + break; + case ELINK_SWITCH_CFG_10G: + phy_addr = REG_RD(sc, + NIG_REG_XGXS0_CTRL_PHY_ADDR + + port * 0x18); + *phy = phy_xgxs; + break; + default: + ELINK_DEBUG_P0(sc, "Invalid switch_cfg"); + return ELINK_STATUS_ERROR; + } + } + phy->addr = (uint8_t)phy_addr; + phy->mdio_ctrl = elink_get_emac_base(sc, + SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH, + port); + if (CHIP_IS_E2(sc)) + phy->def_md_devad = ELINK_E2_DEFAULT_PHY_DEV_ADDR; + else + phy->def_md_devad = ELINK_DEFAULT_PHY_DEV_ADDR; + + ELINK_DEBUG_P3(sc, "Internal phy port=%d, addr=0x%x, mdio_ctl=0x%x", + port, phy->addr, phy->mdio_ctrl); + + elink_populate_preemphasis(sc, shmem_base, phy, port, ELINK_INT_PHY); + return ELINK_STATUS_OK; +} + +static elink_status_t elink_populate_ext_phy(struct bnx2x_softc *sc, + uint8_t phy_index, + uint32_t shmem_base, + uint32_t shmem2_base, + uint8_t port, + struct elink_phy *phy) +{ + uint32_t ext_phy_config, phy_type, config2; + uint32_t mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH; + ext_phy_config = elink_get_ext_phy_config(sc, shmem_base, + phy_index, port); + phy_type = ELINK_XGXS_EXT_PHY_TYPE(ext_phy_config); + /* Select the phy type */ + switch (phy_type) { + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8073: + mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_SWAPPED; + *phy = phy_8073; + break; + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8705: + *phy = phy_8705; + break; + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8706: + *phy = phy_8706; + break; + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8726: + mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1; + *phy = phy_8726; + break; + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8727_NOC: + /* BNX2X8727_NOC => BNX2X8727 no over current */ + mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1; + *phy = phy_8727; + phy->flags |= ELINK_FLAGS_NOC; + break; + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8722: + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8727: + mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1; + *phy = phy_8727; + break; + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8481: + *phy = phy_8481; + break; + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84823: + *phy = phy_84823; + break; + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84833: + *phy = phy_84833; + break; + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84834: + *phy = phy_84834; + break; + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84858: + *phy = phy_84858; + break; + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X54616: + case PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BNX2X54618SE: + *phy = phy_54618se; + if (phy_type == PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BNX2X54618SE) + phy->flags |= ELINK_FLAGS_EEE; + break; + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101: + *phy = phy_7101; + break; + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE: + *phy = phy_null; + return ELINK_STATUS_ERROR; + default: + *phy = phy_null; + /* In case external PHY wasn't found */ + if ((phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) && + (phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN)) + return ELINK_STATUS_ERROR; + return ELINK_STATUS_OK; + } + + phy->addr = 
ELINK_XGXS_EXT_PHY_ADDR(ext_phy_config); + elink_populate_preemphasis(sc, shmem_base, phy, port, phy_index); + + /* The shmem address of the phy version is located on different + * structures. In case this structure is too old, do not set + * the address + */ + config2 = REG_RD(sc, shmem_base + offsetof(struct shmem_region, + dev_info.shared_hw_config.config2)); + if (phy_index == ELINK_EXT_PHY1) { + phy->ver_addr = shmem_base + offsetof(struct shmem_region, + port_mb[port].ext_phy_fw_version); + + /* Check specific mdc mdio settings */ + if (config2 & SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK) + mdc_mdio_access = config2 & + SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK; + } else { + uint32_t size = REG_RD(sc, shmem2_base); + + if (size > + offsetof(struct shmem2_region, ext_phy_fw_version2)) { + phy->ver_addr = shmem2_base + + offsetof(struct shmem2_region, + ext_phy_fw_version2[port]); + } + /* Check specific mdc mdio settings */ + if (config2 & SHARED_HW_CFG_MDC_MDIO_ACCESS2_MASK) + mdc_mdio_access = (config2 & + SHARED_HW_CFG_MDC_MDIO_ACCESS2_MASK) >> + (SHARED_HW_CFG_MDC_MDIO_ACCESS2_SHIFT - + SHARED_HW_CFG_MDC_MDIO_ACCESS1_SHIFT); + } + phy->mdio_ctrl = elink_get_emac_base(sc, mdc_mdio_access, port); + + if (elink_is_8483x_8485x(phy) && (phy->ver_addr)) { + /* Remove 100Mb link supported for BNX2X84833/4 when phy fw + * version lower than or equal to 1.39 + */ + uint32_t raw_ver = REG_RD(sc, phy->ver_addr); + if (((raw_ver & 0x7F) <= 39) && + (((raw_ver & 0xF80) >> 7) <= 1)) + phy->supported &= ~(ELINK_SUPPORTED_100baseT_Half | + ELINK_SUPPORTED_100baseT_Full); + } + + ELINK_DEBUG_P3(sc, "phy_type 0x%x port %d found in index %d", + phy_type, port, phy_index); + ELINK_DEBUG_P2(sc, " addr=0x%x, mdio_ctl=0x%x", + phy->addr, phy->mdio_ctrl); + return ELINK_STATUS_OK; +} + +static elink_status_t elink_populate_phy(struct bnx2x_softc *sc, + uint8_t phy_index, uint32_t shmem_base, + uint32_t shmem2_base, uint8_t port, + struct elink_phy *phy) +{ + elink_status_t status = ELINK_STATUS_OK; + phy->type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN; + if (phy_index == ELINK_INT_PHY) + return elink_populate_int_phy(sc, shmem_base, port, phy); + status = elink_populate_ext_phy(sc, phy_index, shmem_base, shmem2_base, + port, phy); + return status; +} + +static void elink_phy_def_cfg(struct elink_params *params, + struct elink_phy *phy, + uint8_t phy_index) +{ + struct bnx2x_softc *sc = params->sc; + uint32_t link_config; + /* Populate the default phy configuration for MF mode */ + if (phy_index == ELINK_EXT_PHY2) { + link_config = REG_RD(sc, params->shmem_base + + offsetof(struct shmem_region, dev_info. + port_feature_config[params->port].link_config2)); + phy->speed_cap_mask = REG_RD(sc, params->shmem_base + + offsetof(struct shmem_region, + dev_info. + port_hw_config[params->port].speed_capability_mask2)); + } else { + link_config = REG_RD(sc, params->shmem_base + + offsetof(struct shmem_region, dev_info. + port_feature_config[params->port].link_config)); + phy->speed_cap_mask = REG_RD(sc, params->shmem_base + + offsetof(struct shmem_region, + dev_info. 
+ port_hw_config[params->port].speed_capability_mask)); + } + ELINK_DEBUG_P3(sc, + "Default config phy idx %x cfg 0x%x speed_cap_mask 0x%x", + phy_index, link_config, phy->speed_cap_mask); + + phy->req_duplex = DUPLEX_FULL; + switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) { + case PORT_FEATURE_LINK_SPEED_10M_HALF: + phy->req_duplex = DUPLEX_HALF; + /* fallthrough */ + case PORT_FEATURE_LINK_SPEED_10M_FULL: + phy->req_line_speed = ELINK_SPEED_10; + break; + case PORT_FEATURE_LINK_SPEED_100M_HALF: + phy->req_duplex = DUPLEX_HALF; + /* fallthrough */ + case PORT_FEATURE_LINK_SPEED_100M_FULL: + phy->req_line_speed = ELINK_SPEED_100; + break; + case PORT_FEATURE_LINK_SPEED_1G: + phy->req_line_speed = ELINK_SPEED_1000; + break; + case PORT_FEATURE_LINK_SPEED_2_5G: + phy->req_line_speed = ELINK_SPEED_2500; + break; + case PORT_FEATURE_LINK_SPEED_10G_CX4: + phy->req_line_speed = ELINK_SPEED_10000; + break; + default: + phy->req_line_speed = ELINK_SPEED_AUTO_NEG; + break; + } + + ELINK_DEBUG_P2(sc, "Default config phy idx %x, req_duplex config %x", + phy_index, phy->req_duplex); + + switch (link_config & PORT_FEATURE_FLOW_CONTROL_MASK) { + case PORT_FEATURE_FLOW_CONTROL_AUTO: + phy->req_flow_ctrl = ELINK_FLOW_CTRL_AUTO; + break; + case PORT_FEATURE_FLOW_CONTROL_TX: + phy->req_flow_ctrl = ELINK_FLOW_CTRL_TX; + break; + case PORT_FEATURE_FLOW_CONTROL_RX: + phy->req_flow_ctrl = ELINK_FLOW_CTRL_RX; + break; + case PORT_FEATURE_FLOW_CONTROL_BOTH: + phy->req_flow_ctrl = ELINK_FLOW_CTRL_BOTH; + break; + default: + phy->req_flow_ctrl = ELINK_FLOW_CTRL_NONE; + break; + } + ELINK_DEBUG_P3(sc, "Requested Duplex = %x, line_speed = %x, flow_ctrl = %x", + phy->req_duplex, phy->req_line_speed, + phy->req_flow_ctrl); +} + +uint32_t elink_phy_selection(struct elink_params *params) +{ + uint32_t phy_config_swapped, prio_cfg; + uint32_t return_cfg = PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT; + + phy_config_swapped = params->multi_phy_config & + PORT_HW_CFG_PHY_SWAPPED_ENABLED; + + prio_cfg = params->multi_phy_config & + PORT_HW_CFG_PHY_SELECTION_MASK; + + if (phy_config_swapped) { + switch (prio_cfg) { + case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY: + return_cfg = PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY; + break; + case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY: + return_cfg = PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY; + break; + case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY: + return_cfg = PORT_HW_CFG_PHY_SELECTION_FIRST_PHY; + break; + case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY: + return_cfg = PORT_HW_CFG_PHY_SELECTION_SECOND_PHY; + break; + } + } else + return_cfg = prio_cfg; + + return return_cfg; +} + +elink_status_t elink_phy_probe(struct elink_params *params) +{ + uint8_t phy_index, actual_phy_idx; + uint32_t phy_config_swapped, sync_offset, media_types; + struct bnx2x_softc *sc = params->sc; + struct elink_phy *phy; + params->num_phys = 0; + ELINK_DEBUG_P0(sc, "Begin phy probe"); +#ifdef ELINK_INCLUDE_EMUL + if (CHIP_REV_IS_EMUL(sc)) + return ELINK_STATUS_OK; +#endif + phy_config_swapped = params->multi_phy_config & + PORT_HW_CFG_PHY_SWAPPED_ENABLED; + + for (phy_index = ELINK_INT_PHY; phy_index < ELINK_MAX_PHYS; + phy_index++) { + actual_phy_idx = phy_index; + if (phy_config_swapped) { + if (phy_index == ELINK_EXT_PHY1) + actual_phy_idx = ELINK_EXT_PHY2; + else if (phy_index == ELINK_EXT_PHY2) + actual_phy_idx = ELINK_EXT_PHY1; + } + ELINK_DEBUG_P3(sc, "phy_config_swapped %x, phy_index %x," + " actual_phy_idx %x", phy_config_swapped, + phy_index, actual_phy_idx); + phy = 
&params->phy[actual_phy_idx]; + if (elink_populate_phy(sc, phy_index, params->shmem_base, + params->shmem2_base, params->port, + phy) != ELINK_STATUS_OK) { + params->num_phys = 0; + ELINK_DEBUG_P1(sc, "phy probe failed in phy index %d", + phy_index); + for (phy_index = ELINK_INT_PHY; + phy_index < ELINK_MAX_PHYS; + phy_index++) + *phy = phy_null; + return ELINK_STATUS_ERROR; + } + if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN) + break; + + if (params->feature_config_flags & + ELINK_FEATURE_CONFIG_DISABLE_REMOTE_FAULT_DET) + phy->flags &= ~ELINK_FLAGS_TX_ERROR_CHECK; + + if (!(params->feature_config_flags & + ELINK_FEATURE_CONFIG_MT_SUPPORT)) + phy->flags |= ELINK_FLAGS_MDC_MDIO_WA_G; + + sync_offset = params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[params->port].media_type); + media_types = REG_RD(sc, sync_offset); + + /* Update media type for non-PMF sync only for the first time + * In case the media type changes afterwards, it will be updated + * using the update_status function + */ + if ((media_types & (PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK << + (PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT * + actual_phy_idx))) == 0) { + media_types |= ((phy->media_type & + PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK) << + (PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT * + actual_phy_idx)); + } + REG_WR(sc, sync_offset, media_types); + + elink_phy_def_cfg(params, phy, phy_index); + params->num_phys++; + } + + ELINK_DEBUG_P1(sc, "End phy probe. #phys found %x", params->num_phys); + return ELINK_STATUS_OK; +} + +#ifdef ELINK_INCLUDE_EMUL +static elink_status_t elink_init_e3_emul_mac(struct elink_params *params, + struct elink_vars *vars) +{ + struct bnx2x_softc *sc = params->sc; + vars->line_speed = params->req_line_speed[0]; + /* In case link speed is auto, set speed the highest as possible */ + if (params->req_line_speed[0] == ELINK_SPEED_AUTO_NEG) { + if (params->feature_config_flags & + ELINK_FEATURE_CONFIG_EMUL_DISABLE_XMAC) + vars->line_speed = ELINK_SPEED_2500; + else if (elink_is_4_port_mode(sc)) + vars->line_speed = ELINK_SPEED_10000; + else + vars->line_speed = ELINK_SPEED_20000; + } + if (vars->line_speed < ELINK_SPEED_10000) { + if ((params->feature_config_flags & + ELINK_FEATURE_CONFIG_EMUL_DISABLE_UMAC)) { + ELINK_DEBUG_P1(sc, "Invalid line speed %d while UMAC is" + " disabled!", params->req_line_speed[0]); + return ELINK_STATUS_ERROR; + } + switch (vars->line_speed) { + case ELINK_SPEED_10: + vars->link_status = ELINK_LINK_10TFD; + break; + case ELINK_SPEED_100: + vars->link_status = ELINK_LINK_100TXFD; + break; + case ELINK_SPEED_1000: + vars->link_status = ELINK_LINK_1000TFD; + break; + case ELINK_SPEED_2500: + vars->link_status = ELINK_LINK_2500TFD; + break; + default: + ELINK_DEBUG_P1(sc, "Invalid line speed %d for UMAC", + vars->line_speed); + return ELINK_STATUS_ERROR; + } + vars->link_status |= LINK_STATUS_LINK_UP; + + if (params->loopback_mode == ELINK_LOOPBACK_UMAC) + elink_umac_enable(params, vars, 1); + else + elink_umac_enable(params, vars, 0); + } else { + /* Link speed >= 10000 requires XMAC enabled */ + if (params->feature_config_flags & + ELINK_FEATURE_CONFIG_EMUL_DISABLE_XMAC) { + ELINK_DEBUG_P1(sc, "Invalid line speed %d while XMAC is" + " disabled!", params->req_line_speed[0]); + return ELINK_STATUS_ERROR; + } + /* Check link speed */ + switch (vars->line_speed) { + case ELINK_SPEED_10000: + vars->link_status = ELINK_LINK_10GTFD; + break; + case ELINK_SPEED_20000: + vars->link_status = ELINK_LINK_20GTFD; + break; + default: + ELINK_DEBUG_P1(sc, "Invalid line speed %d for 
XMAC", + vars->line_speed); + return ELINK_STATUS_ERROR; + } + vars->link_status |= LINK_STATUS_LINK_UP; + if (params->loopback_mode == ELINK_LOOPBACK_XMAC) + elink_xmac_enable(params, vars, 1); + else + elink_xmac_enable(params, vars, 0); + } + return ELINK_STATUS_OK; +} + +static elink_status_t elink_init_emul(struct elink_params *params, + struct elink_vars *vars) +{ + struct bnx2x_softc *sc = params->sc; + if (CHIP_IS_E3(sc)) { + if (elink_init_e3_emul_mac(params, vars) != + ELINK_STATUS_OK) + return ELINK_STATUS_ERROR; + } else { + if (params->feature_config_flags & + ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC) { + vars->line_speed = ELINK_SPEED_1000; + vars->link_status = (LINK_STATUS_LINK_UP | + ELINK_LINK_1000XFD); + if (params->loopback_mode == + ELINK_LOOPBACK_EMAC) + elink_emac_enable(params, vars, 1); + else + elink_emac_enable(params, vars, 0); + } else { + vars->line_speed = ELINK_SPEED_10000; + vars->link_status = (LINK_STATUS_LINK_UP | + ELINK_LINK_10GTFD); + if (params->loopback_mode == + ELINK_LOOPBACK_BMAC) + elink_bmac_enable(params, vars, 1, 1); + else + elink_bmac_enable(params, vars, 0, 1); + } + } + vars->link_up = 1; + vars->duplex = DUPLEX_FULL; + vars->flow_ctrl = ELINK_FLOW_CTRL_NONE; + + if (CHIP_IS_E1x(sc)) + elink_pbf_update(params, vars->flow_ctrl, + vars->line_speed); + /* Disable drain */ + REG_WR(sc, NIG_REG_EGRESS_DRAIN0_MODE + params->port * 4, 0); + + /* update shared memory */ + elink_update_mng(params, vars->link_status); + return ELINK_STATUS_OK; +} +#endif +#ifdef ELINK_INCLUDE_FPGA +static elink_status_t elink_init_fpga(struct elink_params *params, + struct elink_vars *vars) +{ + /* Enable on E1.5 FPGA */ + struct bnx2x_softc *sc = params->sc; + vars->duplex = DUPLEX_FULL; + vars->flow_ctrl = ELINK_FLOW_CTRL_NONE; + if (!(CHIP_IS_E1(sc))) { + vars->flow_ctrl = (ELINK_FLOW_CTRL_TX | + ELINK_FLOW_CTRL_RX); + vars->link_status |= (LINK_STATUS_TX_FLOW_CONTROL_ENABLED | + LINK_STATUS_RX_FLOW_CONTROL_ENABLED); + } + if (CHIP_IS_E3(sc)) { + vars->line_speed = params->req_line_speed[0]; + switch (vars->line_speed) { + case ELINK_SPEED_AUTO_NEG: + vars->line_speed = ELINK_SPEED_2500; + case ELINK_SPEED_2500: + vars->link_status = ELINK_LINK_2500TFD; + break; + case ELINK_SPEED_1000: + vars->link_status = ELINK_LINK_1000XFD; + break; + case ELINK_SPEED_100: + vars->link_status = ELINK_LINK_100TXFD; + break; + case ELINK_SPEED_10: + vars->link_status = ELINK_LINK_10TFD; + break; + default: + ELINK_DEBUG_P1(sc, "Invalid link speed %d", + params->req_line_speed[0]); + return ELINK_STATUS_ERROR; + } + vars->link_status |= LINK_STATUS_LINK_UP; + if (params->loopback_mode == ELINK_LOOPBACK_UMAC) + elink_umac_enable(params, vars, 1); + else + elink_umac_enable(params, vars, 0); + } else { + vars->line_speed = ELINK_SPEED_10000; + vars->link_status = (LINK_STATUS_LINK_UP | ELINK_LINK_10GTFD); + if (params->loopback_mode == ELINK_LOOPBACK_EMAC) + elink_emac_enable(params, vars, 1); + else + elink_emac_enable(params, vars, 0); + } + vars->link_up = 1; + + if (CHIP_IS_E1x(sc)) + elink_pbf_update(params, vars->flow_ctrl, + vars->line_speed); + /* Disable drain */ + REG_WR(sc, NIG_REG_EGRESS_DRAIN0_MODE + params->port * 4, 0); + + /* Update shared memory */ + elink_update_mng(params, vars->link_status); + return ELINK_STATUS_OK; +} +#endif +static void elink_init_bmac_loopback(struct elink_params *params, + struct elink_vars *vars) +{ + struct bnx2x_softc *sc = params->sc; + vars->link_up = 1; + vars->line_speed = ELINK_SPEED_10000; + vars->duplex = DUPLEX_FULL; + 
vars->flow_ctrl = ELINK_FLOW_CTRL_NONE; + vars->mac_type = ELINK_MAC_TYPE_BMAC; + + vars->phy_flags = PHY_XGXS_FLAG; + + elink_xgxs_deassert(params); + + /* Set bmac loopback */ + elink_bmac_enable(params, vars, 1, 1); + + REG_WR(sc, NIG_REG_EGRESS_DRAIN0_MODE + params->port * 4, 0); +} + +static void elink_init_emac_loopback(struct elink_params *params, + struct elink_vars *vars) +{ + struct bnx2x_softc *sc = params->sc; + vars->link_up = 1; + vars->line_speed = ELINK_SPEED_1000; + vars->duplex = DUPLEX_FULL; + vars->flow_ctrl = ELINK_FLOW_CTRL_NONE; + vars->mac_type = ELINK_MAC_TYPE_EMAC; + + vars->phy_flags = PHY_XGXS_FLAG; + + elink_xgxs_deassert(params); + /* Set bmac loopback */ + elink_emac_enable(params, vars, 1); + elink_emac_program(params, vars); + REG_WR(sc, NIG_REG_EGRESS_DRAIN0_MODE + params->port * 4, 0); +} + +static void elink_init_xmac_loopback(struct elink_params *params, + struct elink_vars *vars) +{ + struct bnx2x_softc *sc = params->sc; + vars->link_up = 1; + if (!params->req_line_speed[0]) + vars->line_speed = ELINK_SPEED_10000; + else + vars->line_speed = params->req_line_speed[0]; + vars->duplex = DUPLEX_FULL; + vars->flow_ctrl = ELINK_FLOW_CTRL_NONE; + vars->mac_type = ELINK_MAC_TYPE_XMAC; + vars->phy_flags = PHY_XGXS_FLAG; + /* Set WC to loopback mode since link is required to provide clock + * to the XMAC in 20G mode + */ + elink_set_aer_mmd(params, ¶ms->phy[0]); + elink_warpcore_reset_lane(sc, ¶ms->phy[0], 0); + params->phy[ELINK_INT_PHY].config_loopback( + ¶ms->phy[ELINK_INT_PHY], + params); + + elink_xmac_enable(params, vars, 1); + REG_WR(sc, NIG_REG_EGRESS_DRAIN0_MODE + params->port * 4, 0); +} + +static void elink_init_umac_loopback(struct elink_params *params, + struct elink_vars *vars) +{ + struct bnx2x_softc *sc = params->sc; + vars->link_up = 1; + vars->line_speed = ELINK_SPEED_1000; + vars->duplex = DUPLEX_FULL; + vars->flow_ctrl = ELINK_FLOW_CTRL_NONE; + vars->mac_type = ELINK_MAC_TYPE_UMAC; + vars->phy_flags = PHY_XGXS_FLAG; + elink_umac_enable(params, vars, 1); + + REG_WR(sc, NIG_REG_EGRESS_DRAIN0_MODE + params->port * 4, 0); +} + +static void elink_init_xgxs_loopback(struct elink_params *params, + struct elink_vars *vars) +{ + struct bnx2x_softc *sc = params->sc; + struct elink_phy *int_phy = ¶ms->phy[ELINK_INT_PHY]; + vars->link_up = 1; + vars->flow_ctrl = ELINK_FLOW_CTRL_NONE; + vars->duplex = DUPLEX_FULL; + if (params->req_line_speed[0] == ELINK_SPEED_1000) + vars->line_speed = ELINK_SPEED_1000; + else if ((params->req_line_speed[0] == ELINK_SPEED_20000) || + (int_phy->flags & ELINK_FLAGS_WC_DUAL_MODE)) + vars->line_speed = ELINK_SPEED_20000; + else + vars->line_speed = ELINK_SPEED_10000; + + if (!USES_WARPCORE(sc)) + elink_xgxs_deassert(params); + elink_link_initialize(params, vars); + + if (params->req_line_speed[0] == ELINK_SPEED_1000) { + if (USES_WARPCORE(sc)) + elink_umac_enable(params, vars, 0); + else { + elink_emac_program(params, vars); + elink_emac_enable(params, vars, 0); + } + } else { + if (USES_WARPCORE(sc)) + elink_xmac_enable(params, vars, 0); + else + elink_bmac_enable(params, vars, 0, 1); + } + + if (params->loopback_mode == ELINK_LOOPBACK_XGXS) { + /* Set 10G XGXS loopback */ + int_phy->config_loopback(int_phy, params); + } else { + /* Set external phy loopback */ + uint8_t phy_index; + for (phy_index = ELINK_EXT_PHY1; + phy_index < params->num_phys; phy_index++) + if (params->phy[phy_index].config_loopback) + params->phy[phy_index].config_loopback( + ¶ms->phy[phy_index], + params); + } + REG_WR(sc, NIG_REG_EGRESS_DRAIN0_MODE 
+ params->port * 4, 0); + + elink_set_led(params, vars, ELINK_LED_MODE_OPER, vars->line_speed); +} + +void elink_set_rx_filter(struct elink_params *params, uint8_t en) +{ + struct bnx2x_softc *sc = params->sc; + uint8_t val = en * 0x1F; + + /* Open / close the gate between the NIG and the BRB */ + if (!CHIP_IS_E1x(sc)) + val |= en * 0x20; + REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK + params->port * 4, val); + + if (!CHIP_IS_E1(sc)) { + REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK_MF + params->port * 4, + en * 0x3); + } + + REG_WR(sc, (params->port ? NIG_REG_LLH1_BRB1_NOT_MCP : + NIG_REG_LLH0_BRB1_NOT_MCP), en); +} +static elink_status_t elink_avoid_link_flap(struct elink_params *params, + struct elink_vars *vars) +{ + uint32_t phy_idx; + uint32_t dont_clear_stat, lfa_sts; + struct bnx2x_softc *sc = params->sc; + + elink_set_mdio_emac_per_phy(sc, params); + /* Sync the link parameters */ + elink_link_status_update(params, vars); + + /* + * The module verification was already done by previous link owner, + * so this call is meant only to get warning message + */ + + for (phy_idx = ELINK_INT_PHY; phy_idx < params->num_phys; phy_idx++) { + struct elink_phy *phy = ¶ms->phy[phy_idx]; + if (phy->phy_specific_func) { + ELINK_DEBUG_P0(sc, "Calling PHY specific func"); + phy->phy_specific_func(phy, params, ELINK_PHY_INIT); + } + if ((phy->media_type == ELINK_ETH_PHY_SFPP_10G_FIBER) || + (phy->media_type == ELINK_ETH_PHY_SFP_1G_FIBER) || + (phy->media_type == ELINK_ETH_PHY_DA_TWINAX)) + elink_verify_sfp_module(phy, params); + } + lfa_sts = REG_RD(sc, params->lfa_base + + offsetof(struct shmem_lfa, + lfa_sts)); + + dont_clear_stat = lfa_sts & SHMEM_LFA_DONT_CLEAR_STAT; + + /* Re-enable the NIG/MAC */ + if (CHIP_IS_E3(sc)) { + if (!dont_clear_stat) { + REG_WR(sc, GRCBASE_MISC + + MISC_REGISTERS_RESET_REG_2_CLEAR, + (MISC_REGISTERS_RESET_REG_2_MSTAT0 << + params->port)); + REG_WR(sc, GRCBASE_MISC + + MISC_REGISTERS_RESET_REG_2_SET, + (MISC_REGISTERS_RESET_REG_2_MSTAT0 << + params->port)); + } + if (vars->line_speed < ELINK_SPEED_10000) + elink_umac_enable(params, vars, 0); + else + elink_xmac_enable(params, vars, 0); + } else { + if (vars->line_speed < ELINK_SPEED_10000) + elink_emac_enable(params, vars, 0); + else + elink_bmac_enable(params, vars, 0, !dont_clear_stat); + } + + /* Increment LFA count */ + lfa_sts = ((lfa_sts & ~LINK_FLAP_AVOIDANCE_COUNT_MASK) | + (((((lfa_sts & LINK_FLAP_AVOIDANCE_COUNT_MASK) >> + LINK_FLAP_AVOIDANCE_COUNT_OFFSET) + 1) & 0xff) + << LINK_FLAP_AVOIDANCE_COUNT_OFFSET)); + /* Clear link flap reason */ + lfa_sts &= ~LFA_LINK_FLAP_REASON_MASK; + + REG_WR(sc, params->lfa_base + + offsetof(struct shmem_lfa, lfa_sts), lfa_sts); + + /* Disable NIG DRAIN */ + REG_WR(sc, NIG_REG_EGRESS_DRAIN0_MODE + params->port * 4, 0); + + /* Enable interrupts */ + elink_link_int_enable(params); + return ELINK_STATUS_OK; +} + +static void elink_cannot_avoid_link_flap(struct elink_params *params, + struct elink_vars *vars, + int lfa_status) +{ + uint32_t lfa_sts, cfg_idx, tmp_val; + struct bnx2x_softc *sc = params->sc; + + elink_link_reset(params, vars, 1); + + if (!params->lfa_base) + return; + /* Store the new link parameters */ + REG_WR(sc, params->lfa_base + + offsetof(struct shmem_lfa, req_duplex), + params->req_duplex[0] | (params->req_duplex[1] << 16)); + + REG_WR(sc, params->lfa_base + + offsetof(struct shmem_lfa, req_flow_ctrl), + params->req_flow_ctrl[0] | (params->req_flow_ctrl[1] << 16)); + + REG_WR(sc, params->lfa_base + + offsetof(struct shmem_lfa, req_line_speed), + params->req_line_speed[0] | 
(params->req_line_speed[1] << 16)); + + for (cfg_idx = 0; cfg_idx < SHMEM_LINK_CONFIG_SIZE; cfg_idx++) { + REG_WR(sc, params->lfa_base + + offsetof(struct shmem_lfa, + speed_cap_mask[cfg_idx]), + params->speed_cap_mask[cfg_idx]); + } + + tmp_val = REG_RD(sc, params->lfa_base + + offsetof(struct shmem_lfa, additional_config)); + tmp_val &= ~REQ_FC_AUTO_ADV_MASK; + tmp_val |= params->req_fc_auto_adv; + + REG_WR(sc, params->lfa_base + + offsetof(struct shmem_lfa, additional_config), tmp_val); + + lfa_sts = REG_RD(sc, params->lfa_base + + offsetof(struct shmem_lfa, lfa_sts)); + + /* Clear the "Don't Clear Statistics" bit, and set reason */ + lfa_sts &= ~SHMEM_LFA_DONT_CLEAR_STAT; + + /* Set link flap reason */ + lfa_sts &= ~LFA_LINK_FLAP_REASON_MASK; + lfa_sts |= ((lfa_status & LFA_LINK_FLAP_REASON_MASK) << + LFA_LINK_FLAP_REASON_OFFSET); + + /* Increment link flap counter */ + lfa_sts = ((lfa_sts & ~LINK_FLAP_COUNT_MASK) | + (((((lfa_sts & LINK_FLAP_COUNT_MASK) >> + LINK_FLAP_COUNT_OFFSET) + 1) & 0xff) + << LINK_FLAP_COUNT_OFFSET)); + REG_WR(sc, params->lfa_base + + offsetof(struct shmem_lfa, lfa_sts), lfa_sts); + /* Proceed with regular link initialization */ +} + +elink_status_t elink_phy_init(struct elink_params *params, + struct elink_vars *vars) +{ + int lfa_status; + struct bnx2x_softc *sc = params->sc; + ELINK_DEBUG_P0(sc, "Phy Initialization started"); + ELINK_DEBUG_P2(sc, "(1) req_speed %d, req_flowctrl %d", + params->req_line_speed[0], params->req_flow_ctrl[0]); + ELINK_DEBUG_P2(sc, "(2) req_speed %d, req_flowctrl %d", + params->req_line_speed[1], params->req_flow_ctrl[1]); + ELINK_DEBUG_P1(sc, "req_adv_flow_ctrl 0x%x", params->req_fc_auto_adv); + vars->link_status = 0; + vars->phy_link_up = 0; + vars->link_up = 0; + vars->line_speed = 0; + vars->duplex = DUPLEX_FULL; + vars->flow_ctrl = ELINK_FLOW_CTRL_NONE; + vars->mac_type = ELINK_MAC_TYPE_NONE; + vars->phy_flags = 0; + vars->check_kr2_recovery_cnt = 0; + params->link_flags = ELINK_PHY_INITIALIZED; + /* Driver opens NIG-BRB filters */ + elink_set_rx_filter(params, 1); + elink_chng_link_count(params, 1); + /* Check if link flap can be avoided */ + lfa_status = elink_check_lfa(params); + + ELINK_DEBUG_P3(sc, " params : port = %x, loopback_mode = %x req_duplex = %x", + params->port, params->loopback_mode, + params->req_duplex[0]); + ELINK_DEBUG_P3(sc, " params : switch_cfg = %x, lane_config = %x req_duplex[1] = %x", + params->switch_cfg, params->lane_config, + params->req_duplex[1]); + ELINK_DEBUG_P3(sc, " params : chip_id = %x, feature_config_flags = %x, num_phys = %x", + params->chip_id, params->feature_config_flags, + params->num_phys); + ELINK_DEBUG_P3(sc, " params : rsrv = %x, eee_mode = %x, hw_led_mode = %x", + params->rsrv, params->eee_mode, params->hw_led_mode); + ELINK_DEBUG_P3(sc, " params : multi_phy = %x, req_fc_auto_adv = %x, link_flags = %x", + params->multi_phy_config, params->req_fc_auto_adv, + params->link_flags); + ELINK_DEBUG_P2(sc, " params : lfa_base = %x, link_attr = %x", + params->lfa_base, params->link_attr_sync); + if (lfa_status == 0) { + ELINK_DEBUG_P0(sc, "Link Flap Avoidance in progress"); + return elink_avoid_link_flap(params, vars); + } + + ELINK_DEBUG_P1(sc, "Cannot avoid link flap lfa_sta=0x%x", + lfa_status); + elink_cannot_avoid_link_flap(params, vars, lfa_status); + + /* Disable attentions */ + elink_bits_dis(sc, NIG_REG_MASK_INTERRUPT_PORT0 + params->port * 4, + (ELINK_NIG_MASK_XGXS0_LINK_STATUS | + ELINK_NIG_MASK_XGXS0_LINK10G | + ELINK_NIG_MASK_SERDES0_LINK_STATUS | + ELINK_NIG_MASK_MI_INT)); 
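+	/* Editorial note (not in the original source): the NIG link
+	 * attentions are masked at this point so that the EMAC/MAC and PHY
+	 * programming below does not raise spurious link interrupts while
+	 * initialization is in flight; on the non-loopback path they are
+	 * re-enabled via elink_link_int_enable() once init completes.
+	 */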
+#ifdef ELINK_INCLUDE_EMUL + if (!(params->feature_config_flags & + ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC)) +#endif + + elink_emac_init(params, vars); + + if (params->feature_config_flags & ELINK_FEATURE_CONFIG_PFC_ENABLED) + vars->link_status |= LINK_STATUS_PFC_ENABLED; + + if ((params->num_phys == 0) && + !CHIP_REV_IS_SLOW(sc)) { + ELINK_DEBUG_P0(sc, "No phy found for initialization !!"); + return ELINK_STATUS_ERROR; + } + set_phy_vars(params, vars); + + ELINK_DEBUG_P1(sc, "Num of phys on board: %d", params->num_phys); +#ifdef ELINK_INCLUDE_FPGA + if (CHIP_REV_IS_FPGA(sc)) { + return elink_init_fpga(params, vars); + } else +#endif +#ifdef ELINK_INCLUDE_EMUL + if (CHIP_REV_IS_EMUL(sc)) { + return elink_init_emul(params, vars); + } else +#endif + switch (params->loopback_mode) { + case ELINK_LOOPBACK_BMAC: + elink_init_bmac_loopback(params, vars); + break; + case ELINK_LOOPBACK_EMAC: + elink_init_emac_loopback(params, vars); + break; + case ELINK_LOOPBACK_XMAC: + elink_init_xmac_loopback(params, vars); + break; + case ELINK_LOOPBACK_UMAC: + elink_init_umac_loopback(params, vars); + break; + case ELINK_LOOPBACK_XGXS: + case ELINK_LOOPBACK_EXT_PHY: + elink_init_xgxs_loopback(params, vars); + break; + default: + if (!CHIP_IS_E3(sc)) { + if (params->switch_cfg == ELINK_SWITCH_CFG_10G) + elink_xgxs_deassert(params); + else + elink_serdes_deassert(sc, params->port); + } + elink_link_initialize(params, vars); + DELAY(1000 * 30); + elink_link_int_enable(params); + break; + } + elink_update_mng(params, vars->link_status); + + elink_update_mng_eee(params, vars->eee_status); + return ELINK_STATUS_OK; +} + +elink_status_t elink_link_reset(struct elink_params *params, + struct elink_vars *vars, + uint8_t reset_ext_phy) +{ + struct bnx2x_softc *sc = params->sc; + uint8_t phy_index, port = params->port, clear_latch_ind = 0; + ELINK_DEBUG_P1(sc, "Resetting the link of port %d", port); + /* Disable attentions */ + vars->link_status = 0; + elink_chng_link_count(params, 1); + elink_update_mng(params, vars->link_status); + vars->eee_status &= ~(SHMEM_EEE_LP_ADV_STATUS_MASK | + SHMEM_EEE_ACTIVE_BIT); + elink_update_mng_eee(params, vars->eee_status); + elink_bits_dis(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port * 4, + (ELINK_NIG_MASK_XGXS0_LINK_STATUS | + ELINK_NIG_MASK_XGXS0_LINK10G | + ELINK_NIG_MASK_SERDES0_LINK_STATUS | + ELINK_NIG_MASK_MI_INT)); + + /* Activate nig drain */ + REG_WR(sc, NIG_REG_EGRESS_DRAIN0_MODE + port * 4, 1); + + /* Disable nig egress interface */ + if (!CHIP_IS_E3(sc)) { + REG_WR(sc, NIG_REG_BMAC0_OUT_EN + port * 4, 0); + REG_WR(sc, NIG_REG_EGRESS_EMAC0_OUT_EN + port * 4, 0); + } + +#ifdef ELINK_INCLUDE_EMUL + /* Stop BigMac rx */ + if (!(params->feature_config_flags & + ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC)) +#endif + if (!CHIP_IS_E3(sc)) + elink_set_bmac_rx(sc, params->chip_id, port, 0); +#ifdef ELINK_INCLUDE_EMUL + /* Stop XMAC/UMAC rx */ + if (!(params->feature_config_flags & + ELINK_FEATURE_CONFIG_EMUL_DISABLE_XMAC)) +#endif + if (CHIP_IS_E3(sc) && + !CHIP_REV_IS_FPGA(sc)) { + elink_set_xmac_rxtx(params, 0); + elink_set_umac_rxtx(params, 0); + } + /* Disable emac */ + if (!CHIP_IS_E3(sc)) + REG_WR(sc, NIG_REG_NIG_EMAC0_EN + port * 4, 0); + + DELAY(1000 * 10); + /* The PHY reset is controlled by GPIO 1 + * Hold it as vars low + */ + /* Clear link led */ + elink_set_mdio_emac_per_phy(sc, params); + elink_set_led(params, vars, ELINK_LED_MODE_OFF, 0); + + if (reset_ext_phy && (!CHIP_REV_IS_SLOW(sc))) { + for (phy_index = ELINK_EXT_PHY1; phy_index < params->num_phys; + phy_index++) { + 
if (params->phy[phy_index].link_reset) { + elink_set_aer_mmd(params, + ¶ms->phy[phy_index]); + params->phy[phy_index].link_reset( + ¶ms->phy[phy_index], + params); + } + if (params->phy[phy_index].flags & + ELINK_FLAGS_REARM_LATCH_SIGNAL) + clear_latch_ind = 1; + } + } + + if (clear_latch_ind) { + /* Clear latching indication */ + elink_rearm_latch_signal(sc, port, 0); + elink_bits_dis(sc, NIG_REG_LATCH_BC_0 + port * 4, + 1 << ELINK_NIG_LATCH_BC_ENABLE_MI_INT); + } +#if defined(ELINK_INCLUDE_EMUL) || defined(ELINK_INCLUDE_FPGA) + if (!CHIP_REV_IS_SLOW(sc)) +#endif + if (params->phy[ELINK_INT_PHY].link_reset) + params->phy[ELINK_INT_PHY].link_reset( + ¶ms->phy[ELINK_INT_PHY], params); + + /* Disable nig ingress interface */ + if (!CHIP_IS_E3(sc)) { + /* Reset BigMac */ + REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, + (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); + REG_WR(sc, NIG_REG_BMAC0_IN_EN + port * 4, 0); + REG_WR(sc, NIG_REG_EMAC0_IN_EN + port * 4, 0); + } else { + uint32_t xmac_base = (params->port) ? GRCBASE_XMAC1 : + GRCBASE_XMAC0; + elink_set_xumac_nig(params, 0, 0); + if (REG_RD(sc, MISC_REG_RESET_REG_2) & + MISC_REGISTERS_RESET_REG_2_XMAC) + REG_WR(sc, xmac_base + XMAC_REG_CTRL, + XMAC_CTRL_REG_SOFT_RESET); + } + vars->link_up = 0; + vars->phy_flags = 0; + return ELINK_STATUS_OK; +} +elink_status_t elink_lfa_reset(struct elink_params *params, + struct elink_vars *vars) +{ + struct bnx2x_softc *sc = params->sc; + vars->link_up = 0; + vars->phy_flags = 0; + params->link_flags &= ~ELINK_PHY_INITIALIZED; + if (!params->lfa_base) + return elink_link_reset(params, vars, 1); + /* + * Activate NIG drain so that during this time the device won't send + * anything while it is unable to response. + */ + REG_WR(sc, NIG_REG_EGRESS_DRAIN0_MODE + params->port * 4, 1); + + /* + * Close gracefully the gate from BMAC to NIG such that no half packets + * are passed. + */ + if (!CHIP_IS_E3(sc)) + elink_set_bmac_rx(sc, params->chip_id, params->port, 0); + + if (CHIP_IS_E3(sc)) { + elink_set_xmac_rxtx(params, 0); + elink_set_umac_rxtx(params, 0); + } + /* Wait 10ms for the pipe to clean up*/ + DELAY(1000 * 10); + + /* Clean the NIG-BRB using the network filters in a way that will + * not cut a packet in the middle. + */ + elink_set_rx_filter(params, 0); + + /* + * Re-open the gate between the BMAC and the NIG, after verifying the + * gate to the BRB is closed, otherwise packets may arrive to the + * firmware before driver had initialized it. The target is to achieve + * minimum management protocol down time. 
+ */ + if (!CHIP_IS_E3(sc)) + elink_set_bmac_rx(sc, params->chip_id, params->port, 1); + + if (CHIP_IS_E3(sc)) { + elink_set_xmac_rxtx(params, 1); + elink_set_umac_rxtx(params, 1); + } + /* Disable NIG drain */ + REG_WR(sc, NIG_REG_EGRESS_DRAIN0_MODE + params->port * 4, 0); + return ELINK_STATUS_OK; +} + +/****************************************************************************/ +/* Common function */ +/****************************************************************************/ +static elink_status_t elink_8073_common_init_phy(struct bnx2x_softc *sc, + uint32_t shmem_base_path[], + uint32_t shmem2_base_path[], + uint8_t phy_index, + __rte_unused uint32_t chip_id) +{ + struct elink_phy phy[PORT_MAX]; + struct elink_phy *phy_blk[PORT_MAX]; + uint16_t val; + int8_t port = 0; + int8_t port_of_path = 0; + uint32_t swap_val, swap_override; + swap_val = REG_RD(sc, NIG_REG_PORT_SWAP); + swap_override = REG_RD(sc, NIG_REG_STRAP_OVERRIDE); + port ^= (swap_val && swap_override); + elink_ext_phy_hw_reset(sc, port); + /* PART1 - Reset both phys */ + for (port = PORT_MAX - 1; port >= PORT_0; port--) { + uint32_t shmem_base, shmem2_base; + /* In E2, same phy is using for port0 of the two paths */ + if (CHIP_IS_E1x(sc)) { + shmem_base = shmem_base_path[0]; + shmem2_base = shmem2_base_path[0]; + port_of_path = port; + } else { + shmem_base = shmem_base_path[port]; + shmem2_base = shmem2_base_path[port]; + port_of_path = 0; + } + + /* Extract the ext phy address for the port */ + if (elink_populate_phy(sc, phy_index, shmem_base, shmem2_base, + port_of_path, &phy[port]) != + ELINK_STATUS_OK) { + ELINK_DEBUG_P0(sc, "populate_phy failed"); + return ELINK_STATUS_ERROR; + } + /* Disable attentions */ + elink_bits_dis(sc, NIG_REG_MASK_INTERRUPT_PORT0 + + port_of_path * 4, + (ELINK_NIG_MASK_XGXS0_LINK_STATUS | + ELINK_NIG_MASK_XGXS0_LINK10G | + ELINK_NIG_MASK_SERDES0_LINK_STATUS | + ELINK_NIG_MASK_MI_INT)); + + /* Need to take the phy out of low power mode in order + * to write to access its registers + */ + elink_cb_gpio_write(sc, MISC_REGISTERS_GPIO_2, + MISC_REGISTERS_GPIO_OUTPUT_HIGH, + port); + + /* Reset the phy */ + elink_cl45_write(sc, &phy[port], + MDIO_PMA_DEVAD, + MDIO_PMA_REG_CTRL, + 1 << 15); + } + + /* Add delay of 150ms after reset */ + DELAY(1000 * 150); + + if (phy[PORT_0].addr & 0x1) { + phy_blk[PORT_0] = &(phy[PORT_1]); + phy_blk[PORT_1] = &(phy[PORT_0]); + } else { + phy_blk[PORT_0] = &(phy[PORT_0]); + phy_blk[PORT_1] = &(phy[PORT_1]); + } + + /* PART2 - Download firmware to both phys */ + for (port = PORT_MAX - 1; port >= PORT_0; port--) { + if (CHIP_IS_E1x(sc)) + port_of_path = port; + else + port_of_path = 0; + + ELINK_DEBUG_P1(sc, "Loading spirom for phy address 0x%x", + phy_blk[port]->addr); + if (elink_8073_8727_external_rom_boot(sc, phy_blk[port], + port_of_path)) + return ELINK_STATUS_ERROR; + + /* Only set bit 10 = 1 (Tx power down) */ + elink_cl45_read(sc, phy_blk[port], + MDIO_PMA_DEVAD, + MDIO_PMA_REG_TX_POWER_DOWN, &val); + + /* Phase1 of TX_POWER_DOWN reset */ + elink_cl45_write(sc, phy_blk[port], + MDIO_PMA_DEVAD, + MDIO_PMA_REG_TX_POWER_DOWN, + (val | 1 << 10)); + } + + /* Toggle Transmitter: Power down and then up with 600ms delay + * between + */ + DELAY(1000 * 600); + + /* PART3 - complete TX_POWER_DOWN process, and set GPIO2 back to low */ + for (port = PORT_MAX - 1; port >= PORT_0; port--) { + /* Phase2 of POWER_DOWN_RESET */ + /* Release bit 10 (Release Tx power down) */ + elink_cl45_read(sc, phy_blk[port], + MDIO_PMA_DEVAD, + MDIO_PMA_REG_TX_POWER_DOWN, &val); + + 
elink_cl45_write(sc, phy_blk[port], + MDIO_PMA_DEVAD, + MDIO_PMA_REG_TX_POWER_DOWN, + (val & (~(1 << 10)))); + DELAY(1000 * 15); + + /* Read modify write the SPI-ROM version select register */ + elink_cl45_read(sc, phy_blk[port], + MDIO_PMA_DEVAD, + MDIO_PMA_REG_EDC_FFE_MAIN, &val); + elink_cl45_write(sc, phy_blk[port], + MDIO_PMA_DEVAD, + MDIO_PMA_REG_EDC_FFE_MAIN, (val | (1 << 12))); + + /* set GPIO2 back to LOW */ + elink_cb_gpio_write(sc, MISC_REGISTERS_GPIO_2, + MISC_REGISTERS_GPIO_OUTPUT_LOW, port); + } + return ELINK_STATUS_OK; +} +static elink_status_t elink_8726_common_init_phy(struct bnx2x_softc *sc, + uint32_t shmem_base_path[], + uint32_t shmem2_base_path[], + uint8_t phy_index, + __rte_unused uint32_t chip_id) +{ + uint32_t val; + int8_t port; + struct elink_phy phy; + /* Use port1 because of the static port-swap */ + /* Enable the module detection interrupt */ + val = REG_RD(sc, MISC_REG_GPIO_EVENT_EN); + val |= ((1 << MISC_REGISTERS_GPIO_3) | + (1 << (MISC_REGISTERS_GPIO_3 + + MISC_REGISTERS_GPIO_PORT_SHIFT))); + REG_WR(sc, MISC_REG_GPIO_EVENT_EN, val); + + elink_ext_phy_hw_reset(sc, 0); + DELAY(1000 * 5); + for (port = 0; port < PORT_MAX; port++) { + uint32_t shmem_base, shmem2_base; + + /* In E2, same phy is using for port0 of the two paths */ + if (CHIP_IS_E1x(sc)) { + shmem_base = shmem_base_path[0]; + shmem2_base = shmem2_base_path[0]; + } else { + shmem_base = shmem_base_path[port]; + shmem2_base = shmem2_base_path[port]; + } + /* Extract the ext phy address for the port */ + if (elink_populate_phy(sc, phy_index, shmem_base, shmem2_base, + port, &phy) != + ELINK_STATUS_OK) { + ELINK_DEBUG_P0(sc, "populate phy failed"); + return ELINK_STATUS_ERROR; + } + + /* Reset phy*/ + elink_cl45_write(sc, &phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_GEN_CTRL, 0x0001); + + + /* Set fault module detected LED on */ + elink_cb_gpio_write(sc, MISC_REGISTERS_GPIO_0, + MISC_REGISTERS_GPIO_HIGH, + port); + } + + return ELINK_STATUS_OK; +} +static void elink_get_ext_phy_reset_gpio(struct bnx2x_softc *sc, + uint32_t shmem_base, + uint8_t *io_gpio, uint8_t *io_port) +{ + + uint32_t phy_gpio_reset = REG_RD(sc, shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[PORT_0].default_cfg)); + switch (phy_gpio_reset) { + case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P0: + *io_gpio = 0; + *io_port = 0; + break; + case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P0: + *io_gpio = 1; + *io_port = 0; + break; + case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P0: + *io_gpio = 2; + *io_port = 0; + break; + case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P0: + *io_gpio = 3; + *io_port = 0; + break; + case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P1: + *io_gpio = 0; + *io_port = 1; + break; + case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P1: + *io_gpio = 1; + *io_port = 1; + break; + case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P1: + *io_gpio = 2; + *io_port = 1; + break; + case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P1: + *io_gpio = 3; + *io_port = 1; + break; + default: + /* Don't override the io_gpio and io_port */ + break; + } +} + +static elink_status_t elink_8727_common_init_phy(struct bnx2x_softc *sc, + uint32_t shmem_base_path[], + uint32_t shmem2_base_path[], + uint8_t phy_index, + __rte_unused uint32_t chip_id) +{ + int8_t port, reset_gpio; + uint32_t swap_val, swap_override; + struct elink_phy phy[PORT_MAX]; + struct elink_phy *phy_blk[PORT_MAX]; + int8_t port_of_path; + swap_val = REG_RD(sc, NIG_REG_PORT_SWAP); + swap_override = REG_RD(sc, NIG_REG_STRAP_OVERRIDE); + + reset_gpio = MISC_REGISTERS_GPIO_1; + port = 1; + + /* Retrieve the 
reset gpio/port which control the reset. + * Default is GPIO1, PORT1 + */ + elink_get_ext_phy_reset_gpio(sc, shmem_base_path[0], + (uint8_t *)&reset_gpio, (uint8_t *)&port); + + /* Calculate the port based on port swap */ + port ^= (swap_val && swap_override); + + /* Initiate PHY reset*/ + elink_cb_gpio_write(sc, reset_gpio, MISC_REGISTERS_GPIO_OUTPUT_LOW, + port); + DELAY(1000 * 1); + elink_cb_gpio_write(sc, reset_gpio, MISC_REGISTERS_GPIO_OUTPUT_HIGH, + port); + + DELAY(1000 * 5); + + /* PART1 - Reset both phys */ + for (port = PORT_MAX - 1; port >= PORT_0; port--) { + uint32_t shmem_base, shmem2_base; + + /* In E2, same phy is using for port0 of the two paths */ + if (CHIP_IS_E1x(sc)) { + shmem_base = shmem_base_path[0]; + shmem2_base = shmem2_base_path[0]; + port_of_path = port; + } else { + shmem_base = shmem_base_path[port]; + shmem2_base = shmem2_base_path[port]; + port_of_path = 0; + } + + /* Extract the ext phy address for the port */ + if (elink_populate_phy(sc, phy_index, shmem_base, shmem2_base, + port_of_path, &phy[port]) != + ELINK_STATUS_OK) { + ELINK_DEBUG_P0(sc, "populate phy failed"); + return ELINK_STATUS_ERROR; + } + /* disable attentions */ + elink_bits_dis(sc, NIG_REG_MASK_INTERRUPT_PORT0 + + port_of_path * 4, + (ELINK_NIG_MASK_XGXS0_LINK_STATUS | + ELINK_NIG_MASK_XGXS0_LINK10G | + ELINK_NIG_MASK_SERDES0_LINK_STATUS | + ELINK_NIG_MASK_MI_INT)); + + + /* Reset the phy */ + elink_cl45_write(sc, &phy[port], + MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1 << 15); + } + + /* Add delay of 150ms after reset */ + DELAY(1000 * 150); + if (phy[PORT_0].addr & 0x1) { + phy_blk[PORT_0] = &(phy[PORT_1]); + phy_blk[PORT_1] = &(phy[PORT_0]); + } else { + phy_blk[PORT_0] = &(phy[PORT_0]); + phy_blk[PORT_1] = &(phy[PORT_1]); + } + /* PART2 - Download firmware to both phys */ + for (port = PORT_MAX - 1; port >= PORT_0; port--) { + if (CHIP_IS_E1x(sc)) + port_of_path = port; + else + port_of_path = 0; + ELINK_DEBUG_P1(sc, "Loading spirom for phy address 0x%x", + phy_blk[port]->addr); + if (elink_8073_8727_external_rom_boot(sc, phy_blk[port], + port_of_path)) + return ELINK_STATUS_ERROR; + /* Disable PHY transmitter output */ + elink_cl45_write(sc, phy_blk[port], + MDIO_PMA_DEVAD, + MDIO_PMA_REG_TX_DISABLE, 1); + + } + return ELINK_STATUS_OK; +} + +static elink_status_t elink_84833_common_init_phy(struct bnx2x_softc *sc, + uint32_t shmem_base_path[], + __rte_unused uint32_t shmem2_base_path[], + __rte_unused uint8_t phy_index, + uint32_t chip_id) +{ + uint8_t reset_gpios; + reset_gpios = elink_84833_get_reset_gpios(sc, shmem_base_path, chip_id); + elink_cb_gpio_mult_write(sc, reset_gpios, + MISC_REGISTERS_GPIO_OUTPUT_LOW); + DELAY(10); + elink_cb_gpio_mult_write(sc, reset_gpios, + MISC_REGISTERS_GPIO_OUTPUT_HIGH); + ELINK_DEBUG_P1(sc, "84833 reset pulse on pin values 0x%x", + reset_gpios); + return ELINK_STATUS_OK; +} +static elink_status_t elink_ext_phy_common_init(struct bnx2x_softc *sc, + uint32_t shmem_base_path[], + uint32_t shmem2_base_path[], + uint8_t phy_index, + uint32_t ext_phy_type, uint32_t chip_id) +{ + elink_status_t rc = ELINK_STATUS_OK; + + switch (ext_phy_type) { + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8073: + rc = elink_8073_common_init_phy(sc, shmem_base_path, + shmem2_base_path, + phy_index, chip_id); + break; + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8722: + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8727: + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8727_NOC: + rc = elink_8727_common_init_phy(sc, shmem_base_path, + shmem2_base_path, + phy_index, chip_id); + break; + + case 
PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8726: + /* GPIO1 affects both ports, so there's need to pull + * it for single port alone + */ + rc = elink_8726_common_init_phy(sc, shmem_base_path, + shmem2_base_path, + phy_index, chip_id); + break; + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84833: + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84834: + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84858: + /* GPIO3's are linked, and so both need to be toggled + * to obtain required 2us pulse. + */ + rc = elink_84833_common_init_phy(sc, shmem_base_path, + shmem2_base_path, + phy_index, chip_id); + break; + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE: + rc = ELINK_STATUS_ERROR; + break; + default: + ELINK_DEBUG_P1(sc, + "ext_phy 0x%x common init not required", + ext_phy_type); + break; + } + + if (rc != ELINK_STATUS_OK) + elink_cb_event_log(sc, ELINK_LOG_ID_PHY_UNINITIALIZED, 0); + /* "Warning: PHY was not initialized," + * " Port %d", + */ + + return rc; +} + +elink_status_t elink_common_init_phy(struct bnx2x_softc *sc, + uint32_t shmem_base_path[], + uint32_t shmem2_base_path[], uint32_t chip_id, + __rte_unused uint8_t one_port_enabled) +{ + elink_status_t rc = ELINK_STATUS_OK; + uint32_t phy_ver, val; + uint8_t phy_index = 0; + uint32_t ext_phy_type, ext_phy_config; +#if defined(ELINK_INCLUDE_EMUL) || defined(ELINK_INCLUDE_FPGA) + if (CHIP_REV_IS_EMUL(sc) || CHIP_REV_IS_FPGA(sc)) + return ELINK_STATUS_OK; +#endif + + elink_set_mdio_clk(sc, chip_id, GRCBASE_EMAC0); + elink_set_mdio_clk(sc, chip_id, GRCBASE_EMAC1); + ELINK_DEBUG_P0(sc, "Begin common phy init"); + if (CHIP_IS_E3(sc)) { + /* Enable EPIO */ + val = REG_RD(sc, MISC_REG_GEN_PURP_HWG); + REG_WR(sc, MISC_REG_GEN_PURP_HWG, val | 1); + } + /* Check if common init was already done */ + phy_ver = REG_RD(sc, shmem_base_path[0] + + offsetof(struct shmem_region, + port_mb[PORT_0].ext_phy_fw_version)); + if (phy_ver) { + ELINK_DEBUG_P1(sc, "Not doing common init; phy ver is 0x%x", + phy_ver); + return ELINK_STATUS_OK; + } + + /* Read the ext_phy_type for arbitrary port(0) */ + for (phy_index = ELINK_EXT_PHY1; phy_index < ELINK_MAX_PHYS; + phy_index++) { + ext_phy_config = elink_get_ext_phy_config(sc, + shmem_base_path[0], + phy_index, 0); + ext_phy_type = ELINK_XGXS_EXT_PHY_TYPE(ext_phy_config); + rc |= elink_ext_phy_common_init(sc, shmem_base_path, + shmem2_base_path, + phy_index, ext_phy_type, + chip_id); + } + return rc; +} + +static void elink_check_over_curr(struct elink_params *params, + struct elink_vars *vars) +{ + struct bnx2x_softc *sc = params->sc; + uint32_t cfg_pin; + uint8_t port = params->port; + uint32_t pin_val; + + cfg_pin = (REG_RD(sc, params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[port].e3_cmn_pin_cfg1)) & + PORT_HW_CFG_E3_OVER_CURRENT_MASK) >> + PORT_HW_CFG_E3_OVER_CURRENT_SHIFT; + + /* Ignore check if no external input PIN available */ + if (elink_get_cfg_pin(sc, cfg_pin, &pin_val) != ELINK_STATUS_OK) + return; + + if (!pin_val) { + if ((vars->phy_flags & PHY_OVER_CURRENT_FLAG) == 0) { + elink_cb_event_log(sc, ELINK_LOG_ID_OVER_CURRENT, + params->port); + /* "Error: Power fault on Port %d has" + * " been detected and the power to " + * "that SFP+ module has been removed" + * " to prevent failure of the card." 
+ * " Please remove the SFP+ module and" + * " restart the system to clear this" + * " error.", + */ + vars->phy_flags |= PHY_OVER_CURRENT_FLAG; + elink_warpcore_power_module(params, 0); + } + } else + vars->phy_flags &= ~PHY_OVER_CURRENT_FLAG; +} + +/* Returns 0 if no change occurred since last check; 1 otherwise. */ +static uint8_t elink_analyze_link_error(struct elink_params *params, + struct elink_vars *vars, uint32_t status, + uint32_t phy_flag, uint32_t link_flag, + uint8_t notify) +{ + struct bnx2x_softc *sc = params->sc; + /* Compare new value with previous value */ + uint8_t led_mode; + uint32_t old_status = (vars->phy_flags & phy_flag) ? 1 : 0; + + if ((status ^ old_status) == 0) + return 0; + + /* If values differ */ + switch (phy_flag) { + case PHY_HALF_OPEN_CONN_FLAG: + ELINK_DEBUG_P0(sc, "Analyze Remote Fault"); + break; + case PHY_SFP_TX_FAULT_FLAG: + ELINK_DEBUG_P0(sc, "Analyze TX Fault"); + break; + default: + ELINK_DEBUG_P0(sc, "Analyze UNKNOWN"); + } + ELINK_DEBUG_P3(sc, "Link changed:[%x %x]->%x", vars->link_up, + old_status, status); + + /* Do not touch the link in case physical link down */ + if ((vars->phy_flags & PHY_PHYSICAL_LINK_FLAG) == 0) + return 1; + + /* a. Update shmem->link_status accordingly + * b. Update elink_vars->link_up + */ + if (status) { + vars->link_status &= ~LINK_STATUS_LINK_UP; + vars->link_status |= link_flag; + vars->link_up = 0; + vars->phy_flags |= phy_flag; + + /* activate nig drain */ + REG_WR(sc, NIG_REG_EGRESS_DRAIN0_MODE + params->port * 4, 1); + /* Set LED mode to off since the PHY doesn't know about these + * errors + */ + led_mode = ELINK_LED_MODE_OFF; + } else { + vars->link_status |= LINK_STATUS_LINK_UP; + vars->link_status &= ~link_flag; + vars->link_up = 1; + vars->phy_flags &= ~phy_flag; + led_mode = ELINK_LED_MODE_OPER; + + /* Clear nig drain */ + REG_WR(sc, NIG_REG_EGRESS_DRAIN0_MODE + params->port * 4, 0); + } + elink_sync_link(params, vars); + /* Update the LED according to the link state */ + elink_set_led(params, vars, led_mode, ELINK_SPEED_10000); + + /* Update link status in the shared memory */ + elink_update_mng(params, vars->link_status); + + /* C. Trigger General Attention */ + vars->periodic_flags |= ELINK_PERIODIC_FLAGS_LINK_EVENT; + if (notify) + elink_cb_notify_link_changed(sc); + + return 1; +} + +/****************************************************************************** + * Description: + * This function checks for half opened connection change indication. + * When such change occurs, it calls the elink_analyze_link_error + * to check if Remote Fault is set or cleared. Reception of remote fault + * status message in the MAC indicates that the peer's MAC has detected + * a fault, for example, due to break in the TX side of fiber. + * + ******************************************************************************/ +static +elink_status_t elink_check_half_open_conn(struct elink_params *params, + struct elink_vars *vars, + uint8_t notify) +{ + struct bnx2x_softc *sc = params->sc; + uint32_t lss_status = 0; + uint32_t mac_base; + /* In case link status is physically up @ 10G do */ + if (((vars->phy_flags & PHY_PHYSICAL_LINK_FLAG) == 0) || + (REG_RD(sc, NIG_REG_EGRESS_EMAC0_PORT + params->port * 4))) + return ELINK_STATUS_OK; + + if (CHIP_IS_E3(sc) && + (REG_RD(sc, MISC_REG_RESET_REG_2) & + (MISC_REGISTERS_RESET_REG_2_XMAC))) { + /* Check E3 XMAC */ + /* Note that link speed cannot be queried here, since it may be + * zero while link is down. 
In case UMAC is active, LSS will + * simply not be set + */ + mac_base = (params->port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0; + + /* Clear stick bits (Requires rising edge) */ + REG_WR(sc, mac_base + XMAC_REG_CLEAR_RX_LSS_STATUS, 0); + REG_WR(sc, mac_base + XMAC_REG_CLEAR_RX_LSS_STATUS, + XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_LOCAL_FAULT_STATUS | + XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_REMOTE_FAULT_STATUS); + if (REG_RD(sc, mac_base + XMAC_REG_RX_LSS_STATUS)) + lss_status = 1; + + elink_analyze_link_error(params, vars, lss_status, + PHY_HALF_OPEN_CONN_FLAG, + LINK_STATUS_NONE, notify); + } else if (REG_RD(sc, MISC_REG_RESET_REG_2) & + (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << params->port)) { + /* Check E1X / E2 BMAC */ + uint32_t lss_status_reg; + uint32_t wb_data[2]; + mac_base = params->port ? NIG_REG_INGRESS_BMAC1_MEM : + NIG_REG_INGRESS_BMAC0_MEM; + /* Read BIGMAC_REGISTER_RX_LSS_STATUS */ + if (CHIP_IS_E2(sc)) + lss_status_reg = BIGMAC2_REGISTER_RX_LSS_STAT; + else + lss_status_reg = BIGMAC_REGISTER_RX_LSS_STATUS; + + REG_RD_DMAE(sc, mac_base + lss_status_reg, wb_data, 2); + lss_status = (wb_data[0] > 0); + + elink_analyze_link_error(params, vars, lss_status, + PHY_HALF_OPEN_CONN_FLAG, + LINK_STATUS_NONE, notify); + } + return ELINK_STATUS_OK; +} +static void elink_sfp_tx_fault_detection(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars) +{ + struct bnx2x_softc *sc = params->sc; + uint32_t cfg_pin, value = 0; + uint8_t led_change, port = params->port; + + /* Get The SFP+ TX_Fault controlling pin ([eg]pio) */ + cfg_pin = (REG_RD(sc, params->shmem_base + offsetof(struct shmem_region, + dev_info.port_hw_config[port].e3_cmn_pin_cfg)) & + PORT_HW_CFG_E3_TX_FAULT_MASK) >> + PORT_HW_CFG_E3_TX_FAULT_SHIFT; + + if (elink_get_cfg_pin(sc, cfg_pin, &value)) { + ELINK_DEBUG_P1(sc, "Failed to read pin 0x%02x", cfg_pin); + return; + } + + led_change = elink_analyze_link_error(params, vars, value, + PHY_SFP_TX_FAULT_FLAG, + LINK_STATUS_SFP_TX_FAULT, 1); + + if (led_change) { + /* Change TX_Fault led, set link status for further syncs */ + uint8_t led_mode; + + if (vars->phy_flags & PHY_SFP_TX_FAULT_FLAG) { + led_mode = MISC_REGISTERS_GPIO_HIGH; + vars->link_status |= LINK_STATUS_SFP_TX_FAULT; + } else { + led_mode = MISC_REGISTERS_GPIO_LOW; + vars->link_status &= ~LINK_STATUS_SFP_TX_FAULT; + } + + /* If module is unapproved, led should be on regardless */ + if (!(phy->flags & ELINK_FLAGS_SFP_NOT_APPROVED)) { + ELINK_DEBUG_P1(sc, "Change TX_Fault LED: ->%x", + led_mode); + elink_set_e3_module_fault_led(params, led_mode); + } + } +} +static void elink_kr2_recovery(struct elink_params *params, + struct elink_vars *vars, + struct elink_phy *phy) +{ + struct bnx2x_softc *sc = params->sc; + ELINK_DEBUG_P0(sc, "KR2 recovery"); + elink_warpcore_enable_AN_KR2(phy, params, vars); + elink_warpcore_restart_AN_KR(phy, params); +} + +static void elink_check_kr2_wa(struct elink_params *params, + struct elink_vars *vars, + struct elink_phy *phy) +{ + struct bnx2x_softc *sc = params->sc; + uint16_t base_page, next_page, not_kr2_device, lane; + int sigdet; + + /* Once KR2 was disabled, wait 5 seconds before checking KR2 recovery + * Since some switches tend to reinit the AN process and clear the + * the advertised BP/NP after ~2 seconds causing the KR2 to be disabled + * and recovered many times + */ + if (vars->check_kr2_recovery_cnt > 0) { + vars->check_kr2_recovery_cnt--; + return; + } + + sigdet = elink_warpcore_get_sigdet(phy, params); + if (!sigdet) { + if (!(params->link_attr_sync & 
LINK_ATTR_SYNC_KR2_ENABLE)) { + elink_kr2_recovery(params, vars, phy); + ELINK_DEBUG_P0(sc, "No sigdet"); + } + return; + } + + lane = elink_get_warpcore_lane(phy, params); + CL22_WR_OVER_CL45(sc, phy, MDIO_REG_BANK_AER_BLOCK, + MDIO_AER_BLOCK_AER_REG, lane); + elink_cl45_read(sc, phy, MDIO_AN_DEVAD, + MDIO_AN_REG_LP_AUTO_NEG, &base_page); + elink_cl45_read(sc, phy, MDIO_AN_DEVAD, + MDIO_AN_REG_LP_AUTO_NEG2, &next_page); + elink_set_aer_mmd(params, phy); + + /* CL73 has not begun yet */ + if (base_page == 0) { + if (!(params->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) { + elink_kr2_recovery(params, vars, phy); + ELINK_DEBUG_P0(sc, "No BP"); + } + return; + } + + /* In case NP bit is not set in the BasePage, or it is set, + * but only KX is advertised, declare this link partner as non-KR2 + * device. + */ + not_kr2_device = (((base_page & 0x8000) == 0) || + (((base_page & 0x8000) && + ((next_page & 0xe0) == 0x20)))); + + /* In case KR2 is already disabled, check if we need to re-enable it */ + if (!(params->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) { + if (!not_kr2_device) { + ELINK_DEBUG_P2(sc, "BP=0x%x, NP=0x%x", base_page, + next_page); + elink_kr2_recovery(params, vars, phy); + } + return; + } + /* KR2 is enabled, but not KR2 device */ + if (not_kr2_device) { + /* Disable KR2 on both lanes */ + ELINK_DEBUG_P2(sc, "BP=0x%x, NP=0x%x", base_page, next_page); + elink_disable_kr2(params, vars, phy); + /* Restart AN on leading lane */ + elink_warpcore_restart_AN_KR(phy, params); + return; + } +} + +void elink_period_func(struct elink_params *params, struct elink_vars *vars) +{ + uint16_t phy_idx; + struct bnx2x_softc *sc = params->sc; + for (phy_idx = ELINK_INT_PHY; phy_idx < ELINK_MAX_PHYS; phy_idx++) { + if (params->phy[phy_idx].flags & ELINK_FLAGS_TX_ERROR_CHECK) { + elink_set_aer_mmd(params, ¶ms->phy[phy_idx]); + if (elink_check_half_open_conn(params, vars, 1) != + ELINK_STATUS_OK) + ELINK_DEBUG_P0(sc, "Fault detection failed"); + break; + } + } + + if (CHIP_IS_E3(sc)) { + struct elink_phy *phy = ¶ms->phy[ELINK_INT_PHY]; + elink_set_aer_mmd(params, phy); + if (((phy->req_line_speed == ELINK_SPEED_AUTO_NEG) && + (phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)) || + (phy->req_line_speed == ELINK_SPEED_20000)) + elink_check_kr2_wa(params, vars, phy); + elink_check_over_curr(params, vars); + if (vars->rx_tx_asic_rst) + elink_warpcore_config_runtime(phy, params, vars); + + if ((REG_RD(sc, params->shmem_base + + offsetof(struct shmem_region, dev_info. 
+ port_hw_config[params->port].default_cfg)) + & PORT_HW_CFG_NET_SERDES_IF_MASK) == + PORT_HW_CFG_NET_SERDES_IF_SFI) { + if (elink_is_sfp_module_plugged(phy, params)) { + elink_sfp_tx_fault_detection(phy, params, vars); + } else if (vars->link_status & + LINK_STATUS_SFP_TX_FAULT) { + /* Clean trail, interrupt corrects the leds */ + vars->link_status &= ~LINK_STATUS_SFP_TX_FAULT; + vars->phy_flags &= ~PHY_SFP_TX_FAULT_FLAG; + /* Update link status in the shared memory */ + elink_update_mng(params, vars->link_status); + } + } + } +} + +uint8_t elink_fan_failure_det_req(struct bnx2x_softc *sc, + uint32_t shmem_base, + uint32_t shmem2_base, + uint8_t port) +{ + uint8_t phy_index, fan_failure_det_req = 0; + struct elink_phy phy; + for (phy_index = ELINK_EXT_PHY1; phy_index < ELINK_MAX_PHYS; + phy_index++) { + if (elink_populate_phy(sc, phy_index, shmem_base, shmem2_base, + port, &phy) + != ELINK_STATUS_OK) { + ELINK_DEBUG_P0(sc, "populate phy failed"); + return 0; + } + fan_failure_det_req |= (phy.flags & + ELINK_FLAGS_FAN_FAILURE_DET_REQ); + } + return fan_failure_det_req; +} + +void elink_hw_reset_phy(struct elink_params *params) +{ + uint8_t phy_index; + struct bnx2x_softc *sc = params->sc; + elink_update_mng(params, 0); + elink_bits_dis(sc, NIG_REG_MASK_INTERRUPT_PORT0 + params->port * 4, + (ELINK_NIG_MASK_XGXS0_LINK_STATUS | + ELINK_NIG_MASK_XGXS0_LINK10G | + ELINK_NIG_MASK_SERDES0_LINK_STATUS | + ELINK_NIG_MASK_MI_INT)); + + for (phy_index = ELINK_INT_PHY; phy_index < ELINK_MAX_PHYS; + phy_index++) { + if (params->phy[phy_index].hw_reset) { + params->phy[phy_index].hw_reset( + ¶ms->phy[phy_index], + params); + params->phy[phy_index] = phy_null; + } + } +} + +void elink_init_mod_abs_int(struct bnx2x_softc *sc, struct elink_vars *vars, + uint32_t chip_id, uint32_t shmem_base, + uint32_t shmem2_base, + uint8_t port) +{ + uint8_t gpio_num = 0xff, gpio_port = 0xff, phy_index; + uint32_t val; + uint32_t offset, aeu_mask, swap_val, swap_override, sync_offset; + if (CHIP_IS_E3(sc)) { + if (elink_get_mod_abs_int_cfg(sc, chip_id, + shmem_base, + port, + &gpio_num, + &gpio_port) != ELINK_STATUS_OK) + return; + } else { + struct elink_phy phy; + for (phy_index = ELINK_EXT_PHY1; phy_index < ELINK_MAX_PHYS; + phy_index++) { + if (elink_populate_phy(sc, phy_index, shmem_base, + shmem2_base, port, &phy) + != ELINK_STATUS_OK) { + ELINK_DEBUG_P0(sc, "populate phy failed"); + return; + } + if (phy.type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8726) { + gpio_num = MISC_REGISTERS_GPIO_3; + gpio_port = port; + break; + } + } + } + + if (gpio_num == 0xff) + return; + + /* Set GPIO3 to trigger SFP+ module insertion/removal */ + elink_cb_gpio_write(sc, gpio_num, MISC_REGISTERS_GPIO_INPUT_HI_Z, + gpio_port); + + swap_val = REG_RD(sc, NIG_REG_PORT_SWAP); + swap_override = REG_RD(sc, NIG_REG_STRAP_OVERRIDE); + gpio_port ^= (swap_val && swap_override); + + vars->aeu_int_mask = AEU_INPUTS_ATTN_BITS_GPIO0_FUNCTION_0 << + (gpio_num + (gpio_port << 2)); + + sync_offset = shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[port].aeu_int_mask); + REG_WR(sc, sync_offset, vars->aeu_int_mask); + + ELINK_DEBUG_P3(sc, "Setting MOD_ABS (GPIO%d_P%d) AEU to 0x%x", + gpio_num, gpio_port, vars->aeu_int_mask); + + if (port == 0) + offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0; + else + offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0; + + /* Open appropriate AEU for interrupts */ + aeu_mask = REG_RD(sc, offset); + aeu_mask |= vars->aeu_int_mask; + REG_WR(sc, offset, aeu_mask); + + /* Enable the GPIO to trigger interrupt */ + 
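+	/* Editorial note (assumption inferred from the shifts used here):
+	 * both the AEU mask above and MISC_REG_GPIO_EVENT_EN appear to
+	 * allocate four GPIO bits per port, which is why the bit index is
+	 * computed as (gpio_num + (gpio_port << 2)) in both places.
+	 */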
val = REG_RD(sc, MISC_REG_GPIO_EVENT_EN); + val |= 1 << (gpio_num + (gpio_port << 2)); + REG_WR(sc, MISC_REG_GPIO_EVENT_EN, val); +} diff --git a/src/spdk/dpdk/drivers/net/bnx2x/elink.h b/src/spdk/dpdk/drivers/net/bnx2x/elink.h new file mode 100644 index 000000000..dd70ac6c6 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnx2x/elink.h @@ -0,0 +1,700 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2007-2013 Broadcom Corporation. + * + * Eric Davis + * David Christensen + * Gary Zambrano + * + * Copyright (c) 2013-2015 Brocade Communications Systems, Inc. + * Copyright (c) 2015-2018 Cavium Inc. + * All rights reserved. + * www.cavium.com + */ + +#ifndef ELINK_H +#define ELINK_H + +#include "bnx2x_logs.h" + + + + + + +/***********************************************************/ +/* CLC Call backs functions */ +/***********************************************************/ +/* CLC device structure */ +struct bnx2x_softc; + +extern uint32_t elink_cb_reg_read(struct bnx2x_softc *sc, uint32_t reg_addr); +extern void elink_cb_reg_write(struct bnx2x_softc *sc, uint32_t reg_addr, uint32_t val); +/* wb_write - pointer to 2 32 bits vars to be passed to the DMAE*/ +extern void elink_cb_reg_wb_write(struct bnx2x_softc *sc, uint32_t offset, + uint32_t *wb_write, uint16_t len); +extern void elink_cb_reg_wb_read(struct bnx2x_softc *sc, uint32_t offset, + uint32_t *wb_write, uint16_t len); + +/* mode - 0( LOW ) /1(HIGH)*/ +extern uint8_t elink_cb_gpio_write(struct bnx2x_softc *sc, + uint16_t gpio_num, + uint8_t mode, uint8_t port); +extern uint8_t elink_cb_gpio_mult_write(struct bnx2x_softc *sc, + uint8_t pins, + uint8_t mode); + +extern uint32_t elink_cb_gpio_read(struct bnx2x_softc *sc, uint16_t gpio_num, uint8_t port); +extern uint8_t elink_cb_gpio_int_write(struct bnx2x_softc *sc, + uint16_t gpio_num, + uint8_t mode, uint8_t port); + +extern uint32_t elink_cb_fw_command(struct bnx2x_softc *sc, uint32_t command, uint32_t param); + +/* Delay */ +extern void elink_cb_udelay(struct bnx2x_softc *sc, uint32_t microsecond); + +/* This function is called every 1024 bytes downloading of phy firmware. 
+Driver can use it to print to screen indication for download progress */ +extern void elink_cb_download_progress(struct bnx2x_softc *sc, uint32_t cur, uint32_t total); + +/* Each log type has its own parameters */ +typedef enum elink_log_id { + ELINK_LOG_ID_UNQUAL_IO_MODULE = 0, /* uint8_t port, const char* vendor_name, const char* vendor_pn */ + ELINK_LOG_ID_OVER_CURRENT = 1, /* uint8_t port */ + ELINK_LOG_ID_PHY_UNINITIALIZED = 2, /* uint8_t port */ + ELINK_LOG_ID_MDIO_ACCESS_TIMEOUT= 3, /* No params */ + ELINK_LOG_ID_NON_10G_MODULE = 4, /* uint8_t port */ +}elink_log_id_t; + +typedef enum elink_status { + ELINK_STATUS_OK = 0, + ELINK_STATUS_ERROR, + ELINK_STATUS_TIMEOUT, + ELINK_STATUS_NO_LINK, + ELINK_STATUS_INVALID_IMAGE, + ELINK_OP_NOT_SUPPORTED = 122 +} elink_status_t; +extern void elink_cb_event_log(struct bnx2x_softc *sc, const elink_log_id_t log_id, ...); +extern void elink_cb_load_warpcore_microcode(void); + +extern uint8_t elink_cb_path_id(struct bnx2x_softc *sc); + +extern void elink_cb_notify_link_changed(struct bnx2x_softc *sc); + +#define ELINK_EVENT_LOG_LEVEL_ERROR 1 +#define ELINK_EVENT_LOG_LEVEL_WARNING 2 +#define ELINK_EVENT_ID_SFP_UNQUALIFIED_MODULE 1 +#define ELINK_EVENT_ID_SFP_POWER_FAULT 2 + +#define ARRAY_SIZE(x) (sizeof(x)/sizeof(x[0])) +/* Debug prints */ +#ifdef ELINK_DEBUG + +extern void elink_cb_dbg(struct bnx2x_softc *sc, const char *fmt); +extern void elink_cb_dbg1(struct bnx2x_softc *sc, const char *fmt, + uint32_t arg1); +extern void elink_cb_dbg2(struct bnx2x_softc *sc, const char *fmt, + uint32_t arg1, uint32_t arg2); +extern void elink_cb_dbg3(struct bnx2x_softc *sc, const char *fmt, + uint32_t arg1, uint32_t arg2, + uint32_t arg3); + +#define ELINK_DEBUG_P0(sc, fmt) elink_cb_dbg(sc, fmt) +#define ELINK_DEBUG_P1(sc, fmt, arg1) elink_cb_dbg1(sc, fmt, arg1) +#define ELINK_DEBUG_P2(sc, fmt, arg1, arg2) \ + elink_cb_dbg2(sc, fmt, arg1, arg2) +#define ELINK_DEBUG_P3(sc, fmt, arg1, arg2, arg3) \ + elink_cb_dbg3(sc, fmt, arg1, arg2, arg3) +#else +#define ELINK_DEBUG_P0(sc, fmt) PMD_DRV_LOG(DEBUG, sc, fmt) +#define ELINK_DEBUG_P1(sc, fmt, arg1) \ + PMD_DRV_LOG(DEBUG, sc, fmt, arg1) +#define ELINK_DEBUG_P2(sc, fmt, arg1, arg2) \ + PMD_DRV_LOG(DEBUG, sc, fmt, arg1, arg2) +#define ELINK_DEBUG_P3(sc, fmt, arg1, arg2, arg3) \ + PMD_DRV_LOG(DEBUG, sc, fmt, arg1, arg2, arg3) +#endif + +/***********************************************************/ +/* Defines */ +/***********************************************************/ +#define ELINK_DEFAULT_PHY_DEV_ADDR 3 +#define ELINK_E2_DEFAULT_PHY_DEV_ADDR 5 + + +#define DUPLEX_FULL 1 +#define DUPLEX_HALF 2 + +#define ELINK_FLOW_CTRL_AUTO PORT_FEATURE_FLOW_CONTROL_AUTO +#define ELINK_FLOW_CTRL_TX PORT_FEATURE_FLOW_CONTROL_TX +#define ELINK_FLOW_CTRL_RX PORT_FEATURE_FLOW_CONTROL_RX +#define ELINK_FLOW_CTRL_BOTH PORT_FEATURE_FLOW_CONTROL_BOTH +#define ELINK_FLOW_CTRL_NONE PORT_FEATURE_FLOW_CONTROL_NONE + +#define ELINK_NET_SERDES_IF_XFI 1 +#define ELINK_NET_SERDES_IF_SFI 2 +#define ELINK_NET_SERDES_IF_KR 3 +#define ELINK_NET_SERDES_IF_DXGXS 4 + +#define ELINK_SPEED_AUTO_NEG 0 +#define ELINK_SPEED_10 10 +#define ELINK_SPEED_100 100 +#define ELINK_SPEED_1000 1000 +#define ELINK_SPEED_2500 2500 +#define ELINK_SPEED_10000 10000 +#define ELINK_SPEED_20000 20000 + +#define ELINK_I2C_DEV_ADDR_A0 0xa0 +#define ELINK_I2C_DEV_ADDR_A2 0xa2 + +#define ELINK_SFP_EEPROM_PAGE_SIZE 16 +#define ELINK_SFP_EEPROM_VENDOR_NAME_ADDR 0x14 +#define ELINK_SFP_EEPROM_VENDOR_NAME_SIZE 16 +#define ELINK_SFP_EEPROM_VENDOR_OUI_ADDR 0x25 +#define 
ELINK_SFP_EEPROM_VENDOR_OUI_SIZE 3 +#define ELINK_SFP_EEPROM_PART_NO_ADDR 0x28 +#define ELINK_SFP_EEPROM_PART_NO_SIZE 16 +#define ELINK_SFP_EEPROM_REVISION_ADDR 0x38 +#define ELINK_SFP_EEPROM_REVISION_SIZE 4 +#define ELINK_SFP_EEPROM_SERIAL_ADDR 0x44 +#define ELINK_SFP_EEPROM_SERIAL_SIZE 16 +#define ELINK_SFP_EEPROM_DATE_ADDR 0x54 /* ASCII YYMMDD */ +#define ELINK_SFP_EEPROM_DATE_SIZE 6 +#define ELINK_SFP_EEPROM_DIAG_TYPE_ADDR 0x5c +#define ELINK_SFP_EEPROM_DIAG_TYPE_SIZE 1 +#define ELINK_SFP_EEPROM_DIAG_ADDR_CHANGE_REQ (1 << 2) +#define ELINK_SFP_EEPROM_SFF_8472_COMP_ADDR 0x5e +#define ELINK_SFP_EEPROM_SFF_8472_COMP_SIZE 1 +#define ELINK_SFP_EEPROM_VENDOR_SPECIFIC_ADDR 0x60 +#define ELINK_SFP_EEPROM_VENDOR_SPECIFIC_SIZE 16 + + +#define ELINK_SFP_EEPROM_A2_CHECKSUM_RANGE 0x5e +#define ELINK_SFP_EEPROM_A2_CC_DMI_ADDR 0x5f + +#define ELINK_PWR_FLT_ERR_MSG_LEN 250 + +#define ELINK_XGXS_EXT_PHY_TYPE(ext_phy_config) \ + ((ext_phy_config) & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK) +#define ELINK_XGXS_EXT_PHY_ADDR(ext_phy_config) \ + (((ext_phy_config) & PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >> \ + PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT) +#define ELINK_SERDES_EXT_PHY_TYPE(ext_phy_config) \ + ((ext_phy_config) & PORT_HW_CFG_SERDES_EXT_PHY_TYPE_MASK) + +/* Single Media Direct board is the plain 577xx board with CX4/RJ45 jacks */ +#define ELINK_SINGLE_MEDIA_DIRECT(params) (params->num_phys == 1) +/* Single Media board contains single external phy */ +#define ELINK_SINGLE_MEDIA(params) (params->num_phys == 2) +/* Dual Media board contains two external phy with different media */ +#define ELINK_DUAL_MEDIA(params) (params->num_phys == 3) + +#define ELINK_FW_PARAM_PHY_ADDR_MASK 0x000000FF +#define ELINK_FW_PARAM_PHY_TYPE_MASK 0x0000FF00 +#define ELINK_FW_PARAM_MDIO_CTRL_MASK 0xFFFF0000 +#define ELINK_FW_PARAM_MDIO_CTRL_OFFSET 16 +#define ELINK_FW_PARAM_PHY_ADDR(fw_param) (fw_param & \ + ELINK_FW_PARAM_PHY_ADDR_MASK) +#define ELINK_FW_PARAM_PHY_TYPE(fw_param) (fw_param & \ + ELINK_FW_PARAM_PHY_TYPE_MASK) +#define ELINK_FW_PARAM_MDIO_CTRL(fw_param) ((fw_param & \ + ELINK_FW_PARAM_MDIO_CTRL_MASK) >> \ + ELINK_FW_PARAM_MDIO_CTRL_OFFSET) +#define ELINK_FW_PARAM_SET(phy_addr, phy_type, mdio_access) \ + (phy_addr | phy_type | mdio_access << ELINK_FW_PARAM_MDIO_CTRL_OFFSET) + + +#define ELINK_PFC_BRB_FULL_LB_XOFF_THRESHOLD 170 +#define ELINK_PFC_BRB_FULL_LB_XON_THRESHOLD 250 + +#define ELINK_MAXVAL(a, b) (((a) > (b)) ? (a) : (b)) + +#define ELINK_BMAC_CONTROL_RX_ENABLE 2 +/***********************************************************/ +/* Structs */ +/***********************************************************/ +#define ELINK_INT_PHY 0 +#define ELINK_EXT_PHY1 1 +#define ELINK_EXT_PHY2 2 +#define ELINK_MAX_PHYS 3 + +/* Same configuration is shared between the XGXS and the first external phy */ +#define ELINK_LINK_CONFIG_SIZE (ELINK_MAX_PHYS - 1) +#define ELINK_LINK_CONFIG_IDX(_phy_idx) ((_phy_idx == ELINK_INT_PHY) ? 
\ + 0 : (_phy_idx - 1)) +/***********************************************************/ +/* elink_phy struct */ +/* Defines the required arguments and function per phy */ +/***********************************************************/ +struct elink_vars; +struct elink_params; +struct elink_phy; + +typedef uint8_t (*config_init_t)(struct elink_phy *phy, struct elink_params *params, + struct elink_vars *vars); +typedef uint8_t (*read_status_t)(struct elink_phy *phy, struct elink_params *params, + struct elink_vars *vars); +typedef void (*link_reset_t)(struct elink_phy *phy, + struct elink_params *params); +typedef void (*config_loopback_t)(struct elink_phy *phy, + struct elink_params *params); +typedef elink_status_t (*format_fw_ver_t)(uint32_t raw, uint8_t *str, uint16_t *len); +typedef void (*hw_reset_t)(struct elink_phy *phy, struct elink_params *params); +typedef void (*set_link_led_t)(struct elink_phy *phy, + struct elink_params *params, uint8_t mode); +typedef void (*phy_specific_func_t)(struct elink_phy *phy, + struct elink_params *params, uint32_t action); +struct elink_reg_set { + uint8_t devad; + uint16_t reg; + uint16_t val; +}; + +struct elink_phy { + uint32_t type; + + /* Loaded during init */ + uint8_t addr; + uint8_t def_md_devad; + uint16_t flags; + /* No Over-Current detection */ +#define ELINK_FLAGS_NOC (1 << 1) + /* Fan failure detection required */ +#define ELINK_FLAGS_FAN_FAILURE_DET_REQ (1 << 2) + /* Initialize first the XGXS and only then the phy itself */ +#define ELINK_FLAGS_INIT_XGXS_FIRST (1 << 3) +#define ELINK_FLAGS_WC_DUAL_MODE (1 << 4) +#define ELINK_FLAGS_4_PORT_MODE (1 << 5) +#define ELINK_FLAGS_REARM_LATCH_SIGNAL (1 << 6) +#define ELINK_FLAGS_SFP_NOT_APPROVED (1 << 7) +#define ELINK_FLAGS_MDC_MDIO_WA (1 << 8) +#define ELINK_FLAGS_DUMMY_READ (1 << 9) +#define ELINK_FLAGS_MDC_MDIO_WA_B0 (1 << 10) +#define ELINK_FLAGS_SFP_MODULE_PLUGGED_IN_WC (1 << 11) +#define ELINK_FLAGS_TX_ERROR_CHECK (1 << 12) +#define ELINK_FLAGS_EEE (1 << 13) +#define ELINK_FLAGS_TEMPERATURE (1 << 14) +#define ELINK_FLAGS_MDC_MDIO_WA_G (1 << 15) + + /* preemphasis values for the rx side */ + uint16_t rx_preemphasis[4]; + + /* preemphasis values for the tx side */ + uint16_t tx_preemphasis[4]; + + /* EMAC address for access MDIO */ + uint32_t mdio_ctrl; + + uint32_t supported; +#define ELINK_SUPPORTED_10baseT_Half (1 << 0) +#define ELINK_SUPPORTED_10baseT_Full (1 << 1) +#define ELINK_SUPPORTED_100baseT_Half (1 << 2) +#define ELINK_SUPPORTED_100baseT_Full (1 << 3) +#define ELINK_SUPPORTED_1000baseT_Full (1 << 4) +#define ELINK_SUPPORTED_2500baseX_Full (1 << 5) +#define ELINK_SUPPORTED_10000baseT_Full (1 << 6) +#define ELINK_SUPPORTED_TP (1 << 7) +#define ELINK_SUPPORTED_FIBRE (1 << 8) +#define ELINK_SUPPORTED_Autoneg (1 << 9) +#define ELINK_SUPPORTED_Pause (1 << 10) +#define ELINK_SUPPORTED_Asym_Pause (1 << 11) +#define ELINK_SUPPORTED_1000baseKX_Full (1 << 17) +#define ELINK_SUPPORTED_10000baseKR_Full (1 << 19) +#define ELINK_SUPPORTED_20000baseMLD2_Full (1 << 21) +#define ELINK_SUPPORTED_20000baseKR2_Full (1 << 22) + + uint32_t media_type; +#define ELINK_ETH_PHY_UNSPECIFIED 0x0 +#define ELINK_ETH_PHY_SFPP_10G_FIBER 0x1 +#define ELINK_ETH_PHY_XFP_FIBER 0x2 +#define ELINK_ETH_PHY_DA_TWINAX 0x3 +#define ELINK_ETH_PHY_BASE_T 0x4 +#define ELINK_ETH_PHY_SFP_1G_FIBER 0x5 +#define ELINK_ETH_PHY_KR 0xf0 +#define ELINK_ETH_PHY_CX4 0xf1 +#define ELINK_ETH_PHY_NOT_PRESENT 0xff + + /* The address in which version is located*/ + uint32_t ver_addr; + + uint16_t req_flow_ctrl; + + uint16_t req_line_speed; 
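+	/* Editorial note: req_flow_ctrl, req_line_speed and speed_cap_mask
+	 * here are per-PHY copies of the requested link settings; they are
+	 * assumed to be filled from the per-port arrays in struct
+	 * elink_params (e.g. by set_phy_vars(), called from elink_phy_init())
+	 * before link initialization runs.
+	 */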
+ + uint32_t speed_cap_mask; + + uint16_t req_duplex; + uint16_t rsrv; + /* Called per phy/port init, and it configures LASI, speed, autoneg, + duplex, flow control negotiation, etc. */ + config_init_t config_init; + + /* Called due to interrupt. It determines the link, speed */ + read_status_t read_status; + + /* Called when driver is unloading. Should reset the phy */ + link_reset_t link_reset; + + /* Set the loopback configuration for the phy */ + config_loopback_t config_loopback; + + /* Format the given raw number into str up to len */ + format_fw_ver_t format_fw_ver; + + /* Reset the phy (both ports) */ + hw_reset_t hw_reset; + + /* Set link led mode (on/off/oper)*/ + set_link_led_t set_link_led; + + /* PHY Specific tasks */ + phy_specific_func_t phy_specific_func; +#define ELINK_DISABLE_TX 1 +#define ELINK_ENABLE_TX 2 +#define ELINK_PHY_INIT 3 +}; + +/* Inputs parameters to the CLC */ +struct elink_params { + + uint8_t port; + + /* Default / User Configuration */ + uint8_t loopback_mode; +#define ELINK_LOOPBACK_NONE 0 +#define ELINK_LOOPBACK_EMAC 1 +#define ELINK_LOOPBACK_BMAC 2 +#define ELINK_LOOPBACK_XGXS 3 +#define ELINK_LOOPBACK_EXT_PHY 4 +#define ELINK_LOOPBACK_EXT 5 +#define ELINK_LOOPBACK_UMAC 6 +#define ELINK_LOOPBACK_XMAC 7 + + /* Device parameters */ + uint8_t mac_addr[6]; + + uint16_t req_duplex[ELINK_LINK_CONFIG_SIZE]; + uint16_t req_flow_ctrl[ELINK_LINK_CONFIG_SIZE]; + + uint16_t req_line_speed[ELINK_LINK_CONFIG_SIZE]; /* Also determine AutoNeg */ + + /* shmem parameters */ + uint32_t shmem_base; + uint32_t shmem2_base; + uint32_t speed_cap_mask[ELINK_LINK_CONFIG_SIZE]; + uint32_t switch_cfg; +#define ELINK_SWITCH_CFG_1G PORT_FEATURE_CON_SWITCH_1G_SWITCH +#define ELINK_SWITCH_CFG_10G PORT_FEATURE_CON_SWITCH_10G_SWITCH +#define ELINK_SWITCH_CFG_AUTO_DETECT PORT_FEATURE_CON_SWITCH_AUTO_DETECT + + uint32_t lane_config; + + /* Phy register parameter */ + uint32_t chip_id; + + /* features */ + uint32_t feature_config_flags; +#define ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED (1 << 0) +#define ELINK_FEATURE_CONFIG_PFC_ENABLED (1 << 1) +#define ELINK_FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY (1 << 2) +#define ELINK_FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY (1 << 3) +#define ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC (1 << 4) +#define ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC (1 << 5) +#define ELINK_FEATURE_CONFIG_EMUL_DISABLE_UMAC (1 << 6) +#define ELINK_FEATURE_CONFIG_EMUL_DISABLE_XMAC (1 << 7) +#define ELINK_FEATURE_CONFIG_BC_SUPPORTS_AFEX (1 << 8) +#define ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED (1 << 9) +#define ELINK_FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED (1 << 10) +#define ELINK_FEATURE_CONFIG_DISABLE_REMOTE_FAULT_DET (1 << 11) +#define ELINK_FEATURE_CONFIG_IEEE_PHY_TEST (1 << 12) +#define ELINK_FEATURE_CONFIG_MT_SUPPORT (1 << 13) +#define ELINK_FEATURE_CONFIG_BOOT_FROM_SAN (1 << 14) +#define ELINK_FEATURE_CONFIG_DISABLE_PD (1 << 15) + + /* Will be populated during common init */ + struct elink_phy phy[ELINK_MAX_PHYS]; + + /* Will be populated during common init */ + uint8_t num_phys; + + uint8_t rsrv; + + /* Used to configure the EEE Tx LPI timer, has several modes of + * operation, according to bits 29:28 - + * 2'b00: Timer will be configured by nvram, output will be the value + * from nvram. + * 2'b01: Timer will be configured by nvram, output will be in + * microseconds. + * 2'b10: bits 1:0 contain an nvram value which will be used instead + * of the one located in the nvram. Output will be that value. 
+ * 2'b11: bits 19:0 contain the idle timer in microseconds; output + * will be in microseconds. + * Bits 31:30 should be 2'b11 in order for EEE to be enabled. + */ + uint32_t eee_mode; +#define ELINK_EEE_MODE_NVRAM_BALANCED_TIME (0xa00) +#define ELINK_EEE_MODE_NVRAM_AGGRESSIVE_TIME (0x100) +#define ELINK_EEE_MODE_NVRAM_LATENCY_TIME (0x6000) +#define ELINK_EEE_MODE_NVRAM_MASK (0x3) +#define ELINK_EEE_MODE_TIMER_MASK (0xfffff) +#define ELINK_EEE_MODE_OUTPUT_TIME (1 << 28) +#define ELINK_EEE_MODE_OVERRIDE_NVRAM (1 << 29) +#define ELINK_EEE_MODE_ENABLE_LPI (1 << 30) +#define ELINK_EEE_MODE_ADV_LPI (1U << 31) + + uint16_t hw_led_mode; /* part of the hw_config read from the shmem */ + uint32_t multi_phy_config; + + /* Device pointer passed to all callback functions */ + struct bnx2x_softc *sc; + uint16_t req_fc_auto_adv; /* Should be set to TX / BOTH when + req_flow_ctrl is set to AUTO */ + uint16_t link_flags; +#define ELINK_LINK_FLAGS_INT_DISABLED (1 << 0) +#define ELINK_PHY_INITIALIZED (1 << 1) + uint32_t lfa_base; + + /* The same definitions as the shmem2 parameter */ + uint32_t link_attr_sync; +}; + +/* Output parameters */ +struct elink_vars { + uint8_t phy_flags; +#define PHY_XGXS_FLAG (1 << 0) +#define PHY_SGMII_FLAG (1 << 1) +#define PHY_PHYSICAL_LINK_FLAG (1 << 2) +#define PHY_HALF_OPEN_CONN_FLAG (1 << 3) +#define PHY_OVER_CURRENT_FLAG (1 << 4) +#define PHY_SFP_TX_FAULT_FLAG (1 << 5) + + uint8_t mac_type; +#define ELINK_MAC_TYPE_NONE 0 +#define ELINK_MAC_TYPE_EMAC 1 +#define ELINK_MAC_TYPE_BMAC 2 +#define ELINK_MAC_TYPE_UMAC 3 +#define ELINK_MAC_TYPE_XMAC 4 + + uint8_t phy_link_up; /* internal phy link indication */ + uint8_t link_up; + + uint16_t line_speed; + uint16_t duplex; + + uint16_t flow_ctrl; + uint16_t ieee_fc; + + /* The same definitions as the shmem parameter */ + uint32_t link_status; + uint32_t eee_status; + uint8_t fault_detected; + uint8_t check_kr2_recovery_cnt; +#define ELINK_CHECK_KR2_RECOVERY_CNT 5 + uint16_t periodic_flags; +#define ELINK_PERIODIC_FLAGS_LINK_EVENT 0x0001 + + uint32_t aeu_int_mask; + uint8_t rx_tx_asic_rst; + uint8_t turn_to_run_wc_rt; + uint16_t rsrv2; + +}; + +/***********************************************************/ +/* Functions */ +/***********************************************************/ +elink_status_t elink_phy_init(struct elink_params *params, struct elink_vars *vars); + +/* Reset the link. 
Should be called when driver or interface goes down + Before calling phy firmware upgrade, the reset_ext_phy should be set + to 0 */ +elink_status_t elink_link_reset(struct elink_params *params, + struct elink_vars *vars, + uint8_t reset_ext_phy); +elink_status_t elink_lfa_reset(struct elink_params *params, struct elink_vars *vars); +/* elink_link_update should be called upon link interrupt */ +elink_status_t elink_link_update(struct elink_params *params, struct elink_vars *vars); + +/* use the following phy functions to read/write from external_phy + * In order to use it to read/write internal phy registers, use + * ELINK_DEFAULT_PHY_DEV_ADDR as devad, and (_bank + (_addr & 0xf)) as + * the register + */ +elink_status_t elink_phy_read(struct elink_params *params, uint8_t phy_addr, + uint8_t devad, uint16_t reg, uint16_t *ret_val); + +elink_status_t elink_phy_write(struct elink_params *params, uint8_t phy_addr, + uint8_t devad, uint16_t reg, uint16_t val); + +/* Reads the link_status from the shmem, + and update the link vars accordingly */ +void elink_link_status_update(struct elink_params *input, + struct elink_vars *output); +/* returns string representing the fw_version of the external phy */ +elink_status_t elink_get_ext_phy_fw_version(struct elink_params *params, + uint8_t *version, + uint16_t len); + +/* Set/Unset the led + Basically, the CLC takes care of the led for the link, but in case one needs + to set/unset the led unnaturally, set the "mode" to ELINK_LED_MODE_OPER to + blink the led, and ELINK_LED_MODE_OFF to set the led off.*/ +elink_status_t elink_set_led(struct elink_params *params, + struct elink_vars *vars, uint8_t mode, uint32_t speed); +#define ELINK_LED_MODE_OFF 0 +#define ELINK_LED_MODE_ON 1 +#define ELINK_LED_MODE_OPER 2 +#define ELINK_LED_MODE_FRONT_PANEL_OFF 3 + +/* elink_handle_module_detect_int should be called upon module detection + * interrupt + */ +void elink_handle_module_detect_int(struct elink_params *params); + +/* Get the actual link status. 
In case it returns ELINK_STATUS_OK, link is up, + * otherwise link is down + */ +elink_status_t elink_test_link(struct elink_params *params, + struct elink_vars *vars, + uint8_t is_serdes); + + +/* One-time initialization for external phy after power up */ +elink_status_t elink_common_init_phy(struct bnx2x_softc *sc, uint32_t shmem_base_path[], + uint32_t shmem2_base_path[], uint32_t chip_id, + uint8_t one_port_enabled); + +/* Reset the external PHY using GPIO */ +void elink_ext_phy_hw_reset(struct bnx2x_softc *sc, uint8_t port); + +/* Reset the external of SFX7101 */ +void elink_sfx7101_sp_sw_reset(struct bnx2x_softc *sc, struct elink_phy *phy); + +/* Read "byte_cnt" bytes from address "addr" from the SFP+ EEPROM */ +elink_status_t elink_read_sfp_module_eeprom(struct elink_phy *phy, + struct elink_params *params, uint8_t dev_addr, + uint16_t addr, uint16_t byte_cnt, + uint8_t *o_buf); + +void elink_hw_reset_phy(struct elink_params *params); + +/* Check swap bit and adjust PHY order */ +uint32_t elink_phy_selection(struct elink_params *params); + +/* Probe the phys on board, and populate them in "params" */ +elink_status_t elink_phy_probe(struct elink_params *params); + +/* Checks if fan failure detection is required on one of the phys on board */ +uint8_t elink_fan_failure_det_req(struct bnx2x_softc *sc, uint32_t shmem_base, + uint32_t shmem2_base, uint8_t port); + +/* Open / close the gate between the NIG and the BRB */ +void elink_set_rx_filter(struct elink_params *params, uint8_t en); + +/* DCBX structs */ + +/* Number of maximum COS per chip */ +#define ELINK_DCBX_E2E3_MAX_NUM_COS (2) +#define ELINK_DCBX_E3B0_MAX_NUM_COS_PORT0 (6) +#define ELINK_DCBX_E3B0_MAX_NUM_COS_PORT1 (3) +#define ELINK_DCBX_E3B0_MAX_NUM_COS ( \ + ELINK_MAXVAL(ELINK_DCBX_E3B0_MAX_NUM_COS_PORT0, \ + ELINK_DCBX_E3B0_MAX_NUM_COS_PORT1)) + +#define ELINK_DCBX_MAX_NUM_COS ( \ + ELINK_MAXVAL(ELINK_DCBX_E3B0_MAX_NUM_COS, \ + ELINK_DCBX_E2E3_MAX_NUM_COS)) + +/* PFC port configuration params */ +struct elink_nig_brb_pfc_port_params { + /* NIG */ + uint32_t pause_enable; + uint32_t llfc_out_en; + uint32_t llfc_enable; + uint32_t pkt_priority_to_cos; + uint8_t num_of_rx_cos_priority_mask; + uint32_t rx_cos_priority_mask[ELINK_DCBX_MAX_NUM_COS]; + uint32_t llfc_high_priority_classes; + uint32_t llfc_low_priority_classes; +}; + + +/* ETS port configuration params */ +struct elink_ets_bw_params { + uint8_t bw; +}; + +struct elink_ets_sp_params { + /** + * valid values are 0 - 5. 0 is highest strict priority. + * There can't be two COS's with the same pri. 
+ */ + uint8_t pri; +}; + +enum elink_cos_state { + elink_cos_state_strict = 0, + elink_cos_state_bw = 1, +}; + +struct elink_ets_cos_params { + enum elink_cos_state state ; + union { + struct elink_ets_bw_params bw_params; + struct elink_ets_sp_params sp_params; + } params; +}; + +struct elink_ets_params { + uint8_t num_of_cos; /* Number of valid COS entries*/ + struct elink_ets_cos_params cos[ELINK_DCBX_MAX_NUM_COS]; +}; + +/* Used to update the PFC attributes in EMAC, BMAC, NIG and BRB + * when link is already up + */ +elink_status_t elink_update_pfc(struct elink_params *params, + struct elink_vars *vars, + struct elink_nig_brb_pfc_port_params *pfc_params); + + +/* Used to configure the ETS to disable */ +elink_status_t elink_ets_disabled(struct elink_params *params, + struct elink_vars *vars); + +/* Used to configure the ETS to BW limited */ +void elink_ets_bw_limit(const struct elink_params *params, + const uint32_t cos0_bw, + const uint32_t cos1_bw); + +/* Used to configure the ETS to strict */ +elink_status_t elink_ets_strict(const struct elink_params *params, + const uint8_t strict_cos); + + +/* Configure the COS to ETS according to BW and SP settings.*/ +elink_status_t elink_ets_e3b0_config(const struct elink_params *params, + const struct elink_vars *vars, + struct elink_ets_params *ets_params); +/* Read pfc statistic*/ +void elink_pfc_statistic(struct elink_params *params, struct elink_vars *vars, + uint32_t pfc_frames_sent[2], + uint32_t pfc_frames_received[2]); +void elink_init_mod_abs_int(struct bnx2x_softc *sc, struct elink_vars *vars, + uint32_t chip_id, uint32_t shmem_base, uint32_t shmem2_base, + uint8_t port); +/* elink_status_t elink_sfp_module_detection(struct elink_phy *phy, + * struct elink_params *params); + */ + +void elink_period_func(struct elink_params *params, struct elink_vars *vars); + +/*elink_status_t elink_check_half_open_conn(struct elink_params *params, + * struct elink_vars *vars, uint8_t notify); + */ + +void elink_enable_pmd_tx(struct elink_params *params); + + + +#endif /* ELINK_H */ diff --git a/src/spdk/dpdk/drivers/net/bnx2x/meson.build b/src/spdk/dpdk/drivers/net/bnx2x/meson.build new file mode 100644 index 000000000..4892bb234 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnx2x/meson.build @@ -0,0 +1,15 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2018 Intel Corporation + +dep = dependency('zlib', required: false) +build = dep.found() +reason = 'missing dependency, "zlib"' +ext_deps += dep +cflags += '-DZLIB_CONST' +sources = files('bnx2x.c', + 'bnx2x_ethdev.c', + 'bnx2x_rxtx.c', + 'bnx2x_stats.c', + 'bnx2x_vfpf.c', + 'ecore_sp.c', + 'elink.c') diff --git a/src/spdk/dpdk/drivers/net/bnx2x/rte_pmd_bnx2x_version.map b/src/spdk/dpdk/drivers/net/bnx2x/rte_pmd_bnx2x_version.map new file mode 100644 index 000000000..f9f17e4f6 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnx2x/rte_pmd_bnx2x_version.map @@ -0,0 +1,3 @@ +DPDK_20.0 { + local: *; +}; diff --git a/src/spdk/dpdk/drivers/net/bnxt/Makefile b/src/spdk/dpdk/drivers/net/bnxt/Makefile new file mode 100644 index 000000000..2a39ed139 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnxt/Makefile @@ -0,0 +1,77 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2010-2014 Intel Corporation. +# Copyright(c) 2014 6WIND S.A. +# Copyright(c) Broadcom Limited. +# All rights reserved. 
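+
+# Builds librte_pmd_bnxt.a for the make-based build; the source list below
+# is gated on CONFIG_RTE_LIBRTE_BNXT_PMD.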
+ +include $(RTE_SDK)/mk/rte.vars.mk + +# +# library name +# +LIB = librte_pmd_bnxt.a + +EXPORT_MAP := rte_pmd_bnxt_version.map + +CFLAGS += -O3 +CFLAGS += $(WERROR_FLAGS) +LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring +LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs +LDLIBS += -lrte_bus_pci + +EXPORT_MAP := rte_pmd_bnxt_version.map + +# +# all source are stored in SRCS-y +# +SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_cpr.c +SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_ethdev.c +SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_filter.c +SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_flow.c +SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_hwrm.c +SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_ring.c +SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_rxq.c +SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_rxr.c +SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_stats.c +SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_txq.c +SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_txr.c +SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_vnic.c +SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_irq.c +SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_util.c +SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += rte_pmd_bnxt.c +ifeq ($(CONFIG_RTE_ARCH_X86), y) +SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_rxtx_vec_sse.c +endif + +ifeq ($(CONFIG_RTE_LIBRTE_BNXT_PMD), y) +CFLAGS += -I$(SRCDIR) -I$(SRCDIR)/tf_core -I$(SRCDIR)/tf_ulp +endif + +SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += tf_core/tf_core.c +SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += tf_core/bitalloc.c +SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += tf_core/tf_msg.c +SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += tf_core/rand.c +SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += tf_core/stack.c +SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += tf_core/tf_em.c +SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += tf_core/tf_rm.c +SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += tf_core/tf_tbl.c +SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += tf_core/tfp.c + +SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += tf_ulp/bnxt_ulp.c +SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += tf_ulp/ulp_mark_mgr.c +SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += tf_ulp/ulp_flow_db.c +SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += tf_ulp/ulp_template_db.c +SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += tf_ulp/ulp_utils.c +SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += tf_ulp/ulp_mapper.c +SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += tf_ulp/ulp_matcher.c +SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += tf_ulp/ulp_rte_parser.c +SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += tf_ulp/bnxt_ulp_flow.c +SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += tf_ulp/ulp_port_db.c + +# +# Export include files +# +SYMLINK-y-include += +SYMLINK-$(CONFIG_RTE_LIBRTE_BNXT_PMD)-include := rte_pmd_bnxt.h + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/src/spdk/dpdk/drivers/net/bnxt/bnxt.h b/src/spdk/dpdk/drivers/net/bnxt/bnxt.h new file mode 100644 index 000000000..446764c57 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnxt/bnxt.h @@ -0,0 +1,783 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Broadcom + * All rights reserved. 
+ */ + +#ifndef _BNXT_H_ +#define _BNXT_H_ + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "bnxt_cpr.h" +#include "bnxt_util.h" + +#include "tf_core.h" +#include "bnxt_ulp.h" + +/* Vendor ID */ +#define PCI_VENDOR_ID_BROADCOM 0x14E4 + +/* Device IDs */ +#define BROADCOM_DEV_ID_STRATUS_NIC_VF1 0x1606 +#define BROADCOM_DEV_ID_STRATUS_NIC_VF2 0x1609 +#define BROADCOM_DEV_ID_STRATUS_NIC 0x1614 +#define BROADCOM_DEV_ID_57414_VF 0x16c1 +#define BROADCOM_DEV_ID_57301 0x16c8 +#define BROADCOM_DEV_ID_57302 0x16c9 +#define BROADCOM_DEV_ID_57304_PF 0x16ca +#define BROADCOM_DEV_ID_57304_VF 0x16cb +#define BROADCOM_DEV_ID_57417_MF 0x16cc +#define BROADCOM_DEV_ID_NS2 0x16cd +#define BROADCOM_DEV_ID_57311 0x16ce +#define BROADCOM_DEV_ID_57312 0x16cf +#define BROADCOM_DEV_ID_57402 0x16d0 +#define BROADCOM_DEV_ID_57404 0x16d1 +#define BROADCOM_DEV_ID_57406_PF 0x16d2 +#define BROADCOM_DEV_ID_57406_VF 0x16d3 +#define BROADCOM_DEV_ID_57402_MF 0x16d4 +#define BROADCOM_DEV_ID_57407_RJ45 0x16d5 +#define BROADCOM_DEV_ID_57412 0x16d6 +#define BROADCOM_DEV_ID_57414 0x16d7 +#define BROADCOM_DEV_ID_57416_RJ45 0x16d8 +#define BROADCOM_DEV_ID_57417_RJ45 0x16d9 +#define BROADCOM_DEV_ID_5741X_VF 0x16dc +#define BROADCOM_DEV_ID_57412_MF 0x16de +#define BROADCOM_DEV_ID_57314 0x16df +#define BROADCOM_DEV_ID_57317_RJ45 0x16e0 +#define BROADCOM_DEV_ID_5731X_VF 0x16e1 +#define BROADCOM_DEV_ID_57417_SFP 0x16e2 +#define BROADCOM_DEV_ID_57416_SFP 0x16e3 +#define BROADCOM_DEV_ID_57317_SFP 0x16e4 +#define BROADCOM_DEV_ID_57404_MF 0x16e7 +#define BROADCOM_DEV_ID_57406_MF 0x16e8 +#define BROADCOM_DEV_ID_57407_SFP 0x16e9 +#define BROADCOM_DEV_ID_57407_MF 0x16ea +#define BROADCOM_DEV_ID_57414_MF 0x16ec +#define BROADCOM_DEV_ID_57416_MF 0x16ee +#define BROADCOM_DEV_ID_57508 0x1750 +#define BROADCOM_DEV_ID_57504 0x1751 +#define BROADCOM_DEV_ID_57502 0x1752 +#define BROADCOM_DEV_ID_57508_MF1 0x1800 +#define BROADCOM_DEV_ID_57504_MF1 0x1801 +#define BROADCOM_DEV_ID_57502_MF1 0x1802 +#define BROADCOM_DEV_ID_57508_MF2 0x1803 +#define BROADCOM_DEV_ID_57504_MF2 0x1804 +#define BROADCOM_DEV_ID_57502_MF2 0x1805 +#define BROADCOM_DEV_ID_57500_VF1 0x1806 +#define BROADCOM_DEV_ID_57500_VF2 0x1807 +#define BROADCOM_DEV_ID_58802 0xd802 +#define BROADCOM_DEV_ID_58804 0xd804 +#define BROADCOM_DEV_ID_58808 0x16f0 +#define BROADCOM_DEV_ID_58802_VF 0xd800 + +#define BNXT_MAX_MTU 9574 +#define VLAN_TAG_SIZE 4 +#define BNXT_NUM_VLANS 2 +#define BNXT_MAX_PKT_LEN (BNXT_MAX_MTU + RTE_ETHER_HDR_LEN +\ + RTE_ETHER_CRC_LEN +\ + (BNXT_NUM_VLANS * VLAN_TAG_SIZE)) +/* FW adds extra 4 bytes for FCS */ +#define BNXT_VNIC_MRU(mtu)\ + ((mtu) + RTE_ETHER_HDR_LEN + VLAN_TAG_SIZE * BNXT_NUM_VLANS) +#define BNXT_VF_RSV_NUM_RSS_CTX 1 +#define BNXT_VF_RSV_NUM_L2_CTX 4 +/* TODO: For now, do not support VMDq/RFS on VFs. */ +#define BNXT_VF_RSV_NUM_VNIC 1 +#define BNXT_MAX_LED 4 +#define BNXT_MIN_RING_DESC 16 +#define BNXT_MAX_TX_RING_DESC 4096 +#define BNXT_MAX_RX_RING_DESC 8192 +#define BNXT_DB_SIZE 0x80 + +#define TPA_MAX_AGGS 64 +#define TPA_MAX_AGGS_TH 1024 + +#define TPA_MAX_NUM_SEGS 32 +#define TPA_MAX_SEGS_TH 8 /* 32 segments in 4-segment units */ +#define TPA_MAX_SEGS 5 /* 32 segments in log2 units */ + +#define BNXT_TPA_MAX_AGGS(bp) \ + (BNXT_CHIP_THOR(bp) ? TPA_MAX_AGGS_TH : \ + TPA_MAX_AGGS) + +#define BNXT_TPA_MAX_SEGS(bp) \ + (BNXT_CHIP_THOR(bp) ? TPA_MAX_SEGS_TH : \ + TPA_MAX_SEGS) + +#ifdef RTE_ARCH_ARM64 +#define BNXT_NUM_ASYNC_CPR(bp) (BNXT_STINGRAY(bp) ? 
0 : 1) +#else +#define BNXT_NUM_ASYNC_CPR(bp) 1 +#endif + +/* In FreeBSD OS, nic_uio driver does not support interrupts */ +#ifdef RTE_EXEC_ENV_FREEBSD +#ifdef BNXT_NUM_ASYNC_CPR +#undef BNXT_NUM_ASYNC_CPR +#endif +#define BNXT_NUM_ASYNC_CPR(bp) 0 +#endif + +#define BNXT_MISC_VEC_ID RTE_INTR_VEC_ZERO_OFFSET +#define BNXT_RX_VEC_START RTE_INTR_VEC_RXTX_OFFSET + +/* Chimp Communication Channel */ +#define GRCPF_REG_CHIMP_CHANNEL_OFFSET 0x0 +#define GRCPF_REG_CHIMP_COMM_TRIGGER 0x100 +/* Kong Communication Channel */ +#define GRCPF_REG_KONG_CHANNEL_OFFSET 0xA00 +#define GRCPF_REG_KONG_COMM_TRIGGER 0xB00 + +#define BNXT_INT_LAT_TMR_MIN 75 +#define BNXT_INT_LAT_TMR_MAX 150 +#define BNXT_NUM_CMPL_AGGR_INT 36 +#define BNXT_CMPL_AGGR_DMA_TMR 37 +#define BNXT_NUM_CMPL_DMA_AGGR 36 +#define BNXT_CMPL_AGGR_DMA_TMR_DURING_INT 50 +#define BNXT_NUM_CMPL_DMA_AGGR_DURING_INT 12 + +struct bnxt_led_info { + uint8_t num_leds; + uint8_t led_id; + uint8_t led_type; + uint8_t led_group_id; + uint8_t unused; + uint16_t led_state_caps; +#define BNXT_LED_ALT_BLINK_CAP(x) ((x) & \ + rte_cpu_to_le_16(HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_BLINKALT)) + + uint16_t led_color_caps; +}; + +struct bnxt_led_cfg { + uint8_t led_id; + uint8_t led_state; + uint8_t led_color; + uint8_t unused; + uint16_t led_blink_on; + uint16_t led_blink_off; + uint8_t led_group_id; + uint8_t rsvd; +}; + +#define BNXT_LED_DFLT_ENA \ + (HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_ID | \ + HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_STATE | \ + HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_BLINK_ON | \ + HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_BLINK_OFF | \ + HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_GROUP_ID) + +#define BNXT_LED_DFLT_ENA_SHIFT 6 + +#define BNXT_LED_DFLT_ENABLES(x) \ + rte_cpu_to_le_32(BNXT_LED_DFLT_ENA << (BNXT_LED_DFLT_ENA_SHIFT * (x))) + +enum bnxt_hw_context { + HW_CONTEXT_NONE = 0, + HW_CONTEXT_IS_RSS = 1, + HW_CONTEXT_IS_COS = 2, + HW_CONTEXT_IS_LB = 3, +}; + +struct bnxt_vlan_table_entry { + uint16_t tpid; + uint16_t vid; +} __rte_packed; + +struct bnxt_vlan_antispoof_table_entry { + uint16_t tpid; + uint16_t vid; + uint16_t mask; +} __rte_packed; + +struct bnxt_child_vf_info { + void *req_buf; + struct bnxt_vlan_table_entry *vlan_table; + struct bnxt_vlan_antispoof_table_entry *vlan_as_table; + STAILQ_HEAD(, bnxt_filter_info) filter; + uint32_t func_cfg_flags; + uint32_t l2_rx_mask; + uint16_t fid; + uint16_t max_tx_rate; + uint16_t dflt_vlan; + uint16_t vlan_count; + uint8_t mac_spoof_en; + uint8_t vlan_spoof_en; + bool random_mac; + bool persist_stats; +}; + +struct bnxt_pf_info { +#define BNXT_FIRST_PF_FID 1 +#define BNXT_MAX_VFS(bp) ((bp)->pf->max_vfs) +#define BNXT_TOTAL_VFS(bp) ((bp)->pf->total_vfs) +#define BNXT_FIRST_VF_FID 128 +#define BNXT_PF_RINGS_USED(bp) bnxt_get_num_queues(bp) +#define BNXT_PF_RINGS_AVAIL(bp) ((bp)->pf->max_cp_rings - \ + BNXT_PF_RINGS_USED(bp)) + uint16_t port_id; + uint16_t first_vf_id; + uint16_t active_vfs; + uint16_t max_vfs; + uint16_t total_vfs; /* Total VFs possible. + * Not necessarily enabled. 
+ */ + uint32_t func_cfg_flags; + void *vf_req_buf; + rte_iova_t vf_req_buf_dma_addr; + uint32_t vf_req_fwd[8]; + uint16_t total_vnics; + struct bnxt_child_vf_info *vf_info; +#define BNXT_EVB_MODE_NONE 0 +#define BNXT_EVB_MODE_VEB 1 +#define BNXT_EVB_MODE_VEPA 2 + uint8_t evb_mode; +}; + +/* Max wait time for link up is 10s and link down is 500ms */ +#define BNXT_LINK_UP_WAIT_CNT 200 +#define BNXT_LINK_DOWN_WAIT_CNT 10 +#define BNXT_LINK_WAIT_INTERVAL 50 +struct bnxt_link_info { + uint32_t phy_flags; + uint8_t mac_type; + uint8_t phy_link_status; + uint8_t loop_back; + uint8_t link_up; + uint8_t duplex; + uint8_t pause; + uint8_t force_pause; + uint8_t auto_pause; + uint8_t auto_mode; +#define PHY_VER_LEN 3 + uint8_t phy_ver[PHY_VER_LEN]; + uint16_t link_speed; + uint16_t support_speeds; + uint16_t auto_link_speed; + uint16_t force_link_speed; + uint16_t auto_link_speed_mask; + uint32_t preemphasis; + uint8_t phy_type; + uint8_t media_type; +}; + +#define BNXT_COS_QUEUE_COUNT 8 +struct bnxt_cos_queue_info { + uint8_t id; + uint8_t profile; +}; + +struct rte_flow { + STAILQ_ENTRY(rte_flow) next; + struct bnxt_filter_info *filter; + struct bnxt_vnic_info *vnic; +}; + +#define BNXT_PTP_FLAGS_PATH_TX 0x0 +#define BNXT_PTP_FLAGS_PATH_RX 0x1 +#define BNXT_PTP_FLAGS_CURRENT_TIME 0x2 + +struct bnxt_ptp_cfg { +#define BNXT_GRCPF_REG_WINDOW_BASE_OUT 0x400 +#define BNXT_GRCPF_REG_SYNC_TIME 0x480 +#define BNXT_CYCLECOUNTER_MASK 0xffffffffffffffffULL + struct rte_timecounter tc; + struct rte_timecounter tx_tstamp_tc; + struct rte_timecounter rx_tstamp_tc; + struct bnxt *bp; +#define BNXT_MAX_TX_TS 1 + uint16_t rxctl; +#define BNXT_PTP_MSG_SYNC BIT(0) +#define BNXT_PTP_MSG_DELAY_REQ BIT(1) +#define BNXT_PTP_MSG_PDELAY_REQ BIT(2) +#define BNXT_PTP_MSG_PDELAY_RESP BIT(3) +#define BNXT_PTP_MSG_FOLLOW_UP BIT(8) +#define BNXT_PTP_MSG_DELAY_RESP BIT(9) +#define BNXT_PTP_MSG_PDELAY_RESP_FOLLOW_UP BIT(10) +#define BNXT_PTP_MSG_ANNOUNCE BIT(11) +#define BNXT_PTP_MSG_SIGNALING BIT(12) +#define BNXT_PTP_MSG_MANAGEMENT BIT(13) +#define BNXT_PTP_MSG_EVENTS (BNXT_PTP_MSG_SYNC | \ + BNXT_PTP_MSG_DELAY_REQ | \ + BNXT_PTP_MSG_PDELAY_REQ | \ + BNXT_PTP_MSG_PDELAY_RESP) + uint8_t tx_tstamp_en:1; + int rx_filter; + +#define BNXT_PTP_RX_TS_L 0 +#define BNXT_PTP_RX_TS_H 1 +#define BNXT_PTP_RX_SEQ 2 +#define BNXT_PTP_RX_FIFO 3 +#define BNXT_PTP_RX_FIFO_PENDING 0x1 +#define BNXT_PTP_RX_FIFO_ADV 4 +#define BNXT_PTP_RX_REGS 5 + +#define BNXT_PTP_TX_TS_L 0 +#define BNXT_PTP_TX_TS_H 1 +#define BNXT_PTP_TX_SEQ 2 +#define BNXT_PTP_TX_FIFO 3 +#define BNXT_PTP_TX_FIFO_EMPTY 0x2 +#define BNXT_PTP_TX_REGS 4 + uint32_t rx_regs[BNXT_PTP_RX_REGS]; + uint32_t rx_mapped_regs[BNXT_PTP_RX_REGS]; + uint32_t tx_regs[BNXT_PTP_TX_REGS]; + uint32_t tx_mapped_regs[BNXT_PTP_TX_REGS]; + + /* On Thor, the Rx timestamp is present in the Rx completion record */ + uint64_t rx_timestamp; +}; + +struct bnxt_coal { + uint16_t num_cmpl_aggr_int; + uint16_t num_cmpl_dma_aggr; + uint16_t num_cmpl_dma_aggr_during_int; + uint16_t int_lat_tmr_max; + uint16_t int_lat_tmr_min; + uint16_t cmpl_aggr_dma_tmr; + uint16_t cmpl_aggr_dma_tmr_during_int; +}; + +/* 64-bit doorbell */ +#define DBR_XID_SFT 32 +#define DBR_PATH_L2 (0x1ULL << 56) +#define DBR_TYPE_SQ (0x0ULL << 60) +#define DBR_TYPE_SRQ (0x2ULL << 60) +#define DBR_TYPE_CQ (0x4ULL << 60) +#define DBR_TYPE_NQ (0xaULL << 60) +#define DBR_TYPE_NQ_ARM (0xbULL << 60) + +#define BNXT_RSS_TBL_SIZE_THOR 512 +#define BNXT_RSS_ENTRIES_PER_CTX_THOR 64 +#define BNXT_MAX_RSS_CTXTS_THOR \ + (BNXT_RSS_TBL_SIZE_THOR / 
BNXT_RSS_ENTRIES_PER_CTX_THOR) + +#define BNXT_MAX_TC 8 +#define BNXT_MAX_QUEUE 8 +#define BNXT_MAX_TC_Q (BNXT_MAX_TC + 1) +#define BNXT_PAGE_SHFT 12 +#define BNXT_PAGE_SIZE (1 << BNXT_PAGE_SHFT) +#define MAX_CTX_PAGES (BNXT_PAGE_SIZE / 8) + +#define PTU_PTE_VALID 0x1UL +#define PTU_PTE_LAST 0x2UL +#define PTU_PTE_NEXT_TO_LAST 0x4UL + +struct bnxt_ring_mem_info { + int nr_pages; + int page_size; + uint32_t flags; +#define BNXT_RMEM_VALID_PTE_FLAG 1 +#define BNXT_RMEM_RING_PTE_FLAG 2 + + void **pg_arr; + rte_iova_t *dma_arr; + const struct rte_memzone *mz; + + uint64_t *pg_tbl; + rte_iova_t pg_tbl_map; + const struct rte_memzone *pg_tbl_mz; + + int vmem_size; + void **vmem; +}; + +struct bnxt_ctx_pg_info { + uint32_t entries; + void *ctx_pg_arr[MAX_CTX_PAGES]; + rte_iova_t ctx_dma_arr[MAX_CTX_PAGES]; + struct bnxt_ring_mem_info ring_mem; +}; + +struct bnxt_ctx_mem_info { + uint32_t qp_max_entries; + uint16_t qp_min_qp1_entries; + uint16_t qp_max_l2_entries; + uint16_t qp_entry_size; + uint16_t srq_max_l2_entries; + uint32_t srq_max_entries; + uint16_t srq_entry_size; + uint16_t cq_max_l2_entries; + uint32_t cq_max_entries; + uint16_t cq_entry_size; + uint16_t vnic_max_vnic_entries; + uint16_t vnic_max_ring_table_entries; + uint16_t vnic_entry_size; + uint32_t stat_max_entries; + uint16_t stat_entry_size; + uint16_t tqm_entry_size; + uint32_t tqm_min_entries_per_ring; + uint32_t tqm_max_entries_per_ring; + uint32_t mrav_max_entries; + uint16_t mrav_entry_size; + uint16_t tim_entry_size; + uint32_t tim_max_entries; + uint8_t tqm_entries_multiple; + uint8_t tqm_fp_rings_count; + + uint32_t flags; +#define BNXT_CTX_FLAG_INITED 0x01 + + struct bnxt_ctx_pg_info qp_mem; + struct bnxt_ctx_pg_info srq_mem; + struct bnxt_ctx_pg_info cq_mem; + struct bnxt_ctx_pg_info vnic_mem; + struct bnxt_ctx_pg_info stat_mem; + struct bnxt_ctx_pg_info *tqm_mem[BNXT_MAX_TC_Q]; +}; + +struct bnxt_ctx_mem_buf_info { + void *va; + rte_iova_t dma; + uint16_t ctx_id; + size_t size; +}; + +/* Maximum Firmware Reset bail out value in milliseconds */ +#define BNXT_MAX_FW_RESET_TIMEOUT 6000 +/* Minimum time required for the firmware readiness in milliseconds */ +#define BNXT_MIN_FW_READY_TIMEOUT 2000 +/* Frequency for the firmware readiness check in milliseconds */ +#define BNXT_FW_READY_WAIT_INTERVAL 100 + +#define US_PER_MS 1000 +#define NS_PER_US 1000 + +struct bnxt_error_recovery_info { + /* All units in milliseconds */ + uint32_t driver_polling_freq; + uint32_t master_func_wait_period; + uint32_t normal_func_wait_period; + uint32_t master_func_wait_period_after_reset; + uint32_t max_bailout_time_after_reset; +#define BNXT_FW_STATUS_REG 0 +#define BNXT_FW_HEARTBEAT_CNT_REG 1 +#define BNXT_FW_RECOVERY_CNT_REG 2 +#define BNXT_FW_RESET_INPROG_REG 3 +#define BNXT_FW_STATUS_REG_CNT 4 + uint32_t status_regs[BNXT_FW_STATUS_REG_CNT]; + uint32_t mapped_status_regs[BNXT_FW_STATUS_REG_CNT]; + uint32_t reset_inprogress_reg_mask; +#define BNXT_NUM_RESET_REG 16 + uint8_t reg_array_cnt; + uint32_t reset_reg[BNXT_NUM_RESET_REG]; + uint32_t reset_reg_val[BNXT_NUM_RESET_REG]; + uint8_t delay_after_reset[BNXT_NUM_RESET_REG]; +#define BNXT_FLAG_ERROR_RECOVERY_HOST BIT(0) +#define BNXT_FLAG_ERROR_RECOVERY_CO_CPU BIT(1) +#define BNXT_FLAG_MASTER_FUNC BIT(2) +#define BNXT_FLAG_RECOVERY_ENABLED BIT(3) + uint32_t flags; + + uint32_t last_heart_beat; + uint32_t last_reset_counter; +}; + +/* Frequency for the FUNC_DRV_IF_CHANGE retry in milliseconds */ +#define BNXT_IF_CHANGE_RETRY_INTERVAL 50 +/* Maximum retry count for FUNC_DRV_IF_CHANGE */ 
+#define BNXT_IF_CHANGE_RETRY_COUNT 40 + +struct bnxt_mark_info { + uint32_t mark_id; + bool valid; +}; + +/* address space location of register */ +#define BNXT_FW_STATUS_REG_TYPE_MASK 3 +/* register is located in PCIe config space */ +#define BNXT_FW_STATUS_REG_TYPE_CFG 0 +/* register is located in GRC address space */ +#define BNXT_FW_STATUS_REG_TYPE_GRC 1 +/* register is located in BAR0 */ +#define BNXT_FW_STATUS_REG_TYPE_BAR0 2 +/* register is located in BAR1 */ +#define BNXT_FW_STATUS_REG_TYPE_BAR1 3 + +#define BNXT_FW_STATUS_REG_TYPE(reg) ((reg) & BNXT_FW_STATUS_REG_TYPE_MASK) +#define BNXT_FW_STATUS_REG_OFF(reg) ((reg) & ~BNXT_FW_STATUS_REG_TYPE_MASK) + +#define BNXT_GRCP_WINDOW_2_BASE 0x2000 +#define BNXT_GRCP_WINDOW_3_BASE 0x3000 + +#define BNXT_GRCP_BASE_MASK 0xfffff000 +#define BNXT_GRCP_OFFSET_MASK 0x00000ffc + +#define BNXT_FW_STATUS_HEALTHY 0x8000 +#define BNXT_FW_STATUS_SHUTDOWN 0x100000 + +#define BNXT_HWRM_SHORT_REQ_LEN sizeof(struct hwrm_short_input) + +struct bnxt_flow_stat_info { + uint16_t max_fc; + uint16_t flow_count; + struct bnxt_ctx_mem_buf_info rx_fc_in_tbl; + struct bnxt_ctx_mem_buf_info rx_fc_out_tbl; + struct bnxt_ctx_mem_buf_info tx_fc_in_tbl; + struct bnxt_ctx_mem_buf_info tx_fc_out_tbl; +}; + +struct bnxt { + void *bar0; + + struct rte_eth_dev *eth_dev; + struct rte_pci_device *pdev; + void *doorbell_base; + + uint32_t flags; +#define BNXT_FLAG_REGISTERED BIT(0) +#define BNXT_FLAG_VF BIT(1) +#define BNXT_FLAG_PORT_STATS BIT(2) +#define BNXT_FLAG_JUMBO BIT(3) +#define BNXT_FLAG_SHORT_CMD BIT(4) +#define BNXT_FLAG_UPDATE_HASH BIT(5) +#define BNXT_FLAG_PTP_SUPPORTED BIT(6) +#define BNXT_FLAG_MULTI_HOST BIT(7) +#define BNXT_FLAG_EXT_RX_PORT_STATS BIT(8) +#define BNXT_FLAG_EXT_TX_PORT_STATS BIT(9) +#define BNXT_FLAG_KONG_MB_EN BIT(10) +#define BNXT_FLAG_TRUSTED_VF_EN BIT(11) +#define BNXT_FLAG_DFLT_VNIC_SET BIT(12) +#define BNXT_FLAG_THOR_CHIP BIT(13) +#define BNXT_FLAG_STINGRAY BIT(14) +#define BNXT_FLAG_FW_RESET BIT(15) +#define BNXT_FLAG_FATAL_ERROR BIT(16) +#define BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE BIT(17) +#define BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED BIT(18) +#define BNXT_FLAG_EXT_STATS_SUPPORTED BIT(19) +#define BNXT_FLAG_NEW_RM BIT(20) +#define BNXT_FLAG_NPAR_PF BIT(21) +#define BNXT_FLAG_FW_CAP_ONE_STEP_TX_TS BIT(22) +#define BNXT_FLAG_FC_THREAD BIT(23) +#define BNXT_FLAG_RX_VECTOR_PKT_MODE BIT(24) +#define BNXT_FLAG_FLOW_XSTATS_EN BIT(25) +#define BNXT_FLAG_DFLT_MAC_SET BIT(26) +#define BNXT_FLAG_TRUFLOW_EN BIT(27) +#define BNXT_PF(bp) (!((bp)->flags & BNXT_FLAG_VF)) +#define BNXT_VF(bp) ((bp)->flags & BNXT_FLAG_VF) +#define BNXT_NPAR(bp) ((bp)->flags & BNXT_FLAG_NPAR_PF) +#define BNXT_MH(bp) ((bp)->flags & BNXT_FLAG_MULTI_HOST) +#define BNXT_SINGLE_PF(bp) (BNXT_PF(bp) && !BNXT_NPAR(bp) && !BNXT_MH(bp)) +#define BNXT_USE_CHIMP_MB 0 //For non-CFA commands, everything uses Chimp. 
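+/* HWRM mailbox selector: 0 selects the default ChiMP channel. The Kong
+ * channel is used only for CFA commands, and only when the firmware
+ * enables it (BNXT_FLAG_KONG_MB_EN, tested via BNXT_USE_KONG() below).
+ */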
+#define BNXT_USE_KONG(bp) ((bp)->flags & BNXT_FLAG_KONG_MB_EN) +#define BNXT_VF_IS_TRUSTED(bp) ((bp)->flags & BNXT_FLAG_TRUSTED_VF_EN) +#define BNXT_CHIP_THOR(bp) ((bp)->flags & BNXT_FLAG_THOR_CHIP) +#define BNXT_STINGRAY(bp) ((bp)->flags & BNXT_FLAG_STINGRAY) +#define BNXT_HAS_NQ(bp) BNXT_CHIP_THOR(bp) +#define BNXT_HAS_RING_GRPS(bp) (!BNXT_CHIP_THOR(bp)) +#define BNXT_FLOW_XSTATS_EN(bp) ((bp)->flags & BNXT_FLAG_FLOW_XSTATS_EN) +#define BNXT_HAS_DFLT_MAC_SET(bp) ((bp)->flags & BNXT_FLAG_DFLT_MAC_SET) +#define BNXT_TRUFLOW_EN(bp) ((bp)->flags & BNXT_FLAG_TRUFLOW_EN) + + uint32_t fw_cap; +#define BNXT_FW_CAP_HOT_RESET BIT(0) +#define BNXT_FW_CAP_IF_CHANGE BIT(1) +#define BNXT_FW_CAP_ERROR_RECOVERY BIT(2) +#define BNXT_FW_CAP_ERR_RECOVER_RELOAD BIT(3) +#define BNXT_FW_CAP_ADV_FLOW_MGMT BIT(5) +#define BNXT_FW_CAP_ADV_FLOW_COUNTERS BIT(6) +#define BNXT_FW_CAP_HCOMM_FW_STATUS BIT(7) + + uint32_t flow_flags; +#define BNXT_FLOW_FLAG_L2_HDR_SRC_FILTER_EN BIT(0) + pthread_mutex_t flow_lock; + + uint32_t vnic_cap_flags; +#define BNXT_VNIC_CAP_COS_CLASSIFY BIT(0) + unsigned int rx_nr_rings; + unsigned int rx_cp_nr_rings; + unsigned int rx_num_qs_per_vnic; + struct bnxt_rx_queue **rx_queues; + const void *rx_mem_zone; + struct rx_port_stats *hw_rx_port_stats; + rte_iova_t hw_rx_port_stats_map; + struct rx_port_stats_ext *hw_rx_port_stats_ext; + rte_iova_t hw_rx_port_stats_ext_map; + uint16_t fw_rx_port_stats_ext_size; + + unsigned int tx_nr_rings; + unsigned int tx_cp_nr_rings; + struct bnxt_tx_queue **tx_queues; + const void *tx_mem_zone; + struct tx_port_stats *hw_tx_port_stats; + rte_iova_t hw_tx_port_stats_map; + struct tx_port_stats_ext *hw_tx_port_stats_ext; + rte_iova_t hw_tx_port_stats_ext_map; + uint16_t fw_tx_port_stats_ext_size; + + /* Default completion ring */ + struct bnxt_cp_ring_info *async_cp_ring; + struct bnxt_cp_ring_info *rxtx_nq_ring; + uint32_t max_ring_grps; + struct bnxt_ring_grp_info *grp_info; + + unsigned int nr_vnics; + +#define BNXT_GET_DEFAULT_VNIC(bp) (&(bp)->vnic_info[0]) + struct bnxt_vnic_info *vnic_info; + STAILQ_HEAD(, bnxt_vnic_info) free_vnic_list; + + struct bnxt_filter_info *filter_info; + STAILQ_HEAD(, bnxt_filter_info) free_filter_list; + + struct bnxt_irq *irq_tbl; + + uint8_t mac_addr[RTE_ETHER_ADDR_LEN]; + + uint16_t chimp_cmd_seq; + uint16_t kong_cmd_seq; + void *hwrm_cmd_resp_addr; + rte_iova_t hwrm_cmd_resp_dma_addr; + void *hwrm_short_cmd_req_addr; + rte_iova_t hwrm_short_cmd_req_dma_addr; + rte_spinlock_t hwrm_lock; + pthread_mutex_t def_cp_lock; + uint16_t max_req_len; + uint16_t max_resp_len; + uint16_t hwrm_max_ext_req_len; + + /* default command timeout value of 500ms */ +#define DFLT_HWRM_CMD_TIMEOUT 500000 + /* short command timeout value of 50ms */ +#define SHORT_HWRM_CMD_TIMEOUT 50000 + /* default HWRM request timeout value */ + uint32_t hwrm_cmd_timeout; + + struct bnxt_link_info *link_info; + struct bnxt_cos_queue_info *rx_cos_queue; + struct bnxt_cos_queue_info *tx_cos_queue; + uint8_t tx_cosq_id[BNXT_COS_QUEUE_COUNT]; + uint8_t rx_cosq_cnt; + uint8_t max_tc; + uint8_t max_lltc; + uint8_t max_q; + + uint16_t fw_fid; + uint16_t max_rsscos_ctx; + uint16_t max_cp_rings; + uint16_t max_tx_rings; + uint16_t max_rx_rings; +#define MAX_STINGRAY_RINGS 128U +/* For sake of symmetry, max Tx rings == max Rx rings, one stat ctx for each */ +#define BNXT_MAX_RX_RINGS(bp) \ + (BNXT_STINGRAY(bp) ? 
RTE_MIN(RTE_MIN(bp->max_rx_rings / 2U, \ + MAX_STINGRAY_RINGS), \ + bp->max_stat_ctx / 2U) : \ + RTE_MIN(bp->max_rx_rings / 2U, \ + bp->max_stat_ctx / 2U)) +#define BNXT_MAX_TX_RINGS(bp) \ + (RTE_MIN((bp)->max_tx_rings, BNXT_MAX_RX_RINGS(bp))) + +#define BNXT_MAX_RINGS(bp) \ + (RTE_MIN((((bp)->max_cp_rings - BNXT_NUM_ASYNC_CPR(bp)) / 2U), \ + BNXT_MAX_TX_RINGS(bp))) + uint16_t max_nq_rings; + uint16_t max_l2_ctx; + uint16_t max_rx_em_flows; + uint16_t max_vnics; + uint16_t max_stat_ctx; + uint16_t max_tpa_v2; + uint16_t first_vf_id; + uint16_t vlan; +#define BNXT_OUTER_TPID_MASK 0x0000ffff +#define BNXT_OUTER_TPID_BD_MASK 0xffff0000 +#define BNXT_OUTER_TPID_BD_SHFT 16 + uint32_t outer_tpid_bd; + struct bnxt_pf_info *pf; + uint8_t vxlan_port_cnt; + uint8_t geneve_port_cnt; + uint16_t vxlan_port; + uint16_t geneve_port; + uint16_t vxlan_fw_dst_port_id; + uint16_t geneve_fw_dst_port_id; + uint32_t fw_ver; + uint32_t hwrm_spec_code; + + struct bnxt_led_info *leds; + struct bnxt_ptp_cfg *ptp_cfg; + uint16_t vf_resv_strategy; + struct bnxt_ctx_mem_info *ctx; + + uint16_t fw_reset_min_msecs; + uint16_t fw_reset_max_msecs; + + /* Struct to hold adapter error recovery related info */ + struct bnxt_error_recovery_info *recovery_info; +#define BNXT_MARK_TABLE_SZ (sizeof(struct bnxt_mark_info) * 64 * 1024) +/* TCAM and EM should be 16-bit only. Other modes not supported. */ +#define BNXT_FLOW_ID_MASK 0x0000ffff + struct bnxt_mark_info *mark_table; + +#define BNXT_SVIF_INVALID 0xFFFF + uint16_t func_svif; + uint16_t port_svif; + + struct tf tfp; + struct bnxt_ulp_context *ulp_ctx; + struct bnxt_flow_stat_info *flow_stat; + uint8_t flow_xstat; +}; + +#define BNXT_FC_TIMER 1 /* Timer freq in Sec Flow Counters */ + +int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu); +int bnxt_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete, + bool exp_link_status); +int bnxt_rcv_msg_from_vf(struct bnxt *bp, uint16_t vf_id, void *msg); +int is_bnxt_in_error(struct bnxt *bp); + +int bnxt_map_fw_health_status_regs(struct bnxt *bp); +uint32_t bnxt_read_fw_status_reg(struct bnxt *bp, uint32_t index); +void bnxt_schedule_fw_health_check(struct bnxt *bp); + +bool is_bnxt_supported(struct rte_eth_dev *dev); +bool bnxt_stratus_device(struct bnxt *bp); +extern const struct rte_flow_ops bnxt_flow_ops; +#define bnxt_acquire_flow_lock(bp) \ + pthread_mutex_lock(&(bp)->flow_lock) + +#define bnxt_release_flow_lock(bp) \ + pthread_mutex_unlock(&(bp)->flow_lock) + +#define BNXT_VALID_VNIC_OR_RET(bp, vnic_id) do { \ + if ((vnic_id) >= (bp)->max_vnics) { \ + rte_flow_error_set(error, \ + EINVAL, \ + RTE_FLOW_ERROR_TYPE_ATTR_GROUP, \ + NULL, \ + "Group id is invalid!"); \ + rc = -rte_errno; \ + goto ret; \ + } \ +} while (0) + +extern int bnxt_logtype_driver; +#define PMD_DRV_LOG_RAW(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, bnxt_logtype_driver, "%s(): " fmt, \ + __func__, ## args) + +#define PMD_DRV_LOG(level, fmt, args...) 
\ + PMD_DRV_LOG_RAW(level, fmt, ## args) + +extern const struct rte_flow_ops bnxt_ulp_rte_flow_ops; +int32_t bnxt_ulp_init(struct bnxt *bp); +void bnxt_ulp_deinit(struct bnxt *bp); + +uint16_t bnxt_get_vnic_id(uint16_t port); +uint16_t bnxt_get_svif(uint16_t port_id, bool func_svif); +uint16_t bnxt_get_fw_func_id(uint16_t port); + +void bnxt_cancel_fc_thread(struct bnxt *bp); +void bnxt_flow_cnt_alarm_cb(void *arg); +int bnxt_flow_stats_req(struct bnxt *bp); +int bnxt_flow_stats_cnt(struct bnxt *bp); +#endif diff --git a/src/spdk/dpdk/drivers/net/bnxt/bnxt_cpr.c b/src/spdk/dpdk/drivers/net/bnxt/bnxt_cpr.c new file mode 100644 index 000000000..40e5350f6 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnxt/bnxt_cpr.c @@ -0,0 +1,289 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Broadcom + * All rights reserved. + */ + +#include +#include +#include + +#include "bnxt.h" +#include "bnxt_hwrm.h" +#include "bnxt_ring.h" +#include "hsi_struct_def_dpdk.h" + +void bnxt_wait_for_device_shutdown(struct bnxt *bp) +{ + uint32_t val, timeout; + + /* if HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ERR_RECOVER_RELOAD is set + * in HWRM_FUNC_QCAPS command, wait for FW_STATUS to set + * the SHUTDOWN bit in health register + */ + if (!(bp->recovery_info && + (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD))) + return; + + /* Driver has to wait for fw_reset_max_msecs or shutdown bit which comes + * first for FW to collect crash dump. + */ + timeout = bp->fw_reset_max_msecs; + + /* Driver has to poll for shutdown bit in fw_status register + * + * 1. in case of hot fw upgrade, this bit will be set after all + * function drivers unregistered with fw. + * 2. in case of fw initiated error recovery, this bit will be + * set after fw has collected the core dump + */ + do { + val = bnxt_read_fw_status_reg(bp, BNXT_FW_STATUS_REG); + if (val & BNXT_FW_STATUS_SHUTDOWN) + return; + + rte_delay_ms(100); + timeout -= 100; + } while (timeout); +} + +/* + * Async event handling + */ +void bnxt_handle_async_event(struct bnxt *bp, + struct cmpl_base *cmp) +{ + struct hwrm_async_event_cmpl *async_cmp = + (struct hwrm_async_event_cmpl *)cmp; + uint16_t event_id = rte_le_to_cpu_16(async_cmp->event_id); + struct bnxt_error_recovery_info *info; + uint32_t event_data; + + switch (event_id) { + case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE: + case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE: + case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: + /* FALLTHROUGH */ + bnxt_link_update(bp->eth_dev, 0, ETH_LINK_UP); + break; + case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD: + PMD_DRV_LOG(INFO, "Async event: PF driver unloaded\n"); + break; + case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE: + PMD_DRV_LOG(INFO, "Async event: VF config changed\n"); + bnxt_hwrm_func_qcfg(bp, NULL); + break; + case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: + PMD_DRV_LOG(INFO, "Port conn async event\n"); + break; + case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: + /* Ignore reset notify async events when stopping the port */ + if (!bp->eth_dev->data->dev_started) { + bp->flags |= BNXT_FLAG_FATAL_ERROR; + return; + } + + event_data = rte_le_to_cpu_32(async_cmp->event_data1); + /* timestamp_lo/hi values are in units of 100ms */ + bp->fw_reset_max_msecs = async_cmp->timestamp_hi ? + rte_le_to_cpu_16(async_cmp->timestamp_hi) * 100 : + BNXT_MAX_FW_RESET_TIMEOUT; + bp->fw_reset_min_msecs = async_cmp->timestamp_lo ? 
+ async_cmp->timestamp_lo * 100 : + BNXT_MIN_FW_READY_TIMEOUT; + if ((event_data & EVENT_DATA1_REASON_CODE_MASK) == + EVENT_DATA1_REASON_CODE_FW_EXCEPTION_FATAL) { + PMD_DRV_LOG(INFO, + "Firmware fatal reset event received\n"); + bp->flags |= BNXT_FLAG_FATAL_ERROR; + } else { + PMD_DRV_LOG(INFO, + "Firmware non-fatal reset event received\n"); + } + + bp->flags |= BNXT_FLAG_FW_RESET; + rte_eal_alarm_set(US_PER_MS, bnxt_dev_reset_and_resume, + (void *)bp); + break; + case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: + info = bp->recovery_info; + + if (!info) + return; + + PMD_DRV_LOG(INFO, "Error recovery async event received\n"); + + event_data = rte_le_to_cpu_32(async_cmp->event_data1) & + EVENT_DATA1_FLAGS_MASK; + + if (event_data & EVENT_DATA1_FLAGS_MASTER_FUNC) + info->flags |= BNXT_FLAG_MASTER_FUNC; + else + info->flags &= ~BNXT_FLAG_MASTER_FUNC; + + if (event_data & EVENT_DATA1_FLAGS_RECOVERY_ENABLED) + info->flags |= BNXT_FLAG_RECOVERY_ENABLED; + else + info->flags &= ~BNXT_FLAG_RECOVERY_ENABLED; + + PMD_DRV_LOG(INFO, "recovery enabled(%d), master function(%d)\n", + bnxt_is_recovery_enabled(bp), + bnxt_is_master_func(bp)); + + if (bp->flags & BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED) + return; + + info->last_heart_beat = + bnxt_read_fw_status_reg(bp, BNXT_FW_HEARTBEAT_CNT_REG); + info->last_reset_counter = + bnxt_read_fw_status_reg(bp, BNXT_FW_RECOVERY_CNT_REG); + + bnxt_schedule_fw_health_check(bp); + break; + case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION: + PMD_DRV_LOG(INFO, "DNC event: evt_data1 %#x evt_data2 %#x\n", + rte_le_to_cpu_32(async_cmp->event_data1), + rte_le_to_cpu_32(async_cmp->event_data2)); + break; + default: + PMD_DRV_LOG(DEBUG, "handle_async_event id = 0x%x\n", event_id); + break; + } +} + +void bnxt_handle_fwd_req(struct bnxt *bp, struct cmpl_base *cmpl) +{ + struct hwrm_exec_fwd_resp_input *fwreq; + struct hwrm_fwd_req_cmpl *fwd_cmpl = (struct hwrm_fwd_req_cmpl *)cmpl; + struct input *fwd_cmd; + uint16_t fw_vf_id; + uint16_t vf_id; + uint16_t req_len; + int rc; + + if (bp->pf->active_vfs <= 0) { + PMD_DRV_LOG(ERR, "Forwarded VF with no active VFs\n"); + return; + } + + /* Qualify the fwd request */ + fw_vf_id = rte_le_to_cpu_16(fwd_cmpl->source_id); + vf_id = fw_vf_id - bp->pf->first_vf_id; + + req_len = (rte_le_to_cpu_16(fwd_cmpl->req_len_type) & + HWRM_FWD_REQ_CMPL_REQ_LEN_MASK) >> + HWRM_FWD_REQ_CMPL_REQ_LEN_SFT; + if (req_len > sizeof(fwreq->encap_request)) + req_len = sizeof(fwreq->encap_request); + + /* Locate VF's forwarded command */ + fwd_cmd = (struct input *)bp->pf->vf_info[vf_id].req_buf; + + if (fw_vf_id < bp->pf->first_vf_id || + fw_vf_id >= bp->pf->first_vf_id + bp->pf->active_vfs) { + PMD_DRV_LOG(ERR, + "FWD req's source_id 0x%x out of range 0x%x - 0x%x (%d %d)\n", + fw_vf_id, bp->pf->first_vf_id, + (bp->pf->first_vf_id) + bp->pf->active_vfs - 1, + bp->pf->first_vf_id, bp->pf->active_vfs); + goto reject; + } + + if (bnxt_rcv_msg_from_vf(bp, vf_id, fwd_cmd) == true) { + /* + * In older firmware versions, the MAC had to be all zeros for + * the VF to set it's MAC via hwrm_func_vf_cfg. Set to all + * zeros if it's being configured and has been ok'd by caller. 
+ */ + if (fwd_cmd->req_type == HWRM_FUNC_VF_CFG) { + struct hwrm_func_vf_cfg_input *vfc = (void *)fwd_cmd; + + if (vfc->enables & + HWRM_FUNC_VF_CFG_INPUT_ENABLES_DFLT_MAC_ADDR) { + bnxt_hwrm_func_vf_mac(bp, vf_id, + (const uint8_t *)"\x00\x00\x00\x00\x00"); + } + } + if (fwd_cmd->req_type == HWRM_CFA_L2_SET_RX_MASK) { + struct hwrm_cfa_l2_set_rx_mask_input *srm = + (void *)fwd_cmd; + + srm->vlan_tag_tbl_addr = rte_cpu_to_le_64(0); + srm->num_vlan_tags = rte_cpu_to_le_32(0); + srm->mask &= ~rte_cpu_to_le_32( + HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY | + HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN | + HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ANYVLAN_NONVLAN); + } + /* Forward */ + rc = bnxt_hwrm_exec_fwd_resp(bp, fw_vf_id, fwd_cmd, req_len); + if (rc) { + PMD_DRV_LOG(ERR, + "Failed to send FWD req VF 0x%x, type 0x%x.\n", + fw_vf_id - bp->pf->first_vf_id, + rte_le_to_cpu_16(fwd_cmd->req_type)); + } + return; + } + +reject: + rc = bnxt_hwrm_reject_fwd_resp(bp, fw_vf_id, fwd_cmd, req_len); + if (rc) { + PMD_DRV_LOG(ERR, + "Failed to send REJECT req VF 0x%x, type 0x%x.\n", + fw_vf_id - bp->pf->first_vf_id, + rte_le_to_cpu_16(fwd_cmd->req_type)); + } + + return; +} + +int bnxt_event_hwrm_resp_handler(struct bnxt *bp, struct cmpl_base *cmp) +{ + bool evt = 0; + + if (bp == NULL || cmp == NULL) { + PMD_DRV_LOG(ERR, "invalid NULL argument\n"); + return evt; + } + + if (unlikely(is_bnxt_in_error(bp))) + return 0; + + switch (CMP_TYPE(cmp)) { + case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT: + /* Handle any async event */ + bnxt_handle_async_event(bp, cmp); + evt = 1; + break; + case CMPL_BASE_TYPE_HWRM_FWD_RESP: + /* Handle HWRM forwarded responses */ + bnxt_handle_fwd_req(bp, cmp); + evt = 1; + break; + default: + /* Ignore any other events */ + PMD_DRV_LOG(DEBUG, "Ignoring %02x completion\n", CMP_TYPE(cmp)); + break; + } + + return evt; +} + +bool bnxt_is_master_func(struct bnxt *bp) +{ + if (bp->recovery_info->flags & BNXT_FLAG_MASTER_FUNC) + return true; + + return false; +} + +bool bnxt_is_recovery_enabled(struct bnxt *bp) +{ + struct bnxt_error_recovery_info *info; + + info = bp->recovery_info; + if (info && (info->flags & BNXT_FLAG_RECOVERY_ENABLED)) + return true; + + return false; +} diff --git a/src/spdk/dpdk/drivers/net/bnxt/bnxt_cpr.h b/src/spdk/dpdk/drivers/net/bnxt/bnxt_cpr.h new file mode 100644 index 000000000..c2880783f --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnxt/bnxt_cpr.h @@ -0,0 +1,129 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Broadcom + * All rights reserved. + */ + +#ifndef _BNXT_CPR_H_ +#define _BNXT_CPR_H_ +#include + +#include + +struct bnxt_db_info; + +#define CMP_VALID(cmp, raw_cons, ring) \ + (!!(rte_le_to_cpu_32(((struct cmpl_base *)(cmp))->info3_v) & \ + CMPL_BASE_V) == !((raw_cons) & ((ring)->ring_size))) + +#define CMPL_VALID(cmp, v) \ + (!!(rte_le_to_cpu_32(((struct cmpl_base *)(cmp))->info3_v) & \ + CMPL_BASE_V) == !(v)) + +#define NQ_CMP_VALID(nqcmp, raw_cons, ring) \ + (!!((nqcmp)->v & rte_cpu_to_le_32(NQ_CN_V)) == \ + !((raw_cons) & ((ring)->ring_size))) + +#define CMP_TYPE(cmp) \ + (((struct cmpl_base *)cmp)->type & CMPL_BASE_TYPE_MASK) + +#define ADV_RAW_CMP(idx, n) ((idx) + (n)) +#define NEXT_RAW_CMP(idx) ADV_RAW_CMP(idx, 1) +#define RING_CMP(ring, idx) ((idx) & (ring)->ring_mask) +#define RING_CMPL(ring_mask, idx) ((idx) & (ring_mask)) +#define NEXT_CMP(idx) RING_CMP(ADV_RAW_CMP(idx, 1)) +#define FLIP_VALID(cons, mask, val) ((cons) >= (mask) ? 
!(val) : (val)) + +#define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID) +#define DB_CP_FLAGS (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS) + +#define NEXT_CMPL(cpr, idx, v, inc) do { \ + (idx) += (inc); \ + if (unlikely((idx) >= (cpr)->cp_ring_struct->ring_size)) { \ + (v) = !(v); \ + (idx) = 0; \ + } \ +} while (0) +#define B_CP_DB_REARM(cpr, raw_cons) \ + rte_write32((DB_CP_REARM_FLAGS | \ + RING_CMP(((cpr)->cp_ring_struct), raw_cons)), \ + ((cpr)->cp_db.doorbell)) + +#define B_CP_DB_ARM(cpr) rte_write32((DB_KEY_CP), \ + ((cpr)->cp_db.doorbell)) + +#define B_CP_DB_DISARM(cpr) (*(uint32_t *)((cpr)->cp_db.doorbell) = \ + DB_KEY_CP | DB_IRQ_DIS) + +#define B_CP_DB_IDX_ARM(cpr, cons) \ + (*(uint32_t *)((cpr)->cp_db.doorbell) = (DB_CP_REARM_FLAGS | \ + (cons))) + +#define B_CP_DB_IDX_DISARM(cpr, cons) do { \ + rte_smp_wmb(); \ + (*(uint32_t *)((cpr)->cp_db.doorbell) = (DB_CP_FLAGS | \ + (cons)); \ +} while (0) +#define B_CP_DIS_DB(cpr, raw_cons) \ + rte_write32((DB_CP_FLAGS | \ + RING_CMP(((cpr)->cp_ring_struct), raw_cons)), \ + ((cpr)->cp_db.doorbell)) + +#define B_CP_DB(cpr, raw_cons, ring_mask) \ + rte_write32((DB_CP_FLAGS | \ + RING_CMPL((ring_mask), raw_cons)), \ + ((cpr)->cp_db.doorbell)) + +struct bnxt_db_info { + void *doorbell; + union { + uint64_t db_key64; + uint32_t db_key32; + }; + bool db_64; +}; + +struct bnxt_ring; +struct bnxt_cp_ring_info { + uint32_t cp_raw_cons; + + struct cmpl_base *cp_desc_ring; + struct bnxt_db_info cp_db; + rte_iova_t cp_desc_mapping; + + struct ctx_hw_stats *hw_stats; + rte_iova_t hw_stats_map; + uint32_t hw_stats_ctx_id; + + struct bnxt_ring *cp_ring_struct; + uint16_t cp_cons; + bool valid; +}; + +#define RX_CMP_L2_ERRORS \ + (RX_PKT_CMPL_ERRORS_BUFFER_ERROR_MASK | RX_PKT_CMPL_ERRORS_CRC_ERROR) + +struct bnxt; +void bnxt_handle_async_event(struct bnxt *bp, struct cmpl_base *cmp); +void bnxt_handle_fwd_req(struct bnxt *bp, struct cmpl_base *cmp); +int bnxt_event_hwrm_resp_handler(struct bnxt *bp, struct cmpl_base *cmp); +void bnxt_dev_reset_and_resume(void *arg); +void bnxt_wait_for_device_shutdown(struct bnxt *bp); + +#define EVENT_DATA1_REASON_CODE_FW_EXCEPTION_FATAL \ + HWRM_ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_FATAL +#define EVENT_DATA1_REASON_CODE_MASK \ + HWRM_ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_MASK + +#define EVENT_DATA1_FLAGS_MASK \ + HWRM_ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_MASK + +#define EVENT_DATA1_FLAGS_MASTER_FUNC \ + HWRM_ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_MASTER_FUNC + +#define EVENT_DATA1_FLAGS_RECOVERY_ENABLED \ + HWRM_ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_RECOVERY_ENABLED + +bool bnxt_is_recovery_enabled(struct bnxt *bp); +bool bnxt_is_master_func(struct bnxt *bp); + +#endif diff --git a/src/spdk/dpdk/drivers/net/bnxt/bnxt_ethdev.c b/src/spdk/dpdk/drivers/net/bnxt/bnxt_ethdev.c new file mode 100644 index 000000000..ae495da34 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnxt/bnxt_ethdev.c @@ -0,0 +1,5681 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Broadcom + * All rights reserved. 
+ */ + +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "bnxt.h" +#include "bnxt_filter.h" +#include "bnxt_hwrm.h" +#include "bnxt_irq.h" +#include "bnxt_ring.h" +#include "bnxt_rxq.h" +#include "bnxt_rxr.h" +#include "bnxt_stats.h" +#include "bnxt_txq.h" +#include "bnxt_txr.h" +#include "bnxt_vnic.h" +#include "hsi_struct_def_dpdk.h" +#include "bnxt_nvm_defs.h" + +#define DRV_MODULE_NAME "bnxt" +static const char bnxt_version[] = + "Broadcom NetXtreme driver " DRV_MODULE_NAME; +int bnxt_logtype_driver; + +/* + * The set of PCI devices this driver supports + */ +static const struct rte_pci_id bnxt_pci_id_map[] = { + { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, + BROADCOM_DEV_ID_STRATUS_NIC_VF1) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, + BROADCOM_DEV_ID_STRATUS_NIC_VF2) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_STRATUS_NIC) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_VF) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57301) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57302) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_PF) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_VF) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_NS2) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57402) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57404) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_PF) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_VF) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57402_MF) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_RJ45) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57404_MF) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_MF) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_SFP) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_MF) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5741X_VF) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5731X_VF) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57314) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_MF) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57311) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57312) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_RJ45) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_RJ45) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412_MF) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_RJ45) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_SFP) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_SFP) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_SFP) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_MF) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_MF) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58802) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58804) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58808) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58802_VF) }, + { 
RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57508) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57504) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57502) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57500_VF1) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57500_VF2) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57508_MF1) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57504_MF1) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57502_MF1) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57508_MF2) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57504_MF2) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57502_MF2) }, + { .vendor_id = 0, /* sentinel */ }, +}; + +#define BNXT_ETH_RSS_SUPPORT ( \ + ETH_RSS_IPV4 | \ + ETH_RSS_NONFRAG_IPV4_TCP | \ + ETH_RSS_NONFRAG_IPV4_UDP | \ + ETH_RSS_IPV6 | \ + ETH_RSS_NONFRAG_IPV6_TCP | \ + ETH_RSS_NONFRAG_IPV6_UDP) + +#define BNXT_DEV_TX_OFFLOAD_SUPPORT (DEV_TX_OFFLOAD_VLAN_INSERT | \ + DEV_TX_OFFLOAD_IPV4_CKSUM | \ + DEV_TX_OFFLOAD_TCP_CKSUM | \ + DEV_TX_OFFLOAD_UDP_CKSUM | \ + DEV_TX_OFFLOAD_TCP_TSO | \ + DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | \ + DEV_TX_OFFLOAD_VXLAN_TNL_TSO | \ + DEV_TX_OFFLOAD_GRE_TNL_TSO | \ + DEV_TX_OFFLOAD_IPIP_TNL_TSO | \ + DEV_TX_OFFLOAD_GENEVE_TNL_TSO | \ + DEV_TX_OFFLOAD_QINQ_INSERT | \ + DEV_TX_OFFLOAD_MULTI_SEGS) + +#define BNXT_DEV_RX_OFFLOAD_SUPPORT (DEV_RX_OFFLOAD_VLAN_FILTER | \ + DEV_RX_OFFLOAD_VLAN_STRIP | \ + DEV_RX_OFFLOAD_IPV4_CKSUM | \ + DEV_RX_OFFLOAD_UDP_CKSUM | \ + DEV_RX_OFFLOAD_TCP_CKSUM | \ + DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | \ + DEV_RX_OFFLOAD_JUMBO_FRAME | \ + DEV_RX_OFFLOAD_KEEP_CRC | \ + DEV_RX_OFFLOAD_VLAN_EXTEND | \ + DEV_RX_OFFLOAD_TCP_LRO | \ + DEV_RX_OFFLOAD_SCATTER | \ + DEV_RX_OFFLOAD_RSS_HASH) + +#define BNXT_DEVARG_TRUFLOW "host-based-truflow" +#define BNXT_DEVARG_FLOW_XSTAT "flow-xstat" +static const char *const bnxt_dev_args[] = { + BNXT_DEVARG_TRUFLOW, + BNXT_DEVARG_FLOW_XSTAT, + NULL +}; + +/* + * truflow == false to disable the feature + * truflow == true to enable the feature + */ +#define BNXT_DEVARG_TRUFLOW_INVALID(truflow) ((truflow) > 1) + +/* + * flow_xstat == false to disable the feature + * flow_xstat == true to enable the feature + */ +#define BNXT_DEVARG_FLOW_XSTAT_INVALID(flow_xstat) ((flow_xstat) > 1) + +static int bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask); +static void bnxt_print_link_info(struct rte_eth_dev *eth_dev); +static int bnxt_dev_uninit(struct rte_eth_dev *eth_dev); +static int bnxt_init_resources(struct bnxt *bp, bool reconfig_dev); +static int bnxt_uninit_resources(struct bnxt *bp, bool reconfig_dev); +static void bnxt_cancel_fw_health_check(struct bnxt *bp); +static int bnxt_restore_vlan_filters(struct bnxt *bp); +static void bnxt_dev_recover(void *arg); +static void bnxt_free_error_recovery_info(struct bnxt *bp); + +int is_bnxt_in_error(struct bnxt *bp) +{ + if (bp->flags & BNXT_FLAG_FATAL_ERROR) + return -EIO; + if (bp->flags & BNXT_FLAG_FW_RESET) + return -EBUSY; + + return 0; +} + +/***********************/ + +/* + * High level utility functions + */ + +static uint16_t bnxt_rss_ctxts(const struct bnxt *bp) +{ + if (!BNXT_CHIP_THOR(bp)) + return 1; + + return RTE_ALIGN_MUL_CEIL(bp->rx_nr_rings, + BNXT_RSS_ENTRIES_PER_CTX_THOR) / + BNXT_RSS_ENTRIES_PER_CTX_THOR; +} + +static uint16_t bnxt_rss_hash_tbl_size(const struct bnxt *bp) +{ + if (!BNXT_CHIP_THOR(bp)) + return HW_HASH_INDEX_SIZE; + + 
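+ /* Thor: the RSS indirection table spans bnxt_rss_ctxts() contexts of
+  * BNXT_RSS_ENTRIES_PER_CTX_THOR (64) entries each.
+  */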
return bnxt_rss_ctxts(bp) * BNXT_RSS_ENTRIES_PER_CTX_THOR; +} + +static void bnxt_free_pf_info(struct bnxt *bp) +{ + rte_free(bp->pf); +} + +static void bnxt_free_link_info(struct bnxt *bp) +{ + rte_free(bp->link_info); +} + +static void bnxt_free_leds_info(struct bnxt *bp) +{ + rte_free(bp->leds); + bp->leds = NULL; +} + +static void bnxt_free_flow_stats_info(struct bnxt *bp) +{ + rte_free(bp->flow_stat); + bp->flow_stat = NULL; +} + +static void bnxt_free_cos_queues(struct bnxt *bp) +{ + rte_free(bp->rx_cos_queue); + rte_free(bp->tx_cos_queue); +} + +static void bnxt_free_mem(struct bnxt *bp, bool reconfig) +{ + bnxt_free_flow_stats_info(bp); + + bnxt_free_filter_mem(bp); + bnxt_free_vnic_attributes(bp); + bnxt_free_vnic_mem(bp); + + /* tx/rx rings are configured as part of *_queue_setup callbacks. + * If the number of rings change across fw update, + * we don't have much choice except to warn the user. + */ + if (!reconfig) { + bnxt_free_stats(bp); + bnxt_free_tx_rings(bp); + bnxt_free_rx_rings(bp); + } + bnxt_free_async_cp_ring(bp); + bnxt_free_rxtx_nq_ring(bp); + + rte_free(bp->grp_info); + bp->grp_info = NULL; +} + +static int bnxt_alloc_pf_info(struct bnxt *bp) +{ + bp->pf = rte_zmalloc("bnxt_pf_info", sizeof(struct bnxt_pf_info), 0); + if (bp->pf == NULL) + return -ENOMEM; + + return 0; +} + +static int bnxt_alloc_link_info(struct bnxt *bp) +{ + bp->link_info = + rte_zmalloc("bnxt_link_info", sizeof(struct bnxt_link_info), 0); + if (bp->link_info == NULL) + return -ENOMEM; + + return 0; +} + +static int bnxt_alloc_leds_info(struct bnxt *bp) +{ + bp->leds = rte_zmalloc("bnxt_leds", + BNXT_MAX_LED * sizeof(struct bnxt_led_info), + 0); + if (bp->leds == NULL) + return -ENOMEM; + + return 0; +} + +static int bnxt_alloc_cos_queues(struct bnxt *bp) +{ + bp->rx_cos_queue = + rte_zmalloc("bnxt_rx_cosq", + BNXT_COS_QUEUE_COUNT * + sizeof(struct bnxt_cos_queue_info), + 0); + if (bp->rx_cos_queue == NULL) + return -ENOMEM; + + bp->tx_cos_queue = + rte_zmalloc("bnxt_tx_cosq", + BNXT_COS_QUEUE_COUNT * + sizeof(struct bnxt_cos_queue_info), + 0); + if (bp->tx_cos_queue == NULL) + return -ENOMEM; + + return 0; +} + +static int bnxt_alloc_flow_stats_info(struct bnxt *bp) +{ + bp->flow_stat = rte_zmalloc("bnxt_flow_xstat", + sizeof(struct bnxt_flow_stat_info), 0); + if (bp->flow_stat == NULL) + return -ENOMEM; + + return 0; +} + +static int bnxt_alloc_mem(struct bnxt *bp, bool reconfig) +{ + int rc; + + rc = bnxt_alloc_ring_grps(bp); + if (rc) + goto alloc_mem_err; + + rc = bnxt_alloc_async_ring_struct(bp); + if (rc) + goto alloc_mem_err; + + rc = bnxt_alloc_vnic_mem(bp); + if (rc) + goto alloc_mem_err; + + rc = bnxt_alloc_vnic_attributes(bp); + if (rc) + goto alloc_mem_err; + + rc = bnxt_alloc_filter_mem(bp); + if (rc) + goto alloc_mem_err; + + rc = bnxt_alloc_async_cp_ring(bp); + if (rc) + goto alloc_mem_err; + + rc = bnxt_alloc_rxtx_nq_ring(bp); + if (rc) + goto alloc_mem_err; + + if (BNXT_FLOW_XSTATS_EN(bp)) { + rc = bnxt_alloc_flow_stats_info(bp); + if (rc) + goto alloc_mem_err; + } + + return 0; + +alloc_mem_err: + bnxt_free_mem(bp, reconfig); + return rc; +} + +static int bnxt_setup_one_vnic(struct bnxt *bp, uint16_t vnic_id) +{ + struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf; + struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; + uint64_t rx_offloads = dev_conf->rxmode.offloads; + struct bnxt_rx_queue *rxq; + unsigned int j; + int rc; + + rc = bnxt_vnic_grp_alloc(bp, vnic); + if (rc) + goto err_out; + + PMD_DRV_LOG(DEBUG, "vnic[%d] = %p vnic->fw_grp_ids = %p\n", + vnic_id, 
vnic, vnic->fw_grp_ids); + + rc = bnxt_hwrm_vnic_alloc(bp, vnic); + if (rc) + goto err_out; + + /* Alloc RSS context only if RSS mode is enabled */ + if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS) { + int j, nr_ctxs = bnxt_rss_ctxts(bp); + + rc = 0; + for (j = 0; j < nr_ctxs; j++) { + rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, j); + if (rc) + break; + } + if (rc) { + PMD_DRV_LOG(ERR, + "HWRM vnic %d ctx %d alloc failure rc: %x\n", + vnic_id, j, rc); + goto err_out; + } + vnic->num_lb_ctxts = nr_ctxs; + } + + /* + * Firmware sets pf pair in default vnic cfg. If the VLAN strip + * setting is not available at this time, it will not be + * configured correctly in the CFA. + */ + if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP) + vnic->vlan_strip = true; + else + vnic->vlan_strip = false; + + rc = bnxt_hwrm_vnic_cfg(bp, vnic); + if (rc) + goto err_out; + + rc = bnxt_set_hwrm_vnic_filters(bp, vnic); + if (rc) + goto err_out; + + for (j = 0; j < bp->rx_num_qs_per_vnic; j++) { + rxq = bp->eth_dev->data->rx_queues[j]; + + PMD_DRV_LOG(DEBUG, + "rxq[%d]->vnic=%p vnic->fw_grp_ids=%p\n", + j, rxq->vnic, rxq->vnic->fw_grp_ids); + + if (BNXT_HAS_RING_GRPS(bp) && rxq->rx_deferred_start) + rxq->vnic->fw_grp_ids[j] = INVALID_HW_RING_ID; + else + vnic->rx_queue_cnt++; + } + + PMD_DRV_LOG(DEBUG, "vnic->rx_queue_cnt = %d\n", vnic->rx_queue_cnt); + + rc = bnxt_vnic_rss_configure(bp, vnic); + if (rc) + goto err_out; + + bnxt_hwrm_vnic_plcmode_cfg(bp, vnic); + + if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) + bnxt_hwrm_vnic_tpa_cfg(bp, vnic, 1); + else + bnxt_hwrm_vnic_tpa_cfg(bp, vnic, 0); + + return 0; +err_out: + PMD_DRV_LOG(ERR, "HWRM vnic %d cfg failure rc: %x\n", + vnic_id, rc); + return rc; +} + +static int bnxt_register_fc_ctx_mem(struct bnxt *bp) +{ + int rc = 0; + + rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->rx_fc_in_tbl.dma, + &bp->flow_stat->rx_fc_in_tbl.ctx_id); + if (rc) + return rc; + + PMD_DRV_LOG(DEBUG, + "rx_fc_in_tbl.va = %p rx_fc_in_tbl.dma = %p" + " rx_fc_in_tbl.ctx_id = %d\n", + bp->flow_stat->rx_fc_in_tbl.va, + (void *)((uintptr_t)bp->flow_stat->rx_fc_in_tbl.dma), + bp->flow_stat->rx_fc_in_tbl.ctx_id); + + rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->rx_fc_out_tbl.dma, + &bp->flow_stat->rx_fc_out_tbl.ctx_id); + if (rc) + return rc; + + PMD_DRV_LOG(DEBUG, + "rx_fc_out_tbl.va = %p rx_fc_out_tbl.dma = %p" + " rx_fc_out_tbl.ctx_id = %d\n", + bp->flow_stat->rx_fc_out_tbl.va, + (void *)((uintptr_t)bp->flow_stat->rx_fc_out_tbl.dma), + bp->flow_stat->rx_fc_out_tbl.ctx_id); + + rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->tx_fc_in_tbl.dma, + &bp->flow_stat->tx_fc_in_tbl.ctx_id); + if (rc) + return rc; + + PMD_DRV_LOG(DEBUG, + "tx_fc_in_tbl.va = %p tx_fc_in_tbl.dma = %p" + " tx_fc_in_tbl.ctx_id = %d\n", + bp->flow_stat->tx_fc_in_tbl.va, + (void *)((uintptr_t)bp->flow_stat->tx_fc_in_tbl.dma), + bp->flow_stat->tx_fc_in_tbl.ctx_id); + + rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->tx_fc_out_tbl.dma, + &bp->flow_stat->tx_fc_out_tbl.ctx_id); + if (rc) + return rc; + + PMD_DRV_LOG(DEBUG, + "tx_fc_out_tbl.va = %p tx_fc_out_tbl.dma = %p" + " tx_fc_out_tbl.ctx_id = %d\n", + bp->flow_stat->tx_fc_out_tbl.va, + (void *)((uintptr_t)bp->flow_stat->tx_fc_out_tbl.dma), + bp->flow_stat->tx_fc_out_tbl.ctx_id); + + memset(bp->flow_stat->rx_fc_out_tbl.va, + 0, + bp->flow_stat->rx_fc_out_tbl.size); + rc = bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_RX, + CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC, + bp->flow_stat->rx_fc_out_tbl.ctx_id, + bp->flow_stat->max_fc, + true); + if (rc) + return rc; + + memset(bp->flow_stat->tx_fc_out_tbl.va, + 0, + 
bp->flow_stat->tx_fc_out_tbl.size); + rc = bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_TX, + CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC, + bp->flow_stat->tx_fc_out_tbl.ctx_id, + bp->flow_stat->max_fc, + true); + + return rc; +} + +static int bnxt_alloc_ctx_mem_buf(char *type, size_t size, + struct bnxt_ctx_mem_buf_info *ctx) +{ + if (!ctx) + return -EINVAL; + + ctx->va = rte_zmalloc(type, size, 0); + if (ctx->va == NULL) + return -ENOMEM; + rte_mem_lock_page(ctx->va); + ctx->size = size; + ctx->dma = rte_mem_virt2iova(ctx->va); + if (ctx->dma == RTE_BAD_IOVA) + return -ENOMEM; + + return 0; +} + +static int bnxt_init_fc_ctx_mem(struct bnxt *bp) +{ + struct rte_pci_device *pdev = bp->pdev; + char type[RTE_MEMZONE_NAMESIZE]; + uint16_t max_fc; + int rc = 0; + + max_fc = bp->flow_stat->max_fc; + + sprintf(type, "bnxt_rx_fc_in_" PCI_PRI_FMT, pdev->addr.domain, + pdev->addr.bus, pdev->addr.devid, pdev->addr.function); + /* 4 bytes for each counter-id */ + rc = bnxt_alloc_ctx_mem_buf(type, + max_fc * 4, + &bp->flow_stat->rx_fc_in_tbl); + if (rc) + return rc; + + sprintf(type, "bnxt_rx_fc_out_" PCI_PRI_FMT, pdev->addr.domain, + pdev->addr.bus, pdev->addr.devid, pdev->addr.function); + /* 16 bytes for each counter - 8 bytes pkt_count, 8 bytes byte_count */ + rc = bnxt_alloc_ctx_mem_buf(type, + max_fc * 16, + &bp->flow_stat->rx_fc_out_tbl); + if (rc) + return rc; + + sprintf(type, "bnxt_tx_fc_in_" PCI_PRI_FMT, pdev->addr.domain, + pdev->addr.bus, pdev->addr.devid, pdev->addr.function); + /* 4 bytes for each counter-id */ + rc = bnxt_alloc_ctx_mem_buf(type, + max_fc * 4, + &bp->flow_stat->tx_fc_in_tbl); + if (rc) + return rc; + + sprintf(type, "bnxt_tx_fc_out_" PCI_PRI_FMT, pdev->addr.domain, + pdev->addr.bus, pdev->addr.devid, pdev->addr.function); + /* 16 bytes for each counter - 8 bytes pkt_count, 8 bytes byte_count */ + rc = bnxt_alloc_ctx_mem_buf(type, + max_fc * 16, + &bp->flow_stat->tx_fc_out_tbl); + if (rc) + return rc; + + rc = bnxt_register_fc_ctx_mem(bp); + + return rc; +} + +static int bnxt_init_ctx_mem(struct bnxt *bp) +{ + int rc = 0; + + if (!(bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_COUNTERS) || + !(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) || + !BNXT_FLOW_XSTATS_EN(bp)) + return 0; + + rc = bnxt_hwrm_cfa_counter_qcaps(bp, &bp->flow_stat->max_fc); + if (rc) + return rc; + + rc = bnxt_init_fc_ctx_mem(bp); + + return rc; +} + +static int bnxt_init_chip(struct bnxt *bp) +{ + struct rte_eth_link new; + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(bp->eth_dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + uint32_t intr_vector = 0; + uint32_t queue_id, base = BNXT_MISC_VEC_ID; + uint32_t vec = BNXT_MISC_VEC_ID; + unsigned int i, j; + int rc; + + if (bp->eth_dev->data->mtu > RTE_ETHER_MTU) { + bp->eth_dev->data->dev_conf.rxmode.offloads |= + DEV_RX_OFFLOAD_JUMBO_FRAME; + bp->flags |= BNXT_FLAG_JUMBO; + } else { + bp->eth_dev->data->dev_conf.rxmode.offloads &= + ~DEV_RX_OFFLOAD_JUMBO_FRAME; + bp->flags &= ~BNXT_FLAG_JUMBO; + } + + /* THOR does not support ring groups. + * But we will use the array to save RSS context IDs. 
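+ * On Thor, max_ring_grps is therefore set to BNXT_MAX_RSS_CTXTS_THOR below.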
+ */ + if (BNXT_CHIP_THOR(bp)) + bp->max_ring_grps = BNXT_MAX_RSS_CTXTS_THOR; + + rc = bnxt_alloc_all_hwrm_stat_ctxs(bp); + if (rc) { + PMD_DRV_LOG(ERR, "HWRM stat ctx alloc failure rc: %x\n", rc); + goto err_out; + } + + rc = bnxt_alloc_hwrm_rings(bp); + if (rc) { + PMD_DRV_LOG(ERR, "HWRM ring alloc failure rc: %x\n", rc); + goto err_out; + } + + rc = bnxt_alloc_all_hwrm_ring_grps(bp); + if (rc) { + PMD_DRV_LOG(ERR, "HWRM ring grp alloc failure: %x\n", rc); + goto err_out; + } + + if (!(bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY)) + goto skip_cosq_cfg; + + for (j = 0, i = 0; i < BNXT_COS_QUEUE_COUNT; i++) { + if (bp->rx_cos_queue[i].id != 0xff) { + struct bnxt_vnic_info *vnic = &bp->vnic_info[j++]; + + if (!vnic) { + PMD_DRV_LOG(ERR, + "Num pools more than FW profile\n"); + rc = -EINVAL; + goto err_out; + } + vnic->cos_queue_id = bp->rx_cos_queue[i].id; + bp->rx_cosq_cnt++; + } + } + +skip_cosq_cfg: + rc = bnxt_mq_rx_configure(bp); + if (rc) { + PMD_DRV_LOG(ERR, "MQ mode configure failure rc: %x\n", rc); + goto err_out; + } + + /* VNIC configuration */ + for (i = 0; i < bp->nr_vnics; i++) { + rc = bnxt_setup_one_vnic(bp, i); + if (rc) + goto err_out; + } + + rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, &bp->vnic_info[0], 0, NULL); + if (rc) { + PMD_DRV_LOG(ERR, + "HWRM cfa l2 rx mask failure rc: %x\n", rc); + goto err_out; + } + + /* check and configure queue intr-vector mapping */ + if ((rte_intr_cap_multiple(intr_handle) || + !RTE_ETH_DEV_SRIOV(bp->eth_dev).active) && + bp->eth_dev->data->dev_conf.intr_conf.rxq != 0) { + intr_vector = bp->eth_dev->data->nb_rx_queues; + PMD_DRV_LOG(DEBUG, "intr_vector = %d\n", intr_vector); + if (intr_vector > bp->rx_cp_nr_rings) { + PMD_DRV_LOG(ERR, "At most %d intr queues supported", + bp->rx_cp_nr_rings); + return -ENOTSUP; + } + rc = rte_intr_efd_enable(intr_handle, intr_vector); + if (rc) + return rc; + } + + if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) { + intr_handle->intr_vec = + rte_zmalloc("intr_vec", + bp->eth_dev->data->nb_rx_queues * + sizeof(int), 0); + if (intr_handle->intr_vec == NULL) { + PMD_DRV_LOG(ERR, "Failed to allocate %d rx_queues" + " intr_vec", bp->eth_dev->data->nb_rx_queues); + rc = -ENOMEM; + goto err_disable; + } + PMD_DRV_LOG(DEBUG, "intr_handle->intr_vec = %p " + "intr_handle->nb_efd = %d intr_handle->max_intr = %d\n", + intr_handle->intr_vec, intr_handle->nb_efd, + intr_handle->max_intr); + for (queue_id = 0; queue_id < bp->eth_dev->data->nb_rx_queues; + queue_id++) { + intr_handle->intr_vec[queue_id] = + vec + BNXT_RX_VEC_START; + if (vec < base + intr_handle->nb_efd - 1) + vec++; + } + } + + /* enable uio/vfio intr/eventfd mapping */ + rc = rte_intr_enable(intr_handle); +#ifndef RTE_EXEC_ENV_FREEBSD + /* In FreeBSD OS, nic_uio driver does not support interrupts */ + if (rc) + goto err_free; +#endif + + rc = bnxt_get_hwrm_link_config(bp, &new); + if (rc) { + PMD_DRV_LOG(ERR, "HWRM Get link config failure rc: %x\n", rc); + goto err_free; + } + + if (!bp->link_info->link_up) { + rc = bnxt_set_hwrm_link_config(bp, true); + if (rc) { + PMD_DRV_LOG(ERR, + "HWRM link config failure rc: %x\n", rc); + goto err_free; + } + } + bnxt_print_link_info(bp->eth_dev); + + bp->mark_table = rte_zmalloc("bnxt_mark_table", BNXT_MARK_TABLE_SZ, 0); + if (!bp->mark_table) + PMD_DRV_LOG(ERR, "Allocation of mark table failed\n"); + + return 0; + +err_free: + rte_free(intr_handle->intr_vec); +err_disable: + rte_intr_efd_disable(intr_handle); +err_out: + /* Some of the error status returned by FW may not be from errno.h */ + if (rc 
> 0) + rc = -EIO; + + return rc; +} + +static int bnxt_shutdown_nic(struct bnxt *bp) +{ + bnxt_free_all_hwrm_resources(bp); + bnxt_free_all_filters(bp); + bnxt_free_all_vnics(bp); + return 0; +} + +/* + * Device configuration and status function + */ + +static uint32_t bnxt_get_speed_capabilities(struct bnxt *bp) +{ + uint32_t link_speed = bp->link_info->support_speeds; + uint32_t speed_capa = 0; + + if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB) + speed_capa |= ETH_LINK_SPEED_100M; + if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100MBHD) + speed_capa |= ETH_LINK_SPEED_100M_HD; + if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_1GB) + speed_capa |= ETH_LINK_SPEED_1G; + if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2_5GB) + speed_capa |= ETH_LINK_SPEED_2_5G; + if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10GB) + speed_capa |= ETH_LINK_SPEED_10G; + if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_20GB) + speed_capa |= ETH_LINK_SPEED_20G; + if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_25GB) + speed_capa |= ETH_LINK_SPEED_25G; + if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_40GB) + speed_capa |= ETH_LINK_SPEED_40G; + if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_50GB) + speed_capa |= ETH_LINK_SPEED_50G; + if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100GB) + speed_capa |= ETH_LINK_SPEED_100G; + if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_200GB) + speed_capa |= ETH_LINK_SPEED_200G; + + if (bp->link_info->auto_mode == + HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE) + speed_capa |= ETH_LINK_SPEED_FIXED; + else + speed_capa |= ETH_LINK_SPEED_AUTONEG; + + return speed_capa; +} + +static int bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev, + struct rte_eth_dev_info *dev_info) +{ + struct rte_pci_device *pdev = RTE_DEV_TO_PCI(eth_dev->device); + struct bnxt *bp = eth_dev->data->dev_private; + uint16_t max_vnics, i, j, vpool, vrxq; + unsigned int max_rx_rings; + int rc; + + rc = is_bnxt_in_error(bp); + if (rc) + return rc; + + /* MAC Specifics */ + dev_info->max_mac_addrs = bp->max_l2_ctx; + dev_info->max_hash_mac_addrs = 0; + + /* PF/VF specifics */ + if (BNXT_PF(bp)) + dev_info->max_vfs = pdev->max_vfs; + + max_rx_rings = BNXT_MAX_RINGS(bp); + /* For the sake of symmetry, max_rx_queues = max_tx_queues */ + dev_info->max_rx_queues = max_rx_rings; + dev_info->max_tx_queues = max_rx_rings; + dev_info->reta_size = bnxt_rss_hash_tbl_size(bp); + dev_info->hash_key_size = 40; + max_vnics = bp->max_vnics; + + /* MTU specifics */ + dev_info->min_mtu = RTE_ETHER_MIN_MTU; + dev_info->max_mtu = BNXT_MAX_MTU; + + /* Fast path specifics */ + dev_info->min_rx_bufsize = 1; + dev_info->max_rx_pktlen = BNXT_MAX_PKT_LEN; + + dev_info->rx_offload_capa = BNXT_DEV_RX_OFFLOAD_SUPPORT; + if (bp->flags & BNXT_FLAG_PTP_SUPPORTED) + dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TIMESTAMP; + dev_info->tx_offload_capa = BNXT_DEV_TX_OFFLOAD_SUPPORT; + dev_info->flow_type_rss_offloads = BNXT_ETH_RSS_SUPPORT; + + dev_info->speed_capa = bnxt_get_speed_capabilities(bp); + + /* *INDENT-OFF* */ + dev_info->default_rxconf = (struct rte_eth_rxconf) { + .rx_thresh = { + .pthresh = 8, + .hthresh = 8, + .wthresh = 0, + }, + .rx_free_thresh = 32, + /* If no descriptors available, pkts are dropped by default */ + .rx_drop_en = 1, + }; + + dev_info->default_txconf = (struct rte_eth_txconf) { + .tx_thresh = { + .pthresh = 32, + .hthresh = 0, + .wthresh = 0, + }, + .tx_free_thresh = 32, + .tx_rs_thresh = 32, + }; + 
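/* Enable LSC and Rx queue interrupt support by default */ +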
eth_dev->data->dev_conf.intr_conf.lsc = 1; + + eth_dev->data->dev_conf.intr_conf.rxq = 1; + dev_info->rx_desc_lim.nb_min = BNXT_MIN_RING_DESC; + dev_info->rx_desc_lim.nb_max = BNXT_MAX_RX_RING_DESC; + dev_info->tx_desc_lim.nb_min = BNXT_MIN_RING_DESC; + dev_info->tx_desc_lim.nb_max = BNXT_MAX_TX_RING_DESC; + + /* *INDENT-ON* */ + + /* + * TODO: default_rxconf, default_txconf, rx_desc_lim, and tx_desc_lim + * need further investigation. + */ + + /* VMDq resources */ + vpool = 64; /* ETH_64_POOLS */ + vrxq = 128; /* ETH_VMDQ_DCB_NUM_QUEUES */ + for (i = 0; i < 4; vpool >>= 1, i++) { + if (max_vnics > vpool) { + for (j = 0; j < 5; vrxq >>= 1, j++) { + if (dev_info->max_rx_queues > vrxq) { + if (vpool > vrxq) + vpool = vrxq; + goto found; + } + } + /* Not enough resources to support VMDq */ + break; + } + } + /* Not enough resources to support VMDq */ + vpool = 0; + vrxq = 0; +found: + dev_info->max_vmdq_pools = vpool; + dev_info->vmdq_queue_num = vrxq; + + dev_info->vmdq_pool_base = 0; + dev_info->vmdq_queue_base = 0; + + return 0; +} + +/* Configure the device based on the configuration provided */ +static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev) +{ + struct bnxt *bp = eth_dev->data->dev_private; + uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads; + int rc; + + bp->rx_queues = (void *)eth_dev->data->rx_queues; + bp->tx_queues = (void *)eth_dev->data->tx_queues; + bp->tx_nr_rings = eth_dev->data->nb_tx_queues; + bp->rx_nr_rings = eth_dev->data->nb_rx_queues; + + rc = is_bnxt_in_error(bp); + if (rc) + return rc; + + if (BNXT_VF(bp) && (bp->flags & BNXT_FLAG_NEW_RM)) { + rc = bnxt_hwrm_check_vf_rings(bp); + if (rc) { + PMD_DRV_LOG(ERR, "HWRM insufficient resources\n"); + return -ENOSPC; + } + + /* If a resource has already been allocated - in this case + * it is the async completion ring, free it. Reallocate it after + * resource reservation. This will ensure the resource counts + * are calculated correctly. 
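+ * This applies only when the device has no NQs; see the BNXT_HAS_NQ() checks below.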
+ */ + + pthread_mutex_lock(&bp->def_cp_lock); + + if (!BNXT_HAS_NQ(bp) && bp->async_cp_ring) { + bnxt_disable_int(bp); + bnxt_free_cp_ring(bp, bp->async_cp_ring); + } + + rc = bnxt_hwrm_func_reserve_vf_resc(bp, false); + if (rc) { + PMD_DRV_LOG(ERR, "HWRM resource alloc fail:%x\n", rc); + pthread_mutex_unlock(&bp->def_cp_lock); + return -ENOSPC; + } + + if (!BNXT_HAS_NQ(bp) && bp->async_cp_ring) { + rc = bnxt_alloc_async_cp_ring(bp); + if (rc) { + pthread_mutex_unlock(&bp->def_cp_lock); + return rc; + } + bnxt_enable_int(bp); + } + + pthread_mutex_unlock(&bp->def_cp_lock); + } else { + /* legacy driver needs to get updated values */ + rc = bnxt_hwrm_func_qcaps(bp); + if (rc) { + PMD_DRV_LOG(ERR, "hwrm func qcaps fail:%d\n", rc); + return rc; + } + } + + /* Inherit new configurations */ + if (eth_dev->data->nb_rx_queues > bp->max_rx_rings || + eth_dev->data->nb_tx_queues > bp->max_tx_rings || + eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues + + BNXT_NUM_ASYNC_CPR(bp) > bp->max_cp_rings || + eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues > + bp->max_stat_ctx) + goto resource_error; + + if (BNXT_HAS_RING_GRPS(bp) && + (uint32_t)(eth_dev->data->nb_rx_queues) > bp->max_ring_grps) + goto resource_error; + + if (!(eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) && + bp->max_vnics < eth_dev->data->nb_rx_queues) + goto resource_error; + + bp->rx_cp_nr_rings = bp->rx_nr_rings; + bp->tx_cp_nr_rings = bp->tx_nr_rings; + + if (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) + rx_offloads |= DEV_RX_OFFLOAD_RSS_HASH; + eth_dev->data->dev_conf.rxmode.offloads = rx_offloads; + + if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) { + eth_dev->data->mtu = + eth_dev->data->dev_conf.rxmode.max_rx_pkt_len - + RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN - VLAN_TAG_SIZE * + BNXT_NUM_VLANS; + bnxt_mtu_set_op(eth_dev, eth_dev->data->mtu); + } + return 0; + +resource_error: + PMD_DRV_LOG(ERR, + "Insufficient resources to support requested config\n"); + PMD_DRV_LOG(ERR, + "Num Queues Requested: Tx %d, Rx %d\n", + eth_dev->data->nb_tx_queues, + eth_dev->data->nb_rx_queues); + PMD_DRV_LOG(ERR, + "MAX: TxQ %d, RxQ %d, CQ %d Stat %d, Grp %d, Vnic %d\n", + bp->max_tx_rings, bp->max_rx_rings, bp->max_cp_rings, + bp->max_stat_ctx, bp->max_ring_grps, bp->max_vnics); + return -ENOSPC; +} + +static void bnxt_print_link_info(struct rte_eth_dev *eth_dev) +{ + struct rte_eth_link *link = &eth_dev->data->dev_link; + + if (link->link_status) + PMD_DRV_LOG(INFO, "Port %d Link Up - speed %u Mbps - %s\n", + eth_dev->data->port_id, + (uint32_t)link->link_speed, + (link->link_duplex == ETH_LINK_FULL_DUPLEX) ? + ("full-duplex") : ("half-duplex")); + else + PMD_DRV_LOG(INFO, "Port %d Link Down\n", + eth_dev->data->port_id); +} + +/* + * Determine whether the current configuration requires support for scattered + * receive; return 1 if scattered receive is required and 0 if not.
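+ * Scattered receive is needed when DEV_RX_OFFLOAD_SCATTER is requested or when max_rx_pkt_len exceeds the mbuf data room of any configured Rx queue.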
+ */ +static int bnxt_scattered_rx(struct rte_eth_dev *eth_dev) +{ + uint16_t buf_size; + int i; + + if (eth_dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) + return 1; + + for (i = 0; i < eth_dev->data->nb_rx_queues; i++) { + struct bnxt_rx_queue *rxq = eth_dev->data->rx_queues[i]; + + buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) - + RTE_PKTMBUF_HEADROOM); + if (eth_dev->data->dev_conf.rxmode.max_rx_pkt_len > buf_size) + return 1; + } + return 0; +} + +static eth_rx_burst_t +bnxt_receive_function(struct rte_eth_dev *eth_dev) +{ + struct bnxt *bp = eth_dev->data->dev_private; + +#ifdef RTE_ARCH_X86 +#ifndef RTE_LIBRTE_IEEE1588 + /* + * Vector mode receive can be enabled only if scatter rx is not + * in use and rx offloads are limited to VLAN stripping and + * CRC stripping. + */ + if (!eth_dev->data->scattered_rx && + !(eth_dev->data->dev_conf.rxmode.offloads & + ~(DEV_RX_OFFLOAD_VLAN_STRIP | + DEV_RX_OFFLOAD_KEEP_CRC | + DEV_RX_OFFLOAD_JUMBO_FRAME | + DEV_RX_OFFLOAD_IPV4_CKSUM | + DEV_RX_OFFLOAD_UDP_CKSUM | + DEV_RX_OFFLOAD_TCP_CKSUM | + DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | + DEV_RX_OFFLOAD_RSS_HASH | + DEV_RX_OFFLOAD_VLAN_FILTER)) && + !BNXT_TRUFLOW_EN(bp)) { + PMD_DRV_LOG(INFO, "Using vector mode receive for port %d\n", + eth_dev->data->port_id); + bp->flags |= BNXT_FLAG_RX_VECTOR_PKT_MODE; + return bnxt_recv_pkts_vec; + } + PMD_DRV_LOG(INFO, "Vector mode receive disabled for port %d\n", + eth_dev->data->port_id); + PMD_DRV_LOG(INFO, + "Port %d scatter: %d rx offload: %" PRIX64 "\n", + eth_dev->data->port_id, + eth_dev->data->scattered_rx, + eth_dev->data->dev_conf.rxmode.offloads); +#endif +#endif + bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE; + return bnxt_recv_pkts; +} + +static eth_tx_burst_t +bnxt_transmit_function(__rte_unused struct rte_eth_dev *eth_dev) +{ +#ifdef RTE_ARCH_X86 +#ifndef RTE_LIBRTE_IEEE1588 + /* + * Vector mode transmit can be enabled only if not using scatter rx + * or tx offloads. 
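+ * (Checked below: eth_dev->data->scattered_rx must be clear and txmode.offloads must be zero.)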
+ */ + if (!eth_dev->data->scattered_rx && + !eth_dev->data->dev_conf.txmode.offloads) { + PMD_DRV_LOG(INFO, "Using vector mode transmit for port %d\n", + eth_dev->data->port_id); + return bnxt_xmit_pkts_vec; + } + PMD_DRV_LOG(INFO, "Vector mode transmit disabled for port %d\n", + eth_dev->data->port_id); + PMD_DRV_LOG(INFO, + "Port %d scatter: %d tx offload: %" PRIX64 "\n", + eth_dev->data->port_id, + eth_dev->data->scattered_rx, + eth_dev->data->dev_conf.txmode.offloads); +#endif +#endif + return bnxt_xmit_pkts; +} + +static int bnxt_handle_if_change_status(struct bnxt *bp) +{ + int rc; + + /* Since fw has undergone a reset and lost all contexts, + * set fatal flag to not issue hwrm during cleanup + */ + bp->flags |= BNXT_FLAG_FATAL_ERROR; + bnxt_uninit_resources(bp, true); + + /* clear fatal flag so that re-init happens */ + bp->flags &= ~BNXT_FLAG_FATAL_ERROR; + rc = bnxt_init_resources(bp, true); + + bp->flags &= ~BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE; + + return rc; +} + +static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev) +{ + struct bnxt *bp = eth_dev->data->dev_private; + uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads; + int vlan_mask = 0; + int rc, retry_cnt = BNXT_IF_CHANGE_RETRY_COUNT; + + if (!eth_dev->data->nb_tx_queues || !eth_dev->data->nb_rx_queues) { + PMD_DRV_LOG(ERR, "Queues are not configured yet!\n"); + return -EINVAL; + } + + if (bp->rx_cp_nr_rings > RTE_ETHDEV_QUEUE_STAT_CNTRS) { + PMD_DRV_LOG(ERR, + "RxQ cnt %d > CONFIG_RTE_ETHDEV_QUEUE_STAT_CNTRS %d\n", + bp->rx_cp_nr_rings, RTE_ETHDEV_QUEUE_STAT_CNTRS); + } + + do { + rc = bnxt_hwrm_if_change(bp, true); + if (rc == 0 || rc != -EAGAIN) + break; + + rte_delay_ms(BNXT_IF_CHANGE_RETRY_INTERVAL); + } while (retry_cnt--); + + if (rc) + return rc; + + if (bp->flags & BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE) { + rc = bnxt_handle_if_change_status(bp); + if (rc) + return rc; + } + + bnxt_enable_int(bp); + + rc = bnxt_init_chip(bp); + if (rc) + goto error; + + eth_dev->data->scattered_rx = bnxt_scattered_rx(eth_dev); + eth_dev->data->dev_started = 1; + + bnxt_link_update(eth_dev, 1, ETH_LINK_UP); + + if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER) + vlan_mask |= ETH_VLAN_FILTER_MASK; + if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP) + vlan_mask |= ETH_VLAN_STRIP_MASK; + rc = bnxt_vlan_offload_set_op(eth_dev, vlan_mask); + if (rc) + goto error; + + eth_dev->rx_pkt_burst = bnxt_receive_function(eth_dev); + eth_dev->tx_pkt_burst = bnxt_transmit_function(eth_dev); + + pthread_mutex_lock(&bp->def_cp_lock); + bnxt_schedule_fw_health_check(bp); + pthread_mutex_unlock(&bp->def_cp_lock); + + if (BNXT_TRUFLOW_EN(bp)) + bnxt_ulp_init(bp); + + return 0; + +error: + bnxt_shutdown_nic(bp); + bnxt_free_tx_mbufs(bp); + bnxt_free_rx_mbufs(bp); + bnxt_hwrm_if_change(bp, false); + eth_dev->data->dev_started = 0; + return rc; +} + +static int bnxt_dev_set_link_up_op(struct rte_eth_dev *eth_dev) +{ + struct bnxt *bp = eth_dev->data->dev_private; + int rc = 0; + + if (!bp->link_info->link_up) + rc = bnxt_set_hwrm_link_config(bp, true); + if (!rc) + eth_dev->data->dev_link.link_status = 1; + + bnxt_print_link_info(eth_dev); + return rc; +} + +static int bnxt_dev_set_link_down_op(struct rte_eth_dev *eth_dev) +{ + struct bnxt *bp = eth_dev->data->dev_private; + + eth_dev->data->dev_link.link_status = 0; + bnxt_set_hwrm_link_config(bp, false); + bp->link_info->link_up = 0; + + return 0; +} + +/* Unload the driver, release resources */ +static void bnxt_dev_stop_op(struct rte_eth_dev *eth_dev) +{ + struct bnxt *bp = 
eth_dev->data->dev_private; + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + + if (BNXT_TRUFLOW_EN(bp)) + bnxt_ulp_deinit(bp); + + eth_dev->data->dev_started = 0; + /* Prevent crashes when queues are still in use */ + eth_dev->rx_pkt_burst = &bnxt_dummy_recv_pkts; + eth_dev->tx_pkt_burst = &bnxt_dummy_xmit_pkts; + + bnxt_disable_int(bp); + + /* disable uio/vfio intr/eventfd mapping */ + rte_intr_disable(intr_handle); + + bnxt_cancel_fw_health_check(bp); + + bnxt_dev_set_link_down_op(eth_dev); + + /* Wait for link to be reset and the async notification to process. + * During reset recovery, there is no need to wait and + * VF/NPAR functions do not have privilege to change PHY config. + */ + if (!is_bnxt_in_error(bp) && BNXT_SINGLE_PF(bp)) + bnxt_link_update(eth_dev, 1, ETH_LINK_DOWN); + + /* Clean queue intr-vector mapping */ + rte_intr_efd_disable(intr_handle); + if (intr_handle->intr_vec != NULL) { + rte_free(intr_handle->intr_vec); + intr_handle->intr_vec = NULL; + } + + bnxt_hwrm_port_clr_stats(bp); + bnxt_free_tx_mbufs(bp); + bnxt_free_rx_mbufs(bp); + /* Process any remaining notifications in default completion queue */ + bnxt_int_handler(eth_dev); + bnxt_shutdown_nic(bp); + bnxt_hwrm_if_change(bp, false); + + rte_free(bp->mark_table); + bp->mark_table = NULL; + + bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE; + bp->rx_cosq_cnt = 0; + /* All filters are deleted on a port stop. */ + if (BNXT_FLOW_XSTATS_EN(bp)) + bp->flow_stat->flow_count = 0; +} + +static void bnxt_dev_close_op(struct rte_eth_dev *eth_dev) +{ + struct bnxt *bp = eth_dev->data->dev_private; + + /* cancel the recovery handler before remove dev */ + rte_eal_alarm_cancel(bnxt_dev_reset_and_resume, (void *)bp); + rte_eal_alarm_cancel(bnxt_dev_recover, (void *)bp); + bnxt_cancel_fc_thread(bp); + + if (eth_dev->data->dev_started) + bnxt_dev_stop_op(eth_dev); + + bnxt_uninit_resources(bp, false); + + bnxt_free_leds_info(bp); + bnxt_free_cos_queues(bp); + bnxt_free_link_info(bp); + bnxt_free_pf_info(bp); + + eth_dev->dev_ops = NULL; + eth_dev->rx_pkt_burst = NULL; + eth_dev->tx_pkt_burst = NULL; + + rte_memzone_free((const struct rte_memzone *)bp->tx_mem_zone); + bp->tx_mem_zone = NULL; + rte_memzone_free((const struct rte_memzone *)bp->rx_mem_zone); + bp->rx_mem_zone = NULL; + + rte_free(bp->pf->vf_info); + bp->pf->vf_info = NULL; + + rte_free(bp->grp_info); + bp->grp_info = NULL; +} + +static void bnxt_mac_addr_remove_op(struct rte_eth_dev *eth_dev, + uint32_t index) +{ + struct bnxt *bp = eth_dev->data->dev_private; + uint64_t pool_mask = eth_dev->data->mac_pool_sel[index]; + struct bnxt_vnic_info *vnic; + struct bnxt_filter_info *filter, *temp_filter; + uint32_t i; + + if (is_bnxt_in_error(bp)) + return; + + /* + * Loop through all VNICs from the specified filter flow pools to + * remove the corresponding MAC addr filter + */ + for (i = 0; i < bp->nr_vnics; i++) { + if (!(pool_mask & (1ULL << i))) + continue; + + vnic = &bp->vnic_info[i]; + filter = STAILQ_FIRST(&vnic->filter); + while (filter) { + temp_filter = STAILQ_NEXT(filter, next); + if (filter->mac_index == index) { + STAILQ_REMOVE(&vnic->filter, filter, + bnxt_filter_info, next); + bnxt_hwrm_clear_l2_filter(bp, filter); + bnxt_free_filter(bp, filter); + } + filter = temp_filter; + } + } +} + +static int bnxt_add_mac_filter(struct bnxt *bp, struct bnxt_vnic_info *vnic, + struct rte_ether_addr *mac_addr, uint32_t index, + uint32_t pool) +{ + struct bnxt_filter_info *filter; + int rc = 0; + + 
/* If a filter already exists for this MAC index, there is nothing to do; + * otherwise attach the requested MAC address to a new l2_filter. + */ + STAILQ_FOREACH(filter, &vnic->filter, next) { + if (filter->mac_index == index) { + PMD_DRV_LOG(DEBUG, + "MAC addr already exists for pool %d\n", + pool); + return 0; + } + } + + filter = bnxt_alloc_filter(bp); + if (!filter) { + PMD_DRV_LOG(ERR, "L2 filter alloc failed\n"); + return -ENODEV; + } + + /* bnxt_alloc_filter copies the default MAC to filter->l2_addr. If a + * different MAC is being programmed, copy that address to + * filter->l2_addr instead. + */ + if (mac_addr) + memcpy(filter->l2_addr, mac_addr, RTE_ETHER_ADDR_LEN); + filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST; + + rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter); + if (!rc) { + filter->mac_index = index; + if (filter->mac_index == 0) + STAILQ_INSERT_HEAD(&vnic->filter, filter, next); + else + STAILQ_INSERT_TAIL(&vnic->filter, filter, next); + } else { + bnxt_free_filter(bp, filter); + } + + return rc; +} + +static int bnxt_mac_addr_add_op(struct rte_eth_dev *eth_dev, + struct rte_ether_addr *mac_addr, + uint32_t index, uint32_t pool) +{ + struct bnxt *bp = eth_dev->data->dev_private; + struct bnxt_vnic_info *vnic = &bp->vnic_info[pool]; + int rc = 0; + + rc = is_bnxt_in_error(bp); + if (rc) + return rc; + + if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) { + PMD_DRV_LOG(ERR, "Cannot add MAC address to a VF interface\n"); + return -ENOTSUP; + } + + if (!vnic) { + PMD_DRV_LOG(ERR, "VNIC not found for pool %d!\n", pool); + return -EINVAL; + } + + /* Filter settings will get applied when port is started */ + if (!eth_dev->data->dev_started) + return 0; + + rc = bnxt_add_mac_filter(bp, vnic, mac_addr, index, pool); + + return rc; +} + +int bnxt_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete, + bool exp_link_status) +{ + int rc = 0; + struct bnxt *bp = eth_dev->data->dev_private; + struct rte_eth_link new; + int cnt = exp_link_status ?
BNXT_LINK_UP_WAIT_CNT : + BNXT_LINK_DOWN_WAIT_CNT; + + rc = is_bnxt_in_error(bp); + if (rc) + return rc; + + memset(&new, 0, sizeof(new)); + do { + /* Retrieve link info from hardware */ + rc = bnxt_get_hwrm_link_config(bp, &new); + if (rc) { + new.link_speed = ETH_LINK_SPEED_100M; + new.link_duplex = ETH_LINK_FULL_DUPLEX; + PMD_DRV_LOG(ERR, + "Failed to retrieve link rc = 0x%x!\n", rc); + goto out; + } + + if (!wait_to_complete || new.link_status == exp_link_status) + break; + + rte_delay_ms(BNXT_LINK_WAIT_INTERVAL); + } while (cnt--); + +out: + /* Timed out or success */ + if (new.link_status != eth_dev->data->dev_link.link_status || + new.link_speed != eth_dev->data->dev_link.link_speed) { + rte_eth_linkstatus_set(eth_dev, &new); + + _rte_eth_dev_callback_process(eth_dev, + RTE_ETH_EVENT_INTR_LSC, + NULL); + + bnxt_print_link_info(eth_dev); + } + + return rc; +} + +static int bnxt_link_update_op(struct rte_eth_dev *eth_dev, + int wait_to_complete) +{ + return bnxt_link_update(eth_dev, wait_to_complete, ETH_LINK_UP); +} + +static int bnxt_promiscuous_enable_op(struct rte_eth_dev *eth_dev) +{ + struct bnxt *bp = eth_dev->data->dev_private; + struct bnxt_vnic_info *vnic; + uint32_t old_flags; + int rc; + + rc = is_bnxt_in_error(bp); + if (rc) + return rc; + + /* Filter settings will get applied when port is started */ + if (!eth_dev->data->dev_started) + return 0; + + if (bp->vnic_info == NULL) + return 0; + + vnic = BNXT_GET_DEFAULT_VNIC(bp); + + old_flags = vnic->flags; + vnic->flags |= BNXT_VNIC_INFO_PROMISC; + rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); + if (rc != 0) + vnic->flags = old_flags; + + return rc; +} + +static int bnxt_promiscuous_disable_op(struct rte_eth_dev *eth_dev) +{ + struct bnxt *bp = eth_dev->data->dev_private; + struct bnxt_vnic_info *vnic; + uint32_t old_flags; + int rc; + + rc = is_bnxt_in_error(bp); + if (rc) + return rc; + + /* Filter settings will get applied when port is started */ + if (!eth_dev->data->dev_started) + return 0; + + if (bp->vnic_info == NULL) + return 0; + + vnic = BNXT_GET_DEFAULT_VNIC(bp); + + old_flags = vnic->flags; + vnic->flags &= ~BNXT_VNIC_INFO_PROMISC; + rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); + if (rc != 0) + vnic->flags = old_flags; + + return rc; +} + +static int bnxt_allmulticast_enable_op(struct rte_eth_dev *eth_dev) +{ + struct bnxt *bp = eth_dev->data->dev_private; + struct bnxt_vnic_info *vnic; + uint32_t old_flags; + int rc; + + rc = is_bnxt_in_error(bp); + if (rc) + return rc; + + /* Filter settings will get applied when port is started */ + if (!eth_dev->data->dev_started) + return 0; + + if (bp->vnic_info == NULL) + return 0; + + vnic = BNXT_GET_DEFAULT_VNIC(bp); + + old_flags = vnic->flags; + vnic->flags |= BNXT_VNIC_INFO_ALLMULTI; + rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); + if (rc != 0) + vnic->flags = old_flags; + + return rc; +} + +static int bnxt_allmulticast_disable_op(struct rte_eth_dev *eth_dev) +{ + struct bnxt *bp = eth_dev->data->dev_private; + struct bnxt_vnic_info *vnic; + uint32_t old_flags; + int rc; + + rc = is_bnxt_in_error(bp); + if (rc) + return rc; + + /* Filter settings will get applied when port is started */ + if (!eth_dev->data->dev_started) + return 0; + + if (bp->vnic_info == NULL) + return 0; + + vnic = BNXT_GET_DEFAULT_VNIC(bp); + + old_flags = vnic->flags; + vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI; + rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); + if (rc != 0) + vnic->flags = old_flags; + + return rc; +} + +/* Return bnxt_rx_queue pointer 
corresponding to a given rxq. */ +static struct bnxt_rx_queue *bnxt_qid_to_rxq(struct bnxt *bp, uint16_t qid) +{ + if (qid >= bp->rx_nr_rings) + return NULL; + + return bp->eth_dev->data->rx_queues[qid]; +} + +/* Return rxq corresponding to a given rss table ring/group ID. */ +static uint16_t bnxt_rss_to_qid(struct bnxt *bp, uint16_t fwr) +{ + struct bnxt_rx_queue *rxq; + unsigned int i; + + if (!BNXT_HAS_RING_GRPS(bp)) { + for (i = 0; i < bp->rx_nr_rings; i++) { + rxq = bp->eth_dev->data->rx_queues[i]; + if (rxq->rx_ring->rx_ring_struct->fw_ring_id == fwr) + return rxq->index; + } + } else { + for (i = 0; i < bp->rx_nr_rings; i++) { + if (bp->grp_info[i].fw_grp_id == fwr) + return i; + } + } + + return INVALID_HW_RING_ID; +} + +static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size) +{ + struct bnxt *bp = eth_dev->data->dev_private; + struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf; + struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp); + uint16_t tbl_size = bnxt_rss_hash_tbl_size(bp); + uint16_t idx, sft; + int i, rc; + + rc = is_bnxt_in_error(bp); + if (rc) + return rc; + + if (!vnic->rss_table) + return -EINVAL; + + if (!(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)) + return -EINVAL; + + if (reta_size != tbl_size) { + PMD_DRV_LOG(ERR, "The configured hash table lookup size " + "(%d) must equal the size supported by the hardware " + "(%d)\n", reta_size, tbl_size); + return -EINVAL; + } + + for (i = 0; i < reta_size; i++) { + struct bnxt_rx_queue *rxq; + + idx = i / RTE_RETA_GROUP_SIZE; + sft = i % RTE_RETA_GROUP_SIZE; + + if (!(reta_conf[idx].mask & (1ULL << sft))) + continue; + + rxq = bnxt_qid_to_rxq(bp, reta_conf[idx].reta[sft]); + if (!rxq) { + PMD_DRV_LOG(ERR, "Invalid ring in reta_conf.\n"); + return -EINVAL; + } + + if (BNXT_CHIP_THOR(bp)) { + vnic->rss_table[i * 2] = + rxq->rx_ring->rx_ring_struct->fw_ring_id; + vnic->rss_table[i * 2 + 1] = + rxq->cp_ring->cp_ring_struct->fw_ring_id; + } else { + vnic->rss_table[i] = + vnic->fw_grp_ids[reta_conf[idx].reta[sft]]; + } + } + + bnxt_hwrm_vnic_rss_cfg(bp, vnic); + return 0; +} + +static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size) +{ + struct bnxt *bp = eth_dev->data->dev_private; + struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp); + uint16_t tbl_size = bnxt_rss_hash_tbl_size(bp); + uint16_t idx, sft, i; + int rc; + + rc = is_bnxt_in_error(bp); + if (rc) + return rc; + + /* Retrieve from the default VNIC */ + if (!vnic) + return -EINVAL; + if (!vnic->rss_table) + return -EINVAL; + + if (reta_size != tbl_size) { + PMD_DRV_LOG(ERR, "The configured hash table lookup size " + "(%d) must equal the size supported by the hardware " + "(%d)\n", reta_size, tbl_size); + return -EINVAL; + } + + for (idx = 0, i = 0; i < reta_size; i++) { + idx = i / RTE_RETA_GROUP_SIZE; + sft = i % RTE_RETA_GROUP_SIZE; + + if (reta_conf[idx].mask & (1ULL << sft)) { + uint16_t qid; + + if (BNXT_CHIP_THOR(bp)) + qid = bnxt_rss_to_qid(bp, + vnic->rss_table[i * 2]); + else + qid = bnxt_rss_to_qid(bp, vnic->rss_table[i]); + + if (qid == INVALID_HW_RING_ID) { + PMD_DRV_LOG(ERR, "Inv. 
entry in rss table.\n"); + return -EINVAL; + } + reta_conf[idx].reta[sft] = qid; + } + } + + return 0; +} + +static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev, + struct rte_eth_rss_conf *rss_conf) +{ + struct bnxt *bp = eth_dev->data->dev_private; + struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf; + struct bnxt_vnic_info *vnic; + int rc; + + rc = is_bnxt_in_error(bp); + if (rc) + return rc; + + /* + * If the RSS enablement requested here differs from the mode set in + * dev_configure, return -EINVAL. + */ + if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) { + if (!rss_conf->rss_hf) + PMD_DRV_LOG(ERR, "Hash type NONE\n"); + } else { + if (rss_conf->rss_hf & BNXT_ETH_RSS_SUPPORT) + return -EINVAL; + } + + bp->flags |= BNXT_FLAG_UPDATE_HASH; + memcpy(&eth_dev->data->dev_conf.rx_adv_conf.rss_conf, + rss_conf, + sizeof(*rss_conf)); + + /* Update the default RSS VNIC(s) */ + vnic = BNXT_GET_DEFAULT_VNIC(bp); + vnic->hash_type = bnxt_rte_to_hwrm_hash_types(rss_conf->rss_hf); + + /* + * If hashkey is not specified, use the previously configured + * hashkey + */ + if (!rss_conf->rss_key) + goto rss_config; + + if (rss_conf->rss_key_len != HW_HASH_KEY_SIZE) { + PMD_DRV_LOG(ERR, + "Invalid hashkey length, should be %d bytes\n", + HW_HASH_KEY_SIZE); + return -EINVAL; + } + memcpy(vnic->rss_hash_key, rss_conf->rss_key, rss_conf->rss_key_len); + +rss_config: + bnxt_hwrm_vnic_rss_cfg(bp, vnic); + return 0; +} + +static int bnxt_rss_hash_conf_get_op(struct rte_eth_dev *eth_dev, + struct rte_eth_rss_conf *rss_conf) +{ + struct bnxt *bp = eth_dev->data->dev_private; + struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp); + int len, rc; + uint32_t hash_types; + + rc = is_bnxt_in_error(bp); + if (rc) + return rc; + + /* RSS configuration is the same for all VNICs */ + if (vnic && vnic->rss_hash_key) { + if (rss_conf->rss_key) { + len = rss_conf->rss_key_len <= HW_HASH_KEY_SIZE ?
+ rss_conf->rss_key_len : HW_HASH_KEY_SIZE; + memcpy(rss_conf->rss_key, vnic->rss_hash_key, len); + } + + hash_types = vnic->hash_type; + rss_conf->rss_hf = 0; + if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4) { + rss_conf->rss_hf |= ETH_RSS_IPV4; + hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4; + } + if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4) { + rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP; + hash_types &= + ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4; + } + if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4) { + rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP; + hash_types &= + ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4; + } + if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6) { + rss_conf->rss_hf |= ETH_RSS_IPV6; + hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6; + } + if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6) { + rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP; + hash_types &= + ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6; + } + if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6) { + rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP; + hash_types &= + ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6; + } + if (hash_types) { + PMD_DRV_LOG(ERR, + "Unknown RSS config from firmware (%08x), RSS disabled", + vnic->hash_type); + return -ENOTSUP; + } + } else { + rss_conf->rss_hf = 0; + } + return 0; +} + +static int bnxt_flow_ctrl_get_op(struct rte_eth_dev *dev, + struct rte_eth_fc_conf *fc_conf) +{ + struct bnxt *bp = dev->data->dev_private; + struct rte_eth_link link_info; + int rc; + + rc = is_bnxt_in_error(bp); + if (rc) + return rc; + + rc = bnxt_get_hwrm_link_config(bp, &link_info); + if (rc) + return rc; + + memset(fc_conf, 0, sizeof(*fc_conf)); + if (bp->link_info->auto_pause) + fc_conf->autoneg = 1; + switch (bp->link_info->pause) { + case 0: + fc_conf->mode = RTE_FC_NONE; + break; + case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX: + fc_conf->mode = RTE_FC_TX_PAUSE; + break; + case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX: + fc_conf->mode = RTE_FC_RX_PAUSE; + break; + case (HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX | + HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX): + fc_conf->mode = RTE_FC_FULL; + break; + } + return 0; +} + +static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev, + struct rte_eth_fc_conf *fc_conf) +{ + struct bnxt *bp = dev->data->dev_private; + int rc; + + rc = is_bnxt_in_error(bp); + if (rc) + return rc; + + if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp)) { + PMD_DRV_LOG(ERR, "Flow Control Settings cannot be modified\n"); + return -ENOTSUP; + } + + switch (fc_conf->mode) { + case RTE_FC_NONE: + bp->link_info->auto_pause = 0; + bp->link_info->force_pause = 0; + break; + case RTE_FC_RX_PAUSE: + if (fc_conf->autoneg) { + bp->link_info->auto_pause = + HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX; + bp->link_info->force_pause = 0; + } else { + bp->link_info->auto_pause = 0; + bp->link_info->force_pause = + HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX; + } + break; + case RTE_FC_TX_PAUSE: + if (fc_conf->autoneg) { + bp->link_info->auto_pause = + HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX; + bp->link_info->force_pause = 0; + } else { + bp->link_info->auto_pause = 0; + bp->link_info->force_pause = + HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX; + } + break; + case RTE_FC_FULL: + if (fc_conf->autoneg) { + bp->link_info->auto_pause = + HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX | + HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX; + bp->link_info->force_pause = 0; + } else { + bp->link_info->auto_pause = 0; + bp->link_info->force_pause = + HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX | 
+ HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX; + } + break; + } + return bnxt_set_hwrm_link_config(bp, true); +} + +/* Add UDP tunneling port */ +static int +bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev, + struct rte_eth_udp_tunnel *udp_tunnel) +{ + struct bnxt *bp = eth_dev->data->dev_private; + uint16_t tunnel_type = 0; + int rc = 0; + + rc = is_bnxt_in_error(bp); + if (rc) + return rc; + + switch (udp_tunnel->prot_type) { + case RTE_TUNNEL_TYPE_VXLAN: + if (bp->vxlan_port_cnt) { + PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n", + udp_tunnel->udp_port); + if (bp->vxlan_port != udp_tunnel->udp_port) { + PMD_DRV_LOG(ERR, "Only one port allowed\n"); + return -ENOSPC; + } + bp->vxlan_port_cnt++; + return 0; + } + tunnel_type = + HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN; + bp->vxlan_port_cnt++; + break; + case RTE_TUNNEL_TYPE_GENEVE: + if (bp->geneve_port_cnt) { + PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n", + udp_tunnel->udp_port); + if (bp->geneve_port != udp_tunnel->udp_port) { + PMD_DRV_LOG(ERR, "Only one port allowed\n"); + return -ENOSPC; + } + bp->geneve_port_cnt++; + return 0; + } + tunnel_type = + HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE; + bp->geneve_port_cnt++; + break; + default: + PMD_DRV_LOG(ERR, "Tunnel type is not supported\n"); + return -ENOTSUP; + } + rc = bnxt_hwrm_tunnel_dst_port_alloc(bp, udp_tunnel->udp_port, + tunnel_type); + return rc; +} + +static int +bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev, + struct rte_eth_udp_tunnel *udp_tunnel) +{ + struct bnxt *bp = eth_dev->data->dev_private; + uint16_t tunnel_type = 0; + uint16_t port = 0; + int rc = 0; + + rc = is_bnxt_in_error(bp); + if (rc) + return rc; + + switch (udp_tunnel->prot_type) { + case RTE_TUNNEL_TYPE_VXLAN: + if (!bp->vxlan_port_cnt) { + PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n"); + return -EINVAL; + } + if (bp->vxlan_port != udp_tunnel->udp_port) { + PMD_DRV_LOG(ERR, "Req Port: %d. Configured port: %d\n", + udp_tunnel->udp_port, bp->vxlan_port); + return -EINVAL; + } + if (--bp->vxlan_port_cnt) + return 0; + + tunnel_type = + HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN; + port = bp->vxlan_fw_dst_port_id; + break; + case RTE_TUNNEL_TYPE_GENEVE: + if (!bp->geneve_port_cnt) { + PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n"); + return -EINVAL; + } + if (bp->geneve_port != udp_tunnel->udp_port) { + PMD_DRV_LOG(ERR, "Req Port: %d. 
Configured port: %d\n", + udp_tunnel->udp_port, bp->geneve_port); + return -EINVAL; + } + if (--bp->geneve_port_cnt) + return 0; + + tunnel_type = + HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE; + port = bp->geneve_fw_dst_port_id; + break; + default: + PMD_DRV_LOG(ERR, "Tunnel type is not supported\n"); + return -ENOTSUP; + } + + rc = bnxt_hwrm_tunnel_dst_port_free(bp, port, tunnel_type); + if (!rc) { + if (tunnel_type == + HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN) + bp->vxlan_port = 0; + if (tunnel_type == + HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE) + bp->geneve_port = 0; + } + return rc; +} + +static int bnxt_del_vlan_filter(struct bnxt *bp, uint16_t vlan_id) +{ + struct bnxt_filter_info *filter; + struct bnxt_vnic_info *vnic; + int rc = 0; + uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN; + + vnic = BNXT_GET_DEFAULT_VNIC(bp); + filter = STAILQ_FIRST(&vnic->filter); + while (filter) { + /* Search for this matching MAC+VLAN filter */ + if (bnxt_vlan_filter_exists(bp, filter, chk, vlan_id)) { + /* Delete the filter */ + rc = bnxt_hwrm_clear_l2_filter(bp, filter); + if (rc) + return rc; + STAILQ_REMOVE(&vnic->filter, filter, + bnxt_filter_info, next); + bnxt_free_filter(bp, filter); + PMD_DRV_LOG(INFO, + "Deleted vlan filter for %d\n", + vlan_id); + return 0; + } + filter = STAILQ_NEXT(filter, next); + } + return -ENOENT; +} + +static int bnxt_add_vlan_filter(struct bnxt *bp, uint16_t vlan_id) +{ + struct bnxt_filter_info *filter; + struct bnxt_vnic_info *vnic; + int rc = 0; + uint32_t en = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN | + HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK; + uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN; + + /* Implementation notes on the use of VNIC in this command: + * + * By default, these filters belong to default vnic for the function. + * Once these filters are set up, only destination VNIC can be modified. + * If the destination VNIC is not specified in this command, + * then the HWRM shall only create an l2 context id. + */ + + vnic = BNXT_GET_DEFAULT_VNIC(bp); + filter = STAILQ_FIRST(&vnic->filter); + /* Check if the VLAN has already been added */ + while (filter) { + if (bnxt_vlan_filter_exists(bp, filter, chk, vlan_id)) + return -EEXIST; + + filter = STAILQ_NEXT(filter, next); + } + + /* No match found. Alloc a fresh filter and issue the L2_FILTER_ALLOC + * command to create MAC+VLAN filter with the right flags, enables set. + */ + filter = bnxt_alloc_filter(bp); + if (!filter) { + PMD_DRV_LOG(ERR, + "MAC/VLAN filter alloc failed\n"); + return -ENOMEM; + } + /* MAC + VLAN ID filter */ + /* If l2_ivlan == 0 and l2_ivlan_mask != 0, only + * untagged packets are received + * + * If l2_ivlan != 0 and l2_ivlan_mask != 0, untagged + * packets and only the programmed vlan's packets are received + */ + filter->l2_ivlan = vlan_id; + filter->l2_ivlan_mask = 0x0FFF; + filter->enables |= en; + filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST; + + rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter); + if (rc) { + /* Free the newly allocated filter as we were + * not able to create the filter in hardware. 
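+ * (bnxt_hwrm_set_l2_filter() failed just above.)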
+ */ + bnxt_free_filter(bp, filter); + return rc; + } + + filter->mac_index = 0; + /* Add this new filter to the list */ + if (vlan_id == 0) + STAILQ_INSERT_HEAD(&vnic->filter, filter, next); + else + STAILQ_INSERT_TAIL(&vnic->filter, filter, next); + + PMD_DRV_LOG(INFO, + "Added Vlan filter for %d\n", vlan_id); + return rc; +} + +static int bnxt_vlan_filter_set_op(struct rte_eth_dev *eth_dev, + uint16_t vlan_id, int on) +{ + struct bnxt *bp = eth_dev->data->dev_private; + int rc; + + rc = is_bnxt_in_error(bp); + if (rc) + return rc; + + if (!eth_dev->data->dev_started) { + PMD_DRV_LOG(ERR, "port must be started before setting vlan\n"); + return -EINVAL; + } + + /* These operations apply to ALL existing MAC/VLAN filters */ + if (on) + return bnxt_add_vlan_filter(bp, vlan_id); + else + return bnxt_del_vlan_filter(bp, vlan_id); +} + +static int bnxt_del_dflt_mac_filter(struct bnxt *bp, + struct bnxt_vnic_info *vnic) +{ + struct bnxt_filter_info *filter; + int rc; + + filter = STAILQ_FIRST(&vnic->filter); + while (filter) { + if (filter->mac_index == 0 && + !memcmp(filter->l2_addr, bp->mac_addr, + RTE_ETHER_ADDR_LEN)) { + rc = bnxt_hwrm_clear_l2_filter(bp, filter); + if (!rc) { + STAILQ_REMOVE(&vnic->filter, filter, + bnxt_filter_info, next); + bnxt_free_filter(bp, filter); + } + return rc; + } + filter = STAILQ_NEXT(filter, next); + } + return 0; +} + +static int +bnxt_config_vlan_hw_filter(struct bnxt *bp, uint64_t rx_offloads) +{ + struct bnxt_vnic_info *vnic; + unsigned int i; + int rc; + + vnic = BNXT_GET_DEFAULT_VNIC(bp); + if (!(rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)) { + /* Remove any VLAN filters programmed */ + for (i = 0; i < RTE_ETHER_MAX_VLAN_ID; i++) + bnxt_del_vlan_filter(bp, i); + + rc = bnxt_add_mac_filter(bp, vnic, NULL, 0, 0); + if (rc) + return rc; + } else { + /* Default filter will allow packets that match the + * dest mac. 
So, it has to be deleted, otherwise, we + * will endup receiving vlan packets for which the + * filter is not programmed, when hw-vlan-filter + * configuration is ON + */ + bnxt_del_dflt_mac_filter(bp, vnic); + /* This filter will allow only untagged packets */ + bnxt_add_vlan_filter(bp, 0); + } + PMD_DRV_LOG(DEBUG, "VLAN Filtering: %d\n", + !!(rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)); + + return 0; +} + +static int bnxt_free_one_vnic(struct bnxt *bp, uint16_t vnic_id) +{ + struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; + unsigned int i; + int rc; + + /* Destroy vnic filters and vnic */ + if (bp->eth_dev->data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_VLAN_FILTER) { + for (i = 0; i < RTE_ETHER_MAX_VLAN_ID; i++) + bnxt_del_vlan_filter(bp, i); + } + bnxt_del_dflt_mac_filter(bp, vnic); + + rc = bnxt_hwrm_vnic_free(bp, vnic); + if (rc) + return rc; + + rte_free(vnic->fw_grp_ids); + vnic->fw_grp_ids = NULL; + + vnic->rx_queue_cnt = 0; + + return 0; +} + +static int +bnxt_config_vlan_hw_stripping(struct bnxt *bp, uint64_t rx_offloads) +{ + struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp); + int rc; + + /* Destroy, recreate and reconfigure the default vnic */ + rc = bnxt_free_one_vnic(bp, 0); + if (rc) + return rc; + + /* default vnic 0 */ + rc = bnxt_setup_one_vnic(bp, 0); + if (rc) + return rc; + + if (bp->eth_dev->data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_VLAN_FILTER) { + rc = bnxt_add_vlan_filter(bp, 0); + if (rc) + return rc; + rc = bnxt_restore_vlan_filters(bp); + if (rc) + return rc; + } else { + rc = bnxt_add_mac_filter(bp, vnic, NULL, 0, 0); + if (rc) + return rc; + } + + rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); + if (rc) + return rc; + + PMD_DRV_LOG(DEBUG, "VLAN Strip Offload: %d\n", + !!(rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)); + + return rc; +} + +static int +bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask) +{ + uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads; + struct bnxt *bp = dev->data->dev_private; + int rc; + + rc = is_bnxt_in_error(bp); + if (rc) + return rc; + + /* Filter settings will get applied when port is started */ + if (!dev->data->dev_started) + return 0; + + if (mask & ETH_VLAN_FILTER_MASK) { + /* Enable or disable VLAN filtering */ + rc = bnxt_config_vlan_hw_filter(bp, rx_offloads); + if (rc) + return rc; + } + + if (mask & ETH_VLAN_STRIP_MASK) { + /* Enable or disable VLAN stripping */ + rc = bnxt_config_vlan_hw_stripping(bp, rx_offloads); + if (rc) + return rc; + } + + if (mask & ETH_VLAN_EXTEND_MASK) { + if (rx_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) + PMD_DRV_LOG(DEBUG, "Extend VLAN supported\n"); + else + PMD_DRV_LOG(INFO, "Extend VLAN unsupported\n"); + } + + return 0; +} + +static int +bnxt_vlan_tpid_set_op(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type, + uint16_t tpid) +{ + struct bnxt *bp = dev->data->dev_private; + int qinq = dev->data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_VLAN_EXTEND; + + if (vlan_type != ETH_VLAN_TYPE_INNER && + vlan_type != ETH_VLAN_TYPE_OUTER) { + PMD_DRV_LOG(ERR, + "Unsupported vlan type."); + return -EINVAL; + } + if (!qinq) { + PMD_DRV_LOG(ERR, + "QinQ not enabled. 
Needs to be ON as we can " + "accelerate only outer vlan\n"); + return -EINVAL; + } + + if (vlan_type == ETH_VLAN_TYPE_OUTER) { + switch (tpid) { + case RTE_ETHER_TYPE_QINQ: + bp->outer_tpid_bd = + TX_BD_LONG_CFA_META_VLAN_TPID_TPID88A8; + break; + case RTE_ETHER_TYPE_VLAN: + bp->outer_tpid_bd = + TX_BD_LONG_CFA_META_VLAN_TPID_TPID8100; + break; + case 0x9100: + bp->outer_tpid_bd = + TX_BD_LONG_CFA_META_VLAN_TPID_TPID9100; + break; + case 0x9200: + bp->outer_tpid_bd = + TX_BD_LONG_CFA_META_VLAN_TPID_TPID9200; + break; + case 0x9300: + bp->outer_tpid_bd = + TX_BD_LONG_CFA_META_VLAN_TPID_TPID9300; + break; + default: + PMD_DRV_LOG(ERR, "Invalid TPID: %x\n", tpid); + return -EINVAL; + } + bp->outer_tpid_bd |= tpid; + PMD_DRV_LOG(INFO, "outer_tpid_bd = %x\n", bp->outer_tpid_bd); + } else if (vlan_type == ETH_VLAN_TYPE_INNER) { + PMD_DRV_LOG(ERR, + "Can accelerate only outer vlan in QinQ\n"); + return -EINVAL; + } + + return 0; +} + +static int +bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev, + struct rte_ether_addr *addr) +{ + struct bnxt *bp = dev->data->dev_private; + /* Default Filter is tied to VNIC 0 */ + struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp); + int rc; + + rc = is_bnxt_in_error(bp); + if (rc) + return rc; + + if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) + return -EPERM; + + if (rte_is_zero_ether_addr(addr)) + return -EINVAL; + + /* Filter settings will get applied when port is started */ + if (!dev->data->dev_started) + return 0; + + /* Check if the requested MAC is already added */ + if (memcmp(addr, bp->mac_addr, RTE_ETHER_ADDR_LEN) == 0) + return 0; + + /* Destroy filter and re-create it */ + bnxt_del_dflt_mac_filter(bp, vnic); + + memcpy(bp->mac_addr, addr, RTE_ETHER_ADDR_LEN); + if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_VLAN_FILTER) { + /* This filter will allow only untagged packets */ + rc = bnxt_add_vlan_filter(bp, 0); + } else { + rc = bnxt_add_mac_filter(bp, vnic, addr, 0, 0); + } + + PMD_DRV_LOG(DEBUG, "Set MAC addr\n"); + return rc; +} + +static int +bnxt_dev_set_mc_addr_list_op(struct rte_eth_dev *eth_dev, + struct rte_ether_addr *mc_addr_set, + uint32_t nb_mc_addr) +{ + struct bnxt *bp = eth_dev->data->dev_private; + char *mc_addr_list = (char *)mc_addr_set; + struct bnxt_vnic_info *vnic; + uint32_t off = 0, i = 0; + int rc; + + rc = is_bnxt_in_error(bp); + if (rc) + return rc; + + vnic = BNXT_GET_DEFAULT_VNIC(bp); + + if (nb_mc_addr > BNXT_MAX_MC_ADDRS) { + vnic->flags |= BNXT_VNIC_INFO_ALLMULTI; + goto allmulti; + } + + /* TODO Check for Duplicate mcast addresses */ + vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI; + for (i = 0; i < nb_mc_addr; i++) { + memcpy(vnic->mc_list + off, &mc_addr_list[i], + RTE_ETHER_ADDR_LEN); + off += RTE_ETHER_ADDR_LEN; + } + + vnic->mc_addr_cnt = i; + if (vnic->mc_addr_cnt) + vnic->flags |= BNXT_VNIC_INFO_MCAST; + else + vnic->flags &= ~BNXT_VNIC_INFO_MCAST; + +allmulti: + return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); +} + +static int +bnxt_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size) +{ + struct bnxt *bp = dev->data->dev_private; + uint8_t fw_major = (bp->fw_ver >> 24) & 0xff; + uint8_t fw_minor = (bp->fw_ver >> 16) & 0xff; + uint8_t fw_updt = (bp->fw_ver >> 8) & 0xff; + uint8_t fw_rsvd = bp->fw_ver & 0xff; + int ret; + + ret = snprintf(fw_version, fw_size, "%d.%d.%d.%d", + fw_major, fw_minor, fw_updt, fw_rsvd); + + ret += 1; /* add the size of '\0' */ + if (fw_size < (uint32_t)ret) + return ret; + else + return 0; +} + +static void +bnxt_rxq_info_get_op(struct 
rte_eth_dev *dev, uint16_t queue_id, + struct rte_eth_rxq_info *qinfo) +{ + struct bnxt *bp = dev->data->dev_private; + struct bnxt_rx_queue *rxq; + + if (is_bnxt_in_error(bp)) + return; + + rxq = dev->data->rx_queues[queue_id]; + + qinfo->mp = rxq->mb_pool; + qinfo->scattered_rx = dev->data->scattered_rx; + qinfo->nb_desc = rxq->nb_rx_desc; + + qinfo->conf.rx_free_thresh = rxq->rx_free_thresh; + qinfo->conf.rx_drop_en = 0; + qinfo->conf.rx_deferred_start = rxq->rx_deferred_start; +} + +static void +bnxt_txq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id, + struct rte_eth_txq_info *qinfo) +{ + struct bnxt *bp = dev->data->dev_private; + struct bnxt_tx_queue *txq; + + if (is_bnxt_in_error(bp)) + return; + + txq = dev->data->tx_queues[queue_id]; + + qinfo->nb_desc = txq->nb_tx_desc; + + qinfo->conf.tx_thresh.pthresh = txq->pthresh; + qinfo->conf.tx_thresh.hthresh = txq->hthresh; + qinfo->conf.tx_thresh.wthresh = txq->wthresh; + + qinfo->conf.tx_free_thresh = txq->tx_free_thresh; + qinfo->conf.tx_rs_thresh = 0; + qinfo->conf.tx_deferred_start = txq->tx_deferred_start; +} + +int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu) +{ + struct bnxt *bp = eth_dev->data->dev_private; + uint32_t new_pkt_size; + uint32_t rc = 0; + uint32_t i; + + rc = is_bnxt_in_error(bp); + if (rc) + return rc; + + /* Exit if receive queues are not configured yet */ + if (!eth_dev->data->nb_rx_queues) + return rc; + + new_pkt_size = new_mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + + VLAN_TAG_SIZE * BNXT_NUM_VLANS; + +#ifdef RTE_ARCH_X86 + /* + * If vector-mode tx/rx is active, disallow any MTU change that would + * require scattered receive support. + */ + if (eth_dev->data->dev_started && + (eth_dev->rx_pkt_burst == bnxt_recv_pkts_vec || + eth_dev->tx_pkt_burst == bnxt_xmit_pkts_vec) && + (new_pkt_size > + eth_dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) { + PMD_DRV_LOG(ERR, + "MTU change would require scattered rx support. "); + PMD_DRV_LOG(ERR, "Stop port before changing MTU.\n"); + return -EINVAL; + } +#endif + + if (new_mtu > RTE_ETHER_MTU) { + bp->flags |= BNXT_FLAG_JUMBO; + bp->eth_dev->data->dev_conf.rxmode.offloads |= + DEV_RX_OFFLOAD_JUMBO_FRAME; + } else { + bp->eth_dev->data->dev_conf.rxmode.offloads &= + ~DEV_RX_OFFLOAD_JUMBO_FRAME; + bp->flags &= ~BNXT_FLAG_JUMBO; + } + + /* Is there a change in mtu setting? */ + if (eth_dev->data->dev_conf.rxmode.max_rx_pkt_len == new_pkt_size) + return rc; + + for (i = 0; i < bp->nr_vnics; i++) { + struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; + uint16_t size = 0; + + vnic->mru = BNXT_VNIC_MRU(new_mtu); + rc = bnxt_hwrm_vnic_cfg(bp, vnic); + if (rc) + break; + + size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool); + size -= RTE_PKTMBUF_HEADROOM; + + if (size < new_mtu) { + rc = bnxt_hwrm_vnic_plcmode_cfg(bp, vnic); + if (rc) + return rc; + } + } + + if (!rc) + eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = new_pkt_size; + + PMD_DRV_LOG(INFO, "New MTU is %d\n", new_mtu); + + return rc; +} + +static int +bnxt_vlan_pvid_set_op(struct rte_eth_dev *dev, uint16_t pvid, int on) +{ + struct bnxt *bp = dev->data->dev_private; + uint16_t vlan = bp->vlan; + int rc; + + rc = is_bnxt_in_error(bp); + if (rc) + return rc; + + if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp)) { + PMD_DRV_LOG(ERR, + "PVID cannot be modified for this function\n"); + return -ENOTSUP; + } + bp->vlan = on ? 
pvid : 0; + + rc = bnxt_hwrm_set_default_vlan(bp, 0, 0); + if (rc) + bp->vlan = vlan; + return rc; +} + +static int +bnxt_dev_led_on_op(struct rte_eth_dev *dev) +{ + struct bnxt *bp = dev->data->dev_private; + int rc; + + rc = is_bnxt_in_error(bp); + if (rc) + return rc; + + return bnxt_hwrm_port_led_cfg(bp, true); +} + +static int +bnxt_dev_led_off_op(struct rte_eth_dev *dev) +{ + struct bnxt *bp = dev->data->dev_private; + int rc; + + rc = is_bnxt_in_error(bp); + if (rc) + return rc; + + return bnxt_hwrm_port_led_cfg(bp, false); +} + +static uint32_t +bnxt_rx_queue_count_op(struct rte_eth_dev *dev, uint16_t rx_queue_id) +{ + struct bnxt *bp = (struct bnxt *)dev->data->dev_private; + uint32_t desc = 0, raw_cons = 0, cons; + struct bnxt_cp_ring_info *cpr; + struct bnxt_rx_queue *rxq; + struct rx_pkt_cmpl *rxcmp; + int rc; + + rc = is_bnxt_in_error(bp); + if (rc) + return rc; + + rxq = dev->data->rx_queues[rx_queue_id]; + cpr = rxq->cp_ring; + raw_cons = cpr->cp_raw_cons; + + while (1) { + cons = RING_CMP(cpr->cp_ring_struct, raw_cons); + rte_prefetch0(&cpr->cp_desc_ring[cons]); + rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons]; + + if (!CMP_VALID(rxcmp, raw_cons, cpr->cp_ring_struct)) { + break; + } else { + raw_cons++; + desc++; + } + } + + return desc; +} + +static int +bnxt_rx_descriptor_status_op(void *rx_queue, uint16_t offset) +{ + struct bnxt_rx_queue *rxq = (struct bnxt_rx_queue *)rx_queue; + struct bnxt_rx_ring_info *rxr; + struct bnxt_cp_ring_info *cpr; + struct bnxt_sw_rx_bd *rx_buf; + struct rx_pkt_cmpl *rxcmp; + uint32_t cons, cp_cons; + int rc; + + if (!rxq) + return -EINVAL; + + rc = is_bnxt_in_error(rxq->bp); + if (rc) + return rc; + + cpr = rxq->cp_ring; + rxr = rxq->rx_ring; + + if (offset >= rxq->nb_rx_desc) + return -EINVAL; + + cons = RING_CMP(cpr->cp_ring_struct, offset); + cp_cons = cpr->cp_raw_cons; + rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons]; + + if (cons > cp_cons) { + if (CMPL_VALID(rxcmp, cpr->valid)) + return RTE_ETH_RX_DESC_DONE; + } else { + if (CMPL_VALID(rxcmp, !cpr->valid)) + return RTE_ETH_RX_DESC_DONE; + } + rx_buf = &rxr->rx_buf_ring[cons]; + if (rx_buf->mbuf == NULL) + return RTE_ETH_RX_DESC_UNAVAIL; + + + return RTE_ETH_RX_DESC_AVAIL; +} + +static int +bnxt_tx_descriptor_status_op(void *tx_queue, uint16_t offset) +{ + struct bnxt_tx_queue *txq = (struct bnxt_tx_queue *)tx_queue; + struct bnxt_tx_ring_info *txr; + struct bnxt_cp_ring_info *cpr; + struct bnxt_sw_tx_bd *tx_buf; + struct tx_pkt_cmpl *txcmp; + uint32_t cons, cp_cons; + int rc; + + if (!txq) + return -EINVAL; + + rc = is_bnxt_in_error(txq->bp); + if (rc) + return rc; + + cpr = txq->cp_ring; + txr = txq->tx_ring; + + if (offset >= txq->nb_tx_desc) + return -EINVAL; + + cons = RING_CMP(cpr->cp_ring_struct, offset); + txcmp = (struct tx_pkt_cmpl *)&cpr->cp_desc_ring[cons]; + cp_cons = cpr->cp_raw_cons; + + if (cons > cp_cons) { + if (CMPL_VALID(txcmp, cpr->valid)) + return RTE_ETH_TX_DESC_UNAVAIL; + } else { + if (CMPL_VALID(txcmp, !cpr->valid)) + return RTE_ETH_TX_DESC_UNAVAIL; + } + tx_buf = &txr->tx_buf_ring[cons]; + if (tx_buf->mbuf == NULL) + return RTE_ETH_TX_DESC_DONE; + + return RTE_ETH_TX_DESC_FULL; +} + +static struct bnxt_filter_info * +bnxt_match_and_validate_ether_filter(struct bnxt *bp, + struct rte_eth_ethertype_filter *efilter, + struct bnxt_vnic_info *vnic0, + struct bnxt_vnic_info *vnic, + int *ret) +{ + struct bnxt_filter_info *mfilter = NULL; + int match = 0; + *ret = 0; + + if (efilter->ether_type == RTE_ETHER_TYPE_IPV4 || + efilter->ether_type == 
RTE_ETHER_TYPE_IPV6) { + PMD_DRV_LOG(ERR, "invalid ether_type(0x%04x) in" + " ethertype filter.", efilter->ether_type); + *ret = -EINVAL; + goto exit; + } + if (efilter->queue >= bp->rx_nr_rings) { + PMD_DRV_LOG(ERR, "Invalid queue %d\n", efilter->queue); + *ret = -EINVAL; + goto exit; + } + + vnic0 = BNXT_GET_DEFAULT_VNIC(bp); + vnic = &bp->vnic_info[efilter->queue]; + if (vnic == NULL) { + PMD_DRV_LOG(ERR, "Invalid queue %d\n", efilter->queue); + *ret = -EINVAL; + goto exit; + } + + if (efilter->flags & RTE_ETHTYPE_FLAGS_DROP) { + STAILQ_FOREACH(mfilter, &vnic0->filter, next) { + if ((!memcmp(efilter->mac_addr.addr_bytes, + mfilter->l2_addr, RTE_ETHER_ADDR_LEN) && + mfilter->flags == + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP && + mfilter->ethertype == efilter->ether_type)) { + match = 1; + break; + } + } + } else { + STAILQ_FOREACH(mfilter, &vnic->filter, next) + if ((!memcmp(efilter->mac_addr.addr_bytes, + mfilter->l2_addr, RTE_ETHER_ADDR_LEN) && + mfilter->ethertype == efilter->ether_type && + mfilter->flags == + HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_RX)) { + match = 1; + break; + } + } + + if (match) + *ret = -EEXIST; + +exit: + return mfilter; +} + +static int +bnxt_ethertype_filter(struct rte_eth_dev *dev, + enum rte_filter_op filter_op, + void *arg) +{ + struct bnxt *bp = dev->data->dev_private; + struct rte_eth_ethertype_filter *efilter = + (struct rte_eth_ethertype_filter *)arg; + struct bnxt_filter_info *bfilter, *filter1; + struct bnxt_vnic_info *vnic, *vnic0; + int ret; + + if (filter_op == RTE_ETH_FILTER_NOP) + return 0; + + if (arg == NULL) { + PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.", + filter_op); + return -EINVAL; + } + + vnic0 = BNXT_GET_DEFAULT_VNIC(bp); + vnic = &bp->vnic_info[efilter->queue]; + + switch (filter_op) { + case RTE_ETH_FILTER_ADD: + bnxt_match_and_validate_ether_filter(bp, efilter, + vnic0, vnic, &ret); + if (ret < 0) + return ret; + + bfilter = bnxt_get_unused_filter(bp); + if (bfilter == NULL) { + PMD_DRV_LOG(ERR, + "Not enough resources for a new filter.\n"); + return -ENOMEM; + } + bfilter->filter_type = HWRM_CFA_NTUPLE_FILTER; + memcpy(bfilter->l2_addr, efilter->mac_addr.addr_bytes, + RTE_ETHER_ADDR_LEN); + memcpy(bfilter->dst_macaddr, efilter->mac_addr.addr_bytes, + RTE_ETHER_ADDR_LEN); + bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR; + bfilter->ethertype = efilter->ether_type; + bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE; + + filter1 = bnxt_get_l2_filter(bp, bfilter, vnic0); + if (filter1 == NULL) { + ret = -EINVAL; + goto cleanup; + } + bfilter->enables |= + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID; + bfilter->fw_l2_filter_id = filter1->fw_l2_filter_id; + + bfilter->dst_id = vnic->fw_vnic_id; + + if (efilter->flags & RTE_ETHTYPE_FLAGS_DROP) { + bfilter->flags = + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP; + } + + ret = bnxt_hwrm_set_ntuple_filter(bp, bfilter->dst_id, bfilter); + if (ret) + goto cleanup; + STAILQ_INSERT_TAIL(&vnic->filter, bfilter, next); + break; + case RTE_ETH_FILTER_DELETE: + filter1 = bnxt_match_and_validate_ether_filter(bp, efilter, + vnic0, vnic, &ret); + if (ret == -EEXIST) { + ret = bnxt_hwrm_clear_ntuple_filter(bp, filter1); + + STAILQ_REMOVE(&vnic->filter, filter1, bnxt_filter_info, + next); + bnxt_free_filter(bp, filter1); + } else if (ret == 0) { + PMD_DRV_LOG(ERR, "No matching filter found\n"); + } + break; + default: + PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op); + ret = -EINVAL; + goto error; + } + return ret; +cleanup: + 
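+ /* Error path for RTE_ETH_FILTER_ADD: free the partially configured
+  * filter before returning the failure to the caller.
+  */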
bnxt_free_filter(bp, bfilter); +error: + return ret; +} + +static inline int +parse_ntuple_filter(struct bnxt *bp, + struct rte_eth_ntuple_filter *nfilter, + struct bnxt_filter_info *bfilter) +{ + uint32_t en = 0; + + if (nfilter->queue >= bp->rx_nr_rings) { + PMD_DRV_LOG(ERR, "Invalid queue %d\n", nfilter->queue); + return -EINVAL; + } + + switch (nfilter->dst_port_mask) { + case UINT16_MAX: + bfilter->dst_port_mask = -1; + bfilter->dst_port = nfilter->dst_port; + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT | + NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK; + break; + default: + PMD_DRV_LOG(ERR, "invalid dst_port mask."); + return -EINVAL; + } + + bfilter->ip_addr_type = NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4; + en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO; + + switch (nfilter->proto_mask) { + case UINT8_MAX: + if (nfilter->proto == 17) /* IPPROTO_UDP */ + bfilter->ip_protocol = 17; + else if (nfilter->proto == 6) /* IPPROTO_TCP */ + bfilter->ip_protocol = 6; + else + return -EINVAL; + en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO; + break; + default: + PMD_DRV_LOG(ERR, "invalid protocol mask."); + return -EINVAL; + } + + switch (nfilter->dst_ip_mask) { + case UINT32_MAX: + bfilter->dst_ipaddr_mask[0] = -1; + bfilter->dst_ipaddr[0] = nfilter->dst_ip; + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR | + NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK; + break; + default: + PMD_DRV_LOG(ERR, "invalid dst_ip mask."); + return -EINVAL; + } + + switch (nfilter->src_ip_mask) { + case UINT32_MAX: + bfilter->src_ipaddr_mask[0] = -1; + bfilter->src_ipaddr[0] = nfilter->src_ip; + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR | + NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK; + break; + default: + PMD_DRV_LOG(ERR, "invalid src_ip mask."); + return -EINVAL; + } + + switch (nfilter->src_port_mask) { + case UINT16_MAX: + bfilter->src_port_mask = -1; + bfilter->src_port = nfilter->src_port; + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT | + NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK; + break; + default: + PMD_DRV_LOG(ERR, "invalid src_port mask."); + return -EINVAL; + } + + bfilter->enables = en; + return 0; +} + +static struct bnxt_filter_info* +bnxt_match_ntuple_filter(struct bnxt *bp, + struct bnxt_filter_info *bfilter, + struct bnxt_vnic_info **mvnic) +{ + struct bnxt_filter_info *mfilter = NULL; + int i; + + for (i = bp->nr_vnics - 1; i >= 0; i--) { + struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; + STAILQ_FOREACH(mfilter, &vnic->filter, next) { + if (bfilter->src_ipaddr[0] == mfilter->src_ipaddr[0] && + bfilter->src_ipaddr_mask[0] == + mfilter->src_ipaddr_mask[0] && + bfilter->src_port == mfilter->src_port && + bfilter->src_port_mask == mfilter->src_port_mask && + bfilter->dst_ipaddr[0] == mfilter->dst_ipaddr[0] && + bfilter->dst_ipaddr_mask[0] == + mfilter->dst_ipaddr_mask[0] && + bfilter->dst_port == mfilter->dst_port && + bfilter->dst_port_mask == mfilter->dst_port_mask && + bfilter->flags == mfilter->flags && + bfilter->enables == mfilter->enables) { + if (mvnic) + *mvnic = vnic; + return mfilter; + } + } + } + return NULL; +} + +static int +bnxt_cfg_ntuple_filter(struct bnxt *bp, + struct rte_eth_ntuple_filter *nfilter, + enum rte_filter_op filter_op) +{ + struct bnxt_filter_info *bfilter, *mfilter, *filter1; + struct bnxt_vnic_info *vnic, *vnic0, *mvnic; + int ret; + + if (nfilter->flags != RTE_5TUPLE_FLAGS) { + PMD_DRV_LOG(ERR, "only 5tuple is supported."); + return -EINVAL; + } + + if (nfilter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) { + PMD_DRV_LOG(ERR, "Ntuple filter: TCP flags not supported\n"); + return -EINVAL; + } + + 
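+ /* Build a scratch filter from the ntuple spec and look for an existing
+  * filter with the same pattern: ADD rejects an identical entry or
+  * re-points a matching filter to the new destination queue, while
+  * DELETE requires an exact match.
+  */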
bfilter = bnxt_get_unused_filter(bp); + if (bfilter == NULL) { + PMD_DRV_LOG(ERR, + "Not enough resources for a new filter.\n"); + return -ENOMEM; + } + ret = parse_ntuple_filter(bp, nfilter, bfilter); + if (ret < 0) + goto free_filter; + + vnic = &bp->vnic_info[nfilter->queue]; + vnic0 = BNXT_GET_DEFAULT_VNIC(bp); + filter1 = STAILQ_FIRST(&vnic0->filter); + if (filter1 == NULL) { + ret = -EINVAL; + goto free_filter; + } + + bfilter->dst_id = vnic->fw_vnic_id; + bfilter->fw_l2_filter_id = filter1->fw_l2_filter_id; + bfilter->enables |= + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID; + bfilter->ethertype = 0x800; + bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE; + + mfilter = bnxt_match_ntuple_filter(bp, bfilter, &mvnic); + + if (mfilter != NULL && filter_op == RTE_ETH_FILTER_ADD && + bfilter->dst_id == mfilter->dst_id) { + PMD_DRV_LOG(ERR, "filter exists.\n"); + ret = -EEXIST; + goto free_filter; + } else if (mfilter != NULL && filter_op == RTE_ETH_FILTER_ADD && + bfilter->dst_id != mfilter->dst_id) { + mfilter->dst_id = vnic->fw_vnic_id; + ret = bnxt_hwrm_set_ntuple_filter(bp, mfilter->dst_id, mfilter); + STAILQ_REMOVE(&mvnic->filter, mfilter, bnxt_filter_info, next); + STAILQ_INSERT_TAIL(&vnic->filter, mfilter, next); + PMD_DRV_LOG(ERR, "filter with matching pattern exists.\n"); + PMD_DRV_LOG(ERR, " Updated it to the new destination queue\n"); + goto free_filter; + } + if (mfilter == NULL && filter_op == RTE_ETH_FILTER_DELETE) { + PMD_DRV_LOG(ERR, "filter doesn't exist."); + ret = -ENOENT; + goto free_filter; + } + + if (filter_op == RTE_ETH_FILTER_ADD) { + bfilter->filter_type = HWRM_CFA_NTUPLE_FILTER; + ret = bnxt_hwrm_set_ntuple_filter(bp, bfilter->dst_id, bfilter); + if (ret) + goto free_filter; + STAILQ_INSERT_TAIL(&vnic->filter, bfilter, next); + } else { + if (mfilter == NULL) { + /* This should not happen. But for Coverity! 
*/ + ret = -ENOENT; + goto free_filter; + } + ret = bnxt_hwrm_clear_ntuple_filter(bp, mfilter); + + STAILQ_REMOVE(&vnic->filter, mfilter, bnxt_filter_info, next); + bnxt_free_filter(bp, mfilter); + bnxt_free_filter(bp, bfilter); + } + + return 0; +free_filter: + bnxt_free_filter(bp, bfilter); + return ret; +} + +static int +bnxt_ntuple_filter(struct rte_eth_dev *dev, + enum rte_filter_op filter_op, + void *arg) +{ + struct bnxt *bp = dev->data->dev_private; + int ret; + + if (filter_op == RTE_ETH_FILTER_NOP) + return 0; + + if (arg == NULL) { + PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.", + filter_op); + return -EINVAL; + } + + switch (filter_op) { + case RTE_ETH_FILTER_ADD: + ret = bnxt_cfg_ntuple_filter(bp, + (struct rte_eth_ntuple_filter *)arg, + filter_op); + break; + case RTE_ETH_FILTER_DELETE: + ret = bnxt_cfg_ntuple_filter(bp, + (struct rte_eth_ntuple_filter *)arg, + filter_op); + break; + default: + PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op); + ret = -EINVAL; + break; + } + return ret; +} + +static int +bnxt_parse_fdir_filter(struct bnxt *bp, + struct rte_eth_fdir_filter *fdir, + struct bnxt_filter_info *filter) +{ + enum rte_fdir_mode fdir_mode = + bp->eth_dev->data->dev_conf.fdir_conf.mode; + struct bnxt_vnic_info *vnic0, *vnic; + struct bnxt_filter_info *filter1; + uint32_t en = 0; + int i; + + if (fdir_mode == RTE_FDIR_MODE_PERFECT_TUNNEL) + return -EINVAL; + + filter->l2_ovlan = fdir->input.flow_ext.vlan_tci; + en |= EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID; + + switch (fdir->input.flow_type) { + case RTE_ETH_FLOW_IPV4: + case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER: + /* FALLTHROUGH */ + filter->src_ipaddr[0] = fdir->input.flow.ip4_flow.src_ip; + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR; + filter->dst_ipaddr[0] = fdir->input.flow.ip4_flow.dst_ip; + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR; + filter->ip_protocol = fdir->input.flow.ip4_flow.proto; + en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO; + filter->ip_addr_type = + NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4; + filter->src_ipaddr_mask[0] = 0xffffffff; + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK; + filter->dst_ipaddr_mask[0] = 0xffffffff; + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK; + filter->ethertype = 0x800; + filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE; + break; + case RTE_ETH_FLOW_NONFRAG_IPV4_TCP: + filter->src_port = fdir->input.flow.tcp4_flow.src_port; + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT; + filter->dst_port = fdir->input.flow.tcp4_flow.dst_port; + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT; + filter->dst_port_mask = 0xffff; + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK; + filter->src_port_mask = 0xffff; + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK; + filter->src_ipaddr[0] = fdir->input.flow.tcp4_flow.ip.src_ip; + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR; + filter->dst_ipaddr[0] = fdir->input.flow.tcp4_flow.ip.dst_ip; + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR; + filter->ip_protocol = 6; + en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO; + filter->ip_addr_type = + NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4; + filter->src_ipaddr_mask[0] = 0xffffffff; + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK; + filter->dst_ipaddr_mask[0] = 0xffffffff; + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK; + filter->ethertype = 0x800; + filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE; + break; + case RTE_ETH_FLOW_NONFRAG_IPV4_UDP: + filter->src_port = fdir->input.flow.udp4_flow.src_port; + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT; + filter->dst_port = 
fdir->input.flow.udp4_flow.dst_port; + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT; + filter->dst_port_mask = 0xffff; + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK; + filter->src_port_mask = 0xffff; + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK; + filter->src_ipaddr[0] = fdir->input.flow.udp4_flow.ip.src_ip; + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR; + filter->dst_ipaddr[0] = fdir->input.flow.udp4_flow.ip.dst_ip; + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR; + filter->ip_protocol = 17; + en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO; + filter->ip_addr_type = + NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4; + filter->src_ipaddr_mask[0] = 0xffffffff; + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK; + filter->dst_ipaddr_mask[0] = 0xffffffff; + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK; + filter->ethertype = 0x800; + filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE; + break; + case RTE_ETH_FLOW_IPV6: + case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER: + /* FALLTHROUGH */ + filter->ip_addr_type = + NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6; + filter->ip_protocol = fdir->input.flow.ipv6_flow.proto; + en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO; + rte_memcpy(filter->src_ipaddr, + fdir->input.flow.ipv6_flow.src_ip, 16); + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR; + rte_memcpy(filter->dst_ipaddr, + fdir->input.flow.ipv6_flow.dst_ip, 16); + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR; + memset(filter->dst_ipaddr_mask, 0xff, 16); + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK; + memset(filter->src_ipaddr_mask, 0xff, 16); + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK; + filter->ethertype = 0x86dd; + filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE; + break; + case RTE_ETH_FLOW_NONFRAG_IPV6_TCP: + filter->src_port = fdir->input.flow.tcp6_flow.src_port; + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT; + filter->dst_port = fdir->input.flow.tcp6_flow.dst_port; + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT; + filter->dst_port_mask = 0xffff; + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK; + filter->src_port_mask = 0xffff; + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK; + filter->ip_addr_type = + NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6; + filter->ip_protocol = fdir->input.flow.tcp6_flow.ip.proto; + en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO; + rte_memcpy(filter->src_ipaddr, + fdir->input.flow.tcp6_flow.ip.src_ip, 16); + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR; + rte_memcpy(filter->dst_ipaddr, + fdir->input.flow.tcp6_flow.ip.dst_ip, 16); + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR; + memset(filter->dst_ipaddr_mask, 0xff, 16); + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK; + memset(filter->src_ipaddr_mask, 0xff, 16); + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK; + filter->ethertype = 0x86dd; + filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE; + break; + case RTE_ETH_FLOW_NONFRAG_IPV6_UDP: + filter->src_port = fdir->input.flow.udp6_flow.src_port; + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT; + filter->dst_port = fdir->input.flow.udp6_flow.dst_port; + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT; + filter->dst_port_mask = 0xffff; + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK; + filter->src_port_mask = 0xffff; + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK; + filter->ip_addr_type = + NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6; + filter->ip_protocol = fdir->input.flow.udp6_flow.ip.proto; + en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO; + rte_memcpy(filter->src_ipaddr, + fdir->input.flow.udp6_flow.ip.src_ip, 16); + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR; + 
rte_memcpy(filter->dst_ipaddr, + fdir->input.flow.udp6_flow.ip.dst_ip, 16); + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR; + memset(filter->dst_ipaddr_mask, 0xff, 16); + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK; + memset(filter->src_ipaddr_mask, 0xff, 16); + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK; + filter->ethertype = 0x86dd; + filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE; + break; + case RTE_ETH_FLOW_L2_PAYLOAD: + filter->ethertype = fdir->input.flow.l2_flow.ether_type; + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE; + break; + case RTE_ETH_FLOW_VXLAN: + if (fdir->action.behavior == RTE_ETH_FDIR_REJECT) + return -EINVAL; + filter->vni = fdir->input.flow.tunnel_flow.tunnel_id; + filter->tunnel_type = + CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN; + en |= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE; + break; + case RTE_ETH_FLOW_NVGRE: + if (fdir->action.behavior == RTE_ETH_FDIR_REJECT) + return -EINVAL; + filter->vni = fdir->input.flow.tunnel_flow.tunnel_id; + filter->tunnel_type = + CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE; + en |= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE; + break; + case RTE_ETH_FLOW_UNKNOWN: + case RTE_ETH_FLOW_RAW: + case RTE_ETH_FLOW_FRAG_IPV4: + case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP: + case RTE_ETH_FLOW_FRAG_IPV6: + case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP: + case RTE_ETH_FLOW_IPV6_EX: + case RTE_ETH_FLOW_IPV6_TCP_EX: + case RTE_ETH_FLOW_IPV6_UDP_EX: + case RTE_ETH_FLOW_GENEVE: + /* FALLTHROUGH */ + default: + return -EINVAL; + } + + vnic0 = BNXT_GET_DEFAULT_VNIC(bp); + vnic = &bp->vnic_info[fdir->action.rx_queue]; + if (vnic == NULL) { + PMD_DRV_LOG(ERR, "Invalid queue %d\n", fdir->action.rx_queue); + return -EINVAL; + } + + if (fdir_mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) { + rte_memcpy(filter->dst_macaddr, + fdir->input.flow.mac_vlan_flow.mac_addr.addr_bytes, 6); + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR; + } + + if (fdir->action.behavior == RTE_ETH_FDIR_REJECT) { + filter->flags = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP; + filter1 = STAILQ_FIRST(&vnic0->filter); + //filter1 = bnxt_get_l2_filter(bp, filter, vnic0); + } else { + filter->dst_id = vnic->fw_vnic_id; + for (i = 0; i < RTE_ETHER_ADDR_LEN; i++) + if (filter->dst_macaddr[i] == 0x00) + filter1 = STAILQ_FIRST(&vnic0->filter); + else + filter1 = bnxt_get_l2_filter(bp, filter, vnic); + } + + if (filter1 == NULL) + return -EINVAL; + + en |= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID; + filter->fw_l2_filter_id = filter1->fw_l2_filter_id; + + filter->enables = en; + + return 0; +} + +static struct bnxt_filter_info * +bnxt_match_fdir(struct bnxt *bp, struct bnxt_filter_info *nf, + struct bnxt_vnic_info **mvnic) +{ + struct bnxt_filter_info *mf = NULL; + int i; + + for (i = bp->nr_vnics - 1; i >= 0; i--) { + struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; + + STAILQ_FOREACH(mf, &vnic->filter, next) { + if (mf->filter_type == nf->filter_type && + mf->flags == nf->flags && + mf->src_port == nf->src_port && + mf->src_port_mask == nf->src_port_mask && + mf->dst_port == nf->dst_port && + mf->dst_port_mask == nf->dst_port_mask && + mf->ip_protocol == nf->ip_protocol && + mf->ip_addr_type == nf->ip_addr_type && + mf->ethertype == nf->ethertype && + mf->vni == nf->vni && + mf->tunnel_type == nf->tunnel_type && + mf->l2_ovlan == nf->l2_ovlan && + mf->l2_ovlan_mask == nf->l2_ovlan_mask && + mf->l2_ivlan == nf->l2_ivlan && + mf->l2_ivlan_mask == nf->l2_ivlan_mask && + !memcmp(mf->l2_addr, nf->l2_addr, + RTE_ETHER_ADDR_LEN) && + 
!memcmp(mf->l2_addr_mask, nf->l2_addr_mask, + RTE_ETHER_ADDR_LEN) && + !memcmp(mf->src_macaddr, nf->src_macaddr, + RTE_ETHER_ADDR_LEN) && + !memcmp(mf->dst_macaddr, nf->dst_macaddr, + RTE_ETHER_ADDR_LEN) && + !memcmp(mf->src_ipaddr, nf->src_ipaddr, + sizeof(nf->src_ipaddr)) && + !memcmp(mf->src_ipaddr_mask, nf->src_ipaddr_mask, + sizeof(nf->src_ipaddr_mask)) && + !memcmp(mf->dst_ipaddr, nf->dst_ipaddr, + sizeof(nf->dst_ipaddr)) && + !memcmp(mf->dst_ipaddr_mask, nf->dst_ipaddr_mask, + sizeof(nf->dst_ipaddr_mask))) { + if (mvnic) + *mvnic = vnic; + return mf; + } + } + } + return NULL; +} + +static int +bnxt_fdir_filter(struct rte_eth_dev *dev, + enum rte_filter_op filter_op, + void *arg) +{ + struct bnxt *bp = dev->data->dev_private; + struct rte_eth_fdir_filter *fdir = (struct rte_eth_fdir_filter *)arg; + struct bnxt_filter_info *filter, *match; + struct bnxt_vnic_info *vnic, *mvnic; + int ret = 0, i; + + if (filter_op == RTE_ETH_FILTER_NOP) + return 0; + + if (arg == NULL && filter_op != RTE_ETH_FILTER_FLUSH) + return -EINVAL; + + switch (filter_op) { + case RTE_ETH_FILTER_ADD: + case RTE_ETH_FILTER_DELETE: + /* FALLTHROUGH */ + filter = bnxt_get_unused_filter(bp); + if (filter == NULL) { + PMD_DRV_LOG(ERR, + "Not enough resources for a new flow.\n"); + return -ENOMEM; + } + + ret = bnxt_parse_fdir_filter(bp, fdir, filter); + if (ret != 0) + goto free_filter; + filter->filter_type = HWRM_CFA_NTUPLE_FILTER; + + if (fdir->action.behavior == RTE_ETH_FDIR_REJECT) + vnic = &bp->vnic_info[0]; + else + vnic = &bp->vnic_info[fdir->action.rx_queue]; + + match = bnxt_match_fdir(bp, filter, &mvnic); + if (match != NULL && filter_op == RTE_ETH_FILTER_ADD) { + if (match->dst_id == vnic->fw_vnic_id) { + PMD_DRV_LOG(ERR, "Flow already exists.\n"); + ret = -EEXIST; + goto free_filter; + } else { + match->dst_id = vnic->fw_vnic_id; + ret = bnxt_hwrm_set_ntuple_filter(bp, + match->dst_id, + match); + STAILQ_REMOVE(&mvnic->filter, match, + bnxt_filter_info, next); + STAILQ_INSERT_TAIL(&vnic->filter, match, next); + PMD_DRV_LOG(ERR, + "Filter with matching pattern exist\n"); + PMD_DRV_LOG(ERR, + "Updated it to new destination q\n"); + goto free_filter; + } + } + if (match == NULL && filter_op == RTE_ETH_FILTER_DELETE) { + PMD_DRV_LOG(ERR, "Flow does not exist.\n"); + ret = -ENOENT; + goto free_filter; + } + + if (filter_op == RTE_ETH_FILTER_ADD) { + ret = bnxt_hwrm_set_ntuple_filter(bp, + filter->dst_id, + filter); + if (ret) + goto free_filter; + STAILQ_INSERT_TAIL(&vnic->filter, filter, next); + } else { + ret = bnxt_hwrm_clear_ntuple_filter(bp, match); + STAILQ_REMOVE(&vnic->filter, match, + bnxt_filter_info, next); + bnxt_free_filter(bp, match); + bnxt_free_filter(bp, filter); + } + break; + case RTE_ETH_FILTER_FLUSH: + for (i = bp->nr_vnics - 1; i >= 0; i--) { + struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; + + STAILQ_FOREACH(filter, &vnic->filter, next) { + if (filter->filter_type == + HWRM_CFA_NTUPLE_FILTER) { + ret = + bnxt_hwrm_clear_ntuple_filter(bp, + filter); + STAILQ_REMOVE(&vnic->filter, filter, + bnxt_filter_info, next); + } + } + } + return ret; + case RTE_ETH_FILTER_UPDATE: + case RTE_ETH_FILTER_STATS: + case RTE_ETH_FILTER_INFO: + PMD_DRV_LOG(ERR, "operation %u not implemented", filter_op); + break; + default: + PMD_DRV_LOG(ERR, "unknown operation %u", filter_op); + ret = -EINVAL; + break; + } + return ret; + +free_filter: + bnxt_free_filter(bp, filter); + return ret; +} + +static int +bnxt_filter_ctrl_op(struct rte_eth_dev *dev, + enum rte_filter_type filter_type, + enum rte_filter_op 
filter_op, void *arg) +{ + struct bnxt *bp = dev->data->dev_private; + int ret = 0; + + ret = is_bnxt_in_error(dev->data->dev_private); + if (ret) + return ret; + + switch (filter_type) { + case RTE_ETH_FILTER_TUNNEL: + PMD_DRV_LOG(ERR, + "filter type: %d: To be implemented\n", filter_type); + break; + case RTE_ETH_FILTER_FDIR: + ret = bnxt_fdir_filter(dev, filter_op, arg); + break; + case RTE_ETH_FILTER_NTUPLE: + ret = bnxt_ntuple_filter(dev, filter_op, arg); + break; + case RTE_ETH_FILTER_ETHERTYPE: + ret = bnxt_ethertype_filter(dev, filter_op, arg); + break; + case RTE_ETH_FILTER_GENERIC: + if (filter_op != RTE_ETH_FILTER_GET) + return -EINVAL; + if (BNXT_TRUFLOW_EN(bp)) + *(const void **)arg = &bnxt_ulp_rte_flow_ops; + else + *(const void **)arg = &bnxt_flow_ops; + break; + default: + PMD_DRV_LOG(ERR, + "Filter type (%d) not supported", filter_type); + ret = -EINVAL; + break; + } + return ret; +} + +static const uint32_t * +bnxt_dev_supported_ptypes_get_op(struct rte_eth_dev *dev) +{ + static const uint32_t ptypes[] = { + RTE_PTYPE_L2_ETHER_VLAN, + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN, + RTE_PTYPE_L3_IPV6_EXT_UNKNOWN, + RTE_PTYPE_L4_ICMP, + RTE_PTYPE_L4_TCP, + RTE_PTYPE_L4_UDP, + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN, + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN, + RTE_PTYPE_INNER_L4_ICMP, + RTE_PTYPE_INNER_L4_TCP, + RTE_PTYPE_INNER_L4_UDP, + RTE_PTYPE_UNKNOWN + }; + + if (!dev->rx_pkt_burst) + return NULL; + + return ptypes; +} + +static int bnxt_map_regs(struct bnxt *bp, uint32_t *reg_arr, int count, + int reg_win) +{ + uint32_t reg_base = *reg_arr & 0xfffff000; + uint32_t win_off; + int i; + + for (i = 0; i < count; i++) { + if ((reg_arr[i] & 0xfffff000) != reg_base) + return -ERANGE; + } + win_off = BNXT_GRCPF_REG_WINDOW_BASE_OUT + (reg_win - 1) * 4; + rte_write32(reg_base, (uint8_t *)bp->bar0 + win_off); + return 0; +} + +static int bnxt_map_ptp_regs(struct bnxt *bp) +{ + struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; + uint32_t *reg_arr; + int rc, i; + + reg_arr = ptp->rx_regs; + rc = bnxt_map_regs(bp, reg_arr, BNXT_PTP_RX_REGS, 5); + if (rc) + return rc; + + reg_arr = ptp->tx_regs; + rc = bnxt_map_regs(bp, reg_arr, BNXT_PTP_TX_REGS, 6); + if (rc) + return rc; + + for (i = 0; i < BNXT_PTP_RX_REGS; i++) + ptp->rx_mapped_regs[i] = 0x5000 + (ptp->rx_regs[i] & 0xfff); + + for (i = 0; i < BNXT_PTP_TX_REGS; i++) + ptp->tx_mapped_regs[i] = 0x6000 + (ptp->tx_regs[i] & 0xfff); + + return 0; +} + +static void bnxt_unmap_ptp_regs(struct bnxt *bp) +{ + rte_write32(0, (uint8_t *)bp->bar0 + + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 16); + rte_write32(0, (uint8_t *)bp->bar0 + + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 20); +} + +static uint64_t bnxt_cc_read(struct bnxt *bp) +{ + uint64_t ns; + + ns = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + + BNXT_GRCPF_REG_SYNC_TIME)); + ns |= (uint64_t)(rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + + BNXT_GRCPF_REG_SYNC_TIME + 4))) << 32; + return ns; +} + +static int bnxt_get_tx_ts(struct bnxt *bp, uint64_t *ts) +{ + struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; + uint32_t fifo; + + fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + + ptp->tx_mapped_regs[BNXT_PTP_TX_FIFO])); + if (fifo & BNXT_PTP_TX_FIFO_EMPTY) + return -EAGAIN; + + fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + + ptp->tx_mapped_regs[BNXT_PTP_TX_FIFO])); + *ts = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + + ptp->tx_mapped_regs[BNXT_PTP_TX_TS_L])); + *ts |= (uint64_t)rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + + ptp->tx_mapped_regs[BNXT_PTP_TX_TS_H])) << 32; + + return 0; +} + +static int 
bnxt_get_rx_ts(struct bnxt *bp, uint64_t *ts) +{ + struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; + struct bnxt_pf_info *pf = bp->pf; + uint16_t port_id; + uint32_t fifo; + + if (!ptp) + return -ENODEV; + + fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + + ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO])); + if (!(fifo & BNXT_PTP_RX_FIFO_PENDING)) + return -EAGAIN; + + port_id = pf->port_id; + rte_write32(1 << port_id, (uint8_t *)bp->bar0 + + ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO_ADV]); + + fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + + ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO])); + if (fifo & BNXT_PTP_RX_FIFO_PENDING) { +/* bnxt_clr_rx_ts(bp); TBD */ + return -EBUSY; + } + + *ts = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + + ptp->rx_mapped_regs[BNXT_PTP_RX_TS_L])); + *ts |= (uint64_t)rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + + ptp->rx_mapped_regs[BNXT_PTP_RX_TS_H])) << 32; + + return 0; +} + +static int +bnxt_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts) +{ + uint64_t ns; + struct bnxt *bp = dev->data->dev_private; + struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; + + if (!ptp) + return 0; + + ns = rte_timespec_to_ns(ts); + /* Set the timecounters to a new value. */ + ptp->tc.nsec = ns; + + return 0; +} + +static int +bnxt_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts) +{ + struct bnxt *bp = dev->data->dev_private; + struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; + uint64_t ns, systime_cycles = 0; + int rc = 0; + + if (!ptp) + return 0; + + if (BNXT_CHIP_THOR(bp)) + rc = bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_CURRENT_TIME, + &systime_cycles); + else + systime_cycles = bnxt_cc_read(bp); + + ns = rte_timecounter_update(&ptp->tc, systime_cycles); + *ts = rte_ns_to_timespec(ns); + + return rc; +} +static int +bnxt_timesync_enable(struct rte_eth_dev *dev) +{ + struct bnxt *bp = dev->data->dev_private; + struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; + uint32_t shift = 0; + int rc; + + if (!ptp) + return 0; + + ptp->rx_filter = 1; + ptp->tx_tstamp_en = 1; + ptp->rxctl = BNXT_PTP_MSG_EVENTS; + + rc = bnxt_hwrm_ptp_cfg(bp); + if (rc) + return rc; + + memset(&ptp->tc, 0, sizeof(struct rte_timecounter)); + memset(&ptp->rx_tstamp_tc, 0, sizeof(struct rte_timecounter)); + memset(&ptp->tx_tstamp_tc, 0, sizeof(struct rte_timecounter)); + + ptp->tc.cc_mask = BNXT_CYCLECOUNTER_MASK; + ptp->tc.cc_shift = shift; + ptp->tc.nsec_mask = (1ULL << shift) - 1; + + ptp->rx_tstamp_tc.cc_mask = BNXT_CYCLECOUNTER_MASK; + ptp->rx_tstamp_tc.cc_shift = shift; + ptp->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1; + + ptp->tx_tstamp_tc.cc_mask = BNXT_CYCLECOUNTER_MASK; + ptp->tx_tstamp_tc.cc_shift = shift; + ptp->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1; + + if (!BNXT_CHIP_THOR(bp)) + bnxt_map_ptp_regs(bp); + + return 0; +} + +static int +bnxt_timesync_disable(struct rte_eth_dev *dev) +{ + struct bnxt *bp = dev->data->dev_private; + struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; + + if (!ptp) + return 0; + + ptp->rx_filter = 0; + ptp->tx_tstamp_en = 0; + ptp->rxctl = 0; + + bnxt_hwrm_ptp_cfg(bp); + + if (!BNXT_CHIP_THOR(bp)) + bnxt_unmap_ptp_regs(bp); + + return 0; +} + +static int +bnxt_timesync_read_rx_timestamp(struct rte_eth_dev *dev, + struct timespec *timestamp, + uint32_t flags __rte_unused) +{ + struct bnxt *bp = dev->data->dev_private; + struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; + uint64_t rx_tstamp_cycles = 0; + uint64_t ns; + + if (!ptp) + return 0; + + if (BNXT_CHIP_THOR(bp)) + rx_tstamp_cycles = ptp->rx_timestamp; + else + bnxt_get_rx_ts(bp, &rx_tstamp_cycles); + + ns = 
rte_timecounter_update(&ptp->rx_tstamp_tc, rx_tstamp_cycles); + *timestamp = rte_ns_to_timespec(ns); + return 0; +} + +static int +bnxt_timesync_read_tx_timestamp(struct rte_eth_dev *dev, + struct timespec *timestamp) +{ + struct bnxt *bp = dev->data->dev_private; + struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; + uint64_t tx_tstamp_cycles = 0; + uint64_t ns; + int rc = 0; + + if (!ptp) + return 0; + + if (BNXT_CHIP_THOR(bp)) + rc = bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_PATH_TX, + &tx_tstamp_cycles); + else + rc = bnxt_get_tx_ts(bp, &tx_tstamp_cycles); + + ns = rte_timecounter_update(&ptp->tx_tstamp_tc, tx_tstamp_cycles); + *timestamp = rte_ns_to_timespec(ns); + + return rc; +} + +static int +bnxt_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta) +{ + struct bnxt *bp = dev->data->dev_private; + struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; + + if (!ptp) + return 0; + + ptp->tc.nsec += delta; + + return 0; +} + +static int +bnxt_get_eeprom_length_op(struct rte_eth_dev *dev) +{ + struct bnxt *bp = dev->data->dev_private; + int rc; + uint32_t dir_entries; + uint32_t entry_length; + + rc = is_bnxt_in_error(bp); + if (rc) + return rc; + + PMD_DRV_LOG(INFO, PCI_PRI_FMT "\n", + bp->pdev->addr.domain, bp->pdev->addr.bus, + bp->pdev->addr.devid, bp->pdev->addr.function); + + rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length); + if (rc != 0) + return rc; + + return dir_entries * entry_length; +} + +static int +bnxt_get_eeprom_op(struct rte_eth_dev *dev, + struct rte_dev_eeprom_info *in_eeprom) +{ + struct bnxt *bp = dev->data->dev_private; + uint32_t index; + uint32_t offset; + int rc; + + rc = is_bnxt_in_error(bp); + if (rc) + return rc; + + PMD_DRV_LOG(INFO, PCI_PRI_FMT " in_eeprom->offset = %d len = %d\n", + bp->pdev->addr.domain, bp->pdev->addr.bus, + bp->pdev->addr.devid, bp->pdev->addr.function, + in_eeprom->offset, in_eeprom->length); + + if (in_eeprom->offset == 0) /* special offset value to get directory */ + return bnxt_get_nvram_directory(bp, in_eeprom->length, + in_eeprom->data); + + index = in_eeprom->offset >> 24; + offset = in_eeprom->offset & 0xffffff; + + if (index != 0) + return bnxt_hwrm_get_nvram_item(bp, index - 1, offset, + in_eeprom->length, in_eeprom->data); + + return 0; +} + +static bool bnxt_dir_type_is_ape_bin_format(uint16_t dir_type) +{ + switch (dir_type) { + case BNX_DIR_TYPE_CHIMP_PATCH: + case BNX_DIR_TYPE_BOOTCODE: + case BNX_DIR_TYPE_BOOTCODE_2: + case BNX_DIR_TYPE_APE_FW: + case BNX_DIR_TYPE_APE_PATCH: + case BNX_DIR_TYPE_KONG_FW: + case BNX_DIR_TYPE_KONG_PATCH: + case BNX_DIR_TYPE_BONO_FW: + case BNX_DIR_TYPE_BONO_PATCH: + /* FALLTHROUGH */ + return true; + } + + return false; +} + +static bool bnxt_dir_type_is_other_exec_format(uint16_t dir_type) +{ + switch (dir_type) { + case BNX_DIR_TYPE_AVS: + case BNX_DIR_TYPE_EXP_ROM_MBA: + case BNX_DIR_TYPE_PCIE: + case BNX_DIR_TYPE_TSCF_UCODE: + case BNX_DIR_TYPE_EXT_PHY: + case BNX_DIR_TYPE_CCM: + case BNX_DIR_TYPE_ISCSI_BOOT: + case BNX_DIR_TYPE_ISCSI_BOOT_IPV6: + case BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6: + /* FALLTHROUGH */ + return true; + } + + return false; +} + +static bool bnxt_dir_type_is_executable(uint16_t dir_type) +{ + return bnxt_dir_type_is_ape_bin_format(dir_type) || + bnxt_dir_type_is_other_exec_format(dir_type); +} + +static int +bnxt_set_eeprom_op(struct rte_eth_dev *dev, + struct rte_dev_eeprom_info *in_eeprom) +{ + struct bnxt *bp = dev->data->dev_private; + uint8_t index, dir_op; + uint16_t type, ext, ordinal, attr; + int rc; + + rc = is_bnxt_in_error(bp); + if (rc) + return rc; + + 
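+ /* in_eeprom->magic carries the NVM item type in its upper 16 bits.
+  * Type 0xffff selects a directory operation, with the directory index
+  * in bits 0-7 and the operation code in bits 8-15; for other types the
+  * lower 16 bits hold the type-specific extension, while in_eeprom->offset
+  * packs the ordinal (upper 16 bits) and attributes (lower 16 bits).
+  */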
PMD_DRV_LOG(INFO, PCI_PRI_FMT " in_eeprom->offset = %d len = %d\n", + bp->pdev->addr.domain, bp->pdev->addr.bus, + bp->pdev->addr.devid, bp->pdev->addr.function, + in_eeprom->offset, in_eeprom->length); + + if (!BNXT_PF(bp)) { + PMD_DRV_LOG(ERR, "NVM write not supported from a VF\n"); + return -EINVAL; + } + + type = in_eeprom->magic >> 16; + + if (type == 0xffff) { /* special value for directory operations */ + index = in_eeprom->magic & 0xff; + dir_op = in_eeprom->magic >> 8; + if (index == 0) + return -EINVAL; + switch (dir_op) { + case 0x0e: /* erase */ + if (in_eeprom->offset != ~in_eeprom->magic) + return -EINVAL; + return bnxt_hwrm_erase_nvram_directory(bp, index - 1); + default: + return -EINVAL; + } + } + + /* Create or re-write an NVM item: */ + if (bnxt_dir_type_is_executable(type) == true) + return -EOPNOTSUPP; + ext = in_eeprom->magic & 0xffff; + ordinal = in_eeprom->offset >> 16; + attr = in_eeprom->offset & 0xffff; + + return bnxt_hwrm_flash_nvram(bp, type, ordinal, ext, attr, + in_eeprom->data, in_eeprom->length); +} + +/* + * Initialization + */ + +static const struct eth_dev_ops bnxt_dev_ops = { + .dev_infos_get = bnxt_dev_info_get_op, + .dev_close = bnxt_dev_close_op, + .dev_configure = bnxt_dev_configure_op, + .dev_start = bnxt_dev_start_op, + .dev_stop = bnxt_dev_stop_op, + .dev_set_link_up = bnxt_dev_set_link_up_op, + .dev_set_link_down = bnxt_dev_set_link_down_op, + .stats_get = bnxt_stats_get_op, + .stats_reset = bnxt_stats_reset_op, + .rx_queue_setup = bnxt_rx_queue_setup_op, + .rx_queue_release = bnxt_rx_queue_release_op, + .tx_queue_setup = bnxt_tx_queue_setup_op, + .tx_queue_release = bnxt_tx_queue_release_op, + .rx_queue_intr_enable = bnxt_rx_queue_intr_enable_op, + .rx_queue_intr_disable = bnxt_rx_queue_intr_disable_op, + .reta_update = bnxt_reta_update_op, + .reta_query = bnxt_reta_query_op, + .rss_hash_update = bnxt_rss_hash_update_op, + .rss_hash_conf_get = bnxt_rss_hash_conf_get_op, + .link_update = bnxt_link_update_op, + .promiscuous_enable = bnxt_promiscuous_enable_op, + .promiscuous_disable = bnxt_promiscuous_disable_op, + .allmulticast_enable = bnxt_allmulticast_enable_op, + .allmulticast_disable = bnxt_allmulticast_disable_op, + .mac_addr_add = bnxt_mac_addr_add_op, + .mac_addr_remove = bnxt_mac_addr_remove_op, + .flow_ctrl_get = bnxt_flow_ctrl_get_op, + .flow_ctrl_set = bnxt_flow_ctrl_set_op, + .udp_tunnel_port_add = bnxt_udp_tunnel_port_add_op, + .udp_tunnel_port_del = bnxt_udp_tunnel_port_del_op, + .vlan_filter_set = bnxt_vlan_filter_set_op, + .vlan_offload_set = bnxt_vlan_offload_set_op, + .vlan_tpid_set = bnxt_vlan_tpid_set_op, + .vlan_pvid_set = bnxt_vlan_pvid_set_op, + .mtu_set = bnxt_mtu_set_op, + .mac_addr_set = bnxt_set_default_mac_addr_op, + .xstats_get = bnxt_dev_xstats_get_op, + .xstats_get_names = bnxt_dev_xstats_get_names_op, + .xstats_reset = bnxt_dev_xstats_reset_op, + .fw_version_get = bnxt_fw_version_get, + .set_mc_addr_list = bnxt_dev_set_mc_addr_list_op, + .rxq_info_get = bnxt_rxq_info_get_op, + .txq_info_get = bnxt_txq_info_get_op, + .dev_led_on = bnxt_dev_led_on_op, + .dev_led_off = bnxt_dev_led_off_op, + .xstats_get_by_id = bnxt_dev_xstats_get_by_id_op, + .xstats_get_names_by_id = bnxt_dev_xstats_get_names_by_id_op, + .rx_queue_count = bnxt_rx_queue_count_op, + .rx_descriptor_status = bnxt_rx_descriptor_status_op, + .tx_descriptor_status = bnxt_tx_descriptor_status_op, + .rx_queue_start = bnxt_rx_queue_start, + .rx_queue_stop = bnxt_rx_queue_stop, + .tx_queue_start = bnxt_tx_queue_start, + .tx_queue_stop = bnxt_tx_queue_stop, + 
.filter_ctrl = bnxt_filter_ctrl_op, + .dev_supported_ptypes_get = bnxt_dev_supported_ptypes_get_op, + .get_eeprom_length = bnxt_get_eeprom_length_op, + .get_eeprom = bnxt_get_eeprom_op, + .set_eeprom = bnxt_set_eeprom_op, + .timesync_enable = bnxt_timesync_enable, + .timesync_disable = bnxt_timesync_disable, + .timesync_read_time = bnxt_timesync_read_time, + .timesync_write_time = bnxt_timesync_write_time, + .timesync_adjust_time = bnxt_timesync_adjust_time, + .timesync_read_rx_timestamp = bnxt_timesync_read_rx_timestamp, + .timesync_read_tx_timestamp = bnxt_timesync_read_tx_timestamp, +}; + +static uint32_t bnxt_map_reset_regs(struct bnxt *bp, uint32_t reg) +{ + uint32_t offset; + + /* Only pre-map the reset GRC registers using window 3 */ + rte_write32(reg & 0xfffff000, (uint8_t *)bp->bar0 + + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 8); + + offset = BNXT_GRCP_WINDOW_3_BASE + (reg & 0xffc); + + return offset; +} + +int bnxt_map_fw_health_status_regs(struct bnxt *bp) +{ + struct bnxt_error_recovery_info *info = bp->recovery_info; + uint32_t reg_base = 0xffffffff; + int i; + + /* Only pre-map the monitoring GRC registers using window 2 */ + for (i = 0; i < BNXT_FW_STATUS_REG_CNT; i++) { + uint32_t reg = info->status_regs[i]; + + if (BNXT_FW_STATUS_REG_TYPE(reg) != BNXT_FW_STATUS_REG_TYPE_GRC) + continue; + + if (reg_base == 0xffffffff) + reg_base = reg & 0xfffff000; + if ((reg & 0xfffff000) != reg_base) + return -ERANGE; + + /* Use mask 0xffc as the Lower 2 bits indicates + * address space location + */ + info->mapped_status_regs[i] = BNXT_GRCP_WINDOW_2_BASE + + (reg & 0xffc); + } + + if (reg_base == 0xffffffff) + return 0; + + rte_write32(reg_base, (uint8_t *)bp->bar0 + + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4); + + return 0; +} + +static void bnxt_write_fw_reset_reg(struct bnxt *bp, uint32_t index) +{ + struct bnxt_error_recovery_info *info = bp->recovery_info; + uint32_t delay = info->delay_after_reset[index]; + uint32_t val = info->reset_reg_val[index]; + uint32_t reg = info->reset_reg[index]; + uint32_t type, offset; + + type = BNXT_FW_STATUS_REG_TYPE(reg); + offset = BNXT_FW_STATUS_REG_OFF(reg); + + switch (type) { + case BNXT_FW_STATUS_REG_TYPE_CFG: + rte_pci_write_config(bp->pdev, &val, sizeof(val), offset); + break; + case BNXT_FW_STATUS_REG_TYPE_GRC: + offset = bnxt_map_reset_regs(bp, offset); + rte_write32(val, (uint8_t *)bp->bar0 + offset); + break; + case BNXT_FW_STATUS_REG_TYPE_BAR0: + rte_write32(val, (uint8_t *)bp->bar0 + offset); + break; + } + /* wait on a specific interval of time until core reset is complete */ + if (delay) + rte_delay_ms(delay); +} + +static void bnxt_dev_cleanup(struct bnxt *bp) +{ + bnxt_set_hwrm_link_config(bp, false); + bp->link_info->link_up = 0; + if (bp->eth_dev->data->dev_started) + bnxt_dev_stop_op(bp->eth_dev); + + bnxt_uninit_resources(bp, true); +} + +static int bnxt_restore_vlan_filters(struct bnxt *bp) +{ + struct rte_eth_dev *dev = bp->eth_dev; + struct rte_vlan_filter_conf *vfc; + int vidx, vbit, rc; + uint16_t vlan_id; + + for (vlan_id = 1; vlan_id <= RTE_ETHER_MAX_VLAN_ID; vlan_id++) { + vfc = &dev->data->vlan_filter_conf; + vidx = vlan_id / 64; + vbit = vlan_id % 64; + + /* Each bit corresponds to a VLAN id */ + if (vfc->ids[vidx] & (UINT64_C(1) << vbit)) { + rc = bnxt_add_vlan_filter(bp, vlan_id); + if (rc) + return rc; + } + } + + return 0; +} + +static int bnxt_restore_mac_filters(struct bnxt *bp) +{ + struct rte_eth_dev *dev = bp->eth_dev; + struct rte_eth_dev_info dev_info; + struct rte_ether_addr *addr; + uint64_t pool_mask; + uint32_t 
pool = 0; + uint16_t i; + int rc; + + if (BNXT_VF(bp) & !BNXT_VF_IS_TRUSTED(bp)) + return 0; + + rc = bnxt_dev_info_get_op(dev, &dev_info); + if (rc) + return rc; + + /* replay MAC address configuration */ + for (i = 1; i < dev_info.max_mac_addrs; i++) { + addr = &dev->data->mac_addrs[i]; + + /* skip zero address */ + if (rte_is_zero_ether_addr(addr)) + continue; + + pool = 0; + pool_mask = dev->data->mac_pool_sel[i]; + + do { + if (pool_mask & 1ULL) { + rc = bnxt_mac_addr_add_op(dev, addr, i, pool); + if (rc) + return rc; + } + pool_mask >>= 1; + pool++; + } while (pool_mask); + } + + return 0; +} + +static int bnxt_restore_filters(struct bnxt *bp) +{ + struct rte_eth_dev *dev = bp->eth_dev; + int ret = 0; + + if (dev->data->all_multicast) { + ret = bnxt_allmulticast_enable_op(dev); + if (ret) + return ret; + } + if (dev->data->promiscuous) { + ret = bnxt_promiscuous_enable_op(dev); + if (ret) + return ret; + } + + ret = bnxt_restore_mac_filters(bp); + if (ret) + return ret; + + ret = bnxt_restore_vlan_filters(bp); + /* TODO restore other filters as well */ + return ret; +} + +static void bnxt_dev_recover(void *arg) +{ + struct bnxt *bp = arg; + int timeout = bp->fw_reset_max_msecs; + int rc = 0; + + /* Clear Error flag so that device re-init should happen */ + bp->flags &= ~BNXT_FLAG_FATAL_ERROR; + + do { + rc = bnxt_hwrm_ver_get(bp, SHORT_HWRM_CMD_TIMEOUT); + if (rc == 0) + break; + rte_delay_ms(BNXT_FW_READY_WAIT_INTERVAL); + timeout -= BNXT_FW_READY_WAIT_INTERVAL; + } while (rc && timeout); + + if (rc) { + PMD_DRV_LOG(ERR, "FW is not Ready after reset\n"); + goto err; + } + + rc = bnxt_init_resources(bp, true); + if (rc) { + PMD_DRV_LOG(ERR, + "Failed to initialize resources after reset\n"); + goto err; + } + /* clear reset flag as the device is initialized now */ + bp->flags &= ~BNXT_FLAG_FW_RESET; + + rc = bnxt_dev_start_op(bp->eth_dev); + if (rc) { + PMD_DRV_LOG(ERR, "Failed to start port after reset\n"); + goto err_start; + } + + rc = bnxt_restore_filters(bp); + if (rc) + goto err_start; + + PMD_DRV_LOG(INFO, "Recovered from FW reset\n"); + return; +err_start: + bnxt_dev_stop_op(bp->eth_dev); +err: + bp->flags |= BNXT_FLAG_FATAL_ERROR; + bnxt_uninit_resources(bp, false); + PMD_DRV_LOG(ERR, "Failed to recover from FW reset\n"); +} + +void bnxt_dev_reset_and_resume(void *arg) +{ + struct bnxt *bp = arg; + int rc; + + bnxt_dev_cleanup(bp); + + bnxt_wait_for_device_shutdown(bp); + + rc = rte_eal_alarm_set(US_PER_MS * bp->fw_reset_min_msecs, + bnxt_dev_recover, (void *)bp); + if (rc) + PMD_DRV_LOG(ERR, "Error setting recovery alarm"); +} + +uint32_t bnxt_read_fw_status_reg(struct bnxt *bp, uint32_t index) +{ + struct bnxt_error_recovery_info *info = bp->recovery_info; + uint32_t reg = info->status_regs[index]; + uint32_t type, offset, val = 0; + + type = BNXT_FW_STATUS_REG_TYPE(reg); + offset = BNXT_FW_STATUS_REG_OFF(reg); + + switch (type) { + case BNXT_FW_STATUS_REG_TYPE_CFG: + rte_pci_read_config(bp->pdev, &val, sizeof(val), offset); + break; + case BNXT_FW_STATUS_REG_TYPE_GRC: + offset = info->mapped_status_regs[index]; + /* FALLTHROUGH */ + case BNXT_FW_STATUS_REG_TYPE_BAR0: + val = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + + offset)); + break; + } + + return val; +} + +static int bnxt_fw_reset_all(struct bnxt *bp) +{ + struct bnxt_error_recovery_info *info = bp->recovery_info; + uint32_t i; + int rc = 0; + + if (info->flags & BNXT_FLAG_ERROR_RECOVERY_HOST) { + /* Reset through master function driver */ + for (i = 0; i < info->reg_array_cnt; i++) + 
bnxt_write_fw_reset_reg(bp, i); + /* Wait for time specified by FW after triggering reset */ + rte_delay_ms(info->master_func_wait_period_after_reset); + } else if (info->flags & BNXT_FLAG_ERROR_RECOVERY_CO_CPU) { + /* Reset with the help of Kong processor */ + rc = bnxt_hwrm_fw_reset(bp); + if (rc) + PMD_DRV_LOG(ERR, "Failed to reset FW\n"); + } + + return rc; +} + +static void bnxt_fw_reset_cb(void *arg) +{ + struct bnxt *bp = arg; + struct bnxt_error_recovery_info *info = bp->recovery_info; + int rc = 0; + + /* Only Master function can do FW reset */ + if (bnxt_is_master_func(bp) && + bnxt_is_recovery_enabled(bp)) { + rc = bnxt_fw_reset_all(bp); + if (rc) { + PMD_DRV_LOG(ERR, "Adapter recovery failed\n"); + return; + } + } + + /* if recovery method is ERROR_RECOVERY_CO_CPU, KONG will send + * EXCEPTION_FATAL_ASYNC event to all the functions + * (including MASTER FUNC). After receiving this Async, all the active + * drivers should treat this case as FW initiated recovery + */ + if (info->flags & BNXT_FLAG_ERROR_RECOVERY_HOST) { + bp->fw_reset_min_msecs = BNXT_MIN_FW_READY_TIMEOUT; + bp->fw_reset_max_msecs = BNXT_MAX_FW_RESET_TIMEOUT; + + /* To recover from error */ + rte_eal_alarm_set(US_PER_MS, bnxt_dev_reset_and_resume, + (void *)bp); + } +} + +/* Driver should poll FW heartbeat, reset_counter with the frequency + * advertised by FW in HWRM_ERROR_RECOVERY_QCFG. + * When the driver detects heartbeat stop or change in reset_counter, + * it has to trigger a reset to recover from the error condition. + * A “master PF” is the function who will have the privilege to + * initiate the chimp reset. The master PF will be elected by the + * firmware and will be notified through async message. + */ +static void bnxt_check_fw_health(void *arg) +{ + struct bnxt *bp = arg; + struct bnxt_error_recovery_info *info = bp->recovery_info; + uint32_t val = 0, wait_msec; + + if (!info || !bnxt_is_recovery_enabled(bp) || + is_bnxt_in_error(bp)) + return; + + val = bnxt_read_fw_status_reg(bp, BNXT_FW_HEARTBEAT_CNT_REG); + if (val == info->last_heart_beat) + goto reset; + + info->last_heart_beat = val; + + val = bnxt_read_fw_status_reg(bp, BNXT_FW_RECOVERY_CNT_REG); + if (val != info->last_reset_counter) + goto reset; + + info->last_reset_counter = val; + + rte_eal_alarm_set(US_PER_MS * info->driver_polling_freq, + bnxt_check_fw_health, (void *)bp); + + return; +reset: + /* Stop DMA to/from device */ + bp->flags |= BNXT_FLAG_FATAL_ERROR; + bp->flags |= BNXT_FLAG_FW_RESET; + + PMD_DRV_LOG(ERR, "Detected FW dead condition\n"); + + if (bnxt_is_master_func(bp)) + wait_msec = info->master_func_wait_period; + else + wait_msec = info->normal_func_wait_period; + + rte_eal_alarm_set(US_PER_MS * wait_msec, + bnxt_fw_reset_cb, (void *)bp); +} + +void bnxt_schedule_fw_health_check(struct bnxt *bp) +{ + uint32_t polling_freq; + + if (!bnxt_is_recovery_enabled(bp)) + return; + + if (bp->flags & BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED) + return; + + polling_freq = bp->recovery_info->driver_polling_freq; + + rte_eal_alarm_set(US_PER_MS * polling_freq, + bnxt_check_fw_health, (void *)bp); + bp->flags |= BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED; +} + +static void bnxt_cancel_fw_health_check(struct bnxt *bp) +{ + if (!bnxt_is_recovery_enabled(bp)) + return; + + rte_eal_alarm_cancel(bnxt_check_fw_health, (void *)bp); + bp->flags &= ~BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED; +} + +static bool bnxt_vf_pciid(uint16_t device_id) +{ + switch (device_id) { + case BROADCOM_DEV_ID_57304_VF: + case BROADCOM_DEV_ID_57406_VF: + case 
BROADCOM_DEV_ID_5731X_VF: + case BROADCOM_DEV_ID_5741X_VF: + case BROADCOM_DEV_ID_57414_VF: + case BROADCOM_DEV_ID_STRATUS_NIC_VF1: + case BROADCOM_DEV_ID_STRATUS_NIC_VF2: + case BROADCOM_DEV_ID_58802_VF: + case BROADCOM_DEV_ID_57500_VF1: + case BROADCOM_DEV_ID_57500_VF2: + /* FALLTHROUGH */ + return true; + default: + return false; + } +} + +static bool bnxt_thor_device(uint16_t device_id) +{ + switch (device_id) { + case BROADCOM_DEV_ID_57508: + case BROADCOM_DEV_ID_57504: + case BROADCOM_DEV_ID_57502: + case BROADCOM_DEV_ID_57508_MF1: + case BROADCOM_DEV_ID_57504_MF1: + case BROADCOM_DEV_ID_57502_MF1: + case BROADCOM_DEV_ID_57508_MF2: + case BROADCOM_DEV_ID_57504_MF2: + case BROADCOM_DEV_ID_57502_MF2: + case BROADCOM_DEV_ID_57500_VF1: + case BROADCOM_DEV_ID_57500_VF2: + /* FALLTHROUGH */ + return true; + default: + return false; + } +} + +bool bnxt_stratus_device(struct bnxt *bp) +{ + uint16_t device_id = bp->pdev->id.device_id; + + switch (device_id) { + case BROADCOM_DEV_ID_STRATUS_NIC: + case BROADCOM_DEV_ID_STRATUS_NIC_VF1: + case BROADCOM_DEV_ID_STRATUS_NIC_VF2: + /* FALLTHROUGH */ + return true; + default: + return false; + } +} + +static int bnxt_init_board(struct rte_eth_dev *eth_dev) +{ + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); + struct bnxt *bp = eth_dev->data->dev_private; + + /* enable device (incl. PCI PM wakeup), and bus-mastering */ + bp->bar0 = (void *)pci_dev->mem_resource[0].addr; + bp->doorbell_base = (void *)pci_dev->mem_resource[2].addr; + if (!bp->bar0 || !bp->doorbell_base) { + PMD_DRV_LOG(ERR, "Unable to access Hardware\n"); + return -ENODEV; + } + + bp->eth_dev = eth_dev; + bp->pdev = pci_dev; + + return 0; +} + +static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp, + struct bnxt_ctx_pg_info *ctx_pg, + uint32_t mem_size, + const char *suffix, + uint16_t idx) +{ + struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; + const struct rte_memzone *mz = NULL; + char mz_name[RTE_MEMZONE_NAMESIZE]; + rte_iova_t mz_phys_addr; + uint64_t valid_bits = 0; + uint32_t sz; + int i; + + if (!mem_size) + return 0; + + rmem->nr_pages = RTE_ALIGN_MUL_CEIL(mem_size, BNXT_PAGE_SIZE) / + BNXT_PAGE_SIZE; + rmem->page_size = BNXT_PAGE_SIZE; + rmem->pg_arr = ctx_pg->ctx_pg_arr; + rmem->dma_arr = ctx_pg->ctx_dma_arr; + rmem->flags = BNXT_RMEM_VALID_PTE_FLAG; + + valid_bits = PTU_PTE_VALID; + + if (rmem->nr_pages > 1) { + snprintf(mz_name, RTE_MEMZONE_NAMESIZE, + "bnxt_ctx_pg_tbl%s_%x_%d", + suffix, idx, bp->eth_dev->data->port_id); + mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0; + mz = rte_memzone_lookup(mz_name); + if (!mz) { + mz = rte_memzone_reserve_aligned(mz_name, + rmem->nr_pages * 8, + SOCKET_ID_ANY, + RTE_MEMZONE_2MB | + RTE_MEMZONE_SIZE_HINT_ONLY | + RTE_MEMZONE_IOVA_CONTIG, + BNXT_PAGE_SIZE); + if (mz == NULL) + return -ENOMEM; + } + + memset(mz->addr, 0, mz->len); + mz_phys_addr = mz->iova; + + rmem->pg_tbl = mz->addr; + rmem->pg_tbl_map = mz_phys_addr; + rmem->pg_tbl_mz = mz; + } + + snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "bnxt_ctx_%s_%x_%d", + suffix, idx, bp->eth_dev->data->port_id); + mz = rte_memzone_lookup(mz_name); + if (!mz) { + mz = rte_memzone_reserve_aligned(mz_name, + mem_size, + SOCKET_ID_ANY, + RTE_MEMZONE_1GB | + RTE_MEMZONE_SIZE_HINT_ONLY | + RTE_MEMZONE_IOVA_CONTIG, + BNXT_PAGE_SIZE); + if (mz == NULL) + return -ENOMEM; + } + + memset(mz->addr, 0, mz->len); + mz_phys_addr = mz->iova; + + for (sz = 0, i = 0; sz < mem_size; sz += BNXT_PAGE_SIZE, i++) { + rmem->pg_arr[i] = ((char *)mz->addr) + sz; + rmem->dma_arr[i] = mz_phys_addr + sz; + + if 
(rmem->nr_pages > 1) { + if (i == rmem->nr_pages - 2 && + (rmem->flags & BNXT_RMEM_RING_PTE_FLAG)) + valid_bits |= PTU_PTE_NEXT_TO_LAST; + else if (i == rmem->nr_pages - 1 && + (rmem->flags & BNXT_RMEM_RING_PTE_FLAG)) + valid_bits |= PTU_PTE_LAST; + + rmem->pg_tbl[i] = rte_cpu_to_le_64(rmem->dma_arr[i] | + valid_bits); + } + } + + rmem->mz = mz; + if (rmem->vmem_size) + rmem->vmem = (void **)mz->addr; + rmem->dma_arr[0] = mz_phys_addr; + return 0; +} + +static void bnxt_free_ctx_mem(struct bnxt *bp) +{ + int i; + + if (!bp->ctx || !(bp->ctx->flags & BNXT_CTX_FLAG_INITED)) + return; + + bp->ctx->flags &= ~BNXT_CTX_FLAG_INITED; + rte_memzone_free(bp->ctx->qp_mem.ring_mem.mz); + rte_memzone_free(bp->ctx->srq_mem.ring_mem.mz); + rte_memzone_free(bp->ctx->cq_mem.ring_mem.mz); + rte_memzone_free(bp->ctx->vnic_mem.ring_mem.mz); + rte_memzone_free(bp->ctx->stat_mem.ring_mem.mz); + rte_memzone_free(bp->ctx->qp_mem.ring_mem.pg_tbl_mz); + rte_memzone_free(bp->ctx->srq_mem.ring_mem.pg_tbl_mz); + rte_memzone_free(bp->ctx->cq_mem.ring_mem.pg_tbl_mz); + rte_memzone_free(bp->ctx->vnic_mem.ring_mem.pg_tbl_mz); + rte_memzone_free(bp->ctx->stat_mem.ring_mem.pg_tbl_mz); + + for (i = 0; i < bp->ctx->tqm_fp_rings_count + 1; i++) { + if (bp->ctx->tqm_mem[i]) + rte_memzone_free(bp->ctx->tqm_mem[i]->ring_mem.mz); + } + + rte_free(bp->ctx); + bp->ctx = NULL; +} + +#define bnxt_roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y)) + +#define min_t(type, x, y) ({ \ + type __min1 = (x); \ + type __min2 = (y); \ + __min1 < __min2 ? __min1 : __min2; }) + +#define max_t(type, x, y) ({ \ + type __max1 = (x); \ + type __max2 = (y); \ + __max1 > __max2 ? __max1 : __max2; }) + +#define clamp_t(type, _x, min, max) min_t(type, max_t(type, _x, min), max) + +int bnxt_alloc_ctx_mem(struct bnxt *bp) +{ + struct bnxt_ctx_pg_info *ctx_pg; + struct bnxt_ctx_mem_info *ctx; + uint32_t mem_size, ena, entries; + uint32_t entries_sp, min; + int i, rc; + + rc = bnxt_hwrm_func_backing_store_qcaps(bp); + if (rc) { + PMD_DRV_LOG(ERR, "Query context mem capability failed\n"); + return rc; + } + ctx = bp->ctx; + if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED)) + return 0; + + ctx_pg = &ctx->qp_mem; + ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries; + mem_size = ctx->qp_entry_size * ctx_pg->entries; + rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "qp_mem", 0); + if (rc) + return rc; + + ctx_pg = &ctx->srq_mem; + ctx_pg->entries = ctx->srq_max_l2_entries; + mem_size = ctx->srq_entry_size * ctx_pg->entries; + rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "srq_mem", 0); + if (rc) + return rc; + + ctx_pg = &ctx->cq_mem; + ctx_pg->entries = ctx->cq_max_l2_entries; + mem_size = ctx->cq_entry_size * ctx_pg->entries; + rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "cq_mem", 0); + if (rc) + return rc; + + ctx_pg = &ctx->vnic_mem; + ctx_pg->entries = ctx->vnic_max_vnic_entries + + ctx->vnic_max_ring_table_entries; + mem_size = ctx->vnic_entry_size * ctx_pg->entries; + rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "vnic_mem", 0); + if (rc) + return rc; + + ctx_pg = &ctx->stat_mem; + ctx_pg->entries = ctx->stat_max_entries; + mem_size = ctx->stat_entry_size * ctx_pg->entries; + rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "stat_mem", 0); + if (rc) + return rc; + + min = ctx->tqm_min_entries_per_ring; + + entries_sp = ctx->qp_max_l2_entries + + ctx->vnic_max_vnic_entries + + 2 * ctx->qp_min_qp1_entries + min; + entries_sp = bnxt_roundup(entries_sp, ctx->tqm_entries_multiple); + + entries = ctx->qp_max_l2_entries + 
ctx->qp_min_qp1_entries; + entries = bnxt_roundup(entries, ctx->tqm_entries_multiple); + entries = clamp_t(uint32_t, entries, min, + ctx->tqm_max_entries_per_ring); + for (i = 0, ena = 0; i < ctx->tqm_fp_rings_count + 1; i++) { + ctx_pg = ctx->tqm_mem[i]; + ctx_pg->entries = i ? entries : entries_sp; + mem_size = ctx->tqm_entry_size * ctx_pg->entries; + rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "tqm_mem", i); + if (rc) + return rc; + ena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP << i; + } + + ena |= FUNC_BACKING_STORE_CFG_INPUT_DFLT_ENABLES; + rc = bnxt_hwrm_func_backing_store_cfg(bp, ena); + if (rc) + PMD_DRV_LOG(ERR, + "Failed to configure context mem: rc = %d\n", rc); + else + ctx->flags |= BNXT_CTX_FLAG_INITED; + + return rc; +} + +static int bnxt_alloc_stats_mem(struct bnxt *bp) +{ + struct rte_pci_device *pci_dev = bp->pdev; + char mz_name[RTE_MEMZONE_NAMESIZE]; + const struct rte_memzone *mz = NULL; + uint32_t total_alloc_len; + rte_iova_t mz_phys_addr; + + if (pci_dev->id.device_id == BROADCOM_DEV_ID_NS2) + return 0; + + snprintf(mz_name, RTE_MEMZONE_NAMESIZE, + "bnxt_" PCI_PRI_FMT "-%s", pci_dev->addr.domain, + pci_dev->addr.bus, pci_dev->addr.devid, + pci_dev->addr.function, "rx_port_stats"); + mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0; + mz = rte_memzone_lookup(mz_name); + total_alloc_len = + RTE_CACHE_LINE_ROUNDUP(sizeof(struct rx_port_stats) + + sizeof(struct rx_port_stats_ext) + 512); + if (!mz) { + mz = rte_memzone_reserve(mz_name, total_alloc_len, + SOCKET_ID_ANY, + RTE_MEMZONE_2MB | + RTE_MEMZONE_SIZE_HINT_ONLY | + RTE_MEMZONE_IOVA_CONTIG); + if (mz == NULL) + return -ENOMEM; + } + memset(mz->addr, 0, mz->len); + mz_phys_addr = mz->iova; + + bp->rx_mem_zone = (const void *)mz; + bp->hw_rx_port_stats = mz->addr; + bp->hw_rx_port_stats_map = mz_phys_addr; + + snprintf(mz_name, RTE_MEMZONE_NAMESIZE, + "bnxt_" PCI_PRI_FMT "-%s", pci_dev->addr.domain, + pci_dev->addr.bus, pci_dev->addr.devid, + pci_dev->addr.function, "tx_port_stats"); + mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0; + mz = rte_memzone_lookup(mz_name); + total_alloc_len = + RTE_CACHE_LINE_ROUNDUP(sizeof(struct tx_port_stats) + + sizeof(struct tx_port_stats_ext) + 512); + if (!mz) { + mz = rte_memzone_reserve(mz_name, + total_alloc_len, + SOCKET_ID_ANY, + RTE_MEMZONE_2MB | + RTE_MEMZONE_SIZE_HINT_ONLY | + RTE_MEMZONE_IOVA_CONTIG); + if (mz == NULL) + return -ENOMEM; + } + memset(mz->addr, 0, mz->len); + mz_phys_addr = mz->iova; + + bp->tx_mem_zone = (const void *)mz; + bp->hw_tx_port_stats = mz->addr; + bp->hw_tx_port_stats_map = mz_phys_addr; + bp->flags |= BNXT_FLAG_PORT_STATS; + + /* Display extended statistics if FW supports it */ + if (bp->hwrm_spec_code < HWRM_SPEC_CODE_1_8_4 || + bp->hwrm_spec_code == HWRM_SPEC_CODE_1_9_0 || + !(bp->flags & BNXT_FLAG_EXT_STATS_SUPPORTED)) + return 0; + + bp->hw_rx_port_stats_ext = (void *) + ((uint8_t *)bp->hw_rx_port_stats + + sizeof(struct rx_port_stats)); + bp->hw_rx_port_stats_ext_map = bp->hw_rx_port_stats_map + + sizeof(struct rx_port_stats); + bp->flags |= BNXT_FLAG_EXT_RX_PORT_STATS; + + if (bp->hwrm_spec_code < HWRM_SPEC_CODE_1_9_2 || + bp->flags & BNXT_FLAG_EXT_STATS_SUPPORTED) { + bp->hw_tx_port_stats_ext = (void *) + ((uint8_t *)bp->hw_tx_port_stats + + sizeof(struct tx_port_stats)); + bp->hw_tx_port_stats_ext_map = + bp->hw_tx_port_stats_map + + sizeof(struct tx_port_stats); + bp->flags |= BNXT_FLAG_EXT_TX_PORT_STATS; + } + + return 0; +} + +static int bnxt_setup_mac_addr(struct rte_eth_dev *eth_dev) +{ + struct bnxt *bp = eth_dev->data->dev_private; + 
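+	/* The MAC address table below is sized by max_l2_ctx. Entry 0 always
+	 * ends up holding the port default MAC: the permanent address from
+	 * firmware, or a randomly generated one for a VF with no MAC assigned.
+	 */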
	int rc = 0;
+
+	eth_dev->data->mac_addrs = rte_zmalloc("bnxt_mac_addr_tbl",
+					       RTE_ETHER_ADDR_LEN *
+					       bp->max_l2_ctx,
+					       0);
+	if (eth_dev->data->mac_addrs == NULL) {
+		PMD_DRV_LOG(ERR, "Failed to alloc MAC addr tbl\n");
+		return -ENOMEM;
+	}
+
+	if (!BNXT_HAS_DFLT_MAC_SET(bp)) {
+		if (BNXT_PF(bp))
+			return -EINVAL;
+
+		/* Generate a random MAC address, if none was assigned by PF */
+		PMD_DRV_LOG(INFO, "VF MAC address not assigned by Host PF\n");
+		bnxt_eth_hw_addr_random(bp->mac_addr);
+		PMD_DRV_LOG(INFO,
+			    "Assign random MAC:%02X:%02X:%02X:%02X:%02X:%02X\n",
+			    bp->mac_addr[0], bp->mac_addr[1], bp->mac_addr[2],
+			    bp->mac_addr[3], bp->mac_addr[4], bp->mac_addr[5]);
+
+		rc = bnxt_hwrm_set_mac(bp);
+		if (rc)
+			return rc;
+	}
+
+	/* Copy the permanent MAC from the FUNC_QCAPS response */
+	memcpy(&eth_dev->data->mac_addrs[0], bp->mac_addr, RTE_ETHER_ADDR_LEN);
+
+	return rc;
+}
+
+static int bnxt_restore_dflt_mac(struct bnxt *bp)
+{
+	int rc = 0;
+
+	/* MAC is already configured in FW */
+	if (BNXT_HAS_DFLT_MAC_SET(bp))
+		return 0;
+
+	/* Restore the old MAC configured */
+	rc = bnxt_hwrm_set_mac(bp);
+	if (rc)
+		PMD_DRV_LOG(ERR, "Failed to restore MAC address\n");
+
+	return rc;
+}
+
+static void bnxt_config_vf_req_fwd(struct bnxt *bp)
+{
+	if (!BNXT_PF(bp))
+		return;
+
+#define ALLOW_FUNC(x)	\
+	{ \
+		uint32_t arg = (x); \
+		bp->pf->vf_req_fwd[((arg) >> 5)] &= \
+		~rte_cpu_to_le_32(1 << ((arg) & 0x1f)); \
+	}
+
+	/* Forward all requests if firmware is new enough */
+	if (((bp->fw_ver >= ((20 << 24) | (6 << 16) | (100 << 8))) &&
+	     (bp->fw_ver < ((20 << 24) | (7 << 16)))) ||
+	    ((bp->fw_ver >= ((20 << 24) | (8 << 16))))) {
+		memset(bp->pf->vf_req_fwd, 0xff, sizeof(bp->pf->vf_req_fwd));
+	} else {
+		PMD_DRV_LOG(WARNING,
+			    "Firmware too old for VF mailbox functionality\n");
+		memset(bp->pf->vf_req_fwd, 0, sizeof(bp->pf->vf_req_fwd));
+	}
+
+	/*
+	 * The following are used for driver cleanup. If we disallow these,
+	 * VF drivers can't clean up cleanly.
+	 */
+	ALLOW_FUNC(HWRM_FUNC_DRV_UNRGTR);
+	ALLOW_FUNC(HWRM_VNIC_FREE);
+	ALLOW_FUNC(HWRM_RING_FREE);
+	ALLOW_FUNC(HWRM_RING_GRP_FREE);
+	ALLOW_FUNC(HWRM_VNIC_RSS_COS_LB_CTX_FREE);
+	ALLOW_FUNC(HWRM_CFA_L2_FILTER_FREE);
+	ALLOW_FUNC(HWRM_STAT_CTX_FREE);
+	ALLOW_FUNC(HWRM_PORT_PHY_QCFG);
+	ALLOW_FUNC(HWRM_VNIC_TPA_CFG);
+}
+
+uint16_t
+bnxt_get_svif(uint16_t port_id, bool func_svif)
+{
+	struct rte_eth_dev *eth_dev;
+	struct bnxt *bp;
+
+	eth_dev = &rte_eth_devices[port_id];
+	bp = eth_dev->data->dev_private;
+
+	return func_svif ?
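+	/* function SVIF when requested, otherwise the parent port SVIF */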
bp->func_svif : bp->port_svif; +} + +uint16_t +bnxt_get_vnic_id(uint16_t port) +{ + struct rte_eth_dev *eth_dev; + struct bnxt_vnic_info *vnic; + struct bnxt *bp; + + eth_dev = &rte_eth_devices[port]; + bp = eth_dev->data->dev_private; + + vnic = BNXT_GET_DEFAULT_VNIC(bp); + + return vnic->fw_vnic_id; +} + +uint16_t +bnxt_get_fw_func_id(uint16_t port) +{ + struct rte_eth_dev *eth_dev; + struct bnxt *bp; + + eth_dev = &rte_eth_devices[port]; + bp = eth_dev->data->dev_private; + + return bp->fw_fid; +} + +static void bnxt_alloc_error_recovery_info(struct bnxt *bp) +{ + struct bnxt_error_recovery_info *info = bp->recovery_info; + + if (info) { + if (!(bp->fw_cap & BNXT_FW_CAP_HCOMM_FW_STATUS)) + memset(info, 0, sizeof(*info)); + return; + } + + if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) + return; + + info = rte_zmalloc("bnxt_hwrm_error_recovery_qcfg", + sizeof(*info), 0); + if (!info) + bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; + + bp->recovery_info = info; +} + +static void bnxt_check_fw_status(struct bnxt *bp) +{ + uint32_t fw_status; + + if (!(bp->recovery_info && + (bp->fw_cap & BNXT_FW_CAP_HCOMM_FW_STATUS))) + return; + + fw_status = bnxt_read_fw_status_reg(bp, BNXT_FW_STATUS_REG); + if (fw_status != BNXT_FW_STATUS_HEALTHY) + PMD_DRV_LOG(ERR, "Firmware not responding, status: %#x\n", + fw_status); +} + +static int bnxt_map_hcomm_fw_status_reg(struct bnxt *bp) +{ + struct bnxt_error_recovery_info *info = bp->recovery_info; + uint32_t status_loc; + uint32_t sig_ver; + + rte_write32(HCOMM_STATUS_STRUCT_LOC, (uint8_t *)bp->bar0 + + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4); + sig_ver = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + + BNXT_GRCP_WINDOW_2_BASE + + offsetof(struct hcomm_status, + sig_ver))); + /* If the signature is absent, then FW does not support this feature */ + if ((sig_ver & HCOMM_STATUS_SIGNATURE_MASK) != + HCOMM_STATUS_SIGNATURE_VAL) + return 0; + + if (!info) { + info = rte_zmalloc("bnxt_hwrm_error_recovery_qcfg", + sizeof(*info), 0); + if (!info) + return -ENOMEM; + bp->recovery_info = info; + } else { + memset(info, 0, sizeof(*info)); + } + + status_loc = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + + BNXT_GRCP_WINDOW_2_BASE + + offsetof(struct hcomm_status, + fw_status_loc))); + + /* Only pre-map the FW health status GRC register */ + if (BNXT_FW_STATUS_REG_TYPE(status_loc) != BNXT_FW_STATUS_REG_TYPE_GRC) + return 0; + + info->status_regs[BNXT_FW_STATUS_REG] = status_loc; + info->mapped_status_regs[BNXT_FW_STATUS_REG] = + BNXT_GRCP_WINDOW_2_BASE + (status_loc & BNXT_GRCP_OFFSET_MASK); + + rte_write32((status_loc & BNXT_GRCP_BASE_MASK), (uint8_t *)bp->bar0 + + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4); + + bp->fw_cap |= BNXT_FW_CAP_HCOMM_FW_STATUS; + + return 0; +} + +static int bnxt_init_fw(struct bnxt *bp) +{ + uint16_t mtu; + int rc = 0; + + bp->fw_cap = 0; + + rc = bnxt_map_hcomm_fw_status_reg(bp); + if (rc) + return rc; + + rc = bnxt_hwrm_ver_get(bp, DFLT_HWRM_CMD_TIMEOUT); + if (rc) { + bnxt_check_fw_status(bp); + return rc; + } + + rc = bnxt_hwrm_func_reset(bp); + if (rc) + return -EIO; + + rc = bnxt_hwrm_vnic_qcaps(bp); + if (rc) + return rc; + + rc = bnxt_hwrm_queue_qportcfg(bp); + if (rc) + return rc; + + /* Get the MAX capabilities for this function. + * This function also allocates context memory for TQM rings and + * informs the firmware about this allocated backing store memory. 
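+ * bnxt_hwrm_ver_get() and bnxt_hwrm_func_reset() above must have
+ * succeeded before this query is issued; a failure here aborts
+ * bnxt_init_fw() and the error is returned to the caller unchanged.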
+ */ + rc = bnxt_hwrm_func_qcaps(bp); + if (rc) + return rc; + + rc = bnxt_hwrm_func_qcfg(bp, &mtu); + if (rc) + return rc; + + bnxt_hwrm_port_mac_qcfg(bp); + + rc = bnxt_hwrm_cfa_adv_flow_mgmt_qcaps(bp); + if (rc) + return rc; + + bnxt_alloc_error_recovery_info(bp); + /* Get the adapter error recovery support info */ + rc = bnxt_hwrm_error_recovery_qcfg(bp); + if (rc) + bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; + + bnxt_hwrm_port_led_qcaps(bp); + + return 0; +} + +static int +bnxt_init_locks(struct bnxt *bp) +{ + int err; + + err = pthread_mutex_init(&bp->flow_lock, NULL); + if (err) { + PMD_DRV_LOG(ERR, "Unable to initialize flow_lock\n"); + return err; + } + + err = pthread_mutex_init(&bp->def_cp_lock, NULL); + if (err) + PMD_DRV_LOG(ERR, "Unable to initialize def_cp_lock\n"); + return err; +} + +static int bnxt_init_resources(struct bnxt *bp, bool reconfig_dev) +{ + int rc; + + rc = bnxt_init_fw(bp); + if (rc) + return rc; + + if (!reconfig_dev) { + rc = bnxt_setup_mac_addr(bp->eth_dev); + if (rc) + return rc; + } else { + rc = bnxt_restore_dflt_mac(bp); + if (rc) + return rc; + } + + bnxt_config_vf_req_fwd(bp); + + rc = bnxt_hwrm_func_driver_register(bp); + if (rc) { + PMD_DRV_LOG(ERR, "Failed to register driver"); + return -EBUSY; + } + + if (BNXT_PF(bp)) { + if (bp->pdev->max_vfs) { + rc = bnxt_hwrm_allocate_vfs(bp, bp->pdev->max_vfs); + if (rc) { + PMD_DRV_LOG(ERR, "Failed to allocate VFs\n"); + return rc; + } + } else { + rc = bnxt_hwrm_allocate_pf_only(bp); + if (rc) { + PMD_DRV_LOG(ERR, + "Failed to allocate PF resources"); + return rc; + } + } + } + + rc = bnxt_alloc_mem(bp, reconfig_dev); + if (rc) + return rc; + + rc = bnxt_setup_int(bp); + if (rc) + return rc; + + rc = bnxt_request_int(bp); + if (rc) + return rc; + + rc = bnxt_init_ctx_mem(bp); + if (rc) { + PMD_DRV_LOG(ERR, "Failed to init adv_flow_counters\n"); + return rc; + } + + rc = bnxt_init_locks(bp); + if (rc) + return rc; + + return 0; +} + +static int +bnxt_parse_devarg_truflow(__rte_unused const char *key, + const char *value, void *opaque_arg) +{ + struct bnxt *bp = opaque_arg; + unsigned long truflow; + char *end = NULL; + + if (!value || !opaque_arg) { + PMD_DRV_LOG(ERR, + "Invalid parameter passed to truflow devargs.\n"); + return -EINVAL; + } + + truflow = strtoul(value, &end, 10); + if (end == NULL || *end != '\0' || + (truflow == ULONG_MAX && errno == ERANGE)) { + PMD_DRV_LOG(ERR, + "Invalid parameter passed to truflow devargs.\n"); + return -EINVAL; + } + + if (BNXT_DEVARG_TRUFLOW_INVALID(truflow)) { + PMD_DRV_LOG(ERR, + "Invalid value passed to truflow devargs.\n"); + return -EINVAL; + } + + bp->flags |= BNXT_FLAG_TRUFLOW_EN; + if (BNXT_TRUFLOW_EN(bp)) + PMD_DRV_LOG(INFO, "Host-based truflow feature enabled.\n"); + + return 0; +} + +static int +bnxt_parse_devarg_flow_xstat(__rte_unused const char *key, + const char *value, void *opaque_arg) +{ + struct bnxt *bp = opaque_arg; + unsigned long flow_xstat; + char *end = NULL; + + if (!value || !opaque_arg) { + PMD_DRV_LOG(ERR, + "Invalid parameter passed to flow_xstat devarg.\n"); + return -EINVAL; + } + + flow_xstat = strtoul(value, &end, 10); + if (end == NULL || *end != '\0' || + (flow_xstat == ULONG_MAX && errno == ERANGE)) { + PMD_DRV_LOG(ERR, + "Invalid parameter passed to flow_xstat devarg.\n"); + return -EINVAL; + } + + if (BNXT_DEVARG_FLOW_XSTAT_INVALID(flow_xstat)) { + PMD_DRV_LOG(ERR, + "Invalid value passed to flow_xstat devarg.\n"); + return -EINVAL; + } + + bp->flags |= BNXT_FLAG_FLOW_XSTATS_EN; + if (BNXT_FLOW_XSTATS_EN(bp)) + 
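+ /* Informational only; this flag later gates the flow-counter context handling (see bnxt_uninit_ctx_mem()). */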
PMD_DRV_LOG(INFO, "flow_xstat feature enabled.\n"); + + return 0; +} + +static void +bnxt_parse_dev_args(struct bnxt *bp, struct rte_devargs *devargs) +{ + struct rte_kvargs *kvlist; + + if (devargs == NULL) + return; + + kvlist = rte_kvargs_parse(devargs->args, bnxt_dev_args); + if (kvlist == NULL) + return; + + /* + * Handler for "truflow" devarg. + * Invoked as for ex: "-w 0000:00:0d.0,host-based-truflow=1” + */ + rte_kvargs_process(kvlist, BNXT_DEVARG_TRUFLOW, + bnxt_parse_devarg_truflow, bp); + + /* + * Handler for "flow_xstat" devarg. + * Invoked as for ex: "-w 0000:00:0d.0,flow_xstat=1” + */ + rte_kvargs_process(kvlist, BNXT_DEVARG_FLOW_XSTAT, + bnxt_parse_devarg_flow_xstat, bp); + + rte_kvargs_free(kvlist); +} + +static int +bnxt_dev_init(struct rte_eth_dev *eth_dev) +{ + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); + static int version_printed; + struct bnxt *bp; + int rc; + + if (version_printed++ == 0) + PMD_DRV_LOG(INFO, "%s\n", bnxt_version); + + eth_dev->dev_ops = &bnxt_dev_ops; + eth_dev->rx_pkt_burst = &bnxt_recv_pkts; + eth_dev->tx_pkt_burst = &bnxt_xmit_pkts; + + /* + * For secondary processes, we don't initialise any further + * as primary has already done this work. + */ + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; + + rte_eth_copy_pci_info(eth_dev, pci_dev); + + bp = eth_dev->data->dev_private; + + /* Parse dev arguments passed on when starting the DPDK application. */ + bnxt_parse_dev_args(bp, pci_dev->device.devargs); + + bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE; + + if (bnxt_vf_pciid(pci_dev->id.device_id)) + bp->flags |= BNXT_FLAG_VF; + + if (bnxt_thor_device(pci_dev->id.device_id)) + bp->flags |= BNXT_FLAG_THOR_CHIP; + + if (pci_dev->id.device_id == BROADCOM_DEV_ID_58802 || + pci_dev->id.device_id == BROADCOM_DEV_ID_58804 || + pci_dev->id.device_id == BROADCOM_DEV_ID_58808 || + pci_dev->id.device_id == BROADCOM_DEV_ID_58802_VF) + bp->flags |= BNXT_FLAG_STINGRAY; + + rc = bnxt_init_board(eth_dev); + if (rc) { + PMD_DRV_LOG(ERR, + "Failed to initialize board rc: %x\n", rc); + return rc; + } + + rc = bnxt_alloc_pf_info(bp); + if (rc) + goto error_free; + + rc = bnxt_alloc_link_info(bp); + if (rc) + goto error_free; + + rc = bnxt_alloc_hwrm_resources(bp); + if (rc) { + PMD_DRV_LOG(ERR, + "Failed to allocate hwrm resource rc: %x\n", rc); + goto error_free; + } + rc = bnxt_alloc_leds_info(bp); + if (rc) + goto error_free; + + rc = bnxt_alloc_cos_queues(bp); + if (rc) + goto error_free; + + rc = bnxt_init_resources(bp, false); + if (rc) + goto error_free; + + rc = bnxt_alloc_stats_mem(bp); + if (rc) + goto error_free; + + /* Pass the information to the rte_eth_dev_close() that it should also + * release the private port resources. 
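+ * With this flag set, bnxt_dev_uninit() only needs to call
+ * bnxt_dev_close_op() to tear the port down.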
+ */ + eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE; + + PMD_DRV_LOG(INFO, + DRV_MODULE_NAME "found at mem %" PRIX64 ", node addr %pM\n", + pci_dev->mem_resource[0].phys_addr, + pci_dev->mem_resource[0].addr); + + return 0; + +error_free: + bnxt_dev_uninit(eth_dev); + return rc; +} + + +static void bnxt_free_ctx_mem_buf(struct bnxt_ctx_mem_buf_info *ctx) +{ + if (!ctx) + return; + + if (ctx->va) + rte_free(ctx->va); + + ctx->va = NULL; + ctx->dma = RTE_BAD_IOVA; + ctx->ctx_id = BNXT_CTX_VAL_INVAL; +} + +static void bnxt_unregister_fc_ctx_mem(struct bnxt *bp) +{ + bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_RX, + CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC, + bp->flow_stat->rx_fc_out_tbl.ctx_id, + bp->flow_stat->max_fc, + false); + + bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_TX, + CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC, + bp->flow_stat->tx_fc_out_tbl.ctx_id, + bp->flow_stat->max_fc, + false); + + if (bp->flow_stat->rx_fc_in_tbl.ctx_id != BNXT_CTX_VAL_INVAL) + bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->rx_fc_in_tbl.ctx_id); + bp->flow_stat->rx_fc_in_tbl.ctx_id = BNXT_CTX_VAL_INVAL; + + if (bp->flow_stat->rx_fc_out_tbl.ctx_id != BNXT_CTX_VAL_INVAL) + bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->rx_fc_out_tbl.ctx_id); + bp->flow_stat->rx_fc_out_tbl.ctx_id = BNXT_CTX_VAL_INVAL; + + if (bp->flow_stat->tx_fc_in_tbl.ctx_id != BNXT_CTX_VAL_INVAL) + bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->tx_fc_in_tbl.ctx_id); + bp->flow_stat->tx_fc_in_tbl.ctx_id = BNXT_CTX_VAL_INVAL; + + if (bp->flow_stat->tx_fc_out_tbl.ctx_id != BNXT_CTX_VAL_INVAL) + bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->tx_fc_out_tbl.ctx_id); + bp->flow_stat->tx_fc_out_tbl.ctx_id = BNXT_CTX_VAL_INVAL; +} + +static void bnxt_uninit_fc_ctx_mem(struct bnxt *bp) +{ + bnxt_unregister_fc_ctx_mem(bp); + + bnxt_free_ctx_mem_buf(&bp->flow_stat->rx_fc_in_tbl); + bnxt_free_ctx_mem_buf(&bp->flow_stat->rx_fc_out_tbl); + bnxt_free_ctx_mem_buf(&bp->flow_stat->tx_fc_in_tbl); + bnxt_free_ctx_mem_buf(&bp->flow_stat->tx_fc_out_tbl); +} + +static void bnxt_uninit_ctx_mem(struct bnxt *bp) +{ + if (BNXT_FLOW_XSTATS_EN(bp)) + bnxt_uninit_fc_ctx_mem(bp); +} + +static void +bnxt_free_error_recovery_info(struct bnxt *bp) +{ + rte_free(bp->recovery_info); + bp->recovery_info = NULL; + bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; +} + +static void +bnxt_uninit_locks(struct bnxt *bp) +{ + pthread_mutex_destroy(&bp->flow_lock); + pthread_mutex_destroy(&bp->def_cp_lock); +} + +static int +bnxt_uninit_resources(struct bnxt *bp, bool reconfig_dev) +{ + int rc; + + bnxt_free_int(bp); + bnxt_free_mem(bp, reconfig_dev); + bnxt_hwrm_func_buf_unrgtr(bp); + rc = bnxt_hwrm_func_driver_unregister(bp, 0); + bp->flags &= ~BNXT_FLAG_REGISTERED; + bnxt_free_ctx_mem(bp); + if (!reconfig_dev) { + bnxt_free_hwrm_resources(bp); + bnxt_free_error_recovery_info(bp); + } + + bnxt_uninit_ctx_mem(bp); + + bnxt_uninit_locks(bp); + rte_free(bp->ptp_cfg); + bp->ptp_cfg = NULL; + return rc; +} + +static int +bnxt_dev_uninit(struct rte_eth_dev *eth_dev) +{ + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return -EPERM; + + PMD_DRV_LOG(DEBUG, "Calling Device uninit\n"); + + if (eth_dev->state != RTE_ETH_DEV_UNUSED) + bnxt_dev_close_op(eth_dev); + + return 0; +} + +static int bnxt_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, + struct rte_pci_device *pci_dev) +{ + return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct bnxt), + bnxt_dev_init); +} + +static int bnxt_pci_remove(struct rte_pci_device *pci_dev) +{ + if (rte_eal_process_type() == RTE_PROC_PRIMARY) + return rte_eth_dev_pci_generic_remove(pci_dev, + 
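+ /* primary process: run the full per-port uninit */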
bnxt_dev_uninit); + else + return rte_eth_dev_pci_generic_remove(pci_dev, NULL); +} + +static struct rte_pci_driver bnxt_rte_pmd = { + .id_table = bnxt_pci_id_map, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, + .probe = bnxt_pci_probe, + .remove = bnxt_pci_remove, +}; + +static bool +is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv) +{ + if (strcmp(dev->device->driver->name, drv->driver.name)) + return false; + + return true; +} + +bool is_bnxt_supported(struct rte_eth_dev *dev) +{ + return is_device_supported(dev, &bnxt_rte_pmd); +} + +RTE_INIT(bnxt_init_log) +{ + bnxt_logtype_driver = rte_log_register("pmd.net.bnxt.driver"); + if (bnxt_logtype_driver >= 0) + rte_log_set_level(bnxt_logtype_driver, RTE_LOG_NOTICE); +} + +RTE_PMD_REGISTER_PCI(net_bnxt, bnxt_rte_pmd); +RTE_PMD_REGISTER_PCI_TABLE(net_bnxt, bnxt_pci_id_map); +RTE_PMD_REGISTER_KMOD_DEP(net_bnxt, "* igb_uio | uio_pci_generic | vfio-pci"); diff --git a/src/spdk/dpdk/drivers/net/bnxt/bnxt_filter.c b/src/spdk/dpdk/drivers/net/bnxt/bnxt_filter.c new file mode 100644 index 000000000..d822ff607 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnxt/bnxt_filter.c @@ -0,0 +1,199 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Broadcom + * All rights reserved. + */ + +#include + +#include +#include +#include +#include +#include +#include + +#include "bnxt.h" +#include "bnxt_filter.h" +#include "bnxt_hwrm.h" +#include "bnxt_vnic.h" +#include "hsi_struct_def_dpdk.h" + +/* + * Filter Functions + */ + +struct bnxt_filter_info *bnxt_alloc_filter(struct bnxt *bp) +{ + struct bnxt_filter_info *filter; + + filter = bnxt_get_unused_filter(bp); + if (!filter) { + PMD_DRV_LOG(ERR, "No more free filter resources\n"); + return NULL; + } + + filter->mac_index = INVALID_MAC_INDEX; + /* Default to L2 MAC Addr filter */ + filter->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX; + filter->enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR | + HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK; + memcpy(filter->l2_addr, bp->mac_addr, RTE_ETHER_ADDR_LEN); + memset(filter->l2_addr_mask, 0xff, RTE_ETHER_ADDR_LEN); + + return filter; +} + +struct bnxt_filter_info *bnxt_alloc_vf_filter(struct bnxt *bp, uint16_t vf) +{ + struct bnxt_filter_info *filter; + + filter = rte_zmalloc("bnxt_vf_filter_info", sizeof(*filter), 0); + if (!filter) { + PMD_DRV_LOG(ERR, "Failed to alloc memory for VF %hu filters\n", + vf); + return NULL; + } + + filter->fw_l2_filter_id = UINT64_MAX; + STAILQ_INSERT_TAIL(&bp->pf->vf_info[vf].filter, filter, next); + return filter; +} + +static void bnxt_init_filters(struct bnxt *bp) +{ + struct bnxt_filter_info *filter; + int i, max_filters; + + max_filters = bp->max_l2_ctx; + STAILQ_INIT(&bp->free_filter_list); + for (i = 0; i < max_filters; i++) { + filter = &bp->filter_info[i]; + filter->fw_l2_filter_id = UINT64_MAX; + filter->fw_em_filter_id = UINT64_MAX; + filter->fw_ntuple_filter_id = UINT64_MAX; + STAILQ_INSERT_TAIL(&bp->free_filter_list, filter, next); + } +} + +void bnxt_free_all_filters(struct bnxt *bp) +{ + struct bnxt_vnic_info *vnic; + struct bnxt_filter_info *filter, *temp_filter; + unsigned int i; + + for (i = 0; i < bp->nr_vnics; i++) { + vnic = &bp->vnic_info[i]; + filter = STAILQ_FIRST(&vnic->filter); + while (filter) { + temp_filter = STAILQ_NEXT(filter, next); + STAILQ_REMOVE(&vnic->filter, filter, + bnxt_filter_info, next); + STAILQ_INSERT_TAIL(&bp->free_filter_list, + filter, next); + filter = temp_filter; + } + STAILQ_INIT(&vnic->filter); + } + + 
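+ /* Also clear the L2 filters that were programmed on behalf of each VF. */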
for (i = 0; i < bp->pf->max_vfs; i++) { + STAILQ_FOREACH(filter, &bp->pf->vf_info[i].filter, next) { + bnxt_hwrm_clear_l2_filter(bp, filter); + } + } +} + +void bnxt_free_filter_mem(struct bnxt *bp) +{ + struct bnxt_filter_info *filter; + uint16_t max_filters, i; + int rc = 0; + + if (bp->filter_info == NULL) + return; + + /* Ensure that all filters are freed */ + max_filters = bp->max_l2_ctx; + for (i = 0; i < max_filters; i++) { + filter = &bp->filter_info[i]; + if (filter->fw_ntuple_filter_id != ((uint64_t)-1) && + filter->filter_type == HWRM_CFA_NTUPLE_FILTER) { + /* Call HWRM to try to free filter again */ + rc = bnxt_hwrm_clear_ntuple_filter(bp, filter); + if (rc) + PMD_DRV_LOG(ERR, + "Cannot free ntuple filter: %d\n", + rc); + } + filter->fw_ntuple_filter_id = UINT64_MAX; + + if (filter->fw_l2_filter_id != ((uint64_t)-1) && + filter->filter_type == HWRM_CFA_L2_FILTER) { + PMD_DRV_LOG(DEBUG, "L2 filter is not free\n"); + /* Call HWRM to try to free filter again */ + rc = bnxt_hwrm_clear_l2_filter(bp, filter); + if (rc) + PMD_DRV_LOG(ERR, + "Cannot free L2 filter: %d\n", + rc); + } + filter->fw_l2_filter_id = UINT64_MAX; + + } + STAILQ_INIT(&bp->free_filter_list); + + rte_free(bp->filter_info); + bp->filter_info = NULL; + + for (i = 0; i < bp->pf->max_vfs; i++) { + STAILQ_FOREACH(filter, &bp->pf->vf_info[i].filter, next) { + rte_free(filter); + STAILQ_REMOVE(&bp->pf->vf_info[i].filter, filter, + bnxt_filter_info, next); + } + } +} + +int bnxt_alloc_filter_mem(struct bnxt *bp) +{ + struct bnxt_filter_info *filter_mem; + uint16_t max_filters; + + max_filters = bp->max_l2_ctx; + /* Allocate memory for VNIC pool and filter pool */ + filter_mem = rte_zmalloc("bnxt_filter_info", + max_filters * sizeof(struct bnxt_filter_info), + 0); + if (filter_mem == NULL) { + PMD_DRV_LOG(ERR, "Failed to alloc memory for %d filters", + max_filters); + return -ENOMEM; + } + bp->filter_info = filter_mem; + bnxt_init_filters(bp); + return 0; +} + +struct bnxt_filter_info *bnxt_get_unused_filter(struct bnxt *bp) +{ + struct bnxt_filter_info *filter; + + /* Find the 1st unused filter from the free_filter_list pool*/ + filter = STAILQ_FIRST(&bp->free_filter_list); + if (!filter) { + PMD_DRV_LOG(ERR, "No more free filter resources\n"); + return NULL; + } + STAILQ_REMOVE_HEAD(&bp->free_filter_list, next); + + return filter; +} + +void bnxt_free_filter(struct bnxt *bp, struct bnxt_filter_info *filter) +{ + memset(filter, 0, sizeof(*filter)); + filter->mac_index = INVALID_MAC_INDEX; + filter->fw_l2_filter_id = UINT64_MAX; + filter->fw_ntuple_filter_id = UINT64_MAX; + filter->fw_em_filter_id = UINT64_MAX; + STAILQ_INSERT_TAIL(&bp->free_filter_list, filter, next); +} diff --git a/src/spdk/dpdk/drivers/net/bnxt/bnxt_filter.h b/src/spdk/dpdk/drivers/net/bnxt/bnxt_filter.h new file mode 100644 index 000000000..4b2b3cadc --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnxt/bnxt_filter.h @@ -0,0 +1,175 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Broadcom + * All rights reserved. 
+ */ + +#ifndef _BNXT_FILTER_H_ +#define _BNXT_FILTER_H_ + +#include + +#define bnxt_vlan_filter_exists(bp, filter, chk, vlan_id) \ + (((filter)->enables & (chk)) && \ + ((filter)->l2_ivlan == (vlan_id) && \ + (filter)->l2_ivlan_mask == 0x0FFF) && \ + !memcmp((filter)->l2_addr, (bp)->mac_addr, \ + RTE_ETHER_ADDR_LEN)) +struct bnxt; + +#define BNXT_FLOW_L2_VALID_FLAG BIT(0) +#define BNXT_FLOW_L2_SRC_VALID_FLAG BIT(1) +#define BNXT_FLOW_L2_INNER_SRC_VALID_FLAG BIT(2) +#define BNXT_FLOW_L2_DST_VALID_FLAG BIT(3) +#define BNXT_FLOW_L2_INNER_DST_VALID_FLAG BIT(4) +#define BNXT_FLOW_L2_DROP_FLAG BIT(5) +#define BNXT_FLOW_PARSE_INNER_FLAG BIT(6) +#define BNXT_FLOW_MARK_FLAG BIT(7) + +struct bnxt_flow_stats { + uint64_t packets; + uint64_t bytes; +}; + +struct bnxt_filter_info { + STAILQ_ENTRY(bnxt_filter_info) next; + uint32_t flow_id; + uint64_t fw_l2_filter_id; + struct bnxt_filter_info *matching_l2_fltr_ptr; + uint64_t fw_em_filter_id; + uint64_t fw_ntuple_filter_id; +#define INVALID_MAC_INDEX ((uint16_t)-1) + uint16_t mac_index; +#define HWRM_CFA_L2_FILTER 0 +#define HWRM_CFA_EM_FILTER 1 +#define HWRM_CFA_NTUPLE_FILTER 2 +#define HWRM_CFA_TUNNEL_REDIRECT_FILTER 3 + uint8_t filter_type; + uint32_t dst_id; + + /* Filter Characteristics */ + uint32_t flags; + uint32_t enables; + uint32_t l2_ref_cnt; + uint8_t l2_addr[RTE_ETHER_ADDR_LEN]; + uint8_t l2_addr_mask[RTE_ETHER_ADDR_LEN]; + uint32_t valid_flags; + uint16_t l2_ovlan; + uint16_t l2_ovlan_mask; + uint16_t l2_ivlan; + uint16_t l2_ivlan_mask; + uint8_t t_l2_addr[RTE_ETHER_ADDR_LEN]; + uint8_t t_l2_addr_mask[RTE_ETHER_ADDR_LEN]; + uint16_t t_l2_ovlan; + uint16_t t_l2_ovlan_mask; + uint16_t t_l2_ivlan; + uint16_t t_l2_ivlan_mask; + uint8_t tunnel_type; + uint16_t mirror_vnic_id; + uint32_t vni; + uint8_t pri_hint; + uint64_t l2_filter_id_hint; + uint32_t src_id; + uint8_t src_type; + uint8_t src_macaddr[6]; + uint8_t dst_macaddr[6]; + uint32_t dst_ipaddr[4]; + uint32_t dst_ipaddr_mask[4]; + uint32_t src_ipaddr[4]; + uint32_t src_ipaddr_mask[4]; + uint16_t dst_port; + uint16_t dst_port_mask; + uint16_t src_port; + uint16_t src_port_mask; + uint16_t ip_protocol; + uint16_t ip_addr_type; + uint16_t ethertype; + uint32_t priority; + /* Backptr to vnic. 
As of now, used only by an L2 filter + * to remember which vnic it was created on + */ + struct bnxt_vnic_info *vnic; + uint32_t mark; + struct bnxt_flow_stats hw_stats; +}; + +struct bnxt_filter_info *bnxt_alloc_filter(struct bnxt *bp); +struct bnxt_filter_info *bnxt_alloc_vf_filter(struct bnxt *bp, uint16_t vf); +void bnxt_free_all_filters(struct bnxt *bp); +void bnxt_free_filter_mem(struct bnxt *bp); +int bnxt_alloc_filter_mem(struct bnxt *bp); +struct bnxt_filter_info *bnxt_get_unused_filter(struct bnxt *bp); +void bnxt_free_filter(struct bnxt *bp, struct bnxt_filter_info *filter); +struct bnxt_filter_info *bnxt_get_l2_filter(struct bnxt *bp, + struct bnxt_filter_info *nf, struct bnxt_vnic_info *vnic); + +#define NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_MACADDR \ + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR +#define EM_FLOW_ALLOC_INPUT_EN_SRC_MACADDR \ + HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_MACADDR +#define NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR \ + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_MACADDR +#define EM_FLOW_ALLOC_INPUT_EN_DST_MACADDR \ + HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_MACADDR +#define NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE \ + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE +#define EM_FLOW_ALLOC_INPUT_EN_ETHERTYPE \ + HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_ETHERTYPE +#define EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID \ + HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_OVLAN_VID +#define NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR \ + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR +#define NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK \ + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR_MASK +#define NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR \ + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR +#define NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK \ + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR_MASK +#define NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT \ + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT +#define NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK \ + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT_MASK +#define NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT \ + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT +#define NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK \ + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT_MASK +#define NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO \ + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL +#define EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR \ + HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_IPADDR +#define EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR \ + HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_IPADDR +#define EM_FLOW_ALLOC_INPUT_EN_SRC_PORT \ + HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_PORT +#define EM_FLOW_ALLOC_INPUT_EN_DST_PORT \ + HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_PORT +#define EM_FLOW_ALLOC_INPUT_EN_IP_PROTO \ + HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IP_PROTOCOL +#define EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 \ + HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 +#define NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 \ + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 +#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN \ + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_VXLAN +#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE \ + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_NVGRE +#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE \ + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_IPGRE +#define L2_FILTER_ALLOC_INPUT_EN_L2_ADDR_MASK \ + HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK +#define NTUPLE_FLTR_ALLOC_INPUT_IP_PROTOCOL_UDP \ + 
HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_PROTOCOL_UDP +#define NTUPLE_FLTR_ALLOC_INPUT_IP_PROTOCOL_TCP \ + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_PROTOCOL_TCP +#define NTUPLE_FLTR_ALLOC_INPUT_IP_PROTOCOL_UNKNOWN \ + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_PROTOCOL_UNKNOWN +#define NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4 \ + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV4 +#define NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID \ + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID +#define NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID \ + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID +#define L2_FILTER_ALLOC_INPUT_EN_T_NUM_VLANS \ + HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_NUM_VLANS +#define L2_FILTER_ALLOC_INPUT_EN_NUM_VLANS \ + HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_NUM_VLANS +#endif diff --git a/src/spdk/dpdk/drivers/net/bnxt/bnxt_flow.c b/src/spdk/dpdk/drivers/net/bnxt/bnxt_flow.c new file mode 100644 index 000000000..84a21dba9 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnxt/bnxt_flow.c @@ -0,0 +1,2054 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Broadcom + * All rights reserved. + */ + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "bnxt.h" +#include "bnxt_filter.h" +#include "bnxt_hwrm.h" +#include "bnxt_ring.h" +#include "bnxt_rxq.h" +#include "bnxt_vnic.h" +#include "hsi_struct_def_dpdk.h" + +static int +bnxt_flow_args_validate(const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + if (!pattern) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM_NUM, + NULL, + "NULL pattern."); + return -rte_errno; + } + + if (!actions) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_NUM, + NULL, + "NULL action."); + return -rte_errno; + } + + if (!attr) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR, + NULL, + "NULL attribute."); + return -rte_errno; + } + + return 0; +} + +static const struct rte_flow_item * +bnxt_flow_non_void_item(const struct rte_flow_item *cur) +{ + while (1) { + if (cur->type != RTE_FLOW_ITEM_TYPE_VOID) + return cur; + cur++; + } +} + +static const struct rte_flow_action * +bnxt_flow_non_void_action(const struct rte_flow_action *cur) +{ + while (1) { + if (cur->type != RTE_FLOW_ACTION_TYPE_VOID) + return cur; + cur++; + } +} + +static int +bnxt_filter_type_check(const struct rte_flow_item pattern[], + struct rte_flow_error *error) +{ + const struct rte_flow_item *item = + bnxt_flow_non_void_item(pattern); + int use_ntuple = 1; + bool has_vlan = 0; + + while (item->type != RTE_FLOW_ITEM_TYPE_END) { + switch (item->type) { + case RTE_FLOW_ITEM_TYPE_ANY: + case RTE_FLOW_ITEM_TYPE_ETH: + use_ntuple = 0; + break; + case RTE_FLOW_ITEM_TYPE_VLAN: + use_ntuple = 0; + has_vlan = 1; + break; + case RTE_FLOW_ITEM_TYPE_IPV4: + case RTE_FLOW_ITEM_TYPE_IPV6: + case RTE_FLOW_ITEM_TYPE_TCP: + case RTE_FLOW_ITEM_TYPE_UDP: + /* FALLTHROUGH */ + /* need ntuple match, reset exact match */ + use_ntuple |= 1; + break; + default: + PMD_DRV_LOG(DEBUG, "Unknown Flow type\n"); + use_ntuple |= 0; + } + item++; + } + + if (has_vlan && use_ntuple) { + PMD_DRV_LOG(ERR, + "VLAN flow cannot use NTUPLE filter\n"); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Cannot use VLAN with NTUPLE"); + return -rte_errno; + } + + return use_ntuple; +} + +static int +bnxt_validate_and_parse_flow_type(struct bnxt *bp, + const struct rte_flow_attr *attr, + 
const struct rte_flow_item pattern[], + struct rte_flow_error *error, + struct bnxt_filter_info *filter) +{ + const struct rte_flow_item *item = bnxt_flow_non_void_item(pattern); + const struct rte_flow_item_vlan *vlan_spec, *vlan_mask; + const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask; + const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask; + const struct rte_flow_item_tcp *tcp_spec, *tcp_mask; + const struct rte_flow_item_udp *udp_spec, *udp_mask; + const struct rte_flow_item_eth *eth_spec, *eth_mask; + const struct rte_ether_addr *dst, *src; + const struct rte_flow_item_nvgre *nvgre_spec; + const struct rte_flow_item_nvgre *nvgre_mask; + const struct rte_flow_item_gre *gre_spec; + const struct rte_flow_item_gre *gre_mask; + const struct rte_flow_item_vxlan *vxlan_spec; + const struct rte_flow_item_vxlan *vxlan_mask; + uint8_t vni_mask[] = {0xFF, 0xFF, 0xFF}; + uint8_t tni_mask[] = {0xFF, 0xFF, 0xFF}; + const struct rte_flow_item_vf *vf_spec; + uint32_t tenant_id_be = 0, valid_flags = 0; + bool vni_masked = 0; + bool tni_masked = 0; + uint32_t en_ethertype; + uint8_t inner = 0; + uint32_t vf = 0; + uint32_t en = 0; + int use_ntuple; + int dflt_vnic; + + use_ntuple = bnxt_filter_type_check(pattern, error); + if (use_ntuple < 0) + return use_ntuple; + PMD_DRV_LOG(DEBUG, "Use NTUPLE %d\n", use_ntuple); + + filter->filter_type = use_ntuple ? + HWRM_CFA_NTUPLE_FILTER : HWRM_CFA_L2_FILTER; + en_ethertype = use_ntuple ? + NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE : + EM_FLOW_ALLOC_INPUT_EN_ETHERTYPE; + + while (item->type != RTE_FLOW_ITEM_TYPE_END) { + if (item->last) { + /* last or range is NOT supported as match criteria */ + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "No support for range"); + return -rte_errno; + } + + switch (item->type) { + case RTE_FLOW_ITEM_TYPE_ANY: + inner = + ((const struct rte_flow_item_any *)item->spec)->num > 3; + if (inner) + PMD_DRV_LOG(DEBUG, "Parse inner header\n"); + break; + case RTE_FLOW_ITEM_TYPE_ETH: + if (!item->spec || !item->mask) + break; + + eth_spec = item->spec; + eth_mask = item->mask; + + /* Source MAC address mask cannot be partially set. + * Should be All 0's or all 1's. + * Destination MAC address mask must not be partially + * set. Should be all 1's or all 0's. + */ + if ((!rte_is_zero_ether_addr(ð_mask->src) && + !rte_is_broadcast_ether_addr(ð_mask->src)) || + (!rte_is_zero_ether_addr(ð_mask->dst) && + !rte_is_broadcast_ether_addr(ð_mask->dst))) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "MAC_addr mask not valid"); + return -rte_errno; + } + + /* Mask is not allowed. Only exact matches are */ + if (eth_mask->type && + eth_mask->type != RTE_BE16(0xffff)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "ethertype mask not valid"); + return -rte_errno; + } + + if (rte_is_broadcast_ether_addr(ð_mask->dst)) { + dst = ð_spec->dst; + if (!rte_is_valid_assigned_ether_addr(dst)) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "DMAC is invalid"); + PMD_DRV_LOG(ERR, + "DMAC is invalid!\n"); + return -rte_errno; + } + rte_memcpy(filter->dst_macaddr, + ð_spec->dst, RTE_ETHER_ADDR_LEN); + en |= use_ntuple ? + NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR : + EM_FLOW_ALLOC_INPUT_EN_DST_MACADDR; + valid_flags |= inner ? 
+ BNXT_FLOW_L2_INNER_DST_VALID_FLAG : + BNXT_FLOW_L2_DST_VALID_FLAG; + filter->priority = attr->priority; + PMD_DRV_LOG(DEBUG, + "Creating a priority flow\n"); + } + if (rte_is_broadcast_ether_addr(ð_mask->src)) { + src = ð_spec->src; + if (!rte_is_valid_assigned_ether_addr(src)) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "SMAC is invalid"); + PMD_DRV_LOG(ERR, + "SMAC is invalid!\n"); + return -rte_errno; + } + rte_memcpy(filter->src_macaddr, + ð_spec->src, RTE_ETHER_ADDR_LEN); + en |= use_ntuple ? + NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_MACADDR : + EM_FLOW_ALLOC_INPUT_EN_SRC_MACADDR; + valid_flags |= inner ? + BNXT_FLOW_L2_INNER_SRC_VALID_FLAG : + BNXT_FLOW_L2_SRC_VALID_FLAG; + } /* + * else { + * PMD_DRV_LOG(ERR, "Handle this condition\n"); + * } + */ + if (eth_mask->type) { + filter->ethertype = + rte_be_to_cpu_16(eth_spec->type); + en |= en_ethertype; + } + if (inner) + valid_flags |= BNXT_FLOW_PARSE_INNER_FLAG; + + break; + case RTE_FLOW_ITEM_TYPE_VLAN: + vlan_spec = item->spec; + vlan_mask = item->mask; + if (en & en_ethertype) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "VLAN TPID matching is not" + " supported"); + return -rte_errno; + } + if (vlan_mask->tci && + vlan_mask->tci == RTE_BE16(0x0fff)) { + /* Only the VLAN ID can be matched. */ + filter->l2_ovlan = + rte_be_to_cpu_16(vlan_spec->tci & + RTE_BE16(0x0fff)); + en |= EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID; + } else { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "VLAN mask is invalid"); + return -rte_errno; + } + if (vlan_mask->inner_type && + vlan_mask->inner_type != RTE_BE16(0xffff)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "inner ethertype mask not" + " valid"); + return -rte_errno; + } + if (vlan_mask->inner_type) { + filter->ethertype = + rte_be_to_cpu_16(vlan_spec->inner_type); + en |= en_ethertype; + } + + break; + case RTE_FLOW_ITEM_TYPE_IPV4: + /* If mask is not involved, we could use EM filters. */ + ipv4_spec = item->spec; + ipv4_mask = item->mask; + + if (!item->spec || !item->mask) + break; + + /* Only IP DST and SRC fields are maskable. */ + if (ipv4_mask->hdr.version_ihl || + ipv4_mask->hdr.type_of_service || + ipv4_mask->hdr.total_length || + ipv4_mask->hdr.packet_id || + ipv4_mask->hdr.fragment_offset || + ipv4_mask->hdr.time_to_live || + ipv4_mask->hdr.next_proto_id || + ipv4_mask->hdr.hdr_checksum) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid IPv4 mask."); + return -rte_errno; + } + + filter->dst_ipaddr[0] = ipv4_spec->hdr.dst_addr; + filter->src_ipaddr[0] = ipv4_spec->hdr.src_addr; + + if (use_ntuple) + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR | + NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR; + else + en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR | + EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR; + + if (ipv4_mask->hdr.src_addr) { + filter->src_ipaddr_mask[0] = + ipv4_mask->hdr.src_addr; + en |= !use_ntuple ? 0 : + NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK; + } + + if (ipv4_mask->hdr.dst_addr) { + filter->dst_ipaddr_mask[0] = + ipv4_mask->hdr.dst_addr; + en |= !use_ntuple ? 0 : + NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK; + } + + filter->ip_addr_type = use_ntuple ? 
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV4 : + HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV4; + + if (ipv4_spec->hdr.next_proto_id) { + filter->ip_protocol = + ipv4_spec->hdr.next_proto_id; + if (use_ntuple) + en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO; + else + en |= EM_FLOW_ALLOC_INPUT_EN_IP_PROTO; + } + break; + case RTE_FLOW_ITEM_TYPE_IPV6: + ipv6_spec = item->spec; + ipv6_mask = item->mask; + + if (!item->spec || !item->mask) + break; + + /* Only IP DST and SRC fields are maskable. */ + if (ipv6_mask->hdr.vtc_flow || + ipv6_mask->hdr.payload_len || + ipv6_mask->hdr.proto || + ipv6_mask->hdr.hop_limits) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid IPv6 mask."); + return -rte_errno; + } + + if (use_ntuple) + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR | + NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR; + else + en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR | + EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR; + + rte_memcpy(filter->src_ipaddr, + ipv6_spec->hdr.src_addr, 16); + rte_memcpy(filter->dst_ipaddr, + ipv6_spec->hdr.dst_addr, 16); + + if (!bnxt_check_zero_bytes(ipv6_mask->hdr.src_addr, + 16)) { + rte_memcpy(filter->src_ipaddr_mask, + ipv6_mask->hdr.src_addr, 16); + en |= !use_ntuple ? 0 : + NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK; + } + + if (!bnxt_check_zero_bytes(ipv6_mask->hdr.dst_addr, + 16)) { + rte_memcpy(filter->dst_ipaddr_mask, + ipv6_mask->hdr.dst_addr, 16); + en |= !use_ntuple ? 0 : + NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK; + } + + filter->ip_addr_type = use_ntuple ? + NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 : + EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV6; + break; + case RTE_FLOW_ITEM_TYPE_TCP: + tcp_spec = item->spec; + tcp_mask = item->mask; + + if (!item->spec || !item->mask) + break; + + /* Check TCP mask. Only DST & SRC ports are maskable */ + if (tcp_mask->hdr.sent_seq || + tcp_mask->hdr.recv_ack || + tcp_mask->hdr.data_off || + tcp_mask->hdr.tcp_flags || + tcp_mask->hdr.rx_win || + tcp_mask->hdr.cksum || + tcp_mask->hdr.tcp_urp) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid TCP mask"); + return -rte_errno; + } + + filter->src_port = tcp_spec->hdr.src_port; + filter->dst_port = tcp_spec->hdr.dst_port; + + if (use_ntuple) + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT | + NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT; + else + en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT | + EM_FLOW_ALLOC_INPUT_EN_DST_PORT; + + if (tcp_mask->hdr.dst_port) { + filter->dst_port_mask = tcp_mask->hdr.dst_port; + en |= !use_ntuple ? 0 : + NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK; + } + + if (tcp_mask->hdr.src_port) { + filter->src_port_mask = tcp_mask->hdr.src_port; + en |= !use_ntuple ? 0 : + NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK; + } + break; + case RTE_FLOW_ITEM_TYPE_UDP: + udp_spec = item->spec; + udp_mask = item->mask; + + if (!item->spec || !item->mask) + break; + + if (udp_mask->hdr.dgram_len || + udp_mask->hdr.dgram_cksum) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid UDP mask"); + return -rte_errno; + } + + filter->src_port = udp_spec->hdr.src_port; + filter->dst_port = udp_spec->hdr.dst_port; + + if (use_ntuple) + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT | + NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT; + else + en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT | + EM_FLOW_ALLOC_INPUT_EN_DST_PORT; + + if (udp_mask->hdr.dst_port) { + filter->dst_port_mask = udp_mask->hdr.dst_port; + en |= !use_ntuple ? 
0 : + NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK; + } + + if (udp_mask->hdr.src_port) { + filter->src_port_mask = udp_mask->hdr.src_port; + en |= !use_ntuple ? 0 : + NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK; + } + break; + case RTE_FLOW_ITEM_TYPE_VXLAN: + vxlan_spec = item->spec; + vxlan_mask = item->mask; + /* Check if VXLAN item is used to describe protocol. + * If yes, both spec and mask should be NULL. + * If no, both spec and mask shouldn't be NULL. + */ + if ((!vxlan_spec && vxlan_mask) || + (vxlan_spec && !vxlan_mask)) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid VXLAN item"); + return -rte_errno; + } + + if (!vxlan_spec && !vxlan_mask) { + filter->tunnel_type = + CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN; + break; + } + + if (vxlan_spec->rsvd1 || vxlan_spec->rsvd0[0] || + vxlan_spec->rsvd0[1] || vxlan_spec->rsvd0[2] || + vxlan_spec->flags != 0x8) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid VXLAN item"); + return -rte_errno; + } + + /* Check if VNI is masked. */ + if (vxlan_spec && vxlan_mask) { + vni_masked = + !!memcmp(vxlan_mask->vni, vni_mask, + RTE_DIM(vni_mask)); + if (vni_masked) { + rte_flow_error_set + (error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid VNI mask"); + return -rte_errno; + } + + rte_memcpy(((uint8_t *)&tenant_id_be + 1), + vxlan_spec->vni, 3); + filter->vni = + rte_be_to_cpu_32(tenant_id_be); + filter->tunnel_type = + CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN; + } + break; + case RTE_FLOW_ITEM_TYPE_NVGRE: + nvgre_spec = item->spec; + nvgre_mask = item->mask; + /* Check if NVGRE item is used to describe protocol. + * If yes, both spec and mask should be NULL. + * If no, both spec and mask shouldn't be NULL. + */ + if ((!nvgre_spec && nvgre_mask) || + (nvgre_spec && !nvgre_mask)) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid NVGRE item"); + return -rte_errno; + } + + if (!nvgre_spec && !nvgre_mask) { + filter->tunnel_type = + CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE; + break; + } + + if (nvgre_spec->c_k_s_rsvd0_ver != 0x2000 || + nvgre_spec->protocol != 0x6558) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid NVGRE item"); + return -rte_errno; + } + + if (nvgre_spec && nvgre_mask) { + tni_masked = + !!memcmp(nvgre_mask->tni, tni_mask, + RTE_DIM(tni_mask)); + if (tni_masked) { + rte_flow_error_set + (error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid TNI mask"); + return -rte_errno; + } + rte_memcpy(((uint8_t *)&tenant_id_be + 1), + nvgre_spec->tni, 3); + filter->vni = + rte_be_to_cpu_32(tenant_id_be); + filter->tunnel_type = + CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE; + } + break; + + case RTE_FLOW_ITEM_TYPE_GRE: + gre_spec = (const struct rte_flow_item_gre *)item->spec; + gre_mask = (const struct rte_flow_item_gre *)item->mask; + + /* + *Check if GRE item is used to describe protocol. + * If yes, both spec and mask should be NULL. + * If no, both spec and mask shouldn't be NULL. 
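+ * When both spec and mask are supplied, no GRE key fields are matched
+ * here; the item is simply accepted.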
+ */ + if (!!gre_spec ^ !!gre_mask) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid GRE item"); + return -rte_errno; + } + + if (!gre_spec && !gre_mask) { + filter->tunnel_type = + CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE; + break; + } + break; + + case RTE_FLOW_ITEM_TYPE_VF: + vf_spec = item->spec; + vf = vf_spec->id; + if (!BNXT_PF(bp)) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Configuring on a VF!"); + return -rte_errno; + } + + if (vf >= bp->pdev->max_vfs) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Incorrect VF id!"); + return -rte_errno; + } + + if (!attr->transfer) { + rte_flow_error_set(error, + ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Matching VF traffic without" + " affecting it (transfer attribute)" + " is unsupported"); + return -rte_errno; + } + + filter->mirror_vnic_id = + dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf); + if (dflt_vnic < 0) { + /* This simply indicates there's no driver + * loaded. This is not an error. + */ + rte_flow_error_set + (error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Unable to get default VNIC for VF"); + return -rte_errno; + } + + filter->mirror_vnic_id = dflt_vnic; + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID; + break; + default: + break; + } + item++; + } + filter->enables = en; + filter->valid_flags = valid_flags; + + return 0; +} + +/* Parse attributes */ +static int +bnxt_flow_parse_attr(const struct rte_flow_attr *attr, + struct rte_flow_error *error) +{ + /* Must be input direction */ + if (!attr->ingress) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, + attr, + "Only support ingress."); + return -rte_errno; + } + + /* Not supported */ + if (attr->egress) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, + attr, + "No support for egress."); + return -rte_errno; + } + + return 0; +} + +static struct bnxt_filter_info * +bnxt_find_matching_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf) +{ + struct bnxt_filter_info *mf, *f0; + struct bnxt_vnic_info *vnic0; + int i; + + vnic0 = BNXT_GET_DEFAULT_VNIC(bp); + f0 = STAILQ_FIRST(&vnic0->filter); + + /* This flow has same DST MAC as the port/l2 filter. */ + if (memcmp(f0->l2_addr, nf->dst_macaddr, RTE_ETHER_ADDR_LEN) == 0) + return f0; + + for (i = bp->max_vnics - 1; i >= 0; i--) { + struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; + + if (vnic->fw_vnic_id == INVALID_VNIC_ID) + continue; + + STAILQ_FOREACH(mf, &vnic->filter, next) { + + if (mf->matching_l2_fltr_ptr) + continue; + + if (mf->ethertype == nf->ethertype && + mf->l2_ovlan == nf->l2_ovlan && + mf->l2_ovlan_mask == nf->l2_ovlan_mask && + mf->l2_ivlan == nf->l2_ivlan && + mf->l2_ivlan_mask == nf->l2_ivlan_mask && + !memcmp(mf->src_macaddr, nf->src_macaddr, + RTE_ETHER_ADDR_LEN) && + !memcmp(mf->dst_macaddr, nf->dst_macaddr, + RTE_ETHER_ADDR_LEN)) + return mf; + } + } + return NULL; +} + +static struct bnxt_filter_info * +bnxt_create_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf, + struct bnxt_vnic_info *vnic) +{ + struct bnxt_filter_info *filter1; + int rc; + + /* Alloc new L2 filter. + * This flow needs MAC filter which does not match any existing + * L2 filters. 
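+ * The candidate below starts as a copy of the flow's filter and then has
+ * its flags and enables rewritten so that only the L2 address (and, for
+ * drop rules, the VLAN count) fields remain relevant.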
+ */ + filter1 = bnxt_get_unused_filter(bp); + if (filter1 == NULL) + return NULL; + + memcpy(filter1, nf, sizeof(*filter1)); + + filter1->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_XDP_DISABLE; + filter1->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX; + if (nf->valid_flags & BNXT_FLOW_L2_SRC_VALID_FLAG || + nf->valid_flags & BNXT_FLOW_L2_DST_VALID_FLAG) { + filter1->flags |= + HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST; + PMD_DRV_LOG(DEBUG, "Create Outer filter\n"); + } + + if (nf->filter_type == HWRM_CFA_L2_FILTER && + (nf->valid_flags & BNXT_FLOW_L2_SRC_VALID_FLAG || + nf->valid_flags & BNXT_FLOW_L2_INNER_SRC_VALID_FLAG)) { + PMD_DRV_LOG(DEBUG, "Create L2 filter for SRC MAC\n"); + filter1->flags |= + HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_SOURCE_VALID; + memcpy(filter1->l2_addr, nf->src_macaddr, RTE_ETHER_ADDR_LEN); + } else { + PMD_DRV_LOG(DEBUG, "Create L2 filter for DST MAC\n"); + memcpy(filter1->l2_addr, nf->dst_macaddr, RTE_ETHER_ADDR_LEN); + } + + if (nf->priority && + (nf->valid_flags & BNXT_FLOW_L2_DST_VALID_FLAG || + nf->valid_flags & BNXT_FLOW_L2_INNER_DST_VALID_FLAG)) { + /* Tell the FW where to place the filter in the table. */ + if (nf->priority > 65535) { + filter1->pri_hint = + HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_BELOW_FILTER; + /* This will place the filter in TCAM */ + filter1->l2_filter_id_hint = (uint64_t)-1; + } + } + + if (nf->valid_flags & (BNXT_FLOW_L2_DST_VALID_FLAG | + BNXT_FLOW_L2_SRC_VALID_FLAG | + BNXT_FLOW_L2_INNER_SRC_VALID_FLAG | + BNXT_FLOW_L2_INNER_DST_VALID_FLAG)) { + filter1->enables = + HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR | + L2_FILTER_ALLOC_INPUT_EN_L2_ADDR_MASK; + memset(filter1->l2_addr_mask, 0xff, RTE_ETHER_ADDR_LEN); + } + + if (nf->valid_flags & BNXT_FLOW_L2_DROP_FLAG) { + filter1->flags |= + HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_DROP; + if (nf->ethertype == RTE_ETHER_TYPE_IPV4) { + /* Num VLANs for drop filter will/should be 0. + * If the req is memset to 0, then the count will + * be automatically set to 0. 
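+ * Only the matching enable bit needs to be set: T_NUM_VLANS when the
+ * inner headers were parsed, otherwise NUM_VLANS together with the
+ * OUTERMOST flag.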
+ */ + if (nf->valid_flags & BNXT_FLOW_PARSE_INNER_FLAG) { + filter1->enables |= + L2_FILTER_ALLOC_INPUT_EN_T_NUM_VLANS; + } else { + filter1->enables |= + L2_FILTER_ALLOC_INPUT_EN_NUM_VLANS; + filter1->flags |= + HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST; + } + } + } + + rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, + filter1); + if (rc) { + bnxt_free_filter(bp, filter1); + return NULL; + } + return filter1; +} + +struct bnxt_filter_info * +bnxt_get_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf, + struct bnxt_vnic_info *vnic) +{ + struct bnxt_filter_info *l2_filter = NULL; + + l2_filter = bnxt_find_matching_l2_filter(bp, nf); + if (l2_filter) { + l2_filter->l2_ref_cnt++; + } else { + l2_filter = bnxt_create_l2_filter(bp, nf, vnic); + if (l2_filter) { + STAILQ_INSERT_TAIL(&vnic->filter, l2_filter, next); + l2_filter->vnic = vnic; + } + } + nf->matching_l2_fltr_ptr = l2_filter; + + return l2_filter; +} + +static int bnxt_vnic_prep(struct bnxt *bp, struct bnxt_vnic_info *vnic) +{ + struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf; + uint64_t rx_offloads = dev_conf->rxmode.offloads; + int rc; + + rc = bnxt_vnic_grp_alloc(bp, vnic); + if (rc) + goto ret; + + rc = bnxt_hwrm_vnic_alloc(bp, vnic); + if (rc) { + PMD_DRV_LOG(ERR, "HWRM vnic alloc failure rc: %x\n", rc); + goto ret; + } + bp->nr_vnics++; + + /* RSS context is required only when there is more than one RSS ring */ + if (vnic->rx_queue_cnt > 1) { + rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 0 /* ctx_idx 0 */); + if (rc) { + PMD_DRV_LOG(ERR, + "HWRM vnic ctx alloc failure: %x\n", rc); + goto ret; + } + } else { + PMD_DRV_LOG(DEBUG, "No RSS context required\n"); + } + + if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP) + vnic->vlan_strip = true; + else + vnic->vlan_strip = false; + + rc = bnxt_hwrm_vnic_cfg(bp, vnic); + if (rc) + goto ret; + + bnxt_hwrm_vnic_plcmode_cfg(bp, vnic); + +ret: + return rc; +} + +static int match_vnic_rss_cfg(struct bnxt *bp, + struct bnxt_vnic_info *vnic, + const struct rte_flow_action_rss *rss) +{ + unsigned int match = 0, i; + + if (vnic->rx_queue_cnt != rss->queue_num) + return -EINVAL; + + for (i = 0; i < rss->queue_num; i++) { + if (!bp->rx_queues[rss->queue[i]]->vnic->rx_queue_cnt && + !bp->rx_queues[rss->queue[i]]->rx_started) + return -EINVAL; + } + + for (i = 0; i < vnic->rx_queue_cnt; i++) { + int j; + + for (j = 0; j < vnic->rx_queue_cnt; j++) { + if (bp->grp_info[rss->queue[i]].fw_grp_id == + vnic->fw_grp_ids[j]) + match++; + } + } + + if (match != vnic->rx_queue_cnt) { + PMD_DRV_LOG(ERR, + "VNIC queue count %d vs queues matched %d\n", + match, vnic->rx_queue_cnt); + return -EINVAL; + } + + return 0; +} + +static void +bnxt_update_filter_flags_en(struct bnxt_filter_info *filter, + struct bnxt_filter_info *filter1, + int use_ntuple) +{ + if (!use_ntuple && + !(filter->valid_flags & + ~(BNXT_FLOW_L2_DST_VALID_FLAG | + BNXT_FLOW_L2_SRC_VALID_FLAG | + BNXT_FLOW_L2_INNER_SRC_VALID_FLAG | + BNXT_FLOW_L2_INNER_DST_VALID_FLAG | + BNXT_FLOW_L2_DROP_FLAG | + BNXT_FLOW_PARSE_INNER_FLAG))) { + filter->flags = filter1->flags; + filter->enables = filter1->enables; + filter->filter_type = HWRM_CFA_L2_FILTER; + memcpy(filter->l2_addr, filter1->l2_addr, RTE_ETHER_ADDR_LEN); + memset(filter->l2_addr_mask, 0xff, RTE_ETHER_ADDR_LEN); + filter->pri_hint = filter1->pri_hint; + filter->l2_filter_id_hint = filter1->l2_filter_id_hint; + } + filter->fw_l2_filter_id = filter1->fw_l2_filter_id; + filter->l2_ref_cnt = filter1->l2_ref_cnt; + filter->flow_id = filter1->flow_id; + PMD_DRV_LOG(DEBUG, + 
"l2_filter: %p fw_l2_filter_id %" PRIx64 " l2_ref_cnt %u\n", + filter1, filter->fw_l2_filter_id, filter->l2_ref_cnt); +} + +static int +bnxt_validate_and_parse_flow(struct rte_eth_dev *dev, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + const struct rte_flow_attr *attr, + struct rte_flow_error *error, + struct bnxt_filter_info *filter) +{ + const struct rte_flow_action *act = + bnxt_flow_non_void_action(actions); + struct bnxt *bp = dev->data->dev_private; + struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf; + struct bnxt_vnic_info *vnic = NULL, *vnic0 = NULL; + const struct rte_flow_action_queue *act_q; + const struct rte_flow_action_vf *act_vf; + struct bnxt_filter_info *filter1 = NULL; + const struct rte_flow_action_rss *rss; + struct bnxt_rx_queue *rxq = NULL; + int dflt_vnic, vnic_id; + unsigned int rss_idx; + uint32_t vf = 0, i; + int rc, use_ntuple; + + rc = + bnxt_validate_and_parse_flow_type(bp, attr, pattern, error, filter); + if (rc != 0) + goto ret; + + rc = bnxt_flow_parse_attr(attr, error); + if (rc != 0) + goto ret; + + /* Since we support ingress attribute only - right now. */ + if (filter->filter_type == HWRM_CFA_EM_FILTER) + filter->flags = HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX; + + use_ntuple = bnxt_filter_type_check(pattern, error); + +start: + switch (act->type) { + case RTE_FLOW_ACTION_TYPE_QUEUE: + /* Allow this flow. Redirect to a VNIC. */ + act_q = (const struct rte_flow_action_queue *)act->conf; + if (!act_q->index || act_q->index >= bp->rx_nr_rings) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, + "Invalid queue ID."); + rc = -rte_errno; + goto ret; + } + PMD_DRV_LOG(DEBUG, "Queue index %d\n", act_q->index); + + vnic_id = attr->group; + if (!vnic_id) { + PMD_DRV_LOG(DEBUG, "Group id is 0\n"); + vnic_id = act_q->index; + } + + BNXT_VALID_VNIC_OR_RET(bp, vnic_id); + + vnic = &bp->vnic_info[vnic_id]; + if (vnic->rx_queue_cnt) { + if (vnic->start_grp_id != act_q->index) { + PMD_DRV_LOG(ERR, + "VNIC already in use\n"); + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, + "VNIC already in use"); + rc = -rte_errno; + goto ret; + } + goto use_vnic; + } + + rxq = bp->rx_queues[act_q->index]; + + if (!(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS) && rxq && + vnic->fw_vnic_id != INVALID_HW_RING_ID) + goto use_vnic; + + if (!rxq) { + PMD_DRV_LOG(ERR, + "Queue invalid or used with other VNIC\n"); + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, + "Queue invalid queue or in use"); + rc = -rte_errno; + goto ret; + } + + rxq->vnic = vnic; + rxq->rx_started = 1; + vnic->rx_queue_cnt++; + vnic->start_grp_id = act_q->index; + vnic->end_grp_id = act_q->index; + vnic->func_default = 0; //This is not a default VNIC. + + PMD_DRV_LOG(DEBUG, "VNIC found\n"); + + rc = bnxt_vnic_prep(bp, vnic); + if (rc) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, + "VNIC prep fail"); + rc = -rte_errno; + goto ret; + } + + PMD_DRV_LOG(DEBUG, + "vnic[%d] = %p vnic->fw_grp_ids = %p\n", + act_q->index, vnic, vnic->fw_grp_ids); + +use_vnic: + vnic->ff_pool_idx = vnic_id; + PMD_DRV_LOG(DEBUG, + "Setting vnic ff_idx %d\n", vnic->ff_pool_idx); + filter->dst_id = vnic->fw_vnic_id; + + /* For ntuple filter, create the L2 filter with default VNIC. + * The user specified redirect queue will be set while creating + * the ntuple filter in hardware. 
+ */ + vnic0 = BNXT_GET_DEFAULT_VNIC(bp); + if (use_ntuple) + filter1 = bnxt_get_l2_filter(bp, filter, vnic0); + else + filter1 = bnxt_get_l2_filter(bp, filter, vnic); + if (filter1 == NULL) { + rte_flow_error_set(error, + ENOSPC, + RTE_FLOW_ERROR_TYPE_ACTION, + act, + "Filter not available"); + rc = -rte_errno; + goto ret; + } + + PMD_DRV_LOG(DEBUG, "new fltr: %p l2fltr: %p l2_ref_cnt: %d\n", + filter, filter1, filter1->l2_ref_cnt); + bnxt_update_filter_flags_en(filter, filter1, use_ntuple); + break; + case RTE_FLOW_ACTION_TYPE_DROP: + vnic0 = &bp->vnic_info[0]; + filter->dst_id = vnic0->fw_vnic_id; + filter->valid_flags |= BNXT_FLOW_L2_DROP_FLAG; + filter1 = bnxt_get_l2_filter(bp, filter, vnic0); + if (filter1 == NULL) { + rte_flow_error_set(error, + ENOSPC, + RTE_FLOW_ERROR_TYPE_ACTION, + act, + "Filter not available"); + rc = -rte_errno; + goto ret; + } + + if (filter->filter_type == HWRM_CFA_EM_FILTER) + filter->flags = + HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_DROP; + else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) + filter->flags = + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP; + + bnxt_update_filter_flags_en(filter, filter1, use_ntuple); + break; + case RTE_FLOW_ACTION_TYPE_COUNT: + vnic0 = &bp->vnic_info[0]; + filter1 = bnxt_get_l2_filter(bp, filter, vnic0); + if (filter1 == NULL) { + rte_flow_error_set(error, + ENOSPC, + RTE_FLOW_ERROR_TYPE_ACTION, + act, + "New filter not available"); + rc = -rte_errno; + goto ret; + } + + filter->fw_l2_filter_id = filter1->fw_l2_filter_id; + filter->flow_id = filter1->flow_id; + filter->flags = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_METER; + break; + case RTE_FLOW_ACTION_TYPE_VF: + act_vf = (const struct rte_flow_action_vf *)act->conf; + vf = act_vf->id; + + if (filter->tunnel_type == + CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN || + filter->tunnel_type == + CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE) { + /* If issued on a VF, ensure id is 0 and is trusted */ + if (BNXT_VF(bp)) { + if (!BNXT_VF_IS_TRUSTED(bp) || vf) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, + "Incorrect VF"); + rc = -rte_errno; + goto ret; + } + } + + filter->enables |= filter->tunnel_type; + filter->filter_type = HWRM_CFA_TUNNEL_REDIRECT_FILTER; + goto done; + } + + if (vf >= bp->pdev->max_vfs) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, + "Incorrect VF id!"); + rc = -rte_errno; + goto ret; + } + + filter->mirror_vnic_id = + dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf); + if (dflt_vnic < 0) { + /* This simply indicates there's no driver loaded. + * This is not an error. + */ + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, + "Unable to get default VNIC for VF"); + rc = -rte_errno; + goto ret; + } + + filter->mirror_vnic_id = dflt_vnic; + filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID; + + vnic0 = &bp->vnic_info[0]; + filter1 = bnxt_get_l2_filter(bp, filter, vnic0); + if (filter1 == NULL) { + rte_flow_error_set(error, + ENOSPC, + RTE_FLOW_ERROR_TYPE_ACTION, + act, + "New filter not available"); + rc = -rte_errno; + goto ret; + } + + filter->fw_l2_filter_id = filter1->fw_l2_filter_id; + filter->flow_id = filter1->flow_id; + break; + case RTE_FLOW_ACTION_TYPE_RSS: + rss = (const struct rte_flow_action_rss *)act->conf; + + vnic_id = attr->group; + + BNXT_VALID_VNIC_OR_RET(bp, vnic_id); + vnic = &bp->vnic_info[vnic_id]; + + /* Check if requested RSS config matches RSS config of VNIC + * only if it is not a fresh VNIC configuration. 
+ * Otherwise the existing VNIC configuration can be used. + */ + if (vnic->rx_queue_cnt) { + rc = match_vnic_rss_cfg(bp, vnic, rss); + if (rc) { + PMD_DRV_LOG(ERR, + "VNIC and RSS config mismatch\n"); + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, + "VNIC and RSS cfg mismatch"); + rc = -rte_errno; + goto ret; + } + goto vnic_found; + } + + for (i = 0; i < rss->queue_num; i++) { + PMD_DRV_LOG(DEBUG, "RSS action Queue %d\n", + rss->queue[i]); + + if (!rss->queue[i] || + rss->queue[i] >= bp->rx_nr_rings || + !bp->rx_queues[rss->queue[i]]) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, + "Invalid queue ID for RSS"); + rc = -rte_errno; + goto ret; + } + rxq = bp->rx_queues[rss->queue[i]]; + + if (bp->vnic_info[0].fw_grp_ids[rss->queue[i]] != + INVALID_HW_RING_ID) { + PMD_DRV_LOG(ERR, + "queue active with other VNIC\n"); + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, + "Invalid queue ID for RSS"); + rc = -rte_errno; + goto ret; + } + + rxq->vnic = vnic; + rxq->rx_started = 1; + vnic->rx_queue_cnt++; + } + + vnic->start_grp_id = rss->queue[0]; + vnic->end_grp_id = rss->queue[rss->queue_num - 1]; + vnic->func_default = 0; //This is not a default VNIC. + + rc = bnxt_vnic_prep(bp, vnic); + if (rc) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, + "VNIC prep fail"); + rc = -rte_errno; + goto ret; + } + + PMD_DRV_LOG(DEBUG, + "vnic[%d] = %p vnic->fw_grp_ids = %p\n", + vnic_id, vnic, vnic->fw_grp_ids); + + vnic->ff_pool_idx = vnic_id; + PMD_DRV_LOG(DEBUG, + "Setting vnic ff_pool_idx %d\n", vnic->ff_pool_idx); + + /* This can be done only after vnic_grp_alloc is done. */ + for (i = 0; i < vnic->rx_queue_cnt; i++) { + vnic->fw_grp_ids[i] = + bp->grp_info[rss->queue[i]].fw_grp_id; + /* Make sure vnic0 does not use these rings. */ + bp->vnic_info[0].fw_grp_ids[rss->queue[i]] = + INVALID_HW_RING_ID; + } + + for (rss_idx = 0; rss_idx < HW_HASH_INDEX_SIZE; ) { + for (i = 0; i < vnic->rx_queue_cnt; i++) + vnic->rss_table[rss_idx++] = + vnic->fw_grp_ids[i]; + } + + /* Configure RSS only if the queue count is > 1 */ + if (vnic->rx_queue_cnt > 1) { + vnic->hash_type = + bnxt_rte_to_hwrm_hash_types(rss->types); + + if (!rss->key_len) { + /* If hash key has not been specified, + * use random hash key. 
+ */ + prandom_bytes(vnic->rss_hash_key, + HW_HASH_KEY_SIZE); + } else { + if (rss->key_len > HW_HASH_KEY_SIZE) + memcpy(vnic->rss_hash_key, + rss->key, + HW_HASH_KEY_SIZE); + else + memcpy(vnic->rss_hash_key, + rss->key, + rss->key_len); + } + bnxt_hwrm_vnic_rss_cfg(bp, vnic); + } else { + PMD_DRV_LOG(DEBUG, "No RSS config required\n"); + } + +vnic_found: + filter->dst_id = vnic->fw_vnic_id; + filter1 = bnxt_get_l2_filter(bp, filter, vnic); + if (filter1 == NULL) { + rte_flow_error_set(error, + ENOSPC, + RTE_FLOW_ERROR_TYPE_ACTION, + act, + "New filter not available"); + rc = -rte_errno; + goto ret; + } + + PMD_DRV_LOG(DEBUG, "L2 filter created\n"); + bnxt_update_filter_flags_en(filter, filter1, use_ntuple); + break; + case RTE_FLOW_ACTION_TYPE_MARK: + if (bp->flags & BNXT_FLAG_RX_VECTOR_PKT_MODE) { + PMD_DRV_LOG(DEBUG, + "Disable vector processing for mark\n"); + rte_flow_error_set(error, + ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + act, + "Disable vector processing for mark"); + rc = -rte_errno; + goto ret; + } + + if (bp->mark_table == NULL) { + rte_flow_error_set(error, + ENOMEM, + RTE_FLOW_ERROR_TYPE_ACTION, + act, + "Mark table not allocated."); + rc = -rte_errno; + goto ret; + } + + filter->valid_flags |= BNXT_FLOW_MARK_FLAG; + filter->mark = ((const struct rte_flow_action_mark *) + act->conf)->id; + PMD_DRV_LOG(DEBUG, "Mark the flow %d\n", filter->mark); + break; + default: + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, + "Invalid action."); + rc = -rte_errno; + goto ret; + } + +done: + act = bnxt_flow_non_void_action(++act); + while (act->type != RTE_FLOW_ACTION_TYPE_END) + goto start; + + return rc; +ret: + + if (filter1) { + bnxt_hwrm_clear_l2_filter(bp, filter1); + bnxt_free_filter(bp, filter1); + } + + if (rte_errno) { + if (vnic && STAILQ_EMPTY(&vnic->filter)) + vnic->rx_queue_cnt = 0; + + if (rxq && !vnic->rx_queue_cnt) + rxq->vnic = &bp->vnic_info[0]; + } + return -rte_errno; +} + +static +struct bnxt_vnic_info *find_matching_vnic(struct bnxt *bp, + struct bnxt_filter_info *filter) +{ + struct bnxt_vnic_info *vnic = NULL; + unsigned int i; + + for (i = 0; i < bp->max_vnics; i++) { + vnic = &bp->vnic_info[i]; + if (vnic->fw_vnic_id != INVALID_VNIC_ID && + filter->dst_id == vnic->fw_vnic_id) { + PMD_DRV_LOG(DEBUG, "Found matching VNIC Id %d\n", + vnic->ff_pool_idx); + return vnic; + } + } + return NULL; +} + +static int +bnxt_flow_validate(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + struct bnxt *bp = dev->data->dev_private; + struct bnxt_vnic_info *vnic = NULL; + struct bnxt_filter_info *filter; + int ret = 0; + + bnxt_acquire_flow_lock(bp); + ret = bnxt_flow_args_validate(attr, pattern, actions, error); + if (ret != 0) { + bnxt_release_flow_lock(bp); + return ret; + } + + filter = bnxt_get_unused_filter(bp); + if (filter == NULL) { + PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n"); + bnxt_release_flow_lock(bp); + return -ENOMEM; + } + + ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr, + error, filter); + if (ret) + goto exit; + + vnic = find_matching_vnic(bp, filter); + if (vnic) { + if (STAILQ_EMPTY(&vnic->filter)) { + rte_free(vnic->fw_grp_ids); + bnxt_hwrm_vnic_ctx_free(bp, vnic); + bnxt_hwrm_vnic_free(bp, vnic); + vnic->rx_queue_cnt = 0; + PMD_DRV_LOG(DEBUG, "Free VNIC\n"); + } + } + + if (filter->filter_type == HWRM_CFA_EM_FILTER) + bnxt_hwrm_clear_em_filter(bp, filter); + else if 
(filter->filter_type == HWRM_CFA_NTUPLE_FILTER) + bnxt_hwrm_clear_ntuple_filter(bp, filter); + else + bnxt_hwrm_clear_l2_filter(bp, filter); + +exit: + /* No need to hold on to this filter if we are just validating flow */ + bnxt_free_filter(bp, filter); + bnxt_release_flow_lock(bp); + + return ret; +} + +static void +bnxt_update_filter(struct bnxt *bp, struct bnxt_filter_info *old_filter, + struct bnxt_filter_info *new_filter) +{ + /* Clear the new L2 filter that was created in the previous step in + * bnxt_validate_and_parse_flow. For L2 filters, we will use the new + * filter which points to the new destination queue and so we clear + * the previous L2 filter. For ntuple filters, we are going to reuse + * the old L2 filter and create new NTUPLE filter with this new + * destination queue subsequently during bnxt_flow_create. So we + * decrement the ref cnt of the L2 filter that would've been bumped + * up previously in bnxt_validate_and_parse_flow as the old n-tuple + * filter that was referencing it will be deleted now. + */ + bnxt_hwrm_clear_l2_filter(bp, old_filter); + if (new_filter->filter_type == HWRM_CFA_L2_FILTER) { + bnxt_hwrm_set_l2_filter(bp, new_filter->dst_id, new_filter); + } else { + if (new_filter->filter_type == HWRM_CFA_EM_FILTER) + bnxt_hwrm_clear_em_filter(bp, old_filter); + if (new_filter->filter_type == HWRM_CFA_NTUPLE_FILTER) + bnxt_hwrm_clear_ntuple_filter(bp, old_filter); + } +} + +static int +bnxt_match_filter(struct bnxt *bp, struct bnxt_filter_info *nf) +{ + struct bnxt_filter_info *mf; + struct rte_flow *flow; + int i; + + for (i = bp->max_vnics - 1; i >= 0; i--) { + struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; + + if (vnic->fw_vnic_id == INVALID_VNIC_ID) + continue; + + STAILQ_FOREACH(flow, &vnic->flow_list, next) { + mf = flow->filter; + + if (mf->filter_type == nf->filter_type && + mf->flags == nf->flags && + mf->src_port == nf->src_port && + mf->src_port_mask == nf->src_port_mask && + mf->dst_port == nf->dst_port && + mf->dst_port_mask == nf->dst_port_mask && + mf->ip_protocol == nf->ip_protocol && + mf->ip_addr_type == nf->ip_addr_type && + mf->ethertype == nf->ethertype && + mf->vni == nf->vni && + mf->tunnel_type == nf->tunnel_type && + mf->l2_ovlan == nf->l2_ovlan && + mf->l2_ovlan_mask == nf->l2_ovlan_mask && + mf->l2_ivlan == nf->l2_ivlan && + mf->l2_ivlan_mask == nf->l2_ivlan_mask && + !memcmp(mf->l2_addr, nf->l2_addr, + RTE_ETHER_ADDR_LEN) && + !memcmp(mf->l2_addr_mask, nf->l2_addr_mask, + RTE_ETHER_ADDR_LEN) && + !memcmp(mf->src_macaddr, nf->src_macaddr, + RTE_ETHER_ADDR_LEN) && + !memcmp(mf->dst_macaddr, nf->dst_macaddr, + RTE_ETHER_ADDR_LEN) && + !memcmp(mf->src_ipaddr, nf->src_ipaddr, + sizeof(nf->src_ipaddr)) && + !memcmp(mf->src_ipaddr_mask, nf->src_ipaddr_mask, + sizeof(nf->src_ipaddr_mask)) && + !memcmp(mf->dst_ipaddr, nf->dst_ipaddr, + sizeof(nf->dst_ipaddr)) && + !memcmp(mf->dst_ipaddr_mask, nf->dst_ipaddr_mask, + sizeof(nf->dst_ipaddr_mask))) { + if (mf->dst_id == nf->dst_id) + return -EEXIST; + /* Free the old filter, update flow + * with new filter + */ + bnxt_update_filter(bp, mf, nf); + STAILQ_REMOVE(&vnic->filter, mf, + bnxt_filter_info, next); + STAILQ_INSERT_TAIL(&vnic->filter, nf, next); + bnxt_free_filter(bp, mf); + flow->filter = nf; + return -EXDEV; + } + } + } + return 0; +} + +static void +bnxt_setup_flow_counter(struct bnxt *bp) +{ + if (bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_COUNTERS && + !(bp->flags & BNXT_FLAG_FC_THREAD) && BNXT_FLOW_XSTATS_EN(bp)) { + rte_eal_alarm_set(US_PER_S * BNXT_FC_TIMER, + bnxt_flow_cnt_alarm_cb, 
+ (void *)bp); + bp->flags |= BNXT_FLAG_FC_THREAD; + } +} + +void bnxt_flow_cnt_alarm_cb(void *arg) +{ + int rc = 0; + struct bnxt *bp = arg; + + if (!bp->flow_stat->rx_fc_out_tbl.va) { + PMD_DRV_LOG(ERR, "bp->flow_stat->rx_fc_out_tbl.va is NULL?\n"); + bnxt_cancel_fc_thread(bp); + return; + } + + if (!bp->flow_stat->flow_count) { + bnxt_cancel_fc_thread(bp); + return; + } + + if (!bp->eth_dev->data->dev_started) { + bnxt_cancel_fc_thread(bp); + return; + } + + rc = bnxt_flow_stats_req(bp); + if (rc) { + PMD_DRV_LOG(ERR, "Flow stat alarm not rescheduled.\n"); + return; + } + + rte_eal_alarm_set(US_PER_S * BNXT_FC_TIMER, + bnxt_flow_cnt_alarm_cb, + (void *)bp); +} + + +static struct rte_flow * +bnxt_flow_create(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + struct bnxt *bp = dev->data->dev_private; + struct bnxt_vnic_info *vnic = NULL; + struct bnxt_filter_info *filter; + bool update_flow = false; + struct rte_flow *flow; + int ret = 0; + uint32_t tun_type, flow_id; + + if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Failed to create flow, Not a Trusted VF!"); + return NULL; + } + + if (!dev->data->dev_started) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "Device must be started"); + return NULL; + } + + flow = rte_zmalloc("bnxt_flow", sizeof(struct rte_flow), 0); + if (!flow) { + rte_flow_error_set(error, ENOMEM, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Failed to allocate memory"); + return flow; + } + + bnxt_acquire_flow_lock(bp); + ret = bnxt_flow_args_validate(attr, pattern, actions, error); + if (ret != 0) { + PMD_DRV_LOG(ERR, "Not a validate flow.\n"); + goto free_flow; + } + + filter = bnxt_get_unused_filter(bp); + if (filter == NULL) { + rte_flow_error_set(error, ENOSPC, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Not enough resources for a new flow"); + goto free_flow; + } + + ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr, + error, filter); + if (ret != 0) + goto free_filter; + + ret = bnxt_match_filter(bp, filter); + if (ret == -EEXIST) { + PMD_DRV_LOG(DEBUG, "Flow already exists.\n"); + /* Clear the filter that was created as part of + * validate_and_parse_flow() above + */ + bnxt_hwrm_clear_l2_filter(bp, filter); + goto free_filter; + } else if (ret == -EXDEV) { + PMD_DRV_LOG(DEBUG, "Flow with same pattern exists\n"); + PMD_DRV_LOG(DEBUG, "Updating with different destination\n"); + update_flow = true; + } + + /* If tunnel redirection to a VF/PF is specified then only tunnel_type + * is set and enable is set to the tunnel type. Issue hwrm cmd directly + * in such a case. 
+ */ + if (filter->filter_type == HWRM_CFA_TUNNEL_REDIRECT_FILTER && + filter->enables == filter->tunnel_type) { + ret = bnxt_hwrm_tunnel_redirect_query(bp, &tun_type); + if (ret) { + rte_flow_error_set(error, -ret, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Unable to query tunnel to VF"); + goto free_filter; + } + if (tun_type == (1U << filter->tunnel_type)) { + ret = + bnxt_hwrm_tunnel_redirect_free(bp, + filter->tunnel_type); + if (ret) { + PMD_DRV_LOG(ERR, + "Unable to free existing tunnel\n"); + rte_flow_error_set(error, -ret, + RTE_FLOW_ERROR_TYPE_HANDLE, + NULL, + "Unable to free preexisting " + "tunnel on VF"); + goto free_filter; + } + } + ret = bnxt_hwrm_tunnel_redirect(bp, filter->tunnel_type); + if (ret) { + rte_flow_error_set(error, -ret, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Unable to redirect tunnel to VF"); + goto free_filter; + } + vnic = &bp->vnic_info[0]; + goto done; + } + + if (filter->filter_type == HWRM_CFA_EM_FILTER) { + filter->enables |= + HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID; + ret = bnxt_hwrm_set_em_filter(bp, filter->dst_id, filter); + } + + if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) { + filter->enables |= + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID; + ret = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id, filter); + } + + vnic = find_matching_vnic(bp, filter); +done: + if (!ret || update_flow) { + flow->filter = filter; + flow->vnic = vnic; + if (update_flow) { + ret = -EXDEV; + goto free_flow; + } + + STAILQ_INSERT_TAIL(&vnic->filter, filter, next); + PMD_DRV_LOG(DEBUG, "Successfully created flow.\n"); + STAILQ_INSERT_TAIL(&vnic->flow_list, flow, next); + if (filter->valid_flags & BNXT_FLOW_MARK_FLAG) { + PMD_DRV_LOG(DEBUG, + "Mark action: mark id 0x%x, flow id 0x%x\n", + filter->mark, filter->flow_id); + + /* TCAM and EM should be 16-bit only. + * Other modes not supported. 
+ */ + flow_id = filter->flow_id & BNXT_FLOW_ID_MASK; + if (bp->mark_table[flow_id].valid) { + PMD_DRV_LOG(ERR, + "Entry for Mark id 0x%x occupied" + " flow id 0x%x\n", + filter->mark, filter->flow_id); + goto free_filter; + } + bp->mark_table[flow_id].valid = true; + bp->mark_table[flow_id].mark_id = filter->mark; + } + if (BNXT_FLOW_XSTATS_EN(bp)) + bp->flow_stat->flow_count++; + bnxt_release_flow_lock(bp); + bnxt_setup_flow_counter(bp); + return flow; + } + +free_filter: + bnxt_free_filter(bp, filter); +free_flow: + if (ret == -EEXIST) + rte_flow_error_set(error, ret, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Matching Flow exists."); + else if (ret == -EXDEV) + rte_flow_error_set(error, 0, + RTE_FLOW_ERROR_TYPE_NONE, NULL, + "Flow with pattern exists, updating destination queue"); + else if (!rte_errno) + rte_flow_error_set(error, -ret, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Failed to create flow."); + rte_free(flow); + flow = NULL; + bnxt_release_flow_lock(bp); + return flow; +} + +static int bnxt_handle_tunnel_redirect_destroy(struct bnxt *bp, + struct bnxt_filter_info *filter, + struct rte_flow_error *error) +{ + uint16_t tun_dst_fid; + uint32_t tun_type; + int ret = 0; + + ret = bnxt_hwrm_tunnel_redirect_query(bp, &tun_type); + if (ret) { + rte_flow_error_set(error, -ret, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Unable to query tunnel to VF"); + return ret; + } + if (tun_type == (1U << filter->tunnel_type)) { + ret = bnxt_hwrm_tunnel_redirect_info(bp, filter->tunnel_type, + &tun_dst_fid); + if (ret) { + rte_flow_error_set(error, -ret, + RTE_FLOW_ERROR_TYPE_HANDLE, + NULL, + "tunnel_redirect info cmd fail"); + return ret; + } + PMD_DRV_LOG(INFO, "Pre-existing tunnel fid = %x vf->fid = %x\n", + tun_dst_fid + bp->first_vf_id, bp->fw_fid); + + /* Tunnel doesn't belong to this VF, so don't send HWRM + * cmd, just delete the flow from driver + */ + if (bp->fw_fid != (tun_dst_fid + bp->first_vf_id)) + PMD_DRV_LOG(ERR, + "Tunnel does not belong to this VF, skip hwrm_tunnel_redirect_free\n"); + else + ret = bnxt_hwrm_tunnel_redirect_free(bp, + filter->tunnel_type); + } + return ret; +} + +static int +_bnxt_flow_destroy(struct bnxt *bp, + struct rte_flow *flow, + struct rte_flow_error *error) +{ + struct bnxt_filter_info *filter; + struct bnxt_vnic_info *vnic; + int ret = 0; + uint32_t flow_id; + + filter = flow->filter; + vnic = flow->vnic; + + if (filter->filter_type == HWRM_CFA_TUNNEL_REDIRECT_FILTER && + filter->enables == filter->tunnel_type) { + ret = bnxt_handle_tunnel_redirect_destroy(bp, filter, error); + if (!ret) + goto done; + else + return ret; + } + + ret = bnxt_match_filter(bp, filter); + if (ret == 0) + PMD_DRV_LOG(ERR, "Could not find matching flow\n"); + + if (filter->valid_flags & BNXT_FLOW_MARK_FLAG) { + flow_id = filter->flow_id & BNXT_FLOW_ID_MASK; + memset(&bp->mark_table[flow_id], 0, + sizeof(bp->mark_table[flow_id])); + filter->flow_id = 0; + } + + if (filter->filter_type == HWRM_CFA_EM_FILTER) + ret = bnxt_hwrm_clear_em_filter(bp, filter); + if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) + ret = bnxt_hwrm_clear_ntuple_filter(bp, filter); + ret = bnxt_hwrm_clear_l2_filter(bp, filter); + +done: + if (!ret) { + /* If it is a L2 drop filter, when the filter is created, + * the FW updates the BC/MC records. + * Once this filter is removed, issue the set_rx_mask command + * to reset the BC/MC records in the HW to the settings + * before the drop counter is created. 
+ */ + if (filter->valid_flags & BNXT_FLOW_L2_DROP_FLAG) + bnxt_set_rx_mask_no_vlan(bp, &bp->vnic_info[0]); + + STAILQ_REMOVE(&vnic->filter, filter, bnxt_filter_info, next); + bnxt_free_filter(bp, filter); + STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next); + rte_free(flow); + if (BNXT_FLOW_XSTATS_EN(bp)) + bp->flow_stat->flow_count--; + + /* If this was the last flow associated with this vnic, + * switch the queue back to RSS pool. + */ + if (vnic && !vnic->func_default && + STAILQ_EMPTY(&vnic->flow_list)) { + rte_free(vnic->fw_grp_ids); + if (vnic->rx_queue_cnt > 1) + bnxt_hwrm_vnic_ctx_free(bp, vnic); + + bnxt_hwrm_vnic_free(bp, vnic); + vnic->rx_queue_cnt = 0; + } + } else { + rte_flow_error_set(error, -ret, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Failed to destroy flow."); + } + + return ret; +} + +static int +bnxt_flow_destroy(struct rte_eth_dev *dev, + struct rte_flow *flow, + struct rte_flow_error *error) +{ + struct bnxt *bp = dev->data->dev_private; + int ret = 0; + + bnxt_acquire_flow_lock(bp); + if (!flow) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Invalid flow: failed to destroy flow."); + bnxt_release_flow_lock(bp); + return -EINVAL; + } + + if (!flow->filter) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Invalid flow: failed to destroy flow."); + bnxt_release_flow_lock(bp); + return -EINVAL; + } + ret = _bnxt_flow_destroy(bp, flow, error); + bnxt_release_flow_lock(bp); + + return ret; +} + +void bnxt_cancel_fc_thread(struct bnxt *bp) +{ + bp->flags &= ~BNXT_FLAG_FC_THREAD; + rte_eal_alarm_cancel(bnxt_flow_cnt_alarm_cb, (void *)bp); +} + +static int +bnxt_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error) +{ + struct bnxt *bp = dev->data->dev_private; + struct bnxt_vnic_info *vnic; + struct rte_flow *flow; + unsigned int i; + int ret = 0; + + bnxt_acquire_flow_lock(bp); + for (i = 0; i < bp->max_vnics; i++) { + vnic = &bp->vnic_info[i]; + if (vnic && vnic->fw_vnic_id == INVALID_VNIC_ID) + continue; + + while (!STAILQ_EMPTY(&vnic->flow_list)) { + flow = STAILQ_FIRST(&vnic->flow_list); + + if (!flow->filter) + continue; + + ret = _bnxt_flow_destroy(bp, flow, error); + if (ret) + break; + } + } + + bnxt_cancel_fc_thread(bp); + bnxt_release_flow_lock(bp); + + return ret; +} + +const struct rte_flow_ops bnxt_flow_ops = { + .validate = bnxt_flow_validate, + .create = bnxt_flow_create, + .destroy = bnxt_flow_destroy, + .flush = bnxt_flow_flush, +}; diff --git a/src/spdk/dpdk/drivers/net/bnxt/bnxt_hwrm.c b/src/spdk/dpdk/drivers/net/bnxt/bnxt_hwrm.c new file mode 100644 index 000000000..c1798b59d --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnxt/bnxt_hwrm.c @@ -0,0 +1,5413 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Broadcom + * All rights reserved. 
+ */ + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "bnxt.h" +#include "bnxt_filter.h" +#include "bnxt_hwrm.h" +#include "bnxt_rxq.h" +#include "bnxt_rxr.h" +#include "bnxt_ring.h" +#include "bnxt_txq.h" +#include "bnxt_txr.h" +#include "bnxt_vnic.h" +#include "hsi_struct_def_dpdk.h" + +#define HWRM_SPEC_CODE_1_8_3 0x10803 +#define HWRM_VERSION_1_9_1 0x10901 +#define HWRM_VERSION_1_9_2 0x10903 + +struct bnxt_plcmodes_cfg { + uint32_t flags; + uint16_t jumbo_thresh; + uint16_t hds_offset; + uint16_t hds_threshold; +}; + +static int page_getenum(size_t size) +{ + if (size <= 1 << 4) + return 4; + if (size <= 1 << 12) + return 12; + if (size <= 1 << 13) + return 13; + if (size <= 1 << 16) + return 16; + if (size <= 1 << 21) + return 21; + if (size <= 1 << 22) + return 22; + if (size <= 1 << 30) + return 30; + PMD_DRV_LOG(ERR, "Page size %zu out of range\n", size); + return sizeof(void *) * 8 - 1; +} + +static int page_roundup(size_t size) +{ + return 1 << page_getenum(size); +} + +static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, + uint8_t *pg_attr, + uint64_t *pg_dir) +{ + if (rmem->nr_pages > 1) { + *pg_attr = 1; + *pg_dir = rte_cpu_to_le_64(rmem->pg_tbl_map); + } else { + *pg_dir = rte_cpu_to_le_64(rmem->dma_arr[0]); + } +} + +/* + * HWRM Functions (sent to HWRM) + * These are named bnxt_hwrm_*() and return 0 on success or -110 if the + * HWRM command times out, or a negative error code if the HWRM + * command was failed by the FW. + */ + +static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg, + uint32_t msg_len, bool use_kong_mb) +{ + unsigned int i; + struct input *req = msg; + struct output *resp = bp->hwrm_cmd_resp_addr; + uint32_t *data = msg; + uint8_t *bar; + uint8_t *valid; + uint16_t max_req_len = bp->max_req_len; + struct hwrm_short_input short_input = { 0 }; + uint16_t bar_offset = use_kong_mb ? + GRCPF_REG_KONG_CHANNEL_OFFSET : GRCPF_REG_CHIMP_CHANNEL_OFFSET; + uint16_t mb_trigger_offset = use_kong_mb ? + GRCPF_REG_KONG_COMM_TRIGGER : GRCPF_REG_CHIMP_COMM_TRIGGER; + uint32_t timeout; + + /* Do not send HWRM commands to firmware in error state */ + if (bp->flags & BNXT_FLAG_FATAL_ERROR) + return 0; + + timeout = bp->hwrm_cmd_timeout; + + if (bp->flags & BNXT_FLAG_SHORT_CMD || + msg_len > bp->max_req_len) { + void *short_cmd_req = bp->hwrm_short_cmd_req_addr; + + memset(short_cmd_req, 0, bp->hwrm_max_ext_req_len); + memcpy(short_cmd_req, req, msg_len); + + short_input.req_type = rte_cpu_to_le_16(req->req_type); + short_input.signature = rte_cpu_to_le_16( + HWRM_SHORT_INPUT_SIGNATURE_SHORT_CMD); + short_input.size = rte_cpu_to_le_16(msg_len); + short_input.req_addr = + rte_cpu_to_le_64(bp->hwrm_short_cmd_req_dma_addr); + + data = (uint32_t *)&short_input; + msg_len = sizeof(short_input); + + max_req_len = BNXT_HWRM_SHORT_REQ_LEN; + } + + /* Write request msg to hwrm channel */ + for (i = 0; i < msg_len; i += 4) { + bar = (uint8_t *)bp->bar0 + bar_offset + i; + rte_write32(*data, bar); + data++; + } + + /* Zero the rest of the request space */ + for (; i < max_req_len; i += 4) { + bar = (uint8_t *)bp->bar0 + bar_offset + i; + rte_write32(0, bar); + } + + /* Ring channel doorbell */ + bar = (uint8_t *)bp->bar0 + mb_trigger_offset; + rte_write32(1, bar); + /* + * Make sure the channel doorbell ring command complete before + * reading the response to avoid getting stale or invalid + * responses. 
+ */ + rte_io_mb(); + + /* Poll for the valid bit */ + for (i = 0; i < timeout; i++) { + /* Sanity check on the resp->resp_len */ + rte_cio_rmb(); + if (resp->resp_len && resp->resp_len <= bp->max_resp_len) { + /* Last byte of resp contains the valid key */ + valid = (uint8_t *)resp + resp->resp_len - 1; + if (*valid == HWRM_RESP_VALID_KEY) + break; + } + rte_delay_us(1); + } + + if (i >= timeout) { + /* Suppress VER_GET timeout messages during reset recovery */ + if (bp->flags & BNXT_FLAG_FW_RESET && + rte_cpu_to_le_16(req->req_type) == HWRM_VER_GET) + return -ETIMEDOUT; + + PMD_DRV_LOG(ERR, + "Error(timeout) sending msg 0x%04x, seq_id %d\n", + req->req_type, req->seq_id); + return -ETIMEDOUT; + } + return 0; +} + +/* + * HWRM_PREP() should be used to prepare *ALL* HWRM commands. It grabs the + * spinlock, and does initial processing. + * + * HWRM_CHECK_RESULT() returns errors on failure and may not be used. It + * releases the spinlock only if it returns. If the regular int return codes + * are not used by the function, HWRM_CHECK_RESULT() should not be used + * directly, rather it should be copied and modified to suit the function. + * + * HWRM_UNLOCK() must be called after all response processing is completed. + */ +#define HWRM_PREP(req, type, kong) do { \ + rte_spinlock_lock(&bp->hwrm_lock); \ + if (bp->hwrm_cmd_resp_addr == NULL) { \ + rte_spinlock_unlock(&bp->hwrm_lock); \ + return -EACCES; \ + } \ + memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \ + (req)->req_type = rte_cpu_to_le_16(type); \ + (req)->cmpl_ring = rte_cpu_to_le_16(-1); \ + (req)->seq_id = kong ? rte_cpu_to_le_16(bp->kong_cmd_seq++) :\ + rte_cpu_to_le_16(bp->chimp_cmd_seq++); \ + (req)->target_id = rte_cpu_to_le_16(0xffff); \ + (req)->resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr); \ +} while (0) + +#define HWRM_CHECK_RESULT_SILENT() do {\ + if (rc) { \ + rte_spinlock_unlock(&bp->hwrm_lock); \ + return rc; \ + } \ + if (resp->error_code) { \ + rc = rte_le_to_cpu_16(resp->error_code); \ + rte_spinlock_unlock(&bp->hwrm_lock); \ + return rc; \ + } \ +} while (0) + +#define HWRM_CHECK_RESULT() do {\ + if (rc) { \ + PMD_DRV_LOG(ERR, "failed rc:%d\n", rc); \ + rte_spinlock_unlock(&bp->hwrm_lock); \ + if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \ + rc = -EACCES; \ + else if (rc == HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR) \ + rc = -ENOSPC; \ + else if (rc == HWRM_ERR_CODE_INVALID_PARAMS) \ + rc = -EINVAL; \ + else if (rc == HWRM_ERR_CODE_CMD_NOT_SUPPORTED) \ + rc = -ENOTSUP; \ + else if (rc == HWRM_ERR_CODE_HOT_RESET_PROGRESS) \ + rc = -EAGAIN; \ + else if (rc > 0) \ + rc = -EIO; \ + return rc; \ + } \ + if (resp->error_code) { \ + rc = rte_le_to_cpu_16(resp->error_code); \ + if (resp->resp_len >= 16) { \ + struct hwrm_err_output *tmp_hwrm_err_op = \ + (void *)resp; \ + PMD_DRV_LOG(ERR, \ + "error %d:%d:%08x:%04x\n", \ + rc, tmp_hwrm_err_op->cmd_err, \ + rte_le_to_cpu_32(\ + tmp_hwrm_err_op->opaque_0), \ + rte_le_to_cpu_16(\ + tmp_hwrm_err_op->opaque_1)); \ + } else { \ + PMD_DRV_LOG(ERR, "error %d\n", rc); \ + } \ + rte_spinlock_unlock(&bp->hwrm_lock); \ + if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \ + rc = -EACCES; \ + else if (rc == HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR) \ + rc = -ENOSPC; \ + else if (rc == HWRM_ERR_CODE_INVALID_PARAMS) \ + rc = -EINVAL; \ + else if (rc == HWRM_ERR_CODE_CMD_NOT_SUPPORTED) \ + rc = -ENOTSUP; \ + else if (rc == HWRM_ERR_CODE_HOT_RESET_PROGRESS) \ + rc = -EAGAIN; \ + else if (rc > 0) \ + rc = -EIO; \ + return rc; \ + } \ +} while (0) + +#define HWRM_UNLOCK() 
rte_spinlock_unlock(&bp->hwrm_lock) + +int bnxt_hwrm_tf_message_direct(struct bnxt *bp, + bool use_kong_mb, + uint16_t msg_type, + void *msg, + uint32_t msg_len, + void *resp_msg, + uint32_t resp_len) +{ + int rc = 0; + bool mailbox = BNXT_USE_CHIMP_MB; + struct input *req = msg; + struct output *resp = bp->hwrm_cmd_resp_addr; + + if (use_kong_mb) + mailbox = BNXT_USE_KONG(bp); + + HWRM_PREP(req, msg_type, mailbox); + + rc = bnxt_hwrm_send_message(bp, req, msg_len, mailbox); + + HWRM_CHECK_RESULT(); + + if (resp_msg) + memcpy(resp_msg, resp, resp_len); + + HWRM_UNLOCK(); + + return rc; +} + +int bnxt_hwrm_tf_message_tunneled(struct bnxt *bp, + bool use_kong_mb, + uint16_t tf_type, + uint16_t tf_subtype, + uint32_t *tf_response_code, + void *msg, + uint32_t msg_len, + void *response, + uint32_t response_len) +{ + int rc = 0; + struct hwrm_cfa_tflib_input req = { .req_type = 0 }; + struct hwrm_cfa_tflib_output *resp = bp->hwrm_cmd_resp_addr; + bool mailbox = BNXT_USE_CHIMP_MB; + + if (msg_len > sizeof(req.tf_req)) + return -ENOMEM; + + if (use_kong_mb) + mailbox = BNXT_USE_KONG(bp); + + HWRM_PREP(&req, HWRM_TF, mailbox); + /* Build request using the user supplied request payload. + * TLV request size is checked at build time against HWRM + * request max size, thus no checking required. + */ + req.tf_type = tf_type; + req.tf_subtype = tf_subtype; + memcpy(req.tf_req, msg, msg_len); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), mailbox); + HWRM_CHECK_RESULT(); + + /* Copy the resp to user provided response buffer */ + if (response != NULL) + /* Post process response data. We need to copy only + * the 'payload' as the HWRM data structure really is + * HWRM header + msg header + payload and the TFLIB + * only provided a payload place holder. + */ + if (response_len != 0) { + memcpy(response, + resp->tf_resp, + response_len); + } + + /* Extract the internal tflib response code */ + *tf_response_code = resp->tf_resp_code; + HWRM_UNLOCK(); + + return rc; +} + +int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic) +{ + int rc = 0; + struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 }; + struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr; + + HWRM_PREP(&req, HWRM_CFA_L2_SET_RX_MASK, BNXT_USE_CHIMP_MB); + req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id); + req.mask = 0; + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); + + return rc; +} + +int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, + struct bnxt_vnic_info *vnic, + uint16_t vlan_count, + struct bnxt_vlan_table_entry *vlan_table) +{ + int rc = 0; + struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 }; + struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr; + uint32_t mask = 0; + + if (vnic->fw_vnic_id == INVALID_HW_RING_ID) + return rc; + + HWRM_PREP(&req, HWRM_CFA_L2_SET_RX_MASK, BNXT_USE_CHIMP_MB); + req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id); + + if (vnic->flags & BNXT_VNIC_INFO_BCAST) + mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST; + if (vnic->flags & BNXT_VNIC_INFO_UNTAGGED) + mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN; + + if (vnic->flags & BNXT_VNIC_INFO_PROMISC) + mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS; + + if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI) { + mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST; + } else if (vnic->flags & BNXT_VNIC_INFO_MCAST) { + mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST; + req.num_mc_entries = 
rte_cpu_to_le_32(vnic->mc_addr_cnt); + req.mc_tbl_addr = rte_cpu_to_le_64(vnic->mc_list_dma_addr); + } + if (vlan_table) { + if (!(mask & HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN)) + mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY; + req.vlan_tag_tbl_addr = + rte_cpu_to_le_64(rte_malloc_virt2iova(vlan_table)); + req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count); + } + req.mask = rte_cpu_to_le_32(mask); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); + + return rc; +} + +int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid, + uint16_t vlan_count, + struct bnxt_vlan_antispoof_table_entry *vlan_table) +{ + int rc = 0; + struct hwrm_cfa_vlan_antispoof_cfg_input req = {.req_type = 0 }; + struct hwrm_cfa_vlan_antispoof_cfg_output *resp = + bp->hwrm_cmd_resp_addr; + + /* + * Older HWRM versions did not support this command, and the set_rx_mask + * list was used for anti-spoof. In 1.8.0, the TX path configuration was + * removed from set_rx_mask call, and this command was added. + * + * This command is also present from 1.7.8.11 and higher, + * as well as 1.7.8.0 + */ + if (bp->fw_ver < ((1 << 24) | (8 << 16))) { + if (bp->fw_ver != ((1 << 24) | (7 << 16) | (8 << 8))) { + if (bp->fw_ver < ((1 << 24) | (7 << 16) | (8 << 8) | + (11))) + return 0; + } + } + HWRM_PREP(&req, HWRM_CFA_VLAN_ANTISPOOF_CFG, BNXT_USE_CHIMP_MB); + req.fid = rte_cpu_to_le_16(fid); + + req.vlan_tag_mask_tbl_addr = + rte_cpu_to_le_64(rte_malloc_virt2iova(vlan_table)); + req.num_vlan_entries = rte_cpu_to_le_32((uint32_t)vlan_count); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); + + return rc; +} + +int bnxt_hwrm_clear_l2_filter(struct bnxt *bp, + struct bnxt_filter_info *filter) +{ + int rc = 0; + struct bnxt_filter_info *l2_filter = filter; + struct bnxt_vnic_info *vnic = NULL; + struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 }; + struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr; + + if (filter->fw_l2_filter_id == UINT64_MAX) + return 0; + + if (filter->matching_l2_fltr_ptr) + l2_filter = filter->matching_l2_fltr_ptr; + + PMD_DRV_LOG(DEBUG, "filter: %p l2_filter: %p ref_cnt: %d\n", + filter, l2_filter, l2_filter->l2_ref_cnt); + + if (l2_filter->l2_ref_cnt == 0) + return 0; + + if (l2_filter->l2_ref_cnt > 0) + l2_filter->l2_ref_cnt--; + + if (l2_filter->l2_ref_cnt > 0) + return 0; + + HWRM_PREP(&req, HWRM_CFA_L2_FILTER_FREE, BNXT_USE_CHIMP_MB); + + req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); + + filter->fw_l2_filter_id = UINT64_MAX; + if (l2_filter->l2_ref_cnt == 0) { + vnic = l2_filter->vnic; + if (vnic) { + STAILQ_REMOVE(&vnic->filter, l2_filter, + bnxt_filter_info, next); + bnxt_free_filter(bp, l2_filter); + } + } + + return 0; +} + +int bnxt_hwrm_set_l2_filter(struct bnxt *bp, + uint16_t dst_id, + struct bnxt_filter_info *filter) +{ + int rc = 0; + struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 }; + struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr; + struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf; + const struct rte_eth_vmdq_rx_conf *conf = + &dev_conf->rx_adv_conf.vmdq_rx_conf; + uint32_t enables = 0; + uint16_t j = dst_id - 1; + + //TODO: Is there a better way to add VLANs to each VNIC in case of VMDQ + if ((dev_conf->rxmode.mq_mode & 
ETH_MQ_RX_VMDQ_FLAG) && + conf->pool_map[j].pools & (1UL << j)) { + PMD_DRV_LOG(DEBUG, + "Add vlan %u to vmdq pool %u\n", + conf->pool_map[j].vlan_id, j); + + filter->l2_ivlan = conf->pool_map[j].vlan_id; + filter->enables |= + HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN | + HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK; + } + + if (filter->fw_l2_filter_id != UINT64_MAX) + bnxt_hwrm_clear_l2_filter(bp, filter); + + HWRM_PREP(&req, HWRM_CFA_L2_FILTER_ALLOC, BNXT_USE_CHIMP_MB); + + req.flags = rte_cpu_to_le_32(filter->flags); + + enables = filter->enables | + HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID; + req.dst_id = rte_cpu_to_le_16(dst_id); + + if (enables & + HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR) + memcpy(req.l2_addr, filter->l2_addr, + RTE_ETHER_ADDR_LEN); + if (enables & + HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK) + memcpy(req.l2_addr_mask, filter->l2_addr_mask, + RTE_ETHER_ADDR_LEN); + if (enables & + HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN) + req.l2_ovlan = filter->l2_ovlan; + if (enables & + HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN) + req.l2_ivlan = filter->l2_ivlan; + if (enables & + HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK) + req.l2_ovlan_mask = filter->l2_ovlan_mask; + if (enables & + HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK) + req.l2_ivlan_mask = filter->l2_ivlan_mask; + if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID) + req.src_id = rte_cpu_to_le_32(filter->src_id); + if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE) + req.src_type = filter->src_type; + if (filter->pri_hint) { + req.pri_hint = filter->pri_hint; + req.l2_filter_id_hint = + rte_cpu_to_le_64(filter->l2_filter_id_hint); + } + + req.enables = rte_cpu_to_le_32(enables); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + + HWRM_CHECK_RESULT(); + + filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id); + filter->flow_id = rte_le_to_cpu_32(resp->flow_id); + HWRM_UNLOCK(); + + filter->l2_ref_cnt++; + + return rc; +} + +int bnxt_hwrm_ptp_cfg(struct bnxt *bp) +{ + struct hwrm_port_mac_cfg_input req = {.req_type = 0}; + struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; + uint32_t flags = 0; + int rc; + + if (!ptp) + return 0; + + HWRM_PREP(&req, HWRM_PORT_MAC_CFG, BNXT_USE_CHIMP_MB); + + if (ptp->rx_filter) + flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_ENABLE; + else + flags |= + HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_DISABLE; + if (ptp->tx_tstamp_en) + flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_ENABLE; + else + flags |= + HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_DISABLE; + req.flags = rte_cpu_to_le_32(flags); + req.enables = rte_cpu_to_le_32 + (HWRM_PORT_MAC_CFG_INPUT_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE); + req.rx_ts_capture_ptp_msg_type = rte_cpu_to_le_16(ptp->rxctl); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + HWRM_UNLOCK(); + + return rc; +} + +static int bnxt_hwrm_ptp_qcfg(struct bnxt *bp) +{ + int rc = 0; + struct hwrm_port_mac_ptp_qcfg_input req = {.req_type = 0}; + struct hwrm_port_mac_ptp_qcfg_output *resp = bp->hwrm_cmd_resp_addr; + struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; + + if (ptp) + return 0; + + HWRM_PREP(&req, HWRM_PORT_MAC_PTP_QCFG, BNXT_USE_CHIMP_MB); + + req.port_id = rte_cpu_to_le_16(bp->pf->port_id); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + + HWRM_CHECK_RESULT(); + + if (!BNXT_CHIP_THOR(bp) && + !(resp->flags & HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_DIRECT_ACCESS)) + return 0; 
+ + if (resp->flags & HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_ONE_STEP_TX_TS) + bp->flags |= BNXT_FLAG_FW_CAP_ONE_STEP_TX_TS; + + ptp = rte_zmalloc("ptp_cfg", sizeof(*ptp), 0); + if (!ptp) + return -ENOMEM; + + if (!BNXT_CHIP_THOR(bp)) { + ptp->rx_regs[BNXT_PTP_RX_TS_L] = + rte_le_to_cpu_32(resp->rx_ts_reg_off_lower); + ptp->rx_regs[BNXT_PTP_RX_TS_H] = + rte_le_to_cpu_32(resp->rx_ts_reg_off_upper); + ptp->rx_regs[BNXT_PTP_RX_SEQ] = + rte_le_to_cpu_32(resp->rx_ts_reg_off_seq_id); + ptp->rx_regs[BNXT_PTP_RX_FIFO] = + rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo); + ptp->rx_regs[BNXT_PTP_RX_FIFO_ADV] = + rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo_adv); + ptp->tx_regs[BNXT_PTP_TX_TS_L] = + rte_le_to_cpu_32(resp->tx_ts_reg_off_lower); + ptp->tx_regs[BNXT_PTP_TX_TS_H] = + rte_le_to_cpu_32(resp->tx_ts_reg_off_upper); + ptp->tx_regs[BNXT_PTP_TX_SEQ] = + rte_le_to_cpu_32(resp->tx_ts_reg_off_seq_id); + ptp->tx_regs[BNXT_PTP_TX_FIFO] = + rte_le_to_cpu_32(resp->tx_ts_reg_off_fifo); + } + + ptp->bp = bp; + bp->ptp_cfg = ptp; + + return 0; +} + +static int __bnxt_hwrm_func_qcaps(struct bnxt *bp) +{ + int rc = 0; + struct hwrm_func_qcaps_input req = {.req_type = 0 }; + struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr; + uint16_t new_max_vfs; + uint32_t flags; + int i; + + HWRM_PREP(&req, HWRM_FUNC_QCAPS, BNXT_USE_CHIMP_MB); + + req.fid = rte_cpu_to_le_16(0xffff); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + + HWRM_CHECK_RESULT(); + + bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps); + flags = rte_le_to_cpu_32(resp->flags); + if (BNXT_PF(bp)) { + bp->pf->port_id = resp->port_id; + bp->pf->first_vf_id = rte_le_to_cpu_16(resp->first_vf_id); + bp->pf->total_vfs = rte_le_to_cpu_16(resp->max_vfs); + new_max_vfs = bp->pdev->max_vfs; + if (new_max_vfs != bp->pf->max_vfs) { + if (bp->pf->vf_info) + rte_free(bp->pf->vf_info); + bp->pf->vf_info = rte_malloc("bnxt_vf_info", + sizeof(bp->pf->vf_info[0]) * new_max_vfs, 0); + bp->pf->max_vfs = new_max_vfs; + for (i = 0; i < new_max_vfs; i++) { + bp->pf->vf_info[i].fid = + bp->pf->first_vf_id + i; + bp->pf->vf_info[i].vlan_table = + rte_zmalloc("VF VLAN table", + getpagesize(), + getpagesize()); + if (bp->pf->vf_info[i].vlan_table == NULL) + PMD_DRV_LOG(ERR, + "Fail to alloc VLAN table for VF %d\n", + i); + else + rte_mem_lock_page( + bp->pf->vf_info[i].vlan_table); + bp->pf->vf_info[i].vlan_as_table = + rte_zmalloc("VF VLAN AS table", + getpagesize(), + getpagesize()); + if (bp->pf->vf_info[i].vlan_as_table == NULL) + PMD_DRV_LOG(ERR, + "Alloc VLAN AS table for VF %d fail\n", + i); + else + rte_mem_lock_page( + bp->pf->vf_info[i].vlan_as_table); + STAILQ_INIT(&bp->pf->vf_info[i].filter); + } + } + } + + bp->fw_fid = rte_le_to_cpu_32(resp->fid); + if (!bnxt_check_zero_bytes(resp->mac_address, RTE_ETHER_ADDR_LEN)) { + bp->flags |= BNXT_FLAG_DFLT_MAC_SET; + memcpy(bp->mac_addr, &resp->mac_address, RTE_ETHER_ADDR_LEN); + } else { + bp->flags &= ~BNXT_FLAG_DFLT_MAC_SET; + } + bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx); + bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings); + bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings); + bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings); + bp->first_vf_id = rte_le_to_cpu_16(resp->first_vf_id); + bp->max_rx_em_flows = rte_le_to_cpu_16(resp->max_rx_em_flows); + bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs); + if (!BNXT_CHIP_THOR(bp)) + bp->max_l2_ctx += bp->max_rx_em_flows; + /* TODO: For now, do not support VMDq/RFS on VFs. 
*/ + if (BNXT_PF(bp)) { + if (bp->pf->max_vfs) + bp->max_vnics = 1; + else + bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics); + } else { + bp->max_vnics = 1; + } + PMD_DRV_LOG(DEBUG, "Max l2_cntxts is %d vnics is %d\n", + bp->max_l2_ctx, bp->max_vnics); + bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx); + if (BNXT_PF(bp)) { + bp->pf->total_vnics = rte_le_to_cpu_16(resp->max_vnics); + if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED) { + bp->flags |= BNXT_FLAG_PTP_SUPPORTED; + PMD_DRV_LOG(DEBUG, "PTP SUPPORTED\n"); + HWRM_UNLOCK(); + bnxt_hwrm_ptp_qcfg(bp); + } + } + + if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT_STATS_SUPPORTED) + bp->flags |= BNXT_FLAG_EXT_STATS_SUPPORTED; + + if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ERROR_RECOVERY_CAPABLE) { + bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY; + PMD_DRV_LOG(DEBUG, "Adapter Error recovery SUPPORTED\n"); + } + + if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ERR_RECOVER_RELOAD) + bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD; + + if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_HOT_RESET_CAPABLE) + bp->fw_cap |= BNXT_FW_CAP_HOT_RESET; + + HWRM_UNLOCK(); + + return rc; +} + +int bnxt_hwrm_func_qcaps(struct bnxt *bp) +{ + int rc; + + rc = __bnxt_hwrm_func_qcaps(bp); + if (!rc && bp->hwrm_spec_code >= HWRM_SPEC_CODE_1_8_3) { + rc = bnxt_alloc_ctx_mem(bp); + if (rc) + return rc; + + rc = bnxt_hwrm_func_resc_qcaps(bp); + if (!rc) + bp->flags |= BNXT_FLAG_NEW_RM; + } + + /* On older FW, + * bnxt_hwrm_func_resc_qcaps can fail and cause init failure. + * But the error can be ignored. Return success. + */ + + return 0; +} + +/* VNIC cap covers capability of all VNICs. So no need to pass vnic_id */ +int bnxt_hwrm_vnic_qcaps(struct bnxt *bp) +{ + int rc = 0; + struct hwrm_vnic_qcaps_input req = {.req_type = 0 }; + struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr; + + HWRM_PREP(&req, HWRM_VNIC_QCAPS, BNXT_USE_CHIMP_MB); + + req.target_id = rte_cpu_to_le_16(0xffff); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + + HWRM_CHECK_RESULT(); + + if (rte_le_to_cpu_32(resp->flags) & + HWRM_VNIC_QCAPS_OUTPUT_FLAGS_COS_ASSIGNMENT_CAP) { + bp->vnic_cap_flags |= BNXT_VNIC_CAP_COS_CLASSIFY; + PMD_DRV_LOG(INFO, "CoS assignment capability enabled\n"); + } + + bp->max_tpa_v2 = rte_le_to_cpu_16(resp->max_aggs_supported); + + HWRM_UNLOCK(); + + return rc; +} + +int bnxt_hwrm_func_reset(struct bnxt *bp) +{ + int rc = 0; + struct hwrm_func_reset_input req = {.req_type = 0 }; + struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr; + + HWRM_PREP(&req, HWRM_FUNC_RESET, BNXT_USE_CHIMP_MB); + + req.enables = rte_cpu_to_le_32(0); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); + + return rc; +} + +int bnxt_hwrm_func_driver_register(struct bnxt *bp) +{ + int rc; + uint32_t flags = 0; + struct hwrm_func_drv_rgtr_input req = {.req_type = 0 }; + struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr; + + if (bp->flags & BNXT_FLAG_REGISTERED) + return 0; + + if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET) + flags = HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_HOT_RESET_SUPPORT; + if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) + flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_ERROR_RECOVERY_SUPPORT; + + /* PFs and trusted VFs should indicate the support of the + * Master capability on non Stingray platform + */ + if ((BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) && !BNXT_STINGRAY(bp)) + flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_MASTER_SUPPORT; + + HWRM_PREP(&req, HWRM_FUNC_DRV_RGTR, BNXT_USE_CHIMP_MB); + 
req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER | + HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD); + req.ver_maj = RTE_VER_YEAR; + req.ver_min = RTE_VER_MONTH; + req.ver_upd = RTE_VER_MINOR; + + if (BNXT_PF(bp)) { + req.enables |= rte_cpu_to_le_32( + HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_REQ_FWD); + memcpy(req.vf_req_fwd, bp->pf->vf_req_fwd, + RTE_MIN(sizeof(req.vf_req_fwd), + sizeof(bp->pf->vf_req_fwd))); + + /* + * PF can sniff HWRM API issued by VF. This can be set up by + * linux driver and inherited by the DPDK PF driver. Clear + * this HWRM sniffer list in FW because DPDK PF driver does + * not support this. + */ + flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FWD_NONE_MODE; + } + + req.flags = rte_cpu_to_le_32(flags); + + req.async_event_fwd[0] |= + rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_LINK_STATUS_CHANGE | + ASYNC_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED | + ASYNC_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE | + ASYNC_CMPL_EVENT_ID_LINK_SPEED_CHANGE | + ASYNC_CMPL_EVENT_ID_RESET_NOTIFY); + if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) + req.async_event_fwd[0] |= + rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_ERROR_RECOVERY); + req.async_event_fwd[1] |= + rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_PF_DRVR_UNLOAD | + ASYNC_CMPL_EVENT_ID_VF_CFG_CHANGE); + if (BNXT_PF(bp)) + req.async_event_fwd[1] |= + rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_DBG_NOTIFICATION); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + + HWRM_CHECK_RESULT(); + + flags = rte_le_to_cpu_32(resp->flags); + if (flags & HWRM_FUNC_DRV_RGTR_OUTPUT_FLAGS_IF_CHANGE_SUPPORTED) + bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE; + + HWRM_UNLOCK(); + + bp->flags |= BNXT_FLAG_REGISTERED; + + return rc; +} + +int bnxt_hwrm_check_vf_rings(struct bnxt *bp) +{ + if (!(BNXT_VF(bp) && (bp->flags & BNXT_FLAG_NEW_RM))) + return 0; + + return bnxt_hwrm_func_reserve_vf_resc(bp, true); +} + +int bnxt_hwrm_func_reserve_vf_resc(struct bnxt *bp, bool test) +{ + int rc; + uint32_t flags = 0; + uint32_t enables; + struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_func_vf_cfg_input req = {0}; + + HWRM_PREP(&req, HWRM_FUNC_VF_CFG, BNXT_USE_CHIMP_MB); + + enables = HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RX_RINGS | + HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_TX_RINGS | + HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_STAT_CTXS | + HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_CMPL_RINGS | + HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS; + + if (BNXT_HAS_RING_GRPS(bp)) { + enables |= HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS; + req.num_hw_ring_grps = rte_cpu_to_le_16(bp->rx_nr_rings); + } + + req.num_tx_rings = rte_cpu_to_le_16(bp->tx_nr_rings); + req.num_rx_rings = rte_cpu_to_le_16(bp->rx_nr_rings * + AGG_RING_MULTIPLIER); + req.num_stat_ctxs = rte_cpu_to_le_16(bp->rx_nr_rings + bp->tx_nr_rings); + req.num_cmpl_rings = rte_cpu_to_le_16(bp->rx_nr_rings + + bp->tx_nr_rings + + BNXT_NUM_ASYNC_CPR(bp)); + req.num_vnics = rte_cpu_to_le_16(bp->rx_nr_rings); + if (bp->vf_resv_strategy == + HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC) { + enables |= HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS | + HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_L2_CTXS | + HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS; + req.num_rsscos_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_RSS_CTX); + req.num_l2_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_L2_CTX); + req.num_vnics = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_VNIC); + } else if (bp->vf_resv_strategy == + HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MAXIMAL) { + enables |= HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS; + 
req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx); + } + + if (test) + flags = HWRM_FUNC_VF_CFG_INPUT_FLAGS_TX_ASSETS_TEST | + HWRM_FUNC_VF_CFG_INPUT_FLAGS_RX_ASSETS_TEST | + HWRM_FUNC_VF_CFG_INPUT_FLAGS_CMPL_ASSETS_TEST | + HWRM_FUNC_VF_CFG_INPUT_FLAGS_RING_GRP_ASSETS_TEST | + HWRM_FUNC_VF_CFG_INPUT_FLAGS_STAT_CTX_ASSETS_TEST | + HWRM_FUNC_VF_CFG_INPUT_FLAGS_VNIC_ASSETS_TEST; + + if (test && BNXT_HAS_RING_GRPS(bp)) + flags |= HWRM_FUNC_VF_CFG_INPUT_FLAGS_RING_GRP_ASSETS_TEST; + + req.flags = rte_cpu_to_le_32(flags); + req.enables |= rte_cpu_to_le_32(enables); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + + if (test) + HWRM_CHECK_RESULT_SILENT(); + else + HWRM_CHECK_RESULT(); + + HWRM_UNLOCK(); + return rc; +} + +int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp) +{ + int rc; + struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_func_resource_qcaps_input req = {0}; + + HWRM_PREP(&req, HWRM_FUNC_RESOURCE_QCAPS, BNXT_USE_CHIMP_MB); + req.fid = rte_cpu_to_le_16(0xffff); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + + HWRM_CHECK_RESULT_SILENT(); + + if (BNXT_VF(bp)) { + bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx); + bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings); + bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings); + bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings); + bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps); + /* func_resource_qcaps does not return max_rx_em_flows. + * So use the value provided by func_qcaps. + */ + bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs); + if (!BNXT_CHIP_THOR(bp)) + bp->max_l2_ctx += bp->max_rx_em_flows; + bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics); + bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx); + } + bp->max_nq_rings = rte_le_to_cpu_16(resp->max_msix); + bp->vf_resv_strategy = rte_le_to_cpu_16(resp->vf_reservation_strategy); + if (bp->vf_resv_strategy > + HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC) + bp->vf_resv_strategy = + HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESERVATION_STRATEGY_MAXIMAL; + + HWRM_UNLOCK(); + return rc; +} + +int bnxt_hwrm_ver_get(struct bnxt *bp, uint32_t timeout) +{ + int rc = 0; + struct hwrm_ver_get_input req = {.req_type = 0 }; + struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr; + uint32_t fw_version; + uint16_t max_resp_len; + char type[RTE_MEMZONE_NAMESIZE]; + uint32_t dev_caps_cfg; + + bp->max_req_len = HWRM_MAX_REQ_LEN; + bp->hwrm_cmd_timeout = timeout; + HWRM_PREP(&req, HWRM_VER_GET, BNXT_USE_CHIMP_MB); + + req.hwrm_intf_maj = HWRM_VERSION_MAJOR; + req.hwrm_intf_min = HWRM_VERSION_MINOR; + req.hwrm_intf_upd = HWRM_VERSION_UPDATE; + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + + if (bp->flags & BNXT_FLAG_FW_RESET) + HWRM_CHECK_RESULT_SILENT(); + else + HWRM_CHECK_RESULT(); + + PMD_DRV_LOG(INFO, "%d.%d.%d:%d.%d.%d\n", + resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b, + resp->hwrm_intf_upd_8b, resp->hwrm_fw_maj_8b, + resp->hwrm_fw_min_8b, resp->hwrm_fw_bld_8b); + bp->fw_ver = (resp->hwrm_fw_maj_8b << 24) | + (resp->hwrm_fw_min_8b << 16) | + (resp->hwrm_fw_bld_8b << 8) | + resp->hwrm_fw_rsvd_8b; + PMD_DRV_LOG(INFO, "Driver HWRM version: %d.%d.%d\n", + HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE); + + fw_version = resp->hwrm_intf_maj_8b << 16; + fw_version |= resp->hwrm_intf_min_8b << 8; + fw_version |= resp->hwrm_intf_upd_8b; + bp->hwrm_spec_code = fw_version; + + /* def_req_timeout 
value is in milliseconds */ + bp->hwrm_cmd_timeout = rte_le_to_cpu_16(resp->def_req_timeout); + /* convert timeout to usec */ + bp->hwrm_cmd_timeout *= 1000; + if (!bp->hwrm_cmd_timeout) + bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT; + + if (resp->hwrm_intf_maj_8b != HWRM_VERSION_MAJOR) { + PMD_DRV_LOG(ERR, "Unsupported firmware API version\n"); + rc = -EINVAL; + goto error; + } + + if (bp->max_req_len > resp->max_req_win_len) { + PMD_DRV_LOG(ERR, "Unsupported request length\n"); + rc = -EINVAL; + } + bp->max_req_len = rte_le_to_cpu_16(resp->max_req_win_len); + bp->hwrm_max_ext_req_len = rte_le_to_cpu_16(resp->max_ext_req_len); + if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN) + bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN; + + max_resp_len = rte_le_to_cpu_16(resp->max_resp_len); + dev_caps_cfg = rte_le_to_cpu_32(resp->dev_caps_cfg); + + if (bp->max_resp_len != max_resp_len) { + sprintf(type, "bnxt_hwrm_" PCI_PRI_FMT, + bp->pdev->addr.domain, bp->pdev->addr.bus, + bp->pdev->addr.devid, bp->pdev->addr.function); + + rte_free(bp->hwrm_cmd_resp_addr); + + bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0); + if (bp->hwrm_cmd_resp_addr == NULL) { + rc = -ENOMEM; + goto error; + } + bp->hwrm_cmd_resp_dma_addr = + rte_malloc_virt2iova(bp->hwrm_cmd_resp_addr); + if (bp->hwrm_cmd_resp_dma_addr == RTE_BAD_IOVA) { + PMD_DRV_LOG(ERR, + "Unable to map response buffer to physical memory.\n"); + rc = -ENOMEM; + goto error; + } + bp->max_resp_len = max_resp_len; + } + + if ((dev_caps_cfg & + HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) && + (dev_caps_cfg & + HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) { + PMD_DRV_LOG(DEBUG, "Short command supported\n"); + bp->flags |= BNXT_FLAG_SHORT_CMD; + } + + if (((dev_caps_cfg & + HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) && + (dev_caps_cfg & + HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) || + bp->hwrm_max_ext_req_len > HWRM_MAX_REQ_LEN) { + sprintf(type, "bnxt_hwrm_short_" PCI_PRI_FMT, + bp->pdev->addr.domain, bp->pdev->addr.bus, + bp->pdev->addr.devid, bp->pdev->addr.function); + + rte_free(bp->hwrm_short_cmd_req_addr); + + bp->hwrm_short_cmd_req_addr = + rte_malloc(type, bp->hwrm_max_ext_req_len, 0); + if (bp->hwrm_short_cmd_req_addr == NULL) { + rc = -ENOMEM; + goto error; + } + bp->hwrm_short_cmd_req_dma_addr = + rte_malloc_virt2iova(bp->hwrm_short_cmd_req_addr); + if (bp->hwrm_short_cmd_req_dma_addr == RTE_BAD_IOVA) { + rte_free(bp->hwrm_short_cmd_req_addr); + PMD_DRV_LOG(ERR, + "Unable to map buffer to physical memory.\n"); + rc = -ENOMEM; + goto error; + } + } + if (dev_caps_cfg & + HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED) { + bp->flags |= BNXT_FLAG_KONG_MB_EN; + PMD_DRV_LOG(DEBUG, "Kong mailbox channel enabled\n"); + } + if (dev_caps_cfg & + HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED) + PMD_DRV_LOG(DEBUG, "FW supports Trusted VFs\n"); + if (dev_caps_cfg & + HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED) { + bp->fw_cap |= BNXT_FW_CAP_ADV_FLOW_MGMT; + PMD_DRV_LOG(DEBUG, "FW supports advanced flow management\n"); + } + + if (dev_caps_cfg & + HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_ADV_FLOW_COUNTERS_SUPPORTED) { + PMD_DRV_LOG(DEBUG, "FW supports advanced flow counters\n"); + bp->fw_cap |= BNXT_FW_CAP_ADV_FLOW_COUNTERS; + } + + +error: + HWRM_UNLOCK(); + return rc; +} + +int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags) +{ + int rc; + struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 }; + struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr; 
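/*
 * Nothing to do if the driver never completed HWRM_FUNC_DRV_RGTR;
 * otherwise issue HWRM_FUNC_DRV_UNRGTR so firmware no longer treats
 * this function's driver as registered.
 */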
+ + if (!(bp->flags & BNXT_FLAG_REGISTERED)) + return 0; + + HWRM_PREP(&req, HWRM_FUNC_DRV_UNRGTR, BNXT_USE_CHIMP_MB); + req.flags = flags; + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); + + return rc; +} + +static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf) +{ + int rc = 0; + struct hwrm_port_phy_cfg_input req = {0}; + struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr; + uint32_t enables = 0; + + HWRM_PREP(&req, HWRM_PORT_PHY_CFG, BNXT_USE_CHIMP_MB); + + if (conf->link_up) { + /* Setting Fixed Speed. But AutoNeg is ON, So disable it */ + if (bp->link_info->auto_mode && conf->link_speed) { + req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE; + PMD_DRV_LOG(DEBUG, "Disabling AutoNeg\n"); + } + + req.flags = rte_cpu_to_le_32(conf->phy_flags); + req.force_link_speed = rte_cpu_to_le_16(conf->link_speed); + enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE; + /* + * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set + * any auto mode, even "none". + */ + if (!conf->link_speed) { + /* No speeds specified. Enable AutoNeg - all speeds */ + req.auto_mode = + HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS; + } + /* AutoNeg - Advertise speeds specified. */ + if (conf->auto_link_speed_mask && + !(conf->phy_flags & HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE)) { + req.auto_mode = + HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK; + req.auto_link_speed_mask = + conf->auto_link_speed_mask; + enables |= + HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK; + } + + req.auto_duplex = conf->duplex; + enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX; + req.auto_pause = conf->auto_pause; + req.force_pause = conf->force_pause; + /* Set force_pause if there is no auto or if there is a force */ + if (req.auto_pause && !req.force_pause) + enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE; + else + enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE; + + req.enables = rte_cpu_to_le_32(enables); + } else { + req.flags = + rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN); + PMD_DRV_LOG(INFO, "Force Link Down\n"); + } + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); + + return rc; +} + +static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp, + struct bnxt_link_info *link_info) +{ + int rc = 0; + struct hwrm_port_phy_qcfg_input req = {0}; + struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr; + + HWRM_PREP(&req, HWRM_PORT_PHY_QCFG, BNXT_USE_CHIMP_MB); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + + HWRM_CHECK_RESULT(); + + link_info->phy_link_status = resp->link; + link_info->link_up = + (link_info->phy_link_status == + HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) ? 
1 : 0; + link_info->link_speed = rte_le_to_cpu_16(resp->link_speed); + link_info->duplex = resp->duplex_cfg; + link_info->pause = resp->pause; + link_info->auto_pause = resp->auto_pause; + link_info->force_pause = resp->force_pause; + link_info->auto_mode = resp->auto_mode; + link_info->phy_type = resp->phy_type; + link_info->media_type = resp->media_type; + + link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds); + link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed); + link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis); + link_info->force_link_speed = rte_le_to_cpu_16(resp->force_link_speed); + link_info->phy_ver[0] = resp->phy_maj; + link_info->phy_ver[1] = resp->phy_min; + link_info->phy_ver[2] = resp->phy_bld; + + HWRM_UNLOCK(); + + PMD_DRV_LOG(DEBUG, "Link Speed %d\n", link_info->link_speed); + PMD_DRV_LOG(DEBUG, "Auto Mode %d\n", link_info->auto_mode); + PMD_DRV_LOG(DEBUG, "Support Speeds %x\n", link_info->support_speeds); + PMD_DRV_LOG(DEBUG, "Auto Link Speed %x\n", link_info->auto_link_speed); + PMD_DRV_LOG(DEBUG, "Auto Link Speed Mask %x\n", + link_info->auto_link_speed_mask); + PMD_DRV_LOG(DEBUG, "Forced Link Speed %x\n", + link_info->force_link_speed); + + return rc; +} + +static bool bnxt_find_lossy_profile(struct bnxt *bp) +{ + int i = 0; + + for (i = BNXT_COS_QUEUE_COUNT - 1; i >= 0; i--) { + if (bp->tx_cos_queue[i].profile == + HWRM_QUEUE_SERVICE_PROFILE_LOSSY) { + bp->tx_cosq_id[0] = bp->tx_cos_queue[i].id; + return true; + } + } + return false; +} + +static void bnxt_find_first_valid_profile(struct bnxt *bp) +{ + int i = 0; + + for (i = BNXT_COS_QUEUE_COUNT - 1; i >= 0; i--) { + if (bp->tx_cos_queue[i].profile != + HWRM_QUEUE_SERVICE_PROFILE_UNKNOWN && + bp->tx_cos_queue[i].id != + HWRM_QUEUE_SERVICE_PROFILE_UNKNOWN) { + bp->tx_cosq_id[0] = bp->tx_cos_queue[i].id; + break; + } + } +} + +int bnxt_hwrm_queue_qportcfg(struct bnxt *bp) +{ + int rc = 0; + struct hwrm_queue_qportcfg_input req = {.req_type = 0 }; + struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr; + uint32_t dir = HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX; + int i; + +get_rx_info: + HWRM_PREP(&req, HWRM_QUEUE_QPORTCFG, BNXT_USE_CHIMP_MB); + + req.flags = rte_cpu_to_le_32(dir); + /* HWRM Version >= 1.9.1 only if COS Classification is not required. 
*/ + if (bp->hwrm_spec_code >= HWRM_VERSION_1_9_1 && + !(bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY)) + req.drv_qmap_cap = + HWRM_QUEUE_QPORTCFG_INPUT_DRV_QMAP_CAP_ENABLED; + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + + HWRM_CHECK_RESULT(); + + if (dir == HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX) { + GET_TX_QUEUE_INFO(0); + GET_TX_QUEUE_INFO(1); + GET_TX_QUEUE_INFO(2); + GET_TX_QUEUE_INFO(3); + GET_TX_QUEUE_INFO(4); + GET_TX_QUEUE_INFO(5); + GET_TX_QUEUE_INFO(6); + GET_TX_QUEUE_INFO(7); + } else { + GET_RX_QUEUE_INFO(0); + GET_RX_QUEUE_INFO(1); + GET_RX_QUEUE_INFO(2); + GET_RX_QUEUE_INFO(3); + GET_RX_QUEUE_INFO(4); + GET_RX_QUEUE_INFO(5); + GET_RX_QUEUE_INFO(6); + GET_RX_QUEUE_INFO(7); + } + + HWRM_UNLOCK(); + + if (dir == HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX) + goto done; + + if (bp->hwrm_spec_code < HWRM_VERSION_1_9_1) { + bp->tx_cosq_id[0] = bp->tx_cos_queue[0].id; + } else { + int j; + + /* iterate and find the COSq profile to use for Tx */ + if (bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY) { + for (j = 0, i = 0; i < BNXT_COS_QUEUE_COUNT; i++) { + if (bp->tx_cos_queue[i].id != 0xff) + bp->tx_cosq_id[j++] = + bp->tx_cos_queue[i].id; + } + } else { + /* When CoS classification is disabled, for normal NIC + * operations, ideally we should look to use LOSSY. + * If not found, fallback to the first valid profile + */ + if (!bnxt_find_lossy_profile(bp)) + bnxt_find_first_valid_profile(bp); + + } + } + + bp->max_tc = resp->max_configurable_queues; + bp->max_lltc = resp->max_configurable_lossless_queues; + if (bp->max_tc > BNXT_MAX_QUEUE) + bp->max_tc = BNXT_MAX_QUEUE; + bp->max_q = bp->max_tc; + + if (dir == HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX) { + dir = HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX; + goto get_rx_info; + } + +done: + return rc; +} + +int bnxt_hwrm_ring_alloc(struct bnxt *bp, + struct bnxt_ring *ring, + uint32_t ring_type, uint32_t map_index, + uint32_t stats_ctx_id, uint32_t cmpl_ring_id, + uint16_t tx_cosq_id) +{ + int rc = 0; + uint32_t enables = 0; + struct hwrm_ring_alloc_input req = {.req_type = 0 }; + struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr; + struct rte_mempool *mb_pool; + uint16_t rx_buf_size; + + HWRM_PREP(&req, HWRM_RING_ALLOC, BNXT_USE_CHIMP_MB); + + req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma); + req.fbo = rte_cpu_to_le_32(0); + /* Association of ring index with doorbell index */ + req.logical_id = rte_cpu_to_le_16(map_index); + req.length = rte_cpu_to_le_32(ring->ring_size); + + switch (ring_type) { + case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX: + req.ring_type = ring_type; + req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id); + req.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id); + req.queue_id = rte_cpu_to_le_16(tx_cosq_id); + if (stats_ctx_id != INVALID_STATS_CTX_ID) + enables |= + HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID; + break; + case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX: + req.ring_type = ring_type; + req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id); + req.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id); + if (BNXT_CHIP_THOR(bp)) { + mb_pool = bp->rx_queues[0]->mb_pool; + rx_buf_size = rte_pktmbuf_data_room_size(mb_pool) - + RTE_PKTMBUF_HEADROOM; + rx_buf_size = RTE_MIN(BNXT_MAX_PKT_LEN, rx_buf_size); + req.rx_buf_size = rte_cpu_to_le_16(rx_buf_size); + enables |= + HWRM_RING_ALLOC_INPUT_ENABLES_RX_BUF_SIZE_VALID; + } + if (stats_ctx_id != INVALID_STATS_CTX_ID) + enables |= + HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID; + break; + case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL: + 
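/*
 * Completion ring: on devices with notification queues (Thor), the
 * completion ring must also be bound to its NQ via nq_ring_id;
 * interrupt delivery for the ring uses MSI-X in either case.
 */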
req.ring_type = ring_type; + if (BNXT_HAS_NQ(bp)) { + /* Association of cp ring with nq */ + req.nq_ring_id = rte_cpu_to_le_16(cmpl_ring_id); + enables |= + HWRM_RING_ALLOC_INPUT_ENABLES_NQ_RING_ID_VALID; + } + req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX; + break; + case HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ: + req.ring_type = ring_type; + req.page_size = BNXT_PAGE_SHFT; + req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX; + break; + case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG: + req.ring_type = ring_type; + req.rx_ring_id = rte_cpu_to_le_16(ring->fw_rx_ring_id); + + mb_pool = bp->rx_queues[0]->mb_pool; + rx_buf_size = rte_pktmbuf_data_room_size(mb_pool) - + RTE_PKTMBUF_HEADROOM; + rx_buf_size = RTE_MIN(BNXT_MAX_PKT_LEN, rx_buf_size); + req.rx_buf_size = rte_cpu_to_le_16(rx_buf_size); + + req.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id); + enables |= HWRM_RING_ALLOC_INPUT_ENABLES_RX_RING_ID_VALID | + HWRM_RING_ALLOC_INPUT_ENABLES_RX_BUF_SIZE_VALID | + HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID; + break; + default: + PMD_DRV_LOG(ERR, "hwrm alloc invalid ring type %d\n", + ring_type); + HWRM_UNLOCK(); + return -EINVAL; + } + req.enables = rte_cpu_to_le_32(enables); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + + if (rc || resp->error_code) { + if (rc == 0 && resp->error_code) + rc = rte_le_to_cpu_16(resp->error_code); + switch (ring_type) { + case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL: + PMD_DRV_LOG(ERR, + "hwrm_ring_alloc cp failed. rc:%d\n", rc); + HWRM_UNLOCK(); + return rc; + case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX: + PMD_DRV_LOG(ERR, + "hwrm_ring_alloc rx failed. rc:%d\n", rc); + HWRM_UNLOCK(); + return rc; + case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG: + PMD_DRV_LOG(ERR, + "hwrm_ring_alloc rx agg failed. rc:%d\n", + rc); + HWRM_UNLOCK(); + return rc; + case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX: + PMD_DRV_LOG(ERR, + "hwrm_ring_alloc tx failed. rc:%d\n", rc); + HWRM_UNLOCK(); + return rc; + case HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ: + PMD_DRV_LOG(ERR, + "hwrm_ring_alloc nq failed. rc:%d\n", rc); + HWRM_UNLOCK(); + return rc; + default: + PMD_DRV_LOG(ERR, "Invalid ring. rc:%d\n", rc); + HWRM_UNLOCK(); + return rc; + } + } + + ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id); + HWRM_UNLOCK(); + return rc; +} + +int bnxt_hwrm_ring_free(struct bnxt *bp, + struct bnxt_ring *ring, uint32_t ring_type) +{ + int rc; + struct hwrm_ring_free_input req = {.req_type = 0 }; + struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr; + + HWRM_PREP(&req, HWRM_RING_FREE, BNXT_USE_CHIMP_MB); + + req.ring_type = ring_type; + req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + + if (rc || resp->error_code) { + if (rc == 0 && resp->error_code) + rc = rte_le_to_cpu_16(resp->error_code); + HWRM_UNLOCK(); + + switch (ring_type) { + case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL: + PMD_DRV_LOG(ERR, "hwrm_ring_free cp failed. rc:%d\n", + rc); + return rc; + case HWRM_RING_FREE_INPUT_RING_TYPE_RX: + PMD_DRV_LOG(ERR, "hwrm_ring_free rx failed. rc:%d\n", + rc); + return rc; + case HWRM_RING_FREE_INPUT_RING_TYPE_TX: + PMD_DRV_LOG(ERR, "hwrm_ring_free tx failed. rc:%d\n", + rc); + return rc; + case HWRM_RING_FREE_INPUT_RING_TYPE_NQ: + PMD_DRV_LOG(ERR, + "hwrm_ring_free nq failed. rc:%d\n", rc); + return rc; + case HWRM_RING_FREE_INPUT_RING_TYPE_RX_AGG: + PMD_DRV_LOG(ERR, + "hwrm_ring_free agg failed. 
rc:%d\n", rc); + return rc; + default: + PMD_DRV_LOG(ERR, "Invalid ring, rc:%d\n", rc); + return rc; + } + } + HWRM_UNLOCK(); + return 0; +} + +int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx) +{ + int rc = 0; + struct hwrm_ring_grp_alloc_input req = {.req_type = 0 }; + struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr; + + HWRM_PREP(&req, HWRM_RING_GRP_ALLOC, BNXT_USE_CHIMP_MB); + + req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id); + req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id); + req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id); + req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + + HWRM_CHECK_RESULT(); + + bp->grp_info[idx].fw_grp_id = rte_le_to_cpu_16(resp->ring_group_id); + + HWRM_UNLOCK(); + + return rc; +} + +int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx) +{ + int rc; + struct hwrm_ring_grp_free_input req = {.req_type = 0 }; + struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr; + + HWRM_PREP(&req, HWRM_RING_GRP_FREE, BNXT_USE_CHIMP_MB); + + req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); + + bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID; + return rc; +} + +int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr) +{ + int rc = 0; + struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 }; + struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr; + + if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE) + return rc; + + HWRM_PREP(&req, HWRM_STAT_CTX_CLR_STATS, BNXT_USE_CHIMP_MB); + + req.stat_ctx_id = rte_cpu_to_le_32(cpr->hw_stats_ctx_id); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); + + return rc; +} + +int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, + unsigned int idx __rte_unused) +{ + int rc; + struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 }; + struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr; + + HWRM_PREP(&req, HWRM_STAT_CTX_ALLOC, BNXT_USE_CHIMP_MB); + + req.update_period_ms = rte_cpu_to_le_32(0); + + req.stats_dma_addr = rte_cpu_to_le_64(cpr->hw_stats_map); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + + HWRM_CHECK_RESULT(); + + cpr->hw_stats_ctx_id = rte_le_to_cpu_32(resp->stat_ctx_id); + + HWRM_UNLOCK(); + + return rc; +} + +int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, + unsigned int idx __rte_unused) +{ + int rc; + struct hwrm_stat_ctx_free_input req = {.req_type = 0 }; + struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr; + + HWRM_PREP(&req, HWRM_STAT_CTX_FREE, BNXT_USE_CHIMP_MB); + + req.stat_ctx_id = rte_cpu_to_le_32(cpr->hw_stats_ctx_id); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); + + return rc; +} + +int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic) +{ + int rc = 0, i, j; + struct hwrm_vnic_alloc_input req = { 0 }; + struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr; + + if (!BNXT_HAS_RING_GRPS(bp)) + goto skip_ring_grps; + + /* map ring groups to this vnic */ + PMD_DRV_LOG(DEBUG, "Alloc VNIC. 
Start %x, End %x\n", + vnic->start_grp_id, vnic->end_grp_id); + for (i = vnic->start_grp_id, j = 0; i < vnic->end_grp_id; i++, j++) + vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id; + + vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id; + vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE; + vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE; + vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE; + +skip_ring_grps: + vnic->mru = BNXT_VNIC_MRU(bp->eth_dev->data->mtu); + HWRM_PREP(&req, HWRM_VNIC_ALLOC, BNXT_USE_CHIMP_MB); + + if (vnic->func_default) + req.flags = + rte_cpu_to_le_32(HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT); + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + + HWRM_CHECK_RESULT(); + + vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id); + HWRM_UNLOCK(); + PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id); + return rc; +} + +static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp, + struct bnxt_vnic_info *vnic, + struct bnxt_plcmodes_cfg *pmode) +{ + int rc = 0; + struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 }; + struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr; + + HWRM_PREP(&req, HWRM_VNIC_PLCMODES_QCFG, BNXT_USE_CHIMP_MB); + + req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + + HWRM_CHECK_RESULT(); + + pmode->flags = rte_le_to_cpu_32(resp->flags); + /* dflt_vnic bit doesn't exist in the _cfg command */ + pmode->flags &= ~(HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC); + pmode->jumbo_thresh = rte_le_to_cpu_16(resp->jumbo_thresh); + pmode->hds_offset = rte_le_to_cpu_16(resp->hds_offset); + pmode->hds_threshold = rte_le_to_cpu_16(resp->hds_threshold); + + HWRM_UNLOCK(); + + return rc; +} + +static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp, + struct bnxt_vnic_info *vnic, + struct bnxt_plcmodes_cfg *pmode) +{ + int rc = 0; + struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 }; + struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr; + + if (vnic->fw_vnic_id == INVALID_HW_RING_ID) { + PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id); + return rc; + } + + HWRM_PREP(&req, HWRM_VNIC_PLCMODES_CFG, BNXT_USE_CHIMP_MB); + + req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id); + req.flags = rte_cpu_to_le_32(pmode->flags); + req.jumbo_thresh = rte_cpu_to_le_16(pmode->jumbo_thresh); + req.hds_offset = rte_cpu_to_le_16(pmode->hds_offset); + req.hds_threshold = rte_cpu_to_le_16(pmode->hds_threshold); + req.enables = rte_cpu_to_le_32( + HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID | + HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID | + HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID + ); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); + + return rc; +} + +int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic) +{ + int rc = 0; + struct hwrm_vnic_cfg_input req = {.req_type = 0 }; + struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr; + struct bnxt_plcmodes_cfg pmodes = { 0 }; + uint32_t ctx_enable_flag = 0; + uint32_t enables = 0; + + if (vnic->fw_vnic_id == INVALID_HW_RING_ID) { + PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id); + return rc; + } + + rc = bnxt_hwrm_vnic_plcmodes_qcfg(bp, vnic, &pmodes); + if (rc) + return rc; + + HWRM_PREP(&req, HWRM_VNIC_CFG, BNXT_USE_CHIMP_MB); + + if (BNXT_CHIP_THOR(bp)) { + int dflt_rxq = vnic->start_grp_id; + struct bnxt_rx_ring_info *rxr; + struct bnxt_cp_ring_info *cpr; + struct 
bnxt_rx_queue *rxq; + int i; + + /* + * The first active receive ring is used as the VNIC + * default receive ring. If there are no active receive + * rings (all corresponding receive queues are stopped), + * the first receive ring is used. + */ + for (i = vnic->start_grp_id; i < vnic->end_grp_id; i++) { + rxq = bp->eth_dev->data->rx_queues[i]; + if (rxq->rx_started) { + dflt_rxq = i; + break; + } + } + + rxq = bp->eth_dev->data->rx_queues[dflt_rxq]; + rxr = rxq->rx_ring; + cpr = rxq->cp_ring; + + req.default_rx_ring_id = + rte_cpu_to_le_16(rxr->rx_ring_struct->fw_ring_id); + req.default_cmpl_ring_id = + rte_cpu_to_le_16(cpr->cp_ring_struct->fw_ring_id); + enables = HWRM_VNIC_CFG_INPUT_ENABLES_DEFAULT_RX_RING_ID | + HWRM_VNIC_CFG_INPUT_ENABLES_DEFAULT_CMPL_RING_ID; + goto config_mru; + } + + /* Only RSS support for now TBD: COS & LB */ + enables = HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP; + if (vnic->lb_rule != 0xffff) + ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE; + if (vnic->cos_rule != 0xffff) + ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE; + if (vnic->rss_rule != (uint16_t)HWRM_NA_SIGNATURE) { + ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_MRU; + ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE; + } + if (bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY) { + ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_QUEUE_ID; + req.queue_id = rte_cpu_to_le_16(vnic->cos_queue_id); + } + + enables |= ctx_enable_flag; + req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp); + req.rss_rule = rte_cpu_to_le_16(vnic->rss_rule); + req.cos_rule = rte_cpu_to_le_16(vnic->cos_rule); + req.lb_rule = rte_cpu_to_le_16(vnic->lb_rule); + +config_mru: + req.enables = rte_cpu_to_le_32(enables); + req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id); + req.mru = rte_cpu_to_le_16(vnic->mru); + /* Configure default VNIC only once. 
*/ + if (vnic->func_default && !(bp->flags & BNXT_FLAG_DFLT_VNIC_SET)) { + req.flags |= + rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT); + bp->flags |= BNXT_FLAG_DFLT_VNIC_SET; + } + if (vnic->vlan_strip) + req.flags |= + rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE); + if (vnic->bd_stall) + req.flags |= + rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE); + if (vnic->roce_dual) + req.flags |= rte_cpu_to_le_32( + HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE); + if (vnic->roce_only) + req.flags |= rte_cpu_to_le_32( + HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE); + if (vnic->rss_dflt_cr) + req.flags |= rte_cpu_to_le_32( + HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); + + rc = bnxt_hwrm_vnic_plcmodes_cfg(bp, vnic, &pmodes); + + return rc; +} + +int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic, + int16_t fw_vf_id) +{ + int rc = 0; + struct hwrm_vnic_qcfg_input req = {.req_type = 0 }; + struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr; + + if (vnic->fw_vnic_id == INVALID_HW_RING_ID) { + PMD_DRV_LOG(DEBUG, "VNIC QCFG ID %d\n", vnic->fw_vnic_id); + return rc; + } + HWRM_PREP(&req, HWRM_VNIC_QCFG, BNXT_USE_CHIMP_MB); + + req.enables = + rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID); + req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id); + req.vf_id = rte_cpu_to_le_16(fw_vf_id); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + + HWRM_CHECK_RESULT(); + + vnic->dflt_ring_grp = rte_le_to_cpu_16(resp->dflt_ring_grp); + vnic->rss_rule = rte_le_to_cpu_16(resp->rss_rule); + vnic->cos_rule = rte_le_to_cpu_16(resp->cos_rule); + vnic->lb_rule = rte_le_to_cpu_16(resp->lb_rule); + vnic->mru = rte_le_to_cpu_16(resp->mru); + vnic->func_default = rte_le_to_cpu_32( + resp->flags) & HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT; + vnic->vlan_strip = rte_le_to_cpu_32(resp->flags) & + HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE; + vnic->bd_stall = rte_le_to_cpu_32(resp->flags) & + HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE; + vnic->roce_dual = rte_le_to_cpu_32(resp->flags) & + HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE; + vnic->roce_only = rte_le_to_cpu_32(resp->flags) & + HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE; + vnic->rss_dflt_cr = rte_le_to_cpu_32(resp->flags) & + HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE; + + HWRM_UNLOCK(); + + return rc; +} + +int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, + struct bnxt_vnic_info *vnic, uint16_t ctx_idx) +{ + int rc = 0; + uint16_t ctx_id; + struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 }; + struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp = + bp->hwrm_cmd_resp_addr; + + HWRM_PREP(&req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, BNXT_USE_CHIMP_MB); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + HWRM_CHECK_RESULT(); + + ctx_id = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id); + if (!BNXT_HAS_RING_GRPS(bp)) + vnic->fw_grp_ids[ctx_idx] = ctx_id; + else if (ctx_idx == 0) + vnic->rss_rule = ctx_id; + + HWRM_UNLOCK(); + + return rc; +} + +static +int _bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, + struct bnxt_vnic_info *vnic, uint16_t ctx_idx) +{ + int rc = 0; + struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 }; + struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp = + bp->hwrm_cmd_resp_addr; + + if (ctx_idx == (uint16_t)HWRM_NA_SIGNATURE) { + PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule); + return rc; + } 
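/*
 * Release a single RSS/CoS/LB context back to firmware. The wrapper
 * bnxt_hwrm_vnic_ctx_free() below passes either the per-ring-table
 * contexts (Thor) or the VNIC's single rss_rule context.
 */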
+ HWRM_PREP(&req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, BNXT_USE_CHIMP_MB); + + req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(ctx_idx); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); + + return rc; +} + +int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic) +{ + int rc = 0; + + if (BNXT_CHIP_THOR(bp)) { + int j; + + for (j = 0; j < vnic->num_lb_ctxts; j++) { + rc = _bnxt_hwrm_vnic_ctx_free(bp, + vnic, + vnic->fw_grp_ids[j]); + vnic->fw_grp_ids[j] = INVALID_HW_RING_ID; + } + vnic->num_lb_ctxts = 0; + } else { + rc = _bnxt_hwrm_vnic_ctx_free(bp, vnic, vnic->rss_rule); + vnic->rss_rule = INVALID_HW_RING_ID; + } + + return rc; +} + +int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic) +{ + int rc = 0; + struct hwrm_vnic_free_input req = {.req_type = 0 }; + struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr; + + if (vnic->fw_vnic_id == INVALID_HW_RING_ID) { + PMD_DRV_LOG(DEBUG, "VNIC FREE ID %x\n", vnic->fw_vnic_id); + return rc; + } + + HWRM_PREP(&req, HWRM_VNIC_FREE, BNXT_USE_CHIMP_MB); + + req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); + + vnic->fw_vnic_id = INVALID_HW_RING_ID; + /* Configure default VNIC again if necessary. */ + if (vnic->func_default && (bp->flags & BNXT_FLAG_DFLT_VNIC_SET)) + bp->flags &= ~BNXT_FLAG_DFLT_VNIC_SET; + + return rc; +} + +static int +bnxt_hwrm_vnic_rss_cfg_thor(struct bnxt *bp, struct bnxt_vnic_info *vnic) +{ + int i; + int rc = 0; + int nr_ctxs = vnic->num_lb_ctxts; + struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 }; + struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr; + + for (i = 0; i < nr_ctxs; i++) { + HWRM_PREP(&req, HWRM_VNIC_RSS_CFG, BNXT_USE_CHIMP_MB); + + req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id); + req.hash_type = rte_cpu_to_le_32(vnic->hash_type); + req.hash_mode_flags = vnic->hash_mode; + + req.hash_key_tbl_addr = + rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr); + + req.ring_grp_tbl_addr = + rte_cpu_to_le_64(vnic->rss_table_dma_addr + + i * HW_HASH_INDEX_SIZE); + req.ring_table_pair_index = i; + req.rss_ctx_idx = rte_cpu_to_le_16(vnic->fw_grp_ids[i]); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), + BNXT_USE_CHIMP_MB); + + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); + } + + return rc; +} + +int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp, + struct bnxt_vnic_info *vnic) +{ + int rc = 0; + struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 }; + struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr; + + if (!vnic->rss_table) + return 0; + + if (BNXT_CHIP_THOR(bp)) + return bnxt_hwrm_vnic_rss_cfg_thor(bp, vnic); + + HWRM_PREP(&req, HWRM_VNIC_RSS_CFG, BNXT_USE_CHIMP_MB); + + req.hash_type = rte_cpu_to_le_32(vnic->hash_type); + req.hash_mode_flags = vnic->hash_mode; + + req.ring_grp_tbl_addr = + rte_cpu_to_le_64(vnic->rss_table_dma_addr); + req.hash_key_tbl_addr = + rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr); + req.rss_ctx_idx = rte_cpu_to_le_16(vnic->rss_rule); + req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); + + return rc; +} + +int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp, + struct bnxt_vnic_info *vnic) +{ + int rc = 0; + struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 }; + struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr; + uint16_t size; + + if 
(vnic->fw_vnic_id == INVALID_HW_RING_ID) { + PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id); + return rc; + } + + HWRM_PREP(&req, HWRM_VNIC_PLCMODES_CFG, BNXT_USE_CHIMP_MB); + + req.flags = rte_cpu_to_le_32( + HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT); + + req.enables = rte_cpu_to_le_32( + HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID); + + size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool); + size -= RTE_PKTMBUF_HEADROOM; + size = RTE_MIN(BNXT_MAX_PKT_LEN, size); + + req.jumbo_thresh = rte_cpu_to_le_16(size); + req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); + + return rc; +} + +int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp, + struct bnxt_vnic_info *vnic, bool enable) +{ + int rc = 0; + struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 }; + struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr; + + if (BNXT_CHIP_THOR(bp) && !bp->max_tpa_v2) { + if (enable) + PMD_DRV_LOG(ERR, "No HW support for LRO\n"); + return -ENOTSUP; + } + + if (vnic->fw_vnic_id == INVALID_HW_RING_ID) { + PMD_DRV_LOG(DEBUG, "Invalid vNIC ID\n"); + return 0; + } + + HWRM_PREP(&req, HWRM_VNIC_TPA_CFG, BNXT_USE_CHIMP_MB); + + if (enable) { + req.enables = rte_cpu_to_le_32( + HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS | + HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS | + HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN); + req.flags = rte_cpu_to_le_32( + HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA | + HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA | + HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE | + HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO | + HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN | + HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ); + req.max_agg_segs = rte_cpu_to_le_16(BNXT_TPA_MAX_AGGS(bp)); + req.max_aggs = rte_cpu_to_le_16(BNXT_TPA_MAX_SEGS(bp)); + req.min_agg_len = rte_cpu_to_le_32(512); + } + req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); + + return rc; +} + +int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr) +{ + struct hwrm_func_cfg_input req = {0}; + struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr; + int rc; + + req.flags = rte_cpu_to_le_32(bp->pf->vf_info[vf].func_cfg_flags); + req.enables = rte_cpu_to_le_32( + HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR); + memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr)); + req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid); + + HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); + + bp->pf->vf_info[vf].random_mac = false; + + return rc; +} + +int bnxt_hwrm_func_qstats_tx_drop(struct bnxt *bp, uint16_t fid, + uint64_t *dropped) +{ + int rc = 0; + struct hwrm_func_qstats_input req = {.req_type = 0}; + struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr; + + HWRM_PREP(&req, HWRM_FUNC_QSTATS, BNXT_USE_CHIMP_MB); + + req.fid = rte_cpu_to_le_16(fid); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + + HWRM_CHECK_RESULT(); + + if (dropped) + *dropped = rte_le_to_cpu_64(resp->tx_drop_pkts); + + HWRM_UNLOCK(); + + return rc; +} + +int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid, + struct rte_eth_stats *stats, + struct hwrm_func_qstats_output *func_qstats) +{ + int rc = 0; + struct hwrm_func_qstats_input req = {.req_type 
= 0}; + struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr; + + HWRM_PREP(&req, HWRM_FUNC_QSTATS, BNXT_USE_CHIMP_MB); + + req.fid = rte_cpu_to_le_16(fid); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + + HWRM_CHECK_RESULT(); + if (func_qstats) + memcpy(func_qstats, resp, + sizeof(struct hwrm_func_qstats_output)); + + if (!stats) + goto exit; + + stats->ipackets = rte_le_to_cpu_64(resp->rx_ucast_pkts); + stats->ipackets += rte_le_to_cpu_64(resp->rx_mcast_pkts); + stats->ipackets += rte_le_to_cpu_64(resp->rx_bcast_pkts); + stats->ibytes = rte_le_to_cpu_64(resp->rx_ucast_bytes); + stats->ibytes += rte_le_to_cpu_64(resp->rx_mcast_bytes); + stats->ibytes += rte_le_to_cpu_64(resp->rx_bcast_bytes); + + stats->opackets = rte_le_to_cpu_64(resp->tx_ucast_pkts); + stats->opackets += rte_le_to_cpu_64(resp->tx_mcast_pkts); + stats->opackets += rte_le_to_cpu_64(resp->tx_bcast_pkts); + stats->obytes = rte_le_to_cpu_64(resp->tx_ucast_bytes); + stats->obytes += rte_le_to_cpu_64(resp->tx_mcast_bytes); + stats->obytes += rte_le_to_cpu_64(resp->tx_bcast_bytes); + + stats->imissed = rte_le_to_cpu_64(resp->rx_discard_pkts); + stats->ierrors = rte_le_to_cpu_64(resp->rx_drop_pkts); + stats->oerrors = rte_le_to_cpu_64(resp->tx_discard_pkts); + +exit: + HWRM_UNLOCK(); + + return rc; +} + +int bnxt_hwrm_func_clr_stats(struct bnxt *bp, uint16_t fid) +{ + int rc = 0; + struct hwrm_func_clr_stats_input req = {.req_type = 0}; + struct hwrm_func_clr_stats_output *resp = bp->hwrm_cmd_resp_addr; + + HWRM_PREP(&req, HWRM_FUNC_CLR_STATS, BNXT_USE_CHIMP_MB); + + req.fid = rte_cpu_to_le_16(fid); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); + + return rc; +} + +int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp) +{ + unsigned int i; + int rc = 0; + + for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) { + struct bnxt_tx_queue *txq; + struct bnxt_rx_queue *rxq; + struct bnxt_cp_ring_info *cpr; + + if (i >= bp->rx_cp_nr_rings) { + txq = bp->tx_queues[i - bp->rx_cp_nr_rings]; + cpr = txq->cp_ring; + } else { + rxq = bp->rx_queues[i]; + cpr = rxq->cp_ring; + } + + rc = bnxt_hwrm_stat_clear(bp, cpr); + if (rc) + return rc; + } + return 0; +} + +static int +bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp) +{ + int rc; + unsigned int i; + struct bnxt_cp_ring_info *cpr; + + for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) { + + if (i >= bp->rx_cp_nr_rings) { + cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring; + } else { + cpr = bp->rx_queues[i]->cp_ring; + if (BNXT_HAS_RING_GRPS(bp)) + bp->grp_info[i].fw_stats_ctx = -1; + } + if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) { + rc = bnxt_hwrm_stat_ctx_free(bp, cpr, i); + cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE; + if (rc) + return rc; + } + } + return 0; +} + +int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp) +{ + unsigned int i; + int rc = 0; + + for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) { + struct bnxt_tx_queue *txq; + struct bnxt_rx_queue *rxq; + struct bnxt_cp_ring_info *cpr; + + if (i >= bp->rx_cp_nr_rings) { + txq = bp->tx_queues[i - bp->rx_cp_nr_rings]; + cpr = txq->cp_ring; + } else { + rxq = bp->rx_queues[i]; + cpr = rxq->cp_ring; + } + + rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, i); + + if (rc) + return rc; + } + return rc; +} + +static int +bnxt_free_all_hwrm_ring_grps(struct bnxt *bp) +{ + uint16_t idx; + uint32_t rc = 0; + + if (!BNXT_HAS_RING_GRPS(bp)) + return 0; + + for (idx = 0; idx < bp->rx_cp_nr_rings; idx++) { + + if 
(bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID) + continue; + + rc = bnxt_hwrm_ring_grp_free(bp, idx); + + if (rc) + return rc; + } + return rc; +} + +void bnxt_free_nq_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr) +{ + struct bnxt_ring *cp_ring = cpr->cp_ring_struct; + + bnxt_hwrm_ring_free(bp, cp_ring, + HWRM_RING_FREE_INPUT_RING_TYPE_NQ); + cp_ring->fw_ring_id = INVALID_HW_RING_ID; + memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size * + sizeof(*cpr->cp_desc_ring)); + cpr->cp_raw_cons = 0; + cpr->valid = 0; +} + +void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr) +{ + struct bnxt_ring *cp_ring = cpr->cp_ring_struct; + + bnxt_hwrm_ring_free(bp, cp_ring, + HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL); + cp_ring->fw_ring_id = INVALID_HW_RING_ID; + memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size * + sizeof(*cpr->cp_desc_ring)); + cpr->cp_raw_cons = 0; + cpr->valid = 0; +} + +void bnxt_free_hwrm_rx_ring(struct bnxt *bp, int queue_index) +{ + struct bnxt_rx_queue *rxq = bp->rx_queues[queue_index]; + struct bnxt_rx_ring_info *rxr = rxq->rx_ring; + struct bnxt_ring *ring = rxr->rx_ring_struct; + struct bnxt_cp_ring_info *cpr = rxq->cp_ring; + + if (ring->fw_ring_id != INVALID_HW_RING_ID) { + bnxt_hwrm_ring_free(bp, ring, + HWRM_RING_FREE_INPUT_RING_TYPE_RX); + ring->fw_ring_id = INVALID_HW_RING_ID; + if (BNXT_HAS_RING_GRPS(bp)) + bp->grp_info[queue_index].rx_fw_ring_id = + INVALID_HW_RING_ID; + } + ring = rxr->ag_ring_struct; + if (ring->fw_ring_id != INVALID_HW_RING_ID) { + bnxt_hwrm_ring_free(bp, ring, + BNXT_CHIP_THOR(bp) ? + HWRM_RING_FREE_INPUT_RING_TYPE_RX_AGG : + HWRM_RING_FREE_INPUT_RING_TYPE_RX); + if (BNXT_HAS_RING_GRPS(bp)) + bp->grp_info[queue_index].ag_fw_ring_id = + INVALID_HW_RING_ID; + } + if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) + bnxt_free_cp_ring(bp, cpr); + + if (BNXT_HAS_RING_GRPS(bp)) + bp->grp_info[queue_index].cp_fw_ring_id = INVALID_HW_RING_ID; +} + +static int +bnxt_free_all_hwrm_rings(struct bnxt *bp) +{ + unsigned int i; + + for (i = 0; i < bp->tx_cp_nr_rings; i++) { + struct bnxt_tx_queue *txq = bp->tx_queues[i]; + struct bnxt_tx_ring_info *txr = txq->tx_ring; + struct bnxt_ring *ring = txr->tx_ring_struct; + struct bnxt_cp_ring_info *cpr = txq->cp_ring; + + if (ring->fw_ring_id != INVALID_HW_RING_ID) { + bnxt_hwrm_ring_free(bp, ring, + HWRM_RING_FREE_INPUT_RING_TYPE_TX); + ring->fw_ring_id = INVALID_HW_RING_ID; + memset(txr->tx_desc_ring, 0, + txr->tx_ring_struct->ring_size * + sizeof(*txr->tx_desc_ring)); + memset(txr->tx_buf_ring, 0, + txr->tx_ring_struct->ring_size * + sizeof(*txr->tx_buf_ring)); + txr->tx_prod = 0; + txr->tx_cons = 0; + } + if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) { + bnxt_free_cp_ring(bp, cpr); + cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID; + } + } + + for (i = 0; i < bp->rx_cp_nr_rings; i++) + bnxt_free_hwrm_rx_ring(bp, i); + + return 0; +} + +int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp) +{ + uint16_t i; + uint32_t rc = 0; + + if (!BNXT_HAS_RING_GRPS(bp)) + return 0; + + for (i = 0; i < bp->rx_cp_nr_rings; i++) { + rc = bnxt_hwrm_ring_grp_alloc(bp, i); + if (rc) + return rc; + } + return rc; +} + +/* + * HWRM utility functions + */ + +void bnxt_free_hwrm_resources(struct bnxt *bp) +{ + /* Release memzone */ + rte_free(bp->hwrm_cmd_resp_addr); + rte_free(bp->hwrm_short_cmd_req_addr); + bp->hwrm_cmd_resp_addr = NULL; + bp->hwrm_short_cmd_req_addr = NULL; + bp->hwrm_cmd_resp_dma_addr = 0; + bp->hwrm_short_cmd_req_dma_addr = 0; +} + +int 
bnxt_alloc_hwrm_resources(struct bnxt *bp) +{ + struct rte_pci_device *pdev = bp->pdev; + char type[RTE_MEMZONE_NAMESIZE]; + + sprintf(type, "bnxt_hwrm_" PCI_PRI_FMT, pdev->addr.domain, + pdev->addr.bus, pdev->addr.devid, pdev->addr.function); + bp->max_resp_len = HWRM_MAX_RESP_LEN; + bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0); + if (bp->hwrm_cmd_resp_addr == NULL) + return -ENOMEM; + bp->hwrm_cmd_resp_dma_addr = + rte_malloc_virt2iova(bp->hwrm_cmd_resp_addr); + if (bp->hwrm_cmd_resp_dma_addr == RTE_BAD_IOVA) { + PMD_DRV_LOG(ERR, + "unable to map response address to physical memory\n"); + return -ENOMEM; + } + rte_spinlock_init(&bp->hwrm_lock); + + return 0; +} + +static int +bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic) +{ + struct bnxt_filter_info *filter; + int rc = 0; + + STAILQ_FOREACH(filter, &vnic->filter, next) { + if (filter->filter_type == HWRM_CFA_EM_FILTER) + rc = bnxt_hwrm_clear_em_filter(bp, filter); + else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) + rc = bnxt_hwrm_clear_ntuple_filter(bp, filter); + rc = bnxt_hwrm_clear_l2_filter(bp, filter); + STAILQ_REMOVE(&vnic->filter, filter, bnxt_filter_info, next); + bnxt_free_filter(bp, filter); + } + return rc; +} + +static int +bnxt_clear_hwrm_vnic_flows(struct bnxt *bp, struct bnxt_vnic_info *vnic) +{ + struct bnxt_filter_info *filter; + struct rte_flow *flow; + int rc = 0; + + while (!STAILQ_EMPTY(&vnic->flow_list)) { + flow = STAILQ_FIRST(&vnic->flow_list); + filter = flow->filter; + PMD_DRV_LOG(DEBUG, "filter type %d\n", filter->filter_type); + if (filter->filter_type == HWRM_CFA_EM_FILTER) + rc = bnxt_hwrm_clear_em_filter(bp, filter); + else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) + rc = bnxt_hwrm_clear_ntuple_filter(bp, filter); + rc = bnxt_hwrm_clear_l2_filter(bp, filter); + + STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next); + rte_free(flow); + } + return rc; +} + +int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic) +{ + struct bnxt_filter_info *filter; + int rc = 0; + + STAILQ_FOREACH(filter, &vnic->filter, next) { + if (filter->filter_type == HWRM_CFA_EM_FILTER) + rc = bnxt_hwrm_set_em_filter(bp, filter->dst_id, + filter); + else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) + rc = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id, + filter); + else + rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, + filter); + if (rc) + break; + } + return rc; +} + +static void +bnxt_free_tunnel_ports(struct bnxt *bp) +{ + if (bp->vxlan_port_cnt) + bnxt_hwrm_tunnel_dst_port_free(bp, bp->vxlan_fw_dst_port_id, + HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN); + bp->vxlan_port = 0; + if (bp->geneve_port_cnt) + bnxt_hwrm_tunnel_dst_port_free(bp, bp->geneve_fw_dst_port_id, + HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE); + bp->geneve_port = 0; +} + +void bnxt_free_all_hwrm_resources(struct bnxt *bp) +{ + int i; + + if (bp->vnic_info == NULL) + return; + + /* + * Cleanup VNICs in reverse order, to make sure the L2 filter + * from vnic0 is last to be cleaned up. 
+ */ + for (i = bp->max_vnics - 1; i >= 0; i--) { + struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; + + if (vnic->fw_vnic_id == INVALID_HW_RING_ID) + continue; + + bnxt_clear_hwrm_vnic_flows(bp, vnic); + + bnxt_clear_hwrm_vnic_filters(bp, vnic); + + bnxt_hwrm_vnic_ctx_free(bp, vnic); + + bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false); + + bnxt_hwrm_vnic_free(bp, vnic); + + rte_free(vnic->fw_grp_ids); + } + /* Ring resources */ + bnxt_free_all_hwrm_rings(bp); + bnxt_free_all_hwrm_ring_grps(bp); + bnxt_free_all_hwrm_stat_ctxs(bp); + bnxt_free_tunnel_ports(bp); +} + +static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed) +{ + uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH; + + if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG) + return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH; + + switch (conf_link_speed) { + case ETH_LINK_SPEED_10M_HD: + case ETH_LINK_SPEED_100M_HD: + /* FALLTHROUGH */ + return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF; + } + return hw_link_duplex; +} + +static uint16_t bnxt_check_eth_link_autoneg(uint32_t conf_link) +{ + return (conf_link & ETH_LINK_SPEED_FIXED) ? 0 : 1; +} + +static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed) +{ + uint16_t eth_link_speed = 0; + + if (conf_link_speed == ETH_LINK_SPEED_AUTONEG) + return ETH_LINK_SPEED_AUTONEG; + + switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) { + case ETH_LINK_SPEED_100M: + case ETH_LINK_SPEED_100M_HD: + /* FALLTHROUGH */ + eth_link_speed = + HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB; + break; + case ETH_LINK_SPEED_1G: + eth_link_speed = + HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB; + break; + case ETH_LINK_SPEED_2_5G: + eth_link_speed = + HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB; + break; + case ETH_LINK_SPEED_10G: + eth_link_speed = + HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB; + break; + case ETH_LINK_SPEED_20G: + eth_link_speed = + HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB; + break; + case ETH_LINK_SPEED_25G: + eth_link_speed = + HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB; + break; + case ETH_LINK_SPEED_40G: + eth_link_speed = + HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB; + break; + case ETH_LINK_SPEED_50G: + eth_link_speed = + HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB; + break; + case ETH_LINK_SPEED_100G: + eth_link_speed = + HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB; + break; + case ETH_LINK_SPEED_200G: + eth_link_speed = + HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_200GB; + break; + default: + PMD_DRV_LOG(ERR, + "Unsupported link speed %d; default to AUTO\n", + conf_link_speed); + break; + } + return eth_link_speed; +} + +#define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \ + ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \ + ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \ + ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G | \ + ETH_LINK_SPEED_100G | ETH_LINK_SPEED_200G) + +static int bnxt_valid_link_speed(uint32_t link_speed, uint16_t port_id) +{ + uint32_t one_speed; + + if (link_speed == ETH_LINK_SPEED_AUTONEG) + return 0; + + if (link_speed & ETH_LINK_SPEED_FIXED) { + one_speed = link_speed & ~ETH_LINK_SPEED_FIXED; + + if (one_speed & (one_speed - 1)) { + PMD_DRV_LOG(ERR, + "Invalid advertised speeds (%u) for port %u\n", + link_speed, port_id); + return -EINVAL; + } + if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) { + PMD_DRV_LOG(ERR, + "Unsupported advertised speed (%u) for port %u\n", + link_speed, port_id); + return -EINVAL; + } + } else { + if (!(link_speed & 
BNXT_SUPPORTED_SPEEDS)) { + PMD_DRV_LOG(ERR, + "Unsupported advertised speeds (%u) for port %u\n", + link_speed, port_id); + return -EINVAL; + } + } + return 0; +} + +static uint16_t +bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed) +{ + uint16_t ret = 0; + + if (link_speed == ETH_LINK_SPEED_AUTONEG) { + if (bp->link_info->support_speeds) + return bp->link_info->support_speeds; + link_speed = BNXT_SUPPORTED_SPEEDS; + } + + if (link_speed & ETH_LINK_SPEED_100M) + ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB; + if (link_speed & ETH_LINK_SPEED_100M_HD) + ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB; + if (link_speed & ETH_LINK_SPEED_1G) + ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB; + if (link_speed & ETH_LINK_SPEED_2_5G) + ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB; + if (link_speed & ETH_LINK_SPEED_10G) + ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB; + if (link_speed & ETH_LINK_SPEED_20G) + ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB; + if (link_speed & ETH_LINK_SPEED_25G) + ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB; + if (link_speed & ETH_LINK_SPEED_40G) + ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB; + if (link_speed & ETH_LINK_SPEED_50G) + ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB; + if (link_speed & ETH_LINK_SPEED_100G) + ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100GB; + if (link_speed & ETH_LINK_SPEED_200G) + ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_200GB; + return ret; +} + +static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed) +{ + uint32_t eth_link_speed = ETH_SPEED_NUM_NONE; + + switch (hw_link_speed) { + case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB: + eth_link_speed = ETH_SPEED_NUM_100M; + break; + case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB: + eth_link_speed = ETH_SPEED_NUM_1G; + break; + case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB: + eth_link_speed = ETH_SPEED_NUM_2_5G; + break; + case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB: + eth_link_speed = ETH_SPEED_NUM_10G; + break; + case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB: + eth_link_speed = ETH_SPEED_NUM_20G; + break; + case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB: + eth_link_speed = ETH_SPEED_NUM_25G; + break; + case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB: + eth_link_speed = ETH_SPEED_NUM_40G; + break; + case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB: + eth_link_speed = ETH_SPEED_NUM_50G; + break; + case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB: + eth_link_speed = ETH_SPEED_NUM_100G; + break; + case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_200GB: + eth_link_speed = ETH_SPEED_NUM_200G; + break; + case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB: + default: + PMD_DRV_LOG(ERR, "HWRM link speed %d not defined\n", + hw_link_speed); + break; + } + return eth_link_speed; +} + +static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex) +{ + uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX; + + switch (hw_link_duplex) { + case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH: + case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL: + /* FALLTHROUGH */ + eth_link_duplex = ETH_LINK_FULL_DUPLEX; + break; + case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF: + eth_link_duplex = ETH_LINK_HALF_DUPLEX; + break; + default: + PMD_DRV_LOG(ERR, "HWRM link duplex %d not defined\n", + hw_link_duplex); + break; + } + return eth_link_duplex; +} + +int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link) +{ + int rc = 0; + struct bnxt_link_info *link_info = 
bp->link_info; + + rc = bnxt_hwrm_port_phy_qcfg(bp, link_info); + if (rc) { + PMD_DRV_LOG(ERR, + "Get link config failed with rc %d\n", rc); + goto exit; + } + if (link_info->link_speed) + link->link_speed = + bnxt_parse_hw_link_speed(link_info->link_speed); + else + link->link_speed = ETH_SPEED_NUM_NONE; + link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex); + link->link_status = link_info->link_up; + link->link_autoneg = link_info->auto_mode == + HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ? + ETH_LINK_FIXED : ETH_LINK_AUTONEG; +exit: + return rc; +} + +int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up) +{ + int rc = 0; + struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf; + struct bnxt_link_info link_req; + uint16_t speed, autoneg; + + if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp)) + return 0; + + rc = bnxt_valid_link_speed(dev_conf->link_speeds, + bp->eth_dev->data->port_id); + if (rc) + goto error; + + memset(&link_req, 0, sizeof(link_req)); + link_req.link_up = link_up; + if (!link_up) + goto port_phy_cfg; + + autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds); + if (BNXT_CHIP_THOR(bp) && + dev_conf->link_speeds == ETH_LINK_SPEED_40G) { + /* 40G is not supported as part of media auto detect. + * The speed should be forced and autoneg disabled + * to configure 40G speed. + */ + PMD_DRV_LOG(INFO, "Disabling autoneg for 40G\n"); + autoneg = 0; + } + + speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds); + link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY; + /* Autoneg can be done only when the FW allows. + * When user configures fixed speed of 40G and later changes to + * any other speed, auto_link_speed/force_link_speed is still set + * to 40G until link comes up at new speed. + */ + if (autoneg == 1 && + !(!BNXT_CHIP_THOR(bp) && + (bp->link_info->auto_link_speed || + bp->link_info->force_link_speed))) { + link_req.phy_flags |= + HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG; + link_req.auto_link_speed_mask = + bnxt_parse_eth_link_speed_mask(bp, + dev_conf->link_speeds); + } else { + if (bp->link_info->phy_type == + HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET || + bp->link_info->phy_type == + HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE || + bp->link_info->media_type == + HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP) { + PMD_DRV_LOG(ERR, "10GBase-T devices must autoneg\n"); + return -EINVAL; + } + + link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE; + /* If user wants a particular speed try that first. */ + if (speed) + link_req.link_speed = speed; + else if (bp->link_info->force_link_speed) + link_req.link_speed = bp->link_info->force_link_speed; + else + link_req.link_speed = bp->link_info->auto_link_speed; + } + link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds); + link_req.auto_pause = bp->link_info->auto_pause; + link_req.force_pause = bp->link_info->force_pause; + +port_phy_cfg: + rc = bnxt_hwrm_port_phy_cfg(bp, &link_req); + if (rc) { + PMD_DRV_LOG(ERR, + "Set link config failed with rc %d\n", rc); + } + +error: + return rc; +} + +/* JIRA 22088 */ +int bnxt_hwrm_func_qcfg(struct bnxt *bp, uint16_t *mtu) +{ + struct hwrm_func_qcfg_input req = {0}; + struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr; + uint16_t flags; + int rc = 0; + bp->func_svif = BNXT_SVIF_INVALID; + uint16_t svif_info; + + HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB); + req.fid = rte_cpu_to_le_16(0xffff); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + + HWRM_CHECK_RESULT(); + + /* Hard Coded.. 
0xfff VLAN ID mask */ + bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff; + + svif_info = rte_le_to_cpu_16(resp->svif_info); + if (svif_info & HWRM_FUNC_QCFG_OUTPUT_SVIF_INFO_SVIF_VALID) + bp->func_svif = svif_info & + HWRM_FUNC_QCFG_OUTPUT_SVIF_INFO_SVIF_MASK; + + flags = rte_le_to_cpu_16(resp->flags); + if (BNXT_PF(bp) && (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_HOST)) + bp->flags |= BNXT_FLAG_MULTI_HOST; + + if (BNXT_VF(bp) && + !BNXT_VF_IS_TRUSTED(bp) && + (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_TRUSTED_VF)) { + bp->flags |= BNXT_FLAG_TRUSTED_VF_EN; + PMD_DRV_LOG(INFO, "Trusted VF cap enabled\n"); + } else if (BNXT_VF(bp) && + BNXT_VF_IS_TRUSTED(bp) && + !(flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_TRUSTED_VF)) { + bp->flags &= ~BNXT_FLAG_TRUSTED_VF_EN; + PMD_DRV_LOG(INFO, "Trusted VF cap disabled\n"); + } + + if (mtu) + *mtu = rte_le_to_cpu_16(resp->mtu); + + switch (resp->port_partition_type) { + case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0: + case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5: + case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0: + /* FALLTHROUGH */ + bp->flags |= BNXT_FLAG_NPAR_PF; + break; + default: + bp->flags &= ~BNXT_FLAG_NPAR_PF; + break; + } + + HWRM_UNLOCK(); + + return rc; +} + +int bnxt_hwrm_port_mac_qcfg(struct bnxt *bp) +{ + struct hwrm_port_mac_qcfg_input req = {0}; + struct hwrm_port_mac_qcfg_output *resp = bp->hwrm_cmd_resp_addr; + uint16_t port_svif_info; + int rc; + + bp->port_svif = BNXT_SVIF_INVALID; + + if (!BNXT_PF(bp)) + return 0; + + HWRM_PREP(&req, HWRM_PORT_MAC_QCFG, BNXT_USE_CHIMP_MB); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + + HWRM_CHECK_RESULT(); + + port_svif_info = rte_le_to_cpu_16(resp->port_svif_info); + if (port_svif_info & + HWRM_PORT_MAC_QCFG_OUTPUT_PORT_SVIF_INFO_PORT_SVIF_VALID) + bp->port_svif = port_svif_info & + HWRM_PORT_MAC_QCFG_OUTPUT_PORT_SVIF_INFO_PORT_SVIF_MASK; + + HWRM_UNLOCK(); + + return 0; +} + +static void copy_func_cfg_to_qcaps(struct hwrm_func_cfg_input *fcfg, + struct hwrm_func_qcaps_output *qcaps) +{ + qcaps->max_rsscos_ctx = fcfg->num_rsscos_ctxs; + memcpy(qcaps->mac_address, fcfg->dflt_mac_addr, + sizeof(qcaps->mac_address)); + qcaps->max_l2_ctxs = fcfg->num_l2_ctxs; + qcaps->max_rx_rings = fcfg->num_rx_rings; + qcaps->max_tx_rings = fcfg->num_tx_rings; + qcaps->max_cmpl_rings = fcfg->num_cmpl_rings; + qcaps->max_stat_ctx = fcfg->num_stat_ctxs; + qcaps->max_vfs = 0; + qcaps->first_vf_id = 0; + qcaps->max_vnics = fcfg->num_vnics; + qcaps->max_decap_records = 0; + qcaps->max_encap_records = 0; + qcaps->max_tx_wm_flows = 0; + qcaps->max_tx_em_flows = 0; + qcaps->max_rx_wm_flows = 0; + qcaps->max_rx_em_flows = 0; + qcaps->max_flow_id = 0; + qcaps->max_mcast_filters = fcfg->num_mcast_filters; + qcaps->max_sp_tx_rings = 0; + qcaps->max_hw_ring_grps = fcfg->num_hw_ring_grps; +} + +static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings) +{ + struct hwrm_func_cfg_input req = {0}; + struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr; + uint32_t enables; + int rc; + + enables = HWRM_FUNC_CFG_INPUT_ENABLES_MTU | + HWRM_FUNC_CFG_INPUT_ENABLES_MRU | + HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS | + HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS | + HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS | + HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS | + HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS | + HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS | + HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS; + + if (BNXT_HAS_RING_GRPS(bp)) { + enables |= HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS; + 
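/*
 * Devices with HW ring groups reserve them here; devices with NQs
 * (Thor) have no ring groups and instead reserve MSI-X vectors for
 * their notification queues (see the NQ branch below).
 */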
req.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps); + } else if (BNXT_HAS_NQ(bp)) { + enables |= HWRM_FUNC_CFG_INPUT_ENABLES_NUM_MSIX; + req.num_msix = rte_cpu_to_le_16(bp->max_nq_rings); + } + + req.flags = rte_cpu_to_le_32(bp->pf->func_cfg_flags); + req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU); + req.mru = rte_cpu_to_le_16(BNXT_VNIC_MRU(bp->eth_dev->data->mtu)); + req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx); + req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx); + req.num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings); + req.num_tx_rings = rte_cpu_to_le_16(tx_rings); + req.num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings); + req.num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx); + req.num_vnics = rte_cpu_to_le_16(bp->max_vnics); + req.fid = rte_cpu_to_le_16(0xffff); + req.enables = rte_cpu_to_le_32(enables); + + HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); + + return rc; +} + +static void populate_vf_func_cfg_req(struct bnxt *bp, + struct hwrm_func_cfg_input *req, + int num_vfs) +{ + req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU | + HWRM_FUNC_CFG_INPUT_ENABLES_MRU | + HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS | + HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS | + HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS | + HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS | + HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS | + HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS | + HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS | + HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS); + + req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN + + RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE * + BNXT_NUM_VLANS); + req->mru = rte_cpu_to_le_16(BNXT_VNIC_MRU(bp->eth_dev->data->mtu)); + req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx / + (num_vfs + 1)); + req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1)); + req->num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings / + (num_vfs + 1)); + req->num_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1)); + req->num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1)); + req->num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1)); + /* TODO: For now, do not support VMDq/RFS on VFs. 
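Each VF is therefore given a single VNIC below.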
*/ + req->num_vnics = rte_cpu_to_le_16(1); + req->num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps / + (num_vfs + 1)); +} + +static void add_random_mac_if_needed(struct bnxt *bp, + struct hwrm_func_cfg_input *cfg_req, + int vf) +{ + struct rte_ether_addr mac; + + if (bnxt_hwrm_func_qcfg_vf_default_mac(bp, vf, &mac)) + return; + + if (memcmp(mac.addr_bytes, "\x00\x00\x00\x00\x00", 6) == 0) { + cfg_req->enables |= + rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR); + rte_eth_random_addr(cfg_req->dflt_mac_addr); + bp->pf->vf_info[vf].random_mac = true; + } else { + memcpy(cfg_req->dflt_mac_addr, mac.addr_bytes, + RTE_ETHER_ADDR_LEN); + } +} + +static int reserve_resources_from_vf(struct bnxt *bp, + struct hwrm_func_cfg_input *cfg_req, + int vf) +{ + struct hwrm_func_qcaps_input req = {0}; + struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr; + int rc; + + /* Get the actual allocated values now */ + HWRM_PREP(&req, HWRM_FUNC_QCAPS, BNXT_USE_CHIMP_MB); + req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid); + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + + if (rc) { + PMD_DRV_LOG(ERR, "hwrm_func_qcaps failed rc:%d\n", rc); + copy_func_cfg_to_qcaps(cfg_req, resp); + } else if (resp->error_code) { + rc = rte_le_to_cpu_16(resp->error_code); + PMD_DRV_LOG(ERR, "hwrm_func_qcaps error %d\n", rc); + copy_func_cfg_to_qcaps(cfg_req, resp); + } + + bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->max_rsscos_ctx); + bp->max_stat_ctx -= rte_le_to_cpu_16(resp->max_stat_ctx); + bp->max_cp_rings -= rte_le_to_cpu_16(resp->max_cmpl_rings); + bp->max_tx_rings -= rte_le_to_cpu_16(resp->max_tx_rings); + bp->max_rx_rings -= rte_le_to_cpu_16(resp->max_rx_rings); + bp->max_l2_ctx -= rte_le_to_cpu_16(resp->max_l2_ctxs); + /* + * TODO: While not supporting VMDq with VFs, max_vnics is always + * forced to 1 in this case + */ + //bp->max_vnics -= rte_le_to_cpu_16(esp->max_vnics); + bp->max_ring_grps -= rte_le_to_cpu_16(resp->max_hw_ring_grps); + + HWRM_UNLOCK(); + + return 0; +} + +int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf) +{ + struct hwrm_func_qcfg_input req = {0}; + struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr; + int rc; + + /* Check for zero MAC address */ + HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB); + req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid); + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + HWRM_CHECK_RESULT(); + rc = rte_le_to_cpu_16(resp->vlan); + + HWRM_UNLOCK(); + + return rc; +} + +static int update_pf_resource_max(struct bnxt *bp) +{ + struct hwrm_func_qcfg_input req = {0}; + struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr; + int rc; + + /* And copy the allocated numbers into the pf struct */ + HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB); + req.fid = rte_cpu_to_le_16(0xffff); + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + HWRM_CHECK_RESULT(); + + /* Only TX ring value reflects actual allocation? 
TODO */ + bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings); + bp->pf->evb_mode = resp->evb_mode; + + HWRM_UNLOCK(); + + return rc; +} + +int bnxt_hwrm_allocate_pf_only(struct bnxt *bp) +{ + int rc; + + if (!BNXT_PF(bp)) { + PMD_DRV_LOG(ERR, "Attempt to allocate VFs on a VF!\n"); + return -EINVAL; + } + + rc = bnxt_hwrm_func_qcaps(bp); + if (rc) + return rc; + + bp->pf->func_cfg_flags &= + ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE | + HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE); + bp->pf->func_cfg_flags |= + HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE; + rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings); + rc = __bnxt_hwrm_func_qcaps(bp); + return rc; +} + +int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs) +{ + struct hwrm_func_cfg_input req = {0}; + struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr; + int i; + size_t sz; + int rc = 0; + size_t req_buf_sz; + + if (!BNXT_PF(bp)) { + PMD_DRV_LOG(ERR, "Attempt to allocate VFs on a VF!\n"); + return -EINVAL; + } + + rc = bnxt_hwrm_func_qcaps(bp); + + if (rc) + return rc; + + bp->pf->active_vfs = num_vfs; + + /* + * First, configure the PF to only use one TX ring. This ensures that + * there are enough rings for all VFs. + * + * If we don't do this, when we call func_alloc() later, we will lock + * extra rings to the PF that won't be available during func_cfg() of + * the VFs. + * + * This has been fixed with firmware versions above 20.6.54 + */ + bp->pf->func_cfg_flags &= + ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE | + HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE); + bp->pf->func_cfg_flags |= + HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE; + rc = bnxt_hwrm_pf_func_cfg(bp, 1); + if (rc) + return rc; + + /* + * Now, create and register a buffer to hold forwarded VF requests + */ + req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN; + bp->pf->vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz, + page_roundup(num_vfs * HWRM_MAX_REQ_LEN)); + if (bp->pf->vf_req_buf == NULL) { + rc = -ENOMEM; + goto error_free; + } + for (sz = 0; sz < req_buf_sz; sz += getpagesize()) + rte_mem_lock_page(((char *)bp->pf->vf_req_buf) + sz); + for (i = 0; i < num_vfs; i++) + bp->pf->vf_info[i].req_buf = ((char *)bp->pf->vf_req_buf) + + (i * HWRM_MAX_REQ_LEN); + + rc = bnxt_hwrm_func_buf_rgtr(bp); + if (rc) + goto error_free; + + populate_vf_func_cfg_req(bp, &req, num_vfs); + + bp->pf->active_vfs = 0; + for (i = 0; i < num_vfs; i++) { + add_random_mac_if_needed(bp, &req, i); + + HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB); + req.flags = rte_cpu_to_le_32(bp->pf->vf_info[i].func_cfg_flags); + req.fid = rte_cpu_to_le_16(bp->pf->vf_info[i].fid); + rc = bnxt_hwrm_send_message(bp, + &req, + sizeof(req), + BNXT_USE_CHIMP_MB); + + /* Clear enable flag for next pass */ + req.enables &= ~rte_cpu_to_le_32( + HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR); + + if (rc || resp->error_code) { + PMD_DRV_LOG(ERR, + "Failed to initialize VF %d\n", i); + PMD_DRV_LOG(ERR, + "Not all VFs available. (%d, %d)\n", + rc, resp->error_code); + HWRM_UNLOCK(); + break; + } + + HWRM_UNLOCK(); + + reserve_resources_from_vf(bp, &req, i); + bp->pf->active_vfs++; + bnxt_hwrm_func_clr_stats(bp, bp->pf->vf_info[i].fid); + } + + /* + * Now configure the PF to use "the rest" of the resources + * We're using STD_TX_RING_MODE here though which will limit the TX + * rings. This will allow QoS to function properly. Not setting this + * will cause PF rings to break bandwidth settings. 
+ */ + rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings); + if (rc) + goto error_free; + + rc = update_pf_resource_max(bp); + if (rc) + goto error_free; + + return rc; + +error_free: + bnxt_hwrm_func_buf_unrgtr(bp); + return rc; +} + +int bnxt_hwrm_pf_evb_mode(struct bnxt *bp) +{ + struct hwrm_func_cfg_input req = {0}; + struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr; + int rc; + + HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB); + + req.fid = rte_cpu_to_le_16(0xffff); + req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE); + req.evb_mode = bp->pf->evb_mode; + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); + + return rc; +} + +int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port, + uint8_t tunnel_type) +{ + struct hwrm_tunnel_dst_port_alloc_input req = {0}; + struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr; + int rc = 0; + + HWRM_PREP(&req, HWRM_TUNNEL_DST_PORT_ALLOC, BNXT_USE_CHIMP_MB); + req.tunnel_type = tunnel_type; + req.tunnel_dst_port_val = port; + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + HWRM_CHECK_RESULT(); + + switch (tunnel_type) { + case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN: + bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id; + bp->vxlan_port = port; + break; + case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE: + bp->geneve_fw_dst_port_id = resp->tunnel_dst_port_id; + bp->geneve_port = port; + break; + default: + break; + } + + HWRM_UNLOCK(); + + return rc; +} + +int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port, + uint8_t tunnel_type) +{ + struct hwrm_tunnel_dst_port_free_input req = {0}; + struct hwrm_tunnel_dst_port_free_output *resp = bp->hwrm_cmd_resp_addr; + int rc = 0; + + HWRM_PREP(&req, HWRM_TUNNEL_DST_PORT_FREE, BNXT_USE_CHIMP_MB); + + req.tunnel_type = tunnel_type; + req.tunnel_dst_port_id = rte_cpu_to_be_16(port); + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); + + return rc; +} + +int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf, + uint32_t flags) +{ + struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_func_cfg_input req = {0}; + int rc; + + HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB); + + req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid); + req.flags = rte_cpu_to_le_32(flags); + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); + + return rc; +} + +void vf_vnic_set_rxmask_cb(struct bnxt_vnic_info *vnic, void *flagp) +{ + uint32_t *flag = flagp; + + vnic->flags = *flag; +} + +int bnxt_set_rx_mask_no_vlan(struct bnxt *bp, struct bnxt_vnic_info *vnic) +{ + return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); +} + +int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp) +{ + int rc = 0; + struct hwrm_func_buf_rgtr_input req = {.req_type = 0 }; + struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr; + + HWRM_PREP(&req, HWRM_FUNC_BUF_RGTR, BNXT_USE_CHIMP_MB); + + req.req_buf_num_pages = rte_cpu_to_le_16(1); + req.req_buf_page_size = rte_cpu_to_le_16( + page_getenum(bp->pf->active_vfs * HWRM_MAX_REQ_LEN)); + req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN); + req.req_buf_page_addr0 = + rte_cpu_to_le_64(rte_malloc_virt2iova(bp->pf->vf_req_buf)); + if (req.req_buf_page_addr0 == RTE_BAD_IOVA) { + PMD_DRV_LOG(ERR, + "unable to map buffer address to physical memory\n"); + return -ENOMEM; + } + 
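+ /* Register the page-locked VF request buffer allocated in bnxt_hwrm_allocate_vfs() so that the firmware can forward VF HWRM requests to this PF driver. */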
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); + + return rc; +} + +int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp) +{ + int rc = 0; + struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 }; + struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr; + + if (!(BNXT_PF(bp) && bp->pdev->max_vfs)) + return 0; + + HWRM_PREP(&req, HWRM_FUNC_BUF_UNRGTR, BNXT_USE_CHIMP_MB); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); + + return rc; +} + +int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp) +{ + struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_func_cfg_input req = {0}; + int rc; + + HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB); + + req.fid = rte_cpu_to_le_16(0xffff); + req.flags = rte_cpu_to_le_32(bp->pf->func_cfg_flags); + req.enables = rte_cpu_to_le_32( + HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR); + req.async_event_cr = rte_cpu_to_le_16( + bp->async_cp_ring->cp_ring_struct->fw_ring_id); + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); + + return rc; +} + +int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp) +{ + struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_func_vf_cfg_input req = {0}; + int rc; + + HWRM_PREP(&req, HWRM_FUNC_VF_CFG, BNXT_USE_CHIMP_MB); + + req.enables = rte_cpu_to_le_32( + HWRM_FUNC_VF_CFG_INPUT_ENABLES_ASYNC_EVENT_CR); + req.async_event_cr = rte_cpu_to_le_16( + bp->async_cp_ring->cp_ring_struct->fw_ring_id); + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); + + return rc; +} + +int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf) +{ + struct hwrm_func_cfg_input req = {0}; + struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr; + uint16_t dflt_vlan, fid; + uint32_t func_cfg_flags; + int rc = 0; + + HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB); + + if (is_vf) { + dflt_vlan = bp->pf->vf_info[vf].dflt_vlan; + fid = bp->pf->vf_info[vf].fid; + func_cfg_flags = bp->pf->vf_info[vf].func_cfg_flags; + } else { + fid = rte_cpu_to_le_16(0xffff); + func_cfg_flags = bp->pf->func_cfg_flags; + dflt_vlan = bp->vlan; + } + + req.flags = rte_cpu_to_le_32(func_cfg_flags); + req.fid = rte_cpu_to_le_16(fid); + req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN); + req.dflt_vlan = rte_cpu_to_le_16(dflt_vlan); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); + + return rc; +} + +int bnxt_hwrm_func_bw_cfg(struct bnxt *bp, uint16_t vf, + uint16_t max_bw, uint16_t enables) +{ + struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_func_cfg_input req = {0}; + int rc; + + HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB); + + req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid); + req.enables |= rte_cpu_to_le_32(enables); + req.flags = rte_cpu_to_le_32(bp->pf->vf_info[vf].func_cfg_flags); + req.max_bw = rte_cpu_to_le_32(max_bw); + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); + + return rc; +} + +int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf) +{ + struct hwrm_func_cfg_input req = {0}; + struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr; + int rc = 0; + + HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB); + + req.flags = 
rte_cpu_to_le_32(bp->pf->vf_info[vf].func_cfg_flags); + req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid); + req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN); + req.dflt_vlan = rte_cpu_to_le_16(bp->pf->vf_info[vf].dflt_vlan); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); + + return rc; +} + +int bnxt_hwrm_set_async_event_cr(struct bnxt *bp) +{ + int rc; + + if (BNXT_PF(bp)) + rc = bnxt_hwrm_func_cfg_def_cp(bp); + else + rc = bnxt_hwrm_vf_func_cfg_def_cp(bp); + + return rc; +} + +int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id, + void *encaped, size_t ec_size) +{ + int rc = 0; + struct hwrm_reject_fwd_resp_input req = {.req_type = 0}; + struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr; + + if (ec_size > sizeof(req.encap_request)) + return -1; + + HWRM_PREP(&req, HWRM_REJECT_FWD_RESP, BNXT_USE_CHIMP_MB); + + req.encap_resp_target_id = rte_cpu_to_le_16(target_id); + memcpy(req.encap_request, encaped, ec_size); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); + + return rc; +} + +int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf, + struct rte_ether_addr *mac) +{ + struct hwrm_func_qcfg_input req = {0}; + struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr; + int rc; + + HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB); + + req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid); + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + + HWRM_CHECK_RESULT(); + + memcpy(mac->addr_bytes, resp->mac_address, RTE_ETHER_ADDR_LEN); + + HWRM_UNLOCK(); + + return rc; +} + +int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id, + void *encaped, size_t ec_size) +{ + int rc = 0; + struct hwrm_exec_fwd_resp_input req = {.req_type = 0}; + struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr; + + if (ec_size > sizeof(req.encap_request)) + return -1; + + HWRM_PREP(&req, HWRM_EXEC_FWD_RESP, BNXT_USE_CHIMP_MB); + + req.encap_resp_target_id = rte_cpu_to_le_16(target_id); + memcpy(req.encap_request, encaped, ec_size); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); + + return rc; +} + +int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx, + struct rte_eth_stats *stats, uint8_t rx) +{ + int rc = 0; + struct hwrm_stat_ctx_query_input req = {.req_type = 0}; + struct hwrm_stat_ctx_query_output *resp = bp->hwrm_cmd_resp_addr; + + HWRM_PREP(&req, HWRM_STAT_CTX_QUERY, BNXT_USE_CHIMP_MB); + + req.stat_ctx_id = rte_cpu_to_le_32(cid); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + + HWRM_CHECK_RESULT(); + + if (rx) { + stats->q_ipackets[idx] = rte_le_to_cpu_64(resp->rx_ucast_pkts); + stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_mcast_pkts); + stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_bcast_pkts); + stats->q_ibytes[idx] = rte_le_to_cpu_64(resp->rx_ucast_bytes); + stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_mcast_bytes); + stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_bcast_bytes); + stats->q_errors[idx] = rte_le_to_cpu_64(resp->rx_err_pkts); + stats->q_errors[idx] += rte_le_to_cpu_64(resp->rx_drop_pkts); + } else { + stats->q_opackets[idx] = rte_le_to_cpu_64(resp->tx_ucast_pkts); + stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_mcast_pkts); + stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_bcast_pkts); + stats->q_obytes[idx] 
= rte_le_to_cpu_64(resp->tx_ucast_bytes); + stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_mcast_bytes); + stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_bcast_bytes); + } + + HWRM_UNLOCK(); + + return rc; +} + +int bnxt_hwrm_port_qstats(struct bnxt *bp) +{ + struct hwrm_port_qstats_input req = {0}; + struct hwrm_port_qstats_output *resp = bp->hwrm_cmd_resp_addr; + struct bnxt_pf_info *pf = bp->pf; + int rc; + + HWRM_PREP(&req, HWRM_PORT_QSTATS, BNXT_USE_CHIMP_MB); + + req.port_id = rte_cpu_to_le_16(pf->port_id); + req.tx_stat_host_addr = rte_cpu_to_le_64(bp->hw_tx_port_stats_map); + req.rx_stat_host_addr = rte_cpu_to_le_64(bp->hw_rx_port_stats_map); + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); + + return rc; +} + +int bnxt_hwrm_port_clr_stats(struct bnxt *bp) +{ + struct hwrm_port_clr_stats_input req = {0}; + struct hwrm_port_clr_stats_output *resp = bp->hwrm_cmd_resp_addr; + struct bnxt_pf_info *pf = bp->pf; + int rc; + + /* Not allowed on NS2 device, NPAR, MultiHost, VF */ + if (!(bp->flags & BNXT_FLAG_PORT_STATS) || BNXT_VF(bp) || + BNXT_NPAR(bp) || BNXT_MH(bp) || BNXT_TOTAL_VFS(bp)) + return 0; + + HWRM_PREP(&req, HWRM_PORT_CLR_STATS, BNXT_USE_CHIMP_MB); + + req.port_id = rte_cpu_to_le_16(pf->port_id); + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); + + return rc; +} + +int bnxt_hwrm_port_led_qcaps(struct bnxt *bp) +{ + struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_port_led_qcaps_input req = {0}; + int rc; + + if (BNXT_VF(bp)) + return 0; + + HWRM_PREP(&req, HWRM_PORT_LED_QCAPS, BNXT_USE_CHIMP_MB); + req.port_id = bp->pf->port_id; + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + + HWRM_CHECK_RESULT(); + + if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) { + unsigned int i; + + bp->leds->num_leds = resp->num_leds; + memcpy(bp->leds, &resp->led0_id, + sizeof(bp->leds[0]) * bp->leds->num_leds); + for (i = 0; i < bp->leds->num_leds; i++) { + struct bnxt_led_info *led = &bp->leds[i]; + + uint16_t caps = led->led_state_caps; + + if (!led->led_group_id || + !BNXT_LED_ALT_BLINK_CAP(caps)) { + bp->leds->num_leds = 0; + break; + } + } + } + + HWRM_UNLOCK(); + + return rc; +} + +int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on) +{ + struct hwrm_port_led_cfg_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_port_led_cfg_input req = {0}; + struct bnxt_led_cfg *led_cfg; + uint8_t led_state = HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_DEFAULT; + uint16_t duration = 0; + int rc, i; + + if (!bp->leds->num_leds || BNXT_VF(bp)) + return -EOPNOTSUPP; + + HWRM_PREP(&req, HWRM_PORT_LED_CFG, BNXT_USE_CHIMP_MB); + + if (led_on) { + led_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT; + duration = rte_cpu_to_le_16(500); + } + req.port_id = bp->pf->port_id; + req.num_leds = bp->leds->num_leds; + led_cfg = (struct bnxt_led_cfg *)&req.led0_id; + for (i = 0; i < bp->leds->num_leds; i++, led_cfg++) { + req.enables |= BNXT_LED_DFLT_ENABLES(i); + led_cfg->led_id = bp->leds[i].led_id; + led_cfg->led_state = led_state; + led_cfg->led_blink_on = duration; + led_cfg->led_blink_off = duration; + led_cfg->led_group_id = bp->leds[i].led_group_id; + } + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); + + return rc; +} + +int bnxt_hwrm_nvm_get_dir_info(struct bnxt *bp, uint32_t *entries, + uint32_t *length) +{ + int rc; + struct 
hwrm_nvm_get_dir_info_input req = {0}; + struct hwrm_nvm_get_dir_info_output *resp = bp->hwrm_cmd_resp_addr; + + HWRM_PREP(&req, HWRM_NVM_GET_DIR_INFO, BNXT_USE_CHIMP_MB); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + + HWRM_CHECK_RESULT(); + + *entries = rte_le_to_cpu_32(resp->entries); + *length = rte_le_to_cpu_32(resp->entry_length); + + HWRM_UNLOCK(); + return rc; +} + +int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data) +{ + int rc; + uint32_t dir_entries; + uint32_t entry_length; + uint8_t *buf; + size_t buflen; + rte_iova_t dma_handle; + struct hwrm_nvm_get_dir_entries_input req = {0}; + struct hwrm_nvm_get_dir_entries_output *resp = bp->hwrm_cmd_resp_addr; + + rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length); + if (rc != 0) + return rc; + + *data++ = dir_entries; + *data++ = entry_length; + len -= 2; + memset(data, 0xff, len); + + buflen = dir_entries * entry_length; + buf = rte_malloc("nvm_dir", buflen, 0); + if (buf == NULL) + return -ENOMEM; + dma_handle = rte_malloc_virt2iova(buf); + if (dma_handle == RTE_BAD_IOVA) { + PMD_DRV_LOG(ERR, + "unable to map response address to physical memory\n"); + return -ENOMEM; + } + HWRM_PREP(&req, HWRM_NVM_GET_DIR_ENTRIES, BNXT_USE_CHIMP_MB); + req.host_dest_addr = rte_cpu_to_le_64(dma_handle); + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + + if (rc == 0) + memcpy(data, buf, len > buflen ? buflen : len); + + rte_free(buf); + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); + + return rc; +} + +int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index, + uint32_t offset, uint32_t length, + uint8_t *data) +{ + int rc; + uint8_t *buf; + rte_iova_t dma_handle; + struct hwrm_nvm_read_input req = {0}; + struct hwrm_nvm_read_output *resp = bp->hwrm_cmd_resp_addr; + + buf = rte_malloc("nvm_item", length, 0); + if (!buf) + return -ENOMEM; + + dma_handle = rte_malloc_virt2iova(buf); + if (dma_handle == RTE_BAD_IOVA) { + PMD_DRV_LOG(ERR, + "unable to map response address to physical memory\n"); + return -ENOMEM; + } + HWRM_PREP(&req, HWRM_NVM_READ, BNXT_USE_CHIMP_MB); + req.host_dest_addr = rte_cpu_to_le_64(dma_handle); + req.dir_idx = rte_cpu_to_le_16(index); + req.offset = rte_cpu_to_le_32(offset); + req.len = rte_cpu_to_le_32(length); + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + if (rc == 0) + memcpy(data, buf, length); + + rte_free(buf); + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); + + return rc; +} + +int bnxt_hwrm_erase_nvram_directory(struct bnxt *bp, uint8_t index) +{ + int rc; + struct hwrm_nvm_erase_dir_entry_input req = {0}; + struct hwrm_nvm_erase_dir_entry_output *resp = bp->hwrm_cmd_resp_addr; + + HWRM_PREP(&req, HWRM_NVM_ERASE_DIR_ENTRY, BNXT_USE_CHIMP_MB); + req.dir_idx = rte_cpu_to_le_16(index); + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); + + return rc; +} + + +int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type, + uint16_t dir_ordinal, uint16_t dir_ext, + uint16_t dir_attr, const uint8_t *data, + size_t data_len) +{ + int rc; + struct hwrm_nvm_write_input req = {0}; + struct hwrm_nvm_write_output *resp = bp->hwrm_cmd_resp_addr; + rte_iova_t dma_handle; + uint8_t *buf; + + buf = rte_malloc("nvm_write", data_len, 0); + if (!buf) + return -ENOMEM; + + dma_handle = rte_malloc_virt2iova(buf); + if (dma_handle == RTE_BAD_IOVA) { + PMD_DRV_LOG(ERR, + "unable to map response address to physical memory\n"); + return -ENOMEM; + } + memcpy(buf, data, 
data_len); + + HWRM_PREP(&req, HWRM_NVM_WRITE, BNXT_USE_CHIMP_MB); + + req.dir_type = rte_cpu_to_le_16(dir_type); + req.dir_ordinal = rte_cpu_to_le_16(dir_ordinal); + req.dir_ext = rte_cpu_to_le_16(dir_ext); + req.dir_attr = rte_cpu_to_le_16(dir_attr); + req.dir_data_length = rte_cpu_to_le_32(data_len); + req.host_src_addr = rte_cpu_to_le_64(dma_handle); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + + rte_free(buf); + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); + + return rc; +} + +static void +bnxt_vnic_count(struct bnxt_vnic_info *vnic __rte_unused, void *cbdata) +{ + uint32_t *count = cbdata; + + *count = *count + 1; +} + +static int bnxt_vnic_count_hwrm_stub(struct bnxt *bp __rte_unused, + struct bnxt_vnic_info *vnic __rte_unused) +{ + return 0; +} + +int bnxt_vf_vnic_count(struct bnxt *bp, uint16_t vf) +{ + uint32_t count = 0; + + bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf, bnxt_vnic_count, + &count, bnxt_vnic_count_hwrm_stub); + + return count; +} + +static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf, + uint16_t *vnic_ids) +{ + struct hwrm_func_vf_vnic_ids_query_input req = {0}; + struct hwrm_func_vf_vnic_ids_query_output *resp = + bp->hwrm_cmd_resp_addr; + int rc; + + /* First query all VNIC ids */ + HWRM_PREP(&req, HWRM_FUNC_VF_VNIC_IDS_QUERY, BNXT_USE_CHIMP_MB); + + req.vf_id = rte_cpu_to_le_16(bp->pf->first_vf_id + vf); + req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf->total_vnics); + req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_malloc_virt2iova(vnic_ids)); + + if (req.vnic_id_tbl_addr == RTE_BAD_IOVA) { + HWRM_UNLOCK(); + PMD_DRV_LOG(ERR, + "unable to map VNIC ID table address to physical memory\n"); + return -ENOMEM; + } + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + HWRM_CHECK_RESULT(); + rc = rte_le_to_cpu_32(resp->vnic_id_cnt); + + HWRM_UNLOCK(); + + return rc; +} + +/* + * This function queries the VNIC IDs for a specified VF. It then calls + * the vnic_cb to update the necessary field in vnic_info with cbdata. + * Then it calls the hwrm_cb function to program this new vnic configuration. 
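+ * For example, bnxt_vf_vnic_count() above uses it with a counting callback and a stub hwrm_cb.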
+ */ +int bnxt_hwrm_func_vf_vnic_query_and_config(struct bnxt *bp, uint16_t vf, + void (*vnic_cb)(struct bnxt_vnic_info *, void *), void *cbdata, + int (*hwrm_cb)(struct bnxt *bp, struct bnxt_vnic_info *vnic)) +{ + struct bnxt_vnic_info vnic; + int rc = 0; + int i, num_vnic_ids; + uint16_t *vnic_ids; + size_t vnic_id_sz; + size_t sz; + + /* First query all VNIC ids */ + vnic_id_sz = bp->pf->total_vnics * sizeof(*vnic_ids); + vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz, + RTE_CACHE_LINE_SIZE); + if (vnic_ids == NULL) + return -ENOMEM; + + for (sz = 0; sz < vnic_id_sz; sz += getpagesize()) + rte_mem_lock_page(((char *)vnic_ids) + sz); + + num_vnic_ids = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids); + + if (num_vnic_ids < 0) + return num_vnic_ids; + + /* Retrieve VNIC, update bd_stall then update */ + + for (i = 0; i < num_vnic_ids; i++) { + memset(&vnic, 0, sizeof(struct bnxt_vnic_info)); + vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]); + rc = bnxt_hwrm_vnic_qcfg(bp, &vnic, bp->pf->first_vf_id + vf); + if (rc) + break; + if (vnic.mru <= 4) /* Indicates unallocated */ + continue; + + vnic_cb(&vnic, cbdata); + + rc = hwrm_cb(bp, &vnic); + if (rc) + break; + } + + rte_free(vnic_ids); + + return rc; +} + +int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf, + bool on) +{ + struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_func_cfg_input req = {0}; + int rc; + + HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB); + + req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid); + req.enables |= rte_cpu_to_le_32( + HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE); + req.vlan_antispoof_mode = on ? + HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN : + HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK; + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); + + return rc; +} + +int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf) +{ + struct bnxt_vnic_info vnic; + uint16_t *vnic_ids; + size_t vnic_id_sz; + int num_vnic_ids, i; + size_t sz; + int rc; + + vnic_id_sz = bp->pf->total_vnics * sizeof(*vnic_ids); + vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz, + RTE_CACHE_LINE_SIZE); + if (vnic_ids == NULL) + return -ENOMEM; + + for (sz = 0; sz < vnic_id_sz; sz += getpagesize()) + rte_mem_lock_page(((char *)vnic_ids) + sz); + + rc = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids); + if (rc <= 0) + goto exit; + num_vnic_ids = rc; + + /* + * Loop through to find the default VNIC ID. + * TODO: The easier way would be to obtain the resp->dflt_vnic_id + * by sending the hwrm_func_qcfg command to the firmware. + */ + for (i = 0; i < num_vnic_ids; i++) { + memset(&vnic, 0, sizeof(struct bnxt_vnic_info)); + vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]); + rc = bnxt_hwrm_vnic_qcfg(bp, &vnic, + bp->pf->first_vf_id + vf); + if (rc) + goto exit; + if (vnic.func_default) { + rte_free(vnic_ids); + return vnic.fw_vnic_id; + } + } + /* Could not find a default VNIC. 
*/ + PMD_DRV_LOG(ERR, "No default VNIC\n"); +exit: + rte_free(vnic_ids); + return rc; +} + +int bnxt_hwrm_set_em_filter(struct bnxt *bp, + uint16_t dst_id, + struct bnxt_filter_info *filter) +{ + int rc = 0; + struct hwrm_cfa_em_flow_alloc_input req = {.req_type = 0 }; + struct hwrm_cfa_em_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr; + uint32_t enables = 0; + + if (filter->fw_em_filter_id != UINT64_MAX) + bnxt_hwrm_clear_em_filter(bp, filter); + + HWRM_PREP(&req, HWRM_CFA_EM_FLOW_ALLOC, BNXT_USE_KONG(bp)); + + req.flags = rte_cpu_to_le_32(filter->flags); + + enables = filter->enables | + HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_ID; + req.dst_id = rte_cpu_to_le_16(dst_id); + + if (filter->ip_addr_type) { + req.ip_addr_type = filter->ip_addr_type; + enables |= HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IPADDR_TYPE; + } + if (enables & + HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID) + req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id); + if (enables & + HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_MACADDR) + memcpy(req.src_macaddr, filter->src_macaddr, + RTE_ETHER_ADDR_LEN); + if (enables & + HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_MACADDR) + memcpy(req.dst_macaddr, filter->dst_macaddr, + RTE_ETHER_ADDR_LEN); + if (enables & + HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_OVLAN_VID) + req.ovlan_vid = filter->l2_ovlan; + if (enables & + HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IVLAN_VID) + req.ivlan_vid = filter->l2_ivlan; + if (enables & + HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_ETHERTYPE) + req.ethertype = rte_cpu_to_be_16(filter->ethertype); + if (enables & + HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IP_PROTOCOL) + req.ip_protocol = filter->ip_protocol; + if (enables & + HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_IPADDR) + req.src_ipaddr[0] = rte_cpu_to_be_32(filter->src_ipaddr[0]); + if (enables & + HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_IPADDR) + req.dst_ipaddr[0] = rte_cpu_to_be_32(filter->dst_ipaddr[0]); + if (enables & + HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_PORT) + req.src_port = rte_cpu_to_be_16(filter->src_port); + if (enables & + HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_PORT) + req.dst_port = rte_cpu_to_be_16(filter->dst_port); + if (enables & + HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID) + req.mirror_vnic_id = filter->mirror_vnic_id; + + req.enables = rte_cpu_to_le_32(enables); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp)); + + HWRM_CHECK_RESULT(); + + filter->fw_em_filter_id = rte_le_to_cpu_64(resp->em_filter_id); + HWRM_UNLOCK(); + + return rc; +} + +int bnxt_hwrm_clear_em_filter(struct bnxt *bp, struct bnxt_filter_info *filter) +{ + int rc = 0; + struct hwrm_cfa_em_flow_free_input req = {.req_type = 0 }; + struct hwrm_cfa_em_flow_free_output *resp = bp->hwrm_cmd_resp_addr; + + if (filter->fw_em_filter_id == UINT64_MAX) + return 0; + + HWRM_PREP(&req, HWRM_CFA_EM_FLOW_FREE, BNXT_USE_KONG(bp)); + + req.em_filter_id = rte_cpu_to_le_64(filter->fw_em_filter_id); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp)); + + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); + + filter->fw_em_filter_id = UINT64_MAX; + filter->fw_l2_filter_id = UINT64_MAX; + + return 0; +} + +int bnxt_hwrm_set_ntuple_filter(struct bnxt *bp, + uint16_t dst_id, + struct bnxt_filter_info *filter) +{ + int rc = 0; + struct hwrm_cfa_ntuple_filter_alloc_input req = {.req_type = 0 }; + struct hwrm_cfa_ntuple_filter_alloc_output *resp = + bp->hwrm_cmd_resp_addr; + uint32_t enables = 0; + + if (filter->fw_ntuple_filter_id != UINT64_MAX) + bnxt_hwrm_clear_ntuple_filter(bp, 
filter); + + HWRM_PREP(&req, HWRM_CFA_NTUPLE_FILTER_ALLOC, BNXT_USE_CHIMP_MB); + + req.flags = rte_cpu_to_le_32(filter->flags); + + enables = filter->enables | + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_ID; + req.dst_id = rte_cpu_to_le_16(dst_id); + + if (filter->ip_addr_type) { + req.ip_addr_type = filter->ip_addr_type; + enables |= + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IPADDR_TYPE; + } + if (enables & + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID) + req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id); + if (enables & + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR) + memcpy(req.src_macaddr, filter->src_macaddr, + RTE_ETHER_ADDR_LEN); + if (enables & + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE) + req.ethertype = rte_cpu_to_be_16(filter->ethertype); + if (enables & + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL) + req.ip_protocol = filter->ip_protocol; + if (enables & + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR) + req.src_ipaddr[0] = rte_cpu_to_le_32(filter->src_ipaddr[0]); + if (enables & + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR_MASK) + req.src_ipaddr_mask[0] = + rte_cpu_to_le_32(filter->src_ipaddr_mask[0]); + if (enables & + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR) + req.dst_ipaddr[0] = rte_cpu_to_le_32(filter->dst_ipaddr[0]); + if (enables & + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR_MASK) + req.dst_ipaddr_mask[0] = + rte_cpu_to_be_32(filter->dst_ipaddr_mask[0]); + if (enables & + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT) + req.src_port = rte_cpu_to_le_16(filter->src_port); + if (enables & + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT_MASK) + req.src_port_mask = rte_cpu_to_le_16(filter->src_port_mask); + if (enables & + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT) + req.dst_port = rte_cpu_to_le_16(filter->dst_port); + if (enables & + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT_MASK) + req.dst_port_mask = rte_cpu_to_le_16(filter->dst_port_mask); + if (enables & + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID) + req.mirror_vnic_id = filter->mirror_vnic_id; + + req.enables = rte_cpu_to_le_32(enables); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + + HWRM_CHECK_RESULT(); + + filter->fw_ntuple_filter_id = rte_le_to_cpu_64(resp->ntuple_filter_id); + filter->flow_id = rte_le_to_cpu_32(resp->flow_id); + HWRM_UNLOCK(); + + return rc; +} + +int bnxt_hwrm_clear_ntuple_filter(struct bnxt *bp, + struct bnxt_filter_info *filter) +{ + int rc = 0; + struct hwrm_cfa_ntuple_filter_free_input req = {.req_type = 0 }; + struct hwrm_cfa_ntuple_filter_free_output *resp = + bp->hwrm_cmd_resp_addr; + + if (filter->fw_ntuple_filter_id == UINT64_MAX) + return 0; + + HWRM_PREP(&req, HWRM_CFA_NTUPLE_FILTER_FREE, BNXT_USE_CHIMP_MB); + + req.ntuple_filter_id = rte_cpu_to_le_64(filter->fw_ntuple_filter_id); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); + + filter->fw_ntuple_filter_id = UINT64_MAX; + + return 0; +} + +static int +bnxt_vnic_rss_configure_thor(struct bnxt *bp, struct bnxt_vnic_info *vnic) +{ + struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr; + uint8_t *rx_queue_state = bp->eth_dev->data->rx_queue_state; + struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 }; + struct bnxt_rx_queue **rxqs = bp->rx_queues; + uint16_t *ring_tbl = vnic->rss_table; + int nr_ctxs = vnic->num_lb_ctxts; + int max_rings = bp->rx_nr_rings; + int i, j, k, 
cnt; + int rc = 0; + + for (i = 0, k = 0; i < nr_ctxs; i++) { + struct bnxt_rx_ring_info *rxr; + struct bnxt_cp_ring_info *cpr; + + HWRM_PREP(&req, HWRM_VNIC_RSS_CFG, BNXT_USE_CHIMP_MB); + + req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id); + req.hash_type = rte_cpu_to_le_32(vnic->hash_type); + req.hash_mode_flags = vnic->hash_mode; + + req.ring_grp_tbl_addr = + rte_cpu_to_le_64(vnic->rss_table_dma_addr + + i * BNXT_RSS_ENTRIES_PER_CTX_THOR * + 2 * sizeof(*ring_tbl)); + req.hash_key_tbl_addr = + rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr); + + req.ring_table_pair_index = i; + req.rss_ctx_idx = rte_cpu_to_le_16(vnic->fw_grp_ids[i]); + + for (j = 0; j < 64; j++) { + uint16_t ring_id; + + /* Find next active ring. */ + for (cnt = 0; cnt < max_rings; cnt++) { + if (rx_queue_state[k] != + RTE_ETH_QUEUE_STATE_STOPPED) + break; + if (++k == max_rings) + k = 0; + } + + /* Return if no rings are active. */ + if (cnt == max_rings) { + HWRM_UNLOCK(); + return 0; + } + + /* Add rx/cp ring pair to RSS table. */ + rxr = rxqs[k]->rx_ring; + cpr = rxqs[k]->cp_ring; + + ring_id = rxr->rx_ring_struct->fw_ring_id; + *ring_tbl++ = rte_cpu_to_le_16(ring_id); + ring_id = cpr->cp_ring_struct->fw_ring_id; + *ring_tbl++ = rte_cpu_to_le_16(ring_id); + + if (++k == max_rings) + k = 0; + } + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), + BNXT_USE_CHIMP_MB); + + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); + } + + return rc; +} + +int bnxt_vnic_rss_configure(struct bnxt *bp, struct bnxt_vnic_info *vnic) +{ + unsigned int rss_idx, fw_idx, i; + + if (!(vnic->rss_table && vnic->hash_type)) + return 0; + + if (BNXT_CHIP_THOR(bp)) + return bnxt_vnic_rss_configure_thor(bp, vnic); + + if (vnic->fw_vnic_id == INVALID_HW_RING_ID) + return 0; + + if (vnic->rss_table && vnic->hash_type) { + /* + * Fill the RSS hash & redirection table with + * ring group ids for all VNICs + */ + for (rss_idx = 0, fw_idx = 0; rss_idx < HW_HASH_INDEX_SIZE; + rss_idx++, fw_idx++) { + for (i = 0; i < bp->rx_cp_nr_rings; i++) { + fw_idx %= bp->rx_cp_nr_rings; + if (vnic->fw_grp_ids[fw_idx] != + INVALID_HW_RING_ID) + break; + fw_idx++; + } + if (i == bp->rx_cp_nr_rings) + return 0; + vnic->rss_table[rss_idx] = vnic->fw_grp_ids[fw_idx]; + } + return bnxt_hwrm_vnic_rss_cfg(bp, vnic); + } + + return 0; +} + +static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal, + struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req) +{ + uint16_t flags; + + req->num_cmpl_aggr_int = rte_cpu_to_le_16(hw_coal->num_cmpl_aggr_int); + + /* This is a 6-bit value and must not be 0, or we'll get non stop IRQ */ + req->num_cmpl_dma_aggr = rte_cpu_to_le_16(hw_coal->num_cmpl_dma_aggr); + + /* This is a 6-bit value and must not be 0, or we'll get non stop IRQ */ + req->num_cmpl_dma_aggr_during_int = + rte_cpu_to_le_16(hw_coal->num_cmpl_dma_aggr_during_int); + + req->int_lat_tmr_max = rte_cpu_to_le_16(hw_coal->int_lat_tmr_max); + + /* min timer set to 1/2 of interrupt timer */ + req->int_lat_tmr_min = rte_cpu_to_le_16(hw_coal->int_lat_tmr_min); + + /* buf timer set to 1/4 of interrupt timer */ + req->cmpl_aggr_dma_tmr = rte_cpu_to_le_16(hw_coal->cmpl_aggr_dma_tmr); + + req->cmpl_aggr_dma_tmr_during_int = + rte_cpu_to_le_16(hw_coal->cmpl_aggr_dma_tmr_during_int); + + flags = HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET | + HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_RING_IDLE; + req->flags = rte_cpu_to_le_16(flags); +} + +static int bnxt_hwrm_set_coal_params_thor(struct bnxt *bp, + struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *agg_req) +{ + 
struct hwrm_ring_aggint_qcaps_input req = {0}; + struct hwrm_ring_aggint_qcaps_output *resp = bp->hwrm_cmd_resp_addr; + uint32_t enables; + uint16_t flags; + int rc; + + HWRM_PREP(&req, HWRM_RING_AGGINT_QCAPS, BNXT_USE_CHIMP_MB); + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + HWRM_CHECK_RESULT(); + + agg_req->num_cmpl_dma_aggr = resp->num_cmpl_dma_aggr_max; + agg_req->cmpl_aggr_dma_tmr = resp->cmpl_aggr_dma_tmr_min; + + flags = HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET | + HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_RING_IDLE; + agg_req->flags = rte_cpu_to_le_16(flags); + enables = + HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_CMPL_AGGR_DMA_TMR | + HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_NUM_CMPL_DMA_AGGR; + agg_req->enables = rte_cpu_to_le_32(enables); + + HWRM_UNLOCK(); + return rc; +} + +int bnxt_hwrm_set_ring_coal(struct bnxt *bp, + struct bnxt_coal *coal, uint16_t ring_id) +{ + struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0}; + struct hwrm_ring_cmpl_ring_cfg_aggint_params_output *resp = + bp->hwrm_cmd_resp_addr; + int rc; + + /* Set ring coalesce parameters only for 100G NICs */ + if (BNXT_CHIP_THOR(bp)) { + if (bnxt_hwrm_set_coal_params_thor(bp, &req)) + return -1; + } else if (bnxt_stratus_device(bp)) { + bnxt_hwrm_set_coal_params(coal, &req); + } else { + return 0; + } + + HWRM_PREP(&req, + HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, + BNXT_USE_CHIMP_MB); + req.ring_id = rte_cpu_to_le_16(ring_id); + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); + return 0; +} + +#define BNXT_RTE_MEMZONE_FLAG (RTE_MEMZONE_1GB | RTE_MEMZONE_IOVA_CONTIG) +int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp) +{ + struct hwrm_func_backing_store_qcaps_input req = {0}; + struct hwrm_func_backing_store_qcaps_output *resp = + bp->hwrm_cmd_resp_addr; + struct bnxt_ctx_pg_info *ctx_pg; + struct bnxt_ctx_mem_info *ctx; + int total_alloc_len; + int rc, i, tqm_rings; + + if (!BNXT_CHIP_THOR(bp) || + bp->hwrm_spec_code < HWRM_VERSION_1_9_2 || + BNXT_VF(bp) || + bp->ctx) + return 0; + + HWRM_PREP(&req, HWRM_FUNC_BACKING_STORE_QCAPS, BNXT_USE_CHIMP_MB); + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + HWRM_CHECK_RESULT_SILENT(); + + total_alloc_len = sizeof(*ctx); + ctx = rte_zmalloc("bnxt_ctx_mem", total_alloc_len, + RTE_CACHE_LINE_SIZE); + if (!ctx) { + rc = -ENOMEM; + goto ctx_err; + } + + ctx->qp_max_entries = rte_le_to_cpu_32(resp->qp_max_entries); + ctx->qp_min_qp1_entries = + rte_le_to_cpu_16(resp->qp_min_qp1_entries); + ctx->qp_max_l2_entries = + rte_le_to_cpu_16(resp->qp_max_l2_entries); + ctx->qp_entry_size = rte_le_to_cpu_16(resp->qp_entry_size); + ctx->srq_max_l2_entries = + rte_le_to_cpu_16(resp->srq_max_l2_entries); + ctx->srq_max_entries = rte_le_to_cpu_32(resp->srq_max_entries); + ctx->srq_entry_size = rte_le_to_cpu_16(resp->srq_entry_size); + ctx->cq_max_l2_entries = + rte_le_to_cpu_16(resp->cq_max_l2_entries); + ctx->cq_max_entries = rte_le_to_cpu_32(resp->cq_max_entries); + ctx->cq_entry_size = rte_le_to_cpu_16(resp->cq_entry_size); + ctx->vnic_max_vnic_entries = + rte_le_to_cpu_16(resp->vnic_max_vnic_entries); + ctx->vnic_max_ring_table_entries = + rte_le_to_cpu_16(resp->vnic_max_ring_table_entries); + ctx->vnic_entry_size = rte_le_to_cpu_16(resp->vnic_entry_size); + ctx->stat_max_entries = + rte_le_to_cpu_32(resp->stat_max_entries); + ctx->stat_entry_size = rte_le_to_cpu_16(resp->stat_entry_size); + ctx->tqm_entry_size = 
rte_le_to_cpu_16(resp->tqm_entry_size); + ctx->tqm_min_entries_per_ring = + rte_le_to_cpu_32(resp->tqm_min_entries_per_ring); + ctx->tqm_max_entries_per_ring = + rte_le_to_cpu_32(resp->tqm_max_entries_per_ring); + ctx->tqm_entries_multiple = resp->tqm_entries_multiple; + if (!ctx->tqm_entries_multiple) + ctx->tqm_entries_multiple = 1; + ctx->mrav_max_entries = + rte_le_to_cpu_32(resp->mrav_max_entries); + ctx->mrav_entry_size = rte_le_to_cpu_16(resp->mrav_entry_size); + ctx->tim_entry_size = rte_le_to_cpu_16(resp->tim_entry_size); + ctx->tim_max_entries = rte_le_to_cpu_32(resp->tim_max_entries); + ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count; + + if (!ctx->tqm_fp_rings_count) + ctx->tqm_fp_rings_count = bp->max_q; + + tqm_rings = ctx->tqm_fp_rings_count + 1; + + ctx_pg = rte_malloc("bnxt_ctx_pg_mem", + sizeof(*ctx_pg) * tqm_rings, + RTE_CACHE_LINE_SIZE); + if (!ctx_pg) { + rc = -ENOMEM; + goto ctx_err; + } + for (i = 0; i < tqm_rings; i++, ctx_pg++) + ctx->tqm_mem[i] = ctx_pg; + + bp->ctx = ctx; +ctx_err: + HWRM_UNLOCK(); + return rc; +} + +int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, uint32_t enables) +{ + struct hwrm_func_backing_store_cfg_input req = {0}; + struct hwrm_func_backing_store_cfg_output *resp = + bp->hwrm_cmd_resp_addr; + struct bnxt_ctx_mem_info *ctx = bp->ctx; + struct bnxt_ctx_pg_info *ctx_pg; + uint32_t *num_entries; + uint64_t *pg_dir; + uint8_t *pg_attr; + uint32_t ena; + int i, rc; + + if (!ctx) + return 0; + + HWRM_PREP(&req, HWRM_FUNC_BACKING_STORE_CFG, BNXT_USE_CHIMP_MB); + req.enables = rte_cpu_to_le_32(enables); + + if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_QP) { + ctx_pg = &ctx->qp_mem; + req.qp_num_entries = rte_cpu_to_le_32(ctx_pg->entries); + req.qp_num_qp1_entries = + rte_cpu_to_le_16(ctx->qp_min_qp1_entries); + req.qp_num_l2_entries = + rte_cpu_to_le_16(ctx->qp_max_l2_entries); + req.qp_entry_size = rte_cpu_to_le_16(ctx->qp_entry_size); + bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, + &req.qpc_pg_size_qpc_lvl, + &req.qpc_page_dir); + } + + if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_SRQ) { + ctx_pg = &ctx->srq_mem; + req.srq_num_entries = rte_cpu_to_le_32(ctx_pg->entries); + req.srq_num_l2_entries = + rte_cpu_to_le_16(ctx->srq_max_l2_entries); + req.srq_entry_size = rte_cpu_to_le_16(ctx->srq_entry_size); + bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, + &req.srq_pg_size_srq_lvl, + &req.srq_page_dir); + } + + if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_CQ) { + ctx_pg = &ctx->cq_mem; + req.cq_num_entries = rte_cpu_to_le_32(ctx_pg->entries); + req.cq_num_l2_entries = + rte_cpu_to_le_16(ctx->cq_max_l2_entries); + req.cq_entry_size = rte_cpu_to_le_16(ctx->cq_entry_size); + bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, + &req.cq_pg_size_cq_lvl, + &req.cq_page_dir); + } + + if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_VNIC) { + ctx_pg = &ctx->vnic_mem; + req.vnic_num_vnic_entries = + rte_cpu_to_le_16(ctx->vnic_max_vnic_entries); + req.vnic_num_ring_table_entries = + rte_cpu_to_le_16(ctx->vnic_max_ring_table_entries); + req.vnic_entry_size = rte_cpu_to_le_16(ctx->vnic_entry_size); + bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, + &req.vnic_pg_size_vnic_lvl, + &req.vnic_page_dir); + } + + if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_STAT) { + ctx_pg = &ctx->stat_mem; + req.stat_num_entries = rte_cpu_to_le_16(ctx->stat_max_entries); + req.stat_entry_size = rte_cpu_to_le_16(ctx->stat_entry_size); + bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, + &req.stat_pg_size_stat_lvl, + &req.stat_page_dir); + } + + 
req.tqm_entry_size = rte_cpu_to_le_16(ctx->tqm_entry_size); + num_entries = &req.tqm_sp_num_entries; + pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl; + pg_dir = &req.tqm_sp_page_dir; + ena = HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP; + for (i = 0; i < 9; i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) { + if (!(enables & ena)) + continue; + + req.tqm_entry_size = rte_cpu_to_le_16(ctx->tqm_entry_size); + + ctx_pg = ctx->tqm_mem[i]; + *num_entries = rte_cpu_to_le_16(ctx_pg->entries); + bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir); + } + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); + + return rc; +} + +int bnxt_hwrm_ext_port_qstats(struct bnxt *bp) +{ + struct hwrm_port_qstats_ext_input req = {0}; + struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr; + struct bnxt_pf_info *pf = bp->pf; + int rc; + + if (!(bp->flags & BNXT_FLAG_EXT_RX_PORT_STATS || + bp->flags & BNXT_FLAG_EXT_TX_PORT_STATS)) + return 0; + + HWRM_PREP(&req, HWRM_PORT_QSTATS_EXT, BNXT_USE_CHIMP_MB); + + req.port_id = rte_cpu_to_le_16(pf->port_id); + if (bp->flags & BNXT_FLAG_EXT_TX_PORT_STATS) { + req.tx_stat_host_addr = + rte_cpu_to_le_64(bp->hw_tx_port_stats_ext_map); + req.tx_stat_size = + rte_cpu_to_le_16(sizeof(struct tx_port_stats_ext)); + } + if (bp->flags & BNXT_FLAG_EXT_RX_PORT_STATS) { + req.rx_stat_host_addr = + rte_cpu_to_le_64(bp->hw_rx_port_stats_ext_map); + req.rx_stat_size = + rte_cpu_to_le_16(sizeof(struct rx_port_stats_ext)); + } + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + + if (rc) { + bp->fw_rx_port_stats_ext_size = 0; + bp->fw_tx_port_stats_ext_size = 0; + } else { + bp->fw_rx_port_stats_ext_size = + rte_le_to_cpu_16(resp->rx_stat_size); + bp->fw_tx_port_stats_ext_size = + rte_le_to_cpu_16(resp->tx_stat_size); + } + + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); + + return rc; +} + +int +bnxt_hwrm_tunnel_redirect(struct bnxt *bp, uint8_t type) +{ + struct hwrm_cfa_redirect_tunnel_type_alloc_input req = {0}; + struct hwrm_cfa_redirect_tunnel_type_alloc_output *resp = + bp->hwrm_cmd_resp_addr; + int rc = 0; + + HWRM_PREP(&req, HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC, BNXT_USE_CHIMP_MB); + req.tunnel_type = type; + req.dest_fid = bp->fw_fid; + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + HWRM_CHECK_RESULT(); + + HWRM_UNLOCK(); + + return rc; +} + +int +bnxt_hwrm_tunnel_redirect_free(struct bnxt *bp, uint8_t type) +{ + struct hwrm_cfa_redirect_tunnel_type_free_input req = {0}; + struct hwrm_cfa_redirect_tunnel_type_free_output *resp = + bp->hwrm_cmd_resp_addr; + int rc = 0; + + HWRM_PREP(&req, HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE, BNXT_USE_CHIMP_MB); + req.tunnel_type = type; + req.dest_fid = bp->fw_fid; + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + HWRM_CHECK_RESULT(); + + HWRM_UNLOCK(); + + return rc; +} + +int bnxt_hwrm_tunnel_redirect_query(struct bnxt *bp, uint32_t *type) +{ + struct hwrm_cfa_redirect_query_tunnel_type_input req = {0}; + struct hwrm_cfa_redirect_query_tunnel_type_output *resp = + bp->hwrm_cmd_resp_addr; + int rc = 0; + + HWRM_PREP(&req, HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE, BNXT_USE_CHIMP_MB); + req.src_fid = bp->fw_fid; + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + HWRM_CHECK_RESULT(); + + if (type) + *type = rte_le_to_cpu_32(resp->tunnel_mask); + + HWRM_UNLOCK(); + + return rc; +} + +int bnxt_hwrm_tunnel_redirect_info(struct bnxt *bp, uint8_t tun_type, + uint16_t *dst_fid) +{ + struct 
hwrm_cfa_redirect_tunnel_type_info_input req = {0}; + struct hwrm_cfa_redirect_tunnel_type_info_output *resp = + bp->hwrm_cmd_resp_addr; + int rc = 0; + + HWRM_PREP(&req, HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO, BNXT_USE_CHIMP_MB); + req.src_fid = bp->fw_fid; + req.tunnel_type = tun_type; + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + HWRM_CHECK_RESULT(); + + if (dst_fid) + *dst_fid = rte_le_to_cpu_16(resp->dest_fid); + + PMD_DRV_LOG(DEBUG, "dst_fid: %x\n", resp->dest_fid); + + HWRM_UNLOCK(); + + return rc; +} + +int bnxt_hwrm_set_mac(struct bnxt *bp) +{ + struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_func_vf_cfg_input req = {0}; + int rc = 0; + + if (!BNXT_VF(bp)) + return 0; + + HWRM_PREP(&req, HWRM_FUNC_VF_CFG, BNXT_USE_CHIMP_MB); + + req.enables = + rte_cpu_to_le_32(HWRM_FUNC_VF_CFG_INPUT_ENABLES_DFLT_MAC_ADDR); + memcpy(req.dflt_mac_addr, bp->mac_addr, RTE_ETHER_ADDR_LEN); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + + HWRM_CHECK_RESULT(); + + HWRM_UNLOCK(); + + return rc; +} + +int bnxt_hwrm_if_change(struct bnxt *bp, bool up) +{ + struct hwrm_func_drv_if_change_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_func_drv_if_change_input req = {0}; + uint32_t flags; + int rc; + + if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE)) + return 0; + + /* Do not issue FUNC_DRV_IF_CHANGE during reset recovery. + * If we issue FUNC_DRV_IF_CHANGE with flags down before + * FUNC_DRV_UNRGTR, FW resets before FUNC_DRV_UNRGTR + */ + if (!up && (bp->flags & BNXT_FLAG_FW_RESET)) + return 0; + + HWRM_PREP(&req, HWRM_FUNC_DRV_IF_CHANGE, BNXT_USE_CHIMP_MB); + + if (up) + req.flags = + rte_cpu_to_le_32(HWRM_FUNC_DRV_IF_CHANGE_INPUT_FLAGS_UP); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + + HWRM_CHECK_RESULT(); + flags = rte_le_to_cpu_32(resp->flags); + HWRM_UNLOCK(); + + if (!up) + return 0; + + if (flags & HWRM_FUNC_DRV_IF_CHANGE_OUTPUT_FLAGS_HOT_FW_RESET_DONE) { + PMD_DRV_LOG(INFO, "FW reset happened while port was down\n"); + bp->flags |= BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE; + } + + return 0; +} + +int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp) +{ + struct hwrm_error_recovery_qcfg_output *resp = bp->hwrm_cmd_resp_addr; + struct bnxt_error_recovery_info *info = bp->recovery_info; + struct hwrm_error_recovery_qcfg_input req = {0}; + uint32_t flags = 0; + unsigned int i; + int rc; + + /* Older FW does not have error recovery support */ + if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) + return 0; + + HWRM_PREP(&req, HWRM_ERROR_RECOVERY_QCFG, BNXT_USE_CHIMP_MB); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + + HWRM_CHECK_RESULT(); + + flags = rte_le_to_cpu_32(resp->flags); + if (flags & HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FLAGS_HOST) + info->flags |= BNXT_FLAG_ERROR_RECOVERY_HOST; + else if (flags & HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FLAGS_CO_CPU) + info->flags |= BNXT_FLAG_ERROR_RECOVERY_CO_CPU; + + if ((info->flags & BNXT_FLAG_ERROR_RECOVERY_CO_CPU) && + !(bp->flags & BNXT_FLAG_KONG_MB_EN)) { + rc = -EINVAL; + goto err; + } + + /* FW returned values are in units of 100msec */ + info->driver_polling_freq = + rte_le_to_cpu_32(resp->driver_polling_freq) * 100; + info->master_func_wait_period = + rte_le_to_cpu_32(resp->master_func_wait_period) * 100; + info->normal_func_wait_period = + rte_le_to_cpu_32(resp->normal_func_wait_period) * 100; + info->master_func_wait_period_after_reset = + rte_le_to_cpu_32(resp->master_func_wait_period_after_reset) * 100; + 
info->max_bailout_time_after_reset = + rte_le_to_cpu_32(resp->max_bailout_time_after_reset) * 100; + info->status_regs[BNXT_FW_STATUS_REG] = + rte_le_to_cpu_32(resp->fw_health_status_reg); + info->status_regs[BNXT_FW_HEARTBEAT_CNT_REG] = + rte_le_to_cpu_32(resp->fw_heartbeat_reg); + info->status_regs[BNXT_FW_RECOVERY_CNT_REG] = + rte_le_to_cpu_32(resp->fw_reset_cnt_reg); + info->status_regs[BNXT_FW_RESET_INPROG_REG] = + rte_le_to_cpu_32(resp->reset_inprogress_reg); + info->reg_array_cnt = + rte_le_to_cpu_32(resp->reg_array_cnt); + + if (info->reg_array_cnt >= BNXT_NUM_RESET_REG) { + rc = -EINVAL; + goto err; + } + + for (i = 0; i < info->reg_array_cnt; i++) { + info->reset_reg[i] = + rte_le_to_cpu_32(resp->reset_reg[i]); + info->reset_reg_val[i] = + rte_le_to_cpu_32(resp->reset_reg_val[i]); + info->delay_after_reset[i] = + resp->delay_after_reset[i]; + } +err: + HWRM_UNLOCK(); + + /* Map the FW status registers */ + if (!rc) + rc = bnxt_map_fw_health_status_regs(bp); + + if (rc) { + rte_free(bp->recovery_info); + bp->recovery_info = NULL; + } + return rc; +} + +int bnxt_hwrm_fw_reset(struct bnxt *bp) +{ + struct hwrm_fw_reset_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_fw_reset_input req = {0}; + int rc; + + if (!BNXT_PF(bp)) + return -EOPNOTSUPP; + + HWRM_PREP(&req, HWRM_FW_RESET, BNXT_USE_KONG(bp)); + + req.embedded_proc_type = + HWRM_FW_RESET_INPUT_EMBEDDED_PROC_TYPE_CHIP; + req.selfrst_status = + HWRM_FW_RESET_INPUT_SELFRST_STATUS_SELFRSTASAP; + req.flags = HWRM_FW_RESET_INPUT_FLAGS_RESET_GRACEFUL; + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), + BNXT_USE_KONG(bp)); + + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); + + return rc; +} + +int bnxt_hwrm_port_ts_query(struct bnxt *bp, uint8_t path, uint64_t *timestamp) +{ + struct hwrm_port_ts_query_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_port_ts_query_input req = {0}; + struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; + uint32_t flags = 0; + int rc; + + if (!ptp) + return 0; + + HWRM_PREP(&req, HWRM_PORT_TS_QUERY, BNXT_USE_CHIMP_MB); + + switch (path) { + case BNXT_PTP_FLAGS_PATH_TX: + flags |= HWRM_PORT_TS_QUERY_INPUT_FLAGS_PATH_TX; + break; + case BNXT_PTP_FLAGS_PATH_RX: + flags |= HWRM_PORT_TS_QUERY_INPUT_FLAGS_PATH_RX; + break; + case BNXT_PTP_FLAGS_CURRENT_TIME: + flags |= HWRM_PORT_TS_QUERY_INPUT_FLAGS_CURRENT_TIME; + break; + } + + req.flags = rte_cpu_to_le_32(flags); + req.port_id = rte_cpu_to_le_16(bp->pf->port_id); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + + HWRM_CHECK_RESULT(); + + if (timestamp) { + *timestamp = rte_le_to_cpu_32(resp->ptp_msg_ts[0]); + *timestamp |= + (uint64_t)(rte_le_to_cpu_32(resp->ptp_msg_ts[1])) << 32; + } + HWRM_UNLOCK(); + + return rc; +} + +int bnxt_hwrm_cfa_adv_flow_mgmt_qcaps(struct bnxt *bp) +{ + struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp = + bp->hwrm_cmd_resp_addr; + struct hwrm_cfa_adv_flow_mgnt_qcaps_input req = {0}; + uint32_t flags = 0; + int rc = 0; + + if (!(bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_MGMT)) + return rc; + + if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) { + PMD_DRV_LOG(DEBUG, + "Not a PF or trusted VF. 
Command not supported\n"); + return 0; + } + + HWRM_PREP(&req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS, BNXT_USE_KONG(bp)); + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp)); + + HWRM_CHECK_RESULT(); + flags = rte_le_to_cpu_32(resp->flags); + HWRM_UNLOCK(); + + if (flags & HWRM_CFA_ADV_FLOW_MGNT_QCAPS_L2_HDR_SRC_FILTER_EN) { + bp->flow_flags |= BNXT_FLOW_FLAG_L2_HDR_SRC_FILTER_EN; + PMD_DRV_LOG(INFO, "Source L2 header filtering enabled\n"); + } + + return rc; +} + +int bnxt_hwrm_cfa_counter_qcaps(struct bnxt *bp, uint16_t *max_fc) +{ + int rc = 0; + + struct hwrm_cfa_counter_qcaps_input req = {0}; + struct hwrm_cfa_counter_qcaps_output *resp = bp->hwrm_cmd_resp_addr; + + if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) { + PMD_DRV_LOG(DEBUG, + "Not a PF or trusted VF. Command not supported\n"); + return 0; + } + + HWRM_PREP(&req, HWRM_CFA_COUNTER_QCAPS, BNXT_USE_KONG(bp)); + req.target_id = rte_cpu_to_le_16(bp->fw_fid); + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp)); + + HWRM_CHECK_RESULT(); + if (max_fc) + *max_fc = rte_le_to_cpu_16(resp->max_rx_fc); + HWRM_UNLOCK(); + + return 0; +} + +int bnxt_hwrm_ctx_rgtr(struct bnxt *bp, rte_iova_t dma_addr, uint16_t *ctx_id) +{ + int rc = 0; + struct hwrm_cfa_ctx_mem_rgtr_input req = {.req_type = 0 }; + struct hwrm_cfa_ctx_mem_rgtr_output *resp = bp->hwrm_cmd_resp_addr; + + if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) { + PMD_DRV_LOG(DEBUG, + "Not a PF or trusted VF. Command not supported\n"); + return 0; + } + + HWRM_PREP(&req, HWRM_CFA_CTX_MEM_RGTR, BNXT_USE_KONG(bp)); + + req.page_level = HWRM_CFA_CTX_MEM_RGTR_INPUT_PAGE_LEVEL_LVL_0; + req.page_size = HWRM_CFA_CTX_MEM_RGTR_INPUT_PAGE_SIZE_2M; + req.page_dir = rte_cpu_to_le_64(dma_addr); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp)); + + HWRM_CHECK_RESULT(); + if (ctx_id) { + *ctx_id = rte_le_to_cpu_16(resp->ctx_id); + PMD_DRV_LOG(DEBUG, "ctx_id = %d\n", *ctx_id); + } + HWRM_UNLOCK(); + + return 0; +} + +int bnxt_hwrm_ctx_unrgtr(struct bnxt *bp, uint16_t ctx_id) +{ + int rc = 0; + struct hwrm_cfa_ctx_mem_unrgtr_input req = {.req_type = 0 }; + struct hwrm_cfa_ctx_mem_unrgtr_output *resp = bp->hwrm_cmd_resp_addr; + + if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) { + PMD_DRV_LOG(DEBUG, + "Not a PF or trusted VF. Command not supported\n"); + return 0; + } + + HWRM_PREP(&req, HWRM_CFA_CTX_MEM_UNRGTR, BNXT_USE_KONG(bp)); + + req.ctx_id = rte_cpu_to_le_16(ctx_id); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp)); + + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); + + return rc; +} + +int bnxt_hwrm_cfa_counter_cfg(struct bnxt *bp, enum bnxt_flow_dir dir, + uint16_t cntr, uint16_t ctx_id, + uint32_t num_entries, bool enable) +{ + struct hwrm_cfa_counter_cfg_input req = {0}; + struct hwrm_cfa_counter_cfg_output *resp = bp->hwrm_cmd_resp_addr; + uint16_t flags = 0; + int rc; + + if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) { + PMD_DRV_LOG(DEBUG, + "Not a PF or trusted VF. Command not supported\n"); + return 0; + } + + HWRM_PREP(&req, HWRM_CFA_COUNTER_CFG, BNXT_USE_KONG(bp)); + + req.target_id = rte_cpu_to_le_16(bp->fw_fid); + req.counter_type = rte_cpu_to_le_16(cntr); + flags = enable ? 
HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_CFG_MODE_ENABLE : + HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_CFG_MODE_DISABLE; + flags |= HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_DATA_TRANSFER_MODE_PULL; + if (dir == BNXT_DIR_RX) + flags |= HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_PATH_RX; + else if (dir == BNXT_DIR_TX) + flags |= HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_PATH_TX; + req.flags = rte_cpu_to_le_16(flags); + req.ctx_id = rte_cpu_to_le_16(ctx_id); + req.num_entries = rte_cpu_to_le_32(num_entries); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp)); + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); + + return 0; +} + +int bnxt_hwrm_cfa_counter_qstats(struct bnxt *bp, + enum bnxt_flow_dir dir, + uint16_t cntr, + uint16_t num_entries) +{ + struct hwrm_cfa_counter_qstats_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_cfa_counter_qstats_input req = {0}; + uint16_t flow_ctx_id = 0; + uint16_t flags = 0; + int rc = 0; + + if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) { + PMD_DRV_LOG(DEBUG, + "Not a PF or trusted VF. Command not supported\n"); + return 0; + } + + if (dir == BNXT_DIR_RX) { + flow_ctx_id = bp->flow_stat->rx_fc_in_tbl.ctx_id; + flags = HWRM_CFA_COUNTER_QSTATS_INPUT_FLAGS_PATH_RX; + } else if (dir == BNXT_DIR_TX) { + flow_ctx_id = bp->flow_stat->tx_fc_in_tbl.ctx_id; + flags = HWRM_CFA_COUNTER_QSTATS_INPUT_FLAGS_PATH_TX; + } + + HWRM_PREP(&req, HWRM_CFA_COUNTER_QSTATS, BNXT_USE_KONG(bp)); + req.target_id = rte_cpu_to_le_16(bp->fw_fid); + req.counter_type = rte_cpu_to_le_16(cntr); + req.input_flow_ctx_id = rte_cpu_to_le_16(flow_ctx_id); + req.num_entries = rte_cpu_to_le_16(num_entries); + req.flags = rte_cpu_to_le_16(flags); + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp)); + + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); + + return 0; +} diff --git a/src/spdk/dpdk/drivers/net/bnxt/bnxt_hwrm.h b/src/spdk/dpdk/drivers/net/bnxt/bnxt_hwrm.h new file mode 100644 index 000000000..58b414d4f --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnxt/bnxt_hwrm.h @@ -0,0 +1,273 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Broadcom + * All rights reserved. 
+ */ + +#ifndef _BNXT_HWRM_H_ +#define _BNXT_HWRM_H_ + +#include +#include + +struct bnxt; +struct bnxt_filter_info; +struct bnxt_cp_ring_info; +struct hwrm_func_qstats_output; + +#define HWRM_SEQ_ID_INVALID -1U +/* Convert Bit field location to value */ +#define ASYNC_CMPL_EVENT_ID_LINK_STATUS_CHANGE \ + (1 << HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE) +#define ASYNC_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED \ + (1 << HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED) +#define ASYNC_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE \ + (1 << HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE) +#define ASYNC_CMPL_EVENT_ID_LINK_SPEED_CHANGE \ + (1 << HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE) +#define ASYNC_CMPL_EVENT_ID_RESET_NOTIFY \ + (1 << HWRM_ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY) +#define ASYNC_CMPL_EVENT_ID_ERROR_RECOVERY \ + (1 << HWRM_ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY) +#define ASYNC_CMPL_EVENT_ID_PF_DRVR_UNLOAD \ + (1 << (HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD - 32)) +#define ASYNC_CMPL_EVENT_ID_VF_CFG_CHANGE \ + (1 << (HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE - 32)) +#define ASYNC_CMPL_EVENT_ID_DBG_NOTIFICATION \ + (1 << (HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION - 32)) + +#define HWRM_QUEUE_SERVICE_PROFILE_LOSSY \ + HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_LOSSY + +#define HWRM_QUEUE_SERVICE_PROFILE_UNKNOWN \ + HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN + +#define HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC \ + HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESERVATION_STRATEGY_MINIMAL_STATIC +#define HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MAXIMAL \ + HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESERVATION_STRATEGY_MAXIMAL + +#define HWRM_CFA_ADV_FLOW_MGNT_QCAPS_L2_HDR_SRC_FILTER_EN \ +HWRM_CFA_ADV_FLOW_MGNT_QCAPS_OUTPUT_FLAGS_L2_HEADER_SOURCE_FIELDS_SUPPORTED + +#define HWRM_SPEC_CODE_1_8_4 0x10804 +#define HWRM_SPEC_CODE_1_9_0 0x10900 +#define HWRM_SPEC_CODE_1_9_2 0x10902 + +#define FUNC_BACKING_STORE_CFG_INPUT_DFLT_ENABLES \ + (HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_QP | \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_SRQ | \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_CQ | \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_VNIC | \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_STAT) + +#define GET_TX_QUEUE_INFO(x) \ + bp->tx_cos_queue[x].id = resp->queue_id##x; \ + bp->tx_cos_queue[x].profile = \ + resp->queue_id##x##_service_profile + +#define GET_RX_QUEUE_INFO(x) \ + bp->rx_cos_queue[x].id = resp->queue_id##x; \ + bp->rx_cos_queue[x].profile = \ + resp->queue_id##x##_service_profile + +int bnxt_hwrm_tf_message_tunneled(struct bnxt *bp, + bool use_kong_mb, + uint16_t tf_type, + uint16_t tf_subtype, + uint32_t *tf_response_code, + void *msg, + uint32_t msg_len, + void *response, + uint32_t response_len); + +int bnxt_hwrm_tf_message_direct(struct bnxt *bp, + bool use_kong_mb, + uint16_t msg_type, + void *msg, + uint32_t msg_len, + void *resp_msg, + uint32_t resp_len); + +#define CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC \ + HWRM_CFA_COUNTER_CFG_INPUT_COUNTER_TYPE_FC + +enum bnxt_flow_dir { + BNXT_DIR_RX = 0, + BNXT_DIR_TX, + BNXT_DIR_LOOPBACK, + BNXT_DIR_MAX +}; + +#define BNXT_CTX_VAL_INVAL 0xFFFF + +int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, + struct bnxt_vnic_info *vnic); +int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic, + uint16_t vlan_count, + struct bnxt_vlan_table_entry *vlan_table); +int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid, + uint16_t vlan_count, + 
struct bnxt_vlan_antispoof_table_entry *vlan_table); +int bnxt_hwrm_clear_l2_filter(struct bnxt *bp, + struct bnxt_filter_info *filter); +int bnxt_hwrm_set_l2_filter(struct bnxt *bp, + uint16_t dst_id, + struct bnxt_filter_info *filter); +int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id, + void *encaped, size_t ec_size); +int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id, + void *encaped, size_t ec_size); + +int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp); +int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp); +int bnxt_hwrm_func_driver_register(struct bnxt *bp); +int bnxt_hwrm_func_qcaps(struct bnxt *bp); +int bnxt_hwrm_func_reset(struct bnxt *bp); +int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags); +int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid, + struct rte_eth_stats *stats, + struct hwrm_func_qstats_output *func_qstats); +int bnxt_hwrm_func_qstats_tx_drop(struct bnxt *bp, uint16_t fid, + uint64_t *dropped); +int bnxt_hwrm_func_clr_stats(struct bnxt *bp, uint16_t fid); +int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp); +int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp); + +int bnxt_hwrm_queue_qportcfg(struct bnxt *bp); + +int bnxt_hwrm_set_async_event_cr(struct bnxt *bp); +int bnxt_hwrm_ring_alloc(struct bnxt *bp, + struct bnxt_ring *ring, + uint32_t ring_type, uint32_t map_index, + uint32_t stats_ctx_id, uint32_t cmpl_ring_id, + uint16_t tx_cosq_id); +int bnxt_hwrm_ring_free(struct bnxt *bp, + struct bnxt_ring *ring, uint32_t ring_type); +int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx); +int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx); + +int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr); +int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, + struct bnxt_cp_ring_info *cpr, unsigned int idx); +int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, + struct bnxt_cp_ring_info *cpr, unsigned int idx); +int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx, + struct rte_eth_stats *stats, uint8_t rx); + +int bnxt_hwrm_ver_get(struct bnxt *bp, uint32_t timeout); + +int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic); +int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic); +int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic, + int16_t fw_vf_id); +int bnxt_hwrm_vnic_qcaps(struct bnxt *bp); +int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic, + uint16_t ctx_idx); +int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic); +int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic); +int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp, + struct bnxt_vnic_info *vnic); +int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp, + struct bnxt_vnic_info *vnic); +int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp, + struct bnxt_vnic_info *vnic, bool enable); + +int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp); +int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp); +int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp); +void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr); +void bnxt_free_nq_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr); +int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic); +void bnxt_free_all_hwrm_resources(struct bnxt *bp); +void bnxt_free_hwrm_resources(struct bnxt *bp); +void bnxt_free_hwrm_rx_ring(struct bnxt *bp, int queue_index); +int bnxt_alloc_hwrm_resources(struct bnxt *bp); +int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link); +int 
bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up); +int bnxt_hwrm_func_qcfg(struct bnxt *bp, uint16_t *mtu); +int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp); +int bnxt_hwrm_func_reserve_vf_resc(struct bnxt *bp, bool test); +int bnxt_hwrm_allocate_pf_only(struct bnxt *bp); +int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs); +int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, + const uint8_t *mac_addr); +int bnxt_hwrm_pf_evb_mode(struct bnxt *bp); +int bnxt_hwrm_func_bw_cfg(struct bnxt *bp, uint16_t vf, + uint16_t max_bw, uint16_t enables); +int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf); +int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf, + struct rte_ether_addr *mac); +int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf); +int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port, + uint8_t tunnel_type); +int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port, + uint8_t tunnel_type); +int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf); +int bnxt_hwrm_port_qstats(struct bnxt *bp); +int bnxt_hwrm_port_clr_stats(struct bnxt *bp); +int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on); +int bnxt_hwrm_port_led_qcaps(struct bnxt *bp); +int bnxt_hwrm_port_mac_qcfg(struct bnxt *bp); +int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf, + uint32_t flags); +void vf_vnic_set_rxmask_cb(struct bnxt_vnic_info *vnic, void *flagp); +int bnxt_set_rx_mask_no_vlan(struct bnxt *bp, struct bnxt_vnic_info *vnic); +int bnxt_vf_vnic_count(struct bnxt *bp, uint16_t vf); +int bnxt_hwrm_func_vf_vnic_query_and_config(struct bnxt *bp, uint16_t vf, + void (*vnic_cb)(struct bnxt_vnic_info *, void *), void *cbdata, + int (*hwrm_cb)(struct bnxt *bp, struct bnxt_vnic_info *vnic)); +int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf, + bool on); +int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf); +int bnxt_hwrm_set_em_filter(struct bnxt *bp, uint16_t dst_id, + struct bnxt_filter_info *filter); +int bnxt_hwrm_clear_em_filter(struct bnxt *bp, struct bnxt_filter_info *filter); + +int bnxt_hwrm_set_ntuple_filter(struct bnxt *bp, uint16_t dst_id, + struct bnxt_filter_info *filter); +int bnxt_hwrm_clear_ntuple_filter(struct bnxt *bp, + struct bnxt_filter_info *filter); +int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data); +int bnxt_hwrm_nvm_get_dir_info(struct bnxt *bp, uint32_t *entries, + uint32_t *length); +int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index, + uint32_t offset, uint32_t length, + uint8_t *data); +int bnxt_hwrm_erase_nvram_directory(struct bnxt *bp, uint8_t index); +int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type, + uint16_t dir_ordinal, uint16_t dir_ext, + uint16_t dir_attr, const uint8_t *data, + size_t data_len); +int bnxt_hwrm_ptp_cfg(struct bnxt *bp); +int bnxt_vnic_rss_configure(struct bnxt *bp, + struct bnxt_vnic_info *vnic); +int bnxt_hwrm_set_ring_coal(struct bnxt *bp, + struct bnxt_coal *coal, uint16_t ring_id); +int bnxt_hwrm_check_vf_rings(struct bnxt *bp); +int bnxt_hwrm_ext_port_qstats(struct bnxt *bp); +int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp); +int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, uint32_t enables); +int bnxt_alloc_ctx_mem(struct bnxt *bp); +int bnxt_hwrm_tunnel_redirect(struct bnxt *bp, uint8_t type); +int bnxt_hwrm_tunnel_redirect_free(struct bnxt *bp, uint8_t type); +int bnxt_hwrm_tunnel_redirect_query(struct bnxt *bp, uint32_t *type); +int 
bnxt_hwrm_tunnel_redirect_info(struct bnxt *bp, uint8_t tun_type, + uint16_t *dst_fid); +int bnxt_hwrm_set_mac(struct bnxt *bp); +int bnxt_hwrm_if_change(struct bnxt *bp, bool state); +int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp); +int bnxt_hwrm_fw_reset(struct bnxt *bp); +int bnxt_hwrm_port_ts_query(struct bnxt *bp, uint8_t path, + uint64_t *timestamp); +int bnxt_hwrm_cfa_adv_flow_mgmt_qcaps(struct bnxt *bp); +int bnxt_hwrm_cfa_counter_qcaps(struct bnxt *bp, uint16_t *max_fc); +int bnxt_hwrm_ctx_rgtr(struct bnxt *bp, rte_iova_t dma_addr, uint16_t *ctx_id); +int bnxt_hwrm_ctx_unrgtr(struct bnxt *bp, uint16_t ctx_id); +int bnxt_hwrm_cfa_counter_cfg(struct bnxt *bp, enum bnxt_flow_dir dir, + uint16_t cntr, uint16_t ctx_id, + uint32_t num_entries, bool enable); +int bnxt_hwrm_cfa_counter_qstats(struct bnxt *bp, + enum bnxt_flow_dir dir, + uint16_t cntr, + uint16_t num_entries); +#endif diff --git a/src/spdk/dpdk/drivers/net/bnxt/bnxt_irq.c b/src/spdk/dpdk/drivers/net/bnxt/bnxt_irq.c new file mode 100644 index 000000000..40e1b0c98 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnxt/bnxt_irq.c @@ -0,0 +1,193 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Broadcom + * All rights reserved. + */ + +#include + +#include +#include + +#include "bnxt.h" +#include "bnxt_irq.h" +#include "bnxt_ring.h" +#include "hsi_struct_def_dpdk.h" + +/* + * Interrupts + */ + +void bnxt_int_handler(void *param) +{ + struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param; + struct bnxt *bp = eth_dev->data->dev_private; + struct bnxt_cp_ring_info *cpr = bp->async_cp_ring; + struct cmpl_base *cmp; + uint32_t raw_cons; + uint32_t cons; + + if (cpr == NULL) + return; + + raw_cons = cpr->cp_raw_cons; + pthread_mutex_lock(&bp->def_cp_lock); + while (1) { + if (!cpr || !cpr->cp_ring_struct || !cpr->cp_db.doorbell) { + pthread_mutex_unlock(&bp->def_cp_lock); + return; + } + + if (is_bnxt_in_error(bp)) { + pthread_mutex_unlock(&bp->def_cp_lock); + return; + } + + cons = RING_CMP(cpr->cp_ring_struct, raw_cons); + cmp = &cpr->cp_desc_ring[cons]; + + if (!CMP_VALID(cmp, raw_cons, cpr->cp_ring_struct)) + break; + + bnxt_event_hwrm_resp_handler(bp, cmp); + raw_cons = NEXT_RAW_CMP(raw_cons); + } + + cpr->cp_raw_cons = raw_cons; + if (BNXT_HAS_NQ(bp)) + bnxt_db_nq_arm(cpr); + else + B_CP_DB_REARM(cpr, cpr->cp_raw_cons); + + pthread_mutex_unlock(&bp->def_cp_lock); +} + +int bnxt_free_int(struct bnxt *bp) +{ + struct rte_intr_handle *intr_handle = &bp->pdev->intr_handle; + struct bnxt_irq *irq = bp->irq_tbl; + int rc = 0; + + if (!irq) + return 0; + + if (irq->requested) { + int count = 0; + + /* + * Callback deregistration will fail with rc -EAGAIN if the + * callback is currently active. Retry every 50 ms until + * successful or 500 ms has elapsed. 
+ */ + do { + rc = rte_intr_callback_unregister(intr_handle, + irq->handler, + bp->eth_dev); + if (rc >= 0) { + irq->requested = 0; + break; + } + rte_delay_ms(50); + } while (count++ < 10); + + if (rc < 0) { + PMD_DRV_LOG(ERR, "irq cb unregister failed rc: %d\n", + rc); + return rc; + } + } + + rte_free(bp->irq_tbl); + bp->irq_tbl = NULL; + + return 0; +} + +void bnxt_disable_int(struct bnxt *bp) +{ + struct bnxt_cp_ring_info *cpr = bp->async_cp_ring; + + if (BNXT_NUM_ASYNC_CPR(bp) == 0) + return; + + if (is_bnxt_in_error(bp)) + return; + + if (!cpr || !cpr->cp_db.doorbell) + return; + + /* Only the default completion ring */ + if (BNXT_HAS_NQ(bp)) + bnxt_db_nq(cpr); + else + B_CP_DB_DISARM(cpr); +} + +void bnxt_enable_int(struct bnxt *bp) +{ + struct bnxt_cp_ring_info *cpr = bp->async_cp_ring; + + if (BNXT_NUM_ASYNC_CPR(bp) == 0) + return; + + if (!cpr || !cpr->cp_db.doorbell) + return; + + /* Only the default completion ring */ + if (BNXT_HAS_NQ(bp)) + bnxt_db_nq_arm(cpr); + else + B_CP_DB_ARM(cpr); +} + +int bnxt_setup_int(struct bnxt *bp) +{ + uint16_t total_vecs; + const int len = sizeof(bp->irq_tbl[0].name); + int i; + + /* DPDK host only supports 1 MSI-X vector */ + total_vecs = 1; + bp->irq_tbl = rte_calloc("bnxt_irq_tbl", total_vecs, + sizeof(struct bnxt_irq), 0); + if (bp->irq_tbl) { + for (i = 0; i < total_vecs; i++) { + bp->irq_tbl[i].vector = i; + snprintf(bp->irq_tbl[i].name, len, + "%s-%d", bp->eth_dev->device->name, i); + bp->irq_tbl[i].handler = bnxt_int_handler; + } + } else { + PMD_DRV_LOG(ERR, "bnxt_irq_tbl setup failed\n"); + return -ENOMEM; + } + + return 0; +} + +int bnxt_request_int(struct bnxt *bp) +{ + struct rte_intr_handle *intr_handle = &bp->pdev->intr_handle; + struct bnxt_irq *irq = bp->irq_tbl; + int rc = 0; + + if (!irq) + return 0; + + if (!irq->requested) { + rc = rte_intr_callback_register(intr_handle, + irq->handler, + bp->eth_dev); + if (!rc) + irq->requested = 1; + } + +#ifdef RTE_EXEC_ENV_FREEBSD + /** + * In FreeBSD OS, nic_uio does not support interrupts and + * interrupt register callback will fail. + */ + rc = 0; +#endif + + return rc; +} diff --git a/src/spdk/dpdk/drivers/net/bnxt/bnxt_irq.h b/src/spdk/dpdk/drivers/net/bnxt/bnxt_irq.h new file mode 100644 index 000000000..ad8a1df9c --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnxt/bnxt_irq.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Broadcom + * All rights reserved. + */ + +#ifndef _BNXT_IRQ_H_ +#define _BNXT_IRQ_H_ + +struct bnxt_irq { + rte_intr_callback_fn handler; + unsigned int vector; + uint8_t requested; + char name[RTE_ETH_NAME_MAX_LEN + 2]; +}; + +struct bnxt; +int bnxt_free_int(struct bnxt *bp); +void bnxt_disable_int(struct bnxt *bp); +void bnxt_enable_int(struct bnxt *bp); +int bnxt_setup_int(struct bnxt *bp); +int bnxt_request_int(struct bnxt *bp); +void bnxt_int_handler(void *param); + +#endif diff --git a/src/spdk/dpdk/drivers/net/bnxt/bnxt_nvm_defs.h b/src/spdk/dpdk/drivers/net/bnxt/bnxt_nvm_defs.h new file mode 100644 index 000000000..ea9d4a9d5 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnxt/bnxt_nvm_defs.h @@ -0,0 +1,70 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) + * Copyright(c) 2014-2018 Broadcom + * All rights reserved. 
+ */ + +#ifndef _BNXT_NVM_DEFS_H_ +#define _BNXT_NVM_DEFS_H_ + +enum bnxt_nvm_directory_type { + BNX_DIR_TYPE_UNUSED = 0, + BNX_DIR_TYPE_PKG_LOG = 1, + BNX_DIR_TYPE_UPDATE = 2, + BNX_DIR_TYPE_CHIMP_PATCH = 3, + BNX_DIR_TYPE_BOOTCODE = 4, + BNX_DIR_TYPE_VPD = 5, + BNX_DIR_TYPE_EXP_ROM_MBA = 6, + BNX_DIR_TYPE_AVS = 7, + BNX_DIR_TYPE_PCIE = 8, + BNX_DIR_TYPE_PORT_MACRO = 9, + BNX_DIR_TYPE_APE_FW = 10, + BNX_DIR_TYPE_APE_PATCH = 11, + BNX_DIR_TYPE_KONG_FW = 12, + BNX_DIR_TYPE_KONG_PATCH = 13, + BNX_DIR_TYPE_BONO_FW = 14, + BNX_DIR_TYPE_BONO_PATCH = 15, + BNX_DIR_TYPE_TANG_FW = 16, + BNX_DIR_TYPE_TANG_PATCH = 17, + BNX_DIR_TYPE_BOOTCODE_2 = 18, + BNX_DIR_TYPE_CCM = 19, + BNX_DIR_TYPE_PCI_CFG = 20, + BNX_DIR_TYPE_TSCF_UCODE = 21, + BNX_DIR_TYPE_ISCSI_BOOT = 22, + BNX_DIR_TYPE_ISCSI_BOOT_IPV6 = 24, + BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6 = 25, + BNX_DIR_TYPE_ISCSI_BOOT_CFG6 = 26, + BNX_DIR_TYPE_EXT_PHY = 27, + BNX_DIR_TYPE_SHARED_CFG = 40, + BNX_DIR_TYPE_PORT_CFG = 41, + BNX_DIR_TYPE_FUNC_CFG = 42, + BNX_DIR_TYPE_MGMT_CFG = 48, + BNX_DIR_TYPE_MGMT_DATA = 49, + BNX_DIR_TYPE_MGMT_WEB_DATA = 50, + BNX_DIR_TYPE_MGMT_WEB_META = 51, + BNX_DIR_TYPE_MGMT_EVENT_LOG = 52, + BNX_DIR_TYPE_MGMT_AUDIT_LOG = 53 +}; + +#define BNX_DIR_ORDINAL_FIRST 0 + +#define BNX_DIR_EXT_NONE 0 +#define BNX_DIR_EXT_INACTIVE (1 << 0) +#define BNX_DIR_EXT_UPDATE (1 << 1) + +#define BNX_DIR_ATTR_NONE 0 +#define BNX_DIR_ATTR_NO_CHKSUM (1 << 0) +#define BNX_DIR_ATTR_PROP_STREAM (1 << 1) + +#define BNX_PKG_LOG_MAX_LENGTH 4096 + +enum bnxnvm_pkglog_field_index { + BNX_PKG_LOG_FIELD_IDX_INSTALLED_TIMESTAMP = 0, + BNX_PKG_LOG_FIELD_IDX_PKG_DESCRIPTION = 1, + BNX_PKG_LOG_FIELD_IDX_PKG_VERSION = 2, + BNX_PKG_LOG_FIELD_IDX_PKG_TIMESTAMP = 3, + BNX_PKG_LOG_FIELD_IDX_PKG_CHECKSUM = 4, + BNX_PKG_LOG_FIELD_IDX_INSTALLED_ITEMS = 5, + BNX_PKG_LOG_FIELD_IDX_INSTALLED_MASK = 6 +}; + +#endif /* Don't add anything after this line */ diff --git a/src/spdk/dpdk/drivers/net/bnxt/bnxt_ring.c b/src/spdk/dpdk/drivers/net/bnxt/bnxt_ring.c new file mode 100644 index 000000000..24a947f27 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnxt/bnxt_ring.c @@ -0,0 +1,851 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Broadcom + * All rights reserved. + */ + +#include +#include +#include +#include + +#include "bnxt.h" +#include "bnxt_hwrm.h" +#include "bnxt_ring.h" +#include "bnxt_rxq.h" +#include "bnxt_rxr.h" +#include "bnxt_txq.h" +#include "bnxt_txr.h" + +#include "hsi_struct_def_dpdk.h" + +/* + * Generic ring handling + */ + +void bnxt_free_ring(struct bnxt_ring *ring) +{ + if (!ring) + return; + + if (ring->vmem_size && *ring->vmem) { + memset((char *)*ring->vmem, 0, ring->vmem_size); + *ring->vmem = NULL; + } + ring->mem_zone = NULL; +} + +/* + * Ring groups + */ + +static void bnxt_init_ring_grps(struct bnxt *bp) +{ + unsigned int i; + + for (i = 0; i < bp->max_ring_grps; i++) + memset(&bp->grp_info[i], (uint8_t)HWRM_NA_SIGNATURE, + sizeof(struct bnxt_ring_grp_info)); +} + +int bnxt_alloc_ring_grps(struct bnxt *bp) +{ + if (bp->max_tx_rings == 0) { + PMD_DRV_LOG(ERR, "No TX rings available!\n"); + return -EBUSY; + } + + /* THOR does not support ring groups. + * But we will use the array to save RSS context IDs. 
+ */ + if (BNXT_CHIP_THOR(bp)) { + bp->max_ring_grps = BNXT_MAX_RSS_CTXTS_THOR; + } else if (bp->max_ring_grps < bp->rx_cp_nr_rings) { + /* 1 ring is for default completion ring */ + PMD_DRV_LOG(ERR, "Insufficient resource: Ring Group\n"); + return -ENOSPC; + } + + if (BNXT_HAS_RING_GRPS(bp)) { + bp->grp_info = rte_zmalloc("bnxt_grp_info", + sizeof(*bp->grp_info) * + bp->max_ring_grps, 0); + if (!bp->grp_info) { + PMD_DRV_LOG(ERR, + "Failed to alloc grp info tbl.\n"); + return -ENOMEM; + } + bnxt_init_ring_grps(bp); + } + + return 0; +} + +/* + * Allocates a completion ring with vmem and stats optionally also allocating + * a TX and/or RX ring. Passing NULL as tx_ring_info and/or rx_ring_info + * to not allocate them. + * + * Order in the allocation is: + * stats - Always non-zero length + * cp vmem - Always zero-length, supported for the bnxt_ring abstraction + * tx vmem - Only non-zero length if tx_ring_info is not NULL + * rx vmem - Only non-zero length if rx_ring_info is not NULL + * cp bd ring - Always non-zero length + * tx bd ring - Only non-zero length if tx_ring_info is not NULL + * rx bd ring - Only non-zero length if rx_ring_info is not NULL + */ +int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx, + struct bnxt_tx_queue *txq, + struct bnxt_rx_queue *rxq, + struct bnxt_cp_ring_info *cp_ring_info, + struct bnxt_cp_ring_info *nq_ring_info, + const char *suffix) +{ + struct bnxt_ring *cp_ring = cp_ring_info->cp_ring_struct; + struct bnxt_rx_ring_info *rx_ring_info = rxq ? rxq->rx_ring : NULL; + struct bnxt_tx_ring_info *tx_ring_info = txq ? txq->tx_ring : NULL; + struct bnxt_ring *tx_ring; + struct bnxt_ring *rx_ring; + struct rte_pci_device *pdev = bp->pdev; + uint64_t rx_offloads = bp->eth_dev->data->dev_conf.rxmode.offloads; + const struct rte_memzone *mz = NULL; + char mz_name[RTE_MEMZONE_NAMESIZE]; + rte_iova_t mz_phys_addr; + + int stats_len = (tx_ring_info || rx_ring_info) ? + RTE_CACHE_LINE_ROUNDUP(sizeof(struct hwrm_stat_ctx_query_output) - + sizeof (struct hwrm_resp_hdr)) : 0; + stats_len = RTE_ALIGN(stats_len, 128); + + int cp_vmem_start = stats_len; + int cp_vmem_len = RTE_CACHE_LINE_ROUNDUP(cp_ring->vmem_size); + cp_vmem_len = RTE_ALIGN(cp_vmem_len, 128); + + int nq_vmem_len = nq_ring_info ? + RTE_CACHE_LINE_ROUNDUP(cp_ring->vmem_size) : 0; + nq_vmem_len = RTE_ALIGN(nq_vmem_len, 128); + + int nq_vmem_start = cp_vmem_start + cp_vmem_len; + + int tx_vmem_start = nq_vmem_start + nq_vmem_len; + int tx_vmem_len = + tx_ring_info ? RTE_CACHE_LINE_ROUNDUP(tx_ring_info-> + tx_ring_struct->vmem_size) : 0; + tx_vmem_len = RTE_ALIGN(tx_vmem_len, 128); + + int rx_vmem_start = tx_vmem_start + tx_vmem_len; + int rx_vmem_len = rx_ring_info ? + RTE_CACHE_LINE_ROUNDUP(rx_ring_info-> + rx_ring_struct->vmem_size) : 0; + rx_vmem_len = RTE_ALIGN(rx_vmem_len, 128); + int ag_vmem_start = 0; + int ag_vmem_len = 0; + int cp_ring_start = 0; + int nq_ring_start = 0; + + ag_vmem_start = rx_vmem_start + rx_vmem_len; + ag_vmem_len = rx_ring_info ? RTE_CACHE_LINE_ROUNDUP( + rx_ring_info->ag_ring_struct->vmem_size) : 0; + cp_ring_start = ag_vmem_start + ag_vmem_len; + cp_ring_start = RTE_ALIGN(cp_ring_start, 4096); + + int cp_ring_len = RTE_CACHE_LINE_ROUNDUP(cp_ring->ring_size * + sizeof(struct cmpl_base)); + cp_ring_len = RTE_ALIGN(cp_ring_len, 128); + nq_ring_start = cp_ring_start + cp_ring_len; + nq_ring_start = RTE_ALIGN(nq_ring_start, 4096); + + int nq_ring_len = nq_ring_info ? 
cp_ring_len : 0; + + int tx_ring_start = nq_ring_start + nq_ring_len; + tx_ring_start = RTE_ALIGN(tx_ring_start, 4096); + int tx_ring_len = tx_ring_info ? + RTE_CACHE_LINE_ROUNDUP(tx_ring_info->tx_ring_struct->ring_size * + sizeof(struct tx_bd_long)) : 0; + tx_ring_len = RTE_ALIGN(tx_ring_len, 4096); + + int rx_ring_start = tx_ring_start + tx_ring_len; + rx_ring_start = RTE_ALIGN(rx_ring_start, 4096); + int rx_ring_len = rx_ring_info ? + RTE_CACHE_LINE_ROUNDUP(rx_ring_info->rx_ring_struct->ring_size * + sizeof(struct rx_prod_pkt_bd)) : 0; + rx_ring_len = RTE_ALIGN(rx_ring_len, 4096); + + int ag_ring_start = rx_ring_start + rx_ring_len; + ag_ring_start = RTE_ALIGN(ag_ring_start, 4096); + int ag_ring_len = rx_ring_len * AGG_RING_SIZE_FACTOR; + ag_ring_len = RTE_ALIGN(ag_ring_len, 4096); + + int ag_bitmap_start = ag_ring_start + ag_ring_len; + int ag_bitmap_len = rx_ring_info ? + RTE_CACHE_LINE_ROUNDUP(rte_bitmap_get_memory_footprint( + rx_ring_info->rx_ring_struct->ring_size * + AGG_RING_SIZE_FACTOR)) : 0; + + int tpa_info_start = ag_bitmap_start + ag_bitmap_len; + int tpa_info_len = 0; + + if (rx_ring_info && (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO)) { + int tpa_max = BNXT_TPA_MAX_AGGS(bp); + + tpa_info_len = tpa_max * sizeof(struct bnxt_tpa_info); + tpa_info_len = RTE_CACHE_LINE_ROUNDUP(tpa_info_len); + } + + int total_alloc_len = tpa_info_start; + total_alloc_len += tpa_info_len; + + snprintf(mz_name, RTE_MEMZONE_NAMESIZE, + "bnxt_" PCI_PRI_FMT "-%04x_%s", pdev->addr.domain, + pdev->addr.bus, pdev->addr.devid, pdev->addr.function, qidx, + suffix); + mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0; + mz = rte_memzone_lookup(mz_name); + if (!mz) { + mz = rte_memzone_reserve_aligned(mz_name, total_alloc_len, + SOCKET_ID_ANY, + RTE_MEMZONE_2MB | + RTE_MEMZONE_SIZE_HINT_ONLY | + RTE_MEMZONE_IOVA_CONTIG, + getpagesize()); + if (mz == NULL) + return -ENOMEM; + } + memset(mz->addr, 0, mz->len); + mz_phys_addr = mz->iova; + + if (tx_ring_info) { + txq->mz = mz; + tx_ring = tx_ring_info->tx_ring_struct; + + tx_ring->bd = ((char *)mz->addr + tx_ring_start); + tx_ring_info->tx_desc_ring = (struct tx_bd_long *)tx_ring->bd; + tx_ring->bd_dma = mz_phys_addr + tx_ring_start; + tx_ring_info->tx_desc_mapping = tx_ring->bd_dma; + tx_ring->mem_zone = (const void *)mz; + + if (!tx_ring->bd) + return -ENOMEM; + if (tx_ring->vmem_size) { + tx_ring->vmem = + (void **)((char *)mz->addr + tx_vmem_start); + tx_ring_info->tx_buf_ring = + (struct bnxt_sw_tx_bd *)tx_ring->vmem; + } + } + + if (rx_ring_info) { + rxq->mz = mz; + rx_ring = rx_ring_info->rx_ring_struct; + + rx_ring->bd = ((char *)mz->addr + rx_ring_start); + rx_ring_info->rx_desc_ring = + (struct rx_prod_pkt_bd *)rx_ring->bd; + rx_ring->bd_dma = mz_phys_addr + rx_ring_start; + rx_ring_info->rx_desc_mapping = rx_ring->bd_dma; + rx_ring->mem_zone = (const void *)mz; + + if (!rx_ring->bd) + return -ENOMEM; + if (rx_ring->vmem_size) { + rx_ring->vmem = + (void **)((char *)mz->addr + rx_vmem_start); + rx_ring_info->rx_buf_ring = + (struct bnxt_sw_rx_bd *)rx_ring->vmem; + } + + rx_ring = rx_ring_info->ag_ring_struct; + + rx_ring->bd = ((char *)mz->addr + ag_ring_start); + rx_ring_info->ag_desc_ring = + (struct rx_prod_pkt_bd *)rx_ring->bd; + rx_ring->bd_dma = mz->iova + ag_ring_start; + rx_ring_info->ag_desc_mapping = rx_ring->bd_dma; + rx_ring->mem_zone = (const void *)mz; + + if (!rx_ring->bd) + return -ENOMEM; + if (rx_ring->vmem_size) { + rx_ring->vmem = + (void **)((char *)mz->addr + ag_vmem_start); + rx_ring_info->ag_buf_ring = + (struct bnxt_sw_rx_bd 
*)rx_ring->vmem; + } + + rx_ring_info->ag_bitmap = + rte_bitmap_init(rx_ring_info->rx_ring_struct->ring_size * + AGG_RING_SIZE_FACTOR, (uint8_t *)mz->addr + + ag_bitmap_start, ag_bitmap_len); + + /* TPA info */ + if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) + rx_ring_info->tpa_info = + ((struct bnxt_tpa_info *)((char *)mz->addr + + tpa_info_start)); + } + + cp_ring->bd = ((char *)mz->addr + cp_ring_start); + cp_ring->bd_dma = mz_phys_addr + cp_ring_start; + cp_ring_info->cp_desc_ring = cp_ring->bd; + cp_ring_info->cp_desc_mapping = cp_ring->bd_dma; + cp_ring->mem_zone = (const void *)mz; + + if (!cp_ring->bd) + return -ENOMEM; + if (cp_ring->vmem_size) + *cp_ring->vmem = ((char *)mz->addr + stats_len); + if (stats_len) { + cp_ring_info->hw_stats = mz->addr; + cp_ring_info->hw_stats_map = mz_phys_addr; + } + cp_ring_info->hw_stats_ctx_id = HWRM_NA_SIGNATURE; + + if (nq_ring_info) { + struct bnxt_ring *nq_ring = nq_ring_info->cp_ring_struct; + + nq_ring->bd = (char *)mz->addr + nq_ring_start; + nq_ring->bd_dma = mz_phys_addr + nq_ring_start; + nq_ring_info->cp_desc_ring = nq_ring->bd; + nq_ring_info->cp_desc_mapping = nq_ring->bd_dma; + nq_ring->mem_zone = (const void *)mz; + + if (!nq_ring->bd) + return -ENOMEM; + if (nq_ring->vmem_size) + *nq_ring->vmem = (char *)mz->addr + nq_vmem_start; + + nq_ring_info->hw_stats_ctx_id = HWRM_NA_SIGNATURE; + } + + return 0; +} + +static void bnxt_init_dflt_coal(struct bnxt_coal *coal) +{ + /* Tick values in micro seconds. + * 1 coal_buf x bufs_per_record = 1 completion record. + */ + coal->num_cmpl_aggr_int = BNXT_NUM_CMPL_AGGR_INT; + /* This is a 6-bit value and must not be 0, or we'll get non stop IRQ */ + coal->num_cmpl_dma_aggr = BNXT_NUM_CMPL_DMA_AGGR; + /* This is a 6-bit value and must not be 0, or we'll get non stop IRQ */ + coal->num_cmpl_dma_aggr_during_int = BNXT_NUM_CMPL_DMA_AGGR_DURING_INT; + coal->int_lat_tmr_max = BNXT_INT_LAT_TMR_MAX; + /* min timer set to 1/2 of interrupt timer */ + coal->int_lat_tmr_min = BNXT_INT_LAT_TMR_MIN; + /* buf timer set to 1/4 of interrupt timer */ + coal->cmpl_aggr_dma_tmr = BNXT_CMPL_AGGR_DMA_TMR; + coal->cmpl_aggr_dma_tmr_during_int = BNXT_CMPL_AGGR_DMA_TMR_DURING_INT; +} + +static void bnxt_set_db(struct bnxt *bp, + struct bnxt_db_info *db, + uint32_t ring_type, + uint32_t map_idx, + uint32_t fid) +{ + if (BNXT_CHIP_THOR(bp)) { + if (BNXT_PF(bp)) + db->doorbell = (char *)bp->doorbell_base + 0x10000; + else + db->doorbell = (char *)bp->doorbell_base + 0x4000; + switch (ring_type) { + case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX: + db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ; + break; + case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX: + case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG: + db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ; + break; + case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL: + db->db_key64 = DBR_PATH_L2 | DBR_TYPE_CQ; + break; + case HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ: + db->db_key64 = DBR_PATH_L2; + break; + } + db->db_key64 |= (uint64_t)fid << DBR_XID_SFT; + db->db_64 = true; + } else { + db->doorbell = (char *)bp->doorbell_base + map_idx * 0x80; + switch (ring_type) { + case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX: + db->db_key32 = DB_KEY_TX; + break; + case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX: + db->db_key32 = DB_KEY_RX; + break; + case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL: + db->db_key32 = DB_KEY_CP; + break; + } + db->db_64 = false; + } +} + +static int bnxt_alloc_cmpl_ring(struct bnxt *bp, int queue_index, + struct bnxt_cp_ring_info *cpr) +{ + struct bnxt_ring *cp_ring = cpr->cp_ring_struct; + uint32_t nq_ring_id = 
HWRM_NA_SIGNATURE; + int cp_ring_index = queue_index + BNXT_RX_VEC_START; + struct bnxt_cp_ring_info *nqr = bp->rxtx_nq_ring; + uint8_t ring_type; + int rc = 0; + + ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL; + + if (BNXT_HAS_NQ(bp)) { + if (nqr) { + nq_ring_id = nqr->cp_ring_struct->fw_ring_id; + } else { + PMD_DRV_LOG(ERR, "NQ ring is NULL\n"); + return -EINVAL; + } + } + + rc = bnxt_hwrm_ring_alloc(bp, cp_ring, ring_type, cp_ring_index, + HWRM_NA_SIGNATURE, nq_ring_id, 0); + if (rc) + return rc; + + cpr->cp_cons = 0; + bnxt_set_db(bp, &cpr->cp_db, ring_type, cp_ring_index, + cp_ring->fw_ring_id); + bnxt_db_cq(cpr); + + return 0; +} + +int bnxt_alloc_rxtx_nq_ring(struct bnxt *bp) +{ + struct bnxt_cp_ring_info *nqr; + struct bnxt_ring *ring; + int ring_index = BNXT_NUM_ASYNC_CPR(bp); + unsigned int socket_id; + uint8_t ring_type; + int rc = 0; + + if (!BNXT_HAS_NQ(bp) || bp->rxtx_nq_ring) + return 0; + + socket_id = rte_lcore_to_socket_id(rte_get_master_lcore()); + + nqr = rte_zmalloc_socket("nqr", + sizeof(struct bnxt_cp_ring_info), + RTE_CACHE_LINE_SIZE, socket_id); + if (nqr == NULL) + return -ENOMEM; + + ring = rte_zmalloc_socket("bnxt_cp_ring_struct", + sizeof(struct bnxt_ring), + RTE_CACHE_LINE_SIZE, socket_id); + if (ring == NULL) { + rte_free(nqr); + return -ENOMEM; + } + + ring->bd = (void *)nqr->cp_desc_ring; + ring->bd_dma = nqr->cp_desc_mapping; + ring->ring_size = rte_align32pow2(DEFAULT_CP_RING_SIZE); + ring->ring_mask = ring->ring_size - 1; + ring->vmem_size = 0; + ring->vmem = NULL; + + nqr->cp_ring_struct = ring; + rc = bnxt_alloc_rings(bp, 0, NULL, NULL, nqr, NULL, "l2_nqr"); + if (rc) { + rte_free(ring); + rte_free(nqr); + return -ENOMEM; + } + + ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ; + + rc = bnxt_hwrm_ring_alloc(bp, ring, ring_type, ring_index, + HWRM_NA_SIGNATURE, HWRM_NA_SIGNATURE, 0); + if (rc) { + rte_free(ring); + rte_free(nqr); + return rc; + } + + bnxt_set_db(bp, &nqr->cp_db, ring_type, ring_index, + ring->fw_ring_id); + bnxt_db_nq(nqr); + + bp->rxtx_nq_ring = nqr; + + return 0; +} + +/* Free RX/TX NQ ring. 
*/ +void bnxt_free_rxtx_nq_ring(struct bnxt *bp) +{ + struct bnxt_cp_ring_info *nqr = bp->rxtx_nq_ring; + + if (!nqr) + return; + + bnxt_free_nq_ring(bp, nqr); + + bnxt_free_ring(nqr->cp_ring_struct); + rte_free(nqr->cp_ring_struct); + nqr->cp_ring_struct = NULL; + rte_free(nqr); + bp->rxtx_nq_ring = NULL; +} + +static int bnxt_alloc_rx_ring(struct bnxt *bp, int queue_index) +{ + struct bnxt_rx_queue *rxq = bp->rx_queues[queue_index]; + struct bnxt_cp_ring_info *cpr = rxq->cp_ring; + struct bnxt_ring *cp_ring = cpr->cp_ring_struct; + struct bnxt_rx_ring_info *rxr = rxq->rx_ring; + struct bnxt_ring *ring = rxr->rx_ring_struct; + uint8_t ring_type; + int rc = 0; + + ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_RX; + + rc = bnxt_hwrm_ring_alloc(bp, ring, ring_type, + queue_index, cpr->hw_stats_ctx_id, + cp_ring->fw_ring_id, 0); + if (rc) + return rc; + + rxr->rx_prod = 0; + if (BNXT_HAS_RING_GRPS(bp)) + bp->grp_info[queue_index].rx_fw_ring_id = ring->fw_ring_id; + bnxt_set_db(bp, &rxr->rx_db, ring_type, queue_index, ring->fw_ring_id); + bnxt_db_write(&rxr->rx_db, rxr->rx_prod); + + return 0; +} + +static int bnxt_alloc_rx_agg_ring(struct bnxt *bp, int queue_index) +{ + unsigned int map_idx = queue_index + bp->rx_cp_nr_rings; + struct bnxt_rx_queue *rxq = bp->rx_queues[queue_index]; + struct bnxt_cp_ring_info *cpr = rxq->cp_ring; + struct bnxt_ring *cp_ring = cpr->cp_ring_struct; + struct bnxt_rx_ring_info *rxr = rxq->rx_ring; + struct bnxt_ring *ring = rxr->ag_ring_struct; + uint32_t hw_stats_ctx_id = HWRM_NA_SIGNATURE; + uint8_t ring_type; + int rc = 0; + + ring->fw_rx_ring_id = rxr->rx_ring_struct->fw_ring_id; + + if (BNXT_CHIP_THOR(bp)) { + ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG; + hw_stats_ctx_id = cpr->hw_stats_ctx_id; + } else { + ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_RX; + } + + rc = bnxt_hwrm_ring_alloc(bp, ring, ring_type, map_idx, + hw_stats_ctx_id, cp_ring->fw_ring_id, 0); + + if (rc) + return rc; + + rxr->ag_prod = 0; + if (BNXT_HAS_RING_GRPS(bp)) + bp->grp_info[queue_index].ag_fw_ring_id = ring->fw_ring_id; + bnxt_set_db(bp, &rxr->ag_db, ring_type, map_idx, ring->fw_ring_id); + bnxt_db_write(&rxr->ag_db, rxr->ag_prod); + + return 0; +} + +int bnxt_alloc_hwrm_rx_ring(struct bnxt *bp, int queue_index) +{ + struct bnxt_rx_queue *rxq = bp->rx_queues[queue_index]; + struct bnxt_cp_ring_info *cpr = rxq->cp_ring; + struct bnxt_ring *cp_ring = cpr->cp_ring_struct; + struct bnxt_rx_ring_info *rxr = rxq->rx_ring; + int rc; + + rc = bnxt_alloc_cmpl_ring(bp, queue_index, cpr); + if (rc) + goto err_out; + + if (BNXT_HAS_RING_GRPS(bp)) { + bp->grp_info[queue_index].fw_stats_ctx = cpr->hw_stats_ctx_id; + bp->grp_info[queue_index].cp_fw_ring_id = cp_ring->fw_ring_id; + } + + if (!BNXT_NUM_ASYNC_CPR(bp) && !queue_index) { + /* + * If a dedicated async event completion ring is not enabled, + * use the first completion ring from PF or VF as the default + * completion ring for async event handling. 
+ */ + bp->async_cp_ring = cpr; + rc = bnxt_hwrm_set_async_event_cr(bp); + if (rc) + goto err_out; + } + + rc = bnxt_alloc_rx_ring(bp, queue_index); + if (rc) + goto err_out; + + rc = bnxt_alloc_rx_agg_ring(bp, queue_index); + if (rc) + goto err_out; + + if (rxq->rx_started) { + if (bnxt_init_one_rx_ring(rxq)) { + PMD_DRV_LOG(ERR, + "bnxt_init_one_rx_ring failed!\n"); + bnxt_rx_queue_release_op(rxq); + rc = -ENOMEM; + goto err_out; + } + bnxt_db_write(&rxr->rx_db, rxr->rx_prod); + bnxt_db_write(&rxr->ag_db, rxr->ag_prod); + } + rxq->index = queue_index; +#ifdef RTE_ARCH_X86 + bnxt_rxq_vec_setup(rxq); +#endif + + return 0; + +err_out: + PMD_DRV_LOG(ERR, + "Failed to allocate receive queue %d, rc %d.\n", + queue_index, rc); + return rc; +} + +/* Initialise all rings to -1, its used to free rings later if allocation + * of few rings fails. + */ +static void bnxt_init_all_rings(struct bnxt *bp) +{ + unsigned int i = 0; + struct bnxt_rx_queue *rxq; + struct bnxt_ring *cp_ring; + struct bnxt_ring *ring; + struct bnxt_rx_ring_info *rxr; + struct bnxt_tx_queue *txq; + + for (i = 0; i < bp->rx_cp_nr_rings; i++) { + rxq = bp->rx_queues[i]; + /* Rx-compl */ + cp_ring = rxq->cp_ring->cp_ring_struct; + cp_ring->fw_ring_id = INVALID_HW_RING_ID; + /* Rx-Reg */ + rxr = rxq->rx_ring; + ring = rxr->rx_ring_struct; + ring->fw_ring_id = INVALID_HW_RING_ID; + /* Rx-AGG */ + ring = rxr->ag_ring_struct; + ring->fw_ring_id = INVALID_HW_RING_ID; + } + for (i = 0; i < bp->tx_cp_nr_rings; i++) { + txq = bp->tx_queues[i]; + /* Tx cmpl */ + cp_ring = txq->cp_ring->cp_ring_struct; + cp_ring->fw_ring_id = INVALID_HW_RING_ID; + /*Tx Ring */ + ring = txq->tx_ring->tx_ring_struct; + ring->fw_ring_id = INVALID_HW_RING_ID; + } +} + +/* ring_grp usage: + * [0] = default completion ring + * [1 -> +rx_cp_nr_rings] = rx_cp, rx rings + * [1+rx_cp_nr_rings + 1 -> +tx_cp_nr_rings] = tx_cp, tx rings + */ +int bnxt_alloc_hwrm_rings(struct bnxt *bp) +{ + struct bnxt_coal coal; + unsigned int i; + uint8_t ring_type; + int rc = 0; + + bnxt_init_dflt_coal(&coal); + bnxt_init_all_rings(bp); + + for (i = 0; i < bp->rx_cp_nr_rings; i++) { + struct bnxt_rx_queue *rxq = bp->rx_queues[i]; + struct bnxt_cp_ring_info *cpr = rxq->cp_ring; + struct bnxt_ring *cp_ring = cpr->cp_ring_struct; + struct bnxt_rx_ring_info *rxr = rxq->rx_ring; + + if (bnxt_alloc_cmpl_ring(bp, i, cpr)) + goto err_out; + + if (BNXT_HAS_RING_GRPS(bp)) { + bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id; + bp->grp_info[i].cp_fw_ring_id = cp_ring->fw_ring_id; + } + + bnxt_hwrm_set_ring_coal(bp, &coal, cp_ring->fw_ring_id); + if (!BNXT_NUM_ASYNC_CPR(bp) && !i) { + /* + * If a dedicated async event completion ring is not + * enabled, use the first completion ring as the default + * completion ring for async event handling. 
+ */ + bp->async_cp_ring = cpr; + rc = bnxt_hwrm_set_async_event_cr(bp); + if (rc) + goto err_out; + } + + if (bnxt_alloc_rx_ring(bp, i)) + goto err_out; + + if (bnxt_alloc_rx_agg_ring(bp, i)) + goto err_out; + + if (bnxt_init_one_rx_ring(rxq)) { + PMD_DRV_LOG(ERR, "bnxt_init_one_rx_ring failed!\n"); + bnxt_rx_queue_release_op(rxq); + return -ENOMEM; + } + bnxt_db_write(&rxr->rx_db, rxr->rx_prod); + bnxt_db_write(&rxr->ag_db, rxr->ag_prod); + rxq->index = i; +#ifdef RTE_ARCH_X86 + bnxt_rxq_vec_setup(rxq); +#endif + } + + for (i = 0; i < bp->tx_cp_nr_rings; i++) { + struct bnxt_tx_queue *txq = bp->tx_queues[i]; + struct bnxt_cp_ring_info *cpr = txq->cp_ring; + struct bnxt_ring *cp_ring = cpr->cp_ring_struct; + struct bnxt_tx_ring_info *txr = txq->tx_ring; + struct bnxt_ring *ring = txr->tx_ring_struct; + unsigned int idx = i + bp->rx_cp_nr_rings; + uint16_t tx_cosq_id = 0; + + if (bnxt_alloc_cmpl_ring(bp, idx, cpr)) + goto err_out; + + if (bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY) + tx_cosq_id = bp->tx_cosq_id[i < bp->max_lltc ? i : 0]; + else + tx_cosq_id = bp->tx_cosq_id[0]; + /* Tx ring */ + ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_TX; + rc = bnxt_hwrm_ring_alloc(bp, ring, + ring_type, + i, cpr->hw_stats_ctx_id, + cp_ring->fw_ring_id, + tx_cosq_id); + if (rc) + goto err_out; + + bnxt_set_db(bp, &txr->tx_db, ring_type, i, ring->fw_ring_id); + txq->index = idx; + bnxt_hwrm_set_ring_coal(bp, &coal, cp_ring->fw_ring_id); + } + +err_out: + return rc; +} + +/* Allocate dedicated async completion ring. */ +int bnxt_alloc_async_cp_ring(struct bnxt *bp) +{ + struct bnxt_cp_ring_info *cpr = bp->async_cp_ring; + struct bnxt_ring *cp_ring; + uint8_t ring_type; + int rc; + + if (BNXT_NUM_ASYNC_CPR(bp) == 0 || cpr == NULL) + return 0; + + cp_ring = cpr->cp_ring_struct; + + if (BNXT_HAS_NQ(bp)) + ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ; + else + ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL; + + rc = bnxt_hwrm_ring_alloc(bp, cp_ring, ring_type, 0, + HWRM_NA_SIGNATURE, HWRM_NA_SIGNATURE, 0); + + if (rc) + return rc; + + cpr->cp_cons = 0; + cpr->valid = 0; + bnxt_set_db(bp, &cpr->cp_db, ring_type, 0, + cp_ring->fw_ring_id); + + if (BNXT_HAS_NQ(bp)) + bnxt_db_nq(cpr); + else + bnxt_db_cq(cpr); + + return bnxt_hwrm_set_async_event_cr(bp); +} + +/* Free dedicated async completion ring. 
*/ +void bnxt_free_async_cp_ring(struct bnxt *bp) +{ + struct bnxt_cp_ring_info *cpr = bp->async_cp_ring; + + if (BNXT_NUM_ASYNC_CPR(bp) == 0 || cpr == NULL) + return; + + if (BNXT_HAS_NQ(bp)) + bnxt_free_nq_ring(bp, cpr); + else + bnxt_free_cp_ring(bp, cpr); + + bnxt_free_ring(cpr->cp_ring_struct); + rte_free(cpr->cp_ring_struct); + cpr->cp_ring_struct = NULL; + rte_free(cpr); + bp->async_cp_ring = NULL; +} + +int bnxt_alloc_async_ring_struct(struct bnxt *bp) +{ + struct bnxt_cp_ring_info *cpr = NULL; + struct bnxt_ring *ring = NULL; + unsigned int socket_id; + + if (BNXT_NUM_ASYNC_CPR(bp) == 0) + return 0; + + socket_id = rte_lcore_to_socket_id(rte_get_master_lcore()); + + cpr = rte_zmalloc_socket("cpr", + sizeof(struct bnxt_cp_ring_info), + RTE_CACHE_LINE_SIZE, socket_id); + if (cpr == NULL) + return -ENOMEM; + + ring = rte_zmalloc_socket("bnxt_cp_ring_struct", + sizeof(struct bnxt_ring), + RTE_CACHE_LINE_SIZE, socket_id); + if (ring == NULL) { + rte_free(cpr); + return -ENOMEM; + } + + ring->bd = (void *)cpr->cp_desc_ring; + ring->bd_dma = cpr->cp_desc_mapping; + ring->ring_size = rte_align32pow2(DEFAULT_CP_RING_SIZE); + ring->ring_mask = ring->ring_size - 1; + ring->vmem_size = 0; + ring->vmem = NULL; + + bp->async_cp_ring = cpr; + cpr->cp_ring_struct = ring; + + return bnxt_alloc_rings(bp, 0, NULL, NULL, + bp->async_cp_ring, NULL, + "def_cp"); +} diff --git a/src/spdk/dpdk/drivers/net/bnxt/bnxt_ring.h b/src/spdk/dpdk/drivers/net/bnxt/bnxt_ring.h new file mode 100644 index 000000000..48a39d788 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnxt/bnxt_ring.h @@ -0,0 +1,127 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Broadcom + * All rights reserved. + */ + +#ifndef _BNXT_RING_H_ +#define _BNXT_RING_H_ + +#include + +#include + +#define RING_ADV(ring, idx, n) (((idx) + (n)) & (ring)->ring_mask) +#define RING_NEXT(ring, idx) RING_ADV(ring, idx, 1) + +#define DB_IDX_MASK 0xffffff +#define DB_IDX_VALID (0x1 << 26) +#define DB_IRQ_DIS (0x1 << 27) +#define DB_KEY_TX (0x0 << 28) +#define DB_KEY_RX (0x1 << 28) +#define DB_KEY_CP (0x2 << 28) +#define DB_KEY_ST (0x3 << 28) +#define DB_KEY_TX_PUSH (0x4 << 28) +#define DB_LONG_TX_PUSH (0x2 << 24) + +#define DEFAULT_CP_RING_SIZE 256 +#define DEFAULT_RX_RING_SIZE 256 +#define DEFAULT_TX_RING_SIZE 256 + +#define AGG_RING_SIZE_FACTOR 2 +#define AGG_RING_MULTIPLIER 2 + +/* These assume 4k pages */ +#define MAX_RX_DESC_CNT (8 * 1024) +#define MAX_TX_DESC_CNT (4 * 1024) +#define MAX_CP_DESC_CNT (16 * 1024) + +#define INVALID_HW_RING_ID ((uint16_t)-1) +#define INVALID_STATS_CTX_ID ((uint16_t)-1) + +struct bnxt_ring { + void *bd; + rte_iova_t bd_dma; + uint32_t ring_size; + uint32_t ring_mask; + + int vmem_size; + void **vmem; + + uint16_t fw_ring_id; /* Ring id filled by Chimp FW */ + uint16_t fw_rx_ring_id; + const void *mem_zone; +}; + +struct bnxt_ring_grp_info { + uint16_t fw_stats_ctx; + uint16_t fw_grp_id; + uint16_t rx_fw_ring_id; + uint16_t cp_fw_ring_id; + uint16_t ag_fw_ring_id; +}; + +struct bnxt; +struct bnxt_tx_ring_info; +struct bnxt_rx_ring_info; +struct bnxt_cp_ring_info; +void bnxt_free_ring(struct bnxt_ring *ring); +int bnxt_alloc_ring_grps(struct bnxt *bp); +int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx, + struct bnxt_tx_queue *txq, + struct bnxt_rx_queue *rxq, + struct bnxt_cp_ring_info *cp_ring_info, + struct bnxt_cp_ring_info *nq_ring_info, + const char *suffix); +int bnxt_alloc_hwrm_rx_ring(struct bnxt *bp, int queue_index); +int bnxt_alloc_hwrm_rings(struct bnxt *bp); +int 
bnxt_alloc_async_cp_ring(struct bnxt *bp); +void bnxt_free_async_cp_ring(struct bnxt *bp); +int bnxt_alloc_async_ring_struct(struct bnxt *bp); +int bnxt_alloc_rxtx_nq_ring(struct bnxt *bp); +void bnxt_free_rxtx_nq_ring(struct bnxt *bp); + +static inline void bnxt_db_write(struct bnxt_db_info *db, uint32_t idx) +{ + if (db->db_64) + rte_write64_relaxed(db->db_key64 | idx, db->doorbell); + else + rte_write32(db->db_key32 | idx, db->doorbell); +} + +/* Ring an NQ doorbell and disable interrupts for the ring. */ +static inline void bnxt_db_nq(struct bnxt_cp_ring_info *cpr) +{ + if (unlikely(!cpr->cp_db.db_64)) + return; + + rte_smp_wmb(); + rte_write64(cpr->cp_db.db_key64 | DBR_TYPE_NQ | + RING_CMP(cpr->cp_ring_struct, cpr->cp_raw_cons), + cpr->cp_db.doorbell); +} + +/* Ring an NQ doorbell and enable interrupts for the ring. */ +static inline void bnxt_db_nq_arm(struct bnxt_cp_ring_info *cpr) +{ + if (unlikely(!cpr->cp_db.db_64)) + return; + + rte_smp_wmb(); + rte_write64(cpr->cp_db.db_key64 | DBR_TYPE_NQ_ARM | + RING_CMP(cpr->cp_ring_struct, cpr->cp_raw_cons), + cpr->cp_db.doorbell); +} + +static inline void bnxt_db_cq(struct bnxt_cp_ring_info *cpr) +{ + struct bnxt_db_info *db = &cpr->cp_db; + uint32_t idx = RING_CMP(cpr->cp_ring_struct, cpr->cp_raw_cons); + + rte_smp_wmb(); + if (db->db_64) + rte_write64(db->db_key64 | idx, db->doorbell); + else + B_CP_DIS_DB(cpr, cpr->cp_raw_cons); +} + +#endif diff --git a/src/spdk/dpdk/drivers/net/bnxt/bnxt_rxq.c b/src/spdk/dpdk/drivers/net/bnxt/bnxt_rxq.c new file mode 100644 index 000000000..e42308a97 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnxt/bnxt_rxq.c @@ -0,0 +1,572 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Broadcom + * All rights reserved. + */ + +#include + +#include + +#include "bnxt.h" +#include "bnxt_filter.h" +#include "bnxt_hwrm.h" +#include "bnxt_ring.h" +#include "bnxt_rxq.h" +#include "bnxt_rxr.h" +#include "bnxt_vnic.h" +#include "hsi_struct_def_dpdk.h" + +/* + * RX Queues + */ + +void bnxt_free_rxq_stats(struct bnxt_rx_queue *rxq) +{ + if (rxq && rxq->cp_ring && rxq->cp_ring->hw_stats) + rxq->cp_ring->hw_stats = NULL; +} + +int bnxt_mq_rx_configure(struct bnxt *bp) +{ + struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf; + const struct rte_eth_vmdq_rx_conf *conf = + &dev_conf->rx_adv_conf.vmdq_rx_conf; + unsigned int i, j, nb_q_per_grp = 1, ring_idx = 0; + int start_grp_id, end_grp_id = 1, rc = 0; + struct bnxt_vnic_info *vnic; + struct bnxt_filter_info *filter; + enum rte_eth_nb_pools pools = 1, max_pools = 0; + struct bnxt_rx_queue *rxq; + + bp->nr_vnics = 0; + + /* Single queue mode */ + if (bp->rx_cp_nr_rings < 2) { + vnic = &bp->vnic_info[0]; + if (!vnic) { + PMD_DRV_LOG(ERR, "VNIC alloc failed\n"); + rc = -ENOMEM; + goto err_out; + } + vnic->flags |= BNXT_VNIC_INFO_BCAST; + bp->nr_vnics++; + + rxq = bp->eth_dev->data->rx_queues[0]; + rxq->vnic = vnic; + + vnic->func_default = true; + vnic->start_grp_id = 0; + vnic->end_grp_id = vnic->start_grp_id; + filter = bnxt_alloc_filter(bp); + if (!filter) { + PMD_DRV_LOG(ERR, "L2 filter alloc failed\n"); + rc = -ENOMEM; + goto err_out; + } + filter->mac_index = 0; + filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST; + STAILQ_INSERT_TAIL(&vnic->filter, filter, next); + goto out; + } + + /* Multi-queue mode */ + if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_DCB_RSS) { + /* VMDq ONLY, VMDq+RSS, VMDq+DCB, VMDq+DCB+RSS */ + + switch (dev_conf->rxmode.mq_mode) { + case ETH_MQ_RX_VMDQ_RSS: + case ETH_MQ_RX_VMDQ_ONLY: + case 
ETH_MQ_RX_VMDQ_DCB_RSS: + /* FALLTHROUGH */ + /* ETH_8/64_POOLs */ + pools = conf->nb_queue_pools; + /* For each pool, allocate MACVLAN CFA rule & VNIC */ + max_pools = RTE_MIN(bp->max_vnics, + RTE_MIN(bp->max_l2_ctx, + RTE_MIN(bp->max_rsscos_ctx, + ETH_64_POOLS))); + PMD_DRV_LOG(DEBUG, + "pools = %u max_pools = %u\n", + pools, max_pools); + if (pools > max_pools) + pools = max_pools; + break; + case ETH_MQ_RX_RSS: + pools = bp->rx_cosq_cnt ? bp->rx_cosq_cnt : 1; + break; + default: + PMD_DRV_LOG(ERR, "Unsupported mq_mod %d\n", + dev_conf->rxmode.mq_mode); + rc = -EINVAL; + goto err_out; + } + } else if (!dev_conf->rxmode.mq_mode) { + pools = bp->rx_cosq_cnt ? bp->rx_cosq_cnt : pools; + } + + pools = RTE_MIN(pools, bp->rx_cp_nr_rings); + nb_q_per_grp = bp->rx_cp_nr_rings / pools; + bp->rx_num_qs_per_vnic = nb_q_per_grp; + PMD_DRV_LOG(DEBUG, "pools = %u nb_q_per_grp = %u\n", + pools, nb_q_per_grp); + start_grp_id = 0; + end_grp_id = nb_q_per_grp; + + for (i = 0; i < pools; i++) { + vnic = &bp->vnic_info[i]; + if (!vnic) { + PMD_DRV_LOG(ERR, "VNIC alloc failed\n"); + rc = -ENOMEM; + goto err_out; + } + vnic->flags |= BNXT_VNIC_INFO_BCAST; + bp->nr_vnics++; + + for (j = 0; j < nb_q_per_grp; j++, ring_idx++) { + rxq = bp->eth_dev->data->rx_queues[ring_idx]; + rxq->vnic = vnic; + PMD_DRV_LOG(DEBUG, + "rxq[%d] = %p vnic[%d] = %p\n", + ring_idx, rxq, i, vnic); + } + if (i == 0) { + if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_DCB) { + bp->eth_dev->data->promiscuous = 1; + vnic->flags |= BNXT_VNIC_INFO_PROMISC; + } + vnic->func_default = true; + } + vnic->start_grp_id = start_grp_id; + vnic->end_grp_id = end_grp_id; + + if (i) { + if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_DCB || + !(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS)) + vnic->rss_dflt_cr = true; + goto skip_filter_allocation; + } + filter = bnxt_alloc_filter(bp); + if (!filter) { + PMD_DRV_LOG(ERR, "L2 filter alloc failed\n"); + rc = -ENOMEM; + goto err_out; + } + filter->mac_index = 0; + filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST; + /* + * TODO: Configure & associate CFA rule for + * each VNIC for each VMDq with MACVLAN, MACVLAN+TC + */ + STAILQ_INSERT_TAIL(&vnic->filter, filter, next); + +skip_filter_allocation: + start_grp_id = end_grp_id; + end_grp_id += nb_q_per_grp; + } + +out: + if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) { + struct rte_eth_rss_conf *rss = &dev_conf->rx_adv_conf.rss_conf; + + if (bp->flags & BNXT_FLAG_UPDATE_HASH) + bp->flags &= ~BNXT_FLAG_UPDATE_HASH; + + for (i = 0; i < bp->nr_vnics; i++) { + vnic = &bp->vnic_info[i]; + vnic->hash_type = + bnxt_rte_to_hwrm_hash_types(rss->rss_hf); + + /* + * Use the supplied key if the key length is + * acceptable and the rss_key is not NULL + */ + if (rss->rss_key && + rss->rss_key_len <= HW_HASH_KEY_SIZE) + memcpy(vnic->rss_hash_key, + rss->rss_key, rss->rss_key_len); + } + } + + return rc; + +err_out: + /* Free allocated vnic/filters */ + + return rc; +} + +void bnxt_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq) +{ + struct bnxt_sw_rx_bd *sw_ring; + struct bnxt_tpa_info *tpa_info; + uint16_t i; + + if (!rxq) + return; + + rte_spinlock_lock(&rxq->lock); + + sw_ring = rxq->rx_ring->rx_buf_ring; + if (sw_ring) { + for (i = 0; + i < rxq->rx_ring->rx_ring_struct->ring_size; i++) { + if (sw_ring[i].mbuf) { + rte_pktmbuf_free_seg(sw_ring[i].mbuf); + sw_ring[i].mbuf = NULL; + } + } + } + /* Free up mbufs in Agg ring */ + sw_ring = rxq->rx_ring->ag_buf_ring; + if (sw_ring) { + for (i = 0; + i < rxq->rx_ring->ag_ring_struct->ring_size; i++) { + if 
(sw_ring[i].mbuf) { + rte_pktmbuf_free_seg(sw_ring[i].mbuf); + sw_ring[i].mbuf = NULL; + } + } + } + + /* Free up mbufs in TPA */ + tpa_info = rxq->rx_ring->tpa_info; + if (tpa_info) { + int max_aggs = BNXT_TPA_MAX_AGGS(rxq->bp); + + for (i = 0; i < max_aggs; i++) { + if (tpa_info[i].mbuf) { + rte_pktmbuf_free_seg(tpa_info[i].mbuf); + tpa_info[i].mbuf = NULL; + } + } + } + + rte_spinlock_unlock(&rxq->lock); +} + +void bnxt_free_rx_mbufs(struct bnxt *bp) +{ + struct bnxt_rx_queue *rxq; + int i; + + for (i = 0; i < (int)bp->rx_nr_rings; i++) { + rxq = bp->rx_queues[i]; + bnxt_rx_queue_release_mbufs(rxq); + } +} + +void bnxt_rx_queue_release_op(void *rx_queue) +{ + struct bnxt_rx_queue *rxq = (struct bnxt_rx_queue *)rx_queue; + + if (rxq) { + if (is_bnxt_in_error(rxq->bp)) + return; + + bnxt_rx_queue_release_mbufs(rxq); + + /* Free RX ring hardware descriptors */ + bnxt_free_ring(rxq->rx_ring->rx_ring_struct); + /* Free RX Agg ring hardware descriptors */ + bnxt_free_ring(rxq->rx_ring->ag_ring_struct); + + /* Free RX completion ring hardware descriptors */ + bnxt_free_ring(rxq->cp_ring->cp_ring_struct); + + bnxt_free_rxq_stats(rxq); + rte_memzone_free(rxq->mz); + rxq->mz = NULL; + + rte_free(rxq); + } +} + +int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev, + uint16_t queue_idx, + uint16_t nb_desc, + unsigned int socket_id, + const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *mp) +{ + struct bnxt *bp = eth_dev->data->dev_private; + uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads; + struct bnxt_rx_queue *rxq; + int rc = 0; + uint8_t queue_state; + + rc = is_bnxt_in_error(bp); + if (rc) + return rc; + + if (queue_idx >= BNXT_MAX_RINGS(bp)) { + PMD_DRV_LOG(ERR, + "Cannot create Rx ring %d. Only %d rings available\n", + queue_idx, bp->max_rx_rings); + return -EINVAL; + } + + if (!nb_desc || nb_desc > MAX_RX_DESC_CNT) { + PMD_DRV_LOG(ERR, "nb_desc %d is invalid\n", nb_desc); + rc = -EINVAL; + goto out; + } + + if (eth_dev->data->rx_queues) { + rxq = eth_dev->data->rx_queues[queue_idx]; + if (rxq) + bnxt_rx_queue_release_op(rxq); + } + rxq = rte_zmalloc_socket("bnxt_rx_queue", sizeof(struct bnxt_rx_queue), + RTE_CACHE_LINE_SIZE, socket_id); + if (!rxq) { + PMD_DRV_LOG(ERR, "bnxt_rx_queue allocation failed!\n"); + rc = -ENOMEM; + goto out; + } + rxq->bp = bp; + rxq->mb_pool = mp; + rxq->nb_rx_desc = nb_desc; + rxq->rx_free_thresh = rx_conf->rx_free_thresh; + + PMD_DRV_LOG(DEBUG, "RX Buf MTU %d\n", eth_dev->data->mtu); + + rc = bnxt_init_rx_ring_struct(rxq, socket_id); + if (rc) + goto out; + + PMD_DRV_LOG(DEBUG, "RX Buf size is %d\n", rxq->rx_buf_size); + rxq->queue_id = queue_idx; + rxq->port_id = eth_dev->data->port_id; + if (rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC) + rxq->crc_len = RTE_ETHER_CRC_LEN; + else + rxq->crc_len = 0; + + eth_dev->data->rx_queues[queue_idx] = rxq; + /* Allocate RX ring hardware descriptors */ + if (bnxt_alloc_rings(bp, queue_idx, NULL, rxq, rxq->cp_ring, NULL, + "rxr")) { + PMD_DRV_LOG(ERR, + "ring_dma_zone_reserve for rx_ring failed!\n"); + bnxt_rx_queue_release_op(rxq); + rc = -ENOMEM; + goto out; + } + rte_atomic64_init(&rxq->rx_mbuf_alloc_fail); + + /* rxq 0 must not be stopped when used as async CPR */ + if (!BNXT_NUM_ASYNC_CPR(bp) && queue_idx == 0) + rxq->rx_deferred_start = false; + else + rxq->rx_deferred_start = rx_conf->rx_deferred_start; + + if (rxq->rx_deferred_start) { + queue_state = RTE_ETH_QUEUE_STATE_STOPPED; + rxq->rx_started = false; + } else { + queue_state = RTE_ETH_QUEUE_STATE_STARTED; + rxq->rx_started = true; + } + 
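 /* Publish the chosen state so the ethdev layer reflects the deferred-start setting. */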
eth_dev->data->rx_queue_state[queue_idx] = queue_state; + rte_spinlock_init(&rxq->lock); + + /* Configure mtu if it is different from what was configured before */ + if (!queue_idx) + bnxt_mtu_set_op(eth_dev, eth_dev->data->mtu); + +out: + return rc; +} + +int +bnxt_rx_queue_intr_enable_op(struct rte_eth_dev *eth_dev, uint16_t queue_id) +{ + struct bnxt *bp = eth_dev->data->dev_private; + struct bnxt_rx_queue *rxq; + struct bnxt_cp_ring_info *cpr; + int rc = 0; + + rc = is_bnxt_in_error(bp); + if (rc) + return rc; + + if (eth_dev->data->rx_queues) { + rxq = eth_dev->data->rx_queues[queue_id]; + if (!rxq) + return -EINVAL; + + cpr = rxq->cp_ring; + B_CP_DB_REARM(cpr, cpr->cp_raw_cons); + } + return rc; +} + +int +bnxt_rx_queue_intr_disable_op(struct rte_eth_dev *eth_dev, uint16_t queue_id) +{ + struct bnxt *bp = eth_dev->data->dev_private; + struct bnxt_rx_queue *rxq; + struct bnxt_cp_ring_info *cpr; + int rc = 0; + + rc = is_bnxt_in_error(bp); + if (rc) + return rc; + + if (eth_dev->data->rx_queues) { + rxq = eth_dev->data->rx_queues[queue_id]; + if (!rxq) + return -EINVAL; + + cpr = rxq->cp_ring; + B_CP_DB_DISARM(cpr); + } + return rc; +} + +int bnxt_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) +{ + struct bnxt *bp = dev->data->dev_private; + struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf; + struct bnxt_rx_queue *rxq = bp->rx_queues[rx_queue_id]; + struct bnxt_vnic_info *vnic = NULL; + int rc = 0; + + rc = is_bnxt_in_error(bp); + if (rc) + return rc; + + if (rxq == NULL) { + PMD_DRV_LOG(ERR, "Invalid Rx queue %d\n", rx_queue_id); + return -EINVAL; + } + + /* Set the queue state to started here. + * We check the status of the queue while posting buffers. + * If the queue is not started, we do not post buffers for Rx. + */ + rxq->rx_started = true; + dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; + + bnxt_free_hwrm_rx_ring(bp, rx_queue_id); + rc = bnxt_alloc_hwrm_rx_ring(bp, rx_queue_id); + if (rc) + return rc; + + if (BNXT_CHIP_THOR(bp)) { + /* Reconfigure default receive ring and MRU. 
*/ + bnxt_hwrm_vnic_cfg(bp, rxq->vnic); + } + PMD_DRV_LOG(INFO, "Rx queue started %d\n", rx_queue_id); + + if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) { + vnic = rxq->vnic; + + if (BNXT_HAS_RING_GRPS(bp)) { + if (vnic->fw_grp_ids[rx_queue_id] != INVALID_HW_RING_ID) + return 0; + + vnic->fw_grp_ids[rx_queue_id] = + bp->grp_info[rx_queue_id].fw_grp_id; + PMD_DRV_LOG(DEBUG, + "vnic = %p fw_grp_id = %d\n", + vnic, bp->grp_info[rx_queue_id].fw_grp_id); + } + + PMD_DRV_LOG(DEBUG, "Rx Queue Count %d\n", vnic->rx_queue_cnt); + rc = bnxt_vnic_rss_configure(bp, vnic); + } + + if (rc != 0) { + dev->data->rx_queue_state[rx_queue_id] = + RTE_ETH_QUEUE_STATE_STOPPED; + rxq->rx_started = false; + } + + PMD_DRV_LOG(INFO, + "queue %d, rx_deferred_start %d, state %d!\n", + rx_queue_id, rxq->rx_deferred_start, + bp->eth_dev->data->rx_queue_state[rx_queue_id]); + + return rc; +} + +int bnxt_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id) +{ + struct bnxt *bp = dev->data->dev_private; + struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf; + struct bnxt_vnic_info *vnic = NULL; + struct bnxt_rx_queue *rxq = NULL; + int active_queue_cnt = 0; + int i, rc = 0; + + rc = is_bnxt_in_error(bp); + if (rc) + return rc; + + /* For the stingray platform and other platforms needing tighter + * control of resource utilization, Rx CQ 0 also works as + * Default CQ for async notifications + */ + if (!BNXT_NUM_ASYNC_CPR(bp) && !rx_queue_id) { + PMD_DRV_LOG(ERR, "Cannot stop Rx queue id %d\n", rx_queue_id); + return -EINVAL; + } + + rxq = bp->rx_queues[rx_queue_id]; + if (!rxq) { + PMD_DRV_LOG(ERR, "Invalid Rx queue %d\n", rx_queue_id); + return -EINVAL; + } + + vnic = rxq->vnic; + if (!vnic) { + PMD_DRV_LOG(ERR, "VNIC not initialized for RxQ %d\n", + rx_queue_id); + return -EINVAL; + } + + dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; + rxq->rx_started = false; + PMD_DRV_LOG(DEBUG, "Rx queue stopped\n"); + + if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) { + if (BNXT_HAS_RING_GRPS(bp)) + vnic->fw_grp_ids[rx_queue_id] = INVALID_HW_RING_ID; + + PMD_DRV_LOG(DEBUG, "Rx Queue Count %d\n", vnic->rx_queue_cnt); + rc = bnxt_vnic_rss_configure(bp, vnic); + } + + if (BNXT_CHIP_THOR(bp)) { + /* Compute current number of active receive queues. */ + for (i = vnic->start_grp_id; i < vnic->end_grp_id; i++) + if (bp->rx_queues[i]->rx_started) + active_queue_cnt++; + + /* + * For Thor, we need to ensure that the VNIC default receive + * ring corresponds to an active receive queue. When no queue + * is active, we need to temporarily set the MRU to zero so + * that packets are dropped early in the receive pipeline in + * order to prevent the VNIC default receive ring from being + * accessed. + */ + if (active_queue_cnt == 0) { + uint16_t saved_mru = vnic->mru; + + vnic->mru = 0; + /* Reconfigure default receive ring and MRU. */ + bnxt_hwrm_vnic_cfg(bp, vnic); + vnic->mru = saved_mru; + } else { + /* Reconfigure default receive ring. */ + bnxt_hwrm_vnic_cfg(bp, vnic); + } + } + + if (rc == 0) + bnxt_rx_queue_release_mbufs(rxq); + + return rc; +} diff --git a/src/spdk/dpdk/drivers/net/bnxt/bnxt_rxq.h b/src/spdk/dpdk/drivers/net/bnxt/bnxt_rxq.h new file mode 100644 index 000000000..4f5182d9e --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnxt/bnxt_rxq.h @@ -0,0 +1,65 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Broadcom + * All rights reserved. 
+ */ + +#ifndef _BNXT_RQX_H_ +#define _BNXT_RQX_H_ + +struct bnxt; +struct bnxt_rx_ring_info; +struct bnxt_cp_ring_info; +struct bnxt_rx_queue { + rte_spinlock_t lock; /* Synchronize between rx_queue_stop + * and fast path + */ + struct rte_mempool *mb_pool; /* mbuf pool for RX ring */ + struct rte_mbuf *pkt_first_seg; /* 1st seg of pkt */ + struct rte_mbuf *pkt_last_seg; /* Last seg of pkt */ + uint64_t mbuf_initializer; /* val to init mbuf */ + uint16_t nb_rx_desc; /* num of RX desc */ + uint16_t rx_tail; /* cur val of RDT register */ + uint16_t nb_rx_hold; /* num held free RX desc */ + uint16_t rx_free_thresh; /* max free RX desc to hold */ + uint16_t queue_id; /* RX queue index */ +#ifdef RTE_ARCH_X86 + uint16_t rxrearm_nb; /* number of descs to reinit. */ + uint16_t rxrearm_start; /* next desc index to reinit. */ +#endif + uint16_t reg_idx; /* RX queue register index */ + uint16_t port_id; /* Device port identifier */ + uint8_t crc_len; /* 0 if CRC stripped, 4 otherwise */ + uint8_t rx_deferred_start; /* not in global dev start */ + uint8_t rx_started; /* RX queue is started */ + + struct bnxt *bp; + int index; + struct bnxt_vnic_info *vnic; + + uint32_t rx_buf_size; + struct bnxt_rx_ring_info *rx_ring; + struct bnxt_cp_ring_info *cp_ring; + rte_atomic64_t rx_mbuf_alloc_fail; + const struct rte_memzone *mz; +}; + +void bnxt_free_rxq_stats(struct bnxt_rx_queue *rxq); +int bnxt_mq_rx_configure(struct bnxt *bp); +void bnxt_rx_queue_release_op(void *rx_queue); +int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev, + uint16_t queue_idx, + uint16_t nb_desc, + unsigned int socket_id, + const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *mp); +void bnxt_free_rx_mbufs(struct bnxt *bp); +int bnxt_rx_queue_intr_enable_op(struct rte_eth_dev *eth_dev, + uint16_t queue_id); +int bnxt_rx_queue_intr_disable_op(struct rte_eth_dev *eth_dev, + uint16_t queue_id); +int bnxt_rx_queue_start(struct rte_eth_dev *dev, + uint16_t rx_queue_id); +int bnxt_rx_queue_stop(struct rte_eth_dev *dev, + uint16_t rx_queue_id); +void bnxt_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq); +#endif diff --git a/src/spdk/dpdk/drivers/net/bnxt/bnxt_rxr.c b/src/spdk/dpdk/drivers/net/bnxt/bnxt_rxr.c new file mode 100644 index 000000000..ee1acb196 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnxt/bnxt_rxr.c @@ -0,0 +1,1031 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Broadcom + * All rights reserved. 
+ */ + +#include +#include + +#include +#include +#include +#include + +#include "bnxt.h" +#include "bnxt_ring.h" +#include "bnxt_rxr.h" +#include "bnxt_rxq.h" +#include "hsi_struct_def_dpdk.h" +#ifdef RTE_LIBRTE_IEEE1588 +#include "bnxt_hwrm.h" +#endif + +#include +#include + +/* + * RX Ring handling + */ + +static inline struct rte_mbuf *__bnxt_alloc_rx_data(struct rte_mempool *mb) +{ + struct rte_mbuf *data; + + data = rte_mbuf_raw_alloc(mb); + + return data; +} + +static inline int bnxt_alloc_rx_data(struct bnxt_rx_queue *rxq, + struct bnxt_rx_ring_info *rxr, + uint16_t prod) +{ + struct rx_prod_pkt_bd *rxbd = &rxr->rx_desc_ring[prod]; + struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod]; + struct rte_mbuf *mbuf; + + mbuf = __bnxt_alloc_rx_data(rxq->mb_pool); + if (!mbuf) { + rte_atomic64_inc(&rxq->rx_mbuf_alloc_fail); + return -ENOMEM; + } + + rx_buf->mbuf = mbuf; + mbuf->data_off = RTE_PKTMBUF_HEADROOM; + + rxbd->address = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf)); + + return 0; +} + +static inline int bnxt_alloc_ag_data(struct bnxt_rx_queue *rxq, + struct bnxt_rx_ring_info *rxr, + uint16_t prod) +{ + struct rx_prod_pkt_bd *rxbd = &rxr->ag_desc_ring[prod]; + struct bnxt_sw_rx_bd *rx_buf = &rxr->ag_buf_ring[prod]; + struct rte_mbuf *mbuf; + + if (rxbd == NULL) { + PMD_DRV_LOG(ERR, "Jumbo Frame. rxbd is NULL\n"); + return -EINVAL; + } + + if (rx_buf == NULL) { + PMD_DRV_LOG(ERR, "Jumbo Frame. rx_buf is NULL\n"); + return -EINVAL; + } + + mbuf = __bnxt_alloc_rx_data(rxq->mb_pool); + if (!mbuf) { + rte_atomic64_inc(&rxq->rx_mbuf_alloc_fail); + return -ENOMEM; + } + + rx_buf->mbuf = mbuf; + mbuf->data_off = RTE_PKTMBUF_HEADROOM; + + rxbd->address = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf)); + + return 0; +} + +static inline void bnxt_reuse_rx_mbuf(struct bnxt_rx_ring_info *rxr, + struct rte_mbuf *mbuf) +{ + uint16_t prod = RING_NEXT(rxr->rx_ring_struct, rxr->rx_prod); + struct bnxt_sw_rx_bd *prod_rx_buf; + struct rx_prod_pkt_bd *prod_bd; + + prod_rx_buf = &rxr->rx_buf_ring[prod]; + + RTE_ASSERT(prod_rx_buf->mbuf == NULL); + RTE_ASSERT(mbuf != NULL); + + prod_rx_buf->mbuf = mbuf; + + prod_bd = &rxr->rx_desc_ring[prod]; + + prod_bd->address = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf)); + + rxr->rx_prod = prod; +} + +static inline +struct rte_mbuf *bnxt_consume_rx_buf(struct bnxt_rx_ring_info *rxr, + uint16_t cons) +{ + struct bnxt_sw_rx_bd *cons_rx_buf; + struct rte_mbuf *mbuf; + + cons_rx_buf = &rxr->rx_buf_ring[cons]; + RTE_ASSERT(cons_rx_buf->mbuf != NULL); + mbuf = cons_rx_buf->mbuf; + cons_rx_buf->mbuf = NULL; + return mbuf; +} + +static void bnxt_tpa_start(struct bnxt_rx_queue *rxq, + struct rx_tpa_start_cmpl *tpa_start, + struct rx_tpa_start_cmpl_hi *tpa_start1) +{ + struct bnxt_rx_ring_info *rxr = rxq->rx_ring; + uint16_t agg_id; + uint16_t data_cons; + struct bnxt_tpa_info *tpa_info; + struct rte_mbuf *mbuf; + + agg_id = bnxt_tpa_start_agg_id(rxq->bp, tpa_start); + + data_cons = tpa_start->opaque; + tpa_info = &rxr->tpa_info[agg_id]; + + mbuf = bnxt_consume_rx_buf(rxr, data_cons); + + bnxt_reuse_rx_mbuf(rxr, tpa_info->mbuf); + + tpa_info->agg_count = 0; + tpa_info->mbuf = mbuf; + tpa_info->len = rte_le_to_cpu_32(tpa_start->len); + + mbuf->nb_segs = 1; + mbuf->next = NULL; + mbuf->pkt_len = rte_le_to_cpu_32(tpa_start->len); + mbuf->data_len = mbuf->pkt_len; + mbuf->port = rxq->port_id; + mbuf->ol_flags = PKT_RX_LRO; + if (likely(tpa_start->flags_type & + rte_cpu_to_le_32(RX_TPA_START_CMPL_FLAGS_RSS_VALID))) { + mbuf->hash.rss = 
rte_le_to_cpu_32(tpa_start->rss_hash); + mbuf->ol_flags |= PKT_RX_RSS_HASH; + } else { + mbuf->hash.fdir.id = rte_le_to_cpu_16(tpa_start1->cfa_code); + mbuf->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID; + } + if (tpa_start1->flags2 & + rte_cpu_to_le_32(RX_TPA_START_CMPL_FLAGS2_META_FORMAT_VLAN)) { + mbuf->vlan_tci = rte_le_to_cpu_32(tpa_start1->metadata); + mbuf->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED; + } + if (likely(tpa_start1->flags2 & + rte_cpu_to_le_32(RX_TPA_START_CMPL_FLAGS2_L4_CS_CALC))) + mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD; + + /* recycle next mbuf */ + data_cons = RING_NEXT(rxr->rx_ring_struct, data_cons); + bnxt_reuse_rx_mbuf(rxr, bnxt_consume_rx_buf(rxr, data_cons)); +} + +static int bnxt_agg_bufs_valid(struct bnxt_cp_ring_info *cpr, + uint8_t agg_bufs, uint32_t raw_cp_cons) +{ + uint16_t last_cp_cons; + struct rx_pkt_cmpl *agg_cmpl; + + raw_cp_cons = ADV_RAW_CMP(raw_cp_cons, agg_bufs); + last_cp_cons = RING_CMP(cpr->cp_ring_struct, raw_cp_cons); + agg_cmpl = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[last_cp_cons]; + cpr->valid = FLIP_VALID(raw_cp_cons, + cpr->cp_ring_struct->ring_mask, + cpr->valid); + return CMP_VALID(agg_cmpl, raw_cp_cons, cpr->cp_ring_struct); +} + +/* TPA consume agg buffer out of order, allocate connected data only */ +static int bnxt_prod_ag_mbuf(struct bnxt_rx_queue *rxq) +{ + struct bnxt_rx_ring_info *rxr = rxq->rx_ring; + uint16_t next = RING_NEXT(rxr->ag_ring_struct, rxr->ag_prod); + + /* TODO batch allocation for better performance */ + while (rte_bitmap_get(rxr->ag_bitmap, next)) { + if (unlikely(bnxt_alloc_ag_data(rxq, rxr, next))) { + PMD_DRV_LOG(ERR, + "agg mbuf alloc failed: prod=0x%x\n", next); + break; + } + rte_bitmap_clear(rxr->ag_bitmap, next); + rxr->ag_prod = next; + next = RING_NEXT(rxr->ag_ring_struct, next); + } + + return 0; +} + +static int bnxt_rx_pages(struct bnxt_rx_queue *rxq, + struct rte_mbuf *mbuf, uint32_t *tmp_raw_cons, + uint8_t agg_buf, struct bnxt_tpa_info *tpa_info) +{ + struct bnxt_cp_ring_info *cpr = rxq->cp_ring; + struct bnxt_rx_ring_info *rxr = rxq->rx_ring; + int i; + uint16_t cp_cons, ag_cons; + struct rx_pkt_cmpl *rxcmp; + struct rte_mbuf *last = mbuf; + bool is_thor_tpa = tpa_info && BNXT_CHIP_THOR(rxq->bp); + + for (i = 0; i < agg_buf; i++) { + struct bnxt_sw_rx_bd *ag_buf; + struct rte_mbuf *ag_mbuf; + + if (is_thor_tpa) { + rxcmp = (void *)&tpa_info->agg_arr[i]; + } else { + *tmp_raw_cons = NEXT_RAW_CMP(*tmp_raw_cons); + cp_cons = RING_CMP(cpr->cp_ring_struct, *tmp_raw_cons); + rxcmp = (struct rx_pkt_cmpl *) + &cpr->cp_desc_ring[cp_cons]; + } + +#ifdef BNXT_DEBUG + bnxt_dump_cmpl(cp_cons, rxcmp); +#endif + + ag_cons = rxcmp->opaque; + RTE_ASSERT(ag_cons <= rxr->ag_ring_struct->ring_mask); + ag_buf = &rxr->ag_buf_ring[ag_cons]; + ag_mbuf = ag_buf->mbuf; + RTE_ASSERT(ag_mbuf != NULL); + + ag_mbuf->data_len = rte_le_to_cpu_16(rxcmp->len); + + mbuf->nb_segs++; + mbuf->pkt_len += ag_mbuf->data_len; + + last->next = ag_mbuf; + last = ag_mbuf; + + ag_buf->mbuf = NULL; + + /* + * As aggregation buffer consumed out of order in TPA module, + * use bitmap to track freed slots to be allocated and notified + * to NIC + */ + rte_bitmap_set(rxr->ag_bitmap, ag_cons); + } + bnxt_prod_ag_mbuf(rxq); + return 0; +} + +static inline struct rte_mbuf *bnxt_tpa_end( + struct bnxt_rx_queue *rxq, + uint32_t *raw_cp_cons, + struct rx_tpa_end_cmpl *tpa_end, + struct rx_tpa_end_cmpl_hi *tpa_end1) +{ + struct bnxt_cp_ring_info *cpr = rxq->cp_ring; + struct bnxt_rx_ring_info *rxr = rxq->rx_ring; + uint16_t agg_id; + struct 
rte_mbuf *mbuf; + uint8_t agg_bufs; + uint8_t payload_offset; + struct bnxt_tpa_info *tpa_info; + + if (BNXT_CHIP_THOR(rxq->bp)) { + struct rx_tpa_v2_end_cmpl *th_tpa_end; + struct rx_tpa_v2_end_cmpl_hi *th_tpa_end1; + + th_tpa_end = (void *)tpa_end; + th_tpa_end1 = (void *)tpa_end1; + agg_id = BNXT_TPA_END_AGG_ID_TH(th_tpa_end); + agg_bufs = BNXT_TPA_END_AGG_BUFS_TH(th_tpa_end1); + payload_offset = th_tpa_end1->payload_offset; + } else { + agg_id = BNXT_TPA_END_AGG_ID(tpa_end); + agg_bufs = BNXT_TPA_END_AGG_BUFS(tpa_end); + if (!bnxt_agg_bufs_valid(cpr, agg_bufs, *raw_cp_cons)) + return NULL; + payload_offset = tpa_end->payload_offset; + } + + tpa_info = &rxr->tpa_info[agg_id]; + mbuf = tpa_info->mbuf; + RTE_ASSERT(mbuf != NULL); + + rte_prefetch0(mbuf); + if (agg_bufs) { + bnxt_rx_pages(rxq, mbuf, raw_cp_cons, agg_bufs, tpa_info); + } + mbuf->l4_len = payload_offset; + + struct rte_mbuf *new_data = __bnxt_alloc_rx_data(rxq->mb_pool); + RTE_ASSERT(new_data != NULL); + if (!new_data) { + rte_atomic64_inc(&rxq->rx_mbuf_alloc_fail); + return NULL; + } + tpa_info->mbuf = new_data; + + return mbuf; +} + +static uint32_t +bnxt_parse_pkt_type(struct rx_pkt_cmpl *rxcmp, struct rx_pkt_cmpl_hi *rxcmp1) +{ + uint32_t l3, pkt_type = 0; + uint32_t t_ipcs = 0, ip6 = 0, vlan = 0; + uint32_t flags_type; + + vlan = !!(rxcmp1->flags2 & + rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN)); + pkt_type |= vlan ? RTE_PTYPE_L2_ETHER_VLAN : RTE_PTYPE_L2_ETHER; + + t_ipcs = !!(rxcmp1->flags2 & + rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC)); + ip6 = !!(rxcmp1->flags2 & + rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_IP_TYPE)); + + flags_type = rxcmp->flags_type & + rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS_ITYPE_MASK); + + if (!t_ipcs && !ip6) + l3 = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN; + else if (!t_ipcs && ip6) + l3 = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN; + else if (t_ipcs && !ip6) + l3 = RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN; + else + l3 = RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN; + + switch (flags_type) { + case RTE_LE32(RX_PKT_CMPL_FLAGS_ITYPE_ICMP): + if (!t_ipcs) + pkt_type |= l3 | RTE_PTYPE_L4_ICMP; + else + pkt_type |= l3 | RTE_PTYPE_INNER_L4_ICMP; + break; + + case RTE_LE32(RX_PKT_CMPL_FLAGS_ITYPE_TCP): + if (!t_ipcs) + pkt_type |= l3 | RTE_PTYPE_L4_TCP; + else + pkt_type |= l3 | RTE_PTYPE_INNER_L4_TCP; + break; + + case RTE_LE32(RX_PKT_CMPL_FLAGS_ITYPE_UDP): + if (!t_ipcs) + pkt_type |= l3 | RTE_PTYPE_L4_UDP; + else + pkt_type |= l3 | RTE_PTYPE_INNER_L4_UDP; + break; + + case RTE_LE32(RX_PKT_CMPL_FLAGS_ITYPE_IP): + pkt_type |= l3; + break; + } + + return pkt_type; +} + +#ifdef RTE_LIBRTE_IEEE1588 +static void +bnxt_get_rx_ts_thor(struct bnxt *bp, uint32_t rx_ts_cmpl) +{ + uint64_t systime_cycles = 0; + + if (!BNXT_CHIP_THOR(bp)) + return; + + /* On Thor, Rx timestamps are provided directly in the + * Rx completion records to the driver. Only 32 bits of + * the timestamp is present in the completion. 
Driver needs + * to read the current 48 bit free running timer using the + * HWRM_PORT_TS_QUERY command and combine the upper 16 bits + * from the HWRM response with the lower 32 bits in the + * Rx completion to produce the 48 bit timestamp for the Rx packet + */ + bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_CURRENT_TIME, + &systime_cycles); + bp->ptp_cfg->rx_timestamp = (systime_cycles & 0xFFFF00000000); + bp->ptp_cfg->rx_timestamp |= rx_ts_cmpl; +} +#endif + +static void +bnxt_ulp_set_mark_in_mbuf(struct bnxt *bp, struct rx_pkt_cmpl_hi *rxcmp1, + struct rte_mbuf *mbuf) +{ + uint32_t cfa_code; + uint32_t meta_fmt; + uint32_t meta; + bool gfid = false; + uint32_t mark_id; + uint32_t flags2; + int rc; + + cfa_code = rte_le_to_cpu_16(rxcmp1->cfa_code); + flags2 = rte_le_to_cpu_32(rxcmp1->flags2); + meta = rte_le_to_cpu_32(rxcmp1->metadata); + + /* + * The flags field holds extra bits of info from [6:4] + * which indicate if the flow is in TCAM or EM or EEM + */ + meta_fmt = (flags2 & BNXT_CFA_META_FMT_MASK) >> + BNXT_CFA_META_FMT_SHFT; + + switch (meta_fmt) { + case 0: + /* Not an LFID or GFID, a flush cmd. */ + goto skip_mark; + case 4: + case 5: + /* + * EM/TCAM case + * Assume that EM doesn't support Mark due to GFID + * collisions with EEM. Simply return without setting the mark + * in the mbuf. + */ + if (BNXT_CFA_META_EM_TEST(meta)) + goto skip_mark; + /* + * It is a TCAM entry, so it is an LFID. The TCAM IDX and Mode + * can also be determined by decoding the meta_data. We are not + * using these for now. + */ + break; + case 6: + case 7: + /* EEM Case, only using gfid in EEM for now. */ + gfid = true; + + /* + * For EEM flows, The first part of cfa_code is 16 bits. + * The second part is embedded in the + * metadata field from bit 19 onwards. The driver needs to + * ignore the first 19 bits of metadata and use the next 12 + * bits as higher 12 bits of cfa_code. + */ + meta >>= BNXT_RX_META_CFA_CODE_SHIFT; + cfa_code |= meta << BNXT_CFA_CODE_META_SHIFT; + break; + default: + /* For other values, the cfa_code is assumed to be an LFID. */ + break; + } + + if (cfa_code) { + rc = ulp_mark_db_mark_get(bp->ulp_ctx, gfid, + cfa_code, &mark_id); + if (!rc) { + /* Got the mark, write it to the mbuf and return */ + mbuf->hash.fdir.hi = mark_id; + mbuf->udata64 = (cfa_code & 0xffffffffull) << 32; + mbuf->hash.fdir.id = rxcmp1->cfa_code; + mbuf->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID; + return; + } + } + +skip_mark: + mbuf->hash.fdir.hi = 0; + mbuf->hash.fdir.id = 0; +} + +void bnxt_set_mark_in_mbuf(struct bnxt *bp, + struct rx_pkt_cmpl_hi *rxcmp1, + struct rte_mbuf *mbuf) +{ + uint32_t cfa_code = 0; + uint8_t meta_fmt = 0; + uint16_t flags2 = 0; + uint32_t meta = 0; + + cfa_code = rte_le_to_cpu_16(rxcmp1->cfa_code); + if (!cfa_code) + return; + + if (cfa_code && !bp->mark_table[cfa_code].valid) + return; + + flags2 = rte_le_to_cpu_16(rxcmp1->flags2); + meta = rte_le_to_cpu_32(rxcmp1->metadata); + if (meta) { + meta >>= BNXT_RX_META_CFA_CODE_SHIFT; + + /* The flags field holds extra bits of info from [6:4] + * which indicate if the flow is in TCAM or EM or EEM + */ + meta_fmt = (flags2 & BNXT_CFA_META_FMT_MASK) >> + BNXT_CFA_META_FMT_SHFT; + + /* meta_fmt == 4 => 'b100 => 'b10x => EM. + * meta_fmt == 5 => 'b101 => 'b10x => EM + VLAN + * meta_fmt == 6 => 'b110 => 'b11x => EEM + * meta_fmt == 7 => 'b111 => 'b11x => EEM + VLAN. 
+ */ + meta_fmt >>= BNXT_CFA_META_FMT_EM_EEM_SHFT; + } + + mbuf->hash.fdir.hi = bp->mark_table[cfa_code].mark_id; + mbuf->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID; +} + +static int bnxt_rx_pkt(struct rte_mbuf **rx_pkt, + struct bnxt_rx_queue *rxq, uint32_t *raw_cons) +{ + struct bnxt_cp_ring_info *cpr = rxq->cp_ring; + struct bnxt_rx_ring_info *rxr = rxq->rx_ring; + struct rx_pkt_cmpl *rxcmp; + struct rx_pkt_cmpl_hi *rxcmp1; + uint32_t tmp_raw_cons = *raw_cons; + uint16_t cons, prod, cp_cons = + RING_CMP(cpr->cp_ring_struct, tmp_raw_cons); + struct rte_mbuf *mbuf; + int rc = 0; + uint8_t agg_buf = 0; + uint16_t cmp_type; + uint32_t flags2_f = 0; + uint16_t flags_type; + struct bnxt *bp = rxq->bp; + + rxcmp = (struct rx_pkt_cmpl *) + &cpr->cp_desc_ring[cp_cons]; + + cmp_type = CMP_TYPE(rxcmp); + + if (cmp_type == RX_TPA_V2_ABUF_CMPL_TYPE_RX_TPA_AGG) { + struct rx_tpa_v2_abuf_cmpl *rx_agg = (void *)rxcmp; + uint16_t agg_id = rte_cpu_to_le_16(rx_agg->agg_id); + struct bnxt_tpa_info *tpa_info; + + tpa_info = &rxr->tpa_info[agg_id]; + RTE_ASSERT(tpa_info->agg_count < 16); + tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg; + rc = -EINVAL; /* Continue w/o new mbuf */ + goto next_rx; + } + + tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons); + cp_cons = RING_CMP(cpr->cp_ring_struct, tmp_raw_cons); + rxcmp1 = (struct rx_pkt_cmpl_hi *)&cpr->cp_desc_ring[cp_cons]; + + if (!CMP_VALID(rxcmp1, tmp_raw_cons, cpr->cp_ring_struct)) + return -EBUSY; + + cpr->valid = FLIP_VALID(cp_cons, + cpr->cp_ring_struct->ring_mask, + cpr->valid); + + if (cmp_type == RX_TPA_START_CMPL_TYPE_RX_TPA_START) { + bnxt_tpa_start(rxq, (struct rx_tpa_start_cmpl *)rxcmp, + (struct rx_tpa_start_cmpl_hi *)rxcmp1); + rc = -EINVAL; /* Continue w/o new mbuf */ + goto next_rx; + } else if (cmp_type == RX_TPA_END_CMPL_TYPE_RX_TPA_END) { + mbuf = bnxt_tpa_end(rxq, &tmp_raw_cons, + (struct rx_tpa_end_cmpl *)rxcmp, + (struct rx_tpa_end_cmpl_hi *)rxcmp1); + if (unlikely(!mbuf)) + return -EBUSY; + *rx_pkt = mbuf; + goto next_rx; + } else if (cmp_type != 0x11) { + rc = -EINVAL; + goto next_rx; + } + + agg_buf = (rxcmp->agg_bufs_v1 & RX_PKT_CMPL_AGG_BUFS_MASK) + >> RX_PKT_CMPL_AGG_BUFS_SFT; + if (agg_buf && !bnxt_agg_bufs_valid(cpr, agg_buf, tmp_raw_cons)) + return -EBUSY; + + prod = rxr->rx_prod; + + cons = rxcmp->opaque; + mbuf = bnxt_consume_rx_buf(rxr, cons); + if (mbuf == NULL) + return -EBUSY; + + rte_prefetch0(mbuf); + + mbuf->data_off = RTE_PKTMBUF_HEADROOM; + mbuf->nb_segs = 1; + mbuf->next = NULL; + mbuf->pkt_len = rxcmp->len; + mbuf->data_len = mbuf->pkt_len; + mbuf->port = rxq->port_id; + mbuf->ol_flags = 0; + + flags_type = rte_le_to_cpu_16(rxcmp->flags_type); + if (flags_type & RX_PKT_CMPL_FLAGS_RSS_VALID) { + mbuf->hash.rss = rxcmp->rss_hash; + mbuf->ol_flags |= PKT_RX_RSS_HASH; + } + + if (BNXT_TRUFLOW_EN(bp)) + bnxt_ulp_set_mark_in_mbuf(rxq->bp, rxcmp1, mbuf); + else + bnxt_set_mark_in_mbuf(rxq->bp, rxcmp1, mbuf); + +#ifdef RTE_LIBRTE_IEEE1588 + if (unlikely((flags_type & RX_PKT_CMPL_FLAGS_MASK) == + RX_PKT_CMPL_FLAGS_ITYPE_PTP_W_TIMESTAMP)) { + mbuf->ol_flags |= PKT_RX_IEEE1588_PTP | PKT_RX_IEEE1588_TMST; + bnxt_get_rx_ts_thor(rxq->bp, rxcmp1->reorder); + } +#endif + if (agg_buf) + bnxt_rx_pages(rxq, mbuf, &tmp_raw_cons, agg_buf, NULL); + + if (rxcmp1->flags2 & RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN) { + mbuf->vlan_tci = rxcmp1->metadata & + (RX_PKT_CMPL_METADATA_VID_MASK | + RX_PKT_CMPL_METADATA_DE | + RX_PKT_CMPL_METADATA_PRI_MASK); + mbuf->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED; + } + + flags2_f = flags2_0xf(rxcmp1); + /* 
IP Checksum */ + if (likely(IS_IP_NONTUNNEL_PKT(flags2_f))) { + if (unlikely(RX_CMP_IP_CS_ERROR(rxcmp1))) + mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD; + else if (unlikely(RX_CMP_IP_CS_UNKNOWN(rxcmp1))) + mbuf->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN; + else + mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD; + } else if (IS_IP_TUNNEL_PKT(flags2_f)) { + if (unlikely(RX_CMP_IP_OUTER_CS_ERROR(rxcmp1) || + RX_CMP_IP_CS_ERROR(rxcmp1))) + mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD; + else if (unlikely(RX_CMP_IP_CS_UNKNOWN(rxcmp1))) + mbuf->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN; + else + mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD; + } + + /* L4 Checksum */ + if (likely(IS_L4_NONTUNNEL_PKT(flags2_f))) { + if (unlikely(RX_CMP_L4_INNER_CS_ERR2(rxcmp1))) + mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD; + else + mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD; + } else if (IS_L4_TUNNEL_PKT(flags2_f)) { + if (unlikely(RX_CMP_L4_INNER_CS_ERR2(rxcmp1))) + mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD; + else + mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD; + if (unlikely(RX_CMP_L4_OUTER_CS_ERR2(rxcmp1))) { + mbuf->ol_flags |= PKT_RX_OUTER_L4_CKSUM_BAD; + } else if (unlikely(IS_L4_TUNNEL_PKT_ONLY_INNER_L4_CS + (flags2_f))) { + mbuf->ol_flags |= PKT_RX_OUTER_L4_CKSUM_UNKNOWN; + } else { + mbuf->ol_flags |= PKT_RX_OUTER_L4_CKSUM_GOOD; + } + } else if (unlikely(RX_CMP_L4_CS_UNKNOWN(rxcmp1))) { + mbuf->ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN; + } + + mbuf->packet_type = bnxt_parse_pkt_type(rxcmp, rxcmp1); + +#ifdef BNXT_DEBUG + if (rxcmp1->errors_v2 & RX_CMP_L2_ERRORS) { + /* Re-install the mbuf back to the rx ring */ + bnxt_reuse_rx_mbuf(rxr, cons, mbuf); + + rc = -EIO; + goto next_rx; + } +#endif + /* + * TODO: Redesign this.... + * If the allocation fails, the packet does not get received. + * Simply returning this will result in slowly falling behind + * on the producer ring buffers. + * Instead, "filling up" the producer just before ringing the + * doorbell could be a better solution since it will let the + * producer ring starve until memory is available again pushing + * the drops into hardware and getting them out of the driver + * allowing recovery to a full producer ring. + * + * This could also help with cache usage by preventing per-packet + * calls in favour of a tight loop with the same function being called + * in it. 
+ */ + prod = RING_NEXT(rxr->rx_ring_struct, prod); + if (bnxt_alloc_rx_data(rxq, rxr, prod)) { + PMD_DRV_LOG(ERR, "mbuf alloc failed with prod=0x%x\n", prod); + rc = -ENOMEM; + goto rx; + } + rxr->rx_prod = prod; + /* + * All MBUFs are allocated with the same size under DPDK, + * no optimization for rx_copy_thresh + */ +rx: + *rx_pkt = mbuf; + +next_rx: + + *raw_cons = tmp_raw_cons; + + return rc; +} + +uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + struct bnxt_rx_queue *rxq = rx_queue; + struct bnxt_cp_ring_info *cpr = rxq->cp_ring; + struct bnxt_rx_ring_info *rxr = rxq->rx_ring; + uint32_t raw_cons = cpr->cp_raw_cons; + uint32_t cons; + int nb_rx_pkts = 0; + struct rx_pkt_cmpl *rxcmp; + uint16_t prod = rxr->rx_prod; + uint16_t ag_prod = rxr->ag_prod; + int rc = 0; + bool evt = false; + + if (unlikely(is_bnxt_in_error(rxq->bp))) + return 0; + + /* If Rx Q was stopped return */ + if (unlikely(!rxq->rx_started || + !rte_spinlock_trylock(&rxq->lock))) + return 0; + + /* Handle RX burst request */ + while (1) { + cons = RING_CMP(cpr->cp_ring_struct, raw_cons); + rte_prefetch0(&cpr->cp_desc_ring[cons]); + rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons]; + + if (!CMP_VALID(rxcmp, raw_cons, cpr->cp_ring_struct)) + break; + cpr->valid = FLIP_VALID(cons, + cpr->cp_ring_struct->ring_mask, + cpr->valid); + + /* TODO: Avoid magic numbers... */ + if ((CMP_TYPE(rxcmp) & 0x30) == 0x10) { + rc = bnxt_rx_pkt(&rx_pkts[nb_rx_pkts], rxq, &raw_cons); + if (likely(!rc) || rc == -ENOMEM) + nb_rx_pkts++; + if (rc == -EBUSY) /* partial completion */ + break; + } else if (!BNXT_NUM_ASYNC_CPR(rxq->bp)) { + evt = + bnxt_event_hwrm_resp_handler(rxq->bp, + (struct cmpl_base *)rxcmp); + /* If the async event is Fatal error, return */ + if (unlikely(is_bnxt_in_error(rxq->bp))) + goto done; + } + + raw_cons = NEXT_RAW_CMP(raw_cons); + if (nb_rx_pkts == nb_pkts || evt) + break; + /* Post some Rx buf early in case of larger burst processing */ + if (nb_rx_pkts == BNXT_RX_POST_THRESH) + bnxt_db_write(&rxr->rx_db, rxr->rx_prod); + } + + cpr->cp_raw_cons = raw_cons; + if (!nb_rx_pkts && !evt) { + /* + * For PMD, there is no need to keep on pushing to REARM + * the doorbell if there are no new completions + */ + goto done; + } + + if (prod != rxr->rx_prod) + bnxt_db_write(&rxr->rx_db, rxr->rx_prod); + + /* Ring the AGG ring DB */ + if (ag_prod != rxr->ag_prod) + bnxt_db_write(&rxr->ag_db, rxr->ag_prod); + + bnxt_db_cq(cpr); + + /* Attempt to alloc Rx buf in case of a previous allocation failure. */ + if (rc == -ENOMEM) { + int i = RING_NEXT(rxr->rx_ring_struct, prod); + int cnt = nb_rx_pkts; + + for (; cnt; + i = RING_NEXT(rxr->rx_ring_struct, i), cnt--) { + struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i]; + + /* Buffer already allocated for this index. */ + if (rx_buf->mbuf != NULL) + continue; + + /* This slot is empty. Alloc buffer for Rx */ + if (!bnxt_alloc_rx_data(rxq, rxr, i)) { + rxr->rx_prod = i; + bnxt_db_write(&rxr->rx_db, rxr->rx_prod); + } else { + PMD_DRV_LOG(ERR, "Alloc mbuf failed\n"); + break; + } + } + } + +done: + rte_spinlock_unlock(&rxq->lock); + + return nb_rx_pkts; +} + +/* + * Dummy DPDK callback for RX. + * + * This function is used to temporarily replace the real callback during + * unsafe control operations on the queue, or in case of error. 
+ */ +uint16_t +bnxt_dummy_recv_pkts(void *rx_queue __rte_unused, + struct rte_mbuf **rx_pkts __rte_unused, + uint16_t nb_pkts __rte_unused) +{ + return 0; +} + +void bnxt_free_rx_rings(struct bnxt *bp) +{ + int i; + struct bnxt_rx_queue *rxq; + + if (!bp->rx_queues) + return; + + for (i = 0; i < (int)bp->rx_nr_rings; i++) { + rxq = bp->rx_queues[i]; + if (!rxq) + continue; + + bnxt_free_ring(rxq->rx_ring->rx_ring_struct); + rte_free(rxq->rx_ring->rx_ring_struct); + + /* Free the Aggregator ring */ + bnxt_free_ring(rxq->rx_ring->ag_ring_struct); + rte_free(rxq->rx_ring->ag_ring_struct); + rxq->rx_ring->ag_ring_struct = NULL; + + rte_free(rxq->rx_ring); + + bnxt_free_ring(rxq->cp_ring->cp_ring_struct); + rte_free(rxq->cp_ring->cp_ring_struct); + rte_free(rxq->cp_ring); + + rte_free(rxq); + bp->rx_queues[i] = NULL; + } +} + +int bnxt_init_rx_ring_struct(struct bnxt_rx_queue *rxq, unsigned int socket_id) +{ + struct bnxt_cp_ring_info *cpr; + struct bnxt_rx_ring_info *rxr; + struct bnxt_ring *ring; + + rxq->rx_buf_size = BNXT_MAX_PKT_LEN + sizeof(struct rte_mbuf); + + rxr = rte_zmalloc_socket("bnxt_rx_ring", + sizeof(struct bnxt_rx_ring_info), + RTE_CACHE_LINE_SIZE, socket_id); + if (rxr == NULL) + return -ENOMEM; + rxq->rx_ring = rxr; + + ring = rte_zmalloc_socket("bnxt_rx_ring_struct", + sizeof(struct bnxt_ring), + RTE_CACHE_LINE_SIZE, socket_id); + if (ring == NULL) + return -ENOMEM; + rxr->rx_ring_struct = ring; + ring->ring_size = rte_align32pow2(rxq->nb_rx_desc); + ring->ring_mask = ring->ring_size - 1; + ring->bd = (void *)rxr->rx_desc_ring; + ring->bd_dma = rxr->rx_desc_mapping; + ring->vmem_size = ring->ring_size * sizeof(struct bnxt_sw_rx_bd); + ring->vmem = (void **)&rxr->rx_buf_ring; + + cpr = rte_zmalloc_socket("bnxt_rx_ring", + sizeof(struct bnxt_cp_ring_info), + RTE_CACHE_LINE_SIZE, socket_id); + if (cpr == NULL) + return -ENOMEM; + rxq->cp_ring = cpr; + + ring = rte_zmalloc_socket("bnxt_rx_ring_struct", + sizeof(struct bnxt_ring), + RTE_CACHE_LINE_SIZE, socket_id); + if (ring == NULL) + return -ENOMEM; + cpr->cp_ring_struct = ring; + ring->ring_size = rte_align32pow2(rxr->rx_ring_struct->ring_size * + (2 + AGG_RING_SIZE_FACTOR)); + ring->ring_mask = ring->ring_size - 1; + ring->bd = (void *)cpr->cp_desc_ring; + ring->bd_dma = cpr->cp_desc_mapping; + ring->vmem_size = 0; + ring->vmem = NULL; + + /* Allocate Aggregator rings */ + ring = rte_zmalloc_socket("bnxt_rx_ring_struct", + sizeof(struct bnxt_ring), + RTE_CACHE_LINE_SIZE, socket_id); + if (ring == NULL) + return -ENOMEM; + rxr->ag_ring_struct = ring; + ring->ring_size = rte_align32pow2(rxq->nb_rx_desc * + AGG_RING_SIZE_FACTOR); + ring->ring_mask = ring->ring_size - 1; + ring->bd = (void *)rxr->ag_desc_ring; + ring->bd_dma = rxr->ag_desc_mapping; + ring->vmem_size = ring->ring_size * sizeof(struct bnxt_sw_rx_bd); + ring->vmem = (void **)&rxr->ag_buf_ring; + + return 0; +} + +static void bnxt_init_rxbds(struct bnxt_ring *ring, uint32_t type, + uint16_t len) +{ + uint32_t j; + struct rx_prod_pkt_bd *rx_bd_ring = (struct rx_prod_pkt_bd *)ring->bd; + + if (!rx_bd_ring) + return; + for (j = 0; j < ring->ring_size; j++) { + rx_bd_ring[j].flags_type = rte_cpu_to_le_16(type); + rx_bd_ring[j].len = rte_cpu_to_le_16(len); + rx_bd_ring[j].opaque = j; + } +} + +int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq) +{ + struct bnxt_rx_ring_info *rxr; + struct bnxt_ring *ring; + uint32_t prod, type; + unsigned int i; + uint16_t size; + + size = rte_pktmbuf_data_room_size(rxq->mb_pool) - RTE_PKTMBUF_HEADROOM; + size = 
RTE_MIN(BNXT_MAX_PKT_LEN, size); + + type = RX_PROD_PKT_BD_TYPE_RX_PROD_PKT | RX_PROD_PKT_BD_FLAGS_EOP_PAD; + + rxr = rxq->rx_ring; + ring = rxr->rx_ring_struct; + bnxt_init_rxbds(ring, type, size); + + prod = rxr->rx_prod; + for (i = 0; i < ring->ring_size; i++) { + if (unlikely(!rxr->rx_buf_ring[i].mbuf)) { + if (bnxt_alloc_rx_data(rxq, rxr, prod) != 0) { + PMD_DRV_LOG(WARNING, + "init'ed rx ring %d with %d/%d mbufs only\n", + rxq->queue_id, i, ring->ring_size); + break; + } + } + rxr->rx_prod = prod; + prod = RING_NEXT(rxr->rx_ring_struct, prod); + } + + ring = rxr->ag_ring_struct; + type = RX_PROD_AGG_BD_TYPE_RX_PROD_AGG; + bnxt_init_rxbds(ring, type, size); + prod = rxr->ag_prod; + + for (i = 0; i < ring->ring_size; i++) { + if (unlikely(!rxr->ag_buf_ring[i].mbuf)) { + if (bnxt_alloc_ag_data(rxq, rxr, prod) != 0) { + PMD_DRV_LOG(WARNING, + "init'ed AG ring %d with %d/%d mbufs only\n", + rxq->queue_id, i, ring->ring_size); + break; + } + } + rxr->ag_prod = prod; + prod = RING_NEXT(rxr->ag_ring_struct, prod); + } + PMD_DRV_LOG(DEBUG, "AGG Done!\n"); + + if (rxr->tpa_info) { + unsigned int max_aggs = BNXT_TPA_MAX_AGGS(rxq->bp); + + for (i = 0; i < max_aggs; i++) { + if (unlikely(!rxr->tpa_info[i].mbuf)) { + rxr->tpa_info[i].mbuf = + __bnxt_alloc_rx_data(rxq->mb_pool); + if (!rxr->tpa_info[i].mbuf) { + rte_atomic64_inc(&rxq->rx_mbuf_alloc_fail); + return -ENOMEM; + } + } + } + } + PMD_DRV_LOG(DEBUG, "TPA alloc Done!\n"); + + return 0; +} diff --git a/src/spdk/dpdk/drivers/net/bnxt/bnxt_rxr.h b/src/spdk/dpdk/drivers/net/bnxt/bnxt_rxr.h new file mode 100644 index 000000000..811dcd86b --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnxt/bnxt_rxr.h @@ -0,0 +1,244 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Broadcom + * All rights reserved. + */ + +#ifndef _BNXT_RXR_H_ +#define _BNXT_RXR_H_ +#include "hsi_struct_def_dpdk.h" + +#define B_RX_DB(db, prod) \ + (*(uint32_t *)db = (DB_KEY_RX | (prod))) + +#define BNXT_TPA_L4_SIZE(x) \ + { \ + typeof(x) hdr_info = (x); \ + (((hdr_info) & 0xf8000000) ? ((hdr_info) >> 27) : 32) \ + } + +#define BNXT_TPA_INNER_L3_OFF(hdr_info) \ + (((hdr_info) >> 18) & 0x1ff) + +#define BNXT_TPA_INNER_L2_OFF(hdr_info) \ + (((hdr_info) >> 9) & 0x1ff) + +#define BNXT_TPA_OUTER_L3_OFF(hdr_info) \ + ((hdr_info) & 0x1ff) + +#define flags2_0xf(rxcmp1) \ + (((rxcmp1)->flags2) & 0xf) + +/* IP non tunnel can be with or without L4- + * Ether / (vlan) / IP|IP6 / UDP|TCP|SCTP Or + * Ether / (vlan) / outer IP|IP6 / ICMP + * we use '==' instead of '&' because tunnel pkts have all 4 fields set. + */ +#define IS_IP_NONTUNNEL_PKT(flags2_f) \ + ( \ + ((flags2_f) == \ + (rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_IP_CS_CALC))) || \ + ((flags2_f) == \ + (rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_IP_CS_CALC | \ + RX_PKT_CMPL_FLAGS2_L4_CS_CALC))) \ + ) + +/* IP Tunnel pkt must have atleast tunnel-IP-calc set. + * again tunnel ie outer L4 is optional bcoz of + * Ether / (vlan) / outer IP|IP6 / GRE / Ether / IP|IP6 / UDP|TCP|SCTP + * Ether / (vlan) / outer IP|IP6 / outer UDP / VxLAN / Ether / IP|IP6 / + * UDP|TCP|SCTP + * Ether / (vlan) / outer IP|IP6 / outer UDP / VXLAN-GPE / Ether / IP|IP6 / + * UDP|TCP|SCTP + * Ether / (vlan) / outer IP|IP6 / outer UDP / VXLAN-GPE / IP|IP6 / + * UDP|TCP|SCTP + * Ether / (vlan) / outer IP|IP6 / GRE / IP|IP6 / UDP|TCP|SCTP + * Ether / (vlan) / outer IP|IP6 / IP|IP6 / UDP|TCP|SCTP + * also inner L3 chksum error is not taken into consideration by DPDK. 
+ */ +#define IS_IP_TUNNEL_PKT(flags2_f) \ + ((flags2_f) & rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC)) + +/* RX_PKT_CMPL_ERRORS_IP_CS_ERROR only for Non-tunnel pkts. + * For tunnel pkts RX_PKT_CMPL_ERRORS_IP_CS_ERROR is not accounted and treated + * as good csum pkt. + */ +#define RX_CMP_IP_CS_ERROR(rxcmp1) \ + ((rxcmp1)->errors_v2 & \ + rte_cpu_to_le_32(RX_PKT_CMPL_ERRORS_IP_CS_ERROR)) + +#define RX_CMP_IP_OUTER_CS_ERROR(rxcmp1) \ + ((rxcmp1)->errors_v2 & \ + rte_cpu_to_le_32(RX_PKT_CMPL_ERRORS_T_IP_CS_ERROR)) + +#define RX_CMP_IP_CS_BITS \ + rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_IP_CS_CALC | \ + RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC) + +#define RX_CMP_IP_CS_UNKNOWN(rxcmp1) \ + !((rxcmp1)->flags2 & RX_CMP_IP_CS_BITS) + +/* L4 non tunnel pkt- + * Ether / (vlan) / IP6 / UDP|TCP|SCTP + */ +#define IS_L4_NONTUNNEL_PKT(flags2_f) \ + ( \ + ((flags2_f) == \ + (rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_IP_CS_CALC | \ + RX_PKT_CMPL_FLAGS2_L4_CS_CALC)))) + +/* L4 tunnel pkt- + * Outer L4 is not mandatory. Eg: GRE- + * Ether / (vlan) / outer IP|IP6 / GRE / Ether / IP|IP6 / UDP|TCP|SCTP + * Ether / (vlan) / outer IP|IP6 / outer UDP / VxLAN / Ether / IP|IP6 / + * UDP|TCP|SCTP + */ +#define IS_L4_TUNNEL_PKT_INNER_OUTER_L4_CS(flags2_f) \ + ((flags2_f) == \ + (rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_IP_CS_CALC | \ + RX_PKT_CMPL_FLAGS2_L4_CS_CALC | \ + RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC | \ + RX_PKT_CMPL_FLAGS2_T_L4_CS_CALC))) + +#define IS_L4_TUNNEL_PKT_ONLY_INNER_L4_CS(flags2_f) \ + ((flags2_f) == \ + (rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_IP_CS_CALC | \ + RX_PKT_CMPL_FLAGS2_L4_CS_CALC | \ + RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC))) + +#define IS_L4_TUNNEL_PKT(flags2_f) \ + ( \ + IS_L4_TUNNEL_PKT_INNER_OUTER_L4_CS(flags2_f) || \ + IS_L4_TUNNEL_PKT_ONLY_INNER_L4_CS(flags2_f) \ + ) + +#define BNXT_TPA_START_AGG_ID_PRE_TH(cmp) \ + ((rte_le_to_cpu_16((cmp)->agg_id) & RX_TPA_START_CMPL_AGG_ID_MASK) >> \ + RX_TPA_START_CMPL_AGG_ID_SFT) + +#define BNXT_TPA_START_AGG_ID_TH(cmp) \ + rte_le_to_cpu_16((cmp)->agg_id) + +static inline uint16_t bnxt_tpa_start_agg_id(struct bnxt *bp, + struct rx_tpa_start_cmpl *cmp) +{ + if (BNXT_CHIP_THOR(bp)) + return BNXT_TPA_START_AGG_ID_TH(cmp); + else + return BNXT_TPA_START_AGG_ID_PRE_TH(cmp); +} + +#define BNXT_TPA_END_AGG_BUFS(cmp) \ + (((cmp)->agg_bufs_v1 & RX_TPA_END_CMPL_AGG_BUFS_MASK) \ + >> RX_TPA_END_CMPL_AGG_BUFS_SFT) + +#define BNXT_TPA_END_AGG_BUFS_TH(cmp) \ + ((cmp)->tpa_agg_bufs) + +#define BNXT_TPA_END_AGG_ID(cmp) \ + (((cmp)->agg_id & RX_TPA_END_CMPL_AGG_ID_MASK) >> \ + RX_TPA_END_CMPL_AGG_ID_SFT) + +#define BNXT_TPA_END_AGG_ID_TH(cmp) \ + rte_le_to_cpu_16((cmp)->agg_id) + +#define RX_CMP_L4_CS_BITS \ + rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_L4_CS_CALC) + +#define RX_CMP_L4_CS_UNKNOWN(rxcmp1) \ + !((rxcmp1)->flags2 & RX_CMP_L4_CS_BITS) + +#define RX_CMP_T_L4_CS_BITS \ + rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_T_L4_CS_CALC) + +#define RX_CMP_T_L4_CS_UNKNOWN(rxcmp1) \ + !((rxcmp1)->flags2 & RX_CMP_T_L4_CS_BITS) + +/* Outer L4 chksum error + */ +#define RX_CMP_L4_OUTER_CS_ERR2(rxcmp1) \ + ((rxcmp1)->errors_v2 & \ + rte_cpu_to_le_32(RX_PKT_CMPL_ERRORS_T_L4_CS_ERROR)) + +/* Inner L4 chksum error + */ +#define RX_CMP_L4_INNER_CS_ERR2(rxcmp1) \ + ((rxcmp1)->errors_v2 & \ + rte_cpu_to_le_32(RX_PKT_CMPL_ERRORS_L4_CS_ERROR)) + +#define BNXT_RX_POST_THRESH 32 + +enum pkt_hash_types { + PKT_HASH_TYPE_NONE, /* Undefined type */ + PKT_HASH_TYPE_L2, /* Input: src_MAC, dest_MAC */ + PKT_HASH_TYPE_L3, /* Input: src_IP, dst_IP */ + PKT_HASH_TYPE_L4, /* Input: src_IP, dst_IP, src_port, dst_port */ +}; + +struct 
bnxt_tpa_info { + struct rte_mbuf *mbuf; + uint16_t len; + uint32_t agg_count; + struct rx_tpa_v2_abuf_cmpl agg_arr[TPA_MAX_NUM_SEGS]; +}; + +struct bnxt_sw_rx_bd { + struct rte_mbuf *mbuf; /* data associated with RX descriptor */ +}; + +struct bnxt_rx_ring_info { + uint16_t rx_prod; + uint16_t ag_prod; + struct bnxt_db_info rx_db; + struct bnxt_db_info ag_db; + + struct rx_prod_pkt_bd *rx_desc_ring; + struct rx_prod_pkt_bd *ag_desc_ring; + struct bnxt_sw_rx_bd *rx_buf_ring; /* sw ring */ + struct bnxt_sw_rx_bd *ag_buf_ring; /* sw ring */ + + rte_iova_t rx_desc_mapping; + rte_iova_t ag_desc_mapping; + + struct bnxt_ring *rx_ring_struct; + struct bnxt_ring *ag_ring_struct; + + /* + * To deal with out of order return from TPA, use free buffer indicator + */ + struct rte_bitmap *ag_bitmap; + + struct bnxt_tpa_info *tpa_info; +}; + +uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); +uint16_t bnxt_dummy_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); +void bnxt_free_rx_rings(struct bnxt *bp); +int bnxt_init_rx_ring_struct(struct bnxt_rx_queue *rxq, unsigned int socket_id); +int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq); +int bnxt_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id); +int bnxt_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id); + +#ifdef RTE_ARCH_X86 +uint16_t bnxt_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); +int bnxt_rxq_vec_setup(struct bnxt_rx_queue *rxq); +#endif + +void bnxt_set_mark_in_mbuf(struct bnxt *bp, + struct rx_pkt_cmpl_hi *rxcmp1, + struct rte_mbuf *mbuf); + +#define BNXT_RX_META_CFA_CODE_SHIFT 19 +#define BNXT_CFA_CODE_META_SHIFT 16 +#define BNXT_RX_META_CFA_CODE_INT_ACT_REC_BIT 0x8000000 +#define BNXT_RX_META_CFA_CODE_EEM_BIT 0x4000000 +#define BNXT_CFA_META_FMT_MASK 0x70 +#define BNXT_CFA_META_FMT_SHFT 4 +#define BNXT_CFA_META_FMT_EM_EEM_SHFT 1 +#define BNXT_CFA_META_FMT_EEM 3 +#define BNXT_CFA_META_EEM_TCAM_SHIFT 31 +#define BNXT_CFA_META_EM_TEST(x) ((x) >> BNXT_CFA_META_EEM_TCAM_SHIFT) + +#endif diff --git a/src/spdk/dpdk/drivers/net/bnxt/bnxt_rxtx_vec_sse.c b/src/spdk/dpdk/drivers/net/bnxt/bnxt_rxtx_vec_sse.c new file mode 100644 index 000000000..8f73add9b --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnxt/bnxt_rxtx_vec_sse.c @@ -0,0 +1,516 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright(c) 2019 Broadcom All rights reserved. */ + +#include +#include + +#include +#include +#include +#include +#if defined(RTE_ARCH_X86) +#include +#else +#error "bnxt vector pmd: unsupported target." 
+#endif + +#include "bnxt.h" +#include "bnxt_cpr.h" +#include "bnxt_ring.h" +#include "bnxt_rxr.h" +#include "bnxt_rxq.h" +#include "hsi_struct_def_dpdk.h" + +#include "bnxt_txq.h" +#include "bnxt_txr.h" + +/* + * RX Ring handling + */ + +#define RTE_BNXT_MAX_RX_BURST 32 +#define RTE_BNXT_MAX_TX_BURST 32 +#define RTE_BNXT_RXQ_REARM_THRESH 32 +#define RTE_BNXT_DESCS_PER_LOOP 4 + +static inline void +bnxt_rxq_rearm(struct bnxt_rx_queue *rxq, struct bnxt_rx_ring_info *rxr) +{ + struct rx_prod_pkt_bd *rxbds = &rxr->rx_desc_ring[rxq->rxrearm_start]; + struct bnxt_sw_rx_bd *rx_bufs = &rxr->rx_buf_ring[rxq->rxrearm_start]; + struct rte_mbuf *mb0, *mb1; + int i; + + const __m128i hdr_room = _mm_set_epi64x(RTE_PKTMBUF_HEADROOM, 0); + const __m128i addrmask = _mm_set_epi64x(UINT64_MAX, 0); + + /* Pull RTE_BNXT_RXQ_REARM_THRESH more mbufs into the software ring */ + if (rte_mempool_get_bulk(rxq->mb_pool, + (void *)rx_bufs, + RTE_BNXT_RXQ_REARM_THRESH) < 0) { + rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed += + RTE_BNXT_RXQ_REARM_THRESH; + + return; + } + + /* Initialize the mbufs in vector, process 2 mbufs in one loop */ + for (i = 0; i < RTE_BNXT_RXQ_REARM_THRESH; i += 2, rx_bufs += 2) { + __m128i buf_addr0, buf_addr1; + __m128i rxbd0, rxbd1; + + mb0 = rx_bufs[0].mbuf; + mb1 = rx_bufs[1].mbuf; + + /* Load address fields from both mbufs */ + buf_addr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr); + buf_addr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr); + + /* Load both rx descriptors (preserving some existing fields) */ + rxbd0 = _mm_loadu_si128((__m128i *)(rxbds + 0)); + rxbd1 = _mm_loadu_si128((__m128i *)(rxbds + 1)); + + /* Add default offset to buffer address. */ + buf_addr0 = _mm_add_epi64(buf_addr0, hdr_room); + buf_addr1 = _mm_add_epi64(buf_addr1, hdr_room); + + /* Clear all fields except address. */ + buf_addr0 = _mm_and_si128(buf_addr0, addrmask); + buf_addr1 = _mm_and_si128(buf_addr1, addrmask); + + /* Clear address field in descriptor. */ + rxbd0 = _mm_andnot_si128(addrmask, rxbd0); + rxbd1 = _mm_andnot_si128(addrmask, rxbd1); + + /* Set address field in descriptor. */ + rxbd0 = _mm_add_epi64(rxbd0, buf_addr0); + rxbd1 = _mm_add_epi64(rxbd1, buf_addr1); + + /* Store descriptors to memory. */ + _mm_store_si128((__m128i *)(rxbds++), rxbd0); + _mm_store_si128((__m128i *)(rxbds++), rxbd1); + } + + rxq->rxrearm_start += RTE_BNXT_RXQ_REARM_THRESH; + bnxt_db_write(&rxr->rx_db, rxq->rxrearm_start - 1); + if (rxq->rxrearm_start >= rxq->nb_rx_desc) + rxq->rxrearm_start = 0; + + rxq->rxrearm_nb -= RTE_BNXT_RXQ_REARM_THRESH; +} + +static uint32_t +bnxt_parse_pkt_type(struct rx_pkt_cmpl *rxcmp, struct rx_pkt_cmpl_hi *rxcmp1) +{ + uint32_t l3, pkt_type = 0; + uint32_t t_ipcs = 0, ip6 = 0, vlan = 0; + uint32_t flags_type; + + vlan = !!(rxcmp1->flags2 & + rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN)); + pkt_type |= vlan ? 
RTE_PTYPE_L2_ETHER_VLAN : RTE_PTYPE_L2_ETHER; + + t_ipcs = !!(rxcmp1->flags2 & + rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC)); + ip6 = !!(rxcmp1->flags2 & + rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_IP_TYPE)); + + flags_type = rxcmp->flags_type & + rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS_ITYPE_MASK); + + if (!t_ipcs && !ip6) + l3 = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN; + else if (!t_ipcs && ip6) + l3 = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN; + else if (t_ipcs && !ip6) + l3 = RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN; + else + l3 = RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN; + + switch (flags_type) { + case RTE_LE32(RX_PKT_CMPL_FLAGS_ITYPE_ICMP): + if (!t_ipcs) + pkt_type |= l3 | RTE_PTYPE_L4_ICMP; + else + pkt_type |= l3 | RTE_PTYPE_INNER_L4_ICMP; + break; + + case RTE_LE32(RX_PKT_CMPL_FLAGS_ITYPE_TCP): + if (!t_ipcs) + pkt_type |= l3 | RTE_PTYPE_L4_TCP; + else + pkt_type |= l3 | RTE_PTYPE_INNER_L4_TCP; + break; + + case RTE_LE32(RX_PKT_CMPL_FLAGS_ITYPE_UDP): + if (!t_ipcs) + pkt_type |= l3 | RTE_PTYPE_L4_UDP; + else + pkt_type |= l3 | RTE_PTYPE_INNER_L4_UDP; + break; + + case RTE_LE32(RX_PKT_CMPL_FLAGS_ITYPE_IP): + pkt_type |= l3; + break; + } + + return pkt_type; +} + +static void +bnxt_parse_csum(struct rte_mbuf *mbuf, struct rx_pkt_cmpl_hi *rxcmp1) +{ + uint32_t flags; + + flags = flags2_0xf(rxcmp1); + /* IP Checksum */ + if (likely(IS_IP_NONTUNNEL_PKT(flags))) { + if (unlikely(RX_CMP_IP_CS_ERROR(rxcmp1))) + mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD; + else + mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD; + } else if (IS_IP_TUNNEL_PKT(flags)) { + if (unlikely(RX_CMP_IP_OUTER_CS_ERROR(rxcmp1) || + RX_CMP_IP_CS_ERROR(rxcmp1))) + mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD; + else + mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD; + } else if (unlikely(RX_CMP_IP_CS_UNKNOWN(rxcmp1))) { + mbuf->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN; + } + + /* L4 Checksum */ + if (likely(IS_L4_NONTUNNEL_PKT(flags))) { + if (unlikely(RX_CMP_L4_INNER_CS_ERR2(rxcmp1))) + mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD; + else + mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD; + } else if (IS_L4_TUNNEL_PKT(flags)) { + if (unlikely(RX_CMP_L4_INNER_CS_ERR2(rxcmp1))) + mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD; + else + mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD; + if (unlikely(RX_CMP_L4_OUTER_CS_ERR2(rxcmp1))) { + mbuf->ol_flags |= PKT_RX_OUTER_L4_CKSUM_BAD; + } else if (unlikely(IS_L4_TUNNEL_PKT_ONLY_INNER_L4_CS + (flags))) { + mbuf->ol_flags |= PKT_RX_OUTER_L4_CKSUM_UNKNOWN; + } else { + mbuf->ol_flags |= PKT_RX_OUTER_L4_CKSUM_GOOD; + } + } else if (unlikely(RX_CMP_L4_CS_UNKNOWN(rxcmp1))) { + mbuf->ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN; + } +} + +uint16_t +bnxt_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + struct bnxt_rx_queue *rxq = rx_queue; + struct bnxt_cp_ring_info *cpr = rxq->cp_ring; + struct bnxt_rx_ring_info *rxr = rxq->rx_ring; + uint32_t raw_cons = cpr->cp_raw_cons; + uint32_t cons; + int nb_rx_pkts = 0; + struct rx_pkt_cmpl *rxcmp; + bool evt = false; + const __m128i mbuf_init = _mm_set_epi64x(0, rxq->mbuf_initializer); + const __m128i shuf_msk = + _mm_set_epi8(15, 14, 13, 12, /* rss */ + 0xFF, 0xFF, /* vlan_tci (zeroes) */ + 3, 2, /* data_len */ + 0xFF, 0xFF, 3, 2, /* pkt_len */ + 0xFF, 0xFF, 0xFF, 0xFF); /* pkt_type (zeroes) */ + + /* If Rx Q was stopped return */ + if (unlikely(!rxq->rx_started)) + return 0; + + if (rxq->rxrearm_nb >= RTE_BNXT_RXQ_REARM_THRESH) + bnxt_rxq_rearm(rxq, rxr); + + /* Return no more than RTE_BNXT_MAX_RX_BURST per call. 
*/ + nb_pkts = RTE_MIN(nb_pkts, RTE_BNXT_MAX_RX_BURST); + + /* + * Make nb_pkts an integer multiple of RTE_BNXT_DESCS_PER_LOOP. + * nb_pkts < RTE_BNXT_DESCS_PER_LOOP, just return no packet + */ + nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_BNXT_DESCS_PER_LOOP); + if (!nb_pkts) + return 0; + + /* Handle RX burst request */ + while (1) { + cons = RING_CMP(cpr->cp_ring_struct, raw_cons); + + rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons]; + + if (!CMP_VALID(rxcmp, raw_cons, cpr->cp_ring_struct)) + break; + + if (likely(CMP_TYPE(rxcmp) == RX_PKT_CMPL_TYPE_RX_L2)) { + struct rx_pkt_cmpl_hi *rxcmp1; + uint32_t tmp_raw_cons; + uint16_t cp_cons; + struct rte_mbuf *mbuf; + __m128i mm_rxcmp, pkt_mb; + + tmp_raw_cons = NEXT_RAW_CMP(raw_cons); + cp_cons = RING_CMP(cpr->cp_ring_struct, tmp_raw_cons); + rxcmp1 = (struct rx_pkt_cmpl_hi *) + &cpr->cp_desc_ring[cp_cons]; + + if (!CMP_VALID(rxcmp1, tmp_raw_cons, + cpr->cp_ring_struct)) + break; + + raw_cons = tmp_raw_cons; + cons = rxcmp->opaque; + + mbuf = rxr->rx_buf_ring[cons].mbuf; + rte_prefetch0(mbuf); + rxr->rx_buf_ring[cons].mbuf = NULL; + + /* Set constant fields from mbuf initializer. */ + _mm_store_si128((__m128i *)&mbuf->rearm_data, + mbuf_init); + + /* Set mbuf pkt_len, data_len, and rss_hash fields. */ + mm_rxcmp = _mm_load_si128((__m128i *)rxcmp); + pkt_mb = _mm_shuffle_epi8(mm_rxcmp, shuf_msk); + _mm_storeu_si128((void *)&mbuf->rx_descriptor_fields1, + pkt_mb); + + rte_compiler_barrier(); + + if (rxcmp->flags_type & RX_PKT_CMPL_FLAGS_RSS_VALID) + mbuf->ol_flags |= PKT_RX_RSS_HASH; + + if (rxcmp1->flags2 & + RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN) { + mbuf->vlan_tci = rxcmp1->metadata & + (RX_PKT_CMPL_METADATA_VID_MASK | + RX_PKT_CMPL_METADATA_DE | + RX_PKT_CMPL_METADATA_PRI_MASK); + mbuf->ol_flags |= + PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED; + } + + bnxt_parse_csum(mbuf, rxcmp1); + mbuf->packet_type = bnxt_parse_pkt_type(rxcmp, rxcmp1); + + rx_pkts[nb_rx_pkts++] = mbuf; + } else if (!BNXT_NUM_ASYNC_CPR(rxq->bp)) { + evt = + bnxt_event_hwrm_resp_handler(rxq->bp, + (struct cmpl_base *)rxcmp); + } + + raw_cons = NEXT_RAW_CMP(raw_cons); + if (nb_rx_pkts == nb_pkts || evt) + break; + } + rxr->rx_prod = RING_ADV(rxr->rx_ring_struct, rxr->rx_prod, nb_rx_pkts); + + rxq->rxrearm_nb += nb_rx_pkts; + cpr->cp_raw_cons = raw_cons; + cpr->valid = !!(cpr->cp_raw_cons & cpr->cp_ring_struct->ring_size); + if (nb_rx_pkts || evt) + bnxt_db_cq(cpr); + + return nb_rx_pkts; +} + +static void +bnxt_tx_cmp_vec(struct bnxt_tx_queue *txq, int nr_pkts) +{ + struct bnxt_tx_ring_info *txr = txq->tx_ring; + struct rte_mbuf **free = txq->free; + uint16_t cons = txr->tx_cons; + unsigned int blk = 0; + + while (nr_pkts--) { + struct bnxt_sw_tx_bd *tx_buf; + struct rte_mbuf *mbuf; + + tx_buf = &txr->tx_buf_ring[cons]; + cons = RING_NEXT(txr->tx_ring_struct, cons); + mbuf = rte_pktmbuf_prefree_seg(tx_buf->mbuf); + tx_buf->mbuf = NULL; + + if (blk && mbuf->pool != free[0]->pool) { + rte_mempool_put_bulk(free[0]->pool, (void **)free, blk); + blk = 0; + } + free[blk++] = mbuf; + } + if (blk) + rte_mempool_put_bulk(free[0]->pool, (void **)free, blk); + + txr->tx_cons = cons; +} + +static void +bnxt_handle_tx_cp_vec(struct bnxt_tx_queue *txq) +{ + struct bnxt_cp_ring_info *cpr = txq->cp_ring; + uint32_t raw_cons = cpr->cp_raw_cons; + uint32_t cons; + uint32_t nb_tx_pkts = 0; + struct tx_cmpl *txcmp; + struct cmpl_base *cp_desc_ring = cpr->cp_desc_ring; + struct bnxt_ring *cp_ring_struct = cpr->cp_ring_struct; + uint32_t ring_mask = cp_ring_struct->ring_mask; + + do { + cons = 
RING_CMPL(ring_mask, raw_cons); + txcmp = (struct tx_cmpl *)&cp_desc_ring[cons]; + + if (!CMP_VALID(txcmp, raw_cons, cp_ring_struct)) + break; + + if (likely(CMP_TYPE(txcmp) == TX_CMPL_TYPE_TX_L2)) + nb_tx_pkts += txcmp->opaque; + else + RTE_LOG_DP(ERR, PMD, + "Unhandled CMP type %02x\n", + CMP_TYPE(txcmp)); + raw_cons = NEXT_RAW_CMP(raw_cons); + } while (nb_tx_pkts < ring_mask); + + cpr->valid = !!(raw_cons & cp_ring_struct->ring_size); + if (nb_tx_pkts) { + bnxt_tx_cmp_vec(txq, nb_tx_pkts); + cpr->cp_raw_cons = raw_cons; + bnxt_db_cq(cpr); + } +} + +#define TX_BD_FLAGS_CMPL ((1 << TX_BD_LONG_FLAGS_BD_CNT_SFT) | \ + TX_BD_SHORT_FLAGS_COAL_NOW | \ + TX_BD_SHORT_TYPE_TX_BD_SHORT | \ + TX_BD_LONG_FLAGS_PACKET_END) + +#define TX_BD_FLAGS_NOCMPL (TX_BD_FLAGS_CMPL | TX_BD_LONG_FLAGS_NO_CMPL) + +static inline uint32_t +bnxt_xmit_flags_len(uint16_t len, uint16_t flags) +{ + switch (len >> 9) { + case 0: + return flags | TX_BD_LONG_FLAGS_LHINT_LT512; + case 1: + return flags | TX_BD_LONG_FLAGS_LHINT_LT1K; + case 2: + return flags | TX_BD_LONG_FLAGS_LHINT_LT2K; + case 3: + return flags | TX_BD_LONG_FLAGS_LHINT_LT2K; + default: + return flags | TX_BD_LONG_FLAGS_LHINT_GTE2K; + } +} + +static uint16_t +bnxt_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + struct bnxt_tx_queue *txq = tx_queue; + struct bnxt_tx_ring_info *txr = txq->tx_ring; + uint16_t prod = txr->tx_prod; + struct rte_mbuf *tx_mbuf; + struct tx_bd_long *txbd = NULL; + struct bnxt_sw_tx_bd *tx_buf; + uint16_t to_send; + + nb_pkts = RTE_MIN(nb_pkts, bnxt_tx_avail(txq)); + + if (unlikely(nb_pkts == 0)) + return 0; + + /* Handle TX burst request */ + to_send = nb_pkts; + while (to_send) { + tx_mbuf = *tx_pkts++; + rte_prefetch0(tx_mbuf); + + tx_buf = &txr->tx_buf_ring[prod]; + tx_buf->mbuf = tx_mbuf; + tx_buf->nr_bds = 1; + + txbd = &txr->tx_desc_ring[prod]; + txbd->address = tx_mbuf->buf_iova + tx_mbuf->data_off; + txbd->len = tx_mbuf->data_len; + txbd->flags_type = bnxt_xmit_flags_len(tx_mbuf->data_len, + TX_BD_FLAGS_NOCMPL); + prod = RING_NEXT(txr->tx_ring_struct, prod); + to_send--; + } + + /* Request a completion for last packet in burst */ + if (txbd) { + txbd->opaque = nb_pkts; + txbd->flags_type &= ~TX_BD_LONG_FLAGS_NO_CMPL; + } + + rte_compiler_barrier(); + bnxt_db_write(&txr->tx_db, prod); + + txr->tx_prod = prod; + + return nb_pkts; +} + +uint16_t +bnxt_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + int nb_sent = 0; + struct bnxt_tx_queue *txq = tx_queue; + + /* Tx queue was stopped; wait for it to be restarted */ + if (unlikely(!txq->tx_started)) { + PMD_DRV_LOG(DEBUG, "Tx q stopped;return\n"); + return 0; + } + + /* Handle TX completions */ + if (bnxt_tx_bds_in_hw(txq) >= txq->tx_free_thresh) + bnxt_handle_tx_cp_vec(txq); + + while (nb_pkts) { + uint16_t ret, num; + + num = RTE_MIN(nb_pkts, RTE_BNXT_MAX_TX_BURST); + ret = bnxt_xmit_fixed_burst_vec(tx_queue, + &tx_pkts[nb_sent], + num); + nb_sent += ret; + nb_pkts -= ret; + if (ret < num) + break; + } + + return nb_sent; +} + +int __rte_cold +bnxt_rxq_vec_setup(struct bnxt_rx_queue *rxq) +{ + uintptr_t p; + struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */ + + mb_def.nb_segs = 1; + mb_def.data_off = RTE_PKTMBUF_HEADROOM; + mb_def.port = rxq->port_id; + rte_mbuf_refcnt_set(&mb_def, 1); + + /* prevent compiler reordering: rearm_data covers previous fields */ + rte_compiler_barrier(); + p = (uintptr_t)&mb_def.rearm_data; + rxq->mbuf_initializer = *(uint64_t *)p; + rxq->rxrearm_nb = 0; + 
rxq->rxrearm_start = 0; + return 0; +} diff --git a/src/spdk/dpdk/drivers/net/bnxt/bnxt_stats.c b/src/spdk/dpdk/drivers/net/bnxt/bnxt_stats.c new file mode 100644 index 000000000..cfe193284 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnxt/bnxt_stats.c @@ -0,0 +1,1029 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Broadcom + * All rights reserved. + */ + +#include + +#include +#include + +#include "bnxt.h" +#include "bnxt_cpr.h" +#include "bnxt_filter.h" +#include "bnxt_hwrm.h" +#include "bnxt_rxq.h" +#include "bnxt_stats.h" +#include "bnxt_txq.h" +#include "bnxt_vnic.h" +#include "hsi_struct_def_dpdk.h" + +static const struct bnxt_xstats_name_off bnxt_rx_stats_strings[] = { + {"rx_64b_frames", offsetof(struct rx_port_stats, + rx_64b_frames)}, + {"rx_65b_127b_frames", offsetof(struct rx_port_stats, + rx_65b_127b_frames)}, + {"rx_128b_255b_frames", offsetof(struct rx_port_stats, + rx_128b_255b_frames)}, + {"rx_256b_511b_frames", offsetof(struct rx_port_stats, + rx_256b_511b_frames)}, + {"rx_512b_1023b_frames", offsetof(struct rx_port_stats, + rx_512b_1023b_frames)}, + {"rx_1024b_1518b_frames", offsetof(struct rx_port_stats, + rx_1024b_1518b_frames)}, + {"rx_good_vlan_frames", offsetof(struct rx_port_stats, + rx_good_vlan_frames)}, + {"rx_1519b_2047b_frames", offsetof(struct rx_port_stats, + rx_1519b_2047b_frames)}, + {"rx_2048b_4095b_frames", offsetof(struct rx_port_stats, + rx_2048b_4095b_frames)}, + {"rx_4096b_9216b_frames", offsetof(struct rx_port_stats, + rx_4096b_9216b_frames)}, + {"rx_9217b_16383b_frames", offsetof(struct rx_port_stats, + rx_9217b_16383b_frames)}, + {"rx_total_frames", offsetof(struct rx_port_stats, + rx_total_frames)}, + {"rx_ucast_frames", offsetof(struct rx_port_stats, + rx_ucast_frames)}, + {"rx_mcast_frames", offsetof(struct rx_port_stats, + rx_mcast_frames)}, + {"rx_bcast_frames", offsetof(struct rx_port_stats, + rx_bcast_frames)}, + {"rx_fcs_err_frames", offsetof(struct rx_port_stats, + rx_fcs_err_frames)}, + {"rx_ctrl_frames", offsetof(struct rx_port_stats, + rx_ctrl_frames)}, + {"rx_pause_frames", offsetof(struct rx_port_stats, + rx_pause_frames)}, + {"rx_pfc_frames", offsetof(struct rx_port_stats, + rx_pfc_frames)}, + {"rx_unsupported_opcode_frames", offsetof(struct rx_port_stats, + rx_unsupported_opcode_frames)}, + {"rx_unsupported_da_pausepfc_frames", offsetof(struct rx_port_stats, + rx_unsupported_da_pausepfc_frames)}, + {"rx_wrong_sa_frames", offsetof(struct rx_port_stats, + rx_wrong_sa_frames)}, + {"rx_align_err_frames", offsetof(struct rx_port_stats, + rx_align_err_frames)}, + {"rx_oor_len_frames", offsetof(struct rx_port_stats, + rx_oor_len_frames)}, + {"rx_code_err_frames", offsetof(struct rx_port_stats, + rx_code_err_frames)}, + {"rx_false_carrier_frames", offsetof(struct rx_port_stats, + rx_false_carrier_frames)}, + {"rx_ovrsz_frames", offsetof(struct rx_port_stats, + rx_ovrsz_frames)}, + {"rx_jbr_frames", offsetof(struct rx_port_stats, + rx_jbr_frames)}, + {"rx_mtu_err_frames", offsetof(struct rx_port_stats, + rx_mtu_err_frames)}, + {"rx_match_crc_frames", offsetof(struct rx_port_stats, + rx_match_crc_frames)}, + {"rx_promiscuous_frames", offsetof(struct rx_port_stats, + rx_promiscuous_frames)}, + {"rx_tagged_frames", offsetof(struct rx_port_stats, + rx_tagged_frames)}, + {"rx_double_tagged_frames", offsetof(struct rx_port_stats, + rx_double_tagged_frames)}, + {"rx_trunc_frames", offsetof(struct rx_port_stats, + rx_trunc_frames)}, + {"rx_good_frames", offsetof(struct rx_port_stats, + rx_good_frames)}, + 
{"rx_sch_crc_err_frames", offsetof(struct rx_port_stats, + rx_sch_crc_err_frames)}, + {"rx_undrsz_frames", offsetof(struct rx_port_stats, + rx_undrsz_frames)}, + {"rx_frag_frames", offsetof(struct rx_port_stats, + rx_frag_frames)}, + {"rx_eee_lpi_events", offsetof(struct rx_port_stats, + rx_eee_lpi_events)}, + {"rx_eee_lpi_duration", offsetof(struct rx_port_stats, + rx_eee_lpi_duration)}, + {"rx_llfc_physical_msgs", offsetof(struct rx_port_stats, + rx_llfc_physical_msgs)}, + {"rx_llfc_logical_msgs", offsetof(struct rx_port_stats, + rx_llfc_logical_msgs)}, + {"rx_llfc_msgs_with_crc_err", offsetof(struct rx_port_stats, + rx_llfc_msgs_with_crc_err)}, + {"rx_hcfc_msgs", offsetof(struct rx_port_stats, + rx_hcfc_msgs)}, + {"rx_hcfc_msgs_with_crc_err", offsetof(struct rx_port_stats, + rx_hcfc_msgs_with_crc_err)}, + {"rx_bytes", offsetof(struct rx_port_stats, + rx_bytes)}, + {"rx_runt_bytes", offsetof(struct rx_port_stats, + rx_runt_bytes)}, + {"rx_runt_frames", offsetof(struct rx_port_stats, + rx_runt_frames)}, + {"rx_pfc_xon2xoff_frames_pri0", offsetof(struct rx_port_stats, + rx_pfc_xon2xoff_frames_pri0)}, + {"rx_pfc_xon2xoff_frames_pri1", offsetof(struct rx_port_stats, + rx_pfc_xon2xoff_frames_pri1)}, + {"rx_pfc_xon2xoff_frames_pri2", offsetof(struct rx_port_stats, + rx_pfc_xon2xoff_frames_pri2)}, + {"rx_pfc_xon2xoff_frames_pri3", offsetof(struct rx_port_stats, + rx_pfc_xon2xoff_frames_pri3)}, + {"rx_pfc_xon2xoff_frames_pri4", offsetof(struct rx_port_stats, + rx_pfc_xon2xoff_frames_pri4)}, + {"rx_pfc_xon2xoff_frames_pri5", offsetof(struct rx_port_stats, + rx_pfc_xon2xoff_frames_pri5)}, + {"rx_pfc_xon2xoff_frames_pri6", offsetof(struct rx_port_stats, + rx_pfc_xon2xoff_frames_pri6)}, + {"rx_pfc_xon2xoff_frames_pri7", offsetof(struct rx_port_stats, + rx_pfc_xon2xoff_frames_pri7)}, + {"rx_pfc_ena_frames_pri0", offsetof(struct rx_port_stats, + rx_pfc_ena_frames_pri0)}, + {"rx_pfc_ena_frames_pri1", offsetof(struct rx_port_stats, + rx_pfc_ena_frames_pri1)}, + {"rx_pfc_ena_frames_pri2", offsetof(struct rx_port_stats, + rx_pfc_ena_frames_pri2)}, + {"rx_pfc_ena_frames_pri3", offsetof(struct rx_port_stats, + rx_pfc_ena_frames_pri3)}, + {"rx_pfc_ena_frames_pri4", offsetof(struct rx_port_stats, + rx_pfc_ena_frames_pri4)}, + {"rx_pfc_ena_frames_pri5", offsetof(struct rx_port_stats, + rx_pfc_ena_frames_pri5)}, + {"rx_pfc_ena_frames_pri6", offsetof(struct rx_port_stats, + rx_pfc_ena_frames_pri6)}, + {"rx_pfc_ena_frames_pri7", offsetof(struct rx_port_stats, + rx_pfc_ena_frames_pri7)}, + {"rx_stat_discard", offsetof(struct rx_port_stats, + rx_stat_discard)}, + {"rx_stat_err", offsetof(struct rx_port_stats, + rx_stat_err)}, +}; + +static const struct bnxt_xstats_name_off bnxt_tx_stats_strings[] = { + {"tx_64b_frames", offsetof(struct tx_port_stats, + tx_64b_frames)}, + {"tx_65b_127b_frames", offsetof(struct tx_port_stats, + tx_65b_127b_frames)}, + {"tx_128b_255b_frames", offsetof(struct tx_port_stats, + tx_128b_255b_frames)}, + {"tx_256b_511b_frames", offsetof(struct tx_port_stats, + tx_256b_511b_frames)}, + {"tx_512b_1023b_frames", offsetof(struct tx_port_stats, + tx_512b_1023b_frames)}, + {"tx_1024b_1518b_frames", offsetof(struct tx_port_stats, + tx_1024b_1518b_frames)}, + {"tx_good_vlan_frames", offsetof(struct tx_port_stats, + tx_good_vlan_frames)}, + {"tx_1519b_2047b_frames", offsetof(struct tx_port_stats, + tx_1519b_2047b_frames)}, + {"tx_2048b_4095b_frames", offsetof(struct tx_port_stats, + tx_2048b_4095b_frames)}, + {"tx_4096b_9216b_frames", offsetof(struct tx_port_stats, + tx_4096b_9216b_frames)}, + 
{"tx_9217b_16383b_frames", offsetof(struct tx_port_stats, + tx_9217b_16383b_frames)}, + {"tx_good_frames", offsetof(struct tx_port_stats, + tx_good_frames)}, + {"tx_total_frames", offsetof(struct tx_port_stats, + tx_total_frames)}, + {"tx_ucast_frames", offsetof(struct tx_port_stats, + tx_ucast_frames)}, + {"tx_mcast_frames", offsetof(struct tx_port_stats, + tx_mcast_frames)}, + {"tx_bcast_frames", offsetof(struct tx_port_stats, + tx_bcast_frames)}, + {"tx_pause_frames", offsetof(struct tx_port_stats, + tx_pause_frames)}, + {"tx_pfc_frames", offsetof(struct tx_port_stats, + tx_pfc_frames)}, + {"tx_jabber_frames", offsetof(struct tx_port_stats, + tx_jabber_frames)}, + {"tx_fcs_err_frames", offsetof(struct tx_port_stats, + tx_fcs_err_frames)}, + {"tx_control_frames", offsetof(struct tx_port_stats, + tx_control_frames)}, + {"tx_oversz_frames", offsetof(struct tx_port_stats, + tx_oversz_frames)}, + {"tx_single_dfrl_frames", offsetof(struct tx_port_stats, + tx_single_dfrl_frames)}, + {"tx_multi_dfrl_frames", offsetof(struct tx_port_stats, + tx_multi_dfrl_frames)}, + {"tx_single_coll_frames", offsetof(struct tx_port_stats, + tx_single_coll_frames)}, + {"tx_multi_coll_frames", offsetof(struct tx_port_stats, + tx_multi_coll_frames)}, + {"tx_late_coll_frames", offsetof(struct tx_port_stats, + tx_late_coll_frames)}, + {"tx_excessive_coll_frames", offsetof(struct tx_port_stats, + tx_excessive_coll_frames)}, + {"tx_frag_frames", offsetof(struct tx_port_stats, + tx_frag_frames)}, + {"tx_err", offsetof(struct tx_port_stats, + tx_err)}, + {"tx_tagged_frames", offsetof(struct tx_port_stats, + tx_tagged_frames)}, + {"tx_dbl_tagged_frames", offsetof(struct tx_port_stats, + tx_dbl_tagged_frames)}, + {"tx_runt_frames", offsetof(struct tx_port_stats, + tx_runt_frames)}, + {"tx_fifo_underruns", offsetof(struct tx_port_stats, + tx_fifo_underruns)}, + {"tx_eee_lpi_events", offsetof(struct tx_port_stats, + tx_eee_lpi_events)}, + {"tx_eee_lpi_duration", offsetof(struct tx_port_stats, + tx_eee_lpi_duration)}, + {"tx_total_collisions", offsetof(struct tx_port_stats, + tx_total_collisions)}, + {"tx_bytes", offsetof(struct tx_port_stats, + tx_bytes)}, + {"tx_pfc_ena_frames_pri0", offsetof(struct tx_port_stats, + tx_pfc_ena_frames_pri0)}, + {"tx_pfc_ena_frames_pri1", offsetof(struct tx_port_stats, + tx_pfc_ena_frames_pri1)}, + {"tx_pfc_ena_frames_pri2", offsetof(struct tx_port_stats, + tx_pfc_ena_frames_pri2)}, + {"tx_pfc_ena_frames_pri3", offsetof(struct tx_port_stats, + tx_pfc_ena_frames_pri3)}, + {"tx_pfc_ena_frames_pri4", offsetof(struct tx_port_stats, + tx_pfc_ena_frames_pri4)}, + {"tx_pfc_ena_frames_pri5", offsetof(struct tx_port_stats, + tx_pfc_ena_frames_pri5)}, + {"tx_pfc_ena_frames_pri6", offsetof(struct tx_port_stats, + tx_pfc_ena_frames_pri6)}, + {"tx_pfc_ena_frames_pri7", offsetof(struct tx_port_stats, + tx_pfc_ena_frames_pri7)}, + {"tx_llfc_logical_msgs", offsetof(struct tx_port_stats, + tx_llfc_logical_msgs)}, + {"tx_hcfc_msgs", offsetof(struct tx_port_stats, + tx_hcfc_msgs)}, + {"tx_xthol_frames", offsetof(struct tx_port_stats, + tx_xthol_frames)}, + {"tx_stat_discard", offsetof(struct tx_port_stats, + tx_stat_discard)}, + {"tx_stat_error", offsetof(struct tx_port_stats, + tx_stat_error)}, +}; + +static const struct bnxt_xstats_name_off bnxt_func_stats_strings[] = { + {"tx_ucast_pkts", offsetof(struct hwrm_func_qstats_output, + tx_ucast_pkts)}, + {"tx_mcast_pkts", offsetof(struct hwrm_func_qstats_output, + tx_mcast_pkts)}, + {"tx_bcast_pkts", offsetof(struct hwrm_func_qstats_output, + tx_bcast_pkts)}, + 
{"tx_discard_pkts", offsetof(struct hwrm_func_qstats_output, + tx_discard_pkts)}, + {"tx_drop_pkts", offsetof(struct hwrm_func_qstats_output, + tx_drop_pkts)}, + {"tx_ucast_bytes", offsetof(struct hwrm_func_qstats_output, + tx_ucast_bytes)}, + {"tx_mcast_bytes", offsetof(struct hwrm_func_qstats_output, + tx_mcast_bytes)}, + {"tx_bcast_bytes", offsetof(struct hwrm_func_qstats_output, + tx_bcast_bytes)}, + {"rx_ucast_pkts", offsetof(struct hwrm_func_qstats_output, + rx_ucast_pkts)}, + {"rx_mcast_pkts", offsetof(struct hwrm_func_qstats_output, + rx_mcast_pkts)}, + {"rx_bcast_pkts", offsetof(struct hwrm_func_qstats_output, + rx_bcast_pkts)}, + {"rx_discard_pkts", offsetof(struct hwrm_func_qstats_output, + rx_discard_pkts)}, + {"rx_drop_pkts", offsetof(struct hwrm_func_qstats_output, + rx_drop_pkts)}, + {"rx_ucast_bytes", offsetof(struct hwrm_func_qstats_output, + rx_ucast_bytes)}, + {"rx_mcast_bytes", offsetof(struct hwrm_func_qstats_output, + rx_mcast_bytes)}, + {"rx_bcast_bytes", offsetof(struct hwrm_func_qstats_output, + rx_bcast_bytes)}, + {"rx_agg_pkts", offsetof(struct hwrm_func_qstats_output, + rx_agg_pkts)}, + {"rx_agg_bytes", offsetof(struct hwrm_func_qstats_output, + rx_agg_bytes)}, + {"rx_agg_events", offsetof(struct hwrm_func_qstats_output, + rx_agg_events)}, + {"rx_agg_aborts", offsetof(struct hwrm_func_qstats_output, + rx_agg_aborts)}, +}; + + +static const struct bnxt_xstats_name_off bnxt_rx_ext_stats_strings[] = { + {"link_down_events", offsetof(struct rx_port_stats_ext, + link_down_events)}, + {"continuous_pause_events", offsetof(struct rx_port_stats_ext, + continuous_pause_events)}, + {"resume_pause_events", offsetof(struct rx_port_stats_ext, + resume_pause_events)}, + {"continuous_roce_pause_events", offsetof(struct rx_port_stats_ext, + continuous_roce_pause_events)}, + {"resume_roce_pause_events", offsetof(struct rx_port_stats_ext, + resume_roce_pause_events)}, + {"rx_bytes_cos0", offsetof(struct rx_port_stats_ext, + rx_bytes_cos0)}, + {"rx_bytes_cos1", offsetof(struct rx_port_stats_ext, + rx_bytes_cos1)}, + {"rx_bytes_cos2", offsetof(struct rx_port_stats_ext, + rx_bytes_cos2)}, + {"rx_bytes_cos3", offsetof(struct rx_port_stats_ext, + rx_bytes_cos3)}, + {"rx_bytes_cos4", offsetof(struct rx_port_stats_ext, + rx_bytes_cos4)}, + {"rx_bytes_cos5", offsetof(struct rx_port_stats_ext, + rx_bytes_cos5)}, + {"rx_bytes_cos6", offsetof(struct rx_port_stats_ext, + rx_bytes_cos6)}, + {"rx_bytes_cos7", offsetof(struct rx_port_stats_ext, + rx_bytes_cos7)}, + {"rx_packets_cos0", offsetof(struct rx_port_stats_ext, + rx_packets_cos0)}, + {"rx_packets_cos1", offsetof(struct rx_port_stats_ext, + rx_packets_cos1)}, + {"rx_packets_cos2", offsetof(struct rx_port_stats_ext, + rx_packets_cos2)}, + {"rx_packets_cos3", offsetof(struct rx_port_stats_ext, + rx_packets_cos3)}, + {"rx_packets_cos4", offsetof(struct rx_port_stats_ext, + rx_packets_cos4)}, + {"rx_packets_cos5", offsetof(struct rx_port_stats_ext, + rx_packets_cos5)}, + {"rx_packets_cos6", offsetof(struct rx_port_stats_ext, + rx_packets_cos6)}, + {"rx_packets_cos7", offsetof(struct rx_port_stats_ext, + rx_packets_cos7)}, + {"pfc_pri0_rx_duration_us", offsetof(struct rx_port_stats_ext, + pfc_pri0_rx_duration_us)}, + {"pfc_pri0_rx_transitions", offsetof(struct rx_port_stats_ext, + pfc_pri0_rx_transitions)}, + {"pfc_pri1_rx_duration_us", offsetof(struct rx_port_stats_ext, + pfc_pri1_rx_duration_us)}, + {"pfc_pri1_rx_transitions", offsetof(struct rx_port_stats_ext, + pfc_pri1_rx_transitions)}, + {"pfc_pri2_rx_duration_us", offsetof(struct 
rx_port_stats_ext, + pfc_pri2_rx_duration_us)}, + {"pfc_pri2_rx_transitions", offsetof(struct rx_port_stats_ext, + pfc_pri2_rx_transitions)}, + {"pfc_pri3_rx_duration_us", offsetof(struct rx_port_stats_ext, + pfc_pri3_rx_duration_us)}, + {"pfc_pri3_rx_transitions", offsetof(struct rx_port_stats_ext, + pfc_pri3_rx_transitions)}, + {"pfc_pri4_rx_duration_us", offsetof(struct rx_port_stats_ext, + pfc_pri4_rx_duration_us)}, + {"pfc_pri4_rx_transitions", offsetof(struct rx_port_stats_ext, + pfc_pri4_rx_transitions)}, + {"pfc_pri5_rx_duration_us", offsetof(struct rx_port_stats_ext, + pfc_pri5_rx_duration_us)}, + {"pfc_pri5_rx_transitions", offsetof(struct rx_port_stats_ext, + pfc_pri5_rx_transitions)}, + {"pfc_pri6_rx_duration_us", offsetof(struct rx_port_stats_ext, + pfc_pri6_rx_duration_us)}, + {"pfc_pri6_rx_transitions", offsetof(struct rx_port_stats_ext, + pfc_pri6_rx_transitions)}, + {"pfc_pri7_rx_duration_us", offsetof(struct rx_port_stats_ext, + pfc_pri7_rx_duration_us)}, + {"pfc_pri7_rx_transitions", offsetof(struct rx_port_stats_ext, + pfc_pri7_rx_transitions)}, + {"rx_bits", offsetof(struct rx_port_stats_ext, + rx_bits)}, + {"rx_buffer_passed_threshold", offsetof(struct rx_port_stats_ext, + rx_buffer_passed_threshold)}, + {"rx_pcs_symbol_err", offsetof(struct rx_port_stats_ext, + rx_pcs_symbol_err)}, + {"rx_corrected_bits", offsetof(struct rx_port_stats_ext, + rx_corrected_bits)}, + {"rx_discard_bytes_cos0", offsetof(struct rx_port_stats_ext, + rx_discard_bytes_cos0)}, + {"rx_discard_bytes_cos1", offsetof(struct rx_port_stats_ext, + rx_discard_bytes_cos1)}, + {"rx_discard_bytes_cos2", offsetof(struct rx_port_stats_ext, + rx_discard_bytes_cos2)}, + {"rx_discard_bytes_cos3", offsetof(struct rx_port_stats_ext, + rx_discard_bytes_cos3)}, + {"rx_discard_bytes_cos4", offsetof(struct rx_port_stats_ext, + rx_discard_bytes_cos4)}, + {"rx_discard_bytes_cos5", offsetof(struct rx_port_stats_ext, + rx_discard_bytes_cos5)}, + {"rx_discard_bytes_cos6", offsetof(struct rx_port_stats_ext, + rx_discard_bytes_cos6)}, + {"rx_discard_bytes_cos7", offsetof(struct rx_port_stats_ext, + rx_discard_bytes_cos7)}, + {"rx_discard_packets_cos0", offsetof(struct rx_port_stats_ext, + rx_discard_packets_cos0)}, + {"rx_discard_packets_cos1", offsetof(struct rx_port_stats_ext, + rx_discard_packets_cos1)}, + {"rx_discard_packets_cos2", offsetof(struct rx_port_stats_ext, + rx_discard_packets_cos2)}, + {"rx_discard_packets_cos3", offsetof(struct rx_port_stats_ext, + rx_discard_packets_cos3)}, + {"rx_discard_packets_cos4", offsetof(struct rx_port_stats_ext, + rx_discard_packets_cos4)}, + {"rx_discard_packets_cos5", offsetof(struct rx_port_stats_ext, + rx_discard_packets_cos5)}, + {"rx_discard_packets_cos6", offsetof(struct rx_port_stats_ext, + rx_discard_packets_cos6)}, + {"rx_discard_packets_cos7", offsetof(struct rx_port_stats_ext, + rx_discard_packets_cos7)}, +}; + +static const struct bnxt_xstats_name_off bnxt_tx_ext_stats_strings[] = { + {"tx_bytes_cos0", offsetof(struct tx_port_stats_ext, + tx_bytes_cos0)}, + {"tx_bytes_cos1", offsetof(struct tx_port_stats_ext, + tx_bytes_cos1)}, + {"tx_bytes_cos2", offsetof(struct tx_port_stats_ext, + tx_bytes_cos2)}, + {"tx_bytes_cos3", offsetof(struct tx_port_stats_ext, + tx_bytes_cos3)}, + {"tx_bytes_cos4", offsetof(struct tx_port_stats_ext, + tx_bytes_cos4)}, + {"tx_bytes_cos5", offsetof(struct tx_port_stats_ext, + tx_bytes_cos5)}, + {"tx_bytes_cos6", offsetof(struct tx_port_stats_ext, + tx_bytes_cos6)}, + {"tx_bytes_cos7", offsetof(struct tx_port_stats_ext, + tx_bytes_cos7)}, + 
{"tx_packets_cos0", offsetof(struct tx_port_stats_ext, + tx_packets_cos0)}, + {"tx_packets_cos1", offsetof(struct tx_port_stats_ext, + tx_packets_cos1)}, + {"tx_packets_cos2", offsetof(struct tx_port_stats_ext, + tx_packets_cos2)}, + {"tx_packets_cos3", offsetof(struct tx_port_stats_ext, + tx_packets_cos3)}, + {"tx_packets_cos4", offsetof(struct tx_port_stats_ext, + tx_packets_cos4)}, + {"tx_packets_cos5", offsetof(struct tx_port_stats_ext, + tx_packets_cos5)}, + {"tx_packets_cos6", offsetof(struct tx_port_stats_ext, + tx_packets_cos6)}, + {"tx_packets_cos7", offsetof(struct tx_port_stats_ext, + tx_packets_cos7)}, + {"pfc_pri0_tx_duration_us", offsetof(struct tx_port_stats_ext, + pfc_pri0_tx_duration_us)}, + {"pfc_pri0_tx_transitions", offsetof(struct tx_port_stats_ext, + pfc_pri0_tx_transitions)}, + {"pfc_pri1_tx_duration_us", offsetof(struct tx_port_stats_ext, + pfc_pri1_tx_duration_us)}, + {"pfc_pri1_tx_transitions", offsetof(struct tx_port_stats_ext, + pfc_pri1_tx_transitions)}, + {"pfc_pri2_tx_duration_us", offsetof(struct tx_port_stats_ext, + pfc_pri2_tx_duration_us)}, + {"pfc_pri2_tx_transitions", offsetof(struct tx_port_stats_ext, + pfc_pri2_tx_transitions)}, + {"pfc_pri3_tx_duration_us", offsetof(struct tx_port_stats_ext, + pfc_pri3_tx_duration_us)}, + {"pfc_pri3_tx_transitions", offsetof(struct tx_port_stats_ext, + pfc_pri3_tx_transitions)}, + {"pfc_pri4_tx_duration_us", offsetof(struct tx_port_stats_ext, + pfc_pri4_tx_duration_us)}, + {"pfc_pri4_tx_transitions", offsetof(struct tx_port_stats_ext, + pfc_pri4_tx_transitions)}, + {"pfc_pri5_tx_duration_us", offsetof(struct tx_port_stats_ext, + pfc_pri5_tx_duration_us)}, + {"pfc_pri5_tx_transitions", offsetof(struct tx_port_stats_ext, + pfc_pri5_tx_transitions)}, + {"pfc_pri6_tx_duration_us", offsetof(struct tx_port_stats_ext, + pfc_pri6_tx_duration_us)}, + {"pfc_pri6_tx_transitions", offsetof(struct tx_port_stats_ext, + pfc_pri6_tx_transitions)}, + {"pfc_pri7_tx_duration_us", offsetof(struct tx_port_stats_ext, + pfc_pri7_tx_duration_us)}, + {"pfc_pri7_tx_transitions", offsetof(struct tx_port_stats_ext, + pfc_pri7_tx_transitions)}, +}; + +/* + * Statistics functions + */ + +void bnxt_free_stats(struct bnxt *bp) +{ + int i; + + for (i = 0; i < (int)bp->tx_cp_nr_rings; i++) { + struct bnxt_tx_queue *txq = bp->tx_queues[i]; + + bnxt_free_txq_stats(txq); + } + for (i = 0; i < (int)bp->rx_cp_nr_rings; i++) { + struct bnxt_rx_queue *rxq = bp->rx_queues[i]; + + bnxt_free_rxq_stats(rxq); + } +} + +int bnxt_stats_get_op(struct rte_eth_dev *eth_dev, + struct rte_eth_stats *bnxt_stats) +{ + int rc = 0; + unsigned int i; + struct bnxt *bp = eth_dev->data->dev_private; + unsigned int num_q_stats; + + rc = is_bnxt_in_error(bp); + if (rc) + return rc; + + if (!eth_dev->data->dev_started) + return -EIO; + + num_q_stats = RTE_MIN(bp->rx_cp_nr_rings, + (unsigned int)RTE_ETHDEV_QUEUE_STAT_CNTRS); + + for (i = 0; i < num_q_stats; i++) { + struct bnxt_rx_queue *rxq = bp->rx_queues[i]; + struct bnxt_cp_ring_info *cpr = rxq->cp_ring; + + rc = bnxt_hwrm_ctx_qstats(bp, cpr->hw_stats_ctx_id, i, + bnxt_stats, 1); + if (unlikely(rc)) + return rc; + bnxt_stats->rx_nombuf += + rte_atomic64_read(&rxq->rx_mbuf_alloc_fail); + } + + num_q_stats = RTE_MIN(bp->tx_cp_nr_rings, + (unsigned int)RTE_ETHDEV_QUEUE_STAT_CNTRS); + + for (i = 0; i < num_q_stats; i++) { + struct bnxt_tx_queue *txq = bp->tx_queues[i]; + struct bnxt_cp_ring_info *cpr = txq->cp_ring; + + rc = bnxt_hwrm_ctx_qstats(bp, cpr->hw_stats_ctx_id, i, + bnxt_stats, 0); + if (unlikely(rc)) + return rc; + } + 
+ rc = bnxt_hwrm_func_qstats(bp, 0xffff, bnxt_stats, NULL); + return rc; +} + +int bnxt_stats_reset_op(struct rte_eth_dev *eth_dev) +{ + struct bnxt *bp = eth_dev->data->dev_private; + unsigned int i; + int ret; + + ret = is_bnxt_in_error(bp); + if (ret) + return ret; + + if (!eth_dev->data->dev_started) { + PMD_DRV_LOG(ERR, "Device Initialization not complete!\n"); + return -EINVAL; + } + + ret = bnxt_clear_all_hwrm_stat_ctxs(bp); + for (i = 0; i < bp->rx_cp_nr_rings; i++) { + struct bnxt_rx_queue *rxq = bp->rx_queues[i]; + + rte_atomic64_clear(&rxq->rx_mbuf_alloc_fail); + } + + return ret; +} + +int bnxt_dev_xstats_get_op(struct rte_eth_dev *eth_dev, + struct rte_eth_xstat *xstats, unsigned int n) +{ + struct bnxt *bp = eth_dev->data->dev_private; + unsigned int count, i; + unsigned int rx_port_stats_ext_cnt; + unsigned int tx_port_stats_ext_cnt; + unsigned int stat_size = sizeof(uint64_t); + struct hwrm_func_qstats_output func_qstats = {0}; + unsigned int stat_count; + int rc; + + rc = is_bnxt_in_error(bp); + if (rc) + return rc; + + if (xstats == NULL) + return 0; + + memset(xstats, 0, sizeof(*xstats)); + + bnxt_hwrm_func_qstats(bp, 0xffff, NULL, &func_qstats); + bnxt_hwrm_port_qstats(bp); + bnxt_hwrm_ext_port_qstats(bp); + rx_port_stats_ext_cnt = RTE_MIN(RTE_DIM(bnxt_rx_ext_stats_strings), + (bp->fw_rx_port_stats_ext_size / + stat_size)); + tx_port_stats_ext_cnt = RTE_MIN(RTE_DIM(bnxt_tx_ext_stats_strings), + (bp->fw_tx_port_stats_ext_size / + stat_size)); + + count = RTE_DIM(bnxt_rx_stats_strings) + + RTE_DIM(bnxt_tx_stats_strings) + + RTE_DIM(bnxt_func_stats_strings) + + RTE_DIM(bnxt_rx_ext_stats_strings) + + RTE_DIM(bnxt_tx_ext_stats_strings) + + bnxt_flow_stats_cnt(bp); + + stat_count = count; + + if (n < count) + return count; + + count = 0; + for (i = 0; i < RTE_DIM(bnxt_rx_stats_strings); i++) { + uint64_t *rx_stats = (uint64_t *)bp->hw_rx_port_stats; + xstats[count].id = count; + xstats[count].value = rte_le_to_cpu_64( + *(uint64_t *)((char *)rx_stats + + bnxt_rx_stats_strings[i].offset)); + count++; + } + + for (i = 0; i < RTE_DIM(bnxt_tx_stats_strings); i++) { + uint64_t *tx_stats = (uint64_t *)bp->hw_tx_port_stats; + xstats[count].id = count; + xstats[count].value = rte_le_to_cpu_64( + *(uint64_t *)((char *)tx_stats + + bnxt_tx_stats_strings[i].offset)); + count++; + } + + for (i = 0; i < RTE_DIM(bnxt_func_stats_strings); i++) { + xstats[count].id = count; + xstats[count].value = + rte_le_to_cpu_64(((uint64_t *)&func_qstats)[i]); + count++; + } + + + for (i = 0; i < rx_port_stats_ext_cnt; i++) { + uint64_t *rx_stats_ext = (uint64_t *)bp->hw_rx_port_stats_ext; + + xstats[count].value = rte_le_to_cpu_64 + (*(uint64_t *)((char *)rx_stats_ext + + bnxt_rx_ext_stats_strings[i].offset)); + + count++; + } + + for (i = 0; i < tx_port_stats_ext_cnt; i++) { + uint64_t *tx_stats_ext = (uint64_t *)bp->hw_tx_port_stats_ext; + + xstats[count].value = rte_le_to_cpu_64 + (*(uint64_t *)((char *)tx_stats_ext + + bnxt_tx_ext_stats_strings[i].offset)); + count++; + } + + if (bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_COUNTERS && + bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_MGMT && + BNXT_FLOW_XSTATS_EN(bp)) { + int j; + + i = 0; + for (j = 0; j < bp->max_vnics; j++) { + struct bnxt_filter_info *filter; + struct bnxt_vnic_info *vnic; + struct rte_flow *flow; + + vnic = &bp->vnic_info[j]; + if (vnic && vnic->fw_vnic_id == INVALID_VNIC_ID) + continue; + + if (STAILQ_EMPTY(&vnic->flow_list)) + continue; + + STAILQ_FOREACH(flow, &vnic->flow_list, next) { + if (!flow || !flow->filter) + continue; + + filter = 
flow->filter; + xstats[count].id = count; + xstats[count].value = + filter->hw_stats.bytes; + count++; + xstats[count].id = count; + xstats[count].value = + filter->hw_stats.packets; + count++; + if (++i > bp->max_l2_ctx) + break; + } + if (i > bp->max_l2_ctx) + break; + } + } + + return stat_count; +} + +int bnxt_flow_stats_cnt(struct bnxt *bp) +{ + if (bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_COUNTERS && + bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_MGMT && + BNXT_FLOW_XSTATS_EN(bp)) { + struct bnxt_xstats_name_off flow_bytes[bp->max_l2_ctx]; + struct bnxt_xstats_name_off flow_pkts[bp->max_l2_ctx]; + + return RTE_DIM(flow_bytes) + RTE_DIM(flow_pkts); + } + + return 0; +} + +int bnxt_dev_xstats_get_names_op(struct rte_eth_dev *eth_dev, + struct rte_eth_xstat_name *xstats_names, + __rte_unused unsigned int limit) +{ + struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; + const unsigned int stat_cnt = RTE_DIM(bnxt_rx_stats_strings) + + RTE_DIM(bnxt_tx_stats_strings) + + RTE_DIM(bnxt_func_stats_strings) + + RTE_DIM(bnxt_rx_ext_stats_strings) + + RTE_DIM(bnxt_tx_ext_stats_strings) + + bnxt_flow_stats_cnt(bp); + unsigned int i, count = 0; + int rc; + + rc = is_bnxt_in_error(bp); + if (rc) + return rc; + + if (xstats_names != NULL) { + count = 0; + + for (i = 0; i < RTE_DIM(bnxt_rx_stats_strings); i++) { + strlcpy(xstats_names[count].name, + bnxt_rx_stats_strings[i].name, + sizeof(xstats_names[count].name)); + count++; + } + + for (i = 0; i < RTE_DIM(bnxt_tx_stats_strings); i++) { + strlcpy(xstats_names[count].name, + bnxt_tx_stats_strings[i].name, + sizeof(xstats_names[count].name)); + count++; + } + + for (i = 0; i < RTE_DIM(bnxt_func_stats_strings); i++) { + strlcpy(xstats_names[count].name, + bnxt_func_stats_strings[i].name, + sizeof(xstats_names[count].name)); + count++; + } + + for (i = 0; i < RTE_DIM(bnxt_rx_ext_stats_strings); i++) { + strlcpy(xstats_names[count].name, + bnxt_rx_ext_stats_strings[i].name, + sizeof(xstats_names[count].name)); + + count++; + } + + for (i = 0; i < RTE_DIM(bnxt_tx_ext_stats_strings); i++) { + strlcpy(xstats_names[count].name, + bnxt_tx_ext_stats_strings[i].name, + sizeof(xstats_names[count].name)); + + count++; + } + + if (bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_COUNTERS && + bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_MGMT && + BNXT_FLOW_XSTATS_EN(bp)) { + for (i = 0; i < bp->max_l2_ctx; i++) { + char buf[RTE_ETH_XSTATS_NAME_SIZE]; + + sprintf(buf, "flow_%d_bytes", i); + strlcpy(xstats_names[count].name, buf, + sizeof(xstats_names[count].name)); + count++; + + sprintf(buf, "flow_%d_packets", i); + strlcpy(xstats_names[count].name, buf, + sizeof(xstats_names[count].name)); + + count++; + } + } + } + + return stat_cnt; +} + +int bnxt_dev_xstats_reset_op(struct rte_eth_dev *eth_dev) +{ + struct bnxt *bp = eth_dev->data->dev_private; + int ret; + + ret = is_bnxt_in_error(bp); + if (ret) + return ret; + + if (BNXT_VF(bp) || !BNXT_SINGLE_PF(bp) || + !(bp->flags & BNXT_FLAG_PORT_STATS)) { + PMD_DRV_LOG(ERR, "Operation not supported\n"); + return -ENOTSUP; + } + + ret = bnxt_hwrm_port_clr_stats(bp); + if (ret != 0) + PMD_DRV_LOG(ERR, "Failed to reset xstats: %s\n", + strerror(-ret)); + + return ret; +} + +int bnxt_dev_xstats_get_by_id_op(struct rte_eth_dev *dev, const uint64_t *ids, + uint64_t *values, unsigned int limit) +{ + struct bnxt *bp = dev->data->dev_private; + const unsigned int stat_cnt = RTE_DIM(bnxt_rx_stats_strings) + + RTE_DIM(bnxt_tx_stats_strings) + + RTE_DIM(bnxt_func_stats_strings) + + RTE_DIM(bnxt_rx_ext_stats_strings) + + RTE_DIM(bnxt_tx_ext_stats_strings) + + 
bnxt_flow_stats_cnt(bp); + struct rte_eth_xstat xstats[stat_cnt]; + uint64_t values_copy[stat_cnt]; + uint16_t i; + int rc; + + rc = is_bnxt_in_error(bp); + if (rc) + return rc; + + if (!ids) + return bnxt_dev_xstats_get_op(dev, xstats, stat_cnt); + + bnxt_dev_xstats_get_by_id_op(dev, NULL, values_copy, stat_cnt); + for (i = 0; i < limit; i++) { + if (ids[i] >= stat_cnt) { + PMD_DRV_LOG(ERR, "id value isn't valid"); + return -EINVAL; + } + values[i] = values_copy[ids[i]]; + } + return stat_cnt; +} + +int bnxt_dev_xstats_get_names_by_id_op(struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, + const uint64_t *ids, unsigned int limit) +{ + struct bnxt *bp = dev->data->dev_private; + const unsigned int stat_cnt = RTE_DIM(bnxt_rx_stats_strings) + + RTE_DIM(bnxt_tx_stats_strings) + + RTE_DIM(bnxt_func_stats_strings) + + RTE_DIM(bnxt_rx_ext_stats_strings) + + RTE_DIM(bnxt_tx_ext_stats_strings) + + bnxt_flow_stats_cnt(bp); + struct rte_eth_xstat_name xstats_names_copy[stat_cnt]; + uint16_t i; + int rc; + + rc = is_bnxt_in_error(bp); + if (rc) + return rc; + + if (!ids) + return bnxt_dev_xstats_get_names_op(dev, xstats_names, + stat_cnt); + bnxt_dev_xstats_get_names_by_id_op(dev, xstats_names_copy, NULL, + stat_cnt); + + for (i = 0; i < limit; i++) { + if (ids[i] >= stat_cnt) { + PMD_DRV_LOG(ERR, "id value isn't valid"); + return -EINVAL; + } + strcpy(xstats_names[i].name, + xstats_names_copy[ids[i]].name); + } + return stat_cnt; +} + +/* Update the input context memory with the flow counter IDs + * of the flows that we are interested in. + * Also, update the output tables with the current local values + * since that is what will be used by FW to accumulate + */ +static void bnxt_update_fc_pre_qstat(uint32_t *in_tbl, + uint64_t *out_tbl, + struct bnxt_filter_info *filter, + uint32_t *ptbl_cnt) +{ + uint32_t in_tbl_cnt = *ptbl_cnt; + + in_tbl[in_tbl_cnt] = filter->flow_id; + out_tbl[2 * in_tbl_cnt] = filter->hw_stats.packets; + out_tbl[2 * in_tbl_cnt + 1] = filter->hw_stats.bytes; + in_tbl_cnt++; + *ptbl_cnt = in_tbl_cnt; +} + +/* Post issuing counter_qstats cmd, update the driver's local stat + * entries with the values DMA-ed by FW in the output table + */ +static void bnxt_update_fc_post_qstat(struct bnxt_filter_info *filter, + uint64_t *out_tbl, + uint32_t out_tbl_idx) +{ + filter->hw_stats.packets = out_tbl[2 * out_tbl_idx]; + filter->hw_stats.bytes = out_tbl[(2 * out_tbl_idx) + 1]; +} + +static int bnxt_update_fc_tbl(struct bnxt *bp, uint16_t ctr, + struct bnxt_filter_info *en_tbl[], + uint16_t in_flow_cnt) +{ + uint32_t *in_rx_tbl; + uint64_t *out_rx_tbl; + uint32_t in_rx_tbl_cnt = 0; + uint32_t out_rx_tbl_cnt = 0; + int i, rc = 0; + + in_rx_tbl = (uint32_t *)bp->flow_stat->rx_fc_in_tbl.va; + out_rx_tbl = (uint64_t *)bp->flow_stat->rx_fc_out_tbl.va; + + for (i = 0; i < in_flow_cnt; i++) { + if (!en_tbl[i]) + continue; + + /* Currently only ingress/Rx flows are supported anyway. */ + bnxt_update_fc_pre_qstat(in_rx_tbl, out_rx_tbl, + en_tbl[i], &in_rx_tbl_cnt); + } + + /* Currently only ingress/Rx flows are supported */ + if (in_rx_tbl_cnt) { + rc = bnxt_hwrm_cfa_counter_qstats(bp, BNXT_DIR_RX, ctr, + in_rx_tbl_cnt); + if (rc) + return rc; + } + + for (i = 0; i < in_flow_cnt; i++) { + if (!en_tbl[i]) + continue; + + /* Currently only ingress/Rx flows are supported */ + bnxt_update_fc_post_qstat(en_tbl[i], out_rx_tbl, + out_rx_tbl_cnt); + out_rx_tbl_cnt++; + } + + return rc; +} + +/* Walks through the list which has all the flows + * requesting for explicit flow counters. 
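+ * Flow-counter IDs are accumulated into the DMA input table and flushed
+ * to the firmware in batches of at most bp->flow_stat->max_fc entries.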
+ */ +int bnxt_flow_stats_req(struct bnxt *bp) +{ + int i; + int rc = 0; + struct rte_flow *flow; + uint16_t in_flow_tbl_cnt = 0; + struct bnxt_vnic_info *vnic = NULL; + struct bnxt_filter_info *valid_en_tbl[bp->flow_stat->max_fc]; + uint16_t counter_type = CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC; + + bnxt_acquire_flow_lock(bp); + for (i = 0; i < bp->max_vnics; i++) { + vnic = &bp->vnic_info[i]; + if (vnic && vnic->fw_vnic_id == INVALID_VNIC_ID) + continue; + + if (STAILQ_EMPTY(&vnic->flow_list)) + continue; + + STAILQ_FOREACH(flow, &vnic->flow_list, next) { + if (!flow || !flow->filter) + continue; + + valid_en_tbl[in_flow_tbl_cnt++] = flow->filter; + if (in_flow_tbl_cnt >= bp->flow_stat->max_fc) { + rc = bnxt_update_fc_tbl(bp, counter_type, + valid_en_tbl, + in_flow_tbl_cnt); + if (rc) + goto err; + in_flow_tbl_cnt = 0; + continue; + } + } + } + + if (!in_flow_tbl_cnt) + goto out; + + rc = bnxt_update_fc_tbl(bp, counter_type, valid_en_tbl, + in_flow_tbl_cnt); + if (!rc) { + bnxt_release_flow_lock(bp); + return 0; + } + +err: + /* If cmd fails once, no need of + * invoking again every second + */ + bnxt_release_flow_lock(bp); + bnxt_cancel_fc_thread(bp); +out: + return rc; +} diff --git a/src/spdk/dpdk/drivers/net/bnxt/bnxt_stats.h b/src/spdk/dpdk/drivers/net/bnxt/bnxt_stats.h new file mode 100644 index 000000000..3cf2a1b82 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnxt/bnxt_stats.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Broadcom + * All rights reserved. + */ + +#ifndef _BNXT_STATS_H_ +#define _BNXT_STATS_H_ + +#include + +void bnxt_free_stats(struct bnxt *bp); +int bnxt_stats_get_op(struct rte_eth_dev *eth_dev, + struct rte_eth_stats *bnxt_stats); +int bnxt_stats_reset_op(struct rte_eth_dev *eth_dev); +int bnxt_dev_xstats_get_names_op(__rte_unused struct rte_eth_dev *eth_dev, + struct rte_eth_xstat_name *xstats_names, + __rte_unused unsigned int limit); +int bnxt_dev_xstats_get_op(struct rte_eth_dev *eth_dev, + struct rte_eth_xstat *xstats, unsigned int n); +int bnxt_dev_xstats_reset_op(struct rte_eth_dev *eth_dev); +int bnxt_dev_xstats_get_by_id_op(struct rte_eth_dev *dev, const uint64_t *ids, + uint64_t *values, unsigned int limit); +int bnxt_dev_xstats_get_names_by_id_op(struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, + const uint64_t *ids, unsigned int limit); + +struct bnxt_xstats_name_off { + char name[RTE_ETH_XSTATS_NAME_SIZE]; + uint64_t offset; +}; +#endif diff --git a/src/spdk/dpdk/drivers/net/bnxt/bnxt_txq.c b/src/spdk/dpdk/drivers/net/bnxt/bnxt_txq.c new file mode 100644 index 000000000..2d7645eeb --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnxt/bnxt_txq.c @@ -0,0 +1,166 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Broadcom + * All rights reserved. 
+ */ + +#include + +#include + +#include "bnxt.h" +#include "bnxt_ring.h" +#include "bnxt_txq.h" +#include "bnxt_txr.h" + +/* + * TX Queues + */ + +void bnxt_free_txq_stats(struct bnxt_tx_queue *txq) +{ + if (txq && txq->cp_ring && txq->cp_ring->hw_stats) + txq->cp_ring->hw_stats = NULL; +} + +static void bnxt_tx_queue_release_mbufs(struct bnxt_tx_queue *txq) +{ + struct bnxt_sw_tx_bd *sw_ring; + uint16_t i; + + if (!txq) + return; + + sw_ring = txq->tx_ring->tx_buf_ring; + if (sw_ring) { + for (i = 0; i < txq->tx_ring->tx_ring_struct->ring_size; i++) { + if (sw_ring[i].mbuf) { + rte_pktmbuf_free_seg(sw_ring[i].mbuf); + sw_ring[i].mbuf = NULL; + } + } + } +} + +void bnxt_free_tx_mbufs(struct bnxt *bp) +{ + struct bnxt_tx_queue *txq; + int i; + + for (i = 0; i < (int)bp->tx_nr_rings; i++) { + txq = bp->tx_queues[i]; + bnxt_tx_queue_release_mbufs(txq); + } +} + +void bnxt_tx_queue_release_op(void *tx_queue) +{ + struct bnxt_tx_queue *txq = (struct bnxt_tx_queue *)tx_queue; + + if (txq) { + if (is_bnxt_in_error(txq->bp)) + return; + + /* Free TX ring hardware descriptors */ + bnxt_tx_queue_release_mbufs(txq); + bnxt_free_ring(txq->tx_ring->tx_ring_struct); + + /* Free TX completion ring hardware descriptors */ + bnxt_free_ring(txq->cp_ring->cp_ring_struct); + + bnxt_free_txq_stats(txq); + rte_memzone_free(txq->mz); + txq->mz = NULL; + + rte_free(txq->free); + rte_free(txq); + } +} + +int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev, + uint16_t queue_idx, + uint16_t nb_desc, + unsigned int socket_id, + const struct rte_eth_txconf *tx_conf) +{ + struct bnxt *bp = eth_dev->data->dev_private; + struct bnxt_tx_queue *txq; + int rc = 0; + + rc = is_bnxt_in_error(bp); + if (rc) + return rc; + + if (queue_idx >= BNXT_MAX_RINGS(bp)) { + PMD_DRV_LOG(ERR, + "Cannot create Tx ring %d. 
Only %d rings available\n", + queue_idx, bp->max_tx_rings); + return -EINVAL; + } + + if (!nb_desc || nb_desc > MAX_TX_DESC_CNT) { + PMD_DRV_LOG(ERR, "nb_desc %d is invalid", nb_desc); + rc = -EINVAL; + goto out; + } + + if (eth_dev->data->tx_queues) { + txq = eth_dev->data->tx_queues[queue_idx]; + if (txq) { + bnxt_tx_queue_release_op(txq); + txq = NULL; + } + } + txq = rte_zmalloc_socket("bnxt_tx_queue", sizeof(struct bnxt_tx_queue), + RTE_CACHE_LINE_SIZE, socket_id); + if (!txq) { + PMD_DRV_LOG(ERR, "bnxt_tx_queue allocation failed!"); + rc = -ENOMEM; + goto out; + } + + txq->free = rte_zmalloc_socket(NULL, + sizeof(struct rte_mbuf *) * nb_desc, + RTE_CACHE_LINE_SIZE, socket_id); + if (!txq->free) { + PMD_DRV_LOG(ERR, "allocation of tx mbuf free array failed!"); + rte_free(txq); + rc = -ENOMEM; + goto out; + } + txq->bp = bp; + txq->nb_tx_desc = nb_desc; + txq->tx_free_thresh = tx_conf->tx_free_thresh; + txq->tx_deferred_start = tx_conf->tx_deferred_start; + + rc = bnxt_init_tx_ring_struct(txq, socket_id); + if (rc) + goto out; + + txq->queue_id = queue_idx; + txq->port_id = eth_dev->data->port_id; + + /* Allocate TX ring hardware descriptors */ + if (bnxt_alloc_rings(bp, queue_idx, txq, NULL, txq->cp_ring, NULL, + "txr")) { + PMD_DRV_LOG(ERR, "ring_dma_zone_reserve for tx_ring failed!"); + bnxt_tx_queue_release_op(txq); + rc = -ENOMEM; + goto out; + } + + if (bnxt_init_one_tx_ring(txq)) { + PMD_DRV_LOG(ERR, "bnxt_init_one_tx_ring failed!"); + bnxt_tx_queue_release_op(txq); + rc = -ENOMEM; + goto out; + } + + eth_dev->data->tx_queues[queue_idx] = txq; + + if (txq->tx_deferred_start) + txq->tx_started = false; + else + txq->tx_started = true; +out: + return rc; +} diff --git a/src/spdk/dpdk/drivers/net/bnxt/bnxt_txq.h b/src/spdk/dpdk/drivers/net/bnxt/bnxt_txq.h new file mode 100644 index 000000000..37a3f9539 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnxt/bnxt_txq.h @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Broadcom + * All rights reserved. + */ + +#ifndef _BNXT_TXQ_H_ +#define _BNXT_TXQ_H_ + +struct bnxt_tx_ring_info; +struct bnxt_cp_ring_info; +struct bnxt_tx_queue { + uint16_t nb_tx_desc; /* number of TX descriptors */ + uint16_t tx_free_thresh;/* minimum TX before freeing */ + /** Index to last TX descriptor to have been cleaned. */ + uint16_t last_desc_cleaned; + /** Total number of TX descriptors ready to be allocated. 
*/ + uint16_t tx_next_dd; /* next desc to scan for DD bit */ + uint16_t tx_next_rs; /* next desc to set RS bit */ + uint16_t queue_id; /* TX queue index */ + uint16_t reg_idx; /* TX queue register index */ + uint16_t port_id; /* Device port identifier */ + uint8_t pthresh; /* Prefetch threshold register */ + uint8_t hthresh; /* Host threshold register */ + uint8_t wthresh; /* Write-back threshold reg */ + uint32_t ctx_curr; /* Hardware context states */ + uint8_t tx_deferred_start; /* not in global dev start */ + uint8_t tx_started; /* TX queue is started */ + + struct bnxt *bp; + int index; + int tx_wake_thresh; + struct bnxt_tx_ring_info *tx_ring; + + unsigned int cp_nr_rings; + struct bnxt_cp_ring_info *cp_ring; + const struct rte_memzone *mz; + struct rte_mbuf **free; +}; + +void bnxt_free_txq_stats(struct bnxt_tx_queue *txq); +void bnxt_free_tx_mbufs(struct bnxt *bp); +void bnxt_tx_queue_release_op(void *tx_queue); +int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev, + uint16_t queue_idx, + uint16_t nb_desc, + unsigned int socket_id, + const struct rte_eth_txconf *tx_conf); +#endif diff --git a/src/spdk/dpdk/drivers/net/bnxt/bnxt_txr.c b/src/spdk/dpdk/drivers/net/bnxt/bnxt_txr.c new file mode 100644 index 000000000..16021407e --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnxt/bnxt_txr.c @@ -0,0 +1,528 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Broadcom + * All rights reserved. + */ + +#include + +#include +#include + +#include "bnxt.h" +#include "bnxt_ring.h" +#include "bnxt_txq.h" +#include "bnxt_txr.h" +#include "hsi_struct_def_dpdk.h" +#include + +/* + * TX Ring handling + */ + +void bnxt_free_tx_rings(struct bnxt *bp) +{ + int i; + + for (i = 0; i < (int)bp->tx_nr_rings; i++) { + struct bnxt_tx_queue *txq = bp->tx_queues[i]; + + if (!txq) + continue; + + bnxt_free_ring(txq->tx_ring->tx_ring_struct); + rte_free(txq->tx_ring->tx_ring_struct); + rte_free(txq->tx_ring); + + bnxt_free_ring(txq->cp_ring->cp_ring_struct); + rte_free(txq->cp_ring->cp_ring_struct); + rte_free(txq->cp_ring); + + rte_free(txq); + bp->tx_queues[i] = NULL; + } +} + +int bnxt_init_one_tx_ring(struct bnxt_tx_queue *txq) +{ + struct bnxt_tx_ring_info *txr = txq->tx_ring; + struct bnxt_ring *ring = txr->tx_ring_struct; + + txq->tx_wake_thresh = ring->ring_size / 2; + ring->fw_ring_id = INVALID_HW_RING_ID; + + return 0; +} + +int bnxt_init_tx_ring_struct(struct bnxt_tx_queue *txq, unsigned int socket_id) +{ + struct bnxt_cp_ring_info *cpr; + struct bnxt_tx_ring_info *txr; + struct bnxt_ring *ring; + + txr = rte_zmalloc_socket("bnxt_tx_ring", + sizeof(struct bnxt_tx_ring_info), + RTE_CACHE_LINE_SIZE, socket_id); + if (txr == NULL) + return -ENOMEM; + txq->tx_ring = txr; + + ring = rte_zmalloc_socket("bnxt_tx_ring_struct", + sizeof(struct bnxt_ring), + RTE_CACHE_LINE_SIZE, socket_id); + if (ring == NULL) + return -ENOMEM; + txr->tx_ring_struct = ring; + ring->ring_size = rte_align32pow2(txq->nb_tx_desc); + ring->ring_mask = ring->ring_size - 1; + ring->bd = (void *)txr->tx_desc_ring; + ring->bd_dma = txr->tx_desc_mapping; + ring->vmem_size = ring->ring_size * sizeof(struct bnxt_sw_tx_bd); + ring->vmem = (void **)&txr->tx_buf_ring; + + cpr = rte_zmalloc_socket("bnxt_tx_ring", + sizeof(struct bnxt_cp_ring_info), + RTE_CACHE_LINE_SIZE, socket_id); + if (cpr == NULL) + return -ENOMEM; + txq->cp_ring = cpr; + + ring = rte_zmalloc_socket("bnxt_tx_ring_struct", + sizeof(struct bnxt_ring), + RTE_CACHE_LINE_SIZE, socket_id); + if (ring == NULL) + return -ENOMEM; + cpr->cp_ring_struct = ring; + 
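+	/* The completion ring mirrors the TX ring size but has no software
+	 * buffer array (vmem) of its own.
+	 */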
ring->ring_size = txr->tx_ring_struct->ring_size; + ring->ring_mask = ring->ring_size - 1; + ring->bd = (void *)cpr->cp_desc_ring; + ring->bd_dma = cpr->cp_desc_mapping; + ring->vmem_size = 0; + ring->vmem = NULL; + + return 0; +} + +static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt, + struct bnxt_tx_queue *txq, + uint16_t *coal_pkts, + struct tx_bd_long **last_txbd) +{ + struct bnxt_tx_ring_info *txr = txq->tx_ring; + uint32_t outer_tpid_bd = 0; + struct tx_bd_long *txbd; + struct tx_bd_long_hi *txbd1 = NULL; + uint32_t vlan_tag_flags, cfa_action; + bool long_bd = false; + unsigned short nr_bds = 0; + struct rte_mbuf *m_seg; + struct bnxt_sw_tx_bd *tx_buf; + static const uint32_t lhint_arr[4] = { + TX_BD_LONG_FLAGS_LHINT_LT512, + TX_BD_LONG_FLAGS_LHINT_LT1K, + TX_BD_LONG_FLAGS_LHINT_LT2K, + TX_BD_LONG_FLAGS_LHINT_LT2K + }; + + if (unlikely(is_bnxt_in_error(txq->bp))) + return -EIO; + + if (tx_pkt->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_TCP_CKSUM | + PKT_TX_UDP_CKSUM | PKT_TX_IP_CKSUM | + PKT_TX_VLAN_PKT | PKT_TX_OUTER_IP_CKSUM | + PKT_TX_TUNNEL_GRE | PKT_TX_TUNNEL_VXLAN | + PKT_TX_TUNNEL_GENEVE | PKT_TX_IEEE1588_TMST | + PKT_TX_QINQ_PKT)) + long_bd = true; + + nr_bds = long_bd + tx_pkt->nb_segs; + if (unlikely(bnxt_tx_avail(txq) < nr_bds)) + return -ENOMEM; + + /* Check if number of Tx descriptors is above HW limit */ + if (unlikely(nr_bds > BNXT_MAX_TSO_SEGS)) { + PMD_DRV_LOG(ERR, + "Num descriptors %d exceeds HW limit\n", nr_bds); + return -ENOSPC; + } + + /* If packet length is less than minimum packet size, pad it */ + if (unlikely(rte_pktmbuf_pkt_len(tx_pkt) < BNXT_MIN_PKT_SIZE)) { + uint8_t pad = BNXT_MIN_PKT_SIZE - rte_pktmbuf_pkt_len(tx_pkt); + char *seg = rte_pktmbuf_append(tx_pkt, pad); + + if (!seg) { + PMD_DRV_LOG(ERR, + "Failed to pad mbuf by %d bytes\n", + pad); + return -ENOMEM; + } + + /* Note: data_len, pkt len are updated in rte_pktmbuf_append */ + memset(seg, 0, pad); + } + + /* Check non zero data_len */ + RTE_VERIFY(tx_pkt->data_len); + + tx_buf = &txr->tx_buf_ring[txr->tx_prod]; + tx_buf->mbuf = tx_pkt; + tx_buf->nr_bds = nr_bds; + + txbd = &txr->tx_desc_ring[txr->tx_prod]; + txbd->opaque = *coal_pkts; + txbd->flags_type = nr_bds << TX_BD_LONG_FLAGS_BD_CNT_SFT; + txbd->flags_type |= TX_BD_SHORT_FLAGS_COAL_NOW; + txbd->flags_type |= TX_BD_LONG_FLAGS_NO_CMPL; + txbd->len = tx_pkt->data_len; + if (tx_pkt->pkt_len >= 2014) + txbd->flags_type |= TX_BD_LONG_FLAGS_LHINT_GTE2K; + else + txbd->flags_type |= lhint_arr[tx_pkt->pkt_len >> 9]; + txbd->address = rte_cpu_to_le_64(rte_mbuf_data_iova(tx_buf->mbuf)); + *last_txbd = txbd; + + if (long_bd) { + txbd->flags_type |= TX_BD_LONG_TYPE_TX_BD_LONG; + vlan_tag_flags = 0; + cfa_action = 0; + /* HW can accelerate only outer vlan in QinQ mode */ + if (tx_buf->mbuf->ol_flags & PKT_TX_QINQ_PKT) { + vlan_tag_flags = TX_BD_LONG_CFA_META_KEY_VLAN_TAG | + tx_buf->mbuf->vlan_tci_outer; + outer_tpid_bd = txq->bp->outer_tpid_bd & + BNXT_OUTER_TPID_BD_MASK; + vlan_tag_flags |= outer_tpid_bd; + } else if (tx_buf->mbuf->ol_flags & PKT_TX_VLAN_PKT) { + /* shurd: Should this mask at + * TX_BD_LONG_CFA_META_VLAN_VID_MASK? 
+ */ + vlan_tag_flags = TX_BD_LONG_CFA_META_KEY_VLAN_TAG | + tx_buf->mbuf->vlan_tci; + /* Currently supports 8021Q, 8021AD vlan offloads + * QINQ1, QINQ2, QINQ3 vlan headers are deprecated + */ + /* DPDK only supports 802.11q VLAN packets */ + vlan_tag_flags |= + TX_BD_LONG_CFA_META_VLAN_TPID_TPID8100; + } + + txr->tx_prod = RING_NEXT(txr->tx_ring_struct, txr->tx_prod); + + txbd1 = (struct tx_bd_long_hi *) + &txr->tx_desc_ring[txr->tx_prod]; + txbd1->lflags = 0; + txbd1->cfa_meta = vlan_tag_flags; + txbd1->cfa_action = cfa_action; + + if (tx_pkt->ol_flags & PKT_TX_TCP_SEG) { + uint16_t hdr_size; + + /* TSO */ + txbd1->lflags |= TX_BD_LONG_LFLAGS_LSO | + TX_BD_LONG_LFLAGS_T_IPID; + hdr_size = tx_pkt->l2_len + tx_pkt->l3_len + + tx_pkt->l4_len; + hdr_size += (tx_pkt->ol_flags & PKT_TX_TUNNEL_MASK) ? + tx_pkt->outer_l2_len + + tx_pkt->outer_l3_len : 0; + /* The hdr_size is multiple of 16bit units not 8bit. + * Hence divide by 2. + */ + txbd1->hdr_size = hdr_size >> 1; + txbd1->mss = tx_pkt->tso_segsz; + RTE_VERIFY(txbd1->mss); + + } else if ((tx_pkt->ol_flags & PKT_TX_OIP_IIP_TCP_UDP_CKSUM) == + PKT_TX_OIP_IIP_TCP_UDP_CKSUM) { + /* Outer IP, Inner IP, Inner TCP/UDP CSO */ + txbd1->lflags |= TX_BD_FLG_TIP_IP_TCP_UDP_CHKSUM; + txbd1->mss = 0; + } else if ((tx_pkt->ol_flags & PKT_TX_OIP_IIP_TCP_CKSUM) == + PKT_TX_OIP_IIP_TCP_CKSUM) { + /* Outer IP, Inner IP, Inner TCP/UDP CSO */ + txbd1->lflags |= TX_BD_FLG_TIP_IP_TCP_UDP_CHKSUM; + txbd1->mss = 0; + } else if ((tx_pkt->ol_flags & PKT_TX_OIP_IIP_UDP_CKSUM) == + PKT_TX_OIP_IIP_UDP_CKSUM) { + /* Outer IP, Inner IP, Inner TCP/UDP CSO */ + txbd1->lflags |= TX_BD_FLG_TIP_IP_TCP_UDP_CHKSUM; + txbd1->mss = 0; + } else if ((tx_pkt->ol_flags & PKT_TX_IIP_TCP_UDP_CKSUM) == + PKT_TX_IIP_TCP_UDP_CKSUM) { + /* (Inner) IP, (Inner) TCP/UDP CSO */ + txbd1->lflags |= TX_BD_FLG_IP_TCP_UDP_CHKSUM; + txbd1->mss = 0; + } else if ((tx_pkt->ol_flags & PKT_TX_IIP_UDP_CKSUM) == + PKT_TX_IIP_UDP_CKSUM) { + /* (Inner) IP, (Inner) TCP/UDP CSO */ + txbd1->lflags |= TX_BD_FLG_IP_TCP_UDP_CHKSUM; + txbd1->mss = 0; + } else if ((tx_pkt->ol_flags & PKT_TX_IIP_TCP_CKSUM) == + PKT_TX_IIP_TCP_CKSUM) { + /* (Inner) IP, (Inner) TCP/UDP CSO */ + txbd1->lflags |= TX_BD_FLG_IP_TCP_UDP_CHKSUM; + txbd1->mss = 0; + } else if ((tx_pkt->ol_flags & PKT_TX_OIP_TCP_UDP_CKSUM) == + PKT_TX_OIP_TCP_UDP_CKSUM) { + /* Outer IP, (Inner) TCP/UDP CSO */ + txbd1->lflags |= TX_BD_FLG_TIP_TCP_UDP_CHKSUM; + txbd1->mss = 0; + } else if ((tx_pkt->ol_flags & PKT_TX_OIP_UDP_CKSUM) == + PKT_TX_OIP_UDP_CKSUM) { + /* Outer IP, (Inner) TCP/UDP CSO */ + txbd1->lflags |= TX_BD_FLG_TIP_TCP_UDP_CHKSUM; + txbd1->mss = 0; + } else if ((tx_pkt->ol_flags & PKT_TX_OIP_TCP_CKSUM) == + PKT_TX_OIP_TCP_CKSUM) { + /* Outer IP, (Inner) TCP/UDP CSO */ + txbd1->lflags |= TX_BD_FLG_TIP_TCP_UDP_CHKSUM; + txbd1->mss = 0; + } else if ((tx_pkt->ol_flags & PKT_TX_OIP_IIP_CKSUM) == + PKT_TX_OIP_IIP_CKSUM) { + /* Outer IP, Inner IP CSO */ + txbd1->lflags |= TX_BD_FLG_TIP_IP_CHKSUM; + txbd1->mss = 0; + } else if ((tx_pkt->ol_flags & PKT_TX_TCP_UDP_CKSUM) == + PKT_TX_TCP_UDP_CKSUM) { + /* TCP/UDP CSO */ + txbd1->lflags |= TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM; + txbd1->mss = 0; + } else if ((tx_pkt->ol_flags & PKT_TX_TCP_CKSUM) == + PKT_TX_TCP_CKSUM) { + /* TCP/UDP CSO */ + txbd1->lflags |= TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM; + txbd1->mss = 0; + } else if ((tx_pkt->ol_flags & PKT_TX_UDP_CKSUM) == + PKT_TX_UDP_CKSUM) { + /* TCP/UDP CSO */ + txbd1->lflags |= TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM; + txbd1->mss = 0; + } else if ((tx_pkt->ol_flags & 
PKT_TX_IP_CKSUM) == + PKT_TX_IP_CKSUM) { + /* IP CSO */ + txbd1->lflags |= TX_BD_LONG_LFLAGS_IP_CHKSUM; + txbd1->mss = 0; + } else if ((tx_pkt->ol_flags & PKT_TX_OUTER_IP_CKSUM) == + PKT_TX_OUTER_IP_CKSUM) { + /* IP CSO */ + txbd1->lflags |= TX_BD_LONG_LFLAGS_T_IP_CHKSUM; + txbd1->mss = 0; + } else if ((tx_pkt->ol_flags & PKT_TX_IEEE1588_TMST) == + PKT_TX_IEEE1588_TMST) { + /* PTP */ + txbd1->lflags |= TX_BD_LONG_LFLAGS_STAMP; + txbd1->mss = 0; + } + } else { + txbd->flags_type |= TX_BD_SHORT_TYPE_TX_BD_SHORT; + } + + m_seg = tx_pkt->next; + while (m_seg) { + /* Check non zero data_len */ + RTE_VERIFY(m_seg->data_len); + txr->tx_prod = RING_NEXT(txr->tx_ring_struct, txr->tx_prod); + tx_buf = &txr->tx_buf_ring[txr->tx_prod]; + tx_buf->mbuf = m_seg; + + txbd = &txr->tx_desc_ring[txr->tx_prod]; + txbd->address = rte_cpu_to_le_64(rte_mbuf_data_iova(m_seg)); + txbd->flags_type = TX_BD_SHORT_TYPE_TX_BD_SHORT; + txbd->len = m_seg->data_len; + + m_seg = m_seg->next; + } + + txbd->flags_type |= TX_BD_LONG_FLAGS_PACKET_END; + + txr->tx_prod = RING_NEXT(txr->tx_ring_struct, txr->tx_prod); + + return 0; +} + +static void bnxt_tx_cmp(struct bnxt_tx_queue *txq, int nr_pkts) +{ + struct bnxt_tx_ring_info *txr = txq->tx_ring; + struct rte_mempool *pool = NULL; + struct rte_mbuf **free = txq->free; + uint16_t cons = txr->tx_cons; + unsigned int blk = 0; + int i, j; + + for (i = 0; i < nr_pkts; i++) { + struct rte_mbuf *mbuf; + struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[cons]; + unsigned short nr_bds = tx_buf->nr_bds; + + for (j = 0; j < nr_bds; j++) { + mbuf = tx_buf->mbuf; + tx_buf->mbuf = NULL; + cons = RING_NEXT(txr->tx_ring_struct, cons); + tx_buf = &txr->tx_buf_ring[cons]; + if (!mbuf) /* long_bd's tx_buf ? */ + continue; + + mbuf = rte_pktmbuf_prefree_seg(mbuf); + if (unlikely(!mbuf)) + continue; + + /* EW - no need to unmap DMA memory? */ + + if (likely(mbuf->pool == pool)) { + /* Add mbuf to the bulk free array */ + free[blk++] = mbuf; + } else { + /* Found an mbuf from a different pool. 
Free + * mbufs accumulated so far to the previous + * pool + */ + if (likely(pool != NULL)) + rte_mempool_put_bulk(pool, + (void *)free, + blk); + + /* Start accumulating mbufs in a new pool */ + free[0] = mbuf; + pool = mbuf->pool; + blk = 1; + } + } + } + if (blk) + rte_mempool_put_bulk(pool, (void *)free, blk); + + txr->tx_cons = cons; +} + +static int bnxt_handle_tx_cp(struct bnxt_tx_queue *txq) +{ + struct bnxt_cp_ring_info *cpr = txq->cp_ring; + uint32_t raw_cons = cpr->cp_raw_cons; + uint32_t cons; + uint32_t nb_tx_pkts = 0; + struct tx_cmpl *txcmp; + struct cmpl_base *cp_desc_ring = cpr->cp_desc_ring; + struct bnxt_ring *cp_ring_struct = cpr->cp_ring_struct; + uint32_t ring_mask = cp_ring_struct->ring_mask; + uint32_t opaque = 0; + + if (bnxt_tx_bds_in_hw(txq) < txq->tx_free_thresh) + return 0; + + do { + cons = RING_CMPL(ring_mask, raw_cons); + txcmp = (struct tx_cmpl *)&cpr->cp_desc_ring[cons]; + rte_prefetch_non_temporal(&cp_desc_ring[(cons + 2) & + ring_mask]); + + if (!CMPL_VALID(txcmp, cpr->valid)) + break; + opaque = rte_cpu_to_le_32(txcmp->opaque); + NEXT_CMPL(cpr, cons, cpr->valid, 1); + rte_prefetch0(&cp_desc_ring[cons]); + + if (CMP_TYPE(txcmp) == TX_CMPL_TYPE_TX_L2) + nb_tx_pkts += opaque; + else + RTE_LOG_DP(ERR, PMD, + "Unhandled CMP type %02x\n", + CMP_TYPE(txcmp)); + raw_cons = cons; + } while (nb_tx_pkts < ring_mask); + + if (nb_tx_pkts) { + bnxt_tx_cmp(txq, nb_tx_pkts); + cpr->cp_raw_cons = raw_cons; + bnxt_db_cq(cpr); + } + + return nb_tx_pkts; +} + +uint16_t bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + int rc; + uint16_t nb_tx_pkts = 0; + uint16_t coal_pkts = 0; + struct bnxt_tx_queue *txq = tx_queue; + struct tx_bd_long *last_txbd = NULL; + + /* Handle TX completions */ + bnxt_handle_tx_cp(txq); + + /* Tx queue was stopped; wait for it to be restarted */ + if (unlikely(!txq->tx_started)) { + PMD_DRV_LOG(DEBUG, "Tx q stopped;return\n"); + return 0; + } + + /* Handle TX burst request */ + for (nb_tx_pkts = 0; nb_tx_pkts < nb_pkts; nb_tx_pkts++) { + coal_pkts++; + rc = bnxt_start_xmit(tx_pkts[nb_tx_pkts], txq, + &coal_pkts, &last_txbd); + + if (unlikely(rc)) + break; + } + + if (likely(nb_tx_pkts)) { + /* Request a completion on the last packet */ + last_txbd->flags_type &= ~TX_BD_LONG_FLAGS_NO_CMPL; + bnxt_db_write(&txq->tx_ring->tx_db, txq->tx_ring->tx_prod); + } + + return nb_tx_pkts; +} + +/* + * Dummy DPDK callback for TX. + * + * This function is used to temporarily replace the real callback during + * unsafe control operations on the queue, or in case of error. 
+ */ +uint16_t +bnxt_dummy_xmit_pkts(void *tx_queue __rte_unused, + struct rte_mbuf **tx_pkts __rte_unused, + uint16_t nb_pkts __rte_unused) +{ + return 0; +} + +int bnxt_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id) +{ + struct bnxt *bp = dev->data->dev_private; + struct bnxt_tx_queue *txq = bp->tx_queues[tx_queue_id]; + int rc = 0; + + rc = is_bnxt_in_error(bp); + if (rc) + return rc; + + dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; + txq->tx_started = true; + PMD_DRV_LOG(DEBUG, "Tx queue started\n"); + + return 0; +} + +int bnxt_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id) +{ + struct bnxt *bp = dev->data->dev_private; + struct bnxt_tx_queue *txq = bp->tx_queues[tx_queue_id]; + int rc = 0; + + rc = is_bnxt_in_error(bp); + if (rc) + return rc; + + /* Handle TX completions */ + bnxt_handle_tx_cp(txq); + + dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; + txq->tx_started = false; + PMD_DRV_LOG(DEBUG, "Tx queue stopped\n"); + + return 0; +} diff --git a/src/spdk/dpdk/drivers/net/bnxt/bnxt_txr.h b/src/spdk/dpdk/drivers/net/bnxt/bnxt_txr.h new file mode 100644 index 000000000..e7f43f9d1 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnxt/bnxt_txr.h @@ -0,0 +1,101 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Broadcom + * All rights reserved. + */ + +#ifndef _BNXT_TXR_H_ +#define _BNXT_TXR_H_ + +#include + +#define MAX_TX_RINGS 16 +#define BNXT_TX_PUSH_THRESH 92 +#define BNXT_MAX_TSO_SEGS 32 +#define BNXT_MIN_PKT_SIZE 52 + +#define B_TX_DB(db, prod) rte_write32((DB_KEY_TX | (prod)), db) + +struct bnxt_tx_ring_info { + uint16_t tx_prod; + uint16_t tx_cons; + struct bnxt_db_info tx_db; + + struct tx_bd_long *tx_desc_ring; + struct bnxt_sw_tx_bd *tx_buf_ring; + + rte_iova_t tx_desc_mapping; + +#define BNXT_DEV_STATE_CLOSING 0x1 + uint32_t dev_state; + + struct bnxt_ring *tx_ring_struct; +}; + +struct bnxt_sw_tx_bd { + struct rte_mbuf *mbuf; /* mbuf associated with TX descriptor */ + uint8_t is_gso; + unsigned short nr_bds; +}; + +static inline uint32_t bnxt_tx_bds_in_hw(struct bnxt_tx_queue *txq) +{ + return ((txq->tx_ring->tx_prod - txq->tx_ring->tx_cons) & + txq->tx_ring->tx_ring_struct->ring_mask); +} + +static inline uint32_t bnxt_tx_avail(struct bnxt_tx_queue *txq) +{ + /* Tell compiler to fetch tx indices from memory. 
*/ + rte_compiler_barrier(); + + return ((txq->tx_ring->tx_ring_struct->ring_size - + bnxt_tx_bds_in_hw(txq)) - 1); +} + +void bnxt_free_tx_rings(struct bnxt *bp); +int bnxt_init_one_tx_ring(struct bnxt_tx_queue *txq); +int bnxt_init_tx_ring_struct(struct bnxt_tx_queue *txq, unsigned int socket_id); +uint16_t bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); +uint16_t bnxt_dummy_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); +#ifdef RTE_ARCH_X86 +uint16_t bnxt_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); +#endif + +int bnxt_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id); +int bnxt_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id); + +#define PKT_TX_OIP_IIP_TCP_UDP_CKSUM (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM | \ + PKT_TX_IP_CKSUM | PKT_TX_OUTER_IP_CKSUM) +#define PKT_TX_OIP_IIP_UDP_CKSUM (PKT_TX_UDP_CKSUM | \ + PKT_TX_IP_CKSUM | PKT_TX_OUTER_IP_CKSUM) +#define PKT_TX_OIP_IIP_TCP_CKSUM (PKT_TX_TCP_CKSUM | \ + PKT_TX_IP_CKSUM | PKT_TX_OUTER_IP_CKSUM) +#define PKT_TX_IIP_TCP_UDP_CKSUM (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM | \ + PKT_TX_IP_CKSUM) +#define PKT_TX_IIP_TCP_CKSUM (PKT_TX_TCP_CKSUM | PKT_TX_IP_CKSUM) +#define PKT_TX_IIP_UDP_CKSUM (PKT_TX_UDP_CKSUM | PKT_TX_IP_CKSUM) +#define PKT_TX_OIP_TCP_UDP_CKSUM (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM | \ + PKT_TX_OUTER_IP_CKSUM) +#define PKT_TX_OIP_UDP_CKSUM (PKT_TX_UDP_CKSUM | \ + PKT_TX_OUTER_IP_CKSUM) +#define PKT_TX_OIP_TCP_CKSUM (PKT_TX_TCP_CKSUM | \ + PKT_TX_OUTER_IP_CKSUM) +#define PKT_TX_OIP_IIP_CKSUM (PKT_TX_IP_CKSUM | \ + PKT_TX_OUTER_IP_CKSUM) +#define PKT_TX_TCP_UDP_CKSUM (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM) + + +#define TX_BD_FLG_TIP_IP_TCP_UDP_CHKSUM (TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM | \ + TX_BD_LONG_LFLAGS_T_IP_CHKSUM | \ + TX_BD_LONG_LFLAGS_IP_CHKSUM) +#define TX_BD_FLG_IP_TCP_UDP_CHKSUM (TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM | \ + TX_BD_LONG_LFLAGS_IP_CHKSUM) +#define TX_BD_FLG_TIP_IP_CHKSUM (TX_BD_LONG_LFLAGS_T_IP_CHKSUM | \ + TX_BD_LONG_LFLAGS_IP_CHKSUM) +#define TX_BD_FLG_TIP_TCP_UDP_CHKSUM (TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM | \ + TX_BD_LONG_LFLAGS_T_IP_CHKSUM) + +#endif diff --git a/src/spdk/dpdk/drivers/net/bnxt/bnxt_util.c b/src/spdk/dpdk/drivers/net/bnxt/bnxt_util.c new file mode 100644 index 000000000..dda40af28 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnxt/bnxt_util.c @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Broadcom + * All rights reserved. + */ + +#include +#include + +#include "bnxt_util.h" + +int bnxt_check_zero_bytes(const uint8_t *bytes, int len) +{ + int i; + + for (i = 0; i < len; i++) + if (bytes[i] != 0x00) + return 0; + return 1; +} + +void bnxt_eth_hw_addr_random(uint8_t *mac_addr) +{ + rte_eth_random_addr(mac_addr); + + /* Set Organizationally Unique Identifier (OUI) prefix */ + mac_addr[0] = 0x00; + mac_addr[1] = 0x0a; + mac_addr[2] = 0xf7; +} diff --git a/src/spdk/dpdk/drivers/net/bnxt/bnxt_util.h b/src/spdk/dpdk/drivers/net/bnxt/bnxt_util.h new file mode 100644 index 000000000..a15b3a1a9 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnxt/bnxt_util.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Broadcom + * All rights reserved. 
+ */ + +#ifndef _BNXT_UTIL_H_ +#define _BNXT_UTIL_H_ + +#ifndef BIT +#define BIT(n) (1UL << (n)) +#endif /* BIT */ + +int bnxt_check_zero_bytes(const uint8_t *bytes, int len); +void bnxt_eth_hw_addr_random(uint8_t *mac_addr); + +#endif /* _BNXT_UTIL_H_ */ diff --git a/src/spdk/dpdk/drivers/net/bnxt/bnxt_vnic.c b/src/spdk/dpdk/drivers/net/bnxt/bnxt_vnic.c new file mode 100644 index 000000000..326c0d1b6 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnxt/bnxt_vnic.c @@ -0,0 +1,252 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Broadcom + * All rights reserved. + */ + +#include + +#include +#include + +#include "bnxt.h" +#include "bnxt_vnic.h" +#include "hsi_struct_def_dpdk.h" + +/* + * VNIC Functions + */ + +void prandom_bytes(void *dest_ptr, size_t len) +{ + char *dest = (char *)dest_ptr; + uint64_t rb; + + while (len) { + rb = rte_rand(); + if (len >= 8) { + memcpy(dest, &rb, 8); + len -= 8; + dest += 8; + } else { + memcpy(dest, &rb, len); + dest += len; + len = 0; + } + } +} + +static void bnxt_init_vnics(struct bnxt *bp) +{ + struct bnxt_vnic_info *vnic; + uint16_t max_vnics; + int i; + + max_vnics = bp->max_vnics; + STAILQ_INIT(&bp->free_vnic_list); + for (i = 0; i < max_vnics; i++) { + vnic = &bp->vnic_info[i]; + vnic->fw_vnic_id = (uint16_t)HWRM_NA_SIGNATURE; + vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE; + vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE; + vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE; + vnic->hash_mode = + HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_DEFAULT; + vnic->rx_queue_cnt = 0; + + STAILQ_INIT(&vnic->filter); + STAILQ_INIT(&vnic->flow_list); + STAILQ_INSERT_TAIL(&bp->free_vnic_list, vnic, next); + } +} + +struct bnxt_vnic_info *bnxt_alloc_vnic(struct bnxt *bp) +{ + struct bnxt_vnic_info *vnic; + + /* Find the 1st unused vnic from the free_vnic_list pool*/ + vnic = STAILQ_FIRST(&bp->free_vnic_list); + if (!vnic) { + PMD_DRV_LOG(ERR, "No more free VNIC resources\n"); + return NULL; + } + STAILQ_REMOVE_HEAD(&bp->free_vnic_list, next); + return vnic; +} + +void bnxt_free_all_vnics(struct bnxt *bp) +{ + struct bnxt_vnic_info *vnic; + unsigned int i; + + for (i = 0; i < bp->max_vnics; i++) { + vnic = &bp->vnic_info[i]; + STAILQ_INSERT_TAIL(&bp->free_vnic_list, vnic, next); + vnic->rx_queue_cnt = 0; + } +} + +void bnxt_free_vnic_attributes(struct bnxt *bp) +{ + struct bnxt_vnic_info *vnic; + unsigned int i; + + if (bp->vnic_info == NULL) + return; + + for (i = 0; i < bp->max_vnics; i++) { + vnic = &bp->vnic_info[i]; + if (vnic->rss_table) { + /* 'Unreserve' the rss_table */ + /* N/A */ + + vnic->rss_table = NULL; + } + + if (vnic->rss_hash_key) { + /* 'Unreserve' the rss_hash_key */ + /* N/A */ + + vnic->rss_hash_key = NULL; + } + } +} + +int bnxt_alloc_vnic_attributes(struct bnxt *bp) +{ + struct bnxt_vnic_info *vnic; + struct rte_pci_device *pdev = bp->pdev; + const struct rte_memzone *mz; + char mz_name[RTE_MEMZONE_NAMESIZE]; + uint32_t entry_length; + size_t rss_table_size; + uint16_t max_vnics; + int i; + rte_iova_t mz_phys_addr; + + entry_length = HW_HASH_KEY_SIZE + + BNXT_MAX_MC_ADDRS * RTE_ETHER_ADDR_LEN; + + if (BNXT_CHIP_THOR(bp)) + rss_table_size = BNXT_RSS_TBL_SIZE_THOR * + 2 * sizeof(*vnic->rss_table); + else + rss_table_size = HW_HASH_INDEX_SIZE * sizeof(*vnic->rss_table); + + entry_length = RTE_CACHE_LINE_ROUNDUP(entry_length + rss_table_size); + + max_vnics = bp->max_vnics; + snprintf(mz_name, RTE_MEMZONE_NAMESIZE, + "bnxt_" PCI_PRI_FMT "_vnicattr", pdev->addr.domain, + pdev->addr.bus, pdev->addr.devid, pdev->addr.function); + 
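+	/* Defensively NUL-terminate the memzone name before looking it up or
+	 * reserving it below.
+	 */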
mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0; + mz = rte_memzone_lookup(mz_name); + if (!mz) { + mz = rte_memzone_reserve(mz_name, + entry_length * max_vnics, SOCKET_ID_ANY, + RTE_MEMZONE_2MB | + RTE_MEMZONE_SIZE_HINT_ONLY | + RTE_MEMZONE_IOVA_CONTIG); + if (!mz) + return -ENOMEM; + } + mz_phys_addr = mz->iova; + + for (i = 0; i < max_vnics; i++) { + vnic = &bp->vnic_info[i]; + + /* Allocate rss table and hash key */ + vnic->rss_table = + (void *)((char *)mz->addr + (entry_length * i)); + memset(vnic->rss_table, -1, entry_length); + + vnic->rss_table_dma_addr = mz_phys_addr + (entry_length * i); + vnic->rss_hash_key = (void *)((char *)vnic->rss_table + + rss_table_size); + + vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + + rss_table_size; + vnic->mc_list = (void *)((char *)vnic->rss_hash_key + + HW_HASH_KEY_SIZE); + vnic->mc_list_dma_addr = vnic->rss_hash_key_dma_addr + + HW_HASH_KEY_SIZE; + prandom_bytes(vnic->rss_hash_key, HW_HASH_KEY_SIZE); + } + + return 0; +} + +void bnxt_free_vnic_mem(struct bnxt *bp) +{ + struct bnxt_vnic_info *vnic; + uint16_t max_vnics, i; + + if (bp->vnic_info == NULL) + return; + + max_vnics = bp->max_vnics; + for (i = 0; i < max_vnics; i++) { + vnic = &bp->vnic_info[i]; + if (vnic->fw_vnic_id != (uint16_t)HWRM_NA_SIGNATURE) { + PMD_DRV_LOG(ERR, "VNIC is not freed yet!\n"); + /* TODO Call HWRM to free VNIC */ + } + } + + rte_free(bp->vnic_info); + bp->vnic_info = NULL; +} + +int bnxt_alloc_vnic_mem(struct bnxt *bp) +{ + struct bnxt_vnic_info *vnic_mem; + uint16_t max_vnics; + + max_vnics = bp->max_vnics; + /* Allocate memory for VNIC pool and filter pool */ + vnic_mem = rte_zmalloc("bnxt_vnic_info", + max_vnics * sizeof(struct bnxt_vnic_info), 0); + if (vnic_mem == NULL) { + PMD_DRV_LOG(ERR, "Failed to alloc memory for %d VNICs", + max_vnics); + return -ENOMEM; + } + bp->vnic_info = vnic_mem; + bnxt_init_vnics(bp); + return 0; +} + +int bnxt_vnic_grp_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic) +{ + uint32_t size = sizeof(*vnic->fw_grp_ids) * bp->max_ring_grps; + + vnic->fw_grp_ids = rte_zmalloc("vnic_fw_grp_ids", size, 0); + if (!vnic->fw_grp_ids) { + PMD_DRV_LOG(ERR, + "Failed to alloc %d bytes for group ids\n", + size); + return -ENOMEM; + } + memset(vnic->fw_grp_ids, -1, size); + + return 0; +} + +uint16_t bnxt_rte_to_hwrm_hash_types(uint64_t rte_type) +{ + uint16_t hwrm_type = 0; + + if (rte_type & ETH_RSS_IPV4) + hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4; + if (rte_type & ETH_RSS_NONFRAG_IPV4_TCP) + hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4; + if (rte_type & ETH_RSS_NONFRAG_IPV4_UDP) + hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4; + if (rte_type & ETH_RSS_IPV6) + hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6; + if (rte_type & ETH_RSS_NONFRAG_IPV6_TCP) + hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6; + if (rte_type & ETH_RSS_NONFRAG_IPV6_UDP) + hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6; + + return hwrm_type; +} diff --git a/src/spdk/dpdk/drivers/net/bnxt/bnxt_vnic.h b/src/spdk/dpdk/drivers/net/bnxt/bnxt_vnic.h new file mode 100644 index 000000000..a372b899b --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnxt/bnxt_vnic.h @@ -0,0 +1,72 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Broadcom + * All rights reserved. 
+ */ + +#ifndef _BNXT_VNIC_H_ +#define _BNXT_VNIC_H_ + +#include +#include + +#define INVALID_VNIC_ID ((uint16_t)-1) + +struct bnxt_vnic_info { + STAILQ_ENTRY(bnxt_vnic_info) next; + uint8_t ff_pool_idx; + + uint16_t fw_vnic_id; /* returned by Chimp during alloc */ + uint16_t rss_rule; + uint16_t start_grp_id; + uint16_t end_grp_id; + uint16_t *fw_grp_ids; + uint16_t num_lb_ctxts; + uint16_t dflt_ring_grp; + uint16_t mru; + uint16_t hash_type; + uint8_t hash_mode; + rte_iova_t rss_table_dma_addr; + uint16_t *rss_table; + rte_iova_t rss_hash_key_dma_addr; + void *rss_hash_key; + rte_iova_t mc_list_dma_addr; + char *mc_list; + uint32_t mc_addr_cnt; +#define BNXT_MAX_MC_ADDRS 16 + uint32_t flags; +#define BNXT_VNIC_INFO_PROMISC (1 << 0) +#define BNXT_VNIC_INFO_ALLMULTI (1 << 1) +#define BNXT_VNIC_INFO_BCAST (1 << 2) +#define BNXT_VNIC_INFO_UCAST (1 << 3) +#define BNXT_VNIC_INFO_MCAST (1 << 4) +#define BNXT_VNIC_INFO_TAGGED (1 << 5) +#define BNXT_VNIC_INFO_UNTAGGED (1 << 6) + + uint16_t cos_rule; + uint16_t lb_rule; + uint16_t rx_queue_cnt; + uint16_t cos_queue_id; + bool vlan_strip; + bool func_default; + bool bd_stall; + bool roce_dual; + bool roce_only; + bool rss_dflt_cr; + + STAILQ_HEAD(, bnxt_filter_info) filter; + STAILQ_HEAD(, rte_flow) flow_list; +}; + +struct bnxt; +int bnxt_free_vnic(struct bnxt *bp, struct bnxt_vnic_info *vnic, + int pool); +struct bnxt_vnic_info *bnxt_alloc_vnic(struct bnxt *bp); +void bnxt_free_all_vnics(struct bnxt *bp); +void bnxt_free_vnic_attributes(struct bnxt *bp); +int bnxt_alloc_vnic_attributes(struct bnxt *bp); +void bnxt_free_vnic_mem(struct bnxt *bp); +int bnxt_alloc_vnic_mem(struct bnxt *bp); +int bnxt_vnic_grp_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic); +void prandom_bytes(void *dest_ptr, size_t len); +uint16_t bnxt_rte_to_hwrm_hash_types(uint64_t rte_type); +#endif diff --git a/src/spdk/dpdk/drivers/net/bnxt/hsi_struct_def_dpdk.h b/src/spdk/dpdk/drivers/net/bnxt/hsi_struct_def_dpdk.h new file mode 100644 index 000000000..7e30c9ffc --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnxt/hsi_struct_def_dpdk.h @@ -0,0 +1,38674 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2014-2020 Broadcom Inc. + * All rights reserved. + * + * DO NOT MODIFY!!! This file is automatically generated. + */ + +#ifndef _HSI_STRUCT_DEF_DPDK_H_ +#define _HSI_STRUCT_DEF_DPDK_H_ + +/* This is the HWRM command header. */ +/* hwrm_cmd_hdr (size:128b/16B) */ +struct hwrm_cmd_hdr { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; +} __rte_packed; + +/* This is the HWRM response header. 
*/ +/* hwrm_resp_hdr (size:64b/8B) */ +struct hwrm_resp_hdr { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; +} __rte_packed; + +/* + * TLV encapsulated message. Use the TLV type field of the + * TLV to determine the type of message encapsulated. + */ +#define CMD_DISCR_TLV_ENCAP UINT32_C(0x8000) +#define CMD_DISCR_LAST CMD_DISCR_TLV_ENCAP + + +/* HWRM request message */ +#define TLV_TYPE_HWRM_REQUEST UINT32_C(0x1) +/* HWRM response message */ +#define TLV_TYPE_HWRM_RESPONSE UINT32_C(0x2) +/* RoCE slow path command */ +#define TLV_TYPE_ROCE_SP_COMMAND UINT32_C(0x3) +/* RoCE slow path command to query CC Gen1 support. */ +#define TLV_TYPE_QUERY_ROCE_CC_GEN1 UINT32_C(0x4) +/* RoCE slow path command to modify CC Gen1 support. */ +#define TLV_TYPE_MODIFY_ROCE_CC_GEN1 UINT32_C(0x5) +/* Engine CKV - The Alias key EC curve and ECC public key information. */ +#define TLV_TYPE_ENGINE_CKV_ALIAS_ECC_PUBLIC_KEY UINT32_C(0x8001) +/* Engine CKV - Initialization vector. */ +#define TLV_TYPE_ENGINE_CKV_IV UINT32_C(0x8003) +/* Engine CKV - Authentication tag. */ +#define TLV_TYPE_ENGINE_CKV_AUTH_TAG UINT32_C(0x8004) +/* Engine CKV - The encrypted data. */ +#define TLV_TYPE_ENGINE_CKV_CIPHERTEXT UINT32_C(0x8005) +/* Engine CKV - Supported host_algorithms. */ +#define TLV_TYPE_ENGINE_CKV_HOST_ALGORITHMS UINT32_C(0x8006) +/* Engine CKV - The Host EC curve name and ECC public key information. */ +#define TLV_TYPE_ENGINE_CKV_HOST_ECC_PUBLIC_KEY UINT32_C(0x8007) +/* Engine CKV - The ECDSA signature. */ +#define TLV_TYPE_ENGINE_CKV_ECDSA_SIGNATURE UINT32_C(0x8008) +/* Engine CKV - The firmware EC curve name and ECC public key information. */ +#define TLV_TYPE_ENGINE_CKV_FW_ECC_PUBLIC_KEY UINT32_C(0x8009) +/* Engine CKV - Supported firmware algorithms. */ +#define TLV_TYPE_ENGINE_CKV_FW_ALGORITHMS UINT32_C(0x800a) +#define TLV_TYPE_LAST \ + TLV_TYPE_ENGINE_CKV_FW_ALGORITHMS + + +/* tlv (size:64b/8B) */ +struct tlv { + /* + * The command discriminator is used to differentiate between various + * types of HWRM messages. This includes legacy HWRM and RoCE slowpath + * command messages as well as newer TLV encapsulated HWRM commands. + * + * For TLV encapsulated messages this field must be 0x8000. + */ + uint16_t cmd_discr; + uint8_t reserved_8b; + uint8_t flags; + /* + * Indicates the presence of additional TLV encapsulated data + * follows this TLV. + */ + #define TLV_FLAGS_MORE UINT32_C(0x1) + /* Last TLV in a sequence of TLVs. */ + #define TLV_FLAGS_MORE_LAST UINT32_C(0x0) + /* More TLVs follow this TLV. */ + #define TLV_FLAGS_MORE_NOT_LAST UINT32_C(0x1) + /* + * When an HWRM receiver detects a TLV type that it does not + * support with the TLV required flag set, the receiver must + * reject the HWRM message with an error code indicating an + * unsupported TLV type. + */ + #define TLV_FLAGS_REQUIRED UINT32_C(0x2) + /* No */ + #define TLV_FLAGS_REQUIRED_NO (UINT32_C(0x0) << 1) + /* Yes */ + #define TLV_FLAGS_REQUIRED_YES (UINT32_C(0x1) << 1) + #define TLV_FLAGS_REQUIRED_LAST TLV_FLAGS_REQUIRED_YES + /* + * This field defines the TLV type value which is divided into + * two ranges to differentiate between global and local TLV types. + * Global TLV types must be unique across all defined TLV types. 
+ * Local TLV types are valid only for extensions to a given + * HWRM message and may be repeated across different HWRM message + * types. There is a direct correlation of each HWRM message type + * to a single global TLV type value. + * + * Global TLV range: `0 - (63k-1)` + * + * Local TLV range: `63k - (64k-1)` + */ + uint16_t tlv_type; + /* + * Length of the message data encapsulated by this TLV in bytes. + * This length does not include the size of the TLV header itself + * and it must be an integer multiple of 8B. + */ + uint16_t length; +} __rte_packed; + +/* Input */ +/* input (size:128b/16B) */ +struct input { + /* + * This value indicates what type of request this is. The format + * for the rest of the command is determined by this field. + */ + uint16_t req_type; + /* + * This value indicates the what completion ring the request will + * be optionally completed on. If the value is -1, then no + * CR completion will be generated. Any other value must be a + * valid CR ring_id value for this function. + */ + uint16_t cmpl_ring; + /* This value indicates the command sequence number. */ + uint16_t seq_id; + /* + * Target ID of this command. + * + * 0x0 - 0xFFF8 - Used for function ids + * 0xFFF8 - 0xFFFE - Reserved for internal processors + * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * This is the host address where the response will be written + * when the request is complete. This area must be 16B aligned + * and must be cleared to zero before the request is made. + */ + uint64_t resp_addr; +} __rte_packed; + +/* Output */ +/* output (size:64b/8B) */ +struct output { + /* + * Pass/Fail or error type + * + * Note: receiver to verify the in parameters, and fail the call + * with an error when appropriate + */ + uint16_t error_code; + /* This field returns the type of original request. */ + uint16_t req_type; + /* This field provides original sequence number of the command. */ + uint16_t seq_id; + /* + * This field is the length of the response in bytes. The + * last byte of the response is a valid flag that will read + * as '1' when the command has been completely written to + * memory. + */ + uint16_t resp_len; +} __rte_packed; + +/* Short Command Structure */ +/* hwrm_short_input (size:128b/16B) */ +struct hwrm_short_input { + /* + * This field indicates the type of request in the request buffer. + * The format for the rest of the command (request) is determined + * by this field. + */ + uint16_t req_type; + /* + * This field indicates a signature that is used to identify short + * form of the command listed here. This field shall be set to + * 17185 (0x4321). + */ + uint16_t signature; + /* Signature indicating this is a short form of HWRM command */ + #define HWRM_SHORT_INPUT_SIGNATURE_SHORT_CMD UINT32_C(0x4321) + #define HWRM_SHORT_INPUT_SIGNATURE_LAST \ + HWRM_SHORT_INPUT_SIGNATURE_SHORT_CMD + /* The target ID of the command */ + uint16_t target_id; + /* Default target_id (0x0) to maintain compatibility with old driver */ + #define HWRM_SHORT_INPUT_TARGET_ID_DEFAULT UINT32_C(0x0) + /* Reserved for user-space HWRM interface */ + #define HWRM_SHORT_INPUT_TARGET_ID_TOOLS UINT32_C(0xfffd) + #define HWRM_SHORT_INPUT_TARGET_ID_LAST \ + HWRM_SHORT_INPUT_TARGET_ID_TOOLS + /* This value indicates the length of the request. */ + uint16_t size; + /* + * This is the host address where the request was written. + * This area must be 16B aligned. 
+ */ + uint64_t req_addr; +} __rte_packed; + +/* + * Command numbering + * # NOTE - definitions already in hwrm_req_type, in hwrm_types.yaml + * # So only structure definition is provided here. + */ +/* cmd_nums (size:64b/8B) */ +struct cmd_nums { + /* + * This version of the specification defines the commands listed in + * the table below. The following are general implementation + * requirements for these commands: + * + * # All commands listed below that are marked neither + * reserved nor experimental shall be implemented by the HWRM. + * # A HWRM client compliant to this specification should not use + * commands outside of the list below. + * # A HWRM client compliant to this specification should not use + * command numbers marked reserved below. + * # A command marked experimental below may not be implemented + * by the HWRM. + * # A command marked experimental may change in the + * future version of the HWRM specification. + * # A command not listed below may be implemented by the HWRM. + * The behavior of commands that are not listed below is outside + * the scope of this specification. + */ + uint16_t req_type; + #define HWRM_VER_GET UINT32_C(0x0) + #define HWRM_ERROR_RECOVERY_QCFG UINT32_C(0xc) + #define HWRM_FUNC_DRV_IF_CHANGE UINT32_C(0xd) + #define HWRM_FUNC_BUF_UNRGTR UINT32_C(0xe) + #define HWRM_FUNC_VF_CFG UINT32_C(0xf) + /* Reserved for future use. */ + #define HWRM_RESERVED1 UINT32_C(0x10) + #define HWRM_FUNC_RESET UINT32_C(0x11) + #define HWRM_FUNC_GETFID UINT32_C(0x12) + #define HWRM_FUNC_VF_ALLOC UINT32_C(0x13) + #define HWRM_FUNC_VF_FREE UINT32_C(0x14) + #define HWRM_FUNC_QCAPS UINT32_C(0x15) + #define HWRM_FUNC_QCFG UINT32_C(0x16) + #define HWRM_FUNC_CFG UINT32_C(0x17) + #define HWRM_FUNC_QSTATS UINT32_C(0x18) + #define HWRM_FUNC_CLR_STATS UINT32_C(0x19) + #define HWRM_FUNC_DRV_UNRGTR UINT32_C(0x1a) + #define HWRM_FUNC_VF_RESC_FREE UINT32_C(0x1b) + #define HWRM_FUNC_VF_VNIC_IDS_QUERY UINT32_C(0x1c) + #define HWRM_FUNC_DRV_RGTR UINT32_C(0x1d) + #define HWRM_FUNC_DRV_QVER UINT32_C(0x1e) + #define HWRM_FUNC_BUF_RGTR UINT32_C(0x1f) + #define HWRM_PORT_PHY_CFG UINT32_C(0x20) + #define HWRM_PORT_MAC_CFG UINT32_C(0x21) + /* Experimental */ + #define HWRM_PORT_TS_QUERY UINT32_C(0x22) + #define HWRM_PORT_QSTATS UINT32_C(0x23) + #define HWRM_PORT_LPBK_QSTATS UINT32_C(0x24) + /* Experimental */ + #define HWRM_PORT_CLR_STATS UINT32_C(0x25) + /* Experimental */ + #define HWRM_PORT_LPBK_CLR_STATS UINT32_C(0x26) + #define HWRM_PORT_PHY_QCFG UINT32_C(0x27) + #define HWRM_PORT_MAC_QCFG UINT32_C(0x28) + /* Experimental */ + #define HWRM_PORT_MAC_PTP_QCFG UINT32_C(0x29) + #define HWRM_PORT_PHY_QCAPS UINT32_C(0x2a) + #define HWRM_PORT_PHY_I2C_WRITE UINT32_C(0x2b) + #define HWRM_PORT_PHY_I2C_READ UINT32_C(0x2c) + #define HWRM_PORT_LED_CFG UINT32_C(0x2d) + #define HWRM_PORT_LED_QCFG UINT32_C(0x2e) + #define HWRM_PORT_LED_QCAPS UINT32_C(0x2f) + #define HWRM_QUEUE_QPORTCFG UINT32_C(0x30) + #define HWRM_QUEUE_QCFG UINT32_C(0x31) + #define HWRM_QUEUE_CFG UINT32_C(0x32) + #define HWRM_FUNC_VLAN_CFG UINT32_C(0x33) + #define HWRM_FUNC_VLAN_QCFG UINT32_C(0x34) + #define HWRM_QUEUE_PFCENABLE_QCFG UINT32_C(0x35) + #define HWRM_QUEUE_PFCENABLE_CFG UINT32_C(0x36) + #define HWRM_QUEUE_PRI2COS_QCFG UINT32_C(0x37) + #define HWRM_QUEUE_PRI2COS_CFG UINT32_C(0x38) + #define HWRM_QUEUE_COS2BW_QCFG UINT32_C(0x39) + #define HWRM_QUEUE_COS2BW_CFG UINT32_C(0x3a) + #define HWRM_QUEUE_DSCP_QCAPS UINT32_C(0x3b) + #define HWRM_QUEUE_DSCP2PRI_QCFG UINT32_C(0x3c) + #define HWRM_QUEUE_DSCP2PRI_CFG UINT32_C(0x3d) + 
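+	/* VNIC management commands. */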
#define HWRM_VNIC_ALLOC UINT32_C(0x40) + #define HWRM_VNIC_FREE UINT32_C(0x41) + #define HWRM_VNIC_CFG UINT32_C(0x42) + #define HWRM_VNIC_QCFG UINT32_C(0x43) + #define HWRM_VNIC_TPA_CFG UINT32_C(0x44) + /* Experimental */ + #define HWRM_VNIC_TPA_QCFG UINT32_C(0x45) + #define HWRM_VNIC_RSS_CFG UINT32_C(0x46) + #define HWRM_VNIC_RSS_QCFG UINT32_C(0x47) + #define HWRM_VNIC_PLCMODES_CFG UINT32_C(0x48) + #define HWRM_VNIC_PLCMODES_QCFG UINT32_C(0x49) + #define HWRM_VNIC_QCAPS UINT32_C(0x4a) + #define HWRM_RING_ALLOC UINT32_C(0x50) + #define HWRM_RING_FREE UINT32_C(0x51) + #define HWRM_RING_CMPL_RING_QAGGINT_PARAMS UINT32_C(0x52) + #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS UINT32_C(0x53) + #define HWRM_RING_AGGINT_QCAPS UINT32_C(0x54) + #define HWRM_RING_RESET UINT32_C(0x5e) + #define HWRM_RING_GRP_ALLOC UINT32_C(0x60) + #define HWRM_RING_GRP_FREE UINT32_C(0x61) + /* Reserved for future use. */ + #define HWRM_RESERVED5 UINT32_C(0x64) + /* Reserved for future use. */ + #define HWRM_RESERVED6 UINT32_C(0x65) + #define HWRM_VNIC_RSS_COS_LB_CTX_ALLOC UINT32_C(0x70) + #define HWRM_VNIC_RSS_COS_LB_CTX_FREE UINT32_C(0x71) + #define HWRM_QUEUE_MPLS_QCAPS UINT32_C(0x80) + #define HWRM_QUEUE_MPLSTC2PRI_QCFG UINT32_C(0x81) + #define HWRM_QUEUE_MPLSTC2PRI_CFG UINT32_C(0x82) + #define HWRM_CFA_L2_FILTER_ALLOC UINT32_C(0x90) + #define HWRM_CFA_L2_FILTER_FREE UINT32_C(0x91) + #define HWRM_CFA_L2_FILTER_CFG UINT32_C(0x92) + #define HWRM_CFA_L2_SET_RX_MASK UINT32_C(0x93) + #define HWRM_CFA_VLAN_ANTISPOOF_CFG UINT32_C(0x94) + #define HWRM_CFA_TUNNEL_FILTER_ALLOC UINT32_C(0x95) + #define HWRM_CFA_TUNNEL_FILTER_FREE UINT32_C(0x96) + /* Experimental */ + #define HWRM_CFA_ENCAP_RECORD_ALLOC UINT32_C(0x97) + /* Experimental */ + #define HWRM_CFA_ENCAP_RECORD_FREE UINT32_C(0x98) + #define HWRM_CFA_NTUPLE_FILTER_ALLOC UINT32_C(0x99) + #define HWRM_CFA_NTUPLE_FILTER_FREE UINT32_C(0x9a) + #define HWRM_CFA_NTUPLE_FILTER_CFG UINT32_C(0x9b) + /* Experimental */ + #define HWRM_CFA_EM_FLOW_ALLOC UINT32_C(0x9c) + /* Experimental */ + #define HWRM_CFA_EM_FLOW_FREE UINT32_C(0x9d) + /* Experimental */ + #define HWRM_CFA_EM_FLOW_CFG UINT32_C(0x9e) + #define HWRM_TUNNEL_DST_PORT_QUERY UINT32_C(0xa0) + #define HWRM_TUNNEL_DST_PORT_ALLOC UINT32_C(0xa1) + #define HWRM_TUNNEL_DST_PORT_FREE UINT32_C(0xa2) + #define HWRM_STAT_CTX_ENG_QUERY UINT32_C(0xaf) + #define HWRM_STAT_CTX_ALLOC UINT32_C(0xb0) + #define HWRM_STAT_CTX_FREE UINT32_C(0xb1) + #define HWRM_STAT_CTX_QUERY UINT32_C(0xb2) + #define HWRM_STAT_CTX_CLR_STATS UINT32_C(0xb3) + #define HWRM_PORT_QSTATS_EXT UINT32_C(0xb4) + #define HWRM_PORT_PHY_MDIO_WRITE UINT32_C(0xb5) + #define HWRM_PORT_PHY_MDIO_READ UINT32_C(0xb6) + #define HWRM_PORT_PHY_MDIO_BUS_ACQUIRE UINT32_C(0xb7) + #define HWRM_PORT_PHY_MDIO_BUS_RELEASE UINT32_C(0xb8) + #define HWRM_PORT_QSTATS_EXT_PFC_WD UINT32_C(0xb9) + #define HWRM_PORT_ECN_QSTATS UINT32_C(0xba) + #define HWRM_FW_RESET UINT32_C(0xc0) + #define HWRM_FW_QSTATUS UINT32_C(0xc1) + #define HWRM_FW_HEALTH_CHECK UINT32_C(0xc2) + #define HWRM_FW_SYNC UINT32_C(0xc3) + #define HWRM_FW_STATE_QCAPS UINT32_C(0xc4) + #define HWRM_FW_STATE_QUIESCE UINT32_C(0xc5) + #define HWRM_FW_STATE_BACKUP UINT32_C(0xc6) + #define HWRM_FW_STATE_RESTORE UINT32_C(0xc7) + /* Experimental */ + #define HWRM_FW_SET_TIME UINT32_C(0xc8) + /* Experimental */ + #define HWRM_FW_GET_TIME UINT32_C(0xc9) + /* Experimental */ + #define HWRM_FW_SET_STRUCTURED_DATA UINT32_C(0xca) + /* Experimental */ + #define HWRM_FW_GET_STRUCTURED_DATA UINT32_C(0xcb) + /* Experimental */ + #define 
HWRM_FW_IPC_MAILBOX UINT32_C(0xcc) + #define HWRM_FW_ECN_CFG UINT32_C(0xcd) + #define HWRM_FW_ECN_QCFG UINT32_C(0xce) + #define HWRM_EXEC_FWD_RESP UINT32_C(0xd0) + #define HWRM_REJECT_FWD_RESP UINT32_C(0xd1) + #define HWRM_FWD_RESP UINT32_C(0xd2) + #define HWRM_FWD_ASYNC_EVENT_CMPL UINT32_C(0xd3) + #define HWRM_OEM_CMD UINT32_C(0xd4) + /* Tells the fw to run PRBS test on a given port and lane. */ + #define HWRM_PORT_PRBS_TEST UINT32_C(0xd5) + #define HWRM_PORT_SFP_SIDEBAND_CFG UINT32_C(0xd6) + #define HWRM_PORT_SFP_SIDEBAND_QCFG UINT32_C(0xd7) + #define HWRM_FW_STATE_UNQUIESCE UINT32_C(0xd8) + /* Tells the fw to collect dsc dump on a given port and lane. */ + #define HWRM_PORT_DSC_DUMP UINT32_C(0xd9) + #define HWRM_TEMP_MONITOR_QUERY UINT32_C(0xe0) + #define HWRM_REG_POWER_QUERY UINT32_C(0xe1) + #define HWRM_CORE_FREQUENCY_QUERY UINT32_C(0xe2) + #define HWRM_REG_POWER_HISTOGRAM UINT32_C(0xe3) + #define HWRM_WOL_FILTER_ALLOC UINT32_C(0xf0) + #define HWRM_WOL_FILTER_FREE UINT32_C(0xf1) + #define HWRM_WOL_FILTER_QCFG UINT32_C(0xf2) + #define HWRM_WOL_REASON_QCFG UINT32_C(0xf3) + /* Experimental */ + #define HWRM_CFA_METER_QCAPS UINT32_C(0xf4) + /* Experimental */ + #define HWRM_CFA_METER_PROFILE_ALLOC UINT32_C(0xf5) + /* Experimental */ + #define HWRM_CFA_METER_PROFILE_FREE UINT32_C(0xf6) + /* Experimental */ + #define HWRM_CFA_METER_PROFILE_CFG UINT32_C(0xf7) + /* Experimental */ + #define HWRM_CFA_METER_INSTANCE_ALLOC UINT32_C(0xf8) + /* Experimental */ + #define HWRM_CFA_METER_INSTANCE_FREE UINT32_C(0xf9) + /* Experimental */ + #define HWRM_CFA_METER_INSTANCE_CFG UINT32_C(0xfa) + /* Experimental */ + #define HWRM_CFA_VFR_ALLOC UINT32_C(0xfd) + /* Experimental */ + #define HWRM_CFA_VFR_FREE UINT32_C(0xfe) + /* Experimental */ + #define HWRM_CFA_VF_PAIR_ALLOC UINT32_C(0x100) + /* Experimental */ + #define HWRM_CFA_VF_PAIR_FREE UINT32_C(0x101) + /* Experimental */ + #define HWRM_CFA_VF_PAIR_INFO UINT32_C(0x102) + /* Experimental */ + #define HWRM_CFA_FLOW_ALLOC UINT32_C(0x103) + /* Experimental */ + #define HWRM_CFA_FLOW_FREE UINT32_C(0x104) + /* Experimental */ + #define HWRM_CFA_FLOW_FLUSH UINT32_C(0x105) + /* Experimental */ + #define HWRM_CFA_FLOW_STATS UINT32_C(0x106) + /* Experimental */ + #define HWRM_CFA_FLOW_INFO UINT32_C(0x107) + /* Experimental */ + #define HWRM_CFA_DECAP_FILTER_ALLOC UINT32_C(0x108) + /* Experimental */ + #define HWRM_CFA_DECAP_FILTER_FREE UINT32_C(0x109) + #define HWRM_CFA_VLAN_ANTISPOOF_QCFG UINT32_C(0x10a) + #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC UINT32_C(0x10b) + #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE UINT32_C(0x10c) + /* Experimental */ + #define HWRM_CFA_PAIR_ALLOC UINT32_C(0x10d) + /* Experimental */ + #define HWRM_CFA_PAIR_FREE UINT32_C(0x10e) + /* Experimental */ + #define HWRM_CFA_PAIR_INFO UINT32_C(0x10f) + /* Experimental */ + #define HWRM_FW_IPC_MSG UINT32_C(0x110) + #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO UINT32_C(0x111) + #define HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE UINT32_C(0x112) + /* Experimental */ + #define HWRM_CFA_FLOW_AGING_TIMER_RESET UINT32_C(0x113) + /* Experimental */ + #define HWRM_CFA_FLOW_AGING_CFG UINT32_C(0x114) + /* Experimental */ + #define HWRM_CFA_FLOW_AGING_QCFG UINT32_C(0x115) + /* Experimental */ + #define HWRM_CFA_FLOW_AGING_QCAPS UINT32_C(0x116) + /* Experimental */ + #define HWRM_CFA_CTX_MEM_RGTR UINT32_C(0x117) + /* Experimental */ + #define HWRM_CFA_CTX_MEM_UNRGTR UINT32_C(0x118) + /* Experimental */ + #define HWRM_CFA_CTX_MEM_QCTX UINT32_C(0x119) + /* Experimental */ + #define HWRM_CFA_CTX_MEM_QCAPS 
UINT32_C(0x11a) + /* Experimental */ + #define HWRM_CFA_COUNTER_QCAPS UINT32_C(0x11b) + /* Experimental */ + #define HWRM_CFA_COUNTER_CFG UINT32_C(0x11c) + /* Experimental */ + #define HWRM_CFA_COUNTER_QCFG UINT32_C(0x11d) + /* Experimental */ + #define HWRM_CFA_COUNTER_QSTATS UINT32_C(0x11e) + /* Experimental */ + #define HWRM_CFA_TCP_FLAG_PROCESS_QCFG UINT32_C(0x11f) + /* Experimental */ + #define HWRM_CFA_EEM_QCAPS UINT32_C(0x120) + /* Experimental */ + #define HWRM_CFA_EEM_CFG UINT32_C(0x121) + /* Experimental */ + #define HWRM_CFA_EEM_QCFG UINT32_C(0x122) + /* Experimental */ + #define HWRM_CFA_EEM_OP UINT32_C(0x123) + /* Experimental */ + #define HWRM_CFA_ADV_FLOW_MGNT_QCAPS UINT32_C(0x124) + /* Experimental - DEPRECATED */ + #define HWRM_CFA_TFLIB UINT32_C(0x125) + /* Engine CKV - Get the current allocation status of keys provisioned in the key vault. */ + #define HWRM_ENGINE_CKV_STATUS UINT32_C(0x12e) + /* Engine CKV - Add a new CKEK used to encrypt keys. */ + #define HWRM_ENGINE_CKV_CKEK_ADD UINT32_C(0x12f) + /* Engine CKV - Delete a previously added CKEK. */ + #define HWRM_ENGINE_CKV_CKEK_DELETE UINT32_C(0x130) + /* Engine CKV - Add a new key to the key vault. */ + #define HWRM_ENGINE_CKV_KEY_ADD UINT32_C(0x131) + /* Engine CKV - Delete a key from the key vault. */ + #define HWRM_ENGINE_CKV_KEY_DELETE UINT32_C(0x132) + /* Engine CKV - Delete all keys from the key vault. */ + #define HWRM_ENGINE_CKV_FLUSH UINT32_C(0x133) + /* Engine CKV - Get random data. */ + #define HWRM_ENGINE_CKV_RNG_GET UINT32_C(0x134) + /* Engine CKV - Generate and encrypt a new AES key. */ + #define HWRM_ENGINE_CKV_KEY_GEN UINT32_C(0x135) + /* Engine CKV - Configure a label index with a label value. */ + #define HWRM_ENGINE_CKV_KEY_LABEL_CFG UINT32_C(0x136) + /* Engine CKV - Query a label */ + #define HWRM_ENGINE_CKV_KEY_LABEL_QCFG UINT32_C(0x137) + /* Engine - Query the available queue groups configuration. */ + #define HWRM_ENGINE_QG_CONFIG_QUERY UINT32_C(0x13c) + /* Engine - Query the queue groups assigned to a function. */ + #define HWRM_ENGINE_QG_QUERY UINT32_C(0x13d) + /* Engine - Query the available queue group meter profile configuration. */ + #define HWRM_ENGINE_QG_METER_PROFILE_CONFIG_QUERY UINT32_C(0x13e) + /* Engine - Query the configuration of a queue group meter profile. */ + #define HWRM_ENGINE_QG_METER_PROFILE_QUERY UINT32_C(0x13f) + /* Engine - Allocate a queue group meter profile. */ + #define HWRM_ENGINE_QG_METER_PROFILE_ALLOC UINT32_C(0x140) + /* Engine - Free a queue group meter profile. */ + #define HWRM_ENGINE_QG_METER_PROFILE_FREE UINT32_C(0x141) + /* Engine - Query the meters assigned to a queue group. */ + #define HWRM_ENGINE_QG_METER_QUERY UINT32_C(0x142) + /* Engine - Bind a queue group meter profile to a queue group. */ + #define HWRM_ENGINE_QG_METER_BIND UINT32_C(0x143) + /* Engine - Unbind a queue group meter profile from a queue group. */ + #define HWRM_ENGINE_QG_METER_UNBIND UINT32_C(0x144) + /* Engine - Bind a queue group to a function. */ + #define HWRM_ENGINE_QG_FUNC_BIND UINT32_C(0x145) + /* Engine - Query the scheduling group configuration. */ + #define HWRM_ENGINE_SG_CONFIG_QUERY UINT32_C(0x146) + /* Engine - Query the queue groups assigned to a scheduling group. */ + #define HWRM_ENGINE_SG_QUERY UINT32_C(0x147) + /* Engine - Query the configuration of a scheduling group's meter profiles. */ + #define HWRM_ENGINE_SG_METER_QUERY UINT32_C(0x148) + /* Engine - Configure a scheduling group's meter profiles. 
*/ + #define HWRM_ENGINE_SG_METER_CONFIG UINT32_C(0x149) + /* Engine - Bind a queue group to a scheduling group. */ + #define HWRM_ENGINE_SG_QG_BIND UINT32_C(0x14a) + /* Engine - Unbind a queue group from its scheduling group. */ + #define HWRM_ENGINE_QG_SG_UNBIND UINT32_C(0x14b) + /* Engine - Query the Engine configuration. */ + #define HWRM_ENGINE_CONFIG_QUERY UINT32_C(0x154) + /* Engine - Configure the statistics accumulator for an Engine. */ + #define HWRM_ENGINE_STATS_CONFIG UINT32_C(0x155) + /* Engine - Clear the statistics accumulator for an Engine. */ + #define HWRM_ENGINE_STATS_CLEAR UINT32_C(0x156) + /* Engine - Query the statistics accumulator for an Engine. */ + #define HWRM_ENGINE_STATS_QUERY UINT32_C(0x157) + /* Engine - Query statistics counters for continuous errors from all CDDIP Engines. */ + #define HWRM_ENGINE_STATS_QUERY_CONTINUOUS_ERROR UINT32_C(0x158) + /* Engine - Allocate an Engine RQ. */ + #define HWRM_ENGINE_RQ_ALLOC UINT32_C(0x15e) + /* Engine - Free an Engine RQ. */ + #define HWRM_ENGINE_RQ_FREE UINT32_C(0x15f) + /* Engine - Allocate an Engine CQ. */ + #define HWRM_ENGINE_CQ_ALLOC UINT32_C(0x160) + /* Engine - Free an Engine CQ. */ + #define HWRM_ENGINE_CQ_FREE UINT32_C(0x161) + /* Engine - Allocate an NQ. */ + #define HWRM_ENGINE_NQ_ALLOC UINT32_C(0x162) + /* Engine - Free an NQ. */ + #define HWRM_ENGINE_NQ_FREE UINT32_C(0x163) + /* Engine - Set the on-die RQE credit update location. */ + #define HWRM_ENGINE_ON_DIE_RQE_CREDITS UINT32_C(0x164) + /* Engine - Query the engine function configuration. */ + #define HWRM_ENGINE_FUNC_QCFG UINT32_C(0x165) + /* Experimental */ + #define HWRM_FUNC_RESOURCE_QCAPS UINT32_C(0x190) + /* Experimental */ + #define HWRM_FUNC_VF_RESOURCE_CFG UINT32_C(0x191) + /* Experimental */ + #define HWRM_FUNC_BACKING_STORE_QCAPS UINT32_C(0x192) + /* Experimental */ + #define HWRM_FUNC_BACKING_STORE_CFG UINT32_C(0x193) + /* Experimental */ + #define HWRM_FUNC_BACKING_STORE_QCFG UINT32_C(0x194) + /* Configures the BW of any VF */ + #define HWRM_FUNC_VF_BW_CFG UINT32_C(0x195) + /* Queries the BW of any VF */ + #define HWRM_FUNC_VF_BW_QCFG UINT32_C(0x196) + /* Queries pf ids belong to specified host(s) */ + #define HWRM_FUNC_HOST_PF_IDS_QUERY UINT32_C(0x197) + /* Experimental */ + #define HWRM_SELFTEST_QLIST UINT32_C(0x200) + /* Experimental */ + #define HWRM_SELFTEST_EXEC UINT32_C(0x201) + /* Experimental */ + #define HWRM_SELFTEST_IRQ UINT32_C(0x202) + /* Experimental */ + #define HWRM_SELFTEST_RETRIEVE_SERDES_DATA UINT32_C(0x203) + /* Experimental */ + #define HWRM_PCIE_QSTATS UINT32_C(0x204) + /* Experimental */ + #define HWRM_MFG_FRU_WRITE_CONTROL UINT32_C(0x205) + /* Returns the current value of a free running counter from the device. */ + #define HWRM_MFG_TIMERS_QUERY UINT32_C(0x206) + /* Experimental */ + #define HWRM_MFG_OTP_CFG UINT32_C(0x207) + /* Experimental */ + #define HWRM_MFG_OTP_QCFG UINT32_C(0x208) + /* + * Tells the fw to run the DMA read from the host and DMA write + * to the host test. 
+ */ + #define HWRM_MFG_HDMA_TEST UINT32_C(0x209) + /* Tells the fw to program the fru memory */ + #define HWRM_MFG_FRU_EEPROM_WRITE UINT32_C(0x20a) + /* Tells the fw to read the fru memory */ + #define HWRM_MFG_FRU_EEPROM_READ UINT32_C(0x20b) + /* Experimental */ + #define HWRM_TF UINT32_C(0x2bc) + /* Experimental */ + #define HWRM_TF_VERSION_GET UINT32_C(0x2bd) + /* Experimental */ + #define HWRM_TF_SESSION_OPEN UINT32_C(0x2c6) + /* Experimental */ + #define HWRM_TF_SESSION_ATTACH UINT32_C(0x2c7) + /* Experimental */ + #define HWRM_TF_SESSION_CLOSE UINT32_C(0x2c8) + /* Experimental */ + #define HWRM_TF_SESSION_QCFG UINT32_C(0x2c9) + /* Experimental */ + #define HWRM_TF_SESSION_RESC_QCAPS UINT32_C(0x2ca) + /* Experimental */ + #define HWRM_TF_SESSION_RESC_ALLOC UINT32_C(0x2cb) + /* Experimental */ + #define HWRM_TF_SESSION_RESC_FREE UINT32_C(0x2cc) + /* Experimental */ + #define HWRM_TF_SESSION_RESC_FLUSH UINT32_C(0x2cd) + /* Experimental */ + #define HWRM_TF_TBL_TYPE_GET UINT32_C(0x2d0) + /* Experimental */ + #define HWRM_TF_TBL_TYPE_SET UINT32_C(0x2d1) + /* Experimental */ + #define HWRM_TF_CTXT_MEM_RGTR UINT32_C(0x2da) + /* Experimental */ + #define HWRM_TF_CTXT_MEM_UNRGTR UINT32_C(0x2db) + /* Experimental */ + #define HWRM_TF_EXT_EM_QCAPS UINT32_C(0x2dc) + /* Experimental */ + #define HWRM_TF_EXT_EM_OP UINT32_C(0x2dd) + /* Experimental */ + #define HWRM_TF_EXT_EM_CFG UINT32_C(0x2de) + /* Experimental */ + #define HWRM_TF_EXT_EM_QCFG UINT32_C(0x2df) + /* Experimental */ + #define HWRM_TF_TCAM_SET UINT32_C(0x2ee) + /* Experimental */ + #define HWRM_TF_TCAM_GET UINT32_C(0x2ef) + /* Experimental */ + #define HWRM_TF_TCAM_MOVE UINT32_C(0x2f0) + /* Experimental */ + #define HWRM_TF_TCAM_FREE UINT32_C(0x2f1) + /* Experimental */ + #define HWRM_SV UINT32_C(0x400) + /* Experimental */ + #define HWRM_DBG_READ_DIRECT UINT32_C(0xff10) + /* Experimental */ + #define HWRM_DBG_READ_INDIRECT UINT32_C(0xff11) + /* Experimental */ + #define HWRM_DBG_WRITE_DIRECT UINT32_C(0xff12) + /* Experimental */ + #define HWRM_DBG_WRITE_INDIRECT UINT32_C(0xff13) + #define HWRM_DBG_DUMP UINT32_C(0xff14) + /* Experimental */ + #define HWRM_DBG_ERASE_NVM UINT32_C(0xff15) + /* Experimental */ + #define HWRM_DBG_CFG UINT32_C(0xff16) + /* Experimental */ + #define HWRM_DBG_COREDUMP_LIST UINT32_C(0xff17) + /* Experimental */ + #define HWRM_DBG_COREDUMP_INITIATE UINT32_C(0xff18) + /* Experimental */ + #define HWRM_DBG_COREDUMP_RETRIEVE UINT32_C(0xff19) + /* Experimental */ + #define HWRM_DBG_FW_CLI UINT32_C(0xff1a) + /* */ + #define HWRM_DBG_I2C_CMD UINT32_C(0xff1b) + /* */ + #define HWRM_DBG_RING_INFO_GET UINT32_C(0xff1c) + /* Experimental */ + #define HWRM_DBG_CRASHDUMP_HEADER UINT32_C(0xff1d) + /* Experimental */ + #define HWRM_DBG_CRASHDUMP_ERASE UINT32_C(0xff1e) + /* Send driver debug information to firmware */ + #define HWRM_DBG_DRV_TRACE UINT32_C(0xff1f) + /* Experimental */ + #define HWRM_NVM_FACTORY_DEFAULTS UINT32_C(0xffee) + #define HWRM_NVM_VALIDATE_OPTION UINT32_C(0xffef) + #define HWRM_NVM_FLUSH UINT32_C(0xfff0) + #define HWRM_NVM_GET_VARIABLE UINT32_C(0xfff1) + #define HWRM_NVM_SET_VARIABLE UINT32_C(0xfff2) + #define HWRM_NVM_INSTALL_UPDATE UINT32_C(0xfff3) + #define HWRM_NVM_MODIFY UINT32_C(0xfff4) + #define HWRM_NVM_VERIFY_UPDATE UINT32_C(0xfff5) + #define HWRM_NVM_GET_DEV_INFO UINT32_C(0xfff6) + #define HWRM_NVM_ERASE_DIR_ENTRY UINT32_C(0xfff7) + #define HWRM_NVM_MOD_DIR_ENTRY UINT32_C(0xfff8) + #define HWRM_NVM_FIND_DIR_ENTRY UINT32_C(0xfff9) + #define HWRM_NVM_GET_DIR_ENTRIES UINT32_C(0xfffa) + #define 
HWRM_NVM_GET_DIR_INFO UINT32_C(0xfffb) + #define HWRM_NVM_RAW_DUMP UINT32_C(0xfffc) + #define HWRM_NVM_READ UINT32_C(0xfffd) + #define HWRM_NVM_WRITE UINT32_C(0xfffe) + #define HWRM_NVM_RAW_WRITE_BLK UINT32_C(0xffff) + #define HWRM_LAST HWRM_NVM_RAW_WRITE_BLK + uint16_t unused_0[3]; +} __rte_packed; + +/* Return Codes */ +/* ret_codes (size:64b/8B) */ +struct ret_codes { + uint16_t error_code; + /* Request was successfully executed by the HWRM. */ + #define HWRM_ERR_CODE_SUCCESS UINT32_C(0x0) + /* The HWRM failed to execute the request. */ + #define HWRM_ERR_CODE_FAIL UINT32_C(0x1) + /* + * The request contains invalid argument(s) or input + * parameters. + */ + #define HWRM_ERR_CODE_INVALID_PARAMS UINT32_C(0x2) + /* + * The requester is not allowed to access the requested + * resource. This error code shall be provided in a + * response to a request to query or modify an existing + * resource that is not accessible by the requester. + */ + #define HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED UINT32_C(0x3) + /* + * The HWRM is unable to allocate the requested resource. + * This code only applies to requests for HWRM resource + * allocations. + */ + #define HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR UINT32_C(0x4) + /* + * Invalid combination of flags is specified in the + * request. + */ + #define HWRM_ERR_CODE_INVALID_FLAGS UINT32_C(0x5) + /* + * Invalid combination of enables fields is specified in + * the request. + */ + #define HWRM_ERR_CODE_INVALID_ENABLES UINT32_C(0x6) + /* + * Request contains a required TLV that is not supported by + * the installed version of firmware. + */ + #define HWRM_ERR_CODE_UNSUPPORTED_TLV UINT32_C(0x7) + /* + * No firmware buffer available to accept the request. Driver + * should retry the request. + */ + #define HWRM_ERR_CODE_NO_BUFFER UINT32_C(0x8) + /* + * This error code is only reported by firmware when some + * sub-option of a supported HWRM command is unsupported. + */ + #define HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR UINT32_C(0x9) + /* + * This error code is only reported by firmware when the specific + * request is not able to process when the HOT reset in progress. + */ + #define HWRM_ERR_CODE_HOT_RESET_PROGRESS UINT32_C(0xa) + /* + * This error code is only reported by firmware when the registered + * driver instances are not capable of hot reset. + */ + #define HWRM_ERR_CODE_HOT_RESET_FAIL UINT32_C(0xb) + /* + * This error code is only reported by the firmware when during + * flow allocation when a request for a flow counter fails because + * the number of flow counters are exhausted. + */ + #define HWRM_ERR_CODE_NO_FLOW_COUNTER_DURING_ALLOC UINT32_C(0xc) + /* + * This error code is only reported by firmware when the registered + * driver instances requested to offloaded a flow but was unable to because + * the requested key's hash collides with the installed keys. + */ + #define HWRM_ERR_CODE_KEY_HASH_COLLISION UINT32_C(0xd) + /* + * This error code is only reported by firmware when the registered + * driver instances requested to offloaded a flow but was unable to because + * the same key has already been installed. + */ + #define HWRM_ERR_CODE_KEY_ALREADY_EXISTS UINT32_C(0xe) + /* + * Generic HWRM execution error that represents an + * internal error. + */ + #define HWRM_ERR_CODE_HWRM_ERROR UINT32_C(0xf) + /* + * Firmware is unable to service the request at the present time. Caller + * may try again later. 
+ */ + #define HWRM_ERR_CODE_BUSY UINT32_C(0x10) + /* + * This value indicates that the HWRM response is in TLV format and + * should be interpreted as one or more TLVs starting with the + * hwrm_resp_hdr TLV. This value is not an indication of any error + * by itself, just an indication that the response should be parsed + * as TLV and the actual error code will be in the hwrm_resp_hdr TLV. + */ + #define HWRM_ERR_CODE_TLV_ENCAPSULATED_RESPONSE UINT32_C(0x8000) + /* Unknown error */ + #define HWRM_ERR_CODE_UNKNOWN_ERR UINT32_C(0xfffe) + /* Unsupported or invalid command */ + #define HWRM_ERR_CODE_CMD_NOT_SUPPORTED UINT32_C(0xffff) + #define HWRM_ERR_CODE_LAST \ + HWRM_ERR_CODE_CMD_NOT_SUPPORTED + uint16_t unused_0[3]; +} __rte_packed; + +/* Output */ +/* hwrm_err_output (size:128b/16B) */ +struct hwrm_err_output { + /* + * Pass/Fail or error type + * + * Note: receiver to verify the in parameters, and fail the call + * with an error when appropriate + */ + uint16_t error_code; + /* This field returns the type of original request. */ + uint16_t req_type; + /* This field provides original sequence number of the command. */ + uint16_t seq_id; + /* + * This field is the length of the response in bytes. The + * last byte of the response is a valid flag that will read + * as '1' when the command has been completely written to + * memory. + */ + uint16_t resp_len; + /* debug info for this error response. */ + uint32_t opaque_0; + /* debug info for this error response. */ + uint16_t opaque_1; + /* + * In the case of an error response, command specific error + * code is returned in this field. + */ + uint8_t cmd_err; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; +/* + * Following is the signature for HWRM message field that indicates not + * applicable (All F's). Need to cast it the size of the field if needed. + */ +#define HWRM_NA_SIGNATURE ((uint32_t)(-1)) +/* hwrm_func_buf_rgtr */ +#define HWRM_MAX_REQ_LEN 128 +/* hwrm_cfa_flow_info */ +#define HWRM_MAX_RESP_LEN 704 +/* 7 bit indirection table index. */ +#define HW_HASH_INDEX_SIZE 0x80 +#define HW_HASH_KEY_SIZE 40 +/* valid key for HWRM response */ +#define HWRM_RESP_VALID_KEY 1 +/* Reserved for BONO processor */ +#define HWRM_TARGET_ID_BONO 0xFFF8 +/* Reserved for KONG processor */ +#define HWRM_TARGET_ID_KONG 0xFFF9 +/* Reserved for APE processor */ +#define HWRM_TARGET_ID_APE 0xFFFA +/* + * This value will be used by tools for User-space HWRM Interface. + * When tool execute any HWRM command with this target_id, firmware + * will copy the response and/or data payload via register space instead + * of DMAing it. + */ +#define HWRM_TARGET_ID_TOOLS 0xFFFD +#define HWRM_VERSION_MAJOR 1 +#define HWRM_VERSION_MINOR 10 +#define HWRM_VERSION_UPDATE 1 +/* non-zero means beta version */ +#define HWRM_VERSION_RSVD 30 +#define HWRM_VERSION_STR "1.10.1.30" + +/**************** + * hwrm_ver_get * + ****************/ + + +/* hwrm_ver_get_input (size:192b/24B) */ +struct hwrm_ver_get_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. 
+ */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* + * This field represents the major version of HWRM interface + * specification supported by the driver HWRM implementation. + * The interface major version is intended to change only when + * non backward compatible changes are made to the HWRM + * interface specification. + */ + uint8_t hwrm_intf_maj; + /* + * This field represents the minor version of HWRM interface + * specification supported by the driver HWRM implementation. + * A change in interface minor version is used to reflect + * significant backward compatible modification to HWRM + * interface specification. + * This can be due to addition or removal of functionality. + * HWRM interface specifications with the same major version + * but different minor versions are compatible. + */ + uint8_t hwrm_intf_min; + /* + * This field represents the update version of HWRM interface + * specification supported by the driver HWRM implementation. + * The interface update version is used to reflect minor + * changes or bug fixes to a released HWRM interface + * specification. + */ + uint8_t hwrm_intf_upd; + uint8_t unused_0[5]; +} __rte_packed; + +/* hwrm_ver_get_output (size:1408b/176B) */ +struct hwrm_ver_get_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* + * This field represents the major version of HWRM interface + * specification supported by the HWRM implementation. + * The interface major version is intended to change only when + * non backward compatible changes are made to the HWRM + * interface specification. + * A HWRM implementation that is compliant with this + * specification shall provide value of 1 in this field. + */ + uint8_t hwrm_intf_maj_8b; + /* + * This field represents the minor version of HWRM interface + * specification supported by the HWRM implementation. + * A change in interface minor version is used to reflect + * significant backward compatible modification to HWRM + * interface specification. + * This can be due to addition or removal of functionality. + * HWRM interface specifications with the same major version + * but different minor versions are compatible. + * A HWRM implementation that is compliant with this + * specification shall provide value of 2 in this field. + */ + uint8_t hwrm_intf_min_8b; + /* + * This field represents the update version of HWRM interface + * specification supported by the HWRM implementation. + * The interface update version is used to reflect minor + * changes or bug fixes to a released HWRM interface + * specification. 
+ * A HWRM implementation that is compliant with this + * specification shall provide value of 2 in this field. + */ + uint8_t hwrm_intf_upd_8b; + uint8_t hwrm_intf_rsvd_8b; + /* + * This field represents the major version of HWRM firmware. + * A change in firmware major version represents a major + * firmware release. + */ + uint8_t hwrm_fw_maj_8b; + /* + * This field represents the minor version of HWRM firmware. + * A change in firmware minor version represents significant + * firmware functionality changes. + */ + uint8_t hwrm_fw_min_8b; + /* + * This field represents the build version of HWRM firmware. + * A change in firmware build version represents bug fixes + * to a released firmware. + */ + uint8_t hwrm_fw_bld_8b; + /* + * This field is a reserved field. This field can be used to + * represent firmware branches or customer specific releases + * tied to a specific (major,minor,update) version of the + * HWRM firmware. + */ + uint8_t hwrm_fw_rsvd_8b; + /* + * This field represents the major version of mgmt firmware. + * A change in major version represents a major release. + */ + uint8_t mgmt_fw_maj_8b; + /* + * This field represents the minor version of mgmt firmware. + * A change in minor version represents significant + * functionality changes. + */ + uint8_t mgmt_fw_min_8b; + /* + * This field represents the build version of mgmt firmware. + * A change in update version represents bug fixes. + */ + uint8_t mgmt_fw_bld_8b; + /* + * This field is a reserved field. This field can be used to + * represent firmware branches or customer specific releases + * tied to a specific (major,minor,update) version + */ + uint8_t mgmt_fw_rsvd_8b; + /* + * This field represents the major version of network + * control firmware. + * A change in major version represents a major release. + */ + uint8_t netctrl_fw_maj_8b; + /* + * This field represents the minor version of network + * control firmware. + * A change in minor version represents significant + * functionality changes. + */ + uint8_t netctrl_fw_min_8b; + /* + * This field represents the build version of network + * control firmware. + * A change in update version represents bug fixes. + */ + uint8_t netctrl_fw_bld_8b; + /* + * This field is a reserved field. This field can be used to + * represent firmware branches or customer specific releases + * tied to a specific (major,minor,update) version + */ + uint8_t netctrl_fw_rsvd_8b; + /* + * This field is used to indicate device's capabilities and + * configurations. + */ + uint32_t dev_caps_cfg; + /* + * If set to 1, then secure firmware update behavior + * is supported. + * If set to 0, then secure firmware update behavior is + * not supported. + */ + #define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SECURE_FW_UPD_SUPPORTED \ + UINT32_C(0x1) + /* + * If set to 1, then firmware based DCBX agent is supported. + * If set to 0, then firmware based DCBX agent capability + * is not supported on this device. + */ + #define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_FW_DCBX_AGENT_SUPPORTED \ + UINT32_C(0x2) + /* + * If set to 1, then HWRM short command format is supported. + * If set to 0, then HWRM short command format is not supported. + */ + #define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED \ + UINT32_C(0x4) + /* + * If set to 1, then HWRM short command format is required. + * If set to 0, then HWRM short command format is not required. + */ + #define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED \ + UINT32_C(0x8) + /* + * If set to 1, then the KONG host mailbox channel is supported. 
+ * If set to 0, then the KONG host mailbox channel is not supported. + * By default, this flag should be 0 for older version of core firmware. + */ + #define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED \ + UINT32_C(0x10) + /* + * If set to 1, then the 64bit flow handle is supported in addition to the + * legacy 16bit flow handle. If set to 0, then the 64bit flow handle is not + * supported. By default, this flag should be 0 for older version of core firmware. + */ + #define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED \ + UINT32_C(0x20) + /* + * If set to 1, then filter type can be provided in filter_alloc or filter_cfg + * filter types like L2 for l2 traffic and ROCE for roce & l2 traffic. + * If set to 0, then filter types not supported. + * By default, this flag should be 0 for older version of core firmware. + */ + #define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_L2_FILTER_TYPES_ROCE_OR_L2_SUPPORTED \ + UINT32_C(0x40) + /* + * If set to 1, firmware is capable to support virtio vSwitch offload model. + * If set to 0, firmware can't supported virtio vSwitch offload model. + * By default, this flag should be 0 for older version of core firmware. + */ + #define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_VIRTIO_VSWITCH_OFFLOAD_SUPPORTED \ + UINT32_C(0x80) + /* + * If set to 1, firmware is capable to support trusted VF. + * If set to 0, firmware is not capable to support trusted VF. + * By default, this flag should be 0 for older version of core firmware. + */ + #define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED \ + UINT32_C(0x100) + /* + * If set to 1, firmware is capable to support flow aging. + * If set to 0, firmware is not capable to support flow aging. + * By default, this flag should be 0 for older version of core firmware. + */ + #define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_FLOW_AGING_SUPPORTED \ + UINT32_C(0x200) + /* + * If set to 1, firmware is capable to support advanced flow counters like, + * Meter drop counters and EEM counters. + * If set to 0, firmware is not capable to support advanced flow counters. + * By default, this flag should be 0 for older version of core firmware. + */ + #define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_ADV_FLOW_COUNTERS_SUPPORTED \ + UINT32_C(0x400) + /* + * If set to 1, the firmware is able to support the use of the CFA + * Extended Exact Match(EEM) feature. + * If set to 0, firmware is not capable to support the use of the + * CFA EEM feature. + * By default, this flag should be 0 for older version of core firmware. + */ + #define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_CFA_EEM_SUPPORTED \ + UINT32_C(0x800) + /* + * If set to 1, the firmware is able to support advance CFA flow management + * features reported in the HWRM_CFA_FLOW_MGNT_QCAPS. + * If set to 0, then the firmware doesn’t support the advance CFA flow management + * features. + * By default, this flag should be 0 for older version of core firmware. + */ + #define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED \ + UINT32_C(0x1000) + /* + * Deprecated and replaced with cfa_truflow_supported. + * If set to 1, the firmware is able to support TFLIB features. + * If set to 0, then the firmware doesn’t support TFLIB features. + * By default, this flag should be 0 for older version of core firmware. + */ + #define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_CFA_TFLIB_SUPPORTED \ + UINT32_C(0x2000) + /* + * If set to 1, the firmware is able to support TruFlow features. + * If set to 0, then the firmware doesn’t support TruFlow features. 
+ * By default, this flag should be 0 for older version of + * core firmware. + */ + #define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_CFA_TRUFLOW_SUPPORTED \ + UINT32_C(0x4000) + /* + * This field represents the major version of RoCE firmware. + * A change in major version represents a major release. + */ + uint8_t roce_fw_maj_8b; + /* + * This field represents the minor version of RoCE firmware. + * A change in minor version represents significant + * functionality changes. + */ + uint8_t roce_fw_min_8b; + /* + * This field represents the build version of RoCE firmware. + * A change in update version represents bug fixes. + */ + uint8_t roce_fw_bld_8b; + /* + * This field is a reserved field. This field can be used to + * represent firmware branches or customer specific releases + * tied to a specific (major,minor,update) version + */ + uint8_t roce_fw_rsvd_8b; + /* + * This field represents the name of HWRM FW (ASCII chars + * with NULL at the end). + */ + char hwrm_fw_name[16]; + /* + * This field represents the name of mgmt FW (ASCII chars + * with NULL at the end). + */ + char mgmt_fw_name[16]; + /* + * This field represents the name of network control + * firmware (ASCII chars with NULL at the end). + */ + char netctrl_fw_name[16]; + /* This field represents the active board package name. */ + char active_pkg_name[16]; + /* + * This field represents the name of RoCE FW (ASCII chars + * with NULL at the end). + */ + char roce_fw_name[16]; + /* This field returns the chip number. */ + uint16_t chip_num; + /* This field returns the revision of chip. */ + uint8_t chip_rev; + /* This field returns the chip metal number. */ + uint8_t chip_metal; + /* This field returns the bond id of the chip. */ + uint8_t chip_bond_id; + /* This value indicates the type of platform used for chip implementation. */ + uint8_t chip_platform_type; + /* ASIC */ + #define HWRM_VER_GET_OUTPUT_CHIP_PLATFORM_TYPE_ASIC UINT32_C(0x0) + /* FPGA platform of the chip. */ + #define HWRM_VER_GET_OUTPUT_CHIP_PLATFORM_TYPE_FPGA UINT32_C(0x1) + /* Palladium platform of the chip. */ + #define HWRM_VER_GET_OUTPUT_CHIP_PLATFORM_TYPE_PALLADIUM UINT32_C(0x2) + #define HWRM_VER_GET_OUTPUT_CHIP_PLATFORM_TYPE_LAST \ + HWRM_VER_GET_OUTPUT_CHIP_PLATFORM_TYPE_PALLADIUM + /* + * This field returns the maximum value of request window that + * is supported by the HWRM. The request window is mapped + * into device address space using MMIO. + */ + uint16_t max_req_win_len; + /* + * This field returns the maximum value of response buffer in + * bytes. + */ + uint16_t max_resp_len; + /* + * This field returns the default request timeout value in + * milliseconds. + */ + uint16_t def_req_timeout; + /* + * This field will indicate if any subsystems is not fully + * initialized. + */ + uint8_t flags; + /* + * If set to 1, it will indicate to host drivers that firmware is + * not ready to start full blown HWRM commands. Host drivers should + * re-try HWRM_VER_GET with some timeout period. The timeout period + * can be selected up to 5 seconds. + * For Example, PCIe hot-plug: + * Hot plug timing is system dependent. It generally takes up to + * 600 miliseconds for firmware to clear DEV_NOT_RDY flag. + * If set to 0, device is ready to accept all HWRM commands. + */ + #define HWRM_VER_GET_OUTPUT_FLAGS_DEV_NOT_RDY UINT32_C(0x1) + /* + * If set to 1, external version present. + * If set to 0, external version not present. 
+ */ + #define HWRM_VER_GET_OUTPUT_FLAGS_EXT_VER_AVAIL UINT32_C(0x2) + uint8_t unused_0[2]; + /* + * For backward compatibility this field must be set to 1. + * Older drivers might look for this field to be 1 before + * processing the message. + */ + uint8_t always_1; + /* + * This field represents the major version of HWRM interface + * specification supported by the HWRM implementation. + * The interface major version is intended to change only when + * non backward compatible changes are made to the HWRM + * interface specification. A HWRM implementation that is + * compliant with this specification shall provide value of 1 + * in this field. + */ + uint16_t hwrm_intf_major; + /* + * This field represents the minor version of HWRM interface + * specification supported by the HWRM implementation. + * A change in interface minor version is used to reflect + * significant backward compatible modification to HWRM + * interface specification. This can be due to addition or + * removal of functionality. HWRM interface specifications + * with the same major version but different minor versions are + * compatible. A HWRM implementation that is compliant with + * this specification shall provide value of 2 in this field. + */ + uint16_t hwrm_intf_minor; + /* + * This field represents the update version of HWRM interface + * specification supported by the HWRM implementation. The + * interface update version is used to reflect minor changes or + * bug fixes to a released HWRM interface specification. + * A HWRM implementation that is compliant with this + * specification shall provide value of 2 in this field. + */ + uint16_t hwrm_intf_build; + /* + * This field represents the patch version of HWRM interface + * specification supported by the HWRM implementation. + */ + uint16_t hwrm_intf_patch; + /* + * This field represents the major version of HWRM firmware. + * A change in firmware major version represents a major + * firmware release. + */ + uint16_t hwrm_fw_major; + /* + * This field represents the minor version of HWRM firmware. + * A change in firmware minor version represents significant + * firmware functionality changes. + */ + uint16_t hwrm_fw_minor; + /* + * This field represents the build version of HWRM firmware. + * A change in firmware build version represents bug fixes to + * a released firmware. + */ + uint16_t hwrm_fw_build; + /* + * This field is a reserved field. + * This field can be used to represent firmware branches or customer + * specific releases tied to a specific (major,minor,update) version + * of the HWRM firmware. + */ + uint16_t hwrm_fw_patch; + /* + * This field represents the major version of mgmt firmware. + * A change in major version represents a major release. + */ + uint16_t mgmt_fw_major; + /* + * This field represents the minor version of HWRM firmware. + * A change in firmware minor version represents significant + * firmware functionality changes. + */ + uint16_t mgmt_fw_minor; + /* + * This field represents the build version of mgmt firmware. + * A change in update version represents bug fixes. + */ + uint16_t mgmt_fw_build; + /* + * This field is a reserved field. This field can be used to + * represent firmware branches or customer specific releases + * tied to a specific (major,minor,update) version. + */ + uint16_t mgmt_fw_patch; + /* + * This field represents the major version of network control + * firmware. A change in major version represents + * a major release. 
+ */ + uint16_t netctrl_fw_major; + /* + * This field represents the minor version of network control + * firmware. A change in minor version represents significant + * functionality changes. + */ + uint16_t netctrl_fw_minor; + /* + * This field represents the build version of network control + * firmware. A change in update version represents bug fixes. + */ + uint16_t netctrl_fw_build; + /* + * This field is a reserved field. This field can be used to + * represent firmware branches or customer specific releases + * tied to a specific (major,minor,update) version + */ + uint16_t netctrl_fw_patch; + /* + * This field represents the major version of RoCE firmware. + * A change in major version represents a major release. + */ + uint16_t roce_fw_major; + /* + * This field represents the minor version of RoCE firmware. + * A change in minor version represents significant + * functionality changes. + */ + uint16_t roce_fw_minor; + /* + * This field represents the build version of RoCE firmware. + * A change in update version represents bug fixes. + */ + uint16_t roce_fw_build; + /* + * This field is a reserved field. This field can be used to + * represent firmware branches or customer specific releases + * tied to a specific (major,minor,update) version + */ + uint16_t roce_fw_patch; + /* + * This field returns the maximum extended request length acceptable + * by the device which allows requests greater than mailbox size when + * used with the short cmd request format. + */ + uint16_t max_ext_req_len; + uint8_t unused_1[5]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/* bd_base (size:64b/8B) */ +struct bd_base { + uint8_t type; + /* This value identifies the type of buffer descriptor. */ + #define BD_BASE_TYPE_MASK UINT32_C(0x3f) + #define BD_BASE_TYPE_SFT 0 + /* + * Indicates that this BD is 16B long and is used for + * normal L2 packet transmission. + */ + #define BD_BASE_TYPE_TX_BD_SHORT UINT32_C(0x0) + /* + * Indicates that this BD is 1BB long and is an empty + * TX BD. Not valid for use by the driver. + */ + #define BD_BASE_TYPE_TX_BD_EMPTY UINT32_C(0x1) + /* + * Indicates that this BD is 16B long and is an RX Producer + * (i.e. empty) buffer descriptor. + */ + #define BD_BASE_TYPE_RX_PROD_PKT UINT32_C(0x4) + /* + * Indicates that this BD is 16B long and is an RX + * Producer Buffer BD. + */ + #define BD_BASE_TYPE_RX_PROD_BFR UINT32_C(0x5) + /* + * Indicates that this BD is 16B long and is an + * RX Producer Assembly Buffer Descriptor. + */ + #define BD_BASE_TYPE_RX_PROD_AGG UINT32_C(0x6) + /* + * Indicates that this BD is 32B long and is used for + * normal L2 packet transmission. + */ + #define BD_BASE_TYPE_TX_BD_LONG UINT32_C(0x10) + /* + * Indicates that this BD is 32B long and is used for + * L2 packet transmission for small packets that require + * low latency. + */ + #define BD_BASE_TYPE_TX_BD_LONG_INLINE UINT32_C(0x11) + #define BD_BASE_TYPE_LAST BD_BASE_TYPE_TX_BD_LONG_INLINE + uint8_t unused_1[7]; +} __rte_packed; + +/* tx_bd_short (size:128b/16B) */ +struct tx_bd_short { + /* + * All bits in this field must be valid on the first BD of a packet. + * Only the packet_end bit must be valid for the remaining BDs + * of a packet. 
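+ * (Illustration only, a sketch rather than a requirement: for a packet
+ * carried in a single short BD this field could be written as
+ * TX_BD_SHORT_TYPE_TX_BD_SHORT |
+ * TX_BD_SHORT_FLAGS_PACKET_END |
+ * (1 << TX_BD_SHORT_FLAGS_BD_CNT_SFT)
+ * i.e. the type, the end-of-packet flag and a BD count of one.)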
+ */ + uint16_t flags_type; + /* This value identifies the type of buffer descriptor. */ + #define TX_BD_SHORT_TYPE_MASK UINT32_C(0x3f) + #define TX_BD_SHORT_TYPE_SFT 0 + /* + * Indicates that this BD is 16B long and is used for + * normal L2 packet transmission. + */ + #define TX_BD_SHORT_TYPE_TX_BD_SHORT UINT32_C(0x0) + #define TX_BD_SHORT_TYPE_LAST TX_BD_SHORT_TYPE_TX_BD_SHORT + /* + * All bits in this field must be valid on the first BD of a packet. + * Only the packet_end bit must be valid for the remaining BDs + * of a packet. + */ + #define TX_BD_SHORT_FLAGS_MASK UINT32_C(0xffc0) + #define TX_BD_SHORT_FLAGS_SFT 6 + /* + * If set to 1, the packet ends with the data in the buffer + * pointed to by this descriptor. This flag must be + * valid on every BD. + */ + #define TX_BD_SHORT_FLAGS_PACKET_END UINT32_C(0x40) + /* + * If set to 1, the device will not generate a completion for + * this transmit packet unless there is an error in it's + * processing. + * If this bit + * is set to 0, then the packet will be completed normally. + * + * This bit must be valid only on the first BD of a packet. + */ + #define TX_BD_SHORT_FLAGS_NO_CMPL UINT32_C(0x80) + /* + * This value indicates how many 16B BD locations are consumed + * in the ring by this packet. + * A value of 1 indicates that this BD is the only BD (and that + * it is a short BD). A value + * of 3 indicates either 3 short BDs or 1 long BD and one short + * BD in the packet. A value of 0 indicates + * that there are 32 BD locations in the packet (the maximum). + * + * This field is valid only on the first BD of a packet. + */ + #define TX_BD_SHORT_FLAGS_BD_CNT_MASK UINT32_C(0x1f00) + #define TX_BD_SHORT_FLAGS_BD_CNT_SFT 8 + /* + * This value is a hint for the length of the entire packet. + * It is used by the chip to optimize internal processing. + * + * The packet will be dropped if the hint is too short. + * + * This field is valid only on the first BD of a packet. + */ + #define TX_BD_SHORT_FLAGS_LHINT_MASK UINT32_C(0x6000) + #define TX_BD_SHORT_FLAGS_LHINT_SFT 13 + /* indicates packet length < 512B */ + #define TX_BD_SHORT_FLAGS_LHINT_LT512 (UINT32_C(0x0) << 13) + /* indicates 512 <= packet length < 1KB */ + #define TX_BD_SHORT_FLAGS_LHINT_LT1K (UINT32_C(0x1) << 13) + /* indicates 1KB <= packet length < 2KB */ + #define TX_BD_SHORT_FLAGS_LHINT_LT2K (UINT32_C(0x2) << 13) + /* indicates packet length >= 2KB */ + #define TX_BD_SHORT_FLAGS_LHINT_GTE2K (UINT32_C(0x3) << 13) + #define TX_BD_SHORT_FLAGS_LHINT_LAST \ + TX_BD_SHORT_FLAGS_LHINT_GTE2K + /* + * If set to 1, the device immediately updates the Send Consumer + * Index after the buffer associated with this descriptor has + * been transferred via DMA to NIC memory from host memory. An + * interrupt may or may not be generated according to the state + * of the interrupt avoidance mechanisms. If this bit + * is set to 0, then the Consumer Index is only updated as soon + * as one of the host interrupt coalescing conditions has been met. + * + * This bit must be valid on the first BD of a packet. + */ + #define TX_BD_SHORT_FLAGS_COAL_NOW UINT32_C(0x8000) + /* + * This is the length of the host physical buffer this BD describes + * in bytes. + * + * This field must be valid on all BDs of a packet. + */ + uint16_t len; + /* + * The opaque data field is pass through to the completion and can be + * used for any data that the driver wants to associate with the + * transmit BD. + * + * This field must be valid on the first BD of a packet. 
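+ * (Note, added for illustration only: drivers commonly store a ring
+ * index or similar software cookie here, since the same value is
+ * echoed back in the opaque field of the corresponding tx_cmpl
+ * record and can be used to locate the completed packet.)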
+ */
+ uint32_t opaque;
+ /*
+ * This is the host physical address for the portion of the packet
+ * described by this TX BD.
+ *
+ * This value must be valid on all BDs of a packet.
+ */
+ uint64_t address;
+} __rte_packed;
+
+/* tx_bd_long (size:128b/16B) */
+struct tx_bd_long {
+ /* This value identifies the type of buffer descriptor. */
+ uint16_t flags_type;
+ /*
+ * This value indicates the type of buffer descriptor.
+ */
+ #define TX_BD_LONG_TYPE_MASK UINT32_C(0x3f)
+ #define TX_BD_LONG_TYPE_SFT 0
+ /*
+ * Indicates that this BD is 32B long and is used for
+ * normal L2 packet transmission.
+ */
+ #define TX_BD_LONG_TYPE_TX_BD_LONG UINT32_C(0x10)
+ #define TX_BD_LONG_TYPE_LAST TX_BD_LONG_TYPE_TX_BD_LONG
+ /*
+ * All bits in this field must be valid on the first BD of a packet.
+ * Only the packet_end bit must be valid for the remaining BDs
+ * of a packet.
+ */
+ #define TX_BD_LONG_FLAGS_MASK UINT32_C(0xffc0)
+ #define TX_BD_LONG_FLAGS_SFT 6
+ /*
+ * If set to 1, the packet ends with the data in the buffer
+ * pointed to by this descriptor. This flag must be
+ * valid on every BD.
+ */
+ #define TX_BD_LONG_FLAGS_PACKET_END UINT32_C(0x40)
+ /*
+ * If set to 1, the device will not generate a completion for
+ * this transmit packet unless there is an error in its
+ * processing.
+ * If this bit
+ * is set to 0, then the packet will be completed normally.
+ *
+ * This bit must be valid only on the first BD of a packet.
+ */
+ #define TX_BD_LONG_FLAGS_NO_CMPL UINT32_C(0x80)
+ /*
+ * This value indicates how many 16B BD locations are consumed
+ * in the ring by this packet.
+ * A value of 1 indicates that this BD is the only BD (and that
+ * it is a short BD). A value
+ * of 3 indicates either 3 short BDs or 1 long BD and one short
+ * BD in the packet. A value of 0 indicates
+ * that there are 32 BD locations in the packet (the maximum).
+ *
+ * This field is valid only on the first BD of a packet.
+ */
+ #define TX_BD_LONG_FLAGS_BD_CNT_MASK UINT32_C(0x1f00)
+ #define TX_BD_LONG_FLAGS_BD_CNT_SFT 8
+ /*
+ * This value is a hint for the length of the entire packet.
+ * It is used by the chip to optimize internal processing.
+ *
+ * The packet will be dropped if the hint is too short.
+ *
+ * This field is valid only on the first BD of a packet.
+ */
+ #define TX_BD_LONG_FLAGS_LHINT_MASK UINT32_C(0x6000)
+ #define TX_BD_LONG_FLAGS_LHINT_SFT 13
+ /* indicates packet length < 512B */
+ #define TX_BD_LONG_FLAGS_LHINT_LT512 (UINT32_C(0x0) << 13)
+ /* indicates 512 <= packet length < 1KB */
+ #define TX_BD_LONG_FLAGS_LHINT_LT1K (UINT32_C(0x1) << 13)
+ /* indicates 1KB <= packet length < 2KB */
+ #define TX_BD_LONG_FLAGS_LHINT_LT2K (UINT32_C(0x2) << 13)
+ /* indicates packet length >= 2KB */
+ #define TX_BD_LONG_FLAGS_LHINT_GTE2K (UINT32_C(0x3) << 13)
+ #define TX_BD_LONG_FLAGS_LHINT_LAST TX_BD_LONG_FLAGS_LHINT_GTE2K
+ /*
+ * If set to 1, the device immediately updates the Send Consumer
+ * Index after the buffer associated with this descriptor has
+ * been transferred via DMA to NIC memory from host memory. An
+ * interrupt may or may not be generated according to the state
+ * of the interrupt avoidance mechanisms. If this bit
+ * is set to 0, then the Consumer Index is only updated as soon
+ * as one of the host interrupt coalescing conditions has been met.
+ *
+ * This bit must be valid on the first BD of a packet.
+ */
+ #define TX_BD_LONG_FLAGS_COAL_NOW UINT32_C(0x8000)
+ /*
+ * This is the length of the host physical buffer this BD describes
+ * in bytes.
+ * + * This field must be valid on all BDs of a packet. + */ + uint16_t len; + /* + * The opaque data field is pass through to the completion and can be + * used for any data that the driver wants to associate with the + * transmit BD. + * + * This field must be valid on the first BD of a packet. + */ + uint32_t opaque; + /* + * This is the host physical address for the portion of the packet + * described by this TX BD. + * + * This value must be valid on all BDs of a packet. + */ + uint64_t address; +} __rte_packed; + +/* Last 16 bytes of tx_bd_long. */ +/* tx_bd_long_hi (size:128b/16B) */ +struct tx_bd_long_hi { + /* + * All bits in this field must be valid on the first BD of a packet. + * Their value on other BDs of the packet will be ignored. + */ + uint16_t lflags; + /* + * If set to 1, the controller replaces the TCP/UPD checksum + * fields of normal TCP/UPD checksum, or the inner TCP/UDP + * checksum field of the encapsulated TCP/UDP packets with the + * hardware calculated TCP/UDP checksum for the packet associated + * with this descriptor. The flag is ignored if the LSO flag is set. + * + * This bit must be valid on the first BD of a packet. + */ + #define TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM UINT32_C(0x1) + /* + * If set to 1, the controller replaces the IP checksum of the + * normal packets, or the inner IP checksum of the encapsulated + * packets with the hardware calculated IP checksum for the + * packet associated with this descriptor. + * + * This bit must be valid on the first BD of a packet. + */ + #define TX_BD_LONG_LFLAGS_IP_CHKSUM UINT32_C(0x2) + /* + * If set to 1, the controller will not append an Ethernet CRC + * to the end of the frame. + * + * This bit must be valid on the first BD of a packet. + * + * Packet must be 64B or longer when this flag is set. It is not + * useful to use this bit with any form of TX offload such as + * CSO or LSO. The intent is that the packet from the host already + * has a valid Ethernet CRC on the packet. + */ + #define TX_BD_LONG_LFLAGS_NOCRC UINT32_C(0x4) + /* + * If set to 1, the device will record the time at which the packet + * was actually transmitted at the TX MAC. + * + * This bit must be valid on the first BD of a packet. + */ + #define TX_BD_LONG_LFLAGS_STAMP UINT32_C(0x8) + /* + * If set to 1, The controller replaces the tunnel IP checksum + * field with hardware calculated IP checksum for the IP header + * of the packet associated with this descriptor. + * + * For outer UDP checksum, global outer UDP checksum TE_NIC register + * needs to be enabled. If the global outer UDP checksum TE_NIC register + * bit is set, outer UDP checksum will be calculated for the following + * cases: + * 1. Packets with tcp_udp_chksum flag set to offload checksum for inner + * packet AND the inner packet is TCP/UDP. If the inner packet is ICMP for + * example (non-TCP/UDP), even if the tcp_udp_chksum is set, the outer UDP + * checksum will not be calculated. + * 2. Packets with lso flag set which implies inner TCP checksum calculation + * as part of LSO operation. + */ + #define TX_BD_LONG_LFLAGS_T_IP_CHKSUM UINT32_C(0x10) + /* + * If set to 1, the device will treat this packet with LSO(Large + * Send Offload) processing for both normal or encapsulated + * packets, which is a form of TCP segmentation. When this bit + * is 1, the hdr_size and mss fields must be valid. The driver + * doesn't need to set t_ip_chksum, ip_chksum, and tcp_udp_chksum + * flags since the controller will replace the appropriate + * checksum fields for segmented packets. 
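+ * (Illustration only, assuming a plain TCP/IPv4 packet: with this
+ * flag set, a driver would program hdr_size with the combined
+ * Ethernet, IP and TCP header length expressed in 16-bit words,
+ * e.g. (14 + 20 + 20) / 2 = 27, and mss with the TCP payload size
+ * of each generated segment in bytes.)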
+ *
+ * When this bit is 1, the hdr_size and mss fields must be valid.
+ */
+ #define TX_BD_LONG_LFLAGS_LSO UINT32_C(0x20)
+ /*
+ * If set to zero when LSO is '1', then the IPID will be treated
+ * as a 16b number and will be wrapped if it exceeds a value of
+ * 0xffff.
+ *
+ * If set to one when LSO is '1', then the IPID will be treated
+ * as a 15b number and will be wrapped if it exceeds a value of
+ * 0x7fff.
+ */
+ #define TX_BD_LONG_LFLAGS_IPID_FMT UINT32_C(0x40)
+ /*
+ * If set to zero when LSO is '1', then the IPID of the tunnel
+ * IP header will not be modified during LSO operations.
+ *
+ * If set to one when LSO is '1', then the IPID of the tunnel
+ * IP header will be incremented for each subsequent segment of an
+ * LSO operation.
+ *
+ * The flag is ignored if the LSO packet is a normal (non-tunneled)
+ * TCP packet.
+ */
+ #define TX_BD_LONG_LFLAGS_T_IPID UINT32_C(0x80)
+ /*
+ * If set to '1', then the RoCE ICRC will be appended to the
+ * packet. Packet must be a valid RoCE format packet.
+ */
+ #define TX_BD_LONG_LFLAGS_ROCE_CRC UINT32_C(0x100)
+ /*
+ * If set to '1', then the FCoE CRC will be appended to the
+ * packet. Packet must be a valid FCoE format packet.
+ */
+ #define TX_BD_LONG_LFLAGS_FCOE_CRC UINT32_C(0x200)
+ uint16_t hdr_size;
+ /*
+ * When LSO is '1', this field must contain the offset of the
+ * TCP payload from the beginning of the packet as
+ * 16-bit words. In the case of an encapsulated/tunneled packet, this field
+ * contains the offset of the inner TCP payload from the beginning of the
+ * packet as 16-bit words.
+ *
+ * This value must be valid on the first BD of a packet.
+ */
+ #define TX_BD_LONG_HDR_SIZE_MASK UINT32_C(0x1ff)
+ #define TX_BD_LONG_HDR_SIZE_SFT 0
+ uint32_t mss;
+ /*
+ * This is the MSS value that will be used to do the LSO processing.
+ * The value is the length in bytes of the TCP payload for each
+ * segment generated by the LSO operation.
+ *
+ * This value must be valid on the first BD of a packet.
+ */
+ #define TX_BD_LONG_MSS_MASK UINT32_C(0x7fff)
+ #define TX_BD_LONG_MSS_SFT 0
+ uint16_t unused2;
+ /*
+ * This value selects a CFA action to perform on the packet.
+ * Set this value to zero if no CFA action is desired.
+ *
+ * This value must be valid on the first BD of a packet.
+ */
+ uint16_t cfa_action;
+ /*
+ * This value is action meta-data that defines CFA edit operations
+ * that are done in addition to any action editing.
+ */
+ uint32_t cfa_meta;
+ /* When key=1, this is the VLAN tag VID value. */
+ #define TX_BD_LONG_CFA_META_VLAN_VID_MASK UINT32_C(0xfff)
+ #define TX_BD_LONG_CFA_META_VLAN_VID_SFT 0
+ /* When key=1, this is the VLAN tag DE value. */
+ #define TX_BD_LONG_CFA_META_VLAN_DE UINT32_C(0x1000)
+ /* When key=1, this is the VLAN tag PRI value. */
+ #define TX_BD_LONG_CFA_META_VLAN_PRI_MASK UINT32_C(0xe000)
+ #define TX_BD_LONG_CFA_META_VLAN_PRI_SFT 13
+ /* When key=1, this is the VLAN tag TPID select value. */
+ #define TX_BD_LONG_CFA_META_VLAN_TPID_MASK UINT32_C(0x70000)
+ #define TX_BD_LONG_CFA_META_VLAN_TPID_SFT 16
+ /* 0x88a8 */
+ #define TX_BD_LONG_CFA_META_VLAN_TPID_TPID88A8 (UINT32_C(0x0) << 16)
+ /* 0x8100 */
+ #define TX_BD_LONG_CFA_META_VLAN_TPID_TPID8100 (UINT32_C(0x1) << 16)
+ /* 0x9100 */
+ #define TX_BD_LONG_CFA_META_VLAN_TPID_TPID9100 (UINT32_C(0x2) << 16)
+ /* 0x9200 */
+ #define TX_BD_LONG_CFA_META_VLAN_TPID_TPID9200 (UINT32_C(0x3) << 16)
+ /* 0x9300 */
+ #define TX_BD_LONG_CFA_META_VLAN_TPID_TPID9300 (UINT32_C(0x4) << 16)
+ /* Value programmed in CFA VLANTPID register.
*/ + #define TX_BD_LONG_CFA_META_VLAN_TPID_TPIDCFG (UINT32_C(0x5) << 16) + #define TX_BD_LONG_CFA_META_VLAN_TPID_LAST \ + TX_BD_LONG_CFA_META_VLAN_TPID_TPIDCFG + /* When key=1, This is the VLAN tag TPID select value. */ + #define TX_BD_LONG_CFA_META_VLAN_RESERVED_MASK UINT32_C(0xff80000) + #define TX_BD_LONG_CFA_META_VLAN_RESERVED_SFT 19 + /* + * This field identifies the type of edit to be performed + * on the packet. + * + * This value must be valid on the first BD of a packet. + */ + #define TX_BD_LONG_CFA_META_KEY_MASK UINT32_C(0xf0000000) + #define TX_BD_LONG_CFA_META_KEY_SFT 28 + /* No editing */ + #define TX_BD_LONG_CFA_META_KEY_NONE (UINT32_C(0x0) << 28) + /* + * - meta[17:16] - TPID select value (0 = 0x8100). + * - meta[15:12] - PRI/DE value. + * - meta[11:0] - VID value. + */ + #define TX_BD_LONG_CFA_META_KEY_VLAN_TAG (UINT32_C(0x1) << 28) + #define TX_BD_LONG_CFA_META_KEY_LAST \ + TX_BD_LONG_CFA_META_KEY_VLAN_TAG +} __rte_packed; + +/* + * This structure is used to inform the NIC of packet data that needs to be + * transmitted with additional processing that requires extra data such as + * VLAN insertion plus attached inline data. This BD type may be used to + * improve latency for small packets needing the additional extended features + * supported by long BDs. + */ +/* tx_bd_long_inline (size:256b/32B) */ +struct tx_bd_long_inline { + uint16_t flags_type; + /* This value identifies the type of buffer descriptor. */ + #define TX_BD_LONG_INLINE_TYPE_MASK UINT32_C(0x3f) + #define TX_BD_LONG_INLINE_TYPE_SFT 0 + /* + * This type of BD is 32B long and is used for inline L2 packet + * transmission. + */ + #define TX_BD_LONG_INLINE_TYPE_TX_BD_LONG_INLINE UINT32_C(0x11) + #define TX_BD_LONG_INLINE_TYPE_LAST \ + TX_BD_LONG_INLINE_TYPE_TX_BD_LONG_INLINE + /* + * All bits in this field may be set on the first BD of a packet. + * Only the packet_end bit may be set in non-first BDs. + */ + #define TX_BD_LONG_INLINE_FLAGS_MASK UINT32_C(0xffc0) + #define TX_BD_LONG_INLINE_FLAGS_SFT 6 + /* + * If set to 1, the packet ends with the data in the buffer + * pointed to by this descriptor. This flag must be + * valid on every BD. + */ + #define TX_BD_LONG_INLINE_FLAGS_PACKET_END UINT32_C(0x40) + /* + * If set to 1, the device will not generate a completion for + * this transmit packet unless there is an error in its processing. + * If this bit is set to 0, then the packet will be completed + * normally. + * + * This bit may be set only on the first BD of a packet. + */ + #define TX_BD_LONG_INLINE_FLAGS_NO_CMPL UINT32_C(0x80) + /* + * This value indicates how many 16B BD locations are consumed + * in the ring by this packet, including the BD and inline + * data. + */ + #define TX_BD_LONG_INLINE_FLAGS_BD_CNT_MASK UINT32_C(0x1f00) + #define TX_BD_LONG_INLINE_FLAGS_BD_CNT_SFT 8 + /* This field is deprecated. */ + #define TX_BD_LONG_INLINE_FLAGS_LHINT_MASK UINT32_C(0x6000) + #define TX_BD_LONG_INLINE_FLAGS_LHINT_SFT 13 + /* + * If set to 1, the device immediately updates the Send Consumer + * Index after the buffer associated with this descriptor has + * been transferred via DMA to NIC memory from host memory. An + * interrupt may or may not be generated according to the state + * of the interrupt avoidance mechanisms. If this bit + * is set to 0, then the Consumer Index is only updated as soon + * as one of the host interrupt coalescing conditions has been met. + * + * This bit must be valid on the first BD of a packet. 
+ */ + #define TX_BD_LONG_INLINE_FLAGS_COAL_NOW UINT32_C(0x8000) + /* + * This is the length of the inline data, not including BD length, in + * bytes. + * The maximum value is 480. + * + * This field must be valid on all BDs of a packet. + */ + uint16_t len; + /* + * The opaque data field is passed through to the completion and can be + * used for any data that the driver wants to associate with the transmit + * BD. + * + * This field must be valid on the first BD of a packet. + */ + uint32_t opaque; + uint64_t unused1; + /* + * All bits in this field must be valid on the first BD of a packet. + * Their value on other BDs of the packet is ignored. + */ + uint16_t lflags; + /* + * If set to 1, the controller replaces the TCP/UPD checksum + * fields of normal TCP/UPD checksum, or the inner TCP/UDP + * checksum field of the encapsulated TCP/UDP packets with the + * hardware calculated TCP/UDP checksum for the packet associated + * with this descriptor. The flag is ignored if the LSO flag is set. + */ + #define TX_BD_LONG_INLINE_LFLAGS_TCP_UDP_CHKSUM UINT32_C(0x1) + /* + * If set to 1, the controller replaces the IP checksum of the + * normal packets, or the inner IP checksum of the encapsulated + * packets with the hardware calculated IP checksum for the + * packet associated with this descriptor. + */ + #define TX_BD_LONG_INLINE_LFLAGS_IP_CHKSUM UINT32_C(0x2) + /* + * If set to 1, the controller will not append an Ethernet CRC + * to the end of the frame. + * + * Packet must be 64B or longer when this flag is set. It is not + * useful to use this bit with any form of TX offload such as + * CSO or LSO. The intent is that the packet from the host already + * has a valid Ethernet CRC on the packet. + */ + #define TX_BD_LONG_INLINE_LFLAGS_NOCRC UINT32_C(0x4) + /* + * If set to 1, the device will record the time at which the packet + * was actually transmitted at the TX MAC. + */ + #define TX_BD_LONG_INLINE_LFLAGS_STAMP UINT32_C(0x8) + /* + * If set to 1, the controller replaces the tunnel IP checksum + * field with hardware calculated IP checksum for the IP header + * of the packet associated with this descriptor. The hardware + * updates an outer UDP checksum if it is non-zero. + */ + #define TX_BD_LONG_INLINE_LFLAGS_T_IP_CHKSUM UINT32_C(0x10) + /* + * This bit must be 0 for BDs of this type. LSO is not supported with + * inline BDs. + */ + #define TX_BD_LONG_INLINE_LFLAGS_LSO UINT32_C(0x20) + /* Since LSO is not supported with inline BDs, this bit is not used. */ + #define TX_BD_LONG_INLINE_LFLAGS_IPID_FMT UINT32_C(0x40) + /* Since LSO is not supported with inline BDs, this bit is not used. */ + #define TX_BD_LONG_INLINE_LFLAGS_T_IPID UINT32_C(0x80) + /* + * If set to '1', then the RoCE ICRC will be appended to the + * packet. Packet must be a valid RoCE format packet. + */ + #define TX_BD_LONG_INLINE_LFLAGS_ROCE_CRC UINT32_C(0x100) + /* + * If set to '1', then the FCoE CRC will be appended to the + * packet. Packet must be a valid FCoE format packet. + */ + #define TX_BD_LONG_INLINE_LFLAGS_FCOE_CRC UINT32_C(0x200) + uint16_t unused2; + uint32_t unused3; + uint16_t unused4; + /* + * This value selects a CFA action to perform on the packet. + * Set this value to zero if no CFA action is desired. + * + * This value must be valid on the first BD of a packet. + */ + uint16_t cfa_action; + /* + * This value is action meta-data that defines CFA edit operations + * that are done in addition to any action editing. + */ + uint32_t cfa_meta; + /* When key = 1, this is the VLAN tag VID value. 
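+ * (Illustration only, not part of the original comment: a driver
+ * inserting VLAN ID 100 at priority 3 might build cfa_meta as
+ * TX_BD_LONG_INLINE_CFA_META_KEY_VLAN_TAG |
+ * (3 << TX_BD_LONG_INLINE_CFA_META_VLAN_PRI_SFT) | 100
+ * using the definitions below.)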
*/ + #define TX_BD_LONG_INLINE_CFA_META_VLAN_VID_MASK UINT32_C(0xfff) + #define TX_BD_LONG_INLINE_CFA_META_VLAN_VID_SFT 0 + /* When key = 1, this is the VLAN tag DE value. */ + #define TX_BD_LONG_INLINE_CFA_META_VLAN_DE UINT32_C(0x1000) + /* When key = 1, this is the VLAN tag PRI value. */ + #define TX_BD_LONG_INLINE_CFA_META_VLAN_PRI_MASK UINT32_C(0xe000) + #define TX_BD_LONG_INLINE_CFA_META_VLAN_PRI_SFT 13 + /* When key = 1, this is the VLAN tag TPID select value. */ + #define TX_BD_LONG_INLINE_CFA_META_VLAN_TPID_MASK UINT32_C(0x70000) + #define TX_BD_LONG_INLINE_CFA_META_VLAN_TPID_SFT 16 + /* 0x88a8 */ + #define TX_BD_LONG_INLINE_CFA_META_VLAN_TPID_TPID88A8 \ + (UINT32_C(0x0) << 16) + /* 0x8100 */ + #define TX_BD_LONG_INLINE_CFA_META_VLAN_TPID_TPID8100 \ + (UINT32_C(0x1) << 16) + /* 0x9100 */ + #define TX_BD_LONG_INLINE_CFA_META_VLAN_TPID_TPID9100 \ + (UINT32_C(0x2) << 16) + /* 0x9200 */ + #define TX_BD_LONG_INLINE_CFA_META_VLAN_TPID_TPID9200 \ + (UINT32_C(0x3) << 16) + /* 0x9300 */ + #define TX_BD_LONG_INLINE_CFA_META_VLAN_TPID_TPID9300 \ + (UINT32_C(0x4) << 16) + /* Value programmed in CFA VLANTPID register. */ + #define TX_BD_LONG_INLINE_CFA_META_VLAN_TPID_TPIDCFG \ + (UINT32_C(0x5) << 16) + #define TX_BD_LONG_INLINE_CFA_META_VLAN_TPID_LAST \ + TX_BD_LONG_INLINE_CFA_META_VLAN_TPID_TPIDCFG + #define TX_BD_LONG_INLINE_CFA_META_VLAN_RESERVED_MASK \ + UINT32_C(0xff80000) + #define TX_BD_LONG_INLINE_CFA_META_VLAN_RESERVED_SFT 19 + /* + * This field identifies the type of edit to be performed + * on the packet. + * + * This value must be valid on the first BD of a packet. + */ + #define TX_BD_LONG_INLINE_CFA_META_KEY_MASK \ + UINT32_C(0xf0000000) + #define TX_BD_LONG_INLINE_CFA_META_KEY_SFT 28 + /* No editing */ + #define TX_BD_LONG_INLINE_CFA_META_KEY_NONE \ + (UINT32_C(0x0) << 28) + /* + * - meta[17:16] - TPID select value (0 = 0x8100). + * - meta[15:12] - PRI/DE value. + * - meta[11:0] - VID value. + */ + #define TX_BD_LONG_INLINE_CFA_META_KEY_VLAN_TAG \ + (UINT32_C(0x1) << 28) + #define TX_BD_LONG_INLINE_CFA_META_KEY_LAST \ + TX_BD_LONG_INLINE_CFA_META_KEY_VLAN_TAG +} __rte_packed; + +/* tx_bd_empty (size:128b/16B) */ +struct tx_bd_empty { + /* This value identifies the type of buffer descriptor. */ + uint8_t type; + #define TX_BD_EMPTY_TYPE_MASK UINT32_C(0x3f) + #define TX_BD_EMPTY_TYPE_SFT 0 + /* + * Indicates that this BD is 1BB long and is an empty + * TX BD. Not valid for use by the driver. + */ + #define TX_BD_EMPTY_TYPE_TX_BD_EMPTY UINT32_C(0x1) + #define TX_BD_EMPTY_TYPE_LAST TX_BD_EMPTY_TYPE_TX_BD_EMPTY + uint8_t unused_1[3]; + uint8_t unused_2; + uint8_t unused_3[3]; + uint8_t unused_4[8]; +} __rte_packed; + +/* rx_prod_pkt_bd (size:128b/16B) */ +struct rx_prod_pkt_bd { + /* This value identifies the type of buffer descriptor. */ + uint16_t flags_type; + /* This value identifies the type of buffer descriptor. */ + #define RX_PROD_PKT_BD_TYPE_MASK UINT32_C(0x3f) + #define RX_PROD_PKT_BD_TYPE_SFT 0 + /* + * Indicates that this BD is 16B long and is an RX Producer + * (i.e. empty) buffer descriptor. + */ + #define RX_PROD_PKT_BD_TYPE_RX_PROD_PKT UINT32_C(0x4) + #define RX_PROD_PKT_BD_TYPE_LAST \ + RX_PROD_PKT_BD_TYPE_RX_PROD_PKT + #define RX_PROD_PKT_BD_FLAGS_MASK UINT32_C(0xffc0) + #define RX_PROD_PKT_BD_FLAGS_SFT 6 + /* + * If set to 1, the packet will be placed at the address plus + * 2B. The 2 Bytes of padding will be written as zero. 
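+ * (Explanatory note, not part of the original definition: with a
+ * standard 14-byte Ethernet header, this 2-byte start-of-packet pad
+ * is commonly used so that the IP header that follows ends up
+ * 4-byte aligned.)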
+ */ + #define RX_PROD_PKT_BD_FLAGS_SOP_PAD UINT32_C(0x40) + /* + * If set to 1, the packet write will be padded out to the + * nearest cache-line with zero value padding. + */ + #define RX_PROD_PKT_BD_FLAGS_EOP_PAD UINT32_C(0x80) + /* + * This value is the number of additional buffers in the ring that + * describe the buffer space to be consumed for this packet. + * If the value is zero, then the packet must fit within the + * space described by this BD. If this value is 1 or more, it + * indicates how many additional "buffer" BDs are in the ring + * immediately following this BD to be used for the same + * network packet. + * + * Even if the packet to be placed does not need all the + * additional buffers, they will be consumed anyway. + */ + #define RX_PROD_PKT_BD_FLAGS_BUFFERS_MASK UINT32_C(0x300) + #define RX_PROD_PKT_BD_FLAGS_BUFFERS_SFT 8 + /* + * This is the length in Bytes of the host physical buffer where + * data for the packet may be placed in host memory. + */ + uint16_t len; + /* + * The opaque data field is pass through to the completion and can be + * used for any data that the driver wants to associate with this + * receive buffer set. + */ + uint32_t opaque; + /* + * This is the host physical address where data for the packet may + * be placed in host memory. + */ + uint64_t address; +} __rte_packed; + +/* rx_prod_bfr_bd (size:128b/16B) */ +struct rx_prod_bfr_bd { + /* This value identifies the type of buffer descriptor. */ + uint16_t flags_type; + /* This value identifies the type of buffer descriptor. */ + #define RX_PROD_BFR_BD_TYPE_MASK UINT32_C(0x3f) + #define RX_PROD_BFR_BD_TYPE_SFT 0 + /* + * Indicates that this BD is 16B long and is an RX + * Producer Buffer BD. + */ + #define RX_PROD_BFR_BD_TYPE_RX_PROD_BFR UINT32_C(0x5) + #define RX_PROD_BFR_BD_TYPE_LAST RX_PROD_BFR_BD_TYPE_RX_PROD_BFR + #define RX_PROD_BFR_BD_FLAGS_MASK UINT32_C(0xffc0) + #define RX_PROD_BFR_BD_FLAGS_SFT 6 + /* + * This is the length in Bytes of the host physical buffer where + * data for the packet may be placed in host memory. + */ + uint16_t len; + /* This field is not used. */ + uint32_t opaque; + /* + * This is the host physical address where data for the packet may + * be placed in host memory. + */ + uint64_t address; +} __rte_packed; + +/* rx_prod_agg_bd (size:128b/16B) */ +struct rx_prod_agg_bd { + /* This value identifies the type of buffer descriptor. */ + uint16_t flags_type; + /* This value identifies the type of buffer descriptor. */ + #define RX_PROD_AGG_BD_TYPE_MASK UINT32_C(0x3f) + #define RX_PROD_AGG_BD_TYPE_SFT 0 + /* + * Indicates that this BD is 16B long and is an + * RX Producer Assembly Buffer Descriptor. + */ + #define RX_PROD_AGG_BD_TYPE_RX_PROD_AGG UINT32_C(0x6) + #define RX_PROD_AGG_BD_TYPE_LAST \ + RX_PROD_AGG_BD_TYPE_RX_PROD_AGG + #define RX_PROD_AGG_BD_FLAGS_MASK UINT32_C(0xffc0) + #define RX_PROD_AGG_BD_FLAGS_SFT 6 + /* + * If set to 1, the packet write will be padded out to the + * nearest cache-line with zero value padding. + */ + #define RX_PROD_AGG_BD_FLAGS_EOP_PAD UINT32_C(0x40) + /* + * This is the length in Bytes of the host physical buffer where + * data for the packet may be placed in host memory. + */ + uint16_t len; + /* + * The opaque data field is pass through to the completion and can be + * used for any data that the driver wants to associate with this + * receive assembly buffer. + */ + uint32_t opaque; + /* + * This is the host physical address where data for the packet may + * be placed in host memory. 
+ */ + uint64_t address; +} __rte_packed; + +/* cmpl_base (size:128b/16B) */ +struct cmpl_base { + uint16_t type; + /* + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records. + */ + #define CMPL_BASE_TYPE_MASK UINT32_C(0x3f) + #define CMPL_BASE_TYPE_SFT 0 + /* + * TX L2 completion: + * Completion of TX packet. Length = 16B + */ + #define CMPL_BASE_TYPE_TX_L2 UINT32_C(0x0) + /* + * RX L2 completion: + * Completion of and L2 RX packet. Length = 32B + */ + #define CMPL_BASE_TYPE_RX_L2 UINT32_C(0x11) + /* + * RX Aggregation Buffer completion : + * Completion of an L2 aggregation buffer in support of + * TPA, HDS, or Jumbo packet completion. Length = 16B + */ + #define CMPL_BASE_TYPE_RX_AGG UINT32_C(0x12) + /* + * RX L2 TPA Start Completion: + * Completion at the beginning of a TPA operation. + * Length = 32B + */ + #define CMPL_BASE_TYPE_RX_TPA_START UINT32_C(0x13) + /* + * RX L2 TPA End Completion: + * Completion at the end of a TPA operation. + * Length = 32B + */ + #define CMPL_BASE_TYPE_RX_TPA_END UINT32_C(0x15) + /* + * Statistics Ejection Completion: + * Completion of statistics data ejection buffer. + * Length = 16B + */ + #define CMPL_BASE_TYPE_STAT_EJECT UINT32_C(0x1a) + /* + * HWRM Command Completion: + * Completion of an HWRM command. + */ + #define CMPL_BASE_TYPE_HWRM_DONE UINT32_C(0x20) + /* Forwarded HWRM Request */ + #define CMPL_BASE_TYPE_HWRM_FWD_REQ UINT32_C(0x22) + /* Forwarded HWRM Response */ + #define CMPL_BASE_TYPE_HWRM_FWD_RESP UINT32_C(0x24) + /* HWRM Asynchronous Event Information */ + #define CMPL_BASE_TYPE_HWRM_ASYNC_EVENT UINT32_C(0x2e) + /* CQ Notification */ + #define CMPL_BASE_TYPE_CQ_NOTIFICATION UINT32_C(0x30) + /* SRQ Threshold Event */ + #define CMPL_BASE_TYPE_SRQ_EVENT UINT32_C(0x32) + /* DBQ Threshold Event */ + #define CMPL_BASE_TYPE_DBQ_EVENT UINT32_C(0x34) + /* QP Async Notification */ + #define CMPL_BASE_TYPE_QP_EVENT UINT32_C(0x38) + /* Function Async Notification */ + #define CMPL_BASE_TYPE_FUNC_EVENT UINT32_C(0x3a) + #define CMPL_BASE_TYPE_LAST CMPL_BASE_TYPE_FUNC_EVENT + /* info1 is 16 b */ + uint16_t info1; + /* info2 is 32 b */ + uint32_t info2; + /* + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. + */ + uint32_t info3_v; + #define CMPL_BASE_V UINT32_C(0x1) + #define CMPL_BASE_INFO3_MASK UINT32_C(0xfffffffe) + #define CMPL_BASE_INFO3_SFT 1 + /* info4 is 32 b */ + uint32_t info4; +} __rte_packed; + +/* tx_cmpl (size:128b/16B) */ +struct tx_cmpl { + uint16_t flags_type; + /* + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records. + */ + #define TX_CMPL_TYPE_MASK UINT32_C(0x3f) + #define TX_CMPL_TYPE_SFT 0 + /* + * TX L2 completion: + * Completion of TX packet. Length = 16B + */ + #define TX_CMPL_TYPE_TX_L2 UINT32_C(0x0) + #define TX_CMPL_TYPE_LAST TX_CMPL_TYPE_TX_L2 + #define TX_CMPL_FLAGS_MASK UINT32_C(0xffc0) + #define TX_CMPL_FLAGS_SFT 6 + /* + * When this bit is '1', it indicates a packet that has an + * error of some type. Type of error is indicated in + * error_flags. 
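+ * (Illustration only: a completion handler might check
+ * if (cmpl->flags_type & TX_CMPL_FLAGS_ERROR)
+ * and then inspect the errors_v field, masked with
+ * TX_CMPL_ERRORS_MASK, to classify the failure; "cmpl" is assumed
+ * to point at a tx_cmpl record.)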
+ */ + #define TX_CMPL_FLAGS_ERROR UINT32_C(0x40) + /* + * When this bit is '1', it indicates that the packet completed + * was transmitted using the push acceleration data provided + * by the driver. When this bit is '0', it indicates that the + * packet had not push acceleration data written or was executed + * as a normal packet even though push data was provided. + */ + #define TX_CMPL_FLAGS_PUSH UINT32_C(0x80) + /* unused1 is 16 b */ + uint16_t unused_0; + /* + * This is a copy of the opaque field from the first TX BD of this + * transmitted packet. + */ + uint32_t opaque; + uint16_t errors_v; + /* + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. + */ + #define TX_CMPL_V UINT32_C(0x1) + #define TX_CMPL_ERRORS_MASK UINT32_C(0xfffe) + #define TX_CMPL_ERRORS_SFT 1 + /* + * This error indicates that there was some sort of problem + * with the BDs for the packet. + */ + #define TX_CMPL_ERRORS_BUFFER_ERROR_MASK UINT32_C(0xe) + #define TX_CMPL_ERRORS_BUFFER_ERROR_SFT 1 + /* No error */ + #define TX_CMPL_ERRORS_BUFFER_ERROR_NO_ERROR (UINT32_C(0x0) << 1) + /* + * Bad Format: + * BDs were not formatted correctly. + */ + #define TX_CMPL_ERRORS_BUFFER_ERROR_BAD_FMT (UINT32_C(0x2) << 1) + #define TX_CMPL_ERRORS_BUFFER_ERROR_LAST \ + TX_CMPL_ERRORS_BUFFER_ERROR_BAD_FMT + /* + * When this bit is '1', it indicates that the length of + * the packet was zero. No packet was transmitted. + */ + #define TX_CMPL_ERRORS_ZERO_LENGTH_PKT UINT32_C(0x10) + /* + * When this bit is '1', it indicates that the packet + * was longer than the programmed limit in TDI. No + * packet was transmitted. + */ + #define TX_CMPL_ERRORS_EXCESSIVE_BD_LENGTH UINT32_C(0x20) + /* + * When this bit is '1', it indicates that one or more of the + * BDs associated with this packet generated a PCI error. + * This probably means the address was not valid. + */ + #define TX_CMPL_ERRORS_DMA_ERROR UINT32_C(0x40) + /* + * When this bit is '1', it indicates that the packet was longer + * than indicated by the hint. No packet was transmitted. + */ + #define TX_CMPL_ERRORS_HINT_TOO_SHORT UINT32_C(0x80) + /* + * When this bit is '1', it indicates that the packet was + * dropped due to Poison TLP error on one or more of the + * TLPs in the PXP completion. + */ + #define TX_CMPL_ERRORS_POISON_TLP_ERROR UINT32_C(0x100) + /* unused2 is 16 b */ + uint16_t unused_1; + /* unused3 is 32 b */ + uint32_t unused_2; +} __rte_packed; + +/* rx_pkt_cmpl (size:128b/16B) */ +struct rx_pkt_cmpl { + uint16_t flags_type; + /* + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records. + */ + #define RX_PKT_CMPL_TYPE_MASK UINT32_C(0x3f) + #define RX_PKT_CMPL_TYPE_SFT 0 + /* + * RX L2 completion: + * Completion of and L2 RX packet. Length = 32B + */ + #define RX_PKT_CMPL_TYPE_RX_L2 UINT32_C(0x11) + #define RX_PKT_CMPL_TYPE_LAST RX_PKT_CMPL_TYPE_RX_L2 + #define RX_PKT_CMPL_FLAGS_MASK UINT32_C(0xffc0) + #define RX_PKT_CMPL_FLAGS_SFT 6 + /* + * When this bit is '1', it indicates a packet that has an + * error of some type. Type of error is indicated in + * error_flags. + */ + #define RX_PKT_CMPL_FLAGS_ERROR UINT32_C(0x40) + /* This field indicates how the packet was placed in the buffer. 
*/ + #define RX_PKT_CMPL_FLAGS_PLACEMENT_MASK UINT32_C(0x380) + #define RX_PKT_CMPL_FLAGS_PLACEMENT_SFT 7 + /* + * Normal: + * Packet was placed using normal algorithm. + */ + #define RX_PKT_CMPL_FLAGS_PLACEMENT_NORMAL (UINT32_C(0x0) << 7) + /* + * Jumbo: + * Packet was placed using jumbo algorithm. + */ + #define RX_PKT_CMPL_FLAGS_PLACEMENT_JUMBO (UINT32_C(0x1) << 7) + /* + * Header/Data Separation: + * Packet was placed using Header/Data separation algorithm. + * The separation location is indicated by the itype field. + */ + #define RX_PKT_CMPL_FLAGS_PLACEMENT_HDS (UINT32_C(0x2) << 7) + #define RX_PKT_CMPL_FLAGS_PLACEMENT_LAST \ + RX_PKT_CMPL_FLAGS_PLACEMENT_HDS + /* This bit is '1' if the RSS field in this completion is valid. */ + #define RX_PKT_CMPL_FLAGS_RSS_VALID UINT32_C(0x400) + /* unused is 1 b */ + #define RX_PKT_CMPL_FLAGS_UNUSED UINT32_C(0x800) + /* + * This value indicates what the inner packet determined for the + * packet was. + */ + #define RX_PKT_CMPL_FLAGS_ITYPE_MASK UINT32_C(0xf000) + #define RX_PKT_CMPL_FLAGS_ITYPE_SFT 12 + /* + * Not Known: + * Indicates that the packet type was not known. + */ + #define RX_PKT_CMPL_FLAGS_ITYPE_NOT_KNOWN \ + (UINT32_C(0x0) << 12) + /* + * IP Packet: + * Indicates that the packet was an IP packet, but further + * classification was not possible. + */ + #define RX_PKT_CMPL_FLAGS_ITYPE_IP \ + (UINT32_C(0x1) << 12) + /* + * TCP Packet: + * Indicates that the packet was IP and TCP. + * This indicates that the payload_offset field is valid. + */ + #define RX_PKT_CMPL_FLAGS_ITYPE_TCP \ + (UINT32_C(0x2) << 12) + /* + * UDP Packet: + * Indicates that the packet was IP and UDP. + * This indicates that the payload_offset field is valid. + */ + #define RX_PKT_CMPL_FLAGS_ITYPE_UDP \ + (UINT32_C(0x3) << 12) + /* + * FCoE Packet: + * Indicates that the packet was recognized as a FCoE. + * This also indicates that the payload_offset field is valid. + */ + #define RX_PKT_CMPL_FLAGS_ITYPE_FCOE \ + (UINT32_C(0x4) << 12) + /* + * RoCE Packet: + * Indicates that the packet was recognized as a RoCE. + * This also indicates that the payload_offset field is valid. + */ + #define RX_PKT_CMPL_FLAGS_ITYPE_ROCE \ + (UINT32_C(0x5) << 12) + /* + * ICMP Packet: + * Indicates that the packet was recognized as ICMP. + * This indicates that the payload_offset field is valid. + */ + #define RX_PKT_CMPL_FLAGS_ITYPE_ICMP \ + (UINT32_C(0x7) << 12) + /* + * PtP packet wo/timestamp: + * Indicates that the packet was recognized as a PtP + * packet. + */ + #define RX_PKT_CMPL_FLAGS_ITYPE_PTP_WO_TIMESTAMP \ + (UINT32_C(0x8) << 12) + /* + * PtP packet w/timestamp: + * Indicates that the packet was recognized as a PtP + * packet and that a timestamp was taken for the packet. + */ + #define RX_PKT_CMPL_FLAGS_ITYPE_PTP_W_TIMESTAMP \ + (UINT32_C(0x9) << 12) + #define RX_PKT_CMPL_FLAGS_ITYPE_LAST \ + RX_PKT_CMPL_FLAGS_ITYPE_PTP_W_TIMESTAMP + /* + * This is the length of the data for the packet stored in the + * buffer(s) identified by the opaque value. This includes + * the packet BD and any associated buffer BDs. This does not include + * the length of any data places in aggregation BDs. + */ + uint16_t len; + /* + * This is a copy of the opaque field from the RX BD this completion + * corresponds to. + */ + uint32_t opaque; + uint8_t agg_bufs_v1; + /* + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. 
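+ * (Illustration only, with hypothetical names: a polling driver
+ * usually tracks the expected phase of the current pass through the
+ * ring and treats an entry as valid when
+ * !!(cmpl->agg_bufs_v1 & RX_PKT_CMPL_V1) == ring->expected_phase,
+ * flipping expected_phase each time the ring wraps.)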
+ */ + #define RX_PKT_CMPL_V1 UINT32_C(0x1) + /* + * This value is the number of aggregation buffers that follow this + * entry in the completion ring that are a part of this packet. + * If the value is zero, then the packet is completely contained + * in the buffer space provided for the packet in the RX ring. + */ + #define RX_PKT_CMPL_AGG_BUFS_MASK UINT32_C(0x3e) + #define RX_PKT_CMPL_AGG_BUFS_SFT 1 + /* unused1 is 2 b */ + #define RX_PKT_CMPL_UNUSED1_MASK UINT32_C(0xc0) + #define RX_PKT_CMPL_UNUSED1_SFT 6 + /* + * This is the RSS hash type for the packet. The value is packed + * {tuple_extrac_op[1:0],rss_profile_id[4:0],tuple_extrac_op[2]}. + * + * The value of tuple_extrac_op provides the information about + * what fields the hash was computed on. + * * 0: The RSS hash was computed over source IP address, + * destination IP address, source port, and destination port of inner + * IP and TCP or UDP headers. Note: For non-tunneled packets, + * the packet headers are considered inner packet headers for the RSS + * hash computation purpose. + * * 1: The RSS hash was computed over source IP address and destination + * IP address of inner IP header. Note: For non-tunneled packets, + * the packet headers are considered inner packet headers for the RSS + * hash computation purpose. + * * 2: The RSS hash was computed over source IP address, + * destination IP address, source port, and destination port of + * IP and TCP or UDP headers of outer tunnel headers. + * Note: For non-tunneled packets, this value is not applicable. + * * 3: The RSS hash was computed over source IP address and + * destination IP address of IP header of outer tunnel headers. + * Note: For non-tunneled packets, this value is not applicable. + * + * Note that 4-tuples values listed above are applicable + * for layer 4 protocols supported and enabled for RSS in the hardware, + * HWRM firmware, and drivers. For example, if RSS hash is supported and + * enabled for TCP traffic only, then the values of tuple_extract_op + * corresponding to 4-tuples are only valid for TCP traffic. + */ + uint8_t rss_hash_type; + /* + * This value indicates the offset in bytes from the beginning of the packet + * where the inner payload starts. This value is valid for TCP, UDP, + * FCoE, and RoCE packets. + * + * A value of zero indicates that header is 256B into the packet. + */ + uint8_t payload_offset; + /* unused2 is 8 b */ + uint8_t unused1; + /* + * This value is the RSS hash value calculated for the packet + * based on the mode bits and key value in the VNIC. + */ + uint32_t rss_hash; +} __rte_packed; + +/* Last 16 bytes of rx_pkt_cmpl. */ +/* rx_pkt_cmpl_hi (size:128b/16B) */ +struct rx_pkt_cmpl_hi { + uint32_t flags2; + /* + * This indicates that the ip checksum was calculated for the + * inner packet and that the ip_cs_error field indicates if there + * was an error. + */ + #define RX_PKT_CMPL_FLAGS2_IP_CS_CALC UINT32_C(0x1) + /* + * This indicates that the TCP, UDP or ICMP checksum was + * calculated for the inner packet and that the l4_cs_error field + * indicates if there was an error. + */ + #define RX_PKT_CMPL_FLAGS2_L4_CS_CALC UINT32_C(0x2) + /* + * This indicates that the ip checksum was calculated for the + * tunnel header and that the t_ip_cs_error field indicates if there + * was an error. + */ + #define RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC UINT32_C(0x4) + /* + * This indicates that the UDP checksum was + * calculated for the tunnel packet and that the t_l4_cs_error field + * indicates if there was an error. 
+ */
+ #define RX_PKT_CMPL_FLAGS2_T_L4_CS_CALC UINT32_C(0x8)
+ /* This value indicates what format the metadata field is in. */
+ #define RX_PKT_CMPL_FLAGS2_META_FORMAT_MASK UINT32_C(0xf0)
+ #define RX_PKT_CMPL_FLAGS2_META_FORMAT_SFT 4
+ /* No metadata information. Value is zero. */
+ #define RX_PKT_CMPL_FLAGS2_META_FORMAT_NONE \
+ (UINT32_C(0x0) << 4)
+ /*
+ * The metadata field contains the VLAN tag and TPID value.
+ * - metadata[11:0] contains the vlan VID value.
+ * - metadata[12] contains the vlan DE value.
+ * - metadata[15:13] contains the vlan PRI value.
+ * - metadata[31:16] contains the vlan TPID value.
+ */
+ #define RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN \
+ (UINT32_C(0x1) << 4)
+ /*
+ * If ext_meta_format is equal to 1, the metadata field
+ * contains the lower 16b of the tunnel ID value, justified
+ * to LSB:
+ * - VXLAN = VNI[23:0] -> VXLAN Network ID
+ * - Geneve (NGE) = VNI[23:0] -> Virtual Network Identifier.
+ * - NVGRE = TNI[23:0] -> Tenant Network ID
+ * - GRE = KEY[31:0] -> key field with bit mask. zero if K = 0
+ * - IPV4 = 0 (not populated)
+ * - IPV6 = Flow Label[19:0]
+ * - PPPoE = sessionID[15:0]
+ * - MPLS = Outer label[19:0]
+ * - UPAR = Selected[31:0] with bit mask
+ */
+ #define RX_PKT_CMPL_FLAGS2_META_FORMAT_TUNNEL_ID \
+ (UINT32_C(0x2) << 4)
+ /*
+ * If ext_meta_format is equal to 1, the metadata field contains
+ * 16b metadata from the prepended header (chdr_data).
+ */
+ #define RX_PKT_CMPL_FLAGS2_META_FORMAT_CHDR_DATA \
+ (UINT32_C(0x3) << 4)
+ /*
+ * If ext_meta_format is equal to 1, the metadata field contains
+ * the outer_l3_offset, inner_l2_offset, inner_l3_offset and
+ * inner_l4_size.
+ * - metadata[8:0] contains the outer_l3_offset.
+ * - metadata[17:9] contains the inner_l2_offset.
+ * - metadata[26:18] contains the inner_l3_offset.
+ * - metadata[31:27] contains the inner_l4_size.
+ */
+ #define RX_PKT_CMPL_FLAGS2_META_FORMAT_HDR_OFFSET \
+ (UINT32_C(0x4) << 4)
+ #define RX_PKT_CMPL_FLAGS2_META_FORMAT_LAST \
+ RX_PKT_CMPL_FLAGS2_META_FORMAT_HDR_OFFSET
+ /*
+ * This field indicates the IP type for the inner-most IP header.
+ * A value of '0' indicates IPv4. A value of '1' indicates IPv6.
+ * This value is only valid if itype indicates a packet
+ * with an IP header.
+ */
+ #define RX_PKT_CMPL_FLAGS2_IP_TYPE UINT32_C(0x100)
+ /*
+ * This indicates that the complete 1's complement checksum was
+ * calculated for the packet.
+ */
+ #define RX_PKT_CMPL_FLAGS2_COMPLETE_CHECKSUM_CALC UINT32_C(0x200)
+ /*
+ * The combination of this value and meta_format indicates what
+ * format the metadata field is in.
+ */
+ #define RX_PKT_CMPL_FLAGS2_EXT_META_FORMAT_MASK UINT32_C(0xc00)
+ #define RX_PKT_CMPL_FLAGS2_EXT_META_FORMAT_SFT 10
+ /*
+ * This value is the complete 1's complement checksum calculated from
+ * the start of the outer L3 header to the end of the packet (not
+ * including the Ethernet CRC). It is valid when the
+ * 'complete_checksum_calc' flag is set.
+ */
+ #define RX_PKT_CMPL_FLAGS2_COMPLETE_CHECKSUM_MASK \
+ UINT32_C(0xffff0000)
+ #define RX_PKT_CMPL_FLAGS2_COMPLETE_CHECKSUM_SFT 16
+ /*
+ * This is data from the CFA block as indicated by the meta_format
+ * field.
+ */
+ uint32_t metadata;
+ /* When meta_format=1, this value is the VLAN VID. */
+ #define RX_PKT_CMPL_METADATA_VID_MASK UINT32_C(0xfff)
+ #define RX_PKT_CMPL_METADATA_VID_SFT 0
+ /* When meta_format=1, this value is the VLAN DE. */
+ #define RX_PKT_CMPL_METADATA_DE UINT32_C(0x1000)
+ /* When meta_format=1, this value is the VLAN PRI.
*/ + #define RX_PKT_CMPL_METADATA_PRI_MASK UINT32_C(0xe000) + #define RX_PKT_CMPL_METADATA_PRI_SFT 13 + /* When meta_format=1, this value is the VLAN TPID. */ + #define RX_PKT_CMPL_METADATA_TPID_MASK UINT32_C(0xffff0000) + #define RX_PKT_CMPL_METADATA_TPID_SFT 16 + uint16_t errors_v2; + /* + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. + */ + #define RX_PKT_CMPL_V2 \ + UINT32_C(0x1) + #define RX_PKT_CMPL_ERRORS_MASK \ + UINT32_C(0xfffe) + #define RX_PKT_CMPL_ERRORS_SFT 1 + /* + * This error indicates that there was some sort of problem with + * the BDs for the packet that was found after part of the + * packet was already placed. The packet should be treated as + * invalid. + */ + #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_MASK \ + UINT32_C(0xe) + #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_SFT 1 + /* No buffer error */ + #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_NO_BUFFER \ + (UINT32_C(0x0) << 1) + /* + * Did Not Fit: + * Packet did not fit into packet buffer provided. + * For regular placement, this means the packet did not fit + * in the buffer provided. For HDS and jumbo placement, this + * means that the packet could not be placed into 7 physical + * buffers or less. + */ + #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_DID_NOT_FIT \ + (UINT32_C(0x1) << 1) + /* + * Not On Chip: + * All BDs needed for the packet were not on-chip when + * the packet arrived. + */ + #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_NOT_ON_CHIP \ + (UINT32_C(0x2) << 1) + /* + * Bad Format: + * BDs were not formatted correctly. + */ + #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_BAD_FORMAT \ + (UINT32_C(0x3) << 1) + /* + * Flush: + * There was a bad_format error on the previous operation + */ + #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_FLUSH \ + (UINT32_C(0x5) << 1) + #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_LAST \ + RX_PKT_CMPL_ERRORS_BUFFER_ERROR_FLUSH + /* + * This indicates that there was an error in the IP header + * checksum. + */ + #define RX_PKT_CMPL_ERRORS_IP_CS_ERROR \ + UINT32_C(0x10) + /* + * This indicates that there was an error in the TCP, UDP + * or ICMP checksum. + */ + #define RX_PKT_CMPL_ERRORS_L4_CS_ERROR \ + UINT32_C(0x20) + /* + * This indicates that there was an error in the tunnel + * IP header checksum. + */ + #define RX_PKT_CMPL_ERRORS_T_IP_CS_ERROR \ + UINT32_C(0x40) + /* + * This indicates that there was an error in the tunnel + * UDP checksum. + */ + #define RX_PKT_CMPL_ERRORS_T_L4_CS_ERROR \ + UINT32_C(0x80) + /* + * This indicates that there was a CRC error on either an FCoE + * or RoCE packet. The itype indicates the packet type. + */ + #define RX_PKT_CMPL_ERRORS_CRC_ERROR \ + UINT32_C(0x100) + /* + * This indicates that there was an error in the tunnel + * portion of the packet when this + * field is non-zero. + */ + #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_MASK \ + UINT32_C(0xe00) + #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_SFT 9 + /* + * No additional error occurred on the tunnel portion + * or the packet of the packet does not have a tunnel. + */ + #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_NO_ERROR \ + (UINT32_C(0x0) << 9) + /* + * Indicates that IP header version does not match + * expectation from L2 Ethertype for IPv4 and IPv6 + * in the tunnel header. + */ + #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_VERSION \ + (UINT32_C(0x1) << 9) + /* + * Indicates that header length is out of range in the + * tunnel header. Valid for + * IPv4. 
+ */ + #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_HDR_LEN \ + (UINT32_C(0x2) << 9) + /* + * Indicates that the physical packet is shorter than that + * claimed by the PPPoE header length for a tunnel PPPoE + * packet. + */ + #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_TUNNEL_TOTAL_ERROR \ + (UINT32_C(0x3) << 9) + /* + * Indicates that physical packet is shorter than that claimed + * by the tunnel l3 header length. Valid for IPv4, or IPv6 + * tunnel packet packets. + */ + #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_IP_TOTAL_ERROR \ + (UINT32_C(0x4) << 9) + /* + * Indicates that the physical packet is shorter than that + * claimed by the tunnel UDP header length for a tunnel + * UDP packet that is not fragmented. + */ + #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_UDP_TOTAL_ERROR \ + (UINT32_C(0x5) << 9) + /* + * indicates that the IPv4 TTL or IPv6 hop limit check + * have failed (e.g. TTL = 0) in the tunnel header. Valid + * for IPv4, and IPv6. + */ + #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_TTL \ + (UINT32_C(0x6) << 9) + #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_LAST \ + RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_TTL + /* + * This indicates that there was an error in the inner + * portion of the packet when this + * field is non-zero. + */ + #define RX_PKT_CMPL_ERRORS_PKT_ERROR_MASK \ + UINT32_C(0xf000) + #define RX_PKT_CMPL_ERRORS_PKT_ERROR_SFT 12 + /* + * No additional error occurred on the tunnel portion + * or the packet of the packet does not have a tunnel. + */ + #define RX_PKT_CMPL_ERRORS_PKT_ERROR_NO_ERROR \ + (UINT32_C(0x0) << 12) + /* + * Indicates that IP header version does not match + * expectation from L2 Ethertype for IPv4 and IPv6 or that + * option other than VFT was parsed on + * FCoE packet. + */ + #define RX_PKT_CMPL_ERRORS_PKT_ERROR_L3_BAD_VERSION \ + (UINT32_C(0x1) << 12) + /* + * indicates that header length is out of range. Valid for + * IPv4 and RoCE + */ + #define RX_PKT_CMPL_ERRORS_PKT_ERROR_L3_BAD_HDR_LEN \ + (UINT32_C(0x2) << 12) + /* + * indicates that the IPv4 TTL or IPv6 hop limit check + * have failed (e.g. TTL = 0). Valid for IPv4, and IPv6 + */ + #define RX_PKT_CMPL_ERRORS_PKT_ERROR_L3_BAD_TTL \ + (UINT32_C(0x3) << 12) + /* + * Indicates that physical packet is shorter than that + * claimed by the l3 header length. Valid for IPv4, + * IPv6 packet or RoCE packets. + */ + #define RX_PKT_CMPL_ERRORS_PKT_ERROR_IP_TOTAL_ERROR \ + (UINT32_C(0x4) << 12) + /* + * Indicates that the physical packet is shorter than that + * claimed by the UDP header length for a UDP packet that is + * not fragmented. + */ + #define RX_PKT_CMPL_ERRORS_PKT_ERROR_UDP_TOTAL_ERROR \ + (UINT32_C(0x5) << 12) + /* + * Indicates that TCP header length > IP payload. Valid for + * TCP packets only. + */ + #define RX_PKT_CMPL_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN \ + (UINT32_C(0x6) << 12) + /* Indicates that TCP header length < 5. Valid for TCP. */ + #define RX_PKT_CMPL_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN_TOO_SMALL \ + (UINT32_C(0x7) << 12) + /* + * Indicates that TCP option headers result in a TCP header + * size that does not match data offset in TCP header. Valid + * for TCP. + */ + #define RX_PKT_CMPL_ERRORS_PKT_ERROR_L4_BAD_OPT_LEN \ + (UINT32_C(0x8) << 12) + #define RX_PKT_CMPL_ERRORS_PKT_ERROR_LAST \ + RX_PKT_CMPL_ERRORS_PKT_ERROR_L4_BAD_OPT_LEN + /* + * This field identifies the CFA action rule that was used for this + * packet. + */ + uint16_t cfa_code; + uint32_t reorder; + /* + * This value holds the reordering sequence number for the packet. 
+ * If the reordering sequence is not valid, then this value is zero. + * The reordering domain for the packet is in the bottom 8 to 10b of + * the rss_hash value. The bottom 20b of this value contain the + * ordering domain value for the packet. + */ + #define RX_PKT_CMPL_REORDER_MASK UINT32_C(0xffffff) + #define RX_PKT_CMPL_REORDER_SFT 0 +} __rte_packed; + +/* + * This TPA completion structure is used on devices where the + * `hwrm_vnic_qcaps.max_aggs_supported` value is 0. + */ +/* rx_tpa_start_cmpl (size:128b/16B) */ +struct rx_tpa_start_cmpl { + uint16_t flags_type; + /* + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records. + */ + #define RX_TPA_START_CMPL_TYPE_MASK UINT32_C(0x3f) + #define RX_TPA_START_CMPL_TYPE_SFT 0 + /* + * RX L2 TPA Start Completion: + * Completion at the beginning of a TPA operation. + * Length = 32B + */ + #define RX_TPA_START_CMPL_TYPE_RX_TPA_START UINT32_C(0x13) + #define RX_TPA_START_CMPL_TYPE_LAST \ + RX_TPA_START_CMPL_TYPE_RX_TPA_START + #define RX_TPA_START_CMPL_FLAGS_MASK UINT32_C(0xffc0) + #define RX_TPA_START_CMPL_FLAGS_SFT 6 + /* This bit will always be '0' for TPA start completions. */ + #define RX_TPA_START_CMPL_FLAGS_ERROR UINT32_C(0x40) + /* This field indicates how the packet was placed in the buffer. */ + #define RX_TPA_START_CMPL_FLAGS_PLACEMENT_MASK UINT32_C(0x380) + #define RX_TPA_START_CMPL_FLAGS_PLACEMENT_SFT 7 + /* + * Jumbo: + * TPA Packet was placed using jumbo algorithm. This means + * that the first buffer will be filled with data before + * moving to aggregation buffers. Each aggregation buffer + * will be filled before moving to the next aggregation + * buffer. + */ + #define RX_TPA_START_CMPL_FLAGS_PLACEMENT_JUMBO \ + (UINT32_C(0x1) << 7) + /* + * Header/Data Separation: + * Packet was placed using Header/Data separation algorithm. + * The separation location is indicated by the itype field. + */ + #define RX_TPA_START_CMPL_FLAGS_PLACEMENT_HDS \ + (UINT32_C(0x2) << 7) + /* + * GRO/Jumbo: + * Packet will be placed using GRO/Jumbo where the first + * packet is filled with data. Subsequent packets will be + * placed such that any one packet does not span two + * aggregation buffers unless it starts at the beginning of + * an aggregation buffer. + */ + #define RX_TPA_START_CMPL_FLAGS_PLACEMENT_GRO_JUMBO \ + (UINT32_C(0x5) << 7) + /* + * GRO/Header-Data Separation: + * Packet will be placed using GRO/HDS where the header + * is in the first packet. + * Payload of each packet will be + * placed such that any one packet does not span two + * aggregation buffers unless it starts at the beginning of + * an aggregation buffer. + */ + #define RX_TPA_START_CMPL_FLAGS_PLACEMENT_GRO_HDS \ + (UINT32_C(0x6) << 7) + #define RX_TPA_START_CMPL_FLAGS_PLACEMENT_LAST \ + RX_TPA_START_CMPL_FLAGS_PLACEMENT_GRO_HDS + /* This bit is '1' if the RSS field in this completion is valid. */ + #define RX_TPA_START_CMPL_FLAGS_RSS_VALID UINT32_C(0x400) + /* unused is 1 b */ + #define RX_TPA_START_CMPL_FLAGS_UNUSED UINT32_C(0x800) + /* + * This value indicates what the inner packet determined for the + * packet was. + */ + #define RX_TPA_START_CMPL_FLAGS_ITYPE_MASK UINT32_C(0xf000) + #define RX_TPA_START_CMPL_FLAGS_ITYPE_SFT 12 + /* + * TCP Packet: + * Indicates that the packet was IP and TCP. 
+ */ + #define RX_TPA_START_CMPL_FLAGS_ITYPE_TCP \ + (UINT32_C(0x2) << 12) + #define RX_TPA_START_CMPL_FLAGS_ITYPE_LAST \ + RX_TPA_START_CMPL_FLAGS_ITYPE_TCP + /* + * This value indicates the amount of packet data written to the + * buffer the opaque field in this completion corresponds to. + */ + uint16_t len; + /* + * This is a copy of the opaque field from the RX BD this completion + * corresponds to. + */ + uint32_t opaque; + /* + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. + */ + uint8_t v1; + /* + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. + */ + #define RX_TPA_START_CMPL_V1 UINT32_C(0x1) + #define RX_TPA_START_CMPL_LAST RX_TPA_START_CMPL_V1 + /* + * This is the RSS hash type for the packet. The value is packed + * {tuple_extrac_op[1:0],rss_profile_id[4:0],tuple_extrac_op[2]}. + * + * The value of tuple_extrac_op provides the information about + * what fields the hash was computed on. + * * 0: The RSS hash was computed over source IP address, + * destination IP address, source port, and destination port of inner + * IP and TCP or UDP headers. Note: For non-tunneled packets, + * the packet headers are considered inner packet headers for the RSS + * hash computation purpose. + * * 1: The RSS hash was computed over source IP address and destination + * IP address of inner IP header. Note: For non-tunneled packets, + * the packet headers are considered inner packet headers for the RSS + * hash computation purpose. + * * 2: The RSS hash was computed over source IP address, + * destination IP address, source port, and destination port of + * IP and TCP or UDP headers of outer tunnel headers. + * Note: For non-tunneled packets, this value is not applicable. + * * 3: The RSS hash was computed over source IP address and + * destination IP address of IP header of outer tunnel headers. + * Note: For non-tunneled packets, this value is not applicable. + * + * Note that 4-tuples values listed above are applicable + * for layer 4 protocols supported and enabled for RSS in the hardware, + * HWRM firmware, and drivers. For example, if RSS hash is supported and + * enabled for TCP traffic only, then the values of tuple_extract_op + * corresponding to 4-tuples are only valid for TCP traffic. + */ + uint8_t rss_hash_type; + /* + * This is the aggregation ID that the completion is associated + * with. Use this number to correlate the TPA start completion + * with the TPA end completion. + */ + uint16_t agg_id; + /* unused2 is 9 b */ + #define RX_TPA_START_CMPL_UNUSED2_MASK UINT32_C(0x1ff) + #define RX_TPA_START_CMPL_UNUSED2_SFT 0 + /* + * This is the aggregation ID that the completion is associated + * with. Use this number to correlate the TPA start completion + * with the TPA end completion. + */ + #define RX_TPA_START_CMPL_AGG_ID_MASK UINT32_C(0xfe00) + #define RX_TPA_START_CMPL_AGG_ID_SFT 9 + /* + * This value is the RSS hash value calculated for the packet + * based on the mode bits and key value in the VNIC. + */ + uint32_t rss_hash; +} __rte_packed; + +/* + * Last 16 bytes of rx_tpa_start_cmpl. + * + * This TPA completion structure is used on devices where the + * `hwrm_vnic_qcaps.max_aggs_supported` value is 0. 
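[Illustrative sketch, not part of the generated header.] The rss_hash_type packing and the agg_id sub-field defined above unpack with plain shifts. The sketch below assumes the concatenation lists the most-significant field first, i.e. bits 7:6 hold tuple_extract_op[1:0], bits 5:1 the rss_profile_id, and bit 0 tuple_extract_op[2]; the struct and helper names are hypothetical, and the parameters are assumed to be in CPU byte order.

#include <stdint.h>

/* Hypothetical decoded view of rss_hash_type and agg_id. */
struct tpa_start_info {
        uint8_t tuple_extract_op; /* which fields the RSS hash covered */
        uint8_t rss_profile_id;
        uint16_t agg_id;          /* 7-bit aggregation ID */
};

static inline void
tpa_start_unpack(uint8_t rss_hash_type, uint16_t agg_id_field,
                 struct tpa_start_info *info)
{
        info->tuple_extract_op = ((rss_hash_type & 0x1) << 2) |
                                 ((rss_hash_type >> 6) & 0x3);
        info->rss_profile_id = (rss_hash_type >> 1) & 0x1f;
        info->agg_id = (agg_id_field & RX_TPA_START_CMPL_AGG_ID_MASK) >>
                       RX_TPA_START_CMPL_AGG_ID_SFT;
}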
+ */ +/* rx_tpa_start_cmpl_hi (size:128b/16B) */ +struct rx_tpa_start_cmpl_hi { + uint32_t flags2; + /* + * This indicates that the ip checksum was calculated for the + * inner packet and that the sum passed for all segments + * included in the aggregation. + */ + #define RX_TPA_START_CMPL_FLAGS2_IP_CS_CALC UINT32_C(0x1) + /* + * This indicates that the TCP, UDP or ICMP checksum was + * calculated for the inner packet and that the sum passed + * for all segments included in the aggregation. + */ + #define RX_TPA_START_CMPL_FLAGS2_L4_CS_CALC UINT32_C(0x2) + /* + * This indicates that the ip checksum was calculated for the + * tunnel header and that the sum passed for all segments + * included in the aggregation. + */ + #define RX_TPA_START_CMPL_FLAGS2_T_IP_CS_CALC UINT32_C(0x4) + /* + * This indicates that the UDP checksum was + * calculated for the tunnel packet and that the sum passed for + * all segments included in the aggregation. + */ + #define RX_TPA_START_CMPL_FLAGS2_T_L4_CS_CALC UINT32_C(0x8) + /* This value indicates what format the metadata field is. */ + #define RX_TPA_START_CMPL_FLAGS2_META_FORMAT_MASK UINT32_C(0xf0) + #define RX_TPA_START_CMPL_FLAGS2_META_FORMAT_SFT 4 + /* No metadata information. Value is zero. */ + #define RX_TPA_START_CMPL_FLAGS2_META_FORMAT_NONE \ + (UINT32_C(0x0) << 4) + /* + * The metadata field contains the VLAN tag and TPID value. + * - metadata[11:0] contains the vlan VID value. + * - metadata[12] contains the vlan DE value. + * - metadata[15:13] contains the vlan PRI value. + * - metadata[31:16] contains the vlan TPID value. + */ + #define RX_TPA_START_CMPL_FLAGS2_META_FORMAT_VLAN \ + (UINT32_C(0x1) << 4) + #define RX_TPA_START_CMPL_FLAGS2_META_FORMAT_LAST \ + RX_TPA_START_CMPL_FLAGS2_META_FORMAT_VLAN + /* + * This field indicates the IP type for the inner-most IP header. + * A value of '0' indicates IPv4. A value of '1' indicates IPv6. + */ + #define RX_TPA_START_CMPL_FLAGS2_IP_TYPE UINT32_C(0x100) + /* + * This is data from the CFA block as indicated by the meta_format + * field. + */ + uint32_t metadata; + /* When meta_format=1, this value is the VLAN VID. */ + #define RX_TPA_START_CMPL_METADATA_VID_MASK UINT32_C(0xfff) + #define RX_TPA_START_CMPL_METADATA_VID_SFT 0 + /* When meta_format=1, this value is the VLAN DE. */ + #define RX_TPA_START_CMPL_METADATA_DE UINT32_C(0x1000) + /* When meta_format=1, this value is the VLAN PRI. */ + #define RX_TPA_START_CMPL_METADATA_PRI_MASK UINT32_C(0xe000) + #define RX_TPA_START_CMPL_METADATA_PRI_SFT 13 + /* When meta_format=1, this value is the VLAN TPID. */ + #define RX_TPA_START_CMPL_METADATA_TPID_MASK UINT32_C(0xffff0000) + #define RX_TPA_START_CMPL_METADATA_TPID_SFT 16 + uint16_t v2; + /* + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. + */ + #define RX_TPA_START_CMPL_V2 UINT32_C(0x1) + /* + * This field identifies the CFA action rule that was used for this + * packet. + */ + uint16_t cfa_code; + /* + * This is the size in bytes of the inner most L4 header. + * This can be subtracted from the payload_offset to determine + * the start of the inner most L4 header. + */ + uint32_t inner_l4_size_inner_l3_offset_inner_l2_offset_outer_l3_offset; + /* + * This is the offset from the beginning of the packet in bytes for + * the outer L3 header. If there is no outer L3 header, then this + * value is zero. 
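[Illustrative sketch, not part of the generated header.] When meta_format reports VLAN, the metadata word described above lays out PRI/DE/VID in the same bit positions as an 802.1Q tag control word, so the low 16 bits can be used directly as a TCI. A small sketch with a hypothetical helper name; rte_le_to_cpu_32() is assumed for the little-endian fields.

#include <stdbool.h>
#include <stdint.h>
#include <rte_byteorder.h>

/* Extract the VLAN TCI and TPID when flags2 reports meta_format = VLAN. */
static inline bool
tpa_start_vlan_meta(uint32_t flags2_le, uint32_t metadata_le,
                    uint16_t *tci, uint16_t *tpid)
{
        uint32_t flags2 = rte_le_to_cpu_32(flags2_le);
        uint32_t meta = rte_le_to_cpu_32(metadata_le);

        if ((flags2 & RX_TPA_START_CMPL_FLAGS2_META_FORMAT_MASK) !=
            RX_TPA_START_CMPL_FLAGS2_META_FORMAT_VLAN)
                return false;

        /* PRI[15:13], DE[12], VID[11:0]: same layout as an 802.1Q TCI. */
        *tci = meta & (RX_TPA_START_CMPL_METADATA_PRI_MASK |
                       RX_TPA_START_CMPL_METADATA_DE |
                       RX_TPA_START_CMPL_METADATA_VID_MASK);
        *tpid = (meta & RX_TPA_START_CMPL_METADATA_TPID_MASK) >>
                RX_TPA_START_CMPL_METADATA_TPID_SFT;
        return true;
}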
+ */ + #define RX_TPA_START_CMPL_OUTER_L3_OFFSET_MASK UINT32_C(0x1ff) + #define RX_TPA_START_CMPL_OUTER_L3_OFFSET_SFT 0 + /* + * This is the offset from the beginning of the packet in bytes for + * the inner most L2 header. + */ + #define RX_TPA_START_CMPL_INNER_L2_OFFSET_MASK UINT32_C(0x3fe00) + #define RX_TPA_START_CMPL_INNER_L2_OFFSET_SFT 9 + /* + * This is the offset from the beginning of the packet in bytes for + * the inner most L3 header. + */ + #define RX_TPA_START_CMPL_INNER_L3_OFFSET_MASK UINT32_C(0x7fc0000) + #define RX_TPA_START_CMPL_INNER_L3_OFFSET_SFT 18 + /* + * This is the size in bytes of the inner most L4 header. + * This can be subtracted from the payload_offset to determine + * the start of the inner most L4 header. + */ + #define RX_TPA_START_CMPL_INNER_L4_SIZE_MASK UINT32_C(0xf8000000) + #define RX_TPA_START_CMPL_INNER_L4_SIZE_SFT 27 +} __rte_packed; + +/* + * This TPA completion structure is used on devices where the + * `hwrm_vnic_qcaps.max_aggs_supported` value is 0. + */ +/* rx_tpa_end_cmpl (size:128b/16B) */ +struct rx_tpa_end_cmpl { + uint16_t flags_type; + /* + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records. + */ + #define RX_TPA_END_CMPL_TYPE_MASK UINT32_C(0x3f) + #define RX_TPA_END_CMPL_TYPE_SFT 0 + /* + * RX L2 TPA End Completion: + * Completion at the end of a TPA operation. + * Length = 32B + */ + #define RX_TPA_END_CMPL_TYPE_RX_TPA_END UINT32_C(0x15) + #define RX_TPA_END_CMPL_TYPE_LAST \ + RX_TPA_END_CMPL_TYPE_RX_TPA_END + #define RX_TPA_END_CMPL_FLAGS_MASK UINT32_C(0xffc0) + #define RX_TPA_END_CMPL_FLAGS_SFT 6 + /* + * When this bit is '1', it indicates a packet that has an + * error of some type. Type of error is indicated in + * error_flags. + */ + #define RX_TPA_END_CMPL_FLAGS_ERROR UINT32_C(0x40) + /* This field indicates how the packet was placed in the buffer. */ + #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_MASK UINT32_C(0x380) + #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_SFT 7 + /* + * Jumbo: + * TPA Packet was placed using jumbo algorithm. This means + * that the first buffer will be filled with data before + * moving to aggregation buffers. Each aggregation buffer + * will be filled before moving to the next aggregation + * buffer. + */ + #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_JUMBO \ + (UINT32_C(0x1) << 7) + /* + * Header/Data Separation: + * Packet was placed using Header/Data separation algorithm. + * The separation location is indicated by the itype field. + */ + #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_HDS \ + (UINT32_C(0x2) << 7) + /* + * GRO/Jumbo: + * Packet will be placed using GRO/Jumbo where the first + * packet is filled with data. Subsequent packets will be + * placed such that any one packet does not span two + * aggregation buffers unless it starts at the beginning of + * an aggregation buffer. + */ + #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_GRO_JUMBO \ + (UINT32_C(0x5) << 7) + /* + * GRO/Header-Data Separation: + * Packet will be placed using GRO/HDS where the header + * is in the first packet. + * Payload of each packet will be + * placed such that any one packet does not span two + * aggregation buffers unless it starts at the beginning of + * an aggregation buffer. 
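[Illustrative sketch, not part of the generated header.] The packed inner_l4_size_inner_l3_offset_inner_l2_offset_outer_l3_offset word decomposes into four sub-fields with the masks and shifts just defined. A sketch with a hypothetical holder struct; the packed word is assumed to be in CPU byte order.

#include <stdint.h>

/* Hypothetical decoded view of the packed header-offset word. */
struct tpa_hdr_offsets {
        uint16_t outer_l3_offset; /* bytes from the start of the packet */
        uint16_t inner_l2_offset;
        uint16_t inner_l3_offset;
        uint8_t inner_l4_size;    /* size in bytes of the inner-most L4 header */
};

static inline void
tpa_start_hdr_offsets(uint32_t packed, struct tpa_hdr_offsets *o)
{
        o->outer_l3_offset = (packed & RX_TPA_START_CMPL_OUTER_L3_OFFSET_MASK) >>
                             RX_TPA_START_CMPL_OUTER_L3_OFFSET_SFT;
        o->inner_l2_offset = (packed & RX_TPA_START_CMPL_INNER_L2_OFFSET_MASK) >>
                             RX_TPA_START_CMPL_INNER_L2_OFFSET_SFT;
        o->inner_l3_offset = (packed & RX_TPA_START_CMPL_INNER_L3_OFFSET_MASK) >>
                             RX_TPA_START_CMPL_INNER_L3_OFFSET_SFT;
        o->inner_l4_size = (packed & RX_TPA_START_CMPL_INNER_L4_SIZE_MASK) >>
                           RX_TPA_START_CMPL_INNER_L4_SIZE_SFT;
}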
+ */ + #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_GRO_HDS \ + (UINT32_C(0x6) << 7) + #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_LAST \ + RX_TPA_END_CMPL_FLAGS_PLACEMENT_GRO_HDS + /* unused is 2 b */ + #define RX_TPA_END_CMPL_FLAGS_UNUSED_MASK UINT32_C(0xc00) + #define RX_TPA_END_CMPL_FLAGS_UNUSED_SFT 10 + /* + * This value indicates what the inner packet determined for the + * packet was. + * - 2 TCP Packet + * Indicates that the packet was IP and TCP. This indicates + * that the ip_cs field is valid and that the tcp_udp_cs + * field is valid and contains the TCP checksum. + * This also indicates that the payload_offset field is valid. + */ + #define RX_TPA_END_CMPL_FLAGS_ITYPE_MASK UINT32_C(0xf000) + #define RX_TPA_END_CMPL_FLAGS_ITYPE_SFT 12 + /* + * This value is zero for TPA End completions. + * There is no data in the buffer that corresponds to the opaque + * value in this completion. + */ + uint16_t len; + /* + * This is a copy of the opaque field from the RX BD this completion + * corresponds to. + */ + uint32_t opaque; + /* + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. + */ + uint8_t agg_bufs_v1; + /* + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. + */ + #define RX_TPA_END_CMPL_V1 UINT32_C(0x1) + /* + * This value is the number of aggregation buffers that follow this + * entry in the completion ring that are a part of this aggregation + * packet. + * If the value is zero, then the packet is completely contained + * in the buffer space provided in the aggregation start completion. + */ + #define RX_TPA_END_CMPL_AGG_BUFS_MASK UINT32_C(0x7e) + #define RX_TPA_END_CMPL_AGG_BUFS_SFT 1 + /* This value is the number of segments in the TPA operation. */ + uint8_t tpa_segs; + /* + * This value indicates the offset in bytes from the beginning of the packet + * where the inner payload starts. This value is valid for TCP, UDP, + * FCoE, and RoCE packets. + * + * A value of zero indicates an offset of 256 bytes. + */ + uint8_t payload_offset; + uint8_t agg_id; + /* unused2 is 1 b */ + #define RX_TPA_END_CMPL_UNUSED2 UINT32_C(0x1) + /* + * This is the aggregation ID that the completion is associated + * with. Use this number to correlate the TPA start completion + * with the TPA end completion. + */ + #define RX_TPA_END_CMPL_AGG_ID_MASK UINT32_C(0xfe) + #define RX_TPA_END_CMPL_AGG_ID_SFT 1 + /* + * For non-GRO packets, this value is the + * timestamp delta between earliest and latest timestamp values for + * TPA packet. If packets were not time stamped, then delta will be + * zero. + * + * For GRO packets, this field is zero except for the following + * sub-fields. + * - tsdelta[31] + * Timestamp present indication. When '0', no Timestamp + * option is in the packet. When '1', then a Timestamp + * option is present in the packet. + */ + uint32_t tsdelta; +} __rte_packed; + +/* + * Last 16 bytes of rx_tpa_end_cmpl. + * + * This TPA completion structure is used on devices where the + * `hwrm_vnic_qcaps.max_aggs_supported` value is 0. + */ +/* rx_tpa_end_cmpl_hi (size:128b/16B) */ +struct rx_tpa_end_cmpl_hi { + uint32_t tpa_dup_acks; + /* + * This value is the number of duplicate ACKs that have been + * received as part of the TPA operation. 
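[Illustrative sketch, not part of the generated header.] In the rx_tpa_end_cmpl record defined above, the aggregation-buffer count and the aggregation ID each share a byte with other bits, and payload_offset uses zero to encode 256 bytes. A short sketch of pulling those out; the struct and helper names are hypothetical.

#include <stdint.h>

/* Hypothetical decoded view of the first 16B half of a TPA end completion. */
struct tpa_end_info {
        unsigned int agg_bufs;       /* aggregation buffer completions to follow */
        unsigned int agg_id;         /* 7-bit aggregation ID */
        unsigned int payload_offset; /* a raw value of zero encodes 256 bytes */
        unsigned int segs;           /* number of TPA segments */
};

static inline void
tpa_end_unpack(uint8_t agg_bufs_v1, uint8_t agg_id_field,
               uint8_t payload_offset, uint8_t tpa_segs,
               struct tpa_end_info *info)
{
        info->agg_bufs = (agg_bufs_v1 & RX_TPA_END_CMPL_AGG_BUFS_MASK) >>
                         RX_TPA_END_CMPL_AGG_BUFS_SFT;
        info->agg_id = (agg_id_field & RX_TPA_END_CMPL_AGG_ID_MASK) >>
                       RX_TPA_END_CMPL_AGG_ID_SFT;
        info->payload_offset = payload_offset ? payload_offset : 256;
        info->segs = tpa_segs;
}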
+ */ + #define RX_TPA_END_CMPL_TPA_DUP_ACKS_MASK UINT32_C(0xf) + #define RX_TPA_END_CMPL_TPA_DUP_ACKS_SFT 0 + /* + * This value is the valid when TPA completion is active. It + * indicates the length of the longest segment of the TPA operation + * for LRO mode and the length of the first segment in GRO mode. + * + * This value may be used by GRO software to re-construct the original + * packet stream from the TPA packet. This is the length of all + * but the last segment for GRO. In LRO mode this value may be used + * to indicate MSS size to the stack. + */ + uint16_t tpa_seg_len; + /* unused4 is 16 b */ + uint16_t unused3; + uint16_t errors_v2; + /* + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. + */ + #define RX_TPA_END_CMPL_V2 UINT32_C(0x1) + #define RX_TPA_END_CMPL_ERRORS_MASK UINT32_C(0xfffe) + #define RX_TPA_END_CMPL_ERRORS_SFT 1 + /* + * This error indicates that there was some sort of problem with + * the BDs for the packet that was found after part of the + * packet was already placed. The packet should be treated as + * invalid. + */ + #define RX_TPA_END_CMPL_ERRORS_BUFFER_ERROR_MASK UINT32_C(0xe) + #define RX_TPA_END_CMPL_ERRORS_BUFFER_ERROR_SFT 1 + /* + * This error occurs when there is a fatal HW problem in + * the chip only. It indicates that there were not + * BDs on chip but that there was adequate reservation. + * provided by the TPA block. + */ + #define RX_TPA_END_CMPL_ERRORS_BUFFER_ERROR_NOT_ON_CHIP \ + (UINT32_C(0x2) << 1) + /* + * This error occurs when TPA block was not configured to + * reserve adequate BDs for TPA operations on this RX + * ring. All data for the TPA operation was not placed. + * + * This error can also be generated when the number of + * segments is not programmed correctly in TPA and the + * 33 total aggregation buffers allowed for the TPA + * operation has been exceeded. + */ + #define RX_TPA_END_CMPL_ERRORS_BUFFER_ERROR_RSV_ERROR \ + (UINT32_C(0x4) << 1) + #define RX_TPA_END_CMPL_ERRORS_BUFFER_ERROR_LAST \ + RX_TPA_END_CMPL_ERRORS_BUFFER_ERROR_RSV_ERROR + /* unused5 is 16 b */ + uint16_t unused_4; + /* + * This is the opaque value that was completed for the TPA start + * completion that corresponds to this TPA end completion. + */ + uint32_t start_opaque; +} __rte_packed; + +/* + * This TPA completion structure is used on devices where the + * `hwrm_vnic_qcaps.max_aggs_supported` value is greater than 0. + */ +/* rx_tpa_v2_start_cmpl (size:128b/16B) */ +struct rx_tpa_v2_start_cmpl { + uint16_t flags_type; + /* + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records. + */ + #define RX_TPA_V2_START_CMPL_TYPE_MASK \ + UINT32_C(0x3f) + #define RX_TPA_V2_START_CMPL_TYPE_SFT 0 + /* + * RX L2 TPA Start Completion: + * Completion at the beginning of a TPA operation. + * Length = 32B + */ + #define RX_TPA_V2_START_CMPL_TYPE_RX_TPA_START \ + UINT32_C(0x13) + #define RX_TPA_V2_START_CMPL_TYPE_LAST \ + RX_TPA_V2_START_CMPL_TYPE_RX_TPA_START + #define RX_TPA_V2_START_CMPL_FLAGS_MASK \ + UINT32_C(0xffc0) + #define RX_TPA_V2_START_CMPL_FLAGS_SFT 6 + /* This bit will always be '0' for TPA start completions. */ + #define RX_TPA_V2_START_CMPL_FLAGS_ERROR \ + UINT32_C(0x40) + /* This field indicates how the packet was placed in the buffer. 
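[Illustrative sketch, not part of the generated header.] The start and end halves of a TPA operation are tied together by the aggregation ID and, as a cross-check, by start_opaque. One way a consumer might keep per-aggregation state between the two completions; the table layout and names are hypothetical, and the 128-entry size follows from the 7-bit agg_id used by these non-v2 completions.

#include <stdint.h>

#define TPA_MAX_AGGS 128 /* 7-bit agg_id in the non-v2 TPA completions */

/* Hypothetical per-ring reassembly state, one slot per aggregation ID. */
struct tpa_slot {
        uint32_t start_opaque; /* opaque echoed back in rx_tpa_end_cmpl_hi */
        uint32_t rss_hash;
        uint8_t in_use;
};

static struct tpa_slot tpa_slots[TPA_MAX_AGGS];

static inline void
tpa_record_start(uint16_t agg_id_field, uint32_t opaque, uint32_t rss_hash)
{
        uint16_t id = (agg_id_field & RX_TPA_START_CMPL_AGG_ID_MASK) >>
                      RX_TPA_START_CMPL_AGG_ID_SFT;

        tpa_slots[id] = (struct tpa_slot){
                .start_opaque = opaque,
                .rss_hash = rss_hash,
                .in_use = 1,
        };
}

/* Returns the slot index, or -1 if the end completion does not match. */
static inline int
tpa_match_end(uint8_t agg_id_field, uint32_t start_opaque)
{
        uint8_t id = (agg_id_field & RX_TPA_END_CMPL_AGG_ID_MASK) >>
                     RX_TPA_END_CMPL_AGG_ID_SFT;

        if (!tpa_slots[id].in_use || tpa_slots[id].start_opaque != start_opaque)
                return -1;
        tpa_slots[id].in_use = 0;
        return id;
}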
*/ + #define RX_TPA_V2_START_CMPL_FLAGS_PLACEMENT_MASK \ + UINT32_C(0x380) + #define RX_TPA_V2_START_CMPL_FLAGS_PLACEMENT_SFT 7 + /* + * Jumbo: + * TPA Packet was placed using jumbo algorithm. This means + * that the first buffer will be filled with data before + * moving to aggregation buffers. Each aggregation buffer + * will be filled before moving to the next aggregation + * buffer. + */ + #define RX_TPA_V2_START_CMPL_FLAGS_PLACEMENT_JUMBO \ + (UINT32_C(0x1) << 7) + /* + * Header/Data Separation: + * Packet was placed using Header/Data separation algorithm. + * The separation location is indicated by the itype field. + */ + #define RX_TPA_V2_START_CMPL_FLAGS_PLACEMENT_HDS \ + (UINT32_C(0x2) << 7) + /* + * GRO/Jumbo: + * Packet will be placed using GRO/Jumbo where the first + * packet is filled with data. Subsequent packets will be + * placed such that any one packet does not span two + * aggregation buffers unless it starts at the beginning of + * an aggregation buffer. + */ + #define RX_TPA_V2_START_CMPL_FLAGS_PLACEMENT_GRO_JUMBO \ + (UINT32_C(0x5) << 7) + /* + * GRO/Header-Data Separation: + * Packet will be placed using GRO/HDS where the header + * is in the first packet. + * Payload of each packet will be + * placed such that any one packet does not span two + * aggregation buffers unless it starts at the beginning of + * an aggregation buffer. + */ + #define RX_TPA_V2_START_CMPL_FLAGS_PLACEMENT_GRO_HDS \ + (UINT32_C(0x6) << 7) + #define RX_TPA_V2_START_CMPL_FLAGS_PLACEMENT_LAST \ + RX_TPA_V2_START_CMPL_FLAGS_PLACEMENT_GRO_HDS + /* This bit is '1' if the RSS field in this completion is valid. */ + #define RX_TPA_V2_START_CMPL_FLAGS_RSS_VALID \ + UINT32_C(0x400) + /* + * For devices that support timestamps, when this bit is cleared the + * `inner_l4_size_inner_l3_offset_inner_l2_offset_outer_l3_offset` + * field contains the 32b timestamp for + * the packet from the MAC. When this bit is set, the + * `inner_l4_size_inner_l3_offset_inner_l2_offset_outer_l3_offset` + * field contains the outer_l3_offset, inner_l2_offset, + * inner_l3_offset, and inner_l4_size. + */ + #define RX_TPA_V2_START_CMPL_FLAGS_TIMESTAMP_FLD_FORMAT \ + UINT32_C(0x800) + /* + * This value indicates what the inner packet determined for the + * packet was. + */ + #define RX_TPA_V2_START_CMPL_FLAGS_ITYPE_MASK \ + UINT32_C(0xf000) + #define RX_TPA_V2_START_CMPL_FLAGS_ITYPE_SFT 12 + /* + * TCP Packet: + * Indicates that the packet was IP and TCP. + */ + #define RX_TPA_V2_START_CMPL_FLAGS_ITYPE_TCP \ + (UINT32_C(0x2) << 12) + #define RX_TPA_V2_START_CMPL_FLAGS_ITYPE_LAST \ + RX_TPA_V2_START_CMPL_FLAGS_ITYPE_TCP + /* + * This value indicates the amount of packet data written to the + * buffer the opaque field in this completion corresponds to. + */ + uint16_t len; + /* + * This is a copy of the opaque field from the RX BD this completion + * corresponds to. + */ + uint32_t opaque; + /* + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. + */ + uint8_t v1; + /* + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. + */ + #define RX_TPA_V2_START_CMPL_V1 UINT32_C(0x1) + #define RX_TPA_V2_START_CMPL_LAST RX_TPA_V2_START_CMPL_V1 + /* + * This is the RSS hash type for the packet. The value is packed + * {tuple_extrac_op[1:0],rss_profile_id[4:0],tuple_extrac_op[2]}. 
+ * + * The value of tuple_extrac_op provides the information about + * what fields the hash was computed on. + * * 0: The RSS hash was computed over source IP address, + * destination IP address, source port, and destination port of inner + * IP and TCP or UDP headers. Note: For non-tunneled packets, + * the packet headers are considered inner packet headers for the RSS + * hash computation purpose. + * * 1: The RSS hash was computed over source IP address and destination + * IP address of inner IP header. Note: For non-tunneled packets, + * the packet headers are considered inner packet headers for the RSS + * hash computation purpose. + * * 2: The RSS hash was computed over source IP address, + * destination IP address, source port, and destination port of + * IP and TCP or UDP headers of outer tunnel headers. + * Note: For non-tunneled packets, this value is not applicable. + * * 3: The RSS hash was computed over source IP address and + * destination IP address of IP header of outer tunnel headers. + * Note: For non-tunneled packets, this value is not applicable. + * + * Note that 4-tuples values listed above are applicable + * for layer 4 protocols supported and enabled for RSS in the hardware, + * HWRM firmware, and drivers. For example, if RSS hash is supported and + * enabled for TCP traffic only, then the values of tuple_extract_op + * corresponding to 4-tuples are only valid for TCP traffic. + */ + uint8_t rss_hash_type; + /* + * This is the aggregation ID that the completion is associated + * with. Use this number to correlate the TPA start completion + * with the TPA end completion. + */ + uint16_t agg_id; + /* + * This value is the RSS hash value calculated for the packet + * based on the mode bits and key value in the VNIC. + */ + uint32_t rss_hash; +} __rte_packed; + +/* + * Last 16 bytes of rx_tpa_v2_start_cmpl. + * + * This TPA completion structure is used on devices where the + * `hwrm_vnic_qcaps.max_aggs_supported` value is greater than 0. + */ +/* rx_tpa_v2_start_cmpl_hi (size:128b/16B) */ +struct rx_tpa_v2_start_cmpl_hi { + uint32_t flags2; + /* + * This indicates that the ip checksum was calculated for the + * inner packet and that the sum passed for all segments + * included in the aggregation. + */ + #define RX_TPA_V2_START_CMPL_FLAGS2_IP_CS_CALC \ + UINT32_C(0x1) + /* + * This indicates that the TCP, UDP or ICMP checksum was + * calculated for the inner packet and that the sum passed + * for all segments included in the aggregation. + */ + #define RX_TPA_V2_START_CMPL_FLAGS2_L4_CS_CALC \ + UINT32_C(0x2) + /* + * This indicates that the ip checksum was calculated for the + * tunnel header and that the sum passed for all segments + * included in the aggregation. + */ + #define RX_TPA_V2_START_CMPL_FLAGS2_T_IP_CS_CALC \ + UINT32_C(0x4) + /* + * This indicates that the UDP checksum was + * calculated for the tunnel packet and that the sum passed for + * all segments included in the aggregation. + */ + #define RX_TPA_V2_START_CMPL_FLAGS2_T_L4_CS_CALC \ + UINT32_C(0x8) + /* This value indicates what format the metadata field is. */ + #define RX_TPA_V2_START_CMPL_FLAGS2_META_FORMAT_MASK \ + UINT32_C(0xf0) + #define RX_TPA_V2_START_CMPL_FLAGS2_META_FORMAT_SFT 4 + /* No metadata informtaion. Value is zero. */ + #define RX_TPA_V2_START_CMPL_FLAGS2_META_FORMAT_NONE \ + (UINT32_C(0x0) << 4) + /* + * The metadata field contains the VLAN tag and TPID value. + * - metadata[11:0] contains the vlan VID value. + * - metadata[12] contains the vlan DE value. 
+ * - metadata[15:13] contains the vlan PRI value. + * - metadata[31:16] contains the vlan TPID value. + */ + #define RX_TPA_V2_START_CMPL_FLAGS2_META_FORMAT_VLAN \ + (UINT32_C(0x1) << 4) + /* + * If ext_meta_format is equal to 1, the metadata field + * contains the lower 16b of the tunnel ID value, justified + * to LSB + * - VXLAN = VNI[23:0] -> VXLAN Network ID + * - Geneve (NGE) = VNI[23:0] a-> Virtual Network Identifier. + * - NVGRE = TNI[23:0] -> Tenant Network ID + * - GRE = KEY[31:0 -> key fieled with bit mask. zero if K = 0 + * - IPV4 = 0 (not populated) + * - IPV6 = Flow Label[19:0] + * - PPPoE = sessionID[15:0] + * - MPLs = Outer label[19:0] + * - UPAR = Selected[31:0] with bit mask + */ + #define RX_TPA_V2_START_CMPL_FLAGS2_META_FORMAT_TUNNEL_ID \ + (UINT32_C(0x2) << 4) + /* + * if ext_meta_format is equal to 1, metadata field contains + * 16b metadata from the prepended header (chdr_data). + */ + #define RX_TPA_V2_START_CMPL_FLAGS2_META_FORMAT_CHDR_DATA \ + (UINT32_C(0x3) << 4) + /* + * If ext_meta_format is equal to 1, the metadata field contains + * the outer_l3_offset, inner_l2_offset, inner_l3_offset and + * inner_l4_size. + * - metadata[8:0] contains the outer_l3_offset. + * - metadata[17:9] contains the inner_l2_offset. + * - metadata[26:18] contains the inner_l3_offset. + * - metadata[31:27] contains the inner_l4_size. + */ + #define RX_TPA_V2_START_CMPL_FLAGS2_META_FORMAT_HDR_OFFSET \ + (UINT32_C(0x4) << 4) + #define RX_TPA_V2_START_CMPL_FLAGS2_META_FORMAT_LAST \ + RX_TPA_V2_START_CMPL_FLAGS2_META_FORMAT_HDR_OFFSET + /* + * This field indicates the IP type for the inner-most IP header. + * A value of '0' indicates IPv4. A value of '1' indicates IPv6. + */ + #define RX_TPA_V2_START_CMPL_FLAGS2_IP_TYPE \ + UINT32_C(0x100) + /* + * This indicates that the complete 1's complement checksum was + * calculated for the packet. + */ + #define RX_TPA_V2_START_CMPL_FLAGS2_COMPLETE_CHECKSUM_CALC \ + UINT32_C(0x200) + /* + * The combination of this value and meta_format indicated what + * format the metadata field is. + */ + #define RX_TPA_V2_START_CMPL_FLAGS2_EXT_META_FORMAT_MASK \ + UINT32_C(0xc00) + #define RX_TPA_V2_START_CMPL_FLAGS2_EXT_META_FORMAT_SFT 10 + /* + * This value is the complete 1's complement checksum calculated from + * the start of the outer L3 header to the end of the packet (not + * including the ethernet crc). It is valid when the + * 'complete_checksum_calc' flag is set. For TPA Start completions, + * the complete checksum is calculated for the first packet in the + * aggregation only. + */ + #define RX_TPA_V2_START_CMPL_FLAGS2_COMPLETE_CHECKSUM_MASK \ + UINT32_C(0xffff0000) + #define RX_TPA_V2_START_CMPL_FLAGS2_COMPLETE_CHECKSUM_SFT 16 + /* + * This is data from the CFA block as indicated by the meta_format + * field. + */ + uint32_t metadata; + /* When {ext_meta_format,meta_format}=1, this value is the VLAN VID. */ + #define RX_TPA_V2_START_CMPL_METADATA_VID_MASK UINT32_C(0xfff) + #define RX_TPA_V2_START_CMPL_METADATA_VID_SFT 0 + /* When {ext_meta_format,meta_format}=1, this value is the VLAN DE. */ + #define RX_TPA_V2_START_CMPL_METADATA_DE UINT32_C(0x1000) + /* When {ext_meta_format,meta_format}=1, this value is the VLAN PRI. */ + #define RX_TPA_V2_START_CMPL_METADATA_PRI_MASK UINT32_C(0xe000) + #define RX_TPA_V2_START_CMPL_METADATA_PRI_SFT 13 + /* When {ext_meta_format,meta_format}=1, this value is the VLAN TPID. 
*/ + #define RX_TPA_V2_START_CMPL_METADATA_TPID_MASK UINT32_C(0xffff0000) + #define RX_TPA_V2_START_CMPL_METADATA_TPID_SFT 16 + uint16_t errors_v2; + /* + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. + */ + #define RX_TPA_V2_START_CMPL_V2 \ + UINT32_C(0x1) + #define RX_TPA_V2_START_CMPL_ERRORS_MASK \ + UINT32_C(0xfffe) + #define RX_TPA_V2_START_CMPL_ERRORS_SFT 1 + /* + * This error indicates that there was some sort of problem with + * the BDs for the packet that was found after part of the + * packet was already placed. The packet should be treated as + * invalid. + */ + #define RX_TPA_V2_START_CMPL_ERRORS_BUFFER_ERROR_MASK \ + UINT32_C(0xe) + #define RX_TPA_V2_START_CMPL_ERRORS_BUFFER_ERROR_SFT 1 + /* No buffer error */ + #define RX_TPA_V2_START_CMPL_ERRORS_BUFFER_ERROR_NO_BUFFER \ + (UINT32_C(0x0) << 1) + /* + * Bad Format: + * BDs were not formatted correctly. + */ + #define RX_TPA_V2_START_CMPL_ERRORS_BUFFER_ERROR_BAD_FORMAT \ + (UINT32_C(0x3) << 1) + /* + * Flush: + * There was a bad_format error on the previous operation + */ + #define RX_TPA_V2_START_CMPL_ERRORS_BUFFER_ERROR_FLUSH \ + (UINT32_C(0x5) << 1) + #define RX_TPA_V2_START_CMPL_ERRORS_BUFFER_ERROR_LAST \ + RX_TPA_V2_START_CMPL_ERRORS_BUFFER_ERROR_FLUSH + /* + * This field identifies the CFA action rule that was used for this + * packet. + */ + uint16_t cfa_code; + /* + * For devices that support timestamps this field is overridden + * with the timestamp value. When `flags.timestamp_fld_format` is + * cleared, this field contains the 32b timestamp for the packet from the + * MAC. + * + * When `flags.timestamp_fld_format` is set, this field contains the + * outer_l3_offset, inner_l2_offset, inner_l3_offset, and inner_l4_size + * as defined below. + */ + uint32_t inner_l4_size_inner_l3_offset_inner_l2_offset_outer_l3_offset; + /* + * This is the offset from the beginning of the packet in bytes for + * the outer L3 header. If there is no outer L3 header, then this + * value is zero. + */ + #define RX_TPA_V2_START_CMPL_OUTER_L3_OFFSET_MASK UINT32_C(0x1ff) + #define RX_TPA_V2_START_CMPL_OUTER_L3_OFFSET_SFT 0 + /* + * This is the offset from the beginning of the packet in bytes for + * the inner most L2 header. + */ + #define RX_TPA_V2_START_CMPL_INNER_L2_OFFSET_MASK UINT32_C(0x3fe00) + #define RX_TPA_V2_START_CMPL_INNER_L2_OFFSET_SFT 9 + /* + * This is the offset from the beginning of the packet in bytes for + * the inner most L3 header. + */ + #define RX_TPA_V2_START_CMPL_INNER_L3_OFFSET_MASK UINT32_C(0x7fc0000) + #define RX_TPA_V2_START_CMPL_INNER_L3_OFFSET_SFT 18 + /* + * This is the size in bytes of the inner most L4 header. + * This can be subtracted from the payload_offset to determine + * the start of the inner most L4 header. + */ + #define RX_TPA_V2_START_CMPL_INNER_L4_SIZE_MASK UINT32_C(0xf8000000) + #define RX_TPA_V2_START_CMPL_INNER_L4_SIZE_SFT 27 +} __rte_packed; + +/* + * This TPA completion structure is used on devices where the + * `hwrm_vnic_qcaps.max_aggs_supported` value is greater than 0. + */ +/* rx_tpa_v2_end_cmpl (size:128b/16B) */ +struct rx_tpa_v2_end_cmpl { + uint16_t flags_type; + /* + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records. 
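[Illustrative sketch, not part of the generated header.] For the v2 start completion, flags.timestamp_fld_format selects between two interpretations of the packed word whose masks were just defined: a raw 32-bit MAC timestamp, or the four header offsets. A sketch of that branch with a hypothetical helper; parameters are assumed to be in CPU byte order.

#include <stdbool.h>
#include <stdint.h>

/*
 * Returns true when 'packed' carried the header offsets, false when it
 * carried the 32b MAC timestamp (stored through *timestamp instead).
 */
static inline bool
tpa_v2_start_hi_unpack(uint16_t flags_type, uint32_t packed,
                       uint32_t *timestamp,
                       uint32_t *outer_l3_off, uint32_t *inner_l2_off,
                       uint32_t *inner_l3_off, uint32_t *inner_l4_size)
{
        if (!(flags_type & RX_TPA_V2_START_CMPL_FLAGS_TIMESTAMP_FLD_FORMAT)) {
                *timestamp = packed;
                return false;
        }
        *outer_l3_off = (packed & RX_TPA_V2_START_CMPL_OUTER_L3_OFFSET_MASK) >>
                        RX_TPA_V2_START_CMPL_OUTER_L3_OFFSET_SFT;
        *inner_l2_off = (packed & RX_TPA_V2_START_CMPL_INNER_L2_OFFSET_MASK) >>
                        RX_TPA_V2_START_CMPL_INNER_L2_OFFSET_SFT;
        *inner_l3_off = (packed & RX_TPA_V2_START_CMPL_INNER_L3_OFFSET_MASK) >>
                        RX_TPA_V2_START_CMPL_INNER_L3_OFFSET_SFT;
        *inner_l4_size = (packed & RX_TPA_V2_START_CMPL_INNER_L4_SIZE_MASK) >>
                         RX_TPA_V2_START_CMPL_INNER_L4_SIZE_SFT;
        return true;
}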
+ */ + #define RX_TPA_V2_END_CMPL_TYPE_MASK UINT32_C(0x3f) + #define RX_TPA_V2_END_CMPL_TYPE_SFT 0 + /* + * RX L2 TPA End Completion: + * Completion at the end of a TPA operation. + * Length = 32B + */ + #define RX_TPA_V2_END_CMPL_TYPE_RX_TPA_END UINT32_C(0x15) + #define RX_TPA_V2_END_CMPL_TYPE_LAST \ + RX_TPA_V2_END_CMPL_TYPE_RX_TPA_END + #define RX_TPA_V2_END_CMPL_FLAGS_MASK UINT32_C(0xffc0) + #define RX_TPA_V2_END_CMPL_FLAGS_SFT 6 + /* + * When this bit is '1', it indicates a packet that has an + * error of some type. Type of error is indicated in + * error_flags. + */ + #define RX_TPA_V2_END_CMPL_FLAGS_ERROR UINT32_C(0x40) + /* This field indicates how the packet was placed in the buffer. */ + #define RX_TPA_V2_END_CMPL_FLAGS_PLACEMENT_MASK UINT32_C(0x380) + #define RX_TPA_V2_END_CMPL_FLAGS_PLACEMENT_SFT 7 + /* + * Jumbo: + * TPA Packet was placed using jumbo algorithm. This means + * that the first buffer will be filled with data before + * moving to aggregation buffers. Each aggregation buffer + * will be filled before moving to the next aggregation + * buffer. + */ + #define RX_TPA_V2_END_CMPL_FLAGS_PLACEMENT_JUMBO \ + (UINT32_C(0x1) << 7) + /* + * Header/Data Separation: + * Packet was placed using Header/Data separation algorithm. + * The separation location is indicated by the itype field. + */ + #define RX_TPA_V2_END_CMPL_FLAGS_PLACEMENT_HDS \ + (UINT32_C(0x2) << 7) + /* + * GRO/Jumbo: + * Packet will be placed using GRO/Jumbo where the first + * packet is filled with data. Subsequent packets will be + * placed such that any one packet does not span two + * aggregation buffers unless it starts at the beginning of + * an aggregation buffer. + */ + #define RX_TPA_V2_END_CMPL_FLAGS_PLACEMENT_GRO_JUMBO \ + (UINT32_C(0x5) << 7) + /* + * GRO/Header-Data Separation: + * Packet will be placed using GRO/HDS where the header + * is in the first packet. + * Payload of each packet will be + * placed such that any one packet does not span two + * aggregation buffers unless it starts at the beginning of + * an aggregation buffer. + */ + #define RX_TPA_V2_END_CMPL_FLAGS_PLACEMENT_GRO_HDS \ + (UINT32_C(0x6) << 7) + #define RX_TPA_V2_END_CMPL_FLAGS_PLACEMENT_LAST \ + RX_TPA_V2_END_CMPL_FLAGS_PLACEMENT_GRO_HDS + /* unused is 2 b */ + #define RX_TPA_V2_END_CMPL_FLAGS_UNUSED_MASK UINT32_C(0xc00) + #define RX_TPA_V2_END_CMPL_FLAGS_UNUSED_SFT 10 + /* + * This value indicates what the inner packet determined for the + * packet was. + * - 2 TCP Packet + * Indicates that the packet was IP and TCP. This indicates + * that the ip_cs field is valid and that the tcp_udp_cs + * field is valid and contains the TCP checksum. + * This also indicates that the payload_offset field is valid. + */ + #define RX_TPA_V2_END_CMPL_FLAGS_ITYPE_MASK UINT32_C(0xf000) + #define RX_TPA_V2_END_CMPL_FLAGS_ITYPE_SFT 12 + /* + * This value is zero for TPA End completions. + * There is no data in the buffer that corresponds to the opaque + * value in this completion. + */ + uint16_t len; + /* + * This is a copy of the opaque field from the RX BD this completion + * corresponds to. + */ + uint32_t opaque; + uint8_t v1; + /* + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. + */ + #define RX_TPA_V2_END_CMPL_V1 UINT32_C(0x1) + /* This value is the number of segments in the TPA operation. */ + uint8_t tpa_segs; + /* + * This is the aggregation ID that the completion is associated + * with. 
Use this number to correlate the TPA start completion + * with the TPA end completion. + */ + uint16_t agg_id; + /* + * For non-GRO packets, this value is the + * timestamp delta between earliest and latest timestamp values for + * TPA packet. If packets were not time stamped, then delta will be + * zero. + * + * For GRO packets, this field is zero except for the following + * sub-fields. + * - tsdelta[31] + * Timestamp present indication. When '0', no Timestamp + * option is in the packet. When '1', then a Timestamp + * option is present in the packet. + */ + uint32_t tsdelta; +} __rte_packed; + +/* + * Last 16 bytes of rx_tpa_v2_end_cmpl. + * + * This TPA completion structure is used on devices where the + * `hwrm_vnic_qcaps.max_aggs_supported` value is greater than 0. + */ +/* rx_tpa_v2_end_cmpl_hi (size:128b/16B) */ +struct rx_tpa_v2_end_cmpl_hi { + /* + * This value is the number of duplicate ACKs that have been + * received as part of the TPA operation. + */ + uint16_t tpa_dup_acks; + /* + * This value is the number of duplicate ACKs that have been + * received as part of the TPA operation. + */ + #define RX_TPA_V2_END_CMPL_TPA_DUP_ACKS_MASK UINT32_C(0xf) + #define RX_TPA_V2_END_CMPL_TPA_DUP_ACKS_SFT 0 + /* + * This value indicated the offset in bytes from the beginning of + * the packet where the inner payload starts. This value is valid + * for TCP, UDP, FCoE and RoCE packets + */ + uint8_t payload_offset; + /* + * The value is the total number of aggregation buffers that were + * used in the TPA operation. All TPA aggregation buffer completions + * precede the TPA End completion. If the value is zero, then the + * aggregation is completely contained in the buffer space provided + * in the aggregation start completion. + * Note that the field is simply provided as a cross check. + */ + uint8_t tpa_agg_bufs; + /* + * This value is the valid when TPA completion is active. It + * indicates the length of the longest segment of the TPA operation + * for LRO mode and the length of the first segment in GRO mode. + * + * This value may be used by GRO software to re-construct the original + * packet stream from the TPA packet. This is the length of all + * but the last segment for GRO. In LRO mode this value may be used + * to indicate MSS size to the stack. + */ + uint16_t tpa_seg_len; + uint16_t unused_1; + uint16_t errors_v2; + /* + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. + */ + #define RX_TPA_V2_END_CMPL_V2 UINT32_C(0x1) + #define RX_TPA_V2_END_CMPL_ERRORS_MASK \ + UINT32_C(0xfffe) + #define RX_TPA_V2_END_CMPL_ERRORS_SFT 1 + /* + * This error indicates that there was some sort of problem with + * the BDs for the packet that was found after part of the + * packet was already placed. The packet should be treated as + * invalid. + */ + #define RX_TPA_V2_END_CMPL_ERRORS_BUFFER_ERROR_MASK \ + UINT32_C(0xe) + #define RX_TPA_V2_END_CMPL_ERRORS_BUFFER_ERROR_SFT 1 + /* No buffer error */ + #define RX_TPA_V2_END_CMPL_ERRORS_BUFFER_ERROR_NO_BUFFER \ + (UINT32_C(0x0) << 1) + /* + * This error occurs when there is a fatal HW problem in + * the chip only. It indicates that there were not + * BDs on chip but that there was adequate reservation. + * provided by the TPA block. + */ + #define RX_TPA_V2_END_CMPL_ERRORS_BUFFER_ERROR_NOT_ON_CHIP \ + (UINT32_C(0x2) << 1) + /* + * Bad Format: + * BDs were not formatted correctly. 
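[Illustrative sketch, not part of the generated header.] The _hi half of the v2 end completion carries the pieces needed to finish an aggregation: the duplicate-ACK count, the segment length (longest segment in LRO mode, first segment in GRO mode, usable as an MSS hint), and a buffer-error code that invalidates the whole aggregation. A rough sketch with a hypothetical helper; values are assumed to be already converted to CPU byte order.

#include <stdbool.h>
#include <stdint.h>

/*
 * Returns false when the end completion reports a buffer error, in which
 * case the whole aggregation should be discarded.
 */
static inline bool
tpa_v2_end_hi_parse(uint16_t tpa_dup_acks, uint16_t tpa_seg_len,
                    uint16_t errors_v2, uint16_t *dup_acks, uint16_t *mss)
{
        uint16_t buf_err = errors_v2 &
                           RX_TPA_V2_END_CMPL_ERRORS_BUFFER_ERROR_MASK;

        if (buf_err != RX_TPA_V2_END_CMPL_ERRORS_BUFFER_ERROR_NO_BUFFER)
                return false;

        *dup_acks = tpa_dup_acks & RX_TPA_V2_END_CMPL_TPA_DUP_ACKS_MASK;
        /* Longest segment (LRO) / first segment (GRO); an MSS hint. */
        *mss = tpa_seg_len;
        return true;
}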
+ */ + #define RX_TPA_V2_END_CMPL_ERRORS_BUFFER_ERROR_BAD_FORMAT \ + (UINT32_C(0x3) << 1) + /* + * This error occurs when TPA block was not configured to + * reserve adequate BDs for TPA operations on this RX + * ring. All data for the TPA operation was not placed. + * + * This error can also be generated when the number of + * segments is not programmed correctly in TPA and the + * 33 total aggregation buffers allowed for the TPA + * operation has been exceeded. + */ + #define RX_TPA_V2_END_CMPL_ERRORS_BUFFER_ERROR_RSV_ERROR \ + (UINT32_C(0x4) << 1) + /* + * Flush: + * There was a bad_format error on the previous operation + */ + #define RX_TPA_V2_END_CMPL_ERRORS_BUFFER_ERROR_FLUSH \ + (UINT32_C(0x5) << 1) + #define RX_TPA_V2_END_CMPL_ERRORS_BUFFER_ERROR_LAST \ + RX_TPA_V2_END_CMPL_ERRORS_BUFFER_ERROR_FLUSH + uint16_t unused_2; + /* + * This is the opaque value that was completed for the TPA start + * completion that corresponds to this TPA end completion. + */ + uint32_t start_opaque; +} __rte_packed; + +/* + * This TPA completion structure is used on devices where the + * `hwrm_vnic_qcaps.max_aggs_supported` value is greater than 0. + */ +/* rx_tpa_v2_abuf_cmpl (size:128b/16B) */ +struct rx_tpa_v2_abuf_cmpl { + uint16_t type; + /* + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records. + */ + #define RX_TPA_V2_ABUF_CMPL_TYPE_MASK UINT32_C(0x3f) + #define RX_TPA_V2_ABUF_CMPL_TYPE_SFT 0 + /* + * RX TPA Aggregation Buffer completion : + * Completion of an L2 aggregation buffer in support of + * TPA packet completion. Length = 16B + */ + #define RX_TPA_V2_ABUF_CMPL_TYPE_RX_TPA_AGG UINT32_C(0x16) + #define RX_TPA_V2_ABUF_CMPL_TYPE_LAST \ + RX_TPA_V2_ABUF_CMPL_TYPE_RX_TPA_AGG + /* + * This is the length of the data for the packet stored in this + * aggregation buffer identified by the opaque value. This does not + * include the length of any + * data placed in other aggregation BDs or in the packet or buffer + * BDs. This length does not include any space added due to + * hdr_offset register during HDS placement mode. + */ + uint16_t len; + /* + * This is a copy of the opaque field from the RX BD this aggregation + * buffer corresponds to. + */ + uint32_t opaque; + uint16_t v; + /* + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. + */ + #define RX_TPA_V2_ABUF_CMPL_V UINT32_C(0x1) + /* + * This is the aggregation ID that the completion is associated with. Use + * this number to correlate the TPA agg completion with the TPA start + * completion and the TPA end completion. + */ + uint16_t agg_id; + uint32_t unused_1; +} __rte_packed; + +/* rx_abuf_cmpl (size:128b/16B) */ +struct rx_abuf_cmpl { + uint16_t type; + /* + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records. + */ + #define RX_ABUF_CMPL_TYPE_MASK UINT32_C(0x3f) + #define RX_ABUF_CMPL_TYPE_SFT 0 + /* + * RX Aggregation Buffer completion : + * Completion of an L2 aggregation buffer in support of + * TPA, HDS, or Jumbo packet completion. 
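[Illustrative sketch, not part of the generated header.] Between a TPA start and its TPA end, each aggregation buffer is reported by one rx_tpa_v2_abuf_cmpl record; agg_id ties it back to the operation and len gives the bytes placed in that buffer. A minimal sketch of pulling those two values out; the helper name is hypothetical and the caller is assumed to have already checked the V bit for the current pass.

#include <stdbool.h>
#include <stdint.h>
#include <rte_byteorder.h>

static inline bool
tpa_v2_abuf_parse(const struct rx_tpa_v2_abuf_cmpl *abuf,
                  uint16_t *agg_id, uint16_t *len)
{
        uint16_t type = rte_le_to_cpu_16(abuf->type) &
                        RX_TPA_V2_ABUF_CMPL_TYPE_MASK;

        if (type != RX_TPA_V2_ABUF_CMPL_TYPE_RX_TPA_AGG)
                return false;
        *agg_id = rte_le_to_cpu_16(abuf->agg_id);
        *len = rte_le_to_cpu_16(abuf->len);
        return true;
}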
Length = 16B + */ + #define RX_ABUF_CMPL_TYPE_RX_AGG UINT32_C(0x12) + #define RX_ABUF_CMPL_TYPE_LAST RX_ABUF_CMPL_TYPE_RX_AGG + /* + * This is the length of the data for the packet stored in this + * aggregation buffer identified by the opaque value. This does not + * include the length of any + * data placed in other aggregation BDs or in the packet or buffer + * BDs. This length does not include any space added due to + * hdr_offset register during HDS placement mode. + */ + uint16_t len; + /* + * This is a copy of the opaque field from the RX BD this aggregation + * buffer corresponds to. + */ + uint32_t opaque; + uint32_t v; + /* + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. + */ + #define RX_ABUF_CMPL_V UINT32_C(0x1) + /* unused3 is 32 b */ + uint32_t unused_2; +} __rte_packed; + +/* eject_cmpl (size:128b/16B) */ +struct eject_cmpl { + uint16_t type; + /* + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records. + */ + #define EJECT_CMPL_TYPE_MASK UINT32_C(0x3f) + #define EJECT_CMPL_TYPE_SFT 0 + /* + * Statistics Ejection Completion: + * Completion of statistics data ejection buffer. + * Length = 16B + */ + #define EJECT_CMPL_TYPE_STAT_EJECT UINT32_C(0x1a) + #define EJECT_CMPL_TYPE_LAST EJECT_CMPL_TYPE_STAT_EJECT + #define EJECT_CMPL_FLAGS_MASK UINT32_C(0xffc0) + #define EJECT_CMPL_FLAGS_SFT 6 + /* + * When this bit is '1', it indicates a packet that has an + * error of some type. Type of error is indicated in + * error_flags. + */ + #define EJECT_CMPL_FLAGS_ERROR UINT32_C(0x40) + /* + * This is the length of the statistics data stored in this + * buffer. + */ + uint16_t len; + /* + * This is a copy of the opaque field from the RX BD this ejection + * buffer corresponds to. + */ + uint32_t opaque; + uint16_t v; + /* + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. + */ + #define EJECT_CMPL_V UINT32_C(0x1) + #define EJECT_CMPL_ERRORS_MASK UINT32_C(0xfffe) + #define EJECT_CMPL_ERRORS_SFT 1 + /* + * This error indicates that there was some sort of problem with + * the BDs for statistics ejection. The statistics ejection should + * be treated as invalid + */ + #define EJECT_CMPL_ERRORS_BUFFER_ERROR_MASK UINT32_C(0xe) + #define EJECT_CMPL_ERRORS_BUFFER_ERROR_SFT 1 + /* No buffer error */ + #define EJECT_CMPL_ERRORS_BUFFER_ERROR_NO_BUFFER \ + (UINT32_C(0x0) << 1) + /* + * Did Not Fit: + * Statistics did not fit into aggregation buffer provided. + */ + #define EJECT_CMPL_ERRORS_BUFFER_ERROR_DID_NOT_FIT \ + (UINT32_C(0x1) << 1) + /* + * Bad Format: + * BDs were not formatted correctly. + */ + #define EJECT_CMPL_ERRORS_BUFFER_ERROR_BAD_FORMAT \ + (UINT32_C(0x3) << 1) + /* + * Flush: + * There was a bad_format error on the previous operation + */ + #define EJECT_CMPL_ERRORS_BUFFER_ERROR_FLUSH \ + (UINT32_C(0x5) << 1) + #define EJECT_CMPL_ERRORS_BUFFER_ERROR_LAST \ + EJECT_CMPL_ERRORS_BUFFER_ERROR_FLUSH + /* reserved16 is 16 b */ + uint16_t reserved16; + /* unused3 is 32 b */ + uint32_t unused_2; +} __rte_packed; + +/* hwrm_cmpl (size:128b/16B) */ +struct hwrm_cmpl { + uint16_t type; + /* + * This field indicates the exact type of the completion. 
+ * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records. + */ + #define HWRM_CMPL_TYPE_MASK UINT32_C(0x3f) + #define HWRM_CMPL_TYPE_SFT 0 + /* + * HWRM Command Completion: + * Completion of an HWRM command. + */ + #define HWRM_CMPL_TYPE_HWRM_DONE UINT32_C(0x20) + #define HWRM_CMPL_TYPE_LAST HWRM_CMPL_TYPE_HWRM_DONE + /* This is the sequence_id of the HWRM command that has completed. */ + uint16_t sequence_id; + /* unused2 is 32 b */ + uint32_t unused_1; + uint32_t v; + /* + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. + */ + #define HWRM_CMPL_V UINT32_C(0x1) + /* unused4 is 32 b */ + uint32_t unused_3; +} __rte_packed; + +/* hwrm_fwd_req_cmpl (size:128b/16B) */ +struct hwrm_fwd_req_cmpl { + /* + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records. + */ + uint16_t req_len_type; + /* + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records. + */ + #define HWRM_FWD_REQ_CMPL_TYPE_MASK UINT32_C(0x3f) + #define HWRM_FWD_REQ_CMPL_TYPE_SFT 0 + /* Forwarded HWRM Request */ + #define HWRM_FWD_REQ_CMPL_TYPE_HWRM_FWD_REQ UINT32_C(0x22) + #define HWRM_FWD_REQ_CMPL_TYPE_LAST \ + HWRM_FWD_REQ_CMPL_TYPE_HWRM_FWD_REQ + /* Length of forwarded request in bytes. */ + #define HWRM_FWD_REQ_CMPL_REQ_LEN_MASK UINT32_C(0xffc0) + #define HWRM_FWD_REQ_CMPL_REQ_LEN_SFT 6 + /* + * Source ID of this request. + * Typically used in forwarding requests and responses. + * 0x0 - 0xFFF8 - Used for function ids + * 0xFFF8 - 0xFFFE - Reserved for internal processors + * 0xFFFF - HWRM + */ + uint16_t source_id; + /* unused1 is 32 b */ + uint32_t unused0; + /* Address of forwarded request. */ + uint32_t req_buf_addr_v[2]; + /* + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. + */ + #define HWRM_FWD_REQ_CMPL_V UINT32_C(0x1) + /* Address of forwarded request. */ + #define HWRM_FWD_REQ_CMPL_REQ_BUF_ADDR_MASK UINT32_C(0xfffffffe) + #define HWRM_FWD_REQ_CMPL_REQ_BUF_ADDR_SFT 1 +} __rte_packed; + +/* hwrm_fwd_resp_cmpl (size:128b/16B) */ +struct hwrm_fwd_resp_cmpl { + uint16_t type; + /* + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records. + */ + #define HWRM_FWD_RESP_CMPL_TYPE_MASK UINT32_C(0x3f) + #define HWRM_FWD_RESP_CMPL_TYPE_SFT 0 + /* Forwarded HWRM Response */ + #define HWRM_FWD_RESP_CMPL_TYPE_HWRM_FWD_RESP UINT32_C(0x24) + #define HWRM_FWD_RESP_CMPL_TYPE_LAST \ + HWRM_FWD_RESP_CMPL_TYPE_HWRM_FWD_RESP + /* + * Source ID of this response. + * Typically used in forwarding requests and responses. + * 0x0 - 0xFFF8 - Used for function ids + * 0xFFF8 - 0xFFFE - Reserved for internal processors + * 0xFFFF - HWRM + */ + uint16_t source_id; + /* Length of forwarded response in bytes. */ + uint16_t resp_len; + /* unused2 is 16 b */ + uint16_t unused_1; + /* Address of forwarded request. 
*/ + uint32_t resp_buf_addr_v[2]; + /* + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. + */ + #define HWRM_FWD_RESP_CMPL_V UINT32_C(0x1) + /* Address of forwarded request. */ + #define HWRM_FWD_RESP_CMPL_RESP_BUF_ADDR_MASK UINT32_C(0xfffffffe) + #define HWRM_FWD_RESP_CMPL_RESP_BUF_ADDR_SFT 1 +} __rte_packed; + +/* hwrm_async_event_cmpl (size:128b/16B) */ +struct hwrm_async_event_cmpl { + uint16_t type; + /* + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records. + */ + #define HWRM_ASYNC_EVENT_CMPL_TYPE_MASK UINT32_C(0x3f) + #define HWRM_ASYNC_EVENT_CMPL_TYPE_SFT 0 + /* HWRM Asynchronous Event Information */ + #define HWRM_ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT UINT32_C(0x2e) + #define HWRM_ASYNC_EVENT_CMPL_TYPE_LAST \ + HWRM_ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT + /* Identifiers of events. */ + uint16_t event_id; + /* Link status changed */ + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE \ + UINT32_C(0x0) + /* Link MTU changed */ + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_MTU_CHANGE \ + UINT32_C(0x1) + /* Link speed changed */ + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE \ + UINT32_C(0x2) + /* DCB Configuration changed */ + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE \ + UINT32_C(0x3) + /* Port connection not allowed */ + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED \ + UINT32_C(0x4) + /* Link speed configuration was not allowed */ + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED \ + UINT32_C(0x5) + /* Link speed configuration change */ + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE \ + UINT32_C(0x6) + /* Port PHY configuration change */ + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE \ + UINT32_C(0x7) + /* Reset notification to clients */ + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY \ + UINT32_C(0x8) + /* Master function selection event */ + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY \ + UINT32_C(0x9) + /* Function driver unloaded */ + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD \ + UINT32_C(0x10) + /* Function driver loaded */ + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD \ + UINT32_C(0x11) + /* Function FLR related processing has completed */ + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_FLR_PROC_CMPLT \ + UINT32_C(0x12) + /* PF driver unloaded */ + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD \ + UINT32_C(0x20) + /* PF driver loaded */ + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD \ + UINT32_C(0x21) + /* VF Function Level Reset (FLR) */ + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR \ + UINT32_C(0x30) + /* VF MAC Address Change */ + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE \ + UINT32_C(0x31) + /* PF-VF communication channel status change. 
*/ + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE \ + UINT32_C(0x32) + /* VF Configuration Change */ + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE \ + UINT32_C(0x33) + /* LLFC/PFC Configuration Change */ + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LLFC_PFC_CHANGE \ + UINT32_C(0x34) + /* Default VNIC Configuration Change */ + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DEFAULT_VNIC_CHANGE \ + UINT32_C(0x35) + /* HW flow aged */ + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_HW_FLOW_AGED \ + UINT32_C(0x36) + /* + * A debug notification being posted to the driver. These + * notifications are purely for diagnostic purpose and should not be + * used for functional purpose. The driver is not supposed to act + * on these messages except to log/record it. + */ + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION \ + UINT32_C(0x37) + /* + * An EEM flow cached memory flush for all flows request event being + * posted to the PF driver. + */ + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_EEM_CACHE_FLUSH_REQ \ + UINT32_C(0x38) + /* + * An EEM flow cache memory flush completion event being posted to the + * firmware by the PF driver. This is indication that host EEM flush + * has completed by the PF. + */ + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_EEM_CACHE_FLUSH_DONE \ + UINT32_C(0x39) + /* + * A tcp flag action change event being posted to the PF or trusted VF + * driver by the firmware. The PF or trusted VF driver should query + * the firmware for the new TCP flag action update after receiving + * this async event. + */ + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_TCP_FLAG_ACTION_CHANGE \ + UINT32_C(0x3a) + /* + * An EEM flow active event being posted to the PF or trusted VF driver + * by the firmware. The PF or trusted VF driver should update the + * flow's aging timer after receiving this async event. + */ + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_EEM_FLOW_ACTIVE \ + UINT32_C(0x3b) + /* + * A eem cfg change event being posted to the trusted VF driver by the + * firmware if the parent PF EEM configuration changed. + */ + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_EEM_CFG_CHANGE \ + UINT32_C(0x3c) + /* + * Deprecated. + * TFLIB unique default VNIC Configuration Change + */ + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_TFLIB_DEFAULT_VNIC_CHANGE \ + UINT32_C(0x3d) + /* + * Deprecated. + * TFLIB unique link status changed + */ + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_TFLIB_LINK_STATUS_CHANGE \ + UINT32_C(0x3e) + /* + * An event signifying completion for HWRM_FW_STATE_QUIESCE + * (completion, timeout, or error) + */ + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_QUIESCE_DONE \ + UINT32_C(0x3f) + /* + * An event signifying a HWRM command is in progress and its + * response will be deferred. This event is used on crypto controllers + * only. + */ + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE \ + UINT32_C(0x40) + /* + * An event signifying that a PFC WatchDog configuration + * has changed on any port / cos. + */ + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PFC_WATCHDOG_CFG_CHANGE \ + UINT32_C(0x41) + /* + * A trace log message. This contains firmware trace logs string + * embedded in the asynchronous message. This is an experimental + * event, not meant for production use at this time. 
+ */ + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FW_TRACE_MSG \ + UINT32_C(0xfe) + /* HWRM Error */ + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR \ + UINT32_C(0xff) + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LAST \ + HWRM_ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR + /* Event specific data */ + uint32_t event_data2; + uint8_t opaque_v; + /* + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. + */ + #define HWRM_ASYNC_EVENT_CMPL_V UINT32_C(0x1) + /* opaque is 7 b */ + #define HWRM_ASYNC_EVENT_CMPL_OPAQUE_MASK UINT32_C(0xfe) + #define HWRM_ASYNC_EVENT_CMPL_OPAQUE_SFT 1 + /* 8-lsb timestamp from POR (100-msec resolution) */ + uint8_t timestamp_lo; + /* 16-lsb timestamp from POR (100-msec resolution) */ + uint16_t timestamp_hi; + /* Event specific data */ + uint32_t event_data1; +} __rte_packed; + +/* hwrm_async_event_cmpl_link_status_change (size:128b/16B) */ +struct hwrm_async_event_cmpl_link_status_change { + uint16_t type; + /* + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records. + */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_MASK \ + UINT32_C(0x3f) + #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_SFT 0 + /* HWRM Asynchronous Event Information */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT \ + UINT32_C(0x2e) + #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_LAST \ + HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT + /* Identifiers of events. */ + uint16_t event_id; + /* Link status changed */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LINK_STATUS_CHANGE \ + UINT32_C(0x0) + #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LAST \ + HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LINK_STATUS_CHANGE + /* Event specific data */ + uint32_t event_data2; + uint8_t opaque_v; + /* + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. + */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_V \ + UINT32_C(0x1) + /* opaque is 7 b */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_MASK \ + UINT32_C(0xfe) + #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_SFT 1 + /* 8-lsb timestamp from POR (100-msec resolution) */ + uint8_t timestamp_lo; + /* 16-lsb timestamp from POR (100-msec resolution) */ + uint16_t timestamp_hi; + /* Event specific data */ + uint32_t event_data1; + /* Indicates link status change */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE \ + UINT32_C(0x1) + /* + * If this bit set to 0, then it indicates that the link + * was up and it went down. + */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_DOWN \ + UINT32_C(0x0) + /* + * If this bit is set to 1, then it indicates that the link + * was down and it went up. 
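+ * Illustrative sketch only (not part of the interface definition),
+ * assuming a hypothetical local variable data1 that holds event_data1:
+ *   link_up = data1 &
+ *     HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE;
+ *   port_id = (data1 &
+ *     HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_MASK) >>
+ *     HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_SFT;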
+ */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP \ + UINT32_C(0x1) + #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_LAST \ + HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP + /* Indicates the physical port this link status change occur */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_MASK \ + UINT32_C(0xe) + #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_SFT \ + 1 + /* PORT ID */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_MASK \ + UINT32_C(0xffff0) + #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_SFT \ + 4 + /* Indicates the physical function this event occurred on. */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PF_ID_MASK \ + UINT32_C(0xff00000) + #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PF_ID_SFT \ + 20 +} __rte_packed; + +/* hwrm_async_event_cmpl_link_mtu_change (size:128b/16B) */ +struct hwrm_async_event_cmpl_link_mtu_change { + uint16_t type; + /* + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records. + */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_MASK \ + UINT32_C(0x3f) + #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_SFT 0 + /* HWRM Asynchronous Event Information */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_HWRM_ASYNC_EVENT \ + UINT32_C(0x2e) + #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_LAST \ + HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_HWRM_ASYNC_EVENT + /* Identifiers of events. */ + uint16_t event_id; + /* Link MTU changed */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_ID_LINK_MTU_CHANGE \ + UINT32_C(0x1) + #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_ID_LAST \ + HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_ID_LINK_MTU_CHANGE + /* Event specific data */ + uint32_t event_data2; + uint8_t opaque_v; + /* + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. + */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_V UINT32_C(0x1) + /* opaque is 7 b */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_OPAQUE_MASK \ + UINT32_C(0xfe) + #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_OPAQUE_SFT 1 + /* 8-lsb timestamp from POR (100-msec resolution) */ + uint8_t timestamp_lo; + /* 16-lsb timestamp from POR (100-msec resolution) */ + uint16_t timestamp_hi; + /* Event specific data */ + uint32_t event_data1; + /* The new MTU of the link in bytes. */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_DATA1_NEW_MTU_MASK \ + UINT32_C(0xffff) + #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_DATA1_NEW_MTU_SFT 0 +} __rte_packed; + +/* hwrm_async_event_cmpl_link_speed_change (size:128b/16B) */ +struct hwrm_async_event_cmpl_link_speed_change { + uint16_t type; + /* + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records. 
+ */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_MASK \ + UINT32_C(0x3f) + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_SFT 0 + /* HWRM Asynchronous Event Information */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_HWRM_ASYNC_EVENT \ + UINT32_C(0x2e) + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_LAST \ + HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_HWRM_ASYNC_EVENT + /* Identifiers of events. */ + uint16_t event_id; + /* Link speed changed */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_ID_LINK_SPEED_CHANGE \ + UINT32_C(0x2) + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_ID_LAST \ + HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_ID_LINK_SPEED_CHANGE + /* Event specific data */ + uint32_t event_data2; + uint8_t opaque_v; + /* + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. + */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_V \ + UINT32_C(0x1) + /* opaque is 7 b */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_OPAQUE_MASK \ + UINT32_C(0xfe) + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_OPAQUE_SFT 1 + /* 8-lsb timestamp from POR (100-msec resolution) */ + uint8_t timestamp_lo; + /* 16-lsb timestamp from POR (100-msec resolution) */ + uint16_t timestamp_hi; + /* Event specific data */ + uint32_t event_data1; + /* + * When this bit is '1', the link was forced to the + * force_link_speed value. + */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_FORCE \ + UINT32_C(0x1) + /* The new link speed in 100 Mbps units. */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_MASK \ + UINT32_C(0xfffe) + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_SFT \ + 1 + /* 100Mb link speed */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100MB \ + (UINT32_C(0x1) << 1) + /* 1Gb link speed */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_1GB \ + (UINT32_C(0xa) << 1) + /* 2Gb link speed */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_2GB \ + (UINT32_C(0x14) << 1) + /* 2.5Gb link speed */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_2_5GB \ + (UINT32_C(0x19) << 1) + /* 10Gb link speed */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_10GB \ + (UINT32_C(0x64) << 1) + /* 20Gb link speed */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_20GB \ + (UINT32_C(0xc8) << 1) + /* 25Gb link speed */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_25GB \ + (UINT32_C(0xfa) << 1) + /* 40Gb link speed */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_40GB \ + (UINT32_C(0x190) << 1) + /* 50Gb link speed */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_50GB \ + (UINT32_C(0x1f4) << 1) + /* 100Gb link speed */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100GB \ + (UINT32_C(0x3e8) << 1) + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_LAST \ + HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100GB + /* PORT ID */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_MASK \ + UINT32_C(0xffff0000) +
#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_SFT \ + 16 +} __rte_packed; + +/* hwrm_async_event_cmpl_dcb_config_change (size:128b/16B) */ +struct hwrm_async_event_cmpl_dcb_config_change { + uint16_t type; + /* + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records. + */ + #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_MASK \ + UINT32_C(0x3f) + #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_SFT 0 + /* HWRM Asynchronous Event Information */ + #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_HWRM_ASYNC_EVENT \ + UINT32_C(0x2e) + #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_LAST \ + HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_HWRM_ASYNC_EVENT + /* Identifiers of events. */ + uint16_t event_id; + /* DCB Configuration changed */ + #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_ID_DCB_CONFIG_CHANGE \ + UINT32_C(0x3) + #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_ID_LAST \ + HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_ID_DCB_CONFIG_CHANGE + /* Event specific data */ + uint32_t event_data2; + /* ETS configuration change */ + #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA2_ETS \ + UINT32_C(0x1) + /* PFC configuration change */ + #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA2_PFC \ + UINT32_C(0x2) + /* APP configuration change */ + #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA2_APP \ + UINT32_C(0x4) + uint8_t opaque_v; + /* + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. + */ + #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_V \ + UINT32_C(0x1) + /* opaque is 7 b */ + #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_OPAQUE_MASK \ + UINT32_C(0xfe) + #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_OPAQUE_SFT 1 + /* 8-lsb timestamp from POR (100-msec resolution) */ + uint8_t timestamp_lo; + /* 16-lsb timestamp from POR (100-msec resolution) */ + uint16_t timestamp_hi; + /* Event specific data */ + uint32_t event_data1; + /* PORT ID */ + #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_PORT_ID_MASK \ + UINT32_C(0xffff) + #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_PORT_ID_SFT \ + 0 + /* Priority recommended for RoCE traffic */ + #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_MASK \ + UINT32_C(0xff0000) + #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_SFT \ + 16 + /* none is 255 */ + #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_NONE \ + (UINT32_C(0xff) << 16) + #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_LAST \ + HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_NONE + /* Priority recommended for L2 traffic */ + #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_MASK \ + UINT32_C(0xff000000) + #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_SFT \ + 24 + /* none is 255 */ + #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_NONE \ + (UINT32_C(0xff) << 24) + #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_LAST \ + HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_NONE 
+} __rte_packed; + +/* hwrm_async_event_cmpl_port_conn_not_allowed (size:128b/16B) */ +struct hwrm_async_event_cmpl_port_conn_not_allowed { + uint16_t type; + /* + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records. + */ + #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_MASK \ + UINT32_C(0x3f) + #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_SFT \ + 0 + /* HWRM Asynchronous Event Information */ + #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT \ + UINT32_C(0x2e) + #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_LAST \ + HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT + /* Identifiers of events. */ + uint16_t event_id; + /* Port connection not allowed */ + #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_PORT_CONN_NOT_ALLOWED \ + UINT32_C(0x4) + #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_LAST \ + HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_PORT_CONN_NOT_ALLOWED + /* Event specific data */ + uint32_t event_data2; + uint8_t opaque_v; + /* + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. + */ + #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_V \ + UINT32_C(0x1) + /* opaque is 7 b */ + #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_MASK \ + UINT32_C(0xfe) + #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_SFT 1 + /* 8-lsb timestamp from POR (100-msec resolution) */ + uint8_t timestamp_lo; + /* 16-lsb timestamp from POR (100-msec resolution) */ + uint16_t timestamp_hi; + /* Event specific data */ + uint32_t event_data1; + /* PORT ID */ + #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK \ + UINT32_C(0xffff) + #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_SFT \ + 0 + /* + * This value indicates the current port level enforcement policy + * for the optics module when there is an optical module mismatch + * and port is not connected. + */ + #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_MASK \ + UINT32_C(0xff0000) + #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_SFT \ + 16 + /* No enforcement */ + #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_NONE \ + (UINT32_C(0x0) << 16) + /* Disable Transmit side Laser. */ + #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_DISABLETX \ + (UINT32_C(0x1) << 16) + /* Raise a warning message. */ + #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_WARNINGMSG \ + (UINT32_C(0x2) << 16) + /* Power down the module. */ + #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN \ + (UINT32_C(0x3) << 16) + #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_LAST \ + HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN +} __rte_packed; + +/* hwrm_async_event_cmpl_link_speed_cfg_not_allowed (size:128b/16B) */ +struct hwrm_async_event_cmpl_link_speed_cfg_not_allowed { + uint16_t type; + /* + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. 
Even values indicate 16B + * records. Odd values indicate 32B + * records. + */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_MASK \ + UINT32_C(0x3f) + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_SFT \ + 0 + /* HWRM Asynchronous Event Information */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT \ + UINT32_C(0x2e) + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_LAST \ + HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT + /* Identifiers of events. */ + uint16_t event_id; + /* Link speed configuration was not allowed */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED \ + UINT32_C(0x5) + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_ID_LAST \ + HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED + /* Event specific data */ + uint32_t event_data2; + uint8_t opaque_v; + /* + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. + */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_V \ + UINT32_C(0x1) + /* opaque is 7 b */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_OPAQUE_MASK \ + UINT32_C(0xfe) + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_OPAQUE_SFT 1 + /* 8-lsb timestamp from POR (100-msec resolution) */ + uint8_t timestamp_lo; + /* 16-lsb timestamp from POR (100-msec resolution) */ + uint16_t timestamp_hi; + /* Event specific data */ + uint32_t event_data1; + /* PORT ID */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK \ + UINT32_C(0xffff) + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_DATA1_PORT_ID_SFT \ + 0 +} __rte_packed; + +/* hwrm_async_event_cmpl_link_speed_cfg_change (size:128b/16B) */ +struct hwrm_async_event_cmpl_link_speed_cfg_change { + uint16_t type; + /* + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records. + */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_MASK \ + UINT32_C(0x3f) + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_SFT \ + 0 + /* HWRM Asynchronous Event Information */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT \ + UINT32_C(0x2e) + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_LAST \ + HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT + /* Identifiers of events. */ + uint16_t event_id; + /* Link speed configuration change */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LINK_SPEED_CFG_CHANGE \ + UINT32_C(0x6) + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LAST \ + HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LINK_SPEED_CFG_CHANGE + /* Event specific data */ + uint32_t event_data2; + uint8_t opaque_v; + /* + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. 
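+ * Illustrative sketch only (not part of the interface definition),
+ * assuming a hypothetical local variable expect_v that tracks the
+ * ring's current expected phase:
+ *   valid = (!!(opaque_v &
+ *     HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_V)) == expect_v;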
+ */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_V \ + UINT32_C(0x1) + /* opaque is 7 b */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_OPAQUE_MASK \ + UINT32_C(0xfe) + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_OPAQUE_SFT 1 + /* 8-lsb timestamp from POR (100-msec resolution) */ + uint8_t timestamp_lo; + /* 16-lsb timestamp from POR (100-msec resolution) */ + uint16_t timestamp_hi; + /* Event specific data */ + uint32_t event_data1; + /* PORT ID */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_MASK \ + UINT32_C(0xffff) + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_SFT \ + 0 + /* + * If set to 1, it indicates that the supported link speeds + * configuration on the port has changed. + * If set to 0, then there is no change in supported link speeds + * configuration. + */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_SUPPORTED_LINK_SPEEDS_CHANGE \ + UINT32_C(0x10000) + /* + * If set to 1, it indicates that the link speed configuration + * on the port has become illegal or invalid. + * If set to 0, then the link speed configuration on the port is + * legal or valid. + */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_ILLEGAL_LINK_SPEED_CFG \ + UINT32_C(0x20000) +} __rte_packed; + +/* hwrm_async_event_cmpl_port_phy_cfg_change (size:128b/16B) */ +struct hwrm_async_event_cmpl_port_phy_cfg_change { + uint16_t type; + /* + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records. + */ + #define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_TYPE_MASK \ + UINT32_C(0x3f) + #define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_TYPE_SFT \ + 0 + /* HWRM Asynchronous Event Information */ + #define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT \ + UINT32_C(0x2e) + #define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_TYPE_LAST \ + HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT + /* Identifiers of events. */ + uint16_t event_id; + /* Port PHY configuration change */ + #define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_ID_PORT_PHY_CFG_CHANGE \ + UINT32_C(0x7) + #define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_ID_LAST \ + HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_ID_PORT_PHY_CFG_CHANGE + /* Event specific data */ + uint32_t event_data2; + uint8_t opaque_v; + /* + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. + */ + #define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_V \ + UINT32_C(0x1) + /* opaque is 7 b */ + #define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_OPAQUE_MASK \ + UINT32_C(0xfe) + #define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_OPAQUE_SFT 1 + /* 8-lsb timestamp from POR (100-msec resolution) */ + uint8_t timestamp_lo; + /* 16-lsb timestamp from POR (100-msec resolution) */ + uint16_t timestamp_hi; + /* Event specific data */ + uint32_t event_data1; + /* PORT ID */ + #define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_DATA1_PORT_ID_MASK \ + UINT32_C(0xffff) + #define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_DATA1_PORT_ID_SFT \ + 0 + /* + * If set to 1, it indicates that the FEC + * configuration on the port has changed. + * If set to 0, then there is no change in FEC configuration. 
+ */ + #define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_DATA1_FEC_CFG_CHANGE \ + UINT32_C(0x10000) + /* + * If set to 1, it indicates that the EEE configuration + * on the port has changed. + * If set to 0, then there is no change in EEE configuration + * on the port. + */ + #define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_DATA1_EEE_CFG_CHANGE \ + UINT32_C(0x20000) + /* + * If set to 1, it indicates that the pause configuration + * on the PHY has changed. + * If set to 0, then there is no change in the pause + * configuration on the PHY. + */ + #define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_DATA1_PAUSE_CFG_CHANGE \ + UINT32_C(0x40000) +} __rte_packed; + +/* hwrm_async_event_cmpl_reset_notify (size:128b/16B) */ +struct hwrm_async_event_cmpl_reset_notify { + uint16_t type; + /* + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records. + */ + #define HWRM_ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_MASK \ + UINT32_C(0x3f) + #define HWRM_ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_SFT 0 + /* HWRM Asynchronous Event Information */ + #define HWRM_ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_HWRM_ASYNC_EVENT \ + UINT32_C(0x2e) + #define HWRM_ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_LAST \ + HWRM_ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_HWRM_ASYNC_EVENT + /* Identifiers of events. */ + uint16_t event_id; + /* Notify clients of imminent reset. */ + #define HWRM_ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_ID_RESET_NOTIFY \ + UINT32_C(0x8) + #define HWRM_ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_ID_LAST \ + HWRM_ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_ID_RESET_NOTIFY + /* Event specific data */ + uint32_t event_data2; + uint8_t opaque_v; + /* + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. + */ + #define HWRM_ASYNC_EVENT_CMPL_RESET_NOTIFY_V UINT32_C(0x1) + /* opaque is 7 b */ + #define HWRM_ASYNC_EVENT_CMPL_RESET_NOTIFY_OPAQUE_MASK UINT32_C(0xfe) + #define HWRM_ASYNC_EVENT_CMPL_RESET_NOTIFY_OPAQUE_SFT 1 + /* + * 8-lsb timestamp (100-msec resolution) + * The minimum time required for firmware readiness after sending this + * notification to the driver instances. + */ + uint8_t timestamp_lo; + /* + * 16-lsb timestamp (100-msec resolution) + * The maximum firmware reset bail-out value, in units of 100 + * milliseconds. The driver instances will use this value to re-initiate the + * registration process if the core firmware didn't set the ready + * state bit. + */ + uint16_t timestamp_hi; + /* Event specific data */ + uint32_t event_data1; + /* Indicates driver action requested */ + #define HWRM_ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_MASK \ + UINT32_C(0xff) + #define HWRM_ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_SFT \ + 0 + /* + * If set to 1, it indicates that the L2 client should + * stop sending in-band traffic to Nitro. + * If set to 0, there is no change in L2 client behavior. + */ + #define HWRM_ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_DRIVER_STOP_TX_QUEUE \ + UINT32_C(0x1) + /* + * If set to 1, it indicates that the L2 client should + * bring down the interface. + * If set to 0, then there is no change in L2 client behavior.
+ */ + #define HWRM_ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_DRIVER_IFDOWN \ + UINT32_C(0x2) + #define HWRM_ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_LAST \ + HWRM_ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_DRIVER_IFDOWN + /* Indicates reason for reset. */ + #define HWRM_ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_MASK \ + UINT32_C(0xff00) + #define HWRM_ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_SFT \ + 8 + /* A management client has requested reset. */ + #define HWRM_ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_MANAGEMENT_RESET_REQUEST \ + (UINT32_C(0x1) << 8) + /* A fatal firmware exception has occurred. */ + #define HWRM_ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_FATAL \ + (UINT32_C(0x2) << 8) + /* A non-fatal firmware exception has occurred. */ + #define HWRM_ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_NON_FATAL \ + (UINT32_C(0x3) << 8) + #define HWRM_ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_LAST \ + HWRM_ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_NON_FATAL + /* + * Minimum time before driver should attempt access - units 100ms ticks. + * Range 0-65535 + */ + #define HWRM_ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DELAY_IN_100MS_TICKS_MASK \ + UINT32_C(0xffff0000) + #define HWRM_ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DELAY_IN_100MS_TICKS_SFT \ + 16 +} __rte_packed; + +/* hwrm_async_event_cmpl_error_recovery (size:128b/16B) */ +struct hwrm_async_event_cmpl_error_recovery { + uint16_t type; + /* + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records. + */ + #define HWRM_ASYNC_EVENT_CMPL_ERROR_RECOVERY_TYPE_MASK \ + UINT32_C(0x3f) + #define HWRM_ASYNC_EVENT_CMPL_ERROR_RECOVERY_TYPE_SFT 0 + /* HWRM Asynchronous Event Information */ + #define HWRM_ASYNC_EVENT_CMPL_ERROR_RECOVERY_TYPE_HWRM_ASYNC_EVENT \ + UINT32_C(0x2e) + #define HWRM_ASYNC_EVENT_CMPL_ERROR_RECOVERY_TYPE_LAST \ + HWRM_ASYNC_EVENT_CMPL_ERROR_RECOVERY_TYPE_HWRM_ASYNC_EVENT + /* Identifiers of events. */ + uint16_t event_id; + /* + * This async notification message can be used for selecting or + * deselecting master function for error recovery, + * and to communicate to all the functions whether error recovery + * was enabled/disabled. + */ + #define HWRM_ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_ID_ERROR_RECOVERY \ + UINT32_C(0x9) + #define HWRM_ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_ID_LAST \ + HWRM_ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_ID_ERROR_RECOVERY + /* Event specific data */ + uint32_t event_data2; + uint8_t opaque_v; + /* + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. 
+ */ + #define HWRM_ASYNC_EVENT_CMPL_ERROR_RECOVERY_V UINT32_C(0x1) + /* opaque is 7 b */ + #define HWRM_ASYNC_EVENT_CMPL_ERROR_RECOVERY_OPAQUE_MASK UINT32_C(0xfe) + #define HWRM_ASYNC_EVENT_CMPL_ERROR_RECOVERY_OPAQUE_SFT 1 + /* 8-lsb timestamp (100-msec resolution) */ + uint8_t timestamp_lo; + /* 16-lsb timestamp (100-msec resolution) */ + uint16_t timestamp_hi; + /* Event specific data */ + uint32_t event_data1; + /* Indicates driver action requested */ + #define HWRM_ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_MASK \ + UINT32_C(0xff) + #define HWRM_ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_SFT \ + 0 + /* + * If set to 1, this function is selected as Master function. + * This function has responsibility to do 'chip reset' when it + * detects a fatal error. If set to 0, master function functionality + * is disabled on this function. + */ + #define HWRM_ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_MASTER_FUNC \ + UINT32_C(0x1) + /* + * If set to 1, error recovery is enabled. + * If set to 0, error recovery is disabled. + */ + #define HWRM_ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_RECOVERY_ENABLED \ + UINT32_C(0x2) +} __rte_packed; + +/* hwrm_async_event_cmpl_func_drvr_unload (size:128b/16B) */ +struct hwrm_async_event_cmpl_func_drvr_unload { + uint16_t type; + /* + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records. + */ + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_MASK \ + UINT32_C(0x3f) + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_SFT 0 + /* HWRM Asynchronous Event Information */ + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT \ + UINT32_C(0x2e) + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_LAST \ + HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT + /* Identifiers of events. */ + uint16_t event_id; + /* Function driver unloaded */ + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_ID_FUNC_DRVR_UNLOAD \ + UINT32_C(0x10) + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_ID_LAST \ + HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_ID_FUNC_DRVR_UNLOAD + /* Event specific data */ + uint32_t event_data2; + uint8_t opaque_v; + /* + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. + */ + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_V UINT32_C(0x1) + /* opaque is 7 b */ + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_OPAQUE_MASK \ + UINT32_C(0xfe) + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_OPAQUE_SFT 1 + /* 8-lsb timestamp from POR (100-msec resolution) */ + uint8_t timestamp_lo; + /* 16-lsb timestamp from POR (100-msec resolution) */ + uint16_t timestamp_hi; + /* Event specific data */ + uint32_t event_data1; + /* Function ID */ + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_MASK \ + UINT32_C(0xffff) + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_SFT \ + 0 +} __rte_packed; + +/* hwrm_async_event_cmpl_func_drvr_load (size:128b/16B) */ +struct hwrm_async_event_cmpl_func_drvr_load { + uint16_t type; + /* + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records. 
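+ * Illustrative sketch only (not part of the interface definition):
+ * per this convention the record length in bytes can be derived as
+ *   rec_len = (type & 0x1) ? 32 : 16;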
+ */ + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_MASK \ + UINT32_C(0x3f) + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_SFT 0 + /* HWRM Asynchronous Event Information */ + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT \ + UINT32_C(0x2e) + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_LAST \ + HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT + /* Identifiers of events. */ + uint16_t event_id; + /* Function driver loaded */ + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_ID_FUNC_DRVR_LOAD \ + UINT32_C(0x11) + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_ID_LAST \ + HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_ID_FUNC_DRVR_LOAD + /* Event specific data */ + uint32_t event_data2; + uint8_t opaque_v; + /* + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. + */ + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_V UINT32_C(0x1) + /* opaque is 7 b */ + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_OPAQUE_MASK UINT32_C(0xfe) + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_OPAQUE_SFT 1 + /* 8-lsb timestamp from POR (100-msec resolution) */ + uint8_t timestamp_lo; + /* 16-lsb timestamp from POR (100-msec resolution) */ + uint16_t timestamp_hi; + /* Event specific data */ + uint32_t event_data1; + /* Function ID */ + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_DATA1_FUNC_ID_MASK \ + UINT32_C(0xffff) + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_DATA1_FUNC_ID_SFT 0 +} __rte_packed; + +/* hwrm_async_event_cmpl_func_flr_proc_cmplt (size:128b/16B) */ +struct hwrm_async_event_cmpl_func_flr_proc_cmplt { + uint16_t type; + /* + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records. + */ + #define HWRM_ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_TYPE_MASK \ + UINT32_C(0x3f) + #define HWRM_ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_TYPE_SFT \ + 0 + /* HWRM Asynchronous Event Information */ + #define HWRM_ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_TYPE_HWRM_ASYNC_EVENT \ + UINT32_C(0x2e) + #define HWRM_ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_TYPE_LAST \ + HWRM_ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_TYPE_HWRM_ASYNC_EVENT + /* Identifiers of events. */ + uint16_t event_id; + /* Function FLR related processing has completed */ + #define HWRM_ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_EVENT_ID_FUNC_FLR_PROC_CMPLT \ + UINT32_C(0x12) + #define HWRM_ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_EVENT_ID_LAST \ + HWRM_ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_EVENT_ID_FUNC_FLR_PROC_CMPLT + /* Event specific data */ + uint32_t event_data2; + uint8_t opaque_v; + /* + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. 
+ */ + #define HWRM_ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_V \ + UINT32_C(0x1) + /* opaque is 7 b */ + #define HWRM_ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_OPAQUE_MASK \ + UINT32_C(0xfe) + #define HWRM_ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_OPAQUE_SFT 1 + /* 8-lsb timestamp from POR (100-msec resolution) */ + uint8_t timestamp_lo; + /* 16-lsb timestamp from POR (100-msec resolution) */ + uint16_t timestamp_hi; + /* Event specific data */ + uint32_t event_data1; + /* Function ID */ + #define HWRM_ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_EVENT_DATA1_FUNC_ID_MASK \ + UINT32_C(0xffff) + #define HWRM_ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_EVENT_DATA1_FUNC_ID_SFT \ + 0 +} __rte_packed; + +/* hwrm_async_event_cmpl_pf_drvr_unload (size:128b/16B) */ +struct hwrm_async_event_cmpl_pf_drvr_unload { + uint16_t type; + /* + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records. + */ + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_MASK \ + UINT32_C(0x3f) + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_SFT 0 + /* HWRM Asynchronous Event Information */ + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT \ + UINT32_C(0x2e) + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_LAST \ + HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT + /* Identifiers of events. */ + uint16_t event_id; + /* PF driver unloaded */ + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_ID_PF_DRVR_UNLOAD \ + UINT32_C(0x20) + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_ID_LAST \ + HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_ID_PF_DRVR_UNLOAD + /* Event specific data */ + uint32_t event_data2; + uint8_t opaque_v; + /* + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. + */ + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_V UINT32_C(0x1) + /* opaque is 7 b */ + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_MASK UINT32_C(0xfe) + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_SFT 1 + /* 8-lsb timestamp from POR (100-msec resolution) */ + uint8_t timestamp_lo; + /* 16-lsb timestamp from POR (100-msec resolution) */ + uint16_t timestamp_hi; + /* Event specific data */ + uint32_t event_data1; + /* PF ID */ + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_MASK \ + UINT32_C(0xffff) + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_SFT 0 + /* Indicates the physical port this pf belongs to */ + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_PORT_MASK \ + UINT32_C(0x70000) + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_PORT_SFT 16 +} __rte_packed; + +/* hwrm_async_event_cmpl_pf_drvr_load (size:128b/16B) */ +struct hwrm_async_event_cmpl_pf_drvr_load { + uint16_t type; + /* + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records. 
+ */ + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_MASK \ + UINT32_C(0x3f) + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_SFT 0 + /* HWRM Asynchronous Event Information */ + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT \ + UINT32_C(0x2e) + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_LAST \ + HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT + /* Identifiers of events. */ + uint16_t event_id; + /* PF driver loaded */ + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_ID_PF_DRVR_LOAD \ + UINT32_C(0x21) + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_ID_LAST \ + HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_ID_PF_DRVR_LOAD + /* Event specific data */ + uint32_t event_data2; + uint8_t opaque_v; + /* + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. + */ + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_V UINT32_C(0x1) + /* opaque is 7 b */ + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_OPAQUE_MASK UINT32_C(0xfe) + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_OPAQUE_SFT 1 + /* 8-lsb timestamp from POR (100-msec resolution) */ + uint8_t timestamp_lo; + /* 16-lsb timestamp from POR (100-msec resolution) */ + uint16_t timestamp_hi; + /* Event specific data */ + uint32_t event_data1; + /* PF ID */ + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_FUNC_ID_MASK \ + UINT32_C(0xffff) + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_FUNC_ID_SFT 0 + /* Indicates the physical port this pf belongs to */ + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_PORT_MASK \ + UINT32_C(0x70000) + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_PORT_SFT 16 +} __rte_packed; + +/* hwrm_async_event_cmpl_vf_flr (size:128b/16B) */ +struct hwrm_async_event_cmpl_vf_flr { + uint16_t type; + /* + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records. + */ + #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_TYPE_MASK \ + UINT32_C(0x3f) + #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_TYPE_SFT 0 + /* HWRM Asynchronous Event Information */ + #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_TYPE_HWRM_ASYNC_EVENT \ + UINT32_C(0x2e) + #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_TYPE_LAST \ + HWRM_ASYNC_EVENT_CMPL_VF_FLR_TYPE_HWRM_ASYNC_EVENT + /* Identifiers of events. */ + uint16_t event_id; + /* VF Function Level Reset (FLR) */ + #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_ID_VF_FLR UINT32_C(0x30) + #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_ID_LAST \ + HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_ID_VF_FLR + /* Event specific data */ + uint32_t event_data2; + uint8_t opaque_v; + /* + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. 
+ */ + #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_V UINT32_C(0x1) + /* opaque is 7 b */ + #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_OPAQUE_MASK UINT32_C(0xfe) + #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_OPAQUE_SFT 1 + /* 8-lsb timestamp from POR (100-msec resolution) */ + uint8_t timestamp_lo; + /* 16-lsb timestamp from POR (100-msec resolution) */ + uint16_t timestamp_hi; + /* Event specific data */ + uint32_t event_data1; + /* VF ID */ + #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_VF_ID_MASK \ + UINT32_C(0xffff) + #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_VF_ID_SFT 0 + /* Indicates the physical function this event occurred on. */ + #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_PF_ID_MASK \ + UINT32_C(0xff0000) + #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_PF_ID_SFT 16 +} __rte_packed; + +/* hwrm_async_event_cmpl_vf_mac_addr_change (size:128b/16B) */ +struct hwrm_async_event_cmpl_vf_mac_addr_change { + uint16_t type; + /* + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records. + */ + #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_MASK \ + UINT32_C(0x3f) + #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_SFT 0 + /* HWRM Asynchronous Event Information */ + #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_HWRM_ASYNC_EVENT \ + UINT32_C(0x2e) + #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_LAST \ + HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_HWRM_ASYNC_EVENT + /* Identifiers of events. */ + uint16_t event_id; + /* VF MAC Address Change */ + #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_ID_VF_MAC_ADDR_CHANGE \ + UINT32_C(0x31) + #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_ID_LAST \ + HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_ID_VF_MAC_ADDR_CHANGE + /* Event specific data */ + uint32_t event_data2; + uint8_t opaque_v; + /* + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. + */ + #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_V \ + UINT32_C(0x1) + /* opaque is 7 b */ + #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_OPAQUE_MASK \ + UINT32_C(0xfe) + #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_OPAQUE_SFT 1 + /* 8-lsb timestamp from POR (100-msec resolution) */ + uint8_t timestamp_lo; + /* 16-lsb timestamp from POR (100-msec resolution) */ + uint16_t timestamp_hi; + /* Event specific data */ + uint32_t event_data1; + /* VF ID */ + #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_DATA1_VF_ID_MASK \ + UINT32_C(0xffff) + #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_DATA1_VF_ID_SFT \ + 0 +} __rte_packed; + +/* hwrm_async_event_cmpl_pf_vf_comm_status_change (size:128b/16B) */ +struct hwrm_async_event_cmpl_pf_vf_comm_status_change { + uint16_t type; + /* + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records. 
+ */ + #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_MASK \ + UINT32_C(0x3f) + #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_SFT \ + 0 + /* HWRM Asynchronous Event Information */ + #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT \ + UINT32_C(0x2e) + #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_LAST \ + HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT + /* Identifiers of events. */ + uint16_t event_id; + /* PF-VF communication channel status change. */ + #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_EVENT_ID_PF_VF_COMM_STATUS_CHANGE \ + UINT32_C(0x32) + #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_EVENT_ID_LAST \ + HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_EVENT_ID_PF_VF_COMM_STATUS_CHANGE + /* Event specific data */ + uint32_t event_data2; + uint8_t opaque_v; + /* + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. + */ + #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_V \ + UINT32_C(0x1) + /* opaque is 7 b */ + #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_OPAQUE_MASK \ + UINT32_C(0xfe) + #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_OPAQUE_SFT 1 + /* 8-lsb timestamp from POR (100-msec resolution) */ + uint8_t timestamp_lo; + /* 16-lsb timestamp from POR (100-msec resolution) */ + uint16_t timestamp_hi; + /* Event specific data */ + uint32_t event_data1; + /* + * If this bit is set to 1, then it indicates that the PF-VF + * communication was lost and it is established. + * If this bit set to 0, then it indicates that the PF-VF + * communication was established and it is lost. + */ + #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_EVENT_DATA1_COMM_ESTABLISHED \ + UINT32_C(0x1) +} __rte_packed; + +/* hwrm_async_event_cmpl_vf_cfg_change (size:128b/16B) */ +struct hwrm_async_event_cmpl_vf_cfg_change { + uint16_t type; + /* + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records. + */ + #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_MASK \ + UINT32_C(0x3f) + #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_SFT 0 + /* HWRM Asynchronous Event Information */ + #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT \ + UINT32_C(0x2e) + #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_LAST \ + HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT + /* Identifiers of events. */ + uint16_t event_id; + /* VF Configuration Change */ + #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_VF_CFG_CHANGE \ + UINT32_C(0x33) + #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_LAST \ + HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_VF_CFG_CHANGE + /* Event specific data */ + uint32_t event_data2; + uint8_t opaque_v; + /* + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. 
+ */ + #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_V UINT32_C(0x1) + /* opaque is 7 b */ + #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_MASK UINT32_C(0xfe) + #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_SFT 1 + /* 8-lsb timestamp from POR (100-msec resolution) */ + uint8_t timestamp_lo; + /* 16-lsb timestamp from POR (100-msec resolution) */ + uint16_t timestamp_hi; + /* + * Each flag provided in this field indicates a specific VF + * configuration change. At least one of these flags shall be set to 1 + * when an asynchronous event completion of this type is provided + * by the HWRM. + */ + uint32_t event_data1; + /* + * If this bit is set to 1, then the value of MTU + * was changed on this VF. + * If set to 0, then this bit should be ignored. + */ + #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MTU_CHANGE \ + UINT32_C(0x1) + /* + * If this bit is set to 1, then the value of MRU + * was changed on this VF. + * If set to 0, then this bit should be ignored. + */ + #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MRU_CHANGE \ + UINT32_C(0x2) + /* + * If this bit is set to 1, then the value of default MAC + * address was changed on this VF. + * If set to 0, then this bit should be ignored. + */ + #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_MAC_ADDR_CHANGE \ + UINT32_C(0x4) + /* + * If this bit is set to 1, then the value of default VLAN + * was changed on this VF. + * If set to 0, then this bit should be ignored. + */ + #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_VLAN_CHANGE \ + UINT32_C(0x8) + /* + * If this bit is set to 1, then the value of trusted VF enable + * was changed on this VF. + * If set to 0, then this bit should be ignored. + */ + #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_TRUSTED_VF_CFG_CHANGE \ + UINT32_C(0x10) +} __rte_packed; + +/* hwrm_async_event_cmpl_llfc_pfc_change (size:128b/16B) */ +struct hwrm_async_event_cmpl_llfc_pfc_change { + uint16_t type; + /* + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records. + */ + #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_TYPE_MASK \ + UINT32_C(0x3f) + #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_TYPE_SFT 0 + /* HWRM Asynchronous Event Information */ + #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_TYPE_HWRM_ASYNC_EVENT \ + UINT32_C(0x2e) + #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_TYPE_LAST \ + HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_TYPE_HWRM_ASYNC_EVENT + /* unused1 is 10 b */ + #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_UNUSED1_MASK \ + UINT32_C(0xffc0) + #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_UNUSED1_SFT 6 + /* Identifiers of events. */ + uint16_t event_id; + /* LLFC/PFC Configuration Change */ + #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_ID_LLFC_PFC_CHANGE \ + UINT32_C(0x34) + #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_ID_LAST \ + HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_ID_LLFC_PFC_CHANGE + /* Event specific data */ + uint32_t event_data2; + uint8_t opaque_v; + /* + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. 
+ */ + #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_V UINT32_C(0x1) + /* opaque is 7 b */ + #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_OPAQUE_MASK \ + UINT32_C(0xfe) + #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_OPAQUE_SFT 1 + /* 8-lsb timestamp from POR (100-msec resolution) */ + uint8_t timestamp_lo; + /* 16-lsb timestamp from POR (100-msec resolution) */ + uint16_t timestamp_hi; + /* Event specific data */ + uint32_t event_data1; + /* Indicates llfc pfc status change */ + #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_LLFC_PFC_MASK \ + UINT32_C(0x3) + #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_LLFC_PFC_SFT \ + 0 + /* + * If this field set to 1, then it indicates that llfc is + * enabled. + */ + #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_LLFC_PFC_LLFC \ + UINT32_C(0x1) + /* + * If this field is set to 2, then it indicates that pfc + * is enabled. + */ + #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_LLFC_PFC_PFC \ + UINT32_C(0x2) + #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_LLFC_PFC_LAST \ + HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_LLFC_PFC_PFC + /* Indicates the physical port this llfc pfc change occur */ + #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_PORT_MASK \ + UINT32_C(0x1c) + #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_PORT_SFT \ + 2 + /* PORT ID */ + #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_PORT_ID_MASK \ + UINT32_C(0x1fffe0) + #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_PORT_ID_SFT \ + 5 +} __rte_packed; + +/* hwrm_async_event_cmpl_default_vnic_change (size:128b/16B) */ +struct hwrm_async_event_cmpl_default_vnic_change { + uint16_t type; + /* + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records. + */ + #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_MASK \ + UINT32_C(0x3f) + #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_SFT \ + 0 + /* HWRM Asynchronous Event Information */ + #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_HWRM_ASYNC_EVENT \ + UINT32_C(0x2e) + #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_LAST \ + HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_HWRM_ASYNC_EVENT + /* unused1 is 10 b */ + #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_UNUSED1_MASK \ + UINT32_C(0xffc0) + #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_UNUSED1_SFT \ + 6 + /* Identifiers of events. */ + uint16_t event_id; + /* Notification of a default vnic allocation or free */ + #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_ID_ALLOC_FREE_NOTIFICATION \ + UINT32_C(0x35) + #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_ID_LAST \ + HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_ID_ALLOC_FREE_NOTIFICATION + /* Event specific data */ + uint32_t event_data2; + uint8_t opaque_v; + /* + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. 
+ */ + #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_V \ + UINT32_C(0x1) + /* opaque is 7 b */ + #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_OPAQUE_MASK \ + UINT32_C(0xfe) + #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_OPAQUE_SFT 1 + /* 8-lsb timestamp from POR (100-msec resolution) */ + uint8_t timestamp_lo; + /* 16-lsb timestamp from POR (100-msec resolution) */ + uint16_t timestamp_hi; + /* Event specific data */ + uint32_t event_data1; + /* Indicates default vnic configuration change */ + #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_MASK \ + UINT32_C(0x3) + #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_SFT \ + 0 + /* + * If this field is set to 1, then it indicates that + * a default VNIC has been allocated. + */ + #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_DEF_VNIC_ALLOC \ + UINT32_C(0x1) + /* + * If this field is set to 2, then it indicates that + * a default VNIC has been freed. + */ + #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_DEF_VNIC_FREE \ + UINT32_C(0x2) + #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_LAST \ + HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_DEF_VNIC_FREE + /* Indicates the physical function this event occurred on. */ + #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_PF_ID_MASK \ + UINT32_C(0x3fc) + #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_PF_ID_SFT \ + 2 + /* Indicates the virtual function this event occurred on */ + #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_VF_ID_MASK \ + UINT32_C(0x3fffc00) + #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_VF_ID_SFT \ + 10 +} __rte_packed; + +/* hwrm_async_event_cmpl_hw_flow_aged (size:128b/16B) */ +struct hwrm_async_event_cmpl_hw_flow_aged { + uint16_t type; + /* + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records. + */ + #define HWRM_ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_MASK \ + UINT32_C(0x3f) + #define HWRM_ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_SFT 0 + /* HWRM Asynchronous Event Information */ + #define HWRM_ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_HWRM_ASYNC_EVENT \ + UINT32_C(0x2e) + #define HWRM_ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_LAST \ + HWRM_ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_HWRM_ASYNC_EVENT + /* Identifiers of events. */ + uint16_t event_id; + /* Notification of a hw flow aged */ + #define HWRM_ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_ID_HW_FLOW_AGED \ + UINT32_C(0x36) + #define HWRM_ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_ID_LAST \ + HWRM_ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_ID_HW_FLOW_AGED + /* Event specific data */ + uint32_t event_data2; + uint8_t opaque_v; + /* + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. + */ + #define HWRM_ASYNC_EVENT_CMPL_HW_FLOW_AGED_V UINT32_C(0x1) + /* opaque is 7 b */ + #define HWRM_ASYNC_EVENT_CMPL_HW_FLOW_AGED_OPAQUE_MASK UINT32_C(0xfe) + #define HWRM_ASYNC_EVENT_CMPL_HW_FLOW_AGED_OPAQUE_SFT 1 + /* 8-lsb timestamp from POR (100-msec resolution) */ + uint8_t timestamp_lo; + /* 16-lsb timestamp from POR (100-msec resolution) */ + uint16_t timestamp_hi; + /* Event specific data */ + uint32_t event_data1; + /* Indicates flow ID this event occurred on.
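+ * Illustrative sketch only (not part of the interface definition),
+ * assuming a hypothetical local variable data1 that holds event_data1:
+ *   flow_id = data1 &
+ *     HWRM_ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_ID_MASK;
+ *   is_tx = !!(data1 &
+ *     HWRM_ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION);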
*/ + #define HWRM_ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_ID_MASK \ + UINT32_C(0x7fffffff) + #define HWRM_ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_ID_SFT \ + 0 + /* Indicates flow direction this event occurred on. */ + #define HWRM_ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION \ + UINT32_C(0x80000000) + /* + * If this bit set to 0, then it indicates that the aged + * event was rx flow. + */ + #define HWRM_ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION_RX \ + (UINT32_C(0x0) << 31) + /* + * If this bit is set to 1, then it indicates that the aged + * event was tx flow. + */ + #define HWRM_ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION_TX \ + (UINT32_C(0x1) << 31) + #define HWRM_ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION_LAST \ + HWRM_ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION_TX +} __rte_packed; + +/* hwrm_async_event_cmpl_eem_cache_flush_req (size:128b/16B) */ +struct hwrm_async_event_cmpl_eem_cache_flush_req { + uint16_t type; + /* + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records. + */ + #define HWRM_ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_TYPE_MASK \ + UINT32_C(0x3f) + #define HWRM_ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_TYPE_SFT \ + 0 + /* HWRM Asynchronous Event Information */ + #define HWRM_ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_TYPE_HWRM_ASYNC_EVENT \ + UINT32_C(0x2e) + #define HWRM_ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_TYPE_LAST \ + HWRM_ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_TYPE_HWRM_ASYNC_EVENT + /* Identifiers of events. */ + uint16_t event_id; + /* Notification of a eem_cache_flush request */ + #define HWRM_ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_EVENT_ID_EEM_CACHE_FLUSH_REQ \ + UINT32_C(0x38) + #define HWRM_ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_EVENT_ID_LAST \ + HWRM_ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_EVENT_ID_EEM_CACHE_FLUSH_REQ + /* Event specific data */ + uint32_t event_data2; + uint8_t opaque_v; + /* + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. + */ + #define HWRM_ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_V \ + UINT32_C(0x1) + /* opaque is 7 b */ + #define HWRM_ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_OPAQUE_MASK \ + UINT32_C(0xfe) + #define HWRM_ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_OPAQUE_SFT 1 + /* 8-lsb timestamp from POR (100-msec resolution) */ + uint8_t timestamp_lo; + /* 16-lsb timestamp from POR (100-msec resolution) */ + uint16_t timestamp_hi; + /* Event specific data */ + uint32_t event_data1; +} __rte_packed; + +/* hwrm_async_event_cmpl_eem_cache_flush_done (size:128b/16B) */ +struct hwrm_async_event_cmpl_eem_cache_flush_done { + uint16_t type; + /* + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records. + */ + #define HWRM_ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_TYPE_MASK \ + UINT32_C(0x3f) + #define HWRM_ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_TYPE_SFT \ + 0 + /* HWRM Asynchronous Event Information */ + #define HWRM_ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_TYPE_HWRM_ASYNC_EVENT \ + UINT32_C(0x2e) + #define HWRM_ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_TYPE_LAST \ + HWRM_ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_TYPE_HWRM_ASYNC_EVENT + /* Identifiers of events. 
*/ + uint16_t event_id; + /* + * Notification of a host eem_cache_flush has completed. This event + * is generated by the host driver. + */ + #define HWRM_ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_EVENT_ID_EEM_CACHE_FLUSH_DONE \ + UINT32_C(0x39) + #define HWRM_ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_EVENT_ID_LAST \ + HWRM_ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_EVENT_ID_EEM_CACHE_FLUSH_DONE + /* Event specific data */ + uint32_t event_data2; + uint8_t opaque_v; + /* + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. + */ + #define HWRM_ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_V \ + UINT32_C(0x1) + /* opaque is 7 b */ + #define HWRM_ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_OPAQUE_MASK \ + UINT32_C(0xfe) + #define HWRM_ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_OPAQUE_SFT 1 + /* 8-lsb timestamp from POR (100-msec resolution) */ + uint8_t timestamp_lo; + /* 16-lsb timestamp from POR (100-msec resolution) */ + uint16_t timestamp_hi; + /* Event specific data */ + uint32_t event_data1; + /* Indicates function ID that this event occurred on. */ + #define HWRM_ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_EVENT_DATA1_FID_MASK \ + UINT32_C(0xffff) + #define HWRM_ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_EVENT_DATA1_FID_SFT \ + 0 +} __rte_packed; + +/* hwrm_async_event_cmpl_tcp_flag_action_change (size:128b/16B) */ +struct hwrm_async_event_cmpl_tcp_flag_action_change { + uint16_t type; + /* + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records. + */ + #define HWRM_ASYNC_EVENT_CMPL_TCP_FLAG_ACTION_CHANGE_TYPE_MASK \ + UINT32_C(0x3f) + #define HWRM_ASYNC_EVENT_CMPL_TCP_FLAG_ACTION_CHANGE_TYPE_SFT \ + 0 + /* HWRM Asynchronous Event Information */ + #define HWRM_ASYNC_EVENT_CMPL_TCP_FLAG_ACTION_CHANGE_TYPE_HWRM_ASYNC_EVENT \ + UINT32_C(0x2e) + #define HWRM_ASYNC_EVENT_CMPL_TCP_FLAG_ACTION_CHANGE_TYPE_LAST \ + HWRM_ASYNC_EVENT_CMPL_TCP_FLAG_ACTION_CHANGE_TYPE_HWRM_ASYNC_EVENT + /* Identifiers of events. */ + uint16_t event_id; + /* Notification of tcp flag action change */ + #define HWRM_ASYNC_EVENT_CMPL_TCP_FLAG_ACTION_CHANGE_EVENT_ID_TCP_FLAG_ACTION_CHANGE \ + UINT32_C(0x3a) + #define HWRM_ASYNC_EVENT_CMPL_TCP_FLAG_ACTION_CHANGE_EVENT_ID_LAST \ + HWRM_ASYNC_EVENT_CMPL_TCP_FLAG_ACTION_CHANGE_EVENT_ID_TCP_FLAG_ACTION_CHANGE + /* Event specific data */ + uint32_t event_data2; + uint8_t opaque_v; + /* + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. + */ + #define HWRM_ASYNC_EVENT_CMPL_TCP_FLAG_ACTION_CHANGE_V \ + UINT32_C(0x1) + /* opaque is 7 b */ + #define HWRM_ASYNC_EVENT_CMPL_TCP_FLAG_ACTION_CHANGE_OPAQUE_MASK \ + UINT32_C(0xfe) + #define HWRM_ASYNC_EVENT_CMPL_TCP_FLAG_ACTION_CHANGE_OPAQUE_SFT 1 + /* 8-lsb timestamp from POR (100-msec resolution) */ + uint8_t timestamp_lo; + /* 16-lsb timestamp from POR (100-msec resolution) */ + uint16_t timestamp_hi; + /* Event specific data */ + uint32_t event_data1; +} __rte_packed; + +/* hwrm_async_event_cmpl_eem_flow_active (size:128b/16B) */ +struct hwrm_async_event_cmpl_eem_flow_active { + uint16_t type; + /* + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. 
Even values indicate 16B + * records. Odd values indicate 32B + * records. + */ + #define HWRM_ASYNC_EVENT_CMPL_EEM_FLOW_ACTIVE_TYPE_MASK \ + UINT32_C(0x3f) + #define HWRM_ASYNC_EVENT_CMPL_EEM_FLOW_ACTIVE_TYPE_SFT 0 + /* HWRM Asynchronous Event Information */ + #define HWRM_ASYNC_EVENT_CMPL_EEM_FLOW_ACTIVE_TYPE_HWRM_ASYNC_EVENT \ + UINT32_C(0x2e) + #define HWRM_ASYNC_EVENT_CMPL_EEM_FLOW_ACTIVE_TYPE_LAST \ + HWRM_ASYNC_EVENT_CMPL_EEM_FLOW_ACTIVE_TYPE_HWRM_ASYNC_EVENT + /* Identifiers of events. */ + uint16_t event_id; + /* Notification of an active eem flow */ + #define HWRM_ASYNC_EVENT_CMPL_EEM_FLOW_ACTIVE_EVENT_ID_EEM_FLOW_ACTIVE \ + UINT32_C(0x3b) + #define HWRM_ASYNC_EVENT_CMPL_EEM_FLOW_ACTIVE_EVENT_ID_LAST \ + HWRM_ASYNC_EVENT_CMPL_EEM_FLOW_ACTIVE_EVENT_ID_EEM_FLOW_ACTIVE + /* Event specific data */ + uint32_t event_data2; + /* Indicates the 2nd global id this event occurred on. */ + #define HWRM_ASYNC_EVENT_CMPL_EEM_FLOW_ACTIVE_EVENT_DATA2_GLOBAL_ID_2_MASK \ + UINT32_C(0x3fffffff) + #define HWRM_ASYNC_EVENT_CMPL_EEM_FLOW_ACTIVE_EVENT_DATA2_GLOBAL_ID_2_SFT \ + 0 + /* + * Indicates flow direction of the flow identified by + * the global_id_2. + */ + #define HWRM_ASYNC_EVENT_CMPL_EEM_FLOW_ACTIVE_EVENT_DATA2_FLOW_DIRECTION \ + UINT32_C(0x40000000) + /* If this bit is set to 0, then it indicates that this rx flow. */ + #define HWRM_ASYNC_EVENT_CMPL_EEM_FLOW_ACTIVE_EVENT_DATA2_FLOW_DIRECTION_RX \ + (UINT32_C(0x0) << 30) + /* If this bit is set to 1, then it indicates that this tx flow. */ + #define HWRM_ASYNC_EVENT_CMPL_EEM_FLOW_ACTIVE_EVENT_DATA2_FLOW_DIRECTION_TX \ + (UINT32_C(0x1) << 30) + #define HWRM_ASYNC_EVENT_CMPL_EEM_FLOW_ACTIVE_EVENT_DATA2_FLOW_DIRECTION_LAST \ + HWRM_ASYNC_EVENT_CMPL_EEM_FLOW_ACTIVE_EVENT_DATA2_FLOW_DIRECTION_TX + uint8_t opaque_v; + /* + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. + */ + #define HWRM_ASYNC_EVENT_CMPL_EEM_FLOW_ACTIVE_V UINT32_C(0x1) + /* opaque is 7 b */ + #define HWRM_ASYNC_EVENT_CMPL_EEM_FLOW_ACTIVE_OPAQUE_MASK \ + UINT32_C(0xfe) + #define HWRM_ASYNC_EVENT_CMPL_EEM_FLOW_ACTIVE_OPAQUE_SFT 1 + /* 8-lsb timestamp from POR (100-msec resolution) */ + uint8_t timestamp_lo; + /* 16-lsb timestamp from POR (100-msec resolution) */ + uint16_t timestamp_hi; + /* Event specific data */ + uint32_t event_data1; + /* Indicates the 1st global id this event occurred on. */ + #define HWRM_ASYNC_EVENT_CMPL_EEM_FLOW_ACTIVE_EVENT_DATA1_GLOBAL_ID_1_MASK \ + UINT32_C(0x3fffffff) + #define HWRM_ASYNC_EVENT_CMPL_EEM_FLOW_ACTIVE_EVENT_DATA1_GLOBAL_ID_1_SFT \ + 0 + /* + * Indicates flow direction of the flow identified by the + * global_id_1. + */ + #define HWRM_ASYNC_EVENT_CMPL_EEM_FLOW_ACTIVE_EVENT_DATA1_FLOW_DIRECTION \ + UINT32_C(0x40000000) + /* If this bit is set to 0, then it indicates that this is rx flow. */ + #define HWRM_ASYNC_EVENT_CMPL_EEM_FLOW_ACTIVE_EVENT_DATA1_FLOW_DIRECTION_RX \ + (UINT32_C(0x0) << 30) + /* If this bit is set to 1, then it indicates that this is tx flow. */ + #define HWRM_ASYNC_EVENT_CMPL_EEM_FLOW_ACTIVE_EVENT_DATA1_FLOW_DIRECTION_TX \ + (UINT32_C(0x1) << 30) + #define HWRM_ASYNC_EVENT_CMPL_EEM_FLOW_ACTIVE_EVENT_DATA1_FLOW_DIRECTION_LAST \ + HWRM_ASYNC_EVENT_CMPL_EEM_FLOW_ACTIVE_EVENT_DATA1_FLOW_DIRECTION_TX + /* + * Indicates EEM flow aging mode this event occurred on. If + * this bit is set to 0, the event_data1 is the EEM global + * ID. 
If this bit is set to 1, the event_data1 is the number + * of global ID in the context memory. + */ + #define HWRM_ASYNC_EVENT_CMPL_EEM_FLOW_ACTIVE_EVENT_DATA1_MODE \ + UINT32_C(0x80000000) + /* EEM flow aging mode 0. */ + #define HWRM_ASYNC_EVENT_CMPL_EEM_FLOW_ACTIVE_EVENT_DATA1_MODE_0 \ + (UINT32_C(0x0) << 31) + /* EEM flow aging mode 1. */ + #define HWRM_ASYNC_EVENT_CMPL_EEM_FLOW_ACTIVE_EVENT_DATA1_MODE_1 \ + (UINT32_C(0x1) << 31) + #define HWRM_ASYNC_EVENT_CMPL_EEM_FLOW_ACTIVE_EVENT_DATA1_MODE_LAST \ + HWRM_ASYNC_EVENT_CMPL_EEM_FLOW_ACTIVE_EVENT_DATA1_MODE_1 +} __rte_packed; + +/* hwrm_async_event_cmpl_eem_cfg_change (size:128b/16B) */ +struct hwrm_async_event_cmpl_eem_cfg_change { + uint16_t type; + /* + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records. + */ + #define HWRM_ASYNC_EVENT_CMPL_EEM_CFG_CHANGE_TYPE_MASK \ + UINT32_C(0x3f) + #define HWRM_ASYNC_EVENT_CMPL_EEM_CFG_CHANGE_TYPE_SFT 0 + /* HWRM Asynchronous Event Information */ + #define HWRM_ASYNC_EVENT_CMPL_EEM_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT \ + UINT32_C(0x2e) + #define HWRM_ASYNC_EVENT_CMPL_EEM_CFG_CHANGE_TYPE_LAST \ + HWRM_ASYNC_EVENT_CMPL_EEM_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT + /* Identifiers of events. */ + uint16_t event_id; + /* Notification of EEM configuration change */ + #define HWRM_ASYNC_EVENT_CMPL_EEM_CFG_CHANGE_EVENT_ID_EEM_CFG_CHANGE \ + UINT32_C(0x3c) + #define HWRM_ASYNC_EVENT_CMPL_EEM_CFG_CHANGE_EVENT_ID_LAST \ + HWRM_ASYNC_EVENT_CMPL_EEM_CFG_CHANGE_EVENT_ID_EEM_CFG_CHANGE + /* Event specific data */ + uint32_t event_data2; + uint8_t opaque_v; + /* + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. + */ + #define HWRM_ASYNC_EVENT_CMPL_EEM_CFG_CHANGE_V UINT32_C(0x1) + /* opaque is 7 b */ + #define HWRM_ASYNC_EVENT_CMPL_EEM_CFG_CHANGE_OPAQUE_MASK UINT32_C(0xfe) + #define HWRM_ASYNC_EVENT_CMPL_EEM_CFG_CHANGE_OPAQUE_SFT 1 + /* 8-lsb timestamp from POR (100-msec resolution) */ + uint8_t timestamp_lo; + /* 16-lsb timestamp from POR (100-msec resolution) */ + uint16_t timestamp_hi; + /* Event specific data */ + uint32_t event_data1; + /* + * Value of 1 to indicate EEM TX configuration is enabled. Value of + * 0 to indicate the EEM TX configuration is disabled. + */ + #define HWRM_ASYNC_EVENT_CMPL_EEM_CFG_CHANGE_EVENT_DATA1_EEM_TX_ENABLE \ + UINT32_C(0x1) + /* + * Value of 1 to indicate EEM RX configuration is enabled. Value of 0 + * to indicate the EEM RX configuration is disabled. + */ + #define HWRM_ASYNC_EVENT_CMPL_EEM_CFG_CHANGE_EVENT_DATA1_EEM_RX_ENABLE \ + UINT32_C(0x2) +} __rte_packed; + +/* hwrm_async_event_cmpl_quiesce_done (size:128b/16B) */ +struct hwrm_async_event_cmpl_quiesce_done { + uint16_t type; + /* + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records. + */ + #define HWRM_ASYNC_EVENT_CMPL_QUIESCE_DONE_TYPE_MASK \ + UINT32_C(0x3f) + #define HWRM_ASYNC_EVENT_CMPL_QUIESCE_DONE_TYPE_SFT 0 + /* HWRM Asynchronous Event Information */ + #define HWRM_ASYNC_EVENT_CMPL_QUIESCE_DONE_TYPE_HWRM_ASYNC_EVENT \ + UINT32_C(0x2e) + #define HWRM_ASYNC_EVENT_CMPL_QUIESCE_DONE_TYPE_LAST \ + HWRM_ASYNC_EVENT_CMPL_QUIESCE_DONE_TYPE_HWRM_ASYNC_EVENT + /* Identifiers of events. 
*/ + uint16_t event_id; + /* An event signifying completion of HWRM_FW_STATE_QUIESCE */ + #define HWRM_ASYNC_EVENT_CMPL_QUIESCE_DONE_EVENT_ID_QUIESCE_DONE \ + UINT32_C(0x3f) + #define HWRM_ASYNC_EVENT_CMPL_QUIESCE_DONE_EVENT_ID_LAST \ + HWRM_ASYNC_EVENT_CMPL_QUIESCE_DONE_EVENT_ID_QUIESCE_DONE + /* Event specific data */ + uint32_t event_data2; + /* Status of HWRM_FW_STATE_QUIESCE completion */ + #define HWRM_ASYNC_EVENT_CMPL_QUIESCE_DONE_EVENT_DATA2_QUIESCE_STATUS_MASK \ + UINT32_C(0xff) + #define HWRM_ASYNC_EVENT_CMPL_QUIESCE_DONE_EVENT_DATA2_QUIESCE_STATUS_SFT \ + 0 + /* + * The quiesce operation started by HWRM_FW_STATE_QUIESCE + * completed successfully. + */ + #define HWRM_ASYNC_EVENT_CMPL_QUIESCE_DONE_EVENT_DATA2_QUIESCE_STATUS_SUCCESS \ + UINT32_C(0x0) + /* + * The quiesce operation started by HWRM_FW_STATE_QUIESCE timed + * out. + */ + #define HWRM_ASYNC_EVENT_CMPL_QUIESCE_DONE_EVENT_DATA2_QUIESCE_STATUS_TIMEOUT \ + UINT32_C(0x1) + /* + * The quiesce operation started by HWRM_FW_STATE_QUIESCE + * encountered an error. + */ + #define HWRM_ASYNC_EVENT_CMPL_QUIESCE_DONE_EVENT_DATA2_QUIESCE_STATUS_ERROR \ + UINT32_C(0x2) + #define HWRM_ASYNC_EVENT_CMPL_QUIESCE_DONE_EVENT_DATA2_QUIESCE_STATUS_LAST \ + HWRM_ASYNC_EVENT_CMPL_QUIESCE_DONE_EVENT_DATA2_QUIESCE_STATUS_ERROR + /* opaque is 8 b */ + #define HWRM_ASYNC_EVENT_CMPL_QUIESCE_DONE_EVENT_DATA2_OPAQUE_MASK \ + UINT32_C(0xff00) + #define HWRM_ASYNC_EVENT_CMPL_QUIESCE_DONE_EVENT_DATA2_OPAQUE_SFT \ + 8 + /* + * Additional information about internal hardware state related to + * idle/quiesce state. QUIESCE may succeed per quiesce_status + * regardless of idle_state_flags. If QUIESCE fails, the host may + * inspect idle_state_flags to determine whether a retry is warranted. + */ + #define HWRM_ASYNC_EVENT_CMPL_QUIESCE_DONE_EVENT_DATA2_IDLE_STATE_FLAGS_MASK \ + UINT32_C(0xff0000) + #define HWRM_ASYNC_EVENT_CMPL_QUIESCE_DONE_EVENT_DATA2_IDLE_STATE_FLAGS_SFT \ + 16 + /* + * Failure to quiesce is caused by host not updating the NQ consumer + * index. + */ + #define HWRM_ASYNC_EVENT_CMPL_QUIESCE_DONE_EVENT_DATA2_IDLE_STATE_FLAGS_INCOMPLETE_NQ \ + UINT32_C(0x10000) + /* Flag 1 indicating partial non-idle state. */ + #define HWRM_ASYNC_EVENT_CMPL_QUIESCE_DONE_EVENT_DATA2_IDLE_STATE_FLAGS_IDLE_STATUS_1 \ + UINT32_C(0x20000) + /* Flag 2 indicating partial non-idle state. */ + #define HWRM_ASYNC_EVENT_CMPL_QUIESCE_DONE_EVENT_DATA2_IDLE_STATE_FLAGS_IDLE_STATUS_2 \ + UINT32_C(0x40000) + /* Flag 3 indicating partial non-idle state. */ + #define HWRM_ASYNC_EVENT_CMPL_QUIESCE_DONE_EVENT_DATA2_IDLE_STATE_FLAGS_IDLE_STATUS_3 \ + UINT32_C(0x80000) + uint8_t opaque_v; + /* + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. 
+ */ + #define HWRM_ASYNC_EVENT_CMPL_QUIESCE_DONE_V UINT32_C(0x1) + /* opaque is 7 b */ + #define HWRM_ASYNC_EVENT_CMPL_QUIESCE_DONE_OPAQUE_MASK UINT32_C(0xfe) + #define HWRM_ASYNC_EVENT_CMPL_QUIESCE_DONE_OPAQUE_SFT 1 + /* 8-lsb timestamp from POR (100-msec resolution) */ + uint8_t timestamp_lo; + /* 16-lsb timestamp from POR (100-msec resolution) */ + uint16_t timestamp_hi; + /* Event specific data */ + uint32_t event_data1; + /* Time stamp for error event */ + #define HWRM_ASYNC_EVENT_CMPL_QUIESCE_DONE_EVENT_DATA1_TIMESTAMP \ + UINT32_C(0x1) +} __rte_packed; + +/* hwrm_async_event_cmpl_deferred_response (size:128b/16B) */ +struct hwrm_async_event_cmpl_deferred_response { + uint16_t type; + /* + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records. + */ + #define HWRM_ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_TYPE_MASK \ + UINT32_C(0x3f) + #define HWRM_ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_TYPE_SFT 0 + /* HWRM Asynchronous Event Information */ + #define HWRM_ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_TYPE_HWRM_ASYNC_EVENT \ + UINT32_C(0x2e) + #define HWRM_ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_TYPE_LAST \ + HWRM_ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_TYPE_HWRM_ASYNC_EVENT + /* Identifiers of events. */ + uint16_t event_id; + /* + * An event signifying a HWRM command is in progress and its + * response will be deferred + */ + #define HWRM_ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_EVENT_ID_DEFERRED_RESPONSE \ + UINT32_C(0x40) + #define HWRM_ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_EVENT_ID_LAST \ + HWRM_ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_EVENT_ID_DEFERRED_RESPONSE + /* Event specific data */ + uint32_t event_data2; + /* + * The PF's mailbox is clear to issue another command. + * A command with this seq_id is still in progress + * and will return a regular HWRM completion when done. + * 'event_data1' field, if non-zero, contains the estimated + * execution time for the command. + */ + #define HWRM_ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_EVENT_DATA2_SEQ_ID_MASK \ + UINT32_C(0xffff) + #define HWRM_ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_EVENT_DATA2_SEQ_ID_SFT \ + 0 + uint8_t opaque_v; + /* + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. + */ + #define HWRM_ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_V \ + UINT32_C(0x1) + /* opaque is 7 b */ + #define HWRM_ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_OPAQUE_MASK \ + UINT32_C(0xfe) + #define HWRM_ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_OPAQUE_SFT 1 + /* 8-lsb timestamp from POR (100-msec resolution) */ + uint8_t timestamp_lo; + /* 16-lsb timestamp from POR (100-msec resolution) */ + uint16_t timestamp_hi; + /* Estimated remaining time of command execution in ms (if not zero) */ + uint32_t event_data1; +} __rte_packed; + +/* hwrm_async_event_cmpl_pfc_watchdog_cfg_change (size:128b/16B) */ +struct hwrm_async_event_cmpl_pfc_watchdog_cfg_change { + uint16_t type; + /* + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records. 
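+	 *
+	 * Illustrative sketch (not part of the generated HWRM definition):
+	 * a parser could derive the record length from the low bit of the
+	 * little-endian 'type' field, e.g.
+	 *
+	 *   uint16_t t = rte_le_to_cpu_16(cmpl->type);
+	 *   size_t rec_len = (t & 0x1) ? 32 : 16;
+	 *
+	 * where 'cmpl' is a hypothetical pointer to this structure.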
+ */ + #define HWRM_ASYNC_EVENT_CMPL_PFC_WATCHDOG_CFG_CHANGE_TYPE_MASK \ + UINT32_C(0x3f) + #define HWRM_ASYNC_EVENT_CMPL_PFC_WATCHDOG_CFG_CHANGE_TYPE_SFT \ + 0 + /* HWRM Asynchronous Event Information */ + #define HWRM_ASYNC_EVENT_CMPL_PFC_WATCHDOG_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT \ + UINT32_C(0x2e) + #define HWRM_ASYNC_EVENT_CMPL_PFC_WATCHDOG_CFG_CHANGE_TYPE_LAST \ + HWRM_ASYNC_EVENT_CMPL_PFC_WATCHDOG_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT + /* Identifiers of events. */ + uint16_t event_id; + /* PFC watchdog configuration change for given port/cos */ + #define HWRM_ASYNC_EVENT_CMPL_PFC_WATCHDOG_CFG_CHANGE_EVENT_ID_PFC_WATCHDOG_CFG_CHANGE \ + UINT32_C(0x41) + #define HWRM_ASYNC_EVENT_CMPL_PFC_WATCHDOG_CFG_CHANGE_EVENT_ID_LAST \ + HWRM_ASYNC_EVENT_CMPL_PFC_WATCHDOG_CFG_CHANGE_EVENT_ID_PFC_WATCHDOG_CFG_CHANGE + /* Event specific data */ + uint32_t event_data2; + uint8_t opaque_v; + /* + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. + */ + #define HWRM_ASYNC_EVENT_CMPL_PFC_WATCHDOG_CFG_CHANGE_V \ + UINT32_C(0x1) + /* opaque is 7 b */ + #define HWRM_ASYNC_EVENT_CMPL_PFC_WATCHDOG_CFG_CHANGE_OPAQUE_MASK \ + UINT32_C(0xfe) + #define HWRM_ASYNC_EVENT_CMPL_PFC_WATCHDOG_CFG_CHANGE_OPAQUE_SFT 1 + /* 8-lsb timestamp from POR (100-msec resolution) */ + uint8_t timestamp_lo; + /* 16-lsb timestamp from POR (100-msec resolution) */ + uint16_t timestamp_hi; + /* Event specific data */ + uint32_t event_data1; + /* + * 1 in bit position X indicates PFC watchdog should + * be on for COSX + */ + #define HWRM_ASYNC_EVENT_CMPL_PFC_WATCHDOG_CFG_CHANGE_EVENT_DATA1_PFC_WD_COS_MASK \ + UINT32_C(0xff) + #define HWRM_ASYNC_EVENT_CMPL_PFC_WATCHDOG_CFG_CHANGE_EVENT_DATA1_PFC_WD_COS_SFT \ + 0 + /* 1 means PFC WD for COS0 is on, 0 - off. */ + #define HWRM_ASYNC_EVENT_CMPL_PFC_WATCHDOG_CFG_CHANGE_EVENT_DATA1_PFC_WD_COS_PFC_WD_COS0 \ + UINT32_C(0x1) + /* 1 means PFC WD for COS1 is on, 0 - off. */ + #define HWRM_ASYNC_EVENT_CMPL_PFC_WATCHDOG_CFG_CHANGE_EVENT_DATA1_PFC_WD_COS_PFC_WD_COS1 \ + UINT32_C(0x2) + /* 1 means PFC WD for COS2 is on, 0 - off. */ + #define HWRM_ASYNC_EVENT_CMPL_PFC_WATCHDOG_CFG_CHANGE_EVENT_DATA1_PFC_WD_COS_PFC_WD_COS2 \ + UINT32_C(0x4) + /* 1 means PFC WD for COS3 is on, 0 - off. */ + #define HWRM_ASYNC_EVENT_CMPL_PFC_WATCHDOG_CFG_CHANGE_EVENT_DATA1_PFC_WD_COS_PFC_WD_COS3 \ + UINT32_C(0x8) + /* 1 means PFC WD for COS4 is on, 0 - off. */ + #define HWRM_ASYNC_EVENT_CMPL_PFC_WATCHDOG_CFG_CHANGE_EVENT_DATA1_PFC_WD_COS_PFC_WD_COS4 \ + UINT32_C(0x10) + /* 1 means PFC WD for COS5 is on, 0 - off. */ + #define HWRM_ASYNC_EVENT_CMPL_PFC_WATCHDOG_CFG_CHANGE_EVENT_DATA1_PFC_WD_COS_PFC_WD_COS5 \ + UINT32_C(0x20) + /* 1 means PFC WD for COS6 is on, 0 - off. */ + #define HWRM_ASYNC_EVENT_CMPL_PFC_WATCHDOG_CFG_CHANGE_EVENT_DATA1_PFC_WD_COS_PFC_WD_COS6 \ + UINT32_C(0x40) + /* 1 means PFC WD for COS7 is on, 0 - off. */ + #define HWRM_ASYNC_EVENT_CMPL_PFC_WATCHDOG_CFG_CHANGE_EVENT_DATA1_PFC_WD_COS_PFC_WD_COS7 \ + UINT32_C(0x80) + /* PORT ID */ + #define HWRM_ASYNC_EVENT_CMPL_PFC_WATCHDOG_CFG_CHANGE_EVENT_DATA1_PORT_ID_MASK \ + UINT32_C(0xffff00) + #define HWRM_ASYNC_EVENT_CMPL_PFC_WATCHDOG_CFG_CHANGE_EVENT_DATA1_PORT_ID_SFT \ + 8 +} __rte_packed; + +/* hwrm_async_event_cmpl_fw_trace_msg (size:128b/16B) */ +struct hwrm_async_event_cmpl_fw_trace_msg { + uint16_t type; + /* + * This field indicates the exact type of the completion. 
+ * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records. + */ + #define HWRM_ASYNC_EVENT_CMPL_FW_TRACE_MSG_TYPE_MASK \ + UINT32_C(0x3f) + #define HWRM_ASYNC_EVENT_CMPL_FW_TRACE_MSG_TYPE_SFT 0 + /* HWRM Asynchronous Event Information */ + #define HWRM_ASYNC_EVENT_CMPL_FW_TRACE_MSG_TYPE_HWRM_ASYNC_EVENT \ + UINT32_C(0x2e) + #define HWRM_ASYNC_EVENT_CMPL_FW_TRACE_MSG_TYPE_LAST \ + HWRM_ASYNC_EVENT_CMPL_FW_TRACE_MSG_TYPE_HWRM_ASYNC_EVENT + /* Identifiers of events. */ + uint16_t event_id; + /* Firmware trace log message */ + #define HWRM_ASYNC_EVENT_CMPL_FW_TRACE_MSG_EVENT_ID_FW_TRACE_MSG \ + UINT32_C(0xfe) + #define HWRM_ASYNC_EVENT_CMPL_FW_TRACE_MSG_EVENT_ID_LAST \ + HWRM_ASYNC_EVENT_CMPL_FW_TRACE_MSG_EVENT_ID_FW_TRACE_MSG + /* Trace byte 0 to 3 */ + uint32_t event_data2; + /* Trace byte0 */ + #define HWRM_ASYNC_EVENT_CMPL_FW_TRACE_MSG_EVENT_DATA2_BYTE0_MASK \ + UINT32_C(0xff) + #define HWRM_ASYNC_EVENT_CMPL_FW_TRACE_MSG_EVENT_DATA2_BYTE0_SFT 0 + /* Trace byte1 */ + #define HWRM_ASYNC_EVENT_CMPL_FW_TRACE_MSG_EVENT_DATA2_BYTE1_MASK \ + UINT32_C(0xff00) + #define HWRM_ASYNC_EVENT_CMPL_FW_TRACE_MSG_EVENT_DATA2_BYTE1_SFT 8 + /* Trace byte2 */ + #define HWRM_ASYNC_EVENT_CMPL_FW_TRACE_MSG_EVENT_DATA2_BYTE2_MASK \ + UINT32_C(0xff0000) + #define HWRM_ASYNC_EVENT_CMPL_FW_TRACE_MSG_EVENT_DATA2_BYTE2_SFT 16 + /* Trace byte3 */ + #define HWRM_ASYNC_EVENT_CMPL_FW_TRACE_MSG_EVENT_DATA2_BYTE3_MASK \ + UINT32_C(0xff000000) + #define HWRM_ASYNC_EVENT_CMPL_FW_TRACE_MSG_EVENT_DATA2_BYTE3_SFT 24 + uint8_t opaque_v; + /* + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. + */ + #define HWRM_ASYNC_EVENT_CMPL_FW_TRACE_MSG_V UINT32_C(0x1) + /* opaque is 7 b */ + #define HWRM_ASYNC_EVENT_CMPL_FW_TRACE_MSG_OPAQUE_MASK UINT32_C(0xfe) + #define HWRM_ASYNC_EVENT_CMPL_FW_TRACE_MSG_OPAQUE_SFT 1 + /* Trace flags */ + uint8_t timestamp_lo; + /* Indicates if the string is partial or complete. */ + #define HWRM_ASYNC_EVENT_CMPL_FW_TRACE_MSG_TIMESTAMP_LO_STRING \ + UINT32_C(0x1) + /* Complete string */ + #define HWRM_ASYNC_EVENT_CMPL_FW_TRACE_MSG_TIMESTAMP_LO_STRING_COMPLETE \ + UINT32_C(0x0) + /* Partial string */ + #define HWRM_ASYNC_EVENT_CMPL_FW_TRACE_MSG_TIMESTAMP_LO_STRING_PARTIAL \ + UINT32_C(0x1) + #define HWRM_ASYNC_EVENT_CMPL_FW_TRACE_MSG_TIMESTAMP_LO_STRING_LAST \ + HWRM_ASYNC_EVENT_CMPL_FW_TRACE_MSG_TIMESTAMP_LO_STRING_PARTIAL + /* Indicates the firmware that sent the trace message. 
*/ + #define HWRM_ASYNC_EVENT_CMPL_FW_TRACE_MSG_TIMESTAMP_LO_FIRMWARE \ + UINT32_C(0x2) + /* Primary firmware */ + #define HWRM_ASYNC_EVENT_CMPL_FW_TRACE_MSG_TIMESTAMP_LO_FIRMWARE_PRIMARY \ + (UINT32_C(0x0) << 1) + /* Secondary firmware */ + #define HWRM_ASYNC_EVENT_CMPL_FW_TRACE_MSG_TIMESTAMP_LO_FIRMWARE_SECONDARY \ + (UINT32_C(0x1) << 1) + #define HWRM_ASYNC_EVENT_CMPL_FW_TRACE_MSG_TIMESTAMP_LO_FIRMWARE_LAST \ + HWRM_ASYNC_EVENT_CMPL_FW_TRACE_MSG_TIMESTAMP_LO_FIRMWARE_SECONDARY + /* Trace byte 4 to 5 */ + uint16_t timestamp_hi; + /* Trace byte4 */ + #define HWRM_ASYNC_EVENT_CMPL_FW_TRACE_MSG_TIMESTAMP_HI_BYTE4_MASK \ + UINT32_C(0xff) + #define HWRM_ASYNC_EVENT_CMPL_FW_TRACE_MSG_TIMESTAMP_HI_BYTE4_SFT 0 + /* Trace byte5 */ + #define HWRM_ASYNC_EVENT_CMPL_FW_TRACE_MSG_TIMESTAMP_HI_BYTE5_MASK \ + UINT32_C(0xff00) + #define HWRM_ASYNC_EVENT_CMPL_FW_TRACE_MSG_TIMESTAMP_HI_BYTE5_SFT 8 + /* Trace byte 6 to 9 */ + uint32_t event_data1; + /* Trace byte6 */ + #define HWRM_ASYNC_EVENT_CMPL_FW_TRACE_MSG_EVENT_DATA1_BYTE6_MASK \ + UINT32_C(0xff) + #define HWRM_ASYNC_EVENT_CMPL_FW_TRACE_MSG_EVENT_DATA1_BYTE6_SFT 0 + /* Trace byte7 */ + #define HWRM_ASYNC_EVENT_CMPL_FW_TRACE_MSG_EVENT_DATA1_BYTE7_MASK \ + UINT32_C(0xff00) + #define HWRM_ASYNC_EVENT_CMPL_FW_TRACE_MSG_EVENT_DATA1_BYTE7_SFT 8 + /* Trace byte8 */ + #define HWRM_ASYNC_EVENT_CMPL_FW_TRACE_MSG_EVENT_DATA1_BYTE8_MASK \ + UINT32_C(0xff0000) + #define HWRM_ASYNC_EVENT_CMPL_FW_TRACE_MSG_EVENT_DATA1_BYTE8_SFT 16 + /* Trace byte9 */ + #define HWRM_ASYNC_EVENT_CMPL_FW_TRACE_MSG_EVENT_DATA1_BYTE9_MASK \ + UINT32_C(0xff000000) + #define HWRM_ASYNC_EVENT_CMPL_FW_TRACE_MSG_EVENT_DATA1_BYTE9_SFT 24 +} __rte_packed; + +/* hwrm_async_event_cmpl_hwrm_error (size:128b/16B) */ +struct hwrm_async_event_cmpl_hwrm_error { + uint16_t type; + /* + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records. + */ + #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_MASK \ + UINT32_C(0x3f) + #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_SFT 0 + /* HWRM Asynchronous Event Information */ + #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_HWRM_ASYNC_EVENT \ + UINT32_C(0x2e) + #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_LAST \ + HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_HWRM_ASYNC_EVENT + /* Identifiers of events. */ + uint16_t event_id; + /* HWRM Error */ + #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_HWRM_ERROR \ + UINT32_C(0xff) + #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_LAST \ + HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_HWRM_ERROR + /* Event specific data */ + uint32_t event_data2; + /* Severity of HWRM Error */ + #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_MASK \ + UINT32_C(0xff) + #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_SFT 0 + /* Warning */ + #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_WARNING \ + UINT32_C(0x0) + /* Non-fatal Error */ + #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_NONFATAL \ + UINT32_C(0x1) + /* Fatal Error */ + #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL \ + UINT32_C(0x2) + #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_LAST \ + HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL + uint8_t opaque_v; + /* + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. 
The odd passes will write 0. + */ + #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_V UINT32_C(0x1) + /* opaque is 7 b */ + #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_MASK UINT32_C(0xfe) + #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_SFT 1 + /* 8-lsb timestamp from POR (100-msec resolution) */ + uint8_t timestamp_lo; + /* 16-lsb timestamp from POR (100-msec resolution) */ + uint16_t timestamp_hi; + /* Event specific data */ + uint32_t event_data1; + /* Time stamp for error event */ + #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA1_TIMESTAMP \ + UINT32_C(0x1) +} __rte_packed; + +/******************* + * hwrm_func_reset * + *******************/ + + +/* hwrm_func_reset_input (size:192b/24B) */ +struct hwrm_func_reset_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + uint32_t enables; + /* + * This bit must be '1' for the vf_id_valid field to be + * configured. + */ + #define HWRM_FUNC_RESET_INPUT_ENABLES_VF_ID_VALID UINT32_C(0x1) + /* + * The ID of the VF that this PF is trying to reset. + * Only the parent PF shall be allowed to reset a child VF. + * + * A parent PF driver shall use this field only when a specific child VF + * is requested to be reset. + */ + uint16_t vf_id; + /* This value indicates the level of a function reset. */ + uint8_t func_reset_level; + /* + * Reset the caller function and its children VFs (if any). If no + * children functions exist, then reset the caller function only. + */ + #define HWRM_FUNC_RESET_INPUT_FUNC_RESET_LEVEL_RESETALL \ + UINT32_C(0x0) + /* Reset the caller function only */ + #define HWRM_FUNC_RESET_INPUT_FUNC_RESET_LEVEL_RESETME \ + UINT32_C(0x1) + /* + * Reset all children VFs of the caller function driver if the + * caller is a PF driver. + * It is an error to specify this level by a VF driver. + * It is an error to specify this level by a PF driver with + * no children VFs. + */ + #define HWRM_FUNC_RESET_INPUT_FUNC_RESET_LEVEL_RESETCHILDREN \ + UINT32_C(0x2) + /* + * Reset a specific VF of the caller function driver if the caller + * is the parent PF driver. + * It is an error to specify this level by a VF driver. + * It is an error to specify this level by a PF driver that is not + * the parent of the VF that is being requested to reset. + */ + #define HWRM_FUNC_RESET_INPUT_FUNC_RESET_LEVEL_RESETVF \ + UINT32_C(0x3) + #define HWRM_FUNC_RESET_INPUT_FUNC_RESET_LEVEL_LAST \ + HWRM_FUNC_RESET_INPUT_FUNC_RESET_LEVEL_RESETVF + uint8_t unused_0; +} __rte_packed; + +/* hwrm_func_reset_output (size:128b/16B) */ +struct hwrm_func_reset_output { + /* The specific error status for the command. 
*/ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/******************** + * hwrm_func_getfid * + ********************/ + + +/* hwrm_func_getfid_input (size:192b/24B) */ +struct hwrm_func_getfid_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + uint32_t enables; + /* + * This bit must be '1' for the pci_id field to be + * configured. + */ + #define HWRM_FUNC_GETFID_INPUT_ENABLES_PCI_ID UINT32_C(0x1) + /* + * This value is the PCI ID of the queried function. + * If ARI is enabled, then it is + * Bus Number (8b):Function Number(8b). Otherwise, it is + * Bus Number (8b):Device Number (5b):Function Number(3b). + */ + uint16_t pci_id; + uint8_t unused_0[2]; +} __rte_packed; + +/* hwrm_func_getfid_output (size:128b/16B) */ +struct hwrm_func_getfid_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* + * FID value. This value is used to identify operations on the PCI + * bus as belonging to a particular PCI function. + */ + uint16_t fid; + uint8_t unused_0[5]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/********************** + * hwrm_func_vf_alloc * + **********************/ + + +/* hwrm_func_vf_alloc_input (size:192b/24B) */ +struct hwrm_func_vf_alloc_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. 
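+	 *
+	 * Illustrative sketch (not part of the generated HWRM definition,
+	 * and assuming the HWRM_FUNC_VF_ALLOC request-type value defined
+	 * earlier in this file): a caller would typically zero the request
+	 * and fill this common header before the command-specific fields:
+	 *
+	 *   struct hwrm_func_vf_alloc_input req = { 0 };
+	 *   req.req_type = rte_cpu_to_le_16(HWRM_FUNC_VF_ALLOC);
+	 *   req.cmpl_ring = rte_cpu_to_le_16(nq_id);
+	 *
+	 * where 'nq_id' is a hypothetical notification queue ID.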
+ */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + uint32_t enables; + /* + * This bit must be '1' for the first_vf_id field to be + * configured. + */ + #define HWRM_FUNC_VF_ALLOC_INPUT_ENABLES_FIRST_VF_ID UINT32_C(0x1) + /* + * This value is used to identify a Virtual Function (VF). + * The scope of VF ID is local within a PF. + */ + uint16_t first_vf_id; + /* The number of virtual functions requested. */ + uint16_t num_vfs; +} __rte_packed; + +/* hwrm_func_vf_alloc_output (size:128b/16B) */ +struct hwrm_func_vf_alloc_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* The ID of the first VF allocated. */ + uint16_t first_vf_id; + uint8_t unused_0[5]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/********************* + * hwrm_func_vf_free * + *********************/ + + +/* hwrm_func_vf_free_input (size:192b/24B) */ +struct hwrm_func_vf_free_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + uint32_t enables; + /* + * This bit must be '1' for the first_vf_id field to be + * configured. + */ + #define HWRM_FUNC_VF_FREE_INPUT_ENABLES_FIRST_VF_ID UINT32_C(0x1) + /* + * This value is used to identify a Virtual Function (VF). + * The scope of VF ID is local within a PF. + */ + uint16_t first_vf_id; + /* + * The number of virtual functions requested. 
+ * 0xFFFF - Cleanup all children of this PF. + */ + uint16_t num_vfs; +} __rte_packed; + +/* hwrm_func_vf_free_output (size:128b/16B) */ +struct hwrm_func_vf_free_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/******************** + * hwrm_func_vf_cfg * + ********************/ + + +/* hwrm_func_vf_cfg_input (size:448b/56B) */ +struct hwrm_func_vf_cfg_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + uint32_t enables; + /* + * This bit must be '1' for the mtu field to be + * configured. + */ + #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_MTU \ + UINT32_C(0x1) + /* + * This bit must be '1' for the guest_vlan field to be + * configured. + */ + #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_GUEST_VLAN \ + UINT32_C(0x2) + /* + * This bit must be '1' for the async_event_cr field to be + * configured. + */ + #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_ASYNC_EVENT_CR \ + UINT32_C(0x4) + /* + * This bit must be '1' for the dflt_mac_addr field to be + * configured. + */ + #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_DFLT_MAC_ADDR \ + UINT32_C(0x8) + /* + * This bit must be '1' for the num_rsscos_ctxs field to be + * configured. + */ + #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS \ + UINT32_C(0x10) + /* + * This bit must be '1' for the num_cmpl_rings field to be + * configured. + */ + #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_CMPL_RINGS \ + UINT32_C(0x20) + /* + * This bit must be '1' for the num_tx_rings field to be + * configured. + */ + #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_TX_RINGS \ + UINT32_C(0x40) + /* + * This bit must be '1' for the num_rx_rings field to be + * configured. + */ + #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RX_RINGS \ + UINT32_C(0x80) + /* + * This bit must be '1' for the num_l2_ctxs field to be + * configured. + */ + #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_L2_CTXS \ + UINT32_C(0x100) + /* + * This bit must be '1' for the num_vnics field to be + * configured. 
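+	 *
+	 * Illustrative sketch of the enables pattern (not part of the
+	 * generated HWRM definition; 'req' and the values are
+	 * hypothetical): a field below is only honoured when its
+	 * corresponding enables bit is set, e.g.
+	 *
+	 *   req.enables = rte_cpu_to_le_32(
+	 *	HWRM_FUNC_VF_CFG_INPUT_ENABLES_MTU |
+	 *	HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_TX_RINGS);
+	 *   req.mtu = rte_cpu_to_le_16(1500);
+	 *   req.num_tx_rings = rte_cpu_to_le_16(4);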
+	 */
+	#define HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS \
+		UINT32_C(0x200)
+	/*
+	 * This bit must be '1' for the num_stat_ctxs field to be
+	 * configured.
+	 */
+	#define HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_STAT_CTXS \
+		UINT32_C(0x400)
+	/*
+	 * This bit must be '1' for the num_hw_ring_grps field to be
+	 * configured.
+	 */
+	#define HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS \
+		UINT32_C(0x800)
+	/*
+	 * The maximum transmission unit requested on the function.
+	 * The HWRM should make sure that the mtu of
+	 * the function does not exceed the mtu of the physical
+	 * port that this function is associated with.
+	 *
+	 * In addition to requesting mtu per function, it is
+	 * possible to configure mtu per transmit ring.
+	 * By default, the mtu of each transmit ring associated
+	 * with a function is equal to the mtu of the function.
+	 * The HWRM should make sure that the mtu of each transmit
+	 * ring that is assigned to a function has a valid mtu.
+	 */
+	uint16_t	mtu;
+	/*
+	 * The guest VLAN for the function being configured.
+	 * This field's format is the same as 802.1Q Tag's
+	 * Tag Control Information (TCI) format that includes both
+	 * Priority Code Point (PCP) and VLAN Identifier (VID).
+	 */
+	uint16_t	guest_vlan;
+	/*
+	 * ID of the target completion ring for receiving asynchronous
+	 * event completions. If this field is not valid, then the
+	 * HWRM shall use the default completion ring of the function
+	 * that is being configured as the target completion ring for
+	 * providing any asynchronous event completions for that
+	 * function.
+	 * If this field is valid, then the HWRM shall use the
+	 * completion ring identified by this ID as the target
+	 * completion ring for providing any asynchronous event
+	 * completions for the function that is being configured.
+	 */
+	uint16_t	async_event_cr;
+	/*
+	 * This value is the current MAC address requested by the VF
+	 * driver to be configured on this VF. A value of
+	 * 00-00-00-00-00-00 indicates no MAC address configuration
+	 * is requested by the VF driver.
+	 * The parent PF driver may reject or overwrite this
+	 * MAC address.
+	 */
+	uint8_t	dflt_mac_addr[6];
+	uint32_t	flags;
+	/*
+	 * This bit requests that the firmware test to see if all the assets
+	 * requested in this command (i.e. number of TX rings) are available.
+	 * The firmware will return an error if the requested assets are
+	 * not available. The firmware will NOT reserve the assets if they
+	 * are available.
+	 */
+	#define HWRM_FUNC_VF_CFG_INPUT_FLAGS_TX_ASSETS_TEST \
+		UINT32_C(0x1)
+	/*
+	 * This bit requests that the firmware test to see if all the assets
+	 * requested in this command (i.e. number of RX rings) are available.
+	 * The firmware will return an error if the requested assets are
+	 * not available. The firmware will NOT reserve the assets if they
+	 * are available.
+	 */
+	#define HWRM_FUNC_VF_CFG_INPUT_FLAGS_RX_ASSETS_TEST \
+		UINT32_C(0x2)
+	/*
+	 * This bit requests that the firmware test to see if all the assets
+	 * requested in this command (i.e. number of CMPL rings) are available.
+	 * The firmware will return an error if the requested assets are
+	 * not available. The firmware will NOT reserve the assets if they
+	 * are available.
+	 */
+	#define HWRM_FUNC_VF_CFG_INPUT_FLAGS_CMPL_ASSETS_TEST \
+		UINT32_C(0x4)
+	/*
+	 * This bit requests that the firmware test to see if all the assets
+	 * requested in this command (i.e. number of RSS ctx) are available.
+	 * The firmware will return an error if the requested assets are
+	 * not available. The firmware will NOT reserve the assets if they
+	 * are available.
+	 */
+	#define HWRM_FUNC_VF_CFG_INPUT_FLAGS_RSSCOS_CTX_ASSETS_TEST \
+		UINT32_C(0x8)
+	/*
+	 * This bit requests that the firmware test to see if all the assets
+	 * requested in this command (i.e. number of ring groups) are available.
+	 * The firmware will return an error if the requested assets are
+	 * not available. The firmware will NOT reserve the assets if they
+	 * are available.
+	 */
+	#define HWRM_FUNC_VF_CFG_INPUT_FLAGS_RING_GRP_ASSETS_TEST \
+		UINT32_C(0x10)
+	/*
+	 * This bit requests that the firmware test to see if all the assets
+	 * requested in this command (i.e. number of stat ctx) are available.
+	 * The firmware will return an error if the requested assets are
+	 * not available. The firmware will NOT reserve the assets if they
+	 * are available.
+	 */
+	#define HWRM_FUNC_VF_CFG_INPUT_FLAGS_STAT_CTX_ASSETS_TEST \
+		UINT32_C(0x20)
+	/*
+	 * This bit requests that the firmware test to see if all the assets
+	 * requested in this command (i.e. number of VNICs) are available.
+	 * The firmware will return an error if the requested assets are
+	 * not available. The firmware will NOT reserve the assets if they
+	 * are available.
+	 */
+	#define HWRM_FUNC_VF_CFG_INPUT_FLAGS_VNIC_ASSETS_TEST \
+		UINT32_C(0x40)
+	/*
+	 * This bit requests that the firmware test to see if all the assets
+	 * requested in this command (i.e. number of L2 ctx) are available.
+	 * The firmware will return an error if the requested assets are
+	 * not available. The firmware will NOT reserve the assets if they
+	 * are available.
+	 */
+	#define HWRM_FUNC_VF_CFG_INPUT_FLAGS_L2_CTX_ASSETS_TEST \
+		UINT32_C(0x80)
+	/* The number of RSS/COS contexts requested for the VF. */
+	uint16_t	num_rsscos_ctxs;
+	/* The number of completion rings requested for the VF. */
+	uint16_t	num_cmpl_rings;
+	/* The number of transmit rings requested for the VF. */
+	uint16_t	num_tx_rings;
+	/* The number of receive rings requested for the VF. */
+	uint16_t	num_rx_rings;
+	/* The number of L2 contexts requested for the VF. */
+	uint16_t	num_l2_ctxs;
+	/* The number of vnics requested for the VF. */
+	uint16_t	num_vnics;
+	/* The number of statistic contexts requested for the VF. */
+	uint16_t	num_stat_ctxs;
+	/* The number of HW ring groups requested for the VF. */
+	uint16_t	num_hw_ring_grps;
+	uint8_t	unused_0[4];
+} __rte_packed;
+
+/* hwrm_func_vf_cfg_output (size:128b/16B) */
+struct hwrm_func_vf_cfg_output {
+	/* The specific error status for the command. */
+	uint16_t	error_code;
+	/* The HWRM command request type. */
+	uint16_t	req_type;
+	/* The sequence ID from the original command. */
+	uint16_t	seq_id;
+	/* The length of the response data in number of bytes. */
+	uint16_t	resp_len;
+	uint8_t	unused_0[7];
+	/*
+	 * This field is used in Output records to indicate that the output
+	 * is completely written to RAM. This field should be read as '1'
+	 * to indicate that the output has been completely written.
+	 * When writing a command completion or response to an internal processor,
+	 * the order of writes has to be such that this field is written last.
+	 */
+	uint8_t	valid;
+} __rte_packed;
+
+/*******************
+ * hwrm_func_qcaps *
+ *******************/
+
+
+/* hwrm_func_qcaps_input (size:192b/24B) */
+struct hwrm_func_qcaps_input {
+	/* The HWRM command request type. */
+	uint16_t	req_type;
+	/*
+	 * The completion ring to send the completion event on. This should
+	 * be the NQ ID returned from the `nq_alloc` HWRM command.
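+	 *
+	 * Illustrative sketch (not part of the generated HWRM definition):
+	 * to query the requesting function's own capabilities, the 'fid'
+	 * field further below is set to all ones, e.g.
+	 *
+	 *   req.fid = rte_cpu_to_le_16(0xffff);
+	 *
+	 * where 'req' is a hypothetical struct hwrm_func_qcaps_input.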
+ */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* + * Function ID of the function that is being queried. + * 0xFF... (All Fs) if the query is for the requesting + * function. + */ + uint16_t fid; + uint8_t unused_0[6]; +} __rte_packed; + +/* hwrm_func_qcaps_output (size:704b/88B) */ +struct hwrm_func_qcaps_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* + * FID value. This value is used to identify operations on the PCI + * bus as belonging to a particular PCI function. + */ + uint16_t fid; + /* + * Port ID of port that this function is associated with. + * Valid only for the PF. + * 0xFF... (All Fs) if this function is not associated with + * any port. + * 0xFF... (All Fs) if this function is called from a VF. + */ + uint16_t port_id; + uint32_t flags; + /* If 1, then Push mode is supported on this function. */ + #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PUSH_MODE_SUPPORTED \ + UINT32_C(0x1) + /* + * If 1, then the global MSI-X auto-masking is enabled for the + * device. + */ + #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_GLOBAL_MSIX_AUTOMASKING \ + UINT32_C(0x2) + /* + * If 1, then the Precision Time Protocol (PTP) processing + * is supported on this function. + * The HWRM should enable PTP on only a single Physical + * Function (PF) per port. + */ + #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED \ + UINT32_C(0x4) + /* + * If 1, then RDMA over Converged Ethernet (RoCE) v1 + * is supported on this function. + */ + #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ROCE_V1_SUPPORTED \ + UINT32_C(0x8) + /* + * If 1, then RDMA over Converged Ethernet (RoCE) v2 + * is supported on this function. + */ + #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ROCE_V2_SUPPORTED \ + UINT32_C(0x10) + /* + * If 1, then control and configuration of WoL magic packet + * are supported on this function. + */ + #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_WOL_MAGICPKT_SUPPORTED \ + UINT32_C(0x20) + /* + * If 1, then control and configuration of bitmap pattern + * packet are supported on this function. + */ + #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_WOL_BMP_SUPPORTED \ + UINT32_C(0x40) + /* + * If set to 1, then the control and configuration of rate limit + * of an allocated TX ring on the queried function is supported. + */ + #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_TX_RING_RL_SUPPORTED \ + UINT32_C(0x80) + /* + * If 1, then control and configuration of minimum and + * maximum bandwidths are supported on the queried function. + */ + #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_TX_BW_CFG_SUPPORTED \ + UINT32_C(0x100) + /* + * If the query is for a VF, then this flag shall be ignored. 
+ * If this query is for a PF and this flag is set to 1, + * then the PF has the capability to set the rate limits + * on the TX rings of its children VFs. + * If this query is for a PF and this flag is set to 0, then + * the PF does not have the capability to set the rate limits + * on the TX rings of its children VFs. + */ + #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_VF_TX_RING_RL_SUPPORTED \ + UINT32_C(0x200) + /* + * If the query is for a VF, then this flag shall be ignored. + * If this query is for a PF and this flag is set to 1, + * then the PF has the capability to set the minimum and/or + * maximum bandwidths for its children VFs. + * If this query is for a PF and this flag is set to 0, then + * the PF does not have the capability to set the minimum or + * maximum bandwidths for its children VFs. + */ + #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_VF_BW_CFG_SUPPORTED \ + UINT32_C(0x400) + /* + * Standard TX Ring mode is used for the allocation of TX ring + * and underlying scheduling resources that allow bandwidth + * reservation and limit settings on the queried function. + * If set to 1, then standard TX ring mode is supported + * on the queried function. + * If set to 0, then standard TX ring mode is not available + * on the queried function. + */ + #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_STD_TX_RING_MODE_SUPPORTED \ + UINT32_C(0x800) + /* + * If the query is for a VF, then this flag shall be ignored, + * If this query is for a PF and this flag is set to 1, + * then the PF has the capability to detect GENEVE tunnel + * flags. + */ + #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_GENEVE_TUN_FLAGS_SUPPORTED \ + UINT32_C(0x1000) + /* + * If the query is for a VF, then this flag shall be ignored, + * If this query is for a PF and this flag is set to 1, + * then the PF has the capability to detect NVGRE tunnel + * flags. + */ + #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_NVGRE_TUN_FLAGS_SUPPORTED \ + UINT32_C(0x2000) + /* + * If the query is for a VF, then this flag shall be ignored, + * If this query is for a PF and this flag is set to 1, + * then the PF has the capability to detect GRE tunnel + * flags. + */ + #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_GRE_TUN_FLAGS_SUPPORTED \ + UINT32_C(0x4000) + /* + * If the query is for a VF, then this flag shall be ignored, + * If this query is for a PF and this flag is set to 1, + * then the PF has the capability to detect MPLS tunnel + * flags. + */ + #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_MPLS_TUN_FLAGS_SUPPORTED \ + UINT32_C(0x8000) + /* + * If the query is for a VF, then this flag shall be ignored, + * If this query is for a PF and this flag is set to 1, + * then the PF has the capability to support pcie stats. + */ + #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PCIE_STATS_SUPPORTED \ + UINT32_C(0x10000) + /* + * If the query is for a VF, then this flag shall be ignored, + * If this query is for a PF and this flag is set to 1, + * then the PF has the capability to adopt the VF's belonging + * to another PF. + */ + #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ADOPTED_PF_SUPPORTED \ + UINT32_C(0x20000) + /* + * If the query is for a VF, then this flag shall be ignored, + * If this query is for a PF and this flag is set to 1, + * then the PF has the administrative privilege to configure another PF + */ + #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ADMIN_PF_SUPPORTED \ + UINT32_C(0x40000) + /* + * If the query is for a VF, then this flag shall be ignored. 
+ * If this query is for a PF and this flag is set to 1, then + * the PF will know that the firmware has the capability to track + * the virtual link status. + */ + #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_LINK_ADMIN_STATUS_SUPPORTED \ + UINT32_C(0x80000) + /* + * If 1, then this function supports the push mode that uses + * write combine buffers and the long inline tx buffer descriptor. + */ + #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_WCB_PUSH_MODE \ + UINT32_C(0x100000) + /* + * If 1, then FW has capability to allocate TX rings dynamically + * in ring alloc even if PF reserved pool is zero. + * This bit will be used only for PFs. + */ + #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_DYNAMIC_TX_RING_ALLOC \ + UINT32_C(0x200000) + /* + * When this bit is '1', it indicates that core firmware is + * capable of Hot Reset. + */ + #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_HOT_RESET_CAPABLE \ + UINT32_C(0x400000) + /* + * This flag will be set to 1 by the FW if FW supports adapter error + * recovery. + */ + #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ERROR_RECOVERY_CAPABLE \ + UINT32_C(0x800000) + /* + * If the query is for a VF, then this flag shall be ignored. + * If this query is for a PF and this flag is set to 1, then + * the PF has the capability to support extended stats. + */ + #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT_STATS_SUPPORTED \ + UINT32_C(0x1000000) + /* + * If the query is for a VF, then this flag shall be ignored. + * If this query is for a PF and this flag is set to 1, then host + * must initiate reset or reload (or fastboot) the firmware image + * upon detection of device shutdown state. + */ + #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ERR_RECOVER_RELOAD \ + UINT32_C(0x2000000) + /* + * If the query is for a VF, then this flag (always set to 0) shall + * be ignored. If this query is for a PF and this flag is set to 1, + * host, when registered for the default vnic change async event, + * receives async notification whenever a default vnic state is + * changed for any of child or adopted VFs. + */ + #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_NOTIFY_VF_DEF_VNIC_CHNG_SUPPORTED \ + UINT32_C(0x4000000) + /* If set to 1, then the vlan acceleration for TX is disabled. */ + #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_VLAN_ACCELERATION_TX_DISABLED \ + UINT32_C(0x8000000) + /* + * When this bit is '1', it indicates that core firmware supports + * DBG_COREDUMP_XXX commands. + */ + #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_COREDUMP_CMD_SUPPORTED \ + UINT32_C(0x10000000) + /* + * When this bit is '1', it indicates that core firmware supports + * DBG_CRASHDUMP_XXX commands. + */ + #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_CRASHDUMP_CMD_SUPPORTED \ + UINT32_C(0x20000000) + /* + * If the query is for a VF, then this flag should be ignored. + * If the query is for a PF and this flag is set to 1, then + * the PF has the capability to support retrieval of + * rx_port_stats_ext_pfc_wd statistics (supported by the PFC + * WatchDog feature) via the hwrm_port_qstats_ext_pfc_wd command. + * If this flag is set to 1, only that (supported) command should + * be used for retrieval of PFC related statistics (rather than + * hwrm_port_qstats_ext command, which could previously be used). + */ + #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PFC_WD_STATS_SUPPORTED \ + UINT32_C(0x40000000) + /* + * This value is current MAC address configured for this + * function. A value of 00-00-00-00-00-00 indicates no + * MAC address is currently configured. + */ + uint8_t mac_address[6]; + /* + * The maximum number of RSS/COS contexts that can be + * allocated to the function. 
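+ * (Illustrative note, not part of the interface definition: a driver
+ * would normally clamp what it later requests via HWRM_FUNC_CFG to the
+ * maxima reported here, e.g.
+ *     wanted = RTE_MIN(wanted, rte_le_to_cpu_16(resp->max_rsscos_ctx));
+ * where `wanted` and `resp` are assumed local names.)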
+ */ + uint16_t max_rsscos_ctx; + /* + * The maximum number of completion rings that can be + * allocated to the function. + */ + uint16_t max_cmpl_rings; + /* + * The maximum number of transmit rings that can be + * allocated to the function. + */ + uint16_t max_tx_rings; + /* + * The maximum number of receive rings that can be + * allocated to the function. + */ + uint16_t max_rx_rings; + /* + * The maximum number of L2 contexts that can be + * allocated to the function. + */ + uint16_t max_l2_ctxs; + /* + * The maximum number of VNICs that can be + * allocated to the function. + */ + uint16_t max_vnics; + /* + * The identifier for the first VF enabled on a PF. This + * is valid only on the PF with SR-IOV enabled. + * 0xFF... (All Fs) if this command is called on a PF with + * SR-IOV disabled or on a VF. + */ + uint16_t first_vf_id; + /* + * The maximum number of VFs that can be + * allocated to the function. This is valid only on the + * PF with SR-IOV enabled. 0xFF... (All Fs) if this + * command is called on a PF with SR-IOV disabled or + * on a VF. + */ + uint16_t max_vfs; + /* + * The maximum number of statistic contexts that can be + * allocated to the function. + */ + uint16_t max_stat_ctx; + /* + * The maximum number of Encapsulation records that can be + * offloaded by this function. + */ + uint32_t max_encap_records; + /* + * The maximum number of decapsulation records that can + * be offloaded by this function. + */ + uint32_t max_decap_records; + /* + * The maximum number of Exact Match (EM) flows that can be + * offloaded by this function on the TX side. + */ + uint32_t max_tx_em_flows; + /* + * The maximum number of Wildcard Match (WM) flows that can + * be offloaded by this function on the TX side. + */ + uint32_t max_tx_wm_flows; + /* + * The maximum number of Exact Match (EM) flows that can be + * offloaded by this function on the RX side. + */ + uint32_t max_rx_em_flows; + /* + * The maximum number of Wildcard Match (WM) flows that can + * be offloaded by this function on the RX side. + */ + uint32_t max_rx_wm_flows; + /* + * The maximum number of multicast filters that can + * be supported by this function on the RX side. + */ + uint32_t max_mcast_filters; + /* + * The maximum value of flow_id that can be supported + * in completion records. + */ + uint32_t max_flow_id; + /* + * The maximum number of HW ring groups that can be + * supported on this function. + */ + uint32_t max_hw_ring_grps; + /* + * The maximum number of strict priority transmit rings + * that can be allocated to the function. + * This number indicates the maximum number of TX rings + * that can be assigned strict priorities out of the + * maximum number of TX rings that can be allocated + * (max_tx_rings) to the function. + */ + uint16_t max_sp_tx_rings; + uint8_t unused_0[2]; + uint32_t flags_ext; + /* + * If 1, the device can be configured to set the ECN bits in the + * IP header of received packets if the receive queue length + * exceeds a given threshold. + */ + #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT_ECN_MARK_SUPPORTED \ + UINT32_C(0x1) + /* + * If 1, the device can report the number of received packets + * that it marked as having experienced congestion. + */ + #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT_ECN_STATS_SUPPORTED \ + UINT32_C(0x2) + uint8_t unused_1[3]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. 
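+ * (Illustrative sketch, not part of the interface definition: a driver
+ * typically polls this byte, with a bounded timeout, before trusting the
+ * rest of the response, e.g.
+ *     while (!resp->valid)
+ *             rte_delay_us(1);
+ *     rte_io_rmb();
+ * where `resp` is an assumed pointer to this response structure and the
+ * read barrier orders the poll before the reads of the other fields.)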
+ * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/****************** + * hwrm_func_qcfg * + ******************/ + + +/* hwrm_func_qcfg_input (size:192b/24B) */ +struct hwrm_func_qcfg_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* + * Function ID of the function that is being queried. + * 0xFF... (All Fs) if the query is for the requesting + * function. + */ + uint16_t fid; + uint8_t unused_0[6]; +} __rte_packed; + +/* hwrm_func_qcfg_output (size:768b/96B) */ +struct hwrm_func_qcfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* + * FID value. This value is used to identify operations on the PCI + * bus as belonging to a particular PCI function. + */ + uint16_t fid; + /* + * Port ID of port that this function is associated with. + * 0xFF... (All Fs) if this function is not associated with + * any port. + */ + uint16_t port_id; + /* + * This value is the current VLAN setting for this + * function. The value of 0 for this field indicates + * no priority tagging or VLAN is used. + * This field's format is same as 802.1Q Tag's + * Tag Control Information (TCI) format that includes both + * Priority Code Point (PCP) and VLAN Identifier (VID). + */ + uint16_t vlan; + uint16_t flags; + /* + * If 1, then magic packet based Out-Of-Box WoL is enabled on + * the port associated with this function. + */ + #define HWRM_FUNC_QCFG_OUTPUT_FLAGS_OOB_WOL_MAGICPKT_ENABLED \ + UINT32_C(0x1) + /* + * If 1, then bitmap pattern based Out-Of-Box WoL packet is enabled + * on the port associated with this function. + */ + #define HWRM_FUNC_QCFG_OUTPUT_FLAGS_OOB_WOL_BMP_ENABLED \ + UINT32_C(0x2) + /* + * If set to 1, then FW based DCBX agent is enabled and running on + * the port associated with this function. + * If set to 0, then DCBX agent is not running in the firmware. + */ + #define HWRM_FUNC_QCFG_OUTPUT_FLAGS_FW_DCBX_AGENT_ENABLED \ + UINT32_C(0x4) + /* + * Standard TX Ring mode is used for the allocation of TX ring + * and underlying scheduling resources that allow bandwidth + * reservation and limit settings on the queried function. + * If set to 1, then standard TX ring mode is enabled + * on the queried function. 
+ * If set to 0, then the standard TX ring mode is disabled + * on the queried function. In this extended TX ring resource + * mode, the minimum and maximum bandwidth settings are not + * supported to allow the allocation of TX rings to span multiple + * scheduler nodes. + */ + #define HWRM_FUNC_QCFG_OUTPUT_FLAGS_STD_TX_RING_MODE_ENABLED \ + UINT32_C(0x8) + /* + * If set to 1 then FW based LLDP agent is enabled and running on + * the port associated with this function. + * If set to 0 then the LLDP agent is not running in the firmware. + */ + #define HWRM_FUNC_QCFG_OUTPUT_FLAGS_FW_LLDP_AGENT_ENABLED \ + UINT32_C(0x10) + /* + * If set to 1, then multi-host mode is active for this function. + * If set to 0, then multi-host mode is inactive for this function + * or not applicable for this device. + */ + #define HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_HOST \ + UINT32_C(0x20) + /* + * If the function that is being queried is a PF, then the HWRM shall + * set this field to 0 and the HWRM client shall ignore this field. + * If the function that is being queried is a VF, then the HWRM shall + * set this field to 1 if the queried VF is trusted, otherwise the HWRM + * shall set this field to 0. + */ + #define HWRM_FUNC_QCFG_OUTPUT_FLAGS_TRUSTED_VF \ + UINT32_C(0x40) + /* + * If set to 1, then secure mode is enabled for this function or device. + * If set to 0, then secure mode is disabled (or normal mode) for this + * function or device. + */ + #define HWRM_FUNC_QCFG_OUTPUT_FLAGS_SECURE_MODE_ENABLED \ + UINT32_C(0x80) + /* + * If set to 1, then this PF is enabled with a preboot driver that + * requires access to the legacy L2 ring model and legacy 32b + * doorbells. If set to 0, then this PF is not allowed to use + * the legacy L2 rings. This feature is not allowed on VFs and + * is only relevant for devices that require a context backing + * store. + */ + #define HWRM_FUNC_QCFG_OUTPUT_FLAGS_PREBOOT_LEGACY_L2_RINGS \ + UINT32_C(0x100) + /* + * This value is current MAC address configured for this + * function. A value of 00-00-00-00-00-00 indicates no + * MAC address is currently configured. + */ + uint8_t mac_address[6]; + /* + * This value is current PCI ID of this + * function. If ARI is enabled, then it is + * Bus Number (8b):Function Number(8b). Otherwise, it is + * Bus Number (8b):Device Number (4b):Function Number(4b). + * If multi-host mode is active, the 4 lsb will indicate + * the PF index for this function. + */ + uint16_t pci_id; + /* + * The number of RSS/COS contexts currently + * allocated to the function. + */ + uint16_t alloc_rsscos_ctx; + /* + * The number of completion rings currently allocated to + * the function. This does not include the rings allocated + * to any children functions if any. + */ + uint16_t alloc_cmpl_rings; + /* + * The number of transmit rings currently allocated to + * the function. This does not include the rings allocated + * to any children functions if any. + */ + uint16_t alloc_tx_rings; + /* + * The number of receive rings currently allocated to + * the function. This does not include the rings allocated + * to any children functions if any. + */ + uint16_t alloc_rx_rings; + /* The allocated number of L2 contexts to the function. */ + uint16_t alloc_l2_ctx; + /* The allocated number of vnics to the function. */ + uint16_t alloc_vnics; + /* + * The maximum transmission unit of the function. + * If the reported mtu value is non-zero then it will used for the + * rings allocated on this function. 
otherwise the default + * value is used if ring MTU is not specified. + */ + uint16_t mtu; + /* + * The maximum receive unit of the function. + * For vnics allocated on this function, this default + * value is used if vnic MRU is not specified. + */ + uint16_t mru; + /* The statistics context assigned to a function. */ + uint16_t stat_ctx_id; + /* + * The HWRM shall return Unknown value for this field + * when this command is used to query VF's configuration. + */ + uint8_t port_partition_type; + /* Single physical function */ + #define HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_SPF UINT32_C(0x0) + /* Multiple physical functions */ + #define HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_MPFS UINT32_C(0x1) + /* Network Partitioning 1.0 */ + #define HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0 UINT32_C(0x2) + /* Network Partitioning 1.5 */ + #define HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5 UINT32_C(0x3) + /* Network Partitioning 2.0 */ + #define HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0 UINT32_C(0x4) + /* Unknown */ + #define HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_UNKNOWN \ + UINT32_C(0xff) + #define HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_LAST \ + HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_UNKNOWN + /* + * This field will indicate number of physical functions on this port_partition. + * HWRM shall return unavail (i.e. value of 0) for this field + * when this command is used to query VF's configuration or + * from older firmware that doesn't support this field. + */ + uint8_t port_pf_cnt; + /* number of PFs is not available */ + #define HWRM_FUNC_QCFG_OUTPUT_PORT_PF_CNT_UNAVAIL UINT32_C(0x0) + #define HWRM_FUNC_QCFG_OUTPUT_PORT_PF_CNT_LAST \ + HWRM_FUNC_QCFG_OUTPUT_PORT_PF_CNT_UNAVAIL + /* + * The default VNIC ID assigned to a function that is + * being queried. + */ + uint16_t dflt_vnic_id; + uint16_t max_mtu_configured; + /* + * Minimum BW allocated for this function. + * The HWRM will translate this value into byte counter and + * time interval used for the scheduler inside the device. + * A value of 0 indicates the minimum bandwidth is not + * configured. + */ + uint32_t min_bw; + /* The bandwidth value. */ + #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_SFT 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_SCALE_LAST \ + HWRM_FUNC_QCFG_OUTPUT_MIN_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_SFT 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. 
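+ * (Illustrative only: a value reported with this unit converts back to a
+ * percentage as
+ *     pct = (rte_le_to_cpu_32(resp->min_bw) &
+ *            HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_MASK) / 100.0;
+ * where `pct` and `resp` are assumed local names.)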
*/ + #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_LAST \ + HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_INVALID + /* + * Maximum BW allocated for this function. + * The HWRM will translate this value into byte counter and + * time interval used for the scheduler inside the device. + * A value of 0 indicates that the maximum bandwidth is not + * configured. + */ + uint32_t max_bw; + /* The bandwidth value. */ + #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_SFT 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_SCALE_LAST \ + HWRM_FUNC_QCFG_OUTPUT_MAX_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_SFT 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_LAST \ + HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_INVALID + /* + * This value indicates the Edge virtual bridge mode for the + * domain that this function belongs to. + */ + uint8_t evb_mode; + /* No Edge Virtual Bridging (EVB) */ + #define HWRM_FUNC_QCFG_OUTPUT_EVB_MODE_NO_EVB UINT32_C(0x0) + /* Virtual Ethernet Bridge (VEB) */ + #define HWRM_FUNC_QCFG_OUTPUT_EVB_MODE_VEB UINT32_C(0x1) + /* Virtual Ethernet Port Aggregator (VEPA) */ + #define HWRM_FUNC_QCFG_OUTPUT_EVB_MODE_VEPA UINT32_C(0x2) + #define HWRM_FUNC_QCFG_OUTPUT_EVB_MODE_LAST \ + HWRM_FUNC_QCFG_OUTPUT_EVB_MODE_VEPA + uint8_t options; + /* + * This value indicates the PCIE device cache line size. + * The cache line size allows the DMA writes to terminate and + * start at the cache boundary. + */ + #define HWRM_FUNC_QCFG_OUTPUT_OPTIONS_CACHE_LINESIZE_MASK \ + UINT32_C(0x3) + #define HWRM_FUNC_QCFG_OUTPUT_OPTIONS_CACHE_LINESIZE_SFT 0 + /* Cache Line Size 64 bytes */ + #define HWRM_FUNC_QCFG_OUTPUT_OPTIONS_CACHE_LINESIZE_SIZE_64 \ + UINT32_C(0x0) + /* Cache Line Size 128 bytes */ + #define HWRM_FUNC_QCFG_OUTPUT_OPTIONS_CACHE_LINESIZE_SIZE_128 \ + UINT32_C(0x1) + #define HWRM_FUNC_QCFG_OUTPUT_OPTIONS_CACHE_LINESIZE_LAST \ + HWRM_FUNC_QCFG_OUTPUT_OPTIONS_CACHE_LINESIZE_SIZE_128 + /* This value is the virtual link admin state setting. 
*/ + #define HWRM_FUNC_QCFG_OUTPUT_OPTIONS_LINK_ADMIN_STATE_MASK \ + UINT32_C(0xc) + #define HWRM_FUNC_QCFG_OUTPUT_OPTIONS_LINK_ADMIN_STATE_SFT 2 + /* Admin link state is in forced down mode. */ + #define HWRM_FUNC_QCFG_OUTPUT_OPTIONS_LINK_ADMIN_STATE_FORCED_DOWN \ + (UINT32_C(0x0) << 2) + /* Admin link state is in forced up mode. */ + #define HWRM_FUNC_QCFG_OUTPUT_OPTIONS_LINK_ADMIN_STATE_FORCED_UP \ + (UINT32_C(0x1) << 2) + /* Admin link state is in auto mode - follows the physical link state. */ + #define HWRM_FUNC_QCFG_OUTPUT_OPTIONS_LINK_ADMIN_STATE_AUTO \ + (UINT32_C(0x2) << 2) + #define HWRM_FUNC_QCFG_OUTPUT_OPTIONS_LINK_ADMIN_STATE_LAST \ + HWRM_FUNC_QCFG_OUTPUT_OPTIONS_LINK_ADMIN_STATE_AUTO + /* Reserved for future. */ + #define HWRM_FUNC_QCFG_OUTPUT_OPTIONS_RSVD_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_QCFG_OUTPUT_OPTIONS_RSVD_SFT 4 + /* + * The number of VFs that are allocated to the function. + * This is valid only on the PF with SR-IOV enabled. + * 0xFF... (All Fs) if this command is called on a PF with + * SR-IOV disabled or on a VF. + */ + uint16_t alloc_vfs; + /* + * The number of allocated multicast filters for this + * function on the RX side. + */ + uint32_t alloc_mcast_filters; + /* + * The number of allocated HW ring groups for this + * function. + */ + uint32_t alloc_hw_ring_grps; + /* + * The number of strict priority transmit rings out of + * currently allocated TX rings to the function + * (alloc_tx_rings). + */ + uint16_t alloc_sp_tx_rings; + /* + * The number of statistics contexts + * currently reserved for the function. + */ + uint16_t alloc_stat_ctx; + /* + * This field specifies how many NQs are reserved for the PF. + * Remaining NQs that belong to the PF are available for VFs. + * Once a PF has created VFs, it cannot change how many NQs are + * reserved for itself (since the NQs must be contiguous in HW). + */ + uint16_t alloc_msix; + /* + * The number of registered VF’s associated with the PF. This field + * should be ignored when the request received on the VF interface. + * This field will be updated on the PF interface to initiate + * the unregister request on PF in the HOT Reset Process. + */ + uint16_t registered_vfs; + /* + * The size of the doorbell BAR in KBytes reserved for L2 including + * any area that is shared between L2 and RoCE. The L2 driver + * should only map the L2 portion of the doorbell BAR. Any rounding + * of the BAR size to the native CPU page size should be performed + * by the driver. If the value is zero, no special partitioning + * of the doorbell BAR between L2 and RoCE is required. + */ + uint16_t l2_doorbell_bar_size_kb; + uint8_t unused_1; + /* + * For backward compatibility this field must be set to 1. + * Older drivers might look for this field to be 1 before + * processing the message. + */ + uint8_t always_1; + /* + * This GRC address location is used by the Host driver interfaces to poll + * the adapter ready state to re-initiate the registration process again + * after receiving the RESET Notify event. + */ + uint32_t reset_addr_poll; + /* + * This field specifies legacy L2 doorbell size in KBytes. Drivers should use + * this value to find out the doorbell page offset from the BAR. + */ + uint16_t legacy_l2_db_size_kb; + uint16_t svif_info; + /* + * This field specifies the source virtual interface of the function being + * queried. 
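+ * (Illustrative only: a driver would typically extract and validate it as
+ *     uint16_t svif_info = rte_le_to_cpu_16(resp->svif_info);
+ *     if (svif_info & HWRM_FUNC_QCFG_OUTPUT_SVIF_INFO_SVIF_VALID)
+ *             svif = svif_info & HWRM_FUNC_QCFG_OUTPUT_SVIF_INFO_SVIF_MASK;
+ * where `resp` and `svif` are assumed local names.)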
Drivers can use this to program svif field in the L2 context + * table + */ + #define HWRM_FUNC_QCFG_OUTPUT_SVIF_INFO_SVIF_MASK UINT32_C(0x7fff) + #define HWRM_FUNC_QCFG_OUTPUT_SVIF_INFO_SVIF_SFT 0 + /* This field specifies whether svif is valid or not */ + #define HWRM_FUNC_QCFG_OUTPUT_SVIF_INFO_SVIF_VALID UINT32_C(0x8000) + uint8_t unused_2[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/***************** + * hwrm_func_cfg * + *****************/ + + +/* hwrm_func_cfg_input (size:704b/88B) */ +struct hwrm_func_cfg_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* + * Function ID of the function that is being + * configured. + * If set to 0xFF... (All Fs), then the the configuration is + * for the requesting function. + */ + uint16_t fid; + /* + * This field specifies how many NQs will be reserved for the PF. + * Remaining NQs that belong to the PF become available for VFs. + * Once a PF has created VFs, it cannot change how many NQs are + * reserved for itself (since the NQs must be contiguous in HW). + */ + uint16_t num_msix; + uint32_t flags; + /* + * When this bit is '1', the function is disabled with + * source MAC address check. + * This is an anti-spoofing check. If this flag is set, + * then the function shall be configured to disallow + * transmission of frames with the source MAC address that + * is configured for this function. + */ + #define HWRM_FUNC_CFG_INPUT_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE \ + UINT32_C(0x1) + /* + * When this bit is '1', the function is enabled with + * source MAC address check. + * This is an anti-spoofing check. If this flag is set, + * then the function shall be configured to allow + * transmission of frames with the source MAC address that + * is configured for this function. + */ + #define HWRM_FUNC_CFG_INPUT_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE \ + UINT32_C(0x2) + /* reserved. */ + #define HWRM_FUNC_CFG_INPUT_FLAGS_RSVD_MASK \ + UINT32_C(0x1fc) + #define HWRM_FUNC_CFG_INPUT_FLAGS_RSVD_SFT 2 + /* + * Standard TX Ring mode is used for the allocation of TX ring + * and underlying scheduling resources that allow bandwidth + * reservation and limit settings on the queried function. + * If set to 1, then standard TX ring mode is requested to be + * enabled on the function being configured. 
+ */ + #define HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE \ + UINT32_C(0x200) + /* + * Standard TX Ring mode is used for the allocation of TX ring + * and underlying scheduling resources that allow bandwidth + * reservation and limit settings on the queried function. + * If set to 1, then the standard TX ring mode is requested to + * be disabled on the function being configured. In this extended + * TX ring resource mode, the minimum and maximum bandwidth settings + * are not supported to allow the allocation of TX rings to + * span multiple scheduler nodes. + */ + #define HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE \ + UINT32_C(0x400) + /* + * If this bit is set, virtual mac address configured + * in this command will be persistent over warm boot. + */ + #define HWRM_FUNC_CFG_INPUT_FLAGS_VIRT_MAC_PERSIST \ + UINT32_C(0x800) + /* + * This bit only applies to the VF. If this bit is set, the statistic + * context counters will not be cleared when the statistic context is freed + * or a function reset is called on VF. This bit will be cleared when the PF + * is unloaded or a function reset is called on the PF. + */ + #define HWRM_FUNC_CFG_INPUT_FLAGS_NO_AUTOCLEAR_STATISTIC \ + UINT32_C(0x1000) + /* + * This bit requests that the firmware test to see if all the assets + * requested in this command (i.e. number of TX rings) are available. + * The firmware will return an error if the requested assets are + * not available. The firwmare will NOT reserve the assets if they + * are available. + */ + #define HWRM_FUNC_CFG_INPUT_FLAGS_TX_ASSETS_TEST \ + UINT32_C(0x2000) + /* + * This bit requests that the firmware test to see if all the assets + * requested in this command (i.e. number of RX rings) are available. + * The firmware will return an error if the requested assets are + * not available. The firwmare will NOT reserve the assets if they + * are available. + */ + #define HWRM_FUNC_CFG_INPUT_FLAGS_RX_ASSETS_TEST \ + UINT32_C(0x4000) + /* + * This bit requests that the firmware test to see if all the assets + * requested in this command (i.e. number of CMPL rings) are available. + * The firmware will return an error if the requested assets are + * not available. The firwmare will NOT reserve the assets if they + * are available. + */ + #define HWRM_FUNC_CFG_INPUT_FLAGS_CMPL_ASSETS_TEST \ + UINT32_C(0x8000) + /* + * This bit requests that the firmware test to see if all the assets + * requested in this command (i.e. number of RSS ctx) are available. + * The firmware will return an error if the requested assets are + * not available. The firwmare will NOT reserve the assets if they + * are available. + */ + #define HWRM_FUNC_CFG_INPUT_FLAGS_RSSCOS_CTX_ASSETS_TEST \ + UINT32_C(0x10000) + /* + * This bit requests that the firmware test to see if all the assets + * requested in this command (i.e. number of ring groups) are available. + * The firmware will return an error if the requested assets are + * not available. The firwmare will NOT reserve the assets if they + * are available. + */ + #define HWRM_FUNC_CFG_INPUT_FLAGS_RING_GRP_ASSETS_TEST \ + UINT32_C(0x20000) + /* + * This bit requests that the firmware test to see if all the assets + * requested in this command (i.e. number of stat ctx) are available. + * The firmware will return an error if the requested assets are + * not available. The firwmare will NOT reserve the assets if they + * are available. 
+ */ + #define HWRM_FUNC_CFG_INPUT_FLAGS_STAT_CTX_ASSETS_TEST \ + UINT32_C(0x40000) + /* + * This bit requests that the firmware test to see if all the assets + * requested in this command (i.e. number of VNICs) are available. + * The firmware will return an error if the requested assets are + * not available. The firwmare will NOT reserve the assets if they + * are available. + */ + #define HWRM_FUNC_CFG_INPUT_FLAGS_VNIC_ASSETS_TEST \ + UINT32_C(0x80000) + /* + * This bit requests that the firmware test to see if all the assets + * requested in this command (i.e. number of L2 ctx) are available. + * The firmware will return an error if the requested assets are + * not available. The firwmare will NOT reserve the assets if they + * are available. + */ + #define HWRM_FUNC_CFG_INPUT_FLAGS_L2_CTX_ASSETS_TEST \ + UINT32_C(0x100000) + /* + * This configuration change can be initiated by a PF driver. This + * configuration request shall be targeted to a VF. From local host + * resident HWRM clients, only the parent PF driver shall be allowed + * to initiate this change on one of its children VFs. If this bit is + * set to 1, then the VF that is being configured is requested to be + * trusted. + */ + #define HWRM_FUNC_CFG_INPUT_FLAGS_TRUSTED_VF_ENABLE \ + UINT32_C(0x200000) + /* + * When this bit it set, even if PF reserved pool size is zero, + * FW will allow driver to create TX rings in ring alloc, + * by reserving TX ring, S3 node dynamically. + */ + #define HWRM_FUNC_CFG_INPUT_FLAGS_DYNAMIC_TX_RING_ALLOC \ + UINT32_C(0x400000) + /* + * This bit requests that the firmware test to see if all the assets + * requested in this command (i.e. number of NQ rings) are available. + * The firmware will return an error if the requested assets are + * not available. The firwmare will NOT reserve the assets if they + * are available. + */ + #define HWRM_FUNC_CFG_INPUT_FLAGS_NQ_ASSETS_TEST \ + UINT32_C(0x800000) + /* + * This configuration change can be initiated by a PF driver. This + * configuration request shall be targeted to a VF. From local host + * resident HWRM clients, only the parent PF driver shall be allowed + * to initiate this change on one of its children VFs. If this bit is + * set to 1, then the VF that is being configured is requested to be + * untrusted. + */ + #define HWRM_FUNC_CFG_INPUT_FLAGS_TRUSTED_VF_DISABLE \ + UINT32_C(0x1000000) + /* + * This bit is used by preboot drivers on a PF that require access + * to the legacy L2 ring model and legacy 32b doorbells. This + * feature is not allowed on VFs and is only relevant for devices + * that require a context backing store. + */ + #define HWRM_FUNC_CFG_INPUT_FLAGS_PREBOOT_LEGACY_L2_RINGS \ + UINT32_C(0x2000000) + uint32_t enables; + /* + * This bit must be '1' for the mtu field to be + * configured. + */ + #define HWRM_FUNC_CFG_INPUT_ENABLES_MTU \ + UINT32_C(0x1) + /* + * This bit must be '1' for the mru field to be + * configured. + */ + #define HWRM_FUNC_CFG_INPUT_ENABLES_MRU \ + UINT32_C(0x2) + /* + * This bit must be '1' for the num_rsscos_ctxs field to be + * configured. + */ + #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS \ + UINT32_C(0x4) + /* + * This bit must be '1' for the num_cmpl_rings field to be + * configured. + */ + #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS \ + UINT32_C(0x8) + /* + * This bit must be '1' for the num_tx_rings field to be + * configured. + */ + #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS \ + UINT32_C(0x10) + /* + * This bit must be '1' for the num_rx_rings field to be + * configured. 
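+ * (Illustrative usage, not part of the definition: the enables bit and the
+ * field it guards are set together, e.g.
+ *     req.num_rx_rings = rte_cpu_to_le_16(4);
+ *     req.enables |= rte_cpu_to_le_32(
+ *             HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS);
+ * where `req` is an assumed struct hwrm_func_cfg_input instance.)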
+ */ + #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS \ + UINT32_C(0x20) + /* + * This bit must be '1' for the num_l2_ctxs field to be + * configured. + */ + #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS \ + UINT32_C(0x40) + /* + * This bit must be '1' for the num_vnics field to be + * configured. + */ + #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS \ + UINT32_C(0x80) + /* + * This bit must be '1' for the num_stat_ctxs field to be + * configured. + */ + #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS \ + UINT32_C(0x100) + /* + * This bit must be '1' for the dflt_mac_addr field to be + * configured. + */ + #define HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR \ + UINT32_C(0x200) + /* + * This bit must be '1' for the dflt_vlan field to be + * configured. + */ + #define HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN \ + UINT32_C(0x400) + /* + * This bit must be '1' for the dflt_ip_addr field to be + * configured. + */ + #define HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_IP_ADDR \ + UINT32_C(0x800) + /* + * This bit must be '1' for the min_bw field to be + * configured. + */ + #define HWRM_FUNC_CFG_INPUT_ENABLES_MIN_BW \ + UINT32_C(0x1000) + /* + * This bit must be '1' for the max_bw field to be + * configured. + */ + #define HWRM_FUNC_CFG_INPUT_ENABLES_MAX_BW \ + UINT32_C(0x2000) + /* + * This bit must be '1' for the async_event_cr field to be + * configured. + */ + #define HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR \ + UINT32_C(0x4000) + /* + * This bit must be '1' for the vlan_antispoof_mode field to be + * configured. + */ + #define HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE \ + UINT32_C(0x8000) + /* + * This bit must be '1' for the allowed_vlan_pris field to be + * configured. + */ + #define HWRM_FUNC_CFG_INPUT_ENABLES_ALLOWED_VLAN_PRIS \ + UINT32_C(0x10000) + /* + * This bit must be '1' for the evb_mode field to be + * configured. + */ + #define HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE \ + UINT32_C(0x20000) + /* + * This bit must be '1' for the num_mcast_filters field to be + * configured. + */ + #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_MCAST_FILTERS \ + UINT32_C(0x40000) + /* + * This bit must be '1' for the num_hw_ring_grps field to be + * configured. + */ + #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS \ + UINT32_C(0x80000) + /* + * This bit must be '1' for the cache_linesize field to be + * configured. + */ + #define HWRM_FUNC_CFG_INPUT_ENABLES_CACHE_LINESIZE \ + UINT32_C(0x100000) + /* + * This bit must be '1' for the num_msix field to be + * configured. + */ + #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_MSIX \ + UINT32_C(0x200000) + /* + * This bit must be '1' for the link admin state field to be + * configured. + */ + #define HWRM_FUNC_CFG_INPUT_ENABLES_ADMIN_LINK_STATE \ + UINT32_C(0x400000) + /* + * The maximum transmission unit of the function. + * The HWRM should make sure that the mtu of + * the function does not exceed the mtu of the physical + * port that this function is associated with. + * + * In addition to configuring mtu per function, it is + * possible to configure mtu per transmit ring. + * By default, the mtu of each transmit ring associated + * with a function is equal to the mtu of the function. + * The HWRM should make sure that the mtu of each transmit + * ring that is assigned to a function has a valid mtu. + */ + uint16_t mtu; + /* + * The maximum receive unit of the function. + * The HWRM should make sure that the mru of + * the function does not exceed the mru of the physical + * port that this function is associated with. 
+ * + * In addition to configuring mru per function, it is + * possible to configure mru per vnic. + * By default, the mru of each vnic associated + * with a function is equal to the mru of the function. + * The HWRM should make sure that the mru of each vnic + * that is assigned to a function has a valid mru. + */ + uint16_t mru; + /* + * The number of RSS/COS contexts requested for the + * function. + */ + uint16_t num_rsscos_ctxs; + /* + * The number of completion rings requested for the + * function. This does not include the rings allocated + * to any children functions if any. + */ + uint16_t num_cmpl_rings; + /* + * The number of transmit rings requested for the function. + * This does not include the rings allocated to any + * children functions if any. + */ + uint16_t num_tx_rings; + /* + * The number of receive rings requested for the function. + * This does not include the rings allocated + * to any children functions if any. + */ + uint16_t num_rx_rings; + /* The requested number of L2 contexts for the function. */ + uint16_t num_l2_ctxs; + /* The requested number of vnics for the function. */ + uint16_t num_vnics; + /* The requested number of statistic contexts for the function. */ + uint16_t num_stat_ctxs; + /* + * The number of HW ring groups that should + * be reserved for this function. + */ + uint16_t num_hw_ring_grps; + /* The default MAC address for the function being configured. */ + uint8_t dflt_mac_addr[6]; + /* + * The default VLAN for the function being configured. + * This field's format is same as 802.1Q Tag's + * Tag Control Information (TCI) format that includes both + * Priority Code Point (PCP) and VLAN Identifier (VID). + */ + uint16_t dflt_vlan; + /* + * The default IP address for the function being configured. + * This address is only used in enabling source property check. + */ + uint32_t dflt_ip_addr[4]; + /* + * Minimum BW allocated for this function. + * The HWRM will translate this value into byte counter and + * time interval used for the scheduler inside the device. + */ + uint32_t min_bw; + /* The bandwidth value. */ + #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_SFT 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_FUNC_CFG_INPUT_MIN_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_FUNC_CFG_INPUT_MIN_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_FUNC_CFG_INPUT_MIN_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_FUNC_CFG_INPUT_MIN_BW_SCALE_LAST \ + HWRM_FUNC_CFG_INPUT_MIN_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_SFT 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. 
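+ * (Illustrative only: a 25% guarantee could be encoded with this unit as
+ *     req.min_bw = rte_cpu_to_le_32(2500 |
+ *             HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_PERCENT1_100);
+ *     req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MIN_BW);
+ * where `req` is an assumed struct hwrm_func_cfg_input instance.)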
*/ + #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_LAST \ + HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_INVALID + /* + * Maximum BW allocated for this function. + * The HWRM will translate this value into byte counter and + * time interval used for the scheduler inside the device. + */ + uint32_t max_bw; + /* The bandwidth value. */ + #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_SFT 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_FUNC_CFG_INPUT_MAX_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_FUNC_CFG_INPUT_MAX_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_FUNC_CFG_INPUT_MAX_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_FUNC_CFG_INPUT_MAX_BW_SCALE_LAST \ + HWRM_FUNC_CFG_INPUT_MAX_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_SFT 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_LAST \ + HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_INVALID + /* + * ID of the target completion ring for receiving asynchronous + * event completions. If this field is not valid, then the + * HWRM shall use the default completion ring of the function + * that is being configured as the target completion ring for + * providing any asynchronous event completions for that + * function. + * If this field is valid, then the HWRM shall use the + * completion ring identified by this ID as the target + * completion ring for providing any asynchronous event + * completions for the function that is being configured. + */ + uint16_t async_event_cr; + /* VLAN Anti-spoofing mode. */ + uint8_t vlan_antispoof_mode; + /* No VLAN anti-spoofing checks are enabled */ + #define HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK \ + UINT32_C(0x0) + /* Validate VLAN against the configured VLAN(s) */ + #define HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN \ + UINT32_C(0x1) + /* Insert VLAN if it does not exist, otherwise discard */ + #define HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_INSERT_IF_VLANDNE \ + UINT32_C(0x2) + /* Insert VLAN if it does not exist, override VLAN if it exists */ + #define HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_INSERT_OR_OVERRIDE_VLAN \ + UINT32_C(0x3) + #define HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_LAST \ + HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_INSERT_OR_OVERRIDE_VLAN + /* + * This bit field defines VLAN PRIs that are allowed on + * this function. 
+ * If nth bit is set, then VLAN PRI n is allowed on this + * function. + */ + uint8_t allowed_vlan_pris; + /* + * The HWRM shall allow a PF driver to change EVB mode for the + * partition it belongs to. + * The HWRM shall not allow a VF driver to change the EVB mode. + * The HWRM shall take into account the switching of EVB mode + * from one to another and reconfigure hardware resources as + * appropriately. + * The switching from VEB to VEPA mode requires + * the disabling of the loopback traffic. Additionally, + * source knock outs are handled differently in VEB and VEPA + * modes. + */ + uint8_t evb_mode; + /* No Edge Virtual Bridging (EVB) */ + #define HWRM_FUNC_CFG_INPUT_EVB_MODE_NO_EVB UINT32_C(0x0) + /* Virtual Ethernet Bridge (VEB) */ + #define HWRM_FUNC_CFG_INPUT_EVB_MODE_VEB UINT32_C(0x1) + /* Virtual Ethernet Port Aggregator (VEPA) */ + #define HWRM_FUNC_CFG_INPUT_EVB_MODE_VEPA UINT32_C(0x2) + #define HWRM_FUNC_CFG_INPUT_EVB_MODE_LAST \ + HWRM_FUNC_CFG_INPUT_EVB_MODE_VEPA + uint8_t options; + /* + * This value indicates the PCIE device cache line size. + * The cache line size allows the DMA writes to terminate and + * start at the cache boundary. + */ + #define HWRM_FUNC_CFG_INPUT_OPTIONS_CACHE_LINESIZE_MASK \ + UINT32_C(0x3) + #define HWRM_FUNC_CFG_INPUT_OPTIONS_CACHE_LINESIZE_SFT 0 + /* Cache Line Size 64 bytes */ + #define HWRM_FUNC_CFG_INPUT_OPTIONS_CACHE_LINESIZE_SIZE_64 \ + UINT32_C(0x0) + /* Cache Line Size 128 bytes */ + #define HWRM_FUNC_CFG_INPUT_OPTIONS_CACHE_LINESIZE_SIZE_128 \ + UINT32_C(0x1) + #define HWRM_FUNC_CFG_INPUT_OPTIONS_CACHE_LINESIZE_LAST \ + HWRM_FUNC_CFG_INPUT_OPTIONS_CACHE_LINESIZE_SIZE_128 + /* This value is the virtual link admin state setting. */ + #define HWRM_FUNC_CFG_INPUT_OPTIONS_LINK_ADMIN_STATE_MASK \ + UINT32_C(0xc) + #define HWRM_FUNC_CFG_INPUT_OPTIONS_LINK_ADMIN_STATE_SFT 2 + /* Admin state is forced down. */ + #define HWRM_FUNC_CFG_INPUT_OPTIONS_LINK_ADMIN_STATE_FORCED_DOWN \ + (UINT32_C(0x0) << 2) + /* Admin state is forced up. */ + #define HWRM_FUNC_CFG_INPUT_OPTIONS_LINK_ADMIN_STATE_FORCED_UP \ + (UINT32_C(0x1) << 2) + /* Admin state is in auto mode - is to follow the physical link state. */ + #define HWRM_FUNC_CFG_INPUT_OPTIONS_LINK_ADMIN_STATE_AUTO \ + (UINT32_C(0x2) << 2) + #define HWRM_FUNC_CFG_INPUT_OPTIONS_LINK_ADMIN_STATE_LAST \ + HWRM_FUNC_CFG_INPUT_OPTIONS_LINK_ADMIN_STATE_AUTO + /* Reserved for future. */ + #define HWRM_FUNC_CFG_INPUT_OPTIONS_RSVD_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_CFG_INPUT_OPTIONS_RSVD_SFT 4 + /* + * The number of multicast filters that should + * be reserved for this function on the RX side. + */ + uint16_t num_mcast_filters; +} __rte_packed; + +/* hwrm_func_cfg_output (size:128b/16B) */ +struct hwrm_func_cfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. 
+ */ + uint8_t valid; +} __rte_packed; + +/******************** + * hwrm_func_qstats * + ********************/ + + +/* hwrm_func_qstats_input (size:192b/24B) */ +struct hwrm_func_qstats_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* + * Function ID of the function that is being queried. + * 0xFF... (All Fs) if the query is for the requesting + * function. + * A privileged PF can query for other function's statistics. + */ + uint16_t fid; + /* This flags indicates the type of statistics request. */ + uint8_t flags; + /* This value is not used to avoid backward compatibility issues. */ + #define HWRM_FUNC_QSTATS_INPUT_FLAGS_UNUSED UINT32_C(0x0) + /* + * flags should be set to 1 when request is for only RoCE statistics. + * This will be honored only if the caller_fid is a privileged PF. + * In all other cases FID and caller_fid should be the same. + */ + #define HWRM_FUNC_QSTATS_INPUT_FLAGS_ROCE_ONLY UINT32_C(0x1) + #define HWRM_FUNC_QSTATS_INPUT_FLAGS_LAST \ + HWRM_FUNC_QSTATS_INPUT_FLAGS_ROCE_ONLY + uint8_t unused_0[5]; +} __rte_packed; + +/* hwrm_func_qstats_output (size:1408b/176B) */ +struct hwrm_func_qstats_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* Number of transmitted unicast packets on the function. */ + uint64_t tx_ucast_pkts; + /* Number of transmitted multicast packets on the function. */ + uint64_t tx_mcast_pkts; + /* Number of transmitted broadcast packets on the function. */ + uint64_t tx_bcast_pkts; + /* + * Number of transmitted packets that were discarded due to + * internal NIC resource problems. For transmit, this + * can only happen if TMP is configured to allow dropping + * in HOL blocking conditions, which is not a normal + * configuration. + */ + uint64_t tx_discard_pkts; + /* + * Number of dropped packets on transmit path on the function. + * These are packets that have been marked for drop by + * the TE CFA block or are packets that exceeded the + * transmit MTU limit for the function. + */ + uint64_t tx_drop_pkts; + /* Number of transmitted bytes for unicast traffic on the function. */ + uint64_t tx_ucast_bytes; + /* Number of transmitted bytes for multicast traffic on the function. */ + uint64_t tx_mcast_bytes; + /* Number of transmitted bytes for broadcast traffic on the function. */ + uint64_t tx_bcast_bytes; + /* Number of received unicast packets on the function. 
*/ + uint64_t rx_ucast_pkts; + /* Number of received multicast packets on the function. */ + uint64_t rx_mcast_pkts; + /* Number of received broadcast packets on the function. */ + uint64_t rx_bcast_pkts; + /* + * Number of received packets that were discarded on the function + * due to resource limitations. This can happen for 3 reasons. + * # The BD used for the packet has a bad format. + * # There were no BDs available in the ring for the packet. + * # There were no BDs available on-chip for the packet. + */ + uint64_t rx_discard_pkts; + /* + * Number of dropped packets on received path on the function. + * These are packets that have been marked for drop by the + * RE CFA. + */ + uint64_t rx_drop_pkts; + /* Number of received bytes for unicast traffic on the function. */ + uint64_t rx_ucast_bytes; + /* Number of received bytes for multicast traffic on the function. */ + uint64_t rx_mcast_bytes; + /* Number of received bytes for broadcast traffic on the function. */ + uint64_t rx_bcast_bytes; + /* Number of aggregated unicast packets on the function. */ + uint64_t rx_agg_pkts; + /* Number of aggregated unicast bytes on the function. */ + uint64_t rx_agg_bytes; + /* Number of aggregation events on the function. */ + uint64_t rx_agg_events; + /* Number of aborted aggregations on the function. */ + uint64_t rx_agg_aborts; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/*********************** + * hwrm_func_clr_stats * + ***********************/ + + +/* hwrm_func_clr_stats_input (size:192b/24B) */ +struct hwrm_func_clr_stats_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* + * Function ID of the function. + * 0xFF... (All Fs) if the query is for the requesting + * function. + */ + uint16_t fid; + uint8_t unused_0[6]; +} __rte_packed; + +/* hwrm_func_clr_stats_output (size:128b/16B) */ +struct hwrm_func_clr_stats_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. 
*/ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/************************** + * hwrm_func_vf_resc_free * + **************************/ + + +/* hwrm_func_vf_resc_free_input (size:192b/24B) */ +struct hwrm_func_vf_resc_free_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* + * This value is used to identify a Virtual Function (VF). + * The scope of VF ID is local within a PF. + */ + uint16_t vf_id; + uint8_t unused_0[6]; +} __rte_packed; + +/* hwrm_func_vf_resc_free_output (size:128b/16B) */ +struct hwrm_func_vf_resc_free_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/********************** + * hwrm_func_drv_rgtr * + **********************/ + + +/* hwrm_func_drv_rgtr_input (size:896b/112B) */ +struct hwrm_func_drv_rgtr_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. 
This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t flags;
+ /*
+ * When this bit is '1', the function driver is requesting
+ * all requests from its children VF drivers to be
+ * forwarded to itself.
+ * This flag can only be set by the PF driver.
+ * If a VF driver sets this flag, it should be ignored
+ * by the HWRM.
+ */
+ #define HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FWD_ALL_MODE \
+ UINT32_C(0x1)
+ /*
+ * When this bit is '1', the function is requesting none of
+ * the requests from its children VF drivers to be
+ * forwarded to itself.
+ * This flag can only be set by the PF driver.
+ * If a VF driver sets this flag, it should be ignored
+ * by the HWRM.
+ */
+ #define HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FWD_NONE_MODE \
+ UINT32_C(0x2)
+ /*
+ * When this bit is '1', the ver_maj_8b, ver_min_8b and ver_upd_8b
+ * fields shall be ignored and ver_maj, ver_min, ver_upd
+ * and ver_patch shall be used for the driver version information.
+ * When this bit is '0', the ver_maj_8b, ver_min_8b and ver_upd_8b
+ * fields shall be used for the driver version information and
+ * ver_maj, ver_min, ver_upd and ver_patch shall be ignored.
+ */
+ #define HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_16BIT_VER_MODE \
+ UINT32_C(0x4)
+ /*
+ * When this bit is '1', the function is indicating support of the
+ * 64bit flow handle. Firmware that only supports the 64bit flow
+ * handle should check this bit before allowing processing of
+ * HWRM_CFA_FLOW_XXX commands from the requesting function, because
+ * such firmware is only compatible with drivers that support the
+ * 64bit flow handle. Legacy drivers that don't support the 64bit
+ * flow handle won't be able to use HWRM_CFA_FLOW_XXX commands when
+ * running with new firmware that only supports the 64bit flow
+ * handle; such firmware returns HWRM_ERR_CODE_CMD_NOT_SUPPORTED
+ * status to the legacy driver when it encounters these commands.
+ */
+ #define HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FLOW_HANDLE_64BIT_MODE \
+ UINT32_C(0x8)
+ /*
+ * When this bit is '1', the function is indicating support of
+ * Hot Reset. The driver interface will destroy the resources,
+ * unregister the function and register again upon receiving
+ * the RESET_NOTIFY async notification from the core firmware.
+ * The core firmware will use this flag and trigger the Hot Reset
+ * process only if all the registered driver instances are capable
+ * of this support.
+ */
+ #define HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_HOT_RESET_SUPPORT \
+ UINT32_C(0x10)
+ /*
+ * When this bit is 1, the function is indicating support of the
+ * error recovery capability. Error recovery support will be used by
+ * the firmware only if all the driver instances support the error
+ * recovery process. By setting this bit, the driver is indicating
+ * support for the corresponding async event completion messages.
+ * These will be delivered to the driver even if it did not register
+ * for them. If supported, after receiving the reset notify async
+ * event with the fatal flag set in event data1, all the drivers have
+ * to tear down their resources without sending any HWRM commands to
+ * the FW.
+ */
+ #define HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_ERROR_RECOVERY_SUPPORT \
+ UINT32_C(0x20)
+ /*
+ * When this bit is 1, the function is indicating support of the
+ * Master capability. The firmware will use this capability to select
+ * the Master function.
The master function will be used to initiate + * designated functionality like error recovery etc… If none of the + * registered PF’s or trusted VF’s indicate this support, then + * firmware will select the 1st registered PF as Master capable instance. + */ + #define HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_MASTER_SUPPORT \ + UINT32_C(0x40) + uint32_t enables; + /* + * This bit must be '1' for the os_type field to be + * configured. + */ + #define HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_OS_TYPE \ + UINT32_C(0x1) + /* + * This bit must be '1' for the ver field to be + * configured. + */ + #define HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER \ + UINT32_C(0x2) + /* + * This bit must be '1' for the timestamp field to be + * configured. + */ + #define HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_TIMESTAMP \ + UINT32_C(0x4) + /* + * This bit must be '1' for the vf_req_fwd field to be + * configured. + */ + #define HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_REQ_FWD \ + UINT32_C(0x8) + /* + * This bit must be '1' for the async_event_fwd field to be + * configured. + */ + #define HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD \ + UINT32_C(0x10) + /* This value indicates the type of OS. The values are based on CIM_OperatingSystem.mof file as published by the DMTF. */ + uint16_t os_type; + /* Unknown */ + #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_UNKNOWN UINT32_C(0x0) + /* Other OS not listed below. */ + #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_OTHER UINT32_C(0x1) + /* MSDOS OS. */ + #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_MSDOS UINT32_C(0xe) + /* Windows OS. */ + #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_WINDOWS UINT32_C(0x12) + /* Solaris OS. */ + #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_SOLARIS UINT32_C(0x1d) + /* Linux OS. */ + #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_LINUX UINT32_C(0x24) + /* FreeBSD OS. */ + #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_FREEBSD UINT32_C(0x2a) + /* VMware ESXi OS. */ + #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_ESXI UINT32_C(0x68) + /* Microsoft Windows 8 64-bit OS. */ + #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_WIN864 UINT32_C(0x73) + /* Microsoft Windows Server 2012 R2 OS. */ + #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_WIN2012R2 UINT32_C(0x74) + /* UEFI driver. */ + #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_UEFI UINT32_C(0x8000) + #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_LAST \ + HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_UEFI + /* This is the 8bit major version of the driver. */ + uint8_t ver_maj_8b; + /* This is the 8bit minor version of the driver. */ + uint8_t ver_min_8b; + /* This is the 8bit update version of the driver. */ + uint8_t ver_upd_8b; + uint8_t unused_0[3]; + /* + * This is a 32-bit timestamp provided by the driver for + * keep alive. + * The timestamp is in multiples of 1ms. + */ + uint32_t timestamp; + uint8_t unused_1[4]; + /* + * This is a 256-bit bit mask provided by the PF driver for + * letting the HWRM know what commands issued by the VF driver + * to the HWRM should be forwarded to the PF driver. + * Nth bit refers to the Nth req_type. + * + * Setting Nth bit to 1 indicates that requests from the + * VF driver with req_type equal to N shall be forwarded to + * the parent PF driver. + * + * This field is not valid for the VF driver. + */ + uint32_t vf_req_fwd[8]; + /* + * This is a 256-bit bit mask provided by the function driver + * (PF or VF driver) to indicate the list of asynchronous event + * completions to be forwarded. + * + * Nth bit refers to the Nth event_id. 
+ * + * Setting Nth bit to 1 by the function driver shall result in + * the HWRM forwarding asynchronous event completion with + * event_id equal to N. + * + * If all bits are set to 0 (value of 0), then the HWRM shall + * not forward any asynchronous event completion to this + * function driver. + */ + uint32_t async_event_fwd[8]; + /* This is the 16bit major version of the driver. */ + uint16_t ver_maj; + /* This is the 16bit minor version of the driver. */ + uint16_t ver_min; + /* This is the 16bit update version of the driver. */ + uint16_t ver_upd; + /* This is the 16bit patch version of the driver. */ + uint16_t ver_patch; +} __rte_packed; + +/* hwrm_func_drv_rgtr_output (size:128b/16B) */ +struct hwrm_func_drv_rgtr_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint32_t flags; + /* + * When this bit is '1', it indicates that the + * HWRM_FUNC_DRV_IF_CHANGE call is supported. + */ + #define HWRM_FUNC_DRV_RGTR_OUTPUT_FLAGS_IF_CHANGE_SUPPORTED \ + UINT32_C(0x1) + uint8_t unused_0[3]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/************************ + * hwrm_func_drv_unrgtr * + ************************/ + + +/* hwrm_func_drv_unrgtr_input (size:192b/24B) */ +struct hwrm_func_drv_unrgtr_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + uint32_t flags; + /* + * When this bit is '1', the function driver is notifying + * the HWRM to prepare for the shutdown. + */ + #define HWRM_FUNC_DRV_UNRGTR_INPUT_FLAGS_PREPARE_FOR_SHUTDOWN \ + UINT32_C(0x1) + uint8_t unused_0[4]; +} __rte_packed; + +/* hwrm_func_drv_unrgtr_output (size:128b/16B) */ +struct hwrm_func_drv_unrgtr_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. 
This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/********************** + * hwrm_func_buf_rgtr * + **********************/ + + +/* hwrm_func_buf_rgtr_input (size:1024b/128B) */ +struct hwrm_func_buf_rgtr_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + uint32_t enables; + /* + * This bit must be '1' for the vf_id field to be + * configured. + */ + #define HWRM_FUNC_BUF_RGTR_INPUT_ENABLES_VF_ID UINT32_C(0x1) + /* + * This bit must be '1' for the err_buf_addr field to be + * configured. + */ + #define HWRM_FUNC_BUF_RGTR_INPUT_ENABLES_ERR_BUF_ADDR UINT32_C(0x2) + /* + * This value is used to identify a Virtual Function (VF). + * The scope of VF ID is local within a PF. + */ + uint16_t vf_id; + /* + * This field represents the number of pages used for request + * buffer(s). + */ + uint16_t req_buf_num_pages; + /* + * This field represents the page size used for request + * buffer(s). + */ + uint16_t req_buf_page_size; + /* 16 bytes */ + #define HWRM_FUNC_BUF_RGTR_INPUT_REQ_BUF_PAGE_SIZE_16B UINT32_C(0x4) + /* 4 Kbytes */ + #define HWRM_FUNC_BUF_RGTR_INPUT_REQ_BUF_PAGE_SIZE_4K UINT32_C(0xc) + /* 8 Kbytes */ + #define HWRM_FUNC_BUF_RGTR_INPUT_REQ_BUF_PAGE_SIZE_8K UINT32_C(0xd) + /* 64 Kbytes */ + #define HWRM_FUNC_BUF_RGTR_INPUT_REQ_BUF_PAGE_SIZE_64K UINT32_C(0x10) + /* 2 Mbytes */ + #define HWRM_FUNC_BUF_RGTR_INPUT_REQ_BUF_PAGE_SIZE_2M UINT32_C(0x15) + /* 4 Mbytes */ + #define HWRM_FUNC_BUF_RGTR_INPUT_REQ_BUF_PAGE_SIZE_4M UINT32_C(0x16) + /* 1 Gbytes */ + #define HWRM_FUNC_BUF_RGTR_INPUT_REQ_BUF_PAGE_SIZE_1G UINT32_C(0x1e) + #define HWRM_FUNC_BUF_RGTR_INPUT_REQ_BUF_PAGE_SIZE_LAST \ + HWRM_FUNC_BUF_RGTR_INPUT_REQ_BUF_PAGE_SIZE_1G + /* The length of the request buffer per VF in bytes. */ + uint16_t req_buf_len; + /* The length of the response buffer in bytes. */ + uint16_t resp_buf_len; + uint8_t unused_0[2]; + /* This field represents the page address of page #0. */ + uint64_t req_buf_page_addr0; + /* This field represents the page address of page #1. */ + uint64_t req_buf_page_addr1; + /* This field represents the page address of page #2. */ + uint64_t req_buf_page_addr2; + /* This field represents the page address of page #3. */ + uint64_t req_buf_page_addr3; + /* This field represents the page address of page #4. */ + uint64_t req_buf_page_addr4; + /* This field represents the page address of page #5. 
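+ *
+ * Editorial note (inferred from the REQ_BUF_PAGE_SIZE encodings above,
+ * where 0x4 = 16 B, 0xc = 4 KB, 0x10 = 64 KB and 0x1e = 1 GB): the
+ * encoded value is log2 of the page size in bytes, so the byte size of
+ * each request buffer page can be recovered as, for example:
+ *
+ *   uint64_t page_bytes = 1ULL << req_buf_page_size;   (0xc -> 4096)
+ *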
*/ + uint64_t req_buf_page_addr5; + /* This field represents the page address of page #6. */ + uint64_t req_buf_page_addr6; + /* This field represents the page address of page #7. */ + uint64_t req_buf_page_addr7; + /* This field represents the page address of page #8. */ + uint64_t req_buf_page_addr8; + /* This field represents the page address of page #9. */ + uint64_t req_buf_page_addr9; + /* + * This field is used to receive the error reporting from + * the chipset. Only applicable for PFs. + */ + uint64_t error_buf_addr; + /* + * This field is used to receive the response forwarded by the + * HWRM. + */ + uint64_t resp_buf_addr; +} __rte_packed; + +/* hwrm_func_buf_rgtr_output (size:128b/16B) */ +struct hwrm_func_buf_rgtr_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/************************ + * hwrm_func_buf_unrgtr * + ************************/ + + +/* hwrm_func_buf_unrgtr_input (size:192b/24B) */ +struct hwrm_func_buf_unrgtr_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + uint32_t enables; + /* + * This bit must be '1' for the vf_id field to be + * configured. + */ + #define HWRM_FUNC_BUF_UNRGTR_INPUT_ENABLES_VF_ID UINT32_C(0x1) + /* + * This value is used to identify a Virtual Function (VF). + * The scope of VF ID is local within a PF. + */ + uint16_t vf_id; + uint8_t unused_0[2]; +} __rte_packed; + +/* hwrm_func_buf_unrgtr_output (size:128b/16B) */ +struct hwrm_func_buf_unrgtr_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. 
+ * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/********************** + * hwrm_func_drv_qver * + **********************/ + + +/* hwrm_func_drv_qver_input (size:192b/24B) */ +struct hwrm_func_drv_qver_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* Reserved for future use. */ + uint32_t reserved; + /* + * Function ID of the function that is being queried. + * 0xFF... (All Fs) if the query is for the requesting + * function. + */ + uint16_t fid; + uint8_t unused_0[2]; +} __rte_packed; + +/* hwrm_func_drv_qver_output (size:256b/32B) */ +struct hwrm_func_drv_qver_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* This value indicates the type of OS. The values are based on CIM_OperatingSystem.mof file as published by the DMTF. */ + uint16_t os_type; + /* Unknown */ + #define HWRM_FUNC_DRV_QVER_OUTPUT_OS_TYPE_UNKNOWN UINT32_C(0x0) + /* Other OS not listed below. */ + #define HWRM_FUNC_DRV_QVER_OUTPUT_OS_TYPE_OTHER UINT32_C(0x1) + /* MSDOS OS. */ + #define HWRM_FUNC_DRV_QVER_OUTPUT_OS_TYPE_MSDOS UINT32_C(0xe) + /* Windows OS. */ + #define HWRM_FUNC_DRV_QVER_OUTPUT_OS_TYPE_WINDOWS UINT32_C(0x12) + /* Solaris OS. */ + #define HWRM_FUNC_DRV_QVER_OUTPUT_OS_TYPE_SOLARIS UINT32_C(0x1d) + /* Linux OS. */ + #define HWRM_FUNC_DRV_QVER_OUTPUT_OS_TYPE_LINUX UINT32_C(0x24) + /* FreeBSD OS. */ + #define HWRM_FUNC_DRV_QVER_OUTPUT_OS_TYPE_FREEBSD UINT32_C(0x2a) + /* VMware ESXi OS. */ + #define HWRM_FUNC_DRV_QVER_OUTPUT_OS_TYPE_ESXI UINT32_C(0x68) + /* Microsoft Windows 8 64-bit OS. */ + #define HWRM_FUNC_DRV_QVER_OUTPUT_OS_TYPE_WIN864 UINT32_C(0x73) + /* Microsoft Windows Server 2012 R2 OS. */ + #define HWRM_FUNC_DRV_QVER_OUTPUT_OS_TYPE_WIN2012R2 UINT32_C(0x74) + /* UEFI driver. */ + #define HWRM_FUNC_DRV_QVER_OUTPUT_OS_TYPE_UEFI UINT32_C(0x8000) + #define HWRM_FUNC_DRV_QVER_OUTPUT_OS_TYPE_LAST \ + HWRM_FUNC_DRV_QVER_OUTPUT_OS_TYPE_UEFI + /* This is the 8bit major version of the driver. */ + uint8_t ver_maj_8b; + /* This is the 8bit minor version of the driver. */ + uint8_t ver_min_8b; + /* This is the 8bit update version of the driver. */ + uint8_t ver_upd_8b; + uint8_t unused_0[3]; + /* This is the 16bit major version of the driver. 
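+ *
+ * Editorial note (an assumption based on the 16BIT_VER_MODE flag of
+ * hwrm_func_drv_rgtr): this and the following 16bit ver_* fields
+ * report the same driver version as the 8bit ver_*_8b fields above,
+ * but can carry components larger than 255. For example, a
+ * hypothetical driver version 21.10.300.0 fits in
+ * ver_maj/ver_min/ver_upd/ver_patch, while ver_upd_8b (maximum 255)
+ * cannot represent the 300.
+ *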
*/
+ uint16_t ver_maj;
+ /* This is the 16bit minor version of the driver. */
+ uint16_t ver_min;
+ /* This is the 16bit update version of the driver. */
+ uint16_t ver_upd;
+ /* This is the 16bit patch version of the driver. */
+ uint16_t ver_patch;
+ uint8_t unused_1[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __rte_packed;
+
+/****************************
+ * hwrm_func_resource_qcaps *
+ ****************************/
+
+
+/* hwrm_func_resource_qcaps_input (size:192b/24B) */
+struct hwrm_func_resource_qcaps_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * Function ID of the function that is being queried.
+ * 0xFF... (All Fs) if the query is for the requesting
+ * function.
+ */
+ uint16_t fid;
+ uint8_t unused_0[6];
+} __rte_packed;
+
+/* hwrm_func_resource_qcaps_output (size:448b/56B) */
+struct hwrm_func_resource_qcaps_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* Maximum guaranteed number of VFs supported by the PF. Not applicable for VFs. */
+ uint16_t max_vfs;
+ /* Maximum guaranteed number of MSI-X vectors supported by the function. */
+ uint16_t max_msix;
+ /* Hint of the strategy to be used by the PF driver to reserve resources for its VFs. */
+ uint16_t vf_reservation_strategy;
+ /* The PF driver should evenly divide its remaining resources among all VFs. */
+ #define HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESERVATION_STRATEGY_MAXIMAL \
+ UINT32_C(0x0)
+ /* The PF driver should only reserve minimal resources for each VF. */
+ #define HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESERVATION_STRATEGY_MINIMAL \
+ UINT32_C(0x1)
+ /*
+ * The PF driver should not reserve any resources for each VF until
+ * the VF interface is brought up.
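+ *
+ * Editorial note (a hedged sketch of how a PF driver might act on this
+ * hint; the interface does not mandate any particular behaviour):
+ *
+ *   switch (resp->vf_reservation_strategy) {
+ *   case HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESERVATION_STRATEGY_MAXIMAL:
+ *           (divide the remaining resources evenly across the VFs)
+ *   case HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESERVATION_STRATEGY_MINIMAL:
+ *           (reserve only the min_* values below for each VF)
+ *   default:
+ *           (defer reservation until a VF interface is brought up)
+ *   }
+ *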
+ */ + #define HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESERVATION_STRATEGY_MINIMAL_STATIC \ + UINT32_C(0x2) + #define HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESERVATION_STRATEGY_LAST \ + HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESERVATION_STRATEGY_MINIMAL_STATIC + /* Minimum guaranteed number of RSS/COS contexts */ + uint16_t min_rsscos_ctx; + /* Maximum non-guaranteed number of RSS/COS contexts */ + uint16_t max_rsscos_ctx; + /* Minimum guaranteed number of completion rings */ + uint16_t min_cmpl_rings; + /* Maximum non-guaranteed number of completion rings */ + uint16_t max_cmpl_rings; + /* Minimum guaranteed number of transmit rings */ + uint16_t min_tx_rings; + /* Maximum non-guaranteed number of transmit rings */ + uint16_t max_tx_rings; + /* Minimum guaranteed number of receive rings */ + uint16_t min_rx_rings; + /* Maximum non-guaranteed number of receive rings */ + uint16_t max_rx_rings; + /* Minimum guaranteed number of L2 contexts */ + uint16_t min_l2_ctxs; + /* Maximum non-guaranteed number of L2 contexts */ + uint16_t max_l2_ctxs; + /* Minimum guaranteed number of VNICs */ + uint16_t min_vnics; + /* Maximum non-guaranteed number of VNICs */ + uint16_t max_vnics; + /* Minimum guaranteed number of statistic contexts */ + uint16_t min_stat_ctx; + /* Maximum non-guaranteed number of statistic contexts */ + uint16_t max_stat_ctx; + /* Minimum guaranteed number of ring groups */ + uint16_t min_hw_ring_grps; + /* Maximum non-guaranteed number of ring groups */ + uint16_t max_hw_ring_grps; + /* + * Maximum number of inputs into the transmit scheduler for this function. + * The number of TX rings assigned to the function cannot exceed this value. + */ + uint16_t max_tx_scheduler_inputs; + uint16_t flags; + /* + * When this bit is '1', it indicates that VF_RESOURCE_CFG supports + * feature to reserve all minimum resources when minimum >= 1, otherwise + * returns an error. + */ + #define HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_FLAGS_MIN_GUARANTEED \ + UINT32_C(0x1) + uint8_t unused_0[5]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/********************************* + * hwrm_func_backing_store_qcaps * + *********************************/ + + +/* hwrm_func_backing_store_qcaps_input (size:128b/16B) */ +struct hwrm_func_backing_store_qcaps_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. 
This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; +} __rte_packed; + +/* hwrm_func_backing_store_qcaps_output (size:640b/80B) */ +struct hwrm_func_backing_store_qcaps_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* Maximum number of QP context entries supported for this function. */ + uint32_t qp_max_entries; + /* + * Minimum number of QP context entries that are needed to be reserved + * for QP1 for the PF and its VFs. PF drivers must allocate at least + * this many QP context entries, even if RoCE will not be used. + */ + uint16_t qp_min_qp1_entries; + /* Maximum number of QP context entries that can be used for L2. */ + uint16_t qp_max_l2_entries; + /* Number of bytes that must be allocated for each context entry. */ + uint16_t qp_entry_size; + /* Maximum number of SRQ context entries that can be used for L2. */ + uint16_t srq_max_l2_entries; + /* Maximum number of SRQ context entries supported for this function. */ + uint32_t srq_max_entries; + /* Number of bytes that must be allocated for each context entry. */ + uint16_t srq_entry_size; + /* Maximum number of CQ context entries that can be used for L2. */ + uint16_t cq_max_l2_entries; + /* Maximum number of CQ context entries supported for this function. */ + uint32_t cq_max_entries; + /* Number of bytes that must be allocated for each context entry. */ + uint16_t cq_entry_size; + /* Maximum number of VNIC context entries supported for this function. */ + uint16_t vnic_max_vnic_entries; + /* Maximum number of Ring table context entries supported for this function. */ + uint16_t vnic_max_ring_table_entries; + /* Number of bytes that must be allocated for each context entry. */ + uint16_t vnic_entry_size; + /* Maximum number of statistic context entries supported for this function. */ + uint32_t stat_max_entries; + /* Number of bytes that must be allocated for each context entry. */ + uint16_t stat_entry_size; + /* Number of bytes that must be allocated for each context entry. */ + uint16_t tqm_entry_size; + /* Minimum number of TQM context entries required per ring. */ + uint32_t tqm_min_entries_per_ring; + /* + * Maximum number of TQM context entries supported per ring. This is + * actually a recommended TQM queue size based on worst case usage of + * the TQM queue. + * + * TQM fastpath rings should be sized large enough to accommodate the + * maximum number of QPs (either L2 or RoCE, or both if shared) + * that can be enqueued to the TQM ring. + * + * TQM slowpath rings should be sized as follows: + * + * num_entries = num_vnics + num_l2_tx_rings + num_roce_qps + tqm_min_size + * + * Where: + * num_vnics is the number of VNICs allocated in the VNIC backing store + * num_l2_tx_rings is the number of L2 rings in the QP backing store + * num_roce_qps is the number of RoCE QPs in the QP backing store + * tqm_min_size is tqm_min_entries_per_ring reported by + * HWRM_FUNC_BACKING_STORE_QCAPS + * + * Note that TQM ring sizes cannot be extended while the system is + * operational. If a PF driver needs to extend a TQM ring, it needs + * to reset the function (e.g. HWRM_FUNC_RESET) and then reallocate + * the backing store. 
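+ *
+ * Worked example (hypothetical values, for illustration only): with
+ * 16 VNICs, 8 L2 TX rings, no RoCE QPs and a reported
+ * tqm_min_entries_per_ring of 32, a slowpath ring would need
+ *
+ *   num_entries = 16 + 8 + 0 + 32 = 56
+ *
+ * entries, which the driver would then round up to a multiple of
+ * tqm_entries_multiple (reported further below) before configuring
+ * the ring in hwrm_func_backing_store_cfg.
+ *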
+ */ + uint32_t tqm_max_entries_per_ring; + /* + * Maximum number of MR plus AV context entries supported for this + * function. + */ + uint32_t mrav_max_entries; + /* Number of bytes that must be allocated for each context entry. */ + uint16_t mrav_entry_size; + /* Number of bytes that must be allocated for each context entry. */ + uint16_t tim_entry_size; + /* Maximum number of Timer context entries supported for this function. */ + uint32_t tim_max_entries; + /* + * When this field is zero, the 32b `mrav_num_entries` field in the + * `backing_store_cfg` and `backing_store_qcfg` commands represents + * the total number of MR plus AV entries allowed in the MR/AV backing + * store PBL. + * + * When this field is non-zero, the 32b `mrav_num_entries` field in + * the `backing_store_cfg` and `backing_store_qcfg` commands is + * logically divided into two 16b fields. Bits `[31:16]` represents + * the `mr_num_entries` and bits `[15:0]` represents `av_num_entries`. + * Both of these values are represented in a unit granularity + * specified by this field. For example, if this field is 16 and + * `mrav_num_entries` is `0x02000100`, then the number of MR entries + * is 8192 and the number of AV entries is 4096. + */ + uint16_t mrav_num_entries_units; + /* + * The number of entries specified for any TQM ring must be a + * multiple of this value to prevent any resource allocation + * limitations. + */ + uint8_t tqm_entries_multiple; + /* + * Initializer to be used by drivers + * to initialize context memory to ensure + * context subsystem flags an error for an attack + * before the first time context load. + */ + uint8_t ctx_kind_initializer; + /* Reserved for future. */ + uint32_t rsvd; + /* Reserved for future. */ + uint16_t rsvd1; + /* + * Count of TQM fastpath rings to be used for allocating backing store. + * Backing store configuration must be specified for each TQM ring from + * this count in `backing_store_cfg`. + */ + uint8_t tqm_fp_rings_count; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/******************************* + * hwrm_func_backing_store_cfg * + *******************************/ + + +/* hwrm_func_backing_store_cfg_input (size:2048b/256B) */ +struct hwrm_func_backing_store_cfg_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. 
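+ *
+ * Editorial note (a hedged sketch, not a required usage pattern): the
+ * 'enables' bitmap in the request body below selects which of the
+ * backing-store sections that follow are actually being configured.
+ * A driver configuring only the QP, SRQ and CQ sections might set,
+ * assuming the usual little-endian HWRM field layout:
+ *
+ *   req.enables = rte_cpu_to_le_32(
+ *           HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_QP |
+ *           HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_SRQ |
+ *           HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_CQ);
+ *
+ * and then fill in the corresponding *_pg_size_*_lvl, *_page_dir and
+ * *_num_entries fields while leaving the others untouched.
+ *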
+ */ + uint64_t resp_addr; + uint32_t flags; + /* + * When set, the firmware only uses on-chip resources and does not + * expect any backing store to be provided by the host driver. This + * mode provides minimal L2 functionality (e.g. limited L2 resources, + * no RoCE). + */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_FLAGS_PREBOOT_MODE \ + UINT32_C(0x1) + /* + * When set, the 32b `mrav_num_entries` field is logically divided + * into two 16b fields, `mr_num_entries` and `av_num_entries`. + */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_FLAGS_MRAV_RESERVATION_SPLIT \ + UINT32_C(0x2) + uint32_t enables; + /* + * This bit must be '1' for the qp fields to be + * configured. + */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_QP \ + UINT32_C(0x1) + /* + * This bit must be '1' for the srq fields to be + * configured. + */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_SRQ \ + UINT32_C(0x2) + /* + * This bit must be '1' for the cq fields to be + * configured. + */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_CQ \ + UINT32_C(0x4) + /* + * This bit must be '1' for the vnic fields to be + * configured. + */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_VNIC \ + UINT32_C(0x8) + /* + * This bit must be '1' for the stat fields to be + * configured. + */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_STAT \ + UINT32_C(0x10) + /* + * This bit must be '1' for the tqm_sp fields to be + * configured. + */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP \ + UINT32_C(0x20) + /* + * This bit must be '1' for the tqm_ring0 fields to be + * configured. + */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_RING0 \ + UINT32_C(0x40) + /* + * This bit must be '1' for the tqm_ring1 fields to be + * configured. + */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_RING1 \ + UINT32_C(0x80) + /* + * This bit must be '1' for the tqm_ring2 fields to be + * configured. + */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_RING2 \ + UINT32_C(0x100) + /* + * This bit must be '1' for the tqm_ring3 fields to be + * configured. + */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_RING3 \ + UINT32_C(0x200) + /* + * This bit must be '1' for the tqm_ring4 fields to be + * configured. + */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_RING4 \ + UINT32_C(0x400) + /* + * This bit must be '1' for the tqm_ring5 fields to be + * configured. + */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_RING5 \ + UINT32_C(0x800) + /* + * This bit must be '1' for the tqm_ring6 fields to be + * configured. + */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_RING6 \ + UINT32_C(0x1000) + /* + * This bit must be '1' for the tqm_ring7 fields to be + * configured. + */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_RING7 \ + UINT32_C(0x2000) + /* + * This bit must be '1' for the mrav fields to be + * configured. + */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_MRAV \ + UINT32_C(0x4000) + /* + * This bit must be '1' for the tim fields to be + * configured. + */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TIM \ + UINT32_C(0x8000) + /* QPC page size and level. */ + uint8_t qpc_pg_size_qpc_lvl; + /* QPC PBL indirect levels. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_LVL_SFT 0 + /* PBL pointer is physical start address. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. 
*/ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_LVL_LVL_2 + /* QPC page size. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_PG_SIZE_PG_1G + /* SRQ page size and level. */ + uint8_t srq_pg_size_srq_lvl; + /* SRQ PBL indirect levels. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_LVL_SFT 0 + /* PBL pointer is physical start address. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_LVL_LVL_2 + /* SRQ page size. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_PG_SIZE_PG_1G + /* CQ page size and level. */ + uint8_t cq_pg_size_cq_lvl; + /* CQ PBL indirect levels. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_LVL_SFT 0 + /* PBL pointer is physical start address. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. 
*/ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_LVL_LVL_2 + /* CQ page size. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_PG_SIZE_PG_1G + /* VNIC page size and level. */ + uint8_t vnic_pg_size_vnic_lvl; + /* VNIC PBL indirect levels. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_LVL_SFT 0 + /* PBL pointer is physical start address. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_LVL_LVL_2 + /* VNIC page size. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_PG_SIZE_PG_1G + /* Stat page size and level. */ + uint8_t stat_pg_size_stat_lvl; + /* Stat PBL indirect levels. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_LVL_SFT 0 + /* PBL pointer is physical start address. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_LVL_LVL_2 + /* Stat page size. 
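+ *
+ * Editorial note: the PBL level occupies bits [3:0] and the page-size
+ * code bits [7:4] of the same byte (see the masks and shifts around
+ * this definition), so, purely as an illustration, a two-level PBL of
+ * 4KB pages would be encoded as
+ *
+ *   stat_pg_size_stat_lvl =
+ *           HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_PG_SIZE_PG_4K |
+ *           HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_LVL_LVL_2;
+ *
+ * The other *_pg_size_*_lvl fields in this request use the same
+ * encoding with their own prefixes.
+ *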
*/ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_PG_SIZE_PG_1G + /* TQM slow path page size and level. */ + uint8_t tqm_sp_pg_size_tqm_sp_lvl; + /* TQM slow path PBL indirect levels. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_LVL_SFT 0 + /* PBL pointer is physical start address. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_LVL_LVL_2 + /* TQM slow path page size. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_PG_SIZE_PG_1G + /* TQM ring 0 page size and level. */ + uint8_t tqm_ring0_pg_size_tqm_ring0_lvl; + /* TQM ring 0 PBL indirect levels. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_LVL_SFT 0 + /* PBL pointer is physical start address. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_LVL_LVL_2 + /* TQM ring 0 page size. 
*/ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_PG_SIZE_PG_1G + /* TQM ring 1 page size and level. */ + uint8_t tqm_ring1_pg_size_tqm_ring1_lvl; + /* TQM ring 1 PBL indirect levels. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_LVL_SFT 0 + /* PBL pointer is physical start address. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_LVL_LVL_2 + /* TQM ring 1 page size. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_PG_SIZE_PG_1G + /* TQM ring 2 page size and level. */ + uint8_t tqm_ring2_pg_size_tqm_ring2_lvl; + /* TQM ring 2 PBL indirect levels. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_LVL_SFT 0 + /* PBL pointer is physical start address. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_LVL_LVL_2 + /* TQM ring 2 page size. 
*/ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_PG_SIZE_PG_1G + /* TQM ring 3 page size and level. */ + uint8_t tqm_ring3_pg_size_tqm_ring3_lvl; + /* TQM ring 3 PBL indirect levels. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_LVL_SFT 0 + /* PBL pointer is physical start address. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_LVL_LVL_2 + /* TQM ring 3 page size. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_PG_SIZE_PG_1G + /* TQM ring 4 page size and level. */ + uint8_t tqm_ring4_pg_size_tqm_ring4_lvl; + /* TQM ring 4 PBL indirect levels. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_LVL_SFT 0 + /* PBL pointer is physical start address. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_LVL_LVL_2 + /* TQM ring 4 page size. 
*/ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_PG_SIZE_PG_1G + /* TQM ring 5 page size and level. */ + uint8_t tqm_ring5_pg_size_tqm_ring5_lvl; + /* TQM ring 5 PBL indirect levels. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_LVL_SFT 0 + /* PBL pointer is physical start address. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_LVL_LVL_2 + /* TQM ring 5 page size. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_PG_SIZE_PG_1G + /* TQM ring 6 page size and level. */ + uint8_t tqm_ring6_pg_size_tqm_ring6_lvl; + /* TQM ring 6 PBL indirect levels. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_LVL_SFT 0 + /* PBL pointer is physical start address. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_LVL_LVL_2 + /* TQM ring 6 page size. 
*/ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_PG_SIZE_PG_1G + /* TQM ring 7 page size and level. */ + uint8_t tqm_ring7_pg_size_tqm_ring7_lvl; + /* TQM ring 7 PBL indirect levels. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_LVL_SFT 0 + /* PBL pointer is physical start address. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_LVL_LVL_2 + /* TQM ring 7 page size. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_PG_SIZE_PG_1G + /* MR/AV page size and level. */ + uint8_t mrav_pg_size_mrav_lvl; + /* MR/AV PBL indirect levels. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_LVL_SFT 0 + /* PBL pointer is physical start address. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_LVL_LVL_2 + /* MR/AV page size. 
*/ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_PG_SIZE_PG_1G + /* Timer page size and level. */ + uint8_t tim_pg_size_tim_lvl; + /* Timer PBL indirect levels. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_LVL_SFT 0 + /* PBL pointer is physical start address. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_LVL_LVL_2 + /* Timer page size. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_PG_SIZE_PG_1G + /* QP page directory. */ + uint64_t qpc_page_dir; + /* SRQ page directory. */ + uint64_t srq_page_dir; + /* CQ page directory. */ + uint64_t cq_page_dir; + /* VNIC page directory. */ + uint64_t vnic_page_dir; + /* Stat page directory. */ + uint64_t stat_page_dir; + /* TQM slowpath page directory. */ + uint64_t tqm_sp_page_dir; + /* TQM ring 0 page directory. */ + uint64_t tqm_ring0_page_dir; + /* TQM ring 1 page directory. */ + uint64_t tqm_ring1_page_dir; + /* TQM ring 2 page directory. */ + uint64_t tqm_ring2_page_dir; + /* TQM ring 3 page directory. */ + uint64_t tqm_ring3_page_dir; + /* TQM ring 4 page directory. */ + uint64_t tqm_ring4_page_dir; + /* TQM ring 5 page directory. */ + uint64_t tqm_ring5_page_dir; + /* TQM ring 6 page directory. */ + uint64_t tqm_ring6_page_dir; + /* TQM ring 7 page directory. */ + uint64_t tqm_ring7_page_dir; + /* MR/AV page directory. */ + uint64_t mrav_page_dir; + /* Timer page directory. */ + uint64_t tim_page_dir; + /* Number of QPs. */ + uint32_t qp_num_entries; + /* Number of SRQs. 
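Each *_pg_size_*_lvl byte above packs the PBL indirection level into bits 3:0 and the backing-store page size into bits 7:4. The following is a minimal sketch, assuming the surrounding HWRM header is included, of how a driver could compose the tim_pg_size_tim_lvl byte from the macros defined above; the helper name is illustrative and not part of this header.

#include <stdint.h>

/* Illustrative only: combine a PBL level with a page size using the
 * HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_* macros defined above.  The
 * PG_SIZE values already carry the "<< 4" shift, so a plain OR packs
 * both fields into the single byte (here: level 2, 2MB pages -> 0x32).
 */
static inline uint8_t
example_tim_pg_size_lvl(void)
{
	return HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_LVL_LVL_2 |
	       HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_PG_SIZE_PG_2M;
}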
*/ + uint32_t srq_num_entries; + /* Number of CQs. */ + uint32_t cq_num_entries; + /* Number of Stats. */ + uint32_t stat_num_entries; + /* + * Number of TQM slowpath entries. + * + * TQM slowpath rings should be sized as follows: + * + * num_entries = num_vnics + num_l2_tx_rings + num_roce_qps + tqm_min_size + * + * Where: + * num_vnics is the number of VNICs allocated in the VNIC backing store + * num_l2_tx_rings is the number of L2 rings in the QP backing store + * num_roce_qps is the number of RoCE QPs in the QP backing store + * tqm_min_size is tqm_min_entries_per_ring reported by + * HWRM_FUNC_BACKING_STORE_QCAPS + * + * Note that TQM ring sizes cannot be extended while the system is + * operational. If a PF driver needs to extend a TQM ring, it needs + * to reset the function (e.g. HWRM_FUNC_RESET) and then reallocate + * the backing store. + */ + uint32_t tqm_sp_num_entries; + /* + * Number of TQM ring 0 entries. + * + * TQM fastpath rings should be sized large enough to accommodate the + * maximum number of QPs (either L2 or RoCE, or both if shared) + * that can be enqueued to the TQM ring. + * + * Note that TQM ring sizes cannot be extended while the system is + * operational. If a PF driver needs to extend a TQM ring, it needs + * to reset the function (e.g. HWRM_FUNC_RESET) and then reallocate + * the backing store. + */ + uint32_t tqm_ring0_num_entries; + /* + * Number of TQM ring 1 entries. + * + * TQM fastpath rings should be sized large enough to accommodate the + * maximum number of QPs (either L2 or RoCE, or both if shared) + * that can be enqueued to the TQM ring. + * + * Note that TQM ring sizes cannot be extended while the system is + * operational. If a PF driver needs to extend a TQM ring, it needs + * to reset the function (e.g. HWRM_FUNC_RESET) and then reallocate + * the backing store. + */ + uint32_t tqm_ring1_num_entries; + /* + * Number of TQM ring 2 entries. + * + * TQM fastpath rings should be sized large enough to accommodate the + * maximum number of QPs (either L2 or RoCE, or both if shared) + * that can be enqueued to the TQM ring. + * + * Note that TQM ring sizes cannot be extended while the system is + * operational. If a PF driver needs to extend a TQM ring, it needs + * to reset the function (e.g. HWRM_FUNC_RESET) and then reallocate + * the backing store. + */ + uint32_t tqm_ring2_num_entries; + /* + * Number of TQM ring 3 entries. + * + * TQM fastpath rings should be sized large enough to accommodate the + * maximum number of QPs (either L2 or RoCE, or both if shared) + * that can be enqueued to the TQM ring. + * + * Note that TQM ring sizes cannot be extended while the system is + * operational. If a PF driver needs to extend a TQM ring, it needs + * to reset the function (e.g. HWRM_FUNC_RESET) and then reallocate + * the backing store. + */ + uint32_t tqm_ring3_num_entries; + /* + * Number of TQM ring 4 entries. + * + * TQM fastpath rings should be sized large enough to accommodate the + * maximum number of QPs (either L2 or RoCE, or both if shared) + * that can be enqueued to the TQM ring. + * + * Note that TQM ring sizes cannot be extended while the system is + * operational. If a PF driver needs to extend a TQM ring, it needs + * to reset the function (e.g. HWRM_FUNC_RESET) and then reallocate + * the backing store. + */ + uint32_t tqm_ring4_num_entries; + /* + * Number of TQM ring 5 entries. 
+ * + * TQM fastpath rings should be sized large enough to accommodate the + * maximum number of QPs (either L2 or RoCE, or both if shared) + * that can be enqueued to the TQM ring. + * + * Note that TQM ring sizes cannot be extended while the system is + * operational. If a PF driver needs to extend a TQM ring, it needs + * to reset the function (e.g. HWRM_FUNC_RESET) and then reallocate + * the backing store. + */ + uint32_t tqm_ring5_num_entries; + /* + * Number of TQM ring 6 entries. + * + * TQM fastpath rings should be sized large enough to accommodate the + * maximum number of QPs (either L2 or RoCE, or both if shared) + * that can be enqueued to the TQM ring. + * + * Note that TQM ring sizes cannot be extended while the system is + * operational. If a PF driver needs to extend a TQM ring, it needs + * to reset the function (e.g. HWRM_FUNC_RESET) and then reallocate + * the backing store. + */ + uint32_t tqm_ring6_num_entries; + /* + * Number of TQM ring 7 entries. + * + * TQM fastpath rings should be sized large enough to accommodate the + * maximum number of QPs (either L2 or RoCE, or both if shared) + * that can be enqueued to the TQM ring. + * + * Note that TQM ring sizes cannot be extended while the system is + * operational. If a PF driver needs to extend a TQM ring, it needs + * to reset the function (e.g. HWRM_FUNC_RESET) and then reallocate + * the backing store. + */ + uint32_t tqm_ring7_num_entries; + /* + * If the MR/AV split reservation flag is not set, then this field + * represents the total number of MR plus AV entries. For versions + * of firmware that support the split reservation, when it is not + * specified half of the entries will be reserved for MRs and the + * other half for AVs. + * + * If the MR/AV split reservation flag is set, then this + * field is logically divided into two 16b fields. Bits `[31:16]` + * represents the `mr_num_entries` and bits `[15:0]` represents + * `av_num_entries`. The granularity of these values is defined by + * the `mrav_num_entries_unit` field returned by the + * `backing_store_qcaps` command. + */ + uint32_t mrav_num_entries; + /* Number of Timer entries. */ + uint32_t tim_num_entries; + /* Number of entries to reserve for QP1 */ + uint16_t qp_num_qp1_entries; + /* Number of entries to reserve for L2 */ + uint16_t qp_num_l2_entries; + /* Number of bytes that have been allocated for each context entry. */ + uint16_t qp_entry_size; + /* Number of entries to reserve for L2 */ + uint16_t srq_num_l2_entries; + /* Number of bytes that have been allocated for each context entry. */ + uint16_t srq_entry_size; + /* Number of entries to reserve for L2 */ + uint16_t cq_num_l2_entries; + /* Number of bytes that have been allocated for each context entry. */ + uint16_t cq_entry_size; + /* Number of entries to reserve for VNIC entries */ + uint16_t vnic_num_vnic_entries; + /* Number of entries to reserve for Ring table entries */ + uint16_t vnic_num_ring_table_entries; + /* Number of bytes that have been allocated for each context entry. */ + uint16_t vnic_entry_size; + /* Number of bytes that have been allocated for each context entry. */ + uint16_t stat_entry_size; + /* Number of bytes that have been allocated for each context entry. */ + uint16_t tqm_entry_size; + /* Number of bytes that have been allocated for each context entry. */ + uint16_t mrav_entry_size; + /* Number of bytes that have been allocated for each context entry. 
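As a worked example of the TQM slowpath sizing rule quoted in the tqm_sp_num_entries comment above, a driver could compute the ring size as shown below. The function and its inputs are assumptions for illustration only; tqm_min_size corresponds to the tqm_min_entries_per_ring value reported by HWRM_FUNC_BACKING_STORE_QCAPS.

#include <stdint.h>

/* Illustrative sketch of the documented sizing rule:
 * num_entries = num_vnics + num_l2_tx_rings + num_roce_qps + tqm_min_size
 */
static inline uint32_t
example_tqm_sp_num_entries(uint32_t num_vnics, uint32_t num_l2_tx_rings,
			   uint32_t num_roce_qps, uint32_t tqm_min_size)
{
	return num_vnics + num_l2_tx_rings + num_roce_qps + tqm_min_size;
}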
*/ + uint16_t tim_entry_size; +} __rte_packed; + +/* hwrm_func_backing_store_cfg_output (size:128b/16B) */ +struct hwrm_func_backing_store_cfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/******************************** + * hwrm_func_backing_store_qcfg * + ********************************/ + + +/* hwrm_func_backing_store_qcfg_input (size:128b/16B) */ +struct hwrm_func_backing_store_qcfg_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; +} __rte_packed; + +/* hwrm_func_backing_store_qcfg_output (size:1920b/240B) */ +struct hwrm_func_backing_store_qcfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint32_t flags; + /* + * When set, the firmware only uses on-chip resources and does not + * expect any backing store to be provided by the host driver. This + * mode provides minimal L2 functionality (e.g. limited L2 resources, + * no RoCE). + */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_FLAGS_PREBOOT_MODE \ + UINT32_C(0x1) + /* + * When set, the 32b `mrav_num_entries` field is logically divided + * into two 16b fields, `mr_num_entries` and `av_num_entries`. + */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_FLAGS_MRAV_RESERVATION_SPLIT \ + UINT32_C(0x2) + uint8_t unused_0[4]; + /* + * This bit must be '1' for the qp fields to be + * configured. + */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_QP \ + UINT32_C(0x1) + /* + * This bit must be '1' for the srq fields to be + * configured. + */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_SRQ \ + UINT32_C(0x2) + /* + * This bit must be '1' for the cq fields to be + * configured. 
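When the MR/AV reservation-split flag described above is set, mrav_num_entries carries two 16-bit counts. A hedged sketch of packing and unpacking that field follows; the helper names are hypothetical, and the counts are expressed in units of the mrav_num_entries_unit value returned by the backing_store_qcaps command.

#include <stdint.h>

/* Illustrative helpers for the split mrav_num_entries layout:
 * bits [31:16] = mr_num_entries, bits [15:0] = av_num_entries.
 */
static inline uint32_t
example_mrav_pack(uint16_t mr_num_entries, uint16_t av_num_entries)
{
	return ((uint32_t)mr_num_entries << 16) | av_num_entries;
}

static inline void
example_mrav_unpack(uint32_t mrav_num_entries,
		    uint16_t *mr_num_entries, uint16_t *av_num_entries)
{
	*mr_num_entries = (uint16_t)(mrav_num_entries >> 16);
	*av_num_entries = (uint16_t)(mrav_num_entries & 0xffff);
}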
+ */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_CQ \ + UINT32_C(0x4) + /* + * This bit must be '1' for the vnic fields to be + * configured. + */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_VNIC \ + UINT32_C(0x8) + /* + * This bit must be '1' for the stat fields to be + * configured. + */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_STAT \ + UINT32_C(0x10) + /* + * This bit must be '1' for the tqm_sp fields to be + * configured. + */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_TQM_SP \ + UINT32_C(0x20) + /* + * This bit must be '1' for the tqm_ring0 fields to be + * configured. + */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_TQM_RING0 \ + UINT32_C(0x40) + /* + * This bit must be '1' for the tqm_ring1 fields to be + * configured. + */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_TQM_RING1 \ + UINT32_C(0x80) + /* + * This bit must be '1' for the tqm_ring2 fields to be + * configured. + */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_TQM_RING2 \ + UINT32_C(0x100) + /* + * This bit must be '1' for the tqm_ring3 fields to be + * configured. + */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_TQM_RING3 \ + UINT32_C(0x200) + /* + * This bit must be '1' for the tqm_ring4 fields to be + * configured. + */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_TQM_RING4 \ + UINT32_C(0x400) + /* + * This bit must be '1' for the tqm_ring5 fields to be + * configured. + */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_TQM_RING5 \ + UINT32_C(0x800) + /* + * This bit must be '1' for the tqm_ring6 fields to be + * configured. + */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_TQM_RING6 \ + UINT32_C(0x1000) + /* + * This bit must be '1' for the tqm_ring7 fields to be + * configured. + */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_TQM_RING7 \ + UINT32_C(0x2000) + /* + * This bit must be '1' for the mrav fields to be + * configured. + */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_MRAV \ + UINT32_C(0x4000) + /* + * This bit must be '1' for the tim fields to be + * configured. + */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_TIM \ + UINT32_C(0x8000) + /* QPC page size and level. */ + uint8_t qpc_pg_size_qpc_lvl; + /* QPC PBL indirect levels. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_LVL_SFT 0 + /* PBL pointer is physical start address. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_LVL_LVL_2 + /* QPC page size. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. 
*/ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_PG_SIZE_PG_1G + /* SRQ page size and level. */ + uint8_t srq_pg_size_srq_lvl; + /* SRQ PBL indirect levels. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_LVL_SFT 0 + /* PBL pointer is physical start address. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_LVL_LVL_2 + /* SRQ page size. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_PG_SIZE_PG_1G + /* CQ page size and level. */ + uint8_t cq_pg_size_cq_lvl; + /* CQ PBL indirect levels. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_LVL_SFT 0 + /* PBL pointer is physical start address. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_LVL_LVL_2 + /* CQ page size. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. 
*/ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_PG_SIZE_PG_1G + /* VNIC page size and level. */ + uint8_t vnic_pg_size_vnic_lvl; + /* VNIC PBL indirect levels. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_LVL_SFT 0 + /* PBL pointer is physical start address. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_LVL_LVL_2 + /* VNIC page size. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_PG_SIZE_PG_1G + /* Stat page size and level. */ + uint8_t stat_pg_size_stat_lvl; + /* Stat PBL indirect levels. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_LVL_SFT 0 + /* PBL pointer is physical start address. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_LVL_LVL_2 + /* Stat page size. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. 
*/ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_PG_SIZE_PG_1G + /* TQM slow path page size and level. */ + uint8_t tqm_sp_pg_size_tqm_sp_lvl; + /* TQM slow path PBL indirect levels. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_LVL_SFT 0 + /* PBL pointer is physical start address. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_LVL_LVL_2 + /* TQM slow path page size. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_PG_SIZE_PG_1G + /* TQM ring 0 page size and level. */ + uint8_t tqm_ring0_pg_size_tqm_ring0_lvl; + /* TQM ring 0 PBL indirect levels. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_LVL_SFT 0 + /* PBL pointer is physical start address. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_LVL_LVL_2 + /* TQM ring 0 page size. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. 
*/ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_PG_SIZE_PG_1G + /* TQM ring 1 page size and level. */ + uint8_t tqm_ring1_pg_size_tqm_ring1_lvl; + /* TQM ring 1 PBL indirect levels. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_LVL_SFT 0 + /* PBL pointer is physical start address. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_LVL_LVL_2 + /* TQM ring 1 page size. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_PG_SIZE_PG_1G + /* TQM ring 2 page size and level. */ + uint8_t tqm_ring2_pg_size_tqm_ring2_lvl; + /* TQM ring 2 PBL indirect levels. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_LVL_SFT 0 + /* PBL pointer is physical start address. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_LVL_LVL_2 + /* TQM ring 2 page size. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. 
*/ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_PG_SIZE_PG_1G + /* TQM ring 3 page size and level. */ + uint8_t tqm_ring3_pg_size_tqm_ring3_lvl; + /* TQM ring 3 PBL indirect levels. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_LVL_SFT 0 + /* PBL pointer is physical start address. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_LVL_LVL_2 + /* TQM ring 3 page size. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_PG_SIZE_PG_1G + /* TQM ring 4 page size and level. */ + uint8_t tqm_ring4_pg_size_tqm_ring4_lvl; + /* TQM ring 4 PBL indirect levels. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_LVL_SFT 0 + /* PBL pointer is physical start address. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_LVL_LVL_2 + /* TQM ring 4 page size. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. 
*/ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_PG_SIZE_PG_1G + /* TQM ring 5 page size and level. */ + uint8_t tqm_ring5_pg_size_tqm_ring5_lvl; + /* TQM ring 5 PBL indirect levels. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_LVL_SFT 0 + /* PBL pointer is physical start address. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_LVL_LVL_2 + /* TQM ring 5 page size. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_PG_SIZE_PG_1G + /* TQM ring 6 page size and level. */ + uint8_t tqm_ring6_pg_size_tqm_ring6_lvl; + /* TQM ring 6 PBL indirect levels. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_LVL_SFT 0 + /* PBL pointer is physical start address. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_LVL_LVL_2 + /* TQM ring 6 page size. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. 
*/ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_PG_SIZE_PG_1G + /* TQM ring 7 page size and level. */ + uint8_t tqm_ring7_pg_size_tqm_ring7_lvl; + /* TQM ring 7 PBL indirect levels. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_LVL_SFT 0 + /* PBL pointer is physical start address. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_LVL_LVL_2 + /* TQM ring 7 page size. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_PG_SIZE_PG_1G + /* MR/AV page size and level. */ + uint8_t mrav_pg_size_mrav_lvl; + /* MR/AV PBL indirect levels. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_LVL_SFT 0 + /* PBL pointer is physical start address. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_LVL_LVL_2 + /* MR/AV page size. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. 
*/ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_PG_SIZE_PG_1G + /* Timer page size and level. */ + uint8_t tim_pg_size_tim_lvl; + /* Timer PBL indirect levels. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_LVL_SFT 0 + /* PBL pointer is physical start address. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_LVL_LVL_2 + /* Timer page size. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_PG_SIZE_PG_1G + /* QP page directory. */ + uint64_t qpc_page_dir; + /* SRQ page directory. */ + uint64_t srq_page_dir; + /* CQ page directory. */ + uint64_t cq_page_dir; + /* VNIC page directory. */ + uint64_t vnic_page_dir; + /* Stat page directory. */ + uint64_t stat_page_dir; + /* TQM slowpath page directory. */ + uint64_t tqm_sp_page_dir; + /* TQM ring 0 page directory. */ + uint64_t tqm_ring0_page_dir; + /* TQM ring 1 page directory. */ + uint64_t tqm_ring1_page_dir; + /* TQM ring 2 page directory. */ + uint64_t tqm_ring2_page_dir; + /* TQM ring 3 page directory. */ + uint64_t tqm_ring3_page_dir; + /* TQM ring 4 page directory. */ + uint64_t tqm_ring4_page_dir; + /* TQM ring 5 page directory. */ + uint64_t tqm_ring5_page_dir; + /* TQM ring 6 page directory. */ + uint64_t tqm_ring6_page_dir; + /* TQM ring 7 page directory. */ + uint64_t tqm_ring7_page_dir; + /* MR/AV page directory. */ + uint64_t mrav_page_dir; + /* Timer page directory. */ + uint64_t tim_page_dir; + /* Number of entries to reserve for QP1 */ + uint16_t qp_num_qp1_entries; + /* Number of entries to reserve for L2 */ + uint16_t qp_num_l2_entries; + /* Number of QPs. */ + uint32_t qp_num_entries; + /* Number of SRQs. 
*/ + uint32_t srq_num_entries; + /* Number of entries to reserve for L2 */ + uint16_t srq_num_l2_entries; + /* Number of entries to reserve for L2 */ + uint16_t cq_num_l2_entries; + /* Number of CQs. */ + uint32_t cq_num_entries; + /* Number of entries to reserve for VNIC entries */ + uint16_t vnic_num_vnic_entries; + /* Number of entries to reserve for Ring table entries */ + uint16_t vnic_num_ring_table_entries; + /* Number of Stats. */ + uint32_t stat_num_entries; + /* Number of TQM slowpath entries. */ + uint32_t tqm_sp_num_entries; + /* Number of TQM ring 0 entries. */ + uint32_t tqm_ring0_num_entries; + /* Number of TQM ring 1 entries. */ + uint32_t tqm_ring1_num_entries; + /* Number of TQM ring 2 entries. */ + uint32_t tqm_ring2_num_entries; + /* Number of TQM ring 3 entries. */ + uint32_t tqm_ring3_num_entries; + /* Number of TQM ring 4 entries. */ + uint32_t tqm_ring4_num_entries; + /* Number of TQM ring 5 entries. */ + uint32_t tqm_ring5_num_entries; + /* Number of TQM ring 6 entries. */ + uint32_t tqm_ring6_num_entries; + /* Number of TQM ring 7 entries. */ + uint32_t tqm_ring7_num_entries; + /* + * If the MR/AV split reservation flag is not set, then this field + * represents the total number of MR plus AV entries. For versions + * of firmware that support the split reservation, when it is not + * specified half of the entries will be reserved for MRs and the + * other half for AVs. + * + * If the MR/AV split reservation flag is set, then this + * field is logically divided into two 16b fields. Bits `[31:16]` + * represents the `mr_num_entries` and bits `[15:0]` represents + * `av_num_entries`. The granularity of these values is defined by + * the `mrav_num_entries_unit` field returned by the + * `backing_store_qcaps` command. + */ + uint32_t mrav_num_entries; + /* Number of Timer entries. */ + uint32_t tim_num_entries; + uint8_t unused_1[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as 1 + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal + * processor, the order of writes has to be such that this field + * is written last. + */ + uint8_t valid; +} __rte_packed; + +/**************************** + * hwrm_error_recovery_qcfg * + ****************************/ + + +/* hwrm_error_recovery_qcfg_input (size:192b/24B) */ +struct hwrm_error_recovery_qcfg_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. 
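The 'valid' byte semantics described above (the response is complete only once this field reads as 1, and it is written last) suggest a completion check along the following lines. This is a sketch under the assumption that the surrounding HWRM header is included; the helper name is hypothetical, while rte_io_rmb() is an existing DPDK barrier.

#include <stdint.h>
#include <rte_io.h>	/* rte_io_rmb() */

/* Illustrative completion check: only treat the response payload as
 * readable after 'valid' has been observed as 1, with a read barrier
 * ordering the check before any payload accesses.
 */
static inline int
example_resp_is_valid(const volatile struct hwrm_func_backing_store_qcfg_output *resp)
{
	if (resp->valid != 1)
		return 0;
	rte_io_rmb();
	return 1;
}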
+ */ + uint64_t resp_addr; + uint8_t unused_0[8]; +} __rte_packed; + +/* hwrm_error_recovery_qcfg_output (size:1664b/208B) */ +struct hwrm_error_recovery_qcfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint32_t flags; + /* + * When this flag is set to 1, error recovery will be initiated + * through master function driver. + */ + #define HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FLAGS_HOST UINT32_C(0x1) + /* + * When this flag is set to 1, error recovery will be performed + * through Co processor. + */ + #define HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FLAGS_CO_CPU UINT32_C(0x2) + /* + * Driver Polling frequency. This value is in units of 100msec. + * Typical value would be 10 to indicate 1sec. + * Drivers can poll FW health status, Heartbeat, reset_counter with + * this frequency. + */ + uint32_t driver_polling_freq; + /* + * This value is in units of 100msec. + * Typical value would be 30 to indicate 3sec. + * Master function wait period from detecting a fatal error to + * initiating reset. In this time period Master PF expects every + * active driver will detect fatal error. + */ + uint32_t master_func_wait_period; + /* + * This value is in units of 100msec. + * Typical value would be 50 to indicate 5sec. + * Normal function wait period from fatal error detection to + * polling FW health status. In this time period, drivers should not + * do any PCIe MMIO transaction and should not send any HWRM commands. + */ + uint32_t normal_func_wait_period; + /* + * This value is in units of 100msec. + * Typical value would be 20 to indicate 2sec. + * This field indicates that, master function wait period after chip + * reset. After this time, master function should reinitialize with + * FW. + */ + uint32_t master_func_wait_period_after_reset; + /* + * This value is in units of 100msec. + * Typical value would be 60 to indicate 6sec. + * This field is applicable to both master and normal functions. + * Even after chip reset, if FW status not changed to ready, + * then all the functions can poll for this much time and bailout. + */ + uint32_t max_bailout_time_after_reset; + /* + * FW health status register. + * Lower 2 bits indicates address space location and upper 30 bits + * indicates upper 30bits of the register address. + * A value of 0xFFFF-FFFF indicates this register does not exist. + */ + uint32_t fw_health_status_reg; + /* Lower 2 bits indicates address space location. */ + #define HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FW_HEALTH_STATUS_REG_ADDR_SPACE_MASK \ + UINT32_C(0x3) + #define HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FW_HEALTH_STATUS_REG_ADDR_SPACE_SFT \ + 0 + /* + * If value is 0, this register is located in PCIe config space. + * Drivers have to map appropriate window to access this + * register. + */ + #define HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FW_HEALTH_STATUS_REG_ADDR_SPACE_PCIE_CFG \ + UINT32_C(0x0) + /* + * If value is 1, this register is located in GRC address space. + * Drivers have to map appropriate window to access this + * register. + */ + #define HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FW_HEALTH_STATUS_REG_ADDR_SPACE_GRC \ + UINT32_C(0x1) + /* + * If value is 2, this register is located in first BAR address + * space. Drivers have to map appropriate window to access this + * register. 
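The recovery timing fields above (driver_polling_freq, master_func_wait_period, and the related wait periods) are all expressed in units of 100 msec. A small, purely illustrative conversion helper for use in a driver poll loop:

#include <stdint.h>

/* Illustrative conversion: a value of 10 in these fields means 1000 ms. */
static inline uint32_t
example_hwrm_100ms_units_to_ms(uint32_t units_of_100ms)
{
	return units_of_100ms * 100;
}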
+ */ + #define HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FW_HEALTH_STATUS_REG_ADDR_SPACE_BAR0 \ + UINT32_C(0x2) + /* + * If value is 3, this register is located in second BAR address + * space. Drivers have to map appropriate window to access this + * register. + */ + #define HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FW_HEALTH_STATUS_REG_ADDR_SPACE_BAR1 \ + UINT32_C(0x3) + #define HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FW_HEALTH_STATUS_REG_ADDR_SPACE_LAST \ + HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FW_HEALTH_STATUS_REG_ADDR_SPACE_BAR1 + /* Upper 30bits of the register address. */ + #define HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FW_HEALTH_STATUS_REG_ADDR_MASK \ + UINT32_C(0xfffffffc) + #define HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FW_HEALTH_STATUS_REG_ADDR_SFT \ + 2 + /* + * FW HeartBeat register. + * Lower 2 bits indicates address space location and upper 30 bits + * indicates actual address. + * A value of 0xFFFF-FFFF indicates this register does not exist. + */ + uint32_t fw_heartbeat_reg; + /* Lower 2 bits indicates address space location. */ + #define HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FW_HEARTBEAT_REG_ADDR_SPACE_MASK \ + UINT32_C(0x3) + #define HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FW_HEARTBEAT_REG_ADDR_SPACE_SFT \ + 0 + /* + * If value is 0, this register is located in PCIe config space. + * Drivers have to map appropriate window to access this + * register. + */ + #define HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FW_HEARTBEAT_REG_ADDR_SPACE_PCIE_CFG \ + UINT32_C(0x0) + /* + * If value is 1, this register is located in GRC address space. + * Drivers have to map appropriate window to access this + * register. + */ + #define HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FW_HEARTBEAT_REG_ADDR_SPACE_GRC \ + UINT32_C(0x1) + /* + * If value is 2, this register is located in first BAR address + * space. Drivers have to map appropriate window to access this + * register. + */ + #define HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FW_HEARTBEAT_REG_ADDR_SPACE_BAR0 \ + UINT32_C(0x2) + /* + * If value is 3, this register is located in second BAR address + * space. Drivers have to map appropriate window to access this + * register. + */ + #define HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FW_HEARTBEAT_REG_ADDR_SPACE_BAR1 \ + UINT32_C(0x3) + #define HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FW_HEARTBEAT_REG_ADDR_SPACE_LAST \ + HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FW_HEARTBEAT_REG_ADDR_SPACE_BAR1 + /* Upper 30bits of the register address. */ + #define HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FW_HEARTBEAT_REG_ADDR_MASK \ + UINT32_C(0xfffffffc) + #define HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FW_HEARTBEAT_REG_ADDR_SFT \ + 2 + /* + * FW reset counter. + * Lower 2 bits indicates address space location and upper 30 bits + * indicates actual address. + * A value of 0xFFFF-FFFF indicates this register does not exist. + */ + uint32_t fw_reset_cnt_reg; + /* Lower 2 bits indicates address space location. */ + #define HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FW_RESET_CNT_REG_ADDR_SPACE_MASK \ + UINT32_C(0x3) + #define HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FW_RESET_CNT_REG_ADDR_SPACE_SFT \ + 0 + /* + * If value is 0, this register is located in PCIe config space. + * Drivers have to map appropriate window to access this + * register. + */ + #define HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FW_RESET_CNT_REG_ADDR_SPACE_PCIE_CFG \ + UINT32_C(0x0) + /* + * If value is 1, this register is located in GRC address space. + * Drivers have to map appropriate window to access this + * register.
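As a sketch of the register-address encoding described above (the low 2 bits select the address space, the remaining 30 bits carry the register address), a driver could split fw_health_status_reg as follows; the helper name is illustrative and assumes the surrounding HWRM header is included.

#include <stdint.h>

/* Illustrative decode of fw_health_status_reg using the masks defined
 * above: ADDR_SPACE_MASK isolates the location (PCIe config, GRC, BAR0
 * or BAR1) and ADDR_MASK keeps the upper 30 address bits.
 */
static inline void
example_decode_health_status_reg(uint32_t reg, uint32_t *addr_space,
				 uint32_t *addr)
{
	*addr_space = reg &
	    HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FW_HEALTH_STATUS_REG_ADDR_SPACE_MASK;
	*addr = reg &
	    HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FW_HEALTH_STATUS_REG_ADDR_MASK;
}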
+ */ + #define HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FW_RESET_CNT_REG_ADDR_SPACE_GRC \ + UINT32_C(0x1) + /* + * If value is 2, this register is located in first BAR address + * space. Drivers have to map appropriate window to access this + * register. + */ + #define HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FW_RESET_CNT_REG_ADDR_SPACE_BAR0 \ + UINT32_C(0x2) + /* + * If value is 3, this register is located in second BAR address + * space. Drivers have to map appropriate window to access this + * register. + */ + #define HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FW_RESET_CNT_REG_ADDR_SPACE_BAR1 \ + UINT32_C(0x3) + #define HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FW_RESET_CNT_REG_ADDR_SPACE_LAST \ + HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FW_RESET_CNT_REG_ADDR_SPACE_BAR1 + /* Upper 30bits of the register address. */ + #define HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FW_RESET_CNT_REG_ADDR_MASK \ + UINT32_C(0xfffffffc) + #define HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FW_RESET_CNT_REG_ADDR_SFT \ + 2 + /* + * Reset Inprogress Register address for PFs. + * Lower 2 bits indicates address space location and upper 30 bits + * indicates actual address. + * A value of 0xFFFF-FFFF indicates this register does not exist. + */ + uint32_t reset_inprogress_reg; + /* Lower 2 bits indicates address space location. */ + #define HWRM_ERROR_RECOVERY_QCFG_OUTPUT_RESET_INPROGRESS_REG_ADDR_SPACE_MASK \ + UINT32_C(0x3) + #define HWRM_ERROR_RECOVERY_QCFG_OUTPUT_RESET_INPROGRESS_REG_ADDR_SPACE_SFT \ + 0 + /* + * If value is 0, this register is located in PCIe config space. + * Drivers have to map appropriate window to access this + * register. + */ + #define HWRM_ERROR_RECOVERY_QCFG_OUTPUT_RESET_INPROGRESS_REG_ADDR_SPACE_PCIE_CFG \ + UINT32_C(0x0) + /* + * If value is 1, this register is located in GRC address space. + * Drivers have to map appropriate window to access this + * register. + */ + #define HWRM_ERROR_RECOVERY_QCFG_OUTPUT_RESET_INPROGRESS_REG_ADDR_SPACE_GRC \ + UINT32_C(0x1) + /* + * If value is 2, this register is located in first BAR address + * space. Drivers have to map appropriate window to access this + * register. + */ + #define HWRM_ERROR_RECOVERY_QCFG_OUTPUT_RESET_INPROGRESS_REG_ADDR_SPACE_BAR0 \ + UINT32_C(0x2) + /* + * If value is 3, this register is located in second BAR address + * space. Drivers have to map appropriate window to access this + * register. + */ + #define HWRM_ERROR_RECOVERY_QCFG_OUTPUT_RESET_INPROGRESS_REG_ADDR_SPACE_BAR1 \ + UINT32_C(0x3) + #define HWRM_ERROR_RECOVERY_QCFG_OUTPUT_RESET_INPROGRESS_REG_ADDR_SPACE_LAST \ + HWRM_ERROR_RECOVERY_QCFG_OUTPUT_RESET_INPROGRESS_REG_ADDR_SPACE_BAR1 + /* Upper 30bits of the register address. */ + #define HWRM_ERROR_RECOVERY_QCFG_OUTPUT_RESET_INPROGRESS_REG_ADDR_MASK \ + UINT32_C(0xfffffffc) + #define HWRM_ERROR_RECOVERY_QCFG_OUTPUT_RESET_INPROGRESS_REG_ADDR_SFT \ + 2 + /* This field indicates the mask value for reset_inprogress_reg. */ + uint32_t reset_inprogress_reg_mask; + uint8_t unused_0[3]; + /* + * Array of registers and value count to reset the Chip + * Each array count has reset_reg, reset_reg_val, delay_after_reset + * in TLV format. Depending upon Chip type, number of reset registers + * will vary. Drivers have to write reset_reg_val in the reset_reg + * location in the same sequence in order to recover from a fatal + * error. + */ + uint8_t reg_array_cnt; + /* + * Reset register. + * Lower 2 bits indicates address space location and upper 30 bits + * indicates actual address. + * A value of 0xFFFF-FFFF indicates this register does not exist. 
+ */ + uint32_t reset_reg[16]; + /* Lower 2 bits indicates address space location. */ + #define HWRM_ERROR_RECOVERY_QCFG_OUTPUT_RESET_REG_ADDR_SPACE_MASK \ + UINT32_C(0x3) + #define HWRM_ERROR_RECOVERY_QCFG_OUTPUT_RESET_REG_ADDR_SPACE_SFT 0 + /* + * If value is 0, this register is located in PCIe config space. + * Drivers have to map appropriate window to access this + * register. + */ + #define HWRM_ERROR_RECOVERY_QCFG_OUTPUT_RESET_REG_ADDR_SPACE_PCIE_CFG \ + UINT32_C(0x0) + /* + * If value is 1, this register is located in GRC address space. + * Drivers have to map appropriate window to access this + * register. + */ + #define HWRM_ERROR_RECOVERY_QCFG_OUTPUT_RESET_REG_ADDR_SPACE_GRC \ + UINT32_C(0x1) + /* + * If value is 2, this register is located in first BAR address + * space. Drivers have to map appropriate window to access this + * register. + */ + #define HWRM_ERROR_RECOVERY_QCFG_OUTPUT_RESET_REG_ADDR_SPACE_BAR0 \ + UINT32_C(0x2) + /* + * If value is 3, this register is located in second BAR address + * space. Drivers have to map appropriate window to access this + * register. + */ + #define HWRM_ERROR_RECOVERY_QCFG_OUTPUT_RESET_REG_ADDR_SPACE_BAR1 \ + UINT32_C(0x3) + #define HWRM_ERROR_RECOVERY_QCFG_OUTPUT_RESET_REG_ADDR_SPACE_LAST \ + HWRM_ERROR_RECOVERY_QCFG_OUTPUT_RESET_REG_ADDR_SPACE_BAR1 + /* Upper 30bits of the register address. */ + #define HWRM_ERROR_RECOVERY_QCFG_OUTPUT_RESET_REG_ADDR_MASK \ + UINT32_C(0xfffffffc) + #define HWRM_ERROR_RECOVERY_QCFG_OUTPUT_RESET_REG_ADDR_SFT 2 + /* Value to be written in reset_reg to reset the controller. */ + uint32_t reset_reg_val[16]; + /* + * This value is in units of 1msec. + * Typical value would be 10 to indicate 10msec. + * Some of the operations like Core reset require delay before + * accessing PCIE MMIO register space. + * If this value is non-zero, drivers have to wait for + * this much time after writing reset_reg_val in reset_reg. + */ + uint8_t delay_after_reset[16]; + /* + * Error recovery counter. + * Lower 2 bits indicates address space location and upper 30 bits + * indicates actual address. + * A value of 0xFFFF-FFFF indicates this register does not exist. + */ + uint32_t err_recovery_cnt_reg; + /* Lower 2 bits indicates address space location. */ + #define HWRM_ERROR_RECOVERY_QCFG_OUTPUT_ERR_RECOVERY_CNT_REG_ADDR_SPACE_MASK \ + UINT32_C(0x3) + #define HWRM_ERROR_RECOVERY_QCFG_OUTPUT_ERR_RECOVERY_CNT_REG_ADDR_SPACE_SFT \ + 0 + /* + * If value is 0, this register is located in PCIe config space. + * Drivers have to map appropriate window to access this + * register. + */ + #define HWRM_ERROR_RECOVERY_QCFG_OUTPUT_ERR_RECOVERY_CNT_REG_ADDR_SPACE_PCIE_CFG \ + UINT32_C(0x0) + /* + * If value is 1, this register is located in GRC address space. + * Drivers have to map appropriate window to access this + * register. + */ + #define HWRM_ERROR_RECOVERY_QCFG_OUTPUT_ERR_RECOVERY_CNT_REG_ADDR_SPACE_GRC \ + UINT32_C(0x1) + /* + * If value is 2, this register is located in first BAR address + * space. Drivers have to map appropriate window to access this + * register. + */ + #define HWRM_ERROR_RECOVERY_QCFG_OUTPUT_ERR_RECOVERY_CNT_REG_ADDR_SPACE_BAR0 \ + UINT32_C(0x2) + /* + * If value is 3, this register is located in second BAR address + * space. Drivers have to map appropriate window to access this + * register. 
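reg_array_cnt, reset_reg[], reset_reg_val[] and delay_after_reset[] describe a write-and-wait sequence. A sketch of how a recovery path might walk it is below; write_reg() is a hypothetical helper that resolves the 2-bit address-space encoding, and the little-endian conversion follows the usual HWRM convention.

#include <rte_byteorder.h>
#include <rte_cycles.h>

/* Sketch: apply the chip reset sequence from hwrm_error_recovery_qcfg_output.
 * write_reg() is a placeholder that maps the encoded address space and
 * performs the register write.
 */
static void
apply_reset_sequence(const struct hwrm_error_recovery_qcfg_output *resp)
{
	uint8_t i;

	for (i = 0; i < resp->reg_array_cnt && i < 16; i++) {
		write_reg(rte_le_to_cpu_32(resp->reset_reg[i]),
			  rte_le_to_cpu_32(resp->reset_reg_val[i]));
		if (resp->delay_after_reset[i])
			rte_delay_ms(resp->delay_after_reset[i]);
	}
}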
+ */ + #define HWRM_ERROR_RECOVERY_QCFG_OUTPUT_ERR_RECOVERY_CNT_REG_ADDR_SPACE_BAR1 \ + UINT32_C(0x3) + #define HWRM_ERROR_RECOVERY_QCFG_OUTPUT_ERR_RECOVERY_CNT_REG_ADDR_SPACE_LAST \ + HWRM_ERROR_RECOVERY_QCFG_OUTPUT_ERR_RECOVERY_CNT_REG_ADDR_SPACE_BAR1 + /* Upper 30bits of the register address. */ + #define HWRM_ERROR_RECOVERY_QCFG_OUTPUT_ERR_RECOVERY_CNT_REG_ADDR_MASK \ + UINT32_C(0xfffffffc) + #define HWRM_ERROR_RECOVERY_QCFG_OUTPUT_ERR_RECOVERY_CNT_REG_ADDR_SFT \ + 2 + uint8_t unused_1[3]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal + * processor, the order of writes has to be such that this field + * is written last. + */ + uint8_t valid; +} __rte_packed; + +/*********************** + * hwrm_func_vlan_qcfg * + ***********************/ + + +/* hwrm_func_vlan_qcfg_input (size:192b/24B) */ +struct hwrm_func_vlan_qcfg_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* + * Function ID of the function that is being + * configured. + * If set to 0xFF... (All Fs), then the configuration is + * for the requesting function. + */ + uint16_t fid; + uint8_t unused_0[6]; +} __rte_packed; + +/* hwrm_func_vlan_qcfg_output (size:320b/40B) */ +struct hwrm_func_vlan_qcfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint64_t unused_0; + /* S-TAG VLAN identifier configured for the function. */ + uint16_t stag_vid; + /* S-TAG PCP value configured for the function. */ + uint8_t stag_pcp; + uint8_t unused_1; + /* + * S-TAG TPID value configured for the function. This field is specified in + * network byte order. + */ + uint16_t stag_tpid; + /* C-TAG VLAN identifier configured for the function. */ + uint16_t ctag_vid; + /* C-TAG PCP value configured for the function. */ + uint8_t ctag_pcp; + uint8_t unused_2; + /* + * C-TAG TPID value configured for the function. This field is specified in + * network byte order. + */ + uint16_t ctag_tpid; + /* Future use. */ + uint32_t rsvd2; + /* Future use. */ + uint32_t rsvd3; + uint8_t unused_3[3]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. 
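Every *_output structure in this file ends with a valid byte that firmware writes last. A sketch of the usual consumption pattern is below: poll the byte, then order the payload reads behind a read barrier; the timeout handling is illustrative only.

#include <rte_atomic.h>
#include <rte_cycles.h>

/* Sketch: wait until firmware marks a DMA'd HWRM response complete. */
static int
hwrm_wait_resp_valid(const volatile uint8_t *valid, unsigned int timeout_ms)
{
	while (timeout_ms--) {
		if (*valid == 1) {
			rte_io_rmb();	/* read valid before the payload */
			return 0;
		}
		rte_delay_ms(1);
	}
	return -1;	/* response never became valid */
}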
This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/********************** + * hwrm_func_vlan_cfg * + **********************/ + + +/* hwrm_func_vlan_cfg_input (size:384b/48B) */ +struct hwrm_func_vlan_cfg_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* + * Function ID of the function that is being + * configured. + * If set to 0xFF... (All Fs), then the configuration is + * for the requesting function. + */ + uint16_t fid; + uint8_t unused_0[2]; + uint32_t enables; + /* + * This bit must be '1' for the stag_vid field to be + * configured. + */ + #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_STAG_VID UINT32_C(0x1) + /* + * This bit must be '1' for the ctag_vid field to be + * configured. + */ + #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_CTAG_VID UINT32_C(0x2) + /* + * This bit must be '1' for the stag_pcp field to be + * configured. + */ + #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_STAG_PCP UINT32_C(0x4) + /* + * This bit must be '1' for the ctag_pcp field to be + * configured. + */ + #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_CTAG_PCP UINT32_C(0x8) + /* + * This bit must be '1' for the stag_tpid field to be + * configured. + */ + #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_STAG_TPID UINT32_C(0x10) + /* + * This bit must be '1' for the ctag_tpid field to be + * configured. + */ + #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_CTAG_TPID UINT32_C(0x20) + /* S-TAG VLAN identifier configured for the function. */ + uint16_t stag_vid; + /* S-TAG PCP value configured for the function. */ + uint8_t stag_pcp; + uint8_t unused_1; + /* + * S-TAG TPID value configured for the function. This field is specified in + * network byte order. + */ + uint16_t stag_tpid; + /* C-TAG VLAN identifier configured for the function. */ + uint16_t ctag_vid; + /* C-TAG PCP value configured for the function. */ + uint8_t ctag_pcp; + uint8_t unused_2; + /* + * C-TAG TPID value configured for the function. This field is specified in + * network byte order. + */ + uint16_t ctag_tpid; + /* Future use. */ + uint32_t rsvd1; + /* Future use. */ + uint32_t rsvd2; + uint8_t unused_3[4]; +} __rte_packed; + +/* hwrm_func_vlan_cfg_output (size:128b/16B) */ +struct hwrm_func_vlan_cfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. 
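The enables bitmap above gates which VLAN fields firmware will act on. A sketch of filling the request to set only an S-TAG VID on the requesting function; building and sending the message is assumed to happen elsewhere.

#include <rte_byteorder.h>

/* Sketch: configure only stag_vid; the other fields are ignored because
 * their enable bits stay clear.
 */
static void
fill_func_vlan_cfg(struct hwrm_func_vlan_cfg_input *req, uint16_t vid)
{
	req->fid = rte_cpu_to_le_16(0xffff);	/* 0xFF..: requesting function */
	req->enables =
	    rte_cpu_to_le_32(HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_STAG_VID);
	req->stag_vid = rte_cpu_to_le_16(vid);
}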
*/ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/******************************* + * hwrm_func_vf_vnic_ids_query * + *******************************/ + + +/* hwrm_func_vf_vnic_ids_query_input (size:256b/32B) */ +struct hwrm_func_vf_vnic_ids_query_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* + * This value is used to identify a Virtual Function (VF). + * The scope of VF ID is local within a PF. + */ + uint16_t vf_id; + uint8_t unused_0[2]; + /* Max number of vnic ids in vnic id table */ + uint32_t max_vnic_id_cnt; + /* This is the address for VF VNIC ID table */ + uint64_t vnic_id_tbl_addr; +} __rte_packed; + +/* hwrm_func_vf_vnic_ids_query_output (size:128b/16B) */ +struct hwrm_func_vf_vnic_ids_query_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* + * Actual number of vnic ids + * + * Each VNIC ID is written as a 32-bit number. + */ + uint32_t vnic_id_cnt; + uint8_t unused_0[3]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/*********************** + * hwrm_func_vf_bw_cfg * + ***********************/ + + +/* hwrm_func_vf_bw_cfg_input (size:960b/120B) */ +struct hwrm_func_vf_bw_cfg_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. 
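hwrm_func_vf_vnic_ids_query returns its data indirectly: the host supplies a DMA buffer sized for max_vnic_id_cnt 32-bit entries and the response reports how many were actually written. A sketch of the request setup; alloc_dma() is a stand-in for whatever DMA-capable allocator the caller uses.

#include <rte_byteorder.h>

/* Sketch: prepare a VNIC ID query for one VF.  alloc_dma() is hypothetical
 * and is assumed to return the bus/IOVA address of a buffer of the given
 * size.
 */
static void
prepare_vnic_ids_query(struct hwrm_func_vf_vnic_ids_query_input *req,
		       uint16_t vf_id, uint32_t max_cnt)
{
	uint64_t tbl_iova = alloc_dma((size_t)max_cnt * sizeof(uint32_t));

	req->vf_id = rte_cpu_to_le_16(vf_id);
	req->max_vnic_id_cnt = rte_cpu_to_le_32(max_cnt);
	req->vnic_id_tbl_addr = rte_cpu_to_le_64(tbl_iova);
	/* On completion, vnic_id_cnt 32-bit IDs can be read from the table. */
}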
+ */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* + * The number of VF functions that are being configured. + * The cmd space allows up to 50 VFs' BW to be configured with one cmd. + */ + uint16_t num_vfs; + uint16_t unused[3]; + /* These 16-bit fields contain the VF fid and the rate scale percentage. */ + uint16_t vfn[48]; + /* The physical VF id the adjustment will be made to. */ + #define HWRM_FUNC_VF_BW_CFG_INPUT_VFN_VFID_MASK UINT32_C(0xfff) + #define HWRM_FUNC_VF_BW_CFG_INPUT_VFN_VFID_SFT 0 + /* + * This field configures the rate scale percentage of the VF as specified + * by the physical VF id. + */ + #define HWRM_FUNC_VF_BW_CFG_INPUT_VFN_RATE_MASK UINT32_C(0xf000) + #define HWRM_FUNC_VF_BW_CFG_INPUT_VFN_RATE_SFT 12 + /* 0% of the max tx rate */ + #define HWRM_FUNC_VF_BW_CFG_INPUT_VFN_RATE_PCT_0 \ + (UINT32_C(0x0) << 12) + /* 6.66% of the max tx rate */ + #define HWRM_FUNC_VF_BW_CFG_INPUT_VFN_RATE_PCT_6_66 \ + (UINT32_C(0x1) << 12) + /* 13.33% of the max tx rate */ + #define HWRM_FUNC_VF_BW_CFG_INPUT_VFN_RATE_PCT_13_33 \ + (UINT32_C(0x2) << 12) + /* 20% of the max tx rate */ + #define HWRM_FUNC_VF_BW_CFG_INPUT_VFN_RATE_PCT_20 \ + (UINT32_C(0x3) << 12) + /* 26.66% of the max tx rate */ + #define HWRM_FUNC_VF_BW_CFG_INPUT_VFN_RATE_PCT_26_66 \ + (UINT32_C(0x4) << 12) + /* 33% of the max tx rate */ + #define HWRM_FUNC_VF_BW_CFG_INPUT_VFN_RATE_PCT_33_33 \ + (UINT32_C(0x5) << 12) + /* 40% of the max tx rate */ + #define HWRM_FUNC_VF_BW_CFG_INPUT_VFN_RATE_PCT_40 \ + (UINT32_C(0x6) << 12) + /* 46.66% of the max tx rate */ + #define HWRM_FUNC_VF_BW_CFG_INPUT_VFN_RATE_PCT_46_66 \ + (UINT32_C(0x7) << 12) + /* 53.33% of the max tx rate */ + #define HWRM_FUNC_VF_BW_CFG_INPUT_VFN_RATE_PCT_53_33 \ + (UINT32_C(0x8) << 12) + /* 60% of the max tx rate */ + #define HWRM_FUNC_VF_BW_CFG_INPUT_VFN_RATE_PCT_60 \ + (UINT32_C(0x9) << 12) + /* 66.66% of the max tx rate */ + #define HWRM_FUNC_VF_BW_CFG_INPUT_VFN_RATE_PCT_66_66 \ + (UINT32_C(0xa) << 12) + /* 53.33% of the max tx rate */ + #define HWRM_FUNC_VF_BW_CFG_INPUT_VFN_RATE_PCT_73_33 \ + (UINT32_C(0xb) << 12) + /* 80% of the max tx rate */ + #define HWRM_FUNC_VF_BW_CFG_INPUT_VFN_RATE_PCT_80 \ + (UINT32_C(0xc) << 12) + /* 86.66% of the max tx rate */ + #define HWRM_FUNC_VF_BW_CFG_INPUT_VFN_RATE_PCT_86_66 \ + (UINT32_C(0xd) << 12) + /* 93.33% of the max tx rate */ + #define HWRM_FUNC_VF_BW_CFG_INPUT_VFN_RATE_PCT_93_33 \ + (UINT32_C(0xe) << 12) + /* 100% of the max tx rate */ + #define HWRM_FUNC_VF_BW_CFG_INPUT_VFN_RATE_PCT_100 \ + (UINT32_C(0xf) << 12) + #define HWRM_FUNC_VF_BW_CFG_INPUT_VFN_RATE_LAST \ + HWRM_FUNC_VF_BW_CFG_INPUT_VFN_RATE_PCT_100 +} __rte_packed; + +/* hwrm_func_vf_bw_cfg_output (size:128b/16B) */ +struct hwrm_func_vf_bw_cfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. 
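Each vfn[] entry above carries a 12-bit VF ID in bits 11:0 and a 4-bit rate selector in bits 15:12. A sketch of packing one entry; a real request would also set num_vfs and convert the array to little endian.

/* Sketch: pack one hwrm_func_vf_bw_cfg_input vfn[] entry. */
static uint16_t
pack_vf_bw_entry(uint16_t vf_fid, uint16_t rate_sel)
{
	return (vf_fid & HWRM_FUNC_VF_BW_CFG_INPUT_VFN_VFID_MASK) |
	       (rate_sel & HWRM_FUNC_VF_BW_CFG_INPUT_VFN_RATE_MASK);
}

/* e.g. cap VF 3 at roughly half of the max tx rate:
 * req->vfn[0] = rte_cpu_to_le_16(pack_vf_bw_entry(3,
 *	HWRM_FUNC_VF_BW_CFG_INPUT_VFN_RATE_PCT_46_66));
 */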
*/ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/************************ + * hwrm_func_vf_bw_qcfg * + ************************/ + + +/* hwrm_func_vf_bw_qcfg_input (size:960b/120B) */ +struct hwrm_func_vf_bw_qcfg_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* + * The number of VF functions that are being queried. + * The inline response space allows the host to query up to 50 VFs' + * rate scale percentage + */ + uint16_t num_vfs; + uint16_t unused[3]; + /* These 16-bit fields contain the VF fid */ + uint16_t vfn[48]; + /* The physical VF id of interest */ + #define HWRM_FUNC_VF_BW_QCFG_INPUT_VFN_VFID_MASK UINT32_C(0xfff) + #define HWRM_FUNC_VF_BW_QCFG_INPUT_VFN_VFID_SFT 0 +} __rte_packed; + +/* hwrm_func_vf_bw_qcfg_output (size:960b/120B) */ +struct hwrm_func_vf_bw_qcfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* + * The number of VF functions that are being queried. + * The inline response space allows the host to query up to 50 VFs' rate + * scale percentage + */ + uint16_t num_vfs; + uint16_t unused[3]; + /* These 16-bit fields contain the VF fid and the rate scale percentage. */ + uint16_t vfn[48]; + /* The physical VF id the adjustment will be made to. */ + #define HWRM_FUNC_VF_BW_QCFG_OUTPUT_VFN_VFID_MASK UINT32_C(0xfff) + #define HWRM_FUNC_VF_BW_QCFG_OUTPUT_VFN_VFID_SFT 0 + /* + * This field configures the rate scale percentage of the VF as specified + * by the physical VF id. 
+ */ + #define HWRM_FUNC_VF_BW_QCFG_OUTPUT_VFN_RATE_MASK UINT32_C(0xf000) + #define HWRM_FUNC_VF_BW_QCFG_OUTPUT_VFN_RATE_SFT 12 + /* 0% of the max tx rate */ + #define HWRM_FUNC_VF_BW_QCFG_OUTPUT_VFN_RATE_PCT_0 \ + (UINT32_C(0x0) << 12) + /* 6.66% of the max tx rate */ + #define HWRM_FUNC_VF_BW_QCFG_OUTPUT_VFN_RATE_PCT_6_66 \ + (UINT32_C(0x1) << 12) + /* 13.33% of the max tx rate */ + #define HWRM_FUNC_VF_BW_QCFG_OUTPUT_VFN_RATE_PCT_13_33 \ + (UINT32_C(0x2) << 12) + /* 20% of the max tx rate */ + #define HWRM_FUNC_VF_BW_QCFG_OUTPUT_VFN_RATE_PCT_20 \ + (UINT32_C(0x3) << 12) + /* 26.66% of the max tx rate */ + #define HWRM_FUNC_VF_BW_QCFG_OUTPUT_VFN_RATE_PCT_26_66 \ + (UINT32_C(0x4) << 12) + /* 33% of the max tx rate */ + #define HWRM_FUNC_VF_BW_QCFG_OUTPUT_VFN_RATE_PCT_33_33 \ + (UINT32_C(0x5) << 12) + /* 40% of the max tx rate */ + #define HWRM_FUNC_VF_BW_QCFG_OUTPUT_VFN_RATE_PCT_40 \ + (UINT32_C(0x6) << 12) + /* 46.66% of the max tx rate */ + #define HWRM_FUNC_VF_BW_QCFG_OUTPUT_VFN_RATE_PCT_46_66 \ + (UINT32_C(0x7) << 12) + /* 53.33% of the max tx rate */ + #define HWRM_FUNC_VF_BW_QCFG_OUTPUT_VFN_RATE_PCT_53_33 \ + (UINT32_C(0x8) << 12) + /* 60% of the max tx rate */ + #define HWRM_FUNC_VF_BW_QCFG_OUTPUT_VFN_RATE_PCT_60 \ + (UINT32_C(0x9) << 12) + /* 66.66% of the max tx rate */ + #define HWRM_FUNC_VF_BW_QCFG_OUTPUT_VFN_RATE_PCT_66_66 \ + (UINT32_C(0xa) << 12) + /* 53.33% of the max tx rate */ + #define HWRM_FUNC_VF_BW_QCFG_OUTPUT_VFN_RATE_PCT_73_33 \ + (UINT32_C(0xb) << 12) + /* 80% of the max tx rate */ + #define HWRM_FUNC_VF_BW_QCFG_OUTPUT_VFN_RATE_PCT_80 \ + (UINT32_C(0xc) << 12) + /* 86.66% of the max tx rate */ + #define HWRM_FUNC_VF_BW_QCFG_OUTPUT_VFN_RATE_PCT_86_66 \ + (UINT32_C(0xd) << 12) + /* 93.33% of the max tx rate */ + #define HWRM_FUNC_VF_BW_QCFG_OUTPUT_VFN_RATE_PCT_93_33 \ + (UINT32_C(0xe) << 12) + /* 100% of the max tx rate */ + #define HWRM_FUNC_VF_BW_QCFG_OUTPUT_VFN_RATE_PCT_100 \ + (UINT32_C(0xf) << 12) + #define HWRM_FUNC_VF_BW_QCFG_OUTPUT_VFN_RATE_LAST \ + HWRM_FUNC_VF_BW_QCFG_OUTPUT_VFN_RATE_PCT_100 + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/*************************** + * hwrm_func_drv_if_change * + ***************************/ + + +/* hwrm_func_drv_if_change_input (size:192b/24B) */ +struct hwrm_func_drv_if_change_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. 
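The query response reuses the same 12/4-bit layout in its vfn[] array. A sketch of unpacking one returned entry into the VF ID and the rate selector.

#include <rte_byteorder.h>

/* Sketch: split one hwrm_func_vf_bw_qcfg_output vfn[] entry. */
static void
unpack_vf_bw_entry(uint16_t le_entry, uint16_t *vf_fid, uint16_t *rate_sel)
{
	uint16_t entry = rte_le_to_cpu_16(le_entry);

	*vf_fid = entry & HWRM_FUNC_VF_BW_QCFG_OUTPUT_VFN_VFID_MASK;
	*rate_sel = entry & HWRM_FUNC_VF_BW_QCFG_OUTPUT_VFN_RATE_MASK;
	/* rate_sel compares directly against the ..._RATE_PCT_* values. */
}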
This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + uint32_t flags; + /* + * When this bit is '1', the function driver is indicating + * that the IF state is changing to UP state. The call should + * be made at the beginning of the driver's open call before + * resources are allocated. After making the call, the driver + * should check the response to see if any resources may have + * changed (see the response below). If the driver fails + * the open call, the driver should make this call again with + * this bit cleared to indicate that the IF state is not UP. + * During the driver's close call when the IF state is changing + * to DOWN, the driver should make this call with the bit cleared + * after all resources have been freed. + */ + #define HWRM_FUNC_DRV_IF_CHANGE_INPUT_FLAGS_UP UINT32_C(0x1) + uint32_t unused; +} __rte_packed; + +/* hwrm_func_drv_if_change_output (size:128b/16B) */ +struct hwrm_func_drv_if_change_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint32_t flags; + /* + * When this bit is '1', it indicates that the resources reserved + * for this function may have changed. The driver should check + * resource capabilities and reserve resources again before + * allocating resources. + */ + #define HWRM_FUNC_DRV_IF_CHANGE_OUTPUT_FLAGS_RESC_CHANGE \ + UINT32_C(0x1) + /* + * When this bit is '1', it indicates that the firmware got changed / reset. + * The driver should do complete re-initialization when that bit is set. + */ + #define HWRM_FUNC_DRV_IF_CHANGE_OUTPUT_FLAGS_HOT_FW_RESET_DONE \ + UINT32_C(0x2) + uint8_t unused_0[3]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/******************************* + * hwrm_func_host_pf_ids_query * + *******************************/ + + +/* hwrm_func_host_pf_ids_query_input (size:192b/24B) */ +struct hwrm_func_host_pf_ids_query_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. 
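hwrm_func_drv_if_change is sent with FLAGS_UP at the start of the open path, and the response flags tell the driver how much state to rebuild. A sketch of that decision; requery_resources() and full_reinit() are placeholders for the driver's own routines.

#include <rte_byteorder.h>

/* Sketch: react to the hwrm_func_drv_if_change response on the open path. */
static void
handle_if_change(const struct hwrm_func_drv_if_change_output *resp)
{
	uint32_t flags = rte_le_to_cpu_32(resp->flags);

	if (flags & HWRM_FUNC_DRV_IF_CHANGE_OUTPUT_FLAGS_HOT_FW_RESET_DONE)
		full_reinit();		/* firmware was reset underneath us */
	else if (flags & HWRM_FUNC_DRV_IF_CHANGE_OUTPUT_FLAGS_RESC_CHANGE)
		requery_resources();	/* reservations may have changed */
}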
+ */ + uint64_t resp_addr; + uint8_t host; + /* + * # If this bit is set to '1', the query will contain PF(s) + * belongs to SOC host. + */ + #define HWRM_FUNC_HOST_PF_IDS_QUERY_INPUT_HOST_SOC UINT32_C(0x1) + /* + * # If this bit is set to '1', the query will contain PF(s) + * belongs to EP0 host. + */ + #define HWRM_FUNC_HOST_PF_IDS_QUERY_INPUT_HOST_EP_0 UINT32_C(0x2) + /* + * # If this bit is set to '1', the query will contain PF(s) + * belongs to EP1 host. + */ + #define HWRM_FUNC_HOST_PF_IDS_QUERY_INPUT_HOST_EP_1 UINT32_C(0x4) + /* + * # If this bit is set to '1', the query will contain PF(s) + * belongs to EP2 host. + */ + #define HWRM_FUNC_HOST_PF_IDS_QUERY_INPUT_HOST_EP_2 UINT32_C(0x8) + /* + * # If this bit is set to '1', the query will contain PF(s) + * belongs to EP3 host. + */ + #define HWRM_FUNC_HOST_PF_IDS_QUERY_INPUT_HOST_EP_3 UINT32_C(0x10) + /* + * This provides a filter of what PF(s) will be returned in the + * query.. + */ + uint8_t filter; + /* + * all available PF(s) belong to the host(s) (defined in the + * host field). This includes the hidden PFs. + */ + #define HWRM_FUNC_HOST_PF_IDS_QUERY_INPUT_FILTER_ALL UINT32_C(0x0) + /* + * all available PF(s) belong to the host(s) (defined in the + * host field) that is available for L2 traffic. + */ + #define HWRM_FUNC_HOST_PF_IDS_QUERY_INPUT_FILTER_L2 UINT32_C(0x1) + /* + * all available PF(s) belong to the host(s) (defined in the + * host field) that is available for ROCE traffic. + */ + #define HWRM_FUNC_HOST_PF_IDS_QUERY_INPUT_FILTER_ROCE UINT32_C(0x2) + #define HWRM_FUNC_HOST_PF_IDS_QUERY_INPUT_FILTER_LAST \ + HWRM_FUNC_HOST_PF_IDS_QUERY_INPUT_FILTER_ROCE + uint8_t unused_1[6]; +} __rte_packed; + +/* hwrm_func_host_pf_ids_query_output (size:128b/16B) */ +struct hwrm_func_host_pf_ids_query_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* This provides the first PF ID of the device. */ + uint16_t first_pf_id; + uint16_t pf_ordinal_mask; + /* + * When this bit is '1', it indicates first PF belongs to one of + * the hosts defined in the input request. + */ + #define HWRM_FUNC_HOST_PF_IDS_QUERY_OUTPUT_PF_ORDINAL_MASK_FUNC_0 \ + UINT32_C(0x1) + /* + * When this bit is '1', it indicates 2nd PF belongs to one of the + * hosts defined in the input request. + */ + #define HWRM_FUNC_HOST_PF_IDS_QUERY_OUTPUT_PF_ORDINAL_MASK_FUNC_1 \ + UINT32_C(0x2) + /* + * When this bit is '1', it indicates 3rd PF belongs to one of the + * hosts defined in the input request. + */ + #define HWRM_FUNC_HOST_PF_IDS_QUERY_OUTPUT_PF_ORDINAL_MASK_FUNC_2 \ + UINT32_C(0x4) + /* + * When this bit is '1', it indicates 4th PF belongs to one of the + * hosts defined in the input request. + */ + #define HWRM_FUNC_HOST_PF_IDS_QUERY_OUTPUT_PF_ORDINAL_MASK_FUNC_3 \ + UINT32_C(0x8) + /* + * When this bit is '1', it indicates 5th PF belongs to one of the + * hosts defined in the input request. + */ + #define HWRM_FUNC_HOST_PF_IDS_QUERY_OUTPUT_PF_ORDINAL_MASK_FUNC_4 \ + UINT32_C(0x10) + /* + * When this bit is '1', it indicates 6th PF belongs to one of the + * hosts defined in the input request. + */ + #define HWRM_FUNC_HOST_PF_IDS_QUERY_OUTPUT_PF_ORDINAL_MASK_FUNC_5 \ + UINT32_C(0x20) + /* + * When this bit is '1', it indicates 7th PF belongs to one of the + * hosts defined in the input request. 
+ */ + #define HWRM_FUNC_HOST_PF_IDS_QUERY_OUTPUT_PF_ORDINAL_MASK_FUNC_6 \ + UINT32_C(0x40) + /* + * When this bit is '1', it indicates 8th PF belongs to one of the + * hosts defined in the input request. + */ + #define HWRM_FUNC_HOST_PF_IDS_QUERY_OUTPUT_PF_ORDINAL_MASK_FUNC_7 \ + UINT32_C(0x80) + /* + * When this bit is '1', it indicates 9th PF belongs to one of the + * hosts defined in the input request. + */ + #define HWRM_FUNC_HOST_PF_IDS_QUERY_OUTPUT_PF_ORDINAL_MASK_FUNC_8 \ + UINT32_C(0x100) + /* + * When this bit is '1', it indicates 10th PF belongs to one of the + * hosts defined in the input request. + */ + #define HWRM_FUNC_HOST_PF_IDS_QUERY_OUTPUT_PF_ORDINAL_MASK_FUNC_9 \ + UINT32_C(0x200) + /* + * When this bit is '1', it indicates 11th PF belongs to one of the + * hosts defined in the input request. + */ + #define HWRM_FUNC_HOST_PF_IDS_QUERY_OUTPUT_PF_ORDINAL_MASK_FUNC_10 \ + UINT32_C(0x400) + /* + * When this bit is '1', it indicates 12th PF belongs to one of the + * hosts defined in the input request. + */ + #define HWRM_FUNC_HOST_PF_IDS_QUERY_OUTPUT_PF_ORDINAL_MASK_FUNC_11 \ + UINT32_C(0x800) + /* + * When this bit is '1', it indicates 13th PF belongs to one of the + * hosts defined in the input request. + */ + #define HWRM_FUNC_HOST_PF_IDS_QUERY_OUTPUT_PF_ORDINAL_MASK_FUNC_12 \ + UINT32_C(0x1000) + /* + * When this bit is '1', it indicates 14th PF belongs to one of the + * hosts defined in the input request. + */ + #define HWRM_FUNC_HOST_PF_IDS_QUERY_OUTPUT_PF_ORDINAL_MASK_FUNC_13 \ + UINT32_C(0x2000) + /* + * When this bit is '1', it indicates 15th PF belongs to one of the + * hosts defined in the input request. + */ + #define HWRM_FUNC_HOST_PF_IDS_QUERY_OUTPUT_PF_ORDINAL_MASK_FUNC_14 \ + UINT32_C(0x4000) + /* + * When this bit is '1', it indicates 16th PF belongs to one of the + * hosts defined in the input request. + */ + #define HWRM_FUNC_HOST_PF_IDS_QUERY_OUTPUT_PF_ORDINAL_MASK_FUNC_15 \ + UINT32_C(0x8000) + uint8_t unused_1[3]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/********************* + * hwrm_port_phy_cfg * + *********************/ + + +/* hwrm_port_phy_cfg_input (size:448b/56B) */ +struct hwrm_port_phy_cfg_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. 
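The response above is compact: first_pf_id plus a bitmap of which PF ordinals matched the host/filter selection. The sketch below expands it, under the assumption, suggested but not stated by the structure, that PF IDs are consecutive starting at first_pf_id.

#include <rte_byteorder.h>

/* Sketch: list the PF IDs selected by hwrm_func_host_pf_ids_query.
 * Assumes consecutive PF IDs starting at first_pf_id.
 */
static unsigned int
expand_pf_ids(const struct hwrm_func_host_pf_ids_query_output *resp,
	      uint16_t pf_ids[16])
{
	uint16_t first = rte_le_to_cpu_16(resp->first_pf_id);
	uint16_t mask = rte_le_to_cpu_16(resp->pf_ordinal_mask);
	unsigned int i, n = 0;

	for (i = 0; i < 16; i++)
		if (mask & (1u << i))
			pf_ids[n++] = first + i;
	return n;
}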
+ */ + uint64_t resp_addr; + uint32_t flags; + /* + * When this bit is set to '1', the PHY for the port shall + * be reset. + * + * # If this bit is set to 1, then the HWRM shall reset the + * PHY after applying PHY configuration changes specified + * in this command. + * # In order to guarantee that PHY configuration changes + * specified in this command take effect, the HWRM + * client should set this flag to 1. + * # If this bit is not set to 1, then the HWRM may reset + * the PHY depending on the current PHY configuration and + * settings specified in this command. + */ + #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY \ + UINT32_C(0x1) + /* deprecated bit. Do not use!!! */ + #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_DEPRECATED \ + UINT32_C(0x2) + /* + * When this bit is set to '1', the link shall be forced to + * the force_link_speed value. + * + * When this bit is set to '1', the HWRM client should + * not enable any of the auto negotiation related + * fields represented by auto_XXX fields in this command. + * When this bit is set to '1' and the HWRM client has + * enabled a auto_XXX field in this command, then the + * HWRM shall ignore the enabled auto_XXX field. + * + * When this bit is set to zero, the link + * shall be allowed to autoneg. + */ + #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE \ + UINT32_C(0x4) + /* + * When this bit is set to '1', the auto-negotiation process + * shall be restarted on the link. + */ + #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG \ + UINT32_C(0x8) + /* + * When this bit is set to '1', Energy Efficient Ethernet + * (EEE) is requested to be enabled on this link. + * If EEE is not supported on this port, then this flag + * shall be ignored by the HWRM. + */ + #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_EEE_ENABLE \ + UINT32_C(0x10) + /* + * When this bit is set to '1', Energy Efficient Ethernet + * (EEE) is requested to be disabled on this link. + * If EEE is not supported on this port, then this flag + * shall be ignored by the HWRM. + */ + #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_EEE_DISABLE \ + UINT32_C(0x20) + /* + * When this bit is set to '1' and EEE is enabled on this + * link, then TX LPI is requested to be enabled on the link. + * If EEE is not supported on this port, then this flag + * shall be ignored by the HWRM. + * If EEE is disabled on this port, then this flag shall be + * ignored by the HWRM. + */ + #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_EEE_TX_LPI_ENABLE \ + UINT32_C(0x40) + /* + * When this bit is set to '1' and EEE is enabled on this + * link, then TX LPI is requested to be disabled on the link. + * If EEE is not supported on this port, then this flag + * shall be ignored by the HWRM. + * If EEE is disabled on this port, then this flag shall be + * ignored by the HWRM. + */ + #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_EEE_TX_LPI_DISABLE \ + UINT32_C(0x80) + /* + * When set to 1, then the HWRM shall enable FEC autonegotitation + * on this port if supported. + * When set to 0, then this flag shall be ignored. + * If FEC autonegotiation is not supported, then the HWRM shall ignore this + * flag. + */ + #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_AUTONEG_ENABLE \ + UINT32_C(0x100) + /* + * When set to 1, then the HWRM shall disable FEC autonegotiation + * on this port if supported. + * When set to 0, then this flag shall be ignored. + * If FEC autonegotiation is not supported, then the HWRM shall ignore this + * flag. 
+ */ + #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_AUTONEG_DISABLE \ + UINT32_C(0x200) + /* + * When set to 1, then the HWRM shall enable FEC CLAUSE 74 (Fire Code) + * on this port if supported. + * When set to 0, then this flag shall be ignored. + * If FEC CLAUSE 74 is not supported, then the HWRM shall ignore this + * flag. + */ + #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_CLAUSE74_ENABLE \ + UINT32_C(0x400) + /* + * When set to 1, then the HWRM shall disable FEC CLAUSE 74 (Fire Code) + * on this port if supported. + * When set to 0, then this flag shall be ignored. + * If FEC CLAUSE 74 is not supported, then the HWRM shall ignore this + * flag. + */ + #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_CLAUSE74_DISABLE \ + UINT32_C(0x800) + /* + * When set to 1, then the HWRM shall enable FEC CLAUSE 91 (Reed Solomon) + * on this port if supported. + * When set to 0, then this flag shall be ignored. + * If FEC CLAUSE 91 is not supported, then the HWRM shall ignore this + * flag. + */ + #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_CLAUSE91_ENABLE \ + UINT32_C(0x1000) + /* + * When set to 1, then the HWRM shall disable FEC CLAUSE 91 (Reed Solomon) + * on this port if supported. + * When set to 0, then this flag shall be ignored. + * If FEC CLAUSE 91 is not supported, then the HWRM shall ignore this + * flag. + */ + #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_CLAUSE91_DISABLE \ + UINT32_C(0x2000) + /* + * When this bit is set to '1', the link shall be forced to + * be taken down. + * + * # When this bit is set to '1", all other + * command input settings related to the link speed shall + * be ignored. + * Once the link state is forced down, it can be + * explicitly cleared from that state by setting this flag + * to '0'. + * # If this flag is set to '0', then the link shall be + * cleared from forced down state if the link is in forced + * down state. + * There may be conditions (e.g. out-of-band or sideband + * configuration changes for the link) outside the scope + * of the HWRM implementation that may clear forced down + * link state. + */ + #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN \ + UINT32_C(0x4000) + uint32_t enables; + /* + * This bit must be '1' for the auto_mode field to be + * configured. + */ + #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE \ + UINT32_C(0x1) + /* + * This bit must be '1' for the auto_duplex field to be + * configured. + */ + #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX \ + UINT32_C(0x2) + /* + * This bit must be '1' for the auto_pause field to be + * configured. + */ + #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE \ + UINT32_C(0x4) + /* + * This bit must be '1' for the auto_link_speed field to be + * configured. + */ + #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED \ + UINT32_C(0x8) + /* + * This bit must be '1' for the auto_link_speed_mask field to be + * configured. + */ + #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK \ + UINT32_C(0x10) + /* + * This bit must be '1' for the wirespeed field to be + * configured. + */ + #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_WIRESPEED \ + UINT32_C(0x20) + /* + * This bit must be '1' for the lpbk field to be + * configured. + */ + #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_LPBK \ + UINT32_C(0x40) + /* + * This bit must be '1' for the preemphasis field to be + * configured. + */ + #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_PREEMPHASIS \ + UINT32_C(0x80) + /* + * This bit must be '1' for the force_pause field to be + * configured. 
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE \
+ UINT32_C(0x100)
+ /*
+ * This bit must be '1' for the eee_link_speed_mask field to be
+ * configured.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_EEE_LINK_SPEED_MASK \
+ UINT32_C(0x200)
+ /*
+ * This bit must be '1' for the tx_lpi_timer field to be
+ * configured.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_TX_LPI_TIMER \
+ UINT32_C(0x400)
+ /* Port ID of port that is to be configured. */
+ uint16_t port_id;
+ /*
+ * This is the speed that will be used if the force
+ * bit is '1'. If unsupported speed is selected, an error
+ * will be generated.
+ */
+ uint16_t force_link_speed;
+ /* 100Mb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100MB UINT32_C(0x1)
+ /* 1Gb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_1GB UINT32_C(0xa)
+ /* 2Gb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_2GB UINT32_C(0x14)
+ /* 2.5Gb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_2_5GB UINT32_C(0x19)
+ /* 10Gb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB UINT32_C(0x64)
+ /* 20Gb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_20GB UINT32_C(0xc8)
+ /* 25Gb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_25GB UINT32_C(0xfa)
+ /* 40Gb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB UINT32_C(0x190)
+ /* 50Gb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB UINT32_C(0x1f4)
+ /* 100Gb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB UINT32_C(0x3e8)
+ /* 200Gb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_200GB UINT32_C(0x7d0)
+ /* 10Mb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10MB UINT32_C(0xffff)
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_LAST \
+ HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10MB
+ /*
+ * This value is used to identify what autoneg mode is
+ * used when the link speed is not being forced.
+ */
+ uint8_t auto_mode;
+ /* Disable autoneg or autoneg disabled. No speeds are selected. */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE UINT32_C(0x0)
+ /* Select all possible speeds for autoneg mode. */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS UINT32_C(0x1)
+ /*
+ * Select only the auto_link_speed speed for autoneg mode. This mode has
+ * been DEPRECATED. An HWRM client should not use this mode.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ONE_SPEED UINT32_C(0x2)
+ /*
+ * Select the auto_link_speed or any speed below that speed for autoneg.
+ * This mode has been DEPRECATED. An HWRM client should not use this mode.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ONE_OR_BELOW UINT32_C(0x3)
+ /*
+ * Select the speeds based on the corresponding link speed mask value
+ * that is provided.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK UINT32_C(0x4)
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_LAST \
+ HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK
+ /*
+ * This is the duplex setting that will be used if the autoneg_mode
+ * is "one_speed" or "one_or_below".
+ */
+ uint8_t auto_duplex;
+ /* Half Duplex will be requested. */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF UINT32_C(0x0)
+ /* Full duplex will be requested. */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL UINT32_C(0x1)
+ /* Both Half and Full duplex will be requested.
*/ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH UINT32_C(0x2) + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_LAST \ + HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH + /* + * This value is used to configure the pause that will be + * used for autonegotiation. + * Add text on the usage of auto_pause and force_pause. + */ + uint8_t auto_pause; + /* + * When this bit is '1', Generation of tx pause messages + * has been requested. Disabled otherwise. + */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX \ + UINT32_C(0x1) + /* + * When this bit is '1', Reception of rx pause messages + * has been requested. Disabled otherwise. + */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX \ + UINT32_C(0x2) + /* + * When set to 1, the advertisement of pause is enabled. + * + * # When the auto_mode is not set to none and this flag is + * set to 1, then the auto_pause bits on this port are being + * advertised and autoneg pause results are being interpreted. + * # When the auto_mode is not set to none and this + * flag is set to 0, the pause is forced as indicated in + * force_pause, and also advertised as auto_pause bits, but + * the autoneg results are not interpreted since the pause + * configuration is being forced. + * # When the auto_mode is set to none and this flag is set to + * 1, auto_pause bits should be ignored and should be set to 0. + */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_AUTONEG_PAUSE \ + UINT32_C(0x4) + uint8_t unused_0; + /* + * This is the speed that will be used if the autoneg_mode + * is "one_speed" or "one_or_below". If an unsupported speed + * is selected, an error will be generated. + */ + uint16_t auto_link_speed; + /* 100Mb link speed */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB UINT32_C(0x1) + /* 1Gb link speed */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB UINT32_C(0xa) + /* 2Gb link speed */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2GB UINT32_C(0x14) + /* 25Gb link speed */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB UINT32_C(0x19) + /* 10Gb link speed */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_10GB UINT32_C(0x64) + /* 20Mb link speed */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB UINT32_C(0xc8) + /* 25Gb link speed */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB UINT32_C(0xfa) + /* 40Gb link speed */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_40GB UINT32_C(0x190) + /* 50Gb link speed */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_50GB UINT32_C(0x1f4) + /* 100Gb link speed */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100GB UINT32_C(0x3e8) + /* 200Gb link speed */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_200GB UINT32_C(0x7d0) + /* 10Mb link speed */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_10MB UINT32_C(0xffff) + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_LAST \ + HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_10MB + /* + * This is a mask of link speeds that will be used if + * autoneg_mode is "mask". If unsupported speed is enabled + * an error will be generated. 
+ */ + uint16_t auto_link_speed_mask; + /* 100Mb link speed (Half-duplex) */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MBHD \ + UINT32_C(0x1) + /* 100Mb link speed (Full-duplex) */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB \ + UINT32_C(0x2) + /* 1Gb link speed (Half-duplex) */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GBHD \ + UINT32_C(0x4) + /* 1Gb link speed (Full-duplex) */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB \ + UINT32_C(0x8) + /* 2Gb link speed */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2GB \ + UINT32_C(0x10) + /* 25Gb link speed */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB \ + UINT32_C(0x20) + /* 10Gb link speed */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB \ + UINT32_C(0x40) + /* 20Gb link speed */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB \ + UINT32_C(0x80) + /* 25Gb link speed */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB \ + UINT32_C(0x100) + /* 40Gb link speed */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB \ + UINT32_C(0x200) + /* 50Gb link speed */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB \ + UINT32_C(0x400) + /* 100Gb link speed */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100GB \ + UINT32_C(0x800) + /* 10Mb link speed (Half-duplex) */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10MBHD \ + UINT32_C(0x1000) + /* 10Mb link speed (Full-duplex) */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10MB \ + UINT32_C(0x2000) + /* 200Gb link speed */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_200GB \ + UINT32_C(0x4000) + /* This value controls the wirespeed feature. */ + uint8_t wirespeed; + /* Wirespeed feature is disabled. */ + #define HWRM_PORT_PHY_CFG_INPUT_WIRESPEED_OFF UINT32_C(0x0) + /* Wirespeed feature is enabled. */ + #define HWRM_PORT_PHY_CFG_INPUT_WIRESPEED_ON UINT32_C(0x1) + #define HWRM_PORT_PHY_CFG_INPUT_WIRESPEED_LAST \ + HWRM_PORT_PHY_CFG_INPUT_WIRESPEED_ON + /* This value controls the loopback setting for the PHY. */ + uint8_t lpbk; + /* No loopback is selected. Normal operation. */ + #define HWRM_PORT_PHY_CFG_INPUT_LPBK_NONE UINT32_C(0x0) + /* + * The HW will be configured with local loopback such that + * host data is sent back to the host without modification. + */ + #define HWRM_PORT_PHY_CFG_INPUT_LPBK_LOCAL UINT32_C(0x1) + /* + * The HW will be configured with remote loopback such that + * port logic will send packets back out the transmitter that + * are received. + */ + #define HWRM_PORT_PHY_CFG_INPUT_LPBK_REMOTE UINT32_C(0x2) + /* + * The HW will be configured with external loopback such that + * host data is sent on the transmitter and based on the external + * loopback connection the data will be received without modification. + */ + #define HWRM_PORT_PHY_CFG_INPUT_LPBK_EXTERNAL UINT32_C(0x3) + #define HWRM_PORT_PHY_CFG_INPUT_LPBK_LAST \ + HWRM_PORT_PHY_CFG_INPUT_LPBK_EXTERNAL + /* + * This value is used to configure the pause that will be + * used for force mode. + */ + uint8_t force_pause; + /* + * When this bit is '1', Generation of tx pause messages + * is supported. Disabled otherwise. + */ + #define HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX UINT32_C(0x1) + /* + * When this bit is '1', Reception of rx pause messages + * is supported. Disabled otherwise. + */ + #define HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX UINT32_C(0x2) + uint8_t unused_1; + /* + * This value controls the pre-emphasis to be used for the + * link. 
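Putting the flags, enables, auto_mode and speed mask together: a sketch of requesting autonegotiation restricted to 25G and 100G on one port. Only the fields whose enable bits are set are meaningful to firmware; this is an illustration, not the bnxt driver's actual code path.

#include <rte_byteorder.h>

/* Sketch: restart autoneg limited to 25G and 100G. */
static void
fill_phy_autoneg_25_100g(struct hwrm_port_phy_cfg_input *req, uint16_t port_id)
{
	req->port_id = rte_cpu_to_le_16(port_id);
	req->flags = rte_cpu_to_le_32(
	    HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG);
	req->enables = rte_cpu_to_le_32(
	    HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE |
	    HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK);
	req->auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
	req->auto_link_speed_mask = rte_cpu_to_le_16(
	    HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB |
	    HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100GB);
}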
Driver should not set this value (use + * enable.preemphasis = 0) unless driver is sure of setting. + * Normally HWRM FW will determine proper pre-emphasis. + */ + uint32_t preemphasis; + /* + * Setting for link speed mask that is used to + * advertise speeds during autonegotiation when EEE is enabled. + * This field is valid only when EEE is enabled. + * The speeds specified in this field shall be a subset of + * speeds specified in auto_link_speed_mask. + * If EEE is enabled,then at least one speed shall be provided + * in this mask. + */ + uint16_t eee_link_speed_mask; + /* Reserved */ + #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_RSVD1 \ + UINT32_C(0x1) + /* 100Mb link speed (Full-duplex) */ + #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_100MB \ + UINT32_C(0x2) + /* Reserved */ + #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_RSVD2 \ + UINT32_C(0x4) + /* 1Gb link speed (Full-duplex) */ + #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_1GB \ + UINT32_C(0x8) + /* Reserved */ + #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_RSVD3 \ + UINT32_C(0x10) + /* Reserved */ + #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_RSVD4 \ + UINT32_C(0x20) + /* 10Gb link speed */ + #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_10GB \ + UINT32_C(0x40) + uint8_t unused_2[2]; + /* + * Requested setting of TX LPI timer in microseconds. + * This field is valid only when EEE is enabled and TX LPI is + * enabled. + */ + uint32_t tx_lpi_timer; + #define HWRM_PORT_PHY_CFG_INPUT_TX_LPI_TIMER_MASK UINT32_C(0xffffff) + #define HWRM_PORT_PHY_CFG_INPUT_TX_LPI_TIMER_SFT 0 + uint32_t unused_3; +} __rte_packed; + +/* hwrm_port_phy_cfg_output (size:128b/16B) */ +struct hwrm_port_phy_cfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/* hwrm_port_phy_cfg_cmd_err (size:64b/8B) */ +struct hwrm_port_phy_cfg_cmd_err { + /* + * command specific error codes that goes to + * the cmd_err field in Common HWRM Error Response. + */ + uint8_t code; + /* Unknown error */ + #define HWRM_PORT_PHY_CFG_CMD_ERR_CODE_UNKNOWN UINT32_C(0x0) + /* Unable to complete operation due to invalid speed */ + #define HWRM_PORT_PHY_CFG_CMD_ERR_CODE_ILLEGAL_SPEED UINT32_C(0x1) + /* + * retry the command since the phy is not ready. + * retry count is returned in opaque_0. + * This is only valid for the first command and + * this value will not change for successive calls. + * but if a 0 is returned at any time then this should + * be treated as an un recoverable failure, + * + * retry interval in milli seconds is returned in opaque_1. + * This specifies the time that user should wait before + * issuing the next port_phy_cfg command. 
+ */ + #define HWRM_PORT_PHY_CFG_CMD_ERR_CODE_RETRY UINT32_C(0x2) + #define HWRM_PORT_PHY_CFG_CMD_ERR_CODE_LAST \ + HWRM_PORT_PHY_CFG_CMD_ERR_CODE_RETRY + uint8_t unused_0[7]; +} __rte_packed; + +/********************** + * hwrm_port_phy_qcfg * + **********************/ + + +/* hwrm_port_phy_qcfg_input (size:192b/24B) */ +struct hwrm_port_phy_qcfg_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* Port ID of port that is to be queried. */ + uint16_t port_id; + uint8_t unused_0[6]; +} __rte_packed; + +/* hwrm_port_phy_qcfg_output (size:768b/96B) */ +struct hwrm_port_phy_qcfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* This value indicates the current link status. */ + uint8_t link; + /* There is no link or cable detected. */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_NO_LINK UINT32_C(0x0) + /* There is no link, but a cable has been detected. */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SIGNAL UINT32_C(0x1) + /* There is a link. */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK UINT32_C(0x2) + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LAST \ + HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK + uint8_t unused_0; + /* This value indicates the current link speed of the connection. 
*/ + uint16_t link_speed; + /* 100Mb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB UINT32_C(0x1) + /* 1Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB UINT32_C(0xa) + /* 2Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB UINT32_C(0x14) + /* 2.5Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB UINT32_C(0x19) + /* 10Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB UINT32_C(0x64) + /* 20Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB UINT32_C(0xc8) + /* 25Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB UINT32_C(0xfa) + /* 40Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB UINT32_C(0x190) + /* 50Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB UINT32_C(0x1f4) + /* 100Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB UINT32_C(0x3e8) + /* 200Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_200GB UINT32_C(0x7d0) + /* 10Mb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10MB UINT32_C(0xffff) + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_LAST \ + HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10MB + /* + * This value indicates the duplex of the current + * configuration. + */ + uint8_t duplex_cfg; + /* Half Duplex connection. */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_CFG_HALF UINT32_C(0x0) + /* Full duplex connection. */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_CFG_FULL UINT32_C(0x1) + #define HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_CFG_LAST \ + HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_CFG_FULL + /* + * This value is used to indicate the current + * pause configuration. When autoneg is enabled, this value + * represents the autoneg results of pause configuration. + */ + uint8_t pause; + /* + * When this bit is '1', Generation of tx pause messages + * is supported. Disabled otherwise. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX UINT32_C(0x1) + /* + * When this bit is '1', Reception of rx pause messages + * is supported. Disabled otherwise. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX UINT32_C(0x2) + /* + * The supported speeds for the port. This is a bit mask. + * For each speed that is supported, the corresponding + * bit will be set to '1'.
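The link_speed codes listed above encode the speed in units of 100 Mbps, with 0xffff special-cased for 10 Mb, and support_speeds (declared just below) is a plain bit mask. What follows is only a minimal, hedged sketch of how a consumer of this header might use them; the response is assumed to have been obtained elsewhere, and rte_le_to_cpu_16() is DPDK's byte-order helper.

#include <stdint.h>
#include <rte_byteorder.h>

/* Sketch: convert an HWRM link_speed code to Mbps. */
static uint32_t hwrm_link_speed_to_mbps(uint16_t code)
{
	if (code == HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10MB)
		return 10;               /* special-cased 10 Mb value */
	return (uint32_t)code * 100;     /* all other codes are x100 Mbps */
}

/* Sketch: test one bit of the support_speeds mask defined below. */
static int port_supports_25g(const struct hwrm_port_phy_qcfg_output *resp)
{
	uint16_t mask = rte_le_to_cpu_16(resp->support_speeds);

	return !!(mask & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_25GB);
}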
+ */ + uint16_t support_speeds; + /* 100Mb link speed (Half-duplex) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100MBHD \ + UINT32_C(0x1) + /* 100Mb link speed (Full-duplex) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100MB \ + UINT32_C(0x2) + /* 1Gb link speed (Half-duplex) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_1GBHD \ + UINT32_C(0x4) + /* 1Gb link speed (Full-duplex) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_1GB \ + UINT32_C(0x8) + /* 2Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2GB \ + UINT32_C(0x10) + /* 25Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2_5GB \ + UINT32_C(0x20) + /* 10Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10GB \ + UINT32_C(0x40) + /* 20Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_20GB \ + UINT32_C(0x80) + /* 25Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_25GB \ + UINT32_C(0x100) + /* 40Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_40GB \ + UINT32_C(0x200) + /* 50Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_50GB \ + UINT32_C(0x400) + /* 100Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100GB \ + UINT32_C(0x800) + /* 10Mb link speed (Half-duplex) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10MBHD \ + UINT32_C(0x1000) + /* 10Mb link speed (Full-duplex) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10MB \ + UINT32_C(0x2000) + /* 200Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_200GB \ + UINT32_C(0x4000) + /* + * Current setting of forced link speed. + * When the link speed is not being forced, this + * value shall be set to 0. + */ + uint16_t force_link_speed; + /* 100Mb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_100MB UINT32_C(0x1) + /* 1Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_1GB UINT32_C(0xa) + /* 2Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_2GB UINT32_C(0x14) + /* 25Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_2_5GB UINT32_C(0x19) + /* 10Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_10GB UINT32_C(0x64) + /* 20Mb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_20GB UINT32_C(0xc8) + /* 25Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_25GB UINT32_C(0xfa) + /* 40Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_40GB \ + UINT32_C(0x190) + /* 50Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_50GB \ + UINT32_C(0x1f4) + /* 100Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_100GB \ + UINT32_C(0x3e8) + /* 200Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_200GB \ + UINT32_C(0x7d0) + /* 10Mb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_10MB \ + UINT32_C(0xffff) + #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_LAST \ + HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_10MB + /* Current setting of auto negotiation mode. */ + uint8_t auto_mode; + /* Disable autoneg or autoneg disabled. No speeds are selected. */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE UINT32_C(0x0) + /* Select all possible speeds for autoneg mode. */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_ALL_SPEEDS UINT32_C(0x1) + /* + * Select only the auto_link_speed speed for autoneg mode. This mode has + * been DEPRECATED. An HWRM client should not use this mode. 
+ */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_ONE_SPEED UINT32_C(0x2) + /* + * Select the auto_link_speed or any speed below that speed for autoneg. + * This mode has been DEPRECATED. An HWRM client should not use this mode. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_ONE_OR_BELOW UINT32_C(0x3) + /* + * Select the speeds based on the corresponding link speed mask value + * that is provided. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_SPEED_MASK UINT32_C(0x4) + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_LAST \ + HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_SPEED_MASK + /* + * Current setting of pause autonegotiation. + * Move autoneg_pause flag here. + */ + uint8_t auto_pause; + /* + * When this bit is '1', Generation of tx pause messages + * has been requested. Disabled otherwise. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_PAUSE_TX \ + UINT32_C(0x1) + /* + * When this bit is '1', Reception of rx pause messages + * has been requested. Disabled otherwise. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_PAUSE_RX \ + UINT32_C(0x2) + /* + * When set to 1, the advertisement of pause is enabled. + * + * # When the auto_mode is not set to none and this flag is + * set to 1, then the auto_pause bits on this port are being + * advertised and autoneg pause results are being interpreted. + * # When the auto_mode is not set to none and this + * flag is set to 0, the pause is forced as indicated in + * force_pause, and also advertised as auto_pause bits, but + * the autoneg results are not interpreted since the pause + * configuration is being forced. + * # When the auto_mode is set to none and this flag is set to + * 1, auto_pause bits should be ignored and should be set to 0. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_PAUSE_AUTONEG_PAUSE \ + UINT32_C(0x4) + /* + * Current setting for auto_link_speed. This field is only + * valid when auto_mode is set to "one_speed" or "one_or_below". + */ + uint16_t auto_link_speed; + /* 100Mb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_100MB UINT32_C(0x1) + /* 1Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_1GB UINT32_C(0xa) + /* 2Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_2GB UINT32_C(0x14) + /* 25Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_2_5GB UINT32_C(0x19) + /* 10Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_10GB UINT32_C(0x64) + /* 20Mb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_20GB UINT32_C(0xc8) + /* 25Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_25GB UINT32_C(0xfa) + /* 40Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_40GB UINT32_C(0x190) + /* 50Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_50GB UINT32_C(0x1f4) + /* 100Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_100GB UINT32_C(0x3e8) + /* 200Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_200GB UINT32_C(0x7d0) + /* 10Mb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_10MB \ + UINT32_C(0xffff) + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_LAST \ + HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_10MB + /* + * Current setting for auto_link_speed_mask that is used to + * advertise speeds during autonegotiation. + * This field is only valid when auto_mode is set to "mask". + * The speeds specified in this field shall be a subset of + * supported speeds on this port. 
+ */ + uint16_t auto_link_speed_mask; + /* 100Mb link speed (Half-duplex) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_100MBHD \ + UINT32_C(0x1) + /* 100Mb link speed (Full-duplex) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_100MB \ + UINT32_C(0x2) + /* 1Gb link speed (Half-duplex) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_1GBHD \ + UINT32_C(0x4) + /* 1Gb link speed (Full-duplex) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_1GB \ + UINT32_C(0x8) + /* 2Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_2GB \ + UINT32_C(0x10) + /* 25Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_2_5GB \ + UINT32_C(0x20) + /* 10Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_10GB \ + UINT32_C(0x40) + /* 20Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_20GB \ + UINT32_C(0x80) + /* 25Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_25GB \ + UINT32_C(0x100) + /* 40Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_40GB \ + UINT32_C(0x200) + /* 50Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_50GB \ + UINT32_C(0x400) + /* 100Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_100GB \ + UINT32_C(0x800) + /* 10Mb link speed (Half-duplex) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_10MBHD \ + UINT32_C(0x1000) + /* 10Mb link speed (Full-duplex) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_10MB \ + UINT32_C(0x2000) + /* 200Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_200GB \ + UINT32_C(0x4000) + /* Current setting for wirespeed. */ + uint8_t wirespeed; + /* Wirespeed feature is disabled. */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_WIRESPEED_OFF UINT32_C(0x0) + /* Wirespeed feature is enabled. */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_WIRESPEED_ON UINT32_C(0x1) + #define HWRM_PORT_PHY_QCFG_OUTPUT_WIRESPEED_LAST \ + HWRM_PORT_PHY_QCFG_OUTPUT_WIRESPEED_ON + /* Current setting for loopback. */ + uint8_t lpbk; + /* No loopback is selected. Normal operation. */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LPBK_NONE UINT32_C(0x0) + /* + * The HW will be configured with local loopback such that + * host data is sent back to the host without modification. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LPBK_LOCAL UINT32_C(0x1) + /* + * The HW will be configured with remote loopback such that + * port logic will send packets back out the transmitter that + * are received. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LPBK_REMOTE UINT32_C(0x2) + /* + * The HW will be configured with external loopback such that + * host data is sent on the transmitter and based on the external + * loopback connection the data will be received without modification. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LPBK_EXTERNAL UINT32_C(0x3) + #define HWRM_PORT_PHY_QCFG_OUTPUT_LPBK_LAST \ + HWRM_PORT_PHY_QCFG_OUTPUT_LPBK_EXTERNAL + /* + * Current setting of forced pause. + * When the pause configuration is not being forced, then + * this value shall be set to 0. + */ + uint8_t force_pause; + /* + * When this bit is '1', Generation of tx pause messages + * is supported. Disabled otherwise. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_PAUSE_TX UINT32_C(0x1) + /* + * When this bit is '1', Reception of rx pause messages + * is supported. Disabled otherwise. 
+ */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_PAUSE_RX UINT32_C(0x2) + /* + * This value indicates the current status of the optics module on + * this port. + */ + uint8_t module_status; + /* Module is inserted and accepted */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_NONE \ + UINT32_C(0x0) + /* Module is rejected and transmit side Laser is disabled. */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_DISABLETX \ + UINT32_C(0x1) + /* Module mismatch warning. */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_WARNINGMSG \ + UINT32_C(0x2) + /* Module is rejected and powered down. */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_PWRDOWN \ + UINT32_C(0x3) + /* Module is not inserted. */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_NOTINSERTED \ + UINT32_C(0x4) + /* Module is powered down because of over current fault. */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_CURRENTFAULT \ + UINT32_C(0x5) + /* Module status is not applicable. */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_NOTAPPLICABLE \ + UINT32_C(0xff) + #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_LAST \ + HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_NOTAPPLICABLE + /* Current setting for preemphasis. */ + uint32_t preemphasis; + /* This field represents the major version of the PHY. */ + uint8_t phy_maj; + /* This field represents the minor version of the PHY. */ + uint8_t phy_min; + /* This field represents the build version of the PHY. */ + uint8_t phy_bld; + /* This value represents a PHY type. */ + uint8_t phy_type; + /* Unknown */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_UNKNOWN \ + UINT32_C(0x0) + /* BASE-CR */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASECR \ + UINT32_C(0x1) + /* BASE-KR4 (Deprecated) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR4 \ + UINT32_C(0x2) + /* BASE-LR */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASELR \ + UINT32_C(0x3) + /* BASE-SR */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASESR \ + UINT32_C(0x4) + /* BASE-KR2 (Deprecated) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR2 \ + UINT32_C(0x5) + /* BASE-KX */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKX \ + UINT32_C(0x6) + /* BASE-KR */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR \ + UINT32_C(0x7) + /* BASE-T */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET \ + UINT32_C(0x8) + /* EEE capable BASE-T */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE \ + UINT32_C(0x9) + /* SGMII connected external PHY */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_SGMIIEXTPHY \ + UINT32_C(0xa) + /* 25G_BASECR_CA_L */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_L \ + UINT32_C(0xb) + /* 25G_BASECR_CA_S */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_S \ + UINT32_C(0xc) + /* 25G_BASECR_CA_N */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_N \ + UINT32_C(0xd) + /* 25G_BASESR */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASESR \ + UINT32_C(0xe) + /* 100G_BASECR4 */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASECR4 \ + UINT32_C(0xf) + /* 100G_BASESR4 */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASESR4 \ + UINT32_C(0x10) + /* 100G_BASELR4 */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASELR4 \ + UINT32_C(0x11) + /* 100G_BASEER4 */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASEER4 \ + UINT32_C(0x12) + /* 100G_BASESR10 */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASESR10 \ + UINT32_C(0x13) + /* 40G_BASECR4 */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASECR4 \ + UINT32_C(0x14) + /* 40G_BASESR4 */ + 
#define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASESR4 \ + UINT32_C(0x15) + /* 40G_BASELR4 */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASELR4 \ + UINT32_C(0x16) + /* 40G_BASEER4 */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASEER4 \ + UINT32_C(0x17) + /* 40G_ACTIVE_CABLE */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_ACTIVE_CABLE \ + UINT32_C(0x18) + /* 1G_baseT */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASET \ + UINT32_C(0x19) + /* 1G_baseSX */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASESX \ + UINT32_C(0x1a) + /* 1G_baseCX */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASECX \ + UINT32_C(0x1b) + /* 200G_BASECR4 */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_200G_BASECR4 \ + UINT32_C(0x1c) + /* 200G_BASESR4 */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_200G_BASESR4 \ + UINT32_C(0x1d) + /* 200G_BASELR4 */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_200G_BASELR4 \ + UINT32_C(0x1e) + /* 200G_BASEER4 */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_200G_BASEER4 \ + UINT32_C(0x1f) + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_LAST \ + HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_200G_BASEER4 + /* This value represents a media type. */ + uint8_t media_type; + /* Unknown */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_UNKNOWN UINT32_C(0x0) + /* Twisted Pair */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP UINT32_C(0x1) + /* Direct Attached Copper */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_DAC UINT32_C(0x2) + /* Fiber */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_FIBRE UINT32_C(0x3) + #define HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_LAST \ + HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_FIBRE + /* This value represents a transceiver type. */ + uint8_t xcvr_pkg_type; + /* PHY and MAC are in the same package */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_PKG_TYPE_XCVR_INTERNAL \ + UINT32_C(0x1) + /* PHY and MAC are in different packages */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_PKG_TYPE_XCVR_EXTERNAL \ + UINT32_C(0x2) + #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_PKG_TYPE_LAST \ + HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_PKG_TYPE_XCVR_EXTERNAL + uint8_t eee_config_phy_addr; + /* This field represents PHY address. */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_ADDR_MASK \ + UINT32_C(0x1f) + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_ADDR_SFT 0 + /* + * This field represents flags related to EEE configuration. + * These EEE configuration flags are valid only when the + * auto_mode is not set to none (in other words autonegotiation + * is enabled). + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_EEE_CONFIG_MASK \ + UINT32_C(0xe0) + #define HWRM_PORT_PHY_QCFG_OUTPUT_EEE_CONFIG_SFT 5 + /* + * When set to 1, Energy Efficient Ethernet (EEE) mode is enabled. + * Speeds for autoneg with EEE mode enabled + * are based on eee_link_speed_mask. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_EEE_CONFIG_EEE_ENABLED \ + UINT32_C(0x20) + /* + * This flag is valid only when eee_enabled is set to 1. + * + * # If eee_enabled is set to 0, then EEE mode is disabled + * and this flag shall be ignored. + * # If eee_enabled is set to 1 and this flag is set to 1, + * then Energy Efficient Ethernet (EEE) mode is enabled + * and in use. + * # If eee_enabled is set to 1 and this flag is set to 0, + * then Energy Efficient Ethernet (EEE) mode is enabled + * but is currently not in use. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_EEE_CONFIG_EEE_ACTIVE \ + UINT32_C(0x40) + /* + * This flag is valid only when eee_enabled is set to 1. + * + * # If eee_enabled is set to 0, then EEE mode is disabled + * and this flag shall be ignored.
+ * # If eee_enabled is set to 1 and this flag is set to 1, + * then Energy Efficient Ethernet (EEE) mode is enabled + * and TX LPI is enabled. + * # If eee_enabled is set to 1 and this flag is set to 0, + * then Energy Efficient Ethernet (EEE) mode is enabled + * but TX LPI is disabled. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_EEE_CONFIG_EEE_TX_LPI \ + UINT32_C(0x80) + /* + * When set to 1, parallel detection is used to determine + * the speed of the link partner. + * + * Parallel detection is used when an autonegotiation-capable + * device is connected to a link partner that is not capable + * of autonegotiation. + */ + uint8_t parallel_detect; + /* + * When set to 1, parallel detection is used to determine + * the speed of the link partner. + * + * Parallel detection is used when an autonegotiation-capable + * device is connected to a link partner that is not capable + * of autonegotiation. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PARALLEL_DETECT UINT32_C(0x1) + /* + * The speeds advertised for the port by the link partner. + * For each advertised speed, the corresponding bit will be set to '1'. + */ + uint16_t link_partner_adv_speeds; + /* 100Mb link speed (Half-duplex) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_100MBHD \ + UINT32_C(0x1) + /* 100Mb link speed (Full-duplex) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_100MB \ + UINT32_C(0x2) + /* 1Gb link speed (Half-duplex) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_1GBHD \ + UINT32_C(0x4) + /* 1Gb link speed (Full-duplex) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_1GB \ + UINT32_C(0x8) + /* 2Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_2GB \ + UINT32_C(0x10) + /* 2.5Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_2_5GB \ + UINT32_C(0x20) + /* 10Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_10GB \ + UINT32_C(0x40) + /* 20Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_20GB \ + UINT32_C(0x80) + /* 25Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_25GB \ + UINT32_C(0x100) + /* 40Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_40GB \ + UINT32_C(0x200) + /* 50Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_50GB \ + UINT32_C(0x400) + /* 100Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_100GB \ + UINT32_C(0x800) + /* 10Mb link speed (Half-duplex) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_10MBHD \ + UINT32_C(0x1000) + /* 10Mb link speed (Full-duplex) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_10MB \ + UINT32_C(0x2000) + /* + * The advertised autoneg for the port by the link partner. + * This field is deprecated and should be set to 0. + */ + uint8_t link_partner_adv_auto_mode; + /* Disable autoneg or autoneg disabled. No speeds are selected. */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_NONE \ + UINT32_C(0x0) + /* Select all possible speeds for autoneg mode. */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_ALL_SPEEDS \ + UINT32_C(0x1) + /* + * Select only the auto_link_speed speed for autoneg mode. This mode has + * been DEPRECATED. An HWRM client should not use this mode. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_ONE_SPEED \ + UINT32_C(0x2) + /* + * Select the auto_link_speed or any speed below that speed for autoneg. + * This mode has been DEPRECATED.
An HWRM client should not use this mode. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_ONE_OR_BELOW \ + UINT32_C(0x3) + /* + * Select the speeds based on the corresponding link speed mask value + * that is provided. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_SPEED_MASK \ + UINT32_C(0x4) + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_LAST \ + HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_SPEED_MASK + /* The advertised pause settings on the port by the link partner. */ + uint8_t link_partner_adv_pause; + /* + * When this bit is '1', Generation of tx pause messages + * is supported. Disabled otherwise. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_PAUSE_TX \ + UINT32_C(0x1) + /* + * When this bit is '1', Reception of rx pause messages + * is supported. Disabled otherwise. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_PAUSE_RX \ + UINT32_C(0x2) + /* + * Current setting for link speed mask that is used to + * advertise speeds during autonegotiation when EEE is enabled. + * This field is valid only when eee_enabled flags is set to 1. + * The speeds specified in this field shall be a subset of + * speeds specified in auto_link_speed_mask. + */ + uint16_t adv_eee_link_speed_mask; + /* Reserved */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_RSVD1 \ + UINT32_C(0x1) + /* 100Mb link speed (Full-duplex) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_100MB \ + UINT32_C(0x2) + /* Reserved */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_RSVD2 \ + UINT32_C(0x4) + /* 1Gb link speed (Full-duplex) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_1GB \ + UINT32_C(0x8) + /* Reserved */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_RSVD3 \ + UINT32_C(0x10) + /* Reserved */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_RSVD4 \ + UINT32_C(0x20) + /* 10Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_10GB \ + UINT32_C(0x40) + /* + * Current setting for link speed mask that is advertised by + * the link partner when EEE is enabled. + * This field is valid only when eee_enabled flags is set to 1. + */ + uint16_t link_partner_adv_eee_link_speed_mask; + /* Reserved */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD1 \ + UINT32_C(0x1) + /* 100Mb link speed (Full-duplex) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_100MB \ + UINT32_C(0x2) + /* Reserved */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD2 \ + UINT32_C(0x4) + /* 1Gb link speed (Full-duplex) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_1GB \ + UINT32_C(0x8) + /* Reserved */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD3 \ + UINT32_C(0x10) + /* Reserved */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD4 \ + UINT32_C(0x20) + /* 10Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_10GB \ + UINT32_C(0x40) + uint32_t xcvr_identifier_type_tx_lpi_timer; + /* + * Current setting of TX LPI timer in microseconds. + * This field is valid only when_eee_enabled flag is set to 1 + * and tx_lpi_enabled is set to 1. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_TX_LPI_TIMER_MASK \ + UINT32_C(0xffffff) + #define HWRM_PORT_PHY_QCFG_OUTPUT_TX_LPI_TIMER_SFT 0 + /* This value represents transceiver identifier type. 
*/ + #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_MASK \ + UINT32_C(0xff000000) + #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_SFT 24 + /* Unknown */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_UNKNOWN \ + (UINT32_C(0x0) << 24) + /* SFP/SFP+/SFP28 */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_SFP \ + (UINT32_C(0x3) << 24) + /* QSFP+ */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_QSFP \ + (UINT32_C(0xc) << 24) + /* QSFP+ */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_QSFPPLUS \ + (UINT32_C(0xd) << 24) + /* QSFP28 */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_QSFP28 \ + (UINT32_C(0x11) << 24) + #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_LAST \ + HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_QSFP28 + /* + * This value represents the current configuration of + * Forward Error Correction (FEC) on the port. + */ + uint16_t fec_cfg; + /* + * When set to 1, then FEC is not supported on this port. If this flag + * is set to 1, then all other FEC configuration flags shall be ignored. + * When set to 0, then FEC is supported as indicated by other + * configuration flags. + * If no cable is attached and the HWRM does not yet know the FEC + * capability, then the HWRM shall set this flag to 1 when reporting + * FEC capability. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_NONE_SUPPORTED \ + UINT32_C(0x1) + /* + * When set to 1, then FEC autonegotiation is supported on this port. + * When set to 0, then FEC autonegotiation is not supported on this port. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_AUTONEG_SUPPORTED \ + UINT32_C(0x2) + /* + * When set to 1, then FEC autonegotiation is enabled on this port. + * When set to 0, then FEC autonegotiation is disabled if supported. + * This flag should be ignored if FEC autonegotiation is not supported on this port. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_AUTONEG_ENABLED \ + UINT32_C(0x4) + /* + * When set to 1, then FEC CLAUSE 74 (Fire Code) is supported on this port. + * When set to 0, then FEC CLAUSE 74 (Fire Code) is not supported on this port. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_CLAUSE74_SUPPORTED \ + UINT32_C(0x8) + /* + * When set to 1, then FEC CLAUSE 74 (Fire Code) is enabled on this port. + * When set to 0, then FEC CLAUSE 74 (Fire Code) is disabled if supported. + * This flag should be ignored if FEC CLAUSE 74 is not supported on this port. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_CLAUSE74_ENABLED \ + UINT32_C(0x10) + /* + * When set to 1, then FEC CLAUSE 91 (Reed Solomon) is supported on this port. + * When set to 0, then FEC CLAUSE 91 (Reed Solomon) is not supported on this port. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_CLAUSE91_SUPPORTED \ + UINT32_C(0x20) + /* + * When set to 1, then FEC CLAUSE 91 (Reed Solomon) is enabled on this port. + * When set to 0, then FEC CLAUSE 91 (Reed Solomon) is disabled if supported. + * This flag should be ignored if FEC CLAUSE 91 is not supported on this port. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_CLAUSE91_ENABLED \ + UINT32_C(0x40) + /* + * This value is indicates the duplex of the current + * connection state. + */ + uint8_t duplex_state; + /* Half Duplex connection. */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_STATE_HALF UINT32_C(0x0) + /* Full duplex connection. 
*/ + #define HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_STATE_FULL UINT32_C(0x1) + #define HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_STATE_LAST \ + HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_STATE_FULL + /* Option flags fields. */ + uint8_t option_flags; + /* When this bit is '1', Media auto detect is enabled. */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_OPTION_FLAGS_MEDIA_AUTO_DETECT \ + UINT32_C(0x1) + /* + * Up to 16 bytes of null padded ASCII string representing + * PHY vendor. + * If the string is set to null, then the vendor name is not + * available. + */ + char phy_vendor_name[16]; + /* + * Up to 16 bytes of null padded ASCII string that + * identifies vendor specific part number of the PHY. + * If the string is set to null, then the vendor specific + * part number is not available. + */ + char phy_vendor_partnumber[16]; + uint8_t unused_2[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/********************* + * hwrm_port_mac_cfg * + *********************/ + + +/* hwrm_port_mac_cfg_input (size:384b/48B) */ +struct hwrm_port_mac_cfg_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* + * In this field, there are a number of CoS mappings related flags + * that are used to configure CoS mappings and their corresponding + * priorities in the hardware. + * For the priorities of CoS mappings, the HWRM uses the following + * priority order (high to low) by default: + * # vlan pri + * # ip_dscp + * # tunnel_vlan_pri + * # default cos + * + * A subset of CoS mappings can be enabled. + * If a priority is not specified for an enabled CoS mapping, the + * priority will be assigned in the above order for the enabled CoS + * mappings. For example, if vlan_pri and ip_dscp CoS mappings are + * enabled and their priorities are not specified, the following + * priority order (high to low) will be used by the HWRM: + * # vlan_pri + * # ip_dscp + * # default cos + * + * vlan_pri CoS mapping together with default CoS with lower priority + * are enabled by default by the HWRM. + */ + uint32_t flags; + /* + * When this bit is '1', this command will configure + * the MAC to match the current link state of the PHY. + * If the link is not established on the PHY, then this + * bit has no effect. 
+ */ + #define HWRM_PORT_MAC_CFG_INPUT_FLAGS_MATCH_LINK \ + UINT32_C(0x1) + /* + * When this bit is set to '1', the inner VLAN PRI to CoS mapping + * is requested to be enabled. + */ + #define HWRM_PORT_MAC_CFG_INPUT_FLAGS_VLAN_PRI2COS_ENABLE \ + UINT32_C(0x2) + /* + * When this bit is set to '1', tunnel VLAN PRI field to + * CoS mapping is requested to be enabled. + */ + #define HWRM_PORT_MAC_CFG_INPUT_FLAGS_TUNNEL_PRI2COS_ENABLE \ + UINT32_C(0x4) + /* + * When this bit is set to '1', the IP DSCP to CoS mapping is + * requested to be enabled. + */ + #define HWRM_PORT_MAC_CFG_INPUT_FLAGS_IP_DSCP2COS_ENABLE \ + UINT32_C(0x8) + /* + * When this bit is '1', the HWRM is requested to + * enable timestamp capture capability on the receive side + * of this port. + */ + #define HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_ENABLE \ + UINT32_C(0x10) + /* + * When this bit is '1', the HWRM is requested to + * disable timestamp capture capability on the receive side + * of this port. + */ + #define HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_DISABLE \ + UINT32_C(0x20) + /* + * When this bit is '1', the HWRM is requested to + * enable timestamp capture capability on the transmit side + * of this port. + */ + #define HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_ENABLE \ + UINT32_C(0x40) + /* + * When this bit is '1', the HWRM is requested to + * disable timestamp capture capability on the transmit side + * of this port. + */ + #define HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_DISABLE \ + UINT32_C(0x80) + /* + * When this bit is '1', the Out-Of-Box WoL is requested to + * be enabled on this port. + */ + #define HWRM_PORT_MAC_CFG_INPUT_FLAGS_OOB_WOL_ENABLE \ + UINT32_C(0x100) + /* + * When this bit is '1', the Out-Of-Box WoL is requested to + * be disabled on this port. + */ + #define HWRM_PORT_MAC_CFG_INPUT_FLAGS_OOB_WOL_DISABLE \ + UINT32_C(0x200) + /* + * When this bit is set to '1', the inner VLAN PRI to CoS mapping + * is requested to be disabled. + */ + #define HWRM_PORT_MAC_CFG_INPUT_FLAGS_VLAN_PRI2COS_DISABLE \ + UINT32_C(0x400) + /* + * When this bit is set to '1', tunnel VLAN PRI field to + * CoS mapping is requested to be disabled. + */ + #define HWRM_PORT_MAC_CFG_INPUT_FLAGS_TUNNEL_PRI2COS_DISABLE \ + UINT32_C(0x800) + /* + * When this bit is set to '1', the IP DSCP to CoS mapping is + * requested to be disabled. + */ + #define HWRM_PORT_MAC_CFG_INPUT_FLAGS_IP_DSCP2COS_DISABLE \ + UINT32_C(0x1000) + /* + * When this bit is set to '1', and the ptp_tx_ts_capture_enable + * bit is set, then the device uses one step Tx timestamping. + * This bit is temporary and used for experimental purposes. + */ + #define HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_ONE_STEP_TX_TS \ + UINT32_C(0x2000) + uint32_t enables; + /* + * This bit must be '1' for the ipg field to be + * configured. + */ + #define HWRM_PORT_MAC_CFG_INPUT_ENABLES_IPG \ + UINT32_C(0x1) + /* + * This bit must be '1' for the lpbk field to be + * configured. + */ + #define HWRM_PORT_MAC_CFG_INPUT_ENABLES_LPBK \ + UINT32_C(0x2) + /* + * This bit must be '1' for the vlan_pri2cos_map_pri field to be + * configured. + */ + #define HWRM_PORT_MAC_CFG_INPUT_ENABLES_VLAN_PRI2COS_MAP_PRI \ + UINT32_C(0x4) + /* + * This bit must be '1' for the tunnel_pri2cos_map_pri field to be + * configured. + */ + #define HWRM_PORT_MAC_CFG_INPUT_ENABLES_TUNNEL_PRI2COS_MAP_PRI \ + UINT32_C(0x10) + /* + * This bit must be '1' for the dscp2cos_map_pri field to be + * configured. 
+ */ + #define HWRM_PORT_MAC_CFG_INPUT_ENABLES_DSCP2COS_MAP_PRI \ + UINT32_C(0x20) + /* + * This bit must be '1' for the rx_ts_capture_ptp_msg_type field to be + * configured. + */ + #define HWRM_PORT_MAC_CFG_INPUT_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE \ + UINT32_C(0x40) + /* + * This bit must be '1' for the tx_ts_capture_ptp_msg_type field to be + * configured. + */ + #define HWRM_PORT_MAC_CFG_INPUT_ENABLES_TX_TS_CAPTURE_PTP_MSG_TYPE \ + UINT32_C(0x80) + /* + * This bit must be '1' for the cos_field_cfg field to be + * configured. + */ + #define HWRM_PORT_MAC_CFG_INPUT_ENABLES_COS_FIELD_CFG \ + UINT32_C(0x100) + /* + * This bit must be '1' for the ptp_freq_adj_ppb field to be + * configured. + */ + #define HWRM_PORT_MAC_CFG_INPUT_ENABLES_PTP_FREQ_ADJ_PPB \ + UINT32_C(0x200) + /* Port ID of port that is to be configured. */ + uint16_t port_id; + /* + * This value is used to configure the minimum IPG that will + * be sent between packets by this port. + */ + uint8_t ipg; + /* This value controls the loopback setting for the MAC. */ + uint8_t lpbk; + /* No loopback is selected. Normal operation. */ + #define HWRM_PORT_MAC_CFG_INPUT_LPBK_NONE UINT32_C(0x0) + /* + * The HW will be configured with local loopback such that + * host data is sent back to the host without modification. + */ + #define HWRM_PORT_MAC_CFG_INPUT_LPBK_LOCAL UINT32_C(0x1) + /* + * The HW will be configured with remote loopback such that + * port logic will send packets back out the transmitter that + * are received. + */ + #define HWRM_PORT_MAC_CFG_INPUT_LPBK_REMOTE UINT32_C(0x2) + #define HWRM_PORT_MAC_CFG_INPUT_LPBK_LAST \ + HWRM_PORT_MAC_CFG_INPUT_LPBK_REMOTE + /* + * This value controls the priority setting of VLAN PRI to CoS + * mapping based on VLAN Tags of inner packet headers of + * tunneled packets or packet headers of non-tunneled packets. + * + * # Each XXX_pri variable shall have a unique priority value + * when it is being specified. + * # When comparing priorities of mappings, higher value + * indicates higher priority. + * For example, a value of 0-3 is returned where 0 is being + * the lowest priority and 3 is being the highest priority. + */ + uint8_t vlan_pri2cos_map_pri; + /* Reserved field. */ + uint8_t reserved1; + /* + * This value controls the priority setting of VLAN PRI to CoS + * mapping based on VLAN Tags of tunneled header. + * This mapping only applies when tunneled headers + * are present. + * + * # Each XXX_pri variable shall have a unique priority value + * when it is being specified. + * # When comparing priorities of mappings, higher value + * indicates higher priority. + * For example, a value of 0-3 is returned where 0 is being + * the lowest priority and 3 is being the highest priority. + */ + uint8_t tunnel_pri2cos_map_pri; + /* + * This value controls the priority setting of IP DSCP to CoS + * mapping based on inner IP header of tunneled packets or + * IP header of non-tunneled packets. + * + * # Each XXX_pri variable shall have a unique priority value + * when it is being specified. + * # When comparing priorities of mappings, higher value + * indicates higher priority. + * For example, a value of 0-3 is returned where 0 is being + * the lowest priority and 3 is being the highest priority. + */ + uint8_t dscp2pri_map_pri; + /* + * This is a 16-bit bit mask that is used to request a + * specific configuration of time stamp capture of PTP messages + * on the receive side of this port. + * This field shall be ignored if the ptp_rx_ts_capture_enable + * flag is not set in this command. 
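As the enables bits above indicate, each optional field in this request is honored only when its matching enables bit is set, which is the general HWRM convention. The following is only a hedged sketch of building such a request: just the IPG field is configured, sending the request to firmware is driver-specific and omitted, and HWRM_PORT_MAC_CFG is assumed to be the command identifier defined elsewhere in this header.

#include <string.h>
#include <rte_byteorder.h>

/* Sketch: request a new minimum inter-packet gap on port 0. */
static void build_port_mac_cfg_ipg(struct hwrm_port_mac_cfg_input *req,
				   uint8_t new_ipg)
{
	memset(req, 0, sizeof(*req));
	req->req_type = rte_cpu_to_le_16(HWRM_PORT_MAC_CFG);   /* assumed id */
	req->port_id = rte_cpu_to_le_16(0);
	/* The ipg field is ignored unless its enables bit is set. */
	req->enables = rte_cpu_to_le_32(HWRM_PORT_MAC_CFG_INPUT_ENABLES_IPG);
	req->ipg = new_ipg;
}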
+ * Otherwise, if bit 'i' is set, then the HWRM is being + * requested to configure the receive side of the port to + * capture the time stamp of every received PTP message + * with messageType field value set to i. + */ + uint16_t rx_ts_capture_ptp_msg_type; + /* + * This is a 16-bit bit mask that is used to request a + * specific configuration of time stamp capture of PTP messages + * on the transmit side of this port. + * This field shall be ignored if the ptp_tx_ts_capture_enable + * flag is not set in this command. + * Otherwise, if bit 'i' is set, then the HWRM is being + * requested to configure the transmit side of the port to + * capture the time stamp of every transmitted PTP message + * with messageType field value set to i. + */ + uint16_t tx_ts_capture_ptp_msg_type; + /* Configuration of CoS fields. */ + uint8_t cos_field_cfg; + /* Reserved */ + #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_RSVD1 \ + UINT32_C(0x1) + /* + * This field is used to specify selection of VLAN PRI value + * based on whether one or two VLAN Tags are present in + * the inner packet headers of tunneled packets or + * non-tunneled packets. + * This field is valid only if inner VLAN PRI to CoS mapping + * is enabled. + * If VLAN PRI to CoS mapping is not enabled, then this + * field shall be ignored. + */ + #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_VLAN_PRI_SEL_MASK \ + UINT32_C(0x6) + #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_VLAN_PRI_SEL_SFT \ + 1 + /* + * Select inner VLAN PRI when 1 or 2 VLAN Tags are + * present in the inner packet headers + */ + #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_VLAN_PRI_SEL_INNERMOST \ + (UINT32_C(0x0) << 1) + /* + * Select outer VLAN Tag PRI when 2 VLAN Tags are + * present in the inner packet headers. + * No VLAN PRI shall be selected for this configuration + * if only one VLAN Tag is present in the inner + * packet headers. + */ + #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_VLAN_PRI_SEL_OUTER \ + (UINT32_C(0x1) << 1) + /* + * Select outermost VLAN PRI when 1 or 2 VLAN Tags + * are present in the inner packet headers + */ + #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_VLAN_PRI_SEL_OUTERMOST \ + (UINT32_C(0x2) << 1) + /* Unspecified */ + #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED \ + (UINT32_C(0x3) << 1) + #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_VLAN_PRI_SEL_LAST \ + HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED + /* + * This field is used to specify selection of tunnel VLAN + * PRI value based on whether one or two VLAN Tags are + * present in tunnel headers. + * This field is valid only if tunnel VLAN PRI to CoS mapping + * is enabled. + * If tunnel VLAN PRI to CoS mapping is not enabled, then this + * field shall be ignored. + */ + #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_MASK \ + UINT32_C(0x18) + #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_SFT \ + 3 + /* + * Select inner VLAN PRI when 1 or 2 VLAN Tags are + * present in the tunnel packet headers + */ + #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_INNERMOST \ + (UINT32_C(0x0) << 3) + /* + * Select outer VLAN Tag PRI when 2 VLAN Tags are + * present in the tunnel packet headers. + * No tunnel VLAN PRI shall be selected for this + * configuration if only one VLAN Tag is present in + * the tunnel packet headers. 
+ */ + #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTER \ + (UINT32_C(0x1) << 3) + /* + * Select outermost VLAN PRI when 1 or 2 VLAN Tags + * are present in the tunnel packet headers + */ + #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTERMOST \ + (UINT32_C(0x2) << 3) + /* Unspecified */ + #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED \ + (UINT32_C(0x3) << 3) + #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_LAST \ + HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED + /* + * This field shall be used to provide default CoS value + * that has been configured on this port. + * This field is valid only if default CoS mapping + * is enabled. + * If default CoS mapping is not enabled, then this + * field shall be ignored. + */ + #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_DEFAULT_COS_MASK \ + UINT32_C(0xe0) + #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_DEFAULT_COS_SFT \ + 5 + uint8_t unused_0[3]; + /* + * This signed field specifies by how much to adjust the frequency + * of sync timer updates (measured in parts per billion). + */ + int32_t ptp_freq_adj_ppb; + uint8_t unused_1[4]; +} __rte_packed; + +/* hwrm_port_mac_cfg_output (size:128b/16B) */ +struct hwrm_port_mac_cfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* + * This is the configured maximum length of Ethernet packet + * payload that is allowed to be received on the port. + * This value does not include the number of bytes used by + * Ethernet header and trailer (CRC). + */ + uint16_t mru; + /* + * This is the configured maximum length of Ethernet packet + * payload that is allowed to be transmitted on the port. + * This value does not include the number of bytes used by + * Ethernet header and trailer (CRC). + */ + uint16_t mtu; + /* Current configuration of the IPG value. */ + uint8_t ipg; + /* Current value of the loopback value. */ + uint8_t lpbk; + /* No loopback is selected. Normal operation. */ + #define HWRM_PORT_MAC_CFG_OUTPUT_LPBK_NONE UINT32_C(0x0) + /* + * The HW will be configured with local loopback such that + * host data is sent back to the host without modification. + */ + #define HWRM_PORT_MAC_CFG_OUTPUT_LPBK_LOCAL UINT32_C(0x1) + /* + * The HW will be configured with remote loopback such that + * port logic will send packets back out the transmitter that + * are received. + */ + #define HWRM_PORT_MAC_CFG_OUTPUT_LPBK_REMOTE UINT32_C(0x2) + #define HWRM_PORT_MAC_CFG_OUTPUT_LPBK_LAST \ + HWRM_PORT_MAC_CFG_OUTPUT_LPBK_REMOTE + uint8_t unused_0; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/********************** + * hwrm_port_mac_qcfg * + **********************/ + + +/* hwrm_port_mac_qcfg_input (size:192b/24B) */ +struct hwrm_port_mac_qcfg_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. 
+ */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* Port ID of port that is to be configured. */ + uint16_t port_id; + uint8_t unused_0[6]; +} __rte_packed; + +/* hwrm_port_mac_qcfg_output (size:256b/32B) */ +struct hwrm_port_mac_qcfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* + * This is the configured maximum length of Ethernet packet + * payload that is allowed to be received on the port. + * This value does not include the number of bytes used by the + * Ethernet header and trailer (CRC). + */ + uint16_t mru; + /* + * This is the configured maximum length of Ethernet packet + * payload that is allowed to be transmitted on the port. + * This value does not include the number of bytes used by the + * Ethernet header and trailer (CRC). + */ + uint16_t mtu; + /* + * The minimum IPG that will + * be sent between packets by this port. + */ + uint8_t ipg; + /* The loopback setting for the MAC. */ + uint8_t lpbk; + /* No loopback is selected. Normal operation. */ + #define HWRM_PORT_MAC_QCFG_OUTPUT_LPBK_NONE UINT32_C(0x0) + /* + * The HW will be configured with local loopback such that + * host data is sent back to the host without modification. + */ + #define HWRM_PORT_MAC_QCFG_OUTPUT_LPBK_LOCAL UINT32_C(0x1) + /* + * The HW will be configured with remote loopback such that + * port logic will send packets back out the transmitter that + * are received. + */ + #define HWRM_PORT_MAC_QCFG_OUTPUT_LPBK_REMOTE UINT32_C(0x2) + #define HWRM_PORT_MAC_QCFG_OUTPUT_LPBK_LAST \ + HWRM_PORT_MAC_QCFG_OUTPUT_LPBK_REMOTE + /* + * Priority setting for VLAN PRI to CoS mapping. + * # Each XXX_pri variable shall have a unique priority value + * when it is being used. + * # When comparing priorities of mappings, higher value + * indicates higher priority. + * For example, a value of 0-3 is returned where 0 is being + * the lowest priority and 3 is being the highest priority. + * # If the correspoding CoS mapping is not enabled, then this + * field should be ignored. + * # This value indicates the normalized priority value retained + * in the HWRM. + */ + uint8_t vlan_pri2cos_map_pri; + /* + * In this field, a number of CoS mappings related flags + * are used to indicate configured CoS mappings. + */ + uint8_t flags; + /* + * When this bit is set to '1', the inner VLAN PRI to CoS mapping + * is enabled. + */ + #define HWRM_PORT_MAC_QCFG_OUTPUT_FLAGS_VLAN_PRI2COS_ENABLE \ + UINT32_C(0x1) + /* + * When this bit is set to '1', tunnel VLAN PRI field to + * CoS mapping is enabled. 
+ */ + #define HWRM_PORT_MAC_QCFG_OUTPUT_FLAGS_TUNNEL_PRI2COS_ENABLE \ + UINT32_C(0x2) + /* + * When this bit is set to '1', the IP DSCP to CoS mapping is + * enabled. + */ + #define HWRM_PORT_MAC_QCFG_OUTPUT_FLAGS_IP_DSCP2COS_ENABLE \ + UINT32_C(0x4) + /* + * When this bit is '1', the Out-Of-Box WoL is enabled on this + * port. + */ + #define HWRM_PORT_MAC_QCFG_OUTPUT_FLAGS_OOB_WOL_ENABLE \ + UINT32_C(0x8) + /* When this bit is '1', PTP is enabled for RX on this port. */ + #define HWRM_PORT_MAC_QCFG_OUTPUT_FLAGS_PTP_RX_TS_CAPTURE_ENABLE \ + UINT32_C(0x10) + /* When this bit is '1', PTP is enabled for TX on this port. */ + #define HWRM_PORT_MAC_QCFG_OUTPUT_FLAGS_PTP_TX_TS_CAPTURE_ENABLE \ + UINT32_C(0x20) + /* + * Priority setting for tunnel VLAN PRI to CoS mapping. + * # Each XXX_pri variable shall have a unique priority value + * when it is being used. + * # When comparing priorities of mappings, higher value + * indicates higher priority. + * For example, a value of 0-3 is returned where 0 is being + * the lowest priority and 3 is being the highest priority. + * # If the correspoding CoS mapping is not enabled, then this + * field should be ignored. + * # This value indicates the normalized priority value retained + * in the HWRM. + */ + uint8_t tunnel_pri2cos_map_pri; + /* + * Priority setting for DSCP to PRI mapping. + * # Each XXX_pri variable shall have a unique priority value + * when it is being used. + * # When comparing priorities of mappings, higher value + * indicates higher priority. + * For example, a value of 0-3 is returned where 0 is being + * the lowest priority and 3 is being the highest priority. + * # If the correspoding CoS mapping is not enabled, then this + * field should be ignored. + * # This value indicates the normalized priority value retained + * in the HWRM. + */ + uint8_t dscp2pri_map_pri; + /* + * This is a 16-bit bit mask that represents the + * current configuration of time stamp capture of PTP messages + * on the receive side of this port. + * If bit 'i' is set, then the receive side of the port + * is configured to capture the time stamp of every + * received PTP message with messageType field value set + * to i. + * If all bits are set to 0 (i.e. field value set 0), + * then the receive side of the port is not configured + * to capture timestamp for PTP messages. + * If all bits are set to 1, then the receive side of the + * port is configured to capture timestamp for all PTP + * messages. + */ + uint16_t rx_ts_capture_ptp_msg_type; + /* + * This is a 16-bit bit mask that represents the + * current configuration of time stamp capture of PTP messages + * on the transmit side of this port. + * If bit 'i' is set, then the transmit side of the port + * is configured to capture the time stamp of every + * received PTP message with messageType field value set + * to i. + * If all bits are set to 0 (i.e. field value set 0), + * then the transmit side of the port is not configured + * to capture timestamp for PTP messages. + * If all bits are set to 1, then the transmit side of the + * port is configured to capture timestamp for all PTP + * messages. + */ + uint16_t tx_ts_capture_ptp_msg_type; + /* Configuration of CoS fields. */ + uint8_t cos_field_cfg; + /* Reserved */ + #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_RSVD \ + UINT32_C(0x1) + /* + * This field is used for selecting VLAN PRI value + * based on whether one or two VLAN Tags are present in + * the inner packet headers of tunneled packets or + * non-tunneled packets. 
+ */ + #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_VLAN_PRI_SEL_MASK \ + UINT32_C(0x6) + #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_VLAN_PRI_SEL_SFT \ + 1 + /* + * Select inner VLAN PRI when 1 or 2 VLAN Tags are + * present in the inner packet headers + */ + #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_VLAN_PRI_SEL_INNERMOST \ + (UINT32_C(0x0) << 1) + /* + * Select outer VLAN Tag PRI when 2 VLAN Tags are + * present in the inner packet headers. + * No VLAN PRI is selected for this configuration + * if only one VLAN Tag is present in the inner + * packet headers. + */ + #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_VLAN_PRI_SEL_OUTER \ + (UINT32_C(0x1) << 1) + /* + * Select outermost VLAN PRI when 1 or 2 VLAN Tags + * are present in the inner packet headers + */ + #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_VLAN_PRI_SEL_OUTERMOST \ + (UINT32_C(0x2) << 1) + /* Unspecified */ + #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED \ + (UINT32_C(0x3) << 1) + #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_VLAN_PRI_SEL_LAST \ + HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED + /* + * This field is used for selecting tunnel VLAN PRI value + * based on whether one or two VLAN Tags are present in + * the tunnel headers of tunneled packets. This selection + * does not apply to non-tunneled packets. + */ + #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_MASK \ + UINT32_C(0x18) + #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_SFT \ + 3 + /* + * Select inner VLAN PRI when 1 or 2 VLAN Tags are + * present in the tunnel packet headers + */ + #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_INNERMOST \ + (UINT32_C(0x0) << 3) + /* + * Select outer VLAN Tag PRI when 2 VLAN Tags are + * present in the tunnel packet headers. + * No VLAN PRI is selected for this configuration + * if only one VLAN Tag is present in the tunnel + * packet headers. + */ + #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTER \ + (UINT32_C(0x1) << 3) + /* + * Select outermost VLAN PRI when 1 or 2 VLAN Tags + * are present in the tunnel packet headers + */ + #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTERMOST \ + (UINT32_C(0x2) << 3) + /* Unspecified */ + #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED \ + (UINT32_C(0x3) << 3) + #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_LAST \ + HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED + /* + * This field is used to provide default CoS value that + * has been configured on this port. + */ + #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_DEFAULT_COS_MASK \ + UINT32_C(0xe0) + #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_DEFAULT_COS_SFT \ + 5 + uint8_t unused_1; + uint16_t port_svif_info; + /* + * This field specifies the source virtual interface of the port being + * queried. Drivers can use this to program port svif field in the + * L2 context table + */ + #define HWRM_PORT_MAC_QCFG_OUTPUT_PORT_SVIF_INFO_PORT_SVIF_MASK \ + UINT32_C(0x7fff) + #define HWRM_PORT_MAC_QCFG_OUTPUT_PORT_SVIF_INFO_PORT_SVIF_SFT 0 + /* This field specifies whether port_svif is valid or not */ + #define HWRM_PORT_MAC_QCFG_OUTPUT_PORT_SVIF_INFO_PORT_SVIF_VALID \ + UINT32_C(0x8000) + uint8_t unused_2[5]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. 
+ * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/************************** + * hwrm_port_mac_ptp_qcfg * + **************************/ + + +/* hwrm_port_mac_ptp_qcfg_input (size:192b/24B) */ +struct hwrm_port_mac_ptp_qcfg_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* Port ID of port that is being queried. */ + uint16_t port_id; + uint8_t unused_0[6]; +} __rte_packed; + +/* hwrm_port_mac_ptp_qcfg_output (size:640b/80B) */ +struct hwrm_port_mac_ptp_qcfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* + * In this field, a number of PTP related flags + * are used to indicate configured PTP capabilities. + */ + uint8_t flags; + /* + * When this bit is set to '1', the PTP related registers are + * directly accessible by the host. + */ + #define HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_DIRECT_ACCESS \ + UINT32_C(0x1) + /* + * When this bit is set to '1', the device supports one-step + * Tx timestamping. + */ + #define HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_ONE_STEP_TX_TS \ + UINT32_C(0x4) + /* + * When this bit is set to '1', the PTP information is accessible + * via HWRM commands. + */ + #define HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_HWRM_ACCESS \ + UINT32_C(0x8) + uint8_t unused_0[3]; + /* Offset of the PTP register for the lower 32 bits of timestamp for RX. */ + uint32_t rx_ts_reg_off_lower; + /* Offset of the PTP register for the upper 32 bits of timestamp for RX. */ + uint32_t rx_ts_reg_off_upper; + /* Offset of the PTP register for the sequence ID for RX. */ + uint32_t rx_ts_reg_off_seq_id; + /* Offset of the first PTP source ID for RX. */ + uint32_t rx_ts_reg_off_src_id_0; + /* Offset of the second PTP source ID for RX. */ + uint32_t rx_ts_reg_off_src_id_1; + /* Offset of the third PTP source ID for RX. */ + uint32_t rx_ts_reg_off_src_id_2; + /* Offset of the domain ID for RX. */ + uint32_t rx_ts_reg_off_domain_id; + /* Offset of the PTP FIFO register for RX. */ + uint32_t rx_ts_reg_off_fifo; + /* Offset of the PTP advance FIFO register for RX. */ + uint32_t rx_ts_reg_off_fifo_adv; + /* PTP timestamp granularity for RX. */ + uint32_t rx_ts_reg_off_granularity; + /* Offset of the PTP register for the lower 32 bits of timestamp for TX. 
*/ + uint32_t tx_ts_reg_off_lower; + /* Offset of the PTP register for the upper 32 bits of timestamp for TX. */ + uint32_t tx_ts_reg_off_upper; + /* Offset of the PTP register for the sequence ID for TX. */ + uint32_t tx_ts_reg_off_seq_id; + /* Offset of the PTP FIFO register for TX. */ + uint32_t tx_ts_reg_off_fifo; + /* PTP timestamp granularity for TX. */ + uint32_t tx_ts_reg_off_granularity; + uint8_t unused_1[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/* Port Tx Statistics Format */ +/* tx_port_stats (size:3264b/408B) */ +struct tx_port_stats { + /* Total Number of 64 Bytes frames transmitted */ + uint64_t tx_64b_frames; + /* Total Number of 65-127 Bytes frames transmitted */ + uint64_t tx_65b_127b_frames; + /* Total Number of 128-255 Bytes frames transmitted */ + uint64_t tx_128b_255b_frames; + /* Total Number of 256-511 Bytes frames transmitted */ + uint64_t tx_256b_511b_frames; + /* Total Number of 512-1023 Bytes frames transmitted */ + uint64_t tx_512b_1023b_frames; + /* Total Number of 1024-1518 Bytes frames transmitted */ + uint64_t tx_1024b_1518b_frames; + /* + * Total Number of each good VLAN (exludes FCS errors) + * frame transmitted which is 1519 to 1522 bytes in length + * inclusive (excluding framing bits but including FCS bytes). + */ + uint64_t tx_good_vlan_frames; + /* Total Number of 1519-2047 Bytes frames transmitted */ + uint64_t tx_1519b_2047b_frames; + /* Total Number of 2048-4095 Bytes frames transmitted */ + uint64_t tx_2048b_4095b_frames; + /* Total Number of 4096-9216 Bytes frames transmitted */ + uint64_t tx_4096b_9216b_frames; + /* Total Number of 9217-16383 Bytes frames transmitted */ + uint64_t tx_9217b_16383b_frames; + /* Total Number of good frames transmitted */ + uint64_t tx_good_frames; + /* Total Number of frames transmitted */ + uint64_t tx_total_frames; + /* Total number of unicast frames transmitted */ + uint64_t tx_ucast_frames; + /* Total number of multicast frames transmitted */ + uint64_t tx_mcast_frames; + /* Total number of broadcast frames transmitted */ + uint64_t tx_bcast_frames; + /* Total number of PAUSE control frames transmitted */ + uint64_t tx_pause_frames; + /* + * Total number of PFC/per-priority PAUSE + * control frames transmitted + */ + uint64_t tx_pfc_frames; + /* Total number of jabber frames transmitted */ + uint64_t tx_jabber_frames; + /* Total number of frames transmitted with FCS error */ + uint64_t tx_fcs_err_frames; + /* Total number of control frames transmitted */ + uint64_t tx_control_frames; + /* Total number of over-sized frames transmitted */ + uint64_t tx_oversz_frames; + /* Total number of frames with single deferral */ + uint64_t tx_single_dfrl_frames; + /* Total number of frames with multiple deferrals */ + uint64_t tx_multi_dfrl_frames; + /* Total number of frames with single collision */ + uint64_t tx_single_coll_frames; + /* Total number of frames with multiple collisions */ + uint64_t tx_multi_coll_frames; + /* Total number of frames with late collisions */ + uint64_t tx_late_coll_frames; + /* Total number of frames with excessive collisions */ + uint64_t tx_excessive_coll_frames; + /* Total number of fragmented frames transmitted */ + uint64_t tx_frag_frames; 
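+ /*
+ * Editorial note, not part of the generated HWRM definition: this
+ * tx_port_stats block is the layout that firmware DMAs into the buffer
+ * passed as tx_stat_host_addr in hwrm_port_qstats (defined later in
+ * this file). A minimal, hypothetical read of one counter might look
+ * like the following, where tx_stats_buf is assumed to be the
+ * physically contiguous buffer the driver registered with the command:
+ *
+ *     struct tx_port_stats *ts = tx_stats_buf;
+ *     uint64_t total = rte_le_to_cpu_64(ts->tx_total_frames);
+ *
+ * HWRM fields are little-endian, hence the rte_le_to_cpu_64()
+ * conversion from <rte_byteorder.h>.
+ */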
+ /* Total number of transmit errors */ + uint64_t tx_err; + /* Total number of single VLAN tagged frames transmitted */ + uint64_t tx_tagged_frames; + /* Total number of double VLAN tagged frames transmitted */ + uint64_t tx_dbl_tagged_frames; + /* Total number of runt frames transmitted */ + uint64_t tx_runt_frames; + /* Total number of TX FIFO under runs */ + uint64_t tx_fifo_underruns; + /* + * Total number of PFC frames with PFC enabled bit for + * Pri 0 transmitted + */ + uint64_t tx_pfc_ena_frames_pri0; + /* + * Total number of PFC frames with PFC enabled bit for + * Pri 1 transmitted + */ + uint64_t tx_pfc_ena_frames_pri1; + /* + * Total number of PFC frames with PFC enabled bit for + * Pri 2 transmitted + */ + uint64_t tx_pfc_ena_frames_pri2; + /* + * Total number of PFC frames with PFC enabled bit for + * Pri 3 transmitted + */ + uint64_t tx_pfc_ena_frames_pri3; + /* + * Total number of PFC frames with PFC enabled bit for + * Pri 4 transmitted + */ + uint64_t tx_pfc_ena_frames_pri4; + /* + * Total number of PFC frames with PFC enabled bit for + * Pri 5 transmitted + */ + uint64_t tx_pfc_ena_frames_pri5; + /* + * Total number of PFC frames with PFC enabled bit for + * Pri 6 transmitted + */ + uint64_t tx_pfc_ena_frames_pri6; + /* + * Total number of PFC frames with PFC enabled bit for + * Pri 7 transmitted + */ + uint64_t tx_pfc_ena_frames_pri7; + /* Total number of EEE LPI Events on TX */ + uint64_t tx_eee_lpi_events; + /* EEE LPI Duration Counter on TX */ + uint64_t tx_eee_lpi_duration; + /* + * Total number of Link Level Flow Control (LLFC) messages + * transmitted + */ + uint64_t tx_llfc_logical_msgs; + /* Total number of HCFC messages transmitted */ + uint64_t tx_hcfc_msgs; + /* Total number of TX collisions */ + uint64_t tx_total_collisions; + /* Total number of transmitted bytes */ + uint64_t tx_bytes; + /* Total number of end-to-end HOL frames */ + uint64_t tx_xthol_frames; + /* Total Tx Drops per Port reported by STATS block */ + uint64_t tx_stat_discard; + /* Total Tx Error Drops per Port reported by STATS block */ + uint64_t tx_stat_error; +} __rte_packed; + +/* Port Rx Statistics Format */ +/* rx_port_stats (size:4224b/528B) */ +struct rx_port_stats { + /* Total Number of 64 Bytes frames received */ + uint64_t rx_64b_frames; + /* Total Number of 65-127 Bytes frames received */ + uint64_t rx_65b_127b_frames; + /* Total Number of 128-255 Bytes frames received */ + uint64_t rx_128b_255b_frames; + /* Total Number of 256-511 Bytes frames received */ + uint64_t rx_256b_511b_frames; + /* Total Number of 512-1023 Bytes frames received */ + uint64_t rx_512b_1023b_frames; + /* Total Number of 1024-1518 Bytes frames received */ + uint64_t rx_1024b_1518b_frames; + /* + * Total Number of each good VLAN (exludes FCS errors) + * frame received which is 1519 to 1522 bytes in length + * inclusive (excluding framing bits but including FCS bytes). 
+ */ + uint64_t rx_good_vlan_frames; + /* Total Number of 1519-2047 Bytes frames received */ + uint64_t rx_1519b_2047b_frames; + /* Total Number of 2048-4095 Bytes frames received */ + uint64_t rx_2048b_4095b_frames; + /* Total Number of 4096-9216 Bytes frames received */ + uint64_t rx_4096b_9216b_frames; + /* Total Number of 9217-16383 Bytes frames received */ + uint64_t rx_9217b_16383b_frames; + /* Total number of frames received */ + uint64_t rx_total_frames; + /* Total number of unicast frames received */ + uint64_t rx_ucast_frames; + /* Total number of multicast frames received */ + uint64_t rx_mcast_frames; + /* Total number of broadcast frames received */ + uint64_t rx_bcast_frames; + /* Total number of received frames with FCS error */ + uint64_t rx_fcs_err_frames; + /* Total number of control frames received */ + uint64_t rx_ctrl_frames; + /* Total number of PAUSE frames received */ + uint64_t rx_pause_frames; + /* Total number of PFC frames received */ + uint64_t rx_pfc_frames; + /* + * Total number of frames received with an unsupported + * opcode + */ + uint64_t rx_unsupported_opcode_frames; + /* + * Total number of frames received with an unsupported + * DA for pause and PFC + */ + uint64_t rx_unsupported_da_pausepfc_frames; + /* Total number of frames received with an unsupported SA */ + uint64_t rx_wrong_sa_frames; + /* Total number of received packets with alignment error */ + uint64_t rx_align_err_frames; + /* Total number of received frames with out-of-range length */ + uint64_t rx_oor_len_frames; + /* Total number of received frames with error termination */ + uint64_t rx_code_err_frames; + /* + * Total number of received frames with a false carrier is + * detected during idle, as defined by RX_ER samples active + * and RXD is 0xE. The event is reported along with the + * statistics generated on the next received frame. Only + * one false carrier condition can be detected and logged + * between frames. + * + * Carrier event, valid for 10M/100M speed modes only. 
+ */ + uint64_t rx_false_carrier_frames; + /* Total number of over-sized frames received */ + uint64_t rx_ovrsz_frames; + /* Total number of jabber packets received */ + uint64_t rx_jbr_frames; + /* Total number of received frames with MTU error */ + uint64_t rx_mtu_err_frames; + /* Total number of received frames with CRC match */ + uint64_t rx_match_crc_frames; + /* Total number of frames received promiscuously */ + uint64_t rx_promiscuous_frames; + /* + * Total number of received frames with one or two VLAN + * tags + */ + uint64_t rx_tagged_frames; + /* Total number of received frames with two VLAN tags */ + uint64_t rx_double_tagged_frames; + /* Total number of truncated frames received */ + uint64_t rx_trunc_frames; + /* Total number of good frames (without errors) received */ + uint64_t rx_good_frames; + /* + * Total number of received PFC frames with transition from + * XON to XOFF on Pri 0 + */ + uint64_t rx_pfc_xon2xoff_frames_pri0; + /* + * Total number of received PFC frames with transition from + * XON to XOFF on Pri 1 + */ + uint64_t rx_pfc_xon2xoff_frames_pri1; + /* + * Total number of received PFC frames with transition from + * XON to XOFF on Pri 2 + */ + uint64_t rx_pfc_xon2xoff_frames_pri2; + /* + * Total number of received PFC frames with transition from + * XON to XOFF on Pri 3 + */ + uint64_t rx_pfc_xon2xoff_frames_pri3; + /* + * Total number of received PFC frames with transition from + * XON to XOFF on Pri 4 + */ + uint64_t rx_pfc_xon2xoff_frames_pri4; + /* + * Total number of received PFC frames with transition from + * XON to XOFF on Pri 5 + */ + uint64_t rx_pfc_xon2xoff_frames_pri5; + /* + * Total number of received PFC frames with transition from + * XON to XOFF on Pri 6 + */ + uint64_t rx_pfc_xon2xoff_frames_pri6; + /* + * Total number of received PFC frames with transition from + * XON to XOFF on Pri 7 + */ + uint64_t rx_pfc_xon2xoff_frames_pri7; + /* + * Total number of received PFC frames with PFC enabled + * bit for Pri 0 + */ + uint64_t rx_pfc_ena_frames_pri0; + /* + * Total number of received PFC frames with PFC enabled + * bit for Pri 1 + */ + uint64_t rx_pfc_ena_frames_pri1; + /* + * Total number of received PFC frames with PFC enabled + * bit for Pri 2 + */ + uint64_t rx_pfc_ena_frames_pri2; + /* + * Total number of received PFC frames with PFC enabled + * bit for Pri 3 + */ + uint64_t rx_pfc_ena_frames_pri3; + /* + * Total number of received PFC frames with PFC enabled + * bit for Pri 4 + */ + uint64_t rx_pfc_ena_frames_pri4; + /* + * Total number of received PFC frames with PFC enabled + * bit for Pri 5 + */ + uint64_t rx_pfc_ena_frames_pri5; + /* + * Total number of received PFC frames with PFC enabled + * bit for Pri 6 + */ + uint64_t rx_pfc_ena_frames_pri6; + /* + * Total number of received PFC frames with PFC enabled + * bit for Pri 7 + */ + uint64_t rx_pfc_ena_frames_pri7; + /* Total Number of frames received with SCH CRC error */ + uint64_t rx_sch_crc_err_frames; + /* Total Number of under-sized frames received */ + uint64_t rx_undrsz_frames; + /* Total Number of fragmented frames received */ + uint64_t rx_frag_frames; + /* Total number of RX EEE LPI Events */ + uint64_t rx_eee_lpi_events; + /* EEE LPI Duration Counter on RX */ + uint64_t rx_eee_lpi_duration; + /* + * Total number of physical type Link Level Flow Control + * (LLFC) messages received + */ + uint64_t rx_llfc_physical_msgs; + /* + * Total number of logical type Link Level Flow Control + * (LLFC) messages received + */ + uint64_t rx_llfc_logical_msgs; + /* + * Total number of 
logical type Link Level Flow Control + * (LLFC) messages received with CRC error + */ + uint64_t rx_llfc_msgs_with_crc_err; + /* Total number of HCFC messages received */ + uint64_t rx_hcfc_msgs; + /* Total number of HCFC messages received with CRC error */ + uint64_t rx_hcfc_msgs_with_crc_err; + /* Total number of received bytes */ + uint64_t rx_bytes; + /* Total number of bytes received in runt frames */ + uint64_t rx_runt_bytes; + /* Total number of runt frames received */ + uint64_t rx_runt_frames; + /* Total Rx Discards per Port reported by STATS block */ + uint64_t rx_stat_discard; + uint64_t rx_stat_err; +} __rte_packed; + +/******************** + * hwrm_port_qstats * + ********************/ + + +/* hwrm_port_qstats_input (size:320b/40B) */ +struct hwrm_port_qstats_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* Port ID of port that is being queried. */ + uint16_t port_id; + uint8_t unused_0[6]; + /* + * This is the host address where + * Tx port statistics will be stored + */ + uint64_t tx_stat_host_addr; + /* + * This is the host address where + * Rx port statistics will be stored + */ + uint64_t rx_stat_host_addr; +} __rte_packed; + +/* hwrm_port_qstats_output (size:128b/16B) */ +struct hwrm_port_qstats_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* The size of TX port statistics block in bytes. */ + uint16_t tx_stat_size; + /* The size of RX port statistics block in bytes. */ + uint16_t rx_stat_size; + uint8_t unused_0[3]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. 
+ */ + uint8_t valid; +} __rte_packed; + +/* Port Tx Statistics extended Format */ +/* tx_port_stats_ext (size:2048b/256B) */ +struct tx_port_stats_ext { + /* Total number of tx bytes count on cos queue 0 */ + uint64_t tx_bytes_cos0; + /* Total number of tx bytes count on cos queue 1 */ + uint64_t tx_bytes_cos1; + /* Total number of tx bytes count on cos queue 2 */ + uint64_t tx_bytes_cos2; + /* Total number of tx bytes count on cos queue 3 */ + uint64_t tx_bytes_cos3; + /* Total number of tx bytes count on cos queue 4 */ + uint64_t tx_bytes_cos4; + /* Total number of tx bytes count on cos queue 5 */ + uint64_t tx_bytes_cos5; + /* Total number of tx bytes count on cos queue 6 */ + uint64_t tx_bytes_cos6; + /* Total number of tx bytes count on cos queue 7 */ + uint64_t tx_bytes_cos7; + /* Total number of tx packets count on cos queue 0 */ + uint64_t tx_packets_cos0; + /* Total number of tx packets count on cos queue 1 */ + uint64_t tx_packets_cos1; + /* Total number of tx packets count on cos queue 2 */ + uint64_t tx_packets_cos2; + /* Total number of tx packets count on cos queue 3 */ + uint64_t tx_packets_cos3; + /* Total number of tx packets count on cos queue 4 */ + uint64_t tx_packets_cos4; + /* Total number of tx packets count on cos queue 5 */ + uint64_t tx_packets_cos5; + /* Total number of tx packets count on cos queue 6 */ + uint64_t tx_packets_cos6; + /* Total number of tx packets count on cos queue 7 */ + uint64_t tx_packets_cos7; + /* time duration between transmitting a XON -> XOFF and a subsequent XOFF -> XON for priority 0 */ + uint64_t pfc_pri0_tx_duration_us; + /* Number of times, a XON -> XOFF and XOFF -> XON transitions occur for priority 0 */ + uint64_t pfc_pri0_tx_transitions; + /* time duration between transmitting a XON -> XOFF and a subsequent XOFF -> XON for priority 1 */ + uint64_t pfc_pri1_tx_duration_us; + /* Number of times, a XON -> XOFF and XOFF -> XON transitions occur for priority 1 */ + uint64_t pfc_pri1_tx_transitions; + /* time duration between transmitting a XON -> XOFF and a subsequent XOFF -> XON for priority 2 */ + uint64_t pfc_pri2_tx_duration_us; + /* Number of times, a XON -> XOFF and XOFF -> XON transitions occur for priority 2 */ + uint64_t pfc_pri2_tx_transitions; + /* time duration between transmitting a XON -> XOFF and a subsequent XOFF -> XON for priority 3 */ + uint64_t pfc_pri3_tx_duration_us; + /* Number of times, a XON -> XOFF and XOFF -> XON transitions occur for priority 3 */ + uint64_t pfc_pri3_tx_transitions; + /* time duration between transmitting a XON -> XOFF and a subsequent XOFF -> XON for priority 4 */ + uint64_t pfc_pri4_tx_duration_us; + /* Number of times, a XON -> XOFF and XOFF -> XON transitions occur for priority 4 */ + uint64_t pfc_pri4_tx_transitions; + /* time duration between transmitting a XON -> XOFF and a subsequent XOFF -> XON for priority 5 */ + uint64_t pfc_pri5_tx_duration_us; + /* Number of times, a XON -> XOFF and XOFF -> XON transitions occur for priority 5 */ + uint64_t pfc_pri5_tx_transitions; + /* time duration between transmitting a XON -> XOFF and a subsequent XOFF -> XON for priority 6 */ + uint64_t pfc_pri6_tx_duration_us; + /* Number of times, a XON -> XOFF and XOFF -> XON transitions occur for priority 6 */ + uint64_t pfc_pri6_tx_transitions; + /* time duration between transmitting a XON -> XOFF and a subsequent XOFF -> XON for priority 7 */ + uint64_t pfc_pri7_tx_duration_us; + /* Number of times, a XON -> XOFF and XOFF -> XON transitions occur for priority 7 */ + uint64_t pfc_pri7_tx_transitions; 
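+ /*
+ * Editorial note, not part of the generated HWRM definition: this
+ * extended TX block is written into the tx_stat_host_addr buffer
+ * supplied to hwrm_port_qstats_ext (defined later in this file). The
+ * tx_stat_size value returned by that command reports how many bytes
+ * firmware actually filled, so a cautious consumer would bound its
+ * reads before touching the trailing per-priority PFC counters, e.g.
+ * (hypothetical resp/stats pointers set up by the driver):
+ *
+ *     if (offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) +
+ *         sizeof(uint64_t) <= rte_le_to_cpu_16(resp->tx_stat_size))
+ *             pfc0_us = rte_le_to_cpu_64(stats->pfc_pri0_tx_duration_us);
+ *
+ * Older firmware may return a shorter block that omits these fields.
+ */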
+} __rte_packed; + +/* Port Rx Statistics extended Format */ +/* rx_port_stats_ext (size:3648b/456B) */ +struct rx_port_stats_ext { + /* Number of times link state changed to down */ + uint64_t link_down_events; + /* Number of times the idle rings with pause bit are found */ + uint64_t continuous_pause_events; + /* Number of times the active rings pause bit resumed back */ + uint64_t resume_pause_events; + /* Number of times, the ROCE cos queue PFC is disabled to avoid pause flood/burst */ + uint64_t continuous_roce_pause_events; + /* Number of times, the ROCE cos queue PFC is enabled back */ + uint64_t resume_roce_pause_events; + /* Total number of rx bytes count on cos queue 0 */ + uint64_t rx_bytes_cos0; + /* Total number of rx bytes count on cos queue 1 */ + uint64_t rx_bytes_cos1; + /* Total number of rx bytes count on cos queue 2 */ + uint64_t rx_bytes_cos2; + /* Total number of rx bytes count on cos queue 3 */ + uint64_t rx_bytes_cos3; + /* Total number of rx bytes count on cos queue 4 */ + uint64_t rx_bytes_cos4; + /* Total number of rx bytes count on cos queue 5 */ + uint64_t rx_bytes_cos5; + /* Total number of rx bytes count on cos queue 6 */ + uint64_t rx_bytes_cos6; + /* Total number of rx bytes count on cos queue 7 */ + uint64_t rx_bytes_cos7; + /* Total number of rx packets count on cos queue 0 */ + uint64_t rx_packets_cos0; + /* Total number of rx packets count on cos queue 1 */ + uint64_t rx_packets_cos1; + /* Total number of rx packets count on cos queue 2 */ + uint64_t rx_packets_cos2; + /* Total number of rx packets count on cos queue 3 */ + uint64_t rx_packets_cos3; + /* Total number of rx packets count on cos queue 4 */ + uint64_t rx_packets_cos4; + /* Total number of rx packets count on cos queue 5 */ + uint64_t rx_packets_cos5; + /* Total number of rx packets count on cos queue 6 */ + uint64_t rx_packets_cos6; + /* Total number of rx packets count on cos queue 7 */ + uint64_t rx_packets_cos7; + /* time duration receiving a XON -> XOFF and a subsequent XOFF -> XON for priority 0 */ + uint64_t pfc_pri0_rx_duration_us; + /* Number of times, a XON -> XOFF and XOFF -> XON transitions occur for priority 0 */ + uint64_t pfc_pri0_rx_transitions; + /* time duration receiving a XON -> XOFF and a subsequent XOFF -> XON for priority 1 */ + uint64_t pfc_pri1_rx_duration_us; + /* Number of times, a XON -> XOFF and XOFF -> XON transitions occur for priority 1 */ + uint64_t pfc_pri1_rx_transitions; + /* time duration receiving a XON -> XOFF and a subsequent XOFF -> XON for priority 2 */ + uint64_t pfc_pri2_rx_duration_us; + /* Number of times, a XON -> XOFF and XOFF -> XON transitions occur for priority 2 */ + uint64_t pfc_pri2_rx_transitions; + /* time duration receiving a XON -> XOFF and a subsequent XOFF -> XON for priority 3 */ + uint64_t pfc_pri3_rx_duration_us; + /* Number of times, a XON -> XOFF and XOFF -> XON transitions occur for priority 3 */ + uint64_t pfc_pri3_rx_transitions; + /* time duration receiving a XON -> XOFF and a subsequent XOFF -> XON for priority 4 */ + uint64_t pfc_pri4_rx_duration_us; + /* Number of times, a XON -> XOFF and XOFF -> XON transitions occur for priority 4 */ + uint64_t pfc_pri4_rx_transitions; + /* time duration receiving a XON -> XOFF and a subsequent XOFF -> XON for priority 5 */ + uint64_t pfc_pri5_rx_duration_us; + /* Number of times, a XON -> XOFF and XOFF -> XON transitions occur for priority 5 */ + uint64_t pfc_pri5_rx_transitions; + /* time duration receiving a XON -> XOFF and a subsequent XOFF -> XON for priority 6 */ + uint64_t 
pfc_pri6_rx_duration_us; + /* Number of times, a XON -> XOFF and XOFF -> XON transitions occur for priority 6 */ + uint64_t pfc_pri6_rx_transitions; + /* time duration receiving a XON -> XOFF and a subsequent XOFF -> XON for priority 7 */ + uint64_t pfc_pri7_rx_duration_us; + /* Number of times, a XON -> XOFF and XOFF -> XON transitions occur for priority 7 */ + uint64_t pfc_pri7_rx_transitions; + /* Total number of received bits */ + uint64_t rx_bits; + /* The number of events where the port receive buffer was over 85% full */ + uint64_t rx_buffer_passed_threshold; + /* + * The number of symbol errors that were not corrected by the FEC correction + * algorithm + */ + uint64_t rx_pcs_symbol_err; + /* The number of corrected bits on the port according to active FEC */ + uint64_t rx_corrected_bits; + /* Total number of rx discard bytes count on cos queue 0 */ + uint64_t rx_discard_bytes_cos0; + /* Total number of rx discard bytes count on cos queue 1 */ + uint64_t rx_discard_bytes_cos1; + /* Total number of rx discard bytes count on cos queue 2 */ + uint64_t rx_discard_bytes_cos2; + /* Total number of rx discard bytes count on cos queue 3 */ + uint64_t rx_discard_bytes_cos3; + /* Total number of rx discard bytes count on cos queue 4 */ + uint64_t rx_discard_bytes_cos4; + /* Total number of rx discard bytes count on cos queue 5 */ + uint64_t rx_discard_bytes_cos5; + /* Total number of rx discard bytes count on cos queue 6 */ + uint64_t rx_discard_bytes_cos6; + /* Total number of rx discard bytes count on cos queue 7 */ + uint64_t rx_discard_bytes_cos7; + /* Total number of rx discard packets count on cos queue 0 */ + uint64_t rx_discard_packets_cos0; + /* Total number of rx discard packets count on cos queue 1 */ + uint64_t rx_discard_packets_cos1; + /* Total number of rx discard packets count on cos queue 2 */ + uint64_t rx_discard_packets_cos2; + /* Total number of rx discard packets count on cos queue 3 */ + uint64_t rx_discard_packets_cos3; + /* Total number of rx discard packets count on cos queue 4 */ + uint64_t rx_discard_packets_cos4; + /* Total number of rx discard packets count on cos queue 5 */ + uint64_t rx_discard_packets_cos5; + /* Total number of rx discard packets count on cos queue 6 */ + uint64_t rx_discard_packets_cos6; + /* Total number of rx discard packets count on cos queue 7 */ + uint64_t rx_discard_packets_cos7; +} __rte_packed; + +/* + * Port Rx Statistics extended PFC WatchDog Format. + * StormDetect and StormRevert event determination is based + * on an integration period and a percentage threshold. + * StormDetect event - when percentage of XOFF frames received + * within an integration period exceeds the configured threshold. + * StormRevert event - when percentage of XON frames received + * within an integration period exceeds the configured threshold. + * Actual number of XOFF/XON frames for the events to be triggered + * depends on both configured integration period and sampling rate. + * The statistics in this structure represent counts of specified + * events from the moment the feature (PFC WatchDog) is enabled via + * hwrm_queue_pfc_enable_cfg call. 
+ */ +/* rx_port_stats_ext_pfc_wd (size:5120b/640B) */ +struct rx_port_stats_ext_pfc_wd { + /* + * Total number of PFC WatchDog StormDetect events detected + * for Pri 0 + */ + uint64_t rx_pfc_watchdog_storms_detected_pri0; + /* + * Total number of PFC WatchDog StormDetect events detected + * for Pri 1 + */ + uint64_t rx_pfc_watchdog_storms_detected_pri1; + /* + * Total number of PFC WatchDog StormDetect events detected + * for Pri 2 + */ + uint64_t rx_pfc_watchdog_storms_detected_pri2; + /* + * Total number of PFC WatchDog StormDetect events detected + * for Pri 3 + */ + uint64_t rx_pfc_watchdog_storms_detected_pri3; + /* + * Total number of PFC WatchDog StormDetect events detected + * for Pri 4 + */ + uint64_t rx_pfc_watchdog_storms_detected_pri4; + /* + * Total number of PFC WatchDog StormDetect events detected + * for Pri 5 + */ + uint64_t rx_pfc_watchdog_storms_detected_pri5; + /* + * Total number of PFC WatchDog StormDetect events detected + * for Pri 6 + */ + uint64_t rx_pfc_watchdog_storms_detected_pri6; + /* + * Total number of PFC WatchDog StormDetect events detected + * for Pri 7 + */ + uint64_t rx_pfc_watchdog_storms_detected_pri7; + /* + * Total number of PFC WatchDog StormRevert events detected + * for Pri 0 + */ + uint64_t rx_pfc_watchdog_storms_reverted_pri0; + /* + * Total number of PFC WatchDog StormRevert events detected + * for Pri 1 + */ + uint64_t rx_pfc_watchdog_storms_reverted_pri1; + /* + * Total number of PFC WatchDog StormRevert events detected + * for Pri 2 + */ + uint64_t rx_pfc_watchdog_storms_reverted_pri2; + /* + * Total number of PFC WatchDog StormRevert events detected + * for Pri 3 + */ + uint64_t rx_pfc_watchdog_storms_reverted_pri3; + /* + * Total number of PFC WatchDog StormRevert events detected + * for Pri 4 + */ + uint64_t rx_pfc_watchdog_storms_reverted_pri4; + /* + * Total number of PFC WatchDog StormRevert events detected + * for Pri 5 + */ + uint64_t rx_pfc_watchdog_storms_reverted_pri5; + /* + * Total number of PFC WatchDog StormRevert events detected + * for Pri 6 + */ + uint64_t rx_pfc_watchdog_storms_reverted_pri6; + /* + * Total number of PFC WatchDog StormRevert events detected + * for Pri 7 + */ + uint64_t rx_pfc_watchdog_storms_reverted_pri7; + /* + * Total number of packets received during PFC watchdog storm + * for pri 0 + */ + uint64_t rx_pfc_watchdog_storms_rx_packets_pri0; + /* + * Total number of packets received during PFC watchdog storm + * for pri 1 + */ + uint64_t rx_pfc_watchdog_storms_rx_packets_pri1; + /* + * Total number of packets received during PFC watchdog storm + * for pri 2 + */ + uint64_t rx_pfc_watchdog_storms_rx_packets_pri2; + /* + * Total number of packets received during PFC watchdog storm + * for pri 3 + */ + uint64_t rx_pfc_watchdog_storms_rx_packets_pri3; + /* + * Total number of packets received during PFC watchdog storm + * for pri 4 + */ + uint64_t rx_pfc_watchdog_storms_rx_packets_pri4; + /* + * Total number of packets received during PFC watchdog storm + * for pri 5 + */ + uint64_t rx_pfc_watchdog_storms_rx_packets_pri5; + /* + * Total number of packets received during PFC watchdog storm + * for pri 6 + */ + uint64_t rx_pfc_watchdog_storms_rx_packets_pri6; + /* + * Total number of packets received during PFC watchdog storm + * for pri 7 + */ + uint64_t rx_pfc_watchdog_storms_rx_packets_pri7; + /* + * Total number of bytes received during PFC watchdog storm + * for pri 0 + */ + uint64_t rx_pfc_watchdog_storms_rx_bytes_pri0; + /* + * Total number of bytes received during PFC watchdog storm + * for pri 1 + 
*/ + uint64_t rx_pfc_watchdog_storms_rx_bytes_pri1; + /* + * Total number of bytes received during PFC watchdog storm + * for pri 2 + */ + uint64_t rx_pfc_watchdog_storms_rx_bytes_pri2; + /* + * Total number of bytes received during PFC watchdog storm + * for pri 3 + */ + uint64_t rx_pfc_watchdog_storms_rx_bytes_pri3; + /* + * Total number of bytes received during PFC watchdog storm + * for pri 4 + */ + uint64_t rx_pfc_watchdog_storms_rx_bytes_pri4; + /* + * Total number of bytes received during PFC watchdog storm + * for pri 5 + */ + uint64_t rx_pfc_watchdog_storms_rx_bytes_pri5; + /* + * Total number of bytes received during PFC watchdog storm + * for pri 6 + */ + uint64_t rx_pfc_watchdog_storms_rx_bytes_pri6; + /* + * Total number of bytes received during PFC watchdog storm + * for pri 7 + */ + uint64_t rx_pfc_watchdog_storms_rx_bytes_pri7; + /* + * Total number of packets dropped on rx during PFC watchdog storm + * for pri 0 + */ + uint64_t rx_pfc_watchdog_storms_rx_packets_dropped_pri0; + /* + * Total number of packets dropped on rx during PFC watchdog storm + * for pri 1 + */ + uint64_t rx_pfc_watchdog_storms_rx_packets_dropped_pri1; + /* + * Total number of packets dropped on rx during PFC watchdog storm + * for pri 2 + */ + uint64_t rx_pfc_watchdog_storms_rx_packets_dropped_pri2; + /* + * Total number of packets dropped on rx during PFC watchdog storm + * for pri 3 + */ + uint64_t rx_pfc_watchdog_storms_rx_packets_dropped_pri3; + /* + * Total number of packets dropped on rx during PFC watchdog storm + * for pri 4 + */ + uint64_t rx_pfc_watchdog_storms_rx_packets_dropped_pri4; + /* + * Total number of packets dropped on rx during PFC watchdog storm + * for pri 5 + */ + uint64_t rx_pfc_watchdog_storms_rx_packets_dropped_pri5; + /* + * Total number of packets dropped on rx during PFC watchdog storm + * for pri 6 + */ + uint64_t rx_pfc_watchdog_storms_rx_packets_dropped_pri6; + /* + * Total number of packets dropped on rx during PFC watchdog storm + * for pri 7 + */ + uint64_t rx_pfc_watchdog_storms_rx_packets_dropped_pri7; + /* + * Total number of bytes dropped on rx during PFC watchdog storm + * for pri 0 + */ + uint64_t rx_pfc_watchdog_storms_rx_bytes_dropped_pri0; + /* + * Total number of bytes dropped on rx during PFC watchdog storm + * for pri 1 + */ + uint64_t rx_pfc_watchdog_storms_rx_bytes_dropped_pri1; + /* + * Total number of bytes dropped on rx during PFC watchdog storm + * for pri 2 + */ + uint64_t rx_pfc_watchdog_storms_rx_bytes_dropped_pri2; + /* + * Total number of bytes dropped on rx during PFC watchdog storm + * for pri 3 + */ + uint64_t rx_pfc_watchdog_storms_rx_bytes_dropped_pri3; + /* + * Total number of bytes dropped on rx during PFC watchdog storm + * for pri 4 + */ + uint64_t rx_pfc_watchdog_storms_rx_bytes_dropped_pri4; + /* + * Total number of bytes dropped on rx during PFC watchdog storm + * for pri 5 + */ + uint64_t rx_pfc_watchdog_storms_rx_bytes_dropped_pri5; + /* + * Total number of bytes dropped on rx during PFC watchdog storm + * for pri 6 + */ + uint64_t rx_pfc_watchdog_storms_rx_bytes_dropped_pri6; + /* + * Total number of bytes dropped on rx during PFC watchdog storm + * for pri 7 + */ + uint64_t rx_pfc_watchdog_storms_rx_bytes_dropped_pri7; + /* + * Number of packets received during last PFC watchdog storm + * for pri 0 + */ + uint64_t rx_pfc_watchdog_last_storm_rx_packets_pri0; + /* + * Number of packets received during last PFC watchdog storm + * for pri 1 + */ + uint64_t rx_pfc_watchdog_last_storm_rx_packets_pri1; + /* + * Number of packets 
received during last PFC watchdog storm + * for pri 2 + */ + uint64_t rx_pfc_watchdog_last_storm_rx_packets_pri2; + /* + * Number of packets received during last PFC watchdog storm + * for pri 3 + */ + uint64_t rx_pfc_watchdog_last_storm_rx_packets_pri3; + /* + * Number of packets received during last PFC watchdog storm + * for pri 4 + */ + uint64_t rx_pfc_watchdog_last_storm_rx_packets_pri4; + /* + * Number of packets received during last PFC watchdog storm + * for pri 5 + */ + uint64_t rx_pfc_watchdog_last_storm_rx_packets_pri5; + /* + * Number of packets received during last PFC watchdog storm + * for pri 6 + */ + uint64_t rx_pfc_watchdog_last_storm_rx_packets_pri6; + /* + * Number of packets received during last PFC watchdog storm + * for pri 7 + */ + uint64_t rx_pfc_watchdog_last_storm_rx_packets_pri7; + /* + * Number of bytes received during last PFC watchdog storm + * for pri 0 + */ + uint64_t rx_pfc_watchdog_last_storm_rx_bytes_pri0; + /* + * Number of bytes received during last PFC watchdog storm + * for pri 1 + */ + uint64_t rx_pfc_watchdog_last_storm_rx_bytes_pri1; + /* + * Number of bytes received during last PFC watchdog storm + * for pri 2 + */ + uint64_t rx_pfc_watchdog_last_storm_rx_bytes_pri2; + /* + * Number of bytes received during last PFC watchdog storm + * for pri 3 + */ + uint64_t rx_pfc_watchdog_last_storm_rx_bytes_pri3; + /* + * Number of bytes received during last PFC watchdog storm + * for pri 4 + */ + uint64_t rx_pfc_watchdog_last_storm_rx_bytes_pri4; + /* + * Number of bytes received during last PFC watchdog storm + * for pri 5 + */ + uint64_t rx_pfc_watchdog_last_storm_rx_bytes_pri5; + /* + * Number of bytes received during last PFC watchdog storm + * for pri 6 + */ + uint64_t rx_pfc_watchdog_last_storm_rx_bytes_pri6; + /* + * Number of bytes received during last PFC watchdog storm + * for pri 7 + */ + uint64_t rx_pfc_watchdog_last_storm_rx_bytes_pri7; + /* + * Number of packets dropped on rx during last PFC watchdog storm + * for pri 0 + */ + uint64_t rx_pfc_watchdog_last_storm_rx_packets_dropped_pri0; + /* + * Number of packets dropped on rx during last PFC watchdog storm + * for pri 1 + */ + uint64_t rx_pfc_watchdog_last_storm_rx_packets_dropped_pri1; + /* + * Number of packets dropped on rx during last PFC watchdog storm + * for pri 2 + */ + uint64_t rx_pfc_watchdog_last_storm_rx_packets_dropped_pri2; + /* + * Number of packets dropped on rx during last PFC watchdog storm + * for pri 3 + */ + uint64_t rx_pfc_watchdog_last_storm_rx_packets_dropped_pri3; + /* + * Number of packets dropped on rx during last PFC watchdog storm + * for pri 4 + */ + uint64_t rx_pfc_watchdog_last_storm_rx_packets_dropped_pri4; + /* + * Number of packets dropped on rx during last PFC watchdog storm + * for pri 5 + */ + uint64_t rx_pfc_watchdog_last_storm_rx_packets_dropped_pri5; + /* + * Number of packets dropped on rx during last PFC watchdog storm + * for pri 6 + */ + uint64_t rx_pfc_watchdog_last_storm_rx_packets_dropped_pri6; + /* + * Number of packets dropped on rx during last PFC watchdog storm + * for pri 7 + */ + uint64_t rx_pfc_watchdog_last_storm_rx_packets_dropped_pri7; + /* + * Total number of bytes dropped on rx during PFC watchdog storm + * for pri 0 + */ + uint64_t rx_pfc_watchdog_last_storm_rx_bytes_dropped_pri0; + /* + * Number of bytes dropped on rx during last PFC watchdog storm + * for pri 1 + */ + uint64_t rx_pfc_watchdog_last_storm_rx_bytes_dropped_pri1; + /* + * Number of bytes dropped on rx during last PFC watchdog storm + * for pri 2 + */ + uint64_t 
rx_pfc_watchdog_last_storm_rx_bytes_dropped_pri2; + /* + * Number of bytes dropped on rx during last PFC watchdog storm + * for pri 3 + */ + uint64_t rx_pfc_watchdog_last_storm_rx_bytes_dropped_pri3; + /* + * Number of bytes dropped on rx during last PFC watchdog storm + * for pri 4 + */ + uint64_t rx_pfc_watchdog_last_storm_rx_bytes_dropped_pri4; + /* + * Number of bytes dropped on rx during last PFC watchdog storm + * for pri 5 + */ + uint64_t rx_pfc_watchdog_last_storm_rx_bytes_dropped_pri5; + /* + * Number of bytes dropped on rx during last PFC watchdog storm + * for pri 6 + */ + uint64_t rx_pfc_watchdog_last_storm_rx_bytes_dropped_pri6; + /* + * Number of bytes dropped on rx during last PFC watchdog storm + * for pri 7 + */ + uint64_t rx_pfc_watchdog_last_storm_rx_bytes_dropped_pri7; +} __rte_packed; + +/************************ + * hwrm_port_qstats_ext * + ************************/ + + +/* hwrm_port_qstats_ext_input (size:320b/40B) */ +struct hwrm_port_qstats_ext_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* Port ID of port that is being queried. */ + uint16_t port_id; + /* + * The size of TX port extended + * statistics block in bytes. + */ + uint16_t tx_stat_size; + /* + * The size of RX port extended + * statistics block in bytes + */ + uint16_t rx_stat_size; + uint8_t unused_0[2]; + /* + * This is the host address where + * Tx port statistics will be stored + */ + uint64_t tx_stat_host_addr; + /* + * This is the host address where + * Rx port statistics will be stored + */ + uint64_t rx_stat_host_addr; +} __rte_packed; + +/* hwrm_port_qstats_ext_output (size:128b/16B) */ +struct hwrm_port_qstats_ext_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* The size of TX port statistics block in bytes. */ + uint16_t tx_stat_size; + /* The size of RX port statistics block in bytes. */ + uint16_t rx_stat_size; + /* Total number of active cos queues available. */ + uint16_t total_active_cos_queues; + uint8_t flags; + /* + * If set to 1, then this field indicates that clear + * roce specific counters is supported. + */ + #define HWRM_PORT_QSTATS_EXT_OUTPUT_FLAGS_CLEAR_ROCE_COUNTERS_SUPPORTED \ + UINT32_C(0x1) + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. 
+ * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/******************************* + * hwrm_port_qstats_ext_pfc_wd * + *******************************/ + + +/* hwrm_port_qstats_ext_pfc_wd_input (size:256b/32B) */ +struct hwrm_port_qstats_ext_pfc_wd_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* Port ID of port that is being queried. */ + uint16_t port_id; + /* + * The size of rx_port_stats_ext_pfc_wd + * block in bytes + */ + uint16_t pfc_wd_stat_size; + uint8_t unused_0[4]; + /* + * This is the host address where + * rx_port_stats_ext_pfc_wd will be stored + */ + uint64_t pfc_wd_stat_host_addr; +} __rte_packed; + +/* hwrm_port_qstats_ext_pfc_wd_output (size:128b/16B) */ +struct hwrm_port_qstats_ext_pfc_wd_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* + * The size of rx_port_stats_ext_pfc_wd + * statistics block in bytes. + */ + uint16_t pfc_wd_stat_size; + uint8_t flags; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; + uint8_t unused_0[4]; +} __rte_packed; + +/************************* + * hwrm_port_lpbk_qstats * + *************************/ + + +/* hwrm_port_lpbk_qstats_input (size:128b/16B) */ +struct hwrm_port_lpbk_qstats_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. 
+ */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; +} __rte_packed; + +/* hwrm_port_lpbk_qstats_output (size:768b/96B) */ +struct hwrm_port_lpbk_qstats_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* Number of transmitted unicast frames */ + uint64_t lpbk_ucast_frames; + /* Number of transmitted multicast frames */ + uint64_t lpbk_mcast_frames; + /* Number of transmitted broadcast frames */ + uint64_t lpbk_bcast_frames; + /* Number of transmitted bytes for unicast traffic */ + uint64_t lpbk_ucast_bytes; + /* Number of transmitted bytes for multicast traffic */ + uint64_t lpbk_mcast_bytes; + /* Number of transmitted bytes for broadcast traffic */ + uint64_t lpbk_bcast_bytes; + /* Total Tx Drops for loopback traffic reported by STATS block */ + uint64_t tx_stat_discard; + /* Total Tx Error Drops for loopback traffic reported by STATS block */ + uint64_t tx_stat_error; + /* Total Rx Drops for loopback traffic reported by STATS block */ + uint64_t rx_stat_discard; + /* Total Rx Error Drops for loopback traffic reported by STATS block */ + uint64_t rx_stat_error; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/************************ + * hwrm_port_ecn_qstats * + ************************/ + + +/* hwrm_port_ecn_qstats_input (size:192b/24B) */ +struct hwrm_port_ecn_qstats_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* + * Port ID of port that is being queried. Unused if NIC is in + * multi-host mode. 
+ */ + uint16_t port_id; + uint8_t unused_0[6]; +} __rte_packed; + +/* hwrm_port_ecn_qstats_output (size:384b/48B) */ +struct hwrm_port_ecn_qstats_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* Number of packets marked in CoS queue 0. */ + uint32_t mark_cnt_cos0; + /* Number of packets marked in CoS queue 1. */ + uint32_t mark_cnt_cos1; + /* Number of packets marked in CoS queue 2. */ + uint32_t mark_cnt_cos2; + /* Number of packets marked in CoS queue 3. */ + uint32_t mark_cnt_cos3; + /* Number of packets marked in CoS queue 4. */ + uint32_t mark_cnt_cos4; + /* Number of packets marked in CoS queue 5. */ + uint32_t mark_cnt_cos5; + /* Number of packets marked in CoS queue 6. */ + uint32_t mark_cnt_cos6; + /* Number of packets marked in CoS queue 7. */ + uint32_t mark_cnt_cos7; + /* + * Bitmask that indicates which CoS queues have ECN marking enabled. + * Bit i corresponds to CoS queue i. + */ + uint8_t mark_en; + uint8_t unused_0[6]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/*********************** + * hwrm_port_clr_stats * + ***********************/ + + +/* hwrm_port_clr_stats_input (size:192b/24B) */ +struct hwrm_port_clr_stats_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* Port ID of port that is being queried. */ + uint16_t port_id; + uint8_t flags; + /* + * If set to 1, then this field indicates clear the following RoCE + * specific counters. + * RoCE associated TX/RX cos counters + * CNP associated TX/RX cos counters + * RoCE/CNP specific TX/RX flow counters + * Firmware will determine the RoCE/CNP cos queue based on qos profile. + * This flag is honored only when RoCE is enabled on that port. + */ + #define HWRM_PORT_CLR_STATS_INPUT_FLAGS_ROCE_COUNTERS UINT32_C(0x1) + uint8_t unused_0[5]; +} __rte_packed; + +/* hwrm_port_clr_stats_output (size:128b/16B) */ +struct hwrm_port_clr_stats_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. 
*/ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/*********************** + * hwrm_port_phy_qcaps * + ***********************/ + + +/* hwrm_port_phy_qcaps_input (size:192b/24B) */ +struct hwrm_port_phy_qcaps_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* Port ID of port that is being queried. */ + uint16_t port_id; + uint8_t unused_0[6]; +} __rte_packed; + +/* hwrm_port_phy_qcaps_output (size:192b/24B) */ +struct hwrm_port_phy_qcaps_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* PHY capability flags */ + uint8_t flags; + /* + * If set to 1, then this field indicates that the + * link is capable of supporting EEE. + */ + #define HWRM_PORT_PHY_QCAPS_OUTPUT_FLAGS_EEE_SUPPORTED \ + UINT32_C(0x1) + /* + * If set to 1, then this field indicates that the + * PHY is capable of supporting external loopback. + */ + #define HWRM_PORT_PHY_QCAPS_OUTPUT_FLAGS_EXTERNAL_LPBK_SUPPORTED \ + UINT32_C(0x2) + /* + * If set to 1, then this field indicates that the + * PHY is capable of supporting loopback in autoneg mode. + */ + #define HWRM_PORT_PHY_QCAPS_OUTPUT_FLAGS_AUTONEG_LPBK_SUPPORTED \ + UINT32_C(0x4) + /* + * Indicates if the configuration of shared PHY settings is supported. + * In cases where a physical port is shared by multiple functions + * (e.g. NPAR, multihost, etc), the configuration of PHY + * settings may not be allowed. Callers to HWRM_PORT_PHY_CFG will + * get an HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED error in this case. + */ + #define HWRM_PORT_PHY_QCAPS_OUTPUT_FLAGS_SHARED_PHY_CFG_SUPPORTED \ + UINT32_C(0x8) + /* + * Reserved field. The HWRM shall set this field to 0. + * An HWRM client shall ignore this field. 
+ */ + #define HWRM_PORT_PHY_QCAPS_OUTPUT_FLAGS_RSVD1_MASK \ + UINT32_C(0xf0) + #define HWRM_PORT_PHY_QCAPS_OUTPUT_FLAGS_RSVD1_SFT 4 + /* Number of front panel ports for this device. */ + uint8_t port_cnt; + /* Not supported or unknown */ + #define HWRM_PORT_PHY_QCAPS_OUTPUT_PORT_CNT_UNKNOWN UINT32_C(0x0) + /* single port device */ + #define HWRM_PORT_PHY_QCAPS_OUTPUT_PORT_CNT_1 UINT32_C(0x1) + /* 2-port device */ + #define HWRM_PORT_PHY_QCAPS_OUTPUT_PORT_CNT_2 UINT32_C(0x2) + /* 3-port device */ + #define HWRM_PORT_PHY_QCAPS_OUTPUT_PORT_CNT_3 UINT32_C(0x3) + /* 4-port device */ + #define HWRM_PORT_PHY_QCAPS_OUTPUT_PORT_CNT_4 UINT32_C(0x4) + #define HWRM_PORT_PHY_QCAPS_OUTPUT_PORT_CNT_LAST \ + HWRM_PORT_PHY_QCAPS_OUTPUT_PORT_CNT_4 + /* + * This is a bit mask to indicate what speeds are supported + * as forced speeds on this link. + * For each speed that can be forced on this link, the + * corresponding mask bit shall be set to '1'. + */ + uint16_t supported_speeds_force_mode; + /* 100Mb link speed (Half-duplex) */ + #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_FORCE_MODE_100MBHD \ + UINT32_C(0x1) + /* 100Mb link speed (Full-duplex) */ + #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_FORCE_MODE_100MB \ + UINT32_C(0x2) + /* 1Gb link speed (Half-duplex) */ + #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_FORCE_MODE_1GBHD \ + UINT32_C(0x4) + /* 1Gb link speed (Full-duplex) */ + #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_FORCE_MODE_1GB \ + UINT32_C(0x8) + /* 2Gb link speed */ + #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_FORCE_MODE_2GB \ + UINT32_C(0x10) + /* 25Gb link speed */ + #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_FORCE_MODE_2_5GB \ + UINT32_C(0x20) + /* 10Gb link speed */ + #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_FORCE_MODE_10GB \ + UINT32_C(0x40) + /* 20Gb link speed */ + #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_FORCE_MODE_20GB \ + UINT32_C(0x80) + /* 25Gb link speed */ + #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_FORCE_MODE_25GB \ + UINT32_C(0x100) + /* 40Gb link speed */ + #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_FORCE_MODE_40GB \ + UINT32_C(0x200) + /* 50Gb link speed */ + #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_FORCE_MODE_50GB \ + UINT32_C(0x400) + /* 100Gb link speed */ + #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_FORCE_MODE_100GB \ + UINT32_C(0x800) + /* 10Mb link speed (Half-duplex) */ + #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_FORCE_MODE_10MBHD \ + UINT32_C(0x1000) + /* 10Mb link speed (Full-duplex) */ + #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_FORCE_MODE_10MB \ + UINT32_C(0x2000) + /* 200Gb link speed */ + #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_FORCE_MODE_200GB \ + UINT32_C(0x4000) + /* + * This is a bit mask to indicate what speeds are supported + * for autonegotiation on this link. + * For each speed that can be autonegotiated on this link, the + * corresponding mask bit shall be set to '1'. 
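A small sketch of decoding this kind of speed bitmask (shown here for the force-mode mask; the auto-mode mask below is decoded the same way). resp is assumed to be a completed response and only a few of the bits are shown:

#include <stdio.h>
#include <rte_byteorder.h>

static void print_forced_speeds(const struct hwrm_port_phy_qcaps_output *resp)
{
        uint16_t mask = rte_le_to_cpu_16(resp->supported_speeds_force_mode);

        if (mask & HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_FORCE_MODE_1GB)
                printf("1Gb full-duplex\n");
        if (mask & HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_FORCE_MODE_10GB)
                printf("10Gb\n");
        if (mask & HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_FORCE_MODE_25GB)
                printf("25Gb\n");
        if (mask & HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_FORCE_MODE_100GB)
                printf("100Gb\n");
}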
+ */ + uint16_t supported_speeds_auto_mode; + /* 100Mb link speed (Half-duplex) */ + #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_AUTO_MODE_100MBHD \ + UINT32_C(0x1) + /* 100Mb link speed (Full-duplex) */ + #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_AUTO_MODE_100MB \ + UINT32_C(0x2) + /* 1Gb link speed (Half-duplex) */ + #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_AUTO_MODE_1GBHD \ + UINT32_C(0x4) + /* 1Gb link speed (Full-duplex) */ + #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_AUTO_MODE_1GB \ + UINT32_C(0x8) + /* 2Gb link speed */ + #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_AUTO_MODE_2GB \ + UINT32_C(0x10) + /* 25Gb link speed */ + #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_AUTO_MODE_2_5GB \ + UINT32_C(0x20) + /* 10Gb link speed */ + #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_AUTO_MODE_10GB \ + UINT32_C(0x40) + /* 20Gb link speed */ + #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_AUTO_MODE_20GB \ + UINT32_C(0x80) + /* 25Gb link speed */ + #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_AUTO_MODE_25GB \ + UINT32_C(0x100) + /* 40Gb link speed */ + #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_AUTO_MODE_40GB \ + UINT32_C(0x200) + /* 50Gb link speed */ + #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_AUTO_MODE_50GB \ + UINT32_C(0x400) + /* 100Gb link speed */ + #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_AUTO_MODE_100GB \ + UINT32_C(0x800) + /* 10Mb link speed (Half-duplex) */ + #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_AUTO_MODE_10MBHD \ + UINT32_C(0x1000) + /* 10Mb link speed (Full-duplex) */ + #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_AUTO_MODE_10MB \ + UINT32_C(0x2000) + /* 200Gb link speed */ + #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_AUTO_MODE_200GB \ + UINT32_C(0x4000) + /* + * This is a bit mask to indicate what speeds are supported + * for EEE on this link. + * For each speed that can be autonegotiated when EEE is enabled + * on this link, the corresponding mask bit shall be set to '1'. + * This field is only valid when the eee_suppotred is set to '1'. + */ + uint16_t supported_speeds_eee_mode; + /* Reserved */ + #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_EEE_MODE_RSVD1 \ + UINT32_C(0x1) + /* 100Mb link speed (Full-duplex) */ + #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_EEE_MODE_100MB \ + UINT32_C(0x2) + /* Reserved */ + #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_EEE_MODE_RSVD2 \ + UINT32_C(0x4) + /* 1Gb link speed (Full-duplex) */ + #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_EEE_MODE_1GB \ + UINT32_C(0x8) + /* Reserved */ + #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_EEE_MODE_RSVD3 \ + UINT32_C(0x10) + /* Reserved */ + #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_EEE_MODE_RSVD4 \ + UINT32_C(0x20) + /* 10Gb link speed */ + #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_EEE_MODE_10GB \ + UINT32_C(0x40) + uint32_t tx_lpi_timer_low; + /* + * The lowest value of TX LPI timer that can be set on this link + * when EEE is enabled. This value is in microseconds. + * This field is valid only when_eee_supported is set to '1'. + */ + #define HWRM_PORT_PHY_QCAPS_OUTPUT_TX_LPI_TIMER_LOW_MASK \ + UINT32_C(0xffffff) + #define HWRM_PORT_PHY_QCAPS_OUTPUT_TX_LPI_TIMER_LOW_SFT 0 + /* + * Reserved field. The HWRM shall set this field to 0. + * An HWRM client shall ignore this field. 
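A sketch of extracting a masked and shifted sub-field such as the 24-bit TX LPI timer carried in tx_lpi_timer_low, assuming a completed response on a link that reports EEE support:

#include <rte_byteorder.h>

static uint32_t min_tx_lpi_timer_us(const struct hwrm_port_phy_qcaps_output *resp)
{
        uint32_t v = rte_le_to_cpu_32(resp->tx_lpi_timer_low);

        /* Keep only the 24-bit timer value; the upper byte is reserved. */
        return (v & HWRM_PORT_PHY_QCAPS_OUTPUT_TX_LPI_TIMER_LOW_MASK) >>
               HWRM_PORT_PHY_QCAPS_OUTPUT_TX_LPI_TIMER_LOW_SFT;
}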
+ */ + #define HWRM_PORT_PHY_QCAPS_OUTPUT_RSVD2_MASK \ + UINT32_C(0xff000000) + #define HWRM_PORT_PHY_QCAPS_OUTPUT_RSVD2_SFT 24 + uint32_t valid_tx_lpi_timer_high; + /* + * The highest value of TX LPI timer that can be set on this link + * when EEE is enabled. This value is in microseconds. + * This field is valid only when_eee_supported is set to '1'. + */ + #define HWRM_PORT_PHY_QCAPS_OUTPUT_TX_LPI_TIMER_HIGH_MASK \ + UINT32_C(0xffffff) + #define HWRM_PORT_PHY_QCAPS_OUTPUT_TX_LPI_TIMER_HIGH_SFT 0 + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + #define HWRM_PORT_PHY_QCAPS_OUTPUT_VALID_MASK \ + UINT32_C(0xff000000) + #define HWRM_PORT_PHY_QCAPS_OUTPUT_VALID_SFT 24 +} __rte_packed; + +/**************************** + * hwrm_port_phy_mdio_write * + ****************************/ + + +/* hwrm_port_phy_mdio_write_input (size:320b/40B) */ +struct hwrm_port_phy_mdio_write_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* Reserved for future use. */ + uint32_t unused_0[2]; + /* Port ID of port. */ + uint16_t port_id; + /* If phy_address is 0xFF, port_id will be used to derive phy_addr. */ + uint8_t phy_addr; + /* 8-bit device address. */ + uint8_t dev_addr; + /* 16-bit register address. */ + uint16_t reg_addr; + /* 16-bit register data. */ + uint16_t reg_data; + /* + * When this bit is set to 1 a Clause 45 mdio access is done. + * when this bit is set to 0 a Clause 22 mdio access is done. + */ + uint8_t cl45_mdio; + /* */ + uint8_t unused_1[7]; +} __rte_packed; + +/* hwrm_port_phy_mdio_write_output (size:128b/16B) */ +struct hwrm_port_phy_mdio_write_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. 
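Because the firmware writes the valid byte last, a consumer should confirm it is set before trusting any other response field. A minimal sketch, using the mdio_write response as the example (real drivers also poll with a timeout and use memory barriers, which are omitted here):

#include <stdint.h>
#include <stdbool.h>

static bool mdio_write_resp_ready(const struct hwrm_port_phy_mdio_write_output *resp)
{
        return resp->valid != 0;
}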
+ */ + uint8_t valid; +} __rte_packed; + +/*************************** + * hwrm_port_phy_mdio_read * + ***************************/ + + +/* hwrm_port_phy_mdio_read_input (size:256b/32B) */ +struct hwrm_port_phy_mdio_read_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* Reserved for future use. */ + uint32_t unused_0[2]; + /* Port ID of port. */ + uint16_t port_id; + /* If phy_address is 0xFF, port_id will be used to derive phy_addr. */ + uint8_t phy_addr; + /* 8-bit device address. */ + uint8_t dev_addr; + /* 16-bit register address. */ + uint16_t reg_addr; + /* + * When this bit is set to 1 a Clause 45 mdio access is done. + * when this bit is set to 0 a Clause 22 mdio access is done. + */ + uint8_t cl45_mdio; + /* */ + uint8_t unused_1; +} __rte_packed; + +/* hwrm_port_phy_mdio_read_output (size:128b/16B) */ +struct hwrm_port_phy_mdio_read_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* 16-bit register data. */ + uint16_t reg_data; + uint8_t unused_0[5]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/********************* + * hwrm_port_led_cfg * + *********************/ + + +/* hwrm_port_led_cfg_input (size:512b/64B) */ +struct hwrm_port_led_cfg_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. 
This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + uint32_t enables; + /* + * This bit must be '1' for the led0_id field to be + * configured. + */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_ID \ + UINT32_C(0x1) + /* + * This bit must be '1' for the led0_state field to be + * configured. + */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_STATE \ + UINT32_C(0x2) + /* + * This bit must be '1' for the led0_color field to be + * configured. + */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_COLOR \ + UINT32_C(0x4) + /* + * This bit must be '1' for the led0_blink_on field to be + * configured. + */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_BLINK_ON \ + UINT32_C(0x8) + /* + * This bit must be '1' for the led0_blink_off field to be + * configured. + */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_BLINK_OFF \ + UINT32_C(0x10) + /* + * This bit must be '1' for the led0_group_id field to be + * configured. + */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_GROUP_ID \ + UINT32_C(0x20) + /* + * This bit must be '1' for the led1_id field to be + * configured. + */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED1_ID \ + UINT32_C(0x40) + /* + * This bit must be '1' for the led1_state field to be + * configured. + */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED1_STATE \ + UINT32_C(0x80) + /* + * This bit must be '1' for the led1_color field to be + * configured. + */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED1_COLOR \ + UINT32_C(0x100) + /* + * This bit must be '1' for the led1_blink_on field to be + * configured. + */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED1_BLINK_ON \ + UINT32_C(0x200) + /* + * This bit must be '1' for the led1_blink_off field to be + * configured. + */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED1_BLINK_OFF \ + UINT32_C(0x400) + /* + * This bit must be '1' for the led1_group_id field to be + * configured. + */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED1_GROUP_ID \ + UINT32_C(0x800) + /* + * This bit must be '1' for the led2_id field to be + * configured. + */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED2_ID \ + UINT32_C(0x1000) + /* + * This bit must be '1' for the led2_state field to be + * configured. + */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED2_STATE \ + UINT32_C(0x2000) + /* + * This bit must be '1' for the led2_color field to be + * configured. + */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED2_COLOR \ + UINT32_C(0x4000) + /* + * This bit must be '1' for the led2_blink_on field to be + * configured. + */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED2_BLINK_ON \ + UINT32_C(0x8000) + /* + * This bit must be '1' for the led2_blink_off field to be + * configured. + */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED2_BLINK_OFF \ + UINT32_C(0x10000) + /* + * This bit must be '1' for the led2_group_id field to be + * configured. + */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED2_GROUP_ID \ + UINT32_C(0x20000) + /* + * This bit must be '1' for the led3_id field to be + * configured. + */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED3_ID \ + UINT32_C(0x40000) + /* + * This bit must be '1' for the led3_state field to be + * configured. + */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED3_STATE \ + UINT32_C(0x80000) + /* + * This bit must be '1' for the led3_color field to be + * configured. 
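A sketch of driving this command: every field that is written must have its enable bit set. hwrm_send() is a hypothetical transport stub, HWRM_PORT_LED_CFG is the request-type constant assumed to be defined earlier in this file, and the LED0 state constant used below is defined further down in this structure:

#include <string.h>
#include <rte_byteorder.h>

extern int hwrm_send(void *req, size_t req_len, void *resp, size_t resp_len); /* hypothetical */

/* Make LED #0 of a port blink at 500 ms on / 500 ms off. */
static int blink_led0(uint16_t port_id)
{
        struct hwrm_port_led_cfg_input req;
        struct hwrm_port_led_cfg_output resp;

        memset(&req, 0, sizeof(req));
        req.req_type = rte_cpu_to_le_16(HWRM_PORT_LED_CFG); /* assumed constant */
        req.port_id = rte_cpu_to_le_16(port_id);
        req.num_leds = 1;
        req.enables = rte_cpu_to_le_32(HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_ID |
                                       HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_STATE |
                                       HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_BLINK_ON |
                                       HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_BLINK_OFF);
        req.led0_id = 0;
        req.led0_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINK;
        req.led0_blink_on = rte_cpu_to_le_16(500);  /* ms on  */
        req.led0_blink_off = rte_cpu_to_le_16(500); /* ms off */

        return hwrm_send(&req, sizeof(req), &resp, sizeof(resp));
}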
+ */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED3_COLOR \ + UINT32_C(0x100000) + /* + * This bit must be '1' for the led3_blink_on field to be + * configured. + */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED3_BLINK_ON \ + UINT32_C(0x200000) + /* + * This bit must be '1' for the led3_blink_off field to be + * configured. + */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED3_BLINK_OFF \ + UINT32_C(0x400000) + /* + * This bit must be '1' for the led3_group_id field to be + * configured. + */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED3_GROUP_ID \ + UINT32_C(0x800000) + /* Port ID of port whose LEDs are configured. */ + uint16_t port_id; + /* + * The number of LEDs that are being configured. + * Up to 4 LEDs can be configured with this command. + */ + uint8_t num_leds; + /* Reserved field. */ + uint8_t rsvd; + /* An identifier for the LED #0. */ + uint8_t led0_id; + /* The requested state of the LED #0. */ + uint8_t led0_state; + /* Default state of the LED */ + #define HWRM_PORT_LED_CFG_INPUT_LED0_STATE_DEFAULT UINT32_C(0x0) + /* Off */ + #define HWRM_PORT_LED_CFG_INPUT_LED0_STATE_OFF UINT32_C(0x1) + /* On */ + #define HWRM_PORT_LED_CFG_INPUT_LED0_STATE_ON UINT32_C(0x2) + /* Blink */ + #define HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINK UINT32_C(0x3) + /* Blink Alternately */ + #define HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT UINT32_C(0x4) + #define HWRM_PORT_LED_CFG_INPUT_LED0_STATE_LAST \ + HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT + /* The requested color of LED #0. */ + uint8_t led0_color; + /* Default */ + #define HWRM_PORT_LED_CFG_INPUT_LED0_COLOR_DEFAULT UINT32_C(0x0) + /* Amber */ + #define HWRM_PORT_LED_CFG_INPUT_LED0_COLOR_AMBER UINT32_C(0x1) + /* Green */ + #define HWRM_PORT_LED_CFG_INPUT_LED0_COLOR_GREEN UINT32_C(0x2) + /* Green or Amber */ + #define HWRM_PORT_LED_CFG_INPUT_LED0_COLOR_GREENAMBER UINT32_C(0x3) + #define HWRM_PORT_LED_CFG_INPUT_LED0_COLOR_LAST \ + HWRM_PORT_LED_CFG_INPUT_LED0_COLOR_GREENAMBER + uint8_t unused_0; + /* + * If the LED #0 state is "blink" or "blinkalt", then + * this field represents the requested time in milliseconds + * to keep LED on between cycles. + */ + uint16_t led0_blink_on; + /* + * If the LED #0 state is "blink" or "blinkalt", then + * this field represents the requested time in milliseconds + * to keep LED off between cycles. + */ + uint16_t led0_blink_off; + /* + * An identifier for the group of LEDs that LED #0 belongs + * to. + * If set to 0, then the LED #0 shall not be grouped and + * shall be treated as an individual resource. + * For all other non-zero values of this field, LED #0 shall + * be grouped together with the LEDs with the same group ID + * value. + */ + uint8_t led0_group_id; + /* Reserved field. */ + uint8_t rsvd0; + /* An identifier for the LED #1. */ + uint8_t led1_id; + /* The requested state of the LED #1. */ + uint8_t led1_state; + /* Default state of the LED */ + #define HWRM_PORT_LED_CFG_INPUT_LED1_STATE_DEFAULT UINT32_C(0x0) + /* Off */ + #define HWRM_PORT_LED_CFG_INPUT_LED1_STATE_OFF UINT32_C(0x1) + /* On */ + #define HWRM_PORT_LED_CFG_INPUT_LED1_STATE_ON UINT32_C(0x2) + /* Blink */ + #define HWRM_PORT_LED_CFG_INPUT_LED1_STATE_BLINK UINT32_C(0x3) + /* Blink Alternately */ + #define HWRM_PORT_LED_CFG_INPUT_LED1_STATE_BLINKALT UINT32_C(0x4) + #define HWRM_PORT_LED_CFG_INPUT_LED1_STATE_LAST \ + HWRM_PORT_LED_CFG_INPUT_LED1_STATE_BLINKALT + /* The requested color of LED #1. 
*/ + uint8_t led1_color; + /* Default */ + #define HWRM_PORT_LED_CFG_INPUT_LED1_COLOR_DEFAULT UINT32_C(0x0) + /* Amber */ + #define HWRM_PORT_LED_CFG_INPUT_LED1_COLOR_AMBER UINT32_C(0x1) + /* Green */ + #define HWRM_PORT_LED_CFG_INPUT_LED1_COLOR_GREEN UINT32_C(0x2) + /* Green or Amber */ + #define HWRM_PORT_LED_CFG_INPUT_LED1_COLOR_GREENAMBER UINT32_C(0x3) + #define HWRM_PORT_LED_CFG_INPUT_LED1_COLOR_LAST \ + HWRM_PORT_LED_CFG_INPUT_LED1_COLOR_GREENAMBER + uint8_t unused_1; + /* + * If the LED #1 state is "blink" or "blinkalt", then + * this field represents the requested time in milliseconds + * to keep LED on between cycles. + */ + uint16_t led1_blink_on; + /* + * If the LED #1 state is "blink" or "blinkalt", then + * this field represents the requested time in milliseconds + * to keep LED off between cycles. + */ + uint16_t led1_blink_off; + /* + * An identifier for the group of LEDs that LED #1 belongs + * to. + * If set to 0, then the LED #1 shall not be grouped and + * shall be treated as an individual resource. + * For all other non-zero values of this field, LED #1 shall + * be grouped together with the LEDs with the same group ID + * value. + */ + uint8_t led1_group_id; + /* Reserved field. */ + uint8_t rsvd1; + /* An identifier for the LED #2. */ + uint8_t led2_id; + /* The requested state of the LED #2. */ + uint8_t led2_state; + /* Default state of the LED */ + #define HWRM_PORT_LED_CFG_INPUT_LED2_STATE_DEFAULT UINT32_C(0x0) + /* Off */ + #define HWRM_PORT_LED_CFG_INPUT_LED2_STATE_OFF UINT32_C(0x1) + /* On */ + #define HWRM_PORT_LED_CFG_INPUT_LED2_STATE_ON UINT32_C(0x2) + /* Blink */ + #define HWRM_PORT_LED_CFG_INPUT_LED2_STATE_BLINK UINT32_C(0x3) + /* Blink Alternately */ + #define HWRM_PORT_LED_CFG_INPUT_LED2_STATE_BLINKALT UINT32_C(0x4) + #define HWRM_PORT_LED_CFG_INPUT_LED2_STATE_LAST \ + HWRM_PORT_LED_CFG_INPUT_LED2_STATE_BLINKALT + /* The requested color of LED #2. */ + uint8_t led2_color; + /* Default */ + #define HWRM_PORT_LED_CFG_INPUT_LED2_COLOR_DEFAULT UINT32_C(0x0) + /* Amber */ + #define HWRM_PORT_LED_CFG_INPUT_LED2_COLOR_AMBER UINT32_C(0x1) + /* Green */ + #define HWRM_PORT_LED_CFG_INPUT_LED2_COLOR_GREEN UINT32_C(0x2) + /* Green or Amber */ + #define HWRM_PORT_LED_CFG_INPUT_LED2_COLOR_GREENAMBER UINT32_C(0x3) + #define HWRM_PORT_LED_CFG_INPUT_LED2_COLOR_LAST \ + HWRM_PORT_LED_CFG_INPUT_LED2_COLOR_GREENAMBER + uint8_t unused_2; + /* + * If the LED #2 state is "blink" or "blinkalt", then + * this field represents the requested time in milliseconds + * to keep LED on between cycles. + */ + uint16_t led2_blink_on; + /* + * If the LED #2 state is "blink" or "blinkalt", then + * this field represents the requested time in milliseconds + * to keep LED off between cycles. + */ + uint16_t led2_blink_off; + /* + * An identifier for the group of LEDs that LED #2 belongs + * to. + * If set to 0, then the LED #2 shall not be grouped and + * shall be treated as an individual resource. + * For all other non-zero values of this field, LED #2 shall + * be grouped together with the LEDs with the same group ID + * value. + */ + uint8_t led2_group_id; + /* Reserved field. */ + uint8_t rsvd2; + /* An identifier for the LED #3. */ + uint8_t led3_id; + /* The requested state of the LED #3. 
*/ + uint8_t led3_state; + /* Default state of the LED */ + #define HWRM_PORT_LED_CFG_INPUT_LED3_STATE_DEFAULT UINT32_C(0x0) + /* Off */ + #define HWRM_PORT_LED_CFG_INPUT_LED3_STATE_OFF UINT32_C(0x1) + /* On */ + #define HWRM_PORT_LED_CFG_INPUT_LED3_STATE_ON UINT32_C(0x2) + /* Blink */ + #define HWRM_PORT_LED_CFG_INPUT_LED3_STATE_BLINK UINT32_C(0x3) + /* Blink Alternately */ + #define HWRM_PORT_LED_CFG_INPUT_LED3_STATE_BLINKALT UINT32_C(0x4) + #define HWRM_PORT_LED_CFG_INPUT_LED3_STATE_LAST \ + HWRM_PORT_LED_CFG_INPUT_LED3_STATE_BLINKALT + /* The requested color of LED #3. */ + uint8_t led3_color; + /* Default */ + #define HWRM_PORT_LED_CFG_INPUT_LED3_COLOR_DEFAULT UINT32_C(0x0) + /* Amber */ + #define HWRM_PORT_LED_CFG_INPUT_LED3_COLOR_AMBER UINT32_C(0x1) + /* Green */ + #define HWRM_PORT_LED_CFG_INPUT_LED3_COLOR_GREEN UINT32_C(0x2) + /* Green or Amber */ + #define HWRM_PORT_LED_CFG_INPUT_LED3_COLOR_GREENAMBER UINT32_C(0x3) + #define HWRM_PORT_LED_CFG_INPUT_LED3_COLOR_LAST \ + HWRM_PORT_LED_CFG_INPUT_LED3_COLOR_GREENAMBER + uint8_t unused_3; + /* + * If the LED #3 state is "blink" or "blinkalt", then + * this field represents the requested time in milliseconds + * to keep LED on between cycles. + */ + uint16_t led3_blink_on; + /* + * If the LED #3 state is "blink" or "blinkalt", then + * this field represents the requested time in milliseconds + * to keep LED off between cycles. + */ + uint16_t led3_blink_off; + /* + * An identifier for the group of LEDs that LED #3 belongs + * to. + * If set to 0, then the LED #3 shall not be grouped and + * shall be treated as an individual resource. + * For all other non-zero values of this field, LED #3 shall + * be grouped together with the LEDs with the same group ID + * value. + */ + uint8_t led3_group_id; + /* Reserved field. */ + uint8_t rsvd3; +} __rte_packed; + +/* hwrm_port_led_cfg_output (size:128b/16B) */ +struct hwrm_port_led_cfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/********************** + * hwrm_port_led_qcfg * + **********************/ + + +/* hwrm_port_led_qcfg_input (size:192b/24B) */ +struct hwrm_port_led_qcfg_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. 
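All *_input structures in this file share the same leading header fields, carried in little-endian byte order. A sketch of filling them, using hwrm_port_led_qcfg as the example; the values shown (no completion ring, target is the HWRM itself) are illustrative choices, not requirements:

#include <rte_byteorder.h>

static void fill_led_qcfg_hdr(struct hwrm_port_led_qcfg_input *req,
                              uint16_t req_type, uint16_t seq_id,
                              uint64_t resp_dma_addr)
{
        req->req_type = rte_cpu_to_le_16(req_type);
        req->cmpl_ring = rte_cpu_to_le_16(0xffff); /* no completion ring */
        req->seq_id = rte_cpu_to_le_16(seq_id);    /* opaque to firmware */
        req->target_id = rte_cpu_to_le_16(0xffff); /* 0xFFFF targets the HWRM */
        req->resp_addr = rte_cpu_to_le_64(resp_dma_addr);
}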
+ */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* Port ID of port whose LED configuration is being queried. */ + uint16_t port_id; + uint8_t unused_0[6]; +} __rte_packed; + +/* hwrm_port_led_qcfg_output (size:448b/56B) */ +struct hwrm_port_led_qcfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* + * The number of LEDs that are configured on this port. + * Up to 4 LEDs can be returned in the response. + */ + uint8_t num_leds; + /* An identifier for the LED #0. */ + uint8_t led0_id; + /* The type of LED #0. */ + uint8_t led0_type; + /* Speed LED */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_TYPE_SPEED UINT32_C(0x0) + /* Activity LED */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_TYPE_ACTIVITY UINT32_C(0x1) + /* Invalid */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_TYPE_INVALID UINT32_C(0xff) + #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_TYPE_LAST \ + HWRM_PORT_LED_QCFG_OUTPUT_LED0_TYPE_INVALID + /* The current state of the LED #0. */ + uint8_t led0_state; + /* Default state of the LED */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_DEFAULT UINT32_C(0x0) + /* Off */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_OFF UINT32_C(0x1) + /* On */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_ON UINT32_C(0x2) + /* Blink */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_BLINK UINT32_C(0x3) + /* Blink Alternately */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_BLINKALT UINT32_C(0x4) + #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_LAST \ + HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_BLINKALT + /* The color of LED #0. */ + uint8_t led0_color; + /* Default */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_COLOR_DEFAULT UINT32_C(0x0) + /* Amber */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_COLOR_AMBER UINT32_C(0x1) + /* Green */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_COLOR_GREEN UINT32_C(0x2) + /* Green or Amber */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_COLOR_GREENAMBER UINT32_C(0x3) + #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_COLOR_LAST \ + HWRM_PORT_LED_QCFG_OUTPUT_LED0_COLOR_GREENAMBER + uint8_t unused_0; + /* + * If the LED #0 state is "blink" or "blinkalt", then + * this field represents the requested time in milliseconds + * to keep LED on between cycles. + */ + uint16_t led0_blink_on; + /* + * If the LED #0 state is "blink" or "blinkalt", then + * this field represents the requested time in milliseconds + * to keep LED off between cycles. + */ + uint16_t led0_blink_off; + /* + * An identifier for the group of LEDs that LED #0 belongs + * to. + * If set to 0, then the LED #0 is not grouped. + * For all other non-zero values of this field, LED #0 is + * grouped together with the LEDs with the same group ID + * value. + */ + uint8_t led0_group_id; + /* An identifier for the LED #1. */ + uint8_t led1_id; + /* The type of LED #1. 
*/ + uint8_t led1_type; + /* Speed LED */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_TYPE_SPEED UINT32_C(0x0) + /* Activity LED */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_TYPE_ACTIVITY UINT32_C(0x1) + /* Invalid */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_TYPE_INVALID UINT32_C(0xff) + #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_TYPE_LAST \ + HWRM_PORT_LED_QCFG_OUTPUT_LED1_TYPE_INVALID + /* The current state of the LED #1. */ + uint8_t led1_state; + /* Default state of the LED */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_STATE_DEFAULT UINT32_C(0x0) + /* Off */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_STATE_OFF UINT32_C(0x1) + /* On */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_STATE_ON UINT32_C(0x2) + /* Blink */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_STATE_BLINK UINT32_C(0x3) + /* Blink Alternately */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_STATE_BLINKALT UINT32_C(0x4) + #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_STATE_LAST \ + HWRM_PORT_LED_QCFG_OUTPUT_LED1_STATE_BLINKALT + /* The color of LED #1. */ + uint8_t led1_color; + /* Default */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_COLOR_DEFAULT UINT32_C(0x0) + /* Amber */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_COLOR_AMBER UINT32_C(0x1) + /* Green */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_COLOR_GREEN UINT32_C(0x2) + /* Green or Amber */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_COLOR_GREENAMBER UINT32_C(0x3) + #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_COLOR_LAST \ + HWRM_PORT_LED_QCFG_OUTPUT_LED1_COLOR_GREENAMBER + uint8_t unused_1; + /* + * If the LED #1 state is "blink" or "blinkalt", then + * this field represents the requested time in milliseconds + * to keep LED on between cycles. + */ + uint16_t led1_blink_on; + /* + * If the LED #1 state is "blink" or "blinkalt", then + * this field represents the requested time in milliseconds + * to keep LED off between cycles. + */ + uint16_t led1_blink_off; + /* + * An identifier for the group of LEDs that LED #1 belongs + * to. + * If set to 0, then the LED #1 is not grouped. + * For all other non-zero values of this field, LED #1 is + * grouped together with the LEDs with the same group ID + * value. + */ + uint8_t led1_group_id; + /* An identifier for the LED #2. */ + uint8_t led2_id; + /* The type of LED #2. */ + uint8_t led2_type; + /* Speed LED */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_TYPE_SPEED UINT32_C(0x0) + /* Activity LED */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_TYPE_ACTIVITY UINT32_C(0x1) + /* Invalid */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_TYPE_INVALID UINT32_C(0xff) + #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_TYPE_LAST \ + HWRM_PORT_LED_QCFG_OUTPUT_LED2_TYPE_INVALID + /* The current state of the LED #2. */ + uint8_t led2_state; + /* Default state of the LED */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_STATE_DEFAULT UINT32_C(0x0) + /* Off */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_STATE_OFF UINT32_C(0x1) + /* On */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_STATE_ON UINT32_C(0x2) + /* Blink */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_STATE_BLINK UINT32_C(0x3) + /* Blink Alternately */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_STATE_BLINKALT UINT32_C(0x4) + #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_STATE_LAST \ + HWRM_PORT_LED_QCFG_OUTPUT_LED2_STATE_BLINKALT + /* The color of LED #2. 
*/ + uint8_t led2_color; + /* Default */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_COLOR_DEFAULT UINT32_C(0x0) + /* Amber */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_COLOR_AMBER UINT32_C(0x1) + /* Green */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_COLOR_GREEN UINT32_C(0x2) + /* Green or Amber */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_COLOR_GREENAMBER UINT32_C(0x3) + #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_COLOR_LAST \ + HWRM_PORT_LED_QCFG_OUTPUT_LED2_COLOR_GREENAMBER + uint8_t unused_2; + /* + * If the LED #2 state is "blink" or "blinkalt", then + * this field represents the requested time in milliseconds + * to keep LED on between cycles. + */ + uint16_t led2_blink_on; + /* + * If the LED #2 state is "blink" or "blinkalt", then + * this field represents the requested time in milliseconds + * to keep LED off between cycles. + */ + uint16_t led2_blink_off; + /* + * An identifier for the group of LEDs that LED #2 belongs + * to. + * If set to 0, then the LED #2 is not grouped. + * For all other non-zero values of this field, LED #2 is + * grouped together with the LEDs with the same group ID + * value. + */ + uint8_t led2_group_id; + /* An identifier for the LED #3. */ + uint8_t led3_id; + /* The type of LED #3. */ + uint8_t led3_type; + /* Speed LED */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_TYPE_SPEED UINT32_C(0x0) + /* Activity LED */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_TYPE_ACTIVITY UINT32_C(0x1) + /* Invalid */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_TYPE_INVALID UINT32_C(0xff) + #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_TYPE_LAST \ + HWRM_PORT_LED_QCFG_OUTPUT_LED3_TYPE_INVALID + /* The current state of the LED #3. */ + uint8_t led3_state; + /* Default state of the LED */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_STATE_DEFAULT UINT32_C(0x0) + /* Off */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_STATE_OFF UINT32_C(0x1) + /* On */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_STATE_ON UINT32_C(0x2) + /* Blink */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_STATE_BLINK UINT32_C(0x3) + /* Blink Alternately */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_STATE_BLINKALT UINT32_C(0x4) + #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_STATE_LAST \ + HWRM_PORT_LED_QCFG_OUTPUT_LED3_STATE_BLINKALT + /* The color of LED #3. */ + uint8_t led3_color; + /* Default */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_COLOR_DEFAULT UINT32_C(0x0) + /* Amber */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_COLOR_AMBER UINT32_C(0x1) + /* Green */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_COLOR_GREEN UINT32_C(0x2) + /* Green or Amber */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_COLOR_GREENAMBER UINT32_C(0x3) + #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_COLOR_LAST \ + HWRM_PORT_LED_QCFG_OUTPUT_LED3_COLOR_GREENAMBER + uint8_t unused_3; + /* + * If the LED #3 state is "blink" or "blinkalt", then + * this field represents the requested time in milliseconds + * to keep LED on between cycles. + */ + uint16_t led3_blink_on; + /* + * If the LED #3 state is "blink" or "blinkalt", then + * this field represents the requested time in milliseconds + * to keep LED off between cycles. + */ + uint16_t led3_blink_off; + /* + * An identifier for the group of LEDs that LED #3 belongs + * to. + * If set to 0, then the LED #3 is not grouped. + * For all other non-zero values of this field, LED #3 is + * grouped together with the LEDs with the same group ID + * value. + */ + uint8_t led3_group_id; + uint8_t unused_4[6]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. 
This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/*********************** + * hwrm_port_led_qcaps * + ***********************/ + + +/* hwrm_port_led_qcaps_input (size:192b/24B) */ +struct hwrm_port_led_qcaps_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* Port ID of port whose LED configuration is being queried. */ + uint16_t port_id; + uint8_t unused_0[6]; +} __rte_packed; + +/* hwrm_port_led_qcaps_output (size:384b/48B) */ +struct hwrm_port_led_qcaps_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* + * The number of LEDs that are configured on this port. + * Up to 4 LEDs can be returned in the response. + */ + uint8_t num_leds; + /* Reserved for future use. */ + uint8_t unused[3]; + /* An identifier for the LED #0. */ + uint8_t led0_id; + /* The type of LED #0. */ + uint8_t led0_type; + /* Speed LED */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_TYPE_SPEED UINT32_C(0x0) + /* Activity LED */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_TYPE_ACTIVITY UINT32_C(0x1) + /* Invalid */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_TYPE_INVALID UINT32_C(0xff) + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_TYPE_LAST \ + HWRM_PORT_LED_QCAPS_OUTPUT_LED0_TYPE_INVALID + /* + * An identifier for the group of LEDs that LED #0 belongs + * to. + * If set to 0, then the LED #0 cannot be grouped. + * For all other non-zero values of this field, LED #0 is + * grouped together with the LEDs with the same group ID + * value. + */ + uint8_t led0_group_id; + uint8_t unused_0; + /* The states supported by LED #0. */ + uint16_t led0_state_caps; + /* + * If set to 1, this LED is enabled. + * If set to 0, this LED is disabled. + */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_STATE_CAPS_ENABLED \ + UINT32_C(0x1) + /* + * If set to 1, off state is supported on this LED. + * If set to 0, off state is not supported on this LED. + */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_STATE_CAPS_OFF_SUPPORTED \ + UINT32_C(0x2) + /* + * If set to 1, on state is supported on this LED. + * If set to 0, on state is not supported on this LED. 
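A sketch of consuming these per-LED capability bits before issuing hwrm_port_led_cfg; resp is assumed to be a completed hwrm_port_led_qcaps response, and the *_BLINK_SUPPORTED bit used here is defined just below:

#include <stdbool.h>
#include <rte_byteorder.h>

static bool led0_can_blink(const struct hwrm_port_led_qcaps_output *resp)
{
        uint16_t caps = rte_le_to_cpu_16(resp->led0_state_caps);

        return (caps & HWRM_PORT_LED_QCAPS_OUTPUT_LED0_STATE_CAPS_ENABLED) &&
               (caps & HWRM_PORT_LED_QCAPS_OUTPUT_LED0_STATE_CAPS_BLINK_SUPPORTED);
}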
+ */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_STATE_CAPS_ON_SUPPORTED \ + UINT32_C(0x4) + /* + * If set to 1, blink state is supported on this LED. + * If set to 0, blink state is not supported on this LED. + */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_STATE_CAPS_BLINK_SUPPORTED \ + UINT32_C(0x8) + /* + * If set to 1, blink_alt state is supported on this LED. + * If set to 0, blink_alt state is not supported on this LED. + */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_STATE_CAPS_BLINK_ALT_SUPPORTED \ + UINT32_C(0x10) + /* The colors supported by LED #0. */ + uint16_t led0_color_caps; + /* reserved. */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_COLOR_CAPS_RSVD \ + UINT32_C(0x1) + /* + * If set to 1, Amber color is supported on this LED. + * If set to 0, Amber color is not supported on this LED. + */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_COLOR_CAPS_AMBER_SUPPORTED \ + UINT32_C(0x2) + /* + * If set to 1, Green color is supported on this LED. + * If set to 0, Green color is not supported on this LED. + */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_COLOR_CAPS_GREEN_SUPPORTED \ + UINT32_C(0x4) + /* An identifier for the LED #1. */ + uint8_t led1_id; + /* The type of LED #1. */ + uint8_t led1_type; + /* Speed LED */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_TYPE_SPEED UINT32_C(0x0) + /* Activity LED */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_TYPE_ACTIVITY UINT32_C(0x1) + /* Invalid */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_TYPE_INVALID UINT32_C(0xff) + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_TYPE_LAST \ + HWRM_PORT_LED_QCAPS_OUTPUT_LED1_TYPE_INVALID + /* + * An identifier for the group of LEDs that LED #1 belongs + * to. + * If set to 0, then the LED #0 cannot be grouped. + * For all other non-zero values of this field, LED #0 is + * grouped together with the LEDs with the same group ID + * value. + */ + uint8_t led1_group_id; + uint8_t unused_1; + /* The states supported by LED #1. */ + uint16_t led1_state_caps; + /* + * If set to 1, this LED is enabled. + * If set to 0, this LED is disabled. + */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_STATE_CAPS_ENABLED \ + UINT32_C(0x1) + /* + * If set to 1, off state is supported on this LED. + * If set to 0, off state is not supported on this LED. + */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_STATE_CAPS_OFF_SUPPORTED \ + UINT32_C(0x2) + /* + * If set to 1, on state is supported on this LED. + * If set to 0, on state is not supported on this LED. + */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_STATE_CAPS_ON_SUPPORTED \ + UINT32_C(0x4) + /* + * If set to 1, blink state is supported on this LED. + * If set to 0, blink state is not supported on this LED. + */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_STATE_CAPS_BLINK_SUPPORTED \ + UINT32_C(0x8) + /* + * If set to 1, blink_alt state is supported on this LED. + * If set to 0, blink_alt state is not supported on this LED. + */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_STATE_CAPS_BLINK_ALT_SUPPORTED \ + UINT32_C(0x10) + /* The colors supported by LED #1. */ + uint16_t led1_color_caps; + /* reserved. */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_COLOR_CAPS_RSVD \ + UINT32_C(0x1) + /* + * If set to 1, Amber color is supported on this LED. + * If set to 0, Amber color is not supported on this LED. + */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_COLOR_CAPS_AMBER_SUPPORTED \ + UINT32_C(0x2) + /* + * If set to 1, Green color is supported on this LED. + * If set to 0, Green color is not supported on this LED. 
+ */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_COLOR_CAPS_GREEN_SUPPORTED \ + UINT32_C(0x4) + /* An identifier for the LED #2. */ + uint8_t led2_id; + /* The type of LED #2. */ + uint8_t led2_type; + /* Speed LED */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_TYPE_SPEED UINT32_C(0x0) + /* Activity LED */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_TYPE_ACTIVITY UINT32_C(0x1) + /* Invalid */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_TYPE_INVALID UINT32_C(0xff) + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_TYPE_LAST \ + HWRM_PORT_LED_QCAPS_OUTPUT_LED2_TYPE_INVALID + /* + * An identifier for the group of LEDs that LED #0 belongs + * to. + * If set to 0, then the LED #0 cannot be grouped. + * For all other non-zero values of this field, LED #0 is + * grouped together with the LEDs with the same group ID + * value. + */ + uint8_t led2_group_id; + uint8_t unused_2; + /* The states supported by LED #2. */ + uint16_t led2_state_caps; + /* + * If set to 1, this LED is enabled. + * If set to 0, this LED is disabled. + */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_STATE_CAPS_ENABLED \ + UINT32_C(0x1) + /* + * If set to 1, off state is supported on this LED. + * If set to 0, off state is not supported on this LED. + */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_STATE_CAPS_OFF_SUPPORTED \ + UINT32_C(0x2) + /* + * If set to 1, on state is supported on this LED. + * If set to 0, on state is not supported on this LED. + */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_STATE_CAPS_ON_SUPPORTED \ + UINT32_C(0x4) + /* + * If set to 1, blink state is supported on this LED. + * If set to 0, blink state is not supported on this LED. + */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_STATE_CAPS_BLINK_SUPPORTED \ + UINT32_C(0x8) + /* + * If set to 1, blink_alt state is supported on this LED. + * If set to 0, blink_alt state is not supported on this LED. + */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_STATE_CAPS_BLINK_ALT_SUPPORTED \ + UINT32_C(0x10) + /* The colors supported by LED #2. */ + uint16_t led2_color_caps; + /* reserved. */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_COLOR_CAPS_RSVD \ + UINT32_C(0x1) + /* + * If set to 1, Amber color is supported on this LED. + * If set to 0, Amber color is not supported on this LED. + */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_COLOR_CAPS_AMBER_SUPPORTED \ + UINT32_C(0x2) + /* + * If set to 1, Green color is supported on this LED. + * If set to 0, Green color is not supported on this LED. + */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_COLOR_CAPS_GREEN_SUPPORTED \ + UINT32_C(0x4) + /* An identifier for the LED #3. */ + uint8_t led3_id; + /* The type of LED #3. */ + uint8_t led3_type; + /* Speed LED */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_TYPE_SPEED UINT32_C(0x0) + /* Activity LED */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_TYPE_ACTIVITY UINT32_C(0x1) + /* Invalid */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_TYPE_INVALID UINT32_C(0xff) + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_TYPE_LAST \ + HWRM_PORT_LED_QCAPS_OUTPUT_LED3_TYPE_INVALID + /* + * An identifier for the group of LEDs that LED #3 belongs + * to. + * If set to 0, then the LED #0 cannot be grouped. + * For all other non-zero values of this field, LED #0 is + * grouped together with the LEDs with the same group ID + * value. + */ + uint8_t led3_group_id; + uint8_t unused_3; + /* The states supported by LED #3. */ + uint16_t led3_state_caps; + /* + * If set to 1, this LED is enabled. + * If set to 0, this LED is disabled. 
+ */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_STATE_CAPS_ENABLED \ + UINT32_C(0x1) + /* + * If set to 1, off state is supported on this LED. + * If set to 0, off state is not supported on this LED. + */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_STATE_CAPS_OFF_SUPPORTED \ + UINT32_C(0x2) + /* + * If set to 1, on state is supported on this LED. + * If set to 0, on state is not supported on this LED. + */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_STATE_CAPS_ON_SUPPORTED \ + UINT32_C(0x4) + /* + * If set to 1, blink state is supported on this LED. + * If set to 0, blink state is not supported on this LED. + */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_STATE_CAPS_BLINK_SUPPORTED \ + UINT32_C(0x8) + /* + * If set to 1, blink_alt state is supported on this LED. + * If set to 0, blink_alt state is not supported on this LED. + */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_STATE_CAPS_BLINK_ALT_SUPPORTED \ + UINT32_C(0x10) + /* The colors supported by LED #3. */ + uint16_t led3_color_caps; + /* reserved. */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_COLOR_CAPS_RSVD \ + UINT32_C(0x1) + /* + * If set to 1, Amber color is supported on this LED. + * If set to 0, Amber color is not supported on this LED. + */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_COLOR_CAPS_AMBER_SUPPORTED \ + UINT32_C(0x2) + /* + * If set to 1, Green color is supported on this LED. + * If set to 0, Green color is not supported on this LED. + */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_COLOR_CAPS_GREEN_SUPPORTED \ + UINT32_C(0x4) + uint8_t unused_4[3]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/*********************** + * hwrm_port_prbs_test * + ***********************/ + + +/* hwrm_port_prbs_test_input (size:384b/48B) */ +struct hwrm_port_prbs_test_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* Host address data is to DMA'd to. */ + uint64_t resp_data_addr; + /* + * Size of the buffer pointed to by resp_data_addr. The firmware may + * use this entire buffer or less than the entire buffer, but never more. + */ + uint16_t data_len; + uint16_t unused_0; + uint32_t unused_1; + /* Port ID of port where PRBS test to be run. */ + uint16_t port_id; + /* Polynomial selection for PRBS test. 
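A sketch of starting a PRBS31 run on all lanes of a port for 60 seconds, assuming a hypothetical hwrm_send() transport stub, an HWRM_PORT_PRBS_TEST request-type constant defined earlier in this file, and a separately allocated DMA buffer (data_dma/data_len) for the test results:

#include <string.h>
#include <rte_byteorder.h>

extern int hwrm_send(void *req, size_t req_len, void *resp, size_t resp_len); /* hypothetical */

static int start_prbs31(uint16_t port_id, uint64_t data_dma, uint16_t data_len)
{
        struct hwrm_port_prbs_test_input req;
        struct hwrm_port_prbs_test_output resp;

        memset(&req, 0, sizeof(req));
        req.req_type = rte_cpu_to_le_16(HWRM_PORT_PRBS_TEST); /* assumed constant */
        req.port_id = rte_cpu_to_le_16(port_id);
        req.resp_data_addr = rte_cpu_to_le_64(data_dma);
        req.data_len = rte_cpu_to_le_16(data_len);
        req.poly = rte_cpu_to_le_16(HWRM_PORT_PRBS_TEST_INPUT_POLY_PRBS31);
        /* START_STOP=1 starts the test; the lane-map-valid bits are left
         * clear so the test runs on every lane of the port. */
        req.prbs_config = rte_cpu_to_le_16(HWRM_PORT_PRBS_TEST_INPUT_PRBS_CONFIG_START_STOP);
        req.timeout = rte_cpu_to_le_16(60); /* seconds */

        return hwrm_send(&req, sizeof(req), &resp, sizeof(resp));
}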
*/ + uint16_t poly; + /* PRBS7 */ + #define HWRM_PORT_PRBS_TEST_INPUT_POLY_PRBS7 UINT32_C(0x0) + /* PRBS9 */ + #define HWRM_PORT_PRBS_TEST_INPUT_POLY_PRBS9 UINT32_C(0x1) + /* PRBS11 */ + #define HWRM_PORT_PRBS_TEST_INPUT_POLY_PRBS11 UINT32_C(0x2) + /* PRBS15 */ + #define HWRM_PORT_PRBS_TEST_INPUT_POLY_PRBS15 UINT32_C(0x3) + /* PRBS23 */ + #define HWRM_PORT_PRBS_TEST_INPUT_POLY_PRBS23 UINT32_C(0x4) + /* PRBS31 */ + #define HWRM_PORT_PRBS_TEST_INPUT_POLY_PRBS31 UINT32_C(0x5) + /* PRBS58 */ + #define HWRM_PORT_PRBS_TEST_INPUT_POLY_PRBS58 UINT32_C(0x6) + /* Invalid */ + #define HWRM_PORT_PRBS_TEST_INPUT_POLY_INVALID UINT32_C(0xff) + #define HWRM_PORT_PRBS_TEST_INPUT_POLY_LAST \ + HWRM_PORT_PRBS_TEST_INPUT_POLY_INVALID + /* + * Configuration bits for PRBS test. + * Use enable bit to start/stop test. + * Use tx/rx lane map bits to run test on specific lanes, + * if set to 0 test will be run on all lanes. + */ + uint16_t prbs_config; + /* + * Set 0 to stop test currently in progress + * Set 1 to start test with configuration provided. + */ + #define HWRM_PORT_PRBS_TEST_INPUT_PRBS_CONFIG_START_STOP \ + UINT32_C(0x1) + /* + * If set to 1, tx_lane_map bitmap should have lane bits set. + * If set to 0, test will be run on all lanes for this port. + */ + #define HWRM_PORT_PRBS_TEST_INPUT_PRBS_CONFIG_TX_LANE_MAP_VALID \ + UINT32_C(0x2) + /* + * If set to 1, rx_lane_map bitmap should have lane bits set. + * If set to 0, test will be run on all lanes for this port. + */ + #define HWRM_PORT_PRBS_TEST_INPUT_PRBS_CONFIG_RX_LANE_MAP_VALID \ + UINT32_C(0x4) + /* Duration in seconds to run the PRBS test. */ + uint16_t timeout; + /* + * If tx_lane_map_valid is set to 1, this field is a bitmap + * of tx lanes to run PRBS test. bit0 = lane0, + * bit1 = lane1 ..bit31 = lane31 + */ + uint32_t tx_lane_map; + /* + * If rx_lane_map_valid is set to 1, this field is a bitmap + * of rx lanes to run PRBS test. bit0 = lane0, + * bit1 = lane1 ..bit31 = lane31 + */ + uint32_t rx_lane_map; +} __rte_packed; + +/* hwrm_port_prbs_test_output (size:128b/16B) */ +struct hwrm_port_prbs_test_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* Total length of stored data. */ + uint16_t total_data_len; + uint16_t unused_0; + uint8_t unused_1[3]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/********************** + * hwrm_port_dsc_dump * + **********************/ + + +/* hwrm_port_dsc_dump_input (size:320b/40B) */ +struct hwrm_port_dsc_dump_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. 
+ */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* Host address where response diagnostic data is returned. */ + uint64_t resp_data_addr; + /* + * Size of the buffer pointed to by resp_data_addr. The firmware + * may use this entire buffer or less than the entire buffer, but + * never more. + */ + uint16_t data_len; + uint16_t unused_0; + uint32_t unused_1; + /* Port ID of port where dsc dump to be collected. */ + uint16_t port_id; + /* Diag level specified by the user */ + uint16_t diag_level; + /* SRDS_DIAG_LANE */ + #define HWRM_PORT_DSC_DUMP_INPUT_DIAG_LEVEL_SRDS_DIAG_LANE \ + UINT32_C(0x0) + /* SRDS_DIAG_CORE */ + #define HWRM_PORT_DSC_DUMP_INPUT_DIAG_LEVEL_SRDS_DIAG_CORE \ + UINT32_C(0x1) + /* SRDS_DIAG_EVENT */ + #define HWRM_PORT_DSC_DUMP_INPUT_DIAG_LEVEL_SRDS_DIAG_EVENT \ + UINT32_C(0x2) + /* SRDS_DIAG_EYE */ + #define HWRM_PORT_DSC_DUMP_INPUT_DIAG_LEVEL_SRDS_DIAG_EYE \ + UINT32_C(0x3) + /* SRDS_DIAG_REG_CORE */ + #define HWRM_PORT_DSC_DUMP_INPUT_DIAG_LEVEL_SRDS_DIAG_REG_CORE \ + UINT32_C(0x4) + /* SRDS_DIAG_REG_LANE */ + #define HWRM_PORT_DSC_DUMP_INPUT_DIAG_LEVEL_SRDS_DIAG_REG_LANE \ + UINT32_C(0x5) + /* SRDS_DIAG_UC_CORE */ + #define HWRM_PORT_DSC_DUMP_INPUT_DIAG_LEVEL_SRDS_DIAG_UC_CORE \ + UINT32_C(0x6) + /* SRDS_DIAG_UC_LANE */ + #define HWRM_PORT_DSC_DUMP_INPUT_DIAG_LEVEL_SRDS_DIAG_UC_LANE \ + UINT32_C(0x7) + /* SRDS_DIAG_LANE_DEBUG */ + #define HWRM_PORT_DSC_DUMP_INPUT_DIAG_LEVEL_SRDS_DIAG_LANE_DEBUG \ + UINT32_C(0x8) + /* SRDS_DIAG_BER_VERT */ + #define HWRM_PORT_DSC_DUMP_INPUT_DIAG_LEVEL_SRDS_DIAG_BER_VERT \ + UINT32_C(0x9) + /* SRDS_DIAG_BER_HORZ */ + #define HWRM_PORT_DSC_DUMP_INPUT_DIAG_LEVEL_SRDS_DIAG_BER_HORZ \ + UINT32_C(0xa) + /* SRDS_DIAG_EVENT_SAFE */ + #define HWRM_PORT_DSC_DUMP_INPUT_DIAG_LEVEL_SRDS_DIAG_EVENT_SAFE \ + UINT32_C(0xb) + /* SRDS_DIAG_TIMESTAMP */ + #define HWRM_PORT_DSC_DUMP_INPUT_DIAG_LEVEL_SRDS_DIAG_TIMESTAMP \ + UINT32_C(0xc) + #define HWRM_PORT_DSC_DUMP_INPUT_DIAG_LEVEL_LAST \ + HWRM_PORT_DSC_DUMP_INPUT_DIAG_LEVEL_SRDS_DIAG_TIMESTAMP + /* + * This field is a lane number + * on which to collect the dsc dump + */ + uint16_t lane_number; + /* + * Configuration bits. + * Use enable bit to start dsc dump or retrieve dump + */ + uint16_t dsc_dump_config; + /* + * Set 0 to retrieve the dsc dump + * Set 1 to start the dsc dump + */ + #define HWRM_PORT_DSC_DUMP_INPUT_DSC_DUMP_CONFIG_START_RETRIEVE \ + UINT32_C(0x1) +} __rte_packed; + +/* hwrm_port_dsc_dump_output (size:128b/16B) */ +struct hwrm_port_dsc_dump_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* Total length of stored data. */ + uint16_t total_data_len; + uint16_t unused_0; + uint8_t unused_1[3]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. 
This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/****************************** + * hwrm_port_sfp_sideband_cfg * + ******************************/ + + +/* hwrm_port_sfp_sideband_cfg_input (size:256b/32B) */ +struct hwrm_port_sfp_sideband_cfg_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* Port ID of port that is to be queried. */ + uint16_t port_id; + uint8_t unused_0[6]; + /* + * This bitfield is used to specify which bits from the 'flags' + * fields are being configured by the caller. + */ + uint32_t enables; + /* This bit must be '1' for rs0 to be configured. */ + #define HWRM_PORT_SFP_SIDEBAND_CFG_INPUT_ENABLES_RS0 \ + UINT32_C(0x1) + /* This bit must be '1' for rs1 to be configured. */ + #define HWRM_PORT_SFP_SIDEBAND_CFG_INPUT_ENABLES_RS1 \ + UINT32_C(0x2) + /* This bit must be '1' for tx_disable to be configured. */ + #define HWRM_PORT_SFP_SIDEBAND_CFG_INPUT_ENABLES_TX_DIS \ + UINT32_C(0x4) + /* + * This bit must be '1' for mod_sel to be configured. + * Valid only on QSFP modules + */ + #define HWRM_PORT_SFP_SIDEBAND_CFG_INPUT_ENABLES_MOD_SEL \ + UINT32_C(0x8) + /* This bit must be '1' for reset_l to be configured. */ + #define HWRM_PORT_SFP_SIDEBAND_CFG_INPUT_ENABLES_RESET_L \ + UINT32_C(0x10) + /* This bit must be '1' for lp_mode to be configured. */ + #define HWRM_PORT_SFP_SIDEBAND_CFG_INPUT_ENABLES_LP_MODE \ + UINT32_C(0x20) + /* This bit must be '1' for pwr_disable to be configured. */ + #define HWRM_PORT_SFP_SIDEBAND_CFG_INPUT_ENABLES_PWR_DIS \ + UINT32_C(0x40) + /* + * Only bits that have corresponding bits in the 'enables' + * bitfield are processed by the firmware, all other bits + * of 'flags' are ignored. + */ + uint32_t flags; + /* + * This bit along with rs1 configures the current speed of the dual + * rate module. If these pins are GNDed then the speed can be changed + * by driectly writing to EEPROM. + */ + #define HWRM_PORT_SFP_SIDEBAND_CFG_INPUT_FLAGS_RS0 \ + UINT32_C(0x1) + /* + * This bit along with rs0 configures the current speed of the dual + * rate module. If these pins are GNDed then the speed can be changed + * by driectly writing to EEPROM. + */ + #define HWRM_PORT_SFP_SIDEBAND_CFG_INPUT_FLAGS_RS1 \ + UINT32_C(0x2) + /* + * When this bit is set to '1', tx_disable is set. + * On a 1G BASE-T module, if this bit is set, + * module PHY registers will not be accessible. 
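+ * For example, a driver asserting tx_disable would typically set
+ * both the ENABLES_TX_DIS bit in 'enables' and this bit in 'flags'
+ * in the same request.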
+ */ + #define HWRM_PORT_SFP_SIDEBAND_CFG_INPUT_FLAGS_TX_DIS \ + UINT32_C(0x4) + /* + * When this bit is set to '1', this module is selected. + * Valid only on QSFP modules + */ + #define HWRM_PORT_SFP_SIDEBAND_CFG_INPUT_FLAGS_MOD_SEL \ + UINT32_C(0x8) + /* + * If reset_l is set to 0, Module will be taken out of reset + * and other signals will be set to their requested state once + * the module is out of reset. + * Valid only on QSFP modules + */ + #define HWRM_PORT_SFP_SIDEBAND_CFG_INPUT_FLAGS_RESET_L \ + UINT32_C(0x10) + /* + * When this bit is set to '1', the module will be configured + * in low power mode. + * Valid only on QSFP modules + */ + #define HWRM_PORT_SFP_SIDEBAND_CFG_INPUT_FLAGS_LP_MODE \ + UINT32_C(0x20) + /* When this bit is set to '1', the module will be powered down. */ + #define HWRM_PORT_SFP_SIDEBAND_CFG_INPUT_FLAGS_PWR_DIS \ + UINT32_C(0x40) +} __rte_packed; + +/* hwrm_port_sfp_sideband_cfg_output (size:128b/16B) */ +struct hwrm_port_sfp_sideband_cfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. When + * writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/******************************* + * hwrm_port_sfp_sideband_qcfg * + *******************************/ + + +/* hwrm_port_sfp_sideband_qcfg_input (size:192b/24B) */ +struct hwrm_port_sfp_sideband_qcfg_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* Port ID of port that is to be queried. */ + uint16_t port_id; + uint8_t unused_0[6]; +} __rte_packed; + +/* hwrm_port_sfp_sideband_qcfg_output (size:192b/24B) */ +struct hwrm_port_sfp_sideband_qcfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* + * Bitmask indicating which sideband signals are valid. + * This is based on the board and nvm cfg that is present on the board. 
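+ * A driver would typically ignore any bit in sideband_signals below
+ * whose corresponding bit is not set in this mask.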
+ */ + uint32_t supported_mask; + uint32_t sideband_signals; + /* When this bit is set to '1', the Module is absent. */ + #define HWRM_PORT_SFP_SIDEBAND_QCFG_OUTPUT_SIDEBAND_SIGNALS_MOD_ABS \ + UINT32_C(0x1) + /* + * When this bit is set to '1', there is no valid signal on RX. + * This signal is a filtered version of Signal Detect. + */ + #define HWRM_PORT_SFP_SIDEBAND_QCFG_OUTPUT_SIDEBAND_SIGNALS_RX_LOS \ + UINT32_C(0x2) + /* + * This bit along with rs1 indiactes the current speed of the dual + * rate module.If these pins are grounded then the speed can be + * changed by driectky writing to EEPROM. + */ + #define HWRM_PORT_SFP_SIDEBAND_QCFG_OUTPUT_SIDEBAND_SIGNALS_RS0 \ + UINT32_C(0x4) + /* + * This bit along with rs0 indiactes the current speed of the dual + * rate module.If these pins are grounded then the speed can be + * changed by driectky writing to EEPROM. + */ + #define HWRM_PORT_SFP_SIDEBAND_QCFG_OUTPUT_SIDEBAND_SIGNALS_RS1 \ + UINT32_C(0x8) + /* + * When this bit is set to '1', tx_disable is set. + * On a 1G BASE-T module, if this bit is set, module PHY + * registers will not be accessible. + */ + #define HWRM_PORT_SFP_SIDEBAND_QCFG_OUTPUT_SIDEBAND_SIGNALS_TX_DIS \ + UINT32_C(0x10) + /* When this bit is set to '1', tx_fault is set. */ + #define HWRM_PORT_SFP_SIDEBAND_QCFG_OUTPUT_SIDEBAND_SIGNALS_TX_FAULT \ + UINT32_C(0x20) + /* + * When this bit is set to '1', module is selected. + * Valid only on QSFP modules + */ + #define HWRM_PORT_SFP_SIDEBAND_QCFG_OUTPUT_SIDEBAND_SIGNALS_MOD_SEL \ + UINT32_C(0x40) + /* + * When this bit is set to '0', the module is held in reset. + * if reset_l is set to 1,first module is taken out of reset + * and other signals will be set to their requested state. + * Valid only on QSFP modules. + */ + #define HWRM_PORT_SFP_SIDEBAND_QCFG_OUTPUT_SIDEBAND_SIGNALS_RESET_L \ + UINT32_C(0x80) + /* + * When this bit is set to '1', the module is in low power mode. + * Valid only on QSFP modules + */ + #define HWRM_PORT_SFP_SIDEBAND_QCFG_OUTPUT_SIDEBAND_SIGNALS_LP_MODE \ + UINT32_C(0x100) + /* When this bit is set to '1', module is in power down state. */ + #define HWRM_PORT_SFP_SIDEBAND_QCFG_OUTPUT_SIDEBAND_SIGNALS_PWR_DIS \ + UINT32_C(0x200) + uint8_t unused[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. When + * writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/********************************** + * hwrm_port_phy_mdio_bus_acquire * + **********************************/ + + +/* hwrm_port_phy_mdio_bus_acquire_input (size:192b/24B) */ +struct hwrm_port_phy_mdio_bus_acquire_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. 
+ */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* Port ID of the port. */ + uint16_t port_id; + /* + * client_id of the client requesting BUS access. + * Any value from 0x10 to 0xFFFF can be used. + * Client should make sure that the returned client_id + * in response matches the client_id in request. + * 0-0xF are reserved for internal use. + */ + uint16_t client_id; + /* + * Timeout in milli seconds, MDIO BUS will be released automatically + * after this time, if another mdio acquire command is not received + * within the timeout window from the same client. + * A 0xFFFF will hold the bus until this bus is released. + */ + uint16_t mdio_bus_timeout; + uint8_t unused_0[2]; +} __rte_packed; + +/* hwrm_port_phy_mdio_bus_acquire_output (size:128b/16B) */ +struct hwrm_port_phy_mdio_bus_acquire_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint16_t unused_0; + /* + * client_id of the module holding the BUS. + * 0-0xF are reserved for internal use. + */ + uint16_t client_id; + uint8_t unused_1[3]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/********************************** + * hwrm_port_phy_mdio_bus_release * + **********************************/ + + +/* hwrm_port_phy_mdio_bus_release_input (size:192b/24B) */ +struct hwrm_port_phy_mdio_bus_release_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* Port ID of the port. */ + uint16_t port_id; + /* + * client_id of the client requesting BUS release. + * A client should not release any other clients BUS. 
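+ * This is normally the same client_id that was passed in the earlier
+ * hwrm_port_phy_mdio_bus_acquire request for this port.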
+ */ + uint16_t client_id; + uint8_t unused_0[4]; +} __rte_packed; + +/* hwrm_port_phy_mdio_bus_release_output (size:128b/16B) */ +struct hwrm_port_phy_mdio_bus_release_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint16_t unused_0; + /* The BUS is released if client_id matches the client_id in request. */ + uint16_t clients_id; + uint8_t unused_1[3]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/*********************** + * hwrm_queue_qportcfg * + ***********************/ + + +/* hwrm_queue_qportcfg_input (size:192b/24B) */ +struct hwrm_queue_qportcfg_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + uint32_t flags; + /* + * Enumeration denoting the RX, TX type of the resource. + * This enumeration is used for resources that are similar for both + * TX and RX paths of the chip. + */ + #define HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH UINT32_C(0x1) + /* tx path */ + #define HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX UINT32_C(0x0) + /* rx path */ + #define HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX UINT32_C(0x1) + #define HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_LAST \ + HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX + /* + * Port ID of port for which the queue configuration is being + * queried. This field is only required when sent by IPC. + */ + uint16_t port_id; + /* + * Drivers will set this capability when it can use + * queue_idx_service_profile to map the queues to application. + */ + uint8_t drv_qmap_cap; + /* disabled */ + #define HWRM_QUEUE_QPORTCFG_INPUT_DRV_QMAP_CAP_DISABLED UINT32_C(0x0) + /* enabled */ + #define HWRM_QUEUE_QPORTCFG_INPUT_DRV_QMAP_CAP_ENABLED UINT32_C(0x1) + #define HWRM_QUEUE_QPORTCFG_INPUT_DRV_QMAP_CAP_LAST \ + HWRM_QUEUE_QPORTCFG_INPUT_DRV_QMAP_CAP_ENABLED + uint8_t unused_0; +} __rte_packed; + +/* hwrm_queue_qportcfg_output (size:256b/32B) */ +struct hwrm_queue_qportcfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. 
*/ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* + * The maximum number of queues that can be configured on this + * port. + * Valid values range from 1 through 8. + */ + uint8_t max_configurable_queues; + /* + * The maximum number of lossless queues that can be configured + * on this port. + * Valid values range from 0 through 8. + */ + uint8_t max_configurable_lossless_queues; + /* + * Bitmask indicating which queues can be configured by the + * hwrm_queue_cfg command. + * + * Each bit represents a specific queue where bit 0 represents + * queue 0 and bit 7 represents queue 7. + * # A value of 0 indicates that the queue is not configurable + * by the hwrm_queue_cfg command. + * # A value of 1 indicates that the queue is configurable. + * # A hwrm_queue_cfg command shall return error when trying to + * configure a queue not configurable. + */ + uint8_t queue_cfg_allowed; + /* Information about queue configuration. */ + uint8_t queue_cfg_info; + /* + * If this flag is set to '1', then the queues are + * configured asymmetrically on TX and RX sides. + * If this flag is set to '0', then the queues are + * configured symmetrically on TX and RX sides. For + * symmetric configuration, the queue configuration + * including queue ids and service profiles on the + * TX side is the same as the corresponding queue + * configuration on the RX side. + */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_CFG_INFO_ASYM_CFG \ + UINT32_C(0x1) + /* + * Bitmask indicating which queues can be configured by the + * hwrm_queue_pfcenable_cfg command. + * + * Each bit represents a specific priority where bit 0 represents + * priority 0 and bit 7 represents priority 7. + * # A value of 0 indicates that the priority is not configurable by + * the hwrm_queue_pfcenable_cfg command. + * # A value of 1 indicates that the priority is configurable. + * # A hwrm_queue_pfcenable_cfg command shall return error when + * trying to configure a priority that is not configurable. + */ + uint8_t queue_pfcenable_cfg_allowed; + /* + * Bitmask indicating which queues can be configured by the + * hwrm_queue_pri2cos_cfg command. + * + * Each bit represents a specific queue where bit 0 represents + * queue 0 and bit 7 represents queue 7. + * # A value of 0 indicates that the queue is not configurable + * by the hwrm_queue_pri2cos_cfg command. + * # A value of 1 indicates that the queue is configurable. + * # A hwrm_queue_pri2cos_cfg command shall return error when + * trying to configure a queue that is not configurable. + */ + uint8_t queue_pri2cos_cfg_allowed; + /* + * Bitmask indicating which queues can be configured by the + * hwrm_queue_pri2cos_cfg command. + * + * Each bit represents a specific queue where bit 0 represents + * queue 0 and bit 7 represents queue 7. + * # A value of 0 indicates that the queue is not configurable + * by the hwrm_queue_pri2cos_cfg command. + * # A value of 1 indicates that the queue is configurable. + * # A hwrm_queue_pri2cos_cfg command shall return error when + * trying to configure a queue not configurable. + */ + uint8_t queue_cos2bw_cfg_allowed; + /* + * ID of CoS Queue 0. + * FF - Invalid id + * + * # This ID can be used on any subsequent call to an hwrm command + * that takes a queue id. + * # IDs must always be queried by this command before any use + * by the driver or software. + * # Any driver or software should not make any assumptions about + * queue IDs. + * # A value of 0xff indicates that the queue is not available. 
+ * # Available queues may not be in sequential order. + */ + uint8_t queue_id0; + /* This value is applicable to CoS queues only. */ + uint8_t queue_id0_service_profile; + /* Lossy (best-effort) */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_LOSSY \ + UINT32_C(0x0) + /* Lossless (legacy) */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS \ + UINT32_C(0x1) + /* Lossless RoCE */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS_ROCE \ + UINT32_C(0x1) + /* Lossy RoCE CNP */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_LOSSY_ROCE_CNP \ + UINT32_C(0x2) + /* Lossless NIC */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS_NIC \ + UINT32_C(0x3) + /* Set to 0xFF... (All Fs) if there is no service profile specified */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN \ + UINT32_C(0xff) + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_LAST \ + HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN + /* + * ID of CoS Queue 1. + * FF - Invalid id + * + * # This ID can be used on any subsequent call to an hwrm command + * that takes a queue id. + * # IDs must always be queried by this command before any use + * by the driver or software. + * # Any driver or software should not make any assumptions about + * queue IDs. + * # A value of 0xff indicates that the queue is not available. + * # Available queues may not be in sequential order. + */ + uint8_t queue_id1; + /* This value is applicable to CoS queues only. */ + uint8_t queue_id1_service_profile; + /* Lossy (best-effort) */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID1_SERVICE_PROFILE_LOSSY \ + UINT32_C(0x0) + /* Lossless (legacy) */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS \ + UINT32_C(0x1) + /* Lossless RoCE */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS_ROCE \ + UINT32_C(0x1) + /* Lossy RoCE CNP */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID1_SERVICE_PROFILE_LOSSY_ROCE_CNP \ + UINT32_C(0x2) + /* Lossless NIC */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS_NIC \ + UINT32_C(0x3) + /* Set to 0xFF... (All Fs) if there is no service profile specified */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN \ + UINT32_C(0xff) + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID1_SERVICE_PROFILE_LAST \ + HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN + /* + * ID of CoS Queue 2. + * FF - Invalid id + * + * # This ID can be used on any subsequent call to an hwrm command + * that takes a queue id. + * # IDs must always be queried by this command before any use + * by the driver or software. + * # Any driver or software should not make any assumptions about + * queue IDs. + * # A value of 0xff indicates that the queue is not available. + * # Available queues may not be in sequential order. + */ + uint8_t queue_id2; + /* This value is applicable to CoS queues only. 
*/ + uint8_t queue_id2_service_profile; + /* Lossy (best-effort) */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID2_SERVICE_PROFILE_LOSSY \ + UINT32_C(0x0) + /* Lossless (legacy) */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS \ + UINT32_C(0x1) + /* Lossless RoCE */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS_ROCE \ + UINT32_C(0x1) + /* Lossy RoCE CNP */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID2_SERVICE_PROFILE_LOSSY_ROCE_CNP \ + UINT32_C(0x2) + /* Lossless NIC */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS_NIC \ + UINT32_C(0x3) + /* Set to 0xFF... (All Fs) if there is no service profile specified */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN \ + UINT32_C(0xff) + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID2_SERVICE_PROFILE_LAST \ + HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN + /* + * ID of CoS Queue 3. + * FF - Invalid id + * + * # This ID can be used on any subsequent call to an hwrm command + * that takes a queue id. + * # IDs must always be queried by this command before any use + * by the driver or software. + * # Any driver or software should not make any assumptions about + * queue IDs. + * # A value of 0xff indicates that the queue is not available. + * # Available queues may not be in sequential order. + */ + uint8_t queue_id3; + /* This value is applicable to CoS queues only. */ + uint8_t queue_id3_service_profile; + /* Lossy (best-effort) */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID3_SERVICE_PROFILE_LOSSY \ + UINT32_C(0x0) + /* Lossless (legacy) */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS \ + UINT32_C(0x1) + /* Lossless RoCE */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS_ROCE \ + UINT32_C(0x1) + /* Lossy RoCE CNP */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID3_SERVICE_PROFILE_LOSSY_ROCE_CNP \ + UINT32_C(0x2) + /* Lossless NIC */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS_NIC \ + UINT32_C(0x3) + /* Set to 0xFF... (All Fs) if there is no service profile specified */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN \ + UINT32_C(0xff) + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID3_SERVICE_PROFILE_LAST \ + HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN + /* + * ID of CoS Queue 4. + * FF - Invalid id + * + * # This ID can be used on any subsequent call to an hwrm command + * that takes a queue id. + * # IDs must always be queried by this command before any use + * by the driver or software. + * # Any driver or software should not make any assumptions about + * queue IDs. + * # A value of 0xff indicates that the queue is not available. + * # Available queues may not be in sequential order. + */ + uint8_t queue_id4; + /* This value is applicable to CoS queues only. */ + uint8_t queue_id4_service_profile; + /* Lossy (best-effort) */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID4_SERVICE_PROFILE_LOSSY \ + UINT32_C(0x0) + /* Lossless (legacy) */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS \ + UINT32_C(0x1) + /* Lossless RoCE */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS_ROCE \ + UINT32_C(0x1) + /* Lossy RoCE CNP */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID4_SERVICE_PROFILE_LOSSY_ROCE_CNP \ + UINT32_C(0x2) + /* Lossless NIC */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS_NIC \ + UINT32_C(0x3) + /* Set to 0xFF... 
(All Fs) if there is no service profile specified */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN \ + UINT32_C(0xff) + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID4_SERVICE_PROFILE_LAST \ + HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN + /* + * ID of CoS Queue 5. + * FF - Invalid id + * + * # This ID can be used on any subsequent call to an hwrm command + * that takes a queue id. + * # IDs must always be queried by this command before any use + * by the driver or software. + * # Any driver or software should not make any assumptions about + * queue IDs. + * # A value of 0xff indicates that the queue is not available. + * # Available queues may not be in sequential order. + */ + uint8_t queue_id5; + /* This value is applicable to CoS queues only. */ + uint8_t queue_id5_service_profile; + /* Lossy (best-effort) */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID5_SERVICE_PROFILE_LOSSY \ + UINT32_C(0x0) + /* Lossless (legacy) */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS \ + UINT32_C(0x1) + /* Lossless RoCE */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS_ROCE \ + UINT32_C(0x1) + /* Lossy RoCE CNP */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID5_SERVICE_PROFILE_LOSSY_ROCE_CNP \ + UINT32_C(0x2) + /* Lossless NIC */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS_NIC \ + UINT32_C(0x3) + /* Set to 0xFF... (All Fs) if there is no service profile specified */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN \ + UINT32_C(0xff) + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID5_SERVICE_PROFILE_LAST \ + HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN + /* + * ID of CoS Queue 6. + * FF - Invalid id + * + * # This ID can be used on any subsequent call to an hwrm command + * that takes a queue id. + * # IDs must always be queried by this command before any use + * by the driver or software. + * # Any driver or software should not make any assumptions about + * queue IDs. + * # A value of 0xff indicates that the queue is not available. + * # Available queues may not be in sequential order. + */ + uint8_t queue_id6; + /* This value is applicable to CoS queues only. */ + uint8_t queue_id6_service_profile; + /* Lossy (best-effort) */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID6_SERVICE_PROFILE_LOSSY \ + UINT32_C(0x0) + /* Lossless (legacy) */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS \ + UINT32_C(0x1) + /* Lossless RoCE */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS_ROCE \ + UINT32_C(0x1) + /* Lossy RoCE CNP */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID6_SERVICE_PROFILE_LOSSY_ROCE_CNP \ + UINT32_C(0x2) + /* Lossless NIC */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS_NIC \ + UINT32_C(0x3) + /* Set to 0xFF... (All Fs) if there is no service profile specified */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN \ + UINT32_C(0xff) + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID6_SERVICE_PROFILE_LAST \ + HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN + /* + * ID of CoS Queue 7. + * FF - Invalid id + * + * # This ID can be used on any subsequent call to an hwrm command + * that takes a queue id. + * # IDs must always be queried by this command before any use + * by the driver or software. + * # Any driver or software should not make any assumptions about + * queue IDs. + * # A value of 0xff indicates that the queue is not available. 
+ * # Available queues may not be in sequential order. + */ + uint8_t queue_id7; + /* This value is applicable to CoS queues only. */ + uint8_t queue_id7_service_profile; + /* Lossy (best-effort) */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID7_SERVICE_PROFILE_LOSSY \ + UINT32_C(0x0) + /* Lossless (legacy) */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS \ + UINT32_C(0x1) + /* Lossless RoCE */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS_ROCE \ + UINT32_C(0x1) + /* Lossy RoCE CNP */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID7_SERVICE_PROFILE_LOSSY_ROCE_CNP \ + UINT32_C(0x2) + /* Lossless NIC */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS_NIC \ + UINT32_C(0x3) + /* Set to 0xFF... (All Fs) if there is no service profile specified */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN \ + UINT32_C(0xff) + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID7_SERVICE_PROFILE_LAST \ + HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/******************* + * hwrm_queue_qcfg * + *******************/ + + +/* hwrm_queue_qcfg_input (size:192b/24B) */ +struct hwrm_queue_qcfg_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + uint32_t flags; + /* + * Enumeration denoting the RX, TX type of the resource. + * This enumeration is used for resources that are similar for both + * TX and RX paths of the chip. + */ + #define HWRM_QUEUE_QCFG_INPUT_FLAGS_PATH UINT32_C(0x1) + /* tx path */ + #define HWRM_QUEUE_QCFG_INPUT_FLAGS_PATH_TX UINT32_C(0x0) + /* rx path */ + #define HWRM_QUEUE_QCFG_INPUT_FLAGS_PATH_RX UINT32_C(0x1) + #define HWRM_QUEUE_QCFG_INPUT_FLAGS_PATH_LAST \ + HWRM_QUEUE_QCFG_INPUT_FLAGS_PATH_RX + /* Queue ID of the queue. */ + uint32_t queue_id; +} __rte_packed; + +/* hwrm_queue_qcfg_output (size:128b/16B) */ +struct hwrm_queue_qcfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. 
*/ + uint16_t resp_len; + /* + * This value is the estimate packet length used in the + * TX arbiter. + */ + uint32_t queue_len; + /* This value is applicable to CoS queues only. */ + uint8_t service_profile; + /* Lossy (best-effort) */ + #define HWRM_QUEUE_QCFG_OUTPUT_SERVICE_PROFILE_LOSSY UINT32_C(0x0) + /* Lossless */ + #define HWRM_QUEUE_QCFG_OUTPUT_SERVICE_PROFILE_LOSSLESS UINT32_C(0x1) + /* Set to 0xFF... (All Fs) if there is no service profile specified */ + #define HWRM_QUEUE_QCFG_OUTPUT_SERVICE_PROFILE_UNKNOWN UINT32_C(0xff) + #define HWRM_QUEUE_QCFG_OUTPUT_SERVICE_PROFILE_LAST \ + HWRM_QUEUE_QCFG_OUTPUT_SERVICE_PROFILE_UNKNOWN + /* Information about queue configuration. */ + uint8_t queue_cfg_info; + /* + * If this flag is set to '1', then the queue is + * configured asymmetrically on TX and RX sides. + * If this flag is set to '0', then this queue is + * configured symmetrically on TX and RX sides. + */ + #define HWRM_QUEUE_QCFG_OUTPUT_QUEUE_CFG_INFO_ASYM_CFG \ + UINT32_C(0x1) + uint8_t unused_0; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/****************** + * hwrm_queue_cfg * + ******************/ + + +/* hwrm_queue_cfg_input (size:320b/40B) */ +struct hwrm_queue_cfg_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + uint32_t flags; + /* + * Enumeration denoting the RX, TX, or both directions applicable to the resource. + * This enumeration is used for resources that are similar for both + * TX and RX paths of the chip. + */ + #define HWRM_QUEUE_CFG_INPUT_FLAGS_PATH_MASK UINT32_C(0x3) + #define HWRM_QUEUE_CFG_INPUT_FLAGS_PATH_SFT 0 + /* tx path */ + #define HWRM_QUEUE_CFG_INPUT_FLAGS_PATH_TX UINT32_C(0x0) + /* rx path */ + #define HWRM_QUEUE_CFG_INPUT_FLAGS_PATH_RX UINT32_C(0x1) + /* Bi-directional (Symmetrically applicable to TX and RX paths) */ + #define HWRM_QUEUE_CFG_INPUT_FLAGS_PATH_BIDIR UINT32_C(0x2) + #define HWRM_QUEUE_CFG_INPUT_FLAGS_PATH_LAST \ + HWRM_QUEUE_CFG_INPUT_FLAGS_PATH_BIDIR + uint32_t enables; + /* + * This bit must be '1' for the dflt_len field to be + * configured. + */ + #define HWRM_QUEUE_CFG_INPUT_ENABLES_DFLT_LEN UINT32_C(0x1) + /* + * This bit must be '1' for the service_profile field to be + * configured. 
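+ * For example, a request that only changes the service profile would
+ * typically set just this bit in 'enables' so that dflt_len is left
+ * unchanged.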
+ */ + #define HWRM_QUEUE_CFG_INPUT_ENABLES_SERVICE_PROFILE UINT32_C(0x2) + /* Queue ID of queue that is to be configured by this function. */ + uint32_t queue_id; + /* + * This value is a the estimate packet length used in the + * TX arbiter. + * Set to 0xFF... (All Fs) to not adjust this value. + */ + uint32_t dflt_len; + /* This value is applicable to CoS queues only. */ + uint8_t service_profile; + /* Lossy (best-effort) */ + #define HWRM_QUEUE_CFG_INPUT_SERVICE_PROFILE_LOSSY UINT32_C(0x0) + /* Lossless */ + #define HWRM_QUEUE_CFG_INPUT_SERVICE_PROFILE_LOSSLESS UINT32_C(0x1) + /* Set to 0xFF... (All Fs) if there is no service profile specified */ + #define HWRM_QUEUE_CFG_INPUT_SERVICE_PROFILE_UNKNOWN UINT32_C(0xff) + #define HWRM_QUEUE_CFG_INPUT_SERVICE_PROFILE_LAST \ + HWRM_QUEUE_CFG_INPUT_SERVICE_PROFILE_UNKNOWN + uint8_t unused_0[7]; +} __rte_packed; + +/* hwrm_queue_cfg_output (size:128b/16B) */ +struct hwrm_queue_cfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/***************************** + * hwrm_queue_pfcenable_qcfg * + *****************************/ + + +/* hwrm_queue_pfcenable_qcfg_input (size:192b/24B) */ +struct hwrm_queue_pfcenable_qcfg_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* + * Port ID of port for which the table is being configured. + * The HWRM needs to check whether this function is allowed + * to configure pri2cos mapping on this port. + */ + uint16_t port_id; + uint8_t unused_0[6]; +} __rte_packed; + +/* hwrm_queue_pfcenable_qcfg_output (size:128b/16B) */ +struct hwrm_queue_pfcenable_qcfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint32_t flags; + /* If set to 1, then PFC is enabled on PRI 0. 
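+ * Bits 0-7 of 'flags' form a per-priority PFC enable bitmap
+ * (bit N corresponds to PRI N); bits 8-15 report the per-priority
+ * PFC watchdog request state in the same way.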
*/ + #define HWRM_QUEUE_PFCENABLE_QCFG_OUTPUT_FLAGS_PRI0_PFC_ENABLED \ + UINT32_C(0x1) + /* If set to 1, then PFC is enabled on PRI 1. */ + #define HWRM_QUEUE_PFCENABLE_QCFG_OUTPUT_FLAGS_PRI1_PFC_ENABLED \ + UINT32_C(0x2) + /* If set to 1, then PFC is enabled on PRI 2. */ + #define HWRM_QUEUE_PFCENABLE_QCFG_OUTPUT_FLAGS_PRI2_PFC_ENABLED \ + UINT32_C(0x4) + /* If set to 1, then PFC is enabled on PRI 3. */ + #define HWRM_QUEUE_PFCENABLE_QCFG_OUTPUT_FLAGS_PRI3_PFC_ENABLED \ + UINT32_C(0x8) + /* If set to 1, then PFC is enabled on PRI 4. */ + #define HWRM_QUEUE_PFCENABLE_QCFG_OUTPUT_FLAGS_PRI4_PFC_ENABLED \ + UINT32_C(0x10) + /* If set to 1, then PFC is enabled on PRI 5. */ + #define HWRM_QUEUE_PFCENABLE_QCFG_OUTPUT_FLAGS_PRI5_PFC_ENABLED \ + UINT32_C(0x20) + /* If set to 1, then PFC is enabled on PRI 6. */ + #define HWRM_QUEUE_PFCENABLE_QCFG_OUTPUT_FLAGS_PRI6_PFC_ENABLED \ + UINT32_C(0x40) + /* If set to 1, then PFC is enabled on PRI 7. */ + #define HWRM_QUEUE_PFCENABLE_QCFG_OUTPUT_FLAGS_PRI7_PFC_ENABLED \ + UINT32_C(0x80) + /* If set to 1, then PFC WatchDog is requested to be enabled on PRI0. */ + #define HWRM_QUEUE_PFCENABLE_QCFG_OUTPUT_FLAGS_PRI0_PFC_WATCHDOG_ENABLED \ + UINT32_C(0x100) + /* If set to 1, then PFC WatchDog is requested to be enabled on PRI1. */ + #define HWRM_QUEUE_PFCENABLE_QCFG_OUTPUT_FLAGS_PRI1_PFC_WATCHDOG_ENABLED \ + UINT32_C(0x200) + /* If set to 1, then PFC WatchDog is requested to be enabled on PRI2. */ + #define HWRM_QUEUE_PFCENABLE_QCFG_OUTPUT_FLAGS_PRI2_PFC_WATCHDOG_ENABLED \ + UINT32_C(0x400) + /* If set to 1, then PFC WatchDog is requested to be enabled on PRI3. */ + #define HWRM_QUEUE_PFCENABLE_QCFG_OUTPUT_FLAGS_PRI3_PFC_WATCHDOG_ENABLED \ + UINT32_C(0x800) + /* If set to 1, then PFC WatchDog is requested to be enabled on PRI4. */ + #define HWRM_QUEUE_PFCENABLE_QCFG_OUTPUT_FLAGS_PRI4_PFC_WATCHDOG_ENABLED \ + UINT32_C(0x1000) + /* If set to 1, then PFC WatchDog is requested to be enabled on PRI5. */ + #define HWRM_QUEUE_PFCENABLE_QCFG_OUTPUT_FLAGS_PRI5_PFC_WATCHDOG_ENABLED \ + UINT32_C(0x2000) + /* If set to 1, then PFC WatchDog is requested to be enabled on PRI6. */ + #define HWRM_QUEUE_PFCENABLE_QCFG_OUTPUT_FLAGS_PRI6_PFC_WATCHDOG_ENABLED \ + UINT32_C(0x4000) + /* If set to 1, then PFC WatchDog is requested to be enabled on PRI7. */ + #define HWRM_QUEUE_PFCENABLE_QCFG_OUTPUT_FLAGS_PRI7_PFC_WATCHDOG_ENABLED \ + UINT32_C(0x8000) + uint8_t unused_0[3]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/**************************** + * hwrm_queue_pfcenable_cfg * + ****************************/ + + +/* hwrm_queue_pfcenable_cfg_input (size:192b/24B) */ +struct hwrm_queue_pfcenable_cfg_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. 
+ */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + uint32_t flags; + /* If set to 1, then PFC is requested to be enabled on PRI 0. */ + #define HWRM_QUEUE_PFCENABLE_CFG_INPUT_FLAGS_PRI0_PFC_ENABLED \ + UINT32_C(0x1) + /* If set to 1, then PFC is requested to be enabled on PRI 1. */ + #define HWRM_QUEUE_PFCENABLE_CFG_INPUT_FLAGS_PRI1_PFC_ENABLED \ + UINT32_C(0x2) + /* If set to 1, then PFC is requested to be enabled on PRI 2. */ + #define HWRM_QUEUE_PFCENABLE_CFG_INPUT_FLAGS_PRI2_PFC_ENABLED \ + UINT32_C(0x4) + /* If set to 1, then PFC is requested to be enabled on PRI 3. */ + #define HWRM_QUEUE_PFCENABLE_CFG_INPUT_FLAGS_PRI3_PFC_ENABLED \ + UINT32_C(0x8) + /* If set to 1, then PFC is requested to be enabled on PRI 4. */ + #define HWRM_QUEUE_PFCENABLE_CFG_INPUT_FLAGS_PRI4_PFC_ENABLED \ + UINT32_C(0x10) + /* If set to 1, then PFC is requested to be enabled on PRI 5. */ + #define HWRM_QUEUE_PFCENABLE_CFG_INPUT_FLAGS_PRI5_PFC_ENABLED \ + UINT32_C(0x20) + /* If set to 1, then PFC is requested to be enabled on PRI 6. */ + #define HWRM_QUEUE_PFCENABLE_CFG_INPUT_FLAGS_PRI6_PFC_ENABLED \ + UINT32_C(0x40) + /* If set to 1, then PFC is requested to be enabled on PRI 7. */ + #define HWRM_QUEUE_PFCENABLE_CFG_INPUT_FLAGS_PRI7_PFC_ENABLED \ + UINT32_C(0x80) + /* If set to 1, then PFC WatchDog is requested to be enabled on PRI0. */ + #define HWRM_QUEUE_PFCENABLE_CFG_INPUT_FLAGS_PRI0_PFC_WATCHDOG_ENABLED \ + UINT32_C(0x100) + /* If set to 1, then PFC WatchDog is requested to be enabled on PRI1. */ + #define HWRM_QUEUE_PFCENABLE_CFG_INPUT_FLAGS_PRI1_PFC_WATCHDOG_ENABLED \ + UINT32_C(0x200) + /* If set to 1, then PFC WatchDog is requested to be enabled on PRI2. */ + #define HWRM_QUEUE_PFCENABLE_CFG_INPUT_FLAGS_PRI2_PFC_WATCHDOG_ENABLED \ + UINT32_C(0x400) + /* If set to 1, then PFC WatchDog is requested to be enabled on PRI3. */ + #define HWRM_QUEUE_PFCENABLE_CFG_INPUT_FLAGS_PRI3_PFC_WATCHDOG_ENABLED \ + UINT32_C(0x800) + /* If set to 1, then PFC WatchDog is requested to be enabled on PRI4. */ + #define HWRM_QUEUE_PFCENABLE_CFG_INPUT_FLAGS_PRI4_PFC_WATCHDOG_ENABLED \ + UINT32_C(0x1000) + /* If set to 1, then PFC WatchDog is requested to be enabled on PRI5. */ + #define HWRM_QUEUE_PFCENABLE_CFG_INPUT_FLAGS_PRI5_PFC_WATCHDOG_ENABLED \ + UINT32_C(0x2000) + /* If set to 1, then PFC WatchDog is requested to be enabled on PRI6. */ + #define HWRM_QUEUE_PFCENABLE_CFG_INPUT_FLAGS_PRI6_PFC_WATCHDOG_ENABLED \ + UINT32_C(0x4000) + /* If set to 1, then PFC WatchDog is requested to be enabled on PRI7. */ + #define HWRM_QUEUE_PFCENABLE_CFG_INPUT_FLAGS_PRI7_PFC_WATCHDOG_ENABLED \ + UINT32_C(0x8000) + /* + * Port ID of port for which the table is being configured. + * The HWRM needs to check whether this function is allowed + * to configure pri2cos mapping on this port. + */ + uint16_t port_id; + uint8_t unused_0[2]; +} __rte_packed; + +/* hwrm_queue_pfcenable_cfg_output (size:128b/16B) */ +struct hwrm_queue_pfcenable_cfg_output { + /* The specific error status for the command. 
*/ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/*************************** + * hwrm_queue_pri2cos_qcfg * + ***************************/ + + +/* hwrm_queue_pri2cos_qcfg_input (size:192b/24B) */ +struct hwrm_queue_pri2cos_qcfg_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + uint32_t flags; + /* + * Enumeration denoting the RX, TX type of the resource. + * This enumeration is used for resources that are similar for both + * TX and RX paths of the chip. + */ + #define HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_PATH UINT32_C(0x1) + /* tx path */ + #define HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_PATH_TX UINT32_C(0x0) + /* rx path */ + #define HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_PATH_RX UINT32_C(0x1) + #define HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_PATH_LAST \ + HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_PATH_RX + /* + * When this bit is set to '0', the query is + * for PRI from tunnel headers. + * When this bit is set to '1', the query is + * for PRI from inner packet headers. + */ + #define HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_IVLAN UINT32_C(0x2) + /* + * Port ID of port for which the table is being configured. + * The HWRM needs to check whether this function is allowed + * to configure pri2cos mapping on this port. + */ + uint8_t port_id; + uint8_t unused_0[3]; +} __rte_packed; + +/* hwrm_queue_pri2cos_qcfg_output (size:192b/24B) */ +struct hwrm_queue_pri2cos_qcfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* + * CoS Queue assigned to priority 0. This value can only + * be changed before traffic has started. + * A value of 0xff indicates that no CoS queue is assigned to the + * specified priority. + */ + uint8_t pri0_cos_queue_id; + /* + * CoS Queue assigned to priority 1. This value can only + * be changed before traffic has started. 
+ * A value of 0xff indicates that no CoS queue is assigned to the + * specified priority. + */ + uint8_t pri1_cos_queue_id; + /* + * CoS Queue assigned to priority 2. This value can only + * be changed before traffic has started. + * A value of 0xff indicates that no CoS queue is assigned to the + * specified priority. + */ + uint8_t pri2_cos_queue_id; + /* + * CoS Queue assigned to priority 3. This value can only + * be changed before traffic has started. + * A value of 0xff indicates that no CoS queue is assigned to the + * specified priority. + */ + uint8_t pri3_cos_queue_id; + /* + * CoS Queue assigned to priority 4. This value can only + * be changed before traffic has started. + * A value of 0xff indicates that no CoS queue is assigned to the + * specified priority. + */ + uint8_t pri4_cos_queue_id; + /* + * CoS Queue assigned to priority 5. This value can only + * be changed before traffic has started. + * A value of 0xff indicates that no CoS queue is assigned to the + * specified priority. + */ + uint8_t pri5_cos_queue_id; + /* + * CoS Queue assigned to priority 6. This value can only + * be changed before traffic has started. + * A value of 0xff indicates that no CoS queue is assigned to the + * specified priority. + */ + uint8_t pri6_cos_queue_id; + /* + * CoS Queue assigned to priority 7. This value can only + * be changed before traffic has started. + * A value of 0xff indicates that no CoS queue is assigned to the + * specified priority. + */ + uint8_t pri7_cos_queue_id; + /* Information about queue configuration. */ + uint8_t queue_cfg_info; + /* + * If this flag is set to '1', then the PRI to CoS + * configuration is asymmetric on TX and RX sides. + * If this flag is set to '0', then PRI to CoS configuration + * is symmetric on TX and RX sides. + */ + #define HWRM_QUEUE_PRI2COS_QCFG_OUTPUT_QUEUE_CFG_INFO_ASYM_CFG \ + UINT32_C(0x1) + uint8_t unused_0[6]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/************************** + * hwrm_queue_pri2cos_cfg * + **************************/ + + +/* hwrm_queue_pri2cos_cfg_input (size:320b/40B) */ +struct hwrm_queue_pri2cos_cfg_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. 
+ */ + uint64_t resp_addr; + uint32_t flags; + /* + * Enumeration denoting the RX, TX, or both directions applicable to the resource. + * This enumeration is used for resources that are similar for both + * TX and RX paths of the chip. + */ + #define HWRM_QUEUE_PRI2COS_CFG_INPUT_FLAGS_PATH_MASK UINT32_C(0x3) + #define HWRM_QUEUE_PRI2COS_CFG_INPUT_FLAGS_PATH_SFT 0 + /* tx path */ + #define HWRM_QUEUE_PRI2COS_CFG_INPUT_FLAGS_PATH_TX UINT32_C(0x0) + /* rx path */ + #define HWRM_QUEUE_PRI2COS_CFG_INPUT_FLAGS_PATH_RX UINT32_C(0x1) + /* Bi-directional (Symmetrically applicable to TX and RX paths) */ + #define HWRM_QUEUE_PRI2COS_CFG_INPUT_FLAGS_PATH_BIDIR UINT32_C(0x2) + #define HWRM_QUEUE_PRI2COS_CFG_INPUT_FLAGS_PATH_LAST \ + HWRM_QUEUE_PRI2COS_CFG_INPUT_FLAGS_PATH_BIDIR + /* + * When this bit is set to '0', the mapping is requested + * for PRI from tunnel headers. + * When this bit is set to '1', the mapping is requested + * for PRI from inner packet headers. + */ + #define HWRM_QUEUE_PRI2COS_CFG_INPUT_FLAGS_IVLAN UINT32_C(0x4) + uint32_t enables; + /* + * This bit must be '1' for the pri0_cos_queue_id field to be + * configured. + */ + #define HWRM_QUEUE_PRI2COS_CFG_INPUT_ENABLES_PRI0_COS_QUEUE_ID \ + UINT32_C(0x1) + /* + * This bit must be '1' for the pri1_cos_queue_id field to be + * configured. + */ + #define HWRM_QUEUE_PRI2COS_CFG_INPUT_ENABLES_PRI1_COS_QUEUE_ID \ + UINT32_C(0x2) + /* + * This bit must be '1' for the pri2_cos_queue_id field to be + * configured. + */ + #define HWRM_QUEUE_PRI2COS_CFG_INPUT_ENABLES_PRI2_COS_QUEUE_ID \ + UINT32_C(0x4) + /* + * This bit must be '1' for the pri3_cos_queue_id field to be + * configured. + */ + #define HWRM_QUEUE_PRI2COS_CFG_INPUT_ENABLES_PRI3_COS_QUEUE_ID \ + UINT32_C(0x8) + /* + * This bit must be '1' for the pri4_cos_queue_id field to be + * configured. + */ + #define HWRM_QUEUE_PRI2COS_CFG_INPUT_ENABLES_PRI4_COS_QUEUE_ID \ + UINT32_C(0x10) + /* + * This bit must be '1' for the pri5_cos_queue_id field to be + * configured. + */ + #define HWRM_QUEUE_PRI2COS_CFG_INPUT_ENABLES_PRI5_COS_QUEUE_ID \ + UINT32_C(0x20) + /* + * This bit must be '1' for the pri6_cos_queue_id field to be + * configured. + */ + #define HWRM_QUEUE_PRI2COS_CFG_INPUT_ENABLES_PRI6_COS_QUEUE_ID \ + UINT32_C(0x40) + /* + * This bit must be '1' for the pri7_cos_queue_id field to be + * configured. + */ + #define HWRM_QUEUE_PRI2COS_CFG_INPUT_ENABLES_PRI7_COS_QUEUE_ID \ + UINT32_C(0x80) + /* + * Port ID of port for which the table is being configured. + * The HWRM needs to check whether this function is allowed + * to configure pri2cos mapping on this port. + */ + uint8_t port_id; + /* + * CoS Queue assigned to priority 0. This value can only + * be changed before traffic has started. + */ + uint8_t pri0_cos_queue_id; + /* + * CoS Queue assigned to priority 1. This value can only + * be changed before traffic has started. + */ + uint8_t pri1_cos_queue_id; + /* + * CoS Queue assigned to priority 2 This value can only + * be changed before traffic has started. + */ + uint8_t pri2_cos_queue_id; + /* + * CoS Queue assigned to priority 3. This value can only + * be changed before traffic has started. + */ + uint8_t pri3_cos_queue_id; + /* + * CoS Queue assigned to priority 4. This value can only + * be changed before traffic has started. + */ + uint8_t pri4_cos_queue_id; + /* + * CoS Queue assigned to priority 5. This value can only + * be changed before traffic has started. + */ + uint8_t pri5_cos_queue_id; + /* + * CoS Queue assigned to priority 6. 
This value can only + * be changed before traffic has started. + */ + uint8_t pri6_cos_queue_id; + /* + * CoS Queue assigned to priority 7. This value can only + * be changed before traffic has started. + */ + uint8_t pri7_cos_queue_id; + uint8_t unused_0[7]; +} __rte_packed; + +/* hwrm_queue_pri2cos_cfg_output (size:128b/16B) */ +struct hwrm_queue_pri2cos_cfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/************************** + * hwrm_queue_cos2bw_qcfg * + **************************/ + + +/* hwrm_queue_cos2bw_qcfg_input (size:192b/24B) */ +struct hwrm_queue_cos2bw_qcfg_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* + * Port ID of port for which the table is being configured. + * The HWRM needs to check whether this function is allowed + * to configure TC BW assignment on this port. + */ + uint16_t port_id; + uint8_t unused_0[6]; +} __rte_packed; + +/* hwrm_queue_cos2bw_qcfg_output (size:896b/112B) */ +struct hwrm_queue_cos2bw_qcfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* ID of CoS Queue 0. */ + uint8_t queue_id0; + uint8_t unused_0; + uint16_t unused_1; + /* + * Minimum BW allocated to CoS Queue. + * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. + */ + uint32_t queue_id0_min_bw; + /* The bandwidth value. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. 
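+ *
+ * Illustrative decode of the whole queue_id0_min_bw word (a sketch,
+ * not part of the HWRM definition): bits [27:0] carry the value,
+ * bit 28 the scale and bits [31:29] the unit, so a reader might do
+ *
+ *   uint32_t bw    = rte_le_to_cpu_32(resp->queue_id0_min_bw);
+ *   uint32_t value = bw &
+ *       HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_BW_VALUE_MASK;
+ *   uint32_t scale = bw &
+ *       HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_SCALE;
+ *   uint32_t unit  = bw &
+ *       HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK;
+ *
+ * where 'resp' is an assumed pointer to this response structure and
+ * rte_le_to_cpu_32() only matters on big-endian hosts.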
*/ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID + /* + * Maximum BW allocated to CoS Queue. + * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. + */ + uint32_t queue_id0_max_bw; + /* The bandwidth value. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. 
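+ * With this unit a bandwidth value of 2500, for example, represents
+ * 25.00% and 10000 represents 100% of the port's total bandwidth.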
*/ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID + /* Transmission Selection Algorithm (TSA) for CoS Queue. */ + uint8_t queue_id0_tsa_assign; + /* Strict Priority */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_TSA_ASSIGN_SP \ + UINT32_C(0x0) + /* Enhanced Transmission Selection */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_TSA_ASSIGN_ETS \ + UINT32_C(0x1) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_TSA_ASSIGN_RESERVED_FIRST \ + UINT32_C(0x2) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST \ + UINT32_C(0xff) + /* + * Priority level for strict priority. Valid only when the + * tsa_assign is 0 - Strict Priority (SP) + * 0..7 - Valid values. + * 8..255 - Reserved. + */ + uint8_t queue_id0_pri_lvl; + /* + * Weight used to allocate remaining BW for this COS after + * servicing guaranteed bandwidths for all COS. + */ + uint8_t queue_id0_bw_weight; + /* ID of CoS Queue 1. */ + uint8_t queue_id1; + /* + * Minimum BW allocated to CoS Queue. + * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. + */ + uint32_t queue_id1_min_bw; + /* The bandwidth value. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID + /* + * Maximum BW allocated to CoS queue. 
+ * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. + */ + uint32_t queue_id1_max_bw; + /* The bandwidth value. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID + /* Transmission Selection Algorithm (TSA) for CoS Queue. */ + uint8_t queue_id1_tsa_assign; + /* Strict Priority */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_TSA_ASSIGN_SP \ + UINT32_C(0x0) + /* Enhanced Transmission Selection */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_TSA_ASSIGN_ETS \ + UINT32_C(0x1) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_TSA_ASSIGN_RESERVED_FIRST \ + UINT32_C(0x2) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST \ + UINT32_C(0xff) + /* + * Priority level for strict priority. Valid only when the + * tsa_assign is 0 - Strict Priority (SP) + * 0..7 - Valid values. + * 8..255 - Reserved. + */ + uint8_t queue_id1_pri_lvl; + /* + * Weight used to allocate remaining BW for this COS after + * servicing guaranteed bandwidths for all COS. + */ + uint8_t queue_id1_bw_weight; + /* ID of CoS Queue 2. */ + uint8_t queue_id2; + /* + * Minimum BW allocated to CoS Queue. + * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. + */ + uint32_t queue_id2_min_bw; + /* The bandwidth value. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). 
*/ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID + /* + * Maximum BW allocated to CoS queue. + * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. + */ + uint32_t queue_id2_max_bw; + /* The bandwidth value. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. 
*/ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID + /* Transmission Selection Algorithm (TSA) for CoS Queue. */ + uint8_t queue_id2_tsa_assign; + /* Strict Priority */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_TSA_ASSIGN_SP \ + UINT32_C(0x0) + /* Enhanced Transmission Selection */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_TSA_ASSIGN_ETS \ + UINT32_C(0x1) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_TSA_ASSIGN_RESERVED_FIRST \ + UINT32_C(0x2) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST \ + UINT32_C(0xff) + /* + * Priority level for strict priority. Valid only when the + * tsa_assign is 0 - Strict Priority (SP) + * 0..7 - Valid values. + * 8..255 - Reserved. + */ + uint8_t queue_id2_pri_lvl; + /* + * Weight used to allocate remaining BW for this COS after + * servicing guaranteed bandwidths for all COS. + */ + uint8_t queue_id2_bw_weight; + /* ID of CoS Queue 3. */ + uint8_t queue_id3; + /* + * Minimum BW allocated to CoS Queue. + * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. + */ + uint32_t queue_id3_min_bw; + /* The bandwidth value. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID + /* + * Maximum BW allocated to CoS queue. 
+ * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. + */ + uint32_t queue_id3_max_bw; + /* The bandwidth value. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID + /* Transmission Selection Algorithm (TSA) for CoS Queue. */ + uint8_t queue_id3_tsa_assign; + /* Strict Priority */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_TSA_ASSIGN_SP \ + UINT32_C(0x0) + /* Enhanced Transmission Selection */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_TSA_ASSIGN_ETS \ + UINT32_C(0x1) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_TSA_ASSIGN_RESERVED_FIRST \ + UINT32_C(0x2) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST \ + UINT32_C(0xff) + /* + * Priority level for strict priority. Valid only when the + * tsa_assign is 0 - Strict Priority (SP) + * 0..7 - Valid values. + * 8..255 - Reserved. + */ + uint8_t queue_id3_pri_lvl; + /* + * Weight used to allocate remaining BW for this COS after + * servicing guaranteed bandwidths for all COS. + */ + uint8_t queue_id3_bw_weight; + /* ID of CoS Queue 4. */ + uint8_t queue_id4; + /* + * Minimum BW allocated to CoS Queue. + * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. + */ + uint32_t queue_id4_min_bw; + /* The bandwidth value. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). 
*/ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID + /* + * Maximum BW allocated to CoS queue. + * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. + */ + uint32_t queue_id4_max_bw; + /* The bandwidth value. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. 
*/ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID + /* Transmission Selection Algorithm (TSA) for CoS Queue. */ + uint8_t queue_id4_tsa_assign; + /* Strict Priority */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_TSA_ASSIGN_SP \ + UINT32_C(0x0) + /* Enhanced Transmission Selection */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_TSA_ASSIGN_ETS \ + UINT32_C(0x1) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_TSA_ASSIGN_RESERVED_FIRST \ + UINT32_C(0x2) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST \ + UINT32_C(0xff) + /* + * Priority level for strict priority. Valid only when the + * tsa_assign is 0 - Strict Priority (SP) + * 0..7 - Valid values. + * 8..255 - Reserved. + */ + uint8_t queue_id4_pri_lvl; + /* + * Weight used to allocate remaining BW for this COS after + * servicing guaranteed bandwidths for all COS. + */ + uint8_t queue_id4_bw_weight; + /* ID of CoS Queue 5. */ + uint8_t queue_id5; + /* + * Minimum BW allocated to CoS Queue. + * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. + */ + uint32_t queue_id5_min_bw; + /* The bandwidth value. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID + /* + * Maximum BW allocated to CoS queue. 
+ * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. + */ + uint32_t queue_id5_max_bw; + /* The bandwidth value. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID + /* Transmission Selection Algorithm (TSA) for CoS Queue. */ + uint8_t queue_id5_tsa_assign; + /* Strict Priority */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_TSA_ASSIGN_SP \ + UINT32_C(0x0) + /* Enhanced Transmission Selection */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_TSA_ASSIGN_ETS \ + UINT32_C(0x1) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_TSA_ASSIGN_RESERVED_FIRST \ + UINT32_C(0x2) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST \ + UINT32_C(0xff) + /* + * Priority level for strict priority. Valid only when the + * tsa_assign is 0 - Strict Priority (SP) + * 0..7 - Valid values. + * 8..255 - Reserved. + */ + uint8_t queue_id5_pri_lvl; + /* + * Weight used to allocate remaining BW for this COS after + * servicing guaranteed bandwidths for all COS. + */ + uint8_t queue_id5_bw_weight; + /* ID of CoS Queue 6. */ + uint8_t queue_id6; + /* + * Minimum BW allocated to CoS Queue. + * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. + */ + uint32_t queue_id6_min_bw; + /* The bandwidth value. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). 
*/ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID + /* + * Maximum BW allocated to CoS queue. + * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. + */ + uint32_t queue_id6_max_bw; + /* The bandwidth value. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. 
*/ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID + /* Transmission Selection Algorithm (TSA) for CoS Queue. */ + uint8_t queue_id6_tsa_assign; + /* Strict Priority */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_TSA_ASSIGN_SP \ + UINT32_C(0x0) + /* Enhanced Transmission Selection */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_TSA_ASSIGN_ETS \ + UINT32_C(0x1) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_TSA_ASSIGN_RESERVED_FIRST \ + UINT32_C(0x2) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST \ + UINT32_C(0xff) + /* + * Priority level for strict priority. Valid only when the + * tsa_assign is 0 - Strict Priority (SP) + * 0..7 - Valid values. + * 8..255 - Reserved. + */ + uint8_t queue_id6_pri_lvl; + /* + * Weight used to allocate remaining BW for this COS after + * servicing guaranteed bandwidths for all COS. + */ + uint8_t queue_id6_bw_weight; + /* ID of CoS Queue 7. */ + uint8_t queue_id7; + /* + * Minimum BW allocated to CoS Queue. + * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. + */ + uint32_t queue_id7_min_bw; + /* The bandwidth value. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID + /* + * Maximum BW allocated to CoS queue. 
+ * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. + */ + uint32_t queue_id7_max_bw; + /* The bandwidth value. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID + /* Transmission Selection Algorithm (TSA) for CoS Queue. */ + uint8_t queue_id7_tsa_assign; + /* Strict Priority */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_TSA_ASSIGN_SP \ + UINT32_C(0x0) + /* Enhanced Transmission Selection */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_TSA_ASSIGN_ETS \ + UINT32_C(0x1) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_TSA_ASSIGN_RESERVED_FIRST \ + UINT32_C(0x2) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST \ + UINT32_C(0xff) + /* + * Priority level for strict priority. Valid only when the + * tsa_assign is 0 - Strict Priority (SP) + * 0..7 - Valid values. + * 8..255 - Reserved. + */ + uint8_t queue_id7_pri_lvl; + /* + * Weight used to allocate remaining BW for this COS after + * servicing guaranteed bandwidths for all COS. + */ + uint8_t queue_id7_bw_weight; + uint8_t unused_2[4]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. 
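+ *
+ * A consumer would therefore typically poll this byte before reading
+ * the rest of the response, e.g. (illustrative sketch only):
+ *
+ *   while (((volatile struct hwrm_queue_cos2bw_qcfg_output *)
+ *           resp)->valid == 0)
+ *           ;
+ *
+ * Real code would bound this wait with a timeout; 'resp' is an
+ * assumed pointer to this structure in the DMA response buffer.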
+ */ + uint8_t valid; +} __rte_packed; + +/************************* + * hwrm_queue_cos2bw_cfg * + *************************/ + + +/* hwrm_queue_cos2bw_cfg_input (size:1024b/128B) */ +struct hwrm_queue_cos2bw_cfg_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + uint32_t flags; + uint32_t enables; + /* + * If this bit is set to 1, then all queue_id0 related + * parameters in this command are valid. + */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_ENABLES_COS_QUEUE_ID0_VALID \ + UINT32_C(0x1) + /* + * If this bit is set to 1, then all queue_id1 related + * parameters in this command are valid. + */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_ENABLES_COS_QUEUE_ID1_VALID \ + UINT32_C(0x2) + /* + * If this bit is set to 1, then all queue_id2 related + * parameters in this command are valid. + */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_ENABLES_COS_QUEUE_ID2_VALID \ + UINT32_C(0x4) + /* + * If this bit is set to 1, then all queue_id3 related + * parameters in this command are valid. + */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_ENABLES_COS_QUEUE_ID3_VALID \ + UINT32_C(0x8) + /* + * If this bit is set to 1, then all queue_id4 related + * parameters in this command are valid. + */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_ENABLES_COS_QUEUE_ID4_VALID \ + UINT32_C(0x10) + /* + * If this bit is set to 1, then all queue_id5 related + * parameters in this command are valid. + */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_ENABLES_COS_QUEUE_ID5_VALID \ + UINT32_C(0x20) + /* + * If this bit is set to 1, then all queue_id6 related + * parameters in this command are valid. + */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_ENABLES_COS_QUEUE_ID6_VALID \ + UINT32_C(0x40) + /* + * If this bit is set to 1, then all queue_id7 related + * parameters in this command are valid. + */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_ENABLES_COS_QUEUE_ID7_VALID \ + UINT32_C(0x80) + /* + * Port ID of port for which the table is being configured. + * The HWRM needs to check whether this function is allowed + * to configure TC BW assignment on this port. + */ + uint16_t port_id; + /* ID of CoS Queue 0. */ + uint8_t queue_id0; + uint8_t unused_0; + /* + * Minimum BW allocated to CoS Queue. + * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. + */ + uint32_t queue_id0_min_bw; + /* The bandwidth value. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). 
*/ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID + /* + * Maximum BW allocated to CoS Queue. + * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. + */ + uint32_t queue_id0_max_bw; + /* The bandwidth value. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. 
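+ *
+ * Illustrative encoding sketch (not part of the HWRM definition): to
+ * cap CoS queue 0 at half of the port bandwidth a caller could set
+ *
+ *   req.queue_id0_max_bw = rte_cpu_to_le_32((5000 &
+ *       HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_BW_VALUE_MASK) |
+ *       HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_PERCENT1_100);
+ *
+ * together with HWRM_QUEUE_COS2BW_CFG_INPUT_ENABLES_COS_QUEUE_ID0_VALID
+ * in 'enables'; 'req' is an assumed hwrm_queue_cos2bw_cfg_input
+ * instance, and the matching queue_id0_tsa_assign / queue_id0_bw_weight
+ * fields select how the remaining bandwidth is scheduled.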
*/ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID + /* Transmission Selection Algorithm (TSA) for CoS Queue. */ + uint8_t queue_id0_tsa_assign; + /* Strict Priority */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_TSA_ASSIGN_SP \ + UINT32_C(0x0) + /* Enhanced Transmission Selection */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_TSA_ASSIGN_ETS \ + UINT32_C(0x1) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_TSA_ASSIGN_RESERVED_FIRST \ + UINT32_C(0x2) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST \ + UINT32_C(0xff) + /* + * Priority level for strict priority. Valid only when the + * tsa_assign is 0 - Strict Priority (SP) + * 0..7 - Valid values. + * 8..255 - Reserved. + */ + uint8_t queue_id0_pri_lvl; + /* + * Weight used to allocate remaining BW for this COS after + * servicing guaranteed bandwidths for all COS. + */ + uint8_t queue_id0_bw_weight; + /* ID of CoS Queue 1. */ + uint8_t queue_id1; + /* + * Minimum BW allocated to CoS Queue. + * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. + */ + uint32_t queue_id1_min_bw; + /* The bandwidth value. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID + /* + * Maximum BW allocated to CoS queue. 
+ * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. + */ + uint32_t queue_id1_max_bw; + /* The bandwidth value. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID + /* Transmission Selection Algorithm (TSA) for CoS Queue. */ + uint8_t queue_id1_tsa_assign; + /* Strict Priority */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_TSA_ASSIGN_SP \ + UINT32_C(0x0) + /* Enhanced Transmission Selection */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_TSA_ASSIGN_ETS \ + UINT32_C(0x1) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_TSA_ASSIGN_RESERVED_FIRST \ + UINT32_C(0x2) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST \ + UINT32_C(0xff) + /* + * Priority level for strict priority. Valid only when the + * tsa_assign is 0 - Strict Priority (SP) + * 0..7 - Valid values. + * 8..255 - Reserved. + */ + uint8_t queue_id1_pri_lvl; + /* + * Weight used to allocate remaining BW for this COS after + * servicing guaranteed bandwidths for all COS. + */ + uint8_t queue_id1_bw_weight; + /* ID of CoS Queue 2. */ + uint8_t queue_id2; + /* + * Minimum BW allocated to CoS Queue. + * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. + */ + uint32_t queue_id2_min_bw; + /* The bandwidth value. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. 
*/ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID + /* + * Maximum BW allocated to CoS queue. + * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. + */ + uint32_t queue_id2_max_bw; + /* The bandwidth value. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. 
*/ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID + /* Transmission Selection Algorithm (TSA) for CoS Queue. */ + uint8_t queue_id2_tsa_assign; + /* Strict Priority */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_TSA_ASSIGN_SP \ + UINT32_C(0x0) + /* Enhanced Transmission Selection */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_TSA_ASSIGN_ETS \ + UINT32_C(0x1) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_TSA_ASSIGN_RESERVED_FIRST \ + UINT32_C(0x2) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST \ + UINT32_C(0xff) + /* + * Priority level for strict priority. Valid only when the + * tsa_assign is 0 - Strict Priority (SP) + * 0..7 - Valid values. + * 8..255 - Reserved. + */ + uint8_t queue_id2_pri_lvl; + /* + * Weight used to allocate remaining BW for this COS after + * servicing guaranteed bandwidths for all COS. + */ + uint8_t queue_id2_bw_weight; + /* ID of CoS Queue 3. */ + uint8_t queue_id3; + /* + * Minimum BW allocated to CoS Queue. + * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. + */ + uint32_t queue_id3_min_bw; + /* The bandwidth value. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID + /* + * Maximum BW allocated to CoS queue. 
+ * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. + */ + uint32_t queue_id3_max_bw; + /* The bandwidth value. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID + /* Transmission Selection Algorithm (TSA) for CoS Queue. */ + uint8_t queue_id3_tsa_assign; + /* Strict Priority */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_TSA_ASSIGN_SP \ + UINT32_C(0x0) + /* Enhanced Transmission Selection */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_TSA_ASSIGN_ETS \ + UINT32_C(0x1) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_TSA_ASSIGN_RESERVED_FIRST \ + UINT32_C(0x2) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST \ + UINT32_C(0xff) + /* + * Priority level for strict priority. Valid only when the + * tsa_assign is 0 - Strict Priority (SP) + * 0..7 - Valid values. + * 8..255 - Reserved. + */ + uint8_t queue_id3_pri_lvl; + /* + * Weight used to allocate remaining BW for this COS after + * servicing guaranteed bandwidths for all COS. + */ + uint8_t queue_id3_bw_weight; + /* ID of CoS Queue 4. */ + uint8_t queue_id4; + /* + * Minimum BW allocated to CoS Queue. + * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. + */ + uint32_t queue_id4_min_bw; + /* The bandwidth value. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. 
*/ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID + /* + * Maximum BW allocated to CoS queue. + * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. + */ + uint32_t queue_id4_max_bw; + /* The bandwidth value. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. 
*/ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID + /* Transmission Selection Algorithm (TSA) for CoS Queue. */ + uint8_t queue_id4_tsa_assign; + /* Strict Priority */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_TSA_ASSIGN_SP \ + UINT32_C(0x0) + /* Enhanced Transmission Selection */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_TSA_ASSIGN_ETS \ + UINT32_C(0x1) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_TSA_ASSIGN_RESERVED_FIRST \ + UINT32_C(0x2) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST \ + UINT32_C(0xff) + /* + * Priority level for strict priority. Valid only when the + * tsa_assign is 0 - Strict Priority (SP) + * 0..7 - Valid values. + * 8..255 - Reserved. + */ + uint8_t queue_id4_pri_lvl; + /* + * Weight used to allocate remaining BW for this COS after + * servicing guaranteed bandwidths for all COS. + */ + uint8_t queue_id4_bw_weight; + /* ID of CoS Queue 5. */ + uint8_t queue_id5; + /* + * Minimum BW allocated to CoS Queue. + * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. + */ + uint32_t queue_id5_min_bw; + /* The bandwidth value. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID + /* + * Maximum BW allocated to CoS queue. 
+ * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. + */ + uint32_t queue_id5_max_bw; + /* The bandwidth value. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID + /* Transmission Selection Algorithm (TSA) for CoS Queue. */ + uint8_t queue_id5_tsa_assign; + /* Strict Priority */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_TSA_ASSIGN_SP \ + UINT32_C(0x0) + /* Enhanced Transmission Selection */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_TSA_ASSIGN_ETS \ + UINT32_C(0x1) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_TSA_ASSIGN_RESERVED_FIRST \ + UINT32_C(0x2) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST \ + UINT32_C(0xff) + /* + * Priority level for strict priority. Valid only when the + * tsa_assign is 0 - Strict Priority (SP) + * 0..7 - Valid values. + * 8..255 - Reserved. + */ + uint8_t queue_id5_pri_lvl; + /* + * Weight used to allocate remaining BW for this COS after + * servicing guaranteed bandwidths for all COS. + */ + uint8_t queue_id5_bw_weight; + /* ID of CoS Queue 6. */ + uint8_t queue_id6; + /* + * Minimum BW allocated to CoS Queue. + * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. + */ + uint32_t queue_id6_min_bw; + /* The bandwidth value. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. 
*/ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID + /* + * Maximum BW allocated to CoS queue. + * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. + */ + uint32_t queue_id6_max_bw; + /* The bandwidth value. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. 
*/ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID + /* Transmission Selection Algorithm (TSA) for CoS Queue. */ + uint8_t queue_id6_tsa_assign; + /* Strict Priority */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_TSA_ASSIGN_SP \ + UINT32_C(0x0) + /* Enhanced Transmission Selection */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_TSA_ASSIGN_ETS \ + UINT32_C(0x1) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_TSA_ASSIGN_RESERVED_FIRST \ + UINT32_C(0x2) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST \ + UINT32_C(0xff) + /* + * Priority level for strict priority. Valid only when the + * tsa_assign is 0 - Strict Priority (SP) + * 0..7 - Valid values. + * 8..255 - Reserved. + */ + uint8_t queue_id6_pri_lvl; + /* + * Weight used to allocate remaining BW for this COS after + * servicing guaranteed bandwidths for all COS. + */ + uint8_t queue_id6_bw_weight; + /* ID of CoS Queue 7. */ + uint8_t queue_id7; + /* + * Minimum BW allocated to CoS Queue. + * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. + */ + uint32_t queue_id7_min_bw; + /* The bandwidth value. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID + /* + * Maximum BW allocated to CoS queue. 
+ * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. + */ + uint32_t queue_id7_max_bw; + /* The bandwidth value. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID + /* Transmission Selection Algorithm (TSA) for CoS Queue. */ + uint8_t queue_id7_tsa_assign; + /* Strict Priority */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_TSA_ASSIGN_SP \ + UINT32_C(0x0) + /* Enhanced Transmission Selection */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_TSA_ASSIGN_ETS \ + UINT32_C(0x1) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_TSA_ASSIGN_RESERVED_FIRST \ + UINT32_C(0x2) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST \ + UINT32_C(0xff) + /* + * Priority level for strict priority. Valid only when the + * tsa_assign is 0 - Strict Priority (SP) + * 0..7 - Valid values. + * 8..255 - Reserved. + */ + uint8_t queue_id7_pri_lvl; + /* + * Weight used to allocate remaining BW for this COS after + * servicing guaranteed bandwidths for all COS. + */ + uint8_t queue_id7_bw_weight; + uint8_t unused_1[5]; +} __rte_packed; + +/* hwrm_queue_cos2bw_cfg_output (size:128b/16B) */ +struct hwrm_queue_cos2bw_cfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. 
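
The min_bw/max_bw words used throughout this request pack three fields: a 28-bit bandwidth value (bits 0-27), a scale bit selecting bits or bytes (bit 28), and a 3-bit unit (bits 29-31). A minimal sketch of composing such a word for CoS queue 0 follows; bnxt_cos2bw_example() is a hypothetical helper, the 10 Gb cap and weight of 25 are arbitrary, and rte_cpu_to_le_32() is used on the assumption that HWRM request fields are little-endian on the wire. The same layout repeats for queue_id1 through queue_id7.

#include <rte_byteorder.h>
/* assumes the hwrm_queue_cos2bw_cfg_input definitions above are in scope */

static void
bnxt_cos2bw_example(struct hwrm_queue_cos2bw_cfg_input *req)
{
	uint32_t bw = 10;	/* bandwidth value: 10 units of 1 Gb */

	bw &= HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_BW_VALUE_MASK;
	bw |= HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_SCALE_BITS;
	bw |= HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_GIGA;
	req->queue_id0_max_bw = rte_cpu_to_le_32(bw);

	/* ETS with a relative weight; pri_lvl matters only for strict priority */
	req->queue_id0_tsa_assign =
		HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_TSA_ASSIGN_ETS;
	req->queue_id0_bw_weight = 25;
}
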
This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/************************* + * hwrm_queue_dscp_qcaps * + *************************/ + + +/* hwrm_queue_dscp_qcaps_input (size:192b/24B) */ +struct hwrm_queue_dscp_qcaps_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* + * Port ID of port for which the table is being configured. + * The HWRM needs to check whether this function is allowed + * to configure pri2cos mapping on this port. + */ + uint8_t port_id; + uint8_t unused_0[7]; +} __rte_packed; + +/* hwrm_queue_dscp_qcaps_output (size:128b/16B) */ +struct hwrm_queue_dscp_qcaps_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* The number of bits provided by the hardware for the DSCP value. */ + uint8_t num_dscp_bits; + uint8_t unused_0; + /* Max number of DSCP-MASK-PRI entries supported. */ + uint16_t max_entries; + uint8_t unused_1[3]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/**************************** + * hwrm_queue_dscp2pri_qcfg * + ****************************/ + + +/* hwrm_queue_dscp2pri_qcfg_input (size:256b/32B) */ +struct hwrm_queue_dscp2pri_qcfg_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. 
+ */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* + * This is the host address where the 24-bits DSCP-MASK-PRI + * tuple(s) will be copied to. + */ + uint64_t dest_data_addr; + /* + * Port ID of port for which the table is being configured. + * The HWRM needs to check whether this function is allowed + * to configure pri2cos mapping on this port. + */ + uint8_t port_id; + uint8_t unused_0; + /* Size of the buffer pointed to by dest_data_addr. */ + uint16_t dest_data_buffer_size; + uint8_t unused_1[4]; +} __rte_packed; + +/* hwrm_queue_dscp2pri_qcfg_output (size:128b/16B) */ +struct hwrm_queue_dscp2pri_qcfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* + * A count of the number of DSCP-MASK-PRI tuple(s) pointed to + * by the dest_data_addr. + */ + uint16_t entry_cnt; + /* + * This is the default PRI which un-initialized DSCP values are + * mapped to. + */ + uint8_t default_pri; + uint8_t unused_0[4]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/*************************** + * hwrm_queue_dscp2pri_cfg * + ***************************/ + + +/* hwrm_queue_dscp2pri_cfg_input (size:320b/40B) */ +struct hwrm_queue_dscp2pri_cfg_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* + * This is the host address where the 24-bits DSCP-MASK-PRI tuple + * will be copied from. 
+ */ + uint64_t src_data_addr; + uint32_t flags; + /* use_hw_default_pri is 1 b */ + #define HWRM_QUEUE_DSCP2PRI_CFG_INPUT_FLAGS_USE_HW_DEFAULT_PRI \ + UINT32_C(0x1) + uint32_t enables; + /* + * This bit must be '1' for the default_pri field to be + * configured. + */ + #define HWRM_QUEUE_DSCP2PRI_CFG_INPUT_ENABLES_DEFAULT_PRI \ + UINT32_C(0x1) + /* + * Port ID of port for which the table is being configured. + * The HWRM needs to check whether this function is allowed + * to configure pri2cos mapping on this port. + */ + uint8_t port_id; + /* + * This is the default PRI which un-initialized DSCP values will be + * mapped to. + */ + uint8_t default_pri; + /* + * A count of the number of DSCP-MASK-PRI tuple(s) in the data pointed + * to by src_data_addr. + */ + uint16_t entry_cnt; + uint8_t unused_0[4]; +} __rte_packed; + +/* hwrm_queue_dscp2pri_cfg_output (size:128b/16B) */ +struct hwrm_queue_dscp2pri_cfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/************************* + * hwrm_queue_mpls_qcaps * + *************************/ + + +/* hwrm_queue_mpls_qcaps_input (size:192b/24B) */ +struct hwrm_queue_mpls_qcaps_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* + * Port ID of port for which the table is being configured. + * The HWRM needs to check whether this function is allowed + * to configure MPLS TC(EXP) to pri mapping on this port. + */ + uint8_t port_id; + uint8_t unused_0[7]; +} __rte_packed; + +/* hwrm_queue_mpls_qcaps_output (size:128b/16B) */ +struct hwrm_queue_mpls_qcaps_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* + * Bitmask indicating which queues can be configured by the + * hwrm_queue_mplstc2pri_cfg command. 
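
hwrm_queue_dscp2pri_cfg carries its mapping table out of band: src_data_addr points at a host buffer holding entry_cnt 24-bit DSCP-MASK-PRI tuples. The sketch below assumes one byte each for the DSCP value, the DSCP mask, and the priority, which is a reading of the comment above rather than a layout defined in this header; bnxt_dscp2pri_entry and bnxt_dscp2pri_fill() are hypothetical names, and buf is assumed to sit in DMA-capable memory whose IO address (buf_iova) is already known.

#include <rte_byteorder.h>
#include <rte_common.h>

struct bnxt_dscp2pri_entry {	/* assumed 3-byte tuple layout */
	uint8_t dscp;
	uint8_t mask;
	uint8_t pri;
} __rte_packed;

static void
bnxt_dscp2pri_fill(struct hwrm_queue_dscp2pri_cfg_input *req,
		   struct bnxt_dscp2pri_entry *buf, uint64_t buf_iova)
{
	/* map DSCP 46 (EF), matching all six DSCP bits, to priority 5 */
	buf[0].dscp = 46;
	buf[0].mask = 0x3f;
	buf[0].pri = 5;

	req->port_id = 0;
	req->default_pri = 0;
	req->enables = rte_cpu_to_le_32(
		HWRM_QUEUE_DSCP2PRI_CFG_INPUT_ENABLES_DEFAULT_PRI);
	req->entry_cnt = rte_cpu_to_le_16(1);
	req->src_data_addr = rte_cpu_to_le_64(buf_iova);
}
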
+ * + * Each bit represents a specific pri where bit 0 represents + * pri 0 and bit 7 represents pri 7. + * # A value of 0 indicates that the pri is not configurable + * by the hwrm_queue_mplstc2pri_cfg command. + * # A value of 1 indicates that the pri is configurable. + * # A hwrm_queue_mplstc2pri_cfg command shall return error when + * trying to configure a pri that is not configurable. + */ + uint8_t queue_mplstc2pri_cfg_allowed; + /* + * This is the default PRI which un-initialized MPLS values will be + * mapped to. + */ + uint8_t hw_default_pri; + uint8_t unused_0[5]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/****************************** + * hwrm_queue_mplstc2pri_qcfg * + ******************************/ + + +/* hwrm_queue_mplstc2pri_qcfg_input (size:192b/24B) */ +struct hwrm_queue_mplstc2pri_qcfg_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* + * Port ID of port for which the table is being configured. + * The HWRM needs to check whether this function is allowed + * to configure MPLS TC(EXP) to pri mapping on this port. + */ + uint8_t port_id; + uint8_t unused_0[7]; +} __rte_packed; + +/* hwrm_queue_mplstc2pri_qcfg_output (size:192b/24B) */ +struct hwrm_queue_mplstc2pri_qcfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* + * pri assigned to MPLS TC(EXP) 0. This value can only be changed + * before traffic has started. + * A value of 0xff indicates that no pri is assigned to the + * MPLS TC(EXP) 0. + */ + uint8_t tc0_pri_queue_id; + /* + * pri assigned to MPLS TC(EXP) 1. This value can only be changed + * before traffic has started. + * A value of 0xff indicates that no pri is assigned to the + * MPLS TC(EXP) 1. + */ + uint8_t tc1_pri_queue_id; + /* + * pri assigned to MPLS TC(EXP) 2. This value can only be changed + * before traffic has started. + * A value of 0xff indicates that no pri is assigned to the + * MPLS TC(EXP) 2. + */ + uint8_t tc2_pri_queue_id; + /* + * pri assigned to MPLS TC(EXP) 3. This value can only be changed + * before traffic has started. 
+ * A value of 0xff indicates that no pri is assigned to the + * MPLS TC(EXP) 3. + */ + uint8_t tc3_pri_queue_id; + /* + * pri assigned to MPLS TC(EXP) 4. This value can only be changed + * before traffic has started. + * A value of 0xff indicates that no pri is assigned to the + * MPLS TC(EXP) 4. + */ + uint8_t tc4_pri_queue_id; + /* + * pri assigned to MPLS TC(EXP) 5. This value can only be changed + * before traffic has started. + * A value of 0xff indicates that no pri is assigned to the + * MPLS TC(EXP) 5. + */ + uint8_t tc5_pri_queue_id; + /* + * pri assigned to MPLS TC(EXP) 6. This value can only + * be changed before traffic has started. + * A value of 0xff indicates that no pri is assigned to the + * MPLS TC(EXP) 6. + */ + uint8_t tc6_pri_queue_id; + /* + * pri assigned to MPLS TC(EXP) 7. This value can only + * be changed before traffic has started. + * A value of 0xff indicates that no pri is assigned to the + * MPLS TC(EXP) 7. + */ + uint8_t tc7_pri_queue_id; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/***************************** + * hwrm_queue_mplstc2pri_cfg * + *****************************/ + + +/* hwrm_queue_mplstc2pri_cfg_input (size:256b/32B) */ +struct hwrm_queue_mplstc2pri_cfg_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + uint32_t enables; + /* + * This bit must be '1' for the mplstc0_pri_queue_id field to be + * configured. + */ + #define HWRM_QUEUE_MPLSTC2PRI_CFG_INPUT_ENABLES_TC0_PRI_QUEUE_ID \ + UINT32_C(0x1) + /* + * This bit must be '1' for the mplstc1_pri_queue_id field to be + * configured. + */ + #define HWRM_QUEUE_MPLSTC2PRI_CFG_INPUT_ENABLES_TC1_PRI_QUEUE_ID \ + UINT32_C(0x2) + /* + * This bit must be '1' for the mplstc2_pri_queue_id field to be + * configured. + */ + #define HWRM_QUEUE_MPLSTC2PRI_CFG_INPUT_ENABLES_TC2_PRI_QUEUE_ID \ + UINT32_C(0x4) + /* + * This bit must be '1' for the mplstc3_pri_queue_id field to be + * configured. + */ + #define HWRM_QUEUE_MPLSTC2PRI_CFG_INPUT_ENABLES_TC3_PRI_QUEUE_ID \ + UINT32_C(0x8) + /* + * This bit must be '1' for the mplstc4_pri_queue_id field to be + * configured. 
*/ + #define HWRM_QUEUE_MPLSTC2PRI_CFG_INPUT_ENABLES_TC4_PRI_QUEUE_ID \ + UINT32_C(0x10) + /* + * This bit must be '1' for the mplstc5_pri_queue_id field to be + * configured. + */ + #define HWRM_QUEUE_MPLSTC2PRI_CFG_INPUT_ENABLES_TC5_PRI_QUEUE_ID \ + UINT32_C(0x20) + /* + * This bit must be '1' for the mplstc6_pri_queue_id field to be + * configured. + */ + #define HWRM_QUEUE_MPLSTC2PRI_CFG_INPUT_ENABLES_TC6_PRI_QUEUE_ID \ + UINT32_C(0x40) + /* + * This bit must be '1' for the mplstc7_pri_queue_id field to be + * configured. + */ + #define HWRM_QUEUE_MPLSTC2PRI_CFG_INPUT_ENABLES_TC7_PRI_QUEUE_ID \ + UINT32_C(0x80) + /* + * Port ID of port for which the table is being configured. + * The HWRM needs to check whether this function is allowed + * to configure MPLS TC(EXP) to pri mapping on this port. + */ + uint8_t port_id; + uint8_t unused_0[3]; + /* + * pri assigned to MPLS TC(EXP) 0. This value can only + * be changed before traffic has started. + */ + uint8_t tc0_pri_queue_id; + /* + * pri assigned to MPLS TC(EXP) 1. This value can only + * be changed before traffic has started. + */ + uint8_t tc1_pri_queue_id; + /* + * pri assigned to MPLS TC(EXP) 2. This value can only + * be changed before traffic has started. + */ + uint8_t tc2_pri_queue_id; + /* + * pri assigned to MPLS TC(EXP) 3. This value can only + * be changed before traffic has started. + */ + uint8_t tc3_pri_queue_id; + /* + * pri assigned to MPLS TC(EXP) 4. This value can only + * be changed before traffic has started. + */ + uint8_t tc4_pri_queue_id; + /* + * pri assigned to MPLS TC(EXP) 5. This value can only + * be changed before traffic has started. + */ + uint8_t tc5_pri_queue_id; + /* + * pri assigned to MPLS TC(EXP) 6. This value can only + * be changed before traffic has started. + */ + uint8_t tc6_pri_queue_id; + /* + * pri assigned to MPLS TC(EXP) 7. This value can only + * be changed before traffic has started. + */ + uint8_t tc7_pri_queue_id; +} __rte_packed; + +/* hwrm_queue_mplstc2pri_cfg_output (size:128b/16B) */ +struct hwrm_queue_mplstc2pri_cfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/******************* + * hwrm_vnic_alloc * + *******************/ + + +/* hwrm_vnic_alloc_input (size:192b/24B) */ +struct hwrm_vnic_alloc_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion.
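
hwrm_queue_mplstc2pri_cfg follows the usual enables pattern: a bit must be set in enables for every tcN_pri_queue_id byte the firmware should apply. A minimal sketch, with bnxt_mplstc2pri_example() as a hypothetical helper, port 0 and priority 7 as arbitrary choices, and rte_cpu_to_le_32() assumed for the little-endian wire format:

#include <rte_byteorder.h>

static void
bnxt_mplstc2pri_example(struct hwrm_queue_mplstc2pri_cfg_input *req)
{
	/* steer MPLS TC(EXP) 0 to pri 7 on port 0; only TC0 is enabled */
	req->port_id = 0;
	req->enables = rte_cpu_to_le_32(
		HWRM_QUEUE_MPLSTC2PRI_CFG_INPUT_ENABLES_TC0_PRI_QUEUE_ID);
	req->tc0_pri_queue_id = 7;
}
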
+ */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + uint32_t flags; + /* + * When this bit is '1', this VNIC is requested to + * be the default VNIC for this function. + */ + #define HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT UINT32_C(0x1) + uint8_t unused_0[4]; +} __rte_packed; + +/* hwrm_vnic_alloc_output (size:128b/16B) */ +struct hwrm_vnic_alloc_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* Logical vnic ID */ + uint32_t vnic_id; + uint8_t unused_0[3]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/****************** + * hwrm_vnic_free * + ******************/ + + +/* hwrm_vnic_free_input (size:192b/24B) */ +struct hwrm_vnic_free_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* Logical vnic ID */ + uint32_t vnic_id; + uint8_t unused_0[4]; +} __rte_packed; + +/* hwrm_vnic_free_output (size:128b/16B) */ +struct hwrm_vnic_free_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. 
+ */ + uint8_t valid; +} __rte_packed; + +/***************** + * hwrm_vnic_cfg * + *****************/ + + +/* hwrm_vnic_cfg_input (size:384b/48B) */ +struct hwrm_vnic_cfg_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + uint32_t flags; + /* + * When this bit is '1', the VNIC is requested to + * be the default VNIC for the function. + */ + #define HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT \ + UINT32_C(0x1) + /* + * When this bit is '1', the VNIC is being configured to + * strip VLAN in the RX path. + * If set to '0', then VLAN stripping is disabled on + * this VNIC. + */ + #define HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE \ + UINT32_C(0x2) + /* + * When this bit is '1', the VNIC is being configured to + * buffer receive packets in the hardware until the host + * posts new receive buffers. + * If set to '0', then bd_stall is being configured to be + * disabled on this VNIC. + */ + #define HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE \ + UINT32_C(0x4) + /* + * When this bit is '1', the VNIC is being configured to + * receive both RoCE and non-RoCE traffic. + * If set to '0', then this VNIC is not configured to be + * operating in dual VNIC mode. + */ + #define HWRM_VNIC_CFG_INPUT_FLAGS_ROCE_DUAL_VNIC_MODE \ + UINT32_C(0x8) + /* + * When this flag is set to '1', the VNIC is requested to + * be configured to receive only RoCE traffic. + * If this flag is set to '0', then this flag shall be + * ignored by the HWRM. + * If roce_dual_vnic_mode flag is set to '1' + * or roce_mirroring_capable_vnic_mode flag to 1, + * then the HWRM client shall not set this flag to '1'. + */ + #define HWRM_VNIC_CFG_INPUT_FLAGS_ROCE_ONLY_VNIC_MODE \ + UINT32_C(0x10) + /* + * When a VNIC uses one destination ring group for certain + * application (e.g. Receive Flow Steering) where + * exact match is used to direct packets to a VNIC with one + * destination ring group only, there is no need to configure + * RSS indirection table for that VNIC as only one destination + * ring group is used. + * + * This flag is used to enable a mode where + * RSS is enabled in the VNIC using a RSS context + * for computing RSS hash but the RSS indirection table is + * not configured using hwrm_vnic_rss_cfg. + * + * If this mode is enabled, then the driver should not program + * RSS indirection table for the RSS context that is used for + * computing RSS hash only. + */ + #define HWRM_VNIC_CFG_INPUT_FLAGS_RSS_DFLT_CR_MODE \ + UINT32_C(0x20) + /* + * When this bit is '1', the VNIC is being configured to + * receive both RoCE and non-RoCE traffic, but forward only the + * RoCE traffic further. 
Also, RoCE traffic can be mirrored to + * L2 driver. + */ + #define HWRM_VNIC_CFG_INPUT_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE \ + UINT32_C(0x40) + uint32_t enables; + /* + * This bit must be '1' for the dflt_ring_grp field to be + * configured. + */ + #define HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP \ + UINT32_C(0x1) + /* + * This bit must be '1' for the rss_rule field to be + * configured. + */ + #define HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE \ + UINT32_C(0x2) + /* + * This bit must be '1' for the cos_rule field to be + * configured. + */ + #define HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE \ + UINT32_C(0x4) + /* + * This bit must be '1' for the lb_rule field to be + * configured. + */ + #define HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE \ + UINT32_C(0x8) + /* + * This bit must be '1' for the mru field to be + * configured. + */ + #define HWRM_VNIC_CFG_INPUT_ENABLES_MRU \ + UINT32_C(0x10) + /* + * This bit must be '1' for the default_rx_ring_id field to be + * configured. + */ + #define HWRM_VNIC_CFG_INPUT_ENABLES_DEFAULT_RX_RING_ID \ + UINT32_C(0x20) + /* + * This bit must be '1' for the default_cmpl_ring_id field to be + * configured. + */ + #define HWRM_VNIC_CFG_INPUT_ENABLES_DEFAULT_CMPL_RING_ID \ + UINT32_C(0x40) + /* This bit must be '1' for the queue_id field to be configured. */ + #define HWRM_VNIC_CFG_INPUT_ENABLES_QUEUE_ID \ + UINT32_C(0x80) + /* Logical vnic ID */ + uint16_t vnic_id; + /* + * Default Completion ring for the VNIC. This ring will + * be chosen if packet does not match any RSS rules and if + * there is no COS rule. + */ + uint16_t dflt_ring_grp; + /* + * RSS ID for RSS rule/table structure. 0xFF... (All Fs) if + * there is no RSS rule. + */ + uint16_t rss_rule; + /* + * RSS ID for COS rule/table structure. 0xFF... (All Fs) if + * there is no COS rule. + */ + uint16_t cos_rule; + /* + * RSS ID for load balancing rule/table structure. + * 0xFF... (All Fs) if there is no LB rule. + */ + uint16_t lb_rule; + /* + * The maximum receive unit of the vnic. + * Each vnic is associated with a function. + * The vnic mru value overwrites the mru setting of the + * associated function. + * The HWRM shall make sure that vnic mru does not exceed + * the mru of the port the function is associated with. + */ + uint16_t mru; + /* + * Default Rx ring for the VNIC. This ring will + * be chosen if packet does not match any RSS rules. + * The aggregation ring associated with the Rx ring is + * implied based on the Rx ring specified when the + * aggregation ring was allocated. + */ + uint16_t default_rx_ring_id; + /* + * Default completion ring for the VNIC. This ring will + * be chosen if packet does not match any RSS rules. + */ + uint16_t default_cmpl_ring_id; + /* + * When specified, only incoming packets classified to the specified CoS + * queue ID will be arriving on this VNIC. Packet priority to CoS mapping + * rules can be specified using HWRM_QUEUE_PRI2COS_CFG. In this mode, + * ntuple filters with VNIC destination specified are invalid since they + * conflict with the the CoS to VNIC steering rules in this mode. + * + * If this field is not specified, packet to VNIC steering will be + * subject to the standard L2 filter rules and any additional ntuple + * filter rules with destination VNIC specified. + */ + uint16_t queue_id; + uint8_t unused0[6]; +} __rte_packed; + +/* hwrm_vnic_cfg_output (size:128b/16B) */ +struct hwrm_vnic_cfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. 
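+ *
+ * Illustrative note on the request side (editor's sketch, not part of the
+ * upstream HWRM definition): a caller sets an enables bit for every
+ * optional field it actually fills and converts multi-byte fields to
+ * little endian. The helper name, the 9600-byte MRU and the assumption
+ * that a transport layer fills req_type/seq_id/resp_addr are all
+ * hypothetical; the structure, field and flag names are the ones declared
+ * above, and rte_cpu_to_le_*() come from <rte_byteorder.h>.
+ *
+ *   #include <string.h>
+ *   #include <rte_byteorder.h>
+ *
+ *   static void
+ *   fill_vnic_cfg(struct hwrm_vnic_cfg_input *req, uint16_t vnic_id,
+ *                 uint16_t ring_grp, uint16_t rss_ctx)
+ *   {
+ *           memset(req, 0, sizeof(*req));
+ *           req->flags = rte_cpu_to_le_32(
+ *                   HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
+ *           req->enables = rte_cpu_to_le_32(
+ *                   HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP |
+ *                   HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE |
+ *                   HWRM_VNIC_CFG_INPUT_ENABLES_MRU);
+ *           req->vnic_id = rte_cpu_to_le_16(vnic_id);
+ *           req->dflt_ring_grp = rte_cpu_to_le_16(ring_grp);
+ *           req->rss_rule = rte_cpu_to_le_16(rss_ctx);
+ *           req->mru = rte_cpu_to_le_16(9600);
+ *           // req_type, seq_id and resp_addr are left to the caller's
+ *           // command transport code and are not shown here.
+ *   }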
*/ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/****************** + * hwrm_vnic_qcfg * + ******************/ + + +/* hwrm_vnic_qcfg_input (size:256b/32B) */ +struct hwrm_vnic_qcfg_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + uint32_t enables; + /* + * This bit must be '1' for the vf_id_valid field to be + * configured. + */ + #define HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID UINT32_C(0x1) + /* Logical vnic ID */ + uint32_t vnic_id; + /* ID of Virtual Function whose VNIC resource is being queried. */ + uint16_t vf_id; + uint8_t unused_0[6]; +} __rte_packed; + +/* hwrm_vnic_qcfg_output (size:256b/32B) */ +struct hwrm_vnic_qcfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* Default Completion ring for the VNIC. */ + uint16_t dflt_ring_grp; + /* + * RSS ID for RSS rule/table structure. 0xFF... (All Fs) if + * there is no RSS rule. + */ + uint16_t rss_rule; + /* + * RSS ID for COS rule/table structure. 0xFF... (All Fs) if + * there is no COS rule. + */ + uint16_t cos_rule; + /* + * RSS ID for load balancing rule/table structure. + * 0xFF... (All Fs) if there is no LB rule. + */ + uint16_t lb_rule; + /* The maximum receive unit of the vnic. */ + uint16_t mru; + uint8_t unused_0[2]; + uint32_t flags; + /* + * When this bit is '1', the VNIC is the default VNIC for + * the function. + */ + #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT \ + UINT32_C(0x1) + /* + * When this bit is '1', the VNIC is configured to + * strip VLAN in the RX path. + * If set to '0', then VLAN stripping is disabled on + * this VNIC. + */ + #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE \ + UINT32_C(0x2) + /* + * When this bit is '1', the VNIC is configured to + * buffer receive packets in the hardware until the host + * posts new receive buffers. + * If set to '0', then bd_stall is disabled on + * this VNIC. 
+ */ + #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE \ + UINT32_C(0x4) + /* + * When this bit is '1', the VNIC is configured to + * receive both RoCE and non-RoCE traffic. + * If set to '0', then this VNIC is not configured to + * operate in dual VNIC mode. + */ + #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE \ + UINT32_C(0x8) + /* + * When this flag is set to '1', the VNIC is configured to + * receive only RoCE traffic. + * When this flag is set to '0', the VNIC is not configured + * to receive only RoCE traffic. + * If roce_dual_vnic_mode flag and this flag both are set + * to '1', then it is an invalid configuration of the + * VNIC. The HWRM should not allow that type of + * mis-configuration by HWRM clients. + */ + #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE \ + UINT32_C(0x10) + /* + * When a VNIC uses one destination ring group for certain + * application (e.g. Receive Flow Steering) where + * exact match is used to direct packets to a VNIC with one + * destination ring group only, there is no need to configure + * RSS indirection table for that VNIC as only one destination + * ring group is used. + * + * When this bit is set to '1', then the VNIC is enabled in a + * mode where RSS is enabled in the VNIC using a RSS context + * for computing RSS hash but the RSS indirection table is + * not configured. + */ + #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE \ + UINT32_C(0x20) + /* + * When this bit is '1', the VNIC is configured to + * receive both RoCE and non-RoCE traffic, but forward only + * RoCE traffic further. Also RoCE traffic can be mirrored to + * L2 driver. + */ + #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE \ + UINT32_C(0x40) + /* + * When returned with a valid CoS Queue id, the CoS Queue/VNIC association + * is valid. Otherwise it will return 0xFFFF to indicate no VNIC/CoS + * queue association. + */ + uint16_t queue_id; + uint8_t unused_1[5]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/******************* + * hwrm_vnic_qcaps * + *******************/ + + +/* hwrm_vnic_qcaps_input (size:192b/24B) */ +struct hwrm_vnic_qcaps_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. 
+ */ + uint64_t resp_addr; + uint32_t enables; + uint8_t unused_0[4]; +} __rte_packed; + +/* hwrm_vnic_qcaps_output (size:192b/24B) */ +struct hwrm_vnic_qcaps_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* The maximum receive unit that is settable on a vnic. */ + uint16_t mru; + uint8_t unused_0[2]; + uint32_t flags; + /* Unused. */ + #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_UNUSED \ + UINT32_C(0x1) + /* + * When this bit is '1', the capability of stripping VLAN in + * the RX path is supported on VNIC(s). + * If set to '0', then VLAN stripping capability is + * not supported on VNIC(s). + */ + #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_VLAN_STRIP_CAP \ + UINT32_C(0x2) + /* + * When this bit is '1', the capability to buffer receive + * packets in the hardware until the host posts new receive buffers + * is supported on VNIC(s). + * If set to '0', then bd_stall capability is not supported + * on VNIC(s). + */ + #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_BD_STALL_CAP \ + UINT32_C(0x4) + /* + * When this bit is '1', the capability to + * receive both RoCE and non-RoCE traffic on VNIC(s) is + * supported. + * If set to '0', then the capability to receive + * both RoCE and non-RoCE traffic on VNIC(s) is + * not supported. + */ + #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_ROCE_DUAL_VNIC_CAP \ + UINT32_C(0x8) + /* + * When this bit is set to '1', the capability to configure + * a VNIC to receive only RoCE traffic is supported. + * When this flag is set to '0', the VNIC capability to + * configure to receive only RoCE traffic is not supported. + */ + #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_ROCE_ONLY_VNIC_CAP \ + UINT32_C(0x10) + /* + * When this bit is set to '1', then the capability to enable + * a VNIC in a mode where RSS context without configuring + * RSS indirection table is supported (for RSS hash computation). + * When this bit is set to '0', then a VNIC can not be configured + * with a mode to enable RSS context without configuring RSS + * indirection table. + */ + #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_RSS_DFLT_CR_CAP \ + UINT32_C(0x20) + /* + * When this bit is '1', the capability to + * mirror the the RoCE traffic is supported. + * If set to '0', then the capability to mirror the + * RoCE traffic is not supported. + */ + #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP \ + UINT32_C(0x40) + /* + * When this bit is '1', the outermost RSS hashing capability + * is supported. If set to '0', then the outermost RSS hashing + * capability is not supported. + */ + #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_OUTERMOST_RSS_CAP \ + UINT32_C(0x80) + /* + * When this bit is '1', it indicates that firmware supports the + * ability to steer incoming packets from one CoS queue to one + * VNIC. This optional feature can then be enabled + * using HWRM_VNIC_CFG on any VNIC. This feature is only + * available when NVM option “enable_cos_classfication” is set + * to 1. If set to '0', firmware does not support this feature. + */ + #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_COS_ASSIGNMENT_CAP \ + UINT32_C(0x100) + /* + * This field advertises the maximum concurrent TPA aggregations + * supported by the VNIC on new devices that support TPA v2. + * '0' means that TPA v2 is not supported. 
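+ *
+ * Illustrative note (editor's sketch, not part of the upstream HWRM
+ * definition): the flags word above is little endian on the wire, so a
+ * caller converts it once and then tests individual capability bits
+ * before relying on the corresponding hwrm_vnic_cfg flags. The helper
+ * name is hypothetical; the bit name is defined in this structure and
+ * rte_le_to_cpu_32() comes from <rte_byteorder.h>.
+ *
+ *   #include <stdbool.h>
+ *   #include <rte_byteorder.h>
+ *
+ *   static bool
+ *   vnic_has_vlan_strip_cap(const struct hwrm_vnic_qcaps_output *resp)
+ *   {
+ *           uint32_t flags = rte_le_to_cpu_32(resp->flags);
+ *
+ *           return (flags & HWRM_VNIC_QCAPS_OUTPUT_FLAGS_VLAN_STRIP_CAP) != 0;
+ *   }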
+ */ + uint16_t max_aggs_supported; + uint8_t unused_1[5]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/********************* + * hwrm_vnic_tpa_cfg * + *********************/ + + +/* hwrm_vnic_tpa_cfg_input (size:320b/40B) */ +struct hwrm_vnic_tpa_cfg_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + uint32_t flags; + /* + * When this bit is '1', the VNIC shall be configured to + * perform transparent packet aggregation (TPA) of + * non-tunneled TCP packets. + */ + #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA \ + UINT32_C(0x1) + /* + * When this bit is '1', the VNIC shall be configured to + * perform transparent packet aggregation (TPA) of + * tunneled TCP packets. + */ + #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA \ + UINT32_C(0x2) + /* + * When this bit is '1', the VNIC shall be configured to + * perform transparent packet aggregation (TPA) according + * to Windows Receive Segment Coalescing (RSC) rules. + */ + #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE \ + UINT32_C(0x4) + /* + * When this bit is '1', the VNIC shall be configured to + * perform transparent packet aggregation (TPA) according + * to Linux Generic Receive Offload (GRO) rules. + */ + #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO \ + UINT32_C(0x8) + /* + * When this bit is '1', the VNIC shall be configured to + * perform transparent packet aggregation (TPA) for TCP + * packets with IP ECN set to non-zero. + */ + #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN \ + UINT32_C(0x10) + /* + * When this bit is '1', the VNIC shall be configured to + * perform transparent packet aggregation (TPA) for + * GRE tunneled TCP packets only if all packets have the + * same GRE sequence. + */ + #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ \ + UINT32_C(0x20) + /* + * When this bit is '1' and the GRO mode is enabled, + * the VNIC shall be configured to + * perform transparent packet aggregation (TPA) for + * TCP/IPv4 packets with consecutively increasing IPIDs. + * In other words, the last packet that is being + * aggregated to an already existing aggregation context + * shall have IPID 1 more than the IPID of the last packet + * that was aggregated in that aggregation context. 
+ */ + #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO_IPID_CHECK \ + UINT32_C(0x40) + /* + * When this bit is '1' and the GRO mode is enabled, + * the VNIC shall be configured to + * perform transparent packet aggregation (TPA) for + * TCP packets with the same TTL (IPv4) or Hop limit (IPv6) + * value. + */ + #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO_TTL_CHECK \ + UINT32_C(0x80) + /* + * When this bit is '1' and the GRO mode is enabled, + * the VNIC shall DMA payload data using GRO rules. + * When this bit is '0', the VNIC shall DMA payload data + * using the more efficient LRO rules of filling all + * aggregation buffers. + */ + #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_PACK_AS_GRO \ + UINT32_C(0x100) + uint32_t enables; + /* + * This bit must be '1' for the max_agg_segs field to be + * configured. + */ + #define HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS UINT32_C(0x1) + /* + * This bit must be '1' for the max_aggs field to be + * configured. + */ + #define HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS UINT32_C(0x2) + /* + * This bit must be '1' for the max_agg_timer field to be + * configured. + */ + #define HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_TIMER UINT32_C(0x4) + /* deprecated bit. Do not use!!! */ + #define HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN UINT32_C(0x8) + /* Logical vnic ID */ + uint16_t vnic_id; + /* + * This is the maximum number of TCP segments that can + * be aggregated (unit is Log2). Max value is 31. On new + * devices supporting TPA v2, the unit is multiples of 4 and + * valid values are > 0 and <= 63. + */ + uint16_t max_agg_segs; + /* 1 segment */ + #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_1 UINT32_C(0x0) + /* 2 segments */ + #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_2 UINT32_C(0x1) + /* 4 segments */ + #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_4 UINT32_C(0x2) + /* 8 segments */ + #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_8 UINT32_C(0x3) + /* Any segment size larger than this is not valid */ + #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_MAX UINT32_C(0x1f) + #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_LAST \ + HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_MAX + /* + * This is the maximum number of aggregations this VNIC is + * allowed (unit is Log2). Max value is 7. On new devices + * supporting TPA v2, this is in unit of 1 and must be > 0 + * and <= max_aggs_supported in the hwrm_vnic_qcaps response + * to enable TPA v2. + */ + uint16_t max_aggs; + /* 1 aggregation */ + #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_1 UINT32_C(0x0) + /* 2 aggregations */ + #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_2 UINT32_C(0x1) + /* 4 aggregations */ + #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_4 UINT32_C(0x2) + /* 8 aggregations */ + #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_8 UINT32_C(0x3) + /* 16 aggregations */ + #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_16 UINT32_C(0x4) + /* Any aggregation size larger than this is not valid */ + #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX UINT32_C(0x7) + #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_LAST \ + HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX + uint8_t unused_0[2]; + /* + * This is the maximum amount of time allowed for + * an aggregation context to complete after it was initiated. + */ + uint32_t max_agg_timer; + /* + * This is the minimum amount of payload length required to + * start an aggregation context. This field is deprecated and + * should be set to 0. The minimum length is set by firmware + * and can be queried using hwrm_vnic_tpa_qcfg. 
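+ *
+ * Illustrative note (editor's sketch, not part of the upstream HWRM
+ * definition): a caller enabling TPA typically sets the aggregation
+ * flags, enables only the limits it wants to override, and leaves the
+ * deprecated min_agg_len at zero as this comment recommends. The helper
+ * name and the particular limits chosen are assumptions; every structure,
+ * field and #define name used below is declared in this file.
+ *
+ *   #include <string.h>
+ *   #include <rte_byteorder.h>
+ *
+ *   static void
+ *   fill_tpa_cfg(struct hwrm_vnic_tpa_cfg_input *req, uint16_t vnic_id)
+ *   {
+ *           memset(req, 0, sizeof(*req));
+ *           req->flags = rte_cpu_to_le_32(
+ *                   HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
+ *                   HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
+ *                   HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO);
+ *           req->enables = rte_cpu_to_le_32(
+ *                   HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
+ *                   HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS);
+ *           req->vnic_id = rte_cpu_to_le_16(vnic_id);
+ *           req->max_agg_segs =
+ *                   rte_cpu_to_le_16(HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_8);
+ *           req->max_aggs =
+ *                   rte_cpu_to_le_16(HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_16);
+ *           req->min_agg_len = 0;   // deprecated; firmware picks the minimum
+ *   }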
+ */ + uint32_t min_agg_len; +} __rte_packed; + +/* hwrm_vnic_tpa_cfg_output (size:128b/16B) */ +struct hwrm_vnic_tpa_cfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/********************* + * hwrm_vnic_rss_cfg * + *********************/ + + +/* hwrm_vnic_rss_cfg_input (size:384b/48B) */ +struct hwrm_vnic_rss_cfg_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + uint32_t hash_type; + /* + * When this bit is '1', the RSS hash shall be computed + * over source and destination IPv4 addresses of IPv4 + * packets. + */ + #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4 UINT32_C(0x1) + /* + * When this bit is '1', the RSS hash shall be computed + * over source/destination IPv4 addresses and + * source/destination ports of TCP/IPv4 packets. + */ + #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4 UINT32_C(0x2) + /* + * When this bit is '1', the RSS hash shall be computed + * over source/destination IPv4 addresses and + * source/destination ports of UDP/IPv4 packets. + */ + #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4 UINT32_C(0x4) + /* + * When this bit is '1', the RSS hash shall be computed + * over source and destination IPv4 addresses of IPv6 + * packets. + */ + #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6 UINT32_C(0x8) + /* + * When this bit is '1', the RSS hash shall be computed + * over source/destination IPv6 addresses and + * source/destination ports of TCP/IPv6 packets. + */ + #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6 UINT32_C(0x10) + /* + * When this bit is '1', the RSS hash shall be computed + * over source/destination IPv6 addresses and + * source/destination ports of UDP/IPv6 packets. + */ + #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6 UINT32_C(0x20) + /* VNIC ID of VNIC associated with RSS table being configured. */ + uint16_t vnic_id; + /* + * Specifies which VNIC ring table pair to configure. + * Valid values range from 0 to 7. 
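+ *
+ * Illustrative note (editor's sketch, not part of the upstream HWRM
+ * definition): the two *_tbl_addr fields below must carry IOVA/bus
+ * addresses of physically contiguous buffers holding the ring group
+ * table and the hash key; how those buffers are allocated is up to the
+ * caller and is assumed here. The helper name and parameters are
+ * hypothetical; all structure, field and HASH_TYPE names are declared
+ * in this file.
+ *
+ *   #include <string.h>
+ *   #include <rte_byteorder.h>
+ *
+ *   static void
+ *   fill_rss_cfg(struct hwrm_vnic_rss_cfg_input *req, uint16_t vnic_id,
+ *                uint16_t rss_ctx, uint64_t tbl_iova, uint64_t key_iova)
+ *   {
+ *           memset(req, 0, sizeof(*req));
+ *           req->hash_type = rte_cpu_to_le_32(
+ *                   HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4 |
+ *                   HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4 |
+ *                   HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6 |
+ *                   HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6);
+ *           req->vnic_id = rte_cpu_to_le_16(vnic_id);
+ *           req->ring_grp_tbl_addr = rte_cpu_to_le_64(tbl_iova);
+ *           req->hash_key_tbl_addr = rte_cpu_to_le_64(key_iova);
+ *           req->rss_ctx_idx = rte_cpu_to_le_16(rss_ctx);
+ *   }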
+ */ + uint8_t ring_table_pair_index; + /* Flags to specify different RSS hash modes. */ + uint8_t hash_mode_flags; + /* + * When this bit is '1', it indicates using current RSS + * hash mode setting configured in the device. + */ + #define HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_DEFAULT \ + UINT32_C(0x1) + /* + * When this bit is '1', it indicates requesting support of + * RSS hashing over innermost 4 tuples {l3.src, l3.dest, + * l4.src, l4.dest} for tunnel packets. For none-tunnel + * packets, the RSS hash is computed over the normal + * src/dest l3 and src/dest l4 headers. + */ + #define HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_INNERMOST_4 \ + UINT32_C(0x2) + /* + * When this bit is '1', it indicates requesting support of + * RSS hashing over innermost 2 tuples {l3.src, l3.dest} for + * tunnel packets. For none-tunnel packets, the RSS hash is + * computed over the normal src/dest l3 headers. + */ + #define HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_INNERMOST_2 \ + UINT32_C(0x4) + /* + * When this bit is '1', it indicates requesting support of + * RSS hashing over outermost 4 tuples {t_l3.src, t_l3.dest, + * t_l4.src, t_l4.dest} for tunnel packets. For none-tunnel + * packets, the RSS hash is computed over the normal + * src/dest l3 and src/dest l4 headers. + */ + #define HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_OUTERMOST_4 \ + UINT32_C(0x8) + /* + * When this bit is '1', it indicates requesting support of + * RSS hashing over outermost 2 tuples {t_l3.src, t_l3.dest} for + * tunnel packets. For none-tunnel packets, the RSS hash is + * computed over the normal src/dest l3 headers. + */ + #define HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_OUTERMOST_2 \ + UINT32_C(0x10) + /* This is the address for rss ring group table */ + uint64_t ring_grp_tbl_addr; + /* This is the address for rss hash key table */ + uint64_t hash_key_tbl_addr; + /* Index to the rss indirection table. */ + uint16_t rss_ctx_idx; + uint8_t unused_1[6]; +} __rte_packed; + +/* hwrm_vnic_rss_cfg_output (size:128b/16B) */ +struct hwrm_vnic_rss_cfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/* hwrm_vnic_rss_cfg_cmd_err (size:64b/8B) */ +struct hwrm_vnic_rss_cfg_cmd_err { + /* + * command specific error codes that goes to + * the cmd_err field in Common HWRM Error Response. + */ + uint8_t code; + /* Unknown error */ + #define HWRM_VNIC_RSS_CFG_CMD_ERR_CODE_UNKNOWN \ + UINT32_C(0x0) + /* + * Unable to change global RSS mode to outer due to all active + * interfaces are not ready to support outer RSS hashing. 
+ */ + #define HWRM_VNIC_RSS_CFG_CMD_ERR_CODE_INTERFACE_NOT_READY \ + UINT32_C(0x1) + #define HWRM_VNIC_RSS_CFG_CMD_ERR_CODE_LAST \ + HWRM_VNIC_RSS_CFG_CMD_ERR_CODE_INTERFACE_NOT_READY + uint8_t unused_0[7]; +} __rte_packed; + +/********************** + * hwrm_vnic_rss_qcfg * + **********************/ + + +/* hwrm_vnic_rss_qcfg_input (size:192b/24B) */ +struct hwrm_vnic_rss_qcfg_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* Index to the rss indirection table. */ + uint16_t rss_ctx_idx; + uint8_t unused_0[6]; +} __rte_packed; + +/* hwrm_vnic_rss_qcfg_output (size:512b/64B) */ +struct hwrm_vnic_rss_qcfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint32_t hash_type; + /* + * When this bit is '1', the RSS hash shall be computed + * over source and destination IPv4 addresses of IPv4 + * packets. + */ + #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_TYPE_IPV4 UINT32_C(0x1) + /* + * When this bit is '1', the RSS hash shall be computed + * over source/destination IPv4 addresses and + * source/destination ports of TCP/IPv4 packets. + */ + #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_TYPE_TCP_IPV4 UINT32_C(0x2) + /* + * When this bit is '1', the RSS hash shall be computed + * over source/destination IPv4 addresses and + * source/destination ports of UDP/IPv4 packets. + */ + #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_TYPE_UDP_IPV4 UINT32_C(0x4) + /* + * When this bit is '1', the RSS hash shall be computed + * over source and destination IPv4 addresses of IPv6 + * packets. + */ + #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_TYPE_IPV6 UINT32_C(0x8) + /* + * When this bit is '1', the RSS hash shall be computed + * over source/destination IPv6 addresses and + * source/destination ports of TCP/IPv6 packets. + */ + #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_TYPE_TCP_IPV6 UINT32_C(0x10) + /* + * When this bit is '1', the RSS hash shall be computed + * over source/destination IPv6 addresses and + * source/destination ports of UDP/IPv6 packets. + */ + #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_TYPE_UDP_IPV6 UINT32_C(0x20) + uint8_t unused_0[4]; + /* This is the value of rss hash key */ + uint32_t hash_key[10]; + /* Flags to specify different RSS hash modes. */ + uint8_t hash_mode_flags; + /* + * When this bit is '1', it indicates using current RSS + * hash mode setting configured in the device. 
+ */ + #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_MODE_FLAGS_DEFAULT \ + UINT32_C(0x1) + /* + * When this bit is '1', it indicates requesting support of + * RSS hashing over innermost 4 tuples {l3.src, l3.dest, + * l4.src, l4.dest} for tunnel packets. For none-tunnel + * packets, the RSS hash is computed over the normal + * src/dest l3 and src/dest l4 headers. + */ + #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_MODE_FLAGS_INNERMOST_4 \ + UINT32_C(0x2) + /* + * When this bit is '1', it indicates requesting support of + * RSS hashing over innermost 2 tuples {l3.src, l3.dest} for + * tunnel packets. For none-tunnel packets, the RSS hash is + * computed over the normal src/dest l3 headers. + */ + #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_MODE_FLAGS_INNERMOST_2 \ + UINT32_C(0x4) + /* + * When this bit is '1', it indicates requesting support of + * RSS hashing over outermost 4 tuples {t_l3.src, t_l3.dest, + * t_l4.src, t_l4.dest} for tunnel packets. For none-tunnel + * packets, the RSS hash is computed over the normal + * src/dest l3 and src/dest l4 headers. + */ + #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_MODE_FLAGS_OUTERMOST_4 \ + UINT32_C(0x8) + /* + * When this bit is '1', it indicates requesting support of + * RSS hashing over outermost 2 tuples {t_l3.src, t_l3.dest} for + * tunnel packets. For none-tunnel packets, the RSS hash is + * computed over the normal src/dest l3 headers. + */ + #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_MODE_FLAGS_OUTERMOST_2 \ + UINT32_C(0x10) + uint8_t unused_1[6]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/************************** + * hwrm_vnic_plcmodes_cfg * + **************************/ + + +/* hwrm_vnic_plcmodes_cfg_input (size:320b/40B) */ +struct hwrm_vnic_plcmodes_cfg_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + uint32_t flags; + /* + * When this bit is '1', the VNIC shall be configured to + * use regular placement algorithm. + * By default, the regular placement algorithm shall be + * enabled on the VNIC. + */ + #define HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_REGULAR_PLACEMENT \ + UINT32_C(0x1) + /* + * When this bit is '1', the VNIC shall be configured + * use the jumbo placement algorithm. 
+ */ + #define HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT \ + UINT32_C(0x2) + /* + * When this bit is '1', the VNIC shall be configured + * to enable Header-Data split for IPv4 packets according + * to the following rules: + * # If the packet is identified as TCP/IPv4, then the + * packet is split at the beginning of the TCP payload. + * # If the packet is identified as UDP/IPv4, then the + * packet is split at the beginning of UDP payload. + * # If the packet is identified as non-TCP and non-UDP + * IPv4 packet, then the packet is split at the beginning + * of the upper layer protocol header carried in the IPv4 + * packet. + */ + #define HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_HDS_IPV4 \ + UINT32_C(0x4) + /* + * When this bit is '1', the VNIC shall be configured + * to enable Header-Data split for IPv6 packets according + * to the following rules: + * # If the packet is identified as TCP/IPv6, then the + * packet is split at the beginning of the TCP payload. + * # If the packet is identified as UDP/IPv6, then the + * packet is split at the beginning of UDP payload. + * # If the packet is identified as non-TCP and non-UDP + * IPv6 packet, then the packet is split at the beginning + * of the upper layer protocol header carried in the IPv6 + * packet. + */ + #define HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_HDS_IPV6 \ + UINT32_C(0x8) + /* + * When this bit is '1', the VNIC shall be configured + * to enable Header-Data split for FCoE packets at the + * beginning of FC payload. + */ + #define HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_HDS_FCOE \ + UINT32_C(0x10) + /* + * When this bit is '1', the VNIC shall be configured + * to enable Header-Data split for RoCE packets at the + * beginning of RoCE payload (after BTH/GRH headers). + */ + #define HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_HDS_ROCE \ + UINT32_C(0x20) + uint32_t enables; + /* + * This bit must be '1' for the jumbo_thresh_valid field to be + * configured. + */ + #define HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID \ + UINT32_C(0x1) + /* + * This bit must be '1' for the hds_offset_valid field to be + * configured. + */ + #define HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID \ + UINT32_C(0x2) + /* + * This bit must be '1' for the hds_threshold_valid field to be + * configured. + */ + #define HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID \ + UINT32_C(0x4) + /* Logical vnic ID */ + uint32_t vnic_id; + /* + * When jumbo placement algorithm is enabled, this value + * is used to determine the threshold for jumbo placement. + * Packets with length larger than this value will be + * placed according to the jumbo placement algorithm. + */ + uint16_t jumbo_thresh; + /* + * This value is used to determine the offset into + * packet buffer where the split data (payload) will be + * placed according to one of HDS placement algorithm. + * + * The lengths of packet buffers provided for split data + * shall be larger than this value. + */ + uint16_t hds_offset; + /* + * When one of the HDS placement algorithm is enabled, this + * value is used to determine the threshold for HDS + * placement. + * Packets with length larger than this value will be + * placed according to the HDS placement algorithm. + * This value shall be in multiple of 4 bytes. + */ + uint16_t hds_threshold; + uint8_t unused_0[6]; +} __rte_packed; + +/* hwrm_vnic_plcmodes_cfg_output (size:128b/16B) */ +struct hwrm_vnic_plcmodes_cfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. 
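+ *
+ * Illustrative note on the request side (editor's sketch, not part of
+ * the upstream HWRM definition): a caller enabling jumbo placement and
+ * header-data split typically mirrors its Rx buffer size into the
+ * thresholds it enables. The helper name and the choice of thresholds
+ * are assumptions; the structure, field and flag names are the ones
+ * declared above, and hds_threshold must stay a multiple of 4 bytes.
+ *
+ *   #include <string.h>
+ *   #include <rte_byteorder.h>
+ *
+ *   static void
+ *   fill_plcmodes_cfg(struct hwrm_vnic_plcmodes_cfg_input *req,
+ *                     uint32_t vnic_id, uint16_t rx_buf_size)
+ *   {
+ *           memset(req, 0, sizeof(*req));
+ *           req->flags = rte_cpu_to_le_32(
+ *                   HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT |
+ *                   HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_HDS_IPV4 |
+ *                   HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_HDS_IPV6);
+ *           req->enables = rte_cpu_to_le_32(
+ *                   HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID |
+ *                   HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID);
+ *           req->vnic_id = rte_cpu_to_le_32(vnic_id);
+ *           req->jumbo_thresh = rte_cpu_to_le_16(rx_buf_size);
+ *           req->hds_threshold = rte_cpu_to_le_16(rx_buf_size);
+ *   }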
*/ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/*************************** + * hwrm_vnic_plcmodes_qcfg * + ***************************/ + + +/* hwrm_vnic_plcmodes_qcfg_input (size:192b/24B) */ +struct hwrm_vnic_plcmodes_qcfg_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* Logical vnic ID */ + uint32_t vnic_id; + uint8_t unused_0[4]; +} __rte_packed; + +/* hwrm_vnic_plcmodes_qcfg_output (size:192b/24B) */ +struct hwrm_vnic_plcmodes_qcfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint32_t flags; + /* + * When this bit is '1', the VNIC is configured to + * use regular placement algorithm. + */ + #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_REGULAR_PLACEMENT \ + UINT32_C(0x1) + /* + * When this bit is '1', the VNIC is configured to + * use the jumbo placement algorithm. + */ + #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_JUMBO_PLACEMENT \ + UINT32_C(0x2) + /* + * When this bit is '1', the VNIC is configured + * to enable Header-Data split for IPv4 packets. + */ + #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_HDS_IPV4 \ + UINT32_C(0x4) + /* + * When this bit is '1', the VNIC is configured + * to enable Header-Data split for IPv6 packets. + */ + #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_HDS_IPV6 \ + UINT32_C(0x8) + /* + * When this bit is '1', the VNIC is configured + * to enable Header-Data split for FCoE packets. + */ + #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_HDS_FCOE \ + UINT32_C(0x10) + /* + * When this bit is '1', the VNIC is configured + * to enable Header-Data split for RoCE packets. + */ + #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_HDS_ROCE \ + UINT32_C(0x20) + /* + * When this bit is '1', the VNIC is configured + * to be the default VNIC of the requesting function. 
+ */ + #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC \ + UINT32_C(0x40) + /* + * When jumbo placement algorithm is enabled, this value + * is used to determine the threshold for jumbo placement. + * Packets with length larger than this value will be + * placed according to the jumbo placement algorithm. + */ + uint16_t jumbo_thresh; + /* + * This value is used to determine the offset into + * packet buffer where the split data (payload) will be + * placed according to one of HDS placement algorithm. + * + * The lengths of packet buffers provided for split data + * shall be larger than this value. + */ + uint16_t hds_offset; + /* + * When one of the HDS placement algorithm is enabled, this + * value is used to determine the threshold for HDS + * placement. + * Packets with length larger than this value will be + * placed according to the HDS placement algorithm. + * This value shall be in multiple of 4 bytes. + */ + uint16_t hds_threshold; + uint8_t unused_0[5]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/********************************** + * hwrm_vnic_rss_cos_lb_ctx_alloc * + **********************************/ + + +/* hwrm_vnic_rss_cos_lb_ctx_alloc_input (size:128b/16B) */ +struct hwrm_vnic_rss_cos_lb_ctx_alloc_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; +} __rte_packed; + +/* hwrm_vnic_rss_cos_lb_ctx_alloc_output (size:128b/16B) */ +struct hwrm_vnic_rss_cos_lb_ctx_alloc_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* rss_cos_lb_ctx_id is 16 b */ + uint16_t rss_cos_lb_ctx_id; + uint8_t unused_0[5]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. 
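+ *
+ * Illustrative note (editor's sketch, not part of the upstream HWRM
+ * definition): the context id returned here is what hwrm_vnic_cfg and
+ * hwrm_vnic_rss_cfg expect in their rss_rule/rss_ctx_idx fields, and the
+ * same id is passed back in hwrm_vnic_rss_cos_lb_ctx_free on teardown.
+ * The helper name is hypothetical; the structures and the
+ * HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE bit are declared in this file.
+ *
+ *   #include <rte_byteorder.h>
+ *
+ *   static void
+ *   wire_up_rss_ctx(const struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp,
+ *                   struct hwrm_vnic_cfg_input *cfg,
+ *                   struct hwrm_vnic_rss_cos_lb_ctx_free_input *free_req)
+ *   {
+ *           uint16_t ctx = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
+ *
+ *           cfg->rss_rule = rte_cpu_to_le_16(ctx);
+ *           cfg->enables |=
+ *                   rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE);
+ *           // ...and on teardown the same id is released again:
+ *           free_req->rss_cos_lb_ctx_id = rte_cpu_to_le_16(ctx);
+ *   }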
+ */ + uint8_t valid; +} __rte_packed; + +/********************************* + * hwrm_vnic_rss_cos_lb_ctx_free * + *********************************/ + + +/* hwrm_vnic_rss_cos_lb_ctx_free_input (size:192b/24B) */ +struct hwrm_vnic_rss_cos_lb_ctx_free_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* rss_cos_lb_ctx_id is 16 b */ + uint16_t rss_cos_lb_ctx_id; + uint8_t unused_0[6]; +} __rte_packed; + +/* hwrm_vnic_rss_cos_lb_ctx_free_output (size:128b/16B) */ +struct hwrm_vnic_rss_cos_lb_ctx_free_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/******************* + * hwrm_ring_alloc * + *******************/ + + +/* hwrm_ring_alloc_input (size:704b/88B) */ +struct hwrm_ring_alloc_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + uint32_t enables; + /* + * This bit must be '1' for the ring_arb_cfg field to be + * configured. + */ + #define HWRM_RING_ALLOC_INPUT_ENABLES_RING_ARB_CFG \ + UINT32_C(0x2) + /* + * This bit must be '1' for the stat_ctx_id_valid field to be + * configured. 
+ */ + #define HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID \ + UINT32_C(0x8) + /* + * This bit must be '1' for the max_bw_valid field to be + * configured. + */ + #define HWRM_RING_ALLOC_INPUT_ENABLES_MAX_BW_VALID \ + UINT32_C(0x20) + /* + * This bit must be '1' for the rx_ring_id field to be + * configured. + */ + #define HWRM_RING_ALLOC_INPUT_ENABLES_RX_RING_ID_VALID \ + UINT32_C(0x40) + /* + * This bit must be '1' for the nq_ring_id field to be + * configured. + */ + #define HWRM_RING_ALLOC_INPUT_ENABLES_NQ_RING_ID_VALID \ + UINT32_C(0x80) + /* + * This bit must be '1' for the rx_buf_size field to be + * configured. + */ + #define HWRM_RING_ALLOC_INPUT_ENABLES_RX_BUF_SIZE_VALID \ + UINT32_C(0x100) + /* Ring Type. */ + uint8_t ring_type; + /* L2 Completion Ring (CR) */ + #define HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL UINT32_C(0x0) + /* TX Ring (TR) */ + #define HWRM_RING_ALLOC_INPUT_RING_TYPE_TX UINT32_C(0x1) + /* RX Ring (RR) */ + #define HWRM_RING_ALLOC_INPUT_RING_TYPE_RX UINT32_C(0x2) + /* RoCE Notification Completion Ring (ROCE_CR) */ + #define HWRM_RING_ALLOC_INPUT_RING_TYPE_ROCE_CMPL UINT32_C(0x3) + /* RX Aggregation Ring */ + #define HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG UINT32_C(0x4) + /* Notification Queue */ + #define HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ UINT32_C(0x5) + #define HWRM_RING_ALLOC_INPUT_RING_TYPE_LAST \ + HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ + uint8_t unused_0; + /* Ring allocation flags. */ + uint16_t flags; + /* + * For Rx rings, the incoming packet data can be placed at either + * a 0B or 2B offset from the start of the Rx packet buffer. When + * '1', the received packet will be padded with 2B of zeros at the + * front of the packet. Note that this flag is only used for + * Rx rings and is ignored for all other rings included Rx + * Aggregation rings. + */ + #define HWRM_RING_ALLOC_INPUT_FLAGS_RX_SOP_PAD UINT32_C(0x1) + /* + * This value is a pointer to the page table for the + * Ring. + */ + uint64_t page_tbl_addr; + /* First Byte Offset of the first entry in the first page. */ + uint32_t fbo; + /* + * Actual page size in 2^page_size. The supported range is increments + * in powers of 2 from 16 bytes to 1GB. + * - 4 = 16 B + * Page size is 16 B. + * - 12 = 4 KB + * Page size is 4 KB. + * - 13 = 8 KB + * Page size is 8 KB. + * - 16 = 64 KB + * Page size is 64 KB. + * - 21 = 2 MB + * Page size is 2 MB. + * - 22 = 4 MB + * Page size is 4 MB. + * - 30 = 1 GB + * Page size is 1 GB. + */ + uint8_t page_size; + /* + * This value indicates the depth of page table. + * For this version of the specification, value other than 0 or + * 1 shall be considered as an invalid value. + * When the page_tbl_depth = 0, then it is treated as a + * special case with the following. + * 1. FBO and page size fields are not valid. + * 2. page_tbl_addr is the physical address of the first + * element of the ring. + */ + uint8_t page_tbl_depth; + uint8_t unused_1[2]; + /* + * Number of 16B units in the ring. Minimum size for + * a ring is 16 16B entries. + */ + uint32_t length; + /* + * Logical ring number for the ring to be allocated. + * This value determines the position in the doorbell + * area where the update to the ring will be made. + * + * For completion rings, this value is also the MSI-X + * vector number for the function the completion ring is + * associated with. + */ + uint16_t logical_id; + /* + * This field is used only when ring_type is a TX ring. + * This value indicates what completion ring the TX ring + * is associated with. 
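+ *
+ * Illustrative note (editor's sketch, not part of the upstream HWRM
+ * definition): for a physically contiguous TX ring, page_tbl_depth of 0
+ * lets page_tbl_addr point directly at the ring, and length is expressed
+ * in 16-byte units as described above. The helper name and parameters
+ * are hypothetical; every structure, field and #define name used below
+ * is declared in this file.
+ *
+ *   #include <string.h>
+ *   #include <rte_byteorder.h>
+ *
+ *   static void
+ *   fill_tx_ring_alloc(struct hwrm_ring_alloc_input *req,
+ *                      uint64_t ring_iova, uint32_t nb_16b_units,
+ *                      uint16_t logical_id, uint16_t cp_ring_id,
+ *                      uint16_t cos_queue_id, uint32_t stat_ctx_id)
+ *   {
+ *           memset(req, 0, sizeof(*req));
+ *           req->ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_TX;
+ *           req->page_tbl_depth = 0;   // page_tbl_addr is the ring itself
+ *           req->page_tbl_addr = rte_cpu_to_le_64(ring_iova);
+ *           req->length = rte_cpu_to_le_32(nb_16b_units);
+ *           req->logical_id = rte_cpu_to_le_16(logical_id);
+ *           req->cmpl_ring_id = rte_cpu_to_le_16(cp_ring_id);
+ *           req->queue_id = rte_cpu_to_le_16(cos_queue_id);
+ *           req->enables = rte_cpu_to_le_32(
+ *                   HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID);
+ *           req->stat_ctx_id = rte_cpu_to_le_32(stat_ctx_id);
+ *   }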
+ */ + uint16_t cmpl_ring_id; + /* + * This field is used only when ring_type is a TX ring. + * This value indicates what CoS queue the TX ring + * is associated with. + */ + uint16_t queue_id; + /* + * When allocating a Rx ring or Rx aggregation ring, this field + * specifies the size of the buffer descriptors posted to the ring. + */ + uint16_t rx_buf_size; + /* + * When allocating an Rx aggregation ring, this field + * specifies the associated Rx ring ID. + */ + uint16_t rx_ring_id; + /* + * When allocating a completion ring, this field + * specifies the associated NQ ring ID. + */ + uint16_t nq_ring_id; + /* + * This field is used only when ring_type is a TX ring. + * This field is used to configure arbitration related + * parameters for a TX ring. + */ + uint16_t ring_arb_cfg; + /* Arbitration policy used for the ring. */ + #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_MASK \ + UINT32_C(0xf) + #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_SFT 0 + /* + * Use strict priority for the TX ring. + * Priority value is specified in arb_policy_param + */ + #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_SP \ + UINT32_C(0x1) + /* + * Use weighted fair queue arbitration for the TX ring. + * Weight is specified in arb_policy_param + */ + #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_WFQ \ + UINT32_C(0x2) + #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_LAST \ + HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_WFQ + /* Reserved field. */ + #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_RSVD_MASK \ + UINT32_C(0xf0) + #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_RSVD_SFT 4 + /* + * Arbitration policy specific parameter. + * # For strict priority arbitration policy, this field + * represents a priority value. If set to 0, then the priority + * is not specified and the HWRM is allowed to select + * any priority for this TX ring. + * # For weighted fair queue arbitration policy, this field + * represents a weight value. If set to 0, then the weight + * is not specified and the HWRM is allowed to select + * any weight for this TX ring. + */ + #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_PARAM_MASK \ + UINT32_C(0xff00) + #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_PARAM_SFT 8 + uint16_t unused_3; + /* + * This field is reserved for the future use. + * It shall be set to 0. + */ + uint32_t reserved3; + /* + * This field is used only when ring_type is a TX ring. + * This input indicates what statistics context this ring + * should be associated with. + */ + uint32_t stat_ctx_id; + /* + * This field is reserved for the future use. + * It shall be set to 0. + */ + uint32_t reserved4; + /* + * This field is used only when ring_type is a TX ring + * to specify maximum BW allocated to the TX ring. + * The HWRM will translate this value into byte counter and + * time interval used for this ring inside the device. + */ + uint32_t max_bw; + /* The bandwidth value. */ + #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_SFT 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_RING_ALLOC_INPUT_MAX_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_RING_ALLOC_INPUT_MAX_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. 
*/ + #define HWRM_RING_ALLOC_INPUT_MAX_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_RING_ALLOC_INPUT_MAX_BW_SCALE_LAST \ + HWRM_RING_ALLOC_INPUT_MAX_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_SFT 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_LAST \ + HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_INVALID + /* + * This field is used only when ring_type is a Completion ring. + * This value indicates what interrupt mode should be used + * on this completion ring. + * Note: In the legacy interrupt mode, no more than 16 + * completion rings are allowed. + */ + uint8_t int_mode; + /* Legacy INTA */ + #define HWRM_RING_ALLOC_INPUT_INT_MODE_LEGACY UINT32_C(0x0) + /* Reserved */ + #define HWRM_RING_ALLOC_INPUT_INT_MODE_RSVD UINT32_C(0x1) + /* MSI-X */ + #define HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX UINT32_C(0x2) + /* No Interrupt - Polled mode */ + #define HWRM_RING_ALLOC_INPUT_INT_MODE_POLL UINT32_C(0x3) + #define HWRM_RING_ALLOC_INPUT_INT_MODE_LAST \ + HWRM_RING_ALLOC_INPUT_INT_MODE_POLL + uint8_t unused_4[3]; + /* + * The cq_handle is specified when allocating a completion ring. For + * devices that support NQs, this cq_handle will be included in the + * NQE to specify which CQ should be read to retrieve the completion + * record. + */ + uint64_t cq_handle; +} __rte_packed; + +/* hwrm_ring_alloc_output (size:128b/16B) */ +struct hwrm_ring_alloc_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* + * Physical number of ring allocated. + * This value shall be unique for a ring type. + */ + uint16_t ring_id; + /* Logical number of ring allocated. */ + uint16_t logical_ring_id; + uint8_t unused_0[3]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/****************** + * hwrm_ring_free * + ******************/ + + +/* hwrm_ring_free_input (size:192b/24B) */ +struct hwrm_ring_free_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. 
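+ *
+ * Illustrative note (editor's sketch, not part of the upstream HWRM
+ * definition): freeing a ring only needs the ring type and the physical
+ * ring_id that hwrm_ring_alloc returned; since both fields are little
+ * endian on the wire, the id can be copied through unchanged. The helper
+ * name is hypothetical; the structures are declared in this file.
+ *
+ *   #include <string.h>
+ *
+ *   static void
+ *   fill_ring_free(struct hwrm_ring_free_input *req,
+ *                  const struct hwrm_ring_alloc_output *alloc_resp,
+ *                  uint8_t ring_type)
+ *   {
+ *           memset(req, 0, sizeof(*req));
+ *           req->ring_type = ring_type;  // e.g. HWRM_RING_FREE_INPUT_RING_TYPE_TX
+ *           req->ring_id = alloc_resp->ring_id;  // already little endian
+ *   }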
+ */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* Ring Type. */ + uint8_t ring_type; + /* L2 Completion Ring (CR) */ + #define HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL UINT32_C(0x0) + /* TX Ring (TR) */ + #define HWRM_RING_FREE_INPUT_RING_TYPE_TX UINT32_C(0x1) + /* RX Ring (RR) */ + #define HWRM_RING_FREE_INPUT_RING_TYPE_RX UINT32_C(0x2) + /* RoCE Notification Completion Ring (ROCE_CR) */ + #define HWRM_RING_FREE_INPUT_RING_TYPE_ROCE_CMPL UINT32_C(0x3) + /* RX Aggregation Ring */ + #define HWRM_RING_FREE_INPUT_RING_TYPE_RX_AGG UINT32_C(0x4) + /* Notification Queue */ + #define HWRM_RING_FREE_INPUT_RING_TYPE_NQ UINT32_C(0x5) + #define HWRM_RING_FREE_INPUT_RING_TYPE_LAST \ + HWRM_RING_FREE_INPUT_RING_TYPE_NQ + uint8_t unused_0; + /* Physical number of ring allocated. */ + uint16_t ring_id; + uint8_t unused_1[4]; +} __rte_packed; + +/* hwrm_ring_free_output (size:128b/16B) */ +struct hwrm_ring_free_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/******************* + * hwrm_ring_reset * + *******************/ + + +/* hwrm_ring_reset_input (size:192b/24B) */ +struct hwrm_ring_reset_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* Ring Type. 
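A minimal sketch of filling hwrm_ring_free_input for an RX ring; the function name is invented, HWRM_RING_FREE is assumed to be the request-type constant defined earlier in this header, and the common header fields are left to whatever transport code actually sends the message.

static void example_fill_ring_free_rx(struct hwrm_ring_free_input *req,
				      uint16_t ring_id)
{
	req->req_type = rte_cpu_to_le_16(HWRM_RING_FREE);
	req->ring_type = HWRM_RING_FREE_INPUT_RING_TYPE_RX;
	req->ring_id = rte_cpu_to_le_16(ring_id);
	/* cmpl_ring, seq_id, target_id and resp_addr are filled by the
	 * code that queues the request to firmware.
	 */
}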
*/ + uint8_t ring_type; + /* L2 Completion Ring (CR) */ + #define HWRM_RING_RESET_INPUT_RING_TYPE_L2_CMPL UINT32_C(0x0) + /* TX Ring (TR) */ + #define HWRM_RING_RESET_INPUT_RING_TYPE_TX UINT32_C(0x1) + /* RX Ring (RR) */ + #define HWRM_RING_RESET_INPUT_RING_TYPE_RX UINT32_C(0x2) + /* RoCE Notification Completion Ring (ROCE_CR) */ + #define HWRM_RING_RESET_INPUT_RING_TYPE_ROCE_CMPL UINT32_C(0x3) + #define HWRM_RING_RESET_INPUT_RING_TYPE_LAST \ + HWRM_RING_RESET_INPUT_RING_TYPE_ROCE_CMPL + uint8_t unused_0; + /* Physical number of the ring. */ + uint16_t ring_id; + uint8_t unused_1[4]; +} __rte_packed; + +/* hwrm_ring_reset_output (size:128b/16B) */ +struct hwrm_ring_reset_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[4]; + /* Position of consumer index after ring reset completes. */ + uint8_t consumer_idx[3]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/************************** + * hwrm_ring_aggint_qcaps * + **************************/ + + +/* hwrm_ring_aggint_qcaps_input (size:128b/16B) */ +struct hwrm_ring_aggint_qcaps_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; +} __rte_packed; + +/* hwrm_ring_aggint_qcaps_output (size:384b/48B) */ +struct hwrm_ring_aggint_qcaps_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint32_t cmpl_params; + /* + * When this bit is set to '1', int_lat_tmr_min can be configured + * on completion rings. + */ + #define HWRM_RING_AGGINT_QCAPS_OUTPUT_CMPL_PARAMS_INT_LAT_TMR_MIN \ + UINT32_C(0x1) + /* + * When this bit is set to '1', int_lat_tmr_max can be configured + * on completion rings. + */ + #define HWRM_RING_AGGINT_QCAPS_OUTPUT_CMPL_PARAMS_INT_LAT_TMR_MAX \ + UINT32_C(0x2) + /* + * When this bit is set to '1', timer_reset can be enabled + * on completion rings. 
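The consumer_idx field above is a 24-bit quantity returned as three bytes; assuming the little-endian convention used for the other HWRM fields, it can be reassembled as in this sketch (function name invented).

static uint32_t example_ring_reset_consumer_idx(
		const struct hwrm_ring_reset_output *resp)
{
	return (uint32_t)resp->consumer_idx[0] |
	       ((uint32_t)resp->consumer_idx[1] << 8) |
	       ((uint32_t)resp->consumer_idx[2] << 16);
}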
+ */ + #define HWRM_RING_AGGINT_QCAPS_OUTPUT_CMPL_PARAMS_TIMER_RESET \ + UINT32_C(0x4) + /* + * When this bit is set to '1', ring_idle can be enabled + * on completion rings. + */ + #define HWRM_RING_AGGINT_QCAPS_OUTPUT_CMPL_PARAMS_RING_IDLE \ + UINT32_C(0x8) + /* + * When this bit is set to '1', num_cmpl_dma_aggr can be configured + * on completion rings. + */ + #define HWRM_RING_AGGINT_QCAPS_OUTPUT_CMPL_PARAMS_NUM_CMPL_DMA_AGGR \ + UINT32_C(0x10) + /* + * When this bit is set to '1', num_cmpl_dma_aggr_during_int can be configured + * on completion rings. + */ + #define HWRM_RING_AGGINT_QCAPS_OUTPUT_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT \ + UINT32_C(0x20) + /* + * When this bit is set to '1', cmpl_aggr_dma_tmr can be configured + * on completion rings. + */ + #define HWRM_RING_AGGINT_QCAPS_OUTPUT_CMPL_PARAMS_CMPL_AGGR_DMA_TMR \ + UINT32_C(0x40) + /* + * When this bit is set to '1', cmpl_aggr_dma_tmr_during_int can be configured + * on completion rings. + */ + #define HWRM_RING_AGGINT_QCAPS_OUTPUT_CMPL_PARAMS_CMPL_AGGR_DMA_TMR_DURING_INT \ + UINT32_C(0x80) + /* + * When this bit is set to '1', num_cmpl_aggr_int can be configured + * on completion rings. + */ + #define HWRM_RING_AGGINT_QCAPS_OUTPUT_CMPL_PARAMS_NUM_CMPL_AGGR_INT \ + UINT32_C(0x100) + uint32_t nq_params; + /* + * When this bit is set to '1', int_lat_tmr_min can be configured + * on notification queues. + */ + #define HWRM_RING_AGGINT_QCAPS_OUTPUT_NQ_PARAMS_INT_LAT_TMR_MIN \ + UINT32_C(0x1) + /* Minimum value for num_cmpl_dma_aggr */ + uint16_t num_cmpl_dma_aggr_min; + /* Maximum value for num_cmpl_dma_aggr */ + uint16_t num_cmpl_dma_aggr_max; + /* Minimum value for num_cmpl_dma_aggr_during_int */ + uint16_t num_cmpl_dma_aggr_during_int_min; + /* Maximum value for num_cmpl_dma_aggr_during_int */ + uint16_t num_cmpl_dma_aggr_during_int_max; + /* Minimum value for cmpl_aggr_dma_tmr */ + uint16_t cmpl_aggr_dma_tmr_min; + /* Maximum value for cmpl_aggr_dma_tmr */ + uint16_t cmpl_aggr_dma_tmr_max; + /* Minimum value for cmpl_aggr_dma_tmr_during_int */ + uint16_t cmpl_aggr_dma_tmr_during_int_min; + /* Maximum value for cmpl_aggr_dma_tmr_during_int */ + uint16_t cmpl_aggr_dma_tmr_during_int_max; + /* Minimum value for int_lat_tmr_min */ + uint16_t int_lat_tmr_min_min; + /* Maximum value for int_lat_tmr_min */ + uint16_t int_lat_tmr_min_max; + /* Minimum value for int_lat_tmr_max */ + uint16_t int_lat_tmr_max_min; + /* Maximum value for int_lat_tmr_max */ + uint16_t int_lat_tmr_max_max; + /* Minimum value for num_cmpl_aggr_int */ + uint16_t num_cmpl_aggr_int_min; + /* Maximum value for num_cmpl_aggr_int */ + uint16_t num_cmpl_aggr_int_max; + /* The units for timer parameters, in nanoseconds. */ + uint16_t timer_units; + uint8_t unused_0[1]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/************************************** + * hwrm_ring_cmpl_ring_qaggint_params * + **************************************/ + + +/* hwrm_ring_cmpl_ring_qaggint_params_input (size:192b/24B) */ +struct hwrm_ring_cmpl_ring_qaggint_params_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. 
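Since each interrupt-aggregation parameter is only meaningful when its capability bit is set in cmpl_params, a driver would typically gate and clamp its requested values against this response. A hedged sketch (helper name invented):

static uint16_t example_clamp_int_lat_tmr_min(
		const struct hwrm_ring_aggint_qcaps_output *qcaps,
		uint16_t requested)
{
	uint32_t params = rte_le_to_cpu_32(qcaps->cmpl_params);
	uint16_t lo = rte_le_to_cpu_16(qcaps->int_lat_tmr_min_min);
	uint16_t hi = rte_le_to_cpu_16(qcaps->int_lat_tmr_min_max);

	if (!(params & HWRM_RING_AGGINT_QCAPS_OUTPUT_CMPL_PARAMS_INT_LAT_TMR_MIN))
		return 0;	/* parameter not configurable on this device */
	if (requested < lo)
		return lo;
	if (requested > hi)
		return hi;
	return requested;
}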
This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* Physical number of completion ring. */ + uint16_t ring_id; + uint8_t unused_0[6]; +} __rte_packed; + +/* hwrm_ring_cmpl_ring_qaggint_params_output (size:256b/32B) */ +struct hwrm_ring_cmpl_ring_qaggint_params_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint16_t flags; + /* + * When this bit is set to '1', interrupt max + * timer is reset whenever a completion is received. + */ + #define HWRM_RING_CMPL_RING_QAGGINT_PARAMS_OUTPUT_FLAGS_TIMER_RESET \ + UINT32_C(0x1) + /* + * When this bit is set to '1', ring idle mode + * aggregation will be enabled. + */ + #define HWRM_RING_CMPL_RING_QAGGINT_PARAMS_OUTPUT_FLAGS_RING_IDLE \ + UINT32_C(0x2) + /* + * Number of completions to aggregate before DMA + * during the normal mode. + */ + uint16_t num_cmpl_dma_aggr; + /* + * Number of completions to aggregate before DMA + * during the interrupt mode. + */ + uint16_t num_cmpl_dma_aggr_during_int; + /* + * Timer in unit of 80-nsec used to aggregate completions before + * DMA during the normal mode (not in interrupt mode). + */ + uint16_t cmpl_aggr_dma_tmr; + /* + * Timer in unit of 80-nsec used to aggregate completions before + * DMA during the interrupt mode. + */ + uint16_t cmpl_aggr_dma_tmr_during_int; + /* Minimum time (in unit of 80-nsec) between two interrupts. */ + uint16_t int_lat_tmr_min; + /* + * Maximum wait time (in unit of 80-nsec) spent aggregating + * completions before signaling the interrupt after the + * interrupt is enabled. + */ + uint16_t int_lat_tmr_max; + /* + * Minimum number of completions aggregated before signaling + * an interrupt. + */ + uint16_t num_cmpl_aggr_int; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/***************************************** + * hwrm_ring_cmpl_ring_cfg_aggint_params * + *****************************************/ + + +/* hwrm_ring_cmpl_ring_cfg_aggint_params_input (size:320b/40B) */ +struct hwrm_ring_cmpl_ring_cfg_aggint_params_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. 
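The timer fields above are expressed in ticks (80 ns by default, or whatever timer_units the hwrm_ring_aggint_qcaps response reports). A small illustrative conversion helper, name invented, written under that assumption:

static uint16_t example_usec_to_timer_ticks(uint32_t usec,
					    uint16_t timer_units_ns)
{
	/* ns per tick: fall back to the 80 ns default if qcaps reported 0 */
	uint32_t unit = timer_units_ns ? timer_units_ns : 80;
	uint32_t ticks = (usec * 1000U) / unit;

	/* UINT16_MAX comes from <stdint.h>; clamp to the 16-bit field width */
	return ticks > UINT16_MAX ? UINT16_MAX : (uint16_t)ticks;
}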
This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* Physical number of completion ring. */ + uint16_t ring_id; + uint16_t flags; + /* + * When this bit is set to '1', interrupt latency max + * timer is reset whenever a completion is received. + */ + #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET \ + UINT32_C(0x1) + /* + * When this bit is set to '1', ring idle mode + * aggregation will be enabled. + */ + #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_RING_IDLE \ + UINT32_C(0x2) + /* + * Set this flag to 1 when configuring parameters on a + * notification queue. Set this flag to 0 when configuring + * parameters on a completion queue. + */ + #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_IS_NQ \ + UINT32_C(0x4) + /* + * Number of completions to aggregate before DMA + * during the normal mode. + */ + uint16_t num_cmpl_dma_aggr; + /* + * Number of completions to aggregate before DMA + * during the interrupt mode. + */ + uint16_t num_cmpl_dma_aggr_during_int; + /* + * Timer in unit of 80-nsec used to aggregate completions before + * DMA during the normal mode (not in interrupt mode). + */ + uint16_t cmpl_aggr_dma_tmr; + /* + * Timer in unit of 80-nsec used to aggregate completions before + * DMA during the interrupt mode. + */ + uint16_t cmpl_aggr_dma_tmr_during_int; + /* Minimum time (in unit of 80-nsec) between two interrupts. */ + uint16_t int_lat_tmr_min; + /* + * Maximum wait time (in unit of 80-nsec) spent aggregating + * cmpls before signaling the interrupt after the + * interrupt is enabled. + */ + uint16_t int_lat_tmr_max; + /* + * Minimum number of completions aggregated before signaling + * an interrupt. + */ + uint16_t num_cmpl_aggr_int; + /* + * Bitfield that indicates which parameters are to be applied. Only + * required when configuring devices with notification queues, and + * used in that case to set certain parameters on completion queues + * and others on notification queues. + */ + uint16_t enables; + /* + * This bit must be '1' for the num_cmpl_dma_aggr field to be + * configured. + */ + #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_NUM_CMPL_DMA_AGGR \ + UINT32_C(0x1) + /* + * This bit must be '1' for the num_cmpl_dma_aggr_during_int field to be + * configured. + */ + #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_NUM_CMPL_DMA_AGGR_DURING_INT \ + UINT32_C(0x2) + /* + * This bit must be '1' for the cmpl_aggr_dma_tmr field to be + * configured. + */ + #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_CMPL_AGGR_DMA_TMR \ + UINT32_C(0x4) + /* + * This bit must be '1' for the int_lat_tmr_min field to be + * configured. 
+ */ + #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_INT_LAT_TMR_MIN \ + UINT32_C(0x8) + /* + * This bit must be '1' for the int_lat_tmr_max field to be + * configured. + */ + #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_INT_LAT_TMR_MAX \ + UINT32_C(0x10) + /* + * This bit must be '1' for the num_cmpl_aggr_int field to be + * configured. + */ + #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_NUM_CMPL_AGGR_INT \ + UINT32_C(0x20) + uint8_t unused_0[4]; +} __rte_packed; + +/* hwrm_ring_cmpl_ring_cfg_aggint_params_output (size:128b/16B) */ +struct hwrm_ring_cmpl_ring_cfg_aggint_params_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/*********************** + * hwrm_ring_grp_alloc * + ***********************/ + + +/* hwrm_ring_grp_alloc_input (size:192b/24B) */ +struct hwrm_ring_grp_alloc_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* + * This value identifies the CR associated with the ring + * group. + */ + uint16_t cr; + /* + * This value identifies the main RR associated with the ring + * group. + */ + uint16_t rr; + /* + * This value identifies the aggregation RR associated with + * the ring group. If this value is 0xFF... (All Fs), then no + * Aggregation ring will be set. + */ + uint16_t ar; + /* + * This value identifies the statistics context associated + * with the ring group. + */ + uint16_t sc; +} __rte_packed; + +/* hwrm_ring_grp_alloc_output (size:128b/16B) */ +struct hwrm_ring_grp_alloc_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* + * This is the ring group ID value. Use this value to program + * the default ring group for the VNIC or as table entries + * in an RSS/COS context. 
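A sketch of filling hwrm_ring_grp_alloc_input when no aggregation ring is used; the helper name and the EXAMPLE_RING_AGG_INVALID constant are invented here, and the CR/RR/SC IDs are the ones returned by the earlier ring and stat-context allocation commands.

#define EXAMPLE_RING_AGG_INVALID 0xFFFF	/* "all Fs": no aggregation ring */

static void example_fill_ring_grp_alloc(struct hwrm_ring_grp_alloc_input *req,
					uint16_t cr, uint16_t rr, uint16_t sc)
{
	req->cr = rte_cpu_to_le_16(cr);
	req->rr = rte_cpu_to_le_16(rr);
	req->ar = rte_cpu_to_le_16(EXAMPLE_RING_AGG_INVALID);
	req->sc = rte_cpu_to_le_16(sc);
}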
+ */ + uint32_t ring_group_id; + uint8_t unused_0[3]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/********************** + * hwrm_ring_grp_free * + **********************/ + + +/* hwrm_ring_grp_free_input (size:192b/24B) */ +struct hwrm_ring_grp_free_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* This is the ring group ID value. */ + uint32_t ring_group_id; + uint8_t unused_0[4]; +} __rte_packed; + +/* hwrm_ring_grp_free_output (size:128b/16B) */ +struct hwrm_ring_grp_free_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; +/* + * special reserved flow ID to identify per function default + * flows for vSwitch offload + */ +#define DEFAULT_FLOW_ID 0xFFFFFFFFUL +/* + * special reserved flow ID to identify per function RoCEv1 + * flows + */ +#define ROCEV1_FLOW_ID 0xFFFFFFFEUL +/* + * special reserved flow ID to identify per function RoCEv2 + * flows + */ +#define ROCEV2_FLOW_ID 0xFFFFFFFDUL +/* + * special reserved flow ID to identify per function RoCEv2 + * CNP flows + */ +#define ROCEV2_CNP_FLOW_ID 0xFFFFFFFCUL + +/**************************** + * hwrm_cfa_l2_filter_alloc * + ****************************/ + + +/* hwrm_cfa_l2_filter_alloc_input (size:768b/96B) */ +struct hwrm_cfa_l2_filter_alloc_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. 
This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + uint32_t flags; + /* + * Enumeration denoting the RX, TX type of the resource. + * This enumeration is used for resources that are similar for both + * TX and RX paths of the chip. + */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH \ + UINT32_C(0x1) + /* tx path */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_TX \ + UINT32_C(0x0) + /* rx path */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX \ + UINT32_C(0x1) + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_LAST \ + HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX + /* Setting of this flag indicates the applicability to the loopback path. */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_LOOPBACK \ + UINT32_C(0x2) + /* + * Setting of this flag indicates drop action. If this flag is not set, + * then it should be considered accept action. + */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_DROP \ + UINT32_C(0x4) + /* + * If this flag is set, all t_l2_* fields are invalid + * and they should not be specified. + * If this flag is set, then l2_* fields refer to + * fields of outermost L2 header. + */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST \ + UINT32_C(0x8) + /* + * Enumeration denoting NO_ROCE_L2 to support old drivers. + * New driver L2 for only L2 traffic, ROCE for roce and l2 traffic + */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_TRAFFIC_MASK \ + UINT32_C(0x30) + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_TRAFFIC_SFT 4 + /* To support old drivers */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_TRAFFIC_NO_ROCE_L2 \ + (UINT32_C(0x0) << 4) + /* Only L2 traffic */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_TRAFFIC_L2 \ + (UINT32_C(0x1) << 4) + /* Roce & L2 traffic */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_TRAFFIC_ROCE \ + (UINT32_C(0x2) << 4) + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_TRAFFIC_LAST \ + HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_TRAFFIC_ROCE + /* + * Setting of this flag indicates that no XDP filter is created with + * L2 filter. + * 0 - legacy behavior, XDP filter is created with L2 filter + * 1 - XDP filter won't be created with L2 filter + */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_XDP_DISABLE \ + UINT32_C(0x40) + /* + * Setting this flag to 1 indicate the L2 fields in this command + * pertain to source fields. Setting this flag to 0 indicate the + * L2 fields in this command pertain to the destination fields + * and this is the default/legacy behavior. + */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_SOURCE_VALID \ + UINT32_C(0x80) + uint32_t enables; + /* + * This bit must be '1' for the l2_addr field to be + * configured. + */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR \ + UINT32_C(0x1) + /* + * This bit must be '1' for the l2_addr_mask field to be + * configured. 
+ */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK \ + UINT32_C(0x2) + /* + * This bit must be '1' for the l2_ovlan field to be + * configured. + */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN \ + UINT32_C(0x4) + /* + * This bit must be '1' for the l2_ovlan_mask field to be + * configured. + */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK \ + UINT32_C(0x8) + /* + * This bit must be '1' for the l2_ivlan field to be + * configured. + */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN \ + UINT32_C(0x10) + /* + * This bit must be '1' for the l2_ivlan_mask field to be + * configured. + */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK \ + UINT32_C(0x20) + /* + * This bit must be '1' for the t_l2_addr field to be + * configured. + */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_ADDR \ + UINT32_C(0x40) + /* + * This bit must be '1' for the t_l2_addr_mask field to be + * configured. + */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_ADDR_MASK \ + UINT32_C(0x80) + /* + * This bit must be '1' for the t_l2_ovlan field to be + * configured. + */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_OVLAN \ + UINT32_C(0x100) + /* + * This bit must be '1' for the t_l2_ovlan_mask field to be + * configured. + */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_OVLAN_MASK \ + UINT32_C(0x200) + /* + * This bit must be '1' for the t_l2_ivlan field to be + * configured. + */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_IVLAN \ + UINT32_C(0x400) + /* + * This bit must be '1' for the t_l2_ivlan_mask field to be + * configured. + */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_IVLAN_MASK \ + UINT32_C(0x800) + /* + * This bit must be '1' for the src_type field to be + * configured. + */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE \ + UINT32_C(0x1000) + /* + * This bit must be '1' for the src_id field to be + * configured. + */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID \ + UINT32_C(0x2000) + /* + * This bit must be '1' for the tunnel_type field to be + * configured. + */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE \ + UINT32_C(0x4000) + /* + * This bit must be '1' for the dst_id field to be + * configured. + */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID \ + UINT32_C(0x8000) + /* + * This bit must be '1' for the mirror_vnic_id field to be + * configured. + */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID \ + UINT32_C(0x10000) + /* + * This bit must be '1' for the num_vlans field to be + * configured. + */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_NUM_VLANS \ + UINT32_C(0x20000) + /* + * This bit must be '1' for the t_num_vlans field to be + * configured. + */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_NUM_VLANS \ + UINT32_C(0x40000) + /* + * This value sets the match value for the L2 MAC address. + * Destination MAC address for RX path. + * Source MAC address for TX path. + */ + uint8_t l2_addr[6]; + /* This value sets the match value for the number of VLANs. */ + uint8_t num_vlans; + /* + * This value sets the match value for the number of VLANs + * in the tunnel headers. + */ + uint8_t t_num_vlans; + /* + * This value sets the mask value for the L2 address. + * A value of 0 will mask the corresponding bit from + * compare. + */ + uint8_t l2_addr_mask[6]; + /* This value sets VLAN ID value for outer VLAN. */ + uint16_t l2_ovlan; + /* + * This value sets the mask value for the ovlan id. 
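As a hedged illustration of the enables/flags pattern, the sketch below programs an exact-match RX filter on a single destination MAC. The function name is invented, memcpy()/memset() come from <string.h>, and all other fields are assumed to stay zero.

static void example_fill_l2_filter_exact_dmac(
		struct hwrm_cfa_l2_filter_alloc_input *req,
		const uint8_t mac[6])
{
	memcpy(req->l2_addr, mac, 6);
	memset(req->l2_addr_mask, 0xff, 6);	/* compare every address bit */
	req->flags = rte_cpu_to_le_32(
		HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX);
	req->enables = rte_cpu_to_le_32(
		HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
		HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK);
}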
+ * A value of 0 will mask the corresponding bit from + * compare. + */ + uint16_t l2_ovlan_mask; + /* This value sets VLAN ID value for inner VLAN. */ + uint16_t l2_ivlan; + /* + * This value sets the mask value for the ivlan id. + * A value of 0 will mask the corresponding bit from + * compare. + */ + uint16_t l2_ivlan_mask; + uint8_t unused_1[2]; + /* + * This value sets the match value for the tunnel + * L2 MAC address. + * Destination MAC address for RX path. + * Source MAC address for TX path. + */ + uint8_t t_l2_addr[6]; + uint8_t unused_2[2]; + /* + * This value sets the mask value for the tunnel L2 + * address. + * A value of 0 will mask the corresponding bit from + * compare. + */ + uint8_t t_l2_addr_mask[6]; + /* This value sets VLAN ID value for tunnel outer VLAN. */ + uint16_t t_l2_ovlan; + /* + * This value sets the mask value for the tunnel ovlan id. + * A value of 0 will mask the corresponding bit from + * compare. + */ + uint16_t t_l2_ovlan_mask; + /* This value sets VLAN ID value for tunnel inner VLAN. */ + uint16_t t_l2_ivlan; + /* + * This value sets the mask value for the tunnel ivlan id. + * A value of 0 will mask the corresponding bit from + * compare. + */ + uint16_t t_l2_ivlan_mask; + /* This value identifies the type of source of the packet. */ + uint8_t src_type; + /* Network port */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_NPORT UINT32_C(0x0) + /* Physical function */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_PF UINT32_C(0x1) + /* Virtual function */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_VF UINT32_C(0x2) + /* Virtual NIC of a function */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_VNIC UINT32_C(0x3) + /* Embedded processor for CFA management */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_KONG UINT32_C(0x4) + /* Embedded processor for OOB management */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_APE UINT32_C(0x5) + /* Embedded processor for RoCE */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_BONO UINT32_C(0x6) + /* Embedded processor for network proxy functions */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_TANG UINT32_C(0x7) + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_LAST \ + HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_TANG + uint8_t unused_3; + /* + * This value is the id of the source. + * For a network port, it represents port_id. + * For a physical function, it represents fid. + * For a virtual function, it represents vf_id. + * For a vnic, it represents vnic_id. + * For embedded processors, this id is not valid. + * + * Notes: + * 1. The function ID is implied if it src_id is + * not provided for a src_type that is either + */ + uint32_t src_id; + /* Tunnel Type. 
*/ + uint8_t tunnel_type; + /* Non-tunnel */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_NONTUNNEL \ + UINT32_C(0x0) + /* Virtual eXtensible Local Area Network (VXLAN) */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_VXLAN \ + UINT32_C(0x1) + /* Network Virtualization Generic Routing Encapsulation (NVGRE) */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_NVGRE \ + UINT32_C(0x2) + /* Generic Routing Encapsulation (GRE) inside Ethernet payload */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_L2GRE \ + UINT32_C(0x3) + /* IP in IP */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_IPIP \ + UINT32_C(0x4) + /* Generic Network Virtualization Encapsulation (Geneve) */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_GENEVE \ + UINT32_C(0x5) + /* Multi-Protocol Label Switching (MPLS) */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_MPLS \ + UINT32_C(0x6) + /* Stateless Transport Tunnel (STT) */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_STT \ + UINT32_C(0x7) + /* Generic Routing Encapsulation (GRE) inside IP datagram payload */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_IPGRE \ + UINT32_C(0x8) + /* IPV4 over virtual eXtensible Local Area Network (IPV4oVXLAN) */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_VXLAN_V4 \ + UINT32_C(0x9) + /* Enhance Generic Routing Encapsulation (GRE version 1) inside IP datagram payload */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_IPGRE_V1 \ + UINT32_C(0xa) + /* Use fixed layer 2 ether type of 0xFFFF */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_L2_ETYPE \ + UINT32_C(0xb) + /* IPV6 over virtual eXtensible Local Area Network with GPE header (IPV6oVXLANGPE) */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_VXLAN_GPE_V6 \ + UINT32_C(0xc) + /* Any tunneled traffic */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL \ + UINT32_C(0xff) + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_LAST \ + HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL + uint8_t unused_4; + /* + * If set, this value shall represent the + * Logical VNIC ID of the destination VNIC for the RX + * path and network port id of the destination port for + * the TX path. + */ + uint16_t dst_id; + /* + * Logical VNIC ID of the VNIC where traffic is + * mirrored. + */ + uint16_t mirror_vnic_id; + /* + * This hint is provided to help in placing + * the filter in the filter table. + */ + uint8_t pri_hint; + /* No preference */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_NO_PREFER \ + UINT32_C(0x0) + /* Above the given filter */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_ABOVE_FILTER \ + UINT32_C(0x1) + /* Below the given filter */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_BELOW_FILTER \ + UINT32_C(0x2) + /* As high as possible */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_MAX \ + UINT32_C(0x3) + /* As low as possible */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_MIN \ + UINT32_C(0x4) + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_LAST \ + HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_MIN + uint8_t unused_5; + uint32_t unused_6; + /* + * This is the ID of the filter that goes along with + * the pri_hint. + * + * This field is valid only for the following values. + * 1 - Above the given filter + * 2 - Below the given filter + */ + uint64_t l2_filter_id_hint; +} __rte_packed; + +/* hwrm_cfa_l2_filter_alloc_output (size:192b/24B) */ +struct hwrm_cfa_l2_filter_alloc_output { + /* The specific error status for the command. 
*/ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* + * This value identifies a set of CFA data structures used for an L2 + * context. + */ + uint64_t l2_filter_id; + /* + * The flow id value in bit 0-29 is the actual ID of the flow + * associated with this filter and it shall be used to match + * and associate the flow identifier returned in completion + * records. A value of 0xFFFFFFFF in the 32-bit flow_id field + * shall indicate no valid flow id. + */ + uint32_t flow_id; + /* Indicate the flow id value. */ + #define HWRM_CFA_L2_FILTER_ALLOC_OUTPUT_FLOW_ID_VALUE_MASK \ + UINT32_C(0x3fffffff) + #define HWRM_CFA_L2_FILTER_ALLOC_OUTPUT_FLOW_ID_VALUE_SFT 0 + /* Indicate type of the flow. */ + #define HWRM_CFA_L2_FILTER_ALLOC_OUTPUT_FLOW_ID_TYPE \ + UINT32_C(0x40000000) + /* + * If this bit set to 0, then it indicates that the flow is + * internal flow. + */ + #define HWRM_CFA_L2_FILTER_ALLOC_OUTPUT_FLOW_ID_TYPE_INT \ + (UINT32_C(0x0) << 30) + /* + * If this bit is set to 1, then it indicates that the flow is + * external flow. + */ + #define HWRM_CFA_L2_FILTER_ALLOC_OUTPUT_FLOW_ID_TYPE_EXT \ + (UINT32_C(0x1) << 30) + #define HWRM_CFA_L2_FILTER_ALLOC_OUTPUT_FLOW_ID_TYPE_LAST \ + HWRM_CFA_L2_FILTER_ALLOC_OUTPUT_FLOW_ID_TYPE_EXT + /* Indicate the flow direction. */ + #define HWRM_CFA_L2_FILTER_ALLOC_OUTPUT_FLOW_ID_DIR \ + UINT32_C(0x80000000) + /* If this bit set to 0, then it indicates rx flow. */ + #define HWRM_CFA_L2_FILTER_ALLOC_OUTPUT_FLOW_ID_DIR_RX \ + (UINT32_C(0x0) << 31) + /* If this bit is set to 1, then it indicates that tx flow. */ + #define HWRM_CFA_L2_FILTER_ALLOC_OUTPUT_FLOW_ID_DIR_TX \ + (UINT32_C(0x1) << 31) + #define HWRM_CFA_L2_FILTER_ALLOC_OUTPUT_FLOW_ID_DIR_LAST \ + HWRM_CFA_L2_FILTER_ALLOC_OUTPUT_FLOW_ID_DIR_TX + uint8_t unused_0[3]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/*************************** + * hwrm_cfa_l2_filter_free * + ***************************/ + + +/* hwrm_cfa_l2_filter_free_input (size:192b/24B) */ +struct hwrm_cfa_l2_filter_free_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. 
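A sketch (function name invented) of splitting the flow_id word above into its value, internal/external and RX/TX components, treating 0xFFFFFFFF as "no valid flow id" per the description:

static int example_decode_flow_id(uint32_t flow_id_le, uint32_t *value,
				  int *is_external, int *is_tx)
{
	uint32_t flow_id = rte_le_to_cpu_32(flow_id_le);

	if (flow_id == 0xFFFFFFFFU)
		return -1;	/* no valid flow id */
	*value = flow_id & HWRM_CFA_L2_FILTER_ALLOC_OUTPUT_FLOW_ID_VALUE_MASK;
	*is_external = !!(flow_id & HWRM_CFA_L2_FILTER_ALLOC_OUTPUT_FLOW_ID_TYPE);
	*is_tx = !!(flow_id & HWRM_CFA_L2_FILTER_ALLOC_OUTPUT_FLOW_ID_DIR);
	return 0;
}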
+ */ + uint64_t resp_addr; + /* + * This value identifies a set of CFA data structures used for an L2 + * context. + */ + uint64_t l2_filter_id; +} __rte_packed; + +/* hwrm_cfa_l2_filter_free_output (size:128b/16B) */ +struct hwrm_cfa_l2_filter_free_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/************************** + * hwrm_cfa_l2_filter_cfg * + **************************/ + + +/* hwrm_cfa_l2_filter_cfg_input (size:320b/40B) */ +struct hwrm_cfa_l2_filter_cfg_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + uint32_t flags; + /* + * Enumeration denoting the RX, TX type of the resource. + * This enumeration is used for resources that are similar for both + * TX and RX paths of the chip. + */ + #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH \ + UINT32_C(0x1) + /* tx path */ + #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_TX \ + UINT32_C(0x0) + /* rx path */ + #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_RX \ + UINT32_C(0x1) + #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_LAST \ + HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_RX + /* + * Setting of this flag indicates drop action. If this flag is not set, + * then it should be considered accept action. + */ + #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_DROP \ + UINT32_C(0x2) + /* + * Enumeration denoting NO_ROCE_L2 to support old drivers. 
+ * New driver L2 for only L2 traffic, ROCE for roce and l2 traffic + */ + #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_TRAFFIC_MASK \ + UINT32_C(0xc) + #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_TRAFFIC_SFT 2 + /* To support old drivers */ + #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_TRAFFIC_NO_ROCE_L2 \ + (UINT32_C(0x0) << 2) + /* Only L2 traffic */ + #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_TRAFFIC_L2 \ + (UINT32_C(0x1) << 2) + /* Roce & L2 traffic */ + #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_TRAFFIC_ROCE \ + (UINT32_C(0x2) << 2) + #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_TRAFFIC_LAST \ + HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_TRAFFIC_ROCE + uint32_t enables; + /* + * This bit must be '1' for the dst_id field to be + * configured. + */ + #define HWRM_CFA_L2_FILTER_CFG_INPUT_ENABLES_DST_ID \ + UINT32_C(0x1) + /* + * This bit must be '1' for the new_mirror_vnic_id field to be + * configured. + */ + #define HWRM_CFA_L2_FILTER_CFG_INPUT_ENABLES_NEW_MIRROR_VNIC_ID \ + UINT32_C(0x2) + /* + * This value identifies a set of CFA data structures used for an L2 + * context. + */ + uint64_t l2_filter_id; + /* + * If set, this value shall represent the + * Logical VNIC ID of the destination VNIC for the RX + * path and network port id of the destination port for + * the TX path. + */ + uint32_t dst_id; + /* + * New Logical VNIC ID of the VNIC where traffic is + * mirrored. + */ + uint32_t new_mirror_vnic_id; +} __rte_packed; + +/* hwrm_cfa_l2_filter_cfg_output (size:128b/16B) */ +struct hwrm_cfa_l2_filter_cfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/*************************** + * hwrm_cfa_l2_set_rx_mask * + ***************************/ + + +/* hwrm_cfa_l2_set_rx_mask_input (size:448b/56B) */ +struct hwrm_cfa_l2_set_rx_mask_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. 
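A minimal sketch of re-pointing an existing RX L2 filter at a new destination VNIC; the helper name is invented and l2_filter_id is the handle returned by hwrm_cfa_l2_filter_alloc.

static void example_fill_l2_filter_retarget(
		struct hwrm_cfa_l2_filter_cfg_input *req,
		uint64_t l2_filter_id, uint32_t new_dst_id)
{
	req->l2_filter_id = rte_cpu_to_le_64(l2_filter_id);
	req->dst_id = rte_cpu_to_le_32(new_dst_id);
	req->flags = rte_cpu_to_le_32(
		HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_RX);
	req->enables = rte_cpu_to_le_32(
		HWRM_CFA_L2_FILTER_CFG_INPUT_ENABLES_DST_ID);
}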
+ */ + uint64_t resp_addr; + /* VNIC ID */ + uint32_t vnic_id; + uint32_t mask; + /* + * When this bit is '1', the function is requested to accept + * multi-cast packets specified by the multicast addr table. + */ + #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST \ + UINT32_C(0x2) + /* + * When this bit is '1', the function is requested to accept + * all multi-cast packets. + */ + #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST \ + UINT32_C(0x4) + /* + * When this bit is '1', the function is requested to accept + * broadcast packets. + */ + #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST \ + UINT32_C(0x8) + /* + * When this bit is '1', the function is requested to be + * put in the promiscuous mode. + * + * The HWRM should accept any function to set up + * promiscuous mode. + * + * The HWRM shall follow the semantics below for the + * promiscuous mode support. + * # When partitioning is not enabled on a port + * (i.e. single PF on the port), then the PF shall + * be allowed to be in the promiscuous mode. When the + * PF is in the promiscuous mode, then it shall + * receive all host bound traffic on that port. + * # When partitioning is enabled on a port + * (i.e. multiple PFs per port) and a PF on that + * port is in the promiscuous mode, then the PF + * receives all traffic within that partition as + * identified by a unique identifier for the + * PF (e.g. S-Tag). If a unique outer VLAN + * for the PF is specified, then the setting of + * promiscuous mode on that PF shall result in the + * PF receiving all host bound traffic with matching + * outer VLAN. + * # A VF shall can be set in the promiscuous mode. + * In the promiscuous mode, the VF does not receive any + * traffic unless a unique outer VLAN for the + * VF is specified. If a unique outer VLAN + * for the VF is specified, then the setting of + * promiscuous mode on that VF shall result in the + * VF receiving all host bound traffic with the + * matching outer VLAN. + * # The HWRM shall allow the setting of promiscuous + * mode on a function independently from the + * promiscuous mode settings on other functions. + */ + #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS \ + UINT32_C(0x10) + /* + * If this flag is set, the corresponding RX + * filters shall be set up to cover multicast/broadcast + * filters for the outermost Layer 2 destination MAC + * address field. + */ + #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_OUTERMOST \ + UINT32_C(0x20) + /* + * If this flag is set, the corresponding RX + * filters shall be set up to cover multicast/broadcast + * filters for the VLAN-tagged packets that match the + * TPID and VID fields of VLAN tags in the VLAN tag + * table specified in this command. + */ + #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY \ + UINT32_C(0x40) + /* + * If this flag is set, the corresponding RX + * filters shall be set up to cover multicast/broadcast + * filters for non-VLAN tagged packets and VLAN-tagged + * packets that match the TPID and VID fields of VLAN + * tags in the VLAN tag table specified in this command. + */ + #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN \ + UINT32_C(0x80) + /* + * If this flag is set, the corresponding RX + * filters shall be set up to cover multicast/broadcast + * filters for non-VLAN tagged packets and VLAN-tagged + * packets matching any VLAN tag. + * + * If this flag is set, then the HWRM shall ignore + * VLAN tags specified in vlan_tag_tbl. 
+ * + * If none of vlanonly, vlan_nonvlan, and anyvlan_nonvlan + * flags is set, then the HWRM shall ignore + * VLAN tags specified in vlan_tag_tbl. + * + * The HWRM client shall set at most one flag out of + * vlanonly, vlan_nonvlan, and anyvlan_nonvlan. + */ + #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ANYVLAN_NONVLAN \ + UINT32_C(0x100) + /* This is the address for mcast address tbl. */ + uint64_t mc_tbl_addr; + /* + * This value indicates how many entries in mc_tbl are valid. + * Each entry is 6 bytes. + */ + uint32_t num_mc_entries; + uint8_t unused_0[4]; + /* + * This is the address for VLAN tag table. + * Each VLAN entry in the table is 4 bytes of a VLAN tag + * including TPID, PCP, DEI, and VID fields in network byte + * order. + */ + uint64_t vlan_tag_tbl_addr; + /* + * This value indicates how many entries in vlan_tag_tbl are + * valid. Each entry is 4 bytes. + */ + uint32_t num_vlan_tags; + uint8_t unused_1[4]; +} __rte_packed; + +/* hwrm_cfa_l2_set_rx_mask_output (size:128b/16B) */ +struct hwrm_cfa_l2_set_rx_mask_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/* hwrm_cfa_l2_set_rx_mask_cmd_err (size:64b/8B) */ +struct hwrm_cfa_l2_set_rx_mask_cmd_err { + /* + * command specific error codes that goes to + * the cmd_err field in Common HWRM Error Response. + */ + uint8_t code; + /* Unknown error */ + #define HWRM_CFA_L2_SET_RX_MASK_CMD_ERR_CODE_UNKNOWN \ + UINT32_C(0x0) + /* Unable to complete operation due to conflict with Ntuple Filter */ + #define HWRM_CFA_L2_SET_RX_MASK_CMD_ERR_CODE_NTUPLE_FILTER_CONFLICT_ERR \ + UINT32_C(0x1) + #define HWRM_CFA_L2_SET_RX_MASK_CMD_ERR_CODE_LAST \ + HWRM_CFA_L2_SET_RX_MASK_CMD_ERR_CODE_NTUPLE_FILTER_CONFLICT_ERR + uint8_t unused_0[7]; +} __rte_packed; + +/******************************* + * hwrm_cfa_vlan_antispoof_cfg * + *******************************/ + + +/* hwrm_cfa_vlan_antispoof_cfg_input (size:256b/32B) */ +struct hwrm_cfa_vlan_antispoof_cfg_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. 
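A hedged sketch of composing the mask for "broadcast plus an explicit multicast list". The function name is invented; the multicast table is an array of 6-byte MAC addresses in a DMA-able buffer whose IOVA the caller passes in, and buffer management is out of scope here.

static void example_fill_rx_mask_bcast_mcast(
		struct hwrm_cfa_l2_set_rx_mask_input *req,
		uint32_t vnic_id, uint64_t mc_tbl_iova, uint32_t n_mc)
{
	req->vnic_id = rte_cpu_to_le_32(vnic_id);
	req->mask = rte_cpu_to_le_32(
		HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST |
		HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST);
	req->mc_tbl_addr = rte_cpu_to_le_64(mc_tbl_iova);
	req->num_mc_entries = rte_cpu_to_le_32(n_mc);
}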
+ */ + uint64_t resp_addr; + /* + * Function ID of the function that is being configured. + * Only valid for a VF FID configured by the PF. + */ + uint16_t fid; + uint8_t unused_0[2]; + /* Number of VLAN entries in the vlan_tag_mask_tbl. */ + uint32_t num_vlan_entries; + /* + * The vlan_tag_mask_tbl_addr is the DMA address of the VLAN + * antispoof table. Each table entry contains the 16-bit TPID + * (0x8100 or 0x88a8 only), 16-bit VLAN ID, and a 16-bit mask, + * all in network order to match hwrm_cfa_l2_set_rx_mask. + * For an individual VLAN entry, the mask value should be 0xfff + * for the 12-bit VLAN ID. + */ + uint64_t vlan_tag_mask_tbl_addr; +} __rte_packed; + +/* hwrm_cfa_vlan_antispoof_cfg_output (size:128b/16B) */ +struct hwrm_cfa_vlan_antispoof_cfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/******************************** + * hwrm_cfa_vlan_antispoof_qcfg * + ********************************/ + + +/* hwrm_cfa_vlan_antispoof_qcfg_input (size:256b/32B) */ +struct hwrm_cfa_vlan_antispoof_qcfg_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* + * Function ID of the function that is being queried. + * Only valid for a VF FID queried by the PF. + */ + uint16_t fid; + uint8_t unused_0[2]; + /* + * Maximum number of VLAN entries the firmware is allowed to DMA + * to vlan_tag_mask_tbl. + */ + uint32_t max_vlan_entries; + /* + * The vlan_tag_mask_tbl_addr is the DMA address of the VLAN + * antispoof table to which firmware will DMA to. Each table + * entry will contain the 16-bit TPID (0x8100 or 0x88a8 only), + * 16-bit VLAN ID, and a 16-bit mask, all in network order to + * match hwrm_cfa_l2_set_rx_mask. For an individual VLAN entry, + * the mask value should be 0xfff for the 12-bit VLAN ID. + */ + uint64_t vlan_tag_mask_tbl_addr; +} __rte_packed; + +/* hwrm_cfa_vlan_antispoof_qcfg_output (size:128b/16B) */ +struct hwrm_cfa_vlan_antispoof_qcfg_output { + /* The specific error status for the command. 
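The antispoof table entry layout described above (16-bit TPID, VLAN ID and mask, each in network byte order) can be sketched as the illustrative structure below; the type name is invented, and the table itself lives in the DMA buffer pointed to by vlan_tag_mask_tbl_addr.

struct example_vlan_antispoof_entry {
	uint16_t tpid;	/* e.g. rte_cpu_to_be_16(0x8100) */
	uint16_t vid;	/* rte_cpu_to_be_16(vlan_id) */
	uint16_t mask;	/* rte_cpu_to_be_16(0x0fff) for an exact 12-bit VID match */
};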
*/ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* Number of valid entries DMAd by firmware to vlan_tag_mask_tbl. */ + uint32_t num_vlan_entries; + uint8_t unused_0[3]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/******************************** + * hwrm_cfa_tunnel_filter_alloc * + ********************************/ + + +/* hwrm_cfa_tunnel_filter_alloc_input (size:704b/88B) */ +struct hwrm_cfa_tunnel_filter_alloc_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + uint32_t flags; + /* Setting of this flag indicates the applicability to the loopback path. */ + #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_FLAGS_LOOPBACK \ + UINT32_C(0x1) + uint32_t enables; + /* + * This bit must be '1' for the l2_filter_id field to be + * configured. + */ + #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID \ + UINT32_C(0x1) + /* + * This bit must be '1' for the l2_addr field to be + * configured. + */ + #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR \ + UINT32_C(0x2) + /* + * This bit must be '1' for the l2_ivlan field to be + * configured. + */ + #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN \ + UINT32_C(0x4) + /* + * This bit must be '1' for the l3_addr field to be + * configured. + */ + #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_ENABLES_L3_ADDR \ + UINT32_C(0x8) + /* + * This bit must be '1' for the l3_addr_type field to be + * configured. + */ + #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_ENABLES_L3_ADDR_TYPE \ + UINT32_C(0x10) + /* + * This bit must be '1' for the t_l3_addr_type field to be + * configured. + */ + #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_ENABLES_T_L3_ADDR_TYPE \ + UINT32_C(0x20) + /* + * This bit must be '1' for the t_l3_addr field to be + * configured. + */ + #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_ENABLES_T_L3_ADDR \ + UINT32_C(0x40) + /* + * This bit must be '1' for the tunnel_type field to be + * configured. + */ + #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE \ + UINT32_C(0x80) + /* + * This bit must be '1' for the vni field to be + * configured. 
+ */ + #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_ENABLES_VNI \ + UINT32_C(0x100) + /* + * This bit must be '1' for the dst_vnic_id field to be + * configured. + */ + #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_ENABLES_DST_VNIC_ID \ + UINT32_C(0x200) + /* + * This bit must be '1' for the mirror_vnic_id field to be + * configured. + */ + #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID \ + UINT32_C(0x400) + /* + * This value identifies a set of CFA data structures used for an L2 + * context. + */ + uint64_t l2_filter_id; + /* + * This value sets the match value for the inner L2 + * MAC address. + * Destination MAC address for RX path. + * Source MAC address for TX path. + */ + uint8_t l2_addr[6]; + /* + * This value sets VLAN ID value for inner VLAN. + * Only 12-bits of VLAN ID are used in setting the filter. + */ + uint16_t l2_ivlan; + /* + * The value of inner destination IP address to be used in filtering. + * For IPv4, first four bytes represent the IP address. + */ + uint32_t l3_addr[4]; + /* + * The value of tunnel destination IP address to be used in filtering. + * For IPv4, first four bytes represent the IP address. + */ + uint32_t t_l3_addr[4]; + /* + * This value indicates the type of inner IP address. + * 4 - IPv4 + * 6 - IPv6 + * All others are invalid. + */ + uint8_t l3_addr_type; + /* + * This value indicates the type of tunnel IP address. + * 4 - IPv4 + * 6 - IPv6 + * All others are invalid. + */ + uint8_t t_l3_addr_type; + /* Tunnel Type. */ + uint8_t tunnel_type; + /* Non-tunnel */ + #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_TYPE_NONTUNNEL \ + UINT32_C(0x0) + /* Virtual eXtensible Local Area Network (VXLAN) */ + #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_TYPE_VXLAN \ + UINT32_C(0x1) + /* Network Virtualization Generic Routing Encapsulation (NVGRE) */ + #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_TYPE_NVGRE \ + UINT32_C(0x2) + /* Generic Routing Encapsulation (GRE) inside Ethernet payload */ + #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_TYPE_L2GRE \ + UINT32_C(0x3) + /* IP in IP */ + #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_TYPE_IPIP \ + UINT32_C(0x4) + /* Generic Network Virtualization Encapsulation (Geneve) */ + #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_TYPE_GENEVE \ + UINT32_C(0x5) + /* Multi-Protocol Label Switching (MPLS) */ + #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_TYPE_MPLS \ + UINT32_C(0x6) + /* Stateless Transport Tunnel (STT) */ + #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_TYPE_STT \ + UINT32_C(0x7) + /* Generic Routing Encapsulation (GRE) inside IP datagram payload */ + #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_TYPE_IPGRE \ + UINT32_C(0x8) + /* IPV4 over virtual eXtensible Local Area Network (IPV4oVXLAN) */ + #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_TYPE_VXLAN_V4 \ + UINT32_C(0x9) + /* Enhance Generic Routing Encapsulation (GRE version 1) inside IP datagram payload */ + #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_TYPE_IPGRE_V1 \ + UINT32_C(0xa) + /* Use fixed layer 2 ether type of 0xFFFF */ + #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_TYPE_L2_ETYPE \ + UINT32_C(0xb) + /* IPV6 over virtual eXtensible Local Area Network with GPE header (IPV6oVXLANGPE) */ + #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_TYPE_VXLAN_GPE_V6 \ + UINT32_C(0xc) + /* Any tunneled traffic */ + #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL \ + UINT32_C(0xff) + #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_TYPE_LAST \ + 
HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL + /* + * tunnel_flags allows the user to indicate the tunnel tag detection + * for the tunnel type specified in tunnel_type. + */ + uint8_t tunnel_flags; + /* + * If the tunnel_type is geneve, then this bit indicates if we + * need to match the geneve OAM packet. + * If the tunnel_type is nvgre or gre, then this bit indicates if + * we need to detect checksum present bit in geneve header. + * If the tunnel_type is mpls, then this bit indicates if we need + * to match mpls packet with explicit IPV4/IPV6 null header. + */ + #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_FLAGS_TUN_FLAGS_OAM_CHECKSUM_EXPLHDR \ + UINT32_C(0x1) + /* + * If the tunnel_type is geneve, then this bit indicates if we + * need to detect the critical option bit set in the oam packet. + * If the tunnel_type is nvgre or gre, then this bit indicates + * if we need to match nvgre packets with key present bit set in + * gre header. + * If the tunnel_type is mpls, then this bit indicates if we + * need to match mpls packet with S bit from inner/second label. + */ + #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_FLAGS_TUN_FLAGS_CRITICAL_OPT_S1 \ + UINT32_C(0x2) + /* + * If the tunnel_type is geneve, then this bit indicates if we + * need to match geneve packet with extended header bit set in + * geneve header. + * If the tunnel_type is nvgre or gre, then this bit indicates + * if we need to match nvgre packets with sequence number + * present bit set in gre header. + * If the tunnel_type is mpls, then this bit indicates if we + * need to match mpls packet with S bit from out/first label. + */ + #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_FLAGS_TUN_FLAGS_EXTHDR_SEQNUM_S0 \ + UINT32_C(0x4) + /* + * Virtual Network Identifier (VNI). Only valid with + * tunnel_types VXLAN, NVGRE, and Geneve. + * Only lower 24-bits of VNI field are used + * in setting up the filter. + */ + uint32_t vni; + /* Logical VNIC ID of the destination VNIC. */ + uint32_t dst_vnic_id; + /* + * Logical VNIC ID of the VNIC where traffic is + * mirrored. + */ + uint32_t mirror_vnic_id; +} __rte_packed; + +/* hwrm_cfa_tunnel_filter_alloc_output (size:192b/24B) */ +struct hwrm_cfa_tunnel_filter_alloc_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* This value is an opaque id into CFA data structures. */ + uint64_t tunnel_filter_id; + /* + * The flow id value in bit 0-29 is the actual ID of the flow + * associated with this filter and it shall be used to match + * and associate the flow identifier returned in completion + * records. A value of 0xFFFFFFFF in the 32-bit flow_id field + * shall indicate no valid flow id. + */ + uint32_t flow_id; + /* Indicate the flow id value. */ + #define HWRM_CFA_TUNNEL_FILTER_ALLOC_OUTPUT_FLOW_ID_VALUE_MASK \ + UINT32_C(0x3fffffff) + #define HWRM_CFA_TUNNEL_FILTER_ALLOC_OUTPUT_FLOW_ID_VALUE_SFT 0 + /* Indicate type of the flow. */ + #define HWRM_CFA_TUNNEL_FILTER_ALLOC_OUTPUT_FLOW_ID_TYPE \ + UINT32_C(0x40000000) + /* + * If this bit set to 0, then it indicates that the flow is + * internal flow. + */ + #define HWRM_CFA_TUNNEL_FILTER_ALLOC_OUTPUT_FLOW_ID_TYPE_INT \ + (UINT32_C(0x0) << 30) + /* + * If this bit is set to 1, then it indicates that the flow is + * external flow. 
+ */ + #define HWRM_CFA_TUNNEL_FILTER_ALLOC_OUTPUT_FLOW_ID_TYPE_EXT \ + (UINT32_C(0x1) << 30) + #define HWRM_CFA_TUNNEL_FILTER_ALLOC_OUTPUT_FLOW_ID_TYPE_LAST \ + HWRM_CFA_TUNNEL_FILTER_ALLOC_OUTPUT_FLOW_ID_TYPE_EXT + /* Indicate the flow direction. */ + #define HWRM_CFA_TUNNEL_FILTER_ALLOC_OUTPUT_FLOW_ID_DIR \ + UINT32_C(0x80000000) + /* If this bit set to 0, then it indicates rx flow. */ + #define HWRM_CFA_TUNNEL_FILTER_ALLOC_OUTPUT_FLOW_ID_DIR_RX \ + (UINT32_C(0x0) << 31) + /* If this bit is set to 1, then it indicates that tx flow. */ + #define HWRM_CFA_TUNNEL_FILTER_ALLOC_OUTPUT_FLOW_ID_DIR_TX \ + (UINT32_C(0x1) << 31) + #define HWRM_CFA_TUNNEL_FILTER_ALLOC_OUTPUT_FLOW_ID_DIR_LAST \ + HWRM_CFA_TUNNEL_FILTER_ALLOC_OUTPUT_FLOW_ID_DIR_TX + uint8_t unused_0[3]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/******************************* + * hwrm_cfa_tunnel_filter_free * + *******************************/ + + +/* hwrm_cfa_tunnel_filter_free_input (size:192b/24B) */ +struct hwrm_cfa_tunnel_filter_free_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* This value is an opaque id into CFA data structures. */ + uint64_t tunnel_filter_id; +} __rte_packed; + +/* hwrm_cfa_tunnel_filter_free_output (size:128b/16B) */ +struct hwrm_cfa_tunnel_filter_free_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/*************************************** + * hwrm_cfa_redirect_tunnel_type_alloc * + ***************************************/ + + +/* hwrm_cfa_redirect_tunnel_type_alloc_input (size:192b/24B) */ +struct hwrm_cfa_redirect_tunnel_type_alloc_input { + /* The HWRM command request type. 
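/*
 * Editor's sketch (not part of the upstream patch): filling the VXLAN
 * portion of a tunnel-filter request and unpacking the flow_id returned
 * above (bits 0-29 value, bit 30 int/ext, bit 31 rx/tx; 0xFFFFFFFF means
 * no valid flow id). Message transport and byte-order conversion are out
 * of scope, and both helper names are illustrative.
 */
#include <stdint.h>
#include <string.h>

static void
fill_vxlan_tunnel_filter(struct hwrm_cfa_tunnel_filter_alloc_input *req,
			 uint64_t l2_filter_id, uint32_t vni,
			 uint32_t dst_vnic_id)
{
	memset(req, 0, sizeof(*req));
	req->enables =
		HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID |
		HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE |
		HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_ENABLES_VNI |
		HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_ENABLES_DST_VNIC_ID;
	req->l2_filter_id = l2_filter_id;
	req->tunnel_type = HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_TYPE_VXLAN;
	req->vni = vni & 0xffffff;	/* only the low 24 bits are used */
	req->dst_vnic_id = dst_vnic_id;
}

static void
decode_flow_id(uint32_t flow_id, uint32_t *id, int *is_tx, int *is_ext)
{
	/* A flow_id of 0xFFFFFFFF means "no valid flow id" per the comment. */
	*id = flow_id & HWRM_CFA_TUNNEL_FILTER_ALLOC_OUTPUT_FLOW_ID_VALUE_MASK;
	*is_tx = !!(flow_id & HWRM_CFA_TUNNEL_FILTER_ALLOC_OUTPUT_FLOW_ID_DIR);
	*is_ext = !!(flow_id & HWRM_CFA_TUNNEL_FILTER_ALLOC_OUTPUT_FLOW_ID_TYPE);
}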
*/ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* The destination function id, to whom the traffic is redirected. */ + uint16_t dest_fid; + /* Tunnel Type. */ + uint8_t tunnel_type; + /* Non-tunnel */ + #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_TUNNEL_TYPE_NONTUNNEL \ + UINT32_C(0x0) + /* Virtual eXtensible Local Area Network (VXLAN) */ + #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_TUNNEL_TYPE_VXLAN \ + UINT32_C(0x1) + /* Network Virtualization Generic Routing Encapsulation (NVGRE) */ + #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_TUNNEL_TYPE_NVGRE \ + UINT32_C(0x2) + /* Generic Routing Encapsulation (GRE) inside Ethernet payload */ + #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_TUNNEL_TYPE_L2GRE \ + UINT32_C(0x3) + /* IP in IP */ + #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_TUNNEL_TYPE_IPIP \ + UINT32_C(0x4) + /* Generic Network Virtualization Encapsulation (Geneve) */ + #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_TUNNEL_TYPE_GENEVE \ + UINT32_C(0x5) + /* Multi-Protocol Label Switching (MPLS) */ + #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_TUNNEL_TYPE_MPLS \ + UINT32_C(0x6) + /* Stateless Transport Tunnel (STT) */ + #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_TUNNEL_TYPE_STT \ + UINT32_C(0x7) + /* Generic Routing Encapsulation (GRE) inside IP datagram payload */ + #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_TUNNEL_TYPE_IPGRE \ + UINT32_C(0x8) + /* IPV4 over virtual eXtensible Local Area Network (IPV4oVXLAN) */ + #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_TUNNEL_TYPE_VXLAN_V4 \ + UINT32_C(0x9) + /* Enhance Generic Routing Encapsulation (GRE version 1) inside IP datagram payload */ + #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_TUNNEL_TYPE_IPGRE_V1 \ + UINT32_C(0xa) + /* Use fixed layer 2 ether type of 0xFFFF */ + #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_TUNNEL_TYPE_L2_ETYPE \ + UINT32_C(0xb) + /* IPV6 over virtual eXtensible Local Area Network with GPE header (IPV6oVXLANGPE) */ + #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_TUNNEL_TYPE_VXLAN_GPE_V6 \ + UINT32_C(0xc) + /* Any tunneled traffic */ + #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL \ + UINT32_C(0xff) + #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_TUNNEL_TYPE_LAST \ + HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL + /* Tunnel alloc flags. */ + uint8_t flags; + /* Setting of this flag indicates modify existing redirect tunnel to new destination function ID. 
*/ + #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_FLAGS_MODIFY_DST \ + UINT32_C(0x1) + uint8_t unused_0[4]; +} __rte_packed; + +/* hwrm_cfa_redirect_tunnel_type_alloc_output (size:128b/16B) */ +struct hwrm_cfa_redirect_tunnel_type_alloc_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/************************************** + * hwrm_cfa_redirect_tunnel_type_free * + **************************************/ + + +/* hwrm_cfa_redirect_tunnel_type_free_input (size:192b/24B) */ +struct hwrm_cfa_redirect_tunnel_type_free_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* The destination function id, to whom the traffic is redirected. */ + uint16_t dest_fid; + /* Tunnel Type. 
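/*
 * Editor's sketch (not part of the upstream patch): requesting that VXLAN
 * traffic be redirected to dest_fid, optionally rewriting an existing
 * redirect via the MODIFY_DST flag defined above. Only the field
 * assignments are shown; the helper name is illustrative.
 */
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

static void
fill_vxlan_redirect(struct hwrm_cfa_redirect_tunnel_type_alloc_input *req,
		    uint16_t dest_fid, bool modify_existing)
{
	memset(req, 0, sizeof(*req));
	req->dest_fid = dest_fid;
	req->tunnel_type =
		HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_TUNNEL_TYPE_VXLAN;
	if (modify_existing)
		req->flags |=
		    HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_FLAGS_MODIFY_DST;
}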
*/ + uint8_t tunnel_type; + /* Non-tunnel */ + #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE_INPUT_TUNNEL_TYPE_NONTUNNEL \ + UINT32_C(0x0) + /* Virtual eXtensible Local Area Network (VXLAN) */ + #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE_INPUT_TUNNEL_TYPE_VXLAN \ + UINT32_C(0x1) + /* Network Virtualization Generic Routing Encapsulation (NVGRE) */ + #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE_INPUT_TUNNEL_TYPE_NVGRE \ + UINT32_C(0x2) + /* Generic Routing Encapsulation (GRE) inside Ethernet payload */ + #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE_INPUT_TUNNEL_TYPE_L2GRE \ + UINT32_C(0x3) + /* IP in IP */ + #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE_INPUT_TUNNEL_TYPE_IPIP \ + UINT32_C(0x4) + /* Generic Network Virtualization Encapsulation (Geneve) */ + #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE_INPUT_TUNNEL_TYPE_GENEVE \ + UINT32_C(0x5) + /* Multi-Protocol Label Switching (MPLS) */ + #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE_INPUT_TUNNEL_TYPE_MPLS \ + UINT32_C(0x6) + /* Stateless Transport Tunnel (STT) */ + #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE_INPUT_TUNNEL_TYPE_STT \ + UINT32_C(0x7) + /* Generic Routing Encapsulation (GRE) inside IP datagram payload */ + #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE_INPUT_TUNNEL_TYPE_IPGRE \ + UINT32_C(0x8) + /* IPV4 over virtual eXtensible Local Area Network (IPV4oVXLAN) */ + #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE_INPUT_TUNNEL_TYPE_VXLAN_V4 \ + UINT32_C(0x9) + /* Enhance Generic Routing Encapsulation (GRE version 1) inside IP datagram payload */ + #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE_INPUT_TUNNEL_TYPE_IPGRE_V1 \ + UINT32_C(0xa) + /* Use fixed layer 2 ether type of 0xFFFF */ + #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE_INPUT_TUNNEL_TYPE_L2_ETYPE \ + UINT32_C(0xb) + /* IPV6 over virtual eXtensible Local Area Network with GPE header (IPV6oVXLANGPE) */ + #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE_INPUT_TUNNEL_TYPE_VXLAN_GPE_V6 \ + UINT32_C(0xc) + /* Any tunneled traffic */ + #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE_INPUT_TUNNEL_TYPE_ANYTUNNEL \ + UINT32_C(0xff) + #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE_INPUT_TUNNEL_TYPE_LAST \ + HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE_INPUT_TUNNEL_TYPE_ANYTUNNEL + uint8_t unused_0[5]; +} __rte_packed; + +/* hwrm_cfa_redirect_tunnel_type_free_output (size:128b/16B) */ +struct hwrm_cfa_redirect_tunnel_type_free_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/************************************** + * hwrm_cfa_redirect_tunnel_type_info * + **************************************/ + + +/* hwrm_cfa_redirect_tunnel_type_info_input (size:192b/24B) */ +struct hwrm_cfa_redirect_tunnel_type_info_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. 
+ */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* The source function id. */ + uint16_t src_fid; + /* Tunnel Type. */ + uint8_t tunnel_type; + /* Non-tunnel */ + #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO_INPUT_TUNNEL_TYPE_NONTUNNEL \ + UINT32_C(0x0) + /* Virtual eXtensible Local Area Network (VXLAN) */ + #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO_INPUT_TUNNEL_TYPE_VXLAN \ + UINT32_C(0x1) + /* Network Virtualization Generic Routing Encapsulation (NVGRE) */ + #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO_INPUT_TUNNEL_TYPE_NVGRE \ + UINT32_C(0x2) + /* Generic Routing Encapsulation (GRE) inside Ethernet payload */ + #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO_INPUT_TUNNEL_TYPE_L2GRE \ + UINT32_C(0x3) + /* IP in IP */ + #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO_INPUT_TUNNEL_TYPE_IPIP \ + UINT32_C(0x4) + /* Generic Network Virtualization Encapsulation (Geneve) */ + #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO_INPUT_TUNNEL_TYPE_GENEVE \ + UINT32_C(0x5) + /* Multi-Protocol Label Switching (MPLS) */ + #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO_INPUT_TUNNEL_TYPE_MPLS \ + UINT32_C(0x6) + /* Stateless Transport Tunnel (STT) */ + #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO_INPUT_TUNNEL_TYPE_STT \ + UINT32_C(0x7) + /* Generic Routing Encapsulation (GRE) inside IP datagram payload */ + #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO_INPUT_TUNNEL_TYPE_IPGRE \ + UINT32_C(0x8) + /* IPV4 over virtual eXtensible Local Area Network (IPV4oVXLAN) */ + #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO_INPUT_TUNNEL_TYPE_VXLAN_V4 \ + UINT32_C(0x9) + /* Enhance Generic Routing Encapsulation (GRE version 1) inside IP datagram payload */ + #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO_INPUT_TUNNEL_TYPE_IPGRE_V1 \ + UINT32_C(0xa) + /* Use fixed layer 2 ether type of 0xFFFF */ + #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO_INPUT_TUNNEL_TYPE_L2_ETYPE \ + UINT32_C(0xb) + /* IPV6 over virtual eXtensible Local Area Network with GPE header (IPV6oVXLANGPE) */ + #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO_INPUT_TUNNEL_TYPE_VXLAN_GPE_V6 \ + UINT32_C(0xc) + /* Any tunneled traffic */ + #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO_INPUT_TUNNEL_TYPE_ANYTUNNEL \ + UINT32_C(0xff) + #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO_INPUT_TUNNEL_TYPE_LAST \ + HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO_INPUT_TUNNEL_TYPE_ANYTUNNEL + uint8_t unused_0[5]; +} __rte_packed; + +/* hwrm_cfa_redirect_tunnel_type_info_output (size:128b/16B) */ +struct hwrm_cfa_redirect_tunnel_type_info_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* The destination function id, to whom the traffic is redirected. 
*/ + uint16_t dest_fid; + uint8_t unused_0[5]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/* hwrm_vxlan_ipv4_hdr (size:128b/16B) */ +struct hwrm_vxlan_ipv4_hdr { + /* IPv4 version and header length. */ + uint8_t ver_hlen; + /* IPv4 header length */ + #define HWRM_VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_MASK UINT32_C(0xf) + #define HWRM_VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_SFT 0 + /* Version */ + #define HWRM_VXLAN_IPV4_HDR_VER_HLEN_VERSION_MASK UINT32_C(0xf0) + #define HWRM_VXLAN_IPV4_HDR_VER_HLEN_VERSION_SFT 4 + /* IPv4 type of service. */ + uint8_t tos; + /* IPv4 identification. */ + uint16_t ip_id; + /* IPv4 flags and offset. */ + uint16_t flags_frag_offset; + /* IPv4 TTL. */ + uint8_t ttl; + /* IPv4 protocol. */ + uint8_t protocol; + /* IPv4 source address. */ + uint32_t src_ip_addr; + /* IPv4 destination address. */ + uint32_t dest_ip_addr; +} __rte_packed; + +/* hwrm_vxlan_ipv6_hdr (size:320b/40B) */ +struct hwrm_vxlan_ipv6_hdr { + /* IPv6 version, traffic class and flow label. */ + uint32_t ver_tc_flow_label; + /* IPv6 version shift */ + #define HWRM_VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_VER_SFT \ + UINT32_C(0x1c) + /* IPv6 version mask */ + #define HWRM_VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_VER_MASK \ + UINT32_C(0xf0000000) + /* IPv6 TC shift */ + #define HWRM_VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_TC_SFT \ + UINT32_C(0x14) + /* IPv6 TC mask */ + #define HWRM_VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_TC_MASK \ + UINT32_C(0xff00000) + /* IPv6 flow label shift */ + #define HWRM_VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_SFT \ + UINT32_C(0x0) + /* IPv6 flow label mask */ + #define HWRM_VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_MASK \ + UINT32_C(0xfffff) + #define HWRM_VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_LAST \ + HWRM_VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_MASK + /* IPv6 payload length. */ + uint16_t payload_len; + /* IPv6 next header. */ + uint8_t next_hdr; + /* IPv6 TTL. */ + uint8_t ttl; + /* IPv6 source address. */ + uint32_t src_ip_addr[4]; + /* IPv6 destination address. */ + uint32_t dest_ip_addr[4]; +} __rte_packed; + +/* hwrm_cfa_encap_data_vxlan (size:640b/80B) */ +struct hwrm_cfa_encap_data_vxlan { + /* Source MAC address. */ + uint8_t src_mac_addr[6]; + /* reserved. */ + uint16_t unused_0; + /* Destination MAC address. */ + uint8_t dst_mac_addr[6]; + /* Number of VLAN tags. */ + uint8_t num_vlan_tags; + /* reserved. */ + uint8_t unused_1; + /* Outer VLAN TPID. */ + uint16_t ovlan_tpid; + /* Outer VLAN TCI. */ + uint16_t ovlan_tci; + /* Inner VLAN TPID. */ + uint16_t ivlan_tpid; + /* Inner VLAN TCI. */ + uint16_t ivlan_tci; + /* L3 header fields. */ + uint32_t l3[10]; + /* IP version mask. */ + #define HWRM_CFA_ENCAP_DATA_VXLAN_L3_VER_MASK UINT32_C(0xf) + /* IP version 4. */ + #define HWRM_CFA_ENCAP_DATA_VXLAN_L3_VER_IPV4 UINT32_C(0x4) + /* IP version 6. */ + #define HWRM_CFA_ENCAP_DATA_VXLAN_L3_VER_IPV6 UINT32_C(0x6) + #define HWRM_CFA_ENCAP_DATA_VXLAN_L3_LAST \ + HWRM_CFA_ENCAP_DATA_VXLAN_L3_VER_IPV6 + /* UDP source port. */ + uint16_t src_port; + /* UDP destination port. */ + uint16_t dst_port; + /* VXLAN Network Identifier. */ + uint32_t vni; + /* 3 bytes VXLAN header reserve fields from 1st dword of the VXLAN header. 
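/*
 * Editor's sketch (not part of the upstream patch): packing the outer IP
 * header words of the VXLAN encapsulation with the masks and shifts defined
 * above. A plain 20-byte IPv4 header has IHL 5; byte-order handling is left
 * to the caller.
 */
#include <stdint.h>

static uint8_t
pack_ipv4_ver_hlen(void)
{
	/* Version 4 in the high nibble, header length in 32-bit words low. */
	return (uint8_t)((4u << HWRM_VXLAN_IPV4_HDR_VER_HLEN_VERSION_SFT) |
			 (5u & HWRM_VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_MASK));
}

static uint32_t
pack_ipv6_ver_tc_flow(uint8_t tc, uint32_t flow_label)
{
	return (6u << HWRM_VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_VER_SFT) |
	       ((uint32_t)tc << HWRM_VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_TC_SFT) |
	       (flow_label &
		HWRM_VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_MASK);
}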
*/ + uint8_t hdr_rsvd0[3]; + /* 1 byte VXLAN header reserve field from 2nd dword of the VXLAN header. */ + uint8_t hdr_rsvd1; + /* VXLAN header flags field. */ + uint8_t hdr_flags; + uint8_t unused[3]; +} __rte_packed; + +/******************************* + * hwrm_cfa_encap_record_alloc * + *******************************/ + + +/* hwrm_cfa_encap_record_alloc_input (size:832b/104B) */ +struct hwrm_cfa_encap_record_alloc_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + uint32_t flags; + /* Setting of this flag indicates the applicability to the loopback path. */ + #define HWRM_CFA_ENCAP_RECORD_ALLOC_INPUT_FLAGS_LOOPBACK \ + UINT32_C(0x1) + /* + * Setting of this flag indicates this encap record is external encap record. + * Resetting of this flag indicates this flag is internal encap record and + * this is the default setting. + */ + #define HWRM_CFA_ENCAP_RECORD_ALLOC_INPUT_FLAGS_EXTERNAL \ + UINT32_C(0x2) + /* Encapsulation Type. 
*/ + uint8_t encap_type; + /* Virtual eXtensible Local Area Network (VXLAN) */ + #define HWRM_CFA_ENCAP_RECORD_ALLOC_INPUT_ENCAP_TYPE_VXLAN \ + UINT32_C(0x1) + /* Network Virtualization Generic Routing Encapsulation (NVGRE) */ + #define HWRM_CFA_ENCAP_RECORD_ALLOC_INPUT_ENCAP_TYPE_NVGRE \ + UINT32_C(0x2) + /* Generic Routing Encapsulation (GRE) after inside Ethernet payload */ + #define HWRM_CFA_ENCAP_RECORD_ALLOC_INPUT_ENCAP_TYPE_L2GRE \ + UINT32_C(0x3) + /* IP in IP */ + #define HWRM_CFA_ENCAP_RECORD_ALLOC_INPUT_ENCAP_TYPE_IPIP \ + UINT32_C(0x4) + /* Generic Network Virtualization Encapsulation (Geneve) */ + #define HWRM_CFA_ENCAP_RECORD_ALLOC_INPUT_ENCAP_TYPE_GENEVE \ + UINT32_C(0x5) + /* Multi-Protocol Label Switching (MPLS) */ + #define HWRM_CFA_ENCAP_RECORD_ALLOC_INPUT_ENCAP_TYPE_MPLS \ + UINT32_C(0x6) + /* VLAN */ + #define HWRM_CFA_ENCAP_RECORD_ALLOC_INPUT_ENCAP_TYPE_VLAN \ + UINT32_C(0x7) + /* Generic Routing Encapsulation (GRE) inside IP datagram payload */ + #define HWRM_CFA_ENCAP_RECORD_ALLOC_INPUT_ENCAP_TYPE_IPGRE \ + UINT32_C(0x8) + /* IPV4 over virtual eXtensible Local Area Network (IPV4oVXLAN) */ + #define HWRM_CFA_ENCAP_RECORD_ALLOC_INPUT_ENCAP_TYPE_VXLAN_V4 \ + UINT32_C(0x9) + /* Enhance Generic Routing Encapsulation (GRE version 1) inside IP datagram payload */ + #define HWRM_CFA_ENCAP_RECORD_ALLOC_INPUT_ENCAP_TYPE_IPGRE_V1 \ + UINT32_C(0xa) + /* Use fixed layer 2 ether type of 0xFFFF */ + #define HWRM_CFA_ENCAP_RECORD_ALLOC_INPUT_ENCAP_TYPE_L2_ETYPE \ + UINT32_C(0xb) + /* IPV6 over virtual eXtensible Local Area Network with GPE header (IPV6oVXLANGPE) */ + #define HWRM_CFA_ENCAP_RECORD_ALLOC_INPUT_ENCAP_TYPE_VXLAN_GPE_V6 \ + UINT32_C(0xc) + #define HWRM_CFA_ENCAP_RECORD_ALLOC_INPUT_ENCAP_TYPE_LAST \ + HWRM_CFA_ENCAP_RECORD_ALLOC_INPUT_ENCAP_TYPE_VXLAN_GPE_V6 + uint8_t unused_0[3]; + /* This value is encap data used for the given encap type. */ + uint32_t encap_data[20]; +} __rte_packed; + +/* hwrm_cfa_encap_record_alloc_output (size:128b/16B) */ +struct hwrm_cfa_encap_record_alloc_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* This value is an opaque id into CFA data structures. */ + uint32_t encap_record_id; + uint8_t unused_0[3]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/****************************** + * hwrm_cfa_encap_record_free * + ******************************/ + + +/* hwrm_cfa_encap_record_free_input (size:192b/24B) */ +struct hwrm_cfa_encap_record_free_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. 
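/*
 * Editor's sketch (not part of the upstream patch): the 80-byte
 * hwrm_cfa_encap_data_vxlan record defined earlier (640b/80B) fits exactly
 * in the 20-word encap_data[] array of the alloc request. Transport of the
 * command is omitted and the helper name is illustrative.
 */
#include <stdint.h>
#include <string.h>

static void
fill_vxlan_encap_record(struct hwrm_cfa_encap_record_alloc_input *req,
			const struct hwrm_cfa_encap_data_vxlan *encap)
{
	memset(req, 0, sizeof(*req));
	req->encap_type = HWRM_CFA_ENCAP_RECORD_ALLOC_INPUT_ENCAP_TYPE_VXLAN;
	/* Both sides are 80 bytes: 640b/80B vs. uint32_t encap_data[20]. */
	memcpy(req->encap_data, encap, sizeof(req->encap_data));
}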
+ */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* This value is an opaque id into CFA data structures. */ + uint32_t encap_record_id; + uint8_t unused_0[4]; +} __rte_packed; + +/* hwrm_cfa_encap_record_free_output (size:128b/16B) */ +struct hwrm_cfa_encap_record_free_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/******************************** + * hwrm_cfa_ntuple_filter_alloc * + ********************************/ + + +/* hwrm_cfa_ntuple_filter_alloc_input (size:1024b/128B) */ +struct hwrm_cfa_ntuple_filter_alloc_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + uint32_t flags; + /* Setting of this flag indicates the applicability to the loopback path. */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_LOOPBACK \ + UINT32_C(0x1) + /* + * Setting of this flag indicates drop action. If this flag is not set, + * then it should be considered accept action. + */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP \ + UINT32_C(0x2) + /* + * Setting of this flag indicates that a meter is expected to be attached + * to this flow. This hint can be used when choosing the action record + * format required for the flow. + */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_METER \ + UINT32_C(0x4) + /* + * Setting of this flag indicates that the dst_id field contains function ID. + * If this is not set it indicates dest_id is VNIC or VPORT. 
+ */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DEST_FID \ + UINT32_C(0x8) + /* + * Setting of this flag indicates match on arp reply when ethertype is 0x0806. + * If this is not set it indicates no specific arp opcode matching. + */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_ARP_REPLY \ + UINT32_C(0x10) + /* + * Setting of this flag indicates that the dst_id field contains RFS ring + * table index. If this is not set it indicates dst_id is VNIC or VPORT + * or function ID. Note dest_fid and dest_rfs_ring_idx can’t be set at + * the same time. + */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DEST_RFS_RING_IDX \ + UINT32_C(0x20) + uint32_t enables; + /* + * This bit must be '1' for the l2_filter_id field to be + * configured. + */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID \ + UINT32_C(0x1) + /* + * This bit must be '1' for the ethertype field to be + * configured. + */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE \ + UINT32_C(0x2) + /* + * This bit must be '1' for the tunnel_type field to be + * configured. + */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE \ + UINT32_C(0x4) + /* + * This bit must be '1' for the src_macaddr field to be + * configured. + */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR \ + UINT32_C(0x8) + /* + * This bit must be '1' for the ipaddr_type field to be + * configured. + */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IPADDR_TYPE \ + UINT32_C(0x10) + /* + * This bit must be '1' for the src_ipaddr field to be + * configured. + */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR \ + UINT32_C(0x20) + /* + * This bit must be '1' for the src_ipaddr_mask field to be + * configured. + */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR_MASK \ + UINT32_C(0x40) + /* + * This bit must be '1' for the dst_ipaddr field to be + * configured. + */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR \ + UINT32_C(0x80) + /* + * This bit must be '1' for the dst_ipaddr_mask field to be + * configured. + */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR_MASK \ + UINT32_C(0x100) + /* + * This bit must be '1' for the ip_protocol field to be + * configured. + */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL \ + UINT32_C(0x200) + /* + * This bit must be '1' for the src_port field to be + * configured. + */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT \ + UINT32_C(0x400) + /* + * This bit must be '1' for the src_port_mask field to be + * configured. + */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT_MASK \ + UINT32_C(0x800) + /* + * This bit must be '1' for the dst_port field to be + * configured. + */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT \ + UINT32_C(0x1000) + /* + * This bit must be '1' for the dst_port_mask field to be + * configured. + */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT_MASK \ + UINT32_C(0x2000) + /* + * This bit must be '1' for the pri_hint field to be + * configured. + */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_PRI_HINT \ + UINT32_C(0x4000) + /* + * This bit must be '1' for the ntuple_filter_id field to be + * configured. + */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_NTUPLE_FILTER_ID \ + UINT32_C(0x8000) + /* + * This bit must be '1' for the dst_id field to be + * configured. 
+ */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_ID \ + UINT32_C(0x10000) + /* + * This bit must be '1' for the mirror_vnic_id field to be + * configured. + */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID \ + UINT32_C(0x20000) + /* + * This bit must be '1' for the dst_macaddr field to be + * configured. + */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_MACADDR \ + UINT32_C(0x40000) + /* This flag is deprecated. */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_RFS_RING_TBL_IDX \ + UINT32_C(0x80000) + /* + * This value identifies a set of CFA data structures used for an L2 + * context. + */ + uint64_t l2_filter_id; + /* + * This value indicates the source MAC address in + * the Ethernet header. + */ + uint8_t src_macaddr[6]; + /* This value indicates the ethertype in the Ethernet header. */ + uint16_t ethertype; + /* + * This value indicates the type of IP address. + * 4 - IPv4 + * 6 - IPv6 + * All others are invalid. + */ + uint8_t ip_addr_type; + /* invalid */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_UNKNOWN \ + UINT32_C(0x0) + /* IPv4 */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV4 \ + UINT32_C(0x4) + /* IPv6 */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 \ + UINT32_C(0x6) + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_LAST \ + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 + /* + * The value of protocol filed in IP header. + * Applies to UDP and TCP traffic. + * 6 - TCP + * 17 - UDP + */ + uint8_t ip_protocol; + /* invalid */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_PROTOCOL_UNKNOWN \ + UINT32_C(0x0) + /* TCP */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_PROTOCOL_TCP \ + UINT32_C(0x6) + /* UDP */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_PROTOCOL_UDP \ + UINT32_C(0x11) + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_PROTOCOL_LAST \ + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_PROTOCOL_UDP + /* + * If set, this value shall represent the + * Logical VNIC ID of the destination VNIC for the RX + * path and network port id of the destination port for + * the TX path. + */ + uint16_t dst_id; + /* + * Logical VNIC ID of the VNIC where traffic is + * mirrored. + */ + uint16_t mirror_vnic_id; + /* + * This value indicates the tunnel type for this filter. + * If this field is not specified, then the filter shall + * apply to both non-tunneled and tunneled packets. + * If this field conflicts with the tunnel_type specified + * in the l2_filter_id, then the HWRM shall return an + * error for this command. 
+ */ + uint8_t tunnel_type; + /* Non-tunnel */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_NONTUNNEL \ + UINT32_C(0x0) + /* Virtual eXtensible Local Area Network (VXLAN) */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_VXLAN \ + UINT32_C(0x1) + /* Network Virtualization Generic Routing Encapsulation (NVGRE) */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_NVGRE \ + UINT32_C(0x2) + /* Generic Routing Encapsulation (GRE) inside Ethernet payload */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_L2GRE \ + UINT32_C(0x3) + /* IP in IP */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_IPIP \ + UINT32_C(0x4) + /* Generic Network Virtualization Encapsulation (Geneve) */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_GENEVE \ + UINT32_C(0x5) + /* Multi-Protocol Label Switching (MPLS) */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_MPLS \ + UINT32_C(0x6) + /* Stateless Transport Tunnel (STT) */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_STT \ + UINT32_C(0x7) + /* Generic Routing Encapsulation (GRE) inside IP datagram payload */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_IPGRE \ + UINT32_C(0x8) + /* IPV4 over virtual eXtensible Local Area Network (IPV4oVXLAN) */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_VXLAN_V4 \ + UINT32_C(0x9) + /* Enhance Generic Routing Encapsulation (GRE version 1) inside IP datagram payload */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_IPGRE_V1 \ + UINT32_C(0xa) + /* Use fixed layer 2 ether type of 0xFFFF */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_L2_ETYPE \ + UINT32_C(0xb) + /* IPV6 over virtual eXtensible Local Area Network with GPE header (IPV6oVXLANGPE) */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_VXLAN_GPE_V6 \ + UINT32_C(0xc) + /* Any tunneled traffic */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL \ + UINT32_C(0xff) + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_LAST \ + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL + /* + * This hint is provided to help in placing + * the filter in the filter table. + */ + uint8_t pri_hint; + /* No preference */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_PRI_HINT_NO_PREFER \ + UINT32_C(0x0) + /* Above the given filter */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_PRI_HINT_ABOVE \ + UINT32_C(0x1) + /* Below the given filter */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_PRI_HINT_BELOW \ + UINT32_C(0x2) + /* As high as possible */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_PRI_HINT_HIGHEST \ + UINT32_C(0x3) + /* As low as possible */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_PRI_HINT_LOWEST \ + UINT32_C(0x4) + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_PRI_HINT_LAST \ + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_PRI_HINT_LOWEST + /* + * The value of source IP address to be used in filtering. + * For IPv4, first four bytes represent the IP address. + */ + uint32_t src_ipaddr[4]; + /* + * The value of source IP address mask to be used in + * filtering. + * For IPv4, first four bytes represent the IP address mask. + */ + uint32_t src_ipaddr_mask[4]; + /* + * The value of destination IP address to be used in filtering. + * For IPv4, first four bytes represent the IP address. + */ + uint32_t dst_ipaddr[4]; + /* + * The value of destination IP address mask to be used in + * filtering. + * For IPv4, first four bytes represent the IP address mask. 
+ */ + uint32_t dst_ipaddr_mask[4]; + /* + * The value of source port to be used in filtering. + * Applies to UDP and TCP traffic. + */ + uint16_t src_port; + /* + * The value of source port mask to be used in filtering. + * Applies to UDP and TCP traffic. + */ + uint16_t src_port_mask; + /* + * The value of destination port to be used in filtering. + * Applies to UDP and TCP traffic. + */ + uint16_t dst_port; + /* + * The value of destination port mask to be used in + * filtering. + * Applies to UDP and TCP traffic. + */ + uint16_t dst_port_mask; + /* + * This is the ID of the filter that goes along with + * the pri_hint. + */ + uint64_t ntuple_filter_id_hint; +} __rte_packed; + +/* hwrm_cfa_ntuple_filter_alloc_output (size:192b/24B) */ +struct hwrm_cfa_ntuple_filter_alloc_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* This value is an opaque id into CFA data structures. */ + uint64_t ntuple_filter_id; + /* + * The flow id value in bit 0-29 is the actual ID of the flow + * associated with this filter and it shall be used to match + * and associate the flow identifier returned in completion + * records. A value of 0xFFFFFFFF in the 32-bit flow_id field + * shall indicate no valid flow id. + */ + uint32_t flow_id; + /* Indicate the flow id value. */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_OUTPUT_FLOW_ID_VALUE_MASK \ + UINT32_C(0x3fffffff) + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_OUTPUT_FLOW_ID_VALUE_SFT 0 + /* Indicate type of the flow. */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_OUTPUT_FLOW_ID_TYPE \ + UINT32_C(0x40000000) + /* + * If this bit set to 0, then it indicates that the flow is + * internal flow. + */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_OUTPUT_FLOW_ID_TYPE_INT \ + (UINT32_C(0x0) << 30) + /* + * If this bit is set to 1, then it indicates that the flow is + * external flow. + */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_OUTPUT_FLOW_ID_TYPE_EXT \ + (UINT32_C(0x1) << 30) + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_OUTPUT_FLOW_ID_TYPE_LAST \ + HWRM_CFA_NTUPLE_FILTER_ALLOC_OUTPUT_FLOW_ID_TYPE_EXT + /* Indicate the flow direction. */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_OUTPUT_FLOW_ID_DIR \ + UINT32_C(0x80000000) + /* If this bit set to 0, then it indicates rx flow. */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_OUTPUT_FLOW_ID_DIR_RX \ + (UINT32_C(0x0) << 31) + /* If this bit is set to 1, then it indicates that tx flow. */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_OUTPUT_FLOW_ID_DIR_TX \ + (UINT32_C(0x1) << 31) + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_OUTPUT_FLOW_ID_DIR_LAST \ + HWRM_CFA_NTUPLE_FILTER_ALLOC_OUTPUT_FLOW_ID_DIR_TX + uint8_t unused_0[3]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/* hwrm_cfa_ntuple_filter_alloc_cmd_err (size:64b/8B) */ +struct hwrm_cfa_ntuple_filter_alloc_cmd_err { + /* + * command specific error codes that goes to + * the cmd_err field in Common HWRM Error Response. 
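/*
 * Editor's sketch (not part of the upstream patch): a TCP/IPv4 4-tuple
 * steering request using the fields above. For IPv4 only element [0] of the
 * address arrays is meaningful, per the comments. Byte-order conversion and
 * sending the command are omitted; the helper name is illustrative.
 */
#include <stdint.h>
#include <string.h>

static void
fill_ipv4_tcp_ntuple(struct hwrm_cfa_ntuple_filter_alloc_input *req,
		     uint64_t l2_filter_id, uint16_t dst_vnic,
		     uint32_t src_ip, uint32_t dst_ip,
		     uint16_t src_port, uint16_t dst_port)
{
	memset(req, 0, sizeof(*req));
	req->enables =
		HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID |
		HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE |
		HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IPADDR_TYPE |
		HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL |
		HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR |
		HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR |
		HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT |
		HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT |
		HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
	req->l2_filter_id = l2_filter_id;
	req->ethertype = 0x0800;	/* IPv4 */
	req->ip_addr_type = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
	req->ip_protocol = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_PROTOCOL_TCP;
	req->src_ipaddr[0] = src_ip;	/* IPv4 uses only element [0] */
	req->dst_ipaddr[0] = dst_ip;
	req->src_port = src_port;
	req->dst_port = dst_port;
	req->dst_id = dst_vnic;		/* destination VNIC on the RX path */
}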
+ */ + uint8_t code; + /* Unknown error */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_UNKNOWN \ + UINT32_C(0x0) + /* Unable to complete operation due to conflict with Rx Mask VLAN */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_RX_MASK_VLAN_CONFLICT_ERR \ + UINT32_C(0x1) + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_LAST \ + HWRM_CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_RX_MASK_VLAN_CONFLICT_ERR + uint8_t unused_0[7]; +} __rte_packed; + +/******************************* + * hwrm_cfa_ntuple_filter_free * + *******************************/ + + +/* hwrm_cfa_ntuple_filter_free_input (size:192b/24B) */ +struct hwrm_cfa_ntuple_filter_free_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* This value is an opaque id into CFA data structures. */ + uint64_t ntuple_filter_id; +} __rte_packed; + +/* hwrm_cfa_ntuple_filter_free_output (size:128b/16B) */ +struct hwrm_cfa_ntuple_filter_free_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/****************************** + * hwrm_cfa_ntuple_filter_cfg * + ******************************/ + + +/* hwrm_cfa_ntuple_filter_cfg_input (size:384b/48B) */ +struct hwrm_cfa_ntuple_filter_cfg_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. 
+ */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + uint32_t enables; + /* + * This bit must be '1' for the new_dst_id field to be + * configured. + */ + #define HWRM_CFA_NTUPLE_FILTER_CFG_INPUT_ENABLES_NEW_DST_ID \ + UINT32_C(0x1) + /* + * This bit must be '1' for the new_mirror_vnic_id field to be + * configured. + */ + #define HWRM_CFA_NTUPLE_FILTER_CFG_INPUT_ENABLES_NEW_MIRROR_VNIC_ID \ + UINT32_C(0x2) + /* + * This bit must be '1' for the new_meter_instance_id field to be + * configured. + */ + #define HWRM_CFA_NTUPLE_FILTER_CFG_INPUT_ENABLES_NEW_METER_INSTANCE_ID \ + UINT32_C(0x4) + uint32_t flags; + /* + * Setting this bit to 1 indicates that dest_id field contains FID. + * Setting this to 0 indicates that dest_id field contains VNIC or VPORT. + */ + #define HWRM_CFA_NTUPLE_FILTER_CFG_INPUT_FLAGS_DEST_FID \ + UINT32_C(0x1) + /* + * Setting of this flag indicates that the new_dst_id field contains + * RFS ring table index. If this is not set it indicates new_dst_id is + * VNIC or VPORT or function ID. Note dest_fid and dest_rfs_ring_idx + * can’t be set at the same time. + */ + #define HWRM_CFA_NTUPLE_FILTER_CFG_INPUT_FLAGS_DEST_RFS_RING_IDX \ + UINT32_C(0x2) + /* This value is an opaque id into CFA data structures. */ + uint64_t ntuple_filter_id; + /* + * If set, this value shall represent the new + * Logical VNIC ID of the destination VNIC for the RX + * path and new network port id of the destination port for + * the TX path. + */ + uint32_t new_dst_id; + /* + * New Logical VNIC ID of the VNIC where traffic is + * mirrored. + */ + uint32_t new_mirror_vnic_id; + /* + * New meter to attach to the flow. Specifying the + * invalid instance ID is used to remove any existing + * meter from the flow. + */ + uint16_t new_meter_instance_id; + /* + * A value of 0xfff is considered invalid and implies the + * instance is not configured. + */ + #define HWRM_CFA_NTUPLE_FILTER_CFG_INPUT_NEW_METER_INSTANCE_ID_INVALID \ + UINT32_C(0xffff) + #define HWRM_CFA_NTUPLE_FILTER_CFG_INPUT_NEW_METER_INSTANCE_ID_LAST \ + HWRM_CFA_NTUPLE_FILTER_CFG_INPUT_NEW_METER_INSTANCE_ID_INVALID + uint8_t unused_1[6]; +} __rte_packed; + +/* hwrm_cfa_ntuple_filter_cfg_output (size:128b/16B) */ +struct hwrm_cfa_ntuple_filter_cfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. 
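/*
 * Editor's sketch (not part of the upstream patch): detaching any meter
 * from an existing ntuple filter by writing the INVALID meter instance id.
 * Note the constant is 0xffff, even though the comment above abbreviates it
 * as 0xfff. The helper name is illustrative; sending the command is omitted.
 */
#include <stdint.h>
#include <string.h>

static void
fill_remove_meter(struct hwrm_cfa_ntuple_filter_cfg_input *req,
		  uint64_t ntuple_filter_id)
{
	memset(req, 0, sizeof(*req));
	req->enables =
	    HWRM_CFA_NTUPLE_FILTER_CFG_INPUT_ENABLES_NEW_METER_INSTANCE_ID;
	req->ntuple_filter_id = ntuple_filter_id;
	req->new_meter_instance_id =
	    HWRM_CFA_NTUPLE_FILTER_CFG_INPUT_NEW_METER_INSTANCE_ID_INVALID;
}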
+ */ + uint8_t valid; +} __rte_packed; + +/************************** + * hwrm_cfa_em_flow_alloc * + **************************/ + + +/* hwrm_cfa_em_flow_alloc_input (size:896b/112B) */ +struct hwrm_cfa_em_flow_alloc_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + uint32_t flags; + /* + * Enumeration denoting the RX, TX type of the resource. + * This enumeration is used for resources that are similar for both + * TX and RX paths of the chip. + */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH UINT32_C(0x1) + /* tx path */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_TX UINT32_C(0x0) + /* rx path */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX UINT32_C(0x1) + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_LAST \ + HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX + /* + * Setting of this flag indicates enabling of a byte counter for a given + * flow. + */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_BYTE_CTR UINT32_C(0x2) + /* + * Setting of this flag indicates enabling of a packet counter for a given + * flow. + */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PKT_CTR UINT32_C(0x4) + /* Setting of this flag indicates de-capsulation action for the given flow. */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_DECAP UINT32_C(0x8) + /* Setting of this flag indicates encapsulation action for the given flow. */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_ENCAP UINT32_C(0x10) + /* + * Setting of this flag indicates drop action. If this flag is not set, + * then it should be considered accept action. + */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_DROP UINT32_C(0x20) + /* + * Setting of this flag indicates that a meter is expected to be attached + * to this flow. This hint can be used when choosing the action record + * format required for the flow. + */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_METER UINT32_C(0x40) + uint32_t enables; + /* + * This bit must be '1' for the l2_filter_id field to be + * configured. + */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID \ + UINT32_C(0x1) + /* + * This bit must be '1' for the tunnel_type field to be + * configured. + */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_TUNNEL_TYPE \ + UINT32_C(0x2) + /* + * This bit must be '1' for the tunnel_id field to be + * configured. + */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_TUNNEL_ID \ + UINT32_C(0x4) + /* + * This bit must be '1' for the src_macaddr field to be + * configured. + */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_MACADDR \ + UINT32_C(0x8) + /* + * This bit must be '1' for the dst_macaddr field to be + * configured. 
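The flags word above combines a path selector (bit 0) with independent action and counter bits. As a small illustration, the composition below requests an RX exact-match flow with a drop action and both counters enabled.

/* Illustrative only: flags for an RX exact-match flow that drops matching
 * packets and keeps packet and byte counters. */
static uint32_t
sketch_em_flow_flags_rx_drop_counted(void)
{
	return HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX |
	       HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_BYTE_CTR |
	       HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PKT_CTR |
	       HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_DROP;
}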
+ */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_MACADDR \ + UINT32_C(0x10) + /* + * This bit must be '1' for the ovlan_vid field to be + * configured. + */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_OVLAN_VID \ + UINT32_C(0x20) + /* + * This bit must be '1' for the ivlan_vid field to be + * configured. + */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IVLAN_VID \ + UINT32_C(0x40) + /* + * This bit must be '1' for the ethertype field to be + * configured. + */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_ETHERTYPE \ + UINT32_C(0x80) + /* + * This bit must be '1' for the src_ipaddr field to be + * configured. + */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_IPADDR \ + UINT32_C(0x100) + /* + * This bit must be '1' for the dst_ipaddr field to be + * configured. + */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_IPADDR \ + UINT32_C(0x200) + /* + * This bit must be '1' for the ipaddr_type field to be + * configured. + */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IPADDR_TYPE \ + UINT32_C(0x400) + /* + * This bit must be '1' for the ip_protocol field to be + * configured. + */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IP_PROTOCOL \ + UINT32_C(0x800) + /* + * This bit must be '1' for the src_port field to be + * configured. + */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_PORT \ + UINT32_C(0x1000) + /* + * This bit must be '1' for the dst_port field to be + * configured. + */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_PORT \ + UINT32_C(0x2000) + /* + * This bit must be '1' for the dst_id field to be + * configured. + */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_ID \ + UINT32_C(0x4000) + /* + * This bit must be '1' for the mirror_vnic_id field to be + * configured. + */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID \ + UINT32_C(0x8000) + /* + * This bit must be '1' for the encap_record_id field to be + * configured. + */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_ENCAP_RECORD_ID \ + UINT32_C(0x10000) + /* + * This bit must be '1' for the meter_instance_id field to be + * configured. + */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_METER_INSTANCE_ID \ + UINT32_C(0x20000) + /* + * This value identifies a set of CFA data structures used for an L2 + * context. + */ + uint64_t l2_filter_id; + /* Tunnel Type. 
*/ + uint8_t tunnel_type; + /* Non-tunnel */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_NONTUNNEL \ + UINT32_C(0x0) + /* Virtual eXtensible Local Area Network (VXLAN) */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_VXLAN \ + UINT32_C(0x1) + /* Network Virtualization Generic Routing Encapsulation (NVGRE) */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_NVGRE \ + UINT32_C(0x2) + /* Generic Routing Encapsulation (GRE) inside Ethernet payload */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_L2GRE \ + UINT32_C(0x3) + /* IP in IP */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_IPIP \ + UINT32_C(0x4) + /* Generic Network Virtualization Encapsulation (Geneve) */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_GENEVE \ + UINT32_C(0x5) + /* Multi-Protocol Label Switching (MPLS) */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_MPLS \ + UINT32_C(0x6) + /* Stateless Transport Tunnel (STT) */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_STT \ + UINT32_C(0x7) + /* Generic Routing Encapsulation (GRE) inside IP datagram payload */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_IPGRE \ + UINT32_C(0x8) + /* IPV4 over virtual eXtensible Local Area Network (IPV4oVXLAN) */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_VXLAN_V4 \ + UINT32_C(0x9) + /* Enhance Generic Routing Encapsulation (GRE version 1) inside IP datagram payload */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_IPGRE_V1 \ + UINT32_C(0xa) + /* Use fixed layer 2 ether type of 0xFFFF */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_L2_ETYPE \ + UINT32_C(0xb) + /* IPV6 over virtual eXtensible Local Area Network with GPE header (IPV6oVXLANGPE) */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_VXLAN_GPE_V6 \ + UINT32_C(0xc) + /* Any tunneled traffic */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL \ + UINT32_C(0xff) + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_LAST \ + HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL + uint8_t unused_0[3]; + /* + * Tunnel identifier. + * Virtual Network Identifier (VNI). Only valid with + * tunnel_types VXLAN, NVGRE, and Geneve. + * Only lower 24-bits of VNI field are used + * in setting up the filter. + */ + uint32_t tunnel_id; + /* + * This value indicates the source MAC address in + * the Ethernet header. + */ + uint8_t src_macaddr[6]; + /* The meter instance to attach to the flow. */ + uint16_t meter_instance_id; + /* + * A value of 0xfff is considered invalid and implies the + * instance is not configured. + */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_METER_INSTANCE_ID_INVALID \ + UINT32_C(0xffff) + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_METER_INSTANCE_ID_LAST \ + HWRM_CFA_EM_FLOW_ALLOC_INPUT_METER_INSTANCE_ID_INVALID + /* + * This value indicates the destination MAC address in + * the Ethernet header. + */ + uint8_t dst_macaddr[6]; + /* + * This value indicates the VLAN ID of the outer VLAN tag + * in the Ethernet header. + */ + uint16_t ovlan_vid; + /* + * This value indicates the VLAN ID of the inner VLAN tag + * in the Ethernet header. + */ + uint16_t ivlan_vid; + /* This value indicates the ethertype in the Ethernet header. */ + uint16_t ethertype; + /* + * This value indicates the type of IP address. + * 4 - IPv4 + * 6 - IPv6 + * All others are invalid. 
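Per the tunnel_id description above, only the lower 24 bits carry the VNI for VXLAN, NVGRE and Geneve tunnels. A hedged sketch of programming a VXLAN match key accordingly, using the enables bits defined earlier in this structure:

/* Hypothetical sketch: select a VXLAN tunnel match and program its VNI. */
static void
sketch_em_flow_set_vxlan_key(struct hwrm_cfa_em_flow_alloc_input *req,
			     uint32_t vni)
{
	req->tunnel_type = HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_VXLAN;
	req->tunnel_id = vni & 0xffffff;	/* only the lower 24 bits are used */
	req->enables |= HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_TUNNEL_TYPE |
			HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_TUNNEL_ID;
}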
+ */ + uint8_t ip_addr_type; + /* invalid */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_UNKNOWN UINT32_C(0x0) + /* IPv4 */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV4 UINT32_C(0x4) + /* IPv6 */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 UINT32_C(0x6) + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_LAST \ + HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 + /* + * The value of protocol filed in IP header. + * Applies to UDP and TCP traffic. + * 6 - TCP + * 17 - UDP + */ + uint8_t ip_protocol; + /* invalid */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_PROTOCOL_UNKNOWN UINT32_C(0x0) + /* TCP */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_PROTOCOL_TCP UINT32_C(0x6) + /* UDP */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_PROTOCOL_UDP UINT32_C(0x11) + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_PROTOCOL_LAST \ + HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_PROTOCOL_UDP + uint8_t unused_1[2]; + /* + * The value of source IP address to be used in filtering. + * For IPv4, first four bytes represent the IP address. + */ + uint32_t src_ipaddr[4]; + /* + * big_endian = True + * The value of destination IP address to be used in filtering. + * For IPv4, first four bytes represent the IP address. + */ + uint32_t dst_ipaddr[4]; + /* + * The value of source port to be used in filtering. + * Applies to UDP and TCP traffic. + */ + uint16_t src_port; + /* + * The value of destination port to be used in filtering. + * Applies to UDP and TCP traffic. + */ + uint16_t dst_port; + /* + * If set, this value shall represent the + * Logical VNIC ID of the destination VNIC for the RX + * path and network port id of the destination port for + * the TX path. + */ + uint16_t dst_id; + /* + * Logical VNIC ID of the VNIC where traffic is + * mirrored. + */ + uint16_t mirror_vnic_id; + /* Logical ID of the encapsulation record. */ + uint32_t encap_record_id; + uint8_t unused_2[4]; +} __rte_packed; + +/* hwrm_cfa_em_flow_alloc_output (size:192b/24B) */ +struct hwrm_cfa_em_flow_alloc_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* This value is an opaque id into CFA data structures. */ + uint64_t em_filter_id; + /* + * The flow id value in bit 0-29 is the actual ID of the flow + * associated with this filter and it shall be used to match + * and associate the flow identifier returned in completion + * records. A value of 0xFFFFFFFF in the 32-bit flow_id field + * shall indicate no valid flow id. + */ + uint32_t flow_id; + /* Indicate the flow id value. */ + #define HWRM_CFA_EM_FLOW_ALLOC_OUTPUT_FLOW_ID_VALUE_MASK \ + UINT32_C(0x3fffffff) + #define HWRM_CFA_EM_FLOW_ALLOC_OUTPUT_FLOW_ID_VALUE_SFT 0 + /* Indicate type of the flow. */ + #define HWRM_CFA_EM_FLOW_ALLOC_OUTPUT_FLOW_ID_TYPE \ + UINT32_C(0x40000000) + /* + * If this bit set to 0, then it indicates that the flow is + * internal flow. + */ + #define HWRM_CFA_EM_FLOW_ALLOC_OUTPUT_FLOW_ID_TYPE_INT \ + (UINT32_C(0x0) << 30) + /* + * If this bit is set to 1, then it indicates that the flow is + * external flow. + */ + #define HWRM_CFA_EM_FLOW_ALLOC_OUTPUT_FLOW_ID_TYPE_EXT \ + (UINT32_C(0x1) << 30) + #define HWRM_CFA_EM_FLOW_ALLOC_OUTPUT_FLOW_ID_TYPE_LAST \ + HWRM_CFA_EM_FLOW_ALLOC_OUTPUT_FLOW_ID_TYPE_EXT + /* Indicate the flow direction. 
*/ + #define HWRM_CFA_EM_FLOW_ALLOC_OUTPUT_FLOW_ID_DIR \ + UINT32_C(0x80000000) + /* If this bit set to 0, then it indicates rx flow. */ + #define HWRM_CFA_EM_FLOW_ALLOC_OUTPUT_FLOW_ID_DIR_RX \ + (UINT32_C(0x0) << 31) + /* If this bit is set to 1, then it indicates that tx flow. */ + #define HWRM_CFA_EM_FLOW_ALLOC_OUTPUT_FLOW_ID_DIR_TX \ + (UINT32_C(0x1) << 31) + #define HWRM_CFA_EM_FLOW_ALLOC_OUTPUT_FLOW_ID_DIR_LAST \ + HWRM_CFA_EM_FLOW_ALLOC_OUTPUT_FLOW_ID_DIR_TX + uint8_t unused_0[3]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/************************* + * hwrm_cfa_em_flow_free * + *************************/ + + +/* hwrm_cfa_em_flow_free_input (size:192b/24B) */ +struct hwrm_cfa_em_flow_free_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* This value is an opaque id into CFA data structures. */ + uint64_t em_filter_id; +} __rte_packed; + +/* hwrm_cfa_em_flow_free_output (size:128b/16B) */ +struct hwrm_cfa_em_flow_free_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/************************ + * hwrm_cfa_meter_qcaps * + ************************/ + + +/* hwrm_cfa_meter_qcaps_input (size:128b/16B) */ +struct hwrm_cfa_meter_qcaps_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. 
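The flow_id returned by hwrm_cfa_em_flow_alloc packs a 30-bit flow value, a type bit and a direction bit, with 0xFFFFFFFF meaning no valid flow id. A minimal sketch of unpacking it with the masks defined above:

/* Hypothetical sketch: unpack the flow_id from
 * hwrm_cfa_em_flow_alloc_output.  A value of 0xFFFFFFFF means no valid
 * flow id was assigned. */
static int
sketch_decode_em_flow_id(uint32_t flow_id, uint32_t *value,
			 int *is_external, int *is_tx)
{
	if (flow_id == 0xFFFFFFFF)
		return -1;	/* no valid flow id */
	*value = (flow_id & HWRM_CFA_EM_FLOW_ALLOC_OUTPUT_FLOW_ID_VALUE_MASK) >>
		 HWRM_CFA_EM_FLOW_ALLOC_OUTPUT_FLOW_ID_VALUE_SFT;
	*is_external = !!(flow_id & HWRM_CFA_EM_FLOW_ALLOC_OUTPUT_FLOW_ID_TYPE);
	*is_tx = !!(flow_id & HWRM_CFA_EM_FLOW_ALLOC_OUTPUT_FLOW_ID_DIR);
	return 0;
}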
+ */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; +} __rte_packed; + +/* hwrm_cfa_meter_qcaps_output (size:320b/40B) */ +struct hwrm_cfa_meter_qcaps_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint32_t flags; + /* + * Enumeration denoting the clock at which the Meter is running with. + * This enumeration is used for resources that are similar for both + * TX and RX paths of the chip. + */ + #define HWRM_CFA_METER_QCAPS_OUTPUT_FLAGS_CLOCK_MASK UINT32_C(0xf) + #define HWRM_CFA_METER_QCAPS_OUTPUT_FLAGS_CLOCK_SFT 0 + /* 375 MHz */ + #define HWRM_CFA_METER_QCAPS_OUTPUT_FLAGS_CLOCK_375MHZ UINT32_C(0x0) + /* 625 MHz */ + #define HWRM_CFA_METER_QCAPS_OUTPUT_FLAGS_CLOCK_625MHZ UINT32_C(0x1) + #define HWRM_CFA_METER_QCAPS_OUTPUT_FLAGS_CLOCK_LAST \ + HWRM_CFA_METER_QCAPS_OUTPUT_FLAGS_CLOCK_625MHZ + uint8_t unused_0[4]; + /* + * The minimum guaranteed number of tx meter profiles supported + * for this function. + */ + uint16_t min_tx_profile; + /* + * The maximum non-guaranteed number of tx meter profiles supported + * for this function. + */ + uint16_t max_tx_profile; + /* + * The minimum guaranteed number of rx meter profiles supported + * for this function. + */ + uint16_t min_rx_profile; + /* + * The maximum non-guaranteed number of rx meter profiles supported + * for this function. + */ + uint16_t max_rx_profile; + /* + * The minimum guaranteed number of tx meter instances supported + * for this function. + */ + uint16_t min_tx_instance; + /* + * The maximum non-guaranteed number of tx meter instances supported + * for this function. + */ + uint16_t max_tx_instance; + /* + * The minimum guaranteed number of rx meter instances supported + * for this function. + */ + uint16_t min_rx_instance; + /* + * The maximum non-guaranteed number of rx meter instances supported + * for this function. + */ + uint16_t max_rx_instance; + uint8_t unused_1[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/******************************** + * hwrm_cfa_meter_profile_alloc * + ********************************/ + + +/* hwrm_cfa_meter_profile_alloc_input (size:320b/40B) */ +struct hwrm_cfa_meter_profile_alloc_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. 
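As a small usage note, a driver might consult the guaranteed minima reported by hwrm_cfa_meter_qcaps before committing to RX metering. A hedged sketch (little-endian to host conversion of the response fields is omitted):

/* Hypothetical check: RX metering is only worth configuring if at least
 * one RX profile and one RX instance are guaranteed for this function. */
static int
sketch_rx_metering_supported(const struct hwrm_cfa_meter_qcaps_output *resp)
{
	return resp->min_rx_profile > 0 && resp->min_rx_instance > 0;
}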
This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + uint8_t flags; + /* + * Enumeration denoting the RX, TX type of the resource. + * This enumeration is used for resources that are similar for both + * TX and RX paths of the chip. + */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_FLAGS_PATH UINT32_C(0x1) + /* tx path */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_FLAGS_PATH_TX \ + UINT32_C(0x0) + /* rx path */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_FLAGS_PATH_RX \ + UINT32_C(0x1) + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_FLAGS_PATH_LAST \ + HWRM_CFA_METER_PROFILE_ALLOC_INPUT_FLAGS_PATH_RX + /* The meter algorithm type. */ + uint8_t meter_type; + /* RFC 2697 (srTCM) */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_METER_TYPE_RFC2697 \ + UINT32_C(0x0) + /* RFC 2698 (trTCM) */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_METER_TYPE_RFC2698 \ + UINT32_C(0x1) + /* RFC 4115 (trTCM) */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_METER_TYPE_RFC4115 \ + UINT32_C(0x2) + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_METER_TYPE_LAST \ + HWRM_CFA_METER_PROFILE_ALLOC_INPUT_METER_TYPE_RFC4115 + /* + * This field is reserved for the future use. + * It shall be set to 0. + */ + uint16_t reserved1; + /* + * This field is reserved for the future use. + * It shall be set to 0. + */ + uint32_t reserved2; + /* A meter rate specified in bytes-per-second. */ + uint32_t commit_rate; + /* The bandwidth value. */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_SCALE_LAST \ + HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). 
*/ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Raw value */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_UNIT_RAW \ + (UINT32_C(0x7) << 29) + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_UNIT_LAST \ + HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_UNIT_RAW + /* A meter burst size specified in bytes. */ + uint32_t commit_burst; + /* The bandwidth value. */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_SCALE_LAST \ + HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid value */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_BW_VALUE_UNIT_LAST \ + HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_BW_VALUE_UNIT_INVALID + /* A meter rate specified in bytes-per-second. */ + uint32_t excess_peak_rate; + /* The bandwidth value. */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. 
*/ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_SCALE_LAST \ + HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Raw unit */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_RAW \ + (UINT32_C(0x7) << 29) + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_LAST \ + HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_RAW + /* A meter burst size specified in bytes. */ + uint32_t excess_peak_burst; + /* The bandwidth value. */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_SCALE_LAST \ + HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. 
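Each rate/burst word above packs a 28-bit magnitude, a scale bit (bit 28, bits vs. bytes) and a 3-bit unit field (bits 29-31). As an illustration, the sketch below encodes a committed rate of 1 Gbit/s; the choice of bit scale plus the GIGA unit is one of several equivalent encodings.

/* Illustrative only: encode a 1 Gbit/s committed rate into the
 * commit_rate word using the value/scale/unit layout defined above. */
static uint32_t
sketch_commit_rate_1gbit(void)
{
	uint32_t rate;

	/* 28-bit magnitude in the low bits */
	rate = (1 << HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_SFT) &
	       HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_MASK;
	/* magnitude counts bits, not bytes */
	rate |= HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_SCALE_BITS;
	/* ... expressed in Gb (base 10) */
	rate |= HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_UNIT_GIGA;
	return rate;
}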
*/ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_LAST \ + HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_INVALID +} __rte_packed; + +/* hwrm_cfa_meter_profile_alloc_output (size:128b/16B) */ +struct hwrm_cfa_meter_profile_alloc_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* This value identifies a meter profile in CFA. */ + uint16_t meter_profile_id; + /* + * A value of 0xfff is considered invalid and implies the + * profile is not configured. + */ + #define HWRM_CFA_METER_PROFILE_ALLOC_OUTPUT_METER_PROFILE_ID_INVALID \ + UINT32_C(0xffff) + #define HWRM_CFA_METER_PROFILE_ALLOC_OUTPUT_METER_PROFILE_ID_LAST \ + HWRM_CFA_METER_PROFILE_ALLOC_OUTPUT_METER_PROFILE_ID_INVALID + uint8_t unused_0[5]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/******************************* + * hwrm_cfa_meter_profile_free * + *******************************/ + + +/* hwrm_cfa_meter_profile_free_input (size:192b/24B) */ +struct hwrm_cfa_meter_profile_free_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + uint8_t flags; + /* + * Enumeration denoting the RX, TX type of the resource. + * This enumeration is used for resources that are similar for both + * TX and RX paths of the chip. + */ + #define HWRM_CFA_METER_PROFILE_FREE_INPUT_FLAGS_PATH UINT32_C(0x1) + /* tx path */ + #define HWRM_CFA_METER_PROFILE_FREE_INPUT_FLAGS_PATH_TX \ + UINT32_C(0x0) + /* rx path */ + #define HWRM_CFA_METER_PROFILE_FREE_INPUT_FLAGS_PATH_RX \ + UINT32_C(0x1) + #define HWRM_CFA_METER_PROFILE_FREE_INPUT_FLAGS_PATH_LAST \ + HWRM_CFA_METER_PROFILE_FREE_INPUT_FLAGS_PATH_RX + uint8_t unused_0; + /* This value identifies a meter profile in CFA. 
*/ + uint16_t meter_profile_id; + /* + * A value of 0xfff is considered invalid and implies the + * profile is not configured. + */ + #define HWRM_CFA_METER_PROFILE_FREE_INPUT_METER_PROFILE_ID_INVALID \ + UINT32_C(0xffff) + #define HWRM_CFA_METER_PROFILE_FREE_INPUT_METER_PROFILE_ID_LAST \ + HWRM_CFA_METER_PROFILE_FREE_INPUT_METER_PROFILE_ID_INVALID + uint8_t unused_1[4]; +} __rte_packed; + +/* hwrm_cfa_meter_profile_free_output (size:128b/16B) */ +struct hwrm_cfa_meter_profile_free_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/****************************** + * hwrm_cfa_meter_profile_cfg * + ******************************/ + + +/* hwrm_cfa_meter_profile_cfg_input (size:320b/40B) */ +struct hwrm_cfa_meter_profile_cfg_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + uint8_t flags; + /* + * Enumeration denoting the RX, TX type of the resource. + * This enumeration is used for resources that are similar for both + * TX and RX paths of the chip. + */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_FLAGS_PATH UINT32_C(0x1) + /* tx path */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_FLAGS_PATH_TX UINT32_C(0x0) + /* rx path */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_FLAGS_PATH_RX UINT32_C(0x1) + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_FLAGS_PATH_LAST \ + HWRM_CFA_METER_PROFILE_CFG_INPUT_FLAGS_PATH_RX + /* The meter algorithm type. */ + uint8_t meter_type; + /* RFC 2697 (srTCM) */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_METER_TYPE_RFC2697 \ + UINT32_C(0x0) + /* RFC 2698 (trTCM) */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_METER_TYPE_RFC2698 \ + UINT32_C(0x1) + /* RFC 4115 (trTCM) */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_METER_TYPE_RFC4115 \ + UINT32_C(0x2) + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_METER_TYPE_LAST \ + HWRM_CFA_METER_PROFILE_CFG_INPUT_METER_TYPE_RFC4115 + /* This value identifies a meter profile in CFA. 
*/ + uint16_t meter_profile_id; + /* + * A value of 0xfff is considered invalid and implies the + * profile is not configured. + */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_METER_PROFILE_ID_INVALID \ + UINT32_C(0xffff) + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_METER_PROFILE_ID_LAST \ + HWRM_CFA_METER_PROFILE_CFG_INPUT_METER_PROFILE_ID_INVALID + /* + * This field is reserved for the future use. + * It shall be set to 0. + */ + uint32_t reserved; + /* A meter rate specified in bytes-per-second. */ + uint32_t commit_rate; + /* The bandwidth value. */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_SCALE_LAST \ + HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Raw value */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_BW_VALUE_UNIT_RAW \ + (UINT32_C(0x7) << 29) + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_BW_VALUE_UNIT_LAST \ + HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_BW_VALUE_UNIT_RAW + /* A meter burst size specified in bytes. */ + uint32_t commit_burst; + /* The bandwidth value. */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_SCALE_LAST \ + HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). 
*/ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid value */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_BW_VALUE_UNIT_LAST \ + HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_BW_VALUE_UNIT_INVALID + /* A meter rate specified in bytes-per-second. */ + uint32_t excess_peak_rate; + /* The bandwidth value. */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_SCALE_LAST \ + HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Raw unit */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_RAW \ + (UINT32_C(0x7) << 29) + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_LAST \ + HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_RAW + /* A meter burst size specified in bytes. */ + uint32_t excess_peak_burst; + /* The bandwidth value. */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. 
*/ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_SCALE_LAST \ + HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_LAST \ + HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_INVALID +} __rte_packed; + +/* hwrm_cfa_meter_profile_cfg_output (size:128b/16B) */ +struct hwrm_cfa_meter_profile_cfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/********************************* + * hwrm_cfa_meter_instance_alloc * + *********************************/ + + +/* hwrm_cfa_meter_instance_alloc_input (size:192b/24B) */ +struct hwrm_cfa_meter_instance_alloc_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. 
This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + uint8_t flags; + /* + * Enumeration denoting the RX, TX type of the resource. + * This enumeration is used for resources that are similar for both + * TX and RX paths of the chip. + */ + #define HWRM_CFA_METER_INSTANCE_ALLOC_INPUT_FLAGS_PATH \ + UINT32_C(0x1) + /* tx path */ + #define HWRM_CFA_METER_INSTANCE_ALLOC_INPUT_FLAGS_PATH_TX \ + UINT32_C(0x0) + /* rx path */ + #define HWRM_CFA_METER_INSTANCE_ALLOC_INPUT_FLAGS_PATH_RX \ + UINT32_C(0x1) + #define HWRM_CFA_METER_INSTANCE_ALLOC_INPUT_FLAGS_PATH_LAST \ + HWRM_CFA_METER_INSTANCE_ALLOC_INPUT_FLAGS_PATH_RX + uint8_t unused_0; + /* This value identifies a meter profile in CFA. */ + uint16_t meter_profile_id; + /* + * A value of 0xffff is considered invalid and implies the + * profile is not configured. + */ + #define HWRM_CFA_METER_INSTANCE_ALLOC_INPUT_METER_PROFILE_ID_INVALID \ + UINT32_C(0xffff) + #define HWRM_CFA_METER_INSTANCE_ALLOC_INPUT_METER_PROFILE_ID_LAST \ + HWRM_CFA_METER_INSTANCE_ALLOC_INPUT_METER_PROFILE_ID_INVALID + uint8_t unused_1[4]; +} __rte_packed; + +/* hwrm_cfa_meter_instance_alloc_output (size:128b/16B) */ +struct hwrm_cfa_meter_instance_alloc_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* This value identifies a meter instance in CFA. */ + uint16_t meter_instance_id; + /* + * A value of 0xffff is considered invalid and implies the + * instance is not configured. + */ + #define HWRM_CFA_METER_INSTANCE_ALLOC_OUTPUT_METER_INSTANCE_ID_INVALID \ + UINT32_C(0xffff) + #define HWRM_CFA_METER_INSTANCE_ALLOC_OUTPUT_METER_INSTANCE_ID_LAST \ + HWRM_CFA_METER_INSTANCE_ALLOC_OUTPUT_METER_INSTANCE_ID_INVALID + uint8_t unused_0[5]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/******************************* + * hwrm_cfa_meter_instance_cfg * + *******************************/ + + +/* hwrm_cfa_meter_instance_cfg_input (size:192b/24B) */ +struct hwrm_cfa_meter_instance_cfg_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. 
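A hedged sketch of binding a new RX meter instance to a previously allocated profile; the meter_instance_id returned in the response could then be attached to a flow, for example through new_meter_instance_id in hwrm_cfa_ntuple_filter_cfg above. The HWRM_CFA_METER_INSTANCE_ALLOC constant and the transport step are assumed, as in the earlier sketches.

/* Hypothetical sketch: request an RX meter instance bound to an existing
 * meter profile. */
static void
sketch_meter_instance_alloc(struct hwrm_cfa_meter_instance_alloc_input *req,
			    uint16_t meter_profile_id)
{
	memset(req, 0, sizeof(*req));
	req->req_type = HWRM_CFA_METER_INSTANCE_ALLOC;
	req->flags = HWRM_CFA_METER_INSTANCE_ALLOC_INPUT_FLAGS_PATH_RX;
	req->meter_profile_id = meter_profile_id;	/* from hwrm_cfa_meter_profile_alloc */
}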
This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + uint8_t flags; + /* + * Enumeration denoting the RX, TX type of the resource. + * This enumeration is used for resources that are similar for both + * TX and RX paths of the chip. + */ + #define HWRM_CFA_METER_INSTANCE_CFG_INPUT_FLAGS_PATH UINT32_C(0x1) + /* tx path */ + #define HWRM_CFA_METER_INSTANCE_CFG_INPUT_FLAGS_PATH_TX \ + UINT32_C(0x0) + /* rx path */ + #define HWRM_CFA_METER_INSTANCE_CFG_INPUT_FLAGS_PATH_RX \ + UINT32_C(0x1) + #define HWRM_CFA_METER_INSTANCE_CFG_INPUT_FLAGS_PATH_LAST \ + HWRM_CFA_METER_INSTANCE_CFG_INPUT_FLAGS_PATH_RX + uint8_t unused_0; + /* + * This value identifies a new meter profile to be associated with + * the meter instance specified in this command. + */ + uint16_t meter_profile_id; + /* + * A value of 0xffff is considered invalid and implies the + * profile is not configured. + */ + #define HWRM_CFA_METER_INSTANCE_CFG_INPUT_METER_PROFILE_ID_INVALID \ + UINT32_C(0xffff) + #define HWRM_CFA_METER_INSTANCE_CFG_INPUT_METER_PROFILE_ID_LAST \ + HWRM_CFA_METER_INSTANCE_CFG_INPUT_METER_PROFILE_ID_INVALID + /* + * This value identifies the ID of a meter instance that needs to be updated with + * a new meter profile specified in this command. + */ + uint16_t meter_instance_id; + uint8_t unused_1[2]; +} __rte_packed; + +/* hwrm_cfa_meter_instance_cfg_output (size:128b/16B) */ +struct hwrm_cfa_meter_instance_cfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/******************************** + * hwrm_cfa_meter_instance_free * + ********************************/ + + +/* hwrm_cfa_meter_instance_free_input (size:192b/24B) */ +struct hwrm_cfa_meter_instance_free_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + uint8_t flags; + /* + * Enumeration denoting the RX, TX type of the resource. 
+ * This enumeration is used for resources that are similar for both + * TX and RX paths of the chip. + */ + #define HWRM_CFA_METER_INSTANCE_FREE_INPUT_FLAGS_PATH UINT32_C(0x1) + /* tx path */ + #define HWRM_CFA_METER_INSTANCE_FREE_INPUT_FLAGS_PATH_TX \ + UINT32_C(0x0) + /* rx path */ + #define HWRM_CFA_METER_INSTANCE_FREE_INPUT_FLAGS_PATH_RX \ + UINT32_C(0x1) + #define HWRM_CFA_METER_INSTANCE_FREE_INPUT_FLAGS_PATH_LAST \ + HWRM_CFA_METER_INSTANCE_FREE_INPUT_FLAGS_PATH_RX + uint8_t unused_0; + /* This value identifies a meter instance in CFA. */ + uint16_t meter_instance_id; + /* + * A value of 0xfff is considered invalid and implies the + * instance is not configured. + */ + #define HWRM_CFA_METER_INSTANCE_FREE_INPUT_METER_INSTANCE_ID_INVALID \ + UINT32_C(0xffff) + #define HWRM_CFA_METER_INSTANCE_FREE_INPUT_METER_INSTANCE_ID_LAST \ + HWRM_CFA_METER_INSTANCE_FREE_INPUT_METER_INSTANCE_ID_INVALID + uint8_t unused_1[4]; +} __rte_packed; + +/* hwrm_cfa_meter_instance_free_output (size:128b/16B) */ +struct hwrm_cfa_meter_instance_free_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/******************************* + * hwrm_cfa_decap_filter_alloc * + *******************************/ + + +/* hwrm_cfa_decap_filter_alloc_input (size:832b/104B) */ +struct hwrm_cfa_decap_filter_alloc_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + uint32_t flags; + /* ovs_tunnel is 1 b */ + #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_FLAGS_OVS_TUNNEL \ + UINT32_C(0x1) + uint32_t enables; + /* + * This bit must be '1' for the tunnel_type field to be + * configured. + */ + #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE \ + UINT32_C(0x1) + /* + * This bit must be '1' for the tunnel_id field to be + * configured. + */ + #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_ID \ + UINT32_C(0x2) + /* + * This bit must be '1' for the src_macaddr field to be + * configured. 
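Completing the meter lifecycle shown above, a minimal sketch of releasing an RX meter instance once no flow references it, under the same assumptions as the earlier sketches:

/* Hypothetical sketch: release an RX meter instance that is no longer
 * attached to any flow. */
static void
sketch_meter_instance_free(struct hwrm_cfa_meter_instance_free_input *req,
			   uint16_t meter_instance_id)
{
	memset(req, 0, sizeof(*req));
	req->req_type = HWRM_CFA_METER_INSTANCE_FREE;
	req->flags = HWRM_CFA_METER_INSTANCE_FREE_INPUT_FLAGS_PATH_RX;
	req->meter_instance_id = meter_instance_id;
}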
+ */ + #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR \ + UINT32_C(0x4) + /* + * This bit must be '1' for the dst_macaddr field to be + * configured. + */ + #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_DST_MACADDR \ + UINT32_C(0x8) + /* + * This bit must be '1' for the ovlan_vid field to be + * configured. + */ + #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_OVLAN_VID \ + UINT32_C(0x10) + /* + * This bit must be '1' for the ivlan_vid field to be + * configured. + */ + #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_IVLAN_VID \ + UINT32_C(0x20) + /* + * This bit must be '1' for the t_ovlan_vid field to be + * configured. + */ + #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_T_OVLAN_VID \ + UINT32_C(0x40) + /* + * This bit must be '1' for the t_ivlan_vid field to be + * configured. + */ + #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_T_IVLAN_VID \ + UINT32_C(0x80) + /* + * This bit must be '1' for the ethertype field to be + * configured. + */ + #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE \ + UINT32_C(0x100) + /* + * This bit must be '1' for the src_ipaddr field to be + * configured. + */ + #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR \ + UINT32_C(0x200) + /* + * This bit must be '1' for the dst_ipaddr field to be + * configured. + */ + #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR \ + UINT32_C(0x400) + /* + * This bit must be '1' for the ipaddr_type field to be + * configured. + */ + #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_IPADDR_TYPE \ + UINT32_C(0x800) + /* + * This bit must be '1' for the ip_protocol field to be + * configured. + */ + #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL \ + UINT32_C(0x1000) + /* + * This bit must be '1' for the src_port field to be + * configured. + */ + #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT \ + UINT32_C(0x2000) + /* + * This bit must be '1' for the dst_port field to be + * configured. + */ + #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_DST_PORT \ + UINT32_C(0x4000) + /* + * This bit must be '1' for the dst_id field to be + * configured. + */ + #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_DST_ID \ + UINT32_C(0x8000) + /* + * This bit must be '1' for the mirror_vnic_id field to be + * configured. + */ + #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID \ + UINT32_C(0x10000) + /* + * Tunnel identifier. + * Virtual Network Identifier (VNI). Only valid with + * tunnel_types VXLAN, NVGRE, and Geneve. + * Only lower 24-bits of VNI field are used + * in setting up the filter. + */ + uint32_t tunnel_id; + /* Tunnel Type. 
*/ + uint8_t tunnel_type; + /* Non-tunnel */ + #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_TUNNEL_TYPE_NONTUNNEL \ + UINT32_C(0x0) + /* Virtual eXtensible Local Area Network (VXLAN) */ + #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_TUNNEL_TYPE_VXLAN \ + UINT32_C(0x1) + /* Network Virtualization Generic Routing Encapsulation (NVGRE) */ + #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_TUNNEL_TYPE_NVGRE \ + UINT32_C(0x2) + /* Generic Routing Encapsulation (GRE) inside Ethernet payload */ + #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_TUNNEL_TYPE_L2GRE \ + UINT32_C(0x3) + /* IP in IP */ + #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_TUNNEL_TYPE_IPIP \ + UINT32_C(0x4) + /* Generic Network Virtualization Encapsulation (Geneve) */ + #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_TUNNEL_TYPE_GENEVE \ + UINT32_C(0x5) + /* Multi-Protocol Label Switching (MPLS) */ + #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_TUNNEL_TYPE_MPLS \ + UINT32_C(0x6) + /* Stateless Transport Tunnel (STT) */ + #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_TUNNEL_TYPE_STT \ + UINT32_C(0x7) + /* Generic Routing Encapsulation (GRE) inside IP datagram payload */ + #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_TUNNEL_TYPE_IPGRE \ + UINT32_C(0x8) + /* IPV4 over virtual eXtensible Local Area Network (IPV4oVXLAN) */ + #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_TUNNEL_TYPE_VXLAN_V4 \ + UINT32_C(0x9) + /* Enhance Generic Routing Encapsulation (GRE version 1) inside IP datagram payload */ + #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_TUNNEL_TYPE_IPGRE_V1 \ + UINT32_C(0xa) + /* Use fixed layer 2 ether type of 0xFFFF */ + #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_TUNNEL_TYPE_L2_ETYPE \ + UINT32_C(0xb) + /* IPV6 over virtual eXtensible Local Area Network with GPE header (IPV6oVXLANGPE) */ + #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_TUNNEL_TYPE_VXLAN_GPE_V6 \ + UINT32_C(0xc) + /* Any tunneled traffic */ + #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL \ + UINT32_C(0xff) + #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_TUNNEL_TYPE_LAST \ + HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL + uint8_t unused_0; + uint16_t unused_1; + /* + * This value indicates the source MAC address in + * the Ethernet header. + */ + uint8_t src_macaddr[6]; + uint8_t unused_2[2]; + /* + * This value indicates the destination MAC address in + * the Ethernet header. + */ + uint8_t dst_macaddr[6]; + /* + * This value indicates the VLAN ID of the outer VLAN tag + * in the Ethernet header. + */ + uint16_t ovlan_vid; + /* + * This value indicates the VLAN ID of the inner VLAN tag + * in the Ethernet header. + */ + uint16_t ivlan_vid; + /* + * This value indicates the VLAN ID of the outer VLAN tag + * in the tunnel Ethernet header. + */ + uint16_t t_ovlan_vid; + /* + * This value indicates the VLAN ID of the inner VLAN tag + * in the tunnel Ethernet header. + */ + uint16_t t_ivlan_vid; + /* This value indicates the ethertype in the Ethernet header. */ + uint16_t ethertype; + /* + * This value indicates the type of IP address. + * 4 - IPv4 + * 6 - IPv6 + * All others are invalid. + */ + uint8_t ip_addr_type; + /* invalid */ + #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_UNKNOWN \ + UINT32_C(0x0) + /* IPv4 */ + #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV4 \ + UINT32_C(0x4) + /* IPv6 */ + #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 \ + UINT32_C(0x6) + #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_LAST \ + HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 + /* + * The value of protocol filed in IP header. 
+ * Applies to UDP and TCP traffic. + * 6 - TCP + * 17 - UDP + */ + uint8_t ip_protocol; + /* invalid */ + #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_IP_PROTOCOL_UNKNOWN \ + UINT32_C(0x0) + /* TCP */ + #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_IP_PROTOCOL_TCP \ + UINT32_C(0x6) + /* UDP */ + #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_IP_PROTOCOL_UDP \ + UINT32_C(0x11) + #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_IP_PROTOCOL_LAST \ + HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_IP_PROTOCOL_UDP + uint16_t unused_3; + uint32_t unused_4; + /* + * The value of source IP address to be used in filtering. + * For IPv4, first four bytes represent the IP address. + */ + uint32_t src_ipaddr[4]; + /* + * The value of destination IP address to be used in filtering. + * For IPv4, first four bytes represent the IP address. + */ + uint32_t dst_ipaddr[4]; + /* + * The value of source port to be used in filtering. + * Applies to UDP and TCP traffic. + */ + uint16_t src_port; + /* + * The value of destination port to be used in filtering. + * Applies to UDP and TCP traffic. + */ + uint16_t dst_port; + /* + * If set, this value shall represent the + * Logical VNIC ID of the destination VNIC for the RX + * path. + */ + uint16_t dst_id; + /* + * If set, this value shall represent the L2 context that matches the L2 + * information of the decap filter. + */ + uint16_t l2_ctxt_ref_id; +} __rte_packed; + +/* hwrm_cfa_decap_filter_alloc_output (size:128b/16B) */ +struct hwrm_cfa_decap_filter_alloc_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* This value is an opaque id into CFA data structures. */ + uint32_t decap_filter_id; + uint8_t unused_0[3]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/****************************** + * hwrm_cfa_decap_filter_free * + ******************************/ + + +/* hwrm_cfa_decap_filter_free_input (size:192b/24B) */ +struct hwrm_cfa_decap_filter_free_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* This value is an opaque id into CFA data structures. 
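+ *
+ * Informal usage note: this is the same opaque id returned in
+ * hwrm_cfa_decap_filter_alloc_output, passed back verbatim to
+ * release the filter. With hypothetical locals `alloc_resp`
+ * (the earlier alloc response) and `free_req` (a zeroed
+ * hwrm_cfa_decap_filter_free_input):
+ *
+ *   free_req.decap_filter_id = alloc_resp.decap_filter_id;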
*/ + uint32_t decap_filter_id; + uint8_t unused_0[4]; +} __rte_packed; + +/* hwrm_cfa_decap_filter_free_output (size:128b/16B) */ +struct hwrm_cfa_decap_filter_free_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/*********************** + * hwrm_cfa_flow_alloc * + ***********************/ + + +/* hwrm_cfa_flow_alloc_input (size:1024b/128B) */ +struct hwrm_cfa_flow_alloc_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + uint16_t flags; + /* tunnel is 1 b */ + #define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_TUNNEL \ + UINT32_C(0x1) + /* num_vlan is 2 b */ + #define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_NUM_VLAN_MASK \ + UINT32_C(0x6) + #define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_NUM_VLAN_SFT 1 + /* no tags */ + #define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_NUM_VLAN_NONE \ + (UINT32_C(0x0) << 1) + /* 1 tag */ + #define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_NUM_VLAN_ONE \ + (UINT32_C(0x1) << 1) + /* 2 tags */ + #define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_NUM_VLAN_TWO \ + (UINT32_C(0x2) << 1) + #define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_NUM_VLAN_LAST \ + HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_NUM_VLAN_TWO + /* Enumeration denoting the Flow Type. */ + #define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_FLOWTYPE_MASK \ + UINT32_C(0x38) + #define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_FLOWTYPE_SFT 3 + /* L2 flow */ + #define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_FLOWTYPE_L2 \ + (UINT32_C(0x0) << 3) + /* IPV4 flow */ + #define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_FLOWTYPE_IPV4 \ + (UINT32_C(0x1) << 3) + /* IPV6 flow */ + #define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_FLOWTYPE_IPV6 \ + (UINT32_C(0x2) << 3) + #define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_FLOWTYPE_LAST \ + HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_FLOWTYPE_IPV6 + /* + * when set to 1, indicates TX flow offload for function specified in src_fid and + * the dst_fid should be set to invalid value. To indicate a VM to VM flow, both + * of the path_tx and path_rx flags need to be set. 
For virtio vSwitch offload + * case, the src_fid and dst_fid is set to the same fid value. For the SRIOV + * vSwitch offload case, the src_fid and dst_fid must be set to the same VF FID + * belong to the children VFs of the same PF to indicate VM to VM flow. + */ + #define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_PATH_TX \ + UINT32_C(0x40) + /* + * when set to 1, indicates RX flow offload for function specified in dst_fid and + * the src_fid should be set to invalid value. + */ + #define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_PATH_RX \ + UINT32_C(0x80) + /* + * Set to 1 to indicate matching of VXLAN VNI from the custom vxlan header is + * required and the VXLAN VNI value is stored in the first 24 bits of the dmac field. + * This flag is only valid when the flow direction is RX. + */ + #define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_MATCH_VXLAN_IP_VNI \ + UINT32_C(0x100) + /* Set to 1 to indicate vhost_id is specified in the outer_vlan_tci field. */ + #define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_VHOST_ID_USE_VLAN \ + UINT32_C(0x200) + /* + * Tx Flow: vf fid. + * Rx Flow: pf fid. + */ + uint16_t src_fid; + /* Tunnel handle valid when tunnel flag is set. */ + uint32_t tunnel_handle; + uint16_t action_flags; + /* + * Setting of this flag indicates drop action. If this flag is not set, + * then it should be considered accept action. + */ + #define HWRM_CFA_FLOW_ALLOC_INPUT_ACTION_FLAGS_FWD \ + UINT32_C(0x1) + /* recycle is 1 b */ + #define HWRM_CFA_FLOW_ALLOC_INPUT_ACTION_FLAGS_RECYCLE \ + UINT32_C(0x2) + /* + * Setting of this flag indicates drop action. If this flag is not set, + * then it should be considered accept action. + */ + #define HWRM_CFA_FLOW_ALLOC_INPUT_ACTION_FLAGS_DROP \ + UINT32_C(0x4) + /* meter is 1 b */ + #define HWRM_CFA_FLOW_ALLOC_INPUT_ACTION_FLAGS_METER \ + UINT32_C(0x8) + /* tunnel is 1 b */ + #define HWRM_CFA_FLOW_ALLOC_INPUT_ACTION_FLAGS_TUNNEL \ + UINT32_C(0x10) + /* nat_src is 1 b */ + #define HWRM_CFA_FLOW_ALLOC_INPUT_ACTION_FLAGS_NAT_SRC \ + UINT32_C(0x20) + /* nat_dest is 1 b */ + #define HWRM_CFA_FLOW_ALLOC_INPUT_ACTION_FLAGS_NAT_DEST \ + UINT32_C(0x40) + /* nat_ipv4_address is 1 b */ + #define HWRM_CFA_FLOW_ALLOC_INPUT_ACTION_FLAGS_NAT_IPV4_ADDRESS \ + UINT32_C(0x80) + /* l2_header_rewrite is 1 b */ + #define HWRM_CFA_FLOW_ALLOC_INPUT_ACTION_FLAGS_L2_HEADER_REWRITE \ + UINT32_C(0x100) + /* ttl_decrement is 1 b */ + #define HWRM_CFA_FLOW_ALLOC_INPUT_ACTION_FLAGS_TTL_DECREMENT \ + UINT32_C(0x200) + /* + * If set to 1 and flow direction is TX, it indicates decap of L2 header + * and encap of tunnel header. If set to 1 and flow direction is RX, it + * indicates decap of tunnel header and encap L2 header. The type of tunnel + * is specified in the tunnel_type field. + */ + #define HWRM_CFA_FLOW_ALLOC_INPUT_ACTION_FLAGS_TUNNEL_IP \ + UINT32_C(0x400) + /* If set to 1, flow aging is enabled for this flow. */ + #define HWRM_CFA_FLOW_ALLOC_INPUT_ACTION_FLAGS_FLOW_AGING_ENABLED \ + UINT32_C(0x800) + /* + * If set to 1 an attempt will be made to try to offload this flow to the + * most optimal flow table resource. If set to 0, the flow will be + * placed to the default flow table resource. + */ + #define HWRM_CFA_FLOW_ALLOC_INPUT_ACTION_FLAGS_PRI_HINT \ + UINT32_C(0x1000) + /* + * If set to 1 there will be no attempt to allocate an on-chip try to + * offload this flow. If set to 0, which will keep compatibility with the + * older drivers, will cause the FW to attempt to allocate an on-chip flow + * counter for the newly created flow. 
This will keep the existing behavior + * with EM flows which always had an associated flow counter. + */ + #define HWRM_CFA_FLOW_ALLOC_INPUT_ACTION_FLAGS_NO_FLOW_COUNTER_ALLOC \ + UINT32_C(0x2000) + /* + * Tx Flow: pf or vf fid. + * Rx Flow: vf fid. + */ + uint16_t dst_fid; + /* VLAN tpid, valid when push_vlan flag is set. */ + uint16_t l2_rewrite_vlan_tpid; + /* VLAN tci, valid when push_vlan flag is set. */ + uint16_t l2_rewrite_vlan_tci; + /* Meter id, valid when meter flag is set. */ + uint16_t act_meter_id; + /* Flow with the same l2 context tcam key. */ + uint16_t ref_flow_handle; + /* This value sets the match value for the ethertype. */ + uint16_t ethertype; + /* valid when num tags is 1 or 2. */ + uint16_t outer_vlan_tci; + /* This value sets the match value for the Destination MAC address. */ + uint16_t dmac[3]; + /* valid when num tags is 2. */ + uint16_t inner_vlan_tci; + /* This value sets the match value for the Source MAC address. */ + uint16_t smac[3]; + /* The bit length of destination IP address mask. */ + uint8_t ip_dst_mask_len; + /* The bit length of source IP address mask. */ + uint8_t ip_src_mask_len; + /* The value of destination IPv4/IPv6 address. */ + uint32_t ip_dst[4]; + /* The source IPv4/IPv6 address. */ + uint32_t ip_src[4]; + /* + * The value of source port. + * Applies to UDP and TCP traffic. + */ + uint16_t l4_src_port; + /* + * The value of source port mask. + * Applies to UDP and TCP traffic. + */ + uint16_t l4_src_port_mask; + /* + * The value of destination port. + * Applies to UDP and TCP traffic. + */ + uint16_t l4_dst_port; + /* + * The value of destination port mask. + * Applies to UDP and TCP traffic. + */ + uint16_t l4_dst_port_mask; + /* + * NAT IPv4/6 address based on address type flag. + * 0 values are ignored. + */ + uint32_t nat_ip_address[4]; + /* L2 header re-write Destination MAC address. */ + uint16_t l2_rewrite_dmac[3]; + /* + * The NAT source/destination port based on direction flag. + * Applies to UDP and TCP traffic. + * 0 values are ignored. + */ + uint16_t nat_port; + /* L2 header re-write Source MAC address. */ + uint16_t l2_rewrite_smac[3]; + /* The value of ip protocol. */ + uint8_t ip_proto; + /* Tunnel Type. 
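+ *
+ * Informal sketch of how the flag fields above combine with this
+ * tunnel type (hypothetical zeroed request `req`; byte-order
+ * helpers from rte_byteorder.h): an RX IPv4 flow carrying one VLAN
+ * tag over a VXLAN tunnel could be encoded as
+ *
+ *   req.flags = rte_cpu_to_le_16(
+ *       HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_TUNNEL |
+ *       HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_NUM_VLAN_ONE |
+ *       HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_FLOWTYPE_IPV4 |
+ *       HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_PATH_RX);
+ *   req.action_flags = rte_cpu_to_le_16(
+ *       HWRM_CFA_FLOW_ALLOC_INPUT_ACTION_FLAGS_FWD);
+ *   req.tunnel_type =
+ *       HWRM_CFA_FLOW_ALLOC_INPUT_TUNNEL_TYPE_VXLAN;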
*/ + uint8_t tunnel_type; + /* Non-tunnel */ + #define HWRM_CFA_FLOW_ALLOC_INPUT_TUNNEL_TYPE_NONTUNNEL \ + UINT32_C(0x0) + /* Virtual eXtensible Local Area Network (VXLAN) */ + #define HWRM_CFA_FLOW_ALLOC_INPUT_TUNNEL_TYPE_VXLAN \ + UINT32_C(0x1) + /* Network Virtualization Generic Routing Encapsulation (NVGRE) */ + #define HWRM_CFA_FLOW_ALLOC_INPUT_TUNNEL_TYPE_NVGRE \ + UINT32_C(0x2) + /* Generic Routing Encapsulation (GRE) inside Ethernet payload */ + #define HWRM_CFA_FLOW_ALLOC_INPUT_TUNNEL_TYPE_L2GRE \ + UINT32_C(0x3) + /* IP in IP */ + #define HWRM_CFA_FLOW_ALLOC_INPUT_TUNNEL_TYPE_IPIP \ + UINT32_C(0x4) + /* Generic Network Virtualization Encapsulation (Geneve) */ + #define HWRM_CFA_FLOW_ALLOC_INPUT_TUNNEL_TYPE_GENEVE \ + UINT32_C(0x5) + /* Multi-Protocol Label Switching (MPLS) */ + #define HWRM_CFA_FLOW_ALLOC_INPUT_TUNNEL_TYPE_MPLS \ + UINT32_C(0x6) + /* Stateless Transport Tunnel (STT) */ + #define HWRM_CFA_FLOW_ALLOC_INPUT_TUNNEL_TYPE_STT \ + UINT32_C(0x7) + /* Generic Routing Encapsulation (GRE) inside IP datagram payload */ + #define HWRM_CFA_FLOW_ALLOC_INPUT_TUNNEL_TYPE_IPGRE \ + UINT32_C(0x8) + /* IPV4 over virtual eXtensible Local Area Network (IPV4oVXLAN) */ + #define HWRM_CFA_FLOW_ALLOC_INPUT_TUNNEL_TYPE_VXLAN_V4 \ + UINT32_C(0x9) + /* Enhance Generic Routing Encapsulation (GRE version 1) inside IP datagram payload */ + #define HWRM_CFA_FLOW_ALLOC_INPUT_TUNNEL_TYPE_IPGRE_V1 \ + UINT32_C(0xa) + /* Use fixed layer 2 ether type of 0xFFFF */ + #define HWRM_CFA_FLOW_ALLOC_INPUT_TUNNEL_TYPE_L2_ETYPE \ + UINT32_C(0xb) + /* IPV6 over virtual eXtensible Local Area Network with GPE header (IPV6oVXLANGPE) */ + #define HWRM_CFA_FLOW_ALLOC_INPUT_TUNNEL_TYPE_VXLAN_GPE_V6 \ + UINT32_C(0xc) + /* Any tunneled traffic */ + #define HWRM_CFA_FLOW_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL \ + UINT32_C(0xff) + #define HWRM_CFA_FLOW_ALLOC_INPUT_TUNNEL_TYPE_LAST \ + HWRM_CFA_FLOW_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL +} __rte_packed; + +/* hwrm_cfa_flow_alloc_output (size:256b/32B) */ +struct hwrm_cfa_flow_alloc_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* Flow record index. */ + uint16_t flow_handle; + uint8_t unused_0[2]; + /* + * The flow id value in bit 0-29 is the actual ID of the flow + * associated with this filter and it shall be used to match + * and associate the flow identifier returned in completion + * records. A value of 0xFFFFFFFF in the 32-bit flow_id field + * shall indicate no valid flow id. + */ + uint32_t flow_id; + /* Indicate the flow id value. */ + #define HWRM_CFA_FLOW_ALLOC_OUTPUT_FLOW_ID_VALUE_MASK \ + UINT32_C(0x3fffffff) + #define HWRM_CFA_FLOW_ALLOC_OUTPUT_FLOW_ID_VALUE_SFT 0 + /* Indicate type of the flow. */ + #define HWRM_CFA_FLOW_ALLOC_OUTPUT_FLOW_ID_TYPE \ + UINT32_C(0x40000000) + /* + * If this bit set to 0, then it indicates that the flow is + * internal flow. + */ + #define HWRM_CFA_FLOW_ALLOC_OUTPUT_FLOW_ID_TYPE_INT \ + (UINT32_C(0x0) << 30) + /* + * If this bit is set to 1, then it indicates that the flow is + * external flow. + */ + #define HWRM_CFA_FLOW_ALLOC_OUTPUT_FLOW_ID_TYPE_EXT \ + (UINT32_C(0x1) << 30) + #define HWRM_CFA_FLOW_ALLOC_OUTPUT_FLOW_ID_TYPE_LAST \ + HWRM_CFA_FLOW_ALLOC_OUTPUT_FLOW_ID_TYPE_EXT + /* Indicate the flow direction. 
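+ *
+ * Informal decode sketch for the whole flow_id word (hypothetical
+ * local `resp` holding this response; rte_le_to_cpu_32() is the
+ * DPDK byte-order helper):
+ *
+ *   uint32_t fid = rte_le_to_cpu_32(resp.flow_id);
+ *   uint32_t idx = fid & HWRM_CFA_FLOW_ALLOC_OUTPUT_FLOW_ID_VALUE_MASK;
+ *   int is_ext = !!(fid & HWRM_CFA_FLOW_ALLOC_OUTPUT_FLOW_ID_TYPE);
+ *   int is_tx  = !!(fid & HWRM_CFA_FLOW_ALLOC_OUTPUT_FLOW_ID_DIR);
+ *
+ * with 0xFFFFFFFF meaning no valid flow id, as described above.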
*/ + #define HWRM_CFA_FLOW_ALLOC_OUTPUT_FLOW_ID_DIR \ + UINT32_C(0x80000000) + /* If this bit set to 0, then it indicates rx flow. */ + #define HWRM_CFA_FLOW_ALLOC_OUTPUT_FLOW_ID_DIR_RX \ + (UINT32_C(0x0) << 31) + /* If this bit is set to 1, then it indicates that tx flow. */ + #define HWRM_CFA_FLOW_ALLOC_OUTPUT_FLOW_ID_DIR_TX \ + (UINT32_C(0x1) << 31) + #define HWRM_CFA_FLOW_ALLOC_OUTPUT_FLOW_ID_DIR_LAST \ + HWRM_CFA_FLOW_ALLOC_OUTPUT_FLOW_ID_DIR_TX + /* This value identifies a set of CFA data structures used for a flow. */ + uint64_t ext_flow_handle; + uint32_t flow_counter_id; + uint8_t unused_1[3]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/* hwrm_cfa_flow_alloc_cmd_err (size:64b/8B) */ +struct hwrm_cfa_flow_alloc_cmd_err { + /* + * command specific error codes that goes to + * the cmd_err field in Common HWRM Error Response. + */ + uint8_t code; + /* Unknown error */ + #define HWRM_CFA_FLOW_ALLOC_CMD_ERR_CODE_UNKNOWN UINT32_C(0x0) + /* No more L2 Context TCAM */ + #define HWRM_CFA_FLOW_ALLOC_CMD_ERR_CODE_L2_CONTEXT_TCAM UINT32_C(0x1) + /* No more action records */ + #define HWRM_CFA_FLOW_ALLOC_CMD_ERR_CODE_ACTION_RECORD UINT32_C(0x2) + /* No more flow counters */ + #define HWRM_CFA_FLOW_ALLOC_CMD_ERR_CODE_FLOW_COUNTER UINT32_C(0x3) + /* No more wild-card TCAM */ + #define HWRM_CFA_FLOW_ALLOC_CMD_ERR_CODE_WILD_CARD_TCAM UINT32_C(0x4) + /* Hash collsion in exact match tables */ + #define HWRM_CFA_FLOW_ALLOC_CMD_ERR_CODE_HASH_COLLISION UINT32_C(0x5) + /* Key is already installed */ + #define HWRM_CFA_FLOW_ALLOC_CMD_ERR_CODE_KEY_EXISTS UINT32_C(0x6) + /* Flow Context DB is out of resource */ + #define HWRM_CFA_FLOW_ALLOC_CMD_ERR_CODE_FLOW_CTXT_DB UINT32_C(0x7) + #define HWRM_CFA_FLOW_ALLOC_CMD_ERR_CODE_LAST \ + HWRM_CFA_FLOW_ALLOC_CMD_ERR_CODE_FLOW_CTXT_DB + uint8_t unused_0[7]; +} __rte_packed; + +/********************** + * hwrm_cfa_flow_free * + **********************/ + + +/* hwrm_cfa_flow_free_input (size:256b/32B) */ +struct hwrm_cfa_flow_free_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* Flow record index. */ + uint16_t flow_handle; + uint16_t unused_0; + /* Flow counter id to be freed. */ + uint32_t flow_counter_id; + /* This value identifies a set of CFA data structures used for a flow. 
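+ *
+ * Informal usage note: a free request simply hands back what
+ * hwrm_cfa_flow_alloc returned. With hypothetical locals
+ * `alloc_resp` (the earlier alloc response) and `free_req`
+ * (a zeroed hwrm_cfa_flow_free_input), roughly:
+ *
+ *   free_req.flow_handle     = alloc_resp.flow_handle;
+ *   free_req.flow_counter_id = alloc_resp.flow_counter_id;
+ *   free_req.ext_flow_handle = alloc_resp.ext_flow_handle;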
*/ + uint64_t ext_flow_handle; +} __rte_packed; + +/* hwrm_cfa_flow_free_output (size:256b/32B) */ +struct hwrm_cfa_flow_free_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* packet is 64 b */ + uint64_t packet; + /* byte is 64 b */ + uint64_t byte; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/* hwrm_cfa_flow_action_data (size:960b/120B) */ +struct hwrm_cfa_flow_action_data { + uint16_t action_flags; + /* Setting of this flag indicates accept action. */ + #define HWRM_CFA_FLOW_ACTION_DATA_ACTION_FLAGS_FWD \ + UINT32_C(0x1) + /* Setting of this flag indicates recycle action. */ + #define HWRM_CFA_FLOW_ACTION_DATA_ACTION_FLAGS_RECYCLE \ + UINT32_C(0x2) + /* Setting of this flag indicates drop action. */ + #define HWRM_CFA_FLOW_ACTION_DATA_ACTION_FLAGS_DROP \ + UINT32_C(0x4) + /* Setting of this flag indicates meter action. */ + #define HWRM_CFA_FLOW_ACTION_DATA_ACTION_FLAGS_METER \ + UINT32_C(0x8) + /* Setting of this flag indicates tunnel action. */ + #define HWRM_CFA_FLOW_ACTION_DATA_ACTION_FLAGS_TUNNEL \ + UINT32_C(0x10) + /* + * If set to 1 and flow direction is TX, it indicates decap of L2 header + * and encap of tunnel header. If set to 1 and flow direction is RX, it + * indicates decap of tunnel header and encap L2 header. + */ + #define HWRM_CFA_FLOW_ACTION_DATA_ACTION_FLAGS_TUNNEL_IP \ + UINT32_C(0x20) + /* Setting of this flag indicates ttl decrement action. */ + #define HWRM_CFA_FLOW_ACTION_DATA_ACTION_FLAGS_TTL_DECREMENT \ + UINT32_C(0x40) + /* If set to 1, flow aging is enabled for this flow. */ + #define HWRM_CFA_FLOW_ACTION_DATA_ACTION_FLAGS_FLOW_AGING_ENABLED \ + UINT32_C(0x80) + /* Setting of this flag indicates encap action. */ + #define HWRM_CFA_FLOW_ACTION_DATA_ACTION_FLAGS_ENCAP \ + UINT32_C(0x100) + /* Setting of this flag indicates decap action. */ + #define HWRM_CFA_FLOW_ACTION_DATA_ACTION_FLAGS_DECAP \ + UINT32_C(0x200) + /* Meter id. */ + uint16_t act_meter_id; + /* VNIC id. */ + uint16_t vnic_id; + /* vport number. */ + uint16_t vport_id; + /* The NAT source/destination. */ + uint16_t nat_port; + uint16_t unused_0[3]; + /* NAT IPv4/IPv6 address. */ + uint32_t nat_ip_address[4]; + /* Encapsulation Type. 
*/ + uint8_t encap_type; + /* Virtual eXtensible Local Area Network (VXLAN) */ + #define HWRM_CFA_FLOW_ACTION_DATA_ENCAP_TYPE_VXLAN UINT32_C(0x1) + /* Network Virtualization Generic Routing Encapsulation (NVGRE) */ + #define HWRM_CFA_FLOW_ACTION_DATA_ENCAP_TYPE_NVGRE UINT32_C(0x2) + /* Generic Routing Encapsulation (GRE) after inside Ethernet payload */ + #define HWRM_CFA_FLOW_ACTION_DATA_ENCAP_TYPE_L2GRE UINT32_C(0x3) + /* IP in IP */ + #define HWRM_CFA_FLOW_ACTION_DATA_ENCAP_TYPE_IPIP UINT32_C(0x4) + /* Generic Network Virtualization Encapsulation (Geneve) */ + #define HWRM_CFA_FLOW_ACTION_DATA_ENCAP_TYPE_GENEVE UINT32_C(0x5) + /* Multi-Protocol Label Switching (MPLS) */ + #define HWRM_CFA_FLOW_ACTION_DATA_ENCAP_TYPE_MPLS UINT32_C(0x6) + /* VLAN */ + #define HWRM_CFA_FLOW_ACTION_DATA_ENCAP_TYPE_VLAN UINT32_C(0x7) + /* Generic Routing Encapsulation (GRE) inside IP datagram payload */ + #define HWRM_CFA_FLOW_ACTION_DATA_ENCAP_TYPE_IPGRE UINT32_C(0x8) + /* IPV4 over virtual eXtensible Local Area Network (IPV4oVXLAN) */ + #define HWRM_CFA_FLOW_ACTION_DATA_ENCAP_TYPE_VXLAN_V4 UINT32_C(0x9) + /* Enhance Generic Routing Encapsulation (GRE version 1) inside IP datagram payload */ + #define HWRM_CFA_FLOW_ACTION_DATA_ENCAP_TYPE_IPGRE_V1 UINT32_C(0xa) + /* Use fixed layer 2 ether type of 0xFFFF */ + #define HWRM_CFA_FLOW_ACTION_DATA_ENCAP_TYPE_L2_ETYPE UINT32_C(0xb) + /* IPV6 over virtual eXtensible Local Area Network with GPE header (IPV6oVXLANGPE) */ + #define HWRM_CFA_FLOW_ACTION_DATA_ENCAP_TYPE_VXLAN_GPE_V6 UINT32_C(0xc) + #define HWRM_CFA_FLOW_ACTION_DATA_ENCAP_TYPE_LAST \ + HWRM_CFA_FLOW_ACTION_DATA_ENCAP_TYPE_VXLAN_GPE_V6 + uint8_t unused[7]; + /* This value is encap data for the associated encap type. */ + uint32_t encap_data[20]; +} __rte_packed; + +/* hwrm_cfa_flow_tunnel_hdr_data (size:64b/8B) */ +struct hwrm_cfa_flow_tunnel_hdr_data { + /* Tunnel Type. 
*/ + uint8_t tunnel_type; + /* Non-tunnel */ + #define HWRM_CFA_FLOW_TUNNEL_HDR_DATA_TUNNEL_TYPE_NONTUNNEL \ + UINT32_C(0x0) + /* Virtual eXtensible Local Area Network (VXLAN) */ + #define HWRM_CFA_FLOW_TUNNEL_HDR_DATA_TUNNEL_TYPE_VXLAN \ + UINT32_C(0x1) + /* Network Virtualization Generic Routing Encapsulation (NVGRE) */ + #define HWRM_CFA_FLOW_TUNNEL_HDR_DATA_TUNNEL_TYPE_NVGRE \ + UINT32_C(0x2) + /* Generic Routing Encapsulation (GRE) inside Ethernet payload */ + #define HWRM_CFA_FLOW_TUNNEL_HDR_DATA_TUNNEL_TYPE_L2GRE \ + UINT32_C(0x3) + /* IP in IP */ + #define HWRM_CFA_FLOW_TUNNEL_HDR_DATA_TUNNEL_TYPE_IPIP \ + UINT32_C(0x4) + /* Generic Network Virtualization Encapsulation (Geneve) */ + #define HWRM_CFA_FLOW_TUNNEL_HDR_DATA_TUNNEL_TYPE_GENEVE \ + UINT32_C(0x5) + /* Multi-Protocol Label Switching (MPLS) */ + #define HWRM_CFA_FLOW_TUNNEL_HDR_DATA_TUNNEL_TYPE_MPLS \ + UINT32_C(0x6) + /* Stateless Transport Tunnel (STT) */ + #define HWRM_CFA_FLOW_TUNNEL_HDR_DATA_TUNNEL_TYPE_STT \ + UINT32_C(0x7) + /* Generic Routing Encapsulation (GRE) inside IP datagram payload */ + #define HWRM_CFA_FLOW_TUNNEL_HDR_DATA_TUNNEL_TYPE_IPGRE \ + UINT32_C(0x8) + /* IPV4 over virtual eXtensible Local Area Network (IPV4oVXLAN) */ + #define HWRM_CFA_FLOW_TUNNEL_HDR_DATA_TUNNEL_TYPE_VXLAN_V4 \ + UINT32_C(0x9) + /* Enhance Generic Routing Encapsulation (GRE version 1) inside IP datagram payload */ + #define HWRM_CFA_FLOW_TUNNEL_HDR_DATA_TUNNEL_TYPE_IPGRE_V1 \ + UINT32_C(0xa) + /* Use fixed layer 2 ether type of 0xFFFF */ + #define HWRM_CFA_FLOW_TUNNEL_HDR_DATA_TUNNEL_TYPE_L2_ETYPE \ + UINT32_C(0xb) + /* IPV6 over virtual eXtensible Local Area Network with GPE header (IPV6oVXLANGPE) */ + #define HWRM_CFA_FLOW_TUNNEL_HDR_DATA_TUNNEL_TYPE_VXLAN_GPE_V6 \ + UINT32_C(0xc) + /* Any tunneled traffic */ + #define HWRM_CFA_FLOW_TUNNEL_HDR_DATA_TUNNEL_TYPE_ANYTUNNEL \ + UINT32_C(0xff) + #define HWRM_CFA_FLOW_TUNNEL_HDR_DATA_TUNNEL_TYPE_LAST \ + HWRM_CFA_FLOW_TUNNEL_HDR_DATA_TUNNEL_TYPE_ANYTUNNEL + uint8_t unused[3]; + /* + * Tunnel identifier. + * Virtual Network Identifier (VNI). + */ + uint32_t tunnel_id; +} __rte_packed; + +/* hwrm_cfa_flow_l4_key_data (size:64b/8B) */ +struct hwrm_cfa_flow_l4_key_data { + /* The value of source port. */ + uint16_t l4_src_port; + /* The value of destination port. */ + uint16_t l4_dst_port; + uint32_t unused; +} __rte_packed; + +/* hwrm_cfa_flow_l3_key_data (size:512b/64B) */ +struct hwrm_cfa_flow_l3_key_data { + /* The value of ip protocol. */ + uint8_t ip_protocol; + uint8_t unused_0[7]; + /* The value of destination IPv4/IPv6 address. */ + uint32_t ip_dst[4]; + /* The source IPv4/IPv6 address. */ + uint32_t ip_src[4]; + /* NAT IPv4/IPv6 address. */ + uint32_t nat_ip_address[4]; + uint32_t unused[2]; +} __rte_packed; + +/* hwrm_cfa_flow_l2_key_data (size:448b/56B) */ +struct hwrm_cfa_flow_l2_key_data { + /* Destination MAC address. */ + uint16_t dmac[3]; + uint16_t unused_0; + /* Source MAC address. */ + uint16_t smac[3]; + uint16_t unused_1; + /* L2 header re-write Destination MAC address. */ + uint16_t l2_rewrite_dmac[3]; + uint16_t unused_2; + /* L2 header re-write Source MAC address. */ + uint16_t l2_rewrite_smac[3]; + /* Ethertype. */ + uint16_t ethertype; + /* Number of VLAN tags. */ + uint16_t num_vlan_tags; + /* VLAN tpid. */ + uint16_t l2_rewrite_vlan_tpid; + /* VLAN tci. */ + uint16_t l2_rewrite_vlan_tci; + uint8_t unused_3[2]; + /* Outer VLAN TPID. */ + uint16_t ovlan_tpid; + /* Outer VLAN TCI. */ + uint16_t ovlan_tci; + /* Inner VLAN TPID. 
*/ + uint16_t ivlan_tpid; + /* Inner VLAN TCI. */ + uint16_t ivlan_tci; + uint8_t unused[8]; +} __rte_packed; + +/* hwrm_cfa_flow_key_data (size:4160b/520B) */ +struct hwrm_cfa_flow_key_data { + /* Flow associated tunnel L2 header key info. */ + uint32_t t_l2_key_data[14]; + /* Flow associated tunnel L2 header mask info. */ + uint32_t t_l2_key_mask[14]; + /* Flow associated tunnel L3 header key info. */ + uint32_t t_l3_key_data[16]; + /* Flow associated tunnel L3 header mask info. */ + uint32_t t_l3_key_mask[16]; + /* Flow associated tunnel L4 header key info. */ + uint32_t t_l4_key_data[2]; + /* Flow associated tunnel L4 header mask info. */ + uint32_t t_l4_key_mask[2]; + /* Flow associated tunnel header info. */ + uint32_t tunnel_hdr[2]; + /* Flow associated L2 header key info. */ + uint32_t l2_key_data[14]; + /* Flow associated L2 header mask info. */ + uint32_t l2_key_mask[14]; + /* Flow associated L3 header key info. */ + uint32_t l3_key_data[16]; + /* Flow associated L3 header mask info. */ + uint32_t l3_key_mask[16]; + /* Flow associated L4 header key info. */ + uint32_t l4_key_data[2]; + /* Flow associated L4 header mask info. */ + uint32_t l4_key_mask[2]; +} __rte_packed; + +/********************** + * hwrm_cfa_flow_info * + **********************/ + + +/* hwrm_cfa_flow_info_input (size:256b/32B) */ +struct hwrm_cfa_flow_info_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* Flow record index. */ + uint16_t flow_handle; + /* Max flow handle */ + #define HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_MAX_MASK \ + UINT32_C(0xfff) + #define HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_MAX_SFT 0 + /* CNP flow handle */ + #define HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_CNP_CNT \ + UINT32_C(0x1000) + /* RoCEv1 flow handle */ + #define HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_ROCEV1_CNT \ + UINT32_C(0x2000) + /* RoCEv2 flow handle */ + #define HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_ROCEV2_CNT \ + UINT32_C(0x4000) + /* Direction rx = 1 */ + #define HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_DIR_RX \ + UINT32_C(0x8000) + uint8_t unused_0[6]; + /* This value identifies a set of CFA data structures used for a flow. */ + uint64_t ext_flow_handle; +} __rte_packed; + +/* hwrm_cfa_flow_info_output (size:5632b/704B) */ +struct hwrm_cfa_flow_info_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. 
*/ + uint16_t resp_len; + uint8_t flags; + /* When set to 1, indicates the configuration is the TX flow. */ + #define HWRM_CFA_FLOW_INFO_OUTPUT_FLAGS_PATH_TX UINT32_C(0x1) + /* When set to 1, indicates the configuration is the RX flow. */ + #define HWRM_CFA_FLOW_INFO_OUTPUT_FLAGS_PATH_RX UINT32_C(0x2) + /* profile is 8 b */ + uint8_t profile; + /* src_fid is 16 b */ + uint16_t src_fid; + /* dst_fid is 16 b */ + uint16_t dst_fid; + /* l2_ctxt_id is 16 b */ + uint16_t l2_ctxt_id; + /* em_info is 64 b */ + uint64_t em_info; + /* tcam_info is 64 b */ + uint64_t tcam_info; + /* vfp_tcam_info is 64 b */ + uint64_t vfp_tcam_info; + /* ar_id is 16 b */ + uint16_t ar_id; + /* flow_handle is 16 b */ + uint16_t flow_handle; + /* tunnel_handle is 32 b */ + uint32_t tunnel_handle; + /* The flow aging timer for the flow, the unit is 100 milliseconds */ + uint16_t flow_timer; + uint8_t unused_0[6]; + /* Flow associated L2, L3 and L4 headers info. */ + uint32_t flow_key_data[130]; + /* Flow associated action record info. */ + uint32_t flow_action_info[30]; + uint8_t unused_1[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/*********************** + * hwrm_cfa_flow_flush * + ***********************/ + + +/* hwrm_cfa_flow_flush_input (size:256b/32B) */ +struct hwrm_cfa_flow_flush_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* flags is 32 b */ + uint32_t flags; + /* + * Set to 1 to indicate the page size, page layers, and flow_handle_table_dma_addr + * fields are valid. The flow flush operation should only flush the flows from the + * flow table specified. This flag is set to 0 by older driver. For older firmware, + * setting this flag has no effect. + */ + #define HWRM_CFA_FLOW_FLUSH_INPUT_FLAGS_FLOW_TABLE_VALID \ + UINT32_C(0x1) + /* + * Set to 1 to indicate flow flush operation to cleanup all the flows, meters, CFA + * context memory tables etc. This flag is set to 0 by older driver. For older firmware, + * setting this flag has no effect. + */ + #define HWRM_CFA_FLOW_FLUSH_INPUT_FLAGS_FLOW_RESET_ALL \ + UINT32_C(0x2) + /* + * Set to 1 to indicate flow flush operation to cleanup all the flows by the caller. + * This flag is set to 0 by older driver. For older firmware, setting this flag has no effect. 
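+ *
+ * Informal sketch of the simplest use of this command (hypothetical
+ * zeroed request `req`): flags left at 0 requests the legacy
+ * behaviour of flushing the caller's flows, while
+ *
+ *   req.flags = rte_cpu_to_le_32(
+ *       HWRM_CFA_FLOW_FLUSH_INPUT_FLAGS_FLOW_RESET_ALL);
+ *
+ * additionally asks newer firmware to reset flows, meters and CFA
+ * context memory. The flow-table description fields further below
+ * (page_size, page_level, num_flows, page_dir) are only meaningful
+ * when FLAGS_FLOW_TABLE_VALID is set.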
+ */ + #define HWRM_CFA_FLOW_FLUSH_INPUT_FLAGS_FLOW_RESET_PORT \ + UINT32_C(0x4) + /* Set to 1 to indicate the flow counter IDs are included in the flow table. */ + #define HWRM_CFA_FLOW_FLUSH_INPUT_FLAGS_FLOW_HANDLE_INCL_FC \ + UINT32_C(0x8000000) + /* + * This specifies the size of flow handle entries provided by the driver + * in the flow table specified below. Only two flow handle size enums are defined. + */ + #define HWRM_CFA_FLOW_FLUSH_INPUT_FLAGS_FLOW_HANDLE_ENTRY_SIZE_MASK \ + UINT32_C(0xc0000000) + #define HWRM_CFA_FLOW_FLUSH_INPUT_FLAGS_FLOW_HANDLE_ENTRY_SIZE_SFT \ + 30 + /* The flow handle is 16bit */ + #define HWRM_CFA_FLOW_FLUSH_INPUT_FLAGS_FLOW_HANDLE_ENTRY_SIZE_FLOW_HND_16BIT \ + (UINT32_C(0x0) << 30) + /* The flow handle is 64bit */ + #define HWRM_CFA_FLOW_FLUSH_INPUT_FLAGS_FLOW_HANDLE_ENTRY_SIZE_FLOW_HND_64BIT \ + (UINT32_C(0x1) << 30) + #define HWRM_CFA_FLOW_FLUSH_INPUT_FLAGS_FLOW_HANDLE_ENTRY_SIZE_LAST \ + HWRM_CFA_FLOW_FLUSH_INPUT_FLAGS_FLOW_HANDLE_ENTRY_SIZE_FLOW_HND_64BIT + /* Specify page size of the flow table memory. */ + uint8_t page_size; + /* The page size is 4K */ + #define HWRM_CFA_FLOW_FLUSH_INPUT_PAGE_SIZE_4K UINT32_C(0x0) + /* The page size is 8K */ + #define HWRM_CFA_FLOW_FLUSH_INPUT_PAGE_SIZE_8K UINT32_C(0x1) + /* The page size is 64K */ + #define HWRM_CFA_FLOW_FLUSH_INPUT_PAGE_SIZE_64K UINT32_C(0x4) + /* The page size is 256K */ + #define HWRM_CFA_FLOW_FLUSH_INPUT_PAGE_SIZE_256K UINT32_C(0x6) + /* The page size is 1M */ + #define HWRM_CFA_FLOW_FLUSH_INPUT_PAGE_SIZE_1M UINT32_C(0x8) + /* The page size is 2M */ + #define HWRM_CFA_FLOW_FLUSH_INPUT_PAGE_SIZE_2M UINT32_C(0x9) + /* The page size is 4M */ + #define HWRM_CFA_FLOW_FLUSH_INPUT_PAGE_SIZE_4M UINT32_C(0xa) + /* The page size is 1G */ + #define HWRM_CFA_FLOW_FLUSH_INPUT_PAGE_SIZE_1G UINT32_C(0x12) + #define HWRM_CFA_FLOW_FLUSH_INPUT_PAGE_SIZE_LAST \ + HWRM_CFA_FLOW_FLUSH_INPUT_PAGE_SIZE_1G + /* FLow table memory indirect levels. */ + uint8_t page_level; + /* PBL pointer is physical start address. */ + #define HWRM_CFA_FLOW_FLUSH_INPUT_PAGE_LEVEL_LVL_0 UINT32_C(0x0) + /* PBL pointer points to PTE table. */ + #define HWRM_CFA_FLOW_FLUSH_INPUT_PAGE_LEVEL_LVL_1 UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. */ + #define HWRM_CFA_FLOW_FLUSH_INPUT_PAGE_LEVEL_LVL_2 UINT32_C(0x2) + #define HWRM_CFA_FLOW_FLUSH_INPUT_PAGE_LEVEL_LAST \ + HWRM_CFA_FLOW_FLUSH_INPUT_PAGE_LEVEL_LVL_2 + /* number of flows in the flow table */ + uint16_t num_flows; + /* Pointer to the PBL, or PDL depending on number of levels */ + uint64_t page_dir; +} __rte_packed; + +/* hwrm_cfa_flow_flush_output (size:128b/16B) */ +struct hwrm_cfa_flow_flush_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. 
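+ *
+ * Informal note on how a driver typically consumes this (sketch
+ * only; `resp` and `resp_len` are hypothetical locals describing
+ * the response buffer and its known size): after issuing the
+ * command it re-reads the last byte of the response until the
+ * value 1 appears, e.g.
+ *
+ *   const volatile uint8_t *p =
+ *       (const volatile uint8_t *)resp + resp_len - 1;
+ *   while (*p != 1)
+ *       rte_delay_us(1);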
+ */ + uint8_t valid; +} __rte_packed; + +/*********************** + * hwrm_cfa_flow_stats * + ***********************/ + + +/* hwrm_cfa_flow_stats_input (size:640b/80B) */ +struct hwrm_cfa_flow_stats_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* Flow handle. */ + uint16_t num_flows; + /* Flow handle. */ + uint16_t flow_handle_0; + /* Flow handle. */ + uint16_t flow_handle_1; + /* Flow handle. */ + uint16_t flow_handle_2; + /* Flow handle. */ + uint16_t flow_handle_3; + /* Flow handle. */ + uint16_t flow_handle_4; + /* Flow handle. */ + uint16_t flow_handle_5; + /* Flow handle. */ + uint16_t flow_handle_6; + /* Flow handle. */ + uint16_t flow_handle_7; + /* Flow handle. */ + uint16_t flow_handle_8; + /* Flow handle. */ + uint16_t flow_handle_9; + uint8_t unused_0[2]; + /* Flow ID of a flow. */ + uint32_t flow_id_0; + /* Flow ID of a flow. */ + uint32_t flow_id_1; + /* Flow ID of a flow. */ + uint32_t flow_id_2; + /* Flow ID of a flow. */ + uint32_t flow_id_3; + /* Flow ID of a flow. */ + uint32_t flow_id_4; + /* Flow ID of a flow. */ + uint32_t flow_id_5; + /* Flow ID of a flow. */ + uint32_t flow_id_6; + /* Flow ID of a flow. */ + uint32_t flow_id_7; + /* Flow ID of a flow. */ + uint32_t flow_id_8; + /* Flow ID of a flow. */ + uint32_t flow_id_9; +} __rte_packed; + +/* hwrm_cfa_flow_stats_output (size:1408b/176B) */ +struct hwrm_cfa_flow_stats_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. 
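+ *
+ * Informal usage note for the request above: up to 10 flows can be
+ * queried per call by setting num_flows and filling the first
+ * num_flows flow_handle_N (or flow_id_N) fields; the packet_N and
+ * byte_N counters below are returned in the same order. With a
+ * hypothetical zeroed request `req` and two previously returned
+ * handles `h0` and `h1`:
+ *
+ *   req.num_flows     = rte_cpu_to_le_16(2);
+ *   req.flow_handle_0 = rte_cpu_to_le_16(h0);
+ *   req.flow_handle_1 = rte_cpu_to_le_16(h1);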
*/ + uint16_t resp_len; + /* packet_0 is 64 b */ + uint64_t packet_0; + /* packet_1 is 64 b */ + uint64_t packet_1; + /* packet_2 is 64 b */ + uint64_t packet_2; + /* packet_3 is 64 b */ + uint64_t packet_3; + /* packet_4 is 64 b */ + uint64_t packet_4; + /* packet_5 is 64 b */ + uint64_t packet_5; + /* packet_6 is 64 b */ + uint64_t packet_6; + /* packet_7 is 64 b */ + uint64_t packet_7; + /* packet_8 is 64 b */ + uint64_t packet_8; + /* packet_9 is 64 b */ + uint64_t packet_9; + /* byte_0 is 64 b */ + uint64_t byte_0; + /* byte_1 is 64 b */ + uint64_t byte_1; + /* byte_2 is 64 b */ + uint64_t byte_2; + /* byte_3 is 64 b */ + uint64_t byte_3; + /* byte_4 is 64 b */ + uint64_t byte_4; + /* byte_5 is 64 b */ + uint64_t byte_5; + /* byte_6 is 64 b */ + uint64_t byte_6; + /* byte_7 is 64 b */ + uint64_t byte_7; + /* byte_8 is 64 b */ + uint64_t byte_8; + /* byte_9 is 64 b */ + uint64_t byte_9; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/*********************************** + * hwrm_cfa_flow_aging_timer_reset * + ***********************************/ + + +/* hwrm_cfa_flow_aging_timer_reset_input (size:256b/32B) */ +struct hwrm_cfa_flow_aging_timer_reset_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* Flow record index. */ + uint16_t flow_handle; + uint8_t unused_0[2]; + /* + * New flow timer value for the flow specified in the ext_flow_handle. + * The flow timer unit is 100ms. + */ + uint32_t flow_timer; + /* This value identifies a set of CFA data structures used for a flow. */ + uint64_t ext_flow_handle; +} __rte_packed; + +/* hwrm_cfa_flow_aging_timer_reset_output (size:128b/16B) */ +struct hwrm_cfa_flow_aging_timer_reset_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. 
+ * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/*************************** + * hwrm_cfa_flow_aging_cfg * + ***************************/ + + +/* hwrm_cfa_flow_aging_cfg_input (size:384b/48B) */ +struct hwrm_cfa_flow_aging_cfg_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* The bit field to enable per flow aging configuration. */ + uint16_t enables; + /* This bit must be '1' for the tcp flow timer field to be configured */ + #define HWRM_CFA_FLOW_AGING_CFG_INPUT_ENABLES_TCP_FLOW_TIMER \ + UINT32_C(0x1) + /* This bit must be '1' for the tcp finish timer field to be configured */ + #define HWRM_CFA_FLOW_AGING_CFG_INPUT_ENABLES_TCP_FIN_TIMER \ + UINT32_C(0x2) + /* This bit must be '1' for the udp flow timer field to be configured */ + #define HWRM_CFA_FLOW_AGING_CFG_INPUT_ENABLES_UDP_FLOW_TIMER \ + UINT32_C(0x4) + /* This bit must be '1' for the eem dma interval field to be configured */ + #define HWRM_CFA_FLOW_AGING_CFG_INPUT_ENABLES_EEM_DMA_INTERVAL \ + UINT32_C(0x8) + /* This bit must be '1' for the eem notice interval field to be configured */ + #define HWRM_CFA_FLOW_AGING_CFG_INPUT_ENABLES_EEM_NOTICE_INTERVAL \ + UINT32_C(0x10) + /* This bit must be '1' for the eem context memory maximum entries field to be configured */ + #define HWRM_CFA_FLOW_AGING_CFG_INPUT_ENABLES_EEM_CTX_MAX_ENTRIES \ + UINT32_C(0x20) + /* This bit must be '1' for the eem context memory ID field to be configured */ + #define HWRM_CFA_FLOW_AGING_CFG_INPUT_ENABLES_EEM_CTX_ID \ + UINT32_C(0x40) + /* This bit must be '1' for the eem context memory type field to be configured */ + #define HWRM_CFA_FLOW_AGING_CFG_INPUT_ENABLES_EEM_CTX_MEM_TYPE \ + UINT32_C(0x80) + uint8_t flags; + /* Enumeration denoting the RX, TX type of the resource. */ + #define HWRM_CFA_FLOW_AGING_CFG_INPUT_FLAGS_PATH UINT32_C(0x1) + /* tx path */ + #define HWRM_CFA_FLOW_AGING_CFG_INPUT_FLAGS_PATH_TX UINT32_C(0x0) + /* rx path */ + #define HWRM_CFA_FLOW_AGING_CFG_INPUT_FLAGS_PATH_RX UINT32_C(0x1) + #define HWRM_CFA_FLOW_AGING_CFG_INPUT_FLAGS_PATH_LAST \ + HWRM_CFA_FLOW_AGING_CFG_INPUT_FLAGS_PATH_RX + /* Enumeration denoting the enable, disable eem flow aging configuration. 
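+ *
+ * Informal sketch of a minimal aging configuration (hypothetical
+ * zeroed request `req`; timer values are examples, in the 100 ms
+ * units documented below):
+ *
+ *   req.enables = rte_cpu_to_le_16(
+ *       HWRM_CFA_FLOW_AGING_CFG_INPUT_ENABLES_TCP_FLOW_TIMER |
+ *       HWRM_CFA_FLOW_AGING_CFG_INPUT_ENABLES_UDP_FLOW_TIMER);
+ *   req.flags = HWRM_CFA_FLOW_AGING_CFG_INPUT_FLAGS_PATH_RX;
+ *   req.tcp_flow_timer = rte_cpu_to_le_32(300);
+ *   req.udp_flow_timer = rte_cpu_to_le_32(150);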
*/ + #define HWRM_CFA_FLOW_AGING_CFG_INPUT_FLAGS_EEM UINT32_C(0x2) + /* tx path */ + #define HWRM_CFA_FLOW_AGING_CFG_INPUT_FLAGS_EEM_DISABLE \ + (UINT32_C(0x0) << 1) + /* rx path */ + #define HWRM_CFA_FLOW_AGING_CFG_INPUT_FLAGS_EEM_ENABLE \ + (UINT32_C(0x1) << 1) + #define HWRM_CFA_FLOW_AGING_CFG_INPUT_FLAGS_EEM_LAST \ + HWRM_CFA_FLOW_AGING_CFG_INPUT_FLAGS_EEM_ENABLE + uint8_t unused_0; + /* The flow aging timer for all TCP flows, the unit is 100 milliseconds. */ + uint32_t tcp_flow_timer; + /* The TCP finished timer for all TCP flows, the unit is 100 milliseconds. */ + uint32_t tcp_fin_timer; + /* The flow aging timer for all UDP flows, the unit is 100 milliseconds. */ + uint32_t udp_flow_timer; + /* The interval to dma eem ejection data to host memory, the unit is milliseconds. */ + uint16_t eem_dma_interval; + /* The interval to notify driver to read the eem ejection data, the unit is milliseconds. */ + uint16_t eem_notice_interval; + /* The maximum entries number in the eem context memory. */ + uint32_t eem_ctx_max_entries; + /* The context memory ID for eem flow aging. */ + uint16_t eem_ctx_id; + uint16_t eem_ctx_mem_type; + /* The content of context memory is eem ejection data, the size of each entry is 4 bytes. */ + #define HWRM_CFA_FLOW_AGING_CFG_INPUT_EEM_CTX_MEM_TYPE_EJECTION_DATA \ + UINT32_C(0x0) + #define HWRM_CFA_FLOW_AGING_CFG_INPUT_EEM_CTX_MEM_TYPE_LAST \ + HWRM_CFA_FLOW_AGING_CFG_INPUT_EEM_CTX_MEM_TYPE_EJECTION_DATA + uint8_t unused_1[4]; +} __rte_packed; + +/* hwrm_cfa_flow_aging_cfg_output (size:128b/16B) */ +struct hwrm_cfa_flow_aging_cfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/**************************** + * hwrm_cfa_flow_aging_qcfg * + ****************************/ + + +/* hwrm_cfa_flow_aging_qcfg_input (size:192b/24B) */ +struct hwrm_cfa_flow_aging_qcfg_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* The direction for the flow aging configuration, 1 is rx path, 2 is tx path. 
*/ + uint8_t flags; + /* Enumeration denoting the RX, TX type of the resource. */ + #define HWRM_CFA_FLOW_AGING_QCFG_INPUT_FLAGS_PATH UINT32_C(0x1) + /* tx path */ + #define HWRM_CFA_FLOW_AGING_QCFG_INPUT_FLAGS_PATH_TX UINT32_C(0x0) + /* rx path */ + #define HWRM_CFA_FLOW_AGING_QCFG_INPUT_FLAGS_PATH_RX UINT32_C(0x1) + #define HWRM_CFA_FLOW_AGING_QCFG_INPUT_FLAGS_PATH_LAST \ + HWRM_CFA_FLOW_AGING_QCFG_INPUT_FLAGS_PATH_RX + uint8_t unused_0[7]; +} __rte_packed; + +/* hwrm_cfa_flow_aging_qcfg_output (size:320b/40B) */ +struct hwrm_cfa_flow_aging_qcfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* The current flow aging timer for all TCP flows, the unit is 100 millisecond. */ + uint32_t tcp_flow_timer; + /* The current TCP finished timer for all TCP flows, the unit is 100 millisecond. */ + uint32_t tcp_fin_timer; + /* The current flow aging timer for all UDP flows, the unit is 100 millisecond. */ + uint32_t udp_flow_timer; + /* The interval to dma eem ejection data to host memory, the unit is milliseconds. */ + uint16_t eem_dma_interval; + /* The interval to notify driver to read the eem ejection data, the unit is milliseconds. */ + uint16_t eem_notice_interval; + /* The maximum entries number in the eem context memory. */ + uint32_t eem_ctx_max_entries; + /* The context memory ID for eem flow aging. */ + uint16_t eem_ctx_id; + /* The context memory type for eem flow aging. */ + uint16_t eem_ctx_mem_type; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/***************************** + * hwrm_cfa_flow_aging_qcaps * + *****************************/ + + +/* hwrm_cfa_flow_aging_qcaps_input (size:192b/24B) */ +struct hwrm_cfa_flow_aging_qcaps_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* The direction for the flow aging configuration, 1 is rx path, 2 is tx path. */ + uint8_t flags; + /* Enumeration denoting the RX, TX type of the resource. 
*/ + #define HWRM_CFA_FLOW_AGING_QCAPS_INPUT_FLAGS_PATH UINT32_C(0x1) + /* tx path */ + #define HWRM_CFA_FLOW_AGING_QCAPS_INPUT_FLAGS_PATH_TX UINT32_C(0x0) + /* rx path */ + #define HWRM_CFA_FLOW_AGING_QCAPS_INPUT_FLAGS_PATH_RX UINT32_C(0x1) + #define HWRM_CFA_FLOW_AGING_QCAPS_INPUT_FLAGS_PATH_LAST \ + HWRM_CFA_FLOW_AGING_QCAPS_INPUT_FLAGS_PATH_RX + uint8_t unused_0[7]; +} __rte_packed; + +/* hwrm_cfa_flow_aging_qcaps_output (size:256b/32B) */ +struct hwrm_cfa_flow_aging_qcaps_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* The maximum flow aging timer for all TCP flows, the unit is 100 millisecond. */ + uint32_t max_tcp_flow_timer; + /* The maximum TCP finished timer for all TCP flows, the unit is 100 millisecond. */ + uint32_t max_tcp_fin_timer; + /* The maximum flow aging timer for all UDP flows, the unit is 100 millisecond. */ + uint32_t max_udp_flow_timer; + /* The maximum aging flows that HW can support. */ + uint32_t max_aging_flows; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/********************************** + * hwrm_cfa_tcp_flag_process_qcfg * + **********************************/ + + +/* hwrm_cfa_tcp_flag_process_qcfg_input (size:128b/16B) */ +struct hwrm_cfa_tcp_flag_process_qcfg_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; +} __rte_packed; + +/* hwrm_cfa_tcp_flag_process_qcfg_output (size:192b/24B) */ +struct hwrm_cfa_tcp_flag_process_qcfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* The port 0 RX mirror action record ID. */ + uint16_t rx_ar_id_port0; + /* The port 1 RX mirror action record ID. */ + uint16_t rx_ar_id_port1; + /* The port 0 RX action record ID for TX TCP flag packets from loopback path. 
*/ + uint16_t tx_ar_id_port0; + /* The port 1 RX action record ID for TX TCP flag packets from loopback path. */ + uint16_t tx_ar_id_port1; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/********************** + * hwrm_cfa_pair_info * + **********************/ + + +/* hwrm_cfa_pair_info_input (size:448b/56B) */ +struct hwrm_cfa_pair_info_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + uint32_t flags; + /* If this flag is set, lookup by name else lookup by index. */ + #define HWRM_CFA_PAIR_INFO_INPUT_FLAGS_LOOKUP_TYPE UINT32_C(0x1) + /* If this flag is set, lookup by PF id and VF id. */ + #define HWRM_CFA_PAIR_INFO_INPUT_FLAGS_LOOKUP_REPRE UINT32_C(0x2) + /* Pair table index. */ + uint16_t pair_index; + /* Pair pf index. */ + uint8_t pair_pfid; + /* Pair vf index. */ + uint8_t pair_vfid; + /* Pair name (32 byte string). */ + char pair_name[32]; +} __rte_packed; + +/* hwrm_cfa_pair_info_output (size:576b/72B) */ +struct hwrm_cfa_pair_info_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* Pair table index. */ + uint16_t next_pair_index; + /* Pair member a's fid. */ + uint16_t a_fid; + /* Logical host number. */ + uint8_t host_a_index; + /* Logical PF number. */ + uint8_t pf_a_index; + /* Pair member a's Linux logical VF number. */ + uint16_t vf_a_index; + /* Rx CFA code. */ + uint16_t rx_cfa_code_a; + /* Tx CFA action. */ + uint16_t tx_cfa_action_a; + /* Pair member b's fid. */ + uint16_t b_fid; + /* Logical host number. */ + uint8_t host_b_index; + /* Logical PF number. */ + uint8_t pf_b_index; + /* Pair member a's Linux logical VF number. */ + uint16_t vf_b_index; + /* Rx CFA code. */ + uint16_t rx_cfa_code_b; + /* Tx CFA action. */ + uint16_t tx_cfa_action_b; + /* Pair mode (0-vf2fn, 1-rep2fn, 2-rep2rep, 3-proxy, 4-pfpair). */ + uint8_t pair_mode; + /* Pair between VF on local host with PF or VF on specified host. */ + #define HWRM_CFA_PAIR_INFO_OUTPUT_PAIR_MODE_VF2FN UINT32_C(0x0) + /* Pair between REP on local host with PF or VF on specified host. 
*/ + #define HWRM_CFA_PAIR_INFO_OUTPUT_PAIR_MODE_REP2FN UINT32_C(0x1) + /* Pair between REP on local host with REP on specified host. */ + #define HWRM_CFA_PAIR_INFO_OUTPUT_PAIR_MODE_REP2REP UINT32_C(0x2) + /* Pair for the proxy interface. */ + #define HWRM_CFA_PAIR_INFO_OUTPUT_PAIR_MODE_PROXY UINT32_C(0x3) + /* Pair for the PF interface. */ + #define HWRM_CFA_PAIR_INFO_OUTPUT_PAIR_MODE_PFPAIR UINT32_C(0x4) + #define HWRM_CFA_PAIR_INFO_OUTPUT_PAIR_MODE_LAST \ + HWRM_CFA_PAIR_INFO_OUTPUT_PAIR_MODE_PFPAIR + /* Pair state. */ + uint8_t pair_state; + /* Pair has been allocated */ + #define HWRM_CFA_PAIR_INFO_OUTPUT_PAIR_STATE_ALLOCATED UINT32_C(0x1) + /* Both pair members are active */ + #define HWRM_CFA_PAIR_INFO_OUTPUT_PAIR_STATE_ACTIVE UINT32_C(0x2) + #define HWRM_CFA_PAIR_INFO_OUTPUT_PAIR_STATE_LAST \ + HWRM_CFA_PAIR_INFO_OUTPUT_PAIR_STATE_ACTIVE + /* Pair name (32 byte string). */ + char pair_name[32]; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/*************************************** + * hwrm_cfa_redirect_query_tunnel_type * + ***************************************/ + + +/* hwrm_cfa_redirect_query_tunnel_type_input (size:192b/24B) */ +struct hwrm_cfa_redirect_query_tunnel_type_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* The source function id. */ + uint16_t src_fid; + uint8_t unused_0[6]; +} __rte_packed; + +/* hwrm_cfa_redirect_query_tunnel_type_output (size:128b/16B) */ +struct hwrm_cfa_redirect_query_tunnel_type_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* Tunnel Mask. 
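+ *
+ * Editorial sketch, not generated code: once the response is complete
+ * (valid byte observed), a caller might decode the returned mask as
+ * below; `resp` is assumed to point at the filled
+ * hwrm_cfa_redirect_query_tunnel_type_output buffer and
+ * rte_le_to_cpu_32() comes from rte_byteorder.h:
+ *
+ *	uint32_t mask = rte_le_to_cpu_32(resp->tunnel_mask);
+ *	int vxlan_ok = !!(mask &
+ *		HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE_OUTPUT_TUNNEL_MASK_VXLAN);
+ *	int geneve_ok = !!(mask &
+ *		HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE_OUTPUT_TUNNEL_MASK_GENEVE);
+ *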
*/ + uint32_t tunnel_mask; + /* Non-tunnel */ + #define HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE_OUTPUT_TUNNEL_MASK_NONTUNNEL \ + UINT32_C(0x1) + /* Virtual eXtensible Local Area Network (VXLAN) */ + #define HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE_OUTPUT_TUNNEL_MASK_VXLAN \ + UINT32_C(0x2) + /* Network Virtualization Generic Routing Encapsulation (NVGRE) */ + #define HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE_OUTPUT_TUNNEL_MASK_NVGRE \ + UINT32_C(0x4) + /* Generic Routing Encapsulation (GRE) inside Ethernet payload */ + #define HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE_OUTPUT_TUNNEL_MASK_L2GRE \ + UINT32_C(0x8) + /* IP in IP */ + #define HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE_OUTPUT_TUNNEL_MASK_IPIP \ + UINT32_C(0x10) + /* Generic Network Virtualization Encapsulation (Geneve) */ + #define HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE_OUTPUT_TUNNEL_MASK_GENEVE \ + UINT32_C(0x20) + /* Multi-Protocol Label Switching (MPLS) */ + #define HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE_OUTPUT_TUNNEL_MASK_MPLS \ + UINT32_C(0x40) + /* Stateless Transport Tunnel (STT) */ + #define HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE_OUTPUT_TUNNEL_MASK_STT \ + UINT32_C(0x80) + /* Generic Routing Encapsulation (GRE) inside IP datagram payload */ + #define HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE_OUTPUT_TUNNEL_MASK_IPGRE \ + UINT32_C(0x100) + /* IPV4 over virtual eXtensible Local Area Network (IPV4oVXLAN) */ + #define HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE_OUTPUT_TUNNEL_MASK_VXLAN_V4 \ + UINT32_C(0x200) + /* Enhance Generic Routing Encapsulation (GRE version 1) inside IP datagram payload */ + #define HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE_OUTPUT_TUNNEL_MASK_IPGRE_V1 \ + UINT32_C(0x400) + /* Any tunneled traffic */ + #define HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE_OUTPUT_TUNNEL_MASK_ANYTUNNEL \ + UINT32_C(0x800) + /* Use fixed layer 2 ether type of 0xFFFF */ + #define HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE_OUTPUT_TUNNEL_MASK_L2_ETYPE \ + UINT32_C(0x1000) + /* IPV6 over virtual eXtensible Local Area Network with GPE header (IPV6oVXLANGPE) */ + #define HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE_OUTPUT_TUNNEL_MASK_VXLAN_GPE_V6 \ + UINT32_C(0x2000) + uint8_t unused_0[3]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/************************* + * hwrm_cfa_ctx_mem_rgtr * + *************************/ + + +/* hwrm_cfa_ctx_mem_rgtr_input (size:256b/32B) */ +struct hwrm_cfa_ctx_mem_rgtr_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. 
This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + uint16_t flags; + /* Counter PBL indirect levels. */ + uint8_t page_level; + /* PBL pointer is physical start address. */ + #define HWRM_CFA_CTX_MEM_RGTR_INPUT_PAGE_LEVEL_LVL_0 UINT32_C(0x0) + /* PBL pointer points to PTE table. */ + #define HWRM_CFA_CTX_MEM_RGTR_INPUT_PAGE_LEVEL_LVL_1 UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. */ + #define HWRM_CFA_CTX_MEM_RGTR_INPUT_PAGE_LEVEL_LVL_2 UINT32_C(0x2) + #define HWRM_CFA_CTX_MEM_RGTR_INPUT_PAGE_LEVEL_LAST \ + HWRM_CFA_CTX_MEM_RGTR_INPUT_PAGE_LEVEL_LVL_2 + /* Page size. */ + uint8_t page_size; + /* 4KB page size. */ + #define HWRM_CFA_CTX_MEM_RGTR_INPUT_PAGE_SIZE_4K UINT32_C(0x0) + /* 8KB page size. */ + #define HWRM_CFA_CTX_MEM_RGTR_INPUT_PAGE_SIZE_8K UINT32_C(0x1) + /* 64KB page size. */ + #define HWRM_CFA_CTX_MEM_RGTR_INPUT_PAGE_SIZE_64K UINT32_C(0x4) + /* 256KB page size. */ + #define HWRM_CFA_CTX_MEM_RGTR_INPUT_PAGE_SIZE_256K UINT32_C(0x6) + /* 1MB page size. */ + #define HWRM_CFA_CTX_MEM_RGTR_INPUT_PAGE_SIZE_1M UINT32_C(0x8) + /* 2MB page size. */ + #define HWRM_CFA_CTX_MEM_RGTR_INPUT_PAGE_SIZE_2M UINT32_C(0x9) + /* 4MB page size. */ + #define HWRM_CFA_CTX_MEM_RGTR_INPUT_PAGE_SIZE_4M UINT32_C(0xa) + /* 1GB page size. */ + #define HWRM_CFA_CTX_MEM_RGTR_INPUT_PAGE_SIZE_1G UINT32_C(0x12) + #define HWRM_CFA_CTX_MEM_RGTR_INPUT_PAGE_SIZE_LAST \ + HWRM_CFA_CTX_MEM_RGTR_INPUT_PAGE_SIZE_1G + uint32_t unused_0; + /* Pointer to the PBL, or PDL depending on number of levels */ + uint64_t page_dir; +} __rte_packed; + +/* hwrm_cfa_ctx_mem_rgtr_output (size:128b/16B) */ +struct hwrm_cfa_ctx_mem_rgtr_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* + * Id/Handle to the recently register context memory. This handle is passed + * to the CFA feature. + */ + uint16_t ctx_id; + uint8_t unused_0[5]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/*************************** + * hwrm_cfa_ctx_mem_unrgtr * + ***************************/ + + +/* hwrm_cfa_ctx_mem_unrgtr_input (size:192b/24B) */ +struct hwrm_cfa_ctx_mem_unrgtr_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. 
+ */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* + * Id/Handle to the recently register context memory. This handle is passed + * to the CFA feature. + */ + uint16_t ctx_id; + uint8_t unused_0[6]; +} __rte_packed; + +/* hwrm_cfa_ctx_mem_unrgtr_output (size:128b/16B) */ +struct hwrm_cfa_ctx_mem_unrgtr_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/************************* + * hwrm_cfa_ctx_mem_qctx * + *************************/ + + +/* hwrm_cfa_ctx_mem_qctx_input (size:192b/24B) */ +struct hwrm_cfa_ctx_mem_qctx_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* + * Id/Handle to the recently register context memory. This handle is passed + * to the CFA feature. + */ + uint16_t ctx_id; + uint8_t unused_0[6]; +} __rte_packed; + +/* hwrm_cfa_ctx_mem_qctx_output (size:256b/32B) */ +struct hwrm_cfa_ctx_mem_qctx_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint16_t flags; + /* Counter PBL indirect levels. */ + uint8_t page_level; + /* PBL pointer is physical start address. */ + #define HWRM_CFA_CTX_MEM_QCTX_OUTPUT_PAGE_LEVEL_LVL_0 UINT32_C(0x0) + /* PBL pointer points to PTE table. 
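+ *
+ * Editorial note: the page_size encodings used by this command (and by
+ * hwrm_cfa_ctx_mem_rgtr) follow a "4 KiB shifted left by page_size"
+ * pattern (0x0 = 4KB, 0x1 = 8KB, ... 0x12 = 1GB), so a caller holding
+ * the completed response in `resp` could recover the layout as in the
+ * sketch below (an illustration, not generated code):
+ *
+ *	uint64_t page_bytes = 4096ULL << resp->page_size;
+ *	int direct = (resp->page_level ==
+ *		      HWRM_CFA_CTX_MEM_QCTX_OUTPUT_PAGE_LEVEL_LVL_0);
+ *
+ * With LVL_0, page_dir is the physical start address of the memory
+ * itself; with LVL_1/LVL_2 it points at the PTE/PDE tables.
+ *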
*/ + #define HWRM_CFA_CTX_MEM_QCTX_OUTPUT_PAGE_LEVEL_LVL_1 UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. */ + #define HWRM_CFA_CTX_MEM_QCTX_OUTPUT_PAGE_LEVEL_LVL_2 UINT32_C(0x2) + #define HWRM_CFA_CTX_MEM_QCTX_OUTPUT_PAGE_LEVEL_LAST \ + HWRM_CFA_CTX_MEM_QCTX_OUTPUT_PAGE_LEVEL_LVL_2 + /* Page size. */ + uint8_t page_size; + /* 4KB page size. */ + #define HWRM_CFA_CTX_MEM_QCTX_OUTPUT_PAGE_SIZE_4K UINT32_C(0x0) + /* 8KB page size. */ + #define HWRM_CFA_CTX_MEM_QCTX_OUTPUT_PAGE_SIZE_8K UINT32_C(0x1) + /* 64KB page size. */ + #define HWRM_CFA_CTX_MEM_QCTX_OUTPUT_PAGE_SIZE_64K UINT32_C(0x4) + /* 256KB page size. */ + #define HWRM_CFA_CTX_MEM_QCTX_OUTPUT_PAGE_SIZE_256K UINT32_C(0x6) + /* 1MB page size. */ + #define HWRM_CFA_CTX_MEM_QCTX_OUTPUT_PAGE_SIZE_1M UINT32_C(0x8) + /* 2MB page size. */ + #define HWRM_CFA_CTX_MEM_QCTX_OUTPUT_PAGE_SIZE_2M UINT32_C(0x9) + /* 4MB page size. */ + #define HWRM_CFA_CTX_MEM_QCTX_OUTPUT_PAGE_SIZE_4M UINT32_C(0xa) + /* 1GB page size. */ + #define HWRM_CFA_CTX_MEM_QCTX_OUTPUT_PAGE_SIZE_1G UINT32_C(0x12) + #define HWRM_CFA_CTX_MEM_QCTX_OUTPUT_PAGE_SIZE_LAST \ + HWRM_CFA_CTX_MEM_QCTX_OUTPUT_PAGE_SIZE_1G + uint8_t unused_0[4]; + /* Pointer to the PBL, or PDL depending on number of levels */ + uint64_t page_dir; + uint8_t unused_1[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/************************** + * hwrm_cfa_ctx_mem_qcaps * + **************************/ + + +/* hwrm_cfa_ctx_mem_qcaps_input (size:128b/16B) */ +struct hwrm_cfa_ctx_mem_qcaps_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; +} __rte_packed; + +/* hwrm_cfa_ctx_mem_qcaps_output (size:128b/16B) */ +struct hwrm_cfa_ctx_mem_qcaps_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* Indicates the maximum number of context memory which can be registered. */ + uint16_t max_entries; + uint8_t unused_0[5]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. 
This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/********************** + * hwrm_cfa_eem_qcaps * + **********************/ + + +/* hwrm_cfa_eem_qcaps_input (size:192b/24B) */ +struct hwrm_cfa_eem_qcaps_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + uint32_t flags; + /* + * When set to 1, indicates the configuration will apply to TX flows + * which are to be offloaded. + * Note if this bit is set then the path_rx bit can't be set. + */ + #define HWRM_CFA_EEM_QCAPS_INPUT_FLAGS_PATH_TX \ + UINT32_C(0x1) + /* + * When set to 1, indicates the configuration will apply to RX flows + * which are to be offloaded. + * Note if this bit is set then the path_tx bit can't be set. + */ + #define HWRM_CFA_EEM_QCAPS_INPUT_FLAGS_PATH_RX \ + UINT32_C(0x2) + /* When set to 1, all offloaded flows will be sent to EEM. */ + #define HWRM_CFA_EEM_QCAPS_INPUT_FLAGS_PREFERRED_OFFLOAD \ + UINT32_C(0x4) + uint32_t unused_0; +} __rte_packed; + +/* hwrm_cfa_eem_qcaps_output (size:320b/40B) */ +struct hwrm_cfa_eem_qcaps_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint32_t flags; + /* + * When set to 1, indicates the configuration will apply to TX flows + * which are to be offloaded. + * Note if this bit is set then the path_rx bit can't be set. + */ + #define HWRM_CFA_EEM_QCAPS_OUTPUT_FLAGS_PATH_TX \ + UINT32_C(0x1) + /* + * When set to 1, indicates the configuration will apply to RX flows + * which are to be offloaded. + * Note if this bit is set then the path_tx bit can't be set. + */ + #define HWRM_CFA_EEM_QCAPS_OUTPUT_FLAGS_PATH_RX \ + UINT32_C(0x2) + /* + * When set to 1, indicates the the FW supports the Centralized + * Memory Model. The concept designates one entity for the + * memory allocation while all others ‘subscribe’ to it. + */ + #define HWRM_CFA_EEM_QCAPS_OUTPUT_FLAGS_CENTRALIZED_MEMORY_MODEL_SUPPORTED \ + UINT32_C(0x4) + /* + * When set to 1, indicates the the FW supports the Detached + * Centralized Memory Model. The memory is allocated and managed + * as a separate entity. 
All PFs and VFs will be granted direct + * or semi-direct access to the allocated memory while none of + * which can interfere with the management of the memory. + */ + #define HWRM_CFA_EEM_QCAPS_OUTPUT_FLAGS_DETACHED_CENTRALIZED_MEMORY_MODEL_SUPPORTED \ + UINT32_C(0x8) + uint32_t unused_0; + uint32_t supported; + /* + * If set to 1, then EEM KEY0 table is supported using crc32 hash. + * If set to 0, EEM KEY0 table is not supported. + */ + #define HWRM_CFA_EEM_QCAPS_OUTPUT_SUPPORTED_KEY0_TABLE \ + UINT32_C(0x1) + /* + * If set to 1, then EEM KEY1 table is supported using lookup3 hash. + * If set to 0, EEM KEY1 table is not supported. + */ + #define HWRM_CFA_EEM_QCAPS_OUTPUT_SUPPORTED_KEY1_TABLE \ + UINT32_C(0x2) + /* + * If set to 1, then EEM External Record table is supported. + * If set to 0, EEM External Record table is not supported. + * (This table includes action record, EFC pointers, encap pointers) + */ + #define HWRM_CFA_EEM_QCAPS_OUTPUT_SUPPORTED_EXTERNAL_RECORD_TABLE \ + UINT32_C(0x4) + /* + * If set to 1, then EEM External Flow Counters table is supported. + * If set to 0, EEM External Flow Counters table is not supported. + */ + #define HWRM_CFA_EEM_QCAPS_OUTPUT_SUPPORTED_EXTERNAL_FLOW_COUNTERS_TABLE \ + UINT32_C(0x8) + /* + * If set to 1, then FID table used for implicit flow flush is supported. + * If set to 0, then FID table used for implicit flow flush is not supported. + */ + #define HWRM_CFA_EEM_QCAPS_OUTPUT_SUPPORTED_FID_TABLE \ + UINT32_C(0x10) + /* + * The maximum number of entries supported by EEM. When configuring the host memory + * the number of numbers of entries that can supported are - + * 32k, 64k 128k, 256k, 512k, 1M, 2M, 4M, 8M, 32M, 64M, 128M entries. + * Any value that are not these values, the FW will round down to the closest support + * number of entries. + */ + uint32_t max_entries_supported; + /* The entry size in bytes of each entry in the EEM KEY0/KEY1 tables. */ + uint16_t key_entry_size; + /* The entry size in bytes of each entry in the EEM RECORD tables. */ + uint16_t record_entry_size; + /* The entry size in bytes of each entry in the EEM EFC tables. */ + uint16_t efc_entry_size; + /* The FID size in bytes of each entry in the EEM FID tables. */ + uint16_t fid_entry_size; + uint8_t unused_1[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/******************** + * hwrm_cfa_eem_cfg * + ********************/ + + +/* hwrm_cfa_eem_cfg_input (size:384b/48B) */ +struct hwrm_cfa_eem_cfg_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. 
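+ *
+ * Editorial sketch, not generated code: a typical sequence registers the
+ * KEY0/KEY1/RECORD/EFC backing stores via hwrm_cfa_ctx_mem_rgtr, queries
+ * hwrm_cfa_eem_qcaps, and then issues this command. The *_ctx_id and
+ * max_entries_supported variables below are assumed to hold values
+ * returned by those earlier calls, and send_hwrm_request() is a
+ * hypothetical transport helper:
+ *
+ *	struct hwrm_cfa_eem_cfg_input req = { 0 };
+ *
+ *	req.flags = rte_cpu_to_le_32(
+ *		HWRM_CFA_EEM_CFG_INPUT_FLAGS_PATH_RX |
+ *		HWRM_CFA_EEM_CFG_INPUT_FLAGS_PREFERRED_OFFLOAD);
+ *	req.num_entries = rte_cpu_to_le_32(RTE_MIN(num_entries,
+ *						   max_entries_supported));
+ *	req.key0_ctx_id = rte_cpu_to_le_16(key0_ctx_id);
+ *	req.key1_ctx_id = rte_cpu_to_le_16(key1_ctx_id);
+ *	req.record_ctx_id = rte_cpu_to_le_16(record_ctx_id);
+ *	req.efc_ctx_id = rte_cpu_to_le_16(efc_ctx_id);
+ *	send_hwrm_request(&req, sizeof(req));
+ *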
+ */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + uint32_t flags; + /* + * When set to 1, indicates the configuration will apply to TX flows + * which are to be offloaded. + * Note if this bit is set then the path_rx bit can't be set. + */ + #define HWRM_CFA_EEM_CFG_INPUT_FLAGS_PATH_TX \ + UINT32_C(0x1) + /* + * When set to 1, indicates the configuration will apply to RX flows + * which are to be offloaded. + * Note if this bit is set then the path_tx bit can't be set. + */ + #define HWRM_CFA_EEM_CFG_INPUT_FLAGS_PATH_RX \ + UINT32_C(0x2) + /* When set to 1, all offloaded flows will be sent to EEM. */ + #define HWRM_CFA_EEM_CFG_INPUT_FLAGS_PREFERRED_OFFLOAD \ + UINT32_C(0x4) + /* When set to 1, secondary, 0 means primary. */ + #define HWRM_CFA_EEM_CFG_INPUT_FLAGS_SECONDARY_PF \ + UINT32_C(0x8) + /* + * Group_id which used by Firmware to identify memory pools belonging + * to certain group. + */ + uint16_t group_id; + uint16_t unused_0; + /* + * Configured EEM with the given number of entries. All the EEM tables KEY0, KEY1, + * RECORD, EFC all have the same number of entries and all tables will be configured + * using this value. Current minimum value is 32k. Current maximum value is 128M. + */ + uint32_t num_entries; + uint32_t unused_1; + /* Configured EEM with the given context if for KEY0 table. */ + uint16_t key0_ctx_id; + /* Configured EEM with the given context if for KEY1 table. */ + uint16_t key1_ctx_id; + /* Configured EEM with the given context if for RECORD table. */ + uint16_t record_ctx_id; + /* Configured EEM with the given context if for EFC table. */ + uint16_t efc_ctx_id; + /* Configured EEM with the given context if for EFC table. */ + uint16_t fid_ctx_id; + uint16_t unused_2; + uint32_t unused_3; +} __rte_packed; + +/* hwrm_cfa_eem_cfg_output (size:128b/16B) */ +struct hwrm_cfa_eem_cfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/********************* + * hwrm_cfa_eem_qcfg * + *********************/ + + +/* hwrm_cfa_eem_qcfg_input (size:192b/24B) */ +struct hwrm_cfa_eem_qcfg_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. 
This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + uint32_t flags; + /* When set to 1, indicates the configuration is the TX flow. */ + #define HWRM_CFA_EEM_QCFG_INPUT_FLAGS_PATH_TX UINT32_C(0x1) + /* When set to 1, indicates the configuration is the RX flow. */ + #define HWRM_CFA_EEM_QCFG_INPUT_FLAGS_PATH_RX UINT32_C(0x2) + uint32_t unused_0; +} __rte_packed; + +/* hwrm_cfa_eem_qcfg_output (size:256b/32B) */ +struct hwrm_cfa_eem_qcfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint32_t flags; + /* When set to 1, indicates the configuration is the TX flow. */ + #define HWRM_CFA_EEM_QCFG_OUTPUT_FLAGS_PATH_TX \ + UINT32_C(0x1) + /* When set to 1, indicates the configuration is the RX flow. */ + #define HWRM_CFA_EEM_QCFG_OUTPUT_FLAGS_PATH_RX \ + UINT32_C(0x2) + /* When set to 1, all offloaded flows will be sent to EEM. */ + #define HWRM_CFA_EEM_QCFG_OUTPUT_FLAGS_PREFERRED_OFFLOAD \ + UINT32_C(0x4) + /* The number of entries the FW has configured for EEM. */ + uint32_t num_entries; + /* Configured EEM with the given context if for KEY0 table. */ + uint16_t key0_ctx_id; + /* Configured EEM with the given context if for KEY1 table. */ + uint16_t key1_ctx_id; + /* Configured EEM with the given context if for RECORD table. */ + uint16_t record_ctx_id; + /* Configured EEM with the given context if for EFC table. */ + uint16_t efc_ctx_id; + /* Configured EEM with the given context if for EFC table. */ + uint16_t fid_ctx_id; + uint8_t unused_2[5]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/******************* + * hwrm_cfa_eem_op * + *******************/ + + +/* hwrm_cfa_eem_op_input (size:192b/24B) */ +struct hwrm_cfa_eem_op_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. 
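+ *
+ * Editorial sketch, not generated code: before releasing or resizing the
+ * EEM backing memory a caller is expected to disable EEM on the path
+ * first, e.g. for the RX path (send_hwrm_request() is a hypothetical
+ * stand-in for the driver's HWRM transport):
+ *
+ *	struct hwrm_cfa_eem_op_input req = { 0 };
+ *
+ *	req.flags = rte_cpu_to_le_32(HWRM_CFA_EEM_OP_INPUT_FLAGS_PATH_RX);
+ *	req.op = rte_cpu_to_le_16(HWRM_CFA_EEM_OP_INPUT_OP_EEM_DISABLE);
+ *	send_hwrm_request(&req, sizeof(req));
+ *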
+ */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + uint32_t flags; + /* + * When set to 1, indicates the host memory which is passed will be + * used for the TX flow offload function specified in fid. + * Note if this bit is set then the path_rx bit can't be set. + */ + #define HWRM_CFA_EEM_OP_INPUT_FLAGS_PATH_TX UINT32_C(0x1) + /* + * When set to 1, indicates the host memory which is passed will be + * used for the RX flow offload function specified in fid. + * Note if this bit is set then the path_tx bit can't be set. + */ + #define HWRM_CFA_EEM_OP_INPUT_FLAGS_PATH_RX UINT32_C(0x2) + uint16_t unused_0; + /* The operation to be performed on the EEM resources of the function. */ + uint16_t op; + /* This value is reserved and should not be used. */ + #define HWRM_CFA_EEM_OP_INPUT_OP_RESERVED UINT32_C(0x0) + /* + * To properly stop EEM and ensure there are no DMAs, the caller + * must disable EEM for the given PF, using this call. This will + * safely disable EEM and ensure that all DMAs to the + * keys/records/efc have been completed. + */ + #define HWRM_CFA_EEM_OP_INPUT_OP_EEM_DISABLE UINT32_C(0x1) + /* + * Once the EEM host memory and the EEM options have been + * configured, the caller should enable EEM for the given + * PF. Note once this call has been made, then the EEM mechanism + * will be active and DMAs will occur as packets are processed. + */ + #define HWRM_CFA_EEM_OP_INPUT_OP_EEM_ENABLE UINT32_C(0x2) + /* + * Clear EEM settings for the given PF so that the register values + * are reset back to their initial state. + */ + #define HWRM_CFA_EEM_OP_INPUT_OP_EEM_CLEANUP UINT32_C(0x3) + #define HWRM_CFA_EEM_OP_INPUT_OP_LAST \ + HWRM_CFA_EEM_OP_INPUT_OP_EEM_CLEANUP +} __rte_packed; + +/* hwrm_cfa_eem_op_output (size:128b/16B) */ +struct hwrm_cfa_eem_op_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/******************************** + * hwrm_cfa_adv_flow_mgnt_qcaps * + ********************************/ + + +/* hwrm_cfa_adv_flow_mgnt_qcaps_input (size:256b/32B) */ +struct hwrm_cfa_adv_flow_mgnt_qcaps_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands.
This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + uint32_t unused_0[4]; +} __rte_packed; + +/* hwrm_cfa_adv_flow_mgnt_qcaps_output (size:128b/16B) */ +struct hwrm_cfa_adv_flow_mgnt_qcaps_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint32_t flags; + /* + * Value of 1 to indicate firmware support 16-bit flow handle. + * Value of 0 to indicate firmware not support 16-bit flow handle. + */ + #define HWRM_CFA_ADV_FLOW_MGNT_QCAPS_OUTPUT_FLAGS_FLOW_HND_16BIT_SUPPORTED \ + UINT32_C(0x1) + /* + * Value of 1 to indicate firmware support 64-bit flow handle. + * Value of 0 to indicate firmware not support 64-bit flow handle. + */ + #define HWRM_CFA_ADV_FLOW_MGNT_QCAPS_OUTPUT_FLAGS_FLOW_HND_64BIT_SUPPORTED \ + UINT32_C(0x2) + /* + * Value of 1 to indicate firmware support flow batch delete operation through + * HWRM_CFA_FLOW_FLUSH command. + * Value of 0 to indicate that the firmware does not support flow batch delete + * operation. + */ + #define HWRM_CFA_ADV_FLOW_MGNT_QCAPS_OUTPUT_FLAGS_FLOW_BATCH_DELETE_SUPPORTED \ + UINT32_C(0x4) + /* + * Value of 1 to indicate that the firmware support flow reset all operation through + * HWRM_CFA_FLOW_FLUSH command. + * Value of 0 indicates firmware does not support flow reset all operation. + */ + #define HWRM_CFA_ADV_FLOW_MGNT_QCAPS_OUTPUT_FLAGS_FLOW_RESET_ALL_SUPPORTED \ + UINT32_C(0x8) + /* + * Value of 1 to indicate that firmware supports use of FID as dest_id in + * HWRM_CFA_NTUPLE_ALLOC/CFG commands. + * Value of 0 indicates firmware does not support use of FID as dest_id. + */ + #define HWRM_CFA_ADV_FLOW_MGNT_QCAPS_OUTPUT_FLAGS_NTUPLE_FLOW_DEST_FUNC_SUPPORTED \ + UINT32_C(0x10) + /* + * Value of 1 to indicate that firmware supports TX EEM flows. + * Value of 0 indicates firmware does not support TX EEM flows. + */ + #define HWRM_CFA_ADV_FLOW_MGNT_QCAPS_OUTPUT_FLAGS_TX_EEM_FLOW_SUPPORTED \ + UINT32_C(0x20) + /* + * Value of 1 to indicate that firmware supports RX EEM flows. + * Value of 0 indicates firmware does not support RX EEM flows. + */ + #define HWRM_CFA_ADV_FLOW_MGNT_QCAPS_OUTPUT_FLAGS_RX_EEM_FLOW_SUPPORTED \ + UINT32_C(0x40) + /* + * Value of 1 to indicate that firmware supports the dynamic allocation of an + * on-chip flow counter which can be used for EEM flows. + * Value of 0 indicates firmware does not support the dynamic allocation of an + * on-chip flow counter. + */ + #define HWRM_CFA_ADV_FLOW_MGNT_QCAPS_OUTPUT_FLAGS_FLOW_COUNTER_ALLOC_SUPPORTED \ + UINT32_C(0x80) + /* + * Value of 1 to indicate that firmware supports setting of + * rfs_ring_tbl_idx in HWRM_CFA_NTUPLE_ALLOC command. + * Value of 0 indicates firmware does not support rfs_ring_tbl_idx. 
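+ *
+ * Editorial sketch, not generated code: drivers typically read these
+ * capability flags once at startup and cache them; assuming `resp`
+ * points at the completed hwrm_cfa_adv_flow_mgnt_qcaps_output buffer:
+ *
+ *	uint32_t caps = rte_le_to_cpu_32(resp->flags);
+ *	int has_16bit_flow_handle = !!(caps &
+ *		HWRM_CFA_ADV_FLOW_MGNT_QCAPS_OUTPUT_FLAGS_FLOW_HND_16BIT_SUPPORTED);
+ *	int has_rfs_ring_tbl_idx = !!(caps &
+ *		HWRM_CFA_ADV_FLOW_MGNT_QCAPS_OUTPUT_FLAGS_RFS_RING_TBL_IDX_SUPPORTED);
+ *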
+ */ + #define HWRM_CFA_ADV_FLOW_MGNT_QCAPS_OUTPUT_FLAGS_RFS_RING_TBL_IDX_SUPPORTED \ + UINT32_C(0x100) + /* + * Value of 1 to indicate that firmware supports untagged matching + * criteria on HWRM_CFA_L2_FILTER_ALLOC command. Value of 0 + * indicates firmware does not support untagged matching. + */ + #define HWRM_CFA_ADV_FLOW_MGNT_QCAPS_OUTPUT_FLAGS_UNTAGGED_VLAN_SUPPORTED \ + UINT32_C(0x200) + /* + * Value of 1 to indicate that firmware supports XDP filter. Value + * of 0 indicates firmware does not support XDP filter. + */ + #define HWRM_CFA_ADV_FLOW_MGNT_QCAPS_OUTPUT_FLAGS_XDP_SUPPORTED \ + UINT32_C(0x400) + /* + * Value of 1 to indicate that the firmware support L2 header source + * fields matching criteria on HWRM_CFA_L2_FILTER_ALLOC command. + * Value of 0 indicates firmware does not support L2 header source + * fields matching. + */ + #define HWRM_CFA_ADV_FLOW_MGNT_QCAPS_OUTPUT_FLAGS_L2_HEADER_SOURCE_FIELDS_SUPPORTED \ + UINT32_C(0x800) + /* + * If set to 1, firmware is capable of supporting ARP ethertype as + * matching criteria for HWRM_CFA_NTUPLE_FILTER_ALLOC command on the + * RX direction. By default, this flag should be 0 for older version + * of firmware. + */ + #define HWRM_CFA_ADV_FLOW_MGNT_QCAPS_OUTPUT_FLAGS_NTUPLE_FLOW_RX_ARP_SUPPORTED \ + UINT32_C(0x1000) + /* + * Value of 1 to indicate that firmware supports setting of + * rfs_ring_tbl_idx in dst_id field of the HWRM_CFA_NTUPLE_ALLOC + * command. Value of 0 indicates firmware does not support + * rfs_ring_tbl_idx in dst_id field. + */ + #define HWRM_CFA_ADV_FLOW_MGNT_QCAPS_OUTPUT_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED \ + UINT32_C(0x2000) + /* + * If set to 1, firmware is capable of supporting IPv4/IPv6 as + * ethertype in HWRM_CFA_NTUPLE_FILTER_ALLOC command on the RX + * direction. By default, this flag should be 0 for older version + * of firmware. + */ + #define HWRM_CFA_ADV_FLOW_MGNT_QCAPS_OUTPUT_FLAGS_NTUPLE_FLOW_RX_ETHERTYPE_IP_SUPPORTED \ + UINT32_C(0x4000) + uint8_t unused_0[3]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/****************** + * hwrm_cfa_tflib * + ******************/ + + +/* hwrm_cfa_tflib_input (size:1024b/128B) */ +struct hwrm_cfa_tflib_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* TFLIB message type. 
*/ + uint16_t tf_type; + /* TFLIB message subtype. */ + uint16_t tf_subtype; + /* unused. */ + uint8_t unused0[4]; + /* TFLIB request data. */ + uint32_t tf_req[26]; +} __rte_packed; + +/* hwrm_cfa_tflib_output (size:5632b/704B) */ +struct hwrm_cfa_tflib_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* TFLIB message type. */ + uint16_t tf_type; + /* TFLIB message subtype. */ + uint16_t tf_subtype; + /* TFLIB response code */ + uint32_t tf_resp_code; + /* TFLIB response data. */ + uint32_t tf_resp[170]; + /* unused. */ + uint8_t unused1[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/*********** + * hwrm_tf * + ***********/ + + +/* hwrm_tf_input (size:1024b/128B) */ +struct hwrm_tf_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* TF message type. */ + uint16_t type; + /* TF message subtype. */ + uint16_t subtype; + /* unused. */ + uint8_t unused0[4]; + /* TF request data. */ + uint32_t req[26]; +} __rte_packed; + +/* hwrm_tf_output (size:5632b/704B) */ +struct hwrm_tf_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* TF message type. */ + uint16_t type; + /* TF message subtype. */ + uint16_t subtype; + /* TF response code */ + uint32_t resp_code; + /* TF response data. */ + uint32_t resp[170]; + /* unused. */ + uint8_t unused1[7]; + /* + * This field is used in Output records to indicate that the + * output is completely written to RAM. This field should be + * read as '1' to indicate that the output has been + * completely written. When writing a command completion or + * response to an internal processor, the order of writes has + * to be such that this field is written last. 
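+ *
+ * Editorial note: because this byte is written last, a caller polling a
+ * DMA'd response buffer can use it as the completion marker. A minimal
+ * sketch (not generated code), assuming `resp` points at the response
+ * buffer, the access is volatile or otherwise not optimized away, and
+ * rte_pause()/rte_io_rmb() are available:
+ *
+ *	while (resp->valid != 1)
+ *		rte_pause();
+ *	rte_io_rmb();
+ *
+ * after which the remaining fields of the response can be read safely.
+ *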
+ */ + uint8_t valid; +} __rte_packed; + +/*********************** + * hwrm_tf_version_get * + ***********************/ + + +/* hwrm_tf_version_get_input (size:128b/16B) */ +struct hwrm_tf_version_get_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; +} __rte_packed; + +/* hwrm_tf_version_get_output (size:128b/16B) */ +struct hwrm_tf_version_get_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* Version Major number. */ + uint8_t major; + /* Version Minor number. */ + uint8_t minor; + /* Version Update number. */ + uint8_t update; + /* unused. */ + uint8_t unused0[4]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal + * processor, the order of writes has to be such that this field is + * written last. + */ + uint8_t valid; +} __rte_packed; + +/************************ + * hwrm_tf_session_open * + ************************/ + + +/* hwrm_tf_session_open_input (size:640b/80B) */ +struct hwrm_tf_session_open_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* Name of the session. */ + uint8_t session_name[64]; +} __rte_packed; + +/* hwrm_tf_session_open_output (size:128b/16B) */ +struct hwrm_tf_session_open_output { + /* The specific error status for the command. 
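+ *
+ * Editorial sketch, not generated code: a session is opened by sending
+ * hwrm_tf_session_open_input with a NUL-terminated name; once
+ * error_code reads back as zero, the returned fw_session_id is kept for
+ * subsequent hwrm_tf_* commands. send_hwrm_request() is a hypothetical
+ * helper, here shown also taking the response buffer:
+ *
+ *	struct hwrm_tf_session_open_input req = { 0 };
+ *	struct hwrm_tf_session_open_output resp = { 0 };
+ *	uint32_t fw_session_id = 0;
+ *
+ *	snprintf((char *)req.session_name, sizeof(req.session_name),
+ *		 "tf-session-0");
+ *	send_hwrm_request(&req, sizeof(req), &resp, sizeof(resp));
+ *	if (rte_le_to_cpu_16(resp.error_code) == 0)
+ *		fw_session_id = rte_le_to_cpu_32(resp.fw_session_id);
+ *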
*/ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* + * Unique session identifier for the session created by the + * firmware. It includes PCIe bus info to distinguish the PF + * and session info to identify the associated TruFlow + * session. + */ + uint32_t fw_session_id; + /* unused. */ + uint8_t unused0[3]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal + * processor, the order of writes has to be such that this field is + * written last. + */ + uint8_t valid; +} __rte_packed; + +/************************** + * hwrm_tf_session_attach * + **************************/ + + +/* hwrm_tf_session_attach_input (size:704b/88B) */ +struct hwrm_tf_session_attach_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* + * Unique session identifier for the session that the attach + * request want to attach to. This value originates from the + * shared session memory that the attach request opened by + * way of the 'attach name' that was passed in to the core + * attach API. + * The fw_session_id of the attach session includes PCIe bus + * info to distinguish the PF and session info to identify + * the associated TruFlow session. + */ + uint32_t attach_fw_session_id; + /* unused. */ + uint32_t unused0; + /* Name of the session it self. */ + uint8_t session_name[64]; +} __rte_packed; + +/* hwrm_tf_session_attach_output (size:128b/16B) */ +struct hwrm_tf_session_attach_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* + * Unique session identifier for the session created by the + * firmware. It includes PCIe bus info to distinguish the PF + * and session info to identify the associated TruFlow + * session. This fw_session_id is unique to the attach + * request. + */ + uint32_t fw_session_id; + /* unused. */ + uint8_t unused0[3]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. 
+ * When writing a command completion or response to an internal + * processor, the order of writes has to be such that this field is + * written last. + */ + uint8_t valid; +} __rte_packed; + +/************************* + * hwrm_tf_session_close * + *************************/ + + +/* hwrm_tf_session_close_input (size:192b/24B) */ +struct hwrm_tf_session_close_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* Firmware session id returned when HWRM_TF_SESSION_OPEN is sent. */ + uint32_t fw_session_id; + /* unused. */ + uint8_t unused0[4]; +} __rte_packed; + +/* hwrm_tf_session_close_output (size:128b/16B) */ +struct hwrm_tf_session_close_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* unused. */ + uint8_t unused0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal + * processor, the order of writes has to be such that this field + * is written last. + */ + uint8_t valid; +} __rte_packed; + +/************************ + * hwrm_tf_session_qcfg * + ************************/ + + +/* hwrm_tf_session_qcfg_input (size:192b/24B) */ +struct hwrm_tf_session_qcfg_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* Firmware session id returned when HWRM_TF_SESSION_OPEN is sent. 
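+ * In other words, the value obtained from hwrm_tf_session_open_output
+ * is simply carried into each later TF request. A minimal sketch,
+ * assuming this header is included and using a hypothetical
+ * send_hwrm() helper that fills in the common header fields
+ * (byte-order conversion omitted):
+ *
+ *     struct hwrm_tf_session_open_output open_resp;   filled earlier
+ *     struct hwrm_tf_session_qcfg_input qcfg_req = { 0 };
+ *
+ *     qcfg_req.fw_session_id = open_resp.fw_session_id;
+ *     send_hwrm(&qcfg_req, sizeof(qcfg_req));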
*/ + uint32_t fw_session_id; + /* unused. */ + uint8_t unused0[4]; +} __rte_packed; + +/* hwrm_tf_session_qcfg_output (size:128b/16B) */ +struct hwrm_tf_session_qcfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* RX action control settings flags. */ + uint8_t rx_act_flags; + /* + * A value of 1 in this field indicates that Global Flow ID + * reporting into cfa_code and cfa_metadata is enabled. + */ + #define HWRM_TF_SESSION_QCFG_OUTPUT_RX_ACT_FLAGS_ABCR_GFID_EN \ + UINT32_C(0x1) + /* + * A value of 1 in this field indicates that both inner and outer + * are stripped and inner tag is passed. + * Enabled. + */ + #define HWRM_TF_SESSION_QCFG_OUTPUT_RX_ACT_FLAGS_ABCR_VTAG_DLT_BOTH \ + UINT32_C(0x2) + /* + * A value of 1 in this field indicates that the re-use of + * existing tunnel L2 header SMAC is enabled for + * Non-tunnel L2, L2-L3 and IP-IP tunnel. + */ + #define HWRM_TF_SESSION_QCFG_OUTPUT_RX_ACT_FLAGS_TECT_SMAC_OVR_RUTNSL2 \ + UINT32_C(0x4) + /* TX Action control settings flags. */ + uint8_t tx_act_flags; + /* Disabled. */ + #define HWRM_TF_SESSION_QCFG_OUTPUT_TX_ACT_FLAGS_ABCR_VEB_EN \ + UINT32_C(0x1) + /* + * When set to 1 any GRE tunnels will include the + * optional Key field. + */ + #define HWRM_TF_SESSION_QCFG_OUTPUT_TX_ACT_FLAGS_TECT_GRE_SET_K \ + UINT32_C(0x2) + /* + * When set to 1, for GRE tunnels, the IPV6 Traffic Class (TC) + * field of the outer header is inherited from the inner header + * (if present) or the fixed value as taken from the encap + * record. + */ + #define HWRM_TF_SESSION_QCFG_OUTPUT_TX_ACT_FLAGS_TECT_IPV6_TC_IH \ + UINT32_C(0x4) + /* + * When set to 1, for GRE tunnels, the IPV4 Type Of Service (TOS) + * field of the outer header is inherited from the inner header + * (if present) or the fixed value as taken from the encap record. + */ + #define HWRM_TF_SESSION_QCFG_OUTPUT_TX_ACT_FLAGS_TECT_IPV4_TOS_IH \ + UINT32_C(0x8) + /* unused. */ + uint8_t unused0[5]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal + * processor, the order of writes has to be such that this field + * is written last. + */ + uint8_t valid; +} __rte_packed; + +/****************************** + * hwrm_tf_session_resc_qcaps * + ******************************/ + + +/* hwrm_tf_session_resc_qcaps_input (size:256b/32B) */ +struct hwrm_tf_session_resc_qcaps_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. 
+ */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* Firmware session id returned when HWRM_TF_SESSION_OPEN is sent. */ + uint32_t fw_session_id; + /* Control flags. */ + uint16_t flags; + /* Indicates the flow direction. */ + #define HWRM_TF_SESSION_RESC_QCAPS_INPUT_FLAGS_DIR UINT32_C(0x1) + /* If this bit set to 0, then it indicates rx flow. */ + #define HWRM_TF_SESSION_RESC_QCAPS_INPUT_FLAGS_DIR_RX UINT32_C(0x0) + /* If this bit is set to 1, then it indicates that tx flow. */ + #define HWRM_TF_SESSION_RESC_QCAPS_INPUT_FLAGS_DIR_TX UINT32_C(0x1) + #define HWRM_TF_SESSION_RESC_QCAPS_INPUT_FLAGS_DIR_LAST \ + HWRM_TF_SESSION_RESC_QCAPS_INPUT_FLAGS_DIR_TX + /* + * Defines the size, in bytes, of the provided qcaps_addr + * buffer. The size should be set to the Resource Manager + * provided max qcaps value that is device specific. This is + * the max size possible. + */ + uint16_t size; + /* + * This is the DMA address for the qcaps output data + * array. Array is of tf_rm_cap type and is device specific. + */ + uint64_t qcaps_addr; +} __rte_packed; + +/* hwrm_tf_session_resc_qcaps_output (size:192b/24B) */ +struct hwrm_tf_session_resc_qcaps_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* Control flags. */ + uint32_t flags; + /* Session reservation strategy. */ + #define HWRM_TF_SESSION_RESC_QCAPS_OUTPUT_FLAGS_SESS_RES_STRATEGY_MASK \ + UINT32_C(0x3) + #define HWRM_TF_SESSION_RESC_QCAPS_OUTPUT_FLAGS_SESS_RES_STRATEGY_SFT \ + 0 + /* Static partitioning. */ + #define HWRM_TF_SESSION_RESC_QCAPS_OUTPUT_FLAGS_SESS_RES_STRATEGY_STATIC \ + UINT32_C(0x0) + /* Strategy 1. */ + #define HWRM_TF_SESSION_RESC_QCAPS_OUTPUT_FLAGS_SESS_RES_STRATEGY_1 \ + UINT32_C(0x1) + /* Strategy 2. */ + #define HWRM_TF_SESSION_RESC_QCAPS_OUTPUT_FLAGS_SESS_RES_STRATEGY_2 \ + UINT32_C(0x2) + /* Strategy 3. */ + #define HWRM_TF_SESSION_RESC_QCAPS_OUTPUT_FLAGS_SESS_RES_STRATEGY_3 \ + UINT32_C(0x3) + #define HWRM_TF_SESSION_RESC_QCAPS_OUTPUT_FLAGS_SESS_RES_STRATEGY_LAST \ + HWRM_TF_SESSION_RESC_QCAPS_OUTPUT_FLAGS_SESS_RES_STRATEGY_3 + /* + * Size of the returned tf_rm_cap data array. The value + * cannot exceed the size defined by the input msg. The data + * array is returned using the qcaps_addr specified DMA + * address also provided by the input msg. + */ + uint16_t size; + /* unused. */ + uint16_t unused0; + /* unused. */ + uint8_t unused1[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal + * processor, the order of writes has to be such that this field is + * written last. 
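+ *
+ * Only at that point would a hypothetical caller trust 'size' and the
+ * tf_rm_cap array the firmware DMA'ed to the request's qcaps_addr.
+ * A minimal sketch (qcaps_req is a hwrm_tf_session_resc_qcaps_input;
+ * the array length and iova_of() are illustrative assumptions):
+ *
+ *     struct tf_rm_cap caps[32];                DMA target buffer
+ *
+ *     qcaps_req.size = sizeof(caps);            limit for the firmware
+ *     qcaps_req.qcaps_addr = iova_of(caps);     bus address of buffer
+ *
+ * The same pattern applies to the alloc and free/flush variants,
+ * whose DMA arrays carry tf_rm_num and tf_rm_res entries as described
+ * with those commands.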
+ */ + uint8_t valid; +} __rte_packed; + +/****************************** + * hwrm_tf_session_resc_alloc * + ******************************/ + + +/* hwrm_tf_session_resc_alloc_input (size:256b/32B) */ +struct hwrm_tf_session_resc_alloc_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* Firmware session id returned when HWRM_TF_SESSION_OPEN is sent. */ + uint32_t fw_session_id; + /* Control flags. */ + uint16_t flags; + /* Indicates the flow direction. */ + #define HWRM_TF_SESSION_RESC_ALLOC_INPUT_FLAGS_DIR UINT32_C(0x1) + /* If this bit set to 0, then it indicates rx flow. */ + #define HWRM_TF_SESSION_RESC_ALLOC_INPUT_FLAGS_DIR_RX UINT32_C(0x0) + /* If this bit is set to 1, then it indicates that tx flow. */ + #define HWRM_TF_SESSION_RESC_ALLOC_INPUT_FLAGS_DIR_TX UINT32_C(0x1) + #define HWRM_TF_SESSION_RESC_ALLOC_INPUT_FLAGS_DIR_LAST \ + HWRM_TF_SESSION_RESC_ALLOC_INPUT_FLAGS_DIR_TX + /* + * Defines the size, in bytes, of the provided num_addr + * buffer. + */ + uint16_t size; + /* + * This is the DMA address for the num input data array + * buffer. Array is of tf_rm_num type. Size of the buffer is + * provided by the 'size' field in this message. + */ + uint64_t num_addr; +} __rte_packed; + +/* hwrm_tf_session_resc_alloc_output (size:128b/16B) */ +struct hwrm_tf_session_resc_alloc_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* unused. */ + uint8_t unused0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal + * processor, the order of writes has to be such that this field is + * written last. + */ + uint8_t valid; +} __rte_packed; + +/***************************** + * hwrm_tf_session_resc_free * + *****************************/ + + +/* hwrm_tf_session_resc_free_input (size:256b/32B) */ +struct hwrm_tf_session_resc_free_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. 
This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* Firmware session id returned when HWRM_TF_SESSION_OPEN is sent. */ + uint32_t fw_session_id; + /* Control flags. */ + uint16_t flags; + /* Indicates the flow direction. */ + #define HWRM_TF_SESSION_RESC_FREE_INPUT_FLAGS_DIR UINT32_C(0x1) + /* If this bit set to 0, then it indicates rx flow. */ + #define HWRM_TF_SESSION_RESC_FREE_INPUT_FLAGS_DIR_RX UINT32_C(0x0) + /* If this bit is set to 1, then it indicates that tx flow. */ + #define HWRM_TF_SESSION_RESC_FREE_INPUT_FLAGS_DIR_TX UINT32_C(0x1) + #define HWRM_TF_SESSION_RESC_FREE_INPUT_FLAGS_DIR_LAST \ + HWRM_TF_SESSION_RESC_FREE_INPUT_FLAGS_DIR_TX + /* + * Defines the size, in bytes, of the provided free_addr + * buffer. + */ + uint16_t size; + /* + * This is the DMA address for the free input data array + * buffer. Array of tf_rm_res type. Size of the buffer is + * provided by the 'size field of this message. + */ + uint64_t free_addr; +} __rte_packed; + +/* hwrm_tf_session_resc_free_output (size:128b/16B) */ +struct hwrm_tf_session_resc_free_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* unused. */ + uint8_t unused0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal + * processor, the order of writes has to be such that this field is + * written last. + */ + uint8_t valid; +} __rte_packed; + +/****************************** + * hwrm_tf_session_resc_flush * + ******************************/ + + +/* hwrm_tf_session_resc_flush_input (size:256b/32B) */ +struct hwrm_tf_session_resc_flush_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. 
This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* Firmware session id returned when HWRM_TF_SESSION_OPEN is sent. */ + uint32_t fw_session_id; + /* Control flags. */ + uint16_t flags; + /* Indicates the flow direction. */ + #define HWRM_TF_SESSION_RESC_FLUSH_INPUT_FLAGS_DIR UINT32_C(0x1) + /* If this bit set to 0, then it indicates rx flow. */ + #define HWRM_TF_SESSION_RESC_FLUSH_INPUT_FLAGS_DIR_RX UINT32_C(0x0) + /* If this bit is set to 1, then it indicates that tx flow. */ + #define HWRM_TF_SESSION_RESC_FLUSH_INPUT_FLAGS_DIR_TX UINT32_C(0x1) + #define HWRM_TF_SESSION_RESC_FLUSH_INPUT_FLAGS_DIR_LAST \ + HWRM_TF_SESSION_RESC_FLUSH_INPUT_FLAGS_DIR_TX + /* + * Defines the size, in bytes, of the provided flush_addr + * buffer. + */ + uint16_t size; + /* + * This is the DMA address for the flush input data array + * buffer. Array of tf_rm_res type. Size of the buffer is + * provided by the 'size' field in this message. + */ + uint64_t flush_addr; +} __rte_packed; + +/* hwrm_tf_session_resc_flush_output (size:128b/16B) */ +struct hwrm_tf_session_resc_flush_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* unused. */ + uint8_t unused0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal + * processor, the order of writes has to be such that this field is + * written last. + */ + uint8_t valid; +} __rte_packed; + +/* TruFlow RM capability of a resource. */ +/* tf_rm_cap (size:64b/8B) */ +struct tf_rm_cap { + /* + * Type of the resource, defined globally in the + * hwrm_tf_resc_type enum. + */ + uint32_t type; + /* Minimum value. */ + uint16_t min; + /* Maximum value. */ + uint16_t max; +} __rte_packed; + +/* TruFlow RM number of a resource. */ +/* tf_rm_num (size:64b/8B) */ +struct tf_rm_num { + /* + * Type of the resource, defined globally in the + * hwrm_tf_resc_type enum. + */ + uint32_t type; + /* Number of resources. */ + uint32_t num; +} __rte_packed; + +/* TruFlow RM reservation information. */ +/* tf_rm_res (size:64b/8B) */ +struct tf_rm_res { + /* + * Type of the resource, defined globally in the + * hwrm_tf_resc_type enum. + */ + uint32_t type; + /* Start offset. */ + uint16_t start; + /* Number of resources. */ + uint16_t stride; +} __rte_packed; + +/************************ + * hwrm_tf_tbl_type_get * + ************************/ + + +/* hwrm_tf_tbl_type_get_input (size:256b/32B) */ +struct hwrm_tf_tbl_type_get_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. 
+ */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* Firmware session id returned when HWRM_TF_SESSION_OPEN is sent. */ + uint32_t fw_session_id; + /* Control flags. */ + uint16_t flags; + /* Indicates the flow direction. */ + #define HWRM_TF_TBL_TYPE_GET_INPUT_FLAGS_DIR UINT32_C(0x1) + /* If this bit set to 0, then it indicates rx flow. */ + #define HWRM_TF_TBL_TYPE_GET_INPUT_FLAGS_DIR_RX UINT32_C(0x0) + /* If this bit is set to 1, then it indicates that tx flow. */ + #define HWRM_TF_TBL_TYPE_GET_INPUT_FLAGS_DIR_TX UINT32_C(0x1) + #define HWRM_TF_TBL_TYPE_GET_INPUT_FLAGS_DIR_LAST \ + HWRM_TF_TBL_TYPE_GET_INPUT_FLAGS_DIR_TX + /* unused. */ + uint8_t unused0[2]; + /* + * Type of the resource, defined globally in the + * hwrm_tf_resc_type enum. + */ + uint32_t type; + /* Index of the type to retrieve. */ + uint32_t index; +} __rte_packed; + +/* hwrm_tf_tbl_type_get_output (size:1216b/152B) */ +struct hwrm_tf_tbl_type_get_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* Response code. */ + uint32_t resp_code; + /* Response size. */ + uint16_t size; + /* unused */ + uint16_t unused0; + /* Response data. */ + uint8_t data[128]; + /* unused */ + uint8_t unused1[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal + * processor, the order of writes has to be such that this field + * is written last. + */ + uint8_t valid; +} __rte_packed; + +/************************ + * hwrm_tf_tbl_type_set * + ************************/ + + +/* hwrm_tf_tbl_type_set_input (size:1024b/128B) */ +struct hwrm_tf_tbl_type_set_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* Firmware session id returned when HWRM_TF_SESSION_OPEN is sent. 
*/ + uint32_t fw_session_id; + /* Control flags. */ + uint16_t flags; + /* Indicates the flow direction. */ + #define HWRM_TF_TBL_TYPE_SET_INPUT_FLAGS_DIR UINT32_C(0x1) + /* If this bit set to 0, then it indicates rx flow. */ + #define HWRM_TF_TBL_TYPE_SET_INPUT_FLAGS_DIR_RX UINT32_C(0x0) + /* If this bit is set to 1, then it indicates that tx flow. */ + #define HWRM_TF_TBL_TYPE_SET_INPUT_FLAGS_DIR_TX UINT32_C(0x1) + #define HWRM_TF_TBL_TYPE_SET_INPUT_FLAGS_DIR_LAST \ + HWRM_TF_TBL_TYPE_SET_INPUT_FLAGS_DIR_TX + /* unused. */ + uint8_t unused0[2]; + /* + * Type of the resource, defined globally in the + * hwrm_tf_resc_type enum. + */ + uint32_t type; + /* Index of the type to retrieve. */ + uint32_t index; + /* Size of the data to set. */ + uint16_t size; + /* unused */ + uint8_t unused1[6]; + /* Data to be set. */ + uint8_t data[88]; +} __rte_packed; + +/* hwrm_tf_tbl_type_set_output (size:128b/16B) */ +struct hwrm_tf_tbl_type_set_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* unused. */ + uint8_t unused0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal + * processor, the order of writes has to be such that this field + * is written last. + */ + uint8_t valid; +} __rte_packed; + +/************************* + * hwrm_tf_ctxt_mem_rgtr * + *************************/ + + +/* hwrm_tf_ctxt_mem_rgtr_input (size:256b/32B) */ +struct hwrm_tf_ctxt_mem_rgtr_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* Control flags. */ + uint16_t flags; + /* Counter PBL indirect levels. */ + uint8_t page_level; + /* PBL pointer is physical start address. */ + #define HWRM_TF_CTXT_MEM_RGTR_INPUT_PAGE_LEVEL_LVL_0 UINT32_C(0x0) + /* PBL pointer points to PTE table. */ + #define HWRM_TF_CTXT_MEM_RGTR_INPUT_PAGE_LEVEL_LVL_1 UINT32_C(0x1) + /* + * PBL pointer points to PDE table with each entry pointing + * to PTE tables. + */ + #define HWRM_TF_CTXT_MEM_RGTR_INPUT_PAGE_LEVEL_LVL_2 UINT32_C(0x2) + #define HWRM_TF_CTXT_MEM_RGTR_INPUT_PAGE_LEVEL_LAST \ + HWRM_TF_CTXT_MEM_RGTR_INPUT_PAGE_LEVEL_LVL_2 + /* Page size. */ + uint8_t page_size; + /* 4KB page size. 
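+ * (Illustrative note on how page_level and page_size combine, with
+ * req being a hwrm_tf_ctxt_mem_rgtr_input and phys_start an assumed
+ * physical address: for a single physically contiguous region, say
+ * one 2MB hugepage, a hypothetical caller could register it as
+ *
+ *     req.page_level = HWRM_TF_CTXT_MEM_RGTR_INPUT_PAGE_LEVEL_LVL_0;
+ *     req.page_size  = HWRM_TF_CTXT_MEM_RGTR_INPUT_PAGE_SIZE_2M;
+ *     req.page_dir   = phys_start;    physical start of the region
+ *
+ * whereas larger, non-contiguous pools would build a PTE or PDE/PTE
+ * table and select LVL_1 or LVL_2 instead.)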
*/ + #define HWRM_TF_CTXT_MEM_RGTR_INPUT_PAGE_SIZE_4K UINT32_C(0x0) + /* 8KB page size. */ + #define HWRM_TF_CTXT_MEM_RGTR_INPUT_PAGE_SIZE_8K UINT32_C(0x1) + /* 64KB page size. */ + #define HWRM_TF_CTXT_MEM_RGTR_INPUT_PAGE_SIZE_64K UINT32_C(0x4) + /* 256KB page size. */ + #define HWRM_TF_CTXT_MEM_RGTR_INPUT_PAGE_SIZE_256K UINT32_C(0x6) + /* 1MB page size. */ + #define HWRM_TF_CTXT_MEM_RGTR_INPUT_PAGE_SIZE_1M UINT32_C(0x8) + /* 2MB page size. */ + #define HWRM_TF_CTXT_MEM_RGTR_INPUT_PAGE_SIZE_2M UINT32_C(0x9) + /* 4MB page size. */ + #define HWRM_TF_CTXT_MEM_RGTR_INPUT_PAGE_SIZE_4M UINT32_C(0xa) + /* 1GB page size. */ + #define HWRM_TF_CTXT_MEM_RGTR_INPUT_PAGE_SIZE_1G UINT32_C(0x12) + #define HWRM_TF_CTXT_MEM_RGTR_INPUT_PAGE_SIZE_LAST \ + HWRM_TF_CTXT_MEM_RGTR_INPUT_PAGE_SIZE_1G + /* unused. */ + uint32_t unused0; + /* Pointer to the PBL, or PDL depending on number of levels */ + uint64_t page_dir; +} __rte_packed; + +/* hwrm_tf_ctxt_mem_rgtr_output (size:128b/16B) */ +struct hwrm_tf_ctxt_mem_rgtr_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* + * Id/Handle to the recently register context memory. This + * handle is passed to the TF session. + */ + uint16_t ctx_id; + /* unused. */ + uint8_t unused0[5]; + /* + * This field is used in Output records to indicate that the + * output is completely written to RAM. This field should be + * read as '1' to indicate that the output has been + * completely written. When writing a command completion or + * response to an internal processor, the order of writes has + * to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/*************************** + * hwrm_tf_ctxt_mem_unrgtr * + ***************************/ + + +/* hwrm_tf_ctxt_mem_unrgtr_input (size:192b/24B) */ +struct hwrm_tf_ctxt_mem_unrgtr_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* + * Id/Handle to the recently register context memory. This + * handle is passed to the TF session. + */ + uint16_t ctx_id; + /* unused. */ + uint8_t unused0[6]; +} __rte_packed; + +/* hwrm_tf_ctxt_mem_unrgtr_output (size:128b/16B) */ +struct hwrm_tf_ctxt_mem_unrgtr_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. 
*/ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* unused. */ + uint8_t unused0[7]; + /* + * This field is used in Output records to indicate that the + * output is completely written to RAM. This field should be + * read as '1' to indicate that the output has been + * completely written. When writing a command completion or + * response to an internal processor, the order of writes has + * to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/************************ + * hwrm_tf_ext_em_qcaps * + ************************/ + + +/* hwrm_tf_ext_em_qcaps_input (size:192b/24B) */ +struct hwrm_tf_ext_em_qcaps_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* Control flags. */ + uint32_t flags; + /* Indicates the flow direction. */ + #define HWRM_TF_EXT_EM_QCAPS_INPUT_FLAGS_DIR \ + UINT32_C(0x1) + /* If this bit set to 0, then it indicates rx flow. */ + #define HWRM_TF_EXT_EM_QCAPS_INPUT_FLAGS_DIR_RX \ + UINT32_C(0x0) + /* If this bit is set to 1, then it indicates that tx flow. */ + #define HWRM_TF_EXT_EM_QCAPS_INPUT_FLAGS_DIR_TX \ + UINT32_C(0x1) + #define HWRM_TF_EXT_EM_QCAPS_INPUT_FLAGS_DIR_LAST \ + HWRM_TF_EXT_EM_QCAPS_INPUT_FLAGS_DIR_TX + /* When set to 1, all offloaded flows will be sent to EXT EM. */ + #define HWRM_TF_EXT_EM_QCAPS_INPUT_FLAGS_PREFERRED_OFFLOAD \ + UINT32_C(0x2) + /* unused. */ + uint32_t unused0; +} __rte_packed; + +/* hwrm_tf_ext_em_qcaps_output (size:320b/40B) */ +struct hwrm_tf_ext_em_qcaps_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint32_t flags; + /* + * When set to 1, indicates the the FW supports the Centralized + * Memory Model. The concept designates one entity for the + * memory allocation while all others ‘subscribe’ to it. + */ + #define HWRM_TF_EXT_EM_QCAPS_OUTPUT_FLAGS_CENTRALIZED_MEMORY_MODEL_SUPPORTED \ + UINT32_C(0x1) + /* + * When set to 1, indicates the the FW supports the Detached + * Centralized Memory Model. The memory is allocated and managed + * as a separate entity. All PFs and VFs will be granted direct + * or semi-direct access to the allocated memory while none of + * which can interfere with the management of the memory. + */ + #define HWRM_TF_EXT_EM_QCAPS_OUTPUT_FLAGS_DETACHED_CENTRALIZED_MEMORY_MODEL_SUPPORTED \ + UINT32_C(0x2) + /* unused. 
*/ + uint32_t unused0; + /* Support flags. */ + uint32_t supported; + /* + * If set to 1, then EXT EM KEY0 table is supported using + * crc32 hash. + * If set to 0, EXT EM KEY0 table is not supported. + */ + #define HWRM_TF_EXT_EM_QCAPS_OUTPUT_SUPPORTED_KEY0_TABLE \ + UINT32_C(0x1) + /* + * If set to 1, then EXT EM KEY1 table is supported using + * lookup3 hash. + * If set to 0, EXT EM KEY1 table is not supported. + */ + #define HWRM_TF_EXT_EM_QCAPS_OUTPUT_SUPPORTED_KEY1_TABLE \ + UINT32_C(0x2) + /* + * If set to 1, then EXT EM External Record table is supported. + * If set to 0, EXT EM External Record table is not + * supported. (This table includes action record, EFC + * pointers, encap pointers) + */ + #define HWRM_TF_EXT_EM_QCAPS_OUTPUT_SUPPORTED_EXTERNAL_RECORD_TABLE \ + UINT32_C(0x4) + /* + * If set to 1, then EXT EM External Flow Counters table is + * supported. + * If set to 0, EXT EM External Flow Counters table is not + * supported. + */ + #define HWRM_TF_EXT_EM_QCAPS_OUTPUT_SUPPORTED_EXTERNAL_FLOW_COUNTERS_TABLE \ + UINT32_C(0x8) + /* + * If set to 1, then FID table used for implicit flow flush + * is supported. + * If set to 0, then FID table used for implicit flow flush + * is not supported. + */ + #define HWRM_TF_EXT_EM_QCAPS_OUTPUT_SUPPORTED_FID_TABLE \ + UINT32_C(0x10) + /* + * The maximum number of entries supported by EXT EM. When + * configuring the host memory the number of numbers of + * entries that can supported are - + * 32k, 64k 128k, 256k, 512k, 1M, 2M, 4M, 8M, 32M, 64M, + * 128M entries. + * Any value that are not these values, the FW will round + * down to the closest support number of entries. + */ + uint32_t max_entries_supported; + /* + * The entry size in bytes of each entry in the EXT EM + * KEY0/KEY1 tables. + */ + uint16_t key_entry_size; + /* + * The entry size in bytes of each entry in the EXT EM RECORD + * tables. + */ + uint16_t record_entry_size; + /* The entry size in bytes of each entry in the EXT EM EFC tables. */ + uint16_t efc_entry_size; + /* The FID size in bytes of each entry in the EXT EM FID tables. */ + uint16_t fid_entry_size; + /* unused. */ + uint8_t unused1[7]; + /* + * This field is used in Output records to indicate that the + * output is completely written to RAM. This field should be + * read as '1' to indicate that the output has been + * completely written. When writing a command completion or + * response to an internal processor, the order of writes has + * to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/********************* + * hwrm_tf_ext_em_op * + *********************/ + + +/* hwrm_tf_ext_em_op_input (size:192b/24B) */ +struct hwrm_tf_ext_em_op_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. 
This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* Control flags. */ + uint16_t flags; + /* Indicates the flow direction. */ + #define HWRM_TF_EXT_EM_OP_INPUT_FLAGS_DIR UINT32_C(0x1) + /* If this bit set to 0, then it indicates rx flow. */ + #define HWRM_TF_EXT_EM_OP_INPUT_FLAGS_DIR_RX UINT32_C(0x0) + /* If this bit is set to 1, then it indicates that tx flow. */ + #define HWRM_TF_EXT_EM_OP_INPUT_FLAGS_DIR_TX UINT32_C(0x1) + #define HWRM_TF_EXT_EM_OP_INPUT_FLAGS_DIR_LAST \ + HWRM_TF_EXT_EM_OP_INPUT_FLAGS_DIR_TX + /* unused. */ + uint16_t unused0; + /* The number of EXT EM key table entries to be configured. */ + uint16_t op; + /* This value is reserved and should not be used. */ + #define HWRM_TF_EXT_EM_OP_INPUT_OP_RESERVED UINT32_C(0x0) + /* + * To properly stop EXT EM and ensure there are no DMA's, + * the caller must disable EXT EM for the given PF, using + * this call. This will safely disable EXT EM and ensure + * that all DMA'ed to the keys/records/efc have been + * completed. + */ + #define HWRM_TF_EXT_EM_OP_INPUT_OP_EXT_EM_DISABLE UINT32_C(0x1) + /* + * Once the EXT EM host memory has been configured, EXT EM + * options have been configured. Then the caller should + * enable EXT EM for the given PF. Note once this call has + * been made, then the EXT EM mechanism will be active and + * DMA's will occur as packets are processed. + */ + #define HWRM_TF_EXT_EM_OP_INPUT_OP_EXT_EM_ENABLE UINT32_C(0x2) + /* + * Clear EXT EM settings for the given PF so that the + * register values are reset back to their initial state. + */ + #define HWRM_TF_EXT_EM_OP_INPUT_OP_EXT_EM_CLEANUP UINT32_C(0x3) + #define HWRM_TF_EXT_EM_OP_INPUT_OP_LAST \ + HWRM_TF_EXT_EM_OP_INPUT_OP_EXT_EM_CLEANUP + /* unused. */ + uint16_t unused1; +} __rte_packed; + +/* hwrm_tf_ext_em_op_output (size:128b/16B) */ +struct hwrm_tf_ext_em_op_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* unused. */ + uint8_t unused0[7]; + /* + * This field is used in Output records to indicate that the + * output is completely written to RAM. This field should be + * read as '1' to indicate that the output has been + * completely written. When writing a command completion or + * response to an internal processor, the order of writes has + * to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/********************** + * hwrm_tf_ext_em_cfg * + **********************/ + + +/* hwrm_tf_ext_em_cfg_input (size:384b/48B) */ +struct hwrm_tf_ext_em_cfg_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. 
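+ * (Sequencing note drawn from the surrounding definitions, with
+ * cfg_req, op_req and rgtr_resp as hypothetical variables of the
+ * corresponding cfg, op and mem_rgtr structure types: a driver would
+ * first register its tables with hwrm_tf_ctxt_mem_rgtr, then describe
+ * them to the firmware with this hwrm_tf_ext_em_cfg command using the
+ * returned ctx_id values, and only then enable EXT EM; disabling via
+ * the op command precedes unregistering the memory.
+ *
+ *     cfg_req.key0_ctx_id = rgtr_resp.ctx_id;     from mem_rgtr
+ *     op_req.op = HWRM_TF_EXT_EM_OP_INPUT_OP_EXT_EM_ENABLE;
+ * )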
+ */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* Control flags. */ + uint32_t flags; + /* Indicates the flow direction. */ + #define HWRM_TF_EXT_EM_CFG_INPUT_FLAGS_DIR \ + UINT32_C(0x1) + /* If this bit set to 0, then it indicates rx flow. */ + #define HWRM_TF_EXT_EM_CFG_INPUT_FLAGS_DIR_RX \ + UINT32_C(0x0) + /* If this bit is set to 1, then it indicates that tx flow. */ + #define HWRM_TF_EXT_EM_CFG_INPUT_FLAGS_DIR_TX \ + UINT32_C(0x1) + #define HWRM_TF_EXT_EM_CFG_INPUT_FLAGS_DIR_LAST \ + HWRM_TF_EXT_EM_CFG_INPUT_FLAGS_DIR_TX + /* When set to 1, all offloaded flows will be sent to EXT EM. */ + #define HWRM_TF_EXT_EM_CFG_INPUT_FLAGS_PREFERRED_OFFLOAD \ + UINT32_C(0x2) + /* When set to 1, secondary, 0 means primary. */ + #define HWRM_TF_EXT_EM_CFG_INPUT_FLAGS_SECONDARY_PF \ + UINT32_C(0x4) + /* + * Group_id which used by Firmware to identify memory pools belonging + * to certain group. + */ + uint16_t group_id; + /* + * Dynamically reconfigure EEM pending cache every 1/10th of second. + * If set to 0 it will disable the EEM HW flush of the pending cache. + */ + uint8_t flush_interval; + /* unused. */ + uint8_t unused0; + /* + * Configured EXT EM with the given number of entries. All + * the EXT EM tables KEY0, KEY1, RECORD, EFC all have the + * same number of entries and all tables will be configured + * using this value. Current minimum value is 32k. Current + * maximum value is 128M. + */ + uint32_t num_entries; + /* unused. */ + uint32_t unused1; + /* Configured EXT EM with the given context if for KEY0 table. */ + uint16_t key0_ctx_id; + /* Configured EXT EM with the given context if for KEY1 table. */ + uint16_t key1_ctx_id; + /* Configured EXT EM with the given context if for RECORD table. */ + uint16_t record_ctx_id; + /* Configured EXT EM with the given context if for EFC table. */ + uint16_t efc_ctx_id; + /* Configured EXT EM with the given context if for EFC table. */ + uint16_t fid_ctx_id; + /* unused. */ + uint16_t unused2; + /* unused. */ + uint32_t unused3; +} __rte_packed; + +/* hwrm_tf_ext_em_cfg_output (size:128b/16B) */ +struct hwrm_tf_ext_em_cfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* unused. */ + uint8_t unused0[7]; + /* + * This field is used in Output records to indicate that the + * output is completely written to RAM. This field should be + * read as '1' to indicate that the output has been + * completely written. When writing a command completion or + * response to an internal processor, the order of writes has + * to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/*********************** + * hwrm_tf_ext_em_qcfg * + ***********************/ + + +/* hwrm_tf_ext_em_qcfg_input (size:192b/24B) */ +struct hwrm_tf_ext_em_qcfg_input { + /* The HWRM command request type. 
*/ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* Control flags. */ + uint32_t flags; + /* Indicates the flow direction. */ + #define HWRM_TF_EXT_EM_QCFG_INPUT_FLAGS_DIR UINT32_C(0x1) + /* If this bit set to 0, then it indicates rx flow. */ + #define HWRM_TF_EXT_EM_QCFG_INPUT_FLAGS_DIR_RX UINT32_C(0x0) + /* If this bit is set to 1, then it indicates that tx flow. */ + #define HWRM_TF_EXT_EM_QCFG_INPUT_FLAGS_DIR_TX UINT32_C(0x1) + #define HWRM_TF_EXT_EM_QCFG_INPUT_FLAGS_DIR_LAST \ + HWRM_TF_EXT_EM_QCFG_INPUT_FLAGS_DIR_TX + /* unused. */ + uint32_t unused0; +} __rte_packed; + +/* hwrm_tf_ext_em_qcfg_output (size:256b/32B) */ +struct hwrm_tf_ext_em_qcfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* Control flags. */ + uint32_t flags; + /* Indicates the flow direction. */ + #define HWRM_TF_EXT_EM_QCFG_OUTPUT_FLAGS_DIR \ + UINT32_C(0x1) + /* If this bit set to 0, then it indicates rx flow. */ + #define HWRM_TF_EXT_EM_QCFG_OUTPUT_FLAGS_DIR_RX \ + UINT32_C(0x0) + /* If this bit is set to 1, then it indicates that tx flow. */ + #define HWRM_TF_EXT_EM_QCFG_OUTPUT_FLAGS_DIR_TX \ + UINT32_C(0x1) + #define HWRM_TF_EXT_EM_QCFG_OUTPUT_FLAGS_DIR_LAST \ + HWRM_TF_EXT_EM_QCFG_OUTPUT_FLAGS_DIR_TX + /* When set to 1, all offloaded flows will be sent to EXT EM. */ + #define HWRM_TF_EXT_EM_QCFG_OUTPUT_FLAGS_PREFERRED_OFFLOAD \ + UINT32_C(0x2) + /* The number of entries the FW has configured for EXT EM. */ + uint32_t num_entries; + /* Configured EXT EM with the given context if for KEY0 table. */ + uint16_t key0_ctx_id; + /* Configured EXT EM with the given context if for KEY1 table. */ + uint16_t key1_ctx_id; + /* Configured EXT EM with the given context if for RECORD table. */ + uint16_t record_ctx_id; + /* Configured EXT EM with the given context if for EFC table. */ + uint16_t efc_ctx_id; + /* Configured EXT EM with the given context if for EFC table. */ + uint16_t fid_ctx_id; + /* unused. */ + uint8_t unused0[5]; + /* + * This field is used in Output records to indicate that the + * output is completely written to RAM. This field should be + * read as '1' to indicate that the output has been + * completely written. When writing a command completion or + * response to an internal processor, the order of writes has + * to be such that this field is written last. 
+ */ + uint8_t valid; +} __rte_packed; + +/******************** + * hwrm_tf_tcam_set * + ********************/ + + +/* hwrm_tf_tcam_set_input (size:1024b/128B) */ +struct hwrm_tf_tcam_set_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* Firmware session id returned when HWRM_TF_SESSION_OPEN is sent. */ + uint32_t fw_session_id; + /* Control flags. */ + uint32_t flags; + /* Indicates the flow direction. */ + #define HWRM_TF_TCAM_SET_INPUT_FLAGS_DIR UINT32_C(0x1) + /* If this bit set to 0, then it indicates rx flow. */ + #define HWRM_TF_TCAM_SET_INPUT_FLAGS_DIR_RX UINT32_C(0x0) + /* If this bit is set to 1, then it indicates that tx flow. */ + #define HWRM_TF_TCAM_SET_INPUT_FLAGS_DIR_TX UINT32_C(0x1) + #define HWRM_TF_TCAM_SET_INPUT_FLAGS_DIR_LAST \ + HWRM_TF_TCAM_SET_INPUT_FLAGS_DIR_TX + /* + * Indicate device data is being sent via DMA, the device + * data is packing does not change. + */ + #define HWRM_TF_TCAM_SET_INPUT_FLAGS_DMA UINT32_C(0x2) + /* + * TCAM type of the resource, defined globally in the + * hwrm_tf_resc_type enum. + */ + uint32_t type; + /* Index of TCAM entry. */ + uint16_t idx; + /* Number of bytes in the TCAM key. */ + uint8_t key_size; + /* Number of bytes in the TCAM result. */ + uint8_t result_size; + /* + * Offset from which the mask bytes start in the device data + * array, key offset is always 0. + */ + uint8_t mask_offset; + /* Offset from which the result bytes start in the device data array. */ + uint8_t result_offset; + /* unused. */ + uint8_t unused0[6]; + /* + * TCAM key located at offset 0, mask located at mask_offsec + * and result at result_offsec for the device. + */ + uint8_t dev_data[88]; +} __rte_packed; + +/* hwrm_tf_tcam_set_output (size:128b/16B) */ +struct hwrm_tf_tcam_set_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* unused. */ + uint8_t unused0[7]; + /* + * This field is used in Output records to indicate that the + * output is completely written to RAM. This field should be + * read as '1' to indicate that the output has been + * completely written. When writing a command completion or + * response to an internal processor, the order of writes has + * to be such that this field is written last. 
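+ *
+ * As a worked illustration of the request-side layout described for
+ * hwrm_tf_tcam_set_input (values purely hypothetical, req being that
+ * request structure, and assuming the mask has the same length as the
+ * key):
+ *
+ *     req.key_size      = 16;
+ *     req.result_size   = 8;
+ *     req.mask_offset   = 16;     mask placed right after the key
+ *     req.result_offset = 32;     result after key plus mask
+ *     memcpy(&req.dev_data[0],  key,    16);
+ *     memcpy(&req.dev_data[16], mask,   16);
+ *     memcpy(&req.dev_data[32], result, 8);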
+ */ + uint8_t valid; +} __rte_packed; + +/******************** + * hwrm_tf_tcam_get * + ********************/ + + +/* hwrm_tf_tcam_get_input (size:256b/32B) */ +struct hwrm_tf_tcam_get_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* Firmware session id returned when HWRM_TF_SESSION_OPEN is sent. */ + uint32_t fw_session_id; + /* Control flags. */ + uint32_t flags; + /* Indicates the flow direction. */ + #define HWRM_TF_TCAM_GET_INPUT_FLAGS_DIR UINT32_C(0x1) + /* If this bit set to 0, then it indicates rx flow. */ + #define HWRM_TF_TCAM_GET_INPUT_FLAGS_DIR_RX UINT32_C(0x0) + /* If this bit is set to 1, then it indicates that tx flow. */ + #define HWRM_TF_TCAM_GET_INPUT_FLAGS_DIR_TX UINT32_C(0x1) + #define HWRM_TF_TCAM_GET_INPUT_FLAGS_DIR_LAST \ + HWRM_TF_TCAM_GET_INPUT_FLAGS_DIR_TX + /* + * TCAM type of the resource, defined globally in the + * hwrm_tf_resc_type enum. + */ + uint32_t type; + /* Index of a TCAM entry. */ + uint16_t idx; + /* unused. */ + uint16_t unused0; +} __rte_packed; + +/* hwrm_tf_tcam_get_output (size:2368b/296B) */ +struct hwrm_tf_tcam_get_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* Number of bytes in the TCAM key. */ + uint8_t key_size; + /* Number of bytes in the TCAM entry. */ + uint8_t result_size; + /* Offset from which the mask bytes start in the device data array. */ + uint8_t mask_offset; + /* Offset from which the result bytes start in the device data array. */ + uint8_t result_offset; + /* unused. */ + uint8_t unused0[4]; + /* + * TCAM key located at offset 0, mask located at mask_offsec + * and result at result_offsec for the device. + */ + uint8_t dev_data[272]; + /* unused. */ + uint8_t unused1[7]; + /* + * This field is used in Output records to indicate that the + * output is completely written to RAM. This field should be + * read as '1' to indicate that the output has been + * completely written. When writing a command completion or + * response to an internal processor, the order of writes has + * to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/********************* + * hwrm_tf_tcam_move * + *********************/ + + +/* hwrm_tf_tcam_move_input (size:1024b/128B) */ +struct hwrm_tf_tcam_move_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. 
This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* Firmware session id returned when HWRM_TF_SESSION_OPEN is sent. */ + uint32_t fw_session_id; + /* Control flags. */ + uint32_t flags; + /* Indicates the flow direction. */ + #define HWRM_TF_TCAM_MOVE_INPUT_FLAGS_DIR UINT32_C(0x1) + /* If this bit set to 0, then it indicates rx flow. */ + #define HWRM_TF_TCAM_MOVE_INPUT_FLAGS_DIR_RX UINT32_C(0x0) + /* If this bit is set to 1, then it indicates that tx flow. */ + #define HWRM_TF_TCAM_MOVE_INPUT_FLAGS_DIR_TX UINT32_C(0x1) + #define HWRM_TF_TCAM_MOVE_INPUT_FLAGS_DIR_LAST \ + HWRM_TF_TCAM_MOVE_INPUT_FLAGS_DIR_TX + /* + * TCAM type of the resource, defined globally in the + * hwrm_tf_resc_type enum. + */ + uint32_t type; + /* Number of TCAM index pairs to be swapped for the device. */ + uint16_t count; + /* unused. */ + uint16_t unused0; + /* TCAM index pairs to be swapped for the device. */ + uint16_t idx_pairs[48]; +} __rte_packed; + +/* hwrm_tf_tcam_move_output (size:128b/16B) */ +struct hwrm_tf_tcam_move_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* unused. */ + uint8_t unused0[7]; + /* + * This field is used in Output records to indicate that the + * output is completely written to RAM. This field should be + * read as '1' to indicate that the output has been + * completely written. When writing a command completion or + * response to an internal processor, the order of writes has + * to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/********************* + * hwrm_tf_tcam_free * + *********************/ + + +/* hwrm_tf_tcam_free_input (size:1024b/128B) */ +struct hwrm_tf_tcam_free_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. 
This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* Firmware session id returned when HWRM_TF_SESSION_OPEN is sent. */ + uint32_t fw_session_id; + /* Control flags. */ + uint32_t flags; + /* Indicates the flow direction. */ + #define HWRM_TF_TCAM_FREE_INPUT_FLAGS_DIR UINT32_C(0x1) + /* If this bit set to 0, then it indicates rx flow. */ + #define HWRM_TF_TCAM_FREE_INPUT_FLAGS_DIR_RX UINT32_C(0x0) + /* If this bit is set to 1, then it indicates that tx flow. */ + #define HWRM_TF_TCAM_FREE_INPUT_FLAGS_DIR_TX UINT32_C(0x1) + #define HWRM_TF_TCAM_FREE_INPUT_FLAGS_DIR_LAST \ + HWRM_TF_TCAM_FREE_INPUT_FLAGS_DIR_TX + /* + * TCAM type of the resource, defined globally in the + * hwrm_tf_resc_type enum. + */ + uint32_t type; + /* Number of TCAM index to be deleted for the device. */ + uint16_t count; + /* unused. */ + uint16_t unused0; + /* TCAM index list to be deleted for the device. */ + uint16_t idx_list[48]; +} __rte_packed; + +/* hwrm_tf_tcam_free_output (size:128b/16B) */ +struct hwrm_tf_tcam_free_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* unused. */ + uint8_t unused0[7]; + /* + * This field is used in Output records to indicate that the + * output is completely written to RAM. This field should be + * read as '1' to indicate that the output has been + * completely written. When writing a command completion or + * response to an internal processor, the order of writes has + * to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/****************************** + * hwrm_tunnel_dst_port_query * + ******************************/ + + +/* hwrm_tunnel_dst_port_query_input (size:192b/24B) */ +struct hwrm_tunnel_dst_port_query_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* Tunnel Type. 
*/ + uint8_t tunnel_type; + /* Virtual eXtensible Local Area Network (VXLAN) */ + #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_VXLAN \ + UINT32_C(0x1) + /* Generic Network Virtualization Encapsulation (Geneve) */ + #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_GENEVE \ + UINT32_C(0x5) + /* IPV4 over virtual eXtensible Local Area Network (IPV4oVXLAN) */ + #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_VXLAN_V4 \ + UINT32_C(0x9) + /* Enhance Generic Routing Encapsulation (GRE version 1) inside IP datagram payload */ + #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_IPGRE_V1 \ + UINT32_C(0xa) + /* Use fixed layer 2 ether type of 0xFFFF */ + #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_L2_ETYPE \ + UINT32_C(0xb) + /* IPV6 over virtual eXtensible Local Area Network with GPE header (IPV6oVXLANGPE) */ + #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_VXLAN_GPE_V6 \ + UINT32_C(0xc) + #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_LAST \ + HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_VXLAN_GPE_V6 + uint8_t unused_0[7]; +} __rte_packed; + +/* hwrm_tunnel_dst_port_query_output (size:128b/16B) */ +struct hwrm_tunnel_dst_port_query_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* + * This field represents the identifier of L4 destination port + * used for the given tunnel type. This field is valid for + * specific tunnel types that use layer 4 (e.g. UDP) + * transports for tunneling. + */ + uint16_t tunnel_dst_port_id; + /* + * This field represents the value of L4 destination port + * identified by tunnel_dst_port_id. This field is valid for + * specific tunnel types that use layer 4 (e.g. UDP) + * transports for tunneling. + * This field is in network byte order. + * + * A value of 0 means that the destination port is not + * configured. + */ + uint16_t tunnel_dst_port_val; + uint8_t unused_0[3]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/****************************** + * hwrm_tunnel_dst_port_alloc * + ******************************/ + + +/* hwrm_tunnel_dst_port_alloc_input (size:192b/24B) */ +struct hwrm_tunnel_dst_port_alloc_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. 
This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* Tunnel Type. */ + uint8_t tunnel_type; + /* Virtual eXtensible Local Area Network (VXLAN) */ + #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN \ + UINT32_C(0x1) + /* Generic Network Virtualization Encapsulation (Geneve) */ + #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE \ + UINT32_C(0x5) + /* IPV4 over virtual eXtensible Local Area Network (IPV4oVXLAN) */ + #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN_V4 \ + UINT32_C(0x9) + /* Enhance Generic Routing Encapsulation (GRE version 1) inside IP datagram payload */ + #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_IPGRE_V1 \ + UINT32_C(0xa) + /* Use fixed layer 2 ether type of 0xFFFF */ + #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_L2_ETYPE \ + UINT32_C(0xb) + /* IPV6 over virtual eXtensible Local Area Network with GPE header (IPV6oVXLANGPE) */ + #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN_GPE_V6 \ + UINT32_C(0xc) + #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_LAST \ + HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN_GPE_V6 + uint8_t unused_0; + /* + * This field represents the value of L4 destination port used + * for the given tunnel type. This field is valid for + * specific tunnel types that use layer 4 (e.g. UDP) + * transports for tunneling. + * + * This field is in network byte order. + * + * A value of 0 shall fail the command. + */ + uint16_t tunnel_dst_port_val; + uint8_t unused_1[4]; +} __rte_packed; + +/* hwrm_tunnel_dst_port_alloc_output (size:128b/16B) */ +struct hwrm_tunnel_dst_port_alloc_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* + * Identifier of a tunnel L4 destination port value. Only applies to tunnel + * types that has l4 destination port parameters. + */ + uint16_t tunnel_dst_port_id; + uint8_t unused_0[5]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/***************************** + * hwrm_tunnel_dst_port_free * + *****************************/ + + +/* hwrm_tunnel_dst_port_free_input (size:192b/24B) */ +struct hwrm_tunnel_dst_port_free_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. 
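+ *
+ * As a sketch of how the alloc command above and the free command
+ * defined here are typically paired (illustration only; send_hwrm_msg(),
+ * bp and the filling of the common header fields are assumed to be
+ * provided by the driver's HWRM transport layer):
+ *
+ *   struct hwrm_tunnel_dst_port_alloc_input areq = { 0 };
+ *   struct hwrm_tunnel_dst_port_alloc_output aresp = { 0 };
+ *
+ *   areq.tunnel_type =
+ *       HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN;
+ *   areq.tunnel_dst_port_val = rte_cpu_to_be_16(4789);
+ *       (the IANA VXLAN port, in network byte order as required)
+ *   send_hwrm_msg(bp, &areq, sizeof(areq), &aresp, sizeof(aresp));
+ *
+ *   and later, to release the port:
+ *
+ *   struct hwrm_tunnel_dst_port_free_input freq = { 0 };
+ *
+ *   freq.tunnel_type =
+ *       HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN;
+ *   freq.tunnel_dst_port_id = aresp.tunnel_dst_port_id;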
+ */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* Tunnel Type. */ + uint8_t tunnel_type; + /* Virtual eXtensible Local Area Network (VXLAN) */ + #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN \ + UINT32_C(0x1) + /* Generic Network Virtualization Encapsulation (Geneve) */ + #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE \ + UINT32_C(0x5) + /* IPV4 over virtual eXtensible Local Area Network (IPV4oVXLAN) */ + #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN_V4 \ + UINT32_C(0x9) + /* Enhance Generic Routing Encapsulation (GRE version 1) inside IP datagram payload */ + #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_IPGRE_V1 \ + UINT32_C(0xa) + /* Use fixed layer 2 ether type of 0xFFFF */ + #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_L2_ETYPE \ + UINT32_C(0xb) + /* IPV6 over virtual eXtensible Local Area Network with GPE header (IPV6oVXLANGPE) */ + #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN_GPE_V6 \ + UINT32_C(0xc) + #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_LAST \ + HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN_GPE_V6 + uint8_t unused_0; + /* + * Identifier of a tunnel L4 destination port value. Only applies to tunnel + * types that has l4 destination port parameters. + */ + uint16_t tunnel_dst_port_id; + uint8_t unused_1[4]; +} __rte_packed; + +/* hwrm_tunnel_dst_port_free_output (size:128b/16B) */ +struct hwrm_tunnel_dst_port_free_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_1[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/* Periodic statistics context DMA to host. 
*/ +/* ctx_hw_stats (size:1280b/160B) */ +struct ctx_hw_stats { + /* Number of received unicast packets */ + uint64_t rx_ucast_pkts; + /* Number of received multicast packets */ + uint64_t rx_mcast_pkts; + /* Number of received broadcast packets */ + uint64_t rx_bcast_pkts; + /* Number of discarded packets on received path */ + uint64_t rx_discard_pkts; + /* Number of dropped packets on received path */ + uint64_t rx_drop_pkts; + /* Number of received bytes for unicast traffic */ + uint64_t rx_ucast_bytes; + /* Number of received bytes for multicast traffic */ + uint64_t rx_mcast_bytes; + /* Number of received bytes for broadcast traffic */ + uint64_t rx_bcast_bytes; + /* Number of transmitted unicast packets */ + uint64_t tx_ucast_pkts; + /* Number of transmitted multicast packets */ + uint64_t tx_mcast_pkts; + /* Number of transmitted broadcast packets */ + uint64_t tx_bcast_pkts; + /* Number of discarded packets on transmit path */ + uint64_t tx_discard_pkts; + /* Number of dropped packets on transmit path */ + uint64_t tx_drop_pkts; + /* Number of transmitted bytes for unicast traffic */ + uint64_t tx_ucast_bytes; + /* Number of transmitted bytes for multicast traffic */ + uint64_t tx_mcast_bytes; + /* Number of transmitted bytes for broadcast traffic */ + uint64_t tx_bcast_bytes; + /* Number of TPA packets */ + uint64_t tpa_pkts; + /* Number of TPA bytes */ + uint64_t tpa_bytes; + /* Number of TPA events */ + uint64_t tpa_events; + /* Number of TPA aborts */ + uint64_t tpa_aborts; +} __rte_packed; + +/* Periodic statistics context DMA to host. */ +/* ctx_hw_stats_ext (size:1344b/168B) */ +struct ctx_hw_stats_ext { + /* Number of received unicast packets */ + uint64_t rx_ucast_pkts; + /* Number of received multicast packets */ + uint64_t rx_mcast_pkts; + /* Number of received broadcast packets */ + uint64_t rx_bcast_pkts; + /* Number of discarded packets on received path */ + uint64_t rx_discard_pkts; + /* Number of dropped packets on received path */ + uint64_t rx_drop_pkts; + /* Number of received bytes for unicast traffic */ + uint64_t rx_ucast_bytes; + /* Number of received bytes for multicast traffic */ + uint64_t rx_mcast_bytes; + /* Number of received bytes for broadcast traffic */ + uint64_t rx_bcast_bytes; + /* Number of transmitted unicast packets */ + uint64_t tx_ucast_pkts; + /* Number of transmitted multicast packets */ + uint64_t tx_mcast_pkts; + /* Number of transmitted broadcast packets */ + uint64_t tx_bcast_pkts; + /* Number of discarded packets on transmit path */ + uint64_t tx_discard_pkts; + /* Number of dropped packets on transmit path */ + uint64_t tx_drop_pkts; + /* Number of transmitted bytes for unicast traffic */ + uint64_t tx_ucast_bytes; + /* Number of transmitted bytes for multicast traffic */ + uint64_t tx_mcast_bytes; + /* Number of transmitted bytes for broadcast traffic */ + uint64_t tx_bcast_bytes; + /* Number of TPA eligible packets */ + uint64_t rx_tpa_eligible_pkt; + /* Number of TPA eligible bytes */ + uint64_t rx_tpa_eligible_bytes; + /* Number of TPA packets */ + uint64_t rx_tpa_pkt; + /* Number of TPA bytes */ + uint64_t rx_tpa_bytes; + /* Number of TPA errors */ + uint64_t rx_tpa_errors; +} __rte_packed; + +/* Periodic Engine statistics context DMA to host. */ +/* ctx_eng_stats (size:512b/64B) */ +struct ctx_eng_stats { + /* + * Count of data bytes into the Engine. + * This includes any user supplied prefix, + * but does not include any predefined + * prefix data. + */ + uint64_t eng_bytes_in; + /* Count of data bytes out of the Engine. 
*/ + uint64_t eng_bytes_out; + /* + * Count, in 4-byte (dword) units, of bytes + * that are input as auxiliary data. + * This includes the aux_cmd data. + */ + uint64_t aux_bytes_in; + /* + * Count, in 4-byte (dword) units, of bytes + * that are output as auxiliary data. + * This count is the buffer space for aux_data + * output provided in the RQE, not the actual + * aux_data written + */ + uint64_t aux_bytes_out; + /* Count of number of commands executed. */ + uint64_t commands; + /* + * Count of number of error commands. + * These are the commands with a + * non-zero status value. + */ + uint64_t error_commands; + /* + * Compression/Encryption Engine usage, + * the unit is count of clock cycles + */ + uint64_t cce_engine_usage; + /* + * De-Compression/De-cryption Engine usage, + * the unit is count of clock cycles + */ + uint64_t cdd_engine_usage; +} __rte_packed; + +/*********************** + * hwrm_stat_ctx_alloc * + ***********************/ + + +/* hwrm_stat_ctx_alloc_input (size:256b/32B) */ +struct hwrm_stat_ctx_alloc_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* + * This is the address for statistic block. + * > For new versions of the chip, this address should be 128B + * > aligned. + */ + uint64_t stats_dma_addr; + /* + * The statistic block update period in ms. + * e.g. 250ms, 500ms, 750ms, 1000ms. + * If update_period_ms is 0, then the stats update + * shall be never done and the DMA address shall not be used. + * In this case, the stat block can only be read by + * hwrm_stat_ctx_query command. + * On Ethernet/L2 based devices: + * if tpa v2 supported (hwrm_vnic_qcaps[max_aggs_supported]>0), + * ctx_hw_stats_ext is used for DMA, + * else + * ctx_hw_stats is used for DMA. + */ + uint32_t update_period_ms; + /* + * This field is used to specify statistics context specific + * configuration flags. + */ + uint8_t stat_ctx_flags; + /* + * When this bit is set to '1', the statistics context shall be + * allocated for RoCE traffic only. In this case, traffic other + * than offloaded RoCE traffic shall not be included in this + * statistic context. + * When this bit is set to '0', the statistics context shall be + * used for network traffic or engine traffic. + */ + #define HWRM_STAT_CTX_ALLOC_INPUT_STAT_CTX_FLAGS_ROCE UINT32_C(0x1) + uint8_t unused_0; + /* + * This is the size of the structure (ctx_hw_stats or + * ctx_hw_stats_ext) that the driver has allocated to be used + * for the periodic DMA updates. 
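+ *
+ * For illustration (not part of the interface definition), on a device
+ * reporting TPA v2 support a driver would size and describe the DMA
+ * area roughly as follows; stats_mz stands for a DMA-able region the
+ * driver is assumed to have obtained, e.g. via rte_memzone_reserve(),
+ * and the byte-order macros come from <rte_byteorder.h>:
+ *
+ *   struct hwrm_stat_ctx_alloc_input req = { 0 };
+ *
+ *   req.stats_dma_addr = rte_cpu_to_le_64(stats_mz->iova);
+ *   req.update_period_ms = rte_cpu_to_le_32(1000);
+ *   req.stats_dma_length =
+ *       rte_cpu_to_le_16(sizeof(struct ctx_hw_stats_ext));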
+ */ + uint16_t stats_dma_length; +} __rte_packed; + +/* hwrm_stat_ctx_alloc_output (size:128b/16B) */ +struct hwrm_stat_ctx_alloc_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* This is the statistics context ID value. */ + uint32_t stat_ctx_id; + uint8_t unused_0[3]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/********************** + * hwrm_stat_ctx_free * + **********************/ + + +/* hwrm_stat_ctx_free_input (size:192b/24B) */ +struct hwrm_stat_ctx_free_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* ID of the statistics context that is being queried. */ + uint32_t stat_ctx_id; + uint8_t unused_0[4]; +} __rte_packed; + +/* hwrm_stat_ctx_free_output (size:128b/16B) */ +struct hwrm_stat_ctx_free_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* This is the statistics context ID value. */ + uint32_t stat_ctx_id; + uint8_t unused_0[3]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/*********************** + * hwrm_stat_ctx_query * + ***********************/ + + +/* hwrm_stat_ctx_query_input (size:192b/24B) */ +struct hwrm_stat_ctx_query_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. 
This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* ID of the statistics context that is being queried. */ + uint32_t stat_ctx_id; + uint8_t unused_0[4]; +} __rte_packed; + +/* hwrm_stat_ctx_query_output (size:1408b/176B) */ +struct hwrm_stat_ctx_query_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* Number of transmitted unicast packets */ + uint64_t tx_ucast_pkts; + /* Number of transmitted multicast packets */ + uint64_t tx_mcast_pkts; + /* Number of transmitted broadcast packets */ + uint64_t tx_bcast_pkts; + /* Number of transmitted packets with error */ + uint64_t tx_err_pkts; + /* Number of dropped packets on transmit path */ + uint64_t tx_drop_pkts; + /* Number of transmitted bytes for unicast traffic */ + uint64_t tx_ucast_bytes; + /* Number of transmitted bytes for multicast traffic */ + uint64_t tx_mcast_bytes; + /* Number of transmitted bytes for broadcast traffic */ + uint64_t tx_bcast_bytes; + /* Number of received unicast packets */ + uint64_t rx_ucast_pkts; + /* Number of received multicast packets */ + uint64_t rx_mcast_pkts; + /* Number of received broadcast packets */ + uint64_t rx_bcast_pkts; + /* Number of received packets with error */ + uint64_t rx_err_pkts; + /* Number of dropped packets on received path */ + uint64_t rx_drop_pkts; + /* Number of received bytes for unicast traffic */ + uint64_t rx_ucast_bytes; + /* Number of received bytes for multicast traffic */ + uint64_t rx_mcast_bytes; + /* Number of received bytes for broadcast traffic */ + uint64_t rx_bcast_bytes; + /* Number of aggregated unicast packets */ + uint64_t rx_agg_pkts; + /* Number of aggregated unicast bytes */ + uint64_t rx_agg_bytes; + /* Number of aggregation events */ + uint64_t rx_agg_events; + /* Number of aborted aggregations */ + uint64_t rx_agg_aborts; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/*************************** + * hwrm_stat_ctx_eng_query * + ***************************/ + + +/* hwrm_stat_ctx_eng_query_input (size:192b/24B) */ +struct hwrm_stat_ctx_eng_query_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. 
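+ *
+ * As a rough illustration of how the L2 counters returned in
+ * hwrm_stat_ctx_query_output above might be folded into DPDK's generic
+ * counters (sketch only; the exact mapping is a driver policy choice,
+ * and <rte_ethdev.h> plus <rte_byteorder.h> are assumed):
+ *
+ *   static void fill_eth_stats(const struct hwrm_stat_ctx_query_output *resp,
+ *                              struct rte_eth_stats *stats)
+ *   {
+ *       stats->ipackets = rte_le_to_cpu_64(resp->rx_ucast_pkts) +
+ *                         rte_le_to_cpu_64(resp->rx_mcast_pkts) +
+ *                         rte_le_to_cpu_64(resp->rx_bcast_pkts);
+ *       stats->opackets = rte_le_to_cpu_64(resp->tx_ucast_pkts) +
+ *                         rte_le_to_cpu_64(resp->tx_mcast_pkts) +
+ *                         rte_le_to_cpu_64(resp->tx_bcast_pkts);
+ *       stats->ierrors  = rte_le_to_cpu_64(resp->rx_err_pkts);
+ *       stats->oerrors  = rte_le_to_cpu_64(resp->tx_err_pkts);
+ *   }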
+ */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* ID of the statistics context that is being queried. */ + uint32_t stat_ctx_id; + uint8_t unused_0[4]; +} __rte_packed; + +/* hwrm_stat_ctx_eng_query_output (size:640b/80B) */ +struct hwrm_stat_ctx_eng_query_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* + * Count of data bytes into the Engine. + * This includes any user supplied prefix, + * but does not include any predefined + * prefix data. + */ + uint64_t eng_bytes_in; + /* Count of data bytes out of the Engine. */ + uint64_t eng_bytes_out; + /* + * Count, in 4-byte (dword) units, of bytes + * that are input as auxiliary data. + * This includes the aux_cmd data. + */ + uint64_t aux_bytes_in; + /* + * Count, in 4-byte (dword) units, of bytes + * that are output as auxiliary data. + * This count is the buffer space for aux_data + * output provided in the RQE, not the actual + * aux_data written + */ + uint64_t aux_bytes_out; + /* Count of number of commands executed. */ + uint64_t commands; + /* + * Count of number of error commands. + * These are the commands with a + * non-zero status value. + */ + uint64_t error_commands; + /* + * Compression/Encryption Engine usage, + * the unit is count of clock cycles + */ + uint64_t cce_engine_usage; + /* + * De-Compression/De-cryption Engine usage, + * the unit is count of clock cycles + */ + uint64_t cdd_engine_usage; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/*************************** + * hwrm_stat_ctx_clr_stats * + ***************************/ + + +/* hwrm_stat_ctx_clr_stats_input (size:192b/24B) */ +struct hwrm_stat_ctx_clr_stats_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. 
+ */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* ID of the statistics context that is being queried. */ + uint32_t stat_ctx_id; + uint8_t unused_0[4]; +} __rte_packed; + +/* hwrm_stat_ctx_clr_stats_output (size:128b/16B) */ +struct hwrm_stat_ctx_clr_stats_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/******************** + * hwrm_pcie_qstats * + ********************/ + + +/* hwrm_pcie_qstats_input (size:256b/32B) */ +struct hwrm_pcie_qstats_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* + * The size of PCIe statistics block in bytes. + * Firmware will DMA the PCIe statistics to + * the host with this field size in the response. + */ + uint16_t pcie_stat_size; + uint8_t unused_0[6]; + /* + * This is the host address where + * PCIe statistics will be stored + */ + uint64_t pcie_stat_host_addr; +} __rte_packed; + +/* hwrm_pcie_qstats_output (size:128b/16B) */ +struct hwrm_pcie_qstats_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* The size of PCIe statistics block in bytes. */ + uint16_t pcie_stat_size; + uint8_t unused_0[5]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. 
This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/* PCIe Statistics Formats */ +/* pcie_ctx_hw_stats (size:768b/96B) */ +struct pcie_ctx_hw_stats { + /* Number of physical layer receiver errors */ + uint64_t pcie_pl_signal_integrity; + /* Number of DLLP CRC errors detected by Data Link Layer */ + uint64_t pcie_dl_signal_integrity; + /* + * Number of TLP LCRC and sequence number errors detected + * by Data Link Layer + */ + uint64_t pcie_tl_signal_integrity; + /* Number of times LTSSM entered Recovery state */ + uint64_t pcie_link_integrity; + /* Report number of TLP bits that have been transmitted in Mbps */ + uint64_t pcie_tx_traffic_rate; + /* Report number of TLP bits that have been received in Mbps */ + uint64_t pcie_rx_traffic_rate; + /* Number of DLLP bytes that have been transmitted */ + uint64_t pcie_tx_dllp_statistics; + /* Number of DLLP bytes that have been received */ + uint64_t pcie_rx_dllp_statistics; + /* + * Number of times spent in each phase of gen3 + * equalization + */ + uint64_t pcie_equalization_time; + /* Records the last 16 transitions of the LTSSM */ + uint32_t pcie_ltssm_histogram[4]; + /* + * Record the last 8 reasons on why LTSSM transitioned + * to Recovery + */ + uint64_t pcie_recovery_histogram; +} __rte_packed; + +/********************** + * hwrm_exec_fwd_resp * + **********************/ + + +/* hwrm_exec_fwd_resp_input (size:1024b/128B) */ +struct hwrm_exec_fwd_resp_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* + * This is an encapsulated request. This request should + * be executed by the HWRM and the response should be + * provided in the response buffer inside the encapsulated + * request. + */ + uint32_t encap_request[26]; + /* + * This value indicates the target id of the response to + * the encapsulated request. + * 0x0 - 0xFFF8 - Used for function ids + * 0xFFF8 - 0xFFFE - Reserved for internal processors + * 0xFFFF - HWRM + */ + uint16_t encap_resp_target_id; + uint8_t unused_0[6]; +} __rte_packed; + +/* hwrm_exec_fwd_resp_output (size:128b/16B) */ +struct hwrm_exec_fwd_resp_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. 
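+ *
+ * Illustration only: when a PF re-executes a request trapped from one
+ * of its VFs using hwrm_exec_fwd_resp_input above, the encapsulation
+ * amounts to roughly the following (vf_req, vf_req_len and vf_fid are
+ * assumed to come from the forwarded-request event; memcpy() is from
+ * <string.h> and RTE_MIN() from <rte_common.h>):
+ *
+ *   struct hwrm_exec_fwd_resp_input req = { 0 };
+ *
+ *   memcpy(req.encap_request, vf_req,
+ *          RTE_MIN(vf_req_len, sizeof(req.encap_request)));
+ *   req.encap_resp_target_id = rte_cpu_to_le_16(vf_fid);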
*/ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/************************ + * hwrm_reject_fwd_resp * + ************************/ + + +/* hwrm_reject_fwd_resp_input (size:1024b/128B) */ +struct hwrm_reject_fwd_resp_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* + * This is an encapsulated request. This request should + * be rejected by the HWRM and the error response should be + * provided in the response buffer inside the encapsulated + * request. + */ + uint32_t encap_request[26]; + /* + * This value indicates the target id of the response to + * the encapsulated request. + * 0x0 - 0xFFF8 - Used for function ids + * 0xFFF8 - 0xFFFE - Reserved for internal processors + * 0xFFFF - HWRM + */ + uint16_t encap_resp_target_id; + uint8_t unused_0[6]; +} __rte_packed; + +/* hwrm_reject_fwd_resp_output (size:128b/16B) */ +struct hwrm_reject_fwd_resp_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/***************** + * hwrm_fwd_resp * + *****************/ + + +/* hwrm_fwd_resp_input (size:1024b/128B) */ +struct hwrm_fwd_resp_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. 
+ */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* + * This value indicates the target id of the encapsulated + * response. + * 0x0 - 0xFFF8 - Used for function ids + * 0xFFF8 - 0xFFFE - Reserved for internal processors + * 0xFFFF - HWRM + */ + uint16_t encap_resp_target_id; + /* + * This value indicates the completion ring the encapsulated + * response will be optionally completed on. If the value is + * -1, then no CR completion shall be generated for the + * encapsulated response. Any other value must be a + * valid CR ring_id value. If a valid encap_resp_cmpl_ring + * is provided, then a CR completion shall be generated for + * the encapsulated response. + */ + uint16_t encap_resp_cmpl_ring; + /* This field indicates the length of encapsulated response. */ + uint16_t encap_resp_len; + uint8_t unused_0; + uint8_t unused_1; + /* + * This is the host address where the encapsulated response + * will be written. + * This area must be 16B aligned and must be cleared to zero + * before the original request is made. + */ + uint64_t encap_resp_addr; + /* This is an encapsulated response. */ + uint32_t encap_resp[24]; +} __rte_packed; + +/* hwrm_fwd_resp_output (size:128b/16B) */ +struct hwrm_fwd_resp_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/***************************** + * hwrm_fwd_async_event_cmpl * + *****************************/ + + +/* hwrm_fwd_async_event_cmpl_input (size:320b/40B) */ +struct hwrm_fwd_async_event_cmpl_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. 
This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* + * This value indicates the target id of the encapsulated + * asynchronous event. + * 0x0 - 0xFFF8 - Used for function ids + * 0xFFF8 - 0xFFFE - Reserved for internal processors + * 0xFFFF - Broadcast to all children VFs (only applicable when + * a PF is the requester) + */ + uint16_t encap_async_event_target_id; + uint8_t unused_0[6]; + /* This is an encapsulated asynchronous event completion. */ + uint32_t encap_async_event_cmpl[4]; +} __rte_packed; + +/* hwrm_fwd_async_event_cmpl_output (size:128b/16B) */ +struct hwrm_fwd_async_event_cmpl_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/************************** + * hwrm_nvm_raw_write_blk * + **************************/ + + +/* hwrm_nvm_raw_write_blk_input (size:256b/32B) */ +struct hwrm_nvm_raw_write_blk_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* + * 64-bit Host Source Address. + * This is the location of the source data to be written. + */ + uint64_t host_src_addr; + /* + * 32-bit Destination Address. + * This is the NVRAM byte-offset where the source data will be written to. + */ + uint32_t dest_addr; + /* Length of data to be written, in bytes. */ + uint32_t len; +} __rte_packed; + +/* hwrm_nvm_raw_write_blk_output (size:128b/16B) */ +struct hwrm_nvm_raw_write_blk_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. 
+ * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/***************** + * hwrm_nvm_read * + *****************/ + + +/* hwrm_nvm_read_input (size:320b/40B) */ +struct hwrm_nvm_read_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* + * 64-bit Host Destination Address. + * This is the host address where the data will be written to. + */ + uint64_t host_dest_addr; + /* The 0-based index of the directory entry. */ + uint16_t dir_idx; + uint8_t unused_0[2]; + /* The NVRAM byte-offset to read from. */ + uint32_t offset; + /* The length of the data to be read, in bytes. */ + uint32_t len; + uint8_t unused_1[4]; +} __rte_packed; + +/* hwrm_nvm_read_output (size:128b/16B) */ +struct hwrm_nvm_read_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/********************* + * hwrm_nvm_raw_dump * + *********************/ + + +/* hwrm_nvm_raw_dump_input (size:256b/32B) */ +struct hwrm_nvm_raw_dump_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. 
This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* + * 64-bit Host Destination Address. + * This is the host address where the data will be written to. + */ + uint64_t host_dest_addr; + /* 32-bit NVRAM byte-offset to read from. */ + uint32_t offset; + /* Total length of NVRAM contents to be read, in bytes. */ + uint32_t len; +} __rte_packed; + +/* hwrm_nvm_raw_dump_output (size:128b/16B) */ +struct hwrm_nvm_raw_dump_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/**************************** + * hwrm_nvm_get_dir_entries * + ****************************/ + + +/* hwrm_nvm_get_dir_entries_input (size:192b/24B) */ +struct hwrm_nvm_get_dir_entries_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* + * 64-bit Host Destination Address. + * This is the host address where the directory will be written. + */ + uint64_t host_dest_addr; +} __rte_packed; + +/* hwrm_nvm_get_dir_entries_output (size:128b/16B) */ +struct hwrm_nvm_get_dir_entries_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. 
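+ *
+ * As a sketch of the usual sequence (illustration only): the directory
+ * is first sized from the information returned by the get_dir_info
+ * command defined below, and the listing is then fetched into a
+ * DMA-able buffer, e.g. one reserved with rte_memzone_reserve() from
+ * <rte_memzone.h>:
+ *
+ *   struct hwrm_nvm_get_dir_info_output info;
+ *       (assumed to have been filled in by a prior get_dir_info call)
+ *   size_t len = (size_t)rte_le_to_cpu_32(info.entries) *
+ *                rte_le_to_cpu_32(info.entry_length);
+ *   const struct rte_memzone *mz =
+ *       rte_memzone_reserve("nvm_dir", len, SOCKET_ID_ANY, 0);
+ *   struct hwrm_nvm_get_dir_entries_input req = { 0 };
+ *
+ *   req.host_dest_addr = rte_cpu_to_le_64(mz->iova);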
+ */ + uint8_t valid; +} __rte_packed; + +/************************* + * hwrm_nvm_get_dir_info * + *************************/ + + +/* hwrm_nvm_get_dir_info_input (size:128b/16B) */ +struct hwrm_nvm_get_dir_info_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; +} __rte_packed; + +/* hwrm_nvm_get_dir_info_output (size:192b/24B) */ +struct hwrm_nvm_get_dir_info_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* Number of directory entries in the directory. */ + uint32_t entries; + /* Size of each directory entry, in bytes. */ + uint32_t entry_length; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/****************** + * hwrm_nvm_write * + ******************/ + + +/* hwrm_nvm_write_input (size:384b/48B) */ +struct hwrm_nvm_write_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* + * 64-bit Host Source Address. + * This is where the source data is. + */ + uint64_t host_src_addr; + /* The Directory Entry Type (valid values are defined in the bnxnvm_directory_type enum defined in the file bnxnvm_defs.h). 
*/ + uint16_t dir_type; + /* + * Directory ordinal. + * The 0-based instance of the combined Directory Entry Type and Extension. + */ + uint16_t dir_ordinal; + /* The Directory Entry Extension flags (see BNX_DIR_EXT_* in the file bnxnvm_defs.h). */ + uint16_t dir_ext; + /* Directory Entry Attribute flags (see BNX_DIR_ATTR_* in the file bnxnvm_defs.h). */ + uint16_t dir_attr; + /* + * Length of data to write, in bytes. May be less than or equal to the allocated size for the directory entry. + * The data length stored in the directory entry will be updated to reflect this value once the write is complete. + */ + uint32_t dir_data_length; + /* Option. */ + uint16_t option; + uint16_t flags; + /* + * When this bit is '1', the original active image + * will not be removed. TBD: what purpose is this? + */ + #define HWRM_NVM_WRITE_INPUT_FLAGS_KEEP_ORIG_ACTIVE_IMG \ + UINT32_C(0x1) + /* + * The requested length of the allocated NVM for the item, in bytes. This value may be greater than or equal to the specified data length (dir_data_length). + * If this value is less than the specified data length, it will be ignored. + * The response will contain the actual allocated item length, which may be greater than the requested item length. + * The purpose for allocating more than the required number of bytes for an item's data is to pre-allocate extra storage (padding) to accommodate + * the potential future growth of an item (e.g. upgraded firmware with a size increase, log growth, expanded configuration data). + */ + uint32_t dir_item_length; + uint32_t unused_0; +} __rte_packed; + +/* hwrm_nvm_write_output (size:128b/16B) */ +struct hwrm_nvm_write_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* + * Length of the allocated NVM for the item, in bytes. The value may be greater than or equal to the specified data length or the requested item length. + * The actual item length used when creating a new directory entry will be a multiple of an NVM block size. + */ + uint32_t dir_item_length; + /* The directory index of the created or modified item. */ + uint16_t dir_idx; + uint8_t unused_0; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/* hwrm_nvm_write_cmd_err (size:64b/8B) */ +struct hwrm_nvm_write_cmd_err { + /* + * command specific error codes that goes to + * the cmd_err field in Common HWRM Error Response. + */ + uint8_t code; + /* Unknown error */ + #define HWRM_NVM_WRITE_CMD_ERR_CODE_UNKNOWN UINT32_C(0x0) + /* Unable to complete operation due to fragmentation */ + #define HWRM_NVM_WRITE_CMD_ERR_CODE_FRAG_ERR UINT32_C(0x1) + /* nvm is completely full. 
*/ + #define HWRM_NVM_WRITE_CMD_ERR_CODE_NO_SPACE UINT32_C(0x2) + #define HWRM_NVM_WRITE_CMD_ERR_CODE_LAST \ + HWRM_NVM_WRITE_CMD_ERR_CODE_NO_SPACE + uint8_t unused_0[7]; +} __rte_packed; + +/******************* + * hwrm_nvm_modify * + *******************/ + + +/* hwrm_nvm_modify_input (size:320b/40B) */ +struct hwrm_nvm_modify_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* + * 64-bit Host Source Address. + * This is where the modified data is. + */ + uint64_t host_src_addr; + /* 16-bit directory entry index. */ + uint16_t dir_idx; + uint16_t flags; + /* + * This flag indicates the sender wants to modify a continuous NVRAM + * area using a batch of this HWRM requests. The offset of a request + * must be continuous to the end of previous request's. Firmware does + * not update the directory entry until receiving the last request, + * which is indicated by the batch_last flag. + * This flag is set usually when a sender does not have a block of + * memory that is big enough to hold the entire NVRAM data for send + * at one time. + */ + #define HWRM_NVM_MODIFY_INPUT_FLAGS_BATCH_MODE UINT32_C(0x1) + /* + * This flag can be used only when the batch_mode flag is set. + * It indicates this request is the last of batch requests. + */ + #define HWRM_NVM_MODIFY_INPUT_FLAGS_BATCH_LAST UINT32_C(0x2) + /* 32-bit NVRAM byte-offset to modify content from. */ + uint32_t offset; + /* + * Length of data to be modified, in bytes. The length shall + * be non-zero. + */ + uint32_t len; + uint8_t unused_1[4]; +} __rte_packed; + +/* hwrm_nvm_modify_output (size:128b/16B) */ +struct hwrm_nvm_modify_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/*************************** + * hwrm_nvm_find_dir_entry * + ***************************/ + + +/* hwrm_nvm_find_dir_entry_input (size:256b/32B) */ +struct hwrm_nvm_find_dir_entry_input { + /* The HWRM command request type. 
*/ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + uint32_t enables; + /* + * This bit must be '1' for the dir_idx_valid field to be + * configured. + */ + #define HWRM_NVM_FIND_DIR_ENTRY_INPUT_ENABLES_DIR_IDX_VALID \ + UINT32_C(0x1) + /* Directory Entry Index */ + uint16_t dir_idx; + /* Directory Entry (Image) Type */ + uint16_t dir_type; + /* + * Directory ordinal. + * The instance of this Directory Type + */ + uint16_t dir_ordinal; + /* The Directory Entry Extension flags. */ + uint16_t dir_ext; + /* This value indicates the search option using dir_ordinal. */ + uint8_t opt_ordinal; + /* This value indicates the search option using dir_ordinal. */ + #define HWRM_NVM_FIND_DIR_ENTRY_INPUT_OPT_ORDINAL_MASK UINT32_C(0x3) + #define HWRM_NVM_FIND_DIR_ENTRY_INPUT_OPT_ORDINAL_SFT 0 + /* Equal to specified ordinal value. */ + #define HWRM_NVM_FIND_DIR_ENTRY_INPUT_OPT_ORDINAL_EQ UINT32_C(0x0) + /* Greater than or equal to specified ordinal value */ + #define HWRM_NVM_FIND_DIR_ENTRY_INPUT_OPT_ORDINAL_GE UINT32_C(0x1) + /* Greater than specified ordinal value */ + #define HWRM_NVM_FIND_DIR_ENTRY_INPUT_OPT_ORDINAL_GT UINT32_C(0x2) + #define HWRM_NVM_FIND_DIR_ENTRY_INPUT_OPT_ORDINAL_LAST \ + HWRM_NVM_FIND_DIR_ENTRY_INPUT_OPT_ORDINAL_GT + uint8_t unused_0[3]; +} __rte_packed; + +/* hwrm_nvm_find_dir_entry_output (size:256b/32B) */ +struct hwrm_nvm_find_dir_entry_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* Allocated NVRAM for this directory entry, in bytes. */ + uint32_t dir_item_length; + /* Size of the stored data for this directory entry, in bytes. */ + uint32_t dir_data_length; + /* + * Firmware version. + * Only valid if the directory entry is for embedded firmware stored in APE_BIN Format. + */ + uint32_t fw_ver; + /* Directory ordinal. */ + uint16_t dir_ordinal; + /* Directory Entry Index */ + uint16_t dir_idx; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. 
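+ *
+ * Usage sketch, an assumption about typical callers rather than part
+ * of the generated definition: once the response is valid, the
+ * returned dir_idx is what a caller would normally feed into a
+ * follow-up hwrm_nvm_read request to fetch the located item, e.g.
+ *     read_req.dir_idx = resp->dir_idx;
+ *     read_req.offset  = 0;
+ *     read_req.len     = resp->dir_data_length;
+ * with `read_req` and `resp` being hypothetical local variables.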
+ */ + uint8_t valid; +} __rte_packed; + +/**************************** + * hwrm_nvm_erase_dir_entry * + ****************************/ + + +/* hwrm_nvm_erase_dir_entry_input (size:192b/24B) */ +struct hwrm_nvm_erase_dir_entry_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* Directory Entry Index */ + uint16_t dir_idx; + uint8_t unused_0[6]; +} __rte_packed; + +/* hwrm_nvm_erase_dir_entry_output (size:128b/16B) */ +struct hwrm_nvm_erase_dir_entry_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/************************* + * hwrm_nvm_get_dev_info * + *************************/ + + +/* hwrm_nvm_get_dev_info_input (size:128b/16B) */ +struct hwrm_nvm_get_dev_info_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; +} __rte_packed; + +/* hwrm_nvm_get_dev_info_output (size:256b/32B) */ +struct hwrm_nvm_get_dev_info_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. 
*/ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* Manufacturer ID. */ + uint16_t manufacturer_id; + /* Device ID. */ + uint16_t device_id; + /* Sector size of the NVRAM device. */ + uint32_t sector_size; + /* Total size, in bytes of the NVRAM device. */ + uint32_t nvram_size; + uint32_t reserved_size; + /* Available size that can be used, in bytes. Available size is the NVRAM size take away the used size and reserved size. */ + uint32_t available_size; + /* This field represents the major version of NVM cfg */ + uint8_t nvm_cfg_ver_maj; + /* This field represents the minor version of NVM cfg */ + uint8_t nvm_cfg_ver_min; + /* This field represents the update version of NVM cfg */ + uint8_t nvm_cfg_ver_upd; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/************************** + * hwrm_nvm_mod_dir_entry * + **************************/ + + +/* hwrm_nvm_mod_dir_entry_input (size:256b/32B) */ +struct hwrm_nvm_mod_dir_entry_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + uint32_t enables; + /* + * This bit must be '1' for the checksum field to be + * configured. + */ + #define HWRM_NVM_MOD_DIR_ENTRY_INPUT_ENABLES_CHECKSUM UINT32_C(0x1) + /* Directory Entry Index */ + uint16_t dir_idx; + /* + * Directory ordinal. + * The (0-based) instance of this Directory Type. + */ + uint16_t dir_ordinal; + /* The Directory Entry Extension flags (see BNX_DIR_EXT_* for extension flag definitions). */ + uint16_t dir_ext; + /* Directory Entry Attribute flags (see BNX_DIR_ATTR_* for attribute flag definitions). */ + uint16_t dir_attr; + /* + * If valid, then this field updates the checksum + * value of the content in the directory entry. + */ + uint32_t checksum; +} __rte_packed; + +/* hwrm_nvm_mod_dir_entry_output (size:128b/16B) */ +struct hwrm_nvm_mod_dir_entry_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. 
This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/************************** + * hwrm_nvm_verify_update * + **************************/ + + +/* hwrm_nvm_verify_update_input (size:192b/24B) */ +struct hwrm_nvm_verify_update_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* Directory Entry Type, to be verified. */ + uint16_t dir_type; + /* + * Directory ordinal. + * The instance of the Directory Type to be verified. + */ + uint16_t dir_ordinal; + /* + * The Directory Entry Extension flags. + * The "UPDATE" extension flag must be set in this value. + * A corresponding directory entry with the same type and ordinal values but *without* + * the "UPDATE" extension flag must also exist. The other flags of the extension must + * be identical between the active and update entries. + */ + uint16_t dir_ext; + uint8_t unused_0[2]; +} __rte_packed; + +/* hwrm_nvm_verify_update_output (size:128b/16B) */ +struct hwrm_nvm_verify_update_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/*************************** + * hwrm_nvm_install_update * + ***************************/ + + +/* hwrm_nvm_install_update_input (size:192b/24B) */ +struct hwrm_nvm_install_update_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. 
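+ *
+ * For illustration only, with a hypothetical counter name that is not
+ * part of this definition: a driver usually fills this field from a
+ * monotonically increasing per-channel counter and matches it against
+ * the seq_id echoed back in the response, e.g.
+ *     req.seq_id = rte_cpu_to_le_16(hwrm_seq_counter++);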
+ */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* + * Installation type. If the value 3 through 0xffff is used, + * only packaged items with that type value will be installed and + * conditional installation directives for those packaged items + * will be over-ridden (i.e. 'create' or 'replace' will be treated + * as 'install'). + */ + uint32_t install_type; + /* + * Perform a normal package installation. Conditional installation + * directives (e.g. 'create' and 'replace') of packaged items + * will be followed. + */ + #define HWRM_NVM_INSTALL_UPDATE_INPUT_INSTALL_TYPE_NORMAL UINT32_C(0x0) + /* + * Install all packaged items regardless of installation directive + * (i.e. treat all packaged items as though they have an installation + * directive of 'install'). + */ + #define HWRM_NVM_INSTALL_UPDATE_INPUT_INSTALL_TYPE_ALL \ + UINT32_C(0xffffffff) + #define HWRM_NVM_INSTALL_UPDATE_INPUT_INSTALL_TYPE_LAST \ + HWRM_NVM_INSTALL_UPDATE_INPUT_INSTALL_TYPE_ALL + uint16_t flags; + /* If set to 1, then securely erase all unused locations in persistent storage. */ + #define HWRM_NVM_INSTALL_UPDATE_INPUT_FLAGS_ERASE_UNUSED_SPACE \ + UINT32_C(0x1) + /* + * If set to 1, then unspecified images, images not in the package file, will be safely deleted. + * When combined with erase_unused_space then unspecified images will be securely erased. + */ + #define HWRM_NVM_INSTALL_UPDATE_INPUT_FLAGS_REMOVE_UNUSED_PKG \ + UINT32_C(0x2) + /* + * If set to 1, FW will defragment the NVM if defragmentation is required for the update. + * Allow additional time for this command to complete if this bit is set to 1. + */ + #define HWRM_NVM_INSTALL_UPDATE_INPUT_FLAGS_ALLOWED_TO_DEFRAG \ + UINT32_C(0x4) + uint8_t unused_0[2]; +} __rte_packed; + +/* hwrm_nvm_install_update_output (size:192b/24B) */ +struct hwrm_nvm_install_update_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* + * Bit-mask of successfully installed items. + * Bit-0 corresponding to the first packaged item, Bit-1 for the second item, etc. + * A value of 0 indicates that no items were successfully installed. + */ + uint64_t installed_items; + /* result is 8 b */ + uint8_t result; + /* There was no problem with the package installation. */ + #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESULT_SUCCESS UINT32_C(0x0) + #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESULT_LAST \ + HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESULT_SUCCESS + /* problem_item is 8 b */ + uint8_t problem_item; + /* There was no problem with any packaged items. */ + #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_PROBLEM_ITEM_NONE \ + UINT32_C(0x0) + /* There was a problem with the NVM package itself. 
*/ + #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_PROBLEM_ITEM_PACKAGE \ + UINT32_C(0xff) + #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_PROBLEM_ITEM_LAST \ + HWRM_NVM_INSTALL_UPDATE_OUTPUT_PROBLEM_ITEM_PACKAGE + /* reset_required is 8 b */ + uint8_t reset_required; + /* + * No reset is required for installed/updated firmware or + * microcode to take effect. + */ + #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESET_REQUIRED_NONE \ + UINT32_C(0x0) + /* + * A PCIe reset (e.g. system reboot) is + * required for newly installed/updated firmware or + * microcode to take effect. + */ + #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESET_REQUIRED_PCI \ + UINT32_C(0x1) + /* + * A controller power reset (e.g. system power-cycle) is + * required for newly installed/updated firmware or + * microcode to take effect. Some newly installed/updated + * firmware or microcode may still take effect upon the + * next PCIe reset. + */ + #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESET_REQUIRED_POWER \ + UINT32_C(0x2) + #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESET_REQUIRED_LAST \ + HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESET_REQUIRED_POWER + uint8_t unused_0[4]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/* hwrm_nvm_install_update_cmd_err (size:64b/8B) */ +struct hwrm_nvm_install_update_cmd_err { + /* + * command specific error codes that goes to + * the cmd_err field in Common HWRM Error Response. + */ + uint8_t code; + /* Unknown error */ + #define HWRM_NVM_INSTALL_UPDATE_CMD_ERR_CODE_UNKNOWN UINT32_C(0x0) + /* Unable to complete operation due to fragmentation */ + #define HWRM_NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR UINT32_C(0x1) + /* nvm is completely full. */ + #define HWRM_NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE UINT32_C(0x2) + #define HWRM_NVM_INSTALL_UPDATE_CMD_ERR_CODE_LAST \ + HWRM_NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE + uint8_t unused_0[7]; +} __rte_packed; + +/****************** + * hwrm_nvm_flush * + ******************/ + + +/* hwrm_nvm_flush_input (size:128b/16B) */ +struct hwrm_nvm_flush_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; +} __rte_packed; + +/* hwrm_nvm_flush_output (size:128b/16B) */ +struct hwrm_nvm_flush_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. 
*/ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/* hwrm_nvm_flush_cmd_err (size:64b/8B) */ +struct hwrm_nvm_flush_cmd_err { + /* + * command specific error codes that goes to + * the cmd_err field in Common HWRM Error Response. + */ + uint8_t code; + /* Unknown error */ + #define HWRM_NVM_FLUSH_CMD_ERR_CODE_UNKNOWN UINT32_C(0x0) + /* flush could not be performed */ + #define HWRM_NVM_FLUSH_CMD_ERR_CODE_FAIL UINT32_C(0x1) + #define HWRM_NVM_FLUSH_CMD_ERR_CODE_LAST \ + HWRM_NVM_FLUSH_CMD_ERR_CODE_FAIL + uint8_t unused_0[7]; +} __rte_packed; + +/************************* + * hwrm_nvm_get_variable * + *************************/ + + +/* hwrm_nvm_get_variable_input (size:320b/40B) */ +struct hwrm_nvm_get_variable_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* + * This is the host address where + * nvm variable will be stored + */ + uint64_t dest_data_addr; + /* size of data in bits */ + uint16_t data_len; + /* nvm cfg option number */ + uint16_t option_num; + /* reserved. */ + #define HWRM_NVM_GET_VARIABLE_INPUT_OPTION_NUM_RSVD_0 UINT32_C(0x0) + /* reserved. */ + #define HWRM_NVM_GET_VARIABLE_INPUT_OPTION_NUM_RSVD_FFFF \ + UINT32_C(0xffff) + #define HWRM_NVM_GET_VARIABLE_INPUT_OPTION_NUM_LAST \ + HWRM_NVM_GET_VARIABLE_INPUT_OPTION_NUM_RSVD_FFFF + /* + * Number of dimensions for this nvm configuration variable. + * This value indicates how many of the indexN values to use. + * A value of 0 means that none of the indexN values are valid. + * A value of 1 requires at index0 is valued, a value of 2 + * requires that index0 and index1 are valid, and so forth + */ + uint16_t dimensions; + /* index for the 1st dimensions */ + uint16_t index_0; + /* index for the 2nd dimensions */ + uint16_t index_1; + /* index for the 3rd dimensions */ + uint16_t index_2; + /* index for the 4th dimensions */ + uint16_t index_3; + uint8_t flags; + /* + * When this bit is set to 1, the factory default value will be returned, + * 0 returns the operational value. 
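+ *
+ * For example (illustrative usage, not part of the definition): a
+ * caller reading the operational value leaves flags at 0, while a
+ * caller reading the factory default would set
+ *     req.flags |= HWRM_NVM_GET_VARIABLE_INPUT_FLAGS_FACTORY_DFLT;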
+ */ + #define HWRM_NVM_GET_VARIABLE_INPUT_FLAGS_FACTORY_DFLT \ + UINT32_C(0x1) + uint8_t unused_0; +} __rte_packed; + +/* hwrm_nvm_get_variable_output (size:128b/16B) */ +struct hwrm_nvm_get_variable_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* size of data of the actual variable retrieved in bits */ + uint16_t data_len; + /* + * option_num is the option number for the data retrieved. It is possible in the + * future that the option number returned would be different than requested. This + * condition could occur if an option is deprecated and a new option id is defined + * with similar characteristics, but has a slightly different definition. This + * also makes it convenient for the caller to identify the variable result with + * the option id from the response. + */ + uint16_t option_num; + /* reserved. */ + #define HWRM_NVM_GET_VARIABLE_OUTPUT_OPTION_NUM_RSVD_0 UINT32_C(0x0) + /* reserved. */ + #define HWRM_NVM_GET_VARIABLE_OUTPUT_OPTION_NUM_RSVD_FFFF \ + UINT32_C(0xffff) + #define HWRM_NVM_GET_VARIABLE_OUTPUT_OPTION_NUM_LAST \ + HWRM_NVM_GET_VARIABLE_OUTPUT_OPTION_NUM_RSVD_FFFF + uint8_t unused_0[3]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/* hwrm_nvm_get_variable_cmd_err (size:64b/8B) */ +struct hwrm_nvm_get_variable_cmd_err { + /* + * command specific error codes that goes to + * the cmd_err field in Common HWRM Error Response. + */ + uint8_t code; + /* Unknown error */ + #define HWRM_NVM_GET_VARIABLE_CMD_ERR_CODE_UNKNOWN UINT32_C(0x0) + /* variable does not exist */ + #define HWRM_NVM_GET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST UINT32_C(0x1) + /* configuration is corrupted and the variable cannot be saved */ + #define HWRM_NVM_GET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR UINT32_C(0x2) + /* length specified is too small */ + #define HWRM_NVM_GET_VARIABLE_CMD_ERR_CODE_LEN_TOO_SHORT UINT32_C(0x3) + #define HWRM_NVM_GET_VARIABLE_CMD_ERR_CODE_LAST \ + HWRM_NVM_GET_VARIABLE_CMD_ERR_CODE_LEN_TOO_SHORT + uint8_t unused_0[7]; +} __rte_packed; + +/************************* + * hwrm_nvm_set_variable * + *************************/ + + +/* hwrm_nvm_set_variable_input (size:320b/40B) */ +struct hwrm_nvm_set_variable_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. 
This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* + * This is the host address where + * nvm variable will be copied from + */ + uint64_t src_data_addr; + /* size of data in bits */ + uint16_t data_len; + /* nvm cfg option number */ + uint16_t option_num; + /* reserved. */ + #define HWRM_NVM_SET_VARIABLE_INPUT_OPTION_NUM_RSVD_0 UINT32_C(0x0) + /* reserved. */ + #define HWRM_NVM_SET_VARIABLE_INPUT_OPTION_NUM_RSVD_FFFF \ + UINT32_C(0xffff) + #define HWRM_NVM_SET_VARIABLE_INPUT_OPTION_NUM_LAST \ + HWRM_NVM_SET_VARIABLE_INPUT_OPTION_NUM_RSVD_FFFF + /* + * Number of dimensions for this nvm configuration variable. + * This value indicates how many of the indexN values to use. + * A value of 0 means that none of the indexN values are valid. + * A value of 1 requires at index0 is valued, a value of 2 + * requires that index0 and index1 are valid, and so forth + */ + uint16_t dimensions; + /* index for the 1st dimensions */ + uint16_t index_0; + /* index for the 2nd dimensions */ + uint16_t index_1; + /* index for the 3rd dimensions */ + uint16_t index_2; + /* index for the 4th dimensions */ + uint16_t index_3; + uint8_t flags; + /* When this bit is 1, flush internal cache after this write operation (see hwrm_nvm_flush command.) */ + #define HWRM_NVM_SET_VARIABLE_INPUT_FLAGS_FORCE_FLUSH \ + UINT32_C(0x1) + /* encryption method */ + #define HWRM_NVM_SET_VARIABLE_INPUT_FLAGS_ENCRYPT_MODE_MASK \ + UINT32_C(0xe) + #define HWRM_NVM_SET_VARIABLE_INPUT_FLAGS_ENCRYPT_MODE_SFT 1 + /* No encryption. */ + #define HWRM_NVM_SET_VARIABLE_INPUT_FLAGS_ENCRYPT_MODE_NONE \ + (UINT32_C(0x0) << 1) + /* one-way encryption. */ + #define HWRM_NVM_SET_VARIABLE_INPUT_FLAGS_ENCRYPT_MODE_HMAC_SHA1 \ + (UINT32_C(0x1) << 1) + /* symmetric AES256 encryption. */ + #define HWRM_NVM_SET_VARIABLE_INPUT_FLAGS_ENCRYPT_MODE_AES256 \ + (UINT32_C(0x2) << 1) + /* SHA1 digest appended to plaintext contents, for authentication */ + #define HWRM_NVM_SET_VARIABLE_INPUT_FLAGS_ENCRYPT_MODE_HMAC_SHA1_AUTH \ + (UINT32_C(0x3) << 1) + #define HWRM_NVM_SET_VARIABLE_INPUT_FLAGS_ENCRYPT_MODE_LAST \ + HWRM_NVM_SET_VARIABLE_INPUT_FLAGS_ENCRYPT_MODE_HMAC_SHA1_AUTH + #define HWRM_NVM_SET_VARIABLE_INPUT_FLAGS_FLAGS_UNUSED_0_MASK \ + UINT32_C(0x70) + #define HWRM_NVM_SET_VARIABLE_INPUT_FLAGS_FLAGS_UNUSED_0_SFT 4 + /* When this bit is 1, update the factory default region */ + #define HWRM_NVM_SET_VARIABLE_INPUT_FLAGS_FACTORY_DEFAULT \ + UINT32_C(0x80) + uint8_t unused_0; +} __rte_packed; + +/* hwrm_nvm_set_variable_output (size:128b/16B) */ +struct hwrm_nvm_set_variable_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. 
+ */ + uint8_t valid; +} __rte_packed; + +/* hwrm_nvm_set_variable_cmd_err (size:64b/8B) */ +struct hwrm_nvm_set_variable_cmd_err { + /* + * command specific error codes that goes to + * the cmd_err field in Common HWRM Error Response. + */ + uint8_t code; + /* Unknown error */ + #define HWRM_NVM_SET_VARIABLE_CMD_ERR_CODE_UNKNOWN UINT32_C(0x0) + /* variable does not exist */ + #define HWRM_NVM_SET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST UINT32_C(0x1) + /* configuration is corrupted and the variable cannot be saved */ + #define HWRM_NVM_SET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR UINT32_C(0x2) + #define HWRM_NVM_SET_VARIABLE_CMD_ERR_CODE_LAST \ + HWRM_NVM_SET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR + uint8_t unused_0[7]; +} __rte_packed; + +/**************************** + * hwrm_nvm_validate_option * + ****************************/ + + +/* hwrm_nvm_validate_option_input (size:320b/40B) */ +struct hwrm_nvm_validate_option_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* + * This is the host address where + * nvm variable will be copied from + */ + uint64_t src_data_addr; + /* size of data in bits */ + uint16_t data_len; + /* nvm cfg option number */ + uint16_t option_num; + /* reserved. */ + #define HWRM_NVM_VALIDATE_OPTION_INPUT_OPTION_NUM_RSVD_0 \ + UINT32_C(0x0) + /* reserved. */ + #define HWRM_NVM_VALIDATE_OPTION_INPUT_OPTION_NUM_RSVD_FFFF \ + UINT32_C(0xffff) + #define HWRM_NVM_VALIDATE_OPTION_INPUT_OPTION_NUM_LAST \ + HWRM_NVM_VALIDATE_OPTION_INPUT_OPTION_NUM_RSVD_FFFF + /* + * Number of dimensions for this nvm configuration variable. + * This value indicates how many of the indexN values to use. + * A value of 0 means that none of the indexN values are valid. + * A value of 1 requires at index0 is valued, a value of 2 + * requires that index0 and index1 are valid, and so forth + */ + uint16_t dimensions; + /* index for the 1st dimensions */ + uint16_t index_0; + /* index for the 2nd dimensions */ + uint16_t index_1; + /* index for the 3rd dimensions */ + uint16_t index_2; + /* index for the 4th dimensions */ + uint16_t index_3; + uint8_t unused_0[2]; +} __rte_packed; + +/* hwrm_nvm_validate_option_output (size:128b/16B) */ +struct hwrm_nvm_validate_option_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t result; + /* indicates that the value provided for the option is not matching with the saved data. 
*/ + #define HWRM_NVM_VALIDATE_OPTION_OUTPUT_RESULT_NOT_MATCH UINT32_C(0x0) + /* indicates that the value provided for the option is matching the saved data. */ + #define HWRM_NVM_VALIDATE_OPTION_OUTPUT_RESULT_MATCH UINT32_C(0x1) + #define HWRM_NVM_VALIDATE_OPTION_OUTPUT_RESULT_LAST \ + HWRM_NVM_VALIDATE_OPTION_OUTPUT_RESULT_MATCH + uint8_t unused_0[6]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/* hwrm_nvm_validate_option_cmd_err (size:64b/8B) */ +struct hwrm_nvm_validate_option_cmd_err { + /* + * command specific error codes that goes to + * the cmd_err field in Common HWRM Error Response. + */ + uint8_t code; + /* Unknown error */ + #define HWRM_NVM_VALIDATE_OPTION_CMD_ERR_CODE_UNKNOWN UINT32_C(0x0) + #define HWRM_NVM_VALIDATE_OPTION_CMD_ERR_CODE_LAST \ + HWRM_NVM_VALIDATE_OPTION_CMD_ERR_CODE_UNKNOWN + uint8_t unused_0[7]; +} __rte_packed; + +/***************** + * hwrm_fw_reset * + ******************/ + + +/* hwrm_fw_reset_input (size:192b/24B) */ +struct hwrm_fw_reset_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* Type of embedded processor. */ + uint8_t embedded_proc_type; + /* Boot Processor */ + #define HWRM_FW_RESET_INPUT_EMBEDDED_PROC_TYPE_BOOT \ + UINT32_C(0x0) + /* Management Processor */ + #define HWRM_FW_RESET_INPUT_EMBEDDED_PROC_TYPE_MGMT \ + UINT32_C(0x1) + /* Network control processor */ + #define HWRM_FW_RESET_INPUT_EMBEDDED_PROC_TYPE_NETCTRL \ + UINT32_C(0x2) + /* RoCE control processor */ + #define HWRM_FW_RESET_INPUT_EMBEDDED_PROC_TYPE_ROCE \ + UINT32_C(0x3) + /* + * Host (in multi-host environment): This is only valid if requester is IPC. + * Reinit host hardware resources and PCIe. + */ + #define HWRM_FW_RESET_INPUT_EMBEDDED_PROC_TYPE_HOST \ + UINT32_C(0x4) + /* AP processor complex (in multi-host environment). Use host_idx to control which core is reset */ + #define HWRM_FW_RESET_INPUT_EMBEDDED_PROC_TYPE_AP \ + UINT32_C(0x5) + /* Reset all blocks of the chip (including all processors) */ + #define HWRM_FW_RESET_INPUT_EMBEDDED_PROC_TYPE_CHIP \ + UINT32_C(0x6) + /* + * Host (in multi-host environment): This is only valid if requester is IPC. + * Reinit host hardware resources. 
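+ *
+ * Aside on this field as a whole (an illustrative assumption about
+ * common usage, not part of the definition): a tool requesting a full
+ * device reset at the earliest safe point would typically select
+ *     req.embedded_proc_type =
+ *             HWRM_FW_RESET_INPUT_EMBEDDED_PROC_TYPE_CHIP;
+ *     req.selfrst_status =
+ *             HWRM_FW_RESET_INPUT_SELFRST_STATUS_SELFRSTASAP;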
+ */ + #define HWRM_FW_RESET_INPUT_EMBEDDED_PROC_TYPE_HOST_RESOURCE_REINIT \ + UINT32_C(0x7) + #define HWRM_FW_RESET_INPUT_EMBEDDED_PROC_TYPE_LAST \ + HWRM_FW_RESET_INPUT_EMBEDDED_PROC_TYPE_HOST_RESOURCE_REINIT + /* Type of self reset. */ + uint8_t selfrst_status; + /* No Self Reset */ + #define HWRM_FW_RESET_INPUT_SELFRST_STATUS_SELFRSTNONE \ + UINT32_C(0x0) + /* Self Reset as soon as possible to do so safely */ + #define HWRM_FW_RESET_INPUT_SELFRST_STATUS_SELFRSTASAP \ + UINT32_C(0x1) + /* Self Reset on PCIe Reset */ + #define HWRM_FW_RESET_INPUT_SELFRST_STATUS_SELFRSTPCIERST \ + UINT32_C(0x2) + /* Self Reset immediately after notification to all clients. */ + #define HWRM_FW_RESET_INPUT_SELFRST_STATUS_SELFRSTIMMEDIATE \ + UINT32_C(0x3) + #define HWRM_FW_RESET_INPUT_SELFRST_STATUS_LAST \ + HWRM_FW_RESET_INPUT_SELFRST_STATUS_SELFRSTIMMEDIATE + /* + * Indicate which host is being reset. 0 means first host. + * Only valid when embedded_proc_type is host in multihost + * environment + */ + uint8_t host_idx; + uint8_t flags; + /* + * When this bit is '1', then the core firmware initiates + * the reset only after graceful shut down of all registered instances. + * If not, the device will continue with the existing firmware. + */ + #define HWRM_FW_RESET_INPUT_FLAGS_RESET_GRACEFUL UINT32_C(0x1) + uint8_t unused_0[4]; +} __rte_packed; + +/* hwrm_fw_reset_output (size:128b/16B) */ +struct hwrm_fw_reset_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* Type of self reset. */ + uint8_t selfrst_status; + /* No Self Reset */ + #define HWRM_FW_RESET_OUTPUT_SELFRST_STATUS_SELFRSTNONE \ + UINT32_C(0x0) + /* Self Reset as soon as possible to do so safely */ + #define HWRM_FW_RESET_OUTPUT_SELFRST_STATUS_SELFRSTASAP \ + UINT32_C(0x1) + /* Self Reset on PCIe Reset */ + #define HWRM_FW_RESET_OUTPUT_SELFRST_STATUS_SELFRSTPCIERST \ + UINT32_C(0x2) + /* Self Reset immediately after notification to all clients. */ + #define HWRM_FW_RESET_OUTPUT_SELFRST_STATUS_SELFRSTIMMEDIATE \ + UINT32_C(0x3) + #define HWRM_FW_RESET_OUTPUT_SELFRST_STATUS_LAST \ + HWRM_FW_RESET_OUTPUT_SELFRST_STATUS_SELFRSTIMMEDIATE + uint8_t unused_0[6]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/********************** + * hwrm_port_ts_query * + ***********************/ + + +/* hwrm_port_ts_query_input (size:192b/24B) */ +struct hwrm_port_ts_query_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. 
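+ *
+ * Aside on this request as a whole (illustrative assumptions, not part
+ * of the definition): to read the free running timer rather than a
+ * captured PTP timestamp, a caller would typically set
+ *     req.flags = rte_cpu_to_le_32(
+ *             HWRM_PORT_TS_QUERY_INPUT_FLAGS_CURRENT_TIME);
+ *     req.port_id = rte_cpu_to_le_16(port_id);
+ * and read the two 32-bit words of ptp_msg_ts[] in the response as the
+ * low and high halves of a single 64-bit timestamp (assumed layout).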
+ */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + uint32_t flags; + /* + * Enumeration denoting the RX, TX type of the resource. + * This enumeration is used for resources that are similar for both + * TX and RX paths of the chip. + */ + #define HWRM_PORT_TS_QUERY_INPUT_FLAGS_PATH 0x1UL + /* tx path */ + #define HWRM_PORT_TS_QUERY_INPUT_FLAGS_PATH_TX 0x0UL + /* rx path */ + #define HWRM_PORT_TS_QUERY_INPUT_FLAGS_PATH_RX 0x1UL + #define HWRM_PORT_TS_QUERY_INPUT_FLAGS_PATH_LAST \ + HWRM_PORT_TS_QUERY_INPUT_FLAGS_PATH_RX + /* + * If set, the response includes the current value of the free + * running timer. + */ + #define HWRM_PORT_TS_QUERY_INPUT_FLAGS_CURRENT_TIME 0x2UL + /* Port ID of port that is being queried. */ + uint16_t port_id; + uint8_t unused_0[2]; +} __rte_packed; + +/* hwrm_port_ts_query_output (size:192b/24B) */ +struct hwrm_port_ts_query_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* + * Timestamp value of PTP message captured, or current value of + * free running timer. + */ + uint32_t ptp_msg_ts[2]; + /* Sequence ID of the PTP message captured. */ + uint16_t ptp_msg_seqid; + uint8_t unused_0[5]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/************************** + * hwrm_cfa_counter_qcaps * + **************************/ + + +/* hwrm_cfa_counter_qcaps_input (size:128b/16B) */ +struct hwrm_cfa_counter_qcaps_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. 
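+ *
+ * For illustration, one possible way a DPDK driver backs this field
+ * (the memzone name and size are placeholders, not part of the
+ * definition):
+ *     const struct rte_memzone *mz =
+ *             rte_memzone_reserve("hwrm_resp", 4096, SOCKET_ID_ANY, 0);
+ *     req.resp_addr = rte_cpu_to_le_64(mz->iova);
+ * i.e. a DMA-able, physically contiguous buffer whose bus address is
+ * passed to the firmware.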
+ */ + uint64_t resp_addr; +} __rte_packed; + +/* hwrm_cfa_counter_qcaps_output (size:576b/72B) */ +struct hwrm_cfa_counter_qcaps_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint32_t flags; + /* Enumeration denoting the supported CFA counter format. */ + #define HWRM_CFA_COUNTER_QCAPS_OUTPUT_FLAGS_COUNTER_FORMAT \ + UINT32_C(0x1) + /* CFA counter types are not supported. */ + #define HWRM_CFA_COUNTER_QCAPS_OUTPUT_FLAGS_COUNTER_FORMAT_NONE \ + UINT32_C(0x0) + /* 64-bit packet counters followed by 64-bit byte counters format. */ + #define HWRM_CFA_COUNTER_QCAPS_OUTPUT_FLAGS_COUNTER_FORMAT_64_BIT \ + UINT32_C(0x1) + #define HWRM_CFA_COUNTER_QCAPS_OUTPUT_FLAGS_COUNTER_FORMAT_LAST \ + HWRM_CFA_COUNTER_QCAPS_OUTPUT_FLAGS_COUNTER_FORMAT_64_BIT + uint32_t unused_0; + /* Minimum guaranteed number of flow counters supported for this function, in RX direction. */ + uint32_t min_rx_fc; + /* Maximum non-guaranteed number of flow counters supported for this function, in RX direction. */ + uint32_t max_rx_fc; + /* Minimum guaranteed number of flow counters supported for this function, in TX direction. */ + uint32_t min_tx_fc; + /* Maximum non-guaranteed number of flow counters supported for this function, in TX direction. */ + uint32_t max_tx_fc; + /* Minimum guaranteed number of extension flow counters supported for this function, in RX direction. */ + uint32_t min_rx_efc; + /* Maximum non-guaranteed number of extension flow counters supported for this function, in RX direction. */ + uint32_t max_rx_efc; + /* Minimum guaranteed number of extension flow counters supported for this function, in TX direction. */ + uint32_t min_tx_efc; + /* Maximum non-guaranteed number of extension flow counters supported for this function, in TX direction. */ + uint32_t max_tx_efc; + /* Minimum guaranteed number of meter drop counters supported for this function, in RX direction. */ + uint32_t min_rx_mdc; + /* Maximum non-guaranteed number of meter drop counters supported for this function, in RX direction. */ + uint32_t max_rx_mdc; + /* Minimum guaranteed number of meter drop counters supported for this function, in TX direction. */ + uint32_t min_tx_mdc; + /* Maximum non-guaranteed number of meter drop counters supported for this function, in TX direction. */ + uint32_t max_tx_mdc; + /* Maximum guaranteed number of flow counters which can be used during flow alloc. */ + uint32_t max_flow_alloc_fc; + uint8_t unused_1[3]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/************************ + * hwrm_cfa_counter_cfg * + ************************/ + + +/* hwrm_cfa_counter_cfg_input (size:256b/32B) */ +struct hwrm_cfa_counter_cfg_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. 
This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + uint16_t flags; + /* Enumeration denoting the configuration mode. */ + #define HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_CFG_MODE \ + UINT32_C(0x1) + /* Disable the configuration mode. */ + #define HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_CFG_MODE_DISABLE \ + UINT32_C(0x0) + /* Enable the configuration mode. */ + #define HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_CFG_MODE_ENABLE \ + UINT32_C(0x1) + #define HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_CFG_MODE_LAST \ + HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_CFG_MODE_ENABLE + /* Enumeration denoting the RX, TX type of the resource. */ + #define HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_PATH \ + UINT32_C(0x2) + /* Tx path. */ + #define HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_PATH_TX \ + (UINT32_C(0x0) << 1) + /* Rx path. */ + #define HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_PATH_RX \ + (UINT32_C(0x1) << 1) + #define HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_PATH_LAST \ + HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_PATH_RX + /* Enumeration denoting the data transfer mode. */ + #define HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_DATA_TRANSFER_MODE_MASK \ + UINT32_C(0xc) + #define HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_DATA_TRANSFER_MODE_SFT 2 + /* Push mode. */ + #define HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_DATA_TRANSFER_MODE_PUSH \ + (UINT32_C(0x0) << 2) + /* Pull mode. */ + #define HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_DATA_TRANSFER_MODE_PULL \ + (UINT32_C(0x1) << 2) + /* Pull on async update. */ + #define HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_DATA_TRANSFER_MODE_PULL_ASYNC \ + (UINT32_C(0x2) << 2) + #define HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_DATA_TRANSFER_MODE_LAST \ + HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_DATA_TRANSFER_MODE_PULL_ASYNC + uint16_t counter_type; + /* Flow counters. */ + #define HWRM_CFA_COUNTER_CFG_INPUT_COUNTER_TYPE_FC UINT32_C(0x0) + /* Extended flow counters. */ + #define HWRM_CFA_COUNTER_CFG_INPUT_COUNTER_TYPE_EFC UINT32_C(0x1) + /* Meter drop counters. */ + #define HWRM_CFA_COUNTER_CFG_INPUT_COUNTER_TYPE_MDC UINT32_C(0x2) + #define HWRM_CFA_COUNTER_CFG_INPUT_COUNTER_TYPE_LAST \ + HWRM_CFA_COUNTER_CFG_INPUT_COUNTER_TYPE_MDC + /* Ctx memory handle to be used for the counter. */ + uint16_t ctx_id; + /* Counter update cadence hint (only in Push mode). */ + uint16_t update_tmr_ms; + /* Total number of entries. */ + uint32_t num_entries; + uint32_t unused_0; +} __rte_packed; + +/* hwrm_cfa_counter_cfg_output (size:128b/16B) */ +struct hwrm_cfa_counter_cfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. 
+ * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/*************************** + * hwrm_cfa_counter_qstats * + ***************************/ + + +/* hwrm_cfa_counter_qstats_input (size:320b/40B) */ +struct hwrm_cfa_counter_qstats_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors + * * 0xFFFD - Reserved for user-space HWRM interface + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + uint16_t flags; + /* Enumeration denoting the RX, TX type of the resource. */ + #define HWRM_CFA_COUNTER_QSTATS_INPUT_FLAGS_PATH UINT32_C(0x1) + /* Tx path. */ + #define HWRM_CFA_COUNTER_QSTATS_INPUT_FLAGS_PATH_TX UINT32_C(0x0) + /* Rx path. */ + #define HWRM_CFA_COUNTER_QSTATS_INPUT_FLAGS_PATH_RX UINT32_C(0x1) + #define HWRM_CFA_COUNTER_QSTATS_INPUT_FLAGS_PATH_LAST \ + HWRM_CFA_COUNTER_QSTATS_INPUT_FLAGS_PATH_RX + uint16_t counter_type; + uint16_t input_flow_ctx_id; + uint16_t num_entries; + uint16_t delta_time_ms; + uint16_t meter_instance_id; + uint16_t mdc_ctx_id; + uint8_t unused_0[2]; + uint64_t expected_count; +} __rte_packed; + +/* hwrm_cfa_counter_qstats_output (size:128b/16B) */ +struct hwrm_cfa_counter_qstats_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __rte_packed; + +/* + * This structure is fixed at the beginning of the ChiMP SRAM (GRC + * offset: 0x31001F0). Host software is expected to read from this + * location for a defined signature. If it exists, the software can + * assume the presence of this structure and the validity of the + * FW_STATUS location in the next field. + */ +/* hcomm_status (size:64b/8B) */ +struct hcomm_status { + uint32_t sig_ver; + /* + * This field defines the version of the structure. The latest + * version value is 1. 
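 *
 * For illustration, host software that has read this 8-byte structure
 * from GRC offset HCOMM_STATUS_STRUCT_LOC might validate it roughly as
 * follows (a sketch only; `hs` is a hypothetical local copy of the
 * structure and `ver` a local variable):
 *
 *   if ((hs.sig_ver & HCOMM_STATUS_SIGNATURE_MASK) !=
 *       HCOMM_STATUS_SIGNATURE_VAL)
 *       return -ENODEV;   // signature absent, structure not present
 *   ver = (hs.sig_ver & HCOMM_STATUS_VER_MASK) >> HCOMM_STATUS_VER_SFT;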
+ */ + #define HCOMM_STATUS_VER_MASK UINT32_C(0xff) + #define HCOMM_STATUS_VER_SFT 0 + #define HCOMM_STATUS_VER_LATEST UINT32_C(0x1) + #define HCOMM_STATUS_VER_LAST HCOMM_STATUS_VER_LATEST + /* + * This field is to store the signature value to indicate the + * presence of the structure. + */ + #define HCOMM_STATUS_SIGNATURE_MASK UINT32_C(0xffffff00) + #define HCOMM_STATUS_SIGNATURE_SFT 8 + #define HCOMM_STATUS_SIGNATURE_VAL (UINT32_C(0x484353) << 8) + #define HCOMM_STATUS_SIGNATURE_LAST HCOMM_STATUS_SIGNATURE_VAL + uint32_t fw_status_loc; + #define HCOMM_STATUS_TRUE_ADDR_SPACE_MASK UINT32_C(0x3) + #define HCOMM_STATUS_TRUE_ADDR_SPACE_SFT 0 + /* PCIE configuration space */ + #define HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_PCIE_CFG UINT32_C(0x0) + /* GRC space */ + #define HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_GRC UINT32_C(0x1) + /* BAR0 space */ + #define HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_BAR0 UINT32_C(0x2) + /* BAR1 space */ + #define HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_BAR1 UINT32_C(0x3) + #define HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_LAST \ + HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_BAR1 + /* + * This offset where the fw_status register is located. The value + * is generally 4-byte aligned. + */ + #define HCOMM_STATUS_TRUE_OFFSET_MASK UINT32_C(0xfffffffc) + #define HCOMM_STATUS_TRUE_OFFSET_SFT 2 +} __rte_packed; +/* This is the GRC offset where the hcomm_status struct resides. */ +#define HCOMM_STATUS_STRUCT_LOC 0x31001F0UL + +#endif /* _HSI_STRUCT_DEF_DPDK_H_ */ diff --git a/src/spdk/dpdk/drivers/net/bnxt/meson.build b/src/spdk/dpdk/drivers/net/bnxt/meson.build new file mode 100644 index 000000000..59dda6932 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnxt/meson.build @@ -0,0 +1,50 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2018 Intel Corporation +# Copyright(c) 2020 Broadcom + +install_headers('rte_pmd_bnxt.h') + +includes += include_directories('tf_ulp') +includes += include_directories('tf_core') + +sources = files('bnxt_cpr.c', + 'bnxt_ethdev.c', + 'bnxt_filter.c', + 'bnxt_flow.c', + 'bnxt_hwrm.c', + 'bnxt_irq.c', + 'bnxt_ring.c', + 'bnxt_rxq.c', + 'bnxt_rxr.c', + 'bnxt_stats.c', + 'bnxt_txq.c', + 'bnxt_txr.c', + 'bnxt_util.c', + 'bnxt_vnic.c', + + 'tf_core/tf_core.c', + 'tf_core/bitalloc.c', + 'tf_core/tf_msg.c', + 'tf_core/rand.c', + 'tf_core/stack.c', + 'tf_core/tf_em.c', + 'tf_core/tf_rm.c', + 'tf_core/tf_tbl.c', + 'tf_core/tfp.c', + + 'tf_ulp/bnxt_ulp.c', + 'tf_ulp/ulp_mark_mgr.c', + 'tf_ulp/ulp_flow_db.c', + 'tf_ulp/ulp_template_db.c', + 'tf_ulp/ulp_utils.c', + 'tf_ulp/ulp_mapper.c', + 'tf_ulp/ulp_matcher.c', + 'tf_ulp/ulp_rte_parser.c', + 'tf_ulp/bnxt_ulp_flow.c', + 'tf_ulp/ulp_port_db.c', + + 'rte_pmd_bnxt.c') + +if arch_subdir == 'x86' + sources += files('bnxt_rxtx_vec_sse.c') +endif diff --git a/src/spdk/dpdk/drivers/net/bnxt/rte_pmd_bnxt.c b/src/spdk/dpdk/drivers/net/bnxt/rte_pmd_bnxt.c new file mode 100644 index 000000000..eafc1d386 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnxt/rte_pmd_bnxt.c @@ -0,0 +1,910 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017-2018 Broadcom + * All rights reserved. 
+ */ + +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "bnxt.h" +#include "bnxt_filter.h" +#include "bnxt_hwrm.h" +#include "bnxt_vnic.h" +#include "rte_pmd_bnxt.h" +#include "hsi_struct_def_dpdk.h" + +int bnxt_rcv_msg_from_vf(struct bnxt *bp, uint16_t vf_id, void *msg) +{ + struct rte_pmd_bnxt_mb_event_param ret_param; + + ret_param.retval = RTE_PMD_BNXT_MB_EVENT_PROCEED; + ret_param.vf_id = vf_id; + ret_param.msg = msg; + + _rte_eth_dev_callback_process(bp->eth_dev, RTE_ETH_EVENT_VF_MBOX, + &ret_param); + + /* Default to approve */ + if (ret_param.retval == RTE_PMD_BNXT_MB_EVENT_PROCEED) + ret_param.retval = RTE_PMD_BNXT_MB_EVENT_NOOP_ACK; + + return ret_param.retval == RTE_PMD_BNXT_MB_EVENT_NOOP_ACK ? + true : false; +} + +int rte_pmd_bnxt_set_tx_loopback(uint16_t port, uint8_t on) +{ + struct rte_eth_dev *eth_dev; + struct bnxt *bp; + int rc; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + if (on > 1) + return -EINVAL; + + eth_dev = &rte_eth_devices[port]; + if (!is_bnxt_supported(eth_dev)) + return -ENOTSUP; + + bp = eth_dev->data->dev_private; + + if (!BNXT_PF(bp)) { + PMD_DRV_LOG(ERR, + "Attempt to set Tx loopback on non-PF port %d!\n", + port); + return -ENOTSUP; + } + + if (on) + bp->pf->evb_mode = BNXT_EVB_MODE_VEB; + else + bp->pf->evb_mode = BNXT_EVB_MODE_VEPA; + + rc = bnxt_hwrm_pf_evb_mode(bp); + + return rc; +} + +static void +rte_pmd_bnxt_set_all_queues_drop_en_cb(struct bnxt_vnic_info *vnic, void *onptr) +{ + uint8_t *on = onptr; + vnic->bd_stall = !(*on); +} + +int rte_pmd_bnxt_set_all_queues_drop_en(uint16_t port, uint8_t on) +{ + struct rte_eth_dev *eth_dev; + struct bnxt *bp; + uint32_t i; + int rc = -EINVAL; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + if (on > 1) + return -EINVAL; + + eth_dev = &rte_eth_devices[port]; + if (!is_bnxt_supported(eth_dev)) + return -ENOTSUP; + + bp = eth_dev->data->dev_private; + + if (!BNXT_PF(bp)) { + PMD_DRV_LOG(ERR, + "Attempt to set all queues drop on non-PF port!\n"); + return -ENOTSUP; + } + + if (bp->vnic_info == NULL) + return -ENODEV; + + /* Stall PF */ + for (i = 0; i < bp->nr_vnics; i++) { + bp->vnic_info[i].bd_stall = !on; + rc = bnxt_hwrm_vnic_cfg(bp, &bp->vnic_info[i]); + if (rc) { + PMD_DRV_LOG(ERR, "Failed to update PF VNIC %d.\n", i); + return rc; + } + } + + /* Stall all active VFs */ + for (i = 0; i < bp->pf->active_vfs; i++) { + rc = bnxt_hwrm_func_vf_vnic_query_and_config(bp, i, + rte_pmd_bnxt_set_all_queues_drop_en_cb, &on, + bnxt_hwrm_vnic_cfg); + if (rc) { + PMD_DRV_LOG(ERR, "Failed to update VF VNIC %d.\n", i); + break; + } + } + + return rc; +} + +int rte_pmd_bnxt_set_vf_mac_addr(uint16_t port, uint16_t vf, + struct rte_ether_addr *mac_addr) +{ + struct rte_eth_dev *dev; + struct rte_eth_dev_info dev_info; + struct bnxt *bp; + int rc; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + if (!is_bnxt_supported(dev)) + return -ENOTSUP; + + rc = rte_eth_dev_info_get(port, &dev_info); + if (rc != 0) { + PMD_DRV_LOG(ERR, + "Error during getting device (port %u) info: %s\n", + port, strerror(-rc)); + + return rc; + } + + bp = dev->data->dev_private; + + if (vf >= dev_info.max_vfs || mac_addr == NULL) + return -EINVAL; + + if (!BNXT_PF(bp)) { + PMD_DRV_LOG(ERR, + "Attempt to set VF %d mac address on non-PF port %d!\n", + vf, port); + return -ENOTSUP; + } + + rc = bnxt_hwrm_func_vf_mac(bp, vf, (uint8_t *)mac_addr); + + return rc; +} + +int rte_pmd_bnxt_set_vf_rate_limit(uint16_t port, uint16_t vf, + uint16_t tx_rate, 
uint64_t q_msk) +{ + struct rte_eth_dev *eth_dev; + struct rte_eth_dev_info dev_info; + struct bnxt *bp; + uint16_t tot_rate = 0; + uint64_t idx; + int rc; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + eth_dev = &rte_eth_devices[port]; + if (!is_bnxt_supported(eth_dev)) + return -ENOTSUP; + + rc = rte_eth_dev_info_get(port, &dev_info); + if (rc != 0) { + PMD_DRV_LOG(ERR, + "Error during getting device (port %u) info: %s\n", + port, strerror(-rc)); + + return rc; + } + bp = eth_dev->data->dev_private; + + if (!bp->pf->active_vfs) + return -EINVAL; + + if (vf >= bp->pf->max_vfs) + return -EINVAL; + + /* Add up the per queue BW and configure MAX BW of the VF */ + for (idx = 0; idx < 64; idx++) { + if ((1ULL << idx) & q_msk) + tot_rate += tx_rate; + } + + /* Requested BW can't be greater than link speed */ + if (tot_rate > eth_dev->data->dev_link.link_speed) { + PMD_DRV_LOG(ERR, "Rate > Link speed. Set to %d\n", tot_rate); + return -EINVAL; + } + + /* Requested BW already configured */ + if (tot_rate == bp->pf->vf_info[vf].max_tx_rate) + return 0; + + rc = bnxt_hwrm_func_bw_cfg(bp, vf, tot_rate, + HWRM_FUNC_CFG_INPUT_ENABLES_MAX_BW); + + if (!rc) + bp->pf->vf_info[vf].max_tx_rate = tot_rate; + + return rc; +} + +int rte_pmd_bnxt_set_vf_mac_anti_spoof(uint16_t port, uint16_t vf, uint8_t on) +{ + struct rte_eth_dev_info dev_info; + struct rte_eth_dev *dev; + uint32_t func_flags; + struct bnxt *bp; + int rc; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + if (on > 1) + return -EINVAL; + + dev = &rte_eth_devices[port]; + if (!is_bnxt_supported(dev)) + return -ENOTSUP; + + rc = rte_eth_dev_info_get(port, &dev_info); + if (rc != 0) { + PMD_DRV_LOG(ERR, + "Error during getting device (port %u) info: %s\n", + port, strerror(-rc)); + + return rc; + } + bp = dev->data->dev_private; + + if (!BNXT_PF(bp)) { + PMD_DRV_LOG(ERR, + "Attempt to set mac spoof on non-PF port %d!\n", port); + return -EINVAL; + } + + if (vf >= dev_info.max_vfs) + return -EINVAL; + + /* Prev setting same as new setting. 
*/ + if (on == bp->pf->vf_info[vf].mac_spoof_en) + return 0; + + func_flags = bp->pf->vf_info[vf].func_cfg_flags; + func_flags &= ~(HWRM_FUNC_CFG_INPUT_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE | + HWRM_FUNC_CFG_INPUT_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE); + + if (on) + func_flags |= + HWRM_FUNC_CFG_INPUT_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE; + else + func_flags |= + HWRM_FUNC_CFG_INPUT_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE; + + rc = bnxt_hwrm_func_cfg_vf_set_flags(bp, vf, func_flags); + if (!rc) { + bp->pf->vf_info[vf].mac_spoof_en = on; + bp->pf->vf_info[vf].func_cfg_flags = func_flags; + } + + return rc; +} + +int rte_pmd_bnxt_set_vf_vlan_anti_spoof(uint16_t port, uint16_t vf, uint8_t on) +{ + struct rte_eth_dev_info dev_info; + struct rte_eth_dev *dev; + struct bnxt *bp; + int rc; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + if (on > 1) + return -EINVAL; + + dev = &rte_eth_devices[port]; + if (!is_bnxt_supported(dev)) + return -ENOTSUP; + + rc = rte_eth_dev_info_get(port, &dev_info); + if (rc != 0) { + PMD_DRV_LOG(ERR, + "Error during getting device (port %u) info: %s\n", + port, strerror(-rc)); + + return rc; + } + bp = dev->data->dev_private; + + if (!BNXT_PF(bp)) { + PMD_DRV_LOG(ERR, + "Attempt to set VLAN spoof on non-PF port %d!\n", port); + return -EINVAL; + } + + if (vf >= dev_info.max_vfs) + return -EINVAL; + + rc = bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(bp, vf, on); + if (!rc) { + bp->pf->vf_info[vf].vlan_spoof_en = on; + if (on) { + if (bnxt_hwrm_cfa_vlan_antispoof_cfg(bp, + bp->pf->first_vf_id + vf, + bp->pf->vf_info[vf].vlan_count, + bp->pf->vf_info[vf].vlan_as_table)) + rc = -1; + } + } else { + PMD_DRV_LOG(ERR, "Failed to update VF VNIC %d.\n", vf); + } + + return rc; +} + +static void +rte_pmd_bnxt_set_vf_vlan_stripq_cb(struct bnxt_vnic_info *vnic, void *onptr) +{ + uint8_t *on = onptr; + vnic->vlan_strip = *on; +} + +int +rte_pmd_bnxt_set_vf_vlan_stripq(uint16_t port, uint16_t vf, uint8_t on) +{ + struct rte_eth_dev *dev; + struct rte_eth_dev_info dev_info; + struct bnxt *bp; + int rc; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + if (!is_bnxt_supported(dev)) + return -ENOTSUP; + + rc = rte_eth_dev_info_get(port, &dev_info); + if (rc != 0) { + PMD_DRV_LOG(ERR, + "Error during getting device (port %u) info: %s\n", + port, strerror(-rc)); + + return rc; + } + bp = dev->data->dev_private; + + if (vf >= dev_info.max_vfs) + return -EINVAL; + + if (!BNXT_PF(bp)) { + PMD_DRV_LOG(ERR, + "Attempt to set VF %d stripq on non-PF port %d!\n", + vf, port); + return -ENOTSUP; + } + + rc = bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf, + rte_pmd_bnxt_set_vf_vlan_stripq_cb, &on, + bnxt_hwrm_vnic_cfg); + if (rc) + PMD_DRV_LOG(ERR, "Failed to update VF VNIC %d.\n", vf); + + return rc; +} + +int rte_pmd_bnxt_set_vf_rxmode(uint16_t port, uint16_t vf, + uint16_t rx_mask, uint8_t on) +{ + struct rte_eth_dev *dev; + struct rte_eth_dev_info dev_info; + uint16_t flag = 0; + struct bnxt *bp; + int rc; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + if (!is_bnxt_supported(dev)) + return -ENOTSUP; + + rc = rte_eth_dev_info_get(port, &dev_info); + if (rc != 0) { + PMD_DRV_LOG(ERR, + "Error during getting device (port %u) info: %s\n", + port, strerror(-rc)); + + return rc; + } + bp = dev->data->dev_private; + + if (!bp->pf->vf_info) + return -EINVAL; + + if (vf >= bp->pdev->max_vfs) + return -EINVAL; + + if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG) { + PMD_DRV_LOG(ERR, "Currently cannot toggle this setting\n"); + return -ENOTSUP; + } + 
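	/*
	 * The mapping applied below, for example, turns
	 * rx_mask = ETH_VMDQ_ACCEPT_BROADCAST | ETH_VMDQ_ACCEPT_MULTICAST
	 * into flag = BNXT_VNIC_INFO_BCAST | BNXT_VNIC_INFO_ALLMULTI |
	 * BNXT_VNIC_INFO_MCAST; the result is then ORed into (on != 0) or
	 * cleared from (on == 0) the VF's cached l2_rx_mask before being
	 * pushed to firmware via the per-VNIC callback.
	 */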
+ /* Is this really the correct mapping? VFd seems to think it is. */ + if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC) + flag |= BNXT_VNIC_INFO_PROMISC; + + if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST) + flag |= BNXT_VNIC_INFO_BCAST; + if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST) + flag |= BNXT_VNIC_INFO_ALLMULTI | BNXT_VNIC_INFO_MCAST; + + if (on) + bp->pf->vf_info[vf].l2_rx_mask |= flag; + else + bp->pf->vf_info[vf].l2_rx_mask &= ~flag; + + rc = bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf, + vf_vnic_set_rxmask_cb, + &bp->pf->vf_info[vf].l2_rx_mask, + bnxt_set_rx_mask_no_vlan); + if (rc) + PMD_DRV_LOG(ERR, "bnxt_hwrm_func_vf_vnic_set_rxmask failed\n"); + + return rc; +} + +static int bnxt_set_vf_table(struct bnxt *bp, uint16_t vf) +{ + int rc = 0; + int dflt_vnic; + struct bnxt_vnic_info vnic; + + if (!BNXT_PF(bp)) { + PMD_DRV_LOG(ERR, + "Attempt to set VLAN table on non-PF port!\n"); + return -EINVAL; + } + + if (vf >= bp->pdev->max_vfs) + return -EINVAL; + + dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf); + if (dflt_vnic < 0) { + /* This simply indicates there's no driver loaded. + * This is not an error. + */ + PMD_DRV_LOG(ERR, "Unable to get default VNIC for VF %d\n", vf); + } else { + memset(&vnic, 0, sizeof(vnic)); + vnic.fw_vnic_id = dflt_vnic; + if (bnxt_hwrm_vnic_qcfg(bp, &vnic, + bp->pf->first_vf_id + vf) == 0) { + if (bnxt_hwrm_cfa_l2_set_rx_mask(bp, &vnic, + bp->pf->vf_info[vf].vlan_count, + bp->pf->vf_info[vf].vlan_table)) + rc = -1; + } else { + rc = -1; + } + } + + return rc; +} + +int rte_pmd_bnxt_set_vf_vlan_filter(uint16_t port, uint16_t vlan, + uint64_t vf_mask, uint8_t vlan_on) +{ + struct bnxt_vlan_table_entry *ve; + struct bnxt_vlan_antispoof_table_entry *vase; + struct rte_eth_dev *dev; + struct bnxt *bp; + uint16_t cnt; + int rc = 0; + int i, j; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + if (!is_bnxt_supported(dev)) + return -ENOTSUP; + + bp = dev->data->dev_private; + if (!bp->pf->vf_info) + return -EINVAL; + + for (i = 0; vf_mask; i++, vf_mask >>= 1) { + cnt = bp->pf->vf_info[i].vlan_count; + if ((vf_mask & 1) == 0) + continue; + + if (bp->pf->vf_info[i].vlan_table == NULL) { + rc = -1; + continue; + } + if (bp->pf->vf_info[i].vlan_as_table == NULL) { + rc = -1; + continue; + } + if (vlan_on) { + /* First, search for a duplicate... 
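 * For example, enabling VLAN 100 for this VF first scans
 * vlan_table[0..vlan_count) for vid == 100; only if it is absent, and
 * the page-sized table still has room, is a new entry appended to both
 * vlan_as_table (tpid 0x8100, vid 100, mask 0xfff) and vlan_table, and
 * vlan_count incremented.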
*/ + for (j = 0; j < cnt; j++) { + if (rte_be_to_cpu_16( + bp->pf->vf_info[i].vlan_table[j].vid) == + vlan) + break; + } + if (j == cnt) { + /* Now check that there's space */ + if (cnt == getpagesize() / sizeof(struct + bnxt_vlan_antispoof_table_entry)) { + PMD_DRV_LOG(ERR, + "VLAN anti-spoof table is full\n"); + PMD_DRV_LOG(ERR, + "VF %d cannot add VLAN %u\n", + i, vlan); + rc = -1; + continue; + } + + /* cnt is one less than vlan_count */ + cnt = bp->pf->vf_info[i].vlan_count++; + /* + * And finally, add to the + * end of the table + */ + vase = &bp->pf->vf_info[i].vlan_as_table[cnt]; + // TODO: Hardcoded TPID + vase->tpid = rte_cpu_to_be_16(0x8100); + vase->vid = rte_cpu_to_be_16(vlan); + vase->mask = rte_cpu_to_be_16(0xfff); + ve = &bp->pf->vf_info[i].vlan_table[cnt]; + /* TODO: Hardcoded TPID */ + ve->tpid = rte_cpu_to_be_16(0x8100); + ve->vid = rte_cpu_to_be_16(vlan); + } + } else { + for (j = 0; j < cnt; j++) { + if (rte_be_to_cpu_16( + bp->pf->vf_info[i].vlan_table[j].vid) != + vlan) + continue; + memmove(&bp->pf->vf_info[i].vlan_table[j], + &bp->pf->vf_info[i].vlan_table[j + 1], + getpagesize() - ((j + 1) * + sizeof(struct bnxt_vlan_table_entry))); + memmove(&bp->pf->vf_info[i].vlan_as_table[j], + &bp->pf->vf_info[i].vlan_as_table[j + 1], + getpagesize() - ((j + 1) * sizeof(struct + bnxt_vlan_antispoof_table_entry))); + j--; + cnt = --bp->pf->vf_info[i].vlan_count; + } + } + bnxt_set_vf_table(bp, i); + } + + return rc; +} + +int rte_pmd_bnxt_get_vf_stats(uint16_t port, + uint16_t vf_id, + struct rte_eth_stats *stats) +{ + struct rte_eth_dev *dev; + struct rte_eth_dev_info dev_info; + struct bnxt *bp; + int rc; + + dev = &rte_eth_devices[port]; + if (!is_bnxt_supported(dev)) + return -ENOTSUP; + + rc = rte_eth_dev_info_get(port, &dev_info); + if (rc != 0) { + PMD_DRV_LOG(ERR, + "Error during getting device (port %u) info: %s\n", + port, strerror(-rc)); + + return rc; + } + bp = dev->data->dev_private; + + if (vf_id >= dev_info.max_vfs) + return -EINVAL; + + if (!BNXT_PF(bp)) { + PMD_DRV_LOG(ERR, + "Attempt to get VF %d stats on non-PF port %d!\n", + vf_id, port); + return -ENOTSUP; + } + + return bnxt_hwrm_func_qstats(bp, bp->pf->first_vf_id + vf_id, stats, + NULL); +} + +int rte_pmd_bnxt_reset_vf_stats(uint16_t port, + uint16_t vf_id) +{ + struct rte_eth_dev *dev; + struct rte_eth_dev_info dev_info; + struct bnxt *bp; + int rc; + + dev = &rte_eth_devices[port]; + if (!is_bnxt_supported(dev)) + return -ENOTSUP; + + rc = rte_eth_dev_info_get(port, &dev_info); + if (rc != 0) { + PMD_DRV_LOG(ERR, + "Error during getting device (port %u) info: %s\n", + port, strerror(-rc)); + + return rc; + } + bp = dev->data->dev_private; + + if (vf_id >= dev_info.max_vfs) + return -EINVAL; + + if (!BNXT_PF(bp)) { + PMD_DRV_LOG(ERR, + "Attempt to reset VF %d stats on non-PF port %d!\n", + vf_id, port); + return -ENOTSUP; + } + + return bnxt_hwrm_func_clr_stats(bp, bp->pf->first_vf_id + vf_id); +} + +int rte_pmd_bnxt_get_vf_rx_status(uint16_t port, uint16_t vf_id) +{ + struct rte_eth_dev *dev; + struct rte_eth_dev_info dev_info; + struct bnxt *bp; + int rc; + + dev = &rte_eth_devices[port]; + if (!is_bnxt_supported(dev)) + return -ENOTSUP; + + rc = rte_eth_dev_info_get(port, &dev_info); + if (rc != 0) { + PMD_DRV_LOG(ERR, + "Error during getting device (port %u) info: %s\n", + port, strerror(-rc)); + + return rc; + } + bp = dev->data->dev_private; + + if (vf_id >= dev_info.max_vfs) + return -EINVAL; + + if (!BNXT_PF(bp)) { + PMD_DRV_LOG(ERR, + "Attempt to query VF %d RX stats on non-PF port %d!\n", + 
vf_id, port); + return -ENOTSUP; + } + + return bnxt_vf_vnic_count(bp, vf_id); +} + +int rte_pmd_bnxt_get_vf_tx_drop_count(uint16_t port, uint16_t vf_id, + uint64_t *count) +{ + struct rte_eth_dev *dev; + struct rte_eth_dev_info dev_info; + struct bnxt *bp; + int rc; + + dev = &rte_eth_devices[port]; + if (!is_bnxt_supported(dev)) + return -ENOTSUP; + + rc = rte_eth_dev_info_get(port, &dev_info); + if (rc != 0) { + PMD_DRV_LOG(ERR, + "Error during getting device (port %u) info: %s\n", + port, strerror(-rc)); + + return rc; + } + bp = dev->data->dev_private; + + if (vf_id >= dev_info.max_vfs) + return -EINVAL; + + if (!BNXT_PF(bp)) { + PMD_DRV_LOG(ERR, + "Attempt to query VF %d TX drops on non-PF port %d!\n", + vf_id, port); + return -ENOTSUP; + } + + return bnxt_hwrm_func_qstats_tx_drop(bp, bp->pf->first_vf_id + vf_id, + count); +} + +int rte_pmd_bnxt_mac_addr_add(uint16_t port, struct rte_ether_addr *addr, + uint32_t vf_id) +{ + struct rte_eth_dev *dev; + struct rte_eth_dev_info dev_info; + struct bnxt *bp; + struct bnxt_filter_info *filter; + struct bnxt_vnic_info vnic; + struct rte_ether_addr dflt_mac; + int rc; + + dev = &rte_eth_devices[port]; + if (!is_bnxt_supported(dev)) + return -ENOTSUP; + + rc = rte_eth_dev_info_get(port, &dev_info); + if (rc != 0) { + PMD_DRV_LOG(ERR, + "Error during getting device (port %u) info: %s\n", + port, strerror(-rc)); + + return rc; + } + bp = dev->data->dev_private; + + if (vf_id >= dev_info.max_vfs) + return -EINVAL; + + if (!BNXT_PF(bp)) { + PMD_DRV_LOG(ERR, + "Attempt to config VF %d MAC on non-PF port %d!\n", + vf_id, port); + return -ENOTSUP; + } + + /* If the VF currently uses a random MAC, update default to this one */ + if (bp->pf->vf_info[vf_id].random_mac) { + if (rte_pmd_bnxt_get_vf_rx_status(port, vf_id) <= 0) + bnxt_hwrm_func_vf_mac(bp, vf_id, (uint8_t *)addr); + } + + /* query the default VNIC id used by the function */ + rc = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf_id); + if (rc < 0) + goto exit; + + memset(&vnic, 0, sizeof(struct bnxt_vnic_info)); + vnic.fw_vnic_id = rte_le_to_cpu_16(rc); + rc = bnxt_hwrm_vnic_qcfg(bp, &vnic, bp->pf->first_vf_id + vf_id); + if (rc < 0) + goto exit; + + STAILQ_FOREACH(filter, &bp->pf->vf_info[vf_id].filter, next) { + if (filter->flags == + HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX && + filter->enables == + (HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR | + HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK) && + memcmp(addr, filter->l2_addr, RTE_ETHER_ADDR_LEN) == 0) { + bnxt_hwrm_clear_l2_filter(bp, filter); + break; + } + } + + if (filter == NULL) + filter = bnxt_alloc_vf_filter(bp, vf_id); + + filter->fw_l2_filter_id = UINT64_MAX; + filter->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX; + filter->enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR | + HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK; + memcpy(filter->l2_addr, addr, RTE_ETHER_ADDR_LEN); + memset(filter->l2_addr_mask, 0xff, RTE_ETHER_ADDR_LEN); + + /* Do not add a filter for the default MAC */ + if (bnxt_hwrm_func_qcfg_vf_default_mac(bp, vf_id, &dflt_mac) || + memcmp(filter->l2_addr, dflt_mac.addr_bytes, RTE_ETHER_ADDR_LEN)) + rc = bnxt_hwrm_set_l2_filter(bp, vnic.fw_vnic_id, filter); + +exit: + return rc; +} + +int +rte_pmd_bnxt_set_vf_vlan_insert(uint16_t port, uint16_t vf, + uint16_t vlan_id) +{ + struct rte_eth_dev *dev; + struct rte_eth_dev_info dev_info; + struct bnxt *bp; + int rc; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + if (!is_bnxt_supported(dev)) + return 
-ENOTSUP; + + rc = rte_eth_dev_info_get(port, &dev_info); + if (rc != 0) { + PMD_DRV_LOG(ERR, + "Error during getting device (port %u) info: %s\n", + port, strerror(-rc)); + + return rc; + } + bp = dev->data->dev_private; + + if (vf >= dev_info.max_vfs) + return -EINVAL; + + if (!BNXT_PF(bp)) { + PMD_DRV_LOG(ERR, + "Attempt to set VF %d vlan insert on non-PF port %d!\n", + vf, port); + return -ENOTSUP; + } + + bp->pf->vf_info[vf].dflt_vlan = vlan_id; + if (bnxt_hwrm_func_qcfg_current_vf_vlan(bp, vf) == + bp->pf->vf_info[vf].dflt_vlan) + return 0; + + rc = bnxt_hwrm_set_vf_vlan(bp, vf); + + return rc; +} + +int rte_pmd_bnxt_set_vf_persist_stats(uint16_t port, uint16_t vf, uint8_t on) +{ + struct rte_eth_dev_info dev_info; + struct rte_eth_dev *dev; + uint32_t func_flags; + struct bnxt *bp; + int rc; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + if (on > 1) + return -EINVAL; + + dev = &rte_eth_devices[port]; + rc = rte_eth_dev_info_get(port, &dev_info); + if (rc != 0) { + PMD_DRV_LOG(ERR, + "Error during getting device (port %u) info: %s\n", + port, strerror(-rc)); + + return rc; + } + bp = dev->data->dev_private; + + if (!BNXT_PF(bp)) { + PMD_DRV_LOG(ERR, + "Attempt to set persist stats on non-PF port %d!\n", + port); + return -EINVAL; + } + + if (vf >= dev_info.max_vfs) + return -EINVAL; + + /* Prev setting same as new setting. */ + if (on == bp->pf->vf_info[vf].persist_stats) + return 0; + + func_flags = bp->pf->vf_info[vf].func_cfg_flags; + + if (on) + func_flags |= + HWRM_FUNC_CFG_INPUT_FLAGS_NO_AUTOCLEAR_STATISTIC; + else + func_flags &= + ~HWRM_FUNC_CFG_INPUT_FLAGS_NO_AUTOCLEAR_STATISTIC; + + rc = bnxt_hwrm_func_cfg_vf_set_flags(bp, vf, func_flags); + if (!rc) { + bp->pf->vf_info[vf].persist_stats = on; + bp->pf->vf_info[vf].func_cfg_flags = func_flags; + } + + return rc; +} diff --git a/src/spdk/dpdk/drivers/net/bnxt/rte_pmd_bnxt.h b/src/spdk/dpdk/drivers/net/bnxt/rte_pmd_bnxt.h new file mode 100644 index 000000000..2e893cc7b --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnxt/rte_pmd_bnxt.h @@ -0,0 +1,326 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017-2018 Broadcom + * All rights reserved. + */ + +#ifndef _PMD_BNXT_H_ +#define _PMD_BNXT_H_ + +#include + +/* + * Response sent back to the caller after callback + */ +enum rte_pmd_bnxt_mb_event_rsp { + RTE_PMD_BNXT_MB_EVENT_NOOP_ACK, /**< skip mbox request and ACK */ + RTE_PMD_BNXT_MB_EVENT_NOOP_NACK, /**< skip mbox request and NACK */ + RTE_PMD_BNXT_MB_EVENT_PROCEED, /**< proceed with mbox request */ + RTE_PMD_BNXT_MB_EVENT_MAX /**< max value of this enum */ +}; + +/* mailbox message types */ +#define BNXT_VF_RESET 0x01 /* VF requests reset */ +#define BNXT_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */ +#define BNXT_VF_SET_VLAN 0x03 /* VF requests PF to set VLAN */ +#define BNXT_VF_SET_MTU 0x04 /* VF requests PF to set MTU */ +#define BNXT_VF_SET_MRU 0x05 /* VF requests PF to set MRU */ + +/* + * Data sent to the caller when the callback is executed. + */ +struct rte_pmd_bnxt_mb_event_param { + uint16_t vf_id; /* Virtual Function number */ + int retval; /* return value */ + void *msg; /* pointer to message */ +}; + +/** + * Enable/Disable VF MAC anti spoof + * + * @param port + * The port identifier of the Ethernet device. + * @param vf + * VF id. + * @param on + * 1 - Enable VF MAC anti spoof. + * 0 - Disable VF MAC anti spoof. + * + * @return + * - (0) if successful. + * - (-ENODEV) if *port* invalid. + * - (-EINVAL) if bad parameter. 
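 *
 * A minimal usage sketch (port and VF numbers are illustrative):
 *
 * @code
 * int ret = rte_pmd_bnxt_set_vf_mac_anti_spoof(0, 1, 1);
 * if (ret != 0)
 *     printf("enabling MAC anti-spoof on VF 1 failed: %d\n", ret);
 * @endcode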
+ */ +int rte_pmd_bnxt_set_vf_mac_anti_spoof(uint16_t port, uint16_t vf, uint8_t on); + +/** + * Set the VF MAC address. + * + * @param port + * The port identifier of the Ethernet device. + * @param vf + * VF id. + * @param mac_addr + * VF MAC address. + * @return + * - (0) if successful. + * - (-ENODEV) if *port* invalid. + * - (-EINVAL) if *vf* or *mac_addr* is invalid. + */ +int rte_pmd_bnxt_set_vf_mac_addr(uint16_t port, uint16_t vf, + struct rte_ether_addr *mac_addr); + +/** + * Enable/Disable vf vlan strip for all queues in a pool + * + * @param port + * The port identifier of the Ethernet device. + * @param vf + * ID specifying VF. + * @param on + * 1 - Enable VF's vlan strip on RX queues. + * 0 - Disable VF's vlan strip on RX queues. + * + * @return + * - (0) if successful. + * - (-ENOTSUP) if hardware doesn't support this feature. + * - (-ENODEV) if *port* invalid. + * - (-EINVAL) if bad parameter. + */ +int +rte_pmd_bnxt_set_vf_vlan_stripq(uint16_t port, uint16_t vf, uint8_t on); + +/** + * Enable/Disable vf vlan insert + * + * @param port + * The port identifier of the Ethernet device. + * @param vf + * ID specifying VF. + * @param vlan_id + * 0 - Disable VF's vlan insert. + * n - Enable; n is inserted as the vlan id. + * + * @return + * - (0) if successful. + * - (-ENODEV) if *port* invalid. + * - (-EINVAL) if bad parameter. + */ +int +rte_pmd_bnxt_set_vf_vlan_insert(uint16_t port, uint16_t vf, + uint16_t vlan_id); + +/** + * Enable/Disable hardware VF VLAN filtering by an Ethernet device of + * received VLAN packets tagged with a given VLAN Tag Identifier. + * + * @param port + * The port identifier of the Ethernet device. + * @param vlan + * The VLAN Tag Identifier whose filtering must be enabled or disabled. + * @param vf_mask + * Bitmap listing which VFs participate in the VLAN filtering. + * @param vlan_on + * 1 - Enable VFs VLAN filtering. + * 0 - Disable VFs VLAN filtering. + * @return + * - (0) if successful. + * - (-ENOTSUP) if hardware doesn't support. + * - (-ENODEV) if *port_id* invalid. + * - (-EINVAL) if bad parameter. + */ +int rte_pmd_bnxt_set_vf_vlan_filter(uint16_t port, uint16_t vlan, + uint64_t vf_mask, uint8_t vlan_on); + +/** + * Enable/Disable tx loopback + * + * @param port + * The port identifier of the Ethernet device. + * @param on + * 1 - Enable tx loopback. + * 0 - Disable tx loopback. + * + * @return + * - (0) if successful. + * - (-ENODEV) if *port* invalid. + * - (-EINVAL) if bad parameter. + */ +int rte_pmd_bnxt_set_tx_loopback(uint16_t port, uint8_t on); + +/** + * set all queues drop enable bit + * + * @param port + * The port identifier of the Ethernet device. + * @param on + * 1 - set the queue drop enable bit for all pools. + * 0 - reset the queue drop enable bit for all pools. + * + * @return + * - (0) if successful. + * - (-ENODEV) if *port* invalid. + * - (-EINVAL) if bad parameter. + */ +int rte_pmd_bnxt_set_all_queues_drop_en(uint16_t port, uint8_t on); + +/** + * Set the VF rate limit. + * + * @param port + * The port identifier of the Ethernet device. + * @param vf + * VF id. + * @param tx_rate + * Tx rate for the VF + * @param q_msk + * Mask of the Tx queue + * @return + * - (0) if successful. + * - (-ENODEV) if *port* invalid. + * - (-EINVAL) if *vf* or *mac_addr* is invalid. + */ +int rte_pmd_bnxt_set_vf_rate_limit(uint16_t port, uint16_t vf, + uint16_t tx_rate, uint64_t q_msk); + +/** + * Get VF's statistics + * + * @param port + * The port identifier of the Ethernet device. + * @param vf_id + * VF on which to get. 
+ * @param stats + * A pointer to a structure of type *rte_eth_stats* to be filled with + * the values of device counters supported statistics: + * @return + * - (0) if successful. + * - (-ENODEV) if *port* invalid. + * - (-EINVAL) if bad parameter. + */ + +int rte_pmd_bnxt_get_vf_stats(uint16_t port, + uint16_t vf_id, + struct rte_eth_stats *stats); + +/** + * Clear VF's statistics + * + * @param port + * The port identifier of the Ethernet device. + * @param vf_id + * VF on which to get. + * @return + * - (0) if successful. + * - (-ENODEV) if *port* invalid. + * - (-EINVAL) if bad parameter. + */ +int rte_pmd_bnxt_reset_vf_stats(uint16_t port, + uint16_t vf_id); + +/** + * Enable/Disable VF VLAN anti spoof + * + * @param port + * The port identifier of the Ethernet device. + * @param vf + * VF id. + * @param on + * 1 - Enable VF VLAN anti spoof. + * 0 - Disable VF VLAN anti spoof. + * + * @return + * - (0) if successful. + * - (-ENODEV) if *port* invalid. + * - (-EINVAL) if bad parameter. + */ +int rte_pmd_bnxt_set_vf_vlan_anti_spoof(uint16_t port, uint16_t vf, uint8_t on); + +/** + * Set RX L2 Filtering mode of a VF of an Ethernet device. + * + * @param port + * The port identifier of the Ethernet device. + * @param vf + * VF id. + * @param rx_mask + * The RX mode mask + * @param on + * 1 - Enable a VF RX mode. + * 0 - Disable a VF RX mode. + * @return + * - (0) if successful. + * - (-ENODEV) if *port_id* invalid. + * - (-EINVAL) if bad parameter. + */ +int rte_pmd_bnxt_set_vf_rxmode(uint16_t port, uint16_t vf, + uint16_t rx_mask, uint8_t on); + +/** + * Returns the number of default RX queues on a VF + * + * @param port + * The port identifier of the Ethernet device. + * @param vf + * VF id. + * @return + * - Non-negative value - Number of default RX queues + * - (-EINVAL) if bad parameter. + * - (-ENOTSUP) if on a function without VFs + * - (-ENOMEM) on an allocation failure + * - (-1) firmware interface error + */ +int rte_pmd_bnxt_get_vf_rx_status(uint16_t port, uint16_t vf_id); + +/** + * Queries the TX drop counter for the function + * + * @param port + * The port identifier of the Ethernet device. + * @param vf_id + * VF on which to get. + * @param count + * Pointer to a uint64_t that will be populated with the counter value. + * @return + * - Positive Non-zero value - Error code from HWRM + * - (-EINVAL) invalid vf_id specified. + * - (-ENOTSUP) Ethernet device is not a PF + */ +int rte_pmd_bnxt_get_vf_tx_drop_count(uint16_t port, uint16_t vf_id, + uint64_t *count); + +/** + * Programs the MAC address for the function specified + * + * @param port + * The port identifier of the Ethernet device. + * @param mac_addr + * The MAC address to be programmed in the filter. + * @param vf_id + * VF on which to get. + * @return + * - Positive Non-zero value - Error code from HWRM + * - (-EINVAL) invalid vf_id specified. + * - (-ENOTSUP) Ethernet device is not a PF + * - (-ENOMEM) on an allocation failure + */ +int rte_pmd_bnxt_mac_addr_add(uint16_t port, struct rte_ether_addr *mac_addr, + uint32_t vf_id); + +/** + * Enable/Disable VF statistics retention + * + * @param port + * The port identifier of the Ethernet device. + * @param vf + * VF id. + * @param on + * 1 - Prevent VF statistics from automatically resetting + * 0 - Allow VF statistics to automatically reset + * + * @return + * - (0) if successful. + * - (-ENODEV) if *port* invalid. + * - (-EINVAL) if bad parameter. 
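 *
 * A short sketch combining this with the statistics helpers declared
 * above (port 0, VF 0, purely illustrative):
 *
 * @code
 * struct rte_eth_stats st;
 *
 * rte_pmd_bnxt_set_vf_persist_stats(0, 0, 1); // keep counters across reads
 * if (rte_pmd_bnxt_get_vf_stats(0, 0, &st) == 0)
 *     printf("VF 0 rx packets: %" PRIu64 "\n", st.ipackets);
 * @endcode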
+ */ +int rte_pmd_bnxt_set_vf_persist_stats(uint16_t port, uint16_t vf, uint8_t on); +#endif /* _PMD_BNXT_H_ */ diff --git a/src/spdk/dpdk/drivers/net/bnxt/rte_pmd_bnxt_version.map b/src/spdk/dpdk/drivers/net/bnxt/rte_pmd_bnxt_version.map new file mode 100644 index 000000000..bb5256234 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnxt/rte_pmd_bnxt_version.map @@ -0,0 +1,22 @@ +DPDK_20.0 { + global: + + rte_pmd_bnxt_get_vf_rx_status; + rte_pmd_bnxt_get_vf_stats; + rte_pmd_bnxt_get_vf_tx_drop_count; + rte_pmd_bnxt_mac_addr_add; + rte_pmd_bnxt_reset_vf_stats; + rte_pmd_bnxt_set_all_queues_drop_en; + rte_pmd_bnxt_set_tx_loopback; + rte_pmd_bnxt_set_vf_mac_addr; + rte_pmd_bnxt_set_vf_mac_anti_spoof; + rte_pmd_bnxt_set_vf_persist_stats; + rte_pmd_bnxt_set_vf_rate_limit; + rte_pmd_bnxt_set_vf_rxmode; + rte_pmd_bnxt_set_vf_vlan_anti_spoof; + rte_pmd_bnxt_set_vf_vlan_filter; + rte_pmd_bnxt_set_vf_vlan_insert; + rte_pmd_bnxt_set_vf_vlan_stripq; + + local: *; +}; diff --git a/src/spdk/dpdk/drivers/net/bnxt/tf_core/bitalloc.c b/src/spdk/dpdk/drivers/net/bnxt/tf_core/bitalloc.c new file mode 100644 index 000000000..fb4df9a19 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnxt/tf_core/bitalloc.c @@ -0,0 +1,364 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019-2020 Broadcom + * All rights reserved. + */ + +#include "bitalloc.h" + +#define BITALLOC_MAX_LEVELS 6 + +/* Finds the first bit set plus 1, equivalent to gcc __builtin_ffs */ +static int +ba_ffs(bitalloc_word_t v) +{ + int c; /* c will be the number of zero bits on the right plus 1 */ + + v &= -v; + c = v ? 32 : 0; + + if (v & 0x0000FFFF) + c -= 16; + if (v & 0x00FF00FF) + c -= 8; + if (v & 0x0F0F0F0F) + c -= 4; + if (v & 0x33333333) + c -= 2; + if (v & 0x55555555) + c -= 1; + + return c; +} + +int +ba_init(struct bitalloc *pool, int size) +{ + bitalloc_word_t *mem = (bitalloc_word_t *)pool; + int i; + + /* Initialize */ + pool->size = 0; + + if (size < 1 || size > BITALLOC_MAX_SIZE) + return -1; + + /* Zero structure */ + for (i = 0; + i < (int)(BITALLOC_SIZEOF(size) / sizeof(bitalloc_word_t)); + i++) + mem[i] = 0; + + /* Initialize */ + pool->size = size; + + /* Embed number of words of next level, after each level */ + int words[BITALLOC_MAX_LEVELS]; + int lev = 0; + int offset = 0; + + words[0] = (size + 31) / 32; + while (words[lev] > 1) { + lev++; + words[lev] = (words[lev - 1] + 31) / 32; + } + + while (lev) { + offset += words[lev]; + pool->storage[offset++] = words[--lev]; + } + + /* Free the entire pool */ + for (i = 0; i < size; i++) + ba_free(pool, i); + + return 0; +} + +static int +ba_alloc_helper(struct bitalloc *pool, + int offset, + int words, + unsigned int size, + int index, + int *clear) +{ + bitalloc_word_t *storage = &pool->storage[offset]; + int loc = ba_ffs(storage[index]); + int r; + + if (loc == 0) + return -1; + + loc--; + + if (pool->size > size) { + r = ba_alloc_helper(pool, + offset + words + 1, + storage[words], + size * 32, + index * 32 + loc, + clear); + } else { + r = index * 32 + loc; + *clear = 1; + pool->free_count--; + } + + if (*clear) { + storage[index] &= ~(1 << loc); + *clear = (storage[index] == 0); + } + + return r; +} + +int +ba_alloc(struct bitalloc *pool) +{ + int clear = 0; + + return ba_alloc_helper(pool, 0, 1, 32, 0, &clear); +} + +static int +ba_alloc_index_helper(struct bitalloc *pool, + int offset, + int words, + unsigned int size, + int *index, + int *clear) +{ + bitalloc_word_t *storage = &pool->storage[offset]; + int loc; + int r; + + if (pool->size > size) + r = 
ba_alloc_index_helper(pool, + offset + words + 1, + storage[words], + size * 32, + index, + clear); + else + r = 1; /* Check if already allocated */ + + loc = (*index % 32); + *index = *index / 32; + + if (r == 1) { + r = (storage[*index] & (1 << loc)) ? 0 : -1; + if (r == 0) { + *clear = 1; + pool->free_count--; + } + } + + if (*clear) { + storage[*index] &= ~(1 << loc); + *clear = (storage[*index] == 0); + } + + return r; +} + +int +ba_alloc_index(struct bitalloc *pool, int index) +{ + int clear = 0; + int index_copy = index; + + if (index < 0 || index >= (int)pool->size) + return -1; + + if (ba_alloc_index_helper(pool, 0, 1, 32, &index_copy, &clear) >= 0) + return index; + else + return -1; +} + +static int +ba_inuse_helper(struct bitalloc *pool, + int offset, + int words, + unsigned int size, + int *index) +{ + bitalloc_word_t *storage = &pool->storage[offset]; + int loc; + int r; + + if (pool->size > size) + r = ba_inuse_helper(pool, + offset + words + 1, + storage[words], + size * 32, + index); + else + r = 1; /* Check if in use */ + + loc = (*index % 32); + *index = *index / 32; + + if (r == 1) + r = (storage[*index] & (1 << loc)) ? -1 : 0; + + return r; +} + +int +ba_inuse(struct bitalloc *pool, int index) +{ + if (index < 0 || index >= (int)pool->size) + return -1; + + return ba_inuse_helper(pool, 0, 1, 32, &index) == 0; +} + +static int +ba_free_helper(struct bitalloc *pool, + int offset, + int words, + unsigned int size, + int *index) +{ + bitalloc_word_t *storage = &pool->storage[offset]; + int loc; + int r; + + if (pool->size > size) + r = ba_free_helper(pool, + offset + words + 1, + storage[words], + size * 32, + index); + else + r = 1; /* Check if already free */ + + loc = (*index % 32); + *index = *index / 32; + + if (r == 1) { + r = (storage[*index] & (1 << loc)) ? 
-1 : 0; + if (r == 0) + pool->free_count++; + } + + if (r == 0) + storage[*index] |= (1 << loc); + + return r; +} + +int +ba_free(struct bitalloc *pool, int index) +{ + if (index < 0 || index >= (int)pool->size) + return -1; + + return ba_free_helper(pool, 0, 1, 32, &index); +} + +int +ba_inuse_free(struct bitalloc *pool, int index) +{ + if (index < 0 || index >= (int)pool->size) + return -1; + + return ba_free_helper(pool, 0, 1, 32, &index) + 1; +} + +int +ba_free_count(struct bitalloc *pool) +{ + return (int)pool->free_count; +} + +int ba_inuse_count(struct bitalloc *pool) +{ + return (int)(pool->size) - (int)(pool->free_count); +} + +static int +ba_find_next_helper(struct bitalloc *pool, + int offset, + int words, + unsigned int size, + int *index, + int free) +{ + bitalloc_word_t *storage = &pool->storage[offset]; + int loc, r, bottom = 0; + + if (pool->size > size) + r = ba_find_next_helper(pool, + offset + words + 1, + storage[words], + size * 32, + index, + free); + else + bottom = 1; /* Bottom of tree */ + + loc = (*index % 32); + *index = *index / 32; + + if (bottom) { + int bit_index = *index * 32; + + loc = ba_ffs(~storage[*index] & ((bitalloc_word_t)-1 << loc)); + if (loc > 0) { + loc--; + r = (bit_index + loc); + if (r >= (int)pool->size) + r = -1; + } else { + /* Loop over array at bottom of tree */ + r = -1; + bit_index += 32; + *index = *index + 1; + while ((int)pool->size > bit_index) { + loc = ba_ffs(~storage[*index]); + + if (loc > 0) { + loc--; + r = (bit_index + loc); + if (r >= (int)pool->size) + r = -1; + break; + } + bit_index += 32; + *index = *index + 1; + } + } + } + + if (r >= 0 && (free)) { + if (bottom) + pool->free_count++; + storage[*index] |= (1 << loc); + } + + return r; +} + +int +ba_find_next_inuse(struct bitalloc *pool, int index) +{ + if (index < 0 || + index >= (int)pool->size || + pool->free_count == pool->size) + return -1; + + return ba_find_next_helper(pool, 0, 1, 32, &index, 0); +} + +int +ba_find_next_inuse_free(struct bitalloc *pool, int index) +{ + if (index < 0 || + index >= (int)pool->size || + pool->free_count == pool->size) + return -1; + + return ba_find_next_helper(pool, 0, 1, 32, &index, 1); +} diff --git a/src/spdk/dpdk/drivers/net/bnxt/tf_core/bitalloc.h b/src/spdk/dpdk/drivers/net/bnxt/tf_core/bitalloc.h new file mode 100644 index 000000000..563c8531a --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnxt/tf_core/bitalloc.h @@ -0,0 +1,119 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019-2020 Broadcom + * All rights reserved. + */ + +#ifndef _BITALLOC_H_ +#define _BITALLOC_H_ + +#include + +/* Bitalloc works on uint32_t as its word size */ +typedef uint32_t bitalloc_word_t; + +struct bitalloc { + bitalloc_word_t size; + bitalloc_word_t free_count; + bitalloc_word_t storage[1]; +}; + +#define BA_L0(s) (((s) + 31) / 32) +#define BA_L1(s) ((BA_L0(s) + 31) / 32) +#define BA_L2(s) ((BA_L1(s) + 31) / 32) +#define BA_L3(s) ((BA_L2(s) + 31) / 32) +#define BA_L4(s) ((BA_L3(s) + 31) / 32) + +#define BITALLOC_SIZEOF(size) \ + (sizeof(struct bitalloc) * \ + (((sizeof(struct bitalloc) + \ + sizeof(struct bitalloc) - 1 + \ + (sizeof(bitalloc_word_t) * \ + ((BA_L0(size) - 1) + \ + ((BA_L0(size) == 1) ? 0 : (BA_L1(size) + 1)) + \ + ((BA_L1(size) == 1) ? 0 : (BA_L2(size) + 1)) + \ + ((BA_L2(size) == 1) ? 0 : (BA_L3(size) + 1)) + \ + ((BA_L3(size) == 1) ? 0 : (BA_L4(size) + 1)))))) / \ + sizeof(struct bitalloc))) + +#define BITALLOC_MAX_SIZE (32 * 32 * 32 * 32 * 32 * 32) + +/* The instantiation of a bitalloc looks a bit odd. 
Since a + * bit allocator has variable storage, we need a way to get a + * a pointer to a bitalloc structure that points to the correct + * amount of storage. We do this by creating an array of + * bitalloc where the first element in the array is the + * actual bitalloc base structure, and the remaining elements + * in the array provide the storage for it. This approach allows + * instances to be individual variables or members of larger + * structures. + */ +#define BITALLOC_INST(name, size) \ + struct bitalloc name[(BITALLOC_SIZEOF(size) / \ + sizeof(struct bitalloc))] + +/* Symbolic return codes */ +#define BA_SUCCESS 0 +#define BA_FAIL -1 +#define BA_ENTRY_FREE 0 +#define BA_ENTRY_IN_USE 1 +#define BA_NO_ENTRY_FOUND -1 + +/** + * Initializates the bitallocator + * + * Returns 0 on success, -1 on failure. Size is arbitrary up to + * BITALLOC_MAX_SIZE + */ +int ba_init(struct bitalloc *pool, int size); + +/** + * Returns -1 on failure, or index of allocated entry + */ +int ba_alloc(struct bitalloc *pool); +int ba_alloc_index(struct bitalloc *pool, int index); + +/** + * Query a particular index in a pool to check if its in use. + * + * Returns -1 on invalid index, 1 if the index is allocated, 0 if it + * is free + */ +int ba_inuse(struct bitalloc *pool, int index); + +/** + * Variant of ba_inuse that frees the index if it is allocated, same + * return codes as ba_inuse + */ +int ba_inuse_free(struct bitalloc *pool, int index); + +/** + * Find next index that is in use, start checking at index 'idx' + * + * Returns next index that is in use on success, or + * -1 if no in use index is found + */ +int ba_find_next_inuse(struct bitalloc *pool, int idx); + +/** + * Variant of ba_find_next_inuse that also frees the next in use index, + * same return codes as ba_find_next_inuse + */ +int ba_find_next_inuse_free(struct bitalloc *pool, int idx); + +/** + * Multiple freeing of the same index has no negative side effects, + * but will return -1. returns -1 on failure, 0 on success. + */ +int ba_free(struct bitalloc *pool, int index); + +/** + * Returns the pool's free count + */ +int ba_free_count(struct bitalloc *pool); + +/** + * Returns the pool's in use count + */ +int ba_inuse_count(struct bitalloc *pool); + +#endif /* _BITALLOC_H_ */ diff --git a/src/spdk/dpdk/drivers/net/bnxt/tf_core/hwrm_tf.h b/src/spdk/dpdk/drivers/net/bnxt/tf_core/hwrm_tf.h new file mode 100644 index 000000000..341909573 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnxt/tf_core/hwrm_tf.h @@ -0,0 +1,972 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019-2020 Broadcom + * All rights reserved. 
+ */ +#ifndef _HWRM_TF_H_ +#define _HWRM_TF_H_ + +#include "tf_core.h" + +typedef enum tf_type { + TF_TYPE_TRUFLOW, + TF_TYPE_LAST = TF_TYPE_TRUFLOW, +} tf_type_t; + +typedef enum tf_subtype { + HWRM_TFT_SESSION_ATTACH = 712, + HWRM_TFT_SESSION_HW_RESC_QCAPS = 721, + HWRM_TFT_SESSION_HW_RESC_ALLOC = 722, + HWRM_TFT_SESSION_HW_RESC_FREE = 723, + HWRM_TFT_SESSION_HW_RESC_FLUSH = 724, + HWRM_TFT_SESSION_SRAM_RESC_QCAPS = 725, + HWRM_TFT_SESSION_SRAM_RESC_ALLOC = 726, + HWRM_TFT_SESSION_SRAM_RESC_FREE = 727, + HWRM_TFT_SESSION_SRAM_RESC_FLUSH = 728, + HWRM_TFT_TBL_SCOPE_CFG = 731, + HWRM_TFT_EM_RULE_INSERT = 739, + HWRM_TFT_EM_RULE_DELETE = 740, + HWRM_TFT_REG_GET = 821, + HWRM_TFT_REG_SET = 822, + HWRM_TFT_TBL_TYPE_SET = 823, + HWRM_TFT_TBL_TYPE_GET = 824, + TF_SUBTYPE_LAST = HWRM_TFT_TBL_TYPE_GET, +} tf_subtype_t; + +/* Request and Response compile time checking */ +/* u32_t tlv_req_value[26]; */ +#define TF_MAX_REQ_SIZE 104 +/* u32_t tlv_resp_value[170]; */ +#define TF_MAX_RESP_SIZE 680 + +/* Use this to allocate/free any kind of + * indexes over HWRM and fill the parms pointer + */ +#define TF_BULK_RECV 128 +#define TF_BULK_SEND 16 + +/* EM Key value */ +#define TF_DEV_DATA_TYPE_TF_EM_RULE_INSERT_KEY_DATA 0x2e30UL +/* EM Key value */ +#define TF_DEV_DATA_TYPE_TF_EM_RULE_DELETE_KEY_DATA 0x2e40UL +/* L2 Context DMA Address Type */ +#define TF_DEV_DATA_TYPE_TF_L2_CTX_DMA_ADDR 0x2fe0UL +/* L2 Context Entry */ +#define TF_DEV_DATA_TYPE_TF_L2_CTX_ENTRY 0x2fe1UL +/* Prof tcam DMA Address Type */ +#define TF_DEV_DATA_TYPE_TF_PROF_TCAM_DMA_ADDR 0x3030UL +/* Prof tcam Entry */ +#define TF_DEV_DATA_TYPE_TF_PROF_TCAM_ENTRY 0x3031UL +/* WC DMA Address Type */ +#define TF_DEV_DATA_TYPE_TF_WC_DMA_ADDR 0x30d0UL +/* WC Entry */ +#define TF_DEV_DATA_TYPE_TF_WC_ENTRY 0x30d1UL +/* Action Data */ +#define TF_DEV_DATA_TYPE_TF_ACTION_DATA 0x3170UL +#define TF_DEV_DATA_TYPE_LAST TF_DEV_DATA_TYPE_TF_ACTION_DATA + +#define TF_BITS2BYTES(x) (((x) + 7) >> 3) +#define TF_BITS2BYTES_WORD_ALIGN(x) ((((x) + 31) >> 5) * 4) + +struct tf_session_attach_input; +struct tf_session_hw_resc_qcaps_input; +struct tf_session_hw_resc_qcaps_output; +struct tf_session_hw_resc_alloc_input; +struct tf_session_hw_resc_alloc_output; +struct tf_session_hw_resc_free_input; +struct tf_session_hw_resc_flush_input; +struct tf_session_sram_resc_qcaps_input; +struct tf_session_sram_resc_qcaps_output; +struct tf_session_sram_resc_alloc_input; +struct tf_session_sram_resc_alloc_output; +struct tf_session_sram_resc_free_input; +struct tf_session_sram_resc_flush_input; +struct tf_tbl_type_set_input; +struct tf_tbl_type_get_input; +struct tf_tbl_type_get_output; +struct tf_em_internal_insert_input; +struct tf_em_internal_insert_output; +struct tf_em_internal_delete_input; +/* Input params for session attach */ +typedef struct tf_session_attach_input { + /* Firmware session id returned when HWRM_TF_SESSION_OPEN is sent */ + uint32_t fw_session_id; + /* Session Name */ + char session_name[TF_SESSION_NAME_MAX]; +} tf_session_attach_input_t, *ptf_session_attach_input_t; + +/* Input params for session resource HW qcaps */ +typedef struct tf_session_hw_resc_qcaps_input { + /* Firmware session id returned when HWRM_TF_SESSION_OPEN is sent */ + uint32_t fw_session_id; + /* flags */ + uint16_t flags; + /* When set to 0, indicates the query apply to RX */ +#define TF_SESSION_HW_RESC_QCAPS_INPUT_FLAGS_DIR_RX (0x0) + /* When set to 1, indicates the query apply to TX */ +#define TF_SESSION_HW_RESC_QCAPS_INPUT_FLAGS_DIR_TX (0x1) +} 
tf_session_hw_resc_qcaps_input_t, *ptf_session_hw_resc_qcaps_input_t; + +/* Output params for session resource HW qcaps */ +typedef struct tf_session_hw_resc_qcaps_output { + /* Control Flags */ + uint32_t flags; + /* When set to 0, indicates Static partitioning */ +#define TF_SESSION_HW_RESC_QCAPS_OUTPUT_FLAGS_SESS_RES_STRATEGY_STATIC (0x0) + /* When set to 1, indicates Strategy 1 */ +#define TF_SESSION_HW_RESC_QCAPS_OUTPUT_FLAGS_SESS_RES_STRATEGY_1 (0x1) + /* When set to 1, indicates Strategy 2 */ +#define TF_SESSION_HW_RESC_QCAPS_OUTPUT_FLAGS_SESS_RES_STRATEGY_2 (0x2) + /* When set to 1, indicates Strategy 3 */ +#define TF_SESSION_HW_RESC_QCAPS_OUTPUT_FLAGS_SESS_RES_STRATEGY_3 (0x3) + /* Unused */ + uint8_t unused[4]; + /* Minimum guaranteed number of L2 Ctx */ + uint16_t l2_ctx_tcam_entries_min; + /* Maximum non-guaranteed number of L2 Ctx */ + uint16_t l2_ctx_tcam_entries_max; + /* Minimum guaranteed number of profile functions */ + uint16_t prof_func_min; + /* Maximum non-guaranteed number of profile functions */ + uint16_t prof_func_max; + /* Minimum guaranteed number of profile TCAM entries */ + uint16_t prof_tcam_entries_min; + /* Maximum non-guaranteed number of profile TCAM entries */ + uint16_t prof_tcam_entries_max; + /* Minimum guaranteed number of EM profile ID */ + uint16_t em_prof_id_min; + /* Maximum non-guaranteed number of EM profile ID */ + uint16_t em_prof_id_max; + /* Minimum guaranteed number of EM records entries */ + uint16_t em_record_entries_min; + /* Maximum non-guaranteed number of EM record entries */ + uint16_t em_record_entries_max; + /* Minimum guaranteed number of WC TCAM profile ID */ + uint16_t wc_tcam_prof_id_min; + /* Maximum non-guaranteed number of WC TCAM profile ID */ + uint16_t wc_tcam_prof_id_max; + /* Minimum guaranteed number of WC TCAM entries */ + uint16_t wc_tcam_entries_min; + /* Maximum non-guaranteed number of WC TCAM entries */ + uint16_t wc_tcam_entries_max; + /* Minimum guaranteed number of meter profiles */ + uint16_t meter_profiles_min; + /* Maximum non-guaranteed number of meter profiles */ + uint16_t meter_profiles_max; + /* Minimum guaranteed number of meter instances */ + uint16_t meter_inst_min; + /* Maximum non-guaranteed number of meter instances */ + uint16_t meter_inst_max; + /* Minimum guaranteed number of mirrors */ + uint16_t mirrors_min; + /* Maximum non-guaranteed number of mirrors */ + uint16_t mirrors_max; + /* Minimum guaranteed number of UPAR */ + uint16_t upar_min; + /* Maximum non-guaranteed number of UPAR */ + uint16_t upar_max; + /* Minimum guaranteed number of SP TCAM entries */ + uint16_t sp_tcam_entries_min; + /* Maximum non-guaranteed number of SP TCAM entries */ + uint16_t sp_tcam_entries_max; + /* Minimum guaranteed number of L2 Functions */ + uint16_t l2_func_min; + /* Maximum non-guaranteed number of L2 Functions */ + uint16_t l2_func_max; + /* Minimum guaranteed number of flexible key templates */ + uint16_t flex_key_templ_min; + /* Maximum non-guaranteed number of flexible key templates */ + uint16_t flex_key_templ_max; + /* Minimum guaranteed number of table Scopes */ + uint16_t tbl_scope_min; + /* Maximum non-guaranteed number of table Scopes */ + uint16_t tbl_scope_max; + /* Minimum guaranteed number of epoch0 entries */ + uint16_t epoch0_entries_min; + /* Maximum non-guaranteed number of epoch0 entries */ + uint16_t epoch0_entries_max; + /* Minimum guaranteed number of epoch1 entries */ + uint16_t epoch1_entries_min; + /* Maximum non-guaranteed number of epoch1 entries */ + uint16_t 
epoch1_entries_max; + /* Minimum guaranteed number of metadata */ + uint16_t metadata_min; + /* Maximum non-guaranteed number of metadata */ + uint16_t metadata_max; + /* Minimum guaranteed number of CT states */ + uint16_t ct_state_min; + /* Maximum non-guaranteed number of CT states */ + uint16_t ct_state_max; + /* Minimum guaranteed number of range profiles */ + uint16_t range_prof_min; + /* Maximum non-guaranteed number range profiles */ + uint16_t range_prof_max; + /* Minimum guaranteed number of range entries */ + uint16_t range_entries_min; + /* Maximum non-guaranteed number of range entries */ + uint16_t range_entries_max; + /* Minimum guaranteed number of LAG table entries */ + uint16_t lag_tbl_entries_min; + /* Maximum non-guaranteed number of LAG table entries */ + uint16_t lag_tbl_entries_max; +} tf_session_hw_resc_qcaps_output_t, *ptf_session_hw_resc_qcaps_output_t; + +/* Input params for session resource HW alloc */ +typedef struct tf_session_hw_resc_alloc_input { + /* Firmware session id returned when HWRM_TF_SESSION_OPEN is sent */ + uint32_t fw_session_id; + /* flags */ + uint16_t flags; + /* When set to 0, indicates the query apply to RX */ +#define TF_SESSION_HW_RESC_ALLOC_INPUT_FLAGS_DIR_RX (0x0) + /* When set to 1, indicates the query apply to TX */ +#define TF_SESSION_HW_RESC_ALLOC_INPUT_FLAGS_DIR_TX (0x1) + /* Unused */ + uint8_t unused[2]; + /* Number of L2 CTX TCAM entries to be allocated */ + uint16_t num_l2_ctx_tcam_entries; + /* Number of profile functions to be allocated */ + uint16_t num_prof_func_entries; + /* Number of profile TCAM entries to be allocated */ + uint16_t num_prof_tcam_entries; + /* Number of EM profile ids to be allocated */ + uint16_t num_em_prof_id; + /* Number of EM records entries to be allocated */ + uint16_t num_em_record_entries; + /* Number of WC profiles ids to be allocated */ + uint16_t num_wc_tcam_prof_id; + /* Number of WC TCAM entries to be allocated */ + uint16_t num_wc_tcam_entries; + /* Number of meter profiles to be allocated */ + uint16_t num_meter_profiles; + /* Number of meter instances to be allocated */ + uint16_t num_meter_inst; + /* Number of mirrors to be allocated */ + uint16_t num_mirrors; + /* Number of UPAR to be allocated */ + uint16_t num_upar; + /* Number of SP TCAM entries to be allocated */ + uint16_t num_sp_tcam_entries; + /* Number of L2 functions to be allocated */ + uint16_t num_l2_func; + /* Number of flexible key templates to be allocated */ + uint16_t num_flex_key_templ; + /* Number of table scopes to be allocated */ + uint16_t num_tbl_scope; + /* Number of epoch0 entries to be allocated */ + uint16_t num_epoch0_entries; + /* Number of epoch1 entries to be allocated */ + uint16_t num_epoch1_entries; + /* Number of metadata to be allocated */ + uint16_t num_metadata; + /* Number of CT states to be allocated */ + uint16_t num_ct_state; + /* Number of range profiles to be allocated */ + uint16_t num_range_prof; + /* Number of range Entries to be allocated */ + uint16_t num_range_entries; + /* Number of LAG table entries to be allocated */ + uint16_t num_lag_tbl_entries; +} tf_session_hw_resc_alloc_input_t, *ptf_session_hw_resc_alloc_input_t; + +/* Output params for session resource HW alloc */ +typedef struct tf_session_hw_resc_alloc_output { + /* Starting index of L2 CTX TCAM entries allocated to the session */ + uint16_t l2_ctx_tcam_entries_start; + /* Number of L2 CTX TCAM entries allocated */ + uint16_t l2_ctx_tcam_entries_stride; + /* Starting index of profile functions allocated to the session */ + 
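+ /* Editor's note (illustrative, not part of the upstream header): each
+  * resource in this response is reported as a starting index plus a count
+  * ("stride"), i.e. the session owns indices [start, start + stride).
+  * A hypothetical consumer could walk such a range as:
+  *
+  *   struct tf_session_hw_resc_alloc_output *out;   // hypothetical pointer
+  *   for (uint16_t i = out->l2_ctx_tcam_entries_start;
+  *        i < out->l2_ctx_tcam_entries_start +
+  *            out->l2_ctx_tcam_entries_stride;
+  *        i++)
+  *           use_l2_ctx_entry(i);   // hypothetical helper
+  */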
uint16_t prof_func_start; + /* Number of profile functions allocated */ + uint16_t prof_func_stride; + /* Starting index of profile TCAM entries allocated to the session */ + uint16_t prof_tcam_entries_start; + /* Number of profile TCAM entries allocated */ + uint16_t prof_tcam_entries_stride; + /* Starting index of EM profile ids allocated to the session */ + uint16_t em_prof_id_start; + /* Number of EM profile ids allocated */ + uint16_t em_prof_id_stride; + /* Starting index of EM record entries allocated to the session */ + uint16_t em_record_entries_start; + /* Number of EM record entries allocated */ + uint16_t em_record_entries_stride; + /* Starting index of WC TCAM profiles ids allocated to the session */ + uint16_t wc_tcam_prof_id_start; + /* Number of WC TCAM profile ids allocated */ + uint16_t wc_tcam_prof_id_stride; + /* Starting index of WC TCAM entries allocated to the session */ + uint16_t wc_tcam_entries_start; + /* Number of WC TCAM allocated */ + uint16_t wc_tcam_entries_stride; + /* Starting index of meter profiles allocated to the session */ + uint16_t meter_profiles_start; + /* Number of meter profiles allocated */ + uint16_t meter_profiles_stride; + /* Starting index of meter instance allocated to the session */ + uint16_t meter_inst_start; + /* Number of meter instance allocated */ + uint16_t meter_inst_stride; + /* Starting index of mirrors allocated to the session */ + uint16_t mirrors_start; + /* Number of mirrors allocated */ + uint16_t mirrors_stride; + /* Starting index of UPAR allocated to the session */ + uint16_t upar_start; + /* Number of UPAR allocated */ + uint16_t upar_stride; + /* Starting index of SP TCAM entries allocated to the session */ + uint16_t sp_tcam_entries_start; + /* Number of SP TCAM entries allocated */ + uint16_t sp_tcam_entries_stride; + /* Starting index of L2 functions allocated to the session */ + uint16_t l2_func_start; + /* Number of L2 functions allocated */ + uint16_t l2_func_stride; + /* Starting index of flexible key templates allocated to the session */ + uint16_t flex_key_templ_start; + /* Number of flexible key templates allocated */ + uint16_t flex_key_templ_stride; + /* Starting index of table scopes allocated to the session */ + uint16_t tbl_scope_start; + /* Number of table scopes allocated */ + uint16_t tbl_scope_stride; + /* Starting index of epoch0 entries allocated to the session */ + uint16_t epoch0_entries_start; + /* Number of epoch0 entries allocated */ + uint16_t epoch0_entries_stride; + /* Starting index of epoch1 entries allocated to the session */ + uint16_t epoch1_entries_start; + /* Number of epoch1 entries allocated */ + uint16_t epoch1_entries_stride; + /* Starting index of metadata allocated to the session */ + uint16_t metadata_start; + /* Number of metadata allocated */ + uint16_t metadata_stride; + /* Starting index of CT states allocated to the session */ + uint16_t ct_state_start; + /* Number of CT states allocated */ + uint16_t ct_state_stride; + /* Starting index of range profiles allocated to the session */ + uint16_t range_prof_start; + /* Number range profiles allocated */ + uint16_t range_prof_stride; + /* Starting index of range entries allocated to the session */ + uint16_t range_entries_start; + /* Number of range entries allocated */ + uint16_t range_entries_stride; + /* Starting index of LAG table entries allocated to the session */ + uint16_t lag_tbl_entries_start; + /* Number of LAG table entries allocated */ + uint16_t lag_tbl_entries_stride; +} tf_session_hw_resc_alloc_output_t, 
*ptf_session_hw_resc_alloc_output_t; + +/* Input params for session resource HW free */ +typedef struct tf_session_hw_resc_free_input { + /* Firmware session id returned when HWRM_TF_SESSION_OPEN is sent */ + uint32_t fw_session_id; + /* flags */ + uint16_t flags; + /* When set to 0, indicates the query apply to RX */ +#define TF_SESSION_HW_RESC_FREE_INPUT_FLAGS_DIR_RX (0x0) + /* When set to 1, indicates the query apply to TX */ +#define TF_SESSION_HW_RESC_FREE_INPUT_FLAGS_DIR_TX (0x1) + /* Unused */ + uint8_t unused[2]; + /* Starting index of L2 CTX TCAM entries allocated to the session */ + uint16_t l2_ctx_tcam_entries_start; + /* Number of L2 CTX TCAM entries allocated */ + uint16_t l2_ctx_tcam_entries_stride; + /* Starting index of profile functions allocated to the session */ + uint16_t prof_func_start; + /* Number of profile functions allocated */ + uint16_t prof_func_stride; + /* Starting index of profile TCAM entries allocated to the session */ + uint16_t prof_tcam_entries_start; + /* Number of profile TCAM entries allocated */ + uint16_t prof_tcam_entries_stride; + /* Starting index of EM profile ids allocated to the session */ + uint16_t em_prof_id_start; + /* Number of EM profile ids allocated */ + uint16_t em_prof_id_stride; + /* Starting index of EM record entries allocated to the session */ + uint16_t em_record_entries_start; + /* Number of EM record entries allocated */ + uint16_t em_record_entries_stride; + /* Starting index of WC TCAM profiles ids allocated to the session */ + uint16_t wc_tcam_prof_id_start; + /* Number of WC TCAM profile ids allocated */ + uint16_t wc_tcam_prof_id_stride; + /* Starting index of WC TCAM entries allocated to the session */ + uint16_t wc_tcam_entries_start; + /* Number of WC TCAM allocated */ + uint16_t wc_tcam_entries_stride; + /* Starting index of meter profiles allocated to the session */ + uint16_t meter_profiles_start; + /* Number of meter profiles allocated */ + uint16_t meter_profiles_stride; + /* Starting index of meter instance allocated to the session */ + uint16_t meter_inst_start; + /* Number of meter instance allocated */ + uint16_t meter_inst_stride; + /* Starting index of mirrors allocated to the session */ + uint16_t mirrors_start; + /* Number of mirrors allocated */ + uint16_t mirrors_stride; + /* Starting index of UPAR allocated to the session */ + uint16_t upar_start; + /* Number of UPAR allocated */ + uint16_t upar_stride; + /* Starting index of SP TCAM entries allocated to the session */ + uint16_t sp_tcam_entries_start; + /* Number of SP TCAM entries allocated */ + uint16_t sp_tcam_entries_stride; + /* Starting index of L2 functions allocated to the session */ + uint16_t l2_func_start; + /* Number of L2 functions allocated */ + uint16_t l2_func_stride; + /* Starting index of flexible key templates allocated to the session */ + uint16_t flex_key_templ_start; + /* Number of flexible key templates allocated */ + uint16_t flex_key_templ_stride; + /* Starting index of table scopes allocated to the session */ + uint16_t tbl_scope_start; + /* Number of table scopes allocated */ + uint16_t tbl_scope_stride; + /* Starting index of epoch0 entries allocated to the session */ + uint16_t epoch0_entries_start; + /* Number of epoch0 entries allocated */ + uint16_t epoch0_entries_stride; + /* Starting index of epoch1 entries allocated to the session */ + uint16_t epoch1_entries_start; + /* Number of epoch1 entries allocated */ + uint16_t epoch1_entries_stride; + /* Starting index of metadata allocated to the session */ + uint16_t 
metadata_start; + /* Number of metadata allocated */ + uint16_t metadata_stride; + /* Starting index of CT states allocated to the session */ + uint16_t ct_state_start; + /* Number of CT states allocated */ + uint16_t ct_state_stride; + /* Starting index of range profiles allocated to the session */ + uint16_t range_prof_start; + /* Number range profiles allocated */ + uint16_t range_prof_stride; + /* Starting index of range entries allocated to the session */ + uint16_t range_entries_start; + /* Number of range entries allocated */ + uint16_t range_entries_stride; + /* Starting index of LAG table entries allocated to the session */ + uint16_t lag_tbl_entries_start; + /* Number of LAG table entries allocated */ + uint16_t lag_tbl_entries_stride; +} tf_session_hw_resc_free_input_t, *ptf_session_hw_resc_free_input_t; + +/* Input params for session resource HW flush */ +typedef struct tf_session_hw_resc_flush_input { + /* Firmware session id returned when HWRM_TF_SESSION_OPEN is sent */ + uint32_t fw_session_id; + /* flags */ + uint16_t flags; + /* When set to 0, indicates the flush apply to RX */ +#define TF_SESSION_HW_RESC_FLUSH_INPUT_FLAGS_DIR_RX (0x0) + /* When set to 1, indicates the flush apply to TX */ +#define TF_SESSION_HW_RESC_FLUSH_INPUT_FLAGS_DIR_TX (0x1) + /* Unused */ + uint8_t unused[2]; + /* Starting index of L2 CTX TCAM entries allocated to the session */ + uint16_t l2_ctx_tcam_entries_start; + /* Number of L2 CTX TCAM entries allocated */ + uint16_t l2_ctx_tcam_entries_stride; + /* Starting index of profile functions allocated to the session */ + uint16_t prof_func_start; + /* Number of profile functions allocated */ + uint16_t prof_func_stride; + /* Starting index of profile TCAM entries allocated to the session */ + uint16_t prof_tcam_entries_start; + /* Number of profile TCAM entries allocated */ + uint16_t prof_tcam_entries_stride; + /* Starting index of EM profile ids allocated to the session */ + uint16_t em_prof_id_start; + /* Number of EM profile ids allocated */ + uint16_t em_prof_id_stride; + /* Starting index of EM record entries allocated to the session */ + uint16_t em_record_entries_start; + /* Number of EM record entries allocated */ + uint16_t em_record_entries_stride; + /* Starting index of WC TCAM profiles ids allocated to the session */ + uint16_t wc_tcam_prof_id_start; + /* Number of WC TCAM profile ids allocated */ + uint16_t wc_tcam_prof_id_stride; + /* Starting index of WC TCAM entries allocated to the session */ + uint16_t wc_tcam_entries_start; + /* Number of WC TCAM allocated */ + uint16_t wc_tcam_entries_stride; + /* Starting index of meter profiles allocated to the session */ + uint16_t meter_profiles_start; + /* Number of meter profiles allocated */ + uint16_t meter_profiles_stride; + /* Starting index of meter instance allocated to the session */ + uint16_t meter_inst_start; + /* Number of meter instance allocated */ + uint16_t meter_inst_stride; + /* Starting index of mirrors allocated to the session */ + uint16_t mirrors_start; + /* Number of mirrors allocated */ + uint16_t mirrors_stride; + /* Starting index of UPAR allocated to the session */ + uint16_t upar_start; + /* Number of UPAR allocated */ + uint16_t upar_stride; + /* Starting index of SP TCAM entries allocated to the session */ + uint16_t sp_tcam_entries_start; + /* Number of SP TCAM entries allocated */ + uint16_t sp_tcam_entries_stride; + /* Starting index of L2 functions allocated to the session */ + uint16_t l2_func_start; + /* Number of L2 functions allocated */ + uint16_t 
l2_func_stride; + /* Starting index of flexible key templates allocated to the session */ + uint16_t flex_key_templ_start; + /* Number of flexible key templates allocated */ + uint16_t flex_key_templ_stride; + /* Starting index of table scopes allocated to the session */ + uint16_t tbl_scope_start; + /* Number of table scopes allocated */ + uint16_t tbl_scope_stride; + /* Starting index of epoch0 entries allocated to the session */ + uint16_t epoch0_entries_start; + /* Number of epoch0 entries allocated */ + uint16_t epoch0_entries_stride; + /* Starting index of epoch1 entries allocated to the session */ + uint16_t epoch1_entries_start; + /* Number of epoch1 entries allocated */ + uint16_t epoch1_entries_stride; + /* Starting index of metadata allocated to the session */ + uint16_t metadata_start; + /* Number of metadata allocated */ + uint16_t metadata_stride; + /* Starting index of CT states allocated to the session */ + uint16_t ct_state_start; + /* Number of CT states allocated */ + uint16_t ct_state_stride; + /* Starting index of range profiles allocated to the session */ + uint16_t range_prof_start; + /* Number range profiles allocated */ + uint16_t range_prof_stride; + /* Starting index of range entries allocated to the session */ + uint16_t range_entries_start; + /* Number of range entries allocated */ + uint16_t range_entries_stride; + /* Starting index of LAG table entries allocated to the session */ + uint16_t lag_tbl_entries_start; + /* Number of LAG table entries allocated */ + uint16_t lag_tbl_entries_stride; +} tf_session_hw_resc_flush_input_t, *ptf_session_hw_resc_flush_input_t; + +/* Input params for session resource SRAM qcaps */ +typedef struct tf_session_sram_resc_qcaps_input { + /* Firmware session id returned when HWRM_TF_SESSION_OPEN is sent */ + uint32_t fw_session_id; + /* flags */ + uint16_t flags; + /* When set to 0, indicates the query apply to RX */ +#define TF_SESSION_SRAM_RESC_QCAPS_INPUT_FLAGS_DIR_RX (0x0) + /* When set to 1, indicates the query apply to TX */ +#define TF_SESSION_SRAM_RESC_QCAPS_INPUT_FLAGS_DIR_TX (0x1) +} tf_session_sram_resc_qcaps_input_t, *ptf_session_sram_resc_qcaps_input_t; + +/* Output params for session resource SRAM qcaps */ +typedef struct tf_session_sram_resc_qcaps_output { + /* Flags */ + uint32_t flags; + /* When set to 0, indicates Static partitioning */ +#define TF_SESSION_SRAM_RESC_QCAPS_OUTPUT_FLAGS_SESS_RES_STRATEGY_STATIC (0x0) + /* When set to 1, indicates Strategy 1 */ +#define TF_SESSION_SRAM_RESC_QCAPS_OUTPUT_FLAGS_SESS_RES_STRATEGY_1 (0x1) + /* When set to 1, indicates Strategy 2 */ +#define TF_SESSION_SRAM_RESC_QCAPS_OUTPUT_FLAGS_SESS_RES_STRATEGY_2 (0x2) + /* When set to 1, indicates Strategy 3 */ +#define TF_SESSION_SRAM_RESC_QCAPS_OUTPUT_FLAGS_SESS_RES_STRATEGY_3 (0x3) + /* Minimum guaranteed number of Full Action */ + uint16_t full_action_min; + /* Maximum non-guaranteed number of Full Action */ + uint16_t full_action_max; + /* Minimum guaranteed number of MCG */ + uint16_t mcg_min; + /* Maximum non-guaranteed number of MCG */ + uint16_t mcg_max; + /* Minimum guaranteed number of Encap 8B */ + uint16_t encap_8b_min; + /* Maximum non-guaranteed number of Encap 8B */ + uint16_t encap_8b_max; + /* Minimum guaranteed number of Encap 16B */ + uint16_t encap_16b_min; + /* Maximum non-guaranteed number of Encap 16B */ + uint16_t encap_16b_max; + /* Minimum guaranteed number of Encap 64B */ + uint16_t encap_64b_min; + /* Maximum non-guaranteed number of Encap 64B */ + uint16_t encap_64b_max; + /* Minimum guaranteed number 
of SP SMAC */ + uint16_t sp_smac_min; + /* Maximum non-guaranteed number of SP SMAC */ + uint16_t sp_smac_max; + /* Minimum guaranteed number of SP SMAC IPv4 */ + uint16_t sp_smac_ipv4_min; + /* Maximum non-guaranteed number of SP SMAC IPv4 */ + uint16_t sp_smac_ipv4_max; + /* Minimum guaranteed number of SP SMAC IPv6 */ + uint16_t sp_smac_ipv6_min; + /* Maximum non-guaranteed number of SP SMAC IPv6 */ + uint16_t sp_smac_ipv6_max; + /* Minimum guaranteed number of Counter 64B */ + uint16_t counter_64b_min; + /* Maximum non-guaranteed number of Counter 64B */ + uint16_t counter_64b_max; + /* Minimum guaranteed number of NAT SPORT */ + uint16_t nat_sport_min; + /* Maximum non-guaranteed number of NAT SPORT */ + uint16_t nat_sport_max; + /* Minimum guaranteed number of NAT DPORT */ + uint16_t nat_dport_min; + /* Maximum non-guaranteed number of NAT DPORT */ + uint16_t nat_dport_max; + /* Minimum guaranteed number of NAT S_IPV4 */ + uint16_t nat_s_ipv4_min; + /* Maximum non-guaranteed number of NAT S_IPV4 */ + uint16_t nat_s_ipv4_max; + /* Minimum guaranteed number of NAT D_IPV4 */ + uint16_t nat_d_ipv4_min; + /* Maximum non-guaranteed number of NAT D_IPV4 */ + uint16_t nat_d_ipv4_max; +} tf_session_sram_resc_qcaps_output_t, *ptf_session_sram_resc_qcaps_output_t; + +/* Input params for session resource SRAM alloc */ +typedef struct tf_session_sram_resc_alloc_input { + /* FW Session Id */ + uint32_t fw_session_id; + /* flags */ + uint16_t flags; + /* When set to 0, indicates the query apply to RX */ +#define TF_SESSION_SRAM_RESC_ALLOC_INPUT_FLAGS_DIR_RX (0x0) + /* When set to 1, indicates the query apply to TX */ +#define TF_SESSION_SRAM_RESC_ALLOC_INPUT_FLAGS_DIR_TX (0x1) + /* Unused */ + uint8_t unused[2]; + /* Number of full action SRAM entries to be allocated */ + uint16_t num_full_action; + /* Number of multicast groups to be allocated */ + uint16_t num_mcg; + /* Number of Encap 8B entries to be allocated */ + uint16_t num_encap_8b; + /* Number of Encap 16B entries to be allocated */ + uint16_t num_encap_16b; + /* Number of Encap 64B entries to be allocated */ + uint16_t num_encap_64b; + /* Number of SP SMAC entries to be allocated */ + uint16_t num_sp_smac; + /* Number of SP SMAC IPv4 entries to be allocated */ + uint16_t num_sp_smac_ipv4; + /* Number of SP SMAC IPv6 entries to be allocated */ + uint16_t num_sp_smac_ipv6; + /* Number of Counter 64B entries to be allocated */ + uint16_t num_counter_64b; + /* Number of NAT source ports to be allocated */ + uint16_t num_nat_sport; + /* Number of NAT destination ports to be allocated */ + uint16_t num_nat_dport; + /* Number of NAT source iPV4 addresses to be allocated */ + uint16_t num_nat_s_ipv4; + /* Number of NAT destination IPV4 addresses to be allocated */ + uint16_t num_nat_d_ipv4; +} tf_session_sram_resc_alloc_input_t, *ptf_session_sram_resc_alloc_input_t; + +/* Output params for session resource SRAM alloc */ +typedef struct tf_session_sram_resc_alloc_output { + /* Unused */ + uint8_t unused[2]; + /* Starting index of full action SRAM entries allocated to the session */ + uint16_t full_action_start; + /* Number of full action SRAM entries allocated */ + uint16_t full_action_stride; + /* Starting index of multicast groups allocated to this session */ + uint16_t mcg_start; + /* Number of multicast groups allocated */ + uint16_t mcg_stride; + /* Starting index of encap 8B entries allocated to the session */ + uint16_t encap_8b_start; + /* Number of encap 8B entries allocated */ + uint16_t encap_8b_stride; + /* Starting index of encap 16B 
entries allocated to the session */ + uint16_t encap_16b_start; + /* Number of encap 16B entries allocated */ + uint16_t encap_16b_stride; + /* Starting index of encap 64B entries allocated to the session */ + uint16_t encap_64b_start; + /* Number of encap 64B entries allocated */ + uint16_t encap_64b_stride; + /* Starting index of SP SMAC entries allocated to the session */ + uint16_t sp_smac_start; + /* Number of SP SMAC entries allocated */ + uint16_t sp_smac_stride; + /* Starting index of SP SMAC IPv4 entries allocated to the session */ + uint16_t sp_smac_ipv4_start; + /* Number of SP SMAC IPv4 entries allocated */ + uint16_t sp_smac_ipv4_stride; + /* Starting index of SP SMAC IPv6 entries allocated to the session */ + uint16_t sp_smac_ipv6_start; + /* Number of SP SMAC IPv6 entries allocated */ + uint16_t sp_smac_ipv6_stride; + /* Starting index of Counter 64B entries allocated to the session */ + uint16_t counter_64b_start; + /* Number of Counter 64B entries allocated */ + uint16_t counter_64b_stride; + /* Starting index of NAT source ports allocated to the session */ + uint16_t nat_sport_start; + /* Number of NAT source ports allocated */ + uint16_t nat_sport_stride; + /* Starting index of NAT destination ports allocated to the session */ + uint16_t nat_dport_start; + /* Number of NAT destination ports allocated */ + uint16_t nat_dport_stride; + /* Starting index of NAT source IPV4 addresses allocated to the session */ + uint16_t nat_s_ipv4_start; + /* Number of NAT source IPV4 addresses allocated */ + uint16_t nat_s_ipv4_stride; + /* + * Starting index of NAT destination IPV4 addresses allocated to the + * session + */ + uint16_t nat_d_ipv4_start; + /* Number of NAT destination IPV4 addresses allocated */ + uint16_t nat_d_ipv4_stride; +} tf_session_sram_resc_alloc_output_t, *ptf_session_sram_resc_alloc_output_t; + +/* Input params for session resource SRAM free */ +typedef struct tf_session_sram_resc_free_input { + /* Firmware session id returned when HWRM_TF_SESSION_OPEN is sent */ + uint32_t fw_session_id; + /* flags */ + uint16_t flags; + /* When set to 0, indicates the query apply to RX */ +#define TF_SESSION_SRAM_RESC_FREE_INPUT_FLAGS_DIR_RX (0x0) + /* When set to 1, indicates the query apply to TX */ +#define TF_SESSION_SRAM_RESC_FREE_INPUT_FLAGS_DIR_TX (0x1) + /* Starting index of full action SRAM entries allocated to the session */ + uint16_t full_action_start; + /* Number of full action SRAM entries allocated */ + uint16_t full_action_stride; + /* Starting index of multicast groups allocated to this session */ + uint16_t mcg_start; + /* Number of multicast groups allocated */ + uint16_t mcg_stride; + /* Starting index of encap 8B entries allocated to the session */ + uint16_t encap_8b_start; + /* Number of encap 8B entries allocated */ + uint16_t encap_8b_stride; + /* Starting index of encap 16B entries allocated to the session */ + uint16_t encap_16b_start; + /* Number of encap 16B entries allocated */ + uint16_t encap_16b_stride; + /* Starting index of encap 64B entries allocated to the session */ + uint16_t encap_64b_start; + /* Number of encap 64B entries allocated */ + uint16_t encap_64b_stride; + /* Starting index of SP SMAC entries allocated to the session */ + uint16_t sp_smac_start; + /* Number of SP SMAC entries allocated */ + uint16_t sp_smac_stride; + /* Starting index of SP SMAC IPv4 entries allocated to the session */ + uint16_t sp_smac_ipv4_start; + /* Number of SP SMAC IPv4 entries allocated */ + uint16_t sp_smac_ipv4_stride; + /* Starting index of SP SMAC 
IPv6 entries allocated to the session */ + uint16_t sp_smac_ipv6_start; + /* Number of SP SMAC IPv6 entries allocated */ + uint16_t sp_smac_ipv6_stride; + /* Starting index of Counter 64B entries allocated to the session */ + uint16_t counter_64b_start; + /* Number of Counter 64B entries allocated */ + uint16_t counter_64b_stride; + /* Starting index of NAT source ports allocated to the session */ + uint16_t nat_sport_start; + /* Number of NAT source ports allocated */ + uint16_t nat_sport_stride; + /* Starting index of NAT destination ports allocated to the session */ + uint16_t nat_dport_start; + /* Number of NAT destination ports allocated */ + uint16_t nat_dport_stride; + /* Starting index of NAT source IPV4 addresses allocated to the session */ + uint16_t nat_s_ipv4_start; + /* Number of NAT source IPV4 addresses allocated */ + uint16_t nat_s_ipv4_stride; + /* + * Starting index of NAT destination IPV4 addresses allocated to the + * session + */ + uint16_t nat_d_ipv4_start; + /* Number of NAT destination IPV4 addresses allocated */ + uint16_t nat_d_ipv4_stride; +} tf_session_sram_resc_free_input_t, *ptf_session_sram_resc_free_input_t; + +/* Input params for session resource SRAM flush */ +typedef struct tf_session_sram_resc_flush_input { + /* Firmware session id returned when HWRM_TF_SESSION_OPEN is sent */ + uint32_t fw_session_id; + /* flags */ + uint16_t flags; + /* When set to 0, indicates the flush apply to RX */ +#define TF_SESSION_SRAM_RESC_FLUSH_INPUT_FLAGS_DIR_RX (0x0) + /* When set to 1, indicates the flush apply to TX */ +#define TF_SESSION_SRAM_RESC_FLUSH_INPUT_FLAGS_DIR_TX (0x1) + /* Starting index of full action SRAM entries allocated to the session */ + uint16_t full_action_start; + /* Number of full action SRAM entries allocated */ + uint16_t full_action_stride; + /* Starting index of multicast groups allocated to this session */ + uint16_t mcg_start; + /* Number of multicast groups allocated */ + uint16_t mcg_stride; + /* Starting index of encap 8B entries allocated to the session */ + uint16_t encap_8b_start; + /* Number of encap 8B entries allocated */ + uint16_t encap_8b_stride; + /* Starting index of encap 16B entries allocated to the session */ + uint16_t encap_16b_start; + /* Number of encap 16B entries allocated */ + uint16_t encap_16b_stride; + /* Starting index of encap 64B entries allocated to the session */ + uint16_t encap_64b_start; + /* Number of encap 64B entries allocated */ + uint16_t encap_64b_stride; + /* Starting index of SP SMAC entries allocated to the session */ + uint16_t sp_smac_start; + /* Number of SP SMAC entries allocated */ + uint16_t sp_smac_stride; + /* Starting index of SP SMAC IPv4 entries allocated to the session */ + uint16_t sp_smac_ipv4_start; + /* Number of SP SMAC IPv4 entries allocated */ + uint16_t sp_smac_ipv4_stride; + /* Starting index of SP SMAC IPv6 entries allocated to the session */ + uint16_t sp_smac_ipv6_start; + /* Number of SP SMAC IPv6 entries allocated */ + uint16_t sp_smac_ipv6_stride; + /* Starting index of Counter 64B entries allocated to the session */ + uint16_t counter_64b_start; + /* Number of Counter 64B entries allocated */ + uint16_t counter_64b_stride; + /* Starting index of NAT source ports allocated to the session */ + uint16_t nat_sport_start; + /* Number of NAT source ports allocated */ + uint16_t nat_sport_stride; + /* Starting index of NAT destination ports allocated to the session */ + uint16_t nat_dport_start; + /* Number of NAT destination ports allocated */ + uint16_t nat_dport_stride; + /* 
Starting index of NAT source IPV4 addresses allocated to the session */ + uint16_t nat_s_ipv4_start; + /* Number of NAT source IPV4 addresses allocated */ + uint16_t nat_s_ipv4_stride; + /* + * Starting index of NAT destination IPV4 addresses allocated to the + * session + */ + uint16_t nat_d_ipv4_start; + /* Number of NAT destination IPV4 addresses allocated */ + uint16_t nat_d_ipv4_stride; +} tf_session_sram_resc_flush_input_t, *ptf_session_sram_resc_flush_input_t; + +/* Input params for table type set */ +typedef struct tf_tbl_type_set_input { + /* Session Id */ + uint32_t fw_session_id; + /* flags */ + uint16_t flags; + /* When set to 0, indicates the get apply to RX */ +#define TF_TBL_TYPE_SET_INPUT_FLAGS_DIR_RX (0x0) + /* When set to 1, indicates the get apply to TX */ +#define TF_TBL_TYPE_SET_INPUT_FLAGS_DIR_TX (0x1) + /* Type of the object to set */ + uint32_t type; + /* Size of the data to set in bytes */ + uint16_t size; + /* Data to set */ + uint8_t data[TF_BULK_SEND]; + /* Index to set */ + uint32_t index; +} tf_tbl_type_set_input_t, *ptf_tbl_type_set_input_t; + +/* Input params for table type get */ +typedef struct tf_tbl_type_get_input { + /* Session Id */ + uint32_t fw_session_id; + /* flags */ + uint16_t flags; + /* When set to 0, indicates the get apply to RX */ +#define TF_TBL_TYPE_GET_INPUT_FLAGS_DIR_RX (0x0) + /* When set to 1, indicates the get apply to TX */ +#define TF_TBL_TYPE_GET_INPUT_FLAGS_DIR_TX (0x1) + /* Type of the object to set */ + uint32_t type; + /* Index to get */ + uint32_t index; +} tf_tbl_type_get_input_t, *ptf_tbl_type_get_input_t; + +/* Output params for table type get */ +typedef struct tf_tbl_type_get_output { + /* Size of the data read in bytes */ + uint16_t size; + /* Data read */ + uint8_t data[TF_BULK_RECV]; +} tf_tbl_type_get_output_t, *ptf_tbl_type_get_output_t; + +/* Input params for EM internal rule insert */ +typedef struct tf_em_internal_insert_input { + /* Firmware Session Id */ + uint32_t fw_session_id; + /* flags */ + uint16_t flags; + /* When set to 0, indicates the get apply to RX */ +#define TF_EM_INTERNAL_INSERT_INPUT_FLAGS_DIR_RX (0x0) + /* When set to 1, indicates the get apply to TX */ +#define TF_EM_INTERNAL_INSERT_INPUT_FLAGS_DIR_TX (0x1) + /* strength */ + uint16_t strength; + /* index to action */ + uint32_t action_ptr; + /* index of em record */ + uint32_t em_record_idx; + /* EM Key value */ + uint64_t em_key[8]; + /* number of bits in em_key */ + uint16_t em_key_bitlen; +} tf_em_internal_insert_input_t, *ptf_em_internal_insert_input_t; + +/* Output params for EM internal rule insert */ +typedef struct tf_em_internal_insert_output { + /* EM record pointer index */ + uint16_t rptr_index; + /* EM record offset 0~3 */ + uint8_t rptr_entry; +} tf_em_internal_insert_output_t, *ptf_em_internal_insert_output_t; + +/* Input params for EM INTERNAL rule delete */ +typedef struct tf_em_internal_delete_input { + /* Session Id */ + uint32_t tf_session_id; + /* flags */ + uint16_t flags; + /* When set to 0, indicates the get apply to RX */ +#define TF_EM_INTERNAL_DELETE_INPUT_FLAGS_DIR_RX (0x0) + /* When set to 1, indicates the get apply to TX */ +#define TF_EM_INTERNAL_DELETE_INPUT_FLAGS_DIR_TX (0x1) + /* EM internal flow hanndle */ + uint64_t flow_handle; + /* EM Key value */ + uint64_t em_key[8]; + /* number of bits in em_key */ + uint16_t em_key_bitlen; +} tf_em_internal_delete_input_t, *ptf_em_internal_delete_input_t; + +#endif /* _HWRM_TF_H_ */ diff --git a/src/spdk/dpdk/drivers/net/bnxt/tf_core/lookup3.h 
b/src/spdk/dpdk/drivers/net/bnxt/tf_core/lookup3.h new file mode 100644 index 000000000..e5abcc2f2 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnxt/tf_core/lookup3.h @@ -0,0 +1,162 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Based on lookup3.c, by Bob Jenkins, May 2006, Public Domain. + * http://www.burtleburtle.net/bob/c/lookup3.c + * + * These functions for producing 32-bit hashes for has table lookup. + * hashword(), hashlittle(), hashlittle2(), hashbig(), mix(), and final() + * are externally useful functions. Routines to test the hash are included + * if SELF_TEST is defined. You can use this free for any purpose. It is in + * the public domain. It has no warranty. + */ + +#ifndef _LOOKUP3_H_ +#define _LOOKUP3_H_ + +#define rot(x, k) (((x) << (k)) | ((x) >> (32 - (k)))) + +/** ------------------------------------------------------------------------- + * This is reversible, so any information in (a,b,c) before mix() is + * still in (a,b,c) after mix(). + * + * If four pairs of (a,b,c) inputs are run through mix(), or through + * mix() in reverse, there are at least 32 bits of the output that + * are sometimes the same for one pair and different for another pair. + * This was tested for: + * pairs that differed by one bit, by two bits, in any combination + * of top bits of (a,b,c), or in any combination of bottom bits of + * (a,b,c). + * "differ" is defined as +, -, ^, or ~^. For + and -, I transformed + * the output delta to a Gray code (a^(a>>1)) so a string of 1's (as + * is commonly produced by subtraction) look like a single 1-bit + * difference. + * the base values were pseudorandom, all zero but one bit set, or + * all zero plus a counter that starts at zero. + * + * Some k values for my "a-=c; a^=rot(c,k); c+=b;" arrangement that + * satisfy this are + * 4 6 8 16 19 4 + * 9 15 3 18 27 15 + * 14 9 3 7 17 3 + * Well, "9 15 3 18 27 15" didn't quite get 32 bits diffing + * for "differ" defined as + with a one-bit base and a two-bit delta. I + * used http://burtleburtle.net/bob/hash/avalanche.html to choose + * the operations, constants, and arrangements of the variables. + * + * This does not achieve avalanche. There are input bits of (a,b,c) + * that fail to affect some output bits of (a,b,c), especially of a. The + * most thoroughly mixed value is c, but it doesn't really even achieve + * avalanche in c. + * + * This allows some parallelism. Read-after-writes are good at doubling + * the number of bits affected, so the goal of mixing pulls in the opposite + * direction as the goal of parallelism. I did what I could. Rotates + * seem to cost as much as shifts on every machine I could lay my hands + * on, and rotates are much kinder to the top and bottom bits, so I used + * rotates. + * -------------------------------------------------------------------------- + */ +#define mix(a, b, c) \ +{ \ + (a) -= (c); (a) ^= rot((c), 4); (c) += b; \ + (b) -= (a); (b) ^= rot((a), 6); (a) += c; \ + (c) -= (b); (c) ^= rot((b), 8); (b) += a; \ + (a) -= (c); (a) ^= rot((c), 16); (c) += b; \ + (b) -= (a); (b) ^= rot((a), 19); (a) += c; \ + (c) -= (b); (c) ^= rot((b), 4); (b) += a; \ +} + +/** -------------------------------------------------------------------------- + * final -- final mixing of 3 32-bit values (a,b,c) into c + * + * Pairs of (a,b,c) values differing in only a few bits will usually + * produce values of c that look totally different. 
This was tested for + * pairs that differed by one bit, by two bits, in any combination + * of top bits of (a,b,c), or in any combination of bottom bits of + * (a,b,c). + * "differ" is defined as +, -, ^, or ~^. For + and -, I transformed + * the output delta to a Gray code (a^(a>>1)) so a string of 1's (as + * is commonly produced by subtraction) look like a single 1-bit + * difference. + * the base values were pseudorandom, all zero but one bit set, or + * all zero plus a counter that starts at zero. + * + * These constants passed: + * 14 11 25 16 4 14 24 + * 12 14 25 16 4 14 24 + * and these came close: + * 4 8 15 26 3 22 24 + * 10 8 15 26 3 22 24 + * 11 8 15 26 3 22 24 + * -------------------------------------------------------------------------- + */ +#define final(a, b, c) \ +{ \ + (c) ^= (b); (c) -= rot((b), 14); \ + (a) ^= (c); (a) -= rot((c), 11); \ + (b) ^= (a); (b) -= rot((a), 25); \ + (c) ^= (b); (c) -= rot((b), 16); \ + (a) ^= (c); (a) -= rot((c), 4); \ + (b) ^= (a); (b) -= rot((a), 14); \ + (c) ^= (b); (c) -= rot((b), 24); \ +} + +/** -------------------------------------------------------------------- + * This works on all machines. To be useful, it requires + * -- that the key be an array of uint32_t's, and + * -- that the length be the number of uint32_t's in the key + + * The function hashword() is identical to hashlittle() on little-endian + * machines, and identical to hashbig() on big-endian machines, + * except that the length has to be measured in uint32_ts rather than in + * bytes. hashlittle() is more complicated than hashword() only because + * hashlittle() has to dance around fitting the key bytes into registers. + * + * Input Parameters: + * key: an array of uint32_t values + * length: the length of the key, in uint32_ts + * initval: the previous hash, or an arbitrary value + * -------------------------------------------------------------------- + */ +static inline uint32_t hashword(const uint32_t *k, + size_t length, + uint32_t initval) { + uint32_t a, b, c; + int index = 12; + + /* Set up the internal state */ + a = 0xdeadbeef + (((uint32_t)length) << 2) + initval; + b = a; + c = a; + + /*-------------------------------------------- handle most of the key */ + while (length > 3) { + a += k[index]; + b += k[index - 1]; + c += k[index - 2]; + mix(a, b, c); + length -= 3; + index -= 3; + } + + /*-------------------------------------- handle the last 3 uint32_t's */ + switch (length) { /* all the case statements fall through */ + case 3: + c += k[index - 2]; + /* Falls through. */ + case 2: + b += k[index - 1]; + /* Falls through. */ + case 1: + a += k[index]; + final(a, b, c); + /* Falls through. */ + case 0: /* case 0: nothing left to add */ + /* FALLTHROUGH */ + break; + } + /*------------------------------------------------- report the result */ + return c; +} + +#endif /* _LOOKUP3_H_ */ diff --git a/src/spdk/dpdk/drivers/net/bnxt/tf_core/rand.c b/src/spdk/dpdk/drivers/net/bnxt/tf_core/rand.c new file mode 100644 index 000000000..32028df90 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnxt/tf_core/rand.c @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019-2020 Broadcom + * All rights reserved. 
+ */ + +/* Random Number Functions */ + +#include +#include +#include "rand.h" + +#define TF_RAND_LFSR_INIT_VALUE 0xACE1u + +uint16_t lfsr = TF_RAND_LFSR_INIT_VALUE; +uint32_t bit; + +/** + * Generates a 16 bit pseudo random number + * + * Returns: + * uint16_t number + */ +uint16_t rand16(void) +{ + bit = ((lfsr >> 0) ^ (lfsr >> 2) ^ (lfsr >> 3) ^ (lfsr >> 5)) & 1; + return lfsr = (lfsr >> 1) | (bit << 15); +} + +/** + * Generates a 32 bit pseudo random number + * + * Returns: + * uint32_t number + */ +uint32_t rand32(void) +{ + return (rand16() << 16) | rand16(); +} + +/** + * Resets the seed used by the pseudo random number generator + */ +void rand_init(void) +{ + lfsr = TF_RAND_LFSR_INIT_VALUE; + bit = 0; +} diff --git a/src/spdk/dpdk/drivers/net/bnxt/tf_core/rand.h b/src/spdk/dpdk/drivers/net/bnxt/tf_core/rand.h new file mode 100644 index 000000000..31cd76e8b --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnxt/tf_core/rand.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019-2020 Broadcom + * All rights reserved. + */ + +/* Random Number Functions */ +#ifndef __RAND_H__ +#define __RAND_H__ + +/** + * Generates a 16 bit pseudo random number + * + * Returns: + * uint16_t number + * + */ +uint16_t rand16(void); + +/** + * Generates a 32 bit pseudo random number + * + * Returns: + * uint32_t number + * + */ +uint32_t rand32(void); + +/** + * Resets the seed used by the pseudo random number generator + * + * Returns: + * + */ +void rand_init(void); + +#endif /* __RAND_H__ */ diff --git a/src/spdk/dpdk/drivers/net/bnxt/tf_core/stack.c b/src/spdk/dpdk/drivers/net/bnxt/tf_core/stack.c new file mode 100644 index 000000000..9cfbd244f --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnxt/tf_core/stack.c @@ -0,0 +1,107 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019-2020 Broadcom + * All rights reserved. + */ + +#include +#include +#include +#include +#include +#include "stack.h" + +#define STACK_EMPTY -1 + +/* Initialize stack + */ +int +stack_init(int num_entries, uint32_t *items, struct stack *st) +{ + if (items == NULL || st == NULL) + return -EINVAL; + + st->max = num_entries; + st->top = STACK_EMPTY; + st->items = items; + + return 0; +} + +/* Return the size of the stack + */ +int32_t +stack_size(struct stack *st) +{ + return st->top + 1; +} + +/* Check if the stack is empty + */ +bool +stack_is_empty(struct stack *st) +{ + return st->top == STACK_EMPTY; +} + +/* Check if the stack is full + */ +bool +stack_is_full(struct stack *st) +{ + return st->top == st->max - 1; +} + +/* Add element x to the stack + */ +int +stack_push(struct stack *st, uint32_t x) +{ + if (stack_is_full(st)) + return -EOVERFLOW; + + /* add an element and increments the top index + */ + st->items[++st->top] = x; + + return 0; +} + +/* Pop top element x from the stack and return + * in user provided location. 
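+ *
+ * Illustrative usage of the push/pop pair (editor's sketch, not part of
+ * the upstream file); the caller supplies the backing items array:
+ *
+ *   uint32_t items[8];
+ *   struct stack st;
+ *   uint32_t val;
+ *
+ *   if (stack_init(8, items, &st) == 0 &&
+ *       stack_push(&st, 42) == 0 &&
+ *       stack_pop(&st, &val) == 0)
+ *           printf("popped %u\n", val);   // prints 42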
+ */ +int +stack_pop(struct stack *st, uint32_t *x) +{ + if (stack_is_empty(st)) + return -ENOENT; + + *x = st->items[st->top]; + st->top--; + + return 0; +} + +/* Dump the stack + */ +void stack_dump(struct stack *st) +{ + int i, j; + + printf("top=%d\n", st->top); + printf("max=%d\n", st->max); + + if (st->top == -1) { + printf("stack is empty\n"); + return; + } + + for (i = 0; i < st->max + 7 / 8; i++) { + printf("item[%d] 0x%08x", i, st->items[i]); + + for (j = 0; j < 7; j++) { + if (i++ < st->max - 1) + printf(" 0x%08x", st->items[i]); + } + printf("\n"); + } +} diff --git a/src/spdk/dpdk/drivers/net/bnxt/tf_core/stack.h b/src/spdk/dpdk/drivers/net/bnxt/tf_core/stack.h new file mode 100644 index 000000000..ebd055592 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnxt/tf_core/stack.h @@ -0,0 +1,107 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019-2020 Broadcom + * All rights reserved. + */ +#ifndef _STACK_H_ +#define _STACK_H_ + +#include +#include +#include +#include + +/** Stack data structure + */ +struct stack { + int max; /**< Maximum number of entries */ + int top; /**< maximum value in stack */ + uint32_t *items; /**< items in the stack */ +}; + +/** Initialize stack of uint32_t elements + * + * [in] num_entries + * maximum number of elements in the stack + * + * [in] items + * pointer to items (must be sized to (uint32_t * num_entries) + * + * s[in] st + * pointer to the stack structure + * + * return + * 0 for success + */ +int stack_init(int num_entries, + uint32_t *items, + struct stack *st); + +/** Return the size of the stack + * + * [in] st + * pointer to the stack + * + * return + * number of elements + */ +int32_t stack_size(struct stack *st); + +/** Check if the stack is empty + * + * [in] st + * pointer to the stack + * + * return + * true or false + */ +bool stack_is_empty(struct stack *st); + +/** Check if the stack is full + * + * [in] st + * pointer to the stack + * + * return + * true or false + */ +bool stack_is_full(struct stack *st); + +/** Add element x to the stack + * + * [in] st + * pointer to the stack + * + * [in] x + * value to push on the stack + * return + * 0 for success + */ +int stack_push(struct stack *st, uint32_t x); + +/** Pop top element x from the stack and return + * in user provided location. + * + * [in] st + * pointer to the stack + * + * [in, out] x + * pointer to where the value popped will be written + * + * return + * 0 for success + */ +int stack_pop(struct stack *st, uint32_t *x); + +/** Dump stack information + * + * Warning: Don't use for large stacks due to prints + * + * [in] st + * pointer to the stack + * + * return + * none + */ +void stack_dump(struct stack *st); + +#endif /* _STACK_H_ */ diff --git a/src/spdk/dpdk/drivers/net/bnxt/tf_core/tf_core.c b/src/spdk/dpdk/drivers/net/bnxt/tf_core/tf_core.c new file mode 100644 index 000000000..cf9f36adb --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnxt/tf_core/tf_core.c @@ -0,0 +1,656 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019-2020 Broadcom + * All rights reserved. 
+ */ + +#include + +#include "tf_core.h" +#include "tf_session.h" +#include "tf_tbl.h" +#include "tf_em.h" +#include "tf_rm.h" +#include "tf_msg.h" +#include "tfp.h" +#include "bitalloc.h" +#include "bnxt.h" +#include "rand.h" + +static inline uint32_t SWAP_WORDS32(uint32_t val32) +{ + return (((val32 & 0x0000ffff) << 16) | + ((val32 & 0xffff0000) >> 16)); +} + +static void tf_seeds_init(struct tf_session *session) +{ + int i; + uint32_t r; + + /* Initialize the lfsr */ + rand_init(); + + /* RX and TX use the same seed values */ + session->lkup_lkup3_init_cfg[TF_DIR_RX] = + session->lkup_lkup3_init_cfg[TF_DIR_TX] = + SWAP_WORDS32(rand32()); + + for (i = 0; i < TF_LKUP_SEED_MEM_SIZE / 2; i++) { + r = SWAP_WORDS32(rand32()); + session->lkup_em_seed_mem[TF_DIR_RX][i * 2] = r; + session->lkup_em_seed_mem[TF_DIR_TX][i * 2] = r; + r = SWAP_WORDS32(rand32()); + session->lkup_em_seed_mem[TF_DIR_RX][i * 2 + 1] = (r & 0x1); + session->lkup_em_seed_mem[TF_DIR_TX][i * 2 + 1] = (r & 0x1); + } +} + +int +tf_open_session(struct tf *tfp, + struct tf_open_session_parms *parms) +{ + int rc; + struct tf_session *session; + struct tfp_calloc_parms alloc_parms; + unsigned int domain, bus, slot, device; + uint8_t fw_session_id; + + if (tfp == NULL || parms == NULL) + return -EINVAL; + + /* Filter out any non-supported device types on the Core + * side. It is assumed that the Firmware will be supported if + * firmware open session succeeds. + */ + if (parms->device_type != TF_DEVICE_TYPE_WH) + return -ENOTSUP; + + /* Build the beginning of session_id */ + rc = sscanf(parms->ctrl_chan_name, + "%x:%x:%x.%d", + &domain, + &bus, + &slot, + &device); + if (rc != 4) { + PMD_DRV_LOG(ERR, + "Failed to scan device ctrl_chan_name\n"); + return -EINVAL; + } + + /* open FW session and get a new session_id */ + rc = tf_msg_session_open(tfp, + parms->ctrl_chan_name, + &fw_session_id); + if (rc) { + /* Log error */ + if (rc == -EEXIST) + PMD_DRV_LOG(ERR, + "Session is already open, rc:%d\n", + rc); + else + PMD_DRV_LOG(ERR, + "Open message send failed, rc:%d\n", + rc); + + parms->session_id.id = TF_FW_SESSION_ID_INVALID; + return rc; + } + + /* Allocate session */ + alloc_parms.nitems = 1; + alloc_parms.size = sizeof(struct tf_session_info); + alloc_parms.alignment = 0; + rc = tfp_calloc(&alloc_parms); + if (rc) { + /* Log error */ + PMD_DRV_LOG(ERR, + "Failed to allocate session info, rc:%d\n", + rc); + goto cleanup; + } + + tfp->session = (struct tf_session_info *)alloc_parms.mem_va; + + /* Allocate core data for the session */ + alloc_parms.nitems = 1; + alloc_parms.size = sizeof(struct tf_session); + alloc_parms.alignment = 0; + rc = tfp_calloc(&alloc_parms); + if (rc) { + /* Log error */ + PMD_DRV_LOG(ERR, + "Failed to allocate session data, rc:%d\n", + rc); + goto cleanup; + } + + tfp->session->core_data = alloc_parms.mem_va; + + session = (struct tf_session *)tfp->session->core_data; + tfp_memcpy(session->ctrl_chan_name, + parms->ctrl_chan_name, + TF_SESSION_NAME_MAX); + + /* Initialize Session */ + session->device_type = parms->device_type; + tf_rm_init(tfp); + + /* Construct the Session ID */ + session->session_id.internal.domain = domain; + session->session_id.internal.bus = bus; + session->session_id.internal.device = device; + session->session_id.internal.fw_session_id = fw_session_id; + + rc = tf_msg_session_qcfg(tfp); + if (rc) { + /* Log error */ + PMD_DRV_LOG(ERR, + "Query config message send failed, rc:%d\n", + rc); + goto cleanup_close; + } + + /* Shadow DB configuration */ + if (parms->shadow_copy) { + /* 
Ignore shadow_copy setting */ + session->shadow_copy = 0;/* parms->shadow_copy; */ +#if (TF_SHADOW == 1) + rc = tf_rm_shadow_db_init(tfs); + if (rc) + PMD_DRV_LOG(ERR, + "Shadow DB Initialization failed\n, rc:%d", + rc); + /* Add additional processing */ +#endif /* TF_SHADOW */ + } + + /* Adjust the Session with what firmware allowed us to get */ + rc = tf_rm_allocate_validate(tfp); + if (rc) { + /* Log error */ + goto cleanup_close; + } + + /* Setup hash seeds */ + tf_seeds_init(session); + + session->ref_count++; + + /* Return session ID */ + parms->session_id = session->session_id; + + PMD_DRV_LOG(INFO, + "Session created, session_id:%d\n", + parms->session_id.id); + + PMD_DRV_LOG(INFO, + "domain:%d, bus:%d, device:%d, fw_session_id:%d\n", + parms->session_id.internal.domain, + parms->session_id.internal.bus, + parms->session_id.internal.device, + parms->session_id.internal.fw_session_id); + + return 0; + + cleanup: + tfp_free(tfp->session->core_data); + tfp_free(tfp->session); + tfp->session = NULL; + return rc; + + cleanup_close: + tf_close_session(tfp); + return -EINVAL; +} + +int +tf_attach_session(struct tf *tfp __rte_unused, + struct tf_attach_session_parms *parms __rte_unused) +{ +#if (TF_SHARED == 1) + int rc; + + if (tfp == NULL) + return -EINVAL; + + /* - Open the shared memory for the attach_chan_name + * - Point to the shared session for this Device instance + * - Check that session is valid + * - Attach to the firmware so it can record there is more + * than one client of the session. + */ + + if (tfp->session) { + if (tfp->session->session_id.id != TF_SESSION_ID_INVALID) { + rc = tf_msg_session_attach(tfp, + parms->ctrl_chan_name, + parms->session_id); + } + } +#endif /* TF_SHARED */ + return -1; +} + +int +tf_close_session(struct tf *tfp) +{ + int rc; + int rc_close = 0; + struct tf_session *tfs; + union tf_session_id session_id; + + if (tfp == NULL || tfp->session == NULL) + return -EINVAL; + + tfs = (struct tf_session *)(tfp->session->core_data); + + /* Cleanup if we're last user of the session */ + if (tfs->ref_count == 1) { + /* Cleanup any outstanding resources */ + rc_close = tf_rm_close(tfp); + } + + if (tfs->session_id.id != TF_SESSION_ID_INVALID) { + rc = tf_msg_session_close(tfp); + if (rc) { + /* Log error */ + PMD_DRV_LOG(ERR, + "Message send failed, rc:%d\n", + rc); + } + + /* Update the ref_count */ + tfs->ref_count--; + } + + session_id = tfs->session_id; + + /* Final cleanup as we're last user of the session */ + if (tfs->ref_count == 0) { + tfp_free(tfp->session->core_data); + tfp_free(tfp->session); + tfp->session = NULL; + } + + PMD_DRV_LOG(INFO, + "Session closed, session_id:%d\n", + session_id.id); + + PMD_DRV_LOG(INFO, + "domain:%d, bus:%d, device:%d, fw_session_id:%d\n", + session_id.internal.domain, + session_id.internal.bus, + session_id.internal.device, + session_id.internal.fw_session_id); + + return rc_close; +} + +/** insert EM hash entry API + * + * returns: + * 0 - Success + * -EINVAL - Error + */ +int tf_insert_em_entry(struct tf *tfp, + struct tf_insert_em_entry_parms *parms) +{ + struct tf_tbl_scope_cb *tbl_scope_cb; + + if (tfp == NULL || parms == NULL) + return -EINVAL; + + tbl_scope_cb = + tbl_scope_cb_find((struct tf_session *)tfp->session->core_data, + parms->tbl_scope_id); + if (tbl_scope_cb == NULL) + return -EINVAL; + + /* Process the EM entry per Table Scope type */ + return tf_insert_eem_entry((struct tf_session *)tfp->session->core_data, + tbl_scope_cb, + parms); +} + +/** Delete EM hash entry API + * + * returns: + * 0 - 
Success + * -EINVAL - Error + */ +int tf_delete_em_entry(struct tf *tfp, + struct tf_delete_em_entry_parms *parms) +{ + struct tf_tbl_scope_cb *tbl_scope_cb; + + if (tfp == NULL || parms == NULL) + return -EINVAL; + + tbl_scope_cb = + tbl_scope_cb_find((struct tf_session *)tfp->session->core_data, + parms->tbl_scope_id); + if (tbl_scope_cb == NULL) + return -EINVAL; + + return tf_delete_eem_entry(tfp, parms); +} + +/** allocate identifier resource + * + * Returns success or failure code. + */ +int tf_alloc_identifier(struct tf *tfp, + struct tf_alloc_identifier_parms *parms) +{ + struct bitalloc *session_pool; + struct tf_session *tfs; + int id; + int rc; + + if (parms == NULL || tfp == NULL) + return -EINVAL; + + if (tfp->session == NULL || tfp->session->core_data == NULL) { + PMD_DRV_LOG(ERR, "%s: session error\n", + tf_dir_2_str(parms->dir)); + return -EINVAL; + } + + tfs = (struct tf_session *)(tfp->session->core_data); + + switch (parms->ident_type) { + case TF_IDENT_TYPE_L2_CTXT: + TF_RM_GET_POOLS(tfs, parms->dir, &session_pool, + TF_L2_CTXT_REMAP_POOL_NAME, + rc); + break; + case TF_IDENT_TYPE_PROF_FUNC: + TF_RM_GET_POOLS(tfs, parms->dir, &session_pool, + TF_PROF_FUNC_POOL_NAME, + rc); + break; + case TF_IDENT_TYPE_EM_PROF: + TF_RM_GET_POOLS(tfs, parms->dir, &session_pool, + TF_EM_PROF_ID_POOL_NAME, + rc); + break; + case TF_IDENT_TYPE_WC_PROF: + TF_RM_GET_POOLS(tfs, parms->dir, &session_pool, + TF_WC_TCAM_PROF_ID_POOL_NAME, + rc); + break; + case TF_IDENT_TYPE_L2_FUNC: + PMD_DRV_LOG(ERR, "%s: unsupported %s\n", + tf_dir_2_str(parms->dir), + tf_ident_2_str(parms->ident_type)); + rc = -EOPNOTSUPP; + break; + default: + PMD_DRV_LOG(ERR, "%s: %s\n", + tf_dir_2_str(parms->dir), + tf_ident_2_str(parms->ident_type)); + rc = -EINVAL; + break; + } + + if (rc) { + PMD_DRV_LOG(ERR, "%s: identifier pool %s failure\n", + tf_dir_2_str(parms->dir), + tf_ident_2_str(parms->ident_type)); + return rc; + } + + id = ba_alloc(session_pool); + + if (id == BA_FAIL) { + PMD_DRV_LOG(ERR, "%s: %s: No resource available\n", + tf_dir_2_str(parms->dir), + tf_ident_2_str(parms->ident_type)); + return -ENOMEM; + } + parms->id = id; + return 0; +} + +/** free identifier resource + * + * Returns success or failure code. 
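+ *
+ * Illustrative alloc/free pairing (editor's sketch, not part of the
+ * upstream file); the parms fields mirror those used by the code below:
+ *
+ *   struct tf_alloc_identifier_parms ap = { 0 };
+ *   ap.dir = TF_DIR_RX;
+ *   ap.ident_type = TF_IDENT_TYPE_L2_CTXT;
+ *   if (tf_alloc_identifier(tfp, &ap) == 0) {
+ *           struct tf_free_identifier_parms fp = { 0 };
+ *           fp.dir = ap.dir;
+ *           fp.ident_type = ap.ident_type;
+ *           fp.id = ap.id;
+ *           tf_free_identifier(tfp, &fp);
+ *   }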
+ */ +int tf_free_identifier(struct tf *tfp, + struct tf_free_identifier_parms *parms) +{ + struct bitalloc *session_pool; + int rc; + int ba_rc; + struct tf_session *tfs; + + if (parms == NULL || tfp == NULL) + return -EINVAL; + + if (tfp->session == NULL || tfp->session->core_data == NULL) { + PMD_DRV_LOG(ERR, "%s: Session error\n", + tf_dir_2_str(parms->dir)); + return -EINVAL; + } + + tfs = (struct tf_session *)(tfp->session->core_data); + + switch (parms->ident_type) { + case TF_IDENT_TYPE_L2_CTXT: + TF_RM_GET_POOLS(tfs, parms->dir, &session_pool, + TF_L2_CTXT_REMAP_POOL_NAME, + rc); + break; + case TF_IDENT_TYPE_PROF_FUNC: + TF_RM_GET_POOLS(tfs, parms->dir, &session_pool, + TF_PROF_FUNC_POOL_NAME, + rc); + break; + case TF_IDENT_TYPE_EM_PROF: + TF_RM_GET_POOLS(tfs, parms->dir, &session_pool, + TF_EM_PROF_ID_POOL_NAME, + rc); + break; + case TF_IDENT_TYPE_WC_PROF: + TF_RM_GET_POOLS(tfs, parms->dir, &session_pool, + TF_WC_TCAM_PROF_ID_POOL_NAME, + rc); + break; + case TF_IDENT_TYPE_L2_FUNC: + PMD_DRV_LOG(ERR, "%s: unsupported %s\n", + tf_dir_2_str(parms->dir), + tf_ident_2_str(parms->ident_type)); + rc = -EOPNOTSUPP; + break; + default: + PMD_DRV_LOG(ERR, "%s: invalid %s\n", + tf_dir_2_str(parms->dir), + tf_ident_2_str(parms->ident_type)); + rc = -EINVAL; + break; + } + if (rc) { + PMD_DRV_LOG(ERR, "%s: %s Identifier pool access failed\n", + tf_dir_2_str(parms->dir), + tf_ident_2_str(parms->ident_type)); + return rc; + } + + ba_rc = ba_inuse(session_pool, (int)parms->id); + + if (ba_rc == BA_FAIL || ba_rc == BA_ENTRY_FREE) { + PMD_DRV_LOG(ERR, "%s: %s: Entry %d already free", + tf_dir_2_str(parms->dir), + tf_ident_2_str(parms->ident_type), + parms->id); + return -EINVAL; + } + + ba_free(session_pool, (int)parms->id); + + return 0; +} + +int +tf_alloc_tcam_entry(struct tf *tfp, + struct tf_alloc_tcam_entry_parms *parms) +{ + int rc; + int index; + struct tf_session *tfs; + struct bitalloc *session_pool; + + if (parms == NULL || tfp == NULL) + return -EINVAL; + + if (tfp->session == NULL || tfp->session->core_data == NULL) { + PMD_DRV_LOG(ERR, "%s: session error\n", + tf_dir_2_str(parms->dir)); + return -EINVAL; + } + + tfs = (struct tf_session *)(tfp->session->core_data); + + rc = tf_rm_lookup_tcam_type_pool(tfs, + parms->dir, + parms->tcam_tbl_type, + &session_pool); + /* Error logging handled by tf_rm_lookup_tcam_type_pool */ + if (rc) + return rc; + + index = ba_alloc(session_pool); + if (index == BA_FAIL) { + PMD_DRV_LOG(ERR, "%s: %s: No resource available\n", + tf_dir_2_str(parms->dir), + tf_tcam_tbl_2_str(parms->tcam_tbl_type)); + return -ENOMEM; + } + + parms->idx = index; + return 0; +} + +int +tf_set_tcam_entry(struct tf *tfp, + struct tf_set_tcam_entry_parms *parms) +{ + int rc; + int id; + struct tf_session *tfs; + struct bitalloc *session_pool; + + if (tfp == NULL || parms == NULL) { + PMD_DRV_LOG(ERR, "Invalid parameters\n"); + return -EINVAL; + } + + if (tfp->session == NULL || tfp->session->core_data == NULL) { + PMD_DRV_LOG(ERR, + "%s, Session info invalid\n", + tf_dir_2_str(parms->dir)); + return -EINVAL; + } + + tfs = (struct tf_session *)(tfp->session->core_data); + + /* + * Each tcam send msg function should check for key sizes range + */ + + rc = tf_rm_lookup_tcam_type_pool(tfs, + parms->dir, + parms->tcam_tbl_type, + &session_pool); + /* Error logging handled by tf_rm_lookup_tcam_type_pool */ + if (rc) + return rc; + + + /* Verify that the entry has been previously allocated */ + id = ba_inuse(session_pool, parms->idx); + if (id != 1) { + PMD_DRV_LOG(ERR, + "%s: 
%s: Invalid or not allocated index, idx:%d\n", + tf_dir_2_str(parms->dir), + tf_tcam_tbl_2_str(parms->tcam_tbl_type), + parms->idx); + return -EINVAL; + } + + rc = tf_msg_tcam_entry_set(tfp, parms); + + return rc; +} + +int +tf_get_tcam_entry(struct tf *tfp __rte_unused, + struct tf_get_tcam_entry_parms *parms __rte_unused) +{ + int rc = -EOPNOTSUPP; + + if (tfp == NULL || parms == NULL) { + PMD_DRV_LOG(ERR, "Invalid parameters\n"); + return -EINVAL; + } + + if (tfp->session == NULL || tfp->session->core_data == NULL) { + PMD_DRV_LOG(ERR, + "%s, Session info invalid\n", + tf_dir_2_str(parms->dir)); + return -EINVAL; + } + + return rc; +} + +int +tf_free_tcam_entry(struct tf *tfp, + struct tf_free_tcam_entry_parms *parms) +{ + int rc; + struct tf_session *tfs; + struct bitalloc *session_pool; + + if (parms == NULL || tfp == NULL) + return -EINVAL; + + if (tfp->session == NULL || tfp->session->core_data == NULL) { + PMD_DRV_LOG(ERR, "%s: Session error\n", + tf_dir_2_str(parms->dir)); + return -EINVAL; + } + + tfs = (struct tf_session *)(tfp->session->core_data); + + rc = tf_rm_lookup_tcam_type_pool(tfs, + parms->dir, + parms->tcam_tbl_type, + &session_pool); + /* Error logging handled by tf_rm_lookup_tcam_type_pool */ + if (rc) + return rc; + + rc = ba_inuse(session_pool, (int)parms->idx); + if (rc == BA_FAIL || rc == BA_ENTRY_FREE) { + PMD_DRV_LOG(ERR, "%s: %s: Entry %d already free", + tf_dir_2_str(parms->dir), + tf_tcam_tbl_2_str(parms->tcam_tbl_type), + parms->idx); + return -EINVAL; + } + + ba_free(session_pool, (int)parms->idx); + + rc = tf_msg_tcam_entry_free(tfp, parms); + if (rc) { + /* Log error */ + PMD_DRV_LOG(ERR, "%s: %s: Entry %d free failed", + tf_dir_2_str(parms->dir), + tf_tcam_tbl_2_str(parms->tcam_tbl_type), + parms->idx); + } + + return rc; +} diff --git a/src/spdk/dpdk/drivers/net/bnxt/tf_core/tf_core.h b/src/spdk/dpdk/drivers/net/bnxt/tf_core/tf_core.h new file mode 100644 index 000000000..1eedd80e7 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnxt/tf_core/tf_core.h @@ -0,0 +1,1385 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019-2020 Broadcom + * All rights reserved. + */ + +#ifndef _TF_CORE_H_ +#define _TF_CORE_H_ + +#include +#include +#include +#include + +#include "tf_project.h" + +/** + * @file + * + * Truflow Core API Header File + */ + +/********** BEGIN Truflow Core DEFINITIONS **********/ + + +#define TF_KILOBYTE 1024 +#define TF_MEGABYTE (1024 * 1024) + +/** + * direction + */ +enum tf_dir { + TF_DIR_RX, /**< Receive */ + TF_DIR_TX, /**< Transmit */ + TF_DIR_MAX +}; + +/** + * memory choice + */ +enum tf_mem { + TF_MEM_INTERNAL, /**< Internal */ + TF_MEM_EXTERNAL, /**< External */ + TF_MEM_MAX +}; + +/** + * The size of the external action record (Wh+/Brd2) + * + * Currently set to 512. + * + * AR (16B) + encap (256B) + stats_ptrs (8) + resvd (8) + * + stats (16) = 304 aligned on a 16B boundary + * + * Theoretically, the size should be smaller. ~304B + */ +#define TF_ACTION_RECORD_SZ 512 + +/** + * External pool size + * + * Defines a single pool of external action records of + * fixed size. Currently, this is an index. 
+ */ +#define TF_EXT_POOL_ENTRY_SZ_BYTES 1 + +/** + * External pool entry count + * + * Defines the number of entries in the external action pool + */ +#define TF_EXT_POOL_ENTRY_CNT (1 * 1024) + +/** + * Number of external pools + */ +#define TF_EXT_POOL_CNT_MAX 1 + +/** + * External pool Id + */ +#define TF_EXT_POOL_0 0 /**< matches TF_TBL_TYPE_EXT */ +#define TF_EXT_POOL_1 1 /**< matches TF_TBL_TYPE_EXT_0 */ + +/** EEM record AR helper + * + * Helper to handle the Action Record Pointer in the EEM Record Entry. + * + * Convert absolute offset to action record pointer in EEM record entry + * Convert action record pointer in EEM record entry to absolute offset + */ +#define TF_ACT_REC_OFFSET_2_PTR(offset) ((offset) >> 4) +#define TF_ACT_REC_PTR_2_OFFSET(offset) ((offset) << 4) + +/* + * Helper Macros + */ +#define TF_BITS_2_BYTES(num_bits) (((num_bits) + 7) / 8) + +/********** BEGIN API FUNCTION PROTOTYPES/PARAMETERS **********/ + +/** + * @page general General + * + * @ref tf_open_session + * + * @ref tf_attach_session + * + * @ref tf_close_session + */ + + +/** Session Version defines + * + * The version controls the format of the tf_session and + * tf_session_info structure. This is to assure upgrade between + * versions can be supported. + */ +#define TF_SESSION_VER_MAJOR 1 /**< Major Version */ +#define TF_SESSION_VER_MINOR 0 /**< Minor Version */ +#define TF_SESSION_VER_UPDATE 0 /**< Update Version */ + +/** Session Name + * + * Name of the TruFlow control channel interface. Expects + * format to be RTE Name specific, i.e. rte_eth_dev_get_name_by_port() + */ +#define TF_SESSION_NAME_MAX 64 + +#define TF_FW_SESSION_ID_INVALID 0xFF /**< Invalid FW Session ID define */ + +/** Session Identifier + * + * Unique session identifier which includes PCIe bus info to + * distinguish the PF and session info to identify the associated + * TruFlow session. Session ID is constructed from the passed in + * ctrl_chan_name in tf_open_session() together with an allocated + * fw_session_id. Done by TruFlow on tf_open_session(). + */ +union tf_session_id { + uint32_t id; + struct { + uint8_t domain; + uint8_t bus; + uint8_t device; + uint8_t fw_session_id; + } internal; +}; + +/** Session Version + * + * The version controls the format of the tf_session and + * tf_session_info structure. This is to assure upgrade between + * versions can be supported. + * + * Please see the TF_VER_MAJOR/MINOR and UPDATE defines. + */ +struct tf_session_version { + uint8_t major; + uint8_t minor; + uint8_t update; +}; + +/** Session supported device types + * + */ +enum tf_device_type { + TF_DEVICE_TYPE_WH = 0, /**< Whitney+ */ + TF_DEVICE_TYPE_BRD2, /**< TBD */ + TF_DEVICE_TYPE_BRD3, /**< TBD */ + TF_DEVICE_TYPE_BRD4, /**< TBD */ + TF_DEVICE_TYPE_MAX /**< Maximum */ +}; + +/** TruFlow Session Information + * + * Structure defining a TruFlow Session, also known as a Management + * session. This structure is initialized at time of + * tf_open_session(). It is passed to all of the TruFlow APIs as way + * to prescribe and isolate resources between different TruFlow ULP + * Applications. + */ +struct tf_session_info { + /** + * TrueFlow Version. Used to control the structure layout when + * sharing sessions. No guarantee that a secondary process + * would come from the same version of an executable. + * TruFlow initializes this variable on tf_open_session(). 
+ * + * Owner: TruFlow + * Access: TruFlow + */ + struct tf_session_version ver; + /** + * will be STAILQ_ENTRY(tf_session_info) next + * + * Owner: ULP + * Access: ULP + */ + void *next; + /** + * Session ID is a unique identifier for the session. TruFlow + * initializes this variable during tf_open_session() + * processing. + * + * Owner: TruFlow + * Access: Truflow & ULP + */ + union tf_session_id session_id; + /** + * Protects access to core_data. Lock is initialized and owned + * by ULP. TruFlow can access the core_data without checking + * the lock. + * + * Owner: ULP + * Access: ULP + */ + uint8_t spin_lock; + /** + * The core_data holds the TruFlow tf_session data + * structure. This memory is allocated and owned by TruFlow on + * tf_open_session(). + * + * TruFlow uses this memory for session management control + * until the session is closed by ULP. Access control is done + * by the spin_lock which ULP controls ahead of TruFlow API + * calls. + * + * Please see tf_open_session_parms for specification details + * on this variable. + * + * Owner: TruFlow + * Access: TruFlow + */ + void *core_data; + /** + * The core_data_sz_bytes specifies the size of core_data in + * bytes. + * + * The size is set by TruFlow on tf_open_session(). + * + * Please see tf_open_session_parms for specification details + * on this variable. + * + * Owner: TruFlow + * Access: TruFlow + */ + uint32_t core_data_sz_bytes; +}; + +/** TruFlow handle + * + * Contains a pointer to the session info. Allocated by ULP and passed + * to TruFlow using tf_open_session(). TruFlow will populate the + * session info at that time. Additional 'opens' can be done using + * same session_info by using tf_attach_session(). + * + * It is expected that ULP allocates this memory as shared memory. + * + * NOTE: This struct must be within the BNXT PMD struct bnxt + * (bp). This allows use of container_of() to get access to the PMD. + */ +struct tf { + struct tf_session_info *session; +}; + + +/** + * tf_open_session parameters definition. + */ +struct tf_open_session_parms { + /** [in] ctrl_chan_name + * + * String containing name of control channel interface to be + * used for this session to communicate with firmware. + * + * The ctrl_chan_name can be looked up by using + * rte_eth_dev_get_name_by_port() within the ULP. + * + * ctrl_chan_name will be used as part of a name for any + * shared memory allocation. + */ + char ctrl_chan_name[TF_SESSION_NAME_MAX]; + /** [in] shadow_copy + * + * Boolean controlling the use and availability of shadow + * copy. Shadow copy will allow the TruFlow to keep track of + * resource content on the firmware side without having to + * query firmware. Additional private session core_data will + * be allocated if this boolean is set to 'true', default + * 'false'. + * + * Size of memory depends on the NVM Resource settings for the + * control channel. + */ + bool shadow_copy; + /** [in/out] session_id + * + * Session_id is unique per session. + * + * Session_id is composed of domain, bus, device and + * fw_session_id. The construction is done by parsing the + * ctrl_chan_name together with allocation of a fw_session_id. + * + * The session_id allows a session to be shared between devices. + */ + union tf_session_id session_id; + /** [in] device type + * + * Device type is passed, one of Wh+, Brd2, Brd3, Brd4 + */ + enum tf_device_type device_type; +}; + +/** + * Opens a new TruFlow management session. + * + * TruFlow will allocate session specific memory, shared memory, to + * hold its session data. 
This data is private to TruFlow. + * + * Multiple PFs can share the same session. An association, refcount, + * between session and PFs is maintained within TruFlow. Thus, a PF + * can attach to an existing session, see tf_attach_session(). + * + * No other TruFlow APIs will succeed unless this API is first called and + * succeeds. + * + * tf_open_session() returns a session id that can be used on attach. + * + * [in] tfp + * Pointer to TF handle + * [in] parms + * Pointer to open parameters + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ +int tf_open_session(struct tf *tfp, + struct tf_open_session_parms *parms); + +struct tf_attach_session_parms { + /** [in] ctrl_chan_name + * + * String containing name of control channel interface to be + * used for this session to communicate with firmware. + * + * The ctrl_chan_name can be looked up by using + * rte_eth_dev_get_name_by_port() within the ULP. + * + * ctrl_chan_name will be used as part of a name for any + * shared memory allocation. + */ + char ctrl_chan_name[TF_SESSION_NAME_MAX]; + + /** [in] attach_chan_name + * + * String containing name of attach channel interface to be + * used for this session. + * + * The attach_chan_name must be given to a 2nd process after + * the primary process has been created. This is the + * ctrl_chan_name of the primary process and is used to find + * the shared memory for the session that the attach is going + * to use. + */ + char attach_chan_name[TF_SESSION_NAME_MAX]; + + /** [in] session_id + * + * Session_id is unique per session. For Attach the session_id + * should be the session_id that was returned on the first + * open. + * + * Session_id is composed of domain, bus, device and + * fw_session_id. The construction is done by parsing the + * ctrl_chan_name together with allocation of a fw_session_id + * during tf_open_session(). + * + * A reference count will be incremented on attach. A session + * is first fully closed when reference count is zero by + * calling tf_close_session(). + */ + union tf_session_id session_id; +}; + +/** + * Attaches to an existing session. Used when more than one PF wants + * to share a single session. In that case all TruFlow management + * traffic will be sent to the TruFlow firmware using the 'PF' that + * did the attach not the session ctrl channel. + * + * Attach will increment a ref count as to manage the shared session data. + * + * [in] tfp, pointer to TF handle + * [in] parms, pointer to attach parameters + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ +int tf_attach_session(struct tf *tfp, + struct tf_attach_session_parms *parms); + +/** + * Closes an existing session. Cleans up all hardware and firmware + * state associated with the TruFlow application session when the last + * PF associated with the session results in refcount to be zero. + * + * Returns success or failure code. + */ +int tf_close_session(struct tf *tfp); + +/** + * @page ident Identity Management + * + * @ref tf_alloc_identifier + * + * @ref tf_free_identifier + */ +enum tf_identifier_type { + /** The L2 Context is returned from the L2 Ctxt TCAM lookup + * and can be used in WC TCAM or EM keys to virtualize further + * lookups. + */ + TF_IDENT_TYPE_L2_CTXT, + /** The WC profile func is returned from the L2 Ctxt TCAM lookup + * to enable virtualization of the profile TCAM. + */ + TF_IDENT_TYPE_PROF_FUNC, + /** The WC profile ID is included in the WC lookup key + * to enable virtualization of the WC TCAM hardware. 
+ */ + TF_IDENT_TYPE_WC_PROF, + /** The EM profile ID is included in the EM lookup key + * to enable virtualization of the EM hardware. (not required for Brd4 + * as it has table scope) + */ + TF_IDENT_TYPE_EM_PROF, + /** The L2 func is included in the ILT result and from recycling to + * enable virtualization of further lookups. + */ + TF_IDENT_TYPE_L2_FUNC +}; + +/** tf_alloc_identifier parameter definition + */ +struct tf_alloc_identifier_parms { + /** + * [in] receive or transmit direction + */ + enum tf_dir dir; + /** + * [in] Identifier type + */ + enum tf_identifier_type ident_type; + /** + * [out] Identifier allocated + */ + uint16_t id; +}; + +/** tf_free_identifier parameter definition + */ +struct tf_free_identifier_parms { + /** + * [in] receive or transmit direction + */ + enum tf_dir dir; + /** + * [in] Identifier type + */ + enum tf_identifier_type ident_type; + /** + * [in] ID to free + */ + uint16_t id; +}; + +/** allocate identifier resource + * + * TruFlow core will allocate a free id from the per identifier resource type + * pool reserved for the session during tf_open(). No firmware is involved. + * + * Returns success or failure code. + */ +int tf_alloc_identifier(struct tf *tfp, + struct tf_alloc_identifier_parms *parms); + +/** free identifier resource + * + * TruFlow core will return an id back to the per identifier resource type pool + * reserved for the session. No firmware is involved. During tf_close, the + * complete pool is returned to the firmware. + * + * Returns success or failure code. + */ +int tf_free_identifier(struct tf *tfp, + struct tf_free_identifier_parms *parms); + +/** + * @page dram_table DRAM Table Scope Interface + * + * @ref tf_alloc_tbl_scope + * + * @ref tf_free_tbl_scope + * + * If we allocate the EEM memory from the core, we need to store it in + * the shared session data structure to make sure it can be freed later. + * (for example if the PF goes away) + * + * Current thought is that memory is allocated within core. + */ + + +/** tf_alloc_tbl_scope_parms definition + */ +struct tf_alloc_tbl_scope_parms { + /** + * [in] All Maximum key size required. + */ + uint16_t rx_max_key_sz_in_bits; + /** + * [in] Maximum Action size required (includes inlined items) + */ + uint16_t rx_max_action_entry_sz_in_bits; + /** + * [in] Memory size in Megabytes + * Total memory size allocated by user to be divided + * up for actions, hash, counters. Only inline external actions. + * Use this variable or the number of flows, do not set both. + */ + uint32_t rx_mem_size_in_mb; + /** + * [in] Number of flows * 1000. If set, rx_mem_size_in_mb must equal 0. + */ + uint32_t rx_num_flows_in_k; + /** + * [in] Brd4 only receive table access interface id + */ + uint32_t rx_tbl_if_id; + /** + * [in] All Maximum key size required. + */ + uint16_t tx_max_key_sz_in_bits; + /** + * [in] Maximum Action size required (includes inlined items) + */ + uint16_t tx_max_action_entry_sz_in_bits; + /** + * [in] Memory size in Megabytes + * Total memory size allocated by user to be divided + * up for actions, hash, counters. Only inline external actions. + */ + uint32_t tx_mem_size_in_mb; + /** + * [in] Number of flows * 1000 + */ + uint32_t tx_num_flows_in_k; + /** + * [in] Brd4 only receive table access interface id + */ + uint32_t tx_tbl_if_id; + /** + * [in] Flush pending HW cached flows every 1/10th of value + * set in seconds, both idle and active flows are flushed + * from the HW cache. If set to 0, this feature will be disabled. 
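+	 *
+	 * For example, a value of 10 corresponds to a flush interval
+	 * of roughly one second (10 * 1/10th of a second).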
+ */ + uint8_t hw_flow_cache_flush_timer; + /** + * [out] table scope identifier + */ + uint32_t tbl_scope_id; +}; + +struct tf_free_tbl_scope_parms { + /** + * [in] table scope identifier + */ + uint32_t tbl_scope_id; +}; + +/** + * allocate a table scope + * + * On Brd4 Firmware will allocate a scope ID. On other devices, the scope + * is a software construct to identify an EEM table. This function will + * divide the hash memory/buckets and records according to the device + * device constraints based upon calculations using either the number of flows + * requested or the size of memory indicated. Other parameters passed in + * determine the configuration (maximum key size, maximum external action record + * size. + * + * This API will allocate the table region in + * DRAM, program the PTU page table entries, and program the number of static + * buckets (if Brd4) in the RX and TX CFAs. Buckets are assumed to start at + * 0 in the EM memory for the scope. Upon successful completion of this API, + * hash tables are fully initialized and ready for entries to be inserted. + * + * A single API is used to allocate a common table scope identifier in both + * receive and transmit CFA. The scope identifier is common due to nature of + * connection tracking sending notifications between RX and TX direction. + * + * The receive and transmit table access identifiers specify which rings will + * be used to initialize table DRAM. The application must ensure mutual + * exclusivity of ring usage for table scope allocation and any table update + * operations. + * + * The hash table buckets, EM keys, and EM lookup results are stored in the + * memory allocated based on the rx_em_hash_mb/tx_em_hash_mb parameters. The + * hash table buckets are stored at the beginning of that memory. + * + * NOTE: No EM internal setup is done here. On chip EM records are managed + * internally by TruFlow core. + * + * Returns success or failure code. + */ +int tf_alloc_tbl_scope(struct tf *tfp, + struct tf_alloc_tbl_scope_parms *parms); + + +/** + * free a table scope + * + * Firmware checks that the table scope ID is owned by the TruFlow + * session, verifies that no references to this table scope remains + * (Brd4 ILT) or Profile TCAM entries for either CFA (RX/TX) direction, + * then frees the table scope ID. + * + * Returns success or failure code. 
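+ *
+ * Illustrative alloc/free sketch; the sizing values are placeholders
+ * only and 'tfp'/'rc' are assumed to exist in the caller:
+ *
+ *   struct tf_alloc_tbl_scope_parms aparms = { 0 };
+ *   struct tf_free_tbl_scope_parms fparms = { 0 };
+ *
+ *   aparms.rx_max_key_sz_in_bits = 448;
+ *   aparms.rx_max_action_entry_sz_in_bits = 256;
+ *   aparms.rx_num_flows_in_k = 32;
+ *   aparms.tx_max_key_sz_in_bits = 448;
+ *   aparms.tx_max_action_entry_sz_in_bits = 256;
+ *   aparms.tx_num_flows_in_k = 32;
+ *   rc = tf_alloc_tbl_scope(tfp, &aparms);
+ *
+ * Since the number of flows is given, the mem_size_in_mb fields are
+ * left at zero. When the scope is no longer needed:
+ *
+ *   fparms.tbl_scope_id = aparms.tbl_scope_id;
+ *   rc = tf_free_tbl_scope(tfp, &fparms);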
+ */ +int tf_free_tbl_scope(struct tf *tfp, + struct tf_free_tbl_scope_parms *parms); + +/** + * TCAM table type + */ +enum tf_tcam_tbl_type { + TF_TCAM_TBL_TYPE_L2_CTXT_TCAM, + TF_TCAM_TBL_TYPE_PROF_TCAM, + TF_TCAM_TBL_TYPE_WC_TCAM, + TF_TCAM_TBL_TYPE_SP_TCAM, + TF_TCAM_TBL_TYPE_CT_RULE_TCAM, + TF_TCAM_TBL_TYPE_VEB_TCAM, + TF_TCAM_TBL_TYPE_MAX + +}; + +/** + * @page tcam TCAM Access + * + * @ref tf_alloc_tcam_entry + * + * @ref tf_set_tcam_entry + * + * @ref tf_get_tcam_entry + * + * @ref tf_free_tcam_entry + */ + +/** tf_alloc_tcam_entry parameter definition + */ +struct tf_alloc_tcam_entry_parms { + /** + * [in] receive or transmit direction + */ + enum tf_dir dir; + /** + * [in] TCAM table type + */ + enum tf_tcam_tbl_type tcam_tbl_type; + /** + * [in] Enable search for matching entry + */ + uint8_t search_enable; + /** + * [in] Key data to match on (if search) + */ + uint8_t *key; + /** + * [in] key size in bits (if search) + */ + uint16_t key_sz_in_bits; + /** + * [in] Mask data to match on (if search) + */ + uint8_t *mask; + /** + * [in] Priority of entry requested (definition TBD) + */ + uint32_t priority; + /** + * [out] If search, set if matching entry found + */ + uint8_t hit; + /** + * [out] Current refcnt after allocation + */ + uint16_t ref_cnt; + /** + * [out] Idx allocated + * + */ + uint16_t idx; +}; + +/** allocate TCAM entry + * + * Allocate a TCAM entry - one of these types: + * + * L2 Context + * Profile TCAM + * WC TCAM + * VEB TCAM + * + * This function allocates a TCAM table record. This function + * will attempt to allocate a TCAM table entry from the session + * owned TCAM entries or search a shadow copy of the TCAM table for a + * matching entry if search is enabled. Key, mask and result must match for + * hit to be set. Only TruFlow core data is accessed. + * A hash table to entry mapping is maintained for search purposes. If + * search is not enabled, the first available free entry is returned based + * on priority and alloc_cnt is set to 1. If search is enabled and a matching + * entry to entry_data is found, hit is set to TRUE and alloc_cnt is set to 1. + * RefCnt is also returned. + * + * Also returns success or failure code. + */ +int tf_alloc_tcam_entry(struct tf *tfp, + struct tf_alloc_tcam_entry_parms *parms); + +/** tf_set_tcam_entry parameter definition + */ +struct tf_set_tcam_entry_parms { + /** + * [in] receive or transmit direction + */ + enum tf_dir dir; + /** + * [in] TCAM table type + */ + enum tf_tcam_tbl_type tcam_tbl_type; + /** + * [in] base index of the entry to program + */ + uint16_t idx; + /** + * [in] struct containing key + */ + uint8_t *key; + /** + * [in] struct containing mask fields + */ + uint8_t *mask; + /** + * [in] key size in bits (if search) + */ + uint16_t key_sz_in_bits; + /** + * [in] struct containing result + */ + uint8_t *result; + /** + * [in] struct containing result size in bits + */ + uint16_t result_sz_in_bits; +}; + +/** set TCAM entry + * + * Program a TCAM table entry for a TruFlow session. + * + * If the entry has not been allocated, an error will be returned. + * + * Returns success or failure code. 
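+ *
+ * Illustrative sketch of an allocate-then-set sequence; the key,
+ * mask and result buffers and their bit sizes are application
+ * supplied placeholders:
+ *
+ *   struct tf_alloc_tcam_entry_parms aparms = { 0 };
+ *   struct tf_set_tcam_entry_parms sparms = { 0 };
+ *
+ *   aparms.dir = TF_DIR_RX;
+ *   aparms.tcam_tbl_type = TF_TCAM_TBL_TYPE_L2_CTXT_TCAM;
+ *   aparms.key = key;
+ *   aparms.mask = mask;
+ *   aparms.key_sz_in_bits = key_sz_in_bits;
+ *   rc = tf_alloc_tcam_entry(tfp, &aparms);
+ *
+ *   sparms.dir = aparms.dir;
+ *   sparms.tcam_tbl_type = aparms.tcam_tbl_type;
+ *   sparms.idx = aparms.idx;
+ *   sparms.key = key;
+ *   sparms.mask = mask;
+ *   sparms.key_sz_in_bits = key_sz_in_bits;
+ *   sparms.result = result;
+ *   sparms.result_sz_in_bits = result_sz_in_bits;
+ *   rc = tf_set_tcam_entry(tfp, &sparms);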
+ */ +int tf_set_tcam_entry(struct tf *tfp, + struct tf_set_tcam_entry_parms *parms); + +/** tf_get_tcam_entry parameter definition + */ +struct tf_get_tcam_entry_parms { + /** + * [in] receive or transmit direction + */ + enum tf_dir dir; + /** + * [in] TCAM table type + */ + enum tf_tcam_tbl_type tcam_tbl_type; + /** + * [in] index of the entry to get + */ + uint16_t idx; + /** + * [out] struct containing key + */ + uint8_t *key; + /** + * [out] struct containing mask fields + */ + uint8_t *mask; + /** + * [out] key size in bits + */ + uint16_t key_sz_in_bits; + /** + * [out] struct containing result + */ + uint8_t *result; + /** + * [out] struct containing result size in bits + */ + uint16_t result_sz_in_bits; +}; + +/** get TCAM entry + * + * Program a TCAM table entry for a TruFlow session. + * + * If the entry has not been allocated, an error will be returned. + * + * Returns success or failure code. + */ +int tf_get_tcam_entry(struct tf *tfp, + struct tf_get_tcam_entry_parms *parms); + +/** tf_free_tcam_entry parameter definition + */ +struct tf_free_tcam_entry_parms { + /** + * [in] receive or transmit direction + */ + enum tf_dir dir; + /** + * [in] TCAM table type + */ + enum tf_tcam_tbl_type tcam_tbl_type; + /** + * [in] Index to free + */ + uint16_t idx; + /** + * [out] reference count after free + */ + uint16_t ref_cnt; +}; + +/** free TCAM entry + * + * Free TCAM entry. + * + * Firmware checks to ensure the TCAM entries are owned by the TruFlow + * session. TCAM entry will be invalidated. All-ones mask. + * writes to hw. + * + * WCTCAM profile id of 0 must be used to invalidate an entry. + * + * Returns success or failure code. + */ +int tf_free_tcam_entry(struct tf *tfp, + struct tf_free_tcam_entry_parms *parms); + +/** + * @page table Table Access + * + * @ref tf_alloc_tbl_entry + * + * @ref tf_free_tbl_entry + * + * @ref tf_set_tbl_entry + * + * @ref tf_get_tbl_entry + */ + +/** + * Enumeration of TruFlow table types. A table type is used to identify a + * resource object. + * + * NOTE: The table type TF_TBL_TYPE_EXT is unique in that it is + * the only table type that is connected with a table scope. 
+ */ +enum tf_tbl_type { + /** Wh+/Brd2 Action Record */ + TF_TBL_TYPE_FULL_ACT_RECORD, + /** Multicast Groups */ + TF_TBL_TYPE_MCAST_GROUPS, + /** Action Encap 8 Bytes */ + TF_TBL_TYPE_ACT_ENCAP_8B, + /** Action Encap 16 Bytes */ + TF_TBL_TYPE_ACT_ENCAP_16B, + /** Action Encap 64 Bytes */ + TF_TBL_TYPE_ACT_ENCAP_32B, + /** Action Encap 64 Bytes */ + TF_TBL_TYPE_ACT_ENCAP_64B, + /** Action Source Properties SMAC */ + TF_TBL_TYPE_ACT_SP_SMAC, + /** Action Source Properties SMAC IPv4 */ + TF_TBL_TYPE_ACT_SP_SMAC_IPV4, + /** Action Source Properties SMAC IPv6 */ + TF_TBL_TYPE_ACT_SP_SMAC_IPV6, + /** Action Statistics 64 Bits */ + TF_TBL_TYPE_ACT_STATS_64, + /** Action Modify L4 Src Port */ + TF_TBL_TYPE_ACT_MODIFY_SPORT, + /** Action Modify L4 Dest Port */ + TF_TBL_TYPE_ACT_MODIFY_DPORT, + /** Action Modify IPv4 Source */ + TF_TBL_TYPE_ACT_MODIFY_IPV4_SRC, + /** Action _Modify L4 Dest Port */ + TF_TBL_TYPE_ACT_MODIFY_IPV4_DEST, + /** Action Modify IPv6 Source */ + TF_TBL_TYPE_ACT_MODIFY_IPV6_SRC, + /** Action Modify IPv6 Destination */ + TF_TBL_TYPE_ACT_MODIFY_IPV6_DEST, + + /* HW */ + + /** Meter Profiles */ + TF_TBL_TYPE_METER_PROF, + /** Meter Instance */ + TF_TBL_TYPE_METER_INST, + /** Mirror Config */ + TF_TBL_TYPE_MIRROR_CONFIG, + /** UPAR */ + TF_TBL_TYPE_UPAR, + /** Brd4 Epoch 0 table */ + TF_TBL_TYPE_EPOCH0, + /** Brd4 Epoch 1 table */ + TF_TBL_TYPE_EPOCH1, + /** Brd4 Metadata */ + TF_TBL_TYPE_METADATA, + /** Brd4 CT State */ + TF_TBL_TYPE_CT_STATE, + /** Brd4 Range Profile */ + TF_TBL_TYPE_RANGE_PROF, + /** Brd4 Range Entry */ + TF_TBL_TYPE_RANGE_ENTRY, + /** Brd4 LAG Entry */ + TF_TBL_TYPE_LAG, + /** Brd4 only VNIC/SVIF Table */ + TF_TBL_TYPE_VNIC_SVIF, + + /* External */ + + /** External table type - initially 1 poolsize entries. + * All External table types are associated with a table + * scope. Internal types are not. + */ + TF_TBL_TYPE_EXT, + TF_TBL_TYPE_MAX +}; + +/** tf_alloc_tbl_entry parameter definition + */ +struct tf_alloc_tbl_entry_parms { + /** + * [in] Receive or transmit direction + */ + enum tf_dir dir; + /** + * [in] Type of the allocation + */ + enum tf_tbl_type type; + /** + * [in] Table scope identifier (ignored unless TF_TBL_TYPE_EXT) + */ + uint32_t tbl_scope_id; + /** + * [in] Enable search for matching entry. If the table type is + * internal the shadow copy will be searched before + * alloc. Session must be configured with shadow copy enabled. + */ + uint8_t search_enable; + /** + * [in] Result data to search for (if search_enable) + */ + uint8_t *result; + /** + * [in] Result data size in bytes (if search_enable) + */ + uint16_t result_sz_in_bytes; + /** + * [out] If search_enable, set if matching entry found + */ + uint8_t hit; + /** + * [out] Current ref count after allocation (if search_enable) + */ + uint16_t ref_cnt; + /** + * [out] Idx of allocated entry or found entry (if search_enable) + */ + uint32_t idx; +}; + +/** allocate index table entries + * + * Internal types: + * + * Allocate an on chip index table entry or search for a matching + * entry of the indicated type for this TruFlow session. + * + * Allocates an index table record. This function will attempt to + * allocate an entry or search an index table for a matching entry if + * search is enabled (only the shadow copy of the table is accessed). + * + * If search is not enabled, the first available free entry is + * returned. If search is enabled and a matching entry to entry_data + * is found hit is set to TRUE and success is returned. 
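+ *
+ * Illustrative internal-type sketch (the direction and table type
+ * are placeholders; search is left disabled):
+ *
+ *   struct tf_alloc_tbl_entry_parms aparms = { 0 };
+ *
+ *   aparms.dir = TF_DIR_TX;
+ *   aparms.type = TF_TBL_TYPE_ACT_STATS_64;
+ *   rc = tf_alloc_tbl_entry(tfp, &aparms);
+ *
+ * On success aparms.idx holds the allocated entry index.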
+ * + * External types: + * + * These are used to allocate inlined action record memory. + * + * Allocates an external index table action record. + * + * NOTE: + * Implementation of the internals of this function will be a stack with push + * and pop. + * + * Returns success or failure code. + */ +int tf_alloc_tbl_entry(struct tf *tfp, + struct tf_alloc_tbl_entry_parms *parms); + +/** tf_free_tbl_entry parameter definition + */ +struct tf_free_tbl_entry_parms { + /** + * [in] Receive or transmit direction + */ + enum tf_dir dir; + /** + * [in] Type of the allocation type + */ + enum tf_tbl_type type; + /** + * [in] Table scope identifier (ignored unless TF_TBL_TYPE_EXT) + */ + uint32_t tbl_scope_id; + /** + * [in] Index to free + */ + uint32_t idx; + /** + * [out] Reference count after free, only valid if session has been + * created with shadow_copy. + */ + uint16_t ref_cnt; +}; + +/** free index table entry + * + * Used to free a previously allocated table entry. + * + * Internal types: + * + * If session has shadow_copy enabled the shadow DB is searched and if + * found the element ref_cnt is decremented. If ref_cnt goes to + * zero then the element is returned to the session pool. + * + * If the session does not have a shadow DB the element is free'ed and + * given back to the session pool. + * + * External types: + * + * Free's an external index table action record. + * + * NOTE: + * Implementation of the internals of this function will be a stack with push + * and pop. + * + * Returns success or failure code. + */ +int tf_free_tbl_entry(struct tf *tfp, + struct tf_free_tbl_entry_parms *parms); + +/** tf_set_tbl_entry parameter definition + */ +struct tf_set_tbl_entry_parms { + /** + * [in] Table scope identifier + */ + uint32_t tbl_scope_id; + /** + * [in] Receive or transmit direction + */ + enum tf_dir dir; + /** + * [in] Type of object to set + */ + enum tf_tbl_type type; + /** + * [in] Entry data + */ + uint8_t *data; + /** + * [in] Entry size + */ + uint16_t data_sz_in_bytes; + /** + * [in] Entry index to write to + */ + uint32_t idx; +}; + +/** set index table entry + * + * Used to insert an application programmed index table entry into a + * previous allocated table location. A shadow copy of the table + * is maintained (if enabled) (only for internal objects) + * + * Returns success or failure code. + */ +int tf_set_tbl_entry(struct tf *tfp, + struct tf_set_tbl_entry_parms *parms); + +/** tf_get_tbl_entry parameter definition + */ +struct tf_get_tbl_entry_parms { + /** + * [in] Receive or transmit direction + */ + enum tf_dir dir; + /** + * [in] Type of object to get + */ + enum tf_tbl_type type; + /** + * [out] Entry data + */ + uint8_t *data; + /** + * [out] Entry size + */ + uint16_t data_sz_in_bytes; + /** + * [in] Entry index to read + */ + uint32_t idx; +}; + +/** get index table entry + * + * Used to retrieve a previous set index table entry. + * + * Reads and compares with the shadow table copy (if enabled) (only + * for internal objects). + * + * Returns success or failure code. Failure will be returned if the + * provided data buffer is too small for the data type requested. 
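+ *
+ * Illustrative sketch of a set followed by a read-back; 'idx' is an
+ * index previously returned by tf_alloc_tbl_entry() and 'stats' is
+ * an application buffer (both placeholders):
+ *
+ *   struct tf_set_tbl_entry_parms sparms = { 0 };
+ *   struct tf_get_tbl_entry_parms gparms = { 0 };
+ *   uint64_t stats = 0;
+ *
+ *   sparms.dir = TF_DIR_TX;
+ *   sparms.type = TF_TBL_TYPE_ACT_STATS_64;
+ *   sparms.data = (uint8_t *)&stats;
+ *   sparms.data_sz_in_bytes = sizeof(stats);
+ *   sparms.idx = idx;
+ *   rc = tf_set_tbl_entry(tfp, &sparms);
+ *
+ *   gparms.dir = TF_DIR_TX;
+ *   gparms.type = TF_TBL_TYPE_ACT_STATS_64;
+ *   gparms.data = (uint8_t *)&stats;
+ *   gparms.data_sz_in_bytes = sizeof(stats);
+ *   gparms.idx = idx;
+ *   rc = tf_get_tbl_entry(tfp, &gparms);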
+ */ +int tf_get_tbl_entry(struct tf *tfp, + struct tf_get_tbl_entry_parms *parms); + +/** + * @page exact_match Exact Match Table + * + * @ref tf_insert_em_entry + * + * @ref tf_delete_em_entry + * + * @ref tf_search_em_entry + * + */ +/** tf_insert_em_entry parameter definition + */ +struct tf_insert_em_entry_parms { + /** + * [in] receive or transmit direction + */ + enum tf_dir dir; + /** + * [in] internal or external + */ + enum tf_mem mem; + /** + * [in] ID of table scope to use (external only) + */ + uint32_t tbl_scope_id; + /** + * [in] ID of table interface to use (Brd4 only) + */ + uint32_t tbl_if_id; + /** + * [in] ptr to structure containing key fields + */ + uint8_t *key; + /** + * [in] key bit length + */ + uint16_t key_sz_in_bits; + /** + * [in] ptr to structure containing result field + */ + uint8_t *em_record; + /** + * [out] result size in bits + */ + uint16_t em_record_sz_in_bits; + /** + * [in] duplicate check flag + */ + uint8_t dup_check; + /** + * [out] Flow handle value for the inserted entry. This is encoded + * as the entries[4]:bucket[2]:hashId[1]:hash[14] + */ + uint64_t flow_handle; + /** + * [out] Flow id is returned as null (internal) + * Flow id is the GFID value for the inserted entry (external) + * This is the value written to the BD and useful information for mark. + */ + uint64_t flow_id; +}; +/** + * tf_delete_em_entry parameter definition + */ +struct tf_delete_em_entry_parms { + /** + * [in] receive or transmit direction + */ + enum tf_dir dir; + /** + * [in] internal or external + */ + enum tf_mem mem; + /** + * [in] ID of table scope to use (external only) + */ + uint32_t tbl_scope_id; + /** + * [in] ID of table interface to use (Brd4 only) + */ + uint32_t tbl_if_id; + /** + * [in] epoch group IDs of entry to delete + * 2 element array with 2 ids. (Brd4 only) + */ + uint16_t *epochs; + /** + * [in] structure containing flow delete handle information + */ + uint64_t flow_handle; +}; +/** + * tf_search_em_entry parameter definition + */ +struct tf_search_em_entry_parms { + /** + * [in] receive or transmit direction + */ + enum tf_dir dir; + /** + * [in] internal or external + */ + enum tf_mem mem; + /** + * [in] ID of table scope to use (external only) + */ + uint32_t tbl_scope_id; + /** + * [in] ID of table interface to use (Brd4 only) + */ + uint32_t tbl_if_id; + /** + * [in] ptr to structure containing key fields + */ + uint8_t *key; + /** + * [in] key bit length + */ + uint16_t key_sz_in_bits; + /** + * [in/out] ptr to structure containing EM record fields + */ + uint8_t *em_record; + /** + * [out] result size in bits + */ + uint16_t em_record_sz_in_bits; + /** + * [in] epoch group IDs of entry to lookup + * 2 element array with 2 ids. (Brd4 only) + */ + uint16_t *epochs; + /** + * [in] ptr to structure containing flow delete handle + */ + uint64_t flow_handle; +}; + +/** insert em hash entry in internal table memory + * + * Internal: + * + * This API inserts an exact match entry into internal EM table memory + * of the specified direction. + * + * Note: The EM record is managed within the TruFlow core and not the + * application. + * + * Shadow copy of internal record table an association with hash and 1,2, or 4 + * associated buckets + * + * External: + * This API inserts an exact match entry into DRAM EM table memory of the + * specified direction and table scope. + * + * When inserting an entry into an exact match table, the TruFlow library may + * need to allocate a dynamic bucket for the entry (Brd4 only). 
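+ *
+ * Illustrative external-memory sketch; 'key', 'record' and the
+ * associated sizes are application supplied placeholders:
+ *
+ *   struct tf_insert_em_entry_parms iparms = { 0 };
+ *
+ *   iparms.dir = TF_DIR_RX;
+ *   iparms.mem = TF_MEM_EXTERNAL;
+ *   iparms.tbl_scope_id = tbl_scope_id;
+ *   iparms.key = key;
+ *   iparms.key_sz_in_bits = key_sz_in_bits;
+ *   iparms.em_record = record;
+ *   iparms.em_record_sz_in_bits = record_sz_in_bits;
+ *   iparms.dup_check = 1;
+ *   rc = tf_insert_em_entry(tfp, &iparms);
+ *
+ * On success the returned flow_handle is the value later passed to
+ * tf_delete_em_entry().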
+ * + * The insertion of duplicate entries in an EM table is not permitted. If a + * TruFlow application can guarantee that it will never insert duplicates, it + * can disable duplicate checking by passing a zero value in the dup_check + * parameter to this API. This will optimize performance. Otherwise, the + * TruFlow library will enforce protection against inserting duplicate entries. + * + * Flow handle is defined in this document: + * + * https://docs.google.com + * /document/d/1NESu7RpTN3jwxbokaPfYORQyChYRmJgs40wMIRe8_-Q/edit + * + * Returns success or busy code. + * + */ +int tf_insert_em_entry(struct tf *tfp, + struct tf_insert_em_entry_parms *parms); + +/** delete em hash entry table memory + * + * Internal: + * + * This API deletes an exact match entry from internal EM table memory of the + * specified direction. If a valid flow ptr is passed in then that takes + * precedence over the pointer to the complete key passed in. + * + * + * External: + * + * This API deletes an exact match entry from EM table memory of the specified + * direction and table scope. If a valid flow handle is passed in then that + * takes precedence over the pointer to the complete key passed in. + * + * The TruFlow library may release a dynamic bucket when an entry is deleted. + * + * + * Returns success or not found code + * + * + */ +int tf_delete_em_entry(struct tf *tfp, + struct tf_delete_em_entry_parms *parms); + +/** search em hash entry table memory + * + * Internal: + + * This API looks up an EM entry in table memory with the specified EM + * key or flow (flow takes precedence) and direction. + * + * The status will be one of: success or entry not found. If the lookup + * succeeds, a pointer to the matching entry and the result record associated + * with the matching entry will be provided. + * + * If flow_handle is set, search shadow copy. + * + * Otherwise, query the fw with key to get result. + * + * External: + * + * This API looks up an EM entry in table memory with the specified EM + * key or flow_handle (flow takes precedence), direction and table scope. + * + * The status will be one of: success or entry not found. If the lookup + * succeeds, a pointer to the matching entry and the result record associated + * with the matching entry will be provided. + * + * Returns success or not found code + * + */ +int tf_search_em_entry(struct tf *tfp, + struct tf_search_em_entry_parms *parms); +#endif /* _TF_CORE_H_ */ diff --git a/src/spdk/dpdk/drivers/net/bnxt/tf_core/tf_em.c b/src/spdk/dpdk/drivers/net/bnxt/tf_core/tf_em.c new file mode 100644 index 000000000..bd8e2ba8a --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnxt/tf_core/tf_em.c @@ -0,0 +1,515 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019-2020 Broadcom + * All rights reserved. 
+ */ + +#include +#include +#include +#include + +#include "tf_core.h" +#include "tf_em.h" +#include "tf_msg.h" +#include "tfp.h" +#include "lookup3.h" +#include "tf_ext_flow_handle.h" + +#include "bnxt.h" + +/* Enable EEM table dump + */ +#define TF_EEM_DUMP + +static struct tf_eem_64b_entry zero_key_entry; + +static uint32_t tf_em_get_key_mask(int num_entries) +{ + uint32_t mask = num_entries - 1; + + if (num_entries & 0x7FFF) + return 0; + + if (num_entries > (128 * 1024 * 1024)) + return 0; + + return mask; +} + +/* CRC32i support for Key0 hash */ +#define ucrc32(ch, crc) (crc32tbl[((crc) ^ (ch)) & 0xff] ^ ((crc) >> 8)) +#define crc32(x, y) crc32i(~0, x, y) + +static const uint32_t crc32tbl[] = { /* CRC polynomial 0xedb88320 */ +0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, +0x076dc419, 0x706af48f, 0xe963a535, 0x9e6495a3, +0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988, +0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91, +0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de, +0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7, +0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, +0x14015c4f, 0x63066cd9, 0xfa0f3d63, 0x8d080df5, +0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172, +0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b, +0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940, +0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59, +0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116, +0x21b4f4b5, 0x56b3c423, 0xcfba9599, 0xb8bda50f, +0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924, +0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d, +0x76dc4190, 0x01db7106, 0x98d220bc, 0xefd5102a, +0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433, +0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818, +0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01, +0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e, +0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457, +0x65b0d9c6, 0x12b7e950, 0x8bbeb8ea, 0xfcb9887c, +0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65, +0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2, +0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb, +0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0, +0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9, +0x5005713c, 0x270241aa, 0xbe0b1010, 0xc90c2086, +0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f, +0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, +0x59b33d17, 0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad, +0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a, +0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683, +0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8, +0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1, +0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe, +0xf762575d, 0x806567cb, 0x196c3671, 0x6e6b06e7, +0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc, +0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5, +0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252, +0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b, +0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60, +0xdf60efc3, 0xa867df55, 0x316e8eef, 0x4669be79, +0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236, +0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f, +0xc5ba3bbe, 0xb2bd0b28, 0x2bb45a92, 0x5cb36a04, +0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d, +0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a, +0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713, +0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38, +0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21, +0x86d3d2d4, 0xf1d4e242, 0x68ddb3f8, 0x1fda836e, +0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777, +0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c, +0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45, +0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2, +0xa7672661, 
0xd06016f7, 0x4969474d, 0x3e6e77db, +0xaed16a4a, 0xd9d65adc, 0x40df0b66, 0x37d83bf0, +0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9, +0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, +0xbad03605, 0xcdd70693, 0x54de5729, 0x23d967bf, +0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94, +0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d +}; + +static uint32_t crc32i(uint32_t crc, const uint8_t *buf, size_t len) +{ + int l; + + for (l = (len - 1); l >= 0; l--) + crc = ucrc32(buf[l], crc); + + return ~crc; +} + +static uint32_t tf_em_lkup_get_crc32_hash(struct tf_session *session, + uint8_t *key, + enum tf_dir dir) +{ + int i; + uint32_t index; + uint32_t val1, val2; + uint8_t temp[4]; + uint8_t *kptr = key; + + /* Do byte-wise XOR of the 52-byte HASH key first. */ + index = *key; + kptr--; + + for (i = TF_HW_EM_KEY_MAX_SIZE - 2; i >= 0; i--) { + index = index ^ *kptr; + kptr--; + } + + /* Get seeds */ + val1 = session->lkup_em_seed_mem[dir][index * 2]; + val2 = session->lkup_em_seed_mem[dir][index * 2 + 1]; + + temp[3] = (uint8_t)(val1 >> 24); + temp[2] = (uint8_t)(val1 >> 16); + temp[1] = (uint8_t)(val1 >> 8); + temp[0] = (uint8_t)(val1 & 0xff); + val1 = 0; + + /* Start with seed */ + if (!(val2 & 0x1)) + val1 = crc32i(~val1, temp, 4); + + val1 = crc32i(~val1, + (key - (TF_HW_EM_KEY_MAX_SIZE - 1)), + TF_HW_EM_KEY_MAX_SIZE); + + /* End with seed */ + if (val2 & 0x1) + val1 = crc32i(~val1, temp, 4); + + return val1; +} + +static uint32_t tf_em_lkup_get_lookup3_hash(uint32_t lookup3_init_value, + uint8_t *in_key) +{ + uint32_t val1; + + val1 = hashword(((uint32_t *)in_key) + 1, + TF_HW_EM_KEY_MAX_SIZE / (sizeof(uint32_t)), + lookup3_init_value); + + return val1; +} + +void *tf_em_get_table_page(struct tf_tbl_scope_cb *tbl_scope_cb, + enum tf_dir dir, + uint32_t offset, + enum tf_em_table_type table_type) +{ + int level = 0; + int page = offset / TF_EM_PAGE_SIZE; + void *addr = NULL; + struct tf_em_ctx_mem_info *ctx = &tbl_scope_cb->em_ctx_info[dir]; + + if (ctx == NULL) + return NULL; + + if (dir != TF_DIR_RX && dir != TF_DIR_TX) + return NULL; + + if (table_type < KEY0_TABLE || table_type > EFC_TABLE) + return NULL; + + /* + * Use the level according to the num_level of page table + */ + level = ctx->em_tables[table_type].num_lvl - 1; + + addr = (void *)ctx->em_tables[table_type].pg_tbl[level].pg_va_tbl[page]; + + return addr; +} + +/** Read Key table entry + * + * Entry is read in to entry + */ +static int tf_em_read_entry(struct tf_tbl_scope_cb *tbl_scope_cb, + struct tf_eem_64b_entry *entry, + uint32_t entry_size, + uint32_t index, + enum tf_em_table_type table_type, + enum tf_dir dir) +{ + void *page; + uint32_t entry_offset = (index * entry_size) % TF_EM_PAGE_SIZE; + + page = tf_em_get_table_page(tbl_scope_cb, + dir, + (index * entry_size), + table_type); + + if (page == NULL) + return -EINVAL; + + memcpy((uint8_t *)entry, (uint8_t *)page + entry_offset, entry_size); + return 0; +} + +static int tf_em_write_entry(struct tf_tbl_scope_cb *tbl_scope_cb, + struct tf_eem_64b_entry *entry, + uint32_t entry_size, + uint32_t index, + enum tf_em_table_type table_type, + enum tf_dir dir) +{ + void *page; + uint32_t entry_offset = (index * entry_size) % TF_EM_PAGE_SIZE; + + page = tf_em_get_table_page(tbl_scope_cb, + dir, + (index * entry_size), + table_type); + + if (page == NULL) + return -EINVAL; + + memcpy((uint8_t *)page + entry_offset, entry, entry_size); + + return 0; +} + +static int tf_em_entry_exists(struct tf_tbl_scope_cb *tbl_scope_cb, + struct tf_eem_64b_entry *entry, + uint32_t index, + enum 
tf_em_table_type table_type, + enum tf_dir dir) +{ + int rc; + struct tf_eem_64b_entry table_entry; + + rc = tf_em_read_entry(tbl_scope_cb, + &table_entry, + TF_EM_KEY_RECORD_SIZE, + index, + table_type, + dir); + + if (rc != 0) + return -EINVAL; + + if (table_entry.hdr.word1 & (1 << TF_LKUP_RECORD_VALID_SHIFT)) { + if (entry != NULL) { + if (memcmp(&table_entry, + entry, + TF_EM_KEY_RECORD_SIZE) == 0) + return -EEXIST; + } else { + return -EEXIST; + } + + return -EBUSY; + } + + return 0; +} + +static void tf_em_create_key_entry(struct tf_eem_entry_hdr *result, + uint8_t *in_key, + struct tf_eem_64b_entry *key_entry) +{ + key_entry->hdr.word1 = result->word1; + + if (result->word1 & TF_LKUP_RECORD_ACT_REC_INT_MASK) + key_entry->hdr.pointer = result->pointer; + else + key_entry->hdr.pointer = result->pointer; + + memcpy(key_entry->key, in_key, TF_HW_EM_KEY_MAX_SIZE + 4); +} + +/* tf_em_select_inject_table + * + * Returns: + * 0 - Key does not exist in either table and can be inserted + * at "index" in table "table". + * EEXIST - Key does exist in table at "index" in table "table". + * TF_ERR - Something went horribly wrong. + */ +static int tf_em_select_inject_table(struct tf_tbl_scope_cb *tbl_scope_cb, + enum tf_dir dir, + struct tf_eem_64b_entry *entry, + uint32_t key0_hash, + uint32_t key1_hash, + uint32_t *index, + enum tf_em_table_type *table) +{ + int key0_entry; + int key1_entry; + + /* + * Check KEY0 table. + */ + key0_entry = tf_em_entry_exists(tbl_scope_cb, + entry, + key0_hash, + KEY0_TABLE, + dir); + + /* + * Check KEY1 table. + */ + key1_entry = tf_em_entry_exists(tbl_scope_cb, + entry, + key1_hash, + KEY1_TABLE, + dir); + + if (key0_entry == -EEXIST) { + *table = KEY0_TABLE; + *index = key0_hash; + return -EEXIST; + } else if (key1_entry == -EEXIST) { + *table = KEY1_TABLE; + *index = key1_hash; + return -EEXIST; + } else if (key0_entry == 0) { + *table = KEY0_TABLE; + *index = key0_hash; + return 0; + } else if (key1_entry == 0) { + *table = KEY1_TABLE; + *index = key1_hash; + return 0; + } + + return -EINVAL; +} + +/** insert EEM entry API + * + * returns: + * 0 + * TF_ERR - unable to get lock + * + * insert callback returns: + * 0 + * TF_ERR_EM_DUP - key is already in table + */ +int tf_insert_eem_entry(struct tf_session *session, + struct tf_tbl_scope_cb *tbl_scope_cb, + struct tf_insert_em_entry_parms *parms) +{ + uint32_t mask; + uint32_t key0_hash; + uint32_t key1_hash; + uint32_t key0_index; + uint32_t key1_index; + struct tf_eem_64b_entry key_entry; + uint32_t index; + enum tf_em_table_type table_type; + uint32_t gfid; + int num_of_entry; + + /* Get mask to use on hash */ + mask = tf_em_get_key_mask(tbl_scope_cb->em_ctx_info[parms->dir].em_tables[KEY0_TABLE].num_entries); + + if (!mask) + return -EINVAL; + + num_of_entry = TF_HW_EM_KEY_MAX_SIZE + 4; + + key0_hash = tf_em_lkup_get_crc32_hash(session, + &parms->key[num_of_entry] - 1, + parms->dir); + key0_index = key0_hash & mask; + + key1_hash = + tf_em_lkup_get_lookup3_hash(session->lkup_lkup3_init_cfg[parms->dir], + parms->key); + key1_index = key1_hash & mask; + + /* + * Use the "result" arg to populate all of the key entry then + * store the byte swapped "raw" entry in a local copy ready + * for insertion in to the table. 
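+	 *
+	 * The entry has two candidate locations: key0_index (derived
+	 * from the CRC32 based hash) in the KEY0 table and key1_index
+	 * (derived from the lookup3 based hash) in the KEY1 table.
+	 * tf_em_select_inject_table() below picks a free slot or
+	 * reports that the key already exists.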
+ */ + tf_em_create_key_entry((struct tf_eem_entry_hdr *)parms->em_record, + ((uint8_t *)parms->key), + &key_entry); + + /* + * Find which table to use + */ + if (tf_em_select_inject_table(tbl_scope_cb, + parms->dir, + &key_entry, + key0_index, + key1_index, + &index, + &table_type) == 0) { + if (table_type == KEY0_TABLE) { + TF_SET_GFID(gfid, + key0_index, + KEY0_TABLE); + } else { + TF_SET_GFID(gfid, + key1_index, + KEY1_TABLE); + } + + /* + * Inject + */ + if (tf_em_write_entry(tbl_scope_cb, + &key_entry, + TF_EM_KEY_RECORD_SIZE, + index, + table_type, + parms->dir) == 0) { + TF_SET_FLOW_ID(parms->flow_id, + gfid, + TF_GFID_TABLE_EXTERNAL, + parms->dir); + TF_SET_FIELDS_IN_FLOW_HANDLE(parms->flow_handle, + 0, + 0, + 0, + index, + 0, + table_type); + return 0; + } + } + + return -EINVAL; +} + +/** delete EEM hash entry API + * + * returns: + * 0 + * -EINVAL - parameter error + * TF_NO_SESSION - bad session ID + * TF_ERR_TBL_SCOPE - invalid table scope + * TF_ERR_TBL_IF - invalid table interface + * + * insert callback returns + * 0 + * TF_NO_EM_MATCH - entry not found + */ +int tf_delete_eem_entry(struct tf *tfp, + struct tf_delete_em_entry_parms *parms) +{ + struct tf_session *session; + struct tf_tbl_scope_cb *tbl_scope_cb; + enum tf_em_table_type hash_type; + uint32_t index; + + if (parms == NULL) + return -EINVAL; + + session = (struct tf_session *)tfp->session->core_data; + if (session == NULL) + return -EINVAL; + + tbl_scope_cb = tbl_scope_cb_find(session, + parms->tbl_scope_id); + if (tbl_scope_cb == NULL) + return -EINVAL; + + if (parms->flow_handle == 0) + return -EINVAL; + + TF_GET_HASH_TYPE_FROM_FLOW_HANDLE(parms->flow_handle, hash_type); + TF_GET_INDEX_FROM_FLOW_HANDLE(parms->flow_handle, index); + + if (tf_em_entry_exists(tbl_scope_cb, + NULL, + index, + hash_type, + parms->dir) == -EEXIST) { + tf_em_write_entry(tbl_scope_cb, + &zero_key_entry, + TF_EM_KEY_RECORD_SIZE, + index, + hash_type, + parms->dir); + + return 0; + } + + return -EINVAL; +} diff --git a/src/spdk/dpdk/drivers/net/bnxt/tf_core/tf_em.h b/src/spdk/dpdk/drivers/net/bnxt/tf_core/tf_em.h new file mode 100644 index 000000000..8a3584fbd --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnxt/tf_core/tf_em.h @@ -0,0 +1,117 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019-2020 Broadcom + * All rights reserved. + */ + +#ifndef _TF_EM_H_ +#define _TF_EM_H_ + +#include "tf_core.h" +#include "tf_session.h" + +#define TF_HW_EM_KEY_MAX_SIZE 52 +#define TF_EM_KEY_RECORD_SIZE 64 + +/** EEM Entry header + * + */ +struct tf_eem_entry_hdr { + uint32_t pointer; + uint32_t word1; /* + * The header is made up of two words, + * this is the first word. This field has multiple + * subfields, there is no suitable single name for + * it so just going with word1. 
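+ * The individual subfields (valid, cacheability, strength,
+ * key size, action record size, internal/external action,
+ * external flow counter and action pointer MSB) are described
+ * by the TF_LKUP_RECORD_* shift/mask defines below.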
+ */ +#define TF_LKUP_RECORD_VALID_SHIFT 31 +#define TF_LKUP_RECORD_VALID_MASK 0x80000000 +#define TF_LKUP_RECORD_L1_CACHEABLE_SHIFT 30 +#define TF_LKUP_RECORD_L1_CACHEABLE_MASK 0x40000000 +#define TF_LKUP_RECORD_STRENGTH_SHIFT 28 +#define TF_LKUP_RECORD_STRENGTH_MASK 0x30000000 +#define TF_LKUP_RECORD_RESERVED_SHIFT 17 +#define TF_LKUP_RECORD_RESERVED_MASK 0x0FFE0000 +#define TF_LKUP_RECORD_KEY_SIZE_SHIFT 8 +#define TF_LKUP_RECORD_KEY_SIZE_MASK 0x0001FF00 +#define TF_LKUP_RECORD_ACT_REC_SIZE_SHIFT 3 +#define TF_LKUP_RECORD_ACT_REC_SIZE_MASK 0x000000F8 +#define TF_LKUP_RECORD_ACT_REC_INT_SHIFT 2 +#define TF_LKUP_RECORD_ACT_REC_INT_MASK 0x00000004 +#define TF_LKUP_RECORD_EXT_FLOW_CTR_SHIFT 1 +#define TF_LKUP_RECORD_EXT_FLOW_CTR_MASK 0x00000002 +#define TF_LKUP_RECORD_ACT_PTR_MSB_SHIFT 0 +#define TF_LKUP_RECORD_ACT_PTR_MSB_MASK 0x00000001 +}; + +/** EEM Entry + * Each EEM entry is 512-bit (64-bytes) + */ +struct tf_eem_64b_entry { + /** Key is 448 bits - 56 bytes */ + uint8_t key[TF_EM_KEY_RECORD_SIZE - sizeof(struct tf_eem_entry_hdr)]; + /** Header is 8 bytes long */ + struct tf_eem_entry_hdr hdr; +}; + +/** + * Allocates EEM Table scope + * + * [in] tfp + * Pointer to TruFlow handle + * + * [in] parms + * Pointer to input parameters + * + * Returns: + * 0 - Success + * -EINVAL - Parameter error + * -ENOMEM - Out of memory + */ +int tf_alloc_eem_tbl_scope(struct tf *tfp, + struct tf_alloc_tbl_scope_parms *parms); + +/** + * Free's EEM Table scope control block + * + * [in] tfp + * Pointer to TruFlow handle + * + * [in] parms + * Pointer to input parameters + * + * Returns: + * 0 - Success + * -EINVAL - Parameter error + */ +int tf_free_eem_tbl_scope_cb(struct tf *tfp, + struct tf_free_tbl_scope_parms *parms); + +/** + * Function to search for table scope control block structure + * with specified table scope ID. + * + * [in] session + * Session to use for the search of the table scope control block + * [in] tbl_scope_id + * Table scope ID to search for + * + * Returns: + * Pointer to the found table scope control block struct or NULL if + * table scope control block struct not found + */ +struct tf_tbl_scope_cb *tbl_scope_cb_find(struct tf_session *session, + uint32_t tbl_scope_id); + +int tf_insert_eem_entry(struct tf_session *session, + struct tf_tbl_scope_cb *tbl_scope_cb, + struct tf_insert_em_entry_parms *parms); + +int tf_delete_eem_entry(struct tf *tfp, + struct tf_delete_em_entry_parms *parms); + +void *tf_em_get_table_page(struct tf_tbl_scope_cb *tbl_scope_cb, + enum tf_dir dir, + uint32_t offset, + enum tf_em_table_type table_type); + +#endif /* _TF_EM_H_ */ diff --git a/src/spdk/dpdk/drivers/net/bnxt/tf_core/tf_ext_flow_handle.h b/src/spdk/dpdk/drivers/net/bnxt/tf_core/tf_ext_flow_handle.h new file mode 100644 index 000000000..417a99cda --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnxt/tf_core/tf_ext_flow_handle.h @@ -0,0 +1,166 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019-2020 Broadcom + * All rights reserved. 
+ */ + +#ifndef _TF_EXT_FLOW_HANDLE_H_ +#define _TF_EXT_FLOW_HANDLE_H_ + +#define TF_NUM_KEY_ENTRIES_FLOW_HANDLE_MASK 0x00000000F0000000ULL +#define TF_NUM_KEY_ENTRIES_FLOW_HANDLE_SFT 28 +#define TF_FLOW_TYPE_FLOW_HANDLE_MASK 0x00000000000000F0ULL +#define TF_FLOW_TYPE_FLOW_HANDLE_SFT 4 +#define TF_FLAGS_FLOW_HANDLE_MASK 0x000000000000000FULL +#define TF_FLAGS_FLOW_HANDLE_SFT 0 +#define TF_INDEX_FLOW_HANDLE_MASK 0xFFFFFFF000000000ULL +#define TF_INDEX_FLOW_HANDLE_SFT 36 +#define TF_ENTRY_NUM_FLOW_HANDLE_MASK 0x0000000E00000000ULL +#define TF_ENTRY_NUM_FLOW_HANDLE_SFT 33 +#define TF_HASH_TYPE_FLOW_HANDLE_MASK 0x0000000100000000ULL +#define TF_HASH_TYPE_FLOW_HANDLE_SFT 32 + +#define TF_FLOW_HANDLE_MASK (TF_NUM_KEY_ENTRIES_FLOW_HANDLE_MASK | \ + TF_FLOW_TYPE_FLOW_HANDLE_MASK | \ + TF_FLAGS_FLOW_HANDLE_MASK | \ + TF_INDEX_FLOW_HANDLE_MASK | \ + TF_ENTRY_NUM_FLOW_HANDLE_MASK | \ + TF_HASH_TYPE_FLOW_HANDLE_MASK) + +#define TF_GET_FIELDS_FROM_FLOW_HANDLE(flow_handle, \ + num_key_entries, \ + flow_type, \ + flags, \ + index, \ + entry_num, \ + hash_type) \ +do { \ + (num_key_entries) = \ + (((flow_handle) & TF_NUM_KEY_ENTRIES_FLOW_HANDLE_MASK) >> \ + TF_NUM_KEY_ENTRIES_FLOW_HANDLE_SFT); \ + (flow_type) = (((flow_handle) & TF_FLOW_TYPE_FLOW_HANDLE_MASK) >> \ + TF_FLOW_TYPE_FLOW_HANDLE_SFT); \ + (flags) = (((flow_handle) & TF_FLAGS_FLOW_HANDLE_MASK) >> \ + TF_FLAGS_FLOW_HANDLE_SFT); \ + (index) = (((flow_handle) & TF_INDEX_FLOW_HANDLE_MASK) >> \ + TF_INDEX_FLOW_HANDLE_SFT); \ + (entry_num) = (((flow_handle) & TF_ENTRY_NUM_FLOW_HANDLE_MASK) >> \ + TF_ENTRY_NUM_FLOW_HANDLE_SFT); \ + (hash_type) = (((flow_handle) & TF_HASH_TYPE_FLOW_HANDLE_MASK) >> \ + TF_HASH_TYPE_FLOW_HANDLE_SFT); \ +} while (0) + +#define TF_SET_FIELDS_IN_FLOW_HANDLE(flow_handle, \ + num_key_entries, \ + flow_type, \ + flags, \ + index, \ + entry_num, \ + hash_type) \ +do { \ + (flow_handle) &= ~TF_FLOW_HANDLE_MASK; \ + (flow_handle) |= \ + (((num_key_entries) << TF_NUM_KEY_ENTRIES_FLOW_HANDLE_SFT) & \ + TF_NUM_KEY_ENTRIES_FLOW_HANDLE_MASK); \ + (flow_handle) |= (((flow_type) << TF_FLOW_TYPE_FLOW_HANDLE_SFT) & \ + TF_FLOW_TYPE_FLOW_HANDLE_MASK); \ + (flow_handle) |= (((flags) << TF_FLAGS_FLOW_HANDLE_SFT) & \ + TF_FLAGS_FLOW_HANDLE_MASK); \ + (flow_handle) |= ((((uint64_t)index) << TF_INDEX_FLOW_HANDLE_SFT) & \ + TF_INDEX_FLOW_HANDLE_MASK); \ + (flow_handle) |= \ + ((((uint64_t)entry_num) << TF_ENTRY_NUM_FLOW_HANDLE_SFT) & \ + TF_ENTRY_NUM_FLOW_HANDLE_MASK); \ + (flow_handle) |= \ + ((((uint64_t)hash_type) << TF_HASH_TYPE_FLOW_HANDLE_SFT) & \ + TF_HASH_TYPE_FLOW_HANDLE_MASK); \ +} while (0) +#define TF_SET_FIELDS_IN_WH_FLOW_HANDLE TF_SET_FIELDS_IN_FLOW_HANDLE + +#define TF_GET_INDEX_FROM_FLOW_HANDLE(flow_handle, \ + index) \ +do { \ + index = (((flow_handle) & TF_INDEX_FLOW_HANDLE_MASK) >> \ + TF_INDEX_FLOW_HANDLE_SFT); \ +} while (0) + +#define TF_GET_HASH_TYPE_FROM_FLOW_HANDLE(flow_handle, \ + hash_type) \ +do { \ + hash_type = (((flow_handle) & TF_HASH_TYPE_FLOW_HANDLE_MASK) >> \ + TF_HASH_TYPE_FLOW_HANDLE_SFT); \ +} while (0) + +/* + * 32 bit Flow ID handlers + */ +#define TF_GFID_FLOW_ID_MASK 0xFFFFFFF0UL +#define TF_GFID_FLOW_ID_SFT 4 +#define TF_FLAG_FLOW_ID_MASK 0x00000002UL +#define TF_FLAG_FLOW_ID_SFT 1 +#define TF_DIR_FLOW_ID_MASK 0x00000001UL +#define TF_DIR_FLOW_ID_SFT 0 + +#define TF_SET_FLOW_ID(flow_id, gfid, flag, dir) \ +do { \ + (flow_id) &= ~(TF_GFID_FLOW_ID_MASK | \ + TF_FLAG_FLOW_ID_MASK | \ + TF_DIR_FLOW_ID_MASK); \ + (flow_id) |= (((gfid) << TF_GFID_FLOW_ID_SFT) & \ + TF_GFID_FLOW_ID_MASK) | \ + (((flag) << 
TF_FLAG_FLOW_ID_SFT) & \ + TF_FLAG_FLOW_ID_MASK) | \ + (((dir) << TF_DIR_FLOW_ID_SFT) & \ + TF_DIR_FLOW_ID_MASK); \ +} while (0) + +#define TF_GET_GFID_FROM_FLOW_ID(flow_id, gfid) \ +do { \ + gfid = (((flow_id) & TF_GFID_FLOW_ID_MASK) >> \ + TF_GFID_FLOW_ID_SFT); \ +} while (0) + +#define TF_GET_DIR_FROM_FLOW_ID(flow_id, dir) \ +do { \ + dir = (((flow_id) & TF_DIR_FLOW_ID_MASK) >> \ + TF_DIR_FLOW_ID_SFT); \ +} while (0) + +#define TF_GET_FLAG_FROM_FLOW_ID(flow_id, flag) \ +do { \ + flag = (((flow_id) & TF_FLAG_FLOW_ID_MASK) >> \ + TF_FLAG_FLOW_ID_SFT); \ +} while (0) + +/* + * 32 bit GFID handlers + */ +#define TF_HASH_INDEX_GFID_MASK 0x07FFFFFFUL +#define TF_HASH_INDEX_GFID_SFT 0 +#define TF_HASH_TYPE_GFID_MASK 0x08000000UL +#define TF_HASH_TYPE_GFID_SFT 27 + +#define TF_GFID_TABLE_INTERNAL 0 +#define TF_GFID_TABLE_EXTERNAL 1 + +#define TF_SET_GFID(gfid, index, type) \ +do { \ + gfid = (((index) << TF_HASH_INDEX_GFID_SFT) & \ + TF_HASH_INDEX_GFID_MASK) | \ + (((type) << TF_HASH_TYPE_GFID_SFT) & \ + TF_HASH_TYPE_GFID_MASK); \ +} while (0) + +#define TF_GET_HASH_INDEX_FROM_GFID(gfid, index) \ +do { \ + index = (((gfid) & TF_HASH_INDEX_GFID_MASK) >> \ + TF_HASH_INDEX_GFID_SFT); \ +} while (0) + +#define TF_GET_HASH_TYPE_FROM_GFID(gfid, type) \ +do { \ + type = (((gfid) & TF_HASH_TYPE_GFID_MASK) >> \ + TF_HASH_TYPE_GFID_SFT); \ +} while (0) + + +#endif /* _TF_EXT_FLOW_HANDLE_H_ */ diff --git a/src/spdk/dpdk/drivers/net/bnxt/tf_core/tf_msg.c b/src/spdk/dpdk/drivers/net/bnxt/tf_core/tf_msg.c new file mode 100644 index 000000000..beecafdeb --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnxt/tf_core/tf_msg.c @@ -0,0 +1,1251 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019-2020 Broadcom + * All rights reserved. + */ + +#include +#include +#include + +#include "bnxt.h" +#include "tf_core.h" +#include "tf_session.h" +#include "tfp.h" + +#include "tf_msg_common.h" +#include "tf_msg.h" +#include "hsi_struct_def_dpdk.h" +#include "hwrm_tf.h" + +/** + * Endian converts min and max values from the HW response to the query + */ +#define TF_HW_RESP_TO_QUERY(query, index, response, element) do { \ + (query)->hw_query[index].min = \ + tfp_le_to_cpu_16(response. element ## _min); \ + (query)->hw_query[index].max = \ + tfp_le_to_cpu_16(response. element ## _max); \ +} while (0) + +/** + * Endian converts the number of entries from the alloc to the request + */ +#define TF_HW_ALLOC_TO_REQ(alloc, index, request, element) \ + (request. 
num_ ## element = tfp_cpu_to_le_16((alloc)->hw_num[index])) + +/** + * Endian converts the start and stride value from the free to the request + */ +#define TF_HW_FREE_TO_REQ(hw_entry, index, request, element) do { \ + request.element ## _start = \ + tfp_cpu_to_le_16(hw_entry[index].start); \ + request.element ## _stride = \ + tfp_cpu_to_le_16(hw_entry[index].stride); \ +} while (0) + +/** + * Endian converts the start and stride from the HW response to the + * alloc + */ +#define TF_HW_RESP_TO_ALLOC(hw_entry, index, response, element) do { \ + hw_entry[index].start = \ + tfp_le_to_cpu_16(response.element ## _start); \ + hw_entry[index].stride = \ + tfp_le_to_cpu_16(response.element ## _stride); \ +} while (0) + +/** + * Endian converts min and max values from the SRAM response to the + * query + */ +#define TF_SRAM_RESP_TO_QUERY(query, index, response, element) do { \ + (query)->sram_query[index].min = \ + tfp_le_to_cpu_16(response.element ## _min); \ + (query)->sram_query[index].max = \ + tfp_le_to_cpu_16(response.element ## _max); \ +} while (0) + +/** + * Endian converts the number of entries from the action (alloc) to + * the request + */ +#define TF_SRAM_ALLOC_TO_REQ(action, index, request, element) \ + (request. num_ ## element = tfp_cpu_to_le_16((action)->sram_num[index])) + +/** + * Endian converts the start and stride value from the free to the request + */ +#define TF_SRAM_FREE_TO_REQ(sram_entry, index, request, element) do { \ + request.element ## _start = \ + tfp_cpu_to_le_16(sram_entry[index].start); \ + request.element ## _stride = \ + tfp_cpu_to_le_16(sram_entry[index].stride); \ +} while (0) + +/** + * Endian converts the start and stride from the HW response to the + * alloc + */ +#define TF_SRAM_RESP_TO_ALLOC(sram_entry, index, response, element) do { \ + sram_entry[index].start = \ + tfp_le_to_cpu_16(response.element ## _start); \ + sram_entry[index].stride = \ + tfp_le_to_cpu_16(response.element ## _stride); \ +} while (0) + +/** + * This is the MAX data we can transport across regular HWRM + */ +#define TF_PCI_BUF_SIZE_MAX 88 + +/** + * If data bigger than TF_PCI_BUF_SIZE_MAX then use DMA method + */ +struct tf_msg_dma_buf { + void *va_addr; + uint64_t pa_addr; +}; + +static int +tf_tcam_tbl_2_hwrm(enum tf_tcam_tbl_type tcam_type, + uint32_t *hwrm_type) +{ + int rc = 0; + + switch (tcam_type) { + case TF_TCAM_TBL_TYPE_L2_CTXT_TCAM: + *hwrm_type = TF_DEV_DATA_TYPE_TF_L2_CTX_ENTRY; + break; + case TF_TCAM_TBL_TYPE_PROF_TCAM: + *hwrm_type = TF_DEV_DATA_TYPE_TF_PROF_TCAM_ENTRY; + break; + case TF_TCAM_TBL_TYPE_WC_TCAM: + *hwrm_type = TF_DEV_DATA_TYPE_TF_WC_ENTRY; + break; + case TF_TCAM_TBL_TYPE_VEB_TCAM: + rc = -EOPNOTSUPP; + break; + case TF_TCAM_TBL_TYPE_SP_TCAM: + rc = -EOPNOTSUPP; + break; + case TF_TCAM_TBL_TYPE_CT_RULE_TCAM: + rc = -EOPNOTSUPP; + break; + default: + rc = -EOPNOTSUPP; + break; + } + + return rc; +} + +/** + * Sends session open request to TF Firmware + */ +int +tf_msg_session_open(struct tf *tfp, + char *ctrl_chan_name, + uint8_t *fw_session_id) +{ + int rc; + struct hwrm_tf_session_open_input req = { 0 }; + struct hwrm_tf_session_open_output resp = { 0 }; + struct tfp_send_msg_parms parms = { 0 }; + + /* Populate the request */ + memcpy(&req.session_name, ctrl_chan_name, TF_SESSION_NAME_MAX); + + parms.tf_type = HWRM_TF_SESSION_OPEN; + parms.req_data = (uint32_t *)&req; + parms.req_size = sizeof(req); + parms.resp_data = (uint32_t *)&resp; + parms.resp_size = sizeof(resp); + parms.mailbox = TF_KONG_MB; + + rc = tfp_send_msg_direct(tfp, + &parms); + 
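+	/*
+	 * Editorial note, not in the upstream patch: on success the
+	 * firmware response carries the allocated fw_session_id, which
+	 * is copied back to the caller below and echoed in the
+	 * req.fw_session_id field of later session-scoped requests.
+	 */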
if (rc) + return rc; + + *fw_session_id = resp.fw_session_id; + + return rc; +} + +/** + * Sends session attach request to TF Firmware + */ +int +tf_msg_session_attach(struct tf *tfp __rte_unused, + char *ctrl_chan_name __rte_unused, + uint8_t tf_fw_session_id __rte_unused) +{ + return -1; +} + +/** + * Sends session close request to TF Firmware + */ +int +tf_msg_session_close(struct tf *tfp) +{ + int rc; + struct hwrm_tf_session_close_input req = { 0 }; + struct hwrm_tf_session_close_output resp = { 0 }; + struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data); + struct tfp_send_msg_parms parms = { 0 }; + + /* Populate the request */ + req.fw_session_id = + tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id); + + parms.tf_type = HWRM_TF_SESSION_CLOSE; + parms.req_data = (uint32_t *)&req; + parms.req_size = sizeof(req); + parms.resp_data = (uint32_t *)&resp; + parms.resp_size = sizeof(resp); + parms.mailbox = TF_KONG_MB; + + rc = tfp_send_msg_direct(tfp, + &parms); + return rc; +} + +/** + * Sends session query config request to TF Firmware + */ +int +tf_msg_session_qcfg(struct tf *tfp) +{ + int rc; + struct hwrm_tf_session_qcfg_input req = { 0 }; + struct hwrm_tf_session_qcfg_output resp = { 0 }; + struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data); + struct tfp_send_msg_parms parms = { 0 }; + + /* Populate the request */ + req.fw_session_id = + tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id); + + parms.tf_type = HWRM_TF_SESSION_QCFG, + parms.req_data = (uint32_t *)&req; + parms.req_size = sizeof(req); + parms.resp_data = (uint32_t *)&resp; + parms.resp_size = sizeof(resp); + parms.mailbox = TF_KONG_MB; + + rc = tfp_send_msg_direct(tfp, + &parms); + return rc; +} + +/** + * Sends session HW resource query capability request to TF Firmware + */ +int +tf_msg_session_hw_resc_qcaps(struct tf *tfp, + enum tf_dir dir, + struct tf_rm_hw_query *query) +{ + int rc; + struct tfp_send_msg_parms parms = { 0 }; + struct tf_session_hw_resc_qcaps_input req = { 0 }; + struct tf_session_hw_resc_qcaps_output resp = { 0 }; + struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data); + + memset(query, 0, sizeof(*query)); + + /* Populate the request */ + req.fw_session_id = + tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id); + req.flags = tfp_cpu_to_le_16(dir); + + MSG_PREP(parms, + TF_KONG_MB, + HWRM_TF, + HWRM_TFT_SESSION_HW_RESC_QCAPS, + req, + resp); + + rc = tfp_send_msg_tunneled(tfp, &parms); + if (rc) + return rc; + + /* Process the response */ + TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_L2_CTXT_TCAM, resp, + l2_ctx_tcam_entries); + TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_PROF_FUNC, resp, + prof_func); + TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_PROF_TCAM, resp, + prof_tcam_entries); + TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_EM_PROF_ID, resp, + em_prof_id); + TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_EM_REC, resp, + em_record_entries); + TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_WC_TCAM_PROF_ID, resp, + wc_tcam_prof_id); + TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_WC_TCAM, resp, + wc_tcam_entries); + TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_METER_PROF, resp, + meter_profiles); + TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_METER_INST, + resp, meter_inst); + TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_MIRROR, resp, + mirrors); + TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_UPAR, resp, + upar); + TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_SP_TCAM, resp, + sp_tcam_entries); + TF_HW_RESP_TO_QUERY(query, 
TF_RESC_TYPE_HW_L2_FUNC, resp, + l2_func); + TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_FKB, resp, + flex_key_templ); + TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_TBL_SCOPE, resp, + tbl_scope); + TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_EPOCH0, resp, + epoch0_entries); + TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_EPOCH1, resp, + epoch1_entries); + TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_METADATA, resp, + metadata); + TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_CT_STATE, resp, + ct_state); + TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_RANGE_PROF, resp, + range_prof); + TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_RANGE_ENTRY, resp, + range_entries); + TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_LAG_ENTRY, resp, + lag_tbl_entries); + + return tfp_le_to_cpu_32(parms.tf_resp_code); +} + +/** + * Sends session HW resource allocation request to TF Firmware + */ +int +tf_msg_session_hw_resc_alloc(struct tf *tfp __rte_unused, + enum tf_dir dir, + struct tf_rm_hw_alloc *hw_alloc __rte_unused, + struct tf_rm_entry *hw_entry __rte_unused) +{ + int rc; + struct tfp_send_msg_parms parms = { 0 }; + struct tf_session_hw_resc_alloc_input req = { 0 }; + struct tf_session_hw_resc_alloc_output resp = { 0 }; + struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data); + + memset(hw_entry, 0, sizeof(*hw_entry)); + + /* Populate the request */ + req.fw_session_id = + tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id); + req.flags = tfp_cpu_to_le_16(dir); + + TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_L2_CTXT_TCAM, req, + l2_ctx_tcam_entries); + TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_PROF_FUNC, req, + prof_func_entries); + TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_PROF_TCAM, req, + prof_tcam_entries); + TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_EM_PROF_ID, req, + em_prof_id); + TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_EM_REC, req, + em_record_entries); + TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_WC_TCAM_PROF_ID, req, + wc_tcam_prof_id); + TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_WC_TCAM, req, + wc_tcam_entries); + TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_METER_PROF, req, + meter_profiles); + TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_METER_INST, req, + meter_inst); + TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_MIRROR, req, + mirrors); + TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_UPAR, req, + upar); + TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_SP_TCAM, req, + sp_tcam_entries); + TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_L2_FUNC, req, + l2_func); + TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_FKB, req, + flex_key_templ); + TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_TBL_SCOPE, req, + tbl_scope); + TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_EPOCH0, req, + epoch0_entries); + TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_EPOCH1, req, + epoch1_entries); + TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_METADATA, req, + metadata); + TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_CT_STATE, req, + ct_state); + TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_RANGE_PROF, req, + range_prof); + TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_RANGE_ENTRY, req, + range_entries); + TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_LAG_ENTRY, req, + lag_tbl_entries); + + MSG_PREP(parms, + TF_KONG_MB, + HWRM_TF, + HWRM_TFT_SESSION_HW_RESC_ALLOC, + req, + resp); + + rc = tfp_send_msg_tunneled(tfp, &parms); + if (rc) + return rc; + + /* Process the response */ + TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_L2_CTXT_TCAM, resp, + l2_ctx_tcam_entries); + TF_HW_RESP_TO_ALLOC(hw_entry, 
TF_RESC_TYPE_HW_PROF_FUNC, resp, + prof_func); + TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_PROF_TCAM, resp, + prof_tcam_entries); + TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_EM_PROF_ID, resp, + em_prof_id); + TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_EM_REC, resp, + em_record_entries); + TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_WC_TCAM_PROF_ID, resp, + wc_tcam_prof_id); + TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_WC_TCAM, resp, + wc_tcam_entries); + TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_METER_PROF, resp, + meter_profiles); + TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_METER_INST, resp, + meter_inst); + TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_MIRROR, resp, + mirrors); + TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_UPAR, resp, + upar); + TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_SP_TCAM, resp, + sp_tcam_entries); + TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_L2_FUNC, resp, + l2_func); + TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_FKB, resp, + flex_key_templ); + TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_TBL_SCOPE, resp, + tbl_scope); + TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_EPOCH0, resp, + epoch0_entries); + TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_EPOCH1, resp, + epoch1_entries); + TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_METADATA, resp, + metadata); + TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_CT_STATE, resp, + ct_state); + TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_RANGE_PROF, resp, + range_prof); + TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_RANGE_ENTRY, resp, + range_entries); + TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_LAG_ENTRY, resp, + lag_tbl_entries); + + return tfp_le_to_cpu_32(parms.tf_resp_code); +} + +/** + * Sends session HW resource free request to TF Firmware + */ +int +tf_msg_session_hw_resc_free(struct tf *tfp, + enum tf_dir dir, + struct tf_rm_entry *hw_entry) +{ + int rc; + struct tfp_send_msg_parms parms = { 0 }; + struct tf_session_hw_resc_free_input req = { 0 }; + struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data); + + memset(hw_entry, 0, sizeof(*hw_entry)); + + /* Populate the request */ + req.fw_session_id = + tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id); + req.flags = tfp_cpu_to_le_16(dir); + + TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_L2_CTXT_TCAM, req, + l2_ctx_tcam_entries); + TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_PROF_FUNC, req, + prof_func); + TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_PROF_TCAM, req, + prof_tcam_entries); + TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_EM_PROF_ID, req, + em_prof_id); + TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_EM_REC, req, + em_record_entries); + TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_WC_TCAM_PROF_ID, req, + wc_tcam_prof_id); + TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_WC_TCAM, req, + wc_tcam_entries); + TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_METER_PROF, req, + meter_profiles); + TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_METER_INST, req, + meter_inst); + TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_MIRROR, req, + mirrors); + TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_UPAR, req, + upar); + TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_SP_TCAM, req, + sp_tcam_entries); + TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_L2_FUNC, req, + l2_func); + TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_FKB, req, + flex_key_templ); + TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_TBL_SCOPE, req, + tbl_scope); + TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_EPOCH0, req, + epoch0_entries); + TF_HW_FREE_TO_REQ(hw_entry, 
TF_RESC_TYPE_HW_EPOCH1, req, + epoch1_entries); + TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_METADATA, req, + metadata); + TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_CT_STATE, req, + ct_state); + TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_RANGE_PROF, req, + range_prof); + TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_RANGE_ENTRY, req, + range_entries); + TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_LAG_ENTRY, req, + lag_tbl_entries); + + MSG_PREP_NO_RESP(parms, + TF_KONG_MB, + HWRM_TF, + HWRM_TFT_SESSION_HW_RESC_FREE, + req); + + rc = tfp_send_msg_tunneled(tfp, &parms); + if (rc) + return rc; + + return tfp_le_to_cpu_32(parms.tf_resp_code); +} + +/** + * Sends session HW resource flush request to TF Firmware + */ +int +tf_msg_session_hw_resc_flush(struct tf *tfp, + enum tf_dir dir, + struct tf_rm_entry *hw_entry) +{ + int rc; + struct tfp_send_msg_parms parms = { 0 }; + struct tf_session_hw_resc_free_input req = { 0 }; + struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data); + + /* Populate the request */ + req.fw_session_id = + tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id); + req.flags = tfp_cpu_to_le_16(dir); + + TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_L2_CTXT_TCAM, req, + l2_ctx_tcam_entries); + TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_PROF_FUNC, req, + prof_func); + TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_PROF_TCAM, req, + prof_tcam_entries); + TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_EM_PROF_ID, req, + em_prof_id); + TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_EM_REC, req, + em_record_entries); + TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_WC_TCAM_PROF_ID, req, + wc_tcam_prof_id); + TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_WC_TCAM, req, + wc_tcam_entries); + TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_METER_PROF, req, + meter_profiles); + TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_METER_INST, req, + meter_inst); + TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_MIRROR, req, + mirrors); + TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_UPAR, req, + upar); + TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_SP_TCAM, req, + sp_tcam_entries); + TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_L2_FUNC, req, + l2_func); + TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_FKB, req, + flex_key_templ); + TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_TBL_SCOPE, req, + tbl_scope); + TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_EPOCH0, req, + epoch0_entries); + TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_EPOCH1, req, + epoch1_entries); + TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_METADATA, req, + metadata); + TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_CT_STATE, req, + ct_state); + TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_RANGE_PROF, req, + range_prof); + TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_RANGE_ENTRY, req, + range_entries); + TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_LAG_ENTRY, req, + lag_tbl_entries); + + MSG_PREP_NO_RESP(parms, + TF_KONG_MB, + TF_TYPE_TRUFLOW, + HWRM_TFT_SESSION_HW_RESC_FLUSH, + req); + + rc = tfp_send_msg_tunneled(tfp, &parms); + if (rc) + return rc; + + return tfp_le_to_cpu_32(parms.tf_resp_code); +} + +/** + * Sends session SRAM resource query capability request to TF Firmware + */ +int +tf_msg_session_sram_resc_qcaps(struct tf *tfp __rte_unused, + enum tf_dir dir, + struct tf_rm_sram_query *query __rte_unused) +{ + int rc; + struct tfp_send_msg_parms parms = { 0 }; + struct tf_session_sram_resc_qcaps_input req = { 0 }; + struct tf_session_sram_resc_qcaps_output resp = { 0 }; + struct tf_session *tfs = (struct tf_session 
*)(tfp->session->core_data); + + /* Populate the request */ + req.fw_session_id = + tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id); + req.flags = tfp_cpu_to_le_16(dir); + + MSG_PREP(parms, + TF_KONG_MB, + HWRM_TF, + HWRM_TFT_SESSION_SRAM_RESC_QCAPS, + req, + resp); + + rc = tfp_send_msg_tunneled(tfp, &parms); + if (rc) + return rc; + + /* Process the response */ + TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_FULL_ACTION, resp, + full_action); + TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_MCG, resp, + mcg); + TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_ENCAP_8B, resp, + encap_8b); + TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_ENCAP_16B, resp, + encap_16b); + TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_ENCAP_64B, resp, + encap_64b); + TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_SP_SMAC, resp, + sp_smac); + TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_SP_SMAC_IPV4, resp, + sp_smac_ipv4); + TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_SP_SMAC_IPV6, resp, + sp_smac_ipv6); + TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_COUNTER_64B, resp, + counter_64b); + TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_NAT_SPORT, resp, + nat_sport); + TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_NAT_DPORT, resp, + nat_dport); + TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_NAT_S_IPV4, resp, + nat_s_ipv4); + TF_SRAM_RESP_TO_QUERY(query, TF_RESC_TYPE_SRAM_NAT_D_IPV4, resp, + nat_d_ipv4); + + return tfp_le_to_cpu_32(parms.tf_resp_code); +} + +/** + * Sends session SRAM resource allocation request to TF Firmware + */ +int +tf_msg_session_sram_resc_alloc(struct tf *tfp __rte_unused, + enum tf_dir dir, + struct tf_rm_sram_alloc *sram_alloc __rte_unused, + struct tf_rm_entry *sram_entry __rte_unused) +{ + int rc; + struct tfp_send_msg_parms parms = { 0 }; + struct tf_session_sram_resc_alloc_input req = { 0 }; + struct tf_session_sram_resc_alloc_output resp; + struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data); + + memset(&resp, 0, sizeof(resp)); + + /* Populate the request */ + req.fw_session_id = + tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id); + req.flags = tfp_cpu_to_le_16(dir); + + TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_FULL_ACTION, req, + full_action); + TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_MCG, req, + mcg); + TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_ENCAP_8B, req, + encap_8b); + TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_ENCAP_16B, req, + encap_16b); + TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_ENCAP_64B, req, + encap_64b); + TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_SP_SMAC, req, + sp_smac); + TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_SP_SMAC_IPV4, + req, sp_smac_ipv4); + TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_SP_SMAC_IPV6, + req, sp_smac_ipv6); + TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_COUNTER_64B, + req, counter_64b); + TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_NAT_SPORT, req, + nat_sport); + TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_NAT_DPORT, req, + nat_dport); + TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_NAT_S_IPV4, req, + nat_s_ipv4); + TF_SRAM_ALLOC_TO_REQ(sram_alloc, TF_RESC_TYPE_SRAM_NAT_D_IPV4, req, + nat_d_ipv4); + + MSG_PREP(parms, + TF_KONG_MB, + HWRM_TF, + HWRM_TFT_SESSION_SRAM_RESC_ALLOC, + req, + resp); + + rc = tfp_send_msg_tunneled(tfp, &parms); + if (rc) + return rc; + + /* Process the response */ + TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_FULL_ACTION, + resp, full_action); + 
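+	/*
+	 * Illustrative note, not in the upstream patch: every
+	 * TF_SRAM_RESP_TO_ALLOC() line in this block expands to two
+	 * little-endian conversions; the full_action case above, for
+	 * example, is equivalent to:
+	 *
+	 *   sram_entry[TF_RESC_TYPE_SRAM_FULL_ACTION].start =
+	 *           tfp_le_to_cpu_16(resp.full_action_start);
+	 *   sram_entry[TF_RESC_TYPE_SRAM_FULL_ACTION].stride =
+	 *           tfp_le_to_cpu_16(resp.full_action_stride);
+	 */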
TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_MCG, resp, + mcg); + TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_ENCAP_8B, resp, + encap_8b); + TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_ENCAP_16B, resp, + encap_16b); + TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_ENCAP_64B, resp, + encap_64b); + TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_SP_SMAC, resp, + sp_smac); + TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_SP_SMAC_IPV4, + resp, sp_smac_ipv4); + TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_SP_SMAC_IPV6, + resp, sp_smac_ipv6); + TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_COUNTER_64B, resp, + counter_64b); + TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_NAT_SPORT, resp, + nat_sport); + TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_NAT_DPORT, resp, + nat_dport); + TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_NAT_S_IPV4, resp, + nat_s_ipv4); + TF_SRAM_RESP_TO_ALLOC(sram_entry, TF_RESC_TYPE_SRAM_NAT_D_IPV4, resp, + nat_d_ipv4); + + return tfp_le_to_cpu_32(parms.tf_resp_code); +} + +/** + * Sends session SRAM resource free request to TF Firmware + */ +int +tf_msg_session_sram_resc_free(struct tf *tfp __rte_unused, + enum tf_dir dir, + struct tf_rm_entry *sram_entry __rte_unused) +{ + int rc; + struct tfp_send_msg_parms parms = { 0 }; + struct tf_session_sram_resc_free_input req = { 0 }; + struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data); + + /* Populate the request */ + req.fw_session_id = + tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id); + req.flags = tfp_cpu_to_le_16(dir); + + TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_FULL_ACTION, req, + full_action); + TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_MCG, req, + mcg); + TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_ENCAP_8B, req, + encap_8b); + TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_ENCAP_16B, req, + encap_16b); + TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_ENCAP_64B, req, + encap_64b); + TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_SP_SMAC, req, + sp_smac); + TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_SP_SMAC_IPV4, req, + sp_smac_ipv4); + TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_SP_SMAC_IPV6, req, + sp_smac_ipv6); + TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_COUNTER_64B, req, + counter_64b); + TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_NAT_SPORT, req, + nat_sport); + TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_NAT_DPORT, req, + nat_dport); + TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_NAT_S_IPV4, req, + nat_s_ipv4); + TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_NAT_D_IPV4, req, + nat_d_ipv4); + + MSG_PREP_NO_RESP(parms, + TF_KONG_MB, + HWRM_TF, + HWRM_TFT_SESSION_SRAM_RESC_FREE, + req); + + rc = tfp_send_msg_tunneled(tfp, &parms); + if (rc) + return rc; + + return tfp_le_to_cpu_32(parms.tf_resp_code); +} + +/** + * Sends session SRAM resource flush request to TF Firmware + */ +int +tf_msg_session_sram_resc_flush(struct tf *tfp, + enum tf_dir dir, + struct tf_rm_entry *sram_entry) +{ + int rc; + struct tfp_send_msg_parms parms = { 0 }; + struct tf_session_sram_resc_free_input req = { 0 }; + struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data); + + /* Populate the request */ + req.fw_session_id = + tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id); + req.flags = tfp_cpu_to_le_16(dir); + + TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_FULL_ACTION, req, + full_action); + TF_SRAM_FREE_TO_REQ(sram_entry, 
TF_RESC_TYPE_SRAM_MCG, req, + mcg); + TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_ENCAP_8B, req, + encap_8b); + TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_ENCAP_16B, req, + encap_16b); + TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_ENCAP_64B, req, + encap_64b); + TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_SP_SMAC, req, + sp_smac); + TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_SP_SMAC_IPV4, req, + sp_smac_ipv4); + TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_SP_SMAC_IPV6, req, + sp_smac_ipv6); + TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_COUNTER_64B, req, + counter_64b); + TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_NAT_SPORT, req, + nat_sport); + TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_NAT_DPORT, req, + nat_dport); + TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_NAT_S_IPV4, req, + nat_s_ipv4); + TF_SRAM_FREE_TO_REQ(sram_entry, TF_RESC_TYPE_SRAM_NAT_D_IPV4, req, + nat_d_ipv4); + + MSG_PREP_NO_RESP(parms, + TF_KONG_MB, + TF_TYPE_TRUFLOW, + HWRM_TFT_SESSION_SRAM_RESC_FLUSH, + req); + + rc = tfp_send_msg_tunneled(tfp, &parms); + if (rc) + return rc; + + return tfp_le_to_cpu_32(parms.tf_resp_code); +} + +/** + * Sends EM mem register request to Firmware + */ +int tf_msg_em_mem_rgtr(struct tf *tfp, + int page_lvl, + int page_size, + uint64_t dma_addr, + uint16_t *ctx_id) +{ + int rc; + struct hwrm_tf_ctxt_mem_rgtr_input req = { 0 }; + struct hwrm_tf_ctxt_mem_rgtr_output resp = { 0 }; + struct tfp_send_msg_parms parms = { 0 }; + + req.page_level = page_lvl; + req.page_size = page_size; + req.page_dir = tfp_cpu_to_le_64(dma_addr); + + parms.tf_type = HWRM_TF_CTXT_MEM_RGTR; + parms.req_data = (uint32_t *)&req; + parms.req_size = sizeof(req); + parms.resp_data = (uint32_t *)&resp; + parms.resp_size = sizeof(resp); + parms.mailbox = TF_KONG_MB; + + rc = tfp_send_msg_direct(tfp, + &parms); + if (rc) + return rc; + + *ctx_id = tfp_le_to_cpu_16(resp.ctx_id); + + return rc; +} + +/** + * Sends EM mem unregister request to Firmware + */ +int tf_msg_em_mem_unrgtr(struct tf *tfp, + uint16_t *ctx_id) +{ + int rc; + struct hwrm_tf_ctxt_mem_unrgtr_input req = {0}; + struct hwrm_tf_ctxt_mem_unrgtr_output resp = {0}; + struct tfp_send_msg_parms parms = { 0 }; + + req.ctx_id = tfp_cpu_to_le_32(*ctx_id); + + parms.tf_type = HWRM_TF_CTXT_MEM_UNRGTR; + parms.req_data = (uint32_t *)&req; + parms.req_size = sizeof(req); + parms.resp_data = (uint32_t *)&resp; + parms.resp_size = sizeof(resp); + parms.mailbox = TF_KONG_MB; + + rc = tfp_send_msg_direct(tfp, + &parms); + return rc; +} + +/** + * Sends EM qcaps request to Firmware + */ +int tf_msg_em_qcaps(struct tf *tfp, + int dir, + struct tf_em_caps *em_caps) +{ + int rc; + struct hwrm_tf_ext_em_qcaps_input req = {0}; + struct hwrm_tf_ext_em_qcaps_output resp = { 0 }; + uint32_t flags; + struct tfp_send_msg_parms parms = { 0 }; + + flags = (dir == TF_DIR_TX ? 
HWRM_TF_EXT_EM_QCAPS_INPUT_FLAGS_DIR_TX : + HWRM_TF_EXT_EM_QCAPS_INPUT_FLAGS_DIR_RX); + req.flags = tfp_cpu_to_le_32(flags); + + parms.tf_type = HWRM_TF_EXT_EM_QCAPS; + parms.req_data = (uint32_t *)&req; + parms.req_size = sizeof(req); + parms.resp_data = (uint32_t *)&resp; + parms.resp_size = sizeof(resp); + parms.mailbox = TF_KONG_MB; + + rc = tfp_send_msg_direct(tfp, + &parms); + if (rc) + return rc; + + em_caps->supported = tfp_le_to_cpu_32(resp.supported); + em_caps->max_entries_supported = + tfp_le_to_cpu_32(resp.max_entries_supported); + em_caps->key_entry_size = tfp_le_to_cpu_16(resp.key_entry_size); + em_caps->record_entry_size = + tfp_le_to_cpu_16(resp.record_entry_size); + em_caps->efc_entry_size = tfp_le_to_cpu_16(resp.efc_entry_size); + + return rc; +} + +/** + * Sends EM config request to Firmware + */ +int tf_msg_em_cfg(struct tf *tfp, + uint32_t num_entries, + uint16_t key0_ctx_id, + uint16_t key1_ctx_id, + uint16_t record_ctx_id, + uint16_t efc_ctx_id, + uint8_t flush_interval, + int dir) +{ + int rc; + struct hwrm_tf_ext_em_cfg_input req = {0}; + struct hwrm_tf_ext_em_cfg_output resp = {0}; + uint32_t flags; + struct tfp_send_msg_parms parms = { 0 }; + + flags = (dir == TF_DIR_TX ? HWRM_TF_EXT_EM_CFG_INPUT_FLAGS_DIR_TX : + HWRM_TF_EXT_EM_CFG_INPUT_FLAGS_DIR_RX); + flags |= HWRM_TF_EXT_EM_QCAPS_INPUT_FLAGS_PREFERRED_OFFLOAD; + + req.flags = tfp_cpu_to_le_32(flags); + req.num_entries = tfp_cpu_to_le_32(num_entries); + + req.flush_interval = flush_interval; + + req.key0_ctx_id = tfp_cpu_to_le_16(key0_ctx_id); + req.key1_ctx_id = tfp_cpu_to_le_16(key1_ctx_id); + req.record_ctx_id = tfp_cpu_to_le_16(record_ctx_id); + req.efc_ctx_id = tfp_cpu_to_le_16(efc_ctx_id); + + parms.tf_type = HWRM_TF_EXT_EM_CFG; + parms.req_data = (uint32_t *)&req; + parms.req_size = sizeof(req); + parms.resp_data = (uint32_t *)&resp; + parms.resp_size = sizeof(resp); + parms.mailbox = TF_KONG_MB; + + rc = tfp_send_msg_direct(tfp, + &parms); + return rc; +} + +/** + * Sends EM operation request to Firmware + */ +int tf_msg_em_op(struct tf *tfp, + int dir, + uint16_t op) +{ + int rc; + struct hwrm_tf_ext_em_op_input req = {0}; + struct hwrm_tf_ext_em_op_output resp = {0}; + uint32_t flags; + struct tfp_send_msg_parms parms = { 0 }; + + flags = (dir == TF_DIR_TX ? 
HWRM_TF_EXT_EM_CFG_INPUT_FLAGS_DIR_TX : + HWRM_TF_EXT_EM_CFG_INPUT_FLAGS_DIR_RX); + req.flags = tfp_cpu_to_le_32(flags); + req.op = tfp_cpu_to_le_16(op); + + parms.tf_type = HWRM_TF_EXT_EM_OP; + parms.req_data = (uint32_t *)&req; + parms.req_size = sizeof(req); + parms.resp_data = (uint32_t *)&resp; + parms.resp_size = sizeof(resp); + parms.mailbox = TF_KONG_MB; + + rc = tfp_send_msg_direct(tfp, + &parms); + return rc; +} + +int +tf_msg_set_tbl_entry(struct tf *tfp, + enum tf_dir dir, + enum tf_tbl_type type, + uint16_t size, + uint8_t *data, + uint32_t index) +{ + int rc; + struct tfp_send_msg_parms parms = { 0 }; + struct tf_tbl_type_set_input req = { 0 }; + struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data); + + /* Populate the request */ + req.fw_session_id = + tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id); + req.flags = tfp_cpu_to_le_16(dir); + req.type = tfp_cpu_to_le_32(type); + req.size = tfp_cpu_to_le_16(size); + req.index = tfp_cpu_to_le_32(index); + + tfp_memcpy(&req.data, + data, + size); + + MSG_PREP_NO_RESP(parms, + TF_KONG_MB, + HWRM_TF, + HWRM_TFT_TBL_TYPE_SET, + req); + + rc = tfp_send_msg_tunneled(tfp, &parms); + if (rc) + return rc; + + return tfp_le_to_cpu_32(parms.tf_resp_code); +} + +int +tf_msg_get_tbl_entry(struct tf *tfp, + enum tf_dir dir, + enum tf_tbl_type type, + uint16_t size, + uint8_t *data, + uint32_t index) +{ + int rc; + struct tfp_send_msg_parms parms = { 0 }; + struct tf_tbl_type_get_input req = { 0 }; + struct tf_tbl_type_get_output resp = { 0 }; + struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data); + + /* Populate the request */ + req.fw_session_id = + tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id); + req.flags = tfp_cpu_to_le_16(dir); + req.type = tfp_cpu_to_le_32(type); + req.index = tfp_cpu_to_le_32(index); + + MSG_PREP(parms, + TF_KONG_MB, + HWRM_TF, + HWRM_TFT_TBL_TYPE_GET, + req, + resp); + + rc = tfp_send_msg_tunneled(tfp, &parms); + if (rc) + return rc; + + /* Verify that we got enough buffer to return the requested data */ + if (resp.size < size) + return -EINVAL; + + tfp_memcpy(data, + &resp.data, + resp.size); + + return tfp_le_to_cpu_32(parms.tf_resp_code); +} + +#define TF_BYTES_PER_SLICE(tfp) 12 +#define NUM_SLICES(tfp, bytes) \ + (((bytes) + TF_BYTES_PER_SLICE(tfp) - 1) / TF_BYTES_PER_SLICE(tfp)) + +static int +tf_msg_get_dma_buf(struct tf_msg_dma_buf *buf, int size) +{ + struct tfp_calloc_parms alloc_parms; + int rc; + + /* Allocate session */ + alloc_parms.nitems = 1; + alloc_parms.size = size; + alloc_parms.alignment = 0; + rc = tfp_calloc(&alloc_parms); + if (rc) { + /* Log error */ + PMD_DRV_LOG(ERR, + "Failed to allocate tcam dma entry, rc:%d\n", + rc); + return -ENOMEM; + } + + buf->pa_addr = (uintptr_t)alloc_parms.mem_pa; + buf->va_addr = alloc_parms.mem_va; + + return 0; +} + +int +tf_msg_tcam_entry_set(struct tf *tfp, + struct tf_set_tcam_entry_parms *parms) +{ + int rc; + struct tfp_send_msg_parms mparms = { 0 }; + struct hwrm_tf_tcam_set_input req = { 0 }; + struct hwrm_tf_tcam_set_output resp = { 0 }; + uint16_t key_bytes = + TF_BITS2BYTES_WORD_ALIGN(parms->key_sz_in_bits); + uint16_t result_bytes = + TF_BITS2BYTES_WORD_ALIGN(parms->result_sz_in_bits); + struct tf_msg_dma_buf buf = { 0 }; + uint8_t *data = NULL; + int data_size = 0; + + rc = tf_tcam_tbl_2_hwrm(parms->tcam_tbl_type, &req.type); + if (rc != 0) + return rc; + + req.idx = tfp_cpu_to_le_16(parms->idx); + if (parms->dir == TF_DIR_TX) + req.flags |= HWRM_TF_TCAM_SET_INPUT_FLAGS_DIR_TX; + + req.key_size 
= key_bytes; + req.mask_offset = key_bytes; + /* Result follows after key and mask, thus multiply by 2 */ + req.result_offset = 2 * key_bytes; + req.result_size = result_bytes; + data_size = 2 * req.key_size + req.result_size; + + if (data_size <= TF_PCI_BUF_SIZE_MAX) { + /* use pci buffer */ + data = &req.dev_data[0]; + } else { + /* use dma buffer */ + req.flags |= HWRM_TF_TCAM_SET_INPUT_FLAGS_DMA; + rc = tf_msg_get_dma_buf(&buf, data_size); + if (rc != 0) + return rc; + data = buf.va_addr; + memcpy(&req.dev_data[0], &buf.pa_addr, sizeof(buf.pa_addr)); + } + + memcpy(&data[0], parms->key, key_bytes); + memcpy(&data[key_bytes], parms->mask, key_bytes); + memcpy(&data[req.result_offset], parms->result, result_bytes); + + mparms.tf_type = HWRM_TF_TCAM_SET; + mparms.req_data = (uint32_t *)&req; + mparms.req_size = sizeof(req); + mparms.resp_data = (uint32_t *)&resp; + mparms.resp_size = sizeof(resp); + mparms.mailbox = TF_KONG_MB; + + rc = tfp_send_msg_direct(tfp, + &mparms); + if (rc) + return rc; + + if (buf.va_addr != NULL) + tfp_free(buf.va_addr); + + return rc; +} + +int +tf_msg_tcam_entry_free(struct tf *tfp, + struct tf_free_tcam_entry_parms *in_parms) +{ + int rc; + struct hwrm_tf_tcam_free_input req = { 0 }; + struct hwrm_tf_tcam_free_output resp = { 0 }; + struct tfp_send_msg_parms parms = { 0 }; + + /* Populate the request */ + rc = tf_tcam_tbl_2_hwrm(in_parms->tcam_tbl_type, &req.type); + if (rc != 0) + return rc; + + req.count = 1; + req.idx_list[0] = tfp_cpu_to_le_16(in_parms->idx); + if (in_parms->dir == TF_DIR_TX) + req.flags |= HWRM_TF_TCAM_FREE_INPUT_FLAGS_DIR_TX; + + parms.tf_type = HWRM_TF_TCAM_FREE; + parms.req_data = (uint32_t *)&req; + parms.req_size = sizeof(req); + parms.resp_data = (uint32_t *)&resp; + parms.resp_size = sizeof(resp); + parms.mailbox = TF_KONG_MB; + + rc = tfp_send_msg_direct(tfp, + &parms); + return rc; +} diff --git a/src/spdk/dpdk/drivers/net/bnxt/tf_core/tf_msg.h b/src/spdk/dpdk/drivers/net/bnxt/tf_core/tf_msg.h new file mode 100644 index 000000000..030d1881e --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnxt/tf_core/tf_msg.h @@ -0,0 +1,257 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019-2020 Broadcom + * All rights reserved. 
+ */ + +#ifndef _TF_MSG_H_ +#define _TF_MSG_H_ + +#include "tf_tbl.h" +#include "tf_rm.h" + +struct tf; + +/** + * Sends session open request to Firmware + * + * [in] session + * Pointer to session handle + * + * [in] ctrl_chan_name + * PCI name of the control channel + * + * [in/out] fw_session_id + * Pointer to the fw_session_id that is allocated on firmware side + * + * Returns: + * + */ +int tf_msg_session_open(struct tf *tfp, + char *ctrl_chan_name, + uint8_t *fw_session_id); + +/** + * Sends session close request to Firmware + * + * [in] session + * Pointer to session handle + * + * [in] fw_session_id + * Pointer to the fw_session_id that is assigned to the session at + * time of session open + * + * Returns: + * + */ +int tf_msg_session_attach(struct tf *tfp, + char *ctrl_channel_name, + uint8_t tf_fw_session_id); + +/** + * Sends session close request to Firmware + * + * [in] session + * Pointer to session handle + * + * Returns: + * + */ +int tf_msg_session_close(struct tf *tfp); + +/** + * Sends session query config request to TF Firmware + */ +int tf_msg_session_qcfg(struct tf *tfp); + +/** + * Sends session HW resource query capability request to TF Firmware + */ +int tf_msg_session_hw_resc_qcaps(struct tf *tfp, + enum tf_dir dir, + struct tf_rm_hw_query *hw_query); + +/** + * Sends session HW resource allocation request to TF Firmware + */ +int tf_msg_session_hw_resc_alloc(struct tf *tfp, + enum tf_dir dir, + struct tf_rm_hw_alloc *hw_alloc, + struct tf_rm_entry *hw_entry); + +/** + * Sends session HW resource free request to TF Firmware + */ +int tf_msg_session_hw_resc_free(struct tf *tfp, + enum tf_dir dir, + struct tf_rm_entry *hw_entry); + +/** + * Sends session HW resource flush request to TF Firmware + */ +int tf_msg_session_hw_resc_flush(struct tf *tfp, + enum tf_dir dir, + struct tf_rm_entry *hw_entry); + +/** + * Sends session SRAM resource query capability request to TF Firmware + */ +int tf_msg_session_sram_resc_qcaps(struct tf *tfp, + enum tf_dir dir, + struct tf_rm_sram_query *sram_query); + +/** + * Sends session SRAM resource allocation request to TF Firmware + */ +int tf_msg_session_sram_resc_alloc(struct tf *tfp, + enum tf_dir dir, + struct tf_rm_sram_alloc *sram_alloc, + struct tf_rm_entry *sram_entry); + +/** + * Sends session SRAM resource free request to TF Firmware + */ +int tf_msg_session_sram_resc_free(struct tf *tfp, + enum tf_dir dir, + struct tf_rm_entry *sram_entry); + +/** + * Sends session SRAM resource flush request to TF Firmware + */ +int tf_msg_session_sram_resc_flush(struct tf *tfp, + enum tf_dir dir, + struct tf_rm_entry *sram_entry); + +/** + * Sends EM mem register request to Firmware + */ +int tf_msg_em_mem_rgtr(struct tf *tfp, + int page_lvl, + int page_size, + uint64_t dma_addr, + uint16_t *ctx_id); + +/** + * Sends EM mem unregister request to Firmware + */ +int tf_msg_em_mem_unrgtr(struct tf *tfp, + uint16_t *ctx_id); + +/** + * Sends EM qcaps request to Firmware + */ +int tf_msg_em_qcaps(struct tf *tfp, + int dir, + struct tf_em_caps *em_caps); + +/** + * Sends EM config request to Firmware + */ +int tf_msg_em_cfg(struct tf *tfp, + uint32_t num_entries, + uint16_t key0_ctx_id, + uint16_t key1_ctx_id, + uint16_t record_ctx_id, + uint16_t efc_ctx_id, + uint8_t flush_interval, + int dir); + +/** + * Sends EM operation request to Firmware + */ +int tf_msg_em_op(struct tf *tfp, + int dir, + uint16_t op); + +/** + * Sends tcam entry 'set' to the Firmware. 
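+ *
+ * Editorial note, derived from the tf_msg.c implementation in this
+ * patch: when the combined key, mask and result exceed
+ * TF_PCI_BUF_SIZE_MAX bytes, the entry data is placed in a DMA buffer
+ * and only its physical address travels in the HWRM request;
+ * otherwise the data is carried inline in dev_data.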
+ * + * [in] tfp + * Pointer to session handle + * + * [in] parms + * Pointer to set parameters + * + * Returns: + * 0 on Success else internal Truflow error + */ +int tf_msg_tcam_entry_set(struct tf *tfp, + struct tf_set_tcam_entry_parms *parms); + +/** + * Sends tcam entry 'free' to the Firmware. + * + * [in] tfp + * Pointer to session handle + * + * [in] parms + * Pointer to free parameters + * + * Returns: + * 0 on Success else internal Truflow error + */ +int tf_msg_tcam_entry_free(struct tf *tfp, + struct tf_free_tcam_entry_parms *parms); + +/** + * Sends Set message of a Table Type element to the firmware. + * + * [in] tfp + * Pointer to session handle + * + * [in] dir + * Direction location of the element to set + * + * [in] type + * Type of the object to set + * + * [in] size + * Size of the data to set + * + * [in] data + * Data to set + * + * [in] index + * Index to set + * + * Returns: + * 0 - Success + */ +int tf_msg_set_tbl_entry(struct tf *tfp, + enum tf_dir dir, + enum tf_tbl_type type, + uint16_t size, + uint8_t *data, + uint32_t index); + +/** + * Sends get message of a Table Type element to the firmware. + * + * [in] tfp + * Pointer to session handle + * + * [in] dir + * Direction location of the element to get + * + * [in] type + * Type of the object to get + * + * [in] size + * Size of the data read + * + * [in] data + * Data read + * + * [in] index + * Index to get + * + * Returns: + * 0 - Success + */ +int tf_msg_get_tbl_entry(struct tf *tfp, + enum tf_dir dir, + enum tf_tbl_type type, + uint16_t size, + uint8_t *data, + uint32_t index); + +#endif /* _TF_MSG_H_ */ diff --git a/src/spdk/dpdk/drivers/net/bnxt/tf_core/tf_msg_common.h b/src/spdk/dpdk/drivers/net/bnxt/tf_core/tf_msg_common.h new file mode 100644 index 000000000..7a4e82561 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnxt/tf_core/tf_msg_common.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019-2020 Broadcom + * All rights reserved. + */ + +#ifndef _TF_MSG_COMMON_H_ +#define _TF_MSG_COMMON_H_ + +/* Communication Mailboxes */ +#define TF_CHIMP_MB 0 +#define TF_KONG_MB 1 + +/* Helper to fill in the parms structure */ +#define MSG_PREP(parms, mb, type, subtype, req, resp) do { \ + parms.mailbox = mb; \ + parms.tf_type = type; \ + parms.tf_subtype = subtype; \ + parms.tf_resp_code = 0; \ + parms.req_size = sizeof(req); \ + parms.req_data = (uint32_t *)&(req); \ + parms.resp_size = sizeof(resp); \ + parms.resp_data = (uint32_t *)&(resp); \ + } while (0) + +#define MSG_PREP_NO_REQ(parms, mb, type, subtype, resp) do { \ + parms.mailbox = mb; \ + parms.tf_type = type; \ + parms.tf_subtype = subtype; \ + parms.tf_resp_code = 0; \ + parms.req_size = 0; \ + parms.req_data = NULL; \ + parms.resp_size = sizeof(resp); \ + parms.resp_data = (uint32_t *)&(resp); \ + } while (0) + +#define MSG_PREP_NO_RESP(parms, mb, type, subtype, req) do { \ + parms.mailbox = mb; \ + parms.tf_type = type; \ + parms.tf_subtype = subtype; \ + parms.tf_resp_code = 0; \ + parms.req_size = sizeof(req); \ + parms.req_data = (uint32_t *)&(req); \ + parms.resp_size = 0; \ + parms.resp_data = NULL; \ + } while (0) + +#endif /* _TF_MSG_COMMON_H_ */ diff --git a/src/spdk/dpdk/drivers/net/bnxt/tf_core/tf_project.h b/src/spdk/dpdk/drivers/net/bnxt/tf_core/tf_project.h new file mode 100644 index 000000000..ab5f113d8 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnxt/tf_core/tf_project.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019-2020 Broadcom + * All rights reserved. 
+ */ + +#ifndef _TF_PROJECT_H_ +#define _TF_PROJECT_H_ + +/* Wh+ support enabled */ +#ifndef TF_SUPPORT_P4 +#define TF_SUPPORT_P4 1 +#endif + +/* Shadow DB Support */ +#ifndef TF_SHADOW +#define TF_SHADOW 0 +#endif + +/* Shared memory for session */ +#ifndef TF_SHARED +#define TF_SHARED 0 +#endif + +#endif /* _TF_PROJECT_H_ */ diff --git a/src/spdk/dpdk/drivers/net/bnxt/tf_core/tf_resources.h b/src/spdk/dpdk/drivers/net/bnxt/tf_core/tf_resources.h new file mode 100644 index 000000000..05e131f8b --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnxt/tf_core/tf_resources.h @@ -0,0 +1,542 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019-2020 Broadcom + * All rights reserved. + */ + +#ifndef _TF_RESOURCES_H_ +#define _TF_RESOURCES_H_ + +/* + * Hardware specific MAX values + * NOTE: Should really come from the chip_cfg.h in some MAX form or HCAPI + */ + +/* Common HW resources for all chip variants */ +#define TF_NUM_L2_CTXT_TCAM 1024 /* < Number of L2 context TCAM + * entries + */ +#define TF_NUM_PROF_FUNC 128 /* < Number prof_func ID */ +#define TF_NUM_PROF_TCAM 1024 /* < Number entries in profile + * TCAM + */ +#define TF_NUM_EM_PROF_ID 64 /* < Number software EM Profile + * IDs + */ +#define TF_NUM_WC_PROF_ID 256 /* < Number WC profile IDs */ +#define TF_NUM_WC_TCAM_ROW 256 /* Number slices per row in WC + * TCAM. A slices is a WC TCAM entry. + */ +#define TF_NUM_METER_PROF 256 /* < Number of meter profiles */ +#define TF_NUM_METER 1024 /* < Number of meter instances */ +#define TF_NUM_MIRROR 2 /* < Number of mirror instances */ +#define TF_NUM_UPAR 2 /* < Number of UPAR instances */ + +/* Wh+/Brd2 specific HW resources */ +#define TF_NUM_SP_TCAM 512 /* < Number of Source Property TCAM + * entries + */ + +/* Brd2/Brd4 specific HW resources */ +#define TF_NUM_L2_FUNC 256 /* < Number of L2 Func */ + + +/* Brd3, Brd4 common HW resources */ +#define TF_NUM_FKB 1 /* < Number of Flexible Key Builder + * templates + */ + +/* Brd4 specific HW resources */ +#define TF_NUM_TBL_SCOPE 16 /* < Number of TBL scopes */ +#define TF_NUM_EPOCH0 1 /* < Number of Epoch0 */ +#define TF_NUM_EPOCH1 1 /* < Number of Epoch1 */ +#define TF_NUM_METADATA 8 /* < Number of MetaData Profiles */ +#define TF_NUM_CT_STATE 32 /* < Number of Connection Tracking + * States + */ +#define TF_NUM_RANGE_PROF 16 /* < Number of Range Profiles */ +#define TF_NUM_RANGE_ENTRY (64 * 1024) /* < Number of Range Entries */ +#define TF_NUM_LAG_ENTRY 256 /* < Number of LAG Entries */ + +/* + * Common for the Reserved Resource defines below: + * + * - HW Resources + * For resources where a priority level plays a role, i.e. l2 ctx + * tcam entries, both a number of resources and a begin/end pair is + * required. The begin/end is used to assure TFLIB gets the correct + * priority setting for that resource. + * + * For EM records there is no priority required thus a number of + * resources is sufficient. + * + * Example, TCAM: + * 64 L2 CTXT TCAM entries would in a max 1024 pool be entry + * 0-63 as HW presents 0 as the highest priority entry. + * + * - SRAM Resources + * Handled as regular resources as there is no priority required. + * + * Common for these resources is that they are handled per direction, + * rx/tx. 
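+ *   A further example from the values below: the reserved RX
+ *   prof_func block is 64 IDs spanning indices 64-127 of the
+ *   128 entry prof_func pool (TF_NUM_PROF_FUNC).
+ *   (Editorial illustration, not in the upstream comment.)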
+ */ + +/* HW Resources */ + +/* L2 CTX */ +#define TF_RSVD_L2_CTXT_TCAM_RX 64 +#define TF_RSVD_L2_CTXT_TCAM_BEGIN_IDX_RX 0 +#define TF_RSVD_L2_CTXT_TCAM_END_IDX_RX (TF_RSVD_L2_CTXT_RX - 1) +#define TF_RSVD_L2_CTXT_TCAM_TX 960 +#define TF_RSVD_L2_CTXT_TCAM_BEGIN_IDX_TX 0 +#define TF_RSVD_L2_CTXT_TCAM_END_IDX_TX (TF_RSVD_L2_CTXT_TX - 1) + +/* Profiler */ +#define TF_RSVD_PROF_FUNC_RX 64 +#define TF_RSVD_PROF_FUNC_BEGIN_IDX_RX 64 +#define TF_RSVD_PROF_FUNC_END_IDX_RX 127 +#define TF_RSVD_PROF_FUNC_TX 64 +#define TF_RSVD_PROF_FUNC_BEGIN_IDX_TX 64 +#define TF_RSVD_PROF_FUNC_END_IDX_TX 127 + +#define TF_RSVD_PROF_TCAM_RX 64 +#define TF_RSVD_PROF_TCAM_BEGIN_IDX_RX 960 +#define TF_RSVD_PROF_TCAM_END_IDX_RX 1023 +#define TF_RSVD_PROF_TCAM_TX 64 +#define TF_RSVD_PROF_TCAM_BEGIN_IDX_TX 960 +#define TF_RSVD_PROF_TCAM_END_IDX_TX 1023 + +/* EM Profiles IDs */ +#define TF_RSVD_EM_PROF_ID_RX 64 +#define TF_RSVD_EM_PROF_ID_BEGIN_IDX_RX 0 +#define TF_RSVD_EM_PROF_ID_END_IDX_RX 63 /* Less on CU+ then SR */ +#define TF_RSVD_EM_PROF_ID_TX 64 +#define TF_RSVD_EM_PROF_ID_BEGIN_IDX_TX 0 +#define TF_RSVD_EM_PROF_ID_END_IDX_TX 63 /* Less on CU+ then SR */ + +/* EM Records */ +#define TF_RSVD_EM_REC_RX 16000 +#define TF_RSVD_EM_REC_BEGIN_IDX_RX 0 +#define TF_RSVD_EM_REC_TX 16000 +#define TF_RSVD_EM_REC_BEGIN_IDX_TX 0 + +/* Wildcard */ +#define TF_RSVD_WC_TCAM_PROF_ID_RX 128 +#define TF_RSVD_WC_TCAM_PROF_ID_BEGIN_IDX_RX 128 +#define TF_RSVD_WC_TCAM_PROF_ID_END_IDX_RX 255 +#define TF_RSVD_WC_TCAM_PROF_ID_TX 128 +#define TF_RSVD_WC_TCAM_PROF_ID_BEGIN_IDX_TX 128 +#define TF_RSVD_WC_TCAM_PROF_ID_END_IDX_TX 255 + +#define TF_RSVD_WC_TCAM_RX 64 +#define TF_RSVD_WC_TCAM_BEGIN_IDX_RX 0 +#define TF_RSVD_WC_TCAM_END_IDX_RX 63 +#define TF_RSVD_WC_TCAM_TX 64 +#define TF_RSVD_WC_TCAM_BEGIN_IDX_TX 0 +#define TF_RSVD_WC_TCAM_END_IDX_TX 63 + +#define TF_RSVD_METER_PROF_RX 0 +#define TF_RSVD_METER_PROF_BEGIN_IDX_RX 0 +#define TF_RSVD_METER_PROF_END_IDX_RX 0 +#define TF_RSVD_METER_PROF_TX 0 +#define TF_RSVD_METER_PROF_BEGIN_IDX_TX 0 +#define TF_RSVD_METER_PROF_END_IDX_TX 0 + +#define TF_RSVD_METER_INST_RX 0 +#define TF_RSVD_METER_INST_BEGIN_IDX_RX 0 +#define TF_RSVD_METER_INST_END_IDX_RX 0 +#define TF_RSVD_METER_INST_TX 0 +#define TF_RSVD_METER_INST_BEGIN_IDX_TX 0 +#define TF_RSVD_METER_INST_END_IDX_TX 0 + +/* Mirror */ +/* Not yet supported fully in the infra */ +#define TF_RSVD_MIRROR_RX 0 +#define TF_RSVD_MIRROR_BEGIN_IDX_RX 0 +#define TF_RSVD_MIRROR_END_IDX_RX 0 +#define TF_RSVD_MIRROR_TX 0 +#define TF_RSVD_MIRROR_BEGIN_IDX_TX 0 +#define TF_RSVD_MIRROR_END_IDX_TX 0 + +/* UPAR */ +/* Not yet supported fully in the infra */ +#define TF_RSVD_UPAR_RX 0 +#define TF_RSVD_UPAR_BEGIN_IDX_RX 0 +#define TF_RSVD_UPAR_END_IDX_RX 0 +#define TF_RSVD_UPAR_TX 0 +#define TF_RSVD_UPAR_BEGIN_IDX_TX 0 +#define TF_RSVD_UPAR_END_IDX_TX 0 + +/* Source Properties */ +/* Not yet supported fully in the infra */ +#define TF_RSVD_SP_TCAM_RX 0 +#define TF_RSVD_SP_TCAM_BEGIN_IDX_RX 0 +#define TF_RSVD_SP_TCAM_END_IDX_RX 0 +#define TF_RSVD_SP_TCAM_TX 0 +#define TF_RSVD_SP_TCAM_BEGIN_IDX_TX 0 +#define TF_RSVD_SP_TCAM_END_IDX_TX 0 + +/* L2 Func */ +#define TF_RSVD_L2_FUNC_RX 0 +#define TF_RSVD_L2_FUNC_BEGIN_IDX_RX 0 +#define TF_RSVD_L2_FUNC_END_IDX_RX 0 +#define TF_RSVD_L2_FUNC_TX 0 +#define TF_RSVD_L2_FUNC_BEGIN_IDX_TX 0 +#define TF_RSVD_L2_FUNC_END_IDX_TX 0 + +/* FKB */ +#define TF_RSVD_FKB_RX 0 +#define TF_RSVD_FKB_BEGIN_IDX_RX 0 +#define TF_RSVD_FKB_END_IDX_RX 0 +#define TF_RSVD_FKB_TX 0 +#define TF_RSVD_FKB_BEGIN_IDX_TX 0 +#define TF_RSVD_FKB_END_IDX_TX 0 + 
+/* TBL Scope */ +#define TF_RSVD_TBL_SCOPE_RX 1 +#define TF_RSVD_TBL_SCOPE_BEGIN_IDX_RX 0 +#define TF_RSVD_TBL_SCOPE_END_IDX_RX 1 +#define TF_RSVD_TBL_SCOPE_TX 1 +#define TF_RSVD_TBL_SCOPE_BEGIN_IDX_TX 0 +#define TF_RSVD_TBL_SCOPE_END_IDX_TX 1 + +/* EPOCH0 */ +/* Not yet supported fully in the infra */ +#define TF_RSVD_EPOCH0_RX 0 +#define TF_RSVD_EPOCH0_BEGIN_IDX_RX 0 +#define TF_RSVD_EPOCH0_END_IDX_RX 0 +#define TF_RSVD_EPOCH0_TX 0 +#define TF_RSVD_EPOCH0_BEGIN_IDX_TX 0 +#define TF_RSVD_EPOCH0_END_IDX_TX 0 + +/* EPOCH1 */ +/* Not yet supported fully in the infra */ +#define TF_RSVD_EPOCH1_RX 0 +#define TF_RSVD_EPOCH1_BEGIN_IDX_RX 0 +#define TF_RSVD_EPOCH1_END_IDX_RX 0 +#define TF_RSVD_EPOCH1_TX 0 +#define TF_RSVD_EPOCH1_BEGIN_IDX_TX 0 +#define TF_RSVD_EPOCH1_END_IDX_TX 0 + +/* METADATA */ +/* Not yet supported fully in the infra */ +#define TF_RSVD_METADATA_RX 0 +#define TF_RSVD_METADATA_BEGIN_IDX_RX 0 +#define TF_RSVD_METADATA_END_IDX_RX 0 +#define TF_RSVD_METADATA_TX 0 +#define TF_RSVD_METADATA_BEGIN_IDX_TX 0 +#define TF_RSVD_METADATA_END_IDX_TX 0 + +/* CT_STATE */ +/* Not yet supported fully in the infra */ +#define TF_RSVD_CT_STATE_RX 0 +#define TF_RSVD_CT_STATE_BEGIN_IDX_RX 0 +#define TF_RSVD_CT_STATE_END_IDX_RX 0 +#define TF_RSVD_CT_STATE_TX 0 +#define TF_RSVD_CT_STATE_BEGIN_IDX_TX 0 +#define TF_RSVD_CT_STATE_END_IDX_TX 0 + +/* RANGE_PROF */ +/* Not yet supported fully in the infra */ +#define TF_RSVD_RANGE_PROF_RX 0 +#define TF_RSVD_RANGE_PROF_BEGIN_IDX_RX 0 +#define TF_RSVD_RANGE_PROF_END_IDX_RX 0 +#define TF_RSVD_RANGE_PROF_TX 0 +#define TF_RSVD_RANGE_PROF_BEGIN_IDX_TX 0 +#define TF_RSVD_RANGE_PROF_END_IDX_TX 0 + +/* RANGE_ENTRY */ +/* Not yet supported fully in the infra */ +#define TF_RSVD_RANGE_ENTRY_RX 0 +#define TF_RSVD_RANGE_ENTRY_BEGIN_IDX_RX 0 +#define TF_RSVD_RANGE_ENTRY_END_IDX_RX 0 +#define TF_RSVD_RANGE_ENTRY_TX 0 +#define TF_RSVD_RANGE_ENTRY_BEGIN_IDX_TX 0 +#define TF_RSVD_RANGE_ENTRY_END_IDX_TX 0 + +/* LAG_ENTRY */ +/* Not yet supported fully in the infra */ +#define TF_RSVD_LAG_ENTRY_RX 0 +#define TF_RSVD_LAG_ENTRY_BEGIN_IDX_RX 0 +#define TF_RSVD_LAG_ENTRY_END_IDX_RX 0 +#define TF_RSVD_LAG_ENTRY_TX 0 +#define TF_RSVD_LAG_ENTRY_BEGIN_IDX_TX 0 +#define TF_RSVD_LAG_ENTRY_END_IDX_TX 0 + + +/* SRAM - Resources + * Limited to the types that CFA provides. 
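+ *
+ * Editorial note, not in the upstream comment: each SRAM type below
+ * is reserved as a per-direction count plus a begin index only; no
+ * end index is needed since, as noted above, SRAM entries carry no
+ * priority ordering.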
+ */ +#define TF_RSVD_SRAM_FULL_ACTION_RX 8001 +#define TF_RSVD_SRAM_FULL_ACTION_BEGIN_IDX_RX 0 +#define TF_RSVD_SRAM_FULL_ACTION_TX 8001 +#define TF_RSVD_SRAM_FULL_ACTION_BEGIN_IDX_TX 0 + +/* Not yet supported fully in the infra */ +#define TF_RSVD_SRAM_MCG_RX 0 +#define TF_RSVD_SRAM_MCG_BEGIN_IDX_RX 0 +/* Multicast Group on TX is not supported */ +#define TF_RSVD_SRAM_MCG_TX 0 +#define TF_RSVD_SRAM_MCG_BEGIN_IDX_TX 0 + +/* First encap of 8B RX is reserved by CFA */ +#define TF_RSVD_SRAM_ENCAP_8B_RX 32 +#define TF_RSVD_SRAM_ENCAP_8B_BEGIN_IDX_RX 0 +/* First encap of 8B TX is reserved by CFA */ +#define TF_RSVD_SRAM_ENCAP_8B_TX 0 +#define TF_RSVD_SRAM_ENCAP_8B_BEGIN_IDX_TX 0 + +#define TF_RSVD_SRAM_ENCAP_16B_RX 16 +#define TF_RSVD_SRAM_ENCAP_16B_BEGIN_IDX_RX 0 +/* First encap of 16B TX is reserved by CFA */ +#define TF_RSVD_SRAM_ENCAP_16B_TX 20 +#define TF_RSVD_SRAM_ENCAP_16B_BEGIN_IDX_TX 0 + +/* Encap of 64B on RX is not supported */ +#define TF_RSVD_SRAM_ENCAP_64B_RX 0 +#define TF_RSVD_SRAM_ENCAP_64B_BEGIN_IDX_RX 0 +/* First encap of 64B TX is reserved by CFA */ +#define TF_RSVD_SRAM_ENCAP_64B_TX 1007 +#define TF_RSVD_SRAM_ENCAP_64B_BEGIN_IDX_TX 0 + +#define TF_RSVD_SRAM_SP_SMAC_RX 0 +#define TF_RSVD_SRAM_SP_SMAC_BEGIN_IDX_RX 0 +#define TF_RSVD_SRAM_SP_SMAC_TX 0 +#define TF_RSVD_SRAM_SP_SMAC_BEGIN_IDX_TX 0 + +/* SRAM SP IPV4 on RX is not supported */ +#define TF_RSVD_SRAM_SP_SMAC_IPV4_RX 0 +#define TF_RSVD_SRAM_SP_SMAC_IPV4_BEGIN_IDX_RX 0 +#define TF_RSVD_SRAM_SP_SMAC_IPV4_TX 511 +#define TF_RSVD_SRAM_SP_SMAC_IPV4_BEGIN_IDX_TX 0 + +/* SRAM SP IPV6 on RX is not supported */ +#define TF_RSVD_SRAM_SP_SMAC_IPV6_RX 0 +#define TF_RSVD_SRAM_SP_SMAC_IPV6_BEGIN_IDX_RX 0 +/* Not yet supported fully in infra */ +#define TF_RSVD_SRAM_SP_SMAC_IPV6_TX 0 +#define TF_RSVD_SRAM_SP_SMAC_IPV6_BEGIN_IDX_TX 0 + +#define TF_RSVD_SRAM_COUNTER_64B_RX 160 +#define TF_RSVD_SRAM_COUNTER_64B_BEGIN_IDX_RX 0 +#define TF_RSVD_SRAM_COUNTER_64B_TX 160 +#define TF_RSVD_SRAM_COUNTER_64B_BEGIN_IDX_TX 0 + +#define TF_RSVD_SRAM_NAT_SPORT_RX 0 +#define TF_RSVD_SRAM_NAT_SPORT_BEGIN_IDX_RX 0 +#define TF_RSVD_SRAM_NAT_SPORT_TX 0 +#define TF_RSVD_SRAM_NAT_SPORT_BEGIN_IDX_TX 0 + +#define TF_RSVD_SRAM_NAT_DPORT_RX 0 +#define TF_RSVD_SRAM_NAT_DPORT_BEGIN_IDX_RX 0 +#define TF_RSVD_SRAM_NAT_DPORT_TX 0 +#define TF_RSVD_SRAM_NAT_DPORT_BEGIN_IDX_TX 0 + +#define TF_RSVD_SRAM_NAT_S_IPV4_RX 0 +#define TF_RSVD_SRAM_NAT_S_IPV4_BEGIN_IDX_RX 0 +#define TF_RSVD_SRAM_NAT_S_IPV4_TX 0 +#define TF_RSVD_SRAM_NAT_S_IPV4_BEGIN_IDX_TX 0 + +#define TF_RSVD_SRAM_NAT_D_IPV4_RX 0 +#define TF_RSVD_SRAM_NAT_D_IPV4_BEGIN_IDX_RX 0 +#define TF_RSVD_SRAM_NAT_D_IPV4_TX 0 +#define TF_RSVD_SRAM_NAT_D_IPV4_BEGIN_IDX_TX 0 + +/* HW Resource Pool names */ + +#define TF_L2_CTXT_TCAM_POOL_NAME l2_ctxt_tcam_pool +#define TF_L2_CTXT_TCAM_POOL_NAME_RX l2_ctxt_tcam_pool_rx +#define TF_L2_CTXT_TCAM_POOL_NAME_TX l2_ctxt_tcam_pool_tx + +#define TF_PROF_FUNC_POOL_NAME prof_func_pool +#define TF_PROF_FUNC_POOL_NAME_RX prof_func_pool_rx +#define TF_PROF_FUNC_POOL_NAME_TX prof_func_pool_tx + +#define TF_PROF_TCAM_POOL_NAME prof_tcam_pool +#define TF_PROF_TCAM_POOL_NAME_RX prof_tcam_pool_rx +#define TF_PROF_TCAM_POOL_NAME_TX prof_tcam_pool_tx + +#define TF_EM_PROF_ID_POOL_NAME em_prof_id_pool +#define TF_EM_PROF_ID_POOL_NAME_RX em_prof_id_pool_rx +#define TF_EM_PROF_ID_POOL_NAME_TX em_prof_id_pool_tx + +#define TF_WC_TCAM_PROF_ID_POOL_NAME wc_tcam_prof_id_pool +#define TF_WC_TCAM_PROF_ID_POOL_NAME_RX wc_tcam_prof_id_pool_rx +#define TF_WC_TCAM_PROF_ID_POOL_NAME_TX 
wc_tcam_prof_id_pool_tx + +#define TF_WC_TCAM_POOL_NAME wc_tcam_pool +#define TF_WC_TCAM_POOL_NAME_RX wc_tcam_pool_rx +#define TF_WC_TCAM_POOL_NAME_TX wc_tcam_pool_tx + +#define TF_METER_PROF_POOL_NAME meter_prof_pool +#define TF_METER_PROF_POOL_NAME_RX meter_prof_pool_rx +#define TF_METER_PROF_POOL_NAME_TX meter_prof_pool_tx + +#define TF_METER_INST_POOL_NAME meter_inst_pool +#define TF_METER_INST_POOL_NAME_RX meter_inst_pool_rx +#define TF_METER_INST_POOL_NAME_TX meter_inst_pool_tx + +#define TF_MIRROR_POOL_NAME mirror_pool +#define TF_MIRROR_POOL_NAME_RX mirror_pool_rx +#define TF_MIRROR_POOL_NAME_TX mirror_pool_tx + +#define TF_UPAR_POOL_NAME upar_pool +#define TF_UPAR_POOL_NAME_RX upar_pool_rx +#define TF_UPAR_POOL_NAME_TX upar_pool_tx + +#define TF_SP_TCAM_POOL_NAME sp_tcam_pool +#define TF_SP_TCAM_POOL_NAME_RX sp_tcam_pool_rx +#define TF_SP_TCAM_POOL_NAME_TX sp_tcam_pool_tx + +#define TF_FKB_POOL_NAME fkb_pool +#define TF_FKB_POOL_NAME_RX fkb_pool_rx +#define TF_FKB_POOL_NAME_TX fkb_pool_tx + +#define TF_TBL_SCOPE_POOL_NAME tbl_scope_pool +#define TF_TBL_SCOPE_POOL_NAME_RX tbl_scope_pool_rx +#define TF_TBL_SCOPE_POOL_NAME_TX tbl_scope_pool_tx + +#define TF_L2_FUNC_POOL_NAME l2_func_pool +#define TF_L2_FUNC_POOL_NAME_RX l2_func_pool_rx +#define TF_L2_FUNC_POOL_NAME_TX l2_func_pool_tx + +#define TF_EPOCH0_POOL_NAME epoch0_pool +#define TF_EPOCH0_POOL_NAME_RX epoch0_pool_rx +#define TF_EPOCH0_POOL_NAME_TX epoch0_pool_tx + +#define TF_EPOCH1_POOL_NAME epoch1_pool +#define TF_EPOCH1_POOL_NAME_RX epoch1_pool_rx +#define TF_EPOCH1_POOL_NAME_TX epoch1_pool_tx + +#define TF_METADATA_POOL_NAME metadata_pool +#define TF_METADATA_POOL_NAME_RX metadata_pool_rx +#define TF_METADATA_POOL_NAME_TX metadata_pool_tx + +#define TF_CT_STATE_POOL_NAME ct_state_pool +#define TF_CT_STATE_POOL_NAME_RX ct_state_pool_rx +#define TF_CT_STATE_POOL_NAME_TX ct_state_pool_tx + +#define TF_RANGE_PROF_POOL_NAME range_prof_pool +#define TF_RANGE_PROF_POOL_NAME_RX range_prof_pool_rx +#define TF_RANGE_PROF_POOL_NAME_TX range_prof_pool_tx + +#define TF_RANGE_ENTRY_POOL_NAME range_entry_pool +#define TF_RANGE_ENTRY_POOL_NAME_RX range_entry_pool_rx +#define TF_RANGE_ENTRY_POOL_NAME_TX range_entry_pool_tx + +#define TF_LAG_ENTRY_POOL_NAME lag_entry_pool +#define TF_LAG_ENTRY_POOL_NAME_RX lag_entry_pool_rx +#define TF_LAG_ENTRY_POOL_NAME_TX lag_entry_pool_tx + +/* SRAM Resource Pool names */ +#define TF_SRAM_FULL_ACTION_POOL_NAME sram_full_action_pool +#define TF_SRAM_FULL_ACTION_POOL_NAME_RX sram_full_action_pool_rx +#define TF_SRAM_FULL_ACTION_POOL_NAME_TX sram_full_action_pool_tx + +#define TF_SRAM_MCG_POOL_NAME sram_mcg_pool +#define TF_SRAM_MCG_POOL_NAME_RX sram_mcg_pool_rx +#define TF_SRAM_MCG_POOL_NAME_TX sram_mcg_pool_tx + +#define TF_SRAM_ENCAP_8B_POOL_NAME sram_encap_8b_pool +#define TF_SRAM_ENCAP_8B_POOL_NAME_RX sram_encap_8b_pool_rx +#define TF_SRAM_ENCAP_8B_POOL_NAME_TX sram_encap_8b_pool_tx + +#define TF_SRAM_ENCAP_16B_POOL_NAME sram_encap_16b_pool +#define TF_SRAM_ENCAP_16B_POOL_NAME_RX sram_encap_16b_pool_rx +#define TF_SRAM_ENCAP_16B_POOL_NAME_TX sram_encap_16b_pool_tx + +#define TF_SRAM_ENCAP_64B_POOL_NAME sram_encap_64b_pool +#define TF_SRAM_ENCAP_64B_POOL_NAME_RX sram_encap_64b_pool_rx +#define TF_SRAM_ENCAP_64B_POOL_NAME_TX sram_encap_64b_pool_tx + +#define TF_SRAM_SP_SMAC_POOL_NAME sram_sp_smac_pool +#define TF_SRAM_SP_SMAC_POOL_NAME_RX sram_sp_smac_pool_rx +#define TF_SRAM_SP_SMAC_POOL_NAME_TX sram_sp_smac_pool_tx + +#define TF_SRAM_SP_SMAC_IPV4_POOL_NAME sram_sp_smac_ipv4_pool +#define 
TF_SRAM_SP_SMAC_IPV4_POOL_NAME_RX sram_sp_smac_ipv4_pool_rx +#define TF_SRAM_SP_SMAC_IPV4_POOL_NAME_TX sram_sp_smac_ipv4_pool_tx + +#define TF_SRAM_SP_SMAC_IPV6_POOL_NAME sram_sp_smac_ipv6_pool +#define TF_SRAM_SP_SMAC_IPV6_POOL_NAME_RX sram_sp_smac_ipv6_pool_rx +#define TF_SRAM_SP_SMAC_IPV6_POOL_NAME_TX sram_sp_smac_ipv6_pool_tx + +#define TF_SRAM_STATS_64B_POOL_NAME sram_stats_64b_pool +#define TF_SRAM_STATS_64B_POOL_NAME_RX sram_stats_64b_pool_rx +#define TF_SRAM_STATS_64B_POOL_NAME_TX sram_stats_64b_pool_tx + +#define TF_SRAM_NAT_SPORT_POOL_NAME sram_nat_sport_pool +#define TF_SRAM_NAT_SPORT_POOL_NAME_RX sram_nat_sport_pool_rx +#define TF_SRAM_NAT_SPORT_POOL_NAME_TX sram_nat_sport_pool_tx + +#define TF_SRAM_NAT_DPORT_POOL_NAME sram_nat_dport_pool +#define TF_SRAM_NAT_DPORT_POOL_NAME_RX sram_nat_dport_pool_rx +#define TF_SRAM_NAT_DPORT_POOL_NAME_TX sram_nat_dport_pool_tx + +#define TF_SRAM_NAT_S_IPV4_POOL_NAME sram_nat_s_ipv4_pool +#define TF_SRAM_NAT_S_IPV4_POOL_NAME_RX sram_nat_s_ipv4_pool_rx +#define TF_SRAM_NAT_S_IPV4_POOL_NAME_TX sram_nat_s_ipv4_pool_tx + +#define TF_SRAM_NAT_D_IPV4_POOL_NAME sram_nat_d_ipv4_pool +#define TF_SRAM_NAT_D_IPV4_POOL_NAME_RX sram_nat_d_ipv4_pool_rx +#define TF_SRAM_NAT_D_IPV4_POOL_NAME_TX sram_nat_d_ipv4_pool_tx + +/* Sw Resource Pool Names */ + +#define TF_L2_CTXT_REMAP_POOL_NAME l2_ctxt_remap_pool +#define TF_L2_CTXT_REMAP_POOL_NAME_RX l2_ctxt_remap_pool_rx +#define TF_L2_CTXT_REMAP_POOL_NAME_TX l2_ctxt_remap_pool_tx + + +/** HW Resource types + */ +enum tf_resource_type_hw { + /* Common HW resources for all chip variants */ + TF_RESC_TYPE_HW_L2_CTXT_TCAM, + TF_RESC_TYPE_HW_PROF_FUNC, + TF_RESC_TYPE_HW_PROF_TCAM, + TF_RESC_TYPE_HW_EM_PROF_ID, + TF_RESC_TYPE_HW_EM_REC, + TF_RESC_TYPE_HW_WC_TCAM_PROF_ID, + TF_RESC_TYPE_HW_WC_TCAM, + TF_RESC_TYPE_HW_METER_PROF, + TF_RESC_TYPE_HW_METER_INST, + TF_RESC_TYPE_HW_MIRROR, + TF_RESC_TYPE_HW_UPAR, + /* Wh+/Brd2 specific HW resources */ + TF_RESC_TYPE_HW_SP_TCAM, + /* Brd2/Brd4 specific HW resources */ + TF_RESC_TYPE_HW_L2_FUNC, + /* Brd3, Brd4 common HW resources */ + TF_RESC_TYPE_HW_FKB, + /* Brd4 specific HW resources */ + TF_RESC_TYPE_HW_TBL_SCOPE, + TF_RESC_TYPE_HW_EPOCH0, + TF_RESC_TYPE_HW_EPOCH1, + TF_RESC_TYPE_HW_METADATA, + TF_RESC_TYPE_HW_CT_STATE, + TF_RESC_TYPE_HW_RANGE_PROF, + TF_RESC_TYPE_HW_RANGE_ENTRY, + TF_RESC_TYPE_HW_LAG_ENTRY, + TF_RESC_TYPE_HW_MAX +}; + +/** HW Resource types + */ +enum tf_resource_type_sram { + TF_RESC_TYPE_SRAM_FULL_ACTION, + TF_RESC_TYPE_SRAM_MCG, + TF_RESC_TYPE_SRAM_ENCAP_8B, + TF_RESC_TYPE_SRAM_ENCAP_16B, + TF_RESC_TYPE_SRAM_ENCAP_64B, + TF_RESC_TYPE_SRAM_SP_SMAC, + TF_RESC_TYPE_SRAM_SP_SMAC_IPV4, + TF_RESC_TYPE_SRAM_SP_SMAC_IPV6, + TF_RESC_TYPE_SRAM_COUNTER_64B, + TF_RESC_TYPE_SRAM_NAT_SPORT, + TF_RESC_TYPE_SRAM_NAT_DPORT, + TF_RESC_TYPE_SRAM_NAT_S_IPV4, + TF_RESC_TYPE_SRAM_NAT_D_IPV4, + TF_RESC_TYPE_SRAM_MAX +}; + +#endif /* _TF_RESOURCES_H_ */ diff --git a/src/spdk/dpdk/drivers/net/bnxt/tf_core/tf_rm.c b/src/spdk/dpdk/drivers/net/bnxt/tf_core/tf_rm.c new file mode 100644 index 000000000..38b1e71cd --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnxt/tf_core/tf_rm.c @@ -0,0 +1,3294 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019-2020 Broadcom + * All rights reserved. 
+ */
+
+#include <string.h>
+
+#include <rte_common.h>
+
+#include "tf_rm.h"
+#include "tf_core.h"
+#include "tf_session.h"
+#include "tf_resources.h"
+#include "tf_msg.h"
+#include "bnxt.h"
+
+/**
+ * Internal macro to perform HW resource allocation check between what
+ * firmware reports vs what was statically requested.
+ *
+ * Parameters:
+ *    struct tf_rm_hw_query *hquery       - Pointer to the hw query result
+ *    enum tf_dir dir                     - Direction to process
+ *    enum tf_resource_type_hw hcapi_type - HCAPI type, the index element
+ *                                          in the hw query structure
+ *    define def_value                    - Define value to check against
+ *    uint32_t *eflag                     - Result of the check
+ */
+#define TF_RM_CHECK_HW_ALLOC(hquery, dir, hcapi_type, def_value, eflag) do {  \
+	if ((dir) == TF_DIR_RX) {                                             \
+		if ((hquery)->hw_query[(hcapi_type)].max != def_value ## _RX) \
+			*(eflag) |= 1 << (hcapi_type);                        \
+	} else {                                                              \
+		if ((hquery)->hw_query[(hcapi_type)].max != def_value ## _TX) \
+			*(eflag) |= 1 << (hcapi_type);                        \
+	}                                                                     \
+} while (0)
+
+/**
+ * Internal macro to perform SRAM resource allocation check between what
+ * firmware reports vs what was statically requested.
+ *
+ * Parameters:
+ *    struct tf_rm_sram_query *squery       - Pointer to the sram query result
+ *    enum tf_dir dir                       - Direction to process
+ *    enum tf_resource_type_sram hcapi_type - HCAPI type, the index element
+ *                                            in the sram query structure
+ *    define def_value                      - Define value to check against
+ *    uint32_t *eflag                       - Result of the check
+ */
+#define TF_RM_CHECK_SRAM_ALLOC(squery, dir, hcapi_type, def_value, eflag) do { \
+	if ((dir) == TF_DIR_RX) {                                              \
+		if ((squery)->sram_query[(hcapi_type)].max != def_value ## _RX)\
+			*(eflag) |= 1 << (hcapi_type);                         \
+	} else {                                                               \
+		if ((squery)->sram_query[(hcapi_type)].max != def_value ## _TX)\
+			*(eflag) |= 1 << (hcapi_type);                         \
+	}                                                                      \
+} while (0)
+
+/**
+ * Internal macro to convert a reserved resource define name to be
+ * direction specific.
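TF_RM_CHECK_HW_ALLOC and TF_RM_CHECK_SRAM_ALLOC select the direction-specific reserved define by pasting an _RX or _TX suffix onto the def_value argument and set one bit per failing HCAPI type. A minimal standalone sketch of that token-pasting idiom, using demo_ names rather than the driver's own:

#include <stdio.h>
#include <stdint.h>

#define DEMO_RSVD_L2_CTXT_TCAM_RX 16
#define DEMO_RSVD_L2_CTXT_TCAM_TX 16

/* Same shape as TF_RM_CHECK_HW_ALLOC: paste _RX/_TX onto the define
 * prefix and record a per-type error bit on mismatch. */
#define DEMO_CHECK_ALLOC(reported, is_rx, def_value, type_bit, eflag) do { \
	if ((is_rx) ? (reported) != def_value ## _RX                      \
		    : (reported) != def_value ## _TX)                     \
		*(eflag) |= 1u << (type_bit);                             \
} while (0)

int main(void)
{
	uint32_t error_flag = 0;

	/* Firmware reports 8 RX entries where 16 were statically reserved. */
	DEMO_CHECK_ALLOC(8, 1, DEMO_RSVD_L2_CTXT_TCAM, 0, &error_flag);
	printf("error_flag = 0x%x\n", error_flag);	/* prints 0x1 */
	return 0;
}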
+ * + * Parameters: + * enum tf_dir dir - Direction to process + * string type - Type name to append RX or TX to + * string dtype - Direction specific type + * + * + */ +#define TF_RESC_RSVD(dir, type, dtype) do { \ + if ((dir) == TF_DIR_RX) \ + (dtype) = type ## _RX; \ + else \ + (dtype) = type ## _TX; \ + } while (0) + +const char +*tf_dir_2_str(enum tf_dir dir) +{ + switch (dir) { + case TF_DIR_RX: + return "RX"; + case TF_DIR_TX: + return "TX"; + default: + return "Invalid direction"; + } +} + +const char +*tf_ident_2_str(enum tf_identifier_type id_type) +{ + switch (id_type) { + case TF_IDENT_TYPE_L2_CTXT: + return "l2_ctxt_remap"; + case TF_IDENT_TYPE_PROF_FUNC: + return "prof_func"; + case TF_IDENT_TYPE_WC_PROF: + return "wc_prof"; + case TF_IDENT_TYPE_EM_PROF: + return "em_prof"; + case TF_IDENT_TYPE_L2_FUNC: + return "l2_func"; + default: + return "Invalid identifier"; + } +} + +const char +*tf_tcam_tbl_2_str(enum tf_tcam_tbl_type tcam_type) +{ + switch (tcam_type) { + case TF_TCAM_TBL_TYPE_L2_CTXT_TCAM: + return "l2_ctxt_tcam"; + case TF_TCAM_TBL_TYPE_PROF_TCAM: + return "prof_tcam"; + case TF_TCAM_TBL_TYPE_WC_TCAM: + return "wc_tcam"; + case TF_TCAM_TBL_TYPE_VEB_TCAM: + return "veb_tcam"; + case TF_TCAM_TBL_TYPE_SP_TCAM: + return "sp_tcam"; + case TF_TCAM_TBL_TYPE_CT_RULE_TCAM: + return "ct_rule_tcam"; + default: + return "Invalid tcam table type"; + } +} + +const char +*tf_hcapi_hw_2_str(enum tf_resource_type_hw hw_type) +{ + switch (hw_type) { + case TF_RESC_TYPE_HW_L2_CTXT_TCAM: + return "L2 ctxt tcam"; + case TF_RESC_TYPE_HW_PROF_FUNC: + return "Profile Func"; + case TF_RESC_TYPE_HW_PROF_TCAM: + return "Profile tcam"; + case TF_RESC_TYPE_HW_EM_PROF_ID: + return "EM profile id"; + case TF_RESC_TYPE_HW_EM_REC: + return "EM record"; + case TF_RESC_TYPE_HW_WC_TCAM_PROF_ID: + return "WC tcam profile id"; + case TF_RESC_TYPE_HW_WC_TCAM: + return "WC tcam"; + case TF_RESC_TYPE_HW_METER_PROF: + return "Meter profile"; + case TF_RESC_TYPE_HW_METER_INST: + return "Meter instance"; + case TF_RESC_TYPE_HW_MIRROR: + return "Mirror"; + case TF_RESC_TYPE_HW_UPAR: + return "UPAR"; + case TF_RESC_TYPE_HW_SP_TCAM: + return "Source properties tcam"; + case TF_RESC_TYPE_HW_L2_FUNC: + return "L2 Function"; + case TF_RESC_TYPE_HW_FKB: + return "FKB"; + case TF_RESC_TYPE_HW_TBL_SCOPE: + return "Table scope"; + case TF_RESC_TYPE_HW_EPOCH0: + return "EPOCH0"; + case TF_RESC_TYPE_HW_EPOCH1: + return "EPOCH1"; + case TF_RESC_TYPE_HW_METADATA: + return "Metadata"; + case TF_RESC_TYPE_HW_CT_STATE: + return "Connection tracking state"; + case TF_RESC_TYPE_HW_RANGE_PROF: + return "Range profile"; + case TF_RESC_TYPE_HW_RANGE_ENTRY: + return "Range entry"; + case TF_RESC_TYPE_HW_LAG_ENTRY: + return "LAG"; + default: + return "Invalid identifier"; + } +} + +const char +*tf_hcapi_sram_2_str(enum tf_resource_type_sram sram_type) +{ + switch (sram_type) { + case TF_RESC_TYPE_SRAM_FULL_ACTION: + return "Full action"; + case TF_RESC_TYPE_SRAM_MCG: + return "MCG"; + case TF_RESC_TYPE_SRAM_ENCAP_8B: + return "Encap 8B"; + case TF_RESC_TYPE_SRAM_ENCAP_16B: + return "Encap 16B"; + case TF_RESC_TYPE_SRAM_ENCAP_64B: + return "Encap 64B"; + case TF_RESC_TYPE_SRAM_SP_SMAC: + return "Source properties SMAC"; + case TF_RESC_TYPE_SRAM_SP_SMAC_IPV4: + return "Source properties SMAC IPv4"; + case TF_RESC_TYPE_SRAM_SP_SMAC_IPV6: + return "Source properties IPv6"; + case TF_RESC_TYPE_SRAM_COUNTER_64B: + return "Counter 64B"; + case TF_RESC_TYPE_SRAM_NAT_SPORT: + return "NAT source port"; + case TF_RESC_TYPE_SRAM_NAT_DPORT: + 
return "NAT destination port"; + case TF_RESC_TYPE_SRAM_NAT_S_IPV4: + return "NAT source IPv4"; + case TF_RESC_TYPE_SRAM_NAT_D_IPV4: + return "NAT destination IPv4"; + default: + return "Invalid identifier"; + } +} + +/** + * Helper function to perform a HW HCAPI resource type lookup against + * the reserved value of the same static type. + * + * Returns: + * -EOPNOTSUPP - Reserved resource type not supported + * Value - Integer value of the reserved value for the requested type + */ +static int +tf_rm_rsvd_hw_value(enum tf_dir dir, enum tf_resource_type_hw index) +{ + uint32_t value = -EOPNOTSUPP; + + switch (index) { + case TF_RESC_TYPE_HW_L2_CTXT_TCAM: + TF_RESC_RSVD(dir, TF_RSVD_L2_CTXT_TCAM, value); + break; + case TF_RESC_TYPE_HW_PROF_FUNC: + TF_RESC_RSVD(dir, TF_RSVD_PROF_FUNC, value); + break; + case TF_RESC_TYPE_HW_PROF_TCAM: + TF_RESC_RSVD(dir, TF_RSVD_PROF_TCAM, value); + break; + case TF_RESC_TYPE_HW_EM_PROF_ID: + TF_RESC_RSVD(dir, TF_RSVD_EM_PROF_ID, value); + break; + case TF_RESC_TYPE_HW_EM_REC: + TF_RESC_RSVD(dir, TF_RSVD_EM_REC, value); + break; + case TF_RESC_TYPE_HW_WC_TCAM_PROF_ID: + TF_RESC_RSVD(dir, TF_RSVD_WC_TCAM_PROF_ID, value); + break; + case TF_RESC_TYPE_HW_WC_TCAM: + TF_RESC_RSVD(dir, TF_RSVD_WC_TCAM, value); + break; + case TF_RESC_TYPE_HW_METER_PROF: + TF_RESC_RSVD(dir, TF_RSVD_METER_PROF, value); + break; + case TF_RESC_TYPE_HW_METER_INST: + TF_RESC_RSVD(dir, TF_RSVD_METER_INST, value); + break; + case TF_RESC_TYPE_HW_MIRROR: + TF_RESC_RSVD(dir, TF_RSVD_MIRROR, value); + break; + case TF_RESC_TYPE_HW_UPAR: + TF_RESC_RSVD(dir, TF_RSVD_UPAR, value); + break; + case TF_RESC_TYPE_HW_SP_TCAM: + TF_RESC_RSVD(dir, TF_RSVD_SP_TCAM, value); + break; + case TF_RESC_TYPE_HW_L2_FUNC: + TF_RESC_RSVD(dir, TF_RSVD_L2_FUNC, value); + break; + case TF_RESC_TYPE_HW_FKB: + TF_RESC_RSVD(dir, TF_RSVD_FKB, value); + break; + case TF_RESC_TYPE_HW_TBL_SCOPE: + TF_RESC_RSVD(dir, TF_RSVD_TBL_SCOPE, value); + break; + case TF_RESC_TYPE_HW_EPOCH0: + TF_RESC_RSVD(dir, TF_RSVD_EPOCH0, value); + break; + case TF_RESC_TYPE_HW_EPOCH1: + TF_RESC_RSVD(dir, TF_RSVD_EPOCH1, value); + break; + case TF_RESC_TYPE_HW_METADATA: + TF_RESC_RSVD(dir, TF_RSVD_METADATA, value); + break; + case TF_RESC_TYPE_HW_CT_STATE: + TF_RESC_RSVD(dir, TF_RSVD_CT_STATE, value); + break; + case TF_RESC_TYPE_HW_RANGE_PROF: + TF_RESC_RSVD(dir, TF_RSVD_RANGE_PROF, value); + break; + case TF_RESC_TYPE_HW_RANGE_ENTRY: + TF_RESC_RSVD(dir, TF_RSVD_RANGE_ENTRY, value); + break; + case TF_RESC_TYPE_HW_LAG_ENTRY: + TF_RESC_RSVD(dir, TF_RSVD_LAG_ENTRY, value); + break; + default: + break; + } + + return value; +} + +/** + * Helper function to perform a SRAM HCAPI resource type lookup + * against the reserved value of the same static type. 
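The *_2_str helpers and the tf_rm_rsvd_*_value lookups above are written as exhaustive switch statements. An equivalent pattern, shown here only as a sketch with demo_ names that are not part of the driver, is a designated-initializer table indexed by the enum, which keeps the mapping in one place at the cost of a bounds check:

#include <stdio.h>

enum demo_resc_type {
	DEMO_RESC_L2_CTXT_TCAM,
	DEMO_RESC_PROF_FUNC,
	DEMO_RESC_MAX
};

/* Designated initializers keep the enum-to-string mapping aligned even
 * if the enum order changes. */
static const char *const demo_resc_2_str[DEMO_RESC_MAX] = {
	[DEMO_RESC_L2_CTXT_TCAM] = "L2 ctxt tcam",
	[DEMO_RESC_PROF_FUNC]    = "Profile Func",
};

static const char *demo_type_2_str(enum demo_resc_type type)
{
	if (type >= DEMO_RESC_MAX || demo_resc_2_str[type] == NULL)
		return "Invalid identifier";
	return demo_resc_2_str[type];
}

int main(void)
{
	printf("%s\n", demo_type_2_str(DEMO_RESC_PROF_FUNC));
	return 0;
}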
+ * + * Returns: + * -EOPNOTSUPP - Reserved resource type not supported + * Value - Integer value of the reserved value for the requested type + */ +static int +tf_rm_rsvd_sram_value(enum tf_dir dir, enum tf_resource_type_sram index) +{ + uint32_t value = -EOPNOTSUPP; + + switch (index) { + case TF_RESC_TYPE_SRAM_FULL_ACTION: + TF_RESC_RSVD(dir, TF_RSVD_SRAM_FULL_ACTION, value); + break; + case TF_RESC_TYPE_SRAM_MCG: + TF_RESC_RSVD(dir, TF_RSVD_SRAM_MCG, value); + break; + case TF_RESC_TYPE_SRAM_ENCAP_8B: + TF_RESC_RSVD(dir, TF_RSVD_SRAM_ENCAP_8B, value); + break; + case TF_RESC_TYPE_SRAM_ENCAP_16B: + TF_RESC_RSVD(dir, TF_RSVD_SRAM_ENCAP_16B, value); + break; + case TF_RESC_TYPE_SRAM_ENCAP_64B: + TF_RESC_RSVD(dir, TF_RSVD_SRAM_ENCAP_64B, value); + break; + case TF_RESC_TYPE_SRAM_SP_SMAC: + TF_RESC_RSVD(dir, TF_RSVD_SRAM_SP_SMAC, value); + break; + case TF_RESC_TYPE_SRAM_SP_SMAC_IPV4: + TF_RESC_RSVD(dir, TF_RSVD_SRAM_SP_SMAC_IPV4, value); + break; + case TF_RESC_TYPE_SRAM_SP_SMAC_IPV6: + TF_RESC_RSVD(dir, TF_RSVD_SRAM_SP_SMAC_IPV6, value); + break; + case TF_RESC_TYPE_SRAM_COUNTER_64B: + TF_RESC_RSVD(dir, TF_RSVD_SRAM_COUNTER_64B, value); + break; + case TF_RESC_TYPE_SRAM_NAT_SPORT: + TF_RESC_RSVD(dir, TF_RSVD_SRAM_NAT_SPORT, value); + break; + case TF_RESC_TYPE_SRAM_NAT_DPORT: + TF_RESC_RSVD(dir, TF_RSVD_SRAM_NAT_DPORT, value); + break; + case TF_RESC_TYPE_SRAM_NAT_S_IPV4: + TF_RESC_RSVD(dir, TF_RSVD_SRAM_NAT_S_IPV4, value); + break; + case TF_RESC_TYPE_SRAM_NAT_D_IPV4: + TF_RESC_RSVD(dir, TF_RSVD_SRAM_NAT_D_IPV4, value); + break; + default: + break; + } + + return value; +} + +/** + * Helper function to print all the HW resource qcaps errors reported + * in the error_flag. + * + * [in] dir + * Receive or transmit direction + * + * [in] error_flag + * Pointer to the hw error flags created at time of the query check + */ +static void +tf_rm_print_hw_qcaps_error(enum tf_dir dir, + struct tf_rm_hw_query *hw_query, + uint32_t *error_flag) +{ + int i; + + PMD_DRV_LOG(ERR, "QCAPS errors HW\n"); + PMD_DRV_LOG(ERR, " Direction: %s\n", tf_dir_2_str(dir)); + PMD_DRV_LOG(ERR, " Elements:\n"); + + for (i = 0; i < TF_RESC_TYPE_HW_MAX; i++) { + if (*error_flag & 1 << i) + PMD_DRV_LOG(ERR, " %s, %d elem available, req:%d\n", + tf_hcapi_hw_2_str(i), + hw_query->hw_query[i].max, + tf_rm_rsvd_hw_value(dir, i)); + } +} + +/** + * Helper function to print all the SRAM resource qcaps errors + * reported in the error_flag. + * + * [in] dir + * Receive or transmit direction + * + * [in] error_flag + * Pointer to the sram error flags created at time of the query check + */ +static void +tf_rm_print_sram_qcaps_error(enum tf_dir dir, + struct tf_rm_sram_query *sram_query, + uint32_t *error_flag) +{ + int i; + + PMD_DRV_LOG(ERR, "QCAPS errors SRAM\n"); + PMD_DRV_LOG(ERR, " Direction: %s\n", tf_dir_2_str(dir)); + PMD_DRV_LOG(ERR, " Elements:\n"); + + for (i = 0; i < TF_RESC_TYPE_SRAM_MAX; i++) { + if (*error_flag & 1 << i) + PMD_DRV_LOG(ERR, " %s, %d elem available, req:%d\n", + tf_hcapi_sram_2_str(i), + sram_query->sram_query[i].max, + tf_rm_rsvd_sram_value(dir, i)); + } +} + +/** + * Performs a HW resource check between what firmware capability + * reports and what the core expects is available. + * + * Firmware performs the resource carving at AFM init time and the + * resource capability is reported in the TruFlow qcaps msg. + * + * [in] query + * Pointer to HW Query data structure. Query holds what the firmware + * offers of the HW resources. 
+ * + * [in] dir + * Receive or transmit direction + * + * [in/out] error_flag + * Pointer to a bit array indicating the error of a single HCAPI + * resource type. When a bit is set to 1, the HCAPI resource type + * failed static allocation. + * + * Returns: + * 0 - Success + * -ENOMEM - Failure on one of the allocated resources. Check the + * error_flag for what types are flagged errored. + */ +static int +tf_rm_check_hw_qcaps_static(struct tf_rm_hw_query *query, + enum tf_dir dir, + uint32_t *error_flag) +{ + *error_flag = 0; + + TF_RM_CHECK_HW_ALLOC(query, + dir, + TF_RESC_TYPE_HW_L2_CTXT_TCAM, + TF_RSVD_L2_CTXT_TCAM, + error_flag); + + TF_RM_CHECK_HW_ALLOC(query, + dir, + TF_RESC_TYPE_HW_PROF_FUNC, + TF_RSVD_PROF_FUNC, + error_flag); + + TF_RM_CHECK_HW_ALLOC(query, + dir, + TF_RESC_TYPE_HW_PROF_TCAM, + TF_RSVD_PROF_TCAM, + error_flag); + + TF_RM_CHECK_HW_ALLOC(query, + dir, + TF_RESC_TYPE_HW_EM_PROF_ID, + TF_RSVD_EM_PROF_ID, + error_flag); + + TF_RM_CHECK_HW_ALLOC(query, + dir, + TF_RESC_TYPE_HW_EM_REC, + TF_RSVD_EM_REC, + error_flag); + + TF_RM_CHECK_HW_ALLOC(query, + dir, + TF_RESC_TYPE_HW_WC_TCAM_PROF_ID, + TF_RSVD_WC_TCAM_PROF_ID, + error_flag); + + TF_RM_CHECK_HW_ALLOC(query, + dir, + TF_RESC_TYPE_HW_WC_TCAM, + TF_RSVD_WC_TCAM, + error_flag); + + TF_RM_CHECK_HW_ALLOC(query, + dir, + TF_RESC_TYPE_HW_METER_PROF, + TF_RSVD_METER_PROF, + error_flag); + + TF_RM_CHECK_HW_ALLOC(query, + dir, + TF_RESC_TYPE_HW_METER_INST, + TF_RSVD_METER_INST, + error_flag); + + TF_RM_CHECK_HW_ALLOC(query, + dir, + TF_RESC_TYPE_HW_MIRROR, + TF_RSVD_MIRROR, + error_flag); + + TF_RM_CHECK_HW_ALLOC(query, + dir, + TF_RESC_TYPE_HW_UPAR, + TF_RSVD_UPAR, + error_flag); + + TF_RM_CHECK_HW_ALLOC(query, + dir, + TF_RESC_TYPE_HW_SP_TCAM, + TF_RSVD_SP_TCAM, + error_flag); + + TF_RM_CHECK_HW_ALLOC(query, + dir, + TF_RESC_TYPE_HW_L2_FUNC, + TF_RSVD_L2_FUNC, + error_flag); + + TF_RM_CHECK_HW_ALLOC(query, + dir, + TF_RESC_TYPE_HW_FKB, + TF_RSVD_FKB, + error_flag); + + TF_RM_CHECK_HW_ALLOC(query, + dir, + TF_RESC_TYPE_HW_TBL_SCOPE, + TF_RSVD_TBL_SCOPE, + error_flag); + + TF_RM_CHECK_HW_ALLOC(query, + dir, + TF_RESC_TYPE_HW_EPOCH0, + TF_RSVD_EPOCH0, + error_flag); + + TF_RM_CHECK_HW_ALLOC(query, + dir, + TF_RESC_TYPE_HW_EPOCH1, + TF_RSVD_EPOCH1, + error_flag); + + TF_RM_CHECK_HW_ALLOC(query, + dir, + TF_RESC_TYPE_HW_METADATA, + TF_RSVD_METADATA, + error_flag); + + TF_RM_CHECK_HW_ALLOC(query, + dir, + TF_RESC_TYPE_HW_CT_STATE, + TF_RSVD_CT_STATE, + error_flag); + + TF_RM_CHECK_HW_ALLOC(query, + dir, + TF_RESC_TYPE_HW_RANGE_PROF, + TF_RSVD_RANGE_PROF, + error_flag); + + TF_RM_CHECK_HW_ALLOC(query, + dir, + TF_RESC_TYPE_HW_RANGE_ENTRY, + TF_RSVD_RANGE_ENTRY, + error_flag); + + TF_RM_CHECK_HW_ALLOC(query, + dir, + TF_RESC_TYPE_HW_LAG_ENTRY, + TF_RSVD_LAG_ENTRY, + error_flag); + + if (*error_flag != 0) + return -ENOMEM; + + return 0; +} + +/** + * Performs a SRAM resource check between what firmware capability + * reports and what the core expects is available. + * + * Firmware performs the resource carving at AFM init time and the + * resource capability is reported in the TruFlow qcaps msg. + * + * [in] query + * Pointer to SRAM Query data structure. Query holds what the + * firmware offers of the SRAM resources. + * + * [in] dir + * Receive or transmit direction + * + * [in/out] error_flag + * Pointer to a bit array indicating the error of a single HCAPI + * resource type. When a bit is set to 1, the HCAPI resource type + * failed static allocation. 
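When tf_rm_check_hw_qcaps_static returns -ENOMEM, each set bit in error_flag identifies one failing HCAPI type by its enum value, the same bit positions the check macros set. A standalone sketch of decoding such a mask, with demo_ placeholders standing in for the driver types:

#include <stdio.h>
#include <stdint.h>

#define DEMO_TYPE_MAX 24	/* stands in for TF_RESC_TYPE_HW_MAX */

static void demo_print_qcaps_errors(uint32_t error_flag)
{
	/* Walk every type and report the ones whose bit is set, the same
	 * traversal tf_rm_print_hw_qcaps_error performs. */
	for (int i = 0; i < DEMO_TYPE_MAX; i++)
		if (error_flag & (1u << i))
			printf("type %d failed static allocation\n", i);
}

int main(void)
{
	uint32_t error_flag = (1u << 0) | (1u << 5);

	demo_print_qcaps_errors(error_flag);
	return 0;
}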
+ * + * Returns: + * 0 - Success + * -ENOMEM - Failure on one of the allocated resources. Check the + * error_flag for what types are flagged errored. + */ +static int +tf_rm_check_sram_qcaps_static(struct tf_rm_sram_query *query, + enum tf_dir dir, + uint32_t *error_flag) +{ + *error_flag = 0; + + TF_RM_CHECK_SRAM_ALLOC(query, + dir, + TF_RESC_TYPE_SRAM_FULL_ACTION, + TF_RSVD_SRAM_FULL_ACTION, + error_flag); + + TF_RM_CHECK_SRAM_ALLOC(query, + dir, + TF_RESC_TYPE_SRAM_MCG, + TF_RSVD_SRAM_MCG, + error_flag); + + TF_RM_CHECK_SRAM_ALLOC(query, + dir, + TF_RESC_TYPE_SRAM_ENCAP_8B, + TF_RSVD_SRAM_ENCAP_8B, + error_flag); + + TF_RM_CHECK_SRAM_ALLOC(query, + dir, + TF_RESC_TYPE_SRAM_ENCAP_16B, + TF_RSVD_SRAM_ENCAP_16B, + error_flag); + + TF_RM_CHECK_SRAM_ALLOC(query, + dir, + TF_RESC_TYPE_SRAM_ENCAP_64B, + TF_RSVD_SRAM_ENCAP_64B, + error_flag); + + TF_RM_CHECK_SRAM_ALLOC(query, + dir, + TF_RESC_TYPE_SRAM_SP_SMAC, + TF_RSVD_SRAM_SP_SMAC, + error_flag); + + TF_RM_CHECK_SRAM_ALLOC(query, + dir, + TF_RESC_TYPE_SRAM_SP_SMAC_IPV4, + TF_RSVD_SRAM_SP_SMAC_IPV4, + error_flag); + + TF_RM_CHECK_SRAM_ALLOC(query, + dir, + TF_RESC_TYPE_SRAM_SP_SMAC_IPV6, + TF_RSVD_SRAM_SP_SMAC_IPV6, + error_flag); + + TF_RM_CHECK_SRAM_ALLOC(query, + dir, + TF_RESC_TYPE_SRAM_COUNTER_64B, + TF_RSVD_SRAM_COUNTER_64B, + error_flag); + + TF_RM_CHECK_SRAM_ALLOC(query, + dir, + TF_RESC_TYPE_SRAM_NAT_SPORT, + TF_RSVD_SRAM_NAT_SPORT, + error_flag); + + TF_RM_CHECK_SRAM_ALLOC(query, + dir, + TF_RESC_TYPE_SRAM_NAT_DPORT, + TF_RSVD_SRAM_NAT_DPORT, + error_flag); + + TF_RM_CHECK_SRAM_ALLOC(query, + dir, + TF_RESC_TYPE_SRAM_NAT_S_IPV4, + TF_RSVD_SRAM_NAT_S_IPV4, + error_flag); + + TF_RM_CHECK_SRAM_ALLOC(query, + dir, + TF_RESC_TYPE_SRAM_NAT_D_IPV4, + TF_RSVD_SRAM_NAT_D_IPV4, + error_flag); + + if (*error_flag != 0) + return -ENOMEM; + + return 0; +} + +/** + * Internal function to mark pool entries used. + */ +static void +tf_rm_reserve_range(uint32_t count, + uint32_t rsv_begin, + uint32_t rsv_end, + uint32_t max, + struct bitalloc *pool) +{ + uint32_t i; + + /* If no resources has been requested we mark everything + * 'used' + */ + if (count == 0) { + for (i = 0; i < max; i++) + ba_alloc_index(pool, i); + } else { + /* Support 2 main modes + * Reserved range starts from bottom up (with + * pre-reserved value or not) + * - begin = 0 to end xx + * - begin = 1 to end xx + * + * Reserved range starts from top down + * - begin = yy to end max + */ + + /* Bottom up check, start from 0 */ + if (rsv_begin == 0) { + for (i = rsv_end + 1; i < max; i++) + ba_alloc_index(pool, i); + } + + /* Bottom up check, start from 1 or higher OR + * Top Down + */ + if (rsv_begin >= 1) { + /* Allocate from 0 until start */ + for (i = 0; i < rsv_begin; i++) + ba_alloc_index(pool, i); + + /* Skip and then do the remaining */ + if (rsv_end < max - 1) { + for (i = rsv_end; i < max; i++) + ba_alloc_index(pool, i); + } + } + } +} + +/** + * Internal function to mark all the l2 ctxt allocated that Truflow + * does not own. 
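tf_rm_reserve_range marks as used every pool index outside the range the session owns. A small standalone model of the same logic over a plain flag array (the driver's bitalloc and ba_* calls are not reproduced here) makes the cases easier to see; note that, as written, the rsv_begin == 0 branch keeps index rsv_end free while the rsv_begin >= 1 branch marks it used:

#include <stdio.h>
#include <string.h>

#define DEMO_MAX 10

/* used[i] != 0 models ba_alloc_index(pool, i) having been called. */
static void demo_reserve_range(unsigned count, unsigned rsv_begin,
			       unsigned rsv_end, unsigned max,
			       unsigned char *used)
{
	unsigned i;

	if (count == 0) {
		for (i = 0; i < max; i++)
			used[i] = 1;
		return;
	}
	if (rsv_begin == 0) {
		for (i = rsv_end + 1; i < max; i++)
			used[i] = 1;
	} else {
		for (i = 0; i < rsv_begin; i++)
			used[i] = 1;
		if (rsv_end < max - 1)
			for (i = rsv_end; i < max; i++)
				used[i] = 1;
	}
}

int main(void)
{
	unsigned char used[DEMO_MAX];

	memset(used, 0, sizeof(used));
	demo_reserve_range(4, 0, 3, DEMO_MAX, used);	/* bottom-up range */
	for (int i = 0; i < DEMO_MAX; i++)
		printf("%d", used[i]);			/* prints 0000111111 */
	printf("\n");
	return 0;
}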
+ */ +static void +tf_rm_rsvd_l2_ctxt(struct tf_session *tfs) +{ + uint32_t index = TF_RESC_TYPE_HW_L2_CTXT_TCAM; + uint32_t end = 0; + + /* l2 ctxt rx direction */ + if (tfs->resc.rx.hw_entry[index].stride > 0) + end = tfs->resc.rx.hw_entry[index].start + + tfs->resc.rx.hw_entry[index].stride - 1; + + tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride, + tfs->resc.rx.hw_entry[index].start, + end, + TF_NUM_L2_CTXT_TCAM, + tfs->TF_L2_CTXT_TCAM_POOL_NAME_RX); + + /* l2 ctxt tx direction */ + if (tfs->resc.tx.hw_entry[index].stride > 0) + end = tfs->resc.tx.hw_entry[index].start + + tfs->resc.tx.hw_entry[index].stride - 1; + + tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride, + tfs->resc.tx.hw_entry[index].start, + end, + TF_NUM_L2_CTXT_TCAM, + tfs->TF_L2_CTXT_TCAM_POOL_NAME_TX); +} + +/** + * Internal function to mark all the profile tcam and profile func + * resources that Truflow does not own. + */ +static void +tf_rm_rsvd_prof(struct tf_session *tfs) +{ + uint32_t index = TF_RESC_TYPE_HW_PROF_FUNC; + uint32_t end = 0; + + /* profile func rx direction */ + if (tfs->resc.rx.hw_entry[index].stride > 0) + end = tfs->resc.rx.hw_entry[index].start + + tfs->resc.rx.hw_entry[index].stride - 1; + + tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride, + tfs->resc.rx.hw_entry[index].start, + end, + TF_NUM_PROF_FUNC, + tfs->TF_PROF_FUNC_POOL_NAME_RX); + + /* profile func tx direction */ + if (tfs->resc.tx.hw_entry[index].stride > 0) + end = tfs->resc.tx.hw_entry[index].start + + tfs->resc.tx.hw_entry[index].stride - 1; + + tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride, + tfs->resc.tx.hw_entry[index].start, + end, + TF_NUM_PROF_FUNC, + tfs->TF_PROF_FUNC_POOL_NAME_TX); + + index = TF_RESC_TYPE_HW_PROF_TCAM; + + /* profile tcam rx direction */ + if (tfs->resc.rx.hw_entry[index].stride > 0) + end = tfs->resc.rx.hw_entry[index].start + + tfs->resc.rx.hw_entry[index].stride - 1; + + tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride, + tfs->resc.rx.hw_entry[index].start, + end, + TF_NUM_PROF_TCAM, + tfs->TF_PROF_TCAM_POOL_NAME_RX); + + /* profile tcam tx direction */ + if (tfs->resc.tx.hw_entry[index].stride > 0) + end = tfs->resc.tx.hw_entry[index].start + + tfs->resc.tx.hw_entry[index].stride - 1; + + tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride, + tfs->resc.tx.hw_entry[index].start, + end, + TF_NUM_PROF_TCAM, + tfs->TF_PROF_TCAM_POOL_NAME_TX); +} + +/** + * Internal function to mark all the em profile id allocated that + * Truflow does not own. + */ +static void +tf_rm_rsvd_em_prof(struct tf_session *tfs) +{ + uint32_t index = TF_RESC_TYPE_HW_EM_PROF_ID; + uint32_t end = 0; + + /* em prof id rx direction */ + if (tfs->resc.rx.hw_entry[index].stride > 0) + end = tfs->resc.rx.hw_entry[index].start + + tfs->resc.rx.hw_entry[index].stride - 1; + + tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride, + tfs->resc.rx.hw_entry[index].start, + end, + TF_NUM_EM_PROF_ID, + tfs->TF_EM_PROF_ID_POOL_NAME_RX); + + /* em prof id tx direction */ + if (tfs->resc.tx.hw_entry[index].stride > 0) + end = tfs->resc.tx.hw_entry[index].start + + tfs->resc.tx.hw_entry[index].stride - 1; + + tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride, + tfs->resc.tx.hw_entry[index].start, + end, + TF_NUM_EM_PROF_ID, + tfs->TF_EM_PROF_ID_POOL_NAME_TX); +} + +/** + * Internal function to mark all the wildcard tcam and profile id + * resources that Truflow does not own. 
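Members such as tfs->TF_L2_CTXT_TCAM_POOL_NAME_RX above are not literal field names: the preprocessor rewrites them through the *_POOL_NAME_* defines in tf_resources.h into the actual pool members of struct tf_session (for example l2_ctxt_tcam_pool_rx). A stripped-down sketch of the idiom, with demo_ names standing in for the real session layout, which is not shown in this hunk:

#include <stdio.h>

/* Mirrors "#define TF_L2_CTXT_TCAM_POOL_NAME_RX l2_ctxt_tcam_pool_rx". */
#define DEMO_POOL_NAME_RX demo_pool_rx

struct demo_session {
	int demo_pool_rx;	/* stand-in for the real bitalloc pool */
};

int main(void)
{
	struct demo_session s = { .demo_pool_rx = 42 };

	/* s.DEMO_POOL_NAME_RX is rewritten to s.demo_pool_rx before
	 * compilation, which is how the reserve helpers reach the
	 * direction-specific pools without spelling out each member. */
	printf("%d\n", s.DEMO_POOL_NAME_RX);
	return 0;
}

The same helpers compute the inclusive end of the owned range as start + stride - 1, guarded by stride > 0.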
+ */ +static void +tf_rm_rsvd_wc(struct tf_session *tfs) +{ + uint32_t index = TF_RESC_TYPE_HW_WC_TCAM_PROF_ID; + uint32_t end = 0; + + /* wc profile id rx direction */ + if (tfs->resc.rx.hw_entry[index].stride > 0) + end = tfs->resc.rx.hw_entry[index].start + + tfs->resc.rx.hw_entry[index].stride - 1; + + tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride, + tfs->resc.rx.hw_entry[index].start, + end, + TF_NUM_WC_PROF_ID, + tfs->TF_WC_TCAM_PROF_ID_POOL_NAME_RX); + + /* wc profile id tx direction */ + if (tfs->resc.tx.hw_entry[index].stride > 0) + end = tfs->resc.tx.hw_entry[index].start + + tfs->resc.tx.hw_entry[index].stride - 1; + + tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride, + tfs->resc.tx.hw_entry[index].start, + end, + TF_NUM_WC_PROF_ID, + tfs->TF_WC_TCAM_PROF_ID_POOL_NAME_TX); + + index = TF_RESC_TYPE_HW_WC_TCAM; + + /* wc tcam rx direction */ + if (tfs->resc.rx.hw_entry[index].stride > 0) + end = tfs->resc.rx.hw_entry[index].start + + tfs->resc.rx.hw_entry[index].stride - 1; + + tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride, + tfs->resc.rx.hw_entry[index].start, + end, + TF_NUM_WC_TCAM_ROW, + tfs->TF_WC_TCAM_POOL_NAME_RX); + + /* wc tcam tx direction */ + if (tfs->resc.tx.hw_entry[index].stride > 0) + end = tfs->resc.tx.hw_entry[index].start + + tfs->resc.tx.hw_entry[index].stride - 1; + + tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride, + tfs->resc.tx.hw_entry[index].start, + end, + TF_NUM_WC_TCAM_ROW, + tfs->TF_WC_TCAM_POOL_NAME_TX); +} + +/** + * Internal function to mark all the meter resources allocated that + * Truflow does not own. + */ +static void +tf_rm_rsvd_meter(struct tf_session *tfs) +{ + uint32_t index = TF_RESC_TYPE_HW_METER_PROF; + uint32_t end = 0; + + /* meter profiles rx direction */ + if (tfs->resc.rx.hw_entry[index].stride > 0) + end = tfs->resc.rx.hw_entry[index].start + + tfs->resc.rx.hw_entry[index].stride - 1; + + tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride, + tfs->resc.rx.hw_entry[index].start, + end, + TF_NUM_METER_PROF, + tfs->TF_METER_PROF_POOL_NAME_RX); + + /* meter profiles tx direction */ + if (tfs->resc.tx.hw_entry[index].stride > 0) + end = tfs->resc.tx.hw_entry[index].start + + tfs->resc.tx.hw_entry[index].stride - 1; + + tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride, + tfs->resc.tx.hw_entry[index].start, + end, + TF_NUM_METER_PROF, + tfs->TF_METER_PROF_POOL_NAME_TX); + + index = TF_RESC_TYPE_HW_METER_INST; + + /* meter rx direction */ + if (tfs->resc.rx.hw_entry[index].stride > 0) + end = tfs->resc.rx.hw_entry[index].start + + tfs->resc.rx.hw_entry[index].stride - 1; + + tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride, + tfs->resc.rx.hw_entry[index].start, + end, + TF_NUM_METER, + tfs->TF_METER_INST_POOL_NAME_RX); + + /* meter tx direction */ + if (tfs->resc.tx.hw_entry[index].stride > 0) + end = tfs->resc.tx.hw_entry[index].start + + tfs->resc.tx.hw_entry[index].stride - 1; + + tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride, + tfs->resc.tx.hw_entry[index].start, + end, + TF_NUM_METER, + tfs->TF_METER_INST_POOL_NAME_TX); +} + +/** + * Internal function to mark all the mirror resources allocated that + * Truflow does not own. 
+ */ +static void +tf_rm_rsvd_mirror(struct tf_session *tfs) +{ + uint32_t index = TF_RESC_TYPE_HW_MIRROR; + uint32_t end = 0; + + /* mirror rx direction */ + if (tfs->resc.rx.hw_entry[index].stride > 0) + end = tfs->resc.rx.hw_entry[index].start + + tfs->resc.rx.hw_entry[index].stride - 1; + + tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride, + tfs->resc.rx.hw_entry[index].start, + end, + TF_NUM_MIRROR, + tfs->TF_MIRROR_POOL_NAME_RX); + + /* mirror tx direction */ + if (tfs->resc.tx.hw_entry[index].stride > 0) + end = tfs->resc.tx.hw_entry[index].start + + tfs->resc.tx.hw_entry[index].stride - 1; + + tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride, + tfs->resc.tx.hw_entry[index].start, + end, + TF_NUM_MIRROR, + tfs->TF_MIRROR_POOL_NAME_TX); +} + +/** + * Internal function to mark all the upar resources allocated that + * Truflow does not own. + */ +static void +tf_rm_rsvd_upar(struct tf_session *tfs) +{ + uint32_t index = TF_RESC_TYPE_HW_UPAR; + uint32_t end = 0; + + /* upar rx direction */ + if (tfs->resc.rx.hw_entry[index].stride > 0) + end = tfs->resc.rx.hw_entry[index].start + + tfs->resc.rx.hw_entry[index].stride - 1; + + tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride, + tfs->resc.rx.hw_entry[index].start, + end, + TF_NUM_UPAR, + tfs->TF_UPAR_POOL_NAME_RX); + + /* upar tx direction */ + if (tfs->resc.tx.hw_entry[index].stride > 0) + end = tfs->resc.tx.hw_entry[index].start + + tfs->resc.tx.hw_entry[index].stride - 1; + + tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride, + tfs->resc.tx.hw_entry[index].start, + end, + TF_NUM_UPAR, + tfs->TF_UPAR_POOL_NAME_TX); +} + +/** + * Internal function to mark all the sp tcam resources allocated that + * Truflow does not own. + */ +static void +tf_rm_rsvd_sp_tcam(struct tf_session *tfs) +{ + uint32_t index = TF_RESC_TYPE_HW_SP_TCAM; + uint32_t end = 0; + + /* sp tcam rx direction */ + if (tfs->resc.rx.hw_entry[index].stride > 0) + end = tfs->resc.rx.hw_entry[index].start + + tfs->resc.rx.hw_entry[index].stride - 1; + + tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride, + tfs->resc.rx.hw_entry[index].start, + end, + TF_NUM_SP_TCAM, + tfs->TF_SP_TCAM_POOL_NAME_RX); + + /* sp tcam tx direction */ + if (tfs->resc.tx.hw_entry[index].stride > 0) + end = tfs->resc.tx.hw_entry[index].start + + tfs->resc.tx.hw_entry[index].stride - 1; + + tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride, + tfs->resc.tx.hw_entry[index].start, + end, + TF_NUM_SP_TCAM, + tfs->TF_SP_TCAM_POOL_NAME_TX); +} + +/** + * Internal function to mark all the l2 func resources allocated that + * Truflow does not own. + */ +static void +tf_rm_rsvd_l2_func(struct tf_session *tfs) +{ + uint32_t index = TF_RESC_TYPE_HW_L2_FUNC; + uint32_t end = 0; + + /* l2 func rx direction */ + if (tfs->resc.rx.hw_entry[index].stride > 0) + end = tfs->resc.rx.hw_entry[index].start + + tfs->resc.rx.hw_entry[index].stride - 1; + + tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride, + tfs->resc.rx.hw_entry[index].start, + end, + TF_NUM_L2_FUNC, + tfs->TF_L2_FUNC_POOL_NAME_RX); + + /* l2 func tx direction */ + if (tfs->resc.tx.hw_entry[index].stride > 0) + end = tfs->resc.tx.hw_entry[index].start + + tfs->resc.tx.hw_entry[index].stride - 1; + + tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride, + tfs->resc.tx.hw_entry[index].start, + end, + TF_NUM_L2_FUNC, + tfs->TF_L2_FUNC_POOL_NAME_TX); +} + +/** + * Internal function to mark all the fkb resources allocated that + * Truflow does not own. 
+ */ +static void +tf_rm_rsvd_fkb(struct tf_session *tfs) +{ + uint32_t index = TF_RESC_TYPE_HW_FKB; + uint32_t end = 0; + + /* fkb rx direction */ + if (tfs->resc.rx.hw_entry[index].stride > 0) + end = tfs->resc.rx.hw_entry[index].start + + tfs->resc.rx.hw_entry[index].stride - 1; + + tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride, + tfs->resc.rx.hw_entry[index].start, + end, + TF_NUM_FKB, + tfs->TF_FKB_POOL_NAME_RX); + + /* fkb tx direction */ + if (tfs->resc.tx.hw_entry[index].stride > 0) + end = tfs->resc.tx.hw_entry[index].start + + tfs->resc.tx.hw_entry[index].stride - 1; + + tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride, + tfs->resc.tx.hw_entry[index].start, + end, + TF_NUM_FKB, + tfs->TF_FKB_POOL_NAME_TX); +} + +/** + * Internal function to mark all the tbld scope resources allocated + * that Truflow does not own. + */ +static void +tf_rm_rsvd_tbl_scope(struct tf_session *tfs) +{ + uint32_t index = TF_RESC_TYPE_HW_TBL_SCOPE; + uint32_t end = 0; + + /* tbl scope rx direction */ + if (tfs->resc.rx.hw_entry[index].stride > 0) + end = tfs->resc.rx.hw_entry[index].start + + tfs->resc.rx.hw_entry[index].stride - 1; + + tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride, + tfs->resc.rx.hw_entry[index].start, + end, + TF_NUM_TBL_SCOPE, + tfs->TF_TBL_SCOPE_POOL_NAME_RX); + + /* tbl scope tx direction */ + if (tfs->resc.tx.hw_entry[index].stride > 0) + end = tfs->resc.tx.hw_entry[index].start + + tfs->resc.tx.hw_entry[index].stride - 1; + + tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride, + tfs->resc.tx.hw_entry[index].start, + end, + TF_NUM_TBL_SCOPE, + tfs->TF_TBL_SCOPE_POOL_NAME_TX); +} + +/** + * Internal function to mark all the l2 epoch resources allocated that + * Truflow does not own. + */ +static void +tf_rm_rsvd_epoch(struct tf_session *tfs) +{ + uint32_t index = TF_RESC_TYPE_HW_EPOCH0; + uint32_t end = 0; + + /* epoch0 rx direction */ + if (tfs->resc.rx.hw_entry[index].stride > 0) + end = tfs->resc.rx.hw_entry[index].start + + tfs->resc.rx.hw_entry[index].stride - 1; + + tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride, + tfs->resc.rx.hw_entry[index].start, + end, + TF_NUM_EPOCH0, + tfs->TF_EPOCH0_POOL_NAME_RX); + + /* epoch0 tx direction */ + if (tfs->resc.tx.hw_entry[index].stride > 0) + end = tfs->resc.tx.hw_entry[index].start + + tfs->resc.tx.hw_entry[index].stride - 1; + + tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride, + tfs->resc.tx.hw_entry[index].start, + end, + TF_NUM_EPOCH0, + tfs->TF_EPOCH0_POOL_NAME_TX); + + index = TF_RESC_TYPE_HW_EPOCH1; + + /* epoch1 rx direction */ + if (tfs->resc.rx.hw_entry[index].stride > 0) + end = tfs->resc.rx.hw_entry[index].start + + tfs->resc.rx.hw_entry[index].stride - 1; + + tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride, + tfs->resc.rx.hw_entry[index].start, + end, + TF_NUM_EPOCH1, + tfs->TF_EPOCH1_POOL_NAME_RX); + + /* epoch1 tx direction */ + if (tfs->resc.tx.hw_entry[index].stride > 0) + end = tfs->resc.tx.hw_entry[index].start + + tfs->resc.tx.hw_entry[index].stride - 1; + + tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride, + tfs->resc.tx.hw_entry[index].start, + end, + TF_NUM_EPOCH1, + tfs->TF_EPOCH1_POOL_NAME_TX); +} + +/** + * Internal function to mark all the metadata resources allocated that + * Truflow does not own. 
+ */ +static void +tf_rm_rsvd_metadata(struct tf_session *tfs) +{ + uint32_t index = TF_RESC_TYPE_HW_METADATA; + uint32_t end = 0; + + /* metadata rx direction */ + if (tfs->resc.rx.hw_entry[index].stride > 0) + end = tfs->resc.rx.hw_entry[index].start + + tfs->resc.rx.hw_entry[index].stride - 1; + + tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride, + tfs->resc.rx.hw_entry[index].start, + end, + TF_NUM_METADATA, + tfs->TF_METADATA_POOL_NAME_RX); + + /* metadata tx direction */ + if (tfs->resc.tx.hw_entry[index].stride > 0) + end = tfs->resc.tx.hw_entry[index].start + + tfs->resc.tx.hw_entry[index].stride - 1; + + tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride, + tfs->resc.tx.hw_entry[index].start, + end, + TF_NUM_METADATA, + tfs->TF_METADATA_POOL_NAME_TX); +} + +/** + * Internal function to mark all the ct state resources allocated that + * Truflow does not own. + */ +static void +tf_rm_rsvd_ct_state(struct tf_session *tfs) +{ + uint32_t index = TF_RESC_TYPE_HW_CT_STATE; + uint32_t end = 0; + + /* ct state rx direction */ + if (tfs->resc.rx.hw_entry[index].stride > 0) + end = tfs->resc.rx.hw_entry[index].start + + tfs->resc.rx.hw_entry[index].stride - 1; + + tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride, + tfs->resc.rx.hw_entry[index].start, + end, + TF_NUM_CT_STATE, + tfs->TF_CT_STATE_POOL_NAME_RX); + + /* ct state tx direction */ + if (tfs->resc.tx.hw_entry[index].stride > 0) + end = tfs->resc.tx.hw_entry[index].start + + tfs->resc.tx.hw_entry[index].stride - 1; + + tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride, + tfs->resc.tx.hw_entry[index].start, + end, + TF_NUM_CT_STATE, + tfs->TF_CT_STATE_POOL_NAME_TX); +} + +/** + * Internal function to mark all the range resources allocated that + * Truflow does not own. + */ +static void +tf_rm_rsvd_range(struct tf_session *tfs) +{ + uint32_t index = TF_RESC_TYPE_HW_RANGE_PROF; + uint32_t end = 0; + + /* range profile rx direction */ + if (tfs->resc.rx.hw_entry[index].stride > 0) + end = tfs->resc.rx.hw_entry[index].start + + tfs->resc.rx.hw_entry[index].stride - 1; + + tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride, + tfs->resc.rx.hw_entry[index].start, + end, + TF_NUM_RANGE_PROF, + tfs->TF_RANGE_PROF_POOL_NAME_RX); + + /* range profile tx direction */ + if (tfs->resc.tx.hw_entry[index].stride > 0) + end = tfs->resc.tx.hw_entry[index].start + + tfs->resc.tx.hw_entry[index].stride - 1; + + tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride, + tfs->resc.tx.hw_entry[index].start, + end, + TF_NUM_RANGE_PROF, + tfs->TF_RANGE_PROF_POOL_NAME_TX); + + index = TF_RESC_TYPE_HW_RANGE_ENTRY; + + /* range entry rx direction */ + if (tfs->resc.rx.hw_entry[index].stride > 0) + end = tfs->resc.rx.hw_entry[index].start + + tfs->resc.rx.hw_entry[index].stride - 1; + + tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride, + tfs->resc.rx.hw_entry[index].start, + end, + TF_NUM_RANGE_ENTRY, + tfs->TF_RANGE_ENTRY_POOL_NAME_RX); + + /* range entry tx direction */ + if (tfs->resc.tx.hw_entry[index].stride > 0) + end = tfs->resc.tx.hw_entry[index].start + + tfs->resc.tx.hw_entry[index].stride - 1; + + tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride, + tfs->resc.tx.hw_entry[index].start, + end, + TF_NUM_RANGE_ENTRY, + tfs->TF_RANGE_ENTRY_POOL_NAME_TX); +} + +/** + * Internal function to mark all the lag resources allocated that + * Truflow does not own. 
+ */ +static void +tf_rm_rsvd_lag_entry(struct tf_session *tfs) +{ + uint32_t index = TF_RESC_TYPE_HW_LAG_ENTRY; + uint32_t end = 0; + + /* lag entry rx direction */ + if (tfs->resc.rx.hw_entry[index].stride > 0) + end = tfs->resc.rx.hw_entry[index].start + + tfs->resc.rx.hw_entry[index].stride - 1; + + tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride, + tfs->resc.rx.hw_entry[index].start, + end, + TF_NUM_LAG_ENTRY, + tfs->TF_LAG_ENTRY_POOL_NAME_RX); + + /* lag entry tx direction */ + if (tfs->resc.tx.hw_entry[index].stride > 0) + end = tfs->resc.tx.hw_entry[index].start + + tfs->resc.tx.hw_entry[index].stride - 1; + + tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride, + tfs->resc.tx.hw_entry[index].start, + end, + TF_NUM_LAG_ENTRY, + tfs->TF_LAG_ENTRY_POOL_NAME_TX); +} + +/** + * Internal function to mark all the full action resources allocated + * that Truflow does not own. + */ +static void +tf_rm_rsvd_sram_full_action(struct tf_session *tfs) +{ + uint32_t index = TF_RESC_TYPE_SRAM_FULL_ACTION; + uint16_t end = 0; + + /* full action rx direction */ + if (tfs->resc.rx.sram_entry[index].stride > 0) + end = tfs->resc.rx.sram_entry[index].start + + tfs->resc.rx.sram_entry[index].stride - 1; + + tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride, + TF_RSVD_SRAM_FULL_ACTION_BEGIN_IDX_RX, + end, + TF_RSVD_SRAM_FULL_ACTION_RX, + tfs->TF_SRAM_FULL_ACTION_POOL_NAME_RX); + + /* full action tx direction */ + if (tfs->resc.tx.sram_entry[index].stride > 0) + end = tfs->resc.tx.sram_entry[index].start + + tfs->resc.tx.sram_entry[index].stride - 1; + + tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride, + TF_RSVD_SRAM_FULL_ACTION_BEGIN_IDX_TX, + end, + TF_RSVD_SRAM_FULL_ACTION_TX, + tfs->TF_SRAM_FULL_ACTION_POOL_NAME_TX); +} + +/** + * Internal function to mark all the multicast group resources + * allocated that Truflow does not own. + */ +static void +tf_rm_rsvd_sram_mcg(struct tf_session *tfs) +{ + uint32_t index = TF_RESC_TYPE_SRAM_MCG; + uint16_t end = 0; + + /* multicast group rx direction */ + if (tfs->resc.rx.sram_entry[index].stride > 0) + end = tfs->resc.rx.sram_entry[index].start + + tfs->resc.rx.sram_entry[index].stride - 1; + + tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride, + TF_RSVD_SRAM_MCG_BEGIN_IDX_RX, + end, + TF_RSVD_SRAM_MCG_RX, + tfs->TF_SRAM_MCG_POOL_NAME_RX); + + /* Multicast Group on TX is not supported */ +} + +/** + * Internal function to mark all the encap resources allocated that + * Truflow does not own. 
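The per-type reserve helpers repeat one start/stride pattern per resource, with the HW pools sized by TF_NUM_* and the SRAM pools sized by the reserved counts themselves. A table-driven form is one possible alternative; the sketch below is illustrative only (demo_ names, a simplified entry type) and is not how the driver is structured:

#include <stdio.h>

struct demo_entry {
	unsigned start;
	unsigned stride;	/* number of owned elements, 0 = none */
};

struct demo_rsvd_desc {
	const char *name;
	unsigned pool_max;	/* e.g. TF_NUM_MIRROR */
};

static void demo_reserve_one(const struct demo_rsvd_desc *d,
			     const struct demo_entry *e)
{
	unsigned end = 0;

	if (e->stride > 0)
		end = e->start + e->stride - 1;
	/* A real implementation would call tf_rm_reserve_range() here. */
	printf("%s: own [%u..%u] of %u\n", d->name, e->start, end, d->pool_max);
}

int main(void)
{
	static const struct demo_rsvd_desc descs[] = {
		{ "mirror", 2 },
		{ "upar",   4 },
	};
	struct demo_entry entries[] = { { 0, 1 }, { 0, 2 } };

	for (unsigned i = 0; i < sizeof(descs) / sizeof(descs[0]); i++)
		demo_reserve_one(&descs[i], &entries[i]);
	return 0;
}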
+ */ +static void +tf_rm_rsvd_sram_encap(struct tf_session *tfs) +{ + uint32_t index = TF_RESC_TYPE_SRAM_ENCAP_8B; + uint16_t end = 0; + + /* encap 8b rx direction */ + if (tfs->resc.rx.sram_entry[index].stride > 0) + end = tfs->resc.rx.sram_entry[index].start + + tfs->resc.rx.sram_entry[index].stride - 1; + + tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride, + TF_RSVD_SRAM_ENCAP_8B_BEGIN_IDX_RX, + end, + TF_RSVD_SRAM_ENCAP_8B_RX, + tfs->TF_SRAM_ENCAP_8B_POOL_NAME_RX); + + /* encap 8b tx direction */ + if (tfs->resc.tx.sram_entry[index].stride > 0) + end = tfs->resc.tx.sram_entry[index].start + + tfs->resc.tx.sram_entry[index].stride - 1; + + tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride, + TF_RSVD_SRAM_ENCAP_8B_BEGIN_IDX_TX, + end, + TF_RSVD_SRAM_ENCAP_8B_TX, + tfs->TF_SRAM_ENCAP_8B_POOL_NAME_TX); + + index = TF_RESC_TYPE_SRAM_ENCAP_16B; + + /* encap 16b rx direction */ + if (tfs->resc.rx.sram_entry[index].stride > 0) + end = tfs->resc.rx.sram_entry[index].start + + tfs->resc.rx.sram_entry[index].stride - 1; + + tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride, + TF_RSVD_SRAM_ENCAP_16B_BEGIN_IDX_RX, + end, + TF_RSVD_SRAM_ENCAP_16B_RX, + tfs->TF_SRAM_ENCAP_16B_POOL_NAME_RX); + + /* encap 16b tx direction */ + if (tfs->resc.tx.sram_entry[index].stride > 0) + end = tfs->resc.tx.sram_entry[index].start + + tfs->resc.tx.sram_entry[index].stride - 1; + + tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride, + TF_RSVD_SRAM_ENCAP_16B_BEGIN_IDX_TX, + end, + TF_RSVD_SRAM_ENCAP_16B_TX, + tfs->TF_SRAM_ENCAP_16B_POOL_NAME_TX); + + index = TF_RESC_TYPE_SRAM_ENCAP_64B; + + /* Encap 64B not supported on RX */ + + /* Encap 64b tx direction */ + if (tfs->resc.tx.sram_entry[index].stride > 0) + end = tfs->resc.tx.sram_entry[index].start + + tfs->resc.tx.sram_entry[index].stride - 1; + + tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride, + TF_RSVD_SRAM_ENCAP_64B_BEGIN_IDX_TX, + end, + TF_RSVD_SRAM_ENCAP_64B_TX, + tfs->TF_SRAM_ENCAP_64B_POOL_NAME_TX); +} + +/** + * Internal function to mark all the sp resources allocated that + * Truflow does not own. 
+ */ +static void +tf_rm_rsvd_sram_sp(struct tf_session *tfs) +{ + uint32_t index = TF_RESC_TYPE_SRAM_SP_SMAC; + uint16_t end = 0; + + /* sp smac rx direction */ + if (tfs->resc.rx.sram_entry[index].stride > 0) + end = tfs->resc.rx.sram_entry[index].start + + tfs->resc.rx.sram_entry[index].stride - 1; + + tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride, + TF_RSVD_SRAM_SP_SMAC_BEGIN_IDX_RX, + end, + TF_RSVD_SRAM_SP_SMAC_RX, + tfs->TF_SRAM_SP_SMAC_POOL_NAME_RX); + + /* sp smac tx direction */ + if (tfs->resc.tx.sram_entry[index].stride > 0) + end = tfs->resc.tx.sram_entry[index].start + + tfs->resc.tx.sram_entry[index].stride - 1; + + tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride, + TF_RSVD_SRAM_SP_SMAC_BEGIN_IDX_TX, + end, + TF_RSVD_SRAM_SP_SMAC_TX, + tfs->TF_SRAM_SP_SMAC_POOL_NAME_TX); + + index = TF_RESC_TYPE_SRAM_SP_SMAC_IPV4; + + /* SP SMAC IPv4 not supported on RX */ + + /* sp smac ipv4 tx direction */ + if (tfs->resc.tx.sram_entry[index].stride > 0) + end = tfs->resc.tx.sram_entry[index].start + + tfs->resc.tx.sram_entry[index].stride - 1; + + tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride, + TF_RSVD_SRAM_SP_SMAC_IPV4_BEGIN_IDX_TX, + end, + TF_RSVD_SRAM_SP_SMAC_IPV4_TX, + tfs->TF_SRAM_SP_SMAC_IPV4_POOL_NAME_TX); + + index = TF_RESC_TYPE_SRAM_SP_SMAC_IPV6; + + /* SP SMAC IPv6 not supported on RX */ + + /* sp smac ipv6 tx direction */ + if (tfs->resc.tx.sram_entry[index].stride > 0) + end = tfs->resc.tx.sram_entry[index].start + + tfs->resc.tx.sram_entry[index].stride - 1; + + tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride, + TF_RSVD_SRAM_SP_SMAC_IPV6_BEGIN_IDX_TX, + end, + TF_RSVD_SRAM_SP_SMAC_IPV6_TX, + tfs->TF_SRAM_SP_SMAC_IPV6_POOL_NAME_TX); +} + +/** + * Internal function to mark all the stat resources allocated that + * Truflow does not own. + */ +static void +tf_rm_rsvd_sram_stats(struct tf_session *tfs) +{ + uint32_t index = TF_RESC_TYPE_SRAM_COUNTER_64B; + uint16_t end = 0; + + /* counter 64b rx direction */ + if (tfs->resc.rx.sram_entry[index].stride > 0) + end = tfs->resc.rx.sram_entry[index].start + + tfs->resc.rx.sram_entry[index].stride - 1; + + tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride, + TF_RSVD_SRAM_COUNTER_64B_BEGIN_IDX_RX, + end, + TF_RSVD_SRAM_COUNTER_64B_RX, + tfs->TF_SRAM_STATS_64B_POOL_NAME_RX); + + /* counter 64b tx direction */ + if (tfs->resc.tx.sram_entry[index].stride > 0) + end = tfs->resc.tx.sram_entry[index].start + + tfs->resc.tx.sram_entry[index].stride - 1; + + tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride, + TF_RSVD_SRAM_COUNTER_64B_BEGIN_IDX_TX, + end, + TF_RSVD_SRAM_COUNTER_64B_TX, + tfs->TF_SRAM_STATS_64B_POOL_NAME_TX); +} + +/** + * Internal function to mark all the nat resources allocated that + * Truflow does not own. 
+ */ +static void +tf_rm_rsvd_sram_nat(struct tf_session *tfs) +{ + uint32_t index = TF_RESC_TYPE_SRAM_NAT_SPORT; + uint16_t end = 0; + + /* nat source port rx direction */ + if (tfs->resc.rx.sram_entry[index].stride > 0) + end = tfs->resc.rx.sram_entry[index].start + + tfs->resc.rx.sram_entry[index].stride - 1; + + tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride, + TF_RSVD_SRAM_NAT_SPORT_BEGIN_IDX_RX, + end, + TF_RSVD_SRAM_NAT_SPORT_RX, + tfs->TF_SRAM_NAT_SPORT_POOL_NAME_RX); + + /* nat source port tx direction */ + if (tfs->resc.tx.sram_entry[index].stride > 0) + end = tfs->resc.tx.sram_entry[index].start + + tfs->resc.tx.sram_entry[index].stride - 1; + + tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride, + TF_RSVD_SRAM_NAT_SPORT_BEGIN_IDX_TX, + end, + TF_RSVD_SRAM_NAT_SPORT_TX, + tfs->TF_SRAM_NAT_SPORT_POOL_NAME_TX); + + index = TF_RESC_TYPE_SRAM_NAT_DPORT; + + /* nat destination port rx direction */ + if (tfs->resc.rx.sram_entry[index].stride > 0) + end = tfs->resc.rx.sram_entry[index].start + + tfs->resc.rx.sram_entry[index].stride - 1; + + tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride, + TF_RSVD_SRAM_NAT_DPORT_BEGIN_IDX_RX, + end, + TF_RSVD_SRAM_NAT_DPORT_RX, + tfs->TF_SRAM_NAT_DPORT_POOL_NAME_RX); + + /* nat destination port tx direction */ + if (tfs->resc.tx.sram_entry[index].stride > 0) + end = tfs->resc.tx.sram_entry[index].start + + tfs->resc.tx.sram_entry[index].stride - 1; + + tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride, + TF_RSVD_SRAM_NAT_DPORT_BEGIN_IDX_TX, + end, + TF_RSVD_SRAM_NAT_DPORT_TX, + tfs->TF_SRAM_NAT_DPORT_POOL_NAME_TX); + + index = TF_RESC_TYPE_SRAM_NAT_S_IPV4; + + /* nat source port ipv4 rx direction */ + if (tfs->resc.rx.sram_entry[index].stride > 0) + end = tfs->resc.rx.sram_entry[index].start + + tfs->resc.rx.sram_entry[index].stride - 1; + + tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride, + TF_RSVD_SRAM_NAT_S_IPV4_BEGIN_IDX_RX, + end, + TF_RSVD_SRAM_NAT_S_IPV4_RX, + tfs->TF_SRAM_NAT_S_IPV4_POOL_NAME_RX); + + /* nat source ipv4 port tx direction */ + if (tfs->resc.tx.sram_entry[index].stride > 0) + end = tfs->resc.tx.sram_entry[index].start + + tfs->resc.tx.sram_entry[index].stride - 1; + + tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride, + TF_RSVD_SRAM_NAT_S_IPV4_BEGIN_IDX_TX, + end, + TF_RSVD_SRAM_NAT_S_IPV4_TX, + tfs->TF_SRAM_NAT_S_IPV4_POOL_NAME_TX); + + index = TF_RESC_TYPE_SRAM_NAT_D_IPV4; + + /* nat destination port ipv4 rx direction */ + if (tfs->resc.rx.sram_entry[index].stride > 0) + end = tfs->resc.rx.sram_entry[index].start + + tfs->resc.rx.sram_entry[index].stride - 1; + + tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride, + TF_RSVD_SRAM_NAT_D_IPV4_BEGIN_IDX_RX, + end, + TF_RSVD_SRAM_NAT_D_IPV4_RX, + tfs->TF_SRAM_NAT_D_IPV4_POOL_NAME_RX); + + /* nat destination ipv4 port tx direction */ + if (tfs->resc.tx.sram_entry[index].stride > 0) + end = tfs->resc.tx.sram_entry[index].start + + tfs->resc.tx.sram_entry[index].stride - 1; + + tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride, + TF_RSVD_SRAM_NAT_D_IPV4_BEGIN_IDX_TX, + end, + TF_RSVD_SRAM_NAT_D_IPV4_TX, + tfs->TF_SRAM_NAT_D_IPV4_POOL_NAME_TX); +} + +/** + * Internal function used to validate the HW allocated resources + * against the requested values. 
+ */ +static int +tf_rm_hw_alloc_validate(enum tf_dir dir, + struct tf_rm_hw_alloc *hw_alloc, + struct tf_rm_entry *hw_entry) +{ + int error = 0; + int i; + + for (i = 0; i < TF_RESC_TYPE_HW_MAX; i++) { + if (hw_entry[i].stride != hw_alloc->hw_num[i]) { + PMD_DRV_LOG(ERR, + "%s, Alloc failed id:%d expect:%d got:%d\n", + tf_dir_2_str(dir), + i, + hw_alloc->hw_num[i], + hw_entry[i].stride); + error = -1; + } + } + + return error; +} + +/** + * Internal function used to validate the SRAM allocated resources + * against the requested values. + */ +static int +tf_rm_sram_alloc_validate(enum tf_dir dir __rte_unused, + struct tf_rm_sram_alloc *sram_alloc, + struct tf_rm_entry *sram_entry) +{ + int error = 0; + int i; + + for (i = 0; i < TF_RESC_TYPE_SRAM_MAX; i++) { + if (sram_entry[i].stride != sram_alloc->sram_num[i]) { + PMD_DRV_LOG(ERR, + "%s, Alloc failed idx:%d expect:%d got:%d\n", + tf_dir_2_str(dir), + i, + sram_alloc->sram_num[i], + sram_entry[i].stride); + error = -1; + } + } + + return error; +} + +/** + * Internal function used to mark all the HW resources allocated that + * Truflow does not own. + */ +static void +tf_rm_reserve_hw(struct tf *tfp) +{ + struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data); + + /* TBD + * There is no direct AFM resource allocation as it is carved + * statically at AFM boot time. Thus the bit allocators work + * on the full HW resource amount and we just mark everything + * used except the resources that Truflow took ownership off. + */ + tf_rm_rsvd_l2_ctxt(tfs); + tf_rm_rsvd_prof(tfs); + tf_rm_rsvd_em_prof(tfs); + tf_rm_rsvd_wc(tfs); + tf_rm_rsvd_mirror(tfs); + tf_rm_rsvd_meter(tfs); + tf_rm_rsvd_upar(tfs); + tf_rm_rsvd_sp_tcam(tfs); + tf_rm_rsvd_l2_func(tfs); + tf_rm_rsvd_fkb(tfs); + tf_rm_rsvd_tbl_scope(tfs); + tf_rm_rsvd_epoch(tfs); + tf_rm_rsvd_metadata(tfs); + tf_rm_rsvd_ct_state(tfs); + tf_rm_rsvd_range(tfs); + tf_rm_rsvd_lag_entry(tfs); +} + +/** + * Internal function used to mark all the SRAM resources allocated + * that Truflow does not own. + */ +static void +tf_rm_reserve_sram(struct tf *tfp) +{ + struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data); + + /* TBD + * There is no direct AFM resource allocation as it is carved + * statically at AFM boot time. Thus the bit allocators work + * on the full HW resource amount and we just mark everything + * used except the resources that Truflow took ownership off. + */ + tf_rm_rsvd_sram_full_action(tfs); + tf_rm_rsvd_sram_mcg(tfs); + tf_rm_rsvd_sram_encap(tfs); + tf_rm_rsvd_sram_sp(tfs); + tf_rm_rsvd_sram_stats(tfs); + tf_rm_rsvd_sram_nat(tfs); +} + +/** + * Internal function used to allocate and validate all HW resources. 
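tf_rm_allocate_validate_hw below strings the earlier pieces together: query firmware capability, check it against the static reservations, request everything that was offered, then verify the granted strides match the request. A compressed standalone model of that request/grant/validate loop, with demo_ arrays in place of the query, alloc and entry structures:

#include <stdio.h>

#define DEMO_TYPE_MAX 3

int main(void)
{
	/* What the (simulated) firmware offers per resource type. */
	unsigned qcaps_max[DEMO_TYPE_MAX] = { 16, 8, 4 };
	unsigned alloc_req[DEMO_TYPE_MAX];
	/* What it actually grants; type 1 comes back short. */
	unsigned granted_stride[DEMO_TYPE_MAX] = { 16, 6, 4 };
	int rc = 0;

	/* "Post process HW capability": request all that was offered. */
	for (int i = 0; i < DEMO_TYPE_MAX; i++)
		alloc_req[i] = qcaps_max[i];

	/* Validation step, mirroring tf_rm_hw_alloc_validate. */
	for (int i = 0; i < DEMO_TYPE_MAX; i++) {
		if (granted_stride[i] != alloc_req[i]) {
			printf("type %d: expected %u got %u\n",
			       i, alloc_req[i], granted_stride[i]);
			rc = -1;
		}
	}
	return rc ? 1 : 0;
}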
+ */ +static int +tf_rm_allocate_validate_hw(struct tf *tfp, + enum tf_dir dir) +{ + int rc; + int i; + struct tf_rm_hw_query hw_query; + struct tf_rm_hw_alloc hw_alloc; + struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data); + struct tf_rm_entry *hw_entries; + uint32_t error_flag; + + if (dir == TF_DIR_RX) + hw_entries = tfs->resc.rx.hw_entry; + else + hw_entries = tfs->resc.tx.hw_entry; + + /* Query for Session HW Resources */ + rc = tf_msg_session_hw_resc_qcaps(tfp, dir, &hw_query); + if (rc) { + /* Log error */ + PMD_DRV_LOG(ERR, + "%s, HW qcaps message send failed\n", + tf_dir_2_str(dir)); + goto cleanup; + } + + rc = tf_rm_check_hw_qcaps_static(&hw_query, dir, &error_flag); + if (rc) { + /* Log error */ + PMD_DRV_LOG(ERR, + "%s, HW QCAPS validation failed, error_flag:0x%x\n", + tf_dir_2_str(dir), + error_flag); + tf_rm_print_hw_qcaps_error(dir, &hw_query, &error_flag); + goto cleanup; + } + + /* Post process HW capability */ + for (i = 0; i < TF_RESC_TYPE_HW_MAX; i++) + hw_alloc.hw_num[i] = hw_query.hw_query[i].max; + + /* Allocate Session HW Resources */ + rc = tf_msg_session_hw_resc_alloc(tfp, dir, &hw_alloc, hw_entries); + if (rc) { + /* Log error */ + PMD_DRV_LOG(ERR, + "%s, HW alloc message send failed\n", + tf_dir_2_str(dir)); + goto cleanup; + } + + /* Perform HW allocation validation as its possible the + * resource availability changed between qcaps and alloc + */ + rc = tf_rm_hw_alloc_validate(dir, &hw_alloc, hw_entries); + if (rc) { + /* Log error */ + PMD_DRV_LOG(ERR, + "%s, HW Resource validation failed\n", + tf_dir_2_str(dir)); + goto cleanup; + } + + return 0; + + cleanup: + return -1; +} + +/** + * Internal function used to allocate and validate all SRAM resources. + * + * [in] tfp + * Pointer to TF handle + * + * [in] dir + * Receive or transmit direction + * + * Returns: + * 0 - Success + * -1 - Internal error + */ +static int +tf_rm_allocate_validate_sram(struct tf *tfp, + enum tf_dir dir) +{ + int rc; + int i; + struct tf_rm_sram_query sram_query; + struct tf_rm_sram_alloc sram_alloc; + struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data); + struct tf_rm_entry *sram_entries; + uint32_t error_flag; + + if (dir == TF_DIR_RX) + sram_entries = tfs->resc.rx.sram_entry; + else + sram_entries = tfs->resc.tx.sram_entry; + + /* Query for Session SRAM Resources */ + rc = tf_msg_session_sram_resc_qcaps(tfp, dir, &sram_query); + if (rc) { + /* Log error */ + PMD_DRV_LOG(ERR, + "%s, SRAM qcaps message send failed\n", + tf_dir_2_str(dir)); + goto cleanup; + } + + rc = tf_rm_check_sram_qcaps_static(&sram_query, dir, &error_flag); + if (rc) { + /* Log error */ + PMD_DRV_LOG(ERR, + "%s, SRAM QCAPS validation failed, error_flag:%x\n", + tf_dir_2_str(dir), + error_flag); + tf_rm_print_sram_qcaps_error(dir, &sram_query, &error_flag); + goto cleanup; + } + + /* Post process SRAM capability */ + for (i = 0; i < TF_RESC_TYPE_SRAM_MAX; i++) + sram_alloc.sram_num[i] = sram_query.sram_query[i].max; + + /* Allocate Session SRAM Resources */ + rc = tf_msg_session_sram_resc_alloc(tfp, + dir, + &sram_alloc, + sram_entries); + if (rc) { + /* Log error */ + PMD_DRV_LOG(ERR, + "%s, SRAM alloc message send failed\n", + tf_dir_2_str(dir)); + goto cleanup; + } + + /* Perform SRAM allocation validation as its possible the + * resource availability changed between qcaps and alloc + */ + rc = tf_rm_sram_alloc_validate(dir, &sram_alloc, sram_entries); + if (rc) { + /* Log error */ + PMD_DRV_LOG(ERR, + "%s, SRAM Resource allocation validation failed\n", + 
tf_dir_2_str(dir)); + goto cleanup; + } + + return 0; + + cleanup: + return -1; +} + +/** + * Helper function used to prune a HW resource array to only hold + * elements that needs to be flushed. + * + * [in] tfs + * Session handle + * + * [in] dir + * Receive or transmit direction + * + * [in] hw_entries + * Master HW Resource database + * + * [in/out] flush_entries + * Pruned HW Resource database of entries to be flushed. This + * array should be passed in as a complete copy of the master HW + * Resource database. The outgoing result will be a pruned version + * based on the result of the requested checking + * + * Returns: + * 0 - Success, no flush required + * 1 - Success, flush required + * -1 - Internal error + */ +static int +tf_rm_hw_to_flush(struct tf_session *tfs, + enum tf_dir dir, + struct tf_rm_entry *hw_entries, + struct tf_rm_entry *flush_entries) +{ + int rc; + int flush_rc = 0; + int free_cnt; + struct bitalloc *pool; + + /* Check all the hw resource pools and check for left over + * elements. Any found will result in the complete pool of a + * type to get invalidated. + */ + + TF_RM_GET_POOLS(tfs, dir, &pool, + TF_L2_CTXT_TCAM_POOL_NAME, + rc); + if (rc) + return rc; + free_cnt = ba_free_count(pool); + if (free_cnt == hw_entries[TF_RESC_TYPE_HW_L2_CTXT_TCAM].stride) { + flush_entries[TF_RESC_TYPE_HW_L2_CTXT_TCAM].start = 0; + flush_entries[TF_RESC_TYPE_HW_L2_CTXT_TCAM].stride = 0; + } else { + flush_rc = 1; + } + + TF_RM_GET_POOLS(tfs, dir, &pool, + TF_PROF_FUNC_POOL_NAME, + rc); + if (rc) + return rc; + free_cnt = ba_free_count(pool); + if (free_cnt == hw_entries[TF_RESC_TYPE_HW_PROF_FUNC].stride) { + flush_entries[TF_RESC_TYPE_HW_PROF_FUNC].start = 0; + flush_entries[TF_RESC_TYPE_HW_PROF_FUNC].stride = 0; + } else { + flush_rc = 1; + } + + TF_RM_GET_POOLS(tfs, dir, &pool, + TF_PROF_TCAM_POOL_NAME, + rc); + if (rc) + return rc; + free_cnt = ba_free_count(pool); + if (free_cnt == hw_entries[TF_RESC_TYPE_HW_PROF_TCAM].stride) { + flush_entries[TF_RESC_TYPE_HW_PROF_TCAM].start = 0; + flush_entries[TF_RESC_TYPE_HW_PROF_TCAM].stride = 0; + } else { + flush_rc = 1; + } + + TF_RM_GET_POOLS(tfs, dir, &pool, + TF_EM_PROF_ID_POOL_NAME, + rc); + if (rc) + return rc; + free_cnt = ba_free_count(pool); + if (free_cnt == hw_entries[TF_RESC_TYPE_HW_EM_PROF_ID].stride) { + flush_entries[TF_RESC_TYPE_HW_EM_PROF_ID].start = 0; + flush_entries[TF_RESC_TYPE_HW_EM_PROF_ID].stride = 0; + } else { + flush_rc = 1; + } + + flush_entries[TF_RESC_TYPE_HW_EM_REC].start = 0; + flush_entries[TF_RESC_TYPE_HW_EM_REC].stride = 0; + + TF_RM_GET_POOLS(tfs, dir, &pool, + TF_WC_TCAM_PROF_ID_POOL_NAME, + rc); + if (rc) + return rc; + free_cnt = ba_free_count(pool); + if (free_cnt == hw_entries[TF_RESC_TYPE_HW_WC_TCAM_PROF_ID].stride) { + flush_entries[TF_RESC_TYPE_HW_WC_TCAM_PROF_ID].start = 0; + flush_entries[TF_RESC_TYPE_HW_WC_TCAM_PROF_ID].stride = 0; + } else { + flush_rc = 1; + } + + TF_RM_GET_POOLS(tfs, dir, &pool, + TF_WC_TCAM_POOL_NAME, + rc); + if (rc) + return rc; + free_cnt = ba_free_count(pool); + if (free_cnt == hw_entries[TF_RESC_TYPE_HW_WC_TCAM].stride) { + flush_entries[TF_RESC_TYPE_HW_WC_TCAM].start = 0; + flush_entries[TF_RESC_TYPE_HW_WC_TCAM].stride = 0; + } else { + flush_rc = 1; + } + + TF_RM_GET_POOLS(tfs, dir, &pool, + TF_METER_PROF_POOL_NAME, + rc); + if (rc) + return rc; + free_cnt = ba_free_count(pool); + if (free_cnt == hw_entries[TF_RESC_TYPE_HW_METER_PROF].stride) { + flush_entries[TF_RESC_TYPE_HW_METER_PROF].start = 0; + flush_entries[TF_RESC_TYPE_HW_METER_PROF].stride = 0; + } 
else { + flush_rc = 1; + } + + TF_RM_GET_POOLS(tfs, dir, &pool, + TF_METER_INST_POOL_NAME, + rc); + if (rc) + return rc; + free_cnt = ba_free_count(pool); + if (free_cnt == hw_entries[TF_RESC_TYPE_HW_METER_INST].stride) { + flush_entries[TF_RESC_TYPE_HW_METER_INST].start = 0; + flush_entries[TF_RESC_TYPE_HW_METER_INST].stride = 0; + } else { + flush_rc = 1; + } + + TF_RM_GET_POOLS(tfs, dir, &pool, + TF_MIRROR_POOL_NAME, + rc); + if (rc) + return rc; + free_cnt = ba_free_count(pool); + if (free_cnt == hw_entries[TF_RESC_TYPE_HW_MIRROR].stride) { + flush_entries[TF_RESC_TYPE_HW_MIRROR].start = 0; + flush_entries[TF_RESC_TYPE_HW_MIRROR].stride = 0; + } else { + flush_rc = 1; + } + + TF_RM_GET_POOLS(tfs, dir, &pool, + TF_UPAR_POOL_NAME, + rc); + if (rc) + return rc; + free_cnt = ba_free_count(pool); + if (free_cnt == hw_entries[TF_RESC_TYPE_HW_UPAR].stride) { + flush_entries[TF_RESC_TYPE_HW_UPAR].start = 0; + flush_entries[TF_RESC_TYPE_HW_UPAR].stride = 0; + } else { + flush_rc = 1; + } + + TF_RM_GET_POOLS(tfs, dir, &pool, + TF_SP_TCAM_POOL_NAME, + rc); + if (rc) + return rc; + free_cnt = ba_free_count(pool); + if (free_cnt == hw_entries[TF_RESC_TYPE_HW_SP_TCAM].stride) { + flush_entries[TF_RESC_TYPE_HW_SP_TCAM].start = 0; + flush_entries[TF_RESC_TYPE_HW_SP_TCAM].stride = 0; + } else { + flush_rc = 1; + } + + TF_RM_GET_POOLS(tfs, dir, &pool, + TF_L2_FUNC_POOL_NAME, + rc); + if (rc) + return rc; + free_cnt = ba_free_count(pool); + if (free_cnt == hw_entries[TF_RESC_TYPE_HW_L2_FUNC].stride) { + flush_entries[TF_RESC_TYPE_HW_L2_FUNC].start = 0; + flush_entries[TF_RESC_TYPE_HW_L2_FUNC].stride = 0; + } else { + flush_rc = 1; + } + + TF_RM_GET_POOLS(tfs, dir, &pool, + TF_FKB_POOL_NAME, + rc); + if (rc) + return rc; + free_cnt = ba_free_count(pool); + if (free_cnt == hw_entries[TF_RESC_TYPE_HW_FKB].stride) { + flush_entries[TF_RESC_TYPE_HW_FKB].start = 0; + flush_entries[TF_RESC_TYPE_HW_FKB].stride = 0; + } else { + flush_rc = 1; + } + + TF_RM_GET_POOLS(tfs, dir, &pool, + TF_TBL_SCOPE_POOL_NAME, + rc); + if (rc) + return rc; + free_cnt = ba_free_count(pool); + if (free_cnt == hw_entries[TF_RESC_TYPE_HW_TBL_SCOPE].stride) { + flush_entries[TF_RESC_TYPE_HW_TBL_SCOPE].start = 0; + flush_entries[TF_RESC_TYPE_HW_TBL_SCOPE].stride = 0; + } else { + PMD_DRV_LOG(ERR, "%s: TBL_SCOPE free_cnt:%d, entries:%d\n", + tf_dir_2_str(dir), + free_cnt, + hw_entries[TF_RESC_TYPE_HW_TBL_SCOPE].stride); + flush_rc = 1; + } + + TF_RM_GET_POOLS(tfs, dir, &pool, + TF_EPOCH0_POOL_NAME, + rc); + if (rc) + return rc; + free_cnt = ba_free_count(pool); + if (free_cnt == hw_entries[TF_RESC_TYPE_HW_EPOCH0].stride) { + flush_entries[TF_RESC_TYPE_HW_EPOCH0].start = 0; + flush_entries[TF_RESC_TYPE_HW_EPOCH0].stride = 0; + } else { + flush_rc = 1; + } + + TF_RM_GET_POOLS(tfs, dir, &pool, + TF_EPOCH1_POOL_NAME, + rc); + if (rc) + return rc; + free_cnt = ba_free_count(pool); + if (free_cnt == hw_entries[TF_RESC_TYPE_HW_EPOCH1].stride) { + flush_entries[TF_RESC_TYPE_HW_EPOCH1].start = 0; + flush_entries[TF_RESC_TYPE_HW_EPOCH1].stride = 0; + } else { + flush_rc = 1; + } + + TF_RM_GET_POOLS(tfs, dir, &pool, + TF_METADATA_POOL_NAME, + rc); + if (rc) + return rc; + free_cnt = ba_free_count(pool); + if (free_cnt == hw_entries[TF_RESC_TYPE_HW_METADATA].stride) { + flush_entries[TF_RESC_TYPE_HW_METADATA].start = 0; + flush_entries[TF_RESC_TYPE_HW_METADATA].stride = 0; + } else { + flush_rc = 1; + } + + TF_RM_GET_POOLS(tfs, dir, &pool, + TF_CT_STATE_POOL_NAME, + rc); + if (rc) + return rc; + free_cnt = ba_free_count(pool); + if (free_cnt == 
hw_entries[TF_RESC_TYPE_HW_CT_STATE].stride) { + flush_entries[TF_RESC_TYPE_HW_CT_STATE].start = 0; + flush_entries[TF_RESC_TYPE_HW_CT_STATE].stride = 0; + } else { + flush_rc = 1; + } + + TF_RM_GET_POOLS(tfs, dir, &pool, + TF_RANGE_PROF_POOL_NAME, + rc); + if (rc) + return rc; + free_cnt = ba_free_count(pool); + if (free_cnt == hw_entries[TF_RESC_TYPE_HW_RANGE_PROF].stride) { + flush_entries[TF_RESC_TYPE_HW_RANGE_PROF].start = 0; + flush_entries[TF_RESC_TYPE_HW_RANGE_PROF].stride = 0; + } else { + flush_rc = 1; + } + + TF_RM_GET_POOLS(tfs, dir, &pool, + TF_RANGE_ENTRY_POOL_NAME, + rc); + if (rc) + return rc; + free_cnt = ba_free_count(pool); + if (free_cnt == hw_entries[TF_RESC_TYPE_HW_RANGE_ENTRY].stride) { + flush_entries[TF_RESC_TYPE_HW_RANGE_ENTRY].start = 0; + flush_entries[TF_RESC_TYPE_HW_RANGE_ENTRY].stride = 0; + } else { + flush_rc = 1; + } + + TF_RM_GET_POOLS(tfs, dir, &pool, + TF_LAG_ENTRY_POOL_NAME, + rc); + if (rc) + return rc; + free_cnt = ba_free_count(pool); + if (free_cnt == hw_entries[TF_RESC_TYPE_HW_LAG_ENTRY].stride) { + flush_entries[TF_RESC_TYPE_HW_LAG_ENTRY].start = 0; + flush_entries[TF_RESC_TYPE_HW_LAG_ENTRY].stride = 0; + } else { + flush_rc = 1; + } + + return flush_rc; +} + +/** + * Helper function used to prune a SRAM resource array to only hold + * elements that needs to be flushed. + * + * [in] tfs + * Session handle + * + * [in] dir + * Receive or transmit direction + * + * [in] hw_entries + * Master SRAM Resource data base + * + * [in/out] flush_entries + * Pruned SRAM Resource database of entries to be flushed. This + * array should be passed in as a complete copy of the master SRAM + * Resource database. The outgoing result will be a pruned version + * based on the result of the requested checking + * + * Returns: + * 0 - Success, no flush required + * 1 - Success, flush required + * -1 - Internal error + */ +static int +tf_rm_sram_to_flush(struct tf_session *tfs, + enum tf_dir dir, + struct tf_rm_entry *sram_entries, + struct tf_rm_entry *flush_entries) +{ + int rc; + int flush_rc = 0; + int free_cnt; + struct bitalloc *pool; + + /* Check all the sram resource pools and check for left over + * elements. Any found will result in the complete pool of a + * type to get invalidated. 
+ */ + + TF_RM_GET_POOLS(tfs, dir, &pool, + TF_SRAM_FULL_ACTION_POOL_NAME, + rc); + if (rc) + return rc; + free_cnt = ba_free_count(pool); + if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_FULL_ACTION].stride) { + flush_entries[TF_RESC_TYPE_SRAM_FULL_ACTION].start = 0; + flush_entries[TF_RESC_TYPE_SRAM_FULL_ACTION].stride = 0; + } else { + flush_rc = 1; + } + + /* Only pools for RX direction */ + if (dir == TF_DIR_RX) { + TF_RM_GET_POOLS_RX(tfs, &pool, + TF_SRAM_MCG_POOL_NAME); + if (rc) + return rc; + free_cnt = ba_free_count(pool); + if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_MCG].stride) { + flush_entries[TF_RESC_TYPE_SRAM_MCG].start = 0; + flush_entries[TF_RESC_TYPE_SRAM_MCG].stride = 0; + } else { + flush_rc = 1; + } + } else { + /* Always prune TX direction */ + flush_entries[TF_RESC_TYPE_SRAM_MCG].start = 0; + flush_entries[TF_RESC_TYPE_SRAM_MCG].stride = 0; + } + + TF_RM_GET_POOLS(tfs, dir, &pool, + TF_SRAM_ENCAP_8B_POOL_NAME, + rc); + if (rc) + return rc; + free_cnt = ba_free_count(pool); + if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_ENCAP_8B].stride) { + flush_entries[TF_RESC_TYPE_SRAM_ENCAP_8B].start = 0; + flush_entries[TF_RESC_TYPE_SRAM_ENCAP_8B].stride = 0; + } else { + flush_rc = 1; + } + + TF_RM_GET_POOLS(tfs, dir, &pool, + TF_SRAM_ENCAP_16B_POOL_NAME, + rc); + if (rc) + return rc; + free_cnt = ba_free_count(pool); + if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_ENCAP_16B].stride) { + flush_entries[TF_RESC_TYPE_SRAM_ENCAP_16B].start = 0; + flush_entries[TF_RESC_TYPE_SRAM_ENCAP_16B].stride = 0; + } else { + flush_rc = 1; + } + + /* Only pools for TX direction */ + if (dir == TF_DIR_TX) { + TF_RM_GET_POOLS_TX(tfs, &pool, + TF_SRAM_ENCAP_64B_POOL_NAME); + if (rc) + return rc; + free_cnt = ba_free_count(pool); + if (free_cnt == + sram_entries[TF_RESC_TYPE_SRAM_ENCAP_64B].stride) { + flush_entries[TF_RESC_TYPE_SRAM_ENCAP_64B].start = 0; + flush_entries[TF_RESC_TYPE_SRAM_ENCAP_64B].stride = 0; + } else { + flush_rc = 1; + } + } else { + /* Always prune RX direction */ + flush_entries[TF_RESC_TYPE_SRAM_ENCAP_64B].start = 0; + flush_entries[TF_RESC_TYPE_SRAM_ENCAP_64B].stride = 0; + } + + TF_RM_GET_POOLS(tfs, dir, &pool, + TF_SRAM_SP_SMAC_POOL_NAME, + rc); + if (rc) + return rc; + free_cnt = ba_free_count(pool); + if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_SP_SMAC].stride) { + flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC].start = 0; + flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC].stride = 0; + } else { + flush_rc = 1; + } + + /* Only pools for TX direction */ + if (dir == TF_DIR_TX) { + TF_RM_GET_POOLS_TX(tfs, &pool, + TF_SRAM_SP_SMAC_IPV4_POOL_NAME); + if (rc) + return rc; + free_cnt = ba_free_count(pool); + if (free_cnt == + sram_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV4].stride) { + flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV4].start = 0; + flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV4].stride = + 0; + } else { + flush_rc = 1; + } + } else { + /* Always prune RX direction */ + flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV4].start = 0; + flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV4].stride = 0; + } + + /* Only pools for TX direction */ + if (dir == TF_DIR_TX) { + TF_RM_GET_POOLS_TX(tfs, &pool, + TF_SRAM_SP_SMAC_IPV6_POOL_NAME); + if (rc) + return rc; + free_cnt = ba_free_count(pool); + if (free_cnt == + sram_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV6].stride) { + flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV6].start = 0; + flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV6].stride = + 0; + } else { + flush_rc = 1; + } + } else { + /* Always prune RX direction */ + 
flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV6].start = 0; + flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV6].stride = 0; + } + + TF_RM_GET_POOLS(tfs, dir, &pool, + TF_SRAM_STATS_64B_POOL_NAME, + rc); + if (rc) + return rc; + free_cnt = ba_free_count(pool); + if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_COUNTER_64B].stride) { + flush_entries[TF_RESC_TYPE_SRAM_COUNTER_64B].start = 0; + flush_entries[TF_RESC_TYPE_SRAM_COUNTER_64B].stride = 0; + } else { + flush_rc = 1; + } + + TF_RM_GET_POOLS(tfs, dir, &pool, + TF_SRAM_NAT_SPORT_POOL_NAME, + rc); + if (rc) + return rc; + free_cnt = ba_free_count(pool); + if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_NAT_SPORT].stride) { + flush_entries[TF_RESC_TYPE_SRAM_NAT_SPORT].start = 0; + flush_entries[TF_RESC_TYPE_SRAM_NAT_SPORT].stride = 0; + } else { + flush_rc = 1; + } + + TF_RM_GET_POOLS(tfs, dir, &pool, + TF_SRAM_NAT_DPORT_POOL_NAME, + rc); + if (rc) + return rc; + free_cnt = ba_free_count(pool); + if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_NAT_DPORT].stride) { + flush_entries[TF_RESC_TYPE_SRAM_NAT_DPORT].start = 0; + flush_entries[TF_RESC_TYPE_SRAM_NAT_DPORT].stride = 0; + } else { + flush_rc = 1; + } + + TF_RM_GET_POOLS(tfs, dir, &pool, + TF_SRAM_NAT_S_IPV4_POOL_NAME, + rc); + if (rc) + return rc; + free_cnt = ba_free_count(pool); + if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_NAT_S_IPV4].stride) { + flush_entries[TF_RESC_TYPE_SRAM_NAT_S_IPV4].start = 0; + flush_entries[TF_RESC_TYPE_SRAM_NAT_S_IPV4].stride = 0; + } else { + flush_rc = 1; + } + + TF_RM_GET_POOLS(tfs, dir, &pool, + TF_SRAM_NAT_D_IPV4_POOL_NAME, + rc); + if (rc) + return rc; + free_cnt = ba_free_count(pool); + if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_NAT_D_IPV4].stride) { + flush_entries[TF_RESC_TYPE_SRAM_NAT_D_IPV4].start = 0; + flush_entries[TF_RESC_TYPE_SRAM_NAT_D_IPV4].stride = 0; + } else { + flush_rc = 1; + } + + return flush_rc; +} + +/** + * Helper function used to generate an error log for the HW types that + * needs to be flushed. The types should have been cleaned up ahead of + * invoking tf_close_session. + * + * [in] hw_entries + * HW Resource database holding elements to be flushed + */ +static void +tf_rm_log_hw_flush(enum tf_dir dir, + struct tf_rm_entry *hw_entries) +{ + int i; + + /* Walk the hw flush array and log the types that wasn't + * cleaned up. + */ + for (i = 0; i < TF_RESC_TYPE_HW_MAX; i++) { + if (hw_entries[i].stride != 0) + PMD_DRV_LOG(ERR, + "%s: %s was not cleaned up\n", + tf_dir_2_str(dir), + tf_hcapi_hw_2_str(i)); + } +} + +/** + * Helper function used to generate an error log for the SRAM types + * that needs to be flushed. The types should have been cleaned up + * ahead of invoking tf_close_session. + * + * [in] sram_entries + * SRAM Resource database holding elements to be flushed + */ +static void +tf_rm_log_sram_flush(enum tf_dir dir, + struct tf_rm_entry *sram_entries) +{ + int i; + + /* Walk the sram flush array and log the types that wasn't + * cleaned up. + */ + for (i = 0; i < TF_RESC_TYPE_SRAM_MAX; i++) { + if (sram_entries[i].stride != 0) + PMD_DRV_LOG(ERR, + "%s: %s was not cleaned up\n", + tf_dir_2_str(dir), + tf_hcapi_sram_2_str(i)); + } +} + +void +tf_rm_init(struct tf *tfp __rte_unused) +{ + struct tf_session *tfs = + (struct tf_session *)(tfp->session->core_data); + + /* This version is host specific and should be checked against + * when attaching as there is no guarantee that a secondary + * would run from same image version. 
+ */ + tfs->ver.major = TF_SESSION_VER_MAJOR; + tfs->ver.minor = TF_SESSION_VER_MINOR; + tfs->ver.update = TF_SESSION_VER_UPDATE; + + tfs->session_id.id = 0; + tfs->ref_count = 0; + + /* Initialization of Table Scopes */ + /* ll_init(&tfs->tbl_scope_ll); */ + + /* Initialization of HW and SRAM resource DB */ + memset(&tfs->resc, 0, sizeof(struct tf_rm_db)); + + /* Initialization of HW Resource Pools */ + ba_init(tfs->TF_L2_CTXT_TCAM_POOL_NAME_RX, TF_NUM_L2_CTXT_TCAM); + ba_init(tfs->TF_L2_CTXT_TCAM_POOL_NAME_TX, TF_NUM_L2_CTXT_TCAM); + ba_init(tfs->TF_PROF_FUNC_POOL_NAME_RX, TF_NUM_PROF_FUNC); + ba_init(tfs->TF_PROF_FUNC_POOL_NAME_TX, TF_NUM_PROF_FUNC); + ba_init(tfs->TF_PROF_TCAM_POOL_NAME_RX, TF_NUM_PROF_TCAM); + ba_init(tfs->TF_PROF_TCAM_POOL_NAME_TX, TF_NUM_PROF_TCAM); + ba_init(tfs->TF_EM_PROF_ID_POOL_NAME_RX, TF_NUM_EM_PROF_ID); + ba_init(tfs->TF_EM_PROF_ID_POOL_NAME_TX, TF_NUM_EM_PROF_ID); + + /* TBD, how do we want to handle EM records ?*/ + /* EM Records should not be controlled by way of a pool */ + + ba_init(tfs->TF_WC_TCAM_PROF_ID_POOL_NAME_RX, TF_NUM_WC_PROF_ID); + ba_init(tfs->TF_WC_TCAM_PROF_ID_POOL_NAME_TX, TF_NUM_WC_PROF_ID); + ba_init(tfs->TF_WC_TCAM_POOL_NAME_RX, TF_NUM_WC_TCAM_ROW); + ba_init(tfs->TF_WC_TCAM_POOL_NAME_TX, TF_NUM_WC_TCAM_ROW); + ba_init(tfs->TF_METER_PROF_POOL_NAME_RX, TF_NUM_METER_PROF); + ba_init(tfs->TF_METER_PROF_POOL_NAME_TX, TF_NUM_METER_PROF); + ba_init(tfs->TF_METER_INST_POOL_NAME_RX, TF_NUM_METER); + ba_init(tfs->TF_METER_INST_POOL_NAME_TX, TF_NUM_METER); + ba_init(tfs->TF_MIRROR_POOL_NAME_RX, TF_NUM_MIRROR); + ba_init(tfs->TF_MIRROR_POOL_NAME_TX, TF_NUM_MIRROR); + ba_init(tfs->TF_UPAR_POOL_NAME_RX, TF_NUM_UPAR); + ba_init(tfs->TF_UPAR_POOL_NAME_TX, TF_NUM_UPAR); + + ba_init(tfs->TF_SP_TCAM_POOL_NAME_RX, TF_NUM_SP_TCAM); + ba_init(tfs->TF_SP_TCAM_POOL_NAME_TX, TF_NUM_SP_TCAM); + + ba_init(tfs->TF_FKB_POOL_NAME_RX, TF_NUM_FKB); + ba_init(tfs->TF_FKB_POOL_NAME_TX, TF_NUM_FKB); + + ba_init(tfs->TF_TBL_SCOPE_POOL_NAME_RX, TF_NUM_TBL_SCOPE); + ba_init(tfs->TF_TBL_SCOPE_POOL_NAME_TX, TF_NUM_TBL_SCOPE); + ba_init(tfs->TF_L2_FUNC_POOL_NAME_RX, TF_NUM_L2_FUNC); + ba_init(tfs->TF_L2_FUNC_POOL_NAME_TX, TF_NUM_L2_FUNC); + ba_init(tfs->TF_EPOCH0_POOL_NAME_RX, TF_NUM_EPOCH0); + ba_init(tfs->TF_EPOCH0_POOL_NAME_TX, TF_NUM_EPOCH0); + ba_init(tfs->TF_EPOCH1_POOL_NAME_RX, TF_NUM_EPOCH1); + ba_init(tfs->TF_EPOCH1_POOL_NAME_TX, TF_NUM_EPOCH1); + ba_init(tfs->TF_METADATA_POOL_NAME_RX, TF_NUM_METADATA); + ba_init(tfs->TF_METADATA_POOL_NAME_TX, TF_NUM_METADATA); + ba_init(tfs->TF_CT_STATE_POOL_NAME_RX, TF_NUM_CT_STATE); + ba_init(tfs->TF_CT_STATE_POOL_NAME_TX, TF_NUM_CT_STATE); + ba_init(tfs->TF_RANGE_PROF_POOL_NAME_RX, TF_NUM_RANGE_PROF); + ba_init(tfs->TF_RANGE_PROF_POOL_NAME_TX, TF_NUM_RANGE_PROF); + ba_init(tfs->TF_RANGE_ENTRY_POOL_NAME_RX, TF_NUM_RANGE_ENTRY); + ba_init(tfs->TF_RANGE_ENTRY_POOL_NAME_TX, TF_NUM_RANGE_ENTRY); + ba_init(tfs->TF_LAG_ENTRY_POOL_NAME_RX, TF_NUM_LAG_ENTRY); + ba_init(tfs->TF_LAG_ENTRY_POOL_NAME_TX, TF_NUM_LAG_ENTRY); + + /* Initialization of SRAM Resource Pools + * These pools are set to the TFLIB defined MAX sizes not + * AFM's HW max as to limit the memory consumption + */ + ba_init(tfs->TF_SRAM_FULL_ACTION_POOL_NAME_RX, + TF_RSVD_SRAM_FULL_ACTION_RX); + ba_init(tfs->TF_SRAM_FULL_ACTION_POOL_NAME_TX, + TF_RSVD_SRAM_FULL_ACTION_TX); + /* Only Multicast Group on RX is supported */ + ba_init(tfs->TF_SRAM_MCG_POOL_NAME_RX, + TF_RSVD_SRAM_MCG_RX); + ba_init(tfs->TF_SRAM_ENCAP_8B_POOL_NAME_RX, + TF_RSVD_SRAM_ENCAP_8B_RX); + 
ba_init(tfs->TF_SRAM_ENCAP_8B_POOL_NAME_TX, + TF_RSVD_SRAM_ENCAP_8B_TX); + ba_init(tfs->TF_SRAM_ENCAP_16B_POOL_NAME_RX, + TF_RSVD_SRAM_ENCAP_16B_RX); + ba_init(tfs->TF_SRAM_ENCAP_16B_POOL_NAME_TX, + TF_RSVD_SRAM_ENCAP_16B_TX); + /* Only Encap 64B on TX is supported */ + ba_init(tfs->TF_SRAM_ENCAP_64B_POOL_NAME_TX, + TF_RSVD_SRAM_ENCAP_64B_TX); + ba_init(tfs->TF_SRAM_SP_SMAC_POOL_NAME_RX, + TF_RSVD_SRAM_SP_SMAC_RX); + ba_init(tfs->TF_SRAM_SP_SMAC_POOL_NAME_TX, + TF_RSVD_SRAM_SP_SMAC_TX); + /* Only SP SMAC IPv4 on TX is supported */ + ba_init(tfs->TF_SRAM_SP_SMAC_IPV4_POOL_NAME_TX, + TF_RSVD_SRAM_SP_SMAC_IPV4_TX); + /* Only SP SMAC IPv6 on TX is supported */ + ba_init(tfs->TF_SRAM_SP_SMAC_IPV6_POOL_NAME_TX, + TF_RSVD_SRAM_SP_SMAC_IPV6_TX); + ba_init(tfs->TF_SRAM_STATS_64B_POOL_NAME_RX, + TF_RSVD_SRAM_COUNTER_64B_RX); + ba_init(tfs->TF_SRAM_STATS_64B_POOL_NAME_TX, + TF_RSVD_SRAM_COUNTER_64B_TX); + ba_init(tfs->TF_SRAM_NAT_SPORT_POOL_NAME_RX, + TF_RSVD_SRAM_NAT_SPORT_RX); + ba_init(tfs->TF_SRAM_NAT_SPORT_POOL_NAME_TX, + TF_RSVD_SRAM_NAT_SPORT_TX); + ba_init(tfs->TF_SRAM_NAT_DPORT_POOL_NAME_RX, + TF_RSVD_SRAM_NAT_DPORT_RX); + ba_init(tfs->TF_SRAM_NAT_DPORT_POOL_NAME_TX, + TF_RSVD_SRAM_NAT_DPORT_TX); + ba_init(tfs->TF_SRAM_NAT_S_IPV4_POOL_NAME_RX, + TF_RSVD_SRAM_NAT_S_IPV4_RX); + ba_init(tfs->TF_SRAM_NAT_S_IPV4_POOL_NAME_TX, + TF_RSVD_SRAM_NAT_S_IPV4_TX); + ba_init(tfs->TF_SRAM_NAT_D_IPV4_POOL_NAME_RX, + TF_RSVD_SRAM_NAT_D_IPV4_RX); + ba_init(tfs->TF_SRAM_NAT_D_IPV4_POOL_NAME_TX, + TF_RSVD_SRAM_NAT_D_IPV4_TX); + + /* Initialization of pools local to TF Core */ + ba_init(tfs->TF_L2_CTXT_REMAP_POOL_NAME_RX, TF_NUM_L2_CTXT_TCAM); + ba_init(tfs->TF_L2_CTXT_REMAP_POOL_NAME_TX, TF_NUM_L2_CTXT_TCAM); +} + +int +tf_rm_allocate_validate(struct tf *tfp) +{ + int rc; + int i; + + for (i = 0; i < TF_DIR_MAX; i++) { + rc = tf_rm_allocate_validate_hw(tfp, i); + if (rc) + return rc; + rc = tf_rm_allocate_validate_sram(tfp, i); + if (rc) + return rc; + } + + /* With both HW and SRAM allocated and validated we can + * 'scrub' the reservation on the pools. + */ + tf_rm_reserve_hw(tfp); + tf_rm_reserve_sram(tfp); + + return rc; +} + +int +tf_rm_close(struct tf *tfp) +{ + int rc; + int rc_close = 0; + int i; + struct tf_rm_entry *hw_entries; + struct tf_rm_entry *hw_flush_entries; + struct tf_rm_entry *sram_entries; + struct tf_rm_entry *sram_flush_entries; + struct tf_session *tfs __rte_unused = + (struct tf_session *)(tfp->session->core_data); + + struct tf_rm_db flush_resc = tfs->resc; + + /* On close it is assumed that the session has already cleaned + * up all its resources, individually, while destroying its + * flows. No checking is performed thus the behavior is as + * follows. + * + * Session RM will signal FW to release session resources. FW + * will perform invalidation of all the allocated entries + * (assures any outstanding resources has been cleared, then + * free the FW RM instance. + * + * Session will then be freed by tf_close_session() thus there + * is no need to clean each resource pool as the whole session + * is going away. 
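+ *
+ * Note: if lingering resources are detected they are still flushed
+ * to firmware below, and the close returns -ENOTEMPTY so the caller
+ * knows that per-flow cleanup was incomplete.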
+ */
+
+	for (i = 0; i < TF_DIR_MAX; i++) {
+		if (i == TF_DIR_RX) {
+			hw_entries = tfs->resc.rx.hw_entry;
+			hw_flush_entries = flush_resc.rx.hw_entry;
+			sram_entries = tfs->resc.rx.sram_entry;
+			sram_flush_entries = flush_resc.rx.sram_entry;
+		} else {
+			hw_entries = tfs->resc.tx.hw_entry;
+			hw_flush_entries = flush_resc.tx.hw_entry;
+			sram_entries = tfs->resc.tx.sram_entry;
+			sram_flush_entries = flush_resc.tx.sram_entry;
+		}
+
+		/* Check for any not previously freed HW resources and
+		 * flush if required.
+		 */
+		rc = tf_rm_hw_to_flush(tfs, i, hw_entries, hw_flush_entries);
+		if (rc) {
+			rc_close = -ENOTEMPTY;
+			/* Log error */
+			PMD_DRV_LOG(ERR,
+				    "%s, lingering HW resources\n",
+				    tf_dir_2_str(i));
+
+			/* Log the entries to be flushed */
+			tf_rm_log_hw_flush(i, hw_flush_entries);
+			rc = tf_msg_session_hw_resc_flush(tfp,
+							  i,
+							  hw_flush_entries);
+			if (rc) {
+				rc_close = rc;
+				/* Log error */
+				PMD_DRV_LOG(ERR,
+					    "%s, HW flush failed\n",
+					    tf_dir_2_str(i));
+			}
+		}
+
+		/* Check for any not previously freed SRAM resources
+		 * and flush if required.
+		 */
+		rc = tf_rm_sram_to_flush(tfs,
+					 i,
+					 sram_entries,
+					 sram_flush_entries);
+		if (rc) {
+			rc_close = -ENOTEMPTY;
+			/* Log error */
+			PMD_DRV_LOG(ERR,
+				    "%s, lingering SRAM resources\n",
+				    tf_dir_2_str(i));
+
+			/* Log the entries to be flushed */
+			tf_rm_log_sram_flush(i, sram_flush_entries);
+
+			rc = tf_msg_session_sram_resc_flush(tfp,
+							    i,
+							    sram_flush_entries);
+			if (rc) {
+				rc_close = rc;
+				/* Log error */
+				PMD_DRV_LOG(ERR,
+					    "%s, SRAM flush failed\n",
+					    tf_dir_2_str(i));
+			}
+		}
+
+		rc = tf_msg_session_hw_resc_free(tfp, i, hw_entries);
+		if (rc) {
+			rc_close = rc;
+			/* Log error */
+			PMD_DRV_LOG(ERR,
+				    "%s, HW free failed\n",
+				    tf_dir_2_str(i));
+		}
+
+		rc = tf_msg_session_sram_resc_free(tfp, i, sram_entries);
+		if (rc) {
+			rc_close = rc;
+			/* Log error */
+			PMD_DRV_LOG(ERR,
+				    "%s, SRAM free failed\n",
+				    tf_dir_2_str(i));
+		}
+	}
+
+	return rc_close;
+}
+
+#if (TF_SHADOW == 1)
+int
+tf_rm_shadow_db_init(struct tf_session *tfs)
+{
+	int rc = 1;
+
+	return rc;
+}
+#endif /* TF_SHADOW */
+
+int
+tf_rm_lookup_tcam_type_pool(struct tf_session *tfs,
+			    enum tf_dir dir,
+			    enum tf_tcam_tbl_type type,
+			    struct bitalloc **pool)
+{
+	int rc = -EOPNOTSUPP;
+
+	*pool = NULL;
+
+	switch (type) {
+	case TF_TCAM_TBL_TYPE_L2_CTXT_TCAM:
+		TF_RM_GET_POOLS(tfs, dir, pool,
+				TF_L2_CTXT_TCAM_POOL_NAME,
+				rc);
+		break;
+	case TF_TCAM_TBL_TYPE_PROF_TCAM:
+		TF_RM_GET_POOLS(tfs, dir, pool,
+				TF_PROF_TCAM_POOL_NAME,
+				rc);
+		break;
+	case TF_TCAM_TBL_TYPE_WC_TCAM:
+		TF_RM_GET_POOLS(tfs, dir, pool,
+				TF_WC_TCAM_POOL_NAME,
+				rc);
+		break;
+	case TF_TCAM_TBL_TYPE_VEB_TCAM:
+	case TF_TCAM_TBL_TYPE_SP_TCAM:
+	case TF_TCAM_TBL_TYPE_CT_RULE_TCAM:
+	default:
+		break;
+	}
+
+	if (rc == -EOPNOTSUPP) {
+		PMD_DRV_LOG(ERR,
+			    "dir:%d, Tcam type not supported, type:%d\n",
+			    dir,
+			    type);
+		return rc;
+	} else if (rc == -1) {
+		PMD_DRV_LOG(ERR,
+			    "%s, Tcam type lookup failed, type:%d\n",
+			    tf_dir_2_str(dir),
+			    type);
+		return rc;
+	}
+
+	return 0;
+}
+
+int
+tf_rm_lookup_tbl_type_pool(struct tf_session *tfs,
+			   enum tf_dir dir,
+			   enum tf_tbl_type type,
+			   struct bitalloc **pool)
+{
+	int rc = -EOPNOTSUPP;
+
+	*pool = NULL;
+
+	switch (type) {
+	case TF_TBL_TYPE_FULL_ACT_RECORD:
+		TF_RM_GET_POOLS(tfs, dir, pool,
+				TF_SRAM_FULL_ACTION_POOL_NAME,
+				rc);
+		break;
+	case TF_TBL_TYPE_MCAST_GROUPS:
+		/* No pools for TX direction, so bail out */
+		if (dir == TF_DIR_TX)
+			break;
+		TF_RM_GET_POOLS_RX(tfs, pool,
+				   TF_SRAM_MCG_POOL_NAME);
+		rc = 0;
+		break;
+	case TF_TBL_TYPE_ACT_ENCAP_8B:
+		TF_RM_GET_POOLS(tfs, dir, pool,
+				TF_SRAM_ENCAP_8B_POOL_NAME,
+				rc);
+		break;
+	case TF_TBL_TYPE_ACT_ENCAP_16B:
+		TF_RM_GET_POOLS(tfs, dir, pool,
+				TF_SRAM_ENCAP_16B_POOL_NAME,
+				rc);
+		break;
+	case TF_TBL_TYPE_ACT_ENCAP_64B:
+		/* No pools for RX direction, so bail out */
+		if (dir == TF_DIR_RX)
+			break;
+		TF_RM_GET_POOLS_TX(tfs, pool,
+				   TF_SRAM_ENCAP_64B_POOL_NAME);
+		rc = 0;
+		break;
+	case TF_TBL_TYPE_ACT_SP_SMAC:
+		TF_RM_GET_POOLS(tfs, dir, pool,
+				TF_SRAM_SP_SMAC_POOL_NAME,
+				rc);
+		break;
+	case TF_TBL_TYPE_ACT_SP_SMAC_IPV4:
+		/* No pools for RX direction, so bail out */
+		if (dir == TF_DIR_RX)
+			break;
+		TF_RM_GET_POOLS_TX(tfs, pool,
+				   TF_SRAM_SP_SMAC_IPV4_POOL_NAME);
+		rc = 0;
+		break;
+	case TF_TBL_TYPE_ACT_SP_SMAC_IPV6:
+		/* No pools for RX direction, so bail out */
+		if (dir == TF_DIR_RX)
+			break;
+		TF_RM_GET_POOLS_TX(tfs, pool,
+				   TF_SRAM_SP_SMAC_IPV6_POOL_NAME);
+		rc = 0;
+		break;
+	case TF_TBL_TYPE_ACT_STATS_64:
+		TF_RM_GET_POOLS(tfs, dir, pool,
+				TF_SRAM_STATS_64B_POOL_NAME,
+				rc);
+		break;
+	case TF_TBL_TYPE_ACT_MODIFY_SPORT:
+		TF_RM_GET_POOLS(tfs, dir, pool,
+				TF_SRAM_NAT_SPORT_POOL_NAME,
+				rc);
+		break;
+	case TF_TBL_TYPE_ACT_MODIFY_IPV4_SRC:
+		TF_RM_GET_POOLS(tfs, dir, pool,
+				TF_SRAM_NAT_S_IPV4_POOL_NAME,
+				rc);
+		break;
+	case TF_TBL_TYPE_ACT_MODIFY_IPV4_DEST:
+		TF_RM_GET_POOLS(tfs, dir, pool,
+				TF_SRAM_NAT_D_IPV4_POOL_NAME,
+				rc);
+		break;
+	case TF_TBL_TYPE_METER_PROF:
+		TF_RM_GET_POOLS(tfs, dir, pool,
+				TF_METER_PROF_POOL_NAME,
+				rc);
+		break;
+	case TF_TBL_TYPE_METER_INST:
+		TF_RM_GET_POOLS(tfs, dir, pool,
+				TF_METER_INST_POOL_NAME,
+				rc);
+		break;
+	case TF_TBL_TYPE_MIRROR_CONFIG:
+		TF_RM_GET_POOLS(tfs, dir, pool,
+				TF_MIRROR_POOL_NAME,
+				rc);
+		break;
+	case TF_TBL_TYPE_UPAR:
+		TF_RM_GET_POOLS(tfs, dir, pool,
+				TF_UPAR_POOL_NAME,
+				rc);
+		break;
+	case TF_TBL_TYPE_EPOCH0:
+		TF_RM_GET_POOLS(tfs, dir, pool,
+				TF_EPOCH0_POOL_NAME,
+				rc);
+		break;
+	case TF_TBL_TYPE_EPOCH1:
+		TF_RM_GET_POOLS(tfs, dir, pool,
+				TF_EPOCH1_POOL_NAME,
+				rc);
+		break;
+	case TF_TBL_TYPE_METADATA:
+		TF_RM_GET_POOLS(tfs, dir, pool,
+				TF_METADATA_POOL_NAME,
+				rc);
+		break;
+	case TF_TBL_TYPE_CT_STATE:
+		TF_RM_GET_POOLS(tfs, dir, pool,
+				TF_CT_STATE_POOL_NAME,
+				rc);
+		break;
+	case TF_TBL_TYPE_RANGE_PROF:
+		TF_RM_GET_POOLS(tfs, dir, pool,
+				TF_RANGE_PROF_POOL_NAME,
+				rc);
+		break;
+	case TF_TBL_TYPE_RANGE_ENTRY:
+		TF_RM_GET_POOLS(tfs, dir, pool,
+				TF_RANGE_ENTRY_POOL_NAME,
+				rc);
+		break;
+	case TF_TBL_TYPE_LAG:
+		TF_RM_GET_POOLS(tfs, dir, pool,
+				TF_LAG_ENTRY_POOL_NAME,
+				rc);
+		break;
+	/* Not yet supported */
+	case TF_TBL_TYPE_ACT_ENCAP_32B:
+	case TF_TBL_TYPE_ACT_MODIFY_IPV6_DEST:
+	case TF_TBL_TYPE_ACT_MODIFY_IPV6_SRC:
+	case TF_TBL_TYPE_VNIC_SVIF:
+		break;
+	/* No bitalloc pools for these types */
+	case TF_TBL_TYPE_EXT:
+	default:
+		break;
+	}
+
+	if (rc == -EOPNOTSUPP) {
+		PMD_DRV_LOG(ERR,
+			    "dir:%d, Table type not supported, type:%d\n",
+			    dir,
+			    type);
+		return rc;
+	} else if (rc == -1) {
+		PMD_DRV_LOG(ERR,
+			    "dir:%d, Table type lookup failed, type:%d\n",
+			    dir,
+			    type);
+		return rc;
+	}
+
+	return 0;
+}
+
+int
+tf_rm_convert_tbl_type(enum tf_tbl_type type,
+		       uint32_t *hcapi_type)
+{
+	int rc = 0;
+
+	switch (type) {
+	case TF_TBL_TYPE_FULL_ACT_RECORD:
+		*hcapi_type = TF_RESC_TYPE_SRAM_FULL_ACTION;
+		break;
+	case TF_TBL_TYPE_MCAST_GROUPS:
+		*hcapi_type = TF_RESC_TYPE_SRAM_MCG;
+		break;
+	case TF_TBL_TYPE_ACT_ENCAP_8B:
+		*hcapi_type = TF_RESC_TYPE_SRAM_ENCAP_8B;
+		break;
+	case TF_TBL_TYPE_ACT_ENCAP_16B:
+		*hcapi_type = TF_RESC_TYPE_SRAM_ENCAP_16B;
+		break;
+	case
TF_TBL_TYPE_ACT_ENCAP_64B: + *hcapi_type = TF_RESC_TYPE_SRAM_ENCAP_64B; + break; + case TF_TBL_TYPE_ACT_SP_SMAC: + *hcapi_type = TF_RESC_TYPE_SRAM_SP_SMAC; + break; + case TF_TBL_TYPE_ACT_SP_SMAC_IPV4: + *hcapi_type = TF_RESC_TYPE_SRAM_SP_SMAC_IPV4; + break; + case TF_TBL_TYPE_ACT_SP_SMAC_IPV6: + *hcapi_type = TF_RESC_TYPE_SRAM_SP_SMAC_IPV6; + break; + case TF_TBL_TYPE_ACT_STATS_64: + *hcapi_type = TF_RESC_TYPE_SRAM_COUNTER_64B; + break; + case TF_TBL_TYPE_ACT_MODIFY_SPORT: + *hcapi_type = TF_RESC_TYPE_SRAM_NAT_SPORT; + break; + case TF_TBL_TYPE_ACT_MODIFY_DPORT: + *hcapi_type = TF_RESC_TYPE_SRAM_NAT_DPORT; + break; + case TF_TBL_TYPE_ACT_MODIFY_IPV4_SRC: + *hcapi_type = TF_RESC_TYPE_SRAM_NAT_S_IPV4; + break; + case TF_TBL_TYPE_ACT_MODIFY_IPV4_DEST: + *hcapi_type = TF_RESC_TYPE_SRAM_NAT_D_IPV4; + break; + case TF_TBL_TYPE_METER_PROF: + *hcapi_type = TF_RESC_TYPE_HW_METER_PROF; + break; + case TF_TBL_TYPE_METER_INST: + *hcapi_type = TF_RESC_TYPE_HW_METER_INST; + break; + case TF_TBL_TYPE_MIRROR_CONFIG: + *hcapi_type = TF_RESC_TYPE_HW_MIRROR; + break; + case TF_TBL_TYPE_UPAR: + *hcapi_type = TF_RESC_TYPE_HW_UPAR; + break; + case TF_TBL_TYPE_EPOCH0: + *hcapi_type = TF_RESC_TYPE_HW_EPOCH0; + break; + case TF_TBL_TYPE_EPOCH1: + *hcapi_type = TF_RESC_TYPE_HW_EPOCH1; + break; + case TF_TBL_TYPE_METADATA: + *hcapi_type = TF_RESC_TYPE_HW_METADATA; + break; + case TF_TBL_TYPE_CT_STATE: + *hcapi_type = TF_RESC_TYPE_HW_CT_STATE; + break; + case TF_TBL_TYPE_RANGE_PROF: + *hcapi_type = TF_RESC_TYPE_HW_RANGE_PROF; + break; + case TF_TBL_TYPE_RANGE_ENTRY: + *hcapi_type = TF_RESC_TYPE_HW_RANGE_ENTRY; + break; + case TF_TBL_TYPE_LAG: + *hcapi_type = TF_RESC_TYPE_HW_LAG_ENTRY; + break; + /* Not yet supported */ + case TF_TBL_TYPE_ACT_ENCAP_32B: + case TF_TBL_TYPE_ACT_MODIFY_IPV6_DEST: + case TF_TBL_TYPE_ACT_MODIFY_IPV6_SRC: + case TF_TBL_TYPE_VNIC_SVIF: + case TF_TBL_TYPE_EXT: /* No pools for this type */ + default: + *hcapi_type = -1; + rc = -EOPNOTSUPP; + } + + return rc; +} + +int +tf_rm_convert_index(struct tf_session *tfs, + enum tf_dir dir, + enum tf_tbl_type type, + enum tf_rm_convert_type c_type, + uint32_t index, + uint32_t *convert_index) +{ + int rc; + struct tf_rm_resc *resc; + uint32_t hcapi_type; + uint32_t base_index; + + if (dir == TF_DIR_RX) + resc = &tfs->resc.rx; + else if (dir == TF_DIR_TX) + resc = &tfs->resc.tx; + else + return -EOPNOTSUPP; + + rc = tf_rm_convert_tbl_type(type, &hcapi_type); + if (rc) + return -1; + + switch (type) { + case TF_TBL_TYPE_FULL_ACT_RECORD: + case TF_TBL_TYPE_MCAST_GROUPS: + case TF_TBL_TYPE_ACT_ENCAP_8B: + case TF_TBL_TYPE_ACT_ENCAP_16B: + case TF_TBL_TYPE_ACT_ENCAP_32B: + case TF_TBL_TYPE_ACT_ENCAP_64B: + case TF_TBL_TYPE_ACT_SP_SMAC: + case TF_TBL_TYPE_ACT_SP_SMAC_IPV4: + case TF_TBL_TYPE_ACT_SP_SMAC_IPV6: + case TF_TBL_TYPE_ACT_STATS_64: + case TF_TBL_TYPE_ACT_MODIFY_SPORT: + case TF_TBL_TYPE_ACT_MODIFY_DPORT: + case TF_TBL_TYPE_ACT_MODIFY_IPV4_SRC: + case TF_TBL_TYPE_ACT_MODIFY_IPV4_DEST: + base_index = resc->sram_entry[hcapi_type].start; + break; + case TF_TBL_TYPE_MIRROR_CONFIG: + case TF_TBL_TYPE_METER_PROF: + case TF_TBL_TYPE_METER_INST: + case TF_TBL_TYPE_UPAR: + case TF_TBL_TYPE_EPOCH0: + case TF_TBL_TYPE_EPOCH1: + case TF_TBL_TYPE_METADATA: + case TF_TBL_TYPE_CT_STATE: + case TF_TBL_TYPE_RANGE_PROF: + case TF_TBL_TYPE_RANGE_ENTRY: + case TF_TBL_TYPE_LAG: + base_index = resc->hw_entry[hcapi_type].start; + break; + /* Not yet supported */ + case TF_TBL_TYPE_VNIC_SVIF: + case TF_TBL_TYPE_EXT: /* No pools for this type */ + default: + return 
-EOPNOTSUPP; + } + + switch (c_type) { + case TF_RM_CONVERT_RM_BASE: + *convert_index = index - base_index; + break; + case TF_RM_CONVERT_ADD_BASE: + *convert_index = index + base_index; + break; + default: + return -EOPNOTSUPP; + } + + return 0; +} diff --git a/src/spdk/dpdk/drivers/net/bnxt/tf_core/tf_rm.h b/src/spdk/dpdk/drivers/net/bnxt/tf_core/tf_rm.h new file mode 100644 index 000000000..e69d443a8 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnxt/tf_core/tf_rm.h @@ -0,0 +1,321 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019-2020 Broadcom + * All rights reserved. + */ + +#ifndef TF_RM_H_ +#define TF_RM_H_ + +#include "tf_resources.h" +#include "tf_core.h" +#include "bitalloc.h" + +struct tf; +struct tf_session; + +/* Internal macro to determine appropriate allocation pools based on + * DIRECTION parm, also performs error checking for DIRECTION parm. The + * SESSION_POOL and SESSION pointers are set appropriately upon + * successful return (the GLOBAL_POOL is used to globally manage + * resource allocation and the SESSION_POOL is used to track the + * resources that have been allocated to the session) + * + * parameters: + * struct tfp *tfp + * enum tf_dir direction + * struct bitalloc **session_pool + * string base_pool_name - used to form pointers to the + * appropriate bit allocation + * pools, both directions of the + * session pools must have same + * base name, for example if + * POOL_NAME is feat_pool: - the + * ptr's to the session pools + * are feat_pool_rx feat_pool_tx + * + * int rc - return code + * 0 - Success + * -1 - invalid DIRECTION parm + */ +#define TF_RM_GET_POOLS(tfs, direction, session_pool, pool_name, rc) do { \ + (rc) = 0; \ + if ((direction) == TF_DIR_RX) { \ + *(session_pool) = (tfs)->pool_name ## _RX; \ + } else if ((direction) == TF_DIR_TX) { \ + *(session_pool) = (tfs)->pool_name ## _TX; \ + } else { \ + rc = -1; \ + } \ + } while (0) + +#define TF_RM_GET_POOLS_RX(tfs, session_pool, pool_name) \ + (*(session_pool) = (tfs)->pool_name ## _RX) + +#define TF_RM_GET_POOLS_TX(tfs, session_pool, pool_name) \ + (*(session_pool) = (tfs)->pool_name ## _TX) + +/** + * Resource query single entry + */ +struct tf_rm_query_entry { + /** Minimum guaranteed number of elements */ + uint16_t min; + /** Maximum non-guaranteed number of elements */ + uint16_t max; +}; + +/** + * Resource single entry + */ +struct tf_rm_entry { + /** Starting index of the allocated resource */ + uint16_t start; + /** Number of allocated elements */ + uint16_t stride; +}; + +/** + * Resource query array of HW entities + */ +struct tf_rm_hw_query { + /** array of HW resource entries */ + struct tf_rm_query_entry hw_query[TF_RESC_TYPE_HW_MAX]; +}; + +/** + * Resource allocation array of HW entities + */ +struct tf_rm_hw_alloc { + /** array of HW resource entries */ + uint16_t hw_num[TF_RESC_TYPE_HW_MAX]; +}; + +/** + * Resource query array of SRAM entities + */ +struct tf_rm_sram_query { + /** array of SRAM resource entries */ + struct tf_rm_query_entry sram_query[TF_RESC_TYPE_SRAM_MAX]; +}; + +/** + * Resource allocation array of SRAM entities + */ +struct tf_rm_sram_alloc { + /** array of SRAM resource entries */ + uint16_t sram_num[TF_RESC_TYPE_SRAM_MAX]; +}; + +/** + * Resource Manager arrays for a single direction + */ +struct tf_rm_resc { + /** array of HW resource entries */ + struct tf_rm_entry hw_entry[TF_RESC_TYPE_HW_MAX]; + /** array of SRAM resource entries */ + struct tf_rm_entry sram_entry[TF_RESC_TYPE_SRAM_MAX]; +}; + +/** + * Resource Manager Database + */ 
+struct tf_rm_db { + struct tf_rm_resc rx; + struct tf_rm_resc tx; +}; + +/** + * Helper function converting direction to text string + */ +const char +*tf_dir_2_str(enum tf_dir dir); + +/** + * Helper function converting identifier to text string + */ +const char +*tf_ident_2_str(enum tf_identifier_type id_type); + +/** + * Helper function converting tcam type to text string + */ +const char +*tf_tcam_tbl_2_str(enum tf_tcam_tbl_type tcam_type); + +/** + * Helper function used to convert HW HCAPI resource type to a string. + */ +const char +*tf_hcapi_hw_2_str(enum tf_resource_type_hw hw_type); + +/** + * Helper function used to convert SRAM HCAPI resource type to a string. + */ +const char +*tf_hcapi_sram_2_str(enum tf_resource_type_sram sram_type); + +/** + * Initializes the Resource Manager and the associated database + * entries for HW and SRAM resources. Must be called before any other + * Resource Manager functions. + * + * [in] tfp + * Pointer to TF handle + */ +void tf_rm_init(struct tf *tfp); + +/** + * Allocates and validates both HW and SRAM resources per the NVM + * configuration. If any allocation fails all resources for the + * session is deallocated. + * + * [in] tfp + * Pointer to TF handle + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ +int tf_rm_allocate_validate(struct tf *tfp); + +/** + * Closes the Resource Manager and frees all allocated resources per + * the associated database. + * + * [in] tfp + * Pointer to TF handle + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + * - (-ENOTEMPTY) if resources are not cleaned up before close + */ +int tf_rm_close(struct tf *tfp); + +#if (TF_SHADOW == 1) +/** + * Initializes Shadow DB of configuration elements + * + * [in] tfs + * Pointer to TF Session + * + * Returns: + * 0 - Success + */ +int tf_rm_shadow_db_init(struct tf_session *tfs); +#endif /* TF_SHADOW */ + +/** + * Perform a Session Pool lookup using the Tcam table type. + * + * Function will print error msg if tcam type is unsupported or lookup + * failed. + * + * [in] tfs + * Pointer to TF Session + * + * [in] type + * Type of the object + * + * [in] dir + * Receive or transmit direction + * + * [in/out] session_pool + * Session pool + * + * Returns: + * 0 - Success will set the **pool + * -EOPNOTSUPP - Type is not supported + */ +int +tf_rm_lookup_tcam_type_pool(struct tf_session *tfs, + enum tf_dir dir, + enum tf_tcam_tbl_type type, + struct bitalloc **pool); + +/** + * Perform a Session Pool lookup using the Table type. + * + * Function will print error msg if table type is unsupported or + * lookup failed. + * + * [in] tfs + * Pointer to TF Session + * + * [in] type + * Type of the object + * + * [in] dir + * Receive or transmit direction + * + * [in/out] session_pool + * Session pool + * + * Returns: + * 0 - Success will set the **pool + * -EOPNOTSUPP - Type is not supported + */ +int +tf_rm_lookup_tbl_type_pool(struct tf_session *tfs, + enum tf_dir dir, + enum tf_tbl_type type, + struct bitalloc **pool); + +/** + * Converts the TF Table Type to internal HCAPI_TYPE + * + * [in] type + * Type to be converted + * + * [in/out] hcapi_type + * Converted type + * + * Returns: + * 0 - Success will set the *hcapi_type + * -EOPNOTSUPP - Type is not supported + */ +int +tf_rm_convert_tbl_type(enum tf_tbl_type type, + uint32_t *hcapi_type); + +/** + * TF RM Convert of index methods. 
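+ *
+ * Used with tf_rm_convert_index() to translate between pool-relative
+ * (zero based) indexes and absolute indexes that include the session
+ * resource base for the given table type and direction.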
+ */ +enum tf_rm_convert_type { + /** Adds the base of the Session Pool to the index */ + TF_RM_CONVERT_ADD_BASE, + /** Removes the Session Pool base from the index */ + TF_RM_CONVERT_RM_BASE +}; + +/** + * Provides conversion of the Table Type index in relation to the + * Session Pool base. + * + * [in] tfs + * Pointer to TF Session + * + * [in] dir + * Receive or transmit direction + * + * [in] type + * Type of the object + * + * [in] c_type + * Type of conversion to perform + * + * [in] index + * Index to be converted + * + * [in/out] convert_index + * Pointer to the converted index + */ +int +tf_rm_convert_index(struct tf_session *tfs, + enum tf_dir dir, + enum tf_tbl_type type, + enum tf_rm_convert_type c_type, + uint32_t index, + uint32_t *convert_index); + +#endif /* TF_RM_H_ */ diff --git a/src/spdk/dpdk/drivers/net/bnxt/tf_core/tf_session.h b/src/spdk/dpdk/drivers/net/bnxt/tf_core/tf_session.h new file mode 100644 index 000000000..50ef2d530 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnxt/tf_core/tf_session.h @@ -0,0 +1,294 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019-2020 Broadcom + * All rights reserved. + */ + +#ifndef _TF_SESSION_H_ +#define _TF_SESSION_H_ + +#include +#include + +#include "bitalloc.h" +#include "tf_core.h" +#include "tf_rm.h" +#include "tf_tbl.h" + +/** Session defines + */ +#define TF_SESSIONS_MAX 1 /** max # sessions */ +#define TF_SESSION_ID_INVALID 0xFFFFFFFF /** Invalid Session ID define */ + +/** Session + * + * Shared memory containing private TruFlow session information. + * Through this structure the session can keep track of resource + * allocations and (if so configured) any shadow copy of flow + * information. + * + * Memory is assigned to the Truflow instance by way of + * tf_open_session. Memory is allocated and owned by i.e. ULP. + * + * Access control to this shared memory is handled by the spin_lock in + * tf_session_info. + */ +struct tf_session { + /** TrueFlow Version. Used to control the structure layout + * when sharing sessions. No guarantee that a secondary + * process would come from the same version of an executable. + */ + struct tf_session_version ver; + + /** Device type, provided by tf_open_session(). + */ + enum tf_device_type device_type; + + /** Session ID, allocated by FW on tf_open_session(). + */ + union tf_session_id session_id; + + /** + * String containing name of control channel interface to be + * used for this session to communicate with firmware. + * + * ctrl_chan_name will be used as part of a name for any + * shared memory allocation. + */ + char ctrl_chan_name[TF_SESSION_NAME_MAX]; + + /** + * Boolean controlling the use and availability of shadow + * copy. Shadow copy will allow the TruFlow Core to keep track + * of resource content on the firmware side without having to + * query firmware. Additional private session core_data will + * be allocated if this boolean is set to 'true', default + * 'false'. + * + * Size of memory depends on the NVM Resource settings for the + * control channel. + */ + bool shadow_copy; + + /** + * Session Reference Count. To keep track of functions per + * session the ref_count is incremented. There is also a + * parallel TruFlow Firmware ref_count in case the TruFlow + * Core goes away without informing the Firmware. 
+ */
+	uint8_t ref_count;
+
+	/** Session HW and SRAM resources */
+	struct tf_rm_db resc;
+
+	/* Session HW resource pools */
+
+	/** RX L2 CTXT TCAM Pool */
+	BITALLOC_INST(TF_L2_CTXT_TCAM_POOL_NAME_RX, TF_NUM_L2_CTXT_TCAM);
+	/** TX L2 CTXT TCAM Pool */
+	BITALLOC_INST(TF_L2_CTXT_TCAM_POOL_NAME_TX, TF_NUM_L2_CTXT_TCAM);
+
+	/** RX Profile Func Pool */
+	BITALLOC_INST(TF_PROF_FUNC_POOL_NAME_RX, TF_NUM_PROF_FUNC);
+	/** TX Profile Func Pool */
+	BITALLOC_INST(TF_PROF_FUNC_POOL_NAME_TX, TF_NUM_PROF_FUNC);
+
+	/** RX Profile TCAM Pool */
+	BITALLOC_INST(TF_PROF_TCAM_POOL_NAME_RX, TF_NUM_PROF_TCAM);
+	/** TX Profile TCAM Pool */
+	BITALLOC_INST(TF_PROF_TCAM_POOL_NAME_TX, TF_NUM_PROF_TCAM);
+
+	/** RX EM Profile ID Pool */
+	BITALLOC_INST(TF_EM_PROF_ID_POOL_NAME_RX, TF_NUM_EM_PROF_ID);
+	/** TX EM Profile ID Pool */
+	BITALLOC_INST(TF_EM_PROF_ID_POOL_NAME_TX, TF_NUM_EM_PROF_ID);
+
+	/** RX WC Profile Pool */
+	BITALLOC_INST(TF_WC_TCAM_PROF_ID_POOL_NAME_RX, TF_NUM_WC_PROF_ID);
+	/** TX WC Profile Pool */
+	BITALLOC_INST(TF_WC_TCAM_PROF_ID_POOL_NAME_TX, TF_NUM_WC_PROF_ID);
+
+	/* TBD, how do we want to handle EM records ? */
+	/* EM Records are not controlled by way of a pool */
+
+	/** RX WC TCAM Pool */
+	BITALLOC_INST(TF_WC_TCAM_POOL_NAME_RX, TF_NUM_WC_TCAM_ROW);
+	/** TX WC TCAM Pool */
+	BITALLOC_INST(TF_WC_TCAM_POOL_NAME_TX, TF_NUM_WC_TCAM_ROW);
+
+	/** RX Meter Profile Pool */
+	BITALLOC_INST(TF_METER_PROF_POOL_NAME_RX, TF_NUM_METER_PROF);
+	/** TX Meter Profile Pool */
+	BITALLOC_INST(TF_METER_PROF_POOL_NAME_TX, TF_NUM_METER_PROF);
+
+	/** RX Meter Instance Pool */
+	BITALLOC_INST(TF_METER_INST_POOL_NAME_RX, TF_NUM_METER);
+	/** TX Meter Instance Pool */
+	BITALLOC_INST(TF_METER_INST_POOL_NAME_TX, TF_NUM_METER);
+
+	/** RX Mirror Configuration Pool */
+	BITALLOC_INST(TF_MIRROR_POOL_NAME_RX, TF_NUM_MIRROR);
+	/** TX Mirror Configuration Pool */
+	BITALLOC_INST(TF_MIRROR_POOL_NAME_TX, TF_NUM_MIRROR);
+
+	/** RX UPAR Pool */
+	BITALLOC_INST(TF_UPAR_POOL_NAME_RX, TF_NUM_UPAR);
+	/** TX UPAR Pool */
+	BITALLOC_INST(TF_UPAR_POOL_NAME_TX, TF_NUM_UPAR);
+
+	/** RX SP TCAM Pool */
+	BITALLOC_INST(TF_SP_TCAM_POOL_NAME_RX, TF_NUM_SP_TCAM);
+	/** TX SP TCAM Pool */
+	BITALLOC_INST(TF_SP_TCAM_POOL_NAME_TX, TF_NUM_SP_TCAM);
+
+	/** RX FKB Pool */
+	BITALLOC_INST(TF_FKB_POOL_NAME_RX, TF_NUM_FKB);
+	/** TX FKB Pool */
+	BITALLOC_INST(TF_FKB_POOL_NAME_TX, TF_NUM_FKB);
+
+	/** RX Table Scope Pool */
+	BITALLOC_INST(TF_TBL_SCOPE_POOL_NAME_RX, TF_NUM_TBL_SCOPE);
+	/** TX Table Scope Pool */
+	BITALLOC_INST(TF_TBL_SCOPE_POOL_NAME_TX, TF_NUM_TBL_SCOPE);
+
+	/** RX L2 Func Pool */
+	BITALLOC_INST(TF_L2_FUNC_POOL_NAME_RX, TF_NUM_L2_FUNC);
+	/** TX L2 Func Pool */
+	BITALLOC_INST(TF_L2_FUNC_POOL_NAME_TX, TF_NUM_L2_FUNC);
+
+	/** RX Epoch0 Pool */
+	BITALLOC_INST(TF_EPOCH0_POOL_NAME_RX, TF_NUM_EPOCH0);
+	/** TX Epoch0 Pool */
+	BITALLOC_INST(TF_EPOCH0_POOL_NAME_TX, TF_NUM_EPOCH0);
+
+	/** RX Epoch1 Pool */
+	BITALLOC_INST(TF_EPOCH1_POOL_NAME_RX, TF_NUM_EPOCH1);
+	/** TX Epoch1 Pool */
+	BITALLOC_INST(TF_EPOCH1_POOL_NAME_TX, TF_NUM_EPOCH1);
+
+	/** RX MetaData Profile Pool */
+	BITALLOC_INST(TF_METADATA_POOL_NAME_RX, TF_NUM_METADATA);
+	/** TX MetaData Profile Pool */
+	BITALLOC_INST(TF_METADATA_POOL_NAME_TX, TF_NUM_METADATA);
+
+	/** RX Connection Tracking State Pool */
+	BITALLOC_INST(TF_CT_STATE_POOL_NAME_RX, TF_NUM_CT_STATE);
+	/** TX Connection Tracking State Pool */
+	BITALLOC_INST(TF_CT_STATE_POOL_NAME_TX, TF_NUM_CT_STATE);
+
+	/** RX Range Profile Pool */
+	BITALLOC_INST(TF_RANGE_PROF_POOL_NAME_RX, TF_NUM_RANGE_PROF);
+
/** TX Range Profile Pool */ + BITALLOC_INST(TF_RANGE_PROF_POOL_NAME_TX, TF_NUM_RANGE_PROF); + + /** RX Range Pool */ + BITALLOC_INST(TF_RANGE_ENTRY_POOL_NAME_RX, TF_NUM_RANGE_ENTRY); + /** TX Range Pool */ + BITALLOC_INST(TF_RANGE_ENTRY_POOL_NAME_TX, TF_NUM_RANGE_ENTRY); + + /** RX LAG Pool */ + BITALLOC_INST(TF_LAG_ENTRY_POOL_NAME_RX, TF_NUM_LAG_ENTRY); + /** TX LAG Pool */ + BITALLOC_INST(TF_LAG_ENTRY_POOL_NAME_TX, TF_NUM_LAG_ENTRY); + + /* Session SRAM pools */ + + /** RX Full Action Record Pool */ + BITALLOC_INST(TF_SRAM_FULL_ACTION_POOL_NAME_RX, + TF_RSVD_SRAM_FULL_ACTION_RX); + /** TX Full Action Record Pool */ + BITALLOC_INST(TF_SRAM_FULL_ACTION_POOL_NAME_TX, + TF_RSVD_SRAM_FULL_ACTION_TX); + + /** RX Multicast Group Pool, only RX is supported */ + BITALLOC_INST(TF_SRAM_MCG_POOL_NAME_RX, + TF_RSVD_SRAM_MCG_RX); + + /** RX Encap 8B Pool*/ + BITALLOC_INST(TF_SRAM_ENCAP_8B_POOL_NAME_RX, + TF_RSVD_SRAM_ENCAP_8B_RX); + /** TX Encap 8B Pool*/ + BITALLOC_INST(TF_SRAM_ENCAP_8B_POOL_NAME_TX, + TF_RSVD_SRAM_ENCAP_8B_TX); + + /** RX Encap 16B Pool */ + BITALLOC_INST(TF_SRAM_ENCAP_16B_POOL_NAME_RX, + TF_RSVD_SRAM_ENCAP_16B_RX); + /** TX Encap 16B Pool */ + BITALLOC_INST(TF_SRAM_ENCAP_16B_POOL_NAME_TX, + TF_RSVD_SRAM_ENCAP_16B_TX); + + /** TX Encap 64B Pool, only TX is supported */ + BITALLOC_INST(TF_SRAM_ENCAP_64B_POOL_NAME_TX, + TF_RSVD_SRAM_ENCAP_64B_TX); + + /** RX Source Properties SMAC Pool */ + BITALLOC_INST(TF_SRAM_SP_SMAC_POOL_NAME_RX, + TF_RSVD_SRAM_SP_SMAC_RX); + /** TX Source Properties SMAC Pool */ + BITALLOC_INST(TF_SRAM_SP_SMAC_POOL_NAME_TX, + TF_RSVD_SRAM_SP_SMAC_TX); + + /** TX Source Properties SMAC IPv4 Pool, only TX is supported */ + BITALLOC_INST(TF_SRAM_SP_SMAC_IPV4_POOL_NAME_TX, + TF_RSVD_SRAM_SP_SMAC_IPV4_TX); + + /** TX Source Properties SMAC IPv6 Pool, only TX is supported */ + BITALLOC_INST(TF_SRAM_SP_SMAC_IPV6_POOL_NAME_TX, + TF_RSVD_SRAM_SP_SMAC_IPV6_TX); + + /** RX Counter 64B Pool */ + BITALLOC_INST(TF_SRAM_STATS_64B_POOL_NAME_RX, + TF_RSVD_SRAM_COUNTER_64B_RX); + /** TX Counter 64B Pool */ + BITALLOC_INST(TF_SRAM_STATS_64B_POOL_NAME_TX, + TF_RSVD_SRAM_COUNTER_64B_TX); + + /** RX NAT Source Port Pool */ + BITALLOC_INST(TF_SRAM_NAT_SPORT_POOL_NAME_RX, + TF_RSVD_SRAM_NAT_SPORT_RX); + /** TX NAT Source Port Pool */ + BITALLOC_INST(TF_SRAM_NAT_SPORT_POOL_NAME_TX, + TF_RSVD_SRAM_NAT_SPORT_TX); + + /** RX NAT Destination Port Pool */ + BITALLOC_INST(TF_SRAM_NAT_DPORT_POOL_NAME_RX, + TF_RSVD_SRAM_NAT_DPORT_RX); + /** TX NAT Destination Port Pool */ + BITALLOC_INST(TF_SRAM_NAT_DPORT_POOL_NAME_TX, + TF_RSVD_SRAM_NAT_DPORT_TX); + + /** RX NAT Source IPv4 Pool */ + BITALLOC_INST(TF_SRAM_NAT_S_IPV4_POOL_NAME_RX, + TF_RSVD_SRAM_NAT_S_IPV4_RX); + /** TX NAT Source IPv4 Pool */ + BITALLOC_INST(TF_SRAM_NAT_S_IPV4_POOL_NAME_TX, + TF_RSVD_SRAM_NAT_S_IPV4_TX); + + /** RX NAT Destination IPv4 Pool */ + BITALLOC_INST(TF_SRAM_NAT_D_IPV4_POOL_NAME_RX, + TF_RSVD_SRAM_NAT_D_IPV4_RX); + /** TX NAT IPv4 Destination Pool */ + BITALLOC_INST(TF_SRAM_NAT_D_IPV4_POOL_NAME_TX, + TF_RSVD_SRAM_NAT_D_IPV4_TX); + + /** + * Pools not allocated from HCAPI RM + */ + + /** RX L2 Ctx Remap ID Pool */ + BITALLOC_INST(TF_L2_CTXT_REMAP_POOL_NAME_RX, TF_NUM_L2_CTXT_TCAM); + /** TX L2 Ctx Remap ID Pool */ + BITALLOC_INST(TF_L2_CTXT_REMAP_POOL_NAME_TX, TF_NUM_L2_CTXT_TCAM); + + /** CRC32 seed table */ +#define TF_LKUP_SEED_MEM_SIZE 512 + uint32_t lkup_em_seed_mem[TF_DIR_MAX][TF_LKUP_SEED_MEM_SIZE]; + + /** Lookup3 init values */ + uint32_t lkup_lkup3_init_cfg[TF_DIR_MAX]; + + /** Table scope array */ + struct 
tf_tbl_scope_cb tbl_scopes[TF_NUM_TBL_SCOPE]; +}; + +#endif /* _TF_SESSION_H_ */ diff --git a/src/spdk/dpdk/drivers/net/bnxt/tf_core/tf_tbl.c b/src/spdk/dpdk/drivers/net/bnxt/tf_core/tf_tbl.c new file mode 100644 index 000000000..d900c9c09 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnxt/tf_core/tf_tbl.c @@ -0,0 +1,1803 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019-2020 Broadcom + * All rights reserved. + */ + +/* Truflow Table APIs and supporting code */ + +#include +#include +#include +#include +#include +#include +#include +#include "hsi_struct_def_dpdk.h" + +#include "tf_core.h" +#include "tf_em.h" +#include "tf_msg.h" +#include "tfp.h" +#include "hwrm_tf.h" +#include "bnxt.h" +#include "tf_resources.h" +#include "tf_rm.h" + +#define PTU_PTE_VALID 0x1UL +#define PTU_PTE_LAST 0x2UL +#define PTU_PTE_NEXT_TO_LAST 0x4UL + +/* Number of pointers per page_size */ +#define MAX_PAGE_PTRS(page_size) ((page_size) / sizeof(void *)) + +#define TF_EM_PG_SZ_4K (1 << 12) +#define TF_EM_PG_SZ_8K (1 << 13) +#define TF_EM_PG_SZ_64K (1 << 16) +#define TF_EM_PG_SZ_256K (1 << 18) +#define TF_EM_PG_SZ_1M (1 << 20) +#define TF_EM_PG_SZ_2M (1 << 21) +#define TF_EM_PG_SZ_4M (1 << 22) +#define TF_EM_PG_SZ_1G (1 << 30) + +#define TF_EM_CTX_ID_INVALID 0xFFFF + +#define TF_EM_MIN_ENTRIES (1 << 15) /* 32K */ +#define TF_EM_MAX_ENTRIES (1 << 27) /* 128M */ + +/** + * Function to free a page table + * + * [in] tp + * Pointer to the page table to free + */ +static void +tf_em_free_pg_tbl(struct tf_em_page_tbl *tp) +{ + uint32_t i; + + for (i = 0; i < tp->pg_count; i++) { + if (!tp->pg_va_tbl[i]) { + PMD_DRV_LOG(WARNING, + "No map for page %d table %016" PRIu64 "\n", + i, + (uint64_t)(uintptr_t)tp); + continue; + } + + tfp_free(tp->pg_va_tbl[i]); + tp->pg_va_tbl[i] = NULL; + } + + tp->pg_count = 0; + tfp_free(tp->pg_va_tbl); + tp->pg_va_tbl = NULL; + tfp_free(tp->pg_pa_tbl); + tp->pg_pa_tbl = NULL; +} + +/** + * Function to free an EM table + * + * [in] tbl + * Pointer to the EM table to free + */ +static void +tf_em_free_page_table(struct tf_em_table *tbl) +{ + struct tf_em_page_tbl *tp; + int i; + + for (i = 0; i < tbl->num_lvl; i++) { + tp = &tbl->pg_tbl[i]; + + PMD_DRV_LOG(INFO, + "EEM: Freeing page table: size %u lvl %d cnt %u\n", + TF_EM_PAGE_SIZE, + i, + tp->pg_count); + + tf_em_free_pg_tbl(tp); + } + + tbl->l0_addr = NULL; + tbl->l0_dma_addr = 0; + tbl->num_lvl = 0; + tbl->num_data_pages = 0; +} + +/** + * Allocation of page tables + * + * [in] tfp + * Pointer to a TruFlow handle + * + * [in] pg_count + * Page count to allocate + * + * [in] pg_size + * Size of each page + * + * Returns: + * 0 - Success + * -ENOMEM - Out of memory + */ +static int +tf_em_alloc_pg_tbl(struct tf_em_page_tbl *tp, + uint32_t pg_count, + uint32_t pg_size) +{ + uint32_t i; + struct tfp_calloc_parms parms; + + parms.nitems = pg_count; + parms.size = sizeof(void *); + parms.alignment = 0; + + if (tfp_calloc(&parms) != 0) + return -ENOMEM; + + tp->pg_va_tbl = parms.mem_va; + + if (tfp_calloc(&parms) != 0) { + tfp_free(tp->pg_va_tbl); + return -ENOMEM; + } + + tp->pg_pa_tbl = parms.mem_va; + + tp->pg_count = 0; + tp->pg_size = pg_size; + + for (i = 0; i < pg_count; i++) { + parms.nitems = 1; + parms.size = pg_size; + parms.alignment = TF_EM_PAGE_ALIGNMENT; + + if (tfp_calloc(&parms) != 0) + goto cleanup; + + tp->pg_pa_tbl[i] = (uint64_t)(uintptr_t)parms.mem_pa; + tp->pg_va_tbl[i] = parms.mem_va; + + memset(tp->pg_va_tbl[i], 0, pg_size); + tp->pg_count++; + } + + return 0; + +cleanup: + tf_em_free_pg_tbl(tp); + 
return -ENOMEM; +} + +/** + * Allocates EM page tables + * + * [in] tbl + * Table to allocate pages for + * + * Returns: + * 0 - Success + * -ENOMEM - Out of memory + */ +static int +tf_em_alloc_page_table(struct tf_em_table *tbl) +{ + struct tf_em_page_tbl *tp; + int rc = 0; + int i; + uint32_t j; + + for (i = 0; i < tbl->num_lvl; i++) { + tp = &tbl->pg_tbl[i]; + + rc = tf_em_alloc_pg_tbl(tp, + tbl->page_cnt[i], + TF_EM_PAGE_SIZE); + if (rc) { + PMD_DRV_LOG(WARNING, + "Failed to allocate page table: lvl: %d\n", + i); + goto cleanup; + } + + for (j = 0; j < tp->pg_count; j++) { + PMD_DRV_LOG(INFO, + "EEM: Allocated page table: size %u lvl %d cnt" + " %u VA:%p PA:%p\n", + TF_EM_PAGE_SIZE, + i, + tp->pg_count, + (uint32_t *)tp->pg_va_tbl[j], + (uint32_t *)(uintptr_t)tp->pg_pa_tbl[j]); + } + } + return rc; + +cleanup: + tf_em_free_page_table(tbl); + return rc; +} + +/** + * Links EM page tables + * + * [in] tp + * Pointer to page table + * + * [in] tp_next + * Pointer to the next page table + * + * [in] set_pte_last + * Flag controlling if the page table is last + */ +static void +tf_em_link_page_table(struct tf_em_page_tbl *tp, + struct tf_em_page_tbl *tp_next, + bool set_pte_last) +{ + uint64_t *pg_pa = tp_next->pg_pa_tbl; + uint64_t *pg_va; + uint64_t valid; + uint32_t k = 0; + uint32_t i; + uint32_t j; + + for (i = 0; i < tp->pg_count; i++) { + pg_va = tp->pg_va_tbl[i]; + + for (j = 0; j < MAX_PAGE_PTRS(tp->pg_size); j++) { + if (k == tp_next->pg_count - 2 && set_pte_last) + valid = PTU_PTE_NEXT_TO_LAST | PTU_PTE_VALID; + else if (k == tp_next->pg_count - 1 && set_pte_last) + valid = PTU_PTE_LAST | PTU_PTE_VALID; + else + valid = PTU_PTE_VALID; + + pg_va[j] = tfp_cpu_to_le_64(pg_pa[k] | valid); + if (++k >= tp_next->pg_count) + return; + } + } +} + +/** + * Setup a EM page table + * + * [in] tbl + * Pointer to EM page table + */ +static void +tf_em_setup_page_table(struct tf_em_table *tbl) +{ + struct tf_em_page_tbl *tp_next; + struct tf_em_page_tbl *tp; + bool set_pte_last = 0; + int i; + + for (i = 0; i < tbl->num_lvl - 1; i++) { + tp = &tbl->pg_tbl[i]; + tp_next = &tbl->pg_tbl[i + 1]; + if (i == tbl->num_lvl - 2) + set_pte_last = 1; + tf_em_link_page_table(tp, tp_next, set_pte_last); + } + + tbl->l0_addr = tbl->pg_tbl[PT_LVL_0].pg_va_tbl[0]; + tbl->l0_dma_addr = tbl->pg_tbl[PT_LVL_0].pg_pa_tbl[0]; +} + +/** + * Given the page size, size of each data item (entry size), + * and the total number of entries needed, determine the number + * of page table levels and the number of data pages required. 
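+ *
+ * Illustrative example (assuming a 4KB page size): 1M entries of
+ * 16B each require 16MB of data, i.e. 4096 data pages. A single
+ * level-1 page holds 512 pointers and so covers only 2MB of data,
+ * therefore a three level table (PT_LVL_2) is returned.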
+ * + * [in] page_size + * Page size + * + * [in] entry_size + * Entry size + * + * [in] num_entries + * Number of entries needed + * + * [out] num_data_pages + * Number of pages required + * + * Returns: + * Success - Number of EM page levels required + * -ENOMEM - Out of memory + */ +static int +tf_em_size_page_tbl_lvl(uint32_t page_size, + uint32_t entry_size, + uint32_t num_entries, + uint64_t *num_data_pages) +{ + uint64_t lvl_data_size = page_size; + int lvl = PT_LVL_0; + uint64_t data_size; + + *num_data_pages = 0; + data_size = (uint64_t)num_entries * entry_size; + + while (lvl_data_size < data_size) { + lvl++; + + if (lvl == PT_LVL_1) + lvl_data_size = (uint64_t)MAX_PAGE_PTRS(page_size) * + page_size; + else if (lvl == PT_LVL_2) + lvl_data_size = (uint64_t)MAX_PAGE_PTRS(page_size) * + MAX_PAGE_PTRS(page_size) * page_size; + else + return -ENOMEM; + } + + *num_data_pages = roundup(data_size, page_size) / page_size; + + return lvl; +} + +/** + * Return the number of page table pages needed to + * reference the given number of next level pages. + * + * [in] num_pages + * Number of EM pages + * + * [in] page_size + * Size of each EM page + * + * Returns: + * Number of EM page table pages + */ +static uint32_t +tf_em_page_tbl_pgcnt(uint32_t num_pages, + uint32_t page_size) +{ + return roundup(num_pages, MAX_PAGE_PTRS(page_size)) / + MAX_PAGE_PTRS(page_size); + return 0; +} + +/** + * Given the number of data pages, page_size and the maximum + * number of page table levels (already determined), size + * the number of page table pages required at each level. + * + * [in] max_lvl + * Max number of levels + * + * [in] num_data_pages + * Number of EM data pages + * + * [in] page_size + * Size of an EM page + * + * [out] *page_cnt + * EM page count + */ +static void +tf_em_size_page_tbls(int max_lvl, + uint64_t num_data_pages, + uint32_t page_size, + uint32_t *page_cnt) +{ + if (max_lvl == PT_LVL_0) { + page_cnt[PT_LVL_0] = num_data_pages; + } else if (max_lvl == PT_LVL_1) { + page_cnt[PT_LVL_1] = num_data_pages; + page_cnt[PT_LVL_0] = + tf_em_page_tbl_pgcnt(page_cnt[PT_LVL_1], page_size); + } else if (max_lvl == PT_LVL_2) { + page_cnt[PT_LVL_2] = num_data_pages; + page_cnt[PT_LVL_1] = + tf_em_page_tbl_pgcnt(page_cnt[PT_LVL_2], page_size); + page_cnt[PT_LVL_0] = + tf_em_page_tbl_pgcnt(page_cnt[PT_LVL_1], page_size); + } else { + return; + } +} + +/** + * Size the EM table based on capabilities + * + * [in] tbl + * EM table to size + * + * Returns: + * 0 - Success + * - EINVAL - Parameter error + * - ENOMEM - Out of memory + */ +static int +tf_em_size_table(struct tf_em_table *tbl) +{ + uint64_t num_data_pages; + uint32_t *page_cnt; + int max_lvl; + uint32_t num_entries; + uint32_t cnt = TF_EM_MIN_ENTRIES; + + /* Ignore entry if both size and number are zero */ + if (!tbl->entry_size && !tbl->num_entries) + return 0; + + /* If only one is set then error */ + if (!tbl->entry_size || !tbl->num_entries) + return -EINVAL; + + /* Determine number of page table levels and the number + * of data pages needed to process the given eem table. + */ + if (tbl->type == RECORD_TABLE) { + /* + * For action records just a memory size is provided. 
Work + * backwards to resolve to number of entries + */ + num_entries = tbl->num_entries / tbl->entry_size; + if (num_entries < TF_EM_MIN_ENTRIES) { + num_entries = TF_EM_MIN_ENTRIES; + } else { + while (num_entries > cnt && cnt <= TF_EM_MAX_ENTRIES) + cnt *= 2; + num_entries = cnt; + } + } else { + num_entries = tbl->num_entries; + } + + max_lvl = tf_em_size_page_tbl_lvl(TF_EM_PAGE_SIZE, + tbl->entry_size, + tbl->num_entries, + &num_data_pages); + if (max_lvl < 0) { + PMD_DRV_LOG(WARNING, "EEM: Failed to size page table levels\n"); + PMD_DRV_LOG(WARNING, + "table: %d data-sz: %016" PRIu64 " page-sz: %u\n", + tbl->type, + (uint64_t)num_entries * tbl->entry_size, + TF_EM_PAGE_SIZE); + return -ENOMEM; + } + + tbl->num_lvl = max_lvl + 1; + tbl->num_data_pages = num_data_pages; + + /* Determine the number of pages needed at each level */ + page_cnt = tbl->page_cnt; + memset(page_cnt, 0, sizeof(tbl->page_cnt)); + tf_em_size_page_tbls(max_lvl, num_data_pages, TF_EM_PAGE_SIZE, + page_cnt); + + PMD_DRV_LOG(INFO, "EEM: Sized page table: %d\n", tbl->type); + PMD_DRV_LOG(INFO, + "EEM: lvls: %d sz: %016" PRIu64 " pgs: %016" PRIu64 " l0: %u l1: %u l2: %u\n", + max_lvl + 1, + (uint64_t)num_data_pages * TF_EM_PAGE_SIZE, + num_data_pages, + page_cnt[PT_LVL_0], + page_cnt[PT_LVL_1], + page_cnt[PT_LVL_2]); + + return 0; +} + +/** + * Unregisters EM Ctx in Firmware + * + * [in] tfp + * Pointer to a TruFlow handle + * + * [in] tbl_scope_cb + * Pointer to a table scope control block + * + * [in] dir + * Receive or transmit direction + */ +static void +tf_em_ctx_unreg(struct tf *tfp, + struct tf_tbl_scope_cb *tbl_scope_cb, + int dir) +{ + struct tf_em_ctx_mem_info *ctxp = &tbl_scope_cb->em_ctx_info[dir]; + struct tf_em_table *tbl; + int i; + + for (i = KEY0_TABLE; i < MAX_TABLE; i++) { + tbl = &ctxp->em_tables[i]; + + if (tbl->num_entries != 0 && tbl->entry_size != 0) { + tf_msg_em_mem_unrgtr(tfp, &tbl->ctx_id); + tf_em_free_page_table(tbl); + } + } +} + +/** + * Registers EM Ctx in Firmware + * + * [in] tfp + * Pointer to a TruFlow handle + * + * [in] tbl_scope_cb + * Pointer to a table scope control block + * + * [in] dir + * Receive or transmit direction + * + * Returns: + * 0 - Success + * -ENOMEM - Out of Memory + */ +static int +tf_em_ctx_reg(struct tf *tfp, + struct tf_tbl_scope_cb *tbl_scope_cb, + int dir) +{ + struct tf_em_ctx_mem_info *ctxp = &tbl_scope_cb->em_ctx_info[dir]; + struct tf_em_table *tbl; + int rc = 0; + int i; + + for (i = KEY0_TABLE; i < MAX_TABLE; i++) { + tbl = &ctxp->em_tables[i]; + + if (tbl->num_entries && tbl->entry_size) { + rc = tf_em_size_table(tbl); + + if (rc) + goto cleanup; + + rc = tf_em_alloc_page_table(tbl); + if (rc) + goto cleanup; + + tf_em_setup_page_table(tbl); + rc = tf_msg_em_mem_rgtr(tfp, + tbl->num_lvl - 1, + TF_EM_PAGE_SIZE_ENUM, + tbl->l0_dma_addr, + &tbl->ctx_id); + if (rc) + goto cleanup; + } + } + return rc; + +cleanup: + tf_em_ctx_unreg(tfp, tbl_scope_cb, dir); + return rc; +} + +/** + * Validates EM number of entries requested + * + * [in] tbl_scope_cb + * Pointer to table scope control block to be populated + * + * [in] parms + * Pointer to input parameters + * + * Returns: + * 0 - Success + * -EINVAL - Parameter error + */ +static int +tf_em_validate_num_entries(struct tf_tbl_scope_cb *tbl_scope_cb, + struct tf_alloc_tbl_scope_parms *parms) +{ + uint32_t cnt; + + if (parms->rx_mem_size_in_mb != 0) { + uint32_t key_b = 2 * ((parms->rx_max_key_sz_in_bits / 8) + 1); + uint32_t action_b = ((parms->rx_max_action_entry_sz_in_bits / 8) + + 1); + uint32_t 
num_entries = (parms->rx_mem_size_in_mb * + TF_MEGABYTE) / (key_b + action_b); + + if (num_entries < TF_EM_MIN_ENTRIES) { + PMD_DRV_LOG(ERR, "EEM: Insufficient memory requested:" + "%uMB\n", + parms->rx_mem_size_in_mb); + return -EINVAL; + } + + cnt = TF_EM_MIN_ENTRIES; + while (num_entries > cnt && + cnt <= TF_EM_MAX_ENTRIES) + cnt *= 2; + + if (cnt > TF_EM_MAX_ENTRIES) { + PMD_DRV_LOG(ERR, "EEM: Invalid number of Tx requested: " + "%u\n", + (parms->tx_num_flows_in_k * TF_KILOBYTE)); + return -EINVAL; + } + + parms->rx_num_flows_in_k = cnt / TF_KILOBYTE; + } else { + if ((parms->rx_num_flows_in_k * TF_KILOBYTE) < + TF_EM_MIN_ENTRIES || + (parms->rx_num_flows_in_k * TF_KILOBYTE) > + tbl_scope_cb->em_caps[TF_DIR_RX].max_entries_supported) { + PMD_DRV_LOG(ERR, + "EEM: Invalid number of Rx flows " + "requested:%u max:%u\n", + parms->rx_num_flows_in_k * TF_KILOBYTE, + tbl_scope_cb->em_caps[TF_DIR_RX].max_entries_supported); + return -EINVAL; + } + + /* must be a power-of-2 supported value + * in the range 32K - 128M + */ + cnt = TF_EM_MIN_ENTRIES; + while ((parms->rx_num_flows_in_k * TF_KILOBYTE) != cnt && + cnt <= TF_EM_MAX_ENTRIES) + cnt *= 2; + + if (cnt > TF_EM_MAX_ENTRIES) { + PMD_DRV_LOG(ERR, + "EEM: Invalid number of Rx requested: %u\n", + (parms->rx_num_flows_in_k * TF_KILOBYTE)); + return -EINVAL; + } + } + + if (parms->tx_mem_size_in_mb != 0) { + uint32_t key_b = 2 * (parms->tx_max_key_sz_in_bits / 8 + 1); + uint32_t action_b = ((parms->tx_max_action_entry_sz_in_bits / 8) + + 1); + uint32_t num_entries = (parms->tx_mem_size_in_mb * + (TF_KILOBYTE * TF_KILOBYTE)) / + (key_b + action_b); + + if (num_entries < TF_EM_MIN_ENTRIES) { + PMD_DRV_LOG(ERR, + "EEM: Insufficient memory requested:%uMB\n", + parms->rx_mem_size_in_mb); + return -EINVAL; + } + + cnt = TF_EM_MIN_ENTRIES; + while (num_entries > cnt && + cnt <= TF_EM_MAX_ENTRIES) + cnt *= 2; + + if (cnt > TF_EM_MAX_ENTRIES) { + PMD_DRV_LOG(ERR, + "EEM: Invalid number of Tx requested: %u\n", + (parms->tx_num_flows_in_k * TF_KILOBYTE)); + return -EINVAL; + } + + parms->tx_num_flows_in_k = cnt / TF_KILOBYTE; + } else { + if ((parms->tx_num_flows_in_k * TF_KILOBYTE) < + TF_EM_MIN_ENTRIES || + (parms->tx_num_flows_in_k * TF_KILOBYTE) > + tbl_scope_cb->em_caps[TF_DIR_TX].max_entries_supported) { + PMD_DRV_LOG(ERR, + "EEM: Invalid number of Tx flows " + "requested:%u max:%u\n", + (parms->tx_num_flows_in_k * TF_KILOBYTE), + tbl_scope_cb->em_caps[TF_DIR_TX].max_entries_supported); + return -EINVAL; + } + + cnt = TF_EM_MIN_ENTRIES; + while ((parms->tx_num_flows_in_k * TF_KILOBYTE) != cnt && + cnt <= TF_EM_MAX_ENTRIES) + cnt *= 2; + + if (cnt > TF_EM_MAX_ENTRIES) { + PMD_DRV_LOG(ERR, + "EEM: Invalid number of Tx requested: %u\n", + (parms->tx_num_flows_in_k * TF_KILOBYTE)); + return -EINVAL; + } + } + + if (parms->rx_num_flows_in_k != 0 && + (parms->rx_max_key_sz_in_bits / 8 == 0)) { + PMD_DRV_LOG(ERR, + "EEM: Rx key size required: %u\n", + (parms->rx_max_key_sz_in_bits)); + return -EINVAL; + } + + if (parms->tx_num_flows_in_k != 0 && + (parms->tx_max_key_sz_in_bits / 8 == 0)) { + PMD_DRV_LOG(ERR, + "EEM: Tx key size required: %u\n", + (parms->tx_max_key_sz_in_bits)); + return -EINVAL; + } + /* Rx */ + tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[KEY0_TABLE].num_entries = + parms->rx_num_flows_in_k * TF_KILOBYTE; + tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[KEY0_TABLE].entry_size = + parms->rx_max_key_sz_in_bits / 8; + + tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[KEY1_TABLE].num_entries = + parms->rx_num_flows_in_k * TF_KILOBYTE; + 
tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[KEY1_TABLE].entry_size = + parms->rx_max_key_sz_in_bits / 8; + + tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[RECORD_TABLE].num_entries = + parms->rx_num_flows_in_k * TF_KILOBYTE; + tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[RECORD_TABLE].entry_size = + parms->rx_max_action_entry_sz_in_bits / 8; + + tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[EFC_TABLE].num_entries = + 0; + + /* Tx */ + tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[KEY0_TABLE].num_entries = + parms->tx_num_flows_in_k * TF_KILOBYTE; + tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[KEY0_TABLE].entry_size = + parms->tx_max_key_sz_in_bits / 8; + + tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[KEY1_TABLE].num_entries = + parms->tx_num_flows_in_k * TF_KILOBYTE; + tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[KEY1_TABLE].entry_size = + parms->tx_max_key_sz_in_bits / 8; + + tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[RECORD_TABLE].num_entries = + parms->tx_num_flows_in_k * TF_KILOBYTE; + tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[RECORD_TABLE].entry_size = + parms->tx_max_action_entry_sz_in_bits / 8; + + tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[EFC_TABLE].num_entries = + 0; + + return 0; +} + +/** + * Internal function to set a Table Entry. Supports all internal Table Types + * + * [in] tfp + * Pointer to TruFlow handle + * + * [in] parms + * Pointer to input parameters + * + * Returns: + * 0 - Success + * -EINVAL - Parameter error + */ +static int +tf_set_tbl_entry_internal(struct tf *tfp, + struct tf_set_tbl_entry_parms *parms) +{ + int rc; + int id; + uint32_t index; + struct bitalloc *session_pool; + struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data); + + /* Lookup the pool using the table type of the element */ + rc = tf_rm_lookup_tbl_type_pool(tfs, + parms->dir, + parms->type, + &session_pool); + /* Error logging handled by tf_rm_lookup_tbl_type_pool */ + if (rc) + return rc; + + index = parms->idx; + + if (parms->type != TF_TBL_TYPE_FULL_ACT_RECORD && + parms->type != TF_TBL_TYPE_ACT_SP_SMAC_IPV4) { + PMD_DRV_LOG(ERR, + "dir:%d, Type not supported, type:%d\n", + parms->dir, + parms->type); + return -EOPNOTSUPP; + } + + /* Adjust the returned index/offset as there is no guarantee + * that the start is 0 at time of RM allocation + */ + tf_rm_convert_index(tfs, + parms->dir, + parms->type, + TF_RM_CONVERT_RM_BASE, + parms->idx, + &index); + + /* Verify that the entry has been previously allocated */ + id = ba_inuse(session_pool, index); + if (id != 1) { + PMD_DRV_LOG(ERR, + "dir:%d, Invalid or not allocated index, type:%d, idx:%d\n", + parms->dir, + parms->type, + index); + return -EINVAL; + } + + /* Set the entry */ + rc = tf_msg_set_tbl_entry(tfp, + parms->dir, + parms->type, + parms->data_sz_in_bytes, + parms->data, + parms->idx); + if (rc) { + PMD_DRV_LOG(ERR, + "dir:%d, Set failed, type:%d, rc:%d\n", + parms->dir, + parms->type, + rc); + } + + return rc; +} + +/** + * Internal function to get a Table Entry. Supports all Table Types + * except the TF_TBL_TYPE_EXT as that is handled as a table scope. 
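+ *
+ * Mirrors the set path: the caller-supplied index is converted from
+ * the RM base with tf_rm_convert_index(), validated against the
+ * session pool with ba_inuse() and only then fetched from firmware
+ * through tf_msg_get_tbl_entry().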
+ * + * [in] tfp + * Pointer to TruFlow handle + * + * [in] parms + * Pointer to input parameters + * + * Returns: + * 0 - Success + * -EINVAL - Parameter error + */ +static int +tf_get_tbl_entry_internal(struct tf *tfp, + struct tf_get_tbl_entry_parms *parms) +{ + int rc; + int id; + uint32_t index; + struct bitalloc *session_pool; + struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data); + + /* Lookup the pool using the table type of the element */ + rc = tf_rm_lookup_tbl_type_pool(tfs, + parms->dir, + parms->type, + &session_pool); + /* Error logging handled by tf_rm_lookup_tbl_type_pool */ + if (rc) + return rc; + + index = parms->idx; + + /* Adjust the returned index/offset as there is no guarantee + * that the start is 0 at time of RM allocation + */ + tf_rm_convert_index(tfs, + parms->dir, + parms->type, + TF_RM_CONVERT_RM_BASE, + parms->idx, + &index); + + /* Verify that the entry has been previously allocated */ + id = ba_inuse(session_pool, index); + if (id != 1) { + PMD_DRV_LOG(ERR, + "dir:%d, Invalid or not allocated index, type:%d, idx:%d\n", + parms->dir, + parms->type, + index); + return -EINVAL; + } + + /* Get the entry */ + rc = tf_msg_get_tbl_entry(tfp, + parms->dir, + parms->type, + parms->data_sz_in_bytes, + parms->data, + parms->idx); + if (rc) { + PMD_DRV_LOG(ERR, + "dir:%d, Get failed, type:%d, rc:%d\n", + parms->dir, + parms->type, + rc); + } + + return rc; +} + +#if (TF_SHADOW == 1) +/** + * Allocate Tbl entry from the Shadow DB. Shadow DB is searched for + * the requested entry. If found the ref count is incremente and + * returned. + * + * [in] tfs + * Pointer to session + * [in] parms + * Allocation parameters + * + * Return: + * 0 - Success, entry found and ref count incremented + * -ENOENT - Failure, entry not found + */ +static int +tf_alloc_tbl_entry_shadow(struct tf_session *tfs __rte_unused, + struct tf_alloc_tbl_entry_parms *parms __rte_unused) +{ + PMD_DRV_LOG(ERR, + "dir:%d, Entry Alloc with search not supported\n", + parms->dir); + + + return -EOPNOTSUPP; +} + +/** + * Free Tbl entry from the Shadow DB. Shadow DB is searched for + * the requested entry. If found the ref count is decremente and + * new ref_count returned. + * + * [in] tfs + * Pointer to session + * [in] parms + * Allocation parameters + * + * Return: + * 0 - Success, entry found and ref count decremented + * -ENOENT - Failure, entry not found + */ +static int +tf_free_tbl_entry_shadow(struct tf_session *tfs, + struct tf_free_tbl_entry_parms *parms) +{ + PMD_DRV_LOG(ERR, + "dir:%d, Entry Free with search not supported\n", + parms->dir); + + return -EOPNOTSUPP; +} +#endif /* TF_SHADOW */ + +/** + * Create External Tbl pool of memory indexes. 
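+ *
+ * For example, with num_entries = 4 and entry_sz_bytes = 8 the stack
+ * is seeded with the byte offsets 24, 16, 8 and 0 (pushed in that
+ * order), so the first allocation pops offset 0.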
+ * + * [in] dir + * direction + * [in] tbl_scope_cb + * pointer to the table scope + * [in] num_entries + * number of entries to write + * [in] entry_sz_bytes + * size of each entry + * + * Return: + * 0 - Success, entry allocated - no search support + * -ENOMEM -EINVAL -EOPNOTSUPP + * - Failure, entry not allocated, out of resources + */ +static int +tf_create_tbl_pool_external(enum tf_dir dir, + struct tf_tbl_scope_cb *tbl_scope_cb, + uint32_t num_entries, + uint32_t entry_sz_bytes) +{ + struct tfp_calloc_parms parms; + uint32_t i; + int32_t j; + int rc = 0; + struct stack *pool = &tbl_scope_cb->ext_act_pool[dir]; + + parms.nitems = num_entries; + parms.size = sizeof(uint32_t); + parms.alignment = 0; + + if (tfp_calloc(&parms) != 0) { + PMD_DRV_LOG(ERR, "%d: TBL: external pool failure %s\n", + dir, strerror(-ENOMEM)); + return -ENOMEM; + } + + /* Create empty stack + */ + rc = stack_init(num_entries, parms.mem_va, pool); + + if (rc != 0) { + PMD_DRV_LOG(ERR, "%d: TBL: stack init failure %s\n", + dir, strerror(-rc)); + goto cleanup; + } + + /* Save the malloced memory address so that it can + * be freed when the table scope is freed. + */ + tbl_scope_cb->ext_act_pool_mem[dir] = (uint32_t *)parms.mem_va; + + /* Fill pool with indexes in reverse + */ + j = (num_entries - 1) * entry_sz_bytes; + + for (i = 0; i < num_entries; i++) { + rc = stack_push(pool, j); + if (rc != 0) { + PMD_DRV_LOG(ERR, "%s TBL: stack failure %s\n", + tf_dir_2_str(dir), strerror(-rc)); + goto cleanup; + } + + if (j < 0) { + PMD_DRV_LOG(ERR, "%d TBL: invalid offset (%d)\n", + dir, j); + goto cleanup; + } + j -= entry_sz_bytes; + } + + if (!stack_is_full(pool)) { + rc = -EINVAL; + PMD_DRV_LOG(ERR, "%d TBL: stack failure %s\n", + dir, strerror(-rc)); + goto cleanup; + } + return 0; +cleanup: + tfp_free((void *)parms.mem_va); + return rc; +} + +/** + * Destroy External Tbl pool of memory indexes. + * + * [in] dir + * direction + * [in] tbl_scope_cb + * pointer to the table scope + * + */ +static void +tf_destroy_tbl_pool_external(enum tf_dir dir, + struct tf_tbl_scope_cb *tbl_scope_cb) +{ + uint32_t *ext_act_pool_mem = + tbl_scope_cb->ext_act_pool_mem[dir]; + + tfp_free(ext_act_pool_mem); +} + +/** + * Allocate External Tbl entry from the Session Pool. 
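+ *
+ * The value returned in parms->idx is a byte offset into the table
+ * scope's external action record memory; tf_set_tbl_entry() later
+ * resolves it to a host page with tf_em_get_table_page() and writes
+ * the data at (offset % TF_EM_PAGE_SIZE) within that page.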
+ * + * [in] tfp + * Pointer to Truflow Handle + * [in] parms + * Allocation parameters + * + * Return: + * 0 - Success, entry allocated - no search support + * -ENOMEM -EINVAL -EOPNOTSUPP + * - Failure, entry not allocated, out of resources + */ +static int +tf_alloc_tbl_entry_pool_external(struct tf *tfp, + struct tf_alloc_tbl_entry_parms *parms) +{ + int rc; + uint32_t index; + struct tf_session *tfs; + struct tf_tbl_scope_cb *tbl_scope_cb; + struct stack *pool; + + /* Check parameters */ + if (tfp == NULL || parms == NULL) { + PMD_DRV_LOG(ERR, "Invalid parameters\n"); + return -EINVAL; + } + + if (tfp->session == NULL || tfp->session->core_data == NULL) { + PMD_DRV_LOG(ERR, + "dir:%d, Session info invalid\n", + parms->dir); + return -EINVAL; + } + + tfs = (struct tf_session *)(tfp->session->core_data); + + /* Get the pool info from the table scope + */ + tbl_scope_cb = tbl_scope_cb_find(tfs, parms->tbl_scope_id); + + if (tbl_scope_cb == NULL) { + PMD_DRV_LOG(ERR, + "%s, table scope not allocated\n", + tf_dir_2_str(parms->dir)); + return -EINVAL; + } + pool = &tbl_scope_cb->ext_act_pool[parms->dir]; + + /* Allocate an element + */ + rc = stack_pop(pool, &index); + + if (rc != 0) { + PMD_DRV_LOG(ERR, + "dir:%d, Allocation failed, type:%d\n", + parms->dir, + parms->type); + return rc; + } + parms->idx = index; + return rc; +} + +/** + * Allocate Internal Tbl entry from the Session Pool. + * + * [in] tfp + * Pointer to Truflow Handle + * [in] parms + * Allocation parameters + * + * Return: + * 0 - Success, entry found and ref count decremented + * -ENOMEM - Failure, entry not allocated, out of resources + */ +static int +tf_alloc_tbl_entry_pool_internal(struct tf *tfp, + struct tf_alloc_tbl_entry_parms *parms) +{ + int rc; + int id; + int free_cnt; + uint32_t index; + struct bitalloc *session_pool; + struct tf_session *tfs; + + /* Check parameters */ + if (tfp == NULL || parms == NULL) { + PMD_DRV_LOG(ERR, "Invalid parameters\n"); + return -EINVAL; + } + + if (tfp->session == NULL || tfp->session->core_data == NULL) { + PMD_DRV_LOG(ERR, + "dir:%d, Session info invalid\n", + parms->dir); + return -EINVAL; + } + + tfs = (struct tf_session *)(tfp->session->core_data); + + if (parms->type != TF_TBL_TYPE_FULL_ACT_RECORD && + parms->type != TF_TBL_TYPE_ACT_SP_SMAC && + parms->type != TF_TBL_TYPE_ACT_SP_SMAC_IPV4 && + parms->type != TF_TBL_TYPE_ACT_ENCAP_8B && + parms->type != TF_TBL_TYPE_ACT_ENCAP_16B && + parms->type != TF_TBL_TYPE_ACT_ENCAP_64B) { + PMD_DRV_LOG(ERR, + "dir:%d, Type not supported, type:%d\n", + parms->dir, + parms->type); + return -EOPNOTSUPP; + } + + /* Lookup the pool using the table type of the element */ + rc = tf_rm_lookup_tbl_type_pool(tfs, + parms->dir, + parms->type, + &session_pool); + /* Error logging handled by tf_rm_lookup_tbl_type_pool */ + if (rc) + return rc; + + id = ba_alloc(session_pool); + if (id == -1) { + free_cnt = ba_free_count(session_pool); + + PMD_DRV_LOG(ERR, + "dir:%d, Allocation failed, type:%d, free:%d\n", + parms->dir, + parms->type, + free_cnt); + return -ENOMEM; + } + + /* Adjust the returned index/offset as there is no guarantee + * that the start is 0 at time of RM allocation + */ + tf_rm_convert_index(tfs, + parms->dir, + parms->type, + TF_RM_CONVERT_ADD_BASE, + id, + &index); + parms->idx = index; + return rc; +} + +/** + * Free External Tbl entry to the session pool. 
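+ *
+ * Freeing simply pushes the byte offset back onto the per-direction
+ * stack; the only check on the entry itself is the stack-full error
+ * reported when stack_push() fails.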
+ * + * [in] tfp + * Pointer to Truflow Handle + * [in] parms + * Allocation parameters + * + * Return: + * 0 - Success, entry freed + * + * - Failure, entry not successfully freed for these reasons + * -ENOMEM + * -EOPNOTSUPP + * -EINVAL + */ +static int +tf_free_tbl_entry_pool_external(struct tf *tfp, + struct tf_free_tbl_entry_parms *parms) +{ + int rc = 0; + struct tf_session *tfs; + uint32_t index; + struct tf_tbl_scope_cb *tbl_scope_cb; + struct stack *pool; + + /* Check parameters */ + if (tfp == NULL || parms == NULL) { + PMD_DRV_LOG(ERR, "Invalid parameters\n"); + return -EINVAL; + } + + if (tfp->session == NULL || tfp->session->core_data == NULL) { + PMD_DRV_LOG(ERR, + "dir:%d, Session info invalid\n", + parms->dir); + return -EINVAL; + } + + tfs = (struct tf_session *)(tfp->session->core_data); + + /* Get the pool info from the table scope + */ + tbl_scope_cb = tbl_scope_cb_find(tfs, parms->tbl_scope_id); + + if (tbl_scope_cb == NULL) { + PMD_DRV_LOG(ERR, + "dir:%d, Session info invalid\n", + parms->dir); + return -EINVAL; + } + pool = &tbl_scope_cb->ext_act_pool[parms->dir]; + + index = parms->idx; + + rc = stack_push(pool, index); + + if (rc != 0) { + PMD_DRV_LOG(ERR, + "dir:%d, consistency error, stack full, type:%d, idx:%d\n", + parms->dir, + parms->type, + index); + } + return rc; +} + +/** + * Free Internal Tbl entry from the Session Pool. + * + * [in] tfp + * Pointer to Truflow Handle + * [in] parms + * Allocation parameters + * + * Return: + * 0 - Success, entry found and ref count decremented + * -ENOMEM - Failure, entry not allocated, out of resources + */ +static int +tf_free_tbl_entry_pool_internal(struct tf *tfp, + struct tf_free_tbl_entry_parms *parms) +{ + int rc = 0; + int id; + struct bitalloc *session_pool; + struct tf_session *tfs; + uint32_t index; + + /* Check parameters */ + if (tfp == NULL || parms == NULL) { + PMD_DRV_LOG(ERR, "Invalid parameters\n"); + return -EINVAL; + } + + if (tfp->session == NULL || tfp->session->core_data == NULL) { + PMD_DRV_LOG(ERR, + "dir:%d, Session info invalid\n", + parms->dir); + return -EINVAL; + } + + tfs = (struct tf_session *)(tfp->session->core_data); + + if (parms->type != TF_TBL_TYPE_FULL_ACT_RECORD && + parms->type != TF_TBL_TYPE_ACT_SP_SMAC && + parms->type != TF_TBL_TYPE_ACT_SP_SMAC_IPV4 && + parms->type != TF_TBL_TYPE_ACT_ENCAP_8B && + parms->type != TF_TBL_TYPE_ACT_ENCAP_16B && + parms->type != TF_TBL_TYPE_ACT_ENCAP_64B) { + PMD_DRV_LOG(ERR, + "dir:%d, Type not supported, type:%d\n", + parms->dir, + parms->type); + return -EOPNOTSUPP; + } + + /* Lookup the pool using the table type of the element */ + rc = tf_rm_lookup_tbl_type_pool(tfs, + parms->dir, + parms->type, + &session_pool); + /* Error logging handled by tf_rm_lookup_tbl_type_pool */ + if (rc) + return rc; + + index = parms->idx; + + /* Adjust the returned index/offset as there is no guarantee + * that the start is 0 at time of RM allocation + */ + tf_rm_convert_index(tfs, + parms->dir, + parms->type, + TF_RM_CONVERT_RM_BASE, + parms->idx, + &index); + + /* Check if element was indeed allocated */ + id = ba_inuse_free(session_pool, index); + if (id == -1) { + PMD_DRV_LOG(ERR, + "dir:%d, Element not previously alloc'ed, type:%d, idx:%d\n", + parms->dir, + parms->type, + index); + return -ENOMEM; + } + + return rc; +} + +/* API defined in tf_em.h */ +struct tf_tbl_scope_cb * +tbl_scope_cb_find(struct tf_session *session, + uint32_t tbl_scope_id) +{ + int i; + + /* Check that id is valid */ + i = ba_inuse(session->tbl_scope_pool_rx, tbl_scope_id); + if (i < 
0) + return NULL; + + for (i = 0; i < TF_NUM_TBL_SCOPE; i++) { + if (session->tbl_scopes[i].tbl_scope_id == tbl_scope_id) + return &session->tbl_scopes[i]; + } + + return NULL; +} + +/* API defined in tf_core.h */ +int +tf_free_eem_tbl_scope_cb(struct tf *tfp, + struct tf_free_tbl_scope_parms *parms) +{ + int rc = 0; + enum tf_dir dir; + struct tf_tbl_scope_cb *tbl_scope_cb; + struct tf_session *session; + + session = (struct tf_session *)(tfp->session->core_data); + + tbl_scope_cb = tbl_scope_cb_find(session, + parms->tbl_scope_id); + + if (tbl_scope_cb == NULL) + return -EINVAL; + + /* Free Table control block */ + ba_free(session->tbl_scope_pool_rx, tbl_scope_cb->index); + + /* free table scope locks */ + for (dir = 0; dir < TF_DIR_MAX; dir++) { + /* Free associated external pools + */ + tf_destroy_tbl_pool_external(dir, + tbl_scope_cb); + tf_msg_em_op(tfp, + dir, + HWRM_TF_EXT_EM_OP_INPUT_OP_EXT_EM_DISABLE); + + /* free table scope and all associated resources */ + tf_em_ctx_unreg(tfp, tbl_scope_cb, dir); + } + + return rc; +} + +/* API defined in tf_em.h */ +int +tf_alloc_eem_tbl_scope(struct tf *tfp, + struct tf_alloc_tbl_scope_parms *parms) +{ + int rc; + enum tf_dir dir; + struct tf_tbl_scope_cb *tbl_scope_cb; + struct tf_em_table *em_tables; + int index; + struct tf_session *session; + struct tf_free_tbl_scope_parms free_parms; + + /* check parameters */ + if (parms == NULL || tfp->session == NULL) { + PMD_DRV_LOG(ERR, "TBL: Invalid parameters\n"); + return -EINVAL; + } + + session = (struct tf_session *)tfp->session->core_data; + + /* Get Table Scope control block from the session pool */ + index = ba_alloc(session->tbl_scope_pool_rx); + if (index == -1) { + PMD_DRV_LOG(ERR, "EEM: Unable to allocate table scope " + "Control Block\n"); + return -ENOMEM; + } + + tbl_scope_cb = &session->tbl_scopes[index]; + tbl_scope_cb->index = index; + tbl_scope_cb->tbl_scope_id = index; + parms->tbl_scope_id = index; + + for (dir = 0; dir < TF_DIR_MAX; dir++) { + rc = tf_msg_em_qcaps(tfp, + dir, + &tbl_scope_cb->em_caps[dir]); + if (rc) { + PMD_DRV_LOG(ERR, + "EEM: Unable to query for EEM capability\n"); + goto cleanup; + } + } + + /* + * Validate and setup table sizes + */ + if (tf_em_validate_num_entries(tbl_scope_cb, parms)) + goto cleanup; + + for (dir = 0; dir < TF_DIR_MAX; dir++) { + /* + * Allocate tables and signal configuration to FW + */ + rc = tf_em_ctx_reg(tfp, tbl_scope_cb, dir); + if (rc) { + PMD_DRV_LOG(ERR, + "EEM: Unable to register for EEM ctx\n"); + goto cleanup; + } + + em_tables = tbl_scope_cb->em_ctx_info[dir].em_tables; + rc = tf_msg_em_cfg(tfp, + em_tables[KEY0_TABLE].num_entries, + em_tables[KEY0_TABLE].ctx_id, + em_tables[KEY1_TABLE].ctx_id, + em_tables[RECORD_TABLE].ctx_id, + em_tables[EFC_TABLE].ctx_id, + parms->hw_flow_cache_flush_timer, + dir); + if (rc) { + PMD_DRV_LOG(ERR, + "TBL: Unable to configure EEM in firmware\n"); + goto cleanup_full; + } + + rc = tf_msg_em_op(tfp, + dir, + HWRM_TF_EXT_EM_OP_INPUT_OP_EXT_EM_ENABLE); + + if (rc) { + PMD_DRV_LOG(ERR, + "EEM: Unable to enable EEM in firmware\n"); + goto cleanup_full; + } + + /* Allocate the pool of offsets of the external memory. + * Initially, this is a single fixed size pool for all external + * actions related to a single table scope. 
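+ * The pool is sized from the RECORD_TABLE entries and entry size
+ * passed below, so every external action record slot has a
+ * corresponding index.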
+ */ + rc = tf_create_tbl_pool_external(dir, + tbl_scope_cb, + em_tables[RECORD_TABLE].num_entries, + em_tables[RECORD_TABLE].entry_size); + if (rc) { + PMD_DRV_LOG(ERR, + "%d TBL: Unable to allocate idx pools %s\n", + dir, + strerror(-rc)); + goto cleanup_full; + } + } + + return 0; + +cleanup_full: + free_parms.tbl_scope_id = index; + tf_free_eem_tbl_scope_cb(tfp, &free_parms); + return -EINVAL; + +cleanup: + /* Free Table control block */ + ba_free(session->tbl_scope_pool_rx, tbl_scope_cb->index); + return -EINVAL; +} + +/* API defined in tf_core.h */ +int +tf_set_tbl_entry(struct tf *tfp, + struct tf_set_tbl_entry_parms *parms) +{ + int rc = 0; + struct tf_tbl_scope_cb *tbl_scope_cb; + struct tf_session *session; + + if (tfp == NULL || parms == NULL || parms->data == NULL) + return -EINVAL; + + if (tfp->session == NULL || tfp->session->core_data == NULL) { + PMD_DRV_LOG(ERR, + "dir:%d, Session info invalid\n", + parms->dir); + return -EINVAL; + } + + if (parms->type == TF_TBL_TYPE_EXT) { + void *base_addr; + uint32_t offset = parms->idx; + uint32_t tbl_scope_id; + + session = (struct tf_session *)(tfp->session->core_data); + + tbl_scope_id = parms->tbl_scope_id; + + if (tbl_scope_id == TF_TBL_SCOPE_INVALID) { + PMD_DRV_LOG(ERR, + "dir:%d, Table scope not allocated\n", + parms->dir); + return -EINVAL; + } + + /* Get the table scope control block associated with the + * external pool + */ + tbl_scope_cb = tbl_scope_cb_find(session, tbl_scope_id); + + if (tbl_scope_cb == NULL) + return -EINVAL; + + /* External table, implicitly the Action table */ + base_addr = tf_em_get_table_page(tbl_scope_cb, + parms->dir, + offset, + RECORD_TABLE); + if (base_addr == NULL) { + PMD_DRV_LOG(ERR, + "dir:%d, Base address lookup failed\n", + parms->dir); + return -EINVAL; + } + + offset %= TF_EM_PAGE_SIZE; + rte_memcpy((char *)base_addr + offset, + parms->data, + parms->data_sz_in_bytes); + } else { + /* Internal table type processing */ + rc = tf_set_tbl_entry_internal(tfp, parms); + if (rc) { + PMD_DRV_LOG(ERR, + "dir:%d, Set failed, type:%d, rc:%d\n", + parms->dir, + parms->type, + rc); + } + } + + return rc; +} + +/* API defined in tf_core.h */ +int +tf_get_tbl_entry(struct tf *tfp, + struct tf_get_tbl_entry_parms *parms) +{ + int rc = 0; + + if (tfp == NULL || parms == NULL) + return -EINVAL; + + if (tfp->session == NULL || tfp->session->core_data == NULL) { + PMD_DRV_LOG(ERR, + "dir:%d, Session info invalid\n", + parms->dir); + return -EINVAL; + } + + if (parms->type == TF_TBL_TYPE_EXT) { + PMD_DRV_LOG(ERR, + "dir:%d, External table type not supported\n", + parms->dir); + + rc = -EOPNOTSUPP; + } else { + /* Internal table type processing */ + rc = tf_get_tbl_entry_internal(tfp, parms); + if (rc) + PMD_DRV_LOG(ERR, + "dir:%d, Get failed, type:%d, rc:%d\n", + parms->dir, + parms->type, + rc); + } + + return rc; +} + +/* API defined in tf_core.h */ +int +tf_alloc_tbl_scope(struct tf *tfp, + struct tf_alloc_tbl_scope_parms *parms) +{ + int rc; + + /* check parameters */ + if (parms == NULL || tfp == NULL) { + PMD_DRV_LOG(ERR, "TBL: Invalid parameters\n"); + return -EINVAL; + } + + rc = tf_alloc_eem_tbl_scope(tfp, parms); + + return rc; +} + +/* API defined in tf_core.h */ +int +tf_free_tbl_scope(struct tf *tfp, + struct tf_free_tbl_scope_parms *parms) +{ + int rc; + + /* check parameters */ + if (parms == NULL || tfp == NULL) { + PMD_DRV_LOG(ERR, "TBL: Invalid parameters\n"); + return -EINVAL; + } + + /* free table scope and all associated resources */ + rc = tf_free_eem_tbl_scope_cb(tfp, parms); + + 
return rc; +} + +/* API defined in tf_core.h */ +int +tf_alloc_tbl_entry(struct tf *tfp, + struct tf_alloc_tbl_entry_parms *parms) +{ + int rc; +#if (TF_SHADOW == 1) + struct tf_session *tfs; +#endif /* TF_SHADOW */ + + /* Check parameters */ + if (parms == NULL || tfp == NULL) { + PMD_DRV_LOG(ERR, "TBL: Invalid parameters\n"); + return -EINVAL; + } + /* + * No shadow copy support for external tables, allocate and return + */ + if (parms->type == TF_TBL_TYPE_EXT) { + rc = tf_alloc_tbl_entry_pool_external(tfp, parms); + return rc; + } + +#if (TF_SHADOW == 1) + if (tfp->session == NULL || tfp->session->core_data == NULL) { + PMD_DRV_LOG(ERR, + "dir:%d, Session info invalid\n", + parms->dir); + return -EINVAL; + } + + tfs = (struct tf_session *)(tfp->session->core_data); + + /* Search the Shadow DB for requested element. If not found go + * allocate one from the Session Pool + */ + if (parms->search_enable && tfs->shadow_copy) { + rc = tf_alloc_tbl_entry_shadow(tfs, parms); + /* Entry found and parms populated with return data */ + if (rc == 0) + return rc; + } +#endif /* TF_SHADOW */ + + rc = tf_alloc_tbl_entry_pool_internal(tfp, parms); + if (rc) + PMD_DRV_LOG(ERR, "dir%d, Alloc failed, rc:%d\n", + parms->dir, + rc); + + return rc; +} + +/* API defined in tf_core.h */ +int +tf_free_tbl_entry(struct tf *tfp, + struct tf_free_tbl_entry_parms *parms) +{ + int rc; +#if (TF_SHADOW == 1) + struct tf_session *tfs; +#endif /* TF_SHADOW */ + + /* Check parameters */ + if (parms == NULL || tfp == NULL) { + PMD_DRV_LOG(ERR, "TBL: Invalid parameters\n"); + return -EINVAL; + } + /* + * No shadow of external tables so just free the entry + */ + if (parms->type == TF_TBL_TYPE_EXT) { + rc = tf_free_tbl_entry_pool_external(tfp, parms); + return rc; + } + +#if (TF_SHADOW == 1) + if (tfp->session == NULL || tfp->session->core_data == NULL) { + PMD_DRV_LOG(ERR, + "dir:%d, Session info invalid\n", + parms->dir); + return -EINVAL; + } + + tfs = (struct tf_session *)(tfp->session->core_data); + + /* Search the Shadow DB for requested element. If not found go + * allocate one from the Session Pool + */ + if (parms->search_enable && tfs->shadow_copy) { + rc = tf_free_tbl_entry_shadow(tfs, parms); + /* Entry free'ed and parms populated with return data */ + if (rc == 0) + return rc; + } +#endif /* TF_SHADOW */ + + rc = tf_free_tbl_entry_pool_internal(tfp, parms); + + if (rc) + PMD_DRV_LOG(ERR, "dir:%d, Alloc failed, rc:%d\n", + parms->dir, + rc); + return rc; +} diff --git a/src/spdk/dpdk/drivers/net/bnxt/tf_core/tf_tbl.h b/src/spdk/dpdk/drivers/net/bnxt/tf_core/tf_tbl.h new file mode 100644 index 000000000..bdc6288ee --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnxt/tf_core/tf_tbl.h @@ -0,0 +1,126 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019-2020 Broadcom + * All rights reserved. 
+ */ + +#ifndef _TF_TBL_H_ +#define _TF_TBL_H_ + +#include +#include "stack.h" + +enum tf_pg_tbl_lvl { + PT_LVL_0, + PT_LVL_1, + PT_LVL_2, + PT_LVL_MAX +}; + +enum tf_em_table_type { + KEY0_TABLE, + KEY1_TABLE, + RECORD_TABLE, + EFC_TABLE, + MAX_TABLE +}; + +struct tf_em_page_tbl { + uint32_t pg_count; + uint32_t pg_size; + void **pg_va_tbl; + uint64_t *pg_pa_tbl; +}; + +struct tf_em_table { + int type; + uint32_t num_entries; + uint16_t ctx_id; + uint32_t entry_size; + int num_lvl; + uint32_t page_cnt[PT_LVL_MAX]; + uint64_t num_data_pages; + void *l0_addr; + uint64_t l0_dma_addr; + struct tf_em_page_tbl pg_tbl[PT_LVL_MAX]; +}; + +struct tf_em_ctx_mem_info { + struct tf_em_table em_tables[MAX_TABLE]; +}; + +/** table scope control block content */ +struct tf_em_caps { + uint32_t flags; + uint32_t supported; + uint32_t max_entries_supported; + uint16_t key_entry_size; + uint16_t record_entry_size; + uint16_t efc_entry_size; +}; + +/** Invalid table scope id */ +#define TF_TBL_SCOPE_INVALID 0xffffffff + +/** + * Table Scope Control Block + * + * Holds private data for a table scope. Only one instance of a table + * scope with Internal EM is supported. + */ +struct tf_tbl_scope_cb { + uint32_t tbl_scope_id; + int index; + struct tf_em_ctx_mem_info em_ctx_info[TF_DIR_MAX]; + struct tf_em_caps em_caps[TF_DIR_MAX]; + struct stack ext_act_pool[TF_DIR_MAX]; + uint32_t *ext_act_pool_mem[TF_DIR_MAX]; +}; + +/** Hardware Page sizes supported for EEM: 4K, 8K, 64K, 256K, 1M, 2M, 4M, 1G. + * Round-down other page sizes to the lower hardware page size supported. + */ +#define BNXT_PAGE_SHIFT 22 /** 2M */ + +#if (BNXT_PAGE_SHIFT < 12) /** < 4K >> 4K */ +#define TF_EM_PAGE_SHIFT 12 +#define TF_EM_PAGE_SIZE_ENUM HWRM_TF_CTXT_MEM_RGTR_INPUT_PAGE_SIZE_4K +#elif (BNXT_PAGE_SHIFT <= 13) /** 4K, 8K */ +#define TF_EM_PAGE_SHIFT 13 +#define TF_EM_PAGE_SIZE_ENUM HWRM_TF_CTXT_MEM_RGTR_INPUT_PAGE_SIZE_8K +#elif (BNXT_PAGE_SHIFT < 16) /** 16K, 32K >> 8K */ +#define TF_EM_PAGE_SHIFT 15 +#define TF_EM_PAGE_SIZE_ENUM HWRM_TF_CTXT_MEM_RGTR_INPUT_PAGE_SIZE_32K +#elif (BNXT_PAGE_SHIFT <= 17) /** 64K, 128K >> 64K */ +#define TF_EM_PAGE_SHIFT 16 +#define TF_EM_PAGE_SIZE_ENUM HWRM_TF_CTXT_MEM_RGTR_INPUT_PAGE_SIZE_64K +#elif (BNXT_PAGE_SHIFT <= 19) /** 256K, 512K >> 256K */ +#define TF_EM_PAGE_SHIFT 18 +#define TF_EM_PAGE_SIZE_ENUM HWRM_TF_CTXT_MEM_RGTR_INPUT_PAGE_SIZE_256K +#elif (BNXT_PAGE_SHIFT <= 21) /** 1M */ +#define TF_EM_PAGE_SHIFT 20 +#define TF_EM_PAGE_SIZE_ENUM HWRM_TF_CTXT_MEM_RGTR_INPUT_PAGE_SIZE_1M +#elif (BNXT_PAGE_SHIFT <= 22) /** 2M, 4M */ +#define TF_EM_PAGE_SHIFT 21 +#define TF_EM_PAGE_SIZE_ENUM HWRM_TF_CTXT_MEM_RGTR_INPUT_PAGE_SIZE_2M +#elif (BNXT_PAGE_SHIFT <= 29) /** 8M ... 512M >> 4M */ +#define TF_EM_PAGE_SHIFT 22 +#define TF_EM_PAGE_SIZE_ENUM HWRM_TF_CTXT_MEM_RGTR_INPUT_PAGE_SIZE_4M +#else /** >= 1G >> 1G */ +#define TF_EM_PAGE_SHIFT 30 +#define TF_EM_PAGE_SIZE_ENUM HWRM_TF_CTXT_MEM_RGTR_INPUT_PAGE_SIZE_1G +#endif + +#define TF_EM_PAGE_SIZE (1 << TF_EM_PAGE_SHIFT) +#define TF_EM_PAGE_ALIGNMENT (1 << TF_EM_PAGE_SHIFT) + +/** + * Initialize table pool structure to indicate + * no table scope has been associated with the + * external pool of indexes. 
+ * + * [in] session + */ +void +tf_init_tbl_pool(struct tf_session *session); + +#endif /* _TF_TBL_H_ */ diff --git a/src/spdk/dpdk/drivers/net/bnxt/tf_core/tfp.c b/src/spdk/dpdk/drivers/net/bnxt/tf_core/tfp.c new file mode 100644 index 000000000..3bce3ade1 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnxt/tf_core/tfp.c @@ -0,0 +1,163 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * see the individual elements. + * Copyright(c) 2019-2020 Broadcom + * All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "tf_core.h" +#include "tfp.h" +#include "bnxt.h" +#include "bnxt_hwrm.h" +#include "tf_msg_common.h" + +/** + * Sends TruFlow msg to the TruFlow Firmware using + * a message specific HWRM message type. + * + * Returns success or failure code. + */ +int +tfp_send_msg_direct(struct tf *tfp, + struct tfp_send_msg_parms *parms) +{ + int rc = 0; + uint8_t use_kong_mb = 1; + + if (parms == NULL) + return -EINVAL; + + if (parms->mailbox == TF_CHIMP_MB) + use_kong_mb = 0; + + rc = bnxt_hwrm_tf_message_direct(container_of(tfp, + struct bnxt, + tfp), + use_kong_mb, + parms->tf_type, + parms->req_data, + parms->req_size, + parms->resp_data, + parms->resp_size); + + return rc; +} + +/** + * Sends preformatted TruFlow msg to the TruFlow Firmware using + * the Truflow tunnel HWRM message type. + * + * Returns success or failure code. + */ +int +tfp_send_msg_tunneled(struct tf *tfp, + struct tfp_send_msg_parms *parms) +{ + int rc = 0; + uint8_t use_kong_mb = 1; + + if (parms == NULL) + return -EINVAL; + + if (parms->mailbox == TF_CHIMP_MB) + use_kong_mb = 0; + + rc = bnxt_hwrm_tf_message_tunneled(container_of(tfp, + struct bnxt, + tfp), + use_kong_mb, + parms->tf_type, + parms->tf_subtype, + &parms->tf_resp_code, + parms->req_data, + parms->req_size, + parms->resp_data, + parms->resp_size); + + return rc; +} + +/** + * Allocates zero'ed memory from the heap. + * + * Returns success or failure code. + */ +int +tfp_calloc(struct tfp_calloc_parms *parms) +{ + if (parms == NULL) + return -EINVAL; + + parms->mem_va = rte_zmalloc("tf", + (parms->nitems * parms->size), + parms->alignment); + if (parms->mem_va == NULL) { + PMD_DRV_LOG(ERR, "Allocate failed mem_va\n"); + return -ENOMEM; + } + + parms->mem_pa = (void *)((uintptr_t)rte_mem_virt2iova(parms->mem_va)); + if (parms->mem_pa == (void *)((uintptr_t)RTE_BAD_IOVA)) { + PMD_DRV_LOG(ERR, "Allocate failed mem_pa\n"); + return -ENOMEM; + } + + return 0; +} + +/** + * Frees the memory space pointed to by the provided pointer. The + * pointer must have been returned from the tfp_calloc(). + */ +void +tfp_free(void *addr) +{ + rte_free(addr); +} + +/** + * Copies n bytes from src memory to dest memory. The memory areas + * must not overlap. 
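+ * A thin wrapper over rte_memcpy(), hence the memcpy-style
+ * requirement that the areas do not overlap.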
+ */ +void +tfp_memcpy(void *dest, void *src, size_t n) +{ + rte_memcpy(dest, src, n); +} + +/** + * Used to initialize portable spin lock + */ +void +tfp_spinlock_init(struct tfp_spinlock_parms *parms) +{ + rte_spinlock_init(&parms->slock); +} + +/** + * Used to lock portable spin lock + */ +void +tfp_spinlock_lock(struct tfp_spinlock_parms *parms) +{ + rte_spinlock_lock(&parms->slock); +} + +/** + * Used to unlock portable spin lock + */ +void +tfp_spinlock_unlock(struct tfp_spinlock_parms *parms) +{ + rte_spinlock_unlock(&parms->slock); +} diff --git a/src/spdk/dpdk/drivers/net/bnxt/tf_core/tfp.h b/src/spdk/dpdk/drivers/net/bnxt/tf_core/tfp.h new file mode 100644 index 000000000..8d5e94e1a --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnxt/tf_core/tfp.h @@ -0,0 +1,188 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019-2020 Broadcom + * All rights reserved. + */ + +/* This header file defines the Portability structures and APIs for + * TruFlow. + */ + +#ifndef _TFP_H_ +#define _TFP_H_ + +#include + +/** Spinlock + */ +struct tfp_spinlock_parms { + rte_spinlock_t slock; +}; + +/** + * @file + * + * TrueFlow Portability API Header File + */ + +/** send message parameter definition + */ +struct tfp_send_msg_parms { + /** + * [in] mailbox, specifying the Mailbox to send the command on. + */ + uint32_t mailbox; + /** + * [in] tlv_subtype, specifies the tlv_type. + */ + uint16_t tf_type; + /** + * [in] tlv_subtype, specifies the tlv_subtype. + */ + uint16_t tf_subtype; + /** + * [out] tf_resp_code, response code from the internal tlv + * message. Only supported on tunneled messages. + */ + uint32_t tf_resp_code; + /** + * [out] size, number specifying the request size of the data in bytes + */ + uint32_t req_size; + /** + * [in] data, pointer to the data to be sent within the HWRM command + */ + uint32_t *req_data; + /** + * [out] size, number specifying the response size of the data in bytes + */ + uint32_t resp_size; + /** + * [out] data, pointer to the data to be sent within the HWRM command + */ + uint32_t *resp_data; +}; + +/** calloc parameter definition + */ +struct tfp_calloc_parms { + /** + * [in] nitems, number specifying number of items to allocate. + */ + size_t nitems; + /** + * [in] size, number specifying the size of each memory item + * requested. Size is in bytes. + */ + size_t size; + /** + * [in] alignment, number indicates byte alignment required. 0 + * - don't care, 16 - 16 byte alignment, 4K - 4K alignment etc + */ + size_t alignment; + /** + * [out] mem_va, pointer to the allocated memory. + */ + void *mem_va; + /** + * [out] mem_pa, physical address of the allocated memory. 
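+ *
+ * Illustrative use (not taken from the driver sources): allocate one
+ * page-aligned 4K buffer and release it again.
+ *
+ *   struct tfp_calloc_parms p;
+ *
+ *   p.nitems = 1;
+ *   p.size = 4096;
+ *   p.alignment = 4096;
+ *   if (tfp_calloc(&p) == 0) {
+ *           ... p.mem_va is the zeroed buffer, p.mem_pa its IOVA ...
+ *           tfp_free(p.mem_va);
+ *   }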
+ */ + void *mem_pa; +}; + +/** + * @page Portability + * + * @ref tfp_send_direct + * @ref tfp_send_msg_tunneled + * + * @ref tfp_calloc + * @ref tfp_free + * @ref tfp_memcpy + * + * @ref tfp_spinlock_init + * @ref tfp_spinlock_lock + * @ref tfp_spinlock_unlock + * + * @ref tfp_cpu_to_le_16 + * @ref tfp_le_to_cpu_16 + * @ref tfp_cpu_to_le_32 + * @ref tfp_le_to_cpu_32 + * @ref tfp_cpu_to_le_64 + * @ref tfp_le_to_cpu_64 + * @ref tfp_cpu_to_be_16 + * @ref tfp_be_to_cpu_16 + * @ref tfp_cpu_to_be_32 + * @ref tfp_be_to_cpu_32 + * @ref tfp_cpu_to_be_64 + * @ref tfp_be_to_cpu_64 + */ + +#define tfp_cpu_to_le_16(val) rte_cpu_to_le_16(val) +#define tfp_le_to_cpu_16(val) rte_le_to_cpu_16(val) +#define tfp_cpu_to_le_32(val) rte_cpu_to_le_32(val) +#define tfp_le_to_cpu_32(val) rte_le_to_cpu_32(val) +#define tfp_cpu_to_le_64(val) rte_cpu_to_le_64(val) +#define tfp_le_to_cpu_64(val) rte_le_to_cpu_64(val) +#define tfp_cpu_to_be_16(val) rte_cpu_to_be_16(val) +#define tfp_be_to_cpu_16(val) rte_be_to_cpu_16(val) +#define tfp_cpu_to_be_32(val) rte_cpu_to_be_32(val) +#define tfp_be_to_cpu_32(val) rte_be_to_cpu_32(val) +#define tfp_cpu_to_be_64(val) rte_cpu_to_be_64(val) +#define tfp_be_to_cpu_64(val) rte_be_to_cpu_64(val) +#define tfp_bswap_16(val) rte_bswap16(val) +#define tfp_bswap_32(val) rte_bswap32(val) +#define tfp_bswap_64(val) rte_bswap64(val) + +/** + * Provides communication capability from the TrueFlow API layer to + * the TrueFlow firmware. The portability layer internally provides + * the transport to the firmware. + * + * [in] session, pointer to session handle + * [in] parms, parameter structure + * + * Returns: + * 0 - Success + * -1 - Global error like not supported + * -EINVAL - Parameter Error + */ +int tfp_send_msg_direct(struct tf *tfp, + struct tfp_send_msg_parms *parms); + +/** + * Provides communication capability from the TrueFlow API layer to + * the TrueFlow firmware. The portability layer internally provides + * the transport to the firmware. + * + * [in] session, pointer to session handle + * [in] parms, parameter structure + * + * Returns: + * 0 - Success + * -1 - Global error like not supported + * -EINVAL - Parameter Error + */ +int tfp_send_msg_tunneled(struct tf *tfp, + struct tfp_send_msg_parms *parms); + +/** + * Allocates zero'ed memory from the heap. + * + * NOTE: Also performs virt2phy address conversion by default thus is + * can be expensive to invoke. + * + * [in] parms, parameter structure + * + * Returns: + * 0 - Success + * -ENOMEM - No memory available + * -EINVAL - Parameter error + */ +int tfp_calloc(struct tfp_calloc_parms *parms); + +void tfp_free(void *addr); +void tfp_memcpy(void *dest, void *src, size_t n); +void tfp_spinlock_init(struct tfp_spinlock_parms *slock); +void tfp_spinlock_lock(struct tfp_spinlock_parms *slock); +void tfp_spinlock_unlock(struct tfp_spinlock_parms *slock); +#endif /* _TFP_H_ */ diff --git a/src/spdk/dpdk/drivers/net/bnxt/tf_ulp/bnxt_tf_common.h b/src/spdk/dpdk/drivers/net/bnxt/tf_ulp/bnxt_tf_common.h new file mode 100644 index 000000000..f41757908 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnxt/tf_ulp/bnxt_tf_common.h @@ -0,0 +1,54 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2019 Broadcom + * All rights reserved. + */ + +#ifndef _BNXT_TF_COMMON_H_ +#define _BNXT_TF_COMMON_H_ + +#define BNXT_TF_DBG(lvl, fmt, args...) 
PMD_DRV_LOG(lvl, fmt, ## args) + +#define BNXT_ULP_EM_FLOWS 8192 +#define BNXT_ULP_1M_FLOWS 1000000 +#define BNXT_EEM_RX_GLOBAL_ID_MASK (BNXT_ULP_1M_FLOWS - 1) +#define BNXT_EEM_TX_GLOBAL_ID_MASK (BNXT_ULP_1M_FLOWS - 1) +#define BNXT_EEM_HASH_KEY2_USED 0x8000000 +#define BNXT_EEM_RX_HW_HASH_KEY2_BIT BNXT_ULP_1M_FLOWS +#define BNXT_ULP_DFLT_RX_MAX_KEY 512 +#define BNXT_ULP_DFLT_RX_MAX_ACTN_ENTRY 256 +#define BNXT_ULP_DFLT_RX_MEM 0 +#define BNXT_ULP_RX_NUM_FLOWS 32 +#define BNXT_ULP_RX_TBL_IF_ID 0 +#define BNXT_ULP_DFLT_TX_MAX_KEY 512 +#define BNXT_ULP_DFLT_TX_MAX_ACTN_ENTRY 256 +#define BNXT_ULP_DFLT_TX_MEM 0 +#define BNXT_ULP_TX_NUM_FLOWS 32 +#define BNXT_ULP_TX_TBL_IF_ID 0 + +enum bnxt_tf_rc { + BNXT_TF_RC_PARSE_ERR = -2, + BNXT_TF_RC_ERROR = -1, + BNXT_TF_RC_SUCCESS = 0 +}; + +/* eth IPv4 Type */ +enum bnxt_ulp_eth_ip_type { + BNXT_ULP_ETH_IPV4 = 4, + BNXT_ULP_ETH_IPV6 = 5, + BNXT_ULP_MAX_ETH_IP_TYPE = 0 +}; + +/* ulp direction Type */ +enum ulp_direction_type { + ULP_DIR_INGRESS, + ULP_DIR_EGRESS, +}; + +struct bnxt_ulp_mark_tbl * +bnxt_ulp_cntxt_ptr2_mark_db_get(struct bnxt_ulp_context *ulp_ctx); + +int32_t +bnxt_ulp_cntxt_ptr2_mark_db_set(struct bnxt_ulp_context *ulp_ctx, + struct bnxt_ulp_mark_tbl *mark_tbl); + +#endif /* _BNXT_TF_COMMON_H_ */ diff --git a/src/spdk/dpdk/drivers/net/bnxt/tf_ulp/bnxt_ulp.c b/src/spdk/dpdk/drivers/net/bnxt/tf_ulp/bnxt_ulp.c new file mode 100644 index 000000000..872c1aba4 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnxt/tf_ulp/bnxt_ulp.c @@ -0,0 +1,798 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019-2020 Broadcom + * All rights reserved. + */ + +#include +#include +#include +#include +#include + +#include "bnxt_ulp.h" +#include "bnxt_tf_common.h" +#include "bnxt.h" +#include "tf_core.h" +#include "tf_ext_flow_handle.h" + +#include "ulp_template_db.h" +#include "ulp_template_struct.h" +#include "ulp_mark_mgr.h" +#include "ulp_flow_db.h" +#include "ulp_mapper.h" +#include "ulp_port_db.h" + +/* Linked list of all TF sessions. */ +STAILQ_HEAD(, bnxt_ulp_session_state) bnxt_ulp_session_list = + STAILQ_HEAD_INITIALIZER(bnxt_ulp_session_list); + +/* Mutex to synchronize bnxt_ulp_session_list operations. */ +static pthread_mutex_t bnxt_ulp_global_mutex = PTHREAD_MUTEX_INITIALIZER; + +/* + * Allow the deletion of context only for the bnxt device that + * created the session + * TBD - The implementation of the function should change to + * using the reference count once tf_session_attach functionality + * is fixed. + */ +bool +ulp_ctx_deinit_allowed(void *ptr) +{ + struct bnxt *bp = (struct bnxt *)ptr; + + if (!bp) + return 0; + + if (&bp->tfp == bp->ulp_ctx->g_tfp) + return 1; + + return 0; +} + +/* + * Initialize an ULP session. + * An ULP session will contain all the resources needed to support rte flow + * offloads. A session is initialized as part of rte_eth_device start. + * A single vswitch instance can have multiple uplinks which means + * rte_eth_device start will be called for each of these devices. + * ULP session manager will make sure that a single ULP session is only + * initialized once. Apart from this, it also initializes MARK database, + * EEM table & flow database. ULP session manager also manages a list of + * all opened ULP sessions. 
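+ * Sessions are keyed by PCI domain and bus (see ulp_get_session()) and
+ * kept on bnxt_ulp_session_list under bnxt_ulp_global_mutex, so all
+ * uplink ports on the same domain/bus share a single TF session.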
+ */ +static int32_t +ulp_ctx_session_open(struct bnxt *bp, + struct bnxt_ulp_session_state *session) +{ + struct rte_eth_dev *ethdev = bp->eth_dev; + int32_t rc = 0; + struct tf_open_session_parms params; + + memset(¶ms, 0, sizeof(params)); + + rc = rte_eth_dev_get_name_by_port(ethdev->data->port_id, + params.ctrl_chan_name); + if (rc) { + BNXT_TF_DBG(ERR, "Invalid port %d, rc = %d\n", + ethdev->data->port_id, rc); + return rc; + } + + rc = tf_open_session(&bp->tfp, ¶ms); + if (rc) { + BNXT_TF_DBG(ERR, "Failed to open TF session - %s, rc = %d\n", + params.ctrl_chan_name, rc); + return -EINVAL; + } + session->session_opened = 1; + session->g_tfp = &bp->tfp; + return rc; +} + +/* + * Close the ULP session. + * It takes the ulp context pointer. + */ +static void +ulp_ctx_session_close(struct bnxt *bp, + struct bnxt_ulp_session_state *session) +{ + /* close the session in the hardware */ + if (session->session_opened) + tf_close_session(&bp->tfp); + session->session_opened = 0; + session->g_tfp = NULL; + bp->ulp_ctx->g_tfp = NULL; +} + +static void +bnxt_init_tbl_scope_parms(struct bnxt *bp, + struct tf_alloc_tbl_scope_parms *params) +{ + struct bnxt_ulp_device_params *dparms; + uint32_t dev_id; + int rc; + + rc = bnxt_ulp_cntxt_dev_id_get(bp->ulp_ctx, &dev_id); + if (rc) + /* TBD: For now, just use default. */ + dparms = 0; + else + dparms = bnxt_ulp_device_params_get(dev_id); + + /* + * Set the flush timer for EEM entries. The value is in 100ms intervals, + * so 100 is 10s. + */ + params->hw_flow_cache_flush_timer = 100; + + if (!dparms) { + params->rx_max_key_sz_in_bits = BNXT_ULP_DFLT_RX_MAX_KEY; + params->rx_max_action_entry_sz_in_bits = + BNXT_ULP_DFLT_RX_MAX_ACTN_ENTRY; + params->rx_mem_size_in_mb = BNXT_ULP_DFLT_RX_MEM; + params->rx_num_flows_in_k = BNXT_ULP_RX_NUM_FLOWS; + params->rx_tbl_if_id = BNXT_ULP_RX_TBL_IF_ID; + + params->tx_max_key_sz_in_bits = BNXT_ULP_DFLT_TX_MAX_KEY; + params->tx_max_action_entry_sz_in_bits = + BNXT_ULP_DFLT_TX_MAX_ACTN_ENTRY; + params->tx_mem_size_in_mb = BNXT_ULP_DFLT_TX_MEM; + params->tx_num_flows_in_k = BNXT_ULP_TX_NUM_FLOWS; + params->tx_tbl_if_id = BNXT_ULP_TX_TBL_IF_ID; + } else { + params->rx_max_key_sz_in_bits = BNXT_ULP_DFLT_RX_MAX_KEY; + params->rx_max_action_entry_sz_in_bits = + BNXT_ULP_DFLT_RX_MAX_ACTN_ENTRY; + params->rx_mem_size_in_mb = BNXT_ULP_DFLT_RX_MEM; + params->rx_num_flows_in_k = dparms->num_flows / (1024); + params->rx_tbl_if_id = BNXT_ULP_RX_TBL_IF_ID; + + params->tx_max_key_sz_in_bits = BNXT_ULP_DFLT_TX_MAX_KEY; + params->tx_max_action_entry_sz_in_bits = + BNXT_ULP_DFLT_TX_MAX_ACTN_ENTRY; + params->tx_mem_size_in_mb = BNXT_ULP_DFLT_TX_MEM; + params->tx_num_flows_in_k = dparms->num_flows / (1024); + params->tx_tbl_if_id = BNXT_ULP_TX_TBL_IF_ID; + } +} + +/* Initialize Extended Exact Match host memory. 
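+ * Builds the table scope parameters with bnxt_init_tbl_scope_parms(),
+ * allocates the EEM table scope through tf_alloc_tbl_scope() and saves
+ * the returned tbl_scope_id in the ULP context.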
*/ +static int32_t +ulp_eem_tbl_scope_init(struct bnxt *bp) +{ + struct tf_alloc_tbl_scope_parms params = {0}; + int rc; + + bnxt_init_tbl_scope_parms(bp, ¶ms); + + rc = tf_alloc_tbl_scope(&bp->tfp, ¶ms); + if (rc) { + BNXT_TF_DBG(ERR, "Unable to allocate eem table scope rc = %d\n", + rc); + return rc; + } + + rc = bnxt_ulp_cntxt_tbl_scope_id_set(bp->ulp_ctx, params.tbl_scope_id); + if (rc) { + BNXT_TF_DBG(ERR, "Unable to set table scope id\n"); + return rc; + } + + return 0; +} + +/* Free Extended Exact Match host memory */ +static int32_t +ulp_eem_tbl_scope_deinit(struct bnxt *bp, struct bnxt_ulp_context *ulp_ctx) +{ + struct tf_free_tbl_scope_parms params = {0}; + struct tf *tfp; + int32_t rc = 0; + + if (!ulp_ctx || !ulp_ctx->cfg_data) + return -EINVAL; + + /* Free the resources for the last device */ + if (!ulp_ctx_deinit_allowed(bp)) + return rc; + + tfp = bnxt_ulp_cntxt_tfp_get(ulp_ctx); + if (!tfp) { + BNXT_TF_DBG(ERR, "Failed to get the truflow pointer\n"); + return -EINVAL; + } + + rc = bnxt_ulp_cntxt_tbl_scope_id_get(ulp_ctx, ¶ms.tbl_scope_id); + if (rc) { + BNXT_TF_DBG(ERR, "Failed to get the table scope id\n"); + return -EINVAL; + } + + rc = tf_free_tbl_scope(tfp, ¶ms); + if (rc) { + BNXT_TF_DBG(ERR, "Unable to free table scope\n"); + return -EINVAL; + } + return rc; +} + +/* The function to free and deinit the ulp context data. */ +static int32_t +ulp_ctx_deinit(struct bnxt *bp, + struct bnxt_ulp_session_state *session) +{ + if (!session || !bp) { + BNXT_TF_DBG(ERR, "Invalid Arguments\n"); + return -EINVAL; + } + + /* close the tf session */ + ulp_ctx_session_close(bp, session); + + /* Free the contents */ + if (session->cfg_data) { + rte_free(session->cfg_data); + bp->ulp_ctx->cfg_data = NULL; + session->cfg_data = NULL; + } + return 0; +} + +/* The function to allocate and initialize the ulp context data. */ +static int32_t +ulp_ctx_init(struct bnxt *bp, + struct bnxt_ulp_session_state *session) +{ + struct bnxt_ulp_data *ulp_data; + int32_t rc = 0; + + if (!session || !bp) { + BNXT_TF_DBG(ERR, "Invalid Arguments\n"); + return -EINVAL; + } + + /* Allocate memory to hold ulp context data. */ + ulp_data = rte_zmalloc("bnxt_ulp_data", + sizeof(struct bnxt_ulp_data), 0); + if (!ulp_data) { + BNXT_TF_DBG(ERR, "Failed to allocate memory for ulp data\n"); + return -ENOMEM; + } + + /* Increment the ulp context data reference count usage. */ + bp->ulp_ctx->cfg_data = ulp_data; + session->cfg_data = ulp_data; + ulp_data->ref_cnt++; + + /* Open the ulp session. */ + rc = ulp_ctx_session_open(bp, session); + if (rc) { + (void)ulp_ctx_deinit(bp, session); + return rc; + } + bnxt_ulp_cntxt_tfp_set(bp->ulp_ctx, session->g_tfp); + return rc; +} + +static int32_t +ulp_ctx_attach(struct bnxt_ulp_context *ulp_ctx, + struct bnxt_ulp_session_state *session) +{ + if (!ulp_ctx || !session) { + BNXT_TF_DBG(ERR, "Invalid Arguments\n"); + return -EINVAL; + } + + /* Increment the ulp context data reference count usage. */ + ulp_ctx->cfg_data = session->cfg_data; + ulp_ctx->cfg_data->ref_cnt++; + + /* TBD call TF_session_attach. */ + ulp_ctx->g_tfp = session->g_tfp; + return 0; +} + +static int32_t +ulp_ctx_detach(struct bnxt *bp, + struct bnxt_ulp_session_state *session) +{ + struct bnxt_ulp_context *ulp_ctx; + + if (!bp || !session) { + BNXT_TF_DBG(ERR, "Invalid Arguments\n"); + return -EINVAL; + } + ulp_ctx = bp->ulp_ctx; + + if (!ulp_ctx->cfg_data) + return 0; + + /* TBD call TF_session_detach */ + + /* Increment the ulp context data reference count usage. 
*/ + if (ulp_ctx->cfg_data->ref_cnt >= 1) { + ulp_ctx->cfg_data->ref_cnt--; + if (ulp_ctx_deinit_allowed(bp)) + ulp_ctx_deinit(bp, session); + ulp_ctx->cfg_data = NULL; + ulp_ctx->g_tfp = NULL; + return 0; + } + BNXT_TF_DBG(ERR, "context deatach on invalid data\n"); + return 0; +} + +/* + * Initialize the state of an ULP session. + * If the state of an ULP session is not initialized, set it's state to + * initialized. If the state is already initialized, do nothing. + */ +static void +ulp_context_initialized(struct bnxt_ulp_session_state *session, bool *init) +{ + pthread_mutex_lock(&session->bnxt_ulp_mutex); + + if (!session->bnxt_ulp_init) { + session->bnxt_ulp_init = true; + *init = false; + } else { + *init = true; + } + + pthread_mutex_unlock(&session->bnxt_ulp_mutex); +} + +/* + * Check if an ULP session is already allocated for a specific PCI + * domain & bus. If it is already allocated simply return the session + * pointer, otherwise allocate a new session. + */ +static struct bnxt_ulp_session_state * +ulp_get_session(struct rte_pci_addr *pci_addr) +{ + struct bnxt_ulp_session_state *session; + + STAILQ_FOREACH(session, &bnxt_ulp_session_list, next) { + if (session->pci_info.domain == pci_addr->domain && + session->pci_info.bus == pci_addr->bus) { + return session; + } + } + return NULL; +} + +/* + * Allocate and Initialize an ULP session and set it's state to INITIALIZED. + * If it's already initialized simply return the already existing session. + */ +static struct bnxt_ulp_session_state * +ulp_session_init(struct bnxt *bp, + bool *init) +{ + struct rte_pci_device *pci_dev; + struct rte_pci_addr *pci_addr; + struct bnxt_ulp_session_state *session; + + if (!bp) + return NULL; + + pci_dev = RTE_DEV_TO_PCI(bp->eth_dev->device); + pci_addr = &pci_dev->addr; + + pthread_mutex_lock(&bnxt_ulp_global_mutex); + + session = ulp_get_session(pci_addr); + if (!session) { + /* Not Found the session Allocate a new one */ + session = rte_zmalloc("bnxt_ulp_session", + sizeof(struct bnxt_ulp_session_state), + 0); + if (!session) { + BNXT_TF_DBG(ERR, + "Allocation failed for bnxt_ulp_session\n"); + pthread_mutex_unlock(&bnxt_ulp_global_mutex); + return NULL; + + } else { + /* Add it to the queue */ + session->pci_info.domain = pci_addr->domain; + session->pci_info.bus = pci_addr->bus; + pthread_mutex_init(&session->bnxt_ulp_mutex, NULL); + STAILQ_INSERT_TAIL(&bnxt_ulp_session_list, + session, next); + } + } + ulp_context_initialized(session, init); + pthread_mutex_unlock(&bnxt_ulp_global_mutex); + return session; +} + +/* + * When a device is closed, remove it's associated session from the global + * session list. + */ +static void +ulp_session_deinit(struct bnxt_ulp_session_state *session) +{ + if (!session) + return; + + if (!session->cfg_data) { + pthread_mutex_lock(&bnxt_ulp_global_mutex); + STAILQ_REMOVE(&bnxt_ulp_session_list, session, + bnxt_ulp_session_state, next); + pthread_mutex_destroy(&session->bnxt_ulp_mutex); + rte_free(session); + pthread_mutex_unlock(&bnxt_ulp_global_mutex); + } +} + +/* + * When a port is initialized by dpdk. This functions is called + * and this function initializes the ULP context and rest of the + * infrastructure associated with it. + */ +int32_t +bnxt_ulp_init(struct bnxt *bp) +{ + struct bnxt_ulp_session_state *session; + bool init; + int rc; + + if (bp->ulp_ctx) { + BNXT_TF_DBG(ERR, "ulp ctx already allocated\n"); + return -EINVAL; + } + + /* + * Multiple uplink ports can be associated with a single vswitch. 
+ * Make sure only the port that is started first will initialize + * the TF session. + */ + session = ulp_session_init(bp, &init); + if (!session) { + BNXT_TF_DBG(ERR, "Failed to initialize the tf session\n"); + return -EINVAL; + } + + bp->ulp_ctx = rte_zmalloc("bnxt_ulp_ctx", + sizeof(struct bnxt_ulp_context), 0); + if (!bp->ulp_ctx) { + BNXT_TF_DBG(ERR, "Failed to allocate ulp ctx\n"); + ulp_session_deinit(session); + return -ENOMEM; + } + + /* + * If ULP is already initialized for a specific domain then simply + * assign the ulp context to this rte_eth_dev. + */ + if (init) { + rc = ulp_ctx_attach(bp->ulp_ctx, session); + if (rc) { + BNXT_TF_DBG(ERR, + "Failed to attach the ulp context\n"); + ulp_session_deinit(session); + rte_free(bp->ulp_ctx); + return rc; + } + /* update the port database */ + rc = ulp_port_db_dev_port_intf_update(bp->ulp_ctx, bp); + if (rc) { + BNXT_TF_DBG(ERR, + "Failed to update port database\n"); + ulp_ctx_detach(bp, session); + ulp_session_deinit(session); + rte_free(bp->ulp_ctx); + } + return rc; + } + + /* Allocate and Initialize the ulp context. */ + rc = ulp_ctx_init(bp, session); + if (rc) { + BNXT_TF_DBG(ERR, "Failed to create the ulp context\n"); + goto jump_to_error; + } + + /* create the port database */ + rc = ulp_port_db_init(bp->ulp_ctx); + if (rc) { + BNXT_TF_DBG(ERR, "Failed to create the port database\n"); + goto jump_to_error; + } + + /* update the port database */ + rc = ulp_port_db_dev_port_intf_update(bp->ulp_ctx, bp); + if (rc) { + BNXT_TF_DBG(ERR, "Failed to update port database\n"); + goto jump_to_error; + } + + /* Create the Mark database. */ + rc = ulp_mark_db_init(bp->ulp_ctx); + if (rc) { + BNXT_TF_DBG(ERR, "Failed to create the mark database\n"); + goto jump_to_error; + } + + /* Create the flow database. */ + rc = ulp_flow_db_init(bp->ulp_ctx); + if (rc) { + BNXT_TF_DBG(ERR, "Failed to create the flow database\n"); + goto jump_to_error; + } + + /* Create the eem table scope. */ + rc = ulp_eem_tbl_scope_init(bp); + if (rc) { + BNXT_TF_DBG(ERR, "Failed to create the eem scope table\n"); + goto jump_to_error; + } + + rc = ulp_mapper_init(bp->ulp_ctx); + if (rc) { + BNXT_TF_DBG(ERR, "Failed to initialize ulp mapper\n"); + goto jump_to_error; + } + + return rc; + +jump_to_error: + bnxt_ulp_deinit(bp); + return -ENOMEM; +} + +/* Below are the access functions to access internal data of ulp context. */ + +/* + * When a port is deinit'ed by dpdk. This function is called + * and this function clears the ULP context and rest of the + * infrastructure associated with it. 
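+ * Teardown mirrors bnxt_ulp_init(): regular flows are flushed first, then
+ * the EEM table scope, flow database, mark database, mapper and port
+ * database are released before the session itself is torn down.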
+ */ +void +bnxt_ulp_deinit(struct bnxt *bp) +{ + struct bnxt_ulp_session_state *session; + struct rte_pci_device *pci_dev; + struct rte_pci_addr *pci_addr; + + /* Get the session first */ + pci_dev = RTE_DEV_TO_PCI(bp->eth_dev->device); + pci_addr = &pci_dev->addr; + pthread_mutex_lock(&bnxt_ulp_global_mutex); + session = ulp_get_session(pci_addr); + pthread_mutex_unlock(&bnxt_ulp_global_mutex); + + /* session not found then just exit */ + if (!session) + return; + + /* clean up regular flows */ + ulp_flow_db_flush_flows(bp->ulp_ctx, BNXT_ULP_REGULAR_FLOW_TABLE); + + /* cleanup the eem table scope */ + ulp_eem_tbl_scope_deinit(bp, bp->ulp_ctx); + + /* cleanup the flow database */ + ulp_flow_db_deinit(bp->ulp_ctx); + + /* Delete the Mark database */ + ulp_mark_db_deinit(bp->ulp_ctx); + + /* cleanup the ulp mapper */ + ulp_mapper_deinit(bp->ulp_ctx); + + /* Delete the Port database */ + ulp_port_db_deinit(bp->ulp_ctx); + + /* Delete the ulp context and tf session */ + ulp_ctx_detach(bp, session); + + /* Finally delete the bnxt session*/ + ulp_session_deinit(session); + + rte_free(bp->ulp_ctx); +} + +/* Function to set the Mark DB into the context */ +int32_t +bnxt_ulp_cntxt_ptr2_mark_db_set(struct bnxt_ulp_context *ulp_ctx, + struct bnxt_ulp_mark_tbl *mark_tbl) +{ + if (!ulp_ctx || !ulp_ctx->cfg_data) { + BNXT_TF_DBG(ERR, "Invalid ulp context data\n"); + return -EINVAL; + } + + ulp_ctx->cfg_data->mark_tbl = mark_tbl; + + return 0; +} + +/* Function to retrieve the Mark DB from the context. */ +struct bnxt_ulp_mark_tbl * +bnxt_ulp_cntxt_ptr2_mark_db_get(struct bnxt_ulp_context *ulp_ctx) +{ + if (!ulp_ctx || !ulp_ctx->cfg_data) + return NULL; + + return ulp_ctx->cfg_data->mark_tbl; +} + +/* Function to set the device id of the hardware. */ +int32_t +bnxt_ulp_cntxt_dev_id_set(struct bnxt_ulp_context *ulp_ctx, + uint32_t dev_id) +{ + if (ulp_ctx && ulp_ctx->cfg_data) { + ulp_ctx->cfg_data->dev_id = dev_id; + return 0; + } + + return -EINVAL; +} + +/* Function to get the device id of the hardware. */ +int32_t +bnxt_ulp_cntxt_dev_id_get(struct bnxt_ulp_context *ulp_ctx, + uint32_t *dev_id) +{ + if (ulp_ctx && ulp_ctx->cfg_data) { + *dev_id = ulp_ctx->cfg_data->dev_id; + return 0; + } + + return -EINVAL; +} + +/* Function to get the table scope id of the EEM table. */ +int32_t +bnxt_ulp_cntxt_tbl_scope_id_get(struct bnxt_ulp_context *ulp_ctx, + uint32_t *tbl_scope_id) +{ + if (ulp_ctx && ulp_ctx->cfg_data) { + *tbl_scope_id = ulp_ctx->cfg_data->tbl_scope_id; + return 0; + } + + return -EINVAL; +} + +/* Function to set the table scope id of the EEM table. */ +int32_t +bnxt_ulp_cntxt_tbl_scope_id_set(struct bnxt_ulp_context *ulp_ctx, + uint32_t tbl_scope_id) +{ + if (ulp_ctx && ulp_ctx->cfg_data) { + ulp_ctx->cfg_data->tbl_scope_id = tbl_scope_id; + return 0; + } + + return -EINVAL; +} + +/* Function to set the tfp session details from the ulp context. */ +int32_t +bnxt_ulp_cntxt_tfp_set(struct bnxt_ulp_context *ulp, struct tf *tfp) +{ + if (!ulp) { + BNXT_TF_DBG(ERR, "Invalid arguments\n"); + return -EINVAL; + } + + /* TBD The tfp should be removed once tf_attach is implemented. */ + ulp->g_tfp = tfp; + return 0; +} + +/* Function to get the tfp session details from the ulp context. */ +struct tf * +bnxt_ulp_cntxt_tfp_get(struct bnxt_ulp_context *ulp) +{ + if (!ulp) { + BNXT_TF_DBG(ERR, "Invalid arguments\n"); + return NULL; + } + /* TBD The tfp should be removed once tf_attach is implemented. */ + return ulp->g_tfp; +} + +/* + * Get the device table entry based on the device id. 
+ * + * dev_id [in] The device id of the hardware + * + * Returns the pointer to the device parameters. + */ +struct bnxt_ulp_device_params * +bnxt_ulp_device_params_get(uint32_t dev_id) +{ + if (dev_id < BNXT_ULP_MAX_NUM_DEVICES) + return &ulp_device_params[dev_id]; + return NULL; +} + +/* Function to set the flow database to the ulp context. */ +int32_t +bnxt_ulp_cntxt_ptr2_flow_db_set(struct bnxt_ulp_context *ulp_ctx, + struct bnxt_ulp_flow_db *flow_db) +{ + if (!ulp_ctx || !ulp_ctx->cfg_data) + return -EINVAL; + + ulp_ctx->cfg_data->flow_db = flow_db; + return 0; +} + +/* Function to get the flow database from the ulp context. */ +struct bnxt_ulp_flow_db * +bnxt_ulp_cntxt_ptr2_flow_db_get(struct bnxt_ulp_context *ulp_ctx) +{ + if (!ulp_ctx || !ulp_ctx->cfg_data) + return NULL; + + return ulp_ctx->cfg_data->flow_db; +} + +/* Function to get the ulp context from eth device. */ +struct bnxt_ulp_context * +bnxt_ulp_eth_dev_ptr2_cntxt_get(struct rte_eth_dev *dev) +{ + struct bnxt *bp; + + bp = (struct bnxt *)dev->data->dev_private; + if (!bp) { + BNXT_TF_DBG(ERR, "Bnxt private data is not initialized\n"); + return NULL; + } + return bp->ulp_ctx; +} + +int32_t +bnxt_ulp_cntxt_ptr2_mapper_data_set(struct bnxt_ulp_context *ulp_ctx, + void *mapper_data) +{ + if (!ulp_ctx || !ulp_ctx->cfg_data) { + BNXT_TF_DBG(ERR, "Invalid ulp context data\n"); + return -EINVAL; + } + + ulp_ctx->cfg_data->mapper_data = mapper_data; + return 0; +} + +void * +bnxt_ulp_cntxt_ptr2_mapper_data_get(struct bnxt_ulp_context *ulp_ctx) +{ + if (!ulp_ctx || !ulp_ctx->cfg_data) { + BNXT_TF_DBG(ERR, "Invalid ulp context data\n"); + return NULL; + } + + return ulp_ctx->cfg_data->mapper_data; +} + +/* Function to set the port database to the ulp context. */ +int32_t +bnxt_ulp_cntxt_ptr2_port_db_set(struct bnxt_ulp_context *ulp_ctx, + struct bnxt_ulp_port_db *port_db) +{ + if (!ulp_ctx || !ulp_ctx->cfg_data) + return -EINVAL; + + ulp_ctx->cfg_data->port_db = port_db; + return 0; +} + +/* Function to get the port database from the ulp context. */ +struct bnxt_ulp_port_db * +bnxt_ulp_cntxt_ptr2_port_db_get(struct bnxt_ulp_context *ulp_ctx) +{ + if (!ulp_ctx || !ulp_ctx->cfg_data) + return NULL; + + return ulp_ctx->cfg_data->port_db; +} diff --git a/src/spdk/dpdk/drivers/net/bnxt/tf_ulp/bnxt_ulp.h b/src/spdk/dpdk/drivers/net/bnxt/tf_ulp/bnxt_ulp.h new file mode 100644 index 000000000..eecc09cea --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnxt/tf_ulp/bnxt_ulp.h @@ -0,0 +1,130 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019-2020 Broadcom + * All rights reserved. + */ + +#ifndef _BNXT_ULP_H_ +#define _BNXT_ULP_H_ + +#include +#include +#include + +#include "rte_ethdev.h" + +struct bnxt_ulp_data { + uint32_t tbl_scope_id; + struct bnxt_ulp_mark_tbl *mark_tbl; + uint32_t dev_id; /* Hardware device id */ + uint32_t ref_cnt; + struct bnxt_ulp_flow_db *flow_db; + void *mapper_data; + struct bnxt_ulp_port_db *port_db; +}; + +struct bnxt_ulp_context { + struct bnxt_ulp_data *cfg_data; + /* TBD The tfp should be removed once tf_attach is implemented. */ + struct tf *g_tfp; +}; + +struct bnxt_ulp_pci_info { + uint32_t domain; + uint8_t bus; +}; + +struct bnxt_ulp_session_state { + STAILQ_ENTRY(bnxt_ulp_session_state) next; + bool bnxt_ulp_init; + pthread_mutex_t bnxt_ulp_mutex; + struct bnxt_ulp_pci_info pci_info; + struct bnxt_ulp_data *cfg_data; + /* TBD The tfp should be removed once tf_attach is implemented. 
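+ * Until then, g_tfp caches the TF session pointer that is shared with
+ * every port attaching to this session (see ulp_ctx_attach()).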
*/ + struct tf *g_tfp; + uint32_t session_opened; +}; + +/* ULP flow id structure */ +struct rte_tf_flow { + uint32_t flow_id; +}; + +/* + * Allow the deletion of context only for the bnxt device that + * created the session + * TBD - The implementation of the function should change to + * using the reference count once tf_session_attach functionality + * is fixed. + */ +bool +ulp_ctx_deinit_allowed(void *bp); + +/* Function to set the device id of the hardware. */ +int32_t +bnxt_ulp_cntxt_dev_id_set(struct bnxt_ulp_context *ulp_ctx, uint32_t dev_id); + +/* Function to get the device id of the hardware. */ +int32_t +bnxt_ulp_cntxt_dev_id_get(struct bnxt_ulp_context *ulp_ctx, uint32_t *dev_id); + +/* Function to set the table scope id of the EEM table. */ +int32_t +bnxt_ulp_cntxt_tbl_scope_id_set(struct bnxt_ulp_context *ulp_ctx, + uint32_t tbl_scope_id); + +/* Function to get the table scope id of the EEM table. */ +int32_t +bnxt_ulp_cntxt_tbl_scope_id_get(struct bnxt_ulp_context *ulp_ctx, + uint32_t *tbl_scope_id); + +/* Function to set the tfp session details in the ulp context. */ +int32_t +bnxt_ulp_cntxt_tfp_set(struct bnxt_ulp_context *ulp, struct tf *tfp); + +/* Function to get the tfp session details from ulp context. */ +struct tf * +bnxt_ulp_cntxt_tfp_get(struct bnxt_ulp_context *ulp); + +/* Get the device table entry based on the device id. */ +struct bnxt_ulp_device_params * +bnxt_ulp_device_params_get(uint32_t dev_id); + +int32_t +bnxt_ulp_ctxt_ptr2_mark_db_set(struct bnxt_ulp_context *ulp_ctx, + struct bnxt_ulp_mark_tbl *mark_tbl); + +struct bnxt_ulp_mark_tbl * +bnxt_ulp_ctxt_ptr2_mark_db_get(struct bnxt_ulp_context *ulp_ctx); + +/* Function to set the flow database to the ulp context. */ +int32_t +bnxt_ulp_cntxt_ptr2_flow_db_set(struct bnxt_ulp_context *ulp_ctx, + struct bnxt_ulp_flow_db *flow_db); + +/* Function to get the flow database from the ulp context. */ +struct bnxt_ulp_flow_db * +bnxt_ulp_cntxt_ptr2_flow_db_get(struct bnxt_ulp_context *ulp_ctx); + +/* Function to get the ulp context from eth device. */ +struct bnxt_ulp_context * +bnxt_ulp_eth_dev_ptr2_cntxt_get(struct rte_eth_dev *dev); + +/* Function to add the ulp mapper data to the ulp context */ +int32_t +bnxt_ulp_cntxt_ptr2_mapper_data_set(struct bnxt_ulp_context *ulp_ctx, + void *mapper_data); + +/* Function to get the ulp mapper data from the ulp context */ +void * +bnxt_ulp_cntxt_ptr2_mapper_data_get(struct bnxt_ulp_context *ulp_ctx); + +/* Function to set the port database to the ulp context. */ +int32_t +bnxt_ulp_cntxt_ptr2_port_db_set(struct bnxt_ulp_context *ulp_ctx, + struct bnxt_ulp_port_db *port_db); + +/* Function to get the port database from the ulp context. */ +struct bnxt_ulp_port_db * +bnxt_ulp_cntxt_ptr2_port_db_get(struct bnxt_ulp_context *ulp_ctx); + +#endif /* _BNXT_ULP_H_ */ diff --git a/src/spdk/dpdk/drivers/net/bnxt/tf_ulp/bnxt_ulp_flow.c b/src/spdk/dpdk/drivers/net/bnxt/tf_ulp/bnxt_ulp_flow.c new file mode 100644 index 000000000..dbec8cecf --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnxt/tf_ulp/bnxt_ulp_flow.c @@ -0,0 +1,287 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2020 Broadcom + * All rights reserved. 
+ */ + +#include "bnxt.h" +#include "bnxt_tf_common.h" +#include "ulp_rte_parser.h" +#include "ulp_matcher.h" +#include "ulp_flow_db.h" +#include "ulp_mapper.h" +#include + +static int32_t +bnxt_ulp_flow_validate_args(const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + /* Perform the validation of the arguments for null */ + if (!error) + return BNXT_TF_RC_ERROR; + + if (!pattern) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM_NUM, + NULL, + "NULL pattern."); + return BNXT_TF_RC_ERROR; + } + + if (!actions) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_NUM, + NULL, + "NULL action."); + return BNXT_TF_RC_ERROR; + } + + if (!attr) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR, + NULL, + "NULL attribute."); + return BNXT_TF_RC_ERROR; + } + + if (attr->egress && attr->ingress) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR, + attr, + "EGRESS AND INGRESS UNSUPPORTED"); + return BNXT_TF_RC_ERROR; + } + return BNXT_TF_RC_SUCCESS; +} + +/* Function to create the rte flow. */ +static struct rte_flow * +bnxt_ulp_flow_create(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + struct bnxt_ulp_mapper_create_parms mapper_cparms = { 0 }; + struct ulp_rte_parser_params params; + struct bnxt_ulp_context *ulp_ctx; + uint32_t class_id, act_tmpl; + struct rte_flow *flow_id; + uint32_t fid; + int ret; + + if (bnxt_ulp_flow_validate_args(attr, + pattern, actions, + error) == BNXT_TF_RC_ERROR) { + BNXT_TF_DBG(ERR, "Invalid arguments being passed\n"); + return NULL; + } + + ulp_ctx = bnxt_ulp_eth_dev_ptr2_cntxt_get(dev); + if (!ulp_ctx) { + BNXT_TF_DBG(ERR, "ULP context is not initialized\n"); + return NULL; + } + + /* Initialize the parser params */ + memset(¶ms, 0, sizeof(struct ulp_rte_parser_params)); + params.ulp_ctx = ulp_ctx; + + if (attr->egress) + params.dir = ULP_DIR_EGRESS; + + /* copy the device port id and direction for further processing */ + ULP_UTIL_CHF_IDX_WR(¶ms, BNXT_ULP_CHF_IDX_INCOMING_IF, + dev->data->port_id); + ULP_UTIL_CHF_IDX_WR(¶ms, BNXT_ULP_CHF_IDX_DIRECTION, params.dir); + + /* Parse the rte flow pattern */ + ret = bnxt_ulp_rte_parser_hdr_parse(pattern, ¶ms); + if (ret != BNXT_TF_RC_SUCCESS) + goto parse_error; + + /* Parse the rte flow action */ + ret = bnxt_ulp_rte_parser_act_parse(actions, ¶ms); + if (ret != BNXT_TF_RC_SUCCESS) + goto parse_error; + + ret = ulp_matcher_pattern_match(¶ms, &class_id); + if (ret != BNXT_TF_RC_SUCCESS) + goto parse_error; + + ret = ulp_matcher_action_match(¶ms, &act_tmpl); + if (ret != BNXT_TF_RC_SUCCESS) + goto parse_error; + + mapper_cparms.app_priority = attr->priority; + mapper_cparms.hdr_bitmap = ¶ms.hdr_bitmap; + mapper_cparms.hdr_field = params.hdr_field; + mapper_cparms.act = ¶ms.act_bitmap; + mapper_cparms.act_prop = ¶ms.act_prop; + mapper_cparms.class_tid = class_id; + mapper_cparms.act_tid = act_tmpl; + mapper_cparms.func_id = bnxt_get_fw_func_id(dev->data->port_id); + mapper_cparms.dir = params.dir; + + /* Call the ulp mapper to create the flow in the hardware. 
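+ * On success the returned fid is handed back to the application encoded
+ * directly as the rte_flow pointer (see the cast below);
+ * bnxt_ulp_flow_destroy() recovers the fid with the reverse cast.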
*/ + ret = ulp_mapper_flow_create(ulp_ctx, &mapper_cparms, &fid); + if (!ret) { + flow_id = (struct rte_flow *)((uintptr_t)fid); + return flow_id; + } + +parse_error: + rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Failed to create flow."); + return NULL; +} + +/* Function to validate the rte flow. */ +static int +bnxt_ulp_flow_validate(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + struct ulp_rte_parser_params params; + uint32_t class_id, act_tmpl; + int ret; + struct bnxt_ulp_context *ulp_ctx; + + if (bnxt_ulp_flow_validate_args(attr, + pattern, actions, + error) == BNXT_TF_RC_ERROR) { + BNXT_TF_DBG(ERR, "Invalid arguments being passed\n"); + return -EINVAL; + } + + ulp_ctx = bnxt_ulp_eth_dev_ptr2_cntxt_get(dev); + if (!ulp_ctx) { + BNXT_TF_DBG(ERR, "ULP context is not initialized\n"); + return -EINVAL; + } + + /* Initialize the parser params */ + memset(¶ms, 0, sizeof(struct ulp_rte_parser_params)); + params.ulp_ctx = ulp_ctx; + + if (attr->egress) + params.dir = ULP_DIR_EGRESS; + + /* Parse the rte flow pattern */ + ret = bnxt_ulp_rte_parser_hdr_parse(pattern, ¶ms); + if (ret != BNXT_TF_RC_SUCCESS) + goto parse_error; + + /* Parse the rte flow action */ + ret = bnxt_ulp_rte_parser_act_parse(actions, ¶ms); + if (ret != BNXT_TF_RC_SUCCESS) + goto parse_error; + + ret = ulp_matcher_pattern_match(¶ms, &class_id); + + if (ret != BNXT_TF_RC_SUCCESS) + goto parse_error; + + ret = ulp_matcher_action_match(¶ms, &act_tmpl); + if (ret != BNXT_TF_RC_SUCCESS) + goto parse_error; + + /* all good return success */ + return ret; + +parse_error: + rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Failed to validate flow."); + return -EINVAL; +} + +/* Function to destroy the rte flow. */ +static int +bnxt_ulp_flow_destroy(struct rte_eth_dev *dev, + struct rte_flow *flow, + struct rte_flow_error *error) +{ + int ret = 0; + struct bnxt_ulp_context *ulp_ctx; + uint32_t flow_id; + uint16_t func_id; + + ulp_ctx = bnxt_ulp_eth_dev_ptr2_cntxt_get(dev); + if (!ulp_ctx) { + BNXT_TF_DBG(ERR, "ULP context is not initialized\n"); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Failed to destroy flow."); + return -EINVAL; + } + + flow_id = (uint32_t)(uintptr_t)flow; + func_id = bnxt_get_fw_func_id(dev->data->port_id); + + if (ulp_flow_db_validate_flow_func(ulp_ctx, flow_id, func_id) == + false) { + BNXT_TF_DBG(ERR, "Incorrect device params\n"); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Failed to destroy flow."); + return -EINVAL; + } + + ret = ulp_mapper_flow_destroy(ulp_ctx, flow_id); + if (ret) + rte_flow_error_set(error, -ret, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Failed to destroy flow."); + + return ret; +} + +/* Function to destroy the rte flows. 
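+ * If this port is the last user of the session (ulp_ctx_deinit_allowed()),
+ * every flow in the session is flushed; otherwise only the flows owned by
+ * this port's function id are removed.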
*/ +static int32_t +bnxt_ulp_flow_flush(struct rte_eth_dev *eth_dev, + struct rte_flow_error *error) +{ + struct bnxt_ulp_context *ulp_ctx; + int32_t ret = 0; + struct bnxt *bp; + uint16_t func_id; + + ulp_ctx = bnxt_ulp_eth_dev_ptr2_cntxt_get(eth_dev); + if (!ulp_ctx) { + BNXT_TF_DBG(ERR, "ULP context is not initialized\n"); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Failed to flush flow."); + return -EINVAL; + } + bp = eth_dev->data->dev_private; + + /* Free the resources for the last device */ + if (ulp_ctx_deinit_allowed(bp)) { + ret = ulp_flow_db_session_flow_flush(ulp_ctx); + } else if (bnxt_ulp_cntxt_ptr2_flow_db_get(ulp_ctx)) { + func_id = bnxt_get_fw_func_id(eth_dev->data->port_id); + ret = ulp_flow_db_function_flow_flush(ulp_ctx, func_id); + } + if (ret) + rte_flow_error_set(error, ret, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Failed to flush flow."); + return ret; +} + +const struct rte_flow_ops bnxt_ulp_rte_flow_ops = { + .validate = bnxt_ulp_flow_validate, + .create = bnxt_ulp_flow_create, + .destroy = bnxt_ulp_flow_destroy, + .flush = bnxt_ulp_flow_flush, + .query = NULL, + .isolate = NULL +}; diff --git a/src/spdk/dpdk/drivers/net/bnxt/tf_ulp/ulp_flow_db.c b/src/spdk/dpdk/drivers/net/bnxt/tf_ulp/ulp_flow_db.c new file mode 100644 index 000000000..35a7f868a --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnxt/tf_ulp/ulp_flow_db.c @@ -0,0 +1,827 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2020 Broadcom + * All rights reserved. + */ + +#include +#include "bnxt.h" +#include "bnxt_tf_common.h" +#include "ulp_flow_db.h" +#include "ulp_utils.h" +#include "ulp_template_struct.h" +#include "ulp_mapper.h" + +#define ULP_FLOW_DB_RES_DIR_BIT 31 +#define ULP_FLOW_DB_RES_DIR_MASK 0x80000000 +#define ULP_FLOW_DB_RES_FUNC_BITS 28 +#define ULP_FLOW_DB_RES_FUNC_MASK 0x70000000 +#define ULP_FLOW_DB_RES_NXT_MASK 0x0FFFFFFF + +/* Macro to copy the nxt_resource_idx */ +#define ULP_FLOW_DB_RES_NXT_SET(dst, src) {(dst) |= ((src) &\ + ULP_FLOW_DB_RES_NXT_MASK); } +#define ULP_FLOW_DB_RES_NXT_RESET(dst) ((dst) &= ~(ULP_FLOW_DB_RES_NXT_MASK)) + +/* + * Helper function to set the bit in the active flow table + * No validation is done in this function. + * + * flow_tbl [in] Ptr to flow table + * idx [in] The index to bit to be set or reset. + * flag [in] 1 to set and 0 to reset. + * + * returns none + */ +static void +ulp_flow_db_active_flow_set(struct bnxt_ulp_flow_tbl *flow_tbl, + uint32_t idx, + uint32_t flag) +{ + uint32_t active_index; + + active_index = idx / ULP_INDEX_BITMAP_SIZE; + if (flag) + ULP_INDEX_BITMAP_SET(flow_tbl->active_flow_tbl[active_index], + idx); + else + ULP_INDEX_BITMAP_RESET(flow_tbl->active_flow_tbl[active_index], + idx); +} + +/* + * Helper function to allocate the flow table and initialize + * is set.No validation being done in this function. + * + * flow_tbl [in] Ptr to flow table + * idx [in] The index to bit to be set or reset. + * + * returns 1 on set or 0 if not set. + */ +static int32_t +ulp_flow_db_active_flow_is_set(struct bnxt_ulp_flow_tbl *flow_tbl, + uint32_t idx) +{ + uint32_t active_index; + + active_index = idx / ULP_INDEX_BITMAP_SIZE; + return ULP_INDEX_BITMAP_GET(flow_tbl->active_flow_tbl[active_index], + idx); +} + +/* + * Helper function to copy the resource params to resource info + * No validation being done in this function. 
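+ * The direction is packed into bit 31 of nxt_resource_idx and the resource
+ * function into bits 28-30, leaving the low 28 bits for the next-resource
+ * index (see the ULP_FLOW_DB_RES_* masks above).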
+ * + * resource_info [out] Ptr to resource information + * params [in] The input params from the caller + * returns none + */ +static void +ulp_flow_db_res_params_to_info(struct ulp_fdb_resource_info *resource_info, + struct ulp_flow_db_res_params *params) +{ + resource_info->nxt_resource_idx |= ((params->direction << + ULP_FLOW_DB_RES_DIR_BIT) & + ULP_FLOW_DB_RES_DIR_MASK); + resource_info->nxt_resource_idx |= ((params->resource_func << + ULP_FLOW_DB_RES_FUNC_BITS) & + ULP_FLOW_DB_RES_FUNC_MASK); + + if (params->resource_func != BNXT_ULP_RESOURCE_FUNC_EM_TABLE) { + resource_info->resource_hndl = (uint32_t)params->resource_hndl; + resource_info->resource_type = params->resource_type; + + } else { + resource_info->resource_em_handle = params->resource_hndl; + } +} + +/* + * Helper function to copy the resource params to resource info + * No validation being done in this function. + * + * resource_info [in] Ptr to resource information + * params [out] The output params to the caller + * + * returns none + */ +static void +ulp_flow_db_res_info_to_params(struct ulp_fdb_resource_info *resource_info, + struct ulp_flow_db_res_params *params) +{ + memset(params, 0, sizeof(struct ulp_flow_db_res_params)); + params->direction = ((resource_info->nxt_resource_idx & + ULP_FLOW_DB_RES_DIR_MASK) >> + ULP_FLOW_DB_RES_DIR_BIT); + params->resource_func = ((resource_info->nxt_resource_idx & + ULP_FLOW_DB_RES_FUNC_MASK) >> + ULP_FLOW_DB_RES_FUNC_BITS); + + if (params->resource_func != BNXT_ULP_RESOURCE_FUNC_EM_TABLE) { + params->resource_hndl = resource_info->resource_hndl; + params->resource_type = resource_info->resource_type; + } else { + params->resource_hndl = resource_info->resource_em_handle; + } +} + +/* + * Helper function to allocate the flow table and initialize + * the stack for allocation operations. + * + * flow_db [in] Ptr to flow database structure + * tbl_idx [in] The index to table creation. + * + * Returns 0 on success or negative number on failure. + */ +static int32_t +ulp_flow_db_alloc_resource(struct bnxt_ulp_flow_db *flow_db, + enum bnxt_ulp_flow_db_tables tbl_idx) +{ + uint32_t idx = 0; + struct bnxt_ulp_flow_tbl *flow_tbl; + uint32_t size; + + flow_tbl = &flow_db->flow_tbl[tbl_idx]; + + size = sizeof(struct ulp_fdb_resource_info) * flow_tbl->num_resources; + flow_tbl->flow_resources = + rte_zmalloc("ulp_fdb_resource_info", size, 0); + + if (!flow_tbl->flow_resources) { + BNXT_TF_DBG(ERR, "Failed to alloc memory for flow table\n"); + return -ENOMEM; + } + size = sizeof(uint32_t) * flow_tbl->num_resources; + flow_tbl->flow_tbl_stack = rte_zmalloc("flow_tbl_stack", size, 0); + if (!flow_tbl->flow_tbl_stack) { + BNXT_TF_DBG(ERR, "Failed to alloc memory flow tbl stack\n"); + return -ENOMEM; + } + size = (flow_tbl->num_flows / sizeof(uint64_t)) + 1; + flow_tbl->active_flow_tbl = rte_zmalloc("active flow tbl", size, 0); + if (!flow_tbl->active_flow_tbl) { + BNXT_TF_DBG(ERR, "Failed to alloc memory active tbl\n"); + return -ENOMEM; + } + + /* Initialize the stack table. */ + for (idx = 0; idx < flow_tbl->num_resources; idx++) + flow_tbl->flow_tbl_stack[idx] = idx; + + /* Ignore the first element in the list. */ + flow_tbl->head_index = 1; + /* Tail points to the last entry in the list. */ + flow_tbl->tail_index = flow_tbl->num_resources - 1; + return 0; +} + +/* + * Helper function to deallocate the flow table. + * + * flow_db [in] Ptr to flow database structure + * tbl_idx [in] The index to table creation. + * + * Returns none. 
+ */ +static void +ulp_flow_db_dealloc_resource(struct bnxt_ulp_flow_db *flow_db, + enum bnxt_ulp_flow_db_tables tbl_idx) +{ + struct bnxt_ulp_flow_tbl *flow_tbl; + + flow_tbl = &flow_db->flow_tbl[tbl_idx]; + + /* Free all the allocated tables in the flow table. */ + if (flow_tbl->active_flow_tbl) { + rte_free(flow_tbl->active_flow_tbl); + flow_tbl->active_flow_tbl = NULL; + } + + if (flow_tbl->flow_tbl_stack) { + rte_free(flow_tbl->flow_tbl_stack); + flow_tbl->flow_tbl_stack = NULL; + } + + if (flow_tbl->flow_resources) { + rte_free(flow_tbl->flow_resources); + flow_tbl->flow_resources = NULL; + } +} + +/* + * Helper function to add function id to the flow table + * + * flow_db [in] Ptr to flow table + * flow_id [in] The flow id of the flow + * func_id [in] The func_id to be set, for reset pass zero + * + * returns none + */ +static void +ulp_flow_db_func_id_set(struct bnxt_ulp_flow_db *flow_db, + uint32_t flow_id, + uint32_t func_id) +{ + /* set the function id in the function table */ + if (flow_id < flow_db->func_id_tbl_size) + flow_db->func_id_tbl[flow_id] = func_id; + else /* This should never happen */ + BNXT_TF_DBG(ERR, "Invalid flow id, flowdb corrupt\n"); +} + +/* + * Initialize the flow database. Memory is allocated in this + * call and assigned to the flow database. + * + * ulp_ctxt [in] Ptr to ulp context + * + * Returns 0 on success or negative number on failure. + */ +int32_t ulp_flow_db_init(struct bnxt_ulp_context *ulp_ctxt) +{ + struct bnxt_ulp_device_params *dparms; + struct bnxt_ulp_flow_tbl *flow_tbl; + struct bnxt_ulp_flow_db *flow_db; + uint32_t dev_id; + + /* Get the dev specific number of flows that needed to be supported. */ + if (bnxt_ulp_cntxt_dev_id_get(ulp_ctxt, &dev_id)) { + BNXT_TF_DBG(ERR, "Invalid device id\n"); + return -EINVAL; + } + + dparms = bnxt_ulp_device_params_get(dev_id); + if (!dparms) { + BNXT_TF_DBG(ERR, "could not fetch the device params\n"); + return -ENODEV; + } + + flow_db = rte_zmalloc("bnxt_ulp_flow_db", + sizeof(struct bnxt_ulp_flow_db), 0); + if (!flow_db) { + BNXT_TF_DBG(ERR, + "Failed to allocate memory for flow table ptr\n"); + return -ENOMEM; + } + + /* Attach the flow database to the ulp context. */ + bnxt_ulp_cntxt_ptr2_flow_db_set(ulp_ctxt, flow_db); + + /* Populate the regular flow table limits. */ + flow_tbl = &flow_db->flow_tbl[BNXT_ULP_REGULAR_FLOW_TABLE]; + flow_tbl->num_flows = dparms->num_flows + 1; + flow_tbl->num_resources = (flow_tbl->num_flows * + dparms->num_resources_per_flow); + + /* Populate the default flow table limits. */ + flow_tbl = &flow_db->flow_tbl[BNXT_ULP_DEFAULT_FLOW_TABLE]; + flow_tbl->num_flows = BNXT_FLOW_DB_DEFAULT_NUM_FLOWS + 1; + flow_tbl->num_resources = (flow_tbl->num_flows * + BNXT_FLOW_DB_DEFAULT_NUM_RESOURCES); + + /* Allocate the resource for the regular flow table. */ + if (ulp_flow_db_alloc_resource(flow_db, BNXT_ULP_REGULAR_FLOW_TABLE)) + goto error_free; + if (ulp_flow_db_alloc_resource(flow_db, BNXT_ULP_DEFAULT_FLOW_TABLE)) + goto error_free; + + /* add 1 since we are not using index 0 for flow id */ + flow_db->func_id_tbl_size = dparms->num_flows + 1; + /* Allocate the function Id table */ + flow_db->func_id_tbl = rte_zmalloc("bnxt_ulp_flow_db_func_id_table", + flow_db->func_id_tbl_size * + sizeof(uint16_t), 0); + if (!flow_db->func_id_tbl) { + BNXT_TF_DBG(ERR, + "Failed to allocate mem for flow table func id\n"); + goto error_free; + } + /* All good so return. */ + return 0; +error_free: + ulp_flow_db_deinit(ulp_ctxt); + return -ENOMEM; +} + +/* + * Deinitialize the flow database. 
Memory is deallocated in + * this call and all flows should have been purged before this + * call. + * + * ulp_ctxt [in] Ptr to ulp context + * + * Returns 0 on success. + */ +int32_t ulp_flow_db_deinit(struct bnxt_ulp_context *ulp_ctxt) +{ + struct bnxt_ulp_flow_db *flow_db; + + flow_db = bnxt_ulp_cntxt_ptr2_flow_db_get(ulp_ctxt); + if (!flow_db) { + BNXT_TF_DBG(ERR, "Invalid Arguments\n"); + return -EINVAL; + } + + /* Detach the flow database from the ulp context. */ + bnxt_ulp_cntxt_ptr2_flow_db_set(ulp_ctxt, NULL); + + /* Free up all the memory. */ + ulp_flow_db_dealloc_resource(flow_db, BNXT_ULP_REGULAR_FLOW_TABLE); + ulp_flow_db_dealloc_resource(flow_db, BNXT_ULP_DEFAULT_FLOW_TABLE); + rte_free(flow_db->func_id_tbl); + rte_free(flow_db); + + return 0; +} + +/* + * Allocate the flow database entry + * + * ulp_ctxt [in] Ptr to ulp_context + * tbl_idx [in] Specify it is regular or default flow + * fid [out] The index to the flow entry + * + * returns 0 on success and negative on failure. + */ +int32_t ulp_flow_db_fid_alloc(struct bnxt_ulp_context *ulp_ctxt, + enum bnxt_ulp_flow_db_tables tbl_idx, + uint16_t func_id, + uint32_t *fid) +{ + struct bnxt_ulp_flow_db *flow_db; + struct bnxt_ulp_flow_tbl *flow_tbl; + + *fid = 0; /* Initialize fid to invalid value */ + flow_db = bnxt_ulp_cntxt_ptr2_flow_db_get(ulp_ctxt); + if (!flow_db) { + BNXT_TF_DBG(ERR, "Invalid Arguments\n"); + return -EINVAL; + } + + flow_tbl = &flow_db->flow_tbl[tbl_idx]; + /* check for max flows */ + if (flow_tbl->num_flows <= flow_tbl->head_index) { + BNXT_TF_DBG(ERR, "Flow database has reached max flows\n"); + return -ENOMEM; + } + if (flow_tbl->tail_index <= (flow_tbl->head_index + 1)) { + BNXT_TF_DBG(ERR, "Flow database has reached max resources\n"); + return -ENOMEM; + } + *fid = flow_tbl->flow_tbl_stack[flow_tbl->head_index]; + flow_tbl->head_index++; + ulp_flow_db_active_flow_set(flow_tbl, *fid, 1); + + /* The function id update is only valid for regular flow table */ + if (tbl_idx == BNXT_ULP_REGULAR_FLOW_TABLE) + ulp_flow_db_func_id_set(flow_db, *fid, func_id); + + /* all good, return success */ + return 0; +} + +/* + * Allocate the flow database entry. + * The params->critical_resource has to be set to 0 to allocate a new resource. + * + * ulp_ctxt [in] Ptr to ulp_context + * tbl_idx [in] Specify it is regular or default flow + * fid [in] The index to the flow entry + * params [in] The contents to be copied into resource + * + * returns 0 on success and negative on failure. 
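+ *
+ * Illustrative sketch only (not taken verbatim from a caller): "fid" is a
+ * flow id returned by ulp_flow_db_fid_alloc() and "flow_handle" stands for
+ * the 64-bit EM handle returned by the TF core.
+ *
+ *   struct ulp_flow_db_res_params res = { 0 };
+ *
+ *   res.direction         = TF_DIR_RX;
+ *   res.resource_func     = BNXT_ULP_RESOURCE_FUNC_EM_TABLE;
+ *   res.resource_hndl     = flow_handle;
+ *   res.critical_resource = 0;
+ *   rc = ulp_flow_db_resource_add(ulp_ctxt, BNXT_ULP_REGULAR_FLOW_TABLE,
+ *                                 fid, &res);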
+ */ +int32_t ulp_flow_db_resource_add(struct bnxt_ulp_context *ulp_ctxt, + enum bnxt_ulp_flow_db_tables tbl_idx, + uint32_t fid, + struct ulp_flow_db_res_params *params) +{ + struct bnxt_ulp_flow_db *flow_db; + struct bnxt_ulp_flow_tbl *flow_tbl; + struct ulp_fdb_resource_info *resource, *fid_resource; + uint32_t idx; + + flow_db = bnxt_ulp_cntxt_ptr2_flow_db_get(ulp_ctxt); + if (!flow_db) { + BNXT_TF_DBG(ERR, "Invalid Arguments\n"); + return -EINVAL; + } + + if (tbl_idx >= BNXT_ULP_FLOW_TABLE_MAX) { + BNXT_TF_DBG(ERR, "Invalid table index\n"); + return -EINVAL; + } + flow_tbl = &flow_db->flow_tbl[tbl_idx]; + + /* check for max flows */ + if (fid >= flow_tbl->num_flows || !fid) { + BNXT_TF_DBG(ERR, "Invalid flow index\n"); + return -EINVAL; + } + + /* check if the flow is active or not */ + if (!ulp_flow_db_active_flow_is_set(flow_tbl, fid)) { + BNXT_TF_DBG(ERR, "flow does not exist\n"); + return -EINVAL; + } + + /* check for max resource */ + if ((flow_tbl->head_index + 1) >= flow_tbl->tail_index) { + BNXT_TF_DBG(ERR, "Flow db has reached max resources\n"); + return -ENOMEM; + } + fid_resource = &flow_tbl->flow_resources[fid]; + + if (!params->critical_resource) { + /* Not the critical_resource so allocate a resource */ + idx = flow_tbl->flow_tbl_stack[flow_tbl->tail_index]; + resource = &flow_tbl->flow_resources[idx]; + flow_tbl->tail_index--; + + /* Update the chain list of resource*/ + ULP_FLOW_DB_RES_NXT_SET(resource->nxt_resource_idx, + fid_resource->nxt_resource_idx); + /* update the contents */ + ulp_flow_db_res_params_to_info(resource, params); + ULP_FLOW_DB_RES_NXT_RESET(fid_resource->nxt_resource_idx); + ULP_FLOW_DB_RES_NXT_SET(fid_resource->nxt_resource_idx, + idx); + } else { + /* critical resource. Just update the fid resource */ + ulp_flow_db_res_params_to_info(fid_resource, params); + } + + /* all good, return success */ + return 0; +} + +/* + * Free the flow database entry. + * The params->critical_resource has to be set to 1 to free the first resource. + * + * ulp_ctxt [in] Ptr to ulp_context + * tbl_idx [in] Specify it is regular or default flow + * fid [in] The index to the flow entry + * params [in/out] The contents to be copied into params. + * Onlythe critical_resource needs to be set by the caller. + * + * Returns 0 on success and negative on failure. 
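+ * Freeing a non-critical resource returns its slot to the free stack;
+ * freeing the critical resource only clears the head entry in place while
+ * preserving its link to the rest of the chain.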
+ */ +int32_t ulp_flow_db_resource_del(struct bnxt_ulp_context *ulp_ctxt, + enum bnxt_ulp_flow_db_tables tbl_idx, + uint32_t fid, + struct ulp_flow_db_res_params *params) +{ + struct bnxt_ulp_flow_db *flow_db; + struct bnxt_ulp_flow_tbl *flow_tbl; + struct ulp_fdb_resource_info *nxt_resource, *fid_resource; + uint32_t nxt_idx = 0; + + flow_db = bnxt_ulp_cntxt_ptr2_flow_db_get(ulp_ctxt); + if (!flow_db) { + BNXT_TF_DBG(ERR, "Invalid Arguments\n"); + return -EINVAL; + } + + if (tbl_idx >= BNXT_ULP_FLOW_TABLE_MAX) { + BNXT_TF_DBG(ERR, "Invalid table index\n"); + return -EINVAL; + } + flow_tbl = &flow_db->flow_tbl[tbl_idx]; + + /* check for max flows */ + if (fid >= flow_tbl->num_flows || !fid) { + BNXT_TF_DBG(ERR, "Invalid flow index\n"); + return -EINVAL; + } + + /* check if the flow is active or not */ + if (!ulp_flow_db_active_flow_is_set(flow_tbl, fid)) { + BNXT_TF_DBG(ERR, "flow does not exist\n"); + return -EINVAL; + } + + fid_resource = &flow_tbl->flow_resources[fid]; + if (!params->critical_resource) { + /* Not the critical resource so free the resource */ + ULP_FLOW_DB_RES_NXT_SET(nxt_idx, + fid_resource->nxt_resource_idx); + if (!nxt_idx) { + /* reached end of resources */ + return -ENOENT; + } + nxt_resource = &flow_tbl->flow_resources[nxt_idx]; + + /* connect the fid resource to the next resource */ + ULP_FLOW_DB_RES_NXT_RESET(fid_resource->nxt_resource_idx); + ULP_FLOW_DB_RES_NXT_SET(fid_resource->nxt_resource_idx, + nxt_resource->nxt_resource_idx); + + /* update the contents to be given to caller */ + ulp_flow_db_res_info_to_params(nxt_resource, params); + + /* Delete the nxt_resource */ + memset(nxt_resource, 0, sizeof(struct ulp_fdb_resource_info)); + + /* add it to the free list */ + flow_tbl->tail_index++; + if (flow_tbl->tail_index >= flow_tbl->num_resources) { + BNXT_TF_DBG(ERR, "FlowDB:Tail reached max\n"); + return -ENOENT; + } + flow_tbl->flow_tbl_stack[flow_tbl->tail_index] = nxt_idx; + + } else { + /* Critical resource. copy the contents and exit */ + ulp_flow_db_res_info_to_params(fid_resource, params); + ULP_FLOW_DB_RES_NXT_SET(nxt_idx, + fid_resource->nxt_resource_idx); + memset(fid_resource, 0, sizeof(struct ulp_fdb_resource_info)); + ULP_FLOW_DB_RES_NXT_SET(fid_resource->nxt_resource_idx, + nxt_idx); + } + + /* all good, return success */ + return 0; +} + +/* + * Free the flow database entry + * + * ulp_ctxt [in] Ptr to ulp_context + * tbl_idx [in] Specify it is regular or default flow + * fid [in] The index to the flow entry + * + * returns 0 on success and negative on failure. 
+ */ +int32_t ulp_flow_db_fid_free(struct bnxt_ulp_context *ulp_ctxt, + enum bnxt_ulp_flow_db_tables tbl_idx, + uint32_t fid) +{ + struct bnxt_ulp_flow_db *flow_db; + struct bnxt_ulp_flow_tbl *flow_tbl; + + flow_db = bnxt_ulp_cntxt_ptr2_flow_db_get(ulp_ctxt); + if (!flow_db) { + BNXT_TF_DBG(ERR, "Invalid Arguments\n"); + return -EINVAL; + } + + if (tbl_idx >= BNXT_ULP_FLOW_TABLE_MAX) { + BNXT_TF_DBG(ERR, "Invalid table index\n"); + return -EINVAL; + } + + flow_tbl = &flow_db->flow_tbl[tbl_idx]; + + /* check for limits of fid */ + if (fid >= flow_tbl->num_flows || !fid) { + BNXT_TF_DBG(ERR, "Invalid flow index\n"); + return -EINVAL; + } + + /* check if the flow is active or not */ + if (!ulp_flow_db_active_flow_is_set(flow_tbl, fid)) { + BNXT_TF_DBG(ERR, "flow does not exist\n"); + return -EINVAL; + } + flow_tbl->head_index--; + if (!flow_tbl->head_index) { + BNXT_TF_DBG(ERR, "FlowDB: Head Ptr is zero\n"); + return -ENOENT; + } + flow_tbl->flow_tbl_stack[flow_tbl->head_index] = fid; + ulp_flow_db_active_flow_set(flow_tbl, fid, 0); + if (tbl_idx == BNXT_ULP_REGULAR_FLOW_TABLE) + ulp_flow_db_func_id_set(flow_db, fid, 0); + + /* all good, return success */ + return 0; +} + +/* + * Get the flow database entry details + * + * ulp_ctxt [in] Ptr to ulp_context + * tbl_idx [in] Specify it is regular or default flow + * fid [in] The index to the flow entry + * nxt_idx [in/out] the index to the next entry + * params [out] The contents to be copied into params. + * + * returns 0 on success and negative on failure. + */ +int32_t ulp_flow_db_resource_get(struct bnxt_ulp_context *ulp_ctxt, + enum bnxt_ulp_flow_db_tables tbl_idx, + uint32_t fid, + uint32_t *nxt_idx, + struct ulp_flow_db_res_params *params) +{ + struct bnxt_ulp_flow_db *flow_db; + struct bnxt_ulp_flow_tbl *flow_tbl; + struct ulp_fdb_resource_info *nxt_resource, *fid_resource; + + flow_db = bnxt_ulp_cntxt_ptr2_flow_db_get(ulp_ctxt); + if (!flow_db) { + BNXT_TF_DBG(ERR, "Invalid Arguments\n"); + return -EINVAL; + } + + if (tbl_idx >= BNXT_ULP_FLOW_TABLE_MAX) { + BNXT_TF_DBG(ERR, "Invalid table index\n"); + return -EINVAL; + } + + flow_tbl = &flow_db->flow_tbl[tbl_idx]; + + /* check for limits of fid */ + if (fid >= flow_tbl->num_flows || !fid) { + BNXT_TF_DBG(ERR, "Invalid flow index\n"); + return -EINVAL; + } + + /* check if the flow is active or not */ + if (!ulp_flow_db_active_flow_is_set(flow_tbl, fid)) { + BNXT_TF_DBG(ERR, "flow does not exist\n"); + return -EINVAL; + } + + if (!*nxt_idx) { + fid_resource = &flow_tbl->flow_resources[fid]; + ulp_flow_db_res_info_to_params(fid_resource, params); + ULP_FLOW_DB_RES_NXT_SET(*nxt_idx, + fid_resource->nxt_resource_idx); + } else { + nxt_resource = &flow_tbl->flow_resources[*nxt_idx]; + ulp_flow_db_res_info_to_params(nxt_resource, params); + *nxt_idx = 0; + ULP_FLOW_DB_RES_NXT_SET(*nxt_idx, + nxt_resource->nxt_resource_idx); + } + + /* all good, return success */ + return 0; +} + +/* + * Get the flow database entry iteratively + * + * flow_tbl [in] Ptr to flow table + * fid [in/out] The index to the flow entry + * + * returns 0 on success and negative on failure. 
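+ *
+ * Iteration starts with *fid set to 0 and stops when -ENOENT is returned.
+ * A minimal sketch, mirroring ulp_flow_db_flush_flows() below:
+ *
+ *   uint32_t fid = 0;
+ *
+ *   while (!ulp_flow_db_next_entry_get(flow_tbl, &fid))
+ *           ulp_mapper_resources_free(ulp_ctx, fid, tbl_idx);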
+ */ +static int32_t +ulp_flow_db_next_entry_get(struct bnxt_ulp_flow_tbl *flowtbl, + uint32_t *fid) +{ + uint32_t lfid = *fid; + uint32_t idx, s_idx, mod_fid; + uint64_t bs; + + do { + /* increment the flow id to find the next valid flow id */ + lfid++; + if (lfid >= flowtbl->num_flows) + return -ENOENT; + idx = lfid / ULP_INDEX_BITMAP_SIZE; + mod_fid = lfid % ULP_INDEX_BITMAP_SIZE; + s_idx = idx; + while (!(bs = flowtbl->active_flow_tbl[idx])) { + idx++; + if ((idx * ULP_INDEX_BITMAP_SIZE) >= flowtbl->num_flows) + return -ENOENT; + } + /* + * remove the previous bits in the bitset bs to find the + * next non zero bit in the bitset. This needs to be done + * only if the idx is same as he one you started. + */ + if (s_idx == idx) + bs &= (-1UL >> mod_fid); + lfid = (idx * ULP_INDEX_BITMAP_SIZE) + __builtin_clzl(bs); + if (*fid >= lfid) { + BNXT_TF_DBG(ERR, "Flow Database is corrupt\n"); + return -ENOENT; + } + } while (!ulp_flow_db_active_flow_is_set(flowtbl, lfid)); + + /* all good, return success */ + *fid = lfid; + return 0; +} + +/* + * Flush all flows in the flow database. + * + * ulp_ctxt [in] Ptr to ulp context + * tbl_idx [in] The index to table + * + * returns 0 on success or negative number on failure + */ +int32_t ulp_flow_db_flush_flows(struct bnxt_ulp_context *ulp_ctx, + uint32_t idx) +{ + uint32_t fid = 0; + struct bnxt_ulp_flow_db *flow_db; + struct bnxt_ulp_flow_tbl *flow_tbl; + + if (!ulp_ctx) { + BNXT_TF_DBG(ERR, "Invalid Argument\n"); + return -EINVAL; + } + + flow_db = bnxt_ulp_cntxt_ptr2_flow_db_get(ulp_ctx); + if (!flow_db) { + BNXT_TF_DBG(ERR, "Flow database not found\n"); + return -EINVAL; + } + flow_tbl = &flow_db->flow_tbl[idx]; + while (!ulp_flow_db_next_entry_get(flow_tbl, &fid)) + ulp_mapper_resources_free(ulp_ctx, fid, idx); + + return 0; +} + +/* + * Flush all flows in the flow database that belong to a device function. + * + * ulp_ctxt [in] Ptr to ulp context + * tbl_idx [in] The index to table + * + * returns 0 on success or negative number on failure + */ +int32_t +ulp_flow_db_function_flow_flush(struct bnxt_ulp_context *ulp_ctx, + uint16_t func_id) +{ + uint32_t flow_id = 0; + struct bnxt_ulp_flow_db *flow_db; + struct bnxt_ulp_flow_tbl *flow_tbl; + + if (!ulp_ctx || !func_id) { + BNXT_TF_DBG(ERR, "Invalid Argument\n"); + return -EINVAL; + } + + flow_db = bnxt_ulp_cntxt_ptr2_flow_db_get(ulp_ctx); + if (!flow_db) { + BNXT_TF_DBG(ERR, "Flow database not found\n"); + return -EINVAL; + } + flow_tbl = &flow_db->flow_tbl[BNXT_ULP_REGULAR_FLOW_TABLE]; + while (!ulp_flow_db_next_entry_get(flow_tbl, &flow_id)) { + if (flow_db->func_id_tbl[flow_id] == func_id) + ulp_mapper_resources_free(ulp_ctx, flow_id, + BNXT_ULP_REGULAR_FLOW_TABLE); + } + + return 0; +} + +/* + * Flush all flows in the flow database that are associated with the session. + * + * ulp_ctxt [in] Ptr to ulp context + * + * returns 0 on success or negative number on failure + */ +int32_t +ulp_flow_db_session_flow_flush(struct bnxt_ulp_context *ulp_ctx) +{ + /* + * TBD: Tf core implementation of FW session flush shall change this + * implementation. + */ + return ulp_flow_db_flush_flows(ulp_ctx, BNXT_ULP_REGULAR_FLOW_TABLE); +} + +/* + * Check that flow id matches the function id or not + * + * ulp_ctxt [in] Ptr to ulp context + * flow_db [in] Ptr to flow table + * func_id [in] The func_id to be set, for reset pass zero. 
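+ * (Here flow_id identifies the flow to check and func_id the function that
+ * must own it; nothing is set or reset by this call.)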
+ * + * returns true on success or false on failure + */ +bool +ulp_flow_db_validate_flow_func(struct bnxt_ulp_context *ulp_ctx, + uint32_t flow_id, + uint32_t func_id) +{ + struct bnxt_ulp_flow_db *flow_db; + + flow_db = bnxt_ulp_cntxt_ptr2_flow_db_get(ulp_ctx); + if (!flow_db) { + BNXT_TF_DBG(ERR, "Flow database not found\n"); + return false; + } + + /* set the function id in the function table */ + if (flow_id < flow_db->func_id_tbl_size && func_id && + flow_db->func_id_tbl[flow_id] == func_id) + return true; + + return false; +} diff --git a/src/spdk/dpdk/drivers/net/bnxt/tf_ulp/ulp_flow_db.h b/src/spdk/dpdk/drivers/net/bnxt/tf_ulp/ulp_flow_db.h new file mode 100644 index 000000000..ebca84947 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnxt/tf_ulp/ulp_flow_db.h @@ -0,0 +1,213 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2019 Broadcom + * All rights reserved. + */ + +#ifndef _ULP_FLOW_DB_H_ +#define _ULP_FLOW_DB_H_ + +#include "bnxt_ulp.h" +#include "ulp_template_db.h" + +#define BNXT_FLOW_DB_DEFAULT_NUM_FLOWS 128 +#define BNXT_FLOW_DB_DEFAULT_NUM_RESOURCES 5 + +/* Structure for the flow database resource information. */ +struct ulp_fdb_resource_info { + /* Points to next resource in the chained list. */ + uint32_t nxt_resource_idx; + union { + uint64_t resource_em_handle; + struct { + uint32_t resource_type; + uint32_t resource_hndl; + }; + }; +}; + +/* Structure for the flow database resource information. */ +struct bnxt_ulp_flow_tbl { + /* Flow tbl is the resource object list for each flow id. */ + struct ulp_fdb_resource_info *flow_resources; + + /* Flow table stack to track free list of resources. */ + uint32_t *flow_tbl_stack; + uint32_t head_index; + uint32_t tail_index; + + /* Table to track the active flows. */ + uint64_t *active_flow_tbl; + uint32_t num_flows; + uint32_t num_resources; +}; + +/* Flow database supports two tables. */ +enum bnxt_ulp_flow_db_tables { + BNXT_ULP_REGULAR_FLOW_TABLE, + BNXT_ULP_DEFAULT_FLOW_TABLE, + BNXT_ULP_FLOW_TABLE_MAX +}; + +/* Structure for the flow database resource information. */ +struct bnxt_ulp_flow_db { + struct bnxt_ulp_flow_tbl flow_tbl[BNXT_ULP_FLOW_TABLE_MAX]; + uint16_t *func_id_tbl; + uint32_t func_id_tbl_size; +}; + +/* flow db resource params to add resources */ +struct ulp_flow_db_res_params { + enum tf_dir direction; + enum bnxt_ulp_resource_func resource_func; + uint64_t resource_hndl; + uint32_t resource_type; + uint32_t critical_resource; +}; + +/* + * Initialize the flow database. Memory is allocated in this + * call and assigned to the flow database. + * + * ulp_ctxt [in] Ptr to ulp context + * + * Returns 0 on success or negative number on failure. + */ +int32_t ulp_flow_db_init(struct bnxt_ulp_context *ulp_ctxt); + +/* + * Deinitialize the flow database. Memory is deallocated in + * this call and all flows should have been purged before this + * call. + * + * ulp_ctxt [in] Ptr to ulp context + * + * Returns 0 on success. + */ +int32_t ulp_flow_db_deinit(struct bnxt_ulp_context *ulp_ctxt); + +/* + * Allocate the flow database entry + * + * ulp_ctxt [in] Ptr to ulp_context + * tbl_idx [in] Specify it is regular or default flow + * func_id [in] The function id of the device.Valid only for regular flows. + * fid [out] The index to the flow entry + * + * returns 0 on success and negative on failure. + */ +int32_t ulp_flow_db_fid_alloc(struct bnxt_ulp_context *ulp_ctxt, + enum bnxt_ulp_flow_db_tables tbl_idx, + uint16_t func_id, + uint32_t *fid); + +/* + * Allocate the flow database entry. 
+ * The params->critical_resource has to be set to 0 to allocate a new resource. + * + * ulp_ctxt [in] Ptr to ulp_context + * tbl_idx [in] Specify it is regular or default flow + * fid [in] The index to the flow entry + * params [in] The contents to be copied into resource + * + * returns 0 on success and negative on failure. + */ +int32_t ulp_flow_db_resource_add(struct bnxt_ulp_context *ulp_ctxt, + enum bnxt_ulp_flow_db_tables tbl_idx, + uint32_t fid, + struct ulp_flow_db_res_params *params); + +/* + * Free the flow database entry. + * The params->critical_resource has to be set to 1 to free the first resource. + * + * ulp_ctxt [in] Ptr to ulp_context + * tbl_idx [in] Specify it is regular or default flow + * fid [in] The index to the flow entry + * params [in/out] The contents to be copied into params. + * Only the critical_resource needs to be set by the caller. + * + * Returns 0 on success and negative on failure. + */ +int32_t ulp_flow_db_resource_del(struct bnxt_ulp_context *ulp_ctxt, + enum bnxt_ulp_flow_db_tables tbl_idx, + uint32_t fid, + struct ulp_flow_db_res_params *params); + +/* + * Free the flow database entry + * + * ulp_ctxt [in] Ptr to ulp_context + * tbl_idx [in] Specify it is regular or default flow + * fid [in] The index to the flow entry + * + * returns 0 on success and negative on failure. + */ +int32_t ulp_flow_db_fid_free(struct bnxt_ulp_context *ulp_ctxt, + enum bnxt_ulp_flow_db_tables tbl_idx, + uint32_t fid); + +/* + *Get the flow database entry details + * + * ulp_ctxt [in] Ptr to ulp_context + * tbl_idx [in] Specify it is regular or default flow + * fid [in] The index to the flow entry + * nxt_idx [in/out] the index to the next entry + * params [out] The contents to be copied into params. + * + * returns 0 on success and negative on failure. + */ +int32_t ulp_flow_db_resource_get(struct bnxt_ulp_context *ulp_ctxt, + enum bnxt_ulp_flow_db_tables tbl_idx, + uint32_t fid, + uint32_t *nxt_idx, + struct ulp_flow_db_res_params *params); + +/* + * Flush all flows in the flow database. + * + * ulp_ctxt [in] Ptr to ulp context + * tbl_idx [in] The index to table + * + * returns 0 on success or negative number on failure + */ +int32_t ulp_flow_db_flush_flows(struct bnxt_ulp_context *ulp_ctx, + uint32_t idx); + +/* + * Flush all flows in the flow database that belong to a device function. + * + * ulp_ctxt [in] Ptr to ulp context + * tbl_idx [in] The index to table + * + * returns 0 on success or negative number on failure + */ +int32_t +ulp_flow_db_function_flow_flush(struct bnxt_ulp_context *ulp_ctx, + uint16_t func_id); + +/* + * Flush all flows in the flow database that are associated with the session. + * + * ulp_ctxt [in] Ptr to ulp context + * + * returns 0 on success or negative number on failure + */ +int32_t +ulp_flow_db_session_flow_flush(struct bnxt_ulp_context *ulp_ctx); + +/* + * Check that flow id matches the function id or not + * + * ulp_ctxt [in] Ptr to ulp context + * flow_db [in] Ptr to flow table + * func_id [in] The func_id to be set, for reset pass zero. 
+ * + * returns true on success or false on failure + */ +bool +ulp_flow_db_validate_flow_func(struct bnxt_ulp_context *ulp_ctx, + uint32_t flow_id, + uint32_t func_id); + +#endif /* _ULP_FLOW_DB_H_ */ diff --git a/src/spdk/dpdk/drivers/net/bnxt/tf_ulp/ulp_mapper.c b/src/spdk/dpdk/drivers/net/bnxt/tf_ulp/ulp_mapper.c new file mode 100644 index 000000000..938b88e22 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnxt/tf_ulp/ulp_mapper.c @@ -0,0 +1,2122 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2020 Broadcom + * All rights reserved. + */ + +#include +#include +#include "bnxt.h" +#include "ulp_template_db.h" +#include "ulp_template_struct.h" +#include "bnxt_tf_common.h" +#include "ulp_utils.h" +#include "bnxt_ulp.h" +#include "tfp.h" +#include "tf_ext_flow_handle.h" +#include "ulp_mark_mgr.h" +#include "ulp_flow_db.h" +#include "ulp_mapper.h" + +static struct bnxt_ulp_def_ident_info * +ulp_mapper_def_ident_info_list_get(uint32_t *num_entries) +{ + if (!num_entries) + return NULL; + *num_entries = BNXT_ULP_DEF_IDENT_INFO_TBL_MAX_SZ; + return ulp_def_ident_tbl; +} + +/* + * Read a default identifier from the mapper regfile. + * + * The regval is always returned in big-endian. + * + * returns 0 on success + */ +static int32_t +ulp_mapper_def_regfile_read(struct bnxt_ulp_mapper_data *mapper_data, + enum tf_dir dir, + uint16_t idx, + uint64_t *regval) +{ + if (!mapper_data || !regval || + dir >= TF_DIR_MAX || idx >= BNXT_ULP_DEF_REGFILE_INDEX_LAST) + return -EINVAL; + *regval = mapper_data->dflt_ids[dir][idx].ident; + return 0; +} + +/* + * Write a default identifier to the mapper regfile + * + * The regval value must be in big-endian. + * + * return 0 on success. + */ +static int32_t +ulp_mapper_def_regfile_write(struct bnxt_ulp_mapper_data *mapper_data, + enum tf_dir dir, + uint16_t idx, + uint64_t regval) +{ + if (!mapper_data || dir >= TF_DIR_MAX || + idx >= BNXT_ULP_DEF_REGFILE_INDEX_LAST) + return -EINVAL; + mapper_data->dflt_ids[dir][idx].ident = regval; + return 0; +} + +/* Retrieve the cache initialization parameters for the tbl_idx */ +static struct bnxt_ulp_cache_tbl_params * +ulp_mapper_cache_tbl_params_get(uint32_t tbl_idx) +{ + if (tbl_idx >= BNXT_ULP_CACHE_TBL_MAX_SZ) + return NULL; + + return &ulp_cache_tbl_params[tbl_idx]; +} + +/* + * Get the size of the action property for a given index. + * + * idx [in] The index for the action property + * + * returns the size of the action property. + */ +static uint32_t +ulp_mapper_act_prop_size_get(uint32_t idx) +{ + if (idx >= BNXT_ULP_ACT_PROP_IDX_LAST) + return 0; + return ulp_act_prop_map_table[idx]; +} + +/* + * Get the list of result fields that implement the flow action. + * Gets a device dependent list of tables that implement the action template id. + * + * dev_id [in] The device id of the forwarding element + * + * tid [in] The action template id that matches the flow + * + * num_tbls [out] The number of action tables in the returned array + * + * Returns An array of action tables to implement the flow, or NULL on error. 
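+ * The lookup index is formed from the template id and the device id via
+ * ULP_DEVICE_PARAMS_INDEX(), so one template id can map to a different
+ * table list per device.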
+ */ +static struct bnxt_ulp_mapper_act_tbl_info * +ulp_mapper_action_tbl_list_get(uint32_t dev_id, + uint32_t tid, + uint32_t *num_tbls) +{ + uint32_t idx; + uint32_t tidx; + + if (!num_tbls) { + BNXT_TF_DBG(ERR, "Invalid arguments\n"); + return NULL; + } + + /* template shift and device mask */ + tidx = ULP_DEVICE_PARAMS_INDEX(tid, dev_id); + + /* NOTE: Need to have something from template compiler to help validate + * range of dev_id and act_tid + */ + idx = ulp_act_tmpl_list[tidx].start_tbl_idx; + *num_tbls = ulp_act_tmpl_list[tidx].num_tbls; + + return &ulp_act_tbl_list[idx]; +} + +/** Get a list of classifier tables that implement the flow + * Gets a device dependent list of tables that implement the class template id + * + * dev_id [in] The device id of the forwarding element + * + * tid [in] The template id that matches the flow + * + * num_tbls [out] The number of classifier tables in the returned array + * + * returns An array of classifier tables to implement the flow, or NULL on + * error + */ +static struct bnxt_ulp_mapper_class_tbl_info * +ulp_mapper_class_tbl_list_get(uint32_t dev_id, + uint32_t tid, + uint32_t *num_tbls) +{ + uint32_t idx; + uint32_t tidx = ULP_DEVICE_PARAMS_INDEX(tid, dev_id); + + if (!num_tbls) + return NULL; + + /* NOTE: Need to have something from template compiler to help validate + * range of dev_id and tid + */ + idx = ulp_class_tmpl_list[tidx].start_tbl_idx; + *num_tbls = ulp_class_tmpl_list[tidx].num_tbls; + + return &ulp_class_tbl_list[idx]; +} + +/* + * Get the list of key fields that implement the flow. + * + * ctxt [in] The ulp context + * + * tbl [in] A single table instance to get the key fields from + * + * num_flds [out] The number of key fields in the returned array + * + * Returns array of Key fields, or NULL on error. + */ +static struct bnxt_ulp_mapper_class_key_field_info * +ulp_mapper_key_fields_get(struct bnxt_ulp_mapper_class_tbl_info *tbl, + uint32_t *num_flds) +{ + uint32_t idx; + + if (!tbl || !num_flds) + return NULL; + + idx = tbl->key_start_idx; + *num_flds = tbl->key_num_fields; + + /* NOTE: Need template to provide range checking define */ + return &ulp_class_key_field_list[idx]; +} + +/* + * Get the list of data fields that implement the flow. + * + * ctxt [in] The ulp context + * + * tbl [in] A single table instance to get the data fields from + * + * num_flds [out] The number of data fields in the returned array. + * + * Returns array of data fields, or NULL on error. + */ +static struct bnxt_ulp_mapper_result_field_info * +ulp_mapper_result_fields_get(struct bnxt_ulp_mapper_class_tbl_info *tbl, + uint32_t *num_flds) +{ + uint32_t idx; + + if (!tbl || !num_flds) + return NULL; + + idx = tbl->result_start_idx; + *num_flds = tbl->result_num_fields; + + /* NOTE: Need template to provide range checking define */ + return &ulp_class_result_field_list[idx]; +} + +/* + * Get the list of result fields that implement the flow action. + * + * tbl [in] A single table instance to get the results fields + * from num_flds [out] The number of data fields in the returned + * array. + * + * Returns array of data fields, or NULL on error. 
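+ * The encapsulation field count for the same table entry is returned
+ * through num_encap_flds.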
+ */ +static struct bnxt_ulp_mapper_result_field_info * +ulp_mapper_act_result_fields_get(struct bnxt_ulp_mapper_act_tbl_info *tbl, + uint32_t *num_rslt_flds, + uint32_t *num_encap_flds) +{ + uint32_t idx; + + if (!tbl || !num_rslt_flds || !num_encap_flds) + return NULL; + + idx = tbl->result_start_idx; + *num_rslt_flds = tbl->result_num_fields; + *num_encap_flds = tbl->encap_num_fields; + + /* NOTE: Need template to provide range checking define */ + return &ulp_act_result_field_list[idx]; +} + +/* + * Get the list of ident fields that implement the flow + * + * tbl [in] A single table instance to get the ident fields from + * + * num_flds [out] The number of ident fields in the returned array + * + * returns array of ident fields, or NULL on error + */ +static struct bnxt_ulp_mapper_ident_info * +ulp_mapper_ident_fields_get(struct bnxt_ulp_mapper_class_tbl_info *tbl, + uint32_t *num_flds) +{ + uint32_t idx; + + if (!tbl || !num_flds) + return NULL; + + idx = tbl->ident_start_idx; + *num_flds = tbl->ident_nums; + + /* NOTE: Need template to provide range checking define */ + return &ulp_ident_list[idx]; +} + +static struct bnxt_ulp_mapper_cache_entry * +ulp_mapper_cache_entry_get(struct bnxt_ulp_context *ulp, + enum bnxt_ulp_cache_tbl_id id, + uint16_t key) +{ + struct bnxt_ulp_mapper_data *mapper_data; + + mapper_data = bnxt_ulp_cntxt_ptr2_mapper_data_get(ulp); + if (!mapper_data || !mapper_data->cache_tbl[id]) { + BNXT_TF_DBG(ERR, "Unable to acquire the cache tbl (%d)\n", id); + return NULL; + } + + return &mapper_data->cache_tbl[id][key]; +} + +/* + * Concatenates the tbl_type and tbl_id into a 32bit value for storing in the + * resource_type. This is done to conserve memory since both the tbl_type and + * tbl_id are 16bit. + */ +static inline void +ulp_mapper_cache_res_type_set(struct ulp_flow_db_res_params *res, + uint16_t tbl_type, + uint16_t tbl_id) +{ + res->resource_type = + ((uint32_t)tbl_id << ULP_MAPPER_CACHE_RES_TBL_ID_SHFT) | + ((uint32_t)tbl_type << ULP_MAPPER_CACHE_RES_TBL_TYPE_SHFT); +} + +/* Extracts the tbl_type and tbl_id from the 32bit resource type. */ +static inline void +ulp_mapper_cache_res_type_get(struct ulp_flow_db_res_params *res, + uint16_t *tbl_type, + uint16_t *tbl_id) +{ + *tbl_type = (uint16_t)((res->resource_type >> + ULP_MAPPER_CACHE_RES_TBL_TYPE_SHFT) & + ULP_MAPPER_CACHE_RES_TBL_MASK); + *tbl_id = (uint16_t)((res->resource_type >> + ULP_MAPPER_CACHE_RES_TBL_ID_SHFT) & + ULP_MAPPER_CACHE_RES_TBL_MASK); +} + +static int32_t +ulp_mapper_cache_entry_free(struct bnxt_ulp_context *ulp, + struct tf *tfp, + struct ulp_flow_db_res_params *res) +{ + struct bnxt_ulp_mapper_cache_entry *cache_entry; + struct tf_free_identifier_parms ident_parms; + struct tf_free_tcam_entry_parms tcam_parms; + uint16_t table_id, table_type; + int32_t rc, trc, i; + + /* + * The table id, used for cache, and table_type, used for tcam, are + * both encoded within the resource. We must first extract them to + * formulate the args for tf calls. + */ + ulp_mapper_cache_res_type_get(res, &table_type, &table_id); + + cache_entry = ulp_mapper_cache_entry_get(ulp, table_id, + (uint16_t)res->resource_hndl); + if (!cache_entry || !cache_entry->ref_count) { + BNXT_TF_DBG(ERR, "Cache entry (%d:%d) not valid on free.\n", + table_id, (uint16_t)res->resource_hndl); + return -EINVAL; + } + + /* + * See if we need to delete the entry. The tcam and identifiers are all + * tracked by the cached entries reference count. All are deleted when + * the reference count hit zero. 
+ */ + cache_entry->ref_count--; + if (cache_entry->ref_count) + return 0; + + /* + * Need to delete the tcam entry and the allocated identifiers. + * In the event of a failure, need to try to delete the remaining + * resources before returning error. + */ + tcam_parms.dir = res->direction; + tcam_parms.tcam_tbl_type = table_type; + tcam_parms.idx = cache_entry->tcam_idx; + rc = tf_free_tcam_entry(tfp, &tcam_parms); + if (rc) + BNXT_TF_DBG(ERR, "Failed to free tcam [%d][%s][0x%04x] rc=%d\n", + table_type, + (res->direction == TF_DIR_RX) ? "RX" : "TX", + tcam_parms.idx, rc); + + /* + * Free the identifiers associated with the tcam entry. Entries with + * negative one are considered uninitialized. + */ + for (i = 0; i < BNXT_ULP_CACHE_TBL_IDENT_MAX_NUM; i++) { + if (cache_entry->idents[i] == ULP_IDENTS_INVALID) + continue; + + ident_parms.dir = res->direction; + ident_parms.ident_type = cache_entry->ident_types[i]; + ident_parms.id = cache_entry->idents[i]; + trc = tf_free_identifier(tfp, &ident_parms); + if (trc) { + BNXT_TF_DBG(ERR, "Failed to free identifier " + "[%d][%s][0x%04x] rc=%d\n", + ident_parms.ident_type, + (res->direction == TF_DIR_RX) ? "RX" : "TX", + ident_parms.id, trc); + rc = trc; + } + } + + return rc; +} + +static inline int32_t +ulp_mapper_tcam_entry_free(struct bnxt_ulp_context *ulp __rte_unused, + struct tf *tfp, + struct ulp_flow_db_res_params *res) +{ + struct tf_free_tcam_entry_parms fparms = { + .dir = res->direction, + .tcam_tbl_type = res->resource_type, + .idx = (uint16_t)res->resource_hndl + }; + + return tf_free_tcam_entry(tfp, &fparms); +} + +static inline int32_t +ulp_mapper_index_entry_free(struct bnxt_ulp_context *ulp, + struct tf *tfp, + struct ulp_flow_db_res_params *res) +{ + struct tf_free_tbl_entry_parms fparms = { + .dir = res->direction, + .type = res->resource_type, + .idx = (uint32_t)res->resource_hndl + }; + + /* + * Just set the table scope, it will be ignored if not necessary + * by the tf_free_tbl_entry + */ + bnxt_ulp_cntxt_tbl_scope_id_get(ulp, &fparms.tbl_scope_id); + + return tf_free_tbl_entry(tfp, &fparms); +} + +static inline int32_t +ulp_mapper_eem_entry_free(struct bnxt_ulp_context *ulp, + struct tf *tfp, + struct ulp_flow_db_res_params *res) +{ + struct tf_delete_em_entry_parms fparms = { 0 }; + int32_t rc; + + fparms.dir = res->direction; + fparms.mem = TF_MEM_EXTERNAL; + fparms.flow_handle = res->resource_hndl; + + rc = bnxt_ulp_cntxt_tbl_scope_id_get(ulp, &fparms.tbl_scope_id); + if (rc) { + BNXT_TF_DBG(ERR, "Failed to get table scope\n"); + return -EINVAL; + } + + return tf_delete_em_entry(tfp, &fparms); +} + +static inline int32_t +ulp_mapper_ident_free(struct bnxt_ulp_context *ulp __rte_unused, + struct tf *tfp, + struct ulp_flow_db_res_params *res) +{ + struct tf_free_identifier_parms fparms = { + .dir = res->direction, + .ident_type = res->resource_type, + .id = (uint16_t)res->resource_hndl + }; + + return tf_free_identifier(tfp, &fparms); +} + +static inline int32_t +ulp_mapper_mark_free(struct bnxt_ulp_context *ulp, + struct ulp_flow_db_res_params *res) +{ + return ulp_mark_db_mark_del(ulp, + res->resource_type, + res->resource_hndl); +} + +/* + * Process the identifier instruction and either store it in the flow database + * or return it in the val (if not NULL) on success. If val is NULL, the + * identifier is to be stored in the flow database. 
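+ * Returning the identifier through val is used by the cache table handler,
+ * which tracks identifiers in its cache entry instead of the flow database.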
+ */ +static int32_t +ulp_mapper_ident_process(struct bnxt_ulp_mapper_parms *parms, + struct bnxt_ulp_mapper_class_tbl_info *tbl, + struct bnxt_ulp_mapper_ident_info *ident, + uint16_t *val) +{ + struct ulp_flow_db_res_params fid_parms; + uint64_t id = 0; + int32_t idx; + struct tf_alloc_identifier_parms iparms = { 0 }; + struct tf_free_identifier_parms free_parms = { 0 }; + struct tf *tfp; + int rc; + + tfp = bnxt_ulp_cntxt_tfp_get(parms->ulp_ctx); + if (!tfp) { + BNXT_TF_DBG(ERR, "Failed to get tf pointer\n"); + return -EINVAL; + } + + idx = ident->regfile_wr_idx; + + iparms.ident_type = ident->ident_type; + iparms.dir = tbl->direction; + + rc = tf_alloc_identifier(tfp, &iparms); + if (rc) { + BNXT_TF_DBG(ERR, "Alloc ident %s:%d failed.\n", + (iparms.dir == TF_DIR_RX) ? "RX" : "TX", + iparms.ident_type); + return rc; + } + + id = (uint64_t)tfp_cpu_to_be_64(iparms.id); + if (!ulp_regfile_write(parms->regfile, idx, id)) { + BNXT_TF_DBG(ERR, "Regfile[%d] write failed.\n", idx); + rc = -EINVAL; + /* Need to free the identifier, so goto error */ + goto error; + } + + /* Link the resource to the flow in the flow db */ + if (!val) { + memset(&fid_parms, 0, sizeof(fid_parms)); + fid_parms.direction = tbl->direction; + fid_parms.resource_func = ident->resource_func; + fid_parms.resource_type = ident->ident_type; + fid_parms.resource_hndl = iparms.id; + fid_parms.critical_resource = 0; + + rc = ulp_flow_db_resource_add(parms->ulp_ctx, + parms->tbl_idx, + parms->fid, + &fid_parms); + if (rc) { + BNXT_TF_DBG(ERR, "Failed to link res to flow rc = %d\n", + rc); + /* Need to free the identifier, so goto error */ + goto error; + } + } else { + *val = iparms.id; + } + + return 0; + +error: + /* Need to free the identifier */ + free_parms.dir = tbl->direction; + free_parms.ident_type = ident->ident_type; + free_parms.id = iparms.id; + + (void)tf_free_identifier(tfp, &free_parms); + + BNXT_TF_DBG(ERR, "Ident process failed for %s:%s\n", + ident->description, + (tbl->direction == TF_DIR_RX) ? 
"RX" : "TX"); + return rc; +} + +static int32_t +ulp_mapper_result_field_process(struct bnxt_ulp_mapper_parms *parms, + enum tf_dir dir, + struct bnxt_ulp_mapper_result_field_info *fld, + struct ulp_blob *blob, + const char *name) +{ + uint16_t idx, size_idx; + uint8_t *val = NULL; + uint64_t regval; + uint32_t val_size = 0, field_size = 0; + + switch (fld->result_opcode) { + case BNXT_ULP_RESULT_OPC_SET_TO_CONSTANT: + val = fld->result_operand; + if (!ulp_blob_push(blob, val, fld->field_bit_size)) { + BNXT_TF_DBG(ERR, "%s failed to add field\n", name); + return -EINVAL; + } + break; + case BNXT_ULP_RESULT_OPC_SET_TO_ACT_PROP: + if (!ulp_operand_read(fld->result_operand, + (uint8_t *)&idx, sizeof(uint16_t))) { + BNXT_TF_DBG(ERR, "%s operand read failed\n", name); + return -EINVAL; + } + idx = tfp_be_to_cpu_16(idx); + + if (idx >= BNXT_ULP_ACT_PROP_IDX_LAST) { + BNXT_TF_DBG(ERR, "%s act_prop[%d] oob\n", name, idx); + return -EINVAL; + } + val = &parms->act_prop->act_details[idx]; + field_size = ulp_mapper_act_prop_size_get(idx); + if (fld->field_bit_size < ULP_BYTE_2_BITS(field_size)) { + field_size = field_size - + ((fld->field_bit_size + 7) / 8); + val += field_size; + } + if (!ulp_blob_push(blob, val, fld->field_bit_size)) { + BNXT_TF_DBG(ERR, "%s push field failed\n", name); + return -EINVAL; + } + break; + case BNXT_ULP_RESULT_OPC_SET_TO_ENCAP_ACT_PROP_SZ: + if (!ulp_operand_read(fld->result_operand, + (uint8_t *)&idx, sizeof(uint16_t))) { + BNXT_TF_DBG(ERR, "%s operand read failed\n", name); + return -EINVAL; + } + idx = tfp_be_to_cpu_16(idx); + + if (idx >= BNXT_ULP_ACT_PROP_IDX_LAST) { + BNXT_TF_DBG(ERR, "%s act_prop[%d] oob\n", name, idx); + return -EINVAL; + } + val = &parms->act_prop->act_details[idx]; + + /* get the size index next */ + if (!ulp_operand_read(&fld->result_operand[sizeof(uint16_t)], + (uint8_t *)&size_idx, sizeof(uint16_t))) { + BNXT_TF_DBG(ERR, "%s operand read failed\n", name); + return -EINVAL; + } + size_idx = tfp_be_to_cpu_16(size_idx); + + if (size_idx >= BNXT_ULP_ACT_PROP_IDX_LAST) { + BNXT_TF_DBG(ERR, "act_prop[%d] oob\n", size_idx); + return -EINVAL; + } + memcpy(&val_size, &parms->act_prop->act_details[size_idx], + sizeof(uint32_t)); + val_size = tfp_be_to_cpu_32(val_size); + val_size = ULP_BYTE_2_BITS(val_size); + ulp_blob_push_encap(blob, val, val_size); + break; + case BNXT_ULP_RESULT_OPC_SET_TO_REGFILE: + if (!ulp_operand_read(fld->result_operand, + (uint8_t *)&idx, sizeof(uint16_t))) { + BNXT_TF_DBG(ERR, "%s operand read failed\n", name); + return -EINVAL; + } + + idx = tfp_be_to_cpu_16(idx); + /* Uninitialized regfile entries return 0 */ + if (!ulp_regfile_read(parms->regfile, idx, ®val)) { + BNXT_TF_DBG(ERR, "%s regfile[%d] read oob\n", + name, idx); + return -EINVAL; + } + + val = ulp_blob_push_64(blob, ®val, fld->field_bit_size); + if (!val) { + BNXT_TF_DBG(ERR, "%s push field failed\n", name); + return -EINVAL; + } + break; + case BNXT_ULP_RESULT_OPC_SET_TO_DEF_REGFILE: + if (!ulp_operand_read(fld->result_operand, + (uint8_t *)&idx, + sizeof(uint16_t))) { + BNXT_TF_DBG(ERR, "%s key operand read failed.\n", name); + return -EINVAL; + } + idx = tfp_be_to_cpu_16(idx); + if (ulp_mapper_def_regfile_read(parms->mapper_data, + dir, + idx, ®val)) { + BNXT_TF_DBG(ERR, "%s regfile[%d] read failed.\n", + name, idx); + return -EINVAL; + } + val = ulp_blob_push_64(blob, ®val, fld->field_bit_size); + if (!val) { + BNXT_TF_DBG(ERR, "%s push to key blob failed\n", name); + return -EINVAL; + } + break; + default: + return -EINVAL; + } + return 0; +} + +/* 
Function to alloc action record and set the table. */ +static int32_t +ulp_mapper_keymask_field_process(struct bnxt_ulp_mapper_parms *parms, + enum tf_dir dir, + struct bnxt_ulp_mapper_class_key_field_info *f, + struct ulp_blob *blob, + uint8_t is_key, + const char *name) +{ + uint64_t val64; + uint16_t idx, bitlen; + uint32_t opcode; + uint8_t *operand; + struct ulp_regfile *regfile = parms->regfile; + uint8_t *val = NULL; + struct bnxt_ulp_mapper_class_key_field_info *fld = f; + uint32_t field_size; + + if (is_key) { + operand = fld->spec_operand; + opcode = fld->spec_opcode; + } else { + operand = fld->mask_operand; + opcode = fld->mask_opcode; + } + + bitlen = fld->field_bit_size; + + switch (opcode) { + case BNXT_ULP_SPEC_OPC_SET_TO_CONSTANT: + val = operand; + if (!ulp_blob_push(blob, val, bitlen)) { + BNXT_TF_DBG(ERR, "%s push to key blob failed\n", name); + return -EINVAL; + } + break; + case BNXT_ULP_SPEC_OPC_ADD_PAD: + if (!ulp_blob_pad_push(blob, bitlen)) { + BNXT_TF_DBG(ERR, "%s pad too large for blob\n", name); + return -EINVAL; + } + + break; + case BNXT_ULP_SPEC_OPC_SET_TO_HDR_FIELD: + if (!ulp_operand_read(operand, (uint8_t *)&idx, + sizeof(uint16_t))) { + BNXT_TF_DBG(ERR, "%s key operand read failed.\n", name); + return -EINVAL; + } + idx = tfp_be_to_cpu_16(idx); + if (is_key) + val = parms->hdr_field[idx].spec; + else + val = parms->hdr_field[idx].mask; + + /* + * Need to account for how much data was pushed to the header + * field vs how much is to be inserted in the key/mask. + */ + field_size = parms->hdr_field[idx].size; + if (bitlen < ULP_BYTE_2_BITS(field_size)) { + field_size = field_size - ((bitlen + 7) / 8); + val += field_size; + } + + if (!ulp_blob_push(blob, val, bitlen)) { + BNXT_TF_DBG(ERR, "%s push to key blob failed\n", name); + return -EINVAL; + } + break; + case BNXT_ULP_SPEC_OPC_SET_TO_REGFILE: + if (!ulp_operand_read(operand, (uint8_t *)&idx, + sizeof(uint16_t))) { + BNXT_TF_DBG(ERR, "%s key operand read failed.\n", name); + return -EINVAL; + } + idx = tfp_be_to_cpu_16(idx); + + if (!ulp_regfile_read(regfile, idx, &val64)) { + BNXT_TF_DBG(ERR, "%s regfile[%d] read failed.\n", + name, idx); + return -EINVAL; + } + + val = ulp_blob_push_64(blob, &val64, bitlen); + if (!val) { + BNXT_TF_DBG(ERR, "%s push to key blob failed\n", name); + return -EINVAL; + } + break; + case BNXT_ULP_SPEC_OPC_SET_TO_DEF_REGFILE: + if (!ulp_operand_read(operand, (uint8_t *)&idx, + sizeof(uint16_t))) { + BNXT_TF_DBG(ERR, "%s key operand read failed.\n", name); + return -EINVAL; + } + idx = tfp_be_to_cpu_16(idx); + if (ulp_mapper_def_regfile_read(parms->mapper_data, + dir, + idx, &val64)) { + BNXT_TF_DBG(ERR, "%s regfile[%d] read failed.\n", + name, idx); + return -EINVAL; + } + val = ulp_blob_push_64(blob, &val64, bitlen); + if (!val) { + BNXT_TF_DBG(ERR, "%s push to key blob failed\n", name); + return -EINVAL; + } + break; + default: + break; + } + + return 0; +} + +/* Function to alloc action record and set the table. 
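+ * The allocated index is also written to the regfile entry given by
+ * regfile_wr_idx so that later tables can reference the action record.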
*/ +static int32_t +ulp_mapper_action_alloc_and_set(struct bnxt_ulp_mapper_parms *parms, + struct ulp_blob *blob) +{ + struct ulp_flow_db_res_params fid_parms; + struct tf_alloc_tbl_entry_parms alloc_parms = { 0 }; + struct tf_free_tbl_entry_parms free_parms = { 0 }; + struct bnxt_ulp_mapper_act_tbl_info *atbls = parms->atbls; + int32_t rc = 0; + int32_t trc; + uint64_t idx; + uint32_t tbl_scope_id; + + bnxt_ulp_cntxt_tbl_scope_id_get(parms->ulp_ctx, &tbl_scope_id); + + /* Set the allocation parameters for the table*/ + alloc_parms.dir = atbls->direction; + alloc_parms.type = atbls->table_type; + alloc_parms.search_enable = atbls->srch_b4_alloc; + alloc_parms.result = ulp_blob_data_get(blob, + &alloc_parms.result_sz_in_bytes); + alloc_parms.tbl_scope_id = tbl_scope_id; + if (!alloc_parms.result) { + BNXT_TF_DBG(ERR, "blob is not populated\n"); + return -EINVAL; + } + + rc = tf_alloc_tbl_entry(parms->tfp, &alloc_parms); + if (rc) { + BNXT_TF_DBG(ERR, "table type= [%d] dir = [%s] alloc failed\n", + alloc_parms.type, + (alloc_parms.dir == TF_DIR_RX) ? "RX" : "TX"); + return rc; + } + + /* Need to calculate the idx for the result record */ + uint64_t tmpidx = alloc_parms.idx; + + if (atbls->table_type == TF_TBL_TYPE_EXT) + tmpidx = TF_ACT_REC_OFFSET_2_PTR(alloc_parms.idx); + else + tmpidx = alloc_parms.idx; + + idx = tfp_cpu_to_be_64(tmpidx); + + /* Store the allocated index for future use in the regfile */ + rc = ulp_regfile_write(parms->regfile, atbls->regfile_wr_idx, idx); + if (!rc) { + BNXT_TF_DBG(ERR, "regfile[%d] write failed\n", + atbls->regfile_wr_idx); + rc = -EINVAL; + goto error; + } + + /* + * The set_tbl_entry API if search is not enabled or searched entry + * is not found. + */ + if (!atbls->srch_b4_alloc || !alloc_parms.hit) { + struct tf_set_tbl_entry_parms set_parm = { 0 }; + uint16_t length; + + set_parm.dir = atbls->direction; + set_parm.type = atbls->table_type; + set_parm.idx = alloc_parms.idx; + set_parm.data = ulp_blob_data_get(blob, &length); + set_parm.data_sz_in_bytes = length / 8; + + if (set_parm.type == TF_TBL_TYPE_EXT) + set_parm.tbl_scope_id = tbl_scope_id; + + /* set the table entry */ + rc = tf_set_tbl_entry(parms->tfp, &set_parm); + if (rc) { + BNXT_TF_DBG(ERR, "table[%d][%s][%d] set failed\n", + set_parm.type, + (set_parm.dir == TF_DIR_RX) ? "RX" : "TX", + set_parm.idx); + goto error; + } + } + + /* Link the resource to the flow in the flow db */ + memset(&fid_parms, 0, sizeof(fid_parms)); + fid_parms.direction = atbls->direction; + fid_parms.resource_func = atbls->resource_func; + fid_parms.resource_type = atbls->table_type; + fid_parms.resource_hndl = alloc_parms.idx; + fid_parms.critical_resource = 0; + + rc = ulp_flow_db_resource_add(parms->ulp_ctx, + parms->tbl_idx, + parms->fid, + &fid_parms); + if (rc) { + BNXT_TF_DBG(ERR, "Failed to link resource to flow rc = %d\n", + rc); + rc = -EINVAL; + goto error; + } + + return 0; +error: + + free_parms.dir = alloc_parms.dir; + free_parms.type = alloc_parms.type; + free_parms.idx = alloc_parms.idx; + + trc = tf_free_tbl_entry(parms->tfp, &free_parms); + if (trc) + BNXT_TF_DBG(ERR, "Failed to free table entry on failure\n"); + + return rc; +} + +/* + * Function to process the action Info. Iterate through the list + * action info templates and process it. 
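+ * Each action table builds a result blob from its template fields and then
+ * allocates and programs the corresponding hardware table entry.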
+ */ +static int32_t +ulp_mapper_action_info_process(struct bnxt_ulp_mapper_parms *parms, + struct bnxt_ulp_mapper_act_tbl_info *tbl) +{ + struct ulp_blob blob; + struct bnxt_ulp_mapper_result_field_info *flds, *fld; + uint32_t num_flds = 0; + uint32_t encap_flds = 0; + uint32_t i; + int32_t rc; + uint16_t bit_size; + + if (!tbl || !parms->act_prop || !parms->act_bitmap || !parms->regfile) + return -EINVAL; + + /* use the max size if encap is enabled */ + if (tbl->encap_num_fields) + bit_size = BNXT_ULP_FLMP_BLOB_SIZE_IN_BITS; + else + bit_size = tbl->result_bit_size; + if (!ulp_blob_init(&blob, bit_size, parms->order)) { + BNXT_TF_DBG(ERR, "action blob init failed\n"); + return -EINVAL; + } + + flds = ulp_mapper_act_result_fields_get(tbl, &num_flds, &encap_flds); + if (!flds || !num_flds) { + BNXT_TF_DBG(ERR, "Template undefined for action\n"); + return -EINVAL; + } + + for (i = 0; i < (num_flds + encap_flds); i++) { + fld = &flds[i]; + rc = ulp_mapper_result_field_process(parms, + tbl->direction, + fld, + &blob, + "Action"); + if (rc) { + BNXT_TF_DBG(ERR, "Action field failed\n"); + return rc; + } + /* set the swap index if 64 bit swap is enabled */ + if (parms->encap_byte_swap && encap_flds) { + if ((i + 1) == num_flds) + ulp_blob_encap_swap_idx_set(&blob); + /* if 64 bit swap is enabled perform the 64bit swap */ + if ((i + 1) == (num_flds + encap_flds)) + ulp_blob_perform_encap_swap(&blob); + } + } + + rc = ulp_mapper_action_alloc_and_set(parms, &blob); + return rc; +} + +static int32_t +ulp_mapper_tcam_tbl_process(struct bnxt_ulp_mapper_parms *parms, + struct bnxt_ulp_mapper_class_tbl_info *tbl) +{ + struct bnxt_ulp_mapper_class_key_field_info *kflds; + struct ulp_blob key, mask, data; + uint32_t i, num_kflds; + struct tf *tfp; + int32_t rc, trc; + struct tf_alloc_tcam_entry_parms aparms = { 0 }; + struct tf_set_tcam_entry_parms sparms = { 0 }; + struct ulp_flow_db_res_params fid_parms = { 0 }; + struct tf_free_tcam_entry_parms free_parms = { 0 }; + uint32_t hit = 0; + uint16_t tmplen = 0; + + /* Skip this if was handled by the cache. 
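+ * The cache handler sets the opcode to CACHE_SKIP on a cache hit, in which
+ * case the TCAM entry already exists and only the opcode is reset here.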
*/ + if (parms->tcam_tbl_opc == BNXT_ULP_MAPPER_TCAM_TBL_OPC_CACHE_SKIP) { + parms->tcam_tbl_opc = BNXT_ULP_MAPPER_TCAM_TBL_OPC_NORMAL; + return 0; + } + + tfp = bnxt_ulp_cntxt_tfp_get(parms->ulp_ctx); + if (!tfp) { + BNXT_TF_DBG(ERR, "Failed to get truflow pointer\n"); + return -EINVAL; + } + + kflds = ulp_mapper_key_fields_get(tbl, &num_kflds); + if (!kflds || !num_kflds) { + BNXT_TF_DBG(ERR, "Failed to get key fields\n"); + return -EINVAL; + } + + if (!ulp_blob_init(&key, tbl->key_bit_size, parms->order) || + !ulp_blob_init(&mask, tbl->key_bit_size, parms->order) || + !ulp_blob_init(&data, tbl->result_bit_size, parms->order)) { + BNXT_TF_DBG(ERR, "blob inits failed.\n"); + return -EINVAL; + } + + /* create the key/mask */ + /* + * NOTE: The WC table will require some kind of flag to handle the + * mode bits within the key/mask + */ + for (i = 0; i < num_kflds; i++) { + /* Setup the key */ + rc = ulp_mapper_keymask_field_process(parms, tbl->direction, + &kflds[i], + &key, 1, "TCAM Key"); + if (rc) { + BNXT_TF_DBG(ERR, "Key field set failed.\n"); + return rc; + } + + /* Setup the mask */ + rc = ulp_mapper_keymask_field_process(parms, tbl->direction, + &kflds[i], + &mask, 0, "TCAM Mask"); + if (rc) { + BNXT_TF_DBG(ERR, "Mask field set failed.\n"); + return rc; + } + } + + aparms.dir = tbl->direction; + aparms.tcam_tbl_type = tbl->table_type; + aparms.search_enable = tbl->srch_b4_alloc; + aparms.key_sz_in_bits = tbl->key_bit_size; + aparms.key = ulp_blob_data_get(&key, &tmplen); + if (tbl->key_bit_size != tmplen) { + BNXT_TF_DBG(ERR, "Key len (%d) != Expected (%d)\n", + tmplen, tbl->key_bit_size); + return -EINVAL; + } + + aparms.mask = ulp_blob_data_get(&mask, &tmplen); + if (tbl->key_bit_size != tmplen) { + BNXT_TF_DBG(ERR, "Mask len (%d) != Expected (%d)\n", + tmplen, tbl->key_bit_size); + return -EINVAL; + } + + aparms.priority = tbl->priority; + + /* + * All failures after this succeeds require the entry to be freed. + * cannot return directly on failure, but needs to goto error + */ + rc = tf_alloc_tcam_entry(tfp, &aparms); + if (rc) { + BNXT_TF_DBG(ERR, "tcam alloc failed rc=%d.\n", rc); + return rc; + } + + hit = aparms.hit; + + /* Build the result */ + if (!tbl->srch_b4_alloc || !hit) { + struct bnxt_ulp_mapper_result_field_info *dflds; + struct bnxt_ulp_mapper_ident_info *idents; + uint32_t num_dflds, num_idents; + + /* + * Since the cache entry is responsible for allocating + * identifiers when in use, allocate the identifiers only + * during normal processing. 
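+ * In the CACHE_ALLOC case the identifiers were already allocated by the
+ * cache table handler and written to the regfile.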
+ */ + if (parms->tcam_tbl_opc == + BNXT_ULP_MAPPER_TCAM_TBL_OPC_NORMAL) { + idents = ulp_mapper_ident_fields_get(tbl, &num_idents); + + for (i = 0; i < num_idents; i++) { + rc = ulp_mapper_ident_process(parms, tbl, + &idents[i], NULL); + /* Already logged the error, just return */ + if (rc) + goto error; + } + } + + /* Create the result data blob */ + dflds = ulp_mapper_result_fields_get(tbl, &num_dflds); + if (!dflds || !num_dflds) { + BNXT_TF_DBG(ERR, "Failed to get data fields.\n"); + rc = -EINVAL; + goto error; + } + + for (i = 0; i < num_dflds; i++) { + rc = ulp_mapper_result_field_process(parms, + tbl->direction, + &dflds[i], + &data, + "TCAM Result"); + if (rc) { + BNXT_TF_DBG(ERR, "Failed to set data fields\n"); + goto error; + } + } + + sparms.dir = aparms.dir; + sparms.tcam_tbl_type = aparms.tcam_tbl_type; + sparms.idx = aparms.idx; + /* Already verified the key/mask lengths */ + sparms.key = ulp_blob_data_get(&key, &tmplen); + sparms.mask = ulp_blob_data_get(&mask, &tmplen); + sparms.key_sz_in_bits = tbl->key_bit_size; + sparms.result = ulp_blob_data_get(&data, &tmplen); + + if (tbl->result_bit_size != tmplen) { + BNXT_TF_DBG(ERR, "Result len (%d) != Expected (%d)\n", + tmplen, tbl->result_bit_size); + rc = -EINVAL; + goto error; + } + sparms.result_sz_in_bits = tbl->result_bit_size; + + rc = tf_set_tcam_entry(tfp, &sparms); + if (rc) { + BNXT_TF_DBG(ERR, "tcam[%d][%s][%d] write failed.\n", + sparms.tcam_tbl_type, + (sparms.dir == TF_DIR_RX) ? "RX" : "TX", + sparms.idx); + goto error; + } + + /* Update cache with TCAM index if the was cache allocated. */ + if (parms->tcam_tbl_opc == + BNXT_ULP_MAPPER_TCAM_TBL_OPC_CACHE_ALLOC) { + if (!parms->cache_ptr) { + BNXT_TF_DBG(ERR, "Unable to update cache"); + rc = -EINVAL; + goto error; + } + parms->cache_ptr->tcam_idx = aparms.idx; + } + + } else { + BNXT_TF_DBG(ERR, "Not supporting search before alloc now\n"); + rc = -EINVAL; + goto error; + } + + /* + * Only link the entry to the flow db in the event that cache was not + * used. + */ + if (parms->tcam_tbl_opc == BNXT_ULP_MAPPER_TCAM_TBL_OPC_NORMAL) { + fid_parms.direction = tbl->direction; + fid_parms.resource_func = tbl->resource_func; + fid_parms.resource_type = tbl->table_type; + fid_parms.critical_resource = tbl->critical_resource; + fid_parms.resource_hndl = aparms.idx; + rc = ulp_flow_db_resource_add(parms->ulp_ctx, + parms->tbl_idx, + parms->fid, + &fid_parms); + if (rc) { + BNXT_TF_DBG(ERR, + "Failed to link resource to flow rc = %d\n", + rc); + /* Need to free the identifier, so goto error */ + goto error; + } + } else { + /* + * Reset the tcam table opcode to normal in case the next tcam + * entry does not use cache. 
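+ * The TCAM index itself is owned by the cache entry and freed through it,
+ * so no flow db resource is added for the TCAM here.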
+ */ + parms->tcam_tbl_opc = BNXT_ULP_MAPPER_TCAM_TBL_OPC_NORMAL; + parms->cache_ptr = NULL; + } + + return 0; +error: + parms->tcam_tbl_opc = BNXT_ULP_MAPPER_TCAM_TBL_OPC_NORMAL; + free_parms.dir = tbl->direction; + free_parms.tcam_tbl_type = tbl->table_type; + free_parms.idx = aparms.idx; + trc = tf_free_tcam_entry(tfp, &free_parms); + if (trc) + BNXT_TF_DBG(ERR, "Failed to free tcam[%d][%d][%d] on failure\n", + tbl->table_type, tbl->direction, aparms.idx); + + return rc; +} + +static int32_t +ulp_mapper_em_tbl_process(struct bnxt_ulp_mapper_parms *parms, + struct bnxt_ulp_mapper_class_tbl_info *tbl) +{ + struct bnxt_ulp_mapper_class_key_field_info *kflds; + struct bnxt_ulp_mapper_result_field_info *dflds; + struct ulp_blob key, data; + uint32_t i, num_kflds, num_dflds; + uint16_t tmplen; + struct tf *tfp = bnxt_ulp_cntxt_tfp_get(parms->ulp_ctx); + struct ulp_rte_act_prop *a_prop = parms->act_prop; + struct ulp_flow_db_res_params fid_parms = { 0 }; + struct tf_insert_em_entry_parms iparms = { 0 }; + struct tf_delete_em_entry_parms free_parms = { 0 }; + int32_t trc; + int32_t rc = 0; + + kflds = ulp_mapper_key_fields_get(tbl, &num_kflds); + if (!kflds || !num_kflds) { + BNXT_TF_DBG(ERR, "Failed to get key fields\n"); + return -EINVAL; + } + + /* Initialize the key/result blobs */ + if (!ulp_blob_init(&key, tbl->blob_key_bit_size, parms->order) || + !ulp_blob_init(&data, tbl->result_bit_size, parms->order)) { + BNXT_TF_DBG(ERR, "blob inits failed.\n"); + return -EINVAL; + } + + /* create the key */ + for (i = 0; i < num_kflds; i++) { + /* Setup the key */ + rc = ulp_mapper_keymask_field_process(parms, tbl->direction, + &kflds[i], + &key, 1, "EM Key"); + if (rc) { + BNXT_TF_DBG(ERR, "Key field set failed.\n"); + return rc; + } + } + + /* + * TBD: Normally should process identifiers in case of using recycle or + * loopback. Not supporting recycle for now. + */ + + /* Create the result data blob */ + dflds = ulp_mapper_result_fields_get(tbl, &num_dflds); + if (!dflds || !num_dflds) { + BNXT_TF_DBG(ERR, "Failed to get data fields.\n"); + return -EINVAL; + } + + for (i = 0; i < num_dflds; i++) { + struct bnxt_ulp_mapper_result_field_info *fld; + + fld = &dflds[i]; + + rc = ulp_mapper_result_field_process(parms, + tbl->direction, + fld, + &data, + "EM Result"); + if (rc) { + BNXT_TF_DBG(ERR, "Failed to set data fields.\n"); + return rc; + } + } + + rc = bnxt_ulp_cntxt_tbl_scope_id_get(parms->ulp_ctx, + &iparms.tbl_scope_id); + if (rc) { + BNXT_TF_DBG(ERR, "Failed to get table scope rc=%d\n", rc); + return rc; + } + + /* + * NOTE: the actual blob size will differ from the size in the tbl + * entry due to the padding. 
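+ * The key and record sizes passed to the insert therefore come from the
+ * table entry rather than from the blob length.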
+ */ + iparms.dup_check = 0; + iparms.dir = tbl->direction; + iparms.mem = tbl->mem; + iparms.key = ulp_blob_data_get(&key, &tmplen); + iparms.key_sz_in_bits = tbl->key_bit_size; + iparms.em_record = ulp_blob_data_get(&data, &tmplen); + iparms.em_record_sz_in_bits = tbl->result_bit_size; + + rc = tf_insert_em_entry(tfp, &iparms); + if (rc) { + BNXT_TF_DBG(ERR, "Failed to insert em entry rc=%d.\n", rc); + return rc; + } + + if (tbl->mark_enable && + ULP_BITMAP_ISSET(parms->act_bitmap->bits, + BNXT_ULP_ACTION_BIT_MARK)) { + uint32_t val, mark, gfid, flag; + /* TBD: Need to determine if GFID is enabled globally */ + if (sizeof(val) != BNXT_ULP_ACT_PROP_SZ_MARK) { + BNXT_TF_DBG(ERR, "Mark size (%d) != expected (%zu)\n", + BNXT_ULP_ACT_PROP_SZ_MARK, sizeof(val)); + rc = -EINVAL; + goto error; + } + + memcpy(&val, + &a_prop->act_details[BNXT_ULP_ACT_PROP_IDX_MARK], + sizeof(val)); + + mark = tfp_be_to_cpu_32(val); + + TF_GET_GFID_FROM_FLOW_ID(iparms.flow_id, gfid); + flag = BNXT_ULP_MARK_GLOBAL_HW_FID; + rc = ulp_mark_db_mark_add(parms->ulp_ctx, + flag, + gfid, + mark); + if (rc) { + BNXT_TF_DBG(ERR, "Failed to add mark to flow\n"); + goto error; + } + + /* + * Link the mark resource to the flow in the flow db + * The mark is never the critical resource, so it is 0. + */ + memset(&fid_parms, 0, sizeof(fid_parms)); + fid_parms.direction = tbl->direction; + fid_parms.resource_func = BNXT_ULP_RESOURCE_FUNC_HW_FID; + fid_parms.resource_type = flag; + fid_parms.resource_hndl = gfid; + fid_parms.critical_resource = 0; + + rc = ulp_flow_db_resource_add(parms->ulp_ctx, + parms->tbl_idx, + parms->fid, + &fid_parms); + if (rc) { + BNXT_TF_DBG(ERR, "Fail to link res to flow rc = %d\n", + rc); + /* Need to free the identifier, so goto error */ + goto error; + } + } + + /* Link the EM resource to the flow in the flow db */ + memset(&fid_parms, 0, sizeof(fid_parms)); + fid_parms.direction = tbl->direction; + fid_parms.resource_func = tbl->resource_func; + fid_parms.resource_type = tbl->table_type; + fid_parms.critical_resource = tbl->critical_resource; + fid_parms.resource_hndl = iparms.flow_handle; + + rc = ulp_flow_db_resource_add(parms->ulp_ctx, + parms->tbl_idx, + parms->fid, + &fid_parms); + if (rc) { + BNXT_TF_DBG(ERR, "Fail to link res to flow rc = %d\n", + rc); + /* Need to free the identifier, so goto error */ + goto error; + } + + return 0; +error: + free_parms.dir = iparms.dir; + free_parms.mem = iparms.mem; + free_parms.tbl_scope_id = iparms.tbl_scope_id; + free_parms.flow_handle = iparms.flow_handle; + + trc = tf_delete_em_entry(tfp, &free_parms); + if (trc) + BNXT_TF_DBG(ERR, "Failed to delete EM entry on failed add\n"); + + return rc; +} + +static int32_t +ulp_mapper_index_tbl_process(struct bnxt_ulp_mapper_parms *parms, + struct bnxt_ulp_mapper_class_tbl_info *tbl) +{ + struct bnxt_ulp_mapper_result_field_info *flds; + struct ulp_flow_db_res_params fid_parms; + struct ulp_blob data; + uint64_t idx; + uint16_t tmplen; + uint32_t i, num_flds; + int32_t rc = 0, trc = 0; + struct tf_alloc_tbl_entry_parms aparms = { 0 }; + struct tf_set_tbl_entry_parms sparms = { 0 }; + struct tf_free_tbl_entry_parms free_parms = { 0 }; + uint32_t tbl_scope_id; + struct tf *tfp = bnxt_ulp_cntxt_tfp_get(parms->ulp_ctx); + + bnxt_ulp_cntxt_tbl_scope_id_get(parms->ulp_ctx, &tbl_scope_id); + + if (!ulp_blob_init(&data, tbl->result_bit_size, parms->order)) { + BNXT_TF_DBG(ERR, "Failed initial index table blob\n"); + return -EINVAL; + } + + flds = ulp_mapper_result_fields_get(tbl, &num_flds); + if (!flds || !num_flds) { 
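+ /* The template must define result fields for an index table. */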
+ BNXT_TF_DBG(ERR, "Template undefined for action\n"); + return -EINVAL; + } + + for (i = 0; i < num_flds; i++) { + rc = ulp_mapper_result_field_process(parms, + tbl->direction, + &flds[i], + &data, + "Indexed Result"); + if (rc) { + BNXT_TF_DBG(ERR, "data field failed\n"); + return rc; + } + } + + aparms.dir = tbl->direction; + aparms.type = tbl->table_type; + aparms.search_enable = tbl->srch_b4_alloc; + aparms.result = ulp_blob_data_get(&data, &tmplen); + aparms.result_sz_in_bytes = ULP_SZ_BITS2BYTES(tbl->result_bit_size); + aparms.tbl_scope_id = tbl_scope_id; + + /* All failures after the alloc succeeds require a free */ + rc = tf_alloc_tbl_entry(tfp, &aparms); + if (rc) { + BNXT_TF_DBG(ERR, "Alloc table[%d][%s] failed rc=%d\n", + tbl->table_type, + (tbl->direction == TF_DIR_RX) ? "RX" : "TX", + rc); + return rc; + } + + /* Always storing values in Regfile in BE */ + idx = tfp_cpu_to_be_64(aparms.idx); + rc = ulp_regfile_write(parms->regfile, tbl->regfile_wr_idx, idx); + if (!rc) { + BNXT_TF_DBG(ERR, "Write regfile[%d] failed\n", + tbl->regfile_wr_idx); + goto error; + } + + if (!tbl->srch_b4_alloc) { + sparms.dir = tbl->direction; + sparms.type = tbl->table_type; + sparms.data = ulp_blob_data_get(&data, &tmplen); + sparms.data_sz_in_bytes = + ULP_SZ_BITS2BYTES(tbl->result_bit_size); + sparms.idx = aparms.idx; + sparms.tbl_scope_id = tbl_scope_id; + + rc = tf_set_tbl_entry(tfp, &sparms); + if (rc) { + BNXT_TF_DBG(ERR, "Set table[%d][%s][%d] failed rc=%d\n", + tbl->table_type, + (tbl->direction == TF_DIR_RX) ? "RX" : "TX", + sparms.idx, + rc); + + goto error; + } + } + + /* Link the resource to the flow in the flow db */ + memset(&fid_parms, 0, sizeof(fid_parms)); + fid_parms.direction = tbl->direction; + fid_parms.resource_func = tbl->resource_func; + fid_parms.resource_type = tbl->table_type; + fid_parms.resource_hndl = aparms.idx; + fid_parms.critical_resource = 0; + + rc = ulp_flow_db_resource_add(parms->ulp_ctx, + parms->tbl_idx, + parms->fid, + &fid_parms); + if (rc) { + BNXT_TF_DBG(ERR, "Failed to link resource to flow rc = %d\n", + rc); + goto error; + } + + return rc; +error: + /* + * Free the allocated resource since we failed to either + * write to the entry or link the flow + */ + free_parms.dir = tbl->direction; + free_parms.type = tbl->table_type; + free_parms.idx = aparms.idx; + free_parms.tbl_scope_id = tbl_scope_id; + + trc = tf_free_tbl_entry(tfp, &free_parms); + if (trc) + BNXT_TF_DBG(ERR, "Failed to free tbl entry on failure\n"); + + return rc; +} + +static int32_t +ulp_mapper_cache_tbl_process(struct bnxt_ulp_mapper_parms *parms, + struct bnxt_ulp_mapper_class_tbl_info *tbl) +{ + struct bnxt_ulp_mapper_class_key_field_info *kflds; + struct bnxt_ulp_mapper_cache_entry *cache_entry; + struct bnxt_ulp_mapper_ident_info *idents; + uint32_t i, num_kflds = 0, num_idents = 0; + struct ulp_flow_db_res_params fid_parms; + struct tf_free_identifier_parms fparms; + uint16_t tmplen, tmp_ident; + struct ulp_blob key; + uint8_t *cache_key; + uint64_t regval; + uint16_t *ckey; + int32_t rc; + + /* Get the key fields list and build the key. 
*/ + kflds = ulp_mapper_key_fields_get(tbl, &num_kflds); + if (!kflds || !num_kflds) { + BNXT_TF_DBG(ERR, "Failed to get key fields\n"); + return -EINVAL; + } + if (!ulp_blob_init(&key, tbl->key_bit_size, parms->order)) { + BNXT_TF_DBG(ERR, "Failed to alloc blob\n"); + return -EINVAL; + } + for (i = 0; i < num_kflds; i++) { + /* Setup the key */ + rc = ulp_mapper_keymask_field_process(parms, tbl->direction, + &kflds[i], + &key, 1, "Cache Key"); + if (rc) { + BNXT_TF_DBG(ERR, + "Failed to create key for Cache rc=%d\n", + rc); + return -EINVAL; + } + } + + /* + * Perform the lookup in the cache table with constructed key. The + * cache_key is a byte array of tmplen, it needs to be converted to a + * index for the cache table. + */ + cache_key = ulp_blob_data_get(&key, &tmplen); + ckey = (uint16_t *)cache_key; + cache_entry = ulp_mapper_cache_entry_get(parms->ulp_ctx, + tbl->cache_tbl_id, + *ckey); + + /* + * Get the identifier list for processing by both the hit and miss + * processing. + */ + idents = ulp_mapper_ident_fields_get(tbl, &num_idents); + + if (!cache_entry->ref_count) { + /* Initialize the cache entry */ + cache_entry->tcam_idx = 0; + cache_entry->ref_count = 0; + for (i = 0; i < BNXT_ULP_CACHE_TBL_IDENT_MAX_NUM; i++) + cache_entry->idents[i] = ULP_IDENTS_INVALID; + + /* Need to allocate identifiers for storing in the cache. */ + for (i = 0; i < num_idents; i++) { + /* + * Since we are using the cache, the identifier does not + * get added to the flow db. Pass in the pointer to the + * tmp_ident. + */ + rc = ulp_mapper_ident_process(parms, tbl, + &idents[i], &tmp_ident); + if (rc) + goto error; + + cache_entry->ident_types[i] = idents[i].ident_type; + cache_entry->idents[i] = tmp_ident; + } + + /* Tell the TCAM processor to alloc an entry */ + parms->tcam_tbl_opc = BNXT_ULP_MAPPER_TCAM_TBL_OPC_CACHE_ALLOC; + /* Store the cache key for use by the tcam process code */ + parms->cache_ptr = cache_entry; + } else { + /* Cache hit, get values from result. */ + for (i = 0; i < num_idents; i++) { + regval = (uint64_t)cache_entry->idents[i]; + if (!ulp_regfile_write(parms->regfile, + idents[i].regfile_wr_idx, + tfp_cpu_to_be_64(regval))) { + BNXT_TF_DBG(ERR, + "Failed to write to regfile\n"); + return -EINVAL; + } + } + /* + * The cached entry is being used, so let the tcam processing + * know not to process this table. + */ + parms->tcam_tbl_opc = BNXT_ULP_MAPPER_TCAM_TBL_OPC_CACHE_SKIP; + } + + /* Made through the cache processing, increment the reference count. */ + cache_entry->ref_count++; + + /* Link the cache to the flow db. */ + memset(&fid_parms, 0, sizeof(fid_parms)); + fid_parms.direction = tbl->direction; + fid_parms.resource_func = tbl->resource_func; + + /* + * Cache resource type is composed of both table_type and cache_tbl_id + * need to set it appropriately via setter. + */ + ulp_mapper_cache_res_type_set(&fid_parms, + tbl->table_type, + tbl->cache_tbl_id); + fid_parms.resource_hndl = (uint64_t)*ckey; + fid_parms.critical_resource = tbl->critical_resource; + rc = ulp_flow_db_resource_add(parms->ulp_ctx, + parms->tbl_idx, + parms->fid, + &fid_parms); + if (rc) + BNXT_TF_DBG(ERR, "Failed to add cache to flow db.\n"); + + return rc; +error: + /* + * This error handling only gets called when the idents are being + * allocated for the cache on misses. Using the num_idents that was + * previously set. 
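+ * Identifier slots still set to ULP_IDENTS_INVALID were never allocated and
+ * are skipped.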
+ */ + for (i = 0; i < num_idents; i++) { + if (cache_entry->idents[i] == ULP_IDENTS_INVALID) + continue; + + fparms.dir = tbl->direction; + fparms.ident_type = idents[i].ident_type; + fparms.id = cache_entry->idents[i]; + tf_free_identifier(parms->tfp, &fparms); + } + + return rc; +} + +/* + * Function to process the action template. Iterate through the list + * action info templates and process it. + */ +static int32_t +ulp_mapper_action_tbls_process(struct bnxt_ulp_mapper_parms *parms) +{ + uint32_t i; + int32_t rc = 0; + + if (!parms->atbls || !parms->num_atbls) { + BNXT_TF_DBG(ERR, "No action tables for template[%d][%d].\n", + parms->dev_id, parms->act_tid); + return -EINVAL; + } + + for (i = 0; i < parms->num_atbls; i++) { + rc = ulp_mapper_action_info_process(parms, &parms->atbls[i]); + if (rc) + return rc; + } + + return rc; +} + +/* Create the classifier table entries for a flow. */ +static int32_t +ulp_mapper_class_tbls_process(struct bnxt_ulp_mapper_parms *parms) +{ + uint32_t i; + int32_t rc = 0; + + if (!parms) + return -EINVAL; + + if (!parms->ctbls || !parms->num_ctbls) { + BNXT_TF_DBG(ERR, "No class tables for template[%d][%d].\n", + parms->dev_id, parms->class_tid); + return -EINVAL; + } + + for (i = 0; i < parms->num_ctbls; i++) { + struct bnxt_ulp_mapper_class_tbl_info *tbl = &parms->ctbls[i]; + + switch (tbl->resource_func) { + case BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE: + rc = ulp_mapper_tcam_tbl_process(parms, tbl); + break; + case BNXT_ULP_RESOURCE_FUNC_EM_TABLE: + rc = ulp_mapper_em_tbl_process(parms, tbl); + break; + case BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE: + rc = ulp_mapper_index_tbl_process(parms, tbl); + break; + case BNXT_ULP_RESOURCE_FUNC_CACHE_TABLE: + rc = ulp_mapper_cache_tbl_process(parms, tbl); + break; + default: + BNXT_TF_DBG(ERR, "Unexpected class resource %d\n", + tbl->resource_func); + return -EINVAL; + } + + if (rc) { + BNXT_TF_DBG(ERR, "Resource type %d failed\n", + tbl->resource_func); + return rc; + } + } + + return rc; +} + +static int32_t +ulp_mapper_resource_free(struct bnxt_ulp_context *ulp, + struct ulp_flow_db_res_params *res) +{ + struct tf *tfp; + int32_t rc = 0; + + if (!res || !ulp) { + BNXT_TF_DBG(ERR, "Unable to free resource\n "); + return -EINVAL; + } + + tfp = bnxt_ulp_cntxt_tfp_get(ulp); + if (!tfp) { + BNXT_TF_DBG(ERR, "Unable to free resource failed to get tfp\n"); + return -EINVAL; + } + + switch (res->resource_func) { + case BNXT_ULP_RESOURCE_FUNC_CACHE_TABLE: + rc = ulp_mapper_cache_entry_free(ulp, tfp, res); + break; + case BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE: + rc = ulp_mapper_tcam_entry_free(ulp, tfp, res); + break; + case BNXT_ULP_RESOURCE_FUNC_EM_TABLE: + rc = ulp_mapper_eem_entry_free(ulp, tfp, res); + break; + case BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE: + rc = ulp_mapper_index_entry_free(ulp, tfp, res); + break; + case BNXT_ULP_RESOURCE_FUNC_IDENTIFIER: + rc = ulp_mapper_ident_free(ulp, tfp, res); + break; + case BNXT_ULP_RESOURCE_FUNC_HW_FID: + rc = ulp_mapper_mark_free(ulp, res); + break; + default: + break; + } + + return rc; +} + +int32_t +ulp_mapper_resources_free(struct bnxt_ulp_context *ulp_ctx, + uint32_t fid, + enum bnxt_ulp_flow_db_tables tbl_type) +{ + struct ulp_flow_db_res_params res_parms = { 0 }; + int32_t rc, trc; + + if (!ulp_ctx) { + BNXT_TF_DBG(ERR, "Invalid parms, unable to free flow\n"); + return -EINVAL; + } + + /* + * Set the critical resource on the first resource del, then iterate + * while status is good + */ + res_parms.critical_resource = 1; + rc = ulp_flow_db_resource_del(ulp_ctx, tbl_type, fid, 
&res_parms); + + if (rc) { + /* + * This is unexpected on the first call to resource del. + * It likely means that the flow did not exist in the flow db. + */ + BNXT_TF_DBG(ERR, "Flow[%d][0x%08x] failed to free (rc=%d)\n", + tbl_type, fid, rc); + return rc; + } + + while (!rc) { + trc = ulp_mapper_resource_free(ulp_ctx, &res_parms); + if (trc) + /* + * On fail, we still need to attempt to free the + * remaining resources. Don't return + */ + BNXT_TF_DBG(ERR, + "Flow[%d][0x%x] Res[%d][0x%016" PRIx64 + "] failed rc=%d.\n", + tbl_type, fid, res_parms.resource_func, + res_parms.resource_hndl, trc); + + /* All subsequent call require the critical_resource be zero */ + res_parms.critical_resource = 0; + + rc = ulp_flow_db_resource_del(ulp_ctx, + tbl_type, + fid, + &res_parms); + } + + /* Free the Flow ID since we've removed all resources */ + rc = ulp_flow_db_fid_free(ulp_ctx, tbl_type, fid); + + return rc; +} + +int32_t +ulp_mapper_flow_destroy(struct bnxt_ulp_context *ulp_ctx, uint32_t fid) +{ + if (!ulp_ctx) { + BNXT_TF_DBG(ERR, "Invalid parms, unable to free flow\n"); + return -EINVAL; + } + + return ulp_mapper_resources_free(ulp_ctx, + fid, + BNXT_ULP_REGULAR_FLOW_TABLE); +} + +/* Function to handle the mapping of the Flow to be compatible + * with the underlying hardware. + */ +int32_t +ulp_mapper_flow_create(struct bnxt_ulp_context *ulp_ctx, + struct bnxt_ulp_mapper_create_parms *cparms, + uint32_t *flowid) +{ + struct bnxt_ulp_device_params *device_params; + struct bnxt_ulp_mapper_parms parms; + struct ulp_regfile regfile; + int32_t rc, trc; + + if (!ulp_ctx || !cparms) + return -EINVAL; + + /* Initialize the parms structure */ + memset(&parms, 0, sizeof(parms)); + parms.act_prop = cparms->act_prop; + parms.act_bitmap = cparms->act; + parms.regfile = ®file; + parms.hdr_field = cparms->hdr_field; + parms.tfp = bnxt_ulp_cntxt_tfp_get(ulp_ctx); + parms.ulp_ctx = ulp_ctx; + parms.tcam_tbl_opc = BNXT_ULP_MAPPER_TCAM_TBL_OPC_NORMAL; + + /* Get the device id from the ulp context */ + if (bnxt_ulp_cntxt_dev_id_get(ulp_ctx, &parms.dev_id)) { + BNXT_TF_DBG(ERR, "Invalid ulp context\n"); + return -EINVAL; + } + + /* + * Get the mapper data for dynamic mapper data such as default + * ids. 
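+ * The default identifiers in the mapper data were allocated by
+ * ulp_mapper_init and are read back through the default regfile.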
+ */ + parms.mapper_data = (struct bnxt_ulp_mapper_data *) + bnxt_ulp_cntxt_ptr2_mapper_data_get(ulp_ctx); + if (!parms.mapper_data) { + BNXT_TF_DBG(ERR, "Failed to get the ulp mapper data\n"); + return -EINVAL; + } + + /* Get the action table entry from device id and act context id */ + parms.act_tid = cparms->act_tid; + parms.atbls = ulp_mapper_action_tbl_list_get(parms.dev_id, + parms.act_tid, + &parms.num_atbls); + if (!parms.atbls || !parms.num_atbls) { + BNXT_TF_DBG(ERR, "No action tables for %d:%d\n", + parms.dev_id, parms.act_tid); + return -EINVAL; + } + + /* Get the class table entry from device id and act context id */ + parms.class_tid = cparms->class_tid; + parms.ctbls = ulp_mapper_class_tbl_list_get(parms.dev_id, + parms.class_tid, + &parms.num_ctbls); + if (!parms.ctbls || !parms.num_ctbls) { + BNXT_TF_DBG(ERR, "No class tables for %d:%d\n", + parms.dev_id, parms.class_tid); + return -EINVAL; + } + + /* Get the byte order for the further processing from device params */ + device_params = bnxt_ulp_device_params_get(parms.dev_id); + if (!device_params) { + BNXT_TF_DBG(ERR, "No class tables for %d:%d\n", + parms.dev_id, parms.class_tid); + return -EINVAL; + } + parms.order = device_params->byte_order; + parms.encap_byte_swap = device_params->encap_byte_swap; + + /* initialize the registry file for further processing */ + if (!ulp_regfile_init(parms.regfile)) { + BNXT_TF_DBG(ERR, "regfile initialization failed.\n"); + return -EINVAL; + } + + rc = ulp_regfile_write(parms.regfile, + BNXT_ULP_REGFILE_INDEX_CLASS_TID, + tfp_cpu_to_be_64((uint64_t)parms.class_tid)); + if (!rc) { + BNXT_TF_DBG(ERR, "Unable to write template ID to regfile\n"); + return -EINVAL; + } + + /* Allocate a Flow ID for attaching all resources for the flow to. + * Once allocated, all errors have to walk the list of resources and + * free each of them. + */ + rc = ulp_flow_db_fid_alloc(ulp_ctx, + BNXT_ULP_REGULAR_FLOW_TABLE, + cparms->func_id, + &parms.fid); + if (rc) { + BNXT_TF_DBG(ERR, "Unable to allocate flow table entry\n"); + return rc; + } + + /* Process the action template list from the selected action table*/ + rc = ulp_mapper_action_tbls_process(&parms); + if (rc) { + BNXT_TF_DBG(ERR, "action tables failed creation for %d:%d\n", + parms.dev_id, parms.act_tid); + goto flow_error; + } + + /* All good. 
Now process the class template */ + rc = ulp_mapper_class_tbls_process(&parms); + if (rc) { + BNXT_TF_DBG(ERR, "class tables failed creation for %d:%d\n", + parms.dev_id, parms.class_tid); + goto flow_error; + } + + *flowid = parms.fid; + + return rc; + +flow_error: + /* Free all resources that were allocated during flow creation */ + trc = ulp_mapper_flow_destroy(ulp_ctx, parms.fid); + if (trc) + BNXT_TF_DBG(ERR, "Failed to free all resources rc=%d\n", trc); + + return rc; +} + +int32_t +ulp_mapper_init(struct bnxt_ulp_context *ulp_ctx) +{ + struct bnxt_ulp_cache_tbl_params *tbl; + struct tf_alloc_identifier_parms iparms; + struct bnxt_ulp_mapper_data *data; + struct bnxt_ulp_def_ident_info *dflt_ids; + uint32_t i, num_dflt_ids, reg_idx; + uint64_t regval; + struct tf *tfp; + int32_t rc, csize; + + if (!ulp_ctx) + return -EINVAL; + + tfp = bnxt_ulp_cntxt_tfp_get(ulp_ctx); + if (!tfp) + return -EINVAL; + + data = rte_zmalloc("ulp_mapper_data", + sizeof(struct bnxt_ulp_mapper_data), 0); + if (!data) { + BNXT_TF_DBG(ERR, "Failed to allocate the mapper data\n"); + return -ENOMEM; + } + + if (bnxt_ulp_cntxt_ptr2_mapper_data_set(ulp_ctx, data)) { + BNXT_TF_DBG(ERR, "Failed to set mapper data in context\n"); + /* Don't call deinit since the prof_func wasn't allocated. */ + rte_free(data); + return -ENOMEM; + } + + /* Allocate the default ids. */ + dflt_ids = ulp_mapper_def_ident_info_list_get(&num_dflt_ids); + for (i = 0; i < num_dflt_ids; i++) { + iparms.ident_type = dflt_ids[i].ident_type; + iparms.dir = dflt_ids[i].direction; + + rc = tf_alloc_identifier(tfp, &iparms); + if (rc) { + BNXT_TF_DBG(ERR, "Failed to alloc dflt " + "identifier [%s][%d]\n", + (iparms.dir == TF_DIR_RX) ? "RX" : "TX", + iparms.ident_type); + goto error; + } + reg_idx = dflt_ids[i].def_regfile_index; + /* All regfile entries are stored as 64bit big-endian values. */ + regval = tfp_cpu_to_be_64((uint64_t)iparms.id); + rc = ulp_mapper_def_regfile_write(data, iparms.dir, + reg_idx, regval); + if (rc) { + BNXT_TF_DBG(ERR, "Failed to write to default " + "regfile.\n"); + goto error; + } + } + + /* Allocate the ulp cache tables. */ + for (i = 0; i < BNXT_ULP_CACHE_TBL_MAX_SZ; i++) { + tbl = ulp_mapper_cache_tbl_params_get(i); + if (!tbl) { + BNXT_TF_DBG(ERR, "Failed to get cache table parms (%d)", + i); + goto error; + } + if (tbl->num_entries != 0) { + csize = sizeof(struct bnxt_ulp_mapper_cache_entry) * + tbl->num_entries; + data->cache_tbl[i] = rte_zmalloc("ulp mapper cache tbl", + csize, 0); + if (!data->cache_tbl[i]) { + BNXT_TF_DBG(ERR, "Failed to allocate Cache " + "table %d.\n", i); + rc = -ENOMEM; + goto error; + } + } + } + + return 0; +error: + /* Ignore the return code in favor of returning the original error. */ + ulp_mapper_deinit(ulp_ctx); + return rc; +} + +void +ulp_mapper_deinit(struct bnxt_ulp_context *ulp_ctx) +{ + struct tf_free_identifier_parms free_parms; + struct bnxt_ulp_def_ident_info *dflt_ids; + struct bnxt_ulp_mapper_data *data; + uint32_t i, num_dflt_ids, reg_idx; + enum tf_dir dir; + uint64_t regval; + struct tf *tfp; + + if (!ulp_ctx) { + BNXT_TF_DBG(ERR, + "Failed to acquire ulp context, so data may " + "not be released.\n"); + return; + } + + data = (struct bnxt_ulp_mapper_data *) + bnxt_ulp_cntxt_ptr2_mapper_data_get(ulp_ctx); + if (!data) { + /* Go ahead and return since there is no allocated data. 
*/ + BNXT_TF_DBG(ERR, "No data appears to have been allocated.\n"); + return; + } + + tfp = bnxt_ulp_cntxt_tfp_get(ulp_ctx); + if (!tfp) { + BNXT_TF_DBG(ERR, "Failed to acquire tfp.\n"); + /* Free the mapper data regardless of errors. */ + goto free_mapper_data; + } + + /* Free the default prof func ids per direction. */ + dflt_ids = ulp_mapper_def_ident_info_list_get(&num_dflt_ids); + for (i = 0; i < num_dflt_ids; i++) { + reg_idx = dflt_ids[i].def_regfile_index; + dir = dflt_ids[i].direction; + free_parms.ident_type = dflt_ids[i].ident_type; + free_parms.dir = dir; + if (ulp_mapper_def_regfile_read(data, dir, reg_idx, ®val)) { + BNXT_TF_DBG(ERR, "Failed to read def regfile to free " + "identifier.\n"); + continue; + } + /* + * All regfile entries are stored as 64bit big-endian. Need + * to convert the value to cpu before calling tf. + */ + regval = tfp_be_to_cpu_64(regval); + free_parms.id = (uint16_t)regval; + /* Ignore errors and free the remaining identifiers. */ + tf_free_identifier(tfp, &free_parms); + } + +free_mapper_data: + /* Free the ulp cache tables */ + for (i = 0; i < BNXT_ULP_CACHE_TBL_MAX_SZ; i++) { + rte_free(data->cache_tbl[i]); + data->cache_tbl[i] = NULL; + } + + rte_free(data); + /* Reset the data pointer within the ulp_ctx. */ + bnxt_ulp_cntxt_ptr2_mapper_data_set(ulp_ctx, NULL); +} diff --git a/src/spdk/dpdk/drivers/net/bnxt/tf_ulp/ulp_mapper.h b/src/spdk/dpdk/drivers/net/bnxt/tf_ulp/ulp_mapper.h new file mode 100644 index 000000000..162d869c8 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnxt/tf_ulp/ulp_mapper.h @@ -0,0 +1,123 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2019 Broadcom + * All rights reserved. + */ + +#ifndef _ULP_MAPPER_H_ +#define _ULP_MAPPER_H_ + +#include +#include +#include +#include "tf_core.h" +#include "ulp_template_db.h" +#include "ulp_template_struct.h" +#include "bnxt_ulp.h" +#include "ulp_utils.h" + +#define ULP_SZ_BITS2BYTES(x) (((x) + 7) / 8) +#define ULP_IDENTS_INVALID ((uint16_t)0xffff) +#define ULP_MAPPER_CACHE_RES_TBL_ID_SHFT 16 +#define ULP_MAPPER_CACHE_RES_TBL_TYPE_SHFT 0 +#define ULP_MAPPER_CACHE_RES_TBL_MASK ((uint32_t)0x0000ffff) + +/* + * The cache table opcode is used to convey informat from the cache handler + * to the tcam handler. The opcodes do the following: + * NORMAL - tcam should process all instructions as normal + * SKIP - tcam is using the cached entry and doesn't need to process the + * instruction. 
+ * ALLOC - tcam needs to allocate the tcam index and store in the cache entry + */ +enum bnxt_ulp_cache_table_opc { + BNXT_ULP_MAPPER_TCAM_TBL_OPC_NORMAL, + BNXT_ULP_MAPPER_TCAM_TBL_OPC_CACHE_SKIP, + BNXT_ULP_MAPPER_TCAM_TBL_OPC_CACHE_ALLOC +}; + +struct bnxt_ulp_mapper_cache_entry { + uint32_t ref_count; + uint16_t tcam_idx; + uint16_t idents[BNXT_ULP_CACHE_TBL_IDENT_MAX_NUM]; + uint8_t ident_types[BNXT_ULP_CACHE_TBL_IDENT_MAX_NUM]; +}; + +struct bnxt_ulp_mapper_def_id_entry { + enum tf_identifier_type ident_type; + uint64_t ident; +}; + +struct bnxt_ulp_mapper_data { + struct bnxt_ulp_mapper_def_id_entry + dflt_ids[TF_DIR_MAX][BNXT_ULP_DEF_IDENT_INFO_TBL_MAX_SZ]; + struct bnxt_ulp_mapper_cache_entry + *cache_tbl[BNXT_ULP_CACHE_TBL_MAX_SZ]; +}; + +/* Internal Structure for passing the arguments around */ +struct bnxt_ulp_mapper_parms { + uint32_t dev_id; + enum bnxt_ulp_byte_order order; + uint32_t act_tid; + struct bnxt_ulp_mapper_act_tbl_info *atbls; + uint32_t num_atbls; + uint32_t class_tid; + struct bnxt_ulp_mapper_class_tbl_info *ctbls; + uint32_t num_ctbls; + struct ulp_rte_act_prop *act_prop; + struct ulp_rte_act_bitmap *act_bitmap; + struct ulp_rte_hdr_field *hdr_field; + struct ulp_regfile *regfile; + struct tf *tfp; + struct bnxt_ulp_context *ulp_ctx; + uint8_t encap_byte_swap; + uint32_t fid; + enum bnxt_ulp_flow_db_tables tbl_idx; + struct bnxt_ulp_mapper_data *mapper_data; + enum bnxt_ulp_cache_table_opc tcam_tbl_opc; + struct bnxt_ulp_mapper_cache_entry *cache_ptr; +}; + +struct bnxt_ulp_mapper_create_parms { + uint32_t app_priority; + struct ulp_rte_hdr_bitmap *hdr_bitmap; + struct ulp_rte_hdr_field *hdr_field; + struct ulp_rte_act_bitmap *act; + struct ulp_rte_act_prop *act_prop; + uint32_t class_tid; + uint32_t act_tid; + uint16_t func_id; + enum ulp_direction_type dir; +}; + +/* Function to initialize any dynamic mapper data. */ +int32_t +ulp_mapper_init(struct bnxt_ulp_context *ulp_ctx); + +/* Function to release all dynamic mapper data. */ +void +ulp_mapper_deinit(struct bnxt_ulp_context *ulp_ctx); + +/* + * Function to handle the mapping of the Flow to be compatible + * with the underlying hardware. + */ +int32_t +ulp_mapper_flow_create(struct bnxt_ulp_context *ulp_ctx, + struct bnxt_ulp_mapper_create_parms *parms, + uint32_t *flowid); + +/* Function that frees all resources associated with the flow. */ +int32_t +ulp_mapper_flow_destroy(struct bnxt_ulp_context *ulp_ctx, uint32_t fid); + +/* + * Function that frees all resources and can be called on default or regular + * flows + */ +int32_t +ulp_mapper_resources_free(struct bnxt_ulp_context *ulp_ctx, + uint32_t fid, + enum bnxt_ulp_flow_db_tables tbl_type); + +#endif /* _ULP_MAPPER_H_ */ diff --git a/src/spdk/dpdk/drivers/net/bnxt/tf_ulp/ulp_mark_mgr.c b/src/spdk/dpdk/drivers/net/bnxt/tf_ulp/ulp_mark_mgr.c new file mode 100644 index 000000000..9e8b81e4c --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnxt/tf_ulp/ulp_mark_mgr.c @@ -0,0 +1,311 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2020 Broadcom + * All rights reserved. 
+ */ + +#include +#include +#include +#include "bnxt.h" +#include "bnxt_ulp.h" +#include "tf_ext_flow_handle.h" +#include "ulp_mark_mgr.h" +#include "bnxt_tf_common.h" +#include "ulp_template_db.h" +#include "ulp_template_struct.h" + +#define ULP_MARK_DB_ENTRY_SET_VALID(mark_info) ((mark_info)->flags |=\ + BNXT_ULP_MARK_VALID) +#define ULP_MARK_DB_ENTRY_IS_INVALID(mark_info) (!((mark_info)->flags &\ + BNXT_ULP_MARK_VALID)) +#define ULP_MARK_DB_ENTRY_IS_GLOBAL_HW_FID(mark_info) ((mark_info)->flags &\ + BNXT_ULP_MARK_GLOBAL_HW_FID) + +static inline uint32_t +ulp_mark_db_idx_get(bool is_gfid, uint32_t fid, struct bnxt_ulp_mark_tbl *mtbl) +{ + uint32_t idx = 0, hashtype = 0; + + if (is_gfid) { + TF_GET_HASH_TYPE_FROM_GFID(fid, hashtype); + TF_GET_HASH_INDEX_FROM_GFID(fid, idx); + + /* Need to truncate anything beyond supported flows */ + idx &= mtbl->gfid_mask; + if (hashtype) + idx |= mtbl->gfid_type_bit; + } else { + idx = fid; + } + return idx; +} + +/* + * Allocate and Initialize all Mark Manager resources for this ulp context. + * + * ctxt [in] The ulp context for the mark manager. + * + */ +int32_t +ulp_mark_db_init(struct bnxt_ulp_context *ctxt) +{ + struct bnxt_ulp_device_params *dparms; + struct bnxt_ulp_mark_tbl *mark_tbl = NULL; + uint32_t dev_id; + + if (!ctxt) { + BNXT_TF_DBG(DEBUG, "Invalid ULP CTXT\n"); + return -EINVAL; + } + + if (bnxt_ulp_cntxt_dev_id_get(ctxt, &dev_id)) { + BNXT_TF_DBG(DEBUG, "Failed to get device id\n"); + return -EINVAL; + } + + dparms = bnxt_ulp_device_params_get(dev_id); + if (!dparms) { + BNXT_TF_DBG(DEBUG, "Failed to device parms\n"); + return -EINVAL; + } + + mark_tbl = rte_zmalloc("ulp_rx_mark_tbl_ptr", + sizeof(struct bnxt_ulp_mark_tbl), 0); + if (!mark_tbl) + goto mem_error; + + /* Need to allocate 2 * Num flows to account for hash type bit.*/ + mark_tbl->lfid_num_entries = dparms->lfid_entries; + mark_tbl->lfid_tbl = rte_zmalloc("ulp_rx_em_flow_mark_table", + mark_tbl->lfid_num_entries * + sizeof(struct bnxt_lfid_mark_info), + 0); + if (!mark_tbl->lfid_tbl) + goto mem_error; + + /* Need to allocate 2 * Num flows to account for hash type bit */ + mark_tbl->gfid_num_entries = dparms->gfid_entries; + mark_tbl->gfid_tbl = rte_zmalloc("ulp_rx_eem_flow_mark_table", + mark_tbl->gfid_num_entries * + sizeof(struct bnxt_gfid_mark_info), + 0); + if (!mark_tbl->gfid_tbl) + goto mem_error; + + /* + * These values are used to compress the FID to the allowable index + * space. The FID from hw may be the full hash which may be a big + * value to allocate and so allocate only needed hash values. + * gfid mask is the number of flow entries for the each left/right + * hash The gfid type bit is used to get to the higher or lower hash + * entries. + */ + mark_tbl->gfid_mask = (mark_tbl->gfid_num_entries / 2) - 1; + mark_tbl->gfid_type_bit = (mark_tbl->gfid_num_entries / 2); + + BNXT_TF_DBG(DEBUG, "GFID Max = 0x%08x\nGFID MASK = 0x%08x\n", + mark_tbl->gfid_num_entries - 1, + mark_tbl->gfid_mask); + + /* Add the mark tbl to the ulp context. 
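+ * The mark get, add and del helpers below fetch it back from the context on
+ * each call.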
*/ + bnxt_ulp_cntxt_ptr2_mark_db_set(ctxt, mark_tbl); + return 0; + +mem_error: + rte_free(mark_tbl->gfid_tbl); + rte_free(mark_tbl->lfid_tbl); + rte_free(mark_tbl); + BNXT_TF_DBG(DEBUG, "Failed to allocate memory for mark mgr\n"); + return -ENOMEM; +} + +/* + * Release all resources in the Mark Manager for this ulp context + * + * ctxt [in] The ulp context for the mark manager + * + */ +int32_t +ulp_mark_db_deinit(struct bnxt_ulp_context *ctxt) +{ + struct bnxt_ulp_mark_tbl *mtbl; + + mtbl = bnxt_ulp_cntxt_ptr2_mark_db_get(ctxt); + + if (mtbl) { + rte_free(mtbl->gfid_tbl); + rte_free(mtbl->lfid_tbl); + rte_free(mtbl); + + /* Safe to ignore on deinit */ + (void)bnxt_ulp_cntxt_ptr2_mark_db_set(ctxt, NULL); + } + + return 0; +} + +/* + * Get a Mark from the Mark Manager + * + * ctxt [in] The ulp context for the mark manager + * + * is_gfid [in] The type of fid (GFID or LFID) + * + * fid [in] The flow id that is returned by HW in BD + * + * mark [out] The mark that is associated with the FID + * + */ +int32_t +ulp_mark_db_mark_get(struct bnxt_ulp_context *ctxt, + bool is_gfid, + uint32_t fid, + uint32_t *mark) +{ + struct bnxt_ulp_mark_tbl *mtbl; + uint32_t idx = 0; + + if (!ctxt || !mark) + return -EINVAL; + + mtbl = bnxt_ulp_cntxt_ptr2_mark_db_get(ctxt); + if (!mtbl) { + BNXT_TF_DBG(ERR, "Unable to get Mark Table\n"); + return -EINVAL; + } + + idx = ulp_mark_db_idx_get(is_gfid, fid, mtbl); + + if (is_gfid) { + if (idx >= mtbl->gfid_num_entries || + ULP_MARK_DB_ENTRY_IS_INVALID(&mtbl->gfid_tbl[idx])) + return -EINVAL; + + BNXT_TF_DBG(DEBUG, "Get GFID[0x%0x] = 0x%0x\n", + idx, mtbl->gfid_tbl[idx].mark_id); + + *mark = mtbl->gfid_tbl[idx].mark_id; + } else { + if (idx >= mtbl->lfid_num_entries || + ULP_MARK_DB_ENTRY_IS_INVALID(&mtbl->lfid_tbl[idx])) + return -EINVAL; + + BNXT_TF_DBG(DEBUG, "Get LFID[0x%0x] = 0x%0x\n", + idx, mtbl->lfid_tbl[idx].mark_id); + + *mark = mtbl->lfid_tbl[idx].mark_id; + } + + return 0; +} + +/* + * Adds a Mark to the Mark Manager + * + * ctxt [in] The ulp context for the mark manager + * + * mark_flag [in] mark flags. + * + * fid [in] The flow id that is returned by HW in BD + * + * mark [in] The mark to be associated with the FID + * + */ +int32_t +ulp_mark_db_mark_add(struct bnxt_ulp_context *ctxt, + uint32_t mark_flag, + uint32_t fid, + uint32_t mark) +{ + struct bnxt_ulp_mark_tbl *mtbl; + uint32_t idx = 0; + bool is_gfid; + + if (!ctxt) { + BNXT_TF_DBG(ERR, "Invalid ulp context\n"); + return -EINVAL; + } + + mtbl = bnxt_ulp_cntxt_ptr2_mark_db_get(ctxt); + if (!mtbl) { + BNXT_TF_DBG(ERR, "Unable to get Mark DB\n"); + return -EINVAL; + } + + is_gfid = (mark_flag & BNXT_ULP_MARK_GLOBAL_HW_FID); + if (is_gfid) { + idx = ulp_mark_db_idx_get(is_gfid, fid, mtbl); + if (idx >= mtbl->gfid_num_entries) { + BNXT_TF_DBG(ERR, "Mark index greater than allocated\n"); + return -EINVAL; + } + BNXT_TF_DBG(DEBUG, "Set GFID[0x%0x] = 0x%0x\n", idx, mark); + mtbl->gfid_tbl[idx].mark_id = mark; + ULP_MARK_DB_ENTRY_SET_VALID(&mtbl->gfid_tbl[idx]); + + } else { + /* For the LFID, the FID is used as the index */ + if (fid >= mtbl->lfid_num_entries) { + BNXT_TF_DBG(ERR, "Mark index greater than allocated\n"); + return -EINVAL; + } + mtbl->lfid_tbl[fid].mark_id = mark; + ULP_MARK_DB_ENTRY_SET_VALID(&mtbl->lfid_tbl[fid]); + } + + return 0; +} + +/* + * Removes a Mark from the Mark Manager + * + * ctxt [in] The ulp context for the mark manager + * + * mark_flag [in] mark flags. 
+ * + * fid [in] The flow id that is returned by HW in BD + * + */ +int32_t +ulp_mark_db_mark_del(struct bnxt_ulp_context *ctxt, + uint32_t mark_flag, + uint32_t fid) +{ + struct bnxt_ulp_mark_tbl *mtbl; + uint32_t idx = 0; + bool is_gfid; + + if (!ctxt) { + BNXT_TF_DBG(ERR, "Invalid ulp context\n"); + return -EINVAL; + } + + mtbl = bnxt_ulp_cntxt_ptr2_mark_db_get(ctxt); + if (!mtbl) { + BNXT_TF_DBG(ERR, "Unable to get Mark DB\n"); + return -EINVAL; + } + + is_gfid = (mark_flag & BNXT_ULP_MARK_GLOBAL_HW_FID); + if (is_gfid) { + idx = ulp_mark_db_idx_get(is_gfid, fid, mtbl); + if (idx >= mtbl->gfid_num_entries) { + BNXT_TF_DBG(ERR, "Mark index greater than allocated\n"); + return -EINVAL; + } + BNXT_TF_DBG(DEBUG, "Reset GFID[0x%0x]\n", idx); + memset(&mtbl->gfid_tbl[idx], 0, + sizeof(struct bnxt_gfid_mark_info)); + + } else { + /* For the LFID, the FID is used as the index */ + if (fid >= mtbl->lfid_num_entries) { + BNXT_TF_DBG(ERR, "Mark index greater than allocated\n"); + return -EINVAL; + } + memset(&mtbl->lfid_tbl[fid], 0, + sizeof(struct bnxt_lfid_mark_info)); + } + + return 0; +} diff --git a/src/spdk/dpdk/drivers/net/bnxt/tf_ulp/ulp_mark_mgr.h b/src/spdk/dpdk/drivers/net/bnxt/tf_ulp/ulp_mark_mgr.h new file mode 100644 index 000000000..fd0d84011 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnxt/tf_ulp/ulp_mark_mgr.h @@ -0,0 +1,112 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2019 Broadcom + * All rights reserved. + */ + +#ifndef _ULP_MARK_MGR_H_ +#define _ULP_MARK_MGR_H_ + +#include "bnxt_ulp.h" + +#define BNXT_ULP_MARK_VALID 0x1 +#define BNXT_ULP_MARK_GLOBAL_HW_FID 0x4 +#define BNXT_ULP_MARK_LOCAL_HW_FID 0x8 + +struct bnxt_lfid_mark_info { + uint16_t mark_id; + uint16_t flags; +}; + +struct bnxt_gfid_mark_info { + uint32_t mark_id; + uint16_t flags; +}; + +struct bnxt_ulp_mark_tbl { + struct bnxt_lfid_mark_info *lfid_tbl; + struct bnxt_gfid_mark_info *gfid_tbl; + uint32_t lfid_num_entries; + uint32_t gfid_num_entries; + uint32_t gfid_mask; + uint32_t gfid_type_bit; +}; + +/* + * Allocate and Initialize all Mark Manager resources for this ulp context. + * + * Initialize MARK database for GFID & LFID tables + * GFID: Global flow id which is based on EEM hash id. + * LFID: Local flow id which is the CFA action pointer. + * GFID is used for EEM flows, LFID is used for EM flows. + * + * Flow mapper modules adds mark_id in the MARK database. + * + * BNXT PMD receive handler extracts the hardware flow id from the + * received completion record. Fetches mark_id from the MARK + * database using the flow id. Injects mark_id into the packet's mbuf. + * + * ctxt [in] The ulp context for the mark manager. + */ +int32_t +ulp_mark_db_init(struct bnxt_ulp_context *ctxt); + +/* + * Release all resources in the Mark Manager for this ulp context + * + * ctxt [in] The ulp context for the mark manager + */ +int32_t +ulp_mark_db_deinit(struct bnxt_ulp_context *ctxt); + +/* + * Get a Mark from the Mark Manager + * + * ctxt [in] The ulp context for the mark manager + * + * is_gfid [in] The type of fid (GFID or LFID) + * + * fid [in] The flow id that is returned by HW in BD + * + * mark [out] The mark that is associated with the FID + * + */ +int32_t +ulp_mark_db_mark_get(struct bnxt_ulp_context *ctxt, + bool is_gfid, + uint32_t fid, + uint32_t *mark); + +/* + * Adds a Mark to the Mark Manager + * + * ctxt [in] The ulp context for the mark manager + * + * mark_flag [in] mark flags. 
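To make the add/get/del contract documented here concrete, the following is a small self-contained model of an LFID-style table (direct FID indexing plus a validity flag). It mirrors the semantics only; it uses plain calloc and none of the driver types, and the entry layout is an assumption for illustration.

#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>

#define MARK_VALID 0x1

struct mark_ent { uint32_t mark_id; uint16_t flags; };
struct mark_tbl { struct mark_ent *ent; uint32_t num; };

static int mark_add(struct mark_tbl *t, uint32_t fid, uint32_t mark)
{
	if (fid >= t->num)
		return -1;			/* index beyond allocated entries */
	t->ent[fid].mark_id = mark;
	t->ent[fid].flags |= MARK_VALID;
	return 0;
}

static int mark_get(const struct mark_tbl *t, uint32_t fid, uint32_t *mark)
{
	if (fid >= t->num || !(t->ent[fid].flags & MARK_VALID))
		return -1;			/* unknown or stale FID */
	*mark = t->ent[fid].mark_id;
	return 0;
}

static void mark_del(struct mark_tbl *t, uint32_t fid)
{
	if (fid < t->num)
		memset(&t->ent[fid], 0, sizeof(t->ent[fid]));
}

int main(void)
{
	struct mark_tbl t = { calloc(64, sizeof(struct mark_ent)), 64 };
	uint32_t m = 0;

	mark_add(&t, 5, 0xcafe);
	printf("get: %d mark 0x%x\n", mark_get(&t, 5, &m), m);
	mark_del(&t, 5);
	printf("get after del: %d\n", mark_get(&t, 5, &m));
	free(t.ent);
	return 0;
}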
+ * + * fid [in] The flow id that is returned by HW in BD + * + * mark [in] The mark to be associated with the FID + * + */ +int32_t +ulp_mark_db_mark_add(struct bnxt_ulp_context *ctxt, + uint32_t mark_flag, + uint32_t gfid, + uint32_t mark); + +/* + * Removes a Mark from the Mark Manager + * + * ctxt [in] The ulp context for the mark manager + * + * mark_flag [in] mark flags + * + * fid [in] The flow id that is returned by HW in BD + * + */ +int32_t +ulp_mark_db_mark_del(struct bnxt_ulp_context *ctxt, + uint32_t mark_flag, + uint32_t gfid); + +#endif /* _ULP_MARK_MGR_H_ */ diff --git a/src/spdk/dpdk/drivers/net/bnxt/tf_ulp/ulp_matcher.c b/src/spdk/dpdk/drivers/net/bnxt/tf_ulp/ulp_matcher.c new file mode 100644 index 000000000..e5f23ef27 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnxt/tf_ulp/ulp_matcher.c @@ -0,0 +1,150 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2020 Broadcom + * All rights reserved. + */ + +#include "ulp_matcher.h" +#include "ulp_utils.h" + +/* Utility function to calculate the class matcher hash */ +static uint32_t +ulp_matcher_class_hash_calculate(uint64_t hi_sig, uint64_t lo_sig) +{ + uint64_t hash; + + hi_sig |= ((hi_sig % BNXT_ULP_CLASS_HID_HIGH_PRIME) << + BNXT_ULP_CLASS_HID_SHFTL); + lo_sig |= ((lo_sig % BNXT_ULP_CLASS_HID_LOW_PRIME) << + (BNXT_ULP_CLASS_HID_SHFTL + 2)); + hash = hi_sig ^ lo_sig; + hash = (hash >> BNXT_ULP_CLASS_HID_SHFTR) & BNXT_ULP_CLASS_HID_MASK; + return (uint32_t)hash; +} + +/* Utility function to calculate the action matcher hash */ +static uint32_t +ulp_matcher_action_hash_calculate(uint64_t hi_sig) +{ + uint64_t hash; + + hi_sig |= ((hi_sig % BNXT_ULP_ACT_HID_HIGH_PRIME) << + BNXT_ULP_ACT_HID_SHFTL); + hash = hi_sig; + hash = (hash >> BNXT_ULP_ACT_HID_SHFTR) & BNXT_ULP_ACT_HID_MASK; + return (uint32_t)hash; +} + +/* Utility function to mask the computed and internal proto headers. */ +static void +ulp_matcher_hdr_fields_normalize(struct ulp_rte_hdr_bitmap *hdr1, + struct ulp_rte_hdr_bitmap *hdr2) +{ + /* copy the contents first */ + rte_memcpy(hdr2, hdr1, sizeof(struct ulp_rte_hdr_bitmap)); + + /* reset the computed fields */ + ULP_BITMAP_RESET(hdr2->bits, BNXT_ULP_HDR_BIT_SVIF); + ULP_BITMAP_RESET(hdr2->bits, BNXT_ULP_HDR_BIT_OO_VLAN); + ULP_BITMAP_RESET(hdr2->bits, BNXT_ULP_HDR_BIT_OI_VLAN); + ULP_BITMAP_RESET(hdr2->bits, BNXT_ULP_HDR_BIT_IO_VLAN); + ULP_BITMAP_RESET(hdr2->bits, BNXT_ULP_HDR_BIT_II_VLAN); +} + +/* + * Function to handle the matching of RTE Flows and validating + * the pattern masks against the flow templates. 
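The class hash mixes a prime residue of each signature into its upper bits before folding. A standalone sketch of the same shape is shown below; the primes, shifts and mask are placeholder values chosen only so the sketch compiles, since the real BNXT_ULP_CLASS_HID_* constants come from the generated template database and differ per build.

#include <stdint.h>
#include <stdio.h>

/* Placeholder constants standing in for the generated template-DB values. */
#define HID_HIGH_PRIME	7919
#define HID_LOW_PRIME	7907
#define HID_SHFTL	31
#define HID_SHFTR	5
#define HID_MASK	0xFF

static uint32_t class_hash(uint64_t hi_sig, uint64_t lo_sig)
{
	uint64_t hash;

	/* mix a prime residue of each signature into its upper bits */
	hi_sig |= (hi_sig % HID_HIGH_PRIME) << HID_SHFTL;
	lo_sig |= (lo_sig % HID_LOW_PRIME) << (HID_SHFTL + 2);
	hash = hi_sig ^ lo_sig;
	return (uint32_t)((hash >> HID_SHFTR) & HID_MASK);
}

int main(void)
{
	printf("0x%x\n", class_hash(0x123456789abcdefULL, 0x0f0f0f0fULL));
	return 0;
}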
+ */ +int32_t +ulp_matcher_pattern_match(struct ulp_rte_parser_params *params, + uint32_t *class_id) +{ + struct ulp_rte_hdr_bitmap hdr_bitmap_masked; + struct bnxt_ulp_class_match_info *class_match; + uint32_t class_hid; + uint8_t vf_to_vf; + uint16_t tmpl_id; + + /* Remove the hdr bit maps that are internal or computed */ + ulp_matcher_hdr_fields_normalize(¶ms->hdr_bitmap, + &hdr_bitmap_masked); + + /* determine vf to vf flow */ + if (params->dir == ULP_DIR_EGRESS && + ULP_BITMAP_ISSET(params->act_bitmap.bits, + BNXT_ULP_ACTION_BIT_VNIC)) { + vf_to_vf = 1; + } else { + vf_to_vf = 0; + } + + /* calculate the hash of the given flow */ + class_hid = ulp_matcher_class_hash_calculate(hdr_bitmap_masked.bits, + params->fld_bitmap.bits); + + /* validate the calculate hash values */ + if (class_hid >= BNXT_ULP_CLASS_SIG_TBL_MAX_SZ) + goto error; + tmpl_id = ulp_class_sig_tbl[class_hid]; + if (!tmpl_id) + goto error; + + class_match = &ulp_class_match_list[tmpl_id]; + if (ULP_BITMAP_CMP(&hdr_bitmap_masked, &class_match->hdr_sig)) { + BNXT_TF_DBG(DEBUG, "Proto Header does not match\n"); + goto error; + } + if (ULP_BITMAP_CMP(¶ms->fld_bitmap, &class_match->field_sig)) { + BNXT_TF_DBG(DEBUG, "Field signature does not match\n"); + goto error; + } + if (vf_to_vf != class_match->act_vnic) { + BNXT_TF_DBG(DEBUG, "Vnic Match failed\n"); + goto error; + } + BNXT_TF_DBG(DEBUG, "Found matching pattern template %d\n", + class_match->class_tid); + *class_id = class_match->class_tid; + return BNXT_TF_RC_SUCCESS; + +error: + BNXT_TF_DBG(DEBUG, "Did not find any matching template\n"); + *class_id = 0; + return BNXT_TF_RC_ERROR; +} + +/* + * Function to handle the matching of RTE Flows and validating + * the action against the flow templates. + */ +int32_t +ulp_matcher_action_match(struct ulp_rte_parser_params *params, + uint32_t *act_id) +{ + uint32_t act_hid; + uint16_t tmpl_id; + struct bnxt_ulp_act_match_info *act_match; + + /* calculate the hash of the given flow action */ + act_hid = ulp_matcher_action_hash_calculate(params->act_bitmap.bits); + + /* validate the calculate hash values */ + if (act_hid >= BNXT_ULP_ACT_SIG_TBL_MAX_SZ) + goto error; + tmpl_id = ulp_act_sig_tbl[act_hid]; + if (!tmpl_id) + goto error; + + act_match = &ulp_act_match_list[tmpl_id]; + if (ULP_BITMAP_CMP(¶ms->act_bitmap, &act_match->act_sig)) { + BNXT_TF_DBG(DEBUG, "Action Header does not match\n"); + goto error; + } + *act_id = act_match->act_tid; + BNXT_TF_DBG(DEBUG, "Found matching action template %u\n", *act_id); + return BNXT_TF_RC_SUCCESS; + +error: + BNXT_TF_DBG(DEBUG, "Did not find any matching action template\n"); + *act_id = 0; + return BNXT_TF_RC_ERROR; +} diff --git a/src/spdk/dpdk/drivers/net/bnxt/tf_ulp/ulp_matcher.h b/src/spdk/dpdk/drivers/net/bnxt/tf_ulp/ulp_matcher.h new file mode 100644 index 000000000..fc197830f --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnxt/tf_ulp/ulp_matcher.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2020 Broadcom + * All rights reserved. + */ + +#ifndef ULP_MATCHER_H_ +#define ULP_MATCHER_H_ + +#include +#include "bnxt.h" +#include "ulp_template_db.h" +#include "ulp_template_struct.h" +#include "bnxt_tf_common.h" + +/* + * Function to handle the matching of RTE Flows and validating + * the pattern masks against the flow templates. + */ +int32_t +ulp_matcher_pattern_match(struct ulp_rte_parser_params *params, + uint32_t *class_id); + +/* + * Function to handle the matching of RTE Flows and validating + * the action against the flow templates. 
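The two-step lookup used by the matcher (hash id selects a template, exact signature comparison rejects collisions) can be modelled in miniature as follows. The tiny sig_tbl/tmpl_list tables here are invented stand-ins for the generated ulp_class_sig_tbl and ulp_class_match_list; only the shape of the lookup matches the driver.

#include <stdint.h>
#include <stdio.h>

struct tmpl { uint64_t hdr_sig; uint64_t field_sig; uint32_t tid; };

/* sig_tbl maps a hash id to a template-list index; 0 means "no template". */
static const uint16_t sig_tbl[8] = { 0, 0, 1, 0, 0, 0, 0, 0 };
static const struct tmpl tmpl_list[2] = {
	{ 0, 0, 0 },			/* index 0 is deliberately unused   */
	{ 0x3, 0x30, 7 },		/* hdr bits, field bits, template id */
};

static int match(uint32_t hash, uint64_t hdr, uint64_t fld, uint32_t *tid)
{
	uint16_t i = sig_tbl[hash & 7];

	if (!i)
		return -1;		/* nothing hashed to this slot */
	if (tmpl_list[i].hdr_sig != hdr || tmpl_list[i].field_sig != fld)
		return -1;		/* hash collision: signatures differ */
	*tid = tmpl_list[i].tid;
	return 0;
}

int main(void)
{
	uint32_t tid = 0;

	printf("%d\n", match(2, 0x3, 0x30, &tid));	/* 0, tid becomes 7 */
	printf("%d\n", match(2, 0x3, 0x31, &tid));	/* -1, field mismatch */
	return 0;
}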
+ */ +int32_t +ulp_matcher_action_match(struct ulp_rte_parser_params *params, + uint32_t *act_id); + +#endif /* ULP_MATCHER_H_ */ diff --git a/src/spdk/dpdk/drivers/net/bnxt/tf_ulp/ulp_port_db.c b/src/spdk/dpdk/drivers/net/bnxt/tf_ulp/ulp_port_db.c new file mode 100644 index 000000000..e3b924289 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnxt/tf_ulp/ulp_port_db.c @@ -0,0 +1,263 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2020 Broadcom + * All rights reserved. + */ + +#include +#include "bnxt.h" +#include "bnxt_vnic.h" +#include "bnxt_tf_common.h" +#include "ulp_port_db.h" + +static uint32_t +ulp_port_db_allocate_ifindex(struct bnxt_ulp_port_db *port_db) +{ + uint32_t idx = 1; + + while (idx < port_db->ulp_intf_list_size && + port_db->ulp_intf_list[idx].type != BNXT_ULP_INTF_TYPE_INVALID) + idx++; + + if (idx >= port_db->ulp_intf_list_size) { + BNXT_TF_DBG(ERR, "Port DB interface list is full\n"); + return 0; + } + return idx; +} + +/* + * Initialize the port database. Memory is allocated in this + * call and assigned to the port database. + * + * ulp_ctxt [in] Ptr to ulp context + * + * Returns 0 on success or negative number on failure. + */ +int32_t ulp_port_db_init(struct bnxt_ulp_context *ulp_ctxt) +{ + struct bnxt_ulp_port_db *port_db; + + port_db = rte_zmalloc("bnxt_ulp_port_db", + sizeof(struct bnxt_ulp_port_db), 0); + if (!port_db) { + BNXT_TF_DBG(ERR, + "Failed to allocate memory for port db\n"); + return -ENOMEM; + } + + /* Attach the port database to the ulp context. */ + bnxt_ulp_cntxt_ptr2_port_db_set(ulp_ctxt, port_db); + + /* index 0 is not being used hence add 1 to size */ + port_db->ulp_intf_list_size = BNXT_PORT_DB_MAX_INTF_LIST + 1; + /* Allocate the port tables */ + port_db->ulp_intf_list = rte_zmalloc("bnxt_ulp_port_db_intf_list", + port_db->ulp_intf_list_size * + sizeof(struct ulp_interface_info), + 0); + if (!port_db->ulp_intf_list) { + BNXT_TF_DBG(ERR, + "Failed to allocate mem for port interface list\n"); + goto error_free; + } + return 0; + +error_free: + ulp_port_db_deinit(ulp_ctxt); + return -ENOMEM; +} + +/* + * Deinitialize the port database. Memory is deallocated in + * this call. + * + * ulp_ctxt [in] Ptr to ulp context + * + * Returns 0 on success. + */ +int32_t ulp_port_db_deinit(struct bnxt_ulp_context *ulp_ctxt) +{ + struct bnxt_ulp_port_db *port_db; + + port_db = bnxt_ulp_cntxt_ptr2_port_db_get(ulp_ctxt); + if (!port_db) { + BNXT_TF_DBG(ERR, "Invalid Arguments\n"); + return -EINVAL; + } + + /* Detach the flow database from the ulp context. */ + bnxt_ulp_cntxt_ptr2_port_db_set(ulp_ctxt, NULL); + + /* Free up all the memory. */ + rte_free(port_db->ulp_intf_list); + rte_free(port_db); + return 0; +} + +/* + * Update the port database.This api is called when the port + * details are available during the startup. + * + * ulp_ctxt [in] Ptr to ulp context + * bp [in]. ptr to the device function. + * + * Returns 0 on success or negative number on failure. 
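The port database hands out interface indices with a simple linear scan, keeping index 0 reserved as the "invalid" value. A standalone reduction of that allocation loop, with a made-up list size, looks like this:

#include <stdint.h>
#include <stdio.h>

#define INTF_INVALID	0
#define INTF_PF		1
#define LIST_SIZE	8	/* index 0 is reserved, so 7 usable slots */

/* Return the first free slot, or 0 when the list is full
 * (0 doubles as the invalid index, exactly like the port DB).
 */
static uint32_t alloc_ifindex(const uint32_t *type_list)
{
	uint32_t idx = 1;

	while (idx < LIST_SIZE && type_list[idx] != INTF_INVALID)
		idx++;
	return (idx < LIST_SIZE) ? idx : 0;
}

int main(void)
{
	uint32_t types[LIST_SIZE] = { 0, INTF_PF, INTF_PF, 0 };

	printf("%u\n", (unsigned)alloc_ifindex(types));	/* 3: first unused slot */
	return 0;
}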
+ */ +int32_t ulp_port_db_dev_port_intf_update(struct bnxt_ulp_context *ulp_ctxt, + struct bnxt *bp) +{ + struct bnxt_ulp_port_db *port_db; + uint32_t port_id = bp->eth_dev->data->port_id; + uint32_t ifindex; + struct ulp_interface_info *intf; + int32_t rc; + struct bnxt_vnic_info *vnic; + + port_db = bnxt_ulp_cntxt_ptr2_port_db_get(ulp_ctxt); + if (!port_db) { + BNXT_TF_DBG(ERR, "Invalid Arguments\n"); + return -EINVAL; + } + + rc = ulp_port_db_dev_port_to_ulp_index(ulp_ctxt, port_id, &ifindex); + if (rc == -ENOENT) { + /* port not found, allocate one */ + ifindex = ulp_port_db_allocate_ifindex(port_db); + if (!ifindex) + return -ENOMEM; + port_db->dev_port_list[port_id] = ifindex; + } else if (rc == -EINVAL) { + return -EINVAL; + } + + /* update the interface details */ + intf = &port_db->ulp_intf_list[ifindex]; + if (BNXT_PF(bp) || BNXT_VF(bp)) { + if (BNXT_PF(bp)) { + intf->type = BNXT_ULP_INTF_TYPE_PF; + intf->port_svif = bp->port_svif; + } else { + intf->type = BNXT_ULP_INTF_TYPE_VF; + } + intf->func_id = bp->fw_fid; + intf->func_svif = bp->func_svif; + vnic = BNXT_GET_DEFAULT_VNIC(bp); + if (vnic) + intf->default_vnic = vnic->fw_vnic_id; + intf->bp = bp; + memcpy(intf->mac_addr, bp->mac_addr, sizeof(intf->mac_addr)); + } else { + BNXT_TF_DBG(ERR, "Invalid interface type\n"); + } + + return 0; +} + +/* + * Api to get the ulp ifindex for a given device port. + * + * ulp_ctxt [in] Ptr to ulp context + * port_id [in].device port id + * ifindex [out] ulp ifindex + * + * Returns 0 on success or negative number on failure. + */ +int32_t +ulp_port_db_dev_port_to_ulp_index(struct bnxt_ulp_context *ulp_ctxt, + uint32_t port_id, + uint32_t *ifindex) +{ + struct bnxt_ulp_port_db *port_db; + + *ifindex = 0; + port_db = bnxt_ulp_cntxt_ptr2_port_db_get(ulp_ctxt); + if (!port_db || port_id >= RTE_MAX_ETHPORTS) { + BNXT_TF_DBG(ERR, "Invalid Arguments\n"); + return -EINVAL; + } + if (!port_db->dev_port_list[port_id]) + return -ENOENT; + + *ifindex = port_db->dev_port_list[port_id]; + return 0; +} + +/* + * Api to get the function id for a given ulp ifindex. + * + * ulp_ctxt [in] Ptr to ulp context + * ifindex [in] ulp ifindex + * func_id [out] the function id of the given ifindex. + * + * Returns 0 on success or negative number on failure. + */ +int32_t +ulp_port_db_function_id_get(struct bnxt_ulp_context *ulp_ctxt, + uint32_t ifindex, + uint16_t *func_id) +{ + struct bnxt_ulp_port_db *port_db; + + port_db = bnxt_ulp_cntxt_ptr2_port_db_get(ulp_ctxt); + if (!port_db || ifindex >= port_db->ulp_intf_list_size || !ifindex) { + BNXT_TF_DBG(ERR, "Invalid Arguments\n"); + return -EINVAL; + } + *func_id = port_db->ulp_intf_list[ifindex].func_id; + return 0; +} + +/* + * Api to get the svid for a given ulp ifindex. + * + * ulp_ctxt [in] Ptr to ulp context + * ifindex [in] ulp ifindex + * dir [in] the direction for the flow. + * svif [out] the svif of the given ifindex. + * + * Returns 0 on success or negative number on failure. + */ +int32_t +ulp_port_db_svif_get(struct bnxt_ulp_context *ulp_ctxt, + uint32_t ifindex, + uint32_t dir, + uint16_t *svif) +{ + struct bnxt_ulp_port_db *port_db; + + port_db = bnxt_ulp_cntxt_ptr2_port_db_get(ulp_ctxt); + if (!port_db || ifindex >= port_db->ulp_intf_list_size || !ifindex) { + BNXT_TF_DBG(ERR, "Invalid Arguments\n"); + return -EINVAL; + } + if (dir == ULP_DIR_EGRESS) + *svif = port_db->ulp_intf_list[ifindex].func_svif; + else + *svif = port_db->ulp_intf_list[ifindex].port_svif; + return 0; +} + +/* + * Api to get the vnic id for a given ulp ifindex. 
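The dev-port-to-ifindex mapping plus the direction-dependent SVIF choice can be condensed into a small model. The structures below are simplified stand-ins for dev_port_list and ulp_intf_list, with invented SVIF values; only the lookup logic follows the driver.

#include <stdint.h>
#include <stdio.h>

#define MAX_PORTS	4
#define DIR_EGRESS	1

struct intf { uint16_t func_svif; uint16_t port_svif; };

/* dev_port_list[port_id] holds the ulp ifindex; 0 means "not registered". */
static const uint16_t dev_port_list[MAX_PORTS] = { 0, 2, 0, 0 };
static const struct intf intf_list[3] = { { 0, 0 }, { 0, 0 }, { 0x11, 0x22 } };

static int svif_get(uint16_t port_id, uint32_t dir, uint16_t *svif)
{
	uint16_t ifindex;

	if (port_id >= MAX_PORTS || !dev_port_list[port_id])
		return -1;
	ifindex = dev_port_list[port_id];
	/* egress flows match on the function SVIF, ingress on the port SVIF */
	*svif = (dir == DIR_EGRESS) ? intf_list[ifindex].func_svif
				    : intf_list[ifindex].port_svif;
	return 0;
}

int main(void)
{
	uint16_t svif;

	if (!svif_get(1, DIR_EGRESS, &svif))
		printf("0x%x\n", (unsigned)svif);	/* 0x11 */
	if (!svif_get(1, 0, &svif))
		printf("0x%x\n", (unsigned)svif);	/* 0x22 */
	return 0;
}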
+ * + * ulp_ctxt [in] Ptr to ulp context + * ifindex [in] ulp ifindex + * vnic [out] the vnic of the given ifindex. + * + * Returns 0 on success or negative number on failure. + */ +int32_t +ulp_port_db_default_vnic_get(struct bnxt_ulp_context *ulp_ctxt, + uint32_t ifindex, + uint16_t *vnic) +{ + struct bnxt_ulp_port_db *port_db; + + port_db = bnxt_ulp_cntxt_ptr2_port_db_get(ulp_ctxt); + if (!port_db || ifindex >= port_db->ulp_intf_list_size || !ifindex) { + BNXT_TF_DBG(ERR, "Invalid Arguments\n"); + return -EINVAL; + } + *vnic = port_db->ulp_intf_list[ifindex].default_vnic; + return 0; +} diff --git a/src/spdk/dpdk/drivers/net/bnxt/tf_ulp/ulp_port_db.h b/src/spdk/dpdk/drivers/net/bnxt/tf_ulp/ulp_port_db.h new file mode 100644 index 000000000..271c29a47 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnxt/tf_ulp/ulp_port_db.h @@ -0,0 +1,134 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2019 Broadcom + * All rights reserved. + */ + +#ifndef _ULP_PORT_DB_H_ +#define _ULP_PORT_DB_H_ + +#include "bnxt_ulp.h" + +#define BNXT_PORT_DB_MAX_INTF_LIST 256 + +/* enumeration of the interface types */ +enum bnxt_ulp_intf_type { + BNXT_ULP_INTF_TYPE_INVALID = 0, + BNXT_ULP_INTF_TYPE_PF = 1, + BNXT_ULP_INTF_TYPE_VF, + BNXT_ULP_INTF_TYPE_PF_REP, + BNXT_ULP_INTF_TYPE_VF_REP, + BNXT_ULP_INTF_TYPE_LAST +}; + +/* Structure for the Port database resource information. */ +struct ulp_interface_info { + enum bnxt_ulp_intf_type type; + uint16_t func_id; + uint16_t func_svif; + uint16_t port_svif; + uint16_t default_vnic; + uint8_t mac_addr[RTE_ETHER_ADDR_LEN]; + /* back pointer to the bnxt driver, it is null for rep ports */ + struct bnxt *bp; +}; + +/* Structure for the Port database */ +struct bnxt_ulp_port_db { + struct ulp_interface_info *ulp_intf_list; + uint32_t ulp_intf_list_size; + + /* dpdk device external port list */ + uint16_t dev_port_list[RTE_MAX_ETHPORTS]; +}; + +/* + * Initialize the port database. Memory is allocated in this + * call and assigned to the port database. + * + * ulp_ctxt [in] Ptr to ulp context + * + * Returns 0 on success or negative number on failure. + */ +int32_t ulp_port_db_init(struct bnxt_ulp_context *ulp_ctxt); + +/* + * Deinitialize the port database. Memory is deallocated in + * this call. + * + * ulp_ctxt [in] Ptr to ulp context + * + * Returns 0 on success. + */ +int32_t ulp_port_db_deinit(struct bnxt_ulp_context *ulp_ctxt); + +/* + * Update the port database.This api is called when the port + * details are available during the startup. + * + * ulp_ctxt [in] Ptr to ulp context + * bp [in]. ptr to the device function. + * + * Returns 0 on success or negative number on failure. + */ +int32_t ulp_port_db_dev_port_intf_update(struct bnxt_ulp_context *ulp_ctxt, + struct bnxt *bp); + +/* + * Api to get the ulp ifindex for a given device port. + * + * ulp_ctxt [in] Ptr to ulp context + * port_id [in].device port id + * ifindex [out] ulp ifindex + * + * Returns 0 on success or negative number on failure. + */ +int32_t +ulp_port_db_dev_port_to_ulp_index(struct bnxt_ulp_context *ulp_ctxt, + uint32_t port_id, + uint32_t *ifindex); + +/* + * Api to get the function id for a given ulp ifindex. + * + * ulp_ctxt [in] Ptr to ulp context + * ifindex [in] ulp ifindex + * func_id [out] the function id of the given ifindex. + * + * Returns 0 on success or negative number on failure. + */ +int32_t +ulp_port_db_function_id_get(struct bnxt_ulp_context *ulp_ctxt, + uint32_t ifindex, + uint16_t *func_id); + +/* + * Api to get the svid for a given ulp ifindex. 
+ * + * ulp_ctxt [in] Ptr to ulp context + * ifindex [in] ulp ifindex + * dir [in] the direction for the flow. + * svif [out] the svif of the given ifindex. + * + * Returns 0 on success or negative number on failure. + */ +int32_t +ulp_port_db_svif_get(struct bnxt_ulp_context *ulp_ctxt, + uint32_t ifindex, + uint32_t dir, + uint16_t *svif); + +/* + * Api to get the vnic id for a given ulp ifindex. + * + * ulp_ctxt [in] Ptr to ulp context + * ifindex [in] ulp ifindex + * vnic [out] the vnic of the given ifindex. + * + * Returns 0 on success or negative number on failure. + */ +int32_t +ulp_port_db_default_vnic_get(struct bnxt_ulp_context *ulp_ctxt, + uint32_t ifindex, + uint16_t *vnic); + +#endif /* _ULP_PORT_DB_H_ */ diff --git a/src/spdk/dpdk/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c b/src/spdk/dpdk/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c new file mode 100644 index 000000000..ace5fad97 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c @@ -0,0 +1,1302 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2020 Broadcom + * All rights reserved. + */ + +#include "bnxt.h" +#include "ulp_template_db.h" +#include "ulp_template_struct.h" +#include "bnxt_tf_common.h" +#include "ulp_rte_parser.h" +#include "ulp_utils.h" +#include "tfp.h" +#include "ulp_port_db.h" + +/* Utility function to skip the void items. */ +static inline int32_t +ulp_rte_item_skip_void(const struct rte_flow_item **item, uint32_t increment) +{ + if (!*item) + return 0; + if (increment) + (*item)++; + while ((*item) && (*item)->type == RTE_FLOW_ITEM_TYPE_VOID) + (*item)++; + if (*item) + return 1; + return 0; +} + +/* Utility function to update the field_bitmap */ +static void +ulp_rte_parser_field_bitmap_update(struct ulp_rte_parser_params *params, + uint32_t idx) +{ + struct ulp_rte_hdr_field *field; + + field = ¶ms->hdr_field[idx]; + if (ulp_bitmap_notzero(field->mask, field->size)) { + ULP_INDEX_BITMAP_SET(params->fld_bitmap.bits, idx); + /* Not exact match */ + if (!ulp_bitmap_is_ones(field->mask, field->size)) + ULP_BITMAP_SET(params->fld_bitmap.bits, + BNXT_ULP_MATCH_TYPE_BITMASK_WM); + } else { + ULP_INDEX_BITMAP_RESET(params->fld_bitmap.bits, idx); + } +} + +/* Utility function to copy field spec items */ +static struct ulp_rte_hdr_field * +ulp_rte_parser_fld_copy(struct ulp_rte_hdr_field *field, + const void *buffer, + uint32_t size) +{ + field->size = size; + memcpy(field->spec, buffer, field->size); + field++; + return field; +} + +/* Utility function to copy field masks items */ +static void +ulp_rte_prsr_mask_copy(struct ulp_rte_parser_params *params, + uint32_t *idx, + const void *buffer, + uint32_t size) +{ + struct ulp_rte_hdr_field *field = ¶ms->hdr_field[*idx]; + + memcpy(field->mask, buffer, size); + ulp_rte_parser_field_bitmap_update(params, *idx); + *idx = *idx + 1; +} + +/* + * Function to handle the parsing of RTE Flows and placing + * the RTE flow items into the ulp structures. 
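Before the parse entry points, it is worth seeing the field-bitmap bookkeeping in isolation: a field is recorded only when its mask is non-zero, and the flow is additionally flagged as a wildcard match when that mask is not all-ones. The sketch below is a standalone illustration with a made-up flag bit standing in for BNXT_ULP_MATCH_TYPE_BITMASK_WM.

#include <stdint.h>
#include <stddef.h>
#include <stdbool.h>
#include <stdio.h>

#define WILDCARD_BIT (1ULL << 63)	/* stand-in for the wildcard-match flag */

static bool all_zero(const uint8_t *p, size_t n)
{
	while (n--)
		if (*p++)
			return false;
	return true;
}

static bool all_ones(const uint8_t *p, size_t n)
{
	while (n--)
		if (*p++ != 0xff)
			return false;
	return true;
}

/* Record field idx only when its mask is non-zero, and mark the flow as a
 * wildcard match when that mask is not an exact (all-ones) mask.
 */
static void field_bitmap_update(uint64_t *bits, uint32_t idx,
				const uint8_t *mask, size_t len)
{
	if (all_zero(mask, len)) {
		*bits &= ~(1ULL << idx);
		return;
	}
	*bits |= 1ULL << idx;
	if (!all_ones(mask, len))
		*bits |= WILDCARD_BIT;
}

int main(void)
{
	uint64_t bits = 0;
	uint8_t exact[4] = { 0xff, 0xff, 0xff, 0xff };
	uint8_t partial[4] = { 0xff, 0xff, 0x00, 0x00 };

	field_bitmap_update(&bits, 3, exact, 4);
	field_bitmap_update(&bits, 4, partial, 4);
	printf("0x%llx\n", (unsigned long long)bits);	/* bits 3, 4 and the WM flag */
	return 0;
}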
+ */ +int32_t +bnxt_ulp_rte_parser_hdr_parse(const struct rte_flow_item pattern[], + struct ulp_rte_parser_params *params) +{ + const struct rte_flow_item *item = pattern; + struct bnxt_ulp_rte_hdr_info *hdr_info; + + params->field_idx = BNXT_ULP_PROTO_HDR_SVIF_NUM; + if (params->dir == ULP_DIR_EGRESS) + ULP_BITMAP_SET(params->hdr_bitmap.bits, + BNXT_ULP_FLOW_DIR_BITMASK_EGR); + + /* Parse all the items in the pattern */ + while (item && item->type != RTE_FLOW_ITEM_TYPE_END) { + /* get the header information from the flow_hdr_info table */ + hdr_info = &ulp_hdr_info[item->type]; + if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_NOT_SUPPORTED) { + BNXT_TF_DBG(ERR, + "Truflow parser does not support type %d\n", + item->type); + return BNXT_TF_RC_PARSE_ERR; + } else if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_SUPPORTED) { + /* call the registered callback handler */ + if (hdr_info->proto_hdr_func) { + if (hdr_info->proto_hdr_func(item, params) != + BNXT_TF_RC_SUCCESS) { + return BNXT_TF_RC_ERROR; + } + } + } + item++; + } + /* update the implied SVIF */ + (void)ulp_rte_parser_svif_process(params); + return BNXT_TF_RC_SUCCESS; +} + +/* + * Function to handle the parsing of RTE Flows and placing + * the RTE flow actions into the ulp structures. + */ +int32_t +bnxt_ulp_rte_parser_act_parse(const struct rte_flow_action actions[], + struct ulp_rte_parser_params *params) +{ + const struct rte_flow_action *action_item = actions; + struct bnxt_ulp_rte_act_info *hdr_info; + + /* Parse all the items in the pattern */ + while (action_item && action_item->type != RTE_FLOW_ACTION_TYPE_END) { + /* get the header information from the flow_hdr_info table */ + hdr_info = &ulp_act_info[action_item->type]; + if (hdr_info->act_type == + BNXT_ULP_ACT_TYPE_NOT_SUPPORTED) { + BNXT_TF_DBG(ERR, + "Truflow parser does not support act %u\n", + action_item->type); + return BNXT_TF_RC_ERROR; + } else if (hdr_info->act_type == + BNXT_ULP_ACT_TYPE_SUPPORTED) { + /* call the registered callback handler */ + if (hdr_info->proto_act_func) { + if (hdr_info->proto_act_func(action_item, + params) != + BNXT_TF_RC_SUCCESS) { + return BNXT_TF_RC_ERROR; + } + } + } + action_item++; + } + /* update the implied VNIC */ + ulp_rte_parser_vnic_process(params); + return BNXT_TF_RC_SUCCESS; +} + +/* Function to handle the parsing of RTE Flow item PF Header. 
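Both parse loops above follow the same pattern: walk the array until the END sentinel and dispatch each element through a per-type handler table, failing on unsupported types. Here is that pattern reduced to a standalone sketch with a two-entry handler table of invented item types:

#include <stdint.h>
#include <stdio.h>

enum item_type { ITEM_END, ITEM_ETH, ITEM_IPV4, ITEM_MAX };

struct item { enum item_type type; };

typedef int (*item_handler)(const struct item *it);

static int eth_handler(const struct item *it)  { (void)it; puts("eth");  return 0; }
static int ipv4_handler(const struct item *it) { (void)it; puts("ipv4"); return 0; }

/* Per-type dispatch table: a NULL slot means the item type is not supported. */
static const item_handler handlers[ITEM_MAX] = {
	[ITEM_ETH]  = eth_handler,
	[ITEM_IPV4] = ipv4_handler,
};

static int parse(const struct item *pattern)
{
	for (; pattern->type != ITEM_END; pattern++) {
		item_handler h = handlers[pattern->type];

		if (!h)
			return -1;	/* unsupported item type */
		if (h(pattern))
			return -1;	/* handler reported a parse error */
	}
	return 0;
}

int main(void)
{
	struct item pat[] = { { ITEM_ETH }, { ITEM_IPV4 }, { ITEM_END } };

	return parse(pat) ? 1 : 0;
}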
*/ +static int32_t +ulp_rte_parser_svif_set(struct ulp_rte_parser_params *params, + enum rte_flow_item_type proto, + uint16_t svif, + uint16_t mask) +{ + uint16_t port_id = svif; + uint32_t dir = 0; + struct ulp_rte_hdr_field *hdr_field; + uint32_t ifindex; + int32_t rc; + + if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_SVIF)) { + BNXT_TF_DBG(ERR, + "SVIF already set,multiple source not support'd\n"); + return BNXT_TF_RC_ERROR; + } + + /*update the hdr_bitmap with BNXT_ULP_HDR_PROTO_SVIF */ + ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_SVIF); + + if (proto == RTE_FLOW_ITEM_TYPE_PORT_ID) { + dir = ULP_UTIL_CHF_IDX_RD(params, + BNXT_ULP_CHF_IDX_DIRECTION); + /* perform the conversion from dpdk port to bnxt svif */ + rc = ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx, port_id, + &ifindex); + if (rc) { + BNXT_TF_DBG(ERR, + "Invalid port id\n"); + return BNXT_TF_RC_ERROR; + } + ulp_port_db_svif_get(params->ulp_ctx, ifindex, dir, &svif); + svif = rte_cpu_to_be_16(svif); + } + hdr_field = ¶ms->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX]; + memcpy(hdr_field->spec, &svif, sizeof(svif)); + memcpy(hdr_field->mask, &mask, sizeof(mask)); + hdr_field->size = sizeof(svif); + return BNXT_TF_RC_SUCCESS; +} + +/* Function to handle the parsing of the RTE port id */ +int32_t +ulp_rte_parser_svif_process(struct ulp_rte_parser_params *params) +{ + uint16_t port_id = 0; + uint16_t svif_mask = 0xFFFF; + + if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_SVIF)) + return BNXT_TF_RC_SUCCESS; + + /* SVIF not set. So get the port id */ + port_id = ULP_UTIL_CHF_IDX_RD(params, BNXT_ULP_CHF_IDX_INCOMING_IF); + + /* Update the SVIF details */ + return ulp_rte_parser_svif_set(params, RTE_FLOW_ITEM_TYPE_PORT_ID, + port_id, svif_mask); +} + +/* Function to handle the implicit VNIC RTE port id */ +int32_t +ulp_rte_parser_vnic_process(struct ulp_rte_parser_params *params) +{ + struct ulp_rte_act_bitmap *act = ¶ms->act_bitmap; + + if (ULP_BITMAP_ISSET(act->bits, BNXT_ULP_ACTION_BIT_VNIC) || + ULP_BITMAP_ISSET(act->bits, BNXT_ULP_ACTION_BIT_VPORT)) + return BNXT_TF_RC_SUCCESS; + + /* Update the vnic details */ + ulp_rte_pf_act_handler(NULL, params); + return BNXT_TF_RC_SUCCESS; +} + +/* Function to handle the parsing of RTE Flow item PF Header. */ +int32_t +ulp_rte_pf_hdr_handler(const struct rte_flow_item *item, + struct ulp_rte_parser_params *params) +{ + uint16_t port_id = 0; + uint16_t svif_mask = 0xFFFF; + + /* Get the port id */ + port_id = ULP_UTIL_CHF_IDX_RD(params, BNXT_ULP_CHF_IDX_INCOMING_IF); + + /* Update the SVIF details */ + return ulp_rte_parser_svif_set(params, + item->type, + port_id, svif_mask); +} + +/* Function to handle the parsing of RTE Flow item VF Header. */ +int32_t +ulp_rte_vf_hdr_handler(const struct rte_flow_item *item, + struct ulp_rte_parser_params *params) +{ + const struct rte_flow_item_vf *vf_spec = item->spec; + const struct rte_flow_item_vf *vf_mask = item->mask; + uint16_t svif = 0, mask = 0; + + /* Get VF rte_flow_item for Port details */ + if (vf_spec) + svif = (uint16_t)vf_spec->id; + if (vf_mask) + mask = (uint16_t)vf_mask->id; + + return ulp_rte_parser_svif_set(params, item->type, svif, mask); +} + +/* Function to handle the parsing of RTE Flow item port id Header. 
*/ +int32_t +ulp_rte_port_id_hdr_handler(const struct rte_flow_item *item, + struct ulp_rte_parser_params *params) +{ + const struct rte_flow_item_port_id *port_spec = item->spec; + const struct rte_flow_item_port_id *port_mask = item->mask; + uint16_t svif = 0, mask = 0; + + /* + * Copy the rte_flow_item for Port into hdr_field using port id + * header fields. + */ + if (port_spec) + svif = (uint16_t)port_spec->id; + if (port_mask) + mask = (uint16_t)port_mask->id; + + /* Update the SVIF details */ + return ulp_rte_parser_svif_set(params, item->type, svif, mask); +} + +/* Function to handle the parsing of RTE Flow item phy port Header. */ +int32_t +ulp_rte_phy_port_hdr_handler(const struct rte_flow_item *item, + struct ulp_rte_parser_params *params) +{ + const struct rte_flow_item_phy_port *port_spec = item->spec; + const struct rte_flow_item_phy_port *port_mask = item->mask; + uint32_t svif = 0, mask = 0; + + /* Copy the rte_flow_item for phy port into hdr_field */ + if (port_spec) + svif = port_spec->index; + if (port_mask) + mask = port_mask->index; + + /* Update the SVIF details */ + return ulp_rte_parser_svif_set(params, item->type, svif, mask); +} + +/* Function to handle the parsing of RTE Flow item Ethernet Header. */ +int32_t +ulp_rte_eth_hdr_handler(const struct rte_flow_item *item, + struct ulp_rte_parser_params *params) +{ + const struct rte_flow_item_eth *eth_spec = item->spec; + const struct rte_flow_item_eth *eth_mask = item->mask; + struct ulp_rte_hdr_field *field; + uint32_t idx = params->field_idx; + uint64_t set_flag = 0; + uint32_t size; + + /* + * Copy the rte_flow_item for eth into hdr_field using ethernet + * header fields + */ + if (eth_spec) { + size = sizeof(eth_spec->dst.addr_bytes); + field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx], + eth_spec->dst.addr_bytes, + size); + size = sizeof(eth_spec->src.addr_bytes); + field = ulp_rte_parser_fld_copy(field, + eth_spec->src.addr_bytes, + size); + field = ulp_rte_parser_fld_copy(field, + ð_spec->type, + sizeof(eth_spec->type)); + } + if (eth_mask) { + ulp_rte_prsr_mask_copy(params, &idx, eth_mask->dst.addr_bytes, + sizeof(eth_mask->dst.addr_bytes)); + ulp_rte_prsr_mask_copy(params, &idx, eth_mask->src.addr_bytes, + sizeof(eth_mask->src.addr_bytes)); + ulp_rte_prsr_mask_copy(params, &idx, ð_mask->type, + sizeof(eth_mask->type)); + } + /* Add number of vlan header elements */ + params->field_idx += BNXT_ULP_PROTO_HDR_ETH_NUM; + params->vlan_idx = params->field_idx; + params->field_idx += BNXT_ULP_PROTO_HDR_VLAN_NUM; + + /* Update the hdr_bitmap with BNXT_ULP_HDR_PROTO_I_ETH */ + set_flag = ULP_BITMAP_ISSET(params->hdr_bitmap.bits, + BNXT_ULP_HDR_BIT_O_ETH); + if (set_flag) + ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_I_ETH); + else + ULP_BITMAP_RESET(params->hdr_bitmap.bits, + BNXT_ULP_HDR_BIT_I_ETH); + + /* update the hdr_bitmap with BNXT_ULP_HDR_PROTO_O_ETH */ + ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH); + + return BNXT_TF_RC_SUCCESS; +} + +/* Function to handle the parsing of RTE Flow item Vlan Header. 
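The VLAN handler that follows splits the 16-bit TCI into a 3-bit priority (the top bits) and a 12-bit VLAN ID, exactly as in this short worked example; the byte-order conversions the handler applies when storing the values are left out here for clarity.

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>	/* ntohs/htons, as used by the handler itself */

int main(void)
{
	/* TCI 0xA07B: PCP 5, DEI 0, VLAN ID 0x07B (123) */
	uint16_t tci_be = htons(0xA07B);
	uint16_t tci = ntohs(tci_be);
	uint16_t prio = tci >> 13;	/* top 3 bits  */
	uint16_t vid  = tci & 0xfff;	/* low 12 bits */

	printf("prio %u vid %u\n", (unsigned)prio, (unsigned)vid);	/* prio 5 vid 123 */
	return 0;
}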
*/ +int32_t +ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item, + struct ulp_rte_parser_params *params) +{ + const struct rte_flow_item_vlan *vlan_spec = item->spec; + const struct rte_flow_item_vlan *vlan_mask = item->mask; + struct ulp_rte_hdr_field *field; + struct ulp_rte_hdr_bitmap *hdr_bit; + uint32_t idx = params->vlan_idx; + uint16_t vlan_tag, priority; + uint32_t outer_vtag_num; + uint32_t inner_vtag_num; + + /* + * Copy the rte_flow_item for vlan into hdr_field using Vlan + * header fields + */ + if (vlan_spec) { + vlan_tag = ntohs(vlan_spec->tci); + priority = htons(vlan_tag >> 13); + vlan_tag &= 0xfff; + vlan_tag = htons(vlan_tag); + + field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx], + &priority, + sizeof(priority)); + field = ulp_rte_parser_fld_copy(field, + &vlan_tag, + sizeof(vlan_tag)); + field = ulp_rte_parser_fld_copy(field, + &vlan_spec->inner_type, + sizeof(vlan_spec->inner_type)); + } + + if (vlan_mask) { + vlan_tag = ntohs(vlan_mask->tci); + priority = htons(vlan_tag >> 13); + vlan_tag &= 0xfff; + vlan_tag = htons(vlan_tag); + + field = ¶ms->hdr_field[idx]; + memcpy(field->mask, &priority, field->size); + field++; + memcpy(field->mask, &vlan_tag, field->size); + field++; + memcpy(field->mask, &vlan_mask->inner_type, field->size); + } + /* Set the vlan index to new incremented value */ + params->vlan_idx += BNXT_ULP_PROTO_HDR_S_VLAN_NUM; + + /* Get the outer tag and inner tag counts */ + outer_vtag_num = ULP_UTIL_CHF_IDX_RD(params, + BNXT_ULP_CHF_IDX_O_VTAG_NUM); + inner_vtag_num = ULP_UTIL_CHF_IDX_RD(params, + BNXT_ULP_CHF_IDX_I_VTAG_NUM); + + /* Update the hdr_bitmap of the vlans */ + hdr_bit = ¶ms->hdr_bitmap; + if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) && + !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_OO_VLAN)) { + /* Set the outer vlan bit and update the vlan tag num */ + ULP_BITMAP_SET(hdr_bit->bits, BNXT_ULP_HDR_BIT_OO_VLAN); + outer_vtag_num++; + ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_O_VTAG_NUM, + outer_vtag_num); + ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_O_VTAG_PRESENT, 1); + } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) && + ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_OO_VLAN) && + !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_OI_VLAN)) { + /* Set the outer vlan bit and update the vlan tag num */ + ULP_BITMAP_SET(hdr_bit->bits, BNXT_ULP_HDR_BIT_OI_VLAN); + outer_vtag_num++; + ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_O_VTAG_NUM, + outer_vtag_num); + ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_O_TWO_VTAGS, 1); + } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) && + ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_OO_VLAN) && + ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_OI_VLAN) && + ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) && + !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_IO_VLAN)) { + /* Set the inner vlan bit and update the vlan tag num */ + ULP_BITMAP_SET(hdr_bit->bits, BNXT_ULP_HDR_BIT_IO_VLAN); + inner_vtag_num++; + ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_I_VTAG_NUM, + inner_vtag_num); + ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_I_VTAG_PRESENT, 1); + } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) && + ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_OO_VLAN) && + ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_OI_VLAN) && + ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) && + ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_IO_VLAN) && + !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_II_VLAN)) 
{ + /* Set the inner vlan bit and update the vlan tag num */ + ULP_BITMAP_SET(hdr_bit->bits, BNXT_ULP_HDR_BIT_II_VLAN); + inner_vtag_num++; + ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_I_VTAG_NUM, + inner_vtag_num); + ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_I_TWO_VTAGS, 1); + } else { + BNXT_TF_DBG(ERR, "Error Parsing:Vlan hdr found withtout eth\n"); + return BNXT_TF_RC_ERROR; + } + return BNXT_TF_RC_SUCCESS; +} + +/* Function to handle the parsing of RTE Flow item IPV4 Header. */ +int32_t +ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item, + struct ulp_rte_parser_params *params) +{ + const struct rte_flow_item_ipv4 *ipv4_spec = item->spec; + const struct rte_flow_item_ipv4 *ipv4_mask = item->mask; + struct ulp_rte_hdr_field *field; + struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap; + uint32_t idx = params->field_idx; + uint32_t size; + uint32_t inner_l3, outer_l3; + + inner_l3 = ULP_UTIL_CHF_IDX_RD(params, BNXT_ULP_CHF_IDX_I_L3); + if (inner_l3) { + BNXT_TF_DBG(ERR, "Parse Error:Third L3 header not supported\n"); + return BNXT_TF_RC_ERROR; + } + + /* + * Copy the rte_flow_item for ipv4 into hdr_field using ipv4 + * header fields + */ + if (ipv4_spec) { + size = sizeof(ipv4_spec->hdr.version_ihl); + field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx], + &ipv4_spec->hdr.version_ihl, + size); + size = sizeof(ipv4_spec->hdr.type_of_service); + field = ulp_rte_parser_fld_copy(field, + &ipv4_spec->hdr.type_of_service, + size); + size = sizeof(ipv4_spec->hdr.total_length); + field = ulp_rte_parser_fld_copy(field, + &ipv4_spec->hdr.total_length, + size); + size = sizeof(ipv4_spec->hdr.packet_id); + field = ulp_rte_parser_fld_copy(field, + &ipv4_spec->hdr.packet_id, + size); + size = sizeof(ipv4_spec->hdr.fragment_offset); + field = ulp_rte_parser_fld_copy(field, + &ipv4_spec->hdr.fragment_offset, + size); + size = sizeof(ipv4_spec->hdr.time_to_live); + field = ulp_rte_parser_fld_copy(field, + &ipv4_spec->hdr.time_to_live, + size); + size = sizeof(ipv4_spec->hdr.next_proto_id); + field = ulp_rte_parser_fld_copy(field, + &ipv4_spec->hdr.next_proto_id, + size); + size = sizeof(ipv4_spec->hdr.hdr_checksum); + field = ulp_rte_parser_fld_copy(field, + &ipv4_spec->hdr.hdr_checksum, + size); + size = sizeof(ipv4_spec->hdr.src_addr); + field = ulp_rte_parser_fld_copy(field, + &ipv4_spec->hdr.src_addr, + size); + size = sizeof(ipv4_spec->hdr.dst_addr); + field = ulp_rte_parser_fld_copy(field, + &ipv4_spec->hdr.dst_addr, + size); + } + if (ipv4_mask) { + ulp_rte_prsr_mask_copy(params, &idx, + &ipv4_mask->hdr.version_ihl, + sizeof(ipv4_mask->hdr.version_ihl)); + ulp_rte_prsr_mask_copy(params, &idx, + &ipv4_mask->hdr.type_of_service, + sizeof(ipv4_mask->hdr.type_of_service)); + ulp_rte_prsr_mask_copy(params, &idx, + &ipv4_mask->hdr.total_length, + sizeof(ipv4_mask->hdr.total_length)); + ulp_rte_prsr_mask_copy(params, &idx, + &ipv4_mask->hdr.packet_id, + sizeof(ipv4_mask->hdr.packet_id)); + ulp_rte_prsr_mask_copy(params, &idx, + &ipv4_mask->hdr.fragment_offset, + sizeof(ipv4_mask->hdr.fragment_offset)); + ulp_rte_prsr_mask_copy(params, &idx, + &ipv4_mask->hdr.time_to_live, + sizeof(ipv4_mask->hdr.time_to_live)); + ulp_rte_prsr_mask_copy(params, &idx, + &ipv4_mask->hdr.next_proto_id, + sizeof(ipv4_mask->hdr.next_proto_id)); + ulp_rte_prsr_mask_copy(params, &idx, + &ipv4_mask->hdr.hdr_checksum, + sizeof(ipv4_mask->hdr.hdr_checksum)); + ulp_rte_prsr_mask_copy(params, &idx, + &ipv4_mask->hdr.src_addr, + sizeof(ipv4_mask->hdr.src_addr)); + ulp_rte_prsr_mask_copy(params, &idx, + 
&ipv4_mask->hdr.dst_addr, + sizeof(ipv4_mask->hdr.dst_addr)); + } + /* Add the number of ipv4 header elements */ + params->field_idx += BNXT_ULP_PROTO_HDR_IPV4_NUM; + + /* Set the ipv4 header bitmap and computed l3 header bitmaps */ + outer_l3 = ULP_UTIL_CHF_IDX_RD(params, BNXT_ULP_CHF_IDX_O_L3); + if (outer_l3 || + ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) || + ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) { + ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV4); + inner_l3++; + ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_I_L3, inner_l3); + } else { + ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4); + outer_l3++; + ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_O_L3, outer_l3); + } + return BNXT_TF_RC_SUCCESS; +} + +/* Function to handle the parsing of RTE Flow item IPV6 Header */ +int32_t +ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item, + struct ulp_rte_parser_params *params) +{ + const struct rte_flow_item_ipv6 *ipv6_spec = item->spec; + const struct rte_flow_item_ipv6 *ipv6_mask = item->mask; + struct ulp_rte_hdr_field *field; + struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap; + uint32_t idx = params->field_idx; + uint32_t size; + uint32_t inner_l3, outer_l3; + + inner_l3 = ULP_UTIL_CHF_IDX_RD(params, BNXT_ULP_CHF_IDX_I_L3); + if (inner_l3) { + BNXT_TF_DBG(ERR, "Parse Error: 3'rd L3 header not supported\n"); + return BNXT_TF_RC_ERROR; + } + + /* + * Copy the rte_flow_item for ipv4 into hdr_field using ipv4 + * header fields + */ + if (ipv6_spec) { + size = sizeof(ipv6_spec->hdr.vtc_flow); + field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx], + &ipv6_spec->hdr.vtc_flow, + size); + size = sizeof(ipv6_spec->hdr.payload_len); + field = ulp_rte_parser_fld_copy(field, + &ipv6_spec->hdr.payload_len, + size); + size = sizeof(ipv6_spec->hdr.proto); + field = ulp_rte_parser_fld_copy(field, + &ipv6_spec->hdr.proto, + size); + size = sizeof(ipv6_spec->hdr.hop_limits); + field = ulp_rte_parser_fld_copy(field, + &ipv6_spec->hdr.hop_limits, + size); + size = sizeof(ipv6_spec->hdr.src_addr); + field = ulp_rte_parser_fld_copy(field, + &ipv6_spec->hdr.src_addr, + size); + size = sizeof(ipv6_spec->hdr.dst_addr); + field = ulp_rte_parser_fld_copy(field, + &ipv6_spec->hdr.dst_addr, + size); + } + if (ipv6_mask) { + ulp_rte_prsr_mask_copy(params, &idx, + &ipv6_mask->hdr.vtc_flow, + sizeof(ipv6_mask->hdr.vtc_flow)); + ulp_rte_prsr_mask_copy(params, &idx, + &ipv6_mask->hdr.payload_len, + sizeof(ipv6_mask->hdr.payload_len)); + ulp_rte_prsr_mask_copy(params, &idx, + &ipv6_mask->hdr.proto, + sizeof(ipv6_mask->hdr.proto)); + ulp_rte_prsr_mask_copy(params, &idx, + &ipv6_mask->hdr.hop_limits, + sizeof(ipv6_mask->hdr.hop_limits)); + ulp_rte_prsr_mask_copy(params, &idx, + &ipv6_mask->hdr.src_addr, + sizeof(ipv6_mask->hdr.src_addr)); + ulp_rte_prsr_mask_copy(params, &idx, + &ipv6_mask->hdr.dst_addr, + sizeof(ipv6_mask->hdr.dst_addr)); + } + /* add number of ipv6 header elements */ + params->field_idx += BNXT_ULP_PROTO_HDR_IPV6_NUM; + + /* Set the ipv6 header bitmap and computed l3 header bitmaps */ + outer_l3 = ULP_UTIL_CHF_IDX_RD(params, BNXT_ULP_CHF_IDX_O_L3); + if (outer_l3 || + ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) || + ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) { + ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV6); + ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_I_L3, 1); + } else { + ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6); + ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_O_L3, 1); + 
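The outer/inner decision made at the end of the IPv4 and IPv6 handlers boils down to: the first L3 header seen is the outer one, the second is the inner (tunnelled) one, and a third is rejected. A standalone reduction of that rule, with invented bit values, is shown below.

#include <stdint.h>
#include <stdio.h>

#define HDR_O_IPV4 (1u << 0)
#define HDR_O_IPV6 (1u << 1)
#define HDR_I_IPV4 (1u << 2)
#define HDR_I_IPV6 (1u << 3)

/* First L3 header becomes the outer one, a second becomes the inner one,
 * and a third is an error, matching the handlers above.
 */
static int l3_classify(uint32_t *bits, int is_ipv6)
{
	uint32_t outer = *bits & (HDR_O_IPV4 | HDR_O_IPV6);
	uint32_t inner = *bits & (HDR_I_IPV4 | HDR_I_IPV6);

	if (inner)
		return -1;				/* third L3 header */
	if (outer)
		*bits |= is_ipv6 ? HDR_I_IPV6 : HDR_I_IPV4;
	else
		*bits |= is_ipv6 ? HDR_O_IPV6 : HDR_O_IPV4;
	return 0;
}

int main(void)
{
	uint32_t bits = 0;

	l3_classify(&bits, 0);		/* outer IPv4 */
	l3_classify(&bits, 0);		/* inner IPv4 (e.g. VXLAN payload) */
	printf("%d 0x%x\n", l3_classify(&bits, 0), bits);	/* -1 0x5 */
	return 0;
}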
} + return BNXT_TF_RC_SUCCESS; +} + +/* Function to handle the parsing of RTE Flow item UDP Header. */ +int32_t +ulp_rte_udp_hdr_handler(const struct rte_flow_item *item, + struct ulp_rte_parser_params *params) +{ + const struct rte_flow_item_udp *udp_spec = item->spec; + const struct rte_flow_item_udp *udp_mask = item->mask; + struct ulp_rte_hdr_field *field; + struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap; + uint32_t idx = params->field_idx; + uint32_t size; + uint32_t inner_l4, outer_l4; + + inner_l4 = ULP_UTIL_CHF_IDX_RD(params, BNXT_ULP_CHF_IDX_I_L4); + if (inner_l4) { + BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n"); + return BNXT_TF_RC_ERROR; + } + + /* + * Copy the rte_flow_item for ipv4 into hdr_field using ipv4 + * header fields + */ + if (udp_spec) { + size = sizeof(udp_spec->hdr.src_port); + field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx], + &udp_spec->hdr.src_port, + size); + size = sizeof(udp_spec->hdr.dst_port); + field = ulp_rte_parser_fld_copy(field, + &udp_spec->hdr.dst_port, + size); + size = sizeof(udp_spec->hdr.dgram_len); + field = ulp_rte_parser_fld_copy(field, + &udp_spec->hdr.dgram_len, + size); + size = sizeof(udp_spec->hdr.dgram_cksum); + field = ulp_rte_parser_fld_copy(field, + &udp_spec->hdr.dgram_cksum, + size); + } + if (udp_mask) { + ulp_rte_prsr_mask_copy(params, &idx, + &udp_mask->hdr.src_port, + sizeof(udp_mask->hdr.src_port)); + ulp_rte_prsr_mask_copy(params, &idx, + &udp_mask->hdr.dst_port, + sizeof(udp_mask->hdr.dst_port)); + ulp_rte_prsr_mask_copy(params, &idx, + &udp_mask->hdr.dgram_len, + sizeof(udp_mask->hdr.dgram_len)); + ulp_rte_prsr_mask_copy(params, &idx, + &udp_mask->hdr.dgram_cksum, + sizeof(udp_mask->hdr.dgram_cksum)); + } + + /* Add number of UDP header elements */ + params->field_idx += BNXT_ULP_PROTO_HDR_UDP_NUM; + + /* Set the udp header bitmap and computed l4 header bitmaps */ + outer_l4 = ULP_UTIL_CHF_IDX_RD(params, BNXT_ULP_CHF_IDX_O_L4); + if (outer_l4 || + ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) || + ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) { + ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_UDP); + ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_I_L4, 1); + } else { + ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP); + ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_O_L4, 1); + } + return BNXT_TF_RC_SUCCESS; +} + +/* Function to handle the parsing of RTE Flow item TCP Header. 
*/ +int32_t +ulp_rte_tcp_hdr_handler(const struct rte_flow_item *item, + struct ulp_rte_parser_params *params) +{ + const struct rte_flow_item_tcp *tcp_spec = item->spec; + const struct rte_flow_item_tcp *tcp_mask = item->mask; + struct ulp_rte_hdr_field *field; + struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap; + uint32_t idx = params->field_idx; + uint32_t size; + uint32_t inner_l4, outer_l4; + + inner_l4 = ULP_UTIL_CHF_IDX_RD(params, BNXT_ULP_CHF_IDX_I_L4); + if (inner_l4) { + BNXT_TF_DBG(ERR, "Parse Error:Third L4 header not supported\n"); + return BNXT_TF_RC_ERROR; + } + + /* + * Copy the rte_flow_item for ipv4 into hdr_field using ipv4 + * header fields + */ + if (tcp_spec) { + size = sizeof(tcp_spec->hdr.src_port); + field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx], + &tcp_spec->hdr.src_port, + size); + size = sizeof(tcp_spec->hdr.dst_port); + field = ulp_rte_parser_fld_copy(field, + &tcp_spec->hdr.dst_port, + size); + size = sizeof(tcp_spec->hdr.sent_seq); + field = ulp_rte_parser_fld_copy(field, + &tcp_spec->hdr.sent_seq, + size); + size = sizeof(tcp_spec->hdr.recv_ack); + field = ulp_rte_parser_fld_copy(field, + &tcp_spec->hdr.recv_ack, + size); + size = sizeof(tcp_spec->hdr.data_off); + field = ulp_rte_parser_fld_copy(field, + &tcp_spec->hdr.data_off, + size); + size = sizeof(tcp_spec->hdr.tcp_flags); + field = ulp_rte_parser_fld_copy(field, + &tcp_spec->hdr.tcp_flags, + size); + size = sizeof(tcp_spec->hdr.rx_win); + field = ulp_rte_parser_fld_copy(field, + &tcp_spec->hdr.rx_win, + size); + size = sizeof(tcp_spec->hdr.cksum); + field = ulp_rte_parser_fld_copy(field, + &tcp_spec->hdr.cksum, + size); + size = sizeof(tcp_spec->hdr.tcp_urp); + field = ulp_rte_parser_fld_copy(field, + &tcp_spec->hdr.tcp_urp, + size); + } else { + idx += BNXT_ULP_PROTO_HDR_TCP_NUM; + } + + if (tcp_mask) { + ulp_rte_prsr_mask_copy(params, &idx, + &tcp_mask->hdr.src_port, + sizeof(tcp_mask->hdr.src_port)); + ulp_rte_prsr_mask_copy(params, &idx, + &tcp_mask->hdr.dst_port, + sizeof(tcp_mask->hdr.dst_port)); + ulp_rte_prsr_mask_copy(params, &idx, + &tcp_mask->hdr.sent_seq, + sizeof(tcp_mask->hdr.sent_seq)); + ulp_rte_prsr_mask_copy(params, &idx, + &tcp_mask->hdr.recv_ack, + sizeof(tcp_mask->hdr.recv_ack)); + ulp_rte_prsr_mask_copy(params, &idx, + &tcp_mask->hdr.data_off, + sizeof(tcp_mask->hdr.data_off)); + ulp_rte_prsr_mask_copy(params, &idx, + &tcp_mask->hdr.tcp_flags, + sizeof(tcp_mask->hdr.tcp_flags)); + ulp_rte_prsr_mask_copy(params, &idx, + &tcp_mask->hdr.rx_win, + sizeof(tcp_mask->hdr.rx_win)); + ulp_rte_prsr_mask_copy(params, &idx, + &tcp_mask->hdr.cksum, + sizeof(tcp_mask->hdr.cksum)); + ulp_rte_prsr_mask_copy(params, &idx, + &tcp_mask->hdr.tcp_urp, + sizeof(tcp_mask->hdr.tcp_urp)); + } + /* add number of TCP header elements */ + params->field_idx += BNXT_ULP_PROTO_HDR_TCP_NUM; + + /* Set the udp header bitmap and computed l4 header bitmaps */ + outer_l4 = ULP_UTIL_CHF_IDX_RD(params, BNXT_ULP_CHF_IDX_O_L4); + if (outer_l4 || + ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) || + ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) { + ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_TCP); + ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_I_L4, 1); + } else { + ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP); + ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_O_L4, 1); + } + return BNXT_TF_RC_SUCCESS; +} + +/* Function to handle the parsing of RTE Flow item Vxlan Header. 
*/ +int32_t +ulp_rte_vxlan_hdr_handler(const struct rte_flow_item *item, + struct ulp_rte_parser_params *params) +{ + const struct rte_flow_item_vxlan *vxlan_spec = item->spec; + const struct rte_flow_item_vxlan *vxlan_mask = item->mask; + struct ulp_rte_hdr_field *field; + struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap; + uint32_t idx = params->field_idx; + uint32_t size; + + /* + * Copy the rte_flow_item for vxlan into hdr_field using vxlan + * header fields + */ + if (vxlan_spec) { + size = sizeof(vxlan_spec->flags); + field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx], + &vxlan_spec->flags, + size); + size = sizeof(vxlan_spec->rsvd0); + field = ulp_rte_parser_fld_copy(field, + &vxlan_spec->rsvd0, + size); + size = sizeof(vxlan_spec->vni); + field = ulp_rte_parser_fld_copy(field, + &vxlan_spec->vni, + size); + size = sizeof(vxlan_spec->rsvd1); + field = ulp_rte_parser_fld_copy(field, + &vxlan_spec->rsvd1, + size); + } + if (vxlan_mask) { + ulp_rte_prsr_mask_copy(params, &idx, + &vxlan_mask->flags, + sizeof(vxlan_mask->flags)); + ulp_rte_prsr_mask_copy(params, &idx, + &vxlan_mask->rsvd0, + sizeof(vxlan_mask->rsvd0)); + ulp_rte_prsr_mask_copy(params, &idx, + &vxlan_mask->vni, + sizeof(vxlan_mask->vni)); + ulp_rte_prsr_mask_copy(params, &idx, + &vxlan_mask->rsvd1, + sizeof(vxlan_mask->rsvd1)); + } + /* Add number of vxlan header elements */ + params->field_idx += BNXT_ULP_PROTO_HDR_VXLAN_NUM; + + /* Update the hdr_bitmap with vxlan */ + ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_VXLAN); + return BNXT_TF_RC_SUCCESS; +} + +/* Function to handle the parsing of RTE Flow item void Header */ +int32_t +ulp_rte_void_hdr_handler(const struct rte_flow_item *item __rte_unused, + struct ulp_rte_parser_params *params __rte_unused) +{ + return BNXT_TF_RC_SUCCESS; +} + +/* Function to handle the parsing of RTE Flow action void Header. */ +int32_t +ulp_rte_void_act_handler(const struct rte_flow_action *action_item __rte_unused, + struct ulp_rte_parser_params *params __rte_unused) +{ + return BNXT_TF_RC_SUCCESS; +} + +/* Function to handle the parsing of RTE Flow action Mark Header. */ +int32_t +ulp_rte_mark_act_handler(const struct rte_flow_action *action_item, + struct ulp_rte_parser_params *param) +{ + const struct rte_flow_action_mark *mark; + struct ulp_rte_act_bitmap *act = ¶m->act_bitmap; + uint32_t mark_id; + + mark = action_item->conf; + if (mark) { + mark_id = tfp_cpu_to_be_32(mark->id); + memcpy(¶m->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_MARK], + &mark_id, BNXT_ULP_ACT_PROP_SZ_MARK); + + /* Update the hdr_bitmap with vxlan */ + ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_MARK); + return BNXT_TF_RC_SUCCESS; + } + BNXT_TF_DBG(ERR, "Parse Error: Mark arg is invalid\n"); + return BNXT_TF_RC_ERROR; +} + +/* Function to handle the parsing of RTE Flow action RSS Header. */ +int32_t +ulp_rte_rss_act_handler(const struct rte_flow_action *action_item, + struct ulp_rte_parser_params *param) +{ + const struct rte_flow_action_rss *rss = action_item->conf; + + if (rss) { + /* Update the hdr_bitmap with vxlan */ + ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACTION_BIT_RSS); + return BNXT_TF_RC_SUCCESS; + } + BNXT_TF_DBG(ERR, "Parse Error: RSS arg is invalid\n"); + return BNXT_TF_RC_ERROR; +} + +/* Function to handle the parsing of RTE Flow action vxlan_encap Header. 
*/ +int32_t +ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item, + struct ulp_rte_parser_params *params) +{ + const struct rte_flow_action_vxlan_encap *vxlan_encap; + const struct rte_flow_item *item; + const struct rte_flow_item_eth *eth_spec; + const struct rte_flow_item_ipv4 *ipv4_spec; + const struct rte_flow_item_ipv6 *ipv6_spec; + struct rte_flow_item_vxlan vxlan_spec; + uint32_t vlan_num = 0, vlan_size = 0; + uint32_t ip_size = 0, ip_type = 0; + uint32_t vxlan_size = 0; + uint8_t *buff; + /* IP header per byte - ver/hlen, TOS, ID, ID, FRAG, FRAG, TTL, PROTO */ + const uint8_t def_ipv4_hdr[] = {0x45, 0x00, 0x00, 0x01, 0x00, + 0x00, 0x40, 0x11}; + struct ulp_rte_act_bitmap *act = ¶ms->act_bitmap; + struct ulp_rte_act_prop *ap = ¶ms->act_prop; + + vxlan_encap = action_item->conf; + if (!vxlan_encap) { + BNXT_TF_DBG(ERR, "Parse Error: Vxlan_encap arg is invalid\n"); + return BNXT_TF_RC_ERROR; + } + + item = vxlan_encap->definition; + if (!item) { + BNXT_TF_DBG(ERR, "Parse Error: definition arg is invalid\n"); + return BNXT_TF_RC_ERROR; + } + + if (!ulp_rte_item_skip_void(&item, 0)) + return BNXT_TF_RC_ERROR; + + /* must have ethernet header */ + if (item->type != RTE_FLOW_ITEM_TYPE_ETH) { + BNXT_TF_DBG(ERR, "Parse Error:vxlan encap does not have eth\n"); + return BNXT_TF_RC_ERROR; + } + eth_spec = item->spec; + buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_DMAC]; + ulp_encap_buffer_copy(buff, + eth_spec->dst.addr_bytes, + BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_DMAC); + + /* Goto the next item */ + if (!ulp_rte_item_skip_void(&item, 1)) + return BNXT_TF_RC_ERROR; + + /* May have vlan header */ + if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) { + vlan_num++; + buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG]; + ulp_encap_buffer_copy(buff, + item->spec, + sizeof(struct rte_flow_item_vlan)); + + if (!ulp_rte_item_skip_void(&item, 1)) + return BNXT_TF_RC_ERROR; + } + + /* may have two vlan headers */ + if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) { + vlan_num++; + memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG + + sizeof(struct rte_flow_item_vlan)], + item->spec, + sizeof(struct rte_flow_item_vlan)); + if (!ulp_rte_item_skip_void(&item, 1)) + return BNXT_TF_RC_ERROR; + } + /* Update the vlan count and size of more than one */ + if (vlan_num) { + vlan_size = vlan_num * sizeof(struct rte_flow_item_vlan); + vlan_num = tfp_cpu_to_be_32(vlan_num); + memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_NUM], + &vlan_num, + sizeof(uint32_t)); + vlan_size = tfp_cpu_to_be_32(vlan_size); + memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_SZ], + &vlan_size, + sizeof(uint32_t)); + } + + /* L3 must be IPv4, IPv6 */ + if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) { + ipv4_spec = item->spec; + ip_size = BNXT_ULP_ENCAP_IPV4_SIZE; + + /* copy the ipv4 details */ + if (ulp_buffer_is_empty(&ipv4_spec->hdr.version_ihl, + BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS)) { + buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP]; + ulp_encap_buffer_copy(buff, + def_ipv4_hdr, + BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS + + BNXT_ULP_ENCAP_IPV4_ID_PROTO); + } else { + const uint8_t *tmp_buff; + + buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP]; + ulp_encap_buffer_copy(buff, + &ipv4_spec->hdr.version_ihl, + BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS); + buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP + + BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS]; + tmp_buff = (const uint8_t *)&ipv4_spec->hdr.packet_id; + ulp_encap_buffer_copy(buff, + tmp_buff, + BNXT_ULP_ENCAP_IPV4_ID_PROTO); + } + buff = 
		&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
+				 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
+				 BNXT_ULP_ENCAP_IPV4_ID_PROTO];
+		ulp_encap_buffer_copy(buff,
+				      (const uint8_t *)&ipv4_spec->hdr.dst_addr,
+				      BNXT_ULP_ENCAP_IPV4_DEST_IP);
+
+		/* Update the ip size details */
+		ip_size = tfp_cpu_to_be_32(ip_size);
+		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
+		       &ip_size, sizeof(uint32_t));
+
+		/* update the ip type */
+		ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4);
+		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
+		       &ip_type, sizeof(uint32_t));
+
+		if (!ulp_rte_item_skip_void(&item, 1))
+			return BNXT_TF_RC_ERROR;
+	} else if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
+		ipv6_spec = item->spec;
+		ip_size = BNXT_ULP_ENCAP_IPV6_SIZE;
+
+		/* copy the ipv6 details */
+		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP],
+		       ipv6_spec, BNXT_ULP_ENCAP_IPV6_SIZE);
+
+		/* Update the ip size details */
+		ip_size = tfp_cpu_to_be_32(ip_size);
+		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
+		       &ip_size, sizeof(uint32_t));
+
+		/* update the ip type */
+		ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV6);
+		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
+		       &ip_type, sizeof(uint32_t));
+
+		if (!ulp_rte_item_skip_void(&item, 1))
+			return BNXT_TF_RC_ERROR;
+	} else {
+		BNXT_TF_DBG(ERR, "Parse Error: Vxlan Encap expects L3 hdr\n");
+		return BNXT_TF_RC_ERROR;
+	}
+
+	/* L4 is UDP */
+	if (item->type != RTE_FLOW_ITEM_TYPE_UDP) {
+		BNXT_TF_DBG(ERR, "vxlan encap does not have udp\n");
+		return BNXT_TF_RC_ERROR;
+	}
+	/* copy the udp details */
+	ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_UDP],
+			      item->spec, BNXT_ULP_ENCAP_UDP_SIZE);
+
+	if (!ulp_rte_item_skip_void(&item, 1))
+		return BNXT_TF_RC_ERROR;
+
+	/* Finally VXLAN */
+	if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
+		BNXT_TF_DBG(ERR, "vxlan encap does not have vni\n");
+		return BNXT_TF_RC_ERROR;
+	}
+	vxlan_size = sizeof(struct rte_flow_item_vxlan);
+	/* copy the vxlan details */
+	memcpy(&vxlan_spec, item->spec, vxlan_size);
+	vxlan_spec.flags = 0x08;
+	ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN],
+			      (const uint8_t *)&vxlan_spec,
+			      vxlan_size);
+	vxlan_size = tfp_cpu_to_be_32(vxlan_size);
+	memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN_SZ],
+	       &vxlan_size, sizeof(uint32_t));
+
+	/* Update the act_bitmap with vxlan encap */
+	ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_VXLAN_ENCAP);
+	return BNXT_TF_RC_SUCCESS;
+}
+
+/* Function to handle the parsing of RTE Flow action vxlan_decap Header */
+int32_t
+ulp_rte_vxlan_decap_act_handler(const struct rte_flow_action *action_item
+				__rte_unused,
+				struct ulp_rte_parser_params *params)
+{
+	/* Update the act_bitmap with vxlan decap */
+	ULP_BITMAP_SET(params->act_bitmap.bits,
+		       BNXT_ULP_ACTION_BIT_VXLAN_DECAP);
+	return BNXT_TF_RC_SUCCESS;
+}
+
+/* Function to handle the parsing of RTE Flow action drop Header. */
+int32_t
+ulp_rte_drop_act_handler(const struct rte_flow_action *action_item __rte_unused,
+			 struct ulp_rte_parser_params *params)
+{
+	/* Update the act_bitmap with drop */
+	ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_DROP);
+	return BNXT_TF_RC_SUCCESS;
+}
+
+/* Function to handle the parsing of RTE Flow action count.
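The def_ipv4_hdr template used by the encap handler packs one IPv4 field per byte, as its comment notes. Decoding those eight bytes makes the defaults explicit: version 4, IHL 5 words, TOS 0, identification 1, no fragmentation, TTL 64 and protocol 17 (UDP).

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* ver/ihl, tos, id, id, frag, frag, ttl, proto — same bytes as def_ipv4_hdr */
	const uint8_t tmpl[8] = { 0x45, 0x00, 0x00, 0x01, 0x00, 0x00, 0x40, 0x11 };

	printf("version %u ihl %u words\n", tmpl[0] >> 4, tmpl[0] & 0xf);
	printf("id 0x%04x ttl %d proto %d (UDP)\n",
	       (unsigned)((tmpl[2] << 8) | tmpl[3]), tmpl[6], tmpl[7]);
	return 0;
}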
*/ +int32_t +ulp_rte_count_act_handler(const struct rte_flow_action *action_item, + struct ulp_rte_parser_params *params) + +{ + const struct rte_flow_action_count *act_count; + struct ulp_rte_act_prop *act_prop = &params->act_prop; + + act_count = action_item->conf; + if (act_count) { + if (act_count->shared) { + BNXT_TF_DBG(ERR, + "Parse Error:Shared count not supported\n"); + return BNXT_TF_RC_PARSE_ERR; + } + memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_COUNT], + &act_count->id, + BNXT_ULP_ACT_PROP_SZ_COUNT); + } + + /* Update the hdr_bitmap with count */ + ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_COUNT); + return BNXT_TF_RC_SUCCESS; +} + +/* Function to handle the parsing of RTE Flow action PF. */ +int32_t +ulp_rte_pf_act_handler(const struct rte_flow_action *action_item __rte_unused, + struct ulp_rte_parser_params *params) +{ + uint32_t svif; + + /* Update the hdr_bitmap with vnic bit */ + ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_VNIC); + + /* copy the PF of the current device into VNIC Property */ + svif = ULP_UTIL_CHF_IDX_RD(params, BNXT_ULP_CHF_IDX_INCOMING_IF); + svif = bnxt_get_vnic_id(svif); + svif = rte_cpu_to_be_32(svif); + memcpy(&params->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VNIC], + &svif, BNXT_ULP_ACT_PROP_SZ_VNIC); + + return BNXT_TF_RC_SUCCESS; +} + +/* Function to handle the parsing of RTE Flow action VF. */ +int32_t +ulp_rte_vf_act_handler(const struct rte_flow_action *action_item, + struct ulp_rte_parser_params *param) +{ + const struct rte_flow_action_vf *vf_action; + uint32_t pid; + + vf_action = action_item->conf; + if (vf_action) { + if (vf_action->original) { + BNXT_TF_DBG(ERR, + "Parse Error:VF Original not supported\n"); + return BNXT_TF_RC_PARSE_ERR; + } + /* TBD: Update the computed VNIC using VF conversion */ + pid = bnxt_get_vnic_id(vf_action->id); + pid = rte_cpu_to_be_32(pid); + memcpy(&param->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VNIC], + &pid, BNXT_ULP_ACT_PROP_SZ_VNIC); + } + + /* Update the hdr_bitmap with vnic */ + ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACTION_BIT_VNIC); + return BNXT_TF_RC_SUCCESS; +} + +/* Function to handle the parsing of RTE Flow action port_id. */ +int32_t +ulp_rte_port_id_act_handler(const struct rte_flow_action *act_item, + struct ulp_rte_parser_params *param) +{ + const struct rte_flow_action_port_id *port_id; + uint32_t pid; + + port_id = act_item->conf; + if (port_id) { + if (port_id->original) { + BNXT_TF_DBG(ERR, + "ParseErr:Portid Original not supported\n"); + return BNXT_TF_RC_PARSE_ERR; + } + /* TBD: Update the computed VNIC using port conversion */ + pid = bnxt_get_vnic_id(port_id->id); + pid = rte_cpu_to_be_32(pid); + memcpy(&param->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VNIC], + &pid, BNXT_ULP_ACT_PROP_SZ_VNIC); + } + + /* Update the hdr_bitmap with vnic */ + ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACTION_BIT_VNIC); + return BNXT_TF_RC_SUCCESS; +} + +/* Function to handle the parsing of RTE Flow action phy_port. 
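 * The handler maps the physical port index to a vport value, writes it into the VPORT action property, and sets the VPORT bit in the action bitmap; the 'original' attribute is not supported.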
*/ +int32_t +ulp_rte_phy_port_act_handler(const struct rte_flow_action *action_item, + struct ulp_rte_parser_params *prm) +{ + const struct rte_flow_action_phy_port *phy_port; + uint32_t pid; + + phy_port = action_item->conf; + if (phy_port) { + if (phy_port->original) { + BNXT_TF_DBG(ERR, + "Parse Err:Port Original not supported\n"); + return BNXT_TF_RC_PARSE_ERR; + } + pid = bnxt_get_vnic_id(phy_port->index); + pid = rte_cpu_to_be_32(pid); + memcpy(&prm->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VPORT], + &pid, BNXT_ULP_ACT_PROP_SZ_VPORT); + } + + /* Update the hdr_bitmap with count */ + ULP_BITMAP_SET(prm->act_bitmap.bits, BNXT_ULP_ACTION_BIT_VPORT); + return BNXT_TF_RC_SUCCESS; +} diff --git a/src/spdk/dpdk/drivers/net/bnxt/tf_ulp/ulp_rte_parser.h b/src/spdk/dpdk/drivers/net/bnxt/tf_ulp/ulp_rte_parser.h new file mode 100644 index 000000000..cbc8a43de --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnxt/tf_ulp/ulp_rte_parser.h @@ -0,0 +1,162 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2020 Broadcom + * All rights reserved. + */ + +#ifndef _ULP_RTE_PARSER_H_ +#define _ULP_RTE_PARSER_H_ + +#include +#include +#include +#include "ulp_template_db.h" +#include "ulp_template_struct.h" + +/* defines to be used in the tunnel header parsing */ +#define BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS 2 +#define BNXT_ULP_ENCAP_IPV4_ID_PROTO 6 +#define BNXT_ULP_ENCAP_IPV4_DEST_IP 4 +#define BNXT_ULP_ENCAP_IPV4_SIZE 12 +#define BNXT_ULP_ENCAP_IPV6_SIZE 8 +#define BNXT_ULP_ENCAP_UDP_SIZE 4 + +/* Function to handle the parsing of the RTE port id. */ +int32_t +ulp_rte_parser_svif_process(struct ulp_rte_parser_params *params); + +/* Function to handle the implicit VNIC RTE port id */ +int32_t +ulp_rte_parser_vnic_process(struct ulp_rte_parser_params *params); + +/* + * Function to handle the parsing of RTE Flows and placing + * the RTE flow items into the ulp structures. + */ +int32_t +bnxt_ulp_rte_parser_hdr_parse(const struct rte_flow_item pattern[], + struct ulp_rte_parser_params *params); + +/* + * Function to handle the parsing of RTE Flows and placing + * the RTE flow actions into the ulp structures. + */ +int32_t +bnxt_ulp_rte_parser_act_parse(const struct rte_flow_action actions[], + struct ulp_rte_parser_params *params); + +/* Function to handle the parsing of RTE Flow item PF Header. */ +int32_t +ulp_rte_pf_hdr_handler(const struct rte_flow_item *item, + struct ulp_rte_parser_params *params); + +/* Function to handle the parsing of RTE Flow item VF Header. */ +int32_t +ulp_rte_vf_hdr_handler(const struct rte_flow_item *item, + struct ulp_rte_parser_params *params); + +/* Function to handle the parsing of RTE Flow item port id Header. */ +int32_t +ulp_rte_port_id_hdr_handler(const struct rte_flow_item *item, + struct ulp_rte_parser_params *params); + +/* Function to handle the parsing of RTE Flow item port Header. */ +int32_t +ulp_rte_phy_port_hdr_handler(const struct rte_flow_item *item, + struct ulp_rte_parser_params *params); + +/* Function to handle the RTE item Ethernet Header. */ +int32_t +ulp_rte_eth_hdr_handler(const struct rte_flow_item *item, + struct ulp_rte_parser_params *params); + +/* Function to handle the parsing of RTE Flow item Vlan Header. */ +int32_t +ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item, + struct ulp_rte_parser_params *params); + +/* Function to handle the parsing of RTE Flow item IPV4 Header. 
*/ +int32_t +ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item, + struct ulp_rte_parser_params *params); + +/* Function to handle the parsing of RTE Flow item IPV6 Header. */ +int32_t +ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item, + struct ulp_rte_parser_params *params); + +/* Function to handle the parsing of RTE Flow item UDP Header. */ +int32_t +ulp_rte_udp_hdr_handler(const struct rte_flow_item *item, + struct ulp_rte_parser_params *params); + +/* Function to handle the parsing of RTE Flow item TCP Header. */ +int32_t +ulp_rte_tcp_hdr_handler(const struct rte_flow_item *item, + struct ulp_rte_parser_params *params); + +/* Function to handle the parsing of RTE Flow item Vxlan Header. */ +int32_t +ulp_rte_vxlan_hdr_handler(const struct rte_flow_item *item, + struct ulp_rte_parser_params *params); + +/* Function to handle the parsing of RTE Flow item void Header. */ +int32_t +ulp_rte_void_hdr_handler(const struct rte_flow_item *item, + struct ulp_rte_parser_params *params); + +/* Function to handle the parsing of RTE Flow action void Header. */ +int32_t +ulp_rte_void_act_handler(const struct rte_flow_action *action_item, + struct ulp_rte_parser_params *params); + +/* Function to handle the parsing of RTE Flow action RSS Header. */ +int32_t +ulp_rte_rss_act_handler(const struct rte_flow_action *action_item, + struct ulp_rte_parser_params *params); + +/* Function to handle the parsing of RTE Flow action Mark Header. */ +int32_t +ulp_rte_mark_act_handler(const struct rte_flow_action *action_item, + struct ulp_rte_parser_params *params); + +/* Function to handle the parsing of RTE Flow action vxlan_encap Header. */ +int32_t +ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item, + struct ulp_rte_parser_params *params); + +/* Function to handle the parsing of RTE Flow action vxlan_encap Header. */ +int32_t +ulp_rte_vxlan_decap_act_handler(const struct rte_flow_action *action_item, + struct ulp_rte_parser_params *params); + +/* Function to handle the parsing of RTE Flow action drop Header. */ +int32_t +ulp_rte_drop_act_handler(const struct rte_flow_action *action_item, + struct ulp_rte_parser_params *params); + +/* Function to handle the parsing of RTE Flow action count. */ +int32_t +ulp_rte_count_act_handler(const struct rte_flow_action *action_item, + struct ulp_rte_parser_params *params); + +/* Function to handle the parsing of RTE Flow action PF. */ +int32_t +ulp_rte_pf_act_handler(const struct rte_flow_action *action_item, + struct ulp_rte_parser_params *params); + +/* Function to handle the parsing of RTE Flow action VF. */ +int32_t +ulp_rte_vf_act_handler(const struct rte_flow_action *action_item, + struct ulp_rte_parser_params *params); + +/* Function to handle the parsing of RTE Flow action port_id. */ +int32_t +ulp_rte_port_id_act_handler(const struct rte_flow_action *act_item, + struct ulp_rte_parser_params *params); + +/* Function to handle the parsing of RTE Flow action phy_port. */ +int32_t +ulp_rte_phy_port_act_handler(const struct rte_flow_action *action_item, + struct ulp_rte_parser_params *params); + +#endif /* _ULP_RTE_PARSER_H_ */ diff --git a/src/spdk/dpdk/drivers/net/bnxt/tf_ulp/ulp_template_db.c b/src/spdk/dpdk/drivers/net/bnxt/tf_ulp/ulp_template_db.c new file mode 100644 index 000000000..e89aefad4 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnxt/tf_ulp/ulp_template_db.c @@ -0,0 +1,1784 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2020 Broadcom + * All rights reserved. 
+ */ + +/* + * date: Mon Mar 9 02:37:53 2020 + * version: 0.0 + */ + +#include "ulp_template_db.h" +#include "ulp_template_field_db.h" +#include "ulp_template_struct.h" +#include "ulp_rte_parser.h" + +uint32_t ulp_act_prop_map_table[] = { + [BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN_SZ] = + BNXT_ULP_ACT_PROP_SZ_ENCAP_TUN_SZ, + [BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ] = + BNXT_ULP_ACT_PROP_SZ_ENCAP_IP_SZ, + [BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_SZ] = + BNXT_ULP_ACT_PROP_SZ_ENCAP_VTAG_SZ, + [BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_TYPE] = + BNXT_ULP_ACT_PROP_SZ_ENCAP_VTAG_TYPE, + [BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_NUM] = + BNXT_ULP_ACT_PROP_SZ_ENCAP_VTAG_NUM, + [BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE] = + BNXT_ULP_ACT_PROP_SZ_ENCAP_L3_TYPE, + [BNXT_ULP_ACT_PROP_IDX_MPLS_POP_NUM] = + BNXT_ULP_ACT_PROP_SZ_MPLS_POP_NUM, + [BNXT_ULP_ACT_PROP_IDX_MPLS_PUSH_NUM] = + BNXT_ULP_ACT_PROP_SZ_MPLS_PUSH_NUM, + [BNXT_ULP_ACT_PROP_IDX_PORT_ID] = + BNXT_ULP_ACT_PROP_SZ_PORT_ID, + [BNXT_ULP_ACT_PROP_IDX_VNIC] = + BNXT_ULP_ACT_PROP_SZ_VNIC, + [BNXT_ULP_ACT_PROP_IDX_VPORT] = + BNXT_ULP_ACT_PROP_SZ_VPORT, + [BNXT_ULP_ACT_PROP_IDX_MARK] = + BNXT_ULP_ACT_PROP_SZ_MARK, + [BNXT_ULP_ACT_PROP_IDX_COUNT] = + BNXT_ULP_ACT_PROP_SZ_COUNT, + [BNXT_ULP_ACT_PROP_IDX_METER] = + BNXT_ULP_ACT_PROP_SZ_METER, + [BNXT_ULP_ACT_PROP_IDX_SET_MAC_SRC] = + BNXT_ULP_ACT_PROP_SZ_SET_MAC_SRC, + [BNXT_ULP_ACT_PROP_IDX_SET_MAC_DST] = + BNXT_ULP_ACT_PROP_SZ_SET_MAC_DST, + [BNXT_ULP_ACT_PROP_IDX_OF_PUSH_VLAN] = + BNXT_ULP_ACT_PROP_SZ_OF_PUSH_VLAN, + [BNXT_ULP_ACT_PROP_IDX_OF_SET_VLAN_PCP] = + BNXT_ULP_ACT_PROP_SZ_OF_SET_VLAN_PCP, + [BNXT_ULP_ACT_PROP_IDX_OF_SET_VLAN_VID] = + BNXT_ULP_ACT_PROP_SZ_OF_SET_VLAN_VID, + [BNXT_ULP_ACT_PROP_IDX_SET_IPV4_SRC] = + BNXT_ULP_ACT_PROP_SZ_SET_IPV4_SRC, + [BNXT_ULP_ACT_PROP_IDX_SET_IPV4_DST] = + BNXT_ULP_ACT_PROP_SZ_SET_IPV4_DST, + [BNXT_ULP_ACT_PROP_IDX_SET_IPV6_SRC] = + BNXT_ULP_ACT_PROP_SZ_SET_IPV6_SRC, + [BNXT_ULP_ACT_PROP_IDX_SET_IPV6_DST] = + BNXT_ULP_ACT_PROP_SZ_SET_IPV6_DST, + [BNXT_ULP_ACT_PROP_IDX_SET_TP_SRC] = + BNXT_ULP_ACT_PROP_SZ_SET_TP_SRC, + [BNXT_ULP_ACT_PROP_IDX_SET_TP_DST] = + BNXT_ULP_ACT_PROP_SZ_SET_TP_DST, + [BNXT_ULP_ACT_PROP_IDX_OF_PUSH_MPLS_0] = + BNXT_ULP_ACT_PROP_SZ_OF_PUSH_MPLS_0, + [BNXT_ULP_ACT_PROP_IDX_OF_PUSH_MPLS_1] = + BNXT_ULP_ACT_PROP_SZ_OF_PUSH_MPLS_1, + [BNXT_ULP_ACT_PROP_IDX_OF_PUSH_MPLS_2] = + BNXT_ULP_ACT_PROP_SZ_OF_PUSH_MPLS_2, + [BNXT_ULP_ACT_PROP_IDX_OF_PUSH_MPLS_3] = + BNXT_ULP_ACT_PROP_SZ_OF_PUSH_MPLS_3, + [BNXT_ULP_ACT_PROP_IDX_OF_PUSH_MPLS_4] = + BNXT_ULP_ACT_PROP_SZ_OF_PUSH_MPLS_4, + [BNXT_ULP_ACT_PROP_IDX_OF_PUSH_MPLS_5] = + BNXT_ULP_ACT_PROP_SZ_OF_PUSH_MPLS_5, + [BNXT_ULP_ACT_PROP_IDX_OF_PUSH_MPLS_6] = + BNXT_ULP_ACT_PROP_SZ_OF_PUSH_MPLS_6, + [BNXT_ULP_ACT_PROP_IDX_OF_PUSH_MPLS_7] = + BNXT_ULP_ACT_PROP_SZ_OF_PUSH_MPLS_7, + [BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_DMAC] = + BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_DMAC, + [BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_SMAC] = + BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_SMAC, + [BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG] = + BNXT_ULP_ACT_PROP_SZ_ENCAP_VTAG, + [BNXT_ULP_ACT_PROP_IDX_ENCAP_IP] = + BNXT_ULP_ACT_PROP_SZ_ENCAP_IP, + [BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SRC] = + BNXT_ULP_ACT_PROP_SZ_ENCAP_IP_SRC, + [BNXT_ULP_ACT_PROP_IDX_ENCAP_UDP] = + BNXT_ULP_ACT_PROP_SZ_ENCAP_UDP, + [BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN] = + BNXT_ULP_ACT_PROP_SZ_ENCAP_TUN, + [BNXT_ULP_ACT_PROP_IDX_LAST] = + BNXT_ULP_ACT_PROP_SZ_LAST +}; + +struct bnxt_ulp_rte_act_info ulp_act_info[] = { + [RTE_FLOW_ACTION_TYPE_END] = { + .act_type = BNXT_ULP_ACT_TYPE_END, + .proto_act_func = NULL + }, + [RTE_FLOW_ACTION_TYPE_VOID] = { + 
.act_type = BNXT_ULP_ACT_TYPE_SUPPORTED, + .proto_act_func = ulp_rte_void_act_handler + }, + [RTE_FLOW_ACTION_TYPE_PASSTHRU] = { + .act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED, + .proto_act_func = NULL + }, + [RTE_FLOW_ACTION_TYPE_JUMP] = { + .act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED, + .proto_act_func = NULL + }, + [RTE_FLOW_ACTION_TYPE_MARK] = { + .act_type = BNXT_ULP_ACT_TYPE_SUPPORTED, + .proto_act_func = ulp_rte_mark_act_handler + }, + [RTE_FLOW_ACTION_TYPE_FLAG] = { + .act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED, + .proto_act_func = NULL + }, + [RTE_FLOW_ACTION_TYPE_QUEUE] = { + .act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED, + .proto_act_func = NULL + }, + [RTE_FLOW_ACTION_TYPE_DROP] = { + .act_type = BNXT_ULP_ACT_TYPE_SUPPORTED, + .proto_act_func = ulp_rte_drop_act_handler + }, + [RTE_FLOW_ACTION_TYPE_COUNT] = { + .act_type = BNXT_ULP_ACT_TYPE_SUPPORTED, + .proto_act_func = ulp_rte_count_act_handler + }, + [RTE_FLOW_ACTION_TYPE_RSS] = { + .act_type = BNXT_ULP_ACT_TYPE_SUPPORTED, + .proto_act_func = ulp_rte_rss_act_handler + }, + [RTE_FLOW_ACTION_TYPE_PF] = { + .act_type = BNXT_ULP_ACT_TYPE_SUPPORTED, + .proto_act_func = ulp_rte_pf_act_handler + }, + [RTE_FLOW_ACTION_TYPE_VF] = { + .act_type = BNXT_ULP_ACT_TYPE_SUPPORTED, + .proto_act_func = ulp_rte_vf_act_handler + }, + [RTE_FLOW_ACTION_TYPE_PHY_PORT] = { + .act_type = BNXT_ULP_ACT_TYPE_SUPPORTED, + .proto_act_func = ulp_rte_phy_port_act_handler + }, + [RTE_FLOW_ACTION_TYPE_PORT_ID] = { + .act_type = BNXT_ULP_ACT_TYPE_SUPPORTED, + .proto_act_func = ulp_rte_port_id_act_handler + }, + [RTE_FLOW_ACTION_TYPE_METER] = { + .act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED, + .proto_act_func = NULL + }, + [RTE_FLOW_ACTION_TYPE_SECURITY] = { + .act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED, + .proto_act_func = NULL + }, + [RTE_FLOW_ACTION_TYPE_OF_SET_MPLS_TTL] = { + .act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED, + .proto_act_func = NULL + }, + [RTE_FLOW_ACTION_TYPE_OF_DEC_MPLS_TTL] = { + .act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED, + .proto_act_func = NULL + }, + [RTE_FLOW_ACTION_TYPE_OF_SET_NW_TTL] = { + .act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED, + .proto_act_func = NULL + }, + [RTE_FLOW_ACTION_TYPE_OF_DEC_NW_TTL] = { + .act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED, + .proto_act_func = NULL + }, + [RTE_FLOW_ACTION_TYPE_OF_COPY_TTL_OUT] = { + .act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED, + .proto_act_func = NULL + }, + [RTE_FLOW_ACTION_TYPE_OF_COPY_TTL_IN] = { + .act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED, + .proto_act_func = NULL + }, + [RTE_FLOW_ACTION_TYPE_OF_POP_VLAN] = { + .act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED, + .proto_act_func = NULL + }, + [RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN] = { + .act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED, + .proto_act_func = NULL + }, + [RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID] = { + .act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED, + .proto_act_func = NULL + }, + [RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP] = { + .act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED, + .proto_act_func = NULL + }, + [RTE_FLOW_ACTION_TYPE_OF_POP_MPLS] = { + .act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED, + .proto_act_func = NULL + }, + [RTE_FLOW_ACTION_TYPE_OF_PUSH_MPLS] = { + .act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED, + .proto_act_func = NULL + }, + [RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP] = { + .act_type = BNXT_ULP_ACT_TYPE_SUPPORTED, + .proto_act_func = ulp_rte_vxlan_encap_act_handler + }, + [RTE_FLOW_ACTION_TYPE_VXLAN_DECAP] = { + .act_type = BNXT_ULP_ACT_TYPE_SUPPORTED, + .proto_act_func = ulp_rte_vxlan_decap_act_handler + }, + [RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP] = 
{ + .act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED, + .proto_act_func = NULL + }, + [RTE_FLOW_ACTION_TYPE_NVGRE_DECAP] = { + .act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED, + .proto_act_func = NULL + }, + [RTE_FLOW_ACTION_TYPE_RAW_ENCAP] = { + .act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED, + .proto_act_func = NULL + }, + [RTE_FLOW_ACTION_TYPE_RAW_DECAP] = { + .act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED, + .proto_act_func = NULL + }, + [RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC] = { + .act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED, + .proto_act_func = NULL + }, + [RTE_FLOW_ACTION_TYPE_SET_IPV4_DST] = { + .act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED, + .proto_act_func = NULL + }, + [RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC] = { + .act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED, + .proto_act_func = NULL + }, + [RTE_FLOW_ACTION_TYPE_SET_IPV6_DST] = { + .act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED, + .proto_act_func = NULL + }, + [RTE_FLOW_ACTION_TYPE_SET_TP_SRC] = { + .act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED, + .proto_act_func = NULL + }, + [RTE_FLOW_ACTION_TYPE_SET_TP_DST] = { + .act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED, + .proto_act_func = NULL + }, + [RTE_FLOW_ACTION_TYPE_MAC_SWAP] = { + .act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED, + .proto_act_func = NULL + }, + [RTE_FLOW_ACTION_TYPE_DEC_TTL] = { + .act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED, + .proto_act_func = NULL + }, + [RTE_FLOW_ACTION_TYPE_SET_TTL] = { + .act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED, + .proto_act_func = NULL + }, + [RTE_FLOW_ACTION_TYPE_SET_MAC_SRC] = { + .act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED, + .proto_act_func = NULL + }, + [RTE_FLOW_ACTION_TYPE_SET_MAC_DST] = { + .act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED, + .proto_act_func = NULL + }, + [RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ] = { + .act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED, + .proto_act_func = NULL + }, + [RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ] = { + .act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED, + .proto_act_func = NULL + }, + [RTE_FLOW_ACTION_TYPE_INC_TCP_ACK] = { + .act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED, + .proto_act_func = NULL + }, + [RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK] = { + .act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED, + .proto_act_func = NULL + } +}; + +struct bnxt_ulp_cache_tbl_params ulp_cache_tbl_params[] = { + [BNXT_ULP_CACHE_TBL_ID_L2_CNTXT_TCAM_INGRESS] = { + .num_entries = 16384 + }, + [BNXT_ULP_CACHE_TBL_ID_L2_CNTXT_TCAM_EGRESS] = { + .num_entries = 16384 + }, + [BNXT_ULP_CACHE_TBL_ID_PROFILE_TCAM_INGRESS] = { + .num_entries = 16384 + }, + [BNXT_ULP_CACHE_TBL_ID_PROFILE_TCAM_EGRESS] = { + .num_entries = 16384 + } +}; + +struct bnxt_ulp_def_ident_info ulp_def_ident_tbl[] = { + [0] = { + .ident_type = TF_IDENT_TYPE_PROF_FUNC, + .def_regfile_index = + BNXT_ULP_DEF_REGFILE_INDEX_DEF_PROF_FUNC_ID, + .direction = TF_DIR_RX + } +}; + +struct bnxt_ulp_device_params ulp_device_params[BNXT_ULP_DEVICE_ID_LAST] = { + [BNXT_ULP_DEVICE_ID_WH_PLUS] = { + .global_fid_enable = BNXT_ULP_SYM_YES, + .byte_order = (enum bnxt_ulp_byte_order) + BNXT_ULP_SYM_LITTLE_ENDIAN, + .encap_byte_swap = 1, + .lfid_entries = 16384, + .lfid_entry_size = 4, + .gfid_entries = 65536, + .gfid_entry_size = 4, + .num_flows = 32768, + .num_resources_per_flow = 8 + } +}; + +struct bnxt_ulp_rte_hdr_info ulp_hdr_info[] = { + [RTE_FLOW_ITEM_TYPE_END] = { + .hdr_type = BNXT_ULP_HDR_TYPE_END, + .proto_hdr_func = NULL + }, + [RTE_FLOW_ITEM_TYPE_VOID] = { + .hdr_type = BNXT_ULP_HDR_TYPE_SUPPORTED, + .proto_hdr_func = ulp_rte_void_hdr_handler + }, + [RTE_FLOW_ITEM_TYPE_INVERT] = { + .hdr_type = BNXT_ULP_HDR_TYPE_NOT_SUPPORTED, + 
.proto_hdr_func = NULL + }, + [RTE_FLOW_ITEM_TYPE_ANY] = { + .hdr_type = BNXT_ULP_HDR_TYPE_NOT_SUPPORTED, + .proto_hdr_func = NULL + }, + [RTE_FLOW_ITEM_TYPE_PF] = { + .hdr_type = BNXT_ULP_HDR_TYPE_SUPPORTED, + .proto_hdr_func = ulp_rte_pf_hdr_handler + }, + [RTE_FLOW_ITEM_TYPE_VF] = { + .hdr_type = BNXT_ULP_HDR_TYPE_SUPPORTED, + .proto_hdr_func = ulp_rte_vf_hdr_handler + }, + [RTE_FLOW_ITEM_TYPE_PHY_PORT] = { + .hdr_type = BNXT_ULP_HDR_TYPE_SUPPORTED, + .proto_hdr_func = ulp_rte_phy_port_hdr_handler + }, + [RTE_FLOW_ITEM_TYPE_PORT_ID] = { + .hdr_type = BNXT_ULP_HDR_TYPE_SUPPORTED, + .proto_hdr_func = ulp_rte_port_id_hdr_handler + }, + [RTE_FLOW_ITEM_TYPE_RAW] = { + .hdr_type = BNXT_ULP_HDR_TYPE_NOT_SUPPORTED, + .proto_hdr_func = NULL + }, + [RTE_FLOW_ITEM_TYPE_ETH] = { + .hdr_type = BNXT_ULP_HDR_TYPE_SUPPORTED, + .proto_hdr_func = ulp_rte_eth_hdr_handler + }, + [RTE_FLOW_ITEM_TYPE_VLAN] = { + .hdr_type = BNXT_ULP_HDR_TYPE_SUPPORTED, + .proto_hdr_func = ulp_rte_vlan_hdr_handler + }, + [RTE_FLOW_ITEM_TYPE_IPV4] = { + .hdr_type = BNXT_ULP_HDR_TYPE_SUPPORTED, + .proto_hdr_func = ulp_rte_ipv4_hdr_handler + }, + [RTE_FLOW_ITEM_TYPE_IPV6] = { + .hdr_type = BNXT_ULP_HDR_TYPE_SUPPORTED, + .proto_hdr_func = ulp_rte_ipv6_hdr_handler + }, + [RTE_FLOW_ITEM_TYPE_ICMP] = { + .hdr_type = BNXT_ULP_HDR_TYPE_NOT_SUPPORTED, + .proto_hdr_func = NULL + }, + [RTE_FLOW_ITEM_TYPE_UDP] = { + .hdr_type = BNXT_ULP_HDR_TYPE_SUPPORTED, + .proto_hdr_func = ulp_rte_udp_hdr_handler + }, + [RTE_FLOW_ITEM_TYPE_TCP] = { + .hdr_type = BNXT_ULP_HDR_TYPE_SUPPORTED, + .proto_hdr_func = ulp_rte_tcp_hdr_handler + }, + [RTE_FLOW_ITEM_TYPE_SCTP] = { + .hdr_type = BNXT_ULP_HDR_TYPE_NOT_SUPPORTED, + .proto_hdr_func = NULL + }, + [RTE_FLOW_ITEM_TYPE_VXLAN] = { + .hdr_type = BNXT_ULP_HDR_TYPE_SUPPORTED, + .proto_hdr_func = ulp_rte_vxlan_hdr_handler + }, + [RTE_FLOW_ITEM_TYPE_E_TAG] = { + .hdr_type = BNXT_ULP_HDR_TYPE_NOT_SUPPORTED, + .proto_hdr_func = NULL + }, + [RTE_FLOW_ITEM_TYPE_NVGRE] = { + .hdr_type = BNXT_ULP_HDR_TYPE_NOT_SUPPORTED, + .proto_hdr_func = NULL + }, + [RTE_FLOW_ITEM_TYPE_MPLS] = { + .hdr_type = BNXT_ULP_HDR_TYPE_NOT_SUPPORTED, + .proto_hdr_func = NULL + }, + [RTE_FLOW_ITEM_TYPE_GRE] = { + .hdr_type = BNXT_ULP_HDR_TYPE_NOT_SUPPORTED, + .proto_hdr_func = NULL + }, + [RTE_FLOW_ITEM_TYPE_FUZZY] = { + .hdr_type = BNXT_ULP_HDR_TYPE_NOT_SUPPORTED, + .proto_hdr_func = NULL + }, + [RTE_FLOW_ITEM_TYPE_GTP] = { + .hdr_type = BNXT_ULP_HDR_TYPE_NOT_SUPPORTED, + .proto_hdr_func = NULL + }, + [RTE_FLOW_ITEM_TYPE_GTPC] = { + .hdr_type = BNXT_ULP_HDR_TYPE_NOT_SUPPORTED, + .proto_hdr_func = NULL + }, + [RTE_FLOW_ITEM_TYPE_GTPU] = { + .hdr_type = BNXT_ULP_HDR_TYPE_NOT_SUPPORTED, + .proto_hdr_func = NULL + }, + [RTE_FLOW_ITEM_TYPE_ESP] = { + .hdr_type = BNXT_ULP_HDR_TYPE_NOT_SUPPORTED, + .proto_hdr_func = NULL + }, + [RTE_FLOW_ITEM_TYPE_GENEVE] = { + .hdr_type = BNXT_ULP_HDR_TYPE_NOT_SUPPORTED, + .proto_hdr_func = NULL + }, + [RTE_FLOW_ITEM_TYPE_VXLAN_GPE] = { + .hdr_type = BNXT_ULP_HDR_TYPE_NOT_SUPPORTED, + .proto_hdr_func = NULL + }, + [RTE_FLOW_ITEM_TYPE_ARP_ETH_IPV4] = { + .hdr_type = BNXT_ULP_HDR_TYPE_NOT_SUPPORTED, + .proto_hdr_func = NULL + }, + [RTE_FLOW_ITEM_TYPE_IPV6_EXT] = { + .hdr_type = BNXT_ULP_HDR_TYPE_NOT_SUPPORTED, + .proto_hdr_func = NULL + }, + [RTE_FLOW_ITEM_TYPE_ICMP6] = { + .hdr_type = BNXT_ULP_HDR_TYPE_NOT_SUPPORTED, + .proto_hdr_func = NULL + }, + [RTE_FLOW_ITEM_TYPE_ICMP6_ND_NS] = { + .hdr_type = BNXT_ULP_HDR_TYPE_NOT_SUPPORTED, + .proto_hdr_func = NULL + }, + [RTE_FLOW_ITEM_TYPE_ICMP6_ND_NA] = { + .hdr_type = 
BNXT_ULP_HDR_TYPE_NOT_SUPPORTED, + .proto_hdr_func = NULL + }, + [RTE_FLOW_ITEM_TYPE_ICMP6_ND_OPT] = { + .hdr_type = BNXT_ULP_HDR_TYPE_NOT_SUPPORTED, + .proto_hdr_func = NULL + }, + [RTE_FLOW_ITEM_TYPE_ICMP6_ND_OPT_SLA_ETH] = { + .hdr_type = BNXT_ULP_HDR_TYPE_NOT_SUPPORTED, + .proto_hdr_func = NULL + }, + [RTE_FLOW_ITEM_TYPE_ICMP6_ND_OPT_TLA_ETH] = { + .hdr_type = BNXT_ULP_HDR_TYPE_NOT_SUPPORTED, + .proto_hdr_func = NULL + }, + [RTE_FLOW_ITEM_TYPE_MARK] = { + .hdr_type = BNXT_ULP_HDR_TYPE_NOT_SUPPORTED, + .proto_hdr_func = NULL + }, + [RTE_FLOW_ITEM_TYPE_META] = { + .hdr_type = BNXT_ULP_HDR_TYPE_NOT_SUPPORTED, + .proto_hdr_func = NULL + }, + [RTE_FLOW_ITEM_TYPE_GRE_KEY] = { + .hdr_type = BNXT_ULP_HDR_TYPE_NOT_SUPPORTED, + .proto_hdr_func = NULL + }, + [RTE_FLOW_ITEM_TYPE_GTP_PSC] = { + .hdr_type = BNXT_ULP_HDR_TYPE_NOT_SUPPORTED, + .proto_hdr_func = NULL + }, + [RTE_FLOW_ITEM_TYPE_PPPOES] = { + .hdr_type = BNXT_ULP_HDR_TYPE_NOT_SUPPORTED, + .proto_hdr_func = NULL + }, + [RTE_FLOW_ITEM_TYPE_PPPOED] = { + .hdr_type = BNXT_ULP_HDR_TYPE_NOT_SUPPORTED, + .proto_hdr_func = NULL + }, + [RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID] = { + .hdr_type = BNXT_ULP_HDR_TYPE_NOT_SUPPORTED, + .proto_hdr_func = NULL + }, + [RTE_FLOW_ITEM_TYPE_NSH] = { + .hdr_type = BNXT_ULP_HDR_TYPE_NOT_SUPPORTED, + .proto_hdr_func = NULL + }, + [RTE_FLOW_ITEM_TYPE_IGMP] = { + .hdr_type = BNXT_ULP_HDR_TYPE_NOT_SUPPORTED, + .proto_hdr_func = NULL + }, + [RTE_FLOW_ITEM_TYPE_AH] = { + .hdr_type = BNXT_ULP_HDR_TYPE_NOT_SUPPORTED, + .proto_hdr_func = NULL + }, + [RTE_FLOW_ITEM_TYPE_HIGIG2] = { + .hdr_type = BNXT_ULP_HDR_TYPE_NOT_SUPPORTED, + .proto_hdr_func = NULL + } +}; + +uint32_t bnxt_ulp_encap_vtag_map[] = { + [0] = BNXT_ULP_ENCAP_VTAG_ENCODING_NOP, + [1] = BNXT_ULP_ENCAP_VTAG_ENCODING_STAG_ECAP_PRI, + [2] = BNXT_ULP_ENCAP_VTAG_ENCODING_DTAG_ECAP_PRI +}; + +uint16_t ulp_class_sig_tbl[BNXT_ULP_CLASS_SIG_TBL_MAX_SZ] = { + [BNXT_ULP_CLASS_HID_0092] = 1 +}; + +struct bnxt_ulp_class_match_info ulp_class_match_list[] = { + [1] = { + .class_hid = BNXT_ULP_CLASS_HID_0092, + .hdr_sig = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_O_IPV4 | + BNXT_ULP_HDR_BIT_O_UDP | + BNXT_ULP_FLOW_DIR_BITMASK_ING }, + .field_sig = { .bits = + BNXT_ULP_HF0_BITMASK_O_IPV4_SRC_ADDR | + BNXT_ULP_HF0_BITMASK_O_IPV4_DST_ADDR | + BNXT_ULP_HF0_BITMASK_O_UDP_SRC_PORT | + BNXT_ULP_HF0_BITMASK_O_UDP_DST_PORT | + BNXT_ULP_MATCH_TYPE_BITMASK_EM }, + .class_tid = 0, + .act_vnic = 0, + .wc_pri = 0 + } +}; + +uint16_t ulp_act_sig_tbl[BNXT_ULP_ACT_SIG_TBL_MAX_SZ] = { + [BNXT_ULP_ACT_HID_0029] = 1 +}; + +struct bnxt_ulp_act_match_info ulp_act_match_list[] = { + [1] = { + .act_hid = BNXT_ULP_ACT_HID_0029, + .act_sig = { .bits = + BNXT_ULP_ACTION_BIT_MARK | + BNXT_ULP_ACTION_BIT_RSS | + BNXT_ULP_ACTION_BIT_VNIC | + BNXT_ULP_FLOW_DIR_BITMASK_ING }, + .act_tid = 0 + } +}; + +struct bnxt_ulp_mapper_tbl_list_info ulp_class_tmpl_list[] = { + [((0 << BNXT_ULP_LOG2_MAX_NUM_DEV) | + BNXT_ULP_DEVICE_ID_WH_PLUS)] = { + .device_name = BNXT_ULP_DEVICE_ID_WH_PLUS, + .num_tbls = 5, + .start_tbl_idx = 0 + } +}; + +struct bnxt_ulp_mapper_class_tbl_info ulp_class_tbl_list[] = { + { + .resource_func = BNXT_ULP_RESOURCE_FUNC_CACHE_TABLE, + .table_type = TF_TCAM_TBL_TYPE_L2_CTXT_TCAM, + .direction = TF_DIR_RX, + .priority = BNXT_ULP_PRIORITY_NOT_USED, + .srch_b4_alloc = BNXT_ULP_SEARCH_BEFORE_ALLOC_NO, + .key_start_idx = 0, + .blob_key_bit_size = 12, + .key_bit_size = 12, + .key_num_fields = 2, + .result_start_idx = 0, + .result_bit_size = 10, + .result_num_fields = 1, + .ident_start_idx 
= 0, + .ident_nums = 1, + .mark_enable = BNXT_ULP_MARK_ENABLE_NO, + .critical_resource = 0, + .cache_tbl_id = BNXT_ULP_CACHE_TBL_ID_L2_CNTXT_TCAM_INGRESS, + .regfile_wr_idx = BNXT_ULP_REGFILE_INDEX_NOT_USED + }, + { + .resource_func = BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE, + .table_type = TF_TCAM_TBL_TYPE_L2_CTXT_TCAM, + .direction = TF_DIR_RX, + .priority = BNXT_ULP_PRIORITY_LEVEL_0, + .srch_b4_alloc = BNXT_ULP_SEARCH_BEFORE_ALLOC_NO, + .key_start_idx = 2, + .blob_key_bit_size = 167, + .key_bit_size = 167, + .key_num_fields = 13, + .result_start_idx = 1, + .result_bit_size = 64, + .result_num_fields = 13, + .ident_start_idx = 1, + .ident_nums = 0, + .mark_enable = BNXT_ULP_MARK_ENABLE_NO, + .critical_resource = 0, + .cache_tbl_id = 0, + .regfile_wr_idx = BNXT_ULP_REGFILE_INDEX_NOT_USED + }, + { + .resource_func = BNXT_ULP_RESOURCE_FUNC_CACHE_TABLE, + .table_type = TF_TCAM_TBL_TYPE_PROF_TCAM, + .direction = TF_DIR_RX, + .priority = BNXT_ULP_PRIORITY_NOT_USED, + .srch_b4_alloc = BNXT_ULP_SEARCH_BEFORE_ALLOC_NO, + .key_start_idx = 15, + .blob_key_bit_size = 16, + .key_bit_size = 16, + .key_num_fields = 3, + .result_start_idx = 14, + .result_bit_size = 10, + .result_num_fields = 1, + .ident_start_idx = 1, + .ident_nums = 1, + .mark_enable = BNXT_ULP_MARK_ENABLE_NO, + .critical_resource = 0, + .cache_tbl_id = BNXT_ULP_CACHE_TBL_ID_PROFILE_TCAM_INGRESS, + .regfile_wr_idx = BNXT_ULP_REGFILE_INDEX_NOT_USED + }, + { + .resource_func = BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE, + .table_type = TF_TCAM_TBL_TYPE_PROF_TCAM, + .direction = TF_DIR_RX, + .priority = BNXT_ULP_PRIORITY_LEVEL_0, + .srch_b4_alloc = BNXT_ULP_SEARCH_BEFORE_ALLOC_NO, + .key_start_idx = 18, + .blob_key_bit_size = 81, + .key_bit_size = 81, + .key_num_fields = 42, + .result_start_idx = 15, + .result_bit_size = 38, + .result_num_fields = 8, + .ident_start_idx = 2, + .ident_nums = 0, + .mark_enable = BNXT_ULP_MARK_ENABLE_NO, + .critical_resource = 0, + .cache_tbl_id = 0, + .regfile_wr_idx = BNXT_ULP_REGFILE_INDEX_NOT_USED + }, + { + .resource_func = BNXT_ULP_RESOURCE_FUNC_EM_TABLE, + .table_type = TF_MEM_EXTERNAL, + .direction = TF_DIR_RX, + .priority = BNXT_ULP_PRIORITY_NOT_USED, + .srch_b4_alloc = BNXT_ULP_SEARCH_BEFORE_ALLOC_NO, + .key_start_idx = 60, + .blob_key_bit_size = 448, + .key_bit_size = 448, + .key_num_fields = 11, + .result_start_idx = 23, + .result_bit_size = 64, + .result_num_fields = 9, + .ident_start_idx = 2, + .ident_nums = 0, + .mark_enable = BNXT_ULP_MARK_ENABLE_YES, + .critical_resource = 1, + .cache_tbl_id = 0, + .regfile_wr_idx = BNXT_ULP_REGFILE_INDEX_NOT_USED + } +}; + +struct bnxt_ulp_mapper_class_key_field_info ulp_class_key_field_list[] = { + { + .field_bit_size = 8, + .mask_opcode = BNXT_ULP_MASK_OPC_SET_TO_CONSTANT, + .mask_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + .spec_opcode = BNXT_ULP_SPEC_OPC_SET_TO_HDR_FIELD, + .spec_operand = {(BNXT_ULP_HF0_IDX_SVIF_INDEX >> 8) & 0xff, + BNXT_ULP_HF0_IDX_SVIF_INDEX & 0xff, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 4, + .mask_opcode = BNXT_ULP_MASK_OPC_SET_TO_CONSTANT, + .mask_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + .spec_opcode = BNXT_ULP_SPEC_OPC_SET_TO_CONSTANT, + .spec_operand = {BNXT_ULP_SYM_TUN_HDR_TYPE_NONE, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 12, + .mask_opcode = 
BNXT_ULP_MASK_OPC_SET_TO_CONSTANT, + .mask_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + .spec_opcode = BNXT_ULP_SPEC_OPC_SET_TO_CONSTANT, + .spec_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 12, + .mask_opcode = BNXT_ULP_MASK_OPC_SET_TO_CONSTANT, + .mask_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + .spec_opcode = BNXT_ULP_SPEC_OPC_SET_TO_CONSTANT, + .spec_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 48, + .mask_opcode = BNXT_ULP_MASK_OPC_SET_TO_CONSTANT, + .mask_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + .spec_opcode = BNXT_ULP_SPEC_OPC_SET_TO_CONSTANT, + .spec_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 8, + .mask_opcode = BNXT_ULP_MASK_OPC_SET_TO_HDR_FIELD, + .mask_operand = {(BNXT_ULP_HF0_IDX_SVIF_INDEX >> 8) & 0xff, + BNXT_ULP_HF0_IDX_SVIF_INDEX & 0xff, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + .spec_opcode = BNXT_ULP_SPEC_OPC_SET_TO_HDR_FIELD, + .spec_operand = {(BNXT_ULP_HF0_IDX_SVIF_INDEX >> 8) & 0xff, + BNXT_ULP_HF0_IDX_SVIF_INDEX & 0xff, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 4, + .mask_opcode = BNXT_ULP_MASK_OPC_SET_TO_CONSTANT, + .mask_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + .spec_opcode = BNXT_ULP_SPEC_OPC_SET_TO_CONSTANT, + .spec_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 12, + .mask_opcode = BNXT_ULP_MASK_OPC_SET_TO_CONSTANT, + .mask_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + .spec_opcode = BNXT_ULP_SPEC_OPC_SET_TO_CONSTANT, + .spec_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 12, + .mask_opcode = BNXT_ULP_MASK_OPC_SET_TO_CONSTANT, + .mask_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + .spec_opcode = BNXT_ULP_SPEC_OPC_SET_TO_CONSTANT, + .spec_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 48, + .mask_opcode = BNXT_ULP_MASK_OPC_SET_TO_CONSTANT, + .mask_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + .spec_opcode = BNXT_ULP_SPEC_OPC_SET_TO_CONSTANT, + .spec_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 2, + .mask_opcode = BNXT_ULP_MASK_OPC_SET_TO_CONSTANT, + .mask_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + .spec_opcode = BNXT_ULP_SPEC_OPC_SET_TO_CONSTANT, + .spec_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 2, + .mask_opcode = BNXT_ULP_MASK_OPC_SET_TO_CONSTANT, + .mask_operand = {0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + .spec_opcode = BNXT_ULP_SPEC_OPC_SET_TO_CONSTANT, + .spec_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 4, + .mask_opcode = BNXT_ULP_MASK_OPC_SET_TO_CONSTANT, + .mask_operand = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, + .spec_opcode = BNXT_ULP_SPEC_OPC_SET_TO_CONSTANT, + .spec_operand = {BNXT_ULP_SYM_TUN_HDR_TYPE_NONE, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 2, + .mask_opcode = BNXT_ULP_MASK_OPC_SET_TO_CONSTANT, + .mask_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + .spec_opcode = BNXT_ULP_SPEC_OPC_SET_TO_CONSTANT, + .spec_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 1, + .mask_opcode = BNXT_ULP_MASK_OPC_SET_TO_CONSTANT, + .mask_operand = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, + .spec_opcode = BNXT_ULP_SPEC_OPC_SET_TO_CONSTANT, + .spec_operand = {0x01, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + /* class template id: 0, wh_plus, table: profile_tcam_cache_0 */ + { + .field_bit_size = 1, + .mask_opcode = BNXT_ULP_MASK_OPC_SET_TO_CONSTANT, + .mask_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + .spec_opcode = BNXT_ULP_SPEC_OPC_SET_TO_CONSTANT, + .spec_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 7, + .mask_opcode = BNXT_ULP_MASK_OPC_SET_TO_CONSTANT, + .mask_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + .spec_opcode = BNXT_ULP_SPEC_OPC_SET_TO_DEF_REGFILE, + .spec_operand = { + (BNXT_ULP_DEF_REGFILE_INDEX_DEF_PROF_FUNC_ID >> 8) & 0xff, + BNXT_ULP_DEF_REGFILE_INDEX_DEF_PROF_FUNC_ID & 0xff, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 8, + .mask_opcode = BNXT_ULP_MASK_OPC_SET_TO_CONSTANT, + .mask_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + .spec_opcode = BNXT_ULP_SPEC_OPC_SET_TO_REGFILE, + .spec_operand = {(BNXT_ULP_REGFILE_INDEX_CLASS_TID >> 8) & 0xff, + BNXT_ULP_REGFILE_INDEX_CLASS_TID & 0xff, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 1, + .mask_opcode = BNXT_ULP_MASK_OPC_SET_TO_CONSTANT, + .mask_operand = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, + .spec_opcode = BNXT_ULP_SPEC_OPC_SET_TO_CONSTANT, + .spec_operand = {0x01, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 4, + .mask_opcode = BNXT_ULP_MASK_OPC_SET_TO_CONSTANT, + .mask_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + .spec_opcode = BNXT_ULP_SPEC_OPC_SET_TO_CONSTANT, + .spec_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 1, + .mask_opcode 
= BNXT_ULP_MASK_OPC_SET_TO_CONSTANT, + .mask_operand = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, + .spec_opcode = BNXT_ULP_SPEC_OPC_SET_TO_CONSTANT, + .spec_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 1, + .mask_opcode = BNXT_ULP_MASK_OPC_SET_TO_CONSTANT, + .mask_operand = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, + .spec_opcode = BNXT_ULP_SPEC_OPC_SET_TO_CONSTANT, + .spec_operand = {0x01, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 1, + .mask_opcode = BNXT_ULP_MASK_OPC_SET_TO_CONSTANT, + .mask_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + .spec_opcode = BNXT_ULP_SPEC_OPC_SET_TO_CONSTANT, + .spec_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 1, + .mask_opcode = BNXT_ULP_MASK_OPC_SET_TO_CONSTANT, + .mask_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + .spec_opcode = BNXT_ULP_SPEC_OPC_SET_TO_CONSTANT, + .spec_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 1, + .mask_opcode = BNXT_ULP_MASK_OPC_SET_TO_CONSTANT, + .mask_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + .spec_opcode = BNXT_ULP_SPEC_OPC_SET_TO_CONSTANT, + .spec_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 4, + .mask_opcode = BNXT_ULP_MASK_OPC_SET_TO_CONSTANT, + .mask_operand = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, + .spec_opcode = BNXT_ULP_SPEC_OPC_SET_TO_CONSTANT, + .spec_operand = {BNXT_ULP_SYM_L3_HDR_TYPE_IPV4, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 1, + .mask_opcode = BNXT_ULP_MASK_OPC_SET_TO_CONSTANT, + .mask_operand = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, + .spec_opcode = BNXT_ULP_SPEC_OPC_SET_TO_CONSTANT, + .spec_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 1, + .mask_opcode = BNXT_ULP_MASK_OPC_SET_TO_CONSTANT, + .mask_operand = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, + .spec_opcode = BNXT_ULP_SPEC_OPC_SET_TO_CONSTANT, + .spec_operand = {0x01, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 1, + .mask_opcode = BNXT_ULP_MASK_OPC_SET_TO_CONSTANT, + .mask_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + .spec_opcode = BNXT_ULP_SPEC_OPC_SET_TO_CONSTANT, + .spec_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 1, + .mask_opcode = BNXT_ULP_MASK_OPC_SET_TO_CONSTANT, + .mask_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + .spec_opcode = 
BNXT_ULP_SPEC_OPC_SET_TO_CONSTANT, + .spec_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 2, + .mask_opcode = BNXT_ULP_MASK_OPC_SET_TO_CONSTANT, + .mask_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + .spec_opcode = BNXT_ULP_SPEC_OPC_SET_TO_CONSTANT, + .spec_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 2, + .mask_opcode = BNXT_ULP_MASK_OPC_SET_TO_CONSTANT, + .mask_operand = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, + .spec_opcode = BNXT_ULP_SPEC_OPC_SET_TO_CONSTANT, + .spec_operand = {BNXT_ULP_SYM_L2_HDR_TYPE_DIX, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 1, + .mask_opcode = BNXT_ULP_MASK_OPC_SET_TO_CONSTANT, + .mask_operand = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, + .spec_opcode = BNXT_ULP_SPEC_OPC_SET_TO_CONSTANT, + .spec_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 1, + .mask_opcode = BNXT_ULP_MASK_OPC_SET_TO_CONSTANT, + .mask_operand = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, + .spec_opcode = BNXT_ULP_SPEC_OPC_SET_TO_CONSTANT, + .spec_operand = {0x01, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 3, + .mask_opcode = BNXT_ULP_MASK_OPC_SET_TO_CONSTANT, + .mask_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + .spec_opcode = BNXT_ULP_SPEC_OPC_SET_TO_CONSTANT, + .spec_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 4, + .mask_opcode = BNXT_ULP_MASK_OPC_SET_TO_CONSTANT, + .mask_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + .spec_opcode = BNXT_ULP_SPEC_OPC_SET_TO_CONSTANT, + .spec_operand = {BNXT_ULP_SYM_TUN_HDR_TYPE_NONE, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 1, + .mask_opcode = BNXT_ULP_MASK_OPC_SET_TO_CONSTANT, + .mask_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + .spec_opcode = BNXT_ULP_SPEC_OPC_SET_TO_CONSTANT, + .spec_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 1, + .mask_opcode = BNXT_ULP_MASK_OPC_SET_TO_CONSTANT, + .mask_operand = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, + .spec_opcode = BNXT_ULP_SPEC_OPC_SET_TO_CONSTANT, + .spec_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 1, + .mask_opcode = BNXT_ULP_MASK_OPC_SET_TO_CONSTANT, + .mask_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + .spec_opcode = BNXT_ULP_SPEC_OPC_SET_TO_CONSTANT, + .spec_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { 
+ .field_bit_size = 4, + .mask_opcode = BNXT_ULP_MASK_OPC_SET_TO_CONSTANT, + .mask_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + .spec_opcode = BNXT_ULP_SPEC_OPC_SET_TO_CONSTANT, + .spec_operand = {BNXT_ULP_SYM_TL4_HDR_TYPE_UDP, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 1, + .mask_opcode = BNXT_ULP_MASK_OPC_SET_TO_CONSTANT, + .mask_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + .spec_opcode = BNXT_ULP_SPEC_OPC_SET_TO_CONSTANT, + .spec_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 1, + .mask_opcode = BNXT_ULP_MASK_OPC_SET_TO_CONSTANT, + .mask_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + .spec_opcode = BNXT_ULP_SPEC_OPC_SET_TO_CONSTANT, + .spec_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 1, + .mask_opcode = BNXT_ULP_MASK_OPC_SET_TO_CONSTANT, + .mask_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + .spec_opcode = BNXT_ULP_SPEC_OPC_SET_TO_CONSTANT, + .spec_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 1, + .mask_opcode = BNXT_ULP_MASK_OPC_SET_TO_CONSTANT, + .mask_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + .spec_opcode = BNXT_ULP_SPEC_OPC_SET_TO_CONSTANT, + .spec_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 1, + .mask_opcode = BNXT_ULP_MASK_OPC_SET_TO_CONSTANT, + .mask_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + .spec_opcode = BNXT_ULP_SPEC_OPC_SET_TO_CONSTANT, + .spec_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 4, + .mask_opcode = BNXT_ULP_MASK_OPC_SET_TO_CONSTANT, + .mask_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + .spec_opcode = BNXT_ULP_SPEC_OPC_SET_TO_CONSTANT, + .spec_operand = {BNXT_ULP_SYM_TL3_HDR_TYPE_IPV4, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 1, + .mask_opcode = BNXT_ULP_MASK_OPC_SET_TO_CONSTANT, + .mask_operand = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, + .spec_opcode = BNXT_ULP_SPEC_OPC_SET_TO_CONSTANT, + .spec_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 1, + .mask_opcode = BNXT_ULP_MASK_OPC_SET_TO_CONSTANT, + .mask_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + .spec_opcode = BNXT_ULP_SPEC_OPC_SET_TO_CONSTANT, + .spec_operand = {0x01, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 1, + .mask_opcode = BNXT_ULP_MASK_OPC_SET_TO_CONSTANT, + .mask_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + .spec_opcode = BNXT_ULP_SPEC_OPC_SET_TO_CONSTANT, + .spec_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 1, + .mask_opcode = BNXT_ULP_MASK_OPC_SET_TO_CONSTANT, + .mask_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + .spec_opcode = BNXT_ULP_SPEC_OPC_SET_TO_CONSTANT, + .spec_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 2, + .mask_opcode = BNXT_ULP_MASK_OPC_SET_TO_CONSTANT, + .mask_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + .spec_opcode = BNXT_ULP_SPEC_OPC_SET_TO_CONSTANT, + .spec_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 2, + .mask_opcode = BNXT_ULP_MASK_OPC_SET_TO_CONSTANT, + .mask_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + .spec_opcode = BNXT_ULP_SPEC_OPC_SET_TO_CONSTANT, + .spec_operand = {BNXT_ULP_SYM_TL2_HDR_TYPE_DIX, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 1, + .mask_opcode = BNXT_ULP_MASK_OPC_SET_TO_CONSTANT, + .mask_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + .spec_opcode = BNXT_ULP_SPEC_OPC_SET_TO_CONSTANT, + .spec_operand = {0x01, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 1, + .mask_opcode = BNXT_ULP_MASK_OPC_SET_TO_CONSTANT, + .mask_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + .spec_opcode = BNXT_ULP_SPEC_OPC_SET_TO_CONSTANT, + .spec_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 9, + .mask_opcode = BNXT_ULP_MASK_OPC_SET_TO_CONSTANT, + .mask_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + .spec_opcode = BNXT_ULP_SPEC_OPC_SET_TO_CONSTANT, + .spec_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 7, + .mask_opcode = BNXT_ULP_MASK_OPC_SET_TO_CONSTANT, + .mask_operand = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, + .spec_opcode = BNXT_ULP_SPEC_OPC_SET_TO_DEF_REGFILE, + .spec_operand = { + (BNXT_ULP_DEF_REGFILE_INDEX_DEF_PROF_FUNC_ID >> 8) & 0xff, + BNXT_ULP_DEF_REGFILE_INDEX_DEF_PROF_FUNC_ID & 0xff, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 1, + .mask_opcode = BNXT_ULP_MASK_OPC_SET_TO_CONSTANT, + .mask_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + .spec_opcode = BNXT_ULP_SPEC_OPC_SET_TO_CONSTANT, + .spec_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 2, + .mask_opcode = BNXT_ULP_MASK_OPC_SET_TO_CONSTANT, + .mask_operand = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, + .spec_opcode = 
BNXT_ULP_SPEC_OPC_SET_TO_CONSTANT, + .spec_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 4, + .mask_opcode = BNXT_ULP_MASK_OPC_SET_TO_CONSTANT, + .mask_operand = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, + .spec_opcode = BNXT_ULP_SPEC_OPC_SET_TO_CONSTANT, + .spec_operand = {BNXT_ULP_SYM_PKT_TYPE_L2, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 1, + .mask_opcode = BNXT_ULP_MASK_OPC_SET_TO_CONSTANT, + .mask_operand = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, + .spec_opcode = BNXT_ULP_SPEC_OPC_SET_TO_CONSTANT, + .spec_operand = {0x01, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 251, + .mask_opcode = BNXT_ULP_MASK_OPC_SET_TO_CONSTANT, + .mask_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + .spec_opcode = BNXT_ULP_SPEC_OPC_ADD_PAD, + .spec_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 3, + .mask_opcode = BNXT_ULP_MASK_OPC_SET_TO_CONSTANT, + .mask_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + .spec_opcode = BNXT_ULP_SPEC_OPC_SET_TO_CONSTANT, + .spec_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 16, + .mask_opcode = BNXT_ULP_MASK_OPC_SET_TO_CONSTANT, + .mask_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + .spec_opcode = BNXT_ULP_SPEC_OPC_SET_TO_HDR_FIELD, + .spec_operand = {(BNXT_ULP_HF0_IDX_O_UDP_DST_PORT >> 8) & 0xff, + BNXT_ULP_HF0_IDX_O_UDP_DST_PORT & 0xff, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 16, + .mask_opcode = BNXT_ULP_MASK_OPC_SET_TO_CONSTANT, + .mask_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + .spec_opcode = BNXT_ULP_SPEC_OPC_SET_TO_HDR_FIELD, + .spec_operand = {(BNXT_ULP_HF0_IDX_O_UDP_SRC_PORT >> 8) & 0xff, + BNXT_ULP_HF0_IDX_O_UDP_SRC_PORT & 0xff, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 8, + .mask_opcode = BNXT_ULP_MASK_OPC_SET_TO_CONSTANT, + .mask_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + .spec_opcode = BNXT_ULP_SPEC_OPC_SET_TO_CONSTANT, + .spec_operand = {BNXT_ULP_SYM_IP_PROTO_UDP, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 32, + .mask_opcode = BNXT_ULP_MASK_OPC_SET_TO_CONSTANT, + .mask_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + .spec_opcode = BNXT_ULP_SPEC_OPC_SET_TO_HDR_FIELD, + .spec_operand = {(BNXT_ULP_HF0_IDX_O_IPV4_DST_ADDR >> 8) & 0xff, + BNXT_ULP_HF0_IDX_O_IPV4_DST_ADDR & 0xff, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 32, + .mask_opcode = BNXT_ULP_MASK_OPC_SET_TO_CONSTANT, + .mask_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + .spec_opcode = BNXT_ULP_SPEC_OPC_SET_TO_HDR_FIELD, + .spec_operand = {(BNXT_ULP_HF0_IDX_O_IPV4_SRC_ADDR >> 8) & 0xff, + BNXT_ULP_HF0_IDX_O_IPV4_SRC_ADDR & 0xff, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 48, + .mask_opcode = BNXT_ULP_MASK_OPC_SET_TO_CONSTANT, + .mask_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + .spec_opcode = BNXT_ULP_SPEC_OPC_SET_TO_CONSTANT, + .spec_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 24, + .mask_opcode = BNXT_ULP_MASK_OPC_SET_TO_CONSTANT, + .mask_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + .spec_opcode = BNXT_ULP_SPEC_OPC_SET_TO_CONSTANT, + .spec_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 10, + .mask_opcode = BNXT_ULP_MASK_OPC_SET_TO_CONSTANT, + .mask_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + .spec_opcode = BNXT_ULP_SPEC_OPC_SET_TO_REGFILE, + .spec_operand = {(BNXT_ULP_REGFILE_INDEX_L2_CNTXT_ID_0 >> 8) & 0xff, + BNXT_ULP_REGFILE_INDEX_L2_CNTXT_ID_0 & 0xff, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 8, + .mask_opcode = BNXT_ULP_MASK_OPC_SET_TO_CONSTANT, + .mask_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + .spec_opcode = BNXT_ULP_SPEC_OPC_SET_TO_REGFILE, + .spec_operand = {(BNXT_ULP_REGFILE_INDEX_EM_PROFILE_ID_0 >> 8) & 0xff, + BNXT_ULP_REGFILE_INDEX_EM_PROFILE_ID_0 & 0xff, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + } +}; + +struct bnxt_ulp_mapper_result_field_info ulp_class_result_field_list[] = { + { + .field_bit_size = 10, + .result_opcode = BNXT_ULP_RESULT_OPC_SET_TO_REGFILE, + .result_operand = {(BNXT_ULP_REGFILE_INDEX_L2_CNTXT_ID_0 >> 8) & 0xff, + BNXT_ULP_REGFILE_INDEX_L2_CNTXT_ID_0 & 0xff, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 10, + .result_opcode = BNXT_ULP_RESULT_OPC_SET_TO_REGFILE, + .result_operand = {(BNXT_ULP_REGFILE_INDEX_L2_CNTXT_ID_0 >> 8) & 0xff, + BNXT_ULP_REGFILE_INDEX_L2_CNTXT_ID_0 & 0xff, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 7, + .result_opcode = BNXT_ULP_RESULT_OPC_SET_TO_DEF_REGFILE, + .result_operand = { + (BNXT_ULP_DEF_REGFILE_INDEX_DEF_PROF_FUNC_ID >> 8) & 0xff, + BNXT_ULP_DEF_REGFILE_INDEX_DEF_PROF_FUNC_ID & 0xff, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 1, + .result_opcode = BNXT_ULP_RESULT_OPC_SET_TO_CONSTANT, + .result_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 4, + .result_opcode = BNXT_ULP_RESULT_OPC_SET_TO_CONSTANT, + .result_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 8, + .result_opcode = BNXT_ULP_RESULT_OPC_SET_TO_CONSTANT, + .result_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 3, + .result_opcode = BNXT_ULP_RESULT_OPC_SET_TO_CONSTANT, + .result_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 6, + .result_opcode = BNXT_ULP_RESULT_OPC_SET_TO_CONSTANT, + .result_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 3, + .result_opcode = BNXT_ULP_RESULT_OPC_SET_TO_CONSTANT, + .result_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 1, + .result_opcode = BNXT_ULP_RESULT_OPC_SET_TO_CONSTANT, + .result_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 16, + .result_opcode = BNXT_ULP_RESULT_OPC_SET_TO_CONSTANT, + .result_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 1, + .result_opcode = BNXT_ULP_RESULT_OPC_SET_TO_CONSTANT, + .result_operand = {0x01, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 2, + .result_opcode = BNXT_ULP_RESULT_OPC_SET_TO_CONSTANT, + .result_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 2, + .result_opcode = BNXT_ULP_RESULT_OPC_SET_TO_CONSTANT, + .result_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + + { + .field_bit_size = 10, + .result_opcode = BNXT_ULP_RESULT_OPC_SET_TO_REGFILE, + .result_operand = {(BNXT_ULP_REGFILE_INDEX_EM_PROFILE_ID_0 >> 8) & 0xff, + BNXT_ULP_REGFILE_INDEX_EM_PROFILE_ID_0 & 0xff, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 4, + .result_opcode = BNXT_ULP_RESULT_OPC_SET_TO_CONSTANT, + .result_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 8, + .result_opcode = BNXT_ULP_RESULT_OPC_SET_TO_CONSTANT, + .result_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 1, + .result_opcode = BNXT_ULP_RESULT_OPC_SET_TO_CONSTANT, + .result_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 10, + .result_opcode = BNXT_ULP_RESULT_OPC_SET_TO_CONSTANT, + .result_operand = {(0x00f9 >> 8) & 0xff, + 0x00f9 & 0xff, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 5, + .result_opcode = BNXT_ULP_RESULT_OPC_SET_TO_CONSTANT, + .result_operand = {0x15, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 8, + .result_opcode = BNXT_ULP_RESULT_OPC_SET_TO_REGFILE, + .result_operand = {(BNXT_ULP_REGFILE_INDEX_EM_PROFILE_ID_0 >> 8) & 0xff, + BNXT_ULP_REGFILE_INDEX_EM_PROFILE_ID_0 & 0xff, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 1, + .result_opcode = BNXT_ULP_RESULT_OPC_SET_TO_CONSTANT, + .result_operand = {0x01, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00} + }, + { + .field_bit_size = 1, + .result_opcode = BNXT_ULP_RESULT_OPC_SET_TO_CONSTANT, + .result_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 33, + .result_opcode = BNXT_ULP_RESULT_OPC_SET_TO_REGFILE, + .result_operand = {(BNXT_ULP_REGFILE_INDEX_ACTION_PTR_MAIN >> 8) & 0xff, + BNXT_ULP_REGFILE_INDEX_ACTION_PTR_MAIN & 0xff, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 1, + .result_opcode = BNXT_ULP_RESULT_OPC_SET_TO_CONSTANT, + .result_operand = {0x01, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 1, + .result_opcode = BNXT_ULP_RESULT_OPC_SET_TO_CONSTANT, + .result_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 5, + .result_opcode = BNXT_ULP_RESULT_OPC_SET_TO_CONSTANT, + .result_operand = {0x02, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 9, + .result_opcode = BNXT_ULP_RESULT_OPC_SET_TO_CONSTANT, + .result_operand = {(0x00c5 >> 8) & 0xff, + 0x00c5 & 0xff, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 11, + .result_opcode = BNXT_ULP_RESULT_OPC_SET_TO_CONSTANT, + .result_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 2, + .result_opcode = BNXT_ULP_RESULT_OPC_SET_TO_CONSTANT, + .result_operand = {0x03, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 1, + .result_opcode = BNXT_ULP_RESULT_OPC_SET_TO_CONSTANT, + .result_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 1, + .result_opcode = BNXT_ULP_RESULT_OPC_SET_TO_CONSTANT, + .result_operand = {0x01, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + } +}; + +struct bnxt_ulp_mapper_ident_info ulp_ident_list[] = { + { + .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, + .ident_type = TF_IDENT_TYPE_L2_CTXT, + .regfile_wr_idx = BNXT_ULP_REGFILE_INDEX_L2_CNTXT_ID_0, + .ident_bit_size = 10, + .ident_bit_pos = 0 + }, + { + .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, + .ident_type = TF_IDENT_TYPE_EM_PROF, + .regfile_wr_idx = BNXT_ULP_REGFILE_INDEX_EM_PROFILE_ID_0, + .ident_bit_size = 10, + .ident_bit_pos = 0 + } +}; + +struct bnxt_ulp_mapper_tbl_list_info ulp_act_tmpl_list[] = { + [((0 << BNXT_ULP_LOG2_MAX_NUM_DEV) | + BNXT_ULP_DEVICE_ID_WH_PLUS)] = { + .device_name = BNXT_ULP_DEVICE_ID_WH_PLUS, + .num_tbls = 1, + .start_tbl_idx = 0 + } +}; + +struct bnxt_ulp_mapper_act_tbl_info ulp_act_tbl_list[] = { + { + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .table_type = TF_TBL_TYPE_EXT, + .direction = TF_DIR_RX, + .srch_b4_alloc = BNXT_ULP_SEARCH_BEFORE_ALLOC_NO, + .result_start_idx = 0, + .result_bit_size = 128, + .result_num_fields = 26, + .encap_num_fields = 0, + .regfile_wr_idx = BNXT_ULP_REGFILE_INDEX_ACTION_PTR_MAIN + } +}; + +struct bnxt_ulp_mapper_result_field_info ulp_act_result_field_list[] = { + { + .field_bit_size = 14, + .result_opcode = BNXT_ULP_RESULT_OPC_SET_TO_CONSTANT, + .result_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 
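+	/*
+	 * The entries in ulp_act_result_field_list build the 128-bit external
+	 * action record referenced by ulp_act_tbl_list[] above
+	 * (result_bit_size = 128, result_num_fields = 26).
+	 */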
0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 1, + .result_opcode = BNXT_ULP_RESULT_OPC_SET_TO_CONSTANT, + .result_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 1, + .result_opcode = BNXT_ULP_RESULT_OPC_SET_TO_CONSTANT, + .result_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 1, + .result_opcode = BNXT_ULP_RESULT_OPC_SET_TO_CONSTANT, + .result_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 1, + .result_opcode = BNXT_ULP_RESULT_OPC_SET_TO_CONSTANT, + .result_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 1, + .result_opcode = BNXT_ULP_RESULT_OPC_SET_TO_CONSTANT, + .result_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 8, + .result_opcode = BNXT_ULP_RESULT_OPC_SET_TO_CONSTANT, + .result_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 1, + .result_opcode = BNXT_ULP_RESULT_OPC_SET_TO_CONSTANT, + .result_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 1, + .result_opcode = BNXT_ULP_RESULT_OPC_SET_TO_CONSTANT, + .result_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 11, + .result_opcode = BNXT_ULP_RESULT_OPC_SET_TO_CONSTANT, + .result_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 1, + .result_opcode = BNXT_ULP_RESULT_OPC_SET_TO_CONSTANT, + .result_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 10, + .result_opcode = BNXT_ULP_RESULT_OPC_SET_TO_CONSTANT, + .result_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 16, + .result_opcode = BNXT_ULP_RESULT_OPC_SET_TO_CONSTANT, + .result_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 10, + .result_opcode = BNXT_ULP_RESULT_OPC_SET_TO_CONSTANT, + .result_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 16, + .result_opcode = BNXT_ULP_RESULT_OPC_SET_TO_CONSTANT, + .result_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 10, + .result_opcode = BNXT_ULP_RESULT_OPC_SET_TO_CONSTANT, + .result_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 1, + .result_opcode = BNXT_ULP_RESULT_OPC_SET_TO_CONSTANT, + .result_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 1, + .result_opcode = BNXT_ULP_RESULT_OPC_SET_TO_CONSTANT, + .result_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00} + }, + { + .field_bit_size = 1, + .result_opcode = BNXT_ULP_RESULT_OPC_SET_TO_CONSTANT, + .result_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 1, + .result_opcode = BNXT_ULP_RESULT_OPC_SET_TO_CONSTANT, + .result_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 4, + .result_opcode = BNXT_ULP_RESULT_OPC_SET_TO_CONSTANT, + .result_operand = {BNXT_ULP_SYM_DECAP_FUNC_NONE, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 12, + .result_opcode = BNXT_ULP_RESULT_OPC_SET_TO_ACT_PROP, + .result_operand = {(BNXT_ULP_ACT_PROP_IDX_VNIC >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_VNIC & 0xff, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 1, + .result_opcode = BNXT_ULP_RESULT_OPC_SET_TO_CONSTANT, + .result_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 1, + .result_opcode = BNXT_ULP_RESULT_OPC_SET_TO_CONSTANT, + .result_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 2, + .result_opcode = BNXT_ULP_RESULT_OPC_SET_TO_CONSTANT, + .result_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + }, + { + .field_bit_size = 1, + .result_opcode = BNXT_ULP_RESULT_OPC_SET_TO_CONSTANT, + .result_operand = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + } +}; diff --git a/src/spdk/dpdk/drivers/net/bnxt/tf_ulp/ulp_template_db.h b/src/spdk/dpdk/drivers/net/bnxt/tf_ulp/ulp_template_db.h new file mode 100644 index 000000000..e6065d2fb --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnxt/tf_ulp/ulp_template_db.h @@ -0,0 +1,456 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2020 Broadcom + * All rights reserved. 
+ */ + +/* + * date: Mon Mar 9 02:37:53 2020 + * version: 0.0 + */ + +#ifndef ULP_TEMPLATE_DB_H_ +#define ULP_TEMPLATE_DB_H_ + +#define BNXT_ULP_REGFILE_MAX_SZ 16 +#define BNXT_ULP_MAX_NUM_DEVICES 4 +#define BNXT_ULP_LOG2_MAX_NUM_DEV 2 +#define BNXT_ULP_CACHE_TBL_MAX_SZ 4 +#define BNXT_ULP_CLASS_SIG_TBL_MAX_SZ 256 +#define BNXT_ULP_CLASS_MATCH_LIST_MAX_SZ 2 +#define BNXT_ULP_CLASS_HID_LOW_PRIME 7919 +#define BNXT_ULP_CLASS_HID_HIGH_PRIME 7919 +#define BNXT_ULP_CLASS_HID_SHFTR 0 +#define BNXT_ULP_CLASS_HID_SHFTL 23 +#define BNXT_ULP_CLASS_HID_MASK 255 +#define BNXT_ULP_ACT_SIG_TBL_MAX_SZ 256 +#define BNXT_ULP_ACT_MATCH_LIST_MAX_SZ 2 +#define BNXT_ULP_ACT_HID_LOW_PRIME 7919 +#define BNXT_ULP_ACT_HID_HIGH_PRIME 7919 +#define BNXT_ULP_ACT_HID_SHFTR 0 +#define BNXT_ULP_ACT_HID_SHFTL 23 +#define BNXT_ULP_ACT_HID_MASK 255 +#define BNXT_ULP_CACHE_TBL_IDENT_MAX_NUM 2 +#define BNXT_ULP_DEF_IDENT_INFO_TBL_MAX_SZ 1 + +enum bnxt_ulp_action_bit { + BNXT_ULP_ACTION_BIT_MARK = 0x0000000000000001, + BNXT_ULP_ACTION_BIT_DROP = 0x0000000000000002, + BNXT_ULP_ACTION_BIT_COUNT = 0x0000000000000004, + BNXT_ULP_ACTION_BIT_RSS = 0x0000000000000008, + BNXT_ULP_ACTION_BIT_METER = 0x0000000000000010, + BNXT_ULP_ACTION_BIT_VNIC = 0x0000000000000020, + BNXT_ULP_ACTION_BIT_VPORT = 0x0000000000000040, + BNXT_ULP_ACTION_BIT_VXLAN_DECAP = 0x0000000000000080, + BNXT_ULP_ACTION_BIT_NVGRE_DECAP = 0x0000000000000100, + BNXT_ULP_ACTION_BIT_OF_POP_MPLS = 0x0000000000000200, + BNXT_ULP_ACTION_BIT_OF_PUSH_MPLS = 0x0000000000000400, + BNXT_ULP_ACTION_BIT_MAC_SWAP = 0x0000000000000800, + BNXT_ULP_ACTION_BIT_SET_MAC_SRC = 0x0000000000001000, + BNXT_ULP_ACTION_BIT_SET_MAC_DST = 0x0000000000002000, + BNXT_ULP_ACTION_BIT_OF_POP_VLAN = 0x0000000000004000, + BNXT_ULP_ACTION_BIT_OF_PUSH_VLAN = 0x0000000000008000, + BNXT_ULP_ACTION_BIT_OF_SET_VLAN_PCP = 0x0000000000010000, + BNXT_ULP_ACTION_BIT_OF_SET_VLAN_VID = 0x0000000000020000, + BNXT_ULP_ACTION_BIT_SET_IPV4_SRC = 0x0000000000040000, + BNXT_ULP_ACTION_BIT_SET_IPV4_DST = 0x0000000000080000, + BNXT_ULP_ACTION_BIT_SET_IPV6_SRC = 0x0000000000100000, + BNXT_ULP_ACTION_BIT_SET_IPV6_DST = 0x0000000000200000, + BNXT_ULP_ACTION_BIT_DEC_TTL = 0x0000000000400000, + BNXT_ULP_ACTION_BIT_SET_TP_SRC = 0x0000000000800000, + BNXT_ULP_ACTION_BIT_SET_TP_DST = 0x0000000001000000, + BNXT_ULP_ACTION_BIT_VXLAN_ENCAP = 0x0000000002000000, + BNXT_ULP_ACTION_BIT_NVGRE_ENCAP = 0x0000000004000000, + BNXT_ULP_ACTION_BIT_LAST = 0x0000000008000000 +}; + +enum bnxt_ulp_hdr_bit { + BNXT_ULP_HDR_BIT_SVIF = 0x0000000000000001, + BNXT_ULP_HDR_BIT_O_ETH = 0x0000000000000002, + BNXT_ULP_HDR_BIT_OO_VLAN = 0x0000000000000004, + BNXT_ULP_HDR_BIT_OI_VLAN = 0x0000000000000008, + BNXT_ULP_HDR_BIT_O_IPV4 = 0x0000000000000010, + BNXT_ULP_HDR_BIT_O_IPV6 = 0x0000000000000020, + BNXT_ULP_HDR_BIT_O_TCP = 0x0000000000000040, + BNXT_ULP_HDR_BIT_O_UDP = 0x0000000000000080, + BNXT_ULP_HDR_BIT_T_VXLAN = 0x0000000000000100, + BNXT_ULP_HDR_BIT_T_GRE = 0x0000000000000200, + BNXT_ULP_HDR_BIT_I_ETH = 0x0000000000000400, + BNXT_ULP_HDR_BIT_IO_VLAN = 0x0000000000000800, + BNXT_ULP_HDR_BIT_II_VLAN = 0x0000000000001000, + BNXT_ULP_HDR_BIT_I_IPV4 = 0x0000000000002000, + BNXT_ULP_HDR_BIT_I_IPV6 = 0x0000000000004000, + BNXT_ULP_HDR_BIT_I_TCP = 0x0000000000008000, + BNXT_ULP_HDR_BIT_I_UDP = 0x0000000000010000, + BNXT_ULP_HDR_BIT_LAST = 0x0000000000020000 +}; + +enum bnxt_ulp_act_type { + BNXT_ULP_ACT_TYPE_NOT_SUPPORTED = 0, + BNXT_ULP_ACT_TYPE_SUPPORTED = 1, + BNXT_ULP_ACT_TYPE_END = 2, + BNXT_ULP_ACT_TYPE_LAST = 3 +}; + +enum bnxt_ulp_byte_order { + 
BNXT_ULP_BYTE_ORDER_BE = 0, + BNXT_ULP_BYTE_ORDER_LE = 1, + BNXT_ULP_BYTE_ORDER_LAST = 2 +}; + +enum bnxt_ulp_cache_tbl_id { + BNXT_ULP_CACHE_TBL_ID_L2_CNTXT_TCAM_INGRESS = 0, + BNXT_ULP_CACHE_TBL_ID_L2_CNTXT_TCAM_EGRESS = 1, + BNXT_ULP_CACHE_TBL_ID_PROFILE_TCAM_INGRESS = 2, + BNXT_ULP_CACHE_TBL_ID_PROFILE_TCAM_EGRESS = 3, + BNXT_ULP_CACHE_TBL_ID_LAST = 4 +}; + +enum bnxt_ulp_chf_idx { + BNXT_ULP_CHF_IDX_MPLS_TAG_NUM = 0, + BNXT_ULP_CHF_IDX_O_VTAG_NUM = 1, + BNXT_ULP_CHF_IDX_O_VTAG_PRESENT = 2, + BNXT_ULP_CHF_IDX_O_TWO_VTAGS = 3, + BNXT_ULP_CHF_IDX_I_VTAG_NUM = 4, + BNXT_ULP_CHF_IDX_I_VTAG_PRESENT = 5, + BNXT_ULP_CHF_IDX_I_TWO_VTAGS = 6, + BNXT_ULP_CHF_IDX_INCOMING_IF = 7, + BNXT_ULP_CHF_IDX_DIRECTION = 8, + BNXT_ULP_CHF_IDX_SVIF = 9, + BNXT_ULP_CHF_IDX_O_L3 = 10, + BNXT_ULP_CHF_IDX_I_L3 = 11, + BNXT_ULP_CHF_IDX_O_L4 = 12, + BNXT_ULP_CHF_IDX_I_L4 = 13, + BNXT_ULP_CHF_IDX_LAST = 14 +}; + +enum bnxt_ulp_def_regfile_index { + BNXT_ULP_DEF_REGFILE_INDEX_DEF_PROF_FUNC_ID = 0, + BNXT_ULP_DEF_REGFILE_INDEX_LAST = 1 +}; + +enum bnxt_ulp_device_id { + BNXT_ULP_DEVICE_ID_WH_PLUS = 0, + BNXT_ULP_DEVICE_ID_THOR = 1, + BNXT_ULP_DEVICE_ID_STINGRAY = 2, + BNXT_ULP_DEVICE_ID_STINGRAY2 = 3, + BNXT_ULP_DEVICE_ID_LAST = 4 +}; + +enum bnxt_ulp_direction { + BNXT_ULP_DIRECTION_INGRESS = 0, + BNXT_ULP_DIRECTION_EGRESS = 1, + BNXT_ULP_DIRECTION_LAST = 2 +}; + +enum bnxt_ulp_hdr_type { + BNXT_ULP_HDR_TYPE_NOT_SUPPORTED = 0, + BNXT_ULP_HDR_TYPE_SUPPORTED = 1, + BNXT_ULP_HDR_TYPE_END = 2, + BNXT_ULP_HDR_TYPE_LAST = 3 +}; + +enum bnxt_ulp_mark_enable { + BNXT_ULP_MARK_ENABLE_NO = 0, + BNXT_ULP_MARK_ENABLE_YES = 1, + BNXT_ULP_MARK_ENABLE_LAST = 2 +}; + +enum bnxt_ulp_mask_opc { + BNXT_ULP_MASK_OPC_SET_TO_CONSTANT = 0, + BNXT_ULP_MASK_OPC_SET_TO_HDR_FIELD = 1, + BNXT_ULP_MASK_OPC_SET_TO_REGFILE = 2, + BNXT_ULP_MASK_OPC_SET_TO_DEF_REGFILE = 3, + BNXT_ULP_MASK_OPC_ADD_PAD = 4, + BNXT_ULP_MASK_OPC_LAST = 5 +}; + +enum bnxt_ulp_match_type { + BNXT_ULP_MATCH_TYPE_EM = 0, + BNXT_ULP_MATCH_TYPE_WC = 1, + BNXT_ULP_MATCH_TYPE_LAST = 2 +}; + +enum bnxt_ulp_priority { + BNXT_ULP_PRIORITY_LEVEL_0 = 0, + BNXT_ULP_PRIORITY_LEVEL_1 = 1, + BNXT_ULP_PRIORITY_LEVEL_2 = 2, + BNXT_ULP_PRIORITY_LEVEL_3 = 3, + BNXT_ULP_PRIORITY_LEVEL_4 = 4, + BNXT_ULP_PRIORITY_LEVEL_5 = 5, + BNXT_ULP_PRIORITY_LEVEL_6 = 6, + BNXT_ULP_PRIORITY_LEVEL_7 = 7, + BNXT_ULP_PRIORITY_NOT_USED = 8, + BNXT_ULP_PRIORITY_LAST = 9 +}; + +enum bnxt_ulp_regfile_index { + BNXT_ULP_REGFILE_INDEX_CLASS_TID = 0, + BNXT_ULP_REGFILE_INDEX_L2_CNTXT_ID_0 = 1, + BNXT_ULP_REGFILE_INDEX_L2_CNTXT_ID_1 = 2, + BNXT_ULP_REGFILE_INDEX_PROF_FUNC_ID_0 = 3, + BNXT_ULP_REGFILE_INDEX_PROF_FUNC_ID_1 = 4, + BNXT_ULP_REGFILE_INDEX_EM_PROFILE_ID_0 = 5, + BNXT_ULP_REGFILE_INDEX_EM_PROFILE_ID_1 = 6, + BNXT_ULP_REGFILE_INDEX_WC_PROFILE_ID_0 = 7, + BNXT_ULP_REGFILE_INDEX_WC_PROFILE_ID_1 = 8, + BNXT_ULP_REGFILE_INDEX_ACTION_PTR_MAIN = 9, + BNXT_ULP_REGFILE_INDEX_ACTION_PTR_0 = 10, + BNXT_ULP_REGFILE_INDEX_ENCAP_PTR_0 = 11, + BNXT_ULP_REGFILE_INDEX_ENCAP_PTR_1 = 12, + BNXT_ULP_REGFILE_INDEX_CRITICAL_RESOURCE = 13, + BNXT_ULP_REGFILE_INDEX_CACHE_ENTRY_PTR = 14, + BNXT_ULP_REGFILE_INDEX_NOT_USED = 15, + BNXT_ULP_REGFILE_INDEX_LAST = 16 +}; + +enum bnxt_ulp_resource_func { + BNXT_ULP_RESOURCE_FUNC_INVALID = 0, + BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE = 1, + BNXT_ULP_RESOURCE_FUNC_EM_TABLE = 2, + BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE = 3, + BNXT_ULP_RESOURCE_FUNC_CACHE_TABLE = 4, + BNXT_ULP_RESOURCE_FUNC_IDENTIFIER = 5, + BNXT_ULP_RESOURCE_FUNC_HW_FID = 6, + BNXT_ULP_RESOURCE_FUNC_LAST = 7 +}; + +enum 
bnxt_ulp_result_opc { + BNXT_ULP_RESULT_OPC_SET_TO_CONSTANT = 0, + BNXT_ULP_RESULT_OPC_SET_TO_ACT_PROP = 1, + BNXT_ULP_RESULT_OPC_SET_TO_ENCAP_ACT_PROP_SZ = 2, + BNXT_ULP_RESULT_OPC_SET_TO_REGFILE = 3, + BNXT_ULP_RESULT_OPC_SET_TO_DEF_REGFILE = 4, + BNXT_ULP_RESULT_OPC_LAST = 5 +}; + +enum bnxt_ulp_search_before_alloc { + BNXT_ULP_SEARCH_BEFORE_ALLOC_NO = 0, + BNXT_ULP_SEARCH_BEFORE_ALLOC_YES = 1, + BNXT_ULP_SEARCH_BEFORE_ALLOC_LAST = 2 +}; + +enum bnxt_ulp_spec_opc { + BNXT_ULP_SPEC_OPC_SET_TO_CONSTANT = 0, + BNXT_ULP_SPEC_OPC_SET_TO_HDR_FIELD = 1, + BNXT_ULP_SPEC_OPC_SET_TO_REGFILE = 2, + BNXT_ULP_SPEC_OPC_SET_TO_DEF_REGFILE = 3, + BNXT_ULP_SPEC_OPC_ADD_PAD = 4, + BNXT_ULP_SPEC_OPC_LAST = 5 +}; + +enum bnxt_ulp_encap_vtag_encoding { + BNXT_ULP_ENCAP_VTAG_ENCODING_DTAG_ECAP_PRI = 4, + BNXT_ULP_ENCAP_VTAG_ENCODING_DTAG_REMAP_DIFFSERV = 5, + BNXT_ULP_ENCAP_VTAG_ENCODING_NO_TAG_ECAP_PRI = 6, + BNXT_ULP_ENCAP_VTAG_ENCODING_NO_TAG_REMAP_DIFFSERV = 7, + BNXT_ULP_ENCAP_VTAG_ENCODING_NO_TAG_REMAP_PRI_0 = 8, + BNXT_ULP_ENCAP_VTAG_ENCODING_NO_TAG_REMAP_PRI_1 = 9, + BNXT_ULP_ENCAP_VTAG_ENCODING_NO_TAG_REMAP_PRI_2 = 10, + BNXT_ULP_ENCAP_VTAG_ENCODING_NO_TAG_REMAP_PRI_3 = 11, + BNXT_ULP_ENCAP_VTAG_ENCODING_NO_TAG_REMAP_PRI_4 = 12, + BNXT_ULP_ENCAP_VTAG_ENCODING_NO_TAG_REMAP_PRI_5 = 13, + BNXT_ULP_ENCAP_VTAG_ENCODING_NO_TAG_REMAP_PRI_6 = 14, + BNXT_ULP_ENCAP_VTAG_ENCODING_NO_TAG_REMAP_PRI_7 = 15, + BNXT_ULP_ENCAP_VTAG_ENCODING_NOP = 0, + BNXT_ULP_ENCAP_VTAG_ENCODING_STAG_ECAP_PRI = 1, + BNXT_ULP_ENCAP_VTAG_ENCODING_STAG_IVLAN_PRI = 2, + BNXT_ULP_ENCAP_VTAG_ENCODING_STAG_REMAP_DIFFSERV = 3 +}; + +enum bnxt_ulp_fdb_resource_flags { + BNXT_ULP_FDB_RESOURCE_FLAGS_DIR_EGR = 0x01, + BNXT_ULP_FDB_RESOURCE_FLAGS_DIR_INGR = 0x00 +}; + +enum bnxt_ulp_fdb_type { + BNXT_ULP_FDB_TYPE_DEFAULT = 1, + BNXT_ULP_FDB_TYPE_REGULAR = 0 +}; + +enum bnxt_ulp_flow_dir_bitmask { + BNXT_ULP_FLOW_DIR_BITMASK_EGR = 0x8000000000000000, + BNXT_ULP_FLOW_DIR_BITMASK_ING = 0x0000000000000000 +}; + +enum bnxt_ulp_match_type_bitmask { + BNXT_ULP_MATCH_TYPE_BITMASK_EM = 0x0000000000000000, + BNXT_ULP_MATCH_TYPE_BITMASK_WM = 0x0000000000000001 +}; + +enum bnxt_ulp_sym { + BNXT_ULP_SYM_BIG_ENDIAN = 0, + BNXT_ULP_SYM_DECAP_FUNC_NONE = 0, + BNXT_ULP_SYM_DECAP_FUNC_THRU_L2 = 11, + BNXT_ULP_SYM_DECAP_FUNC_THRU_L3 = 12, + BNXT_ULP_SYM_DECAP_FUNC_THRU_L4 = 13, + BNXT_ULP_SYM_DECAP_FUNC_THRU_TL2 = 3, + BNXT_ULP_SYM_DECAP_FUNC_THRU_TL3 = 8, + BNXT_ULP_SYM_DECAP_FUNC_THRU_TL4 = 9, + BNXT_ULP_SYM_DECAP_FUNC_THRU_TUN = 10, + BNXT_ULP_SYM_ECV_CUSTOM_EN_NO = 0, + BNXT_ULP_SYM_ECV_CUSTOM_EN_YES = 1, + BNXT_ULP_SYM_ECV_L2_EN_NO = 0, + BNXT_ULP_SYM_ECV_L2_EN_YES = 1, + BNXT_ULP_SYM_ECV_L3_TYPE_IPV4 = 4, + BNXT_ULP_SYM_ECV_L3_TYPE_IPV6 = 5, + BNXT_ULP_SYM_ECV_L3_TYPE_MPLS_8847 = 6, + BNXT_ULP_SYM_ECV_L3_TYPE_MPLS_8848 = 7, + BNXT_ULP_SYM_ECV_L3_TYPE_NONE = 0, + BNXT_ULP_SYM_ECV_L4_TYPE_NONE = 0, + BNXT_ULP_SYM_ECV_L4_TYPE_UDP = 4, + BNXT_ULP_SYM_ECV_L4_TYPE_UDP_CSUM = 5, + BNXT_ULP_SYM_ECV_L4_TYPE_UDP_ENTROPY = 6, + BNXT_ULP_SYM_ECV_L4_TYPE_UDP_ENTROPY_CSUM = 7, + BNXT_ULP_SYM_ECV_TUN_TYPE_GENERIC = 1, + BNXT_ULP_SYM_ECV_TUN_TYPE_GRE = 5, + BNXT_ULP_SYM_ECV_TUN_TYPE_NGE = 3, + BNXT_ULP_SYM_ECV_TUN_TYPE_NONE = 0, + BNXT_ULP_SYM_ECV_TUN_TYPE_NVGRE = 4, + BNXT_ULP_SYM_ECV_TUN_TYPE_VXLAN = 2, + BNXT_ULP_SYM_ECV_VALID_NO = 0, + BNXT_ULP_SYM_ECV_VALID_YES = 1, + BNXT_ULP_SYM_IP_PROTO_UDP = 17, + BNXT_ULP_SYM_L2_HDR_TYPE_DIX = 0, + BNXT_ULP_SYM_L2_HDR_TYPE_LLC = 2, + BNXT_ULP_SYM_L2_HDR_TYPE_LLC_SNAP = 1, + BNXT_ULP_SYM_L3_HDR_TYPE_ARP = 2, + 
BNXT_ULP_SYM_L3_HDR_TYPE_EAPOL = 4, + BNXT_ULP_SYM_L3_HDR_TYPE_FCOE = 6, + BNXT_ULP_SYM_L3_HDR_TYPE_IPV4 = 0, + BNXT_ULP_SYM_L3_HDR_TYPE_IPV6 = 1, + BNXT_ULP_SYM_L3_HDR_TYPE_PTP = 3, + BNXT_ULP_SYM_L3_HDR_TYPE_ROCE = 5, + BNXT_ULP_SYM_L3_HDR_TYPE_UPAR1 = 7, + BNXT_ULP_SYM_L3_HDR_TYPE_UPAR2 = 8, + BNXT_ULP_SYM_L4_HDR_TYPE_BTH_V1 = 5, + BNXT_ULP_SYM_L4_HDR_TYPE_ICMP = 2, + BNXT_ULP_SYM_L4_HDR_TYPE_TCP = 0, + BNXT_ULP_SYM_L4_HDR_TYPE_UDP = 1, + BNXT_ULP_SYM_L4_HDR_TYPE_UPAR1 = 3, + BNXT_ULP_SYM_L4_HDR_TYPE_UPAR2 = 4, + BNXT_ULP_SYM_LITTLE_ENDIAN = 1, + BNXT_ULP_SYM_MATCH_TYPE_EM = 0, + BNXT_ULP_SYM_MATCH_TYPE_WM = 1, + BNXT_ULP_SYM_NO = 0, + BNXT_ULP_SYM_PKT_TYPE_L2 = 0, + BNXT_ULP_SYM_POP_VLAN_NO = 0, + BNXT_ULP_SYM_POP_VLAN_YES = 1, + BNXT_ULP_SYM_STINGRAY2_LOOPBACK_PORT = 3, + BNXT_ULP_SYM_STINGRAY_LOOPBACK_PORT = 3, + BNXT_ULP_SYM_THOR_LOOPBACK_PORT = 3, + BNXT_ULP_SYM_TL2_HDR_TYPE_DIX = 0, + BNXT_ULP_SYM_TL3_HDR_TYPE_IPV4 = 0, + BNXT_ULP_SYM_TL3_HDR_TYPE_IPV6 = 1, + BNXT_ULP_SYM_TL4_HDR_TYPE_TCP = 0, + BNXT_ULP_SYM_TL4_HDR_TYPE_UDP = 1, + BNXT_ULP_SYM_TUN_HDR_TYPE_GENEVE = 1, + BNXT_ULP_SYM_TUN_HDR_TYPE_GRE = 3, + BNXT_ULP_SYM_TUN_HDR_TYPE_IPV4 = 4, + BNXT_ULP_SYM_TUN_HDR_TYPE_IPV6 = 5, + BNXT_ULP_SYM_TUN_HDR_TYPE_MPLS = 7, + BNXT_ULP_SYM_TUN_HDR_TYPE_NONE = 15, + BNXT_ULP_SYM_TUN_HDR_TYPE_NVGRE = 2, + BNXT_ULP_SYM_TUN_HDR_TYPE_PPPOE = 6, + BNXT_ULP_SYM_TUN_HDR_TYPE_UPAR1 = 8, + BNXT_ULP_SYM_TUN_HDR_TYPE_UPAR2 = 9, + BNXT_ULP_SYM_TUN_HDR_TYPE_VXLAN = 0, + BNXT_ULP_SYM_WH_PLUS_LOOPBACK_PORT = 3, + BNXT_ULP_SYM_YES = 1 +}; + +enum bnxt_ulp_act_prop_sz { + BNXT_ULP_ACT_PROP_SZ_ENCAP_TUN_SZ = 4, + BNXT_ULP_ACT_PROP_SZ_ENCAP_IP_SZ = 4, + BNXT_ULP_ACT_PROP_SZ_ENCAP_VTAG_SZ = 4, + BNXT_ULP_ACT_PROP_SZ_ENCAP_VTAG_TYPE = 4, + BNXT_ULP_ACT_PROP_SZ_ENCAP_VTAG_NUM = 4, + BNXT_ULP_ACT_PROP_SZ_ENCAP_L3_TYPE = 4, + BNXT_ULP_ACT_PROP_SZ_MPLS_POP_NUM = 4, + BNXT_ULP_ACT_PROP_SZ_MPLS_PUSH_NUM = 4, + BNXT_ULP_ACT_PROP_SZ_PORT_ID = 4, + BNXT_ULP_ACT_PROP_SZ_VNIC = 4, + BNXT_ULP_ACT_PROP_SZ_VPORT = 4, + BNXT_ULP_ACT_PROP_SZ_MARK = 4, + BNXT_ULP_ACT_PROP_SZ_COUNT = 4, + BNXT_ULP_ACT_PROP_SZ_METER = 4, + BNXT_ULP_ACT_PROP_SZ_SET_MAC_SRC = 8, + BNXT_ULP_ACT_PROP_SZ_SET_MAC_DST = 8, + BNXT_ULP_ACT_PROP_SZ_OF_PUSH_VLAN = 4, + BNXT_ULP_ACT_PROP_SZ_OF_SET_VLAN_PCP = 4, + BNXT_ULP_ACT_PROP_SZ_OF_SET_VLAN_VID = 4, + BNXT_ULP_ACT_PROP_SZ_SET_IPV4_SRC = 4, + BNXT_ULP_ACT_PROP_SZ_SET_IPV4_DST = 4, + BNXT_ULP_ACT_PROP_SZ_SET_IPV6_SRC = 16, + BNXT_ULP_ACT_PROP_SZ_SET_IPV6_DST = 16, + BNXT_ULP_ACT_PROP_SZ_SET_TP_SRC = 4, + BNXT_ULP_ACT_PROP_SZ_SET_TP_DST = 4, + BNXT_ULP_ACT_PROP_SZ_OF_PUSH_MPLS_0 = 4, + BNXT_ULP_ACT_PROP_SZ_OF_PUSH_MPLS_1 = 4, + BNXT_ULP_ACT_PROP_SZ_OF_PUSH_MPLS_2 = 4, + BNXT_ULP_ACT_PROP_SZ_OF_PUSH_MPLS_3 = 4, + BNXT_ULP_ACT_PROP_SZ_OF_PUSH_MPLS_4 = 4, + BNXT_ULP_ACT_PROP_SZ_OF_PUSH_MPLS_5 = 4, + BNXT_ULP_ACT_PROP_SZ_OF_PUSH_MPLS_6 = 4, + BNXT_ULP_ACT_PROP_SZ_OF_PUSH_MPLS_7 = 4, + BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_DMAC = 6, + BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_SMAC = 6, + BNXT_ULP_ACT_PROP_SZ_ENCAP_VTAG = 8, + BNXT_ULP_ACT_PROP_SZ_ENCAP_IP = 32, + BNXT_ULP_ACT_PROP_SZ_ENCAP_IP_SRC = 16, + BNXT_ULP_ACT_PROP_SZ_ENCAP_UDP = 4, + BNXT_ULP_ACT_PROP_SZ_ENCAP_TUN = 32, + BNXT_ULP_ACT_PROP_SZ_LAST = 4 +}; + +enum bnxt_ulp_act_prop_idx { + BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN_SZ = 0, + BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ = 4, + BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_SZ = 8, + BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_TYPE = 12, + BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_NUM = 16, + BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE = 20, + 
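+	/*
+	 * The bnxt_ulp_act_prop_idx values are byte offsets into
+	 * struct ulp_rte_act_prop.act_details[]; consecutive offsets differ
+	 * by the matching BNXT_ULP_ACT_PROP_SZ_* size, and
+	 * BNXT_ULP_ACT_PROP_IDX_LAST is the total size of that array.
+	 */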
BNXT_ULP_ACT_PROP_IDX_MPLS_POP_NUM = 24, + BNXT_ULP_ACT_PROP_IDX_MPLS_PUSH_NUM = 28, + BNXT_ULP_ACT_PROP_IDX_PORT_ID = 32, + BNXT_ULP_ACT_PROP_IDX_VNIC = 36, + BNXT_ULP_ACT_PROP_IDX_VPORT = 40, + BNXT_ULP_ACT_PROP_IDX_MARK = 44, + BNXT_ULP_ACT_PROP_IDX_COUNT = 48, + BNXT_ULP_ACT_PROP_IDX_METER = 52, + BNXT_ULP_ACT_PROP_IDX_SET_MAC_SRC = 56, + BNXT_ULP_ACT_PROP_IDX_SET_MAC_DST = 64, + BNXT_ULP_ACT_PROP_IDX_OF_PUSH_VLAN = 72, + BNXT_ULP_ACT_PROP_IDX_OF_SET_VLAN_PCP = 76, + BNXT_ULP_ACT_PROP_IDX_OF_SET_VLAN_VID = 80, + BNXT_ULP_ACT_PROP_IDX_SET_IPV4_SRC = 84, + BNXT_ULP_ACT_PROP_IDX_SET_IPV4_DST = 88, + BNXT_ULP_ACT_PROP_IDX_SET_IPV6_SRC = 92, + BNXT_ULP_ACT_PROP_IDX_SET_IPV6_DST = 108, + BNXT_ULP_ACT_PROP_IDX_SET_TP_SRC = 124, + BNXT_ULP_ACT_PROP_IDX_SET_TP_DST = 128, + BNXT_ULP_ACT_PROP_IDX_OF_PUSH_MPLS_0 = 132, + BNXT_ULP_ACT_PROP_IDX_OF_PUSH_MPLS_1 = 136, + BNXT_ULP_ACT_PROP_IDX_OF_PUSH_MPLS_2 = 140, + BNXT_ULP_ACT_PROP_IDX_OF_PUSH_MPLS_3 = 144, + BNXT_ULP_ACT_PROP_IDX_OF_PUSH_MPLS_4 = 148, + BNXT_ULP_ACT_PROP_IDX_OF_PUSH_MPLS_5 = 152, + BNXT_ULP_ACT_PROP_IDX_OF_PUSH_MPLS_6 = 156, + BNXT_ULP_ACT_PROP_IDX_OF_PUSH_MPLS_7 = 160, + BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_DMAC = 164, + BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_SMAC = 170, + BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG = 176, + BNXT_ULP_ACT_PROP_IDX_ENCAP_IP = 184, + BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SRC = 216, + BNXT_ULP_ACT_PROP_IDX_ENCAP_UDP = 232, + BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN = 236, + BNXT_ULP_ACT_PROP_IDX_LAST = 268 +}; +enum bnxt_ulp_class_hid { + BNXT_ULP_CLASS_HID_0092 = 0x0092 +}; + +enum bnxt_ulp_act_hid { + BNXT_ULP_ACT_HID_0029 = 0x0029 +}; + +#endif /* _ULP_TEMPLATE_DB_H_ */ diff --git a/src/spdk/dpdk/drivers/net/bnxt/tf_ulp/ulp_template_field_db.h b/src/spdk/dpdk/drivers/net/bnxt/tf_ulp/ulp_template_field_db.h new file mode 100644 index 000000000..587de8a83 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnxt/tf_ulp/ulp_template_field_db.h @@ -0,0 +1,63 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2020 Broadcom + * All rights reserved. 
+ */ + +#ifndef ULP_HDR_FIELD_ENUMS_H_ +#define ULP_HDR_FIELD_ENUMS_H_ + +enum bnxt_ulp_hf0 { + BNXT_ULP_HF0_IDX_SVIF_INDEX = 0, + BNXT_ULP_HF0_IDX_O_ETH_DMAC = 1, + BNXT_ULP_HF0_IDX_O_ETH_SMAC = 2, + BNXT_ULP_HF0_IDX_O_ETH_TYPE = 3, + BNXT_ULP_HF0_IDX_OO_VLAN_CFI_PRI = 4, + BNXT_ULP_HF0_IDX_OO_VLAN_VID = 5, + BNXT_ULP_HF0_IDX_OO_VLAN_TYPE = 6, + BNXT_ULP_HF0_IDX_OI_VLAN_CFI_PRI = 7, + BNXT_ULP_HF0_IDX_OI_VLAN_VID = 8, + BNXT_ULP_HF0_IDX_OI_VLAN_TYPE = 9, + BNXT_ULP_HF0_IDX_O_IPV4_VER = 10, + BNXT_ULP_HF0_IDX_O_IPV4_TOS = 11, + BNXT_ULP_HF0_IDX_O_IPV4_LEN = 12, + BNXT_ULP_HF0_IDX_O_IPV4_FRAG_ID = 13, + BNXT_ULP_HF0_IDX_O_IPV4_FRAG_OFF = 14, + BNXT_ULP_HF0_IDX_O_IPV4_TTL = 15, + BNXT_ULP_HF0_IDX_O_IPV4_NEXT_PID = 16, + BNXT_ULP_HF0_IDX_O_IPV4_CSUM = 17, + BNXT_ULP_HF0_IDX_O_IPV4_SRC_ADDR = 18, + BNXT_ULP_HF0_IDX_O_IPV4_DST_ADDR = 19, + BNXT_ULP_HF0_IDX_O_UDP_SRC_PORT = 20, + BNXT_ULP_HF0_IDX_O_UDP_DST_PORT = 21, + BNXT_ULP_HF0_IDX_O_UDP_LENGTH = 22, + BNXT_ULP_HF0_IDX_O_UDP_CSUM = 23 +}; + +enum bnxt_ulp_hf_bitmask0 { + BNXT_ULP_HF0_BITMASK_SVIF_INDEX = 0x8000000000000000, + BNXT_ULP_HF0_BITMASK_O_ETH_DMAC = 0x4000000000000000, + BNXT_ULP_HF0_BITMASK_O_ETH_SMAC = 0x2000000000000000, + BNXT_ULP_HF0_BITMASK_O_ETH_TYPE = 0x1000000000000000, + BNXT_ULP_HF0_BITMASK_OO_VLAN_CFI_PRI = 0x0800000000000000, + BNXT_ULP_HF0_BITMASK_OO_VLAN_VID = 0x0400000000000000, + BNXT_ULP_HF0_BITMASK_OO_VLAN_TYPE = 0x0200000000000000, + BNXT_ULP_HF0_BITMASK_OI_VLAN_CFI_PRI = 0x0100000000000000, + BNXT_ULP_HF0_BITMASK_OI_VLAN_VID = 0x0080000000000000, + BNXT_ULP_HF0_BITMASK_OI_VLAN_TYPE = 0x0040000000000000, + BNXT_ULP_HF0_BITMASK_O_IPV4_VER = 0x0020000000000000, + BNXT_ULP_HF0_BITMASK_O_IPV4_TOS = 0x0010000000000000, + BNXT_ULP_HF0_BITMASK_O_IPV4_LEN = 0x0008000000000000, + BNXT_ULP_HF0_BITMASK_O_IPV4_FRAG_ID = 0x0004000000000000, + BNXT_ULP_HF0_BITMASK_O_IPV4_FRAG_OFF = 0x0002000000000000, + BNXT_ULP_HF0_BITMASK_O_IPV4_TTL = 0x0001000000000000, + BNXT_ULP_HF0_BITMASK_O_IPV4_NEXT_PID = 0x0000800000000000, + BNXT_ULP_HF0_BITMASK_O_IPV4_CSUM = 0x0000400000000000, + BNXT_ULP_HF0_BITMASK_O_IPV4_SRC_ADDR = 0x0000200000000000, + BNXT_ULP_HF0_BITMASK_O_IPV4_DST_ADDR = 0x0000100000000000, + BNXT_ULP_HF0_BITMASK_O_UDP_SRC_PORT = 0x0000080000000000, + BNXT_ULP_HF0_BITMASK_O_UDP_DST_PORT = 0x0000040000000000, + BNXT_ULP_HF0_BITMASK_O_UDP_LENGTH = 0x0000020000000000, + BNXT_ULP_HF0_BITMASK_O_UDP_CSUM = 0x0000010000000000 +}; + +#endif /* _ULP_HDR_FIELD_ENUMS_H_ */ diff --git a/src/spdk/dpdk/drivers/net/bnxt/tf_ulp/ulp_template_struct.h b/src/spdk/dpdk/drivers/net/bnxt/tf_ulp/ulp_template_struct.h new file mode 100644 index 000000000..0e0d02ff4 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnxt/tf_ulp/ulp_template_struct.h @@ -0,0 +1,313 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2019 Broadcom + * All rights reserved. 
+ */ + +#ifndef _ULP_TEMPLATE_STRUCT_H_ +#define _ULP_TEMPLATE_STRUCT_H_ + +#include +#include "rte_ether.h" +#include "rte_icmp.h" +#include "rte_ip.h" +#include "rte_tcp.h" +#include "rte_udp.h" +#include "rte_esp.h" +#include "rte_sctp.h" +#include "rte_flow.h" +#include "tf_core.h" + +/* Number of fields for each protocol */ +#define BNXT_ULP_PROTO_HDR_SVIF_NUM 1 +#define BNXT_ULP_PROTO_HDR_ETH_NUM 3 +#define BNXT_ULP_PROTO_HDR_S_VLAN_NUM 3 +#define BNXT_ULP_PROTO_HDR_VLAN_NUM 6 +#define BNXT_ULP_PROTO_HDR_IPV4_NUM 10 +#define BNXT_ULP_PROTO_HDR_IPV6_NUM 6 +#define BNXT_ULP_PROTO_HDR_UDP_NUM 4 +#define BNXT_ULP_PROTO_HDR_TCP_NUM 9 +#define BNXT_ULP_PROTO_HDR_VXLAN_NUM 4 +#define BNXT_ULP_PROTO_HDR_MAX 128 +#define BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX 0 + +struct ulp_rte_hdr_bitmap { + uint64_t bits; +}; + +struct ulp_rte_field_bitmap { + uint64_t bits; +}; + +/* Structure to store the protocol fields */ +#define RTE_PARSER_FLOW_HDR_FIELD_SIZE 16 +struct ulp_rte_hdr_field { + uint8_t spec[RTE_PARSER_FLOW_HDR_FIELD_SIZE]; + uint8_t mask[RTE_PARSER_FLOW_HDR_FIELD_SIZE]; + uint32_t size; +}; + +struct ulp_rte_act_bitmap { + uint64_t bits; +}; + +/* Structure to hold the action property details. */ +struct ulp_rte_act_prop { + uint8_t act_details[BNXT_ULP_ACT_PROP_IDX_LAST]; +}; + +/* Structure to be used for passing all the parser functions */ +struct ulp_rte_parser_params { + struct ulp_rte_hdr_bitmap hdr_bitmap; + struct ulp_rte_field_bitmap fld_bitmap; + struct ulp_rte_hdr_field hdr_field[BNXT_ULP_PROTO_HDR_MAX]; + uint32_t comp_fld[BNXT_ULP_CHF_IDX_LAST]; + uint32_t field_idx; + uint32_t vlan_idx; + struct ulp_rte_act_bitmap act_bitmap; + struct ulp_rte_act_prop act_prop; + uint32_t dir; + struct bnxt_ulp_context *ulp_ctx; +}; + +/* Flow Parser Header Information Structure */ +struct bnxt_ulp_rte_hdr_info { + enum bnxt_ulp_hdr_type hdr_type; + /* Flow Parser Protocol Header Function Prototype */ + int (*proto_hdr_func)(const struct rte_flow_item *item_list, + struct ulp_rte_parser_params *params); +}; + +/* Flow Parser Header Information Structure Array defined in template source*/ +extern struct bnxt_ulp_rte_hdr_info ulp_hdr_info[]; + +/* Flow Parser Action Information Structure */ +struct bnxt_ulp_rte_act_info { + enum bnxt_ulp_act_type act_type; + /* Flow Parser Protocol Action Function Prototype */ + int32_t (*proto_act_func) + (const struct rte_flow_action *action_item, + struct ulp_rte_parser_params *params); +}; + +/* Flow Parser Action Information Structure Array defined in template source*/ +extern struct bnxt_ulp_rte_act_info ulp_act_info[]; + +/* Flow Matcher structures */ +struct bnxt_ulp_header_match_info { + struct ulp_rte_hdr_bitmap hdr_bitmap; + uint32_t start_idx; + uint32_t num_entries; + uint32_t class_tmpl_id; + uint32_t act_vnic; +}; + +struct ulp_rte_bitmap { + uint64_t bits; +}; + +struct bnxt_ulp_class_match_info { + struct ulp_rte_bitmap hdr_sig; + struct ulp_rte_bitmap field_sig; + uint32_t class_hid; + uint32_t class_tid; + uint8_t act_vnic; + uint8_t wc_pri; +}; + +/* Flow Matcher templates Structure for class entries */ +extern uint16_t ulp_class_sig_tbl[]; +extern struct bnxt_ulp_class_match_info ulp_class_match_list[]; + +/* Flow Matcher Action structures */ +struct bnxt_ulp_action_match_info { + struct ulp_rte_act_bitmap act_bitmap; + uint32_t act_tmpl_id; +}; + +struct bnxt_ulp_act_match_info { + struct ulp_rte_bitmap act_sig; + uint32_t act_hid; + uint32_t act_tid; +}; + +/* Flow Matcher templates Structure for action entries */ +extern uint16_t 
ulp_act_sig_tbl[]; +extern struct bnxt_ulp_act_match_info ulp_act_match_list[]; + +/* Device specific parameters */ +struct bnxt_ulp_device_params { + uint8_t description[16]; + uint32_t global_fid_enable; + enum bnxt_ulp_byte_order byte_order; + uint8_t encap_byte_swap; + uint32_t lfid_entries; + uint32_t lfid_entry_size; + uint64_t gfid_entries; + uint32_t gfid_entry_size; + uint64_t num_flows; + uint32_t num_resources_per_flow; +}; + +/* Flow Mapper */ +struct bnxt_ulp_mapper_tbl_list_info { + uint32_t device_name; + uint32_t start_tbl_idx; + uint32_t num_tbls; +}; + +struct bnxt_ulp_mapper_class_tbl_info { + enum bnxt_ulp_resource_func resource_func; + uint32_t table_type; + uint8_t direction; + uint8_t mem; + uint32_t priority; + uint8_t srch_b4_alloc; + uint32_t critical_resource; + + /* Information for accessing the ulp_key_field_list */ + uint32_t key_start_idx; + uint16_t key_bit_size; + uint16_t key_num_fields; + /* Size of the blob that holds the key */ + uint16_t blob_key_bit_size; + + /* Information for accessing the ulp_class_result_field_list */ + uint32_t result_start_idx; + uint16_t result_bit_size; + uint16_t result_num_fields; + + /* Information for accessing the ulp_ident_list */ + uint32_t ident_start_idx; + uint16_t ident_nums; + + uint8_t mark_enable; + enum bnxt_ulp_regfile_index regfile_wr_idx; + + enum bnxt_ulp_cache_tbl_id cache_tbl_id; +}; + +struct bnxt_ulp_mapper_act_tbl_info { + enum bnxt_ulp_resource_func resource_func; + enum tf_tbl_type table_type; + uint8_t direction; + uint8_t srch_b4_alloc; + uint32_t result_start_idx; + uint16_t result_bit_size; + uint16_t encap_num_fields; + uint16_t result_num_fields; + + enum bnxt_ulp_regfile_index regfile_wr_idx; +}; + +struct bnxt_ulp_mapper_class_key_field_info { + uint8_t description[64]; + enum bnxt_ulp_mask_opc mask_opcode; + enum bnxt_ulp_spec_opc spec_opcode; + uint16_t field_bit_size; + uint8_t mask_operand[16]; + uint8_t spec_operand[16]; +}; + +struct bnxt_ulp_mapper_result_field_info { + uint8_t description[64]; + enum bnxt_ulp_result_opc result_opcode; + uint16_t field_bit_size; + uint8_t result_operand[16]; +}; + +struct bnxt_ulp_mapper_ident_info { + uint8_t description[64]; + uint32_t resource_func; + + uint16_t ident_type; + uint16_t ident_bit_size; + uint16_t ident_bit_pos; + enum bnxt_ulp_regfile_index regfile_wr_idx; +}; + +struct bnxt_ulp_def_ident_info { + enum tf_dir direction; + enum tf_identifier_type ident_type; + enum bnxt_ulp_def_regfile_index def_regfile_index; +}; + +struct bnxt_ulp_cache_tbl_params { + uint16_t num_entries; +}; + +/* + * Flow Mapper Static Data Externs: + * Access to the below static data should be done through access functions and + * directly throughout the code. + */ + +/* + * The ulp_device_params is indexed by the dev_id. + * This table maintains the device specific parameters. + */ +extern struct bnxt_ulp_device_params ulp_device_params[]; + +/* + * The ulp_class_tmpl_list and ulp_act_tmpl_list are indexed by the dev_id + * and template id (either class or action) returned by the matcher. + * The result provides the start index and number of entries in the connected + * ulp_class_tbl_list/ulp_act_tbl_list. + */ +extern struct bnxt_ulp_mapper_tbl_list_info ulp_class_tmpl_list[]; +extern struct bnxt_ulp_mapper_tbl_list_info ulp_act_tmpl_list[]; + +/* + * The ulp_class_tbl_list and ulp_act_tbl_list are indexed based on the results + * of the template lists. 
Each entry describes the high level details of the
+ * table entry, including the start index and number of instructions in the
+ * field lists.
+ */
+extern struct bnxt_ulp_mapper_class_tbl_info ulp_class_tbl_list[];
+extern struct bnxt_ulp_mapper_act_tbl_info ulp_act_tbl_list[];
+
+/*
+ * The ulp_class_result_field_list provides the instructions for creating result
+ * records such as tcam/em results.
+ */
+extern struct bnxt_ulp_mapper_result_field_info ulp_class_result_field_list[];
+
+/*
+ * The ulp_act_result_field_list provides the instructions for creating an
+ * action record. It uses the same structure as the result list, but is only
+ * used for actions.
+ */
+extern
+struct bnxt_ulp_mapper_result_field_info ulp_act_result_field_list[];
+
+/*
+ * The ulp_class_key_field_list provides the instructions for creating the key
+ * and mask fields of the class tcam and em tables.
+ */
+extern
+struct bnxt_ulp_mapper_class_key_field_info ulp_class_key_field_list[];
+
+/*
+ * The ulp_ident_list provides the instructions for creating identifiers such
+ * as profile ids.
+ */
+extern struct bnxt_ulp_mapper_ident_info ulp_ident_list[];
+
+/*
+ * The ulp_act_prop_map_table provides the mapping to index and size of action
+ * properties.
+ */
+extern uint32_t ulp_act_prop_map_table[];
+
+/*
+ * The ulp_def_ident_tbl provides the list of default identifiers that need to
+ * be initialized and where to store them.
+ */
+extern struct bnxt_ulp_def_ident_info ulp_def_ident_tbl[];
+
+/*
+ * The ulp_cache_tbl_parms table provides the sizes of the cache tables the
+ * mapper must dynamically allocate during initialization.
+ */
+extern struct bnxt_ulp_cache_tbl_params ulp_cache_tbl_params[];
+
+#endif /* _ULP_TEMPLATE_STRUCT_H_ */
diff --git a/src/spdk/dpdk/drivers/net/bnxt/tf_ulp/ulp_utils.c b/src/spdk/dpdk/drivers/net/bnxt/tf_ulp/ulp_utils.c
new file mode 100644
index 000000000..0150c1d49
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/bnxt/tf_ulp/ulp_utils.c
@@ -0,0 +1,554 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2019 Broadcom
+ * All rights reserved.
+ */
+
+#include "ulp_utils.h"
+#include "bnxt_tf_common.h"
+
+/*
+ * Initialize the regfile structure for writing
+ *
+ * regfile [in] Ptr to a regfile instance
+ *
+ * returns 0 on error or 1 on success
+ */
+uint32_t
+ulp_regfile_init(struct ulp_regfile *regfile)
+{
+	/* validate the arguments */
+	if (!regfile) {
+		BNXT_TF_DBG(ERR, "invalid argument\n");
+		return 0; /* failure */
+	}
+	memset(regfile, 0, sizeof(struct ulp_regfile));
+	return 1; /* Success */
+}
+
+/*
+ * Read a value from the regfile
+ *
+ * regfile [in] The regfile instance. Must be initialized prior to being used
+ *
+ * field [in] The field to be read within the regfile.
+ *
+ * data [in/out] Pointer where the read value is returned.
+ *
+ * returns size, zero on failure
+ */
+uint32_t
+ulp_regfile_read(struct ulp_regfile *regfile,
+		 enum bnxt_ulp_regfile_index field,
+		 uint64_t *data)
+{
+	/* validate the arguments */
+	if (!regfile || field >= BNXT_ULP_REGFILE_INDEX_LAST) {
+		BNXT_TF_DBG(ERR, "invalid argument\n");
+		return 0; /* failure */
+	}
+
+	*data = regfile->entry[field].data;
+	return sizeof(*data);
+}
+
+/*
+ * Write a value to the regfile
+ *
+ * regfile [in] The regfile instance. Must be initialized prior to being used
+ *
+ * field [in] The field to be written within the regfile.
+ *
+ * data [in] The value to be written into the regfile entry. It is stored in
+ * the same byte order in which it is supplied.
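+ *
+ * A minimal usage sketch (hypothetical caller, paired with
+ * ulp_regfile_read()):
+ *	struct ulp_regfile rf;
+ *	uint64_t id = 0;
+ *	ulp_regfile_init(&rf);
+ *	ulp_regfile_write(&rf, BNXT_ULP_REGFILE_INDEX_L2_CNTXT_ID_0, id);
+ *	ulp_regfile_read(&rf, BNXT_ULP_REGFILE_INDEX_L2_CNTXT_ID_0, &id);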
+ * + * returns 0 on fail + */ +uint32_t +ulp_regfile_write(struct ulp_regfile *regfile, + enum bnxt_ulp_regfile_index field, + uint64_t data) +{ + /* validate the arguments */ + if (!regfile || field >= BNXT_ULP_REGFILE_INDEX_LAST) { + BNXT_TF_DBG(ERR, "invalid argument\n"); + return 0; /* failure */ + } + + regfile->entry[field].data = data; + return sizeof(data); /* Success */ +} + +static void +ulp_bs_put_msb(uint8_t *bs, uint16_t bitpos, uint8_t bitlen, uint8_t val) +{ + uint8_t bitoffs = bitpos % 8; + uint16_t index = bitpos / 8; + uint8_t mask; + uint8_t tmp; + int8_t shift; + + tmp = bs[index]; + mask = ((uint8_t)-1 >> (8 - bitlen)); + shift = 8 - bitoffs - bitlen; + val &= mask; + + if (shift >= 0) { + tmp &= ~(mask << shift); + tmp |= val << shift; + bs[index] = tmp; + } else { + tmp &= ~((uint8_t)-1 >> bitoffs); + tmp |= val >> -shift; + bs[index++] = tmp; + + tmp = bs[index]; + tmp &= ((uint8_t)-1 >> (bitlen - (8 - bitoffs))); + tmp |= val << (8 + shift); + bs[index] = tmp; + } +} + +static void +ulp_bs_put_lsb(uint8_t *bs, uint16_t bitpos, uint8_t bitlen, uint8_t val) +{ + uint8_t bitoffs = bitpos % 8; + uint16_t index = bitpos / 8; + uint8_t mask; + uint8_t tmp; + uint8_t shift; + uint8_t partial; + + tmp = bs[index]; + shift = bitoffs; + + if (bitoffs + bitlen <= 8) { + mask = ((1 << bitlen) - 1) << shift; + tmp &= ~mask; + tmp |= ((val << shift) & mask); + bs[index] = tmp; + } else { + partial = 8 - bitoffs; + mask = ((1 << partial) - 1) << shift; + tmp &= ~mask; + tmp |= ((val << shift) & mask); + bs[index++] = tmp; + + val >>= partial; + partial = bitlen - partial; + mask = ((1 << partial) - 1); + tmp = bs[index]; + tmp &= ~mask; + tmp |= (val & mask); + bs[index] = tmp; + } +} + +/* Assuming that val is in Big-Endian Format */ +static uint32_t +ulp_bs_push_lsb(uint8_t *bs, uint16_t pos, uint8_t len, uint8_t *val) +{ + int i; + int cnt = (len) / 8; + int tlen = len; + + if (cnt > 0 && !(len % 8)) + cnt -= 1; + + for (i = 0; i < cnt; i++) { + ulp_bs_put_lsb(bs, pos, 8, val[cnt - i]); + pos += 8; + tlen -= 8; + } + + /* Handle the remainder bits */ + if (tlen) + ulp_bs_put_lsb(bs, pos, tlen, val[0]); + return len; +} + +/* Assuming that val is in Big-Endian Format */ +static uint32_t +ulp_bs_push_msb(uint8_t *bs, uint16_t pos, uint8_t len, uint8_t *val) +{ + int i; + int cnt = (len + 7) / 8; + int tlen = len; + + /* Handle any remainder bits */ + int tmp = len % 8; + + if (!tmp) + tmp = 8; + + ulp_bs_put_msb(bs, pos, tmp, val[0]); + + pos += tmp; + tlen -= tmp; + + for (i = 1; i < cnt; i++) { + ulp_bs_put_msb(bs, pos, 8, val[i]); + pos += 8; + tlen -= 8; + } + + return len; +} + +/* + * Initializes the blob structure for creating binary blob + * + * blob [in] The blob to be initialized + * + * bitlen [in] The bit length of the blob + * + * order [in] The byte order for the blob. Currently only supporting + * big endian. All fields are packed with this order. + * + * returns 0 on error or 1 on success + */ +uint32_t +ulp_blob_init(struct ulp_blob *blob, + uint16_t bitlen, + enum bnxt_ulp_byte_order order) +{ + /* validate the arguments */ + if (!blob || bitlen > (8 * sizeof(blob->data))) { + BNXT_TF_DBG(ERR, "invalid argument\n"); + return 0; /* failure */ + } + blob->bitlen = bitlen; + blob->byte_order = order; + blob->write_idx = 0; + memset(blob->data, 0, sizeof(blob->data)); + return 1; /* Success */ +} + +/* + * Add data to the binary blob at the current offset. + * + * blob [in] The blob that data is added to. The blob must + * be initialized prior to pushing data. 
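+ * For blobs initialized with BNXT_ULP_BYTE_ORDER_BE the bits are packed
+ * most-significant-bit first (via ulp_bs_push_msb() above); little-endian
+ * blobs are packed with ulp_bs_push_lsb().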
+ * + * data [in] A pointer to bytes to be added to the blob. + * + * datalen [in] The number of bits to be added to the blob. + * + * The offset of the data is updated after each push of data. + * NULL returned on error. + */ +#define ULP_BLOB_BYTE 8 +#define ULP_BLOB_BYTE_HEX 0xFF +#define BLOB_MASK_CAL(x) ((0xFF << (x)) & 0xFF) +uint32_t +ulp_blob_push(struct ulp_blob *blob, + uint8_t *data, + uint32_t datalen) +{ + uint32_t rc; + + /* validate the arguments */ + if (!blob || datalen > (uint32_t)(blob->bitlen - blob->write_idx)) { + BNXT_TF_DBG(ERR, "invalid argument\n"); + return 0; /* failure */ + } + + if (blob->byte_order == BNXT_ULP_BYTE_ORDER_BE) + rc = ulp_bs_push_msb(blob->data, + blob->write_idx, + datalen, + data); + else + rc = ulp_bs_push_lsb(blob->data, + blob->write_idx, + datalen, + data); + if (!rc) { + BNXT_TF_DBG(ERR, "Failed ro write blob\n"); + return 0; + } + blob->write_idx += datalen; + return datalen; +} + +/* + * Add data to the binary blob at the current offset. + * + * blob [in] The blob that data is added to. The blob must + * be initialized prior to pushing data. + * + * data [in] 64-bit value to be added to the blob. + * + * datalen [in] The number of bits to be added to the blob. + * + * The offset of the data is updated after each push of data. + * NULL returned on error, pointer pushed value otherwise. + */ +uint8_t * +ulp_blob_push_64(struct ulp_blob *blob, + uint64_t *data, + uint32_t datalen) +{ + uint8_t *val = (uint8_t *)data; + int rc; + + int size = (datalen + 7) / 8; + + if (!blob || !data || + datalen > (uint32_t)(blob->bitlen - blob->write_idx)) { + BNXT_TF_DBG(ERR, "invalid argument\n"); + return 0; + } + + rc = ulp_blob_push(blob, &val[8 - size], datalen); + if (!rc) + return 0; + + return &val[8 - size]; +} + +/* + * Add encap data to the binary blob at the current offset. + * + * blob [in] The blob that data is added to. The blob must + * be initialized prior to pushing data. + * + * data [in] value to be added to the blob. + * + * datalen [in] The number of bits to be added to the blob. + * + * The offset of the data is updated after each push of data. + * NULL returned on error, pointer pushed value otherwise. + */ +uint32_t +ulp_blob_push_encap(struct ulp_blob *blob, + uint8_t *data, + uint32_t datalen) +{ + uint8_t *val = (uint8_t *)data; + uint32_t initial_size, write_size = datalen; + uint32_t size = 0; + + if (!blob || !data || + datalen > (uint32_t)(blob->bitlen - blob->write_idx)) { + BNXT_TF_DBG(ERR, "invalid argument\n"); + return 0; + } + + initial_size = ULP_BYTE_2_BITS(sizeof(uint64_t)) - + (blob->write_idx % ULP_BYTE_2_BITS(sizeof(uint64_t))); + while (write_size > 0) { + if (initial_size && write_size > initial_size) { + size = initial_size; + initial_size = 0; + } else if (initial_size && write_size <= initial_size) { + size = write_size; + initial_size = 0; + } else if (write_size > ULP_BYTE_2_BITS(sizeof(uint64_t))) { + size = ULP_BYTE_2_BITS(sizeof(uint64_t)); + } else { + size = write_size; + } + if (!ulp_blob_push(blob, val, size)) { + BNXT_TF_DBG(ERR, "push field failed\n"); + return 0; + } + val += ULP_BITS_2_BYTE(size); + write_size -= size; + } + return datalen; +} + +/* + * Adds pad to an initialized blob at the current offset + * + * blob [in] The blob that data is added to. The blob must + * be initialized prior to pushing data. 
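+ * (For example, a hypothetical caller processing the 251-bit
+ * BNXT_ULP_SPEC_OPC_ADD_PAD filler in ulp_class_key_field_list would call
+ * ulp_blob_pad_push(&blob, 251).)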
+ * + * datalen [in] The number of bits of pad to add + * + * returns the number of pad bits added, zero on failure + */ +uint32_t +ulp_blob_pad_push(struct ulp_blob *blob, + uint32_t datalen) +{ + if (datalen > (uint32_t)(blob->bitlen - blob->write_idx)) { + BNXT_TF_DBG(ERR, "Pad too large for blob\n"); + return 0; + } + + blob->write_idx += datalen; + return datalen; +} + +/* + * Get the data portion of the binary blob. + * + * blob [in] The blob's data to be retrieved. The blob must be + * initialized prior to pushing data. + * + * datalen [out] The number of bits to that are filled. + * + * returns a byte array of the blob data. Returns NULL on error. + */ +uint8_t * +ulp_blob_data_get(struct ulp_blob *blob, + uint16_t *datalen) +{ + /* validate the arguments */ + if (!blob) { + BNXT_TF_DBG(ERR, "invalid argument\n"); + return NULL; /* failure */ + } + *datalen = blob->write_idx; + return blob->data; +} + +/* + * Set the encap swap start index of the binary blob. + * + * blob [in] The blob's data to be retrieved. The blob must be + * initialized prior to pushing data. + * + * returns void. + */ +void +ulp_blob_encap_swap_idx_set(struct ulp_blob *blob) +{ + /* validate the arguments */ + if (!blob) { + BNXT_TF_DBG(ERR, "invalid argument\n"); + return; /* failure */ + } + blob->encap_swap_idx = blob->write_idx; +} + +/* + * Perform the encap buffer swap to 64 bit reversal. + * + * blob [in] The blob's data to be used for swap. + * + * returns void. + */ +void +ulp_blob_perform_encap_swap(struct ulp_blob *blob) +{ + uint32_t i, idx = 0, end_idx = 0; + uint8_t temp_val_1, temp_val_2; + + /* validate the arguments */ + if (!blob) { + BNXT_TF_DBG(ERR, "invalid argument\n"); + return; /* failure */ + } + idx = ULP_BITS_2_BYTE_NR(blob->encap_swap_idx + 1); + end_idx = ULP_BITS_2_BYTE(blob->write_idx); + + while (idx <= end_idx) { + for (i = 0; i < 4; i = i + 2) { + temp_val_1 = blob->data[idx + i]; + temp_val_2 = blob->data[idx + i + 1]; + blob->data[idx + i] = blob->data[idx + 6 - i]; + blob->data[idx + i + 1] = blob->data[idx + 7 - i]; + blob->data[idx + 7 - i] = temp_val_2; + blob->data[idx + 6 - i] = temp_val_1; + } + idx += 8; + } +} + +/* + * Read data from the operand + * + * operand [in] A pointer to a 16 Byte operand + * + * val [in/out] The variable to copy the operand to + * + * bytes [in] The number of bytes to read into val + * + * returns number of bits read, zero on error + */ +uint16_t +ulp_operand_read(uint8_t *operand, + uint8_t *val, + uint16_t bytes) +{ + /* validate the arguments */ + if (!operand || !val) { + BNXT_TF_DBG(ERR, "invalid argument\n"); + return 0; /* failure */ + } + memcpy(val, operand, bytes); + return bytes; +} + +/* + * copy the buffer in the encap format which is 2 bytes. + * The MSB of the src is placed at the LSB of dst. + * + * dst [out] The destination buffer + * src [in] The source buffer dst + * size[in] size of the buffer. + */ +void +ulp_encap_buffer_copy(uint8_t *dst, + const uint8_t *src, + uint16_t size) +{ + uint16_t idx = 0; + + /* copy 2 bytes at a time. 
Write MSB to LSB */ + while ((idx + sizeof(uint16_t)) <= size) { + memcpy(&dst[idx], &src[size - idx - sizeof(uint16_t)], + sizeof(uint16_t)); + idx += sizeof(uint16_t); + } +} + +/* + * Check the buffer is empty + * + * buf [in] The buffer + * size [in] The size of the buffer + * + */ +int32_t ulp_buffer_is_empty(const uint8_t *buf, uint32_t size) +{ + return buf[0] == 0 && !memcmp(buf, buf + 1, size - 1); +} + +/* Function to check if bitmap is zero.Return 1 on success */ +uint32_t ulp_bitmap_is_zero(uint8_t *bitmap, int32_t size) +{ + while (size-- > 0) { + if (*bitmap != 0) + return 0; + bitmap++; + } + return 1; +} + +/* Function to check if bitmap is ones. Return 1 on success */ +uint32_t ulp_bitmap_is_ones(uint8_t *bitmap, int32_t size) +{ + while (size-- > 0) { + if (*bitmap != 0xFF) + return 0; + bitmap++; + } + return 1; +} + +/* Function to check if bitmap is not zero. Return 1 on success */ +uint32_t ulp_bitmap_notzero(uint8_t *bitmap, int32_t size) +{ + while (size-- > 0) { + if (*bitmap != 0) + return 1; + bitmap++; + } + return 0; +} diff --git a/src/spdk/dpdk/drivers/net/bnxt/tf_ulp/ulp_utils.h b/src/spdk/dpdk/drivers/net/bnxt/tf_ulp/ulp_utils.h new file mode 100644 index 000000000..5db393398 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bnxt/tf_ulp/ulp_utils.h @@ -0,0 +1,294 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2019 Broadcom + * All rights reserved. + */ + +#ifndef _ULP_UTILS_H_ +#define _ULP_UTILS_H_ + +#include "bnxt.h" +#include "ulp_template_db.h" + +/* + * Macros for bitmap sets and gets + * These macros can be used if the val are power of 2. + */ +#define ULP_BITMAP_SET(bitmap, val) ((bitmap) |= (val)) +#define ULP_BITMAP_RESET(bitmap, val) ((bitmap) &= ~(val)) +#define ULP_BITMAP_ISSET(bitmap, val) ((bitmap) & (val)) +#define ULP_BITMAP_CMP(b1, b2) memcmp(&(b1)->bits, \ + &(b2)->bits, sizeof((b1)->bits)) +/* + * Macros for bitmap sets and gets + * These macros can be used if the val are not power of 2 and + * are simple index values. + */ +#define ULP_INDEX_BITMAP_SIZE (sizeof(uint64_t) * 8) +#define ULP_INDEX_BITMAP_CSET(i) (1UL << \ + ((ULP_INDEX_BITMAP_SIZE - 1) - \ + ((i) % ULP_INDEX_BITMAP_SIZE))) + +#define ULP_INDEX_BITMAP_SET(b, i) ((b) |= \ + (1UL << ((ULP_INDEX_BITMAP_SIZE - 1) - \ + ((i) % ULP_INDEX_BITMAP_SIZE)))) + +#define ULP_INDEX_BITMAP_RESET(b, i) ((b) &= \ + (~(1UL << ((ULP_INDEX_BITMAP_SIZE - 1) - \ + ((i) % ULP_INDEX_BITMAP_SIZE))))) + +#define ULP_INDEX_BITMAP_GET(b, i) (((b) >> \ + ((ULP_INDEX_BITMAP_SIZE - 1) - \ + ((i) % ULP_INDEX_BITMAP_SIZE))) & 1) + +#define ULP_DEVICE_PARAMS_INDEX(tid, dev_id) \ + (((tid) << BNXT_ULP_LOG2_MAX_NUM_DEV) | (dev_id)) + +/* Macro to convert bytes to bits */ +#define ULP_BYTE_2_BITS(byte_x) ((byte_x) * 8) +/* Macro to convert bits to bytes */ +#define ULP_BITS_2_BYTE(bits_x) (((bits_x) + 7) / 8) +/* Macro to convert bits to bytes with no round off*/ +#define ULP_BITS_2_BYTE_NR(bits_x) ((bits_x) / 8) + +/* Macros to read the computed fields */ +#define ULP_UTIL_CHF_IDX_RD(params, idx) \ + rte_be_to_cpu_32((params)->comp_fld[(idx)]) + +#define ULP_UTIL_CHF_IDX_WR(params, idx, val) \ + ((params)->comp_fld[(idx)] = rte_cpu_to_be_32((val))) +/* + * Making the blob statically sized to 128 bytes for now. + * The blob must be initialized with ulp_blob_init prior to using. 
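+ * A minimal build sequence (hypothetical local names):
+ *	struct ulp_blob key;
+ *	uint16_t key_bits;
+ *	uint8_t *key_data;
+ *	ulp_blob_init(&key, BNXT_ULP_FLMP_BLOB_SIZE_IN_BITS,
+ *		      BNXT_ULP_BYTE_ORDER_BE);
+ *	ulp_blob_push(&key, field_bytes, field_bit_size);
+ *	key_data = ulp_blob_data_get(&key, &key_bits);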
+ */ +#define BNXT_ULP_FLMP_BLOB_SIZE (128) +#define BNXT_ULP_FLMP_BLOB_SIZE_IN_BITS ULP_BYTE_2_BITS(BNXT_ULP_FLMP_BLOB_SIZE) +struct ulp_blob { + enum bnxt_ulp_byte_order byte_order; + uint16_t write_idx; + uint16_t bitlen; + uint8_t data[BNXT_ULP_FLMP_BLOB_SIZE]; + uint16_t encap_swap_idx; +}; + +/* + * The data can likely be only 32 bits for now. Just size check + * the data when being written. + */ +#define ULP_REGFILE_ENTRY_SIZE (sizeof(uint32_t)) +struct ulp_regfile_entry { + uint64_t data; + uint32_t size; +}; + +struct ulp_regfile { + struct ulp_regfile_entry entry[BNXT_ULP_REGFILE_INDEX_LAST]; +}; + +/* + * Initialize the regfile structure for writing + * + * regfile [in] Ptr to a regfile instance + * + * returns 0 on error or 1 on success + */ +uint32_t +ulp_regfile_init(struct ulp_regfile *regfile); + +/* + * Read a value from the regfile + * + * regfile [in] The regfile instance. Must be initialized prior to being used + * + * field [in] The field to be read within the regfile. + * + * returns the byte array + */ +uint32_t +ulp_regfile_read(struct ulp_regfile *regfile, + enum bnxt_ulp_regfile_index field, + uint64_t *data); + +/* + * Write a value to the regfile + * + * regfile [in] The regfile instance. Must be initialized prior to being used + * + * field [in] The field to be written within the regfile. + * + * data [in] The value is written into this variable. It is going to be in the + * same byte order as it was written. + * + * returns zero on error + */ +uint32_t +ulp_regfile_write(struct ulp_regfile *regfile, + enum bnxt_ulp_regfile_index field, + uint64_t data); + +/* + * Initializes the blob structure for creating binary blob + * + * blob [in] The blob to be initialized + * + * bitlen [in] The bit length of the blob + * + * order [in] The byte order for the blob. Currently only supporting + * big endian. All fields are packed with this order. + * + * returns 0 on error or 1 on success + */ +uint32_t +ulp_blob_init(struct ulp_blob *blob, + uint16_t bitlen, + enum bnxt_ulp_byte_order order); + +/* + * Add data to the binary blob at the current offset. + * + * blob [in] The blob that data is added to. The blob must + * be initialized prior to pushing data. + * + * data [in] A pointer to bytes to be added to the blob. + * + * datalen [in] The number of bits to be added to the blob. + * + * The offset of the data is updated after each push of data. + * NULL returned on error. + */ +uint32_t +ulp_blob_push(struct ulp_blob *blob, + uint8_t *data, + uint32_t datalen); + +/* + * Add data to the binary blob at the current offset. + * + * blob [in] The blob that data is added to. The blob must + * be initialized prior to pushing data. + * + * data [in] 64-bit value to be added to the blob. + * + * datalen [in] The number of bits to be added to the blob. + * + * The offset of the data is updated after each push of data. + * NULL returned on error, ptr to pushed data otherwise + */ +uint8_t * +ulp_blob_push_64(struct ulp_blob *blob, + uint64_t *data, + uint32_t datalen); + +/* + * Add encap data to the binary blob at the current offset. + * + * blob [in] The blob that data is added to. The blob must + * be initialized prior to pushing data. + * + * data [in] value to be added to the blob. + * + * datalen [in] The number of bits to be added to the blob. + * + * The offset of the data is updated after each push of data. + * NULL returned on error, pointer pushed value otherwise. 
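+ * In a hypothetical encap build, the swap helpers declared below bracket the
+ * pushes:
+ *	ulp_blob_encap_swap_idx_set(&blob);
+ *	ulp_blob_push_encap(&blob, encap_bytes, encap_bit_len);
+ *	ulp_blob_perform_encap_swap(&blob);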
+ */ +uint32_t +ulp_blob_push_encap(struct ulp_blob *blob, + uint8_t *data, + uint32_t datalen); + +/* + * Get the data portion of the binary blob. + * + * blob [in] The blob's data to be retrieved. The blob must be + * initialized prior to pushing data. + * + * datalen [out] The number of bits to that are filled. + * + * returns a byte array of the blob data. Returns NULL on error. + */ +uint8_t * +ulp_blob_data_get(struct ulp_blob *blob, + uint16_t *datalen); + +/* + * Adds pad to an initialized blob at the current offset + * + * blob [in] The blob that data is added to. The blob must + * be initialized prior to pushing data. + * + * datalen [in] The number of bits of pad to add + * + * returns the number of pad bits added, zero on failure + */ +uint32_t +ulp_blob_pad_push(struct ulp_blob *blob, + uint32_t datalen); + +/* + * Set the 64 bit swap start index of the binary blob. + * + * blob [in] The blob's data to be retrieved. The blob must be + * initialized prior to pushing data. + * + * returns void. + */ +void +ulp_blob_encap_swap_idx_set(struct ulp_blob *blob); + +/* + * Perform the encap buffer swap to 64 bit reversal. + * + * blob [in] The blob's data to be used for swap. + * + * returns void. + */ +void +ulp_blob_perform_encap_swap(struct ulp_blob *blob); + +/* + * Read data from the operand + * + * operand [in] A pointer to a 16 Byte operand + * + * val [in/out] The variable to copy the operand to + * + * bitlen [in] The number of bits to read into val + * + * returns number of bits read, zero on error + */ +uint16_t +ulp_operand_read(uint8_t *operand, + uint8_t *val, + uint16_t bitlen); + +/* + * copy the buffer in the encap format which is 2 bytes. + * The MSB of the src is placed at the LSB of dst. + * + * dst [out] The destination buffer + * src [in] The source buffer dst + * size[in] size of the buffer. + */ +void +ulp_encap_buffer_copy(uint8_t *dst, + const uint8_t *src, + uint16_t size); + +/* + * Check the buffer is empty + * + * buf [in] The buffer + * size [in] The size of the buffer + */ +int32_t ulp_buffer_is_empty(const uint8_t *buf, uint32_t size); + +/* Function to check if bitmap is zero.Return 1 on success */ +uint32_t ulp_bitmap_is_zero(uint8_t *bitmap, int32_t size); + +/* Function to check if bitmap is ones. Return 1 on success */ +uint32_t ulp_bitmap_is_ones(uint8_t *bitmap, int32_t size); + +/* Function to check if bitmap is not zero. 
Return 1 on success */ +uint32_t ulp_bitmap_notzero(uint8_t *bitmap, int32_t size); + +#endif /* _ULP_UTILS_H_ */ diff --git a/src/spdk/dpdk/drivers/net/bonding/Makefile b/src/spdk/dpdk/drivers/net/bonding/Makefile new file mode 100644 index 000000000..728551a84 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bonding/Makefile @@ -0,0 +1,36 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2010-2014 Intel Corporation + +include $(RTE_SDK)/mk/rte.vars.mk + +# +# library name +# +LIB = librte_pmd_bond.a + +CFLAGS += -O3 +CFLAGS += $(WERROR_FLAGS) +LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring +LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs +LDLIBS += -lrte_pci -lrte_bus_pci +LDLIBS += -lrte_bus_vdev + +EXPORT_MAP := rte_pmd_bond_version.map + +# +# all source are stored in SRCS-y +# +SRCS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += rte_eth_bond_api.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += rte_eth_bond_pmd.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += rte_eth_bond_args.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += rte_eth_bond_8023ad.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += rte_eth_bond_alb.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += rte_eth_bond_flow.c + +# +# Export include files +# +SYMLINK-y-include += rte_eth_bond.h +SYMLINK-y-include += rte_eth_bond_8023ad.h + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/src/spdk/dpdk/drivers/net/bonding/eth_bond_8023ad_private.h b/src/spdk/dpdk/drivers/net/bonding/eth_bond_8023ad_private.h new file mode 100644 index 000000000..6e44ffdb1 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bonding/eth_bond_8023ad_private.h @@ -0,0 +1,308 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2014 Intel Corporation + */ + +#ifndef _ETH_BOND_8023AD_PRIVATE_H_ +#define _ETH_BOND_8023AD_PRIVATE_H_ + +#include + +#include +#include +#include +#include + +#include "rte_eth_bond_8023ad.h" + +#define BOND_MODE_8023AX_UPDATE_TIMEOUT_MS 100 +/** Maximum number of packets to one slave queued in TX ring. */ +#define BOND_MODE_8023AX_SLAVE_RX_PKTS 3 +/** Maximum number of LACP packets from one slave queued in TX ring. */ +#define BOND_MODE_8023AX_SLAVE_TX_PKTS 1 +/** + * Timeouts deffinitions (5.4.4 in 802.1AX documentation). + */ +#define BOND_8023AD_FAST_PERIODIC_MS 900 +#define BOND_8023AD_SLOW_PERIODIC_MS 29000 +#define BOND_8023AD_SHORT_TIMEOUT_MS 3000 +#define BOND_8023AD_LONG_TIMEOUT_MS 90000 +#define BOND_8023AD_CHURN_DETECTION_TIMEOUT_MS 60000 +#define BOND_8023AD_AGGREGATE_WAIT_TIMEOUT_MS 2000 +#define BOND_8023AD_TX_MACHINE_PERIOD_MS 500 +#define BOND_8023AD_RX_MARKER_PERIOD_MS 2000 + +/** + * Interval of showing warning message from state machines. All messages will + * be held (and gathered together) to prevent flooding. + * This is no parto of 802.1AX standard. 
+ */ +#define BOND_8023AD_WARNINGS_PERIOD_MS 1000 + + + +/** + * State machine flags + */ +#define SM_FLAGS_BEGIN 0x0001 +#define SM_FLAGS_LACP_ENABLED 0x0002 +#define SM_FLAGS_ACTOR_CHURN 0x0004 +#define SM_FLAGS_PARTNER_CHURN 0x0008 +#define SM_FLAGS_MOVED 0x0100 +#define SM_FLAGS_PARTNER_SHORT_TIMEOUT 0x0200 +#define SM_FLAGS_NTT 0x0400 + +#define BOND_LINK_FULL_DUPLEX_KEY 0x01 +#define BOND_LINK_SPEED_KEY_10M 0x02 +#define BOND_LINK_SPEED_KEY_100M 0x04 +#define BOND_LINK_SPEED_KEY_1000M 0x08 +#define BOND_LINK_SPEED_KEY_10G 0x10 +#define BOND_LINK_SPEED_KEY_20G 0x11 +#define BOND_LINK_SPEED_KEY_40G 0x12 + +#define WRN_RX_MARKER_TO_FAST 0x01 +#define WRN_UNKNOWN_SLOW_TYPE 0x02 +#define WRN_UNKNOWN_MARKER_TYPE 0x04 +#define WRN_NOT_LACP_CAPABLE 0x08 +#define WRN_RX_QUEUE_FULL 0x10 +#define WRN_TX_QUEUE_FULL 0x20 + +#define CHECK_FLAGS(_variable, _f) ((_variable) & (_f)) +#define SET_FLAGS(_variable, _f) ((_variable) |= (_f)) +#define CLEAR_FLAGS(_variable, _f) ((_variable) &= ~(_f)) + +#define SM_FLAG(_p, _f) (!!CHECK_FLAGS((_p)->sm_flags, SM_FLAGS_ ## _f)) +#define SM_FLAG_SET(_p, _f) SET_FLAGS((_p)->sm_flags, SM_FLAGS_ ## _f) +#define SM_FLAG_CLR(_p, _f) CLEAR_FLAGS((_p)->sm_flags, SM_FLAGS_ ## _f) + +#define ACTOR_STATE(_p, _f) (!!CHECK_FLAGS((_p)->actor_state, STATE_ ## _f)) +#define ACTOR_STATE_SET(_p, _f) SET_FLAGS((_p)->actor_state, STATE_ ## _f) +#define ACTOR_STATE_CLR(_p, _f) CLEAR_FLAGS((_p)->actor_state, STATE_ ## _f) + +#define PARTNER_STATE(_p, _f) (!!CHECK_FLAGS((_p)->partner_state, STATE_ ## _f)) +#define PARTNER_STATE_SET(_p, _f) SET_FLAGS((_p)->partner_state, STATE_ ## _f) +#define PARTNER_STATE_CLR(_p, _f) CLEAR_FLAGS((_p)->partner_state, STATE_ ## _f) + +/** Variables associated with each port (5.4.7 in 802.1AX documentation). */ +struct port { + /** + * The operational values of the Actor's state parameters. Bitmask + * of port states. + */ + uint8_t actor_state; + + /** The operational Actor's port parameters */ + struct port_params actor; + + /** + * The operational value of the Actor's view of the current values of + * the Partner's state parameters. The Actor sets this variable either + * to the value received from the Partner in an LACPDU, or to the value + * of Partner_Admin_Port_State. Bitmask of port states. + */ + uint8_t partner_state; + + /** The operational Partner's port parameters */ + struct port_params partner; + + /* Additional port parameters not listed in documentation */ + /** State machine flags */ + uint16_t sm_flags; + enum rte_bond_8023ad_selection selected; + + /** Indicates if either allmulti or promisc has been enforced on the + * slave so that we can receive lacp packets + */ +#define BOND_8023AD_FORCED_ALLMULTI (1 << 0) +#define BOND_8023AD_FORCED_PROMISC (1 << 1) + uint8_t forced_rx_flags; + + uint64_t current_while_timer; + uint64_t periodic_timer; + uint64_t wait_while_timer; + uint64_t tx_machine_timer; + uint64_t tx_marker_timer; + /* Agregator parameters */ + /** Used aggregator port ID */ + uint16_t aggregator_port_id; + + /** Memory pool used to allocate rings */ + struct rte_mempool *mbuf_pool; + + /** Ring of LACP packets from RX burst function */ + struct rte_ring *rx_ring; + + /** Ring of slow protocol packets (LACP and MARKERS) to TX burst function */ + struct rte_ring *tx_ring; + + /** Timer which is also used as mutex. If is 0 (not running) RX marker + * packet might be responded. Otherwise shall be dropped. It is zeroed in + * mode 4 callback function after expire. 
*/ + volatile uint64_t rx_marker_timer; + + uint64_t warning_timer; + volatile uint16_t warnings_to_show; + + /** Memory pool used to allocate slow queues */ + struct rte_mempool *slow_pool; +}; + +struct mode8023ad_private { + uint64_t fast_periodic_timeout; + uint64_t slow_periodic_timeout; + uint64_t short_timeout; + uint64_t long_timeout; + uint64_t aggregate_wait_timeout; + uint64_t tx_period_timeout; + uint64_t rx_marker_timeout; + uint64_t update_timeout_us; + rte_eth_bond_8023ad_ext_slowrx_fn slowrx_cb; + uint8_t external_sm; + struct rte_ether_addr mac_addr; + + struct rte_eth_link slave_link; + /***< slave link properties */ + + /** + * Configuration of dedicated hardware queues for control plane + * traffic + */ + struct { + uint8_t enabled; + + struct rte_flow *flow[RTE_MAX_ETHPORTS]; + + uint16_t rx_qid; + uint16_t tx_qid; + } dedicated_queues; + enum rte_bond_8023ad_agg_selection agg_selection; +}; + +/** + * @internal + * The pool of *port* structures. The size of the pool + * is configured at compile-time in the file. + */ +extern struct port bond_mode_8023ad_ports[]; + +/* Forward declaration */ +struct bond_dev_private; + + +/** + * @internal + * + * Set mode 4 configuration of bonded interface. + * + * @pre Bonded interface must be stopped. + * + * @param dev Bonded interface + * @param conf new configuration. If NULL set default configuration. + */ +void +bond_mode_8023ad_setup(struct rte_eth_dev *dev, + struct rte_eth_bond_8023ad_conf *conf); + +/** + * @internal + * + * Enables 802.1AX mode and all active slaves on bonded interface. + * + * @param dev Bonded interface + * @return + * 0 on success, negative value otherwise. + */ +int +bond_mode_8023ad_enable(struct rte_eth_dev *dev); + +/** + * @internal + * + * Disables 802.1AX mode of the bonded interface and slaves. + * + * @param dev Bonded interface + * @return + * 0 on success, negative value otherwise. + */ +int bond_mode_8023ad_disable(struct rte_eth_dev *dev); + +/** + * @internal + * + * Starts 802.3AX state machines management logic. + * @param dev Bonded interface + * @return + * 0 if machines was started, 1 if machines was already running, + * negative value otherwise. + */ +int +bond_mode_8023ad_start(struct rte_eth_dev *dev); + +/** + * @internal + * + * Stops 802.3AX state machines management logic. + * @param dev Bonded interface + * @return + * 0 if this call stopped state machines, -ENOENT if alarm was not set. + */ +void +bond_mode_8023ad_stop(struct rte_eth_dev *dev); + +/** + * @internal + * + * Passes given slow packet to state machines management logic. + * @param internals Bonded device private data. + * @param slave_id Slave port id. + * @param slot_pkt Slow packet. + */ +void +bond_mode_8023ad_handle_slow_pkt(struct bond_dev_private *internals, + uint16_t slave_id, struct rte_mbuf *pkt); + +/** + * @internal + * + * Appends given slave used slave + * + * @param dev Bonded interface. + * @param port_id Slave port ID to be added + * + * @return + * 0 on success, negative value otherwise. + */ +void +bond_mode_8023ad_activate_slave(struct rte_eth_dev *dev, uint16_t port_id); + +/** + * @internal + * + * Denitializes and removes given slave from 802.1AX mode. + * + * @param dev Bonded interface. + * @param slave_num Position of slave in active_slaves array + * + * @return + * 0 on success, negative value otherwise. + */ +int +bond_mode_8023ad_deactivate_slave(struct rte_eth_dev *dev, uint16_t slave_pos); + +/** + * Updates state when MAC was changed on bonded device or one of its slaves. 
+ * @param bond_dev Bonded device + */ +void +bond_mode_8023ad_mac_address_update(struct rte_eth_dev *bond_dev); + +int +bond_ethdev_8023ad_flow_verify(struct rte_eth_dev *bond_dev, + uint16_t slave_port); + +int +bond_ethdev_8023ad_flow_set(struct rte_eth_dev *bond_dev, uint16_t slave_port); + +int +bond_8023ad_slow_pkt_hw_filter_supported(uint16_t port_id); + +#endif /* _ETH_BOND_8023AD_H_ */ diff --git a/src/spdk/dpdk/drivers/net/bonding/eth_bond_private.h b/src/spdk/dpdk/drivers/net/bonding/eth_bond_private.h new file mode 100644 index 000000000..c9b2d0fe4 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bonding/eth_bond_private.h @@ -0,0 +1,324 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2017 Intel Corporation + */ + +#ifndef _ETH_BOND_PRIVATE_H_ +#define _ETH_BOND_PRIVATE_H_ + +#include +#include + +#include +#include +#include +#include +#include + +#include "rte_eth_bond.h" +#include "eth_bond_8023ad_private.h" +#include "rte_eth_bond_alb.h" + +#define PMD_BOND_SLAVE_PORT_KVARG ("slave") +#define PMD_BOND_PRIMARY_SLAVE_KVARG ("primary") +#define PMD_BOND_MODE_KVARG ("mode") +#define PMD_BOND_AGG_MODE_KVARG ("agg_mode") +#define PMD_BOND_XMIT_POLICY_KVARG ("xmit_policy") +#define PMD_BOND_SOCKET_ID_KVARG ("socket_id") +#define PMD_BOND_MAC_ADDR_KVARG ("mac") +#define PMD_BOND_LSC_POLL_PERIOD_KVARG ("lsc_poll_period_ms") +#define PMD_BOND_LINK_UP_PROP_DELAY_KVARG ("up_delay") +#define PMD_BOND_LINK_DOWN_PROP_DELAY_KVARG ("down_delay") + +#define PMD_BOND_XMIT_POLICY_LAYER2_KVARG ("l2") +#define PMD_BOND_XMIT_POLICY_LAYER23_KVARG ("l23") +#define PMD_BOND_XMIT_POLICY_LAYER34_KVARG ("l34") + +extern int bond_logtype; + +#define RTE_BOND_LOG(lvl, msg, ...) \ + rte_log(RTE_LOG_ ## lvl, bond_logtype, \ + "%s(%d) - " msg "\n", __func__, __LINE__, ##__VA_ARGS__) + +#define BONDING_MODE_INVALID 0xFF + +extern const char *pmd_bond_init_valid_arguments[]; + +extern struct rte_vdev_driver pmd_bond_drv; + +extern const struct rte_flow_ops bond_flow_ops; + +/** Port Queue Mapping Structure */ +struct bond_rx_queue { + uint16_t queue_id; + /**< Queue Id */ + struct bond_dev_private *dev_private; + /**< Reference to eth_dev private structure */ + uint16_t nb_rx_desc; + /**< Number of RX descriptors available for the queue */ + struct rte_eth_rxconf rx_conf; + /**< Copy of RX configuration structure for queue */ + struct rte_mempool *mb_pool; + /**< Reference to mbuf pool to use for RX queue */ +}; + +struct bond_tx_queue { + uint16_t queue_id; + /**< Queue Id */ + struct bond_dev_private *dev_private; + /**< Reference to dev private structure */ + uint16_t nb_tx_desc; + /**< Number of TX descriptors available for the queue */ + struct rte_eth_txconf tx_conf; + /**< Copy of TX configuration structure for queue */ +}; + +/** Bonded slave devices structure */ +struct bond_ethdev_slave_ports { + uint16_t slaves[RTE_MAX_ETHPORTS]; /**< Slave port id array */ + uint16_t slave_count; /**< Number of slaves */ +}; + +struct bond_slave_details { + uint16_t port_id; + + uint8_t link_status_poll_enabled; + uint8_t link_status_wait_to_complete; + uint8_t last_link_status; + /**< Port Id of slave eth_dev */ + struct rte_ether_addr persisted_mac_addr; + + uint16_t reta_size; +}; + +struct rte_flow { + TAILQ_ENTRY(rte_flow) next; + /* Slaves flows */ + struct rte_flow *flows[RTE_MAX_ETHPORTS]; + /* Flow description for synchronization */ + struct rte_flow_conv_rule rule; + uint8_t rule_data[]; +}; + +typedef void (*burst_xmit_hash_t)(struct rte_mbuf **buf, uint16_t nb_pkts, + uint16_t 
slave_count, uint16_t *slaves); + +/** Link Bonding PMD device private configuration Structure */ +struct bond_dev_private { + uint16_t port_id; /**< Port Id of Bonded Port */ + uint8_t mode; /**< Link Bonding Mode */ + + rte_spinlock_t lock; + rte_spinlock_t lsc_lock; + + uint16_t primary_port; /**< Primary Slave Port */ + uint16_t current_primary_port; /**< Primary Slave Port */ + uint16_t user_defined_primary_port; + /**< Flag for whether primary port is user defined or not */ + + uint8_t balance_xmit_policy; + /**< Transmit policy - l2 / l23 / l34 for operation in balance mode */ + burst_xmit_hash_t burst_xmit_hash; + /**< Transmit policy hash function */ + + uint8_t user_defined_mac; + /**< Flag for whether MAC address is user defined or not */ + + uint8_t link_status_polling_enabled; + uint32_t link_status_polling_interval_ms; + + uint32_t link_down_delay_ms; + uint32_t link_up_delay_ms; + + uint16_t nb_rx_queues; /**< Total number of rx queues */ + uint16_t nb_tx_queues; /**< Total number of tx queues*/ + + uint16_t active_slave; /**< Next active_slave to poll */ + uint16_t active_slave_count; /**< Number of active slaves */ + uint16_t active_slaves[RTE_MAX_ETHPORTS]; /**< Active slave list */ + + uint16_t slave_count; /**< Number of bonded slaves */ + struct bond_slave_details slaves[RTE_MAX_ETHPORTS]; + /**< Arary of bonded slaves details */ + + struct mode8023ad_private mode4; + uint16_t tlb_slaves_order[RTE_MAX_ETHPORTS]; + /**< TLB active slaves send order */ + struct mode_alb_private mode6; + + uint64_t rx_offload_capa; /** Rx offload capability */ + uint64_t tx_offload_capa; /** Tx offload capability */ + uint64_t rx_queue_offload_capa; /** per queue Rx offload capability */ + uint64_t tx_queue_offload_capa; /** per queue Tx offload capability */ + + /**< List of the configured flows */ + TAILQ_HEAD(sub_flows, rte_flow) flow_list; + + /**< Flow isolation state */ + int flow_isolated; + int flow_isolated_valid; + + /** Bit mask of RSS offloads, the bit offset also means flow type */ + uint64_t flow_type_rss_offloads; + + struct rte_eth_rxconf default_rxconf; /**< Default RxQ conf. */ + struct rte_eth_txconf default_txconf; /**< Default TxQ conf. */ + struct rte_eth_desc_lim rx_desc_lim; /**< Rx descriptor limits */ + struct rte_eth_desc_lim tx_desc_lim; /**< Tx descriptor limits */ + + uint16_t reta_size; + struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_512 / + RTE_RETA_GROUP_SIZE]; + + uint8_t rss_key[52]; /**< 52-byte hash key buffer. */ + uint8_t rss_key_len; /**< hash key length in bytes. */ + + struct rte_kvargs *kvlist; + uint8_t slave_update_idx; + + uint32_t candidate_max_rx_pktlen; + uint32_t max_rx_pktlen; + + void *vlan_filter_bmpmem; /* enabled vlan filter bitmap */ + struct rte_bitmap *vlan_filter_bmp; +}; + +extern const struct eth_dev_ops default_dev_ops; + +int +check_for_master_bonded_ethdev(const struct rte_eth_dev *eth_dev); + +int +check_for_bonded_ethdev(const struct rte_eth_dev *eth_dev); + +/* Search given slave array to find position of given id. + * Return slave pos or slaves_count if not found. 
*/ +static inline uint16_t +find_slave_by_id(uint16_t *slaves, uint16_t slaves_count, uint16_t slave_id) { + + uint16_t pos; + for (pos = 0; pos < slaves_count; pos++) { + if (slave_id == slaves[pos]) + break; + } + + return pos; +} + +int +valid_port_id(uint16_t port_id); + +int +valid_bonded_port_id(uint16_t port_id); + +int +valid_slave_port_id(uint16_t port_id, uint8_t mode); + +void +deactivate_slave(struct rte_eth_dev *eth_dev, uint16_t port_id); + +void +activate_slave(struct rte_eth_dev *eth_dev, uint16_t port_id); + +int +mac_address_set(struct rte_eth_dev *eth_dev, + struct rte_ether_addr *new_mac_addr); + +int +mac_address_get(struct rte_eth_dev *eth_dev, + struct rte_ether_addr *dst_mac_addr); + +int +mac_address_slaves_update(struct rte_eth_dev *bonded_eth_dev); + +int +slave_add_mac_addresses(struct rte_eth_dev *bonded_eth_dev, + uint16_t slave_port_id); + +int +slave_remove_mac_addresses(struct rte_eth_dev *bonded_eth_dev, + uint16_t slave_port_id); + +int +bond_ethdev_mode_set(struct rte_eth_dev *eth_dev, int mode); + +int +slave_configure(struct rte_eth_dev *bonded_eth_dev, + struct rte_eth_dev *slave_eth_dev); + +void +slave_remove(struct bond_dev_private *internals, + struct rte_eth_dev *slave_eth_dev); + +void +slave_add(struct bond_dev_private *internals, + struct rte_eth_dev *slave_eth_dev); + +void +burst_xmit_l2_hash(struct rte_mbuf **buf, uint16_t nb_pkts, + uint16_t slave_count, uint16_t *slaves); + +void +burst_xmit_l23_hash(struct rte_mbuf **buf, uint16_t nb_pkts, + uint16_t slave_count, uint16_t *slaves); + +void +burst_xmit_l34_hash(struct rte_mbuf **buf, uint16_t nb_pkts, + uint16_t slave_count, uint16_t *slaves); + + +void +bond_ethdev_primary_set(struct bond_dev_private *internals, + uint16_t slave_port_id); + +int +bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type, + void *param, void *ret_param); + +int +bond_ethdev_parse_slave_port_kvarg(const char *key, + const char *value, void *extra_args); + +int +bond_ethdev_parse_slave_mode_kvarg(const char *key, + const char *value, void *extra_args); + +int +bond_ethdev_parse_slave_agg_mode_kvarg(const char *key __rte_unused, + const char *value, void *extra_args); + +int +bond_ethdev_parse_socket_id_kvarg(const char *key, + const char *value, void *extra_args); + +int +bond_ethdev_parse_primary_slave_port_id_kvarg(const char *key, + const char *value, void *extra_args); + +int +bond_ethdev_parse_balance_xmit_policy_kvarg(const char *key, + const char *value, void *extra_args); + +int +bond_ethdev_parse_bond_mac_addr_kvarg(const char *key, + const char *value, void *extra_args); + +int +bond_ethdev_parse_time_ms_kvarg(const char *key, + const char *value, void *extra_args); + +void +bond_tlb_disable(struct bond_dev_private *internals); + +void +bond_tlb_enable(struct bond_dev_private *internals); + +void +bond_tlb_activate_slave(struct bond_dev_private *internals); + +void +bond_ethdev_stop(struct rte_eth_dev *eth_dev); + +void +bond_ethdev_close(struct rte_eth_dev *dev); + +#endif diff --git a/src/spdk/dpdk/drivers/net/bonding/meson.build b/src/spdk/dpdk/drivers/net/bonding/meson.build new file mode 100644 index 000000000..a3eff3b31 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bonding/meson.build @@ -0,0 +1,11 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2017 Intel Corporation + +name = 'bond' #, james bond :-) +sources = files('rte_eth_bond_api.c', 'rte_eth_bond_pmd.c', 'rte_eth_bond_flow.c', + 'rte_eth_bond_args.c', 'rte_eth_bond_8023ad.c', 'rte_eth_bond_alb.c') + 
+deps += 'sched' # needed for rte_bitmap.h +deps += ['ip_frag'] + +install_headers('rte_eth_bond.h', 'rte_eth_bond_8023ad.h') diff --git a/src/spdk/dpdk/drivers/net/bonding/rte_eth_bond.h b/src/spdk/dpdk/drivers/net/bonding/rte_eth_bond.h new file mode 100644 index 000000000..874aa91a5 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bonding/rte_eth_bond.h @@ -0,0 +1,351 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2015 Intel Corporation + */ + +#ifndef _RTE_ETH_BOND_H_ +#define _RTE_ETH_BOND_H_ + +/** + * @file rte_eth_bond.h + * + * RTE Link Bonding Ethernet Device + * Link Bonding for 1GbE and 10GbE ports to allow the aggregation of multiple + * (slave) NICs into a single logical interface. The bonded device processes + * these interfaces based on the mode of operation specified and supported. + * This implementation supports 4 modes of operation round robin, active backup + * balance and broadcast. Providing redundant links, fault tolerance and/or + * load balancing of network ports + */ + +#ifdef __cplusplus +extern "C" { +#endif + +#include + +/* Supported modes of operation of link bonding library */ + +#define BONDING_MODE_ROUND_ROBIN (0) +/**< Round Robin (Mode 0). + * In this mode all transmitted packets will be balanced equally across all + * active slaves of the bonded in a round robin fashion. */ +#define BONDING_MODE_ACTIVE_BACKUP (1) +/**< Active Backup (Mode 1). + * In this mode all packets transmitted will be transmitted on the primary + * slave until such point as the primary slave is no longer available and then + * transmitted packets will be sent on the next available slaves. The primary + * slave can be defined by the user but defaults to the first active slave + * available if not specified. */ +#define BONDING_MODE_BALANCE (2) +/**< Balance (Mode 2). + * In this mode all packets transmitted will be balanced across the available + * slaves using one of three available transmit policies - l2, l2+3 or l3+4. + * See BALANCE_XMIT_POLICY macros definitions for further details on transmit + * policies. */ +#define BONDING_MODE_BROADCAST (3) +/**< Broadcast (Mode 3). + * In this mode all transmitted packets will be transmitted on all available + * active slaves of the bonded. */ +#define BONDING_MODE_8023AD (4) +/**< 802.3AD (Mode 4). + * + * This mode provides auto negotiation/configuration + * of peers and well as link status changes monitoring using out of band + * LACP (link aggregation control protocol) messages. For further details of + * LACP specification see the IEEE 802.3ad/802.1AX standards. It is also + * described here + * https://www.kernel.org/doc/Documentation/networking/bonding.txt. + * + * Important Usage Notes: + * - for LACP mode to work the rx/tx burst functions must be invoked + * at least once every 100ms, otherwise the out-of-band LACP messages will not + * be handled with the expected latency and this may cause the link status to be + * incorrectly marked as down or failure to correctly negotiate with peers. + * - For optimal performance during initial handshaking the array of mbufs provided + * to rx_burst should be at least 2 times the slave count size. + * + */ +#define BONDING_MODE_TLB (5) +/**< Adaptive TLB (Mode 5) + * This mode provides an adaptive transmit load balancing. It dynamically + * changes the transmitting slave, according to the computed load. 
Statistics + * are collected in 100ms intervals and scheduled every 10ms */ +#define BONDING_MODE_ALB (6) +/**< Adaptive Load Balancing (Mode 6) + * This mode includes adaptive TLB and receive load balancing (RLB). In RLB the + * bonding driver intercepts ARP replies send by local system and overwrites its + * source MAC address, so that different peers send data to the server on + * different slave interfaces. When local system sends ARP request, it saves IP + * information from it. When ARP reply from that peer is received, its MAC is + * stored, one of slave MACs assigned and ARP reply send to that peer. + */ + +/* Balance Mode Transmit Policies */ +#define BALANCE_XMIT_POLICY_LAYER2 (0) +/**< Layer 2 (Ethernet MAC) */ +#define BALANCE_XMIT_POLICY_LAYER23 (1) +/**< Layer 2+3 (Ethernet MAC + IP Addresses) transmit load balancing */ +#define BALANCE_XMIT_POLICY_LAYER34 (2) +/**< Layer 3+4 (IP Addresses + UDP Ports) transmit load balancing */ + +/** + * Create a bonded rte_eth_dev device + * + * @param name Name of new link bonding device. + * @param mode Mode to initialize bonding device in. + * @param socket_id Socket Id on which to allocate eth_dev resources. + * + * @return + * Port Id of created rte_eth_dev on success, negative value otherwise + */ +int +rte_eth_bond_create(const char *name, uint8_t mode, uint8_t socket_id); + +/** + * Free a bonded rte_eth_dev device + * + * @param name Name of the link bonding device. + * + * @return + * 0 on success, negative value otherwise + */ +int +rte_eth_bond_free(const char *name); + +/** + * Add a rte_eth_dev device as a slave to the bonded device + * + * @param bonded_port_id Port ID of bonded device. + * @param slave_port_id Port ID of slave device. + * + * @return + * 0 on success, negative value otherwise + */ +int +rte_eth_bond_slave_add(uint16_t bonded_port_id, uint16_t slave_port_id); + +/** + * Remove a slave rte_eth_dev device from the bonded device + * + * @param bonded_port_id Port ID of bonded device. + * @param slave_port_id Port ID of slave device. + * + * @return + * 0 on success, negative value otherwise + */ +int +rte_eth_bond_slave_remove(uint16_t bonded_port_id, uint16_t slave_port_id); + +/** + * Set link bonding mode of bonded device + * + * @param bonded_port_id Port ID of bonded device. + * @param mode Bonding mode to set + * + * @return + * 0 on success, negative value otherwise + */ +int +rte_eth_bond_mode_set(uint16_t bonded_port_id, uint8_t mode); + +/** + * Get link bonding mode of bonded device + * + * @param bonded_port_id Port ID of bonded device. + * + * @return + * link bonding mode on success, negative value otherwise + */ +int +rte_eth_bond_mode_get(uint16_t bonded_port_id); + +/** + * Set slave rte_eth_dev as primary slave of bonded device + * + * @param bonded_port_id Port ID of bonded device. + * @param slave_port_id Port ID of slave device. + * + * @return + * 0 on success, negative value otherwise + */ +int +rte_eth_bond_primary_set(uint16_t bonded_port_id, uint16_t slave_port_id); + +/** + * Get primary slave of bonded device + * + * @param bonded_port_id Port ID of bonded device. 
+ * + * @return + * Port Id of primary slave on success, -1 on failure + */ +int +rte_eth_bond_primary_get(uint16_t bonded_port_id); + +/** + * Populate an array with list of the slaves port id's of the bonded device + * + * @param bonded_port_id Port ID of bonded eth_dev to interrogate + * @param slaves Array to be populated with the current active slaves + * @param len Length of slaves array + * + * @return + * Number of slaves associated with bonded device on success, + * negative value otherwise + */ +int +rte_eth_bond_slaves_get(uint16_t bonded_port_id, uint16_t slaves[], + uint16_t len); + +/** + * Populate an array with list of the active slaves port id's of the bonded + * device. + * + * @param bonded_port_id Port ID of bonded eth_dev to interrogate + * @param slaves Array to be populated with the current active slaves + * @param len Length of slaves array + * + * @return + * Number of active slaves associated with bonded device on success, + * negative value otherwise + */ +int +rte_eth_bond_active_slaves_get(uint16_t bonded_port_id, uint16_t slaves[], + uint16_t len); + +/** + * Set explicit MAC address to use on bonded device and it's slaves. + * + * @param bonded_port_id Port ID of bonded device. + * @param mac_addr MAC Address to use on bonded device overriding + * slaves MAC addresses + * + * @return + * 0 on success, negative value otherwise + */ +int +rte_eth_bond_mac_address_set(uint16_t bonded_port_id, + struct rte_ether_addr *mac_addr); + +/** + * Reset bonded device to use MAC from primary slave on bonded device and it's + * slaves. + * + * @param bonded_port_id Port ID of bonded device. + * + * @return + * 0 on success, negative value otherwise + */ +int +rte_eth_bond_mac_address_reset(uint16_t bonded_port_id); + +/** + * Set the transmit policy for bonded device to use when it is operating in + * balance mode, this parameter is otherwise ignored in other modes of + * operation. + * + * @param bonded_port_id Port ID of bonded device. + * @param policy Balance mode transmission policy. + * + * @return + * 0 on success, negative value otherwise. + */ +int +rte_eth_bond_xmit_policy_set(uint16_t bonded_port_id, uint8_t policy); + +/** + * Get the transmit policy set on bonded device for balance mode operation + * + * @param bonded_port_id Port ID of bonded device. + * + * @return + * Balance transmit policy on success, negative value otherwise. + */ +int +rte_eth_bond_xmit_policy_get(uint16_t bonded_port_id); + +/** + * Set the link monitoring frequency (in ms) for monitoring the link status of + * slave devices + * + * @param bonded_port_id Port ID of bonded device. + * @param internal_ms Monitoring interval in milliseconds + * + * @return + * 0 on success, negative value otherwise. + */ + +int +rte_eth_bond_link_monitoring_set(uint16_t bonded_port_id, uint32_t internal_ms); + +/** + * Get the current link monitoring frequency (in ms) for monitoring of the link + * status of slave devices + * + * @param bonded_port_id Port ID of bonded device. + * + * @return + * Monitoring interval on success, negative value otherwise. + */ +int +rte_eth_bond_link_monitoring_get(uint16_t bonded_port_id); + + +/** + * Set the period in milliseconds for delaying the disabling of a bonded link + * when the link down status has been detected + * + * @param bonded_port_id Port ID of bonded device. + * @param delay_ms Delay period in milliseconds. + * + * @return + * 0 on success, negative value otherwise. 
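A minimal control-path sketch using only the functions declared above: create a bonded device in balance mode, attach two slave ports, select the layer 3+4 transmit policy and set the link-monitoring interval. It is illustrative rather than part of the patch; the slave port ids (0 and 1), the socket id and the device name are assumptions, and the usual rte_eth_dev_configure()/queue-setup/start sequence on the bonded port is omitted.

#include <rte_eth_bond.h>

static int
bond_setup_example(void)
{
	int bond_port;

	bond_port = rte_eth_bond_create("net_bonding0", BONDING_MODE_BALANCE,
					0 /* socket id, assumed */);
	if (bond_port < 0)
		return bond_port;

	/* Slave ports 0 and 1 are assumed to be already probed */
	if (rte_eth_bond_slave_add(bond_port, 0) != 0 ||
	    rte_eth_bond_slave_add(bond_port, 1) != 0)
		return -1;

	/* Hash on IP addresses + ports for transmit balancing */
	if (rte_eth_bond_xmit_policy_set(bond_port,
					 BALANCE_XMIT_POLICY_LAYER34) != 0)
		return -1;

	/* Poll slave link status every 100 ms */
	return rte_eth_bond_link_monitoring_set(bond_port, 100);
}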
+ */ +int +rte_eth_bond_link_down_prop_delay_set(uint16_t bonded_port_id, + uint32_t delay_ms); + +/** + * Get the period in milliseconds set for delaying the disabling of a bonded + * link when the link down status has been detected + * + * @param bonded_port_id Port ID of bonded device. + * + * @return + * Delay period on success, negative value otherwise. + */ +int +rte_eth_bond_link_down_prop_delay_get(uint16_t bonded_port_id); + +/** + * Set the period in milliseconds for delaying the enabling of a bonded link + * when the link up status has been detected + * + * @param bonded_port_id Port ID of bonded device. + * @param delay_ms Delay period in milliseconds. + * + * @return + * 0 on success, negative value otherwise. + */ +int +rte_eth_bond_link_up_prop_delay_set(uint16_t bonded_port_id, + uint32_t delay_ms); + +/** + * Get the period in milliseconds set for delaying the enabling of a bonded + * link when the link up status has been detected + * + * @param bonded_port_id Port ID of bonded device. + * + * @return + * Delay period on success, negative value otherwise. + */ +int +rte_eth_bond_link_up_prop_delay_get(uint16_t bonded_port_id); + + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/spdk/dpdk/drivers/net/bonding/rte_eth_bond_8023ad.c b/src/spdk/dpdk/drivers/net/bonding/rte_eth_bond_8023ad.c new file mode 100644 index 000000000..b77a37ddb --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bonding/rte_eth_bond_8023ad.c @@ -0,0 +1,1719 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2015 Intel Corporation + */ + +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "eth_bond_private.h" + +static void bond_mode_8023ad_ext_periodic_cb(void *arg); +#ifdef RTE_LIBRTE_BOND_DEBUG_8023AD + +#define MODE4_DEBUG(fmt, ...) 
\ + rte_log(RTE_LOG_DEBUG, bond_logtype, \ + "%6u [Port %u: %s] " fmt, \ + bond_dbg_get_time_diff_ms(), slave_id, \ + __func__, ##__VA_ARGS__) + +static uint64_t start_time; + +static unsigned +bond_dbg_get_time_diff_ms(void) +{ + uint64_t now; + + now = rte_rdtsc(); + if (start_time == 0) + start_time = now; + + return ((now - start_time) * 1000) / rte_get_tsc_hz(); +} + +static void +bond_print_lacp(struct lacpdu *l) +{ + char a_address[18]; + char p_address[18]; + char a_state[256] = { 0 }; + char p_state[256] = { 0 }; + + static const char * const state_labels[] = { + "ACT", "TIMEOUT", "AGG", "SYNC", "COL", "DIST", "DEF", "EXP" + }; + + int a_len = 0; + int p_len = 0; + uint8_t i; + uint8_t *addr; + + addr = l->actor.port_params.system.addr_bytes; + snprintf(a_address, sizeof(a_address), "%02X:%02X:%02X:%02X:%02X:%02X", + addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]); + + addr = l->partner.port_params.system.addr_bytes; + snprintf(p_address, sizeof(p_address), "%02X:%02X:%02X:%02X:%02X:%02X", + addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]); + + for (i = 0; i < 8; i++) { + if ((l->actor.state >> i) & 1) { + a_len += snprintf(&a_state[a_len], RTE_DIM(a_state) - a_len, "%s ", + state_labels[i]); + } + + if ((l->partner.state >> i) & 1) { + p_len += snprintf(&p_state[p_len], RTE_DIM(p_state) - p_len, "%s ", + state_labels[i]); + } + } + + if (a_len && a_state[a_len-1] == ' ') + a_state[a_len-1] = '\0'; + + if (p_len && p_state[p_len-1] == ' ') + p_state[p_len-1] = '\0'; + + RTE_BOND_LOG(DEBUG, + "LACP: {\n" + " subtype= %02X\n" + " ver_num=%02X\n" + " actor={ tlv=%02X, len=%02X\n" + " pri=%04X, system=%s, key=%04X, p_pri=%04X p_num=%04X\n" + " state={ %s }\n" + " }\n" + " partner={ tlv=%02X, len=%02X\n" + " pri=%04X, system=%s, key=%04X, p_pri=%04X p_num=%04X\n" + " state={ %s }\n" + " }\n" + " collector={info=%02X, length=%02X, max_delay=%04X\n, " + "type_term=%02X, terminator_length = %02X }", + l->subtype, + l->version_number, + l->actor.tlv_type_info, + l->actor.info_length, + l->actor.port_params.system_priority, + a_address, + l->actor.port_params.key, + l->actor.port_params.port_priority, + l->actor.port_params.port_number, + a_state, + l->partner.tlv_type_info, + l->partner.info_length, + l->partner.port_params.system_priority, + p_address, + l->partner.port_params.key, + l->partner.port_params.port_priority, + l->partner.port_params.port_number, + p_state, + l->tlv_type_collector_info, + l->collector_info_length, + l->collector_max_delay, + l->tlv_type_terminator, + l->terminator_length); + +} + +#define BOND_PRINT_LACP(lacpdu) bond_print_lacp(lacpdu) +#else +#define BOND_PRINT_LACP(lacpdu) do { } while (0) +#define MODE4_DEBUG(fmt, ...) do { } while (0) +#endif + +static const struct rte_ether_addr lacp_mac_addr = { + .addr_bytes = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x02 } +}; + +struct port bond_mode_8023ad_ports[RTE_MAX_ETHPORTS]; + +static void +timer_cancel(uint64_t *timer) +{ + *timer = 0; +} + +static void +timer_set(uint64_t *timer, uint64_t timeout) +{ + *timer = rte_rdtsc() + timeout; +} + +/* Forces given timer to be in expired state. 
*/ +static void +timer_force_expired(uint64_t *timer) +{ + *timer = rte_rdtsc(); +} + +static bool +timer_is_stopped(uint64_t *timer) +{ + return *timer == 0; +} + +static bool +timer_is_expired(uint64_t *timer) +{ + return *timer < rte_rdtsc(); +} + +/* Timer is in running state if it is not stopped nor expired */ +static bool +timer_is_running(uint64_t *timer) +{ + return !timer_is_stopped(timer) && !timer_is_expired(timer); +} + +static void +set_warning_flags(struct port *port, uint16_t flags) +{ + int retval; + uint16_t old; + uint16_t new_flag = 0; + + do { + old = port->warnings_to_show; + new_flag = old | flags; + retval = rte_atomic16_cmpset(&port->warnings_to_show, old, new_flag); + } while (unlikely(retval == 0)); +} + +static void +show_warnings(uint16_t slave_id) +{ + struct port *port = &bond_mode_8023ad_ports[slave_id]; + uint8_t warnings; + + do { + warnings = port->warnings_to_show; + } while (rte_atomic16_cmpset(&port->warnings_to_show, warnings, 0) == 0); + + if (!warnings) + return; + + if (!timer_is_expired(&port->warning_timer)) + return; + + + timer_set(&port->warning_timer, BOND_8023AD_WARNINGS_PERIOD_MS * + rte_get_tsc_hz() / 1000); + + if (warnings & WRN_RX_QUEUE_FULL) { + RTE_BOND_LOG(DEBUG, + "Slave %u: failed to enqueue LACP packet into RX ring.\n" + "Receive and transmit functions must be invoked on bonded" + "interface at least 10 times per second or LACP will notwork correctly", + slave_id); + } + + if (warnings & WRN_TX_QUEUE_FULL) { + RTE_BOND_LOG(DEBUG, + "Slave %u: failed to enqueue LACP packet into TX ring.\n" + "Receive and transmit functions must be invoked on bonded" + "interface at least 10 times per second or LACP will not work correctly", + slave_id); + } + + if (warnings & WRN_RX_MARKER_TO_FAST) + RTE_BOND_LOG(INFO, "Slave %u: marker to early - ignoring.", + slave_id); + + if (warnings & WRN_UNKNOWN_SLOW_TYPE) { + RTE_BOND_LOG(INFO, + "Slave %u: ignoring unknown slow protocol frame type", + slave_id); + } + + if (warnings & WRN_UNKNOWN_MARKER_TYPE) + RTE_BOND_LOG(INFO, "Slave %u: ignoring unknown marker type", + slave_id); + + if (warnings & WRN_NOT_LACP_CAPABLE) + MODE4_DEBUG("Port %u is not LACP capable!\n", slave_id); +} + +static void +record_default(struct port *port) +{ + /* Record default parameters for partner. Partner admin parameters + * are not implemented so set them to arbitrary default (last known) and + * mark actor that parner is in defaulted state. */ + port->partner_state = STATE_LACP_ACTIVE; + ACTOR_STATE_SET(port, DEFAULTED); +} + +/** Function handles rx state machine. + * + * This function implements Receive State Machine from point 5.4.12 in + * 802.1AX documentation. It should be called periodically. + * + * @param lacpdu LACPDU received. + * @param port Port on which LACPDU was received. 
+ */ +static void +rx_machine(struct bond_dev_private *internals, uint16_t slave_id, + struct lacpdu *lacp) +{ + struct port *agg, *port = &bond_mode_8023ad_ports[slave_id]; + uint64_t timeout; + + if (SM_FLAG(port, BEGIN)) { + /* Initialize stuff */ + MODE4_DEBUG("-> INITIALIZE\n"); + SM_FLAG_CLR(port, MOVED); + port->selected = UNSELECTED; + + record_default(port); + + ACTOR_STATE_CLR(port, EXPIRED); + timer_cancel(&port->current_while_timer); + + /* DISABLED: On initialization partner is out of sync */ + PARTNER_STATE_CLR(port, SYNCHRONIZATION); + + /* LACP DISABLED stuff if LACP not enabled on this port */ + if (!SM_FLAG(port, LACP_ENABLED)) + PARTNER_STATE_CLR(port, AGGREGATION); + else + PARTNER_STATE_SET(port, AGGREGATION); + } + + if (!SM_FLAG(port, LACP_ENABLED)) { + /* Update parameters only if state changed */ + if (!timer_is_stopped(&port->current_while_timer)) { + port->selected = UNSELECTED; + record_default(port); + PARTNER_STATE_CLR(port, AGGREGATION); + ACTOR_STATE_CLR(port, EXPIRED); + timer_cancel(&port->current_while_timer); + } + return; + } + + if (lacp) { + MODE4_DEBUG("LACP -> CURRENT\n"); + BOND_PRINT_LACP(lacp); + /* Update selected flag. If partner parameters are defaulted assume they + * are match. If not defaulted compare LACP actor with ports parner + * params. */ + if (!ACTOR_STATE(port, DEFAULTED) && + (ACTOR_STATE(port, AGGREGATION) != PARTNER_STATE(port, AGGREGATION) + || memcmp(&port->partner, &lacp->actor.port_params, + sizeof(port->partner)) != 0)) { + MODE4_DEBUG("selected <- UNSELECTED\n"); + port->selected = UNSELECTED; + } + + /* Record this PDU actor params as partner params */ + memcpy(&port->partner, &lacp->actor.port_params, + sizeof(struct port_params)); + port->partner_state = lacp->actor.state; + + /* Partner parameters are not defaulted any more */ + ACTOR_STATE_CLR(port, DEFAULTED); + + /* If LACP partner params match this port actor params */ + agg = &bond_mode_8023ad_ports[port->aggregator_port_id]; + bool match = port->actor.system_priority == + lacp->partner.port_params.system_priority && + rte_is_same_ether_addr(&agg->actor.system, + &lacp->partner.port_params.system) && + port->actor.port_priority == + lacp->partner.port_params.port_priority && + port->actor.port_number == + lacp->partner.port_params.port_number; + + /* Update NTT if partners information are outdated (xored and masked + * bits are set)*/ + uint8_t state_mask = STATE_LACP_ACTIVE | STATE_LACP_SHORT_TIMEOUT | + STATE_SYNCHRONIZATION | STATE_AGGREGATION; + + if (((port->actor_state ^ lacp->partner.state) & state_mask) || + match == false) { + SM_FLAG_SET(port, NTT); + } + + /* If LACP partner params match this port actor params */ + if (match == true && ACTOR_STATE(port, AGGREGATION) == + PARTNER_STATE(port, AGGREGATION)) + PARTNER_STATE_SET(port, SYNCHRONIZATION); + else if (!PARTNER_STATE(port, AGGREGATION) && ACTOR_STATE(port, + AGGREGATION)) + PARTNER_STATE_SET(port, SYNCHRONIZATION); + else + PARTNER_STATE_CLR(port, SYNCHRONIZATION); + + if (ACTOR_STATE(port, LACP_SHORT_TIMEOUT)) + timeout = internals->mode4.short_timeout; + else + timeout = internals->mode4.long_timeout; + + timer_set(&port->current_while_timer, timeout); + ACTOR_STATE_CLR(port, EXPIRED); + return; /* No state change */ + } + + /* If CURRENT state timer is not running (stopped or expired) + * transit to EXPIRED state from DISABLED or CURRENT */ + if (!timer_is_running(&port->current_while_timer)) { + ACTOR_STATE_SET(port, EXPIRED); + PARTNER_STATE_CLR(port, SYNCHRONIZATION); + 
PARTNER_STATE_SET(port, LACP_SHORT_TIMEOUT); + timer_set(&port->current_while_timer, internals->mode4.short_timeout); + } +} + +/** + * Function handles periodic tx state machine. + * + * Function implements Periodic Transmission state machine from point 5.4.13 + * in 802.1AX documentation. It should be called periodically. + * + * @param port Port to handle state machine. + */ +static void +periodic_machine(struct bond_dev_private *internals, uint16_t slave_id) +{ + struct port *port = &bond_mode_8023ad_ports[slave_id]; + /* Calculate if either site is LACP enabled */ + uint64_t timeout; + uint8_t active = ACTOR_STATE(port, LACP_ACTIVE) || + PARTNER_STATE(port, LACP_ACTIVE); + + uint8_t is_partner_fast, was_partner_fast; + /* No periodic is on BEGIN, LACP DISABLE or when both sides are pasive */ + if (SM_FLAG(port, BEGIN) || !SM_FLAG(port, LACP_ENABLED) || !active) { + timer_cancel(&port->periodic_timer); + timer_force_expired(&port->tx_machine_timer); + SM_FLAG_CLR(port, PARTNER_SHORT_TIMEOUT); + + MODE4_DEBUG("-> NO_PERIODIC ( %s%s%s)\n", + SM_FLAG(port, BEGIN) ? "begind " : "", + SM_FLAG(port, LACP_ENABLED) ? "" : "LACP disabled ", + active ? "LACP active " : "LACP pasive "); + return; + } + + is_partner_fast = PARTNER_STATE(port, LACP_SHORT_TIMEOUT); + was_partner_fast = SM_FLAG(port, PARTNER_SHORT_TIMEOUT); + + /* If periodic timer is not started, transit from NO PERIODIC to FAST/SLOW. + * Other case: check if timer expire or partners settings changed. */ + if (!timer_is_stopped(&port->periodic_timer)) { + if (timer_is_expired(&port->periodic_timer)) { + SM_FLAG_SET(port, NTT); + } else if (is_partner_fast != was_partner_fast) { + /* Partners timeout was slow and now it is fast -> send LACP. + * In other case (was fast and now it is slow) just switch + * timeout to slow without forcing send of LACP (because standard + * say so)*/ + if (is_partner_fast) + SM_FLAG_SET(port, NTT); + } else + return; /* Nothing changed */ + } + + /* Handle state transition to FAST/SLOW LACP timeout */ + if (is_partner_fast) { + timeout = internals->mode4.fast_periodic_timeout; + SM_FLAG_SET(port, PARTNER_SHORT_TIMEOUT); + } else { + timeout = internals->mode4.slow_periodic_timeout; + SM_FLAG_CLR(port, PARTNER_SHORT_TIMEOUT); + } + + timer_set(&port->periodic_timer, timeout); +} + +/** + * Function handles mux state machine. + * + * Function implements Mux Machine from point 5.4.15 in 802.1AX documentation. + * It should be called periodically. + * + * @param port Port to handle state machine. 
+ */ +static void +mux_machine(struct bond_dev_private *internals, uint16_t slave_id) +{ + struct port *port = &bond_mode_8023ad_ports[slave_id]; + + /* Save current state for later use */ + const uint8_t state_mask = STATE_SYNCHRONIZATION | STATE_DISTRIBUTING | + STATE_COLLECTING; + + /* Enter DETACHED state on BEGIN condition or from any other state if + * port was unselected */ + if (SM_FLAG(port, BEGIN) || + port->selected == UNSELECTED || (port->selected == STANDBY && + (port->actor_state & state_mask) != 0)) { + /* detach mux from aggregator */ + port->actor_state &= ~state_mask; + /* Set ntt to true if BEGIN condition or transition from any other state + * which is indicated that wait_while_timer was started */ + if (SM_FLAG(port, BEGIN) || + !timer_is_stopped(&port->wait_while_timer)) { + SM_FLAG_SET(port, NTT); + MODE4_DEBUG("-> DETACHED\n"); + } + timer_cancel(&port->wait_while_timer); + } + + if (timer_is_stopped(&port->wait_while_timer)) { + if (port->selected == SELECTED || port->selected == STANDBY) { + timer_set(&port->wait_while_timer, + internals->mode4.aggregate_wait_timeout); + + MODE4_DEBUG("DETACHED -> WAITING\n"); + } + /* Waiting state entered */ + return; + } + + /* Transit next state if port is ready */ + if (!timer_is_expired(&port->wait_while_timer)) + return; + + if ((ACTOR_STATE(port, DISTRIBUTING) || ACTOR_STATE(port, COLLECTING)) && + !PARTNER_STATE(port, SYNCHRONIZATION)) { + /* If in COLLECTING or DISTRIBUTING state and partner becomes out of + * sync transit to ATACHED state. */ + ACTOR_STATE_CLR(port, DISTRIBUTING); + ACTOR_STATE_CLR(port, COLLECTING); + /* Clear actor sync to activate transit ATACHED in condition bellow */ + ACTOR_STATE_CLR(port, SYNCHRONIZATION); + MODE4_DEBUG("Out of sync -> ATTACHED\n"); + } + + if (!ACTOR_STATE(port, SYNCHRONIZATION)) { + /* attach mux to aggregator */ + RTE_ASSERT((port->actor_state & (STATE_COLLECTING | + STATE_DISTRIBUTING)) == 0); + + ACTOR_STATE_SET(port, SYNCHRONIZATION); + SM_FLAG_SET(port, NTT); + MODE4_DEBUG("ATTACHED Entered\n"); + } else if (!ACTOR_STATE(port, COLLECTING)) { + /* Start collecting if in sync */ + if (PARTNER_STATE(port, SYNCHRONIZATION)) { + MODE4_DEBUG("ATTACHED -> COLLECTING\n"); + ACTOR_STATE_SET(port, COLLECTING); + SM_FLAG_SET(port, NTT); + } + } else if (ACTOR_STATE(port, COLLECTING)) { + /* Check if partner is in COLLECTING state. If so this port can + * distribute frames to it */ + if (!ACTOR_STATE(port, DISTRIBUTING)) { + if (PARTNER_STATE(port, COLLECTING)) { + /* Enable DISTRIBUTING if partner is collecting */ + ACTOR_STATE_SET(port, DISTRIBUTING); + SM_FLAG_SET(port, NTT); + MODE4_DEBUG("COLLECTING -> DISTRIBUTING\n"); + RTE_BOND_LOG(INFO, + "Bond %u: slave id %u distributing started.", + internals->port_id, slave_id); + } + } else { + if (!PARTNER_STATE(port, COLLECTING)) { + /* Disable DISTRIBUTING (enter COLLECTING state) if partner + * is not collecting */ + ACTOR_STATE_CLR(port, DISTRIBUTING); + SM_FLAG_SET(port, NTT); + MODE4_DEBUG("DISTRIBUTING -> COLLECTING\n"); + RTE_BOND_LOG(INFO, + "Bond %u: slave id %u distributing stopped.", + internals->port_id, slave_id); + } + } + } +} + +/** + * Function handles transmit state machine. + * + * Function implements Transmit Machine from point 5.4.16 in 802.1AX + * documentation. 
+ * + * @param port + */ +static void +tx_machine(struct bond_dev_private *internals, uint16_t slave_id) +{ + struct port *agg, *port = &bond_mode_8023ad_ports[slave_id]; + + struct rte_mbuf *lacp_pkt = NULL; + struct lacpdu_header *hdr; + struct lacpdu *lacpdu; + + /* If periodic timer is not running periodic machine is in NO PERIODIC and + * according to 802.3ax standard tx machine should not transmit any frames + * and set ntt to false. */ + if (timer_is_stopped(&port->periodic_timer)) + SM_FLAG_CLR(port, NTT); + + if (!SM_FLAG(port, NTT)) + return; + + if (!timer_is_expired(&port->tx_machine_timer)) + return; + + lacp_pkt = rte_pktmbuf_alloc(port->mbuf_pool); + if (lacp_pkt == NULL) { + RTE_BOND_LOG(ERR, "Failed to allocate LACP packet from pool"); + return; + } + + lacp_pkt->data_len = sizeof(*hdr); + lacp_pkt->pkt_len = sizeof(*hdr); + + hdr = rte_pktmbuf_mtod(lacp_pkt, struct lacpdu_header *); + + /* Source and destination MAC */ + rte_ether_addr_copy(&lacp_mac_addr, &hdr->eth_hdr.d_addr); + rte_eth_macaddr_get(slave_id, &hdr->eth_hdr.s_addr); + hdr->eth_hdr.ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_SLOW); + + lacpdu = &hdr->lacpdu; + memset(lacpdu, 0, sizeof(*lacpdu)); + + /* Initialize LACP part */ + lacpdu->subtype = SLOW_SUBTYPE_LACP; + lacpdu->version_number = 1; + + /* ACTOR */ + lacpdu->actor.tlv_type_info = TLV_TYPE_ACTOR_INFORMATION; + lacpdu->actor.info_length = sizeof(struct lacpdu_actor_partner_params); + memcpy(&hdr->lacpdu.actor.port_params, &port->actor, + sizeof(port->actor)); + agg = &bond_mode_8023ad_ports[port->aggregator_port_id]; + rte_ether_addr_copy(&agg->actor.system, + &hdr->lacpdu.actor.port_params.system); + lacpdu->actor.state = port->actor_state; + + /* PARTNER */ + lacpdu->partner.tlv_type_info = TLV_TYPE_PARTNER_INFORMATION; + lacpdu->partner.info_length = sizeof(struct lacpdu_actor_partner_params); + memcpy(&lacpdu->partner.port_params, &port->partner, + sizeof(struct port_params)); + lacpdu->partner.state = port->partner_state; + + /* Other fields */ + lacpdu->tlv_type_collector_info = TLV_TYPE_COLLECTOR_INFORMATION; + lacpdu->collector_info_length = 0x10; + lacpdu->collector_max_delay = 0; + + lacpdu->tlv_type_terminator = TLV_TYPE_TERMINATOR_INFORMATION; + lacpdu->terminator_length = 0; + + MODE4_DEBUG("Sending LACP frame\n"); + BOND_PRINT_LACP(lacpdu); + + if (internals->mode4.dedicated_queues.enabled == 0) { + int retval = rte_ring_enqueue(port->tx_ring, lacp_pkt); + if (retval != 0) { + /* If TX ring full, drop packet and free message. + Retransmission will happen in next function call. */ + rte_pktmbuf_free(lacp_pkt); + set_warning_flags(port, WRN_TX_QUEUE_FULL); + return; + } + } else { + uint16_t pkts_sent = rte_eth_tx_burst(slave_id, + internals->mode4.dedicated_queues.tx_qid, + &lacp_pkt, 1); + if (pkts_sent != 1) { + rte_pktmbuf_free(lacp_pkt); + set_warning_flags(port, WRN_TX_QUEUE_FULL); + return; + } + } + + + timer_set(&port->tx_machine_timer, internals->mode4.tx_period_timeout); + SM_FLAG_CLR(port, NTT); +} + +static uint16_t +max_index(uint64_t *a, int n) +{ + if (n <= 0) + return -1; + + int i, max_i = 0; + uint64_t max = a[0]; + + for (i = 1; i < n; ++i) { + if (a[i] > max) { + max = a[i]; + max_i = i; + } + } + + return max_i; +} + +/** + * Function assigns port to aggregator. + * + * @param bond_dev_private Pointer to bond_dev_private structure. + * @param port_pos Port to assign. 
+ */ +static void +selection_logic(struct bond_dev_private *internals, uint16_t slave_id) +{ + struct port *agg, *port; + uint16_t slaves_count, new_agg_id, i, j = 0; + uint16_t *slaves; + uint64_t agg_bandwidth[RTE_MAX_ETHPORTS] = {0}; + uint64_t agg_count[RTE_MAX_ETHPORTS] = {0}; + uint16_t default_slave = 0; + struct rte_eth_link link_info; + uint16_t agg_new_idx = 0; + int ret; + + slaves = internals->active_slaves; + slaves_count = internals->active_slave_count; + port = &bond_mode_8023ad_ports[slave_id]; + + /* Search for aggregator suitable for this port */ + for (i = 0; i < slaves_count; ++i) { + agg = &bond_mode_8023ad_ports[slaves[i]]; + /* Skip ports that are not aggreagators */ + if (agg->aggregator_port_id != slaves[i]) + continue; + + ret = rte_eth_link_get_nowait(slaves[i], &link_info); + if (ret < 0) { + RTE_BOND_LOG(ERR, + "Slave (port %u) link get failed: %s\n", + slaves[i], rte_strerror(-ret)); + continue; + } + agg_count[i] += 1; + agg_bandwidth[i] += link_info.link_speed; + + /* Actors system ID is not checked since all slave device have the same + * ID (MAC address). */ + if ((agg->actor.key == port->actor.key && + agg->partner.system_priority == port->partner.system_priority && + rte_is_same_ether_addr(&agg->partner.system, + &port->partner.system) == 1 + && (agg->partner.key == port->partner.key)) && + rte_is_zero_ether_addr(&port->partner.system) != 1 && + (agg->actor.key & + rte_cpu_to_be_16(BOND_LINK_FULL_DUPLEX_KEY)) != 0) { + + if (j == 0) + default_slave = i; + j++; + } + } + + switch (internals->mode4.agg_selection) { + case AGG_COUNT: + agg_new_idx = max_index(agg_count, slaves_count); + new_agg_id = slaves[agg_new_idx]; + break; + case AGG_BANDWIDTH: + agg_new_idx = max_index(agg_bandwidth, slaves_count); + new_agg_id = slaves[agg_new_idx]; + break; + case AGG_STABLE: + if (default_slave == slaves_count) + new_agg_id = slaves[slave_id]; + else + new_agg_id = slaves[default_slave]; + break; + default: + if (default_slave == slaves_count) + new_agg_id = slaves[slave_id]; + else + new_agg_id = slaves[default_slave]; + break; + } + + if (new_agg_id != port->aggregator_port_id) { + port->aggregator_port_id = new_agg_id; + + MODE4_DEBUG("-> SELECTED: ID=%3u\n" + "\t%s aggregator ID=%3u\n", + port->aggregator_port_id, + port->aggregator_port_id == slave_id ? 
+ "aggregator not found, using default" : "aggregator found", + port->aggregator_port_id); + } + + port->selected = SELECTED; +} + +/* Function maps DPDK speed to bonding speed stored in key field */ +static uint16_t +link_speed_key(uint16_t speed) { + uint16_t key_speed; + + switch (speed) { + case ETH_SPEED_NUM_NONE: + key_speed = 0x00; + break; + case ETH_SPEED_NUM_10M: + key_speed = BOND_LINK_SPEED_KEY_10M; + break; + case ETH_SPEED_NUM_100M: + key_speed = BOND_LINK_SPEED_KEY_100M; + break; + case ETH_SPEED_NUM_1G: + key_speed = BOND_LINK_SPEED_KEY_1000M; + break; + case ETH_SPEED_NUM_10G: + key_speed = BOND_LINK_SPEED_KEY_10G; + break; + case ETH_SPEED_NUM_20G: + key_speed = BOND_LINK_SPEED_KEY_20G; + break; + case ETH_SPEED_NUM_40G: + key_speed = BOND_LINK_SPEED_KEY_40G; + break; + default: + /* Unknown speed*/ + key_speed = 0xFFFF; + } + + return key_speed; +} + +static void +rx_machine_update(struct bond_dev_private *internals, uint16_t slave_id, + struct rte_mbuf *lacp_pkt) { + struct lacpdu_header *lacp; + struct lacpdu_actor_partner_params *partner; + + if (lacp_pkt != NULL) { + lacp = rte_pktmbuf_mtod(lacp_pkt, struct lacpdu_header *); + RTE_ASSERT(lacp->lacpdu.subtype == SLOW_SUBTYPE_LACP); + + partner = &lacp->lacpdu.partner; + if (rte_is_same_ether_addr(&partner->port_params.system, + &internals->mode4.mac_addr)) { + /* This LACP frame is sending to the bonding port + * so pass it to rx_machine. + */ + rx_machine(internals, slave_id, &lacp->lacpdu); + } + rte_pktmbuf_free(lacp_pkt); + } else + rx_machine(internals, slave_id, NULL); +} + +static void +bond_mode_8023ad_periodic_cb(void *arg) +{ + struct rte_eth_dev *bond_dev = arg; + struct bond_dev_private *internals = bond_dev->data->dev_private; + struct port *port; + struct rte_eth_link link_info; + struct rte_ether_addr slave_addr; + struct rte_mbuf *lacp_pkt = NULL; + uint16_t slave_id; + uint16_t i; + + + /* Update link status on each port */ + for (i = 0; i < internals->active_slave_count; i++) { + uint16_t key; + int ret; + + slave_id = internals->active_slaves[i]; + ret = rte_eth_link_get_nowait(slave_id, &link_info); + if (ret < 0) { + RTE_BOND_LOG(ERR, + "Slave (port %u) link get failed: %s\n", + slave_id, rte_strerror(-ret)); + } + + if (ret >= 0 && link_info.link_status != 0) { + key = link_speed_key(link_info.link_speed) << 1; + if (link_info.link_duplex == ETH_LINK_FULL_DUPLEX) + key |= BOND_LINK_FULL_DUPLEX_KEY; + } else { + key = 0; + } + + rte_eth_macaddr_get(slave_id, &slave_addr); + port = &bond_mode_8023ad_ports[slave_id]; + + key = rte_cpu_to_be_16(key); + if (key != port->actor.key) { + if (!(key & rte_cpu_to_be_16(BOND_LINK_FULL_DUPLEX_KEY))) + set_warning_flags(port, WRN_NOT_LACP_CAPABLE); + + port->actor.key = key; + SM_FLAG_SET(port, NTT); + } + + if (!rte_is_same_ether_addr(&port->actor.system, &slave_addr)) { + rte_ether_addr_copy(&slave_addr, &port->actor.system); + if (port->aggregator_port_id == slave_id) + SM_FLAG_SET(port, NTT); + } + } + + for (i = 0; i < internals->active_slave_count; i++) { + slave_id = internals->active_slaves[i]; + port = &bond_mode_8023ad_ports[slave_id]; + + if ((port->actor.key & + rte_cpu_to_be_16(BOND_LINK_FULL_DUPLEX_KEY)) == 0) { + + SM_FLAG_SET(port, BEGIN); + + /* LACP is disabled on half duples or link is down */ + if (SM_FLAG(port, LACP_ENABLED)) { + /* If port was enabled set it to BEGIN state */ + SM_FLAG_CLR(port, LACP_ENABLED); + ACTOR_STATE_CLR(port, DISTRIBUTING); + ACTOR_STATE_CLR(port, COLLECTING); + } + + /* Skip this port processing */ + continue; + 
} + + SM_FLAG_SET(port, LACP_ENABLED); + + if (internals->mode4.dedicated_queues.enabled == 0) { + /* Find LACP packet to this port. Do not check subtype, + * it is done in function that queued packet + */ + int retval = rte_ring_dequeue(port->rx_ring, + (void **)&lacp_pkt); + + if (retval != 0) + lacp_pkt = NULL; + + rx_machine_update(internals, slave_id, lacp_pkt); + } else { + uint16_t rx_count = rte_eth_rx_burst(slave_id, + internals->mode4.dedicated_queues.rx_qid, + &lacp_pkt, 1); + + if (rx_count == 1) + bond_mode_8023ad_handle_slow_pkt(internals, + slave_id, lacp_pkt); + else + rx_machine_update(internals, slave_id, NULL); + } + + periodic_machine(internals, slave_id); + mux_machine(internals, slave_id); + tx_machine(internals, slave_id); + selection_logic(internals, slave_id); + + SM_FLAG_CLR(port, BEGIN); + show_warnings(slave_id); + } + + rte_eal_alarm_set(internals->mode4.update_timeout_us, + bond_mode_8023ad_periodic_cb, arg); +} + +static int +bond_mode_8023ad_register_lacp_mac(uint16_t slave_id) +{ + int ret; + + ret = rte_eth_allmulticast_enable(slave_id); + if (ret != 0) { + RTE_BOND_LOG(ERR, + "failed to enable allmulti mode for port %u: %s", + slave_id, rte_strerror(-ret)); + } + if (rte_eth_allmulticast_get(slave_id)) { + RTE_BOND_LOG(DEBUG, "forced allmulti for port %u", + slave_id); + bond_mode_8023ad_ports[slave_id].forced_rx_flags = + BOND_8023AD_FORCED_ALLMULTI; + return 0; + } + + ret = rte_eth_promiscuous_enable(slave_id); + if (ret != 0) { + RTE_BOND_LOG(ERR, + "failed to enable promiscuous mode for port %u: %s", + slave_id, rte_strerror(-ret)); + } + if (rte_eth_promiscuous_get(slave_id)) { + RTE_BOND_LOG(DEBUG, "forced promiscuous for port %u", + slave_id); + bond_mode_8023ad_ports[slave_id].forced_rx_flags = + BOND_8023AD_FORCED_PROMISC; + return 0; + } + + return -1; +} + +static void +bond_mode_8023ad_unregister_lacp_mac(uint16_t slave_id) +{ + int ret; + + switch (bond_mode_8023ad_ports[slave_id].forced_rx_flags) { + case BOND_8023AD_FORCED_ALLMULTI: + RTE_BOND_LOG(DEBUG, "unset allmulti for port %u", slave_id); + ret = rte_eth_allmulticast_disable(slave_id); + if (ret != 0) + RTE_BOND_LOG(ERR, + "failed to disable allmulti mode for port %u: %s", + slave_id, rte_strerror(-ret)); + break; + + case BOND_8023AD_FORCED_PROMISC: + RTE_BOND_LOG(DEBUG, "unset promisc for port %u", slave_id); + ret = rte_eth_promiscuous_disable(slave_id); + if (ret != 0) + RTE_BOND_LOG(ERR, + "failed to disable promiscuous mode for port %u: %s", + slave_id, rte_strerror(-ret)); + break; + + default: + break; + } +} + +void +bond_mode_8023ad_activate_slave(struct rte_eth_dev *bond_dev, + uint16_t slave_id) +{ + struct bond_dev_private *internals = bond_dev->data->dev_private; + + struct port *port = &bond_mode_8023ad_ports[slave_id]; + struct port_params initial = { + .system = { { 0 } }, + .system_priority = rte_cpu_to_be_16(0xFFFF), + .key = rte_cpu_to_be_16(BOND_LINK_FULL_DUPLEX_KEY), + .port_priority = rte_cpu_to_be_16(0x00FF), + .port_number = 0, + }; + + char mem_name[RTE_ETH_NAME_MAX_LEN]; + int socket_id; + unsigned element_size; + uint32_t total_tx_desc; + struct bond_tx_queue *bd_tx_q; + uint16_t q_id; + + /* Given slave mus not be in active list */ + RTE_ASSERT(find_slave_by_id(internals->active_slaves, + internals->active_slave_count, slave_id) == internals->active_slave_count); + RTE_SET_USED(internals); /* used only for assert when enabled */ + + memcpy(&port->actor, &initial, sizeof(struct port_params)); + /* Standard requires that port ID must be grater than 0. 
+ * Add 1 to get the corresponding port_number */
+ port->actor.port_number = rte_cpu_to_be_16(slave_id + 1);
+
+ memcpy(&port->partner, &initial, sizeof(struct port_params));
+
+ /* default states */
+ port->actor_state = STATE_AGGREGATION | STATE_LACP_ACTIVE | STATE_DEFAULTED;
+ port->partner_state = STATE_LACP_ACTIVE | STATE_AGGREGATION;
+ port->sm_flags = SM_FLAGS_BEGIN;
+
+ /* use this port as aggregator */
+ port->aggregator_port_id = slave_id;
+
+ if (bond_mode_8023ad_register_lacp_mac(slave_id) < 0) {
+ RTE_BOND_LOG(WARNING, "slave %u is most likely broken and won't receive LACP packets",
+ slave_id);
+ }
+
+ timer_cancel(&port->warning_timer);
+
+ if (port->mbuf_pool != NULL)
+ return;
+
+ RTE_ASSERT(port->rx_ring == NULL);
+ RTE_ASSERT(port->tx_ring == NULL);
+
+ socket_id = rte_eth_dev_socket_id(slave_id);
+ if (socket_id == (int)LCORE_ID_ANY)
+ socket_id = rte_socket_id();
+
+ element_size = sizeof(struct slow_protocol_frame) +
+ RTE_PKTMBUF_HEADROOM;
+
+ /* The size of the mempool should be at least:
+ * the sum of the TX descriptors + BOND_MODE_8023AX_SLAVE_TX_PKTS */
+ total_tx_desc = BOND_MODE_8023AX_SLAVE_TX_PKTS;
+ for (q_id = 0; q_id < bond_dev->data->nb_tx_queues; q_id++) {
+ bd_tx_q = (struct bond_tx_queue*)bond_dev->data->tx_queues[q_id];
+ total_tx_desc += bd_tx_q->nb_tx_desc;
+ }
+
+ snprintf(mem_name, RTE_DIM(mem_name), "slave_port%u_pool", slave_id);
+ port->mbuf_pool = rte_pktmbuf_pool_create(mem_name, total_tx_desc,
+ RTE_MEMPOOL_CACHE_MAX_SIZE >= 32 ?
+ 32 : RTE_MEMPOOL_CACHE_MAX_SIZE,
+ 0, element_size, socket_id);
+
+ /* Any memory allocation failure in initialization is critical because
+ * resources can't be freed, so reinitialization is impossible. */
+ if (port->mbuf_pool == NULL) {
+ rte_panic("Slave %u: Failed to create memory pool '%s': %s\n",
+ slave_id, mem_name, rte_strerror(rte_errno));
+ }
+
+ snprintf(mem_name, RTE_DIM(mem_name), "slave_%u_rx", slave_id);
+ port->rx_ring = rte_ring_create(mem_name,
+ rte_align32pow2(BOND_MODE_8023AX_SLAVE_RX_PKTS), socket_id, 0);
+
+ if (port->rx_ring == NULL) {
+ rte_panic("Slave %u: Failed to create rx ring '%s': %s\n", slave_id,
+ mem_name, rte_strerror(rte_errno));
+ }
+
+ /* TX ring is at least one pkt longer to make room for marker packet.
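+ * (the marker response reuses the received mbuf and is enqueued on this
+ * same ring as the LACPDUs, hence the extra slot)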
*/ + snprintf(mem_name, RTE_DIM(mem_name), "slave_%u_tx", slave_id); + port->tx_ring = rte_ring_create(mem_name, + rte_align32pow2(BOND_MODE_8023AX_SLAVE_TX_PKTS + 1), socket_id, 0); + + if (port->tx_ring == NULL) { + rte_panic("Slave %u: Failed to create tx ring '%s': %s\n", slave_id, + mem_name, rte_strerror(rte_errno)); + } +} + +int +bond_mode_8023ad_deactivate_slave(struct rte_eth_dev *bond_dev __rte_unused, + uint16_t slave_id) +{ + void *pkt = NULL; + struct port *port = NULL; + uint8_t old_partner_state; + + port = &bond_mode_8023ad_ports[slave_id]; + + ACTOR_STATE_CLR(port, AGGREGATION); + port->selected = UNSELECTED; + + old_partner_state = port->partner_state; + record_default(port); + + bond_mode_8023ad_unregister_lacp_mac(slave_id); + + /* If partner timeout state changes then disable timer */ + if (!((old_partner_state ^ port->partner_state) & + STATE_LACP_SHORT_TIMEOUT)) + timer_cancel(&port->current_while_timer); + + PARTNER_STATE_CLR(port, AGGREGATION); + ACTOR_STATE_CLR(port, EXPIRED); + + /* flush rx/tx rings */ + while (rte_ring_dequeue(port->rx_ring, &pkt) == 0) + rte_pktmbuf_free((struct rte_mbuf *)pkt); + + while (rte_ring_dequeue(port->tx_ring, &pkt) == 0) + rte_pktmbuf_free((struct rte_mbuf *)pkt); + return 0; +} + +void +bond_mode_8023ad_mac_address_update(struct rte_eth_dev *bond_dev) +{ + struct bond_dev_private *internals = bond_dev->data->dev_private; + struct rte_ether_addr slave_addr; + struct port *slave, *agg_slave; + uint16_t slave_id, i, j; + + bond_mode_8023ad_stop(bond_dev); + + for (i = 0; i < internals->active_slave_count; i++) { + slave_id = internals->active_slaves[i]; + slave = &bond_mode_8023ad_ports[slave_id]; + rte_eth_macaddr_get(slave_id, &slave_addr); + + if (rte_is_same_ether_addr(&slave_addr, &slave->actor.system)) + continue; + + rte_ether_addr_copy(&slave_addr, &slave->actor.system); + /* Do nothing if this port is not an aggregator. In other case + * Set NTT flag on every port that use this aggregator. 
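+ * (NTT = Need To Transmit: the tx state machine will then send an updated
+ * LACPDU advertising the new actor system address)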
*/ + if (slave->aggregator_port_id != slave_id) + continue; + + for (j = 0; j < internals->active_slave_count; j++) { + agg_slave = &bond_mode_8023ad_ports[internals->active_slaves[j]]; + if (agg_slave->aggregator_port_id == slave_id) + SM_FLAG_SET(agg_slave, NTT); + } + } + + if (bond_dev->data->dev_started) + bond_mode_8023ad_start(bond_dev); +} + +static void +bond_mode_8023ad_conf_get(struct rte_eth_dev *dev, + struct rte_eth_bond_8023ad_conf *conf) +{ + struct bond_dev_private *internals = dev->data->dev_private; + struct mode8023ad_private *mode4 = &internals->mode4; + uint64_t ms_ticks = rte_get_tsc_hz() / 1000; + + conf->fast_periodic_ms = mode4->fast_periodic_timeout / ms_ticks; + conf->slow_periodic_ms = mode4->slow_periodic_timeout / ms_ticks; + conf->short_timeout_ms = mode4->short_timeout / ms_ticks; + conf->long_timeout_ms = mode4->long_timeout / ms_ticks; + conf->aggregate_wait_timeout_ms = mode4->aggregate_wait_timeout / ms_ticks; + conf->tx_period_ms = mode4->tx_period_timeout / ms_ticks; + conf->update_timeout_ms = mode4->update_timeout_us / 1000; + conf->rx_marker_period_ms = mode4->rx_marker_timeout / ms_ticks; + conf->slowrx_cb = mode4->slowrx_cb; + conf->agg_selection = mode4->agg_selection; +} + +static void +bond_mode_8023ad_conf_get_default(struct rte_eth_bond_8023ad_conf *conf) +{ + conf->fast_periodic_ms = BOND_8023AD_FAST_PERIODIC_MS; + conf->slow_periodic_ms = BOND_8023AD_SLOW_PERIODIC_MS; + conf->short_timeout_ms = BOND_8023AD_SHORT_TIMEOUT_MS; + conf->long_timeout_ms = BOND_8023AD_LONG_TIMEOUT_MS; + conf->aggregate_wait_timeout_ms = BOND_8023AD_AGGREGATE_WAIT_TIMEOUT_MS; + conf->tx_period_ms = BOND_8023AD_TX_MACHINE_PERIOD_MS; + conf->rx_marker_period_ms = BOND_8023AD_RX_MARKER_PERIOD_MS; + conf->update_timeout_ms = BOND_MODE_8023AX_UPDATE_TIMEOUT_MS; + conf->slowrx_cb = NULL; + conf->agg_selection = AGG_STABLE; +} + +static void +bond_mode_8023ad_conf_assign(struct mode8023ad_private *mode4, + struct rte_eth_bond_8023ad_conf *conf) +{ + uint64_t ms_ticks = rte_get_tsc_hz() / 1000; + + mode4->fast_periodic_timeout = conf->fast_periodic_ms * ms_ticks; + mode4->slow_periodic_timeout = conf->slow_periodic_ms * ms_ticks; + mode4->short_timeout = conf->short_timeout_ms * ms_ticks; + mode4->long_timeout = conf->long_timeout_ms * ms_ticks; + mode4->aggregate_wait_timeout = conf->aggregate_wait_timeout_ms * ms_ticks; + mode4->tx_period_timeout = conf->tx_period_ms * ms_ticks; + mode4->rx_marker_timeout = conf->rx_marker_period_ms * ms_ticks; + mode4->update_timeout_us = conf->update_timeout_ms * 1000; + + mode4->dedicated_queues.enabled = 0; + mode4->dedicated_queues.rx_qid = UINT16_MAX; + mode4->dedicated_queues.tx_qid = UINT16_MAX; +} + +void +bond_mode_8023ad_setup(struct rte_eth_dev *dev, + struct rte_eth_bond_8023ad_conf *conf) +{ + struct rte_eth_bond_8023ad_conf def_conf; + struct bond_dev_private *internals = dev->data->dev_private; + struct mode8023ad_private *mode4 = &internals->mode4; + + if (conf == NULL) { + conf = &def_conf; + bond_mode_8023ad_conf_get_default(conf); + } + + bond_mode_8023ad_stop(dev); + bond_mode_8023ad_conf_assign(mode4, conf); + mode4->slowrx_cb = conf->slowrx_cb; + mode4->agg_selection = AGG_STABLE; + + if (dev->data->dev_started) + bond_mode_8023ad_start(dev); +} + +int +bond_mode_8023ad_enable(struct rte_eth_dev *bond_dev) +{ + struct bond_dev_private *internals = bond_dev->data->dev_private; + uint16_t i; + + for (i = 0; i < internals->active_slave_count; i++) + bond_mode_8023ad_activate_slave(bond_dev, + 
internals->active_slaves[i]); + + return 0; +} + +int +bond_mode_8023ad_start(struct rte_eth_dev *bond_dev) +{ + struct bond_dev_private *internals = bond_dev->data->dev_private; + struct mode8023ad_private *mode4 = &internals->mode4; + static const uint64_t us = BOND_MODE_8023AX_UPDATE_TIMEOUT_MS * 1000; + + rte_eth_macaddr_get(internals->port_id, &mode4->mac_addr); + if (mode4->slowrx_cb) + return rte_eal_alarm_set(us, &bond_mode_8023ad_ext_periodic_cb, + bond_dev); + + return rte_eal_alarm_set(us, &bond_mode_8023ad_periodic_cb, bond_dev); +} + +void +bond_mode_8023ad_stop(struct rte_eth_dev *bond_dev) +{ + struct bond_dev_private *internals = bond_dev->data->dev_private; + struct mode8023ad_private *mode4 = &internals->mode4; + + if (mode4->slowrx_cb) { + rte_eal_alarm_cancel(&bond_mode_8023ad_ext_periodic_cb, + bond_dev); + return; + } + rte_eal_alarm_cancel(&bond_mode_8023ad_periodic_cb, bond_dev); +} + +void +bond_mode_8023ad_handle_slow_pkt(struct bond_dev_private *internals, + uint16_t slave_id, struct rte_mbuf *pkt) +{ + struct mode8023ad_private *mode4 = &internals->mode4; + struct port *port = &bond_mode_8023ad_ports[slave_id]; + struct marker_header *m_hdr; + uint64_t marker_timer, old_marker_timer; + int retval; + uint8_t wrn, subtype; + /* If packet is a marker, we send response now by reusing given packet + * and update only source MAC, destination MAC is multicast so don't + * update it. Other frames will be handled later by state machines */ + subtype = rte_pktmbuf_mtod(pkt, + struct slow_protocol_frame *)->slow_protocol.subtype; + + if (subtype == SLOW_SUBTYPE_MARKER) { + m_hdr = rte_pktmbuf_mtod(pkt, struct marker_header *); + + if (likely(m_hdr->marker.tlv_type_marker != MARKER_TLV_TYPE_INFO)) { + wrn = WRN_UNKNOWN_MARKER_TYPE; + goto free_out; + } + + /* Setup marker timer. Do it in loop in case concurrent access. 
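+ * (a compare-and-set loop: retry until the new timer value is swapped in
+ * atomically, since other paths may update rx_marker_timer concurrently)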
*/ + do { + old_marker_timer = port->rx_marker_timer; + if (!timer_is_expired(&old_marker_timer)) { + wrn = WRN_RX_MARKER_TO_FAST; + goto free_out; + } + + timer_set(&marker_timer, mode4->rx_marker_timeout); + retval = rte_atomic64_cmpset(&port->rx_marker_timer, + old_marker_timer, marker_timer); + } while (unlikely(retval == 0)); + + m_hdr->marker.tlv_type_marker = MARKER_TLV_TYPE_RESP; + rte_eth_macaddr_get(slave_id, &m_hdr->eth_hdr.s_addr); + + if (internals->mode4.dedicated_queues.enabled == 0) { + int retval = rte_ring_enqueue(port->tx_ring, pkt); + if (retval != 0) { + /* reset timer */ + port->rx_marker_timer = 0; + wrn = WRN_TX_QUEUE_FULL; + goto free_out; + } + } else { + /* Send packet directly to the slow queue */ + uint16_t tx_count = rte_eth_tx_burst(slave_id, + internals->mode4.dedicated_queues.tx_qid, + &pkt, 1); + if (tx_count != 1) { + /* reset timer */ + port->rx_marker_timer = 0; + wrn = WRN_TX_QUEUE_FULL; + goto free_out; + } + } + } else if (likely(subtype == SLOW_SUBTYPE_LACP)) { + if (internals->mode4.dedicated_queues.enabled == 0) { + int retval = rte_ring_enqueue(port->rx_ring, pkt); + if (retval != 0) { + /* If RX fing full free lacpdu message and drop packet */ + wrn = WRN_RX_QUEUE_FULL; + goto free_out; + } + } else + rx_machine_update(internals, slave_id, pkt); + } else { + wrn = WRN_UNKNOWN_SLOW_TYPE; + goto free_out; + } + + return; + +free_out: + set_warning_flags(port, wrn); + rte_pktmbuf_free(pkt); +} + +int +rte_eth_bond_8023ad_conf_get(uint16_t port_id, + struct rte_eth_bond_8023ad_conf *conf) +{ + struct rte_eth_dev *bond_dev; + + if (valid_bonded_port_id(port_id) != 0) + return -EINVAL; + + if (conf == NULL) + return -EINVAL; + + bond_dev = &rte_eth_devices[port_id]; + bond_mode_8023ad_conf_get(bond_dev, conf); + return 0; +} + +int +rte_eth_bond_8023ad_agg_selection_set(uint16_t port_id, + enum rte_bond_8023ad_agg_selection agg_selection) +{ + struct rte_eth_dev *bond_dev; + struct bond_dev_private *internals; + struct mode8023ad_private *mode4; + + if (valid_bonded_port_id(port_id) != 0) + return -EINVAL; + + bond_dev = &rte_eth_devices[port_id]; + internals = bond_dev->data->dev_private; + + if (internals->mode != 4) + return -EINVAL; + + mode4 = &internals->mode4; + if (agg_selection == AGG_COUNT || agg_selection == AGG_BANDWIDTH + || agg_selection == AGG_STABLE) + mode4->agg_selection = agg_selection; + return 0; +} + +int rte_eth_bond_8023ad_agg_selection_get(uint16_t port_id) +{ + struct rte_eth_dev *bond_dev; + struct bond_dev_private *internals; + struct mode8023ad_private *mode4; + + if (valid_bonded_port_id(port_id) != 0) + return -EINVAL; + + bond_dev = &rte_eth_devices[port_id]; + internals = bond_dev->data->dev_private; + + if (internals->mode != 4) + return -EINVAL; + mode4 = &internals->mode4; + + return mode4->agg_selection; +} + + + +static int +bond_8023ad_setup_validate(uint16_t port_id, + struct rte_eth_bond_8023ad_conf *conf) +{ + if (valid_bonded_port_id(port_id) != 0) + return -EINVAL; + + if (conf != NULL) { + /* Basic sanity check */ + if (conf->slow_periodic_ms == 0 || + conf->fast_periodic_ms >= conf->slow_periodic_ms || + conf->long_timeout_ms == 0 || + conf->short_timeout_ms >= conf->long_timeout_ms || + conf->aggregate_wait_timeout_ms == 0 || + conf->tx_period_ms == 0 || + conf->rx_marker_period_ms == 0 || + conf->update_timeout_ms == 0) { + RTE_BOND_LOG(ERR, "given mode 4 configuration is invalid"); + return -EINVAL; + } + } + + return 0; +} + + +int +rte_eth_bond_8023ad_setup(uint16_t port_id, + struct 
rte_eth_bond_8023ad_conf *conf) +{ + struct rte_eth_dev *bond_dev; + int err; + + err = bond_8023ad_setup_validate(port_id, conf); + if (err != 0) + return err; + + bond_dev = &rte_eth_devices[port_id]; + bond_mode_8023ad_setup(bond_dev, conf); + + return 0; +} + + + + + +int +rte_eth_bond_8023ad_slave_info(uint16_t port_id, uint16_t slave_id, + struct rte_eth_bond_8023ad_slave_info *info) +{ + struct rte_eth_dev *bond_dev; + struct bond_dev_private *internals; + struct port *port; + + if (info == NULL || valid_bonded_port_id(port_id) != 0 || + rte_eth_bond_mode_get(port_id) != BONDING_MODE_8023AD) + return -EINVAL; + + bond_dev = &rte_eth_devices[port_id]; + + internals = bond_dev->data->dev_private; + if (find_slave_by_id(internals->active_slaves, + internals->active_slave_count, slave_id) == + internals->active_slave_count) + return -EINVAL; + + port = &bond_mode_8023ad_ports[slave_id]; + info->selected = port->selected; + + info->actor_state = port->actor_state; + rte_memcpy(&info->actor, &port->actor, sizeof(port->actor)); + + info->partner_state = port->partner_state; + rte_memcpy(&info->partner, &port->partner, sizeof(port->partner)); + + info->agg_port_id = port->aggregator_port_id; + return 0; +} + +static int +bond_8023ad_ext_validate(uint16_t port_id, uint16_t slave_id) +{ + struct rte_eth_dev *bond_dev; + struct bond_dev_private *internals; + struct mode8023ad_private *mode4; + + if (rte_eth_bond_mode_get(port_id) != BONDING_MODE_8023AD) + return -EINVAL; + + bond_dev = &rte_eth_devices[port_id]; + + if (!bond_dev->data->dev_started) + return -EINVAL; + + internals = bond_dev->data->dev_private; + if (find_slave_by_id(internals->active_slaves, + internals->active_slave_count, slave_id) == + internals->active_slave_count) + return -EINVAL; + + mode4 = &internals->mode4; + if (mode4->slowrx_cb == NULL) + return -EINVAL; + + return 0; +} + +int +rte_eth_bond_8023ad_ext_collect(uint16_t port_id, uint16_t slave_id, + int enabled) +{ + struct port *port; + int res; + + res = bond_8023ad_ext_validate(port_id, slave_id); + if (res != 0) + return res; + + port = &bond_mode_8023ad_ports[slave_id]; + + if (enabled) + ACTOR_STATE_SET(port, COLLECTING); + else + ACTOR_STATE_CLR(port, COLLECTING); + + return 0; +} + +int +rte_eth_bond_8023ad_ext_distrib(uint16_t port_id, uint16_t slave_id, + int enabled) +{ + struct port *port; + int res; + + res = bond_8023ad_ext_validate(port_id, slave_id); + if (res != 0) + return res; + + port = &bond_mode_8023ad_ports[slave_id]; + + if (enabled) + ACTOR_STATE_SET(port, DISTRIBUTING); + else + ACTOR_STATE_CLR(port, DISTRIBUTING); + + return 0; +} + +int +rte_eth_bond_8023ad_ext_distrib_get(uint16_t port_id, uint16_t slave_id) +{ + struct port *port; + int err; + + err = bond_8023ad_ext_validate(port_id, slave_id); + if (err != 0) + return err; + + port = &bond_mode_8023ad_ports[slave_id]; + return ACTOR_STATE(port, DISTRIBUTING); +} + +int +rte_eth_bond_8023ad_ext_collect_get(uint16_t port_id, uint16_t slave_id) +{ + struct port *port; + int err; + + err = bond_8023ad_ext_validate(port_id, slave_id); + if (err != 0) + return err; + + port = &bond_mode_8023ad_ports[slave_id]; + return ACTOR_STATE(port, COLLECTING); +} + +int +rte_eth_bond_8023ad_ext_slowtx(uint16_t port_id, uint16_t slave_id, + struct rte_mbuf *lacp_pkt) +{ + struct port *port; + int res; + + res = bond_8023ad_ext_validate(port_id, slave_id); + if (res != 0) + return res; + + port = &bond_mode_8023ad_ports[slave_id]; + + if (rte_pktmbuf_pkt_len(lacp_pkt) < sizeof(struct lacpdu_header)) + 
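+ /* runt frame: too short to hold an Ethernet header plus a full LACPDU */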
return -EINVAL; + + struct lacpdu_header *lacp; + + /* only enqueue LACPDUs */ + lacp = rte_pktmbuf_mtod(lacp_pkt, struct lacpdu_header *); + if (lacp->lacpdu.subtype != SLOW_SUBTYPE_LACP) + return -EINVAL; + + MODE4_DEBUG("sending LACP frame\n"); + + return rte_ring_enqueue(port->tx_ring, lacp_pkt); +} + +static void +bond_mode_8023ad_ext_periodic_cb(void *arg) +{ + struct rte_eth_dev *bond_dev = arg; + struct bond_dev_private *internals = bond_dev->data->dev_private; + struct mode8023ad_private *mode4 = &internals->mode4; + struct port *port; + void *pkt = NULL; + uint16_t i, slave_id; + + for (i = 0; i < internals->active_slave_count; i++) { + slave_id = internals->active_slaves[i]; + port = &bond_mode_8023ad_ports[slave_id]; + + if (rte_ring_dequeue(port->rx_ring, &pkt) == 0) { + struct rte_mbuf *lacp_pkt = pkt; + struct lacpdu_header *lacp; + + lacp = rte_pktmbuf_mtod(lacp_pkt, + struct lacpdu_header *); + RTE_VERIFY(lacp->lacpdu.subtype == SLOW_SUBTYPE_LACP); + + /* This is LACP frame so pass it to rx callback. + * Callback is responsible for freeing mbuf. + */ + mode4->slowrx_cb(slave_id, lacp_pkt); + } + } + + rte_eal_alarm_set(internals->mode4.update_timeout_us, + bond_mode_8023ad_ext_periodic_cb, arg); +} + +int +rte_eth_bond_8023ad_dedicated_queues_enable(uint16_t port) +{ + int retval = 0; + struct rte_eth_dev *dev; + struct bond_dev_private *internals; + + if (valid_bonded_port_id(port) != 0) + return -EINVAL; + + dev = &rte_eth_devices[port]; + internals = dev->data->dev_private; + + if (check_for_bonded_ethdev(dev) != 0) + return -1; + + if (bond_8023ad_slow_pkt_hw_filter_supported(port) != 0) + return -1; + + /* Device must be stopped to set up slow queue */ + if (dev->data->dev_started) + return -1; + + internals->mode4.dedicated_queues.enabled = 1; + + bond_ethdev_mode_set(dev, internals->mode); + return retval; +} + +int +rte_eth_bond_8023ad_dedicated_queues_disable(uint16_t port) +{ + int retval = 0; + struct rte_eth_dev *dev; + struct bond_dev_private *internals; + + if (valid_bonded_port_id(port) != 0) + return -EINVAL; + + dev = &rte_eth_devices[port]; + internals = dev->data->dev_private; + + if (check_for_bonded_ethdev(dev) != 0) + return -1; + + /* Device must be stopped to set up slow queue */ + if (dev->data->dev_started) + return -1; + + internals->mode4.dedicated_queues.enabled = 0; + + bond_ethdev_mode_set(dev, internals->mode); + + return retval; +} diff --git a/src/spdk/dpdk/drivers/net/bonding/rte_eth_bond_8023ad.h b/src/spdk/dpdk/drivers/net/bonding/rte_eth_bond_8023ad.h new file mode 100644 index 000000000..11a71a55e --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bonding/rte_eth_bond_8023ad.h @@ -0,0 +1,334 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2014 Intel Corporation + */ + +#ifndef RTE_ETH_BOND_8023AD_H_ +#define RTE_ETH_BOND_8023AD_H_ + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * Actor/partner states + */ +#define STATE_LACP_ACTIVE 0x01 +#define STATE_LACP_SHORT_TIMEOUT 0x02 +#define STATE_AGGREGATION 0x04 +#define STATE_SYNCHRONIZATION 0x08 +#define STATE_COLLECTING 0x10 +#define STATE_DISTRIBUTING 0x20 +/** Partners parameters are defaulted */ +#define STATE_DEFAULTED 0x40 +#define STATE_EXPIRED 0x80 + +#define TLV_TYPE_ACTOR_INFORMATION 0x01 +#define TLV_TYPE_PARTNER_INFORMATION 0x02 +#define TLV_TYPE_COLLECTOR_INFORMATION 0x03 +#define TLV_TYPE_TERMINATOR_INFORMATION 0x00 + +#define SLOW_SUBTYPE_LACP 0x01 +#define SLOW_SUBTYPE_MARKER 0x02 + +#define MARKER_TLV_TYPE_INFO 0x01 +#define 
MARKER_TLV_TYPE_RESP 0x02 + +typedef void (*rte_eth_bond_8023ad_ext_slowrx_fn)(uint16_t slave_id, + struct rte_mbuf *lacp_pkt); + +enum rte_bond_8023ad_selection { + UNSELECTED, + STANDBY, + SELECTED +}; + +enum rte_bond_8023ad_agg_selection { + AGG_BANDWIDTH, + AGG_COUNT, + AGG_STABLE +}; + +/** Generic slow protocol structure */ +struct slow_protocol { + uint8_t subtype; + uint8_t reserved_119[119]; +} __rte_packed; + +/** Generic slow protocol frame type structure */ +struct slow_protocol_frame { + struct rte_ether_hdr eth_hdr; + struct slow_protocol slow_protocol; +} __rte_packed __rte_aligned(2); + +struct port_params { + uint16_t system_priority; + /**< System priority (unused in current implementation) */ + struct rte_ether_addr system; + /**< System ID - Slave MAC address, same as bonding MAC address */ + uint16_t key; + /**< Speed information (implementation dependednt) and duplex. */ + uint16_t port_priority; + /**< Priority of this (unused in current implementation) */ + uint16_t port_number; + /**< Port number. It corresponds to slave port id. */ +} __rte_packed __rte_aligned(2); + +struct lacpdu_actor_partner_params { + uint8_t tlv_type_info; + uint8_t info_length; + struct port_params port_params; + uint8_t state; + uint8_t reserved_3[3]; +} __rte_packed __rte_aligned(2); + +/** LACPDU structure (5.4.2 in 802.1AX documentation). */ +struct lacpdu { + uint8_t subtype; + uint8_t version_number; + + struct lacpdu_actor_partner_params actor; + struct lacpdu_actor_partner_params partner; + + uint8_t tlv_type_collector_info; + uint8_t collector_info_length; + uint16_t collector_max_delay; + uint8_t reserved_12[12]; + + uint8_t tlv_type_terminator; + uint8_t terminator_length; + uint8_t reserved_50[50]; +} __rte_packed __rte_aligned(2); + +/** LACPDU frame: Contains ethernet header and LACPDU. */ +struct lacpdu_header { + struct rte_ether_hdr eth_hdr; + struct lacpdu lacpdu; +} __rte_packed __rte_aligned(2); + +struct marker { + uint8_t subtype; + uint8_t version_number; + + uint8_t tlv_type_marker; + uint8_t info_length; + uint16_t requester_port; + struct rte_ether_addr requester_system; + uint32_t requester_transaction_id; + uint8_t reserved_2[2]; + + uint8_t tlv_type_terminator; + uint8_t terminator_length; + uint8_t reserved_90[90]; +} __rte_packed __rte_aligned(2); + +struct marker_header { + struct rte_ether_hdr eth_hdr; + struct marker marker; +} __rte_packed __rte_aligned(2); + +struct rte_eth_bond_8023ad_conf { + uint32_t fast_periodic_ms; + uint32_t slow_periodic_ms; + uint32_t short_timeout_ms; + uint32_t long_timeout_ms; + uint32_t aggregate_wait_timeout_ms; + uint32_t tx_period_ms; + uint32_t rx_marker_period_ms; + uint32_t update_timeout_ms; + rte_eth_bond_8023ad_ext_slowrx_fn slowrx_cb; + enum rte_bond_8023ad_agg_selection agg_selection; +}; + +struct rte_eth_bond_8023ad_slave_info { + enum rte_bond_8023ad_selection selected; + uint8_t actor_state; + struct port_params actor; + uint8_t partner_state; + struct port_params partner; + uint16_t agg_port_id; +}; + +/** + * @internal + * + * Function returns current configuration of 802.3AX mode. + * + * @param port_id Bonding device id + * @param conf Pointer to timeout structure. + * + * @return + * 0 - if ok + * -EINVAL if conf is NULL + */ +int +rte_eth_bond_8023ad_conf_get(uint16_t port_id, + struct rte_eth_bond_8023ad_conf *conf); + +/** + * @internal + * + * Function set new configuration of 802.3AX mode. + * + * @param port_id Bonding device id + * @param conf Configuration, if NULL set default configuration. 
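+ *
+ * Minimal usage sketch (illustrative only; bond_port is assumed to be a
+ * bonding port id obtained elsewhere, e.g. from rte_eth_bond_create()):
+ * @code
+ * struct rte_eth_bond_8023ad_conf conf;
+ *
+ * rte_eth_bond_8023ad_conf_get(bond_port, &conf);
+ * conf.fast_periodic_ms = 900;   // adjust only the fields of interest
+ * rte_eth_bond_8023ad_setup(bond_port, &conf);
+ * @endcode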
+ * @return + * 0 - if ok + * -EINVAL if configuration is invalid. + */ +int +rte_eth_bond_8023ad_setup(uint16_t port_id, + struct rte_eth_bond_8023ad_conf *conf); + +/** + * @internal + * + * Function returns current state of given slave device. + * + * @param slave_id Port id of valid slave. + * @param conf buffer for configuration + * @return + * 0 - if ok + * -EINVAL if conf is NULL or slave id is invalid (not a slave of given + * bonded device or is not inactive). + */ +int +rte_eth_bond_8023ad_slave_info(uint16_t port_id, uint16_t slave_id, + struct rte_eth_bond_8023ad_slave_info *conf); + +#ifdef __cplusplus +} +#endif + +/** + * Configure a slave port to start collecting. + * + * @param port_id Bonding device id + * @param slave_id Port id of valid slave. + * @param enabled Non-zero when collection enabled. + * @return + * 0 - if ok + * -EINVAL if slave is not valid. + */ +int +rte_eth_bond_8023ad_ext_collect(uint16_t port_id, uint16_t slave_id, + int enabled); + +/** + * Get COLLECTING flag from slave port actor state. + * + * @param port_id Bonding device id + * @param slave_id Port id of valid slave. + * @return + * 0 - if not set + * 1 - if set + * -EINVAL if slave is not valid. + */ +int +rte_eth_bond_8023ad_ext_collect_get(uint16_t port_id, uint16_t slave_id); + +/** + * Configure a slave port to start distributing. + * + * @param port_id Bonding device id + * @param slave_id Port id of valid slave. + * @param enabled Non-zero when distribution enabled. + * @return + * 0 - if ok + * -EINVAL if slave is not valid. + */ +int +rte_eth_bond_8023ad_ext_distrib(uint16_t port_id, uint16_t slave_id, + int enabled); + +/** + * Get DISTRIBUTING flag from slave port actor state. + * + * @param port_id Bonding device id + * @param slave_id Port id of valid slave. + * @return + * 0 - if not set + * 1 - if set + * -EINVAL if slave is not valid. + */ +int +rte_eth_bond_8023ad_ext_distrib_get(uint16_t port_id, uint16_t slave_id); + +/** + * LACPDU transmit path for external 802.3ad state machine. Caller retains + * ownership of the packet on failure. + * + * @param port_id Bonding device id + * @param slave_id Port ID of valid slave device. + * @param lacp_pkt mbuf containing LACPDU. + * + * @return + * 0 on success, negative value otherwise. + */ +int +rte_eth_bond_8023ad_ext_slowtx(uint16_t port_id, uint16_t slave_id, + struct rte_mbuf *lacp_pkt); + +/** + * Enable dedicated hw queues for 802.3ad control plane traffic on on slaves + * + * This function creates an additional tx and rx queue on each slave for + * dedicated 802.3ad control plane traffic . A flow filtering rule is + * programmed on each slave to redirect all LACP slow packets to that rx queue + * for processing in the LACP state machine, this removes the need to filter + * these packets in the bonded devices data path. The additional tx queue is + * used to enable the LACP state machine to enqueue LACP packets directly to + * slave hw independently of the bonded devices data path. + * + * To use this feature all slaves must support the programming of the flow + * filter rule required for rx and have enough queues that one rx and tx queue + * can be reserved for the LACP state machines control packets. + * + * Bonding port must be stopped to change this configuration. + * + * @param port_id Bonding device id + * + * @return + * 0 on success, negative value otherwise. 
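+ *
+ * Illustrative call order (a sketch only; bond_port is assumed to be a
+ * configured bonding port and error handling is omitted):
+ * @code
+ * rte_eth_dev_stop(bond_port);
+ * rte_eth_bond_8023ad_dedicated_queues_enable(bond_port);
+ * // each slave must have a spare rx/tx queue pair reserved for LACP traffic
+ * rte_eth_dev_start(bond_port);
+ * @endcode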
+ */ +int +rte_eth_bond_8023ad_dedicated_queues_enable(uint16_t port_id); + +/** + * Disable slow queue on slaves + * + * This function disables hardware slow packet filter. + * + * Bonding port must be stopped to change this configuration. + * + * @see rte_eth_bond_8023ad_slow_pkt_hw_filter_enable + * + * @param port_id Bonding device id + * @return + * 0 on success, negative value otherwise. + * + */ +int +rte_eth_bond_8023ad_dedicated_queues_disable(uint16_t port_id); + +/* + * Get aggregator mode for 8023ad + * @param port_id Bonding device id + * + * @return + * agregator mode on success, negative value otherwise + */ +int +rte_eth_bond_8023ad_agg_selection_get(uint16_t port_id); + +/** + * Set aggregator mode for 8023ad + * @param port_id Bonding device id + * @return + * 0 on success, negative value otherwise + */ +int +rte_eth_bond_8023ad_agg_selection_set(uint16_t port_id, + enum rte_bond_8023ad_agg_selection agg_selection); +#endif /* RTE_ETH_BOND_8023AD_H_ */ diff --git a/src/spdk/dpdk/drivers/net/bonding/rte_eth_bond_alb.c b/src/spdk/dpdk/drivers/net/bonding/rte_eth_bond_alb.c new file mode 100644 index 000000000..1d36a4a4a --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bonding/rte_eth_bond_alb.c @@ -0,0 +1,271 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2015 Intel Corporation + */ + +#include "eth_bond_private.h" +#include "rte_eth_bond_alb.h" + +static inline uint8_t +simple_hash(uint8_t *hash_start, int hash_size) +{ + int i; + uint8_t hash; + + hash = 0; + for (i = 0; i < hash_size; ++i) + hash ^= hash_start[i]; + + return hash; +} + +static uint16_t +calculate_slave(struct bond_dev_private *internals) +{ + uint16_t idx; + + idx = (internals->mode6.last_slave + 1) % internals->active_slave_count; + internals->mode6.last_slave = idx; + return internals->active_slaves[idx]; +} + +int +bond_mode_alb_enable(struct rte_eth_dev *bond_dev) +{ + struct bond_dev_private *internals = bond_dev->data->dev_private; + struct client_data *hash_table = internals->mode6.client_table; + + uint16_t data_size; + char mem_name[RTE_ETH_NAME_MAX_LEN]; + int socket_id = bond_dev->data->numa_node; + + /* Fill hash table with initial values */ + memset(hash_table, 0, sizeof(struct client_data) * ALB_HASH_TABLE_SIZE); + rte_spinlock_init(&internals->mode6.lock); + internals->mode6.last_slave = ALB_NULL_INDEX; + internals->mode6.ntt = 0; + + /* Initialize memory pool for ARP packets to send */ + if (internals->mode6.mempool == NULL) { + /* + * 256 is size of ETH header, ARP header and nested VLAN headers. + * The value is chosen to be cache aligned. + */ + data_size = 256 + RTE_PKTMBUF_HEADROOM; + snprintf(mem_name, sizeof(mem_name), "%s_ALB", + bond_dev->device->name); + internals->mode6.mempool = rte_pktmbuf_pool_create(mem_name, + 512 * RTE_MAX_ETHPORTS, + RTE_MEMPOOL_CACHE_MAX_SIZE >= 32 ? 
+ 32 : RTE_MEMPOOL_CACHE_MAX_SIZE, + 0, data_size, socket_id); + + if (internals->mode6.mempool == NULL) { + RTE_BOND_LOG(ERR, "%s: Failed to initialize ALB mempool.\n", + bond_dev->device->name); + goto mempool_alloc_error; + } + } + + return 0; + +mempool_alloc_error: + return -ENOMEM; +} + +void bond_mode_alb_arp_recv(struct rte_ether_hdr *eth_h, uint16_t offset, + struct bond_dev_private *internals) +{ + struct rte_arp_hdr *arp; + + struct client_data *hash_table = internals->mode6.client_table; + struct client_data *client_info; + + uint8_t hash_index; + + arp = (struct rte_arp_hdr *)((char *)(eth_h + 1) + offset); + + /* ARP Requests are forwarded to the application with no changes */ + if (arp->arp_opcode != rte_cpu_to_be_16(RTE_ARP_OP_REPLY)) + return; + + /* From now on, we analyze only ARP Reply packets */ + hash_index = simple_hash((uint8_t *) &arp->arp_data.arp_sip, + sizeof(arp->arp_data.arp_sip)); + client_info = &hash_table[hash_index]; + + /* + * We got reply for ARP Request send by the application. We need to + * update client table when received data differ from what is stored + * in ALB table and issue sending update packet to that slave. + */ + rte_spinlock_lock(&internals->mode6.lock); + if (client_info->in_use == 0 || + client_info->app_ip != arp->arp_data.arp_tip || + client_info->cli_ip != arp->arp_data.arp_sip || + !rte_is_same_ether_addr(&client_info->cli_mac, + &arp->arp_data.arp_sha) || + client_info->vlan_count != offset / sizeof(struct rte_vlan_hdr) || + memcmp(client_info->vlan, eth_h + 1, offset) != 0 + ) { + client_info->in_use = 1; + client_info->app_ip = arp->arp_data.arp_tip; + client_info->cli_ip = arp->arp_data.arp_sip; + rte_ether_addr_copy(&arp->arp_data.arp_sha, + &client_info->cli_mac); + client_info->slave_idx = calculate_slave(internals); + rte_eth_macaddr_get(client_info->slave_idx, + &client_info->app_mac); + rte_ether_addr_copy(&client_info->app_mac, + &arp->arp_data.arp_tha); + memcpy(client_info->vlan, eth_h + 1, offset); + client_info->vlan_count = offset / sizeof(struct rte_vlan_hdr); + } + internals->mode6.ntt = 1; + rte_spinlock_unlock(&internals->mode6.lock); +} + +uint16_t +bond_mode_alb_arp_xmit(struct rte_ether_hdr *eth_h, uint16_t offset, + struct bond_dev_private *internals) +{ + struct rte_arp_hdr *arp; + + struct client_data *hash_table = internals->mode6.client_table; + struct client_data *client_info; + + uint8_t hash_index; + + struct rte_ether_addr bonding_mac; + + arp = (struct rte_arp_hdr *)((char *)(eth_h + 1) + offset); + + /* + * Traffic with src MAC other than bonding should be sent on + * current primary port. 
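+ * (such ARP frames were not originated with the bonding MAC, so ALB does
+ * not load-balance them; they are simply steered to the primary slave)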
+ */ + rte_eth_macaddr_get(internals->port_id, &bonding_mac); + if (!rte_is_same_ether_addr(&bonding_mac, &arp->arp_data.arp_sha)) { + rte_eth_macaddr_get(internals->current_primary_port, + &arp->arp_data.arp_sha); + return internals->current_primary_port; + } + + hash_index = simple_hash((uint8_t *)&arp->arp_data.arp_tip, + sizeof(uint32_t)); + client_info = &hash_table[hash_index]; + + rte_spinlock_lock(&internals->mode6.lock); + if (arp->arp_opcode == rte_cpu_to_be_16(RTE_ARP_OP_REPLY)) { + if (client_info->in_use) { + if (client_info->app_ip == arp->arp_data.arp_sip && + client_info->cli_ip == arp->arp_data.arp_tip) { + /* Entry is already assigned to this client */ + if (!rte_is_broadcast_ether_addr( + &arp->arp_data.arp_tha)) { + rte_ether_addr_copy( + &arp->arp_data.arp_tha, + &client_info->cli_mac); + } + rte_eth_macaddr_get(client_info->slave_idx, + &client_info->app_mac); + rte_ether_addr_copy(&client_info->app_mac, + &arp->arp_data.arp_sha); + memcpy(client_info->vlan, eth_h + 1, offset); + client_info->vlan_count = offset / sizeof(struct rte_vlan_hdr); + rte_spinlock_unlock(&internals->mode6.lock); + return client_info->slave_idx; + } + } + + /* Assign new slave to this client and update src mac in ARP */ + client_info->in_use = 1; + client_info->ntt = 0; + client_info->app_ip = arp->arp_data.arp_sip; + rte_ether_addr_copy(&arp->arp_data.arp_tha, + &client_info->cli_mac); + client_info->cli_ip = arp->arp_data.arp_tip; + client_info->slave_idx = calculate_slave(internals); + rte_eth_macaddr_get(client_info->slave_idx, + &client_info->app_mac); + rte_ether_addr_copy(&client_info->app_mac, + &arp->arp_data.arp_sha); + memcpy(client_info->vlan, eth_h + 1, offset); + client_info->vlan_count = offset / sizeof(struct rte_vlan_hdr); + rte_spinlock_unlock(&internals->mode6.lock); + return client_info->slave_idx; + } + + /* If packet is not ARP Reply, send it on current primary port. 
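+ * (its ARP sender hardware address is rewritten to the current primary
+ * slave's MAC before the frame is sent)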
*/ + rte_spinlock_unlock(&internals->mode6.lock); + rte_eth_macaddr_get(internals->current_primary_port, + &arp->arp_data.arp_sha); + return internals->current_primary_port; +} + +uint16_t +bond_mode_alb_arp_upd(struct client_data *client_info, + struct rte_mbuf *pkt, struct bond_dev_private *internals) +{ + struct rte_ether_hdr *eth_h; + struct rte_arp_hdr *arp_h; + uint16_t slave_idx; + + rte_spinlock_lock(&internals->mode6.lock); + eth_h = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *); + + rte_ether_addr_copy(&client_info->app_mac, ð_h->s_addr); + rte_ether_addr_copy(&client_info->cli_mac, ð_h->d_addr); + if (client_info->vlan_count > 0) + eth_h->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN); + else + eth_h->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP); + + arp_h = (struct rte_arp_hdr *)( + (char *)eth_h + sizeof(struct rte_ether_hdr) + + client_info->vlan_count * sizeof(struct rte_vlan_hdr)); + + memcpy(eth_h + 1, client_info->vlan, + client_info->vlan_count * sizeof(struct rte_vlan_hdr)); + + rte_ether_addr_copy(&client_info->app_mac, &arp_h->arp_data.arp_sha); + arp_h->arp_data.arp_sip = client_info->app_ip; + rte_ether_addr_copy(&client_info->cli_mac, &arp_h->arp_data.arp_tha); + arp_h->arp_data.arp_tip = client_info->cli_ip; + + arp_h->arp_hardware = rte_cpu_to_be_16(RTE_ARP_HRD_ETHER); + arp_h->arp_protocol = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4); + arp_h->arp_hlen = RTE_ETHER_ADDR_LEN; + arp_h->arp_plen = sizeof(uint32_t); + arp_h->arp_opcode = rte_cpu_to_be_16(RTE_ARP_OP_REPLY); + + slave_idx = client_info->slave_idx; + rte_spinlock_unlock(&internals->mode6.lock); + + return slave_idx; +} + +void +bond_mode_alb_client_list_upd(struct rte_eth_dev *bond_dev) +{ + struct bond_dev_private *internals = bond_dev->data->dev_private; + struct client_data *client_info; + + int i; + + /* If active slave count is 0, it's pointless to refresh alb table */ + if (internals->active_slave_count <= 0) + return; + + rte_spinlock_lock(&internals->mode6.lock); + internals->mode6.last_slave = ALB_NULL_INDEX; + + for (i = 0; i < ALB_HASH_TABLE_SIZE; i++) { + client_info = &internals->mode6.client_table[i]; + if (client_info->in_use) { + client_info->slave_idx = calculate_slave(internals); + rte_eth_macaddr_get(client_info->slave_idx, &client_info->app_mac); + internals->mode6.ntt = 1; + } + } + rte_spinlock_unlock(&internals->mode6.lock); +} diff --git a/src/spdk/dpdk/drivers/net/bonding/rte_eth_bond_alb.h b/src/spdk/dpdk/drivers/net/bonding/rte_eth_bond_alb.h new file mode 100644 index 000000000..386e70c59 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bonding/rte_eth_bond_alb.h @@ -0,0 +1,113 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2015 Intel Corporation + */ + +#ifndef RTE_ETH_BOND_ALB_H_ +#define RTE_ETH_BOND_ALB_H_ + +#include +#include + +#define ALB_HASH_TABLE_SIZE 256 +#define ALB_NULL_INDEX 0xFFFFFFFF + +struct client_data { + /** ARP data of single client */ + struct rte_ether_addr app_mac; + /**< MAC address of application running DPDK */ + uint32_t app_ip; + /**< IP address of application running DPDK */ + struct rte_ether_addr cli_mac; + /**< Client MAC address */ + uint32_t cli_ip; + /**< Client IP address */ + + uint16_t slave_idx; + /**< Index of slave on which we connect with that client */ + uint8_t in_use; + /**< Flag indicating if entry in client table is currently used */ + uint8_t ntt; + /**< Flag indicating if we need to send update to this client on next tx */ + + struct rte_vlan_hdr vlan[2]; + /**< Content of vlan headers */ + uint8_t 
vlan_count; + /**< Number of nested vlan headers */ +}; + +struct mode_alb_private { + struct client_data client_table[ALB_HASH_TABLE_SIZE]; + /**< Hash table storing ARP data of every client connected */ + struct rte_mempool *mempool; + /**< Mempool for creating ARP update packets */ + uint8_t ntt; + /**< Flag indicating if we need to send update to any client on next tx */ + uint32_t last_slave; + /**< Index of last used slave in client table */ + rte_spinlock_t lock; +}; + +/** + * ALB mode initialization. + * + * @param bond_dev Pointer to bonding device. + * + * @return + * Error code - 0 on success. + */ +int +bond_mode_alb_enable(struct rte_eth_dev *bond_dev); + +/** + * Function handles ARP packet reception. If received ARP request, it is + * forwarded to application without changes. If it is ARP reply, client table + * is updated. + * + * @param eth_h ETH header of received packet. + * @param offset Vlan header offset. + * @param internals Bonding data. + */ +void +bond_mode_alb_arp_recv(struct rte_ether_hdr *eth_h, uint16_t offset, + struct bond_dev_private *internals); + +/** + * Function handles ARP packet transmission. It also decides on which slave + * send that packet. If packet is ARP Request, it is send on primary slave. + * If it is ARP Reply, it is send on slave stored in client table for that + * connection. On Reply function also updates data in client table. + * + * @param eth_h ETH header of transmitted packet. + * @param offset Vlan header offset. + * @param internals Bonding data. + * + * @return + * Index of slave on which packet should be sent. + */ +uint16_t +bond_mode_alb_arp_xmit(struct rte_ether_hdr *eth_h, uint16_t offset, + struct bond_dev_private *internals); + +/** + * Function fills packet with ARP data from client_info. + * + * @param client_info Data of client to which packet is sent. + * @param pkt Pointer to packet which is sent. + * @param internals Bonding data. + * + * @return + * Index of slawe on which packet should be sent. + */ +uint16_t +bond_mode_alb_arp_upd(struct client_data *client_info, + struct rte_mbuf *pkt, struct bond_dev_private *internals); + +/** + * Function updates slave indexes of active connections. + * + * @param bond_dev Pointer to bonded device struct. 
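+ *
+ * Typically invoked when a slave is activated or deactivated, so that
+ * existing clients are re-balanced across the currently active slaves.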
+ */ +void +bond_mode_alb_client_list_upd(struct rte_eth_dev *bond_dev); + +#endif /* RTE_ETH_BOND_ALB_H_ */ diff --git a/src/spdk/dpdk/drivers/net/bonding/rte_eth_bond_api.c b/src/spdk/dpdk/drivers/net/bonding/rte_eth_bond_api.c new file mode 100644 index 000000000..f38eb3b47 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bonding/rte_eth_bond_api.c @@ -0,0 +1,1052 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2017 Intel Corporation + */ + +#include + +#include +#include +#include +#include +#include +#include + +#include "rte_eth_bond.h" +#include "eth_bond_private.h" +#include "eth_bond_8023ad_private.h" + +int +check_for_bonded_ethdev(const struct rte_eth_dev *eth_dev) +{ + /* Check valid pointer */ + if (eth_dev == NULL || + eth_dev->device == NULL || + eth_dev->device->driver == NULL || + eth_dev->device->driver->name == NULL) + return -1; + + /* return 0 if driver name matches */ + return eth_dev->device->driver->name != pmd_bond_drv.driver.name; +} + +int +valid_bonded_port_id(uint16_t port_id) +{ + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1); + return check_for_bonded_ethdev(&rte_eth_devices[port_id]); +} + +int +check_for_master_bonded_ethdev(const struct rte_eth_dev *eth_dev) +{ + int i; + struct bond_dev_private *internals; + + if (check_for_bonded_ethdev(eth_dev) != 0) + return 0; + + internals = eth_dev->data->dev_private; + + /* Check if any of slave devices is a bonded device */ + for (i = 0; i < internals->slave_count; i++) + if (valid_bonded_port_id(internals->slaves[i].port_id) == 0) + return 1; + + return 0; +} + +int +valid_slave_port_id(uint16_t port_id, uint8_t mode) +{ + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1); + + /* Verify that port_id refers to a non bonded port */ + if (check_for_bonded_ethdev(&rte_eth_devices[port_id]) == 0 && + mode == BONDING_MODE_8023AD) { + RTE_BOND_LOG(ERR, "Cannot add slave to bonded device in 802.3ad" + " mode as slave is also a bonded device, only " + "physical devices can be support in this mode."); + return -1; + } + + return 0; +} + +void +activate_slave(struct rte_eth_dev *eth_dev, uint16_t port_id) +{ + struct bond_dev_private *internals = eth_dev->data->dev_private; + uint16_t active_count = internals->active_slave_count; + + if (internals->mode == BONDING_MODE_8023AD) + bond_mode_8023ad_activate_slave(eth_dev, port_id); + + if (internals->mode == BONDING_MODE_TLB + || internals->mode == BONDING_MODE_ALB) { + + internals->tlb_slaves_order[active_count] = port_id; + } + + RTE_ASSERT(internals->active_slave_count < + (RTE_DIM(internals->active_slaves) - 1)); + + internals->active_slaves[internals->active_slave_count] = port_id; + internals->active_slave_count++; + + if (internals->mode == BONDING_MODE_TLB) + bond_tlb_activate_slave(internals); + if (internals->mode == BONDING_MODE_ALB) + bond_mode_alb_client_list_upd(eth_dev); +} + +void +deactivate_slave(struct rte_eth_dev *eth_dev, uint16_t port_id) +{ + uint16_t slave_pos; + struct bond_dev_private *internals = eth_dev->data->dev_private; + uint16_t active_count = internals->active_slave_count; + + if (internals->mode == BONDING_MODE_8023AD) { + bond_mode_8023ad_stop(eth_dev); + bond_mode_8023ad_deactivate_slave(eth_dev, port_id); + } else if (internals->mode == BONDING_MODE_TLB + || internals->mode == BONDING_MODE_ALB) + bond_tlb_disable(internals); + + slave_pos = find_slave_by_id(internals->active_slaves, active_count, + port_id); + + /* If slave was not at the end of the list + * shift active slaves up active array list */ + if (slave_pos < 
active_count) { + active_count--; + memmove(internals->active_slaves + slave_pos, + internals->active_slaves + slave_pos + 1, + (active_count - slave_pos) * + sizeof(internals->active_slaves[0])); + } + + RTE_ASSERT(active_count < RTE_DIM(internals->active_slaves)); + internals->active_slave_count = active_count; + + /* Resetting active_slave when reaches to max + * no of slaves in active list + */ + if (internals->active_slave >= active_count) + internals->active_slave = 0; + + if (eth_dev->data->dev_started) { + if (internals->mode == BONDING_MODE_8023AD) { + bond_mode_8023ad_start(eth_dev); + } else if (internals->mode == BONDING_MODE_TLB) { + bond_tlb_enable(internals); + } else if (internals->mode == BONDING_MODE_ALB) { + bond_tlb_enable(internals); + bond_mode_alb_client_list_upd(eth_dev); + } + } +} + +int +rte_eth_bond_create(const char *name, uint8_t mode, uint8_t socket_id) +{ + struct bond_dev_private *internals; + char devargs[52]; + uint16_t port_id; + int ret; + + if (name == NULL) { + RTE_BOND_LOG(ERR, "Invalid name specified"); + return -EINVAL; + } + + ret = snprintf(devargs, sizeof(devargs), + "driver=net_bonding,mode=%d,socket_id=%d", mode, socket_id); + if (ret < 0 || ret >= (int)sizeof(devargs)) + return -ENOMEM; + + ret = rte_vdev_init(name, devargs); + if (ret) + return -ENOMEM; + + ret = rte_eth_dev_get_port_by_name(name, &port_id); + RTE_ASSERT(!ret); + + /* + * To make bond_ethdev_configure() happy we need to free the + * internals->kvlist here. + * + * Also see comment in bond_ethdev_configure(). + */ + internals = rte_eth_devices[port_id].data->dev_private; + rte_kvargs_free(internals->kvlist); + internals->kvlist = NULL; + + return port_id; +} + +int +rte_eth_bond_free(const char *name) +{ + return rte_vdev_uninit(name); +} + +static int +slave_vlan_filter_set(uint16_t bonded_port_id, uint16_t slave_port_id) +{ + struct rte_eth_dev *bonded_eth_dev; + struct bond_dev_private *internals; + int found; + int res = 0; + uint64_t slab = 0; + uint32_t pos = 0; + uint16_t first; + + bonded_eth_dev = &rte_eth_devices[bonded_port_id]; + if ((bonded_eth_dev->data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_VLAN_FILTER) == 0) + return 0; + + internals = bonded_eth_dev->data->dev_private; + found = rte_bitmap_scan(internals->vlan_filter_bmp, &pos, &slab); + first = pos; + + if (!found) + return 0; + + do { + uint32_t i; + uint64_t mask; + + for (i = 0, mask = 1; + i < RTE_BITMAP_SLAB_BIT_SIZE; + i ++, mask <<= 1) { + if (unlikely(slab & mask)) { + uint16_t vlan_id = pos + i; + + res = rte_eth_dev_vlan_filter(slave_port_id, + vlan_id, 1); + } + } + found = rte_bitmap_scan(internals->vlan_filter_bmp, + &pos, &slab); + } while (found && first != pos && res == 0); + + return res; +} + +static int +slave_rte_flow_prepare(uint16_t slave_id, struct bond_dev_private *internals) +{ + struct rte_flow *flow; + struct rte_flow_error ferror; + uint16_t slave_port_id = internals->slaves[slave_id].port_id; + + if (internals->flow_isolated_valid != 0) { + rte_eth_dev_stop(slave_port_id); + if (rte_flow_isolate(slave_port_id, internals->flow_isolated, + &ferror)) { + RTE_BOND_LOG(ERR, "rte_flow_isolate failed for slave" + " %d: %s", slave_id, ferror.message ? 
+ ferror.message : "(no stated reason)"); + return -1; + } + } + TAILQ_FOREACH(flow, &internals->flow_list, next) { + flow->flows[slave_id] = rte_flow_create(slave_port_id, + flow->rule.attr, + flow->rule.pattern, + flow->rule.actions, + &ferror); + if (flow->flows[slave_id] == NULL) { + RTE_BOND_LOG(ERR, "Cannot create flow for slave" + " %d: %s", slave_id, + ferror.message ? ferror.message : + "(no stated reason)"); + /* Destroy successful bond flows from the slave */ + TAILQ_FOREACH(flow, &internals->flow_list, next) { + if (flow->flows[slave_id] != NULL) { + rte_flow_destroy(slave_port_id, + flow->flows[slave_id], + &ferror); + flow->flows[slave_id] = NULL; + } + } + return -1; + } + } + return 0; +} + +static void +eth_bond_slave_inherit_dev_info_rx_first(struct bond_dev_private *internals, + const struct rte_eth_dev_info *di) +{ + struct rte_eth_rxconf *rxconf_i = &internals->default_rxconf; + + internals->reta_size = di->reta_size; + + /* Inherit Rx offload capabilities from the first slave device */ + internals->rx_offload_capa = di->rx_offload_capa; + internals->rx_queue_offload_capa = di->rx_queue_offload_capa; + internals->flow_type_rss_offloads = di->flow_type_rss_offloads; + + /* Inherit maximum Rx packet size from the first slave device */ + internals->candidate_max_rx_pktlen = di->max_rx_pktlen; + + /* Inherit default Rx queue settings from the first slave device */ + memcpy(rxconf_i, &di->default_rxconf, sizeof(*rxconf_i)); + + /* + * Turn off descriptor prefetch and writeback by default for all + * slave devices. Applications may tweak this setting if need be. + */ + rxconf_i->rx_thresh.pthresh = 0; + rxconf_i->rx_thresh.hthresh = 0; + rxconf_i->rx_thresh.wthresh = 0; + + /* Setting this to zero should effectively enable default values */ + rxconf_i->rx_free_thresh = 0; + + /* Disable deferred start by default for all slave devices */ + rxconf_i->rx_deferred_start = 0; +} + +static void +eth_bond_slave_inherit_dev_info_tx_first(struct bond_dev_private *internals, + const struct rte_eth_dev_info *di) +{ + struct rte_eth_txconf *txconf_i = &internals->default_txconf; + + /* Inherit Tx offload capabilities from the first slave device */ + internals->tx_offload_capa = di->tx_offload_capa; + internals->tx_queue_offload_capa = di->tx_queue_offload_capa; + + /* Inherit default Tx queue settings from the first slave device */ + memcpy(txconf_i, &di->default_txconf, sizeof(*txconf_i)); + + /* + * Turn off descriptor prefetch and writeback by default for all + * slave devices. Applications may tweak this setting if need be. + */ + txconf_i->tx_thresh.pthresh = 0; + txconf_i->tx_thresh.hthresh = 0; + txconf_i->tx_thresh.wthresh = 0; + + /* + * Setting these parameters to zero assumes that default + * values will be configured implicitly by slave devices. 
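+ * (a threshold of zero lets each slave PMD fall back to its own
+ * preferred default)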
+ */ + txconf_i->tx_free_thresh = 0; + txconf_i->tx_rs_thresh = 0; + + /* Disable deferred start by default for all slave devices */ + txconf_i->tx_deferred_start = 0; +} + +static void +eth_bond_slave_inherit_dev_info_rx_next(struct bond_dev_private *internals, + const struct rte_eth_dev_info *di) +{ + struct rte_eth_rxconf *rxconf_i = &internals->default_rxconf; + const struct rte_eth_rxconf *rxconf = &di->default_rxconf; + + internals->rx_offload_capa &= di->rx_offload_capa; + internals->rx_queue_offload_capa &= di->rx_queue_offload_capa; + internals->flow_type_rss_offloads &= di->flow_type_rss_offloads; + + /* + * If at least one slave device suggests enabling this + * setting by default, enable it for all slave devices + * since disabling it may not be necessarily supported. + */ + if (rxconf->rx_drop_en == 1) + rxconf_i->rx_drop_en = 1; + + /* + * Adding a new slave device may cause some of previously inherited + * offloads to be withdrawn from the internal rx_queue_offload_capa + * value. Thus, the new internal value of default Rx queue offloads + * has to be masked by rx_queue_offload_capa to make sure that only + * commonly supported offloads are preserved from both the previous + * value and the value being inhereted from the new slave device. + */ + rxconf_i->offloads = (rxconf_i->offloads | rxconf->offloads) & + internals->rx_queue_offload_capa; + + /* + * RETA size is GCD of all slaves RETA sizes, so, if all sizes will be + * the power of 2, the lower one is GCD + */ + if (internals->reta_size > di->reta_size) + internals->reta_size = di->reta_size; + + if (!internals->max_rx_pktlen && + di->max_rx_pktlen < internals->candidate_max_rx_pktlen) + internals->candidate_max_rx_pktlen = di->max_rx_pktlen; +} + +static void +eth_bond_slave_inherit_dev_info_tx_next(struct bond_dev_private *internals, + const struct rte_eth_dev_info *di) +{ + struct rte_eth_txconf *txconf_i = &internals->default_txconf; + const struct rte_eth_txconf *txconf = &di->default_txconf; + + internals->tx_offload_capa &= di->tx_offload_capa; + internals->tx_queue_offload_capa &= di->tx_queue_offload_capa; + + /* + * Adding a new slave device may cause some of previously inherited + * offloads to be withdrawn from the internal tx_queue_offload_capa + * value. Thus, the new internal value of default Tx queue offloads + * has to be masked by tx_queue_offload_capa to make sure that only + * commonly supported offloads are preserved from both the previous + * value and the value being inhereted from the new slave device. 
+ */ + txconf_i->offloads = (txconf_i->offloads | txconf->offloads) & + internals->tx_queue_offload_capa; +} + +static void +eth_bond_slave_inherit_desc_lim_first(struct rte_eth_desc_lim *bond_desc_lim, + const struct rte_eth_desc_lim *slave_desc_lim) +{ + memcpy(bond_desc_lim, slave_desc_lim, sizeof(*bond_desc_lim)); +} + +static int +eth_bond_slave_inherit_desc_lim_next(struct rte_eth_desc_lim *bond_desc_lim, + const struct rte_eth_desc_lim *slave_desc_lim) +{ + bond_desc_lim->nb_max = RTE_MIN(bond_desc_lim->nb_max, + slave_desc_lim->nb_max); + bond_desc_lim->nb_min = RTE_MAX(bond_desc_lim->nb_min, + slave_desc_lim->nb_min); + bond_desc_lim->nb_align = RTE_MAX(bond_desc_lim->nb_align, + slave_desc_lim->nb_align); + + if (bond_desc_lim->nb_min > bond_desc_lim->nb_max || + bond_desc_lim->nb_align > bond_desc_lim->nb_max) { + RTE_BOND_LOG(ERR, "Failed to inherit descriptor limits"); + return -EINVAL; + } + + /* Treat maximum number of segments equal to 0 as unspecified */ + if (slave_desc_lim->nb_seg_max != 0 && + (bond_desc_lim->nb_seg_max == 0 || + slave_desc_lim->nb_seg_max < bond_desc_lim->nb_seg_max)) + bond_desc_lim->nb_seg_max = slave_desc_lim->nb_seg_max; + if (slave_desc_lim->nb_mtu_seg_max != 0 && + (bond_desc_lim->nb_mtu_seg_max == 0 || + slave_desc_lim->nb_mtu_seg_max < bond_desc_lim->nb_mtu_seg_max)) + bond_desc_lim->nb_mtu_seg_max = slave_desc_lim->nb_mtu_seg_max; + + return 0; +} + +static int +__eth_bond_slave_add_lock_free(uint16_t bonded_port_id, uint16_t slave_port_id) +{ + struct rte_eth_dev *bonded_eth_dev, *slave_eth_dev; + struct bond_dev_private *internals; + struct rte_eth_link link_props; + struct rte_eth_dev_info dev_info; + int ret; + + bonded_eth_dev = &rte_eth_devices[bonded_port_id]; + internals = bonded_eth_dev->data->dev_private; + + if (valid_slave_port_id(slave_port_id, internals->mode) != 0) + return -1; + + slave_eth_dev = &rte_eth_devices[slave_port_id]; + if (slave_eth_dev->data->dev_flags & RTE_ETH_DEV_BONDED_SLAVE) { + RTE_BOND_LOG(ERR, "Slave device is already a slave of a bonded device"); + return -1; + } + + ret = rte_eth_dev_info_get(slave_port_id, &dev_info); + if (ret != 0) { + RTE_BOND_LOG(ERR, + "%s: Error during getting device (port %u) info: %s\n", + __func__, slave_port_id, strerror(-ret)); + + return ret; + } + if (dev_info.max_rx_pktlen < internals->max_rx_pktlen) { + RTE_BOND_LOG(ERR, "Slave (port %u) max_rx_pktlen too small", + slave_port_id); + return -1; + } + + slave_add(internals, slave_eth_dev); + + /* We need to store slaves reta_size to be able to synchronize RETA for all + * slave devices even if its sizes are different. 
+ */ + internals->slaves[internals->slave_count].reta_size = dev_info.reta_size; + + if (internals->slave_count < 1) { + /* if MAC is not user defined then use MAC of first slave add to + * bonded device */ + if (!internals->user_defined_mac) { + if (mac_address_set(bonded_eth_dev, + slave_eth_dev->data->mac_addrs)) { + RTE_BOND_LOG(ERR, "Failed to set MAC address"); + return -1; + } + } + + /* Make primary slave */ + internals->primary_port = slave_port_id; + internals->current_primary_port = slave_port_id; + + /* Inherit queues settings from first slave */ + internals->nb_rx_queues = slave_eth_dev->data->nb_rx_queues; + internals->nb_tx_queues = slave_eth_dev->data->nb_tx_queues; + + eth_bond_slave_inherit_dev_info_rx_first(internals, &dev_info); + eth_bond_slave_inherit_dev_info_tx_first(internals, &dev_info); + + eth_bond_slave_inherit_desc_lim_first(&internals->rx_desc_lim, + &dev_info.rx_desc_lim); + eth_bond_slave_inherit_desc_lim_first(&internals->tx_desc_lim, + &dev_info.tx_desc_lim); + } else { + int ret; + + eth_bond_slave_inherit_dev_info_rx_next(internals, &dev_info); + eth_bond_slave_inherit_dev_info_tx_next(internals, &dev_info); + + ret = eth_bond_slave_inherit_desc_lim_next( + &internals->rx_desc_lim, &dev_info.rx_desc_lim); + if (ret != 0) + return ret; + + ret = eth_bond_slave_inherit_desc_lim_next( + &internals->tx_desc_lim, &dev_info.tx_desc_lim); + if (ret != 0) + return ret; + } + + bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf &= + internals->flow_type_rss_offloads; + + if (slave_rte_flow_prepare(internals->slave_count, internals) != 0) { + RTE_BOND_LOG(ERR, "Failed to prepare new slave flows: port=%d", + slave_port_id); + return -1; + } + + /* Add additional MAC addresses to the slave */ + if (slave_add_mac_addresses(bonded_eth_dev, slave_port_id) != 0) { + RTE_BOND_LOG(ERR, "Failed to add mac address(es) to slave %hu", + slave_port_id); + return -1; + } + + internals->slave_count++; + + if (bonded_eth_dev->data->dev_started) { + if (slave_configure(bonded_eth_dev, slave_eth_dev) != 0) { + internals->slave_count--; + RTE_BOND_LOG(ERR, "rte_bond_slaves_configure: port=%d", + slave_port_id); + return -1; + } + } + + /* Update all slave devices MACs */ + mac_address_slaves_update(bonded_eth_dev); + + /* Register link status change callback with bonded device pointer as + * argument*/ + rte_eth_dev_callback_register(slave_port_id, RTE_ETH_EVENT_INTR_LSC, + bond_ethdev_lsc_event_callback, &bonded_eth_dev->data->port_id); + + /* If bonded device is started then we can add the slave to our active + * slave array */ + if (bonded_eth_dev->data->dev_started) { + ret = rte_eth_link_get_nowait(slave_port_id, &link_props); + if (ret < 0) { + rte_eth_dev_callback_unregister(slave_port_id, + RTE_ETH_EVENT_INTR_LSC, + bond_ethdev_lsc_event_callback, + &bonded_eth_dev->data->port_id); + internals->slave_count--; + RTE_BOND_LOG(ERR, + "Slave (port %u) link get failed: %s\n", + slave_port_id, rte_strerror(-ret)); + return -1; + } + + if (link_props.link_status == ETH_LINK_UP) { + if (internals->active_slave_count == 0 && + !internals->user_defined_primary_port) + bond_ethdev_primary_set(internals, + slave_port_id); + } + } + + /* Add slave details to bonded device */ + slave_eth_dev->data->dev_flags |= RTE_ETH_DEV_BONDED_SLAVE; + + slave_vlan_filter_set(bonded_port_id, slave_port_id); + + return 0; + +} + +int +rte_eth_bond_slave_add(uint16_t bonded_port_id, uint16_t slave_port_id) +{ + struct rte_eth_dev *bonded_eth_dev; + struct bond_dev_private *internals; + + int 
retval; + + /* Verify that port id's are valid bonded and slave ports */ + if (valid_bonded_port_id(bonded_port_id) != 0) + return -1; + + bonded_eth_dev = &rte_eth_devices[bonded_port_id]; + internals = bonded_eth_dev->data->dev_private; + + rte_spinlock_lock(&internals->lock); + + retval = __eth_bond_slave_add_lock_free(bonded_port_id, slave_port_id); + + rte_spinlock_unlock(&internals->lock); + + return retval; +} + +static int +__eth_bond_slave_remove_lock_free(uint16_t bonded_port_id, + uint16_t slave_port_id) +{ + struct rte_eth_dev *bonded_eth_dev; + struct bond_dev_private *internals; + struct rte_eth_dev *slave_eth_dev; + struct rte_flow_error flow_error; + struct rte_flow *flow; + int i, slave_idx; + + bonded_eth_dev = &rte_eth_devices[bonded_port_id]; + internals = bonded_eth_dev->data->dev_private; + + if (valid_slave_port_id(slave_port_id, internals->mode) < 0) + return -1; + + /* first remove from active slave list */ + slave_idx = find_slave_by_id(internals->active_slaves, + internals->active_slave_count, slave_port_id); + + if (slave_idx < internals->active_slave_count) + deactivate_slave(bonded_eth_dev, slave_port_id); + + slave_idx = -1; + /* now find in slave list */ + for (i = 0; i < internals->slave_count; i++) + if (internals->slaves[i].port_id == slave_port_id) { + slave_idx = i; + break; + } + + if (slave_idx < 0) { + RTE_BOND_LOG(ERR, "Couldn't find slave in port list, slave count %d", + internals->slave_count); + return -1; + } + + /* Un-register link status change callback with bonded device pointer as + * argument*/ + rte_eth_dev_callback_unregister(slave_port_id, RTE_ETH_EVENT_INTR_LSC, + bond_ethdev_lsc_event_callback, + &rte_eth_devices[bonded_port_id].data->port_id); + + /* Restore original MAC address of slave device */ + rte_eth_dev_default_mac_addr_set(slave_port_id, + &(internals->slaves[slave_idx].persisted_mac_addr)); + + /* remove additional MAC addresses from the slave */ + slave_remove_mac_addresses(bonded_eth_dev, slave_port_id); + + /* + * Remove bond device flows from slave device. + * Note: don't restore flow isolate mode. 
+ */ + TAILQ_FOREACH(flow, &internals->flow_list, next) { + if (flow->flows[slave_idx] != NULL) { + rte_flow_destroy(slave_port_id, flow->flows[slave_idx], + &flow_error); + flow->flows[slave_idx] = NULL; + } + } + + slave_eth_dev = &rte_eth_devices[slave_port_id]; + slave_remove(internals, slave_eth_dev); + slave_eth_dev->data->dev_flags &= (~RTE_ETH_DEV_BONDED_SLAVE); + + /* first slave in the active list will be the primary by default, + * otherwise use first device in list */ + if (internals->current_primary_port == slave_port_id) { + if (internals->active_slave_count > 0) + internals->current_primary_port = internals->active_slaves[0]; + else if (internals->slave_count > 0) + internals->current_primary_port = internals->slaves[0].port_id; + else + internals->primary_port = 0; + } + + if (internals->active_slave_count < 1) { + /* if no slaves are any longer attached to bonded device and MAC is not + * user defined then clear MAC of bonded device as it will be reset + * when a new slave is added */ + if (internals->slave_count < 1 && !internals->user_defined_mac) + memset(rte_eth_devices[bonded_port_id].data->mac_addrs, 0, + sizeof(*(rte_eth_devices[bonded_port_id].data->mac_addrs))); + } + if (internals->slave_count == 0) { + internals->rx_offload_capa = 0; + internals->tx_offload_capa = 0; + internals->rx_queue_offload_capa = 0; + internals->tx_queue_offload_capa = 0; + internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK; + internals->reta_size = 0; + internals->candidate_max_rx_pktlen = 0; + internals->max_rx_pktlen = 0; + } + return 0; +} + +int +rte_eth_bond_slave_remove(uint16_t bonded_port_id, uint16_t slave_port_id) +{ + struct rte_eth_dev *bonded_eth_dev; + struct bond_dev_private *internals; + int retval; + + if (valid_bonded_port_id(bonded_port_id) != 0) + return -1; + + bonded_eth_dev = &rte_eth_devices[bonded_port_id]; + internals = bonded_eth_dev->data->dev_private; + + rte_spinlock_lock(&internals->lock); + + retval = __eth_bond_slave_remove_lock_free(bonded_port_id, slave_port_id); + + rte_spinlock_unlock(&internals->lock); + + return retval; +} + +int +rte_eth_bond_mode_set(uint16_t bonded_port_id, uint8_t mode) +{ + struct rte_eth_dev *bonded_eth_dev; + + if (valid_bonded_port_id(bonded_port_id) != 0) + return -1; + + bonded_eth_dev = &rte_eth_devices[bonded_port_id]; + + if (check_for_master_bonded_ethdev(bonded_eth_dev) != 0 && + mode == BONDING_MODE_8023AD) + return -1; + + return bond_ethdev_mode_set(bonded_eth_dev, mode); +} + +int +rte_eth_bond_mode_get(uint16_t bonded_port_id) +{ + struct bond_dev_private *internals; + + if (valid_bonded_port_id(bonded_port_id) != 0) + return -1; + + internals = rte_eth_devices[bonded_port_id].data->dev_private; + + return internals->mode; +} + +int +rte_eth_bond_primary_set(uint16_t bonded_port_id, uint16_t slave_port_id) +{ + struct bond_dev_private *internals; + + if (valid_bonded_port_id(bonded_port_id) != 0) + return -1; + + internals = rte_eth_devices[bonded_port_id].data->dev_private; + + if (valid_slave_port_id(slave_port_id, internals->mode) != 0) + return -1; + + internals->user_defined_primary_port = 1; + internals->primary_port = slave_port_id; + + bond_ethdev_primary_set(internals, slave_port_id); + + return 0; +} + +int +rte_eth_bond_primary_get(uint16_t bonded_port_id) +{ + struct bond_dev_private *internals; + + if (valid_bonded_port_id(bonded_port_id) != 0) + return -1; + + internals = rte_eth_devices[bonded_port_id].data->dev_private; + + if (internals->slave_count < 1) + return -1; + + return 
internals->current_primary_port; +} + +int +rte_eth_bond_slaves_get(uint16_t bonded_port_id, uint16_t slaves[], + uint16_t len) +{ + struct bond_dev_private *internals; + uint16_t i; + + if (valid_bonded_port_id(bonded_port_id) != 0) + return -1; + + if (slaves == NULL) + return -1; + + internals = rte_eth_devices[bonded_port_id].data->dev_private; + + if (internals->slave_count > len) + return -1; + + for (i = 0; i < internals->slave_count; i++) + slaves[i] = internals->slaves[i].port_id; + + return internals->slave_count; +} + +int +rte_eth_bond_active_slaves_get(uint16_t bonded_port_id, uint16_t slaves[], + uint16_t len) +{ + struct bond_dev_private *internals; + + if (valid_bonded_port_id(bonded_port_id) != 0) + return -1; + + if (slaves == NULL) + return -1; + + internals = rte_eth_devices[bonded_port_id].data->dev_private; + + if (internals->active_slave_count > len) + return -1; + + memcpy(slaves, internals->active_slaves, + internals->active_slave_count * sizeof(internals->active_slaves[0])); + + return internals->active_slave_count; +} + +int +rte_eth_bond_mac_address_set(uint16_t bonded_port_id, + struct rte_ether_addr *mac_addr) +{ + struct rte_eth_dev *bonded_eth_dev; + struct bond_dev_private *internals; + + if (valid_bonded_port_id(bonded_port_id) != 0) + return -1; + + bonded_eth_dev = &rte_eth_devices[bonded_port_id]; + internals = bonded_eth_dev->data->dev_private; + + /* Set MAC Address of Bonded Device */ + if (mac_address_set(bonded_eth_dev, mac_addr)) + return -1; + + internals->user_defined_mac = 1; + + /* Update all slave devices MACs*/ + if (internals->slave_count > 0) + return mac_address_slaves_update(bonded_eth_dev); + + return 0; +} + +int +rte_eth_bond_mac_address_reset(uint16_t bonded_port_id) +{ + struct rte_eth_dev *bonded_eth_dev; + struct bond_dev_private *internals; + + if (valid_bonded_port_id(bonded_port_id) != 0) + return -1; + + bonded_eth_dev = &rte_eth_devices[bonded_port_id]; + internals = bonded_eth_dev->data->dev_private; + + internals->user_defined_mac = 0; + + if (internals->slave_count > 0) { + int slave_port; + /* Get the primary slave location based on the primary port + * number as, while slave_add(), we will keep the primary + * slave based on slave_count,but not based on the primary port. 
+ */ + for (slave_port = 0; slave_port < internals->slave_count; + slave_port++) { + if (internals->slaves[slave_port].port_id == + internals->primary_port) + break; + } + + /* Set MAC Address of Bonded Device */ + if (mac_address_set(bonded_eth_dev, + &internals->slaves[slave_port].persisted_mac_addr) + != 0) { + RTE_BOND_LOG(ERR, "Failed to set MAC address on bonded device"); + return -1; + } + /* Update all slave devices MAC addresses */ + return mac_address_slaves_update(bonded_eth_dev); + } + /* No need to update anything as no slaves present */ + return 0; +} + +int +rte_eth_bond_xmit_policy_set(uint16_t bonded_port_id, uint8_t policy) +{ + struct bond_dev_private *internals; + + if (valid_bonded_port_id(bonded_port_id) != 0) + return -1; + + internals = rte_eth_devices[bonded_port_id].data->dev_private; + + switch (policy) { + case BALANCE_XMIT_POLICY_LAYER2: + internals->balance_xmit_policy = policy; + internals->burst_xmit_hash = burst_xmit_l2_hash; + break; + case BALANCE_XMIT_POLICY_LAYER23: + internals->balance_xmit_policy = policy; + internals->burst_xmit_hash = burst_xmit_l23_hash; + break; + case BALANCE_XMIT_POLICY_LAYER34: + internals->balance_xmit_policy = policy; + internals->burst_xmit_hash = burst_xmit_l34_hash; + break; + + default: + return -1; + } + return 0; +} + +int +rte_eth_bond_xmit_policy_get(uint16_t bonded_port_id) +{ + struct bond_dev_private *internals; + + if (valid_bonded_port_id(bonded_port_id) != 0) + return -1; + + internals = rte_eth_devices[bonded_port_id].data->dev_private; + + return internals->balance_xmit_policy; +} + +int +rte_eth_bond_link_monitoring_set(uint16_t bonded_port_id, uint32_t internal_ms) +{ + struct bond_dev_private *internals; + + if (valid_bonded_port_id(bonded_port_id) != 0) + return -1; + + internals = rte_eth_devices[bonded_port_id].data->dev_private; + internals->link_status_polling_interval_ms = internal_ms; + + return 0; +} + +int +rte_eth_bond_link_monitoring_get(uint16_t bonded_port_id) +{ + struct bond_dev_private *internals; + + if (valid_bonded_port_id(bonded_port_id) != 0) + return -1; + + internals = rte_eth_devices[bonded_port_id].data->dev_private; + + return internals->link_status_polling_interval_ms; +} + +int +rte_eth_bond_link_down_prop_delay_set(uint16_t bonded_port_id, + uint32_t delay_ms) + +{ + struct bond_dev_private *internals; + + if (valid_bonded_port_id(bonded_port_id) != 0) + return -1; + + internals = rte_eth_devices[bonded_port_id].data->dev_private; + internals->link_down_delay_ms = delay_ms; + + return 0; +} + +int +rte_eth_bond_link_down_prop_delay_get(uint16_t bonded_port_id) +{ + struct bond_dev_private *internals; + + if (valid_bonded_port_id(bonded_port_id) != 0) + return -1; + + internals = rte_eth_devices[bonded_port_id].data->dev_private; + + return internals->link_down_delay_ms; +} + +int +rte_eth_bond_link_up_prop_delay_set(uint16_t bonded_port_id, uint32_t delay_ms) + +{ + struct bond_dev_private *internals; + + if (valid_bonded_port_id(bonded_port_id) != 0) + return -1; + + internals = rte_eth_devices[bonded_port_id].data->dev_private; + internals->link_up_delay_ms = delay_ms; + + return 0; +} + +int +rte_eth_bond_link_up_prop_delay_get(uint16_t bonded_port_id) +{ + struct bond_dev_private *internals; + + if (valid_bonded_port_id(bonded_port_id) != 0) + return -1; + + internals = rte_eth_devices[bonded_port_id].data->dev_private; + + return internals->link_up_delay_ms; +} diff --git a/src/spdk/dpdk/drivers/net/bonding/rte_eth_bond_args.c 
b/src/spdk/dpdk/drivers/net/bonding/rte_eth_bond_args.c new file mode 100644 index 000000000..abdf55261 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bonding/rte_eth_bond_args.c @@ -0,0 +1,301 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2014 Intel Corporation + */ + +#include +#include +#include +#include + +#include "rte_eth_bond.h" +#include "eth_bond_private.h" + +const char *pmd_bond_init_valid_arguments[] = { + PMD_BOND_SLAVE_PORT_KVARG, + PMD_BOND_PRIMARY_SLAVE_KVARG, + PMD_BOND_MODE_KVARG, + PMD_BOND_XMIT_POLICY_KVARG, + PMD_BOND_SOCKET_ID_KVARG, + PMD_BOND_MAC_ADDR_KVARG, + PMD_BOND_AGG_MODE_KVARG, + "driver", + NULL +}; + +static inline int +find_port_id_by_pci_addr(const struct rte_pci_addr *pci_addr) +{ + struct rte_pci_device *pci_dev; + struct rte_pci_addr *eth_pci_addr; + unsigned i; + + RTE_ETH_FOREACH_DEV(i) { + pci_dev = RTE_ETH_DEV_TO_PCI(&rte_eth_devices[i]); + eth_pci_addr = &pci_dev->addr; + + if (pci_addr->bus == eth_pci_addr->bus && + pci_addr->devid == eth_pci_addr->devid && + pci_addr->domain == eth_pci_addr->domain && + pci_addr->function == eth_pci_addr->function) + return i; + } + return -1; +} + +static inline int +find_port_id_by_dev_name(const char *name) +{ + unsigned i; + + RTE_ETH_FOREACH_DEV(i) { + if (rte_eth_devices[i].data == NULL) + continue; + + if (strcmp(rte_eth_devices[i].device->name, name) == 0) + return i; + } + return -1; +} + +static inline int +bond_pci_addr_cmp(const struct rte_device *dev, const void *_pci_addr) +{ + const struct rte_pci_device *pdev = RTE_DEV_TO_PCI_CONST(dev); + const struct rte_pci_addr *paddr = _pci_addr; + + return rte_pci_addr_cmp(&pdev->addr, paddr); +} + +/** + * Parses a port identifier string to a port id by pci address, then by name, + * and finally port id. 
+ */ +static inline int +parse_port_id(const char *port_str) +{ + struct rte_pci_addr dev_addr; + struct rte_bus *pci_bus; + struct rte_device *dev; + int port_id; + + pci_bus = rte_bus_find_by_name("pci"); + if (pci_bus == NULL) { + RTE_BOND_LOG(ERR, "unable to find PCI bus\n"); + return -1; + } + + /* try parsing as pci address, physical devices */ + if (pci_bus->parse(port_str, &dev_addr) == 0) { + dev = pci_bus->find_device(NULL, bond_pci_addr_cmp, &dev_addr); + if (dev == NULL) { + RTE_BOND_LOG(ERR, "unable to find PCI device"); + return -1; + } + port_id = find_port_id_by_pci_addr(&dev_addr); + if (port_id < 0) + return -1; + } else { + /* try parsing as device name, virtual devices */ + port_id = find_port_id_by_dev_name(port_str); + if (port_id < 0) { + char *end; + errno = 0; + + /* try parsing as port id */ + port_id = strtol(port_str, &end, 10); + if (*end != 0 || errno != 0) + return -1; + } + } + + if (port_id < 0 || port_id > RTE_MAX_ETHPORTS) { + RTE_BOND_LOG(ERR, "Slave port specified (%s) outside expected range", + port_str); + return -1; + } + return port_id; +} + +int +bond_ethdev_parse_slave_port_kvarg(const char *key, + const char *value, void *extra_args) +{ + struct bond_ethdev_slave_ports *slave_ports; + + if (value == NULL || extra_args == NULL) + return -1; + + slave_ports = extra_args; + + if (strcmp(key, PMD_BOND_SLAVE_PORT_KVARG) == 0) { + int port_id = parse_port_id(value); + if (port_id < 0) { + RTE_BOND_LOG(ERR, "Invalid slave port value (%s) specified", + value); + return -1; + } else + slave_ports->slaves[slave_ports->slave_count++] = + port_id; + } + return 0; +} + +int +bond_ethdev_parse_slave_mode_kvarg(const char *key __rte_unused, + const char *value, void *extra_args) +{ + uint8_t *mode; + char *endptr; + + if (value == NULL || extra_args == NULL) + return -1; + + mode = extra_args; + + errno = 0; + *mode = strtol(value, &endptr, 10); + if (*endptr != 0 || errno != 0) + return -1; + + /* validate mode value */ + switch (*mode) { + case BONDING_MODE_ROUND_ROBIN: + case BONDING_MODE_ACTIVE_BACKUP: + case BONDING_MODE_BALANCE: + case BONDING_MODE_BROADCAST: + case BONDING_MODE_8023AD: + case BONDING_MODE_TLB: + case BONDING_MODE_ALB: + return 0; + default: + RTE_BOND_LOG(ERR, "Invalid slave mode value (%s) specified", value); + return -1; + } +} + +int +bond_ethdev_parse_slave_agg_mode_kvarg(const char *key __rte_unused, + const char *value, void *extra_args) +{ + uint8_t *agg_mode; + + if (value == NULL || extra_args == NULL) + return -1; + + agg_mode = extra_args; + + errno = 0; + if (strncmp(value, "stable", 6) == 0) + *agg_mode = AGG_STABLE; + + if (strncmp(value, "bandwidth", 9) == 0) + *agg_mode = AGG_BANDWIDTH; + + if (strncmp(value, "count", 5) == 0) + *agg_mode = AGG_COUNT; + + switch (*agg_mode) { + case AGG_STABLE: + case AGG_BANDWIDTH: + case AGG_COUNT: + return 0; + default: + RTE_BOND_LOG(ERR, "Invalid agg mode value stable/bandwidth/count"); + return -1; + } +} + +int +bond_ethdev_parse_socket_id_kvarg(const char *key __rte_unused, + const char *value, void *extra_args) +{ + int socket_id; + char *endptr; + + if (value == NULL || extra_args == NULL) + return -1; + + errno = 0; + socket_id = (uint8_t)strtol(value, &endptr, 10); + if (*endptr != 0 || errno != 0) + return -1; + + /* validate socket id value */ + if (socket_id >= 0) { + *(uint8_t *)extra_args = (uint8_t)socket_id; + return 0; + } + return -1; +} + +int +bond_ethdev_parse_primary_slave_port_id_kvarg(const char *key __rte_unused, + const char *value, void *extra_args) +{ + int 
primary_slave_port_id; + + if (value == NULL || extra_args == NULL) + return -1; + + primary_slave_port_id = parse_port_id(value); + if (primary_slave_port_id < 0) + return -1; + + *(uint16_t *)extra_args = (uint16_t)primary_slave_port_id; + + return 0; +} + +int +bond_ethdev_parse_balance_xmit_policy_kvarg(const char *key __rte_unused, + const char *value, void *extra_args) +{ + uint8_t *xmit_policy; + + if (value == NULL || extra_args == NULL) + return -1; + + xmit_policy = extra_args; + + if (strcmp(PMD_BOND_XMIT_POLICY_LAYER2_KVARG, value) == 0) + *xmit_policy = BALANCE_XMIT_POLICY_LAYER2; + else if (strcmp(PMD_BOND_XMIT_POLICY_LAYER23_KVARG, value) == 0) + *xmit_policy = BALANCE_XMIT_POLICY_LAYER23; + else if (strcmp(PMD_BOND_XMIT_POLICY_LAYER34_KVARG, value) == 0) + *xmit_policy = BALANCE_XMIT_POLICY_LAYER34; + else + return -1; + + return 0; +} + +int +bond_ethdev_parse_bond_mac_addr_kvarg(const char *key __rte_unused, + const char *value, void *extra_args) +{ + if (value == NULL || extra_args == NULL) + return -1; + + /* Parse MAC */ + return rte_ether_unformat_addr(value, extra_args); +} + +int +bond_ethdev_parse_time_ms_kvarg(const char *key __rte_unused, + const char *value, void *extra_args) +{ + uint32_t time_ms; + char *endptr; + + if (value == NULL || extra_args == NULL) + return -1; + + errno = 0; + time_ms = (uint32_t)strtol(value, &endptr, 10); + if (*endptr != 0 || errno != 0) + return -1; + + *(uint32_t *)extra_args = time_ms; + + return 0; +} diff --git a/src/spdk/dpdk/drivers/net/bonding/rte_eth_bond_flow.c b/src/spdk/dpdk/drivers/net/bonding/rte_eth_bond_flow.c new file mode 100644 index 000000000..417f76bf6 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bonding/rte_eth_bond_flow.c @@ -0,0 +1,245 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2018 Mellanox Technologies, Ltd + */ + +#include +#include +#include + +#include +#include +#include +#include + +#include "eth_bond_private.h" + +static struct rte_flow * +bond_flow_alloc(int numa_node, const struct rte_flow_attr *attr, + const struct rte_flow_item *items, + const struct rte_flow_action *actions) +{ + struct rte_flow *flow; + const struct rte_flow_conv_rule rule = { + .attr_ro = attr, + .pattern_ro = items, + .actions_ro = actions, + }; + struct rte_flow_error error; + int ret; + + ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, &rule, &error); + if (ret < 0) { + RTE_BOND_LOG(ERR, "Unable to process flow rule (%s): %s", + error.message ? error.message : "unspecified", + strerror(rte_errno)); + return NULL; + } + flow = rte_zmalloc_socket(NULL, offsetof(struct rte_flow, rule) + ret, + RTE_CACHE_LINE_SIZE, numa_node); + if (unlikely(flow == NULL)) { + RTE_BOND_LOG(ERR, "Could not allocate new flow"); + return NULL; + } + ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, &flow->rule, ret, &rule, + &error); + if (ret < 0) { + RTE_BOND_LOG(ERR, "Failed to copy flow rule (%s): %s", + error.message ? 
error.message : "unspecified", + strerror(rte_errno)); + rte_free(flow); + return NULL; + } + return flow; +} + +static void +bond_flow_release(struct rte_flow **flow) +{ + rte_free(*flow); + *flow = NULL; +} + +static int +bond_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, + const struct rte_flow_item patterns[], + const struct rte_flow_action actions[], + struct rte_flow_error *err) +{ + struct bond_dev_private *internals = dev->data->dev_private; + int i; + int ret; + + for (i = 0; i < internals->slave_count; i++) { + ret = rte_flow_validate(internals->slaves[i].port_id, attr, + patterns, actions, err); + if (ret) { + RTE_BOND_LOG(ERR, "Operation rte_flow_validate failed" + " for slave %d with error %d", i, ret); + return ret; + } + } + return 0; +} + +static struct rte_flow * +bond_flow_create(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, + const struct rte_flow_item patterns[], + const struct rte_flow_action actions[], + struct rte_flow_error *err) +{ + struct bond_dev_private *internals = dev->data->dev_private; + struct rte_flow *flow; + int i; + + flow = bond_flow_alloc(dev->data->numa_node, attr, patterns, actions); + if (unlikely(flow == NULL)) { + rte_flow_error_set(err, ENOMEM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, rte_strerror(ENOMEM)); + return NULL; + } + for (i = 0; i < internals->slave_count; i++) { + flow->flows[i] = rte_flow_create(internals->slaves[i].port_id, + attr, patterns, actions, err); + if (unlikely(flow->flows[i] == NULL)) { + RTE_BOND_LOG(ERR, "Failed to create flow on slave %d", + i); + goto err; + } + } + TAILQ_INSERT_TAIL(&internals->flow_list, flow, next); + return flow; +err: + /* Destroy all slaves flows. */ + for (i = 0; i < internals->slave_count; i++) { + if (flow->flows[i] != NULL) + rte_flow_destroy(internals->slaves[i].port_id, + flow->flows[i], err); + } + bond_flow_release(&flow); + return NULL; +} + +static int +bond_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow, + struct rte_flow_error *err) +{ + struct bond_dev_private *internals = dev->data->dev_private; + int i; + int ret = 0; + + for (i = 0; i < internals->slave_count; i++) { + int lret; + + if (unlikely(flow->flows[i] == NULL)) + continue; + lret = rte_flow_destroy(internals->slaves[i].port_id, + flow->flows[i], err); + if (unlikely(lret != 0)) { + RTE_BOND_LOG(ERR, "Failed to destroy flow on slave %d:" + " %d", i, lret); + ret = lret; + } + } + TAILQ_REMOVE(&internals->flow_list, flow, next); + bond_flow_release(&flow); + return ret; +} + +static int +bond_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *err) +{ + struct bond_dev_private *internals = dev->data->dev_private; + struct rte_flow *flow; + void *tmp; + int ret = 0; + int lret; + + /* Destroy all bond flows from its slaves instead of flushing them to + * keep the LACP flow or any other external flows. 
+ */ + TAILQ_FOREACH_SAFE(flow, &internals->flow_list, next, tmp) { + lret = bond_flow_destroy(dev, flow, err); + if (unlikely(lret != 0)) + ret = lret; + } + if (unlikely(ret != 0)) + RTE_BOND_LOG(ERR, "Failed to flush flow in all slaves"); + return ret; +} + +static int +bond_flow_query_count(struct rte_eth_dev *dev, struct rte_flow *flow, + const struct rte_flow_action *action, + struct rte_flow_query_count *count, + struct rte_flow_error *err) +{ + struct bond_dev_private *internals = dev->data->dev_private; + struct rte_flow_query_count slave_count; + int i; + int ret; + + count->bytes = 0; + count->hits = 0; + rte_memcpy(&slave_count, count, sizeof(slave_count)); + for (i = 0; i < internals->slave_count; i++) { + ret = rte_flow_query(internals->slaves[i].port_id, + flow->flows[i], action, + &slave_count, err); + if (unlikely(ret != 0)) { + RTE_BOND_LOG(ERR, "Failed to query flow on" + " slave %d: %d", i, ret); + return ret; + } + count->bytes += slave_count.bytes; + count->hits += slave_count.hits; + slave_count.bytes = 0; + slave_count.hits = 0; + } + return 0; +} + +static int +bond_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow, + const struct rte_flow_action *action, void *arg, + struct rte_flow_error *err) +{ + switch (action->type) { + case RTE_FLOW_ACTION_TYPE_COUNT: + return bond_flow_query_count(dev, flow, action, arg, err); + default: + return rte_flow_error_set(err, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, arg, + rte_strerror(ENOTSUP)); + } +} + +static int +bond_flow_isolate(struct rte_eth_dev *dev, int set, + struct rte_flow_error *err) +{ + struct bond_dev_private *internals = dev->data->dev_private; + int i; + int ret; + + for (i = 0; i < internals->slave_count; i++) { + ret = rte_flow_isolate(internals->slaves[i].port_id, set, err); + if (unlikely(ret != 0)) { + RTE_BOND_LOG(ERR, "Operation rte_flow_isolate failed" + " for slave %d with error %d", i, ret); + internals->flow_isolated_valid = 0; + return ret; + } + } + internals->flow_isolated = set; + internals->flow_isolated_valid = 1; + return 0; +} + +const struct rte_flow_ops bond_flow_ops = { + .validate = bond_flow_validate, + .create = bond_flow_create, + .destroy = bond_flow_destroy, + .flush = bond_flow_flush, + .query = bond_flow_query, + .isolate = bond_flow_isolate, +}; diff --git a/src/spdk/dpdk/drivers/net/bonding/rte_eth_bond_pmd.c b/src/spdk/dpdk/drivers/net/bonding/rte_eth_bond_pmd.c new file mode 100644 index 000000000..612a64599 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bonding/rte_eth_bond_pmd.c @@ -0,0 +1,3760 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2017 Intel Corporation + */ +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "rte_eth_bond.h" +#include "eth_bond_private.h" +#include "eth_bond_8023ad_private.h" + +#define REORDER_PERIOD_MS 10 +#define DEFAULT_POLLING_INTERVAL_10_MS (10) +#define BOND_MAX_MAC_ADDRS 16 + +#define HASH_L4_PORTS(h) ((h)->src_port ^ (h)->dst_port) + +/* Table for statistics in mode 5 TLB */ +static uint64_t tlb_last_obytets[RTE_MAX_ETHPORTS]; + +static inline size_t +get_vlan_offset(struct rte_ether_hdr *eth_hdr, uint16_t *proto) +{ + size_t vlan_offset = 0; + + if (rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN) == *proto || + rte_cpu_to_be_16(RTE_ETHER_TYPE_QINQ) == *proto) { + struct rte_vlan_hdr *vlan_hdr = + (struct rte_vlan_hdr *)(eth_hdr + 1); + + vlan_offset = sizeof(struct rte_vlan_hdr); + *proto = 
vlan_hdr->eth_proto; + + if (rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN) == *proto) { + vlan_hdr = vlan_hdr + 1; + *proto = vlan_hdr->eth_proto; + vlan_offset += sizeof(struct rte_vlan_hdr); + } + } + return vlan_offset; +} + +static uint16_t +bond_ethdev_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) +{ + struct bond_dev_private *internals; + + uint16_t num_rx_total = 0; + uint16_t slave_count; + uint16_t active_slave; + int i; + + /* Cast to structure, containing bonded device's port id and queue id */ + struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue; + internals = bd_rx_q->dev_private; + slave_count = internals->active_slave_count; + active_slave = internals->active_slave; + + for (i = 0; i < slave_count && nb_pkts; i++) { + uint16_t num_rx_slave; + + /* Offset of pointer to *bufs increases as packets are received + * from other slaves */ + num_rx_slave = + rte_eth_rx_burst(internals->active_slaves[active_slave], + bd_rx_q->queue_id, + bufs + num_rx_total, nb_pkts); + num_rx_total += num_rx_slave; + nb_pkts -= num_rx_slave; + if (++active_slave == slave_count) + active_slave = 0; + } + + if (++internals->active_slave >= slave_count) + internals->active_slave = 0; + return num_rx_total; +} + +static uint16_t +bond_ethdev_rx_burst_active_backup(void *queue, struct rte_mbuf **bufs, + uint16_t nb_pkts) +{ + struct bond_dev_private *internals; + + /* Cast to structure, containing bonded device's port id and queue id */ + struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue; + + internals = bd_rx_q->dev_private; + + return rte_eth_rx_burst(internals->current_primary_port, + bd_rx_q->queue_id, bufs, nb_pkts); +} + +static inline uint8_t +is_lacp_packets(uint16_t ethertype, uint8_t subtype, struct rte_mbuf *mbuf) +{ + const uint16_t ether_type_slow_be = + rte_be_to_cpu_16(RTE_ETHER_TYPE_SLOW); + + return !((mbuf->ol_flags & PKT_RX_VLAN) ? 
mbuf->vlan_tci : 0) && + (ethertype == ether_type_slow_be && + (subtype == SLOW_SUBTYPE_MARKER || subtype == SLOW_SUBTYPE_LACP)); +} + +/***************************************************************************** + * Flow director's setup for mode 4 optimization + */ + +static struct rte_flow_item_eth flow_item_eth_type_8023ad = { + .dst.addr_bytes = { 0 }, + .src.addr_bytes = { 0 }, + .type = RTE_BE16(RTE_ETHER_TYPE_SLOW), +}; + +static struct rte_flow_item_eth flow_item_eth_mask_type_8023ad = { + .dst.addr_bytes = { 0 }, + .src.addr_bytes = { 0 }, + .type = 0xFFFF, +}; + +static struct rte_flow_item flow_item_8023ad[] = { + { + .type = RTE_FLOW_ITEM_TYPE_ETH, + .spec = &flow_item_eth_type_8023ad, + .last = NULL, + .mask = &flow_item_eth_mask_type_8023ad, + }, + { + .type = RTE_FLOW_ITEM_TYPE_END, + .spec = NULL, + .last = NULL, + .mask = NULL, + } +}; + +const struct rte_flow_attr flow_attr_8023ad = { + .group = 0, + .priority = 0, + .ingress = 1, + .egress = 0, + .reserved = 0, +}; + +int +bond_ethdev_8023ad_flow_verify(struct rte_eth_dev *bond_dev, + uint16_t slave_port) { + struct rte_eth_dev_info slave_info; + struct rte_flow_error error; + struct bond_dev_private *internals = bond_dev->data->dev_private; + + const struct rte_flow_action_queue lacp_queue_conf = { + .index = 0, + }; + + const struct rte_flow_action actions[] = { + { + .type = RTE_FLOW_ACTION_TYPE_QUEUE, + .conf = &lacp_queue_conf + }, + { + .type = RTE_FLOW_ACTION_TYPE_END, + } + }; + + int ret = rte_flow_validate(slave_port, &flow_attr_8023ad, + flow_item_8023ad, actions, &error); + if (ret < 0) { + RTE_BOND_LOG(ERR, "%s: %s (slave_port=%d queue_id=%d)", + __func__, error.message, slave_port, + internals->mode4.dedicated_queues.rx_qid); + return -1; + } + + ret = rte_eth_dev_info_get(slave_port, &slave_info); + if (ret != 0) { + RTE_BOND_LOG(ERR, + "%s: Error during getting device (port %u) info: %s\n", + __func__, slave_port, strerror(-ret)); + + return ret; + } + + if (slave_info.max_rx_queues < bond_dev->data->nb_rx_queues || + slave_info.max_tx_queues < bond_dev->data->nb_tx_queues) { + RTE_BOND_LOG(ERR, + "%s: Slave %d capabilities doesn't allow to allocate additional queues", + __func__, slave_port); + return -1; + } + + return 0; +} + +int +bond_8023ad_slow_pkt_hw_filter_supported(uint16_t port_id) { + struct rte_eth_dev *bond_dev = &rte_eth_devices[port_id]; + struct bond_dev_private *internals = bond_dev->data->dev_private; + struct rte_eth_dev_info bond_info; + uint16_t idx; + int ret; + + /* Verify if all slaves in bonding supports flow director and */ + if (internals->slave_count > 0) { + ret = rte_eth_dev_info_get(bond_dev->data->port_id, &bond_info); + if (ret != 0) { + RTE_BOND_LOG(ERR, + "%s: Error during getting device (port %u) info: %s\n", + __func__, bond_dev->data->port_id, + strerror(-ret)); + + return ret; + } + + internals->mode4.dedicated_queues.rx_qid = bond_info.nb_rx_queues; + internals->mode4.dedicated_queues.tx_qid = bond_info.nb_tx_queues; + + for (idx = 0; idx < internals->slave_count; idx++) { + if (bond_ethdev_8023ad_flow_verify(bond_dev, + internals->slaves[idx].port_id) != 0) + return -1; + } + } + + return 0; +} + +int +bond_ethdev_8023ad_flow_set(struct rte_eth_dev *bond_dev, uint16_t slave_port) { + + struct rte_flow_error error; + struct bond_dev_private *internals = bond_dev->data->dev_private; + struct rte_flow_action_queue lacp_queue_conf = { + .index = internals->mode4.dedicated_queues.rx_qid, + }; + + const struct rte_flow_action actions[] = { + { + .type = 
RTE_FLOW_ACTION_TYPE_QUEUE, + .conf = &lacp_queue_conf + }, + { + .type = RTE_FLOW_ACTION_TYPE_END, + } + }; + + internals->mode4.dedicated_queues.flow[slave_port] = rte_flow_create(slave_port, + &flow_attr_8023ad, flow_item_8023ad, actions, &error); + if (internals->mode4.dedicated_queues.flow[slave_port] == NULL) { + RTE_BOND_LOG(ERR, "bond_ethdev_8023ad_flow_set: %s " + "(slave_port=%d queue_id=%d)", + error.message, slave_port, + internals->mode4.dedicated_queues.rx_qid); + return -1; + } + + return 0; +} + +static inline uint16_t +rx_burst_8023ad(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts, + bool dedicated_rxq) +{ + /* Cast to structure, containing bonded device's port id and queue id */ + struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue; + struct bond_dev_private *internals = bd_rx_q->dev_private; + struct rte_eth_dev *bonded_eth_dev = + &rte_eth_devices[internals->port_id]; + struct rte_ether_addr *bond_mac = bonded_eth_dev->data->mac_addrs; + struct rte_ether_hdr *hdr; + + const uint16_t ether_type_slow_be = + rte_be_to_cpu_16(RTE_ETHER_TYPE_SLOW); + uint16_t num_rx_total = 0; /* Total number of received packets */ + uint16_t slaves[RTE_MAX_ETHPORTS]; + uint16_t slave_count, idx; + + uint8_t collecting; /* current slave collecting status */ + const uint8_t promisc = rte_eth_promiscuous_get(internals->port_id); + const uint8_t allmulti = rte_eth_allmulticast_get(internals->port_id); + uint8_t subtype; + uint16_t i; + uint16_t j; + uint16_t k; + + /* Copy slave list to protect against slave up/down changes during tx + * bursting */ + slave_count = internals->active_slave_count; + memcpy(slaves, internals->active_slaves, + sizeof(internals->active_slaves[0]) * slave_count); + + idx = internals->active_slave; + if (idx >= slave_count) { + internals->active_slave = 0; + idx = 0; + } + for (i = 0; i < slave_count && num_rx_total < nb_pkts; i++) { + j = num_rx_total; + collecting = ACTOR_STATE(&bond_mode_8023ad_ports[slaves[idx]], + COLLECTING); + + /* Read packets from this slave */ + num_rx_total += rte_eth_rx_burst(slaves[idx], bd_rx_q->queue_id, + &bufs[num_rx_total], nb_pkts - num_rx_total); + + for (k = j; k < 2 && k < num_rx_total; k++) + rte_prefetch0(rte_pktmbuf_mtod(bufs[k], void *)); + + /* Handle slow protocol packets. 
*/ + while (j < num_rx_total) { + if (j + 3 < num_rx_total) + rte_prefetch0(rte_pktmbuf_mtod(bufs[j + 3], void *)); + + hdr = rte_pktmbuf_mtod(bufs[j], struct rte_ether_hdr *); + subtype = ((struct slow_protocol_frame *)hdr)->slow_protocol.subtype; + + /* Remove packet from array if: + * - it is slow packet but no dedicated rxq is present, + * - slave is not in collecting state, + * - bonding interface is not in promiscuous mode: + * - packet is unicast and address does not match, + * - packet is multicast and bonding interface + * is not in allmulti, + */ + if (unlikely( + (!dedicated_rxq && + is_lacp_packets(hdr->ether_type, subtype, + bufs[j])) || + !collecting || + (!promisc && + ((rte_is_unicast_ether_addr(&hdr->d_addr) && + !rte_is_same_ether_addr(bond_mac, + &hdr->d_addr)) || + (!allmulti && + rte_is_multicast_ether_addr(&hdr->d_addr)))))) { + + if (hdr->ether_type == ether_type_slow_be) { + bond_mode_8023ad_handle_slow_pkt( + internals, slaves[idx], bufs[j]); + } else + rte_pktmbuf_free(bufs[j]); + + /* Packet is managed by mode 4 or dropped, shift the array */ + num_rx_total--; + if (j < num_rx_total) { + memmove(&bufs[j], &bufs[j + 1], sizeof(bufs[0]) * + (num_rx_total - j)); + } + } else + j++; + } + if (unlikely(++idx == slave_count)) + idx = 0; + } + + if (++internals->active_slave >= slave_count) + internals->active_slave = 0; + + return num_rx_total; +} + +static uint16_t +bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs, + uint16_t nb_pkts) +{ + return rx_burst_8023ad(queue, bufs, nb_pkts, false); +} + +static uint16_t +bond_ethdev_rx_burst_8023ad_fast_queue(void *queue, struct rte_mbuf **bufs, + uint16_t nb_pkts) +{ + return rx_burst_8023ad(queue, bufs, nb_pkts, true); +} + +#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1) +uint32_t burstnumberRX; +uint32_t burstnumberTX; + +#ifdef RTE_LIBRTE_BOND_DEBUG_ALB + +static void +arp_op_name(uint16_t arp_op, char *buf, size_t buf_len) +{ + switch (arp_op) { + case RTE_ARP_OP_REQUEST: + strlcpy(buf, "ARP Request", buf_len); + return; + case RTE_ARP_OP_REPLY: + strlcpy(buf, "ARP Reply", buf_len); + return; + case RTE_ARP_OP_REVREQUEST: + strlcpy(buf, "Reverse ARP Request", buf_len); + return; + case RTE_ARP_OP_REVREPLY: + strlcpy(buf, "Reverse ARP Reply", buf_len); + return; + case RTE_ARP_OP_INVREQUEST: + strlcpy(buf, "Peer Identify Request", buf_len); + return; + case RTE_ARP_OP_INVREPLY: + strlcpy(buf, "Peer Identify Reply", buf_len); + return; + default: + break; + } + strlcpy(buf, "Unknown", buf_len); + return; +} +#endif +#define MaxIPv4String 16 +static void +ipv4_addr_to_dot(uint32_t be_ipv4_addr, char *buf, uint8_t buf_size) +{ + uint32_t ipv4_addr; + + ipv4_addr = rte_be_to_cpu_32(be_ipv4_addr); + snprintf(buf, buf_size, "%d.%d.%d.%d", (ipv4_addr >> 24) & 0xFF, + (ipv4_addr >> 16) & 0xFF, (ipv4_addr >> 8) & 0xFF, + ipv4_addr & 0xFF); +} + +#define MAX_CLIENTS_NUMBER 128 +uint8_t active_clients; +struct client_stats_t { + uint16_t port; + uint32_t ipv4_addr; + uint32_t ipv4_rx_packets; + uint32_t ipv4_tx_packets; +}; +struct client_stats_t client_stats[MAX_CLIENTS_NUMBER]; + +static void +update_client_stats(uint32_t addr, uint16_t port, uint32_t *TXorRXindicator) +{ + int i = 0; + + for (; i < MAX_CLIENTS_NUMBER; i++) { + if ((client_stats[i].ipv4_addr == addr) && (client_stats[i].port == port)) { + /* Just update RX packets number for this client */ + if (TXorRXindicator == &burstnumberRX) + client_stats[i].ipv4_rx_packets++; + else + client_stats[i].ipv4_tx_packets++; + 
return; + } + } + /* We have a new client. Insert him to the table, and increment stats */ + if (TXorRXindicator == &burstnumberRX) + client_stats[active_clients].ipv4_rx_packets++; + else + client_stats[active_clients].ipv4_tx_packets++; + client_stats[active_clients].ipv4_addr = addr; + client_stats[active_clients].port = port; + active_clients++; + +} + +#ifdef RTE_LIBRTE_BOND_DEBUG_ALB +#define MODE6_DEBUG(info, src_ip, dst_ip, eth_h, arp_op, port, burstnumber) \ + rte_log(RTE_LOG_DEBUG, bond_logtype, \ + "%s port:%d SrcMAC:%02X:%02X:%02X:%02X:%02X:%02X SrcIP:%s " \ + "DstMAC:%02X:%02X:%02X:%02X:%02X:%02X DstIP:%s %s %d\n", \ + info, \ + port, \ + eth_h->s_addr.addr_bytes[0], eth_h->s_addr.addr_bytes[1], \ + eth_h->s_addr.addr_bytes[2], eth_h->s_addr.addr_bytes[3], \ + eth_h->s_addr.addr_bytes[4], eth_h->s_addr.addr_bytes[5], \ + src_ip, \ + eth_h->d_addr.addr_bytes[0], eth_h->d_addr.addr_bytes[1], \ + eth_h->d_addr.addr_bytes[2], eth_h->d_addr.addr_bytes[3], \ + eth_h->d_addr.addr_bytes[4], eth_h->d_addr.addr_bytes[5], \ + dst_ip, \ + arp_op, ++burstnumber) +#endif + +static void +mode6_debug(const char __rte_unused *info, + struct rte_ether_hdr *eth_h, uint16_t port, + uint32_t __rte_unused *burstnumber) +{ + struct rte_ipv4_hdr *ipv4_h; +#ifdef RTE_LIBRTE_BOND_DEBUG_ALB + struct rte_arp_hdr *arp_h; + char dst_ip[16]; + char ArpOp[24]; + char buf[16]; +#endif + char src_ip[16]; + + uint16_t ether_type = eth_h->ether_type; + uint16_t offset = get_vlan_offset(eth_h, ðer_type); + +#ifdef RTE_LIBRTE_BOND_DEBUG_ALB + strlcpy(buf, info, 16); +#endif + + if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) { + ipv4_h = (struct rte_ipv4_hdr *)((char *)(eth_h + 1) + offset); + ipv4_addr_to_dot(ipv4_h->src_addr, src_ip, MaxIPv4String); +#ifdef RTE_LIBRTE_BOND_DEBUG_ALB + ipv4_addr_to_dot(ipv4_h->dst_addr, dst_ip, MaxIPv4String); + MODE6_DEBUG(buf, src_ip, dst_ip, eth_h, "", port, *burstnumber); +#endif + update_client_stats(ipv4_h->src_addr, port, burstnumber); + } +#ifdef RTE_LIBRTE_BOND_DEBUG_ALB + else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP)) { + arp_h = (struct rte_arp_hdr *)((char *)(eth_h + 1) + offset); + ipv4_addr_to_dot(arp_h->arp_data.arp_sip, src_ip, MaxIPv4String); + ipv4_addr_to_dot(arp_h->arp_data.arp_tip, dst_ip, MaxIPv4String); + arp_op_name(rte_be_to_cpu_16(arp_h->arp_opcode), + ArpOp, sizeof(ArpOp)); + MODE6_DEBUG(buf, src_ip, dst_ip, eth_h, ArpOp, port, *burstnumber); + } +#endif +} +#endif + +static uint16_t +bond_ethdev_rx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) +{ + struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue; + struct bond_dev_private *internals = bd_tx_q->dev_private; + struct rte_ether_hdr *eth_h; + uint16_t ether_type, offset; + uint16_t nb_recv_pkts; + int i; + + nb_recv_pkts = bond_ethdev_rx_burst(queue, bufs, nb_pkts); + + for (i = 0; i < nb_recv_pkts; i++) { + eth_h = rte_pktmbuf_mtod(bufs[i], struct rte_ether_hdr *); + ether_type = eth_h->ether_type; + offset = get_vlan_offset(eth_h, ðer_type); + + if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP)) { +#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1) + mode6_debug("RX ARP:", eth_h, bufs[i]->port, &burstnumberRX); +#endif + bond_mode_alb_arp_recv(eth_h, offset, internals); + } +#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1) + else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) + mode6_debug("RX IPv4:", eth_h, bufs[i]->port, &burstnumberRX); +#endif + } + + return nb_recv_pkts; 
+} + +static uint16_t +bond_ethdev_tx_burst_round_robin(void *queue, struct rte_mbuf **bufs, + uint16_t nb_pkts) +{ + struct bond_dev_private *internals; + struct bond_tx_queue *bd_tx_q; + + struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_pkts]; + uint16_t slave_nb_pkts[RTE_MAX_ETHPORTS] = { 0 }; + + uint16_t num_of_slaves; + uint16_t slaves[RTE_MAX_ETHPORTS]; + + uint16_t num_tx_total = 0, num_tx_slave; + + static int slave_idx = 0; + int i, cslave_idx = 0, tx_fail_total = 0; + + bd_tx_q = (struct bond_tx_queue *)queue; + internals = bd_tx_q->dev_private; + + /* Copy slave list to protect against slave up/down changes during tx + * bursting */ + num_of_slaves = internals->active_slave_count; + memcpy(slaves, internals->active_slaves, + sizeof(internals->active_slaves[0]) * num_of_slaves); + + if (num_of_slaves < 1) + return num_tx_total; + + /* Populate slaves mbuf with which packets are to be sent on it */ + for (i = 0; i < nb_pkts; i++) { + cslave_idx = (slave_idx + i) % num_of_slaves; + slave_bufs[cslave_idx][(slave_nb_pkts[cslave_idx])++] = bufs[i]; + } + + /* increment current slave index so the next call to tx burst starts on the + * next slave */ + slave_idx = ++cslave_idx; + + /* Send packet burst on each slave device */ + for (i = 0; i < num_of_slaves; i++) { + if (slave_nb_pkts[i] > 0) { + num_tx_slave = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id, + slave_bufs[i], slave_nb_pkts[i]); + + /* if tx burst fails move packets to end of bufs */ + if (unlikely(num_tx_slave < slave_nb_pkts[i])) { + int tx_fail_slave = slave_nb_pkts[i] - num_tx_slave; + + tx_fail_total += tx_fail_slave; + + memcpy(&bufs[nb_pkts - tx_fail_total], + &slave_bufs[i][num_tx_slave], + tx_fail_slave * sizeof(bufs[0])); + } + num_tx_total += num_tx_slave; + } + } + + return num_tx_total; +} + +static uint16_t +bond_ethdev_tx_burst_active_backup(void *queue, + struct rte_mbuf **bufs, uint16_t nb_pkts) +{ + struct bond_dev_private *internals; + struct bond_tx_queue *bd_tx_q; + + bd_tx_q = (struct bond_tx_queue *)queue; + internals = bd_tx_q->dev_private; + + if (internals->active_slave_count < 1) + return 0; + + return rte_eth_tx_burst(internals->current_primary_port, bd_tx_q->queue_id, + bufs, nb_pkts); +} + +static inline uint16_t +ether_hash(struct rte_ether_hdr *eth_hdr) +{ + unaligned_uint16_t *word_src_addr = + (unaligned_uint16_t *)eth_hdr->s_addr.addr_bytes; + unaligned_uint16_t *word_dst_addr = + (unaligned_uint16_t *)eth_hdr->d_addr.addr_bytes; + + return (word_src_addr[0] ^ word_dst_addr[0]) ^ + (word_src_addr[1] ^ word_dst_addr[1]) ^ + (word_src_addr[2] ^ word_dst_addr[2]); +} + +static inline uint32_t +ipv4_hash(struct rte_ipv4_hdr *ipv4_hdr) +{ + return ipv4_hdr->src_addr ^ ipv4_hdr->dst_addr; +} + +static inline uint32_t +ipv6_hash(struct rte_ipv6_hdr *ipv6_hdr) +{ + unaligned_uint32_t *word_src_addr = + (unaligned_uint32_t *)&(ipv6_hdr->src_addr[0]); + unaligned_uint32_t *word_dst_addr = + (unaligned_uint32_t *)&(ipv6_hdr->dst_addr[0]); + + return (word_src_addr[0] ^ word_dst_addr[0]) ^ + (word_src_addr[1] ^ word_dst_addr[1]) ^ + (word_src_addr[2] ^ word_dst_addr[2]) ^ + (word_src_addr[3] ^ word_dst_addr[3]); +} + + +void +burst_xmit_l2_hash(struct rte_mbuf **buf, uint16_t nb_pkts, + uint16_t slave_count, uint16_t *slaves) +{ + struct rte_ether_hdr *eth_hdr; + uint32_t hash; + int i; + + for (i = 0; i < nb_pkts; i++) { + eth_hdr = rte_pktmbuf_mtod(buf[i], struct rte_ether_hdr *); + + hash = ether_hash(eth_hdr); + + slaves[i] = (hash ^= hash >> 8) % slave_count; + } +} + +void 
+burst_xmit_l23_hash(struct rte_mbuf **buf, uint16_t nb_pkts, + uint16_t slave_count, uint16_t *slaves) +{ + uint16_t i; + struct rte_ether_hdr *eth_hdr; + uint16_t proto; + size_t vlan_offset; + uint32_t hash, l3hash; + + for (i = 0; i < nb_pkts; i++) { + eth_hdr = rte_pktmbuf_mtod(buf[i], struct rte_ether_hdr *); + l3hash = 0; + + proto = eth_hdr->ether_type; + hash = ether_hash(eth_hdr); + + vlan_offset = get_vlan_offset(eth_hdr, &proto); + + if (rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4) == proto) { + struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *) + ((char *)(eth_hdr + 1) + vlan_offset); + l3hash = ipv4_hash(ipv4_hdr); + + } else if (rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6) == proto) { + struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *) + ((char *)(eth_hdr + 1) + vlan_offset); + l3hash = ipv6_hash(ipv6_hdr); + } + + hash = hash ^ l3hash; + hash ^= hash >> 16; + hash ^= hash >> 8; + + slaves[i] = hash % slave_count; + } +} + +void +burst_xmit_l34_hash(struct rte_mbuf **buf, uint16_t nb_pkts, + uint16_t slave_count, uint16_t *slaves) +{ + struct rte_ether_hdr *eth_hdr; + uint16_t proto; + size_t vlan_offset; + int i; + + struct rte_udp_hdr *udp_hdr; + struct rte_tcp_hdr *tcp_hdr; + uint32_t hash, l3hash, l4hash; + + for (i = 0; i < nb_pkts; i++) { + eth_hdr = rte_pktmbuf_mtod(buf[i], struct rte_ether_hdr *); + size_t pkt_end = (size_t)eth_hdr + rte_pktmbuf_data_len(buf[i]); + proto = eth_hdr->ether_type; + vlan_offset = get_vlan_offset(eth_hdr, &proto); + l3hash = 0; + l4hash = 0; + + if (rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4) == proto) { + struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *) + ((char *)(eth_hdr + 1) + vlan_offset); + size_t ip_hdr_offset; + + l3hash = ipv4_hash(ipv4_hdr); + + /* there is no L4 header in fragmented packet */ + if (likely(rte_ipv4_frag_pkt_is_fragmented(ipv4_hdr) + == 0)) { + ip_hdr_offset = (ipv4_hdr->version_ihl + & RTE_IPV4_HDR_IHL_MASK) * + RTE_IPV4_IHL_MULTIPLIER; + + if (ipv4_hdr->next_proto_id == IPPROTO_TCP) { + tcp_hdr = (struct rte_tcp_hdr *) + ((char *)ipv4_hdr + + ip_hdr_offset); + if ((size_t)tcp_hdr + sizeof(*tcp_hdr) + < pkt_end) + l4hash = HASH_L4_PORTS(tcp_hdr); + } else if (ipv4_hdr->next_proto_id == + IPPROTO_UDP) { + udp_hdr = (struct rte_udp_hdr *) + ((char *)ipv4_hdr + + ip_hdr_offset); + if ((size_t)udp_hdr + sizeof(*udp_hdr) + < pkt_end) + l4hash = HASH_L4_PORTS(udp_hdr); + } + } + } else if (rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6) == proto) { + struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *) + ((char *)(eth_hdr + 1) + vlan_offset); + l3hash = ipv6_hash(ipv6_hdr); + + if (ipv6_hdr->proto == IPPROTO_TCP) { + tcp_hdr = (struct rte_tcp_hdr *)(ipv6_hdr + 1); + l4hash = HASH_L4_PORTS(tcp_hdr); + } else if (ipv6_hdr->proto == IPPROTO_UDP) { + udp_hdr = (struct rte_udp_hdr *)(ipv6_hdr + 1); + l4hash = HASH_L4_PORTS(udp_hdr); + } + } + + hash = l3hash ^ l4hash; + hash ^= hash >> 16; + hash ^= hash >> 8; + + slaves[i] = hash % slave_count; + } +} + +struct bwg_slave { + uint64_t bwg_left_int; + uint64_t bwg_left_remainder; + uint16_t slave; +}; + +void +bond_tlb_activate_slave(struct bond_dev_private *internals) { + int i; + + for (i = 0; i < internals->active_slave_count; i++) { + tlb_last_obytets[internals->active_slaves[i]] = 0; + } +} + +static int +bandwidth_cmp(const void *a, const void *b) +{ + const struct bwg_slave *bwg_a = a; + const struct bwg_slave *bwg_b = b; + int64_t diff = (int64_t)bwg_b->bwg_left_int - (int64_t)bwg_a->bwg_left_int; + int64_t diff2 = (int64_t)bwg_b->bwg_left_remainder - + 
(int64_t)bwg_a->bwg_left_remainder; + if (diff > 0) + return 1; + else if (diff < 0) + return -1; + else if (diff2 > 0) + return 1; + else if (diff2 < 0) + return -1; + else + return 0; +} + +static void +bandwidth_left(uint16_t port_id, uint64_t load, uint8_t update_idx, + struct bwg_slave *bwg_slave) +{ + struct rte_eth_link link_status; + int ret; + + ret = rte_eth_link_get_nowait(port_id, &link_status); + if (ret < 0) { + RTE_BOND_LOG(ERR, "Slave (port %u) link get failed: %s", + port_id, rte_strerror(-ret)); + return; + } + uint64_t link_bwg = link_status.link_speed * 1000000ULL / 8; + if (link_bwg == 0) + return; + link_bwg = link_bwg * (update_idx+1) * REORDER_PERIOD_MS; + bwg_slave->bwg_left_int = (link_bwg - 1000*load) / link_bwg; + bwg_slave->bwg_left_remainder = (link_bwg - 1000*load) % link_bwg; +} + +static void +bond_ethdev_update_tlb_slave_cb(void *arg) +{ + struct bond_dev_private *internals = arg; + struct rte_eth_stats slave_stats; + struct bwg_slave bwg_array[RTE_MAX_ETHPORTS]; + uint16_t slave_count; + uint64_t tx_bytes; + + uint8_t update_stats = 0; + uint16_t slave_id; + uint16_t i; + + internals->slave_update_idx++; + + + if (internals->slave_update_idx >= REORDER_PERIOD_MS) + update_stats = 1; + + for (i = 0; i < internals->active_slave_count; i++) { + slave_id = internals->active_slaves[i]; + rte_eth_stats_get(slave_id, &slave_stats); + tx_bytes = slave_stats.obytes - tlb_last_obytets[slave_id]; + bandwidth_left(slave_id, tx_bytes, + internals->slave_update_idx, &bwg_array[i]); + bwg_array[i].slave = slave_id; + + if (update_stats) { + tlb_last_obytets[slave_id] = slave_stats.obytes; + } + } + + if (update_stats == 1) + internals->slave_update_idx = 0; + + slave_count = i; + qsort(bwg_array, slave_count, sizeof(bwg_array[0]), bandwidth_cmp); + for (i = 0; i < slave_count; i++) + internals->tlb_slaves_order[i] = bwg_array[i].slave; + + rte_eal_alarm_set(REORDER_PERIOD_MS * 1000, bond_ethdev_update_tlb_slave_cb, + (struct bond_dev_private *)internals); +} + +static uint16_t +bond_ethdev_tx_burst_tlb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) +{ + struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue; + struct bond_dev_private *internals = bd_tx_q->dev_private; + + struct rte_eth_dev *primary_port = + &rte_eth_devices[internals->primary_port]; + uint16_t num_tx_total = 0; + uint16_t i, j; + + uint16_t num_of_slaves = internals->active_slave_count; + uint16_t slaves[RTE_MAX_ETHPORTS]; + + struct rte_ether_hdr *ether_hdr; + struct rte_ether_addr primary_slave_addr; + struct rte_ether_addr active_slave_addr; + + if (num_of_slaves < 1) + return num_tx_total; + + memcpy(slaves, internals->tlb_slaves_order, + sizeof(internals->tlb_slaves_order[0]) * num_of_slaves); + + + rte_ether_addr_copy(primary_port->data->mac_addrs, &primary_slave_addr); + + if (nb_pkts > 3) { + for (i = 0; i < 3; i++) + rte_prefetch0(rte_pktmbuf_mtod(bufs[i], void*)); + } + + for (i = 0; i < num_of_slaves; i++) { + rte_eth_macaddr_get(slaves[i], &active_slave_addr); + for (j = num_tx_total; j < nb_pkts; j++) { + if (j + 3 < nb_pkts) + rte_prefetch0(rte_pktmbuf_mtod(bufs[j+3], void*)); + + ether_hdr = rte_pktmbuf_mtod(bufs[j], + struct rte_ether_hdr *); + if (rte_is_same_ether_addr(ðer_hdr->s_addr, + &primary_slave_addr)) + rte_ether_addr_copy(&active_slave_addr, + ðer_hdr->s_addr); +#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1) + mode6_debug("TX IPv4:", ether_hdr, slaves[i], &burstnumberTX); +#endif + } + + num_tx_total += 
rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id, + bufs + num_tx_total, nb_pkts - num_tx_total); + + if (num_tx_total == nb_pkts) + break; + } + + return num_tx_total; +} + +void +bond_tlb_disable(struct bond_dev_private *internals) +{ + rte_eal_alarm_cancel(bond_ethdev_update_tlb_slave_cb, internals); +} + +void +bond_tlb_enable(struct bond_dev_private *internals) +{ + bond_ethdev_update_tlb_slave_cb(internals); +} + +static uint16_t +bond_ethdev_tx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) +{ + struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue; + struct bond_dev_private *internals = bd_tx_q->dev_private; + + struct rte_ether_hdr *eth_h; + uint16_t ether_type, offset; + + struct client_data *client_info; + + /* + * We create transmit buffers for every slave and one additional to send + * through tlb. In worst case every packet will be send on one port. + */ + struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS + 1][nb_pkts]; + uint16_t slave_bufs_pkts[RTE_MAX_ETHPORTS + 1] = { 0 }; + + /* + * We create separate transmit buffers for update packets as they won't + * be counted in num_tx_total. + */ + struct rte_mbuf *update_bufs[RTE_MAX_ETHPORTS][ALB_HASH_TABLE_SIZE]; + uint16_t update_bufs_pkts[RTE_MAX_ETHPORTS] = { 0 }; + + struct rte_mbuf *upd_pkt; + size_t pkt_size; + + uint16_t num_send, num_not_send = 0; + uint16_t num_tx_total = 0; + uint16_t slave_idx; + + int i, j; + + /* Search tx buffer for ARP packets and forward them to alb */ + for (i = 0; i < nb_pkts; i++) { + eth_h = rte_pktmbuf_mtod(bufs[i], struct rte_ether_hdr *); + ether_type = eth_h->ether_type; + offset = get_vlan_offset(eth_h, ðer_type); + + if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP)) { + slave_idx = bond_mode_alb_arp_xmit(eth_h, offset, internals); + + /* Change src mac in eth header */ + rte_eth_macaddr_get(slave_idx, ð_h->s_addr); + + /* Add packet to slave tx buffer */ + slave_bufs[slave_idx][slave_bufs_pkts[slave_idx]] = bufs[i]; + slave_bufs_pkts[slave_idx]++; + } else { + /* If packet is not ARP, send it with TLB policy */ + slave_bufs[RTE_MAX_ETHPORTS][slave_bufs_pkts[RTE_MAX_ETHPORTS]] = + bufs[i]; + slave_bufs_pkts[RTE_MAX_ETHPORTS]++; + } + } + + /* Update connected client ARP tables */ + if (internals->mode6.ntt) { + for (i = 0; i < ALB_HASH_TABLE_SIZE; i++) { + client_info = &internals->mode6.client_table[i]; + + if (client_info->in_use) { + /* Allocate new packet to send ARP update on current slave */ + upd_pkt = rte_pktmbuf_alloc(internals->mode6.mempool); + if (upd_pkt == NULL) { + RTE_BOND_LOG(ERR, + "Failed to allocate ARP packet from pool"); + continue; + } + pkt_size = sizeof(struct rte_ether_hdr) + + sizeof(struct rte_arp_hdr) + + client_info->vlan_count * + sizeof(struct rte_vlan_hdr); + upd_pkt->data_len = pkt_size; + upd_pkt->pkt_len = pkt_size; + + slave_idx = bond_mode_alb_arp_upd(client_info, upd_pkt, + internals); + + /* Add packet to update tx buffer */ + update_bufs[slave_idx][update_bufs_pkts[slave_idx]] = upd_pkt; + update_bufs_pkts[slave_idx]++; + } + } + internals->mode6.ntt = 0; + } + + /* Send ARP packets on proper slaves */ + for (i = 0; i < RTE_MAX_ETHPORTS; i++) { + if (slave_bufs_pkts[i] > 0) { + num_send = rte_eth_tx_burst(i, bd_tx_q->queue_id, + slave_bufs[i], slave_bufs_pkts[i]); + for (j = 0; j < slave_bufs_pkts[i] - num_send; j++) { + bufs[nb_pkts - 1 - num_not_send - j] = + slave_bufs[i][nb_pkts - 1 - j]; + } + + num_tx_total += num_send; + num_not_send += slave_bufs_pkts[i] - num_send; + +#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || 
defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1) + /* Print TX stats including update packets */ + for (j = 0; j < slave_bufs_pkts[i]; j++) { + eth_h = rte_pktmbuf_mtod(slave_bufs[i][j], + struct rte_ether_hdr *); + mode6_debug("TX ARP:", eth_h, i, &burstnumberTX); + } +#endif + } + } + + /* Send update packets on proper slaves */ + for (i = 0; i < RTE_MAX_ETHPORTS; i++) { + if (update_bufs_pkts[i] > 0) { + num_send = rte_eth_tx_burst(i, bd_tx_q->queue_id, update_bufs[i], + update_bufs_pkts[i]); + for (j = num_send; j < update_bufs_pkts[i]; j++) { + rte_pktmbuf_free(update_bufs[i][j]); + } +#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1) + for (j = 0; j < update_bufs_pkts[i]; j++) { + eth_h = rte_pktmbuf_mtod(update_bufs[i][j], + struct rte_ether_hdr *); + mode6_debug("TX ARPupd:", eth_h, i, &burstnumberTX); + } +#endif + } + } + + /* Send non-ARP packets using tlb policy */ + if (slave_bufs_pkts[RTE_MAX_ETHPORTS] > 0) { + num_send = bond_ethdev_tx_burst_tlb(queue, + slave_bufs[RTE_MAX_ETHPORTS], + slave_bufs_pkts[RTE_MAX_ETHPORTS]); + + for (j = 0; j < slave_bufs_pkts[RTE_MAX_ETHPORTS]; j++) { + bufs[nb_pkts - 1 - num_not_send - j] = + slave_bufs[RTE_MAX_ETHPORTS][nb_pkts - 1 - j]; + } + + num_tx_total += num_send; + } + + return num_tx_total; +} + +static inline uint16_t +tx_burst_balance(void *queue, struct rte_mbuf **bufs, uint16_t nb_bufs, + uint16_t *slave_port_ids, uint16_t slave_count) +{ + struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue; + struct bond_dev_private *internals = bd_tx_q->dev_private; + + /* Array to sort mbufs for transmission on each slave into */ + struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_bufs]; + /* Number of mbufs for transmission on each slave */ + uint16_t slave_nb_bufs[RTE_MAX_ETHPORTS] = { 0 }; + /* Mapping array generated by hash function to map mbufs to slaves */ + uint16_t bufs_slave_port_idxs[nb_bufs]; + + uint16_t slave_tx_count; + uint16_t total_tx_count = 0, total_tx_fail_count = 0; + + uint16_t i; + + /* + * Populate slaves mbuf with the packets which are to be sent on it + * selecting output slave using hash based on xmit policy + */ + internals->burst_xmit_hash(bufs, nb_bufs, slave_count, + bufs_slave_port_idxs); + + for (i = 0; i < nb_bufs; i++) { + /* Populate slave mbuf arrays with mbufs for that slave. 
*/ + uint16_t slave_idx = bufs_slave_port_idxs[i]; + + slave_bufs[slave_idx][slave_nb_bufs[slave_idx]++] = bufs[i]; + } + + /* Send packet burst on each slave device */ + for (i = 0; i < slave_count; i++) { + if (slave_nb_bufs[i] == 0) + continue; + + slave_tx_count = rte_eth_tx_burst(slave_port_ids[i], + bd_tx_q->queue_id, slave_bufs[i], + slave_nb_bufs[i]); + + total_tx_count += slave_tx_count; + + /* If tx burst fails move packets to end of bufs */ + if (unlikely(slave_tx_count < slave_nb_bufs[i])) { + int slave_tx_fail_count = slave_nb_bufs[i] - + slave_tx_count; + total_tx_fail_count += slave_tx_fail_count; + memcpy(&bufs[nb_bufs - total_tx_fail_count], + &slave_bufs[i][slave_tx_count], + slave_tx_fail_count * sizeof(bufs[0])); + } + } + + return total_tx_count; +} + +static uint16_t +bond_ethdev_tx_burst_balance(void *queue, struct rte_mbuf **bufs, + uint16_t nb_bufs) +{ + struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue; + struct bond_dev_private *internals = bd_tx_q->dev_private; + + uint16_t slave_port_ids[RTE_MAX_ETHPORTS]; + uint16_t slave_count; + + if (unlikely(nb_bufs == 0)) + return 0; + + /* Copy slave list to protect against slave up/down changes during tx + * bursting + */ + slave_count = internals->active_slave_count; + if (unlikely(slave_count < 1)) + return 0; + + memcpy(slave_port_ids, internals->active_slaves, + sizeof(slave_port_ids[0]) * slave_count); + return tx_burst_balance(queue, bufs, nb_bufs, slave_port_ids, + slave_count); +} + +static inline uint16_t +tx_burst_8023ad(void *queue, struct rte_mbuf **bufs, uint16_t nb_bufs, + bool dedicated_txq) +{ + struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue; + struct bond_dev_private *internals = bd_tx_q->dev_private; + + uint16_t slave_port_ids[RTE_MAX_ETHPORTS]; + uint16_t slave_count; + + uint16_t dist_slave_port_ids[RTE_MAX_ETHPORTS]; + uint16_t dist_slave_count; + + uint16_t slave_tx_count; + + uint16_t i; + + /* Copy slave list to protect against slave up/down changes during tx + * bursting */ + slave_count = internals->active_slave_count; + if (unlikely(slave_count < 1)) + return 0; + + memcpy(slave_port_ids, internals->active_slaves, + sizeof(slave_port_ids[0]) * slave_count); + + if (dedicated_txq) + goto skip_tx_ring; + + /* Check for LACP control packets and send if available */ + for (i = 0; i < slave_count; i++) { + struct port *port = &bond_mode_8023ad_ports[slave_port_ids[i]]; + struct rte_mbuf *ctrl_pkt = NULL; + + if (likely(rte_ring_empty(port->tx_ring))) + continue; + + if (rte_ring_dequeue(port->tx_ring, + (void **)&ctrl_pkt) != -ENOENT) { + slave_tx_count = rte_eth_tx_burst(slave_port_ids[i], + bd_tx_q->queue_id, &ctrl_pkt, 1); + /* + * re-enqueue LAG control plane packets to buffering + * ring if transmission fails so the packet isn't lost. 
+ */ + if (slave_tx_count != 1) + rte_ring_enqueue(port->tx_ring, ctrl_pkt); + } + } + +skip_tx_ring: + if (unlikely(nb_bufs == 0)) + return 0; + + dist_slave_count = 0; + for (i = 0; i < slave_count; i++) { + struct port *port = &bond_mode_8023ad_ports[slave_port_ids[i]]; + + if (ACTOR_STATE(port, DISTRIBUTING)) + dist_slave_port_ids[dist_slave_count++] = + slave_port_ids[i]; + } + + if (unlikely(dist_slave_count < 1)) + return 0; + + return tx_burst_balance(queue, bufs, nb_bufs, dist_slave_port_ids, + dist_slave_count); +} + +static uint16_t +bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs, + uint16_t nb_bufs) +{ + return tx_burst_8023ad(queue, bufs, nb_bufs, false); +} + +static uint16_t +bond_ethdev_tx_burst_8023ad_fast_queue(void *queue, struct rte_mbuf **bufs, + uint16_t nb_bufs) +{ + return tx_burst_8023ad(queue, bufs, nb_bufs, true); +} + +static uint16_t +bond_ethdev_tx_burst_broadcast(void *queue, struct rte_mbuf **bufs, + uint16_t nb_pkts) +{ + struct bond_dev_private *internals; + struct bond_tx_queue *bd_tx_q; + + uint16_t slaves[RTE_MAX_ETHPORTS]; + uint8_t tx_failed_flag = 0; + uint16_t num_of_slaves; + + uint16_t max_nb_of_tx_pkts = 0; + + int slave_tx_total[RTE_MAX_ETHPORTS]; + int i, most_successful_tx_slave = -1; + + bd_tx_q = (struct bond_tx_queue *)queue; + internals = bd_tx_q->dev_private; + + /* Copy slave list to protect against slave up/down changes during tx + * bursting */ + num_of_slaves = internals->active_slave_count; + memcpy(slaves, internals->active_slaves, + sizeof(internals->active_slaves[0]) * num_of_slaves); + + if (num_of_slaves < 1) + return 0; + + /* Increment reference count on mbufs */ + for (i = 0; i < nb_pkts; i++) + rte_mbuf_refcnt_update(bufs[i], num_of_slaves - 1); + + /* Transmit burst on each active slave */ + for (i = 0; i < num_of_slaves; i++) { + slave_tx_total[i] = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id, + bufs, nb_pkts); + + if (unlikely(slave_tx_total[i] < nb_pkts)) + tx_failed_flag = 1; + + /* record the value and slave index for the slave which transmits the + * maximum number of packets */ + if (slave_tx_total[i] > max_nb_of_tx_pkts) { + max_nb_of_tx_pkts = slave_tx_total[i]; + most_successful_tx_slave = i; + } + } + + /* if slaves fail to transmit packets from burst, the calling application + * is not expected to know about multiple references to packets so we must + * handle failures of all packets except those of the most successful slave + */ + if (unlikely(tx_failed_flag)) + for (i = 0; i < num_of_slaves; i++) + if (i != most_successful_tx_slave) + while (slave_tx_total[i] < nb_pkts) + rte_pktmbuf_free(bufs[slave_tx_total[i]++]); + + return max_nb_of_tx_pkts; +} + +static void +link_properties_set(struct rte_eth_dev *ethdev, struct rte_eth_link *slave_link) +{ + struct bond_dev_private *bond_ctx = ethdev->data->dev_private; + + if (bond_ctx->mode == BONDING_MODE_8023AD) { + /** + * If in mode 4 then save the link properties of the first + * slave, all subsequent slaves must match these properties + */ + struct rte_eth_link *bond_link = &bond_ctx->mode4.slave_link; + + bond_link->link_autoneg = slave_link->link_autoneg; + bond_link->link_duplex = slave_link->link_duplex; + bond_link->link_speed = slave_link->link_speed; + } else { + /** + * In any other mode the link properties are set to default + * values of AUTONEG/DUPLEX + */ + ethdev->data->dev_link.link_autoneg = ETH_LINK_AUTONEG; + ethdev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX; + } +} + +static int +link_properties_valid(struct 
rte_eth_dev *ethdev, + struct rte_eth_link *slave_link) +{ + struct bond_dev_private *bond_ctx = ethdev->data->dev_private; + + if (bond_ctx->mode == BONDING_MODE_8023AD) { + struct rte_eth_link *bond_link = &bond_ctx->mode4.slave_link; + + if (bond_link->link_duplex != slave_link->link_duplex || + bond_link->link_autoneg != slave_link->link_autoneg || + bond_link->link_speed != slave_link->link_speed) + return -1; + } + + return 0; +} + +int +mac_address_get(struct rte_eth_dev *eth_dev, + struct rte_ether_addr *dst_mac_addr) +{ + struct rte_ether_addr *mac_addr; + + if (eth_dev == NULL) { + RTE_BOND_LOG(ERR, "NULL pointer eth_dev specified"); + return -1; + } + + if (dst_mac_addr == NULL) { + RTE_BOND_LOG(ERR, "NULL pointer MAC specified"); + return -1; + } + + mac_addr = eth_dev->data->mac_addrs; + + rte_ether_addr_copy(mac_addr, dst_mac_addr); + return 0; +} + +int +mac_address_set(struct rte_eth_dev *eth_dev, + struct rte_ether_addr *new_mac_addr) +{ + struct rte_ether_addr *mac_addr; + + if (eth_dev == NULL) { + RTE_BOND_LOG(ERR, "NULL pointer eth_dev specified"); + return -1; + } + + if (new_mac_addr == NULL) { + RTE_BOND_LOG(ERR, "NULL pointer MAC specified"); + return -1; + } + + mac_addr = eth_dev->data->mac_addrs; + + /* If new MAC is different to current MAC then update */ + if (memcmp(mac_addr, new_mac_addr, sizeof(*mac_addr)) != 0) + memcpy(mac_addr, new_mac_addr, sizeof(*mac_addr)); + + return 0; +} + +static const struct rte_ether_addr null_mac_addr; + +/* + * Add additional MAC addresses to the slave + */ +int +slave_add_mac_addresses(struct rte_eth_dev *bonded_eth_dev, + uint16_t slave_port_id) +{ + int i, ret; + struct rte_ether_addr *mac_addr; + + for (i = 1; i < BOND_MAX_MAC_ADDRS; i++) { + mac_addr = &bonded_eth_dev->data->mac_addrs[i]; + if (rte_is_same_ether_addr(mac_addr, &null_mac_addr)) + break; + + ret = rte_eth_dev_mac_addr_add(slave_port_id, mac_addr, 0); + if (ret < 0) { + /* rollback */ + for (i--; i > 0; i--) + rte_eth_dev_mac_addr_remove(slave_port_id, + &bonded_eth_dev->data->mac_addrs[i]); + return ret; + } + } + + return 0; +} + +/* + * Remove additional MAC addresses from the slave + */ +int +slave_remove_mac_addresses(struct rte_eth_dev *bonded_eth_dev, + uint16_t slave_port_id) +{ + int i, rc, ret; + struct rte_ether_addr *mac_addr; + + rc = 0; + for (i = 1; i < BOND_MAX_MAC_ADDRS; i++) { + mac_addr = &bonded_eth_dev->data->mac_addrs[i]; + if (rte_is_same_ether_addr(mac_addr, &null_mac_addr)) + break; + + ret = rte_eth_dev_mac_addr_remove(slave_port_id, mac_addr); + /* save only the first error */ + if (ret < 0 && rc == 0) + rc = ret; + } + + return rc; +} + +int +mac_address_slaves_update(struct rte_eth_dev *bonded_eth_dev) +{ + struct bond_dev_private *internals = bonded_eth_dev->data->dev_private; + int i; + + /* Update slave devices MAC addresses */ + if (internals->slave_count < 1) + return -1; + + switch (internals->mode) { + case BONDING_MODE_ROUND_ROBIN: + case BONDING_MODE_BALANCE: + case BONDING_MODE_BROADCAST: + for (i = 0; i < internals->slave_count; i++) { + if (rte_eth_dev_default_mac_addr_set( + internals->slaves[i].port_id, + bonded_eth_dev->data->mac_addrs)) { + RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address", + internals->slaves[i].port_id); + return -1; + } + } + break; + case BONDING_MODE_8023AD: + bond_mode_8023ad_mac_address_update(bonded_eth_dev); + break; + case BONDING_MODE_ACTIVE_BACKUP: + case BONDING_MODE_TLB: + case BONDING_MODE_ALB: + default: + for (i = 0; i < internals->slave_count; i++) { + if 
(internals->slaves[i].port_id == + internals->current_primary_port) { + if (rte_eth_dev_default_mac_addr_set( + internals->primary_port, + bonded_eth_dev->data->mac_addrs)) { + RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address", + internals->current_primary_port); + return -1; + } + } else { + if (rte_eth_dev_default_mac_addr_set( + internals->slaves[i].port_id, + &internals->slaves[i].persisted_mac_addr)) { + RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address", + internals->slaves[i].port_id); + return -1; + } + } + } + } + + return 0; +} + +int +bond_ethdev_mode_set(struct rte_eth_dev *eth_dev, int mode) +{ + struct bond_dev_private *internals; + + internals = eth_dev->data->dev_private; + + switch (mode) { + case BONDING_MODE_ROUND_ROBIN: + eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_round_robin; + eth_dev->rx_pkt_burst = bond_ethdev_rx_burst; + break; + case BONDING_MODE_ACTIVE_BACKUP: + eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_active_backup; + eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_active_backup; + break; + case BONDING_MODE_BALANCE: + eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_balance; + eth_dev->rx_pkt_burst = bond_ethdev_rx_burst; + break; + case BONDING_MODE_BROADCAST: + eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_broadcast; + eth_dev->rx_pkt_burst = bond_ethdev_rx_burst; + break; + case BONDING_MODE_8023AD: + if (bond_mode_8023ad_enable(eth_dev) != 0) + return -1; + + if (internals->mode4.dedicated_queues.enabled == 0) { + eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_8023ad; + eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_8023ad; + RTE_BOND_LOG(WARNING, + "Using mode 4, it is necessary to do TX burst " + "and RX burst at least every 100ms."); + } else { + /* Use flow director's optimization */ + eth_dev->rx_pkt_burst = + bond_ethdev_rx_burst_8023ad_fast_queue; + eth_dev->tx_pkt_burst = + bond_ethdev_tx_burst_8023ad_fast_queue; + } + break; + case BONDING_MODE_TLB: + eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_tlb; + eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_active_backup; + break; + case BONDING_MODE_ALB: + if (bond_mode_alb_enable(eth_dev) != 0) + return -1; + + eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_alb; + eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_alb; + break; + default: + return -1; + } + + internals->mode = mode; + + return 0; +} + + +static int +slave_configure_slow_queue(struct rte_eth_dev *bonded_eth_dev, + struct rte_eth_dev *slave_eth_dev) +{ + int errval = 0; + struct bond_dev_private *internals = bonded_eth_dev->data->dev_private; + struct port *port = &bond_mode_8023ad_ports[slave_eth_dev->data->port_id]; + + if (port->slow_pool == NULL) { + char mem_name[256]; + int slave_id = slave_eth_dev->data->port_id; + + snprintf(mem_name, RTE_DIM(mem_name), "slave_port%u_slow_pool", + slave_id); + port->slow_pool = rte_pktmbuf_pool_create(mem_name, 8191, + 250, 0, RTE_MBUF_DEFAULT_BUF_SIZE, + slave_eth_dev->data->numa_node); + + /* Any memory allocation failure in initialization is critical because + * resources can't be free, so reinitialization is impossible. 
*/ + if (port->slow_pool == NULL) { + rte_panic("Slave %u: Failed to create memory pool '%s': %s\n", + slave_id, mem_name, rte_strerror(rte_errno)); + } + } + + if (internals->mode4.dedicated_queues.enabled == 1) { + /* Configure slow Rx queue */ + + errval = rte_eth_rx_queue_setup(slave_eth_dev->data->port_id, + internals->mode4.dedicated_queues.rx_qid, 128, + rte_eth_dev_socket_id(slave_eth_dev->data->port_id), + NULL, port->slow_pool); + if (errval != 0) { + RTE_BOND_LOG(ERR, + "rte_eth_rx_queue_setup: port=%d queue_id %d, err (%d)", + slave_eth_dev->data->port_id, + internals->mode4.dedicated_queues.rx_qid, + errval); + return errval; + } + + errval = rte_eth_tx_queue_setup(slave_eth_dev->data->port_id, + internals->mode4.dedicated_queues.tx_qid, 512, + rte_eth_dev_socket_id(slave_eth_dev->data->port_id), + NULL); + if (errval != 0) { + RTE_BOND_LOG(ERR, + "rte_eth_tx_queue_setup: port=%d queue_id %d, err (%d)", + slave_eth_dev->data->port_id, + internals->mode4.dedicated_queues.tx_qid, + errval); + return errval; + } + } + return 0; +} + +int +slave_configure(struct rte_eth_dev *bonded_eth_dev, + struct rte_eth_dev *slave_eth_dev) +{ + struct bond_rx_queue *bd_rx_q; + struct bond_tx_queue *bd_tx_q; + uint16_t nb_rx_queues; + uint16_t nb_tx_queues; + + int errval; + uint16_t q_id; + struct rte_flow_error flow_error; + + struct bond_dev_private *internals = bonded_eth_dev->data->dev_private; + + /* Stop slave */ + rte_eth_dev_stop(slave_eth_dev->data->port_id); + + /* Enable interrupts on slave device if supported */ + if (slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) + slave_eth_dev->data->dev_conf.intr_conf.lsc = 1; + + /* If RSS is enabled for bonding, try to enable it for slaves */ + if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) { + if (internals->rss_key_len != 0) { + slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len = + internals->rss_key_len; + slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key = + internals->rss_key; + } else { + slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL; + } + + slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf = + bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf; + slave_eth_dev->data->dev_conf.rxmode.mq_mode = + bonded_eth_dev->data->dev_conf.rxmode.mq_mode; + } + + if (bonded_eth_dev->data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_VLAN_FILTER) + slave_eth_dev->data->dev_conf.rxmode.offloads |= + DEV_RX_OFFLOAD_VLAN_FILTER; + else + slave_eth_dev->data->dev_conf.rxmode.offloads &= + ~DEV_RX_OFFLOAD_VLAN_FILTER; + + nb_rx_queues = bonded_eth_dev->data->nb_rx_queues; + nb_tx_queues = bonded_eth_dev->data->nb_tx_queues; + + if (internals->mode == BONDING_MODE_8023AD) { + if (internals->mode4.dedicated_queues.enabled == 1) { + nb_rx_queues++; + nb_tx_queues++; + } + } + + errval = rte_eth_dev_set_mtu(slave_eth_dev->data->port_id, + bonded_eth_dev->data->mtu); + if (errval != 0 && errval != -ENOTSUP) { + RTE_BOND_LOG(ERR, "rte_eth_dev_set_mtu: port %u, err (%d)", + slave_eth_dev->data->port_id, errval); + return errval; + } + + /* Configure device */ + errval = rte_eth_dev_configure(slave_eth_dev->data->port_id, + nb_rx_queues, nb_tx_queues, + &(slave_eth_dev->data->dev_conf)); + if (errval != 0) { + RTE_BOND_LOG(ERR, "Cannot configure slave device: port %u, err (%d)", + slave_eth_dev->data->port_id, errval); + return errval; + } + + /* Setup Rx Queues */ + for (q_id = 0; q_id < bonded_eth_dev->data->nb_rx_queues; q_id++) { + bd_rx_q = (struct bond_rx_queue 
*)bonded_eth_dev->data->rx_queues[q_id]; + + errval = rte_eth_rx_queue_setup(slave_eth_dev->data->port_id, q_id, + bd_rx_q->nb_rx_desc, + rte_eth_dev_socket_id(slave_eth_dev->data->port_id), + &(bd_rx_q->rx_conf), bd_rx_q->mb_pool); + if (errval != 0) { + RTE_BOND_LOG(ERR, + "rte_eth_rx_queue_setup: port=%d queue_id %d, err (%d)", + slave_eth_dev->data->port_id, q_id, errval); + return errval; + } + } + + /* Setup Tx Queues */ + for (q_id = 0; q_id < bonded_eth_dev->data->nb_tx_queues; q_id++) { + bd_tx_q = (struct bond_tx_queue *)bonded_eth_dev->data->tx_queues[q_id]; + + errval = rte_eth_tx_queue_setup(slave_eth_dev->data->port_id, q_id, + bd_tx_q->nb_tx_desc, + rte_eth_dev_socket_id(slave_eth_dev->data->port_id), + &bd_tx_q->tx_conf); + if (errval != 0) { + RTE_BOND_LOG(ERR, + "rte_eth_tx_queue_setup: port=%d queue_id %d, err (%d)", + slave_eth_dev->data->port_id, q_id, errval); + return errval; + } + } + + if (internals->mode == BONDING_MODE_8023AD && + internals->mode4.dedicated_queues.enabled == 1) { + if (slave_configure_slow_queue(bonded_eth_dev, slave_eth_dev) + != 0) + return errval; + + if (bond_ethdev_8023ad_flow_verify(bonded_eth_dev, + slave_eth_dev->data->port_id) != 0) { + RTE_BOND_LOG(ERR, + "rte_eth_tx_queue_setup: port=%d queue_id %d, err (%d)", + slave_eth_dev->data->port_id, q_id, errval); + return -1; + } + + if (internals->mode4.dedicated_queues.flow[slave_eth_dev->data->port_id] != NULL) + rte_flow_destroy(slave_eth_dev->data->port_id, + internals->mode4.dedicated_queues.flow[slave_eth_dev->data->port_id], + &flow_error); + + bond_ethdev_8023ad_flow_set(bonded_eth_dev, + slave_eth_dev->data->port_id); + } + + /* Start device */ + errval = rte_eth_dev_start(slave_eth_dev->data->port_id); + if (errval != 0) { + RTE_BOND_LOG(ERR, "rte_eth_dev_start: port=%u, err (%d)", + slave_eth_dev->data->port_id, errval); + return -1; + } + + /* If RSS is enabled for bonding, synchronize RETA */ + if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) { + int i; + struct bond_dev_private *internals; + + internals = bonded_eth_dev->data->dev_private; + + for (i = 0; i < internals->slave_count; i++) { + if (internals->slaves[i].port_id == slave_eth_dev->data->port_id) { + errval = rte_eth_dev_rss_reta_update( + slave_eth_dev->data->port_id, + &internals->reta_conf[0], + internals->slaves[i].reta_size); + if (errval != 0) { + RTE_BOND_LOG(WARNING, + "rte_eth_dev_rss_reta_update on slave port %d fails (err %d)." 
+ " RSS Configuration for bonding may be inconsistent.", + slave_eth_dev->data->port_id, errval); + } + break; + } + } + } + + /* If lsc interrupt is set, check initial slave's link status */ + if (slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) { + slave_eth_dev->dev_ops->link_update(slave_eth_dev, 0); + bond_ethdev_lsc_event_callback(slave_eth_dev->data->port_id, + RTE_ETH_EVENT_INTR_LSC, &bonded_eth_dev->data->port_id, + NULL); + } + + return 0; +} + +void +slave_remove(struct bond_dev_private *internals, + struct rte_eth_dev *slave_eth_dev) +{ + uint16_t i; + + for (i = 0; i < internals->slave_count; i++) + if (internals->slaves[i].port_id == + slave_eth_dev->data->port_id) + break; + + if (i < (internals->slave_count - 1)) { + struct rte_flow *flow; + + memmove(&internals->slaves[i], &internals->slaves[i + 1], + sizeof(internals->slaves[0]) * + (internals->slave_count - i - 1)); + TAILQ_FOREACH(flow, &internals->flow_list, next) { + memmove(&flow->flows[i], &flow->flows[i + 1], + sizeof(flow->flows[0]) * + (internals->slave_count - i - 1)); + flow->flows[internals->slave_count - 1] = NULL; + } + } + + internals->slave_count--; + + /* force reconfiguration of slave interfaces */ + _rte_eth_dev_reset(slave_eth_dev); +} + +static void +bond_ethdev_slave_link_status_change_monitor(void *cb_arg); + +void +slave_add(struct bond_dev_private *internals, + struct rte_eth_dev *slave_eth_dev) +{ + struct bond_slave_details *slave_details = + &internals->slaves[internals->slave_count]; + + slave_details->port_id = slave_eth_dev->data->port_id; + slave_details->last_link_status = 0; + + /* Mark slave devices that don't support interrupts so we can + * compensate when we start the bond + */ + if (!(slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)) { + slave_details->link_status_poll_enabled = 1; + } + + slave_details->link_status_wait_to_complete = 0; + /* clean tlb_last_obytes when adding port for bonding device */ + memcpy(&(slave_details->persisted_mac_addr), slave_eth_dev->data->mac_addrs, + sizeof(struct rte_ether_addr)); +} + +void +bond_ethdev_primary_set(struct bond_dev_private *internals, + uint16_t slave_port_id) +{ + int i; + + if (internals->active_slave_count < 1) + internals->current_primary_port = slave_port_id; + else + /* Search bonded device slave ports for new proposed primary port */ + for (i = 0; i < internals->active_slave_count; i++) { + if (internals->active_slaves[i] == slave_port_id) + internals->current_primary_port = slave_port_id; + } +} + +static int +bond_ethdev_promiscuous_enable(struct rte_eth_dev *eth_dev); + +static int +bond_ethdev_start(struct rte_eth_dev *eth_dev) +{ + struct bond_dev_private *internals; + int i; + + /* slave eth dev will be started by bonded device */ + if (check_for_bonded_ethdev(eth_dev)) { + RTE_BOND_LOG(ERR, "User tried to explicitly start a slave eth_dev (%d)", + eth_dev->data->port_id); + return -1; + } + + eth_dev->data->dev_link.link_status = ETH_LINK_DOWN; + eth_dev->data->dev_started = 1; + + internals = eth_dev->data->dev_private; + + if (internals->slave_count == 0) { + RTE_BOND_LOG(ERR, "Cannot start port since there are no slave devices"); + goto out_err; + } + + if (internals->user_defined_mac == 0) { + struct rte_ether_addr *new_mac_addr = NULL; + + for (i = 0; i < internals->slave_count; i++) + if (internals->slaves[i].port_id == internals->primary_port) + new_mac_addr = &internals->slaves[i].persisted_mac_addr; + + if (new_mac_addr == NULL) + goto out_err; + + if (mac_address_set(eth_dev, new_mac_addr) != 0) { + 
RTE_BOND_LOG(ERR, "bonded port (%d) failed to update MAC address", + eth_dev->data->port_id); + goto out_err; + } + } + + if (internals->mode == BONDING_MODE_8023AD) { + if (internals->mode4.dedicated_queues.enabled == 1) { + internals->mode4.dedicated_queues.rx_qid = + eth_dev->data->nb_rx_queues; + internals->mode4.dedicated_queues.tx_qid = + eth_dev->data->nb_tx_queues; + } + } + + + /* Reconfigure each slave device if starting bonded device */ + for (i = 0; i < internals->slave_count; i++) { + struct rte_eth_dev *slave_ethdev = + &(rte_eth_devices[internals->slaves[i].port_id]); + if (slave_configure(eth_dev, slave_ethdev) != 0) { + RTE_BOND_LOG(ERR, + "bonded port (%d) failed to reconfigure slave device (%d)", + eth_dev->data->port_id, + internals->slaves[i].port_id); + goto out_err; + } + /* We will need to poll for link status if any slave doesn't + * support interrupts + */ + if (internals->slaves[i].link_status_poll_enabled) + internals->link_status_polling_enabled = 1; + } + + /* start polling if needed */ + if (internals->link_status_polling_enabled) { + rte_eal_alarm_set( + internals->link_status_polling_interval_ms * 1000, + bond_ethdev_slave_link_status_change_monitor, + (void *)&rte_eth_devices[internals->port_id]); + } + + /* Update all slave devices MACs*/ + if (mac_address_slaves_update(eth_dev) != 0) + goto out_err; + + if (internals->user_defined_primary_port) + bond_ethdev_primary_set(internals, internals->primary_port); + + if (internals->mode == BONDING_MODE_8023AD) + bond_mode_8023ad_start(eth_dev); + + if (internals->mode == BONDING_MODE_TLB || + internals->mode == BONDING_MODE_ALB) + bond_tlb_enable(internals); + + return 0; + +out_err: + eth_dev->data->dev_started = 0; + return -1; +} + +static void +bond_ethdev_free_queues(struct rte_eth_dev *dev) +{ + uint16_t i; + + if (dev->data->rx_queues != NULL) { + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rte_free(dev->data->rx_queues[i]); + dev->data->rx_queues[i] = NULL; + } + dev->data->nb_rx_queues = 0; + } + + if (dev->data->tx_queues != NULL) { + for (i = 0; i < dev->data->nb_tx_queues; i++) { + rte_free(dev->data->tx_queues[i]); + dev->data->tx_queues[i] = NULL; + } + dev->data->nb_tx_queues = 0; + } +} + +void +bond_ethdev_stop(struct rte_eth_dev *eth_dev) +{ + struct bond_dev_private *internals = eth_dev->data->dev_private; + uint16_t i; + + if (internals->mode == BONDING_MODE_8023AD) { + struct port *port; + void *pkt = NULL; + + bond_mode_8023ad_stop(eth_dev); + + /* Discard all messages to/from mode 4 state machines */ + for (i = 0; i < internals->active_slave_count; i++) { + port = &bond_mode_8023ad_ports[internals->active_slaves[i]]; + + RTE_ASSERT(port->rx_ring != NULL); + while (rte_ring_dequeue(port->rx_ring, &pkt) != -ENOENT) + rte_pktmbuf_free(pkt); + + RTE_ASSERT(port->tx_ring != NULL); + while (rte_ring_dequeue(port->tx_ring, &pkt) != -ENOENT) + rte_pktmbuf_free(pkt); + } + } + + if (internals->mode == BONDING_MODE_TLB || + internals->mode == BONDING_MODE_ALB) { + bond_tlb_disable(internals); + for (i = 0; i < internals->active_slave_count; i++) + tlb_last_obytets[internals->active_slaves[i]] = 0; + } + + eth_dev->data->dev_link.link_status = ETH_LINK_DOWN; + eth_dev->data->dev_started = 0; + + internals->link_status_polling_enabled = 0; + for (i = 0; i < internals->slave_count; i++) { + uint16_t slave_id = internals->slaves[i].port_id; + if (find_slave_by_id(internals->active_slaves, + internals->active_slave_count, slave_id) != + internals->active_slave_count) { + 
internals->slaves[i].last_link_status = 0; + rte_eth_dev_stop(slave_id); + deactivate_slave(eth_dev, slave_id); + } + } +} + +void +bond_ethdev_close(struct rte_eth_dev *dev) +{ + struct bond_dev_private *internals = dev->data->dev_private; + uint16_t bond_port_id = internals->port_id; + int skipped = 0; + struct rte_flow_error ferror; + + RTE_BOND_LOG(INFO, "Closing bonded device %s", dev->device->name); + while (internals->slave_count != skipped) { + uint16_t port_id = internals->slaves[skipped].port_id; + + rte_eth_dev_stop(port_id); + + if (rte_eth_bond_slave_remove(bond_port_id, port_id) != 0) { + RTE_BOND_LOG(ERR, + "Failed to remove port %d from bonded device %s", + port_id, dev->device->name); + skipped++; + } + } + bond_flow_ops.flush(dev, &ferror); + bond_ethdev_free_queues(dev); + rte_bitmap_reset(internals->vlan_filter_bmp); +} + +/* forward declaration */ +static int bond_ethdev_configure(struct rte_eth_dev *dev); + +static int +bond_ethdev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) +{ + struct bond_dev_private *internals = dev->data->dev_private; + struct bond_slave_details slave; + int ret; + + uint16_t max_nb_rx_queues = UINT16_MAX; + uint16_t max_nb_tx_queues = UINT16_MAX; + uint16_t max_rx_desc_lim = UINT16_MAX; + uint16_t max_tx_desc_lim = UINT16_MAX; + + dev_info->max_mac_addrs = BOND_MAX_MAC_ADDRS; + + dev_info->max_rx_pktlen = internals->candidate_max_rx_pktlen ? + internals->candidate_max_rx_pktlen : + RTE_ETHER_MAX_JUMBO_FRAME_LEN; + + /* Max number of tx/rx queues that the bonded device can support is the + * minimum values of the bonded slaves, as all slaves must be capable + * of supporting the same number of tx/rx queues. + */ + if (internals->slave_count > 0) { + struct rte_eth_dev_info slave_info; + uint16_t idx; + + for (idx = 0; idx < internals->slave_count; idx++) { + slave = internals->slaves[idx]; + ret = rte_eth_dev_info_get(slave.port_id, &slave_info); + if (ret != 0) { + RTE_BOND_LOG(ERR, + "%s: Error during getting device (port %u) info: %s\n", + __func__, + slave.port_id, + strerror(-ret)); + + return ret; + } + + if (slave_info.max_rx_queues < max_nb_rx_queues) + max_nb_rx_queues = slave_info.max_rx_queues; + + if (slave_info.max_tx_queues < max_nb_tx_queues) + max_nb_tx_queues = slave_info.max_tx_queues; + + if (slave_info.rx_desc_lim.nb_max < max_rx_desc_lim) + max_rx_desc_lim = slave_info.rx_desc_lim.nb_max; + + if (slave_info.tx_desc_lim.nb_max < max_tx_desc_lim) + max_tx_desc_lim = slave_info.tx_desc_lim.nb_max; + } + } + + dev_info->max_rx_queues = max_nb_rx_queues; + dev_info->max_tx_queues = max_nb_tx_queues; + + memcpy(&dev_info->default_rxconf, &internals->default_rxconf, + sizeof(dev_info->default_rxconf)); + memcpy(&dev_info->default_txconf, &internals->default_txconf, + sizeof(dev_info->default_txconf)); + + dev_info->rx_desc_lim.nb_max = max_rx_desc_lim; + dev_info->tx_desc_lim.nb_max = max_tx_desc_lim; + + /** + * If dedicated hw queues enabled for link bonding device in LACP mode + * then we need to reduce the maximum number of data path queues by 1. 
+ */ + if (internals->mode == BONDING_MODE_8023AD && + internals->mode4.dedicated_queues.enabled == 1) { + dev_info->max_rx_queues--; + dev_info->max_tx_queues--; + } + + dev_info->min_rx_bufsize = 0; + + dev_info->rx_offload_capa = internals->rx_offload_capa; + dev_info->tx_offload_capa = internals->tx_offload_capa; + dev_info->rx_queue_offload_capa = internals->rx_queue_offload_capa; + dev_info->tx_queue_offload_capa = internals->tx_queue_offload_capa; + dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads; + + dev_info->reta_size = internals->reta_size; + + return 0; +} + +static int +bond_ethdev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) +{ + int res; + uint16_t i; + struct bond_dev_private *internals = dev->data->dev_private; + + /* don't do this while a slave is being added */ + rte_spinlock_lock(&internals->lock); + + if (on) + rte_bitmap_set(internals->vlan_filter_bmp, vlan_id); + else + rte_bitmap_clear(internals->vlan_filter_bmp, vlan_id); + + for (i = 0; i < internals->slave_count; i++) { + uint16_t port_id = internals->slaves[i].port_id; + + res = rte_eth_dev_vlan_filter(port_id, vlan_id, on); + if (res == ENOTSUP) + RTE_BOND_LOG(WARNING, + "Setting VLAN filter on slave port %u not supported.", + port_id); + } + + rte_spinlock_unlock(&internals->lock); + return 0; +} + +static int +bond_ethdev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id, + uint16_t nb_rx_desc, unsigned int socket_id __rte_unused, + const struct rte_eth_rxconf *rx_conf, struct rte_mempool *mb_pool) +{ + struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *) + rte_zmalloc_socket(NULL, sizeof(struct bond_rx_queue), + 0, dev->data->numa_node); + if (bd_rx_q == NULL) + return -1; + + bd_rx_q->queue_id = rx_queue_id; + bd_rx_q->dev_private = dev->data->dev_private; + + bd_rx_q->nb_rx_desc = nb_rx_desc; + + memcpy(&(bd_rx_q->rx_conf), rx_conf, sizeof(struct rte_eth_rxconf)); + bd_rx_q->mb_pool = mb_pool; + + dev->data->rx_queues[rx_queue_id] = bd_rx_q; + + return 0; +} + +static int +bond_ethdev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id, + uint16_t nb_tx_desc, unsigned int socket_id __rte_unused, + const struct rte_eth_txconf *tx_conf) +{ + struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *) + rte_zmalloc_socket(NULL, sizeof(struct bond_tx_queue), + 0, dev->data->numa_node); + + if (bd_tx_q == NULL) + return -1; + + bd_tx_q->queue_id = tx_queue_id; + bd_tx_q->dev_private = dev->data->dev_private; + + bd_tx_q->nb_tx_desc = nb_tx_desc; + memcpy(&(bd_tx_q->tx_conf), tx_conf, sizeof(bd_tx_q->tx_conf)); + + dev->data->tx_queues[tx_queue_id] = bd_tx_q; + + return 0; +} + +static void +bond_ethdev_rx_queue_release(void *queue) +{ + if (queue == NULL) + return; + + rte_free(queue); +} + +static void +bond_ethdev_tx_queue_release(void *queue) +{ + if (queue == NULL) + return; + + rte_free(queue); +} + +static void +bond_ethdev_slave_link_status_change_monitor(void *cb_arg) +{ + struct rte_eth_dev *bonded_ethdev, *slave_ethdev; + struct bond_dev_private *internals; + + /* Default value for polling slave found is true as we don't want to + * disable the polling thread if we cannot get the lock */ + int i, polling_slave_found = 1; + + if (cb_arg == NULL) + return; + + bonded_ethdev = cb_arg; + internals = bonded_ethdev->data->dev_private; + + if (!bonded_ethdev->data->dev_started || + !internals->link_status_polling_enabled) + return; + + /* If device is currently being configured then don't check slaves link + * status, wait until next period */ + 
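+ /* rte_spinlock_trylock() keeps this alarm callback non-blocking: if the configuration lock is busy the loop below is skipped, polling_slave_found keeps its default value of 1 and the alarm is simply re-armed for the next polling period. */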
if (rte_spinlock_trylock(&internals->lock)) { + if (internals->slave_count > 0) + polling_slave_found = 0; + + for (i = 0; i < internals->slave_count; i++) { + if (!internals->slaves[i].link_status_poll_enabled) + continue; + + slave_ethdev = &rte_eth_devices[internals->slaves[i].port_id]; + polling_slave_found = 1; + + /* Update slave link status */ + (*slave_ethdev->dev_ops->link_update)(slave_ethdev, + internals->slaves[i].link_status_wait_to_complete); + + /* if link status has changed since last checked then call lsc + * event callback */ + if (slave_ethdev->data->dev_link.link_status != + internals->slaves[i].last_link_status) { + internals->slaves[i].last_link_status = + slave_ethdev->data->dev_link.link_status; + + bond_ethdev_lsc_event_callback(internals->slaves[i].port_id, + RTE_ETH_EVENT_INTR_LSC, + &bonded_ethdev->data->port_id, + NULL); + } + } + rte_spinlock_unlock(&internals->lock); + } + + if (polling_slave_found) + /* Set alarm to continue monitoring link status of slave ethdev's */ + rte_eal_alarm_set(internals->link_status_polling_interval_ms * 1000, + bond_ethdev_slave_link_status_change_monitor, cb_arg); +} + +static int +bond_ethdev_link_update(struct rte_eth_dev *ethdev, int wait_to_complete) +{ + int (*link_update)(uint16_t port_id, struct rte_eth_link *eth_link); + + struct bond_dev_private *bond_ctx; + struct rte_eth_link slave_link; + + bool one_link_update_succeeded; + uint32_t idx; + int ret; + + bond_ctx = ethdev->data->dev_private; + + ethdev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE; + + if (ethdev->data->dev_started == 0 || + bond_ctx->active_slave_count == 0) { + ethdev->data->dev_link.link_status = ETH_LINK_DOWN; + return 0; + } + + ethdev->data->dev_link.link_status = ETH_LINK_UP; + + if (wait_to_complete) + link_update = rte_eth_link_get; + else + link_update = rte_eth_link_get_nowait; + + switch (bond_ctx->mode) { + case BONDING_MODE_BROADCAST: + /** + * Setting link speed to UINT32_MAX to ensure we pick up the + * value of the first active slave + */ + ethdev->data->dev_link.link_speed = UINT32_MAX; + + /** + * link speed is minimum value of all the slaves link speed as + * packet loss will occur on this slave if transmission at rates + * greater than this are attempted + */ + for (idx = 0; idx < bond_ctx->active_slave_count; idx++) { + ret = link_update(bond_ctx->active_slaves[idx], + &slave_link); + if (ret < 0) { + ethdev->data->dev_link.link_speed = + ETH_SPEED_NUM_NONE; + RTE_BOND_LOG(ERR, + "Slave (port %u) link get failed: %s", + bond_ctx->active_slaves[idx], + rte_strerror(-ret)); + return 0; + } + + if (slave_link.link_speed < + ethdev->data->dev_link.link_speed) + ethdev->data->dev_link.link_speed = + slave_link.link_speed; + } + break; + case BONDING_MODE_ACTIVE_BACKUP: + /* Current primary slave */ + ret = link_update(bond_ctx->current_primary_port, &slave_link); + if (ret < 0) { + RTE_BOND_LOG(ERR, "Slave (port %u) link get failed: %s", + bond_ctx->current_primary_port, + rte_strerror(-ret)); + return 0; + } + + ethdev->data->dev_link.link_speed = slave_link.link_speed; + break; + case BONDING_MODE_8023AD: + ethdev->data->dev_link.link_autoneg = + bond_ctx->mode4.slave_link.link_autoneg; + ethdev->data->dev_link.link_duplex = + bond_ctx->mode4.slave_link.link_duplex; + /* fall through */ + /* to update link speed */ + case BONDING_MODE_ROUND_ROBIN: + case BONDING_MODE_BALANCE: + case BONDING_MODE_TLB: + case BONDING_MODE_ALB: + default: + /** + * In theses mode the maximum theoretical link speed is the sum + * of all the slaves + */ 
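+ /* Start from ETH_SPEED_NUM_NONE and add the speed of every slave whose link query succeeds; if no query succeeds an error is logged and the bonded link speed is left unset. */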
+ ethdev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE; + one_link_update_succeeded = false; + + for (idx = 0; idx < bond_ctx->active_slave_count; idx++) { + ret = link_update(bond_ctx->active_slaves[idx], + &slave_link); + if (ret < 0) { + RTE_BOND_LOG(ERR, + "Slave (port %u) link get failed: %s", + bond_ctx->active_slaves[idx], + rte_strerror(-ret)); + continue; + } + + one_link_update_succeeded = true; + ethdev->data->dev_link.link_speed += + slave_link.link_speed; + } + + if (!one_link_update_succeeded) { + RTE_BOND_LOG(ERR, "All slaves link get failed"); + return 0; + } + } + + + return 0; +} + + +static int +bond_ethdev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) +{ + struct bond_dev_private *internals = dev->data->dev_private; + struct rte_eth_stats slave_stats; + int i, j; + + for (i = 0; i < internals->slave_count; i++) { + rte_eth_stats_get(internals->slaves[i].port_id, &slave_stats); + + stats->ipackets += slave_stats.ipackets; + stats->opackets += slave_stats.opackets; + stats->ibytes += slave_stats.ibytes; + stats->obytes += slave_stats.obytes; + stats->imissed += slave_stats.imissed; + stats->ierrors += slave_stats.ierrors; + stats->oerrors += slave_stats.oerrors; + stats->rx_nombuf += slave_stats.rx_nombuf; + + for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) { + stats->q_ipackets[j] += slave_stats.q_ipackets[j]; + stats->q_opackets[j] += slave_stats.q_opackets[j]; + stats->q_ibytes[j] += slave_stats.q_ibytes[j]; + stats->q_obytes[j] += slave_stats.q_obytes[j]; + stats->q_errors[j] += slave_stats.q_errors[j]; + } + + } + + return 0; +} + +static int +bond_ethdev_stats_reset(struct rte_eth_dev *dev) +{ + struct bond_dev_private *internals = dev->data->dev_private; + int i; + int err; + int ret; + + for (i = 0, err = 0; i < internals->slave_count; i++) { + ret = rte_eth_stats_reset(internals->slaves[i].port_id); + if (ret != 0) + err = ret; + } + + return err; +} + +static int +bond_ethdev_promiscuous_enable(struct rte_eth_dev *eth_dev) +{ + struct bond_dev_private *internals = eth_dev->data->dev_private; + int i; + int ret = 0; + uint16_t port_id; + + switch (internals->mode) { + /* Promiscuous mode is propagated to all slaves */ + case BONDING_MODE_ROUND_ROBIN: + case BONDING_MODE_BALANCE: + case BONDING_MODE_BROADCAST: + case BONDING_MODE_8023AD: { + unsigned int slave_ok = 0; + + for (i = 0; i < internals->slave_count; i++) { + port_id = internals->slaves[i].port_id; + + ret = rte_eth_promiscuous_enable(port_id); + if (ret != 0) + RTE_BOND_LOG(ERR, + "Failed to enable promiscuous mode for port %u: %s", + port_id, rte_strerror(-ret)); + else + slave_ok++; + } + /* + * Report success if operation is successful on at least + * on one slave. Otherwise return last error code. 
+ */ + if (slave_ok > 0) + ret = 0; + break; + } + /* Promiscuous mode is propagated only to primary slave */ + case BONDING_MODE_ACTIVE_BACKUP: + case BONDING_MODE_TLB: + case BONDING_MODE_ALB: + default: + /* Do not touch promisc when there cannot be primary ports */ + if (internals->slave_count == 0) + break; + port_id = internals->current_primary_port; + ret = rte_eth_promiscuous_enable(port_id); + if (ret != 0) + RTE_BOND_LOG(ERR, + "Failed to enable promiscuous mode for port %u: %s", + port_id, rte_strerror(-ret)); + } + + return ret; +} + +static int +bond_ethdev_promiscuous_disable(struct rte_eth_dev *dev) +{ + struct bond_dev_private *internals = dev->data->dev_private; + int i; + int ret = 0; + uint16_t port_id; + + switch (internals->mode) { + /* Promiscuous mode is propagated to all slaves */ + case BONDING_MODE_ROUND_ROBIN: + case BONDING_MODE_BALANCE: + case BONDING_MODE_BROADCAST: + case BONDING_MODE_8023AD: { + unsigned int slave_ok = 0; + + for (i = 0; i < internals->slave_count; i++) { + port_id = internals->slaves[i].port_id; + + if (internals->mode == BONDING_MODE_8023AD && + bond_mode_8023ad_ports[port_id].forced_rx_flags == + BOND_8023AD_FORCED_PROMISC) { + slave_ok++; + continue; + } + ret = rte_eth_promiscuous_disable(port_id); + if (ret != 0) + RTE_BOND_LOG(ERR, + "Failed to disable promiscuous mode for port %u: %s", + port_id, rte_strerror(-ret)); + else + slave_ok++; + } + /* + * Report success if operation is successful on at least + * on one slave. Otherwise return last error code. + */ + if (slave_ok > 0) + ret = 0; + break; + } + /* Promiscuous mode is propagated only to primary slave */ + case BONDING_MODE_ACTIVE_BACKUP: + case BONDING_MODE_TLB: + case BONDING_MODE_ALB: + default: + /* Do not touch promisc when there cannot be primary ports */ + if (internals->slave_count == 0) + break; + port_id = internals->current_primary_port; + ret = rte_eth_promiscuous_disable(port_id); + if (ret != 0) + RTE_BOND_LOG(ERR, + "Failed to disable promiscuous mode for port %u: %s", + port_id, rte_strerror(-ret)); + } + + return ret; +} + +static int +bond_ethdev_allmulticast_enable(struct rte_eth_dev *eth_dev) +{ + struct bond_dev_private *internals = eth_dev->data->dev_private; + int i; + int ret = 0; + uint16_t port_id; + + switch (internals->mode) { + /* allmulti mode is propagated to all slaves */ + case BONDING_MODE_ROUND_ROBIN: + case BONDING_MODE_BALANCE: + case BONDING_MODE_BROADCAST: + case BONDING_MODE_8023AD: { + unsigned int slave_ok = 0; + + for (i = 0; i < internals->slave_count; i++) { + port_id = internals->slaves[i].port_id; + + ret = rte_eth_allmulticast_enable(port_id); + if (ret != 0) + RTE_BOND_LOG(ERR, + "Failed to enable allmulti mode for port %u: %s", + port_id, rte_strerror(-ret)); + else + slave_ok++; + } + /* + * Report success if operation is successful on at least + * on one slave. Otherwise return last error code. 
+ */ + if (slave_ok > 0) + ret = 0; + break; + } + /* allmulti mode is propagated only to primary slave */ + case BONDING_MODE_ACTIVE_BACKUP: + case BONDING_MODE_TLB: + case BONDING_MODE_ALB: + default: + /* Do not touch allmulti when there cannot be primary ports */ + if (internals->slave_count == 0) + break; + port_id = internals->current_primary_port; + ret = rte_eth_allmulticast_enable(port_id); + if (ret != 0) + RTE_BOND_LOG(ERR, + "Failed to enable allmulti mode for port %u: %s", + port_id, rte_strerror(-ret)); + } + + return ret; +} + +static int +bond_ethdev_allmulticast_disable(struct rte_eth_dev *eth_dev) +{ + struct bond_dev_private *internals = eth_dev->data->dev_private; + int i; + int ret = 0; + uint16_t port_id; + + switch (internals->mode) { + /* allmulti mode is propagated to all slaves */ + case BONDING_MODE_ROUND_ROBIN: + case BONDING_MODE_BALANCE: + case BONDING_MODE_BROADCAST: + case BONDING_MODE_8023AD: { + unsigned int slave_ok = 0; + + for (i = 0; i < internals->slave_count; i++) { + uint16_t port_id = internals->slaves[i].port_id; + + if (internals->mode == BONDING_MODE_8023AD && + bond_mode_8023ad_ports[port_id].forced_rx_flags == + BOND_8023AD_FORCED_ALLMULTI) + continue; + + ret = rte_eth_allmulticast_disable(port_id); + if (ret != 0) + RTE_BOND_LOG(ERR, + "Failed to disable allmulti mode for port %u: %s", + port_id, rte_strerror(-ret)); + else + slave_ok++; + } + /* + * Report success if operation is successful on at least + * on one slave. Otherwise return last error code. + */ + if (slave_ok > 0) + ret = 0; + break; + } + /* allmulti mode is propagated only to primary slave */ + case BONDING_MODE_ACTIVE_BACKUP: + case BONDING_MODE_TLB: + case BONDING_MODE_ALB: + default: + /* Do not touch allmulti when there cannot be primary ports */ + if (internals->slave_count == 0) + break; + port_id = internals->current_primary_port; + ret = rte_eth_allmulticast_disable(port_id); + if (ret != 0) + RTE_BOND_LOG(ERR, + "Failed to disable allmulti mode for port %u: %s", + port_id, rte_strerror(-ret)); + } + + return ret; +} + +static void +bond_ethdev_delayed_lsc_propagation(void *arg) +{ + if (arg == NULL) + return; + + _rte_eth_dev_callback_process((struct rte_eth_dev *)arg, + RTE_ETH_EVENT_INTR_LSC, NULL); +} + +int +bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type, + void *param, void *ret_param __rte_unused) +{ + struct rte_eth_dev *bonded_eth_dev; + struct bond_dev_private *internals; + struct rte_eth_link link; + int rc = -1; + int ret; + + uint8_t lsc_flag = 0; + int valid_slave = 0; + uint16_t active_pos; + uint16_t i; + + if (type != RTE_ETH_EVENT_INTR_LSC || param == NULL) + return rc; + + bonded_eth_dev = &rte_eth_devices[*(uint16_t *)param]; + + if (check_for_bonded_ethdev(bonded_eth_dev)) + return rc; + + internals = bonded_eth_dev->data->dev_private; + + /* If the device isn't started don't handle interrupts */ + if (!bonded_eth_dev->data->dev_started) + return rc; + + /* verify that port_id is a valid slave of bonded port */ + for (i = 0; i < internals->slave_count; i++) { + if (internals->slaves[i].port_id == port_id) { + valid_slave = 1; + break; + } + } + + if (!valid_slave) + return rc; + + /* Synchronize lsc callback parallel calls either by real link event + * from the slaves PMDs or by the bonding PMD itself. 
+ */ + rte_spinlock_lock(&internals->lsc_lock); + + /* Search for port in active port list */ + active_pos = find_slave_by_id(internals->active_slaves, + internals->active_slave_count, port_id); + + ret = rte_eth_link_get_nowait(port_id, &link); + if (ret < 0) + RTE_BOND_LOG(ERR, "Slave (port %u) link get failed", port_id); + + if (ret == 0 && link.link_status) { + if (active_pos < internals->active_slave_count) + goto link_update; + + /* check link state properties if bonded link is up*/ + if (bonded_eth_dev->data->dev_link.link_status == ETH_LINK_UP) { + if (link_properties_valid(bonded_eth_dev, &link) != 0) + RTE_BOND_LOG(ERR, "Invalid link properties " + "for slave %d in bonding mode %d", + port_id, internals->mode); + } else { + /* inherit slave link properties */ + link_properties_set(bonded_eth_dev, &link); + } + + /* If no active slave ports then set this port to be + * the primary port. + */ + if (internals->active_slave_count < 1) { + /* If first active slave, then change link status */ + bonded_eth_dev->data->dev_link.link_status = + ETH_LINK_UP; + internals->current_primary_port = port_id; + lsc_flag = 1; + + mac_address_slaves_update(bonded_eth_dev); + } + + activate_slave(bonded_eth_dev, port_id); + + /* If the user has defined the primary port then default to + * using it. + */ + if (internals->user_defined_primary_port && + internals->primary_port == port_id) + bond_ethdev_primary_set(internals, port_id); + } else { + if (active_pos == internals->active_slave_count) + goto link_update; + + /* Remove from active slave list */ + deactivate_slave(bonded_eth_dev, port_id); + + if (internals->active_slave_count < 1) + lsc_flag = 1; + + /* Update primary id, take first active slave from list or if none + * available set to -1 */ + if (port_id == internals->current_primary_port) { + if (internals->active_slave_count > 0) + bond_ethdev_primary_set(internals, + internals->active_slaves[0]); + else + internals->current_primary_port = internals->primary_port; + } + } + +link_update: + /** + * Update bonded device link properties after any change to active + * slaves + */ + bond_ethdev_link_update(bonded_eth_dev, 0); + + if (lsc_flag) { + /* Cancel any possible outstanding interrupts if delays are enabled */ + if (internals->link_up_delay_ms > 0 || + internals->link_down_delay_ms > 0) + rte_eal_alarm_cancel(bond_ethdev_delayed_lsc_propagation, + bonded_eth_dev); + + if (bonded_eth_dev->data->dev_link.link_status) { + if (internals->link_up_delay_ms > 0) + rte_eal_alarm_set(internals->link_up_delay_ms * 1000, + bond_ethdev_delayed_lsc_propagation, + (void *)bonded_eth_dev); + else + _rte_eth_dev_callback_process(bonded_eth_dev, + RTE_ETH_EVENT_INTR_LSC, + NULL); + + } else { + if (internals->link_down_delay_ms > 0) + rte_eal_alarm_set(internals->link_down_delay_ms * 1000, + bond_ethdev_delayed_lsc_propagation, + (void *)bonded_eth_dev); + else + _rte_eth_dev_callback_process(bonded_eth_dev, + RTE_ETH_EVENT_INTR_LSC, + NULL); + } + } + + rte_spinlock_unlock(&internals->lsc_lock); + + return rc; +} + +static int +bond_ethdev_rss_reta_update(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size) +{ + unsigned i, j; + int result = 0; + int slave_reta_size; + unsigned reta_count; + struct bond_dev_private *internals = dev->data->dev_private; + + if (reta_size != internals->reta_size) + return -EINVAL; + + /* Copy RETA table */ + reta_count = reta_size / RTE_RETA_GROUP_SIZE; + + for (i = 0; i < reta_count; i++) { + internals->reta_conf[i].mask = 
reta_conf[i].mask; + for (j = 0; j < RTE_RETA_GROUP_SIZE; j++) + if ((reta_conf[i].mask >> j) & 0x01) + internals->reta_conf[i].reta[j] = reta_conf[i].reta[j]; + } + + /* Fill rest of array */ + for (; i < RTE_DIM(internals->reta_conf); i += reta_count) + memcpy(&internals->reta_conf[i], &internals->reta_conf[0], + sizeof(internals->reta_conf[0]) * reta_count); + + /* Propagate RETA over slaves */ + for (i = 0; i < internals->slave_count; i++) { + slave_reta_size = internals->slaves[i].reta_size; + result = rte_eth_dev_rss_reta_update(internals->slaves[i].port_id, + &internals->reta_conf[0], slave_reta_size); + if (result < 0) + return result; + } + + return 0; +} + +static int +bond_ethdev_rss_reta_query(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size) +{ + int i, j; + struct bond_dev_private *internals = dev->data->dev_private; + + if (reta_size != internals->reta_size) + return -EINVAL; + + /* Copy RETA table */ + for (i = 0; i < reta_size / RTE_RETA_GROUP_SIZE; i++) + for (j = 0; j < RTE_RETA_GROUP_SIZE; j++) + if ((reta_conf[i].mask >> j) & 0x01) + reta_conf[i].reta[j] = internals->reta_conf[i].reta[j]; + + return 0; +} + +static int +bond_ethdev_rss_hash_update(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf) +{ + int i, result = 0; + struct bond_dev_private *internals = dev->data->dev_private; + struct rte_eth_rss_conf bond_rss_conf; + + memcpy(&bond_rss_conf, rss_conf, sizeof(struct rte_eth_rss_conf)); + + bond_rss_conf.rss_hf &= internals->flow_type_rss_offloads; + + if (bond_rss_conf.rss_hf != 0) + dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf = bond_rss_conf.rss_hf; + + if (bond_rss_conf.rss_key && bond_rss_conf.rss_key_len < + sizeof(internals->rss_key)) { + if (bond_rss_conf.rss_key_len == 0) + bond_rss_conf.rss_key_len = 40; + internals->rss_key_len = bond_rss_conf.rss_key_len; + memcpy(internals->rss_key, bond_rss_conf.rss_key, + internals->rss_key_len); + } + + for (i = 0; i < internals->slave_count; i++) { + result = rte_eth_dev_rss_hash_update(internals->slaves[i].port_id, + &bond_rss_conf); + if (result < 0) + return result; + } + + return 0; +} + +static int +bond_ethdev_rss_hash_conf_get(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf) +{ + struct bond_dev_private *internals = dev->data->dev_private; + + rss_conf->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf; + rss_conf->rss_key_len = internals->rss_key_len; + if (rss_conf->rss_key) + memcpy(rss_conf->rss_key, internals->rss_key, internals->rss_key_len); + + return 0; +} + +static int +bond_ethdev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) +{ + struct rte_eth_dev *slave_eth_dev; + struct bond_dev_private *internals = dev->data->dev_private; + int ret, i; + + rte_spinlock_lock(&internals->lock); + + for (i = 0; i < internals->slave_count; i++) { + slave_eth_dev = &rte_eth_devices[internals->slaves[i].port_id]; + if (*slave_eth_dev->dev_ops->mtu_set == NULL) { + rte_spinlock_unlock(&internals->lock); + return -ENOTSUP; + } + } + for (i = 0; i < internals->slave_count; i++) { + ret = rte_eth_dev_set_mtu(internals->slaves[i].port_id, mtu); + if (ret < 0) { + rte_spinlock_unlock(&internals->lock); + return ret; + } + } + + rte_spinlock_unlock(&internals->lock); + return 0; +} + +static int +bond_ethdev_mac_address_set(struct rte_eth_dev *dev, + struct rte_ether_addr *addr) +{ + if (mac_address_set(dev, addr)) { + RTE_BOND_LOG(ERR, "Failed to update MAC address"); + return -EINVAL; + } + + return 0; +} + +static int +bond_filter_ctrl(struct 
rte_eth_dev *dev __rte_unused, + enum rte_filter_type type, enum rte_filter_op op, void *arg) +{ + if (type == RTE_ETH_FILTER_GENERIC && op == RTE_ETH_FILTER_GET) { + *(const void **)arg = &bond_flow_ops; + return 0; + } + return -ENOTSUP; +} + +static int +bond_ethdev_mac_addr_add(struct rte_eth_dev *dev, + struct rte_ether_addr *mac_addr, + __rte_unused uint32_t index, uint32_t vmdq) +{ + struct rte_eth_dev *slave_eth_dev; + struct bond_dev_private *internals = dev->data->dev_private; + int ret, i; + + rte_spinlock_lock(&internals->lock); + + for (i = 0; i < internals->slave_count; i++) { + slave_eth_dev = &rte_eth_devices[internals->slaves[i].port_id]; + if (*slave_eth_dev->dev_ops->mac_addr_add == NULL || + *slave_eth_dev->dev_ops->mac_addr_remove == NULL) { + ret = -ENOTSUP; + goto end; + } + } + + for (i = 0; i < internals->slave_count; i++) { + ret = rte_eth_dev_mac_addr_add(internals->slaves[i].port_id, + mac_addr, vmdq); + if (ret < 0) { + /* rollback */ + for (i--; i >= 0; i--) + rte_eth_dev_mac_addr_remove( + internals->slaves[i].port_id, mac_addr); + goto end; + } + } + + ret = 0; +end: + rte_spinlock_unlock(&internals->lock); + return ret; +} + +static void +bond_ethdev_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index) +{ + struct rte_eth_dev *slave_eth_dev; + struct bond_dev_private *internals = dev->data->dev_private; + int i; + + rte_spinlock_lock(&internals->lock); + + for (i = 0; i < internals->slave_count; i++) { + slave_eth_dev = &rte_eth_devices[internals->slaves[i].port_id]; + if (*slave_eth_dev->dev_ops->mac_addr_remove == NULL) + goto end; + } + + struct rte_ether_addr *mac_addr = &dev->data->mac_addrs[index]; + + for (i = 0; i < internals->slave_count; i++) + rte_eth_dev_mac_addr_remove(internals->slaves[i].port_id, + mac_addr); + +end: + rte_spinlock_unlock(&internals->lock); +} + +const struct eth_dev_ops default_dev_ops = { + .dev_start = bond_ethdev_start, + .dev_stop = bond_ethdev_stop, + .dev_close = bond_ethdev_close, + .dev_configure = bond_ethdev_configure, + .dev_infos_get = bond_ethdev_info, + .vlan_filter_set = bond_ethdev_vlan_filter_set, + .rx_queue_setup = bond_ethdev_rx_queue_setup, + .tx_queue_setup = bond_ethdev_tx_queue_setup, + .rx_queue_release = bond_ethdev_rx_queue_release, + .tx_queue_release = bond_ethdev_tx_queue_release, + .link_update = bond_ethdev_link_update, + .stats_get = bond_ethdev_stats_get, + .stats_reset = bond_ethdev_stats_reset, + .promiscuous_enable = bond_ethdev_promiscuous_enable, + .promiscuous_disable = bond_ethdev_promiscuous_disable, + .allmulticast_enable = bond_ethdev_allmulticast_enable, + .allmulticast_disable = bond_ethdev_allmulticast_disable, + .reta_update = bond_ethdev_rss_reta_update, + .reta_query = bond_ethdev_rss_reta_query, + .rss_hash_update = bond_ethdev_rss_hash_update, + .rss_hash_conf_get = bond_ethdev_rss_hash_conf_get, + .mtu_set = bond_ethdev_mtu_set, + .mac_addr_set = bond_ethdev_mac_address_set, + .mac_addr_add = bond_ethdev_mac_addr_add, + .mac_addr_remove = bond_ethdev_mac_addr_remove, + .filter_ctrl = bond_filter_ctrl +}; + +static int +bond_alloc(struct rte_vdev_device *dev, uint8_t mode) +{ + const char *name = rte_vdev_device_name(dev); + uint8_t socket_id = dev->device.numa_node; + struct bond_dev_private *internals = NULL; + struct rte_eth_dev *eth_dev = NULL; + uint32_t vlan_filter_bmp_size; + + /* now do all data allocation - for eth_dev structure, dummy pci driver + * and internal (private) data + */ + + /* reserve an ethdev entry */ + eth_dev = rte_eth_vdev_allocate(dev, 
sizeof(*internals)); + if (eth_dev == NULL) { + RTE_BOND_LOG(ERR, "Unable to allocate rte_eth_dev"); + goto err; + } + + internals = eth_dev->data->dev_private; + eth_dev->data->nb_rx_queues = (uint16_t)1; + eth_dev->data->nb_tx_queues = (uint16_t)1; + + /* Allocate memory for storing MAC addresses */ + eth_dev->data->mac_addrs = rte_zmalloc_socket(name, RTE_ETHER_ADDR_LEN * + BOND_MAX_MAC_ADDRS, 0, socket_id); + if (eth_dev->data->mac_addrs == NULL) { + RTE_BOND_LOG(ERR, + "Failed to allocate %u bytes needed to store MAC addresses", + RTE_ETHER_ADDR_LEN * BOND_MAX_MAC_ADDRS); + goto err; + } + + eth_dev->dev_ops = &default_dev_ops; + eth_dev->data->dev_flags = RTE_ETH_DEV_INTR_LSC; + + rte_spinlock_init(&internals->lock); + rte_spinlock_init(&internals->lsc_lock); + + internals->port_id = eth_dev->data->port_id; + internals->mode = BONDING_MODE_INVALID; + internals->current_primary_port = RTE_MAX_ETHPORTS + 1; + internals->balance_xmit_policy = BALANCE_XMIT_POLICY_LAYER2; + internals->burst_xmit_hash = burst_xmit_l2_hash; + internals->user_defined_mac = 0; + + internals->link_status_polling_enabled = 0; + + internals->link_status_polling_interval_ms = + DEFAULT_POLLING_INTERVAL_10_MS; + internals->link_down_delay_ms = 0; + internals->link_up_delay_ms = 0; + + internals->slave_count = 0; + internals->active_slave_count = 0; + internals->rx_offload_capa = 0; + internals->tx_offload_capa = 0; + internals->rx_queue_offload_capa = 0; + internals->tx_queue_offload_capa = 0; + internals->candidate_max_rx_pktlen = 0; + internals->max_rx_pktlen = 0; + + /* Initially allow to choose any offload type */ + internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK; + + memset(&internals->default_rxconf, 0, + sizeof(internals->default_rxconf)); + memset(&internals->default_txconf, 0, + sizeof(internals->default_txconf)); + + memset(&internals->rx_desc_lim, 0, sizeof(internals->rx_desc_lim)); + memset(&internals->tx_desc_lim, 0, sizeof(internals->tx_desc_lim)); + + memset(internals->active_slaves, 0, sizeof(internals->active_slaves)); + memset(internals->slaves, 0, sizeof(internals->slaves)); + + TAILQ_INIT(&internals->flow_list); + internals->flow_isolated_valid = 0; + + /* Set mode 4 default configuration */ + bond_mode_8023ad_setup(eth_dev, NULL); + if (bond_ethdev_mode_set(eth_dev, mode)) { + RTE_BOND_LOG(ERR, "Failed to set bonded device %d mode to %d", + eth_dev->data->port_id, mode); + goto err; + } + + vlan_filter_bmp_size = + rte_bitmap_get_memory_footprint(RTE_ETHER_MAX_VLAN_ID + 1); + internals->vlan_filter_bmpmem = rte_malloc(name, vlan_filter_bmp_size, + RTE_CACHE_LINE_SIZE); + if (internals->vlan_filter_bmpmem == NULL) { + RTE_BOND_LOG(ERR, + "Failed to allocate vlan bitmap for bonded device %u", + eth_dev->data->port_id); + goto err; + } + + internals->vlan_filter_bmp = rte_bitmap_init(RTE_ETHER_MAX_VLAN_ID + 1, + internals->vlan_filter_bmpmem, vlan_filter_bmp_size); + if (internals->vlan_filter_bmp == NULL) { + RTE_BOND_LOG(ERR, + "Failed to init vlan bitmap for bonded device %u", + eth_dev->data->port_id); + rte_free(internals->vlan_filter_bmpmem); + goto err; + } + + return eth_dev->data->port_id; + +err: + rte_free(internals); + if (eth_dev != NULL) + eth_dev->data->dev_private = NULL; + rte_eth_dev_release_port(eth_dev); + return -1; +} + +static int +bond_probe(struct rte_vdev_device *dev) +{ + const char *name; + struct bond_dev_private *internals; + struct rte_kvargs *kvlist; + uint8_t bonding_mode, socket_id/*, agg_mode*/; + int arg_count, port_id; + uint8_t agg_mode; + struct 
rte_eth_dev *eth_dev; + + if (!dev) + return -EINVAL; + + name = rte_vdev_device_name(dev); + RTE_BOND_LOG(INFO, "Initializing pmd_bond for %s", name); + + if (rte_eal_process_type() == RTE_PROC_SECONDARY) { + eth_dev = rte_eth_dev_attach_secondary(name); + if (!eth_dev) { + RTE_BOND_LOG(ERR, "Failed to probe %s", name); + return -1; + } + /* TODO: request info from primary to set up Rx and Tx */ + eth_dev->dev_ops = &default_dev_ops; + eth_dev->device = &dev->device; + rte_eth_dev_probing_finish(eth_dev); + return 0; + } + + kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), + pmd_bond_init_valid_arguments); + if (kvlist == NULL) + return -1; + + /* Parse link bonding mode */ + if (rte_kvargs_count(kvlist, PMD_BOND_MODE_KVARG) == 1) { + if (rte_kvargs_process(kvlist, PMD_BOND_MODE_KVARG, + &bond_ethdev_parse_slave_mode_kvarg, + &bonding_mode) != 0) { + RTE_BOND_LOG(ERR, "Invalid mode for bonded device %s", + name); + goto parse_error; + } + } else { + RTE_BOND_LOG(ERR, "Mode must be specified exactly once for bonded " + "device %s", name); + goto parse_error; + } + + /* Parse socket id to create bonding device on */ + arg_count = rte_kvargs_count(kvlist, PMD_BOND_SOCKET_ID_KVARG); + if (arg_count == 1) { + if (rte_kvargs_process(kvlist, PMD_BOND_SOCKET_ID_KVARG, + &bond_ethdev_parse_socket_id_kvarg, &socket_id) + != 0) { + RTE_BOND_LOG(ERR, "Invalid socket id specified for " + "bonded device %s", name); + goto parse_error; + } + } else if (arg_count > 1) { + RTE_BOND_LOG(ERR, "Socket id can be specified only once for " + "bonded device %s", name); + goto parse_error; + } else { + socket_id = rte_socket_id(); + } + + dev->device.numa_node = socket_id; + + /* Create link bonding eth device */ + port_id = bond_alloc(dev, bonding_mode); + if (port_id < 0) { + RTE_BOND_LOG(ERR, "Failed to create bonded device %s in mode %u on " + "socket %u.", name, bonding_mode, socket_id); + goto parse_error; + } + internals = rte_eth_devices[port_id].data->dev_private; + internals->kvlist = kvlist; + + if (rte_kvargs_count(kvlist, PMD_BOND_AGG_MODE_KVARG) == 1) { + if (rte_kvargs_process(kvlist, + PMD_BOND_AGG_MODE_KVARG, + &bond_ethdev_parse_slave_agg_mode_kvarg, + &agg_mode) != 0) { + RTE_BOND_LOG(ERR, + "Failed to parse agg selection mode for bonded device %s", + name); + goto parse_error; + } + + if (internals->mode == BONDING_MODE_8023AD) + internals->mode4.agg_selection = agg_mode; + } else { + internals->mode4.agg_selection = AGG_STABLE; + } + + rte_eth_dev_probing_finish(&rte_eth_devices[port_id]); + RTE_BOND_LOG(INFO, "Created bonded device %s on port %d in mode %u on " + "socket %u.", name, port_id, bonding_mode, socket_id); + return 0; + +parse_error: + rte_kvargs_free(kvlist); + + return -1; +} + +static int +bond_remove(struct rte_vdev_device *dev) +{ + struct rte_eth_dev *eth_dev; + struct bond_dev_private *internals; + const char *name; + + if (!dev) + return -EINVAL; + + name = rte_vdev_device_name(dev); + RTE_BOND_LOG(INFO, "Uninitializing pmd_bond for %s", name); + + /* now free all data allocation - for eth_dev structure, + * dummy pci driver and internal (private) data + */ + + /* find an ethdev entry */ + eth_dev = rte_eth_dev_allocated(name); + if (eth_dev == NULL) + return -ENODEV; + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return rte_eth_dev_release_port(eth_dev); + + RTE_ASSERT(eth_dev->device == &dev->device); + + internals = eth_dev->data->dev_private; + if (internals->slave_count != 0) + return -EBUSY; + + if (eth_dev->data->dev_started == 1) { + bond_ethdev_stop(eth_dev); +
bond_ethdev_close(eth_dev); + } + + eth_dev->dev_ops = NULL; + eth_dev->rx_pkt_burst = NULL; + eth_dev->tx_pkt_burst = NULL; + + internals = eth_dev->data->dev_private; + /* Try to release the mempool used in mode 6. If the bond + * device is not in mode 6, freeing a NULL pointer is not a problem. + */ + rte_mempool_free(internals->mode6.mempool); + rte_bitmap_free(internals->vlan_filter_bmp); + rte_free(internals->vlan_filter_bmpmem); + + rte_eth_dev_release_port(eth_dev); + + return 0; +} + +/* This part resolves the slave port ids after all the other pdevs and vdevs + * have been allocated. */ +static int +bond_ethdev_configure(struct rte_eth_dev *dev) +{ + const char *name = dev->device->name; + struct bond_dev_private *internals = dev->data->dev_private; + struct rte_kvargs *kvlist = internals->kvlist; + int arg_count; + uint16_t port_id = dev - rte_eth_devices; + uint8_t agg_mode; + + static const uint8_t default_rss_key[40] = { + 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D, + 0x43, 0xA3, 0x8F, 0xB0, 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4, + 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 0x6A, 0x42, 0xB7, 0x3B, + 0xBE, 0xAC, 0x01, 0xFA + }; + + unsigned i, j; + + /* + * If RSS is enabled, fill the table with default values and + * set the key to the value specified in the port RSS configuration. + * Fall back to the default RSS key if no key is specified. + */ + if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) { + if (dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key != NULL) { + internals->rss_key_len = + dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len; + memcpy(internals->rss_key, + dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key, + internals->rss_key_len); + } else { + internals->rss_key_len = sizeof(default_rss_key); + memcpy(internals->rss_key, default_rss_key, + internals->rss_key_len); + } + + for (i = 0; i < RTE_DIM(internals->reta_conf); i++) { + internals->reta_conf[i].mask = ~0LL; + for (j = 0; j < RTE_RETA_GROUP_SIZE; j++) + internals->reta_conf[i].reta[j] = + (i * RTE_RETA_GROUP_SIZE + j) % + dev->data->nb_rx_queues; + } + } + + /* set the max_rx_pktlen */ + internals->max_rx_pktlen = internals->candidate_max_rx_pktlen; + + /* + * If no kvlist is present, this bonded device has been created + * through the bonding API.
+ */ + if (!kvlist) + return 0; + + /* Parse MAC address for bonded device */ + arg_count = rte_kvargs_count(kvlist, PMD_BOND_MAC_ADDR_KVARG); + if (arg_count == 1) { + struct rte_ether_addr bond_mac; + + if (rte_kvargs_process(kvlist, PMD_BOND_MAC_ADDR_KVARG, + &bond_ethdev_parse_bond_mac_addr_kvarg, &bond_mac) < 0) { + RTE_BOND_LOG(INFO, "Invalid mac address for bonded device %s", + name); + return -1; + } + + /* Set MAC address */ + if (rte_eth_bond_mac_address_set(port_id, &bond_mac) != 0) { + RTE_BOND_LOG(ERR, + "Failed to set mac address on bonded device %s", + name); + return -1; + } + } else if (arg_count > 1) { + RTE_BOND_LOG(ERR, + "MAC address can be specified only once for bonded device %s", + name); + return -1; + } + + /* Parse/set balance mode transmit policy */ + arg_count = rte_kvargs_count(kvlist, PMD_BOND_XMIT_POLICY_KVARG); + if (arg_count == 1) { + uint8_t xmit_policy; + + if (rte_kvargs_process(kvlist, PMD_BOND_XMIT_POLICY_KVARG, + &bond_ethdev_parse_balance_xmit_policy_kvarg, &xmit_policy) != + 0) { + RTE_BOND_LOG(INFO, + "Invalid xmit policy specified for bonded device %s", + name); + return -1; + } + + /* Set balance mode transmit policy*/ + if (rte_eth_bond_xmit_policy_set(port_id, xmit_policy) != 0) { + RTE_BOND_LOG(ERR, + "Failed to set balance xmit policy on bonded device %s", + name); + return -1; + } + } else if (arg_count > 1) { + RTE_BOND_LOG(ERR, + "Transmit policy can be specified only once for bonded device %s", + name); + return -1; + } + + if (rte_kvargs_count(kvlist, PMD_BOND_AGG_MODE_KVARG) == 1) { + if (rte_kvargs_process(kvlist, + PMD_BOND_AGG_MODE_KVARG, + &bond_ethdev_parse_slave_agg_mode_kvarg, + &agg_mode) != 0) { + RTE_BOND_LOG(ERR, + "Failed to parse agg selection mode for bonded device %s", + name); + } + if (internals->mode == BONDING_MODE_8023AD) { + int ret = rte_eth_bond_8023ad_agg_selection_set(port_id, + agg_mode); + if (ret < 0) { + RTE_BOND_LOG(ERR, + "Invalid args for agg selection set for bonded device %s", + name); + return -1; + } + } + } + + /* Parse/add slave ports to bonded device */ + if (rte_kvargs_count(kvlist, PMD_BOND_SLAVE_PORT_KVARG) > 0) { + struct bond_ethdev_slave_ports slave_ports; + unsigned i; + + memset(&slave_ports, 0, sizeof(slave_ports)); + + if (rte_kvargs_process(kvlist, PMD_BOND_SLAVE_PORT_KVARG, + &bond_ethdev_parse_slave_port_kvarg, &slave_ports) != 0) { + RTE_BOND_LOG(ERR, + "Failed to parse slave ports for bonded device %s", + name); + return -1; + } + + for (i = 0; i < slave_ports.slave_count; i++) { + if (rte_eth_bond_slave_add(port_id, slave_ports.slaves[i]) != 0) { + RTE_BOND_LOG(ERR, + "Failed to add port %d as slave to bonded device %s", + slave_ports.slaves[i], name); + } + } + + } else { + RTE_BOND_LOG(INFO, "No slaves specified for bonded device %s", name); + return -1; + } + + /* Parse/set primary slave port id*/ + arg_count = rte_kvargs_count(kvlist, PMD_BOND_PRIMARY_SLAVE_KVARG); + if (arg_count == 1) { + uint16_t primary_slave_port_id; + + if (rte_kvargs_process(kvlist, + PMD_BOND_PRIMARY_SLAVE_KVARG, + &bond_ethdev_parse_primary_slave_port_id_kvarg, + &primary_slave_port_id) < 0) { + RTE_BOND_LOG(INFO, + "Invalid primary slave port id specified for bonded device %s", + name); + return -1; + } + + /* Set balance mode transmit policy*/ + if (rte_eth_bond_primary_set(port_id, primary_slave_port_id) + != 0) { + RTE_BOND_LOG(ERR, + "Failed to set primary slave port %d on bonded device %s", + primary_slave_port_id, name); + return -1; + } + } else if (arg_count > 1) { + RTE_BOND_LOG(INFO, + 
"Primary slave can be specified only once for bonded device %s", + name); + return -1; + } + + /* Parse link status monitor polling interval */ + arg_count = rte_kvargs_count(kvlist, PMD_BOND_LSC_POLL_PERIOD_KVARG); + if (arg_count == 1) { + uint32_t lsc_poll_interval_ms; + + if (rte_kvargs_process(kvlist, + PMD_BOND_LSC_POLL_PERIOD_KVARG, + &bond_ethdev_parse_time_ms_kvarg, + &lsc_poll_interval_ms) < 0) { + RTE_BOND_LOG(INFO, + "Invalid lsc polling interval value specified for bonded" + " device %s", name); + return -1; + } + + if (rte_eth_bond_link_monitoring_set(port_id, lsc_poll_interval_ms) + != 0) { + RTE_BOND_LOG(ERR, + "Failed to set lsc monitor polling interval (%u ms) on bonded device %s", + lsc_poll_interval_ms, name); + return -1; + } + } else if (arg_count > 1) { + RTE_BOND_LOG(INFO, + "LSC polling interval can be specified only once for bonded" + " device %s", name); + return -1; + } + + /* Parse link up interrupt propagation delay */ + arg_count = rte_kvargs_count(kvlist, PMD_BOND_LINK_UP_PROP_DELAY_KVARG); + if (arg_count == 1) { + uint32_t link_up_delay_ms; + + if (rte_kvargs_process(kvlist, + PMD_BOND_LINK_UP_PROP_DELAY_KVARG, + &bond_ethdev_parse_time_ms_kvarg, + &link_up_delay_ms) < 0) { + RTE_BOND_LOG(INFO, + "Invalid link up propagation delay value specified for" + " bonded device %s", name); + return -1; + } + + /* Set balance mode transmit policy*/ + if (rte_eth_bond_link_up_prop_delay_set(port_id, link_up_delay_ms) + != 0) { + RTE_BOND_LOG(ERR, + "Failed to set link up propagation delay (%u ms) on bonded" + " device %s", link_up_delay_ms, name); + return -1; + } + } else if (arg_count > 1) { + RTE_BOND_LOG(INFO, + "Link up propagation delay can be specified only once for" + " bonded device %s", name); + return -1; + } + + /* Parse link down interrupt propagation delay */ + arg_count = rte_kvargs_count(kvlist, PMD_BOND_LINK_DOWN_PROP_DELAY_KVARG); + if (arg_count == 1) { + uint32_t link_down_delay_ms; + + if (rte_kvargs_process(kvlist, + PMD_BOND_LINK_DOWN_PROP_DELAY_KVARG, + &bond_ethdev_parse_time_ms_kvarg, + &link_down_delay_ms) < 0) { + RTE_BOND_LOG(INFO, + "Invalid link down propagation delay value specified for" + " bonded device %s", name); + return -1; + } + + /* Set balance mode transmit policy*/ + if (rte_eth_bond_link_down_prop_delay_set(port_id, link_down_delay_ms) + != 0) { + RTE_BOND_LOG(ERR, + "Failed to set link down propagation delay (%u ms) on bonded device %s", + link_down_delay_ms, name); + return -1; + } + } else if (arg_count > 1) { + RTE_BOND_LOG(INFO, + "Link down propagation delay can be specified only once for bonded device %s", + name); + return -1; + } + + return 0; +} + +struct rte_vdev_driver pmd_bond_drv = { + .probe = bond_probe, + .remove = bond_remove, +}; + +RTE_PMD_REGISTER_VDEV(net_bonding, pmd_bond_drv); +RTE_PMD_REGISTER_ALIAS(net_bonding, eth_bond); + +RTE_PMD_REGISTER_PARAM_STRING(net_bonding, + "slave= " + "primary= " + "mode=[0-6] " + "xmit_policy=[l2 | l23 | l34] " + "agg_mode=[count | stable | bandwidth] " + "socket_id= " + "mac= " + "lsc_poll_period_ms= " + "up_delay= " + "down_delay="); + +int bond_logtype; + +RTE_INIT(bond_init_log) +{ + bond_logtype = rte_log_register("pmd.net.bond"); + if (bond_logtype >= 0) + rte_log_set_level(bond_logtype, RTE_LOG_NOTICE); +} diff --git a/src/spdk/dpdk/drivers/net/bonding/rte_pmd_bond_version.map b/src/spdk/dpdk/drivers/net/bonding/rte_pmd_bond_version.map new file mode 100644 index 000000000..270c7d5d5 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/bonding/rte_pmd_bond_version.map 
@@ -0,0 +1,33 @@ +DPDK_20.0 { + global: + + rte_eth_bond_8023ad_agg_selection_get; + rte_eth_bond_8023ad_agg_selection_set; + rte_eth_bond_8023ad_conf_get; + rte_eth_bond_8023ad_dedicated_queues_disable; + rte_eth_bond_8023ad_dedicated_queues_enable; + rte_eth_bond_8023ad_ext_collect; + rte_eth_bond_8023ad_ext_collect_get; + rte_eth_bond_8023ad_ext_distrib; + rte_eth_bond_8023ad_ext_distrib_get; + rte_eth_bond_8023ad_ext_slowtx; + rte_eth_bond_8023ad_setup; + rte_eth_bond_8023ad_slave_info; + rte_eth_bond_active_slaves_get; + rte_eth_bond_create; + rte_eth_bond_free; + rte_eth_bond_link_monitoring_set; + rte_eth_bond_mac_address_reset; + rte_eth_bond_mac_address_set; + rte_eth_bond_mode_get; + rte_eth_bond_mode_set; + rte_eth_bond_primary_get; + rte_eth_bond_primary_set; + rte_eth_bond_slave_add; + rte_eth_bond_slave_remove; + rte_eth_bond_slaves_get; + rte_eth_bond_xmit_policy_get; + rte_eth_bond_xmit_policy_set; + + local: *; +}; diff --git a/src/spdk/dpdk/drivers/net/cxgbe/Makefile b/src/spdk/dpdk/drivers/net/cxgbe/Makefile new file mode 100644 index 000000000..53b2bb56d --- /dev/null +++ b/src/spdk/dpdk/drivers/net/cxgbe/Makefile @@ -0,0 +1,57 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2014-2018 Chelsio Communications. +# All rights reserved. + +include $(RTE_SDK)/mk/rte.vars.mk + +# +# library name +# +LIB = librte_pmd_cxgbe.a + +CFLAGS += -O3 +CFLAGS += $(WERROR_FLAGS) + +EXPORT_MAP := rte_pmd_cxgbe_version.map + +# +# CFLAGS for gcc/clang +# +ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y) +ifeq ($(shell test $(GCC_VERSION) -ge 44 && echo 1), 1) +CFLAGS += -Wno-deprecated +endif +endif + +LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring +LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs +LDLIBS += -lrte_bus_pci + +# +# Add extra flags for base driver files (also known as shared code) +# to disable warnings in them +# +BASE_DRIVER_OBJS=$(sort $(patsubst %.c,%.o,$(notdir $(wildcard $(SRCDIR)/base/*.c)))) +$(foreach obj, $(BASE_DRIVER_OBJS), $(eval CFLAGS_$(obj)+=$(CFLAGS_BASE_DRIVER))) + +VPATH += $(SRCDIR)/base + + +# +# all source are stored in SRCS-y +# +SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += cxgbe_ethdev.c +SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += cxgbevf_ethdev.c +SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += cxgbe_main.c +SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += cxgbevf_main.c +SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += sge.c +SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += cxgbe_filter.c +SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += cxgbe_flow.c +SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += t4_hw.c +SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += clip_tbl.c +SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += mps_tcam.c +SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += l2t.c +SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += smt.c +SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += t4vf_hw.c + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/src/spdk/dpdk/drivers/net/cxgbe/base/adapter.h b/src/spdk/dpdk/drivers/net/cxgbe/base/adapter.h new file mode 100644 index 000000000..62de35c7c --- /dev/null +++ b/src/spdk/dpdk/drivers/net/cxgbe/base/adapter.h @@ -0,0 +1,835 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Chelsio Communications. + * All rights reserved. + */ + +/* This file should not be included directly. Include common.h instead. 
*/ + +#ifndef __T4_ADAPTER_H__ +#define __T4_ADAPTER_H__ + +#include +#include +#include +#include +#include + +#include "../cxgbe_compat.h" +#include "../cxgbe_ofld.h" +#include "t4_regs_values.h" + +enum { + MAX_ETH_QSETS = 64, /* # of Ethernet Tx/Rx queue sets */ + MAX_CTRL_QUEUES = NCHAN, /* # of control Tx queues */ +}; + +struct adapter; +struct sge_rspq; + +enum { + PORT_RSS_DONE = (1 << 0), +}; + +struct port_info { + struct adapter *adapter; /* adapter that this port belongs to */ + struct rte_eth_dev *eth_dev; /* associated rte eth device */ + struct port_stats stats_base; /* port statistics base */ + struct link_config link_cfg; /* link configuration info */ + + unsigned long flags; /* port related flags */ + short int xact_addr_filt; /* index of exact MAC address filter */ + + u16 viid; /* associated virtual interface id */ + s8 mdio_addr; /* address of the PHY */ + u8 port_type; /* firmware port type */ + u8 mod_type; /* firmware module type */ + u8 port_id; /* physical port ID */ + u8 pidx; /* port index for this PF */ + u8 tx_chan; /* associated channel */ + + u8 n_rx_qsets; /* # of rx qsets */ + u8 n_tx_qsets; /* # of tx qsets */ + u8 first_qset; /* index of first qset */ + + u16 *rss; /* rss table */ + u8 rss_mode; /* rss mode */ + u16 rss_size; /* size of VI's RSS table slice */ + u64 rss_hf; /* RSS Hash Function */ + + /* viid fields either returned by fw + * or decoded by parsing viid by driver. + */ + u8 vin; + u8 vivld; +}; + +/* Enable or disable autonegotiation. If this is set to enable, + * the forced link modes above are completely ignored. + */ +#define AUTONEG_DISABLE 0x00 +#define AUTONEG_ENABLE 0x01 + +enum { /* adapter flags */ + FULL_INIT_DONE = (1 << 0), + USING_MSI = (1 << 1), + USING_MSIX = (1 << 2), + FW_QUEUE_BOUND = (1 << 3), + FW_OK = (1 << 4), + CFG_QUEUES = (1 << 5), + MASTER_PF = (1 << 6), +}; + +struct rx_sw_desc { /* SW state per Rx descriptor */ + void *buf; /* struct page or mbuf */ + dma_addr_t dma_addr; +}; + +struct sge_fl { /* SGE free-buffer queue state */ + /* RO fields */ + struct rx_sw_desc *sdesc; /* address of SW Rx descriptor ring */ + + dma_addr_t addr; /* bus address of HW ring start */ + __be64 *desc; /* address of HW Rx descriptor ring */ + + void __iomem *bar2_addr; /* address of BAR2 Queue registers */ + unsigned int bar2_qid; /* Queue ID for BAR2 Queue registers */ + + unsigned int cntxt_id; /* SGE relative QID for the free list */ + unsigned int size; /* capacity of free list */ + + unsigned int avail; /* # of available Rx buffers */ + unsigned int pend_cred; /* new buffers since last FL DB ring */ + unsigned int cidx; /* consumer index */ + unsigned int pidx; /* producer index */ + + unsigned long alloc_failed; /* # of times buffer allocation failed */ + unsigned long low; /* # of times momentarily starving */ +}; + +#define MAX_MBUF_FRAGS (16384 / 512 + 2) + +/* A packet gather list */ +struct pkt_gl { + union { + struct rte_mbuf *mbufs[MAX_MBUF_FRAGS]; + } /* UNNAMED */; + void *va; /* virtual address of first byte */ + unsigned int nfrags; /* # of fragments */ + unsigned int tot_len; /* total length of fragments */ + bool usembufs; /* use mbufs for fragments */ +}; + +typedef int (*rspq_handler_t)(struct sge_rspq *q, const __be64 *rsp, + const struct pkt_gl *gl); + +struct sge_rspq { /* state for an SGE response queue */ + struct adapter *adapter; /* adapter that this queue belongs to */ + struct rte_eth_dev *eth_dev; /* associated rte eth device */ + struct rte_mempool *mb_pool; /* associated mempool */ + + dma_addr_t 
phys_addr; /* physical address of the ring */ + __be64 *desc; /* address of HW response ring */ + const __be64 *cur_desc; /* current descriptor in queue */ + + void __iomem *bar2_addr; /* address of BAR2 Queue registers */ + unsigned int bar2_qid; /* Queue ID for BAR2 Queue registers */ + struct sge_qstat *stat; + + unsigned int cidx; /* consumer index */ + unsigned int gts_idx; /* last gts write sent */ + unsigned int iqe_len; /* entry size */ + unsigned int size; /* capacity of response queue */ + int offset; /* offset into current Rx buffer */ + + u8 gen; /* current generation bit */ + u8 intr_params; /* interrupt holdoff parameters */ + u8 next_intr_params; /* holdoff params for next interrupt */ + u8 pktcnt_idx; /* interrupt packet threshold */ + u8 port_id; /* associated port-id */ + u8 idx; /* queue index within its group */ + u16 cntxt_id; /* SGE relative QID for the response Q */ + u16 abs_id; /* absolute SGE id for the response q */ + + rspq_handler_t handler; /* associated handler for this response q */ +}; + +struct sge_eth_rx_stats { /* Ethernet rx queue statistics */ + u64 pkts; /* # of ethernet packets */ + u64 rx_bytes; /* # of ethernet bytes */ + u64 rx_cso; /* # of Rx checksum offloads */ + u64 vlan_ex; /* # of Rx VLAN extractions */ + u64 rx_drops; /* # of packets dropped due to no mem */ +}; + +struct sge_eth_rxq { /* a SW Ethernet Rx queue */ + struct sge_rspq rspq; + struct sge_fl fl; + struct sge_eth_rx_stats stats; + bool usembufs; /* one ingress packet per mbuf FL buffer */ +} __rte_cache_aligned; + +/* + * Currently there are two types of coalesce WR. Type 0 needs 48 bytes per + * packet (if one sgl is present) and type 1 needs 32 bytes. This means + * that type 0 can fit a maximum of 10 packets per WR and type 1 can fit + * 15 packets. We need to keep track of the mbuf pointers in a coalesce WR + * to be able to free those mbufs when we get completions back from the FW. + * Allocating the maximum number of pointers in every tx desc is a waste + * of memory resources so we only store 2 pointers per tx desc which should + * be enough since a tx desc can only fit 2 packets in the best case + * scenario where a packet needs 32 bytes. 
+ */ +#define ETH_COALESCE_PKT_NUM 15 +#define ETH_COALESCE_VF_PKT_NUM 7 +#define ETH_COALESCE_PKT_PER_DESC 2 + +struct tx_eth_coal_desc { + struct rte_mbuf *mbuf[ETH_COALESCE_PKT_PER_DESC]; + struct ulptx_sgl *sgl[ETH_COALESCE_PKT_PER_DESC]; + int idx; +}; + +struct tx_desc { + __be64 flit[8]; +}; + +struct tx_sw_desc { /* SW state per Tx descriptor */ + struct rte_mbuf *mbuf; + struct ulptx_sgl *sgl; + struct tx_eth_coal_desc coalesce; +}; + +enum { + EQ_STOPPED = (1 << 0), +}; + +struct eth_coalesce { + unsigned char *ptr; + unsigned char type; + unsigned int idx; + unsigned int len; + unsigned int flits; + unsigned int max; + __u8 ethmacdst[ETHER_ADDR_LEN]; + __u8 ethmacsrc[ETHER_ADDR_LEN]; + __be16 ethtype; + __be16 vlantci; +}; + +struct sge_txq { + struct tx_desc *desc; /* address of HW Tx descriptor ring */ + struct tx_sw_desc *sdesc; /* address of SW Tx descriptor ring */ + struct sge_qstat *stat; /* queue status entry */ + struct eth_coalesce coalesce; /* coalesce info */ + + uint64_t phys_addr; /* physical address of the ring */ + + void __iomem *bar2_addr; /* address of BAR2 Queue registers */ + unsigned int bar2_qid; /* Queue ID for BAR2 Queue registers */ + + unsigned int cntxt_id; /* SGE relative QID for the Tx Q */ + unsigned int in_use; /* # of in-use Tx descriptors */ + unsigned int size; /* # of descriptors */ + unsigned int cidx; /* SW consumer index */ + unsigned int pidx; /* producer index */ + unsigned int dbidx; /* last idx when db ring was done */ + unsigned int equeidx; /* last sent credit request */ + unsigned int last_pidx; /* last pidx recorded by tx monitor */ + unsigned int last_coal_idx;/* last coal-idx recorded by tx monitor */ + unsigned int abs_id; + + int db_disabled; /* doorbell state */ + unsigned short db_pidx; /* doorbell producer index */ + unsigned short db_pidx_inc; /* doorbell producer increment */ +}; + +struct sge_eth_tx_stats { /* Ethernet tx queue statistics */ + u64 pkts; /* # of ethernet packets */ + u64 tx_bytes; /* # of ethernet bytes */ + u64 tso; /* # of TSO requests */ + u64 tx_cso; /* # of Tx checksum offloads */ + u64 vlan_ins; /* # of Tx VLAN insertions */ + u64 mapping_err; /* # of I/O MMU packet mapping errors */ + u64 coal_wr; /* # of coalesced wr */ + u64 coal_pkts; /* # of coalesced packets */ +}; + +struct sge_eth_txq { /* state for an SGE Ethernet Tx queue */ + struct sge_txq q; + struct rte_eth_dev *eth_dev; /* port that this queue belongs to */ + struct rte_eth_dev_data *data; + struct sge_eth_tx_stats stats; /* queue statistics */ + rte_spinlock_t txq_lock; + + unsigned int flags; /* flags for state of the queue */ +} __rte_cache_aligned; + +struct sge_ctrl_txq { /* State for an SGE control Tx queue */ + struct sge_txq q; /* txq */ + struct adapter *adapter; /* adapter associated with this queue */ + rte_spinlock_t ctrlq_lock; /* control queue lock */ + u8 full; /* the Tx ring is full */ + u64 txp; /* number of transmits */ + struct rte_mempool *mb_pool; /* mempool to generate ctrl pkts */ +} __rte_cache_aligned; + +struct sge { + struct sge_eth_txq ethtxq[MAX_ETH_QSETS]; + struct sge_eth_rxq ethrxq[MAX_ETH_QSETS]; + struct sge_rspq fw_evtq __rte_cache_aligned; + struct sge_ctrl_txq ctrlq[MAX_CTRL_QUEUES]; + + u16 max_ethqsets; /* # of available Ethernet queue sets */ + u32 stat_len; /* length of status page at ring end */ + u32 pktshift; /* padding between CPL & packet data */ + + /* response queue interrupt parameters */ + u16 timer_val[SGE_NTIMERS]; + u8 counter_val[SGE_NCOUNTERS]; + + u32 fl_align; /* response queue 
message alignment */ + u32 fl_pg_order; /* large page allocation size */ + u32 fl_starve_thres; /* Free List starvation threshold */ +}; + +#define T4_OS_NEEDS_MBOX_LOCKING 1 + +/* + * OS Lock/List primitives for those interfaces in the Common Code which + * need this. + */ + +struct mbox_entry { + TAILQ_ENTRY(mbox_entry) next; +}; + +TAILQ_HEAD(mbox_list, mbox_entry); + +struct adapter_devargs { + bool keep_ovlan; + bool force_link_up; + bool tx_mode_latency; + u32 filtermode; + u32 filtermask; +}; + +struct adapter { + struct rte_pci_device *pdev; /* associated rte pci device */ + struct rte_eth_dev *eth_dev; /* first port's rte eth device */ + struct adapter_params params; /* adapter parameters */ + struct port_info *port[MAX_NPORTS];/* ports belonging to this adapter */ + struct sge sge; /* associated SGE */ + + /* support for single-threading access to adapter mailbox registers */ + struct mbox_list mbox_list; + rte_spinlock_t mbox_lock; + + u8 *regs; /* pointer to registers region */ + u8 *bar2; /* pointer to bar2 region */ + unsigned long flags; /* adapter flags */ + unsigned int mbox; /* associated mailbox */ + unsigned int pf; /* associated physical function id */ + + unsigned int vpd_busy; + unsigned int vpd_flag; + + int use_unpacked_mode; /* unpacked rx mode state */ + rte_spinlock_t win0_lock; + + rte_spinlock_t flow_lock; /* Serialize access for rte_flow ops */ + + unsigned int clipt_start; /* CLIP table start */ + unsigned int clipt_end; /* CLIP table end */ + unsigned int l2t_start; /* Layer 2 table start */ + unsigned int l2t_end; /* Layer 2 table end */ + struct clip_tbl *clipt; /* CLIP table */ + struct l2t_data *l2t; /* Layer 2 table */ + struct smt_data *smt; /* Source mac table */ + struct mpstcam_table *mpstcam; + + struct tid_info tids; /* Info used to access TID related tables */ + + struct adapter_devargs devargs; +}; + +/** + * t4_os_rwlock_init - initialize rwlock + * @lock: the rwlock + */ +static inline void t4_os_rwlock_init(rte_rwlock_t *lock) +{ + rte_rwlock_init(lock); +} + +/** + * t4_os_write_lock - get a write lock + * @lock: the rwlock + */ +static inline void t4_os_write_lock(rte_rwlock_t *lock) +{ + rte_rwlock_write_lock(lock); +} + +/** + * t4_os_write_unlock - unlock a write lock + * @lock: the rwlock + */ +static inline void t4_os_write_unlock(rte_rwlock_t *lock) +{ + rte_rwlock_write_unlock(lock); +} + +/** + * ethdev2pinfo - return the port_info structure associated with a rte_eth_dev + * @dev: the rte_eth_dev + * + * Return the struct port_info associated with a rte_eth_dev + */ +static inline struct port_info *ethdev2pinfo(const struct rte_eth_dev *dev) +{ + return dev->data->dev_private; +} + +/** + * adap2pinfo - return the port_info of a port + * @adap: the adapter + * @idx: the port index + * + * Return the port_info structure for the port of the given index. 
+ */ +static inline struct port_info *adap2pinfo(const struct adapter *adap, int idx) +{ + return adap->port[idx]; +} + +/** + * ethdev2adap - return the adapter structure associated with a rte_eth_dev + * @dev: the rte_eth_dev + * + * Return the struct adapter associated with a rte_eth_dev + */ +static inline struct adapter *ethdev2adap(const struct rte_eth_dev *dev) +{ + return ethdev2pinfo(dev)->adapter; +} + +#define CXGBE_PCI_REG(reg) rte_read32(reg) + +static inline uint64_t cxgbe_read_addr64(volatile void *addr) +{ + uint64_t val = CXGBE_PCI_REG(addr); + uint64_t val2 = CXGBE_PCI_REG(((volatile uint8_t *)(addr) + 4)); + + val2 = (uint64_t)(val2 << 32); + val += val2; + return val; +} + +static inline uint32_t cxgbe_read_addr(volatile void *addr) +{ + return CXGBE_PCI_REG(addr); +} + +#define CXGBE_PCI_REG_ADDR(adap, reg) \ + ((volatile uint32_t *)((char *)(adap)->regs + (reg))) + +#define CXGBE_READ_REG(adap, reg) \ + cxgbe_read_addr(CXGBE_PCI_REG_ADDR((adap), (reg))) + +#define CXGBE_READ_REG64(adap, reg) \ + cxgbe_read_addr64(CXGBE_PCI_REG_ADDR((adap), (reg))) + +#define CXGBE_PCI_REG_WRITE(reg, value) rte_write32((value), (reg)) + +#define CXGBE_PCI_REG_WRITE_RELAXED(reg, value) \ + rte_write32_relaxed((value), (reg)) + +#define CXGBE_WRITE_REG(adap, reg, value) \ + CXGBE_PCI_REG_WRITE(CXGBE_PCI_REG_ADDR((adap), (reg)), (value)) + +#define CXGBE_WRITE_REG_RELAXED(adap, reg, value) \ + CXGBE_PCI_REG_WRITE_RELAXED(CXGBE_PCI_REG_ADDR((adap), (reg)), (value)) + +static inline uint64_t cxgbe_write_addr64(volatile void *addr, uint64_t val) +{ + CXGBE_PCI_REG_WRITE(addr, val); + CXGBE_PCI_REG_WRITE(((volatile uint8_t *)(addr) + 4), (val >> 32)); + return val; +} + +#define CXGBE_WRITE_REG64(adap, reg, value) \ + cxgbe_write_addr64(CXGBE_PCI_REG_ADDR((adap), (reg)), (value)) + +/** + * t4_read_reg - read a HW register + * @adapter: the adapter + * @reg_addr: the register address + * + * Returns the 32-bit value of the given HW register. + */ +static inline u32 t4_read_reg(struct adapter *adapter, u32 reg_addr) +{ + return CXGBE_READ_REG(adapter, reg_addr); +} + +/** + * t4_write_reg - write a HW register with barrier + * @adapter: the adapter + * @reg_addr: the register address + * @val: the value to write + * + * Write a 32-bit value into the given HW register. + */ +static inline void t4_write_reg(struct adapter *adapter, u32 reg_addr, u32 val) +{ + CXGBE_WRITE_REG(adapter, reg_addr, val); +} + +/** + * t4_write_reg_relaxed - write a HW register with no barrier + * @adapter: the adapter + * @reg_addr: the register address + * @val: the value to write + * + * Write a 32-bit value into the given HW register. + */ +static inline void t4_write_reg_relaxed(struct adapter *adapter, u32 reg_addr, + u32 val) +{ + CXGBE_WRITE_REG_RELAXED(adapter, reg_addr, val); +} + +/** + * t4_read_reg64 - read a 64-bit HW register + * @adapter: the adapter + * @reg_addr: the register address + * + * Returns the 64-bit value of the given HW register. + */ +static inline u64 t4_read_reg64(struct adapter *adapter, u32 reg_addr) +{ + return CXGBE_READ_REG64(adapter, reg_addr); +} + +/** + * t4_write_reg64 - write a 64-bit HW register + * @adapter: the adapter + * @reg_addr: the register address + * @val: the value to write + * + * Write a 64-bit value into the given HW register. 
+ */ +static inline void t4_write_reg64(struct adapter *adapter, u32 reg_addr, + u64 val) +{ + CXGBE_WRITE_REG64(adapter, reg_addr, val); +} + +#define PCI_STATUS 0x06 /* 16 bits */ +#define PCI_STATUS_CAP_LIST 0x10 /* Support Capability List */ +#define PCI_CAPABILITY_LIST 0x34 +/* Offset of first capability list entry */ +#define PCI_CAP_ID_EXP 0x10 /* PCI Express */ +#define PCI_CAP_LIST_ID 0 /* Capability ID */ +#define PCI_CAP_LIST_NEXT 1 /* Next capability in the list */ +#define PCI_EXP_DEVCTL 0x0008 /* Device control */ +#define PCI_EXP_DEVCTL2 40 /* Device Control 2 */ +#define PCI_EXP_DEVCTL_EXT_TAG 0x0100 /* Extended Tag Field Enable */ +#define PCI_EXP_DEVCTL_PAYLOAD 0x00E0 /* Max payload */ +#define PCI_CAP_ID_VPD 0x03 /* Vital Product Data */ +#define PCI_VPD_ADDR 2 /* Address to access (15 bits!) */ +#define PCI_VPD_ADDR_F 0x8000 /* Write 0, 1 indicates completion */ +#define PCI_VPD_DATA 4 /* 32-bits of data returned here */ + +/** + * t4_os_pci_write_cfg4 - 32-bit write to PCI config space + * @adapter: the adapter + * @addr: the register address + * @val: the value to write + * + * Write a 32-bit value into the given register in PCI config space. + */ +static inline void t4_os_pci_write_cfg4(struct adapter *adapter, size_t addr, + off_t val) +{ + u32 val32 = val; + + if (rte_pci_write_config(adapter->pdev, &val32, sizeof(val32), + addr) < 0) + dev_err(adapter, "Can't write to PCI config space\n"); +} + +/** + * t4_os_pci_read_cfg4 - read a 32-bit value from PCI config space + * @adapter: the adapter + * @addr: the register address + * @val: where to store the value read + * + * Read a 32-bit value from the given register in PCI config space. + */ +static inline void t4_os_pci_read_cfg4(struct adapter *adapter, size_t addr, + u32 *val) +{ + if (rte_pci_read_config(adapter->pdev, val, sizeof(*val), + addr) < 0) + dev_err(adapter, "Can't read from PCI config space\n"); +} + +/** + * t4_os_pci_write_cfg2 - 16-bit write to PCI config space + * @adapter: the adapter + * @addr: the register address + * @val: the value to write + * + * Write a 16-bit value into the given register in PCI config space. + */ +static inline void t4_os_pci_write_cfg2(struct adapter *adapter, size_t addr, + off_t val) +{ + u16 val16 = val; + + if (rte_pci_write_config(adapter->pdev, &val16, sizeof(val16), + addr) < 0) + dev_err(adapter, "Can't write to PCI config space\n"); +} + +/** + * t4_os_pci_read_cfg2 - read a 16-bit value from PCI config space + * @adapter: the adapter + * @addr: the register address + * @val: where to store the value read + * + * Read a 16-bit value from the given register in PCI config space. + */ +static inline void t4_os_pci_read_cfg2(struct adapter *adapter, size_t addr, + u16 *val) +{ + if (rte_pci_read_config(adapter->pdev, val, sizeof(*val), + addr) < 0) + dev_err(adapter, "Can't read from PCI config space\n"); +} + +/** + * t4_os_pci_read_cfg - read a 8-bit value from PCI config space + * @adapter: the adapter + * @addr: the register address + * @val: where to store the value read + * + * Read a 8-bit value from the given register in PCI config space. 
+ */ +static inline void t4_os_pci_read_cfg(struct adapter *adapter, size_t addr, + u8 *val) +{ + if (rte_pci_read_config(adapter->pdev, val, sizeof(*val), + addr) < 0) + dev_err(adapter, "Can't read from PCI config space\n"); +} + +/** + * t4_os_find_pci_capability - lookup a capability in the PCI capability list + * @adapter: the adapter + * @cap: the capability + * + * Return the address of the given capability within the PCI capability list. + */ +static inline int t4_os_find_pci_capability(struct adapter *adapter, int cap) +{ + u16 status; + int ttl = 48; + u8 pos = 0; + u8 id = 0; + + t4_os_pci_read_cfg2(adapter, PCI_STATUS, &status); + if (!(status & PCI_STATUS_CAP_LIST)) { + dev_err(adapter, "PCIe capability reading failed\n"); + return -1; + } + + t4_os_pci_read_cfg(adapter, PCI_CAPABILITY_LIST, &pos); + while (ttl-- && pos >= 0x40) { + pos &= ~3; + t4_os_pci_read_cfg(adapter, (pos + PCI_CAP_LIST_ID), &id); + + if (id == 0xff) + break; + + if (id == cap) + return (int)pos; + + t4_os_pci_read_cfg(adapter, (pos + PCI_CAP_LIST_NEXT), &pos); + } + return 0; +} + +/** + * t4_os_set_hw_addr - store a port's MAC address in SW + * @adapter: the adapter + * @port_idx: the port index + * @hw_addr: the Ethernet address + * + * Store the Ethernet address of the given port in SW. Called by the + * common code when it retrieves a port's Ethernet address from EEPROM. + */ +static inline void t4_os_set_hw_addr(struct adapter *adapter, int port_idx, + u8 hw_addr[]) +{ + struct port_info *pi = adap2pinfo(adapter, port_idx); + + rte_ether_addr_copy((struct rte_ether_addr *)hw_addr, + &pi->eth_dev->data->mac_addrs[0]); +} + +/** + * t4_os_lock_init - initialize spinlock + * @lock: the spinlock + */ +static inline void t4_os_lock_init(rte_spinlock_t *lock) +{ + rte_spinlock_init(lock); +} + +/** + * t4_os_lock - spin until lock is acquired + * @lock: the spinlock + */ +static inline void t4_os_lock(rte_spinlock_t *lock) +{ + rte_spinlock_lock(lock); +} + +/** + * t4_os_unlock - unlock a spinlock + * @lock: the spinlock + */ +static inline void t4_os_unlock(rte_spinlock_t *lock) +{ + rte_spinlock_unlock(lock); +} + +/** + * t4_os_trylock - try to get a lock + * @lock: the spinlock + */ +static inline int t4_os_trylock(rte_spinlock_t *lock) +{ + return rte_spinlock_trylock(lock); +} + +/** + * t4_os_init_list_head - initialize + * @head: head of list to initialize [to empty] + */ +static inline void t4_os_init_list_head(struct mbox_list *head) +{ + TAILQ_INIT(head); +} + +static inline struct mbox_entry *t4_os_list_first_entry(struct mbox_list *head) +{ + return TAILQ_FIRST(head); +} + +/** + * t4_os_atomic_add_tail - Enqueue list element atomically onto list + * @new: the entry to be addded to the queue + * @head: current head of the linked list + * @lock: lock to use to guarantee atomicity + */ +static inline void t4_os_atomic_add_tail(struct mbox_entry *entry, + struct mbox_list *head, + rte_spinlock_t *lock) +{ + t4_os_lock(lock); + TAILQ_INSERT_TAIL(head, entry, next); + t4_os_unlock(lock); +} + +/** + * t4_os_atomic_list_del - Dequeue list element atomically from list + * @entry: the entry to be remove/dequeued from the list. 
+ * @lock: the spinlock + */ +static inline void t4_os_atomic_list_del(struct mbox_entry *entry, + struct mbox_list *head, + rte_spinlock_t *lock) +{ + t4_os_lock(lock); + TAILQ_REMOVE(head, entry, next); + t4_os_unlock(lock); +} + +/** + * t4_init_completion - initialize completion + * @c: the completion context + */ +static inline void t4_init_completion(struct t4_completion *c) +{ + c->done = 0; + t4_os_lock_init(&c->lock); +} + +/** + * t4_complete - set completion as done + * @c: the completion context + */ +static inline void t4_complete(struct t4_completion *c) +{ + t4_os_lock(&c->lock); + c->done = 1; + t4_os_unlock(&c->lock); +} + +/** + * cxgbe_port_viid - get the VI id of a port + * @dev: the device for the port + * + * Return the VI id of the given port. + */ +static inline unsigned int cxgbe_port_viid(const struct rte_eth_dev *dev) +{ + return ethdev2pinfo(dev)->viid; +} + +void *t4_alloc_mem(size_t size); +void t4_free_mem(void *addr); +#define t4_os_alloc(_size) t4_alloc_mem((_size)) +#define t4_os_free(_ptr) t4_free_mem((_ptr)) + +void t4_os_portmod_changed(const struct adapter *adap, int port_id); +void t4_os_link_changed(struct adapter *adap, int port_id, int link_stat); + +void reclaim_completed_tx(struct sge_txq *q); +void t4_free_sge_resources(struct adapter *adap); +void t4_sge_tx_monitor_start(struct adapter *adap); +void t4_sge_tx_monitor_stop(struct adapter *adap); +int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf, + uint16_t nb_pkts); +int t4_mgmt_tx(struct sge_ctrl_txq *txq, struct rte_mbuf *mbuf); +int t4_sge_init(struct adapter *adap); +int t4vf_sge_init(struct adapter *adap); +int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq, + struct rte_eth_dev *eth_dev, uint16_t queue_id, + unsigned int iqid, int socket_id); +int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq, + struct rte_eth_dev *eth_dev, uint16_t queue_id, + unsigned int iqid, int socket_id); +int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *rspq, bool fwevtq, + struct rte_eth_dev *eth_dev, int intr_idx, + struct sge_fl *fl, rspq_handler_t handler, + int cong, struct rte_mempool *mp, int queue_id, + int socket_id); +int t4_sge_eth_txq_start(struct sge_eth_txq *txq); +int t4_sge_eth_txq_stop(struct sge_eth_txq *txq); +void t4_sge_eth_txq_release(struct adapter *adap, struct sge_eth_txq *txq); +int t4_sge_eth_rxq_start(struct adapter *adap, struct sge_rspq *rq); +int t4_sge_eth_rxq_stop(struct adapter *adap, struct sge_rspq *rq); +void t4_sge_eth_rxq_release(struct adapter *adap, struct sge_eth_rxq *rxq); +void t4_sge_eth_clear_queues(struct port_info *pi); +int cxgb4_set_rspq_intr_params(struct sge_rspq *q, unsigned int us, + unsigned int cnt); +int cxgbe_poll(struct sge_rspq *q, struct rte_mbuf **rx_pkts, + unsigned int budget, unsigned int *work_done); +int cxgbe_write_rss(const struct port_info *pi, const u16 *queues); +int cxgbe_write_rss_conf(const struct port_info *pi, uint64_t flags); + +#endif /* __T4_ADAPTER_H__ */ diff --git a/src/spdk/dpdk/drivers/net/cxgbe/base/common.h b/src/spdk/dpdk/drivers/net/cxgbe/base/common.h new file mode 100644 index 000000000..79c8fcb76 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/cxgbe/base/common.h @@ -0,0 +1,554 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Chelsio Communications. + * All rights reserved. 
+ */ + +#ifndef __CHELSIO_COMMON_H +#define __CHELSIO_COMMON_H + +#include "../cxgbe_compat.h" +#include "t4_hw.h" +#include "t4vf_hw.h" +#include "t4_chip_type.h" +#include "t4fw_interface.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define CXGBE_PAGE_SIZE RTE_PGSIZE_4K + +#define T4_MEMORY_WRITE 0 +#define T4_MEMORY_READ 1 + +enum { + MAX_NPORTS = 4, /* max # of ports */ +}; + +enum { + T5_REGMAP_SIZE = (332 * 1024), +}; + +enum { + MEMWIN0_APERTURE = 2048, + MEMWIN0_BASE = 0x1b800, +}; + +enum dev_master { MASTER_CANT, MASTER_MAY, MASTER_MUST }; + +enum dev_state { DEV_STATE_UNINIT, DEV_STATE_INIT, DEV_STATE_ERR }; + +enum cc_pause { + PAUSE_RX = 1 << 0, + PAUSE_TX = 1 << 1, + PAUSE_AUTONEG = 1 << 2 +}; + +enum cc_fec { + FEC_AUTO = 1 << 0, /* IEEE 802.3 "automatic" */ + FEC_RS = 1 << 1, /* Reed-Solomon */ + FEC_BASER_RS = 1 << 2, /* BaseR/Reed-Solomon */ +}; + +enum { MEM_EDC0, MEM_EDC1, MEM_MC, MEM_MC0 = MEM_MC, MEM_MC1 }; + +struct port_stats { + u64 tx_octets; /* total # of octets in good frames */ + u64 tx_frames; /* all good frames */ + u64 tx_bcast_frames; /* all broadcast frames */ + u64 tx_mcast_frames; /* all multicast frames */ + u64 tx_ucast_frames; /* all unicast frames */ + u64 tx_error_frames; /* all error frames */ + + u64 tx_frames_64; /* # of Tx frames in a particular range */ + u64 tx_frames_65_127; + u64 tx_frames_128_255; + u64 tx_frames_256_511; + u64 tx_frames_512_1023; + u64 tx_frames_1024_1518; + u64 tx_frames_1519_max; + + u64 tx_drop; /* # of dropped Tx frames */ + u64 tx_pause; /* # of transmitted pause frames */ + u64 tx_ppp0; /* # of transmitted PPP prio 0 frames */ + u64 tx_ppp1; /* # of transmitted PPP prio 1 frames */ + u64 tx_ppp2; /* # of transmitted PPP prio 2 frames */ + u64 tx_ppp3; /* # of transmitted PPP prio 3 frames */ + u64 tx_ppp4; /* # of transmitted PPP prio 4 frames */ + u64 tx_ppp5; /* # of transmitted PPP prio 5 frames */ + u64 tx_ppp6; /* # of transmitted PPP prio 6 frames */ + u64 tx_ppp7; /* # of transmitted PPP prio 7 frames */ + + u64 rx_octets; /* total # of octets in good frames */ + u64 rx_frames; /* all good frames */ + u64 rx_bcast_frames; /* all broadcast frames */ + u64 rx_mcast_frames; /* all multicast frames */ + u64 rx_ucast_frames; /* all unicast frames */ + u64 rx_too_long; /* # of frames exceeding MTU */ + u64 rx_jabber; /* # of jabber frames */ + u64 rx_fcs_err; /* # of received frames with bad FCS */ + u64 rx_len_err; /* # of received frames with length error */ + u64 rx_symbol_err; /* symbol errors */ + u64 rx_runt; /* # of short frames */ + + u64 rx_frames_64; /* # of Rx frames in a particular range */ + u64 rx_frames_65_127; + u64 rx_frames_128_255; + u64 rx_frames_256_511; + u64 rx_frames_512_1023; + u64 rx_frames_1024_1518; + u64 rx_frames_1519_max; + + u64 rx_pause; /* # of received pause frames */ + u64 rx_ppp0; /* # of received PPP prio 0 frames */ + u64 rx_ppp1; /* # of received PPP prio 1 frames */ + u64 rx_ppp2; /* # of received PPP prio 2 frames */ + u64 rx_ppp3; /* # of received PPP prio 3 frames */ + u64 rx_ppp4; /* # of received PPP prio 4 frames */ + u64 rx_ppp5; /* # of received PPP prio 5 frames */ + u64 rx_ppp6; /* # of received PPP prio 6 frames */ + u64 rx_ppp7; /* # of received PPP prio 7 frames */ + + u64 rx_ovflow0; /* drops due to buffer-group 0 overflows */ + u64 rx_ovflow1; /* drops due to buffer-group 1 overflows */ + u64 rx_ovflow2; /* drops due to buffer-group 2 overflows */ + u64 rx_ovflow3; /* drops due to buffer-group 3 overflows */ + u64 rx_trunc0; /* buffer-group 0 truncated 
packets */ + u64 rx_trunc1; /* buffer-group 1 truncated packets */ + u64 rx_trunc2; /* buffer-group 2 truncated packets */ + u64 rx_trunc3; /* buffer-group 3 truncated packets */ +}; + +struct sge_params { + u32 hps; /* host page size for our PF/VF */ + u32 eq_qpp; /* egress queues/page for our PF/VF */ + u32 iq_qpp; /* egress queues/page for our PF/VF */ +}; + +struct tp_params { + unsigned int ntxchan; /* # of Tx channels */ + unsigned int tre; /* log2 of core clocks per TP tick */ + unsigned int dack_re; /* DACK timer resolution */ + unsigned int la_mask; /* what events are recorded by TP LA */ + unsigned short tx_modq[NCHAN]; /* channel to modulation queue map */ + + u32 vlan_pri_map; /* cached TP_VLAN_PRI_MAP */ + u32 filter_mask; + u32 ingress_config; /* cached TP_INGRESS_CONFIG */ + + /* cached TP_OUT_CONFIG compressed error vector + * and passing outer header info for encapsulated packets. + */ + int rx_pkt_encap; + + /* + * TP_VLAN_PRI_MAP Compressed Filter Tuple field offsets. This is a + * subset of the set of fields which may be present in the Compressed + * Filter Tuple portion of filters and TCP TCB connections. The + * fields which are present are controlled by the TP_VLAN_PRI_MAP. + * Since a variable number of fields may or may not be present, their + * shifted field positions within the Compressed Filter Tuple may + * vary, or not even be present if the field isn't selected in + * TP_VLAN_PRI_MAP. Since some of these fields are needed in various + * places we store their offsets here, or a -1 if the field isn't + * present. + */ + int vlan_shift; + int vnic_shift; + int port_shift; + int protocol_shift; + int ethertype_shift; + int macmatch_shift; + int tos_shift; + + u64 hash_filter_mask; +}; + +struct vpd_params { + unsigned int cclk; +}; + +struct pci_params { + uint16_t vendor_id; + uint16_t device_id; + uint32_t vpd_cap_addr; + uint16_t speed; + uint8_t width; +}; + +/* + * Firmware device log. + */ +struct devlog_params { + u32 memtype; /* which memory (EDC0, EDC1, MC) */ + u32 start; /* start of log in firmware memory */ + u32 size; /* size of log */ +}; + +struct arch_specific_params { + u8 nchan; + u16 mps_rplc_size; + u16 vfcount; + u32 sge_fl_db; + u16 mps_tcam_size; +}; + +/* + * Global Receive Side Scaling (RSS) parameters in host-native format. + */ +struct rss_params { + unsigned int mode; /* RSS mode */ + union { + struct { + uint synmapen:1; /* SYN Map Enable */ + uint syn4tupenipv6:1; /* en 4-tuple IPv6 SYNs hash */ + uint syn2tupenipv6:1; /* en 2-tuple IPv6 SYNs hash */ + uint syn4tupenipv4:1; /* en 4-tuple IPv4 SYNs hash */ + uint syn2tupenipv4:1; /* en 2-tuple IPv4 SYNs hash */ + uint ofdmapen:1; /* Offload Map Enable */ + uint tnlmapen:1; /* Tunnel Map Enable */ + uint tnlalllookup:1; /* Tunnel All Lookup */ + uint hashtoeplitz:1; /* use Toeplitz hash */ + } basicvirtual; + } u; +}; + +/* + * Maximum resources provisioned for a PCI PF. + */ +struct pf_resources { + unsigned int neq; /* N egress Qs */ + unsigned int niqflint; /* N ingress Qs/w free list(s) & intr */ +}; + +/* + * Maximum resources provisioned for a PCI VF. 
+ */ +struct vf_resources { + unsigned int nvi; /* N virtual interfaces */ + unsigned int neq; /* N egress Qs */ + unsigned int nethctrl; /* N egress ETH or CTRL Qs */ + unsigned int niqflint; /* N ingress Qs/w free list(s) & intr */ + unsigned int niq; /* N ingress Qs */ + unsigned int tc; /* PCI-E traffic class */ + unsigned int pmask; /* port access rights mask */ + unsigned int nexactf; /* N exact MPS filters */ + unsigned int r_caps; /* read capabilities */ + unsigned int wx_caps; /* write/execute capabilities */ +}; + +struct adapter_params { + struct sge_params sge; + struct tp_params tp; + struct vpd_params vpd; + struct pci_params pci; + struct devlog_params devlog; + struct rss_params rss; + struct pf_resources pfres; + struct vf_resources vfres; + enum pcie_memwin drv_memwin; + + unsigned int sf_size; /* serial flash size in bytes */ + unsigned int sf_nsec; /* # of flash sectors */ + + unsigned int fw_vers; + unsigned int bs_vers; + unsigned int tp_vers; + unsigned int er_vers; + + unsigned short mtus[NMTUS]; + unsigned short a_wnd[NCCTRL_WIN]; + unsigned short b_wnd[NCCTRL_WIN]; + + unsigned int mc_size; /* MC memory size */ + unsigned int cim_la_size; + + unsigned char nports; /* # of ethernet ports */ + unsigned char portvec; + + unsigned char hash_filter; + + enum chip_type chip; /* chip code */ + struct arch_specific_params arch; /* chip specific params */ + + bool ulptx_memwrite_dsgl; /* use of T5 DSGL allowed */ + u8 fw_caps_support; /* 32-bit Port Capabilities */ + u8 filter2_wr_support; /* FW support for FILTER2_WR */ + u32 viid_smt_extn_support:1; /* FW returns vin and smt index */ + u32 max_tx_coalesce_num; /* Max # of Tx packets that can be coalesced */ +}; + +/* Firmware Port Capabilities types. + */ +typedef u16 fw_port_cap16_t; /* 16-bit Port Capabilities integral value */ +typedef u32 fw_port_cap32_t; /* 32-bit Port Capabilities integral value */ + +enum fw_caps { + FW_CAPS_UNKNOWN = 0, /* 0'ed out initial state */ + FW_CAPS16 = 1, /* old Firmware: 16-bit Port Capabilities */ + FW_CAPS32 = 2, /* new Firmware: 32-bit Port Capabilities */ +}; + +struct link_config { + fw_port_cap32_t pcaps; /* link capabilities */ + fw_port_cap32_t acaps; /* advertised capabilities */ + + u32 requested_speed; /* speed (Mb/s) user has requested */ + u32 speed; /* actual link speed (Mb/s) */ + + enum cc_pause requested_fc; /* flow control user has requested */ + enum cc_pause fc; /* actual link flow control */ + + enum cc_fec auto_fec; /* Forward Error Correction + * "automatic" (IEEE 802.3) + */ + enum cc_fec requested_fec; /* Forward Error Correction requested */ + enum cc_fec fec; /* Forward Error Correction actual */ + + unsigned char autoneg; /* autonegotiating? */ + + unsigned char link_ok; /* link up? 
*/ + unsigned char link_down_rc; /* link down reason */ +}; + +#include "adapter.h" + +void t4_set_reg_field(struct adapter *adap, unsigned int addr, u32 mask, + u32 val); +int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask, + int polarity, + int attempts, int delay, u32 *valp); + +static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask, + int polarity, int attempts, int delay) +{ + return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts, + delay, NULL); +} + +static inline int is_pf4(struct adapter *adap) +{ + return adap->pf == 4; +} + +#define for_each_port(adapter, iter) \ + for (iter = 0; iter < (adapter)->params.nports; ++iter) + +static inline int is_hashfilter(const struct adapter *adap) +{ + return adap->params.hash_filter; +} + +void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log); +void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr, + unsigned int mask, unsigned int val); +void t4_intr_enable(struct adapter *adapter); +void t4_intr_disable(struct adapter *adapter); +int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port, + struct link_config *lc); +void t4_load_mtus(struct adapter *adap, const unsigned short *mtus, + const unsigned short *alpha, const unsigned short *beta); +int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox, + enum dev_master master, enum dev_state *state); +int t4_fw_bye(struct adapter *adap, unsigned int mbox); +int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset); +int t4vf_fw_reset(struct adapter *adap); +int t4_fw_halt(struct adapter *adap, unsigned int mbox, int reset); +int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset); +int t4_fl_pkt_align(struct adapter *adap); +int t4vf_fl_pkt_align(struct adapter *adap, u32 sge_control, u32 sge_control2); +int t4vf_get_vfres(struct adapter *adap); +int t4_fixup_host_params_compat(struct adapter *adap, unsigned int page_size, + unsigned int cache_line_size, + enum chip_type chip_compat); +int t4_fixup_host_params(struct adapter *adap, unsigned int page_size, + unsigned int cache_line_size); +int t4_fw_initialize(struct adapter *adap, unsigned int mbox); +int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf, + unsigned int vf, unsigned int nparams, const u32 *params, + u32 *val); +int t4vf_query_params(struct adapter *adap, unsigned int nparams, + const u32 *params, u32 *vals); +int t4vf_get_dev_params(struct adapter *adap); +int t4vf_get_vpd_params(struct adapter *adap); +int t4vf_get_rss_glb_config(struct adapter *adap); +int t4vf_set_params(struct adapter *adapter, unsigned int nparams, + const u32 *params, const u32 *vals); +int t4_set_params_timeout(struct adapter *adap, unsigned int mbox, + unsigned int pf, unsigned int vf, + unsigned int nparams, const u32 *params, + const u32 *val, int timeout); +int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf, + unsigned int vf, unsigned int nparams, const u32 *params, + const u32 *val); +int t4_alloc_vi_func(struct adapter *adap, unsigned int mbox, + unsigned int port, unsigned int pf, unsigned int vf, + unsigned int nmac, u8 *mac, unsigned int *rss_size, + unsigned int portfunc, unsigned int idstype, + u8 *vivld, u8 *vin); +int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port, + unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac, + unsigned int *rss_size, u8 *vivild, u8 *vin); +int t4_free_vi(struct adapter *adap, unsigned int mbox, 
+ unsigned int pf, unsigned int vf, + unsigned int viid); +int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid, + int mtu, int promisc, int all_multi, int bcast, int vlanex, + bool sleep_ok); +int t4_free_raw_mac_filt(struct adapter *adap, unsigned int viid, + const u8 *addr, const u8 *mask, unsigned int idx, + u8 lookup_type, u8 port_id, bool sleep_ok); +int t4_alloc_raw_mac_filt(struct adapter *adap, unsigned int viid, + const u8 *addr, const u8 *mask, unsigned int idx, + u8 lookup_type, u8 port_id, bool sleep_ok); +int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid, + int idx, const u8 *addr, bool persist, bool add_smt); +int t4_enable_vi_params(struct adapter *adap, unsigned int mbox, + unsigned int viid, bool rx_en, bool tx_en, bool dcb_en); +int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid, + bool rx_en, bool tx_en); +int t4_iq_start_stop(struct adapter *adap, unsigned int mbox, bool start, + unsigned int pf, unsigned int vf, unsigned int iqid, + unsigned int fl0id, unsigned int fl1id); +int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, + unsigned int vf, unsigned int iqtype, unsigned int iqid, + unsigned int fl0id, unsigned int fl1id); +int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, + unsigned int vf, unsigned int eqid); +int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, + unsigned int vf, unsigned int eqid); + +static inline unsigned int core_ticks_per_usec(const struct adapter *adap) +{ + return adap->params.vpd.cclk / 1000; +} + +static inline unsigned int us_to_core_ticks(const struct adapter *adap, + unsigned int us) +{ + return (us * adap->params.vpd.cclk) / 1000; +} + +static inline unsigned int core_ticks_to_us(const struct adapter *adapter, + unsigned int ticks) +{ + /* add Core Clock / 2 to round ticks to nearest uS */ + return ((ticks * 1000 + adapter->params.vpd.cclk / 2) / + adapter->params.vpd.cclk); +} + +int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd, + int size, void *rpl, bool sleep_ok, int timeout); +int t4_wr_mbox_meat(struct adapter *adap, int mbox, + const void __attribute__((__may_alias__)) *cmd, int size, + void *rpl, bool sleep_ok); + +static inline int t4_wr_mbox_timeout(struct adapter *adap, int mbox, + const void *cmd, int size, void *rpl, + int timeout) +{ + return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl, true, + timeout); +} + +int t4_get_core_clock(struct adapter *adapter, struct vpd_params *p); + +static inline int t4_wr_mbox(struct adapter *adap, int mbox, const void *cmd, + int size, void *rpl) +{ + return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, true); +} + +static inline int t4_wr_mbox_ns(struct adapter *adap, int mbox, const void *cmd, + int size, void *rpl) +{ + return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, false); +} + +int t4vf_wr_mbox_core(struct adapter *, const void *, int, void *, bool); + +static inline int t4vf_wr_mbox(struct adapter *adapter, const void *cmd, + int size, void *rpl) +{ + return t4vf_wr_mbox_core(adapter, cmd, size, rpl, true); +} + +static inline int t4vf_wr_mbox_ns(struct adapter *adapter, const void *cmd, + int size, void *rpl) +{ + return t4vf_wr_mbox_core(adapter, cmd, size, rpl, false); +} + + +void t4_read_indirect(struct adapter *adap, unsigned int addr_reg, + unsigned int data_reg, u32 *vals, unsigned int nregs, + unsigned int start_idx); +void t4_write_indirect(struct adapter *adap, unsigned int addr_reg, + 
unsigned int data_reg, const u32 *vals, + unsigned int nregs, unsigned int start_idx); + +int t4_get_vpd_params(struct adapter *adapter, struct vpd_params *p); +int t4_get_pfres(struct adapter *adapter); +int t4_read_flash(struct adapter *adapter, unsigned int addr, + unsigned int nwords, u32 *data, int byte_oriented); +int t4_flash_cfg_addr(struct adapter *adapter); +unsigned int t4_get_mps_bg_map(struct adapter *adapter, unsigned int pidx); +unsigned int t4_get_tp_ch_map(struct adapter *adapter, unsigned int pidx); +const char *t4_get_port_type_description(enum fw_port_type port_type); +void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p); +void t4vf_get_port_stats(struct adapter *adapter, int pidx, + struct port_stats *p); +void t4_get_port_stats_offset(struct adapter *adap, int idx, + struct port_stats *stats, + struct port_stats *offset); +void t4_clr_port_stats(struct adapter *adap, int idx); +void init_link_config(struct link_config *lc, fw_port_cap32_t pcaps, + fw_port_cap32_t acaps); +void t4_reset_link_config(struct adapter *adap, int idx); +int t4_get_version_info(struct adapter *adapter); +void t4_dump_version_info(struct adapter *adapter); +int t4_get_flash_params(struct adapter *adapter); +int t4_get_chip_type(struct adapter *adap, int ver); +int t4_prep_adapter(struct adapter *adapter); +int t4vf_prep_adapter(struct adapter *adapter); +int t4_port_init(struct adapter *adap, int mbox, int pf, int vf); +int t4vf_port_init(struct adapter *adap); +int t4_init_rss_mode(struct adapter *adap, int mbox); +int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid, + int start, int n, const u16 *rspq, unsigned int nrspq); +int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid, + unsigned int flags, unsigned int defq); +int t4_read_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid, + u64 *flags, unsigned int *defq); +void t4_fw_tp_pio_rw(struct adapter *adap, u32 *vals, unsigned int nregs, + unsigned int start_index, unsigned int rw); +void t4_write_rss_key(struct adapter *adap, u32 *key, int idx); +void t4_read_rss_key(struct adapter *adap, u32 *key); + +enum t4_bar2_qtype { T4_BAR2_QTYPE_EGRESS, T4_BAR2_QTYPE_INGRESS }; +int t4_bar2_sge_qregs(struct adapter *adapter, unsigned int qid, + enum t4_bar2_qtype qtype, u64 *pbar2_qoffset, + unsigned int *pbar2_qid); + +int t4_init_sge_params(struct adapter *adapter); +int t4_init_tp_params(struct adapter *adap); +int t4_filter_field_shift(const struct adapter *adap, unsigned int filter_sel); +int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl); +unsigned int t4_get_regs_len(struct adapter *adap); +unsigned int t4vf_get_pf_from_vf(struct adapter *adap); +void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size); +int t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data); +int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data); +int t4_seeprom_wp(struct adapter *adapter, int enable); +int t4_memory_rw_addr(struct adapter *adap, int win, + u32 addr, u32 len, void *hbuf, int dir); +int t4_memory_rw_mtype(struct adapter *adap, int win, int mtype, u32 maddr, + u32 len, void *hbuf, int dir); +static inline int t4_memory_rw(struct adapter *adap, int win, + int mtype, u32 maddr, u32 len, + void *hbuf, int dir) +{ + return t4_memory_rw_mtype(adap, win, mtype, maddr, len, hbuf, dir); +} +fw_port_cap32_t fwcaps16_to_caps32(fw_port_cap16_t caps16); +#endif /* __CHELSIO_COMMON_H */ diff --git 
a/src/spdk/dpdk/drivers/net/cxgbe/base/t4_chip_type.h b/src/spdk/dpdk/drivers/net/cxgbe/base/t4_chip_type.h new file mode 100644 index 000000000..c0c5d0b2c --- /dev/null +++ b/src/spdk/dpdk/drivers/net/cxgbe/base/t4_chip_type.h @@ -0,0 +1,60 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Chelsio Communications. + * All rights reserved. + */ + +#ifndef __T4_CHIP_TYPE_H__ +#define __T4_CHIP_TYPE_H__ + +/* + * All T4 and later chips have their PCI-E Device IDs encoded as 0xVFPP where: + * + * V = "4" for T4; "5" for T5, etc. or + * F = "0" for PF 0..3; "4".."7" for PF4..7; and "8" for VFs + * PP = adapter product designation + * + * We use the "version" (V) of the adpater to code the Chip Version above. + */ +#define CHELSIO_PCI_ID_VER(devid) ((devid) >> 12) +#define CHELSIO_PCI_ID_FUNC(devid) (((devid) >> 8) & 0xf) +#define CHELSIO_PCI_ID_PROD(devid) ((devid) & 0xff) + +#define CHELSIO_T4 0x4 +#define CHELSIO_T5 0x5 +#define CHELSIO_T6 0x6 + +#define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision)) +#define CHELSIO_CHIP_VERSION(code) (((code) >> 4) & 0xf) +#define CHELSIO_CHIP_RELEASE(code) ((code) & 0xf) + +enum chip_type { + T4_A1 = CHELSIO_CHIP_CODE(CHELSIO_T4, 1), + T4_A2 = CHELSIO_CHIP_CODE(CHELSIO_T4, 2), + T4_FIRST_REV = T4_A1, + T4_LAST_REV = T4_A2, + + T5_A0 = CHELSIO_CHIP_CODE(CHELSIO_T5, 0), + T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 1), + T5_FIRST_REV = T5_A0, + T5_LAST_REV = T5_A1, + + T6_A0 = CHELSIO_CHIP_CODE(CHELSIO_T6, 0), + T6_FIRST_REV = T6_A0, + T6_LAST_REV = T6_A0, +}; + +static inline int is_t4(enum chip_type chip) +{ + return (CHELSIO_CHIP_VERSION(chip) == CHELSIO_T4); +} + +static inline int is_t5(enum chip_type chip) +{ + return (CHELSIO_CHIP_VERSION(chip) == CHELSIO_T5); +} + +static inline int is_t6(enum chip_type chip) +{ + return (CHELSIO_CHIP_VERSION(chip) == CHELSIO_T6); +} +#endif /* __T4_CHIP_TYPE_H__ */ diff --git a/src/spdk/dpdk/drivers/net/cxgbe/base/t4_hw.c b/src/spdk/dpdk/drivers/net/cxgbe/base/t4_hw.c new file mode 100644 index 000000000..c8514c963 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/cxgbe/base/t4_hw.c @@ -0,0 +1,5701 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Chelsio Communications. + * All rights reserved. + */ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "common.h" +#include "t4_regs.h" +#include "t4_regs_values.h" +#include "t4fw_interface.h" + +/** + * t4_read_mtu_tbl - returns the values in the HW path MTU table + * @adap: the adapter + * @mtus: where to store the MTU values + * @mtu_log: where to store the MTU base-2 log (may be %NULL) + * + * Reads the HW path MTU table. + */ +void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log) +{ + u32 v; + int i; + + for (i = 0; i < NMTUS; ++i) { + t4_write_reg(adap, A_TP_MTU_TABLE, + V_MTUINDEX(0xff) | V_MTUVALUE(i)); + v = t4_read_reg(adap, A_TP_MTU_TABLE); + mtus[i] = G_MTUVALUE(v); + if (mtu_log) + mtu_log[i] = G_MTUWIDTH(v); + } +} + +/** + * t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register + * @adap: the adapter + * @addr: the indirect TP register address + * @mask: specifies the field within the register to modify + * @val: new value for the field + * + * Sets a field of an indirect TP register to the given value. 
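+ *
+ * Equivalent read-modify-write sequence (a clarifying sketch of the
+ * implementation below, assuming @val only carries bits inside @mask):
+ *
+ *   t4_write_reg(adap, A_TP_PIO_ADDR, addr);
+ *   cur = t4_read_reg(adap, A_TP_PIO_DATA);
+ *   t4_write_reg(adap, A_TP_PIO_DATA, (cur & ~mask) | (val & mask));
+ *
+ * so that bits outside @mask keep their previous values across the update.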
+ */ +void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr, + unsigned int mask, unsigned int val) +{ + t4_write_reg(adap, A_TP_PIO_ADDR, addr); + val |= t4_read_reg(adap, A_TP_PIO_DATA) & ~mask; + t4_write_reg(adap, A_TP_PIO_DATA, val); +} + +/* The minimum additive increment value for the congestion control table */ +#define CC_MIN_INCR 2U + +/** + * t4_load_mtus - write the MTU and congestion control HW tables + * @adap: the adapter + * @mtus: the values for the MTU table + * @alpha: the values for the congestion control alpha parameter + * @beta: the values for the congestion control beta parameter + * + * Write the HW MTU table with the supplied MTUs and the high-speed + * congestion control table with the supplied alpha, beta, and MTUs. + * We write the two tables together because the additive increments + * depend on the MTUs. + */ +void t4_load_mtus(struct adapter *adap, const unsigned short *mtus, + const unsigned short *alpha, const unsigned short *beta) +{ + static const unsigned int avg_pkts[NCCTRL_WIN] = { + 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640, + 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480, + 28672, 40960, 57344, 81920, 114688, 163840, 229376 + }; + + unsigned int i, w; + + for (i = 0; i < NMTUS; ++i) { + unsigned int mtu = mtus[i]; + unsigned int log2 = cxgbe_fls(mtu); + + if (!(mtu & ((1 << log2) >> 2))) /* round */ + log2--; + t4_write_reg(adap, A_TP_MTU_TABLE, V_MTUINDEX(i) | + V_MTUWIDTH(log2) | V_MTUVALUE(mtu)); + + for (w = 0; w < NCCTRL_WIN; ++w) { + unsigned int inc; + + inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w], + CC_MIN_INCR); + + t4_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) | + (w << 16) | (beta[w] << 13) | inc); + } + } +} + +/** + * t4_wait_op_done_val - wait until an operation is completed + * @adapter: the adapter performing the operation + * @reg: the register to check for completion + * @mask: a single-bit field within @reg that indicates completion + * @polarity: the value of the field when the operation is completed + * @attempts: number of check iterations + * @delay: delay in usecs between iterations + * @valp: where to store the value of the register at completion time + * + * Wait until an operation is completed by checking a bit in a register + * up to @attempts times. If @valp is not NULL the value of the register + * at the time it indicated completion is stored there. Returns 0 if the + * operation completes and -EAGAIN otherwise. + */ +int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask, + int polarity, int attempts, int delay, u32 *valp) +{ + while (1) { + u32 val = t4_read_reg(adapter, reg); + + if (!!(val & mask) == polarity) { + if (valp) + *valp = val; + return 0; + } + if (--attempts == 0) + return -EAGAIN; + if (delay) + udelay(delay); + } +} + +/** + * t4_set_reg_field - set a register field to a value + * @adapter: the adapter to program + * @addr: the register address + * @mask: specifies the portion of the register to modify + * @val: the new value for the register field + * + * Sets a register field specified by the supplied mask to the + * given value. 
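+ *
+ * Usage sketch (hedged: EXAMPLE_REG, M_EXAMPLE_FIELD and V_EXAMPLE_FIELD()
+ * are illustrative placeholders following the driver's V_/M_ macro naming,
+ * not definitions taken from t4_regs.h):
+ *
+ *   t4_set_reg_field(adapter, EXAMPLE_REG,
+ *                    V_EXAMPLE_FIELD(M_EXAMPLE_FIELD),
+ *                    V_EXAMPLE_FIELD(new_val));
+ *
+ * The trailing read in the implementation below flushes the posted write
+ * before control returns to the caller.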
+ */ +void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask, + u32 val) +{ + u32 v = t4_read_reg(adapter, addr) & ~mask; + + t4_write_reg(adapter, addr, v | val); + (void)t4_read_reg(adapter, addr); /* flush */ +} + +/** + * t4_read_indirect - read indirectly addressed registers + * @adap: the adapter + * @addr_reg: register holding the indirect address + * @data_reg: register holding the value of the indirect register + * @vals: where the read register values are stored + * @nregs: how many indirect registers to read + * @start_idx: index of first indirect register to read + * + * Reads registers that are accessed indirectly through an address/data + * register pair. + */ +void t4_read_indirect(struct adapter *adap, unsigned int addr_reg, + unsigned int data_reg, u32 *vals, unsigned int nregs, + unsigned int start_idx) +{ + while (nregs--) { + t4_write_reg(adap, addr_reg, start_idx); + *vals++ = t4_read_reg(adap, data_reg); + start_idx++; + } +} + +/** + * t4_write_indirect - write indirectly addressed registers + * @adap: the adapter + * @addr_reg: register holding the indirect addresses + * @data_reg: register holding the value for the indirect registers + * @vals: values to write + * @nregs: how many indirect registers to write + * @start_idx: address of first indirect register to write + * + * Writes a sequential block of registers that are accessed indirectly + * through an address/data register pair. + */ +void t4_write_indirect(struct adapter *adap, unsigned int addr_reg, + unsigned int data_reg, const u32 *vals, + unsigned int nregs, unsigned int start_idx) +{ + while (nregs--) { + t4_write_reg(adap, addr_reg, start_idx++); + t4_write_reg(adap, data_reg, *vals++); + } +} + +/** + * t4_report_fw_error - report firmware error + * @adap: the adapter + * + * The adapter firmware can indicate error conditions to the host. + * If the firmware has indicated an error, print out the reason for + * the firmware error. + */ +static void t4_report_fw_error(struct adapter *adap) +{ + static const char * const reason[] = { + "Crash", /* PCIE_FW_EVAL_CRASH */ + "During Device Preparation", /* PCIE_FW_EVAL_PREP */ + "During Device Configuration", /* PCIE_FW_EVAL_CONF */ + "During Device Initialization", /* PCIE_FW_EVAL_INIT */ + "Unexpected Event", /* PCIE_FW_EVAL_UNEXPECTEDEVENT */ + "Insufficient Airflow", /* PCIE_FW_EVAL_OVERHEAT */ + "Device Shutdown", /* PCIE_FW_EVAL_DEVICESHUTDOWN */ + "Reserved", /* reserved */ + }; + u32 pcie_fw; + + pcie_fw = t4_read_reg(adap, A_PCIE_FW); + if (pcie_fw & F_PCIE_FW_ERR) + pr_err("%s: Firmware reports adapter error: %s\n", + __func__, reason[G_PCIE_FW_EVAL(pcie_fw)]); +} + +/* + * Get the reply to a mailbox command and store it in @rpl in big-endian order. + */ +static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit, + u32 mbox_addr) +{ + for ( ; nflit; nflit--, mbox_addr += 8) + *rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr)); +} + +/* + * Handle a FW assertion reported in a mailbox. 
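+ * The assertion details arrive formatted as a struct fw_debug_cmd reply:
+ * the handler below copies sizeof(struct fw_debug_cmd) / 8 big-endian flits
+ * out of the mailbox with get_mbox_rpl() and byte-swaps the individual
+ * fields (source file name, line number, val0/val1) only for logging.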
+ */ +static void fw_asrt(struct adapter *adap, u32 mbox_addr) +{ + struct fw_debug_cmd asrt; + + get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr); + pr_warn("FW assertion at %.16s:%u, val0 %#x, val1 %#x\n", + asrt.u.assert.filename_0_7, be32_to_cpu(asrt.u.assert.line), + be32_to_cpu(asrt.u.assert.x), be32_to_cpu(asrt.u.assert.y)); +} + +#define X_CIM_PF_NOACCESS 0xeeeeeeee + +/* + * If the Host OS Driver needs locking arround accesses to the mailbox, this + * can be turned on via the T4_OS_NEEDS_MBOX_LOCKING CPP define ... + */ +/* makes single-statement usage a bit cleaner ... */ +#ifdef T4_OS_NEEDS_MBOX_LOCKING +#define T4_OS_MBOX_LOCKING(x) x +#else +#define T4_OS_MBOX_LOCKING(x) do {} while (0) +#endif + +/** + * t4_wr_mbox_meat_timeout - send a command to FW through the given mailbox + * @adap: the adapter + * @mbox: index of the mailbox to use + * @cmd: the command to write + * @size: command length in bytes + * @rpl: where to optionally store the reply + * @sleep_ok: if true we may sleep while awaiting command completion + * @timeout: time to wait for command to finish before timing out + * (negative implies @sleep_ok=false) + * + * Sends the given command to FW through the selected mailbox and waits + * for the FW to execute the command. If @rpl is not %NULL it is used to + * store the FW's reply to the command. The command and its optional + * reply are of the same length. Some FW commands like RESET and + * INITIALIZE can take a considerable amount of time to execute. + * @sleep_ok determines whether we may sleep while awaiting the response. + * If sleeping is allowed we use progressive backoff otherwise we spin. + * Note that passing in a negative @timeout is an alternate mechanism + * for specifying @sleep_ok=false. This is useful when a higher level + * interface allows for specification of @timeout but not @sleep_ok ... + * + * Returns 0 on success or a negative errno on failure. A + * failure can happen either because we are not able to execute the + * command or FW executes it but signals an error. In the latter case + * the return value is the error code indicated by FW (negated). + */ +int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, + const void __attribute__((__may_alias__)) *cmd, + int size, void *rpl, bool sleep_ok, int timeout) +{ + /* + * We delay in small increments at first in an effort to maintain + * responsiveness for simple, fast executing commands but then back + * off to larger delays to a maximum retry delay. + */ + static const int delay[] = { + 1, 1, 3, 5, 10, 10, 20, 50, 100 + }; + + u32 v; + u64 res; + int i, ms; + unsigned int delay_idx; + __be64 *temp = (__be64 *)malloc(size * sizeof(char)); + __be64 *p = temp; + u32 data_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_DATA); + u32 ctl_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_CTRL); + u32 ctl; + struct mbox_entry entry; + u32 pcie_fw = 0; + + if (!temp) + return -ENOMEM; + + if ((size & 15) || size > MBOX_LEN) { + free(temp); + return -EINVAL; + } + + memset(p, 0, size); + memcpy(p, (const __be64 *)cmd, size); + + /* + * If we have a negative timeout, that implies that we can't sleep. + */ + if (timeout < 0) { + sleep_ok = false; + timeout = -timeout; + } + +#ifdef T4_OS_NEEDS_MBOX_LOCKING + /* + * Queue ourselves onto the mailbox access list. When our entry is at + * the front of the list, we have rights to access the mailbox. So we + * wait [for a while] till we're at the front [or bail out with an + * EBUSY] ... 
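+ *
+ * Concretely, the wait loop below gives up with -EBUSY once it has waited
+ * more than four times the caller's @timeout, and with -ENXIO as soon as
+ * A_PCIE_FW reports a firmware error; in both cases our entry is removed
+ * from the mailbox access list before returning.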
+ */ + t4_os_atomic_add_tail(&entry, &adap->mbox_list, &adap->mbox_lock); + + delay_idx = 0; + ms = delay[0]; + + for (i = 0; ; i += ms) { + /* + * If we've waited too long, return a busy indication. This + * really ought to be based on our initial position in the + * mailbox access list but this is a start. We very rarely + * contend on access to the mailbox ... Also check for a + * firmware error which we'll report as a device error. + */ + pcie_fw = t4_read_reg(adap, A_PCIE_FW); + if (i > 4 * timeout || (pcie_fw & F_PCIE_FW_ERR)) { + t4_os_atomic_list_del(&entry, &adap->mbox_list, + &adap->mbox_lock); + t4_report_fw_error(adap); + free(temp); + return (pcie_fw & F_PCIE_FW_ERR) ? -ENXIO : -EBUSY; + } + + /* + * If we're at the head, break out and start the mailbox + * protocol. + */ + if (t4_os_list_first_entry(&adap->mbox_list) == &entry) + break; + + /* + * Delay for a bit before checking again ... + */ + if (sleep_ok) { + ms = delay[delay_idx]; /* last element may repeat */ + if (delay_idx < ARRAY_SIZE(delay) - 1) + delay_idx++; + msleep(ms); + } else { + rte_delay_ms(ms); + } + } +#endif /* T4_OS_NEEDS_MBOX_LOCKING */ + + /* + * Attempt to gain access to the mailbox. + */ + for (i = 0; i < 4; i++) { + ctl = t4_read_reg(adap, ctl_reg); + v = G_MBOWNER(ctl); + if (v != X_MBOWNER_NONE) + break; + } + + /* + * If we were unable to gain access, dequeue ourselves from the + * mailbox atomic access list and report the error to our caller. + */ + if (v != X_MBOWNER_PL) { + T4_OS_MBOX_LOCKING(t4_os_atomic_list_del(&entry, + &adap->mbox_list, + &adap->mbox_lock)); + t4_report_fw_error(adap); + free(temp); + return (v == X_MBOWNER_FW ? -EBUSY : -ETIMEDOUT); + } + + /* + * If we gain ownership of the mailbox and there's a "valid" message + * in it, this is likely an asynchronous error message from the + * firmware. So we'll report that and then proceed on with attempting + * to issue our own command ... which may well fail if the error + * presaged the firmware crashing ... + */ + if (ctl & F_MBMSGVALID) { + dev_err(adap, "found VALID command in mbox %u: " + "%llx %llx %llx %llx %llx %llx %llx %llx\n", mbox, + (unsigned long long)t4_read_reg64(adap, data_reg), + (unsigned long long)t4_read_reg64(adap, data_reg + 8), + (unsigned long long)t4_read_reg64(adap, data_reg + 16), + (unsigned long long)t4_read_reg64(adap, data_reg + 24), + (unsigned long long)t4_read_reg64(adap, data_reg + 32), + (unsigned long long)t4_read_reg64(adap, data_reg + 40), + (unsigned long long)t4_read_reg64(adap, data_reg + 48), + (unsigned long long)t4_read_reg64(adap, data_reg + 56)); + } + + /* + * Copy in the new mailbox command and send it on its way ... 
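+ *
+ * The command is copied into the mailbox data registers one 64-bit flit at
+ * a time, after which ownership is handed to the firmware by writing
+ * F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW) to the control register, followed
+ * by a read-back to flush the posted write.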
+ */ + for (i = 0; i < size; i += 8, p++) + t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p)); + + CXGBE_DEBUG_MBOX(adap, "%s: mbox %u: %016llx %016llx %016llx %016llx " + "%016llx %016llx %016llx %016llx\n", __func__, (mbox), + (unsigned long long)t4_read_reg64(adap, data_reg), + (unsigned long long)t4_read_reg64(adap, data_reg + 8), + (unsigned long long)t4_read_reg64(adap, data_reg + 16), + (unsigned long long)t4_read_reg64(adap, data_reg + 24), + (unsigned long long)t4_read_reg64(adap, data_reg + 32), + (unsigned long long)t4_read_reg64(adap, data_reg + 40), + (unsigned long long)t4_read_reg64(adap, data_reg + 48), + (unsigned long long)t4_read_reg64(adap, data_reg + 56)); + + t4_write_reg(adap, ctl_reg, F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW)); + t4_read_reg(adap, ctl_reg); /* flush write */ + + delay_idx = 0; + ms = delay[0]; + + /* + * Loop waiting for the reply; bail out if we time out or the firmware + * reports an error. + */ + pcie_fw = t4_read_reg(adap, A_PCIE_FW); + for (i = 0; i < timeout && !(pcie_fw & F_PCIE_FW_ERR); i += ms) { + if (sleep_ok) { + ms = delay[delay_idx]; /* last element may repeat */ + if (delay_idx < ARRAY_SIZE(delay) - 1) + delay_idx++; + msleep(ms); + } else { + msleep(ms); + } + + pcie_fw = t4_read_reg(adap, A_PCIE_FW); + v = t4_read_reg(adap, ctl_reg); + if (v == X_CIM_PF_NOACCESS) + continue; + if (G_MBOWNER(v) == X_MBOWNER_PL) { + if (!(v & F_MBMSGVALID)) { + t4_write_reg(adap, ctl_reg, + V_MBOWNER(X_MBOWNER_NONE)); + continue; + } + + CXGBE_DEBUG_MBOX(adap, + "%s: mbox %u: %016llx %016llx %016llx %016llx " + "%016llx %016llx %016llx %016llx\n", __func__, (mbox), + (unsigned long long)t4_read_reg64(adap, data_reg), + (unsigned long long)t4_read_reg64(adap, data_reg + 8), + (unsigned long long)t4_read_reg64(adap, data_reg + 16), + (unsigned long long)t4_read_reg64(adap, data_reg + 24), + (unsigned long long)t4_read_reg64(adap, data_reg + 32), + (unsigned long long)t4_read_reg64(adap, data_reg + 40), + (unsigned long long)t4_read_reg64(adap, data_reg + 48), + (unsigned long long)t4_read_reg64(adap, data_reg + 56)); + + CXGBE_DEBUG_MBOX(adap, + "command %#x completed in %d ms (%ssleeping)\n", + *(const u8 *)cmd, + i + ms, sleep_ok ? "" : "non-"); + + res = t4_read_reg64(adap, data_reg); + if (G_FW_CMD_OP(res >> 32) == FW_DEBUG_CMD) { + fw_asrt(adap, data_reg); + res = V_FW_CMD_RETVAL(EIO); + } else if (rpl) { + get_mbox_rpl(adap, rpl, size / 8, data_reg); + } + t4_write_reg(adap, ctl_reg, V_MBOWNER(X_MBOWNER_NONE)); + T4_OS_MBOX_LOCKING( + t4_os_atomic_list_del(&entry, &adap->mbox_list, + &adap->mbox_lock)); + free(temp); + return -G_FW_CMD_RETVAL((int)res); + } + } + + /* + * We timed out waiting for a reply to our mailbox command. Report + * the error and also check to see if the firmware reported any + * errors ... + */ + dev_err(adap, "command %#x in mailbox %d timed out\n", + *(const u8 *)cmd, mbox); + T4_OS_MBOX_LOCKING(t4_os_atomic_list_del(&entry, + &adap->mbox_list, + &adap->mbox_lock)); + t4_report_fw_error(adap); + free(temp); + return (pcie_fw & F_PCIE_FW_ERR) ? -ENXIO : -ETIMEDOUT; +} + +int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size, + void *rpl, bool sleep_ok) +{ + return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl, sleep_ok, + FW_CMD_MAX_TIMEOUT); +} + +/** + * t4_get_regs_len - return the size of the chips register set + * @adapter: the adapter + * + * Returns the size of the chip's BAR0 register space. 
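+ *
+ * Typical pairing with t4_get_regs() (a hedged sketch; allocating the dump
+ * buffer with rte_zmalloc() is an assumption of this example, not something
+ * this file requires):
+ *
+ *   len = t4_get_regs_len(adapter);
+ *   buf = rte_zmalloc(NULL, len, 0);
+ *   if (buf != NULL)
+ *           t4_get_regs(adapter, buf, len);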
+ */ +unsigned int t4_get_regs_len(struct adapter *adapter) +{ + unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip); + + switch (chip_version) { + case CHELSIO_T5: + case CHELSIO_T6: + return T5_REGMAP_SIZE; + } + + dev_err(adapter, + "Unsupported chip version %d\n", chip_version); + return 0; +} + +/** + * t4_get_regs - read chip registers into provided buffer + * @adap: the adapter + * @buf: register buffer + * @buf_size: size (in bytes) of register buffer + * + * If the provided register buffer isn't large enough for the chip's + * full register range, the register dump will be truncated to the + * register buffer's size. + */ +void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size) +{ + static const unsigned int t5_reg_ranges[] = { + 0x1008, 0x10c0, + 0x10cc, 0x10f8, + 0x1100, 0x1100, + 0x110c, 0x1148, + 0x1180, 0x1184, + 0x1190, 0x1194, + 0x11a0, 0x11a4, + 0x11b0, 0x11b4, + 0x11fc, 0x123c, + 0x1280, 0x173c, + 0x1800, 0x18fc, + 0x3000, 0x3028, + 0x3060, 0x30b0, + 0x30b8, 0x30d8, + 0x30e0, 0x30fc, + 0x3140, 0x357c, + 0x35a8, 0x35cc, + 0x35ec, 0x35ec, + 0x3600, 0x5624, + 0x56cc, 0x56ec, + 0x56f4, 0x5720, + 0x5728, 0x575c, + 0x580c, 0x5814, + 0x5890, 0x589c, + 0x58a4, 0x58ac, + 0x58b8, 0x58bc, + 0x5940, 0x59c8, + 0x59d0, 0x59dc, + 0x59fc, 0x5a18, + 0x5a60, 0x5a70, + 0x5a80, 0x5a9c, + 0x5b94, 0x5bfc, + 0x6000, 0x6020, + 0x6028, 0x6040, + 0x6058, 0x609c, + 0x60a8, 0x614c, + 0x7700, 0x7798, + 0x77c0, 0x78fc, + 0x7b00, 0x7b58, + 0x7b60, 0x7b84, + 0x7b8c, 0x7c54, + 0x7d00, 0x7d38, + 0x7d40, 0x7d80, + 0x7d8c, 0x7ddc, + 0x7de4, 0x7e04, + 0x7e10, 0x7e1c, + 0x7e24, 0x7e38, + 0x7e40, 0x7e44, + 0x7e4c, 0x7e78, + 0x7e80, 0x7edc, + 0x7ee8, 0x7efc, + 0x8dc0, 0x8de0, + 0x8df8, 0x8e04, + 0x8e10, 0x8e84, + 0x8ea0, 0x8f84, + 0x8fc0, 0x9058, + 0x9060, 0x9060, + 0x9068, 0x90f8, + 0x9400, 0x9408, + 0x9410, 0x9470, + 0x9600, 0x9600, + 0x9608, 0x9638, + 0x9640, 0x96f4, + 0x9800, 0x9808, + 0x9820, 0x983c, + 0x9850, 0x9864, + 0x9c00, 0x9c6c, + 0x9c80, 0x9cec, + 0x9d00, 0x9d6c, + 0x9d80, 0x9dec, + 0x9e00, 0x9e6c, + 0x9e80, 0x9eec, + 0x9f00, 0x9f6c, + 0x9f80, 0xa020, + 0xd004, 0xd004, + 0xd010, 0xd03c, + 0xdfc0, 0xdfe0, + 0xe000, 0x1106c, + 0x11074, 0x11088, + 0x1109c, 0x1117c, + 0x11190, 0x11204, + 0x19040, 0x1906c, + 0x19078, 0x19080, + 0x1908c, 0x190e8, + 0x190f0, 0x190f8, + 0x19100, 0x19110, + 0x19120, 0x19124, + 0x19150, 0x19194, + 0x1919c, 0x191b0, + 0x191d0, 0x191e8, + 0x19238, 0x19290, + 0x193f8, 0x19428, + 0x19430, 0x19444, + 0x1944c, 0x1946c, + 0x19474, 0x19474, + 0x19490, 0x194cc, + 0x194f0, 0x194f8, + 0x19c00, 0x19c08, + 0x19c10, 0x19c60, + 0x19c94, 0x19ce4, + 0x19cf0, 0x19d40, + 0x19d50, 0x19d94, + 0x19da0, 0x19de8, + 0x19df0, 0x19e10, + 0x19e50, 0x19e90, + 0x19ea0, 0x19f24, + 0x19f34, 0x19f34, + 0x19f40, 0x19f50, + 0x19f90, 0x19fb4, + 0x19fc4, 0x19fe4, + 0x1a000, 0x1a004, + 0x1a010, 0x1a06c, + 0x1a0b0, 0x1a0e4, + 0x1a0ec, 0x1a0f8, + 0x1a100, 0x1a108, + 0x1a114, 0x1a120, + 0x1a128, 0x1a130, + 0x1a138, 0x1a138, + 0x1a190, 0x1a1c4, + 0x1a1fc, 0x1a1fc, + 0x1e008, 0x1e00c, + 0x1e040, 0x1e044, + 0x1e04c, 0x1e04c, + 0x1e284, 0x1e290, + 0x1e2c0, 0x1e2c0, + 0x1e2e0, 0x1e2e0, + 0x1e300, 0x1e384, + 0x1e3c0, 0x1e3c8, + 0x1e408, 0x1e40c, + 0x1e440, 0x1e444, + 0x1e44c, 0x1e44c, + 0x1e684, 0x1e690, + 0x1e6c0, 0x1e6c0, + 0x1e6e0, 0x1e6e0, + 0x1e700, 0x1e784, + 0x1e7c0, 0x1e7c8, + 0x1e808, 0x1e80c, + 0x1e840, 0x1e844, + 0x1e84c, 0x1e84c, + 0x1ea84, 0x1ea90, + 0x1eac0, 0x1eac0, + 0x1eae0, 0x1eae0, + 0x1eb00, 0x1eb84, + 0x1ebc0, 0x1ebc8, + 0x1ec08, 0x1ec0c, + 0x1ec40, 0x1ec44, + 0x1ec4c, 
0x1ec4c, + 0x1ee84, 0x1ee90, + 0x1eec0, 0x1eec0, + 0x1eee0, 0x1eee0, + 0x1ef00, 0x1ef84, + 0x1efc0, 0x1efc8, + 0x1f008, 0x1f00c, + 0x1f040, 0x1f044, + 0x1f04c, 0x1f04c, + 0x1f284, 0x1f290, + 0x1f2c0, 0x1f2c0, + 0x1f2e0, 0x1f2e0, + 0x1f300, 0x1f384, + 0x1f3c0, 0x1f3c8, + 0x1f408, 0x1f40c, + 0x1f440, 0x1f444, + 0x1f44c, 0x1f44c, + 0x1f684, 0x1f690, + 0x1f6c0, 0x1f6c0, + 0x1f6e0, 0x1f6e0, + 0x1f700, 0x1f784, + 0x1f7c0, 0x1f7c8, + 0x1f808, 0x1f80c, + 0x1f840, 0x1f844, + 0x1f84c, 0x1f84c, + 0x1fa84, 0x1fa90, + 0x1fac0, 0x1fac0, + 0x1fae0, 0x1fae0, + 0x1fb00, 0x1fb84, + 0x1fbc0, 0x1fbc8, + 0x1fc08, 0x1fc0c, + 0x1fc40, 0x1fc44, + 0x1fc4c, 0x1fc4c, + 0x1fe84, 0x1fe90, + 0x1fec0, 0x1fec0, + 0x1fee0, 0x1fee0, + 0x1ff00, 0x1ff84, + 0x1ffc0, 0x1ffc8, + 0x30000, 0x30030, + 0x30038, 0x30038, + 0x30040, 0x30040, + 0x30100, 0x30144, + 0x30190, 0x301a0, + 0x301a8, 0x301b8, + 0x301c4, 0x301c8, + 0x301d0, 0x301d0, + 0x30200, 0x30318, + 0x30400, 0x304b4, + 0x304c0, 0x3052c, + 0x30540, 0x3061c, + 0x30800, 0x30828, + 0x30834, 0x30834, + 0x308c0, 0x30908, + 0x30910, 0x309ac, + 0x30a00, 0x30a14, + 0x30a1c, 0x30a2c, + 0x30a44, 0x30a50, + 0x30a74, 0x30a74, + 0x30a7c, 0x30afc, + 0x30b08, 0x30c24, + 0x30d00, 0x30d00, + 0x30d08, 0x30d14, + 0x30d1c, 0x30d20, + 0x30d3c, 0x30d3c, + 0x30d48, 0x30d50, + 0x31200, 0x3120c, + 0x31220, 0x31220, + 0x31240, 0x31240, + 0x31600, 0x3160c, + 0x31a00, 0x31a1c, + 0x31e00, 0x31e20, + 0x31e38, 0x31e3c, + 0x31e80, 0x31e80, + 0x31e88, 0x31ea8, + 0x31eb0, 0x31eb4, + 0x31ec8, 0x31ed4, + 0x31fb8, 0x32004, + 0x32200, 0x32200, + 0x32208, 0x32240, + 0x32248, 0x32280, + 0x32288, 0x322c0, + 0x322c8, 0x322fc, + 0x32600, 0x32630, + 0x32a00, 0x32abc, + 0x32b00, 0x32b10, + 0x32b20, 0x32b30, + 0x32b40, 0x32b50, + 0x32b60, 0x32b70, + 0x33000, 0x33028, + 0x33030, 0x33048, + 0x33060, 0x33068, + 0x33070, 0x3309c, + 0x330f0, 0x33128, + 0x33130, 0x33148, + 0x33160, 0x33168, + 0x33170, 0x3319c, + 0x331f0, 0x33238, + 0x33240, 0x33240, + 0x33248, 0x33250, + 0x3325c, 0x33264, + 0x33270, 0x332b8, + 0x332c0, 0x332e4, + 0x332f8, 0x33338, + 0x33340, 0x33340, + 0x33348, 0x33350, + 0x3335c, 0x33364, + 0x33370, 0x333b8, + 0x333c0, 0x333e4, + 0x333f8, 0x33428, + 0x33430, 0x33448, + 0x33460, 0x33468, + 0x33470, 0x3349c, + 0x334f0, 0x33528, + 0x33530, 0x33548, + 0x33560, 0x33568, + 0x33570, 0x3359c, + 0x335f0, 0x33638, + 0x33640, 0x33640, + 0x33648, 0x33650, + 0x3365c, 0x33664, + 0x33670, 0x336b8, + 0x336c0, 0x336e4, + 0x336f8, 0x33738, + 0x33740, 0x33740, + 0x33748, 0x33750, + 0x3375c, 0x33764, + 0x33770, 0x337b8, + 0x337c0, 0x337e4, + 0x337f8, 0x337fc, + 0x33814, 0x33814, + 0x3382c, 0x3382c, + 0x33880, 0x3388c, + 0x338e8, 0x338ec, + 0x33900, 0x33928, + 0x33930, 0x33948, + 0x33960, 0x33968, + 0x33970, 0x3399c, + 0x339f0, 0x33a38, + 0x33a40, 0x33a40, + 0x33a48, 0x33a50, + 0x33a5c, 0x33a64, + 0x33a70, 0x33ab8, + 0x33ac0, 0x33ae4, + 0x33af8, 0x33b10, + 0x33b28, 0x33b28, + 0x33b3c, 0x33b50, + 0x33bf0, 0x33c10, + 0x33c28, 0x33c28, + 0x33c3c, 0x33c50, + 0x33cf0, 0x33cfc, + 0x34000, 0x34030, + 0x34038, 0x34038, + 0x34040, 0x34040, + 0x34100, 0x34144, + 0x34190, 0x341a0, + 0x341a8, 0x341b8, + 0x341c4, 0x341c8, + 0x341d0, 0x341d0, + 0x34200, 0x34318, + 0x34400, 0x344b4, + 0x344c0, 0x3452c, + 0x34540, 0x3461c, + 0x34800, 0x34828, + 0x34834, 0x34834, + 0x348c0, 0x34908, + 0x34910, 0x349ac, + 0x34a00, 0x34a14, + 0x34a1c, 0x34a2c, + 0x34a44, 0x34a50, + 0x34a74, 0x34a74, + 0x34a7c, 0x34afc, + 0x34b08, 0x34c24, + 0x34d00, 0x34d00, + 0x34d08, 0x34d14, + 0x34d1c, 0x34d20, + 0x34d3c, 0x34d3c, + 0x34d48, 0x34d50, + 0x35200, 0x3520c, + 
0x35220, 0x35220, + 0x35240, 0x35240, + 0x35600, 0x3560c, + 0x35a00, 0x35a1c, + 0x35e00, 0x35e20, + 0x35e38, 0x35e3c, + 0x35e80, 0x35e80, + 0x35e88, 0x35ea8, + 0x35eb0, 0x35eb4, + 0x35ec8, 0x35ed4, + 0x35fb8, 0x36004, + 0x36200, 0x36200, + 0x36208, 0x36240, + 0x36248, 0x36280, + 0x36288, 0x362c0, + 0x362c8, 0x362fc, + 0x36600, 0x36630, + 0x36a00, 0x36abc, + 0x36b00, 0x36b10, + 0x36b20, 0x36b30, + 0x36b40, 0x36b50, + 0x36b60, 0x36b70, + 0x37000, 0x37028, + 0x37030, 0x37048, + 0x37060, 0x37068, + 0x37070, 0x3709c, + 0x370f0, 0x37128, + 0x37130, 0x37148, + 0x37160, 0x37168, + 0x37170, 0x3719c, + 0x371f0, 0x37238, + 0x37240, 0x37240, + 0x37248, 0x37250, + 0x3725c, 0x37264, + 0x37270, 0x372b8, + 0x372c0, 0x372e4, + 0x372f8, 0x37338, + 0x37340, 0x37340, + 0x37348, 0x37350, + 0x3735c, 0x37364, + 0x37370, 0x373b8, + 0x373c0, 0x373e4, + 0x373f8, 0x37428, + 0x37430, 0x37448, + 0x37460, 0x37468, + 0x37470, 0x3749c, + 0x374f0, 0x37528, + 0x37530, 0x37548, + 0x37560, 0x37568, + 0x37570, 0x3759c, + 0x375f0, 0x37638, + 0x37640, 0x37640, + 0x37648, 0x37650, + 0x3765c, 0x37664, + 0x37670, 0x376b8, + 0x376c0, 0x376e4, + 0x376f8, 0x37738, + 0x37740, 0x37740, + 0x37748, 0x37750, + 0x3775c, 0x37764, + 0x37770, 0x377b8, + 0x377c0, 0x377e4, + 0x377f8, 0x377fc, + 0x37814, 0x37814, + 0x3782c, 0x3782c, + 0x37880, 0x3788c, + 0x378e8, 0x378ec, + 0x37900, 0x37928, + 0x37930, 0x37948, + 0x37960, 0x37968, + 0x37970, 0x3799c, + 0x379f0, 0x37a38, + 0x37a40, 0x37a40, + 0x37a48, 0x37a50, + 0x37a5c, 0x37a64, + 0x37a70, 0x37ab8, + 0x37ac0, 0x37ae4, + 0x37af8, 0x37b10, + 0x37b28, 0x37b28, + 0x37b3c, 0x37b50, + 0x37bf0, 0x37c10, + 0x37c28, 0x37c28, + 0x37c3c, 0x37c50, + 0x37cf0, 0x37cfc, + 0x38000, 0x38030, + 0x38038, 0x38038, + 0x38040, 0x38040, + 0x38100, 0x38144, + 0x38190, 0x381a0, + 0x381a8, 0x381b8, + 0x381c4, 0x381c8, + 0x381d0, 0x381d0, + 0x38200, 0x38318, + 0x38400, 0x384b4, + 0x384c0, 0x3852c, + 0x38540, 0x3861c, + 0x38800, 0x38828, + 0x38834, 0x38834, + 0x388c0, 0x38908, + 0x38910, 0x389ac, + 0x38a00, 0x38a14, + 0x38a1c, 0x38a2c, + 0x38a44, 0x38a50, + 0x38a74, 0x38a74, + 0x38a7c, 0x38afc, + 0x38b08, 0x38c24, + 0x38d00, 0x38d00, + 0x38d08, 0x38d14, + 0x38d1c, 0x38d20, + 0x38d3c, 0x38d3c, + 0x38d48, 0x38d50, + 0x39200, 0x3920c, + 0x39220, 0x39220, + 0x39240, 0x39240, + 0x39600, 0x3960c, + 0x39a00, 0x39a1c, + 0x39e00, 0x39e20, + 0x39e38, 0x39e3c, + 0x39e80, 0x39e80, + 0x39e88, 0x39ea8, + 0x39eb0, 0x39eb4, + 0x39ec8, 0x39ed4, + 0x39fb8, 0x3a004, + 0x3a200, 0x3a200, + 0x3a208, 0x3a240, + 0x3a248, 0x3a280, + 0x3a288, 0x3a2c0, + 0x3a2c8, 0x3a2fc, + 0x3a600, 0x3a630, + 0x3aa00, 0x3aabc, + 0x3ab00, 0x3ab10, + 0x3ab20, 0x3ab30, + 0x3ab40, 0x3ab50, + 0x3ab60, 0x3ab70, + 0x3b000, 0x3b028, + 0x3b030, 0x3b048, + 0x3b060, 0x3b068, + 0x3b070, 0x3b09c, + 0x3b0f0, 0x3b128, + 0x3b130, 0x3b148, + 0x3b160, 0x3b168, + 0x3b170, 0x3b19c, + 0x3b1f0, 0x3b238, + 0x3b240, 0x3b240, + 0x3b248, 0x3b250, + 0x3b25c, 0x3b264, + 0x3b270, 0x3b2b8, + 0x3b2c0, 0x3b2e4, + 0x3b2f8, 0x3b338, + 0x3b340, 0x3b340, + 0x3b348, 0x3b350, + 0x3b35c, 0x3b364, + 0x3b370, 0x3b3b8, + 0x3b3c0, 0x3b3e4, + 0x3b3f8, 0x3b428, + 0x3b430, 0x3b448, + 0x3b460, 0x3b468, + 0x3b470, 0x3b49c, + 0x3b4f0, 0x3b528, + 0x3b530, 0x3b548, + 0x3b560, 0x3b568, + 0x3b570, 0x3b59c, + 0x3b5f0, 0x3b638, + 0x3b640, 0x3b640, + 0x3b648, 0x3b650, + 0x3b65c, 0x3b664, + 0x3b670, 0x3b6b8, + 0x3b6c0, 0x3b6e4, + 0x3b6f8, 0x3b738, + 0x3b740, 0x3b740, + 0x3b748, 0x3b750, + 0x3b75c, 0x3b764, + 0x3b770, 0x3b7b8, + 0x3b7c0, 0x3b7e4, + 0x3b7f8, 0x3b7fc, + 0x3b814, 0x3b814, + 0x3b82c, 0x3b82c, + 0x3b880, 
0x3b88c, + 0x3b8e8, 0x3b8ec, + 0x3b900, 0x3b928, + 0x3b930, 0x3b948, + 0x3b960, 0x3b968, + 0x3b970, 0x3b99c, + 0x3b9f0, 0x3ba38, + 0x3ba40, 0x3ba40, + 0x3ba48, 0x3ba50, + 0x3ba5c, 0x3ba64, + 0x3ba70, 0x3bab8, + 0x3bac0, 0x3bae4, + 0x3baf8, 0x3bb10, + 0x3bb28, 0x3bb28, + 0x3bb3c, 0x3bb50, + 0x3bbf0, 0x3bc10, + 0x3bc28, 0x3bc28, + 0x3bc3c, 0x3bc50, + 0x3bcf0, 0x3bcfc, + 0x3c000, 0x3c030, + 0x3c038, 0x3c038, + 0x3c040, 0x3c040, + 0x3c100, 0x3c144, + 0x3c190, 0x3c1a0, + 0x3c1a8, 0x3c1b8, + 0x3c1c4, 0x3c1c8, + 0x3c1d0, 0x3c1d0, + 0x3c200, 0x3c318, + 0x3c400, 0x3c4b4, + 0x3c4c0, 0x3c52c, + 0x3c540, 0x3c61c, + 0x3c800, 0x3c828, + 0x3c834, 0x3c834, + 0x3c8c0, 0x3c908, + 0x3c910, 0x3c9ac, + 0x3ca00, 0x3ca14, + 0x3ca1c, 0x3ca2c, + 0x3ca44, 0x3ca50, + 0x3ca74, 0x3ca74, + 0x3ca7c, 0x3cafc, + 0x3cb08, 0x3cc24, + 0x3cd00, 0x3cd00, + 0x3cd08, 0x3cd14, + 0x3cd1c, 0x3cd20, + 0x3cd3c, 0x3cd3c, + 0x3cd48, 0x3cd50, + 0x3d200, 0x3d20c, + 0x3d220, 0x3d220, + 0x3d240, 0x3d240, + 0x3d600, 0x3d60c, + 0x3da00, 0x3da1c, + 0x3de00, 0x3de20, + 0x3de38, 0x3de3c, + 0x3de80, 0x3de80, + 0x3de88, 0x3dea8, + 0x3deb0, 0x3deb4, + 0x3dec8, 0x3ded4, + 0x3dfb8, 0x3e004, + 0x3e200, 0x3e200, + 0x3e208, 0x3e240, + 0x3e248, 0x3e280, + 0x3e288, 0x3e2c0, + 0x3e2c8, 0x3e2fc, + 0x3e600, 0x3e630, + 0x3ea00, 0x3eabc, + 0x3eb00, 0x3eb10, + 0x3eb20, 0x3eb30, + 0x3eb40, 0x3eb50, + 0x3eb60, 0x3eb70, + 0x3f000, 0x3f028, + 0x3f030, 0x3f048, + 0x3f060, 0x3f068, + 0x3f070, 0x3f09c, + 0x3f0f0, 0x3f128, + 0x3f130, 0x3f148, + 0x3f160, 0x3f168, + 0x3f170, 0x3f19c, + 0x3f1f0, 0x3f238, + 0x3f240, 0x3f240, + 0x3f248, 0x3f250, + 0x3f25c, 0x3f264, + 0x3f270, 0x3f2b8, + 0x3f2c0, 0x3f2e4, + 0x3f2f8, 0x3f338, + 0x3f340, 0x3f340, + 0x3f348, 0x3f350, + 0x3f35c, 0x3f364, + 0x3f370, 0x3f3b8, + 0x3f3c0, 0x3f3e4, + 0x3f3f8, 0x3f428, + 0x3f430, 0x3f448, + 0x3f460, 0x3f468, + 0x3f470, 0x3f49c, + 0x3f4f0, 0x3f528, + 0x3f530, 0x3f548, + 0x3f560, 0x3f568, + 0x3f570, 0x3f59c, + 0x3f5f0, 0x3f638, + 0x3f640, 0x3f640, + 0x3f648, 0x3f650, + 0x3f65c, 0x3f664, + 0x3f670, 0x3f6b8, + 0x3f6c0, 0x3f6e4, + 0x3f6f8, 0x3f738, + 0x3f740, 0x3f740, + 0x3f748, 0x3f750, + 0x3f75c, 0x3f764, + 0x3f770, 0x3f7b8, + 0x3f7c0, 0x3f7e4, + 0x3f7f8, 0x3f7fc, + 0x3f814, 0x3f814, + 0x3f82c, 0x3f82c, + 0x3f880, 0x3f88c, + 0x3f8e8, 0x3f8ec, + 0x3f900, 0x3f928, + 0x3f930, 0x3f948, + 0x3f960, 0x3f968, + 0x3f970, 0x3f99c, + 0x3f9f0, 0x3fa38, + 0x3fa40, 0x3fa40, + 0x3fa48, 0x3fa50, + 0x3fa5c, 0x3fa64, + 0x3fa70, 0x3fab8, + 0x3fac0, 0x3fae4, + 0x3faf8, 0x3fb10, + 0x3fb28, 0x3fb28, + 0x3fb3c, 0x3fb50, + 0x3fbf0, 0x3fc10, + 0x3fc28, 0x3fc28, + 0x3fc3c, 0x3fc50, + 0x3fcf0, 0x3fcfc, + 0x40000, 0x4000c, + 0x40040, 0x40050, + 0x40060, 0x40068, + 0x4007c, 0x4008c, + 0x40094, 0x400b0, + 0x400c0, 0x40144, + 0x40180, 0x4018c, + 0x40200, 0x40254, + 0x40260, 0x40264, + 0x40270, 0x40288, + 0x40290, 0x40298, + 0x402ac, 0x402c8, + 0x402d0, 0x402e0, + 0x402f0, 0x402f0, + 0x40300, 0x4033c, + 0x403f8, 0x403fc, + 0x41304, 0x413c4, + 0x41400, 0x4140c, + 0x41414, 0x4141c, + 0x41480, 0x414d0, + 0x44000, 0x44054, + 0x4405c, 0x44078, + 0x440c0, 0x44174, + 0x44180, 0x441ac, + 0x441b4, 0x441b8, + 0x441c0, 0x44254, + 0x4425c, 0x44278, + 0x442c0, 0x44374, + 0x44380, 0x443ac, + 0x443b4, 0x443b8, + 0x443c0, 0x44454, + 0x4445c, 0x44478, + 0x444c0, 0x44574, + 0x44580, 0x445ac, + 0x445b4, 0x445b8, + 0x445c0, 0x44654, + 0x4465c, 0x44678, + 0x446c0, 0x44774, + 0x44780, 0x447ac, + 0x447b4, 0x447b8, + 0x447c0, 0x44854, + 0x4485c, 0x44878, + 0x448c0, 0x44974, + 0x44980, 0x449ac, + 0x449b4, 0x449b8, + 0x449c0, 0x449fc, + 0x45000, 0x45004, + 
0x45010, 0x45030, + 0x45040, 0x45060, + 0x45068, 0x45068, + 0x45080, 0x45084, + 0x450a0, 0x450b0, + 0x45200, 0x45204, + 0x45210, 0x45230, + 0x45240, 0x45260, + 0x45268, 0x45268, + 0x45280, 0x45284, + 0x452a0, 0x452b0, + 0x460c0, 0x460e4, + 0x47000, 0x4703c, + 0x47044, 0x4708c, + 0x47200, 0x47250, + 0x47400, 0x47408, + 0x47414, 0x47420, + 0x47600, 0x47618, + 0x47800, 0x47814, + 0x48000, 0x4800c, + 0x48040, 0x48050, + 0x48060, 0x48068, + 0x4807c, 0x4808c, + 0x48094, 0x480b0, + 0x480c0, 0x48144, + 0x48180, 0x4818c, + 0x48200, 0x48254, + 0x48260, 0x48264, + 0x48270, 0x48288, + 0x48290, 0x48298, + 0x482ac, 0x482c8, + 0x482d0, 0x482e0, + 0x482f0, 0x482f0, + 0x48300, 0x4833c, + 0x483f8, 0x483fc, + 0x49304, 0x493c4, + 0x49400, 0x4940c, + 0x49414, 0x4941c, + 0x49480, 0x494d0, + 0x4c000, 0x4c054, + 0x4c05c, 0x4c078, + 0x4c0c0, 0x4c174, + 0x4c180, 0x4c1ac, + 0x4c1b4, 0x4c1b8, + 0x4c1c0, 0x4c254, + 0x4c25c, 0x4c278, + 0x4c2c0, 0x4c374, + 0x4c380, 0x4c3ac, + 0x4c3b4, 0x4c3b8, + 0x4c3c0, 0x4c454, + 0x4c45c, 0x4c478, + 0x4c4c0, 0x4c574, + 0x4c580, 0x4c5ac, + 0x4c5b4, 0x4c5b8, + 0x4c5c0, 0x4c654, + 0x4c65c, 0x4c678, + 0x4c6c0, 0x4c774, + 0x4c780, 0x4c7ac, + 0x4c7b4, 0x4c7b8, + 0x4c7c0, 0x4c854, + 0x4c85c, 0x4c878, + 0x4c8c0, 0x4c974, + 0x4c980, 0x4c9ac, + 0x4c9b4, 0x4c9b8, + 0x4c9c0, 0x4c9fc, + 0x4d000, 0x4d004, + 0x4d010, 0x4d030, + 0x4d040, 0x4d060, + 0x4d068, 0x4d068, + 0x4d080, 0x4d084, + 0x4d0a0, 0x4d0b0, + 0x4d200, 0x4d204, + 0x4d210, 0x4d230, + 0x4d240, 0x4d260, + 0x4d268, 0x4d268, + 0x4d280, 0x4d284, + 0x4d2a0, 0x4d2b0, + 0x4e0c0, 0x4e0e4, + 0x4f000, 0x4f03c, + 0x4f044, 0x4f08c, + 0x4f200, 0x4f250, + 0x4f400, 0x4f408, + 0x4f414, 0x4f420, + 0x4f600, 0x4f618, + 0x4f800, 0x4f814, + 0x50000, 0x50084, + 0x50090, 0x500cc, + 0x50400, 0x50400, + 0x50800, 0x50884, + 0x50890, 0x508cc, + 0x50c00, 0x50c00, + 0x51000, 0x5101c, + 0x51300, 0x51308, + }; + + static const unsigned int t6_reg_ranges[] = { + 0x1008, 0x101c, + 0x1024, 0x10a8, + 0x10b4, 0x10f8, + 0x1100, 0x1114, + 0x111c, 0x112c, + 0x1138, 0x113c, + 0x1144, 0x114c, + 0x1180, 0x1184, + 0x1190, 0x1194, + 0x11a0, 0x11a4, + 0x11b0, 0x11b4, + 0x11fc, 0x1274, + 0x1280, 0x133c, + 0x1800, 0x18fc, + 0x3000, 0x302c, + 0x3060, 0x30b0, + 0x30b8, 0x30d8, + 0x30e0, 0x30fc, + 0x3140, 0x357c, + 0x35a8, 0x35cc, + 0x35ec, 0x35ec, + 0x3600, 0x5624, + 0x56cc, 0x56ec, + 0x56f4, 0x5720, + 0x5728, 0x575c, + 0x580c, 0x5814, + 0x5890, 0x589c, + 0x58a4, 0x58ac, + 0x58b8, 0x58bc, + 0x5940, 0x595c, + 0x5980, 0x598c, + 0x59b0, 0x59c8, + 0x59d0, 0x59dc, + 0x59fc, 0x5a18, + 0x5a60, 0x5a6c, + 0x5a80, 0x5a8c, + 0x5a94, 0x5a9c, + 0x5b94, 0x5bfc, + 0x5c10, 0x5e48, + 0x5e50, 0x5e94, + 0x5ea0, 0x5eb0, + 0x5ec0, 0x5ec0, + 0x5ec8, 0x5ed0, + 0x5ee0, 0x5ee0, + 0x5ef0, 0x5ef0, + 0x5f00, 0x5f00, + 0x6000, 0x6020, + 0x6028, 0x6040, + 0x6058, 0x609c, + 0x60a8, 0x619c, + 0x7700, 0x7798, + 0x77c0, 0x7880, + 0x78cc, 0x78fc, + 0x7b00, 0x7b58, + 0x7b60, 0x7b84, + 0x7b8c, 0x7c54, + 0x7d00, 0x7d38, + 0x7d40, 0x7d84, + 0x7d8c, 0x7ddc, + 0x7de4, 0x7e04, + 0x7e10, 0x7e1c, + 0x7e24, 0x7e38, + 0x7e40, 0x7e44, + 0x7e4c, 0x7e78, + 0x7e80, 0x7edc, + 0x7ee8, 0x7efc, + 0x8dc0, 0x8de4, + 0x8df8, 0x8e04, + 0x8e10, 0x8e84, + 0x8ea0, 0x8f88, + 0x8fb8, 0x9058, + 0x9060, 0x9060, + 0x9068, 0x90f8, + 0x9100, 0x9124, + 0x9400, 0x9470, + 0x9600, 0x9600, + 0x9608, 0x9638, + 0x9640, 0x9704, + 0x9710, 0x971c, + 0x9800, 0x9808, + 0x9820, 0x983c, + 0x9850, 0x9864, + 0x9c00, 0x9c6c, + 0x9c80, 0x9cec, + 0x9d00, 0x9d6c, + 0x9d80, 0x9dec, + 0x9e00, 0x9e6c, + 0x9e80, 0x9eec, + 0x9f00, 0x9f6c, + 0x9f80, 0xa020, + 0xd004, 0xd03c, + 
0xd100, 0xd118, + 0xd200, 0xd214, + 0xd220, 0xd234, + 0xd240, 0xd254, + 0xd260, 0xd274, + 0xd280, 0xd294, + 0xd2a0, 0xd2b4, + 0xd2c0, 0xd2d4, + 0xd2e0, 0xd2f4, + 0xd300, 0xd31c, + 0xdfc0, 0xdfe0, + 0xe000, 0xf008, + 0xf010, 0xf018, + 0xf020, 0xf028, + 0x11000, 0x11014, + 0x11048, 0x1106c, + 0x11074, 0x11088, + 0x11098, 0x11120, + 0x1112c, 0x1117c, + 0x11190, 0x112e0, + 0x11300, 0x1130c, + 0x12000, 0x1206c, + 0x19040, 0x1906c, + 0x19078, 0x19080, + 0x1908c, 0x190e8, + 0x190f0, 0x190f8, + 0x19100, 0x19110, + 0x19120, 0x19124, + 0x19150, 0x19194, + 0x1919c, 0x191b0, + 0x191d0, 0x191e8, + 0x19238, 0x19290, + 0x192a4, 0x192b0, + 0x192bc, 0x192bc, + 0x19348, 0x1934c, + 0x193f8, 0x19418, + 0x19420, 0x19428, + 0x19430, 0x19444, + 0x1944c, 0x1946c, + 0x19474, 0x19474, + 0x19490, 0x194cc, + 0x194f0, 0x194f8, + 0x19c00, 0x19c48, + 0x19c50, 0x19c80, + 0x19c94, 0x19c98, + 0x19ca0, 0x19cbc, + 0x19ce4, 0x19ce4, + 0x19cf0, 0x19cf8, + 0x19d00, 0x19d28, + 0x19d50, 0x19d78, + 0x19d94, 0x19d98, + 0x19da0, 0x19dc8, + 0x19df0, 0x19e10, + 0x19e50, 0x19e6c, + 0x19ea0, 0x19ebc, + 0x19ec4, 0x19ef4, + 0x19f04, 0x19f2c, + 0x19f34, 0x19f34, + 0x19f40, 0x19f50, + 0x19f90, 0x19fac, + 0x19fc4, 0x19fc8, + 0x19fd0, 0x19fe4, + 0x1a000, 0x1a004, + 0x1a010, 0x1a06c, + 0x1a0b0, 0x1a0e4, + 0x1a0ec, 0x1a0f8, + 0x1a100, 0x1a108, + 0x1a114, 0x1a120, + 0x1a128, 0x1a130, + 0x1a138, 0x1a138, + 0x1a190, 0x1a1c4, + 0x1a1fc, 0x1a1fc, + 0x1e008, 0x1e00c, + 0x1e040, 0x1e044, + 0x1e04c, 0x1e04c, + 0x1e284, 0x1e290, + 0x1e2c0, 0x1e2c0, + 0x1e2e0, 0x1e2e0, + 0x1e300, 0x1e384, + 0x1e3c0, 0x1e3c8, + 0x1e408, 0x1e40c, + 0x1e440, 0x1e444, + 0x1e44c, 0x1e44c, + 0x1e684, 0x1e690, + 0x1e6c0, 0x1e6c0, + 0x1e6e0, 0x1e6e0, + 0x1e700, 0x1e784, + 0x1e7c0, 0x1e7c8, + 0x1e808, 0x1e80c, + 0x1e840, 0x1e844, + 0x1e84c, 0x1e84c, + 0x1ea84, 0x1ea90, + 0x1eac0, 0x1eac0, + 0x1eae0, 0x1eae0, + 0x1eb00, 0x1eb84, + 0x1ebc0, 0x1ebc8, + 0x1ec08, 0x1ec0c, + 0x1ec40, 0x1ec44, + 0x1ec4c, 0x1ec4c, + 0x1ee84, 0x1ee90, + 0x1eec0, 0x1eec0, + 0x1eee0, 0x1eee0, + 0x1ef00, 0x1ef84, + 0x1efc0, 0x1efc8, + 0x1f008, 0x1f00c, + 0x1f040, 0x1f044, + 0x1f04c, 0x1f04c, + 0x1f284, 0x1f290, + 0x1f2c0, 0x1f2c0, + 0x1f2e0, 0x1f2e0, + 0x1f300, 0x1f384, + 0x1f3c0, 0x1f3c8, + 0x1f408, 0x1f40c, + 0x1f440, 0x1f444, + 0x1f44c, 0x1f44c, + 0x1f684, 0x1f690, + 0x1f6c0, 0x1f6c0, + 0x1f6e0, 0x1f6e0, + 0x1f700, 0x1f784, + 0x1f7c0, 0x1f7c8, + 0x1f808, 0x1f80c, + 0x1f840, 0x1f844, + 0x1f84c, 0x1f84c, + 0x1fa84, 0x1fa90, + 0x1fac0, 0x1fac0, + 0x1fae0, 0x1fae0, + 0x1fb00, 0x1fb84, + 0x1fbc0, 0x1fbc8, + 0x1fc08, 0x1fc0c, + 0x1fc40, 0x1fc44, + 0x1fc4c, 0x1fc4c, + 0x1fe84, 0x1fe90, + 0x1fec0, 0x1fec0, + 0x1fee0, 0x1fee0, + 0x1ff00, 0x1ff84, + 0x1ffc0, 0x1ffc8, + 0x30000, 0x30030, + 0x30100, 0x30168, + 0x30190, 0x301a0, + 0x301a8, 0x301b8, + 0x301c4, 0x301c8, + 0x301d0, 0x301d0, + 0x30200, 0x30320, + 0x30400, 0x304b4, + 0x304c0, 0x3052c, + 0x30540, 0x3061c, + 0x30800, 0x308a0, + 0x308c0, 0x30908, + 0x30910, 0x309b8, + 0x30a00, 0x30a04, + 0x30a0c, 0x30a14, + 0x30a1c, 0x30a2c, + 0x30a44, 0x30a50, + 0x30a74, 0x30a74, + 0x30a7c, 0x30afc, + 0x30b08, 0x30c24, + 0x30d00, 0x30d14, + 0x30d1c, 0x30d3c, + 0x30d44, 0x30d4c, + 0x30d54, 0x30d74, + 0x30d7c, 0x30d7c, + 0x30de0, 0x30de0, + 0x30e00, 0x30ed4, + 0x30f00, 0x30fa4, + 0x30fc0, 0x30fc4, + 0x31000, 0x31004, + 0x31080, 0x310fc, + 0x31208, 0x31220, + 0x3123c, 0x31254, + 0x31300, 0x31300, + 0x31308, 0x3131c, + 0x31338, 0x3133c, + 0x31380, 0x31380, + 0x31388, 0x313a8, + 0x313b4, 0x313b4, + 0x31400, 0x31420, + 0x31438, 0x3143c, + 0x31480, 0x31480, + 0x314a8, 0x314a8, + 
0x314b0, 0x314b4, + 0x314c8, 0x314d4, + 0x31a40, 0x31a4c, + 0x31af0, 0x31b20, + 0x31b38, 0x31b3c, + 0x31b80, 0x31b80, + 0x31ba8, 0x31ba8, + 0x31bb0, 0x31bb4, + 0x31bc8, 0x31bd4, + 0x32140, 0x3218c, + 0x321f0, 0x321f4, + 0x32200, 0x32200, + 0x32218, 0x32218, + 0x32400, 0x32400, + 0x32408, 0x3241c, + 0x32618, 0x32620, + 0x32664, 0x32664, + 0x326a8, 0x326a8, + 0x326ec, 0x326ec, + 0x32a00, 0x32abc, + 0x32b00, 0x32b38, + 0x32b20, 0x32b38, + 0x32b40, 0x32b58, + 0x32b60, 0x32b78, + 0x32c00, 0x32c00, + 0x32c08, 0x32c3c, + 0x33000, 0x3302c, + 0x33034, 0x33050, + 0x33058, 0x33058, + 0x33060, 0x3308c, + 0x3309c, 0x330ac, + 0x330c0, 0x330c0, + 0x330c8, 0x330d0, + 0x330d8, 0x330e0, + 0x330ec, 0x3312c, + 0x33134, 0x33150, + 0x33158, 0x33158, + 0x33160, 0x3318c, + 0x3319c, 0x331ac, + 0x331c0, 0x331c0, + 0x331c8, 0x331d0, + 0x331d8, 0x331e0, + 0x331ec, 0x33290, + 0x33298, 0x332c4, + 0x332e4, 0x33390, + 0x33398, 0x333c4, + 0x333e4, 0x3342c, + 0x33434, 0x33450, + 0x33458, 0x33458, + 0x33460, 0x3348c, + 0x3349c, 0x334ac, + 0x334c0, 0x334c0, + 0x334c8, 0x334d0, + 0x334d8, 0x334e0, + 0x334ec, 0x3352c, + 0x33534, 0x33550, + 0x33558, 0x33558, + 0x33560, 0x3358c, + 0x3359c, 0x335ac, + 0x335c0, 0x335c0, + 0x335c8, 0x335d0, + 0x335d8, 0x335e0, + 0x335ec, 0x33690, + 0x33698, 0x336c4, + 0x336e4, 0x33790, + 0x33798, 0x337c4, + 0x337e4, 0x337fc, + 0x33814, 0x33814, + 0x33854, 0x33868, + 0x33880, 0x3388c, + 0x338c0, 0x338d0, + 0x338e8, 0x338ec, + 0x33900, 0x3392c, + 0x33934, 0x33950, + 0x33958, 0x33958, + 0x33960, 0x3398c, + 0x3399c, 0x339ac, + 0x339c0, 0x339c0, + 0x339c8, 0x339d0, + 0x339d8, 0x339e0, + 0x339ec, 0x33a90, + 0x33a98, 0x33ac4, + 0x33ae4, 0x33b10, + 0x33b24, 0x33b28, + 0x33b38, 0x33b50, + 0x33bf0, 0x33c10, + 0x33c24, 0x33c28, + 0x33c38, 0x33c50, + 0x33cf0, 0x33cfc, + 0x34000, 0x34030, + 0x34100, 0x34168, + 0x34190, 0x341a0, + 0x341a8, 0x341b8, + 0x341c4, 0x341c8, + 0x341d0, 0x341d0, + 0x34200, 0x34320, + 0x34400, 0x344b4, + 0x344c0, 0x3452c, + 0x34540, 0x3461c, + 0x34800, 0x348a0, + 0x348c0, 0x34908, + 0x34910, 0x349b8, + 0x34a00, 0x34a04, + 0x34a0c, 0x34a14, + 0x34a1c, 0x34a2c, + 0x34a44, 0x34a50, + 0x34a74, 0x34a74, + 0x34a7c, 0x34afc, + 0x34b08, 0x34c24, + 0x34d00, 0x34d14, + 0x34d1c, 0x34d3c, + 0x34d44, 0x34d4c, + 0x34d54, 0x34d74, + 0x34d7c, 0x34d7c, + 0x34de0, 0x34de0, + 0x34e00, 0x34ed4, + 0x34f00, 0x34fa4, + 0x34fc0, 0x34fc4, + 0x35000, 0x35004, + 0x35080, 0x350fc, + 0x35208, 0x35220, + 0x3523c, 0x35254, + 0x35300, 0x35300, + 0x35308, 0x3531c, + 0x35338, 0x3533c, + 0x35380, 0x35380, + 0x35388, 0x353a8, + 0x353b4, 0x353b4, + 0x35400, 0x35420, + 0x35438, 0x3543c, + 0x35480, 0x35480, + 0x354a8, 0x354a8, + 0x354b0, 0x354b4, + 0x354c8, 0x354d4, + 0x35a40, 0x35a4c, + 0x35af0, 0x35b20, + 0x35b38, 0x35b3c, + 0x35b80, 0x35b80, + 0x35ba8, 0x35ba8, + 0x35bb0, 0x35bb4, + 0x35bc8, 0x35bd4, + 0x36140, 0x3618c, + 0x361f0, 0x361f4, + 0x36200, 0x36200, + 0x36218, 0x36218, + 0x36400, 0x36400, + 0x36408, 0x3641c, + 0x36618, 0x36620, + 0x36664, 0x36664, + 0x366a8, 0x366a8, + 0x366ec, 0x366ec, + 0x36a00, 0x36abc, + 0x36b00, 0x36b38, + 0x36b20, 0x36b38, + 0x36b40, 0x36b58, + 0x36b60, 0x36b78, + 0x36c00, 0x36c00, + 0x36c08, 0x36c3c, + 0x37000, 0x3702c, + 0x37034, 0x37050, + 0x37058, 0x37058, + 0x37060, 0x3708c, + 0x3709c, 0x370ac, + 0x370c0, 0x370c0, + 0x370c8, 0x370d0, + 0x370d8, 0x370e0, + 0x370ec, 0x3712c, + 0x37134, 0x37150, + 0x37158, 0x37158, + 0x37160, 0x3718c, + 0x3719c, 0x371ac, + 0x371c0, 0x371c0, + 0x371c8, 0x371d0, + 0x371d8, 0x371e0, + 0x371ec, 0x37290, + 0x37298, 0x372c4, + 0x372e4, 0x37390, + 0x37398, 
0x373c4, + 0x373e4, 0x3742c, + 0x37434, 0x37450, + 0x37458, 0x37458, + 0x37460, 0x3748c, + 0x3749c, 0x374ac, + 0x374c0, 0x374c0, + 0x374c8, 0x374d0, + 0x374d8, 0x374e0, + 0x374ec, 0x3752c, + 0x37534, 0x37550, + 0x37558, 0x37558, + 0x37560, 0x3758c, + 0x3759c, 0x375ac, + 0x375c0, 0x375c0, + 0x375c8, 0x375d0, + 0x375d8, 0x375e0, + 0x375ec, 0x37690, + 0x37698, 0x376c4, + 0x376e4, 0x37790, + 0x37798, 0x377c4, + 0x377e4, 0x377fc, + 0x37814, 0x37814, + 0x37854, 0x37868, + 0x37880, 0x3788c, + 0x378c0, 0x378d0, + 0x378e8, 0x378ec, + 0x37900, 0x3792c, + 0x37934, 0x37950, + 0x37958, 0x37958, + 0x37960, 0x3798c, + 0x3799c, 0x379ac, + 0x379c0, 0x379c0, + 0x379c8, 0x379d0, + 0x379d8, 0x379e0, + 0x379ec, 0x37a90, + 0x37a98, 0x37ac4, + 0x37ae4, 0x37b10, + 0x37b24, 0x37b28, + 0x37b38, 0x37b50, + 0x37bf0, 0x37c10, + 0x37c24, 0x37c28, + 0x37c38, 0x37c50, + 0x37cf0, 0x37cfc, + 0x40040, 0x40040, + 0x40080, 0x40084, + 0x40100, 0x40100, + 0x40140, 0x401bc, + 0x40200, 0x40214, + 0x40228, 0x40228, + 0x40240, 0x40258, + 0x40280, 0x40280, + 0x40304, 0x40304, + 0x40330, 0x4033c, + 0x41304, 0x413c8, + 0x413d0, 0x413dc, + 0x413f0, 0x413f0, + 0x41400, 0x4140c, + 0x41414, 0x4141c, + 0x41480, 0x414d0, + 0x44000, 0x4407c, + 0x440c0, 0x441ac, + 0x441b4, 0x4427c, + 0x442c0, 0x443ac, + 0x443b4, 0x4447c, + 0x444c0, 0x445ac, + 0x445b4, 0x4467c, + 0x446c0, 0x447ac, + 0x447b4, 0x4487c, + 0x448c0, 0x449ac, + 0x449b4, 0x44a7c, + 0x44ac0, 0x44bac, + 0x44bb4, 0x44c7c, + 0x44cc0, 0x44dac, + 0x44db4, 0x44e7c, + 0x44ec0, 0x44fac, + 0x44fb4, 0x4507c, + 0x450c0, 0x451ac, + 0x451b4, 0x451fc, + 0x45800, 0x45804, + 0x45810, 0x45830, + 0x45840, 0x45860, + 0x45868, 0x45868, + 0x45880, 0x45884, + 0x458a0, 0x458b0, + 0x45a00, 0x45a04, + 0x45a10, 0x45a30, + 0x45a40, 0x45a60, + 0x45a68, 0x45a68, + 0x45a80, 0x45a84, + 0x45aa0, 0x45ab0, + 0x460c0, 0x460e4, + 0x47000, 0x4703c, + 0x47044, 0x4708c, + 0x47200, 0x47250, + 0x47400, 0x47408, + 0x47414, 0x47420, + 0x47600, 0x47618, + 0x47800, 0x47814, + 0x47820, 0x4782c, + 0x50000, 0x50084, + 0x50090, 0x500cc, + 0x50300, 0x50384, + 0x50400, 0x50400, + 0x50800, 0x50884, + 0x50890, 0x508cc, + 0x50b00, 0x50b84, + 0x50c00, 0x50c00, + 0x51000, 0x51020, + 0x51028, 0x510b0, + 0x51300, 0x51324, + }; + + u32 *buf_end = (u32 *)((char *)buf + buf_size); + const unsigned int *reg_ranges; + int reg_ranges_size, range; + unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip); + + /* Select the right set of register ranges to dump depending on the + * adapter chip type. + */ + switch (chip_version) { + case CHELSIO_T5: + reg_ranges = t5_reg_ranges; + reg_ranges_size = ARRAY_SIZE(t5_reg_ranges); + break; + + case CHELSIO_T6: + reg_ranges = t6_reg_ranges; + reg_ranges_size = ARRAY_SIZE(t6_reg_ranges); + break; + + default: + dev_err(adap, + "Unsupported chip version %d\n", chip_version); + return; + } + + /* Clear the register buffer and insert the appropriate register + * values selected by the above register ranges. + */ + memset(buf, 0, buf_size); + for (range = 0; range < reg_ranges_size; range += 2) { + unsigned int reg = reg_ranges[range]; + unsigned int last_reg = reg_ranges[range + 1]; + u32 *bufp = (u32 *)((char *)buf + reg); + + /* Iterate across the register range filling in the register + * buffer but don't write past the end of the register buffer. + */ + while (reg <= last_reg && bufp < buf_end) { + *bufp++ = t4_read_reg(adap, reg); + reg += sizeof(u32); + } + } +} + +/* EEPROM reads take a few tens of us while writes can take a bit over 5 ms. 
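+ *
+ * The poll budget below therefore allows EEPROM_MAX_POLL (5000) spins of
+ * EEPROM_DELAY (10 us) each, i.e. 5000 * 10 us = 50 ms per VPD access,
+ * comfortably above the worst-case write latency.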
*/ +#define EEPROM_DELAY 10 /* 10us per poll spin */ +#define EEPROM_MAX_POLL 5000 /* x 5000 == 50ms */ + +#define EEPROM_STAT_ADDR 0x7bfc + +/** + * Small utility function to wait till any outstanding VPD Access is complete. + * We have a per-adapter state variable "VPD Busy" to indicate when we have a + * VPD Access in flight. This allows us to handle the problem of having a + * previous VPD Access time out and prevent an attempt to inject a new VPD + * Request before any in-flight VPD request has completed. + */ +static int t4_seeprom_wait(struct adapter *adapter) +{ + unsigned int base = adapter->params.pci.vpd_cap_addr; + int max_poll; + + /* If no VPD Access is in flight, we can just return success right + * away. + */ + if (!adapter->vpd_busy) + return 0; + + /* Poll the VPD Capability Address/Flag register waiting for it + * to indicate that the operation is complete. + */ + max_poll = EEPROM_MAX_POLL; + do { + u16 val; + + udelay(EEPROM_DELAY); + t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val); + + /* If the operation is complete, mark the VPD as no longer + * busy and return success. + */ + if ((val & PCI_VPD_ADDR_F) == adapter->vpd_flag) { + adapter->vpd_busy = 0; + return 0; + } + } while (--max_poll); + + /* Failure! Note that we leave the VPD Busy status set in order to + * avoid pushing a new VPD Access request into the VPD Capability till + * the current operation eventually succeeds. It's a bug to issue a + * new request when an existing request is in flight and will result + * in corrupt hardware state. + */ + return -ETIMEDOUT; +} + +/** + * t4_seeprom_read - read a serial EEPROM location + * @adapter: adapter to read + * @addr: EEPROM virtual address + * @data: where to store the read data + * + * Read a 32-bit word from a location in serial EEPROM using the card's PCI + * VPD capability. Note that this function must be called with a virtual + * address. + */ +int t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data) +{ + unsigned int base = adapter->params.pci.vpd_cap_addr; + int ret; + + /* VPD Accesses must alway be 4-byte aligned! + */ + if (addr >= EEPROMVSIZE || (addr & 3)) + return -EINVAL; + + /* Wait for any previous operation which may still be in flight to + * complete. + */ + ret = t4_seeprom_wait(adapter); + if (ret) { + dev_err(adapter, "VPD still busy from previous operation\n"); + return ret; + } + + /* Issue our new VPD Read request, mark the VPD as being busy and wait + * for our request to complete. If it doesn't complete, note the + * error and return it to our caller. Note that we do not reset the + * VPD Busy status! + */ + t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR, (u16)addr); + adapter->vpd_busy = 1; + adapter->vpd_flag = PCI_VPD_ADDR_F; + ret = t4_seeprom_wait(adapter); + if (ret) { + dev_err(adapter, "VPD read of address %#x failed\n", addr); + return ret; + } + + /* Grab the returned data, swizzle it into our endianness and + * return success. + */ + t4_os_pci_read_cfg4(adapter, base + PCI_VPD_DATA, data); + *data = le32_to_cpu(*data); + return 0; +} + +/** + * t4_seeprom_write - write a serial EEPROM location + * @adapter: adapter to write + * @addr: virtual EEPROM address + * @data: value to write + * + * Write a 32-bit word to a location in serial EEPROM using the card's PCI + * VPD capability. Note that this function must be called with a virtual + * address. 
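+ *
+ * A hedged usage sketch (error handling elided; t4_seeprom_wp() is defined
+ * just below in this file):
+ *
+ *   if (!t4_seeprom_wp(adapter, 0)) {
+ *           t4_seeprom_write(adapter, addr, val);
+ *           t4_seeprom_wp(adapter, 1);
+ *   }
+ *
+ * As with t4_seeprom_read(), @addr must be a 4-byte aligned virtual EEPROM
+ * address below EEPROMVSIZE.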
+ */ +int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data) +{ + unsigned int base = adapter->params.pci.vpd_cap_addr; + int ret; + u32 stats_reg = 0; + int max_poll; + + /* VPD Accesses must always be 4-byte aligned! + */ + if (addr >= EEPROMVSIZE || (addr & 3)) + return -EINVAL; + + /* Wait for any previous operation which may still be in flight to + * complete. + */ + ret = t4_seeprom_wait(adapter); + if (ret) { + dev_err(adapter, "VPD still busy from previous operation\n"); + return ret; + } + + /* Issue our new VPD Write request, mark the VPD as being busy and wait + * for our request to complete. If it doesn't complete, note the + * error and return it to our caller. Note that we do not reset the + * VPD Busy status! + */ + t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA, + cpu_to_le32(data)); + t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR, + (u16)addr | PCI_VPD_ADDR_F); + adapter->vpd_busy = 1; + adapter->vpd_flag = 0; + ret = t4_seeprom_wait(adapter); + if (ret) { + dev_err(adapter, "VPD write of address %#x failed\n", addr); + return ret; + } + + /* Reset PCI_VPD_DATA register after a transaction and wait for our + * request to complete. If it doesn't complete, return error. + */ + t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA, 0); + max_poll = EEPROM_MAX_POLL; + do { + udelay(EEPROM_DELAY); + t4_seeprom_read(adapter, EEPROM_STAT_ADDR, &stats_reg); + } while ((stats_reg & 0x1) && --max_poll); + if (!max_poll) + return -ETIMEDOUT; + + /* Return success! */ + return 0; +} + +/** + * t4_seeprom_wp - enable/disable EEPROM write protection + * @adapter: the adapter + * @enable: whether to enable or disable write protection + * + * Enables or disables write protection on the serial EEPROM. + */ +int t4_seeprom_wp(struct adapter *adapter, int enable) +{ + return t4_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0); +} + +/** + * t4_fw_tp_pio_rw - Access TP PIO through LDST + * @adap: the adapter + * @vals: where the indirect register values are stored/written + * @nregs: how many indirect registers to read/write + * @start_index: index of first indirect register to read/write + * @rw: Read (1) or Write (0) + * + * Access TP PIO registers through LDST + */ +void t4_fw_tp_pio_rw(struct adapter *adap, u32 *vals, unsigned int nregs, + unsigned int start_index, unsigned int rw) +{ + int cmd = FW_LDST_ADDRSPC_TP_PIO; + struct fw_ldst_cmd c; + unsigned int i; + int ret; + + for (i = 0 ; i < nregs; i++) { + memset(&c, 0, sizeof(c)); + c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) | + F_FW_CMD_REQUEST | + (rw ? F_FW_CMD_READ : + F_FW_CMD_WRITE) | + V_FW_LDST_CMD_ADDRSPACE(cmd)); + c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c)); + + c.u.addrval.addr = cpu_to_be32(start_index + i); + c.u.addrval.val = rw ? 0 : cpu_to_be32(vals[i]); + ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c); + if (ret == 0) { + if (rw) + vals[i] = be32_to_cpu(c.u.addrval.val); + } + } +} + +/** + * t4_read_rss_key - read the global RSS key + * @adap: the adapter + * @key: 10-entry array holding the 320-bit RSS key + * + * Reads the global 320-bit RSS key. + */ +void t4_read_rss_key(struct adapter *adap, u32 *key) +{ + t4_fw_tp_pio_rw(adap, key, 10, A_TP_RSS_SECRET_KEY0, 1); +} + +/** + * t4_write_rss_key - program one of the RSS keys + * @adap: the adapter + * @key: 10-entry array holding the 320-bit RSS key + * @idx: which RSS key to write + * + * Writes one of the RSS keys with the given 320-bit value.
If @idx is + * 0..15 the corresponding entry in the RSS key table is written, + * otherwise the global RSS key is written. + */ +void t4_write_rss_key(struct adapter *adap, u32 *key, int idx) +{ + u32 vrt = t4_read_reg(adap, A_TP_RSS_CONFIG_VRT); + u8 rss_key_addr_cnt = 16; + + /* T6 and later: for KeyMode 3 (per-vf and per-vf scramble), + * allows access to key addresses 16-63 by using KeyWrAddrX + * as index[5:4](upper 2) into key table + */ + if ((CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) && + (vrt & F_KEYEXTEND) && (G_KEYMODE(vrt) == 3)) + rss_key_addr_cnt = 32; + + t4_fw_tp_pio_rw(adap, key, 10, A_TP_RSS_SECRET_KEY0, 0); + + if (idx >= 0 && idx < rss_key_addr_cnt) { + if (rss_key_addr_cnt > 16) + t4_write_reg(adap, A_TP_RSS_CONFIG_VRT, + V_KEYWRADDRX(idx >> 4) | + V_T6_VFWRADDR(idx) | F_KEYWREN); + else + t4_write_reg(adap, A_TP_RSS_CONFIG_VRT, + V_KEYWRADDR(idx) | F_KEYWREN); + } +} + +/** + * t4_config_rss_range - configure a portion of the RSS mapping table + * @adapter: the adapter + * @mbox: mbox to use for the FW command + * @viid: virtual interface whose RSS subtable is to be written + * @start: start entry in the table to write + * @n: how many table entries to write + * @rspq: values for the "response queue" (Ingress Queue) lookup table + * @nrspq: number of values in @rspq + * + * Programs the selected part of the VI's RSS mapping table with the + * provided values. If @nrspq < @n the supplied values are used repeatedly + * until the full table range is populated. + * + * The caller must ensure the values in @rspq are in the range allowed for + * @viid. + */ +int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid, + int start, int n, const u16 *rspq, unsigned int nrspq) +{ + int ret; + const u16 *rsp = rspq; + const u16 *rsp_end = rspq + nrspq; + struct fw_rss_ind_tbl_cmd cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_IND_TBL_CMD) | + F_FW_CMD_REQUEST | F_FW_CMD_WRITE | + V_FW_RSS_IND_TBL_CMD_VIID(viid)); + cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd)); + + /* + * Each firmware RSS command can accommodate up to 32 RSS Ingress + * Queue Identifiers. These Ingress Queue IDs are packed three to + * a 32-bit word as 10-bit values with the upper remaining 2 bits + * reserved. + */ + while (n > 0) { + int nq = min(n, 32); + int nq_packed = 0; + __be32 *qp = &cmd.iq0_to_iq2; + + /* + * Set up the firmware RSS command header to send the next + * "nq" Ingress Queue IDs to the firmware. + */ + cmd.niqid = cpu_to_be16(nq); + cmd.startidx = cpu_to_be16(start); + + /* + * "nq" more done for the start of the next loop. + */ + start += nq; + n -= nq; + + /* + * While there are still Ingress Queue IDs to stuff into the + * current firmware RSS command, retrieve them from the + * Ingress Queue ID array and insert them into the command. + */ + while (nq > 0) { + /* + * Grab up to the next 3 Ingress Queue IDs (wrapping + * around the Ingress Queue ID array if necessary) and + * insert them into the firmware RSS command at the + * current 3-tuple position within the commad. 
+ */ + u16 qbuf[3]; + u16 *qbp = qbuf; + int nqbuf = min(3, nq); + + nq -= nqbuf; + qbuf[0] = 0; + qbuf[1] = 0; + qbuf[2] = 0; + while (nqbuf && nq_packed < 32) { + nqbuf--; + nq_packed++; + *qbp++ = *rsp++; + if (rsp >= rsp_end) + rsp = rspq; + } + *qp++ = cpu_to_be32(V_FW_RSS_IND_TBL_CMD_IQ0(qbuf[0]) | + V_FW_RSS_IND_TBL_CMD_IQ1(qbuf[1]) | + V_FW_RSS_IND_TBL_CMD_IQ2(qbuf[2])); + } + + /* + * Send this portion of the RSS table update to the firmware; + * bail out on any errors. + */ + if (is_pf4(adapter)) + ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), + NULL); + else + ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL); + if (ret) + return ret; + } + + return 0; +} + +/** + * t4_config_vi_rss - configure per VI RSS settings + * @adapter: the adapter + * @mbox: mbox to use for the FW command + * @viid: the VI id + * @flags: RSS flags + * @defq: id of the default RSS queue for the VI. + * + * Configures VI-specific RSS properties. + */ +int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid, + unsigned int flags, unsigned int defq) +{ + struct fw_rss_vi_config_cmd c; + + memset(&c, 0, sizeof(c)); + c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) | + F_FW_CMD_REQUEST | F_FW_CMD_WRITE | + V_FW_RSS_VI_CONFIG_CMD_VIID(viid)); + c.retval_len16 = cpu_to_be32(FW_LEN16(c)); + c.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(flags | + V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(defq)); + if (is_pf4(adapter)) + return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL); + else + return t4vf_wr_mbox(adapter, &c, sizeof(c), NULL); +} + +/** + * t4_read_config_vi_rss - read the configured per VI RSS settings + * @adapter: the adapter + * @mbox: mbox to use for the FW command + * @viid: the VI id + * @flags: where to place the configured flags + * @defq: where to place the id of the default RSS queue for the VI. + * + * Read configured VI-specific RSS properties. + */ +int t4_read_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid, + u64 *flags, unsigned int *defq) +{ + struct fw_rss_vi_config_cmd c; + unsigned int result; + int ret; + + memset(&c, 0, sizeof(c)); + c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) | + F_FW_CMD_REQUEST | F_FW_CMD_READ | + V_FW_RSS_VI_CONFIG_CMD_VIID(viid)); + c.retval_len16 = cpu_to_be32(FW_LEN16(c)); + ret = t4_wr_mbox(adapter, mbox, &c, sizeof(c), &c); + if (!ret) { + result = be32_to_cpu(c.u.basicvirtual.defaultq_to_udpen); + if (defq) + *defq = G_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(result); + if (flags) + *flags = result & M_FW_RSS_VI_CONFIG_CMD_DEFAULTQ; + } + + return ret; +} + +/** + * init_cong_ctrl - initialize congestion control parameters + * @a: the alpha values for congestion control + * @b: the beta values for congestion control + * + * Initialize the congestion control parameters.
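+ *
+ * Sketch of the expected call (caller-owned arrays; entries 0..31 of
+ * both tables are filled in):
+ *
+ *   unsigned short alpha[32], beta[32];
+ *
+ *   init_cong_ctrl(alpha, beta);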
+ */ +static void init_cong_ctrl(unsigned short *a, unsigned short *b) +{ + int i; + + for (i = 0; i < 9; i++) { + a[i] = 1; + b[i] = 0; + } + + a[9] = 2; + a[10] = 3; + a[11] = 4; + a[12] = 5; + a[13] = 6; + a[14] = 7; + a[15] = 8; + a[16] = 9; + a[17] = 10; + a[18] = 14; + a[19] = 17; + a[20] = 21; + a[21] = 25; + a[22] = 30; + a[23] = 35; + a[24] = 45; + a[25] = 60; + a[26] = 80; + a[27] = 100; + a[28] = 200; + a[29] = 300; + a[30] = 400; + a[31] = 500; + + b[9] = 1; + b[10] = 1; + b[11] = 2; + b[12] = 2; + b[13] = 3; + b[14] = 3; + b[15] = 3; + b[16] = 3; + b[17] = 4; + b[18] = 4; + b[19] = 4; + b[20] = 4; + b[21] = 4; + b[22] = 5; + b[23] = 5; + b[24] = 5; + b[25] = 5; + b[26] = 5; + b[27] = 5; + b[28] = 6; + b[29] = 6; + b[30] = 7; + b[31] = 7; +} + +#define INIT_CMD(var, cmd, rd_wr) do { \ + (var).op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_##cmd##_CMD) | \ + F_FW_CMD_REQUEST | F_FW_CMD_##rd_wr); \ + (var).retval_len16 = cpu_to_be32(FW_LEN16(var)); \ +} while (0) + +int t4_get_core_clock(struct adapter *adapter, struct vpd_params *p) +{ + u32 cclk_param, cclk_val; + int ret; + + /* + * Ask firmware for the Core Clock since it knows how to translate the + * Reference Clock ('V2') VPD field into a Core Clock value ... + */ + cclk_param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | + V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK)); + ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0, + 1, &cclk_param, &cclk_val); + if (ret) { + dev_err(adapter, "%s: error in fetching from coreclock - %d\n", + __func__, ret); + return ret; + } + + p->cclk = cclk_val; + dev_debug(adapter, "%s: p->cclk = %u\n", __func__, p->cclk); + return 0; +} + +/** + * t4_get_pfres - retrieve VF resource limits + * @adapter: the adapter + * + * Retrieves configured resource limits and capabilities for a physical + * function. The results are stored in @adapter->pfres. + */ +int t4_get_pfres(struct adapter *adapter) +{ + struct pf_resources *pfres = &adapter->params.pfres; + struct fw_pfvf_cmd cmd, rpl; + u32 word; + int v; + + /* + * Execute PFVF Read command to get VF resource limits; bail out early + * with error on command failure. + */ + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PFVF_CMD) | + F_FW_CMD_REQUEST | + F_FW_CMD_READ | + V_FW_PFVF_CMD_PFN(adapter->pf) | + V_FW_PFVF_CMD_VFN(0)); + cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd)); + v = t4_wr_mbox(adapter, adapter->mbox, &cmd, sizeof(cmd), &rpl); + if (v != FW_SUCCESS) + return v; + + /* + * Extract PF resource limits and return success. + */ + word = be32_to_cpu(rpl.niqflint_niq); + pfres->niqflint = G_FW_PFVF_CMD_NIQFLINT(word); + + word = be32_to_cpu(rpl.type_to_neq); + pfres->neq = G_FW_PFVF_CMD_NEQ(word); + return 0; +} + +/* serial flash and firmware constants and flash config file constants */ +enum { + SF_ATTEMPTS = 10, /* max retries for SF operations */ + + /* flash command opcodes */ + SF_PROG_PAGE = 2, /* program page */ + SF_WR_DISABLE = 4, /* disable writes */ + SF_RD_STATUS = 5, /* read status register */ + SF_WR_ENABLE = 6, /* enable writes */ + SF_RD_DATA_FAST = 0xb, /* read flash */ + SF_RD_ID = 0x9f, /* read ID */ + SF_ERASE_SECTOR = 0xd8, /* erase sector */ +}; + +/** + * sf1_read - read data from the serial flash + * @adapter: the adapter + * @byte_cnt: number of bytes to read + * @cont: whether another operation will be chained + * @lock: whether to lock SF for PL access only + * @valp: where to store the read data + * + * Reads up to 4 bytes of data from the serial flash. 
The location of + * the read needs to be specified prior to calling this by issuing the + * appropriate commands to the serial flash. + */ +static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont, + int lock, u32 *valp) +{ + int ret; + + if (!byte_cnt || byte_cnt > 4) + return -EINVAL; + if (t4_read_reg(adapter, A_SF_OP) & F_BUSY) + return -EBUSY; + t4_write_reg(adapter, A_SF_OP, + V_SF_LOCK(lock) | V_CONT(cont) | V_BYTECNT(byte_cnt - 1)); + ret = t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5); + if (!ret) + *valp = t4_read_reg(adapter, A_SF_DATA); + return ret; +} + +/** + * sf1_write - write data to the serial flash + * @adapter: the adapter + * @byte_cnt: number of bytes to write + * @cont: whether another operation will be chained + * @lock: whether to lock SF for PL access only + * @val: value to write + * + * Writes up to 4 bytes of data to the serial flash. The location of + * the write needs to be specified prior to calling this by issuing the + * appropriate commands to the serial flash. + */ +static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont, + int lock, u32 val) +{ + if (!byte_cnt || byte_cnt > 4) + return -EINVAL; + if (t4_read_reg(adapter, A_SF_OP) & F_BUSY) + return -EBUSY; + t4_write_reg(adapter, A_SF_DATA, val); + t4_write_reg(adapter, A_SF_OP, V_SF_LOCK(lock) | + V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1)); + return t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5); +} + +/** + * t4_read_flash - read words from serial flash + * @adapter: the adapter + * @addr: the start address for the read + * @nwords: how many 32-bit words to read + * @data: where to store the read data + * @byte_oriented: whether to store data as bytes or as words + * + * Read the specified number of 32-bit words from the serial flash. + * If @byte_oriented is set the read data is stored as a byte array + * (i.e., big-endian), otherwise as 32-bit words in the platform's + * natural endianness. + */ +int t4_read_flash(struct adapter *adapter, unsigned int addr, + unsigned int nwords, u32 *data, int byte_oriented) +{ + int ret; + + if (((addr + nwords * sizeof(u32)) > adapter->params.sf_size) || + (addr & 3)) + return -EINVAL; + + addr = rte_constant_bswap32(addr) | SF_RD_DATA_FAST; + + ret = sf1_write(adapter, 4, 1, 0, addr); + if (ret != 0) + return ret; + + ret = sf1_read(adapter, 1, 1, 0, data); + if (ret != 0) + return ret; + + for ( ; nwords; nwords--, data++) { + ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data); + if (nwords == 1) + t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */ + if (ret) + return ret; + if (byte_oriented) + *data = cpu_to_be32(*data); + } + return 0; +} + +/** + * t4_get_exprom_version - return the Expansion ROM version (if any) + * @adapter: the adapter + * @vers: where to place the version + * + * Reads the Expansion ROM header from FLASH and returns the version + * number (if present) through the @vers return value pointer. We return + * this in the Firmware Version Format since it's convenient. Return + * 0 on success, -ENOENT if no Expansion ROM is present. 
+ */ +static int t4_get_exprom_version(struct adapter *adapter, u32 *vers) +{ + struct exprom_header { + unsigned char hdr_arr[16]; /* must start with 0x55aa */ + unsigned char hdr_ver[4]; /* Expansion ROM version */ + } *hdr; + u32 exprom_header_buf[DIV_ROUND_UP(sizeof(struct exprom_header), + sizeof(u32))]; + int ret; + + ret = t4_read_flash(adapter, FLASH_EXP_ROM_START, + ARRAY_SIZE(exprom_header_buf), + exprom_header_buf, 0); + if (ret) + return ret; + + hdr = (struct exprom_header *)exprom_header_buf; + if (hdr->hdr_arr[0] != 0x55 || hdr->hdr_arr[1] != 0xaa) + return -ENOENT; + + *vers = (V_FW_HDR_FW_VER_MAJOR(hdr->hdr_ver[0]) | + V_FW_HDR_FW_VER_MINOR(hdr->hdr_ver[1]) | + V_FW_HDR_FW_VER_MICRO(hdr->hdr_ver[2]) | + V_FW_HDR_FW_VER_BUILD(hdr->hdr_ver[3])); + return 0; +} + +/** + * t4_get_fw_version - read the firmware version + * @adapter: the adapter + * @vers: where to place the version + * + * Reads the FW version from flash. + */ +static int t4_get_fw_version(struct adapter *adapter, u32 *vers) +{ + return t4_read_flash(adapter, FLASH_FW_START + + offsetof(struct fw_hdr, fw_ver), 1, vers, 0); +} + +/** + * t4_get_bs_version - read the firmware bootstrap version + * @adapter: the adapter + * @vers: where to place the version + * + * Reads the FW Bootstrap version from flash. + */ +static int t4_get_bs_version(struct adapter *adapter, u32 *vers) +{ + return t4_read_flash(adapter, FLASH_FWBOOTSTRAP_START + + offsetof(struct fw_hdr, fw_ver), 1, + vers, 0); +} + +/** + * t4_get_tp_version - read the TP microcode version + * @adapter: the adapter + * @vers: where to place the version + * + * Reads the TP microcode version from flash. + */ +static int t4_get_tp_version(struct adapter *adapter, u32 *vers) +{ + return t4_read_flash(adapter, FLASH_FW_START + + offsetof(struct fw_hdr, tp_microcode_ver), + 1, vers, 0); +} + +/** + * t4_get_version_info - extract various chip/firmware version information + * @adapter: the adapter + * + * Reads various chip/firmware version numbers and stores them into the + * adapter Adapter Parameters structure. If any of the efforts fails + * the first failure will be returned, but all of the version numbers + * will be read. + */ +int t4_get_version_info(struct adapter *adapter) +{ + int ret = 0; + +#define FIRST_RET(__getvinfo) \ + do { \ + int __ret = __getvinfo; \ + if (__ret && !ret) \ + ret = __ret; \ + } while (0) + + FIRST_RET(t4_get_fw_version(adapter, &adapter->params.fw_vers)); + FIRST_RET(t4_get_bs_version(adapter, &adapter->params.bs_vers)); + FIRST_RET(t4_get_tp_version(adapter, &adapter->params.tp_vers)); + FIRST_RET(t4_get_exprom_version(adapter, &adapter->params.er_vers)); + +#undef FIRST_RET + + return ret; +} + +/** + * t4_dump_version_info - dump all of the adapter configuration IDs + * @adapter: the adapter + * + * Dumps all of the various bits of adapter configuration version/revision + * IDs information. This is typically called at some point after + * t4_get_version_info() has been called. + */ +void t4_dump_version_info(struct adapter *adapter) +{ + /** + * Device information. + */ + dev_info(adapter, "Chelsio rev %d\n", + CHELSIO_CHIP_RELEASE(adapter->params.chip)); + + /** + * Firmware Version. 
+ */ + if (!adapter->params.fw_vers) + dev_warn(adapter, "No firmware loaded\n"); + else + dev_info(adapter, "Firmware version: %u.%u.%u.%u\n", + G_FW_HDR_FW_VER_MAJOR(adapter->params.fw_vers), + G_FW_HDR_FW_VER_MINOR(adapter->params.fw_vers), + G_FW_HDR_FW_VER_MICRO(adapter->params.fw_vers), + G_FW_HDR_FW_VER_BUILD(adapter->params.fw_vers)); + + /** + * Bootstrap Firmware Version. + */ + if (!adapter->params.bs_vers) + dev_warn(adapter, "No bootstrap loaded\n"); + else + dev_info(adapter, "Bootstrap version: %u.%u.%u.%u\n", + G_FW_HDR_FW_VER_MAJOR(adapter->params.bs_vers), + G_FW_HDR_FW_VER_MINOR(adapter->params.bs_vers), + G_FW_HDR_FW_VER_MICRO(adapter->params.bs_vers), + G_FW_HDR_FW_VER_BUILD(adapter->params.bs_vers)); + + /** + * TP Microcode Version. + */ + if (!adapter->params.tp_vers) + dev_warn(adapter, "No TP Microcode loaded\n"); + else + dev_info(adapter, "TP Microcode version: %u.%u.%u.%u\n", + G_FW_HDR_FW_VER_MAJOR(adapter->params.tp_vers), + G_FW_HDR_FW_VER_MINOR(adapter->params.tp_vers), + G_FW_HDR_FW_VER_MICRO(adapter->params.tp_vers), + G_FW_HDR_FW_VER_BUILD(adapter->params.tp_vers)); + + /** + * Expansion ROM version. + */ + if (!adapter->params.er_vers) + dev_info(adapter, "No Expansion ROM loaded\n"); + else + dev_info(adapter, "Expansion ROM version: %u.%u.%u.%u\n", + G_FW_HDR_FW_VER_MAJOR(adapter->params.er_vers), + G_FW_HDR_FW_VER_MINOR(adapter->params.er_vers), + G_FW_HDR_FW_VER_MICRO(adapter->params.er_vers), + G_FW_HDR_FW_VER_BUILD(adapter->params.er_vers)); +} + +#define ADVERT_MASK (V_FW_PORT_CAP32_SPEED(M_FW_PORT_CAP32_SPEED) | \ + FW_PORT_CAP32_ANEG) +/** + * fwcaps16_to_caps32 - convert 16-bit Port Capabilities to 32-bits + * @caps16: a 16-bit Port Capabilities value + * + * Returns the equivalent 32-bit Port Capabilities value. + */ +fw_port_cap32_t fwcaps16_to_caps32(fw_port_cap16_t caps16) +{ + fw_port_cap32_t caps32 = 0; + +#define CAP16_TO_CAP32(__cap) \ + do { \ + if (caps16 & FW_PORT_CAP_##__cap) \ + caps32 |= FW_PORT_CAP32_##__cap; \ + } while (0) + + CAP16_TO_CAP32(SPEED_100M); + CAP16_TO_CAP32(SPEED_1G); + CAP16_TO_CAP32(SPEED_25G); + CAP16_TO_CAP32(SPEED_10G); + CAP16_TO_CAP32(SPEED_40G); + CAP16_TO_CAP32(SPEED_100G); + CAP16_TO_CAP32(FC_RX); + CAP16_TO_CAP32(FC_TX); + CAP16_TO_CAP32(ANEG); + CAP16_TO_CAP32(MDIX); + CAP16_TO_CAP32(MDIAUTO); + CAP16_TO_CAP32(FEC_RS); + CAP16_TO_CAP32(FEC_BASER_RS); + CAP16_TO_CAP32(802_3_PAUSE); + CAP16_TO_CAP32(802_3_ASM_DIR); + +#undef CAP16_TO_CAP32 + + return caps32; +} + +/** + * fwcaps32_to_caps16 - convert 32-bit Port Capabilities to 16-bits + * @caps32: a 32-bit Port Capabilities value + * + * Returns the equivalent 16-bit Port Capabilities value. Note that + * not all 32-bit Port Capabilities can be represented in the 16-bit + * Port Capabilities and some fields/values may not make it. 
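+ *
+ * Round-trip sketch (illustrative): converting down to 16 bits and back
+ * with fwcaps16_to_caps32() above keeps only the bits which have a
+ * 16-bit counterpart; anything else is silently dropped:
+ *
+ *   fw_port_cap32_t kept = fwcaps16_to_caps32(fwcaps32_to_caps16(caps32));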
+ */ +static fw_port_cap16_t fwcaps32_to_caps16(fw_port_cap32_t caps32) +{ + fw_port_cap16_t caps16 = 0; + +#define CAP32_TO_CAP16(__cap) \ + do { \ + if (caps32 & FW_PORT_CAP32_##__cap) \ + caps16 |= FW_PORT_CAP_##__cap; \ + } while (0) + + CAP32_TO_CAP16(SPEED_100M); + CAP32_TO_CAP16(SPEED_1G); + CAP32_TO_CAP16(SPEED_10G); + CAP32_TO_CAP16(SPEED_25G); + CAP32_TO_CAP16(SPEED_40G); + CAP32_TO_CAP16(SPEED_100G); + CAP32_TO_CAP16(FC_RX); + CAP32_TO_CAP16(FC_TX); + CAP32_TO_CAP16(802_3_PAUSE); + CAP32_TO_CAP16(802_3_ASM_DIR); + CAP32_TO_CAP16(ANEG); + CAP32_TO_CAP16(MDIX); + CAP32_TO_CAP16(MDIAUTO); + CAP32_TO_CAP16(FEC_RS); + CAP32_TO_CAP16(FEC_BASER_RS); + +#undef CAP32_TO_CAP16 + + return caps16; +} + +/* Translate Firmware Pause specification to Common Code */ +static inline enum cc_pause fwcap_to_cc_pause(fw_port_cap32_t fw_pause) +{ + enum cc_pause cc_pause = 0; + + if (fw_pause & FW_PORT_CAP32_FC_RX) + cc_pause |= PAUSE_RX; + if (fw_pause & FW_PORT_CAP32_FC_TX) + cc_pause |= PAUSE_TX; + + return cc_pause; +} + +/* Translate Common Code Pause Frame specification into Firmware */ +static inline fw_port_cap32_t cc_to_fwcap_pause(enum cc_pause cc_pause) +{ + fw_port_cap32_t fw_pause = 0; + + if (cc_pause & PAUSE_RX) + fw_pause |= FW_PORT_CAP32_FC_RX; + if (cc_pause & PAUSE_TX) + fw_pause |= FW_PORT_CAP32_FC_TX; + + return fw_pause; +} + +/* Translate Firmware Forward Error Correction specification to Common Code */ +static inline enum cc_fec fwcap_to_cc_fec(fw_port_cap32_t fw_fec) +{ + enum cc_fec cc_fec = 0; + + if (fw_fec & FW_PORT_CAP32_FEC_RS) + cc_fec |= FEC_RS; + if (fw_fec & FW_PORT_CAP32_FEC_BASER_RS) + cc_fec |= FEC_BASER_RS; + + return cc_fec; +} + +/* Translate Common Code Forward Error Correction specification to Firmware */ +static inline fw_port_cap32_t cc_to_fwcap_fec(enum cc_fec cc_fec) +{ + fw_port_cap32_t fw_fec = 0; + + if (cc_fec & FEC_RS) + fw_fec |= FW_PORT_CAP32_FEC_RS; + if (cc_fec & FEC_BASER_RS) + fw_fec |= FW_PORT_CAP32_FEC_BASER_RS; + + return fw_fec; +} + +/** + * t4_link_l1cfg - apply link configuration to MAC/PHY + * @adap: the adapter + * @mbox: the Firmware Mailbox to use + * @port: the Port ID + * @lc: the Port's Link Configuration + * + * Set up a port's MAC and PHY according to a desired link configuration. + * - If the PHY can auto-negotiate first decide what to advertise, then + * enable/disable auto-negotiation as desired, and reset. + * - If the PHY does not auto-negotiate just reset it. + * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC, + * otherwise do it later based on the outcome of auto-negotiation. + */ +int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port, + struct link_config *lc) +{ + unsigned int fw_mdi = V_FW_PORT_CAP32_MDI(FW_PORT_CAP32_MDI_AUTO); + unsigned int fw_caps = adap->params.fw_caps_support; + fw_port_cap32_t fw_fc, cc_fec, fw_fec, rcap; + struct fw_port_cmd cmd; + + lc->link_ok = 0; + + fw_fc = cc_to_fwcap_pause(lc->requested_fc); + + /* Convert Common Code Forward Error Control settings into the + * Firmware's API. If the current Requested FEC has "Automatic" + * (IEEE 802.3) specified, then we use whatever the Firmware + * sent us as part of its IEEE 802.3-based interpretation of + * the Transceiver Module EPROM FEC parameters. Otherwise we + * use whatever is in the current Requested FEC settings.
+ */ + if (lc->requested_fec & FEC_AUTO) + cc_fec = lc->auto_fec; + else + cc_fec = lc->requested_fec; + fw_fec = cc_to_fwcap_fec(cc_fec); + + /* Figure out what our Requested Port Capabilities are going to be. + */ + if (!(lc->pcaps & FW_PORT_CAP32_ANEG)) { + rcap = (lc->pcaps & ADVERT_MASK) | fw_fc | fw_fec; + lc->fc = lc->requested_fc & ~PAUSE_AUTONEG; + lc->fec = cc_fec; + } else if (lc->autoneg == AUTONEG_DISABLE) { + rcap = lc->requested_speed | fw_fc | fw_fec | fw_mdi; + lc->fc = lc->requested_fc & ~PAUSE_AUTONEG; + lc->fec = cc_fec; + } else { + rcap = lc->acaps | fw_fc | fw_fec | fw_mdi; + } + + /* And send that on to the Firmware ... + */ + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) | + F_FW_CMD_REQUEST | F_FW_CMD_EXEC | + V_FW_PORT_CMD_PORTID(port)); + cmd.action_to_len16 = + cpu_to_be32(V_FW_PORT_CMD_ACTION(fw_caps == FW_CAPS16 ? + FW_PORT_ACTION_L1_CFG : + FW_PORT_ACTION_L1_CFG32) | + FW_LEN16(cmd)); + + if (fw_caps == FW_CAPS16) + cmd.u.l1cfg.rcap = cpu_to_be32(fwcaps32_to_caps16(rcap)); + else + cmd.u.l1cfg32.rcap32 = cpu_to_be32(rcap); + + return t4_wr_mbox(adap, mbox, &cmd, sizeof(cmd), NULL); +} + +/** + * t4_flash_cfg_addr - return the address of the flash configuration file + * @adapter: the adapter + * + * Return the address within the flash where the Firmware Configuration + * File is stored, or an error if the device FLASH is too small to contain + * a Firmware Configuration File. + */ +int t4_flash_cfg_addr(struct adapter *adapter) +{ + /* + * If the device FLASH isn't large enough to hold a Firmware + * Configuration File, return an error. + */ + if (adapter->params.sf_size < FLASH_CFG_START + FLASH_CFG_MAX_SIZE) + return -ENOSPC; + + return FLASH_CFG_START; +} + +#define PF_INTR_MASK (F_PFSW | F_PFCIM) + +/** + * t4_intr_enable - enable interrupts + * @adapter: the adapter whose interrupts should be enabled + * + * Enable PF-specific interrupts for the calling function and the top-level + * interrupt concentrator for global interrupts. Interrupts are already + * enabled at each module, here we just enable the roots of the interrupt + * hierarchies. + * + * Note: this function should be called only when the driver manages + * non PF-specific interrupts from the various HW modules. Only one PCI + * function at a time should be doing this. + */ +void t4_intr_enable(struct adapter *adapter) +{ + u32 val = 0; + u32 whoami = t4_read_reg(adapter, A_PL_WHOAMI); + u32 pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ? + G_SOURCEPF(whoami) : G_T6_SOURCEPF(whoami); + + if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) + val = F_ERR_DROPPED_DB | F_ERR_EGR_CTXT_PRIO | F_DBFIFO_HP_INT; + t4_write_reg(adapter, A_SGE_INT_ENABLE3, F_ERR_CPL_EXCEED_IQE_SIZE | + F_ERR_INVALID_CIDX_INC | F_ERR_CPL_OPCODE_0 | + F_ERR_DATA_CPL_ON_HIGH_QID1 | F_INGRESS_SIZE_ERR | + F_ERR_DATA_CPL_ON_HIGH_QID0 | F_ERR_BAD_DB_PIDX3 | + F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 | + F_ERR_BAD_DB_PIDX0 | F_ERR_ING_CTXT_PRIO | + F_DBFIFO_LP_INT | F_EGRESS_SIZE_ERR | val); + t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), PF_INTR_MASK); + t4_set_reg_field(adapter, A_PL_INT_MAP0, 0, 1 << pf); +} + +/** + * t4_intr_disable - disable interrupts + * @adapter: the adapter whose interrupts should be disabled + * + * Disable interrupts. We only disable the top-level interrupt + * concentrators. The caller must be a PCI function managing global + * interrupts. 
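+ *
+ * Typical teardown (illustrative): mask the PF interrupts enabled by
+ * t4_intr_enable() above before detaching the function:
+ *
+ *   t4_intr_disable(adapter);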
+ */ +void t4_intr_disable(struct adapter *adapter) +{ + u32 whoami = t4_read_reg(adapter, A_PL_WHOAMI); + u32 pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ? + G_SOURCEPF(whoami) : G_T6_SOURCEPF(whoami); + + t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), 0); + t4_set_reg_field(adapter, A_PL_INT_MAP0, 1 << pf, 0); +} + +/** + * t4_get_port_type_description - return Port Type string description + * @port_type: firmware Port Type enumeration + */ +const char *t4_get_port_type_description(enum fw_port_type port_type) +{ + static const char * const port_type_description[] = { + "Fiber_XFI", + "Fiber_XAUI", + "BT_SGMII", + "BT_XFI", + "BT_XAUI", + "KX4", + "CX4", + "KX", + "KR", + "SFP", + "BP_AP", + "BP4_AP", + "QSFP_10G", + "QSA", + "QSFP", + "BP40_BA", + "KR4_100G", + "CR4_QSFP", + "CR_QSFP", + "CR2_QSFP", + "SFP28", + "KR_SFP28", + }; + + if (port_type < ARRAY_SIZE(port_type_description)) + return port_type_description[port_type]; + return "UNKNOWN"; +} + +/** + * t4_get_mps_bg_map - return the buffer groups associated with a port + * @adap: the adapter + * @pidx: the port index + * + * Returns a bitmap indicating which MPS buffer groups are associated + * with the given port. Bit i is set if buffer group i is used by the + * port. + */ +unsigned int t4_get_mps_bg_map(struct adapter *adap, unsigned int pidx) +{ + unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip); + unsigned int nports = 1 << G_NUMPORTS(t4_read_reg(adap, + A_MPS_CMN_CTL)); + + if (pidx >= nports) { + dev_warn(adap, "MPS Port Index %d >= Nports %d\n", + pidx, nports); + return 0; + } + + switch (chip_version) { + case CHELSIO_T4: + case CHELSIO_T5: + switch (nports) { + case 1: return 0xf; + case 2: return 3 << (2 * pidx); + case 4: return 1 << pidx; + } + break; + + case CHELSIO_T6: + switch (nports) { + case 2: return 1 << (2 * pidx); + } + break; + } + + dev_err(adap, "Need MPS Buffer Group Map for Chip %0x, Nports %d\n", + chip_version, nports); + return 0; +} + +/** + * t4_get_tp_ch_map - return TP ingress channels associated with a port + * @adapter: the adapter + * @pidx: the port index + * + * Returns a bitmap indicating which TP Ingress Channels are associated with + * a given Port. Bit i is set if TP Ingress Channel i is used by the Port. + */ +unsigned int t4_get_tp_ch_map(struct adapter *adapter, unsigned int pidx) +{ + unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip); + unsigned int nports = 1 << G_NUMPORTS(t4_read_reg(adapter, + A_MPS_CMN_CTL)); + + if (pidx >= nports) { + dev_warn(adap, "TP Port Index %d >= Nports %d\n", + pidx, nports); + return 0; + } + + switch (chip_version) { + case CHELSIO_T4: + case CHELSIO_T5: + /* Note that this happens to be the same values as the MPS + * Buffer Group Map for these Chips. But we replicate the code + * here because they're really separate concepts. + */ + switch (nports) { + case 1: return 0xf; + case 2: return 3 << (2 * pidx); + case 4: return 1 << pidx; + } + break; + + case CHELSIO_T6: + switch (nports) { + case 2: return 1 << pidx; + } + break; + } + + dev_err(adapter, "Need TP Channel Map for Chip %0x, Nports %d\n", + chip_version, nports); + return 0; +} + +/** + * t4_get_port_stats - collect port statistics + * @adap: the adapter + * @idx: the port index + * @p: the stats structure to fill + * + * Collect statistics related to the given port from HW. 
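+ *
+ * Usage sketch (hypothetical port index; @p is caller-allocated):
+ *
+ *   struct port_stats ps;
+ *
+ *   t4_get_port_stats(adap, 0, &ps);
+ *   dev_info(adap, "port 0: %llu frames sent\n",
+ *            (unsigned long long)ps.tx_frames);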
+ */ +void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p) +{ + u32 bgmap = t4_get_mps_bg_map(adap, idx); + u32 stat_ctl = t4_read_reg(adap, A_MPS_STAT_CTL); + +#define GET_STAT(name) \ + t4_read_reg64(adap, \ + (is_t4(adap->params.chip) ? \ + PORT_REG(idx, A_MPS_PORT_STAT_##name##_L) :\ + T5_PORT_REG(idx, A_MPS_PORT_STAT_##name##_L))) +#define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L) + + p->tx_octets = GET_STAT(TX_PORT_BYTES); + p->tx_frames = GET_STAT(TX_PORT_FRAMES); + p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST); + p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST); + p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST); + p->tx_error_frames = GET_STAT(TX_PORT_ERROR); + p->tx_frames_64 = GET_STAT(TX_PORT_64B); + p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B); + p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B); + p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B); + p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B); + p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B); + p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX); + p->tx_drop = GET_STAT(TX_PORT_DROP); + p->tx_pause = GET_STAT(TX_PORT_PAUSE); + p->tx_ppp0 = GET_STAT(TX_PORT_PPP0); + p->tx_ppp1 = GET_STAT(TX_PORT_PPP1); + p->tx_ppp2 = GET_STAT(TX_PORT_PPP2); + p->tx_ppp3 = GET_STAT(TX_PORT_PPP3); + p->tx_ppp4 = GET_STAT(TX_PORT_PPP4); + p->tx_ppp5 = GET_STAT(TX_PORT_PPP5); + p->tx_ppp6 = GET_STAT(TX_PORT_PPP6); + p->tx_ppp7 = GET_STAT(TX_PORT_PPP7); + + if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) { + if (stat_ctl & F_COUNTPAUSESTATTX) { + p->tx_frames -= p->tx_pause; + p->tx_octets -= p->tx_pause * 64; + } + if (stat_ctl & F_COUNTPAUSEMCTX) + p->tx_mcast_frames -= p->tx_pause; + } + + p->rx_octets = GET_STAT(RX_PORT_BYTES); + p->rx_frames = GET_STAT(RX_PORT_FRAMES); + p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST); + p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST); + p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST); + p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR); + p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR); + p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR); + p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR); + p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR); + p->rx_runt = GET_STAT(RX_PORT_LESS_64B); + p->rx_frames_64 = GET_STAT(RX_PORT_64B); + p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B); + p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B); + p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B); + p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B); + p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B); + p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX); + p->rx_pause = GET_STAT(RX_PORT_PAUSE); + p->rx_ppp0 = GET_STAT(RX_PORT_PPP0); + p->rx_ppp1 = GET_STAT(RX_PORT_PPP1); + p->rx_ppp2 = GET_STAT(RX_PORT_PPP2); + p->rx_ppp3 = GET_STAT(RX_PORT_PPP3); + p->rx_ppp4 = GET_STAT(RX_PORT_PPP4); + p->rx_ppp5 = GET_STAT(RX_PORT_PPP5); + p->rx_ppp6 = GET_STAT(RX_PORT_PPP6); + p->rx_ppp7 = GET_STAT(RX_PORT_PPP7); + + if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) { + if (stat_ctl & F_COUNTPAUSESTATRX) { + p->rx_frames -= p->rx_pause; + p->rx_octets -= p->rx_pause * 64; + } + if (stat_ctl & F_COUNTPAUSEMCRX) + p->rx_mcast_frames -= p->rx_pause; + } + + p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0; + p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0; + p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0; + p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0; + p->rx_trunc0 = (bgmap & 1) ? 
GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0; + p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0; + p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0; + p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0; + +#undef GET_STAT +#undef GET_STAT_COM +} + +/** + * t4_get_port_stats_offset - collect port stats relative to a previous snapshot + * @adap: The adapter + * @idx: The port + * @stats: Current stats to fill + * @offset: Previous stats snapshot + */ +void t4_get_port_stats_offset(struct adapter *adap, int idx, + struct port_stats *stats, + struct port_stats *offset) +{ + u64 *s, *o; + unsigned int i; + + t4_get_port_stats(adap, idx, stats); + for (i = 0, s = (u64 *)stats, o = (u64 *)offset; + i < (sizeof(struct port_stats) / sizeof(u64)); + i++, s++, o++) + *s -= *o; +} + +/** + * t4_clr_port_stats - clear port statistics + * @adap: the adapter + * @idx: the port index + * + * Clear HW statistics for the given port. + */ +void t4_clr_port_stats(struct adapter *adap, int idx) +{ + unsigned int i; + u32 bgmap = t4_get_mps_bg_map(adap, idx); + u32 port_base_addr; + + if (is_t4(adap->params.chip)) + port_base_addr = PORT_BASE(idx); + else + port_base_addr = T5_PORT_BASE(idx); + + for (i = A_MPS_PORT_STAT_TX_PORT_BYTES_L; + i <= A_MPS_PORT_STAT_TX_PORT_PPP7_H; i += 8) + t4_write_reg(adap, port_base_addr + i, 0); + for (i = A_MPS_PORT_STAT_RX_PORT_BYTES_L; + i <= A_MPS_PORT_STAT_RX_PORT_LESS_64B_H; i += 8) + t4_write_reg(adap, port_base_addr + i, 0); + for (i = 0; i < 4; i++) + if (bgmap & (1 << i)) { + t4_write_reg(adap, + A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L + + i * 8, 0); + t4_write_reg(adap, + A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L + + i * 8, 0); + } +} + +/** + * t4_fw_hello - establish communication with FW + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @evt_mbox: mailbox to receive async FW events + * @master: specifies the caller's willingness to be the device master + * @state: returns the current device state (if non-NULL) + * + * Issues a command to establish communication with FW. Returns either + * an error (negative integer) or the mailbox of the Master PF. + */ +int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox, + enum dev_master master, enum dev_state *state) +{ + int ret; + struct fw_hello_cmd c; + u32 v; + unsigned int master_mbox; + int retries = FW_CMD_HELLO_RETRIES; + +retry: + memset(&c, 0, sizeof(c)); + INIT_CMD(c, HELLO, WRITE); + c.err_to_clearinit = cpu_to_be32( + V_FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) | + V_FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) | + V_FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox : + M_FW_HELLO_CMD_MBMASTER) | + V_FW_HELLO_CMD_MBASYNCNOT(evt_mbox) | + V_FW_HELLO_CMD_STAGE(FW_HELLO_CMD_STAGE_OS) | + F_FW_HELLO_CMD_CLEARINIT); + + /* + * Issue the HELLO command to the firmware. If it's not successful + * but indicates that we got a "busy" or "timeout" condition, retry + * the HELLO until we exhaust our retry limit. If we do exceed our + * retry limit, check to see if the firmware left us any error + * information and report that if so ... 
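+ *
+ * The retry budget is FW_CMD_HELLO_RETRIES; it is decremented each time
+ * the command is retried, either on a -EBUSY/-ETIMEDOUT mailbox result
+ * below or when the post-HELLO wait for the Master PF times out.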
+ */ + ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); + if (ret != FW_SUCCESS) { + if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0) + goto retry; + if (t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_ERR) + t4_report_fw_error(adap); + return ret; + } + + v = be32_to_cpu(c.err_to_clearinit); + master_mbox = G_FW_HELLO_CMD_MBMASTER(v); + if (state) { + if (v & F_FW_HELLO_CMD_ERR) + *state = DEV_STATE_ERR; + else if (v & F_FW_HELLO_CMD_INIT) + *state = DEV_STATE_INIT; + else + *state = DEV_STATE_UNINIT; + } + + /* + * If we're not the Master PF then we need to wait around for the + * Master PF Driver to finish setting up the adapter. + * + * Note that we also do this wait if we're a non-Master-capable PF and + * there is no current Master PF; a Master PF may show up momentarily + * and we wouldn't want to fail pointlessly. (This can happen when an + * OS loads lots of different drivers rapidly at the same time). In + * this case, the Master PF returned by the firmware will be + * M_PCIE_FW_MASTER so the test below will work ... + */ + if ((v & (F_FW_HELLO_CMD_ERR | F_FW_HELLO_CMD_INIT)) == 0 && + master_mbox != mbox) { + int waiting = FW_CMD_HELLO_TIMEOUT; + + /* + * Wait for the firmware to either indicate an error or + * initialized state. If we see either of these we bail out + * and report the issue to the caller. If we exhaust the + * "hello timeout" and we haven't exhausted our retries, try + * again. Otherwise bail with a timeout error. + */ + for (;;) { + u32 pcie_fw; + + msleep(50); + waiting -= 50; + + /* + * If neither Error nor Initialized is indicated + * by the firmware keep waiting till we exhaust our + * timeout ... and then retry if we haven't exhausted + * our retries ... + */ + pcie_fw = t4_read_reg(adap, A_PCIE_FW); + if (!(pcie_fw & (F_PCIE_FW_ERR | F_PCIE_FW_INIT))) { + if (waiting <= 0) { + if (retries-- > 0) + goto retry; + + return -ETIMEDOUT; + } + continue; + } + + /* + * We either have an Error or Initialized condition; + * report errors preferentially. + */ + if (state) { + if (pcie_fw & F_PCIE_FW_ERR) + *state = DEV_STATE_ERR; + else if (pcie_fw & F_PCIE_FW_INIT) + *state = DEV_STATE_INIT; + } + + /* + * If we arrived before a Master PF was selected and + * there's not a valid Master PF, grab its identity + * for our caller. + */ + if (master_mbox == M_PCIE_FW_MASTER && + (pcie_fw & F_PCIE_FW_MASTER_VLD)) + master_mbox = G_PCIE_FW_MASTER(pcie_fw); + break; + } + } + + return master_mbox; +} + +/** + * t4_fw_bye - end communication with FW + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * + * Issues a command to terminate communication with FW. + */ +int t4_fw_bye(struct adapter *adap, unsigned int mbox) +{ + struct fw_bye_cmd c; + + memset(&c, 0, sizeof(c)); + INIT_CMD(c, BYE, WRITE); + return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); +} + +/** + * t4_fw_reset - issue a reset to FW + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @reset: specifies the type of reset to perform + * + * Issues a reset command of the specified type to FW.
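+ *
+ * Example (mirrors the PIORST call issued from t4_fw_restart() below):
+ *
+ *   ret = t4_fw_reset(adap, adap->mbox, F_PIORST | F_PIORSTMODE);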
+ */ +int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset) +{ + struct fw_reset_cmd c; + + memset(&c, 0, sizeof(c)); + INIT_CMD(c, RESET, WRITE); + c.val = cpu_to_be32(reset); + return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); +} + +/** + * t4_fw_halt - issue a reset/halt to FW and put uP into RESET + * @adap: the adapter + * @mbox: mailbox to use for the FW RESET command (if desired) + * @force: force uP into RESET even if FW RESET command fails + * + * Issues a RESET command to firmware (if desired) with a HALT indication + * and then puts the microprocessor into RESET state. The RESET command + * will only be issued if a legitimate mailbox is provided (mbox <= + * M_PCIE_FW_MASTER). + * + * This is generally used in order for the host to safely manipulate the + * adapter without fear of conflicting with whatever the firmware might + * be doing. The only way out of this state is to RESTART the firmware + * ... + */ +int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force) +{ + int ret = 0; + + /* + * If a legitimate mailbox is provided, issue a RESET command + * with a HALT indication. + */ + if (mbox <= M_PCIE_FW_MASTER) { + struct fw_reset_cmd c; + + memset(&c, 0, sizeof(c)); + INIT_CMD(c, RESET, WRITE); + c.val = cpu_to_be32(F_PIORST | F_PIORSTMODE); + c.halt_pkd = cpu_to_be32(F_FW_RESET_CMD_HALT); + ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); + } + + /* + * Normally we won't complete the operation if the firmware RESET + * command fails but if our caller insists we'll go ahead and put the + * uP into RESET. This can be useful if the firmware is hung or even + * missing ... We'll have to take the risk of putting the uP into + * RESET without the cooperation of firmware in that case. + * + * We also force the firmware's HALT flag to be on in case we bypassed + * the firmware RESET command above or we're dealing with old firmware + * which doesn't have the HALT capability. This will serve as a flag + * for the incoming firmware to know that it's coming out of a HALT + * rather than a RESET ... if it's new enough to understand that ... + */ + if (ret == 0 || force) { + t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, F_UPCRST); + t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT, + F_PCIE_FW_HALT); + } + + /* + * And we always return the result of the firmware RESET command + * even when we force the uP into RESET ... + */ + return ret; +} + +/** + * t4_fw_restart - restart the firmware by taking the uP out of RESET + * @adap: the adapter + * @mbox: mailbox to use for the FW RESET command (if desired) + * @reset: if we want to do a RESET to restart things + * + * Restart firmware previously halted by t4_fw_halt(). On successful + * return the previous PF Master remains as the new PF Master and there + * is no need to issue a new HELLO command, etc. + * + * We do this in two ways: + * + * 1. If we're dealing with newer firmware we'll simply want to take + * the chip's microprocessor out of RESET. This will cause the + * firmware to start up from its start vector. And then we'll loop + * until the firmware indicates it's started again (PCIE_FW.HALT + * reset to 0) or we timeout. + * + * 2. If we're dealing with older firmware then we'll need to RESET + * the chip since older firmware won't recognize the PCIE_FW.HALT + * flag and automatically RESET itself on startup. 
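+ *
+ * Recovery sketch (illustrative): halt the firmware, do whatever
+ * host-side work required the halt, then restart with a full RESET:
+ *
+ *   if (t4_fw_halt(adap, adap->mbox, 0) == 0)
+ *           ret = t4_fw_restart(adap, adap->mbox, 1);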
+ */ +int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset) +{ + if (reset) { + /* + * Since we're directing the RESET instead of the firmware + * doing it automatically, we need to clear the PCIE_FW.HALT + * bit. + */ + t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT, 0); + + /* + * If we've been given a valid mailbox, first try to get the + * firmware to do the RESET. If that works, great and we can + * return success. Otherwise, if we haven't been given a + * valid mailbox or the RESET command failed, fall back to + * hitting the chip with a hammer. + */ + if (mbox <= M_PCIE_FW_MASTER) { + t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0); + msleep(100); + if (t4_fw_reset(adap, mbox, + F_PIORST | F_PIORSTMODE) == 0) + return 0; + } + + t4_write_reg(adap, A_PL_RST, F_PIORST | F_PIORSTMODE); + msleep(2000); + } else { + int ms; + + t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0); + for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) { + if (!(t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_HALT)) + return FW_SUCCESS; + msleep(100); + ms += 100; + } + return -ETIMEDOUT; + } + return 0; +} + +/** + * t4_fl_pkt_align - return the fl packet alignment + * @adap: the adapter + * + * T4 has a single field to specify the packing and padding boundary. + * T5 onwards has separate fields for this and hence the alignment for + * next packet offset is maximum of these two. + */ +int t4_fl_pkt_align(struct adapter *adap) +{ + u32 sge_control, sge_control2; + unsigned int ingpadboundary, ingpackboundary, fl_align, ingpad_shift; + + sge_control = t4_read_reg(adap, A_SGE_CONTROL); + + /* T4 uses a single control field to specify both the PCIe Padding and + * Packing Boundary. T5 introduced the ability to specify these + * separately. The actual Ingress Packet Data alignment boundary + * within Packed Buffer Mode is the maximum of these two + * specifications. + */ + if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5) + ingpad_shift = X_INGPADBOUNDARY_SHIFT; + else + ingpad_shift = X_T6_INGPADBOUNDARY_SHIFT; + + ingpadboundary = 1 << (G_INGPADBOUNDARY(sge_control) + ingpad_shift); + + fl_align = ingpadboundary; + if (!is_t4(adap->params.chip)) { + sge_control2 = t4_read_reg(adap, A_SGE_CONTROL2); + ingpackboundary = G_INGPACKBOUNDARY(sge_control2); + if (ingpackboundary == X_INGPACKBOUNDARY_16B) + ingpackboundary = 16; + else + ingpackboundary = 1 << (ingpackboundary + + X_INGPACKBOUNDARY_SHIFT); + + fl_align = max(ingpadboundary, ingpackboundary); + } + return fl_align; +} + +/** + * t4_fixup_host_params_compat - fix up host-dependent parameters + * @adap: the adapter + * @page_size: the host's Base Page Size + * @cache_line_size: the host's Cache Line Size + * @chip_compat: maintain compatibility with designated chip + * + * Various registers in the chip contain values which are dependent on the + * host's Base Page and Cache Line Sizes. This function will fix all of + * those registers with the appropriate values as passed in ... + * + * @chip_compat is used to limit the set of changes that are made + * to be compatible with the indicated chip release. This is used by + * drivers to maintain compatibility with chip register settings when + * the drivers haven't [yet] been updated with new chip support. + */ +int t4_fixup_host_params_compat(struct adapter *adap, + unsigned int page_size, + unsigned int cache_line_size, + enum chip_type chip_compat) +{ + unsigned int page_shift = cxgbe_fls(page_size) - 1; + unsigned int sge_hps = page_shift - 10; + unsigned int stat_len = cache_line_size > 64 ? 
128 : 64; + unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size; + unsigned int fl_align_log = cxgbe_fls(fl_align) - 1; + + t4_write_reg(adap, A_SGE_HOST_PAGE_SIZE, + V_HOSTPAGESIZEPF0(sge_hps) | + V_HOSTPAGESIZEPF1(sge_hps) | + V_HOSTPAGESIZEPF2(sge_hps) | + V_HOSTPAGESIZEPF3(sge_hps) | + V_HOSTPAGESIZEPF4(sge_hps) | + V_HOSTPAGESIZEPF5(sge_hps) | + V_HOSTPAGESIZEPF6(sge_hps) | + V_HOSTPAGESIZEPF7(sge_hps)); + + if (is_t4(adap->params.chip) || is_t4(chip_compat)) + t4_set_reg_field(adap, A_SGE_CONTROL, + V_INGPADBOUNDARY(M_INGPADBOUNDARY) | + F_EGRSTATUSPAGESIZE, + V_INGPADBOUNDARY(fl_align_log - + X_INGPADBOUNDARY_SHIFT) | + V_EGRSTATUSPAGESIZE(stat_len != 64)); + else { + unsigned int pack_align; + unsigned int ingpad, ingpack; + unsigned int pcie_cap; + + /* + * T5 introduced the separation of the Free List Padding and + * Packing Boundaries. Thus, we can select a smaller Padding + * Boundary to avoid uselessly chewing up PCIe Link and Memory + * Bandwidth, and use a Packing Boundary which is large enough + * to avoid false sharing between CPUs, etc. + * + * For the PCI Link, the smaller the Padding Boundary the + * better. For the Memory Controller, a smaller Padding + * Boundary is better until we cross under the Memory Line + * Size (the minimum unit of transfer to/from Memory). If we + * have a Padding Boundary which is smaller than the Memory + * Line Size, that'll involve a Read-Modify-Write cycle on the + * Memory Controller which is never good. + */ + + /* We want the Packing Boundary to be based on the Cache Line + * Size in order to help avoid False Sharing performance + * issues between CPUs, etc. We also want the Packing + * Boundary to incorporate the PCI-E Maximum Payload Size. We + * get best performance when the Packing Boundary is a + * multiple of the Maximum Payload Size. + */ + pack_align = fl_align; + pcie_cap = t4_os_find_pci_capability(adap, PCI_CAP_ID_EXP); + if (pcie_cap) { + unsigned int mps, mps_log; + u16 devctl; + + /* The PCIe Device Control Maximum Payload Size field + * [bits 7:5] encodes sizes as powers of 2 starting at + * 128 bytes. + */ + t4_os_pci_read_cfg2(adap, pcie_cap + PCI_EXP_DEVCTL, + &devctl); + mps_log = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5) + 7; + mps = 1 << mps_log; + if (mps > pack_align) + pack_align = mps; + } + + /* + * N.B. T5 has a different interpretation of the "0" value for + * the Packing Boundary. This corresponds to 16 bytes instead + * of the expected 32 bytes. We never have a Packing Boundary + * less than 32 bytes so we can't use that special value but + * on the other hand, if we wanted 32 bytes, the best we can + * really do is 64 bytes ... + */ + if (pack_align <= 16) { + ingpack = X_INGPACKBOUNDARY_16B; + fl_align = 16; + } else if (pack_align == 32) { + ingpack = X_INGPACKBOUNDARY_64B; + fl_align = 64; + } else { + unsigned int pack_align_log = cxgbe_fls(pack_align) - 1; + + ingpack = pack_align_log - X_INGPACKBOUNDARY_SHIFT; + fl_align = pack_align; + } + + /* Use the smallest Ingress Padding which isn't smaller than + * the Memory Controller Read/Write Size. We'll take that as + * being 8 bytes since we don't know of any system with a + * wider Memory Controller Bus Width. 
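+ *
+ * Concretely (see the assignment just below): T5 is limited to the
+ * 32-byte padding boundary encoding, while T6 and later can use the
+ * 8-byte boundary.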
+ */ + if (is_t5(adap->params.chip)) + ingpad = X_INGPADBOUNDARY_32B; + else + ingpad = X_T6_INGPADBOUNDARY_8B; + t4_set_reg_field(adap, A_SGE_CONTROL, + V_INGPADBOUNDARY(M_INGPADBOUNDARY) | + F_EGRSTATUSPAGESIZE, + V_INGPADBOUNDARY(ingpad) | + V_EGRSTATUSPAGESIZE(stat_len != 64)); + t4_set_reg_field(adap, A_SGE_CONTROL2, + V_INGPACKBOUNDARY(M_INGPACKBOUNDARY), + V_INGPACKBOUNDARY(ingpack)); + } + + /* + * Adjust various SGE Free List Host Buffer Sizes. + * + * The first four entries are: + * + * 0: Host Page Size + * 1: 64KB + * 2: Buffer size corresponding to 1500 byte MTU (unpacked mode) + * 3: Buffer size corresponding to 9000 byte MTU (unpacked mode) + * + * For the single-MTU buffers in unpacked mode we need to include + * space for the SGE Control Packet Shift, 14 byte Ethernet header, + * possible 4 byte VLAN tag, all rounded up to the next Ingress Packet + * Padding boundary. All of these are accommodated in the Factory + * Default Firmware Configuration File but we need to adjust it for + * this host's cache line size. + */ + t4_write_reg(adap, A_SGE_FL_BUFFER_SIZE0, page_size); + t4_write_reg(adap, A_SGE_FL_BUFFER_SIZE2, + (t4_read_reg(adap, A_SGE_FL_BUFFER_SIZE2) + fl_align - 1) + & ~(fl_align - 1)); + t4_write_reg(adap, A_SGE_FL_BUFFER_SIZE3, + (t4_read_reg(adap, A_SGE_FL_BUFFER_SIZE3) + fl_align - 1) + & ~(fl_align - 1)); + + t4_write_reg(adap, A_ULP_RX_TDDP_PSZ, V_HPZ0(page_shift - 12)); + + return 0; +} + +/** + * t4_fixup_host_params - fix up host-dependent parameters (T4 compatible) + * @adap: the adapter + * @page_size: the host's Base Page Size + * @cache_line_size: the host's Cache Line Size + * + * Various registers in T4 contain values which are dependent on the + * host's Base Page and Cache Line Sizes. This function will fix all of + * those registers with the appropriate values as passed in ... + * + * This routine makes changes which are compatible with T4 chips. + */ +int t4_fixup_host_params(struct adapter *adap, unsigned int page_size, + unsigned int cache_line_size) +{ + return t4_fixup_host_params_compat(adap, page_size, cache_line_size, + T4_LAST_REV); +} + +/** + * t4_fw_initialize - ask FW to initialize the device + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * + * Issues a command to FW to partially initialize the device. This + * performs initialization that generally doesn't depend on user input. + */ +int t4_fw_initialize(struct adapter *adap, unsigned int mbox) +{ + struct fw_initialize_cmd c; + + memset(&c, 0, sizeof(c)); + INIT_CMD(c, INITIALIZE, WRITE); + return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); +} + +/** + * t4_query_params_rw - query FW or device parameters + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @pf: the PF + * @vf: the VF + * @nparams: the number of parameters + * @params: the parameter names + * @val: the parameter values + * @rw: Write and read flag + * + * Reads the value of FW or device parameters. Up to 7 parameters can be + * queried at once. 
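+ *
+ * Typical single-parameter read via the t4_query_params() wrapper below
+ * (this mirrors the core-clock query in t4_get_core_clock() above):
+ *
+ *   param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
+ *           V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK);
+ *   ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);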
+ */ +static int t4_query_params_rw(struct adapter *adap, unsigned int mbox, + unsigned int pf, unsigned int vf, + unsigned int nparams, const u32 *params, + u32 *val, int rw) +{ + unsigned int i; + int ret; + struct fw_params_cmd c; + __be32 *p = &c.param[0].mnem; + + if (nparams > 7) + return -EINVAL; + + memset(&c, 0, sizeof(c)); + c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) | + F_FW_CMD_REQUEST | F_FW_CMD_READ | + V_FW_PARAMS_CMD_PFN(pf) | + V_FW_PARAMS_CMD_VFN(vf)); + c.retval_len16 = cpu_to_be32(FW_LEN16(c)); + + for (i = 0; i < nparams; i++) { + *p++ = cpu_to_be32(*params++); + if (rw) + *p = cpu_to_be32(*(val + i)); + p++; + } + + ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); + if (ret == 0) + for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2) + *val++ = be32_to_cpu(*p); + return ret; +} + +int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf, + unsigned int vf, unsigned int nparams, const u32 *params, + u32 *val) +{ + return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0); +} + +/** + * t4_set_params_timeout - sets FW or device parameters + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @pf: the PF + * @vf: the VF + * @nparams: the number of parameters + * @params: the parameter names + * @val: the parameter values + * @timeout: the timeout time + * + * Sets the value of FW or device parameters. Up to 7 parameters can be + * specified at once. + */ +int t4_set_params_timeout(struct adapter *adap, unsigned int mbox, + unsigned int pf, unsigned int vf, + unsigned int nparams, const u32 *params, + const u32 *val, int timeout) +{ + struct fw_params_cmd c; + __be32 *p = &c.param[0].mnem; + + if (nparams > 7) + return -EINVAL; + + memset(&c, 0, sizeof(c)); + c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) | + F_FW_CMD_REQUEST | F_FW_CMD_WRITE | + V_FW_PARAMS_CMD_PFN(pf) | + V_FW_PARAMS_CMD_VFN(vf)); + c.retval_len16 = cpu_to_be32(FW_LEN16(c)); + + while (nparams--) { + *p++ = cpu_to_be32(*params++); + *p++ = cpu_to_be32(*val++); + } + + return t4_wr_mbox_timeout(adap, mbox, &c, sizeof(c), NULL, timeout); +} + +int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf, + unsigned int vf, unsigned int nparams, const u32 *params, + const u32 *val) +{ + return t4_set_params_timeout(adap, mbox, pf, vf, nparams, params, val, + FW_CMD_MAX_TIMEOUT); +} + +/** + * t4_alloc_vi_func - allocate a virtual interface + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @port: physical port associated with the VI + * @pf: the PF owning the VI + * @vf: the VF owning the VI + * @nmac: number of MAC addresses needed (1 to 5) + * @mac: the MAC addresses of the VI + * @rss_size: size of RSS table slice associated with this VI + * @portfunc: which Port Application Function MAC Address is desired + * @idstype: Intrusion Detection Type + * + * Allocates a virtual interface for the given physical port. If @mac is + * not %NULL it contains the MAC addresses of the VI as assigned by FW. + * @mac should be large enough to hold @nmac Ethernet addresses, they are + * stored consecutively so the space needed is @nmac * 6 bytes. + * Returns a negative error number or the non-negative VI id. 
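+ *
+ * Most callers use the t4_alloc_vi() wrapper below; a direct call with a
+ * single MAC address (illustrative values) looks like:
+ *
+ *   viid = t4_alloc_vi_func(adap, mbox, port, pf, vf, 1, mac, &rss_size,
+ *                           FW_VI_FUNC_ETH, 0, &vivld, &vin);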
+ */ +int t4_alloc_vi_func(struct adapter *adap, unsigned int mbox, + unsigned int port, unsigned int pf, unsigned int vf, + unsigned int nmac, u8 *mac, unsigned int *rss_size, + unsigned int portfunc, unsigned int idstype, + u8 *vivld, u8 *vin) +{ + int ret; + struct fw_vi_cmd c; + + memset(&c, 0, sizeof(c)); + c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST | + F_FW_CMD_WRITE | F_FW_CMD_EXEC | + V_FW_VI_CMD_PFN(pf) | V_FW_VI_CMD_VFN(vf)); + c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_ALLOC | FW_LEN16(c)); + c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_TYPE(idstype) | + V_FW_VI_CMD_FUNC(portfunc)); + c.portid_pkd = V_FW_VI_CMD_PORTID(port); + c.nmac = nmac - 1; + + ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); + if (ret) + return ret; + + if (mac) { + memcpy(mac, c.mac, sizeof(c.mac)); + switch (nmac) { + case 5: + memcpy(mac + 24, c.nmac3, sizeof(c.nmac3)); + /* FALLTHROUGH */ + case 4: + memcpy(mac + 18, c.nmac2, sizeof(c.nmac2)); + /* FALLTHROUGH */ + case 3: + memcpy(mac + 12, c.nmac1, sizeof(c.nmac1)); + /* FALLTHROUGH */ + case 2: + memcpy(mac + 6, c.nmac0, sizeof(c.nmac0)); + /* FALLTHROUGH */ + } + } + if (rss_size) + *rss_size = G_FW_VI_CMD_RSSSIZE(be16_to_cpu(c.norss_rsssize)); + if (vivld) + *vivld = G_FW_VI_CMD_VFVLD(be32_to_cpu(c.alloc_to_len16)); + if (vin) + *vin = G_FW_VI_CMD_VIN(be32_to_cpu(c.alloc_to_len16)); + return G_FW_VI_CMD_VIID(cpu_to_be16(c.type_to_viid)); +} + +/** + * t4_alloc_vi - allocate an [Ethernet Function] virtual interface + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @port: physical port associated with the VI + * @pf: the PF owning the VI + * @vf: the VF owning the VI + * @nmac: number of MAC addresses needed (1 to 5) + * @mac: the MAC addresses of the VI + * @rss_size: size of RSS table slice associated with this VI + * + * Backwards compatible and convieniance routine to allocate a Virtual + * Interface with a Ethernet Port Application Function and Intrustion + * Detection System disabled. + */ +int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port, + unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac, + unsigned int *rss_size, u8 *vivld, u8 *vin) +{ + return t4_alloc_vi_func(adap, mbox, port, pf, vf, nmac, mac, rss_size, + FW_VI_FUNC_ETH, 0, vivld, vin); +} + +/** + * t4_free_vi - free a virtual interface + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @pf: the PF owning the VI + * @vf: the VF owning the VI + * @viid: virtual interface identifiler + * + * Free a previously allocated virtual interface. 
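+ *
+ *	Illustrative pairing (editor's sketch, not part of the upstream
+ *	sources): a VI id obtained from t4_alloc_vi() is released with
+ *	the same mailbox, PF and VF:
+ *
+ *		int viid = t4_alloc_vi(adap, mbox, port, pf, vf, 1, mac,
+ *				       &rss_size, &vivld, &vin);
+ *		...
+ *		t4_free_vi(adap, mbox, pf, vf, viid);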
+ */ +int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf, + unsigned int vf, unsigned int viid) +{ + struct fw_vi_cmd c; + + memset(&c, 0, sizeof(c)); + c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST | + F_FW_CMD_EXEC); + if (is_pf4(adap)) + c.op_to_vfn |= cpu_to_be32(V_FW_VI_CMD_PFN(pf) | + V_FW_VI_CMD_VFN(vf)); + c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_FREE | FW_LEN16(c)); + c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_VIID(viid)); + + if (is_pf4(adap)) + return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); + else + return t4vf_wr_mbox(adap, &c, sizeof(c), NULL); +} + +/** + * t4_set_rxmode - set Rx properties of a virtual interface + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @viid: the VI id + * @mtu: the new MTU or -1 + * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change + * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change + * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change + * @vlanex: 1 to enable hardware VLAN Tag extraction, 0 to disable it, + * -1 no change + * @sleep_ok: if true we may sleep while awaiting command completion + * + * Sets Rx properties of a virtual interface. + */ +int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid, + int mtu, int promisc, int all_multi, int bcast, int vlanex, + bool sleep_ok) +{ + struct fw_vi_rxmode_cmd c; + + /* convert to FW values */ + if (mtu < 0) + mtu = M_FW_VI_RXMODE_CMD_MTU; + if (promisc < 0) + promisc = M_FW_VI_RXMODE_CMD_PROMISCEN; + if (all_multi < 0) + all_multi = M_FW_VI_RXMODE_CMD_ALLMULTIEN; + if (bcast < 0) + bcast = M_FW_VI_RXMODE_CMD_BROADCASTEN; + if (vlanex < 0) + vlanex = M_FW_VI_RXMODE_CMD_VLANEXEN; + + memset(&c, 0, sizeof(c)); + c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_RXMODE_CMD) | + F_FW_CMD_REQUEST | F_FW_CMD_WRITE | + V_FW_VI_RXMODE_CMD_VIID(viid)); + c.retval_len16 = cpu_to_be32(FW_LEN16(c)); + c.mtu_to_vlanexen = cpu_to_be32(V_FW_VI_RXMODE_CMD_MTU(mtu) | + V_FW_VI_RXMODE_CMD_PROMISCEN(promisc) | + V_FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) | + V_FW_VI_RXMODE_CMD_BROADCASTEN(bcast) | + V_FW_VI_RXMODE_CMD_VLANEXEN(vlanex)); + if (is_pf4(adap)) + return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, + sleep_ok); + else + return t4vf_wr_mbox(adap, &c, sizeof(c), NULL); +} + +/** + * t4_alloc_raw_mac_filt - Adds a raw mac entry in mps tcam + * @adap: the adapter + * @viid: the VI id + * @mac: the MAC address + * @mask: the mask + * @idx: index at which to add this entry + * @port_id: the port index + * @lookup_type: MAC address for inner (1) or outer (0) header + * @sleep_ok: call is allowed to sleep + * + * Adds the mac entry at the specified index using raw mac interface. + * + * Returns a negative error number or the allocated index for this mac. + */ +int t4_alloc_raw_mac_filt(struct adapter *adap, unsigned int viid, + const u8 *addr, const u8 *mask, unsigned int idx, + u8 lookup_type, u8 port_id, bool sleep_ok) +{ + int ret = 0; + struct fw_vi_mac_cmd c; + struct fw_vi_mac_raw *p = &c.u.raw; + u32 val; + + memset(&c, 0, sizeof(c)); + c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) | + F_FW_CMD_REQUEST | F_FW_CMD_WRITE | + V_FW_VI_MAC_CMD_VIID(viid)); + val = V_FW_CMD_LEN16(1) | + V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_RAW); + c.freemacs_to_len16 = cpu_to_be32(val); + + /* Specify that this is an inner mac address */ + p->raw_idx_pkd = cpu_to_be32(V_FW_VI_MAC_CMD_RAW_IDX(idx)); + + /* Lookup Type. 
Outer header: 0, Inner header: 1 */ + p->data0_pkd = cpu_to_be32(V_DATALKPTYPE(lookup_type) | + V_DATAPORTNUM(port_id)); + /* Lookup mask and port mask */ + p->data0m_pkd = cpu_to_be64(V_DATALKPTYPE(M_DATALKPTYPE) | + V_DATAPORTNUM(M_DATAPORTNUM)); + + /* Copy the address and the mask */ + memcpy((u8 *)&p->data1[0] + 2, addr, ETHER_ADDR_LEN); + memcpy((u8 *)&p->data1m[0] + 2, mask, ETHER_ADDR_LEN); + + ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok); + if (ret == 0) { + ret = G_FW_VI_MAC_CMD_RAW_IDX(be32_to_cpu(p->raw_idx_pkd)); + if (ret != (int)idx) + ret = -ENOMEM; + } + + return ret; +} + +/** + * t4_free_raw_mac_filt - Frees a raw mac entry in mps tcam + * @adap: the adapter + * @viid: the VI id + * @addr: the MAC address + * @mask: the mask + * @idx: index of the entry in mps tcam + * @lookup_type: MAC address for inner (1) or outer (0) header + * @port_id: the port index + * @sleep_ok: call is allowed to sleep + * + * Removes the mac entry at the specified index using raw mac interface. + * + * Returns a negative error number on failure. + */ +int t4_free_raw_mac_filt(struct adapter *adap, unsigned int viid, + const u8 *addr, const u8 *mask, unsigned int idx, + u8 lookup_type, u8 port_id, bool sleep_ok) +{ + struct fw_vi_mac_cmd c; + struct fw_vi_mac_raw *p = &c.u.raw; + u32 raw; + + memset(&c, 0, sizeof(c)); + c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) | + F_FW_CMD_REQUEST | F_FW_CMD_WRITE | + V_FW_CMD_EXEC(0) | + V_FW_VI_MAC_CMD_VIID(viid)); + raw = V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_RAW); + c.freemacs_to_len16 = cpu_to_be32(V_FW_VI_MAC_CMD_FREEMACS(0U) | + raw | + V_FW_CMD_LEN16(1)); + + p->raw_idx_pkd = cpu_to_be32(V_FW_VI_MAC_CMD_RAW_IDX(idx) | + FW_VI_MAC_ID_BASED_FREE); + + /* Lookup Type. Outer header: 0, Inner header: 1 */ + p->data0_pkd = cpu_to_be32(V_DATALKPTYPE(lookup_type) | + V_DATAPORTNUM(port_id)); + /* Lookup mask and port mask */ + p->data0m_pkd = cpu_to_be64(V_DATALKPTYPE(M_DATALKPTYPE) | + V_DATAPORTNUM(M_DATAPORTNUM)); + + /* Copy the address and the mask */ + memcpy((u8 *)&p->data1[0] + 2, addr, ETHER_ADDR_LEN); + memcpy((u8 *)&p->data1m[0] + 2, mask, ETHER_ADDR_LEN); + + return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok); +} + +/** + * t4_change_mac - modifies the exact-match filter for a MAC address + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @viid: the VI id + * @idx: index of existing filter for old value of MAC address, or -1 + * @addr: the new MAC address value + * @persist: whether a new MAC allocation should be persistent + * @add_smt: if true also add the address to the HW SMT + * + * Modifies an exact-match filter and sets it to the new MAC address if + * @idx >= 0, or adds the MAC address to a new filter if @idx < 0. In the + * latter case the address is added persistently if @persist is %true. + * + * Note that in general it is not possible to modify the value of a given + * filter so the generic way to modify an address filter is to free the one + * being used by the old address value and allocate a new filter for the + * new address value. + * + * Returns a negative error number or the index of the filter with the new + * MAC value. Note that this index may differ from @idx. 
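+ *
+ *	Illustrative usage (editor's sketch, not part of the upstream
+ *	sources): adding a new, persistent address rather than replacing
+ *	an existing filter is requested with a negative @idx:
+ *
+ *		ret = t4_change_mac(adap, mbox, viid, -1, addr,
+ *				    true, true);
+ *
+ *	A non-negative return value is then the index of the newly
+ *	written filter.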
+ */ +int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid, + int idx, const u8 *addr, bool persist, bool add_smt) +{ + int ret, mode; + struct fw_vi_mac_cmd c; + struct fw_vi_mac_exact *p = c.u.exact; + int max_mac_addr = adap->params.arch.mps_tcam_size; + + if (idx < 0) /* new allocation */ + idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC; + mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY; + + memset(&c, 0, sizeof(c)); + c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) | + F_FW_CMD_REQUEST | F_FW_CMD_WRITE | + V_FW_VI_MAC_CMD_VIID(viid)); + c.freemacs_to_len16 = cpu_to_be32(V_FW_CMD_LEN16(1)); + p->valid_to_idx = cpu_to_be16(F_FW_VI_MAC_CMD_VALID | + V_FW_VI_MAC_CMD_SMAC_RESULT(mode) | + V_FW_VI_MAC_CMD_IDX(idx)); + memcpy(p->macaddr, addr, sizeof(p->macaddr)); + + if (is_pf4(adap)) + ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); + else + ret = t4vf_wr_mbox(adap, &c, sizeof(c), &c); + if (ret == 0) { + ret = G_FW_VI_MAC_CMD_IDX(be16_to_cpu(p->valid_to_idx)); + if (ret >= max_mac_addr) + ret = -ENOMEM; + } + return ret; +} + +/** + * t4_enable_vi_params - enable/disable a virtual interface + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @viid: the VI id + * @rx_en: 1=enable Rx, 0=disable Rx + * @tx_en: 1=enable Tx, 0=disable Tx + * @dcb_en: 1=enable delivery of Data Center Bridging messages. + * + * Enables/disables a virtual interface. Note that setting DCB Enable + * only makes sense when enabling a Virtual Interface ... + */ +int t4_enable_vi_params(struct adapter *adap, unsigned int mbox, + unsigned int viid, bool rx_en, bool tx_en, bool dcb_en) +{ + struct fw_vi_enable_cmd c; + + memset(&c, 0, sizeof(c)); + c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_ENABLE_CMD) | + F_FW_CMD_REQUEST | F_FW_CMD_EXEC | + V_FW_VI_ENABLE_CMD_VIID(viid)); + c.ien_to_len16 = cpu_to_be32(V_FW_VI_ENABLE_CMD_IEN(rx_en) | + V_FW_VI_ENABLE_CMD_EEN(tx_en) | + V_FW_VI_ENABLE_CMD_DCB_INFO(dcb_en) | + FW_LEN16(c)); + if (is_pf4(adap)) + return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL); + else + return t4vf_wr_mbox_ns(adap, &c, sizeof(c), NULL); +} + +/** + * t4_enable_vi - enable/disable a virtual interface + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @viid: the VI id + * @rx_en: 1=enable Rx, 0=disable Rx + * @tx_en: 1=enable Tx, 0=disable Tx + * + * Enables/disables a virtual interface. Note that setting DCB Enable + * only makes sense when enabling a Virtual Interface ... + */ +int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid, + bool rx_en, bool tx_en) +{ + return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0); +} + +/** + * t4_iq_start_stop - enable/disable an ingress queue and its FLs + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @start: %true to enable the queues, %false to disable them + * @pf: the PF owning the queues + * @vf: the VF owning the queues + * @iqid: ingress queue id + * @fl0id: FL0 queue id or 0xffff if no attached FL0 + * @fl1id: FL1 queue id or 0xffff if no attached FL1 + * + * Starts or stops an ingress queue and its associated FLs, if any. 
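+ *
+ *	Illustrative usage (editor's sketch, not part of the upstream
+ *	sources): stopping an ingress queue that has only FL0 attached,
+ *	passing 0xffff for the unused FL1 slot:
+ *
+ *		ret = t4_iq_start_stop(adap, mbox, false, pf, vf,
+ *				       iqid, fl0id, 0xffff);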
+ */ +int t4_iq_start_stop(struct adapter *adap, unsigned int mbox, bool start, + unsigned int pf, unsigned int vf, unsigned int iqid, + unsigned int fl0id, unsigned int fl1id) +{ + struct fw_iq_cmd c; + + memset(&c, 0, sizeof(c)); + c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST | + F_FW_CMD_EXEC); + c.alloc_to_len16 = cpu_to_be32(V_FW_IQ_CMD_IQSTART(start) | + V_FW_IQ_CMD_IQSTOP(!start) | + FW_LEN16(c)); + c.iqid = cpu_to_be16(iqid); + c.fl0id = cpu_to_be16(fl0id); + c.fl1id = cpu_to_be16(fl1id); + if (is_pf4(adap)) { + c.op_to_vfn |= cpu_to_be32(V_FW_IQ_CMD_PFN(pf) | + V_FW_IQ_CMD_VFN(vf)); + return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); + } else { + return t4vf_wr_mbox(adap, &c, sizeof(c), NULL); + } +} + +/** + * t4_iq_free - free an ingress queue and its FLs + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @pf: the PF owning the queues + * @vf: the VF owning the queues + * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.) + * @iqid: ingress queue id + * @fl0id: FL0 queue id or 0xffff if no attached FL0 + * @fl1id: FL1 queue id or 0xffff if no attached FL1 + * + * Frees an ingress queue and its associated FLs, if any. + */ +int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, + unsigned int vf, unsigned int iqtype, unsigned int iqid, + unsigned int fl0id, unsigned int fl1id) +{ + struct fw_iq_cmd c; + + memset(&c, 0, sizeof(c)); + c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST | + F_FW_CMD_EXEC); + if (is_pf4(adap)) + c.op_to_vfn |= cpu_to_be32(V_FW_IQ_CMD_PFN(pf) | + V_FW_IQ_CMD_VFN(vf)); + c.alloc_to_len16 = cpu_to_be32(F_FW_IQ_CMD_FREE | FW_LEN16(c)); + c.type_to_iqandstindex = cpu_to_be32(V_FW_IQ_CMD_TYPE(iqtype)); + c.iqid = cpu_to_be16(iqid); + c.fl0id = cpu_to_be16(fl0id); + c.fl1id = cpu_to_be16(fl1id); + if (is_pf4(adap)) + return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); + else + return t4vf_wr_mbox(adap, &c, sizeof(c), NULL); +} + +/** + * t4_eth_eq_free - free an Ethernet egress queue + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @pf: the PF owning the queue + * @vf: the VF owning the queue + * @eqid: egress queue id + * + * Frees an Ethernet egress queue. + */ +int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, + unsigned int vf, unsigned int eqid) +{ + struct fw_eq_eth_cmd c; + + memset(&c, 0, sizeof(c)); + c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_ETH_CMD) | + F_FW_CMD_REQUEST | F_FW_CMD_EXEC); + if (is_pf4(adap)) + c.op_to_vfn |= cpu_to_be32(V_FW_IQ_CMD_PFN(pf) | + V_FW_IQ_CMD_VFN(vf)); + c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_ETH_CMD_FREE | FW_LEN16(c)); + c.eqid_pkd = cpu_to_be32(V_FW_EQ_ETH_CMD_EQID(eqid)); + if (is_pf4(adap)) + return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); + else + return t4vf_wr_mbox(adap, &c, sizeof(c), NULL); +} + +/** + * t4_link_down_rc_str - return a string for a Link Down Reason Code + * @link_down_rc: Link Down Reason Code + * + * Returns a string representation of the Link Down Reason Code. + */ +static const char *t4_link_down_rc_str(unsigned char link_down_rc) +{ + static const char * const reason[] = { + "Link Down", + "Remote Fault", + "Auto-negotiation Failure", + "Reserved", + "Insufficient Airflow", + "Unable To Determine Reason", + "No RX Signal Detected", + "Reserved", + }; + + if (link_down_rc >= ARRAY_SIZE(reason)) + return "Bad Reason Code"; + + return reason[link_down_rc]; +} + +/* Return the highest speed set in the port capabilities, in Mb/s. 
*/ +static unsigned int fwcap_to_speed(fw_port_cap32_t caps) +{ +#define TEST_SPEED_RETURN(__caps_speed, __speed) \ + do { \ + if (caps & FW_PORT_CAP32_SPEED_##__caps_speed) \ + return __speed; \ + } while (0) + + TEST_SPEED_RETURN(100G, 100000); + TEST_SPEED_RETURN(50G, 50000); + TEST_SPEED_RETURN(40G, 40000); + TEST_SPEED_RETURN(25G, 25000); + TEST_SPEED_RETURN(10G, 10000); + TEST_SPEED_RETURN(1G, 1000); + TEST_SPEED_RETURN(100M, 100); + +#undef TEST_SPEED_RETURN + + return 0; +} + +/** + * t4_handle_get_port_info - process a FW reply message + * @pi: the port info + * @rpl: start of the FW message + * + * Processes a GET_PORT_INFO FW reply message. + */ +static void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl) +{ + const struct fw_port_cmd *cmd = (const void *)rpl; + int action = G_FW_PORT_CMD_ACTION(be32_to_cpu(cmd->action_to_len16)); + fw_port_cap32_t pcaps, acaps, linkattr; + struct link_config *lc = &pi->link_cfg; + struct adapter *adapter = pi->adapter; + enum fw_port_module_type mod_type; + enum fw_port_type port_type; + unsigned int speed, fc, fec; + int link_ok, linkdnrc; + + /* Extract the various fields from the Port Information message. + */ + switch (action) { + case FW_PORT_ACTION_GET_PORT_INFO: { + u32 lstatus = be32_to_cpu(cmd->u.info.lstatus_to_modtype); + + link_ok = (lstatus & F_FW_PORT_CMD_LSTATUS) != 0; + linkdnrc = G_FW_PORT_CMD_LINKDNRC(lstatus); + port_type = G_FW_PORT_CMD_PTYPE(lstatus); + mod_type = G_FW_PORT_CMD_MODTYPE(lstatus); + pcaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.pcap)); + acaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.acap)); + + /* Unfortunately the format of the Link Status in the old + * 16-bit Port Information message isn't the same as the + * 16-bit Port Capabilities bitfield used everywhere else ... 
+ */ + linkattr = 0; + if (lstatus & F_FW_PORT_CMD_RXPAUSE) + linkattr |= FW_PORT_CAP32_FC_RX; + if (lstatus & F_FW_PORT_CMD_TXPAUSE) + linkattr |= FW_PORT_CAP32_FC_TX; + if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M)) + linkattr |= FW_PORT_CAP32_SPEED_100M; + if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G)) + linkattr |= FW_PORT_CAP32_SPEED_1G; + if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G)) + linkattr |= FW_PORT_CAP32_SPEED_10G; + if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_25G)) + linkattr |= FW_PORT_CAP32_SPEED_25G; + if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G)) + linkattr |= FW_PORT_CAP32_SPEED_40G; + if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100G)) + linkattr |= FW_PORT_CAP32_SPEED_100G; + + break; + } + + case FW_PORT_ACTION_GET_PORT_INFO32: { + u32 lstatus32 = + be32_to_cpu(cmd->u.info32.lstatus32_to_cbllen32); + + link_ok = (lstatus32 & F_FW_PORT_CMD_LSTATUS32) != 0; + linkdnrc = G_FW_PORT_CMD_LINKDNRC32(lstatus32); + port_type = G_FW_PORT_CMD_PORTTYPE32(lstatus32); + mod_type = G_FW_PORT_CMD_MODTYPE32(lstatus32); + pcaps = be32_to_cpu(cmd->u.info32.pcaps32); + acaps = be32_to_cpu(cmd->u.info32.acaps32); + linkattr = be32_to_cpu(cmd->u.info32.linkattr32); + break; + } + + default: + dev_warn(adapter, "Handle Port Information: Bad Command/Action %#x\n", + be32_to_cpu(cmd->action_to_len16)); + return; + } + + fec = fwcap_to_cc_fec(acaps); + + fc = fwcap_to_cc_pause(linkattr); + speed = fwcap_to_speed(linkattr); + + if (mod_type != pi->mod_type) { + lc->auto_fec = fec; + pi->port_type = port_type; + pi->mod_type = mod_type; + t4_os_portmod_changed(adapter, pi->pidx); + } + if (link_ok != lc->link_ok || speed != lc->speed || + fc != lc->fc || fec != lc->fec) { /* something changed */ + if (!link_ok && lc->link_ok) { + lc->link_down_rc = linkdnrc; + dev_warn(adap, "Port %d link down, reason: %s\n", + pi->tx_chan, t4_link_down_rc_str(linkdnrc)); + } + lc->link_ok = link_ok; + lc->speed = speed; + lc->fc = fc; + lc->fec = fec; + lc->pcaps = pcaps; + lc->acaps = acaps & ADVERT_MASK; + + if (lc->acaps & FW_PORT_CAP32_ANEG) { + lc->autoneg = AUTONEG_ENABLE; + } else { + /* When Autoneg is disabled, user needs to set + * single speed. + * Similar to cxgb4_ethtool.c: set_link_ksettings + */ + lc->acaps = 0; + lc->requested_speed = fwcap_to_speed(acaps); + lc->autoneg = AUTONEG_DISABLE; + } + } +} + +/** + * t4_ctrl_eq_free - free a control egress queue + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @pf: the PF owning the queue + * @vf: the VF owning the queue + * @eqid: egress queue id + * + * Frees a control egress queue. + */ +int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, + unsigned int vf, unsigned int eqid) +{ + struct fw_eq_ctrl_cmd c; + + memset(&c, 0, sizeof(c)); + c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_CTRL_CMD) | + F_FW_CMD_REQUEST | F_FW_CMD_EXEC | + V_FW_EQ_CTRL_CMD_PFN(pf) | + V_FW_EQ_CTRL_CMD_VFN(vf)); + c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_CTRL_CMD_FREE | FW_LEN16(c)); + c.cmpliqid_eqid = cpu_to_be32(V_FW_EQ_CTRL_CMD_EQID(eqid)); + return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); +} + +/** + * t4_handle_fw_rpl - process a FW reply message + * @adap: the adapter + * @rpl: start of the FW message + * + * Processes a FW message, such as link state change messages. + */ +int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl) +{ + u8 opcode = *(const u8 *)rpl; + + /* + * This might be a port command ... 
this simplifies the following + * conditionals ... We can get away with pre-dereferencing + * action_to_len16 because it's in the first 16 bytes and all messages + * will be at least that long. + */ + const struct fw_port_cmd *p = (const void *)rpl; + unsigned int action = + G_FW_PORT_CMD_ACTION(be32_to_cpu(p->action_to_len16)); + + if (opcode == FW_PORT_CMD && + (action == FW_PORT_ACTION_GET_PORT_INFO || + action == FW_PORT_ACTION_GET_PORT_INFO32)) { + /* link/module state change message */ + int chan = G_FW_PORT_CMD_PORTID(be32_to_cpu(p->op_to_portid)); + struct port_info *pi = NULL; + int i; + + for_each_port(adap, i) { + pi = adap2pinfo(adap, i); + if (pi->tx_chan == chan) + break; + } + + t4_handle_get_port_info(pi, rpl); + } else { + dev_warn(adap, "Unknown firmware reply %d\n", opcode); + return -EINVAL; + } + return 0; +} + +void t4_reset_link_config(struct adapter *adap, int idx) +{ + struct port_info *pi = adap2pinfo(adap, idx); + struct link_config *lc = &pi->link_cfg; + + lc->link_ok = 0; + lc->requested_speed = 0; + lc->requested_fc = 0; + lc->speed = 0; + lc->fc = 0; +} + +/** + * init_link_config - initialize a link's SW state + * @lc: structure holding the link state + * @pcaps: link Port Capabilities + * @acaps: link current Advertised Port Capabilities + * + * Initializes the SW state maintained for each link, including the link's + * capabilities and default speed/flow-control/autonegotiation settings. + */ +void init_link_config(struct link_config *lc, fw_port_cap32_t pcaps, + fw_port_cap32_t acaps) +{ + lc->pcaps = pcaps; + lc->requested_speed = 0; + lc->speed = 0; + lc->requested_fc = 0; + lc->fc = 0; + + /** + * For Forward Error Control, we default to whatever the Firmware + * tells us the Link is currently advertising. + */ + lc->auto_fec = fwcap_to_cc_fec(acaps); + lc->requested_fec = FEC_AUTO; + lc->fec = lc->auto_fec; + + if (lc->pcaps & FW_PORT_CAP32_ANEG) { + lc->acaps = lc->pcaps & ADVERT_MASK; + lc->autoneg = AUTONEG_ENABLE; + lc->requested_fc |= PAUSE_AUTONEG; + } else { + lc->acaps = 0; + lc->autoneg = AUTONEG_DISABLE; + } +} + +/** + * t4_wait_dev_ready - wait till to reads of registers work + * + * Right after the device is RESET is can take a small amount of time + * for it to respond to register reads. Until then, all reads will + * return either 0xff...ff or 0xee...ee. Return an error if reads + * don't work within a reasonable time frame. + */ +static int t4_wait_dev_ready(struct adapter *adapter) +{ + u32 whoami; + + whoami = t4_read_reg(adapter, A_PL_WHOAMI); + + if (whoami != 0xffffffff && whoami != X_CIM_PF_NOACCESS) + return 0; + + msleep(500); + whoami = t4_read_reg(adapter, A_PL_WHOAMI); + if (whoami != 0xffffffff && whoami != X_CIM_PF_NOACCESS) + return 0; + + dev_err(adapter, "Device didn't become ready for access, whoami = %#x\n", + whoami); + return -EIO; +} + +struct flash_desc { + u32 vendor_and_model_id; + u32 size_mb; +}; + +int t4_get_flash_params(struct adapter *adapter) +{ + /* + * Table for non-standard supported Flash parts. Note, all Flash + * parts must have 64KB sectors. + */ + static struct flash_desc supported_flash[] = { + { 0x00150201, 4 << 20 }, /* Spansion 4MB S25FL032P */ + }; + + int ret; + u32 flashid = 0; + unsigned int part, manufacturer; + unsigned int density, size = 0; + + /** + * Issue a Read ID Command to the Flash part. We decode supported + * Flash parts and their sizes from this. There's a newer Query + * Command which can retrieve detailed geometry information but + * many Flash parts don't support it. 
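+	 *
+	 * (Editor's note, not upstream text: as decoded further below,
+	 * the Manufacturer ID ends up in bits 7:0 of the returned ID and
+	 * the density code in bits 23:16; e.g. the Spansion S25FL032P
+	 * entry 0x00150201 in supported_flash[] has Manufacturer ID 0x01
+	 * and density code 0x15.)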
+ */ + ret = sf1_write(adapter, 1, 1, 0, SF_RD_ID); + if (!ret) + ret = sf1_read(adapter, 3, 0, 1, &flashid); + t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */ + if (ret < 0) + return ret; + + /** + * Check to see if it's one of our non-standard supported Flash parts. + */ + for (part = 0; part < ARRAY_SIZE(supported_flash); part++) { + if (supported_flash[part].vendor_and_model_id == flashid) { + adapter->params.sf_size = + supported_flash[part].size_mb; + adapter->params.sf_nsec = + adapter->params.sf_size / SF_SEC_SIZE; + goto found; + } + } + + /** + * Decode Flash part size. The code below looks repetative with + * common encodings, but that's not guaranteed in the JEDEC + * specification for the Read JADEC ID command. The only thing that + * we're guaranteed by the JADEC specification is where the + * Manufacturer ID is in the returned result. After that each + * Manufacturer ~could~ encode things completely differently. + * Note, all Flash parts must have 64KB sectors. + */ + manufacturer = flashid & 0xff; + switch (manufacturer) { + case 0x20: { /* Micron/Numonix */ + /** + * This Density -> Size decoding table is taken from Micron + * Data Sheets. + */ + density = (flashid >> 16) & 0xff; + switch (density) { + case 0x14: + size = 1 << 20; /* 1MB */ + break; + case 0x15: + size = 1 << 21; /* 2MB */ + break; + case 0x16: + size = 1 << 22; /* 4MB */ + break; + case 0x17: + size = 1 << 23; /* 8MB */ + break; + case 0x18: + size = 1 << 24; /* 16MB */ + break; + case 0x19: + size = 1 << 25; /* 32MB */ + break; + case 0x20: + size = 1 << 26; /* 64MB */ + break; + case 0x21: + size = 1 << 27; /* 128MB */ + break; + case 0x22: + size = 1 << 28; /* 256MB */ + break; + } + break; + } + + case 0x9d: { /* ISSI -- Integrated Silicon Solution, Inc. */ + /** + * This Density -> Size decoding table is taken from ISSI + * Data Sheets. + */ + density = (flashid >> 16) & 0xff; + switch (density) { + case 0x16: + size = 1 << 25; /* 32MB */ + break; + case 0x17: + size = 1 << 26; /* 64MB */ + break; + } + break; + } + + case 0xc2: { /* Macronix */ + /** + * This Density -> Size decoding table is taken from Macronix + * Data Sheets. + */ + density = (flashid >> 16) & 0xff; + switch (density) { + case 0x17: + size = 1 << 23; /* 8MB */ + break; + case 0x18: + size = 1 << 24; /* 16MB */ + break; + } + break; + } + + case 0xef: { /* Winbond */ + /** + * This Density -> Size decoding table is taken from Winbond + * Data Sheets. + */ + density = (flashid >> 16) & 0xff; + switch (density) { + case 0x17: + size = 1 << 23; /* 8MB */ + break; + case 0x18: + size = 1 << 24; /* 16MB */ + break; + } + break; + } + } + + /* If we didn't recognize the FLASH part, that's no real issue: the + * Hardware/Software contract says that Hardware will _*ALWAYS*_ + * use a FLASH part which is at least 4MB in size and has 64KB + * sectors. The unrecognized FLASH part is likely to be much larger + * than 4MB, but that's all we really need. + */ + if (size == 0) { + dev_warn(adapter, + "Unknown Flash Part, ID = %#x, assuming 4MB\n", + flashid); + size = 1 << 22; + } + + /** + * Store decoded Flash size and fall through into vetting code. + */ + adapter->params.sf_size = size; + adapter->params.sf_nsec = size / SF_SEC_SIZE; + +found: + /* + * We should reject adapters with FLASHes which are too small. So, emit + * a warning. 
+ */ + if (adapter->params.sf_size < FLASH_MIN_SIZE) + dev_warn(adapter, "WARNING: Flash Part ID %#x, size %#x < %#x\n", + flashid, adapter->params.sf_size, FLASH_MIN_SIZE); + + return 0; +} + +static void set_pcie_completion_timeout(struct adapter *adapter, + u8 range) +{ + u32 pcie_cap; + u16 val; + + pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP); + if (pcie_cap) { + t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, &val); + val &= 0xfff0; + val |= range; + t4_os_pci_write_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, val); + } +} + +/** + * t4_get_chip_type - Determine chip type from device ID + * @adap: the adapter + * @ver: adapter version + */ +int t4_get_chip_type(struct adapter *adap, int ver) +{ + enum chip_type chip = 0; + u32 pl_rev = G_REV(t4_read_reg(adap, A_PL_REV)); + + /* Retrieve adapter's device ID */ + switch (ver) { + case CHELSIO_T5: + chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev); + break; + case CHELSIO_T6: + chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev); + break; + default: + dev_err(adap, "Device %d is not supported\n", + adap->params.pci.device_id); + return -EINVAL; + } + + return chip; +} + +/** + * t4_prep_adapter - prepare SW and HW for operation + * @adapter: the adapter + * + * Initialize adapter SW state for the various HW modules, set initial + * values for some adapter tunables, take PHYs out of reset, and + * initialize the MDIO interface. + */ +int t4_prep_adapter(struct adapter *adapter) +{ + int ret, ver; + u32 pl_rev; + + ret = t4_wait_dev_ready(adapter); + if (ret < 0) + return ret; + + pl_rev = G_REV(t4_read_reg(adapter, A_PL_REV)); + adapter->params.pci.device_id = adapter->pdev->id.device_id; + adapter->params.pci.vendor_id = adapter->pdev->id.vendor_id; + + /* + * WE DON'T NEED adapter->params.chip CODE ONCE PL_REV CONTAINS + * ADAPTER (VERSION << 4 | REVISION) + */ + ver = CHELSIO_PCI_ID_VER(adapter->params.pci.device_id); + adapter->params.chip = 0; + switch (ver) { + case CHELSIO_T5: + adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev); + adapter->params.arch.sge_fl_db = F_DBPRIO | F_DBTYPE; + adapter->params.arch.mps_tcam_size = + NUM_MPS_T5_CLS_SRAM_L_INSTANCES; + adapter->params.arch.mps_rplc_size = 128; + adapter->params.arch.nchan = NCHAN; + adapter->params.arch.vfcount = 128; + break; + case CHELSIO_T6: + adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev); + adapter->params.arch.sge_fl_db = 0; + adapter->params.arch.mps_tcam_size = + NUM_MPS_T5_CLS_SRAM_L_INSTANCES; + adapter->params.arch.mps_rplc_size = 256; + adapter->params.arch.nchan = 2; + adapter->params.arch.vfcount = 256; + break; + default: + dev_err(adapter, "%s: Device %d is not supported\n", + __func__, adapter->params.pci.device_id); + return -EINVAL; + } + + adapter->params.pci.vpd_cap_addr = + t4_os_find_pci_capability(adapter, PCI_CAP_ID_VPD); + + ret = t4_get_flash_params(adapter); + if (ret < 0) { + dev_err(adapter, "Unable to retrieve Flash Parameters, ret = %d\n", + -ret); + return ret; + } + + adapter->params.cim_la_size = CIMLA_SIZE; + + init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd); + + /* + * Default port and clock for debugging in case we can't reach FW. + */ + adapter->params.nports = 1; + adapter->params.portvec = 1; + adapter->params.vpd.cclk = 50000; + + /* Set pci completion timeout value to 4 seconds. 
*/ + set_pcie_completion_timeout(adapter, 0xd); + return 0; +} + +/** + * t4_bar2_sge_qregs - return BAR2 SGE Queue register information + * @adapter: the adapter + * @qid: the Queue ID + * @qtype: the Ingress or Egress type for @qid + * @pbar2_qoffset: BAR2 Queue Offset + * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues + * + * Returns the BAR2 SGE Queue Registers information associated with the + * indicated Absolute Queue ID. These are passed back in return value + * pointers. @qtype should be T4_BAR2_QTYPE_EGRESS for Egress Queue + * and T4_BAR2_QTYPE_INGRESS for Ingress Queues. + * + * This may return an error which indicates that BAR2 SGE Queue + * registers aren't available. If an error is not returned, then the + * following values are returned: + * + * *@pbar2_qoffset: the BAR2 Offset of the @qid Registers + * *@pbar2_qid: the BAR2 SGE Queue ID or 0 of @qid + * + * If the returned BAR2 Queue ID is 0, then BAR2 SGE registers which + * require the "Inferred Queue ID" ability may be used. E.g. the + * Write Combining Doorbell Buffer. If the BAR2 Queue ID is not 0, + * then these "Inferred Queue ID" register may not be used. + */ +int t4_bar2_sge_qregs(struct adapter *adapter, unsigned int qid, + enum t4_bar2_qtype qtype, u64 *pbar2_qoffset, + unsigned int *pbar2_qid) +{ + unsigned int page_shift, page_size, qpp_shift, qpp_mask; + u64 bar2_page_offset, bar2_qoffset; + unsigned int bar2_qid, bar2_qid_offset, bar2_qinferred; + + /* + * T4 doesn't support BAR2 SGE Queue registers. + */ + if (is_t4(adapter->params.chip)) + return -EINVAL; + + /* + * Get our SGE Page Size parameters. + */ + page_shift = adapter->params.sge.hps + 10; + page_size = 1 << page_shift; + + /* + * Get the right Queues per Page parameters for our Queue. + */ + qpp_shift = (qtype == T4_BAR2_QTYPE_EGRESS ? + adapter->params.sge.eq_qpp : + adapter->params.sge.iq_qpp); + qpp_mask = (1 << qpp_shift) - 1; + + /* + * Calculate the basics of the BAR2 SGE Queue register area: + * o The BAR2 page the Queue registers will be in. + * o The BAR2 Queue ID. + * o The BAR2 Queue ID Offset into the BAR2 page. + */ + bar2_page_offset = ((qid >> qpp_shift) << page_shift); + bar2_qid = qid & qpp_mask; + bar2_qid_offset = bar2_qid * SGE_UDB_SIZE; + + /* + * If the BAR2 Queue ID Offset is less than the Page Size, then the + * hardware will infer the Absolute Queue ID simply from the writes to + * the BAR2 Queue ID Offset within the BAR2 Page (and we need to use a + * BAR2 Queue ID of 0 for those writes). Otherwise, we'll simply + * write to the first BAR2 SGE Queue Area within the BAR2 Page with + * the BAR2 Queue ID and the hardware will infer the Absolute Queue ID + * from the BAR2 Page and BAR2 Queue ID. + * + * One important censequence of this is that some BAR2 SGE registers + * have a "Queue ID" field and we can write the BAR2 SGE Queue ID + * there. But other registers synthesize the SGE Queue ID purely + * from the writes to the registers -- the Write Combined Doorbell + * Buffer is a good example. These BAR2 SGE Registers are only + * available for those BAR2 SGE Register areas where the SGE Absolute + * Queue ID can be inferred from simple writes. 
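+	 *
+	 * Worked example (editor's note, not upstream text, assuming the
+	 * SGE_UDB_SIZE doorbell segment is 128 bytes): with a 4KB SGE
+	 * Page Size and 8 Egress Queues Per Page (qpp_shift = 3), Queue
+	 * ID 11 gives bar2_page_offset = 4096, bar2_qid = 3 and
+	 * bar2_qid_offset = 3 * 128 = 384.  Since 384 < 4096 the Queue
+	 * ID is inferred, so the returned offset is 4096 + 384 = 4480
+	 * and the returned BAR2 Queue ID is 0.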
+ */ + bar2_qoffset = bar2_page_offset; + bar2_qinferred = (bar2_qid_offset < page_size); + if (bar2_qinferred) { + bar2_qoffset += bar2_qid_offset; + bar2_qid = 0; + } + + *pbar2_qoffset = bar2_qoffset; + *pbar2_qid = bar2_qid; + return 0; +} + +/** + * t4_init_sge_params - initialize adap->params.sge + * @adapter: the adapter + * + * Initialize various fields of the adapter's SGE Parameters structure. + */ +int t4_init_sge_params(struct adapter *adapter) +{ + struct sge_params *sge_params = &adapter->params.sge; + u32 hps, qpp; + unsigned int s_hps, s_qpp; + + /* + * Extract the SGE Page Size for our PF. + */ + hps = t4_read_reg(adapter, A_SGE_HOST_PAGE_SIZE); + s_hps = (S_HOSTPAGESIZEPF0 + (S_HOSTPAGESIZEPF1 - S_HOSTPAGESIZEPF0) * + adapter->pf); + sge_params->hps = ((hps >> s_hps) & M_HOSTPAGESIZEPF0); + + /* + * Extract the SGE Egress and Ingess Queues Per Page for our PF. + */ + s_qpp = (S_QUEUESPERPAGEPF0 + + (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adapter->pf); + qpp = t4_read_reg(adapter, A_SGE_EGRESS_QUEUES_PER_PAGE_PF); + sge_params->eq_qpp = ((qpp >> s_qpp) & M_QUEUESPERPAGEPF0); + qpp = t4_read_reg(adapter, A_SGE_INGRESS_QUEUES_PER_PAGE_PF); + sge_params->iq_qpp = ((qpp >> s_qpp) & M_QUEUESPERPAGEPF0); + + return 0; +} + +/** + * t4_init_tp_params - initialize adap->params.tp + * @adap: the adapter + * + * Initialize various fields of the adapter's TP Parameters structure. + */ +int t4_init_tp_params(struct adapter *adap) +{ + int chan, ret; + u32 param, v; + + v = t4_read_reg(adap, A_TP_TIMER_RESOLUTION); + adap->params.tp.tre = G_TIMERRESOLUTION(v); + adap->params.tp.dack_re = G_DELAYEDACKRESOLUTION(v); + + /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */ + for (chan = 0; chan < NCHAN; chan++) + adap->params.tp.tx_modq[chan] = chan; + + /* + * Cache the adapter's Compressed Filter Mode/Mask and global Ingress + * Configuration. + */ + param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | + V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FILTER) | + V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_FILTER_MODE_MASK)); + + /* Read current value */ + ret = t4_query_params(adap, adap->mbox, adap->pf, 0, + 1, ¶m, &v); + if (!ret) { + dev_info(adap, "Current filter mode/mask 0x%x:0x%x\n", + G_FW_PARAMS_PARAM_FILTER_MODE(v), + G_FW_PARAMS_PARAM_FILTER_MASK(v)); + adap->params.tp.vlan_pri_map = + G_FW_PARAMS_PARAM_FILTER_MODE(v); + adap->params.tp.filter_mask = + G_FW_PARAMS_PARAM_FILTER_MASK(v); + } else { + dev_info(adap, + "Failed to read filter mode/mask via fw api, using indirect-reg-read\n"); + + /* In case of older-fw (which doesn't expose the api + * FW_PARAM_DEV_FILTER_MODE_MASK) and newer-driver (which uses + * the fw api) combination, fall-back to older method of reading + * the filter mode from indirect-register + */ + t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, + &adap->params.tp.vlan_pri_map, 1, + A_TP_VLAN_PRI_MAP); + + /* With the older-fw and newer-driver combination we might run + * into an issue when user wants to use hash filter region but + * the filter_mask is zero, in this case filter_mask validation + * is tough. To avoid that we set the filter_mask same as filter + * mode, which will behave exactly as the older way of ignoring + * the filter mask validation. + */ + adap->params.tp.filter_mask = adap->params.tp.vlan_pri_map; + } + + t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, + &adap->params.tp.ingress_config, 1, + A_TP_INGRESS_CONFIG); + + /* For T6, cache the adapter's compressed error vector + * and passing outer header info for encapsulated packets. 
+ */ + if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) { + v = t4_read_reg(adap, A_TP_OUT_CONFIG); + adap->params.tp.rx_pkt_encap = (v & F_CRXPKTENC) ? 1 : 0; + } + + /* + * Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field + * shift positions of several elements of the Compressed Filter Tuple + * for this adapter which we need frequently ... + */ + adap->params.tp.vlan_shift = t4_filter_field_shift(adap, F_VLAN); + adap->params.tp.vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID); + adap->params.tp.port_shift = t4_filter_field_shift(adap, F_PORT); + adap->params.tp.protocol_shift = t4_filter_field_shift(adap, + F_PROTOCOL); + adap->params.tp.ethertype_shift = t4_filter_field_shift(adap, + F_ETHERTYPE); + adap->params.tp.macmatch_shift = t4_filter_field_shift(adap, + F_MACMATCH); + adap->params.tp.tos_shift = t4_filter_field_shift(adap, F_TOS); + + v = t4_read_reg(adap, LE_3_DB_HASH_MASK_GEN_IPV4_T6_A); + adap->params.tp.hash_filter_mask = v; + v = t4_read_reg(adap, LE_4_DB_HASH_MASK_GEN_IPV4_T6_A); + adap->params.tp.hash_filter_mask |= ((u64)v << 32); + + return 0; +} + +/** + * t4_filter_field_shift - calculate filter field shift + * @adap: the adapter + * @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits) + * + * Return the shift position of a filter field within the Compressed + * Filter Tuple. The filter field is specified via its selection bit + * within TP_VLAN_PRI_MAL (filter mode). E.g. F_VLAN. + */ +int t4_filter_field_shift(const struct adapter *adap, unsigned int filter_sel) +{ + unsigned int filter_mode = adap->params.tp.vlan_pri_map; + unsigned int sel; + int field_shift; + + if ((filter_mode & filter_sel) == 0) + return -1; + + for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) { + switch (filter_mode & sel) { + case F_FCOE: + field_shift += W_FT_FCOE; + break; + case F_PORT: + field_shift += W_FT_PORT; + break; + case F_VNIC_ID: + field_shift += W_FT_VNIC_ID; + break; + case F_VLAN: + field_shift += W_FT_VLAN; + break; + case F_TOS: + field_shift += W_FT_TOS; + break; + case F_PROTOCOL: + field_shift += W_FT_PROTOCOL; + break; + case F_ETHERTYPE: + field_shift += W_FT_ETHERTYPE; + break; + case F_MACMATCH: + field_shift += W_FT_MACMATCH; + break; + case F_MPSHITTYPE: + field_shift += W_FT_MPSHITTYPE; + break; + case F_FRAGMENTATION: + field_shift += W_FT_FRAGMENTATION; + break; + } + } + return field_shift; +} + +int t4_init_rss_mode(struct adapter *adap, int mbox) +{ + int i, ret; + struct fw_rss_vi_config_cmd rvc; + + memset(&rvc, 0, sizeof(rvc)); + + for_each_port(adap, i) { + struct port_info *p = adap2pinfo(adap, i); + + rvc.op_to_viid = htonl(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) | + F_FW_CMD_REQUEST | F_FW_CMD_READ | + V_FW_RSS_VI_CONFIG_CMD_VIID(p->viid)); + rvc.retval_len16 = htonl(FW_LEN16(rvc)); + ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc); + if (ret) + return ret; + p->rss_mode = ntohl(rvc.u.basicvirtual.defaultq_to_udpen); + } + return 0; +} + +int t4_port_init(struct adapter *adap, int mbox, int pf, int vf) +{ + unsigned int fw_caps = adap->params.fw_caps_support; + fw_port_cap32_t pcaps, acaps; + enum fw_port_type port_type; + struct fw_port_cmd cmd; + u8 vivld = 0, vin = 0; + int ret, i, j = 0; + int mdio_addr; + u32 action; + u8 addr[6]; + + memset(&cmd, 0, sizeof(cmd)); + + for_each_port(adap, i) { + struct port_info *pi = adap2pinfo(adap, i); + unsigned int rss_size = 0; + + while ((adap->params.portvec & (1 << j)) == 0) + j++; + + /* If we haven't yet determined whether we're talking to + * Firmware 
which knows the new 32-bit Port Capabilities, it's + * time to find out now. This will also tell new Firmware to + * send us Port Status Updates using the new 32-bit Port + * Capabilities version of the Port Information message. + */ + if (fw_caps == FW_CAPS_UNKNOWN) { + u32 param, val, caps; + + caps = FW_PARAMS_PARAM_PFVF_PORT_CAPS32; + param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | + V_FW_PARAMS_PARAM_X(caps)); + val = 1; + ret = t4_set_params(adap, mbox, pf, vf, 1, ¶m, + &val); + fw_caps = ret == 0 ? FW_CAPS32 : FW_CAPS16; + adap->params.fw_caps_support = fw_caps; + } + + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) | + F_FW_CMD_REQUEST | + F_FW_CMD_READ | + V_FW_PORT_CMD_PORTID(j)); + action = fw_caps == FW_CAPS16 ? FW_PORT_ACTION_GET_PORT_INFO : + FW_PORT_ACTION_GET_PORT_INFO32; + cmd.action_to_len16 = cpu_to_be32(V_FW_PORT_CMD_ACTION(action) | + FW_LEN16(cmd)); + ret = t4_wr_mbox(pi->adapter, mbox, &cmd, sizeof(cmd), &cmd); + if (ret) + return ret; + + /* Extract the various fields from the Port Information message. + */ + if (fw_caps == FW_CAPS16) { + u32 lstatus = + be32_to_cpu(cmd.u.info.lstatus_to_modtype); + + port_type = G_FW_PORT_CMD_PTYPE(lstatus); + mdio_addr = (lstatus & F_FW_PORT_CMD_MDIOCAP) ? + (int)G_FW_PORT_CMD_MDIOADDR(lstatus) : -1; + pcaps = be16_to_cpu(cmd.u.info.pcap); + acaps = be16_to_cpu(cmd.u.info.acap); + pcaps = fwcaps16_to_caps32(pcaps); + acaps = fwcaps16_to_caps32(acaps); + } else { + u32 lstatus32 = + be32_to_cpu(cmd.u.info32.lstatus32_to_cbllen32); + + port_type = G_FW_PORT_CMD_PORTTYPE32(lstatus32); + mdio_addr = (lstatus32 & F_FW_PORT_CMD_MDIOCAP32) ? + (int)G_FW_PORT_CMD_MDIOADDR32(lstatus32) : + -1; + pcaps = be32_to_cpu(cmd.u.info32.pcaps32); + acaps = be32_to_cpu(cmd.u.info32.acaps32); + } + + ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size, + &vivld, &vin); + if (ret < 0) + return ret; + + pi->viid = ret; + pi->tx_chan = j; + pi->rss_size = rss_size; + t4_os_set_hw_addr(adap, i, addr); + + /* If fw supports returning the VIN as part of FW_VI_CMD, + * save the returned values. + */ + if (adap->params.viid_smt_extn_support) { + pi->vivld = vivld; + pi->vin = vin; + } else { + /* Retrieve the values from VIID */ + pi->vivld = G_FW_VIID_VIVLD(pi->viid); + pi->vin = G_FW_VIID_VIN(pi->viid); + } + + pi->port_type = port_type; + pi->mdio_addr = mdio_addr; + pi->mod_type = FW_PORT_MOD_TYPE_NA; + + init_link_config(&pi->link_cfg, pcaps, acaps); + j++; + } + return 0; +} + +/** + * t4_memory_rw_addr - read/write adapter memory via PCIE memory window + * @adap: the adapter + * @win: PCI-E Memory Window to use + * @addr: address within adapter memory + * @len: amount of memory to transfer + * @hbuf: host memory buffer + * @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0) + * + * Reads/writes an [almost] arbitrary memory region in the firmware: the + * firmware memory address and host buffer must be aligned on 32-bit + * boudaries; the length may be arbitrary. + * + * NOTES: + * 1. The memory is transferred as a raw byte sequence from/to the + * firmware's memory. If this memory contains data structures which + * contain multi-byte integers, it's the caller's responsibility to + * perform appropriate byte order conversions. + * + * 2. It is the Caller's responsibility to ensure that no other code + * uses the specified PCI-E Memory Window while this routine is + * using it. This is typically done via the use of OS-specific + * locks, etc. 
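+ *
+ *	Illustrative usage (editor's sketch, not part of the upstream
+ *	sources): reading 256 bytes starting at adapter address 0x1000
+ *	through memory window MEMWIN_NIC into a 32-bit aligned host
+ *	buffer:
+ *
+ *		u32 buf[64];
+ *
+ *		ret = t4_memory_rw_addr(adap, MEMWIN_NIC, 0x1000,
+ *					sizeof(buf), buf, T4_MEMORY_READ);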
+ */ +int t4_memory_rw_addr(struct adapter *adap, int win, u32 addr, + u32 len, void *hbuf, int dir) +{ + u32 pos, offset, resid; + u32 win_pf, mem_reg, mem_aperture, mem_base; + u32 *buf; + + /* Argument sanity checks ...*/ + if (addr & 0x3 || (uintptr_t)hbuf & 0x3) + return -EINVAL; + buf = (u32 *)hbuf; + + /* It's convenient to be able to handle lengths which aren't a + * multiple of 32-bits because we often end up transferring files to + * the firmware. So we'll handle that by normalizing the length here + * and then handling any residual transfer at the end. + */ + resid = len & 0x3; + len -= resid; + + /* Each PCI-E Memory Window is programmed with a window size -- or + * "aperture" -- which controls the granularity of its mapping onto + * adapter memory. We need to grab that aperture in order to know + * how to use the specified window. The window is also programmed + * with the base address of the Memory Window in BAR0's address + * space. For T4 this is an absolute PCI-E Bus Address. For T5 + * the address is relative to BAR0. + */ + mem_reg = t4_read_reg(adap, + PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, + win)); + mem_aperture = 1 << (G_WINDOW(mem_reg) + X_WINDOW_SHIFT); + mem_base = G_PCIEOFST(mem_reg) << X_PCIEOFST_SHIFT; + + win_pf = is_t4(adap->params.chip) ? 0 : V_PFNUM(adap->pf); + + /* Calculate our initial PCI-E Memory Window Position and Offset into + * that Window. + */ + pos = addr & ~(mem_aperture - 1); + offset = addr - pos; + + /* Set up initial PCI-E Memory Window to cover the start of our + * transfer. (Read it back to ensure that changes propagate before we + * attempt to use the new value.) + */ + t4_write_reg(adap, + PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, win), + pos | win_pf); + t4_read_reg(adap, + PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, win)); + + /* Transfer data to/from the adapter as long as there's an integral + * number of 32-bit transfers to complete. + * + * A note on Endianness issues: + * + * The "register" reads and writes below from/to the PCI-E Memory + * Window invoke the standard adapter Big-Endian to PCI-E Link + * Little-Endian "swizzel." As a result, if we have the following + * data in adapter memory: + * + * Memory: ... | b0 | b1 | b2 | b3 | ... + * Address: i+0 i+1 i+2 i+3 + * + * Then a read of the adapter memory via the PCI-E Memory Window + * will yield: + * + * x = readl(i) + * 31 0 + * [ b3 | b2 | b1 | b0 ] + * + * If this value is stored into local memory on a Little-Endian system + * it will show up correctly in local memory as: + * + * ( ..., b0, b1, b2, b3, ... ) + * + * But on a Big-Endian system, the store will show up in memory + * incorrectly swizzled as: + * + * ( ..., b3, b2, b1, b0, ... ) + * + * So we need to account for this in the reads and writes to the + * PCI-E Memory Window below by undoing the register read/write + * swizzels. + */ + while (len > 0) { + if (dir == T4_MEMORY_READ) + *buf++ = le32_to_cpu((__le32)t4_read_reg(adap, + mem_base + + offset)); + else + t4_write_reg(adap, mem_base + offset, + (u32)cpu_to_le32(*buf++)); + offset += sizeof(__be32); + len -= sizeof(__be32); + + /* If we've reached the end of our current window aperture, + * move the PCI-E Memory Window on to the next. Note that + * doing this here after "len" may be 0 allows us to set up + * the PCI-E Memory Window for a possible final residual + * transfer below ... 
+ */ + if (offset == mem_aperture) { + pos += mem_aperture; + offset = 0; + t4_write_reg(adap, + PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, + win), pos | win_pf); + t4_read_reg(adap, + PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, + win)); + } + } + + /* If the original transfer had a length which wasn't a multiple of + * 32-bits, now's where we need to finish off the transfer of the + * residual amount. The PCI-E Memory Window has already been moved + * above (if necessary) to cover this final transfer. + */ + if (resid) { + union { + u32 word; + char byte[4]; + } last; + unsigned char *bp; + int i; + + if (dir == T4_MEMORY_READ) { + last.word = le32_to_cpu((__le32)t4_read_reg(adap, + mem_base + + offset)); + for (bp = (unsigned char *)buf, i = resid; i < 4; i++) + bp[i] = last.byte[i]; + } else { + last.word = *buf; + for (i = resid; i < 4; i++) + last.byte[i] = 0; + t4_write_reg(adap, mem_base + offset, + (u32)cpu_to_le32(last.word)); + } + } + + return 0; +} + +/** + * t4_memory_rw_mtype -read/write EDC 0, EDC 1 or MC via PCIE memory window + * @adap: the adapter + * @win: PCI-E Memory Window to use + * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC + * @maddr: address within indicated memory type + * @len: amount of memory to transfer + * @hbuf: host memory buffer + * @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0) + * + * Reads/writes adapter memory using t4_memory_rw_addr(). This routine + * provides an (memory type, address within memory type) interface. + */ +int t4_memory_rw_mtype(struct adapter *adap, int win, int mtype, u32 maddr, + u32 len, void *hbuf, int dir) +{ + u32 mtype_offset; + u32 edc_size, mc_size; + + /* Offset into the region of memory which is being accessed + * MEM_EDC0 = 0 + * MEM_EDC1 = 1 + * MEM_MC = 2 -- MEM_MC for chips with only 1 memory controller + * MEM_MC1 = 3 -- for chips with 2 memory controllers (e.g. T5) + */ + edc_size = G_EDRAM0_SIZE(t4_read_reg(adap, A_MA_EDRAM0_BAR)); + if (mtype != MEM_MC1) { + mtype_offset = (mtype * (edc_size * 1024 * 1024)); + } else { + mc_size = G_EXT_MEM0_SIZE(t4_read_reg(adap, + A_MA_EXT_MEMORY0_BAR)); + mtype_offset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024; + } + + return t4_memory_rw_addr(adap, win, + mtype_offset + maddr, len, + hbuf, dir); +} diff --git a/src/spdk/dpdk/drivers/net/cxgbe/base/t4_hw.h b/src/spdk/dpdk/drivers/net/cxgbe/base/t4_hw.h new file mode 100644 index 000000000..e77563dfa --- /dev/null +++ b/src/spdk/dpdk/drivers/net/cxgbe/base/t4_hw.h @@ -0,0 +1,144 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Chelsio Communications. + * All rights reserved. 
+ */ + +#ifndef __T4_HW_H +#define __T4_HW_H + +enum { + NCHAN = 4, /* # of HW channels */ + EEPROMSIZE = 17408, /* Serial EEPROM physical size */ + EEPROMVSIZE = 32768, /* Serial EEPROM virtual address space size */ + EEPROMPFSIZE = 1024, /* EEPROM writable area size for PFn, n>0 */ + NMTUS = 16, /* size of MTU table */ + NCCTRL_WIN = 32, /* # of congestion control windows */ + MBOX_LEN = 64, /* mailbox size in bytes */ + UDBS_SEG_SIZE = 128, /* segment size for BAR2 user doorbells */ +}; + +enum { + CIMLA_SIZE = 2048, /* # of 32-bit words in CIM LA */ +}; + +enum { + SF_SEC_SIZE = 64 * 1024, /* serial flash sector size */ +}; + +enum { + SGE_NTIMERS = 6, /* # of interrupt holdoff timer values */ + SGE_NCOUNTERS = 4, /* # of interrupt packet counter values */ +}; + +/* PCI-e memory window access */ +enum pcie_memwin { + MEMWIN_NIC = 0, +}; + +enum { + SGE_MAX_WR_LEN = 512, /* max WR size in bytes */ + SGE_EQ_IDXSIZE = 64, /* egress queue pidx/cidx unit size */ + /* max no. of desc allowed in WR */ + SGE_MAX_WR_NDESC = SGE_MAX_WR_LEN / SGE_EQ_IDXSIZE, +}; + +enum { + TCB_SIZE = 128, /* TCB size */ +}; + +struct sge_qstat { /* data written to SGE queue status entries */ + __be32 qid; + __be16 cidx; + __be16 pidx; +}; + +/* + * Structure for last 128 bits of response descriptors + */ +struct rsp_ctrl { + __be32 hdrbuflen_pidx; + __be32 pldbuflen_qid; + union { + u8 type_gen; + __be64 last_flit; + } u; +}; + +#define S_RSPD_NEWBUF 31 +#define V_RSPD_NEWBUF(x) ((x) << S_RSPD_NEWBUF) +#define F_RSPD_NEWBUF V_RSPD_NEWBUF(1U) + +#define S_RSPD_LEN 0 +#define M_RSPD_LEN 0x7fffffff +#define V_RSPD_LEN(x) ((x) << S_RSPD_LEN) +#define G_RSPD_LEN(x) (((x) >> S_RSPD_LEN) & M_RSPD_LEN) + +#define S_RSPD_GEN 7 +#define V_RSPD_GEN(x) ((x) << S_RSPD_GEN) +#define F_RSPD_GEN V_RSPD_GEN(1U) + +#define S_RSPD_TYPE 4 +#define M_RSPD_TYPE 0x3 +#define V_RSPD_TYPE(x) ((x) << S_RSPD_TYPE) +#define G_RSPD_TYPE(x) (((x) >> S_RSPD_TYPE) & M_RSPD_TYPE) + +/* Rx queue interrupt deferral field: timer index */ +#define S_QINTR_CNT_EN 0 +#define V_QINTR_CNT_EN(x) ((x) << S_QINTR_CNT_EN) +#define F_QINTR_CNT_EN V_QINTR_CNT_EN(1U) + +#define S_QINTR_TIMER_IDX 1 +#define M_QINTR_TIMER_IDX 0x7 +#define V_QINTR_TIMER_IDX(x) ((x) << S_QINTR_TIMER_IDX) +#define G_QINTR_TIMER_IDX(x) (((x) >> S_QINTR_TIMER_IDX) & M_QINTR_TIMER_IDX) + +/* + * Flash layout. + */ +#define FLASH_START(start) ((start) * SF_SEC_SIZE) +#define FLASH_MAX_SIZE(nsecs) ((nsecs) * SF_SEC_SIZE) + +enum { + /* + * Various Expansion-ROM boot images, etc. + */ + FLASH_EXP_ROM_START_SEC = 0, + FLASH_EXP_ROM_NSECS = 6, + FLASH_EXP_ROM_START = FLASH_START(FLASH_EXP_ROM_START_SEC), + FLASH_EXP_ROM_MAX_SIZE = FLASH_MAX_SIZE(FLASH_EXP_ROM_NSECS), + + /* + * Location of firmware image in FLASH. + */ + FLASH_FW_START_SEC = 8, + FLASH_FW_NSECS = 16, + FLASH_FW_START = FLASH_START(FLASH_FW_START_SEC), + FLASH_FW_MAX_SIZE = FLASH_MAX_SIZE(FLASH_FW_NSECS), + + /* + * Location of bootstrap firmware image in FLASH. + */ + FLASH_FWBOOTSTRAP_START_SEC = 27, + FLASH_FWBOOTSTRAP_NSECS = 1, + FLASH_FWBOOTSTRAP_START = FLASH_START(FLASH_FWBOOTSTRAP_START_SEC), + FLASH_FWBOOTSTRAP_MAX_SIZE = FLASH_MAX_SIZE(FLASH_FWBOOTSTRAP_NSECS), + + /* + * Location of Firmware Configuration File in FLASH. 
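+	 *
+	 * (Editor's note, not upstream text: with 64KB sectors this puts
+	 * the Configuration File at byte offset 31 * 64KB = 0x1f0000, so
+	 * FLASH_MIN_SIZE below works out to 0x1f0000 + 0x10000 =
+	 * 0x200000, i.e. 2MB.)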
+ */ + FLASH_CFG_START_SEC = 31, + FLASH_CFG_NSECS = 1, + FLASH_CFG_START = FLASH_START(FLASH_CFG_START_SEC), + FLASH_CFG_MAX_SIZE = FLASH_MAX_SIZE(FLASH_CFG_NSECS), + + /* + * We don't support FLASH devices which can't support the full + * standard set of sections which we need for normal operations. + */ + FLASH_MIN_SIZE = FLASH_CFG_START + FLASH_CFG_MAX_SIZE, +}; + +#undef FLASH_START +#undef FLASH_MAX_SIZE + +#endif /* __T4_HW_H */ diff --git a/src/spdk/dpdk/drivers/net/cxgbe/base/t4_msg.h b/src/spdk/dpdk/drivers/net/cxgbe/base/t4_msg.h new file mode 100644 index 000000000..a6ddaa7b0 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/cxgbe/base/t4_msg.h @@ -0,0 +1,625 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Chelsio Communications. + * All rights reserved. + */ + +#ifndef T4_MSG_H +#define T4_MSG_H + +enum { + CPL_ACT_OPEN_REQ = 0x3, + CPL_SET_TCB_FIELD = 0x5, + CPL_ABORT_REQ = 0xA, + CPL_ABORT_RPL = 0xB, + CPL_L2T_WRITE_REQ = 0x12, + CPL_SMT_WRITE_REQ = 0x14, + CPL_TID_RELEASE = 0x1A, + CPL_L2T_WRITE_RPL = 0x23, + CPL_ACT_OPEN_RPL = 0x25, + CPL_ABORT_RPL_RSS = 0x2D, + CPL_SMT_WRITE_RPL = 0x2E, + CPL_SET_TCB_RPL = 0x3A, + CPL_ACT_OPEN_REQ6 = 0x83, + CPL_SGE_EGR_UPDATE = 0xA5, + CPL_FW4_MSG = 0xC0, + CPL_FW6_MSG = 0xE0, + CPL_TX_PKT_LSO = 0xED, + CPL_TX_PKT_XT = 0xEE, +}; + +enum CPL_error { + CPL_ERR_NONE = 0, + CPL_ERR_TCAM_FULL = 3, +}; + +enum { + ULP_MODE_NONE = 0, + ULP_MODE_TCPDDP = 5, +}; + +enum { + CPL_ABORT_SEND_RST = 0, + CPL_ABORT_NO_RST, +}; + +enum { /* TX_PKT_XT checksum types */ + TX_CSUM_TCPIP = 8, + TX_CSUM_UDPIP = 9, + TX_CSUM_TCPIP6 = 10, +}; + +union opcode_tid { + __be32 opcode_tid; + __u8 opcode; +}; + +#define S_CPL_OPCODE 24 +#define V_CPL_OPCODE(x) ((x) << S_CPL_OPCODE) + +#define G_TID(x) ((x) & 0xFFFFFF) + +/* tid is assumed to be 24-bits */ +#define MK_OPCODE_TID(opcode, tid) (V_CPL_OPCODE(opcode) | (tid)) + +#define OPCODE_TID(cmd) ((cmd)->ot.opcode_tid) + +/* extract the TID from a CPL command */ +#define GET_TID(cmd) (G_TID(be32_to_cpu(OPCODE_TID(cmd)))) + +/* partitioning of TID fields that also carry a queue id */ +#define S_TID_TID 0 +#define M_TID_TID 0x3fff +#define G_TID_TID(x) (((x) >> S_TID_TID) & M_TID_TID) + +#define S_TID_QID 14 +#define V_TID_QID(x) ((x) << S_TID_QID) + +struct rss_header { + __u8 opcode; +#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN + __u8 channel:2; + __u8 filter_hit:1; + __u8 filter_tid:1; + __u8 hash_type:2; + __u8 ipv6:1; + __u8 send2fw:1; +#else + __u8 send2fw:1; + __u8 ipv6:1; + __u8 hash_type:2; + __u8 filter_tid:1; + __u8 filter_hit:1; + __u8 channel:2; +#endif + __be16 qid; + __be32 hash_val; +}; + +#if defined(RSS_HDR_VLD) || defined(CHELSIO_FW) +#define RSS_HDR struct rss_header rss_hdr +#else +#define RSS_HDR +#endif + +#ifndef CHELSIO_FW +struct work_request_hdr { + __be32 wr_hi; + __be32 wr_mid; + __be64 wr_lo; +}; + +#define WR_HDR struct work_request_hdr wr +#define WR_HDR_SIZE sizeof(struct work_request_hdr) +#else +#define WR_HDR +#define WR_HDR_SIZE 0 +#endif + +#define S_COOKIE 5 +#define M_COOKIE 0x7 +#define V_COOKIE(x) ((x) << S_COOKIE) +#define G_COOKIE(x) (((x) >> S_COOKIE) & M_COOKIE) + +/* option 0 fields */ +#define S_TX_CHAN 2 +#define V_TX_CHAN(x) ((x) << S_TX_CHAN) + +#define S_DELACK 5 +#define V_DELACK(x) ((x) << S_DELACK) + +#define S_NON_OFFLOAD 7 +#define V_NON_OFFLOAD(x) ((x) << S_NON_OFFLOAD) +#define F_NON_OFFLOAD V_NON_OFFLOAD(1U) + +#define S_ULP_MODE 8 +#define V_ULP_MODE(x) ((x) << S_ULP_MODE) + +#define S_SMAC_SEL 28 +#define V_SMAC_SEL(x) ((__u64)(x) << 
S_SMAC_SEL) + +#define S_TCAM_BYPASS 48 +#define V_TCAM_BYPASS(x) ((__u64)(x) << S_TCAM_BYPASS) +#define F_TCAM_BYPASS V_TCAM_BYPASS(1ULL) + +#define S_L2T_IDX 36 +#define V_L2T_IDX(x) ((__u64)(x) << S_L2T_IDX) + +#define S_NAGLE 49 +#define V_NAGLE(x) ((__u64)(x) << S_NAGLE) + +/* option 2 fields */ +#define S_RSS_QUEUE 0 +#define V_RSS_QUEUE(x) ((x) << S_RSS_QUEUE) + +#define S_RSS_QUEUE_VALID 10 +#define V_RSS_QUEUE_VALID(x) ((x) << S_RSS_QUEUE_VALID) +#define F_RSS_QUEUE_VALID V_RSS_QUEUE_VALID(1U) + +#define S_CONG_CNTRL 14 +#define V_CONG_CNTRL(x) ((x) << S_CONG_CNTRL) + +#define S_RX_CHANNEL 26 +#define V_RX_CHANNEL(x) ((x) << S_RX_CHANNEL) +#define F_RX_CHANNEL V_RX_CHANNEL(1U) + +#define S_CCTRL_ECN 27 +#define V_CCTRL_ECN(x) ((x) << S_CCTRL_ECN) + +#define S_SACK_EN 30 +#define V_SACK_EN(x) ((x) << S_SACK_EN) + +#define S_T5_OPT_2_VALID 31 +#define V_T5_OPT_2_VALID(x) ((x) << S_T5_OPT_2_VALID) +#define F_T5_OPT_2_VALID V_T5_OPT_2_VALID(1U) + +struct cpl_t6_act_open_req { + WR_HDR; + union opcode_tid ot; + __be16 local_port; + __be16 peer_port; + __be32 local_ip; + __be32 peer_ip; + __be64 opt0; + __be32 rsvd; + __be32 opt2; + __be64 params; + __be32 rsvd2; + __be32 opt3; +}; + +struct cpl_t6_act_open_req6 { + WR_HDR; + union opcode_tid ot; + __be16 local_port; + __be16 peer_port; + __be64 local_ip_hi; + __be64 local_ip_lo; + __be64 peer_ip_hi; + __be64 peer_ip_lo; + __be64 opt0; + __be32 rsvd; + __be32 opt2; + __be64 params; + __be32 rsvd2; + __be32 opt3; +}; + +#define S_FILTER_TUPLE 24 +#define V_FILTER_TUPLE(x) ((x) << S_FILTER_TUPLE) + +struct cpl_act_open_rpl { + RSS_HDR + union opcode_tid ot; + __be32 atid_status; +}; + +/* cpl_act_open_rpl.atid_status fields */ +#define S_AOPEN_STATUS 0 +#define M_AOPEN_STATUS 0xFF +#define G_AOPEN_STATUS(x) (((x) >> S_AOPEN_STATUS) & M_AOPEN_STATUS) + +#define S_AOPEN_ATID 8 +#define M_AOPEN_ATID 0xFFFFFF +#define G_AOPEN_ATID(x) (((x) >> S_AOPEN_ATID) & M_AOPEN_ATID) + +struct cpl_set_tcb_field { + WR_HDR; + union opcode_tid ot; + __be16 reply_ctrl; + __be16 word_cookie; + __be64 mask; + __be64 val; +}; + +/* cpl_set_tcb_field.word_cookie fields */ +#define S_WORD 0 +#define V_WORD(x) ((x) << S_WORD) + +/* cpl_get_tcb.reply_ctrl fields */ +#define S_QUEUENO 0 +#define V_QUEUENO(x) ((x) << S_QUEUENO) + +#define S_REPLY_CHAN 14 +#define V_REPLY_CHAN(x) ((x) << S_REPLY_CHAN) + +#define S_NO_REPLY 15 +#define V_NO_REPLY(x) ((x) << S_NO_REPLY) + +struct cpl_set_tcb_rpl { + RSS_HDR + union opcode_tid ot; + __be16 rsvd; + __u8 cookie; + __u8 status; + __be64 oldval; +}; + +/* cpl_abort_req status command code + */ +struct cpl_abort_req { + WR_HDR; + union opcode_tid ot; + __be32 rsvd0; + __u8 rsvd1; + __u8 cmd; + __u8 rsvd2[6]; +}; + +struct cpl_abort_rpl_rss { + RSS_HDR + union opcode_tid ot; + __u8 rsvd[3]; + __u8 status; +}; + +struct cpl_abort_rpl { + WR_HDR; + union opcode_tid ot; + __be32 rsvd0; + __u8 rsvd1; + __u8 cmd; + __u8 rsvd2[6]; +}; + +struct cpl_tid_release { + WR_HDR; + union opcode_tid ot; + __be32 rsvd; +}; + +struct cpl_tx_data { + union opcode_tid ot; + __be32 len; + __be32 rsvd; + __be32 flags; +}; + +struct cpl_tx_pkt_core { + __be32 ctrl0; + __be16 pack; + __be16 len; + __be64 ctrl1; +}; + +struct cpl_tx_pkt { + WR_HDR; + struct cpl_tx_pkt_core c; +}; + +/* cpl_tx_pkt_core.ctrl0 fields */ +#define S_TXPKT_PF 8 +#define M_TXPKT_PF 0x7 +#define V_TXPKT_PF(x) ((x) << S_TXPKT_PF) +#define G_TXPKT_PF(x) (((x) >> S_TXPKT_PF) & M_TXPKT_PF) + +#define S_TXPKT_INTF 16 +#define M_TXPKT_INTF 0xF +#define V_TXPKT_INTF(x) ((x) << 
S_TXPKT_INTF) +#define G_TXPKT_INTF(x) (((x) >> S_TXPKT_INTF) & M_TXPKT_INTF) + +#define S_TXPKT_OPCODE 24 +#define M_TXPKT_OPCODE 0xFF +#define V_TXPKT_OPCODE(x) ((x) << S_TXPKT_OPCODE) +#define G_TXPKT_OPCODE(x) (((x) >> S_TXPKT_OPCODE) & M_TXPKT_OPCODE) + +/* cpl_tx_pkt_core.ctrl1 fields */ +#define S_TXPKT_IPHDR_LEN 20 +#define M_TXPKT_IPHDR_LEN 0x3FFF +#define V_TXPKT_IPHDR_LEN(x) ((__u64)(x) << S_TXPKT_IPHDR_LEN) +#define G_TXPKT_IPHDR_LEN(x) (((x) >> S_TXPKT_IPHDR_LEN) & M_TXPKT_IPHDR_LEN) + +#define S_TXPKT_ETHHDR_LEN 34 +#define M_TXPKT_ETHHDR_LEN 0x3F +#define V_TXPKT_ETHHDR_LEN(x) ((__u64)(x) << S_TXPKT_ETHHDR_LEN) +#define G_TXPKT_ETHHDR_LEN(x) (((x) >> S_TXPKT_ETHHDR_LEN) & M_TXPKT_ETHHDR_LEN) + +#define S_T6_TXPKT_ETHHDR_LEN 32 +#define M_T6_TXPKT_ETHHDR_LEN 0xFF +#define V_T6_TXPKT_ETHHDR_LEN(x) ((__u64)(x) << S_T6_TXPKT_ETHHDR_LEN) +#define G_T6_TXPKT_ETHHDR_LEN(x) \ + (((x) >> S_T6_TXPKT_ETHHDR_LEN) & M_T6_TXPKT_ETHHDR_LEN) + +#define S_TXPKT_CSUM_TYPE 40 +#define M_TXPKT_CSUM_TYPE 0xF +#define V_TXPKT_CSUM_TYPE(x) ((__u64)(x) << S_TXPKT_CSUM_TYPE) +#define G_TXPKT_CSUM_TYPE(x) (((x) >> S_TXPKT_CSUM_TYPE) & M_TXPKT_CSUM_TYPE) + +#define S_TXPKT_VLAN 44 +#define M_TXPKT_VLAN 0xFFFF +#define V_TXPKT_VLAN(x) ((__u64)(x) << S_TXPKT_VLAN) +#define G_TXPKT_VLAN(x) (((x) >> S_TXPKT_VLAN) & M_TXPKT_VLAN) + +#define S_TXPKT_VLAN_VLD 60 +#define V_TXPKT_VLAN_VLD(x) ((__u64)(x) << S_TXPKT_VLAN_VLD) +#define F_TXPKT_VLAN_VLD V_TXPKT_VLAN_VLD(1ULL) + +#define S_TXPKT_IPCSUM_DIS 62 +#define V_TXPKT_IPCSUM_DIS(x) ((__u64)(x) << S_TXPKT_IPCSUM_DIS) +#define F_TXPKT_IPCSUM_DIS V_TXPKT_IPCSUM_DIS(1ULL) + +#define S_TXPKT_L4CSUM_DIS 63 +#define V_TXPKT_L4CSUM_DIS(x) ((__u64)(x) << S_TXPKT_L4CSUM_DIS) +#define F_TXPKT_L4CSUM_DIS V_TXPKT_L4CSUM_DIS(1ULL) + +struct cpl_tx_pkt_lso_core { + __be32 lso_ctrl; + __be16 ipid_ofst; + __be16 mss; + __be32 seqno_offset; + __be32 len; + /* encapsulated CPL (TX_PKT, TX_PKT_XT or TX_DATA) follows here */ +}; + +struct cpl_tx_pkt_lso { + WR_HDR; + struct cpl_tx_pkt_lso_core c; + /* encapsulated CPL (TX_PKT, TX_PKT_XT or TX_DATA) follows here */ +}; + +/* cpl_tx_pkt_lso_core.lso_ctrl fields */ +#define S_LSO_TCPHDR_LEN 0 +#define M_LSO_TCPHDR_LEN 0xF +#define V_LSO_TCPHDR_LEN(x) ((x) << S_LSO_TCPHDR_LEN) +#define G_LSO_TCPHDR_LEN(x) (((x) >> S_LSO_TCPHDR_LEN) & M_LSO_TCPHDR_LEN) + +#define S_LSO_IPHDR_LEN 4 +#define M_LSO_IPHDR_LEN 0xFFF +#define V_LSO_IPHDR_LEN(x) ((x) << S_LSO_IPHDR_LEN) +#define G_LSO_IPHDR_LEN(x) (((x) >> S_LSO_IPHDR_LEN) & M_LSO_IPHDR_LEN) + +#define S_LSO_ETHHDR_LEN 16 +#define M_LSO_ETHHDR_LEN 0xF +#define V_LSO_ETHHDR_LEN(x) ((x) << S_LSO_ETHHDR_LEN) +#define G_LSO_ETHHDR_LEN(x) (((x) >> S_LSO_ETHHDR_LEN) & M_LSO_ETHHDR_LEN) + +#define S_LSO_IPV6 20 +#define V_LSO_IPV6(x) ((x) << S_LSO_IPV6) +#define F_LSO_IPV6 V_LSO_IPV6(1U) + +#define S_LSO_LAST_SLICE 22 +#define V_LSO_LAST_SLICE(x) ((x) << S_LSO_LAST_SLICE) +#define F_LSO_LAST_SLICE V_LSO_LAST_SLICE(1U) + +#define S_LSO_FIRST_SLICE 23 +#define V_LSO_FIRST_SLICE(x) ((x) << S_LSO_FIRST_SLICE) +#define F_LSO_FIRST_SLICE V_LSO_FIRST_SLICE(1U) + +#define S_LSO_OPCODE 24 +#define M_LSO_OPCODE 0xFF +#define V_LSO_OPCODE(x) ((x) << S_LSO_OPCODE) +#define G_LSO_OPCODE(x) (((x) >> S_LSO_OPCODE) & M_LSO_OPCODE) + +#define S_LSO_T5_XFER_SIZE 0 +#define M_LSO_T5_XFER_SIZE 0xFFFFFFF +#define V_LSO_T5_XFER_SIZE(x) ((x) << S_LSO_T5_XFER_SIZE) +#define G_LSO_T5_XFER_SIZE(x) (((x) >> S_LSO_T5_XFER_SIZE) & M_LSO_T5_XFER_SIZE) + +struct cpl_rx_pkt { + RSS_HDR; + __u8 opcode; +#if RTE_BYTE_ORDER == 
RTE_LITTLE_ENDIAN + __u8 iff:4; + __u8 csum_calc:1; + __u8 ipmi_pkt:1; + __u8 vlan_ex:1; + __u8 ip_frag:1; +#else + __u8 ip_frag:1; + __u8 vlan_ex:1; + __u8 ipmi_pkt:1; + __u8 csum_calc:1; + __u8 iff:4; +#endif + __be16 csum; + __be16 vlan; + __be16 len; + __be32 l2info; + __be16 hdr_len; + __be16 err_vec; +}; + +struct cpl_l2t_write_req { + WR_HDR; + union opcode_tid ot; + __be16 params; + __be16 l2t_idx; + __be16 vlan; + __u8 dst_mac[6]; +}; + +/* cpl_l2t_write_req.params fields */ +#define S_L2T_W_PORT 8 +#define V_L2T_W_PORT(x) ((x) << S_L2T_W_PORT) + +#define S_L2T_W_LPBK 10 +#define V_L2T_W_LPBK(x) ((x) << S_L2T_W_LPBK) + +#define S_L2T_W_ARPMISS 11 +#define V_L2T_W_ARPMISS(x) ((x) << S_L2T_W_ARPMISS) + +#define S_L2T_W_NOREPLY 15 +#define V_L2T_W_NOREPLY(x) ((x) << S_L2T_W_NOREPLY) + +struct cpl_l2t_write_rpl { + RSS_HDR + union opcode_tid ot; + __u8 status; + __u8 rsvd[3]; +}; + +struct cpl_smt_write_req { + WR_HDR; + union opcode_tid ot; + __be32 params; + __be16 pfvf1; + __u8 src_mac1[6]; + __be16 pfvf0; + __u8 src_mac0[6]; +}; + +struct cpl_t6_smt_write_req { + WR_HDR; + union opcode_tid ot; + __be32 params; + __be64 tag; + __be16 pfvf0; + __u8 src_mac0[6]; + __be32 local_ip; + __be32 rsvd; +}; + +struct cpl_smt_write_rpl { + RSS_HDR + union opcode_tid ot; + u8 status; + u8 rsvd[3]; +}; + +/* cpl_smt_{read,write}_req.params fields */ +#define S_SMTW_OVLAN_IDX 16 +#define V_SMTW_OVLAN_IDX(x) ((x) << S_SMTW_OVLAN_IDX) + +#define S_SMTW_IDX 20 +#define V_SMTW_IDX(x) ((x) << S_SMTW_IDX) + +#define S_SMTW_NORPL 31 +#define V_SMTW_NORPL(x) ((x) << S_SMTW_NORPL) + +/* rx_pkt.l2info fields */ +#define S_RXF_UDP 22 +#define V_RXF_UDP(x) ((x) << S_RXF_UDP) +#define F_RXF_UDP V_RXF_UDP(1U) + +#define S_RXF_TCP 23 +#define V_RXF_TCP(x) ((x) << S_RXF_TCP) +#define F_RXF_TCP V_RXF_TCP(1U) + +#define S_RXF_IP 24 +#define V_RXF_IP(x) ((x) << S_RXF_IP) +#define F_RXF_IP V_RXF_IP(1U) + +#define S_RXF_IP6 25 +#define V_RXF_IP6(x) ((x) << S_RXF_IP6) +#define F_RXF_IP6 V_RXF_IP6(1U) + +/* rx_pkt.err_vec fields */ +/* In T6, rx_pkt.err_vec indicates + * RxError Error vector (16b) or + * Encapsulating header length (8b), + * Outer encapsulation type (2b) and + * compressed error vector (6b) if CRxPktEnc is + * enabled in TP_OUT_CONFIG + */ +#define S_T6_COMPR_RXERR_VEC 0 +#define M_T6_COMPR_RXERR_VEC 0x3F +#define V_T6_COMPR_RXERR_VEC(x) ((x) << S_T6_COMPR_RXERR_VEC) +#define G_T6_COMPR_RXERR_VEC(x) \ + (((x) >> S_T6_COMPR_RXERR_VEC) & M_T6_COMPR_RXERR_VEC) + +/* cpl_fw*.type values */ +enum { + FW_TYPE_RSSCPL = 4, +}; + +struct cpl_fw4_msg { + RSS_HDR; + u8 opcode; + u8 type; + __be16 rsvd0; + __be32 rsvd1; + __be64 data[2]; +}; + +struct cpl_fw6_msg { + RSS_HDR; + u8 opcode; + u8 type; + __be16 rsvd0; + __be32 rsvd1; + __be64 data[4]; +}; + +/* ULP_TX opcodes */ +enum { + ULP_TX_PKT = 4 +}; + +enum { + ULP_TX_SC_NOOP = 0x80, + ULP_TX_SC_IMM = 0x81, + ULP_TX_SC_DSGL = 0x82, + ULP_TX_SC_ISGL = 0x83 +}; + +#define S_ULPTX_CMD 24 +#define M_ULPTX_CMD 0xFF +#define V_ULPTX_CMD(x) ((x) << S_ULPTX_CMD) + +#define S_ULP_TX_SC_MORE 23 +#define V_ULP_TX_SC_MORE(x) ((x) << S_ULP_TX_SC_MORE) +#define F_ULP_TX_SC_MORE V_ULP_TX_SC_MORE(1U) + +struct ulptx_sge_pair { + __be32 len[2]; + __be64 addr[2]; +}; + +struct ulptx_sgl { + __be32 cmd_nsge; + __be32 len0; + __be64 addr0; + +#if !(defined C99_NOT_SUPPORTED) + struct ulptx_sge_pair sge[0]; +#endif + +}; + +struct ulptx_idata { + __be32 cmd_more; + __be32 len; +}; + +#define S_ULPTX_NSGE 0 +#define M_ULPTX_NSGE 0xFFFF +#define V_ULPTX_NSGE(x) ((x) << 
S_ULPTX_NSGE)
+
+struct ulp_txpkt {
+ __be32 cmd_dest;
+ __be32 len;
+};
+
+/* ulp_txpkt.cmd_dest fields */
+#define S_ULP_TXPKT_DEST 16
+#define M_ULP_TXPKT_DEST 0x3
+#define V_ULP_TXPKT_DEST(x) ((x) << S_ULP_TXPKT_DEST)
+
+#define S_ULP_TXPKT_FID 4
+#define M_ULP_TXPKT_FID 0x7ff
+#define V_ULP_TXPKT_FID(x) ((x) << S_ULP_TXPKT_FID)
+
+#define S_ULP_TXPKT_RO 3
+#define V_ULP_TXPKT_RO(x) ((x) << S_ULP_TXPKT_RO)
+#define F_ULP_TXPKT_RO V_ULP_TXPKT_RO(1U)
+
+#endif /* T4_MSG_H */
diff --git a/src/spdk/dpdk/drivers/net/cxgbe/base/t4_pci_id_tbl.h b/src/spdk/dpdk/drivers/net/cxgbe/base/t4_pci_id_tbl.h
new file mode 100644
index 000000000..f5f027a2e
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/cxgbe/base/t4_pci_id_tbl.h
@@ -0,0 +1,186 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2019 Chelsio Communications.
+ * All rights reserved.
+ */
+
+#ifndef __T4_PCI_ID_TBL_H__
+#define __T4_PCI_ID_TBL_H__
+
+/*
+ * The OS-dependent code can define cpp macros for creating a PCI Device ID
+ * Table. This is useful because it allows the PCI ID Table to be maintained
+ * in a single place and all supporting OSes to get new PCI Device IDs
+ * automatically.
+ *
+ * The macros are:
+ *
+ * CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
+ * -- Used to start the definition of the PCI ID Table.
+ *
+ * CH_PCI_DEVICE_ID_FUNCTION
+ * -- The PCI Function Number to use in the PCI Device ID Table. "0"
+ * -- for drivers attaching to PF0-3, "4" for drivers attaching to PF4,
+ * -- "8" for drivers attaching to SR-IOV Virtual Functions, etc.
+ *
+ * CH_PCI_DEVICE_ID_FUNCTION2 [optional]
+ * -- If defined, create a PCI Device ID Table with both
+ * -- CH_PCI_DEVICE_ID_FUNCTION and CH_PCI_DEVICE_ID_FUNCTION2 populated.
+ *
+ * CH_PCI_ID_TABLE_ENTRY(DeviceID)
+ * -- Used for the individual PCI Device ID entries. Note that we will
+ * -- be adding a trailing comma (",") after all of the entries (and
+ * -- between the pairs of entries if CH_PCI_DEVICE_ID_FUNCTION2 is defined).
+ *
+ * CH_PCI_DEVICE_ID_TABLE_DEFINE_END
+ * -- Used to finish the definition of the PCI ID Table. Note that we
+ * -- will be adding a trailing semi-colon (";") here.
+ *
+ * CH_PCI_DEVICE_ID_BYPASS_SUPPORTED [optional]
+ * -- If defined, indicates that the OS Driver has support for Bypass
+ * -- Adapters.
+ */
+#ifdef CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
+
+/*
+ * Some sanity checks ...
+ */
+#ifndef CH_PCI_DEVICE_ID_FUNCTION
+#error CH_PCI_DEVICE_ID_FUNCTION not defined!
+#endif
+#ifndef CH_PCI_ID_TABLE_ENTRY
+#error CH_PCI_ID_TABLE_ENTRY not defined!
+#endif
+#ifndef CH_PCI_DEVICE_ID_TABLE_DEFINE_END
+#error CH_PCI_DEVICE_ID_TABLE_DEFINE_END not defined!
+#endif
+
+/*
+ * T4 and later ASICs use a PCI Device ID scheme of 0xVFPP where:
+ *
+ * V = "4" for T4; "5" for T5, etc.
+ * F = "0" for PF 0..3; "4".."7" for PF4..7; and "8" for VFs
+ * PP = adapter product designation
+ *
+ * We use this consistency in order to create the proper PCI Device IDs
+ * for the specified CH_PCI_DEVICE_ID_FUNCTION.
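+ *
+ * (Illustrative note, not in the upstream header: the T520-LL-cr base
+ * Device ID 0x5011 listed below becomes 0x5411 when built for PF4
+ * (F = "4") and 0x5811 when built for an SR-IOV Virtual Function
+ * (F = "8"); CH_PCI_ID_TABLE_FENTRY() below produces exactly this by
+ * OR-ing CH_PCI_DEVICE_ID_FUNCTION << 8 into each base Device ID.)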
+ */ +#ifndef CH_PCI_DEVICE_ID_FUNCTION2 +#define CH_PCI_ID_TABLE_FENTRY(devid) \ + CH_PCI_ID_TABLE_ENTRY((devid) | \ + ((CH_PCI_DEVICE_ID_FUNCTION) << 8)) +#else +#define CH_PCI_ID_TABLE_FENTRY(devid) \ + CH_PCI_ID_TABLE_ENTRY((devid) | \ + ((CH_PCI_DEVICE_ID_FUNCTION) << 8)), \ + CH_PCI_ID_TABLE_ENTRY((devid) | \ + ((CH_PCI_DEVICE_ID_FUNCTION2) << 8)) +#endif + +CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN + /* + * T5 adapters: + */ + CH_PCI_ID_TABLE_FENTRY(0x5000), /* T580-dbg */ + CH_PCI_ID_TABLE_FENTRY(0x5001), /* T520-cr */ + CH_PCI_ID_TABLE_FENTRY(0x5002), /* T522-cr */ + CH_PCI_ID_TABLE_FENTRY(0x5003), /* T540-cr */ + CH_PCI_ID_TABLE_FENTRY(0x5004), /* T520-bch */ + CH_PCI_ID_TABLE_FENTRY(0x5005), /* T540-bch */ + CH_PCI_ID_TABLE_FENTRY(0x5006), /* T540-ch */ + CH_PCI_ID_TABLE_FENTRY(0x5007), /* T520-so */ + CH_PCI_ID_TABLE_FENTRY(0x5008), /* T520-cx */ + CH_PCI_ID_TABLE_FENTRY(0x5009), /* T520-bt */ + CH_PCI_ID_TABLE_FENTRY(0x500a), /* T504-bt */ +#ifdef CH_PCI_DEVICE_ID_BYPASS_SUPPORTED + CH_PCI_ID_TABLE_FENTRY(0x500b), /* B520-sr */ + CH_PCI_ID_TABLE_FENTRY(0x500c), /* B504-bt */ +#endif + CH_PCI_ID_TABLE_FENTRY(0x500d), /* T580-cr */ + CH_PCI_ID_TABLE_FENTRY(0x500e), /* T540-LP-cr */ + CH_PCI_ID_TABLE_FENTRY(0x5010), /* T580-LP-cr */ + CH_PCI_ID_TABLE_FENTRY(0x5011), /* T520-LL-cr */ + CH_PCI_ID_TABLE_FENTRY(0x5012), /* T560-cr */ + CH_PCI_ID_TABLE_FENTRY(0x5013), /* T580-chr */ + CH_PCI_ID_TABLE_FENTRY(0x5014), /* T580-so */ + CH_PCI_ID_TABLE_FENTRY(0x5015), /* T502-bt */ + CH_PCI_ID_TABLE_FENTRY(0x5016), /* T580-OCP-SO */ + CH_PCI_ID_TABLE_FENTRY(0x5017), /* T520-OCP-SO */ + CH_PCI_ID_TABLE_FENTRY(0x5018), /* T540-BT */ + CH_PCI_ID_TABLE_FENTRY(0x5019), /* T540-LP-BT */ + CH_PCI_ID_TABLE_FENTRY(0x501a), /* T540-SO-BT */ + CH_PCI_ID_TABLE_FENTRY(0x501b), /* T540-SO-CR */ + CH_PCI_ID_TABLE_FENTRY(0x5080), /* Custom T540-cr */ + CH_PCI_ID_TABLE_FENTRY(0x5081), /* Custom T540-LL-cr */ + CH_PCI_ID_TABLE_FENTRY(0x5082), /* Custom T504-cr */ + CH_PCI_ID_TABLE_FENTRY(0x5083), /* Custom T540-LP-CR */ + CH_PCI_ID_TABLE_FENTRY(0x5084), /* Custom T580-cr */ + CH_PCI_ID_TABLE_FENTRY(0x5085), /* Custom 3x T580-CR */ + CH_PCI_ID_TABLE_FENTRY(0x5086), /* Custom 2x T580-CR */ + CH_PCI_ID_TABLE_FENTRY(0x5087), /* Custom T580-CR */ + CH_PCI_ID_TABLE_FENTRY(0x5088), /* Custom T570-CR */ + CH_PCI_ID_TABLE_FENTRY(0x5089), /* Custom T520-CR */ + CH_PCI_ID_TABLE_FENTRY(0x5090), /* Custom T540-CR */ + CH_PCI_ID_TABLE_FENTRY(0x5091), /* Custom T522-CR */ + CH_PCI_ID_TABLE_FENTRY(0x5092), /* Custom T520-CR */ + CH_PCI_ID_TABLE_FENTRY(0x5093), /* Custom T580-LP-CR */ + CH_PCI_ID_TABLE_FENTRY(0x5094), /* Custom T540-CR */ + CH_PCI_ID_TABLE_FENTRY(0x5095), /* Custom T540-CR-SO */ + CH_PCI_ID_TABLE_FENTRY(0x5096), /* Custom T580-CR */ + CH_PCI_ID_TABLE_FENTRY(0x5097), /* Custom T520-KR */ + CH_PCI_ID_TABLE_FENTRY(0x5098), /* Custom 2x40G QSFP */ + CH_PCI_ID_TABLE_FENTRY(0x5099), /* Custom 2x40G QSFP */ + CH_PCI_ID_TABLE_FENTRY(0x509A), /* Custom T520-CR */ + CH_PCI_ID_TABLE_FENTRY(0x509B), /* Custom T540-CR LOM */ + CH_PCI_ID_TABLE_FENTRY(0x509c), /* Custom T520-CR SFP+ LOM */ + CH_PCI_ID_TABLE_FENTRY(0x509d), /* Custom T540-CR SFP+ */ + CH_PCI_ID_TABLE_FENTRY(0x509e), /* Custom T520-CR */ + CH_PCI_ID_TABLE_FENTRY(0x509f), /* Custom T540-CR */ + CH_PCI_ID_TABLE_FENTRY(0x50a0), /* Custom T540-CR */ + CH_PCI_ID_TABLE_FENTRY(0x50a1), /* Custom T540-CR */ + CH_PCI_ID_TABLE_FENTRY(0x50a2), /* Custom T580-KR4 */ + CH_PCI_ID_TABLE_FENTRY(0x50a3), /* Custom T580-KR4 */ + CH_PCI_ID_TABLE_FENTRY(0x50a4), /* Custom 2x T540-CR 
*/ + CH_PCI_ID_TABLE_FENTRY(0x50a5), /* Custom T522-BT */ + CH_PCI_ID_TABLE_FENTRY(0x50a6), /* Custom T522-BT-SO */ + CH_PCI_ID_TABLE_FENTRY(0x50a7), /* Custom T580-CR */ + CH_PCI_ID_TABLE_FENTRY(0x50a8), /* Custom T580-KR */ + CH_PCI_ID_TABLE_FENTRY(0x50a9), /* Custom T580-KR */ + CH_PCI_ID_TABLE_FENTRY(0x50aa), /* Custom T580-CR */ + CH_PCI_ID_TABLE_FENTRY(0x50ab), /* Custom T520-CR */ + CH_PCI_ID_TABLE_FENTRY(0x50ac), /* Custom T540-BT */ + CH_PCI_ID_TABLE_FENTRY(0x50ad), /* Custom T520-CR */ + CH_PCI_ID_TABLE_FENTRY(0x50ae), /* Custom T540-XL-SO */ + CH_PCI_ID_TABLE_FENTRY(0x50af), /* Custom T580-KR-SO */ + CH_PCI_ID_TABLE_FENTRY(0x50b0), /* Custom T520-CR-LOM */ + + /* T6 adapter */ + CH_PCI_ID_TABLE_FENTRY(0x6001), /* T6225-CR */ + CH_PCI_ID_TABLE_FENTRY(0x6002), /* T6225-SO-CR */ + CH_PCI_ID_TABLE_FENTRY(0x6003), /* T6425-CR */ + CH_PCI_ID_TABLE_FENTRY(0x6004), /* T6425-SO-CR */ + CH_PCI_ID_TABLE_FENTRY(0x6005), /* T6225-OCP */ + CH_PCI_ID_TABLE_FENTRY(0x6006), /* T62100-OCP-SO */ + CH_PCI_ID_TABLE_FENTRY(0x6007), /* T62100-LP-CR */ + CH_PCI_ID_TABLE_FENTRY(0x6008), /* T62100-SO-CR */ + CH_PCI_ID_TABLE_FENTRY(0x6009), /* T6210-BT */ + CH_PCI_ID_TABLE_FENTRY(0x600d), /* T62100-CR */ + CH_PCI_ID_TABLE_FENTRY(0x6011), /* T6225-LL-CR */ + CH_PCI_ID_TABLE_FENTRY(0x6014), /* T61100-OCP-SO */ + CH_PCI_ID_TABLE_FENTRY(0x6015), /* T6201-BT */ + CH_PCI_ID_TABLE_FENTRY(0x6080), /* Custom T6225-CR SFP28 */ + CH_PCI_ID_TABLE_FENTRY(0x6081), /* Custom T62100-CR */ + CH_PCI_ID_TABLE_FENTRY(0x6082), /* Custom T6225-CR */ + CH_PCI_ID_TABLE_FENTRY(0x6083), /* Custom T62100-CR */ + CH_PCI_ID_TABLE_FENTRY(0x6084), /* Custom T64100-CR */ + CH_PCI_ID_TABLE_FENTRY(0x6085), /* Custom T6240-SO */ + CH_PCI_ID_TABLE_FENTRY(0x6086), /* Custom T6225-SO-CR */ + CH_PCI_ID_TABLE_FENTRY(0x6087), /* Custom T6225-CR */ + CH_PCI_ID_TABLE_FENTRY(0x6088), /* Custom T62100-CR */ + CH_PCI_ID_TABLE_FENTRY(0x6089), /* Custom T62100-KR */ + CH_PCI_ID_TABLE_FENTRY(0x608a), /* Custom T62100-CR */ + CH_PCI_ID_TABLE_FENTRY(0x608b), /* Custom T6225-CR */ +CH_PCI_DEVICE_ID_TABLE_DEFINE_END; + +#endif /* CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN */ + +#endif /* __T4_PCI_ID_TBL_H__ */ diff --git a/src/spdk/dpdk/drivers/net/cxgbe/base/t4_regs.h b/src/spdk/dpdk/drivers/net/cxgbe/base/t4_regs.h new file mode 100644 index 000000000..97cf49a48 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/cxgbe/base/t4_regs.h @@ -0,0 +1,966 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Chelsio Communications. + * All rights reserved. 
+ */ + +#define MYPF_BASE 0x1b000 +#define MYPF_REG(reg_addr) (MYPF_BASE + (reg_addr)) + +#define PF0_BASE 0x1e000 +#define PF0_REG(reg_addr) (PF0_BASE + (reg_addr)) + +#define PF_STRIDE 0x400 +#define PF_BASE(idx) (PF0_BASE + (idx) * PF_STRIDE) +#define PF_REG(idx, reg) (PF_BASE(idx) + (reg)) + +#define MYPORT_BASE 0x1c000 +#define MYPORT_REG(reg_addr) (MYPORT_BASE + (reg_addr)) + +#define PORT0_BASE 0x20000 +#define PORT0_REG(reg_addr) (PORT0_BASE + (reg_addr)) + +#define PORT_STRIDE 0x2000 +#define PORT_BASE(idx) (PORT0_BASE + (idx) * PORT_STRIDE) +#define PORT_REG(idx, reg) (PORT_BASE(idx) + (reg)) + +#define PCIE_MEM_ACCESS_REG(reg_addr, idx) ((reg_addr) + (idx) * 8) +#define NUM_PCIE_MEM_ACCESS_INSTANCES 8 + +#define PCIE_FW_REG(reg_addr, idx) ((reg_addr) + (idx) * 4) +#define NUM_PCIE_FW_INSTANCES 8 + +#define T5_MYPORT_BASE 0x2c000 +#define T5_MYPORT_REG(reg_addr) (T5_MYPORT_BASE + (reg_addr)) + +#define T5_PORT0_BASE 0x30000 +#define T5_PORT0_REG(reg_addr) (T5_PORT0_BASE + (reg_addr)) + +#define T5_PORT_STRIDE 0x4000 +#define T5_PORT_BASE(idx) (T5_PORT0_BASE + (idx) * T5_PORT_STRIDE) +#define T5_PORT_REG(idx, reg) (T5_PORT_BASE(idx) + (reg)) + +#define MPS_T5_CLS_SRAM_L(idx) (A_MPS_T5_CLS_SRAM_L + (idx) * 8) +#define NUM_MPS_T5_CLS_SRAM_L_INSTANCES 512 + +#define MPS_T5_CLS_SRAM_H(idx) (A_MPS_T5_CLS_SRAM_H + (idx) * 8) +#define NUM_MPS_T5_CLS_SRAM_H_INSTANCES 512 + +#define S_DATAPORTNUM 12 +#define M_DATAPORTNUM 0xfU +#define V_DATAPORTNUM(x) ((x) << S_DATAPORTNUM) + +#define S_DATALKPTYPE 10 +#define M_DATALKPTYPE 0x3U +#define V_DATALKPTYPE(x) ((x) << S_DATALKPTYPE) + +/* registers for module SGE */ +#define SGE_BASE_ADDR 0x1000 + +#define A_SGE_PF_KDOORBELL 0x0 +#define A_SGE_VF_KDOORBELL 0x0 + +#define S_QID 15 +#define M_QID 0x1ffffU +#define V_QID(x) ((x) << S_QID) +#define G_QID(x) (((x) >> S_QID) & M_QID) + +#define S_DBPRIO 14 +#define V_DBPRIO(x) ((x) << S_DBPRIO) +#define F_DBPRIO V_DBPRIO(1U) + +#define S_PIDX 0 +#define M_PIDX 0x3fffU +#define V_PIDX(x) ((x) << S_PIDX) +#define G_PIDX(x) (((x) >> S_PIDX) & M_PIDX) + +#define S_DBTYPE 13 +#define V_DBTYPE(x) ((x) << S_DBTYPE) +#define F_DBTYPE V_DBTYPE(1U) + +#define S_PIDX_T5 0 +#define M_PIDX_T5 0x1fffU +#define V_PIDX_T5(x) ((x) << S_PIDX_T5) +#define G_PIDX_T5(x) (((x) >> S_PIDX_T5) & M_PIDX_T5) + +#define A_SGE_PF_GTS 0x4 + +#define T4VF_SGE_BASE_ADDR 0x0000 +#define A_SGE_VF_GTS 0x4 + +#define S_INGRESSQID 16 +#define M_INGRESSQID 0xffffU +#define V_INGRESSQID(x) ((x) << S_INGRESSQID) +#define G_INGRESSQID(x) (((x) >> S_INGRESSQID) & M_INGRESSQID) + +#define S_SEINTARM 12 +#define V_SEINTARM(x) ((x) << S_SEINTARM) +#define F_SEINTARM V_SEINTARM(1U) + +#define S_CIDXINC 0 +#define M_CIDXINC 0xfffU +#define V_CIDXINC(x) ((x) << S_CIDXINC) +#define G_CIDXINC(x) (((x) >> S_CIDXINC) & M_CIDXINC) + +#define A_SGE_CONTROL 0x1008 + +#define S_RXPKTCPLMODE 18 +#define V_RXPKTCPLMODE(x) ((x) << S_RXPKTCPLMODE) +#define F_RXPKTCPLMODE V_RXPKTCPLMODE(1U) + +#define S_EGRSTATUSPAGESIZE 17 +#define V_EGRSTATUSPAGESIZE(x) ((x) << S_EGRSTATUSPAGESIZE) +#define F_EGRSTATUSPAGESIZE V_EGRSTATUSPAGESIZE(1U) + +#define S_PKTSHIFT 10 +#define M_PKTSHIFT 0x7U +#define V_PKTSHIFT(x) ((x) << S_PKTSHIFT) +#define G_PKTSHIFT(x) (((x) >> S_PKTSHIFT) & M_PKTSHIFT) + +#define S_INGPADBOUNDARY 4 +#define M_INGPADBOUNDARY 0x7U +#define V_INGPADBOUNDARY(x) ((x) << S_INGPADBOUNDARY) +#define G_INGPADBOUNDARY(x) (((x) >> S_INGPADBOUNDARY) & M_INGPADBOUNDARY) + +#define A_SGE_HOST_PAGE_SIZE 0x100c + +#define S_HOSTPAGESIZEPF7 28 +#define 
M_HOSTPAGESIZEPF7 0xfU +#define V_HOSTPAGESIZEPF7(x) ((x) << S_HOSTPAGESIZEPF7) +#define G_HOSTPAGESIZEPF7(x) (((x) >> S_HOSTPAGESIZEPF7) & M_HOSTPAGESIZEPF7) + +#define S_HOSTPAGESIZEPF6 24 +#define M_HOSTPAGESIZEPF6 0xfU +#define V_HOSTPAGESIZEPF6(x) ((x) << S_HOSTPAGESIZEPF6) +#define G_HOSTPAGESIZEPF6(x) (((x) >> S_HOSTPAGESIZEPF6) & M_HOSTPAGESIZEPF6) + +#define S_HOSTPAGESIZEPF5 20 +#define M_HOSTPAGESIZEPF5 0xfU +#define V_HOSTPAGESIZEPF5(x) ((x) << S_HOSTPAGESIZEPF5) +#define G_HOSTPAGESIZEPF5(x) (((x) >> S_HOSTPAGESIZEPF5) & M_HOSTPAGESIZEPF5) + +#define S_HOSTPAGESIZEPF4 16 +#define M_HOSTPAGESIZEPF4 0xfU +#define V_HOSTPAGESIZEPF4(x) ((x) << S_HOSTPAGESIZEPF4) +#define G_HOSTPAGESIZEPF4(x) (((x) >> S_HOSTPAGESIZEPF4) & M_HOSTPAGESIZEPF4) + +#define S_HOSTPAGESIZEPF3 12 +#define M_HOSTPAGESIZEPF3 0xfU +#define V_HOSTPAGESIZEPF3(x) ((x) << S_HOSTPAGESIZEPF3) +#define G_HOSTPAGESIZEPF3(x) (((x) >> S_HOSTPAGESIZEPF3) & M_HOSTPAGESIZEPF3) + +#define S_HOSTPAGESIZEPF2 8 +#define M_HOSTPAGESIZEPF2 0xfU +#define V_HOSTPAGESIZEPF2(x) ((x) << S_HOSTPAGESIZEPF2) +#define G_HOSTPAGESIZEPF2(x) (((x) >> S_HOSTPAGESIZEPF2) & M_HOSTPAGESIZEPF2) + +#define S_HOSTPAGESIZEPF1 4 +#define M_HOSTPAGESIZEPF1 0xfU +#define V_HOSTPAGESIZEPF1(x) ((x) << S_HOSTPAGESIZEPF1) +#define G_HOSTPAGESIZEPF1(x) (((x) >> S_HOSTPAGESIZEPF1) & M_HOSTPAGESIZEPF1) + +#define S_HOSTPAGESIZEPF0 0 +#define M_HOSTPAGESIZEPF0 0xfU +#define V_HOSTPAGESIZEPF0(x) ((x) << S_HOSTPAGESIZEPF0) +#define G_HOSTPAGESIZEPF0(x) (((x) >> S_HOSTPAGESIZEPF0) & M_HOSTPAGESIZEPF0) + +#define A_SGE_EGRESS_QUEUES_PER_PAGE_PF 0x1010 + +#define S_QUEUESPERPAGEPF1 4 +#define M_QUEUESPERPAGEPF1 0xfU +#define V_QUEUESPERPAGEPF1(x) ((x) << S_QUEUESPERPAGEPF1) +#define G_QUEUESPERPAGEPF1(x) (((x) >> S_QUEUESPERPAGEPF1) & M_QUEUESPERPAGEPF1) + +#define S_QUEUESPERPAGEPF0 0 +#define M_QUEUESPERPAGEPF0 0xfU +#define V_QUEUESPERPAGEPF0(x) ((x) << S_QUEUESPERPAGEPF0) +#define G_QUEUESPERPAGEPF0(x) (((x) >> S_QUEUESPERPAGEPF0) & M_QUEUESPERPAGEPF0) + +#define A_SGE_EGRESS_QUEUES_PER_PAGE_VF 0x1014 + +#define S_ERR_CPL_EXCEED_IQE_SIZE 22 +#define V_ERR_CPL_EXCEED_IQE_SIZE(x) ((x) << S_ERR_CPL_EXCEED_IQE_SIZE) +#define F_ERR_CPL_EXCEED_IQE_SIZE V_ERR_CPL_EXCEED_IQE_SIZE(1U) + +#define S_ERR_INVALID_CIDX_INC 21 +#define V_ERR_INVALID_CIDX_INC(x) ((x) << S_ERR_INVALID_CIDX_INC) +#define F_ERR_INVALID_CIDX_INC V_ERR_INVALID_CIDX_INC(1U) + +#define S_ERR_CPL_OPCODE_0 19 +#define V_ERR_CPL_OPCODE_0(x) ((x) << S_ERR_CPL_OPCODE_0) +#define F_ERR_CPL_OPCODE_0 V_ERR_CPL_OPCODE_0(1U) + +#define S_ERR_DROPPED_DB 18 +#define V_ERR_DROPPED_DB(x) ((x) << S_ERR_DROPPED_DB) +#define F_ERR_DROPPED_DB V_ERR_DROPPED_DB(1U) + +#define S_ERR_DATA_CPL_ON_HIGH_QID1 17 +#define V_ERR_DATA_CPL_ON_HIGH_QID1(x) ((x) << S_ERR_DATA_CPL_ON_HIGH_QID1) +#define F_ERR_DATA_CPL_ON_HIGH_QID1 V_ERR_DATA_CPL_ON_HIGH_QID1(1U) + +#define S_ERR_DATA_CPL_ON_HIGH_QID0 16 +#define V_ERR_DATA_CPL_ON_HIGH_QID0(x) ((x) << S_ERR_DATA_CPL_ON_HIGH_QID0) +#define F_ERR_DATA_CPL_ON_HIGH_QID0 V_ERR_DATA_CPL_ON_HIGH_QID0(1U) + +#define S_ERR_BAD_DB_PIDX3 15 +#define V_ERR_BAD_DB_PIDX3(x) ((x) << S_ERR_BAD_DB_PIDX3) +#define F_ERR_BAD_DB_PIDX3 V_ERR_BAD_DB_PIDX3(1U) + +#define S_ERR_BAD_DB_PIDX2 14 +#define V_ERR_BAD_DB_PIDX2(x) ((x) << S_ERR_BAD_DB_PIDX2) +#define F_ERR_BAD_DB_PIDX2 V_ERR_BAD_DB_PIDX2(1U) + +#define S_ERR_BAD_DB_PIDX1 13 +#define V_ERR_BAD_DB_PIDX1(x) ((x) << S_ERR_BAD_DB_PIDX1) +#define F_ERR_BAD_DB_PIDX1 V_ERR_BAD_DB_PIDX1(1U) + +#define S_ERR_BAD_DB_PIDX0 12 +#define V_ERR_BAD_DB_PIDX0(x) 
((x) << S_ERR_BAD_DB_PIDX0) +#define F_ERR_BAD_DB_PIDX0 V_ERR_BAD_DB_PIDX0(1U) + +#define S_ERR_ING_PCIE_CHAN 11 +#define V_ERR_ING_PCIE_CHAN(x) ((x) << S_ERR_ING_PCIE_CHAN) +#define F_ERR_ING_PCIE_CHAN V_ERR_ING_PCIE_CHAN(1U) + +#define S_ERR_ING_CTXT_PRIO 10 +#define V_ERR_ING_CTXT_PRIO(x) ((x) << S_ERR_ING_CTXT_PRIO) +#define F_ERR_ING_CTXT_PRIO V_ERR_ING_CTXT_PRIO(1U) + +#define S_ERR_EGR_CTXT_PRIO 9 +#define V_ERR_EGR_CTXT_PRIO(x) ((x) << S_ERR_EGR_CTXT_PRIO) +#define F_ERR_EGR_CTXT_PRIO V_ERR_EGR_CTXT_PRIO(1U) + +#define S_DBFIFO_HP_INT 8 +#define V_DBFIFO_HP_INT(x) ((x) << S_DBFIFO_HP_INT) +#define F_DBFIFO_HP_INT V_DBFIFO_HP_INT(1U) + +#define S_DBFIFO_LP_INT 7 +#define V_DBFIFO_LP_INT(x) ((x) << S_DBFIFO_LP_INT) +#define F_DBFIFO_LP_INT V_DBFIFO_LP_INT(1U) + +#define S_INGRESS_SIZE_ERR 5 +#define V_INGRESS_SIZE_ERR(x) ((x) << S_INGRESS_SIZE_ERR) +#define F_INGRESS_SIZE_ERR V_INGRESS_SIZE_ERR(1U) + +#define S_EGRESS_SIZE_ERR 4 +#define V_EGRESS_SIZE_ERR(x) ((x) << S_EGRESS_SIZE_ERR) +#define F_EGRESS_SIZE_ERR V_EGRESS_SIZE_ERR(1U) + +#define A_SGE_INT_ENABLE3 0x1040 + +#define A_SGE_FL_BUFFER_SIZE0 0x1044 +#define A_SGE_FL_BUFFER_SIZE1 0x1048 +#define A_SGE_FL_BUFFER_SIZE2 0x104c +#define A_SGE_FL_BUFFER_SIZE3 0x1050 + +#define A_SGE_FLM_CFG 0x1090 + +#define S_CREDITCNT 4 +#define M_CREDITCNT 0x3U +#define V_CREDITCNT(x) ((x) << S_CREDITCNT) +#define G_CREDITCNT(x) (((x) >> S_CREDITCNT) & M_CREDITCNT) + +#define S_CREDITCNTPACKING 2 +#define M_CREDITCNTPACKING 0x3U +#define V_CREDITCNTPACKING(x) ((x) << S_CREDITCNTPACKING) +#define G_CREDITCNTPACKING(x) (((x) >> S_CREDITCNTPACKING) & M_CREDITCNTPACKING) + +#define A_SGE_CONM_CTRL 0x1094 + +#define S_T6_EGRTHRESHOLDPACKING 16 +#define M_T6_EGRTHRESHOLDPACKING 0xffU +#define G_T6_EGRTHRESHOLDPACKING(x) (((x) >> S_T6_EGRTHRESHOLDPACKING) & \ + M_T6_EGRTHRESHOLDPACKING) + +#define S_EGRTHRESHOLD 8 +#define M_EGRTHRESHOLD 0x3fU +#define V_EGRTHRESHOLD(x) ((x) << S_EGRTHRESHOLD) +#define G_EGRTHRESHOLD(x) (((x) >> S_EGRTHRESHOLD) & M_EGRTHRESHOLD) + +#define S_EGRTHRESHOLDPACKING 14 +#define M_EGRTHRESHOLDPACKING 0x3fU +#define V_EGRTHRESHOLDPACKING(x) ((x) << S_EGRTHRESHOLDPACKING) +#define G_EGRTHRESHOLDPACKING(x) (((x) >> S_EGRTHRESHOLDPACKING) & \ + M_EGRTHRESHOLDPACKING) + +#define S_INGTHRESHOLD 2 +#define M_INGTHRESHOLD 0x3fU +#define V_INGTHRESHOLD(x) ((x) << S_INGTHRESHOLD) +#define G_INGTHRESHOLD(x) (((x) >> S_INGTHRESHOLD) & M_INGTHRESHOLD) + +#define A_SGE_INGRESS_RX_THRESHOLD 0x10a0 + +#define S_THRESHOLD_0 24 +#define M_THRESHOLD_0 0x3fU +#define V_THRESHOLD_0(x) ((x) << S_THRESHOLD_0) +#define G_THRESHOLD_0(x) (((x) >> S_THRESHOLD_0) & M_THRESHOLD_0) + +#define S_THRESHOLD_1 16 +#define M_THRESHOLD_1 0x3fU +#define V_THRESHOLD_1(x) ((x) << S_THRESHOLD_1) +#define G_THRESHOLD_1(x) (((x) >> S_THRESHOLD_1) & M_THRESHOLD_1) + +#define S_THRESHOLD_2 8 +#define M_THRESHOLD_2 0x3fU +#define V_THRESHOLD_2(x) ((x) << S_THRESHOLD_2) +#define G_THRESHOLD_2(x) (((x) >> S_THRESHOLD_2) & M_THRESHOLD_2) + +#define S_THRESHOLD_3 0 +#define M_THRESHOLD_3 0x3fU +#define V_THRESHOLD_3(x) ((x) << S_THRESHOLD_3) +#define G_THRESHOLD_3(x) (((x) >> S_THRESHOLD_3) & M_THRESHOLD_3) + +#define A_SGE_TIMER_VALUE_0_AND_1 0x10b8 + +#define S_TIMERVALUE0 16 +#define M_TIMERVALUE0 0xffffU +#define V_TIMERVALUE0(x) ((x) << S_TIMERVALUE0) +#define G_TIMERVALUE0(x) (((x) >> S_TIMERVALUE0) & M_TIMERVALUE0) + +#define S_TIMERVALUE1 0 +#define M_TIMERVALUE1 0xffffU +#define V_TIMERVALUE1(x) ((x) << S_TIMERVALUE1) +#define G_TIMERVALUE1(x) (((x) >> 
S_TIMERVALUE1) & M_TIMERVALUE1) + +#define A_SGE_TIMER_VALUE_2_AND_3 0x10bc + +#define S_TIMERVALUE2 16 +#define M_TIMERVALUE2 0xffffU +#define V_TIMERVALUE2(x) ((x) << S_TIMERVALUE2) +#define G_TIMERVALUE2(x) (((x) >> S_TIMERVALUE2) & M_TIMERVALUE2) + +#define S_TIMERVALUE3 0 +#define M_TIMERVALUE3 0xffffU +#define V_TIMERVALUE3(x) ((x) << S_TIMERVALUE3) +#define G_TIMERVALUE3(x) (((x) >> S_TIMERVALUE3) & M_TIMERVALUE3) + +#define A_SGE_TIMER_VALUE_4_AND_5 0x10c0 + +#define S_TIMERVALUE4 16 +#define M_TIMERVALUE4 0xffffU +#define V_TIMERVALUE4(x) ((x) << S_TIMERVALUE4) +#define G_TIMERVALUE4(x) (((x) >> S_TIMERVALUE4) & M_TIMERVALUE4) + +#define S_TIMERVALUE5 0 +#define M_TIMERVALUE5 0xffffU +#define V_TIMERVALUE5(x) ((x) << S_TIMERVALUE5) +#define G_TIMERVALUE5(x) (((x) >> S_TIMERVALUE5) & M_TIMERVALUE5) + +#define A_SGE_DEBUG_INDEX 0x10cc +#define A_SGE_DEBUG_DATA_HIGH 0x10d0 +#define A_SGE_DEBUG_DATA_LOW 0x10d4 +#define A_SGE_STAT_CFG 0x10ec + +#define S_STATMODE 2 +#define M_STATMODE 0x3U +#define V_STATMODE(x) ((x) << S_STATMODE) +#define G_STATMODE(x) (((x) >> S_STATMODE) & M_STATMODE) + +#define S_STATSOURCE_T5 9 +#define M_STATSOURCE_T5 0xfU +#define V_STATSOURCE_T5(x) ((x) << S_STATSOURCE_T5) +#define G_STATSOURCE_T5(x) (((x) >> S_STATSOURCE_T5) & M_STATSOURCE_T5) + +#define A_SGE_INGRESS_QUEUES_PER_PAGE_PF 0x10f4 +#define A_SGE_INGRESS_QUEUES_PER_PAGE_VF 0x10f8 + +#define A_SGE_CONTROL2 0x1124 + +#define S_IDMAARBROUNDROBIN 19 +#define V_IDMAARBROUNDROBIN(x) ((x) << S_IDMAARBROUNDROBIN) +#define F_IDMAARBROUNDROBIN V_IDMAARBROUNDROBIN(1U) + +#define S_INGPACKBOUNDARY 16 +#define M_INGPACKBOUNDARY 0x7U +#define V_INGPACKBOUNDARY(x) ((x) << S_INGPACKBOUNDARY) +#define G_INGPACKBOUNDARY(x) (((x) >> S_INGPACKBOUNDARY) & M_INGPACKBOUNDARY) + +#define S_BUSY 31 +#define V_BUSY(x) ((x) << S_BUSY) +#define F_BUSY V_BUSY(1U) + +#define A_SGE_DEBUG_DATA_HIGH_INDEX_10 0x12a8 +#define A_SGE_DEBUG_DATA_LOW_INDEX_2 0x12c8 +#define A_SGE_DEBUG_DATA_LOW_INDEX_3 0x12cc + +/* registers for module PCIE */ +#define PCIE_BASE_ADDR 0x3000 + +#define A_PCIE_MEM_ACCESS_BASE_WIN 0x3068 + +#define S_PCIEOFST 10 +#define M_PCIEOFST 0x3fffffU +#define V_PCIEOFST(x) ((x) << S_PCIEOFST) +#define G_PCIEOFST(x) (((x) >> S_PCIEOFST) & M_PCIEOFST) + +#define S_BIR 8 +#define M_BIR 0x3U +#define V_BIR(x) ((x) << S_BIR) +#define G_BIR(x) (((x) >> S_BIR) & M_BIR) + +#define S_WINDOW 0 +#define M_WINDOW 0xffU +#define V_WINDOW(x) ((x) << S_WINDOW) +#define G_WINDOW(x) (((x) >> S_WINDOW) & M_WINDOW) + +#define A_PCIE_MEM_ACCESS_OFFSET 0x306c + +#define S_PFNUM 0 +#define M_PFNUM 0x7U +#define V_PFNUM(x) ((x) << S_PFNUM) +#define G_PFNUM(x) (((x) >> S_PFNUM) & M_PFNUM) + +#define A_PCIE_FW 0x30b8 +#define A_PCIE_FW_PF 0x30bc + +#define A_PCIE_CFG2 0x3018 + +#define S_TOTMAXTAG 0 +#define M_TOTMAXTAG 0x3U +#define V_TOTMAXTAG(x) ((x) << S_TOTMAXTAG) + +#define S_T6_TOTMAXTAG 0 +#define M_T6_TOTMAXTAG 0x7U +#define V_T6_TOTMAXTAG(x) ((x) << S_T6_TOTMAXTAG) + +#define A_PCIE_CMD_CFG 0x5980 + +#define S_MINTAG 0 +#define M_MINTAG 0xffU +#define V_MINTAG(x) ((x) << S_MINTAG) + +#define S_T6_MINTAG 0 +#define M_T6_MINTAG 0xffU +#define V_T6_MINTAG(x) ((x) << S_T6_MINTAG) + +/* registers for module CIM */ +#define CIM_BASE_ADDR 0x7b00 + +#define A_CIM_VF_EXT_MAILBOX_CTRL 0x0 + +#define A_CIM_PF_MAILBOX_DATA 0x240 +#define A_CIM_PF_MAILBOX_CTRL 0x280 + +#define S_MBMSGVALID 3 +#define V_MBMSGVALID(x) ((x) << S_MBMSGVALID) +#define F_MBMSGVALID V_MBMSGVALID(1U) + +#define S_MBOWNER 0 +#define M_MBOWNER 0x3U +#define 
V_MBOWNER(x) ((x) << S_MBOWNER) +#define G_MBOWNER(x) (((x) >> S_MBOWNER) & M_MBOWNER) + +#define A_CIM_PF_MAILBOX_CTRL_SHADOW_COPY 0x290 +#define A_CIM_BOOT_CFG 0x7b00 + +#define S_UPCRST 0 +#define V_UPCRST(x) ((x) << S_UPCRST) +#define F_UPCRST V_UPCRST(1U) + +#define NUM_CIM_PF_MAILBOX_DATA_INSTANCES 16 + +/* registers for module TP */ +#define A_TP_OUT_CONFIG 0x7d04 + +#define S_CRXPKTENC 3 +#define V_CRXPKTENC(x) ((x) << S_CRXPKTENC) +#define F_CRXPKTENC V_CRXPKTENC(1U) + +#define TP_BASE_ADDR 0x7d00 +#define A_TP_CMM_TCB_BASE 0x7d10 + +#define A_TP_TIMER_RESOLUTION 0x7d90 + +#define S_TIMERRESOLUTION 16 +#define M_TIMERRESOLUTION 0xffU +#define V_TIMERRESOLUTION(x) ((x) << S_TIMERRESOLUTION) +#define G_TIMERRESOLUTION(x) (((x) >> S_TIMERRESOLUTION) & M_TIMERRESOLUTION) + +#define S_DELAYEDACKRESOLUTION 0 +#define M_DELAYEDACKRESOLUTION 0xffU +#define V_DELAYEDACKRESOLUTION(x) ((x) << S_DELAYEDACKRESOLUTION) +#define G_DELAYEDACKRESOLUTION(x) (((x) >> S_DELAYEDACKRESOLUTION) & \ + M_DELAYEDACKRESOLUTION) + +#define A_TP_CCTRL_TABLE 0x7ddc + +#define A_TP_MTU_TABLE 0x7de4 + +#define S_MTUINDEX 24 +#define M_MTUINDEX 0xffU +#define V_MTUINDEX(x) ((x) << S_MTUINDEX) +#define G_MTUINDEX(x) (((x) >> S_MTUINDEX) & M_MTUINDEX) + +#define S_MTUWIDTH 16 +#define M_MTUWIDTH 0xfU +#define V_MTUWIDTH(x) ((x) << S_MTUWIDTH) +#define G_MTUWIDTH(x) (((x) >> S_MTUWIDTH) & M_MTUWIDTH) + +#define S_MTUVALUE 0 +#define M_MTUVALUE 0x3fffU +#define V_MTUVALUE(x) ((x) << S_MTUVALUE) +#define G_MTUVALUE(x) (((x) >> S_MTUVALUE) & M_MTUVALUE) + +#define A_TP_RSS_CONFIG_VRT 0x7e00 + +#define S_KEYMODE 6 +#define M_KEYMODE 0x3U +#define G_KEYMODE(x) (((x) >> S_KEYMODE) & M_KEYMODE) + +#define S_KEYWRADDR 0 +#define V_KEYWRADDR(x) ((x) << S_KEYWRADDR) + +#define S_KEYWREN 4 +#define V_KEYWREN(x) ((x) << S_KEYWREN) +#define F_KEYWREN V_KEYWREN(1U) + +#define S_KEYWRADDRX 30 +#define V_KEYWRADDRX(x) ((x) << S_KEYWRADDRX) + +#define S_KEYEXTEND 26 +#define V_KEYEXTEND(x) ((x) << S_KEYEXTEND) +#define F_KEYEXTEND V_KEYEXTEND(1U) + +#define S_T6_VFWRADDR 8 +#define V_T6_VFWRADDR(x) ((x) << S_T6_VFWRADDR) + +#define A_TP_PIO_ADDR 0x7e40 +#define A_TP_PIO_DATA 0x7e44 + +#define A_TP_RSS_SECRET_KEY0 0x40 + +#define A_TP_VLAN_PRI_MAP 0x140 + +#define S_FRAGMENTATION 9 +#define V_FRAGMENTATION(x) ((x) << S_FRAGMENTATION) +#define F_FRAGMENTATION V_FRAGMENTATION(1U) + +#define S_MPSHITTYPE 8 +#define V_MPSHITTYPE(x) ((x) << S_MPSHITTYPE) +#define F_MPSHITTYPE V_MPSHITTYPE(1U) + +#define S_MACMATCH 7 +#define V_MACMATCH(x) ((x) << S_MACMATCH) +#define F_MACMATCH V_MACMATCH(1U) + +#define S_ETHERTYPE 6 +#define V_ETHERTYPE(x) ((x) << S_ETHERTYPE) +#define F_ETHERTYPE V_ETHERTYPE(1U) + +#define S_PROTOCOL 5 +#define V_PROTOCOL(x) ((x) << S_PROTOCOL) +#define F_PROTOCOL V_PROTOCOL(1U) + +#define S_TOS 4 +#define V_TOS(x) ((x) << S_TOS) +#define F_TOS V_TOS(1U) + +#define S_VLAN 3 +#define V_VLAN(x) ((x) << S_VLAN) +#define F_VLAN V_VLAN(1U) + +#define S_VNIC_ID 2 +#define V_VNIC_ID(x) ((x) << S_VNIC_ID) +#define F_VNIC_ID V_VNIC_ID(1U) + +#define S_PORT 1 +#define V_PORT(x) ((x) << S_PORT) +#define F_PORT V_PORT(1U) + +#define S_FCOE 0 +#define V_FCOE(x) ((x) << S_FCOE) +#define F_FCOE V_FCOE(1U) + +#define A_TP_INGRESS_CONFIG 0x141 + +#define S_USE_ENC_IDX 13 +#define V_USE_ENC_IDX(x) ((x) << S_USE_ENC_IDX) +#define F_USE_ENC_IDX V_USE_ENC_IDX(1U) + +#define S_VNIC 11 +#define V_VNIC(x) ((x) << S_VNIC) +#define F_VNIC V_VNIC(1U) + +#define S_CSUM_HAS_PSEUDO_HDR 10 +#define V_CSUM_HAS_PSEUDO_HDR(x) ((x) << 
S_CSUM_HAS_PSEUDO_HDR) +#define F_CSUM_HAS_PSEUDO_HDR V_CSUM_HAS_PSEUDO_HDR(1U) + +#define S_RM_OVLAN 9 +#define V_RM_OVLAN(x) ((x) << S_RM_OVLAN) + +/* registers for module MA */ +#define A_MA_EDRAM0_BAR 0x77c0 + +#define S_EDRAM0_SIZE 0 +#define M_EDRAM0_SIZE 0xfffU +#define V_EDRAM0_SIZE(x) ((x) << S_EDRAM0_SIZE) +#define G_EDRAM0_SIZE(x) (((x) >> S_EDRAM0_SIZE) & M_EDRAM0_SIZE) + +#define A_MA_EXT_MEMORY0_BAR 0x77c8 + +#define S_EXT_MEM0_SIZE 0 +#define M_EXT_MEM0_SIZE 0xfffU +#define V_EXT_MEM0_SIZE(x) ((x) << S_EXT_MEM0_SIZE) +#define G_EXT_MEM0_SIZE(x) (((x) >> S_EXT_MEM0_SIZE) & M_EXT_MEM0_SIZE) + +/* registers for module MPS */ +#define MPS_BASE_ADDR 0x9000 +#define T4VF_MPS_BASE_ADDR 0x0100 + +#define S_REPLICATE 11 +#define V_REPLICATE(x) ((x) << S_REPLICATE) +#define F_REPLICATE V_REPLICATE(1U) + +#define S_PF 8 +#define M_PF 0x7U +#define V_PF(x) ((x) << S_PF) +#define G_PF(x) (((x) >> S_PF) & M_PF) + +#define S_VF_VALID 7 +#define V_VF_VALID(x) ((x) << S_VF_VALID) +#define F_VF_VALID V_VF_VALID(1U) + +#define S_VF 0 +#define M_VF 0x7fU +#define V_VF(x) ((x) << S_VF) +#define G_VF(x) (((x) >> S_VF) & M_VF) + +#define A_MPS_STAT_CTL 0x9600 + +#define S_COUNTPAUSEMCRX 5 +#define V_COUNTPAUSEMCRX(x) ((x) << S_COUNTPAUSEMCRX) +#define F_COUNTPAUSEMCRX V_COUNTPAUSEMCRX(1U) + +#define S_COUNTPAUSESTATRX 4 +#define V_COUNTPAUSESTATRX(x) ((x) << S_COUNTPAUSESTATRX) +#define F_COUNTPAUSESTATRX V_COUNTPAUSESTATRX(1U) + +#define S_COUNTPAUSEMCTX 3 +#define V_COUNTPAUSEMCTX(x) ((x) << S_COUNTPAUSEMCTX) +#define F_COUNTPAUSEMCTX V_COUNTPAUSEMCTX(1U) + +#define S_COUNTPAUSESTATTX 2 +#define V_COUNTPAUSESTATTX(x) ((x) << S_COUNTPAUSESTATTX) +#define F_COUNTPAUSESTATTX V_COUNTPAUSESTATTX(1U) + +#define A_MPS_PORT_STAT_TX_PORT_BYTES_L 0x400 +#define A_MPS_PORT_STAT_TX_PORT_BYTES_H 0x404 +#define A_MPS_PORT_STAT_TX_PORT_FRAMES_L 0x408 +#define A_MPS_PORT_STAT_TX_PORT_FRAMES_H 0x40c +#define A_MPS_PORT_STAT_TX_PORT_BCAST_L 0x410 +#define A_MPS_PORT_STAT_TX_PORT_BCAST_H 0x414 +#define A_MPS_PORT_STAT_TX_PORT_MCAST_L 0x418 +#define A_MPS_PORT_STAT_TX_PORT_MCAST_H 0x41c +#define A_MPS_PORT_STAT_TX_PORT_UCAST_L 0x420 +#define A_MPS_PORT_STAT_TX_PORT_UCAST_H 0x424 +#define A_MPS_PORT_STAT_TX_PORT_ERROR_L 0x428 +#define A_MPS_PORT_STAT_TX_PORT_ERROR_H 0x42c +#define A_MPS_PORT_STAT_TX_PORT_64B_L 0x430 +#define A_MPS_PORT_STAT_TX_PORT_64B_H 0x434 +#define A_MPS_PORT_STAT_TX_PORT_65B_127B_L 0x438 +#define A_MPS_PORT_STAT_TX_PORT_65B_127B_H 0x43c +#define A_MPS_PORT_STAT_TX_PORT_128B_255B_L 0x440 +#define A_MPS_PORT_STAT_TX_PORT_128B_255B_H 0x444 +#define A_MPS_PORT_STAT_TX_PORT_256B_511B_L 0x448 +#define A_MPS_PORT_STAT_TX_PORT_256B_511B_H 0x44c +#define A_MPS_PORT_STAT_TX_PORT_512B_1023B_L 0x450 +#define A_MPS_PORT_STAT_TX_PORT_512B_1023B_H 0x454 +#define A_MPS_PORT_STAT_TX_PORT_1024B_1518B_L 0x458 +#define A_MPS_PORT_STAT_TX_PORT_1024B_1518B_H 0x45c +#define A_MPS_PORT_STAT_TX_PORT_1519B_MAX_L 0x460 +#define A_MPS_PORT_STAT_TX_PORT_1519B_MAX_H 0x464 +#define A_MPS_PORT_STAT_TX_PORT_DROP_L 0x468 +#define A_MPS_PORT_STAT_TX_PORT_DROP_H 0x46c +#define A_MPS_PORT_STAT_TX_PORT_PAUSE_L 0x470 +#define A_MPS_PORT_STAT_TX_PORT_PAUSE_H 0x474 +#define A_MPS_PORT_STAT_TX_PORT_PPP0_L 0x478 +#define A_MPS_PORT_STAT_TX_PORT_PPP0_H 0x47c +#define A_MPS_PORT_STAT_TX_PORT_PPP1_L 0x480 +#define A_MPS_PORT_STAT_TX_PORT_PPP1_H 0x484 +#define A_MPS_PORT_STAT_TX_PORT_PPP2_L 0x488 +#define A_MPS_PORT_STAT_TX_PORT_PPP2_H 0x48c +#define A_MPS_PORT_STAT_TX_PORT_PPP3_L 0x490 +#define A_MPS_PORT_STAT_TX_PORT_PPP3_H 0x494 +#define 
A_MPS_PORT_STAT_TX_PORT_PPP4_L 0x498 +#define A_MPS_PORT_STAT_TX_PORT_PPP4_H 0x49c +#define A_MPS_PORT_STAT_TX_PORT_PPP5_L 0x4a0 +#define A_MPS_PORT_STAT_TX_PORT_PPP5_H 0x4a4 +#define A_MPS_PORT_STAT_TX_PORT_PPP6_L 0x4a8 +#define A_MPS_PORT_STAT_TX_PORT_PPP6_H 0x4ac +#define A_MPS_PORT_STAT_TX_PORT_PPP7_L 0x4b0 +#define A_MPS_PORT_STAT_TX_PORT_PPP7_H 0x4b4 +#define A_MPS_PORT_STAT_LB_PORT_BYTES_L 0x4c0 +#define A_MPS_PORT_STAT_LB_PORT_BYTES_H 0x4c4 +#define A_MPS_PORT_STAT_LB_PORT_FRAMES_L 0x4c8 +#define A_MPS_PORT_STAT_LB_PORT_FRAMES_H 0x4cc +#define A_MPS_PORT_STAT_LB_PORT_BCAST_L 0x4d0 +#define A_MPS_PORT_STAT_LB_PORT_BCAST_H 0x4d4 +#define A_MPS_PORT_STAT_LB_PORT_MCAST_L 0x4d8 +#define A_MPS_PORT_STAT_LB_PORT_MCAST_H 0x4dc +#define A_MPS_PORT_STAT_LB_PORT_UCAST_L 0x4e0 +#define A_MPS_PORT_STAT_LB_PORT_UCAST_H 0x4e4 +#define A_MPS_PORT_STAT_LB_PORT_ERROR_L 0x4e8 +#define A_MPS_PORT_STAT_LB_PORT_ERROR_H 0x4ec +#define A_MPS_PORT_STAT_LB_PORT_64B_L 0x4f0 +#define A_MPS_PORT_STAT_LB_PORT_64B_H 0x4f4 +#define A_MPS_PORT_STAT_LB_PORT_65B_127B_L 0x4f8 +#define A_MPS_PORT_STAT_LB_PORT_65B_127B_H 0x4fc +#define A_MPS_PORT_STAT_LB_PORT_128B_255B_L 0x500 +#define A_MPS_PORT_STAT_LB_PORT_128B_255B_H 0x504 +#define A_MPS_PORT_STAT_LB_PORT_256B_511B_L 0x508 +#define A_MPS_PORT_STAT_LB_PORT_256B_511B_H 0x50c +#define A_MPS_PORT_STAT_LB_PORT_512B_1023B_L 0x510 +#define A_MPS_PORT_STAT_LB_PORT_512B_1023B_H 0x514 +#define A_MPS_PORT_STAT_LB_PORT_1024B_1518B_L 0x518 +#define A_MPS_PORT_STAT_LB_PORT_1024B_1518B_H 0x51c +#define A_MPS_PORT_STAT_LB_PORT_1519B_MAX_L 0x520 +#define A_MPS_PORT_STAT_LB_PORT_1519B_MAX_H 0x524 +#define A_MPS_PORT_STAT_LB_PORT_DROP_FRAMES 0x528 +#define A_MPS_PORT_STAT_LB_PORT_DROP_FRAMES_L 0x528 +#define A_MPS_PORT_STAT_LB_PORT_DROP_FRAMES_H 0x52c +#define A_MPS_PORT_STAT_RX_PORT_BYTES_L 0x540 +#define A_MPS_PORT_STAT_RX_PORT_BYTES_H 0x544 +#define A_MPS_PORT_STAT_RX_PORT_FRAMES_L 0x548 +#define A_MPS_PORT_STAT_RX_PORT_FRAMES_H 0x54c +#define A_MPS_PORT_STAT_RX_PORT_BCAST_L 0x550 +#define A_MPS_PORT_STAT_RX_PORT_BCAST_H 0x554 +#define A_MPS_PORT_STAT_RX_PORT_MCAST_L 0x558 +#define A_MPS_PORT_STAT_RX_PORT_MCAST_H 0x55c +#define A_MPS_PORT_STAT_RX_PORT_UCAST_L 0x560 +#define A_MPS_PORT_STAT_RX_PORT_UCAST_H 0x564 +#define A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L 0x568 +#define A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_H 0x56c +#define A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L 0x570 +#define A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_H 0x574 +#define A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L 0x578 +#define A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_H 0x57c +#define A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L 0x580 +#define A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_H 0x584 +#define A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L 0x588 +#define A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_H 0x58c +#define A_MPS_PORT_STAT_RX_PORT_64B_L 0x590 +#define A_MPS_PORT_STAT_RX_PORT_64B_H 0x594 +#define A_MPS_PORT_STAT_RX_PORT_65B_127B_L 0x598 +#define A_MPS_PORT_STAT_RX_PORT_65B_127B_H 0x59c +#define A_MPS_PORT_STAT_RX_PORT_128B_255B_L 0x5a0 +#define A_MPS_PORT_STAT_RX_PORT_128B_255B_H 0x5a4 +#define A_MPS_PORT_STAT_RX_PORT_256B_511B_L 0x5a8 +#define A_MPS_PORT_STAT_RX_PORT_256B_511B_H 0x5ac +#define A_MPS_PORT_STAT_RX_PORT_512B_1023B_L 0x5b0 +#define A_MPS_PORT_STAT_RX_PORT_512B_1023B_H 0x5b4 +#define A_MPS_PORT_STAT_RX_PORT_1024B_1518B_L 0x5b8 +#define A_MPS_PORT_STAT_RX_PORT_1024B_1518B_H 0x5bc +#define A_MPS_PORT_STAT_RX_PORT_1519B_MAX_L 0x5c0 +#define A_MPS_PORT_STAT_RX_PORT_1519B_MAX_H 0x5c4 +#define A_MPS_PORT_STAT_RX_PORT_PAUSE_L 0x5c8 +#define 
A_MPS_PORT_STAT_RX_PORT_PAUSE_H 0x5cc +#define A_MPS_PORT_STAT_RX_PORT_PPP0_L 0x5d0 +#define A_MPS_PORT_STAT_RX_PORT_PPP0_H 0x5d4 +#define A_MPS_PORT_STAT_RX_PORT_PPP1_L 0x5d8 +#define A_MPS_PORT_STAT_RX_PORT_PPP1_H 0x5dc +#define A_MPS_PORT_STAT_RX_PORT_PPP2_L 0x5e0 +#define A_MPS_PORT_STAT_RX_PORT_PPP2_H 0x5e4 +#define A_MPS_PORT_STAT_RX_PORT_PPP3_L 0x5e8 +#define A_MPS_PORT_STAT_RX_PORT_PPP3_H 0x5ec +#define A_MPS_PORT_STAT_RX_PORT_PPP4_L 0x5f0 +#define A_MPS_PORT_STAT_RX_PORT_PPP4_H 0x5f4 +#define A_MPS_PORT_STAT_RX_PORT_PPP5_L 0x5f8 +#define A_MPS_PORT_STAT_RX_PORT_PPP5_H 0x5fc +#define A_MPS_PORT_STAT_RX_PORT_PPP6_L 0x600 +#define A_MPS_PORT_STAT_RX_PORT_PPP6_H 0x604 +#define A_MPS_PORT_STAT_RX_PORT_PPP7_L 0x608 +#define A_MPS_PORT_STAT_RX_PORT_PPP7_H 0x60c +#define A_MPS_PORT_STAT_RX_PORT_LESS_64B_L 0x610 +#define A_MPS_PORT_STAT_RX_PORT_LESS_64B_H 0x614 +#define A_MPS_CMN_CTL 0x9000 + +#define S_NUMPORTS 0 +#define M_NUMPORTS 0x3U +#define V_NUMPORTS(x) ((x) << S_NUMPORTS) +#define G_NUMPORTS(x) (((x) >> S_NUMPORTS) & M_NUMPORTS) + +#define A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L 0x9640 +#define A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_H 0x9644 +#define A_MPS_STAT_RX_BG_1_MAC_DROP_FRAME_L 0x9648 +#define A_MPS_STAT_RX_BG_1_MAC_DROP_FRAME_H 0x964c +#define A_MPS_STAT_RX_BG_2_MAC_DROP_FRAME_L 0x9650 +#define A_MPS_STAT_RX_BG_2_MAC_DROP_FRAME_H 0x9654 +#define A_MPS_STAT_RX_BG_3_MAC_DROP_FRAME_L 0x9658 +#define A_MPS_STAT_RX_BG_3_MAC_DROP_FRAME_H 0x965c +#define A_MPS_STAT_RX_BG_0_LB_DROP_FRAME_L 0x9660 +#define A_MPS_STAT_RX_BG_0_LB_DROP_FRAME_H 0x9664 +#define A_MPS_STAT_RX_BG_1_LB_DROP_FRAME_L 0x9668 +#define A_MPS_STAT_RX_BG_1_LB_DROP_FRAME_H 0x966c +#define A_MPS_STAT_RX_BG_2_LB_DROP_FRAME_L 0x9670 +#define A_MPS_STAT_RX_BG_2_LB_DROP_FRAME_H 0x9674 +#define A_MPS_STAT_RX_BG_3_LB_DROP_FRAME_L 0x9678 +#define A_MPS_STAT_RX_BG_3_LB_DROP_FRAME_H 0x967c +#define A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L 0x9680 +#define A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_H 0x9684 +#define A_MPS_STAT_RX_BG_1_MAC_TRUNC_FRAME_L 0x9688 +#define A_MPS_STAT_RX_BG_1_MAC_TRUNC_FRAME_H 0x968c +#define A_MPS_STAT_RX_BG_2_MAC_TRUNC_FRAME_L 0x9690 +#define A_MPS_STAT_RX_BG_2_MAC_TRUNC_FRAME_H 0x9694 +#define A_MPS_STAT_RX_BG_3_MAC_TRUNC_FRAME_L 0x9698 +#define A_MPS_STAT_RX_BG_3_MAC_TRUNC_FRAME_H 0x969c +#define A_MPS_STAT_RX_BG_0_LB_TRUNC_FRAME_L 0x96a0 +#define A_MPS_STAT_RX_BG_0_LB_TRUNC_FRAME_H 0x96a4 +#define A_MPS_STAT_RX_BG_1_LB_TRUNC_FRAME_L 0x96a8 +#define A_MPS_STAT_RX_BG_1_LB_TRUNC_FRAME_H 0x96ac +#define A_MPS_STAT_RX_BG_2_LB_TRUNC_FRAME_L 0x96b0 +#define A_MPS_STAT_RX_BG_2_LB_TRUNC_FRAME_H 0x96b4 +#define A_MPS_STAT_RX_BG_3_LB_TRUNC_FRAME_L 0x96b8 +#define A_MPS_STAT_RX_BG_3_LB_TRUNC_FRAME_H 0x96bc + +#define A_MPS_VF_STAT_TX_VF_BCAST_BYTES_L 0x80 +#define A_MPS_VF_STAT_TX_VF_BCAST_FRAMES_L 0x88 +#define A_MPS_VF_STAT_TX_VF_MCAST_BYTES_L 0x90 +#define A_MPS_VF_STAT_TX_VF_MCAST_FRAMES_L 0x98 +#define A_MPS_VF_STAT_TX_VF_UCAST_BYTES_L 0xa0 +#define A_MPS_VF_STAT_TX_VF_UCAST_FRAMES_L 0xa8 +#define A_MPS_VF_STAT_TX_VF_DROP_FRAMES_L 0xb0 +#define A_MPS_VF_STAT_RX_VF_BCAST_FRAMES_L 0xd0 +#define A_MPS_VF_STAT_RX_VF_MCAST_FRAMES_L 0xe0 +#define A_MPS_VF_STAT_RX_VF_UCAST_FRAMES_L 0xf0 +#define A_MPS_VF_STAT_RX_VF_ERR_FRAMES_L 0xf8 + +#define A_MPS_PORT0_RX_IVLAN 0x3011c + +#define S_IVLAN_ETYPE 0 +#define M_IVLAN_ETYPE 0xffffU +#define V_IVLAN_ETYPE(x) ((x) << S_IVLAN_ETYPE) + +#define MPS_PORT_RX_IVLAN_STRIDE 0x4000 +#define MPS_PORT_RX_IVLAN(idx) \ + (A_MPS_PORT0_RX_IVLAN + (idx) * MPS_PORT_RX_IVLAN_STRIDE) + +#define 
A_MPS_PORT0_RX_OVLAN0 0x30120 + +#define S_OVLAN_MASK 16 +#define M_OVLAN_MASK 0xffffU +#define V_OVLAN_MASK(x) ((x) << S_OVLAN_MASK) + +#define S_OVLAN_ETYPE 0 +#define M_OVLAN_ETYPE 0xffffU +#define V_OVLAN_ETYPE(x) ((x) << S_OVLAN_ETYPE) + +#define MPS_PORT_RX_OVLAN_STRIDE 0x4000 +#define MPS_PORT_RX_OVLAN_BASE(idx) \ +(A_MPS_PORT0_RX_OVLAN0 + (idx) * MPS_PORT_RX_OVLAN_STRIDE) +#define MPS_PORT_RX_OVLAN_REG(idx, reg) (MPS_PORT_RX_OVLAN_BASE(idx) + (reg)) + +#define A_RX_OVLAN0 0x0 +#define A_RX_OVLAN1 0x4 +#define A_RX_OVLAN2 0x8 + +#define A_MPS_PORT0_RX_CTL 0x30100 + +#define S_OVLAN_EN0 0 +#define V_OVLAN_EN0(x) ((x) << S_OVLAN_EN0) +#define F_OVLAN_EN0 V_OVLAN_EN0(1) + +#define S_OVLAN_EN1 1 +#define V_OVLAN_EN1(x) ((x) << S_OVLAN_EN1) +#define F_OVLAN_EN1 V_OVLAN_EN1(1) + +#define S_OVLAN_EN2 2 +#define V_OVLAN_EN2(x) ((x) << S_OVLAN_EN2) +#define F_OVLAN_EN2 V_OVLAN_EN2(1) + +#define S_IVLAN_EN 4 +#define V_IVLAN_EN(x) ((x) << S_IVLAN_EN) +#define F_IVLAN_EN V_IVLAN_EN(1) + +#define MPS_PORT_RX_CTL_STRIDE 0x4000 +#define MPS_PORT_RX_CTL(idx) \ + (A_MPS_PORT0_RX_CTL + (idx) * MPS_PORT_RX_CTL_STRIDE) + +/* registers for module ULP_RX */ +#define ULP_RX_BASE_ADDR 0x19150 + +#define S_HPZ0 0 +#define M_HPZ0 0xfU +#define V_HPZ0(x) ((x) << S_HPZ0) +#define G_HPZ0(x) (((x) >> S_HPZ0) & M_HPZ0) + +#define A_ULP_RX_TDDP_PSZ 0x19178 + +/* registers for module SF */ +#define SF_BASE_ADDR 0x193f8 + +#define A_SF_DATA 0x193f8 +#define A_SF_OP 0x193fc + +#define S_SF_LOCK 4 +#define V_SF_LOCK(x) ((x) << S_SF_LOCK) +#define F_SF_LOCK V_SF_LOCK(1U) + +#define S_CONT 3 +#define V_CONT(x) ((x) << S_CONT) +#define F_CONT V_CONT(1U) + +#define S_BYTECNT 1 +#define M_BYTECNT 0x3U +#define V_BYTECNT(x) ((x) << S_BYTECNT) +#define G_BYTECNT(x) (((x) >> S_BYTECNT) & M_BYTECNT) + +#define S_OP 0 +#define V_OP(x) ((x) << S_OP) +#define F_OP V_OP(1U) + +/* registers for module PL */ +#define PL_BASE_ADDR 0x19400 + +#define S_SOURCEPF 8 +#define M_SOURCEPF 0x7U +#define V_SOURCEPF(x) ((x) << S_SOURCEPF) +#define G_SOURCEPF(x) (((x) >> S_SOURCEPF) & M_SOURCEPF) + +#define S_T6_SOURCEPF 9 +#define M_T6_SOURCEPF 0x7U +#define V_T6_SOURCEPF(x) ((x) << S_T6_SOURCEPF) +#define G_T6_SOURCEPF(x) (((x) >> S_T6_SOURCEPF) & M_T6_SOURCEPF) + +#define A_PL_PF_INT_ENABLE 0x3c4 + +#define S_PFSW 3 +#define V_PFSW(x) ((x) << S_PFSW) +#define F_PFSW V_PFSW(1U) + +#define S_PFCIM 1 +#define V_PFCIM(x) ((x) << S_PFCIM) +#define F_PFCIM V_PFCIM(1U) + +#define A_PL_WHOAMI 0x19400 +#define A_PL_VF_WHOAMI 0x0 + +#define A_PL_RST 0x19428 + +#define A_PL_INT_MAP0 0x19414 + +#define S_PIORST 1 +#define V_PIORST(x) ((x) << S_PIORST) +#define F_PIORST V_PIORST(1U) + +#define S_PIORSTMODE 0 +#define V_PIORSTMODE(x) ((x) << S_PIORSTMODE) +#define F_PIORSTMODE V_PIORSTMODE(1U) + +#define A_PL_REV 0x1943c +#define A_PL_VF_REV 0x4 + +#define S_REV 0 +#define M_REV 0xfU +#define V_REV(x) ((x) << S_REV) +#define G_REV(x) (((x) >> S_REV) & M_REV) + +/* registers for module LE */ +#define A_LE_DB_CONFIG 0x19c04 + +#define S_HASHEN 20 +#define V_HASHEN(x) ((x) << S_HASHEN) +#define F_HASHEN V_HASHEN(1U) + +#define A_LE_DB_TID_HASHBASE 0x19df8 + +#define LE_3_DB_HASH_MASK_GEN_IPV4_T6_A 0x19eac +#define LE_4_DB_HASH_MASK_GEN_IPV4_T6_A 0x19eb0 diff --git a/src/spdk/dpdk/drivers/net/cxgbe/base/t4_regs_values.h b/src/spdk/dpdk/drivers/net/cxgbe/base/t4_regs_values.h new file mode 100644 index 000000000..e3f549e51 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/cxgbe/base/t4_regs_values.h @@ -0,0 +1,155 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * 
Copyright(c) 2014-2018 Chelsio Communications.
+ * All rights reserved.
+ */
+
+#ifndef __T4_REGS_VALUES_H__
+#define __T4_REGS_VALUES_H__
+
+/*
+ * This file contains definitions for various T4 register value hardware
+ * constants. The types of values encoded here are predominantly those for
+ * register fields which control "modal" behavior. For the most part, we do
+ * not include definitions for register fields which are simple numeric
+ * metrics, etc.
+ */
+
+/*
+ * SGE definitions.
+ * ================
+ */
+
+/*
+ * SGE register field values.
+ */
+
+/* CONTROL register */
+#define X_RXPKTCPLMODE_SPLIT 1
+#define X_INGPCIEBOUNDARY_32B 0
+#define X_INGPADBOUNDARY_SHIFT 5
+#define X_INGPADBOUNDARY_32B 0
+
+#define X_T6_INGPADBOUNDARY_SHIFT 3
+#define X_T6_INGPADBOUNDARY_8B 0
+
+/* CONTROL2 register */
+#define X_INGPACKBOUNDARY_SHIFT 5
+#define X_INGPACKBOUNDARY_16B 0
+#define X_INGPACKBOUNDARY_64B 1
+
+/* GTS register */
+#define X_TIMERREG_RESTART_COUNTER 6
+#define X_TIMERREG_UPDATE_CIDX 7
+
+/*
+ * Egress Context field values
+ */
+#define X_FETCHBURSTMIN_64B 2
+#define X_FETCHBURSTMIN_128B 3
+#define X_FETCHBURSTMAX_256B 2
+#define X_FETCHBURSTMAX_512B 3
+
+#define X_HOSTFCMODE_NONE 0
+
+/*
+ * Ingress Context field values
+ */
+#define X_UPDATEDELIVERY_STATUS_PAGE 2
+
+#define X_RSPD_TYPE_FLBUF 0
+#define X_RSPD_TYPE_CPL 1
+
+/*
+ * Context field definitions. This is by no means a complete list of SGE
+ * Context fields. In the vast majority of cases the firmware initializes
+ * things the way they need to be set up. But in a few small cases, we need
+ * to compute new values and ship them off to the firmware to be applied to
+ * the SGE Contexts ...
+ */
+
+/*
+ * Congestion Manager Definitions.
+ */
+#define S_CONMCTXT_CNGTPMODE 19
+#define M_CONMCTXT_CNGTPMODE 0x3
+#define V_CONMCTXT_CNGTPMODE(x) ((x) << S_CONMCTXT_CNGTPMODE)
+#define G_CONMCTXT_CNGTPMODE(x) \
+ (((x) >> S_CONMCTXT_CNGTPMODE) & M_CONMCTXT_CNGTPMODE)
+#define S_CONMCTXT_CNGCHMAP 0
+#define M_CONMCTXT_CNGCHMAP 0xffff
+#define V_CONMCTXT_CNGCHMAP(x) ((x) << S_CONMCTXT_CNGCHMAP)
+#define G_CONMCTXT_CNGCHMAP(x) \
+ (((x) >> S_CONMCTXT_CNGCHMAP) & M_CONMCTXT_CNGCHMAP)
+
+#define X_CONMCTXT_CNGTPMODE_QUEUE 1
+#define X_CONMCTXT_CNGTPMODE_CHANNEL 2
+
+/*
+ * T5 and later support a new BAR2-based doorbell mechanism for Egress Queues.
+ * The User Doorbells are each 128 bytes in length with a Simple Doorbell at
+ * offsets 8x and a Write Combining single 64-byte Egress Queue Unit
+ * (X_IDXSIZE_UNIT) Gather Buffer interface at offset 64. For Ingress Queues,
+ * we have a Going To Sleep register at offsets 8x+4.
+ *
+ * As noted above, we have many instances of the Simple Doorbell and Going To
+ * Sleep registers at offsets 8x and 8x+4, respectively. We want to use a
+ * non-64-byte aligned offset for the Simple Doorbell in order to attempt to
+ * avoid buffering of the writes to the Simple Doorbell and we want to use a
+ * non-contiguous offset for the Going To Sleep writes in order to avoid
+ * possible combining between them.
+ */
+#define SGE_UDB_SIZE 128
+#define SGE_UDB_KDOORBELL 8
+#define SGE_UDB_GTS 20
+
+/*
+ * CIM definitions.
+ * ================
+ */
+
+/*
+ * CIM register field values.
+ */
+#define X_MBOWNER_NONE 0
+#define X_MBOWNER_FW 1
+#define X_MBOWNER_PL 2
+
+/*
+ * PCI-E definitions.
+ * ==================
+ */
+#define X_WINDOW_SHIFT 10
+#define X_PCIEOFST_SHIFT 10
+
+/*
+ * TP definitions.
+ * =============== + */ + +/* + * TP_VLAN_PRI_MAP controls which subset of fields will be present in the + * Compressed Filter Tuple for LE filters. Each bit set in TP_VLAN_PRI_MAP + * selects for a particular field being present. These fields, when present + * in the Compressed Filter Tuple, have the following widths in bits. + */ +#define W_FT_FCOE 1 +#define W_FT_PORT 3 +#define W_FT_VNIC_ID 17 +#define W_FT_VLAN 17 +#define W_FT_TOS 8 +#define W_FT_PROTOCOL 8 +#define W_FT_ETHERTYPE 16 +#define W_FT_MACMATCH 9 +#define W_FT_MPSHITTYPE 3 +#define W_FT_FRAGMENTATION 1 + +/* + * Some of the Compressed Filter Tuple fields have internal structure. These + * bit shifts/masks describe those structures. All shifts are relative to the + * base position of the fields within the Compressed Filter Tuple + */ +#define S_FT_VLAN_VLD 16 +#define V_FT_VLAN_VLD(x) ((x) << S_FT_VLAN_VLD) +#define F_FT_VLAN_VLD V_FT_VLAN_VLD(1U) + +#endif /* __T4_REGS_VALUES_H__ */ diff --git a/src/spdk/dpdk/drivers/net/cxgbe/base/t4_tcb.h b/src/spdk/dpdk/drivers/net/cxgbe/base/t4_tcb.h new file mode 100644 index 000000000..afd03b735 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/cxgbe/base/t4_tcb.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Chelsio Communications. + * All rights reserved. + */ + +#ifndef _T4_TCB_DEFS_H +#define _T4_TCB_DEFS_H + +/* 31:24 */ +#define W_TCB_SMAC_SEL 0 +#define S_TCB_SMAC_SEL 24 +#define M_TCB_SMAC_SEL 0xffULL +#define V_TCB_SMAC_SEL(x) ((x) << S_TCB_SMAC_SEL) + +/* 95:32 */ +#define W_TCB_T_FLAGS 1 + +/* 105:96 */ +#define W_TCB_RSS_INFO 3 +#define S_TCB_RSS_INFO 0 +#define M_TCB_RSS_INFO 0x3ffULL +#define V_TCB_RSS_INFO(x) ((x) << S_TCB_RSS_INFO) + +/* 191:160 */ +#define W_TCB_TIMESTAMP 5 +#define S_TCB_TIMESTAMP 0 +#define M_TCB_TIMESTAMP 0xffffffffULL +#define V_TCB_TIMESTAMP(x) ((x) << S_TCB_TIMESTAMP) + +/* 223:192 */ +#define W_TCB_T_RTT_TS_RECENT_AGE 6 +#define S_TCB_T_RTT_TS_RECENT_AGE 0 +#define M_TCB_T_RTT_TS_RECENT_AGE 0xffffffffULL +#define V_TCB_T_RTT_TS_RECENT_AGE(x) ((x) << S_TCB_T_RTT_TS_RECENT_AGE) + +/* 255:224 */ +#define S_TCB_T_RTSEQ_RECENT 0 +#define M_TCB_T_RTSEQ_RECENT 0xffffffffULL +#define V_TCB_T_RTSEQ_RECENT(x) ((x) << S_TCB_T_RTSEQ_RECENT) + +#define S_TF_CCTRL_ECE 60 + +#define S_TF_CCTRL_CWR 61 + +#define S_TF_CCTRL_RFR 62 + +#endif /* _T4_TCB_DEFS_H */ diff --git a/src/spdk/dpdk/drivers/net/cxgbe/base/t4fw_interface.h b/src/spdk/dpdk/drivers/net/cxgbe/base/t4fw_interface.h new file mode 100644 index 000000000..0032178d0 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/cxgbe/base/t4fw_interface.h @@ -0,0 +1,2452 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Chelsio Communications. + * All rights reserved. 
+ */ + +#ifndef _T4FW_INTERFACE_H_ +#define _T4FW_INTERFACE_H_ + +/****************************************************************************** + * R E T U R N V A L U E S + ********************************/ + +enum fw_retval { + FW_SUCCESS = 0, /* completed successfully */ + FW_EPERM = 1, /* operation not permitted */ + FW_ENOENT = 2, /* no such file or directory */ + FW_EIO = 5, /* input/output error; hw bad */ + FW_ENOEXEC = 8, /* exec format error; inv microcode */ + FW_EAGAIN = 11, /* try again */ + FW_ENOMEM = 12, /* out of memory */ + FW_EFAULT = 14, /* bad address; fw bad */ + FW_EBUSY = 16, /* resource busy */ + FW_EEXIST = 17, /* file exists */ + FW_ENODEV = 19, /* no such device */ + FW_EINVAL = 22, /* invalid argument */ + FW_ENOSPC = 28, /* no space left on device */ + FW_ENOSYS = 38, /* functionality not implemented */ + FW_ENODATA = 61, /* no data available */ + FW_EPROTO = 71, /* protocol error */ + FW_EADDRINUSE = 98, /* address already in use */ + FW_EADDRNOTAVAIL = 99, /* cannot assigned requested address */ + FW_ENETDOWN = 100, /* network is down */ + FW_ENETUNREACH = 101, /* network is unreachable */ + FW_ENOBUFS = 105, /* no buffer space available */ + FW_ETIMEDOUT = 110, /* timeout */ + FW_EINPROGRESS = 115, /* fw internal */ +}; + +/****************************************************************************** + * M E M O R Y T Y P E s + ******************************/ + +enum fw_memtype { + FW_MEMTYPE_EDC0 = 0x0, + FW_MEMTYPE_EDC1 = 0x1, + FW_MEMTYPE_EXTMEM = 0x2, + FW_MEMTYPE_FLASH = 0x4, + FW_MEMTYPE_INTERNAL = 0x5, + FW_MEMTYPE_EXTMEM1 = 0x6, +}; + +/****************************************************************************** + * W O R K R E Q U E S T s + ********************************/ + +enum fw_wr_opcodes { + FW_FILTER_WR = 0x02, + FW_ULPTX_WR = 0x04, + FW_TP_WR = 0x05, + FW_ETH_TX_PKT_WR = 0x08, + FW_ETH_TX_PKTS_WR = 0x09, + FW_ETH_TX_PKT_VM_WR = 0x11, + FW_ETH_TX_PKTS_VM_WR = 0x12, + FW_FILTER2_WR = 0x77, + FW_ETH_TX_PKTS2_WR = 0x78, +}; + +/* + * Generic work request header flit0 + */ +struct fw_wr_hdr { + __be32 hi; + __be32 lo; +}; + +/* work request opcode (hi) + */ +#define S_FW_WR_OP 24 +#define M_FW_WR_OP 0xff +#define V_FW_WR_OP(x) ((x) << S_FW_WR_OP) +#define G_FW_WR_OP(x) (((x) >> S_FW_WR_OP) & M_FW_WR_OP) + +/* atomic flag (hi) - firmware encapsulates CPLs in CPL_BARRIER + */ +#define S_FW_WR_ATOMIC 23 +#define V_FW_WR_ATOMIC(x) ((x) << S_FW_WR_ATOMIC) + +/* work request immediate data length (hi) + */ +#define S_FW_WR_IMMDLEN 0 +#define M_FW_WR_IMMDLEN 0xff +#define V_FW_WR_IMMDLEN(x) ((x) << S_FW_WR_IMMDLEN) +#define G_FW_WR_IMMDLEN(x) \ + (((x) >> S_FW_WR_IMMDLEN) & M_FW_WR_IMMDLEN) + +/* egress queue status update to egress queue status entry (lo) + */ +#define S_FW_WR_EQUEQ 30 +#define M_FW_WR_EQUEQ 0x1 +#define V_FW_WR_EQUEQ(x) ((x) << S_FW_WR_EQUEQ) +#define G_FW_WR_EQUEQ(x) (((x) >> S_FW_WR_EQUEQ) & M_FW_WR_EQUEQ) +#define F_FW_WR_EQUEQ V_FW_WR_EQUEQ(1U) + +/* flow context identifier (lo) + */ +#define S_FW_WR_FLOWID 8 +#define V_FW_WR_FLOWID(x) ((x) << S_FW_WR_FLOWID) + +/* length in units of 16-bytes (lo) + */ +#define S_FW_WR_LEN16 0 +#define M_FW_WR_LEN16 0xff +#define V_FW_WR_LEN16(x) ((x) << S_FW_WR_LEN16) +#define G_FW_WR_LEN16(x) (((x) >> S_FW_WR_LEN16) & M_FW_WR_LEN16) + +struct fw_eth_tx_pkt_wr { + __be32 op_immdlen; + __be32 equiq_to_len16; + __be64 r3; +}; + +#define S_FW_ETH_TX_PKT_WR_IMMDLEN 0 +#define M_FW_ETH_TX_PKT_WR_IMMDLEN 0x1ff +#define V_FW_ETH_TX_PKT_WR_IMMDLEN(x) ((x) << S_FW_ETH_TX_PKT_WR_IMMDLEN) +#define 
G_FW_ETH_TX_PKT_WR_IMMDLEN(x) \ + (((x) >> S_FW_ETH_TX_PKT_WR_IMMDLEN) & M_FW_ETH_TX_PKT_WR_IMMDLEN) + +struct fw_eth_tx_pkts_wr { + __be32 op_pkd; + __be32 equiq_to_len16; + __be32 r3; + __be16 plen; + __u8 npkt; + __u8 type; +}; + +struct fw_eth_tx_pkt_vm_wr { + __be32 op_immdlen; + __be32 equiq_to_len16; + __be32 r3[2]; + __u8 ethmacdst[6]; + __u8 ethmacsrc[6]; + __be16 ethtype; + __be16 vlantci; +}; + +struct fw_eth_tx_pkts_vm_wr { + __be32 op_pkd; + __be32 equiq_to_len16; + __be32 r3; + __be16 plen; + __u8 npkt; + __u8 r4; + __u8 ethmacdst[6]; + __u8 ethmacsrc[6]; + __be16 ethtype; + __be16 vlantci; +}; + +/* filter wr reply code in cookie in CPL_SET_TCB_RPL */ +enum fw_filter_wr_cookie { + FW_FILTER_WR_SUCCESS, + FW_FILTER_WR_FLT_ADDED, + FW_FILTER_WR_FLT_DELETED, + FW_FILTER_WR_SMT_TBL_FULL, + FW_FILTER_WR_EINVAL, +}; + +struct fw_filter2_wr { + __be32 op_pkd; + __be32 len16_pkd; + __be64 r3; + __be32 tid_to_iq; + __be32 del_filter_to_l2tix; + __be16 ethtype; + __be16 ethtypem; + __u8 frag_to_ovlan_vldm; + __u8 smac_sel; + __be16 rx_chan_rx_rpl_iq; + __be32 maci_to_matchtypem; + __u8 ptcl; + __u8 ptclm; + __u8 ttyp; + __u8 ttypm; + __be16 ivlan; + __be16 ivlanm; + __be16 ovlan; + __be16 ovlanm; + __u8 lip[16]; + __u8 lipm[16]; + __u8 fip[16]; + __u8 fipm[16]; + __be16 lp; + __be16 lpm; + __be16 fp; + __be16 fpm; + __be16 r7; + __u8 sma[6]; + __be16 r8; + __u8 filter_type_swapmac; + __u8 natmode_to_ulp_type; + __be16 newlport; + __be16 newfport; + __u8 newlip[16]; + __u8 newfip[16]; + __be32 natseqcheck; + __be32 r9; + __be64 r10; + __be64 r11; + __be64 r12; + __be64 r13; +}; + +#define S_FW_FILTER_WR_TID 12 +#define V_FW_FILTER_WR_TID(x) ((x) << S_FW_FILTER_WR_TID) + +#define S_FW_FILTER_WR_RQTYPE 11 +#define V_FW_FILTER_WR_RQTYPE(x) ((x) << S_FW_FILTER_WR_RQTYPE) + +#define S_FW_FILTER_WR_NOREPLY 10 +#define V_FW_FILTER_WR_NOREPLY(x) ((x) << S_FW_FILTER_WR_NOREPLY) + +#define S_FW_FILTER_WR_IQ 0 +#define V_FW_FILTER_WR_IQ(x) ((x) << S_FW_FILTER_WR_IQ) + +#define S_FW_FILTER_WR_DEL_FILTER 31 +#define V_FW_FILTER_WR_DEL_FILTER(x) ((x) << S_FW_FILTER_WR_DEL_FILTER) +#define F_FW_FILTER_WR_DEL_FILTER V_FW_FILTER_WR_DEL_FILTER(1U) + +#define S_FW_FILTER_WR_RPTTID 25 +#define V_FW_FILTER_WR_RPTTID(x) ((x) << S_FW_FILTER_WR_RPTTID) + +#define S_FW_FILTER_WR_DROP 24 +#define V_FW_FILTER_WR_DROP(x) ((x) << S_FW_FILTER_WR_DROP) + +#define S_FW_FILTER_WR_DIRSTEER 23 +#define V_FW_FILTER_WR_DIRSTEER(x) ((x) << S_FW_FILTER_WR_DIRSTEER) + +#define S_FW_FILTER_WR_MASKHASH 22 +#define V_FW_FILTER_WR_MASKHASH(x) ((x) << S_FW_FILTER_WR_MASKHASH) + +#define S_FW_FILTER_WR_DIRSTEERHASH 21 +#define V_FW_FILTER_WR_DIRSTEERHASH(x) ((x) << S_FW_FILTER_WR_DIRSTEERHASH) + +#define S_FW_FILTER_WR_LPBK 20 +#define V_FW_FILTER_WR_LPBK(x) ((x) << S_FW_FILTER_WR_LPBK) + +#define S_FW_FILTER_WR_DMAC 19 +#define V_FW_FILTER_WR_DMAC(x) ((x) << S_FW_FILTER_WR_DMAC) + +#define S_FW_FILTER_WR_SMAC 18 +#define V_FW_FILTER_WR_SMAC(x) ((x) << S_FW_FILTER_WR_SMAC) + +#define S_FW_FILTER_WR_INSVLAN 17 +#define V_FW_FILTER_WR_INSVLAN(x) ((x) << S_FW_FILTER_WR_INSVLAN) + +#define S_FW_FILTER_WR_RMVLAN 16 +#define V_FW_FILTER_WR_RMVLAN(x) ((x) << S_FW_FILTER_WR_RMVLAN) + +#define S_FW_FILTER_WR_HITCNTS 15 +#define V_FW_FILTER_WR_HITCNTS(x) ((x) << S_FW_FILTER_WR_HITCNTS) + +#define S_FW_FILTER_WR_TXCHAN 13 +#define V_FW_FILTER_WR_TXCHAN(x) ((x) << S_FW_FILTER_WR_TXCHAN) + +#define S_FW_FILTER_WR_PRIO 12 +#define V_FW_FILTER_WR_PRIO(x) ((x) << S_FW_FILTER_WR_PRIO) + +#define S_FW_FILTER_WR_L2TIX 0 +#define 
V_FW_FILTER_WR_L2TIX(x) ((x) << S_FW_FILTER_WR_L2TIX) + +#define S_FW_FILTER_WR_FRAG 7 +#define V_FW_FILTER_WR_FRAG(x) ((x) << S_FW_FILTER_WR_FRAG) + +#define S_FW_FILTER_WR_FRAGM 6 +#define V_FW_FILTER_WR_FRAGM(x) ((x) << S_FW_FILTER_WR_FRAGM) + +#define S_FW_FILTER_WR_IVLAN_VLD 5 +#define V_FW_FILTER_WR_IVLAN_VLD(x) ((x) << S_FW_FILTER_WR_IVLAN_VLD) + +#define S_FW_FILTER_WR_OVLAN_VLD 4 +#define V_FW_FILTER_WR_OVLAN_VLD(x) ((x) << S_FW_FILTER_WR_OVLAN_VLD) + +#define S_FW_FILTER_WR_IVLAN_VLDM 3 +#define V_FW_FILTER_WR_IVLAN_VLDM(x) ((x) << S_FW_FILTER_WR_IVLAN_VLDM) + +#define S_FW_FILTER_WR_OVLAN_VLDM 2 +#define V_FW_FILTER_WR_OVLAN_VLDM(x) ((x) << S_FW_FILTER_WR_OVLAN_VLDM) + +#define S_FW_FILTER_WR_RX_CHAN 15 +#define V_FW_FILTER_WR_RX_CHAN(x) ((x) << S_FW_FILTER_WR_RX_CHAN) + +#define S_FW_FILTER_WR_RX_RPL_IQ 0 +#define V_FW_FILTER_WR_RX_RPL_IQ(x) ((x) << S_FW_FILTER_WR_RX_RPL_IQ) + +#define S_FW_FILTER_WR_MACI 23 +#define V_FW_FILTER_WR_MACI(x) ((x) << S_FW_FILTER_WR_MACI) + +#define S_FW_FILTER_WR_MACIM 14 +#define V_FW_FILTER_WR_MACIM(x) ((x) << S_FW_FILTER_WR_MACIM) + +#define S_FW_FILTER_WR_FCOE 13 +#define V_FW_FILTER_WR_FCOE(x) ((x) << S_FW_FILTER_WR_FCOE) + +#define S_FW_FILTER_WR_FCOEM 12 +#define V_FW_FILTER_WR_FCOEM(x) ((x) << S_FW_FILTER_WR_FCOEM) + +#define S_FW_FILTER_WR_PORT 9 +#define V_FW_FILTER_WR_PORT(x) ((x) << S_FW_FILTER_WR_PORT) + +#define S_FW_FILTER_WR_PORTM 6 +#define V_FW_FILTER_WR_PORTM(x) ((x) << S_FW_FILTER_WR_PORTM) + +#define S_FW_FILTER_WR_MATCHTYPE 3 +#define V_FW_FILTER_WR_MATCHTYPE(x) ((x) << S_FW_FILTER_WR_MATCHTYPE) + +#define S_FW_FILTER_WR_MATCHTYPEM 0 +#define V_FW_FILTER_WR_MATCHTYPEM(x) ((x) << S_FW_FILTER_WR_MATCHTYPEM) + +#define S_FW_FILTER2_WR_SWAPMAC 0 +#define V_FW_FILTER2_WR_SWAPMAC(x) ((x) << S_FW_FILTER2_WR_SWAPMAC) + +#define S_FW_FILTER2_WR_NATMODE 5 +#define V_FW_FILTER2_WR_NATMODE(x) ((x) << S_FW_FILTER2_WR_NATMODE) + +#define S_FW_FILTER2_WR_ULP_TYPE 0 +#define V_FW_FILTER2_WR_ULP_TYPE(x) ((x) << S_FW_FILTER2_WR_ULP_TYPE) + +/****************************************************************************** + * C O M M A N D s + *********************/ + +/* + * The maximum length of time, in milliseconds, that we expect any firmware + * command to take to execute and return a reply to the host. The RESET + * and INITIALIZE commands can take a fair amount of time to execute but + * most execute in far less time than this maximum. This constant is used + * by host software to determine how long to wait for a firmware command + * reply before declaring the firmware as dead/unreachable ... + */ +#define FW_CMD_MAX_TIMEOUT 10000 + +/* + * If a host driver does a HELLO and discovers that there's already a MASTER + * selected, we may have to wait for that MASTER to finish issuing RESET, + * configuration and INITIALIZE commands. Also, there's a possibility that + * our own HELLO may get lost if it happens right as the MASTER is issuing a + * RESET command, so we need to be willing to make a few retries of our HELLO.
+ */ +#define FW_CMD_HELLO_TIMEOUT (3 * FW_CMD_MAX_TIMEOUT) +#define FW_CMD_HELLO_RETRIES 3 + +enum fw_cmd_opcodes { + FW_LDST_CMD = 0x01, + FW_RESET_CMD = 0x03, + FW_HELLO_CMD = 0x04, + FW_BYE_CMD = 0x05, + FW_INITIALIZE_CMD = 0x06, + FW_CAPS_CONFIG_CMD = 0x07, + FW_PARAMS_CMD = 0x08, + FW_PFVF_CMD = 0x09, + FW_IQ_CMD = 0x10, + FW_EQ_ETH_CMD = 0x12, + FW_EQ_CTRL_CMD = 0x13, + FW_VI_CMD = 0x14, + FW_VI_MAC_CMD = 0x15, + FW_VI_RXMODE_CMD = 0x16, + FW_VI_ENABLE_CMD = 0x17, + FW_VI_STATS_CMD = 0x1a, + FW_PORT_CMD = 0x1b, + FW_RSS_IND_TBL_CMD = 0x20, + FW_RSS_GLB_CONFIG_CMD = 0x22, + FW_RSS_VI_CONFIG_CMD = 0x23, + FW_CLIP_CMD = 0x28, + FW_DEBUG_CMD = 0x81, +}; + +enum fw_cmd_cap { + FW_CMD_CAP_PORT = 0x04, +}; + +/* + * Generic command header flit0 + */ +struct fw_cmd_hdr { + __be32 hi; + __be32 lo; +}; + +#define S_FW_CMD_OP 24 +#define M_FW_CMD_OP 0xff +#define V_FW_CMD_OP(x) ((x) << S_FW_CMD_OP) +#define G_FW_CMD_OP(x) (((x) >> S_FW_CMD_OP) & M_FW_CMD_OP) + +#define S_FW_CMD_REQUEST 23 +#define M_FW_CMD_REQUEST 0x1 +#define V_FW_CMD_REQUEST(x) ((x) << S_FW_CMD_REQUEST) +#define G_FW_CMD_REQUEST(x) (((x) >> S_FW_CMD_REQUEST) & M_FW_CMD_REQUEST) +#define F_FW_CMD_REQUEST V_FW_CMD_REQUEST(1U) + +#define S_FW_CMD_READ 22 +#define M_FW_CMD_READ 0x1 +#define V_FW_CMD_READ(x) ((x) << S_FW_CMD_READ) +#define G_FW_CMD_READ(x) (((x) >> S_FW_CMD_READ) & M_FW_CMD_READ) +#define F_FW_CMD_READ V_FW_CMD_READ(1U) + +#define S_FW_CMD_WRITE 21 +#define M_FW_CMD_WRITE 0x1 +#define V_FW_CMD_WRITE(x) ((x) << S_FW_CMD_WRITE) +#define G_FW_CMD_WRITE(x) (((x) >> S_FW_CMD_WRITE) & M_FW_CMD_WRITE) +#define F_FW_CMD_WRITE V_FW_CMD_WRITE(1U) + +#define S_FW_CMD_EXEC 20 +#define M_FW_CMD_EXEC 0x1 +#define V_FW_CMD_EXEC(x) ((x) << S_FW_CMD_EXEC) +#define G_FW_CMD_EXEC(x) (((x) >> S_FW_CMD_EXEC) & M_FW_CMD_EXEC) +#define F_FW_CMD_EXEC V_FW_CMD_EXEC(1U) + +#define S_FW_CMD_RETVAL 8 +#define M_FW_CMD_RETVAL 0xff +#define V_FW_CMD_RETVAL(x) ((x) << S_FW_CMD_RETVAL) +#define G_FW_CMD_RETVAL(x) (((x) >> S_FW_CMD_RETVAL) & M_FW_CMD_RETVAL) + +#define S_FW_CMD_LEN16 0 +#define M_FW_CMD_LEN16 0xff +#define V_FW_CMD_LEN16(x) ((x) << S_FW_CMD_LEN16) +#define G_FW_CMD_LEN16(x) (((x) >> S_FW_CMD_LEN16) & M_FW_CMD_LEN16) + +#define FW_LEN16(fw_struct) V_FW_CMD_LEN16(sizeof(fw_struct) / 16) + +/* address spaces + */ +enum fw_ldst_addrspc { + FW_LDST_ADDRSPC_TP_PIO = 0x0010, +}; + +struct fw_ldst_cmd { + __be32 op_to_addrspace; + __be32 cycles_to_len16; + union fw_ldst { + struct fw_ldst_addrval { + __be32 addr; + __be32 val; + } addrval; + struct fw_ldst_idctxt { + __be32 physid; + __be32 msg_ctxtflush; + __be32 ctxt_data7; + __be32 ctxt_data6; + __be32 ctxt_data5; + __be32 ctxt_data4; + __be32 ctxt_data3; + __be32 ctxt_data2; + __be32 ctxt_data1; + __be32 ctxt_data0; + } idctxt; + struct fw_ldst_mdio { + __be16 paddr_mmd; + __be16 raddr; + __be16 vctl; + __be16 rval; + } mdio; + struct fw_ldst_mps { + __be16 fid_ctl; + __be16 rplcpf_pkd; + __be32 rplc127_96; + __be32 rplc95_64; + __be32 rplc63_32; + __be32 rplc31_0; + __be32 atrb; + __be16 vlan[16]; + } mps; + struct fw_ldst_func { + __u8 access_ctl; + __u8 mod_index; + __be16 ctl_id; + __be32 offset; + __be64 data0; + __be64 data1; + } func; + struct fw_ldst_pcie { + __u8 ctrl_to_fn; + __u8 bnum; + __u8 r; + __u8 ext_r; + __u8 select_naccess; + __u8 pcie_fn; + __be16 nset_pkd; + __be32 data[12]; + } pcie; + struct fw_ldst_i2c_deprecated { + __u8 pid_pkd; + __u8 base; + __u8 boffset; + __u8 data; + __be32 r9; + } i2c_deprecated; + struct fw_ldst_i2c { + __u8 pid; + __u8 did; + __u8 
boffset; + __u8 blen; + __be32 r9; + __u8 data[48]; + } i2c; + struct fw_ldst_le { + __be32 index; + __be32 r9; + __u8 val[33]; + __u8 r11[7]; + } le; + } u; +}; + +#define S_FW_LDST_CMD_ADDRSPACE 0 +#define M_FW_LDST_CMD_ADDRSPACE 0xff +#define V_FW_LDST_CMD_ADDRSPACE(x) ((x) << S_FW_LDST_CMD_ADDRSPACE) + +struct fw_reset_cmd { + __be32 op_to_write; + __be32 retval_len16; + __be32 val; + __be32 halt_pkd; +}; + +#define S_FW_RESET_CMD_HALT 31 +#define M_FW_RESET_CMD_HALT 0x1 +#define V_FW_RESET_CMD_HALT(x) ((x) << S_FW_RESET_CMD_HALT) +#define G_FW_RESET_CMD_HALT(x) \ + (((x) >> S_FW_RESET_CMD_HALT) & M_FW_RESET_CMD_HALT) +#define F_FW_RESET_CMD_HALT V_FW_RESET_CMD_HALT(1U) + +enum { + FW_HELLO_CMD_STAGE_OS = 0, +}; + +struct fw_hello_cmd { + __be32 op_to_write; + __be32 retval_len16; + __be32 err_to_clearinit; + __be32 fwrev; +}; + +#define S_FW_HELLO_CMD_ERR 31 +#define M_FW_HELLO_CMD_ERR 0x1 +#define V_FW_HELLO_CMD_ERR(x) ((x) << S_FW_HELLO_CMD_ERR) +#define G_FW_HELLO_CMD_ERR(x) \ + (((x) >> S_FW_HELLO_CMD_ERR) & M_FW_HELLO_CMD_ERR) +#define F_FW_HELLO_CMD_ERR V_FW_HELLO_CMD_ERR(1U) + +#define S_FW_HELLO_CMD_INIT 30 +#define M_FW_HELLO_CMD_INIT 0x1 +#define V_FW_HELLO_CMD_INIT(x) ((x) << S_FW_HELLO_CMD_INIT) +#define G_FW_HELLO_CMD_INIT(x) \ + (((x) >> S_FW_HELLO_CMD_INIT) & M_FW_HELLO_CMD_INIT) +#define F_FW_HELLO_CMD_INIT V_FW_HELLO_CMD_INIT(1U) + +#define S_FW_HELLO_CMD_MASTERDIS 29 +#define M_FW_HELLO_CMD_MASTERDIS 0x1 +#define V_FW_HELLO_CMD_MASTERDIS(x) ((x) << S_FW_HELLO_CMD_MASTERDIS) +#define G_FW_HELLO_CMD_MASTERDIS(x) \ + (((x) >> S_FW_HELLO_CMD_MASTERDIS) & M_FW_HELLO_CMD_MASTERDIS) +#define F_FW_HELLO_CMD_MASTERDIS V_FW_HELLO_CMD_MASTERDIS(1U) + +#define S_FW_HELLO_CMD_MASTERFORCE 28 +#define M_FW_HELLO_CMD_MASTERFORCE 0x1 +#define V_FW_HELLO_CMD_MASTERFORCE(x) ((x) << S_FW_HELLO_CMD_MASTERFORCE) +#define G_FW_HELLO_CMD_MASTERFORCE(x) \ + (((x) >> S_FW_HELLO_CMD_MASTERFORCE) & M_FW_HELLO_CMD_MASTERFORCE) +#define F_FW_HELLO_CMD_MASTERFORCE V_FW_HELLO_CMD_MASTERFORCE(1U) + +#define S_FW_HELLO_CMD_MBMASTER 24 +#define M_FW_HELLO_CMD_MBMASTER 0xf +#define V_FW_HELLO_CMD_MBMASTER(x) ((x) << S_FW_HELLO_CMD_MBMASTER) +#define G_FW_HELLO_CMD_MBMASTER(x) \ + (((x) >> S_FW_HELLO_CMD_MBMASTER) & M_FW_HELLO_CMD_MBMASTER) + +#define S_FW_HELLO_CMD_MBASYNCNOT 20 +#define M_FW_HELLO_CMD_MBASYNCNOT 0x7 +#define V_FW_HELLO_CMD_MBASYNCNOT(x) ((x) << S_FW_HELLO_CMD_MBASYNCNOT) +#define G_FW_HELLO_CMD_MBASYNCNOT(x) \ + (((x) >> S_FW_HELLO_CMD_MBASYNCNOT) & M_FW_HELLO_CMD_MBASYNCNOT) + +#define S_FW_HELLO_CMD_STAGE 17 +#define M_FW_HELLO_CMD_STAGE 0x7 +#define V_FW_HELLO_CMD_STAGE(x) ((x) << S_FW_HELLO_CMD_STAGE) +#define G_FW_HELLO_CMD_STAGE(x) \ + (((x) >> S_FW_HELLO_CMD_STAGE) & M_FW_HELLO_CMD_STAGE) + +#define S_FW_HELLO_CMD_CLEARINIT 16 +#define M_FW_HELLO_CMD_CLEARINIT 0x1 +#define V_FW_HELLO_CMD_CLEARINIT(x) ((x) << S_FW_HELLO_CMD_CLEARINIT) +#define G_FW_HELLO_CMD_CLEARINIT(x) \ + (((x) >> S_FW_HELLO_CMD_CLEARINIT) & M_FW_HELLO_CMD_CLEARINIT) +#define F_FW_HELLO_CMD_CLEARINIT V_FW_HELLO_CMD_CLEARINIT(1U) + +struct fw_bye_cmd { + __be32 op_to_write; + __be32 retval_len16; + __be64 r3; +}; + +struct fw_initialize_cmd { + __be32 op_to_write; + __be32 retval_len16; + __be64 r3; +}; + +enum fw_caps_config_nic { + FW_CAPS_CONFIG_NIC_HASHFILTER = 0x00000020, + FW_CAPS_CONFIG_NIC_ETHOFLD = 0x00000040, +}; + +enum fw_memtype_cf { + FW_MEMTYPE_CF_FLASH = FW_MEMTYPE_FLASH, +}; + +struct fw_caps_config_cmd { + __be32 op_to_write; + __be32 cfvalid_to_len16; + __be32 r2; + __be32 hwmbitmap; + 
__be16 nbmcaps; + __be16 linkcaps; + __be16 switchcaps; + __be16 r3; + __be16 niccaps; + __be16 toecaps; + __be16 rdmacaps; + __be16 r4; + __be16 iscsicaps; + __be16 fcoecaps; + __be32 cfcsum; + __be32 finiver; + __be32 finicsum; +}; + +#define S_FW_CAPS_CONFIG_CMD_CFVALID 27 +#define M_FW_CAPS_CONFIG_CMD_CFVALID 0x1 +#define V_FW_CAPS_CONFIG_CMD_CFVALID(x) ((x) << S_FW_CAPS_CONFIG_CMD_CFVALID) +#define G_FW_CAPS_CONFIG_CMD_CFVALID(x) \ + (((x) >> S_FW_CAPS_CONFIG_CMD_CFVALID) & M_FW_CAPS_CONFIG_CMD_CFVALID) +#define F_FW_CAPS_CONFIG_CMD_CFVALID V_FW_CAPS_CONFIG_CMD_CFVALID(1U) + +#define S_FW_CAPS_CONFIG_CMD_MEMTYPE_CF 24 +#define M_FW_CAPS_CONFIG_CMD_MEMTYPE_CF 0x7 +#define V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(x) \ + ((x) << S_FW_CAPS_CONFIG_CMD_MEMTYPE_CF) +#define G_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(x) \ + (((x) >> S_FW_CAPS_CONFIG_CMD_MEMTYPE_CF) & \ + M_FW_CAPS_CONFIG_CMD_MEMTYPE_CF) + +#define S_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF 16 +#define M_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF 0xff +#define V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(x) \ + ((x) << S_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF) +#define G_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(x) \ + (((x) >> S_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF) & \ + M_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF) + +/* + * params command mnemonics + */ +enum fw_params_mnem { + FW_PARAMS_MNEM_DEV = 1, /* device params */ + FW_PARAMS_MNEM_PFVF = 2, /* function params */ + FW_PARAMS_MNEM_REG = 3, /* limited register access */ + FW_PARAMS_MNEM_DMAQ = 4, /* dma queue params */ +}; + +/* + * device parameters + */ + +#define S_FW_PARAMS_PARAM_FILTER_MODE 16 +#define M_FW_PARAMS_PARAM_FILTER_MODE 0xffff +#define V_FW_PARAMS_PARAM_FILTER_MODE(x) \ + ((x) << S_FW_PARAMS_PARAM_FILTER_MODE) +#define G_FW_PARAMS_PARAM_FILTER_MODE(x) \ + (((x) >> S_FW_PARAMS_PARAM_FILTER_MODE) & \ + M_FW_PARAMS_PARAM_FILTER_MODE) + +#define S_FW_PARAMS_PARAM_FILTER_MASK 0 +#define M_FW_PARAMS_PARAM_FILTER_MASK 0xffff +#define V_FW_PARAMS_PARAM_FILTER_MASK(x) \ + ((x) << S_FW_PARAMS_PARAM_FILTER_MASK) +#define G_FW_PARAMS_PARAM_FILTER_MASK(x) \ + (((x) >> S_FW_PARAMS_PARAM_FILTER_MASK) & \ + M_FW_PARAMS_PARAM_FILTER_MASK) + +enum fw_params_param_dev { + FW_PARAMS_PARAM_DEV_CCLK = 0x00, /* chip core clock in khz */ + FW_PARAMS_PARAM_DEV_PORTVEC = 0x01, /* the port vector */ + FW_PARAMS_PARAM_DEV_NTID = 0x02, /* reads the number of TIDs + * allocated by the device's + * Lookup Engine + */ + FW_PARAMS_PARAM_DEV_FWREV = 0x0B, /* fw version */ + FW_PARAMS_PARAM_DEV_TPREV = 0x0C, /* tp version */ + FW_PARAMS_PARAM_DEV_ULPTX_MEMWRITE_DSGL = 0x17, + FW_PARAMS_PARAM_DEV_FILTER2_WR = 0x1D, + FW_PARAMS_PARAM_DEV_OPAQUE_VIID_SMT_EXTN = 0x27, + FW_PARAMS_PARAM_DEV_FILTER = 0x2E, +}; + +/* + * physical and virtual function parameters + */ +enum fw_params_param_pfvf { + FW_PARAMS_PARAM_PFVF_CLIP_START = 0x03, + FW_PARAMS_PARAM_PFVF_CLIP_END = 0x04, + FW_PARAMS_PARAM_PFVF_FILTER_START = 0x05, + FW_PARAMS_PARAM_PFVF_FILTER_END = 0x06, + FW_PARAMS_PARAM_PFVF_L2T_START = 0x13, + FW_PARAMS_PARAM_PFVF_L2T_END = 0x14, + FW_PARAMS_PARAM_PFVF_CPLFW4MSG_ENCAP = 0x31, + FW_PARAMS_PARAM_PFVF_PORT_CAPS32 = 0x3A, + FW_PARAMS_PARAM_PFVF_MAX_PKTS_PER_ETH_TX_PKTS_WR = 0x3D, + FW_PARAMS_PARAM_PFVF_GET_SMT_START = 0x3E, + FW_PARAMS_PARAM_PFVF_GET_SMT_SIZE = 0x3F, +}; + +/* + * dma queue parameters + */ +enum fw_params_param_dmaq { + FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH = 0x01, + FW_PARAMS_PARAM_DMAQ_CONM_CTXT = 0x20, +}; + +enum fw_params_param_dev_filter { + FW_PARAM_DEV_FILTER_VNIC_MODE = 0x00, + FW_PARAM_DEV_FILTER_MODE_MASK = 0x01, +}; + +#define 
S_FW_PARAMS_MNEM 24 +#define M_FW_PARAMS_MNEM 0xff +#define V_FW_PARAMS_MNEM(x) ((x) << S_FW_PARAMS_MNEM) +#define G_FW_PARAMS_MNEM(x) \ + (((x) >> S_FW_PARAMS_MNEM) & M_FW_PARAMS_MNEM) + +#define S_FW_PARAMS_PARAM_X 16 +#define M_FW_PARAMS_PARAM_X 0xff +#define V_FW_PARAMS_PARAM_X(x) ((x) << S_FW_PARAMS_PARAM_X) +#define G_FW_PARAMS_PARAM_X(x) \ + (((x) >> S_FW_PARAMS_PARAM_X) & M_FW_PARAMS_PARAM_X) + +#define S_FW_PARAMS_PARAM_Y 8 +#define M_FW_PARAMS_PARAM_Y 0xff +#define V_FW_PARAMS_PARAM_Y(x) ((x) << S_FW_PARAMS_PARAM_Y) +#define G_FW_PARAMS_PARAM_Y(x) \ + (((x) >> S_FW_PARAMS_PARAM_Y) & M_FW_PARAMS_PARAM_Y) + +#define S_FW_PARAMS_PARAM_Z 0 +#define M_FW_PARAMS_PARAM_Z 0xff +#define V_FW_PARAMS_PARAM_Z(x) ((x) << S_FW_PARAMS_PARAM_Z) +#define G_FW_PARAMS_PARAM_Z(x) \ + (((x) >> S_FW_PARAMS_PARAM_Z) & M_FW_PARAMS_PARAM_Z) + +#define S_FW_PARAMS_PARAM_YZ 0 +#define M_FW_PARAMS_PARAM_YZ 0xffff +#define V_FW_PARAMS_PARAM_YZ(x) ((x) << S_FW_PARAMS_PARAM_YZ) +#define G_FW_PARAMS_PARAM_YZ(x) \ + (((x) >> S_FW_PARAMS_PARAM_YZ) & M_FW_PARAMS_PARAM_YZ) + +#define S_FW_PARAMS_PARAM_XYZ 0 +#define M_FW_PARAMS_PARAM_XYZ 0xffffff +#define V_FW_PARAMS_PARAM_XYZ(x) ((x) << S_FW_PARAMS_PARAM_XYZ) + +struct fw_params_cmd { + __be32 op_to_vfn; + __be32 retval_len16; + struct fw_params_param { + __be32 mnem; + __be32 val; + } param[7]; +}; + +#define S_FW_PARAMS_CMD_PFN 8 +#define M_FW_PARAMS_CMD_PFN 0x7 +#define V_FW_PARAMS_CMD_PFN(x) ((x) << S_FW_PARAMS_CMD_PFN) +#define G_FW_PARAMS_CMD_PFN(x) \ + (((x) >> S_FW_PARAMS_CMD_PFN) & M_FW_PARAMS_CMD_PFN) + +#define S_FW_PARAMS_CMD_VFN 0 +#define M_FW_PARAMS_CMD_VFN 0xff +#define V_FW_PARAMS_CMD_VFN(x) ((x) << S_FW_PARAMS_CMD_VFN) +#define G_FW_PARAMS_CMD_VFN(x) \ + (((x) >> S_FW_PARAMS_CMD_VFN) & M_FW_PARAMS_CMD_VFN) + +struct fw_pfvf_cmd { + __be32 op_to_vfn; + __be32 retval_len16; + __be32 niqflint_niq; + __be32 type_to_neq; + __be32 tc_to_nexactf; + __be32 r_caps_to_nethctrl; + __be16 nricq; + __be16 nriqp; + __be32 r4; +}; + +#define S_FW_PFVF_CMD_PFN 8 +#define V_FW_PFVF_CMD_PFN(x) ((x) << S_FW_PFVF_CMD_PFN) + +#define S_FW_PFVF_CMD_VFN 0 +#define V_FW_PFVF_CMD_VFN(x) ((x) << S_FW_PFVF_CMD_VFN) + +#define S_FW_PFVF_CMD_NIQFLINT 20 +#define M_FW_PFVF_CMD_NIQFLINT 0xfff +#define G_FW_PFVF_CMD_NIQFLINT(x) \ + (((x) >> S_FW_PFVF_CMD_NIQFLINT) & M_FW_PFVF_CMD_NIQFLINT) + +#define S_FW_PFVF_CMD_NIQ 0 +#define M_FW_PFVF_CMD_NIQ 0xfffff +#define G_FW_PFVF_CMD_NIQ(x) \ + (((x) >> S_FW_PFVF_CMD_NIQ) & M_FW_PFVF_CMD_NIQ) + +#define S_FW_PFVF_CMD_PMASK 20 +#define M_FW_PFVF_CMD_PMASK 0xf +#define G_FW_PFVF_CMD_PMASK(x) \ + (((x) >> S_FW_PFVF_CMD_PMASK) & M_FW_PFVF_CMD_PMASK) + +#define S_FW_PFVF_CMD_NEQ 0 +#define M_FW_PFVF_CMD_NEQ 0xfffff +#define G_FW_PFVF_CMD_NEQ(x) \ + (((x) >> S_FW_PFVF_CMD_NEQ) & M_FW_PFVF_CMD_NEQ) + +#define S_FW_PFVF_CMD_TC 24 +#define M_FW_PFVF_CMD_TC 0xff +#define G_FW_PFVF_CMD_TC(x) \ + (((x) >> S_FW_PFVF_CMD_TC) & M_FW_PFVF_CMD_TC) + +#define S_FW_PFVF_CMD_NVI 16 +#define M_FW_PFVF_CMD_NVI 0xff +#define G_FW_PFVF_CMD_NVI(x) \ + (((x) >> S_FW_PFVF_CMD_NVI) & M_FW_PFVF_CMD_NVI) + +#define S_FW_PFVF_CMD_NEXACTF 0 +#define M_FW_PFVF_CMD_NEXACTF 0xffff +#define G_FW_PFVF_CMD_NEXACTF(x) \ + (((x) >> S_FW_PFVF_CMD_NEXACTF) & M_FW_PFVF_CMD_NEXACTF) + +#define S_FW_PFVF_CMD_R_CAPS 24 +#define M_FW_PFVF_CMD_R_CAPS 0xff +#define G_FW_PFVF_CMD_R_CAPS(x) \ + (((x) >> S_FW_PFVF_CMD_R_CAPS) & M_FW_PFVF_CMD_R_CAPS) + +#define S_FW_PFVF_CMD_WX_CAPS 16 +#define M_FW_PFVF_CMD_WX_CAPS 0xff +#define G_FW_PFVF_CMD_WX_CAPS(x) \ + (((x) >> 
S_FW_PFVF_CMD_WX_CAPS) & M_FW_PFVF_CMD_WX_CAPS) + +#define S_FW_PFVF_CMD_NETHCTRL 0 +#define M_FW_PFVF_CMD_NETHCTRL 0xffff +#define G_FW_PFVF_CMD_NETHCTRL(x) \ + (((x) >> S_FW_PFVF_CMD_NETHCTRL) & M_FW_PFVF_CMD_NETHCTRL) + +/* + * ingress queue type; the first 1K ingress queues can have associated 0, + * 1 or 2 free lists and an interrupt, all other ingress queues lack these + * capabilities + */ +enum fw_iq_type { + FW_IQ_TYPE_FL_INT_CAP, +}; + +enum fw_iq_iqtype { + FW_IQ_IQTYPE_NIC = 1, + FW_IQ_IQTYPE_OFLD, +}; + +struct fw_iq_cmd { + __be32 op_to_vfn; + __be32 alloc_to_len16; + __be16 physiqid; + __be16 iqid; + __be16 fl0id; + __be16 fl1id; + __be32 type_to_iqandstindex; + __be16 iqdroprss_to_iqesize; + __be16 iqsize; + __be64 iqaddr; + __be32 iqns_to_fl0congen; + __be16 fl0dcaen_to_fl0cidxfthresh; + __be16 fl0size; + __be64 fl0addr; + __be32 fl1cngchmap_to_fl1congen; + __be16 fl1dcaen_to_fl1cidxfthresh; + __be16 fl1size; + __be64 fl1addr; +}; + +#define S_FW_IQ_CMD_PFN 8 +#define M_FW_IQ_CMD_PFN 0x7 +#define V_FW_IQ_CMD_PFN(x) ((x) << S_FW_IQ_CMD_PFN) +#define G_FW_IQ_CMD_PFN(x) (((x) >> S_FW_IQ_CMD_PFN) & M_FW_IQ_CMD_PFN) + +#define S_FW_IQ_CMD_VFN 0 +#define M_FW_IQ_CMD_VFN 0xff +#define V_FW_IQ_CMD_VFN(x) ((x) << S_FW_IQ_CMD_VFN) +#define G_FW_IQ_CMD_VFN(x) (((x) >> S_FW_IQ_CMD_VFN) & M_FW_IQ_CMD_VFN) + +#define S_FW_IQ_CMD_ALLOC 31 +#define M_FW_IQ_CMD_ALLOC 0x1 +#define V_FW_IQ_CMD_ALLOC(x) ((x) << S_FW_IQ_CMD_ALLOC) +#define G_FW_IQ_CMD_ALLOC(x) \ + (((x) >> S_FW_IQ_CMD_ALLOC) & M_FW_IQ_CMD_ALLOC) +#define F_FW_IQ_CMD_ALLOC V_FW_IQ_CMD_ALLOC(1U) + +#define S_FW_IQ_CMD_FREE 30 +#define M_FW_IQ_CMD_FREE 0x1 +#define V_FW_IQ_CMD_FREE(x) ((x) << S_FW_IQ_CMD_FREE) +#define G_FW_IQ_CMD_FREE(x) (((x) >> S_FW_IQ_CMD_FREE) & M_FW_IQ_CMD_FREE) +#define F_FW_IQ_CMD_FREE V_FW_IQ_CMD_FREE(1U) + +#define S_FW_IQ_CMD_IQSTART 28 +#define M_FW_IQ_CMD_IQSTART 0x1 +#define V_FW_IQ_CMD_IQSTART(x) ((x) << S_FW_IQ_CMD_IQSTART) +#define G_FW_IQ_CMD_IQSTART(x) \ + (((x) >> S_FW_IQ_CMD_IQSTART) & M_FW_IQ_CMD_IQSTART) +#define F_FW_IQ_CMD_IQSTART V_FW_IQ_CMD_IQSTART(1U) + +#define S_FW_IQ_CMD_IQSTOP 27 +#define M_FW_IQ_CMD_IQSTOP 0x1 +#define V_FW_IQ_CMD_IQSTOP(x) ((x) << S_FW_IQ_CMD_IQSTOP) +#define G_FW_IQ_CMD_IQSTOP(x) \ + (((x) >> S_FW_IQ_CMD_IQSTOP) & M_FW_IQ_CMD_IQSTOP) +#define F_FW_IQ_CMD_IQSTOP V_FW_IQ_CMD_IQSTOP(1U) + +#define S_FW_IQ_CMD_TYPE 29 +#define M_FW_IQ_CMD_TYPE 0x7 +#define V_FW_IQ_CMD_TYPE(x) ((x) << S_FW_IQ_CMD_TYPE) +#define G_FW_IQ_CMD_TYPE(x) (((x) >> S_FW_IQ_CMD_TYPE) & M_FW_IQ_CMD_TYPE) + +#define S_FW_IQ_CMD_IQASYNCH 28 +#define M_FW_IQ_CMD_IQASYNCH 0x1 +#define V_FW_IQ_CMD_IQASYNCH(x) ((x) << S_FW_IQ_CMD_IQASYNCH) +#define G_FW_IQ_CMD_IQASYNCH(x) \ + (((x) >> S_FW_IQ_CMD_IQASYNCH) & M_FW_IQ_CMD_IQASYNCH) +#define F_FW_IQ_CMD_IQASYNCH V_FW_IQ_CMD_IQASYNCH(1U) + +#define S_FW_IQ_CMD_VIID 16 +#define M_FW_IQ_CMD_VIID 0xfff +#define V_FW_IQ_CMD_VIID(x) ((x) << S_FW_IQ_CMD_VIID) +#define G_FW_IQ_CMD_VIID(x) (((x) >> S_FW_IQ_CMD_VIID) & M_FW_IQ_CMD_VIID) + +#define S_FW_IQ_CMD_IQANDST 15 +#define M_FW_IQ_CMD_IQANDST 0x1 +#define V_FW_IQ_CMD_IQANDST(x) ((x) << S_FW_IQ_CMD_IQANDST) +#define G_FW_IQ_CMD_IQANDST(x) \ + (((x) >> S_FW_IQ_CMD_IQANDST) & M_FW_IQ_CMD_IQANDST) +#define F_FW_IQ_CMD_IQANDST V_FW_IQ_CMD_IQANDST(1U) + +#define S_FW_IQ_CMD_IQANUD 12 +#define M_FW_IQ_CMD_IQANUD 0x3 +#define V_FW_IQ_CMD_IQANUD(x) ((x) << S_FW_IQ_CMD_IQANUD) +#define G_FW_IQ_CMD_IQANUD(x) \ + (((x) >> S_FW_IQ_CMD_IQANUD) & M_FW_IQ_CMD_IQANUD) + +#define S_FW_IQ_CMD_IQANDSTINDEX 0 +#define 
M_FW_IQ_CMD_IQANDSTINDEX 0xfff +#define V_FW_IQ_CMD_IQANDSTINDEX(x) ((x) << S_FW_IQ_CMD_IQANDSTINDEX) +#define G_FW_IQ_CMD_IQANDSTINDEX(x) \ + (((x) >> S_FW_IQ_CMD_IQANDSTINDEX) & M_FW_IQ_CMD_IQANDSTINDEX) + +#define S_FW_IQ_CMD_IQGTSMODE 14 +#define M_FW_IQ_CMD_IQGTSMODE 0x1 +#define V_FW_IQ_CMD_IQGTSMODE(x) ((x) << S_FW_IQ_CMD_IQGTSMODE) +#define G_FW_IQ_CMD_IQGTSMODE(x) \ + (((x) >> S_FW_IQ_CMD_IQGTSMODE) & M_FW_IQ_CMD_IQGTSMODE) +#define F_FW_IQ_CMD_IQGTSMODE V_FW_IQ_CMD_IQGTSMODE(1U) + +#define S_FW_IQ_CMD_IQPCIECH 12 +#define M_FW_IQ_CMD_IQPCIECH 0x3 +#define V_FW_IQ_CMD_IQPCIECH(x) ((x) << S_FW_IQ_CMD_IQPCIECH) +#define G_FW_IQ_CMD_IQPCIECH(x) \ + (((x) >> S_FW_IQ_CMD_IQPCIECH) & M_FW_IQ_CMD_IQPCIECH) + +#define S_FW_IQ_CMD_IQINTCNTTHRESH 4 +#define M_FW_IQ_CMD_IQINTCNTTHRESH 0x3 +#define V_FW_IQ_CMD_IQINTCNTTHRESH(x) ((x) << S_FW_IQ_CMD_IQINTCNTTHRESH) +#define G_FW_IQ_CMD_IQINTCNTTHRESH(x) \ + (((x) >> S_FW_IQ_CMD_IQINTCNTTHRESH) & M_FW_IQ_CMD_IQINTCNTTHRESH) + +#define S_FW_IQ_CMD_IQESIZE 0 +#define M_FW_IQ_CMD_IQESIZE 0x3 +#define V_FW_IQ_CMD_IQESIZE(x) ((x) << S_FW_IQ_CMD_IQESIZE) +#define G_FW_IQ_CMD_IQESIZE(x) \ + (((x) >> S_FW_IQ_CMD_IQESIZE) & M_FW_IQ_CMD_IQESIZE) + +#define S_FW_IQ_CMD_IQRO 30 +#define M_FW_IQ_CMD_IQRO 0x1 +#define V_FW_IQ_CMD_IQRO(x) ((x) << S_FW_IQ_CMD_IQRO) +#define G_FW_IQ_CMD_IQRO(x) \ + (((x) >> S_FW_IQ_CMD_IQRO) & M_FW_IQ_CMD_IQRO) +#define F_FW_IQ_CMD_IQRO V_FW_IQ_CMD_IQRO(1U) + +#define S_FW_IQ_CMD_IQFLINTCONGEN 27 +#define M_FW_IQ_CMD_IQFLINTCONGEN 0x1 +#define V_FW_IQ_CMD_IQFLINTCONGEN(x) ((x) << S_FW_IQ_CMD_IQFLINTCONGEN) +#define G_FW_IQ_CMD_IQFLINTCONGEN(x) \ + (((x) >> S_FW_IQ_CMD_IQFLINTCONGEN) & M_FW_IQ_CMD_IQFLINTCONGEN) +#define F_FW_IQ_CMD_IQFLINTCONGEN V_FW_IQ_CMD_IQFLINTCONGEN(1U) + +#define S_FW_IQ_CMD_IQTYPE 24 +#define V_FW_IQ_CMD_IQTYPE(x) ((x) << S_FW_IQ_CMD_IQTYPE) + +#define S_FW_IQ_CMD_FL0CNGCHMAP 20 +#define M_FW_IQ_CMD_FL0CNGCHMAP 0xf +#define V_FW_IQ_CMD_FL0CNGCHMAP(x) ((x) << S_FW_IQ_CMD_FL0CNGCHMAP) +#define G_FW_IQ_CMD_FL0CNGCHMAP(x) \ + (((x) >> S_FW_IQ_CMD_FL0CNGCHMAP) & M_FW_IQ_CMD_FL0CNGCHMAP) + +#define S_FW_IQ_CMD_FL0DATARO 12 +#define M_FW_IQ_CMD_FL0DATARO 0x1 +#define V_FW_IQ_CMD_FL0DATARO(x) ((x) << S_FW_IQ_CMD_FL0DATARO) +#define G_FW_IQ_CMD_FL0DATARO(x) \ + (((x) >> S_FW_IQ_CMD_FL0DATARO) & M_FW_IQ_CMD_FL0DATARO) +#define F_FW_IQ_CMD_FL0DATARO V_FW_IQ_CMD_FL0DATARO(1U) + +#define S_FW_IQ_CMD_FL0CONGCIF 11 +#define M_FW_IQ_CMD_FL0CONGCIF 0x1 +#define V_FW_IQ_CMD_FL0CONGCIF(x) ((x) << S_FW_IQ_CMD_FL0CONGCIF) +#define G_FW_IQ_CMD_FL0CONGCIF(x) \ + (((x) >> S_FW_IQ_CMD_FL0CONGCIF) & M_FW_IQ_CMD_FL0CONGCIF) +#define F_FW_IQ_CMD_FL0CONGCIF V_FW_IQ_CMD_FL0CONGCIF(1U) + +#define S_FW_IQ_CMD_FL0FETCHRO 6 +#define M_FW_IQ_CMD_FL0FETCHRO 0x1 +#define V_FW_IQ_CMD_FL0FETCHRO(x) ((x) << S_FW_IQ_CMD_FL0FETCHRO) +#define G_FW_IQ_CMD_FL0FETCHRO(x) \ + (((x) >> S_FW_IQ_CMD_FL0FETCHRO) & M_FW_IQ_CMD_FL0FETCHRO) +#define F_FW_IQ_CMD_FL0FETCHRO V_FW_IQ_CMD_FL0FETCHRO(1U) + +#define S_FW_IQ_CMD_FL0HOSTFCMODE 4 +#define M_FW_IQ_CMD_FL0HOSTFCMODE 0x3 +#define V_FW_IQ_CMD_FL0HOSTFCMODE(x) ((x) << S_FW_IQ_CMD_FL0HOSTFCMODE) +#define G_FW_IQ_CMD_FL0HOSTFCMODE(x) \ + (((x) >> S_FW_IQ_CMD_FL0HOSTFCMODE) & M_FW_IQ_CMD_FL0HOSTFCMODE) + +#define S_FW_IQ_CMD_FL0PADEN 2 +#define M_FW_IQ_CMD_FL0PADEN 0x1 +#define V_FW_IQ_CMD_FL0PADEN(x) ((x) << S_FW_IQ_CMD_FL0PADEN) +#define G_FW_IQ_CMD_FL0PADEN(x) \ + (((x) >> S_FW_IQ_CMD_FL0PADEN) & M_FW_IQ_CMD_FL0PADEN) +#define F_FW_IQ_CMD_FL0PADEN V_FW_IQ_CMD_FL0PADEN(1U) + +#define S_FW_IQ_CMD_FL0PACKEN 1 
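These headers use one bit-field convention throughout: S_<FIELD> is the field's bit offset within its 32-bit flit, M_<FIELD> the right-justified mask, V_<FIELD>(x) shifts a value into position, G_<FIELD>(x) extracts it again, and F_<FIELD> is the single-bit flag form. A minimal standalone sketch of packing and unpacking the op_to_vfn flit of an FW_IQ_CMD with these helpers follows; the handful of definitions are repeated from this header so the snippet compiles on its own, the PF number is a made-up example value, and a real driver would additionally convert the flit to big-endian before posting the command to the firmware mailbox.

#include <stdint.h>
#include <stdio.h>

/* Field helpers repeated from t4fw_interface.h so this sketch builds alone. */
#define S_FW_CMD_OP		24
#define M_FW_CMD_OP		0xff
#define V_FW_CMD_OP(x)		((x) << S_FW_CMD_OP)
#define G_FW_CMD_OP(x)		(((x) >> S_FW_CMD_OP) & M_FW_CMD_OP)

#define S_FW_CMD_REQUEST	23
#define V_FW_CMD_REQUEST(x)	((x) << S_FW_CMD_REQUEST)
#define F_FW_CMD_REQUEST	V_FW_CMD_REQUEST(1U)

#define S_FW_IQ_CMD_PFN		8
#define M_FW_IQ_CMD_PFN		0x7
#define V_FW_IQ_CMD_PFN(x)	((x) << S_FW_IQ_CMD_PFN)
#define G_FW_IQ_CMD_PFN(x)	(((x) >> S_FW_IQ_CMD_PFN) & M_FW_IQ_CMD_PFN)

#define FW_IQ_CMD		0x10	/* from enum fw_cmd_opcodes */

int main(void)
{
	/* Pack: opcode, REQUEST flag and a made-up PF number 4 into flit 0. */
	uint32_t op_to_vfn = V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
			     V_FW_IQ_CMD_PFN(4);

	/* Unpack: each G_ macro undoes the matching V_ macro. */
	printf("op=0x%x pfn=%u\n",
	       G_FW_CMD_OP(op_to_vfn), G_FW_IQ_CMD_PFN(op_to_vfn));
	return 0;
}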
+#define M_FW_IQ_CMD_FL0PACKEN 0x1 +#define V_FW_IQ_CMD_FL0PACKEN(x) ((x) << S_FW_IQ_CMD_FL0PACKEN) +#define G_FW_IQ_CMD_FL0PACKEN(x) \ + (((x) >> S_FW_IQ_CMD_FL0PACKEN) & M_FW_IQ_CMD_FL0PACKEN) +#define F_FW_IQ_CMD_FL0PACKEN V_FW_IQ_CMD_FL0PACKEN(1U) + +#define S_FW_IQ_CMD_FL0CONGEN 0 +#define M_FW_IQ_CMD_FL0CONGEN 0x1 +#define V_FW_IQ_CMD_FL0CONGEN(x) ((x) << S_FW_IQ_CMD_FL0CONGEN) +#define G_FW_IQ_CMD_FL0CONGEN(x) \ + (((x) >> S_FW_IQ_CMD_FL0CONGEN) & M_FW_IQ_CMD_FL0CONGEN) +#define F_FW_IQ_CMD_FL0CONGEN V_FW_IQ_CMD_FL0CONGEN(1U) + +#define S_FW_IQ_CMD_FL0FBMIN 7 +#define M_FW_IQ_CMD_FL0FBMIN 0x7 +#define V_FW_IQ_CMD_FL0FBMIN(x) ((x) << S_FW_IQ_CMD_FL0FBMIN) +#define G_FW_IQ_CMD_FL0FBMIN(x) \ + (((x) >> S_FW_IQ_CMD_FL0FBMIN) & M_FW_IQ_CMD_FL0FBMIN) + +#define S_FW_IQ_CMD_FL0FBMAX 4 +#define M_FW_IQ_CMD_FL0FBMAX 0x7 +#define V_FW_IQ_CMD_FL0FBMAX(x) ((x) << S_FW_IQ_CMD_FL0FBMAX) +#define G_FW_IQ_CMD_FL0FBMAX(x) \ + (((x) >> S_FW_IQ_CMD_FL0FBMAX) & M_FW_IQ_CMD_FL0FBMAX) + +struct fw_eq_eth_cmd { + __be32 op_to_vfn; + __be32 alloc_to_len16; + __be32 eqid_pkd; + __be32 physeqid_pkd; + __be32 fetchszm_to_iqid; + __be32 dcaen_to_eqsize; + __be64 eqaddr; + __be32 autoequiqe_to_viid; + __be32 r8_lo; + __be64 r9; +}; + +#define S_FW_EQ_ETH_CMD_PFN 8 +#define M_FW_EQ_ETH_CMD_PFN 0x7 +#define V_FW_EQ_ETH_CMD_PFN(x) ((x) << S_FW_EQ_ETH_CMD_PFN) +#define G_FW_EQ_ETH_CMD_PFN(x) \ + (((x) >> S_FW_EQ_ETH_CMD_PFN) & M_FW_EQ_ETH_CMD_PFN) + +#define S_FW_EQ_ETH_CMD_VFN 0 +#define M_FW_EQ_ETH_CMD_VFN 0xff +#define V_FW_EQ_ETH_CMD_VFN(x) ((x) << S_FW_EQ_ETH_CMD_VFN) +#define G_FW_EQ_ETH_CMD_VFN(x) \ + (((x) >> S_FW_EQ_ETH_CMD_VFN) & M_FW_EQ_ETH_CMD_VFN) + +#define S_FW_EQ_ETH_CMD_ALLOC 31 +#define M_FW_EQ_ETH_CMD_ALLOC 0x1 +#define V_FW_EQ_ETH_CMD_ALLOC(x) ((x) << S_FW_EQ_ETH_CMD_ALLOC) +#define G_FW_EQ_ETH_CMD_ALLOC(x) \ + (((x) >> S_FW_EQ_ETH_CMD_ALLOC) & M_FW_EQ_ETH_CMD_ALLOC) +#define F_FW_EQ_ETH_CMD_ALLOC V_FW_EQ_ETH_CMD_ALLOC(1U) + +#define S_FW_EQ_ETH_CMD_FREE 30 +#define M_FW_EQ_ETH_CMD_FREE 0x1 +#define V_FW_EQ_ETH_CMD_FREE(x) ((x) << S_FW_EQ_ETH_CMD_FREE) +#define G_FW_EQ_ETH_CMD_FREE(x) \ + (((x) >> S_FW_EQ_ETH_CMD_FREE) & M_FW_EQ_ETH_CMD_FREE) +#define F_FW_EQ_ETH_CMD_FREE V_FW_EQ_ETH_CMD_FREE(1U) + +#define S_FW_EQ_ETH_CMD_EQSTART 28 +#define M_FW_EQ_ETH_CMD_EQSTART 0x1 +#define V_FW_EQ_ETH_CMD_EQSTART(x) ((x) << S_FW_EQ_ETH_CMD_EQSTART) +#define G_FW_EQ_ETH_CMD_EQSTART(x) \ + (((x) >> S_FW_EQ_ETH_CMD_EQSTART) & M_FW_EQ_ETH_CMD_EQSTART) +#define F_FW_EQ_ETH_CMD_EQSTART V_FW_EQ_ETH_CMD_EQSTART(1U) + +#define S_FW_EQ_ETH_CMD_EQID 0 +#define M_FW_EQ_ETH_CMD_EQID 0xfffff +#define V_FW_EQ_ETH_CMD_EQID(x) ((x) << S_FW_EQ_ETH_CMD_EQID) +#define G_FW_EQ_ETH_CMD_EQID(x) \ + (((x) >> S_FW_EQ_ETH_CMD_EQID) & M_FW_EQ_ETH_CMD_EQID) + +#define S_FW_EQ_ETH_CMD_PHYSEQID 0 +#define M_FW_EQ_ETH_CMD_PHYSEQID 0xfffff +#define G_FW_EQ_ETH_CMD_PHYSEQID(x) \ + (((x) >> S_FW_EQ_ETH_CMD_PHYSEQID) & M_FW_EQ_ETH_CMD_PHYSEQID) + +#define S_FW_EQ_ETH_CMD_FETCHRO 22 +#define M_FW_EQ_ETH_CMD_FETCHRO 0x1 +#define V_FW_EQ_ETH_CMD_FETCHRO(x) ((x) << S_FW_EQ_ETH_CMD_FETCHRO) +#define G_FW_EQ_ETH_CMD_FETCHRO(x) \ + (((x) >> S_FW_EQ_ETH_CMD_FETCHRO) & M_FW_EQ_ETH_CMD_FETCHRO) +#define F_FW_EQ_ETH_CMD_FETCHRO V_FW_EQ_ETH_CMD_FETCHRO(1U) + +#define S_FW_EQ_ETH_CMD_HOSTFCMODE 20 +#define M_FW_EQ_ETH_CMD_HOSTFCMODE 0x3 +#define V_FW_EQ_ETH_CMD_HOSTFCMODE(x) ((x) << S_FW_EQ_ETH_CMD_HOSTFCMODE) +#define G_FW_EQ_ETH_CMD_HOSTFCMODE(x) \ + (((x) >> S_FW_EQ_ETH_CMD_HOSTFCMODE) & M_FW_EQ_ETH_CMD_HOSTFCMODE) + +#define S_FW_EQ_ETH_CMD_PCIECHN 
16 +#define M_FW_EQ_ETH_CMD_PCIECHN 0x3 +#define V_FW_EQ_ETH_CMD_PCIECHN(x) ((x) << S_FW_EQ_ETH_CMD_PCIECHN) +#define G_FW_EQ_ETH_CMD_PCIECHN(x) \ + (((x) >> S_FW_EQ_ETH_CMD_PCIECHN) & M_FW_EQ_ETH_CMD_PCIECHN) + +#define S_FW_EQ_ETH_CMD_IQID 0 +#define M_FW_EQ_ETH_CMD_IQID 0xffff +#define V_FW_EQ_ETH_CMD_IQID(x) ((x) << S_FW_EQ_ETH_CMD_IQID) +#define G_FW_EQ_ETH_CMD_IQID(x) \ + (((x) >> S_FW_EQ_ETH_CMD_IQID) & M_FW_EQ_ETH_CMD_IQID) + +#define S_FW_EQ_ETH_CMD_FBMIN 23 +#define M_FW_EQ_ETH_CMD_FBMIN 0x7 +#define V_FW_EQ_ETH_CMD_FBMIN(x) ((x) << S_FW_EQ_ETH_CMD_FBMIN) +#define G_FW_EQ_ETH_CMD_FBMIN(x) \ + (((x) >> S_FW_EQ_ETH_CMD_FBMIN) & M_FW_EQ_ETH_CMD_FBMIN) + +#define S_FW_EQ_ETH_CMD_FBMAX 20 +#define M_FW_EQ_ETH_CMD_FBMAX 0x7 +#define V_FW_EQ_ETH_CMD_FBMAX(x) ((x) << S_FW_EQ_ETH_CMD_FBMAX) +#define G_FW_EQ_ETH_CMD_FBMAX(x) \ + (((x) >> S_FW_EQ_ETH_CMD_FBMAX) & M_FW_EQ_ETH_CMD_FBMAX) + +#define S_FW_EQ_ETH_CMD_CIDXFTHRESH 16 +#define M_FW_EQ_ETH_CMD_CIDXFTHRESH 0x7 +#define V_FW_EQ_ETH_CMD_CIDXFTHRESH(x) ((x) << S_FW_EQ_ETH_CMD_CIDXFTHRESH) +#define G_FW_EQ_ETH_CMD_CIDXFTHRESH(x) \ + (((x) >> S_FW_EQ_ETH_CMD_CIDXFTHRESH) & M_FW_EQ_ETH_CMD_CIDXFTHRESH) + +#define S_FW_EQ_ETH_CMD_EQSIZE 0 +#define M_FW_EQ_ETH_CMD_EQSIZE 0xffff +#define V_FW_EQ_ETH_CMD_EQSIZE(x) ((x) << S_FW_EQ_ETH_CMD_EQSIZE) +#define G_FW_EQ_ETH_CMD_EQSIZE(x) \ + (((x) >> S_FW_EQ_ETH_CMD_EQSIZE) & M_FW_EQ_ETH_CMD_EQSIZE) + +#define S_FW_EQ_ETH_CMD_AUTOEQUEQE 30 +#define M_FW_EQ_ETH_CMD_AUTOEQUEQE 0x1 +#define V_FW_EQ_ETH_CMD_AUTOEQUEQE(x) ((x) << S_FW_EQ_ETH_CMD_AUTOEQUEQE) +#define G_FW_EQ_ETH_CMD_AUTOEQUEQE(x) \ + (((x) >> S_FW_EQ_ETH_CMD_AUTOEQUEQE) & M_FW_EQ_ETH_CMD_AUTOEQUEQE) +#define F_FW_EQ_ETH_CMD_AUTOEQUEQE V_FW_EQ_ETH_CMD_AUTOEQUEQE(1U) + +#define S_FW_EQ_ETH_CMD_VIID 16 +#define M_FW_EQ_ETH_CMD_VIID 0xfff +#define V_FW_EQ_ETH_CMD_VIID(x) ((x) << S_FW_EQ_ETH_CMD_VIID) +#define G_FW_EQ_ETH_CMD_VIID(x) \ + (((x) >> S_FW_EQ_ETH_CMD_VIID) & M_FW_EQ_ETH_CMD_VIID) + +struct fw_eq_ctrl_cmd { + __be32 op_to_vfn; + __be32 alloc_to_len16; + __be32 cmpliqid_eqid; + __be32 physeqid_pkd; + __be32 fetchszm_to_iqid; + __be32 dcaen_to_eqsize; + __be64 eqaddr; +}; + +#define S_FW_EQ_CTRL_CMD_PFN 8 +#define V_FW_EQ_CTRL_CMD_PFN(x) ((x) << S_FW_EQ_CTRL_CMD_PFN) + +#define S_FW_EQ_CTRL_CMD_VFN 0 +#define V_FW_EQ_CTRL_CMD_VFN(x) ((x) << S_FW_EQ_CTRL_CMD_VFN) + +#define S_FW_EQ_CTRL_CMD_ALLOC 31 +#define V_FW_EQ_CTRL_CMD_ALLOC(x) ((x) << S_FW_EQ_CTRL_CMD_ALLOC) +#define F_FW_EQ_CTRL_CMD_ALLOC V_FW_EQ_CTRL_CMD_ALLOC(1U) + +#define S_FW_EQ_CTRL_CMD_FREE 30 +#define V_FW_EQ_CTRL_CMD_FREE(x) ((x) << S_FW_EQ_CTRL_CMD_FREE) +#define F_FW_EQ_CTRL_CMD_FREE V_FW_EQ_CTRL_CMD_FREE(1U) + +#define S_FW_EQ_CTRL_CMD_EQSTART 28 +#define V_FW_EQ_CTRL_CMD_EQSTART(x) ((x) << S_FW_EQ_CTRL_CMD_EQSTART) +#define F_FW_EQ_CTRL_CMD_EQSTART V_FW_EQ_CTRL_CMD_EQSTART(1U) + +#define S_FW_EQ_CTRL_CMD_CMPLIQID 20 +#define V_FW_EQ_CTRL_CMD_CMPLIQID(x) ((x) << S_FW_EQ_CTRL_CMD_CMPLIQID) + +#define S_FW_EQ_CTRL_CMD_EQID 0 +#define M_FW_EQ_CTRL_CMD_EQID 0xfffff +#define V_FW_EQ_CTRL_CMD_EQID(x) ((x) << S_FW_EQ_CTRL_CMD_EQID) +#define G_FW_EQ_CTRL_CMD_EQID(x) \ + (((x) >> S_FW_EQ_CTRL_CMD_EQID) & M_FW_EQ_CTRL_CMD_EQID) + +#define S_FW_EQ_CTRL_CMD_PHYSEQID 0 +#define M_FW_EQ_CTRL_CMD_PHYSEQID 0xfffff +#define V_FW_EQ_CTRL_CMD_PHYSEQID(x) ((x) << S_FW_EQ_CTRL_CMD_PHYSEQID) +#define G_FW_EQ_CTRL_CMD_PHYSEQID(x) \ + (((x) >> S_FW_EQ_CTRL_CMD_PHYSEQID) & M_FW_EQ_CTRL_CMD_PHYSEQID) + +#define S_FW_EQ_CTRL_CMD_FETCHRO 22 +#define V_FW_EQ_CTRL_CMD_FETCHRO(x) ((x) << 
S_FW_EQ_CTRL_CMD_FETCHRO) +#define F_FW_EQ_CTRL_CMD_FETCHRO V_FW_EQ_CTRL_CMD_FETCHRO(1U) + +#define S_FW_EQ_CTRL_CMD_HOSTFCMODE 20 +#define M_FW_EQ_CTRL_CMD_HOSTFCMODE 0x3 +#define V_FW_EQ_CTRL_CMD_HOSTFCMODE(x) ((x) << S_FW_EQ_CTRL_CMD_HOSTFCMODE) + +#define S_FW_EQ_CTRL_CMD_PCIECHN 16 +#define V_FW_EQ_CTRL_CMD_PCIECHN(x) ((x) << S_FW_EQ_CTRL_CMD_PCIECHN) + +#define S_FW_EQ_CTRL_CMD_IQID 0 +#define V_FW_EQ_CTRL_CMD_IQID(x) ((x) << S_FW_EQ_CTRL_CMD_IQID) + +#define S_FW_EQ_CTRL_CMD_FBMIN 23 +#define V_FW_EQ_CTRL_CMD_FBMIN(x) ((x) << S_FW_EQ_CTRL_CMD_FBMIN) + +#define S_FW_EQ_CTRL_CMD_FBMAX 20 +#define V_FW_EQ_CTRL_CMD_FBMAX(x) ((x) << S_FW_EQ_CTRL_CMD_FBMAX) + +#define S_FW_EQ_CTRL_CMD_CIDXFTHRESH 16 +#define V_FW_EQ_CTRL_CMD_CIDXFTHRESH(x) ((x) << S_FW_EQ_CTRL_CMD_CIDXFTHRESH) + +#define S_FW_EQ_CTRL_CMD_EQSIZE 0 +#define V_FW_EQ_CTRL_CMD_EQSIZE(x) ((x) << S_FW_EQ_CTRL_CMD_EQSIZE) + +enum fw_vi_func { + FW_VI_FUNC_ETH, +}; + +/* Macros for VIID parsing: + * VIID - [10:8] PFN, [7] VI Valid, [6:0] VI number + */ + +#define S_FW_VIID_VIVLD 7 +#define M_FW_VIID_VIVLD 0x1 +#define G_FW_VIID_VIVLD(x) (((x) >> S_FW_VIID_VIVLD) & M_FW_VIID_VIVLD) + +#define S_FW_VIID_VIN 0 +#define M_FW_VIID_VIN 0x7F +#define G_FW_VIID_VIN(x) (((x) >> S_FW_VIID_VIN) & M_FW_VIID_VIN) + +struct fw_vi_cmd { + __be32 op_to_vfn; + __be32 alloc_to_len16; + __be16 type_to_viid; + __u8 mac[6]; + __u8 portid_pkd; + __u8 nmac; + __u8 nmac0[6]; + __be16 norss_rsssize; + __u8 nmac1[6]; + __be16 idsiiq_pkd; + __u8 nmac2[6]; + __be16 idseiq_pkd; + __u8 nmac3[6]; + __be64 r9; + __be64 r10; +}; + +#define S_FW_VI_CMD_PFN 8 +#define M_FW_VI_CMD_PFN 0x7 +#define V_FW_VI_CMD_PFN(x) ((x) << S_FW_VI_CMD_PFN) +#define G_FW_VI_CMD_PFN(x) (((x) >> S_FW_VI_CMD_PFN) & M_FW_VI_CMD_PFN) + +#define S_FW_VI_CMD_VFN 0 +#define M_FW_VI_CMD_VFN 0xff +#define V_FW_VI_CMD_VFN(x) ((x) << S_FW_VI_CMD_VFN) +#define G_FW_VI_CMD_VFN(x) (((x) >> S_FW_VI_CMD_VFN) & M_FW_VI_CMD_VFN) + +#define S_FW_VI_CMD_ALLOC 31 +#define M_FW_VI_CMD_ALLOC 0x1 +#define V_FW_VI_CMD_ALLOC(x) ((x) << S_FW_VI_CMD_ALLOC) +#define G_FW_VI_CMD_ALLOC(x) \ + (((x) >> S_FW_VI_CMD_ALLOC) & M_FW_VI_CMD_ALLOC) +#define F_FW_VI_CMD_ALLOC V_FW_VI_CMD_ALLOC(1U) + +#define S_FW_VI_CMD_FREE 30 +#define M_FW_VI_CMD_FREE 0x1 +#define V_FW_VI_CMD_FREE(x) ((x) << S_FW_VI_CMD_FREE) +#define G_FW_VI_CMD_FREE(x) (((x) >> S_FW_VI_CMD_FREE) & M_FW_VI_CMD_FREE) +#define F_FW_VI_CMD_FREE V_FW_VI_CMD_FREE(1U) + +#define S_FW_VI_CMD_VFVLD 24 +#define M_FW_VI_CMD_VFVLD 0x1 +#define G_FW_VI_CMD_VFVLD(x) \ + (((x) >> S_FW_VI_CMD_VFVLD) & M_FW_VI_CMD_VFVLD) + +#define S_FW_VI_CMD_VIN 16 +#define M_FW_VI_CMD_VIN 0xff +#define G_FW_VI_CMD_VIN(x) \ + (((x) >> S_FW_VI_CMD_VIN) & M_FW_VI_CMD_VIN) + +#define S_FW_VI_CMD_TYPE 15 +#define M_FW_VI_CMD_TYPE 0x1 +#define V_FW_VI_CMD_TYPE(x) ((x) << S_FW_VI_CMD_TYPE) +#define G_FW_VI_CMD_TYPE(x) (((x) >> S_FW_VI_CMD_TYPE) & M_FW_VI_CMD_TYPE) +#define F_FW_VI_CMD_TYPE V_FW_VI_CMD_TYPE(1U) + +#define S_FW_VI_CMD_FUNC 12 +#define M_FW_VI_CMD_FUNC 0x7 +#define V_FW_VI_CMD_FUNC(x) ((x) << S_FW_VI_CMD_FUNC) +#define G_FW_VI_CMD_FUNC(x) (((x) >> S_FW_VI_CMD_FUNC) & M_FW_VI_CMD_FUNC) + +#define S_FW_VI_CMD_VIID 0 +#define M_FW_VI_CMD_VIID 0xfff +#define V_FW_VI_CMD_VIID(x) ((x) << S_FW_VI_CMD_VIID) +#define G_FW_VI_CMD_VIID(x) (((x) >> S_FW_VI_CMD_VIID) & M_FW_VI_CMD_VIID) + +#define S_FW_VI_CMD_PORTID 4 +#define M_FW_VI_CMD_PORTID 0xf +#define V_FW_VI_CMD_PORTID(x) ((x) << S_FW_VI_CMD_PORTID) +#define G_FW_VI_CMD_PORTID(x) \ + (((x) >> S_FW_VI_CMD_PORTID) & 
M_FW_VI_CMD_PORTID) + +#define S_FW_VI_CMD_RSSSIZE 0 +#define M_FW_VI_CMD_RSSSIZE 0x7ff +#define V_FW_VI_CMD_RSSSIZE(x) ((x) << S_FW_VI_CMD_RSSSIZE) +#define G_FW_VI_CMD_RSSSIZE(x) \ + (((x) >> S_FW_VI_CMD_RSSSIZE) & M_FW_VI_CMD_RSSSIZE) + +/* Special VI_MAC command index ids */ +#define FW_VI_MAC_ADD_MAC 0x3FF +#define FW_VI_MAC_ADD_PERSIST_MAC 0x3FE +#define FW_VI_MAC_ID_BASED_FREE 0x3FC + +enum fw_vi_mac_smac { + FW_VI_MAC_MPS_TCAM_ENTRY = 0x0, + FW_VI_MAC_SMT_AND_MPSTCAM = 0x3 +}; + +enum fw_vi_mac_entry_types { + FW_VI_MAC_TYPE_RAW = 0x2, +}; + +struct fw_vi_mac_cmd { + __be32 op_to_viid; + __be32 freemacs_to_len16; + union fw_vi_mac { + struct fw_vi_mac_exact { + __be16 valid_to_idx; + __u8 macaddr[6]; + } exact[7]; + struct fw_vi_mac_hash { + __be64 hashvec; + } hash; + struct fw_vi_mac_raw { + __be32 raw_idx_pkd; + __be32 data0_pkd; + __be32 data1[2]; + __be64 data0m_pkd; + __be32 data1m[2]; + } raw; + } u; +}; + +#define S_FW_VI_MAC_CMD_VIID 0 +#define M_FW_VI_MAC_CMD_VIID 0xfff +#define V_FW_VI_MAC_CMD_VIID(x) ((x) << S_FW_VI_MAC_CMD_VIID) +#define G_FW_VI_MAC_CMD_VIID(x) \ + (((x) >> S_FW_VI_MAC_CMD_VIID) & M_FW_VI_MAC_CMD_VIID) + +#define S_FW_VI_MAC_CMD_FREEMACS 31 +#define V_FW_VI_MAC_CMD_FREEMACS(x) ((x) << S_FW_VI_MAC_CMD_FREEMACS) + +#define S_FW_VI_MAC_CMD_ENTRY_TYPE 23 +#define V_FW_VI_MAC_CMD_ENTRY_TYPE(x) ((x) << S_FW_VI_MAC_CMD_ENTRY_TYPE) + +#define S_FW_VI_MAC_CMD_VALID 15 +#define M_FW_VI_MAC_CMD_VALID 0x1 +#define V_FW_VI_MAC_CMD_VALID(x) ((x) << S_FW_VI_MAC_CMD_VALID) +#define G_FW_VI_MAC_CMD_VALID(x) \ + (((x) >> S_FW_VI_MAC_CMD_VALID) & M_FW_VI_MAC_CMD_VALID) +#define F_FW_VI_MAC_CMD_VALID V_FW_VI_MAC_CMD_VALID(1U) + +#define S_FW_VI_MAC_CMD_SMAC_RESULT 10 +#define M_FW_VI_MAC_CMD_SMAC_RESULT 0x3 +#define V_FW_VI_MAC_CMD_SMAC_RESULT(x) ((x) << S_FW_VI_MAC_CMD_SMAC_RESULT) +#define G_FW_VI_MAC_CMD_SMAC_RESULT(x) \ + (((x) >> S_FW_VI_MAC_CMD_SMAC_RESULT) & M_FW_VI_MAC_CMD_SMAC_RESULT) + +#define S_FW_VI_MAC_CMD_IDX 0 +#define M_FW_VI_MAC_CMD_IDX 0x3ff +#define V_FW_VI_MAC_CMD_IDX(x) ((x) << S_FW_VI_MAC_CMD_IDX) +#define G_FW_VI_MAC_CMD_IDX(x) \ + (((x) >> S_FW_VI_MAC_CMD_IDX) & M_FW_VI_MAC_CMD_IDX) + +#define S_FW_VI_MAC_CMD_RAW_IDX 16 +#define M_FW_VI_MAC_CMD_RAW_IDX 0xffff +#define V_FW_VI_MAC_CMD_RAW_IDX(x) ((x) << S_FW_VI_MAC_CMD_RAW_IDX) +#define G_FW_VI_MAC_CMD_RAW_IDX(x) \ + (((x) >> S_FW_VI_MAC_CMD_RAW_IDX) & M_FW_VI_MAC_CMD_RAW_IDX) + +struct fw_vi_rxmode_cmd { + __be32 op_to_viid; + __be32 retval_len16; + __be32 mtu_to_vlanexen; + __be32 r4_lo; +}; + +#define S_FW_VI_RXMODE_CMD_VIID 0 +#define M_FW_VI_RXMODE_CMD_VIID 0xfff +#define V_FW_VI_RXMODE_CMD_VIID(x) ((x) << S_FW_VI_RXMODE_CMD_VIID) +#define G_FW_VI_RXMODE_CMD_VIID(x) \ + (((x) >> S_FW_VI_RXMODE_CMD_VIID) & M_FW_VI_RXMODE_CMD_VIID) + +#define S_FW_VI_RXMODE_CMD_MTU 16 +#define M_FW_VI_RXMODE_CMD_MTU 0xffff +#define V_FW_VI_RXMODE_CMD_MTU(x) ((x) << S_FW_VI_RXMODE_CMD_MTU) +#define G_FW_VI_RXMODE_CMD_MTU(x) \ + (((x) >> S_FW_VI_RXMODE_CMD_MTU) & M_FW_VI_RXMODE_CMD_MTU) + +#define S_FW_VI_RXMODE_CMD_PROMISCEN 14 +#define M_FW_VI_RXMODE_CMD_PROMISCEN 0x3 +#define V_FW_VI_RXMODE_CMD_PROMISCEN(x) ((x) << S_FW_VI_RXMODE_CMD_PROMISCEN) +#define G_FW_VI_RXMODE_CMD_PROMISCEN(x) \ + (((x) >> S_FW_VI_RXMODE_CMD_PROMISCEN) & M_FW_VI_RXMODE_CMD_PROMISCEN) + +#define S_FW_VI_RXMODE_CMD_ALLMULTIEN 12 +#define M_FW_VI_RXMODE_CMD_ALLMULTIEN 0x3 +#define V_FW_VI_RXMODE_CMD_ALLMULTIEN(x) \ + ((x) << S_FW_VI_RXMODE_CMD_ALLMULTIEN) +#define G_FW_VI_RXMODE_CMD_ALLMULTIEN(x) \ + (((x) >> S_FW_VI_RXMODE_CMD_ALLMULTIEN) 
& M_FW_VI_RXMODE_CMD_ALLMULTIEN) + +#define S_FW_VI_RXMODE_CMD_BROADCASTEN 10 +#define M_FW_VI_RXMODE_CMD_BROADCASTEN 0x3 +#define V_FW_VI_RXMODE_CMD_BROADCASTEN(x) \ + ((x) << S_FW_VI_RXMODE_CMD_BROADCASTEN) +#define G_FW_VI_RXMODE_CMD_BROADCASTEN(x) \ + (((x) >> S_FW_VI_RXMODE_CMD_BROADCASTEN) & \ + M_FW_VI_RXMODE_CMD_BROADCASTEN) + +#define S_FW_VI_RXMODE_CMD_VLANEXEN 8 +#define M_FW_VI_RXMODE_CMD_VLANEXEN 0x3 +#define V_FW_VI_RXMODE_CMD_VLANEXEN(x) ((x) << S_FW_VI_RXMODE_CMD_VLANEXEN) +#define G_FW_VI_RXMODE_CMD_VLANEXEN(x) \ + (((x) >> S_FW_VI_RXMODE_CMD_VLANEXEN) & M_FW_VI_RXMODE_CMD_VLANEXEN) + +struct fw_vi_enable_cmd { + __be32 op_to_viid; + __be32 ien_to_len16; + __be16 blinkdur; + __be16 r3; + __be32 r4; +}; + +#define S_FW_VI_ENABLE_CMD_VIID 0 +#define M_FW_VI_ENABLE_CMD_VIID 0xfff +#define V_FW_VI_ENABLE_CMD_VIID(x) ((x) << S_FW_VI_ENABLE_CMD_VIID) +#define G_FW_VI_ENABLE_CMD_VIID(x) \ + (((x) >> S_FW_VI_ENABLE_CMD_VIID) & M_FW_VI_ENABLE_CMD_VIID) + +#define S_FW_VI_ENABLE_CMD_IEN 31 +#define M_FW_VI_ENABLE_CMD_IEN 0x1 +#define V_FW_VI_ENABLE_CMD_IEN(x) ((x) << S_FW_VI_ENABLE_CMD_IEN) +#define G_FW_VI_ENABLE_CMD_IEN(x) \ + (((x) >> S_FW_VI_ENABLE_CMD_IEN) & M_FW_VI_ENABLE_CMD_IEN) +#define F_FW_VI_ENABLE_CMD_IEN V_FW_VI_ENABLE_CMD_IEN(1U) + +#define S_FW_VI_ENABLE_CMD_EEN 30 +#define M_FW_VI_ENABLE_CMD_EEN 0x1 +#define V_FW_VI_ENABLE_CMD_EEN(x) ((x) << S_FW_VI_ENABLE_CMD_EEN) +#define G_FW_VI_ENABLE_CMD_EEN(x) \ + (((x) >> S_FW_VI_ENABLE_CMD_EEN) & M_FW_VI_ENABLE_CMD_EEN) +#define F_FW_VI_ENABLE_CMD_EEN V_FW_VI_ENABLE_CMD_EEN(1U) + +#define S_FW_VI_ENABLE_CMD_DCB_INFO 28 +#define M_FW_VI_ENABLE_CMD_DCB_INFO 0x1 +#define V_FW_VI_ENABLE_CMD_DCB_INFO(x) ((x) << S_FW_VI_ENABLE_CMD_DCB_INFO) +#define G_FW_VI_ENABLE_CMD_DCB_INFO(x) \ + (((x) >> S_FW_VI_ENABLE_CMD_DCB_INFO) & M_FW_VI_ENABLE_CMD_DCB_INFO) +#define F_FW_VI_ENABLE_CMD_DCB_INFO V_FW_VI_ENABLE_CMD_DCB_INFO(1U) + +/* VI VF stats offset definitions */ +#define VI_VF_NUM_STATS 16 + +/* VI PF stats offset definitions */ +#define VI_PF_NUM_STATS 17 +enum fw_vi_stats_pf_index { + FW_VI_PF_STAT_TX_BCAST_BYTES_IX, + FW_VI_PF_STAT_TX_BCAST_FRAMES_IX, + FW_VI_PF_STAT_TX_MCAST_BYTES_IX, + FW_VI_PF_STAT_TX_MCAST_FRAMES_IX, + FW_VI_PF_STAT_TX_UCAST_BYTES_IX, + FW_VI_PF_STAT_TX_UCAST_FRAMES_IX, + FW_VI_PF_STAT_TX_OFLD_BYTES_IX, + FW_VI_PF_STAT_TX_OFLD_FRAMES_IX, + FW_VI_PF_STAT_RX_BYTES_IX, + FW_VI_PF_STAT_RX_FRAMES_IX, + FW_VI_PF_STAT_RX_BCAST_BYTES_IX, + FW_VI_PF_STAT_RX_BCAST_FRAMES_IX, + FW_VI_PF_STAT_RX_MCAST_BYTES_IX, + FW_VI_PF_STAT_RX_MCAST_FRAMES_IX, + FW_VI_PF_STAT_RX_UCAST_BYTES_IX, + FW_VI_PF_STAT_RX_UCAST_FRAMES_IX, + FW_VI_PF_STAT_RX_ERR_FRAMES_IX +}; + +struct fw_vi_stats_cmd { + __be32 op_to_viid; + __be32 retval_len16; + union fw_vi_stats { + struct fw_vi_stats_ctl { + __be16 nstats_ix; + __be16 r6; + __be32 r7; + __be64 stat0; + __be64 stat1; + __be64 stat2; + __be64 stat3; + __be64 stat4; + __be64 stat5; + } ctl; + struct fw_vi_stats_pf { + __be64 tx_bcast_bytes; + __be64 tx_bcast_frames; + __be64 tx_mcast_bytes; + __be64 tx_mcast_frames; + __be64 tx_ucast_bytes; + __be64 tx_ucast_frames; + __be64 tx_offload_bytes; + __be64 tx_offload_frames; + __be64 rx_pf_bytes; + __be64 rx_pf_frames; + __be64 rx_bcast_bytes; + __be64 rx_bcast_frames; + __be64 rx_mcast_bytes; + __be64 rx_mcast_frames; + __be64 rx_ucast_bytes; + __be64 rx_ucast_frames; + __be64 rx_err_frames; + } pf; + struct fw_vi_stats_vf { + __be64 tx_bcast_bytes; + __be64 tx_bcast_frames; + __be64 tx_mcast_bytes; + __be64 tx_mcast_frames; + __be64 
tx_ucast_bytes; + __be64 tx_ucast_frames; + __be64 tx_drop_frames; + __be64 tx_offload_bytes; + __be64 tx_offload_frames; + __be64 rx_bcast_bytes; + __be64 rx_bcast_frames; + __be64 rx_mcast_bytes; + __be64 rx_mcast_frames; + __be64 rx_ucast_bytes; + __be64 rx_ucast_frames; + __be64 rx_err_frames; + } vf; + } u; +}; + +#define S_FW_VI_STATS_CMD_VIID 0 +#define V_FW_VI_STATS_CMD_VIID(x) ((x) << S_FW_VI_STATS_CMD_VIID) + +#define S_FW_VI_STATS_CMD_NSTATS 12 +#define V_FW_VI_STATS_CMD_NSTATS(x) ((x) << S_FW_VI_STATS_CMD_NSTATS) + +#define S_FW_VI_STATS_CMD_IX 0 +#define V_FW_VI_STATS_CMD_IX(x) ((x) << S_FW_VI_STATS_CMD_IX) + +/* old 16-bit port capabilities bitmap */ +enum fw_port_cap { + FW_PORT_CAP_SPEED_100M = 0x0001, + FW_PORT_CAP_SPEED_1G = 0x0002, + FW_PORT_CAP_SPEED_25G = 0x0004, + FW_PORT_CAP_SPEED_10G = 0x0008, + FW_PORT_CAP_SPEED_40G = 0x0010, + FW_PORT_CAP_SPEED_100G = 0x0020, + FW_PORT_CAP_FC_RX = 0x0040, + FW_PORT_CAP_FC_TX = 0x0080, + FW_PORT_CAP_ANEG = 0x0100, + FW_PORT_CAP_MDIX = 0x0200, + FW_PORT_CAP_MDIAUTO = 0x0400, + FW_PORT_CAP_FEC_RS = 0x0800, + FW_PORT_CAP_FEC_BASER_RS = 0x1000, + FW_PORT_CAP_FEC_RESERVED = 0x2000, + FW_PORT_CAP_802_3_PAUSE = 0x4000, + FW_PORT_CAP_802_3_ASM_DIR = 0x8000, +}; + +#define S_FW_PORT_CAP_SPEED 0 +#define M_FW_PORT_CAP_SPEED 0x3f +#define V_FW_PORT_CAP_SPEED(x) ((x) << S_FW_PORT_CAP_SPEED) +#define G_FW_PORT_CAP_SPEED(x) \ + (((x) >> S_FW_PORT_CAP_SPEED) & M_FW_PORT_CAP_SPEED) + +enum fw_port_mdi { + FW_PORT_CAP_MDI_AUTO, +}; + +#define S_FW_PORT_CAP_MDI 9 +#define M_FW_PORT_CAP_MDI 3 +#define V_FW_PORT_CAP_MDI(x) ((x) << S_FW_PORT_CAP_MDI) +#define G_FW_PORT_CAP_MDI(x) (((x) >> S_FW_PORT_CAP_MDI) & M_FW_PORT_CAP_MDI) + +/* new 32-bit port capabilities bitmap (fw_port_cap32_t) */ +#define FW_PORT_CAP32_SPEED_100M 0x00000001UL +#define FW_PORT_CAP32_SPEED_1G 0x00000002UL +#define FW_PORT_CAP32_SPEED_10G 0x00000004UL +#define FW_PORT_CAP32_SPEED_25G 0x00000008UL +#define FW_PORT_CAP32_SPEED_40G 0x00000010UL +#define FW_PORT_CAP32_SPEED_50G 0x00000020UL +#define FW_PORT_CAP32_SPEED_100G 0x00000040UL +#define FW_PORT_CAP32_FC_RX 0x00010000UL +#define FW_PORT_CAP32_FC_TX 0x00020000UL +#define FW_PORT_CAP32_802_3_PAUSE 0x00040000UL +#define FW_PORT_CAP32_802_3_ASM_DIR 0x00080000UL +#define FW_PORT_CAP32_ANEG 0x00100000UL +#define FW_PORT_CAP32_MDIX 0x00200000UL +#define FW_PORT_CAP32_MDIAUTO 0x00400000UL +#define FW_PORT_CAP32_FEC_RS 0x00800000UL +#define FW_PORT_CAP32_FEC_BASER_RS 0x01000000UL + +#define S_FW_PORT_CAP32_SPEED 0 +#define M_FW_PORT_CAP32_SPEED 0xfff +#define V_FW_PORT_CAP32_SPEED(x) ((x) << S_FW_PORT_CAP32_SPEED) +#define G_FW_PORT_CAP32_SPEED(x) \ + (((x) >> S_FW_PORT_CAP32_SPEED) & M_FW_PORT_CAP32_SPEED) + +enum fw_port_mdi32 { + FW_PORT_CAP32_MDI_AUTO, +}; + +#define S_FW_PORT_CAP32_MDI 21 +#define M_FW_PORT_CAP32_MDI 3 +#define V_FW_PORT_CAP32_MDI(x) ((x) << S_FW_PORT_CAP32_MDI) +#define G_FW_PORT_CAP32_MDI(x) \ + (((x) >> S_FW_PORT_CAP32_MDI) & M_FW_PORT_CAP32_MDI) + +enum fw_port_action { + FW_PORT_ACTION_L1_CFG = 0x0001, + FW_PORT_ACTION_GET_PORT_INFO = 0x0003, + FW_PORT_ACTION_L1_CFG32 = 0x0009, + FW_PORT_ACTION_GET_PORT_INFO32 = 0x000a, +}; + +struct fw_port_cmd { + __be32 op_to_portid; + __be32 action_to_len16; + union fw_port { + struct fw_port_l1cfg { + __be32 rcap; + __be32 r; + } l1cfg; + struct fw_port_l2cfg { + __u8 ctlbf; + __u8 ovlan3_to_ivlan0; + __be16 ivlantype; + __be16 txipg_force_pinfo; + __be16 mtu; + __be16 ovlan0mask; + __be16 ovlan0type; + __be16 ovlan1mask; + __be16 ovlan1type; + __be16 ovlan2mask; + 
__be16 ovlan2type; + __be16 ovlan3mask; + __be16 ovlan3type; + } l2cfg; + struct fw_port_info { + __be32 lstatus_to_modtype; + __be16 pcap; + __be16 acap; + __be16 mtu; + __u8 cbllen; + __u8 auxlinfo; + __u8 dcbxdis_pkd; + __u8 r8_lo; + __be16 lpacap; + __be64 r9; + } info; + struct fw_port_diags { + __u8 diagop; + __u8 r[3]; + __be32 diagval; + } diags; + union fw_port_dcb { + struct fw_port_dcb_pgid { + __u8 type; + __u8 apply_pkd; + __u8 r10_lo[2]; + __be32 pgid; + __be64 r11; + } pgid; + struct fw_port_dcb_pgrate { + __u8 type; + __u8 apply_pkd; + __u8 r10_lo[5]; + __u8 num_tcs_supported; + __u8 pgrate[8]; + __u8 tsa[8]; + } pgrate; + struct fw_port_dcb_priorate { + __u8 type; + __u8 apply_pkd; + __u8 r10_lo[6]; + __u8 strict_priorate[8]; + } priorate; + struct fw_port_dcb_pfc { + __u8 type; + __u8 pfcen; + __u8 r10[5]; + __u8 max_pfc_tcs; + __be64 r11; + } pfc; + struct fw_port_app_priority { + __u8 type; + __u8 r10[2]; + __u8 idx; + __u8 user_prio_map; + __u8 sel_field; + __be16 protocolid; + __be64 r12; + } app_priority; + struct fw_port_dcb_control { + __u8 type; + __u8 all_syncd_pkd; + __be16 dcb_version_to_app_state; + __be32 r11; + __be64 r12; + } control; + } dcb; + struct fw_port_l1cfg32 { + __be32 rcap32; + __be32 r; + } l1cfg32; + struct fw_port_info32 { + __be32 lstatus32_to_cbllen32; + __be32 auxlinfo32_mtu32; + __be32 linkattr32; + __be32 pcaps32; + __be32 acaps32; + __be32 lpacaps32; + } info32; + } u; +}; + +#define S_FW_PORT_CMD_PORTID 0 +#define M_FW_PORT_CMD_PORTID 0xf +#define V_FW_PORT_CMD_PORTID(x) ((x) << S_FW_PORT_CMD_PORTID) +#define G_FW_PORT_CMD_PORTID(x) \ + (((x) >> S_FW_PORT_CMD_PORTID) & M_FW_PORT_CMD_PORTID) + +#define S_FW_PORT_CMD_ACTION 16 +#define M_FW_PORT_CMD_ACTION 0xffff +#define V_FW_PORT_CMD_ACTION(x) ((x) << S_FW_PORT_CMD_ACTION) +#define G_FW_PORT_CMD_ACTION(x) \ + (((x) >> S_FW_PORT_CMD_ACTION) & M_FW_PORT_CMD_ACTION) + +#define S_FW_PORT_CMD_LSTATUS 31 +#define M_FW_PORT_CMD_LSTATUS 0x1 +#define V_FW_PORT_CMD_LSTATUS(x) ((x) << S_FW_PORT_CMD_LSTATUS) +#define G_FW_PORT_CMD_LSTATUS(x) \ + (((x) >> S_FW_PORT_CMD_LSTATUS) & M_FW_PORT_CMD_LSTATUS) +#define F_FW_PORT_CMD_LSTATUS V_FW_PORT_CMD_LSTATUS(1U) + +#define S_FW_PORT_CMD_LSPEED 24 +#define M_FW_PORT_CMD_LSPEED 0x3f +#define V_FW_PORT_CMD_LSPEED(x) ((x) << S_FW_PORT_CMD_LSPEED) +#define G_FW_PORT_CMD_LSPEED(x) \ + (((x) >> S_FW_PORT_CMD_LSPEED) & M_FW_PORT_CMD_LSPEED) + +#define S_FW_PORT_CMD_TXPAUSE 23 +#define M_FW_PORT_CMD_TXPAUSE 0x1 +#define V_FW_PORT_CMD_TXPAUSE(x) ((x) << S_FW_PORT_CMD_TXPAUSE) +#define G_FW_PORT_CMD_TXPAUSE(x) \ + (((x) >> S_FW_PORT_CMD_TXPAUSE) & M_FW_PORT_CMD_TXPAUSE) +#define F_FW_PORT_CMD_TXPAUSE V_FW_PORT_CMD_TXPAUSE(1U) + +#define S_FW_PORT_CMD_RXPAUSE 22 +#define M_FW_PORT_CMD_RXPAUSE 0x1 +#define V_FW_PORT_CMD_RXPAUSE(x) ((x) << S_FW_PORT_CMD_RXPAUSE) +#define G_FW_PORT_CMD_RXPAUSE(x) \ + (((x) >> S_FW_PORT_CMD_RXPAUSE) & M_FW_PORT_CMD_RXPAUSE) +#define F_FW_PORT_CMD_RXPAUSE V_FW_PORT_CMD_RXPAUSE(1U) + +#define S_FW_PORT_CMD_MDIOCAP 21 +#define M_FW_PORT_CMD_MDIOCAP 0x1 +#define V_FW_PORT_CMD_MDIOCAP(x) ((x) << S_FW_PORT_CMD_MDIOCAP) +#define G_FW_PORT_CMD_MDIOCAP(x) \ + (((x) >> S_FW_PORT_CMD_MDIOCAP) & M_FW_PORT_CMD_MDIOCAP) +#define F_FW_PORT_CMD_MDIOCAP V_FW_PORT_CMD_MDIOCAP(1U) + +#define S_FW_PORT_CMD_MDIOADDR 16 +#define M_FW_PORT_CMD_MDIOADDR 0x1f +#define V_FW_PORT_CMD_MDIOADDR(x) ((x) << S_FW_PORT_CMD_MDIOADDR) +#define G_FW_PORT_CMD_MDIOADDR(x) \ + (((x) >> S_FW_PORT_CMD_MDIOADDR) & M_FW_PORT_CMD_MDIOADDR) + +#define S_FW_PORT_CMD_PTYPE 8 
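For FW_PORT_ACTION_GET_PORT_INFO replies, link state, speed and pause settings all arrive packed into the lstatus_to_modtype word of fw_port_info, and the G_FW_PORT_CMD_* accessors above take that word apart. A small self-contained sketch of the decoding follows; the definitions are repeated from this header so it builds standalone, and the example word is a made-up value standing in for a byte-swapped firmware reply.

#include <stdint.h>
#include <stdio.h>

/* Accessors repeated from the fw_port_cmd definitions above. */
#define S_FW_PORT_CMD_LSTATUS	31
#define M_FW_PORT_CMD_LSTATUS	0x1
#define G_FW_PORT_CMD_LSTATUS(x) \
	(((x) >> S_FW_PORT_CMD_LSTATUS) & M_FW_PORT_CMD_LSTATUS)

#define S_FW_PORT_CMD_LSPEED	24
#define M_FW_PORT_CMD_LSPEED	0x3f
#define G_FW_PORT_CMD_LSPEED(x) \
	(((x) >> S_FW_PORT_CMD_LSPEED) & M_FW_PORT_CMD_LSPEED)

#define S_FW_PORT_CMD_TXPAUSE	23
#define M_FW_PORT_CMD_TXPAUSE	0x1
#define G_FW_PORT_CMD_TXPAUSE(x) \
	(((x) >> S_FW_PORT_CMD_TXPAUSE) & M_FW_PORT_CMD_TXPAUSE)

#define FW_PORT_CAP_SPEED_10G	0x0008	/* from enum fw_port_cap */

int main(void)
{
	/*
	 * Made-up lstatus_to_modtype value as it would look after byte-swapping
	 * a GET_PORT_INFO reply to host order: link up, 10G reported in the
	 * lspeed field, TX pause enabled.
	 */
	uint32_t lstatus = (1U << S_FW_PORT_CMD_LSTATUS) |
			   ((uint32_t)FW_PORT_CAP_SPEED_10G << S_FW_PORT_CMD_LSPEED) |
			   (1U << S_FW_PORT_CMD_TXPAUSE);

	printf("link=%u speed_bits=0x%x txpause=%u\n",
	       G_FW_PORT_CMD_LSTATUS(lstatus),
	       G_FW_PORT_CMD_LSPEED(lstatus),
	       G_FW_PORT_CMD_TXPAUSE(lstatus));
	return 0;
}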
+#define M_FW_PORT_CMD_PTYPE 0x1f +#define V_FW_PORT_CMD_PTYPE(x) ((x) << S_FW_PORT_CMD_PTYPE) +#define G_FW_PORT_CMD_PTYPE(x) \ + (((x) >> S_FW_PORT_CMD_PTYPE) & M_FW_PORT_CMD_PTYPE) + +#define S_FW_PORT_CMD_LINKDNRC 5 +#define M_FW_PORT_CMD_LINKDNRC 0x7 +#define V_FW_PORT_CMD_LINKDNRC(x) ((x) << S_FW_PORT_CMD_LINKDNRC) +#define G_FW_PORT_CMD_LINKDNRC(x) \ + (((x) >> S_FW_PORT_CMD_LINKDNRC) & M_FW_PORT_CMD_LINKDNRC) + +#define S_FW_PORT_CMD_MODTYPE 0 +#define M_FW_PORT_CMD_MODTYPE 0x1f +#define V_FW_PORT_CMD_MODTYPE(x) ((x) << S_FW_PORT_CMD_MODTYPE) +#define G_FW_PORT_CMD_MODTYPE(x) \ + (((x) >> S_FW_PORT_CMD_MODTYPE) & M_FW_PORT_CMD_MODTYPE) + +#define S_FW_PORT_CMD_LSTATUS32 31 +#define M_FW_PORT_CMD_LSTATUS32 0x1 +#define V_FW_PORT_CMD_LSTATUS32(x) ((x) << S_FW_PORT_CMD_LSTATUS32) +#define F_FW_PORT_CMD_LSTATUS32 V_FW_PORT_CMD_LSTATUS32(1U) + +#define S_FW_PORT_CMD_LINKDNRC32 28 +#define M_FW_PORT_CMD_LINKDNRC32 0x7 +#define G_FW_PORT_CMD_LINKDNRC32(x) \ + (((x) >> S_FW_PORT_CMD_LINKDNRC32) & M_FW_PORT_CMD_LINKDNRC32) + +#define S_FW_PORT_CMD_MDIOCAP32 26 +#define M_FW_PORT_CMD_MDIOCAP32 0x1 +#define V_FW_PORT_CMD_MDIOCAP32(x) ((x) << S_FW_PORT_CMD_MDIOCAP32) +#define F_FW_PORT_CMD_MDIOCAP32 V_FW_PORT_CMD_MDIOCAP32(1U) + +#define S_FW_PORT_CMD_MDIOADDR32 21 +#define M_FW_PORT_CMD_MDIOADDR32 0x1f +#define G_FW_PORT_CMD_MDIOADDR32(x) \ + (((x) >> S_FW_PORT_CMD_MDIOADDR32) & M_FW_PORT_CMD_MDIOADDR32) + +#define S_FW_PORT_CMD_PORTTYPE32 13 +#define M_FW_PORT_CMD_PORTTYPE32 0xff +#define G_FW_PORT_CMD_PORTTYPE32(x) \ + (((x) >> S_FW_PORT_CMD_PORTTYPE32) & M_FW_PORT_CMD_PORTTYPE32) + +#define S_FW_PORT_CMD_MODTYPE32 8 +#define M_FW_PORT_CMD_MODTYPE32 0x1f +#define G_FW_PORT_CMD_MODTYPE32(x) \ + (((x) >> S_FW_PORT_CMD_MODTYPE32) & M_FW_PORT_CMD_MODTYPE32) + +/* + * These are configured into the VPD and hence tools that generate + * VPD may use this enumeration. + * extPHY #lanes T4_I2C extI2C BP_Eq BP_ANEG Speed + * + * REMEMBER: + * Update the Common Code t4_hw.c:t4_get_port_type_description() + * with any new Firmware Port Technology Types! + */ +enum fw_port_type { + FW_PORT_TYPE_FIBER_XFI = 0, /* Y, 1, N, Y, N, N, 10G */ + FW_PORT_TYPE_FIBER_XAUI = 1, /* Y, 4, N, Y, N, N, 10G */ + FW_PORT_TYPE_BT_SGMII = 2, /* Y, 1, No, No, No, No, 1G/100M */ + FW_PORT_TYPE_BT_XFI = 3, /* Y, 1, No, No, No, No, 10G */ + FW_PORT_TYPE_BT_XAUI = 4, /* Y, 4, No, No, No, No, 10G/1G/100M? 
*/ + FW_PORT_TYPE_KX4 = 5, /* No, 4, No, No, Yes, Yes, 10G */ + FW_PORT_TYPE_CX4 = 6, /* No, 4, No, No, No, No, 10G */ + FW_PORT_TYPE_KX = 7, /* No, 1, No, No, Yes, No, 1G */ + FW_PORT_TYPE_KR = 8, /* No, 1, No, No, Yes, Yes, 10G */ + FW_PORT_TYPE_SFP = 9, /* No, 1, Yes, No, No, No, 10G */ + FW_PORT_TYPE_BP_AP = 10, + /* No, 1, No, No, Yes, Yes, 10G, BP ANGE */ + FW_PORT_TYPE_BP4_AP = 11, + /* No, 4, No, No, Yes, Yes, 10G, BP ANGE */ + FW_PORT_TYPE_QSFP_10G = 12, /* No, 1, Yes, No, No, No, 10G */ + FW_PORT_TYPE_QSA = 13, /* No, 1, Yes, No, No, No, 10G */ + FW_PORT_TYPE_QSFP = 14, /* No, 4, Yes, No, No, No, 40G */ + FW_PORT_TYPE_BP40_BA = 15, + /* No, 4, No, No, Yes, Yes, 40G/10G/1G, BP ANGE */ + FW_PORT_TYPE_KR4_100G = 16, /* No, 4, 100G/40G/25G, Backplane */ + FW_PORT_TYPE_CR4_QSFP = 17, /* No, 4, 100G/40G/25G */ + FW_PORT_TYPE_CR_QSFP = 18, /* No, 1, 25G Spider cable */ + FW_PORT_TYPE_CR2_QSFP = 19, /* No, 2, 50G */ + FW_PORT_TYPE_SFP28 = 20, /* No, 1, 25G/10G/1G */ + FW_PORT_TYPE_KR_SFP28 = 21, /* No, 1, 25G/10G/1G using Backplane */ + FW_PORT_TYPE_NONE = M_FW_PORT_CMD_PTYPE +}; + +/* These are read from module's EEPROM and determined once the + * module is inserted. + */ +enum fw_port_module_type { + FW_PORT_MOD_TYPE_NA = 0x0, + FW_PORT_MOD_TYPE_LR = 0x1, + FW_PORT_MOD_TYPE_SR = 0x2, + FW_PORT_MOD_TYPE_ER = 0x3, + FW_PORT_MOD_TYPE_TWINAX_PASSIVE = 0x4, + FW_PORT_MOD_TYPE_TWINAX_ACTIVE = 0x5, + FW_PORT_MOD_TYPE_LRM = 0x6, + FW_PORT_MOD_TYPE_ERROR = M_FW_PORT_CMD_MODTYPE - 3, + FW_PORT_MOD_TYPE_UNKNOWN = M_FW_PORT_CMD_MODTYPE - 2, + FW_PORT_MOD_TYPE_NOTSUPPORTED = M_FW_PORT_CMD_MODTYPE - 1, + FW_PORT_MOD_TYPE_NONE = M_FW_PORT_CMD_MODTYPE +}; + +/* used by FW and tools may use this to generate VPD */ +enum fw_port_mod_sub_type { + FW_PORT_MOD_SUB_TYPE_NA, + FW_PORT_MOD_SUB_TYPE_MV88E114X = 0x1, + FW_PORT_MOD_SUB_TYPE_TN8022 = 0x2, + FW_PORT_MOD_SUB_TYPE_AQ1202 = 0x3, + FW_PORT_MOD_SUB_TYPE_88x3120 = 0x4, + FW_PORT_MOD_SUB_TYPE_BCM84834 = 0x5, + FW_PORT_MOD_SUB_TYPE_BCM5482 = 0x6, + FW_PORT_MOD_SUB_TYPE_BCM84856 = 0x7, + FW_PORT_MOD_SUB_TYPE_BT_VSC8634 = 0x8, + + /* + * The following will never been in the VPD. They are TWINAX cable + * lengths decoded from SFP+ module i2c PROMs. These should almost + * certainly go somewhere else ... 
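+ *
+ * (The _1/_3/_5/_7 suffixes below presumably encode the decoded cable
+ * length in metres, i.e. 1 m, 3 m, 5 m and 7 m respectively.)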
+ */ + FW_PORT_MOD_SUB_TYPE_TWINAX_1 = 0x9, + FW_PORT_MOD_SUB_TYPE_TWINAX_3 = 0xA, + FW_PORT_MOD_SUB_TYPE_TWINAX_5 = 0xB, + FW_PORT_MOD_SUB_TYPE_TWINAX_7 = 0xC, +}; + +/* link down reason codes (3b) */ +enum fw_port_link_dn_rc { + FW_PORT_LINK_DN_RC_NONE, + FW_PORT_LINK_DN_RC_REMFLT, /* Remote fault detected */ + FW_PORT_LINK_DN_ANEG_F, /* Auto-negotiation fault */ + FW_PORT_LINK_DN_RESERVED3, + FW_PORT_LINK_DN_OVERHEAT, /* Port overheated */ + FW_PORT_LINK_DN_UNKNOWN, /* Unable to determine reason */ + FW_PORT_LINK_DN_RX_LOS, /* No RX signal detected */ + FW_PORT_LINK_DN_RESERVED7 +}; + +/* port stats */ +#define FW_NUM_PORT_STATS 50 +#define FW_NUM_PORT_TX_STATS 23 +#define FW_NUM_PORT_RX_STATS 27 + +enum fw_port_stats_tx_index { + FW_STAT_TX_PORT_BYTES_IX, + FW_STAT_TX_PORT_FRAMES_IX, + FW_STAT_TX_PORT_BCAST_IX, + FW_STAT_TX_PORT_MCAST_IX, + FW_STAT_TX_PORT_UCAST_IX, + FW_STAT_TX_PORT_ERROR_IX, + FW_STAT_TX_PORT_64B_IX, + FW_STAT_TX_PORT_65B_127B_IX, + FW_STAT_TX_PORT_128B_255B_IX, + FW_STAT_TX_PORT_256B_511B_IX, + FW_STAT_TX_PORT_512B_1023B_IX, + FW_STAT_TX_PORT_1024B_1518B_IX, + FW_STAT_TX_PORT_1519B_MAX_IX, + FW_STAT_TX_PORT_DROP_IX, + FW_STAT_TX_PORT_PAUSE_IX, + FW_STAT_TX_PORT_PPP0_IX, + FW_STAT_TX_PORT_PPP1_IX, + FW_STAT_TX_PORT_PPP2_IX, + FW_STAT_TX_PORT_PPP3_IX, + FW_STAT_TX_PORT_PPP4_IX, + FW_STAT_TX_PORT_PPP5_IX, + FW_STAT_TX_PORT_PPP6_IX, + FW_STAT_TX_PORT_PPP7_IX +}; + +enum fw_port_stat_rx_index { + FW_STAT_RX_PORT_BYTES_IX, + FW_STAT_RX_PORT_FRAMES_IX, + FW_STAT_RX_PORT_BCAST_IX, + FW_STAT_RX_PORT_MCAST_IX, + FW_STAT_RX_PORT_UCAST_IX, + FW_STAT_RX_PORT_MTU_ERROR_IX, + FW_STAT_RX_PORT_MTU_CRC_ERROR_IX, + FW_STAT_RX_PORT_CRC_ERROR_IX, + FW_STAT_RX_PORT_LEN_ERROR_IX, + FW_STAT_RX_PORT_SYM_ERROR_IX, + FW_STAT_RX_PORT_64B_IX, + FW_STAT_RX_PORT_65B_127B_IX, + FW_STAT_RX_PORT_128B_255B_IX, + FW_STAT_RX_PORT_256B_511B_IX, + FW_STAT_RX_PORT_512B_1023B_IX, + FW_STAT_RX_PORT_1024B_1518B_IX, + FW_STAT_RX_PORT_1519B_MAX_IX, + FW_STAT_RX_PORT_PAUSE_IX, + FW_STAT_RX_PORT_PPP0_IX, + FW_STAT_RX_PORT_PPP1_IX, + FW_STAT_RX_PORT_PPP2_IX, + FW_STAT_RX_PORT_PPP3_IX, + FW_STAT_RX_PORT_PPP4_IX, + FW_STAT_RX_PORT_PPP5_IX, + FW_STAT_RX_PORT_PPP6_IX, + FW_STAT_RX_PORT_PPP7_IX, + FW_STAT_RX_PORT_LESS_64B_IX +}; + +struct fw_port_stats_cmd { + __be32 op_to_portid; + __be32 retval_len16; + union fw_port_stats { + struct fw_port_stats_ctl { + __u8 nstats_bg_bm; + __u8 tx_ix; + __be16 r6; + __be32 r7; + __be64 stat0; + __be64 stat1; + __be64 stat2; + __be64 stat3; + __be64 stat4; + __be64 stat5; + } ctl; + struct fw_port_stats_all { + __be64 tx_bytes; + __be64 tx_frames; + __be64 tx_bcast; + __be64 tx_mcast; + __be64 tx_ucast; + __be64 tx_error; + __be64 tx_64b; + __be64 tx_65b_127b; + __be64 tx_128b_255b; + __be64 tx_256b_511b; + __be64 tx_512b_1023b; + __be64 tx_1024b_1518b; + __be64 tx_1519b_max; + __be64 tx_drop; + __be64 tx_pause; + __be64 tx_ppp0; + __be64 tx_ppp1; + __be64 tx_ppp2; + __be64 tx_ppp3; + __be64 tx_ppp4; + __be64 tx_ppp5; + __be64 tx_ppp6; + __be64 tx_ppp7; + __be64 rx_bytes; + __be64 rx_frames; + __be64 rx_bcast; + __be64 rx_mcast; + __be64 rx_ucast; + __be64 rx_mtu_error; + __be64 rx_mtu_crc_error; + __be64 rx_crc_error; + __be64 rx_len_error; + __be64 rx_sym_error; + __be64 rx_64b; + __be64 rx_65b_127b; + __be64 rx_128b_255b; + __be64 rx_256b_511b; + __be64 rx_512b_1023b; + __be64 rx_1024b_1518b; + __be64 rx_1519b_max; + __be64 rx_pause; + __be64 rx_ppp0; + __be64 rx_ppp1; + __be64 rx_ppp2; + __be64 rx_ppp3; + __be64 rx_ppp4; + __be64 rx_ppp5; + __be64 rx_ppp6; + __be64 rx_ppp7; + 
__be64 rx_less_64b; + __be64 rx_bg_drop; + __be64 rx_bg_trunc; + } all; + } u; +}; + +struct fw_rss_ind_tbl_cmd { + __be32 op_to_viid; + __be32 retval_len16; + __be16 niqid; + __be16 startidx; + __be32 r3; + __be32 iq0_to_iq2; + __be32 iq3_to_iq5; + __be32 iq6_to_iq8; + __be32 iq9_to_iq11; + __be32 iq12_to_iq14; + __be32 iq15_to_iq17; + __be32 iq18_to_iq20; + __be32 iq21_to_iq23; + __be32 iq24_to_iq26; + __be32 iq27_to_iq29; + __be32 iq30_iq31; + __be32 r15_lo; +}; + +#define S_FW_RSS_IND_TBL_CMD_VIID 0 +#define M_FW_RSS_IND_TBL_CMD_VIID 0xfff +#define V_FW_RSS_IND_TBL_CMD_VIID(x) ((x) << S_FW_RSS_IND_TBL_CMD_VIID) +#define G_FW_RSS_IND_TBL_CMD_VIID(x) \ + (((x) >> S_FW_RSS_IND_TBL_CMD_VIID) & M_FW_RSS_IND_TBL_CMD_VIID) + +#define S_FW_RSS_IND_TBL_CMD_IQ0 20 +#define M_FW_RSS_IND_TBL_CMD_IQ0 0x3ff +#define V_FW_RSS_IND_TBL_CMD_IQ0(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ0) +#define G_FW_RSS_IND_TBL_CMD_IQ0(x) \ + (((x) >> S_FW_RSS_IND_TBL_CMD_IQ0) & M_FW_RSS_IND_TBL_CMD_IQ0) + +#define S_FW_RSS_IND_TBL_CMD_IQ1 10 +#define M_FW_RSS_IND_TBL_CMD_IQ1 0x3ff +#define V_FW_RSS_IND_TBL_CMD_IQ1(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ1) +#define G_FW_RSS_IND_TBL_CMD_IQ1(x) \ + (((x) >> S_FW_RSS_IND_TBL_CMD_IQ1) & M_FW_RSS_IND_TBL_CMD_IQ1) + +#define S_FW_RSS_IND_TBL_CMD_IQ2 0 +#define M_FW_RSS_IND_TBL_CMD_IQ2 0x3ff +#define V_FW_RSS_IND_TBL_CMD_IQ2(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ2) +#define G_FW_RSS_IND_TBL_CMD_IQ2(x) \ + (((x) >> S_FW_RSS_IND_TBL_CMD_IQ2) & M_FW_RSS_IND_TBL_CMD_IQ2) + +struct fw_rss_glb_config_cmd { + __be32 op_to_write; + __be32 retval_len16; + union fw_rss_glb_config { + struct fw_rss_glb_config_manual { + __be32 mode_pkd; + __be32 r3; + __be64 r4; + __be64 r5; + } manual; + struct fw_rss_glb_config_basicvirtual { + __be32 mode_keymode; + __be32 synmapen_to_hashtoeplitz; + __be64 r8; + __be64 r9; + } basicvirtual; + } u; +}; + +#define S_FW_RSS_GLB_CONFIG_CMD_MODE 28 +#define M_FW_RSS_GLB_CONFIG_CMD_MODE 0xf +#define G_FW_RSS_GLB_CONFIG_CMD_MODE(x) \ + (((x) >> S_FW_RSS_GLB_CONFIG_CMD_MODE) & M_FW_RSS_GLB_CONFIG_CMD_MODE) + +#define FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL 1 + +#define S_FW_RSS_GLB_CONFIG_CMD_SYNMAPEN 8 +#define V_FW_RSS_GLB_CONFIG_CMD_SYNMAPEN(x) \ + ((x) << S_FW_RSS_GLB_CONFIG_CMD_SYNMAPEN) +#define F_FW_RSS_GLB_CONFIG_CMD_SYNMAPEN V_FW_RSS_GLB_CONFIG_CMD_SYNMAPEN(1U) + +#define S_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6 7 +#define V_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6(x) \ + ((x) << S_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6) +#define F_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6 \ + V_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6(1U) + +#define S_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6 6 +#define V_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6(x) \ + ((x) << S_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6) +#define F_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6 \ + V_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6(1U) + +#define S_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4 5 +#define V_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4(x) \ + ((x) << S_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4) +#define F_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4 \ + V_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4(1U) + +#define S_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4 4 +#define V_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4(x) \ + ((x) << S_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4) +#define F_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4 \ + V_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4(1U) + +#define S_FW_RSS_GLB_CONFIG_CMD_OFDMAPEN 3 +#define V_FW_RSS_GLB_CONFIG_CMD_OFDMAPEN(x) \ + ((x) << S_FW_RSS_GLB_CONFIG_CMD_OFDMAPEN) +#define F_FW_RSS_GLB_CONFIG_CMD_OFDMAPEN V_FW_RSS_GLB_CONFIG_CMD_OFDMAPEN(1U) + +#define 
S_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN 2 +#define V_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN(x) \ + ((x) << S_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN) +#define F_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN V_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN(1U) + +#define S_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP 1 +#define V_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP(x) \ + ((x) << S_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP) +#define F_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP \ + V_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP(1U) + +#define S_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ 0 +#define V_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ(x) \ + ((x) << S_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ) +#define F_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ \ + V_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ(1U) + +struct fw_rss_vi_config_cmd { + __be32 op_to_viid; + __be32 retval_len16; + union fw_rss_vi_config { + struct fw_rss_vi_config_manual { + __be64 r3; + __be64 r4; + __be64 r5; + } manual; + struct fw_rss_vi_config_basicvirtual { + __be32 r6; + __be32 defaultq_to_udpen; + __be64 r9; + __be64 r10; + } basicvirtual; + } u; +}; + +#define S_FW_RSS_VI_CONFIG_CMD_VIID 0 +#define M_FW_RSS_VI_CONFIG_CMD_VIID 0xfff +#define V_FW_RSS_VI_CONFIG_CMD_VIID(x) ((x) << S_FW_RSS_VI_CONFIG_CMD_VIID) +#define G_FW_RSS_VI_CONFIG_CMD_VIID(x) \ + (((x) >> S_FW_RSS_VI_CONFIG_CMD_VIID) & M_FW_RSS_VI_CONFIG_CMD_VIID) + +#define S_FW_RSS_VI_CONFIG_CMD_DEFAULTQ 16 +#define M_FW_RSS_VI_CONFIG_CMD_DEFAULTQ 0x3ff +#define V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(x) \ + ((x) << S_FW_RSS_VI_CONFIG_CMD_DEFAULTQ) +#define G_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(x) \ + (((x) >> S_FW_RSS_VI_CONFIG_CMD_DEFAULTQ) & \ + M_FW_RSS_VI_CONFIG_CMD_DEFAULTQ) + +#define S_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN 4 +#define M_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN 0x1 +#define V_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN(x) \ + ((x) << S_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) +#define G_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN(x) \ + (((x) >> S_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) & \ + M_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) +#define F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN \ + V_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN(1U) + +#define S_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN 3 +#define M_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN 0x1 +#define V_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN(x) \ + ((x) << S_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN) +#define G_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN(x) \ + (((x) >> S_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN) & \ + M_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN) +#define F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN \ + V_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN(1U) + +#define S_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN 2 +#define M_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN 0x1 +#define V_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN(x) \ + ((x) << S_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) +#define G_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN(x) \ + (((x) >> S_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) & \ + M_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) +#define F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN \ + V_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN(1U) + +#define S_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN 1 +#define M_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN 0x1 +#define V_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN(x) \ + ((x) << S_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN) +#define G_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN(x) \ + (((x) >> S_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN) & \ + M_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN) +#define F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN \ + V_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN(1U) + +#define S_FW_RSS_VI_CONFIG_CMD_UDPEN 0 +#define M_FW_RSS_VI_CONFIG_CMD_UDPEN 0x1 +#define V_FW_RSS_VI_CONFIG_CMD_UDPEN(x) ((x) << S_FW_RSS_VI_CONFIG_CMD_UDPEN) +#define G_FW_RSS_VI_CONFIG_CMD_UDPEN(x) \ + (((x) >> S_FW_RSS_VI_CONFIG_CMD_UDPEN) & M_FW_RSS_VI_CONFIG_CMD_UDPEN) +#define 
F_FW_RSS_VI_CONFIG_CMD_UDPEN V_FW_RSS_VI_CONFIG_CMD_UDPEN(1U) + +struct fw_clip_cmd { + __be32 op_to_write; + __be32 alloc_to_len16; + __be64 ip_hi; + __be64 ip_lo; + __be32 r4[2]; +}; + +#define S_FW_CLIP_CMD_ALLOC 31 +#define V_FW_CLIP_CMD_ALLOC(x) ((x) << S_FW_CLIP_CMD_ALLOC) +#define F_FW_CLIP_CMD_ALLOC V_FW_CLIP_CMD_ALLOC(1U) + +#define S_FW_CLIP_CMD_FREE 30 +#define V_FW_CLIP_CMD_FREE(x) ((x) << S_FW_CLIP_CMD_FREE) +#define F_FW_CLIP_CMD_FREE V_FW_CLIP_CMD_FREE(1U) + +/****************************************************************************** + * D E B U G C O M M A N D s + ******************************************************/ + +struct fw_debug_cmd { + __be32 op_type; + __be32 len16_pkd; + union fw_debug { + struct fw_debug_assert { + __be32 fcid; + __be32 line; + __be32 x; + __be32 y; + __u8 filename_0_7[8]; + __u8 filename_8_15[8]; + __be64 r3; + } assert; + struct fw_debug_prt { + __be16 dprtstridx; + __be16 r3[3]; + __be32 dprtstrparam0; + __be32 dprtstrparam1; + __be32 dprtstrparam2; + __be32 dprtstrparam3; + } prt; + } u; +}; + +#define S_FW_DEBUG_CMD_TYPE 0 +#define M_FW_DEBUG_CMD_TYPE 0xff +#define V_FW_DEBUG_CMD_TYPE(x) ((x) << S_FW_DEBUG_CMD_TYPE) +#define G_FW_DEBUG_CMD_TYPE(x) \ + (((x) >> S_FW_DEBUG_CMD_TYPE) & M_FW_DEBUG_CMD_TYPE) + +/****************************************************************************** + * P C I E F W R E G I S T E R + **************************************/ + +/* + * Register definitions for the PCIE_FW register which the firmware uses + * to retain status across RESETs. This register should be considered + * as a READ-ONLY register for Host Software and only to be used to + * track firmware initialization/error state, etc. + */ +#define S_PCIE_FW_ERR 31 +#define M_PCIE_FW_ERR 0x1 +#define V_PCIE_FW_ERR(x) ((x) << S_PCIE_FW_ERR) +#define G_PCIE_FW_ERR(x) (((x) >> S_PCIE_FW_ERR) & M_PCIE_FW_ERR) +#define F_PCIE_FW_ERR V_PCIE_FW_ERR(1U) + +#define S_PCIE_FW_INIT 30 +#define M_PCIE_FW_INIT 0x1 +#define V_PCIE_FW_INIT(x) ((x) << S_PCIE_FW_INIT) +#define G_PCIE_FW_INIT(x) (((x) >> S_PCIE_FW_INIT) & M_PCIE_FW_INIT) +#define F_PCIE_FW_INIT V_PCIE_FW_INIT(1U) + +#define S_PCIE_FW_HALT 29 +#define M_PCIE_FW_HALT 0x1 +#define V_PCIE_FW_HALT(x) ((x) << S_PCIE_FW_HALT) +#define G_PCIE_FW_HALT(x) (((x) >> S_PCIE_FW_HALT) & M_PCIE_FW_HALT) +#define F_PCIE_FW_HALT V_PCIE_FW_HALT(1U) + +#define S_PCIE_FW_EVAL 24 +#define M_PCIE_FW_EVAL 0x7 +#define V_PCIE_FW_EVAL(x) ((x) << S_PCIE_FW_EVAL) +#define G_PCIE_FW_EVAL(x) (((x) >> S_PCIE_FW_EVAL) & M_PCIE_FW_EVAL) + +#define S_PCIE_FW_MASTER_VLD 15 +#define M_PCIE_FW_MASTER_VLD 0x1 +#define V_PCIE_FW_MASTER_VLD(x) ((x) << S_PCIE_FW_MASTER_VLD) +#define G_PCIE_FW_MASTER_VLD(x) \ + (((x) >> S_PCIE_FW_MASTER_VLD) & M_PCIE_FW_MASTER_VLD) +#define F_PCIE_FW_MASTER_VLD V_PCIE_FW_MASTER_VLD(1U) + +#define S_PCIE_FW_MASTER 12 +#define M_PCIE_FW_MASTER 0x7 +#define V_PCIE_FW_MASTER(x) ((x) << S_PCIE_FW_MASTER) +#define G_PCIE_FW_MASTER(x) (((x) >> S_PCIE_FW_MASTER) & M_PCIE_FW_MASTER) + +/****************************************************************************** + * B I N A R Y H E A D E R F O R M A T + **********************************************/ + +/* + * firmware binary header format + */ +struct fw_hdr { + __u8 ver; + __u8 chip; /* terminator chip family */ + __be16 len512; /* bin length in units of 512-bytes */ + __be32 fw_ver; /* firmware version */ + __be32 tp_microcode_ver; /* tcp processor microcode version */ + __u8 intfver_nic; + __u8 intfver_vnic; + __u8 intfver_ofld; + __u8 intfver_ri; + __u8 
intfver_iscsipdu; + __u8 intfver_iscsi; + __u8 intfver_fcoepdu; + __u8 intfver_fcoe; + __u32 reserved2; + __u32 reserved3; + __u32 magic; /* runtime or bootstrap fw */ + __be32 flags; + __be32 reserved6[23]; +}; + +#define S_FW_HDR_FW_VER_MAJOR 24 +#define M_FW_HDR_FW_VER_MAJOR 0xff +#define V_FW_HDR_FW_VER_MAJOR(x) \ + ((x) << S_FW_HDR_FW_VER_MAJOR) +#define G_FW_HDR_FW_VER_MAJOR(x) \ + (((x) >> S_FW_HDR_FW_VER_MAJOR) & M_FW_HDR_FW_VER_MAJOR) + +#define S_FW_HDR_FW_VER_MINOR 16 +#define M_FW_HDR_FW_VER_MINOR 0xff +#define V_FW_HDR_FW_VER_MINOR(x) \ + ((x) << S_FW_HDR_FW_VER_MINOR) +#define G_FW_HDR_FW_VER_MINOR(x) \ + (((x) >> S_FW_HDR_FW_VER_MINOR) & M_FW_HDR_FW_VER_MINOR) + +#define S_FW_HDR_FW_VER_MICRO 8 +#define M_FW_HDR_FW_VER_MICRO 0xff +#define V_FW_HDR_FW_VER_MICRO(x) \ + ((x) << S_FW_HDR_FW_VER_MICRO) +#define G_FW_HDR_FW_VER_MICRO(x) \ + (((x) >> S_FW_HDR_FW_VER_MICRO) & M_FW_HDR_FW_VER_MICRO) + +#define S_FW_HDR_FW_VER_BUILD 0 +#define M_FW_HDR_FW_VER_BUILD 0xff +#define V_FW_HDR_FW_VER_BUILD(x) \ + ((x) << S_FW_HDR_FW_VER_BUILD) +#define G_FW_HDR_FW_VER_BUILD(x) \ + (((x) >> S_FW_HDR_FW_VER_BUILD) & M_FW_HDR_FW_VER_BUILD) + +#endif /* _T4FW_INTERFACE_H_ */ diff --git a/src/spdk/dpdk/drivers/net/cxgbe/base/t4vf_hw.c b/src/spdk/dpdk/drivers/net/cxgbe/base/t4vf_hw.c new file mode 100644 index 000000000..649bacfb2 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/cxgbe/base/t4vf_hw.c @@ -0,0 +1,880 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Chelsio Communications. + * All rights reserved. + */ + +#include +#include + +#include "common.h" +#include "t4_regs.h" + +/** + * t4vf_wait_dev_ready - wait till to reads of registers work + * + * Wait for the device to become ready (signified by our "who am I" register + * returning a value other than all 1's). Return an error if it doesn't + * become ready ... + */ +static int t4vf_wait_dev_ready(struct adapter *adapter) +{ + const u32 whoami = T4VF_PL_BASE_ADDR + A_PL_VF_WHOAMI; + const u32 notready1 = 0xffffffff; + const u32 notready2 = 0xeeeeeeee; + u32 val; + + val = t4_read_reg(adapter, whoami); + if (val != notready1 && val != notready2) + return 0; + + msleep(500); + val = t4_read_reg(adapter, whoami); + if (val != notready1 && val != notready2) + return 0; + + dev_err(adapter, "Device didn't become ready for access, whoami = %#x\n", + val); + return -EIO; +} + +/* + * Get the reply to a mailbox command and store it in @rpl in big-endian order. + */ +static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit, + u32 mbox_addr) +{ + for ( ; nflit; nflit--, mbox_addr += 8) + *rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr)); +} + +/** + * t4vf_wr_mbox_core - send a command to FW through the mailbox + * @adapter: the adapter + * @cmd: the command to write + * @size: command length in bytes + * @rpl: where to optionally store the reply + * @sleep_ok: if true we may sleep while awaiting command completion + * + * Sends the given command to FW through the mailbox and waits for the + * FW to execute the command. If @rpl is not %NULL it is used to store + * the FW's reply to the command. The command and its optional reply + * are of the same length. FW can take up to 500 ms to respond. + * @sleep_ok determines whether we may sleep while awaiting the response. + * If sleeping is allowed we use progressive backoff otherwise we spin. + * + * The return value is 0 on success or a negative errno on failure. 
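+ *
+ * For illustration only (this mirrors t4vf_fw_reset() further down in
+ * this file rather than adding any new interface), a caller typically
+ * zeroes a firmware command structure, fills it in big-endian order and
+ * hands it to this routine:
+ *
+ *	struct fw_reset_cmd c;
+ *
+ *	memset(&c, 0, sizeof(c));
+ *	c.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_RESET_CMD) |
+ *				    F_FW_CMD_WRITE);
+ *	c.retval_len16 = cpu_to_be32(V_FW_CMD_LEN16(FW_LEN16(c)));
+ *	ret = t4vf_wr_mbox_core(adapter, &c, sizeof(c), NULL, true);
+ *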
A + * failure can happen either because we are not able to execute the + * command or FW executes it but signals an error. In the latter case + * the return value is the error code indicated by FW (negated). + */ +int t4vf_wr_mbox_core(struct adapter *adapter, + const void __attribute__((__may_alias__)) *cmd, + int size, void *rpl, bool sleep_ok) +{ + /* + * We delay in small increments at first in an effort to maintain + * responsiveness for simple, fast executing commands but then back + * off to larger delays to a maximum retry delay. + */ + static const int delay[] = { + 1, 1, 3, 5, 10, 10, 20, 50, 100 + }; + + + u32 mbox_ctl = T4VF_CIM_BASE_ADDR + A_CIM_VF_EXT_MAILBOX_CTRL; + __be64 cmd_rpl[MBOX_LEN / 8]; + struct mbox_entry entry; + unsigned int delay_idx; + u32 v, mbox_data; + const __be64 *p; + int i, ret; + int ms; + + /* In T6, mailbox size is changed to 128 bytes to avoid + * invalidating the entire prefetch buffer. + */ + if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) + mbox_data = T4VF_MBDATA_BASE_ADDR; + else + mbox_data = T6VF_MBDATA_BASE_ADDR; + + /* + * Commands must be multiples of 16 bytes in length and may not be + * larger than the size of the Mailbox Data register array. + */ + if ((size % 16) != 0 || + size > NUM_CIM_VF_MAILBOX_DATA_INSTANCES * 4) + return -EINVAL; + + /* + * Queue ourselves onto the mailbox access list. When our entry is at + * the front of the list, we have rights to access the mailbox. So we + * wait [for a while] till we're at the front [or bail out with an + * EBUSY] ... + */ + t4_os_atomic_add_tail(&entry, &adapter->mbox_list, &adapter->mbox_lock); + + delay_idx = 0; + ms = delay[0]; + + for (i = 0; ; i += ms) { + /* + * If we've waited too long, return a busy indication. This + * really ought to be based on our initial position in the + * mailbox access list but this is a start. We very rarely + * contend on access to the mailbox ... + */ + if (i > (2 * FW_CMD_MAX_TIMEOUT)) { + t4_os_atomic_list_del(&entry, &adapter->mbox_list, + &adapter->mbox_lock); + ret = -EBUSY; + return ret; + } + + /* + * If we're at the head, break out and start the mailbox + * protocol. + */ + if (t4_os_list_first_entry(&adapter->mbox_list) == &entry) + break; + + /* + * Delay for a bit before checking again ... + */ + if (sleep_ok) { + ms = delay[delay_idx]; /* last element may repeat */ + if (delay_idx < ARRAY_SIZE(delay) - 1) + delay_idx++; + msleep(ms); + } else { + rte_delay_ms(ms); + } + } + + /* + * Loop trying to get ownership of the mailbox. Return an error + * if we can't gain ownership. + */ + v = G_MBOWNER(t4_read_reg(adapter, mbox_ctl)); + for (i = 0; v == X_MBOWNER_NONE && i < 3; i++) + v = G_MBOWNER(t4_read_reg(adapter, mbox_ctl)); + + if (v != X_MBOWNER_PL) { + t4_os_atomic_list_del(&entry, &adapter->mbox_list, + &adapter->mbox_lock); + ret = (v == X_MBOWNER_FW) ? -EBUSY : -ETIMEDOUT; + return ret; + } + + /* + * Write the command array into the Mailbox Data register array and + * transfer ownership of the mailbox to the firmware. + */ + for (i = 0, p = cmd; i < size; i += 8) + t4_write_reg64(adapter, mbox_data + i, be64_to_cpu(*p++)); + + t4_read_reg(adapter, mbox_data); /* flush write */ + t4_write_reg(adapter, mbox_ctl, + F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW)); + t4_read_reg(adapter, mbox_ctl); /* flush write */ + delay_idx = 0; + ms = delay[0]; + + /* + * Spin waiting for firmware to acknowledge processing our command. 
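+ * As above, when sleeping is allowed this uses the progressive backoff
+ * table (delay_idx saturates on the last, largest delay[] entry); when
+ * it is not, ms stays at delay[0] and we busy-wait in 1 ms steps. The
+ * loop gives up after FW_CMD_MAX_TIMEOUT iterations.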
+ */ + for (i = 0; i < FW_CMD_MAX_TIMEOUT; i++) { + if (sleep_ok) { + ms = delay[delay_idx]; /* last element may repeat */ + if (delay_idx < ARRAY_SIZE(delay) - 1) + delay_idx++; + msleep(ms); + } else { + rte_delay_ms(ms); + } + + /* + * If we're the owner, see if this is the reply we wanted. + */ + v = t4_read_reg(adapter, mbox_ctl); + if (G_MBOWNER(v) == X_MBOWNER_PL) { + /* + * If the Message Valid bit isn't on, revoke ownership + * of the mailbox and continue waiting for our reply. + */ + if ((v & F_MBMSGVALID) == 0) { + t4_write_reg(adapter, mbox_ctl, + V_MBOWNER(X_MBOWNER_NONE)); + continue; + } + + /* + * We now have our reply. Extract the command return + * value, copy the reply back to our caller's buffer + * (if specified) and revoke ownership of the mailbox. + * We return the (negated) firmware command return + * code (this depends on FW_SUCCESS == 0). (Again we + * avoid clogging the log with FW_VI_STATS_CMD + * reply results.) + */ + + /* + * Retrieve the command reply and release the mailbox. + */ + get_mbox_rpl(adapter, cmd_rpl, size / 8, mbox_data); + t4_write_reg(adapter, mbox_ctl, + V_MBOWNER(X_MBOWNER_NONE)); + t4_os_atomic_list_del(&entry, &adapter->mbox_list, + &adapter->mbox_lock); + + /* return value in high-order host-endian word */ + v = be64_to_cpu(cmd_rpl[0]); + + if (rpl) { + /* request bit in high-order BE word */ + WARN_ON((be32_to_cpu(*(const u32 *)cmd) + & F_FW_CMD_REQUEST) == 0); + memcpy(rpl, cmd_rpl, size); + } + return -((int)G_FW_CMD_RETVAL(v)); + } + } + + /* + * We timed out. Return the error ... + */ + dev_err(adapter, "command %#x timed out\n", + *(const u8 *)cmd); + dev_err(adapter, " Control = %#x\n", t4_read_reg(adapter, mbox_ctl)); + t4_os_atomic_list_del(&entry, &adapter->mbox_list, &adapter->mbox_lock); + ret = -ETIMEDOUT; + return ret; +} + +/** + * t4vf_fw_reset - issue a reset to FW + * @adapter: the adapter + * + * Issues a reset command to FW. For a Physical Function this would + * result in the Firmware resetting all of its state. For a Virtual + * Function this just resets the state associated with the VF. + */ +int t4vf_fw_reset(struct adapter *adapter) +{ + struct fw_reset_cmd cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_RESET_CMD) | + F_FW_CMD_WRITE); + cmd.retval_len16 = cpu_to_be32(V_FW_CMD_LEN16(FW_LEN16(cmd))); + return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL); +} + +/** + * t4vf_prep_adapter - prepare SW and HW for operation + * @adapter: the adapter + * + * Initialize adapter SW state for the various HW modules, set initial + * values for some adapter tunables, take PHYs out of reset, and + * initialize the MDIO interface. + */ +int t4vf_prep_adapter(struct adapter *adapter) +{ + u32 pl_vf_rev; + int ret, ver; + + ret = t4vf_wait_dev_ready(adapter); + if (ret < 0) + return ret; + + /* + * Default port and clock for debugging in case we can't reach + * firmware. 
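+ * (cclk appears to be held in kHz, so the 50000 below stands in for a
+ * 50 MHz core clock until t4vf_get_vpd_params() reads the real value;
+ * nports and vfres.pmask are likewise minimal single-port defaults.)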
+ */ + adapter->params.nports = 1; + adapter->params.vfres.pmask = 1; + adapter->params.vpd.cclk = 50000; + + pl_vf_rev = G_REV(t4_read_reg(adapter, A_PL_VF_REV)); + adapter->params.pci.device_id = adapter->pdev->id.device_id; + adapter->params.pci.vendor_id = adapter->pdev->id.vendor_id; + + /* + * WE DON'T NEED adapter->params.chip CODE ONCE PL_REV CONTAINS + * ADAPTER (VERSION << 4 | REVISION) + */ + ver = CHELSIO_PCI_ID_VER(adapter->params.pci.device_id); + adapter->params.chip = 0; + switch (ver) { + case CHELSIO_T5: + adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, + pl_vf_rev); + adapter->params.arch.sge_fl_db = F_DBPRIO | F_DBTYPE; + adapter->params.arch.mps_tcam_size = + NUM_MPS_T5_CLS_SRAM_L_INSTANCES; + break; + case CHELSIO_T6: + adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, + pl_vf_rev); + adapter->params.arch.sge_fl_db = 0; + adapter->params.arch.mps_tcam_size = + NUM_MPS_T5_CLS_SRAM_L_INSTANCES; + break; + default: + dev_err(adapter, "%s: Device %d is not supported\n", + __func__, adapter->params.pci.device_id); + return -EINVAL; + } + return 0; +} + +/** + * t4vf_query_params - query FW or device parameters + * @adapter: the adapter + * @nparams: the number of parameters + * @params: the parameter names + * @vals: the parameter values + * + * Reads the values of firmware or device parameters. Up to 7 parameters + * can be queried at once. + */ +int t4vf_query_params(struct adapter *adapter, unsigned int nparams, + const u32 *params, u32 *vals) +{ + struct fw_params_cmd cmd, rpl; + struct fw_params_param *p; + unsigned int i; + size_t len16; + int ret; + + if (nparams > 7) + return -EINVAL; + + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) | + F_FW_CMD_REQUEST | + F_FW_CMD_READ); + len16 = DIV_ROUND_UP(offsetof(struct fw_params_cmd, + param[nparams]), 16); + cmd.retval_len16 = cpu_to_be32(V_FW_CMD_LEN16(len16)); + for (i = 0, p = &cmd.param[0]; i < nparams; i++, p++) + p->mnem = cpu_to_be32(*params++); + ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl); + if (ret == 0) + for (i = 0, p = &rpl.param[0]; i < nparams; i++, p++) + *vals++ = be32_to_cpu(p->val); + return ret; +} + +/** + * t4vf_get_vpd_params - retrieve device VPD paremeters + * @adapter: the adapter + * + * Retrives various device Vital Product Data parameters. The parameters + * are stored in @adapter->params.vpd. + */ +int t4vf_get_vpd_params(struct adapter *adapter) +{ + struct vpd_params *vpd_params = &adapter->params.vpd; + u32 params[7], vals[7]; + int v; + + params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | + V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK)); + v = t4vf_query_params(adapter, 1, params, vals); + if (v != FW_SUCCESS) + return v; + vpd_params->cclk = vals[0]; + dev_debug(adapter, "%s: vpd_params->cclk = %u\n", + __func__, vpd_params->cclk); + return 0; +} + +/** + * t4vf_get_dev_params - retrieve device paremeters + * @adapter: the adapter + * + * Retrives fw and tp version. 
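+ * Both values pack major.minor.micro.build into one byte each and are
+ * decoded just below with the G_FW_HDR_FW_VER_* accessors, e.g. a
+ * firmware version of 1.23.4.0 comes back as 0x01170400.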
+ */ +int t4vf_get_dev_params(struct adapter *adapter) +{ + u32 params[7], vals[7]; + int v; + + params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | + V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FWREV)); + params[1] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | + V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_TPREV)); + v = t4vf_query_params(adapter, 2, params, vals); + if (v != FW_SUCCESS) + return v; + adapter->params.fw_vers = vals[0]; + adapter->params.tp_vers = vals[1]; + + dev_info(adapter, "Firmware version: %u.%u.%u.%u\n", + G_FW_HDR_FW_VER_MAJOR(adapter->params.fw_vers), + G_FW_HDR_FW_VER_MINOR(adapter->params.fw_vers), + G_FW_HDR_FW_VER_MICRO(adapter->params.fw_vers), + G_FW_HDR_FW_VER_BUILD(adapter->params.fw_vers)); + + dev_info(adapter, "TP Microcode version: %u.%u.%u.%u\n", + G_FW_HDR_FW_VER_MAJOR(adapter->params.tp_vers), + G_FW_HDR_FW_VER_MINOR(adapter->params.tp_vers), + G_FW_HDR_FW_VER_MICRO(adapter->params.tp_vers), + G_FW_HDR_FW_VER_BUILD(adapter->params.tp_vers)); + return 0; +} + +/** + * t4vf_set_params - sets FW or device parameters + * @adapter: the adapter + * @nparams: the number of parameters + * @params: the parameter names + * @vals: the parameter values + * + * Sets the values of firmware or device parameters. Up to 7 parameters + * can be specified at once. + */ +int t4vf_set_params(struct adapter *adapter, unsigned int nparams, + const u32 *params, const u32 *vals) +{ + struct fw_params_param *p; + struct fw_params_cmd cmd; + unsigned int i; + size_t len16; + + if (nparams > 7) + return -EINVAL; + + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) | + F_FW_CMD_REQUEST | + F_FW_CMD_WRITE); + len16 = DIV_ROUND_UP(offsetof(struct fw_params_cmd, + param[nparams]), 16); + cmd.retval_len16 = cpu_to_be32(V_FW_CMD_LEN16(len16)); + for (i = 0, p = &cmd.param[0]; i < nparams; i++, p++) { + p->mnem = cpu_to_be32(*params++); + p->val = cpu_to_be32(*vals++); + } + return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL); +} + +/** + * t4vf_fl_pkt_align - return the fl packet alignment + * @adapter: the adapter + * + * T4 has a single field to specify the packing and padding boundary. + * T5 onwards has separate fields for this and hence the alignment for + * next packet offset is maximum of these two. + */ +int t4vf_fl_pkt_align(struct adapter *adapter, u32 sge_control, + u32 sge_control2) +{ + unsigned int ingpadboundary, ingpackboundary, fl_align, ingpad_shift; + + /* T4 uses a single control field to specify both the PCIe Padding and + * Packing Boundary. T5 introduced the ability to specify these + * separately. The actual Ingress Packet Data alignment boundary + * within Packed Buffer Mode is the maximum of these two + * specifications. + */ + if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) + ingpad_shift = X_INGPADBOUNDARY_SHIFT; + else + ingpad_shift = X_T6_INGPADBOUNDARY_SHIFT; + + ingpadboundary = 1 << (G_INGPADBOUNDARY(sge_control) + ingpad_shift); + + fl_align = ingpadboundary; + if (!is_t4(adapter->params.chip)) { + ingpackboundary = G_INGPACKBOUNDARY(sge_control2); + if (ingpackboundary == X_INGPACKBOUNDARY_16B) + ingpackboundary = 16; + else + ingpackboundary = 1 << (ingpackboundary + + X_INGPACKBOUNDARY_SHIFT); + + fl_align = max(ingpadboundary, ingpackboundary); + } + return fl_align; +} + +unsigned int t4vf_get_pf_from_vf(struct adapter *adapter) +{ + u32 whoami; + + whoami = t4_read_reg(adapter, T4VF_PL_BASE_ADDR + A_PL_VF_WHOAMI); + return (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ? 
+ G_SOURCEPF(whoami) : G_T6_SOURCEPF(whoami)); +} + +/** + * t4vf_get_rss_glb_config - retrieve adapter RSS Global Configuration + * @adapter: the adapter + * + * Retrieves global RSS mode and parameters with which we have to live + * and stores them in the @adapter's RSS parameters. + */ +int t4vf_get_rss_glb_config(struct adapter *adapter) +{ + struct rss_params *rss = &adapter->params.rss; + struct fw_rss_glb_config_cmd cmd, rpl; + int v; + + /* + * Execute an RSS Global Configuration read command to retrieve + * our RSS configuration. + */ + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) | + F_FW_CMD_REQUEST | + F_FW_CMD_READ); + cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd)); + v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl); + if (v != FW_SUCCESS) + return v; + + /* + * Translate the big-endian RSS Global Configuration into our + * cpu-endian format based on the RSS mode. We also do first level + * filtering at this point to weed out modes which don't support + * VF Drivers ... + */ + rss->mode = G_FW_RSS_GLB_CONFIG_CMD_MODE + (be32_to_cpu(rpl.u.manual.mode_pkd)); + switch (rss->mode) { + case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL: { + u32 word = be32_to_cpu + (rpl.u.basicvirtual.synmapen_to_hashtoeplitz); + + rss->u.basicvirtual.synmapen = + ((word & F_FW_RSS_GLB_CONFIG_CMD_SYNMAPEN) != 0); + rss->u.basicvirtual.syn4tupenipv6 = + ((word & F_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6) != 0); + rss->u.basicvirtual.syn2tupenipv6 = + ((word & F_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6) != 0); + rss->u.basicvirtual.syn4tupenipv4 = + ((word & F_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4) != 0); + rss->u.basicvirtual.syn2tupenipv4 = + ((word & F_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4) != 0); + rss->u.basicvirtual.ofdmapen = + ((word & F_FW_RSS_GLB_CONFIG_CMD_OFDMAPEN) != 0); + rss->u.basicvirtual.tnlmapen = + ((word & F_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN) != 0); + rss->u.basicvirtual.tnlalllookup = + ((word & F_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP) != 0); + rss->u.basicvirtual.hashtoeplitz = + ((word & F_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ) != 0); + + /* we need at least Tunnel Map Enable to be set */ + if (!rss->u.basicvirtual.tnlmapen) + return -EINVAL; + break; + } + + default: + /* all unknown/unsupported RSS modes result in an error */ + return -EINVAL; + } + return 0; +} + +/** + * t4vf_get_vfres - retrieve VF resource limits + * @adapter: the adapter + * + * Retrieves configured resource limits and capabilities for a virtual + * function. The results are stored in @adapter->vfres. + */ +int t4vf_get_vfres(struct adapter *adapter) +{ + struct vf_resources *vfres = &adapter->params.vfres; + struct fw_pfvf_cmd cmd, rpl; + u32 word; + int v; + + /* + * Execute PFVF Read command to get VF resource limits; bail out early + * with error on command failure. + */ + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PFVF_CMD) | + F_FW_CMD_REQUEST | + F_FW_CMD_READ); + cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd)); + v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl); + if (v != FW_SUCCESS) + return v; + + /* + * Extract VF resource limits and return success. 
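+ * Roughly: niqflint = ingress queues with free lists/interrupts,
+ * niq = plain ingress queues, neq = egress queues, pmask = accessible
+ * port mask, tc = traffic classes, nvi = virtual interfaces,
+ * nexactf = exact-match MAC filters, r_caps and wx_caps = read and
+ * write/execute command capabilities, nethctrl = Ethernet control
+ * queues.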
+ */ + word = be32_to_cpu(rpl.niqflint_niq); + vfres->niqflint = G_FW_PFVF_CMD_NIQFLINT(word); + vfres->niq = G_FW_PFVF_CMD_NIQ(word); + + word = be32_to_cpu(rpl.type_to_neq); + vfres->neq = G_FW_PFVF_CMD_NEQ(word); + vfres->pmask = G_FW_PFVF_CMD_PMASK(word); + + word = be32_to_cpu(rpl.tc_to_nexactf); + vfres->tc = G_FW_PFVF_CMD_TC(word); + vfres->nvi = G_FW_PFVF_CMD_NVI(word); + vfres->nexactf = G_FW_PFVF_CMD_NEXACTF(word); + + word = be32_to_cpu(rpl.r_caps_to_nethctrl); + vfres->r_caps = G_FW_PFVF_CMD_R_CAPS(word); + vfres->wx_caps = G_FW_PFVF_CMD_WX_CAPS(word); + vfres->nethctrl = G_FW_PFVF_CMD_NETHCTRL(word); + return 0; +} + +/** + * t4vf_get_port_stats_fw - collect "port" statistics via Firmware + * @adapter: the adapter + * @pidx: the port index + * @s: the stats structure to fill + * + * Collect statistics for the "port"'s Virtual Interface via Firmware + * commands. + */ +static int t4vf_get_port_stats_fw(struct adapter *adapter, int pidx, + struct port_stats *p) +{ + struct port_info *pi = adap2pinfo(adapter, pidx); + unsigned int rem = VI_VF_NUM_STATS; + struct fw_vi_stats_vf fwstats; + __be64 *fwsp = (__be64 *)&fwstats; + + /* + * Grab the Virtual Interface statistics a chunk at a time via mailbox + * commands. We could use a Work Request and get all of them at once + * but that's an asynchronous interface which is awkward to use. + */ + while (rem) { + unsigned int ix = VI_VF_NUM_STATS - rem; + unsigned int nstats = min(6U, rem); + struct fw_vi_stats_cmd cmd, rpl; + size_t len = (offsetof(struct fw_vi_stats_cmd, u) + + sizeof(struct fw_vi_stats_ctl)); + size_t len16 = DIV_ROUND_UP(len, 16); + int ret; + + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_STATS_CMD) | + V_FW_VI_STATS_CMD_VIID(pi->viid) | + F_FW_CMD_REQUEST | + F_FW_CMD_READ); + cmd.retval_len16 = cpu_to_be32(V_FW_CMD_LEN16(len16)); + cmd.u.ctl.nstats_ix = + cpu_to_be16(V_FW_VI_STATS_CMD_IX(ix) | + V_FW_VI_STATS_CMD_NSTATS(nstats)); + ret = t4vf_wr_mbox_ns(adapter, &cmd, len, &rpl); + if (ret != FW_SUCCESS) + return ret; + + memcpy(fwsp, &rpl.u.ctl.stat0, sizeof(__be64) * nstats); + + rem -= nstats; + fwsp += nstats; + } + + /* + * Translate firmware statistics into host native statistics. + */ + p->tx_octets = be64_to_cpu(fwstats.tx_bcast_bytes) + + be64_to_cpu(fwstats.tx_mcast_bytes) + + be64_to_cpu(fwstats.tx_ucast_bytes); + p->tx_bcast_frames = be64_to_cpu(fwstats.tx_bcast_frames); + p->tx_mcast_frames = be64_to_cpu(fwstats.tx_mcast_frames); + p->tx_ucast_frames = be64_to_cpu(fwstats.tx_ucast_frames); + p->tx_drop = be64_to_cpu(fwstats.tx_drop_frames); + + p->rx_bcast_frames = be64_to_cpu(fwstats.rx_bcast_frames); + p->rx_mcast_frames = be64_to_cpu(fwstats.rx_mcast_frames); + p->rx_ucast_frames = be64_to_cpu(fwstats.rx_ucast_frames); + p->rx_len_err = be64_to_cpu(fwstats.rx_err_frames); + + return 0; +} + +/** + * t4vf_get_port_stats - collect "port" statistics + * @adapter: the adapter + * @pidx: the port index + * @s: the stats structure to fill + * + * Collect statistics for the "port"'s Virtual Interface. + */ +void t4vf_get_port_stats(struct adapter *adapter, int pidx, + struct port_stats *p) +{ + /* + * If this is not the first Virtual Interface for our Virtual + * Function, we need to use Firmware commands to retrieve its + * MPS statistics. + */ + if (pidx != 0) + t4vf_get_port_stats_fw(adapter, pidx, p); + + /* + * But for the first VI, we can grab its statistics via the MPS + * register mapped into the VF register space. 
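+ * GET_STAT(TX_VF_BCAST_BYTES) below, for instance, simply expands to
+ * t4_read_reg64(adapter,
+ *		 T4VF_MPS_BASE_ADDR + A_MPS_VF_STAT_TX_VF_BCAST_BYTES_L),
+ * i.e. a 64-bit read from the per-VF MPS statistics window.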
+ */ +#define GET_STAT(name) \ + t4_read_reg64(adapter, \ + T4VF_MPS_BASE_ADDR + A_MPS_VF_STAT_##name##_L) + p->tx_octets = GET_STAT(TX_VF_BCAST_BYTES) + + GET_STAT(TX_VF_MCAST_BYTES) + + GET_STAT(TX_VF_UCAST_BYTES); + p->tx_bcast_frames = GET_STAT(TX_VF_BCAST_FRAMES); + p->tx_mcast_frames = GET_STAT(TX_VF_MCAST_FRAMES); + p->tx_ucast_frames = GET_STAT(TX_VF_UCAST_FRAMES); + p->tx_drop = GET_STAT(TX_VF_DROP_FRAMES); + + p->rx_bcast_frames = GET_STAT(RX_VF_BCAST_FRAMES); + p->rx_mcast_frames = GET_STAT(RX_VF_MCAST_FRAMES); + p->rx_ucast_frames = GET_STAT(RX_VF_UCAST_FRAMES); + + p->rx_len_err = GET_STAT(RX_VF_ERR_FRAMES); +#undef GET_STAT +} + +static int t4vf_alloc_vi(struct adapter *adapter, int port_id) +{ + struct fw_vi_cmd cmd, rpl; + int v; + + /* + * Execute a VI command to allocate Virtual Interface and return its + * VIID. + */ + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) | + F_FW_CMD_REQUEST | + F_FW_CMD_WRITE | + F_FW_CMD_EXEC); + cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(cmd) | + F_FW_VI_CMD_ALLOC); + cmd.portid_pkd = V_FW_VI_CMD_PORTID(port_id); + v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl); + if (v != FW_SUCCESS) + return v; + return G_FW_VI_CMD_VIID(be16_to_cpu(rpl.type_to_viid)); +} + +int t4vf_port_init(struct adapter *adapter) +{ + unsigned int fw_caps = adapter->params.fw_caps_support; + struct fw_port_cmd port_cmd, port_rpl; + struct fw_vi_cmd vi_cmd, vi_rpl; + fw_port_cap32_t pcaps, acaps; + enum fw_port_type port_type; + int mdio_addr; + int ret, i; + + for_each_port(adapter, i) { + struct port_info *p = adap2pinfo(adapter, i); + + /* + * If we haven't yet determined if we're talking to Firmware + * which knows the new 32-bit Port Caps, it's time to find + * out now. This will also tell new Firmware to send us Port + * Status Updates using the new 32-bit Port Capabilities + * version of the Port Information message. + */ + if (fw_caps == FW_CAPS_UNKNOWN) { + u32 param, val; + + param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | + V_FW_PARAMS_PARAM_X + (FW_PARAMS_PARAM_PFVF_PORT_CAPS32)); + val = 1; + ret = t4vf_set_params(adapter, 1, ¶m, &val); + fw_caps = (ret == 0 ? FW_CAPS32 : FW_CAPS16); + adapter->params.fw_caps_support = fw_caps; + } + + ret = t4vf_alloc_vi(adapter, p->port_id); + if (ret < 0) { + dev_err(&pdev->dev, "cannot allocate VI for port %d:" + " err=%d\n", p->port_id, ret); + return ret; + } + p->viid = ret; + + /* + * Execute a VI Read command to get our Virtual Interface + * information like MAC address, etc. + */ + memset(&vi_cmd, 0, sizeof(vi_cmd)); + vi_cmd.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) | + F_FW_CMD_REQUEST | + F_FW_CMD_READ); + vi_cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(vi_cmd)); + vi_cmd.type_to_viid = cpu_to_be16(V_FW_VI_CMD_VIID(p->viid)); + ret = t4vf_wr_mbox(adapter, &vi_cmd, sizeof(vi_cmd), &vi_rpl); + if (ret != FW_SUCCESS) + return ret; + + p->rss_size = G_FW_VI_CMD_RSSSIZE + (be16_to_cpu(vi_rpl.norss_rsssize)); + t4_os_set_hw_addr(adapter, i, vi_rpl.mac); + + /* + * If we don't have read access to our port information, we're + * done now. Else, execute a PORT Read command to get it ... + */ + if (!(adapter->params.vfres.r_caps & FW_CMD_CAP_PORT)) + return 0; + + memset(&port_cmd, 0, sizeof(port_cmd)); + port_cmd.op_to_portid = cpu_to_be32 + (V_FW_CMD_OP(FW_PORT_CMD) | F_FW_CMD_REQUEST | + F_FW_CMD_READ | + V_FW_PORT_CMD_PORTID(p->port_id)); + port_cmd.action_to_len16 = cpu_to_be32 + (V_FW_PORT_CMD_ACTION(fw_caps == FW_CAPS16 ? 
+ FW_PORT_ACTION_GET_PORT_INFO : + FW_PORT_ACTION_GET_PORT_INFO32) | + FW_LEN16(port_cmd)); + ret = t4vf_wr_mbox(adapter, &port_cmd, sizeof(port_cmd), + &port_rpl); + if (ret != FW_SUCCESS) + return ret; + + /* + * Extract the various fields from the Port Information message. + */ + if (fw_caps == FW_CAPS16) { + u32 lstatus = be32_to_cpu + (port_rpl.u.info.lstatus_to_modtype); + + port_type = G_FW_PORT_CMD_PTYPE(lstatus); + mdio_addr = ((lstatus & F_FW_PORT_CMD_MDIOCAP) ? + (int)G_FW_PORT_CMD_MDIOADDR(lstatus) : + -1); + pcaps = fwcaps16_to_caps32 + (be16_to_cpu(port_rpl.u.info.pcap)); + acaps = fwcaps16_to_caps32 + (be16_to_cpu(port_rpl.u.info.acap)); + } else { + u32 lstatus32 = be32_to_cpu + (port_rpl.u.info32.lstatus32_to_cbllen32); + + port_type = G_FW_PORT_CMD_PORTTYPE32(lstatus32); + mdio_addr = ((lstatus32 & F_FW_PORT_CMD_MDIOCAP32) ? + (int)G_FW_PORT_CMD_MDIOADDR32(lstatus32) : + -1); + pcaps = be32_to_cpu(port_rpl.u.info32.pcaps32); + acaps = be32_to_cpu(port_rpl.u.info32.acaps32); + } + + p->port_type = port_type; + p->mdio_addr = mdio_addr; + p->mod_type = FW_PORT_MOD_TYPE_NA; + init_link_config(&p->link_cfg, pcaps, acaps); + } + return 0; +} diff --git a/src/spdk/dpdk/drivers/net/cxgbe/base/t4vf_hw.h b/src/spdk/dpdk/drivers/net/cxgbe/base/t4vf_hw.h new file mode 100644 index 000000000..55e436e74 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/cxgbe/base/t4vf_hw.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Chelsio Communications. + * All rights reserved. + */ + +#ifndef __T4VF_HW_H +#define __T4VF_HW_H + +#define T4VF_PL_BASE_ADDR 0x0200 +#define T4VF_CIM_BASE_ADDR 0x0300 +#define T4VF_MBDATA_BASE_ADDR 0x0240 +#define T6VF_MBDATA_BASE_ADDR 0x0280 + +#define NUM_CIM_VF_MAILBOX_DATA_INSTANCES NUM_CIM_PF_MAILBOX_DATA_INSTANCES +#endif /* __T4VF_HW_H */ diff --git a/src/spdk/dpdk/drivers/net/cxgbe/clip_tbl.c b/src/spdk/dpdk/drivers/net/cxgbe/clip_tbl.c new file mode 100644 index 000000000..a0ab2a6ac --- /dev/null +++ b/src/spdk/dpdk/drivers/net/cxgbe/clip_tbl.c @@ -0,0 +1,193 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Chelsio Communications. + * All rights reserved. 
+ */ + +#include "base/common.h" +#include "clip_tbl.h" + +/** + * Allocate clip entry in HW with associated IPV4/IPv6 address + */ +static int clip6_get_mbox(const struct rte_eth_dev *dev, const u32 *lip) +{ + struct adapter *adap = ethdev2adap(dev); + struct fw_clip_cmd c; + u64 hi = ((u64)lip[1]) << 32 | lip[0]; + u64 lo = ((u64)lip[3]) << 32 | lip[2]; + + memset(&c, 0, sizeof(c)); + c.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_CLIP_CMD) | + F_FW_CMD_REQUEST | F_FW_CMD_WRITE); + c.alloc_to_len16 = cpu_to_be32(F_FW_CLIP_CMD_ALLOC | FW_LEN16(c)); + c.ip_hi = hi; + c.ip_lo = lo; + return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false); +} + +/** + * Delete clip entry in HW having the associated IPV4/IPV6 address + */ +static int clip6_release_mbox(const struct rte_eth_dev *dev, const u32 *lip) +{ + struct adapter *adap = ethdev2adap(dev); + struct fw_clip_cmd c; + u64 hi = ((u64)lip[1]) << 32 | lip[0]; + u64 lo = ((u64)lip[3]) << 32 | lip[2]; + + memset(&c, 0, sizeof(c)); + c.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_CLIP_CMD) | + F_FW_CMD_REQUEST | F_FW_CMD_READ); + c.alloc_to_len16 = cpu_to_be32(F_FW_CLIP_CMD_FREE | FW_LEN16(c)); + c.ip_hi = hi; + c.ip_lo = lo; + return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false); +} + +/** + * cxgbe_clip_release - Release associated CLIP entry + * @ce: clip entry to release + * + * Releases ref count and frees up a clip entry from CLIP table + */ +void cxgbe_clip_release(struct rte_eth_dev *dev, struct clip_entry *ce) +{ + int ret; + + t4_os_lock(&ce->lock); + if (rte_atomic32_dec_and_test(&ce->refcnt)) { + ret = clip6_release_mbox(dev, ce->addr); + if (ret) + dev_debug(adap, "CLIP FW DEL CMD failed: %d", ret); + } + t4_os_unlock(&ce->lock); +} + +/** + * find_or_alloc_clipe - Find/Allocate a free CLIP entry + * @c: CLIP table + * @lip: IPV4/IPV6 address to compare/add + * Returns pointer to the IPV4/IPV6 entry found/created + * + * Finds/Allocates an CLIP entry to be used for a filter rule. + */ +static struct clip_entry *find_or_alloc_clipe(struct clip_tbl *c, + const u32 *lip) +{ + struct clip_entry *end, *e; + struct clip_entry *first_free = NULL; + unsigned int clipt_size = c->clipt_size; + + for (e = &c->cl_list[0], end = &c->cl_list[clipt_size]; e != end; ++e) { + if (rte_atomic32_read(&e->refcnt) == 0) { + if (!first_free) + first_free = e; + } else { + if (memcmp(lip, e->addr, sizeof(e->addr)) == 0) + goto exists; + } + } + + if (first_free) { + e = first_free; + goto exists; + } + + return NULL; + +exists: + return e; +} + +static struct clip_entry *t4_clip_alloc(struct rte_eth_dev *dev, + u32 *lip, u8 v6) +{ + struct adapter *adap = ethdev2adap(dev); + struct clip_tbl *ctbl = adap->clipt; + struct clip_entry *ce; + int ret = 0; + + if (!ctbl) + return NULL; + + t4_os_write_lock(&ctbl->lock); + ce = find_or_alloc_clipe(ctbl, lip); + if (ce) { + t4_os_lock(&ce->lock); + if (!rte_atomic32_read(&ce->refcnt)) { + rte_memcpy(ce->addr, lip, sizeof(ce->addr)); + if (v6) { + ce->type = FILTER_TYPE_IPV6; + rte_atomic32_set(&ce->refcnt, 1); + ret = clip6_get_mbox(dev, lip); + if (ret) + dev_debug(adap, + "CLIP FW ADD CMD failed: %d", + ret); + } else { + ce->type = FILTER_TYPE_IPV4; + } + } else { + rte_atomic32_inc(&ce->refcnt); + } + t4_os_unlock(&ce->lock); + } + t4_os_write_unlock(&ctbl->lock); + + return ret ? 
NULL : ce; +} + +/** + * cxgbe_clip_alloc - Allocate a IPV6 CLIP entry + * @dev: rte_eth_dev pointer + * @lip: IPV6 address to add + * Returns pointer to the CLIP entry created + * + * Allocates a IPV6 CLIP entry to be used for a filter rule. + */ +struct clip_entry *cxgbe_clip_alloc(struct rte_eth_dev *dev, u32 *lip) +{ + return t4_clip_alloc(dev, lip, FILTER_TYPE_IPV6); +} + +/** + * Initialize CLIP Table + */ +struct clip_tbl *t4_init_clip_tbl(unsigned int clipt_start, + unsigned int clipt_end) +{ + unsigned int clipt_size; + struct clip_tbl *ctbl; + unsigned int i; + + if (clipt_start >= clipt_end) + return NULL; + + clipt_size = clipt_end - clipt_start + 1; + + ctbl = t4_os_alloc(sizeof(*ctbl) + + clipt_size * sizeof(struct clip_entry)); + if (!ctbl) + return NULL; + + ctbl->clipt_start = clipt_start; + ctbl->clipt_size = clipt_size; + + t4_os_rwlock_init(&ctbl->lock); + + for (i = 0; i < ctbl->clipt_size; i++) { + t4_os_lock_init(&ctbl->cl_list[i].lock); + rte_atomic32_set(&ctbl->cl_list[i].refcnt, 0); + } + + return ctbl; +} + +/** + * Cleanup CLIP Table + */ +void t4_cleanup_clip_tbl(struct adapter *adap) +{ + if (adap->clipt) + t4_os_free(adap->clipt); +} diff --git a/src/spdk/dpdk/drivers/net/cxgbe/clip_tbl.h b/src/spdk/dpdk/drivers/net/cxgbe/clip_tbl.h new file mode 100644 index 000000000..737ccc691 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/cxgbe/clip_tbl.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Chelsio Communications. + * All rights reserved. + */ + +#ifndef _CXGBE_CLIP_H_ +#define _CXGBE_CLIP_H_ + +/* + * State for the corresponding entry of the HW CLIP table. + */ +struct clip_entry { + enum filter_type type; /* entry type */ + u32 addr[4]; /* IPV4 or IPV6 address */ + rte_spinlock_t lock; /* entry lock */ + rte_atomic32_t refcnt; /* entry reference count */ +}; + +struct clip_tbl { + unsigned int clipt_start; /* start index of CLIP table */ + unsigned int clipt_size; /* size of CLIP table */ + rte_rwlock_t lock; /* table rw lock */ + struct clip_entry cl_list[0]; /* MUST BE LAST */ +}; + +struct clip_tbl *t4_init_clip_tbl(unsigned int clipt_start, + unsigned int clipt_end); +void t4_cleanup_clip_tbl(struct adapter *adap); +struct clip_entry *cxgbe_clip_alloc(struct rte_eth_dev *dev, u32 *lip); +void cxgbe_clip_release(struct rte_eth_dev *dev, struct clip_entry *ce); +#endif /* _CXGBE_CLIP_H_ */ diff --git a/src/spdk/dpdk/drivers/net/cxgbe/cxgbe.h b/src/spdk/dpdk/drivers/net/cxgbe/cxgbe.h new file mode 100644 index 000000000..0bf6061c0 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/cxgbe/cxgbe.h @@ -0,0 +1,111 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Chelsio Communications. + * All rights reserved. 
+ */ + +#ifndef _CXGBE_H_ +#define _CXGBE_H_ + +#include "base/common.h" +#include "base/t4_regs.h" + +#define CXGBE_MIN_RING_DESC_SIZE 128 /* Min TX/RX descriptor ring size */ +#define CXGBE_MAX_RING_DESC_SIZE 4096 /* Max TX/RX descriptor ring size */ + +#define CXGBE_DEFAULT_TX_DESC_SIZE 1024 /* Default TX ring size */ +#define CXGBE_DEFAULT_RX_DESC_SIZE 1024 /* Default RX ring size */ + +#define CXGBE_MIN_RX_BUFSIZE RTE_ETHER_MIN_MTU /* min buf size */ +#define CXGBE_MAX_RX_PKTLEN (9000 + RTE_ETHER_HDR_LEN + \ + RTE_ETHER_CRC_LEN) /* max pkt */ + +/* Max poll time is 100 * 100msec = 10 sec */ +#define CXGBE_LINK_STATUS_POLL_MS 100 /* 100ms */ +#define CXGBE_LINK_STATUS_POLL_CNT 100 /* Max number of times to poll */ + +#define CXGBE_DEFAULT_RSS_KEY_LEN 40 /* 320-bits */ +#define CXGBE_RSS_HF_IPV4_MASK (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | \ + ETH_RSS_NONFRAG_IPV4_OTHER) +#define CXGBE_RSS_HF_IPV6_MASK (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | \ + ETH_RSS_NONFRAG_IPV6_OTHER | \ + ETH_RSS_IPV6_EX) +#define CXGBE_RSS_HF_TCP_IPV6_MASK (ETH_RSS_NONFRAG_IPV6_TCP | \ + ETH_RSS_IPV6_TCP_EX) +#define CXGBE_RSS_HF_UDP_IPV6_MASK (ETH_RSS_NONFRAG_IPV6_UDP | \ + ETH_RSS_IPV6_UDP_EX) +#define CXGBE_RSS_HF_ALL (ETH_RSS_IP | ETH_RSS_TCP | ETH_RSS_UDP) + +/* Tx/Rx Offloads supported */ +#define CXGBE_TX_OFFLOADS (DEV_TX_OFFLOAD_VLAN_INSERT | \ + DEV_TX_OFFLOAD_IPV4_CKSUM | \ + DEV_TX_OFFLOAD_UDP_CKSUM | \ + DEV_TX_OFFLOAD_TCP_CKSUM | \ + DEV_TX_OFFLOAD_TCP_TSO | \ + DEV_TX_OFFLOAD_MULTI_SEGS) + +#define CXGBE_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_STRIP | \ + DEV_RX_OFFLOAD_IPV4_CKSUM | \ + DEV_RX_OFFLOAD_UDP_CKSUM | \ + DEV_RX_OFFLOAD_TCP_CKSUM | \ + DEV_RX_OFFLOAD_JUMBO_FRAME | \ + DEV_RX_OFFLOAD_SCATTER | \ + DEV_RX_OFFLOAD_RSS_HASH) + +/* Devargs filtermode and filtermask representation */ +enum cxgbe_devargs_filter_mode_flags { + CXGBE_DEVARGS_FILTER_MODE_PHYSICAL_PORT = (1 << 0), + CXGBE_DEVARGS_FILTER_MODE_PF_VF = (1 << 1), + + CXGBE_DEVARGS_FILTER_MODE_ETHERNET_DSTMAC = (1 << 2), + CXGBE_DEVARGS_FILTER_MODE_ETHERNET_ETHTYPE = (1 << 3), + CXGBE_DEVARGS_FILTER_MODE_VLAN_INNER = (1 << 4), + CXGBE_DEVARGS_FILTER_MODE_VLAN_OUTER = (1 << 5), + CXGBE_DEVARGS_FILTER_MODE_IP_TOS = (1 << 6), + CXGBE_DEVARGS_FILTER_MODE_IP_PROTOCOL = (1 << 7), + CXGBE_DEVARGS_FILTER_MODE_MAX = (1 << 8), +}; + +enum cxgbe_filter_vnic_mode { + CXGBE_FILTER_VNIC_MODE_NONE, + CXGBE_FILTER_VNIC_MODE_PFVF, + CXGBE_FILTER_VNIC_MODE_OVLAN, +}; + +/* Common PF and VF devargs */ +#define CXGBE_DEVARG_CMN_KEEP_OVLAN "keep_ovlan" +#define CXGBE_DEVARG_CMN_TX_MODE_LATENCY "tx_mode_latency" + +/* VF only devargs */ +#define CXGBE_DEVARG_VF_FORCE_LINK_UP "force_link_up" + +/* Filter Mode/Mask devargs */ +#define CXGBE_DEVARG_PF_FILTER_MODE "filtermode" +#define CXGBE_DEVARG_PF_FILTER_MASK "filtermask" + +bool cxgbe_force_linkup(struct adapter *adap); +int cxgbe_probe(struct adapter *adapter); +int cxgbevf_probe(struct adapter *adapter); +void cxgbe_get_speed_caps(struct port_info *pi, u32 *speed_caps); +int cxgbe_set_link_status(struct port_info *pi, bool status); +int cxgbe_up(struct adapter *adap); +int cxgbe_down(struct port_info *pi); +void cxgbe_close(struct adapter *adapter); +void cxgbe_stats_get(struct port_info *pi, struct port_stats *stats); +void cxgbevf_stats_get(struct port_info *pi, struct port_stats *stats); +void cxgbe_stats_reset(struct port_info *pi); +int cxgbe_poll_for_completion(struct sge_rspq *q, unsigned int us, + unsigned int cnt, struct t4_completion *c); +int cxgbe_link_start(struct port_info *pi); +int 
cxgbe_setup_sge_fwevtq(struct adapter *adapter); +int cxgbe_setup_sge_ctrl_txq(struct adapter *adapter); +void cxgbe_cfg_queues(struct rte_eth_dev *eth_dev); +int cxgbe_cfg_queue_count(struct rte_eth_dev *eth_dev); +int cxgbe_init_rss(struct adapter *adap); +int cxgbe_setup_rss(struct port_info *pi); +void cxgbe_enable_rx_queues(struct port_info *pi); +void cxgbe_print_port_info(struct adapter *adap); +void cxgbe_print_adapter_info(struct adapter *adap); +void cxgbe_process_devargs(struct adapter *adap); +void cxgbe_configure_max_ethqsets(struct adapter *adapter); + +#endif /* _CXGBE_H_ */ diff --git a/src/spdk/dpdk/drivers/net/cxgbe/cxgbe_compat.h b/src/spdk/dpdk/drivers/net/cxgbe/cxgbe_compat.h new file mode 100644 index 000000000..83ae1c2e5 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/cxgbe/cxgbe_compat.h @@ -0,0 +1,260 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Chelsio Communications. + * All rights reserved. + */ + +#ifndef _CXGBE_COMPAT_H_ +#define _CXGBE_COMPAT_H_ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +extern int cxgbe_logtype; +extern int cxgbe_mbox_logtype; + +#define dev_printf(level, logtype, fmt, ...) \ + rte_log(RTE_LOG_ ## level, logtype, \ + "rte_cxgbe_pmd: " fmt, ##__VA_ARGS__) + +#define dev_err(x, fmt, ...) \ + dev_printf(ERR, cxgbe_logtype, fmt, ##__VA_ARGS__) +#define dev_info(x, fmt, ...) \ + dev_printf(INFO, cxgbe_logtype, fmt, ##__VA_ARGS__) +#define dev_warn(x, fmt, ...) \ + dev_printf(WARNING, cxgbe_logtype, fmt, ##__VA_ARGS__) +#define dev_debug(x, fmt, ...) \ + dev_printf(DEBUG, cxgbe_logtype, fmt, ##__VA_ARGS__) + +#define CXGBE_DEBUG_MBOX(x, fmt, ...) \ + dev_printf(DEBUG, cxgbe_mbox_logtype, "MBOX:" fmt, ##__VA_ARGS__) + +#define CXGBE_FUNC_TRACE() \ + dev_printf(DEBUG, cxgbe_logtype, "CXGBE trace: %s\n", __func__) + +#define pr_err(fmt, ...) dev_err(0, fmt, ##__VA_ARGS__) +#define pr_warn(fmt, ...) dev_warn(0, fmt, ##__VA_ARGS__) +#define pr_info(fmt, ...) 
dev_info(0, fmt, ##__VA_ARGS__) +#define BUG() pr_err("BUG at %s:%d", __func__, __LINE__) + +#define ASSERT(x) do {\ + if (!(x)) \ + rte_panic("CXGBE: x"); \ +} while (0) +#define BUG_ON(x) ASSERT(!(x)) + +#ifndef WARN_ON +#define WARN_ON(x) do { \ + int ret = !!(x); \ + if (unlikely(ret)) \ + pr_warn("WARN_ON: \"" #x "\" at %s:%d\n", __func__, __LINE__); \ +} while (0) +#endif + +#define __iomem + +#ifndef BIT +#define BIT(n) (1 << (n)) +#endif + +#define L1_CACHE_SHIFT 6 +#define L1_CACHE_BYTES BIT(L1_CACHE_SHIFT) + +#define PAGE_SHIFT 12 +#define CXGBE_ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1)) +#define PTR_ALIGN(p, a) ((typeof(p))CXGBE_ALIGN((unsigned long)(p), (a))) + +#define VLAN_HLEN 4 +#define ETHER_ADDR_LEN 6 + +#define rmb() rte_rmb() /* dpdk rte provided rmb */ +#define wmb() rte_wmb() /* dpdk rte provided wmb */ + +typedef uint8_t u8; +typedef int8_t s8; +typedef uint16_t u16; +typedef uint32_t u32; +typedef int32_t s32; +typedef uint64_t u64; +typedef uint64_t dma_addr_t; + +#ifndef __le16 +#define __le16 uint16_t +#endif +#ifndef __le32 +#define __le32 uint32_t +#endif +#ifndef __le64 +#define __le64 uint64_t +#endif +#ifndef __be16 +#define __be16 uint16_t +#endif +#ifndef __be32 +#define __be32 uint32_t +#endif +#ifndef __be64 +#define __be64 uint64_t +#endif +#ifndef __u8 +#define __u8 uint8_t +#endif +#ifndef __u16 +#define __u16 uint16_t +#endif +#ifndef __u32 +#define __u32 uint32_t +#endif +#ifndef __u64 +#define __u64 uint64_t +#endif + +#define FALSE 0 +#define TRUE 1 + +#ifndef min +#define min(a, b) RTE_MIN(a, b) +#endif + +#ifndef max +#define max(a, b) RTE_MAX(a, b) +#endif + +/* + * round up val _p to a power of 2 size _s + */ +#define cxgbe_roundup(_p, _s) (((unsigned long)(_p) + (_s - 1)) & ~(_s - 1)) + +#ifndef container_of +#define container_of(ptr, type, member) ({ \ + typeof(((type *)0)->member)(*__mptr) = (ptr); \ + (type *)((char *)__mptr - offsetof(type, member)); }) +#endif + +#define ARRAY_SIZE(arr) RTE_DIM(arr) + +#define cpu_to_be16(o) rte_cpu_to_be_16(o) +#define cpu_to_be32(o) rte_cpu_to_be_32(o) +#define cpu_to_be64(o) rte_cpu_to_be_64(o) +#define cpu_to_le32(o) rte_cpu_to_le_32(o) +#define be16_to_cpu(o) rte_be_to_cpu_16(o) +#define be32_to_cpu(o) rte_be_to_cpu_32(o) +#define be64_to_cpu(o) rte_be_to_cpu_64(o) +#define le32_to_cpu(o) rte_le_to_cpu_32(o) + +#ifndef ntohs +#define ntohs(o) be16_to_cpu(o) +#endif + +#ifndef ntohl +#define ntohl(o) be32_to_cpu(o) +#endif + +#ifndef htons +#define htons(o) cpu_to_be16(o) +#endif + +#ifndef htonl +#define htonl(o) cpu_to_be32(o) +#endif + +#ifndef caddr_t +typedef char *caddr_t; +#endif + +#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d)) +#define DELAY(x) rte_delay_us(x) +#define udelay(x) DELAY(x) +#define msleep(x) DELAY(1000 * (x)) +#define usleep_range(min, max) msleep(DIV_ROUND_UP(min, 1000)) + +static inline uint8_t hweight32(uint32_t word32) +{ + uint32_t res = word32 - ((word32 >> 1) & 0x55555555); + + res = (res & 0x33333333) + ((res >> 2) & 0x33333333); + res = (res + (res >> 4)) & 0x0F0F0F0F; + res = res + (res >> 8); + return (res + (res >> 16)) & 0x000000FF; + +} /* weight32 */ + +/** + * cxgbe_fls - find last (most-significant) bit set + * @x: the word to search + * + * This is defined the same way as ffs. + * Note cxgbe_fls(0) = 0, cxgbe_fls(1) = 1, cxgbe_fls(0x80000000) = 32. + */ +static inline int cxgbe_fls(int x) +{ + return x ? 
sizeof(x) * 8 - __builtin_clz(x) : 0; +} + +static inline unsigned long ilog2(unsigned long n) +{ + unsigned int e = 0; + + while (n) { + if (n & ~((1 << 8) - 1)) { + e += 8; + n >>= 8; + continue; + } + + if (n & ~((1 << 4) - 1)) { + e += 4; + n >>= 4; + } + + for (;;) { + n >>= 1; + if (n == 0) + break; + e++; + } + } + + return e; +} + +static inline void writel(unsigned int val, volatile void __iomem *addr) +{ + rte_write32(val, addr); +} + +static inline void writeq(u64 val, volatile void __iomem *addr) +{ + writel(val, addr); + writel(val >> 32, (void *)((uintptr_t)addr + 4)); +} + +static inline void writel_relaxed(unsigned int val, volatile void __iomem *addr) +{ + rte_write32_relaxed(val, addr); +} + +/* + * Multiplies an integer by a fraction, while avoiding unnecessary + * overflow or loss of precision. + */ +static inline unsigned int mult_frac(unsigned int x, unsigned int numer, + unsigned int denom) +{ + unsigned int quot = x / denom; + unsigned int rem = x % denom; + + return (quot * numer) + ((rem * numer) / denom); +} +#endif /* _CXGBE_COMPAT_H_ */ diff --git a/src/spdk/dpdk/drivers/net/cxgbe/cxgbe_ethdev.c b/src/spdk/dpdk/drivers/net/cxgbe/cxgbe_ethdev.c new file mode 100644 index 000000000..1deee2f5c --- /dev/null +++ b/src/spdk/dpdk/drivers/net/cxgbe/cxgbe_ethdev.c @@ -0,0 +1,1259 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Chelsio Communications. + * All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "cxgbe.h" +#include "cxgbe_pfvf.h" +#include "cxgbe_flow.h" + +int cxgbe_logtype; +int cxgbe_mbox_logtype; + +/* + * Macros needed to support the PCI Device ID Table ... + */ +#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \ + static const struct rte_pci_id cxgb4_pci_tbl[] = { +#define CH_PCI_DEVICE_ID_FUNCTION 0x4 + +#define PCI_VENDOR_ID_CHELSIO 0x1425 + +#define CH_PCI_ID_TABLE_ENTRY(devid) \ + { RTE_PCI_DEVICE(PCI_VENDOR_ID_CHELSIO, (devid)) } + +#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \ + { .vendor_id = 0, } \ + } + +/* + *... and the PCI ID Table itself ... 
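Once the table macros above are in place, including base/t4_pci_id_tbl.h below expands into an ordinary rte_pci_id array. A rough sketch of the result is shown here; 0x5000 is a placeholder value, not an ID taken from the real table:

static const struct rte_pci_id cxgb4_pci_tbl[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CHELSIO, (0x5000)) },	/* placeholder */
	/* ...one CH_PCI_ID_TABLE_ENTRY() per supported Chelsio device... */
	{ .vendor_id = 0, }
};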
+ */ +#include "base/t4_pci_id_tbl.h" + +uint16_t cxgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + struct sge_eth_txq *txq = (struct sge_eth_txq *)tx_queue; + uint16_t pkts_sent, pkts_remain; + uint16_t total_sent = 0; + uint16_t idx = 0; + int ret = 0; + + t4_os_lock(&txq->txq_lock); + /* free up desc from already completed tx */ + reclaim_completed_tx(&txq->q); + rte_prefetch0(rte_pktmbuf_mtod(tx_pkts[0], volatile void *)); + while (total_sent < nb_pkts) { + pkts_remain = nb_pkts - total_sent; + + for (pkts_sent = 0; pkts_sent < pkts_remain; pkts_sent++) { + idx = total_sent + pkts_sent; + if ((idx + 1) < nb_pkts) + rte_prefetch0(rte_pktmbuf_mtod(tx_pkts[idx + 1], + volatile void *)); + ret = t4_eth_xmit(txq, tx_pkts[idx], nb_pkts); + if (ret < 0) + break; + } + if (!pkts_sent) + break; + total_sent += pkts_sent; + /* reclaim as much as possible */ + reclaim_completed_tx(&txq->q); + } + + t4_os_unlock(&txq->txq_lock); + return total_sent; +} + +uint16_t cxgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + struct sge_eth_rxq *rxq = (struct sge_eth_rxq *)rx_queue; + unsigned int work_done; + + if (cxgbe_poll(&rxq->rspq, rx_pkts, (unsigned int)nb_pkts, &work_done)) + dev_err(adapter, "error in cxgbe poll\n"); + + return work_done; +} + +int cxgbe_dev_info_get(struct rte_eth_dev *eth_dev, + struct rte_eth_dev_info *device_info) +{ + struct port_info *pi = eth_dev->data->dev_private; + struct adapter *adapter = pi->adapter; + int max_queues = adapter->sge.max_ethqsets / adapter->params.nports; + + static const struct rte_eth_desc_lim cxgbe_desc_lim = { + .nb_max = CXGBE_MAX_RING_DESC_SIZE, + .nb_min = CXGBE_MIN_RING_DESC_SIZE, + .nb_align = 1, + }; + + device_info->min_rx_bufsize = CXGBE_MIN_RX_BUFSIZE; + device_info->max_rx_pktlen = CXGBE_MAX_RX_PKTLEN; + device_info->max_rx_queues = max_queues; + device_info->max_tx_queues = max_queues; + device_info->max_mac_addrs = 1; + /* XXX: For now we support one MAC/port */ + device_info->max_vfs = adapter->params.arch.vfcount; + device_info->max_vmdq_pools = 0; /* XXX: For now no support for VMDQ */ + + device_info->rx_queue_offload_capa = 0UL; + device_info->rx_offload_capa = CXGBE_RX_OFFLOADS; + + device_info->tx_queue_offload_capa = 0UL; + device_info->tx_offload_capa = CXGBE_TX_OFFLOADS; + + device_info->reta_size = pi->rss_size; + device_info->hash_key_size = CXGBE_DEFAULT_RSS_KEY_LEN; + device_info->flow_type_rss_offloads = CXGBE_RSS_HF_ALL; + + device_info->rx_desc_lim = cxgbe_desc_lim; + device_info->tx_desc_lim = cxgbe_desc_lim; + cxgbe_get_speed_caps(pi, &device_info->speed_capa); + + return 0; +} + +int cxgbe_dev_promiscuous_enable(struct rte_eth_dev *eth_dev) +{ + struct port_info *pi = eth_dev->data->dev_private; + struct adapter *adapter = pi->adapter; + + return t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1, + 1, -1, 1, -1, false); +} + +int cxgbe_dev_promiscuous_disable(struct rte_eth_dev *eth_dev) +{ + struct port_info *pi = eth_dev->data->dev_private; + struct adapter *adapter = pi->adapter; + + return t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1, + 0, -1, 1, -1, false); +} + +int cxgbe_dev_allmulticast_enable(struct rte_eth_dev *eth_dev) +{ + struct port_info *pi = eth_dev->data->dev_private; + struct adapter *adapter = pi->adapter; + + /* TODO: address filters ?? 
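Judging from how the enable/disable pairs in these handlers differ in exactly one argument, each rx-mode parameter of t4_set_rxmode() is tri-state: -1 leaves the current setting untouched, 0 and 1 force it off or on. A hypothetical wrapper that flips only the all-multicast setting, mirroring the calls above:

/* Hypothetical helper: toggle all-multicast only; every -1 keeps the
 * corresponding setting (MTU, promiscuous mode, ...) unchanged.
 */
static int cxgbe_set_allmulti(struct rte_eth_dev *eth_dev, bool on)
{
	struct port_info *pi = eth_dev->data->dev_private;
	struct adapter *adapter = pi->adapter;

	return t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
			     -1, on ? 1 : 0, 1, -1, false);
}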
*/ + + return t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1, + -1, 1, 1, -1, false); +} + +int cxgbe_dev_allmulticast_disable(struct rte_eth_dev *eth_dev) +{ + struct port_info *pi = eth_dev->data->dev_private; + struct adapter *adapter = pi->adapter; + + /* TODO: address filters ?? */ + + return t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1, + -1, 0, 1, -1, false); +} + +int cxgbe_dev_link_update(struct rte_eth_dev *eth_dev, + int wait_to_complete) +{ + struct port_info *pi = eth_dev->data->dev_private; + struct adapter *adapter = pi->adapter; + struct sge *s = &adapter->sge; + struct rte_eth_link new_link = { 0 }; + unsigned int i, work_done, budget = 32; + u8 old_link = pi->link_cfg.link_ok; + + for (i = 0; i < CXGBE_LINK_STATUS_POLL_CNT; i++) { + if (!s->fw_evtq.desc) + break; + + cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done); + + /* Exit if link status changed or always forced up */ + if (pi->link_cfg.link_ok != old_link || + cxgbe_force_linkup(adapter)) + break; + + if (!wait_to_complete) + break; + + rte_delay_ms(CXGBE_LINK_STATUS_POLL_MS); + } + + new_link.link_status = cxgbe_force_linkup(adapter) ? + ETH_LINK_UP : pi->link_cfg.link_ok; + new_link.link_autoneg = pi->link_cfg.autoneg; + new_link.link_duplex = ETH_LINK_FULL_DUPLEX; + new_link.link_speed = pi->link_cfg.speed; + + return rte_eth_linkstatus_set(eth_dev, &new_link); +} + +/** + * Set device link up. + */ +int cxgbe_dev_set_link_up(struct rte_eth_dev *dev) +{ + struct port_info *pi = dev->data->dev_private; + struct adapter *adapter = pi->adapter; + unsigned int work_done, budget = 32; + struct sge *s = &adapter->sge; + int ret; + + if (!s->fw_evtq.desc) + return -ENOMEM; + + /* Flush all link events */ + cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done); + + /* If link already up, nothing to do */ + if (pi->link_cfg.link_ok) + return 0; + + ret = cxgbe_set_link_status(pi, true); + if (ret) + return ret; + + cxgbe_dev_link_update(dev, 1); + return 0; +} + +/** + * Set device link down. + */ +int cxgbe_dev_set_link_down(struct rte_eth_dev *dev) +{ + struct port_info *pi = dev->data->dev_private; + struct adapter *adapter = pi->adapter; + unsigned int work_done, budget = 32; + struct sge *s = &adapter->sge; + int ret; + + if (!s->fw_evtq.desc) + return -ENOMEM; + + /* Flush all link events */ + cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done); + + /* If link already down, nothing to do */ + if (!pi->link_cfg.link_ok) + return 0; + + ret = cxgbe_set_link_status(pi, false); + if (ret) + return ret; + + cxgbe_dev_link_update(dev, 0); + return 0; +} + +int cxgbe_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu) +{ + struct port_info *pi = eth_dev->data->dev_private; + struct adapter *adapter = pi->adapter; + struct rte_eth_dev_info dev_info; + int err; + uint16_t new_mtu = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN; + + err = cxgbe_dev_info_get(eth_dev, &dev_info); + if (err != 0) + return err; + + /* Must accommodate at least RTE_ETHER_MIN_MTU */ + if (new_mtu < RTE_ETHER_MIN_MTU || new_mtu > dev_info.max_rx_pktlen) + return -EINVAL; + + /* set to jumbo mode if needed */ + if (new_mtu > RTE_ETHER_MAX_LEN) + eth_dev->data->dev_conf.rxmode.offloads |= + DEV_RX_OFFLOAD_JUMBO_FRAME; + else + eth_dev->data->dev_conf.rxmode.offloads &= + ~DEV_RX_OFFLOAD_JUMBO_FRAME; + + err = t4_set_rxmode(adapter, adapter->mbox, pi->viid, new_mtu, -1, -1, + -1, -1, true); + if (!err) + eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = new_mtu; + + return err; +} + +/* + * Stop device. 
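To make the arithmetic in cxgbe_dev_mtu_set() above concrete: the requested MTU is turned into an on-wire frame length by adding the 14-byte Ethernet header and 4-byte CRC, and anything above RTE_ETHER_MAX_LEN (1518) switches the port into jumbo mode. A hypothetical helper restating that check:

/* mtu 1500 -> 1500 + 14 + 4 = 1518 == RTE_ETHER_MAX_LEN -> no jumbo frames;
 * mtu 9000 -> 9018 > 1518 -> jumbo offload, provided 9018 <= max_rx_pktlen.
 */
static bool cxgbe_mtu_needs_jumbo(uint16_t mtu)
{
	return mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN > RTE_ETHER_MAX_LEN;
}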
+ */ +void cxgbe_dev_close(struct rte_eth_dev *eth_dev) +{ + struct port_info *pi = eth_dev->data->dev_private; + struct adapter *adapter = pi->adapter; + + CXGBE_FUNC_TRACE(); + + if (!(adapter->flags & FULL_INIT_DONE)) + return; + + cxgbe_down(pi); + + /* + * We clear queues only if both tx and rx path of the port + * have been disabled + */ + t4_sge_eth_clear_queues(pi); +} + +/* Start the device. + * It returns 0 on success. + */ +int cxgbe_dev_start(struct rte_eth_dev *eth_dev) +{ + struct port_info *pi = eth_dev->data->dev_private; + struct rte_eth_rxmode *rx_conf = ð_dev->data->dev_conf.rxmode; + struct adapter *adapter = pi->adapter; + int err = 0, i; + + CXGBE_FUNC_TRACE(); + + /* + * If we don't have a connection to the firmware there's nothing we + * can do. + */ + if (!(adapter->flags & FW_OK)) { + err = -ENXIO; + goto out; + } + + if (!(adapter->flags & FULL_INIT_DONE)) { + err = cxgbe_up(adapter); + if (err < 0) + goto out; + } + + if (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER) + eth_dev->data->scattered_rx = 1; + else + eth_dev->data->scattered_rx = 0; + + cxgbe_enable_rx_queues(pi); + + err = cxgbe_setup_rss(pi); + if (err) + goto out; + + for (i = 0; i < pi->n_tx_qsets; i++) { + err = cxgbe_dev_tx_queue_start(eth_dev, i); + if (err) + goto out; + } + + for (i = 0; i < pi->n_rx_qsets; i++) { + err = cxgbe_dev_rx_queue_start(eth_dev, i); + if (err) + goto out; + } + + err = cxgbe_link_start(pi); + if (err) + goto out; + +out: + return err; +} + +/* + * Stop device: disable rx and tx functions to allow for reconfiguring. + */ +void cxgbe_dev_stop(struct rte_eth_dev *eth_dev) +{ + struct port_info *pi = eth_dev->data->dev_private; + struct adapter *adapter = pi->adapter; + + CXGBE_FUNC_TRACE(); + + if (!(adapter->flags & FULL_INIT_DONE)) + return; + + cxgbe_down(pi); + + /* + * We clear queues only if both tx and rx path of the port + * have been disabled + */ + t4_sge_eth_clear_queues(pi); + eth_dev->data->scattered_rx = 0; +} + +int cxgbe_dev_configure(struct rte_eth_dev *eth_dev) +{ + struct port_info *pi = eth_dev->data->dev_private; + struct adapter *adapter = pi->adapter; + int err; + + CXGBE_FUNC_TRACE(); + + if (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) + eth_dev->data->dev_conf.rxmode.offloads |= + DEV_RX_OFFLOAD_RSS_HASH; + + if (!(adapter->flags & FW_QUEUE_BOUND)) { + err = cxgbe_setup_sge_fwevtq(adapter); + if (err) + return err; + adapter->flags |= FW_QUEUE_BOUND; + if (is_pf4(adapter)) { + err = cxgbe_setup_sge_ctrl_txq(adapter); + if (err) + return err; + } + } + + err = cxgbe_cfg_queue_count(eth_dev); + if (err) + return err; + + return 0; +} + +int cxgbe_dev_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id) +{ + int ret; + struct sge_eth_txq *txq = (struct sge_eth_txq *) + (eth_dev->data->tx_queues[tx_queue_id]); + + dev_debug(NULL, "%s: tx_queue_id = %d\n", __func__, tx_queue_id); + + ret = t4_sge_eth_txq_start(txq); + if (ret == 0) + eth_dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; + + return ret; +} + +int cxgbe_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id) +{ + int ret; + struct sge_eth_txq *txq = (struct sge_eth_txq *) + (eth_dev->data->tx_queues[tx_queue_id]); + + dev_debug(NULL, "%s: tx_queue_id = %d\n", __func__, tx_queue_id); + + ret = t4_sge_eth_txq_stop(txq); + if (ret == 0) + eth_dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; + + return ret; +} + +int cxgbe_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, + uint16_t queue_idx, uint16_t nb_desc, 
+ unsigned int socket_id, + const struct rte_eth_txconf *tx_conf __rte_unused) +{ + struct port_info *pi = eth_dev->data->dev_private; + struct adapter *adapter = pi->adapter; + struct sge *s = &adapter->sge; + struct sge_eth_txq *txq = &s->ethtxq[pi->first_qset + queue_idx]; + int err = 0; + unsigned int temp_nb_desc; + + dev_debug(adapter, "%s: eth_dev->data->nb_tx_queues = %d; queue_idx = %d; nb_desc = %d; socket_id = %d; pi->first_qset = %u\n", + __func__, eth_dev->data->nb_tx_queues, queue_idx, nb_desc, + socket_id, pi->first_qset); + + /* Free up the existing queue */ + if (eth_dev->data->tx_queues[queue_idx]) { + cxgbe_dev_tx_queue_release(eth_dev->data->tx_queues[queue_idx]); + eth_dev->data->tx_queues[queue_idx] = NULL; + } + + eth_dev->data->tx_queues[queue_idx] = (void *)txq; + + /* Sanity Checking + * + * nb_desc should be > 1023 and <= CXGBE_MAX_RING_DESC_SIZE + */ + temp_nb_desc = nb_desc; + if (nb_desc < CXGBE_MIN_RING_DESC_SIZE) { + dev_warn(adapter, "%s: number of descriptors must be >= %d. Using default [%d]\n", + __func__, CXGBE_MIN_RING_DESC_SIZE, + CXGBE_DEFAULT_TX_DESC_SIZE); + temp_nb_desc = CXGBE_DEFAULT_TX_DESC_SIZE; + } else if (nb_desc > CXGBE_MAX_RING_DESC_SIZE) { + dev_err(adapter, "%s: number of descriptors must be between %d and %d inclusive. Default [%d]\n", + __func__, CXGBE_MIN_RING_DESC_SIZE, + CXGBE_MAX_RING_DESC_SIZE, CXGBE_DEFAULT_TX_DESC_SIZE); + return -(EINVAL); + } + + txq->q.size = temp_nb_desc; + + err = t4_sge_alloc_eth_txq(adapter, txq, eth_dev, queue_idx, + s->fw_evtq.cntxt_id, socket_id); + + dev_debug(adapter, "%s: txq->q.cntxt_id= %u txq->q.abs_id= %u err = %d\n", + __func__, txq->q.cntxt_id, txq->q.abs_id, err); + return err; +} + +void cxgbe_dev_tx_queue_release(void *q) +{ + struct sge_eth_txq *txq = (struct sge_eth_txq *)q; + + if (txq) { + struct port_info *pi = (struct port_info *) + (txq->eth_dev->data->dev_private); + struct adapter *adap = pi->adapter; + + dev_debug(adapter, "%s: pi->port_id = %d; tx_queue_id = %d\n", + __func__, pi->port_id, txq->q.cntxt_id); + + t4_sge_eth_txq_release(adap, txq); + } +} + +int cxgbe_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id) +{ + int ret; + struct port_info *pi = eth_dev->data->dev_private; + struct adapter *adap = pi->adapter; + struct sge_rspq *q; + + dev_debug(adapter, "%s: pi->port_id = %d; rx_queue_id = %d\n", + __func__, pi->port_id, rx_queue_id); + + q = eth_dev->data->rx_queues[rx_queue_id]; + + ret = t4_sge_eth_rxq_start(adap, q); + if (ret == 0) + eth_dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; + + return ret; +} + +int cxgbe_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id) +{ + int ret; + struct port_info *pi = eth_dev->data->dev_private; + struct adapter *adap = pi->adapter; + struct sge_rspq *q; + + dev_debug(adapter, "%s: pi->port_id = %d; rx_queue_id = %d\n", + __func__, pi->port_id, rx_queue_id); + + q = eth_dev->data->rx_queues[rx_queue_id]; + ret = t4_sge_eth_rxq_stop(adap, q); + if (ret == 0) + eth_dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; + + return ret; +} + +int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev, + uint16_t queue_idx, uint16_t nb_desc, + unsigned int socket_id, + const struct rte_eth_rxconf *rx_conf __rte_unused, + struct rte_mempool *mp) +{ + struct port_info *pi = eth_dev->data->dev_private; + struct adapter *adapter = pi->adapter; + struct sge *s = &adapter->sge; + struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_qset + queue_idx]; + int err = 0; + int 
msi_idx = 0; + unsigned int temp_nb_desc; + struct rte_eth_dev_info dev_info; + unsigned int pkt_len = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len; + + dev_debug(adapter, "%s: eth_dev->data->nb_rx_queues = %d; queue_idx = %d; nb_desc = %d; socket_id = %d; mp = %p\n", + __func__, eth_dev->data->nb_rx_queues, queue_idx, nb_desc, + socket_id, mp); + + err = cxgbe_dev_info_get(eth_dev, &dev_info); + if (err != 0) { + dev_err(adap, "%s: error during getting ethernet device info", + __func__); + return err; + } + + /* Must accommodate at least RTE_ETHER_MIN_MTU */ + if ((pkt_len < dev_info.min_rx_bufsize) || + (pkt_len > dev_info.max_rx_pktlen)) { + dev_err(adap, "%s: max pkt len must be > %d and <= %d\n", + __func__, dev_info.min_rx_bufsize, + dev_info.max_rx_pktlen); + return -EINVAL; + } + + /* Free up the existing queue */ + if (eth_dev->data->rx_queues[queue_idx]) { + cxgbe_dev_rx_queue_release(eth_dev->data->rx_queues[queue_idx]); + eth_dev->data->rx_queues[queue_idx] = NULL; + } + + eth_dev->data->rx_queues[queue_idx] = (void *)rxq; + + /* Sanity Checking + * + * nb_desc should be > 0 and <= CXGBE_MAX_RING_DESC_SIZE + */ + temp_nb_desc = nb_desc; + if (nb_desc < CXGBE_MIN_RING_DESC_SIZE) { + dev_warn(adapter, "%s: number of descriptors must be >= %d. Using default [%d]\n", + __func__, CXGBE_MIN_RING_DESC_SIZE, + CXGBE_DEFAULT_RX_DESC_SIZE); + temp_nb_desc = CXGBE_DEFAULT_RX_DESC_SIZE; + } else if (nb_desc > CXGBE_MAX_RING_DESC_SIZE) { + dev_err(adapter, "%s: number of descriptors must be between %d and %d inclusive. Default [%d]\n", + __func__, CXGBE_MIN_RING_DESC_SIZE, + CXGBE_MAX_RING_DESC_SIZE, CXGBE_DEFAULT_RX_DESC_SIZE); + return -(EINVAL); + } + + rxq->rspq.size = temp_nb_desc; + if ((&rxq->fl) != NULL) + rxq->fl.size = temp_nb_desc; + + /* Set to jumbo mode if necessary */ + if (pkt_len > RTE_ETHER_MAX_LEN) + eth_dev->data->dev_conf.rxmode.offloads |= + DEV_RX_OFFLOAD_JUMBO_FRAME; + else + eth_dev->data->dev_conf.rxmode.offloads &= + ~DEV_RX_OFFLOAD_JUMBO_FRAME; + + err = t4_sge_alloc_rxq(adapter, &rxq->rspq, false, eth_dev, msi_idx, + &rxq->fl, NULL, + is_pf4(adapter) ? + t4_get_tp_ch_map(adapter, pi->tx_chan) : 0, mp, + queue_idx, socket_id); + + dev_debug(adapter, "%s: err = %d; port_id = %d; cntxt_id = %u; abs_id = %u\n", + __func__, err, pi->port_id, rxq->rspq.cntxt_id, + rxq->rspq.abs_id); + return err; +} + +void cxgbe_dev_rx_queue_release(void *q) +{ + struct sge_eth_rxq *rxq = (struct sge_eth_rxq *)q; + struct sge_rspq *rq = &rxq->rspq; + + if (rq) { + struct port_info *pi = (struct port_info *) + (rq->eth_dev->data->dev_private); + struct adapter *adap = pi->adapter; + + dev_debug(adapter, "%s: pi->port_id = %d; rx_queue_id = %d\n", + __func__, pi->port_id, rxq->rspq.cntxt_id); + + t4_sge_eth_rxq_release(adap, rxq); + } +} + +/* + * Get port statistics. 
+ */ +static int cxgbe_dev_stats_get(struct rte_eth_dev *eth_dev, + struct rte_eth_stats *eth_stats) +{ + struct port_info *pi = eth_dev->data->dev_private; + struct adapter *adapter = pi->adapter; + struct sge *s = &adapter->sge; + struct port_stats ps; + unsigned int i; + + cxgbe_stats_get(pi, &ps); + + /* RX Stats */ + eth_stats->imissed = ps.rx_ovflow0 + ps.rx_ovflow1 + + ps.rx_ovflow2 + ps.rx_ovflow3 + + ps.rx_trunc0 + ps.rx_trunc1 + + ps.rx_trunc2 + ps.rx_trunc3; + eth_stats->ierrors = ps.rx_symbol_err + ps.rx_fcs_err + + ps.rx_jabber + ps.rx_too_long + ps.rx_runt + + ps.rx_len_err; + + /* TX Stats */ + eth_stats->opackets = ps.tx_frames; + eth_stats->obytes = ps.tx_octets; + eth_stats->oerrors = ps.tx_error_frames; + + for (i = 0; i < pi->n_rx_qsets; i++) { + struct sge_eth_rxq *rxq = + &s->ethrxq[pi->first_qset + i]; + + eth_stats->q_ipackets[i] = rxq->stats.pkts; + eth_stats->q_ibytes[i] = rxq->stats.rx_bytes; + eth_stats->ipackets += eth_stats->q_ipackets[i]; + eth_stats->ibytes += eth_stats->q_ibytes[i]; + } + + for (i = 0; i < pi->n_tx_qsets; i++) { + struct sge_eth_txq *txq = + &s->ethtxq[pi->first_qset + i]; + + eth_stats->q_opackets[i] = txq->stats.pkts; + eth_stats->q_obytes[i] = txq->stats.tx_bytes; + } + return 0; +} + +/* + * Reset port statistics. + */ +static int cxgbe_dev_stats_reset(struct rte_eth_dev *eth_dev) +{ + struct port_info *pi = eth_dev->data->dev_private; + struct adapter *adapter = pi->adapter; + struct sge *s = &adapter->sge; + unsigned int i; + + cxgbe_stats_reset(pi); + for (i = 0; i < pi->n_rx_qsets; i++) { + struct sge_eth_rxq *rxq = + &s->ethrxq[pi->first_qset + i]; + + rxq->stats.pkts = 0; + rxq->stats.rx_bytes = 0; + } + for (i = 0; i < pi->n_tx_qsets; i++) { + struct sge_eth_txq *txq = + &s->ethtxq[pi->first_qset + i]; + + txq->stats.pkts = 0; + txq->stats.tx_bytes = 0; + txq->stats.mapping_err = 0; + } + + return 0; +} + +static int cxgbe_flow_ctrl_get(struct rte_eth_dev *eth_dev, + struct rte_eth_fc_conf *fc_conf) +{ + struct port_info *pi = eth_dev->data->dev_private; + struct link_config *lc = &pi->link_cfg; + int rx_pause, tx_pause; + + fc_conf->autoneg = lc->fc & PAUSE_AUTONEG; + rx_pause = lc->fc & PAUSE_RX; + tx_pause = lc->fc & PAUSE_TX; + + if (rx_pause && tx_pause) + fc_conf->mode = RTE_FC_FULL; + else if (rx_pause) + fc_conf->mode = RTE_FC_RX_PAUSE; + else if (tx_pause) + fc_conf->mode = RTE_FC_TX_PAUSE; + else + fc_conf->mode = RTE_FC_NONE; + return 0; +} + +static int cxgbe_flow_ctrl_set(struct rte_eth_dev *eth_dev, + struct rte_eth_fc_conf *fc_conf) +{ + struct port_info *pi = eth_dev->data->dev_private; + struct adapter *adapter = pi->adapter; + struct link_config *lc = &pi->link_cfg; + + if (lc->pcaps & FW_PORT_CAP32_ANEG) { + if (fc_conf->autoneg) + lc->requested_fc |= PAUSE_AUTONEG; + else + lc->requested_fc &= ~PAUSE_AUTONEG; + } + + if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) || + (fc_conf->mode & RTE_FC_RX_PAUSE)) + lc->requested_fc |= PAUSE_RX; + else + lc->requested_fc &= ~PAUSE_RX; + + if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) || + (fc_conf->mode & RTE_FC_TX_PAUSE)) + lc->requested_fc |= PAUSE_TX; + else + lc->requested_fc &= ~PAUSE_TX; + + return t4_link_l1cfg(adapter, adapter->mbox, pi->tx_chan, + &pi->link_cfg); +} + +const uint32_t * +cxgbe_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev) +{ + static const uint32_t ptypes[] = { + RTE_PTYPE_L3_IPV4, + RTE_PTYPE_L3_IPV6, + RTE_PTYPE_UNKNOWN + }; + + if (eth_dev->rx_pkt_burst == cxgbe_recv_pkts) + return ptypes; + return NULL; +} + +/* Update RSS 
hash configuration + */ +static int cxgbe_dev_rss_hash_update(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf) +{ + struct port_info *pi = dev->data->dev_private; + struct adapter *adapter = pi->adapter; + int err; + + err = cxgbe_write_rss_conf(pi, rss_conf->rss_hf); + if (err) + return err; + + pi->rss_hf = rss_conf->rss_hf; + + if (rss_conf->rss_key) { + u32 key[10], mod_key[10]; + int i, j; + + memcpy(key, rss_conf->rss_key, CXGBE_DEFAULT_RSS_KEY_LEN); + + for (i = 9, j = 0; i >= 0; i--, j++) + mod_key[j] = cpu_to_be32(key[i]); + + t4_write_rss_key(adapter, mod_key, -1); + } + + return 0; +} + +/* Get RSS hash configuration + */ +static int cxgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf) +{ + struct port_info *pi = dev->data->dev_private; + struct adapter *adapter = pi->adapter; + u64 rss_hf = 0; + u64 flags = 0; + int err; + + err = t4_read_config_vi_rss(adapter, adapter->mbox, pi->viid, + &flags, NULL); + + if (err) + return err; + + if (flags & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) { + rss_hf |= CXGBE_RSS_HF_TCP_IPV6_MASK; + if (flags & F_FW_RSS_VI_CONFIG_CMD_UDPEN) + rss_hf |= CXGBE_RSS_HF_UDP_IPV6_MASK; + } + + if (flags & F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN) + rss_hf |= CXGBE_RSS_HF_IPV6_MASK; + + if (flags & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) { + rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP; + if (flags & F_FW_RSS_VI_CONFIG_CMD_UDPEN) + rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP; + } + + if (flags & F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN) + rss_hf |= CXGBE_RSS_HF_IPV4_MASK; + + rss_conf->rss_hf = rss_hf; + + if (rss_conf->rss_key) { + u32 key[10], mod_key[10]; + int i, j; + + t4_read_rss_key(adapter, key); + + for (i = 9, j = 0; i >= 0; i--, j++) + mod_key[j] = be32_to_cpu(key[i]); + + memcpy(rss_conf->rss_key, mod_key, CXGBE_DEFAULT_RSS_KEY_LEN); + } + + return 0; +} + +static int cxgbe_get_eeprom_length(struct rte_eth_dev *dev) +{ + RTE_SET_USED(dev); + return EEPROMSIZE; +} + +/** + * eeprom_ptov - translate a physical EEPROM address to virtual + * @phys_addr: the physical EEPROM address + * @fn: the PCI function number + * @sz: size of function-specific area + * + * Translate a physical EEPROM address to virtual. The first 1K is + * accessed through virtual addresses starting at 31K, the rest is + * accessed through virtual addresses starting at 0. + * + * The mapping is as follows: + * [0..1K) -> [31K..32K) + * [1K..1K+A) -> [31K-A..31K) + * [1K+A..ES) -> [0..ES-A-1K) + * + * where A = @fn * @sz, and ES = EEPROM size. + */ +static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz) +{ + fn *= sz; + if (phys_addr < 1024) + return phys_addr + (31 << 10); + if (phys_addr < 1024 + fn) + return fn + phys_addr - 1024; + if (phys_addr < EEPROMSIZE) + return phys_addr - 1024 - fn; + if (phys_addr < EEPROMVSIZE) + return phys_addr - 1024; + return -EINVAL; +} + +/* The next two routines implement eeprom read/write from physical addresses. + */ +static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v) +{ + int vaddr = eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE); + + if (vaddr >= 0) + vaddr = t4_seeprom_read(adap, vaddr, v); + return vaddr < 0 ? vaddr : 0; +} + +static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v) +{ + int vaddr = eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE); + + if (vaddr >= 0) + vaddr = t4_seeprom_write(adap, vaddr, v); + return vaddr < 0 ? 
vaddr : 0; +} + +#define EEPROM_MAGIC 0x38E2F10C + +static int cxgbe_get_eeprom(struct rte_eth_dev *dev, + struct rte_dev_eeprom_info *e) +{ + struct port_info *pi = dev->data->dev_private; + struct adapter *adapter = pi->adapter; + u32 i, err = 0; + u8 *buf = rte_zmalloc(NULL, EEPROMSIZE, 0); + + if (!buf) + return -ENOMEM; + + e->magic = EEPROM_MAGIC; + for (i = e->offset & ~3; !err && i < e->offset + e->length; i += 4) + err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]); + + if (!err) + rte_memcpy(e->data, buf + e->offset, e->length); + rte_free(buf); + return err; +} + +static int cxgbe_set_eeprom(struct rte_eth_dev *dev, + struct rte_dev_eeprom_info *eeprom) +{ + struct port_info *pi = dev->data->dev_private; + struct adapter *adapter = pi->adapter; + u8 *buf; + int err = 0; + u32 aligned_offset, aligned_len, *p; + + if (eeprom->magic != EEPROM_MAGIC) + return -EINVAL; + + aligned_offset = eeprom->offset & ~3; + aligned_len = (eeprom->length + (eeprom->offset & 3) + 3) & ~3; + + if (adapter->pf > 0) { + u32 start = 1024 + adapter->pf * EEPROMPFSIZE; + + if (aligned_offset < start || + aligned_offset + aligned_len > start + EEPROMPFSIZE) + return -EPERM; + } + + if (aligned_offset != eeprom->offset || aligned_len != eeprom->length) { + /* RMW possibly needed for first or last words. + */ + buf = rte_zmalloc(NULL, aligned_len, 0); + if (!buf) + return -ENOMEM; + err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf); + if (!err && aligned_len > 4) + err = eeprom_rd_phys(adapter, + aligned_offset + aligned_len - 4, + (u32 *)&buf[aligned_len - 4]); + if (err) + goto out; + rte_memcpy(buf + (eeprom->offset & 3), eeprom->data, + eeprom->length); + } else { + buf = eeprom->data; + } + + err = t4_seeprom_wp(adapter, false); + if (err) + goto out; + + for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) { + err = eeprom_wr_phys(adapter, aligned_offset, *p); + aligned_offset += 4; + } + + if (!err) + err = t4_seeprom_wp(adapter, true); +out: + if (buf != eeprom->data) + rte_free(buf); + return err; +} + +static int cxgbe_get_regs_len(struct rte_eth_dev *eth_dev) +{ + struct port_info *pi = eth_dev->data->dev_private; + struct adapter *adapter = pi->adapter; + + return t4_get_regs_len(adapter) / sizeof(uint32_t); +} + +static int cxgbe_get_regs(struct rte_eth_dev *eth_dev, + struct rte_dev_reg_info *regs) +{ + struct port_info *pi = eth_dev->data->dev_private; + struct adapter *adapter = pi->adapter; + + regs->version = CHELSIO_CHIP_VERSION(adapter->params.chip) | + (CHELSIO_CHIP_RELEASE(adapter->params.chip) << 10) | + (1 << 16); + + if (regs->data == NULL) { + regs->length = cxgbe_get_regs_len(eth_dev); + regs->width = sizeof(uint32_t); + + return 0; + } + + t4_get_regs(adapter, regs->data, (regs->length * sizeof(uint32_t))); + + return 0; +} + +int cxgbe_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *addr) +{ + struct port_info *pi = dev->data->dev_private; + int ret; + + ret = cxgbe_mpstcam_modify(pi, (int)pi->xact_addr_filt, (u8 *)addr); + if (ret < 0) { + dev_err(adapter, "failed to set mac addr; err = %d\n", + ret); + return ret; + } + pi->xact_addr_filt = ret; + return 0; +} + +static const struct eth_dev_ops cxgbe_eth_dev_ops = { + .dev_start = cxgbe_dev_start, + .dev_stop = cxgbe_dev_stop, + .dev_close = cxgbe_dev_close, + .promiscuous_enable = cxgbe_dev_promiscuous_enable, + .promiscuous_disable = cxgbe_dev_promiscuous_disable, + .allmulticast_enable = cxgbe_dev_allmulticast_enable, + .allmulticast_disable = cxgbe_dev_allmulticast_disable, + .dev_configure 
= cxgbe_dev_configure, + .dev_infos_get = cxgbe_dev_info_get, + .dev_supported_ptypes_get = cxgbe_dev_supported_ptypes_get, + .link_update = cxgbe_dev_link_update, + .dev_set_link_up = cxgbe_dev_set_link_up, + .dev_set_link_down = cxgbe_dev_set_link_down, + .mtu_set = cxgbe_dev_mtu_set, + .tx_queue_setup = cxgbe_dev_tx_queue_setup, + .tx_queue_start = cxgbe_dev_tx_queue_start, + .tx_queue_stop = cxgbe_dev_tx_queue_stop, + .tx_queue_release = cxgbe_dev_tx_queue_release, + .rx_queue_setup = cxgbe_dev_rx_queue_setup, + .rx_queue_start = cxgbe_dev_rx_queue_start, + .rx_queue_stop = cxgbe_dev_rx_queue_stop, + .rx_queue_release = cxgbe_dev_rx_queue_release, + .filter_ctrl = cxgbe_dev_filter_ctrl, + .stats_get = cxgbe_dev_stats_get, + .stats_reset = cxgbe_dev_stats_reset, + .flow_ctrl_get = cxgbe_flow_ctrl_get, + .flow_ctrl_set = cxgbe_flow_ctrl_set, + .get_eeprom_length = cxgbe_get_eeprom_length, + .get_eeprom = cxgbe_get_eeprom, + .set_eeprom = cxgbe_set_eeprom, + .get_reg = cxgbe_get_regs, + .rss_hash_update = cxgbe_dev_rss_hash_update, + .rss_hash_conf_get = cxgbe_dev_rss_hash_conf_get, + .mac_addr_set = cxgbe_mac_addr_set, +}; + +/* + * Initialize driver + * It returns 0 on success. + */ +static int eth_cxgbe_dev_init(struct rte_eth_dev *eth_dev) +{ + struct rte_pci_device *pci_dev; + struct port_info *pi = eth_dev->data->dev_private; + struct adapter *adapter = NULL; + char name[RTE_ETH_NAME_MAX_LEN]; + int err = 0; + + CXGBE_FUNC_TRACE(); + + eth_dev->dev_ops = &cxgbe_eth_dev_ops; + eth_dev->rx_pkt_burst = &cxgbe_recv_pkts; + eth_dev->tx_pkt_burst = &cxgbe_xmit_pkts; + pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); + + /* for secondary processes, we attach to ethdevs allocated by primary + * and do minimal initialization. + */ + if (rte_eal_process_type() != RTE_PROC_PRIMARY) { + int i; + + for (i = 1; i < MAX_NPORTS; i++) { + struct rte_eth_dev *rest_eth_dev; + char namei[RTE_ETH_NAME_MAX_LEN]; + + snprintf(namei, sizeof(namei), "%s_%d", + pci_dev->device.name, i); + rest_eth_dev = rte_eth_dev_attach_secondary(namei); + if (rest_eth_dev) { + rest_eth_dev->device = &pci_dev->device; + rest_eth_dev->dev_ops = + eth_dev->dev_ops; + rest_eth_dev->rx_pkt_burst = + eth_dev->rx_pkt_burst; + rest_eth_dev->tx_pkt_burst = + eth_dev->tx_pkt_burst; + rte_eth_dev_probing_finish(rest_eth_dev); + } + } + return 0; + } + + snprintf(name, sizeof(name), "cxgbeadapter%d", eth_dev->data->port_id); + adapter = rte_zmalloc(name, sizeof(*adapter), 0); + if (!adapter) + return -1; + + adapter->use_unpacked_mode = 1; + adapter->regs = (void *)pci_dev->mem_resource[0].addr; + if (!adapter->regs) { + dev_err(adapter, "%s: cannot map device registers\n", __func__); + err = -ENOMEM; + goto out_free_adapter; + } + adapter->pdev = pci_dev; + adapter->eth_dev = eth_dev; + pi->adapter = adapter; + + cxgbe_process_devargs(adapter); + + err = cxgbe_probe(adapter); + if (err) { + dev_err(adapter, "%s: cxgbe probe failed with err %d\n", + __func__, err); + goto out_free_adapter; + } + + return 0; + +out_free_adapter: + rte_free(adapter); + return err; +} + +static int eth_cxgbe_dev_uninit(struct rte_eth_dev *eth_dev) +{ + struct port_info *pi = eth_dev->data->dev_private; + struct adapter *adap = pi->adapter; + + /* Free up other ports and all resources */ + cxgbe_close(adap); + return 0; +} + +static int eth_cxgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, + struct rte_pci_device *pci_dev) +{ + return rte_eth_dev_pci_generic_probe(pci_dev, + sizeof(struct port_info), eth_cxgbe_dev_init); +} + +static int 
eth_cxgbe_pci_remove(struct rte_pci_device *pci_dev) +{ + return rte_eth_dev_pci_generic_remove(pci_dev, eth_cxgbe_dev_uninit); +} + +static struct rte_pci_driver rte_cxgbe_pmd = { + .id_table = cxgb4_pci_tbl, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING, + .probe = eth_cxgbe_pci_probe, + .remove = eth_cxgbe_pci_remove, +}; + +RTE_PMD_REGISTER_PCI(net_cxgbe, rte_cxgbe_pmd); +RTE_PMD_REGISTER_PCI_TABLE(net_cxgbe, cxgb4_pci_tbl); +RTE_PMD_REGISTER_KMOD_DEP(net_cxgbe, "* igb_uio | uio_pci_generic | vfio-pci"); +RTE_PMD_REGISTER_PARAM_STRING(net_cxgbe, + CXGBE_DEVARG_CMN_KEEP_OVLAN "=<0|1> " + CXGBE_DEVARG_CMN_TX_MODE_LATENCY "=<0|1> " + CXGBE_DEVARG_PF_FILTER_MODE "= " + CXGBE_DEVARG_PF_FILTER_MASK "= "); + +RTE_INIT(cxgbe_init_log) +{ + cxgbe_logtype = rte_log_register("pmd.net.cxgbe"); + if (cxgbe_logtype >= 0) + rte_log_set_level(cxgbe_logtype, RTE_LOG_NOTICE); + cxgbe_mbox_logtype = rte_log_register("pmd.net.cxgbe.mbox"); + if (cxgbe_mbox_logtype >= 0) + rte_log_set_level(cxgbe_mbox_logtype, RTE_LOG_NOTICE); +} diff --git a/src/spdk/dpdk/drivers/net/cxgbe/cxgbe_filter.c b/src/spdk/dpdk/drivers/net/cxgbe/cxgbe_filter.c new file mode 100644 index 000000000..27e96c73e --- /dev/null +++ b/src/spdk/dpdk/drivers/net/cxgbe/cxgbe_filter.c @@ -0,0 +1,1433 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Chelsio Communications. + * All rights reserved. + */ +#include + +#include "base/common.h" +#include "base/t4_tcb.h" +#include "base/t4_regs.h" +#include "cxgbe_filter.h" +#include "clip_tbl.h" +#include "l2t.h" +#include "smt.h" + +/** + * Initialize Hash Filters + */ +int cxgbe_init_hash_filter(struct adapter *adap) +{ + unsigned int n_user_filters; + unsigned int user_filter_perc; + int ret; + u32 params[7], val[7]; + +#define FW_PARAM_DEV(param) \ + (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \ + V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param)) + +#define FW_PARAM_PFVF(param) \ + (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \ + V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param) | \ + V_FW_PARAMS_PARAM_Y(0) | \ + V_FW_PARAMS_PARAM_Z(0)) + + params[0] = FW_PARAM_DEV(NTID); + ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, + params, val); + if (ret < 0) + return ret; + adap->tids.ntids = val[0]; + adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS); + + user_filter_perc = 100; + n_user_filters = mult_frac(adap->tids.nftids, + user_filter_perc, + 100); + + adap->tids.nftids = n_user_filters; + adap->params.hash_filter = 1; + return 0; +} + +/** + * Validate if the requested filter specification can be set by checking + * if the requested features have been enabled + */ +int cxgbe_validate_filter(struct adapter *adapter, + struct ch_filter_specification *fs) +{ + u32 fconf, iconf; + + /* + * Check for unconfigured fields being used. + */ + fconf = fs->cap ? 
adapter->params.tp.filter_mask : + adapter->params.tp.vlan_pri_map; + + iconf = adapter->params.tp.ingress_config; + +#define S(_field) \ + (fs->val._field || fs->mask._field) +#define U(_mask, _field) \ + (!(fconf & (_mask)) && S(_field)) + + if (U(F_PORT, iport) || U(F_ETHERTYPE, ethtype) || + U(F_PROTOCOL, proto) || U(F_MACMATCH, macidx) || + U(F_VLAN, ivlan_vld) || U(F_VNIC_ID, ovlan_vld) || + U(F_TOS, tos) || U(F_VNIC_ID, pfvf_vld)) + return -EOPNOTSUPP; + + /* Either OVLAN or PFVF match is enabled in hardware, but not both */ + if ((S(pfvf_vld) && !(iconf & F_VNIC)) || + (S(ovlan_vld) && (iconf & F_VNIC))) + return -EOPNOTSUPP; + + /* To use OVLAN or PFVF, L4 encapsulation match must not be enabled */ + if ((S(ovlan_vld) && (iconf & F_USE_ENC_IDX)) || + (S(pfvf_vld) && (iconf & F_USE_ENC_IDX))) + return -EOPNOTSUPP; + +#undef S +#undef U + + /* + * If the user is requesting that the filter action loop + * matching packets back out one of our ports, make sure that + * the egress port is in range. + */ + if (fs->action == FILTER_SWITCH && + fs->eport >= adapter->params.nports) + return -ERANGE; + + /* + * Don't allow various trivially obvious bogus out-of-range + * values ... + */ + if (fs->val.iport >= adapter->params.nports) + return -ERANGE; + + if (!fs->cap && fs->nat_mode && !adapter->params.filter2_wr_support) + return -EOPNOTSUPP; + + if (!fs->cap && fs->swapmac && !adapter->params.filter2_wr_support) + return -EOPNOTSUPP; + + return 0; +} + +/** + * Get the queue to which the traffic must be steered to. + */ +static unsigned int get_filter_steerq(struct rte_eth_dev *dev, + struct ch_filter_specification *fs) +{ + struct port_info *pi = ethdev2pinfo(dev); + struct adapter *adapter = pi->adapter; + unsigned int iq; + + /* + * If the user has requested steering matching Ingress Packets + * to a specific Queue Set, we need to make sure it's in range + * for the port and map that into the Absolute Queue ID of the + * Queue Set's Response Queue. + */ + if (!fs->dirsteer) { + iq = 0; + } else { + /* + * If the iq id is greater than the number of qsets, + * then assume it is an absolute qid. + */ + if (fs->iq < pi->n_rx_qsets) + iq = adapter->sge.ethrxq[pi->first_qset + + fs->iq].rspq.abs_id; + else + iq = fs->iq; + } + + return iq; +} + +/* Return an error number if the indicated filter isn't writable ... */ +static int writable_filter(struct filter_entry *f) +{ + if (f->locked) + return -EPERM; + if (f->pending) + return -EBUSY; + + return 0; +} + +/** + * Send CPL_SET_TCB_FIELD message + */ +static void set_tcb_field(struct adapter *adapter, unsigned int ftid, + u16 word, u64 mask, u64 val, int no_reply) +{ + struct rte_mbuf *mbuf; + struct cpl_set_tcb_field *req; + struct sge_ctrl_txq *ctrlq; + + ctrlq = &adapter->sge.ctrlq[0]; + mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool); + WARN_ON(!mbuf); + + mbuf->data_len = sizeof(*req); + mbuf->pkt_len = mbuf->data_len; + + req = rte_pktmbuf_mtod(mbuf, struct cpl_set_tcb_field *); + memset(req, 0, sizeof(*req)); + INIT_TP_WR_MIT_CPL(req, CPL_SET_TCB_FIELD, ftid); + req->reply_ctrl = cpu_to_be16(V_REPLY_CHAN(0) | + V_QUEUENO(adapter->sge.fw_evtq.abs_id) | + V_NO_REPLY(no_reply)); + req->word_cookie = cpu_to_be16(V_WORD(word) | V_COOKIE(ftid)); + req->mask = cpu_to_be64(mask); + req->val = cpu_to_be64(val); + + t4_mgmt_tx(ctrlq, mbuf); +} + +/** + * Set one of the t_flags bits in the TCB. 
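The helper below specialises set_tcb_field() for single-bit updates of the T_FLAGS TCB word: the mask and the value are the same one-bit pattern shifted to bit_pos. A minimal usage sketch, where tid 100 and bit position 11 are made-up values:

/* Ask hardware to set bit 11 of the T_FLAGS word for tid 100, no reply: */
set_tcb_tflag(adap, 100, 11, 1, 1);
/* ...which reduces to: */
set_tcb_field(adap, 100, W_TCB_T_FLAGS, 1ULL << 11, 1ULL << 11, 1);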
+ */ +static void set_tcb_tflag(struct adapter *adap, unsigned int ftid, + unsigned int bit_pos, unsigned int val, int no_reply) +{ + set_tcb_field(adap, ftid, W_TCB_T_FLAGS, 1ULL << bit_pos, + (unsigned long long)val << bit_pos, no_reply); +} + +/** + * Build a CPL_SET_TCB_FIELD message as payload of a ULP_TX_PKT command. + */ +static inline void mk_set_tcb_field_ulp(struct filter_entry *f, + struct cpl_set_tcb_field *req, + unsigned int word, + u64 mask, u64 val, u8 cookie, + int no_reply) +{ + struct ulp_txpkt *txpkt = (struct ulp_txpkt *)req; + struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1); + + txpkt->cmd_dest = cpu_to_be32(V_ULPTX_CMD(ULP_TX_PKT) | + V_ULP_TXPKT_DEST(0)); + txpkt->len = cpu_to_be32(DIV_ROUND_UP(sizeof(*req), 16)); + sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM)); + sc->len = cpu_to_be32(sizeof(*req) - sizeof(struct work_request_hdr)); + OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_SET_TCB_FIELD, f->tid)); + req->reply_ctrl = cpu_to_be16(V_NO_REPLY(no_reply) | V_REPLY_CHAN(0) | + V_QUEUENO(0)); + req->word_cookie = cpu_to_be16(V_WORD(word) | V_COOKIE(cookie)); + req->mask = cpu_to_be64(mask); + req->val = cpu_to_be64(val); + sc = (struct ulptx_idata *)(req + 1); + sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_NOOP)); + sc->len = cpu_to_be32(0); +} + +/** + * IPv6 requires 2 slots on T6 and 4 slots for cards below T6. + * IPv4 requires only 1 slot on all cards. + */ +u8 cxgbe_filter_slots(struct adapter *adap, u8 family) +{ + if (family == FILTER_TYPE_IPV6) { + if (CHELSIO_CHIP_VERSION(adap->params.chip) < CHELSIO_T6) + return 4; + + return 2; + } + + return 1; +} + +/** + * Check if entries are already filled. + */ +bool cxgbe_is_filter_set(struct tid_info *t, u32 fidx, u8 nentries) +{ + bool result = FALSE; + u32 i; + + /* Ensure there's enough slots available. */ + t4_os_lock(&t->ftid_lock); + for (i = fidx; i < fidx + nentries; i++) { + if (rte_bitmap_get(t->ftid_bmap, i)) { + result = TRUE; + break; + } + } + t4_os_unlock(&t->ftid_lock); + return result; +} + +/** + * Allocate available free entries. + */ +int cxgbe_alloc_ftid(struct adapter *adap, u8 nentries) +{ + struct tid_info *t = &adap->tids; + int pos; + int size = t->nftids; + + t4_os_lock(&t->ftid_lock); + if (nentries > 1) + pos = cxgbe_bitmap_find_free_region(t->ftid_bmap, size, + nentries); + else + pos = cxgbe_find_first_zero_bit(t->ftid_bmap, size); + t4_os_unlock(&t->ftid_lock); + + return pos < size ? pos : -1; +} + +/** + * Construct hash filter ntuple. 
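A quick reading of cxgbe_filter_slots() defined above, which the set/delete paths further down use to size and align their TCAM reservations:

u8 n;
n = cxgbe_filter_slots(adap, FILTER_TYPE_IPV4);	/* 1 slot on every chip      */
n = cxgbe_filter_slots(adap, FILTER_TYPE_IPV6);	/* 2 slots on T6, 4 below T6 */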
+ */ +static u64 hash_filter_ntuple(const struct filter_entry *f) +{ + struct adapter *adap = ethdev2adap(f->dev); + struct tp_params *tp = &adap->params.tp; + u64 ntuple = 0; + u16 tcp_proto = IPPROTO_TCP; /* TCP Protocol Number */ + + if (tp->port_shift >= 0 && f->fs.mask.iport) + ntuple |= (u64)f->fs.val.iport << tp->port_shift; + + if (tp->protocol_shift >= 0) { + if (!f->fs.val.proto) + ntuple |= (u64)tcp_proto << tp->protocol_shift; + else + ntuple |= (u64)f->fs.val.proto << tp->protocol_shift; + } + + if (tp->ethertype_shift >= 0 && f->fs.mask.ethtype) + ntuple |= (u64)(f->fs.val.ethtype) << tp->ethertype_shift; + if (tp->macmatch_shift >= 0 && f->fs.mask.macidx) + ntuple |= (u64)(f->fs.val.macidx) << tp->macmatch_shift; + if (tp->vlan_shift >= 0 && f->fs.mask.ivlan) + ntuple |= (u64)(F_FT_VLAN_VLD | f->fs.val.ivlan) << + tp->vlan_shift; + if (tp->vnic_shift >= 0) { + if ((adap->params.tp.ingress_config & F_VNIC) && + f->fs.mask.pfvf_vld) + ntuple |= (u64)(f->fs.val.pfvf_vld << 16 | + f->fs.val.pf << 13 | f->fs.val.vf) << + tp->vnic_shift; + else if (!(adap->params.tp.ingress_config & F_VNIC) && + f->fs.mask.ovlan_vld) + ntuple |= (u64)(f->fs.val.ovlan_vld << 16 | + f->fs.val.ovlan) << tp->vnic_shift; + } + if (tp->tos_shift >= 0 && f->fs.mask.tos) + ntuple |= (u64)f->fs.val.tos << tp->tos_shift; + + return ntuple; +} + +/** + * Build a CPL_ABORT_REQ message as payload of a ULP_TX_PKT command. + */ +static void mk_abort_req_ulp(struct cpl_abort_req *abort_req, + unsigned int tid) +{ + struct ulp_txpkt *txpkt = (struct ulp_txpkt *)abort_req; + struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1); + + txpkt->cmd_dest = cpu_to_be32(V_ULPTX_CMD(ULP_TX_PKT) | + V_ULP_TXPKT_DEST(0)); + txpkt->len = cpu_to_be32(DIV_ROUND_UP(sizeof(*abort_req), 16)); + sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM)); + sc->len = cpu_to_be32(sizeof(*abort_req) - + sizeof(struct work_request_hdr)); + OPCODE_TID(abort_req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, tid)); + abort_req->rsvd0 = cpu_to_be32(0); + abort_req->rsvd1 = 0; + abort_req->cmd = CPL_ABORT_NO_RST; + sc = (struct ulptx_idata *)(abort_req + 1); + sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_NOOP)); + sc->len = cpu_to_be32(0); +} + +/** + * Build a CPL_ABORT_RPL message as payload of a ULP_TX_PKT command. + */ +static void mk_abort_rpl_ulp(struct cpl_abort_rpl *abort_rpl, + unsigned int tid) +{ + struct ulp_txpkt *txpkt = (struct ulp_txpkt *)abort_rpl; + struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1); + + txpkt->cmd_dest = cpu_to_be32(V_ULPTX_CMD(ULP_TX_PKT) | + V_ULP_TXPKT_DEST(0)); + txpkt->len = cpu_to_be32(DIV_ROUND_UP(sizeof(*abort_rpl), 16)); + sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM)); + sc->len = cpu_to_be32(sizeof(*abort_rpl) - + sizeof(struct work_request_hdr)); + OPCODE_TID(abort_rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, tid)); + abort_rpl->rsvd0 = cpu_to_be32(0); + abort_rpl->rsvd1 = 0; + abort_rpl->cmd = CPL_ABORT_NO_RST; + sc = (struct ulptx_idata *)(abort_rpl + 1); + sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_NOOP)); + sc->len = cpu_to_be32(0); +} + +/** + * Delete the specified hash filter. 
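The hash-filter delete path below chains a CPL_SET_TCB_FIELD, a CPL_ABORT_REQ and a CPL_ABORT_RPL into one ULP_TX work request and pads the total length to a 16-byte multiple with cxgbe_roundup(). A standalone check of that rounding, with the macro restated from cxgbe_compat.h:

#include <assert.h>

#define cxgbe_roundup(_p, _s) (((unsigned long)(_p) + (_s - 1)) & ~(_s - 1))

int main(void)
{
	assert(cxgbe_roundup(90, 16) == 96);	/* 90 bytes padded up to 96  */
	assert(cxgbe_roundup(96, 16) == 96);	/* already a multiple of 16  */
	return 0;
}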
+ */ +static int cxgbe_del_hash_filter(struct rte_eth_dev *dev, + unsigned int filter_id, + struct filter_ctx *ctx) +{ + struct adapter *adapter = ethdev2adap(dev); + struct tid_info *t = &adapter->tids; + struct filter_entry *f; + struct sge_ctrl_txq *ctrlq; + unsigned int port_id = ethdev2pinfo(dev)->port_id; + int ret; + + if (filter_id > adapter->tids.ntids) + return -E2BIG; + + f = lookup_tid(t, filter_id); + if (!f) { + dev_err(adapter, "%s: no filter entry for filter_id = %d\n", + __func__, filter_id); + return -EINVAL; + } + + ret = writable_filter(f); + if (ret) + return ret; + + if (f->valid) { + unsigned int wrlen; + struct rte_mbuf *mbuf; + struct work_request_hdr *wr; + struct ulptx_idata *aligner; + struct cpl_set_tcb_field *req; + struct cpl_abort_req *abort_req; + struct cpl_abort_rpl *abort_rpl; + + f->ctx = ctx; + f->pending = 1; + + wrlen = cxgbe_roundup(sizeof(*wr) + + (sizeof(*req) + sizeof(*aligner)) + + sizeof(*abort_req) + sizeof(*abort_rpl), + 16); + + ctrlq = &adapter->sge.ctrlq[port_id]; + mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool); + if (!mbuf) { + dev_err(adapter, "%s: could not allocate skb ..\n", + __func__); + goto out_err; + } + + mbuf->data_len = wrlen; + mbuf->pkt_len = mbuf->data_len; + + req = rte_pktmbuf_mtod(mbuf, struct cpl_set_tcb_field *); + INIT_ULPTX_WR(req, wrlen, 0, 0); + wr = (struct work_request_hdr *)req; + wr++; + req = (struct cpl_set_tcb_field *)wr; + mk_set_tcb_field_ulp(f, req, W_TCB_RSS_INFO, + V_TCB_RSS_INFO(M_TCB_RSS_INFO), + V_TCB_RSS_INFO(adapter->sge.fw_evtq.abs_id), + 0, 1); + aligner = (struct ulptx_idata *)(req + 1); + abort_req = (struct cpl_abort_req *)(aligner + 1); + mk_abort_req_ulp(abort_req, f->tid); + abort_rpl = (struct cpl_abort_rpl *)(abort_req + 1); + mk_abort_rpl_ulp(abort_rpl, f->tid); + t4_mgmt_tx(ctrlq, mbuf); + } + return 0; + +out_err: + return -ENOMEM; +} + +/** + * Build a ACT_OPEN_REQ6 message for setting IPv6 hash filter. + */ +static void mk_act_open_req6(struct filter_entry *f, struct rte_mbuf *mbuf, + unsigned int qid_filterid, struct adapter *adap) +{ + struct cpl_t6_act_open_req6 *req = NULL; + u64 local_lo, local_hi, peer_lo, peer_hi; + u32 *lip = (u32 *)f->fs.val.lip; + u32 *fip = (u32 *)f->fs.val.fip; + + switch (CHELSIO_CHIP_VERSION(adap->params.chip)) { + case CHELSIO_T6: + req = rte_pktmbuf_mtod(mbuf, struct cpl_t6_act_open_req6 *); + + INIT_TP_WR(req, 0); + break; + default: + dev_err(adap, "%s: unsupported chip type!\n", __func__); + return; + } + + local_hi = ((u64)lip[1]) << 32 | lip[0]; + local_lo = ((u64)lip[3]) << 32 | lip[2]; + peer_hi = ((u64)fip[1]) << 32 | fip[0]; + peer_lo = ((u64)fip[3]) << 32 | fip[2]; + + OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6, + qid_filterid)); + req->local_port = cpu_to_be16(f->fs.val.lport); + req->peer_port = cpu_to_be16(f->fs.val.fport); + req->local_ip_hi = local_hi; + req->local_ip_lo = local_lo; + req->peer_ip_hi = peer_hi; + req->peer_ip_lo = peer_lo; + req->opt0 = cpu_to_be64(V_NAGLE(f->fs.newvlan == VLAN_REMOVE || + f->fs.newvlan == VLAN_REWRITE) | + V_DELACK(f->fs.hitcnts) | + V_L2T_IDX(f->l2t ? 
f->l2t->idx : 0) | + V_SMAC_SEL((cxgbe_port_viid(f->dev) & 0x7F) + << 1) | + V_TX_CHAN(f->fs.eport) | + V_ULP_MODE(ULP_MODE_NONE) | + F_TCAM_BYPASS | F_NON_OFFLOAD); + req->params = cpu_to_be64(V_FILTER_TUPLE(hash_filter_ntuple(f))); + req->opt2 = cpu_to_be32(F_RSS_QUEUE_VALID | + V_RSS_QUEUE(f->fs.iq) | + F_T5_OPT_2_VALID | + F_RX_CHANNEL | + V_SACK_EN(f->fs.swapmac) | + V_CONG_CNTRL((f->fs.action == FILTER_DROP) | + (f->fs.dirsteer << 1)) | + V_CCTRL_ECN(f->fs.action == FILTER_SWITCH)); +} + +/** + * Build a ACT_OPEN_REQ message for setting IPv4 hash filter. + */ +static void mk_act_open_req(struct filter_entry *f, struct rte_mbuf *mbuf, + unsigned int qid_filterid, struct adapter *adap) +{ + struct cpl_t6_act_open_req *req = NULL; + + switch (CHELSIO_CHIP_VERSION(adap->params.chip)) { + case CHELSIO_T6: + req = rte_pktmbuf_mtod(mbuf, struct cpl_t6_act_open_req *); + + INIT_TP_WR(req, 0); + break; + default: + dev_err(adap, "%s: unsupported chip type!\n", __func__); + return; + } + + OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, + qid_filterid)); + req->local_port = cpu_to_be16(f->fs.val.lport); + req->peer_port = cpu_to_be16(f->fs.val.fport); + req->local_ip = f->fs.val.lip[0] | f->fs.val.lip[1] << 8 | + f->fs.val.lip[2] << 16 | f->fs.val.lip[3] << 24; + req->peer_ip = f->fs.val.fip[0] | f->fs.val.fip[1] << 8 | + f->fs.val.fip[2] << 16 | f->fs.val.fip[3] << 24; + req->opt0 = cpu_to_be64(V_NAGLE(f->fs.newvlan == VLAN_REMOVE || + f->fs.newvlan == VLAN_REWRITE) | + V_DELACK(f->fs.hitcnts) | + V_L2T_IDX(f->l2t ? f->l2t->idx : 0) | + V_SMAC_SEL((cxgbe_port_viid(f->dev) & 0x7F) + << 1) | + V_TX_CHAN(f->fs.eport) | + V_ULP_MODE(ULP_MODE_NONE) | + F_TCAM_BYPASS | F_NON_OFFLOAD); + req->params = cpu_to_be64(V_FILTER_TUPLE(hash_filter_ntuple(f))); + req->opt2 = cpu_to_be32(F_RSS_QUEUE_VALID | + V_RSS_QUEUE(f->fs.iq) | + F_T5_OPT_2_VALID | + F_RX_CHANNEL | + V_SACK_EN(f->fs.swapmac) | + V_CONG_CNTRL((f->fs.action == FILTER_DROP) | + (f->fs.dirsteer << 1)) | + V_CCTRL_ECN(f->fs.action == FILTER_SWITCH)); +} + +/** + * Set the specified hash filter. + */ +static int cxgbe_set_hash_filter(struct rte_eth_dev *dev, + struct ch_filter_specification *fs, + struct filter_ctx *ctx) +{ + struct port_info *pi = ethdev2pinfo(dev); + struct adapter *adapter = pi->adapter; + struct tid_info *t = &adapter->tids; + struct filter_entry *f; + struct rte_mbuf *mbuf; + struct sge_ctrl_txq *ctrlq; + unsigned int iq; + int atid, size; + int ret = 0; + + ret = cxgbe_validate_filter(adapter, fs); + if (ret) + return ret; + + iq = get_filter_steerq(dev, fs); + + ctrlq = &adapter->sge.ctrlq[pi->port_id]; + + f = t4_os_alloc(sizeof(*f)); + if (!f) + goto out_err; + + f->fs = *fs; + f->ctx = ctx; + f->dev = dev; + f->fs.iq = iq; + + /* + * If the new filter requires loopback Destination MAC and/or VLAN + * rewriting then we need to allocate a Layer 2 Table (L2T) entry for + * the filter. 
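One detail of mk_act_open_req() a few lines above: the four IPv4 address bytes are packed least-significant-byte first, so an address stored as {192, 168, 0, 1} becomes the 32-bit value 0x0100A8C0. A standalone check of that packing:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint8_t lip[4] = { 192, 168, 0, 1 };	/* 192.168.0.1 */
	uint32_t packed = lip[0] | lip[1] << 8 | lip[2] << 16 |
			  (uint32_t)lip[3] << 24;

	assert(packed == 0x0100A8C0);
	return 0;
}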
+ */ + if (f->fs.newdmac || f->fs.newvlan == VLAN_INSERT || + f->fs.newvlan == VLAN_REWRITE) { + /* allocate L2T entry for new filter */ + f->l2t = cxgbe_l2t_alloc_switching(dev, f->fs.vlan, + f->fs.eport, f->fs.dmac); + if (!f->l2t) { + ret = -ENOMEM; + goto out_err; + } + } + + /* If the new filter requires Source MAC rewriting then we need to + * allocate a SMT entry for the filter + */ + if (f->fs.newsmac) { + f->smt = cxgbe_smt_alloc_switching(f->dev, f->fs.smac); + if (!f->smt) { + ret = -EAGAIN; + goto out_err; + } + } + + atid = cxgbe_alloc_atid(t, f); + if (atid < 0) + goto out_err; + + if (f->fs.type == FILTER_TYPE_IPV6) { + /* IPv6 hash filter */ + f->clipt = cxgbe_clip_alloc(f->dev, (u32 *)&f->fs.val.lip); + if (!f->clipt) + goto free_atid; + + size = sizeof(struct cpl_t6_act_open_req6); + mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool); + if (!mbuf) { + ret = -ENOMEM; + goto free_clip; + } + + mbuf->data_len = size; + mbuf->pkt_len = mbuf->data_len; + + mk_act_open_req6(f, mbuf, + ((adapter->sge.fw_evtq.abs_id << 14) | atid), + adapter); + } else { + /* IPv4 hash filter */ + size = sizeof(struct cpl_t6_act_open_req); + mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool); + if (!mbuf) { + ret = -ENOMEM; + goto free_atid; + } + + mbuf->data_len = size; + mbuf->pkt_len = mbuf->data_len; + + mk_act_open_req(f, mbuf, + ((adapter->sge.fw_evtq.abs_id << 14) | atid), + adapter); + } + + f->pending = 1; + t4_mgmt_tx(ctrlq, mbuf); + return 0; + +free_clip: + cxgbe_clip_release(f->dev, f->clipt); +free_atid: + cxgbe_free_atid(t, atid); + +out_err: + t4_os_free(f); + return ret; +} + +/** + * Clear a filter and release any of its resources that we own. This also + * clears the filter's "pending" status. + */ +static void clear_filter(struct filter_entry *f) +{ + if (f->clipt) + cxgbe_clip_release(f->dev, f->clipt); + + /* + * The zeroing of the filter rule below clears the filter valid, + * pending, locked flags etc. so it's all we need for + * this operation. + */ + memset(f, 0, sizeof(*f)); +} + +/** + * t4_mk_filtdelwr - create a delete filter WR + * @adap: adapter context + * @ftid: the filter ID + * @wr: the filter work request to populate + * @qid: ingress queue to receive the delete notification + * + * Creates a filter work request to delete the supplied filter. If @qid is + * negative the delete notification is suppressed. 
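A usage sketch for the helper below: del_filter_wr() always asks for a completion on the firmware event queue, while passing a negative qid sets the NOREPLY bit and suppresses the notification:

/* Delete and get notified on the firmware event queue: */
t4_mk_filtdelwr(adapter, f->tid, fwr, adapter->sge.fw_evtq.abs_id);

/* Fire-and-forget delete, no reply requested: */
t4_mk_filtdelwr(adapter, f->tid, fwr, -1);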
+ */ +static void t4_mk_filtdelwr(struct adapter *adap, unsigned int ftid, + struct fw_filter2_wr *wr, int qid) +{ + memset(wr, 0, sizeof(*wr)); + if (adap->params.filter2_wr_support) + wr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER2_WR)); + else + wr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR)); + wr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*wr) / 16)); + wr->tid_to_iq = cpu_to_be32(V_FW_FILTER_WR_TID(ftid) | + V_FW_FILTER_WR_NOREPLY(qid < 0)); + wr->del_filter_to_l2tix = cpu_to_be32(F_FW_FILTER_WR_DEL_FILTER); + if (qid >= 0) + wr->rx_chan_rx_rpl_iq = + cpu_to_be16(V_FW_FILTER_WR_RX_RPL_IQ(qid)); +} + +/** + * Create FW work request to delete the filter at a specified index + */ +static int del_filter_wr(struct rte_eth_dev *dev, unsigned int fidx) +{ + struct adapter *adapter = ethdev2adap(dev); + struct filter_entry *f = &adapter->tids.ftid_tab[fidx]; + struct rte_mbuf *mbuf; + struct fw_filter2_wr *fwr; + struct sge_ctrl_txq *ctrlq; + unsigned int port_id = ethdev2pinfo(dev)->port_id; + + ctrlq = &adapter->sge.ctrlq[port_id]; + mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool); + if (!mbuf) + return -ENOMEM; + + mbuf->data_len = sizeof(*fwr); + mbuf->pkt_len = mbuf->data_len; + + fwr = rte_pktmbuf_mtod(mbuf, struct fw_filter2_wr *); + t4_mk_filtdelwr(adapter, f->tid, fwr, adapter->sge.fw_evtq.abs_id); + + /* + * Mark the filter as "pending" and ship off the Filter Work Request. + * When we get the Work Request Reply we'll clear the pending status. + */ + f->pending = 1; + t4_mgmt_tx(ctrlq, mbuf); + return 0; +} + +static int set_filter_wr(struct rte_eth_dev *dev, unsigned int fidx) +{ + struct adapter *adapter = ethdev2adap(dev); + struct filter_entry *f = &adapter->tids.ftid_tab[fidx]; + struct rte_mbuf *mbuf; + struct fw_filter2_wr *fwr; + struct sge_ctrl_txq *ctrlq; + unsigned int port_id = ethdev2pinfo(dev)->port_id; + int ret; + + /* + * If the new filter requires loopback Destination MAC and/or VLAN + * rewriting then we need to allocate a Layer 2 Table (L2T) entry for + * the filter. + */ + if (f->fs.newvlan || f->fs.newdmac) { + /* allocate L2T entry for new filter */ + f->l2t = cxgbe_l2t_alloc_switching(f->dev, f->fs.vlan, + f->fs.eport, f->fs.dmac); + + if (!f->l2t) + return -ENOMEM; + } + + /* If the new filter requires Source MAC rewriting then we need to + * allocate a SMT entry for the filter + */ + if (f->fs.newsmac) { + f->smt = cxgbe_smt_alloc_switching(f->dev, f->fs.smac); + if (!f->smt) { + if (f->l2t) { + cxgbe_l2t_release(f->l2t); + f->l2t = NULL; + } + return -ENOMEM; + } + } + + ctrlq = &adapter->sge.ctrlq[port_id]; + mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool); + if (!mbuf) { + ret = -ENOMEM; + goto out; + } + + mbuf->data_len = sizeof(*fwr); + mbuf->pkt_len = mbuf->data_len; + + fwr = rte_pktmbuf_mtod(mbuf, struct fw_filter2_wr *); + memset(fwr, 0, sizeof(*fwr)); + + /* + * Construct the work request to set the filter. 
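One mapping worth spelling out in the work request built below: the requested VLAN action is encoded as a pair of insert/remove bits, and VLAN_REWRITE sets both, i.e. it behaves as remove-then-insert. Read directly off the INSVLAN/RMVLAN expressions:

/* f->fs.newvlan     INSVLAN   RMVLAN
 * VLAN_INSERT          1         0
 * VLAN_REMOVE          0         1
 * VLAN_REWRITE         1         1
 * (no action)          0         0
 */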
+ */ + if (adapter->params.filter2_wr_support) + fwr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER2_WR)); + else + fwr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR)); + fwr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*fwr) / 16)); + fwr->tid_to_iq = + cpu_to_be32(V_FW_FILTER_WR_TID(f->tid) | + V_FW_FILTER_WR_RQTYPE(f->fs.type) | + V_FW_FILTER_WR_NOREPLY(0) | + V_FW_FILTER_WR_IQ(f->fs.iq)); + fwr->del_filter_to_l2tix = + cpu_to_be32(V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) | + V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) | + V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) | + V_FW_FILTER_WR_SMAC(f->fs.newsmac) | + V_FW_FILTER_WR_DMAC(f->fs.newdmac) | + V_FW_FILTER_WR_INSVLAN + (f->fs.newvlan == VLAN_INSERT || + f->fs.newvlan == VLAN_REWRITE) | + V_FW_FILTER_WR_RMVLAN + (f->fs.newvlan == VLAN_REMOVE || + f->fs.newvlan == VLAN_REWRITE) | + V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) | + V_FW_FILTER_WR_TXCHAN(f->fs.eport) | + V_FW_FILTER_WR_PRIO(f->fs.prio) | + V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0)); + fwr->ethtype = cpu_to_be16(f->fs.val.ethtype); + fwr->ethtypem = cpu_to_be16(f->fs.mask.ethtype); + fwr->frag_to_ovlan_vldm = + (V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.ivlan_vld) | + V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.ivlan_vld) | + V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.ovlan_vld) | + V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.ovlan_vld)); + fwr->smac_sel = f->smt ? f->smt->hw_idx : 0; + fwr->rx_chan_rx_rpl_iq = + cpu_to_be16(V_FW_FILTER_WR_RX_CHAN(0) | + V_FW_FILTER_WR_RX_RPL_IQ(adapter->sge.fw_evtq.abs_id + )); + fwr->maci_to_matchtypem = + cpu_to_be32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) | + V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) | + V_FW_FILTER_WR_PORT(f->fs.val.iport) | + V_FW_FILTER_WR_PORTM(f->fs.mask.iport)); + fwr->ptcl = f->fs.val.proto; + fwr->ptclm = f->fs.mask.proto; + fwr->ttyp = f->fs.val.tos; + fwr->ttypm = f->fs.mask.tos; + fwr->ivlan = cpu_to_be16(f->fs.val.ivlan); + fwr->ivlanm = cpu_to_be16(f->fs.mask.ivlan); + fwr->ovlan = cpu_to_be16(f->fs.val.ovlan); + fwr->ovlanm = cpu_to_be16(f->fs.mask.ovlan); + rte_memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip)); + rte_memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm)); + rte_memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip)); + rte_memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm)); + fwr->lp = cpu_to_be16(f->fs.val.lport); + fwr->lpm = cpu_to_be16(f->fs.mask.lport); + fwr->fp = cpu_to_be16(f->fs.val.fport); + fwr->fpm = cpu_to_be16(f->fs.mask.fport); + + if (adapter->params.filter2_wr_support) { + fwr->filter_type_swapmac = + V_FW_FILTER2_WR_SWAPMAC(f->fs.swapmac); + fwr->natmode_to_ulp_type = + V_FW_FILTER2_WR_ULP_TYPE(f->fs.nat_mode ? + ULP_MODE_TCPDDP : + ULP_MODE_NONE) | + V_FW_FILTER2_WR_NATMODE(f->fs.nat_mode); + memcpy(fwr->newlip, f->fs.nat_lip, sizeof(fwr->newlip)); + memcpy(fwr->newfip, f->fs.nat_fip, sizeof(fwr->newfip)); + fwr->newlport = cpu_to_be16(f->fs.nat_lport); + fwr->newfport = cpu_to_be16(f->fs.nat_fport); + } + + /* + * Mark the filter as "pending" and ship off the Filter Work Request. + * When we get the Work Request Reply we'll clear the pending status. + */ + f->pending = 1; + t4_mgmt_tx(ctrlq, mbuf); + return 0; + +out: + return ret; +} + +/** + * Set the corresponding entries in the bitmap. 
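The two bitmap helpers below implement the reserve/rollback protocol that cxgbe_set_filter() follows further down; a condensed sketch of that sequence, with error handling trimmed:

/* 1. Check the span [filter_id, filter_id + nentries) is free. */
if (cxgbe_is_filter_set(&adapter->tids, filter_id, nentries))
	return -EBUSY;

/* 2. Reserve the slots before any work request goes to hardware. */
ret = cxgbe_set_ftid(&adapter->tids, filter_id, nentries);
if (ret)
	return ret;

/* 3. If building or sending the filter WR fails, roll the bits back. */
cxgbe_clear_ftid(&adapter->tids, filter_id, nentries);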
+ */ +static int cxgbe_set_ftid(struct tid_info *t, u32 fidx, u8 nentries) +{ + u32 i; + + t4_os_lock(&t->ftid_lock); + if (rte_bitmap_get(t->ftid_bmap, fidx)) { + t4_os_unlock(&t->ftid_lock); + return -EBUSY; + } + + for (i = fidx; i < fidx + nentries; i++) + rte_bitmap_set(t->ftid_bmap, i); + t4_os_unlock(&t->ftid_lock); + return 0; +} + +/** + * Clear the corresponding entries in the bitmap. + */ +static void cxgbe_clear_ftid(struct tid_info *t, u32 fidx, u8 nentries) +{ + u32 i; + + t4_os_lock(&t->ftid_lock); + for (i = fidx; i < fidx + nentries; i++) + rte_bitmap_clear(t->ftid_bmap, i); + t4_os_unlock(&t->ftid_lock); +} + +/** + * Check a delete filter request for validity and send it to the hardware. + * Return 0 on success, an error number otherwise. We attach any provided + * filter operation context to the internal filter specification in order to + * facilitate signaling completion of the operation. + */ +int cxgbe_del_filter(struct rte_eth_dev *dev, unsigned int filter_id, + struct ch_filter_specification *fs, + struct filter_ctx *ctx) +{ + struct port_info *pi = dev->data->dev_private; + struct adapter *adapter = pi->adapter; + struct filter_entry *f; + unsigned int chip_ver; + u8 nentries; + int ret; + + if (is_hashfilter(adapter) && fs->cap) + return cxgbe_del_hash_filter(dev, filter_id, ctx); + + if (filter_id >= adapter->tids.nftids) + return -ERANGE; + + chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip); + + /* + * Ensure IPv6 filter id is aligned on the 2 slot boundary for T6, + * and 4 slot boundary for cards below T6. + */ + if (fs->type == FILTER_TYPE_IPV6) { + if (chip_ver < CHELSIO_T6) + filter_id &= ~(0x3); + else + filter_id &= ~(0x1); + } + + nentries = cxgbe_filter_slots(adapter, fs->type); + ret = cxgbe_is_filter_set(&adapter->tids, filter_id, nentries); + if (!ret) { + dev_warn(adap, "%s: could not find filter entry: %u\n", + __func__, filter_id); + return -EINVAL; + } + + f = &adapter->tids.ftid_tab[filter_id]; + ret = writable_filter(f); + if (ret) + return ret; + + if (f->valid) { + f->ctx = ctx; + cxgbe_clear_ftid(&adapter->tids, + f->tid - adapter->tids.ftid_base, + nentries); + return del_filter_wr(dev, filter_id); + } + + /* + * If the caller has passed in a Completion Context then we need to + * mark it as a successful completion so they don't stall waiting + * for it. + */ + if (ctx) { + ctx->result = 0; + t4_complete(&ctx->completion); + } + + return 0; +} + +/** + * Check a Chelsio Filter Request for validity, convert it into our internal + * format and send it to the hardware. Return 0 on success, an error number + * otherwise. We attach any provided filter operation context to the internal + * filter specification in order to facilitate signaling completion of the + * operation. + */ +int cxgbe_set_filter(struct rte_eth_dev *dev, unsigned int filter_id, + struct ch_filter_specification *fs, + struct filter_ctx *ctx) +{ + struct port_info *pi = ethdev2pinfo(dev); + struct adapter *adapter = pi->adapter; + u8 nentries, bitoff[16] = {0}; + struct filter_entry *f; + unsigned int chip_ver; + unsigned int fidx, iq; + u32 iconf; + int ret; + + if (is_hashfilter(adapter) && fs->cap) + return cxgbe_set_hash_filter(dev, fs, ctx); + + if (filter_id >= adapter->tids.nftids) + return -ERANGE; + + chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip); + + ret = cxgbe_validate_filter(adapter, fs); + if (ret) + return ret; + + /* + * IPv6 filters occupy four slots and must be aligned on four-slot + * boundaries for T5. 
On T6, IPv6 filters occupy two-slots and + * must be aligned on two-slot boundaries. + * + * IPv4 filters only occupy a single slot and have no alignment + * requirements. + */ + fidx = filter_id; + if (fs->type == FILTER_TYPE_IPV6) { + if (chip_ver < CHELSIO_T6) + fidx &= ~(0x3); + else + fidx &= ~(0x1); + } + + if (fidx != filter_id) + return -EINVAL; + + nentries = cxgbe_filter_slots(adapter, fs->type); + ret = cxgbe_is_filter_set(&adapter->tids, filter_id, nentries); + if (ret) + return -EBUSY; + + iq = get_filter_steerq(dev, fs); + + /* + * Check to make sure that provided filter index is not + * already in use by someone else + */ + f = &adapter->tids.ftid_tab[filter_id]; + if (f->valid) + return -EBUSY; + + fidx = adapter->tids.ftid_base + filter_id; + ret = cxgbe_set_ftid(&adapter->tids, filter_id, nentries); + if (ret) + return ret; + + /* + * Check to make sure the filter requested is writable ... + */ + ret = writable_filter(f); + if (ret) { + /* Clear the bits we have set above */ + cxgbe_clear_ftid(&adapter->tids, filter_id, nentries); + return ret; + } + + /* + * Allocate a clip table entry only if we have non-zero IPv6 address + */ + if (chip_ver > CHELSIO_T5 && fs->type && + memcmp(fs->val.lip, bitoff, sizeof(bitoff))) { + f->clipt = cxgbe_clip_alloc(dev, (u32 *)&fs->val.lip); + if (!f->clipt) + goto free_tid; + } + + /* + * Convert the filter specification into our internal format. + * We copy the PF/VF specification into the Outer VLAN field + * here so the rest of the code -- including the interface to + * the firmware -- doesn't have to constantly do these checks. + */ + f->fs = *fs; + f->fs.iq = iq; + f->dev = dev; + + iconf = adapter->params.tp.ingress_config; + + /* Either PFVF or OVLAN can be active, but not both + * So, if PFVF is enabled, then overwrite the OVLAN + * fields with PFVF fields before writing the spec + * to hardware. + */ + if (iconf & F_VNIC) { + f->fs.val.ovlan = fs->val.pf << 13 | fs->val.vf; + f->fs.mask.ovlan = fs->mask.pf << 13 | fs->mask.vf; + f->fs.val.ovlan_vld = fs->val.pfvf_vld; + f->fs.mask.ovlan_vld = fs->mask.pfvf_vld; + } + + /* + * Attempt to set the filter. If we don't succeed, we clear + * it and return the failure. + */ + f->ctx = ctx; + f->tid = fidx; /* Save the actual tid */ + ret = set_filter_wr(dev, filter_id); + if (ret) + goto free_tid; + + return ret; + +free_tid: + cxgbe_clear_ftid(&adapter->tids, filter_id, nentries); + clear_filter(f); + return ret; +} + +/** + * Handle a Hash filter write reply. 
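+ * Processes the CPL_ACT_OPEN_RPL returned by firmware for a hash filter
+ * request. On success, the entry moves from the ATID table to the TID
+ * table and any requested TCB rewrites (hit counters, MAC/VLAN rewrite)
+ * are programmed; on failure, the ATID and the entry are freed.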
+ */ +void cxgbe_hash_filter_rpl(struct adapter *adap, + const struct cpl_act_open_rpl *rpl) +{ + struct tid_info *t = &adap->tids; + struct filter_entry *f; + struct filter_ctx *ctx = NULL; + unsigned int tid = GET_TID(rpl); + unsigned int ftid = G_TID_TID(G_AOPEN_ATID + (be32_to_cpu(rpl->atid_status))); + unsigned int status = G_AOPEN_STATUS(be32_to_cpu(rpl->atid_status)); + + f = lookup_atid(t, ftid); + if (!f) { + dev_warn(adap, "%s: could not find filter entry: %d\n", + __func__, ftid); + return; + } + + ctx = f->ctx; + f->ctx = NULL; + + switch (status) { + case CPL_ERR_NONE: { + f->tid = tid; + f->pending = 0; /* asynchronous setup completed */ + f->valid = 1; + + cxgbe_insert_tid(t, f, f->tid, 0); + cxgbe_free_atid(t, ftid); + if (ctx) { + ctx->tid = f->tid; + ctx->result = 0; + } + if (f->fs.hitcnts) + set_tcb_field(adap, tid, + W_TCB_TIMESTAMP, + V_TCB_TIMESTAMP(M_TCB_TIMESTAMP) | + V_TCB_T_RTT_TS_RECENT_AGE + (M_TCB_T_RTT_TS_RECENT_AGE), + V_TCB_TIMESTAMP(0ULL) | + V_TCB_T_RTT_TS_RECENT_AGE(0ULL), + 1); + if (f->fs.newdmac) + set_tcb_tflag(adap, tid, S_TF_CCTRL_ECE, 1, 1); + if (f->fs.newvlan == VLAN_INSERT || + f->fs.newvlan == VLAN_REWRITE) + set_tcb_tflag(adap, tid, S_TF_CCTRL_RFR, 1, 1); + if (f->fs.newsmac) { + set_tcb_tflag(adap, tid, S_TF_CCTRL_CWR, 1, 1); + set_tcb_field(adap, tid, W_TCB_SMAC_SEL, + V_TCB_SMAC_SEL(M_TCB_SMAC_SEL), + V_TCB_SMAC_SEL(f->smt->hw_idx), 1); + } + break; + } + default: + dev_warn(adap, "%s: filter creation failed with status = %u\n", + __func__, status); + + if (ctx) { + if (status == CPL_ERR_TCAM_FULL) + ctx->result = -EAGAIN; + else + ctx->result = -EINVAL; + } + + cxgbe_free_atid(t, ftid); + t4_os_free(f); + } + + if (ctx) + t4_complete(&ctx->completion); +} + +/** + * Handle a LE-TCAM filter write/deletion reply. + */ +void cxgbe_filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl) +{ + struct filter_entry *f = NULL; + unsigned int tid = GET_TID(rpl); + int idx, max_fidx = adap->tids.nftids; + + /* Get the corresponding filter entry for this tid */ + if (adap->tids.ftid_tab) { + /* Check this in normal filter region */ + idx = tid - adap->tids.ftid_base; + if (idx >= max_fidx) + return; + + f = &adap->tids.ftid_tab[idx]; + if (f->tid != tid) + return; + } + + /* We found the filter entry for this tid */ + if (f) { + unsigned int ret = G_COOKIE(rpl->cookie); + struct filter_ctx *ctx; + + /* + * Pull off any filter operation context attached to the + * filter. + */ + ctx = f->ctx; + f->ctx = NULL; + + if (ret == FW_FILTER_WR_FLT_ADDED) { + f->pending = 0; /* asynchronous setup completed */ + f->valid = 1; + if (ctx) { + ctx->tid = f->tid; + ctx->result = 0; + } + } else if (ret == FW_FILTER_WR_FLT_DELETED) { + /* + * Clear the filter when we get confirmation from the + * hardware that the filter has been deleted. + */ + clear_filter(f); + if (ctx) + ctx->result = 0; + } else { + /* + * Something went wrong. Issue a warning about the + * problem and clear everything out. + */ + dev_warn(adap, "filter %u setup failed with error %u\n", + idx, ret); + clear_filter(f); + if (ctx) + ctx->result = -EINVAL; + } + + if (ctx) + t4_complete(&ctx->completion); + } +} + +/* + * Retrieve the packet count for the specified filter. 
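+ * The counter is kept in the filter's TCB in adapter memory, so the
+ * value is read back through a memory window access rather than from a
+ * register.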
+ */ +int cxgbe_get_filter_count(struct adapter *adapter, unsigned int fidx, + u64 *c, int hash, bool get_byte) +{ + struct filter_entry *f; + unsigned int tcb_base, tcbaddr; + int ret; + + tcb_base = t4_read_reg(adapter, A_TP_CMM_TCB_BASE); + if (is_hashfilter(adapter) && hash) { + if (fidx < adapter->tids.ntids) { + f = adapter->tids.tid_tab[fidx]; + if (!f) + return -EINVAL; + + if (is_t5(adapter->params.chip)) { + *c = 0; + return 0; + } + tcbaddr = tcb_base + (fidx * TCB_SIZE); + goto get_count; + } else { + return -ERANGE; + } + } else { + if (fidx >= adapter->tids.nftids) + return -ERANGE; + + f = &adapter->tids.ftid_tab[fidx]; + if (!f->valid) + return -EINVAL; + + tcbaddr = tcb_base + f->tid * TCB_SIZE; + } + + f = &adapter->tids.ftid_tab[fidx]; + if (!f->valid) + return -EINVAL; + +get_count: + if (is_t5(adapter->params.chip) || is_t6(adapter->params.chip)) { + /* + * For T5, the Filter Packet Hit Count is maintained as a + * 32-bit Big Endian value in the TCB field {timestamp}. + * Similar to the craziness above, instead of the filter hit + * count showing up at offset 20 ((W_TCB_TIMESTAMP == 5) * + * sizeof(u32)), it actually shows up at offset 24. Whacky. + */ + if (get_byte) { + unsigned int word_offset = 4; + __be64 be64_byte_count; + + t4_os_lock(&adapter->win0_lock); + ret = t4_memory_rw(adapter, MEMWIN_NIC, MEM_EDC0, + tcbaddr + + (word_offset * sizeof(__be32)), + sizeof(be64_byte_count), + &be64_byte_count, + T4_MEMORY_READ); + t4_os_unlock(&adapter->win0_lock); + if (ret < 0) + return ret; + *c = be64_to_cpu(be64_byte_count); + } else { + unsigned int word_offset = 6; + __be32 be32_count; + + t4_os_lock(&adapter->win0_lock); + ret = t4_memory_rw(adapter, MEMWIN_NIC, MEM_EDC0, + tcbaddr + + (word_offset * sizeof(__be32)), + sizeof(be32_count), &be32_count, + T4_MEMORY_READ); + t4_os_unlock(&adapter->win0_lock); + if (ret < 0) + return ret; + *c = (u64)be32_to_cpu(be32_count); + } + } + return 0; +} + +/* + * Clear the packet count for the specified filter. + */ +int cxgbe_clear_filter_count(struct adapter *adapter, unsigned int fidx, + int hash, bool clear_byte) +{ + u64 tcb_mask = 0, tcb_val = 0; + struct filter_entry *f = NULL; + u16 tcb_word = 0; + + if (is_hashfilter(adapter) && hash) { + if (fidx >= adapter->tids.ntids) + return -ERANGE; + + /* No hitcounts supported for T5 hashfilters */ + if (is_t5(adapter->params.chip)) + return 0; + + f = adapter->tids.tid_tab[fidx]; + } else { + if (fidx >= adapter->tids.nftids) + return -ERANGE; + + f = &adapter->tids.ftid_tab[fidx]; + } + + if (!f || !f->valid) + return -EINVAL; + + tcb_word = W_TCB_TIMESTAMP; + tcb_mask = V_TCB_TIMESTAMP(M_TCB_TIMESTAMP); + tcb_val = V_TCB_TIMESTAMP(0ULL); + + set_tcb_field(adapter, f->tid, tcb_word, tcb_mask, tcb_val, 1); + + if (clear_byte) { + tcb_word = W_TCB_T_RTT_TS_RECENT_AGE; + tcb_mask = + V_TCB_T_RTT_TS_RECENT_AGE(M_TCB_T_RTT_TS_RECENT_AGE) | + V_TCB_T_RTSEQ_RECENT(M_TCB_T_RTSEQ_RECENT); + tcb_val = V_TCB_T_RTT_TS_RECENT_AGE(0ULL) | + V_TCB_T_RTSEQ_RECENT(0ULL); + + set_tcb_field(adapter, f->tid, tcb_word, tcb_mask, tcb_val, 1); + } + + return 0; +} + +/** + * Handle a Hash filter delete reply. 
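+ * Processes the CPL_ABORT_RPL_RSS confirming removal of a hash filter:
+ * releases any CLIP entry held by the filter, frees the TID, and wakes
+ * any caller waiting on the completion context.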
+ */ +void cxgbe_hash_del_filter_rpl(struct adapter *adap, + const struct cpl_abort_rpl_rss *rpl) +{ + struct tid_info *t = &adap->tids; + struct filter_entry *f; + struct filter_ctx *ctx = NULL; + unsigned int tid = GET_TID(rpl); + + f = lookup_tid(t, tid); + if (!f) { + dev_warn(adap, "%s: could not find filter entry: %u\n", + __func__, tid); + return; + } + + ctx = f->ctx; + f->ctx = NULL; + + f->valid = 0; + + if (f->clipt) + cxgbe_clip_release(f->dev, f->clipt); + + cxgbe_remove_tid(t, 0, tid, 0); + t4_os_free(f); + + if (ctx) { + ctx->result = 0; + t4_complete(&ctx->completion); + } +} diff --git a/src/spdk/dpdk/drivers/net/cxgbe/cxgbe_filter.h b/src/spdk/dpdk/drivers/net/cxgbe/cxgbe_filter.h new file mode 100644 index 000000000..e79c052de --- /dev/null +++ b/src/spdk/dpdk/drivers/net/cxgbe/cxgbe_filter.h @@ -0,0 +1,276 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Chelsio Communications. + * All rights reserved. + */ + +#ifndef _CXGBE_FILTER_H_ +#define _CXGBE_FILTER_H_ + +#include "base/t4_msg.h" +/* + * Defined bit width of user definable filter tuples + */ +#define ETHTYPE_BITWIDTH 16 +#define FRAG_BITWIDTH 1 +#define MACIDX_BITWIDTH 9 +#define FCOE_BITWIDTH 1 +#define IPORT_BITWIDTH 3 +#define MATCHTYPE_BITWIDTH 3 +#define PROTO_BITWIDTH 8 +#define TOS_BITWIDTH 8 +#define PF_BITWIDTH 3 +#define VF_BITWIDTH 13 +#define IVLAN_BITWIDTH 16 +#define OVLAN_BITWIDTH 16 + +/* + * Filter matching rules. These consist of a set of ingress packet field + * (value, mask) tuples. The associated ingress packet field matches the + * tuple when ((field & mask) == value). (Thus a wildcard "don't care" field + * rule can be constructed by specifying a tuple of (0, 0).) A filter rule + * matches an ingress packet when all of the individual individual field + * matching rules are true. + * + * Partial field masks are always valid, however, while it may be easy to + * understand their meanings for some fields (e.g. IP address to match a + * subnet), for others making sensible partial masks is less intuitive (e.g. + * MPS match type) ... + */ +struct ch_filter_tuple { + /* + * Compressed header matching field rules. The TP_VLAN_PRI_MAP + * register selects which of these fields will participate in the + * filter match rules -- up to a maximum of 36 bits. Because + * TP_VLAN_PRI_MAP is a global register, all filters must use the same + * set of fields. + */ + uint32_t ethtype:ETHTYPE_BITWIDTH; /* Ethernet type */ + uint32_t frag:FRAG_BITWIDTH; /* IP fragmentation header */ + uint32_t ivlan_vld:1; /* inner VLAN valid */ + uint32_t ovlan_vld:1; /* outer VLAN valid */ + uint32_t pfvf_vld:1; /* PF/VF valid */ + uint32_t macidx:MACIDX_BITWIDTH; /* exact match MAC index */ + uint32_t fcoe:FCOE_BITWIDTH; /* FCoE packet */ + uint32_t iport:IPORT_BITWIDTH; /* ingress port */ + uint32_t matchtype:MATCHTYPE_BITWIDTH; /* MPS match type */ + uint32_t proto:PROTO_BITWIDTH; /* protocol type */ + uint32_t tos:TOS_BITWIDTH; /* TOS/Traffic Type */ + uint32_t pf:PF_BITWIDTH; /* PCI-E PF ID */ + uint32_t vf:VF_BITWIDTH; /* PCI-E VF ID */ + uint32_t ivlan:IVLAN_BITWIDTH; /* inner VLAN */ + uint32_t ovlan:OVLAN_BITWIDTH; /* outer VLAN */ + + /* + * Uncompressed header matching field rules. These are always + * available for field rules. 
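+	 * Unlike the compressed fields above, their availability does not
+	 * depend on the TP_VLAN_PRI_MAP selection.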
+ */ + uint8_t lip[16]; /* local IP address (IPv4 in [3:0]) */ + uint8_t fip[16]; /* foreign IP address (IPv4 in [3:0]) */ + uint16_t lport; /* local port */ + uint16_t fport; /* foreign port */ + + /* reservations for future additions */ + uint8_t rsvd[12]; +}; + +/* + * Filter specification + */ +struct ch_filter_specification { + void *private; + /* Administrative fields for filter. */ + uint32_t hitcnts:1; /* count filter hits in TCB */ + uint32_t prio:1; /* filter has priority over active/server */ + + /* + * Fundamental filter typing. This is the one element of filter + * matching that doesn't exist as a (value, mask) tuple. + */ + uint32_t type:1; /* 0 => IPv4, 1 => IPv6 */ + uint32_t cap:1; /* 0 => LE-TCAM, 1 => Hash */ + + /* + * Packet dispatch information. Ingress packets which match the + * filter rules will be dropped, passed to the host or switched back + * out as egress packets. + */ + uint32_t action:2; /* drop, pass, switch */ + + uint32_t dirsteer:1; /* 0 => RSS, 1 => steer to iq */ + uint32_t iq:10; /* ingress queue */ + + uint32_t eport:2; /* egress port to switch packet out */ + uint32_t newsmac:1; /* rewrite source MAC address */ + uint32_t newdmac:1; /* rewrite destination MAC address */ + uint32_t swapmac:1; /* swap SMAC/DMAC for loopback packet */ + uint32_t newvlan:2; /* rewrite VLAN Tag */ + uint8_t smac[RTE_ETHER_ADDR_LEN]; /* new source MAC address */ + uint8_t dmac[RTE_ETHER_ADDR_LEN]; /* new destination MAC address */ + uint16_t vlan; /* VLAN Tag to insert */ + + /* + * Switch proxy/rewrite fields. An ingress packet which matches a + * filter with "switch" set will be looped back out as an egress + * packet -- potentially with some header rewriting. + */ + uint32_t nat_mode:3; /* specify NAT operation mode */ + + uint8_t nat_lip[16]; /* local IP to use after NAT'ing */ + uint8_t nat_fip[16]; /* foreign IP to use after NAT'ing */ + uint16_t nat_lport; /* local port number to use after NAT'ing */ + uint16_t nat_fport; /* foreign port number to use after NAT'ing */ + + /* Filter rule value/mask pairs. */ + struct ch_filter_tuple val; + struct ch_filter_tuple mask; +}; + +enum { + FILTER_PASS = 0, /* default */ + FILTER_DROP, + FILTER_SWITCH +}; + +enum { + VLAN_REMOVE = 1, + VLAN_INSERT, + VLAN_REWRITE +}; + +enum { + NAT_MODE_NONE = 0, /* No NAT performed */ + NAT_MODE_DIP, /* NAT on Dst IP */ + NAT_MODE_DIP_DP, /* NAT on Dst IP, Dst Port */ + NAT_MODE_DIP_DP_SIP, /* NAT on Dst IP, Dst Port and Src IP */ + NAT_MODE_DIP_DP_SP, /* NAT on Dst IP, Dst Port and Src Port */ + NAT_MODE_SIP_SP, /* NAT on Src IP and Src Port */ + NAT_MODE_DIP_SIP_SP, /* NAT on Dst IP, Src IP and Src Port */ + NAT_MODE_ALL /* NAT on entire 4-tuple */ +}; + +enum filter_type { + FILTER_TYPE_IPV4 = 0, + FILTER_TYPE_IPV6, +}; + +struct t4_completion { + unsigned int done; /* completion done (0 - No, 1 - Yes) */ + rte_spinlock_t lock; /* completion lock */ +}; + +/* + * Filter operation context to allow callers to wait for + * an asynchronous completion. + */ +struct filter_ctx { + struct t4_completion completion; /* completion rendezvous */ + int result; /* result of operation */ + u32 tid; /* to store tid of hash filter */ +}; + +/* + * Host shadow copy of ingress filter entry. This is in host native format + * and doesn't match the ordering or bit order, etc. of the hardware or the + * firmware command. + */ +struct filter_entry { + /* + * Administrative fields for filter. 
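+	 * These track the life cycle of the entry (valid/locked/pending),
+	 * the hardware resources it holds (CLIP, L2T and SMT entries), and
+	 * the caller's completion context.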
+ */ + u32 valid:1; /* filter allocated and valid */ + u32 locked:1; /* filter is administratively locked */ + u32 pending:1; /* filter action is pending FW reply */ + struct filter_ctx *ctx; /* caller's completion hook */ + struct clip_entry *clipt; /* CLIP Table entry for IPv6 */ + struct l2t_entry *l2t; /* Layer Two Table entry for dmac */ + struct smt_entry *smt; /* Source Mac Table entry for smac */ + struct rte_eth_dev *dev; /* Port's rte eth device */ + void *private; /* For use by apps using filter_entry */ + + /* This will store the actual tid */ + u32 tid; + + /* + * The filter itself. + */ + struct ch_filter_specification fs; +}; + +#define FILTER_ID_MAX (~0U) + +struct tid_info; +struct adapter; + +/** + * Find first clear bit in the bitmap. + */ +static inline unsigned int cxgbe_find_first_zero_bit(struct rte_bitmap *bmap, + unsigned int size) +{ + unsigned int idx; + + for (idx = 0; idx < size; idx++) + if (!rte_bitmap_get(bmap, idx)) + break; + + return idx; +} + +/** + * Find a free region of 'num' consecutive entries. + */ +static inline unsigned int +cxgbe_bitmap_find_free_region(struct rte_bitmap *bmap, unsigned int size, + unsigned int num) +{ + unsigned int idx, j, free = 0; + + if (num > size) + return size; + + for (idx = 0; idx < size; idx += num) { + for (j = 0; j < num; j++) { + if (!rte_bitmap_get(bmap, idx + j)) { + free++; + } else { + free = 0; + break; + } + } + + /* Found the Region */ + if (free == num) + break; + + /* Reached the end and still no region found */ + if ((idx + num) > size) { + idx = size; + break; + } + } + + return idx; +} + +u8 cxgbe_filter_slots(struct adapter *adap, u8 family); +bool cxgbe_is_filter_set(struct tid_info *t, u32 fidx, u8 nentries); +void cxgbe_filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl); +int cxgbe_set_filter(struct rte_eth_dev *dev, unsigned int filter_id, + struct ch_filter_specification *fs, + struct filter_ctx *ctx); +int cxgbe_del_filter(struct rte_eth_dev *dev, unsigned int filter_id, + struct ch_filter_specification *fs, + struct filter_ctx *ctx); +int cxgbe_alloc_ftid(struct adapter *adap, u8 nentries); +int cxgbe_init_hash_filter(struct adapter *adap); +void cxgbe_hash_filter_rpl(struct adapter *adap, + const struct cpl_act_open_rpl *rpl); +void cxgbe_hash_del_filter_rpl(struct adapter *adap, + const struct cpl_abort_rpl_rss *rpl); +int cxgbe_validate_filter(struct adapter *adap, + struct ch_filter_specification *fs); +int cxgbe_get_filter_count(struct adapter *adapter, unsigned int fidx, + u64 *c, int hash, bool get_byte); +int cxgbe_clear_filter_count(struct adapter *adapter, unsigned int fidx, + int hash, bool clear_byte); +#endif /* _CXGBE_FILTER_H_ */ diff --git a/src/spdk/dpdk/drivers/net/cxgbe/cxgbe_flow.c b/src/spdk/dpdk/drivers/net/cxgbe/cxgbe_flow.c new file mode 100644 index 000000000..166c39ba5 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/cxgbe/cxgbe_flow.c @@ -0,0 +1,1458 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Chelsio Communications. + * All rights reserved. 
+ */ +#include "base/common.h" +#include "cxgbe_flow.h" + +#define __CXGBE_FILL_FS(__v, __m, fs, elem, e) \ +do { \ + if ((fs)->mask.elem && ((fs)->val.elem != (__v))) \ + return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, \ + NULL, "Redefined match item with" \ + " different values found"); \ + (fs)->val.elem = (__v); \ + (fs)->mask.elem = (__m); \ +} while (0) + +#define __CXGBE_FILL_FS_MEMCPY(__v, __m, fs, elem) \ +do { \ + memcpy(&(fs)->val.elem, &(__v), sizeof(__v)); \ + memcpy(&(fs)->mask.elem, &(__m), sizeof(__m)); \ +} while (0) + +#define CXGBE_FILL_FS(v, m, elem) \ + __CXGBE_FILL_FS(v, m, fs, elem, e) + +#define CXGBE_FILL_FS_MEMCPY(v, m, elem) \ + __CXGBE_FILL_FS_MEMCPY(v, m, fs, elem) + +static int +cxgbe_validate_item(const struct rte_flow_item *i, struct rte_flow_error *e) +{ + /* rte_flow specification does not allow it. */ + if (!i->spec && (i->mask || i->last)) + return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, + i, "last or mask given without spec"); + /* + * We don't support it. + * Although, we can support values in last as 0's or last == spec. + * But this will not provide user with any additional functionality + * and will only increase the complexity for us. + */ + if (i->last) + return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, + i, "last is not supported by chelsio pmd"); + return 0; +} + +/** + * Apart from the 4-tuple IPv4/IPv6 - TCP/UDP information, + * there's only 40-bits available to store match fields. + * So, to save space, optimize filter spec for some common + * known fields that hardware can parse against incoming + * packets automatically. + */ +static void +cxgbe_tweak_filter_spec(struct adapter *adap, + struct ch_filter_specification *fs) +{ + /* Save 16-bit ethertype field space, by setting corresponding + * 1-bit flags in the filter spec for common known ethertypes. + * When hardware sees these flags, it automatically infers and + * matches incoming packets against the corresponding ethertype. 
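+	 * This is done only for exact ethertype matches (mask 0xffff) and
+	 * only when the compressed filter tuple has no ethertype field of
+	 * its own (ethertype_shift < 0).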
+ */ + if (fs->mask.ethtype == 0xffff) { + switch (fs->val.ethtype) { + case RTE_ETHER_TYPE_IPV4: + if (adap->params.tp.ethertype_shift < 0) { + fs->type = FILTER_TYPE_IPV4; + fs->val.ethtype = 0; + fs->mask.ethtype = 0; + } + break; + case RTE_ETHER_TYPE_IPV6: + if (adap->params.tp.ethertype_shift < 0) { + fs->type = FILTER_TYPE_IPV6; + fs->val.ethtype = 0; + fs->mask.ethtype = 0; + } + break; + case RTE_ETHER_TYPE_VLAN: + if (adap->params.tp.ethertype_shift < 0 && + adap->params.tp.vlan_shift >= 0) { + fs->val.ivlan_vld = 1; + fs->mask.ivlan_vld = 1; + fs->val.ethtype = 0; + fs->mask.ethtype = 0; + } + break; + case RTE_ETHER_TYPE_QINQ: + if (adap->params.tp.ethertype_shift < 0 && + adap->params.tp.vnic_shift >= 0) { + fs->val.ovlan_vld = 1; + fs->mask.ovlan_vld = 1; + fs->val.ethtype = 0; + fs->mask.ethtype = 0; + } + break; + default: + break; + } + } +} + +static void +cxgbe_fill_filter_region(struct adapter *adap, + struct ch_filter_specification *fs) +{ + struct tp_params *tp = &adap->params.tp; + u64 hash_filter_mask = tp->hash_filter_mask; + u64 ntuple_mask = 0; + + fs->cap = 0; + + if (!is_hashfilter(adap)) + return; + + if (fs->type) { + uint8_t biton[16] = {0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff}; + uint8_t bitoff[16] = {0}; + + if (!memcmp(fs->val.lip, bitoff, sizeof(bitoff)) || + !memcmp(fs->val.fip, bitoff, sizeof(bitoff)) || + memcmp(fs->mask.lip, biton, sizeof(biton)) || + memcmp(fs->mask.fip, biton, sizeof(biton))) + return; + } else { + uint32_t biton = 0xffffffff; + uint32_t bitoff = 0x0U; + + if (!memcmp(fs->val.lip, &bitoff, sizeof(bitoff)) || + !memcmp(fs->val.fip, &bitoff, sizeof(bitoff)) || + memcmp(fs->mask.lip, &biton, sizeof(biton)) || + memcmp(fs->mask.fip, &biton, sizeof(biton))) + return; + } + + if (!fs->val.lport || fs->mask.lport != 0xffff) + return; + if (!fs->val.fport || fs->mask.fport != 0xffff) + return; + + if (tp->protocol_shift >= 0) + ntuple_mask |= (u64)fs->mask.proto << tp->protocol_shift; + if (tp->ethertype_shift >= 0) + ntuple_mask |= (u64)fs->mask.ethtype << tp->ethertype_shift; + if (tp->port_shift >= 0) + ntuple_mask |= (u64)fs->mask.iport << tp->port_shift; + if (tp->macmatch_shift >= 0) + ntuple_mask |= (u64)fs->mask.macidx << tp->macmatch_shift; + if (tp->vlan_shift >= 0 && fs->mask.ivlan_vld) + ntuple_mask |= (u64)(F_FT_VLAN_VLD | fs->mask.ivlan) << + tp->vlan_shift; + if (tp->vnic_shift >= 0) { + if (fs->mask.ovlan_vld) + ntuple_mask |= (u64)(fs->val.ovlan_vld << 16 | + fs->mask.ovlan) << tp->vnic_shift; + else if (fs->mask.pfvf_vld) + ntuple_mask |= (u64)(fs->mask.pfvf_vld << 16 | + fs->mask.pf << 13 | + fs->mask.vf) << tp->vnic_shift; + } + if (tp->tos_shift >= 0) + ntuple_mask |= (u64)fs->mask.tos << tp->tos_shift; + + if (ntuple_mask != hash_filter_mask) + return; + + fs->cap = 1; /* use hash region */ +} + +static int +ch_rte_parsetype_eth(const void *dmask, const struct rte_flow_item *item, + struct ch_filter_specification *fs, + struct rte_flow_error *e) +{ + const struct rte_flow_item_eth *spec = item->spec; + const struct rte_flow_item_eth *umask = item->mask; + const struct rte_flow_item_eth *mask; + + /* If user has not given any mask, then use chelsio supported mask. */ + mask = umask ? 
umask : (const struct rte_flow_item_eth *)dmask; + + if (!spec) + return 0; + + /* we don't support SRC_MAC filtering*/ + if (!rte_is_zero_ether_addr(&mask->src)) + return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, + item, + "src mac filtering not supported"); + + if (!rte_is_zero_ether_addr(&mask->dst)) { + const u8 *addr = (const u8 *)&spec->dst.addr_bytes[0]; + const u8 *m = (const u8 *)&mask->dst.addr_bytes[0]; + struct rte_flow *flow = (struct rte_flow *)fs->private; + struct port_info *pi = (struct port_info *) + (flow->dev->data->dev_private); + int idx; + + idx = cxgbe_mpstcam_alloc(pi, addr, m); + if (idx <= 0) + return rte_flow_error_set(e, idx, + RTE_FLOW_ERROR_TYPE_ITEM, + NULL, "unable to allocate mac" + " entry in h/w"); + CXGBE_FILL_FS(idx, 0x1ff, macidx); + } + + CXGBE_FILL_FS(be16_to_cpu(spec->type), + be16_to_cpu(mask->type), ethtype); + + return 0; +} + +static int +ch_rte_parsetype_port(const void *dmask, const struct rte_flow_item *item, + struct ch_filter_specification *fs, + struct rte_flow_error *e) +{ + const struct rte_flow_item_phy_port *val = item->spec; + const struct rte_flow_item_phy_port *umask = item->mask; + const struct rte_flow_item_phy_port *mask; + + mask = umask ? umask : (const struct rte_flow_item_phy_port *)dmask; + + if (!val) + return 0; /* Wildcard, match all physical ports */ + + if (val->index > 0x7) + return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, + item, + "port index up to 0x7 is supported"); + + CXGBE_FILL_FS(val->index, mask->index, iport); + + return 0; +} + +static int +ch_rte_parsetype_vlan(const void *dmask, const struct rte_flow_item *item, + struct ch_filter_specification *fs, + struct rte_flow_error *e) +{ + const struct rte_flow_item_vlan *spec = item->spec; + const struct rte_flow_item_vlan *umask = item->mask; + const struct rte_flow_item_vlan *mask; + + /* If user has not given any mask, then use chelsio supported mask. */ + mask = umask ? umask : (const struct rte_flow_item_vlan *)dmask; + + if (!fs->mask.ethtype) + return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Can't parse VLAN item without knowing ethertype"); + + /* If ethertype is already set and is not VLAN (0x8100) or + * QINQ(0x88A8), then don't proceed further. Otherwise, + * reset the outer ethertype, so that it can be replaced by + * innermost ethertype. Note that hardware will automatically + * match against VLAN or QINQ packets, based on 'ivlan_vld' or + * 'ovlan_vld' bit set in Chelsio filter spec, respectively. 
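+	 * The inner ethertype carried in the VLAN item is stored in its
+	 * place, which is what the IPv4/IPv6 item parsers check against.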
+ */ + if (fs->mask.ethtype) { + if (fs->val.ethtype != RTE_ETHER_TYPE_VLAN && + fs->val.ethtype != RTE_ETHER_TYPE_QINQ) + return rte_flow_error_set(e, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Ethertype must be 0x8100 or 0x88a8"); + } + + if (fs->val.ethtype == RTE_ETHER_TYPE_QINQ) { + CXGBE_FILL_FS(1, 1, ovlan_vld); + if (spec) { + CXGBE_FILL_FS(be16_to_cpu(spec->tci), + be16_to_cpu(mask->tci), ovlan); + + fs->mask.ethtype = 0; + fs->val.ethtype = 0; + } + } else if (fs->val.ethtype == RTE_ETHER_TYPE_VLAN) { + CXGBE_FILL_FS(1, 1, ivlan_vld); + if (spec) { + CXGBE_FILL_FS(be16_to_cpu(spec->tci), + be16_to_cpu(mask->tci), ivlan); + + fs->mask.ethtype = 0; + fs->val.ethtype = 0; + } + } + + if (spec) + CXGBE_FILL_FS(be16_to_cpu(spec->inner_type), + be16_to_cpu(mask->inner_type), ethtype); + + return 0; +} + +static int +ch_rte_parsetype_pf(const void *dmask __rte_unused, + const struct rte_flow_item *item __rte_unused, + struct ch_filter_specification *fs, + struct rte_flow_error *e __rte_unused) +{ + struct rte_flow *flow = (struct rte_flow *)fs->private; + struct rte_eth_dev *dev = flow->dev; + struct adapter *adap = ethdev2adap(dev); + + CXGBE_FILL_FS(1, 1, pfvf_vld); + + CXGBE_FILL_FS(adap->pf, 0x7, pf); + return 0; +} + +static int +ch_rte_parsetype_vf(const void *dmask, const struct rte_flow_item *item, + struct ch_filter_specification *fs, + struct rte_flow_error *e) +{ + const struct rte_flow_item_vf *umask = item->mask; + const struct rte_flow_item_vf *val = item->spec; + const struct rte_flow_item_vf *mask; + + /* If user has not given any mask, then use chelsio supported mask. */ + mask = umask ? umask : (const struct rte_flow_item_vf *)dmask; + + CXGBE_FILL_FS(1, 1, pfvf_vld); + + if (!val) + return 0; /* Wildcard, match all Vf */ + + if (val->id > UCHAR_MAX) + return rte_flow_error_set(e, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "VF ID > MAX(255)"); + + CXGBE_FILL_FS(val->id, mask->id, vf); + + return 0; +} + +static int +ch_rte_parsetype_udp(const void *dmask, const struct rte_flow_item *item, + struct ch_filter_specification *fs, + struct rte_flow_error *e) +{ + const struct rte_flow_item_udp *val = item->spec; + const struct rte_flow_item_udp *umask = item->mask; + const struct rte_flow_item_udp *mask; + + mask = umask ? umask : (const struct rte_flow_item_udp *)dmask; + + if (mask->hdr.dgram_len || mask->hdr.dgram_cksum) + return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, + item, + "udp: only src/dst port supported"); + + CXGBE_FILL_FS(IPPROTO_UDP, 0xff, proto); + if (!val) + return 0; + CXGBE_FILL_FS(be16_to_cpu(val->hdr.src_port), + be16_to_cpu(mask->hdr.src_port), fport); + CXGBE_FILL_FS(be16_to_cpu(val->hdr.dst_port), + be16_to_cpu(mask->hdr.dst_port), lport); + return 0; +} + +static int +ch_rte_parsetype_tcp(const void *dmask, const struct rte_flow_item *item, + struct ch_filter_specification *fs, + struct rte_flow_error *e) +{ + const struct rte_flow_item_tcp *val = item->spec; + const struct rte_flow_item_tcp *umask = item->mask; + const struct rte_flow_item_tcp *mask; + + mask = umask ? 
umask : (const struct rte_flow_item_tcp *)dmask; + + if (mask->hdr.sent_seq || mask->hdr.recv_ack || mask->hdr.data_off || + mask->hdr.tcp_flags || mask->hdr.rx_win || mask->hdr.cksum || + mask->hdr.tcp_urp) + return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, + item, + "tcp: only src/dst port supported"); + + CXGBE_FILL_FS(IPPROTO_TCP, 0xff, proto); + if (!val) + return 0; + CXGBE_FILL_FS(be16_to_cpu(val->hdr.src_port), + be16_to_cpu(mask->hdr.src_port), fport); + CXGBE_FILL_FS(be16_to_cpu(val->hdr.dst_port), + be16_to_cpu(mask->hdr.dst_port), lport); + return 0; +} + +static int +ch_rte_parsetype_ipv4(const void *dmask, const struct rte_flow_item *item, + struct ch_filter_specification *fs, + struct rte_flow_error *e) +{ + const struct rte_flow_item_ipv4 *val = item->spec; + const struct rte_flow_item_ipv4 *umask = item->mask; + const struct rte_flow_item_ipv4 *mask; + + mask = umask ? umask : (const struct rte_flow_item_ipv4 *)dmask; + + if (mask->hdr.time_to_live) + return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, + item, "ttl is not supported"); + + if (fs->mask.ethtype && + (fs->val.ethtype != RTE_ETHER_TYPE_IPV4)) + return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Couldn't find IPv4 ethertype"); + fs->type = FILTER_TYPE_IPV4; + if (!val) + return 0; /* ipv4 wild card */ + + CXGBE_FILL_FS(val->hdr.next_proto_id, mask->hdr.next_proto_id, proto); + CXGBE_FILL_FS_MEMCPY(val->hdr.dst_addr, mask->hdr.dst_addr, lip); + CXGBE_FILL_FS_MEMCPY(val->hdr.src_addr, mask->hdr.src_addr, fip); + CXGBE_FILL_FS(val->hdr.type_of_service, mask->hdr.type_of_service, tos); + + return 0; +} + +static int +ch_rte_parsetype_ipv6(const void *dmask, const struct rte_flow_item *item, + struct ch_filter_specification *fs, + struct rte_flow_error *e) +{ + const struct rte_flow_item_ipv6 *val = item->spec; + const struct rte_flow_item_ipv6 *umask = item->mask; + const struct rte_flow_item_ipv6 *mask; + u32 vtc_flow, vtc_flow_mask; + + mask = umask ? umask : (const struct rte_flow_item_ipv6 *)dmask; + + vtc_flow_mask = be32_to_cpu(mask->hdr.vtc_flow); + + if (vtc_flow_mask & RTE_IPV6_HDR_FL_MASK || + mask->hdr.payload_len || mask->hdr.hop_limits) + return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, + item, + "flow/hop are not supported"); + + if (fs->mask.ethtype && + (fs->val.ethtype != RTE_ETHER_TYPE_IPV6)) + return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Couldn't find IPv6 ethertype"); + fs->type = FILTER_TYPE_IPV6; + if (!val) + return 0; /* ipv6 wild card */ + + CXGBE_FILL_FS(val->hdr.proto, mask->hdr.proto, proto); + + vtc_flow = be32_to_cpu(val->hdr.vtc_flow); + CXGBE_FILL_FS((vtc_flow & RTE_IPV6_HDR_TC_MASK) >> + RTE_IPV6_HDR_TC_SHIFT, + (vtc_flow_mask & RTE_IPV6_HDR_TC_MASK) >> + RTE_IPV6_HDR_TC_SHIFT, + tos); + + CXGBE_FILL_FS_MEMCPY(val->hdr.dst_addr, mask->hdr.dst_addr, lip); + CXGBE_FILL_FS_MEMCPY(val->hdr.src_addr, mask->hdr.src_addr, fip); + + return 0; +} + +static int +cxgbe_rtef_parse_attr(struct rte_flow *flow, const struct rte_flow_attr *attr, + struct rte_flow_error *e) +{ + if (attr->egress) + return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR, + attr, "attribute: is" + " not supported !"); + if (attr->group > 0) + return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR, + attr, "group parameter is" + " not supported."); + + flow->fidx = attr->priority ? 
attr->priority - 1 : FILTER_ID_MAX; + + return 0; +} + +static inline int check_rxq(struct rte_eth_dev *dev, uint16_t rxq) +{ + struct port_info *pi = ethdev2pinfo(dev); + + if (rxq > pi->n_rx_qsets) + return -EINVAL; + return 0; +} + +static int cxgbe_validate_fidxondel(struct filter_entry *f, unsigned int fidx) +{ + struct adapter *adap = ethdev2adap(f->dev); + struct ch_filter_specification fs = f->fs; + u8 nentries; + + if (fidx >= adap->tids.nftids) { + dev_err(adap, "invalid flow index %d.\n", fidx); + return -EINVAL; + } + + nentries = cxgbe_filter_slots(adap, fs.type); + if (!cxgbe_is_filter_set(&adap->tids, fidx, nentries)) { + dev_err(adap, "Already free fidx:%d f:%p\n", fidx, f); + return -EINVAL; + } + + return 0; +} + +static int +cxgbe_validate_fidxonadd(struct ch_filter_specification *fs, + struct adapter *adap, unsigned int fidx) +{ + u8 nentries; + + nentries = cxgbe_filter_slots(adap, fs->type); + if (cxgbe_is_filter_set(&adap->tids, fidx, nentries)) { + dev_err(adap, "filter index: %d is busy.\n", fidx); + return -EBUSY; + } + + if (fidx >= adap->tids.nftids) { + dev_err(adap, "filter index (%u) >= max(%u)\n", + fidx, adap->tids.nftids); + return -ERANGE; + } + + return 0; +} + +static int +cxgbe_verify_fidx(struct rte_flow *flow, unsigned int fidx, uint8_t del) +{ + if (flow->fs.cap) + return 0; /* Hash filters */ + return del ? cxgbe_validate_fidxondel(flow->f, fidx) : + cxgbe_validate_fidxonadd(&flow->fs, + ethdev2adap(flow->dev), fidx); +} + +static int cxgbe_get_fidx(struct rte_flow *flow, unsigned int *fidx) +{ + struct ch_filter_specification *fs = &flow->fs; + struct adapter *adap = ethdev2adap(flow->dev); + + /* For tcam get the next available slot, if default value specified */ + if (flow->fidx == FILTER_ID_MAX) { + u8 nentries; + int idx; + + nentries = cxgbe_filter_slots(adap, fs->type); + idx = cxgbe_alloc_ftid(adap, nentries); + if (idx < 0) { + dev_err(adap, "unable to get a filter index in tcam\n"); + return -ENOMEM; + } + *fidx = (unsigned int)idx; + } else { + *fidx = flow->fidx; + } + + return 0; +} + +static int +cxgbe_get_flow_item_index(const struct rte_flow_item items[], u32 type) +{ + const struct rte_flow_item *i; + int j, index = -ENOENT; + + for (i = items, j = 0; i->type != RTE_FLOW_ITEM_TYPE_END; i++, j++) { + if (i->type == type) { + index = j; + break; + } + } + + return index; +} + +static int +ch_rte_parse_nat(uint8_t nmode, struct ch_filter_specification *fs) +{ + /* nmode: + * BIT_0 = [src_ip], BIT_1 = [dst_ip] + * BIT_2 = [src_port], BIT_3 = [dst_port] + * + * Only below cases are supported as per our spec. 
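+	 * For example, nmode 1010b (rewrite destination IP and destination
+	 * port) maps to NAT_MODE_DIP_DP.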
+ */ + switch (nmode) { + case 0: /* 0000b */ + fs->nat_mode = NAT_MODE_NONE; + break; + case 2: /* 0010b */ + fs->nat_mode = NAT_MODE_DIP; + break; + case 5: /* 0101b */ + fs->nat_mode = NAT_MODE_SIP_SP; + break; + case 7: /* 0111b */ + fs->nat_mode = NAT_MODE_DIP_SIP_SP; + break; + case 10: /* 1010b */ + fs->nat_mode = NAT_MODE_DIP_DP; + break; + case 11: /* 1011b */ + fs->nat_mode = NAT_MODE_DIP_DP_SIP; + break; + case 14: /* 1110b */ + fs->nat_mode = NAT_MODE_DIP_DP_SP; + break; + case 15: /* 1111b */ + fs->nat_mode = NAT_MODE_ALL; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int +ch_rte_parse_atype_switch(const struct rte_flow_action *a, + const struct rte_flow_item items[], + uint8_t *nmode, + struct ch_filter_specification *fs, + struct rte_flow_error *e) +{ + const struct rte_flow_action_of_set_vlan_vid *vlanid; + const struct rte_flow_action_of_set_vlan_pcp *vlanpcp; + const struct rte_flow_action_of_push_vlan *pushvlan; + const struct rte_flow_action_set_ipv4 *ipv4; + const struct rte_flow_action_set_ipv6 *ipv6; + const struct rte_flow_action_set_tp *tp_port; + const struct rte_flow_action_phy_port *port; + const struct rte_flow_action_set_mac *mac; + int item_index; + u16 tmp_vlan; + + switch (a->type) { + case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID: + vlanid = (const struct rte_flow_action_of_set_vlan_vid *) + a->conf; + /* If explicitly asked to push a new VLAN header, + * then don't set rewrite mode. Otherwise, the + * incoming VLAN packets will get their VLAN fields + * rewritten, instead of adding an additional outer + * VLAN header. + */ + if (fs->newvlan != VLAN_INSERT) + fs->newvlan = VLAN_REWRITE; + tmp_vlan = fs->vlan & 0xe000; + fs->vlan = (be16_to_cpu(vlanid->vlan_vid) & 0xfff) | tmp_vlan; + break; + case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP: + vlanpcp = (const struct rte_flow_action_of_set_vlan_pcp *) + a->conf; + /* If explicitly asked to push a new VLAN header, + * then don't set rewrite mode. Otherwise, the + * incoming VLAN packets will get their VLAN fields + * rewritten, instead of adding an additional outer + * VLAN header. 
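+		 * Only the top three PCP bits of the stored VLAN tag are
+		 * updated here; a VID set by a preceding OF_SET_VLAN_VID
+		 * action is preserved in the low 12 bits.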
+ */ + if (fs->newvlan != VLAN_INSERT) + fs->newvlan = VLAN_REWRITE; + tmp_vlan = fs->vlan & 0xfff; + fs->vlan = (vlanpcp->vlan_pcp << 13) | tmp_vlan; + break; + case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN: + pushvlan = (const struct rte_flow_action_of_push_vlan *) + a->conf; + if (be16_to_cpu(pushvlan->ethertype) != RTE_ETHER_TYPE_VLAN) + return rte_flow_error_set(e, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, a, + "only ethertype 0x8100 " + "supported for push vlan."); + fs->newvlan = VLAN_INSERT; + break; + case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN: + fs->newvlan = VLAN_REMOVE; + break; + case RTE_FLOW_ACTION_TYPE_PHY_PORT: + port = (const struct rte_flow_action_phy_port *)a->conf; + fs->eport = port->index; + break; + case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC: + item_index = cxgbe_get_flow_item_index(items, + RTE_FLOW_ITEM_TYPE_IPV4); + if (item_index < 0) + return rte_flow_error_set(e, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, a, + "No RTE_FLOW_ITEM_TYPE_IPV4 " + "found."); + + ipv4 = (const struct rte_flow_action_set_ipv4 *)a->conf; + memcpy(fs->nat_fip, &ipv4->ipv4_addr, sizeof(ipv4->ipv4_addr)); + *nmode |= 1 << 0; + break; + case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST: + item_index = cxgbe_get_flow_item_index(items, + RTE_FLOW_ITEM_TYPE_IPV4); + if (item_index < 0) + return rte_flow_error_set(e, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, a, + "No RTE_FLOW_ITEM_TYPE_IPV4 " + "found."); + + ipv4 = (const struct rte_flow_action_set_ipv4 *)a->conf; + memcpy(fs->nat_lip, &ipv4->ipv4_addr, sizeof(ipv4->ipv4_addr)); + *nmode |= 1 << 1; + break; + case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC: + item_index = cxgbe_get_flow_item_index(items, + RTE_FLOW_ITEM_TYPE_IPV6); + if (item_index < 0) + return rte_flow_error_set(e, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, a, + "No RTE_FLOW_ITEM_TYPE_IPV6 " + "found."); + + ipv6 = (const struct rte_flow_action_set_ipv6 *)a->conf; + memcpy(fs->nat_fip, ipv6->ipv6_addr, sizeof(ipv6->ipv6_addr)); + *nmode |= 1 << 0; + break; + case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST: + item_index = cxgbe_get_flow_item_index(items, + RTE_FLOW_ITEM_TYPE_IPV6); + if (item_index < 0) + return rte_flow_error_set(e, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, a, + "No RTE_FLOW_ITEM_TYPE_IPV6 " + "found."); + + ipv6 = (const struct rte_flow_action_set_ipv6 *)a->conf; + memcpy(fs->nat_lip, ipv6->ipv6_addr, sizeof(ipv6->ipv6_addr)); + *nmode |= 1 << 1; + break; + case RTE_FLOW_ACTION_TYPE_SET_TP_SRC: + item_index = cxgbe_get_flow_item_index(items, + RTE_FLOW_ITEM_TYPE_TCP); + if (item_index < 0) { + item_index = + cxgbe_get_flow_item_index(items, + RTE_FLOW_ITEM_TYPE_UDP); + if (item_index < 0) + return rte_flow_error_set(e, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, a, + "No RTE_FLOW_ITEM_TYPE_TCP or " + "RTE_FLOW_ITEM_TYPE_UDP found"); + } + + tp_port = (const struct rte_flow_action_set_tp *)a->conf; + fs->nat_fport = be16_to_cpu(tp_port->port); + *nmode |= 1 << 2; + break; + case RTE_FLOW_ACTION_TYPE_SET_TP_DST: + item_index = cxgbe_get_flow_item_index(items, + RTE_FLOW_ITEM_TYPE_TCP); + if (item_index < 0) { + item_index = + cxgbe_get_flow_item_index(items, + RTE_FLOW_ITEM_TYPE_UDP); + if (item_index < 0) + return rte_flow_error_set(e, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, a, + "No RTE_FLOW_ITEM_TYPE_TCP or " + "RTE_FLOW_ITEM_TYPE_UDP found"); + } + + tp_port = (const struct rte_flow_action_set_tp *)a->conf; + fs->nat_lport = be16_to_cpu(tp_port->port); + *nmode |= 1 << 3; + break; + case RTE_FLOW_ACTION_TYPE_MAC_SWAP: + item_index = cxgbe_get_flow_item_index(items, + RTE_FLOW_ITEM_TYPE_ETH); + if (item_index < 0) + return 
rte_flow_error_set(e, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, a, + "No RTE_FLOW_ITEM_TYPE_ETH " + "found"); + fs->swapmac = 1; + break; + case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC: + item_index = cxgbe_get_flow_item_index(items, + RTE_FLOW_ITEM_TYPE_ETH); + if (item_index < 0) + return rte_flow_error_set(e, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, a, + "No RTE_FLOW_ITEM_TYPE_ETH " + "found"); + mac = (const struct rte_flow_action_set_mac *)a->conf; + + fs->newsmac = 1; + memcpy(fs->smac, mac->mac_addr, sizeof(fs->smac)); + break; + case RTE_FLOW_ACTION_TYPE_SET_MAC_DST: + item_index = cxgbe_get_flow_item_index(items, + RTE_FLOW_ITEM_TYPE_ETH); + if (item_index < 0) + return rte_flow_error_set(e, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, a, + "No RTE_FLOW_ITEM_TYPE_ETH found"); + mac = (const struct rte_flow_action_set_mac *)a->conf; + + fs->newdmac = 1; + memcpy(fs->dmac, mac->mac_addr, sizeof(fs->dmac)); + break; + default: + /* We are not supposed to come here */ + return rte_flow_error_set(e, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, a, + "Action not supported"); + } + + return 0; +} + +static int +cxgbe_rtef_parse_actions(struct rte_flow *flow, + const struct rte_flow_item items[], + const struct rte_flow_action action[], + struct rte_flow_error *e) +{ + struct ch_filter_specification *fs = &flow->fs; + uint8_t nmode = 0, nat_ipv4 = 0, nat_ipv6 = 0; + uint8_t vlan_set_vid = 0, vlan_set_pcp = 0; + const struct rte_flow_action_queue *q; + const struct rte_flow_action *a; + char abit = 0; + int ret; + + for (a = action; a->type != RTE_FLOW_ACTION_TYPE_END; a++) { + switch (a->type) { + case RTE_FLOW_ACTION_TYPE_VOID: + continue; + case RTE_FLOW_ACTION_TYPE_DROP: + if (abit++) + return rte_flow_error_set(e, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, a, + "specify only 1 pass/drop"); + fs->action = FILTER_DROP; + break; + case RTE_FLOW_ACTION_TYPE_QUEUE: + q = (const struct rte_flow_action_queue *)a->conf; + if (!q) + return rte_flow_error_set(e, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, q, + "specify rx queue index"); + if (check_rxq(flow->dev, q->index)) + return rte_flow_error_set(e, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, q, + "Invalid rx queue"); + if (abit++) + return rte_flow_error_set(e, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, a, + "specify only 1 pass/drop"); + fs->action = FILTER_PASS; + fs->dirsteer = 1; + fs->iq = q->index; + break; + case RTE_FLOW_ACTION_TYPE_COUNT: + fs->hitcnts = 1; + break; + case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID: + vlan_set_vid++; + goto action_switch; + case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP: + vlan_set_pcp++; + goto action_switch; + case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN: + case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN: + case RTE_FLOW_ACTION_TYPE_PHY_PORT: + case RTE_FLOW_ACTION_TYPE_MAC_SWAP: + case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC: + case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST: + nat_ipv4++; + goto action_switch; + case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC: + case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST: + nat_ipv6++; + goto action_switch; + case RTE_FLOW_ACTION_TYPE_SET_TP_SRC: + case RTE_FLOW_ACTION_TYPE_SET_TP_DST: + case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC: + case RTE_FLOW_ACTION_TYPE_SET_MAC_DST: +action_switch: + /* We allow multiple switch actions, but switch is + * not compatible with either queue or drop + */ + if (abit++ && fs->action != FILTER_SWITCH) + return rte_flow_error_set(e, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, a, + "overlapping action specified"); + if (nat_ipv4 && nat_ipv6) + return rte_flow_error_set(e, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, a, + "Can't have one address ipv4 
and the" + " other ipv6"); + + ret = ch_rte_parse_atype_switch(a, items, &nmode, fs, + e); + if (ret) + return ret; + fs->action = FILTER_SWITCH; + break; + default: + /* Not supported action : return error */ + return rte_flow_error_set(e, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + a, "Action not supported"); + } + } + + if (fs->newvlan == VLAN_REWRITE && (!vlan_set_vid || !vlan_set_pcp)) + return rte_flow_error_set(e, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, a, + "Both OF_SET_VLAN_VID and " + "OF_SET_VLAN_PCP must be specified"); + + if (ch_rte_parse_nat(nmode, fs)) + return rte_flow_error_set(e, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, a, + "invalid settings for swich action"); + return 0; +} + +static struct chrte_fparse parseitem[] = { + [RTE_FLOW_ITEM_TYPE_ETH] = { + .fptr = ch_rte_parsetype_eth, + .dmask = &(const struct rte_flow_item_eth){ + .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff", + .src.addr_bytes = "\x00\x00\x00\x00\x00\x00", + .type = 0xffff, + } + }, + + [RTE_FLOW_ITEM_TYPE_PHY_PORT] = { + .fptr = ch_rte_parsetype_port, + .dmask = &(const struct rte_flow_item_phy_port){ + .index = 0x7, + } + }, + + [RTE_FLOW_ITEM_TYPE_VLAN] = { + .fptr = ch_rte_parsetype_vlan, + .dmask = &(const struct rte_flow_item_vlan){ + .tci = 0xffff, + .inner_type = 0xffff, + } + }, + + [RTE_FLOW_ITEM_TYPE_IPV4] = { + .fptr = ch_rte_parsetype_ipv4, + .dmask = &(const struct rte_flow_item_ipv4) { + .hdr = { + .src_addr = RTE_BE32(0xffffffff), + .dst_addr = RTE_BE32(0xffffffff), + .type_of_service = 0xff, + }, + }, + }, + + [RTE_FLOW_ITEM_TYPE_IPV6] = { + .fptr = ch_rte_parsetype_ipv6, + .dmask = &(const struct rte_flow_item_ipv6) { + .hdr = { + .src_addr = + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff", + .dst_addr = + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff", + .vtc_flow = RTE_BE32(0xff000000), + }, + }, + }, + + [RTE_FLOW_ITEM_TYPE_UDP] = { + .fptr = ch_rte_parsetype_udp, + .dmask = &rte_flow_item_udp_mask, + }, + + [RTE_FLOW_ITEM_TYPE_TCP] = { + .fptr = ch_rte_parsetype_tcp, + .dmask = &rte_flow_item_tcp_mask, + }, + + [RTE_FLOW_ITEM_TYPE_PF] = { + .fptr = ch_rte_parsetype_pf, + .dmask = NULL, + }, + + [RTE_FLOW_ITEM_TYPE_VF] = { + .fptr = ch_rte_parsetype_vf, + .dmask = &(const struct rte_flow_item_vf){ + .id = 0xffffffff, + } + }, +}; + +static int +cxgbe_rtef_parse_items(struct rte_flow *flow, + const struct rte_flow_item items[], + struct rte_flow_error *e) +{ + struct adapter *adap = ethdev2adap(flow->dev); + const struct rte_flow_item *i; + char repeat[ARRAY_SIZE(parseitem)] = {0}; + + for (i = items; i->type != RTE_FLOW_ITEM_TYPE_END; i++) { + struct chrte_fparse *idx; + int ret; + + if (i->type >= ARRAY_SIZE(parseitem)) + return rte_flow_error_set(e, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + i, "Item not supported"); + + switch (i->type) { + case RTE_FLOW_ITEM_TYPE_VOID: + continue; + default: + /* check if item is repeated */ + if (repeat[i->type] && + i->type != RTE_FLOW_ITEM_TYPE_VLAN) + return rte_flow_error_set(e, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, i, + "parse items cannot be repeated(except void/vlan)"); + + repeat[i->type] = 1; + + /* validate the item */ + ret = cxgbe_validate_item(i, e); + if (ret) + return ret; + + idx = &flow->item_parser[i->type]; + if (!idx || !idx->fptr) { + return rte_flow_error_set(e, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, i, + "Item not supported"); + } else { + ret = idx->fptr(idx->dmask, i, &flow->fs, e); + if (ret) + return ret; + } + } + } + + cxgbe_fill_filter_region(adap, &flow->fs); + 
cxgbe_tweak_filter_spec(adap, &flow->fs); + + return 0; +} + +static int +cxgbe_flow_parse(struct rte_flow *flow, + const struct rte_flow_attr *attr, + const struct rte_flow_item item[], + const struct rte_flow_action action[], + struct rte_flow_error *e) +{ + int ret; + /* parse user request into ch_filter_specification */ + ret = cxgbe_rtef_parse_attr(flow, attr, e); + if (ret) + return ret; + ret = cxgbe_rtef_parse_items(flow, item, e); + if (ret) + return ret; + return cxgbe_rtef_parse_actions(flow, item, action, e); +} + +static int __cxgbe_flow_create(struct rte_eth_dev *dev, struct rte_flow *flow) +{ + struct ch_filter_specification *fs = &flow->fs; + struct adapter *adap = ethdev2adap(dev); + struct tid_info *t = &adap->tids; + struct filter_ctx ctx; + unsigned int fidx; + int err; + + if (cxgbe_get_fidx(flow, &fidx)) + return -ENOMEM; + if (cxgbe_verify_fidx(flow, fidx, 0)) + return -1; + + t4_init_completion(&ctx.completion); + /* go create the filter */ + err = cxgbe_set_filter(dev, fidx, fs, &ctx); + if (err) { + dev_err(adap, "Error %d while creating filter.\n", err); + return err; + } + + /* Poll the FW for reply */ + err = cxgbe_poll_for_completion(&adap->sge.fw_evtq, + CXGBE_FLOW_POLL_MS, + CXGBE_FLOW_POLL_CNT, + &ctx.completion); + if (err) { + dev_err(adap, "Filter set operation timed out (%d)\n", err); + return err; + } + if (ctx.result) { + dev_err(adap, "Hardware error %d while creating the filter.\n", + ctx.result); + return ctx.result; + } + + if (fs->cap) { /* to destroy the filter */ + flow->fidx = ctx.tid; + flow->f = lookup_tid(t, ctx.tid); + } else { + flow->fidx = fidx; + flow->f = &adap->tids.ftid_tab[fidx]; + } + + return 0; +} + +static struct rte_flow * +cxgbe_flow_create(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item item[], + const struct rte_flow_action action[], + struct rte_flow_error *e) +{ + struct adapter *adap = ethdev2adap(dev); + struct rte_flow *flow; + int ret; + + flow = t4_os_alloc(sizeof(struct rte_flow)); + if (!flow) { + rte_flow_error_set(e, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE, + NULL, "Unable to allocate memory for" + " filter_entry"); + return NULL; + } + + flow->item_parser = parseitem; + flow->dev = dev; + flow->fs.private = (void *)flow; + + if (cxgbe_flow_parse(flow, attr, item, action, e)) { + t4_os_free(flow); + return NULL; + } + + t4_os_lock(&adap->flow_lock); + /* go, interact with cxgbe_filter */ + ret = __cxgbe_flow_create(dev, flow); + t4_os_unlock(&adap->flow_lock); + if (ret) { + rte_flow_error_set(e, ret, RTE_FLOW_ERROR_TYPE_HANDLE, + NULL, "Unable to create flow rule"); + t4_os_free(flow); + return NULL; + } + + flow->f->private = flow; /* Will be used during flush */ + + return flow; +} + +static int __cxgbe_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow) +{ + struct adapter *adap = ethdev2adap(dev); + struct filter_entry *f = flow->f; + struct ch_filter_specification *fs; + struct filter_ctx ctx; + int err; + + fs = &f->fs; + if (cxgbe_verify_fidx(flow, flow->fidx, 1)) + return -1; + + t4_init_completion(&ctx.completion); + err = cxgbe_del_filter(dev, flow->fidx, fs, &ctx); + if (err) { + dev_err(adap, "Error %d while deleting filter.\n", err); + return err; + } + + /* Poll the FW for reply */ + err = cxgbe_poll_for_completion(&adap->sge.fw_evtq, + CXGBE_FLOW_POLL_MS, + CXGBE_FLOW_POLL_CNT, + &ctx.completion); + if (err) { + dev_err(adap, "Filter delete operation timed out (%d)\n", err); + return err; + } + if (ctx.result) { + dev_err(adap, "Hardware error %d while 
deleting the filter.\n", + ctx.result); + return ctx.result; + } + + fs = &flow->fs; + if (fs->mask.macidx) { + struct port_info *pi = (struct port_info *) + (dev->data->dev_private); + int ret; + + ret = cxgbe_mpstcam_remove(pi, fs->val.macidx); + if (!ret) + return ret; + } + + return 0; +} + +static int +cxgbe_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow, + struct rte_flow_error *e) +{ + struct adapter *adap = ethdev2adap(dev); + int ret; + + t4_os_lock(&adap->flow_lock); + ret = __cxgbe_flow_destroy(dev, flow); + t4_os_unlock(&adap->flow_lock); + if (ret) + return rte_flow_error_set(e, ret, RTE_FLOW_ERROR_TYPE_HANDLE, + flow, "error destroying filter."); + t4_os_free(flow); + return 0; +} + +static int __cxgbe_flow_query(struct rte_flow *flow, u64 *count, + u64 *byte_count) +{ + struct adapter *adap = ethdev2adap(flow->dev); + struct ch_filter_specification fs = flow->f->fs; + unsigned int fidx = flow->fidx; + int ret = 0; + + ret = cxgbe_get_filter_count(adap, fidx, count, fs.cap, 0); + if (ret) + return ret; + return cxgbe_get_filter_count(adap, fidx, byte_count, fs.cap, 1); +} + +static int +cxgbe_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow, + const struct rte_flow_action *action, void *data, + struct rte_flow_error *e) +{ + struct adapter *adap = ethdev2adap(flow->dev); + struct ch_filter_specification fs; + struct rte_flow_query_count *c; + struct filter_entry *f; + int ret; + + RTE_SET_USED(dev); + + f = flow->f; + fs = f->fs; + + if (action->type != RTE_FLOW_ACTION_TYPE_COUNT) + return rte_flow_error_set(e, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "only count supported for query"); + + /* + * This is a valid operation, Since we are allowed to do chelsio + * specific operations in rte side of our code but not vise-versa + * + * So, fs can be queried/modified here BUT rte_flow_query_count + * cannot be worked on by the lower layer since we want to maintain + * it as rte_flow agnostic. + */ + if (!fs.hitcnts) + return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, + &fs, "filter hit counters were not" + " enabled during filter creation"); + + c = (struct rte_flow_query_count *)data; + + t4_os_lock(&adap->flow_lock); + ret = __cxgbe_flow_query(flow, &c->hits, &c->bytes); + if (ret) { + rte_flow_error_set(e, -ret, RTE_FLOW_ERROR_TYPE_ACTION, + f, "cxgbe pmd failed to perform query"); + goto out; + } + + /* Query was successful */ + c->bytes_set = 1; + c->hits_set = 1; + if (c->reset) + cxgbe_clear_filter_count(adap, flow->fidx, f->fs.cap, true); + +out: + t4_os_unlock(&adap->flow_lock); + return ret; +} + +static int +cxgbe_flow_validate(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item item[], + const struct rte_flow_action action[], + struct rte_flow_error *e) +{ + struct adapter *adap = ethdev2adap(dev); + struct rte_flow *flow; + unsigned int fidx; + int ret = 0; + + flow = t4_os_alloc(sizeof(struct rte_flow)); + if (!flow) + return rte_flow_error_set(e, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE, + NULL, + "Unable to allocate memory for filter_entry"); + + flow->item_parser = parseitem; + flow->dev = dev; + + ret = cxgbe_flow_parse(flow, attr, item, action, e); + if (ret) { + t4_os_free(flow); + return ret; + } + + if (cxgbe_validate_filter(adap, &flow->fs)) { + t4_os_free(flow); + return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, + NULL, + "validation failed. 
Check f/w config file."); + } + + t4_os_lock(&adap->flow_lock); + if (cxgbe_get_fidx(flow, &fidx)) { + ret = rte_flow_error_set(e, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE, + NULL, "no memory in tcam."); + goto out; + } + + if (cxgbe_verify_fidx(flow, fidx, 0)) { + ret = rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, + NULL, "validation failed"); + goto out; + } + +out: + t4_os_unlock(&adap->flow_lock); + t4_os_free(flow); + return ret; +} + +/* + * @ret : > 0 filter destroyed succsesfully + * < 0 error destroying filter + * == 1 filter not active / not found + */ +static int +cxgbe_check_n_destroy(struct filter_entry *f, struct rte_eth_dev *dev) +{ + if (f && (f->valid || f->pending) && + f->dev == dev && /* Only if user has asked for this port */ + f->private) /* We (rte_flow) created this filter */ + return __cxgbe_flow_destroy(dev, (struct rte_flow *)f->private); + return 1; +} + +static int cxgbe_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *e) +{ + struct adapter *adap = ethdev2adap(dev); + unsigned int i; + int ret = 0; + + t4_os_lock(&adap->flow_lock); + if (adap->tids.ftid_tab) { + struct filter_entry *f = &adap->tids.ftid_tab[0]; + + for (i = 0; i < adap->tids.nftids; i++, f++) { + ret = cxgbe_check_n_destroy(f, dev); + if (ret < 0) { + rte_flow_error_set(e, ret, + RTE_FLOW_ERROR_TYPE_HANDLE, + f->private, + "error destroying TCAM " + "filter."); + goto out; + } + } + } + + if (is_hashfilter(adap) && adap->tids.tid_tab) { + struct filter_entry *f; + + for (i = adap->tids.hash_base; i <= adap->tids.ntids; i++) { + f = (struct filter_entry *)adap->tids.tid_tab[i]; + + ret = cxgbe_check_n_destroy(f, dev); + if (ret < 0) { + rte_flow_error_set(e, ret, + RTE_FLOW_ERROR_TYPE_HANDLE, + f->private, + "error destroying HASH " + "filter."); + goto out; + } + } + } + +out: + t4_os_unlock(&adap->flow_lock); + return ret >= 0 ? 0 : ret; +} + +static const struct rte_flow_ops cxgbe_flow_ops = { + .validate = cxgbe_flow_validate, + .create = cxgbe_flow_create, + .destroy = cxgbe_flow_destroy, + .flush = cxgbe_flow_flush, + .query = cxgbe_flow_query, + .isolate = NULL, +}; + +int +cxgbe_dev_filter_ctrl(struct rte_eth_dev *dev, + enum rte_filter_type filter_type, + enum rte_filter_op filter_op, + void *arg) +{ + int ret = 0; + + RTE_SET_USED(dev); + switch (filter_type) { + case RTE_ETH_FILTER_GENERIC: + if (filter_op != RTE_ETH_FILTER_GET) + return -EINVAL; + *(const void **)arg = &cxgbe_flow_ops; + break; + default: + ret = -ENOTSUP; + break; + } + return ret; +} diff --git a/src/spdk/dpdk/drivers/net/cxgbe/cxgbe_flow.h b/src/spdk/dpdk/drivers/net/cxgbe/cxgbe_flow.h new file mode 100644 index 000000000..ec8e47aeb --- /dev/null +++ b/src/spdk/dpdk/drivers/net/cxgbe/cxgbe_flow.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Chelsio Communications. + * All rights reserved. 
+ */ +#ifndef _CXGBE_FLOW_H_ +#define _CXGBE_FLOW_H_ + +#include +#include "cxgbe_filter.h" +#include "mps_tcam.h" +#include "cxgbe.h" + +/* Max poll time is 100 * 100msec = 10 sec */ +#define CXGBE_FLOW_POLL_MS 100 /* 100 milliseconds */ +#define CXGBE_FLOW_POLL_CNT 100 /* Max number of times to poll */ + +struct chrte_fparse { + int (*fptr)(const void *mask, /* currently supported mask */ + const struct rte_flow_item *item, /* user input */ + struct ch_filter_specification *fs, /* where to parse */ + struct rte_flow_error *e); + const void *dmask; /* Specify what is supported by chelsio by default*/ +}; + +struct rte_flow { + struct filter_entry *f; + struct ch_filter_specification fs; /* temp, to create filter */ + struct chrte_fparse *item_parser; + /* + * filter_entry doesn't store user priority. + * Post creation of filter this will indicate the + * flow index (fidx) for both hash and tcam filters + */ + unsigned int fidx; + struct rte_eth_dev *dev; +}; + +int +cxgbe_dev_filter_ctrl(struct rte_eth_dev *dev, + enum rte_filter_type filter_type, + enum rte_filter_op filter_op, + void *arg); + +#endif /* _CXGBE_FLOW_H_ */ diff --git a/src/spdk/dpdk/drivers/net/cxgbe/cxgbe_main.c b/src/spdk/dpdk/drivers/net/cxgbe/cxgbe_main.c new file mode 100644 index 000000000..a541d95cc --- /dev/null +++ b/src/spdk/dpdk/drivers/net/cxgbe/cxgbe_main.c @@ -0,0 +1,2238 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Chelsio Communications. + * All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "base/common.h" +#include "base/t4_regs.h" +#include "base/t4_msg.h" +#include "cxgbe.h" +#include "cxgbe_pfvf.h" +#include "clip_tbl.h" +#include "l2t.h" +#include "smt.h" +#include "mps_tcam.h" + +static const u16 cxgbe_filter_mode_features[] = { + (F_FRAGMENTATION | F_MPSHITTYPE | F_MACMATCH | F_ETHERTYPE | + F_PROTOCOL | F_PORT), + (F_FRAGMENTATION | F_MPSHITTYPE | F_MACMATCH | F_ETHERTYPE | + F_PROTOCOL | F_FCOE), + (F_FRAGMENTATION | F_MPSHITTYPE | F_MACMATCH | F_ETHERTYPE | F_TOS | + F_PORT), + (F_FRAGMENTATION | F_MPSHITTYPE | F_MACMATCH | F_ETHERTYPE | F_TOS | + F_FCOE), + (F_FRAGMENTATION | F_MPSHITTYPE | F_MACMATCH | F_ETHERTYPE | F_PORT | + F_FCOE), + (F_FRAGMENTATION | F_MPSHITTYPE | F_MACMATCH | F_PROTOCOL | F_TOS | + F_PORT | F_FCOE), + (F_FRAGMENTATION | F_MPSHITTYPE | F_MACMATCH | F_PROTOCOL | F_VLAN | + F_FCOE), + (F_FRAGMENTATION | F_MPSHITTYPE | F_MACMATCH | F_PROTOCOL | F_VNIC_ID | + F_FCOE), + (F_FRAGMENTATION | F_MPSHITTYPE | F_MACMATCH | F_TOS | F_VLAN | + F_FCOE), + (F_FRAGMENTATION | F_MPSHITTYPE | F_MACMATCH | F_TOS | F_VNIC_ID | + F_FCOE), + (F_FRAGMENTATION | F_MPSHITTYPE | F_MACMATCH | F_VLAN | F_PORT | + F_FCOE), + (F_FRAGMENTATION | F_MPSHITTYPE | F_MACMATCH | F_VNIC_ID | F_PORT | + F_FCOE), + (F_FRAGMENTATION | F_MPSHITTYPE | F_ETHERTYPE | F_PROTOCOL | F_TOS | + F_PORT | F_FCOE), + (F_FRAGMENTATION | F_MPSHITTYPE | F_ETHERTYPE | F_VLAN | F_PORT), + (F_FRAGMENTATION | F_MPSHITTYPE | F_ETHERTYPE | F_VLAN | F_FCOE), + (F_FRAGMENTATION | F_MPSHITTYPE | F_ETHERTYPE | F_VNIC_ID | F_PORT), + (F_FRAGMENTATION | F_MPSHITTYPE | F_ETHERTYPE | F_VNIC_ID | F_FCOE), + (F_FRAGMENTATION | F_MPSHITTYPE | F_PROTOCOL | F_TOS | F_VLAN | F_PORT), + (F_FRAGMENTATION | F_MPSHITTYPE | F_PROTOCOL | F_TOS | F_VLAN | 
F_FCOE), + (F_FRAGMENTATION | F_MPSHITTYPE | F_PROTOCOL | F_TOS | F_VNIC_ID | + F_PORT), + (F_FRAGMENTATION | F_MPSHITTYPE | F_PROTOCOL | F_TOS | F_VNIC_ID | + F_FCOE), + (F_FRAGMENTATION | F_MPSHITTYPE | F_PROTOCOL | F_VLAN | F_PORT | + F_FCOE), + (F_FRAGMENTATION | F_MPSHITTYPE | F_PROTOCOL | F_VNIC_ID | F_PORT | + F_FCOE), + (F_FRAGMENTATION | F_MPSHITTYPE | F_TOS | F_VLAN | F_PORT | F_FCOE), + (F_FRAGMENTATION | F_MPSHITTYPE | F_TOS | F_VNIC_ID | F_PORT | F_FCOE), + (F_FRAGMENTATION | F_MPSHITTYPE | F_VLAN | F_VNIC_ID | F_FCOE), + (F_FRAGMENTATION | F_MACMATCH | F_ETHERTYPE | F_PROTOCOL | F_PORT | + F_FCOE), + (F_FRAGMENTATION | F_MACMATCH | F_ETHERTYPE | F_TOS | F_PORT | F_FCOE), + (F_FRAGMENTATION | F_MACMATCH | F_PROTOCOL | F_VLAN | F_PORT | F_FCOE), + (F_FRAGMENTATION | F_MACMATCH | F_PROTOCOL | F_VNIC_ID | F_PORT | + F_FCOE), + (F_FRAGMENTATION | F_MACMATCH | F_TOS | F_VLAN | F_PORT | F_FCOE), + (F_FRAGMENTATION | F_MACMATCH | F_TOS | F_VNIC_ID | F_PORT | F_FCOE), + (F_FRAGMENTATION | F_ETHERTYPE | F_VLAN | F_PORT | F_FCOE), + (F_FRAGMENTATION | F_ETHERTYPE | F_VNIC_ID | F_PORT | F_FCOE), + (F_FRAGMENTATION | F_PROTOCOL | F_TOS | F_VLAN | F_FCOE), + (F_FRAGMENTATION | F_PROTOCOL | F_TOS | F_VNIC_ID | F_FCOE), + (F_FRAGMENTATION | F_VLAN | F_VNIC_ID | F_PORT | F_FCOE), + (F_MPSHITTYPE | F_MACMATCH | F_ETHERTYPE | F_PROTOCOL | F_PORT | + F_FCOE), + (F_MPSHITTYPE | F_MACMATCH | F_ETHERTYPE | F_TOS | F_PORT | F_FCOE), + (F_MPSHITTYPE | F_MACMATCH | F_PROTOCOL | F_VLAN | F_PORT), + (F_MPSHITTYPE | F_MACMATCH | F_PROTOCOL | F_VNIC_ID | F_PORT), + (F_MPSHITTYPE | F_MACMATCH | F_TOS | F_VLAN | F_PORT), + (F_MPSHITTYPE | F_MACMATCH | F_TOS | F_VNIC_ID | F_PORT), + (F_MPSHITTYPE | F_ETHERTYPE | F_VLAN | F_PORT | F_FCOE), + (F_MPSHITTYPE | F_ETHERTYPE | F_VNIC_ID | F_PORT | F_FCOE), + (F_MPSHITTYPE | F_PROTOCOL | F_TOS | F_VLAN | F_PORT | F_FCOE), + (F_MPSHITTYPE | F_PROTOCOL | F_TOS | F_VNIC_ID | F_PORT | F_FCOE), + (F_MPSHITTYPE | F_VLAN | F_VNIC_ID | F_PORT), +}; + +/** + * Allocate a chunk of memory. The allocated memory is cleared. + */ +void *t4_alloc_mem(size_t size) +{ + return rte_zmalloc(NULL, size, 0); +} + +/** + * Free memory allocated through t4_alloc_mem(). + */ +void t4_free_mem(void *addr) +{ + rte_free(addr); +} + +/* + * Response queue handler for the FW event queue. + */ +static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp, + __rte_unused const struct pkt_gl *gl) +{ + u8 opcode = ((const struct rss_header *)rsp)->opcode; + + rsp++; /* skip RSS header */ + + /* + * FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG. 
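+ * When that happens, peel off the encapsulating CPL_FW4_MSG header and + * re-read the inner RSS header so the real opcode gets dispatched below.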
+ */ + if (unlikely(opcode == CPL_FW4_MSG && + ((const struct cpl_fw4_msg *)rsp)->type == + FW_TYPE_RSSCPL)) { + rsp++; + opcode = ((const struct rss_header *)rsp)->opcode; + rsp++; + if (opcode != CPL_SGE_EGR_UPDATE) { + dev_err(q->adapter, "unexpected FW4/CPL %#x on FW event queue\n", + opcode); + goto out; + } + } + + if (likely(opcode == CPL_SGE_EGR_UPDATE)) { + /* do nothing */ + } else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) { + const struct cpl_fw6_msg *msg = (const void *)rsp; + + t4_handle_fw_rpl(q->adapter, msg->data); + } else if (opcode == CPL_ABORT_RPL_RSS) { + const struct cpl_abort_rpl_rss *p = (const void *)rsp; + + cxgbe_hash_del_filter_rpl(q->adapter, p); + } else if (opcode == CPL_SET_TCB_RPL) { + const struct cpl_set_tcb_rpl *p = (const void *)rsp; + + cxgbe_filter_rpl(q->adapter, p); + } else if (opcode == CPL_ACT_OPEN_RPL) { + const struct cpl_act_open_rpl *p = (const void *)rsp; + + cxgbe_hash_filter_rpl(q->adapter, p); + } else if (opcode == CPL_L2T_WRITE_RPL) { + const struct cpl_l2t_write_rpl *p = (const void *)rsp; + + cxgbe_do_l2t_write_rpl(q->adapter, p); + } else if (opcode == CPL_SMT_WRITE_RPL) { + const struct cpl_smt_write_rpl *p = (const void *)rsp; + + cxgbe_do_smt_write_rpl(q->adapter, p); + } else { + dev_err(adapter, "unexpected CPL %#x on FW event queue\n", + opcode); + } +out: + return 0; +} + +/** + * Setup sge control queues to pass control information. + */ +int cxgbe_setup_sge_ctrl_txq(struct adapter *adapter) +{ + struct sge *s = &adapter->sge; + int err = 0, i = 0; + + for_each_port(adapter, i) { + struct port_info *pi = adap2pinfo(adapter, i); + char name[RTE_ETH_NAME_MAX_LEN]; + struct sge_ctrl_txq *q = &s->ctrlq[i]; + + q->q.size = 1024; + err = t4_sge_alloc_ctrl_txq(adapter, q, + adapter->eth_dev, i, + s->fw_evtq.cntxt_id, + rte_socket_id()); + if (err) { + dev_err(adapter, "Failed to alloc ctrl txq. Err: %d", + err); + goto out; + } + snprintf(name, sizeof(name), "%s_ctrl_pool_%d", + pi->eth_dev->device->driver->name, + pi->eth_dev->data->port_id); + q->mb_pool = rte_pktmbuf_pool_create(name, s->ctrlq[i].q.size, + RTE_CACHE_LINE_SIZE, + RTE_MBUF_PRIV_ALIGN, + RTE_MBUF_DEFAULT_BUF_SIZE, + SOCKET_ID_ANY); + if (!q->mb_pool) { + err = -rte_errno; + dev_err(adapter, + "Can't create ctrl pool for port %d. Err: %d\n", + pi->eth_dev->data->port_id, err); + goto out; + } + } + return 0; +out: + t4_free_sge_resources(adapter); + return err; +} + +/** + * cxgbe_poll_for_completion: Poll rxq for completion + * @q: rxq to poll + * @ms: milliseconds to delay + * @cnt: number of times to poll + * @c: completion to check for 'done' status + * + * Polls the rxq for reples until completion is done or the count + * expires. 
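+ * Returns 0 once the completion is marked done, -EINVAL if @c is NULL, or + * -ETIMEDOUT after all @cnt polls (spaced @ms milliseconds apart) have elapsed.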
+ */ +int cxgbe_poll_for_completion(struct sge_rspq *q, unsigned int ms, + unsigned int cnt, struct t4_completion *c) +{ + unsigned int i; + unsigned int work_done, budget = 32; + + if (!c) + return -EINVAL; + + for (i = 0; i < cnt; i++) { + cxgbe_poll(q, NULL, budget, &work_done); + t4_os_lock(&c->lock); + if (c->done) { + t4_os_unlock(&c->lock); + return 0; + } + t4_os_unlock(&c->lock); + rte_delay_ms(ms); + } + return -ETIMEDOUT; +} + +int cxgbe_setup_sge_fwevtq(struct adapter *adapter) +{ + struct sge *s = &adapter->sge; + int err = 0; + int msi_idx = 0; + + err = t4_sge_alloc_rxq(adapter, &s->fw_evtq, true, adapter->eth_dev, + msi_idx, NULL, fwevtq_handler, -1, NULL, 0, + rte_socket_id()); + return err; +} + +static int closest_timer(const struct sge *s, int time) +{ + unsigned int i, match = 0; + int delta, min_delta = INT_MAX; + + for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) { + delta = time - s->timer_val[i]; + if (delta < 0) + delta = -delta; + if (delta < min_delta) { + min_delta = delta; + match = i; + } + } + return match; +} + +static int closest_thres(const struct sge *s, int thres) +{ + unsigned int i, match = 0; + int delta, min_delta = INT_MAX; + + for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) { + delta = thres - s->counter_val[i]; + if (delta < 0) + delta = -delta; + if (delta < min_delta) { + min_delta = delta; + match = i; + } + } + return match; +} + +/** + * cxgb4_set_rspq_intr_params - set a queue's interrupt holdoff parameters + * @q: the Rx queue + * @us: the hold-off time in us, or 0 to disable timer + * @cnt: the hold-off packet count, or 0 to disable counter + * + * Sets an Rx queue's interrupt hold-off time and packet count. At least + * one of the two needs to be enabled for the queue to generate interrupts. + */ +int cxgb4_set_rspq_intr_params(struct sge_rspq *q, unsigned int us, + unsigned int cnt) +{ + struct adapter *adap = q->adapter; + unsigned int timer_val; + + if (cnt) { + int err; + u32 v, new_idx; + + new_idx = closest_thres(&adap->sge, cnt); + if (q->desc && q->pktcnt_idx != new_idx) { + /* the queue has already been created, update it */ + v = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) | + V_FW_PARAMS_PARAM_X( + FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) | + V_FW_PARAMS_PARAM_YZ(q->cntxt_id); + err = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, + &v, &new_idx); + if (err) + return err; + } + q->pktcnt_idx = new_idx; + } + + timer_val = (us == 0) ? X_TIMERREG_RESTART_COUNTER : + closest_timer(&adap->sge, us); + + if ((us | cnt) == 0) + q->intr_params = V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX); + else + q->intr_params = V_QINTR_TIMER_IDX(timer_val) | + V_QINTR_CNT_EN(cnt > 0); + return 0; +} + +/** + * Allocate an active-open TID and set it to the supplied value. + */ +int cxgbe_alloc_atid(struct tid_info *t, void *data) +{ + int atid = -1; + + t4_os_lock(&t->atid_lock); + if (t->afree) { + union aopen_entry *p = t->afree; + + atid = p - t->atid_tab; + t->afree = p->next; + p->data = data; + t->atids_in_use++; + } + t4_os_unlock(&t->atid_lock); + return atid; +} + +/** + * Release an active-open TID. + */ +void cxgbe_free_atid(struct tid_info *t, unsigned int atid) +{ + union aopen_entry *p = &t->atid_tab[atid]; + + t4_os_lock(&t->atid_lock); + p->next = t->afree; + t->afree = p; + t->atids_in_use--; + t4_os_unlock(&t->atid_lock); +} + +/** + * Populate a TID_RELEASE WR. Caller must properly size the skb. 
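+ * In this PMD the "skb" is an rte_mbuf; the caller sets data_len and pkt_len + * to sizeof(struct cpl_tid_release) before posting the mbuf to the control queue.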
+ */ +static void mk_tid_release(struct rte_mbuf *mbuf, unsigned int tid) +{ + struct cpl_tid_release *req; + + req = rte_pktmbuf_mtod(mbuf, struct cpl_tid_release *); + INIT_TP_WR_MIT_CPL(req, CPL_TID_RELEASE, tid); +} + +/** + * Release a TID and inform HW. If we are unable to allocate the release + * message we defer to a work queue. + */ +void cxgbe_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid, + unsigned short family) +{ + struct rte_mbuf *mbuf; + struct adapter *adap = container_of(t, struct adapter, tids); + + WARN_ON(tid >= t->ntids); + + if (t->tid_tab[tid]) { + t->tid_tab[tid] = NULL; + rte_atomic32_dec(&t->conns_in_use); + if (t->hash_base && tid >= t->hash_base) { + if (family == FILTER_TYPE_IPV4) + rte_atomic32_dec(&t->hash_tids_in_use); + } else { + if (family == FILTER_TYPE_IPV4) + rte_atomic32_dec(&t->tids_in_use); + } + } + + mbuf = rte_pktmbuf_alloc((&adap->sge.ctrlq[chan])->mb_pool); + if (mbuf) { + mbuf->data_len = sizeof(struct cpl_tid_release); + mbuf->pkt_len = mbuf->data_len; + mk_tid_release(mbuf, tid); + t4_mgmt_tx(&adap->sge.ctrlq[chan], mbuf); + } +} + +/** + * Insert a TID. + */ +void cxgbe_insert_tid(struct tid_info *t, void *data, unsigned int tid, + unsigned short family) +{ + t->tid_tab[tid] = data; + if (t->hash_base && tid >= t->hash_base) { + if (family == FILTER_TYPE_IPV4) + rte_atomic32_inc(&t->hash_tids_in_use); + } else { + if (family == FILTER_TYPE_IPV4) + rte_atomic32_inc(&t->tids_in_use); + } + + rte_atomic32_inc(&t->conns_in_use); +} + +/** + * Free TID tables. + */ +static void tid_free(struct tid_info *t) +{ + if (t->tid_tab) { + if (t->ftid_bmap) + rte_bitmap_free(t->ftid_bmap); + + if (t->ftid_bmap_array) + t4_os_free(t->ftid_bmap_array); + + t4_os_free(t->tid_tab); + } + + memset(t, 0, sizeof(struct tid_info)); +} + +/** + * Allocate and initialize the TID tables. Returns 0 on success. + */ +static int tid_init(struct tid_info *t) +{ + size_t size; + unsigned int ftid_bmap_size; + unsigned int natids = t->natids; + unsigned int max_ftids = t->nftids; + + ftid_bmap_size = rte_bitmap_get_memory_footprint(t->nftids); + size = t->ntids * sizeof(*t->tid_tab) + + max_ftids * sizeof(*t->ftid_tab) + + natids * sizeof(*t->atid_tab); + + t->tid_tab = t4_os_alloc(size); + if (!t->tid_tab) + return -ENOMEM; + + t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids]; + t->ftid_tab = (struct filter_entry *)&t->atid_tab[t->natids]; + t->ftid_bmap_array = t4_os_alloc(ftid_bmap_size); + if (!t->ftid_bmap_array) { + tid_free(t); + return -ENOMEM; + } + + t4_os_lock_init(&t->atid_lock); + t4_os_lock_init(&t->ftid_lock); + + t->afree = NULL; + t->atids_in_use = 0; + rte_atomic32_init(&t->tids_in_use); + rte_atomic32_set(&t->tids_in_use, 0); + rte_atomic32_init(&t->conns_in_use); + rte_atomic32_set(&t->conns_in_use, 0); + + /* Setup the free list for atid_tab and clear the stid bitmap. 
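+ * Each atid entry is chained to its successor so cxgbe_alloc_atid() can pop + * entries from t->afree in constant time.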
*/ + if (natids) { + while (--natids) + t->atid_tab[natids - 1].next = &t->atid_tab[natids]; + t->afree = t->atid_tab; + } + + t->ftid_bmap = rte_bitmap_init(t->nftids, t->ftid_bmap_array, + ftid_bmap_size); + if (!t->ftid_bmap) { + tid_free(t); + return -ENOMEM; + } + + return 0; +} + +static inline bool is_x_1g_port(const struct link_config *lc) +{ + return (lc->pcaps & FW_PORT_CAP32_SPEED_1G) != 0; +} + +static inline bool is_x_10g_port(const struct link_config *lc) +{ + unsigned int speeds, high_speeds; + + speeds = V_FW_PORT_CAP32_SPEED(G_FW_PORT_CAP32_SPEED(lc->pcaps)); + high_speeds = speeds & + ~(FW_PORT_CAP32_SPEED_100M | FW_PORT_CAP32_SPEED_1G); + + return high_speeds != 0; +} + +static inline void init_rspq(struct adapter *adap, struct sge_rspq *q, + unsigned int us, unsigned int cnt, + unsigned int size, unsigned int iqe_size) +{ + q->adapter = adap; + cxgb4_set_rspq_intr_params(q, us, cnt); + q->iqe_len = iqe_size; + q->size = size; +} + +int cxgbe_cfg_queue_count(struct rte_eth_dev *eth_dev) +{ + struct port_info *pi = eth_dev->data->dev_private; + struct adapter *adap = pi->adapter; + struct sge *s = &adap->sge; + unsigned int max_queues = s->max_ethqsets / adap->params.nports; + + if ((eth_dev->data->nb_rx_queues < 1) || + (eth_dev->data->nb_tx_queues < 1)) + return -EINVAL; + + if ((eth_dev->data->nb_rx_queues > max_queues) || + (eth_dev->data->nb_tx_queues > max_queues)) + return -EINVAL; + + if (eth_dev->data->nb_rx_queues > pi->rss_size) + return -EINVAL; + + /* We must configure RSS, since config has changed*/ + pi->flags &= ~PORT_RSS_DONE; + + pi->n_rx_qsets = eth_dev->data->nb_rx_queues; + pi->n_tx_qsets = eth_dev->data->nb_tx_queues; + + return 0; +} + +void cxgbe_cfg_queues(struct rte_eth_dev *eth_dev) +{ + struct port_info *pi = eth_dev->data->dev_private; + struct adapter *adap = pi->adapter; + struct sge *s = &adap->sge; + unsigned int i, nb_ports = 0, qidx = 0; + unsigned int q_per_port = 0; + + if (!(adap->flags & CFG_QUEUES)) { + for_each_port(adap, i) { + struct port_info *tpi = adap2pinfo(adap, i); + + nb_ports += (is_x_10g_port(&tpi->link_cfg)) || + is_x_1g_port(&tpi->link_cfg) ? 1 : 0; + } + + /* + * We default up to # of cores queues per 1G/10G port. + */ + if (nb_ports) + q_per_port = (s->max_ethqsets - + (adap->params.nports - nb_ports)) / + nb_ports; + + if (q_per_port > rte_lcore_count()) + q_per_port = rte_lcore_count(); + + for_each_port(adap, i) { + struct port_info *pi = adap2pinfo(adap, i); + + pi->first_qset = qidx; + + /* Initially n_rx_qsets == n_tx_qsets */ + pi->n_rx_qsets = (is_x_10g_port(&pi->link_cfg) || + is_x_1g_port(&pi->link_cfg)) ? + q_per_port : 1; + pi->n_tx_qsets = pi->n_rx_qsets; + + if (pi->n_rx_qsets > pi->rss_size) + pi->n_rx_qsets = pi->rss_size; + + qidx += pi->n_rx_qsets; + } + + for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) { + struct sge_eth_rxq *r = &s->ethrxq[i]; + + init_rspq(adap, &r->rspq, 5, 32, 1024, 64); + r->usembufs = 1; + r->fl.size = (r->usembufs ? 
1024 : 72); + } + + for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++) + s->ethtxq[i].q.size = 1024; + + init_rspq(adap, &adap->sge.fw_evtq, 0, 0, 1024, 64); + adap->flags |= CFG_QUEUES; + } +} + +void cxgbe_stats_get(struct port_info *pi, struct port_stats *stats) +{ + t4_get_port_stats_offset(pi->adapter, pi->tx_chan, stats, + &pi->stats_base); +} + +void cxgbe_stats_reset(struct port_info *pi) +{ + t4_clr_port_stats(pi->adapter, pi->tx_chan); +} + +static void setup_memwin(struct adapter *adap) +{ + u32 mem_win0_base; + + /* For T5, only relative offset inside the PCIe BAR is passed */ + mem_win0_base = MEMWIN0_BASE; + + /* + * Set up memory window for accessing adapter memory ranges. (Read + * back MA register to ensure that changes propagate before we attempt + * to use the new values.) + */ + t4_write_reg(adap, + PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, + MEMWIN_NIC), + mem_win0_base | V_BIR(0) | + V_WINDOW(ilog2(MEMWIN0_APERTURE) - X_WINDOW_SHIFT)); + t4_read_reg(adap, + PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, + MEMWIN_NIC)); +} + +int cxgbe_init_rss(struct adapter *adap) +{ + unsigned int i; + + if (is_pf4(adap)) { + int err; + + err = t4_init_rss_mode(adap, adap->mbox); + if (err) + return err; + } + + for_each_port(adap, i) { + struct port_info *pi = adap2pinfo(adap, i); + + pi->rss = rte_zmalloc(NULL, pi->rss_size * sizeof(u16), 0); + if (!pi->rss) + return -ENOMEM; + + pi->rss_hf = CXGBE_RSS_HF_ALL; + } + return 0; +} + +/** + * Dump basic information about the adapter. + */ +void cxgbe_print_adapter_info(struct adapter *adap) +{ + /** + * Hardware/Firmware/etc. Version/Revision IDs. + */ + t4_dump_version_info(adap); +} + +void cxgbe_print_port_info(struct adapter *adap) +{ + int i; + char buf[80]; + struct rte_pci_addr *loc = &adap->pdev->addr; + + for_each_port(adap, i) { + const struct port_info *pi = adap2pinfo(adap, i); + char *bufp = buf; + + if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_100M) + bufp += sprintf(bufp, "100M/"); + if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_1G) + bufp += sprintf(bufp, "1G/"); + if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_10G) + bufp += sprintf(bufp, "10G/"); + if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_25G) + bufp += sprintf(bufp, "25G/"); + if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_40G) + bufp += sprintf(bufp, "40G/"); + if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_50G) + bufp += sprintf(bufp, "50G/"); + if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_100G) + bufp += sprintf(bufp, "100G/"); + if (bufp != buf) + --bufp; + sprintf(bufp, "BASE-%s", + t4_get_port_type_description( + (enum fw_port_type)pi->port_type)); + + dev_info(adap, + " " PCI_PRI_FMT " Chelsio rev %d %s %s\n", + loc->domain, loc->bus, loc->devid, loc->function, + CHELSIO_CHIP_RELEASE(adap->params.chip), buf, + (adap->flags & USING_MSIX) ? " MSI-X" : + (adap->flags & USING_MSI) ? 
" MSI" : ""); + } +} + +static int check_devargs_handler(const char *key, const char *value, void *p) +{ + if (!strncmp(key, CXGBE_DEVARG_CMN_KEEP_OVLAN, strlen(key)) || + !strncmp(key, CXGBE_DEVARG_CMN_TX_MODE_LATENCY, strlen(key)) || + !strncmp(key, CXGBE_DEVARG_VF_FORCE_LINK_UP, strlen(key))) { + if (!strncmp(value, "1", 1)) { + bool *dst_val = (bool *)p; + + *dst_val = true; + } + } + + if (!strncmp(key, CXGBE_DEVARG_PF_FILTER_MODE, strlen(key)) || + !strncmp(key, CXGBE_DEVARG_PF_FILTER_MASK, strlen(key))) { + u32 *dst_val = (u32 *)p; + char *endptr = NULL; + u32 arg_val; + + arg_val = strtoul(value, &endptr, 16); + if (errno || endptr == value) + return -EINVAL; + + *dst_val = arg_val; + } + + return 0; +} + +static int cxgbe_get_devargs(struct rte_devargs *devargs, const char *key, + void *p) +{ + struct rte_kvargs *kvlist; + int ret = 0; + + if (!devargs) + return 0; + + kvlist = rte_kvargs_parse(devargs->args, NULL); + if (!kvlist) + return 0; + + if (!rte_kvargs_count(kvlist, key)) + goto out; + + ret = rte_kvargs_process(kvlist, key, check_devargs_handler, p); + +out: + rte_kvargs_free(kvlist); + + return ret; +} + +static void cxgbe_get_devargs_int(struct adapter *adap, bool *dst, + const char *key, bool default_value) +{ + struct rte_pci_device *pdev = adap->pdev; + int ret; + bool devarg_value = default_value; + + *dst = default_value; + if (!pdev) + return; + + ret = cxgbe_get_devargs(pdev->device.devargs, key, &devarg_value); + if (ret) + return; + + *dst = devarg_value; +} + +static void cxgbe_get_devargs_u32(struct adapter *adap, u32 *dst, + const char *key, u32 default_value) +{ + struct rte_pci_device *pdev = adap->pdev; + u32 devarg_value = default_value; + int ret; + + *dst = default_value; + if (!pdev) + return; + + ret = cxgbe_get_devargs(pdev->device.devargs, key, &devarg_value); + if (ret) + return; + + *dst = devarg_value; +} + +void cxgbe_process_devargs(struct adapter *adap) +{ + cxgbe_get_devargs_int(adap, &adap->devargs.keep_ovlan, + CXGBE_DEVARG_CMN_KEEP_OVLAN, false); + cxgbe_get_devargs_int(adap, &adap->devargs.tx_mode_latency, + CXGBE_DEVARG_CMN_TX_MODE_LATENCY, false); + cxgbe_get_devargs_int(adap, &adap->devargs.force_link_up, + CXGBE_DEVARG_VF_FORCE_LINK_UP, false); + cxgbe_get_devargs_u32(adap, &adap->devargs.filtermode, + CXGBE_DEVARG_PF_FILTER_MODE, 0); + cxgbe_get_devargs_u32(adap, &adap->devargs.filtermask, + CXGBE_DEVARG_PF_FILTER_MASK, 0); +} + +static void configure_vlan_types(struct adapter *adapter) +{ + int i; + + for_each_port(adapter, i) { + /* OVLAN Type 0x88a8 */ + t4_set_reg_field(adapter, MPS_PORT_RX_OVLAN_REG(i, A_RX_OVLAN0), + V_OVLAN_MASK(M_OVLAN_MASK) | + V_OVLAN_ETYPE(M_OVLAN_ETYPE), + V_OVLAN_MASK(M_OVLAN_MASK) | + V_OVLAN_ETYPE(0x88a8)); + /* OVLAN Type 0x9100 */ + t4_set_reg_field(adapter, MPS_PORT_RX_OVLAN_REG(i, A_RX_OVLAN1), + V_OVLAN_MASK(M_OVLAN_MASK) | + V_OVLAN_ETYPE(M_OVLAN_ETYPE), + V_OVLAN_MASK(M_OVLAN_MASK) | + V_OVLAN_ETYPE(0x9100)); + + /* IVLAN 0X8100 */ + t4_set_reg_field(adapter, MPS_PORT_RX_IVLAN(i), + V_IVLAN_ETYPE(M_IVLAN_ETYPE), + V_IVLAN_ETYPE(0x8100)); + + t4_set_reg_field(adapter, MPS_PORT_RX_CTL(i), + F_OVLAN_EN0 | F_OVLAN_EN1 | + F_IVLAN_EN, + F_OVLAN_EN0 | F_OVLAN_EN1 | + F_IVLAN_EN); + } + + t4_tp_wr_bits_indirect(adapter, A_TP_INGRESS_CONFIG, V_RM_OVLAN(1), + V_RM_OVLAN(!adapter->devargs.keep_ovlan)); +} + +static int cxgbe_get_filter_vnic_mode_from_devargs(u32 val) +{ + u32 vnic_mode; + + vnic_mode = val & (CXGBE_DEVARGS_FILTER_MODE_PF_VF | + CXGBE_DEVARGS_FILTER_MODE_VLAN_OUTER); + if (vnic_mode) { 
+ switch (vnic_mode) { + case CXGBE_DEVARGS_FILTER_MODE_VLAN_OUTER: + return CXGBE_FILTER_VNIC_MODE_OVLAN; + case CXGBE_DEVARGS_FILTER_MODE_PF_VF: + return CXGBE_FILTER_VNIC_MODE_PFVF; + default: + return -EINVAL; + } + } + + return CXGBE_FILTER_VNIC_MODE_NONE; +} + +static int cxgbe_get_filter_mode_from_devargs(u32 val, bool closest_match) +{ + int vnic_mode, fmode = 0; + bool found = false; + u8 i; + + if (val >= CXGBE_DEVARGS_FILTER_MODE_MAX) { + pr_err("Unsupported flags set in filter mode. Must be < 0x%x\n", + CXGBE_DEVARGS_FILTER_MODE_MAX); + return -ERANGE; + } + + vnic_mode = cxgbe_get_filter_vnic_mode_from_devargs(val); + if (vnic_mode < 0) { + pr_err("Unsupported Vnic-mode, more than 1 Vnic-mode selected\n"); + return vnic_mode; + } + + if (vnic_mode) + fmode |= F_VNIC_ID; + if (val & CXGBE_DEVARGS_FILTER_MODE_PHYSICAL_PORT) + fmode |= F_PORT; + if (val & CXGBE_DEVARGS_FILTER_MODE_ETHERNET_DSTMAC) + fmode |= F_MACMATCH; + if (val & CXGBE_DEVARGS_FILTER_MODE_ETHERNET_ETHTYPE) + fmode |= F_ETHERTYPE; + if (val & CXGBE_DEVARGS_FILTER_MODE_VLAN_INNER) + fmode |= F_VLAN; + if (val & CXGBE_DEVARGS_FILTER_MODE_IP_TOS) + fmode |= F_TOS; + if (val & CXGBE_DEVARGS_FILTER_MODE_IP_PROTOCOL) + fmode |= F_PROTOCOL; + + for (i = 0; i < ARRAY_SIZE(cxgbe_filter_mode_features); i++) { + if ((cxgbe_filter_mode_features[i] & fmode) == fmode) { + found = true; + break; + } + } + + if (!found) + return -EINVAL; + + return closest_match ? cxgbe_filter_mode_features[i] : fmode; +} + +static int configure_filter_mode_mask(struct adapter *adap) +{ + u32 params[2], val[2], nparams = 0; + int ret; + + if (!adap->devargs.filtermode && !adap->devargs.filtermask) + return 0; + + if (!adap->devargs.filtermode || !adap->devargs.filtermask) { + pr_err("Unsupported, Provide both filtermode and filtermask devargs\n"); + return -EINVAL; + } + + if (adap->devargs.filtermask & ~adap->devargs.filtermode) { + pr_err("Unsupported, filtermask (0x%x) must be subset of filtermode (0x%x)\n", + adap->devargs.filtermask, adap->devargs.filtermode); + + return -EINVAL; + } + + params[0] = CXGBE_FW_PARAM_DEV(FILTER) | + V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_FILTER_MODE_MASK); + + ret = cxgbe_get_filter_mode_from_devargs(adap->devargs.filtermode, + true); + if (ret < 0) { + pr_err("Unsupported filtermode devargs combination:0x%x\n", + adap->devargs.filtermode); + return ret; + } + + val[0] = V_FW_PARAMS_PARAM_FILTER_MODE(ret); + + ret = cxgbe_get_filter_mode_from_devargs(adap->devargs.filtermask, + false); + if (ret < 0) { + pr_err("Unsupported filtermask devargs combination:0x%x\n", + adap->devargs.filtermask); + return ret; + } + + val[0] |= V_FW_PARAMS_PARAM_FILTER_MASK(ret); + + nparams++; + + ret = cxgbe_get_filter_vnic_mode_from_devargs(adap->devargs.filtermode); + if (ret < 0) + return ret; + + if (ret) { + params[1] = CXGBE_FW_PARAM_DEV(FILTER) | + V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_FILTER_VNIC_MODE); + + val[1] = ret - 1; + + nparams++; + } + + return t4_set_params(adap, adap->mbox, adap->pf, 0, nparams, + params, val); +} + +static void configure_pcie_ext_tag(struct adapter *adapter) +{ + u16 v; + int pos = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP); + + if (!pos) + return; + + if (pos > 0) { + t4_os_pci_read_cfg2(adapter, pos + PCI_EXP_DEVCTL, &v); + v |= PCI_EXP_DEVCTL_EXT_TAG; + t4_os_pci_write_cfg2(adapter, pos + PCI_EXP_DEVCTL, v); + if (is_t6(adapter->params.chip)) { + t4_set_reg_field(adapter, A_PCIE_CFG2, + V_T6_TOTMAXTAG(M_T6_TOTMAXTAG), + V_T6_TOTMAXTAG(7)); + t4_set_reg_field(adapter, A_PCIE_CMD_CFG, + 
V_T6_MINTAG(M_T6_MINTAG), + V_T6_MINTAG(8)); + } else { + t4_set_reg_field(adapter, A_PCIE_CFG2, + V_TOTMAXTAG(M_TOTMAXTAG), + V_TOTMAXTAG(3)); + t4_set_reg_field(adapter, A_PCIE_CMD_CFG, + V_MINTAG(M_MINTAG), + V_MINTAG(8)); + } + } +} + +/* Figure out how many Queue Sets we can support */ +void cxgbe_configure_max_ethqsets(struct adapter *adapter) +{ + unsigned int ethqsets; + + /* + * We need to reserve an Ingress Queue for the Asynchronous Firmware + * Event Queue. + * + * For each Queue Set, we'll need the ability to allocate two Egress + * Contexts -- one for the Ingress Queue Free List and one for the TX + * Ethernet Queue. + */ + if (is_pf4(adapter)) { + struct pf_resources *pfres = &adapter->params.pfres; + + ethqsets = pfres->niqflint - 1; + if (pfres->neq < ethqsets * 2) + ethqsets = pfres->neq / 2; + } else { + struct vf_resources *vfres = &adapter->params.vfres; + + ethqsets = vfres->niqflint - 1; + if (vfres->nethctrl != ethqsets) + ethqsets = min(vfres->nethctrl, ethqsets); + if (vfres->neq < ethqsets * 2) + ethqsets = vfres->neq / 2; + } + + if (ethqsets > MAX_ETH_QSETS) + ethqsets = MAX_ETH_QSETS; + adapter->sge.max_ethqsets = ethqsets; +} + +/* + * Tweak configuration based on system architecture, etc. Most of these have + * defaults assigned to them by Firmware Configuration Files (if we're using + * them) but need to be explicitly set if we're using hard-coded + * initialization. So these are essentially common tweaks/settings for + * Configuration Files and hard-coded initialization ... + */ +static int adap_init0_tweaks(struct adapter *adapter) +{ + u8 rx_dma_offset; + + /* + * Fix up various Host-Dependent Parameters like Page Size, Cache + * Line Size, etc. The firmware default is for a 4KB Page Size and + * 64B Cache Line Size ... + */ + t4_fixup_host_params_compat(adapter, CXGBE_PAGE_SIZE, L1_CACHE_BYTES, + T5_LAST_REV); + + /* + * Keep the chip default offset to deliver Ingress packets into our + * DMA buffers to zero + */ + rx_dma_offset = 0; + t4_set_reg_field(adapter, A_SGE_CONTROL, V_PKTSHIFT(M_PKTSHIFT), + V_PKTSHIFT(rx_dma_offset)); + + t4_set_reg_field(adapter, A_SGE_FLM_CFG, + V_CREDITCNT(M_CREDITCNT) | M_CREDITCNTPACKING, + V_CREDITCNT(3) | V_CREDITCNTPACKING(1)); + + t4_set_reg_field(adapter, A_SGE_INGRESS_RX_THRESHOLD, + V_THRESHOLD_3(M_THRESHOLD_3), V_THRESHOLD_3(32U)); + + t4_set_reg_field(adapter, A_SGE_CONTROL2, V_IDMAARBROUNDROBIN(1U), + V_IDMAARBROUNDROBIN(1U)); + + /* + * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux + * adds the pseudo header itself. + */ + t4_tp_wr_bits_indirect(adapter, A_TP_INGRESS_CONFIG, + F_CSUM_HAS_PSEUDO_HDR, 0); + + return 0; +} + +/* + * Attempt to initialize the adapter via a Firmware Configuration File. + */ +static int adap_init0_config(struct adapter *adapter, int reset) +{ + struct fw_caps_config_cmd caps_cmd; + unsigned long mtype = 0, maddr = 0; + u32 finiver, finicsum, cfcsum; + int ret; + int config_issued = 0; + int cfg_addr; + char config_name[20]; + + /* + * Reset device if necessary. 
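+ * A full PIO reset (F_PIORSTMODE | F_PIORST) returns the chip to a clean + * state before the Configuration File is applied.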
+ */ + if (reset) { + ret = t4_fw_reset(adapter, adapter->mbox, + F_PIORSTMODE | F_PIORST); + if (ret < 0) { + dev_warn(adapter, "Firmware reset failed, error %d\n", + -ret); + goto bye; + } + } + + cfg_addr = t4_flash_cfg_addr(adapter); + if (cfg_addr < 0) { + ret = cfg_addr; + dev_warn(adapter, "Finding address for firmware config file in flash failed, error %d\n", + -ret); + goto bye; + } + + strcpy(config_name, "On Flash"); + mtype = FW_MEMTYPE_CF_FLASH; + maddr = cfg_addr; + + /* + * Issue a Capability Configuration command to the firmware to get it + * to parse the Configuration File. We don't use t4_fw_config_file() + * because we want the ability to modify various features after we've + * processed the configuration file ... + */ + memset(&caps_cmd, 0, sizeof(caps_cmd)); + caps_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) | + F_FW_CMD_REQUEST | F_FW_CMD_READ); + caps_cmd.cfvalid_to_len16 = + cpu_to_be32(F_FW_CAPS_CONFIG_CMD_CFVALID | + V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) | + V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) | + FW_LEN16(caps_cmd)); + ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd), + &caps_cmd); + /* + * If the CAPS_CONFIG failed with an ENOENT (for a Firmware + * Configuration File in FLASH), our last gasp effort is to use the + * Firmware Configuration File which is embedded in the firmware. A + * very few early versions of the firmware didn't have one embedded + * but we can ignore those. + */ + if (ret == -ENOENT) { + dev_info(adapter, "%s: Going for embedded config in firmware..\n", + __func__); + + memset(&caps_cmd, 0, sizeof(caps_cmd)); + caps_cmd.op_to_write = + cpu_to_be32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) | + F_FW_CMD_REQUEST | F_FW_CMD_READ); + caps_cmd.cfvalid_to_len16 = cpu_to_be32(FW_LEN16(caps_cmd)); + ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, + sizeof(caps_cmd), &caps_cmd); + strcpy(config_name, "Firmware Default"); + } + + config_issued = 1; + if (ret < 0) + goto bye; + + finiver = be32_to_cpu(caps_cmd.finiver); + finicsum = be32_to_cpu(caps_cmd.finicsum); + cfcsum = be32_to_cpu(caps_cmd.cfcsum); + if (finicsum != cfcsum) + dev_warn(adapter, "Configuration File checksum mismatch: [fini] csum=%#x, computed csum=%#x\n", + finicsum, cfcsum); + + /* + * If we're a pure NIC driver then disable all offloading facilities. + * This will allow the firmware to optimize aspects of the hardware + * configuration which will result in improved performance. + */ + caps_cmd.niccaps &= cpu_to_be16(~FW_CAPS_CONFIG_NIC_ETHOFLD); + caps_cmd.toecaps = 0; + caps_cmd.iscsicaps = 0; + caps_cmd.rdmacaps = 0; + caps_cmd.fcoecaps = 0; + + /* + * And now tell the firmware to use the configuration we just loaded. + */ + caps_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) | + F_FW_CMD_REQUEST | F_FW_CMD_WRITE); + caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd)); + ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd), + NULL); + if (ret < 0) { + dev_warn(adapter, "Unable to finalize Firmware Capabilities %d\n", + -ret); + goto bye; + } + + /* + * Tweak configuration based on system architecture, etc. + */ + ret = adap_init0_tweaks(adapter); + if (ret < 0) { + dev_warn(adapter, "Unable to do init0-tweaks %d\n", -ret); + goto bye; + } + + /* + * And finally tell the firmware to initialize itself using the + * parameters from the Configuration File. 
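+ * t4_fw_initialize() kicks that off; only errors are reported back here.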
+ */ + ret = t4_fw_initialize(adapter, adapter->mbox); + if (ret < 0) { + dev_warn(adapter, "Initializing Firmware failed, error %d\n", + -ret); + goto bye; + } + + /* + * Return successfully and note that we're operating with parameters + * not supplied by the driver, rather than from hard-wired + * initialization constants buried in the driver. + */ + dev_info(adapter, + "Successfully configured using Firmware Configuration File \"%s\", version %#x, computed checksum %#x\n", + config_name, finiver, cfcsum); + + return 0; + + /* + * Something bad happened. Return the error ... (If the "error" + * is that there's no Configuration File on the adapter we don't + * want to issue a warning since this is fairly common.) + */ +bye: + if (config_issued && ret != -ENOENT) + dev_warn(adapter, "\"%s\" configuration file error %d\n", + config_name, -ret); + + dev_debug(adapter, "%s: returning ret = %d ..\n", __func__, ret); + return ret; +} + +static int adap_init0(struct adapter *adap) +{ + struct fw_caps_config_cmd caps_cmd; + int ret = 0; + u32 v, port_vec; + enum dev_state state; + u32 params[7], val[7]; + int reset = 1; + int mbox = adap->mbox; + + /* + * Contact FW, advertising Master capability. + */ + ret = t4_fw_hello(adap, adap->mbox, adap->mbox, MASTER_MAY, &state); + if (ret < 0) { + dev_err(adap, "%s: could not connect to FW, error %d\n", + __func__, -ret); + goto bye; + } + + CXGBE_DEBUG_MBOX(adap, "%s: adap->mbox = %d; ret = %d\n", __func__, + adap->mbox, ret); + + if (ret == mbox) + adap->flags |= MASTER_PF; + + if (state == DEV_STATE_INIT) { + /* + * Force halt and reset FW because a previous instance may have + * exited abnormally without properly shutting down + */ + ret = t4_fw_halt(adap, adap->mbox, reset); + if (ret < 0) { + dev_err(adap, "Failed to halt. Exit.\n"); + goto bye; + } + + ret = t4_fw_restart(adap, adap->mbox, reset); + if (ret < 0) { + dev_err(adap, "Failed to restart. Exit.\n"); + goto bye; + } + state = (enum dev_state)((unsigned)state & ~DEV_STATE_INIT); + } + + t4_get_version_info(adap); + + ret = t4_get_core_clock(adap, &adap->params.vpd); + if (ret < 0) { + dev_err(adap, "%s: could not get core clock, error %d\n", + __func__, -ret); + goto bye; + } + + /* + * If the firmware is initialized already (and we're not forcing a + * master initialization), note that we're living with existing + * adapter parameters. Otherwise, it's time to try initializing the + * adapter ... + */ + if (state == DEV_STATE_INIT) { + dev_info(adap, "Coming up as %s: Adapter already initialized\n", + adap->flags & MASTER_PF ? "MASTER" : "SLAVE"); + } else { + dev_info(adap, "Coming up as MASTER: Initializing adapter\n"); + + ret = adap_init0_config(adap, reset); + if (ret == -ENOENT) { + dev_err(adap, + "No Configuration File present on adapter. Using hard-wired configuration parameters.\n"); + goto bye; + } + } + if (ret < 0) { + dev_err(adap, "could not initialize adapter, error %d\n", -ret); + goto bye; + } + + /* Now that we've successfully configured and initialized the adapter + * (or found it already initialized), we can ask the Firmware what + * resources it has provisioned for us. + */ + ret = t4_get_pfres(adap); + if (ret) { + dev_err(adap->pdev_dev, + "Unable to retrieve resource provisioning info\n"); + goto bye; + } + + /* Find out what ports are available to us. 
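+ * The PORTVEC device parameter returns a bitmap of ports owned by this PF; + * its population count becomes params.nports.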
*/ + v = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | + V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC); + ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &v, &port_vec); + if (ret < 0) { + dev_err(adap, "%s: failure in t4_query_params; error = %d\n", + __func__, ret); + goto bye; + } + + adap->params.nports = hweight32(port_vec); + adap->params.portvec = port_vec; + + dev_debug(adap, "%s: adap->params.nports = %u\n", __func__, + adap->params.nports); + + /* + * Give the SGE code a chance to pull in anything that it needs ... + * Note that this must be called after we retrieve our VPD parameters + * in order to know how to convert core ticks to seconds, etc. + */ + ret = t4_sge_init(adap); + if (ret < 0) { + dev_err(adap, "t4_sge_init failed with error %d\n", + -ret); + goto bye; + } + + /* + * Grab some of our basic fundamental operating parameters. + */ + params[0] = CXGBE_FW_PARAM_PFVF(L2T_START); + params[1] = CXGBE_FW_PARAM_PFVF(L2T_END); + params[2] = CXGBE_FW_PARAM_PFVF(FILTER_START); + params[3] = CXGBE_FW_PARAM_PFVF(FILTER_END); + ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 4, params, val); + if (ret < 0) + goto bye; + adap->l2t_start = val[0]; + adap->l2t_end = val[1]; + adap->tids.ftid_base = val[2]; + adap->tids.nftids = val[3] - val[2] + 1; + + params[0] = CXGBE_FW_PARAM_PFVF(CLIP_START); + params[1] = CXGBE_FW_PARAM_PFVF(CLIP_END); + ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val); + if (ret < 0) + goto bye; + adap->clipt_start = val[0]; + adap->clipt_end = val[1]; + + /* + * Get device capabilities so we can determine what resources we need + * to manage. + */ + memset(&caps_cmd, 0, sizeof(caps_cmd)); + caps_cmd.op_to_write = htonl(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) | + F_FW_CMD_REQUEST | F_FW_CMD_READ); + caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd)); + ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd), + &caps_cmd); + if (ret < 0) + goto bye; + + if ((caps_cmd.niccaps & cpu_to_be16(FW_CAPS_CONFIG_NIC_HASHFILTER)) && + is_t6(adap->params.chip)) { + if (cxgbe_init_hash_filter(adap) < 0) + goto bye; + } + + /* See if FW supports FW_FILTER2 work request */ + if (is_t4(adap->params.chip)) { + adap->params.filter2_wr_support = 0; + } else { + params[0] = CXGBE_FW_PARAM_DEV(FILTER2_WR); + ret = t4_query_params(adap, adap->mbox, adap->pf, 0, + 1, params, val); + adap->params.filter2_wr_support = (ret == 0 && val[0] != 0); + } + + /* Check if FW supports returning vin. + * If this is not supported, driver will interpret + * these values from viid. + */ + params[0] = CXGBE_FW_PARAM_DEV(OPAQUE_VIID_SMT_EXTN); + ret = t4_query_params(adap, adap->mbox, adap->pf, 0, + 1, params, val); + adap->params.viid_smt_extn_support = (ret == 0 && val[0] != 0); + + /* query tid-related parameters */ + params[0] = CXGBE_FW_PARAM_DEV(NTID); + ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, + params, val); + if (ret < 0) + goto bye; + adap->tids.ntids = val[0]; + adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS); + + /* If we're running on newer firmware, let it know that we're + * prepared to deal with encapsulated CPL messages. Older + * firmware won't understand this and we'll just get + * unencapsulated messages ... + */ + params[0] = CXGBE_FW_PARAM_PFVF(CPLFW4MSG_ENCAP); + val[0] = 1; + (void)t4_set_params(adap, adap->mbox, adap->pf, 0, 1, params, val); + + /* + * Find out whether we're allowed to use the T5+ ULPTX MEMWRITE DSGL + * capability. 
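+ * (T4 never supports it, so the query is skipped for those chips.)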
Earlier versions of the firmware didn't have the + * ULPTX_MEMWRITE_DSGL so we'll interpret a query failure as no + * permission to use ULPTX MEMWRITE DSGL. + */ + if (is_t4(adap->params.chip)) { + adap->params.ulptx_memwrite_dsgl = false; + } else { + params[0] = CXGBE_FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL); + ret = t4_query_params(adap, adap->mbox, adap->pf, 0, + 1, params, val); + adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0); + } + + /* Query for max number of packets that can be coalesced for Tx */ + params[0] = CXGBE_FW_PARAM_PFVF(MAX_PKTS_PER_ETH_TX_PKTS_WR); + ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, params, val); + if (!ret && val[0] > 0) + adap->params.max_tx_coalesce_num = val[0]; + else + adap->params.max_tx_coalesce_num = ETH_COALESCE_PKT_NUM; + + /* + * The MTU/MSS Table is initialized by now, so load their values. If + * we're initializing the adapter, then we'll make any modifications + * we want to the MTU/MSS Table and also initialize the congestion + * parameters. + */ + t4_read_mtu_tbl(adap, adap->params.mtus, NULL); + if (state != DEV_STATE_INIT) { + int i; + + /* + * The default MTU Table contains values 1492 and 1500. + * However, for TCP, it's better to have two values which are + * a multiple of 8 +/- 4 bytes apart near this popular MTU. + * This allows us to have a TCP Data Payload which is a + * multiple of 8 regardless of what combination of TCP Options + * are in use (always a multiple of 4 bytes) which is + * important for performance reasons. For instance, if no + * options are in use, then we have a 20-byte IP header and a + * 20-byte TCP header. In this case, a 1500-byte MSS would + * result in a TCP Data Payload of 1500 - 40 == 1460 bytes + * which is not a multiple of 8. So using an MSS of 1488 in + * this case results in a TCP Data Payload of 1448 bytes which + * is a multiple of 8. On the other hand, if 12-byte TCP Time + * Stamps have been negotiated, then an MTU of 1500 bytes + * results in a TCP Data Payload of 1448 bytes which, as + * above, is a multiple of 8 bytes ... + */ + for (i = 0; i < NMTUS; i++) + if (adap->params.mtus[i] == 1492) { + adap->params.mtus[i] = 1488; + break; + } + + t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd, + adap->params.b_wnd); + } + t4_init_sge_params(adap); + ret = configure_filter_mode_mask(adap); + if (ret < 0) + goto bye; + t4_init_tp_params(adap); + configure_pcie_ext_tag(adap); + configure_vlan_types(adap); + cxgbe_configure_max_ethqsets(adap); + + adap->params.drv_memwin = MEMWIN_NIC; + adap->flags |= FW_OK; + dev_debug(adap, "%s: returning zero..\n", __func__); + return 0; + + /* + * Something bad happened. If a command timed out or failed with EIO + * FW does not operate within its spec or something catastrophic + * happened to HW/FW, stop issuing commands. + */ +bye: + if (ret != -ETIMEDOUT && ret != -EIO) + t4_fw_bye(adap, adap->mbox); + return ret; +} + +/** + * t4_os_portmod_changed - handle port module changes + * @adap: the adapter associated with the module change + * @port_id: the port index whose module status has changed + * + * This is the OS-dependent handler for port module changes. It is + * invoked when a port module is removed or inserted for any OS-specific + * processing. 
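+ * In this PMD it simply logs the inserted module type (or its absence).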
+ */ +void t4_os_portmod_changed(const struct adapter *adap, int port_id) +{ + static const char * const mod_str[] = { + NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM" + }; + + const struct port_info *pi = adap2pinfo(adap, port_id); + + if (pi->mod_type == FW_PORT_MOD_TYPE_NONE) + dev_info(adap, "Port%d: port module unplugged\n", pi->port_id); + else if (pi->mod_type < ARRAY_SIZE(mod_str)) + dev_info(adap, "Port%d: %s port module inserted\n", pi->port_id, + mod_str[pi->mod_type]); + else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED) + dev_info(adap, "Port%d: unsupported port module inserted\n", + pi->port_id); + else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN) + dev_info(adap, "Port%d: unknown port module inserted\n", + pi->port_id); + else if (pi->mod_type == FW_PORT_MOD_TYPE_ERROR) + dev_info(adap, "Port%d: transceiver module error\n", + pi->port_id); + else + dev_info(adap, "Port%d: unknown module type %d inserted\n", + pi->port_id, pi->mod_type); +} + +bool cxgbe_force_linkup(struct adapter *adap) +{ + if (is_pf4(adap)) + return false; /* force_linkup not required for pf driver */ + + return adap->devargs.force_link_up; +} + +/** + * link_start - enable a port + * @dev: the port to enable + * + * Performs the MAC and PHY actions needed to enable a port. + */ +int cxgbe_link_start(struct port_info *pi) +{ + struct adapter *adapter = pi->adapter; + u64 conf_offloads; + unsigned int mtu; + int ret; + + mtu = pi->eth_dev->data->dev_conf.rxmode.max_rx_pkt_len - + (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN); + + conf_offloads = pi->eth_dev->data->dev_conf.rxmode.offloads; + + /* + * We do not set address filters and promiscuity here, the stack does + * that step explicitly. + */ + ret = t4_set_rxmode(adapter, adapter->mbox, pi->viid, mtu, -1, -1, -1, + !!(conf_offloads & DEV_RX_OFFLOAD_VLAN_STRIP), + true); + if (ret == 0) { + ret = cxgbe_mpstcam_modify(pi, (int)pi->xact_addr_filt, + (u8 *)&pi->eth_dev->data->mac_addrs[0]); + if (ret >= 0) { + pi->xact_addr_filt = ret; + ret = 0; + } + } + if (ret == 0 && is_pf4(adapter)) + ret = t4_link_l1cfg(adapter, adapter->mbox, pi->tx_chan, + &pi->link_cfg); + if (ret == 0) { + /* + * Enabling a Virtual Interface can result in an interrupt + * during the processing of the VI Enable command and, in some + * paths, result in an attempt to issue another command in the + * interrupt context. Thus, we disable interrupts during the + * course of the VI Enable command ... 
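+ * t4_enable_vi_params() below enables both Rx and Tx on the VI in one call.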
+ */ + ret = t4_enable_vi_params(adapter, adapter->mbox, pi->viid, + true, true, false); + } + + if (ret == 0 && cxgbe_force_linkup(adapter)) + pi->eth_dev->data->dev_link.link_status = ETH_LINK_UP; + return ret; +} + +/** + * cxgbe_write_rss_conf - flash the RSS configuration for a given port + * @pi: the port + * @rss_hf: Hash configuration to apply + */ +int cxgbe_write_rss_conf(const struct port_info *pi, uint64_t rss_hf) +{ + struct adapter *adapter = pi->adapter; + const struct sge_eth_rxq *rxq; + u64 flags = 0; + u16 rss; + int err; + + /* Should never be called before setting up sge eth rx queues */ + if (!(adapter->flags & FULL_INIT_DONE)) { + dev_err(adap, "%s No RXQs available on port %d\n", + __func__, pi->port_id); + return -EINVAL; + } + + /* Don't allow unsupported hash functions */ + if (rss_hf & ~CXGBE_RSS_HF_ALL) + return -EINVAL; + + if (rss_hf & CXGBE_RSS_HF_IPV4_MASK) + flags |= F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN; + + if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) + flags |= F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN; + + if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) + flags |= F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN | + F_FW_RSS_VI_CONFIG_CMD_UDPEN; + + if (rss_hf & CXGBE_RSS_HF_IPV6_MASK) + flags |= F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN; + + if (rss_hf & CXGBE_RSS_HF_TCP_IPV6_MASK) + flags |= F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN | + F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN; + + if (rss_hf & CXGBE_RSS_HF_UDP_IPV6_MASK) + flags |= F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN | + F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN | + F_FW_RSS_VI_CONFIG_CMD_UDPEN; + + rxq = &adapter->sge.ethrxq[pi->first_qset]; + rss = rxq[0].rspq.abs_id; + + /* If Tunnel All Lookup isn't specified in the global RSS + * Configuration, then we need to specify a default Ingress + * Queue for any ingress packets which aren't hashed. We'll + * use our first ingress queue ... + */ + err = t4_config_vi_rss(adapter, adapter->mbox, pi->viid, + flags, rss); + return err; +} + +/** + * cxgbe_write_rss - write the RSS table for a given port + * @pi: the port + * @queues: array of queue indices for RSS + * + * Sets up the portion of the HW RSS table for the port's VI to distribute + * packets to the Rx queues in @queues. + */ +int cxgbe_write_rss(const struct port_info *pi, const u16 *queues) +{ + u16 *rss; + int i, err; + struct adapter *adapter = pi->adapter; + const struct sge_eth_rxq *rxq; + + /* Should never be called before setting up sge eth rx queues */ + BUG_ON(!(adapter->flags & FULL_INIT_DONE)); + + rxq = &adapter->sge.ethrxq[pi->first_qset]; + rss = rte_zmalloc(NULL, pi->rss_size * sizeof(u16), 0); + if (!rss) + return -ENOMEM; + + /* map the queue indices to queue ids */ + for (i = 0; i < pi->rss_size; i++, queues++) + rss[i] = rxq[*queues].rspq.abs_id; + + err = t4_config_rss_range(adapter, adapter->pf, pi->viid, 0, + pi->rss_size, rss, pi->rss_size); + rte_free(rss); + return err; +} + +/** + * setup_rss - configure RSS + * @adapter: the adapter + * + * Sets up RSS to distribute packets to multiple receive queues. We + * configure the RSS CPU lookup table to distribute to the number of HW + * receive queues, and the response queue lookup table to narrow that + * down to the response queues actually configured for each port. + * We always configure the RSS mapping for all ports since the mapping + * table has plenty of entries. 
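+ * Entries are filled round-robin across the port's Rx queue sets + * (pi->rss[j] = j % pi->n_rx_qsets) before being written to the hardware.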
+ */ +int cxgbe_setup_rss(struct port_info *pi) +{ + int j, err; + struct adapter *adapter = pi->adapter; + + dev_debug(adapter, "%s: pi->rss_size = %u; pi->n_rx_qsets = %u\n", + __func__, pi->rss_size, pi->n_rx_qsets); + + if (!(pi->flags & PORT_RSS_DONE)) { + if (adapter->flags & FULL_INIT_DONE) { + /* Fill default values with equal distribution */ + for (j = 0; j < pi->rss_size; j++) + pi->rss[j] = j % pi->n_rx_qsets; + + err = cxgbe_write_rss(pi, pi->rss); + if (err) + return err; + + err = cxgbe_write_rss_conf(pi, pi->rss_hf); + if (err) + return err; + pi->flags |= PORT_RSS_DONE; + } + } + return 0; +} + +/* + * Enable NAPI scheduling and interrupt generation for all Rx queues. + */ +static void enable_rx(struct adapter *adap, struct sge_rspq *q) +{ + /* 0-increment GTS to start the timer and enable interrupts */ + t4_write_reg(adap, is_pf4(adap) ? MYPF_REG(A_SGE_PF_GTS) : + T4VF_SGE_BASE_ADDR + A_SGE_VF_GTS, + V_SEINTARM(q->intr_params) | + V_INGRESSQID(q->cntxt_id)); +} + +void cxgbe_enable_rx_queues(struct port_info *pi) +{ + struct adapter *adap = pi->adapter; + struct sge *s = &adap->sge; + unsigned int i; + + for (i = 0; i < pi->n_rx_qsets; i++) + enable_rx(adap, &s->ethrxq[pi->first_qset + i].rspq); +} + +/** + * fw_caps_to_speed_caps - translate Firmware Port Caps to Speed Caps. + * @port_type: Firmware Port Type + * @fw_caps: Firmware Port Capabilities + * @speed_caps: Device Info Speed Capabilities + * + * Translate a Firmware Port Capabilities specification to Device Info + * Speed Capabilities. + */ +static void fw_caps_to_speed_caps(enum fw_port_type port_type, + unsigned int fw_caps, + u32 *speed_caps) +{ +#define SET_SPEED(__speed_name) \ + do { \ + *speed_caps |= ETH_LINK_ ## __speed_name; \ + } while (0) + +#define FW_CAPS_TO_SPEED(__fw_name) \ + do { \ + if (fw_caps & FW_PORT_CAP32_ ## __fw_name) \ + SET_SPEED(__fw_name); \ + } while (0) + + switch (port_type) { + case FW_PORT_TYPE_BT_SGMII: + case FW_PORT_TYPE_BT_XFI: + case FW_PORT_TYPE_BT_XAUI: + FW_CAPS_TO_SPEED(SPEED_100M); + FW_CAPS_TO_SPEED(SPEED_1G); + FW_CAPS_TO_SPEED(SPEED_10G); + break; + + case FW_PORT_TYPE_KX4: + case FW_PORT_TYPE_KX: + case FW_PORT_TYPE_FIBER_XFI: + case FW_PORT_TYPE_FIBER_XAUI: + case FW_PORT_TYPE_SFP: + case FW_PORT_TYPE_QSFP_10G: + case FW_PORT_TYPE_QSA: + FW_CAPS_TO_SPEED(SPEED_1G); + FW_CAPS_TO_SPEED(SPEED_10G); + break; + + case FW_PORT_TYPE_KR: + SET_SPEED(SPEED_10G); + break; + + case FW_PORT_TYPE_BP_AP: + case FW_PORT_TYPE_BP4_AP: + SET_SPEED(SPEED_1G); + SET_SPEED(SPEED_10G); + break; + + case FW_PORT_TYPE_BP40_BA: + case FW_PORT_TYPE_QSFP: + SET_SPEED(SPEED_40G); + break; + + case FW_PORT_TYPE_CR_QSFP: + case FW_PORT_TYPE_SFP28: + case FW_PORT_TYPE_KR_SFP28: + FW_CAPS_TO_SPEED(SPEED_1G); + FW_CAPS_TO_SPEED(SPEED_10G); + FW_CAPS_TO_SPEED(SPEED_25G); + break; + + case FW_PORT_TYPE_CR2_QSFP: + SET_SPEED(SPEED_50G); + break; + + case FW_PORT_TYPE_KR4_100G: + case FW_PORT_TYPE_CR4_QSFP: + FW_CAPS_TO_SPEED(SPEED_25G); + FW_CAPS_TO_SPEED(SPEED_40G); + FW_CAPS_TO_SPEED(SPEED_50G); + FW_CAPS_TO_SPEED(SPEED_100G); + break; + + default: + break; + } + +#undef FW_CAPS_TO_SPEED +#undef SET_SPEED +} + +/** + * cxgbe_get_speed_caps - Fetch supported speed capabilities + * @pi: Underlying port's info + * @speed_caps: Device Info speed capabilities + * + * Fetch supported speed capabilities of the underlying port. 
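+ * ETH_LINK_SPEED_FIXED is also reported when the port does not advertise + * autonegotiation (FW_PORT_CAP32_ANEG).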
+ */ +void cxgbe_get_speed_caps(struct port_info *pi, u32 *speed_caps) +{ + *speed_caps = 0; + + fw_caps_to_speed_caps(pi->port_type, pi->link_cfg.pcaps, + speed_caps); + + if (!(pi->link_cfg.pcaps & FW_PORT_CAP32_ANEG)) + *speed_caps |= ETH_LINK_SPEED_FIXED; +} + +/** + * cxgbe_set_link_status - Set device link up or down. + * @pi: Underlying port's info + * @status: 0 - down, 1 - up + * + * Set the device link up or down. + */ +int cxgbe_set_link_status(struct port_info *pi, bool status) +{ + struct adapter *adapter = pi->adapter; + int err = 0; + + err = t4_enable_vi(adapter, adapter->mbox, pi->viid, status, status); + if (err) { + dev_err(adapter, "%s: disable_vi failed: %d\n", __func__, err); + return err; + } + + if (!status) + t4_reset_link_config(adapter, pi->pidx); + + return 0; +} + +/** + * cxgb_up - enable the adapter + * @adap: adapter being enabled + * + * Called when the first port is enabled, this function performs the + * actions necessary to make an adapter operational, such as completing + * the initialization of HW modules, and enabling interrupts. + */ +int cxgbe_up(struct adapter *adap) +{ + enable_rx(adap, &adap->sge.fw_evtq); + t4_sge_tx_monitor_start(adap); + if (is_pf4(adap)) + t4_intr_enable(adap); + adap->flags |= FULL_INIT_DONE; + + /* TODO: deadman watchdog ?? */ + return 0; +} + +/* + * Close the port + */ +int cxgbe_down(struct port_info *pi) +{ + return cxgbe_set_link_status(pi, false); +} + +/* + * Release resources when all the ports have been stopped. + */ +void cxgbe_close(struct adapter *adapter) +{ + struct port_info *pi; + int i; + + if (adapter->flags & FULL_INIT_DONE) { + tid_free(&adapter->tids); + t4_cleanup_mpstcam(adapter); + t4_cleanup_clip_tbl(adapter); + t4_cleanup_l2t(adapter); + t4_cleanup_smt(adapter); + if (is_pf4(adapter)) + t4_intr_disable(adapter); + t4_sge_tx_monitor_stop(adapter); + t4_free_sge_resources(adapter); + for_each_port(adapter, i) { + pi = adap2pinfo(adapter, i); + if (pi->viid != 0) + t4_free_vi(adapter, adapter->mbox, + adapter->pf, 0, pi->viid); + rte_eth_dev_release_port(pi->eth_dev); + } + adapter->flags &= ~FULL_INIT_DONE; + } + + if (is_pf4(adapter) && (adapter->flags & FW_OK)) + t4_fw_bye(adapter, adapter->mbox); +} + +static void adap_smt_index(struct adapter *adapter, u32 *smt_start_idx, + u32 *smt_size) +{ + u32 params[2], smt_val[2]; + int ret; + + params[0] = CXGBE_FW_PARAM_PFVF(GET_SMT_START); + params[1] = CXGBE_FW_PARAM_PFVF(GET_SMT_SIZE); + + ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0, + 2, params, smt_val); + + /* if FW doesn't recognize this command then set it to default setting + * which is start index as 0 and size as 256. + */ + if (ret < 0) { + *smt_start_idx = 0; + *smt_size = SMT_SIZE; + } else { + *smt_start_idx = smt_val[0]; + /* smt size can be zero, if nsmt is not yet configured in + * the config file or set as zero, then configure all the + * remaining entries to this PF itself. + */ + if (!smt_val[1]) + *smt_size = SMT_SIZE - *smt_start_idx; + else + *smt_size = smt_val[1]; + } +} + +int cxgbe_probe(struct adapter *adapter) +{ + u32 smt_start_idx, smt_size; + struct port_info *pi; + int func, i; + int err = 0; + u32 whoami; + int chip; + + whoami = t4_read_reg(adapter, A_PL_WHOAMI); + chip = t4_get_chip_type(adapter, + CHELSIO_PCI_ID_VER(adapter->pdev->id.device_id)); + if (chip < 0) + return chip; + + func = CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5 ? 
+ G_SOURCEPF(whoami) : G_T6_SOURCEPF(whoami); + + adapter->mbox = func; + adapter->pf = func; + + t4_os_lock_init(&adapter->mbox_lock); + TAILQ_INIT(&adapter->mbox_list); + t4_os_lock_init(&adapter->win0_lock); + + err = t4_prep_adapter(adapter); + if (err) + return err; + + setup_memwin(adapter); + err = adap_init0(adapter); + if (err) { + dev_err(adapter, "%s: Adapter initialization failed, error %d\n", + __func__, err); + goto out_free; + } + + if (!is_t4(adapter->params.chip)) { + /* + * The userspace doorbell BAR is split evenly into doorbell + * regions, each associated with an egress queue. If this + * per-queue region is large enough (at least UDBS_SEG_SIZE) + * then it can be used to submit a tx work request with an + * implied doorbell. Enable write combining on the BAR if + * there is room for such work requests. + */ + int s_qpp, qpp, num_seg; + + s_qpp = (S_QUEUESPERPAGEPF0 + + (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * + adapter->pf); + qpp = 1 << ((t4_read_reg(adapter, + A_SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp) + & M_QUEUESPERPAGEPF0); + num_seg = CXGBE_PAGE_SIZE / UDBS_SEG_SIZE; + if (qpp > num_seg) + dev_warn(adapter, "Incorrect SGE EGRESS QUEUES_PER_PAGE configuration, continuing in debug mode\n"); + + adapter->bar2 = (void *)adapter->pdev->mem_resource[2].addr; + if (!adapter->bar2) { + dev_err(adapter, "cannot map device bar2 region\n"); + err = -ENOMEM; + goto out_free; + } + t4_write_reg(adapter, A_SGE_STAT_CFG, V_STATSOURCE_T5(7) | + V_STATMODE(0)); + } + + for_each_port(adapter, i) { + const unsigned int numa_node = rte_socket_id(); + char name[RTE_ETH_NAME_MAX_LEN]; + struct rte_eth_dev *eth_dev; + + snprintf(name, sizeof(name), "%s_%d", + adapter->pdev->device.name, i); + + if (i == 0) { + /* First port is already allocated by DPDK */ + eth_dev = adapter->eth_dev; + goto allocate_mac; + } + + /* + * now do all data allocation - for eth_dev structure, + * and internal (private) data for the remaining ports + */ + + /* reserve an ethdev entry */ + eth_dev = rte_eth_dev_allocate(name); + if (!eth_dev) + goto out_free; + + eth_dev->data->dev_private = + rte_zmalloc_socket(name, sizeof(struct port_info), + RTE_CACHE_LINE_SIZE, numa_node); + if (!eth_dev->data->dev_private) + goto out_free; + +allocate_mac: + pi = eth_dev->data->dev_private; + adapter->port[i] = pi; + pi->eth_dev = eth_dev; + pi->adapter = adapter; + pi->xact_addr_filt = -1; + pi->port_id = i; + pi->pidx = i; + + pi->eth_dev->device = &adapter->pdev->device; + pi->eth_dev->dev_ops = adapter->eth_dev->dev_ops; + pi->eth_dev->tx_pkt_burst = adapter->eth_dev->tx_pkt_burst; + pi->eth_dev->rx_pkt_burst = adapter->eth_dev->rx_pkt_burst; + + rte_eth_copy_pci_info(pi->eth_dev, adapter->pdev); + + pi->eth_dev->data->mac_addrs = rte_zmalloc(name, + RTE_ETHER_ADDR_LEN, 0); + if (!pi->eth_dev->data->mac_addrs) { + dev_err(adapter, "%s: Mem allocation failed for storing mac addr, aborting\n", + __func__); + err = -1; + goto out_free; + } + + if (i > 0) { + /* First port will be notified by upper layer */ + rte_eth_dev_probing_finish(eth_dev); + } + } + + if (adapter->flags & FW_OK) { + err = t4_port_init(adapter, adapter->mbox, adapter->pf, 0); + if (err) { + dev_err(adapter, "%s: t4_port_init failed with err %d\n", + __func__, err); + goto out_free; + } + } + + cxgbe_cfg_queues(adapter->eth_dev); + + cxgbe_print_adapter_info(adapter); + cxgbe_print_port_info(adapter); + + adapter->clipt = t4_init_clip_tbl(adapter->clipt_start, + adapter->clipt_end); + if (!adapter->clipt) { + /* We tolerate a lack of clip_table, 
giving up some + * functionality + */ + dev_warn(adapter, "could not allocate CLIP. Continuing\n"); + } + + adap_smt_index(adapter, &smt_start_idx, &smt_size); + adapter->smt = t4_init_smt(smt_start_idx, smt_size); + if (!adapter->smt) + dev_warn(adapter, "could not allocate SMT, continuing\n"); + + adapter->l2t = t4_init_l2t(adapter->l2t_start, adapter->l2t_end); + if (!adapter->l2t) { + /* We tolerate a lack of L2T, giving up some functionality */ + dev_warn(adapter, "could not allocate L2T. Continuing\n"); + } + + if (tid_init(&adapter->tids) < 0) { + /* Disable filtering support */ + dev_warn(adapter, "could not allocate TID table, " + "filter support disabled. Continuing\n"); + } + + t4_os_lock_init(&adapter->flow_lock); + + adapter->mpstcam = t4_init_mpstcam(adapter); + if (!adapter->mpstcam) + dev_warn(adapter, "could not allocate mps tcam table." + " Continuing\n"); + + if (is_hashfilter(adapter)) { + if (t4_read_reg(adapter, A_LE_DB_CONFIG) & F_HASHEN) { + u32 hash_base, hash_reg; + + hash_reg = A_LE_DB_TID_HASHBASE; + hash_base = t4_read_reg(adapter, hash_reg); + adapter->tids.hash_base = hash_base / 4; + } + } else { + /* Disable hash filtering support */ + dev_warn(adapter, + "Maskless filter support disabled. Continuing\n"); + } + + err = cxgbe_init_rss(adapter); + if (err) + goto out_free; + + return 0; + +out_free: + for_each_port(adapter, i) { + pi = adap2pinfo(adapter, i); + if (pi->viid != 0) + t4_free_vi(adapter, adapter->mbox, adapter->pf, + 0, pi->viid); + rte_eth_dev_release_port(pi->eth_dev); + } + + if (adapter->flags & FW_OK) + t4_fw_bye(adapter, adapter->mbox); + return -err; +} diff --git a/src/spdk/dpdk/drivers/net/cxgbe/cxgbe_ofld.h b/src/spdk/dpdk/drivers/net/cxgbe/cxgbe_ofld.h new file mode 100644 index 000000000..50931ed04 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/cxgbe/cxgbe_ofld.h @@ -0,0 +1,89 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Chelsio Communications. + * All rights reserved. + */ + +#ifndef _CXGBE_OFLD_H_ +#define _CXGBE_OFLD_H_ + +#include + +#include "cxgbe_filter.h" + +#define INIT_TP_WR(w, tid) do { \ + (w)->wr.wr_hi = cpu_to_be32(V_FW_WR_OP(FW_TP_WR) | \ + V_FW_WR_IMMDLEN(sizeof(*w) - sizeof(w->wr))); \ + (w)->wr.wr_mid = cpu_to_be32( \ + V_FW_WR_LEN16(DIV_ROUND_UP(sizeof(*w), 16)) | \ + V_FW_WR_FLOWID(tid)); \ + (w)->wr.wr_lo = cpu_to_be64(0); \ +} while (0) + +#define INIT_TP_WR_MIT_CPL(w, cpl, tid) do { \ + INIT_TP_WR(w, tid); \ + OPCODE_TID(w) = cpu_to_be32(MK_OPCODE_TID(cpl, tid)); \ +} while (0) + +#define INIT_ULPTX_WR(w, wrlen, atomic, tid) do { \ + (w)->wr.wr_hi = cpu_to_be32(V_FW_WR_OP(FW_ULPTX_WR) | \ + V_FW_WR_ATOMIC(atomic)); \ + (w)->wr.wr_mid = cpu_to_be32(V_FW_WR_LEN16(DIV_ROUND_UP(wrlen, 16)) | \ + V_FW_WR_FLOWID(tid)); \ + (w)->wr.wr_lo = cpu_to_be64(0); \ +} while (0) + +/* + * Max # of ATIDs. The absolute HW max is 16K but we keep it lower. + */ +#define MAX_ATIDS 8192U + +union aopen_entry { + void *data; + union aopen_entry *next; +}; + +/* + * Holds the size, base address, free list start, etc of filter TID. + * The tables themselves are allocated dynamically. 
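/*
 * Sketch of the embedded free-list idiom that `union aopen_entry` above
 * enables: an unused slot reuses its storage as a `next` pointer, so ATID
 * allocation and release are O(1) with no extra bookkeeping. This is an
 * illustration with hypothetical names only; the driver's real
 * cxgbe_alloc_atid()/cxgbe_free_atid() (declared below) are implemented
 * elsewhere and also handle locking and usage counters.
 */
#include <stddef.h>
#include <stdio.h>

union entry {
    void *data;          /* in use: points at caller state */
    union entry *next;   /* free: links to the next free slot */
};

struct table {
    union entry slot[8];
    union entry *free_head;
};

static void table_init(struct table *t)
{
    size_t i, n = sizeof(t->slot) / sizeof(t->slot[0]);

    for (i = 0; i + 1 < n; i++)
        t->slot[i].next = &t->slot[i + 1];
    t->slot[n - 1].next = NULL;
    t->free_head = &t->slot[0];
}

static int alloc_id(struct table *t, void *data)
{
    union entry *e = t->free_head;

    if (!e)
        return -1;                  /* table full */
    t->free_head = e->next;
    e->data = data;
    return (int)(e - t->slot);      /* the id is simply the slot index */
}

static void free_id(struct table *t, int id)
{
    union entry *e = &t->slot[id];

    e->next = t->free_head;         /* push the slot back on the free list */
    t->free_head = e;
}

int main(void)
{
    struct table t;
    int id;

    table_init(&t);
    id = alloc_id(&t, "connection state");
    printf("allocated id %d -> %s\n", id, (const char *)t.slot[id].data);
    free_id(&t, id);
    return 0;
}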
+ */ +struct tid_info { + void **tid_tab; + unsigned int ntids; + struct filter_entry *ftid_tab; /* Normal filters */ + union aopen_entry *atid_tab; + struct rte_bitmap *ftid_bmap; + uint8_t *ftid_bmap_array; + unsigned int nftids, natids; + unsigned int ftid_base, hash_base; + + union aopen_entry *afree; + unsigned int atids_in_use; + + /* TIDs in the TCAM */ + rte_atomic32_t tids_in_use; + /* TIDs in the HASH */ + rte_atomic32_t hash_tids_in_use; + rte_atomic32_t conns_in_use; + + rte_spinlock_t atid_lock __rte_cache_aligned; + rte_spinlock_t ftid_lock; +}; + +static inline void *lookup_tid(const struct tid_info *t, unsigned int tid) +{ + return tid < t->ntids ? t->tid_tab[tid] : NULL; +} + +static inline void *lookup_atid(const struct tid_info *t, unsigned int atid) +{ + return atid < t->natids ? t->atid_tab[atid].data : NULL; +} + +int cxgbe_alloc_atid(struct tid_info *t, void *data); +void cxgbe_free_atid(struct tid_info *t, unsigned int atid); +void cxgbe_remove_tid(struct tid_info *t, unsigned int qid, unsigned int tid, + unsigned short family); +void cxgbe_insert_tid(struct tid_info *t, void *data, unsigned int tid, + unsigned short family); + +#endif /* _CXGBE_OFLD_H_ */ diff --git a/src/spdk/dpdk/drivers/net/cxgbe/cxgbe_pfvf.h b/src/spdk/dpdk/drivers/net/cxgbe/cxgbe_pfvf.h new file mode 100644 index 000000000..0b7c52aec --- /dev/null +++ b/src/spdk/dpdk/drivers/net/cxgbe/cxgbe_pfvf.h @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Chelsio Communications. + * All rights reserved. + */ + +#ifndef _CXGBE_PFVF_H_ +#define _CXGBE_PFVF_H_ + +#define CXGBE_FW_PARAM_DEV(param) \ + (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \ + V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param)) + +#define CXGBE_FW_PARAM_PFVF(param) \ + (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \ + V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param) | \ + V_FW_PARAMS_PARAM_Y(0) | \ + V_FW_PARAMS_PARAM_Z(0)) + +void cxgbe_dev_rx_queue_release(void *q); +void cxgbe_dev_tx_queue_release(void *q); +void cxgbe_dev_stop(struct rte_eth_dev *eth_dev); +void cxgbe_dev_close(struct rte_eth_dev *eth_dev); +int cxgbe_dev_info_get(struct rte_eth_dev *eth_dev, + struct rte_eth_dev_info *device_info); +int cxgbe_dev_promiscuous_enable(struct rte_eth_dev *eth_dev); +int cxgbe_dev_promiscuous_disable(struct rte_eth_dev *eth_dev); +int cxgbe_dev_allmulticast_enable(struct rte_eth_dev *eth_dev); +int cxgbe_dev_allmulticast_disable(struct rte_eth_dev *eth_dev); +int cxgbe_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *addr); +int cxgbe_dev_configure(struct rte_eth_dev *eth_dev); +int cxgbe_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t queue_idx, + uint16_t nb_desc, unsigned int socket_id, + const struct rte_eth_txconf *tx_conf); +int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t queue_idx, + uint16_t nb_desc, unsigned int socket_id, + const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *mp); +int cxgbe_dev_tx_queue_start(struct rte_eth_dev *eth_dev, + uint16_t tx_queue_id); +int cxgbe_dev_rx_queue_start(struct rte_eth_dev *eth_dev, + uint16_t tx_queue_id); +int cxgbe_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id); +int cxgbe_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id); +int cxgbe_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu); +int cxgbe_dev_start(struct rte_eth_dev *eth_dev); +int cxgbe_dev_link_update(struct rte_eth_dev *eth_dev, + int wait_to_complete); +int cxgbe_dev_set_link_up(struct rte_eth_dev *dev); +int 
cxgbe_dev_set_link_down(struct rte_eth_dev *dev); +uint16_t cxgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); +uint16_t cxgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); +const uint32_t *cxgbe_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev); +#endif /* _CXGBE_PFVF_H_ */ diff --git a/src/spdk/dpdk/drivers/net/cxgbe/cxgbevf_ethdev.c b/src/spdk/dpdk/drivers/net/cxgbe/cxgbevf_ethdev.c new file mode 100644 index 000000000..4165ba986 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/cxgbe/cxgbevf_ethdev.c @@ -0,0 +1,217 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Chelsio Communications. + * All rights reserved. + */ + +#include +#include + +#include "cxgbe.h" +#include "cxgbe_pfvf.h" + +/* + * Macros needed to support the PCI Device ID Table ... + */ +#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \ + static const struct rte_pci_id cxgb4vf_pci_tbl[] = { +#define CH_PCI_DEVICE_ID_FUNCTION 0x8 + +#define PCI_VENDOR_ID_CHELSIO 0x1425 + +#define CH_PCI_ID_TABLE_ENTRY(devid) \ + { RTE_PCI_DEVICE(PCI_VENDOR_ID_CHELSIO, (devid)) } + +#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \ + { .vendor_id = 0, } \ + } + +/* + *... and the PCI ID Table itself ... + */ +#include "base/t4_pci_id_tbl.h" + +/* + * Get port statistics. + */ +static int cxgbevf_dev_stats_get(struct rte_eth_dev *eth_dev, + struct rte_eth_stats *eth_stats) +{ + struct port_info *pi = eth_dev->data->dev_private; + struct adapter *adapter = pi->adapter; + struct sge *s = &adapter->sge; + struct port_stats ps; + unsigned int i; + + cxgbevf_stats_get(pi, &ps); + + /* RX Stats */ + eth_stats->ierrors = ps.rx_len_err; + + /* TX Stats */ + eth_stats->opackets = ps.tx_bcast_frames + ps.tx_mcast_frames + + ps.tx_ucast_frames; + eth_stats->obytes = ps.tx_octets; + eth_stats->oerrors = ps.tx_drop; + + for (i = 0; i < pi->n_rx_qsets; i++) { + struct sge_eth_rxq *rxq = + &s->ethrxq[pi->first_qset + i]; + + eth_stats->q_ipackets[i] = rxq->stats.pkts; + eth_stats->q_ibytes[i] = rxq->stats.rx_bytes; + eth_stats->ipackets += eth_stats->q_ipackets[i]; + eth_stats->ibytes += eth_stats->q_ibytes[i]; + } + + for (i = 0; i < pi->n_tx_qsets; i++) { + struct sge_eth_txq *txq = + &s->ethtxq[pi->first_qset + i]; + + eth_stats->q_opackets[i] = txq->stats.pkts; + eth_stats->q_obytes[i] = txq->stats.tx_bytes; + } + return 0; +} + +static const struct eth_dev_ops cxgbevf_eth_dev_ops = { + .dev_start = cxgbe_dev_start, + .dev_stop = cxgbe_dev_stop, + .dev_close = cxgbe_dev_close, + .promiscuous_enable = cxgbe_dev_promiscuous_enable, + .promiscuous_disable = cxgbe_dev_promiscuous_disable, + .allmulticast_enable = cxgbe_dev_allmulticast_enable, + .allmulticast_disable = cxgbe_dev_allmulticast_disable, + .dev_configure = cxgbe_dev_configure, + .dev_infos_get = cxgbe_dev_info_get, + .dev_supported_ptypes_get = cxgbe_dev_supported_ptypes_get, + .link_update = cxgbe_dev_link_update, + .dev_set_link_up = cxgbe_dev_set_link_up, + .dev_set_link_down = cxgbe_dev_set_link_down, + .mtu_set = cxgbe_dev_mtu_set, + .tx_queue_setup = cxgbe_dev_tx_queue_setup, + .tx_queue_start = cxgbe_dev_tx_queue_start, + .tx_queue_stop = cxgbe_dev_tx_queue_stop, + .tx_queue_release = cxgbe_dev_tx_queue_release, + .rx_queue_setup = cxgbe_dev_rx_queue_setup, + .rx_queue_start = cxgbe_dev_rx_queue_start, + .rx_queue_stop = cxgbe_dev_rx_queue_stop, + .rx_queue_release = cxgbe_dev_rx_queue_release, + .stats_get = cxgbevf_dev_stats_get, + .mac_addr_set = cxgbe_mac_addr_set, +}; + +/* + * Initialize driver + * It returns 0 on 
success. + */ +static int eth_cxgbevf_dev_init(struct rte_eth_dev *eth_dev) +{ + struct port_info *pi = eth_dev->data->dev_private; + struct rte_pci_device *pci_dev; + char name[RTE_ETH_NAME_MAX_LEN]; + struct adapter *adapter = NULL; + int err = 0; + + CXGBE_FUNC_TRACE(); + + eth_dev->dev_ops = &cxgbevf_eth_dev_ops; + eth_dev->rx_pkt_burst = &cxgbe_recv_pkts; + eth_dev->tx_pkt_burst = &cxgbe_xmit_pkts; + pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); + + /* for secondary processes, we attach to ethdevs allocated by primary + * and do minimal initialization. + */ + if (rte_eal_process_type() != RTE_PROC_PRIMARY) { + int i; + + for (i = 1; i < MAX_NPORTS; i++) { + struct rte_eth_dev *rest_eth_dev; + char namei[RTE_ETH_NAME_MAX_LEN]; + + snprintf(namei, sizeof(namei), "%s_%d", + pci_dev->device.name, i); + rest_eth_dev = rte_eth_dev_attach_secondary(namei); + if (rest_eth_dev) { + rest_eth_dev->device = &pci_dev->device; + rest_eth_dev->dev_ops = + eth_dev->dev_ops; + rest_eth_dev->rx_pkt_burst = + eth_dev->rx_pkt_burst; + rest_eth_dev->tx_pkt_burst = + eth_dev->tx_pkt_burst; + rte_eth_dev_probing_finish(rest_eth_dev); + } + } + return 0; + } + + snprintf(name, sizeof(name), "cxgbevfadapter%d", + eth_dev->data->port_id); + adapter = rte_zmalloc(name, sizeof(*adapter), 0); + if (!adapter) + return -1; + + adapter->use_unpacked_mode = 1; + adapter->regs = (void *)pci_dev->mem_resource[0].addr; + if (!adapter->regs) { + dev_err(adapter, "%s: cannot map device registers\n", __func__); + err = -ENOMEM; + goto out_free_adapter; + } + adapter->pdev = pci_dev; + adapter->eth_dev = eth_dev; + pi->adapter = adapter; + + cxgbe_process_devargs(adapter); + + err = cxgbevf_probe(adapter); + if (err) { + dev_err(adapter, "%s: cxgbevf probe failed with err %d\n", + __func__, err); + goto out_free_adapter; + } + + return 0; + +out_free_adapter: + rte_free(adapter); + return err; +} + +static int eth_cxgbevf_dev_uninit(struct rte_eth_dev *eth_dev) +{ + struct port_info *pi = eth_dev->data->dev_private; + struct adapter *adap = pi->adapter; + + /* Free up other ports and all resources */ + cxgbe_close(adap); + return 0; +} + +static int eth_cxgbevf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, + struct rte_pci_device *pci_dev) +{ + return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct port_info), + eth_cxgbevf_dev_init); +} + +static int eth_cxgbevf_pci_remove(struct rte_pci_device *pci_dev) +{ + return rte_eth_dev_pci_generic_remove(pci_dev, eth_cxgbevf_dev_uninit); +} + +static struct rte_pci_driver rte_cxgbevf_pmd = { + .id_table = cxgb4vf_pci_tbl, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING, + .probe = eth_cxgbevf_pci_probe, + .remove = eth_cxgbevf_pci_remove, +}; + +RTE_PMD_REGISTER_PCI(net_cxgbevf, rte_cxgbevf_pmd); +RTE_PMD_REGISTER_PCI_TABLE(net_cxgbevf, cxgb4vf_pci_tbl); +RTE_PMD_REGISTER_KMOD_DEP(net_cxgbevf, "* igb_uio | vfio-pci"); +RTE_PMD_REGISTER_PARAM_STRING(net_cxgbevf, + CXGBE_DEVARG_CMN_KEEP_OVLAN "=<0|1> " + CXGBE_DEVARG_CMN_TX_MODE_LATENCY "=<0|1> " + CXGBE_DEVARG_VF_FORCE_LINK_UP "=<0|1> "); diff --git a/src/spdk/dpdk/drivers/net/cxgbe/cxgbevf_main.c b/src/spdk/dpdk/drivers/net/cxgbe/cxgbevf_main.c new file mode 100644 index 000000000..66fb92375 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/cxgbe/cxgbevf_main.c @@ -0,0 +1,302 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Chelsio Communications. + * All rights reserved. 
+ */ + +#include +#include +#include + +#include "base/common.h" +#include "base/t4_regs.h" +#include "base/t4_msg.h" +#include "cxgbe.h" +#include "cxgbe_pfvf.h" +#include "mps_tcam.h" + +/* + * Figure out how many Ports and Queue Sets we can support. This depends on + * knowing our Virtual Function Resources and may be called a second time if + * we fall back from MSI-X to MSI Interrupt Mode. + */ +static void size_nports_qsets(struct adapter *adapter) +{ + struct vf_resources *vfres = &adapter->params.vfres; + unsigned int pmask_nports; + + /* + * The number of "ports" which we support is equal to the number of + * Virtual Interfaces with which we've been provisioned. + */ + adapter->params.nports = vfres->nvi; + if (adapter->params.nports > MAX_NPORTS) { + dev_warn(adapter->pdev_dev, "only using %d of %d maximum" + " allowed virtual interfaces\n", MAX_NPORTS, + adapter->params.nports); + adapter->params.nports = MAX_NPORTS; + } + + /* + * We may have been provisioned with more VIs than the number of + * ports we're allowed to access (our Port Access Rights Mask). + * This is obviously a configuration conflict but we don't want to + * do anything silly just because of that. + */ + pmask_nports = hweight32(adapter->params.vfres.pmask); + if (pmask_nports < adapter->params.nports) { + dev_warn(adapter->pdev_dev, "only using %d of %d provissioned" + " virtual interfaces; limited by Port Access Rights" + " mask %#x\n", pmask_nports, adapter->params.nports, + adapter->params.vfres.pmask); + adapter->params.nports = pmask_nports; + } + + cxgbe_configure_max_ethqsets(adapter); + if (adapter->sge.max_ethqsets < adapter->params.nports) { + dev_warn(adapter->pdev_dev, "only using %d of %d available" + " virtual interfaces (too few Queue Sets)\n", + adapter->sge.max_ethqsets, adapter->params.nports); + adapter->params.nports = adapter->sge.max_ethqsets; + } +} + +void cxgbevf_stats_get(struct port_info *pi, struct port_stats *stats) +{ + t4vf_get_port_stats(pi->adapter, pi->pidx, stats); +} + +static int adap_init0vf(struct adapter *adapter) +{ + u32 param, val = 0; + int err; + + err = t4vf_fw_reset(adapter); + if (err < 0) { + dev_err(adapter->pdev_dev, "FW reset failed: err=%d\n", err); + return err; + } + + /* + * Grab basic operational parameters. These will predominantly have + * been set up by the Physical Function Driver or will be hard coded + * into the adapter. We just have to live with them ... Note that + * we _must_ get our VPD parameters before our SGE parameters because + * we need to know the adapter's core clock from the VPD in order to + * properly decode the SGE Timer Values. 
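/*
 * Tiny standalone illustration (hypothetical mask value) of how
 * size_nports_qsets() above derives the number of usable ports from the
 * Port Access Rights mask: it is simply the population count of the mask.
 */
#include <stdio.h>

static unsigned int popcount32(unsigned int w)
{
    unsigned int count = 0;

    while (w) {
        w &= w - 1;     /* clear the lowest set bit */
        count++;
    }
    return count;
}

int main(void)
{
    unsigned int pmask = 0x5;       /* e.g. access to ports 0 and 2 */

    printf("pmask %#x -> %u usable ports\n", pmask, popcount32(pmask));
    return 0;
}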
+ */ + err = t4vf_get_dev_params(adapter); + if (err) { + dev_err(adapter->pdev_dev, "unable to retrieve adapter" + " device parameters: err=%d\n", err); + return err; + } + + err = t4vf_get_vpd_params(adapter); + if (err) { + dev_err(adapter->pdev_dev, "unable to retrieve adapter" + " VPD parameters: err=%d\n", err); + return err; + } + + adapter->pf = t4vf_get_pf_from_vf(adapter); + err = t4vf_sge_init(adapter); + if (err) { + dev_err(adapter->pdev_dev, "error in sge init\n"); + return err; + } + + err = t4vf_get_rss_glb_config(adapter); + if (err) { + dev_err(adapter->pdev_dev, "unable to retrieve adapter" + " RSS parameters: err=%d\n", err); + return err; + } + if (adapter->params.rss.mode != + FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) { + dev_err(adapter->pdev_dev, "unable to operate with global RSS" + " mode %d\n", adapter->params.rss.mode); + return -EINVAL; + } + + /* If we're running on newer firmware, let it know that we're + * prepared to deal with encapsulated CPL messages. Older + * firmware won't understand this and we'll just get + * unencapsulated messages ... + */ + param = CXGBE_FW_PARAM_PFVF(CPLFW4MSG_ENCAP); + val = 1; + t4vf_set_params(adapter, 1, ¶m, &val); + + /* Query for max number of packets that can be coalesced for Tx */ + param = CXGBE_FW_PARAM_PFVF(MAX_PKTS_PER_ETH_TX_PKTS_WR); + err = t4vf_query_params(adapter, 1, ¶m, &val); + if (!err && val > 0) + adapter->params.max_tx_coalesce_num = val; + else + adapter->params.max_tx_coalesce_num = ETH_COALESCE_VF_PKT_NUM; + + /* + * Grab our Virtual Interface resource allocation, extract the + * features that we're interested in and do a bit of sanity testing on + * what we discover. + */ + err = t4vf_get_vfres(adapter); + if (err) { + dev_err(adapter->pdev_dev, "unable to get virtual interface" + " resources: err=%d\n", err); + return err; + } + + /* + * Check for various parameter sanity issues. + */ + if (adapter->params.vfres.pmask == 0) { + dev_err(adapter->pdev_dev, "no port access configured\n" + "usable!\n"); + return -EINVAL; + } + if (adapter->params.vfres.nvi == 0) { + dev_err(adapter->pdev_dev, "no virtual interfaces configured/" + "usable!\n"); + return -EINVAL; + } + + /* + * Initialize nports and max_ethqsets now that we have our Virtual + * Function Resources. 
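/*
 * The Tx-coalescing query above follows a common pattern: ask the firmware
 * for an optional parameter and fall back to a compile-time default when
 * the firmware does not recognise it. A standalone sketch of that pattern;
 * query_param() and the default below are hypothetical stand-ins for the
 * real t4vf_query_params() call and ETH_COALESCE_VF_PKT_NUM.
 */
#include <stdio.h>

#define DEFAULT_TX_COALESCE_NUM 8       /* assumed default, for illustration */

/* Hypothetical query helper: returns < 0 when the parameter is unknown. */
static int query_param(unsigned int param, unsigned int *val)
{
    (void)param;
    *val = 0;
    return -1;      /* pretend we are talking to older firmware */
}

int main(void)
{
    unsigned int val, max_coalesce;

    if (query_param(0x1234, &val) == 0 && val > 0)
        max_coalesce = val;             /* firmware-provided limit */
    else
        max_coalesce = DEFAULT_TX_COALESCE_NUM;

    printf("max tx coalesce: %u packets\n", max_coalesce);
    return 0;
}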
+ */ + size_nports_qsets(adapter); + adapter->flags |= FW_OK; + return 0; +} + +int cxgbevf_probe(struct adapter *adapter) +{ + struct port_info *pi; + unsigned int pmask; + int err = 0; + int i; + + t4_os_lock_init(&adapter->mbox_lock); + TAILQ_INIT(&adapter->mbox_list); + err = t4vf_prep_adapter(adapter); + if (err) + return err; + + if (!is_t4(adapter->params.chip)) { + adapter->bar2 = (void *)adapter->pdev->mem_resource[2].addr; + if (!adapter->bar2) { + dev_err(adapter, "cannot map device bar2 region\n"); + err = -ENOMEM; + return err; + } + } + + err = adap_init0vf(adapter); + if (err) { + dev_err(adapter, "%s: Adapter initialization failed, error %d\n", + __func__, err); + goto out_free; + } + + pmask = adapter->params.vfres.pmask; + for_each_port(adapter, i) { + const unsigned int numa_node = rte_socket_id(); + char name[RTE_ETH_NAME_MAX_LEN]; + struct rte_eth_dev *eth_dev; + int port_id; + + if (pmask == 0) + break; + port_id = ffs(pmask) - 1; + pmask &= ~(1 << port_id); + + snprintf(name, sizeof(name), "%s_%d", + adapter->pdev->device.name, i); + + if (i == 0) { + /* First port is already allocated by DPDK */ + eth_dev = adapter->eth_dev; + goto allocate_mac; + } + + /* + * now do all data allocation - for eth_dev structure, + * and internal (private) data for the remaining ports + */ + + /* reserve an ethdev entry */ + eth_dev = rte_eth_dev_allocate(name); + if (!eth_dev) { + err = -ENOMEM; + goto out_free; + } + eth_dev->data->dev_private = + rte_zmalloc_socket(name, sizeof(struct port_info), + RTE_CACHE_LINE_SIZE, numa_node); + if (!eth_dev->data->dev_private) + goto out_free; + +allocate_mac: + pi = eth_dev->data->dev_private; + adapter->port[i] = pi; + pi->eth_dev = eth_dev; + pi->adapter = adapter; + pi->xact_addr_filt = -1; + pi->port_id = port_id; + pi->pidx = i; + + pi->eth_dev->device = &adapter->pdev->device; + pi->eth_dev->dev_ops = adapter->eth_dev->dev_ops; + pi->eth_dev->tx_pkt_burst = adapter->eth_dev->tx_pkt_burst; + pi->eth_dev->rx_pkt_burst = adapter->eth_dev->rx_pkt_burst; + + rte_eth_copy_pci_info(pi->eth_dev, adapter->pdev); + pi->eth_dev->data->mac_addrs = rte_zmalloc(name, + RTE_ETHER_ADDR_LEN, 0); + if (!pi->eth_dev->data->mac_addrs) { + dev_err(adapter, "%s: Mem allocation failed for storing mac addr, aborting\n", + __func__); + err = -ENOMEM; + goto out_free; + } + + if (i > 0) { + /* First port will be notified by upper layer */ + rte_eth_dev_probing_finish(eth_dev); + } + } + + if (adapter->flags & FW_OK) { + err = t4vf_port_init(adapter); + if (err) { + dev_err(adapter, "%s: t4_port_init failed with err %d\n", + __func__, err); + goto out_free; + } + } + + cxgbe_cfg_queues(adapter->eth_dev); + cxgbe_print_adapter_info(adapter); + cxgbe_print_port_info(adapter); + + adapter->mpstcam = t4_init_mpstcam(adapter); + if (!adapter->mpstcam) + dev_warn(adapter, + "VF could not allocate mps tcam table. Continuing\n"); + + err = cxgbe_init_rss(adapter); + if (err) + goto out_free; + return 0; + +out_free: + for_each_port(adapter, i) { + pi = adap2pinfo(adapter, i); + if (pi->viid != 0) + t4_free_vi(adapter, adapter->mbox, adapter->pf, + 0, pi->viid); + rte_eth_dev_release_port(pi->eth_dev); + } + return -err; +} diff --git a/src/spdk/dpdk/drivers/net/cxgbe/l2t.c b/src/spdk/dpdk/drivers/net/cxgbe/l2t.c new file mode 100644 index 000000000..f9d651fe0 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/cxgbe/l2t.c @@ -0,0 +1,229 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Chelsio Communications. + * All rights reserved. 
+ */ + +#include "base/common.h" +#include "l2t.h" + +/** + * cxgbe_l2t_release - Release associated L2T entry + * @e: L2T entry to release + * + * Releases ref count and frees up an L2T entry from L2T table + */ +void cxgbe_l2t_release(struct l2t_entry *e) +{ + if (rte_atomic32_read(&e->refcnt) != 0) + rte_atomic32_dec(&e->refcnt); +} + +/** + * Process a CPL_L2T_WRITE_RPL. Note that the TID in the reply is really + * the L2T index it refers to. + */ +void cxgbe_do_l2t_write_rpl(struct adapter *adap, + const struct cpl_l2t_write_rpl *rpl) +{ + struct l2t_data *d = adap->l2t; + unsigned int tid = GET_TID(rpl); + unsigned int l2t_idx = tid % L2T_SIZE; + + if (unlikely(rpl->status != CPL_ERR_NONE)) { + dev_err(adap, + "Unexpected L2T_WRITE_RPL status %u for entry %u\n", + rpl->status, l2t_idx); + return; + } + + if (tid & F_SYNC_WR) { + struct l2t_entry *e = &d->l2tab[l2t_idx - d->l2t_start]; + + t4_os_lock(&e->lock); + if (e->state != L2T_STATE_SWITCHING) + e->state = L2T_STATE_VALID; + t4_os_unlock(&e->lock); + } +} + +/** + * Write an L2T entry. Must be called with the entry locked. + * The write may be synchronous or asynchronous. + */ +static int write_l2e(struct rte_eth_dev *dev, struct l2t_entry *e, int sync, + bool loopback, bool arpmiss) +{ + struct adapter *adap = ethdev2adap(dev); + struct l2t_data *d = adap->l2t; + struct rte_mbuf *mbuf; + struct cpl_l2t_write_req *req; + struct sge_ctrl_txq *ctrlq; + unsigned int l2t_idx = e->idx + d->l2t_start; + unsigned int port_id = ethdev2pinfo(dev)->port_id; + + ctrlq = &adap->sge.ctrlq[port_id]; + mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool); + if (!mbuf) + return -ENOMEM; + + mbuf->data_len = sizeof(*req); + mbuf->pkt_len = mbuf->data_len; + + req = rte_pktmbuf_mtod(mbuf, struct cpl_l2t_write_req *); + INIT_TP_WR(req, 0); + + OPCODE_TID(req) = + cpu_to_be32(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, + l2t_idx | V_SYNC_WR(sync) | + V_TID_QID(adap->sge.fw_evtq.abs_id))); + req->params = cpu_to_be16(V_L2T_W_PORT(e->lport) | + V_L2T_W_LPBK(loopback) | + V_L2T_W_ARPMISS(arpmiss) | + V_L2T_W_NOREPLY(!sync)); + req->l2t_idx = cpu_to_be16(l2t_idx); + req->vlan = cpu_to_be16(e->vlan); + rte_memcpy(req->dst_mac, e->dmac, RTE_ETHER_ADDR_LEN); + + if (loopback) + memset(req->dst_mac, 0, RTE_ETHER_ADDR_LEN); + + t4_mgmt_tx(ctrlq, mbuf); + + if (sync && e->state != L2T_STATE_SWITCHING) + e->state = L2T_STATE_SYNC_WRITE; + + return 0; +} + +/** + * find_or_alloc_l2e - Find/Allocate a free L2T entry + * @d: L2T table + * @vlan: VLAN id to compare/add + * @port: port id to compare/add + * @dmac: Destination MAC address to compare/add + * Returns pointer to the L2T entry found/created + * + * Finds/Allocates an L2T entry to be used by switching rule of a filter. 
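/*
 * Sketch of how the reply TID decoded by cxgbe_do_l2t_write_rpl() above
 * doubles as an L2T index plus a sync flag: with L2T_SIZE == 4096 the index
 * fits in the low 12 bits, and bit 12 (F_SYNC_WR, defined in l2t.h later in
 * this patch) marks a synchronous write. Standalone illustration only.
 */
#include <stdio.h>

#define L2T_SIZE    4096
#define S_SYNC_WR   12
#define F_SYNC_WR   (1U << S_SYNC_WR)

int main(void)
{
    unsigned int l2t_idx = 77;
    unsigned int tid = l2t_idx | F_SYNC_WR;     /* encode: index + sync flag */

    printf("tid %#x -> idx %u, sync=%d\n",
           tid, tid % L2T_SIZE, !!(tid & F_SYNC_WR));
    return 0;
}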
+ */ +static struct l2t_entry *find_or_alloc_l2e(struct l2t_data *d, u16 vlan, + u8 port, u8 *dmac) +{ + struct l2t_entry *end, *e; + struct l2t_entry *first_free = NULL; + + for (e = &d->l2tab[0], end = &d->l2tab[d->l2t_size]; e != end; ++e) { + if (rte_atomic32_read(&e->refcnt) == 0) { + if (!first_free) + first_free = e; + } else { + if (e->state == L2T_STATE_SWITCHING) { + if ((!memcmp(e->dmac, dmac, RTE_ETHER_ADDR_LEN)) && + e->vlan == vlan && e->lport == port) + goto exists; + } + } + } + + if (first_free) { + e = first_free; + goto found; + } + + return NULL; + +found: + e->state = L2T_STATE_UNUSED; + +exists: + return e; +} + +static struct l2t_entry *t4_l2t_alloc_switching(struct rte_eth_dev *dev, + u16 vlan, u8 port, + u8 *eth_addr) +{ + struct adapter *adap = ethdev2adap(dev); + struct l2t_data *d = adap->l2t; + struct l2t_entry *e; + int ret = 0; + + t4_os_write_lock(&d->lock); + e = find_or_alloc_l2e(d, vlan, port, eth_addr); + if (e) { + t4_os_lock(&e->lock); + if (!rte_atomic32_read(&e->refcnt)) { + e->state = L2T_STATE_SWITCHING; + e->vlan = vlan; + e->lport = port; + rte_memcpy(e->dmac, eth_addr, RTE_ETHER_ADDR_LEN); + rte_atomic32_set(&e->refcnt, 1); + ret = write_l2e(dev, e, 0, !L2T_LPBK, !L2T_ARPMISS); + if (ret < 0) + dev_debug(adap, "Failed to write L2T entry: %d", + ret); + } else { + rte_atomic32_inc(&e->refcnt); + } + t4_os_unlock(&e->lock); + } + t4_os_write_unlock(&d->lock); + + return ret ? NULL : e; +} + +/** + * cxgbe_l2t_alloc_switching - Allocate a L2T entry for switching rule + * @dev: rte_eth_dev pointer + * @vlan: VLAN Id + * @port: Associated port + * @dmac: Destination MAC address to add to L2T + * Returns pointer to the allocated l2t entry + * + * Allocates a L2T entry for use by switching rule of a filter + */ +struct l2t_entry *cxgbe_l2t_alloc_switching(struct rte_eth_dev *dev, u16 vlan, + u8 port, u8 *dmac) +{ + return t4_l2t_alloc_switching(dev, vlan, port, dmac); +} + +/** + * Initialize L2 Table + */ +struct l2t_data *t4_init_l2t(unsigned int l2t_start, unsigned int l2t_end) +{ + unsigned int l2t_size; + unsigned int i; + struct l2t_data *d; + + if (l2t_start >= l2t_end || l2t_end >= L2T_SIZE) + return NULL; + l2t_size = l2t_end - l2t_start + 1; + + d = t4_os_alloc(sizeof(*d) + l2t_size * sizeof(struct l2t_entry)); + if (!d) + return NULL; + + d->l2t_start = l2t_start; + d->l2t_size = l2t_size; + + t4_os_rwlock_init(&d->lock); + + for (i = 0; i < d->l2t_size; ++i) { + d->l2tab[i].idx = i; + d->l2tab[i].state = L2T_STATE_UNUSED; + t4_os_lock_init(&d->l2tab[i].lock); + rte_atomic32_set(&d->l2tab[i].refcnt, 0); + } + + return d; +} + +/** + * Cleanup L2 Table + */ +void t4_cleanup_l2t(struct adapter *adap) +{ + if (adap->l2t) + t4_os_free(adap->l2t); +} diff --git a/src/spdk/dpdk/drivers/net/cxgbe/l2t.h b/src/spdk/dpdk/drivers/net/cxgbe/l2t.h new file mode 100644 index 000000000..2c489e4aa --- /dev/null +++ b/src/spdk/dpdk/drivers/net/cxgbe/l2t.h @@ -0,0 +1,58 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Chelsio Communications. + * All rights reserved. 
+ */ +#ifndef _CXGBE_L2T_H_ +#define _CXGBE_L2T_H_ + +#include "base/t4_msg.h" + +enum { + L2T_SIZE = 4096 /* # of L2T entries */ +}; + +enum { + L2T_STATE_VALID, /* entry is up to date */ + L2T_STATE_SYNC_WRITE, /* synchronous write of entry underway */ + + /* when state is one of the below the entry is not hashed */ + L2T_STATE_SWITCHING, /* entry is being used by a switching filter */ + L2T_STATE_UNUSED /* entry not in use */ +}; + +/* + * State for the corresponding entry of the HW L2 table. + */ +struct l2t_entry { + u16 state; /* entry state */ + u16 idx; /* entry index within in-memory table */ + u16 vlan; /* VLAN TCI (id: bits 0-11, prio: 13-15 */ + u8 lport; /* destination port */ + u8 dmac[RTE_ETHER_ADDR_LEN]; /* destination MAC address */ + rte_spinlock_t lock; /* entry lock */ + rte_atomic32_t refcnt; /* entry reference count */ +}; + +struct l2t_data { + unsigned int l2t_start; /* start index of our piece of the L2T */ + unsigned int l2t_size; /* number of entries in l2tab */ + rte_rwlock_t lock; /* table rw lock */ + struct l2t_entry l2tab[0]; /* MUST BE LAST */ +}; + +#define L2T_LPBK true +#define L2T_ARPMISS true + +/* identifies sync vs async L2T_WRITE_REQs */ +#define S_SYNC_WR 12 +#define V_SYNC_WR(x) ((x) << S_SYNC_WR) +#define F_SYNC_WR V_SYNC_WR(1) + +struct l2t_data *t4_init_l2t(unsigned int l2t_start, unsigned int l2t_end); +void t4_cleanup_l2t(struct adapter *adap); +struct l2t_entry *cxgbe_l2t_alloc_switching(struct rte_eth_dev *dev, u16 vlan, + u8 port, u8 *dmac); +void cxgbe_l2t_release(struct l2t_entry *e); +void cxgbe_do_l2t_write_rpl(struct adapter *p, + const struct cpl_l2t_write_rpl *rpl); +#endif /* _CXGBE_L2T_H_ */ diff --git a/src/spdk/dpdk/drivers/net/cxgbe/meson.build b/src/spdk/dpdk/drivers/net/cxgbe/meson.build new file mode 100644 index 000000000..3992aba44 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/cxgbe/meson.build @@ -0,0 +1,17 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2018 Intel Corporation + +sources = files('cxgbe_ethdev.c', + 'cxgbe_main.c', + 'cxgbevf_ethdev.c', + 'cxgbevf_main.c', + 'sge.c', + 'cxgbe_filter.c', + 'cxgbe_flow.c', + 'clip_tbl.c', + 'mps_tcam.c', + 'l2t.c', + 'smt.c', + 'base/t4_hw.c', + 'base/t4vf_hw.c') +includes += include_directories('base') diff --git a/src/spdk/dpdk/drivers/net/cxgbe/mps_tcam.c b/src/spdk/dpdk/drivers/net/cxgbe/mps_tcam.c new file mode 100644 index 000000000..5302d1343 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/cxgbe/mps_tcam.c @@ -0,0 +1,241 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Chelsio Communications. + * All rights reserved. + */ + +#include "mps_tcam.h" + +static inline bool +match_entry(struct mps_tcam_entry *entry, const u8 *eth_addr, const u8 *mask) +{ + if (!memcmp(eth_addr, entry->eth_addr, RTE_ETHER_ADDR_LEN) && + !memcmp(mask, entry->mask, RTE_ETHER_ADDR_LEN)) + return true; + return false; +} + +static int cxgbe_update_free_idx(struct mpstcam_table *t) +{ + struct mps_tcam_entry *entry = t->entry; + u16 i, next = t->free_idx + 1; + + if (entry[t->free_idx].state == MPS_ENTRY_UNUSED) + /* You are already pointing to a free entry !! 
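/*
 * t4_init_l2t() above and t4_init_mpstcam() later in this patch both size
 * their tables as one allocation: a fixed header followed by a trailing
 * array ("l2tab[0]" / "entry[0]", which MUST BE LAST). A minimal standalone
 * sketch of that idiom with hypothetical names:
 */
#include <stdio.h>
#include <stdlib.h>

struct item {
    unsigned int idx;
};

struct table {
    unsigned int size;
    struct item slots[];    /* flexible array member, must be last */
};

static struct table *table_alloc(unsigned int n)
{
    struct table *t = calloc(1, sizeof(*t) + n * sizeof(struct item));
    unsigned int i;

    if (!t)
        return NULL;
    t->size = n;
    for (i = 0; i < n; i++)
        t->slots[i].idx = i;    /* header and entries share one block */
    return t;
}

int main(void)
{
    struct table *t = table_alloc(16);

    if (!t)
        return 1;
    printf("table of %u slots, last idx %u\n", t->size, t->slots[15].idx);
    free(t);
    return 0;
}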
*/ + return 0; + + /* loop, till we don't rollback to same index where we started */ + for (i = next; i != t->free_idx; i++) { + if (i == t->size) + /* rollback and search free entry from start */ + i = 0; + + if (entry[i].state == MPS_ENTRY_UNUSED) { + t->free_idx = i; + return 0; + } + } + + return -1; /* table is full */ +} + +static struct mps_tcam_entry * +cxgbe_mpstcam_lookup(struct mpstcam_table *t, const u8 *eth_addr, + const u8 *mask) +{ + struct mps_tcam_entry *entry = t->entry; + int i; + + if (!entry) + return NULL; + + for (i = 0; i < t->size; i++) { + if (entry[i].state == MPS_ENTRY_UNUSED) + continue; /* entry is not being used */ + if (match_entry(&entry[i], eth_addr, mask)) + return &entry[i]; + } + + return NULL; +} + +int cxgbe_mpstcam_alloc(struct port_info *pi, const u8 *eth_addr, + const u8 *mask) +{ + struct adapter *adap = pi->adapter; + struct mpstcam_table *mpstcam = adap->mpstcam; + struct mps_tcam_entry *entry; + int ret; + + if (!adap->mpstcam) { + dev_err(adap, "mpstcam table is not available\n"); + return -EOPNOTSUPP; + } + + /* If entry already present, return it. */ + t4_os_write_lock(&mpstcam->lock); + entry = cxgbe_mpstcam_lookup(adap->mpstcam, eth_addr, mask); + if (entry) { + rte_atomic32_add(&entry->refcnt, 1); + t4_os_write_unlock(&mpstcam->lock); + return entry->idx; + } + + if (mpstcam->full) { + t4_os_write_unlock(&mpstcam->lock); + dev_err(adap, "mps-tcam table is full\n"); + return -ENOMEM; + } + + ret = t4_alloc_raw_mac_filt(adap, pi->viid, eth_addr, mask, + mpstcam->free_idx, 0, pi->port_id, false); + if (ret <= 0) { + t4_os_write_unlock(&mpstcam->lock); + return ret; + } + + /* Fill in the new values */ + entry = &mpstcam->entry[ret]; + memcpy(entry->eth_addr, eth_addr, RTE_ETHER_ADDR_LEN); + memcpy(entry->mask, mask, RTE_ETHER_ADDR_LEN); + rte_atomic32_set(&entry->refcnt, 1); + entry->state = MPS_ENTRY_USED; + + if (cxgbe_update_free_idx(mpstcam)) + mpstcam->full = true; + + t4_os_write_unlock(&mpstcam->lock); + return ret; +} + +int cxgbe_mpstcam_modify(struct port_info *pi, int idx, const u8 *addr) +{ + struct adapter *adap = pi->adapter; + struct mpstcam_table *mpstcam = adap->mpstcam; + struct mps_tcam_entry *entry; + + if (!mpstcam) + return -EOPNOTSUPP; + t4_os_write_lock(&mpstcam->lock); + if (idx != -1 && idx >= mpstcam->size) { + t4_os_write_unlock(&mpstcam->lock); + return -EINVAL; + } + if (idx >= 0) { + entry = &mpstcam->entry[idx]; + /* user wants to modify an existing entry. + * verify if entry exists + */ + if (entry->state != MPS_ENTRY_USED) { + t4_os_write_unlock(&mpstcam->lock); + return -EINVAL; + } + } + + idx = t4_change_mac(adap, adap->mbox, pi->viid, idx, addr, true, true); + if (idx < 0) { + t4_os_write_unlock(&mpstcam->lock); + return idx; + } + + /* idx can now be different from what user provided */ + entry = &mpstcam->entry[idx]; + memcpy(entry->eth_addr, addr, RTE_ETHER_ADDR_LEN); + /* NOTE: we have considered the case that idx returned by t4_change_mac + * will be different from the user provided value only if user + * provided value is -1 + */ + if (entry->state == MPS_ENTRY_UNUSED) { + rte_atomic32_set(&entry->refcnt, 1); + entry->state = MPS_ENTRY_USED; + } + + if (cxgbe_update_free_idx(mpstcam)) + mpstcam->full = true; + + t4_os_write_unlock(&mpstcam->lock); + return idx; +} + +/** + * hold appropriate locks while calling this. 
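/*
 * The free-index tracking in cxgbe_update_free_idx() above is a circular
 * scan: keep the current index if it is still free, otherwise walk forward
 * with wrap-around and stop at the first unused slot or after a full lap
 * (table full). A standalone sketch of the same walk with assumed contents:
 */
#include <stdbool.h>
#include <stdio.h>

#define TBL_SIZE 8

static int next_free(const bool *used, unsigned int *free_idx)
{
    unsigned int i, start = *free_idx;

    if (!used[start])
        return 0;                   /* already pointing at a free slot */

    for (i = (start + 1) % TBL_SIZE; i != start; i = (i + 1) % TBL_SIZE) {
        if (!used[i]) {
            *free_idx = i;          /* remember the newly found free slot */
            return 0;
        }
    }
    return -1;                      /* table is full */
}

int main(void)
{
    bool used[TBL_SIZE] = { true, true, false, true, true, true, true, true };
    unsigned int free_idx = 0;

    if (next_free(used, &free_idx) == 0)
        printf("next free slot: %u\n", free_idx);
    else
        printf("table full\n");
    return 0;
}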
+ */ +static inline void reset_mpstcam_entry(struct mps_tcam_entry *entry) +{ + memset(entry->eth_addr, 0, RTE_ETHER_ADDR_LEN); + memset(entry->mask, 0, RTE_ETHER_ADDR_LEN); + rte_atomic32_clear(&entry->refcnt); + entry->state = MPS_ENTRY_UNUSED; +} + +/** + * ret < 0: fatal error + * ret = 0: entry removed in h/w + * ret > 0: updated refcount. + */ +int cxgbe_mpstcam_remove(struct port_info *pi, u16 idx) +{ + struct adapter *adap = pi->adapter; + struct mpstcam_table *t = adap->mpstcam; + struct mps_tcam_entry *entry; + int ret; + + if (!t) + return -EOPNOTSUPP; + t4_os_write_lock(&t->lock); + entry = &t->entry[idx]; + if (entry->state == MPS_ENTRY_UNUSED) { + t4_os_write_unlock(&t->lock); + return -EINVAL; + } + + if (rte_atomic32_read(&entry->refcnt) == 1) + ret = t4_free_raw_mac_filt(adap, pi->viid, entry->eth_addr, + entry->mask, idx, 1, pi->port_id, + false); + else + ret = rte_atomic32_sub_return(&entry->refcnt, 1); + + if (ret == 0) { + reset_mpstcam_entry(entry); + t->full = false; /* We have atleast 1 free entry */ + cxgbe_update_free_idx(t); + } + + t4_os_write_unlock(&t->lock); + return ret; +} + +struct mpstcam_table *t4_init_mpstcam(struct adapter *adap) +{ + struct mpstcam_table *t; + int i; + u16 size = adap->params.arch.mps_tcam_size; + + t = t4_os_alloc(sizeof(*t) + size * sizeof(struct mps_tcam_entry)); + if (!t) + return NULL; + + t4_os_rwlock_init(&t->lock); + t->full = false; + t->size = size; + + for (i = 0; i < size; i++) { + reset_mpstcam_entry(&t->entry[i]); + t->entry[i].mpstcam = t; + t->entry[i].idx = i; + } + + /* first entry is used by chip. this is overwritten only + * in t4_cleanup_mpstcam() + */ + t->entry[0].state = MPS_ENTRY_USED; + t->free_idx = 1; + + return t; +} + +void t4_cleanup_mpstcam(struct adapter *adap) +{ + if (adap->mpstcam) + t4_os_free(adap->mpstcam); +} diff --git a/src/spdk/dpdk/drivers/net/cxgbe/mps_tcam.h b/src/spdk/dpdk/drivers/net/cxgbe/mps_tcam.h new file mode 100644 index 000000000..3d1e8d3db --- /dev/null +++ b/src/spdk/dpdk/drivers/net/cxgbe/mps_tcam.h @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Chelsio Communications. + * All rights reserved. + */ + +#ifndef _CXGBE_MPSTCAM_H_ +#define _CXGBE_MPSTCAM_H_ + +#include "base/common.h" + +enum { + MPS_ENTRY_UNUSED, /* Keep this first so memset 0 renders + * the correct state. 
Other states can + * be added in future like MPS_ENTRY_BUSY + * to reduce contention while mboxing + * the request to f/w or to denote attributes + * for a specific entry + */ + MPS_ENTRY_USED, +}; + +struct mps_tcam_entry { + u8 state; + u16 idx; + + /* add data here which uniquely defines an entry */ + u8 eth_addr[RTE_ETHER_ADDR_LEN]; + u8 mask[RTE_ETHER_ADDR_LEN]; + + struct mpstcam_table *mpstcam; /* backptr */ + rte_atomic32_t refcnt; +}; + +struct mpstcam_table { + u16 size; + rte_rwlock_t lock; + u16 free_idx; /* next free index */ + bool full; /* since free index can be present + * anywhere in the table, size and + * free_idx cannot alone determine + * if the table is full + */ + struct mps_tcam_entry entry[0]; +}; + +struct mpstcam_table *t4_init_mpstcam(struct adapter *adap); +void t4_cleanup_mpstcam(struct adapter *adap); +int cxgbe_mpstcam_alloc(struct port_info *pi, const u8 *mac, const u8 *mask); +int cxgbe_mpstcam_remove(struct port_info *pi, u16 idx); +int cxgbe_mpstcam_modify(struct port_info *pi, int idx, const u8 *addr); + +#endif /* _CXGBE_MPSTCAM_H_ */ diff --git a/src/spdk/dpdk/drivers/net/cxgbe/rte_pmd_cxgbe_version.map b/src/spdk/dpdk/drivers/net/cxgbe/rte_pmd_cxgbe_version.map new file mode 100644 index 000000000..f9f17e4f6 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/cxgbe/rte_pmd_cxgbe_version.map @@ -0,0 +1,3 @@ +DPDK_20.0 { + local: *; +}; diff --git a/src/spdk/dpdk/drivers/net/cxgbe/sge.c b/src/spdk/dpdk/drivers/net/cxgbe/sge.c new file mode 100644 index 000000000..aba85a209 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/cxgbe/sge.c @@ -0,0 +1,2658 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Chelsio Communications. + * All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "base/common.h" +#include "base/t4_regs.h" +#include "base/t4_msg.h" +#include "cxgbe.h" + +static inline void ship_tx_pkt_coalesce_wr(struct adapter *adap, + struct sge_eth_txq *txq); + +/* + * Max number of Rx buffers we replenish at a time. + */ +#define MAX_RX_REFILL 64U + +#define NOMEM_TMR_IDX (SGE_NTIMERS - 1) + +/* + * Max Tx descriptor space we allow for an Ethernet packet to be inlined + * into a WR. + */ +#define MAX_IMM_TX_PKT_LEN 256 + +/* + * Max size of a WR sent through a control Tx queue. + */ +#define MAX_CTRL_WR_LEN SGE_MAX_WR_LEN + +/* + * Rx buffer sizes for "usembufs" Free List buffers (one ingress packet + * per mbuf buffer). We currently only support two sizes for 1500- and + * 9000-byte MTUs. We could easily support more but there doesn't seem to be + * much need for that ... + */ +#define FL_MTU_SMALL 1500 +#define FL_MTU_LARGE 9000 + +static inline unsigned int fl_mtu_bufsize(struct adapter *adapter, + unsigned int mtu) +{ + struct sge *s = &adapter->sge; + + return CXGBE_ALIGN(s->pktshift + RTE_ETHER_HDR_LEN + VLAN_HLEN + mtu, + s->fl_align); +} + +#define FL_MTU_SMALL_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_SMALL) +#define FL_MTU_LARGE_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_LARGE) + +/* + * Bits 0..3 of rx_sw_desc.dma_addr have special meaning. The hardware uses + * these to specify the buffer size as an index into the SGE Free List Buffer + * Size register array. 
We also use bit 4, when the buffer has been unmapped + * for DMA, but this is of course never sent to the hardware and is only used + * to prevent double unmappings. All of the above requires that the Free List + * Buffers which we allocate have the bottom 5 bits free (0) -- i.e. are + * 32-byte or or a power of 2 greater in alignment. Since the SGE's minimal + * Free List Buffer alignment is 32 bytes, this works out for us ... + */ +enum { + RX_BUF_FLAGS = 0x1f, /* bottom five bits are special */ + RX_BUF_SIZE = 0x0f, /* bottom three bits are for buf sizes */ + RX_UNMAPPED_BUF = 0x10, /* buffer is not mapped */ + + /* + * XXX We shouldn't depend on being able to use these indices. + * XXX Especially when some other Master PF has initialized the + * XXX adapter or we use the Firmware Configuration File. We + * XXX should really search through the Host Buffer Size register + * XXX array for the appropriately sized buffer indices. + */ + RX_SMALL_PG_BUF = 0x0, /* small (PAGE_SIZE) page buffer */ + RX_LARGE_PG_BUF = 0x1, /* buffer large page buffer */ + + RX_SMALL_MTU_BUF = 0x2, /* small MTU buffer */ + RX_LARGE_MTU_BUF = 0x3, /* large MTU buffer */ +}; + +/** + * txq_avail - return the number of available slots in a Tx queue + * @q: the Tx queue + * + * Returns the number of descriptors in a Tx queue available to write new + * packets. + */ +static inline unsigned int txq_avail(const struct sge_txq *q) +{ + return q->size - 1 - q->in_use; +} + +static int map_mbuf(struct rte_mbuf *mbuf, dma_addr_t *addr) +{ + struct rte_mbuf *m = mbuf; + + for (; m; m = m->next, addr++) { + *addr = m->buf_iova + rte_pktmbuf_headroom(m); + if (*addr == 0) + goto out_err; + } + return 0; + +out_err: + return -ENOMEM; +} + +/** + * free_tx_desc - reclaims Tx descriptors and their buffers + * @q: the Tx queue to reclaim descriptors from + * @n: the number of descriptors to reclaim + * + * Reclaims Tx descriptors from an SGE Tx queue and frees the associated + * Tx buffers. Called with the Tx queue lock held. + */ +static void free_tx_desc(struct sge_txq *q, unsigned int n) +{ + struct tx_sw_desc *d; + unsigned int cidx = 0; + + d = &q->sdesc[cidx]; + while (n--) { + if (d->mbuf) { /* an SGL is present */ + rte_pktmbuf_free(d->mbuf); + d->mbuf = NULL; + } + if (d->coalesce.idx) { + int i; + + for (i = 0; i < d->coalesce.idx; i++) { + rte_pktmbuf_free(d->coalesce.mbuf[i]); + d->coalesce.mbuf[i] = NULL; + } + d->coalesce.idx = 0; + } + ++d; + if (++cidx == q->size) { + cidx = 0; + d = q->sdesc; + } + RTE_MBUF_PREFETCH_TO_FREE(&q->sdesc->mbuf->pool); + } +} + +static void reclaim_tx_desc(struct sge_txq *q, unsigned int n) +{ + struct tx_sw_desc *d; + unsigned int cidx = q->cidx; + + d = &q->sdesc[cidx]; + while (n--) { + if (d->mbuf) { /* an SGL is present */ + rte_pktmbuf_free(d->mbuf); + d->mbuf = NULL; + } + ++d; + if (++cidx == q->size) { + cidx = 0; + d = q->sdesc; + } + } + q->cidx = cidx; +} + +/** + * fl_cap - return the capacity of a free-buffer list + * @fl: the FL + * + * Returns the capacity of a free-buffer list. The capacity is less than + * the size because one descriptor needs to be left unpopulated, otherwise + * HW will think the FL is empty. + */ +static inline unsigned int fl_cap(const struct sge_fl *fl) +{ + return fl->size - 8; /* 1 descriptor = 8 buffers */ +} + +/** + * fl_starving - return whether a Free List is starving. 
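/*
 * Standalone sketch of the low-bit trick described above: because free-list
 * buffers are at least 32-byte aligned, the bottom bits of the DMA address
 * are free to carry a small buffer-size index, which is masked back out when
 * the buffer is consumed (as set_rx_sw_desc() and get_buf_size() below do).
 * Values are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

#define SIZE_IDX_MASK 0x0fULL       /* low bits reserved for the size index */

int main(void)
{
    uint64_t dma_addr = 0x12345600ULL;      /* assumed 32-byte aligned */
    uint64_t size_idx = 0x3;                /* e.g. a "large MTU buffer" */
    uint64_t packed = dma_addr | size_idx;

    printf("addr %#llx, size idx %llu\n",
           (unsigned long long)(packed & ~SIZE_IDX_MASK),
           (unsigned long long)(packed & SIZE_IDX_MASK));
    return 0;
}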
+ * @adapter: pointer to the adapter + * @fl: the Free List + * + * Tests specified Free List to see whether the number of buffers + * available to the hardware has falled below our "starvation" + * threshold. + */ +static inline bool fl_starving(const struct adapter *adapter, + const struct sge_fl *fl) +{ + const struct sge *s = &adapter->sge; + + return fl->avail - fl->pend_cred <= s->fl_starve_thres; +} + +static inline unsigned int get_buf_size(struct adapter *adapter, + const struct rx_sw_desc *d) +{ + unsigned int rx_buf_size_idx = d->dma_addr & RX_BUF_SIZE; + unsigned int buf_size = 0; + + switch (rx_buf_size_idx) { + case RX_SMALL_MTU_BUF: + buf_size = FL_MTU_SMALL_BUFSIZE(adapter); + break; + + case RX_LARGE_MTU_BUF: + buf_size = FL_MTU_LARGE_BUFSIZE(adapter); + break; + + default: + BUG_ON(1); + /* NOT REACHED */ + } + + return buf_size; +} + +/** + * free_rx_bufs - free the Rx buffers on an SGE free list + * @q: the SGE free list to free buffers from + * @n: how many buffers to free + * + * Release the next @n buffers on an SGE free-buffer Rx queue. The + * buffers must be made inaccessible to HW before calling this function. + */ +static void free_rx_bufs(struct sge_fl *q, int n) +{ + unsigned int cidx = q->cidx; + struct rx_sw_desc *d; + + d = &q->sdesc[cidx]; + while (n--) { + if (d->buf) { + rte_pktmbuf_free(d->buf); + d->buf = NULL; + } + ++d; + if (++cidx == q->size) { + cidx = 0; + d = q->sdesc; + } + q->avail--; + } + q->cidx = cidx; +} + +/** + * unmap_rx_buf - unmap the current Rx buffer on an SGE free list + * @q: the SGE free list + * + * Unmap the current buffer on an SGE free-buffer Rx queue. The + * buffer must be made inaccessible to HW before calling this function. + * + * This is similar to @free_rx_bufs above but does not free the buffer. + * Do note that the FL still loses any further access to the buffer. + */ +static void unmap_rx_buf(struct sge_fl *q) +{ + if (++q->cidx == q->size) + q->cidx = 0; + q->avail--; +} + +static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q) +{ + if (q->pend_cred >= 64) { + u32 val = adap->params.arch.sge_fl_db; + + if (is_t4(adap->params.chip)) + val |= V_PIDX(q->pend_cred / 8); + else + val |= V_PIDX_T5(q->pend_cred / 8); + + /* + * Make sure all memory writes to the Free List queue are + * committed before we tell the hardware about them. + */ + wmb(); + + /* + * If we don't have access to the new User Doorbell (T5+), use + * the old doorbell mechanism; otherwise use the new BAR2 + * mechanism. + */ + if (unlikely(!q->bar2_addr)) { + u32 reg = is_pf4(adap) ? MYPF_REG(A_SGE_PF_KDOORBELL) : + T4VF_SGE_BASE_ADDR + + A_SGE_VF_KDOORBELL; + + t4_write_reg_relaxed(adap, reg, + val | V_QID(q->cntxt_id)); + } else { + writel_relaxed(val | V_QID(q->bar2_qid), + (void *)((uintptr_t)q->bar2_addr + + SGE_UDB_KDOORBELL)); + + /* + * This Write memory Barrier will force the write to + * the User Doorbell area to be flushed. + */ + wmb(); + } + q->pend_cred &= 7; + } +} + +static inline void set_rx_sw_desc(struct rx_sw_desc *sd, void *buf, + dma_addr_t mapping) +{ + sd->buf = buf; + sd->dma_addr = mapping; /* includes size low bits */ +} + +/** + * refill_fl_usembufs - refill an SGE Rx buffer ring with mbufs + * @adap: the adapter + * @q: the ring to refill + * @n: the number of new buffers to allocate + * + * (Re)populate an SGE free-buffer queue with up to @n new packet buffers, + * allocated with the supplied gfp flags. The caller must assure that + * @n does not exceed the queue's capacity. 
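/*
 * Sketch of the doorbell credit batching used by ring_fl_db() above:
 * replenished buffers accumulate in pend_cred, the doorbell is rung only
 * once at least 64 buffers are pending, the PIDX increment is expressed in
 * descriptors of 8 buffers each, and the sub-descriptor remainder is carried
 * over. Standalone arithmetic only, no hardware access; batch sizes are
 * made up for illustration.
 */
#include <stdio.h>

int main(void)
{
    unsigned int batches[] = { 30, 30, 30 };    /* buffers replenished */
    unsigned int pend_cred = 0;
    unsigned int i;

    for (i = 0; i < 3; i++) {
        pend_cred += batches[i];
        if (pend_cred >= 64) {
            printf("ring doorbell: PIDX += %u descriptors\n", pend_cred / 8);
            pend_cred &= 7;         /* keep the < 8-buffer remainder */
        }
    }
    printf("leftover credits: %u buffers\n", pend_cred);
    return 0;
}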
If afterwards the queue is + * found critically low mark it as starving in the bitmap of starving FLs. + * + * Returns the number of buffers allocated. + */ +static unsigned int refill_fl_usembufs(struct adapter *adap, struct sge_fl *q, + int n) +{ + struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, fl); + unsigned int cred = q->avail; + __be64 *d = &q->desc[q->pidx]; + struct rx_sw_desc *sd = &q->sdesc[q->pidx]; + unsigned int buf_size_idx = RX_SMALL_MTU_BUF; + struct rte_mbuf *buf_bulk[n]; + int ret, i; + struct rte_pktmbuf_pool_private *mbp_priv; + u8 jumbo_en = rxq->rspq.eth_dev->data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_JUMBO_FRAME; + + /* Use jumbo mtu buffers if mbuf data room size can fit jumbo data. */ + mbp_priv = rte_mempool_get_priv(rxq->rspq.mb_pool); + if (jumbo_en && + ((mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM) >= 9000)) + buf_size_idx = RX_LARGE_MTU_BUF; + + ret = rte_mempool_get_bulk(rxq->rspq.mb_pool, (void *)buf_bulk, n); + if (unlikely(ret != 0)) { + dev_debug(adap, "%s: failed to allocated fl entries in bulk ..\n", + __func__); + q->alloc_failed++; + rxq->rspq.eth_dev->data->rx_mbuf_alloc_failed++; + goto out; + } + + for (i = 0; i < n; i++) { + struct rte_mbuf *mbuf = buf_bulk[i]; + dma_addr_t mapping; + + if (!mbuf) { + dev_debug(adap, "%s: mbuf alloc failed\n", __func__); + q->alloc_failed++; + rxq->rspq.eth_dev->data->rx_mbuf_alloc_failed++; + goto out; + } + + rte_mbuf_refcnt_set(mbuf, 1); + mbuf->data_off = + (uint16_t)((char *) + RTE_PTR_ALIGN((char *)mbuf->buf_addr + + RTE_PKTMBUF_HEADROOM, + adap->sge.fl_align) - + (char *)mbuf->buf_addr); + mbuf->next = NULL; + mbuf->nb_segs = 1; + mbuf->port = rxq->rspq.port_id; + + mapping = (dma_addr_t)RTE_ALIGN(mbuf->buf_iova + + mbuf->data_off, + adap->sge.fl_align); + mapping |= buf_size_idx; + *d++ = cpu_to_be64(mapping); + set_rx_sw_desc(sd, mbuf, mapping); + sd++; + + q->avail++; + if (++q->pidx == q->size) { + q->pidx = 0; + sd = q->sdesc; + d = q->desc; + } + } + +out: cred = q->avail - cred; + q->pend_cred += cred; + ring_fl_db(adap, q); + + if (unlikely(fl_starving(adap, q))) { + /* + * Make sure data has been written to free list + */ + wmb(); + q->low++; + } + + return cred; +} + +/** + * refill_fl - refill an SGE Rx buffer ring with mbufs + * @adap: the adapter + * @q: the ring to refill + * @n: the number of new buffers to allocate + * + * (Re)populate an SGE free-buffer queue with up to @n new packet buffers, + * allocated with the supplied gfp flags. The caller must assure that + * @n does not exceed the queue's capacity. Returns the number of buffers + * allocated. + */ +static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n) +{ + return refill_fl_usembufs(adap, q, n); +} + +static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl) +{ + refill_fl(adap, fl, min(MAX_RX_REFILL, fl_cap(fl) - fl->avail)); +} + +/* + * Return the number of reclaimable descriptors in a Tx queue. + */ +static inline int reclaimable(const struct sge_txq *q) +{ + int hw_cidx = ntohs(q->stat->cidx); + + hw_cidx -= q->cidx; + if (hw_cidx < 0) + return hw_cidx + q->size; + return hw_cidx; +} + +/** + * reclaim_completed_tx - reclaims completed Tx descriptors + * @q: the Tx queue to reclaim completed descriptors from + * + * Reclaims Tx descriptors that the SGE has indicated it has processed. 
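/*
 * The reclaimable() helper above handles consumer-index wrap with a signed
 * difference: if the hardware's cidx snapshot is numerically behind the
 * software cidx, the hardware has wrapped and the ring size is added back.
 * Standalone sketch with assumed index values:
 */
#include <stdio.h>

static int reclaimable_sketch(int hw_cidx, int sw_cidx, int size)
{
    int d = hw_cidx - sw_cidx;

    return d < 0 ? d + size : d;    /* add the ring size back after a wrap */
}

int main(void)
{
    /* hardware at 5, software at 1020, ring of 1024: 9 descriptors done */
    printf("reclaimable = %d\n", reclaimable_sketch(5, 1020, 1024));
    return 0;
}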
+ */ +void reclaim_completed_tx(struct sge_txq *q) +{ + unsigned int avail = reclaimable(q); + + do { + /* reclaim as much as possible */ + reclaim_tx_desc(q, avail); + q->in_use -= avail; + avail = reclaimable(q); + } while (avail); +} + +/** + * sgl_len - calculates the size of an SGL of the given capacity + * @n: the number of SGL entries + * + * Calculates the number of flits needed for a scatter/gather list that + * can hold the given number of entries. + */ +static inline unsigned int sgl_len(unsigned int n) +{ + /* + * A Direct Scatter Gather List uses 32-bit lengths and 64-bit PCI DMA + * addresses. The DSGL Work Request starts off with a 32-bit DSGL + * ULPTX header, then Length0, then Address0, then, for 1 <= i <= N, + * repeated sequences of { Length[i], Length[i+1], Address[i], + * Address[i+1] } (this ensures that all addresses are on 64-bit + * boundaries). If N is even, then Length[N+1] should be set to 0 and + * Address[N+1] is omitted. + * + * The following calculation incorporates all of the above. It's + * somewhat hard to follow but, briefly: the "+2" accounts for the + * first two flits which include the DSGL header, Length0 and + * Address0; the "(3*(n-1))/2" covers the main body of list entries (3 + * flits for every pair of the remaining N) +1 if (n-1) is odd; and + * finally the "+((n-1)&1)" adds the one remaining flit needed if + * (n-1) is odd ... + */ + n--; + return (3 * n) / 2 + (n & 1) + 2; +} + +/** + * flits_to_desc - returns the num of Tx descriptors for the given flits + * @n: the number of flits + * + * Returns the number of Tx descriptors needed for the supplied number + * of flits. + */ +static inline unsigned int flits_to_desc(unsigned int n) +{ + return DIV_ROUND_UP(n, 8); +} + +/** + * is_eth_imm - can an Ethernet packet be sent as immediate data? + * @m: the packet + * + * Returns whether an Ethernet packet is small enough to fit as + * immediate data. Return value corresponds to the headroom required. + */ +static inline int is_eth_imm(const struct rte_mbuf *m) +{ + unsigned int hdrlen = (m->ol_flags & PKT_TX_TCP_SEG) ? + sizeof(struct cpl_tx_pkt_lso_core) : 0; + + hdrlen += sizeof(struct cpl_tx_pkt); + if (m->pkt_len <= MAX_IMM_TX_PKT_LEN - hdrlen) + return hdrlen; + + return 0; +} + +/** + * calc_tx_flits - calculate the number of flits for a packet Tx WR + * @m: the packet + * @adap: adapter structure pointer + * + * Returns the number of flits needed for a Tx WR for the given Ethernet + * packet, including the needed WR and CPL headers. + */ +static inline unsigned int calc_tx_flits(const struct rte_mbuf *m, + struct adapter *adap) +{ + size_t wr_size = is_pf4(adap) ? sizeof(struct fw_eth_tx_pkt_wr) : + sizeof(struct fw_eth_tx_pkt_vm_wr); + unsigned int flits; + int hdrlen; + + /* + * If the mbuf is small enough, we can pump it out as a work request + * with only immediate data. In that case we just have to have the + * TX Packet header plus the mbuf data in the Work Request. + */ + + hdrlen = is_eth_imm(m); + if (hdrlen) + return DIV_ROUND_UP(m->pkt_len + hdrlen, sizeof(__be64)); + + /* + * Otherwise, we're going to have to construct a Scatter gather list + * of the mbuf body and fragments. We also include the flits necessary + * for the TX Packet Work Request and CPL. 
We always have a firmware + * Write Header (incorporated as part of the cpl_tx_pkt_lso and + * cpl_tx_pkt structures), followed by either a TX Packet Write CPL + * message or, if we're doing a Large Send Offload, an LSO CPL message + * with an embedded TX Packet Write CPL message. + */ + flits = sgl_len(m->nb_segs); + if (m->tso_segsz) + flits += (wr_size + sizeof(struct cpl_tx_pkt_lso_core) + + sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64); + else + flits += (wr_size + + sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64); + return flits; +} + +/** + * write_sgl - populate a scatter/gather list for a packet + * @mbuf: the packet + * @q: the Tx queue we are writing into + * @sgl: starting location for writing the SGL + * @end: points right after the end of the SGL + * @start: start offset into mbuf main-body data to include in the SGL + * @addr: address of mapped region + * + * Generates a scatter/gather list for the buffers that make up a packet. + * The caller must provide adequate space for the SGL that will be written. + * The SGL includes all of the packet's page fragments and the data in its + * main body except for the first @start bytes. @sgl must be 16-byte + * aligned and within a Tx descriptor with available space. @end points + * write after the end of the SGL but does not account for any potential + * wrap around, i.e., @end > @sgl. + */ +static void write_sgl(struct rte_mbuf *mbuf, struct sge_txq *q, + struct ulptx_sgl *sgl, u64 *end, unsigned int start, + const dma_addr_t *addr) +{ + unsigned int i, len; + struct ulptx_sge_pair *to; + struct rte_mbuf *m = mbuf; + unsigned int nfrags = m->nb_segs; + struct ulptx_sge_pair buf[nfrags / 2]; + + len = m->data_len - start; + sgl->len0 = htonl(len); + sgl->addr0 = rte_cpu_to_be_64(addr[0]); + + sgl->cmd_nsge = htonl(V_ULPTX_CMD(ULP_TX_SC_DSGL) | + V_ULPTX_NSGE(nfrags)); + if (likely(--nfrags == 0)) + return; + /* + * Most of the complexity below deals with the possibility we hit the + * end of the queue in the middle of writing the SGL. For this case + * only we create the SGL in a temporary buffer and then copy it. + */ + to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge; + + for (i = 0; nfrags >= 2; nfrags -= 2, to++) { + m = m->next; + to->len[0] = rte_cpu_to_be_32(m->data_len); + to->addr[0] = rte_cpu_to_be_64(addr[++i]); + m = m->next; + to->len[1] = rte_cpu_to_be_32(m->data_len); + to->addr[1] = rte_cpu_to_be_64(addr[++i]); + } + if (nfrags) { + m = m->next; + to->len[0] = rte_cpu_to_be_32(m->data_len); + to->len[1] = rte_cpu_to_be_32(0); + to->addr[0] = rte_cpu_to_be_64(addr[i + 1]); + } + if (unlikely((u8 *)end > (u8 *)q->stat)) { + unsigned int part0 = RTE_PTR_DIFF((u8 *)q->stat, + (u8 *)sgl->sge); + unsigned int part1; + + if (likely(part0)) + memcpy(sgl->sge, buf, part0); + part1 = RTE_PTR_DIFF((u8 *)end, (u8 *)q->stat); + rte_memcpy(q->desc, RTE_PTR_ADD((u8 *)buf, part0), part1); + end = RTE_PTR_ADD((void *)q->desc, part1); + } + if ((uintptr_t)end & 8) /* 0-pad to multiple of 16 */ + *(u64 *)end = 0; +} + +#define IDXDIFF(head, tail, wrap) \ + ((head) >= (tail) ? (head) - (tail) : (wrap) - (tail) + (head)) + +#define Q_IDXDIFF(q, idx) IDXDIFF((q)->pidx, (q)->idx, (q)->size) +#define R_IDXDIFF(q, idx) IDXDIFF((q)->cidx, (q)->idx, (q)->size) + +#define PIDXDIFF(head, tail, wrap) \ + ((tail) >= (head) ? 
(tail) - (head) : (wrap) - (head) + (tail)) +#define P_IDXDIFF(q, idx) PIDXDIFF((q)->cidx, idx, (q)->size) + +/** + * ring_tx_db - ring a Tx queue's doorbell + * @adap: the adapter + * @q: the Tx queue + * @n: number of new descriptors to give to HW + * + * Ring the doorbel for a Tx queue. + */ +static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q) +{ + int n = Q_IDXDIFF(q, dbidx); + + /* + * Make sure that all writes to the TX Descriptors are committed + * before we tell the hardware about them. + */ + rte_wmb(); + + /* + * If we don't have access to the new User Doorbell (T5+), use the old + * doorbell mechanism; otherwise use the new BAR2 mechanism. + */ + if (unlikely(!q->bar2_addr)) { + u32 val = V_PIDX(n); + + /* + * For T4 we need to participate in the Doorbell Recovery + * mechanism. + */ + if (!q->db_disabled) + t4_write_reg(adap, MYPF_REG(A_SGE_PF_KDOORBELL), + V_QID(q->cntxt_id) | val); + else + q->db_pidx_inc += n; + q->db_pidx = q->pidx; + } else { + u32 val = V_PIDX_T5(n); + + /* + * T4 and later chips share the same PIDX field offset within + * the doorbell, but T5 and later shrank the field in order to + * gain a bit for Doorbell Priority. The field was absurdly + * large in the first place (14 bits) so we just use the T5 + * and later limits and warn if a Queue ID is too large. + */ + WARN_ON(val & F_DBPRIO); + + writel(val | V_QID(q->bar2_qid), + (void *)((uintptr_t)q->bar2_addr + SGE_UDB_KDOORBELL)); + + /* + * This Write Memory Barrier will force the write to the User + * Doorbell area to be flushed. This is needed to prevent + * writes on different CPUs for the same queue from hitting + * the adapter out of order. This is required when some Work + * Requests take the Write Combine Gather Buffer path (user + * doorbell area offset [SGE_UDB_WCDOORBELL..+63]) and some + * take the traditional path where we simply increment the + * PIDX (User Doorbell area SGE_UDB_KDOORBELL) and have the + * hardware DMA read the actual Work Request. + */ + rte_wmb(); + } + q->dbidx = q->pidx; +} + +/* + * Figure out what HW csum a packet wants and return the appropriate control + * bits. + */ +static u64 hwcsum(enum chip_type chip, const struct rte_mbuf *m) +{ + int csum_type; + + if (m->ol_flags & PKT_TX_IP_CKSUM) { + switch (m->ol_flags & PKT_TX_L4_MASK) { + case PKT_TX_TCP_CKSUM: + csum_type = TX_CSUM_TCPIP; + break; + case PKT_TX_UDP_CKSUM: + csum_type = TX_CSUM_UDPIP; + break; + default: + goto nocsum; + } + } else { + goto nocsum; + } + + if (likely(csum_type >= TX_CSUM_TCPIP)) { + u64 hdr_len = V_TXPKT_IPHDR_LEN(m->l3_len); + int eth_hdr_len = m->l2_len; + + if (CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5) + hdr_len |= V_TXPKT_ETHHDR_LEN(eth_hdr_len); + else + hdr_len |= V_T6_TXPKT_ETHHDR_LEN(eth_hdr_len); + return V_TXPKT_CSUM_TYPE(csum_type) | hdr_len; + } +nocsum: + /* + * unknown protocol, disable HW csum + * and hope a bad packet is detected + */ + return F_TXPKT_L4CSUM_DIS; +} + +static inline void txq_advance(struct sge_txq *q, unsigned int n) +{ + q->in_use += n; + q->pidx += n; + if (q->pidx >= q->size) + q->pidx -= q->size; +} + +#define MAX_COALESCE_LEN 64000 + +static inline int wraps_around(struct sge_txq *q, int ndesc) +{ + return (q->pidx + ndesc) > q->size ? 
1 : 0; +} + +static void tx_timer_cb(void *data) +{ + struct adapter *adap = (struct adapter *)data; + struct sge_eth_txq *txq = &adap->sge.ethtxq[0]; + int i; + unsigned int coal_idx; + + /* monitor any pending tx */ + for (i = 0; i < adap->sge.max_ethqsets; i++, txq++) { + if (t4_os_trylock(&txq->txq_lock)) { + coal_idx = txq->q.coalesce.idx; + if (coal_idx) { + if (coal_idx == txq->q.last_coal_idx && + txq->q.pidx == txq->q.last_pidx) { + ship_tx_pkt_coalesce_wr(adap, txq); + } else { + txq->q.last_coal_idx = coal_idx; + txq->q.last_pidx = txq->q.pidx; + } + } + t4_os_unlock(&txq->txq_lock); + } + } + rte_eal_alarm_set(50, tx_timer_cb, (void *)adap); +} + +/** + * ship_tx_pkt_coalesce_wr - finalizes and ships a coalesce WR + * @ adap: adapter structure + * @txq: tx queue + * + * writes the different fields of the pkts WR and sends it. + */ +static inline void ship_tx_pkt_coalesce_wr(struct adapter *adap, + struct sge_eth_txq *txq) +{ + struct fw_eth_tx_pkts_vm_wr *vmwr; + const size_t fw_hdr_copy_len = (sizeof(vmwr->ethmacdst) + + sizeof(vmwr->ethmacsrc) + + sizeof(vmwr->ethtype) + + sizeof(vmwr->vlantci)); + struct fw_eth_tx_pkts_wr *wr; + struct sge_txq *q = &txq->q; + unsigned int ndesc; + u32 wr_mid; + + /* fill the pkts WR header */ + wr = (void *)&q->desc[q->pidx]; + wr->op_pkd = htonl(V_FW_WR_OP(FW_ETH_TX_PKTS2_WR)); + vmwr = (void *)&q->desc[q->pidx]; + + wr_mid = V_FW_WR_LEN16(DIV_ROUND_UP(q->coalesce.flits, 2)); + ndesc = flits_to_desc(q->coalesce.flits); + wr->equiq_to_len16 = htonl(wr_mid); + wr->plen = cpu_to_be16(q->coalesce.len); + wr->npkt = q->coalesce.idx; + wr->r3 = 0; + if (is_pf4(adap)) { + wr->op_pkd = htonl(V_FW_WR_OP(FW_ETH_TX_PKTS2_WR)); + wr->type = q->coalesce.type; + } else { + wr->op_pkd = htonl(V_FW_WR_OP(FW_ETH_TX_PKTS_VM_WR)); + vmwr->r4 = 0; + memcpy((void *)vmwr->ethmacdst, (void *)q->coalesce.ethmacdst, + fw_hdr_copy_len); + } + + /* zero out coalesce structure members */ + memset((void *)&q->coalesce, 0, sizeof(struct eth_coalesce)); + + txq_advance(q, ndesc); + txq->stats.coal_wr++; + txq->stats.coal_pkts += wr->npkt; + + if (Q_IDXDIFF(q, equeidx) >= q->size / 2) { + q->equeidx = q->pidx; + wr_mid |= F_FW_WR_EQUEQ; + wr->equiq_to_len16 = htonl(wr_mid); + } + ring_tx_db(adap, q); +} + +/** + * should_tx_packet_coalesce - decides wether to coalesce an mbuf or not + * @txq: tx queue where the mbuf is sent + * @mbuf: mbuf to be sent + * @nflits: return value for number of flits needed + * @adap: adapter structure + * + * This function decides if a packet should be coalesced or not. + */ +static inline int should_tx_packet_coalesce(struct sge_eth_txq *txq, + struct rte_mbuf *mbuf, + unsigned int *nflits, + struct adapter *adap) +{ + struct fw_eth_tx_pkts_vm_wr *wr; + const size_t fw_hdr_copy_len = (sizeof(wr->ethmacdst) + + sizeof(wr->ethmacsrc) + + sizeof(wr->ethtype) + + sizeof(wr->vlantci)); + struct sge_txq *q = &txq->q; + unsigned int flits, ndesc; + unsigned char type = 0; + int credits, wr_size; + + /* use coal WR type 1 when no frags are present */ + type = (mbuf->nb_segs == 1) ? 1 : 0; + if (!is_pf4(adap)) { + if (!type) + return 0; + + if (q->coalesce.idx && memcmp((void *)q->coalesce.ethmacdst, + rte_pktmbuf_mtod(mbuf, void *), + fw_hdr_copy_len)) + ship_tx_pkt_coalesce_wr(adap, txq); + } + + if (unlikely(type != q->coalesce.type && q->coalesce.idx)) + ship_tx_pkt_coalesce_wr(adap, txq); + + /* calculate the number of flits required for coalescing this packet + * without the 2 flits of the WR header. 
These are added further down + * if we are just starting in new PKTS WR. sgl_len doesn't account for + * the possible 16 bytes alignment ULP TX commands so we do it here. + */ + flits = (sgl_len(mbuf->nb_segs) + 1) & ~1U; + if (type == 0) + flits += (sizeof(struct ulp_txpkt) + + sizeof(struct ulptx_idata)) / sizeof(__be64); + flits += sizeof(struct cpl_tx_pkt_core) / sizeof(__be64); + *nflits = flits; + + /* If coalescing is on, the mbuf is added to a pkts WR */ + if (q->coalesce.idx) { + ndesc = DIV_ROUND_UP(q->coalesce.flits + flits, 8); + credits = txq_avail(q) - ndesc; + + /* If we are wrapping or this is last mbuf then, send the + * already coalesced mbufs and let the non-coalesce pass + * handle the mbuf. + */ + if (unlikely(credits < 0 || wraps_around(q, ndesc))) { + ship_tx_pkt_coalesce_wr(adap, txq); + return 0; + } + + /* If the max coalesce len or the max WR len is reached + * ship the WR and keep coalescing on. + */ + if (unlikely((q->coalesce.len + mbuf->pkt_len > + MAX_COALESCE_LEN) || + (q->coalesce.flits + flits > + q->coalesce.max))) { + ship_tx_pkt_coalesce_wr(adap, txq); + goto new; + } + return 1; + } + +new: + /* start a new pkts WR, the WR header is not filled below */ + wr_size = is_pf4(adap) ? sizeof(struct fw_eth_tx_pkts_wr) : + sizeof(struct fw_eth_tx_pkts_vm_wr); + flits += wr_size / sizeof(__be64); + ndesc = flits_to_desc(q->coalesce.flits + flits); + credits = txq_avail(q) - ndesc; + + if (unlikely(credits < 0 || wraps_around(q, ndesc))) + return 0; + q->coalesce.flits += wr_size / sizeof(__be64); + q->coalesce.type = type; + q->coalesce.ptr = (unsigned char *)&q->desc[q->pidx] + + q->coalesce.flits * sizeof(__be64); + if (!is_pf4(adap)) + memcpy((void *)q->coalesce.ethmacdst, + rte_pktmbuf_mtod(mbuf, void *), fw_hdr_copy_len); + return 1; +} + +/** + * tx_do_packet_coalesce - add an mbuf to a coalesce WR + * @txq: sge_eth_txq used send the mbuf + * @mbuf: mbuf to be sent + * @flits: flits needed for this mbuf + * @adap: adapter structure + * @pi: port_info structure + * @addr: mapped address of the mbuf + * + * Adds an mbuf to be sent as part of a coalesce WR by filling a + * ulp_tx_pkt command, ulp_tx_sc_imm command, cpl message and + * ulp_tx_sc_dsgl command. 
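+ * For a type 0 (multi-segment) coalesce WR each packet is preceded by a
+ * ulp_txpkt header and an immediate-data ulptx_idata header; for type 1
+ * (single-segment) the CPL message is written directly at the current
+ * coalesce offset.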
+ */ +static inline int tx_do_packet_coalesce(struct sge_eth_txq *txq, + struct rte_mbuf *mbuf, + int flits, struct adapter *adap, + const struct port_info *pi, + dma_addr_t *addr, uint16_t nb_pkts) +{ + u64 cntrl, *end; + struct sge_txq *q = &txq->q; + struct ulp_txpkt *mc; + struct ulptx_idata *sc_imm; + struct cpl_tx_pkt_core *cpl; + struct tx_sw_desc *sd; + unsigned int idx = q->coalesce.idx, len = mbuf->pkt_len; + + if (q->coalesce.type == 0) { + mc = (struct ulp_txpkt *)q->coalesce.ptr; + mc->cmd_dest = htonl(V_ULPTX_CMD(4) | V_ULP_TXPKT_DEST(0) | + V_ULP_TXPKT_FID(adap->sge.fw_evtq.cntxt_id) | + F_ULP_TXPKT_RO); + mc->len = htonl(DIV_ROUND_UP(flits, 2)); + sc_imm = (struct ulptx_idata *)(mc + 1); + sc_imm->cmd_more = htonl(V_ULPTX_CMD(ULP_TX_SC_IMM) | + F_ULP_TX_SC_MORE); + sc_imm->len = htonl(sizeof(*cpl)); + end = (u64 *)mc + flits; + cpl = (struct cpl_tx_pkt_core *)(sc_imm + 1); + } else { + end = (u64 *)q->coalesce.ptr + flits; + cpl = (struct cpl_tx_pkt_core *)q->coalesce.ptr; + } + + /* update coalesce structure for this txq */ + q->coalesce.flits += flits; + q->coalesce.ptr += flits * sizeof(__be64); + q->coalesce.len += mbuf->pkt_len; + + /* fill the cpl message, same as in t4_eth_xmit, this should be kept + * similar to t4_eth_xmit + */ + if (mbuf->ol_flags & PKT_TX_IP_CKSUM) { + cntrl = hwcsum(adap->params.chip, mbuf) | + F_TXPKT_IPCSUM_DIS; + txq->stats.tx_cso++; + } else { + cntrl = F_TXPKT_L4CSUM_DIS | F_TXPKT_IPCSUM_DIS; + } + + if (mbuf->ol_flags & PKT_TX_VLAN_PKT) { + txq->stats.vlan_ins++; + cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(mbuf->vlan_tci); + } + + cpl->ctrl0 = htonl(V_TXPKT_OPCODE(CPL_TX_PKT_XT)); + if (is_pf4(adap)) + cpl->ctrl0 |= htonl(V_TXPKT_INTF(pi->tx_chan) | + V_TXPKT_PF(adap->pf)); + else + cpl->ctrl0 |= htonl(V_TXPKT_INTF(pi->port_id)); + cpl->pack = htons(0); + cpl->len = htons(len); + cpl->ctrl1 = cpu_to_be64(cntrl); + write_sgl(mbuf, q, (struct ulptx_sgl *)(cpl + 1), end, 0, addr); + txq->stats.pkts++; + txq->stats.tx_bytes += len; + + sd = &q->sdesc[q->pidx + (idx >> 1)]; + if (!(idx & 1)) { + if (sd->coalesce.idx) { + int i; + + for (i = 0; i < sd->coalesce.idx; i++) { + rte_pktmbuf_free(sd->coalesce.mbuf[i]); + sd->coalesce.mbuf[i] = NULL; + } + } + } + + /* store pointers to the mbuf and the sgl used in free_tx_desc. + * each tx desc can hold two pointers corresponding to the value + * of ETH_COALESCE_PKT_PER_DESC + */ + sd->coalesce.mbuf[idx & 1] = mbuf; + sd->coalesce.sgl[idx & 1] = (struct ulptx_sgl *)(cpl + 1); + sd->coalesce.idx = (idx & 1) + 1; + + /* Send the coalesced work request, only if max reached. However, + * if lower latency is preferred over throughput, then don't wait + * for coalescing the next Tx burst and send the packets now. + */ + q->coalesce.idx++; + if (q->coalesce.idx == adap->params.max_tx_coalesce_num || + (adap->devargs.tx_mode_latency && q->coalesce.idx >= nb_pkts)) + ship_tx_pkt_coalesce_wr(adap, txq); + + return 0; +} + +/** + * t4_eth_xmit - add a packet to an Ethernet Tx queue + * @txq: the egress queue + * @mbuf: the packet + * + * Add a packet to an SGE Ethernet Tx queue. Runs with softirqs disabled. 
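+ * Small non-TSO packets are handed to the Tx coalescing path; TSO and
+ * larger packets first flush any pending coalesce WR and are then sent
+ * with their own packet work request.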
+ */ +int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf, + uint16_t nb_pkts) +{ + const struct port_info *pi; + struct cpl_tx_pkt_lso_core *lso; + struct adapter *adap; + struct rte_mbuf *m = mbuf; + struct fw_eth_tx_pkt_wr *wr; + struct fw_eth_tx_pkt_vm_wr *vmwr; + struct cpl_tx_pkt_core *cpl; + struct tx_sw_desc *d; + dma_addr_t addr[m->nb_segs]; + unsigned int flits, ndesc, cflits; + int l3hdr_len, l4hdr_len, eth_xtra_len; + int len, last_desc; + int credits; + u32 wr_mid; + u64 cntrl, *end; + bool v6; + u32 max_pkt_len = txq->data->dev_conf.rxmode.max_rx_pkt_len; + + /* Reject xmit if queue is stopped */ + if (unlikely(txq->flags & EQ_STOPPED)) + return -(EBUSY); + + /* + * The chip min packet length is 10 octets but play safe and reject + * anything shorter than an Ethernet header. + */ + if (unlikely(m->pkt_len < RTE_ETHER_HDR_LEN)) { +out_free: + rte_pktmbuf_free(m); + return 0; + } + + if ((!(m->ol_flags & PKT_TX_TCP_SEG)) && + (unlikely(m->pkt_len > max_pkt_len))) + goto out_free; + + pi = txq->data->dev_private; + adap = pi->adapter; + + cntrl = F_TXPKT_L4CSUM_DIS | F_TXPKT_IPCSUM_DIS; + /* align the end of coalesce WR to a 512 byte boundary */ + txq->q.coalesce.max = (8 - (txq->q.pidx & 7)) * 8; + + if (!((m->ol_flags & PKT_TX_TCP_SEG) || + m->pkt_len > RTE_ETHER_MAX_LEN)) { + if (should_tx_packet_coalesce(txq, mbuf, &cflits, adap)) { + if (unlikely(map_mbuf(mbuf, addr) < 0)) { + dev_warn(adap, "%s: mapping err for coalesce\n", + __func__); + txq->stats.mapping_err++; + goto out_free; + } + return tx_do_packet_coalesce(txq, mbuf, cflits, adap, + pi, addr, nb_pkts); + } else { + return -EBUSY; + } + } + + if (txq->q.coalesce.idx) + ship_tx_pkt_coalesce_wr(adap, txq); + + flits = calc_tx_flits(m, adap); + ndesc = flits_to_desc(flits); + credits = txq_avail(&txq->q) - ndesc; + + if (unlikely(credits < 0)) { + dev_debug(adap, "%s: Tx ring %u full; credits = %d\n", + __func__, txq->q.cntxt_id, credits); + return -EBUSY; + } + + if (unlikely(map_mbuf(m, addr) < 0)) { + txq->stats.mapping_err++; + goto out_free; + } + + wr_mid = V_FW_WR_LEN16(DIV_ROUND_UP(flits, 2)); + if (Q_IDXDIFF(&txq->q, equeidx) >= 64) { + txq->q.equeidx = txq->q.pidx; + wr_mid |= F_FW_WR_EQUEQ; + } + + wr = (void *)&txq->q.desc[txq->q.pidx]; + vmwr = (void *)&txq->q.desc[txq->q.pidx]; + wr->equiq_to_len16 = htonl(wr_mid); + if (is_pf4(adap)) { + wr->r3 = rte_cpu_to_be_64(0); + end = (u64 *)wr + flits; + } else { + const size_t fw_hdr_copy_len = (sizeof(vmwr->ethmacdst) + + sizeof(vmwr->ethmacsrc) + + sizeof(vmwr->ethtype) + + sizeof(vmwr->vlantci)); + + vmwr->r3[0] = rte_cpu_to_be_32(0); + vmwr->r3[1] = rte_cpu_to_be_32(0); + memcpy((void *)vmwr->ethmacdst, rte_pktmbuf_mtod(m, void *), + fw_hdr_copy_len); + end = (u64 *)vmwr + flits; + } + + len = 0; + len += sizeof(*cpl); + + /* Coalescing skipped and we send through normal path */ + if (!(m->ol_flags & PKT_TX_TCP_SEG)) { + wr->op_immdlen = htonl(V_FW_WR_OP(is_pf4(adap) ? + FW_ETH_TX_PKT_WR : + FW_ETH_TX_PKT_VM_WR) | + V_FW_WR_IMMDLEN(len)); + if (is_pf4(adap)) + cpl = (void *)(wr + 1); + else + cpl = (void *)(vmwr + 1); + if (m->ol_flags & PKT_TX_IP_CKSUM) { + cntrl = hwcsum(adap->params.chip, m) | + F_TXPKT_IPCSUM_DIS; + txq->stats.tx_cso++; + } + } else { + if (is_pf4(adap)) + lso = (void *)(wr + 1); + else + lso = (void *)(vmwr + 1); + v6 = (m->ol_flags & PKT_TX_IPV6) != 0; + l3hdr_len = m->l3_len; + l4hdr_len = m->l4_len; + eth_xtra_len = m->l2_len - RTE_ETHER_HDR_LEN; + len += sizeof(*lso); + wr->op_immdlen = htonl(V_FW_WR_OP(is_pf4(adap) ? 
+ FW_ETH_TX_PKT_WR : + FW_ETH_TX_PKT_VM_WR) | + V_FW_WR_IMMDLEN(len)); + lso->lso_ctrl = htonl(V_LSO_OPCODE(CPL_TX_PKT_LSO) | + F_LSO_FIRST_SLICE | F_LSO_LAST_SLICE | + V_LSO_IPV6(v6) | + V_LSO_ETHHDR_LEN(eth_xtra_len / 4) | + V_LSO_IPHDR_LEN(l3hdr_len / 4) | + V_LSO_TCPHDR_LEN(l4hdr_len / 4)); + lso->ipid_ofst = htons(0); + lso->mss = htons(m->tso_segsz); + lso->seqno_offset = htonl(0); + if (is_t4(adap->params.chip)) + lso->len = htonl(m->pkt_len); + else + lso->len = htonl(V_LSO_T5_XFER_SIZE(m->pkt_len)); + cpl = (void *)(lso + 1); + + if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5) + cntrl = V_TXPKT_ETHHDR_LEN(eth_xtra_len); + else + cntrl = V_T6_TXPKT_ETHHDR_LEN(eth_xtra_len); + + cntrl |= V_TXPKT_CSUM_TYPE(v6 ? TX_CSUM_TCPIP6 : + TX_CSUM_TCPIP) | + V_TXPKT_IPHDR_LEN(l3hdr_len); + txq->stats.tso++; + txq->stats.tx_cso += m->tso_segsz; + } + + if (m->ol_flags & PKT_TX_VLAN_PKT) { + txq->stats.vlan_ins++; + cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(m->vlan_tci); + } + + cpl->ctrl0 = htonl(V_TXPKT_OPCODE(CPL_TX_PKT_XT)); + if (is_pf4(adap)) + cpl->ctrl0 |= htonl(V_TXPKT_INTF(pi->tx_chan) | + V_TXPKT_PF(adap->pf)); + else + cpl->ctrl0 |= htonl(V_TXPKT_INTF(pi->port_id) | + V_TXPKT_PF(0)); + + cpl->pack = htons(0); + cpl->len = htons(m->pkt_len); + cpl->ctrl1 = cpu_to_be64(cntrl); + + txq->stats.pkts++; + txq->stats.tx_bytes += m->pkt_len; + last_desc = txq->q.pidx + ndesc - 1; + if (last_desc >= (int)txq->q.size) + last_desc -= txq->q.size; + + d = &txq->q.sdesc[last_desc]; + if (d->coalesce.idx) { + int i; + + for (i = 0; i < d->coalesce.idx; i++) { + rte_pktmbuf_free(d->coalesce.mbuf[i]); + d->coalesce.mbuf[i] = NULL; + } + d->coalesce.idx = 0; + } + write_sgl(m, &txq->q, (struct ulptx_sgl *)(cpl + 1), end, 0, + addr); + txq->q.sdesc[last_desc].mbuf = m; + txq->q.sdesc[last_desc].sgl = (struct ulptx_sgl *)(cpl + 1); + txq_advance(&txq->q, ndesc); + ring_tx_db(adap, &txq->q); + return 0; +} + +/** + * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs + * @q: the SGE control Tx queue + * + * This is a variant of reclaim_completed_tx() that is used for Tx queues + * that send only immediate data (presently just the control queues) and + * thus do not have any mbufs to release. + */ +static inline void reclaim_completed_tx_imm(struct sge_txq *q) +{ + int hw_cidx = ntohs(q->stat->cidx); + int reclaim = hw_cidx - q->cidx; + + if (reclaim < 0) + reclaim += q->size; + + q->in_use -= reclaim; + q->cidx = hw_cidx; +} + +/** + * is_imm - check whether a packet can be sent as immediate data + * @mbuf: the packet + * + * Returns true if a packet can be sent as a WR with immediate data. + */ +static inline int is_imm(const struct rte_mbuf *mbuf) +{ + return mbuf->pkt_len <= MAX_CTRL_WR_LEN; +} + +/** + * inline_tx_mbuf: inline a packet's data into TX descriptors + * @q: the TX queue where the packet will be inlined + * @from: pointer to data portion of packet + * @to: pointer after cpl where data has to be inlined + * @len: length of data to inline + * + * Inline a packet's contents directly to TX descriptors, starting at + * the given position within the TX DMA ring. + * Most of the complexity of this operation is dealing with wrap arounds + * in the middle of the packet we want to inline. 
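+ * If fewer than @len bytes remain before the queue's status page, the
+ * first part is copied up to q->stat and the remainder continues from the
+ * start of the descriptor ring at q->desc.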
+ */ +static void inline_tx_mbuf(const struct sge_txq *q, caddr_t from, caddr_t *to, + int len) +{ + int left = RTE_PTR_DIFF(q->stat, *to); + + if (likely((uintptr_t)*to + len <= (uintptr_t)q->stat)) { + rte_memcpy(*to, from, len); + *to = RTE_PTR_ADD(*to, len); + } else { + rte_memcpy(*to, from, left); + from = RTE_PTR_ADD(from, left); + left = len - left; + rte_memcpy((void *)q->desc, from, left); + *to = RTE_PTR_ADD((void *)q->desc, left); + } +} + +/** + * ctrl_xmit - send a packet through an SGE control Tx queue + * @q: the control queue + * @mbuf: the packet + * + * Send a packet through an SGE control Tx queue. Packets sent through + * a control queue must fit entirely as immediate data. + */ +static int ctrl_xmit(struct sge_ctrl_txq *q, struct rte_mbuf *mbuf) +{ + unsigned int ndesc; + struct fw_wr_hdr *wr; + caddr_t dst; + + if (unlikely(!is_imm(mbuf))) { + WARN_ON(1); + rte_pktmbuf_free(mbuf); + return -1; + } + + reclaim_completed_tx_imm(&q->q); + ndesc = DIV_ROUND_UP(mbuf->pkt_len, sizeof(struct tx_desc)); + t4_os_lock(&q->ctrlq_lock); + + q->full = txq_avail(&q->q) < ndesc ? 1 : 0; + if (unlikely(q->full)) { + t4_os_unlock(&q->ctrlq_lock); + return -1; + } + + wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx]; + dst = (void *)wr; + inline_tx_mbuf(&q->q, rte_pktmbuf_mtod(mbuf, caddr_t), + &dst, mbuf->data_len); + + txq_advance(&q->q, ndesc); + if (unlikely(txq_avail(&q->q) < 64)) + wr->lo |= htonl(F_FW_WR_EQUEQ); + + q->txp++; + + ring_tx_db(q->adapter, &q->q); + t4_os_unlock(&q->ctrlq_lock); + + rte_pktmbuf_free(mbuf); + return 0; +} + +/** + * t4_mgmt_tx - send a management message + * @q: the control queue + * @mbuf: the packet containing the management message + * + * Send a management message through control queue. + */ +int t4_mgmt_tx(struct sge_ctrl_txq *q, struct rte_mbuf *mbuf) +{ + return ctrl_xmit(q, mbuf); +} + +/** + * alloc_ring - allocate resources for an SGE descriptor ring + * @dev: the PCI device's core device + * @nelem: the number of descriptors + * @elem_size: the size of each descriptor + * @sw_size: the size of the SW state associated with each ring element + * @phys: the physical address of the allocated ring + * @metadata: address of the array holding the SW state for the ring + * @stat_size: extra space in HW ring for status information + * @node: preferred node for memory allocations + * + * Allocates resources for an SGE descriptor ring, such as Tx queues, + * free buffer lists, or response queues. Each SGE ring requires + * space for its HW descriptors plus, optionally, space for the SW state + * associated with each HW entry (the metadata). The function returns + * three values: the virtual address for the HW ring (the return value + * of the function), the bus address of the HW ring, and the address + * of the SW ring. 
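+ * The memzone backing the HW ring is sized for CXGBE_MAX_RING_DESC_SIZE
+ * entries and is looked up by name first, so a queue that is torn down
+ * and set up again can reuse the same reservation.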
+ */ +static void *alloc_ring(size_t nelem, size_t elem_size, + size_t sw_size, dma_addr_t *phys, void *metadata, + size_t stat_size, __rte_unused uint16_t queue_id, + int socket_id, const char *z_name, + const char *z_name_sw) +{ + size_t len = CXGBE_MAX_RING_DESC_SIZE * elem_size + stat_size; + const struct rte_memzone *tz; + void *s = NULL; + + dev_debug(adapter, "%s: nelem = %zu; elem_size = %zu; sw_size = %zu; " + "stat_size = %zu; queue_id = %u; socket_id = %d; z_name = %s;" + " z_name_sw = %s\n", __func__, nelem, elem_size, sw_size, + stat_size, queue_id, socket_id, z_name, z_name_sw); + + tz = rte_memzone_lookup(z_name); + if (tz) { + dev_debug(adapter, "%s: tz exists...returning existing..\n", + __func__); + goto alloc_sw_ring; + } + + /* + * Allocate TX/RX ring hardware descriptors. A memzone large enough to + * handle the maximum ring size is allocated in order to allow for + * resizing in later calls to the queue setup function. + */ + tz = rte_memzone_reserve_aligned(z_name, len, socket_id, + RTE_MEMZONE_IOVA_CONTIG, 4096); + if (!tz) + return NULL; + +alloc_sw_ring: + memset(tz->addr, 0, len); + if (sw_size) { + s = rte_zmalloc_socket(z_name_sw, nelem * sw_size, + RTE_CACHE_LINE_SIZE, socket_id); + + if (!s) { + dev_err(adapter, "%s: failed to get sw_ring memory\n", + __func__); + return NULL; + } + } + if (metadata) + *(void **)metadata = s; + + *phys = (uint64_t)tz->iova; + return tz->addr; +} + +#define CXGB4_MSG_AN ((void *)1) + +/** + * rspq_next - advance to the next entry in a response queue + * @q: the queue + * + * Updates the state of a response queue to advance it to the next entry. + */ +static inline void rspq_next(struct sge_rspq *q) +{ + q->cur_desc = (const __be64 *)((const char *)q->cur_desc + q->iqe_len); + if (unlikely(++q->cidx == q->size)) { + q->cidx = 0; + q->gen ^= 1; + q->cur_desc = q->desc; + } +} + +static inline void cxgbe_set_mbuf_info(struct rte_mbuf *pkt, uint32_t ptype, + uint64_t ol_flags) +{ + pkt->packet_type |= ptype; + pkt->ol_flags |= ol_flags; +} + +static inline void cxgbe_fill_mbuf_info(struct adapter *adap, + const struct cpl_rx_pkt *cpl, + struct rte_mbuf *pkt) +{ + bool csum_ok; + u16 err_vec; + + if (adap->params.tp.rx_pkt_encap) + err_vec = G_T6_COMPR_RXERR_VEC(ntohs(cpl->err_vec)); + else + err_vec = ntohs(cpl->err_vec); + + csum_ok = cpl->csum_calc && !err_vec; + + if (cpl->vlan_ex) + cxgbe_set_mbuf_info(pkt, RTE_PTYPE_L2_ETHER_VLAN, + PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED); + else + cxgbe_set_mbuf_info(pkt, RTE_PTYPE_L2_ETHER, 0); + + if (cpl->l2info & htonl(F_RXF_IP)) + cxgbe_set_mbuf_info(pkt, RTE_PTYPE_L3_IPV4, + csum_ok ? PKT_RX_IP_CKSUM_GOOD : + PKT_RX_IP_CKSUM_BAD); + else if (cpl->l2info & htonl(F_RXF_IP6)) + cxgbe_set_mbuf_info(pkt, RTE_PTYPE_L3_IPV6, + csum_ok ? PKT_RX_IP_CKSUM_GOOD : + PKT_RX_IP_CKSUM_BAD); + + if (cpl->l2info & htonl(F_RXF_TCP)) + cxgbe_set_mbuf_info(pkt, RTE_PTYPE_L4_TCP, + csum_ok ? PKT_RX_L4_CKSUM_GOOD : + PKT_RX_L4_CKSUM_BAD); + else if (cpl->l2info & htonl(F_RXF_UDP)) + cxgbe_set_mbuf_info(pkt, RTE_PTYPE_L4_UDP, + csum_ok ? PKT_RX_L4_CKSUM_GOOD : + PKT_RX_L4_CKSUM_BAD); +} + +/** + * process_responses - process responses from an SGE response queue + * @q: the ingress queue to process + * @budget: how many responses can be processed in this round + * @rx_pkts: mbuf to put the pkts + * + * Process responses from an SGE response queue up to the supplied budget. + * Responses include received packets as well as control messages from FW + * or HW. 
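+ * The return value is the number of responses processed; received packets
+ * are stored in @rx_pkts and ringing the GTS doorbell for the consumed
+ * entries is left to the caller (see cxgbe_poll()).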
+ * + * Additionally choose the interrupt holdoff time for the next interrupt + * on this queue. If the system is under memory shortage use a fairly + * long delay to help recovery. + */ +static int process_responses(struct sge_rspq *q, int budget, + struct rte_mbuf **rx_pkts) +{ + int ret = 0, rsp_type; + int budget_left = budget; + const struct rsp_ctrl *rc; + struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq); + + while (likely(budget_left)) { + if (q->cidx == ntohs(q->stat->pidx)) + break; + + rc = (const struct rsp_ctrl *) + ((const char *)q->cur_desc + (q->iqe_len - sizeof(*rc))); + + /* + * Ensure response has been read + */ + rmb(); + rsp_type = G_RSPD_TYPE(rc->u.type_gen); + + if (likely(rsp_type == X_RSPD_TYPE_FLBUF)) { + struct sge *s = &q->adapter->sge; + unsigned int stat_pidx; + int stat_pidx_diff; + + stat_pidx = ntohs(q->stat->pidx); + stat_pidx_diff = P_IDXDIFF(q, stat_pidx); + while (stat_pidx_diff && budget_left) { + const struct rx_sw_desc *rsd = + &rxq->fl.sdesc[rxq->fl.cidx]; + const struct rss_header *rss_hdr = + (const void *)q->cur_desc; + const struct cpl_rx_pkt *cpl = + (const void *)&q->cur_desc[1]; + struct rte_mbuf *pkt, *npkt; + u32 len, bufsz; + + rc = (const struct rsp_ctrl *) + ((const char *)q->cur_desc + + (q->iqe_len - sizeof(*rc))); + + rsp_type = G_RSPD_TYPE(rc->u.type_gen); + if (unlikely(rsp_type != X_RSPD_TYPE_FLBUF)) + break; + + len = ntohl(rc->pldbuflen_qid); + BUG_ON(!(len & F_RSPD_NEWBUF)); + pkt = rsd->buf; + npkt = pkt; + len = G_RSPD_LEN(len); + pkt->pkt_len = len; + + /* Chain mbufs into len if necessary */ + while (len) { + struct rte_mbuf *new_pkt = rsd->buf; + + bufsz = min(get_buf_size(q->adapter, + rsd), len); + new_pkt->data_len = bufsz; + unmap_rx_buf(&rxq->fl); + len -= bufsz; + npkt->next = new_pkt; + npkt = new_pkt; + pkt->nb_segs++; + rsd = &rxq->fl.sdesc[rxq->fl.cidx]; + } + npkt->next = NULL; + pkt->nb_segs--; + + cxgbe_fill_mbuf_info(q->adapter, cpl, pkt); + + if (!rss_hdr->filter_tid && + rss_hdr->hash_type) { + pkt->ol_flags |= PKT_RX_RSS_HASH; + pkt->hash.rss = + ntohl(rss_hdr->hash_val); + } + + if (cpl->vlan_ex) + pkt->vlan_tci = ntohs(cpl->vlan); + + rte_pktmbuf_adj(pkt, s->pktshift); + rxq->stats.pkts++; + rxq->stats.rx_bytes += pkt->pkt_len; + rx_pkts[budget - budget_left] = pkt; + + rspq_next(q); + budget_left--; + stat_pidx_diff--; + } + continue; + } else if (likely(rsp_type == X_RSPD_TYPE_CPL)) { + ret = q->handler(q, q->cur_desc, NULL); + } else { + ret = q->handler(q, (const __be64 *)rc, CXGB4_MSG_AN); + } + + if (unlikely(ret)) { + /* couldn't process descriptor, back off for recovery */ + q->next_intr_params = V_QINTR_TIMER_IDX(NOMEM_TMR_IDX); + break; + } + + rspq_next(q); + budget_left--; + } + + /* + * If this is a Response Queue with an associated Free List and + * there's room for another chunk of new Free List buffer pointers, + * refill the Free List. 
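+	 * A chunk here is 64 Free List entries, matching the refill check
+	 * below.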
+ */ + + if (q->offset >= 0 && fl_cap(&rxq->fl) - rxq->fl.avail >= 64) + __refill_fl(q->adapter, &rxq->fl); + + return budget - budget_left; +} + +int cxgbe_poll(struct sge_rspq *q, struct rte_mbuf **rx_pkts, + unsigned int budget, unsigned int *work_done) +{ + struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq); + unsigned int cidx_inc; + unsigned int params; + u32 val; + + *work_done = process_responses(q, budget, rx_pkts); + + if (*work_done) { + cidx_inc = R_IDXDIFF(q, gts_idx); + + if (q->offset >= 0 && fl_cap(&rxq->fl) - rxq->fl.avail >= 64) + __refill_fl(q->adapter, &rxq->fl); + + params = q->intr_params; + q->next_intr_params = params; + val = V_CIDXINC(cidx_inc) | V_SEINTARM(params); + + if (unlikely(!q->bar2_addr)) { + u32 reg = is_pf4(q->adapter) ? MYPF_REG(A_SGE_PF_GTS) : + T4VF_SGE_BASE_ADDR + + A_SGE_VF_GTS; + + t4_write_reg(q->adapter, reg, + val | V_INGRESSQID((u32)q->cntxt_id)); + } else { + writel(val | V_INGRESSQID(q->bar2_qid), + (void *)((uintptr_t)q->bar2_addr + SGE_UDB_GTS)); + /* This Write memory Barrier will force the + * write to the User Doorbell area to be + * flushed. + */ + wmb(); + } + q->gts_idx = q->cidx; + } + return 0; +} + +/** + * bar2_address - return the BAR2 address for an SGE Queue's Registers + * @adapter: the adapter + * @qid: the SGE Queue ID + * @qtype: the SGE Queue Type (Egress or Ingress) + * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues + * + * Returns the BAR2 address for the SGE Queue Registers associated with + * @qid. If BAR2 SGE Registers aren't available, returns NULL. Also + * returns the BAR2 Queue ID to be used with writes to the BAR2 SGE + * Queue Registers. If the BAR2 Queue ID is 0, then "Inferred Queue ID" + * Registers are supported (e.g. the Write Combining Doorbell Buffer). + */ +static void __iomem *bar2_address(struct adapter *adapter, unsigned int qid, + enum t4_bar2_qtype qtype, + unsigned int *pbar2_qid) +{ + u64 bar2_qoffset; + int ret; + + ret = t4_bar2_sge_qregs(adapter, qid, qtype, &bar2_qoffset, pbar2_qid); + if (ret) + return NULL; + + return adapter->bar2 + bar2_qoffset; +} + +int t4_sge_eth_rxq_start(struct adapter *adap, struct sge_rspq *rq) +{ + struct sge_eth_rxq *rxq = container_of(rq, struct sge_eth_rxq, rspq); + unsigned int fl_id = rxq->fl.size ? rxq->fl.cntxt_id : 0xffff; + + return t4_iq_start_stop(adap, adap->mbox, true, adap->pf, 0, + rq->cntxt_id, fl_id, 0xffff); +} + +int t4_sge_eth_rxq_stop(struct adapter *adap, struct sge_rspq *rq) +{ + struct sge_eth_rxq *rxq = container_of(rq, struct sge_eth_rxq, rspq); + unsigned int fl_id = rxq->fl.size ? rxq->fl.cntxt_id : 0xffff; + + return t4_iq_start_stop(adap, adap->mbox, false, adap->pf, 0, + rq->cntxt_id, fl_id, 0xffff); +} + +/* + * @intr_idx: MSI/MSI-X vector if >=0, -(absolute qid + 1) if < 0 + * @cong: < 0 -> no congestion feedback, >= 0 -> congestion channel map + */ +int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, + struct rte_eth_dev *eth_dev, int intr_idx, + struct sge_fl *fl, rspq_handler_t hnd, int cong, + struct rte_mempool *mp, int queue_id, int socket_id) +{ + int ret, flsz = 0; + struct fw_iq_cmd c; + struct sge *s = &adap->sge; + struct port_info *pi = eth_dev->data->dev_private; + char z_name[RTE_MEMZONE_NAMESIZE]; + char z_name_sw[RTE_MEMZONE_NAMESIZE]; + unsigned int nb_refill; + u8 pciechan; + + /* Size needs to be multiple of 16, including status entry. 
*/ + iq->size = cxgbe_roundup(iq->size, 16); + + snprintf(z_name, sizeof(z_name), "eth_p%d_q%d_%s", + eth_dev->data->port_id, queue_id, + fwevtq ? "fwq_ring" : "rx_ring"); + snprintf(z_name_sw, sizeof(z_name_sw), "%s_sw_ring", z_name); + + iq->desc = alloc_ring(iq->size, iq->iqe_len, 0, &iq->phys_addr, NULL, 0, + queue_id, socket_id, z_name, z_name_sw); + if (!iq->desc) + return -ENOMEM; + + memset(&c, 0, sizeof(c)); + c.op_to_vfn = htonl(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST | + F_FW_CMD_WRITE | F_FW_CMD_EXEC); + + if (is_pf4(adap)) { + pciechan = pi->tx_chan; + c.op_to_vfn |= htonl(V_FW_IQ_CMD_PFN(adap->pf) | + V_FW_IQ_CMD_VFN(0)); + if (cong >= 0) + c.iqns_to_fl0congen = + htonl(F_FW_IQ_CMD_IQFLINTCONGEN | + V_FW_IQ_CMD_IQTYPE(cong ? + FW_IQ_IQTYPE_NIC : + FW_IQ_IQTYPE_OFLD) | + F_FW_IQ_CMD_IQRO); + } else { + pciechan = pi->port_id; + } + + c.alloc_to_len16 = htonl(F_FW_IQ_CMD_ALLOC | F_FW_IQ_CMD_IQSTART | + (sizeof(c) / 16)); + c.type_to_iqandstindex = + htonl(V_FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) | + V_FW_IQ_CMD_IQASYNCH(fwevtq) | + V_FW_IQ_CMD_VIID(pi->viid) | + V_FW_IQ_CMD_IQANDST(intr_idx < 0) | + V_FW_IQ_CMD_IQANUD(X_UPDATEDELIVERY_STATUS_PAGE) | + V_FW_IQ_CMD_IQANDSTINDEX(intr_idx >= 0 ? intr_idx : + -intr_idx - 1)); + c.iqdroprss_to_iqesize = + htons(V_FW_IQ_CMD_IQPCIECH(pciechan) | + F_FW_IQ_CMD_IQGTSMODE | + V_FW_IQ_CMD_IQINTCNTTHRESH(iq->pktcnt_idx) | + V_FW_IQ_CMD_IQESIZE(ilog2(iq->iqe_len) - 4)); + c.iqsize = htons(iq->size); + c.iqaddr = cpu_to_be64(iq->phys_addr); + + if (fl) { + struct sge_eth_rxq *rxq = container_of(fl, struct sge_eth_rxq, + fl); + unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip); + + /* + * Allocate the ring for the hardware free list (with space + * for its status page) along with the associated software + * descriptor ring. The free list size needs to be a multiple + * of the Egress Queue Unit and at least 2 Egress Units larger + * than the SGE's Egress Congrestion Threshold + * (fl_starve_thres - 1). + */ + if (fl->size < s->fl_starve_thres - 1 + 2 * 8) + fl->size = s->fl_starve_thres - 1 + 2 * 8; + fl->size = cxgbe_roundup(fl->size, 8); + + snprintf(z_name, sizeof(z_name), "eth_p%d_q%d_%s", + eth_dev->data->port_id, queue_id, + fwevtq ? "fwq_ring" : "fl_ring"); + snprintf(z_name_sw, sizeof(z_name_sw), "%s_sw_ring", z_name); + + fl->desc = alloc_ring(fl->size, sizeof(__be64), + sizeof(struct rx_sw_desc), + &fl->addr, &fl->sdesc, s->stat_len, + queue_id, socket_id, z_name, z_name_sw); + + if (!fl->desc) + goto fl_nomem; + + flsz = fl->size / 8 + s->stat_len / sizeof(struct tx_desc); + c.iqns_to_fl0congen |= + htonl(V_FW_IQ_CMD_FL0HOSTFCMODE(X_HOSTFCMODE_NONE) | + (unlikely(rxq->usembufs) ? + 0 : F_FW_IQ_CMD_FL0PACKEN) | + F_FW_IQ_CMD_FL0FETCHRO | F_FW_IQ_CMD_FL0DATARO | + F_FW_IQ_CMD_FL0PADEN); + if (is_pf4(adap) && cong >= 0) + c.iqns_to_fl0congen |= + htonl(V_FW_IQ_CMD_FL0CNGCHMAP(cong) | + F_FW_IQ_CMD_FL0CONGCIF | + F_FW_IQ_CMD_FL0CONGEN); + + /* In T6, for egress queue type FL there is internal overhead + * of 16B for header going into FLM module. + * Hence maximum allowed burst size will be 448 bytes. + */ + c.fl0dcaen_to_fl0cidxfthresh = + htons(V_FW_IQ_CMD_FL0FBMIN(chip_ver <= CHELSIO_T5 ? + X_FETCHBURSTMIN_128B : + X_FETCHBURSTMIN_64B) | + V_FW_IQ_CMD_FL0FBMAX(chip_ver <= CHELSIO_T5 ? 
+ X_FETCHBURSTMAX_512B : + X_FETCHBURSTMAX_256B)); + c.fl0size = htons(flsz); + c.fl0addr = cpu_to_be64(fl->addr); + } + + if (is_pf4(adap)) + ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c); + else + ret = t4vf_wr_mbox(adap, &c, sizeof(c), &c); + if (ret) + goto err; + + iq->cur_desc = iq->desc; + iq->cidx = 0; + iq->gts_idx = 0; + iq->gen = 1; + iq->next_intr_params = iq->intr_params; + iq->cntxt_id = ntohs(c.iqid); + iq->abs_id = ntohs(c.physiqid); + iq->bar2_addr = bar2_address(adap, iq->cntxt_id, T4_BAR2_QTYPE_INGRESS, + &iq->bar2_qid); + iq->size--; /* subtract status entry */ + iq->stat = (void *)&iq->desc[iq->size * 8]; + iq->eth_dev = eth_dev; + iq->handler = hnd; + iq->port_id = pi->pidx; + iq->mb_pool = mp; + + /* set offset to -1 to distinguish ingress queues without FL */ + iq->offset = fl ? 0 : -1; + + if (fl) { + fl->cntxt_id = ntohs(c.fl0id); + fl->avail = 0; + fl->pend_cred = 0; + fl->pidx = 0; + fl->cidx = 0; + fl->alloc_failed = 0; + + /* + * Note, we must initialize the BAR2 Free List User Doorbell + * information before refilling the Free List! + */ + fl->bar2_addr = bar2_address(adap, fl->cntxt_id, + T4_BAR2_QTYPE_EGRESS, + &fl->bar2_qid); + + nb_refill = refill_fl(adap, fl, fl_cap(fl)); + if (nb_refill != fl_cap(fl)) { + ret = -ENOMEM; + dev_err(adap, "%s: mbuf alloc failed with error: %d\n", + __func__, ret); + goto refill_fl_err; + } + } + + /* + * For T5 and later we attempt to set up the Congestion Manager values + * of the new RX Ethernet Queue. This should really be handled by + * firmware because it's more complex than any host driver wants to + * get involved with and it's different per chip and this is almost + * certainly wrong. Formware would be wrong as well, but it would be + * a lot easier to fix in one place ... For now we do something very + * simple (and hopefully less wrong). + */ + if (is_pf4(adap) && !is_t4(adap->params.chip) && cong >= 0) { + u32 param, val; + int i; + + param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) | + V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) | + V_FW_PARAMS_PARAM_YZ(iq->cntxt_id)); + if (cong == 0) { + val = V_CONMCTXT_CNGTPMODE(X_CONMCTXT_CNGTPMODE_QUEUE); + } else { + val = V_CONMCTXT_CNGTPMODE( + X_CONMCTXT_CNGTPMODE_CHANNEL); + for (i = 0; i < 4; i++) { + if (cong & (1 << i)) + val |= V_CONMCTXT_CNGCHMAP(1 << + (i << 2)); + } + } + ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, + ¶m, &val); + if (ret) + dev_warn(adap->pdev_dev, "Failed to set Congestion Manager Context for Ingress Queue %d: %d\n", + iq->cntxt_id, -ret); + } + + return 0; + +refill_fl_err: + t4_iq_free(adap, adap->mbox, adap->pf, 0, FW_IQ_TYPE_FL_INT_CAP, + iq->cntxt_id, fl->cntxt_id, 0xffff); +fl_nomem: + ret = -ENOMEM; +err: + iq->cntxt_id = 0; + iq->abs_id = 0; + if (iq->desc) + iq->desc = NULL; + + if (fl && fl->desc) { + rte_free(fl->sdesc); + fl->cntxt_id = 0; + fl->sdesc = NULL; + fl->desc = NULL; + } + return ret; +} + +static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id, + unsigned int abs_id) +{ + q->cntxt_id = id; + q->abs_id = abs_id; + q->bar2_addr = bar2_address(adap, q->cntxt_id, T4_BAR2_QTYPE_EGRESS, + &q->bar2_qid); + q->cidx = 0; + q->pidx = 0; + q->dbidx = 0; + q->in_use = 0; + q->equeidx = 0; + q->coalesce.idx = 0; + q->coalesce.len = 0; + q->coalesce.flits = 0; + q->last_coal_idx = 0; + q->last_pidx = 0; + q->stat = (void *)&q->desc[q->size]; +} + +int t4_sge_eth_txq_start(struct sge_eth_txq *txq) +{ + /* + * TODO: For flow-control, queue may be stopped waiting to reclaim + * credits. 
+ * Ensure queue is in EQ_STOPPED state before starting it. + */ + if (!(txq->flags & EQ_STOPPED)) + return -(EBUSY); + + txq->flags &= ~EQ_STOPPED; + + return 0; +} + +int t4_sge_eth_txq_stop(struct sge_eth_txq *txq) +{ + txq->flags |= EQ_STOPPED; + + return 0; +} + +int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq, + struct rte_eth_dev *eth_dev, uint16_t queue_id, + unsigned int iqid, int socket_id) +{ + int ret, nentries; + struct fw_eq_eth_cmd c; + struct sge *s = &adap->sge; + struct port_info *pi = eth_dev->data->dev_private; + char z_name[RTE_MEMZONE_NAMESIZE]; + char z_name_sw[RTE_MEMZONE_NAMESIZE]; + u8 pciechan; + + /* Add status entries */ + nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc); + + snprintf(z_name, sizeof(z_name), "eth_p%d_q%d_%s", + eth_dev->data->port_id, queue_id, "tx_ring"); + snprintf(z_name_sw, sizeof(z_name_sw), "%s_sw_ring", z_name); + + txq->q.desc = alloc_ring(txq->q.size, sizeof(struct tx_desc), + sizeof(struct tx_sw_desc), &txq->q.phys_addr, + &txq->q.sdesc, s->stat_len, queue_id, + socket_id, z_name, z_name_sw); + if (!txq->q.desc) + return -ENOMEM; + + memset(&c, 0, sizeof(c)); + c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST | + F_FW_CMD_WRITE | F_FW_CMD_EXEC); + if (is_pf4(adap)) { + pciechan = pi->tx_chan; + c.op_to_vfn |= htonl(V_FW_EQ_ETH_CMD_PFN(adap->pf) | + V_FW_EQ_ETH_CMD_VFN(0)); + } else { + pciechan = pi->port_id; + } + + c.alloc_to_len16 = htonl(F_FW_EQ_ETH_CMD_ALLOC | + F_FW_EQ_ETH_CMD_EQSTART | (sizeof(c) / 16)); + c.autoequiqe_to_viid = htonl(F_FW_EQ_ETH_CMD_AUTOEQUEQE | + V_FW_EQ_ETH_CMD_VIID(pi->viid)); + c.fetchszm_to_iqid = + htonl(V_FW_EQ_ETH_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) | + V_FW_EQ_ETH_CMD_PCIECHN(pciechan) | + F_FW_EQ_ETH_CMD_FETCHRO | V_FW_EQ_ETH_CMD_IQID(iqid)); + c.dcaen_to_eqsize = + htonl(V_FW_EQ_ETH_CMD_FBMIN(X_FETCHBURSTMIN_64B) | + V_FW_EQ_ETH_CMD_FBMAX(X_FETCHBURSTMAX_512B) | + V_FW_EQ_ETH_CMD_EQSIZE(nentries)); + c.eqaddr = rte_cpu_to_be_64(txq->q.phys_addr); + + if (is_pf4(adap)) + ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c); + else + ret = t4vf_wr_mbox(adap, &c, sizeof(c), &c); + if (ret) { + rte_free(txq->q.sdesc); + txq->q.sdesc = NULL; + txq->q.desc = NULL; + return ret; + } + + init_txq(adap, &txq->q, G_FW_EQ_ETH_CMD_EQID(ntohl(c.eqid_pkd)), + G_FW_EQ_ETH_CMD_PHYSEQID(ntohl(c.physeqid_pkd))); + txq->stats.tso = 0; + txq->stats.pkts = 0; + txq->stats.tx_cso = 0; + txq->stats.coal_wr = 0; + txq->stats.vlan_ins = 0; + txq->stats.tx_bytes = 0; + txq->stats.coal_pkts = 0; + txq->stats.mapping_err = 0; + txq->flags |= EQ_STOPPED; + txq->eth_dev = eth_dev; + txq->data = eth_dev->data; + t4_os_lock_init(&txq->txq_lock); + return 0; +} + +int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq, + struct rte_eth_dev *eth_dev, uint16_t queue_id, + unsigned int iqid, int socket_id) +{ + int ret, nentries; + struct fw_eq_ctrl_cmd c; + struct sge *s = &adap->sge; + struct port_info *pi = eth_dev->data->dev_private; + char z_name[RTE_MEMZONE_NAMESIZE]; + char z_name_sw[RTE_MEMZONE_NAMESIZE]; + + /* Add status entries */ + nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc); + + snprintf(z_name, sizeof(z_name), "eth_p%d_q%d_%s", + eth_dev->data->port_id, queue_id, "ctrl_tx_ring"); + snprintf(z_name_sw, sizeof(z_name_sw), "%s_sw_ring", z_name); + + txq->q.desc = alloc_ring(txq->q.size, sizeof(struct tx_desc), + 0, &txq->q.phys_addr, + NULL, 0, queue_id, + socket_id, z_name, z_name_sw); + if (!txq->q.desc) + return -ENOMEM; + + 
memset(&c, 0, sizeof(c)); + c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_CTRL_CMD) | F_FW_CMD_REQUEST | + F_FW_CMD_WRITE | F_FW_CMD_EXEC | + V_FW_EQ_CTRL_CMD_PFN(adap->pf) | + V_FW_EQ_CTRL_CMD_VFN(0)); + c.alloc_to_len16 = htonl(F_FW_EQ_CTRL_CMD_ALLOC | + F_FW_EQ_CTRL_CMD_EQSTART | (sizeof(c) / 16)); + c.cmpliqid_eqid = htonl(V_FW_EQ_CTRL_CMD_CMPLIQID(0)); + c.physeqid_pkd = htonl(0); + c.fetchszm_to_iqid = + htonl(V_FW_EQ_CTRL_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) | + V_FW_EQ_CTRL_CMD_PCIECHN(pi->tx_chan) | + F_FW_EQ_CTRL_CMD_FETCHRO | V_FW_EQ_CTRL_CMD_IQID(iqid)); + c.dcaen_to_eqsize = + htonl(V_FW_EQ_CTRL_CMD_FBMIN(X_FETCHBURSTMIN_64B) | + V_FW_EQ_CTRL_CMD_FBMAX(X_FETCHBURSTMAX_512B) | + V_FW_EQ_CTRL_CMD_EQSIZE(nentries)); + c.eqaddr = cpu_to_be64(txq->q.phys_addr); + + ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c); + if (ret) { + txq->q.desc = NULL; + return ret; + } + + init_txq(adap, &txq->q, G_FW_EQ_CTRL_CMD_EQID(ntohl(c.cmpliqid_eqid)), + G_FW_EQ_CTRL_CMD_EQID(ntohl(c. physeqid_pkd))); + txq->adapter = adap; + txq->full = 0; + return 0; +} + +static void free_txq(struct sge_txq *q) +{ + q->cntxt_id = 0; + q->sdesc = NULL; + q->desc = NULL; +} + +static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq, + struct sge_fl *fl) +{ + unsigned int fl_id = fl ? fl->cntxt_id : 0xffff; + + t4_iq_free(adap, adap->mbox, adap->pf, 0, FW_IQ_TYPE_FL_INT_CAP, + rq->cntxt_id, fl_id, 0xffff); + rq->cntxt_id = 0; + rq->abs_id = 0; + rq->desc = NULL; + + if (fl) { + free_rx_bufs(fl, fl->avail); + rte_free(fl->sdesc); + fl->sdesc = NULL; + fl->cntxt_id = 0; + fl->desc = NULL; + } +} + +/* + * Clear all queues of the port + * + * Note: This function must only be called after rx and tx path + * of the port have been disabled. + */ +void t4_sge_eth_clear_queues(struct port_info *pi) +{ + int i; + struct adapter *adap = pi->adapter; + struct sge_eth_rxq *rxq = &adap->sge.ethrxq[pi->first_qset]; + struct sge_eth_txq *txq = &adap->sge.ethtxq[pi->first_qset]; + + for (i = 0; i < pi->n_rx_qsets; i++, rxq++) { + if (rxq->rspq.desc) + t4_sge_eth_rxq_stop(adap, &rxq->rspq); + } + for (i = 0; i < pi->n_tx_qsets; i++, txq++) { + if (txq->q.desc) { + struct sge_txq *q = &txq->q; + + t4_sge_eth_txq_stop(txq); + reclaim_completed_tx(q); + free_tx_desc(q, q->size); + q->equeidx = q->pidx; + } + } +} + +void t4_sge_eth_rxq_release(struct adapter *adap, struct sge_eth_rxq *rxq) +{ + if (rxq->rspq.desc) { + t4_sge_eth_rxq_stop(adap, &rxq->rspq); + free_rspq_fl(adap, &rxq->rspq, rxq->fl.size ? &rxq->fl : NULL); + } +} + +void t4_sge_eth_txq_release(struct adapter *adap, struct sge_eth_txq *txq) +{ + if (txq->q.desc) { + t4_sge_eth_txq_stop(txq); + reclaim_completed_tx(&txq->q); + t4_eth_eq_free(adap, adap->mbox, adap->pf, 0, txq->q.cntxt_id); + free_tx_desc(&txq->q, txq->q.size); + rte_free(txq->q.sdesc); + free_txq(&txq->q); + } +} + +void t4_sge_tx_monitor_start(struct adapter *adap) +{ + rte_eal_alarm_set(50, tx_timer_cb, (void *)adap); +} + +void t4_sge_tx_monitor_stop(struct adapter *adap) +{ + rte_eal_alarm_cancel(tx_timer_cb, (void *)adap); +} + +/** + * t4_free_sge_resources - free SGE resources + * @adap: the adapter + * + * Frees resources used by the SGE queue sets. 
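+ * This releases the Ethernet Rx/Tx queue pairs, the control Tx queues and
+ * the firmware event queue.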
+ */ +void t4_free_sge_resources(struct adapter *adap) +{ + unsigned int i; + struct sge_eth_rxq *rxq = &adap->sge.ethrxq[0]; + struct sge_eth_txq *txq = &adap->sge.ethtxq[0]; + + /* clean up Ethernet Tx/Rx queues */ + for (i = 0; i < adap->sge.max_ethqsets; i++, rxq++, txq++) { + /* Free only the queues allocated */ + if (rxq->rspq.desc) { + t4_sge_eth_rxq_release(adap, rxq); + rxq->rspq.eth_dev = NULL; + } + if (txq->q.desc) { + t4_sge_eth_txq_release(adap, txq); + txq->eth_dev = NULL; + } + } + + /* clean up control Tx queues */ + for (i = 0; i < ARRAY_SIZE(adap->sge.ctrlq); i++) { + struct sge_ctrl_txq *cq = &adap->sge.ctrlq[i]; + + if (cq->q.desc) { + reclaim_completed_tx_imm(&cq->q); + t4_ctrl_eq_free(adap, adap->mbox, adap->pf, 0, + cq->q.cntxt_id); + free_txq(&cq->q); + } + } + + if (adap->sge.fw_evtq.desc) + free_rspq_fl(adap, &adap->sge.fw_evtq, NULL); +} + +/** + * t4_sge_init - initialize SGE + * @adap: the adapter + * + * Performs SGE initialization needed every time after a chip reset. + * We do not initialize any of the queues here, instead the driver + * top-level must request those individually. + * + * Called in two different modes: + * + * 1. Perform actual hardware initialization and record hard-coded + * parameters which were used. This gets used when we're the + * Master PF and the Firmware Configuration File support didn't + * work for some reason. + * + * 2. We're not the Master PF or initialization was performed with + * a Firmware Configuration File. In this case we need to grab + * any of the SGE operating parameters that we need to have in + * order to do our job and make sure we can live with them ... + */ +static int t4_sge_init_soft(struct adapter *adap) +{ + struct sge *s = &adap->sge; + u32 fl_small_pg, fl_large_pg, fl_small_mtu, fl_large_mtu; + u32 timer_value_0_and_1, timer_value_2_and_3, timer_value_4_and_5; + u32 ingress_rx_threshold; + + /* + * Verify that CPL messages are going to the Ingress Queue for + * process_responses() and that only packet data is going to the + * Free Lists. + */ + if ((t4_read_reg(adap, A_SGE_CONTROL) & F_RXPKTCPLMODE) != + V_RXPKTCPLMODE(X_RXPKTCPLMODE_SPLIT)) { + dev_err(adap, "bad SGE CPL MODE\n"); + return -EINVAL; + } + + /* + * Validate the Host Buffer Register Array indices that we want to + * use ... + * + * XXX Note that we should really read through the Host Buffer Size + * XXX register array and find the indices of the Buffer Sizes which + * XXX meet our needs! + */ +#define READ_FL_BUF(x) \ + t4_read_reg(adap, A_SGE_FL_BUFFER_SIZE0 + (x) * sizeof(u32)) + + fl_small_pg = READ_FL_BUF(RX_SMALL_PG_BUF); + fl_large_pg = READ_FL_BUF(RX_LARGE_PG_BUF); + fl_small_mtu = READ_FL_BUF(RX_SMALL_MTU_BUF); + fl_large_mtu = READ_FL_BUF(RX_LARGE_MTU_BUF); + + /* + * We only bother using the Large Page logic if the Large Page Buffer + * is larger than our Page Size Buffer. + */ + if (fl_large_pg <= fl_small_pg) + fl_large_pg = 0; + +#undef READ_FL_BUF + + /* + * The Page Size Buffer must be exactly equal to our Page Size and the + * Large Page Size Buffer should be 0 (per above) or a power of 2. 
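+	 * The check below relies on x & (x - 1) being zero exactly when x is
+	 * 0 or a power of two.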
+ */ + if (fl_small_pg != CXGBE_PAGE_SIZE || + (fl_large_pg & (fl_large_pg - 1)) != 0) { + dev_err(adap, "bad SGE FL page buffer sizes [%d, %d]\n", + fl_small_pg, fl_large_pg); + return -EINVAL; + } + if (fl_large_pg) + s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT; + + if (adap->use_unpacked_mode) { + int err = 0; + + if (fl_small_mtu < FL_MTU_SMALL_BUFSIZE(adap)) { + dev_err(adap, "bad SGE FL small MTU %d\n", + fl_small_mtu); + err = -EINVAL; + } + if (fl_large_mtu < FL_MTU_LARGE_BUFSIZE(adap)) { + dev_err(adap, "bad SGE FL large MTU %d\n", + fl_large_mtu); + err = -EINVAL; + } + if (err) + return err; + } + + /* + * Retrieve our RX interrupt holdoff timer values and counter + * threshold values from the SGE parameters. + */ + timer_value_0_and_1 = t4_read_reg(adap, A_SGE_TIMER_VALUE_0_AND_1); + timer_value_2_and_3 = t4_read_reg(adap, A_SGE_TIMER_VALUE_2_AND_3); + timer_value_4_and_5 = t4_read_reg(adap, A_SGE_TIMER_VALUE_4_AND_5); + s->timer_val[0] = core_ticks_to_us(adap, + G_TIMERVALUE0(timer_value_0_and_1)); + s->timer_val[1] = core_ticks_to_us(adap, + G_TIMERVALUE1(timer_value_0_and_1)); + s->timer_val[2] = core_ticks_to_us(adap, + G_TIMERVALUE2(timer_value_2_and_3)); + s->timer_val[3] = core_ticks_to_us(adap, + G_TIMERVALUE3(timer_value_2_and_3)); + s->timer_val[4] = core_ticks_to_us(adap, + G_TIMERVALUE4(timer_value_4_and_5)); + s->timer_val[5] = core_ticks_to_us(adap, + G_TIMERVALUE5(timer_value_4_and_5)); + + ingress_rx_threshold = t4_read_reg(adap, A_SGE_INGRESS_RX_THRESHOLD); + s->counter_val[0] = G_THRESHOLD_0(ingress_rx_threshold); + s->counter_val[1] = G_THRESHOLD_1(ingress_rx_threshold); + s->counter_val[2] = G_THRESHOLD_2(ingress_rx_threshold); + s->counter_val[3] = G_THRESHOLD_3(ingress_rx_threshold); + + return 0; +} + +int t4_sge_init(struct adapter *adap) +{ + struct sge *s = &adap->sge; + u32 sge_control, sge_conm_ctrl; + int ret, egress_threshold; + + /* + * Ingress Padding Boundary and Egress Status Page Size are set up by + * t4_fixup_host_params(). + */ + sge_control = t4_read_reg(adap, A_SGE_CONTROL); + s->pktshift = G_PKTSHIFT(sge_control); + s->stat_len = (sge_control & F_EGRSTATUSPAGESIZE) ? 128 : 64; + s->fl_align = t4_fl_pkt_align(adap); + ret = t4_sge_init_soft(adap); + if (ret < 0) { + dev_err(adap, "%s: t4_sge_init_soft failed, error %d\n", + __func__, -ret); + return ret; + } + + /* + * A FL with <= fl_starve_thres buffers is starving and a periodic + * timer will attempt to refill it. This needs to be larger than the + * SGE's Egress Congestion Threshold. If it isn't, then we can get + * stuck waiting for new packets while the SGE is waiting for us to + * give it more Free List entries. (Note that the SGE's Egress + * Congestion Threshold is in units of 2 Free List pointers.) For T4, + * there was only a single field to control this. For T5 there's the + * original field which now only applies to Unpacked Mode Free List + * buffers and a new field which only applies to Packed Mode Free List + * buffers. 
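+	 * The value computed below is 2 * egress_threshold + 1, i.e. one
+	 * buffer more than the SGE's threshold expressed in buffers.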
+ */ + sge_conm_ctrl = t4_read_reg(adap, A_SGE_CONM_CTRL); + if (is_t4(adap->params.chip) || adap->use_unpacked_mode) + egress_threshold = G_EGRTHRESHOLD(sge_conm_ctrl); + else + egress_threshold = G_EGRTHRESHOLDPACKING(sge_conm_ctrl); + s->fl_starve_thres = 2 * egress_threshold + 1; + + return 0; +} + +int t4vf_sge_init(struct adapter *adap) +{ + struct sge_params *sge_params = &adap->params.sge; + u32 sge_ingress_queues_per_page; + u32 sge_egress_queues_per_page; + u32 sge_control, sge_control2; + u32 fl_small_pg, fl_large_pg; + u32 sge_ingress_rx_threshold; + u32 sge_timer_value_0_and_1; + u32 sge_timer_value_2_and_3; + u32 sge_timer_value_4_and_5; + u32 sge_congestion_control; + struct sge *s = &adap->sge; + unsigned int s_hps, s_qpp; + u32 sge_host_page_size; + u32 params[7], vals[7]; + int v; + + /* query basic params from fw */ + params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | + V_FW_PARAMS_PARAM_XYZ(A_SGE_CONTROL)); + params[1] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | + V_FW_PARAMS_PARAM_XYZ(A_SGE_HOST_PAGE_SIZE)); + params[2] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | + V_FW_PARAMS_PARAM_XYZ(A_SGE_FL_BUFFER_SIZE0)); + params[3] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | + V_FW_PARAMS_PARAM_XYZ(A_SGE_FL_BUFFER_SIZE1)); + params[4] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | + V_FW_PARAMS_PARAM_XYZ(A_SGE_TIMER_VALUE_0_AND_1)); + params[5] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | + V_FW_PARAMS_PARAM_XYZ(A_SGE_TIMER_VALUE_2_AND_3)); + params[6] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | + V_FW_PARAMS_PARAM_XYZ(A_SGE_TIMER_VALUE_4_AND_5)); + v = t4vf_query_params(adap, 7, params, vals); + if (v != FW_SUCCESS) + return v; + + sge_control = vals[0]; + sge_host_page_size = vals[1]; + fl_small_pg = vals[2]; + fl_large_pg = vals[3]; + sge_timer_value_0_and_1 = vals[4]; + sge_timer_value_2_and_3 = vals[5]; + sge_timer_value_4_and_5 = vals[6]; + + /* + * Start by vetting the basic SGE parameters which have been set up by + * the Physical Function Driver. + */ + + /* We only bother using the Large Page logic if the Large Page Buffer + * is larger than our Page Size Buffer. + */ + if (fl_large_pg <= fl_small_pg) + fl_large_pg = 0; + + /* The Page Size Buffer must be exactly equal to our Page Size and the + * Large Page Size Buffer should be 0 (per above) or a power of 2. 
+ */ + if (fl_small_pg != CXGBE_PAGE_SIZE || + (fl_large_pg & (fl_large_pg - 1)) != 0) { + dev_err(adapter->pdev_dev, "bad SGE FL buffer sizes [%d, %d]\n", + fl_small_pg, fl_large_pg); + return -EINVAL; + } + + if ((sge_control & F_RXPKTCPLMODE) != + V_RXPKTCPLMODE(X_RXPKTCPLMODE_SPLIT)) { + dev_err(adapter->pdev_dev, "bad SGE CPL MODE\n"); + return -EINVAL; + } + + + /* Grab ingress packing boundary from SGE_CONTROL2 for */ + params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | + V_FW_PARAMS_PARAM_XYZ(A_SGE_CONTROL2)); + v = t4vf_query_params(adap, 1, params, vals); + if (v != FW_SUCCESS) { + dev_err(adapter, "Unable to get SGE Control2; " + "probably old firmware.\n"); + return v; + } + sge_control2 = vals[0]; + + params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | + V_FW_PARAMS_PARAM_XYZ(A_SGE_INGRESS_RX_THRESHOLD)); + params[1] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | + V_FW_PARAMS_PARAM_XYZ(A_SGE_CONM_CTRL)); + v = t4vf_query_params(adap, 2, params, vals); + if (v != FW_SUCCESS) + return v; + sge_ingress_rx_threshold = vals[0]; + sge_congestion_control = vals[1]; + params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | + V_FW_PARAMS_PARAM_XYZ(A_SGE_EGRESS_QUEUES_PER_PAGE_VF)); + params[1] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | + V_FW_PARAMS_PARAM_XYZ(A_SGE_INGRESS_QUEUES_PER_PAGE_VF)); + v = t4vf_query_params(adap, 2, params, vals); + if (v != FW_SUCCESS) { + dev_warn(adap, "Unable to get VF SGE Queues/Page; " + "probably old firmware.\n"); + return v; + } + sge_egress_queues_per_page = vals[0]; + sge_ingress_queues_per_page = vals[1]; + + /* + * We need the Queues/Page for our VF. This is based on the + * PF from which we're instantiated and is indexed in the + * register we just read. + */ + s_hps = (S_HOSTPAGESIZEPF0 + + (S_HOSTPAGESIZEPF1 - S_HOSTPAGESIZEPF0) * adap->pf); + sge_params->hps = + ((sge_host_page_size >> s_hps) & M_HOSTPAGESIZEPF0); + + s_qpp = (S_QUEUESPERPAGEPF0 + + (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adap->pf); + sge_params->eq_qpp = + ((sge_egress_queues_per_page >> s_qpp) + & M_QUEUESPERPAGEPF0); + sge_params->iq_qpp = + ((sge_ingress_queues_per_page >> s_qpp) + & M_QUEUESPERPAGEPF0); + + /* + * Now translate the queried parameters into our internal forms. + */ + if (fl_large_pg) + s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT; + s->stat_len = ((sge_control & F_EGRSTATUSPAGESIZE) + ? 128 : 64); + s->pktshift = G_PKTSHIFT(sge_control); + s->fl_align = t4vf_fl_pkt_align(adap, sge_control, sge_control2); + + /* + * A FL with <= fl_starve_thres buffers is starving and a periodic + * timer will attempt to refill it. This needs to be larger than the + * SGE's Egress Congestion Threshold. If it isn't, then we can get + * stuck waiting for new packets while the SGE is waiting for us to + * give it more Free List entries. (Note that the SGE's Egress + * Congestion Threshold is in units of 2 Free List pointers.) + */ + switch (CHELSIO_CHIP_VERSION(adap->params.chip)) { + case CHELSIO_T5: + s->fl_starve_thres = + G_EGRTHRESHOLDPACKING(sge_congestion_control); + break; + case CHELSIO_T6: + default: + s->fl_starve_thres = + G_T6_EGRTHRESHOLDPACKING(sge_congestion_control); + break; + } + s->fl_starve_thres = s->fl_starve_thres * 2 + 1; + + /* + * Save RX interrupt holdoff timer values and counter + * threshold values from the SGE parameters. 
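+ * The timer fields are converted from core clock ticks to microseconds
+ * via core_ticks_to_us(), while the four interrupt counter thresholds
+ * are stored as raw G_THRESHOLD_n() field values.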
+ */ + s->timer_val[0] = core_ticks_to_us(adap, + G_TIMERVALUE0(sge_timer_value_0_and_1)); + s->timer_val[1] = core_ticks_to_us(adap, + G_TIMERVALUE1(sge_timer_value_0_and_1)); + s->timer_val[2] = core_ticks_to_us(adap, + G_TIMERVALUE2(sge_timer_value_2_and_3)); + s->timer_val[3] = core_ticks_to_us(adap, + G_TIMERVALUE3(sge_timer_value_2_and_3)); + s->timer_val[4] = core_ticks_to_us(adap, + G_TIMERVALUE4(sge_timer_value_4_and_5)); + s->timer_val[5] = core_ticks_to_us(adap, + G_TIMERVALUE5(sge_timer_value_4_and_5)); + s->counter_val[0] = G_THRESHOLD_0(sge_ingress_rx_threshold); + s->counter_val[1] = G_THRESHOLD_1(sge_ingress_rx_threshold); + s->counter_val[2] = G_THRESHOLD_2(sge_ingress_rx_threshold); + s->counter_val[3] = G_THRESHOLD_3(sge_ingress_rx_threshold); + return 0; +} diff --git a/src/spdk/dpdk/drivers/net/cxgbe/smt.c b/src/spdk/dpdk/drivers/net/cxgbe/smt.c new file mode 100644 index 000000000..e8f38676e --- /dev/null +++ b/src/spdk/dpdk/drivers/net/cxgbe/smt.c @@ -0,0 +1,230 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2020 Chelsio Communications. + * All rights reserved. + */ + +#include "base/common.h" +#include "smt.h" + +void cxgbe_do_smt_write_rpl(struct adapter *adap, + const struct cpl_smt_write_rpl *rpl) +{ + unsigned int smtidx = G_TID_TID(GET_TID(rpl)); + struct smt_data *s = adap->smt; + + if (unlikely(rpl->status != CPL_ERR_NONE)) { + struct smt_entry *e = &s->smtab[smtidx]; + + dev_err(adap, + "Unexpected SMT_WRITE_RPL status %u for entry %u\n", + rpl->status, smtidx); + t4_os_lock(&e->lock); + e->state = SMT_STATE_ERROR; + t4_os_unlock(&e->lock); + } +} + +static int write_smt_entry(struct rte_eth_dev *dev, struct smt_entry *e) +{ + unsigned int port_id = ethdev2pinfo(dev)->port_id; + struct adapter *adap = ethdev2adap(dev); + struct cpl_t6_smt_write_req *t6req; + struct smt_data *s = adap->smt; + struct cpl_smt_write_req *req; + struct sge_ctrl_txq *ctrlq; + struct rte_mbuf *mbuf; + u8 row; + + ctrlq = &adap->sge.ctrlq[port_id]; + mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool); + if (!mbuf) + return -ENOMEM; + + if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5) { + mbuf->data_len = sizeof(*req); + mbuf->pkt_len = mbuf->data_len; + + /* Source MAC Table (SMT) contains 256 SMAC entries + * organized in 128 rows of 2 entries each. + */ + req = rte_pktmbuf_mtod(mbuf, struct cpl_smt_write_req *); + INIT_TP_WR(req, 0); + + /* Each row contains an SMAC pair. + * LSB selects the SMAC entry within a row + */ + if (e->idx & 1) { + req->pfvf1 = 0x0; + rte_memcpy(req->src_mac1, e->src_mac, + RTE_ETHER_ADDR_LEN); + + /* fill pfvf0/src_mac0 with entry + * at prev index from smt-tab. 
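+ * (The T4/T5 CPL_SMT_WRITE_REQ carries both SMACs of a row, so the
+ * partner entry's current MAC is re-sent alongside the one being
+ * updated to avoid clobbering it.)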
+ */ + req->pfvf0 = 0x0; + rte_memcpy(req->src_mac0, s->smtab[e->idx - 1].src_mac, + RTE_ETHER_ADDR_LEN); + } else { + req->pfvf0 = 0x0; + rte_memcpy(req->src_mac0, e->src_mac, + RTE_ETHER_ADDR_LEN); + + /* fill pfvf1/src_mac1 with entry + * at next index from smt-tab + */ + req->pfvf1 = 0x0; + rte_memcpy(req->src_mac1, s->smtab[e->idx + 1].src_mac, + RTE_ETHER_ADDR_LEN); + } + row = (e->hw_idx >> 1); + } else { + mbuf->data_len = sizeof(*t6req); + mbuf->pkt_len = mbuf->data_len; + + /* Source MAC Table (SMT) contains 256 SMAC entries */ + t6req = rte_pktmbuf_mtod(mbuf, struct cpl_t6_smt_write_req *); + INIT_TP_WR(t6req, 0); + + /* fill pfvf0/src_mac0 from smt-tab */ + t6req->pfvf0 = 0x0; + rte_memcpy(t6req->src_mac0, s->smtab[e->idx].src_mac, + RTE_ETHER_ADDR_LEN); + row = e->hw_idx; + req = (struct cpl_smt_write_req *)t6req; + } + + OPCODE_TID(req) = + cpu_to_be32(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, + e->hw_idx | + V_TID_QID(adap->sge.fw_evtq.abs_id))); + + req->params = cpu_to_be32(V_SMTW_NORPL(0) | + V_SMTW_IDX(row) | + V_SMTW_OVLAN_IDX(0)); + t4_mgmt_tx(ctrlq, mbuf); + + return 0; +} + +/** + * find_or_alloc_smte - Find/Allocate a free SMT entry + * @s: SMT table + * @smac: Source MAC address to compare/add + * Returns pointer to the SMT entry found/created + * + * Finds/Allocates an SMT entry to be used by switching rule of a filter. + */ +static struct smt_entry *find_or_alloc_smte(struct smt_data *s, u8 *smac) +{ + struct smt_entry *e, *end, *first_free = NULL; + + for (e = &s->smtab[0], end = &s->smtab[s->smt_size]; e != end; ++e) { + if (!rte_atomic32_read(&e->refcnt)) { + if (!first_free) + first_free = e; + } else { + if (e->state == SMT_STATE_SWITCHING) { + /* This entry is actually in use. See if we can + * re-use it ? + */ + if (!memcmp(e->src_mac, smac, + RTE_ETHER_ADDR_LEN)) + goto found; + } + } + } + + if (!first_free) + return NULL; + + e = first_free; + e->state = SMT_STATE_UNUSED; + +found: + return e; +} + +static struct smt_entry *t4_smt_alloc_switching(struct rte_eth_dev *dev, + u16 pfvf, u8 *smac) +{ + struct adapter *adap = ethdev2adap(dev); + struct smt_data *s = adap->smt; + struct smt_entry *e; + int ret; + + t4_os_write_lock(&s->lock); + e = find_or_alloc_smte(s, smac); + if (e) { + t4_os_lock(&e->lock); + if (!rte_atomic32_read(&e->refcnt)) { + e->pfvf = pfvf; + rte_memcpy(e->src_mac, smac, RTE_ETHER_ADDR_LEN); + ret = write_smt_entry(dev, e); + if (ret) { + e->pfvf = 0; + memset(e->src_mac, 0, RTE_ETHER_ADDR_LEN); + t4_os_unlock(&e->lock); + e = NULL; + goto out_write_unlock; + } + e->state = SMT_STATE_SWITCHING; + rte_atomic32_set(&e->refcnt, 1); + } else { + rte_atomic32_inc(&e->refcnt); + } + t4_os_unlock(&e->lock); + } + +out_write_unlock: + t4_os_write_unlock(&s->lock); + return e; +} + +/** + * cxgbe_smt_alloc_switching - Allocate an SMT entry for switching rule + * @dev: rte_eth_dev pointer + * @smac: MAC address to add to SMT + * Returns pointer to the SMT entry created + * + * Allocates an SMT entry to be used by switching rule of a filter. 
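+ * An illustrative call sequence for a configured port (error handling
+ * elided); a NULL return means the table is full or the hardware write
+ * failed:
+ *
+ *     struct smt_entry *e = cxgbe_smt_alloc_switching(dev, smac);
+ *
+ *     if (!e)
+ *         return -ENOMEM;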
+ */ +struct smt_entry *cxgbe_smt_alloc_switching(struct rte_eth_dev *dev, u8 *smac) +{ + return t4_smt_alloc_switching(dev, 0x0, smac); +} + +/** + * Initialize Source MAC Table + */ +struct smt_data *t4_init_smt(u32 smt_start_idx, u32 smt_size) +{ + struct smt_data *s; + u32 i; + + s = t4_alloc_mem(sizeof(*s) + smt_size * sizeof(struct smt_entry)); + if (!s) + return NULL; + + s->smt_start = smt_start_idx; + s->smt_size = smt_size; + t4_os_rwlock_init(&s->lock); + + for (i = 0; i < s->smt_size; ++i) { + s->smtab[i].idx = i; + s->smtab[i].hw_idx = smt_start_idx + i; + s->smtab[i].state = SMT_STATE_UNUSED; + memset(&s->smtab[i].src_mac, 0, RTE_ETHER_ADDR_LEN); + t4_os_lock_init(&s->smtab[i].lock); + rte_atomic32_set(&s->smtab[i].refcnt, 0); + } + return s; +} + +/** + * Cleanup Source MAC Table + */ +void t4_cleanup_smt(struct adapter *adap) +{ + if (adap->smt) + t4_os_free(adap->smt); +} diff --git a/src/spdk/dpdk/drivers/net/cxgbe/smt.h b/src/spdk/dpdk/drivers/net/cxgbe/smt.h new file mode 100644 index 000000000..be1fab8ba --- /dev/null +++ b/src/spdk/dpdk/drivers/net/cxgbe/smt.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2020 Chelsio Communications. + * All rights reserved. + */ +#ifndef __CXGBE_SMT_H_ +#define __CXGBE_SMT_H_ + +#include "base/t4_msg.h" + +enum { + SMT_STATE_SWITCHING, + SMT_STATE_UNUSED, + SMT_STATE_ERROR +}; + +enum { + SMT_SIZE = 256 +}; + +struct smt_entry { + u16 state; + u16 idx; + u16 pfvf; + u16 hw_idx; + u8 src_mac[RTE_ETHER_ADDR_LEN]; + rte_atomic32_t refcnt; + rte_spinlock_t lock; +}; + +struct smt_data { + unsigned int smt_size; + unsigned int smt_start; + rte_rwlock_t lock; + struct smt_entry smtab[0]; +}; + +struct smt_data *t4_init_smt(u32 smt_start_idx, u32 smt_size); +void t4_cleanup_smt(struct adapter *adap); +void cxgbe_do_smt_write_rpl(struct adapter *adap, + const struct cpl_smt_write_rpl *rpl); +struct smt_entry *cxgbe_smt_alloc_switching(struct rte_eth_dev *dev, u8 *smac); + +#endif /* __CXGBE_SMT_H_ */ + diff --git a/src/spdk/dpdk/drivers/net/dpaa/Makefile b/src/spdk/dpdk/drivers/net/dpaa/Makefile new file mode 100644 index 000000000..d7bbc0e15 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/dpaa/Makefile @@ -0,0 +1,41 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright 2017 NXP +# + +include $(RTE_SDK)/mk/rte.vars.mk +RTE_SDK_DPAA=$(RTE_SDK)/drivers/net/dpaa + +# +# library name +# +LIB = librte_pmd_dpaa.a + +CFLAGS := -I$(SRCDIR) $(CFLAGS) +CFLAGS += -O3 $(WERROR_FLAGS) +CFLAGS += -Wno-pointer-arith +CFLAGS += -I$(RTE_SDK_DPAA)/ +CFLAGS += -I$(RTE_SDK_DPAA)/include +CFLAGS += -I$(RTE_SDK)/drivers/bus/dpaa +CFLAGS += -I$(RTE_SDK)/drivers/bus/dpaa/include/ +CFLAGS += -I$(RTE_SDK)/drivers/bus/dpaa/base/qbman +CFLAGS += -I$(RTE_SDK)/drivers/mempool/dpaa +CFLAGS += -I$(RTE_SDK)/drivers/common/dpaax +CFLAGS += -I$(RTE_SDK)/drivers/event/dpaa +CFLAGS += -I$(RTE_SDK)/lib/librte_eal/include + +EXPORT_MAP := rte_pmd_dpaa_version.map + +# Interfaces with DPDK +SRCS-$(CONFIG_RTE_LIBRTE_DPAA_PMD) += dpaa_ethdev.c +SRCS-$(CONFIG_RTE_LIBRTE_DPAA_PMD) += dpaa_rxtx.c + +LDLIBS += -lrte_bus_dpaa +LDLIBS += -lrte_mempool_dpaa +LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring +LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs +LDLIBS += -lrte_common_dpaax + +# install this header file +SYMLINK-$(CONFIG_RTE_LIBRTE_DPAA_PMD)-include := rte_pmd_dpaa.h + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/src/spdk/dpdk/drivers/net/dpaa/dpaa_ethdev.c b/src/spdk/dpdk/drivers/net/dpaa/dpaa_ethdev.c new file mode 100644 index 
000000000..13d1c6a1f --- /dev/null +++ b/src/spdk/dpdk/drivers/net/dpaa/dpaa_ethdev.c @@ -0,0 +1,1674 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright 2016 Freescale Semiconductor, Inc. All rights reserved. + * Copyright 2017-2019 NXP + * + */ +/* System headers */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include + +#include +#include +#include +#include + +int dpaa_logtype_pmd; + +/* Supported Rx offloads */ +static uint64_t dev_rx_offloads_sup = + DEV_RX_OFFLOAD_JUMBO_FRAME | + DEV_RX_OFFLOAD_SCATTER; + +/* Rx offloads which cannot be disabled */ +static uint64_t dev_rx_offloads_nodis = + DEV_RX_OFFLOAD_IPV4_CKSUM | + DEV_RX_OFFLOAD_UDP_CKSUM | + DEV_RX_OFFLOAD_TCP_CKSUM | + DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | + DEV_RX_OFFLOAD_RSS_HASH; + +/* Supported Tx offloads */ +static uint64_t dev_tx_offloads_sup = + DEV_TX_OFFLOAD_MT_LOCKFREE | + DEV_TX_OFFLOAD_MBUF_FAST_FREE; + +/* Tx offloads which cannot be disabled */ +static uint64_t dev_tx_offloads_nodis = + DEV_TX_OFFLOAD_IPV4_CKSUM | + DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_TCP_CKSUM | + DEV_TX_OFFLOAD_SCTP_CKSUM | + DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | + DEV_TX_OFFLOAD_MULTI_SEGS; + +/* Keep track of whether QMAN and BMAN have been globally initialized */ +static int is_global_init; +static int default_q; /* use default queue - FMC is not executed*/ +/* At present we only allow up to 4 push mode queues as default - as each of + * this queue need dedicated portal and we are short of portals. + */ +#define DPAA_MAX_PUSH_MODE_QUEUE 8 +#define DPAA_DEFAULT_PUSH_MODE_QUEUE 4 + +static int dpaa_push_mode_max_queue = DPAA_DEFAULT_PUSH_MODE_QUEUE; +static int dpaa_push_queue_idx; /* Queue index which are in push mode*/ + + +/* Per FQ Taildrop in frame count */ +static unsigned int td_threshold = CGR_RX_PERFQ_THRESH; + +struct rte_dpaa_xstats_name_off { + char name[RTE_ETH_XSTATS_NAME_SIZE]; + uint32_t offset; +}; + +static const struct rte_dpaa_xstats_name_off dpaa_xstats_strings[] = { + {"rx_align_err", + offsetof(struct dpaa_if_stats, raln)}, + {"rx_valid_pause", + offsetof(struct dpaa_if_stats, rxpf)}, + {"rx_fcs_err", + offsetof(struct dpaa_if_stats, rfcs)}, + {"rx_vlan_frame", + offsetof(struct dpaa_if_stats, rvlan)}, + {"rx_frame_err", + offsetof(struct dpaa_if_stats, rerr)}, + {"rx_drop_err", + offsetof(struct dpaa_if_stats, rdrp)}, + {"rx_undersized", + offsetof(struct dpaa_if_stats, rund)}, + {"rx_oversize_err", + offsetof(struct dpaa_if_stats, rovr)}, + {"rx_fragment_pkt", + offsetof(struct dpaa_if_stats, rfrg)}, + {"tx_valid_pause", + offsetof(struct dpaa_if_stats, txpf)}, + {"tx_fcs_err", + offsetof(struct dpaa_if_stats, terr)}, + {"tx_vlan_frame", + offsetof(struct dpaa_if_stats, tvlan)}, + {"rx_undersized", + offsetof(struct dpaa_if_stats, tund)}, +}; + +static struct rte_dpaa_driver rte_dpaa_pmd; + +static int +dpaa_eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info); + +static inline void +dpaa_poll_queue_default_config(struct qm_mcc_initfq *opts) +{ + memset(opts, 0, sizeof(struct qm_mcc_initfq)); + opts->we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CONTEXTA; + opts->fqd.fq_ctrl = QM_FQCTRL_AVOIDBLOCK | QM_FQCTRL_CTXASTASHING | + QM_FQCTRL_PREFERINCACHE; + opts->fqd.context_a.stashing.exclusive = 0; + if (dpaa_svr_family 
!= SVR_LS1046A_FAMILY) + opts->fqd.context_a.stashing.annotation_cl = + DPAA_IF_RX_ANNOTATION_STASH; + opts->fqd.context_a.stashing.data_cl = DPAA_IF_RX_DATA_STASH; + opts->fqd.context_a.stashing.context_cl = DPAA_IF_RX_CONTEXT_STASH; +} + +static int +dpaa_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) +{ + struct dpaa_if *dpaa_intf = dev->data->dev_private; + uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + + VLAN_TAG_SIZE; + uint32_t buffsz = dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM; + + PMD_INIT_FUNC_TRACE(); + + if (mtu < RTE_ETHER_MIN_MTU || frame_size > DPAA_MAX_RX_PKT_LEN) + return -EINVAL; + /* + * Refuse mtu that requires the support of scattered packets + * when this feature has not been enabled before. + */ + if (dev->data->min_rx_buf_size && + !dev->data->scattered_rx && frame_size > buffsz) { + DPAA_PMD_ERR("SG not enabled, will not fit in one buffer"); + return -EINVAL; + } + + /* check * >= max_frame */ + if (dev->data->min_rx_buf_size && dev->data->scattered_rx && + (frame_size > buffsz * DPAA_SGT_MAX_ENTRIES)) { + DPAA_PMD_ERR("Too big to fit for Max SG list %d", + buffsz * DPAA_SGT_MAX_ENTRIES); + return -EINVAL; + } + + if (frame_size > RTE_ETHER_MAX_LEN) + dev->data->dev_conf.rxmode.offloads |= + DEV_RX_OFFLOAD_JUMBO_FRAME; + else + dev->data->dev_conf.rxmode.offloads &= + ~DEV_RX_OFFLOAD_JUMBO_FRAME; + + dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size; + + fman_if_set_maxfrm(dpaa_intf->fif, frame_size); + + return 0; +} + +static int +dpaa_eth_dev_configure(struct rte_eth_dev *dev) +{ + struct dpaa_if *dpaa_intf = dev->data->dev_private; + struct rte_eth_conf *eth_conf = &dev->data->dev_conf; + uint64_t rx_offloads = eth_conf->rxmode.offloads; + uint64_t tx_offloads = eth_conf->txmode.offloads; + + PMD_INIT_FUNC_TRACE(); + + /* Rx offloads which are enabled by default */ + if (dev_rx_offloads_nodis & ~rx_offloads) { + DPAA_PMD_INFO( + "Some of rx offloads enabled by default - requested 0x%" PRIx64 + " fixed are 0x%" PRIx64, + rx_offloads, dev_rx_offloads_nodis); + } + + /* Tx offloads which are enabled by default */ + if (dev_tx_offloads_nodis & ~tx_offloads) { + DPAA_PMD_INFO( + "Some of tx offloads enabled by default - requested 0x%" PRIx64 + " fixed are 0x%" PRIx64, + tx_offloads, dev_tx_offloads_nodis); + } + + if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) { + uint32_t max_len; + + DPAA_PMD_DEBUG("enabling jumbo"); + + if (dev->data->dev_conf.rxmode.max_rx_pkt_len <= + DPAA_MAX_RX_PKT_LEN) + max_len = dev->data->dev_conf.rxmode.max_rx_pkt_len; + else { + DPAA_PMD_INFO("enabling jumbo override conf max len=%d " + "supported is %d", + dev->data->dev_conf.rxmode.max_rx_pkt_len, + DPAA_MAX_RX_PKT_LEN); + max_len = DPAA_MAX_RX_PKT_LEN; + } + + fman_if_set_maxfrm(dpaa_intf->fif, max_len); + dev->data->mtu = max_len + - RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN - VLAN_TAG_SIZE; + } + + if (rx_offloads & DEV_RX_OFFLOAD_SCATTER) { + DPAA_PMD_DEBUG("enabling scatter mode"); + fman_if_set_sg(dpaa_intf->fif, 1); + dev->data->scattered_rx = 1; + } + + return 0; +} + +static const uint32_t * +dpaa_supported_ptypes_get(struct rte_eth_dev *dev) +{ + static const uint32_t ptypes[] = { + RTE_PTYPE_L2_ETHER, + RTE_PTYPE_L2_ETHER_VLAN, + RTE_PTYPE_L2_ETHER_ARP, + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN, + RTE_PTYPE_L3_IPV6_EXT_UNKNOWN, + RTE_PTYPE_L4_ICMP, + RTE_PTYPE_L4_TCP, + RTE_PTYPE_L4_UDP, + RTE_PTYPE_L4_FRAG, + RTE_PTYPE_L4_TCP, + RTE_PTYPE_L4_UDP, + RTE_PTYPE_L4_SCTP + }; + + PMD_INIT_FUNC_TRACE(); + + if (dev->rx_pkt_burst == dpaa_eth_queue_rx) + return 
ptypes; + return NULL; +} + +static int dpaa_eth_dev_start(struct rte_eth_dev *dev) +{ + struct dpaa_if *dpaa_intf = dev->data->dev_private; + + PMD_INIT_FUNC_TRACE(); + + /* Change tx callback to the real one */ + dev->tx_pkt_burst = dpaa_eth_queue_tx; + fman_if_enable_rx(dpaa_intf->fif); + + return 0; +} + +static void dpaa_eth_dev_stop(struct rte_eth_dev *dev) +{ + struct dpaa_if *dpaa_intf = dev->data->dev_private; + + PMD_INIT_FUNC_TRACE(); + + fman_if_disable_rx(dpaa_intf->fif); + dev->tx_pkt_burst = dpaa_eth_tx_drop_all; +} + +static void dpaa_eth_dev_close(struct rte_eth_dev *dev) +{ + PMD_INIT_FUNC_TRACE(); + + dpaa_eth_dev_stop(dev); +} + +static int +dpaa_fw_version_get(struct rte_eth_dev *dev __rte_unused, + char *fw_version, + size_t fw_size) +{ + int ret; + FILE *svr_file = NULL; + unsigned int svr_ver = 0; + + PMD_INIT_FUNC_TRACE(); + + svr_file = fopen(DPAA_SOC_ID_FILE, "r"); + if (!svr_file) { + DPAA_PMD_ERR("Unable to open SoC device"); + return -ENOTSUP; /* Not supported on this infra */ + } + if (fscanf(svr_file, "svr:%x", &svr_ver) > 0) + dpaa_svr_family = svr_ver & SVR_MASK; + else + DPAA_PMD_ERR("Unable to read SoC device"); + + fclose(svr_file); + + ret = snprintf(fw_version, fw_size, "SVR:%x-fman-v%x", + svr_ver, fman_ip_rev); + ret += 1; /* add the size of '\0' */ + + if (fw_size < (uint32_t)ret) + return ret; + else + return 0; +} + +static int dpaa_eth_dev_info(struct rte_eth_dev *dev, + struct rte_eth_dev_info *dev_info) +{ + struct dpaa_if *dpaa_intf = dev->data->dev_private; + + DPAA_PMD_DEBUG(": %s", dpaa_intf->name); + + dev_info->max_rx_queues = dpaa_intf->nb_rx_queues; + dev_info->max_tx_queues = dpaa_intf->nb_tx_queues; + dev_info->max_rx_pktlen = DPAA_MAX_RX_PKT_LEN; + dev_info->max_mac_addrs = DPAA_MAX_MAC_FILTER; + dev_info->max_hash_mac_addrs = 0; + dev_info->max_vfs = 0; + dev_info->max_vmdq_pools = ETH_16_POOLS; + dev_info->flow_type_rss_offloads = DPAA_RSS_OFFLOAD_ALL; + + if (dpaa_intf->fif->mac_type == fman_mac_1g) { + dev_info->speed_capa = ETH_LINK_SPEED_1G; + } else if (dpaa_intf->fif->mac_type == fman_mac_10g) { + dev_info->speed_capa = (ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G); + } else { + DPAA_PMD_ERR("invalid link_speed: %s, %d", + dpaa_intf->name, dpaa_intf->fif->mac_type); + return -EINVAL; + } + + dev_info->rx_offload_capa = dev_rx_offloads_sup | + dev_rx_offloads_nodis; + dev_info->tx_offload_capa = dev_tx_offloads_sup | + dev_tx_offloads_nodis; + dev_info->default_rxportconf.burst_size = DPAA_DEF_RX_BURST_SIZE; + dev_info->default_txportconf.burst_size = DPAA_DEF_TX_BURST_SIZE; + dev_info->default_rxportconf.nb_queues = 1; + dev_info->default_txportconf.nb_queues = 1; + dev_info->default_txportconf.ring_size = CGR_TX_CGR_THRESH; + dev_info->default_rxportconf.ring_size = CGR_RX_PERFQ_THRESH; + + return 0; +} + +static int dpaa_eth_link_update(struct rte_eth_dev *dev, + int wait_to_complete __rte_unused) +{ + struct dpaa_if *dpaa_intf = dev->data->dev_private; + struct rte_eth_link *link = &dev->data->dev_link; + + PMD_INIT_FUNC_TRACE(); + + if (dpaa_intf->fif->mac_type == fman_mac_1g) + link->link_speed = ETH_SPEED_NUM_1G; + else if (dpaa_intf->fif->mac_type == fman_mac_10g) + link->link_speed = ETH_SPEED_NUM_10G; + else + DPAA_PMD_ERR("invalid link_speed: %s, %d", + dpaa_intf->name, dpaa_intf->fif->mac_type); + + link->link_status = dpaa_intf->valid; + link->link_duplex = ETH_LINK_FULL_DUPLEX; + link->link_autoneg = ETH_LINK_AUTONEG; + return 0; +} + +static int dpaa_eth_stats_get(struct rte_eth_dev *dev, + struct rte_eth_stats 
*stats) +{ + struct dpaa_if *dpaa_intf = dev->data->dev_private; + + PMD_INIT_FUNC_TRACE(); + + fman_if_stats_get(dpaa_intf->fif, stats); + return 0; +} + +static int dpaa_eth_stats_reset(struct rte_eth_dev *dev) +{ + struct dpaa_if *dpaa_intf = dev->data->dev_private; + + PMD_INIT_FUNC_TRACE(); + + fman_if_stats_reset(dpaa_intf->fif); + + return 0; +} + +static int +dpaa_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, + unsigned int n) +{ + struct dpaa_if *dpaa_intf = dev->data->dev_private; + unsigned int i = 0, num = RTE_DIM(dpaa_xstats_strings); + uint64_t values[sizeof(struct dpaa_if_stats) / 8]; + + if (n < num) + return num; + + if (xstats == NULL) + return 0; + + fman_if_stats_get_all(dpaa_intf->fif, values, + sizeof(struct dpaa_if_stats) / 8); + + for (i = 0; i < num; i++) { + xstats[i].id = i; + xstats[i].value = values[dpaa_xstats_strings[i].offset / 8]; + } + return i; +} + +static int +dpaa_xstats_get_names(__rte_unused struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, + unsigned int limit) +{ + unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings); + + if (limit < stat_cnt) + return stat_cnt; + + if (xstats_names != NULL) + for (i = 0; i < stat_cnt; i++) + strlcpy(xstats_names[i].name, + dpaa_xstats_strings[i].name, + sizeof(xstats_names[i].name)); + + return stat_cnt; +} + +static int +dpaa_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids, + uint64_t *values, unsigned int n) +{ + unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings); + uint64_t values_copy[sizeof(struct dpaa_if_stats) / 8]; + + if (!ids) { + struct dpaa_if *dpaa_intf = dev->data->dev_private; + + if (n < stat_cnt) + return stat_cnt; + + if (!values) + return 0; + + fman_if_stats_get_all(dpaa_intf->fif, values_copy, + sizeof(struct dpaa_if_stats) / 8); + + for (i = 0; i < stat_cnt; i++) + values[i] = + values_copy[dpaa_xstats_strings[i].offset / 8]; + + return stat_cnt; + } + + dpaa_xstats_get_by_id(dev, NULL, values_copy, stat_cnt); + + for (i = 0; i < n; i++) { + if (ids[i] >= stat_cnt) { + DPAA_PMD_ERR("id value isn't valid"); + return -1; + } + values[i] = values_copy[ids[i]]; + } + return n; +} + +static int +dpaa_xstats_get_names_by_id( + struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, + const uint64_t *ids, + unsigned int limit) +{ + unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings); + struct rte_eth_xstat_name xstats_names_copy[stat_cnt]; + + if (!ids) + return dpaa_xstats_get_names(dev, xstats_names, limit); + + dpaa_xstats_get_names(dev, xstats_names_copy, limit); + + for (i = 0; i < limit; i++) { + if (ids[i] >= stat_cnt) { + DPAA_PMD_ERR("id value isn't valid"); + return -1; + } + strcpy(xstats_names[i].name, xstats_names_copy[ids[i]].name); + } + return limit; +} + +static int dpaa_eth_promiscuous_enable(struct rte_eth_dev *dev) +{ + struct dpaa_if *dpaa_intf = dev->data->dev_private; + + PMD_INIT_FUNC_TRACE(); + + fman_if_promiscuous_enable(dpaa_intf->fif); + + return 0; +} + +static int dpaa_eth_promiscuous_disable(struct rte_eth_dev *dev) +{ + struct dpaa_if *dpaa_intf = dev->data->dev_private; + + PMD_INIT_FUNC_TRACE(); + + fman_if_promiscuous_disable(dpaa_intf->fif); + + return 0; +} + +static int dpaa_eth_multicast_enable(struct rte_eth_dev *dev) +{ + struct dpaa_if *dpaa_intf = dev->data->dev_private; + + PMD_INIT_FUNC_TRACE(); + + fman_if_set_mcast_filter_table(dpaa_intf->fif); + + return 0; +} + +static int dpaa_eth_multicast_disable(struct rte_eth_dev *dev) +{ + struct dpaa_if *dpaa_intf = 
dev->data->dev_private; + + PMD_INIT_FUNC_TRACE(); + + fman_if_reset_mcast_filter_table(dpaa_intf->fif); + + return 0; +} + +static +int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, + uint16_t nb_desc, + unsigned int socket_id __rte_unused, + const struct rte_eth_rxconf *rx_conf __rte_unused, + struct rte_mempool *mp) +{ + struct dpaa_if *dpaa_intf = dev->data->dev_private; + struct qman_fq *rxq = &dpaa_intf->rx_queues[queue_idx]; + struct qm_mcc_initfq opts = {0}; + u32 flags = 0; + int ret; + u32 buffsz = rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM; + + PMD_INIT_FUNC_TRACE(); + + if (queue_idx >= dev->data->nb_rx_queues) { + rte_errno = EOVERFLOW; + DPAA_PMD_ERR("%p: queue index out of range (%u >= %u)", + (void *)dev, queue_idx, dev->data->nb_rx_queues); + return -rte_errno; + } + + DPAA_PMD_INFO("Rx queue setup for queue index: %d fq_id (0x%x)", + queue_idx, rxq->fqid); + + /* Max packet can fit in single buffer */ + if (dev->data->dev_conf.rxmode.max_rx_pkt_len <= buffsz) { + ; + } else if (dev->data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_SCATTER) { + if (dev->data->dev_conf.rxmode.max_rx_pkt_len > + buffsz * DPAA_SGT_MAX_ENTRIES) { + DPAA_PMD_ERR("max RxPkt size %d too big to fit " + "MaxSGlist %d", + dev->data->dev_conf.rxmode.max_rx_pkt_len, + buffsz * DPAA_SGT_MAX_ENTRIES); + rte_errno = EOVERFLOW; + return -rte_errno; + } + } else { + DPAA_PMD_WARN("The requested maximum Rx packet size (%u) is" + " larger than a single mbuf (%u) and scattered" + " mode has not been requested", + dev->data->dev_conf.rxmode.max_rx_pkt_len, + buffsz - RTE_PKTMBUF_HEADROOM); + } + + if (!dpaa_intf->bp_info || dpaa_intf->bp_info->mp != mp) { + struct fman_if_ic_params icp; + uint32_t fd_offset; + uint32_t bp_size; + + if (!mp->pool_data) { + DPAA_PMD_ERR("Not an offloaded buffer pool!"); + return -1; + } + dpaa_intf->bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp); + + memset(&icp, 0, sizeof(icp)); + /* set ICEOF for to the default value , which is 0*/ + icp.iciof = DEFAULT_ICIOF; + icp.iceof = DEFAULT_RX_ICEOF; + icp.icsz = DEFAULT_ICSZ; + fman_if_set_ic_params(dpaa_intf->fif, &icp); + + fd_offset = RTE_PKTMBUF_HEADROOM + DPAA_HW_BUF_RESERVE; + fman_if_set_fdoff(dpaa_intf->fif, fd_offset); + + /* Buffer pool size should be equal to Dataroom Size*/ + bp_size = rte_pktmbuf_data_room_size(mp); + fman_if_set_bp(dpaa_intf->fif, mp->size, + dpaa_intf->bp_info->bpid, bp_size); + dpaa_intf->valid = 1; + DPAA_PMD_DEBUG("if:%s fd_offset = %d offset = %d", + dpaa_intf->name, fd_offset, + fman_if_get_fdoff(dpaa_intf->fif)); + } + DPAA_PMD_DEBUG("if:%s sg_on = %d, max_frm =%d", dpaa_intf->name, + fman_if_get_sg_enable(dpaa_intf->fif), + dev->data->dev_conf.rxmode.max_rx_pkt_len); + /* checking if push mode only, no error check for now */ + if (!rxq->is_static && + dpaa_push_mode_max_queue > dpaa_push_queue_idx) { + struct qman_portal *qp; + int q_fd; + + dpaa_push_queue_idx++; + opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CONTEXTA; + opts.fqd.fq_ctrl = QM_FQCTRL_AVOIDBLOCK | + QM_FQCTRL_CTXASTASHING | + QM_FQCTRL_PREFERINCACHE; + opts.fqd.context_a.stashing.exclusive = 0; + /* In muticore scenario stashing becomes a bottleneck on LS1046. 
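+ * (Stashing here means pre-loading the frame annotation, data and
+ * context cache lines into the dequeuing core's cache, as configured by
+ * the annotation_cl/data_cl/context_cl fields below.)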
+ * So do not enable stashing in this case + */ + if (dpaa_svr_family != SVR_LS1046A_FAMILY) + opts.fqd.context_a.stashing.annotation_cl = + DPAA_IF_RX_ANNOTATION_STASH; + opts.fqd.context_a.stashing.data_cl = DPAA_IF_RX_DATA_STASH; + opts.fqd.context_a.stashing.context_cl = + DPAA_IF_RX_CONTEXT_STASH; + + /*Create a channel and associate given queue with the channel*/ + qman_alloc_pool_range((u32 *)&rxq->ch_id, 1, 1, 0); + opts.we_mask = opts.we_mask | QM_INITFQ_WE_DESTWQ; + opts.fqd.dest.channel = rxq->ch_id; + opts.fqd.dest.wq = DPAA_IF_RX_PRIORITY; + flags = QMAN_INITFQ_FLAG_SCHED; + + /* Configure tail drop */ + if (dpaa_intf->cgr_rx) { + opts.we_mask |= QM_INITFQ_WE_CGID; + opts.fqd.cgid = dpaa_intf->cgr_rx[queue_idx].cgrid; + opts.fqd.fq_ctrl |= QM_FQCTRL_CGE; + } + ret = qman_init_fq(rxq, flags, &opts); + if (ret) { + DPAA_PMD_ERR("Channel/Q association failed. fqid 0x%x " + "ret:%d(%s)", rxq->fqid, ret, strerror(ret)); + return ret; + } + if (dpaa_svr_family == SVR_LS1043A_FAMILY) { + rxq->cb.dqrr_dpdk_pull_cb = dpaa_rx_cb_no_prefetch; + } else { + rxq->cb.dqrr_dpdk_pull_cb = dpaa_rx_cb; + rxq->cb.dqrr_prepare = dpaa_rx_cb_prepare; + } + + rxq->is_static = true; + + /* Allocate qman specific portals */ + qp = fsl_qman_fq_portal_create(&q_fd); + if (!qp) { + DPAA_PMD_ERR("Unable to alloc fq portal"); + return -1; + } + rxq->qp = qp; + + /* Set up the device interrupt handler */ + if (!dev->intr_handle) { + struct rte_dpaa_device *dpaa_dev; + struct rte_device *rdev = dev->device; + + dpaa_dev = container_of(rdev, struct rte_dpaa_device, + device); + dev->intr_handle = &dpaa_dev->intr_handle; + dev->intr_handle->intr_vec = rte_zmalloc(NULL, + dpaa_push_mode_max_queue, 0); + if (!dev->intr_handle->intr_vec) { + DPAA_PMD_ERR("intr_vec alloc failed"); + return -ENOMEM; + } + dev->intr_handle->nb_efd = dpaa_push_mode_max_queue; + dev->intr_handle->max_intr = dpaa_push_mode_max_queue; + } + + dev->intr_handle->type = RTE_INTR_HANDLE_EXT; + dev->intr_handle->intr_vec[queue_idx] = queue_idx + 1; + dev->intr_handle->efds[queue_idx] = q_fd; + rxq->q_fd = q_fd; + } + rxq->bp_array = rte_dpaa_bpid_info; + dev->data->rx_queues[queue_idx] = rxq; + + /* configure the CGR size as per the desc size */ + if (dpaa_intf->cgr_rx) { + struct qm_mcc_initcgr cgr_opts = {0}; + + /* Enable tail drop with cgr on this queue */ + qm_cgr_cs_thres_set64(&cgr_opts.cgr.cs_thres, nb_desc, 0); + ret = qman_modify_cgr(dpaa_intf->cgr_rx, 0, &cgr_opts); + if (ret) { + DPAA_PMD_WARN( + "rx taildrop modify fail on fqid %d (ret=%d)", + rxq->fqid, ret); + } + } + + return 0; +} + +int +dpaa_eth_eventq_attach(const struct rte_eth_dev *dev, + int eth_rx_queue_id, + u16 ch_id, + const struct rte_event_eth_rx_adapter_queue_conf *queue_conf) +{ + int ret; + u32 flags = 0; + struct dpaa_if *dpaa_intf = dev->data->dev_private; + struct qman_fq *rxq = &dpaa_intf->rx_queues[eth_rx_queue_id]; + struct qm_mcc_initfq opts = {0}; + + if (dpaa_push_mode_max_queue) + DPAA_PMD_WARN("PUSH mode q and EVENTDEV are not compatible\n" + "PUSH mode already enabled for first %d queues.\n" + "To disable set DPAA_PUSH_QUEUES_NUMBER to 0\n", + dpaa_push_mode_max_queue); + + dpaa_poll_queue_default_config(&opts); + + switch (queue_conf->ev.sched_type) { + case RTE_SCHED_TYPE_ATOMIC: + opts.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE; + /* Reset FQCTRL_AVOIDBLOCK bit as it is unnecessary + * configuration with HOLD_ACTIVE setting + */ + opts.fqd.fq_ctrl &= (~QM_FQCTRL_AVOIDBLOCK); + rxq->cb.dqrr_dpdk_cb = dpaa_rx_cb_atomic; + break; + case 
RTE_SCHED_TYPE_ORDERED: + DPAA_PMD_ERR("Ordered queue schedule type is not supported\n"); + return -1; + default: + opts.fqd.fq_ctrl |= QM_FQCTRL_AVOIDBLOCK; + rxq->cb.dqrr_dpdk_cb = dpaa_rx_cb_parallel; + break; + } + + opts.we_mask = opts.we_mask | QM_INITFQ_WE_DESTWQ; + opts.fqd.dest.channel = ch_id; + opts.fqd.dest.wq = queue_conf->ev.priority; + + if (dpaa_intf->cgr_rx) { + opts.we_mask |= QM_INITFQ_WE_CGID; + opts.fqd.cgid = dpaa_intf->cgr_rx[eth_rx_queue_id].cgrid; + opts.fqd.fq_ctrl |= QM_FQCTRL_CGE; + } + + flags = QMAN_INITFQ_FLAG_SCHED; + + ret = qman_init_fq(rxq, flags, &opts); + if (ret) { + DPAA_PMD_ERR("Ev-Channel/Q association failed. fqid 0x%x " + "ret:%d(%s)", rxq->fqid, ret, strerror(ret)); + return ret; + } + + /* copy configuration which needs to be filled during dequeue */ + memcpy(&rxq->ev, &queue_conf->ev, sizeof(struct rte_event)); + dev->data->rx_queues[eth_rx_queue_id] = rxq; + + return ret; +} + +int +dpaa_eth_eventq_detach(const struct rte_eth_dev *dev, + int eth_rx_queue_id) +{ + struct qm_mcc_initfq opts; + int ret; + u32 flags = 0; + struct dpaa_if *dpaa_intf = dev->data->dev_private; + struct qman_fq *rxq = &dpaa_intf->rx_queues[eth_rx_queue_id]; + + dpaa_poll_queue_default_config(&opts); + + if (dpaa_intf->cgr_rx) { + opts.we_mask |= QM_INITFQ_WE_CGID; + opts.fqd.cgid = dpaa_intf->cgr_rx[eth_rx_queue_id].cgrid; + opts.fqd.fq_ctrl |= QM_FQCTRL_CGE; + } + + ret = qman_init_fq(rxq, flags, &opts); + if (ret) { + DPAA_PMD_ERR("init rx fqid %d failed with ret: %d", + rxq->fqid, ret); + } + + rxq->cb.dqrr_dpdk_cb = NULL; + dev->data->rx_queues[eth_rx_queue_id] = NULL; + + return 0; +} + +static +void dpaa_eth_rx_queue_release(void *rxq __rte_unused) +{ + PMD_INIT_FUNC_TRACE(); +} + +static +int dpaa_eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, + uint16_t nb_desc __rte_unused, + unsigned int socket_id __rte_unused, + const struct rte_eth_txconf *tx_conf __rte_unused) +{ + struct dpaa_if *dpaa_intf = dev->data->dev_private; + + PMD_INIT_FUNC_TRACE(); + + if (queue_idx >= dev->data->nb_tx_queues) { + rte_errno = EOVERFLOW; + DPAA_PMD_ERR("%p: queue index out of range (%u >= %u)", + (void *)dev, queue_idx, dev->data->nb_tx_queues); + return -rte_errno; + } + + DPAA_PMD_INFO("Tx queue setup for queue index: %d fq_id (0x%x)", + queue_idx, dpaa_intf->tx_queues[queue_idx].fqid); + dev->data->tx_queues[queue_idx] = &dpaa_intf->tx_queues[queue_idx]; + return 0; +} + +static void dpaa_eth_tx_queue_release(void *txq __rte_unused) +{ + PMD_INIT_FUNC_TRACE(); +} + +static uint32_t +dpaa_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id) +{ + struct dpaa_if *dpaa_intf = dev->data->dev_private; + struct qman_fq *rxq = &dpaa_intf->rx_queues[rx_queue_id]; + u32 frm_cnt = 0; + + PMD_INIT_FUNC_TRACE(); + + if (qman_query_fq_frm_cnt(rxq, &frm_cnt) == 0) { + DPAA_PMD_DEBUG("RX frame count for q(%d) is %u", + rx_queue_id, frm_cnt); + } + return frm_cnt; +} + +static int dpaa_link_down(struct rte_eth_dev *dev) +{ + PMD_INIT_FUNC_TRACE(); + + dpaa_eth_dev_stop(dev); + return 0; +} + +static int dpaa_link_up(struct rte_eth_dev *dev) +{ + PMD_INIT_FUNC_TRACE(); + + dpaa_eth_dev_start(dev); + return 0; +} + +static int +dpaa_flow_ctrl_set(struct rte_eth_dev *dev, + struct rte_eth_fc_conf *fc_conf) +{ + struct dpaa_if *dpaa_intf = dev->data->dev_private; + struct rte_eth_fc_conf *net_fc; + + PMD_INIT_FUNC_TRACE(); + + if (!(dpaa_intf->fc_conf)) { + dpaa_intf->fc_conf = rte_zmalloc(NULL, + sizeof(struct rte_eth_fc_conf), MAX_CACHELINE); + if 
(!dpaa_intf->fc_conf) { + DPAA_PMD_ERR("unable to save flow control info"); + return -ENOMEM; + } + } + net_fc = dpaa_intf->fc_conf; + + if (fc_conf->high_water < fc_conf->low_water) { + DPAA_PMD_ERR("Incorrect Flow Control Configuration"); + return -EINVAL; + } + + if (fc_conf->mode == RTE_FC_NONE) { + return 0; + } else if (fc_conf->mode == RTE_FC_TX_PAUSE || + fc_conf->mode == RTE_FC_FULL) { + fman_if_set_fc_threshold(dpaa_intf->fif, fc_conf->high_water, + fc_conf->low_water, + dpaa_intf->bp_info->bpid); + if (fc_conf->pause_time) + fman_if_set_fc_quanta(dpaa_intf->fif, + fc_conf->pause_time); + } + + /* Save the information in dpaa device */ + net_fc->pause_time = fc_conf->pause_time; + net_fc->high_water = fc_conf->high_water; + net_fc->low_water = fc_conf->low_water; + net_fc->send_xon = fc_conf->send_xon; + net_fc->mac_ctrl_frame_fwd = fc_conf->mac_ctrl_frame_fwd; + net_fc->mode = fc_conf->mode; + net_fc->autoneg = fc_conf->autoneg; + + return 0; +} + +static int +dpaa_flow_ctrl_get(struct rte_eth_dev *dev, + struct rte_eth_fc_conf *fc_conf) +{ + struct dpaa_if *dpaa_intf = dev->data->dev_private; + struct rte_eth_fc_conf *net_fc = dpaa_intf->fc_conf; + int ret; + + PMD_INIT_FUNC_TRACE(); + + if (net_fc) { + fc_conf->pause_time = net_fc->pause_time; + fc_conf->high_water = net_fc->high_water; + fc_conf->low_water = net_fc->low_water; + fc_conf->send_xon = net_fc->send_xon; + fc_conf->mac_ctrl_frame_fwd = net_fc->mac_ctrl_frame_fwd; + fc_conf->mode = net_fc->mode; + fc_conf->autoneg = net_fc->autoneg; + return 0; + } + ret = fman_if_get_fc_threshold(dpaa_intf->fif); + if (ret) { + fc_conf->mode = RTE_FC_TX_PAUSE; + fc_conf->pause_time = fman_if_get_fc_quanta(dpaa_intf->fif); + } else { + fc_conf->mode = RTE_FC_NONE; + } + + return 0; +} + +static int +dpaa_dev_add_mac_addr(struct rte_eth_dev *dev, + struct rte_ether_addr *addr, + uint32_t index, + __rte_unused uint32_t pool) +{ + int ret; + struct dpaa_if *dpaa_intf = dev->data->dev_private; + + PMD_INIT_FUNC_TRACE(); + + ret = fman_if_add_mac_addr(dpaa_intf->fif, addr->addr_bytes, index); + + if (ret) + DPAA_PMD_ERR("Adding the MAC ADDR failed: err = %d", ret); + return 0; +} + +static void +dpaa_dev_remove_mac_addr(struct rte_eth_dev *dev, + uint32_t index) +{ + struct dpaa_if *dpaa_intf = dev->data->dev_private; + + PMD_INIT_FUNC_TRACE(); + + fman_if_clear_mac_addr(dpaa_intf->fif, index); +} + +static int +dpaa_dev_set_mac_addr(struct rte_eth_dev *dev, + struct rte_ether_addr *addr) +{ + int ret; + struct dpaa_if *dpaa_intf = dev->data->dev_private; + + PMD_INIT_FUNC_TRACE(); + + ret = fman_if_add_mac_addr(dpaa_intf->fif, addr->addr_bytes, 0); + if (ret) + DPAA_PMD_ERR("Setting the MAC ADDR failed %d", ret); + + return ret; +} + +static int dpaa_dev_queue_intr_enable(struct rte_eth_dev *dev, + uint16_t queue_id) +{ + struct dpaa_if *dpaa_intf = dev->data->dev_private; + struct qman_fq *rxq = &dpaa_intf->rx_queues[queue_id]; + + if (!rxq->is_static) + return -EINVAL; + + return qman_fq_portal_irqsource_add(rxq->qp, QM_PIRQ_DQRI); +} + +static int dpaa_dev_queue_intr_disable(struct rte_eth_dev *dev, + uint16_t queue_id) +{ + struct dpaa_if *dpaa_intf = dev->data->dev_private; + struct qman_fq *rxq = &dpaa_intf->rx_queues[queue_id]; + uint32_t temp; + ssize_t temp1; + + if (!rxq->is_static) + return -EINVAL; + + qman_fq_portal_irqsource_remove(rxq->qp, ~0); + + temp1 = read(rxq->q_fd, &temp, sizeof(temp)); + if (temp1 != sizeof(temp)) + DPAA_PMD_ERR("irq read error"); + + qman_fq_portal_thread_irq(rxq->qp); + + return 0; +} + +static 
struct eth_dev_ops dpaa_devops = { + .dev_configure = dpaa_eth_dev_configure, + .dev_start = dpaa_eth_dev_start, + .dev_stop = dpaa_eth_dev_stop, + .dev_close = dpaa_eth_dev_close, + .dev_infos_get = dpaa_eth_dev_info, + .dev_supported_ptypes_get = dpaa_supported_ptypes_get, + + .rx_queue_setup = dpaa_eth_rx_queue_setup, + .tx_queue_setup = dpaa_eth_tx_queue_setup, + .rx_queue_release = dpaa_eth_rx_queue_release, + .tx_queue_release = dpaa_eth_tx_queue_release, + .rx_queue_count = dpaa_dev_rx_queue_count, + + .flow_ctrl_get = dpaa_flow_ctrl_get, + .flow_ctrl_set = dpaa_flow_ctrl_set, + + .link_update = dpaa_eth_link_update, + .stats_get = dpaa_eth_stats_get, + .xstats_get = dpaa_dev_xstats_get, + .xstats_get_by_id = dpaa_xstats_get_by_id, + .xstats_get_names_by_id = dpaa_xstats_get_names_by_id, + .xstats_get_names = dpaa_xstats_get_names, + .xstats_reset = dpaa_eth_stats_reset, + .stats_reset = dpaa_eth_stats_reset, + .promiscuous_enable = dpaa_eth_promiscuous_enable, + .promiscuous_disable = dpaa_eth_promiscuous_disable, + .allmulticast_enable = dpaa_eth_multicast_enable, + .allmulticast_disable = dpaa_eth_multicast_disable, + .mtu_set = dpaa_mtu_set, + .dev_set_link_down = dpaa_link_down, + .dev_set_link_up = dpaa_link_up, + .mac_addr_add = dpaa_dev_add_mac_addr, + .mac_addr_remove = dpaa_dev_remove_mac_addr, + .mac_addr_set = dpaa_dev_set_mac_addr, + + .fw_version_get = dpaa_fw_version_get, + + .rx_queue_intr_enable = dpaa_dev_queue_intr_enable, + .rx_queue_intr_disable = dpaa_dev_queue_intr_disable, +}; + +static bool +is_device_supported(struct rte_eth_dev *dev, struct rte_dpaa_driver *drv) +{ + if (strcmp(dev->device->driver->name, + drv->driver.name)) + return false; + + return true; +} + +static bool +is_dpaa_supported(struct rte_eth_dev *dev) +{ + return is_device_supported(dev, &rte_dpaa_pmd); +} + +int +rte_pmd_dpaa_set_tx_loopback(uint8_t port, uint8_t on) +{ + struct rte_eth_dev *dev; + struct dpaa_if *dpaa_intf; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + + if (!is_dpaa_supported(dev)) + return -ENOTSUP; + + dpaa_intf = dev->data->dev_private; + + if (on) + fman_if_loopback_enable(dpaa_intf->fif); + else + fman_if_loopback_disable(dpaa_intf->fif); + + return 0; +} + +static int dpaa_fc_set_default(struct dpaa_if *dpaa_intf) +{ + struct rte_eth_fc_conf *fc_conf; + int ret; + + PMD_INIT_FUNC_TRACE(); + + if (!(dpaa_intf->fc_conf)) { + dpaa_intf->fc_conf = rte_zmalloc(NULL, + sizeof(struct rte_eth_fc_conf), MAX_CACHELINE); + if (!dpaa_intf->fc_conf) { + DPAA_PMD_ERR("unable to save flow control info"); + return -ENOMEM; + } + } + fc_conf = dpaa_intf->fc_conf; + ret = fman_if_get_fc_threshold(dpaa_intf->fif); + if (ret) { + fc_conf->mode = RTE_FC_TX_PAUSE; + fc_conf->pause_time = fman_if_get_fc_quanta(dpaa_intf->fif); + } else { + fc_conf->mode = RTE_FC_NONE; + } + + return 0; +} + +/* Initialise an Rx FQ */ +static int dpaa_rx_queue_init(struct qman_fq *fq, struct qman_cgr *cgr_rx, + uint32_t fqid) +{ + struct qm_mcc_initfq opts = {0}; + int ret; + u32 flags = QMAN_FQ_FLAG_NO_ENQUEUE; + struct qm_mcc_initcgr cgr_opts = { + .we_mask = QM_CGR_WE_CS_THRES | + QM_CGR_WE_CSTD_EN | + QM_CGR_WE_MODE, + .cgr = { + .cstd_en = QM_CGR_EN, + .mode = QMAN_CGR_MODE_FRAME + } + }; + + if (fqid) { + ret = qman_reserve_fqid(fqid); + if (ret) { + DPAA_PMD_ERR("reserve rx fqid 0x%x failed with ret: %d", + fqid, ret); + return -EINVAL; + } + } else { + flags |= QMAN_FQ_FLAG_DYNAMIC_FQID; + } + DPAA_PMD_DEBUG("creating rx fq %p, fqid 0x%x", fq, fqid); + 
ret = qman_create_fq(fqid, flags, fq); + if (ret) { + DPAA_PMD_ERR("create rx fqid 0x%x failed with ret: %d", + fqid, ret); + return ret; + } + fq->is_static = false; + + dpaa_poll_queue_default_config(&opts); + + if (cgr_rx) { + /* Enable tail drop with cgr on this queue */ + qm_cgr_cs_thres_set64(&cgr_opts.cgr.cs_thres, td_threshold, 0); + cgr_rx->cb = NULL; + ret = qman_create_cgr(cgr_rx, QMAN_CGR_FLAG_USE_INIT, + &cgr_opts); + if (ret) { + DPAA_PMD_WARN( + "rx taildrop init fail on rx fqid 0x%x(ret=%d)", + fq->fqid, ret); + goto without_cgr; + } + opts.we_mask |= QM_INITFQ_WE_CGID; + opts.fqd.cgid = cgr_rx->cgrid; + opts.fqd.fq_ctrl |= QM_FQCTRL_CGE; + } +without_cgr: + ret = qman_init_fq(fq, 0, &opts); + if (ret) + DPAA_PMD_ERR("init rx fqid 0x%x failed with ret:%d", fqid, ret); + return ret; +} + +/* Initialise a Tx FQ */ +static int dpaa_tx_queue_init(struct qman_fq *fq, + struct fman_if *fman_intf) +{ + struct qm_mcc_initfq opts = {0}; + int ret; + + ret = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID | + QMAN_FQ_FLAG_TO_DCPORTAL, fq); + if (ret) { + DPAA_PMD_ERR("create tx fq failed with ret: %d", ret); + return ret; + } + opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL | + QM_INITFQ_WE_CONTEXTB | QM_INITFQ_WE_CONTEXTA; + opts.fqd.dest.channel = fman_intf->tx_channel_id; + opts.fqd.dest.wq = DPAA_IF_TX_PRIORITY; + opts.fqd.fq_ctrl = QM_FQCTRL_PREFERINCACHE; + opts.fqd.context_b = 0; + /* no tx-confirmation */ + opts.fqd.context_a.hi = 0x80000000 | fman_dealloc_bufs_mask_hi; + opts.fqd.context_a.lo = 0 | fman_dealloc_bufs_mask_lo; + DPAA_PMD_DEBUG("init tx fq %p, fqid 0x%x", fq, fq->fqid); + ret = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts); + if (ret) + DPAA_PMD_ERR("init tx fqid 0x%x failed %d", fq->fqid, ret); + return ret; +} + +#ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER +/* Initialise a DEBUG FQ ([rt]x_error, rx_default). 
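+ * Only compiled in when RTE_LIBRTE_DPAA_DEBUG_DRIVER is defined; each
+ * debug FQ is later bound to the interface's Rx/Tx error FQID so that
+ * error frames have somewhere to land for inspection.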
*/ +static int dpaa_debug_queue_init(struct qman_fq *fq, uint32_t fqid) +{ + struct qm_mcc_initfq opts = {0}; + int ret; + + PMD_INIT_FUNC_TRACE(); + + ret = qman_reserve_fqid(fqid); + if (ret) { + DPAA_PMD_ERR("Reserve debug fqid %d failed with ret: %d", + fqid, ret); + return -EINVAL; + } + /* "map" this Rx FQ to one of the interfaces Tx FQID */ + DPAA_PMD_DEBUG("Creating debug fq %p, fqid %d", fq, fqid); + ret = qman_create_fq(fqid, QMAN_FQ_FLAG_NO_ENQUEUE, fq); + if (ret) { + DPAA_PMD_ERR("create debug fqid %d failed with ret: %d", + fqid, ret); + return ret; + } + opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL; + opts.fqd.dest.wq = DPAA_IF_DEBUG_PRIORITY; + ret = qman_init_fq(fq, 0, &opts); + if (ret) + DPAA_PMD_ERR("init debug fqid %d failed with ret: %d", + fqid, ret); + return ret; +} +#endif + +/* Initialise a network interface */ +static int +dpaa_dev_init(struct rte_eth_dev *eth_dev) +{ + int num_rx_fqs, fqid; + int loop, ret = 0; + int dev_id; + struct rte_dpaa_device *dpaa_device; + struct dpaa_if *dpaa_intf; + struct fm_eth_port_cfg *cfg; + struct fman_if *fman_intf; + struct fman_if_bpool *bp, *tmp_bp; + uint32_t cgrid[DPAA_MAX_NUM_PCD_QUEUES]; + char eth_buf[RTE_ETHER_ADDR_FMT_SIZE]; + + PMD_INIT_FUNC_TRACE(); + + dpaa_intf = eth_dev->data->dev_private; + /* For secondary processes, the primary has done all the work */ + if (rte_eal_process_type() != RTE_PROC_PRIMARY) { + eth_dev->dev_ops = &dpaa_devops; + /* Plugging of UCODE burst API not supported in Secondary */ + eth_dev->rx_pkt_burst = dpaa_eth_queue_rx; + eth_dev->tx_pkt_burst = dpaa_eth_queue_tx; +#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP + qman_set_fq_lookup_table( + dpaa_intf->rx_queues->qman_fq_lookup_table); +#endif + return 0; + } + + dpaa_device = DEV_TO_DPAA_DEVICE(eth_dev->device); + dev_id = dpaa_device->id.dev_id; + dpaa_intf = eth_dev->data->dev_private; + cfg = dpaa_get_eth_port_cfg(dev_id); + fman_intf = cfg->fman_if; + + dpaa_intf->name = dpaa_device->name; + + /* save fman_if & cfg in the interface struture */ + dpaa_intf->fif = fman_intf; + dpaa_intf->ifid = dev_id; + dpaa_intf->cfg = cfg; + + /* Initialize Rx FQ's */ + if (default_q) { + num_rx_fqs = DPAA_DEFAULT_NUM_PCD_QUEUES; + } else { + if (getenv("DPAA_NUM_RX_QUEUES")) + num_rx_fqs = atoi(getenv("DPAA_NUM_RX_QUEUES")); + else + num_rx_fqs = DPAA_DEFAULT_NUM_PCD_QUEUES; + } + + + /* Each device can not have more than DPAA_MAX_NUM_PCD_QUEUES RX + * queues. 
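+ * The count defaults to DPAA_DEFAULT_NUM_PCD_QUEUES, can be overridden
+ * through the DPAA_NUM_RX_QUEUES environment variable, and is rejected
+ * below if it falls outside 1..DPAA_MAX_NUM_PCD_QUEUES.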
+ */ + if (num_rx_fqs <= 0 || num_rx_fqs > DPAA_MAX_NUM_PCD_QUEUES) { + DPAA_PMD_ERR("Invalid number of RX queues\n"); + return -EINVAL; + } + + dpaa_intf->rx_queues = rte_zmalloc(NULL, + sizeof(struct qman_fq) * num_rx_fqs, MAX_CACHELINE); + if (!dpaa_intf->rx_queues) { + DPAA_PMD_ERR("Failed to alloc mem for RX queues\n"); + return -ENOMEM; + } + + /* If congestion control is enabled globally*/ + if (td_threshold) { + dpaa_intf->cgr_rx = rte_zmalloc(NULL, + sizeof(struct qman_cgr) * num_rx_fqs, MAX_CACHELINE); + if (!dpaa_intf->cgr_rx) { + DPAA_PMD_ERR("Failed to alloc mem for cgr_rx\n"); + ret = -ENOMEM; + goto free_rx; + } + + ret = qman_alloc_cgrid_range(&cgrid[0], num_rx_fqs, 1, 0); + if (ret != num_rx_fqs) { + DPAA_PMD_WARN("insufficient CGRIDs available"); + ret = -EINVAL; + goto free_rx; + } + } else { + dpaa_intf->cgr_rx = NULL; + } + + for (loop = 0; loop < num_rx_fqs; loop++) { + if (default_q) + fqid = cfg->rx_def; + else + fqid = DPAA_PCD_FQID_START + dpaa_intf->fif->mac_idx * + DPAA_PCD_FQID_MULTIPLIER + loop; + + if (dpaa_intf->cgr_rx) + dpaa_intf->cgr_rx[loop].cgrid = cgrid[loop]; + + ret = dpaa_rx_queue_init(&dpaa_intf->rx_queues[loop], + dpaa_intf->cgr_rx ? &dpaa_intf->cgr_rx[loop] : NULL, + fqid); + if (ret) + goto free_rx; + dpaa_intf->rx_queues[loop].dpaa_intf = dpaa_intf; + } + dpaa_intf->nb_rx_queues = num_rx_fqs; + + /* Initialise Tx FQs.free_rx Have as many Tx FQ's as number of cores */ + dpaa_intf->tx_queues = rte_zmalloc(NULL, sizeof(struct qman_fq) * + MAX_DPAA_CORES, MAX_CACHELINE); + if (!dpaa_intf->tx_queues) { + DPAA_PMD_ERR("Failed to alloc mem for TX queues\n"); + ret = -ENOMEM; + goto free_rx; + } + + for (loop = 0; loop < MAX_DPAA_CORES; loop++) { + ret = dpaa_tx_queue_init(&dpaa_intf->tx_queues[loop], + fman_intf); + if (ret) + goto free_tx; + dpaa_intf->tx_queues[loop].dpaa_intf = dpaa_intf; + } + dpaa_intf->nb_tx_queues = MAX_DPAA_CORES; + +#ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER + dpaa_debug_queue_init(&dpaa_intf->debug_queues[ + DPAA_DEBUG_FQ_RX_ERROR], fman_intf->fqid_rx_err); + dpaa_intf->debug_queues[DPAA_DEBUG_FQ_RX_ERROR].dpaa_intf = dpaa_intf; + dpaa_debug_queue_init(&dpaa_intf->debug_queues[ + DPAA_DEBUG_FQ_TX_ERROR], fman_intf->fqid_tx_err); + dpaa_intf->debug_queues[DPAA_DEBUG_FQ_TX_ERROR].dpaa_intf = dpaa_intf; +#endif + + DPAA_PMD_DEBUG("All frame queues created"); + + /* Get the initial configuration for flow control */ + dpaa_fc_set_default(dpaa_intf); + + /* reset bpool list, initialize bpool dynamically */ + list_for_each_entry_safe(bp, tmp_bp, &cfg->fman_if->bpool_list, node) { + list_del(&bp->node); + rte_free(bp); + } + + /* Populate ethdev structure */ + eth_dev->dev_ops = &dpaa_devops; + eth_dev->rx_pkt_burst = dpaa_eth_queue_rx; + eth_dev->tx_pkt_burst = dpaa_eth_tx_drop_all; + + /* Allocate memory for storing MAC addresses */ + eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", + RTE_ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER, 0); + if (eth_dev->data->mac_addrs == NULL) { + DPAA_PMD_ERR("Failed to allocate %d bytes needed to " + "store MAC addresses", + RTE_ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER); + ret = -ENOMEM; + goto free_tx; + } + + /* copy the primary mac address */ + rte_ether_addr_copy(&fman_intf->mac_addr, ð_dev->data->mac_addrs[0]); + rte_ether_format_addr(eth_buf, sizeof(eth_buf), &fman_intf->mac_addr); + + DPAA_PMD_INFO("net: dpaa: %s: %s", dpaa_device->name, eth_buf); + + /* Disable RX mode */ + fman_if_discard_rx_errors(fman_intf); + fman_if_disable_rx(fman_intf); + /* Disable promiscuous mode */ + 
fman_if_promiscuous_disable(fman_intf); + /* Disable multicast */ + fman_if_reset_mcast_filter_table(fman_intf); + /* Reset interface statistics */ + fman_if_stats_reset(fman_intf); + /* Disable SG by default */ + fman_if_set_sg(fman_intf, 0); + fman_if_set_maxfrm(fman_intf, RTE_ETHER_MAX_LEN + VLAN_TAG_SIZE); + + return 0; + +free_tx: + rte_free(dpaa_intf->tx_queues); + dpaa_intf->tx_queues = NULL; + dpaa_intf->nb_tx_queues = 0; + +free_rx: + rte_free(dpaa_intf->cgr_rx); + rte_free(dpaa_intf->rx_queues); + dpaa_intf->rx_queues = NULL; + dpaa_intf->nb_rx_queues = 0; + return ret; +} + +static int +dpaa_dev_uninit(struct rte_eth_dev *dev) +{ + struct dpaa_if *dpaa_intf = dev->data->dev_private; + int loop; + + PMD_INIT_FUNC_TRACE(); + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return -EPERM; + + if (!dpaa_intf) { + DPAA_PMD_WARN("Already closed or not started"); + return -1; + } + + dpaa_eth_dev_close(dev); + + /* release configuration memory */ + if (dpaa_intf->fc_conf) + rte_free(dpaa_intf->fc_conf); + + /* Release RX congestion Groups */ + if (dpaa_intf->cgr_rx) { + for (loop = 0; loop < dpaa_intf->nb_rx_queues; loop++) + qman_delete_cgr(&dpaa_intf->cgr_rx[loop]); + + qman_release_cgrid_range(dpaa_intf->cgr_rx[loop].cgrid, + dpaa_intf->nb_rx_queues); + } + + rte_free(dpaa_intf->cgr_rx); + dpaa_intf->cgr_rx = NULL; + + rte_free(dpaa_intf->rx_queues); + dpaa_intf->rx_queues = NULL; + + rte_free(dpaa_intf->tx_queues); + dpaa_intf->tx_queues = NULL; + + dev->dev_ops = NULL; + dev->rx_pkt_burst = NULL; + dev->tx_pkt_burst = NULL; + + return 0; +} + +static int +rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv __rte_unused, + struct rte_dpaa_device *dpaa_dev) +{ + int diag; + int ret; + struct rte_eth_dev *eth_dev; + + PMD_INIT_FUNC_TRACE(); + + if ((DPAA_MBUF_HW_ANNOTATION + DPAA_FD_PTA_SIZE) > + RTE_PKTMBUF_HEADROOM) { + DPAA_PMD_ERR( + "RTE_PKTMBUF_HEADROOM(%d) shall be > DPAA Annotation req(%d)", + RTE_PKTMBUF_HEADROOM, + DPAA_MBUF_HW_ANNOTATION + DPAA_FD_PTA_SIZE); + + return -1; + } + + /* In case of secondary process, the device is already configured + * and no further action is required, except portal initialization + * and verifying secondary attachment to port name. + */ + if (rte_eal_process_type() != RTE_PROC_PRIMARY) { + eth_dev = rte_eth_dev_attach_secondary(dpaa_dev->name); + if (!eth_dev) + return -ENOMEM; + eth_dev->device = &dpaa_dev->device; + eth_dev->dev_ops = &dpaa_devops; + rte_eth_dev_probing_finish(eth_dev); + return 0; + } + + if (!is_global_init && (rte_eal_process_type() == RTE_PROC_PRIMARY)) { + if (access("/tmp/fmc.bin", F_OK) == -1) { + DPAA_PMD_INFO("* FMC not configured.Enabling default mode"); + default_q = 1; + } + + /* disabling the default push mode for LS1043 */ + if (dpaa_svr_family == SVR_LS1043A_FAMILY) + dpaa_push_mode_max_queue = 0; + + /* if push mode queues to be enabled. Currenly we are allowing + * only one queue per thread. + */ + if (getenv("DPAA_PUSH_QUEUES_NUMBER")) { + dpaa_push_mode_max_queue = + atoi(getenv("DPAA_PUSH_QUEUES_NUMBER")); + if (dpaa_push_mode_max_queue > DPAA_MAX_PUSH_MODE_QUEUE) + dpaa_push_mode_max_queue = DPAA_MAX_PUSH_MODE_QUEUE; + } + + is_global_init = 1; + } + + if (unlikely(!RTE_PER_LCORE(dpaa_io))) { + ret = rte_dpaa_portal_init((void *)1); + if (ret) { + DPAA_PMD_ERR("Unable to initialize portal"); + return ret; + } + } + + /* In case of secondary process, the device is already configured + * and no further action is required, except portal initialization + * and verifying secondary attachment to port name. 
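+ * For the primary process, the branch below instead allocates a fresh
+ * ethdev with rte_eth_dev_allocate() and a private dpaa_if structure
+ * before dpaa_dev_init() is invoked.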
+ */ + if (rte_eal_process_type() != RTE_PROC_PRIMARY) { + eth_dev = rte_eth_dev_attach_secondary(dpaa_dev->name); + if (!eth_dev) + return -ENOMEM; + } else { + eth_dev = rte_eth_dev_allocate(dpaa_dev->name); + if (eth_dev == NULL) + return -ENOMEM; + + eth_dev->data->dev_private = rte_zmalloc( + "ethdev private structure", + sizeof(struct dpaa_if), + RTE_CACHE_LINE_SIZE); + if (!eth_dev->data->dev_private) { + DPAA_PMD_ERR("Cannot allocate memzone for port data"); + rte_eth_dev_release_port(eth_dev); + return -ENOMEM; + } + } + eth_dev->device = &dpaa_dev->device; + dpaa_dev->eth_dev = eth_dev; + + /* Invoke PMD device initialization function */ + diag = dpaa_dev_init(eth_dev); + if (diag == 0) { + rte_eth_dev_probing_finish(eth_dev); + return 0; + } + + rte_eth_dev_release_port(eth_dev); + return diag; +} + +static int +rte_dpaa_remove(struct rte_dpaa_device *dpaa_dev) +{ + struct rte_eth_dev *eth_dev; + + PMD_INIT_FUNC_TRACE(); + + eth_dev = dpaa_dev->eth_dev; + dpaa_dev_uninit(eth_dev); + + rte_eth_dev_release_port(eth_dev); + + return 0; +} + +static struct rte_dpaa_driver rte_dpaa_pmd = { + .drv_type = FSL_DPAA_ETH, + .probe = rte_dpaa_probe, + .remove = rte_dpaa_remove, +}; + +RTE_PMD_REGISTER_DPAA(net_dpaa, rte_dpaa_pmd); +RTE_INIT(dpaa_net_init_log) +{ + dpaa_logtype_pmd = rte_log_register("pmd.net.dpaa"); + if (dpaa_logtype_pmd >= 0) + rte_log_set_level(dpaa_logtype_pmd, RTE_LOG_NOTICE); +} diff --git a/src/spdk/dpdk/drivers/net/dpaa/dpaa_ethdev.h b/src/spdk/dpdk/drivers/net/dpaa/dpaa_ethdev.h new file mode 100644 index 000000000..6a6477ac8 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/dpaa/dpaa_ethdev.h @@ -0,0 +1,210 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright (c) 2014-2016 Freescale Semiconductor, Inc. All rights reserved. + * Copyright 2017-2019 NXP + * + */ +#ifndef __DPAA_ETHDEV_H__ +#define __DPAA_ETHDEV_H__ + +/* System headers */ +#include +#include +#include + +#include +#include +#include +#include +#include + +#define MAX_DPAA_CORES 4 +#define DPAA_MBUF_HW_ANNOTATION 64 +#define DPAA_FD_PTA_SIZE 64 + +/* mbuf->seqn will be used to store event entry index for + * driver specific usage. For parallel mode queues, invalid + * index will be set and for atomic mode queues, valid value + * ranging from 1 to 16. + */ +#define DPAA_INVALID_MBUF_SEQN 0 + +/* we will re-use the HEADROOM for annotation in RX */ +#define DPAA_HW_BUF_RESERVE 0 +#define DPAA_PACKET_LAYOUT_ALIGN 64 + +/* Alignment to use for cpu-local structs to avoid coherency problems. 
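+ * The 64-byte value below matches the cache line size of the Arm cores
+ * used on DPAA1 SoCs.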
*/ +#define MAX_CACHELINE 64 + +#define DPAA_MAX_RX_PKT_LEN 10240 + +#define DPAA_SGT_MAX_ENTRIES 16 /* maximum number of entries in SG Table */ + +/* RX queue tail drop threshold (CGR Based) in frame count */ +#define CGR_RX_PERFQ_THRESH 256 +#define CGR_TX_CGR_THRESH 512 + +/*max mac filter for memac(8) including primary mac addr*/ +#define DPAA_MAX_MAC_FILTER (MEMAC_NUM_OF_PADDRS + 1) + +/*Maximum number of slots available in TX ring*/ +#define DPAA_TX_BURST_SIZE 7 + +/* Optimal burst size for RX and TX as default */ +#define DPAA_DEF_RX_BURST_SIZE 7 +#define DPAA_DEF_TX_BURST_SIZE DPAA_TX_BURST_SIZE + +#ifndef VLAN_TAG_SIZE +#define VLAN_TAG_SIZE 4 /** < Vlan Header Length */ +#endif + +/* PCD frame queues */ +#define DPAA_PCD_FQID_START 0x400 +#define DPAA_PCD_FQID_MULTIPLIER 0x100 +#define DPAA_DEFAULT_NUM_PCD_QUEUES 1 +#define DPAA_MAX_NUM_PCD_QUEUES 4 + +#define DPAA_IF_TX_PRIORITY 3 +#define DPAA_IF_RX_PRIORITY 0 +#define DPAA_IF_DEBUG_PRIORITY 7 + +#define DPAA_IF_RX_ANNOTATION_STASH 1 +#define DPAA_IF_RX_DATA_STASH 1 +#define DPAA_IF_RX_CONTEXT_STASH 0 + +/* Each "debug" FQ is represented by one of these */ +#define DPAA_DEBUG_FQ_RX_ERROR 0 +#define DPAA_DEBUG_FQ_TX_ERROR 1 + +#define DPAA_RSS_OFFLOAD_ALL ( \ + ETH_RSS_L2_PAYLOAD | \ + ETH_RSS_IP | \ + ETH_RSS_UDP | \ + ETH_RSS_TCP | \ + ETH_RSS_SCTP) + +#define DPAA_TX_CKSUM_OFFLOAD_MASK ( \ + PKT_TX_IP_CKSUM | \ + PKT_TX_TCP_CKSUM | \ + PKT_TX_UDP_CKSUM) + +/* DPAA Frame descriptor macros */ + +#define DPAA_FD_CMD_FCO 0x80000000 +/**< Frame queue Context Override */ +#define DPAA_FD_CMD_RPD 0x40000000 +/**< Read Prepended Data */ +#define DPAA_FD_CMD_UPD 0x20000000 +/**< Update Prepended Data */ +#define DPAA_FD_CMD_DTC 0x10000000 +/**< Do IP/TCP/UDP Checksum */ +#define DPAA_FD_CMD_DCL4C 0x10000000 +/**< Didn't calculate L4 Checksum */ +#define DPAA_FD_CMD_CFQ 0x00ffffff +/**< Confirmation Frame Queue */ + +/* Each network interface is represented by one of these */ +struct dpaa_if { + int valid; + char *name; + const struct fm_eth_port_cfg *cfg; + struct qman_fq *rx_queues; + struct qman_cgr *cgr_rx; + struct qman_fq *tx_queues; + struct qman_fq debug_queues[2]; + uint16_t nb_rx_queues; + uint16_t nb_tx_queues; + uint32_t ifid; + struct fman_if *fif; + struct dpaa_bp_info *bp_info; + struct rte_eth_fc_conf *fc_conf; +}; + +struct dpaa_if_stats { + /* Rx Statistics Counter */ + uint64_t reoct; /**>") + +#define DPAA_PMD_DEBUG(fmt, args...) \ + DPAA_PMD_LOG(DEBUG, fmt, ## args) +#define DPAA_PMD_ERR(fmt, args...) \ + DPAA_PMD_LOG(ERR, fmt, ## args) +#define DPAA_PMD_INFO(fmt, args...) \ + DPAA_PMD_LOG(INFO, fmt, ## args) +#define DPAA_PMD_WARN(fmt, args...) \ + DPAA_PMD_LOG(WARNING, fmt, ## args) + +/* DP Logs, toggled out at compile time if level lower than current level */ +#define DPAA_DP_LOG(level, fmt, args...) \ + RTE_LOG_DP(level, PMD, fmt, ## args) + +#endif diff --git a/src/spdk/dpdk/drivers/net/dpaa/dpaa_rxtx.c b/src/spdk/dpdk/drivers/net/dpaa/dpaa_rxtx.c new file mode 100644 index 000000000..5dba1db8b --- /dev/null +++ b/src/spdk/dpdk/drivers/net/dpaa/dpaa_rxtx.c @@ -0,0 +1,1025 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright 2016 Freescale Semiconductor, Inc. All rights reserved. 
+ * Copyright 2017,2019 NXP + * + */ + +/* System headers */ +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "dpaa_ethdev.h" +#include "dpaa_rxtx.h" +#include +#include + +#include +#include +#include +#include +#include +#include + +#define DPAA_MBUF_TO_CONTIG_FD(_mbuf, _fd, _bpid) \ + do { \ + (_fd)->cmd = 0; \ + (_fd)->opaque_addr = 0; \ + (_fd)->opaque = QM_FD_CONTIG << DPAA_FD_FORMAT_SHIFT; \ + (_fd)->opaque |= ((_mbuf)->data_off) << DPAA_FD_OFFSET_SHIFT; \ + (_fd)->opaque |= (_mbuf)->pkt_len; \ + (_fd)->addr = (_mbuf)->buf_iova; \ + (_fd)->bpid = _bpid; \ + } while (0) + +#if (defined RTE_LIBRTE_DPAA_DEBUG_DRIVER) +static void dpaa_display_frame(const struct qm_fd *fd) +{ + int ii; + char *ptr; + + printf("%s::bpid %x addr %08x%08x, format %d off %d, len %d stat %x\n", + __func__, fd->bpid, fd->addr_hi, fd->addr_lo, fd->format, + fd->offset, fd->length20, fd->status); + + ptr = (char *)rte_dpaa_mem_ptov(fd->addr); + ptr += fd->offset; + printf("%02x ", *ptr); + for (ii = 1; ii < fd->length20; ii++) { + printf("%02x ", *ptr); + if ((ii % 16) == 0) + printf("\n"); + ptr++; + } + printf("\n"); +} +#else +#define dpaa_display_frame(a) +#endif + +static inline void dpaa_slow_parsing(struct rte_mbuf *m __rte_unused, + uint64_t prs __rte_unused) +{ + DPAA_DP_LOG(DEBUG, "Slow parsing"); + /*TBD:XXX: to be implemented*/ +} + +static inline void dpaa_eth_packet_info(struct rte_mbuf *m, void *fd_virt_addr) +{ + struct annotations_t *annot = GET_ANNOTATIONS(fd_virt_addr); + uint64_t prs = *((uintptr_t *)(&annot->parse)) & DPAA_PARSE_MASK; + + DPAA_DP_LOG(DEBUG, " Parsing mbuf: %p with annotations: %p", m, annot); + + switch (prs) { + case DPAA_PKT_TYPE_IPV4: + m->packet_type = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4; + break; + case DPAA_PKT_TYPE_IPV6: + m->packet_type = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6; + break; + case DPAA_PKT_TYPE_ETHER: + m->packet_type = RTE_PTYPE_L2_ETHER; + break; + case DPAA_PKT_TYPE_IPV4_FRAG: + case DPAA_PKT_TYPE_IPV4_FRAG_UDP: + case DPAA_PKT_TYPE_IPV4_FRAG_TCP: + case DPAA_PKT_TYPE_IPV4_FRAG_SCTP: + m->packet_type = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_FRAG; + break; + case DPAA_PKT_TYPE_IPV6_FRAG: + case DPAA_PKT_TYPE_IPV6_FRAG_UDP: + case DPAA_PKT_TYPE_IPV6_FRAG_TCP: + case DPAA_PKT_TYPE_IPV6_FRAG_SCTP: + m->packet_type = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_FRAG; + break; + case DPAA_PKT_TYPE_IPV4_EXT: + m->packet_type = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT; + break; + case DPAA_PKT_TYPE_IPV6_EXT: + m->packet_type = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6_EXT; + break; + case DPAA_PKT_TYPE_IPV4_TCP: + m->packet_type = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP; + break; + case DPAA_PKT_TYPE_IPV6_TCP: + m->packet_type = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP; + break; + case DPAA_PKT_TYPE_IPV4_UDP: + m->packet_type = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP; + break; + case DPAA_PKT_TYPE_IPV6_UDP: + m->packet_type = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP; + break; + case DPAA_PKT_TYPE_IPV4_EXT_UDP: + m->packet_type = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_UDP; + break; + case DPAA_PKT_TYPE_IPV6_EXT_UDP: + m->packet_type = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6_EXT | 
RTE_PTYPE_L4_UDP; + break; + case DPAA_PKT_TYPE_IPV4_EXT_TCP: + m->packet_type = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_TCP; + break; + case DPAA_PKT_TYPE_IPV6_EXT_TCP: + m->packet_type = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP; + break; + case DPAA_PKT_TYPE_IPV4_SCTP: + m->packet_type = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP; + break; + case DPAA_PKT_TYPE_IPV6_SCTP: + m->packet_type = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_SCTP; + break; + case DPAA_PKT_TYPE_NONE: + m->packet_type = 0; + break; + /* More switch cases can be added */ + default: + dpaa_slow_parsing(m, prs); + } + + m->tx_offload = annot->parse.ip_off[0]; + m->tx_offload |= (annot->parse.l4_off - annot->parse.ip_off[0]) + << DPAA_PKT_L3_LEN_SHIFT; + + /* Set the hash values */ + m->hash.rss = (uint32_t)(annot->hash); + /* All packets with Bad checksum are dropped by interface (and + * corresponding notification issued to RX error queues). + */ + m->ol_flags = PKT_RX_RSS_HASH | PKT_RX_IP_CKSUM_GOOD; + + /* Check if Vlan is present */ + if (prs & DPAA_PARSE_VLAN_MASK) + m->ol_flags |= PKT_RX_VLAN; + /* Packet received without stripping the vlan */ +} + +static inline void dpaa_checksum(struct rte_mbuf *mbuf) +{ + struct rte_ether_hdr *eth_hdr = + rte_pktmbuf_mtod(mbuf, struct rte_ether_hdr *); + char *l3_hdr = (char *)eth_hdr + mbuf->l2_len; + struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)l3_hdr; + struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)l3_hdr; + + DPAA_DP_LOG(DEBUG, "Calculating checksum for mbuf: %p", mbuf); + + if (((mbuf->packet_type & RTE_PTYPE_L3_MASK) == RTE_PTYPE_L3_IPV4) || + ((mbuf->packet_type & RTE_PTYPE_L3_MASK) == + RTE_PTYPE_L3_IPV4_EXT)) { + ipv4_hdr = (struct rte_ipv4_hdr *)l3_hdr; + ipv4_hdr->hdr_checksum = 0; + ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr); + } else if (((mbuf->packet_type & RTE_PTYPE_L3_MASK) == + RTE_PTYPE_L3_IPV6) || + ((mbuf->packet_type & RTE_PTYPE_L3_MASK) == + RTE_PTYPE_L3_IPV6_EXT)) + ipv6_hdr = (struct rte_ipv6_hdr *)l3_hdr; + + if ((mbuf->packet_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP) { + struct rte_tcp_hdr *tcp_hdr = (struct rte_tcp_hdr *)(l3_hdr + + mbuf->l3_len); + tcp_hdr->cksum = 0; + if (eth_hdr->ether_type == htons(RTE_ETHER_TYPE_IPV4)) + tcp_hdr->cksum = rte_ipv4_udptcp_cksum(ipv4_hdr, + tcp_hdr); + else /* assume ethertype == RTE_ETHER_TYPE_IPV6 */ + tcp_hdr->cksum = rte_ipv6_udptcp_cksum(ipv6_hdr, + tcp_hdr); + } else if ((mbuf->packet_type & RTE_PTYPE_L4_MASK) == + RTE_PTYPE_L4_UDP) { + struct rte_udp_hdr *udp_hdr = (struct rte_udp_hdr *)(l3_hdr + + mbuf->l3_len); + udp_hdr->dgram_cksum = 0; + if (eth_hdr->ether_type == htons(RTE_ETHER_TYPE_IPV4)) + udp_hdr->dgram_cksum = rte_ipv4_udptcp_cksum(ipv4_hdr, + udp_hdr); + else /* assume ethertype == RTE_ETHER_TYPE_IPV6 */ + udp_hdr->dgram_cksum = rte_ipv6_udptcp_cksum(ipv6_hdr, + udp_hdr); + } +} + +static inline void dpaa_checksum_offload(struct rte_mbuf *mbuf, + struct qm_fd *fd, char *prs_buf) +{ + struct dpaa_eth_parse_results_t *prs; + + DPAA_DP_LOG(DEBUG, " Offloading checksum for mbuf: %p", mbuf); + + prs = GET_TX_PRS(prs_buf); + prs->l3r = 0; + prs->l4r = 0; + if (((mbuf->packet_type & RTE_PTYPE_L3_MASK) == RTE_PTYPE_L3_IPV4) || + ((mbuf->packet_type & RTE_PTYPE_L3_MASK) == + RTE_PTYPE_L3_IPV4_EXT)) + prs->l3r = DPAA_L3_PARSE_RESULT_IPV4; + else if (((mbuf->packet_type & RTE_PTYPE_L3_MASK) == + RTE_PTYPE_L3_IPV6) || + ((mbuf->packet_type & RTE_PTYPE_L3_MASK) == + RTE_PTYPE_L3_IPV6_EXT)) + prs->l3r = 
DPAA_L3_PARSE_RESULT_IPV6; + + if ((mbuf->packet_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP) + prs->l4r = DPAA_L4_PARSE_RESULT_TCP; + else if ((mbuf->packet_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP) + prs->l4r = DPAA_L4_PARSE_RESULT_UDP; + + prs->ip_off[0] = mbuf->l2_len; + prs->l4_off = mbuf->l3_len + mbuf->l2_len; + /* Enable L3 (and L4, if TCP or UDP) HW checksum*/ + fd->cmd = DPAA_FD_CMD_RPD | DPAA_FD_CMD_DTC; +} + +static inline void +dpaa_unsegmented_checksum(struct rte_mbuf *mbuf, struct qm_fd *fd_arr) +{ + if (!mbuf->packet_type) { + struct rte_net_hdr_lens hdr_lens; + + mbuf->packet_type = rte_net_get_ptype(mbuf, &hdr_lens, + RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK + | RTE_PTYPE_L4_MASK); + mbuf->l2_len = hdr_lens.l2_len; + mbuf->l3_len = hdr_lens.l3_len; + } + if (mbuf->data_off < (DEFAULT_TX_ICEOF + + sizeof(struct dpaa_eth_parse_results_t))) { + DPAA_DP_LOG(DEBUG, "Checksum offload Err: " + "Not enough Headroom " + "space for correct Checksum offload." + "So Calculating checksum in Software."); + dpaa_checksum(mbuf); + } else { + dpaa_checksum_offload(mbuf, fd_arr, mbuf->buf_addr); + } +} + +struct rte_mbuf * +dpaa_eth_sg_to_mbuf(const struct qm_fd *fd, uint32_t ifid) +{ + struct dpaa_bp_info *bp_info = DPAA_BPID_TO_POOL_INFO(fd->bpid); + struct rte_mbuf *first_seg, *prev_seg, *cur_seg, *temp; + struct qm_sg_entry *sgt, *sg_temp; + void *vaddr, *sg_vaddr; + int i = 0; + uint8_t fd_offset = fd->offset; + + vaddr = DPAA_MEMPOOL_PTOV(bp_info, qm_fd_addr(fd)); + if (!vaddr) { + DPAA_PMD_ERR("unable to convert physical address"); + return NULL; + } + sgt = vaddr + fd_offset; + sg_temp = &sgt[i++]; + hw_sg_to_cpu(sg_temp); + temp = (struct rte_mbuf *)((char *)vaddr - bp_info->meta_data_size); + sg_vaddr = DPAA_MEMPOOL_PTOV(bp_info, qm_sg_entry_get64(sg_temp)); + + first_seg = (struct rte_mbuf *)((char *)sg_vaddr - + bp_info->meta_data_size); + first_seg->data_off = sg_temp->offset; + first_seg->data_len = sg_temp->length; + first_seg->pkt_len = sg_temp->length; + rte_mbuf_refcnt_set(first_seg, 1); + + first_seg->port = ifid; + first_seg->nb_segs = 1; + first_seg->ol_flags = 0; + prev_seg = first_seg; + while (i < DPAA_SGT_MAX_ENTRIES) { + sg_temp = &sgt[i++]; + hw_sg_to_cpu(sg_temp); + sg_vaddr = DPAA_MEMPOOL_PTOV(bp_info, + qm_sg_entry_get64(sg_temp)); + cur_seg = (struct rte_mbuf *)((char *)sg_vaddr - + bp_info->meta_data_size); + cur_seg->data_off = sg_temp->offset; + cur_seg->data_len = sg_temp->length; + first_seg->pkt_len += sg_temp->length; + first_seg->nb_segs += 1; + rte_mbuf_refcnt_set(cur_seg, 1); + prev_seg->next = cur_seg; + if (sg_temp->final) { + cur_seg->next = NULL; + break; + } + prev_seg = cur_seg; + } + DPAA_DP_LOG(DEBUG, "Received an SG frame len =%d, num_sg =%d", + first_seg->pkt_len, first_seg->nb_segs); + + dpaa_eth_packet_info(first_seg, vaddr); + rte_pktmbuf_free_seg(temp); + + return first_seg; +} + +static inline struct rte_mbuf * +dpaa_eth_fd_to_mbuf(const struct qm_fd *fd, uint32_t ifid) +{ + struct rte_mbuf *mbuf; + struct dpaa_bp_info *bp_info = DPAA_BPID_TO_POOL_INFO(fd->bpid); + void *ptr; + uint8_t format = + (fd->opaque & DPAA_FD_FORMAT_MASK) >> DPAA_FD_FORMAT_SHIFT; + uint16_t offset; + uint32_t length; + + if (unlikely(format == qm_fd_sg)) + return dpaa_eth_sg_to_mbuf(fd, ifid); + + offset = (fd->opaque & DPAA_FD_OFFSET_MASK) >> DPAA_FD_OFFSET_SHIFT; + length = fd->opaque & DPAA_FD_LENGTH_MASK; + + DPAA_DP_LOG(DEBUG, " FD--->MBUF off %d len = %d", offset, length); + + /* Ignoring case when format != qm_fd_contig */ + dpaa_display_frame(fd); 
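+	/* Contiguous frame: translate the FD's buffer address to a virtual
+	 * pointer and step back by the pool's meta_data_size to recover the
+	 * rte_mbuf header that sits just in front of the data buffer.
+	 */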
+ ptr = DPAA_MEMPOOL_PTOV(bp_info, qm_fd_addr(fd)); + + mbuf = (struct rte_mbuf *)((char *)ptr - bp_info->meta_data_size); + /* Prefetch the Parse results and packet data to L1 */ + rte_prefetch0((void *)((uint8_t *)ptr + DEFAULT_RX_ICEOF)); + + mbuf->data_off = offset; + mbuf->data_len = length; + mbuf->pkt_len = length; + + mbuf->port = ifid; + mbuf->nb_segs = 1; + mbuf->ol_flags = 0; + mbuf->next = NULL; + rte_mbuf_refcnt_set(mbuf, 1); + dpaa_eth_packet_info(mbuf, mbuf->buf_addr); + + return mbuf; +} + +/* Specific for LS1043 */ +void +dpaa_rx_cb_no_prefetch(struct qman_fq **fq, struct qm_dqrr_entry **dqrr, + void **bufs, int num_bufs) +{ + struct rte_mbuf *mbuf; + struct dpaa_bp_info *bp_info; + const struct qm_fd *fd; + void *ptr; + struct dpaa_if *dpaa_intf; + uint16_t offset, i; + uint32_t length; + uint8_t format; + + bp_info = DPAA_BPID_TO_POOL_INFO(dqrr[0]->fd.bpid); + ptr = rte_dpaa_mem_ptov(qm_fd_addr(&dqrr[0]->fd)); + rte_prefetch0((void *)((uint8_t *)ptr + DEFAULT_RX_ICEOF)); + bufs[0] = (struct rte_mbuf *)((char *)ptr - bp_info->meta_data_size); + + for (i = 0; i < num_bufs; i++) { + if (i < num_bufs - 1) { + bp_info = DPAA_BPID_TO_POOL_INFO(dqrr[i + 1]->fd.bpid); + ptr = rte_dpaa_mem_ptov(qm_fd_addr(&dqrr[i + 1]->fd)); + rte_prefetch0((void *)((uint8_t *)ptr + + DEFAULT_RX_ICEOF)); + bufs[i + 1] = (struct rte_mbuf *)((char *)ptr - + bp_info->meta_data_size); + } + + fd = &dqrr[i]->fd; + dpaa_intf = fq[0]->dpaa_intf; + + format = (fd->opaque & DPAA_FD_FORMAT_MASK) >> + DPAA_FD_FORMAT_SHIFT; + if (unlikely(format == qm_fd_sg)) { + bufs[i] = dpaa_eth_sg_to_mbuf(fd, dpaa_intf->ifid); + continue; + } + + offset = (fd->opaque & DPAA_FD_OFFSET_MASK) >> + DPAA_FD_OFFSET_SHIFT; + length = fd->opaque & DPAA_FD_LENGTH_MASK; + + mbuf = bufs[i]; + mbuf->data_off = offset; + mbuf->data_len = length; + mbuf->pkt_len = length; + mbuf->port = dpaa_intf->ifid; + + mbuf->nb_segs = 1; + mbuf->ol_flags = 0; + mbuf->next = NULL; + rte_mbuf_refcnt_set(mbuf, 1); + dpaa_eth_packet_info(mbuf, mbuf->buf_addr); + } +} + +void +dpaa_rx_cb(struct qman_fq **fq, struct qm_dqrr_entry **dqrr, + void **bufs, int num_bufs) +{ + struct rte_mbuf *mbuf; + const struct qm_fd *fd; + struct dpaa_if *dpaa_intf; + uint16_t offset, i; + uint32_t length; + uint8_t format; + + for (i = 0; i < num_bufs; i++) { + fd = &dqrr[i]->fd; + dpaa_intf = fq[0]->dpaa_intf; + + format = (fd->opaque & DPAA_FD_FORMAT_MASK) >> + DPAA_FD_FORMAT_SHIFT; + if (unlikely(format == qm_fd_sg)) { + bufs[i] = dpaa_eth_sg_to_mbuf(fd, dpaa_intf->ifid); + continue; + } + + offset = (fd->opaque & DPAA_FD_OFFSET_MASK) >> + DPAA_FD_OFFSET_SHIFT; + length = fd->opaque & DPAA_FD_LENGTH_MASK; + + mbuf = bufs[i]; + mbuf->data_off = offset; + mbuf->data_len = length; + mbuf->pkt_len = length; + mbuf->port = dpaa_intf->ifid; + + mbuf->nb_segs = 1; + mbuf->ol_flags = 0; + mbuf->next = NULL; + rte_mbuf_refcnt_set(mbuf, 1); + dpaa_eth_packet_info(mbuf, mbuf->buf_addr); + } +} + +void dpaa_rx_cb_prepare(struct qm_dqrr_entry *dq, void **bufs) +{ + struct dpaa_bp_info *bp_info = DPAA_BPID_TO_POOL_INFO(dq->fd.bpid); + void *ptr = rte_dpaa_mem_ptov(qm_fd_addr(&dq->fd)); + + /* In case of LS1046, annotation stashing is disabled due to L2 cache + * being bottleneck in case of multicore scanario for this platform. + * So we prefetch the annoation beforehand, so that it is available + * in cache when accessed. 
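+	 * The prefetched region (DEFAULT_RX_ICEOF into the buffer) holds the
+	 * parse results that dpaa_eth_packet_info() later uses to fill in
+	 * the packet type and offload flags.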
+ */ + rte_prefetch0((void *)((uint8_t *)ptr + DEFAULT_RX_ICEOF)); + + *bufs = (struct rte_mbuf *)((char *)ptr - bp_info->meta_data_size); +} + +static uint16_t +dpaa_eth_queue_portal_rx(struct qman_fq *fq, + struct rte_mbuf **bufs, + uint16_t nb_bufs) +{ + int ret; + + if (unlikely(!fq->qp_initialized)) { + ret = rte_dpaa_portal_fq_init((void *)0, fq); + if (ret) { + DPAA_PMD_ERR("Failure in affining portal %d", ret); + return 0; + } + fq->qp_initialized = 1; + } + + return qman_portal_poll_rx(nb_bufs, (void **)bufs, fq->qp); +} + +enum qman_cb_dqrr_result +dpaa_rx_cb_parallel(void *event, + struct qman_portal *qm __always_unused, + struct qman_fq *fq, + const struct qm_dqrr_entry *dqrr, + void **bufs) +{ + u32 ifid = ((struct dpaa_if *)fq->dpaa_intf)->ifid; + struct rte_mbuf *mbuf; + struct rte_event *ev = (struct rte_event *)event; + + mbuf = dpaa_eth_fd_to_mbuf(&dqrr->fd, ifid); + ev->event_ptr = (void *)mbuf; + ev->flow_id = fq->ev.flow_id; + ev->sub_event_type = fq->ev.sub_event_type; + ev->event_type = RTE_EVENT_TYPE_ETHDEV; + ev->op = RTE_EVENT_OP_NEW; + ev->sched_type = fq->ev.sched_type; + ev->queue_id = fq->ev.queue_id; + ev->priority = fq->ev.priority; + ev->impl_opaque = (uint8_t)DPAA_INVALID_MBUF_SEQN; + mbuf->seqn = DPAA_INVALID_MBUF_SEQN; + *bufs = mbuf; + + return qman_cb_dqrr_consume; +} + +enum qman_cb_dqrr_result +dpaa_rx_cb_atomic(void *event, + struct qman_portal *qm __always_unused, + struct qman_fq *fq, + const struct qm_dqrr_entry *dqrr, + void **bufs) +{ + u8 index; + u32 ifid = ((struct dpaa_if *)fq->dpaa_intf)->ifid; + struct rte_mbuf *mbuf; + struct rte_event *ev = (struct rte_event *)event; + + mbuf = dpaa_eth_fd_to_mbuf(&dqrr->fd, ifid); + ev->event_ptr = (void *)mbuf; + ev->flow_id = fq->ev.flow_id; + ev->sub_event_type = fq->ev.sub_event_type; + ev->event_type = RTE_EVENT_TYPE_ETHDEV; + ev->op = RTE_EVENT_OP_NEW; + ev->sched_type = fq->ev.sched_type; + ev->queue_id = fq->ev.queue_id; + ev->priority = fq->ev.priority; + + /* Save active dqrr entries */ + index = DQRR_PTR2IDX(dqrr); + DPAA_PER_LCORE_DQRR_SIZE++; + DPAA_PER_LCORE_DQRR_HELD |= 1 << index; + DPAA_PER_LCORE_DQRR_MBUF(index) = mbuf; + ev->impl_opaque = index + 1; + mbuf->seqn = (uint32_t)index + 1; + *bufs = mbuf; + + return qman_cb_dqrr_defer; +} + +uint16_t dpaa_eth_queue_rx(void *q, + struct rte_mbuf **bufs, + uint16_t nb_bufs) +{ + struct qman_fq *fq = q; + struct qm_dqrr_entry *dq; + uint32_t num_rx = 0, ifid = ((struct dpaa_if *)fq->dpaa_intf)->ifid; + int num_rx_bufs, ret; + uint32_t vdqcr_flags = 0; + + if (unlikely(rte_dpaa_bpid_info == NULL && + rte_eal_process_type() == RTE_PROC_SECONDARY)) + rte_dpaa_bpid_info = fq->bp_array; + + if (likely(fq->is_static)) + return dpaa_eth_queue_portal_rx(fq, bufs, nb_bufs); + + if (unlikely(!RTE_PER_LCORE(dpaa_io))) { + ret = rte_dpaa_portal_init((void *)0); + if (ret) { + DPAA_PMD_ERR("Failure in affining portal"); + return 0; + } + } + + /* Until request for four buffers, we provide exact number of buffers. + * Otherwise we do not set the QM_VDQCR_EXACT flag. + * Not setting QM_VDQCR_EXACT flag can provide two more buffers than + * requested, so we request two less in this case. + */ + if (nb_bufs < 4) { + vdqcr_flags = QM_VDQCR_EXACT; + num_rx_bufs = nb_bufs; + } else { + num_rx_bufs = nb_bufs > DPAA_MAX_DEQUEUE_NUM_FRAMES ? 
+ (DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_bufs - 2); + } + ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags); + if (ret) + return 0; + + do { + dq = qman_dequeue(fq); + if (!dq) + continue; + bufs[num_rx++] = dpaa_eth_fd_to_mbuf(&dq->fd, ifid); + qman_dqrr_consume(fq, dq); + } while (fq->flags & QMAN_FQ_STATE_VDQCR); + + return num_rx; +} + +int +dpaa_eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf, + struct qm_fd *fd, + uint32_t bpid) +{ + struct rte_mbuf *cur_seg = mbuf, *prev_seg = NULL; + struct dpaa_bp_info *bp_info = DPAA_BPID_TO_POOL_INFO(bpid); + struct rte_mbuf *temp, *mi; + struct qm_sg_entry *sg_temp, *sgt; + int i = 0; + + DPAA_DP_LOG(DEBUG, "Creating SG FD to transmit"); + + temp = rte_pktmbuf_alloc(bp_info->mp); + if (!temp) { + DPAA_PMD_ERR("Failure in allocation of mbuf"); + return -1; + } + if (temp->buf_len < ((mbuf->nb_segs * sizeof(struct qm_sg_entry)) + + temp->data_off)) { + DPAA_PMD_ERR("Insufficient space in mbuf for SG entries"); + return -1; + } + + fd->cmd = 0; + fd->opaque_addr = 0; + + if (mbuf->ol_flags & DPAA_TX_CKSUM_OFFLOAD_MASK) { + if (!mbuf->packet_type) { + struct rte_net_hdr_lens hdr_lens; + + mbuf->packet_type = rte_net_get_ptype(mbuf, &hdr_lens, + RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK + | RTE_PTYPE_L4_MASK); + mbuf->l2_len = hdr_lens.l2_len; + mbuf->l3_len = hdr_lens.l3_len; + } + if (temp->data_off < DEFAULT_TX_ICEOF + + sizeof(struct dpaa_eth_parse_results_t)) + temp->data_off = DEFAULT_TX_ICEOF + + sizeof(struct dpaa_eth_parse_results_t); + dcbz_64(temp->buf_addr); + dpaa_checksum_offload(mbuf, fd, temp->buf_addr); + } + + sgt = temp->buf_addr + temp->data_off; + fd->format = QM_FD_SG; + fd->addr = temp->buf_iova; + fd->offset = temp->data_off; + fd->bpid = bpid; + fd->length20 = mbuf->pkt_len; + + while (i < DPAA_SGT_MAX_ENTRIES) { + sg_temp = &sgt[i++]; + sg_temp->opaque = 0; + sg_temp->val = 0; + sg_temp->addr = cur_seg->buf_iova; + sg_temp->offset = cur_seg->data_off; + sg_temp->length = cur_seg->data_len; + if (RTE_MBUF_DIRECT(cur_seg)) { + if (rte_mbuf_refcnt_read(cur_seg) > 1) { + /*If refcnt > 1, invalid bpid is set to ensure + * buffer is not freed by HW. + */ + sg_temp->bpid = 0xff; + rte_mbuf_refcnt_update(cur_seg, -1); + } else { + sg_temp->bpid = + DPAA_MEMPOOL_TO_BPID(cur_seg->pool); + } + cur_seg = cur_seg->next; + } else { + /* Get owner MBUF from indirect buffer */ + mi = rte_mbuf_from_indirect(cur_seg); + if (rte_mbuf_refcnt_read(mi) > 1) { + /*If refcnt > 1, invalid bpid is set to ensure + * owner buffer is not freed by HW. + */ + sg_temp->bpid = 0xff; + } else { + sg_temp->bpid = DPAA_MEMPOOL_TO_BPID(mi->pool); + rte_mbuf_refcnt_update(mi, 1); + } + prev_seg = cur_seg; + cur_seg = cur_seg->next; + prev_seg->next = NULL; + rte_pktmbuf_free(prev_seg); + } + if (cur_seg == NULL) { + sg_temp->final = 1; + cpu_to_hw_sg(sg_temp); + break; + } + cpu_to_hw_sg(sg_temp); + } + return 0; +} + +/* Handle mbufs which are not segmented (non SG) */ +static inline void +tx_on_dpaa_pool_unsegmented(struct rte_mbuf *mbuf, + struct dpaa_bp_info *bp_info, + struct qm_fd *fd_arr) +{ + struct rte_mbuf *mi = NULL; + + if (RTE_MBUF_DIRECT(mbuf)) { + if (rte_mbuf_refcnt_read(mbuf) > 1) { + /* In case of direct mbuf and mbuf being cloned, + * BMAN should _not_ release buffer. + */ + DPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr, 0xff); + /* Buffer should be releasd by EAL */ + rte_mbuf_refcnt_update(mbuf, -1); + } else { + /* In case of direct mbuf and no cloning, mbuf can be + * released by BMAN. 
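+			 * A valid bpid is placed in the FD, so the hardware
+			 * returns the buffer to its BMAN pool once the frame
+			 * has been transmitted.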
+ */ + DPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr, bp_info->bpid); + } + } else { + /* This is data-containing core mbuf: 'mi' */ + mi = rte_mbuf_from_indirect(mbuf); + if (rte_mbuf_refcnt_read(mi) > 1) { + /* In case of indirect mbuf, and mbuf being cloned, + * BMAN should _not_ release it and let EAL release + * it through pktmbuf_free below. + */ + DPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr, 0xff); + } else { + /* In case of indirect mbuf, and no cloning, core mbuf + * should be released by BMAN. + * Increate refcnt of core mbuf so that when + * pktmbuf_free is called and mbuf is released, EAL + * doesn't try to release core mbuf which would have + * been released by BMAN. + */ + rte_mbuf_refcnt_update(mi, 1); + DPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr, bp_info->bpid); + } + rte_pktmbuf_free(mbuf); + } + + if (mbuf->ol_flags & DPAA_TX_CKSUM_OFFLOAD_MASK) + dpaa_unsegmented_checksum(mbuf, fd_arr); +} + +/* Handle all mbufs on dpaa BMAN managed pool */ +static inline uint16_t +tx_on_dpaa_pool(struct rte_mbuf *mbuf, + struct dpaa_bp_info *bp_info, + struct qm_fd *fd_arr) +{ + DPAA_DP_LOG(DEBUG, "BMAN offloaded buffer, mbuf: %p", mbuf); + + if (mbuf->nb_segs == 1) { + /* Case for non-segmented buffers */ + tx_on_dpaa_pool_unsegmented(mbuf, bp_info, fd_arr); + } else if (mbuf->nb_segs > 1 && + mbuf->nb_segs <= DPAA_SGT_MAX_ENTRIES) { + if (dpaa_eth_mbuf_to_sg_fd(mbuf, fd_arr, bp_info->bpid)) { + DPAA_PMD_DEBUG("Unable to create Scatter Gather FD"); + return 1; + } + } else { + DPAA_PMD_DEBUG("Number of Segments not supported"); + return 1; + } + + return 0; +} + +/* Handle all mbufs on an external pool (non-dpaa) */ +static inline struct rte_mbuf * +reallocate_mbuf(struct qman_fq *txq, struct rte_mbuf *mbuf) +{ + struct dpaa_if *dpaa_intf = txq->dpaa_intf; + struct dpaa_bp_info *bp_info = dpaa_intf->bp_info; + struct rte_mbuf *new_mbufs[DPAA_SGT_MAX_ENTRIES + 1] = {0}; + struct rte_mbuf *temp_mbuf; + int num_new_segs, mbuf_greater, ret, extra_seg = 0, i = 0; + uint64_t mbufs_size, bytes_to_copy, offset1 = 0, offset2 = 0; + char *data; + + DPAA_DP_LOG(DEBUG, "Reallocating transmit buffer"); + + mbufs_size = bp_info->size - + bp_info->meta_data_size - RTE_PKTMBUF_HEADROOM; + extra_seg = !!(mbuf->pkt_len % mbufs_size); + num_new_segs = (mbuf->pkt_len / mbufs_size) + extra_seg; + + ret = rte_pktmbuf_alloc_bulk(bp_info->mp, new_mbufs, num_new_segs); + if (ret != 0) { + DPAA_DP_LOG(DEBUG, "Allocation for new buffers failed"); + return NULL; + } + + temp_mbuf = mbuf; + + while (temp_mbuf) { + /* If mbuf data is less than new mbuf remaining memory */ + if ((temp_mbuf->data_len - offset1) < (mbufs_size - offset2)) { + bytes_to_copy = temp_mbuf->data_len - offset1; + mbuf_greater = -1; + /* If mbuf data is greater than new mbuf remaining memory */ + } else if ((temp_mbuf->data_len - offset1) > + (mbufs_size - offset2)) { + bytes_to_copy = mbufs_size - offset2; + mbuf_greater = 1; + /* if mbuf data is equal to new mbuf remaining memory */ + } else { + bytes_to_copy = temp_mbuf->data_len - offset1; + mbuf_greater = 0; + } + + /* Copy the data */ + data = rte_pktmbuf_append(new_mbufs[0], bytes_to_copy); + + rte_memcpy((uint8_t *)data, rte_pktmbuf_mtod_offset(mbuf, + void *, offset1), bytes_to_copy); + + /* Set new offsets and the temp buffers */ + if (mbuf_greater == -1) { + offset1 = 0; + offset2 += bytes_to_copy; + temp_mbuf = temp_mbuf->next; + } else if (mbuf_greater == 1) { + offset2 = 0; + offset1 += bytes_to_copy; + new_mbufs[i]->next = new_mbufs[i + 1]; + new_mbufs[0]->nb_segs++; + i++; + } else { + offset1 = 0; + 
offset2 = 0; + temp_mbuf = temp_mbuf->next; + new_mbufs[i]->next = new_mbufs[i + 1]; + if (new_mbufs[i + 1]) + new_mbufs[0]->nb_segs++; + i++; + } + } + + /* Copy other required fields */ + new_mbufs[0]->ol_flags = mbuf->ol_flags; + new_mbufs[0]->packet_type = mbuf->packet_type; + new_mbufs[0]->tx_offload = mbuf->tx_offload; + + rte_pktmbuf_free(mbuf); + + return new_mbufs[0]; +} + +uint16_t +dpaa_eth_queue_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs) +{ + struct rte_mbuf *mbuf, *mi = NULL; + struct rte_mempool *mp; + struct dpaa_bp_info *bp_info; + struct qm_fd fd_arr[DPAA_TX_BURST_SIZE]; + uint32_t frames_to_send, loop, sent = 0; + uint16_t state; + int ret, realloc_mbuf = 0; + uint32_t seqn, index, flags[DPAA_TX_BURST_SIZE] = {0}; + + if (unlikely(!RTE_PER_LCORE(dpaa_io))) { + ret = rte_dpaa_portal_init((void *)0); + if (ret) { + DPAA_PMD_ERR("Failure in affining portal"); + return 0; + } + } + + DPAA_DP_LOG(DEBUG, "Transmitting %d buffers on queue: %p", nb_bufs, q); + + while (nb_bufs) { + frames_to_send = (nb_bufs > DPAA_TX_BURST_SIZE) ? + DPAA_TX_BURST_SIZE : nb_bufs; + for (loop = 0; loop < frames_to_send; loop++) { + mbuf = *(bufs++); + /* In case the data offset is not multiple of 16, + * FMAN can stall because of an errata. So reallocate + * the buffer in such case. + */ + if (dpaa_svr_family == SVR_LS1043A_FAMILY && + (mbuf->data_off & 0x7F) != 0x0) + realloc_mbuf = 1; + seqn = mbuf->seqn; + if (seqn != DPAA_INVALID_MBUF_SEQN) { + index = seqn - 1; + if (DPAA_PER_LCORE_DQRR_HELD & (1 << index)) { + flags[loop] = + ((index & QM_EQCR_DCA_IDXMASK) << 8); + flags[loop] |= QMAN_ENQUEUE_FLAG_DCA; + DPAA_PER_LCORE_DQRR_SIZE--; + DPAA_PER_LCORE_DQRR_HELD &= + ~(1 << index); + } + } + + if (likely(RTE_MBUF_DIRECT(mbuf))) { + mp = mbuf->pool; + bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp); + if (likely(mp->ops_index == + bp_info->dpaa_ops_index && + mbuf->nb_segs == 1 && + realloc_mbuf == 0 && + rte_mbuf_refcnt_read(mbuf) == 1)) { + DPAA_MBUF_TO_CONTIG_FD(mbuf, + &fd_arr[loop], bp_info->bpid); + if (mbuf->ol_flags & + DPAA_TX_CKSUM_OFFLOAD_MASK) + dpaa_unsegmented_checksum(mbuf, + &fd_arr[loop]); + continue; + } + } else { + mi = rte_mbuf_from_indirect(mbuf); + mp = mi->pool; + } + + bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp); + if (unlikely(mp->ops_index != bp_info->dpaa_ops_index || + realloc_mbuf == 1)) { + struct rte_mbuf *temp_mbuf; + + temp_mbuf = reallocate_mbuf(q, mbuf); + if (!temp_mbuf) { + /* Set frames_to_send & nb_bufs so + * that packets are transmitted till + * previous frame. + */ + frames_to_send = loop; + nb_bufs = loop; + goto send_pkts; + } + mbuf = temp_mbuf; + realloc_mbuf = 0; + } + + state = tx_on_dpaa_pool(mbuf, bp_info, + &fd_arr[loop]); + if (unlikely(state)) { + /* Set frames_to_send & nb_bufs so + * that packets are transmitted till + * previous frame. + */ + frames_to_send = loop; + nb_bufs = loop; + goto send_pkts; + } + } + +send_pkts: + loop = 0; + while (loop < frames_to_send) { + loop += qman_enqueue_multi(q, &fd_arr[loop], + &flags[loop], + frames_to_send - loop); + } + nb_bufs -= frames_to_send; + sent += frames_to_send; + } + + DPAA_DP_LOG(DEBUG, "Transmitted %d buffers on queue: %p", sent, q); + + return sent; +} + +uint16_t dpaa_eth_tx_drop_all(void *q __rte_unused, + struct rte_mbuf **bufs __rte_unused, + uint16_t nb_bufs __rte_unused) +{ + DPAA_DP_LOG(DEBUG, "Drop all packets"); + + /* Drop all incoming packets. 
No need to free packets here + * because the rte_eth f/w frees up the packets through tx_buffer + * callback in case this functions returns count less than nb_bufs + */ + return 0; +} diff --git a/src/spdk/dpdk/drivers/net/dpaa/dpaa_rxtx.h b/src/spdk/dpdk/drivers/net/dpaa/dpaa_rxtx.h new file mode 100644 index 000000000..4f896fba1 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/dpaa/dpaa_rxtx.h @@ -0,0 +1,276 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright 2016 Freescale Semiconductor, Inc. All rights reserved. + * Copyright 2017 NXP + * + */ + +#ifndef __DPDK_RXTX_H__ +#define __DPDK_RXTX_H__ + +/* internal offset from where IC is copied to packet buffer*/ +#define DEFAULT_ICIOF 32 +/* IC transfer size */ +#define DEFAULT_ICSZ 48 + +/* IC offsets from buffer header address */ +#define DEFAULT_RX_ICEOF 16 +#define DEFAULT_TX_ICEOF 16 + +/* + * Values for the L3R field of the FM Parse Results + */ +/* L3 Type field: First IP Present IPv4 */ +#define DPAA_L3_PARSE_RESULT_IPV4 0x80 +/* L3 Type field: First IP Present IPv6 */ +#define DPAA_L3_PARSE_RESULT_IPV6 0x40 +/* Values for the L4R field of the FM Parse Results + * See $8.8.4.7.20 - L4 HXS - L4 Results from DPAA-Rev2 Reference Manual. + */ +/* L4 Type field: UDP */ +#define DPAA_L4_PARSE_RESULT_UDP 0x40 +/* L4 Type field: TCP */ +#define DPAA_L4_PARSE_RESULT_TCP 0x20 + +#define DPAA_MAX_DEQUEUE_NUM_FRAMES 63 + /** + +/** + * Enable/Disable TX loopback + * + * @param port + * The port identifier of the Ethernet device. + * @param on + * 1 - Enable TX loopback. + * 0 - Disable TX loopback. + * @return + * - (0) if successful. + * - (-ENODEV) if *port* invalid. + * - (-EINVAL) if bad parameter. + */ +int +rte_pmd_dpaa_set_tx_loopback(uint8_t port, uint8_t on); + +#endif /* _PMD_DPAA_H_ */ diff --git a/src/spdk/dpdk/drivers/net/dpaa/rte_pmd_dpaa_version.map b/src/spdk/dpdk/drivers/net/dpaa/rte_pmd_dpaa_version.map new file mode 100644 index 000000000..774aa0de4 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/dpaa/rte_pmd_dpaa_version.map @@ -0,0 +1,14 @@ +DPDK_20.0 { + global: + + rte_pmd_dpaa_set_tx_loopback; + + local: *; +}; + +INTERNAL { + global: + + dpaa_eth_eventq_attach; + dpaa_eth_eventq_detach; +}; diff --git a/src/spdk/dpdk/drivers/net/dpaa2/Makefile b/src/spdk/dpdk/drivers/net/dpaa2/Makefile new file mode 100644 index 000000000..6f38c18b9 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/dpaa2/Makefile @@ -0,0 +1,47 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved. 
+# Copyright 2016-2019 NXP +# + +include $(RTE_SDK)/mk/rte.vars.mk + +# +# library name +# +LIB = librte_pmd_dpaa2.a + +CFLAGS += -O3 +CFLAGS += $(WERROR_FLAGS) +CFLAGS += -I$(RTE_SDK)/drivers/common/dpaax +CFLAGS += -I$(RTE_SDK)/drivers/net/dpaa2 +CFLAGS += -I$(RTE_SDK)/drivers/net/dpaa2/mc +CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc +CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/qbman/include +CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/mc +CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/portal +CFLAGS += -I$(RTE_SDK)/drivers/mempool/dpaa2 + +# versioning export map +EXPORT_MAP := rte_pmd_dpaa2_version.map + +SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += base/dpaa2_hw_dpni.c +SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += dpaa2_rxtx.c +SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += dpaa2_ethdev.c +SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += dpaa2_flow.c +SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += dpaa2_mux.c +SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += dpaa2_sparser.c +SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += mc/dpni.c +SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += mc/dpkg.c +SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += mc/dpdmux.c +SRCS-$(CONFIG_RTE_LIBRTE_IEEE1588) += mc/dprtc.c +SRCS-$(CONFIG_RTE_LIBRTE_IEEE1588) += dpaa2_ptp.c + +LDLIBS += -lrte_bus_fslmc +LDLIBS += -lrte_mempool_dpaa2 +LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring +LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs +LDLIBS += -lrte_common_dpaax + +# install this header file +SYMLINK-$(CONFIG_RTE_LIBRTE_DPAA2_PMD)-include := rte_pmd_dpaa2.h +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/src/spdk/dpdk/drivers/net/dpaa2/base/dpaa2_hw_dpni.c b/src/spdk/dpdk/drivers/net/dpaa2/base/dpaa2_hw_dpni.c new file mode 100644 index 000000000..34de0d1f7 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/dpaa2/base/dpaa2_hw_dpni.c @@ -0,0 +1,387 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved. 
+ * Copyright 2016-2019 NXP + * + */ + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "../dpaa2_ethdev.h" + +int +dpaa2_distset_to_dpkg_profile_cfg( + uint64_t req_dist_set, + struct dpkg_profile_cfg *kg_cfg); + +int +rte_pmd_dpaa2_set_custom_hash(uint16_t port_id, + uint16_t offset, + uint8_t size) +{ + struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id]; + struct dpaa2_dev_priv *priv = eth_dev->data->dev_private; + struct fsl_mc_io *dpni = priv->hw; + struct dpni_rx_tc_dist_cfg tc_cfg; + struct dpkg_profile_cfg kg_cfg; + void *p_params; + int ret, tc_index = 0; + + p_params = rte_zmalloc( + NULL, DIST_PARAM_IOVA_SIZE, RTE_CACHE_LINE_SIZE); + if (!p_params) { + DPAA2_PMD_ERR("Unable to allocate flow-dist parameters"); + return -ENOMEM; + } + + kg_cfg.extracts[0].type = DPKG_EXTRACT_FROM_DATA; + kg_cfg.extracts[0].extract.from_data.offset = offset; + kg_cfg.extracts[0].extract.from_data.size = size; + kg_cfg.extracts[0].num_of_byte_masks = 0; + kg_cfg.num_extracts = 1; + + ret = dpkg_prepare_key_cfg(&kg_cfg, p_params); + if (ret) { + DPAA2_PMD_ERR("Unable to prepare extract parameters"); + rte_free(p_params); + return ret; + } + + memset(&tc_cfg, 0, sizeof(struct dpni_rx_tc_dist_cfg)); + tc_cfg.key_cfg_iova = (size_t)(DPAA2_VADDR_TO_IOVA(p_params)); + tc_cfg.dist_size = eth_dev->data->nb_rx_queues; + tc_cfg.dist_mode = DPNI_DIST_MODE_HASH; + + ret = dpni_set_rx_tc_dist(dpni, CMD_PRI_LOW, priv->token, tc_index, + &tc_cfg); + rte_free(p_params); + if (ret) { + DPAA2_PMD_ERR( + "Setting distribution for Rx failed with err: %d", + ret); + return ret; + } + + return 0; +} + +int +dpaa2_setup_flow_dist(struct rte_eth_dev *eth_dev, + uint64_t req_dist_set) +{ + struct dpaa2_dev_priv *priv = eth_dev->data->dev_private; + struct fsl_mc_io *dpni = priv->hw; + struct dpni_rx_tc_dist_cfg tc_cfg; + struct dpkg_profile_cfg kg_cfg; + void *p_params; + int ret, tc_index = 0; + + p_params = rte_malloc( + NULL, DIST_PARAM_IOVA_SIZE, RTE_CACHE_LINE_SIZE); + if (!p_params) { + DPAA2_PMD_ERR("Unable to allocate flow-dist parameters"); + return -ENOMEM; + } + memset(p_params, 0, DIST_PARAM_IOVA_SIZE); + memset(&tc_cfg, 0, sizeof(struct dpni_rx_tc_dist_cfg)); + + ret = dpaa2_distset_to_dpkg_profile_cfg(req_dist_set, &kg_cfg); + if (ret) { + DPAA2_PMD_ERR("Given RSS Hash (%" PRIx64 ") not supported", + req_dist_set); + rte_free(p_params); + return ret; + } + tc_cfg.key_cfg_iova = (uint64_t)(DPAA2_VADDR_TO_IOVA(p_params)); + tc_cfg.dist_size = eth_dev->data->nb_rx_queues; + tc_cfg.dist_mode = DPNI_DIST_MODE_HASH; + + ret = dpkg_prepare_key_cfg(&kg_cfg, p_params); + if (ret) { + DPAA2_PMD_ERR("Unable to prepare extract parameters"); + rte_free(p_params); + return ret; + } + + ret = dpni_set_rx_tc_dist(dpni, CMD_PRI_LOW, priv->token, tc_index, + &tc_cfg); + rte_free(p_params); + if (ret) { + DPAA2_PMD_ERR( + "Setting distribution for Rx failed with err: %d", + ret); + return ret; + } + + return 0; +} + +int dpaa2_remove_flow_dist( + struct rte_eth_dev *eth_dev, + uint8_t tc_index) +{ + struct dpaa2_dev_priv *priv = eth_dev->data->dev_private; + struct fsl_mc_io *dpni = priv->hw; + struct dpni_rx_tc_dist_cfg tc_cfg; + struct dpkg_profile_cfg kg_cfg; + void *p_params; + int ret; + + p_params = rte_malloc( + NULL, DIST_PARAM_IOVA_SIZE, RTE_CACHE_LINE_SIZE); + if (!p_params) { + DPAA2_PMD_ERR("Unable to allocate flow-dist parameters"); + return -ENOMEM; + } + memset(p_params, 0, DIST_PARAM_IOVA_SIZE); + memset(&tc_cfg, 
0, sizeof(struct dpni_rx_tc_dist_cfg)); + kg_cfg.num_extracts = 0; + tc_cfg.key_cfg_iova = (uint64_t)(DPAA2_VADDR_TO_IOVA(p_params)); + tc_cfg.dist_size = 0; + tc_cfg.dist_mode = DPNI_DIST_MODE_NONE; + + ret = dpkg_prepare_key_cfg(&kg_cfg, p_params); + if (ret) { + DPAA2_PMD_ERR("Unable to prepare extract parameters"); + rte_free(p_params); + return ret; + } + + ret = dpni_set_rx_tc_dist(dpni, CMD_PRI_LOW, priv->token, tc_index, + &tc_cfg); + rte_free(p_params); + if (ret) + DPAA2_PMD_ERR( + "Setting distribution for Rx failed with err: %d", + ret); + return ret; +} + +int +dpaa2_distset_to_dpkg_profile_cfg( + uint64_t req_dist_set, + struct dpkg_profile_cfg *kg_cfg) +{ + uint32_t loop = 0, i = 0, dist_field = 0; + int l2_configured = 0, l3_configured = 0; + int l4_configured = 0, sctp_configured = 0; + + memset(kg_cfg, 0, sizeof(struct dpkg_profile_cfg)); + while (req_dist_set) { + if (req_dist_set % 2 != 0) { + dist_field = 1U << loop; + switch (dist_field) { + case ETH_RSS_L2_PAYLOAD: + + if (l2_configured) + break; + l2_configured = 1; + + kg_cfg->extracts[i].extract.from_hdr.prot = + NET_PROT_ETH; + kg_cfg->extracts[i].extract.from_hdr.field = + NH_FLD_ETH_TYPE; + kg_cfg->extracts[i].type = + DPKG_EXTRACT_FROM_HDR; + kg_cfg->extracts[i].extract.from_hdr.type = + DPKG_FULL_FIELD; + i++; + break; + + case ETH_RSS_IPV4: + case ETH_RSS_FRAG_IPV4: + case ETH_RSS_NONFRAG_IPV4_OTHER: + case ETH_RSS_IPV6: + case ETH_RSS_FRAG_IPV6: + case ETH_RSS_NONFRAG_IPV6_OTHER: + case ETH_RSS_IPV6_EX: + + if (l3_configured) + break; + l3_configured = 1; + + kg_cfg->extracts[i].extract.from_hdr.prot = + NET_PROT_IP; + kg_cfg->extracts[i].extract.from_hdr.field = + NH_FLD_IP_SRC; + kg_cfg->extracts[i].type = + DPKG_EXTRACT_FROM_HDR; + kg_cfg->extracts[i].extract.from_hdr.type = + DPKG_FULL_FIELD; + i++; + + kg_cfg->extracts[i].extract.from_hdr.prot = + NET_PROT_IP; + kg_cfg->extracts[i].extract.from_hdr.field = + NH_FLD_IP_DST; + kg_cfg->extracts[i].type = + DPKG_EXTRACT_FROM_HDR; + kg_cfg->extracts[i].extract.from_hdr.type = + DPKG_FULL_FIELD; + i++; + + kg_cfg->extracts[i].extract.from_hdr.prot = + NET_PROT_IP; + kg_cfg->extracts[i].extract.from_hdr.field = + NH_FLD_IP_PROTO; + kg_cfg->extracts[i].type = + DPKG_EXTRACT_FROM_HDR; + kg_cfg->extracts[i].extract.from_hdr.type = + DPKG_FULL_FIELD; + kg_cfg->num_extracts++; + i++; + break; + + case ETH_RSS_NONFRAG_IPV4_TCP: + case ETH_RSS_NONFRAG_IPV6_TCP: + case ETH_RSS_NONFRAG_IPV4_UDP: + case ETH_RSS_NONFRAG_IPV6_UDP: + case ETH_RSS_IPV6_TCP_EX: + case ETH_RSS_IPV6_UDP_EX: + + if (l4_configured) + break; + l4_configured = 1; + + kg_cfg->extracts[i].extract.from_hdr.prot = + NET_PROT_TCP; + kg_cfg->extracts[i].extract.from_hdr.field = + NH_FLD_TCP_PORT_SRC; + kg_cfg->extracts[i].type = + DPKG_EXTRACT_FROM_HDR; + kg_cfg->extracts[i].extract.from_hdr.type = + DPKG_FULL_FIELD; + i++; + + kg_cfg->extracts[i].extract.from_hdr.prot = + NET_PROT_TCP; + kg_cfg->extracts[i].extract.from_hdr.field = + NH_FLD_TCP_PORT_SRC; + kg_cfg->extracts[i].type = + DPKG_EXTRACT_FROM_HDR; + kg_cfg->extracts[i].extract.from_hdr.type = + DPKG_FULL_FIELD; + i++; + break; + + case ETH_RSS_NONFRAG_IPV4_SCTP: + case ETH_RSS_NONFRAG_IPV6_SCTP: + + if (sctp_configured) + break; + sctp_configured = 1; + + kg_cfg->extracts[i].extract.from_hdr.prot = + NET_PROT_SCTP; + kg_cfg->extracts[i].extract.from_hdr.field = + NH_FLD_SCTP_PORT_SRC; + kg_cfg->extracts[i].type = + DPKG_EXTRACT_FROM_HDR; + kg_cfg->extracts[i].extract.from_hdr.type = + DPKG_FULL_FIELD; + i++; + + 
kg_cfg->extracts[i].extract.from_hdr.prot = + NET_PROT_SCTP; + kg_cfg->extracts[i].extract.from_hdr.field = + NH_FLD_SCTP_PORT_DST; + kg_cfg->extracts[i].type = + DPKG_EXTRACT_FROM_HDR; + kg_cfg->extracts[i].extract.from_hdr.type = + DPKG_FULL_FIELD; + i++; + break; + + default: + DPAA2_PMD_WARN( + "Unsupported flow dist option %x", + dist_field); + return -EINVAL; + } + } + req_dist_set = req_dist_set >> 1; + loop++; + } + kg_cfg->num_extracts = i; + return 0; +} + +int +dpaa2_attach_bp_list(struct dpaa2_dev_priv *priv, + void *blist) +{ + /* Function to attach a DPNI with a buffer pool list. Buffer pool list + * handle is passed in blist. + */ + int32_t retcode; + struct fsl_mc_io *dpni = priv->hw; + struct dpni_pools_cfg bpool_cfg; + struct dpaa2_bp_list *bp_list = (struct dpaa2_bp_list *)blist; + struct dpni_buffer_layout layout; + int tot_size; + + /* ... rx buffer layout . + * Check alignment for buffer layouts first + */ + + /* ... rx buffer layout ... */ + tot_size = RTE_PKTMBUF_HEADROOM; + tot_size = RTE_ALIGN_CEIL(tot_size, DPAA2_PACKET_LAYOUT_ALIGN); + + memset(&layout, 0, sizeof(struct dpni_buffer_layout)); + layout.options = DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM | + DPNI_BUF_LAYOUT_OPT_FRAME_STATUS | + DPNI_BUF_LAYOUT_OPT_PARSER_RESULT | + DPNI_BUF_LAYOUT_OPT_DATA_ALIGN | + DPNI_BUF_LAYOUT_OPT_TIMESTAMP | + DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE; + + layout.pass_timestamp = true; + layout.pass_frame_status = 1; + layout.private_data_size = DPAA2_FD_PTA_SIZE; + layout.pass_parser_result = 1; + layout.data_align = DPAA2_PACKET_LAYOUT_ALIGN; + layout.data_head_room = tot_size - DPAA2_FD_PTA_SIZE - + DPAA2_MBUF_HW_ANNOTATION; + retcode = dpni_set_buffer_layout(dpni, CMD_PRI_LOW, priv->token, + DPNI_QUEUE_RX, &layout); + if (retcode) { + DPAA2_PMD_ERR("Error configuring buffer pool Rx layout (%d)", + retcode); + return retcode; + } + + /*Attach buffer pool to the network interface as described by the user*/ + memset(&bpool_cfg, 0, sizeof(struct dpni_pools_cfg)); + bpool_cfg.num_dpbp = 1; + bpool_cfg.pools[0].dpbp_id = bp_list->buf_pool.dpbp_node->dpbp_id; + bpool_cfg.pools[0].backup_pool = 0; + bpool_cfg.pools[0].buffer_size = RTE_ALIGN_CEIL(bp_list->buf_pool.size, + DPAA2_PACKET_LAYOUT_ALIGN); + bpool_cfg.pools[0].priority_mask = 0; + + retcode = dpni_set_pools(dpni, CMD_PRI_LOW, priv->token, &bpool_cfg); + if (retcode != 0) { + DPAA2_PMD_ERR("Error configuring buffer pool on interface." + " bpid = %d error code = %d", + bpool_cfg.pools[0].dpbp_id, retcode); + return retcode; + } + + priv->bp_list = bp_list; + return 0; +} diff --git a/src/spdk/dpdk/drivers/net/dpaa2/base/dpaa2_hw_dpni_annot.h b/src/spdk/dpdk/drivers/net/dpaa2/base/dpaa2_hw_dpni_annot.h new file mode 100644 index 000000000..7e5e499b6 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/dpaa2/base/dpaa2_hw_dpni_annot.h @@ -0,0 +1,313 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved. 
+ * Copyright 2016,2019 NXP + * + */ + +/** + * @file + * + * DPNI packet parse results - implementation internal + */ + +#ifndef _DPAA2_HW_DPNI_ANNOT_H_ +#define _DPAA2_HW_DPNI_ANNOT_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +/* Annotation valid bits in FD FRC */ +#define DPAA2_FD_FRC_FASV 0x8000 +#define DPAA2_FD_FRC_FAEADV 0x4000 +#define DPAA2_FD_FRC_FAPRV 0x2000 +#define DPAA2_FD_FRC_FAIADV 0x1000 +#define DPAA2_FD_FRC_FASWOV 0x0800 +#define DPAA2_FD_FRC_FAICFDV 0x0400 + +/* Annotation bits in FD CTRL */ +#define DPAA2_FD_CTRL_ASAL 0x00020000 /* ASAL = 128 */ +#define DPAA2_FD_CTRL_PTA 0x00800000 +#define DPAA2_FD_CTRL_PTV1 0x00400000 + +/* Frame annotation status */ +struct dpaa2_fas { + uint8_t reserved; + uint8_t ppid; + __le16 ifpid; + __le32 status; +} __rte_packed; + +/** + * HW Packet Annotation Register structures + */ +struct dpaa2_annot_hdr { + /**< word1: Frame Annotation Status (8 bytes)*/ + uint64_t word1; + + /**< word2: Time Stamp (8 bytes)*/ + uint64_t word2; + + /**< word3: Next Hdr + FAF Extension + FAF (2 + 2 + 4 bytes)*/ + uint64_t word3; + + /**< word4: Frame Annotation Flags-FAF (8 bytes) */ + uint64_t word4; + + /**< word5: + * ShimOffset_1 + ShimOffset_2 + IPPIDOffset + EthOffset + + * LLC+SNAPOffset + VLANTCIOffset_1 + VLANTCIOffset_n + + * LastETypeOffset (1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 bytes) + */ + uint64_t word5; + + /**< word6: + * PPPoEOffset + MPLSOffset_1 + MPLSOffset_n + ARPorIPOffset_1 + * + IPOffset_norMInEncapO + GREOffset + L4Offset + + * GTPorESPorIPSecOffset(1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 bytes) + */ + uint64_t word6; + + /**< word7: + * RoutingHdrOfset1 + RoutingHdrOfset2 + NxtHdrOffset + * + IPv6FragOffset + GrossRunningSum + * + RunningSum(1 + 1 + 1 + 1 + 2 + 2 bytes) + */ + uint64_t word7; + + /**< word8: + * ParseErrorcode + Soft Parsing Context (1 + 7 bytes) + */ + uint64_t word8; +}; + +/** + * Internal Macros to get/set Packet annotation header + */ + +/** General Macro to define a particular bit position*/ +#define BIT_POS(x) ((uint64_t)1 << ((x))) +/** Set a bit in the variable */ +#define BIT_SET_AT_POS(var, pos) ((var) |= (pos)) +/** Reset the bit in the variable */ +#define BIT_RESET_AT_POS(var, pos) ((var) &= ~(pos)) +/** Check the bit is set in the variable */ +#define BIT_ISSET_AT_POS(var, pos) (((var) & (pos)) ? 
1 : 0) +/** + * Macrso to define bit position in word3 + */ +#define NEXT_HDR(var) ((uint64_t)(var) & 0xFFFF000000000000) +#define FAF_EXTN_IPV6_ROUTE_HDR_PRESENT(var) BIT_POS(16) +#define FAF_EXTN_RESERVED(var) ((uint64_t)(var) & 0x00007FFF00000000) +#define FAF_USER_DEFINED_RESERVED(var) ((uint64_t)(var) & 0x00000000FF000000) +#define SHIM_SHELL_SOFT_PARSING_ERRROR BIT_POS(23) +#define PARSING_ERROR BIT_POS(22) +#define L2_ETH_MAC_PRESENT BIT_POS(21) +#define L2_ETH_MAC_UNICAST BIT_POS(20) +#define L2_ETH_MAC_MULTICAST BIT_POS(19) +#define L2_ETH_MAC_BROADCAST BIT_POS(18) +#define L2_ETH_FRAME_IS_BPDU BIT_POS(17) +#define L2_ETH_FCOE_PRESENT BIT_POS(16) +#define L2_ETH_FIP_PRESENT BIT_POS(15) +#define L2_ETH_PARSING_ERROR BIT_POS(14) +#define L2_LLC_SNAP_PRESENT BIT_POS(13) +#define L2_UNKNOWN_LLC_OUI BIT_POS(12) +#define L2_LLC_SNAP_ERROR BIT_POS(11) +#define L2_VLAN_1_PRESENT BIT_POS(10) +#define L2_VLAN_N_PRESENT BIT_POS(9) +#define L2_VLAN_CFI_BIT_PRESENT BIT_POS(8) +#define L2_VLAN_PARSING_ERROR BIT_POS(7) +#define L2_PPPOE_PPP_PRESENT BIT_POS(6) +#define L2_PPPOE_PPP_PARSING_ERROR BIT_POS(5) +#define L2_MPLS_1_PRESENT BIT_POS(4) +#define L2_MPLS_N_PRESENT BIT_POS(3) +#define L2_MPLS_PARSING_ERROR BIT_POS(2) +#define L2_ARP_PRESENT BIT_POS(1) +#define L2_ARP_PARSING_ERROR BIT_POS(0) +/** + * Macrso to define bit position in word4 + */ +#define L2_UNKNOWN_PROTOCOL BIT_POS(63) +#define L2_SOFT_PARSING_ERROR BIT_POS(62) +#define L3_IPV4_1_PRESENT BIT_POS(61) +#define L3_IPV4_1_UNICAST BIT_POS(60) +#define L3_IPV4_1_MULTICAST BIT_POS(59) +#define L3_IPV4_1_BROADCAST BIT_POS(58) +#define L3_IPV4_N_PRESENT BIT_POS(57) +#define L3_IPV4_N_UNICAST BIT_POS(56) +#define L3_IPV4_N_MULTICAST BIT_POS(55) +#define L3_IPV4_N_BROADCAST BIT_POS(54) +#define L3_IPV6_1_PRESENT BIT_POS(53) +#define L3_IPV6_1_UNICAST BIT_POS(52) +#define L3_IPV6_1_MULTICAST BIT_POS(51) +#define L3_IPV6_N_PRESENT BIT_POS(50) +#define L3_IPV6_N_UNICAST BIT_POS(49) +#define L3_IPV6_N_MULTICAST BIT_POS(48) +#define L3_IP_1_OPT_PRESENT BIT_POS(47) +#define L3_IP_1_UNKNOWN_PROTOCOL BIT_POS(46) +#define L3_IP_1_MORE_FRAGMENT BIT_POS(45) +#define L3_IP_1_FIRST_FRAGMENT BIT_POS(44) +#define L3_IP_1_PARSING_ERROR BIT_POS(43) +#define L3_IP_N_OPT_PRESENT BIT_POS(42) +#define L3_IP_N_UNKNOWN_PROTOCOL BIT_POS(41) +#define L3_IP_N_MORE_FRAGMENT BIT_POS(40) +#define L3_IP_N_FIRST_FRAGMENT BIT_POS(39) +#define L3_PROTO_ICMP_PRESENT BIT_POS(38) +#define L3_PROTO_IGMP_PRESENT BIT_POS(37) +#define L3_PROTO_ICMPV6_PRESENT BIT_POS(36) +#define L3_PROTO_UDP_LIGHT_PRESENT BIT_POS(35) +#define L3_IP_N_PARSING_ERROR BIT_POS(34) +#define L3_MIN_ENCAP_PRESENT BIT_POS(33) +#define L3_MIN_ENCAP_SBIT_PRESENT BIT_POS(32) +#define L3_MIN_ENCAP_PARSING_ERROR BIT_POS(31) +#define L3_PROTO_GRE_PRESENT BIT_POS(30) +#define L3_PROTO_GRE_RBIT_PRESENT BIT_POS(29) +#define L3_PROTO_GRE_PARSING_ERROR BIT_POS(28) +#define L3_IP_UNKNOWN_PROTOCOL BIT_POS(27) +#define L3_SOFT_PARSING_ERROR BIT_POS(26) +#define L3_PROTO_UDP_PRESENT BIT_POS(25) +#define L3_PROTO_UDP_PARSING_ERROR BIT_POS(24) +#define L3_PROTO_TCP_PRESENT BIT_POS(23) +#define L3_PROTO_TCP_OPT_PRESENT BIT_POS(22) +#define L3_PROTO_TCP_CTRL_BIT_6_TO_11_PRESENT BIT_POS(21) +#define L3_PROTO_TCP_CTRL_BIT_3_TO_5_PRESENT BIT_POS(20) +#define L3_PROTO_TCP_PARSING_ERROR BIT_POS(19) +#define L3_PROTO_IPSEC_PRESENT BIT_POS(18) +#define L3_PROTO_IPSEC_ESP_PRESENT BIT_POS(17) +#define L3_PROTO_IPSEC_AH_PRESENT BIT_POS(16) +#define L3_PROTO_IPSEC_PARSING_ERROR BIT_POS(15) +#define L3_PROTO_SCTP_PRESENT BIT_POS(14) 
+#define L3_PROTO_SCTP_PARSING_ERROR BIT_POS(13) +#define L3_PROTO_DCCP_PRESENT BIT_POS(12) +#define L3_PROTO_DCCP_PARSING_ERROR BIT_POS(11) +#define L4_UNKNOWN_PROTOCOL BIT_POS(10) +#define L4_SOFT_PARSING_ERROR BIT_POS(9) +#define L3_PROTO_GTP_PRESENT BIT_POS(8) +#define L3_PROTO_GTP_PARSING_ERROR BIT_POS(7) +#define L3_PROTO_ESP_PRESENT BIT_POS(6) +#define L3_PROTO_ESP_PARSING_ERROR BIT_POS(5) +#define L3_PROTO_ISCSI_PRESENT BIT_POS(4) +#define L3_PROTO_CAPWAN__CTRL_PRESENT BIT_POS(3) +#define L3_PROTO_CAPWAN__DATA_PRESENT BIT_POS(2) +#define L5_SOFT_PARSING_ERROR BIT_POS(1) +#define L3_IPV6_ROUTE_HDR_PRESENT BIT_POS(0) + +#define DPAA2_L3_IPv4 (L3_IPV4_1_PRESENT | L3_IPV4_1_UNICAST | \ + L3_IP_1_UNKNOWN_PROTOCOL | L3_IP_UNKNOWN_PROTOCOL) + +#define DPAA2_L3_IPv6 (L3_IPV6_1_PRESENT | L3_IPV6_1_UNICAST | \ + L3_IP_1_UNKNOWN_PROTOCOL | L3_IP_UNKNOWN_PROTOCOL) + +#define DPAA2_L3_IPv4_TCP (L3_IPV4_1_PRESENT | L3_IPV4_1_UNICAST | \ + L3_PROTO_TCP_PRESENT | L3_PROTO_TCP_CTRL_BIT_6_TO_11_PRESENT | \ + L4_UNKNOWN_PROTOCOL) + +#define DPAA2_L3_IPv4_UDP (L3_IPV4_1_PRESENT | L3_IPV4_1_UNICAST | \ + L3_PROTO_UDP_PRESENT | L4_UNKNOWN_PROTOCOL) + +#define DPAA2_L3_IPv6_TCP (L3_IPV6_1_PRESENT | L3_IPV6_1_UNICAST | \ + L3_PROTO_TCP_PRESENT | L3_PROTO_TCP_CTRL_BIT_6_TO_11_PRESENT | \ + L4_UNKNOWN_PROTOCOL) + +#define DPAA2_L3_IPv6_UDP (L3_IPV6_1_PRESENT | L3_IPV6_1_UNICAST | \ + L3_PROTO_UDP_PRESENT | L4_UNKNOWN_PROTOCOL) + +/** + * Macros to get values in word5 + */ +#define SHIM_OFFSET_1(var) ((uint64_t)(var) & 0xFF00000000000000) +#define SHIM_OFFSET_2(var) ((uint64_t)(var) & 0x00FF000000000000) +#define IP_PID_OFFSET(var) ((uint64_t)(var) & 0x0000FF0000000000) +#define ETH_OFFSET(var) ((uint64_t)(var) & 0x000000FF00000000) +#define LLC_SNAP_OFFSET(var) ((uint64_t)(var) & 0x00000000FF000000) +#define VLAN_TCI_OFFSET_1(var) ((uint64_t)(var) & 0x0000000000FF0000) +#define VLAN_TCI_OFFSET_N(var) ((uint64_t)(var) & 0x000000000000FF00) +#define LAST_ETYPE_OFFSET(var) ((uint64_t)(var) & 0x00000000000000FF) + +/** + * Macros to get values in word6 + */ +#define PPPOE_OFFSET(var) ((uint64_t)(var) & 0xFF00000000000000) +#define MPLS_OFFSET_1(var) ((uint64_t)(var) & 0x00FF000000000000) +#define MPLS_OFFSET_N(var) ((uint64_t)(var) & 0x0000FF0000000000) +#define ARP_OR_IP_OFFSET_1(var) ((uint64_t)(var) & 0x000000FF00000000) +#define IP_N_OR_MIN_ENCAP_OFFSET(var) ((uint64_t)(var) & 0x00000000FF000000) +#define GRE_OFFSET(var) ((uint64_t)(var) & 0x0000000000FF0000) +#define L4_OFFSET(var) ((uint64_t)(var) & 0x000000000000FF00) +#define GTP_OR_ESP_OR_IPSEC_OFFSET(var) ((uint64_t)(var) & 0x00000000000000FF) + +/** + * Macros to get values in word7 + */ +#define IPV6_ROUTING_HDR_OFFSET_1(var) ((uint64_t)(var) & 0xFF00000000000000) +#define IPV6_ROUTING_HDR_OFFSET_2(var) ((uint64_t)(var) & 0x00FF000000000000) +#define NEXT_HDR_OFFSET(var) ((uint64_t)(var) & 0x0000FF0000000000) +#define IPV6_FRAG_OFFSET(var) ((uint64_t)(var) & 0x000000FF00000000) +#define GROSS_RUNNING_SUM(var) ((uint64_t)(var) & 0x00000000FFFF0000) +#define RUNNING_SUM(var) ((uint64_t)(var) & 0x000000000000FFFF) + +/** + * Macros to get values in word8 + */ +#define PARSE_ERROR_CODE(var) ((uint64_t)(var) & 0xFF00000000000000) +#define SOFT_PARSING_CONTEXT(var) ((uint64_t)(var) & 0x00FFFFFFFFFFFFFF) + +/*FAEAD offset in anmotation area*/ +#define DPAA2_FD_HW_ANNOT_FAEAD_OFFSET 0x58 + +struct dpaa2_faead { + uint32_t fqid; + uint32_t ctrl; +}; + +/*FAEAD bits */ +/*A2 OMB contains valid data*/ +#define DPAA2_ANNOT_FAEAD_A2V 0x20000000 +/*egress 
confirmation FQID in FAEAD contains valid data*/ +#define DPAA2_ANNOT_FAEAD_A4V 0x08000000 +/*UPD is valid*/ +#define DPAA2_ANNOT_FAEAD_UPDV 0x00001000 +/*EBDD is valid*/ +#define DPAA2_ANNOT_FAEAD_EBDDV 0x00002000 +/*EBDD (External Buffer Deallocation Disable) */ +#define DPAA2_ANNOT_FAEAD_EBDD 0x00000020 +/*UPD (Update prepended data)*/ +#define DPAA2_ANNOT_FAEAD_UPD 0x00000010 + +/* Debug frame, otherwise supposed to be discarded */ +#define DPAA2_ETH_FAS_DISC 0x80000000 +/* MACSEC frame */ +#define DPAA2_ETH_FAS_MS 0x40000000 +#define DPAA2_ETH_FAS_PTP BIT_POS(59) +/* Ethernet multicast frame */ +#define DPAA2_ETH_FAS_MC 0x04000000 +/* Ethernet broadcast frame */ +#define DPAA2_ETH_FAS_BC 0x02000000 +#define DPAA2_ETH_FAS_KSE 0x00040000 +#define DPAA2_ETH_FAS_EOFHE 0x00020000 +#define DPAA2_ETH_FAS_MNLE 0x00010000 +#define DPAA2_ETH_FAS_TIDE 0x00008000 +#define DPAA2_ETH_FAS_PIEE 0x00004000 +/* Frame length error */ +#define DPAA2_ETH_FAS_FLE 0x00002000 +/* Frame physical error; our favourite pastime */ +#define DPAA2_ETH_FAS_FPE 0x00001000 +#define DPAA2_ETH_FAS_PTE 0x00000080 +#define DPAA2_ETH_FAS_ISP 0x00000040 +#define DPAA2_ETH_FAS_PHE 0x00000020 +#define DPAA2_ETH_FAS_BLE 0x00000010 +/* L3 csum validation performed */ +#define DPAA2_ETH_FAS_L3CV 0x00000008 +/* L3 csum error */ +#define DPAA2_ETH_FAS_L3CE 0x00000004 +/* L4 csum validation performed */ +#define DPAA2_ETH_FAS_L4CV 0x00000002 +/* L4 csum error */ +#define DPAA2_ETH_FAS_L4CE 0x00000001 + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/spdk/dpdk/drivers/net/dpaa2/dpaa2_ethdev.c b/src/spdk/dpdk/drivers/net/dpaa2/dpaa2_ethdev.c new file mode 100644 index 000000000..2f031ec5c --- /dev/null +++ b/src/spdk/dpdk/drivers/net/dpaa2/dpaa2_ethdev.c @@ -0,0 +1,2702 @@ +/* * SPDX-License-Identifier: BSD-3-Clause + * + * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved. 
+ * Copyright 2016 NXP + * + */ + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "dpaa2_pmd_logs.h" +#include +#include +#include +#include +#include +#include "dpaa2_ethdev.h" +#include "dpaa2_sparser.h" +#include + +#define DRIVER_LOOPBACK_MODE "drv_loopback" +#define DRIVER_NO_PREFETCH_MODE "drv_no_prefetch" + +/* Supported Rx offloads */ +static uint64_t dev_rx_offloads_sup = + DEV_RX_OFFLOAD_CHECKSUM | + DEV_RX_OFFLOAD_SCTP_CKSUM | + DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | + DEV_RX_OFFLOAD_OUTER_UDP_CKSUM | + DEV_RX_OFFLOAD_VLAN_STRIP | + DEV_RX_OFFLOAD_VLAN_FILTER | + DEV_RX_OFFLOAD_JUMBO_FRAME | + DEV_RX_OFFLOAD_TIMESTAMP; + +/* Rx offloads which cannot be disabled */ +static uint64_t dev_rx_offloads_nodis = + DEV_RX_OFFLOAD_RSS_HASH | + DEV_RX_OFFLOAD_SCATTER; + +/* Supported Tx offloads */ +static uint64_t dev_tx_offloads_sup = + DEV_TX_OFFLOAD_VLAN_INSERT | + DEV_TX_OFFLOAD_IPV4_CKSUM | + DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_TCP_CKSUM | + DEV_TX_OFFLOAD_SCTP_CKSUM | + DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | + DEV_TX_OFFLOAD_MT_LOCKFREE | + DEV_TX_OFFLOAD_MBUF_FAST_FREE; + +/* Tx offloads which cannot be disabled */ +static uint64_t dev_tx_offloads_nodis = + DEV_TX_OFFLOAD_MULTI_SEGS; + +/* enable timestamp in mbuf */ +enum pmd_dpaa2_ts dpaa2_enable_ts; + +struct rte_dpaa2_xstats_name_off { + char name[RTE_ETH_XSTATS_NAME_SIZE]; + uint8_t page_id; /* dpni statistics page id */ + uint8_t stats_id; /* stats id in the given page */ +}; + +static const struct rte_dpaa2_xstats_name_off dpaa2_xstats_strings[] = { + {"ingress_multicast_frames", 0, 2}, + {"ingress_multicast_bytes", 0, 3}, + {"ingress_broadcast_frames", 0, 4}, + {"ingress_broadcast_bytes", 0, 5}, + {"egress_multicast_frames", 1, 2}, + {"egress_multicast_bytes", 1, 3}, + {"egress_broadcast_frames", 1, 4}, + {"egress_broadcast_bytes", 1, 5}, + {"ingress_filtered_frames", 2, 0}, + {"ingress_discarded_frames", 2, 1}, + {"ingress_nobuffer_discards", 2, 2}, + {"egress_discarded_frames", 2, 3}, + {"egress_confirmed_frames", 2, 4}, + {"cgr_reject_frames", 4, 0}, + {"cgr_reject_bytes", 4, 1}, +}; + +static const enum rte_filter_op dpaa2_supported_filter_ops[] = { + RTE_ETH_FILTER_ADD, + RTE_ETH_FILTER_DELETE, + RTE_ETH_FILTER_UPDATE, + RTE_ETH_FILTER_FLUSH, + RTE_ETH_FILTER_GET +}; + +static struct rte_dpaa2_driver rte_dpaa2_pmd; +static int dpaa2_dev_uninit(struct rte_eth_dev *eth_dev); +static int dpaa2_dev_link_update(struct rte_eth_dev *dev, + int wait_to_complete); +static int dpaa2_dev_set_link_up(struct rte_eth_dev *dev); +static int dpaa2_dev_set_link_down(struct rte_eth_dev *dev); +static int dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu); + +int dpaa2_logtype_pmd; + +void +rte_pmd_dpaa2_set_timestamp(enum pmd_dpaa2_ts enable) +{ + dpaa2_enable_ts = enable; +} + +static int +dpaa2_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) +{ + int ret; + struct dpaa2_dev_priv *priv = dev->data->dev_private; + struct fsl_mc_io *dpni = dev->process_private; + + PMD_INIT_FUNC_TRACE(); + + if (dpni == NULL) { + DPAA2_PMD_ERR("dpni is NULL"); + return -1; + } + + if (on) + ret = dpni_add_vlan_id(dpni, CMD_PRI_LOW, priv->token, + vlan_id, 0, 0, 0); + else + ret = dpni_remove_vlan_id(dpni, CMD_PRI_LOW, + priv->token, vlan_id); + + if (ret < 0) + DPAA2_PMD_ERR("ret = %d Unable to add/rem vlan %d hwid =%d", + ret, vlan_id, priv->hw_id); + + return ret; +} + +static int +dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask) +{ + 
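/* When ETH_VLAN_FILTER_MASK is set, HW VLAN filtering is switched on
+	 * or off to match DEV_RX_OFFLOAD_VLAN_FILTER in the current rxmode;
+	 * VLAN extend offload is only reported as unsupported.
+	 */
+	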
struct dpaa2_dev_priv *priv = dev->data->dev_private; + struct fsl_mc_io *dpni = dev->process_private; + int ret; + + PMD_INIT_FUNC_TRACE(); + + if (mask & ETH_VLAN_FILTER_MASK) { + /* VLAN Filter not avaialble */ + if (!priv->max_vlan_filters) { + DPAA2_PMD_INFO("VLAN filter not available"); + goto next_mask; + } + + if (dev->data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_VLAN_FILTER) + ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW, + priv->token, true); + else + ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW, + priv->token, false); + if (ret < 0) + DPAA2_PMD_INFO("Unable to set vlan filter = %d", ret); + } +next_mask: + if (mask & ETH_VLAN_EXTEND_MASK) { + if (dev->data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_VLAN_EXTEND) + DPAA2_PMD_INFO("VLAN extend offload not supported"); + } + + return 0; +} + +static int +dpaa2_vlan_tpid_set(struct rte_eth_dev *dev, + enum rte_vlan_type vlan_type __rte_unused, + uint16_t tpid) +{ + struct dpaa2_dev_priv *priv = dev->data->dev_private; + struct fsl_mc_io *dpni = dev->process_private; + int ret = -ENOTSUP; + + PMD_INIT_FUNC_TRACE(); + + /* nothing to be done for standard vlan tpids */ + if (tpid == 0x8100 || tpid == 0x88A8) + return 0; + + ret = dpni_add_custom_tpid(dpni, CMD_PRI_LOW, + priv->token, tpid); + if (ret < 0) + DPAA2_PMD_INFO("Unable to set vlan tpid = %d", ret); + /* if already configured tpids, remove them first */ + if (ret == -EBUSY) { + struct dpni_custom_tpid_cfg tpid_list = {0}; + + ret = dpni_get_custom_tpid(dpni, CMD_PRI_LOW, + priv->token, &tpid_list); + if (ret < 0) + goto fail; + ret = dpni_remove_custom_tpid(dpni, CMD_PRI_LOW, + priv->token, tpid_list.tpid1); + if (ret < 0) + goto fail; + ret = dpni_add_custom_tpid(dpni, CMD_PRI_LOW, + priv->token, tpid); + } +fail: + return ret; +} + +static int +dpaa2_fw_version_get(struct rte_eth_dev *dev, + char *fw_version, + size_t fw_size) +{ + int ret; + struct fsl_mc_io *dpni = dev->process_private; + struct mc_soc_version mc_plat_info = {0}; + struct mc_version mc_ver_info = {0}; + + PMD_INIT_FUNC_TRACE(); + + if (mc_get_soc_version(dpni, CMD_PRI_LOW, &mc_plat_info)) + DPAA2_PMD_WARN("\tmc_get_soc_version failed"); + + if (mc_get_version(dpni, CMD_PRI_LOW, &mc_ver_info)) + DPAA2_PMD_WARN("\tmc_get_version failed"); + + ret = snprintf(fw_version, fw_size, + "%x-%d.%d.%d", + mc_plat_info.svr, + mc_ver_info.major, + mc_ver_info.minor, + mc_ver_info.revision); + + ret += 1; /* add the size of '\0' */ + if (fw_size < (uint32_t)ret) + return ret; + else + return 0; +} + +static int +dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) +{ + struct dpaa2_dev_priv *priv = dev->data->dev_private; + + PMD_INIT_FUNC_TRACE(); + + dev_info->if_index = priv->hw_id; + + dev_info->max_mac_addrs = priv->max_mac_filters; + dev_info->max_rx_pktlen = DPAA2_MAX_RX_PKT_LEN; + dev_info->min_rx_bufsize = DPAA2_MIN_RX_BUF_SIZE; + dev_info->max_rx_queues = (uint16_t)priv->nb_rx_queues; + dev_info->max_tx_queues = (uint16_t)priv->nb_tx_queues; + dev_info->rx_offload_capa = dev_rx_offloads_sup | + dev_rx_offloads_nodis; + dev_info->tx_offload_capa = dev_tx_offloads_sup | + dev_tx_offloads_nodis; + dev_info->speed_capa = ETH_LINK_SPEED_1G | + ETH_LINK_SPEED_2_5G | + ETH_LINK_SPEED_10G; + + dev_info->max_hash_mac_addrs = 0; + dev_info->max_vfs = 0; + dev_info->max_vmdq_pools = ETH_16_POOLS; + dev_info->flow_type_rss_offloads = DPAA2_RSS_OFFLOAD_ALL; + + dev_info->default_rxportconf.burst_size = dpaa2_dqrr_size; + /* same is rx size for best perf */ + 
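+	/* Note: dpaa2_dqrr_size is the depth of the QBMAN portal DQRR ring,
+	 * so advertising it as the default Rx/Tx burst size lets a single
+	 * volatile dequeue command return a full burst of frames.
+	 */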
dev_info->default_txportconf.burst_size = dpaa2_dqrr_size; + + dev_info->default_rxportconf.nb_queues = 1; + dev_info->default_txportconf.nb_queues = 1; + dev_info->default_txportconf.ring_size = CONG_ENTER_TX_THRESHOLD; + dev_info->default_rxportconf.ring_size = DPAA2_RX_DEFAULT_NBDESC; + + if (dpaa2_svr_family == SVR_LX2160A) { + dev_info->speed_capa |= ETH_LINK_SPEED_25G | + ETH_LINK_SPEED_40G | + ETH_LINK_SPEED_50G | + ETH_LINK_SPEED_100G; + } + + return 0; +} + +static int +dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev) +{ + struct dpaa2_dev_priv *priv = dev->data->dev_private; + uint16_t dist_idx; + uint32_t vq_id; + uint8_t num_rxqueue_per_tc; + struct dpaa2_queue *mc_q, *mcq; + uint32_t tot_queues; + int i; + struct dpaa2_queue *dpaa2_q; + + PMD_INIT_FUNC_TRACE(); + + num_rxqueue_per_tc = (priv->nb_rx_queues / priv->num_rx_tc); + if (priv->tx_conf_en) + tot_queues = priv->nb_rx_queues + 2 * priv->nb_tx_queues; + else + tot_queues = priv->nb_rx_queues + priv->nb_tx_queues; + mc_q = rte_malloc(NULL, sizeof(struct dpaa2_queue) * tot_queues, + RTE_CACHE_LINE_SIZE); + if (!mc_q) { + DPAA2_PMD_ERR("Memory allocation failed for rx/tx queues"); + return -1; + } + + for (i = 0; i < priv->nb_rx_queues; i++) { + mc_q->eth_data = dev->data; + priv->rx_vq[i] = mc_q++; + dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i]; + dpaa2_q->q_storage = rte_malloc("dq_storage", + sizeof(struct queue_storage_info_t), + RTE_CACHE_LINE_SIZE); + if (!dpaa2_q->q_storage) + goto fail; + + memset(dpaa2_q->q_storage, 0, + sizeof(struct queue_storage_info_t)); + if (dpaa2_alloc_dq_storage(dpaa2_q->q_storage)) + goto fail; + } + + for (i = 0; i < priv->nb_tx_queues; i++) { + mc_q->eth_data = dev->data; + mc_q->flow_id = 0xffff; + priv->tx_vq[i] = mc_q++; + dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i]; + dpaa2_q->cscn = rte_malloc(NULL, + sizeof(struct qbman_result), 16); + if (!dpaa2_q->cscn) + goto fail_tx; + } + + if (priv->tx_conf_en) { + /*Setup tx confirmation queues*/ + for (i = 0; i < priv->nb_tx_queues; i++) { + mc_q->eth_data = dev->data; + mc_q->tc_index = i; + mc_q->flow_id = 0; + priv->tx_conf_vq[i] = mc_q++; + dpaa2_q = (struct dpaa2_queue *)priv->tx_conf_vq[i]; + dpaa2_q->q_storage = + rte_malloc("dq_storage", + sizeof(struct queue_storage_info_t), + RTE_CACHE_LINE_SIZE); + if (!dpaa2_q->q_storage) + goto fail_tx_conf; + + memset(dpaa2_q->q_storage, 0, + sizeof(struct queue_storage_info_t)); + if (dpaa2_alloc_dq_storage(dpaa2_q->q_storage)) + goto fail_tx_conf; + } + } + + vq_id = 0; + for (dist_idx = 0; dist_idx < priv->nb_rx_queues; dist_idx++) { + mcq = (struct dpaa2_queue *)priv->rx_vq[vq_id]; + mcq->tc_index = dist_idx / num_rxqueue_per_tc; + mcq->flow_id = dist_idx % num_rxqueue_per_tc; + vq_id++; + } + + return 0; +fail_tx_conf: + i -= 1; + while (i >= 0) { + dpaa2_q = (struct dpaa2_queue *)priv->tx_conf_vq[i]; + rte_free(dpaa2_q->q_storage); + priv->tx_conf_vq[i--] = NULL; + } + i = priv->nb_tx_queues; +fail_tx: + i -= 1; + while (i >= 0) { + dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i]; + rte_free(dpaa2_q->cscn); + priv->tx_vq[i--] = NULL; + } + i = priv->nb_rx_queues; +fail: + i -= 1; + mc_q = priv->rx_vq[0]; + while (i >= 0) { + dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i]; + dpaa2_free_dq_storage(dpaa2_q->q_storage); + rte_free(dpaa2_q->q_storage); + priv->rx_vq[i--] = NULL; + } + rte_free(mc_q); + return -1; +} + +static void +dpaa2_free_rx_tx_queues(struct rte_eth_dev *dev) +{ + struct dpaa2_dev_priv *priv = dev->data->dev_private; + struct dpaa2_queue *dpaa2_q; + int i; + + 
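+	/* All Rx, Tx and (optional) Tx-confirmation queue structures were
+	 * carved out of one rte_malloc() array in dpaa2_alloc_rx_tx_queues()
+	 * with rx_vq[0] pointing at its base, so the per-queue storage is
+	 * released first and the array itself is freed once via rx_vq[0].
+	 */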
PMD_INIT_FUNC_TRACE(); + + /* Queue allocation base */ + if (priv->rx_vq[0]) { + /* cleaning up queue storage */ + for (i = 0; i < priv->nb_rx_queues; i++) { + dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i]; + if (dpaa2_q->q_storage) + rte_free(dpaa2_q->q_storage); + } + /* cleanup tx queue cscn */ + for (i = 0; i < priv->nb_tx_queues; i++) { + dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i]; + rte_free(dpaa2_q->cscn); + } + if (priv->tx_conf_en) { + /* cleanup tx conf queue storage */ + for (i = 0; i < priv->nb_tx_queues; i++) { + dpaa2_q = (struct dpaa2_queue *) + priv->tx_conf_vq[i]; + rte_free(dpaa2_q->q_storage); + } + } + /*free memory for all queues (RX+TX) */ + rte_free(priv->rx_vq[0]); + priv->rx_vq[0] = NULL; + } +} + +static int +dpaa2_eth_dev_configure(struct rte_eth_dev *dev) +{ + struct dpaa2_dev_priv *priv = dev->data->dev_private; + struct fsl_mc_io *dpni = dev->process_private; + struct rte_eth_conf *eth_conf = &dev->data->dev_conf; + uint64_t rx_offloads = eth_conf->rxmode.offloads; + uint64_t tx_offloads = eth_conf->txmode.offloads; + int rx_l3_csum_offload = false; + int rx_l4_csum_offload = false; + int tx_l3_csum_offload = false; + int tx_l4_csum_offload = false; + int ret; + + PMD_INIT_FUNC_TRACE(); + + /* Rx offloads which are enabled by default */ + if (dev_rx_offloads_nodis & ~rx_offloads) { + DPAA2_PMD_INFO( + "Some of rx offloads enabled by default - requested 0x%" PRIx64 + " fixed are 0x%" PRIx64, + rx_offloads, dev_rx_offloads_nodis); + } + + /* Tx offloads which are enabled by default */ + if (dev_tx_offloads_nodis & ~tx_offloads) { + DPAA2_PMD_INFO( + "Some of tx offloads enabled by default - requested 0x%" PRIx64 + " fixed are 0x%" PRIx64, + tx_offloads, dev_tx_offloads_nodis); + } + + if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) { + if (eth_conf->rxmode.max_rx_pkt_len <= DPAA2_MAX_RX_PKT_LEN) { + ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW, + priv->token, eth_conf->rxmode.max_rx_pkt_len + - RTE_ETHER_CRC_LEN); + if (ret) { + DPAA2_PMD_ERR( + "Unable to set mtu. check config"); + return ret; + } + dev->data->mtu = + dev->data->dev_conf.rxmode.max_rx_pkt_len - + RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN - + VLAN_TAG_SIZE; + } else { + return -1; + } + } + + if (eth_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) { + ret = dpaa2_setup_flow_dist(dev, + eth_conf->rx_adv_conf.rss_conf.rss_hf); + if (ret) { + DPAA2_PMD_ERR("Unable to set flow distribution." 
+ "Check queue config"); + return ret; + } + } + + if (rx_offloads & DEV_RX_OFFLOAD_IPV4_CKSUM) + rx_l3_csum_offload = true; + + if ((rx_offloads & DEV_RX_OFFLOAD_UDP_CKSUM) || + (rx_offloads & DEV_RX_OFFLOAD_TCP_CKSUM) || + (rx_offloads & DEV_RX_OFFLOAD_SCTP_CKSUM)) + rx_l4_csum_offload = true; + + ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token, + DPNI_OFF_RX_L3_CSUM, rx_l3_csum_offload); + if (ret) { + DPAA2_PMD_ERR("Error to set RX l3 csum:Error = %d", ret); + return ret; + } + + ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token, + DPNI_OFF_RX_L4_CSUM, rx_l4_csum_offload); + if (ret) { + DPAA2_PMD_ERR("Error to get RX l4 csum:Error = %d", ret); + return ret; + } + + if (rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) + dpaa2_enable_ts = true; + + if (tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) + tx_l3_csum_offload = true; + + if ((tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) || + (tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) || + (tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM)) + tx_l4_csum_offload = true; + + ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token, + DPNI_OFF_TX_L3_CSUM, tx_l3_csum_offload); + if (ret) { + DPAA2_PMD_ERR("Error to set TX l3 csum:Error = %d", ret); + return ret; + } + + ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token, + DPNI_OFF_TX_L4_CSUM, tx_l4_csum_offload); + if (ret) { + DPAA2_PMD_ERR("Error to get TX l4 csum:Error = %d", ret); + return ret; + } + + /* Enabling hash results in FD requires setting DPNI_FLCTYPE_HASH in + * dpni_set_offload API. Setting this FLCTYPE for DPNI sets the FD[SC] + * to 0 for LS2 in the hardware thus disabling data/annotation + * stashing. For LX2 this is fixed in hardware and thus hash result and + * parse results can be received in FD using this option. + */ + if (dpaa2_svr_family == SVR_LX2160A) { + ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token, + DPNI_FLCTYPE_HASH, true); + if (ret) { + DPAA2_PMD_ERR("Error setting FLCTYPE: Err = %d", ret); + return ret; + } + } + + if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER) + dpaa2_vlan_offload_set(dev, ETH_VLAN_FILTER_MASK); + + return 0; +} + +/* Function to setup RX flow information. It contains traffic class ID, + * flow ID, destination configuration etc. + */ +static int +dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev, + uint16_t rx_queue_id, + uint16_t nb_rx_desc, + unsigned int socket_id __rte_unused, + const struct rte_eth_rxconf *rx_conf __rte_unused, + struct rte_mempool *mb_pool) +{ + struct dpaa2_dev_priv *priv = dev->data->dev_private; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; + struct dpaa2_queue *dpaa2_q; + struct dpni_queue cfg; + uint8_t options = 0; + uint8_t flow_id; + uint32_t bpid; + int i, ret; + + PMD_INIT_FUNC_TRACE(); + + DPAA2_PMD_DEBUG("dev =%p, queue =%d, pool = %p, conf =%p", + dev, rx_queue_id, mb_pool, rx_conf); + + if (!priv->bp_list || priv->bp_list->mp != mb_pool) { + bpid = mempool_to_bpid(mb_pool); + ret = dpaa2_attach_bp_list(priv, + rte_dpaa2_bpid_info[bpid].bp_list); + if (ret) + return ret; + } + dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[rx_queue_id]; + dpaa2_q->mb_pool = mb_pool; /**< mbuf pool to populate RX ring. */ + dpaa2_q->bp_array = rte_dpaa2_bpid_info; + + /*Get the flow id from given VQ id*/ + flow_id = dpaa2_q->flow_id; + memset(&cfg, 0, sizeof(struct dpni_queue)); + + options = options | DPNI_QUEUE_OPT_USER_CTX; + cfg.user_context = (size_t)(dpaa2_q); + + /* check if a private cgr available. 
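+	 * Congestion group IDs are a limited per-DPNI resource: the first
+	 * free slot in cgid_in_use[] is claimed here; if none is free, cgid
+	 * stays 0xff and the queue falls back to byte-based tail drop below.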
*/ + for (i = 0; i < priv->max_cgs; i++) { + if (!priv->cgid_in_use[i]) { + priv->cgid_in_use[i] = 1; + break; + } + } + + if (i < priv->max_cgs) { + options |= DPNI_QUEUE_OPT_SET_CGID; + cfg.cgid = i; + dpaa2_q->cgid = cfg.cgid; + } else { + dpaa2_q->cgid = 0xff; + } + + /*if ls2088 or rev2 device, enable the stashing */ + + if ((dpaa2_svr_family & 0xffff0000) != SVR_LS2080A) { + options |= DPNI_QUEUE_OPT_FLC; + cfg.flc.stash_control = true; + cfg.flc.value &= 0xFFFFFFFFFFFFFFC0; + /* 00 00 00 - last 6 bit represent annotation, context stashing, + * data stashing setting 01 01 00 (0x14) + * (in following order ->DS AS CS) + * to enable 1 line data, 1 line annotation. + * For LX2, this setting should be 01 00 00 (0x10) + */ + if ((dpaa2_svr_family & 0xffff0000) == SVR_LX2160A) + cfg.flc.value |= 0x10; + else + cfg.flc.value |= 0x14; + } + ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_RX, + dpaa2_q->tc_index, flow_id, options, &cfg); + if (ret) { + DPAA2_PMD_ERR("Error in setting the rx flow: = %d", ret); + return -1; + } + + if (!(priv->flags & DPAA2_RX_TAILDROP_OFF)) { + struct dpni_taildrop taildrop; + + taildrop.enable = 1; + + /* Private CGR will use tail drop length as nb_rx_desc. + * for rest cases we can use standard byte based tail drop. + * There is no HW restriction, but number of CGRs are limited, + * hence this restriction is placed. + */ + if (dpaa2_q->cgid != 0xff) { + /*enabling per rx queue congestion control */ + taildrop.threshold = nb_rx_desc; + taildrop.units = DPNI_CONGESTION_UNIT_FRAMES; + taildrop.oal = 0; + DPAA2_PMD_DEBUG("Enabling CG Tail Drop on queue = %d", + rx_queue_id); + ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token, + DPNI_CP_CONGESTION_GROUP, + DPNI_QUEUE_RX, + dpaa2_q->tc_index, + dpaa2_q->cgid, &taildrop); + } else { + /*enabling per rx queue congestion control */ + taildrop.threshold = CONG_THRESHOLD_RX_BYTES_Q; + taildrop.units = DPNI_CONGESTION_UNIT_BYTES; + taildrop.oal = CONG_RX_OAL; + DPAA2_PMD_DEBUG("Enabling Byte based Drop on queue= %d", + rx_queue_id); + ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token, + DPNI_CP_QUEUE, DPNI_QUEUE_RX, + dpaa2_q->tc_index, flow_id, + &taildrop); + } + if (ret) { + DPAA2_PMD_ERR("Error in setting taildrop. err=(%d)", + ret); + return -1; + } + } else { /* Disable tail Drop */ + struct dpni_taildrop taildrop = {0}; + DPAA2_PMD_INFO("Tail drop is disabled on queue"); + + taildrop.enable = 0; + if (dpaa2_q->cgid != 0xff) { + ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token, + DPNI_CP_CONGESTION_GROUP, DPNI_QUEUE_RX, + dpaa2_q->tc_index, + dpaa2_q->cgid, &taildrop); + } else { + ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token, + DPNI_CP_QUEUE, DPNI_QUEUE_RX, + dpaa2_q->tc_index, flow_id, &taildrop); + } + if (ret) { + DPAA2_PMD_ERR("Error in setting taildrop. 
err=(%d)", + ret); + return -1; + } + } + + dev->data->rx_queues[rx_queue_id] = dpaa2_q; + return 0; +} + +static int +dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev, + uint16_t tx_queue_id, + uint16_t nb_tx_desc __rte_unused, + unsigned int socket_id __rte_unused, + const struct rte_eth_txconf *tx_conf __rte_unused) +{ + struct dpaa2_dev_priv *priv = dev->data->dev_private; + struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *) + priv->tx_vq[tx_queue_id]; + struct dpaa2_queue *dpaa2_tx_conf_q = (struct dpaa2_queue *) + priv->tx_conf_vq[tx_queue_id]; + struct fsl_mc_io *dpni = dev->process_private; + struct dpni_queue tx_conf_cfg; + struct dpni_queue tx_flow_cfg; + uint8_t options = 0, flow_id; + struct dpni_queue_id qid; + uint32_t tc_id; + int ret; + + PMD_INIT_FUNC_TRACE(); + + /* Return if queue already configured */ + if (dpaa2_q->flow_id != 0xffff) { + dev->data->tx_queues[tx_queue_id] = dpaa2_q; + return 0; + } + + memset(&tx_conf_cfg, 0, sizeof(struct dpni_queue)); + memset(&tx_flow_cfg, 0, sizeof(struct dpni_queue)); + + tc_id = tx_queue_id; + flow_id = 0; + + ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_TX, + tc_id, flow_id, options, &tx_flow_cfg); + if (ret) { + DPAA2_PMD_ERR("Error in setting the tx flow: " + "tc_id=%d, flow=%d err=%d", + tc_id, flow_id, ret); + return -1; + } + + dpaa2_q->flow_id = flow_id; + + if (tx_queue_id == 0) { + /*Set tx-conf and error configuration*/ + if (priv->tx_conf_en) + ret = dpni_set_tx_confirmation_mode(dpni, CMD_PRI_LOW, + priv->token, + DPNI_CONF_AFFINE); + else + ret = dpni_set_tx_confirmation_mode(dpni, CMD_PRI_LOW, + priv->token, + DPNI_CONF_DISABLE); + if (ret) { + DPAA2_PMD_ERR("Error in set tx conf mode settings: " + "err=%d", ret); + return -1; + } + } + dpaa2_q->tc_index = tc_id; + + ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token, + DPNI_QUEUE_TX, dpaa2_q->tc_index, + dpaa2_q->flow_id, &tx_flow_cfg, &qid); + if (ret) { + DPAA2_PMD_ERR("Error in getting LFQID err=%d", ret); + return -1; + } + dpaa2_q->fqid = qid.fqid; + + if (!(priv->flags & DPAA2_TX_CGR_OFF)) { + struct dpni_congestion_notification_cfg cong_notif_cfg = {0}; + + cong_notif_cfg.units = DPNI_CONGESTION_UNIT_FRAMES; + cong_notif_cfg.threshold_entry = CONG_ENTER_TX_THRESHOLD; + /* Notify that the queue is not congested when the data in + * the queue is below this thershold. 
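+		 * (CONG_EXIT_TX_THRESHOLD). Both thresholds are counted in
+		 * frames; on a state change the congestion notification is
+		 * written into the per-queue cscn area, which the Tx path can
+		 * poll before enqueuing further frames.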
+ */ + cong_notif_cfg.threshold_exit = CONG_EXIT_TX_THRESHOLD; + cong_notif_cfg.message_ctx = 0; + cong_notif_cfg.message_iova = + (size_t)DPAA2_VADDR_TO_IOVA(dpaa2_q->cscn); + cong_notif_cfg.dest_cfg.dest_type = DPNI_DEST_NONE; + cong_notif_cfg.notification_mode = + DPNI_CONG_OPT_WRITE_MEM_ON_ENTER | + DPNI_CONG_OPT_WRITE_MEM_ON_EXIT | + DPNI_CONG_OPT_COHERENT_WRITE; + cong_notif_cfg.cg_point = DPNI_CP_QUEUE; + + ret = dpni_set_congestion_notification(dpni, CMD_PRI_LOW, + priv->token, + DPNI_QUEUE_TX, + tc_id, + &cong_notif_cfg); + if (ret) { + DPAA2_PMD_ERR( + "Error in setting tx congestion notification: " + "err=%d", ret); + return -ret; + } + } + dpaa2_q->cb_eqresp_free = dpaa2_dev_free_eqresp_buf; + dev->data->tx_queues[tx_queue_id] = dpaa2_q; + + if (priv->tx_conf_en) { + dpaa2_q->tx_conf_queue = dpaa2_tx_conf_q; + options = options | DPNI_QUEUE_OPT_USER_CTX; + tx_conf_cfg.user_context = (size_t)(dpaa2_q); + ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, + DPNI_QUEUE_TX_CONFIRM, dpaa2_tx_conf_q->tc_index, + dpaa2_tx_conf_q->flow_id, options, &tx_conf_cfg); + if (ret) { + DPAA2_PMD_ERR("Error in setting the tx conf flow: " + "tc_index=%d, flow=%d err=%d", + dpaa2_tx_conf_q->tc_index, + dpaa2_tx_conf_q->flow_id, ret); + return -1; + } + + ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token, + DPNI_QUEUE_TX_CONFIRM, dpaa2_tx_conf_q->tc_index, + dpaa2_tx_conf_q->flow_id, &tx_conf_cfg, &qid); + if (ret) { + DPAA2_PMD_ERR("Error in getting LFQID err=%d", ret); + return -1; + } + dpaa2_tx_conf_q->fqid = qid.fqid; + } + return 0; +} + +static void +dpaa2_dev_rx_queue_release(void *q __rte_unused) +{ + struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)q; + struct dpaa2_dev_priv *priv = dpaa2_q->eth_data->dev_private; + struct fsl_mc_io *dpni = + (struct fsl_mc_io *)priv->eth_dev->process_private; + uint8_t options = 0; + int ret; + struct dpni_queue cfg; + + memset(&cfg, 0, sizeof(struct dpni_queue)); + PMD_INIT_FUNC_TRACE(); + if (dpaa2_q->cgid != 0xff) { + options = DPNI_QUEUE_OPT_CLEAR_CGID; + cfg.cgid = dpaa2_q->cgid; + + ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, + DPNI_QUEUE_RX, + dpaa2_q->tc_index, dpaa2_q->flow_id, + options, &cfg); + if (ret) + DPAA2_PMD_ERR("Unable to clear CGR from q=%u err=%d", + dpaa2_q->fqid, ret); + priv->cgid_in_use[dpaa2_q->cgid] = 0; + dpaa2_q->cgid = 0xff; + } +} + +static void +dpaa2_dev_tx_queue_release(void *q __rte_unused) +{ + PMD_INIT_FUNC_TRACE(); +} + +static uint32_t +dpaa2_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id) +{ + int32_t ret; + struct dpaa2_dev_priv *priv = dev->data->dev_private; + struct dpaa2_queue *dpaa2_q; + struct qbman_swp *swp; + struct qbman_fq_query_np_rslt state; + uint32_t frame_cnt = 0; + + if (unlikely(!DPAA2_PER_LCORE_DPIO)) { + ret = dpaa2_affine_qbman_swp(); + if (ret) { + DPAA2_PMD_ERR( + "Failed to allocate IO portal, tid: %d\n", + rte_gettid()); + return -EINVAL; + } + } + swp = DPAA2_PER_LCORE_PORTAL; + + dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[rx_queue_id]; + + if (qbman_fq_query_state(swp, dpaa2_q->fqid, &state) == 0) { + frame_cnt = qbman_fq_state_frame_count(&state); + DPAA2_PMD_DP_DEBUG("RX frame count for q(%d) is %u", + rx_queue_id, frame_cnt); + } + return frame_cnt; +} + +static const uint32_t * +dpaa2_supported_ptypes_get(struct rte_eth_dev *dev) +{ + static const uint32_t ptypes[] = { + /*todo -= add more types */ + RTE_PTYPE_L2_ETHER, + RTE_PTYPE_L3_IPV4, + RTE_PTYPE_L3_IPV4_EXT, + RTE_PTYPE_L3_IPV6, + RTE_PTYPE_L3_IPV6_EXT, + RTE_PTYPE_L4_TCP, + 
RTE_PTYPE_L4_UDP, + RTE_PTYPE_L4_SCTP, + RTE_PTYPE_L4_ICMP, + RTE_PTYPE_UNKNOWN + }; + + if (dev->rx_pkt_burst == dpaa2_dev_prefetch_rx || + dev->rx_pkt_burst == dpaa2_dev_rx || + dev->rx_pkt_burst == dpaa2_dev_loopback_rx) + return ptypes; + return NULL; +} + +/** + * Dpaa2 link Interrupt handler + * + * @param param + * The address of parameter (struct rte_eth_dev *) regsitered before. + * + * @return + * void + */ +static void +dpaa2_interrupt_handler(void *param) +{ + struct rte_eth_dev *dev = param; + struct dpaa2_dev_priv *priv = dev->data->dev_private; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; + int ret; + int irq_index = DPNI_IRQ_INDEX; + unsigned int status = 0, clear = 0; + + PMD_INIT_FUNC_TRACE(); + + if (dpni == NULL) { + DPAA2_PMD_ERR("dpni is NULL"); + return; + } + + ret = dpni_get_irq_status(dpni, CMD_PRI_LOW, priv->token, + irq_index, &status); + if (unlikely(ret)) { + DPAA2_PMD_ERR("Can't get irq status (err %d)", ret); + clear = 0xffffffff; + goto out; + } + + if (status & DPNI_IRQ_EVENT_LINK_CHANGED) { + clear = DPNI_IRQ_EVENT_LINK_CHANGED; + dpaa2_dev_link_update(dev, 0); + /* calling all the apps registered for link status event */ + _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, + NULL); + } +out: + ret = dpni_clear_irq_status(dpni, CMD_PRI_LOW, priv->token, + irq_index, clear); + if (unlikely(ret)) + DPAA2_PMD_ERR("Can't clear irq status (err %d)", ret); +} + +static int +dpaa2_eth_setup_irqs(struct rte_eth_dev *dev, int enable) +{ + int err = 0; + struct dpaa2_dev_priv *priv = dev->data->dev_private; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; + int irq_index = DPNI_IRQ_INDEX; + unsigned int mask = DPNI_IRQ_EVENT_LINK_CHANGED; + + PMD_INIT_FUNC_TRACE(); + + err = dpni_set_irq_mask(dpni, CMD_PRI_LOW, priv->token, + irq_index, mask); + if (err < 0) { + DPAA2_PMD_ERR("Error: dpni_set_irq_mask():%d (%s)", err, + strerror(-err)); + return err; + } + + err = dpni_set_irq_enable(dpni, CMD_PRI_LOW, priv->token, + irq_index, enable); + if (err < 0) + DPAA2_PMD_ERR("Error: dpni_set_irq_enable():%d (%s)", err, + strerror(-err)); + + return err; +} + +static int +dpaa2_dev_start(struct rte_eth_dev *dev) +{ + struct rte_device *rdev = dev->device; + struct rte_dpaa2_device *dpaa2_dev; + struct rte_eth_dev_data *data = dev->data; + struct dpaa2_dev_priv *priv = data->dev_private; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; + struct dpni_queue cfg; + struct dpni_error_cfg err_cfg; + uint16_t qdid; + struct dpni_queue_id qid; + struct dpaa2_queue *dpaa2_q; + int ret, i; + struct rte_intr_handle *intr_handle; + + dpaa2_dev = container_of(rdev, struct rte_dpaa2_device, device); + intr_handle = &dpaa2_dev->intr_handle; + + PMD_INIT_FUNC_TRACE(); + + ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token); + if (ret) { + DPAA2_PMD_ERR("Failure in enabling dpni %d device: err=%d", + priv->hw_id, ret); + return ret; + } + + /* Power up the phy. 
Needed to make the link go UP */ + dpaa2_dev_set_link_up(dev); + + ret = dpni_get_qdid(dpni, CMD_PRI_LOW, priv->token, + DPNI_QUEUE_TX, &qdid); + if (ret) { + DPAA2_PMD_ERR("Error in getting qdid: err=%d", ret); + return ret; + } + priv->qdid = qdid; + + for (i = 0; i < data->nb_rx_queues; i++) { + dpaa2_q = (struct dpaa2_queue *)data->rx_queues[i]; + ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token, + DPNI_QUEUE_RX, dpaa2_q->tc_index, + dpaa2_q->flow_id, &cfg, &qid); + if (ret) { + DPAA2_PMD_ERR("Error in getting flow information: " + "err=%d", ret); + return ret; + } + dpaa2_q->fqid = qid.fqid; + } + + /*checksum errors, send them to normal path and set it in annotation */ + err_cfg.errors = DPNI_ERROR_L3CE | DPNI_ERROR_L4CE; + err_cfg.errors |= DPNI_ERROR_PHE; + + err_cfg.error_action = DPNI_ERROR_ACTION_CONTINUE; + err_cfg.set_frame_annotation = true; + + ret = dpni_set_errors_behavior(dpni, CMD_PRI_LOW, + priv->token, &err_cfg); + if (ret) { + DPAA2_PMD_ERR("Error to dpni_set_errors_behavior: code = %d", + ret); + return ret; + } + + /* if the interrupts were configured on this devices*/ + if (intr_handle && (intr_handle->fd) && + (dev->data->dev_conf.intr_conf.lsc != 0)) { + /* Registering LSC interrupt handler */ + rte_intr_callback_register(intr_handle, + dpaa2_interrupt_handler, + (void *)dev); + + /* enable vfio intr/eventfd mapping + * Interrupt index 0 is required, so we can not use + * rte_intr_enable. + */ + rte_dpaa2_intr_enable(intr_handle, DPNI_IRQ_INDEX); + + /* enable dpni_irqs */ + dpaa2_eth_setup_irqs(dev, 1); + } + + /* Change the tx burst function if ordered queues are used */ + if (priv->en_ordered) + dev->tx_pkt_burst = dpaa2_dev_tx_ordered; + + return 0; +} + +/** + * This routine disables all traffic on the adapter by issuing a + * global reset on the MAC. 
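+ * In this PMD that amounts to disabling the DPNI object through the MC
+ * (dpni_disable()), tearing down the link-status interrupt handling and
+ * clearing the recorded link status, rather than a literal MAC reset.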
+ */ +static void +dpaa2_dev_stop(struct rte_eth_dev *dev) +{ + struct dpaa2_dev_priv *priv = dev->data->dev_private; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; + int ret; + struct rte_eth_link link; + struct rte_intr_handle *intr_handle = dev->intr_handle; + + PMD_INIT_FUNC_TRACE(); + + /* reset interrupt callback */ + if (intr_handle && (intr_handle->fd) && + (dev->data->dev_conf.intr_conf.lsc != 0)) { + /*disable dpni irqs */ + dpaa2_eth_setup_irqs(dev, 0); + + /* disable vfio intr before callback unregister */ + rte_dpaa2_intr_disable(intr_handle, DPNI_IRQ_INDEX); + + /* Unregistering LSC interrupt handler */ + rte_intr_callback_unregister(intr_handle, + dpaa2_interrupt_handler, + (void *)dev); + } + + dpaa2_dev_set_link_down(dev); + + ret = dpni_disable(dpni, CMD_PRI_LOW, priv->token); + if (ret) { + DPAA2_PMD_ERR("Failure (ret %d) in disabling dpni %d dev", + ret, priv->hw_id); + return; + } + + /* clear the recorded link status */ + memset(&link, 0, sizeof(link)); + rte_eth_linkstatus_set(dev, &link); +} + +static void +dpaa2_dev_close(struct rte_eth_dev *dev) +{ + struct dpaa2_dev_priv *priv = dev->data->dev_private; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; + int ret; + struct rte_eth_link link; + + PMD_INIT_FUNC_TRACE(); + + dpaa2_flow_clean(dev); + + /* Clean the device first */ + ret = dpni_reset(dpni, CMD_PRI_LOW, priv->token); + if (ret) { + DPAA2_PMD_ERR("Failure cleaning dpni device: err=%d", ret); + return; + } + + memset(&link, 0, sizeof(link)); + rte_eth_linkstatus_set(dev, &link); +} + +static int +dpaa2_dev_promiscuous_enable( + struct rte_eth_dev *dev) +{ + int ret; + struct dpaa2_dev_priv *priv = dev->data->dev_private; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; + + PMD_INIT_FUNC_TRACE(); + + if (dpni == NULL) { + DPAA2_PMD_ERR("dpni is NULL"); + return -ENODEV; + } + + ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, true); + if (ret < 0) + DPAA2_PMD_ERR("Unable to enable U promisc mode %d", ret); + + ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true); + if (ret < 0) + DPAA2_PMD_ERR("Unable to enable M promisc mode %d", ret); + + return ret; +} + +static int +dpaa2_dev_promiscuous_disable( + struct rte_eth_dev *dev) +{ + int ret; + struct dpaa2_dev_priv *priv = dev->data->dev_private; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; + + PMD_INIT_FUNC_TRACE(); + + if (dpni == NULL) { + DPAA2_PMD_ERR("dpni is NULL"); + return -ENODEV; + } + + ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, false); + if (ret < 0) + DPAA2_PMD_ERR("Unable to disable U promisc mode %d", ret); + + if (dev->data->all_multicast == 0) { + ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, + priv->token, false); + if (ret < 0) + DPAA2_PMD_ERR("Unable to disable M promisc mode %d", + ret); + } + + return ret; +} + +static int +dpaa2_dev_allmulticast_enable( + struct rte_eth_dev *dev) +{ + int ret; + struct dpaa2_dev_priv *priv = dev->data->dev_private; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; + + PMD_INIT_FUNC_TRACE(); + + if (dpni == NULL) { + DPAA2_PMD_ERR("dpni is NULL"); + return -ENODEV; + } + + ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true); + if (ret < 0) + DPAA2_PMD_ERR("Unable to enable multicast mode %d", ret); + + return ret; +} + +static int +dpaa2_dev_allmulticast_disable(struct rte_eth_dev *dev) +{ + int ret; + struct dpaa2_dev_priv *priv = dev->data->dev_private; + struct fsl_mc_io 
*dpni = (struct fsl_mc_io *)dev->process_private; + + PMD_INIT_FUNC_TRACE(); + + if (dpni == NULL) { + DPAA2_PMD_ERR("dpni is NULL"); + return -ENODEV; + } + + /* must remain on for all promiscuous */ + if (dev->data->promiscuous == 1) + return 0; + + ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, false); + if (ret < 0) + DPAA2_PMD_ERR("Unable to disable multicast mode %d", ret); + + return ret; +} + +static int +dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) +{ + int ret; + struct dpaa2_dev_priv *priv = dev->data->dev_private; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; + uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + + VLAN_TAG_SIZE; + + PMD_INIT_FUNC_TRACE(); + + if (dpni == NULL) { + DPAA2_PMD_ERR("dpni is NULL"); + return -EINVAL; + } + + /* check that mtu is within the allowed range */ + if (mtu < RTE_ETHER_MIN_MTU || frame_size > DPAA2_MAX_RX_PKT_LEN) + return -EINVAL; + + if (frame_size > RTE_ETHER_MAX_LEN) + dev->data->dev_conf.rxmode.offloads |= + DEV_RX_OFFLOAD_JUMBO_FRAME; + else + dev->data->dev_conf.rxmode.offloads &= + ~DEV_RX_OFFLOAD_JUMBO_FRAME; + + dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size; + + /* Set the Max Rx frame length as 'mtu' + + * Maximum Ethernet header length + */ + ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW, priv->token, + frame_size - RTE_ETHER_CRC_LEN); + if (ret) { + DPAA2_PMD_ERR("Setting the max frame length failed"); + return -1; + } + DPAA2_PMD_INFO("MTU configured for the device: %d", mtu); + return 0; +} + +static int +dpaa2_dev_add_mac_addr(struct rte_eth_dev *dev, + struct rte_ether_addr *addr, + __rte_unused uint32_t index, + __rte_unused uint32_t pool) +{ + int ret; + struct dpaa2_dev_priv *priv = dev->data->dev_private; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; + + PMD_INIT_FUNC_TRACE(); + + if (dpni == NULL) { + DPAA2_PMD_ERR("dpni is NULL"); + return -1; + } + + ret = dpni_add_mac_addr(dpni, CMD_PRI_LOW, priv->token, + addr->addr_bytes, 0, 0, 0); + if (ret) + DPAA2_PMD_ERR( + "error: Adding the MAC ADDR failed: err = %d", ret); + return 0; +} + +static void +dpaa2_dev_remove_mac_addr(struct rte_eth_dev *dev, + uint32_t index) +{ + int ret; + struct dpaa2_dev_priv *priv = dev->data->dev_private; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; + struct rte_eth_dev_data *data = dev->data; + struct rte_ether_addr *macaddr; + + PMD_INIT_FUNC_TRACE(); + + macaddr = &data->mac_addrs[index]; + + if (dpni == NULL) { + DPAA2_PMD_ERR("dpni is NULL"); + return; + } + + ret = dpni_remove_mac_addr(dpni, CMD_PRI_LOW, + priv->token, macaddr->addr_bytes); + if (ret) + DPAA2_PMD_ERR( + "error: Removing the MAC ADDR failed: err = %d", ret); +} + +static int +dpaa2_dev_set_mac_addr(struct rte_eth_dev *dev, + struct rte_ether_addr *addr) +{ + int ret; + struct dpaa2_dev_priv *priv = dev->data->dev_private; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; + + PMD_INIT_FUNC_TRACE(); + + if (dpni == NULL) { + DPAA2_PMD_ERR("dpni is NULL"); + return -EINVAL; + } + + ret = dpni_set_primary_mac_addr(dpni, CMD_PRI_LOW, + priv->token, addr->addr_bytes); + + if (ret) + DPAA2_PMD_ERR( + "error: Setting the MAC ADDR failed %d", ret); + + return ret; +} + +static +int dpaa2_dev_stats_get(struct rte_eth_dev *dev, + struct rte_eth_stats *stats) +{ + struct dpaa2_dev_priv *priv = dev->data->dev_private; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; + int32_t retcode; + uint8_t page0 = 0, page1 = 1, 
page2 = 2; + union dpni_statistics value; + int i; + struct dpaa2_queue *dpaa2_rxq, *dpaa2_txq; + + memset(&value, 0, sizeof(union dpni_statistics)); + + PMD_INIT_FUNC_TRACE(); + + if (!dpni) { + DPAA2_PMD_ERR("dpni is NULL"); + return -EINVAL; + } + + if (!stats) { + DPAA2_PMD_ERR("stats is NULL"); + return -EINVAL; + } + + /*Get Counters from page_0*/ + retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, + page0, 0, &value); + if (retcode) + goto err; + + stats->ipackets = value.page_0.ingress_all_frames; + stats->ibytes = value.page_0.ingress_all_bytes; + + /*Get Counters from page_1*/ + retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, + page1, 0, &value); + if (retcode) + goto err; + + stats->opackets = value.page_1.egress_all_frames; + stats->obytes = value.page_1.egress_all_bytes; + + /*Get Counters from page_2*/ + retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, + page2, 0, &value); + if (retcode) + goto err; + + /* Ingress drop frame count due to configured rules */ + stats->ierrors = value.page_2.ingress_filtered_frames; + /* Ingress drop frame count due to error */ + stats->ierrors += value.page_2.ingress_discarded_frames; + + stats->oerrors = value.page_2.egress_discarded_frames; + stats->imissed = value.page_2.ingress_nobuffer_discards; + + /* Fill in per queue stats */ + for (i = 0; (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) && + (i < priv->nb_rx_queues || i < priv->nb_tx_queues); ++i) { + dpaa2_rxq = (struct dpaa2_queue *)priv->rx_vq[i]; + dpaa2_txq = (struct dpaa2_queue *)priv->tx_vq[i]; + if (dpaa2_rxq) + stats->q_ipackets[i] = dpaa2_rxq->rx_pkts; + if (dpaa2_txq) + stats->q_opackets[i] = dpaa2_txq->tx_pkts; + + /* Byte counting is not implemented */ + stats->q_ibytes[i] = 0; + stats->q_obytes[i] = 0; + } + + return 0; + +err: + DPAA2_PMD_ERR("Operation not completed:Error Code = %d", retcode); + return retcode; +}; + +static int +dpaa2_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, + unsigned int n) +{ + struct dpaa2_dev_priv *priv = dev->data->dev_private; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; + int32_t retcode; + union dpni_statistics value[5] = {}; + unsigned int i = 0, num = RTE_DIM(dpaa2_xstats_strings); + + if (n < num) + return num; + + if (xstats == NULL) + return 0; + + /* Get Counters from page_0*/ + retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, + 0, 0, &value[0]); + if (retcode) + goto err; + + /* Get Counters from page_1*/ + retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, + 1, 0, &value[1]); + if (retcode) + goto err; + + /* Get Counters from page_2*/ + retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, + 2, 0, &value[2]); + if (retcode) + goto err; + + for (i = 0; i < priv->max_cgs; i++) { + if (!priv->cgid_in_use[i]) { + /* Get Counters from page_4*/ + retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, + priv->token, + 4, 0, &value[4]); + if (retcode) + goto err; + break; + } + } + + for (i = 0; i < num; i++) { + xstats[i].id = i; + xstats[i].value = value[dpaa2_xstats_strings[i].page_id]. 
+ raw.counter[dpaa2_xstats_strings[i].stats_id]; + } + return i; +err: + DPAA2_PMD_ERR("Error in obtaining extended stats (%d)", retcode); + return retcode; +} + +static int +dpaa2_xstats_get_names(__rte_unused struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, + unsigned int limit) +{ + unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings); + + if (limit < stat_cnt) + return stat_cnt; + + if (xstats_names != NULL) + for (i = 0; i < stat_cnt; i++) + strlcpy(xstats_names[i].name, + dpaa2_xstats_strings[i].name, + sizeof(xstats_names[i].name)); + + return stat_cnt; +} + +static int +dpaa2_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids, + uint64_t *values, unsigned int n) +{ + unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings); + uint64_t values_copy[stat_cnt]; + + if (!ids) { + struct dpaa2_dev_priv *priv = dev->data->dev_private; + struct fsl_mc_io *dpni = + (struct fsl_mc_io *)dev->process_private; + int32_t retcode; + union dpni_statistics value[5] = {}; + + if (n < stat_cnt) + return stat_cnt; + + if (!values) + return 0; + + /* Get Counters from page_0*/ + retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, + 0, 0, &value[0]); + if (retcode) + return 0; + + /* Get Counters from page_1*/ + retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, + 1, 0, &value[1]); + if (retcode) + return 0; + + /* Get Counters from page_2*/ + retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, + 2, 0, &value[2]); + if (retcode) + return 0; + + /* Get Counters from page_4*/ + retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, + 4, 0, &value[4]); + if (retcode) + return 0; + + for (i = 0; i < stat_cnt; i++) { + values[i] = value[dpaa2_xstats_strings[i].page_id]. + raw.counter[dpaa2_xstats_strings[i].stats_id]; + } + return stat_cnt; + } + + dpaa2_xstats_get_by_id(dev, NULL, values_copy, stat_cnt); + + for (i = 0; i < n; i++) { + if (ids[i] >= stat_cnt) { + DPAA2_PMD_ERR("xstats id value isn't valid"); + return -1; + } + values[i] = values_copy[ids[i]]; + } + return n; +} + +static int +dpaa2_xstats_get_names_by_id( + struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, + const uint64_t *ids, + unsigned int limit) +{ + unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings); + struct rte_eth_xstat_name xstats_names_copy[stat_cnt]; + + if (!ids) + return dpaa2_xstats_get_names(dev, xstats_names, limit); + + dpaa2_xstats_get_names(dev, xstats_names_copy, limit); + + for (i = 0; i < limit; i++) { + if (ids[i] >= stat_cnt) { + DPAA2_PMD_ERR("xstats id value isn't valid"); + return -1; + } + strcpy(xstats_names[i].name, xstats_names_copy[ids[i]].name); + } + return limit; +} + +static int +dpaa2_dev_stats_reset(struct rte_eth_dev *dev) +{ + struct dpaa2_dev_priv *priv = dev->data->dev_private; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; + int retcode; + int i; + struct dpaa2_queue *dpaa2_q; + + PMD_INIT_FUNC_TRACE(); + + if (dpni == NULL) { + DPAA2_PMD_ERR("dpni is NULL"); + return -EINVAL; + } + + retcode = dpni_reset_statistics(dpni, CMD_PRI_LOW, priv->token); + if (retcode) + goto error; + + /* Reset the per queue stats in dpaa2_queue structure */ + for (i = 0; i < priv->nb_rx_queues; i++) { + dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i]; + if (dpaa2_q) + dpaa2_q->rx_pkts = 0; + } + + for (i = 0; i < priv->nb_tx_queues; i++) { + dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i]; + if (dpaa2_q) + dpaa2_q->tx_pkts = 0; + } + + return 0; + +error: + DPAA2_PMD_ERR("Operation not completed:Error 
Code = %d", retcode); + return retcode; +}; + +/* return 0 means link status changed, -1 means not changed */ +static int +dpaa2_dev_link_update(struct rte_eth_dev *dev, + int wait_to_complete __rte_unused) +{ + int ret; + struct dpaa2_dev_priv *priv = dev->data->dev_private; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; + struct rte_eth_link link; + struct dpni_link_state state = {0}; + + if (dpni == NULL) { + DPAA2_PMD_ERR("dpni is NULL"); + return 0; + } + + ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state); + if (ret < 0) { + DPAA2_PMD_DEBUG("error: dpni_get_link_state %d", ret); + return -1; + } + + memset(&link, 0, sizeof(struct rte_eth_link)); + link.link_status = state.up; + link.link_speed = state.rate; + + if (state.options & DPNI_LINK_OPT_HALF_DUPLEX) + link.link_duplex = ETH_LINK_HALF_DUPLEX; + else + link.link_duplex = ETH_LINK_FULL_DUPLEX; + + ret = rte_eth_linkstatus_set(dev, &link); + if (ret == -1) + DPAA2_PMD_DEBUG("No change in status"); + else + DPAA2_PMD_INFO("Port %d Link is %s\n", dev->data->port_id, + link.link_status ? "Up" : "Down"); + + return ret; +} + +/** + * Toggle the DPNI to enable, if not already enabled. + * This is not strictly PHY up/down - it is more of logical toggling. + */ +static int +dpaa2_dev_set_link_up(struct rte_eth_dev *dev) +{ + int ret = -EINVAL; + struct dpaa2_dev_priv *priv; + struct fsl_mc_io *dpni; + int en = 0; + struct dpni_link_state state = {0}; + + priv = dev->data->dev_private; + dpni = (struct fsl_mc_io *)dev->process_private; + + if (dpni == NULL) { + DPAA2_PMD_ERR("dpni is NULL"); + return ret; + } + + /* Check if DPNI is currently enabled */ + ret = dpni_is_enabled(dpni, CMD_PRI_LOW, priv->token, &en); + if (ret) { + /* Unable to obtain dpni status; Not continuing */ + DPAA2_PMD_ERR("Interface Link UP failed (%d)", ret); + return -EINVAL; + } + + /* Enable link if not already enabled */ + if (!en) { + ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token); + if (ret) { + DPAA2_PMD_ERR("Interface Link UP failed (%d)", ret); + return -EINVAL; + } + } + ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state); + if (ret < 0) { + DPAA2_PMD_DEBUG("Unable to get link state (%d)", ret); + return -1; + } + + /* changing tx burst function to start enqueues */ + dev->tx_pkt_burst = dpaa2_dev_tx; + dev->data->dev_link.link_status = state.up; + dev->data->dev_link.link_speed = state.rate; + + if (state.up) + DPAA2_PMD_INFO("Port %d Link is Up", dev->data->port_id); + else + DPAA2_PMD_INFO("Port %d Link is Down", dev->data->port_id); + return ret; +} + +/** + * Toggle the DPNI to disable, if not already disabled. + * This is not strictly PHY up/down - it is more of logical toggling. + */ +static int +dpaa2_dev_set_link_down(struct rte_eth_dev *dev) +{ + int ret = -EINVAL; + struct dpaa2_dev_priv *priv; + struct fsl_mc_io *dpni; + int dpni_enabled = 0; + int retries = 10; + + PMD_INIT_FUNC_TRACE(); + + priv = dev->data->dev_private; + dpni = (struct fsl_mc_io *)dev->process_private; + + if (dpni == NULL) { + DPAA2_PMD_ERR("Device has not yet been configured"); + return ret; + } + + /*changing tx burst function to avoid any more enqueues */ + dev->tx_pkt_burst = dummy_dev_tx; + + /* Loop while dpni_disable() attempts to drain the egress FQs + * and confirm them back to us. 
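+	 * Up to 10 attempts are made, with a 100 ms delay between checks,
+	 * before giving up with a warning.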
+ */ + do { + ret = dpni_disable(dpni, 0, priv->token); + if (ret) { + DPAA2_PMD_ERR("dpni disable failed (%d)", ret); + return ret; + } + ret = dpni_is_enabled(dpni, 0, priv->token, &dpni_enabled); + if (ret) { + DPAA2_PMD_ERR("dpni enable check failed (%d)", ret); + return ret; + } + if (dpni_enabled) + /* Allow the MC some slack */ + rte_delay_us(100 * 1000); + } while (dpni_enabled && --retries); + + if (!retries) { + DPAA2_PMD_WARN("Retry count exceeded disabling dpni"); + /* todo- we may have to manually cleanup queues. + */ + } else { + DPAA2_PMD_INFO("Port %d Link DOWN successful", + dev->data->port_id); + } + + dev->data->dev_link.link_status = 0; + + return ret; +} + +static int +dpaa2_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) +{ + int ret = -EINVAL; + struct dpaa2_dev_priv *priv; + struct fsl_mc_io *dpni; + struct dpni_link_state state = {0}; + + PMD_INIT_FUNC_TRACE(); + + priv = dev->data->dev_private; + dpni = (struct fsl_mc_io *)dev->process_private; + + if (dpni == NULL || fc_conf == NULL) { + DPAA2_PMD_ERR("device not configured"); + return ret; + } + + ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state); + if (ret) { + DPAA2_PMD_ERR("error: dpni_get_link_state %d", ret); + return ret; + } + + memset(fc_conf, 0, sizeof(struct rte_eth_fc_conf)); + if (state.options & DPNI_LINK_OPT_PAUSE) { + /* DPNI_LINK_OPT_PAUSE set + * if ASYM_PAUSE not set, + * RX Side flow control (handle received Pause frame) + * TX side flow control (send Pause frame) + * if ASYM_PAUSE set, + * RX Side flow control (handle received Pause frame) + * No TX side flow control (send Pause frame disabled) + */ + if (!(state.options & DPNI_LINK_OPT_ASYM_PAUSE)) + fc_conf->mode = RTE_FC_FULL; + else + fc_conf->mode = RTE_FC_RX_PAUSE; + } else { + /* DPNI_LINK_OPT_PAUSE not set + * if ASYM_PAUSE set, + * TX side flow control (send Pause frame) + * No RX side flow control (No action on pause frame rx) + * if ASYM_PAUSE not set, + * Flow control disabled + */ + if (state.options & DPNI_LINK_OPT_ASYM_PAUSE) + fc_conf->mode = RTE_FC_TX_PAUSE; + else + fc_conf->mode = RTE_FC_NONE; + } + + return ret; +} + +static int +dpaa2_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) +{ + int ret = -EINVAL; + struct dpaa2_dev_priv *priv; + struct fsl_mc_io *dpni; + struct dpni_link_state state = {0}; + struct dpni_link_cfg cfg = {0}; + + PMD_INIT_FUNC_TRACE(); + + priv = dev->data->dev_private; + dpni = (struct fsl_mc_io *)dev->process_private; + + if (dpni == NULL) { + DPAA2_PMD_ERR("dpni is NULL"); + return ret; + } + + /* It is necessary to obtain the current state before setting fc_conf + * as MC would return error in case rate, autoneg or duplex values are + * different. 
+ */ + ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state); + if (ret) { + DPAA2_PMD_ERR("Unable to get link state (err=%d)", ret); + return -1; + } + + /* Disable link before setting configuration */ + dpaa2_dev_set_link_down(dev); + + /* Based on fc_conf, update cfg */ + cfg.rate = state.rate; + cfg.options = state.options; + + /* update cfg with fc_conf */ + switch (fc_conf->mode) { + case RTE_FC_FULL: + /* Full flow control; + * OPT_PAUSE set, ASYM_PAUSE not set + */ + cfg.options |= DPNI_LINK_OPT_PAUSE; + cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE; + break; + case RTE_FC_TX_PAUSE: + /* Enable RX flow control + * OPT_PAUSE not set; + * ASYM_PAUSE set; + */ + cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE; + cfg.options &= ~DPNI_LINK_OPT_PAUSE; + break; + case RTE_FC_RX_PAUSE: + /* Enable TX Flow control + * OPT_PAUSE set + * ASYM_PAUSE set + */ + cfg.options |= DPNI_LINK_OPT_PAUSE; + cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE; + break; + case RTE_FC_NONE: + /* Disable Flow control + * OPT_PAUSE not set + * ASYM_PAUSE not set + */ + cfg.options &= ~DPNI_LINK_OPT_PAUSE; + cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE; + break; + default: + DPAA2_PMD_ERR("Incorrect Flow control flag (%d)", + fc_conf->mode); + return -1; + } + + ret = dpni_set_link_cfg(dpni, CMD_PRI_LOW, priv->token, &cfg); + if (ret) + DPAA2_PMD_ERR("Unable to set Link configuration (err=%d)", + ret); + + /* Enable link */ + dpaa2_dev_set_link_up(dev); + + return ret; +} + +static int +dpaa2_dev_rss_hash_update(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf) +{ + struct rte_eth_dev_data *data = dev->data; + struct rte_eth_conf *eth_conf = &data->dev_conf; + int ret; + + PMD_INIT_FUNC_TRACE(); + + if (rss_conf->rss_hf) { + ret = dpaa2_setup_flow_dist(dev, rss_conf->rss_hf); + if (ret) { + DPAA2_PMD_ERR("Unable to set flow dist"); + return ret; + } + } else { + ret = dpaa2_remove_flow_dist(dev, 0); + if (ret) { + DPAA2_PMD_ERR("Unable to remove flow dist"); + return ret; + } + } + eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_conf->rss_hf; + return 0; +} + +static int +dpaa2_dev_rss_hash_conf_get(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf) +{ + struct rte_eth_dev_data *data = dev->data; + struct rte_eth_conf *eth_conf = &data->dev_conf; + + /* dpaa2 does not support rss_key, so length should be 0*/ + rss_conf->rss_key_len = 0; + rss_conf->rss_hf = eth_conf->rx_adv_conf.rss_conf.rss_hf; + return 0; +} + +int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev, + int eth_rx_queue_id, + struct dpaa2_dpcon_dev *dpcon, + const struct rte_event_eth_rx_adapter_queue_conf *queue_conf) +{ + struct dpaa2_dev_priv *eth_priv = dev->data->dev_private; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; + struct dpaa2_queue *dpaa2_ethq = eth_priv->rx_vq[eth_rx_queue_id]; + uint8_t flow_id = dpaa2_ethq->flow_id; + struct dpni_queue cfg; + uint8_t options, priority; + int ret; + + if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_PARALLEL) + dpaa2_ethq->cb = dpaa2_dev_process_parallel_event; + else if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ATOMIC) + dpaa2_ethq->cb = dpaa2_dev_process_atomic_event; + else if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ORDERED) + dpaa2_ethq->cb = dpaa2_dev_process_ordered_event; + else + return -EINVAL; + + priority = (RTE_EVENT_DEV_PRIORITY_LOWEST / queue_conf->ev.priority) * + (dpcon->num_priorities - 1); + + memset(&cfg, 0, sizeof(struct dpni_queue)); + options = DPNI_QUEUE_OPT_DEST; + cfg.destination.type = DPNI_DEST_DPCON; + cfg.destination.id = 
dpcon->dpcon_id; + cfg.destination.priority = priority; + + if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ATOMIC) { + options |= DPNI_QUEUE_OPT_HOLD_ACTIVE; + cfg.destination.hold_active = 1; + } + + if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ORDERED && + !eth_priv->en_ordered) { + struct opr_cfg ocfg; + + /* Restoration window size = 256 frames */ + ocfg.oprrws = 3; + /* Restoration window size = 512 frames for LX2 */ + if (dpaa2_svr_family == SVR_LX2160A) + ocfg.oprrws = 4; + /* Auto advance NESN window enabled */ + ocfg.oa = 1; + /* Late arrival window size disabled */ + ocfg.olws = 0; + /* ORL resource exhaustaion advance NESN disabled */ + ocfg.oeane = 0; + /* Loose ordering enabled */ + ocfg.oloe = 1; + eth_priv->en_loose_ordered = 1; + /* Strict ordering enabled if explicitly set */ + if (getenv("DPAA2_STRICT_ORDERING_ENABLE")) { + ocfg.oloe = 0; + eth_priv->en_loose_ordered = 0; + } + + ret = dpni_set_opr(dpni, CMD_PRI_LOW, eth_priv->token, + dpaa2_ethq->tc_index, flow_id, + OPR_OPT_CREATE, &ocfg); + if (ret) { + DPAA2_PMD_ERR("Error setting opr: ret: %d\n", ret); + return ret; + } + + eth_priv->en_ordered = 1; + } + + options |= DPNI_QUEUE_OPT_USER_CTX; + cfg.user_context = (size_t)(dpaa2_ethq); + + ret = dpni_set_queue(dpni, CMD_PRI_LOW, eth_priv->token, DPNI_QUEUE_RX, + dpaa2_ethq->tc_index, flow_id, options, &cfg); + if (ret) { + DPAA2_PMD_ERR("Error in dpni_set_queue: ret: %d", ret); + return ret; + } + + memcpy(&dpaa2_ethq->ev, &queue_conf->ev, sizeof(struct rte_event)); + + return 0; +} + +int dpaa2_eth_eventq_detach(const struct rte_eth_dev *dev, + int eth_rx_queue_id) +{ + struct dpaa2_dev_priv *eth_priv = dev->data->dev_private; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; + struct dpaa2_queue *dpaa2_ethq = eth_priv->rx_vq[eth_rx_queue_id]; + uint8_t flow_id = dpaa2_ethq->flow_id; + struct dpni_queue cfg; + uint8_t options; + int ret; + + memset(&cfg, 0, sizeof(struct dpni_queue)); + options = DPNI_QUEUE_OPT_DEST; + cfg.destination.type = DPNI_DEST_NONE; + + ret = dpni_set_queue(dpni, CMD_PRI_LOW, eth_priv->token, DPNI_QUEUE_RX, + dpaa2_ethq->tc_index, flow_id, options, &cfg); + if (ret) + DPAA2_PMD_ERR("Error in dpni_set_queue: ret: %d", ret); + + return ret; +} + +static inline int +dpaa2_dev_verify_filter_ops(enum rte_filter_op filter_op) +{ + unsigned int i; + + for (i = 0; i < RTE_DIM(dpaa2_supported_filter_ops); i++) { + if (dpaa2_supported_filter_ops[i] == filter_op) + return 0; + } + return -ENOTSUP; +} + +static int +dpaa2_dev_flow_ctrl(struct rte_eth_dev *dev, + enum rte_filter_type filter_type, + enum rte_filter_op filter_op, + void *arg) +{ + int ret = 0; + + if (!dev) + return -ENODEV; + + switch (filter_type) { + case RTE_ETH_FILTER_GENERIC: + if (dpaa2_dev_verify_filter_ops(filter_op) < 0) { + ret = -ENOTSUP; + break; + } + *(const void **)arg = &dpaa2_flow_ops; + dpaa2_filter_type |= filter_type; + break; + default: + RTE_LOG(ERR, PMD, "Filter type (%d) not supported", + filter_type); + ret = -ENOTSUP; + break; + } + return ret; +} + +static struct eth_dev_ops dpaa2_ethdev_ops = { + .dev_configure = dpaa2_eth_dev_configure, + .dev_start = dpaa2_dev_start, + .dev_stop = dpaa2_dev_stop, + .dev_close = dpaa2_dev_close, + .promiscuous_enable = dpaa2_dev_promiscuous_enable, + .promiscuous_disable = dpaa2_dev_promiscuous_disable, + .allmulticast_enable = dpaa2_dev_allmulticast_enable, + .allmulticast_disable = dpaa2_dev_allmulticast_disable, + .dev_set_link_up = dpaa2_dev_set_link_up, + .dev_set_link_down = dpaa2_dev_set_link_down, 
+ .link_update = dpaa2_dev_link_update, + .stats_get = dpaa2_dev_stats_get, + .xstats_get = dpaa2_dev_xstats_get, + .xstats_get_by_id = dpaa2_xstats_get_by_id, + .xstats_get_names_by_id = dpaa2_xstats_get_names_by_id, + .xstats_get_names = dpaa2_xstats_get_names, + .stats_reset = dpaa2_dev_stats_reset, + .xstats_reset = dpaa2_dev_stats_reset, + .fw_version_get = dpaa2_fw_version_get, + .dev_infos_get = dpaa2_dev_info_get, + .dev_supported_ptypes_get = dpaa2_supported_ptypes_get, + .mtu_set = dpaa2_dev_mtu_set, + .vlan_filter_set = dpaa2_vlan_filter_set, + .vlan_offload_set = dpaa2_vlan_offload_set, + .vlan_tpid_set = dpaa2_vlan_tpid_set, + .rx_queue_setup = dpaa2_dev_rx_queue_setup, + .rx_queue_release = dpaa2_dev_rx_queue_release, + .tx_queue_setup = dpaa2_dev_tx_queue_setup, + .tx_queue_release = dpaa2_dev_tx_queue_release, + .rx_queue_count = dpaa2_dev_rx_queue_count, + .flow_ctrl_get = dpaa2_flow_ctrl_get, + .flow_ctrl_set = dpaa2_flow_ctrl_set, + .mac_addr_add = dpaa2_dev_add_mac_addr, + .mac_addr_remove = dpaa2_dev_remove_mac_addr, + .mac_addr_set = dpaa2_dev_set_mac_addr, + .rss_hash_update = dpaa2_dev_rss_hash_update, + .rss_hash_conf_get = dpaa2_dev_rss_hash_conf_get, + .filter_ctrl = dpaa2_dev_flow_ctrl, +#if defined(RTE_LIBRTE_IEEE1588) + .timesync_enable = dpaa2_timesync_enable, + .timesync_disable = dpaa2_timesync_disable, + .timesync_read_time = dpaa2_timesync_read_time, + .timesync_write_time = dpaa2_timesync_write_time, + .timesync_adjust_time = dpaa2_timesync_adjust_time, + .timesync_read_rx_timestamp = dpaa2_timesync_read_rx_timestamp, + .timesync_read_tx_timestamp = dpaa2_timesync_read_tx_timestamp, +#endif +}; + +/* Populate the mac address from physically available (u-boot/firmware) and/or + * one set by higher layers like MC (restool) etc. + * Returns the table of MAC entries (multiple entries) + */ +static int +populate_mac_addr(struct fsl_mc_io *dpni_dev, struct dpaa2_dev_priv *priv, + struct rte_ether_addr *mac_entry) +{ + int ret; + struct rte_ether_addr phy_mac, prime_mac; + + memset(&phy_mac, 0, sizeof(struct rte_ether_addr)); + memset(&prime_mac, 0, sizeof(struct rte_ether_addr)); + + /* Get the physical device MAC address */ + ret = dpni_get_port_mac_addr(dpni_dev, CMD_PRI_LOW, priv->token, + phy_mac.addr_bytes); + if (ret) { + DPAA2_PMD_ERR("DPNI get physical port MAC failed: %d", ret); + goto cleanup; + } + + ret = dpni_get_primary_mac_addr(dpni_dev, CMD_PRI_LOW, priv->token, + prime_mac.addr_bytes); + if (ret) { + DPAA2_PMD_ERR("DPNI get Prime port MAC failed: %d", ret); + goto cleanup; + } + + /* Now that both MAC have been obtained, do: + * if not_empty_mac(phy) && phy != Prime, overwrite prime with Phy + * and return phy + * If empty_mac(phy), return prime. 
+ * if both are empty, create random MAC, set as prime and return + */ + if (!rte_is_zero_ether_addr(&phy_mac)) { + /* If the addresses are not same, overwrite prime */ + if (!rte_is_same_ether_addr(&phy_mac, &prime_mac)) { + ret = dpni_set_primary_mac_addr(dpni_dev, CMD_PRI_LOW, + priv->token, + phy_mac.addr_bytes); + if (ret) { + DPAA2_PMD_ERR("Unable to set MAC Address: %d", + ret); + goto cleanup; + } + memcpy(&prime_mac, &phy_mac, + sizeof(struct rte_ether_addr)); + } + } else if (rte_is_zero_ether_addr(&prime_mac)) { + /* In case phys and prime, both are zero, create random MAC */ + rte_eth_random_addr(prime_mac.addr_bytes); + ret = dpni_set_primary_mac_addr(dpni_dev, CMD_PRI_LOW, + priv->token, + prime_mac.addr_bytes); + if (ret) { + DPAA2_PMD_ERR("Unable to set MAC Address: %d", ret); + goto cleanup; + } + } + + /* prime_mac the final MAC address */ + memcpy(mac_entry, &prime_mac, sizeof(struct rte_ether_addr)); + return 0; + +cleanup: + return -1; +} + +static int +check_devargs_handler(__rte_unused const char *key, const char *value, + __rte_unused void *opaque) +{ + if (strcmp(value, "1")) + return -1; + + return 0; +} + +static int +dpaa2_get_devargs(struct rte_devargs *devargs, const char *key) +{ + struct rte_kvargs *kvlist; + + if (!devargs) + return 0; + + kvlist = rte_kvargs_parse(devargs->args, NULL); + if (!kvlist) + return 0; + + if (!rte_kvargs_count(kvlist, key)) { + rte_kvargs_free(kvlist); + return 0; + } + + if (rte_kvargs_process(kvlist, key, + check_devargs_handler, NULL) < 0) { + rte_kvargs_free(kvlist); + return 0; + } + rte_kvargs_free(kvlist); + + return 1; +} + +static int +dpaa2_dev_init(struct rte_eth_dev *eth_dev) +{ + struct rte_device *dev = eth_dev->device; + struct rte_dpaa2_device *dpaa2_dev; + struct fsl_mc_io *dpni_dev; + struct dpni_attr attr; + struct dpaa2_dev_priv *priv = eth_dev->data->dev_private; + struct dpni_buffer_layout layout; + int ret, hw_id, i; + + PMD_INIT_FUNC_TRACE(); + + dpni_dev = rte_malloc(NULL, sizeof(struct fsl_mc_io), 0); + if (!dpni_dev) { + DPAA2_PMD_ERR("Memory allocation failed for dpni device"); + return -1; + } + dpni_dev->regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX); + eth_dev->process_private = (void *)dpni_dev; + + /* For secondary processes, the primary has done all the work */ + if (rte_eal_process_type() != RTE_PROC_PRIMARY) { + /* In case of secondary, only burst and ops API need to be + * plugged. 
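+		 * The Rx burst handler is chosen from the same devargs as in
+		 * the primary process, e.g. an illustrative device string
+		 * such as "fslmc:dpni.1,drv_loopback=1" selects the loopback
+		 * Rx path and "drv_no_prefetch=1" the non-prefetch one.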
+ */ + eth_dev->dev_ops = &dpaa2_ethdev_ops; + if (dpaa2_get_devargs(dev->devargs, DRIVER_LOOPBACK_MODE)) + eth_dev->rx_pkt_burst = dpaa2_dev_loopback_rx; + else if (dpaa2_get_devargs(dev->devargs, + DRIVER_NO_PREFETCH_MODE)) + eth_dev->rx_pkt_burst = dpaa2_dev_rx; + else + eth_dev->rx_pkt_burst = dpaa2_dev_prefetch_rx; + eth_dev->tx_pkt_burst = dpaa2_dev_tx; + return 0; + } + + dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device); + + hw_id = dpaa2_dev->object_id; + ret = dpni_open(dpni_dev, CMD_PRI_LOW, hw_id, &priv->token); + if (ret) { + DPAA2_PMD_ERR( + "Failure in opening dpni@%d with err code %d", + hw_id, ret); + rte_free(dpni_dev); + return -1; + } + + /* Clean the device first */ + ret = dpni_reset(dpni_dev, CMD_PRI_LOW, priv->token); + if (ret) { + DPAA2_PMD_ERR("Failure cleaning dpni@%d with err code %d", + hw_id, ret); + goto init_err; + } + + ret = dpni_get_attributes(dpni_dev, CMD_PRI_LOW, priv->token, &attr); + if (ret) { + DPAA2_PMD_ERR( + "Failure in get dpni@%d attribute, err code %d", + hw_id, ret); + goto init_err; + } + + priv->num_rx_tc = attr.num_rx_tcs; + /* only if the custom CG is enabled */ + if (attr.options & DPNI_OPT_CUSTOM_CG) + priv->max_cgs = attr.num_cgs; + else + priv->max_cgs = 0; + + for (i = 0; i < priv->max_cgs; i++) + priv->cgid_in_use[i] = 0; + + for (i = 0; i < attr.num_rx_tcs; i++) + priv->nb_rx_queues += attr.num_queues; + + /* Using number of TX queues as number of TX TCs */ + priv->nb_tx_queues = attr.num_tx_tcs; + + DPAA2_PMD_DEBUG("RX-TC= %d, rx_queues= %d, tx_queues=%d, max_cgs=%d", + priv->num_rx_tc, priv->nb_rx_queues, + priv->nb_tx_queues, priv->max_cgs); + + priv->hw = dpni_dev; + priv->hw_id = hw_id; + priv->options = attr.options; + priv->max_mac_filters = attr.mac_filter_entries; + priv->max_vlan_filters = attr.vlan_filter_entries; + priv->flags = 0; +#if defined(RTE_LIBRTE_IEEE1588) + priv->tx_conf_en = 1; +#else + priv->tx_conf_en = 0; +#endif + + /* Allocate memory for hardware structure for queues */ + ret = dpaa2_alloc_rx_tx_queues(eth_dev); + if (ret) { + DPAA2_PMD_ERR("Queue allocation Failed"); + goto init_err; + } + + /* Allocate memory for storing MAC addresses. + * Table of mac_filter_entries size is allocated so that RTE ether lib + * can add MAC entries when rte_eth_dev_mac_addr_add is called. + */ + eth_dev->data->mac_addrs = rte_zmalloc("dpni", + RTE_ETHER_ADDR_LEN * attr.mac_filter_entries, 0); + if (eth_dev->data->mac_addrs == NULL) { + DPAA2_PMD_ERR( + "Failed to allocate %d bytes needed to store MAC addresses", + RTE_ETHER_ADDR_LEN * attr.mac_filter_entries); + ret = -ENOMEM; + goto init_err; + } + + ret = populate_mac_addr(dpni_dev, priv, ð_dev->data->mac_addrs[0]); + if (ret) { + DPAA2_PMD_ERR("Unable to fetch MAC Address for device"); + rte_free(eth_dev->data->mac_addrs); + eth_dev->data->mac_addrs = NULL; + goto init_err; + } + + /* ... tx buffer layout ... */ + memset(&layout, 0, sizeof(struct dpni_buffer_layout)); + if (priv->tx_conf_en) { + layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS | + DPNI_BUF_LAYOUT_OPT_TIMESTAMP; + layout.pass_timestamp = true; + } else { + layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS; + } + layout.pass_frame_status = 1; + ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token, + DPNI_QUEUE_TX, &layout); + if (ret) { + DPAA2_PMD_ERR("Error (%d) in setting tx buffer layout", ret); + goto init_err; + } + + /* ... tx-conf and error buffer layout ... 
*/ + memset(&layout, 0, sizeof(struct dpni_buffer_layout)); + if (priv->tx_conf_en) { + layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS | + DPNI_BUF_LAYOUT_OPT_TIMESTAMP; + layout.pass_timestamp = true; + } else { + layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS; + } + layout.pass_frame_status = 1; + ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token, + DPNI_QUEUE_TX_CONFIRM, &layout); + if (ret) { + DPAA2_PMD_ERR("Error (%d) in setting tx-conf buffer layout", + ret); + goto init_err; + } + + eth_dev->dev_ops = &dpaa2_ethdev_ops; + + if (dpaa2_get_devargs(dev->devargs, DRIVER_LOOPBACK_MODE)) { + eth_dev->rx_pkt_burst = dpaa2_dev_loopback_rx; + DPAA2_PMD_INFO("Loopback mode"); + } else if (dpaa2_get_devargs(dev->devargs, DRIVER_NO_PREFETCH_MODE)) { + eth_dev->rx_pkt_burst = dpaa2_dev_rx; + DPAA2_PMD_INFO("No Prefetch mode"); + } else { + eth_dev->rx_pkt_burst = dpaa2_dev_prefetch_rx; + } + eth_dev->tx_pkt_burst = dpaa2_dev_tx; + + /*Init fields w.r.t. classficaition*/ + memset(&priv->extract.qos_key_cfg, 0, sizeof(struct dpkg_profile_cfg)); + priv->extract.qos_extract_param = (size_t)rte_malloc(NULL, 256, 64); + if (!priv->extract.qos_extract_param) { + DPAA2_PMD_ERR(" Error(%d) in allocation resources for flow " + " classificaiton ", ret); + goto init_err; + } + for (i = 0; i < MAX_TCS; i++) { + memset(&priv->extract.fs_key_cfg[i], 0, + sizeof(struct dpkg_profile_cfg)); + priv->extract.fs_extract_param[i] = + (size_t)rte_malloc(NULL, 256, 64); + if (!priv->extract.fs_extract_param[i]) { + DPAA2_PMD_ERR(" Error(%d) in allocation resources for flow classificaiton", + ret); + goto init_err; + } + } + + ret = dpni_set_max_frame_length(dpni_dev, CMD_PRI_LOW, priv->token, + RTE_ETHER_MAX_LEN - RTE_ETHER_CRC_LEN + + VLAN_TAG_SIZE); + if (ret) { + DPAA2_PMD_ERR("Unable to set mtu. check config"); + goto init_err; + } + + /*TODO To enable soft parser support DPAA2 driver needs to integrate + * with external entity to receive byte code for software sequence + * and same will be offload to the H/W using MC interface. + * Currently it is assumed that DPAA2 driver has byte code by some + * mean and same if offloaded to H/W. 
+ */ + if (getenv("DPAA2_ENABLE_SOFT_PARSER")) { + WRIOP_SS_INITIALIZER(priv); + ret = dpaa2_eth_load_wriop_soft_parser(priv, DPNI_SS_INGRESS); + if (ret < 0) { + DPAA2_PMD_ERR(" Error(%d) in loading softparser\n", + ret); + return ret; + } + + ret = dpaa2_eth_enable_wriop_soft_parser(priv, + DPNI_SS_INGRESS); + if (ret < 0) { + DPAA2_PMD_ERR(" Error(%d) in enabling softparser\n", + ret); + return ret; + } + } + RTE_LOG(INFO, PMD, "%s: netdev created\n", eth_dev->data->name); + return 0; +init_err: + dpaa2_dev_uninit(eth_dev); + return ret; +} + +static int +dpaa2_dev_uninit(struct rte_eth_dev *eth_dev) +{ + struct dpaa2_dev_priv *priv = eth_dev->data->dev_private; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)eth_dev->process_private; + int i, ret; + + PMD_INIT_FUNC_TRACE(); + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; + + if (!dpni) { + DPAA2_PMD_WARN("Already closed or not started"); + return -1; + } + + dpaa2_dev_close(eth_dev); + + dpaa2_free_rx_tx_queues(eth_dev); + + /* Close the device at underlying layer*/ + ret = dpni_close(dpni, CMD_PRI_LOW, priv->token); + if (ret) { + DPAA2_PMD_ERR( + "Failure closing dpni device with err code %d", + ret); + } + + /* Free the allocated memory for ethernet private data and dpni*/ + priv->hw = NULL; + eth_dev->process_private = NULL; + rte_free(dpni); + + for (i = 0; i < MAX_TCS; i++) { + if (priv->extract.fs_extract_param[i]) + rte_free((void *)(size_t)priv->extract.fs_extract_param[i]); + } + + if (priv->extract.qos_extract_param) + rte_free((void *)(size_t)priv->extract.qos_extract_param); + + eth_dev->dev_ops = NULL; + eth_dev->rx_pkt_burst = NULL; + eth_dev->tx_pkt_burst = NULL; + + DPAA2_PMD_INFO("%s: netdev deleted", eth_dev->data->name); + return 0; +} + +static int +rte_dpaa2_probe(struct rte_dpaa2_driver *dpaa2_drv, + struct rte_dpaa2_device *dpaa2_dev) +{ + struct rte_eth_dev *eth_dev; + struct dpaa2_dev_priv *dev_priv; + int diag; + + if ((DPAA2_MBUF_HW_ANNOTATION + DPAA2_FD_PTA_SIZE) > + RTE_PKTMBUF_HEADROOM) { + DPAA2_PMD_ERR( + "RTE_PKTMBUF_HEADROOM(%d) shall be > DPAA2 Annotation req(%d)", + RTE_PKTMBUF_HEADROOM, + DPAA2_MBUF_HW_ANNOTATION + DPAA2_FD_PTA_SIZE); + + return -1; + } + + if (rte_eal_process_type() == RTE_PROC_PRIMARY) { + eth_dev = rte_eth_dev_allocate(dpaa2_dev->device.name); + if (!eth_dev) + return -ENODEV; + dev_priv = rte_zmalloc("ethdev private structure", + sizeof(struct dpaa2_dev_priv), + RTE_CACHE_LINE_SIZE); + if (dev_priv == NULL) { + DPAA2_PMD_CRIT( + "Unable to allocate memory for private data"); + rte_eth_dev_release_port(eth_dev); + return -ENOMEM; + } + eth_dev->data->dev_private = (void *)dev_priv; + /* Store a pointer to eth_dev in dev_private */ + dev_priv->eth_dev = eth_dev; + dev_priv->tx_conf_en = 0; + } else { + eth_dev = rte_eth_dev_attach_secondary(dpaa2_dev->device.name); + if (!eth_dev) { + DPAA2_PMD_DEBUG("returning enodev"); + return -ENODEV; + } + } + + eth_dev->device = &dpaa2_dev->device; + + dpaa2_dev->eth_dev = eth_dev; + eth_dev->data->rx_mbuf_alloc_failed = 0; + + if (dpaa2_drv->drv_flags & RTE_DPAA2_DRV_INTR_LSC) + eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC; + + /* Invoke PMD device initialization function */ + diag = dpaa2_dev_init(eth_dev); + if (diag == 0) { + rte_eth_dev_probing_finish(eth_dev); + return 0; + } + + rte_eth_dev_release_port(eth_dev); + return diag; +} + +static int +rte_dpaa2_remove(struct rte_dpaa2_device *dpaa2_dev) +{ + struct rte_eth_dev *eth_dev; + + eth_dev = dpaa2_dev->eth_dev; + dpaa2_dev_uninit(eth_dev); + + 
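/*
 * Editorial sketch, not part of the upstream patch: the headroom guard in
 * rte_dpaa2_probe() above compares compile-time macros at run time.
 * Assuming DPAA2_MBUF_HW_ANNOTATION and DPAA2_FD_PTA_SIZE are plain
 * preprocessor constants (as RTE_PKTMBUF_HEADROOM is), the same invariant
 * could also be enforced at build time, e.g. with a C11 static assertion
 * or DPDK's RTE_BUILD_BUG_ON() from rte_common.h.
 */
_Static_assert(RTE_PKTMBUF_HEADROOM >=
	       DPAA2_MBUF_HW_ANNOTATION + DPAA2_FD_PTA_SIZE,
	       "RTE_PKTMBUF_HEADROOM too small for the DPAA2 annotation area");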
rte_eth_dev_release_port(eth_dev); + + return 0; +} + +static struct rte_dpaa2_driver rte_dpaa2_pmd = { + .drv_flags = RTE_DPAA2_DRV_INTR_LSC | RTE_DPAA2_DRV_IOVA_AS_VA, + .drv_type = DPAA2_ETH, + .probe = rte_dpaa2_probe, + .remove = rte_dpaa2_remove, +}; + +RTE_PMD_REGISTER_DPAA2(net_dpaa2, rte_dpaa2_pmd); +RTE_PMD_REGISTER_PARAM_STRING(net_dpaa2, + DRIVER_LOOPBACK_MODE "= " + DRIVER_NO_PREFETCH_MODE "="); +RTE_INIT(dpaa2_pmd_init_log) +{ + dpaa2_logtype_pmd = rte_log_register("pmd.net.dpaa2"); + if (dpaa2_logtype_pmd >= 0) + rte_log_set_level(dpaa2_logtype_pmd, RTE_LOG_NOTICE); +} diff --git a/src/spdk/dpdk/drivers/net/dpaa2/dpaa2_ethdev.h b/src/spdk/dpdk/drivers/net/dpaa2/dpaa2_ethdev.h new file mode 100644 index 000000000..c7fb6539f --- /dev/null +++ b/src/spdk/dpdk/drivers/net/dpaa2/dpaa2_ethdev.h @@ -0,0 +1,221 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright (c) 2015-2016 Freescale Semiconductor, Inc. All rights reserved. + * Copyright 2016-2019 NXP + * + */ + +#ifndef _DPAA2_ETHDEV_H +#define _DPAA2_ETHDEV_H + +#include +#include + +#include + +#include +#include + +#define DPAA2_MIN_RX_BUF_SIZE 512 +#define DPAA2_MAX_RX_PKT_LEN 10240 /*WRIOP support*/ + +#define MAX_TCS DPNI_MAX_TC +#define MAX_RX_QUEUES 128 +#define MAX_TX_QUEUES 16 +#define MAX_DPNI 8 + +#define DPAA2_RX_DEFAULT_NBDESC 512 + +/*default tc to be used for ,congestion, distribution etc configuration. */ +#define DPAA2_DEF_TC 0 + +/* Threshold for a Tx queue to *Enter* Congestion state. + */ +#define CONG_ENTER_TX_THRESHOLD 512 + +/* Threshold for a queue to *Exit* Congestion state. + */ +#define CONG_EXIT_TX_THRESHOLD 480 + +#define CONG_RETRY_COUNT 18000 + +/* RX queue tail drop threshold + * currently considering 64 KB packets + */ +#define CONG_THRESHOLD_RX_BYTES_Q (64 * 1024) +#define CONG_RX_OAL 128 + +/* Size of the input SMMU mapped memory required by MC */ +#define DIST_PARAM_IOVA_SIZE 256 + +/* Enable TX Congestion control support + * default is disable + */ +#define DPAA2_TX_CGR_OFF 0x01 + +/* Disable RX tail drop, default is enable */ +#define DPAA2_RX_TAILDROP_OFF 0x04 + +#define DPAA2_RSS_OFFLOAD_ALL ( \ + ETH_RSS_L2_PAYLOAD | \ + ETH_RSS_IP | \ + ETH_RSS_UDP | \ + ETH_RSS_TCP | \ + ETH_RSS_SCTP) + +/* LX2 FRC Parsed values (Little Endian) */ +#define DPAA2_PKT_TYPE_ETHER 0x0060 +#define DPAA2_PKT_TYPE_IPV4 0x0000 +#define DPAA2_PKT_TYPE_IPV6 0x0020 +#define DPAA2_PKT_TYPE_IPV4_EXT \ + (0x0001 | DPAA2_PKT_TYPE_IPV4) +#define DPAA2_PKT_TYPE_IPV6_EXT \ + (0x0001 | DPAA2_PKT_TYPE_IPV6) +#define DPAA2_PKT_TYPE_IPV4_TCP \ + (0x000e | DPAA2_PKT_TYPE_IPV4) +#define DPAA2_PKT_TYPE_IPV6_TCP \ + (0x000e | DPAA2_PKT_TYPE_IPV6) +#define DPAA2_PKT_TYPE_IPV4_UDP \ + (0x0010 | DPAA2_PKT_TYPE_IPV4) +#define DPAA2_PKT_TYPE_IPV6_UDP \ + (0x0010 | DPAA2_PKT_TYPE_IPV6) +#define DPAA2_PKT_TYPE_IPV4_SCTP \ + (0x000f | DPAA2_PKT_TYPE_IPV4) +#define DPAA2_PKT_TYPE_IPV6_SCTP \ + (0x000f | DPAA2_PKT_TYPE_IPV6) +#define DPAA2_PKT_TYPE_IPV4_ICMP \ + (0x0003 | DPAA2_PKT_TYPE_IPV4_EXT) +#define DPAA2_PKT_TYPE_IPV6_ICMP \ + (0x0003 | DPAA2_PKT_TYPE_IPV6_EXT) +#define DPAA2_PKT_TYPE_VLAN_1 0x0160 +#define DPAA2_PKT_TYPE_VLAN_2 0x0260 + +/* enable timestamp in mbuf*/ +extern enum pmd_dpaa2_ts dpaa2_enable_ts; + +#define DPAA2_QOS_TABLE_RECONFIGURE 1 +#define DPAA2_FS_TABLE_RECONFIGURE 2 + +/*Externaly defined*/ +extern const struct rte_flow_ops dpaa2_flow_ops; +extern enum rte_filter_type dpaa2_filter_type; + +struct dpaa2_dev_priv { + void *hw; + int32_t hw_id; + int32_t qdid; + uint16_t token; + uint8_t nb_tx_queues; 
+ uint8_t nb_rx_queues; + uint32_t options; + void *rx_vq[MAX_RX_QUEUES]; + void *tx_vq[MAX_TX_QUEUES]; + struct dpaa2_bp_list *bp_list; /** +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include + +#include +#include + +struct rte_flow { + LIST_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */ + struct dpni_rule_cfg rule; + uint8_t key_size; + uint8_t tc_id; + uint8_t flow_type; + uint8_t index; + enum rte_flow_action_type action; + uint16_t flow_id; +}; + +/* Layout for rule compositions for supported patterns */ +/* TODO: Current design only supports Ethernet + IPv4 based classification. */ +/* So corresponding offset macros are valid only. Rest are placeholder for */ +/* now. Once support for other netwrok headers will be added then */ +/* corresponding macros will be updated with correct values*/ +#define DPAA2_CLS_RULE_OFFSET_ETH 0 /*Start of buffer*/ +#define DPAA2_CLS_RULE_OFFSET_VLAN 14 /* DPAA2_CLS_RULE_OFFSET_ETH */ + /* + Sizeof Eth fields */ +#define DPAA2_CLS_RULE_OFFSET_IPV4 14 /* DPAA2_CLS_RULE_OFFSET_VLAN */ + /* + Sizeof VLAN fields */ +#define DPAA2_CLS_RULE_OFFSET_IPV6 25 /* DPAA2_CLS_RULE_OFFSET_IPV4 */ + /* + Sizeof IPV4 fields */ +#define DPAA2_CLS_RULE_OFFSET_ICMP 58 /* DPAA2_CLS_RULE_OFFSET_IPV6 */ + /* + Sizeof IPV6 fields */ +#define DPAA2_CLS_RULE_OFFSET_UDP 60 /* DPAA2_CLS_RULE_OFFSET_ICMP */ + /* + Sizeof ICMP fields */ +#define DPAA2_CLS_RULE_OFFSET_TCP 64 /* DPAA2_CLS_RULE_OFFSET_UDP */ + /* + Sizeof UDP fields */ +#define DPAA2_CLS_RULE_OFFSET_SCTP 68 /* DPAA2_CLS_RULE_OFFSET_TCP */ + /* + Sizeof TCP fields */ +#define DPAA2_CLS_RULE_OFFSET_GRE 72 /* DPAA2_CLS_RULE_OFFSET_SCTP */ + /* + Sizeof SCTP fields */ + +static const +enum rte_flow_item_type dpaa2_supported_pattern_type[] = { + RTE_FLOW_ITEM_TYPE_END, + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_ICMP, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_GRE, +}; + +static const +enum rte_flow_action_type dpaa2_supported_action_type[] = { + RTE_FLOW_ACTION_TYPE_END, + RTE_FLOW_ACTION_TYPE_QUEUE, + RTE_FLOW_ACTION_TYPE_RSS +}; + +enum rte_filter_type dpaa2_filter_type = RTE_ETH_FILTER_NONE; +static const void *default_mask; + +static int +dpaa2_configure_flow_eth(struct rte_flow *flow, + struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item *pattern, + const struct rte_flow_action actions[] __rte_unused, + struct rte_flow_error *error __rte_unused) +{ + int index, j = 0; + size_t key_iova; + size_t mask_iova; + int device_configured = 0, entry_found = 0; + uint32_t group; + const struct rte_flow_item_eth *spec, *mask; + + /* TODO: Currently upper bound of range parameter is not implemented */ + const struct rte_flow_item_eth *last __rte_unused; + struct dpaa2_dev_priv *priv = dev->data->dev_private; + + group = attr->group; + + /* DPAA2 platform has a limitation that extract parameter can not be */ + /* more than DPKG_MAX_NUM_OF_EXTRACTS. Verify this limitation too.*/ + /* TODO: pattern is an array of 9 elements where 9th pattern element */ + /* is for QoS table and 1-8th pattern element is for FS tables. */ + /* It can be changed to macro. 
*/ + if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) { + DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n", + DPKG_MAX_NUM_OF_EXTRACTS); + return -ENOTSUP; + } + + if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) { + DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n", + DPKG_MAX_NUM_OF_EXTRACTS); + return -ENOTSUP; + } + + for (j = 0; j < priv->pattern[8].item_count; j++) { + if (priv->pattern[8].pattern_type[j] != pattern->type) { + continue; + } else { + entry_found = 1; + break; + } + } + + if (!entry_found) { + priv->pattern[8].pattern_type[j] = pattern->type; + priv->pattern[8].item_count++; + device_configured |= DPAA2_QOS_TABLE_RECONFIGURE; + } + + entry_found = 0; + for (j = 0; j < priv->pattern[group].item_count; j++) { + if (priv->pattern[group].pattern_type[j] != pattern->type) { + continue; + } else { + entry_found = 1; + break; + } + } + + if (!entry_found) { + priv->pattern[group].pattern_type[j] = pattern->type; + priv->pattern[group].item_count++; + device_configured |= DPAA2_FS_TABLE_RECONFIGURE; + } + + /* Get traffic class index and flow id to be configured */ + flow->tc_id = group; + flow->index = attr->priority; + + if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) { + index = priv->extract.qos_key_cfg.num_extracts; + priv->extract.qos_key_cfg.extracts[index].type = + DPKG_EXTRACT_FROM_HDR; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_ETH; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_ETH_SA; + index++; + + priv->extract.qos_key_cfg.extracts[index].type = + DPKG_EXTRACT_FROM_HDR; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_ETH; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_ETH_DA; + index++; + + priv->extract.qos_key_cfg.extracts[index].type = + DPKG_EXTRACT_FROM_HDR; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_ETH; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_ETH_TYPE; + index++; + + priv->extract.qos_key_cfg.num_extracts = index; + } + + if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) { + index = priv->extract.fs_key_cfg[group].num_extracts; + priv->extract.fs_key_cfg[group].extracts[index].type = + DPKG_EXTRACT_FROM_HDR; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_ETH; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_ETH_SA; + index++; + + priv->extract.fs_key_cfg[group].extracts[index].type = + DPKG_EXTRACT_FROM_HDR; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_ETH; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_ETH_DA; + index++; + + priv->extract.fs_key_cfg[group].extracts[index].type = + DPKG_EXTRACT_FROM_HDR; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_ETH; + 
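/*
 * Editorial sketch, not part of the upstream patch: every protocol handler
 * in this file fills one dpkg extract with the same four assignments (type,
 * from_hdr.type, from_hdr.prot, from_hdr.field) and then bumps num_extracts,
 * as in the surrounding Ethernet setup. A small helper written against the
 * dpkg_profile_cfg layout used here would express each header-field extract
 * in one call; "dpaa2_sketch_add_hdr_extract" is a hypothetical name and the
 * caller is still expected to check DPKG_MAX_NUM_OF_EXTRACTS first.
 */
static inline void
dpaa2_sketch_add_hdr_extract(struct dpkg_profile_cfg *cfg,
			     enum net_prot prot, uint32_t field)
{
	int i = cfg->num_extracts;

	cfg->extracts[i].type = DPKG_EXTRACT_FROM_HDR;
	cfg->extracts[i].extract.from_hdr.type = DPKG_FULL_FIELD;
	cfg->extracts[i].extract.from_hdr.prot = prot;
	cfg->extracts[i].extract.from_hdr.field = field;
	cfg->num_extracts = i + 1;
}

/*
 * Usage sketch: the three Ethernet extracts configured around this point
 * would then read:
 *	dpaa2_sketch_add_hdr_extract(&priv->extract.fs_key_cfg[group],
 *				     NET_PROT_ETH, NH_FLD_ETH_SA);
 *	dpaa2_sketch_add_hdr_extract(&priv->extract.fs_key_cfg[group],
 *				     NET_PROT_ETH, NH_FLD_ETH_DA);
 *	dpaa2_sketch_add_hdr_extract(&priv->extract.fs_key_cfg[group],
 *				     NET_PROT_ETH, NH_FLD_ETH_TYPE);
 */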
priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_ETH_TYPE; + index++; + + priv->extract.fs_key_cfg[group].num_extracts = index; + } + + /* Parse pattern list to get the matching parameters */ + spec = (const struct rte_flow_item_eth *)pattern->spec; + last = (const struct rte_flow_item_eth *)pattern->last; + mask = (const struct rte_flow_item_eth *) + (pattern->mask ? pattern->mask : default_mask); + + /* Key rule */ + key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_ETH; + memcpy((void *)key_iova, (const void *)(spec->src.addr_bytes), + sizeof(struct rte_ether_addr)); + key_iova += sizeof(struct rte_ether_addr); + memcpy((void *)key_iova, (const void *)(spec->dst.addr_bytes), + sizeof(struct rte_ether_addr)); + key_iova += sizeof(struct rte_ether_addr); + memcpy((void *)key_iova, (const void *)(&spec->type), + sizeof(rte_be16_t)); + + /* Key mask */ + mask_iova = flow->rule.mask_iova + DPAA2_CLS_RULE_OFFSET_ETH; + memcpy((void *)mask_iova, (const void *)(mask->src.addr_bytes), + sizeof(struct rte_ether_addr)); + mask_iova += sizeof(struct rte_ether_addr); + memcpy((void *)mask_iova, (const void *)(mask->dst.addr_bytes), + sizeof(struct rte_ether_addr)); + mask_iova += sizeof(struct rte_ether_addr); + memcpy((void *)mask_iova, (const void *)(&mask->type), + sizeof(rte_be16_t)); + + flow->rule.key_size = (DPAA2_CLS_RULE_OFFSET_ETH + + ((2 * sizeof(struct rte_ether_addr)) + + sizeof(rte_be16_t))); + return device_configured; +} + +static int +dpaa2_configure_flow_vlan(struct rte_flow *flow, + struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item *pattern, + const struct rte_flow_action actions[] __rte_unused, + struct rte_flow_error *error __rte_unused) +{ + int index, j = 0; + size_t key_iova; + size_t mask_iova; + int device_configured = 0, entry_found = 0; + uint32_t group; + const struct rte_flow_item_vlan *spec, *mask; + + const struct rte_flow_item_vlan *last __rte_unused; + struct dpaa2_dev_priv *priv = dev->data->dev_private; + + group = attr->group; + + /* DPAA2 platform has a limitation that extract parameter can not be */ + /* more than DPKG_MAX_NUM_OF_EXTRACTS. 
Verify this limitation too.*/ + if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) { + DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n", + DPKG_MAX_NUM_OF_EXTRACTS); + return -ENOTSUP; + } + + if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) { + DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n", + DPKG_MAX_NUM_OF_EXTRACTS); + return -ENOTSUP; + } + + for (j = 0; j < priv->pattern[8].item_count; j++) { + if (priv->pattern[8].pattern_type[j] != pattern->type) { + continue; + } else { + entry_found = 1; + break; + } + } + + if (!entry_found) { + priv->pattern[8].pattern_type[j] = pattern->type; + priv->pattern[8].item_count++; + device_configured |= DPAA2_QOS_TABLE_RECONFIGURE; + } + + entry_found = 0; + for (j = 0; j < priv->pattern[group].item_count; j++) { + if (priv->pattern[group].pattern_type[j] != pattern->type) { + continue; + } else { + entry_found = 1; + break; + } + } + + if (!entry_found) { + priv->pattern[group].pattern_type[j] = pattern->type; + priv->pattern[group].item_count++; + device_configured |= DPAA2_FS_TABLE_RECONFIGURE; + } + + + /* Get traffic class index and flow id to be configured */ + flow->tc_id = group; + flow->index = attr->priority; + + if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) { + index = priv->extract.qos_key_cfg.num_extracts; + priv->extract.qos_key_cfg.extracts[index].type = + DPKG_EXTRACT_FROM_HDR; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_VLAN; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_VLAN_TCI; + priv->extract.qos_key_cfg.num_extracts++; + } + + if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) { + index = priv->extract.fs_key_cfg[group].num_extracts; + priv->extract.fs_key_cfg[group].extracts[index].type = + DPKG_EXTRACT_FROM_HDR; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_VLAN; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_VLAN_TCI; + priv->extract.fs_key_cfg[group].num_extracts++; + } + + /* Parse pattern list to get the matching parameters */ + spec = (const struct rte_flow_item_vlan *)pattern->spec; + last = (const struct rte_flow_item_vlan *)pattern->last; + mask = (const struct rte_flow_item_vlan *) + (pattern->mask ? 
pattern->mask : default_mask); + + key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_VLAN; + memcpy((void *)key_iova, (const void *)(&spec->tci), + sizeof(rte_be16_t)); + + mask_iova = flow->rule.mask_iova + DPAA2_CLS_RULE_OFFSET_VLAN; + memcpy((void *)mask_iova, (const void *)(&mask->tci), + sizeof(rte_be16_t)); + + flow->rule.key_size = (DPAA2_CLS_RULE_OFFSET_VLAN + sizeof(rte_be16_t)); + return device_configured; +} + +static int +dpaa2_configure_flow_ipv4(struct rte_flow *flow, + struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item *pattern, + const struct rte_flow_action actions[] __rte_unused, + struct rte_flow_error *error __rte_unused) +{ + int index, j = 0; + size_t key_iova; + size_t mask_iova; + int device_configured = 0, entry_found = 0; + uint32_t group; + const struct rte_flow_item_ipv4 *spec, *mask; + + const struct rte_flow_item_ipv4 *last __rte_unused; + struct dpaa2_dev_priv *priv = dev->data->dev_private; + + group = attr->group; + + /* DPAA2 platform has a limitation that extract parameter can not be */ + /* more than DPKG_MAX_NUM_OF_EXTRACTS. Verify this limitation too.*/ + if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) { + DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n", + DPKG_MAX_NUM_OF_EXTRACTS); + return -ENOTSUP; + } + + if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) { + DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n", + DPKG_MAX_NUM_OF_EXTRACTS); + return -ENOTSUP; + } + + for (j = 0; j < priv->pattern[8].item_count; j++) { + if (priv->pattern[8].pattern_type[j] != pattern->type) { + continue; + } else { + entry_found = 1; + break; + } + } + + if (!entry_found) { + priv->pattern[8].pattern_type[j] = pattern->type; + priv->pattern[8].item_count++; + device_configured |= DPAA2_QOS_TABLE_RECONFIGURE; + } + + entry_found = 0; + for (j = 0; j < priv->pattern[group].item_count; j++) { + if (priv->pattern[group].pattern_type[j] != pattern->type) { + continue; + } else { + entry_found = 1; + break; + } + } + + if (!entry_found) { + priv->pattern[group].pattern_type[j] = pattern->type; + priv->pattern[group].item_count++; + device_configured |= DPAA2_FS_TABLE_RECONFIGURE; + } + + /* Get traffic class index and flow id to be configured */ + flow->tc_id = group; + flow->index = attr->priority; + + if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) { + index = priv->extract.qos_key_cfg.num_extracts; + priv->extract.qos_key_cfg.extracts[index].type = + DPKG_EXTRACT_FROM_HDR; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_IP; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_IP_SRC; + index++; + + priv->extract.qos_key_cfg.extracts[index].type = + DPKG_EXTRACT_FROM_HDR; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_IP; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_IP_DST; + index++; + + priv->extract.qos_key_cfg.extracts[index].type = + DPKG_EXTRACT_FROM_HDR; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_IP; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_IP_PROTO; + index++; + + priv->extract.qos_key_cfg.num_extracts = index; + } + + if 
(device_configured & DPAA2_FS_TABLE_RECONFIGURE) { + index = priv->extract.fs_key_cfg[group].num_extracts; + priv->extract.fs_key_cfg[group].extracts[index].type = + DPKG_EXTRACT_FROM_HDR; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_IP; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_IP_SRC; + index++; + + priv->extract.fs_key_cfg[group].extracts[index].type = + DPKG_EXTRACT_FROM_HDR; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_IP; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_IP_DST; + index++; + + priv->extract.fs_key_cfg[group].extracts[index].type = + DPKG_EXTRACT_FROM_HDR; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_IP; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_IP_PROTO; + index++; + + priv->extract.fs_key_cfg[group].num_extracts = index; + } + + /* Parse pattern list to get the matching parameters */ + spec = (const struct rte_flow_item_ipv4 *)pattern->spec; + last = (const struct rte_flow_item_ipv4 *)pattern->last; + mask = (const struct rte_flow_item_ipv4 *) + (pattern->mask ? pattern->mask : default_mask); + + key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_IPV4; + memcpy((void *)key_iova, (const void *)&spec->hdr.src_addr, + sizeof(uint32_t)); + key_iova += sizeof(uint32_t); + memcpy((void *)key_iova, (const void *)&spec->hdr.dst_addr, + sizeof(uint32_t)); + key_iova += sizeof(uint32_t); + memcpy((void *)key_iova, (const void *)&spec->hdr.next_proto_id, + sizeof(uint8_t)); + + mask_iova = flow->rule.mask_iova + DPAA2_CLS_RULE_OFFSET_IPV4; + memcpy((void *)mask_iova, (const void *)&mask->hdr.src_addr, + sizeof(uint32_t)); + mask_iova += sizeof(uint32_t); + memcpy((void *)mask_iova, (const void *)&mask->hdr.dst_addr, + sizeof(uint32_t)); + mask_iova += sizeof(uint32_t); + memcpy((void *)mask_iova, (const void *)&mask->hdr.next_proto_id, + sizeof(uint8_t)); + + flow->rule.key_size = (DPAA2_CLS_RULE_OFFSET_IPV4 + + (2 * sizeof(uint32_t)) + sizeof(uint8_t)); + + return device_configured; +} + +static int +dpaa2_configure_flow_ipv6(struct rte_flow *flow, + struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item *pattern, + const struct rte_flow_action actions[] __rte_unused, + struct rte_flow_error *error __rte_unused) +{ + int index, j = 0; + size_t key_iova; + size_t mask_iova; + int device_configured = 0, entry_found = 0; + uint32_t group; + const struct rte_flow_item_ipv6 *spec, *mask; + + const struct rte_flow_item_ipv6 *last __rte_unused; + struct dpaa2_dev_priv *priv = dev->data->dev_private; + + group = attr->group; + + /* DPAA2 platform has a limitation that extract parameter can not be */ + /* more than DPKG_MAX_NUM_OF_EXTRACTS. 
Verify this limitation too.*/ + if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) { + DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n", + DPKG_MAX_NUM_OF_EXTRACTS); + return -ENOTSUP; + } + + if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) { + DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n", + DPKG_MAX_NUM_OF_EXTRACTS); + return -ENOTSUP; + } + + for (j = 0; j < priv->pattern[8].item_count; j++) { + if (priv->pattern[8].pattern_type[j] != pattern->type) { + continue; + } else { + entry_found = 1; + break; + } + } + + if (!entry_found) { + priv->pattern[8].pattern_type[j] = pattern->type; + priv->pattern[8].item_count++; + device_configured |= DPAA2_QOS_TABLE_RECONFIGURE; + } + + entry_found = 0; + for (j = 0; j < priv->pattern[group].item_count; j++) { + if (priv->pattern[group].pattern_type[j] != pattern->type) { + continue; + } else { + entry_found = 1; + break; + } + } + + if (!entry_found) { + priv->pattern[group].pattern_type[j] = pattern->type; + priv->pattern[group].item_count++; + device_configured |= DPAA2_FS_TABLE_RECONFIGURE; + } + + /* Get traffic class index and flow id to be configured */ + flow->tc_id = group; + flow->index = attr->priority; + + if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) { + index = priv->extract.qos_key_cfg.num_extracts; + priv->extract.qos_key_cfg.extracts[index].type = + DPKG_EXTRACT_FROM_HDR; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_IP; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_IP_SRC; + index++; + + priv->extract.qos_key_cfg.extracts[index].type = + DPKG_EXTRACT_FROM_HDR; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_IP; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_IP_DST; + index++; + + priv->extract.qos_key_cfg.num_extracts = index; + } + + if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) { + index = priv->extract.fs_key_cfg[group].num_extracts; + priv->extract.fs_key_cfg[group].extracts[index].type = + DPKG_EXTRACT_FROM_HDR; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_IP; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_IP_SRC; + index++; + + priv->extract.fs_key_cfg[group].extracts[index].type = + DPKG_EXTRACT_FROM_HDR; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_IP; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_IP_DST; + index++; + + priv->extract.fs_key_cfg[group].num_extracts = index; + } + + /* Parse pattern list to get the matching parameters */ + spec = (const struct rte_flow_item_ipv6 *)pattern->spec; + last = (const struct rte_flow_item_ipv6 *)pattern->last; + mask = (const struct rte_flow_item_ipv6 *) + (pattern->mask ? 
pattern->mask : default_mask); + + key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_IPV6; + memcpy((void *)key_iova, (const void *)(spec->hdr.src_addr), + sizeof(spec->hdr.src_addr)); + key_iova += sizeof(spec->hdr.src_addr); + memcpy((void *)key_iova, (const void *)(spec->hdr.dst_addr), + sizeof(spec->hdr.dst_addr)); + + mask_iova = flow->rule.mask_iova + DPAA2_CLS_RULE_OFFSET_IPV6; + memcpy((void *)mask_iova, (const void *)(mask->hdr.src_addr), + sizeof(mask->hdr.src_addr)); + mask_iova += sizeof(mask->hdr.src_addr); + memcpy((void *)mask_iova, (const void *)(mask->hdr.dst_addr), + sizeof(mask->hdr.dst_addr)); + + flow->rule.key_size = (DPAA2_CLS_RULE_OFFSET_IPV6 + + sizeof(spec->hdr.src_addr) + + sizeof(mask->hdr.dst_addr)); + return device_configured; +} + +static int +dpaa2_configure_flow_icmp(struct rte_flow *flow, + struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item *pattern, + const struct rte_flow_action actions[] __rte_unused, + struct rte_flow_error *error __rte_unused) +{ + int index, j = 0; + size_t key_iova; + size_t mask_iova; + int device_configured = 0, entry_found = 0; + uint32_t group; + const struct rte_flow_item_icmp *spec, *mask; + + const struct rte_flow_item_icmp *last __rte_unused; + struct dpaa2_dev_priv *priv = dev->data->dev_private; + + group = attr->group; + + /* DPAA2 platform has a limitation that extract parameter can not be */ + /* more than DPKG_MAX_NUM_OF_EXTRACTS. Verify this limitation too.*/ + if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) { + DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n", + DPKG_MAX_NUM_OF_EXTRACTS); + return -ENOTSUP; + } + + if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) { + DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n", + DPKG_MAX_NUM_OF_EXTRACTS); + return -ENOTSUP; + } + + for (j = 0; j < priv->pattern[8].item_count; j++) { + if (priv->pattern[8].pattern_type[j] != pattern->type) { + continue; + } else { + entry_found = 1; + break; + } + } + + if (!entry_found) { + priv->pattern[8].pattern_type[j] = pattern->type; + priv->pattern[8].item_count++; + device_configured |= DPAA2_QOS_TABLE_RECONFIGURE; + } + + entry_found = 0; + for (j = 0; j < priv->pattern[group].item_count; j++) { + if (priv->pattern[group].pattern_type[j] != pattern->type) { + continue; + } else { + entry_found = 1; + break; + } + } + + if (!entry_found) { + priv->pattern[group].pattern_type[j] = pattern->type; + priv->pattern[group].item_count++; + device_configured |= DPAA2_FS_TABLE_RECONFIGURE; + } + + /* Get traffic class index and flow id to be configured */ + flow->tc_id = group; + flow->index = attr->priority; + + if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) { + index = priv->extract.qos_key_cfg.num_extracts; + priv->extract.qos_key_cfg.extracts[index].type = + DPKG_EXTRACT_FROM_HDR; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_ICMP; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_ICMP_TYPE; + index++; + + priv->extract.qos_key_cfg.extracts[index].type = + DPKG_EXTRACT_FROM_HDR; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_ICMP; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_ICMP_CODE; + index++; + + priv->extract.qos_key_cfg.num_extracts = index; + } 
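/*
 * Editorial sketch, not part of the upstream patch: the "entry_found" scan
 * over priv->pattern[] appears twice in every dpaa2_configure_flow_*()
 * handler (once for the QoS table slot 8, once for the per-TC FS table).
 * The struct below is a hypothetical stand-in for the bookkeeping type
 * behind priv->pattern[], reduced to the two members referenced here so the
 * sketch stays self-contained; the real definition lives in the driver
 * headers. The caller is assumed to have verified item_count against
 * DPKG_MAX_NUM_OF_EXTRACTS, as the surrounding code does.
 */
struct dpaa2_sketch_pattern_list {
	enum rte_flow_item_type pattern_type[DPKG_MAX_NUM_OF_EXTRACTS];
	int item_count;
};

/* Return 1 if @type was already recorded for this table; otherwise record
 * it and return 0 so the caller knows the table needs reconfiguration. */
static int
dpaa2_sketch_note_pattern(struct dpaa2_sketch_pattern_list *pl,
			  enum rte_flow_item_type type)
{
	int j;

	for (j = 0; j < pl->item_count; j++)
		if (pl->pattern_type[j] == type)
			return 1;

	pl->pattern_type[pl->item_count++] = type;
	return 0;
}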
+ + if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) { + index = priv->extract.fs_key_cfg[group].num_extracts; + priv->extract.fs_key_cfg[group].extracts[index].type = + DPKG_EXTRACT_FROM_HDR; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_ICMP; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_ICMP_TYPE; + index++; + + priv->extract.fs_key_cfg[group].extracts[index].type = + DPKG_EXTRACT_FROM_HDR; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_ICMP; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_ICMP_CODE; + index++; + + priv->extract.fs_key_cfg[group].num_extracts = index; + } + + /* Parse pattern list to get the matching parameters */ + spec = (const struct rte_flow_item_icmp *)pattern->spec; + last = (const struct rte_flow_item_icmp *)pattern->last; + mask = (const struct rte_flow_item_icmp *) + (pattern->mask ? pattern->mask : default_mask); + + key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_ICMP; + memcpy((void *)key_iova, (const void *)&spec->hdr.icmp_type, + sizeof(uint8_t)); + key_iova += sizeof(uint8_t); + memcpy((void *)key_iova, (const void *)&spec->hdr.icmp_code, + sizeof(uint8_t)); + + mask_iova = flow->rule.mask_iova + DPAA2_CLS_RULE_OFFSET_ICMP; + memcpy((void *)mask_iova, (const void *)&mask->hdr.icmp_type, + sizeof(uint8_t)); + key_iova += sizeof(uint8_t); + memcpy((void *)mask_iova, (const void *)&mask->hdr.icmp_code, + sizeof(uint8_t)); + + flow->rule.key_size = (DPAA2_CLS_RULE_OFFSET_ICMP + + (2 * sizeof(uint8_t))); + + return device_configured; +} + +static int +dpaa2_configure_flow_udp(struct rte_flow *flow, + struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item *pattern, + const struct rte_flow_action actions[] __rte_unused, + struct rte_flow_error *error __rte_unused) +{ + int index, j = 0; + size_t key_iova; + size_t mask_iova; + int device_configured = 0, entry_found = 0; + uint32_t group; + const struct rte_flow_item_udp *spec, *mask; + + const struct rte_flow_item_udp *last __rte_unused; + struct dpaa2_dev_priv *priv = dev->data->dev_private; + + group = attr->group; + + /* DPAA2 platform has a limitation that extract parameter can not be */ + /* more than DPKG_MAX_NUM_OF_EXTRACTS. 
Verify this limitation too.*/ + if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) { + DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n", + DPKG_MAX_NUM_OF_EXTRACTS); + return -ENOTSUP; + } + + if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) { + DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n", + DPKG_MAX_NUM_OF_EXTRACTS); + return -ENOTSUP; + } + + for (j = 0; j < priv->pattern[8].item_count; j++) { + if (priv->pattern[8].pattern_type[j] != pattern->type) { + continue; + } else { + entry_found = 1; + break; + } + } + + if (!entry_found) { + priv->pattern[8].pattern_type[j] = pattern->type; + priv->pattern[8].item_count++; + device_configured |= DPAA2_QOS_TABLE_RECONFIGURE; + } + + entry_found = 0; + for (j = 0; j < priv->pattern[group].item_count; j++) { + if (priv->pattern[group].pattern_type[j] != pattern->type) { + continue; + } else { + entry_found = 1; + break; + } + } + + if (!entry_found) { + priv->pattern[group].pattern_type[j] = pattern->type; + priv->pattern[group].item_count++; + device_configured |= DPAA2_FS_TABLE_RECONFIGURE; + } + + /* Get traffic class index and flow id to be configured */ + flow->tc_id = group; + flow->index = attr->priority; + + if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) { + index = priv->extract.qos_key_cfg.num_extracts; + priv->extract.qos_key_cfg.extracts[index].type = + DPKG_EXTRACT_FROM_HDR; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_IP; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_IP_PROTO; + index++; + + priv->extract.qos_key_cfg.extracts[index].type = + DPKG_EXTRACT_FROM_HDR; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_UDP; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_UDP_PORT_SRC; + index++; + + priv->extract.qos_key_cfg.extracts[index].type = DPKG_EXTRACT_FROM_HDR; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_UDP; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_UDP_PORT_DST; + index++; + + priv->extract.qos_key_cfg.num_extracts = index; + } + + if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) { + index = priv->extract.fs_key_cfg[group].num_extracts; + priv->extract.fs_key_cfg[group].extracts[index].type = + DPKG_EXTRACT_FROM_HDR; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_IP; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_IP_PROTO; + index++; + + priv->extract.fs_key_cfg[group].extracts[index].type = + DPKG_EXTRACT_FROM_HDR; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_UDP; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_UDP_PORT_SRC; + index++; + + priv->extract.fs_key_cfg[group].extracts[index].type = + DPKG_EXTRACT_FROM_HDR; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_UDP; + 
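/*
 * Editorial note with a sketch, not part of the upstream patch: the UDP,
 * TCP and SCTP handlers each memset() a hard-coded byte (0x11, 0x06, 0x84)
 * into the IP-protocol slot of the key, as done for UDP just below. Those
 * values are the standard IPPROTO_UDP (17), IPPROTO_TCP (6) and
 * IPPROTO_SCTP (132) numbers from <netinet/in.h>; using the named constants
 * would make the intent explicit. The helper name below is hypothetical.
 */
static inline void
dpaa2_sketch_set_key_ip_proto(size_t key_iova, uint8_t proto)
{
	/* The protocol byte sits after the IPv4 source and destination
	 * addresses inside the IPv4 slice of the rule key. */
	uint8_t *p = (uint8_t *)key_iova + DPAA2_CLS_RULE_OFFSET_IPV4 +
		     2 * sizeof(uint32_t);

	*p = proto; /* e.g. IPPROTO_UDP (17 == 0x11) for the UDP rule below */
}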
priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_UDP_PORT_DST; + index++; + + priv->extract.fs_key_cfg[group].num_extracts = index; + } + + /* Parse pattern list to get the matching parameters */ + spec = (const struct rte_flow_item_udp *)pattern->spec; + last = (const struct rte_flow_item_udp *)pattern->last; + mask = (const struct rte_flow_item_udp *) + (pattern->mask ? pattern->mask : default_mask); + + key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_IPV4 + + (2 * sizeof(uint32_t)); + memset((void *)key_iova, 0x11, sizeof(uint8_t)); + key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_UDP; + memcpy((void *)key_iova, (const void *)(&spec->hdr.src_port), + sizeof(uint16_t)); + key_iova += sizeof(uint16_t); + memcpy((void *)key_iova, (const void *)(&spec->hdr.dst_port), + sizeof(uint16_t)); + + mask_iova = flow->rule.mask_iova + DPAA2_CLS_RULE_OFFSET_UDP; + memcpy((void *)mask_iova, (const void *)(&mask->hdr.src_port), + sizeof(uint16_t)); + mask_iova += sizeof(uint16_t); + memcpy((void *)mask_iova, (const void *)(&mask->hdr.dst_port), + sizeof(uint16_t)); + + flow->rule.key_size = (DPAA2_CLS_RULE_OFFSET_UDP + + (2 * sizeof(uint16_t))); + + return device_configured; +} + +static int +dpaa2_configure_flow_tcp(struct rte_flow *flow, + struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item *pattern, + const struct rte_flow_action actions[] __rte_unused, + struct rte_flow_error *error __rte_unused) +{ + int index, j = 0; + size_t key_iova; + size_t mask_iova; + int device_configured = 0, entry_found = 0; + uint32_t group; + const struct rte_flow_item_tcp *spec, *mask; + + const struct rte_flow_item_tcp *last __rte_unused; + struct dpaa2_dev_priv *priv = dev->data->dev_private; + + group = attr->group; + + /* DPAA2 platform has a limitation that extract parameter can not be */ + /* more than DPKG_MAX_NUM_OF_EXTRACTS. 
Verify this limitation too.*/ + if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) { + DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n", + DPKG_MAX_NUM_OF_EXTRACTS); + return -ENOTSUP; + } + + if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) { + DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n", + DPKG_MAX_NUM_OF_EXTRACTS); + return -ENOTSUP; + } + + for (j = 0; j < priv->pattern[8].item_count; j++) { + if (priv->pattern[8].pattern_type[j] != pattern->type) { + continue; + } else { + entry_found = 1; + break; + } + } + + if (!entry_found) { + priv->pattern[8].pattern_type[j] = pattern->type; + priv->pattern[8].item_count++; + device_configured |= DPAA2_QOS_TABLE_RECONFIGURE; + } + + entry_found = 0; + for (j = 0; j < priv->pattern[group].item_count; j++) { + if (priv->pattern[group].pattern_type[j] != pattern->type) { + continue; + } else { + entry_found = 1; + break; + } + } + + if (!entry_found) { + priv->pattern[group].pattern_type[j] = pattern->type; + priv->pattern[group].item_count++; + device_configured |= DPAA2_FS_TABLE_RECONFIGURE; + } + + /* Get traffic class index and flow id to be configured */ + flow->tc_id = group; + flow->index = attr->priority; + + if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) { + index = priv->extract.qos_key_cfg.num_extracts; + priv->extract.qos_key_cfg.extracts[index].type = + DPKG_EXTRACT_FROM_HDR; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_IP; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_IP_PROTO; + index++; + + priv->extract.qos_key_cfg.extracts[index].type = + DPKG_EXTRACT_FROM_HDR; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_TCP; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_TCP_PORT_SRC; + index++; + + priv->extract.qos_key_cfg.extracts[index].type = + DPKG_EXTRACT_FROM_HDR; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_TCP; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_TCP_PORT_DST; + index++; + + priv->extract.qos_key_cfg.num_extracts = index; + } + + if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) { + index = priv->extract.fs_key_cfg[group].num_extracts; + priv->extract.fs_key_cfg[group].extracts[index].type = + DPKG_EXTRACT_FROM_HDR; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_IP; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_IP_PROTO; + index++; + + priv->extract.fs_key_cfg[group].extracts[index].type = + DPKG_EXTRACT_FROM_HDR; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_TCP; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_TCP_PORT_SRC; + index++; + + priv->extract.fs_key_cfg[group].extracts[index].type = + DPKG_EXTRACT_FROM_HDR; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_TCP; + 
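/*
 * Editorial sketch, not part of the upstream patch: the UDP, TCP and SCTP
 * handlers all copy a source/destination port pair into the key and mask
 * areas with the same four memcpy() calls. The helper below captures that
 * shared pattern; its name and signature are illustrative only, and it
 * relies on the <string.h> and rte_byteorder.h definitions this file
 * already pulls in.
 */
static void
dpaa2_sketch_write_port_pair(size_t base_iova, size_t offset,
			     rte_be16_t src_port, rte_be16_t dst_port)
{
	uint8_t *p = (uint8_t *)base_iova + offset;

	memcpy(p, &src_port, sizeof(rte_be16_t));
	memcpy(p + sizeof(rte_be16_t), &dst_port, sizeof(rte_be16_t));
}

/*
 * Usage sketch for the TCP rule being built here:
 *	dpaa2_sketch_write_port_pair(flow->rule.key_iova,
 *				     DPAA2_CLS_RULE_OFFSET_TCP,
 *				     spec->hdr.src_port, spec->hdr.dst_port);
 *	dpaa2_sketch_write_port_pair(flow->rule.mask_iova,
 *				     DPAA2_CLS_RULE_OFFSET_TCP,
 *				     mask->hdr.src_port, mask->hdr.dst_port);
 */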
priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_TCP_PORT_DST; + index++; + + priv->extract.fs_key_cfg[group].num_extracts = index; + } + + /* Parse pattern list to get the matching parameters */ + spec = (const struct rte_flow_item_tcp *)pattern->spec; + last = (const struct rte_flow_item_tcp *)pattern->last; + mask = (const struct rte_flow_item_tcp *) + (pattern->mask ? pattern->mask : default_mask); + + key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_IPV4 + + (2 * sizeof(uint32_t)); + memset((void *)key_iova, 0x06, sizeof(uint8_t)); + key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_TCP; + memcpy((void *)key_iova, (const void *)(&spec->hdr.src_port), + sizeof(uint16_t)); + key_iova += sizeof(uint16_t); + memcpy((void *)key_iova, (const void *)(&spec->hdr.dst_port), + sizeof(uint16_t)); + + mask_iova = flow->rule.mask_iova + DPAA2_CLS_RULE_OFFSET_TCP; + memcpy((void *)mask_iova, (const void *)(&mask->hdr.src_port), + sizeof(uint16_t)); + mask_iova += sizeof(uint16_t); + memcpy((void *)mask_iova, (const void *)(&mask->hdr.dst_port), + sizeof(uint16_t)); + + flow->rule.key_size = (DPAA2_CLS_RULE_OFFSET_TCP + + (2 * sizeof(uint16_t))); + + return device_configured; +} + +static int +dpaa2_configure_flow_sctp(struct rte_flow *flow, + struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item *pattern, + const struct rte_flow_action actions[] __rte_unused, + struct rte_flow_error *error __rte_unused) +{ + int index, j = 0; + size_t key_iova; + size_t mask_iova; + int device_configured = 0, entry_found = 0; + uint32_t group; + const struct rte_flow_item_sctp *spec, *mask; + + const struct rte_flow_item_sctp *last __rte_unused; + struct dpaa2_dev_priv *priv = dev->data->dev_private; + + group = attr->group; + + /* DPAA2 platform has a limitation that extract parameter can not be */ + /* more than DPKG_MAX_NUM_OF_EXTRACTS. Verify this limitation too. 
*/ + if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) { + DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n", + DPKG_MAX_NUM_OF_EXTRACTS); + return -ENOTSUP; + } + + if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) { + DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n", + DPKG_MAX_NUM_OF_EXTRACTS); + return -ENOTSUP; + } + + for (j = 0; j < priv->pattern[8].item_count; j++) { + if (priv->pattern[8].pattern_type[j] != pattern->type) { + continue; + } else { + entry_found = 1; + break; + } + } + + if (!entry_found) { + priv->pattern[8].pattern_type[j] = pattern->type; + priv->pattern[8].item_count++; + device_configured |= DPAA2_QOS_TABLE_RECONFIGURE; + } + + entry_found = 0; + for (j = 0; j < priv->pattern[group].item_count; j++) { + if (priv->pattern[group].pattern_type[j] != pattern->type) { + continue; + } else { + entry_found = 1; + break; + } + } + + if (!entry_found) { + priv->pattern[group].pattern_type[j] = pattern->type; + priv->pattern[group].item_count++; + device_configured |= DPAA2_FS_TABLE_RECONFIGURE; + } + + /* Get traffic class index and flow id to be configured */ + flow->tc_id = group; + flow->index = attr->priority; + + if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) { + index = priv->extract.qos_key_cfg.num_extracts; + priv->extract.qos_key_cfg.extracts[index].type = + DPKG_EXTRACT_FROM_HDR; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_IP; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_IP_PROTO; + index++; + + priv->extract.qos_key_cfg.extracts[index].type = + DPKG_EXTRACT_FROM_HDR; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_SCTP; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_SCTP_PORT_SRC; + index++; + + priv->extract.qos_key_cfg.extracts[index].type = + DPKG_EXTRACT_FROM_HDR; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_SCTP; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_SCTP_PORT_DST; + index++; + + priv->extract.qos_key_cfg.num_extracts = index; + } + + if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) { + index = priv->extract.fs_key_cfg[group].num_extracts; + priv->extract.fs_key_cfg[group].extracts[index].type = + DPKG_EXTRACT_FROM_HDR; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_IP; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_IP_PROTO; + index++; + + priv->extract.fs_key_cfg[group].extracts[index].type = + DPKG_EXTRACT_FROM_HDR; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_SCTP; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_SCTP_PORT_SRC; + index++; + + priv->extract.fs_key_cfg[group].extracts[index].type = + DPKG_EXTRACT_FROM_HDR; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_SCTP; + 
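/*
 * Editorial sketch, not part of the upstream patch: the fixed key layout
 * implied by the DPAA2_CLS_RULE_OFFSET_* macros and the per-handler
 * key_size arithmetic can be summarised in one table, which makes the
 * per-protocol slice sizes easier to audit. Purely illustrative; the sizes
 * are taken from the memcpy()/key_size expressions in this file.
 */
static const struct {
	size_t offset;
	size_t size;
} dpaa2_sketch_cls_layout[] = {
	{ DPAA2_CLS_RULE_OFFSET_ETH,
	  2 * sizeof(struct rte_ether_addr) + sizeof(rte_be16_t) },
	{ DPAA2_CLS_RULE_OFFSET_VLAN, sizeof(rte_be16_t) },
	{ DPAA2_CLS_RULE_OFFSET_IPV4,
	  2 * sizeof(uint32_t) + sizeof(uint8_t) },
	{ DPAA2_CLS_RULE_OFFSET_IPV6, 2 * 16 /* IPv6 src + dst */ },
	{ DPAA2_CLS_RULE_OFFSET_ICMP, 2 * sizeof(uint8_t) },
	{ DPAA2_CLS_RULE_OFFSET_UDP,  2 * sizeof(uint16_t) },
	{ DPAA2_CLS_RULE_OFFSET_TCP,  2 * sizeof(uint16_t) },
	{ DPAA2_CLS_RULE_OFFSET_SCTP, 2 * sizeof(uint16_t) },
	{ DPAA2_CLS_RULE_OFFSET_GRE,  sizeof(rte_be16_t) },
};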
priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_SCTP_PORT_DST; + index++; + + priv->extract.fs_key_cfg[group].num_extracts = index; + } + + /* Parse pattern list to get the matching parameters */ + spec = (const struct rte_flow_item_sctp *)pattern->spec; + last = (const struct rte_flow_item_sctp *)pattern->last; + mask = (const struct rte_flow_item_sctp *) + (pattern->mask ? pattern->mask : default_mask); + + key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_IPV4 + + (2 * sizeof(uint32_t)); + memset((void *)key_iova, 0x84, sizeof(uint8_t)); + key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_SCTP; + memcpy((void *)key_iova, (const void *)(&spec->hdr.src_port), + sizeof(uint16_t)); + key_iova += sizeof(uint16_t); + memcpy((void *)key_iova, (const void *)(&spec->hdr.dst_port), + sizeof(uint16_t)); + + mask_iova = flow->rule.mask_iova + DPAA2_CLS_RULE_OFFSET_SCTP; + memcpy((void *)mask_iova, (const void *)(&mask->hdr.src_port), + sizeof(uint16_t)); + mask_iova += sizeof(uint16_t); + memcpy((void *)mask_iova, (const void *)(&mask->hdr.dst_port), + sizeof(uint16_t)); + + flow->rule.key_size = (DPAA2_CLS_RULE_OFFSET_SCTP + + (2 * sizeof(uint16_t))); + return device_configured; +} + +static int +dpaa2_configure_flow_gre(struct rte_flow *flow, + struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item *pattern, + const struct rte_flow_action actions[] __rte_unused, + struct rte_flow_error *error __rte_unused) +{ + int index, j = 0; + size_t key_iova; + size_t mask_iova; + int device_configured = 0, entry_found = 0; + uint32_t group; + const struct rte_flow_item_gre *spec, *mask; + + const struct rte_flow_item_gre *last __rte_unused; + struct dpaa2_dev_priv *priv = dev->data->dev_private; + + group = attr->group; + + /* DPAA2 platform has a limitation that extract parameter can not be */ + /* more than DPKG_MAX_NUM_OF_EXTRACTS. Verify this limitation too. 
*/ + if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) { + DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n", + DPKG_MAX_NUM_OF_EXTRACTS); + return -ENOTSUP; + } + + if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) { + DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n", + DPKG_MAX_NUM_OF_EXTRACTS); + return -ENOTSUP; + } + + for (j = 0; j < priv->pattern[8].item_count; j++) { + if (priv->pattern[8].pattern_type[j] != pattern->type) { + continue; + } else { + entry_found = 1; + break; + } + } + + if (!entry_found) { + priv->pattern[8].pattern_type[j] = pattern->type; + priv->pattern[8].item_count++; + device_configured |= DPAA2_QOS_TABLE_RECONFIGURE; + } + + entry_found = 0; + for (j = 0; j < priv->pattern[group].item_count; j++) { + if (priv->pattern[group].pattern_type[j] != pattern->type) { + continue; + } else { + entry_found = 1; + break; + } + } + + if (!entry_found) { + priv->pattern[group].pattern_type[j] = pattern->type; + priv->pattern[group].item_count++; + device_configured |= DPAA2_FS_TABLE_RECONFIGURE; + } + + /* Get traffic class index and flow id to be configured */ + flow->tc_id = group; + flow->index = attr->priority; + + if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) { + index = priv->extract.qos_key_cfg.num_extracts; + priv->extract.qos_key_cfg.extracts[index].type = + DPKG_EXTRACT_FROM_HDR; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_GRE; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_GRE_TYPE; + index++; + + priv->extract.qos_key_cfg.num_extracts = index; + } + + if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) { + index = priv->extract.fs_key_cfg[group].num_extracts; + priv->extract.fs_key_cfg[group].extracts[index].type = + DPKG_EXTRACT_FROM_HDR; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_GRE; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_GRE_TYPE; + index++; + + priv->extract.fs_key_cfg[group].num_extracts = index; + } + + /* Parse pattern list to get the matching parameters */ + spec = (const struct rte_flow_item_gre *)pattern->spec; + last = (const struct rte_flow_item_gre *)pattern->last; + mask = (const struct rte_flow_item_gre *) + (pattern->mask ? 
pattern->mask : default_mask); + + key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_GRE; + memcpy((void *)key_iova, (const void *)(&spec->protocol), + sizeof(rte_be16_t)); + + mask_iova = flow->rule.mask_iova + DPAA2_CLS_RULE_OFFSET_GRE; + memcpy((void *)mask_iova, (const void *)(&mask->protocol), + sizeof(rte_be16_t)); + + flow->rule.key_size = (DPAA2_CLS_RULE_OFFSET_GRE + sizeof(rte_be16_t)); + + return device_configured; +} + +static int +dpaa2_generic_flow_set(struct rte_flow *flow, + struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + const struct rte_flow_action_queue *dest_queue; + const struct rte_flow_action_rss *rss_conf; + uint16_t index; + int is_keycfg_configured = 0, end_of_list = 0; + int ret = 0, i = 0, j = 0; + struct dpni_attr nic_attr; + struct dpni_rx_tc_dist_cfg tc_cfg; + struct dpni_qos_tbl_cfg qos_cfg; + struct dpkg_profile_cfg key_cfg; + struct dpni_fs_action_cfg action; + struct dpaa2_dev_priv *priv = dev->data->dev_private; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; + size_t param; + struct rte_flow *curr = LIST_FIRST(&priv->flows); + + /* Parse pattern list to get the matching parameters */ + while (!end_of_list) { + switch (pattern[i].type) { + case RTE_FLOW_ITEM_TYPE_ETH: + is_keycfg_configured = dpaa2_configure_flow_eth(flow, + dev, + attr, + &pattern[i], + actions, + error); + break; + case RTE_FLOW_ITEM_TYPE_VLAN: + is_keycfg_configured = dpaa2_configure_flow_vlan(flow, + dev, + attr, + &pattern[i], + actions, + error); + break; + case RTE_FLOW_ITEM_TYPE_IPV4: + is_keycfg_configured = dpaa2_configure_flow_ipv4(flow, + dev, + attr, + &pattern[i], + actions, + error); + break; + case RTE_FLOW_ITEM_TYPE_IPV6: + is_keycfg_configured = dpaa2_configure_flow_ipv6(flow, + dev, + attr, + &pattern[i], + actions, + error); + break; + case RTE_FLOW_ITEM_TYPE_ICMP: + is_keycfg_configured = dpaa2_configure_flow_icmp(flow, + dev, + attr, + &pattern[i], + actions, + error); + break; + case RTE_FLOW_ITEM_TYPE_UDP: + is_keycfg_configured = dpaa2_configure_flow_udp(flow, + dev, + attr, + &pattern[i], + actions, + error); + break; + case RTE_FLOW_ITEM_TYPE_TCP: + is_keycfg_configured = dpaa2_configure_flow_tcp(flow, + dev, + attr, + &pattern[i], + actions, + error); + break; + case RTE_FLOW_ITEM_TYPE_SCTP: + is_keycfg_configured = dpaa2_configure_flow_sctp(flow, + dev, attr, + &pattern[i], + actions, + error); + break; + case RTE_FLOW_ITEM_TYPE_GRE: + is_keycfg_configured = dpaa2_configure_flow_gre(flow, + dev, + attr, + &pattern[i], + actions, + error); + break; + case RTE_FLOW_ITEM_TYPE_END: + end_of_list = 1; + break; /*End of List*/ + default: + DPAA2_PMD_ERR("Invalid action type"); + ret = -ENOTSUP; + break; + } + i++; + } + + /* Let's parse action on matching traffic */ + end_of_list = 0; + while (!end_of_list) { + switch (actions[j].type) { + case RTE_FLOW_ACTION_TYPE_QUEUE: + dest_queue = (const struct rte_flow_action_queue *)(actions[j].conf); + flow->flow_id = dest_queue->index; + flow->action = RTE_FLOW_ACTION_TYPE_QUEUE; + memset(&action, 0, sizeof(struct dpni_fs_action_cfg)); + action.flow_id = flow->flow_id; + if (is_keycfg_configured & DPAA2_QOS_TABLE_RECONFIGURE) { + if (dpkg_prepare_key_cfg(&priv->extract.qos_key_cfg, + (uint8_t *)(size_t)priv->extract.qos_extract_param) < 0) { + DPAA2_PMD_ERR( + "Unable to prepare extract parameters"); + return -1; + } + + memset(&qos_cfg, 0, sizeof(struct dpni_qos_tbl_cfg)); + 
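+ /* Point the QoS table at the freshly serialized extract
+ * configuration so the new key composition takes effect.
+ */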
qos_cfg.discard_on_miss = true; + qos_cfg.keep_entries = true; + qos_cfg.key_cfg_iova = (size_t)priv->extract.qos_extract_param; + ret = dpni_set_qos_table(dpni, CMD_PRI_LOW, + priv->token, &qos_cfg); + if (ret < 0) { + DPAA2_PMD_ERR( + "Distribution cannot be configured (%d)" + , ret); + return -1; + } + } + if (is_keycfg_configured & DPAA2_FS_TABLE_RECONFIGURE) { + if (dpkg_prepare_key_cfg(&priv->extract.fs_key_cfg[flow->tc_id], + (uint8_t *)(size_t)priv->extract.fs_extract_param[flow->tc_id]) < 0) { + DPAA2_PMD_ERR( + "Unable to prepare extract parameters"); + return -1; + } + + memset(&tc_cfg, 0, sizeof(struct dpni_rx_tc_dist_cfg)); + tc_cfg.dist_size = priv->nb_rx_queues / priv->num_rx_tc; + tc_cfg.dist_mode = DPNI_DIST_MODE_FS; + tc_cfg.key_cfg_iova = + (uint64_t)priv->extract.fs_extract_param[flow->tc_id]; + tc_cfg.fs_cfg.miss_action = DPNI_FS_MISS_DROP; + tc_cfg.fs_cfg.keep_entries = true; + ret = dpni_set_rx_tc_dist(dpni, CMD_PRI_LOW, + priv->token, + flow->tc_id, &tc_cfg); + if (ret < 0) { + DPAA2_PMD_ERR( + "Distribution cannot be configured (%d)" + , ret); + return -1; + } + } + /* Configure QoS table first */ + memset(&nic_attr, 0, sizeof(struct dpni_attr)); + ret = dpni_get_attributes(dpni, CMD_PRI_LOW, + priv->token, &nic_attr); + if (ret < 0) { + DPAA2_PMD_ERR( + "Failure to get attribute. dpni@%p err code(%d)\n", + dpni, ret); + return ret; + } + + action.flow_id = action.flow_id % nic_attr.num_rx_tcs; + index = flow->index + (flow->tc_id * nic_attr.fs_entries); + ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW, + priv->token, &flow->rule, + flow->tc_id, index, + 0, 0); + if (ret < 0) { + DPAA2_PMD_ERR( + "Error in adding entry to QoS table(%d)", ret); + return ret; + } + + /* Then Configure FS table */ + ret = dpni_add_fs_entry(dpni, CMD_PRI_LOW, priv->token, + flow->tc_id, flow->index, + &flow->rule, &action); + if (ret < 0) { + DPAA2_PMD_ERR( + "Error in adding entry to FS table(%d)", ret); + return ret; + } + break; + case RTE_FLOW_ACTION_TYPE_RSS: + ret = dpni_get_attributes(dpni, CMD_PRI_LOW, + priv->token, &nic_attr); + if (ret < 0) { + DPAA2_PMD_ERR( + "Failure to get attribute. dpni@%p err code(%d)\n", + dpni, ret); + return ret; + } + rss_conf = (const struct rte_flow_action_rss *)(actions[j].conf); + for (i = 0; i < (int)rss_conf->queue_num; i++) { + if (rss_conf->queue[i] < (attr->group * nic_attr.num_queues) || + rss_conf->queue[i] >= ((attr->group + 1) * nic_attr.num_queues)) { + DPAA2_PMD_ERR( + "Queue/Group combination is not supported\n"); + return -ENOTSUP; + } + } + + flow->action = RTE_FLOW_ACTION_TYPE_RSS; + ret = dpaa2_distset_to_dpkg_profile_cfg(rss_conf->types, + &key_cfg); + if (ret < 0) { + DPAA2_PMD_ERR( + "Unable to set flow distribution, please check queue config\n"); + return ret; + } + + /* Allocate DMA'ble memory to write the rules */ + param = (size_t)rte_malloc(NULL, 256, 64); + if (!param) { + DPAA2_PMD_ERR("Memory allocation failure\n"); + return -1; + } + + if (dpkg_prepare_key_cfg(&key_cfg, (uint8_t *)param) < 0) { + DPAA2_PMD_ERR( + "Unable to prepare extract parameters"); + rte_free((void *)param); + return -1; + } + + memset(&tc_cfg, 0, sizeof(struct dpni_rx_tc_dist_cfg)); + tc_cfg.dist_size = rss_conf->queue_num; + tc_cfg.dist_mode = DPNI_DIST_MODE_HASH; + tc_cfg.key_cfg_iova = (size_t)param; + tc_cfg.fs_cfg.miss_action = DPNI_FS_MISS_DROP; + + ret = dpni_set_rx_tc_dist(dpni, CMD_PRI_LOW, + priv->token, flow->tc_id, + &tc_cfg); + if (ret < 0) { + DPAA2_PMD_ERR( + "Distribution cannot be configured: %d\n", ret); + rte_free((void *)param); + return -1; + } + + rte_free((void *)param); + if (is_keycfg_configured & DPAA2_FS_TABLE_RECONFIGURE) { + if (dpkg_prepare_key_cfg(&priv->extract.qos_key_cfg, + (uint8_t *)(size_t)priv->extract.qos_extract_param) < 0) { + DPAA2_PMD_ERR( + "Unable to prepare extract parameters"); + return -1; + } + memset(&qos_cfg, 0, + sizeof(struct dpni_qos_tbl_cfg)); + qos_cfg.discard_on_miss = true; + qos_cfg.keep_entries = true; + qos_cfg.key_cfg_iova = (size_t)priv->extract.qos_extract_param; + ret = dpni_set_qos_table(dpni, CMD_PRI_LOW, + priv->token, &qos_cfg); + if (ret < 0) { + DPAA2_PMD_ERR( + "Distribution cannot be configured (%d)\n", + ret); + return -1; + } + } + + /* Add Rule into QoS table */ + index = flow->index + (flow->tc_id * nic_attr.fs_entries); + ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW, priv->token, + &flow->rule, flow->tc_id, + index, 0, 0); + if (ret < 0) { + DPAA2_PMD_ERR( + "Error in adding entry to QoS table(%d)", + ret); + return ret; + } + break; + case RTE_FLOW_ACTION_TYPE_END: + end_of_list = 1; + break; + default: + DPAA2_PMD_ERR("Invalid action type"); + ret = -ENOTSUP; + break; + } + j++; + } + + if (!ret) { + /* New rules are inserted. 
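+ * The new flow is appended at the tail of the per-device flow list, so
+ * dpaa2_flow_flush() and dpaa2_flow_clean() release rules in creation order.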
*/ + if (!curr) { + LIST_INSERT_HEAD(&priv->flows, flow, next); + } else { + while (LIST_NEXT(curr, next)) + curr = LIST_NEXT(curr, next); + LIST_INSERT_AFTER(curr, flow, next); + } + } + return ret; +} + +static inline int +dpaa2_dev_verify_attr(struct dpni_attr *dpni_attr, + const struct rte_flow_attr *attr) +{ + int ret = 0; + + if (unlikely(attr->group >= dpni_attr->num_rx_tcs)) { + DPAA2_PMD_ERR("Priority group is out of range\n"); + ret = -ENOTSUP; + } + if (unlikely(attr->priority >= dpni_attr->fs_entries)) { + DPAA2_PMD_ERR("Priority within the group is out of range\n"); + ret = -ENOTSUP; + } + if (unlikely(attr->egress)) { + DPAA2_PMD_ERR( + "Flow configuration is not supported on egress side\n"); + ret = -ENOTSUP; + } + if (unlikely(!attr->ingress)) { + DPAA2_PMD_ERR("Ingress flag must be configured\n"); + ret = -EINVAL; + } + return ret; +} + +static inline void +dpaa2_dev_update_default_mask(const struct rte_flow_item *pattern) +{ + switch (pattern->type) { + case RTE_FLOW_ITEM_TYPE_ETH: + default_mask = (const void *)&rte_flow_item_eth_mask; + break; + case RTE_FLOW_ITEM_TYPE_VLAN: + default_mask = (const void *)&rte_flow_item_vlan_mask; + break; + case RTE_FLOW_ITEM_TYPE_IPV4: + default_mask = (const void *)&rte_flow_item_ipv4_mask; + break; + case RTE_FLOW_ITEM_TYPE_IPV6: + default_mask = (const void *)&rte_flow_item_ipv6_mask; + break; + case RTE_FLOW_ITEM_TYPE_ICMP: + default_mask = (const void *)&rte_flow_item_icmp_mask; + break; + case RTE_FLOW_ITEM_TYPE_UDP: + default_mask = (const void *)&rte_flow_item_udp_mask; + break; + case RTE_FLOW_ITEM_TYPE_TCP: + default_mask = (const void *)&rte_flow_item_tcp_mask; + break; + case RTE_FLOW_ITEM_TYPE_SCTP: + default_mask = (const void *)&rte_flow_item_sctp_mask; + break; + case RTE_FLOW_ITEM_TYPE_GRE: + default_mask = (const void *)&rte_flow_item_gre_mask; + break; + default: + DPAA2_PMD_ERR("Invalid pattern type"); + } +} + +static inline int +dpaa2_dev_verify_patterns(struct dpaa2_dev_priv *dev_priv, + const struct rte_flow_item pattern[]) +{ + unsigned int i, j, k, is_found = 0; + int ret = 0; + + for (j = 0; pattern[j].type != RTE_FLOW_ITEM_TYPE_END; j++) { + for (i = 0; i < RTE_DIM(dpaa2_supported_pattern_type); i++) { + if (dpaa2_supported_pattern_type[i] == pattern[j].type) { + is_found = 1; + break; + } + } + if (!is_found) { + ret = -ENOTSUP; + break; + } + } + /* Lets verify other combinations of given pattern rules */ + for (j = 0; pattern[j].type != RTE_FLOW_ITEM_TYPE_END; j++) { + if (!pattern[j].spec) { + ret = -EINVAL; + break; + } + if ((pattern[j].last) && (!pattern[j].mask)) + dpaa2_dev_update_default_mask(&pattern[j]); + } + + /* DPAA2 platform has a limitation that extract parameter can not be */ + /* more than DPKG_MAX_NUM_OF_EXTRACTS. Verify this limitation too. 
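+ * Every pattern slot (one per traffic class plus the aggregate QoS slot)
+ * is checked against this limit before the rule is accepted.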
*/ + for (i = 0; pattern[i].type != RTE_FLOW_ITEM_TYPE_END; i++) { + for (j = 0; j < MAX_TCS + 1; j++) { + for (k = 0; k < DPKG_MAX_NUM_OF_EXTRACTS; k++) { + if (dev_priv->pattern[j].pattern_type[k] == pattern[i].type) + break; + } + if (dev_priv->pattern[j].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) + ret = -ENOTSUP; + } + } + return ret; +} + +static inline int +dpaa2_dev_verify_actions(const struct rte_flow_action actions[]) +{ + unsigned int i, j, is_found = 0; + int ret = 0; + + for (j = 0; actions[j].type != RTE_FLOW_ACTION_TYPE_END; j++) { + for (i = 0; i < RTE_DIM(dpaa2_supported_action_type); i++) { + if (dpaa2_supported_action_type[i] == actions[j].type) { + is_found = 1; + break; + } + } + if (!is_found) { + ret = -ENOTSUP; + break; + } + } + for (j = 0; actions[j].type != RTE_FLOW_ACTION_TYPE_END; j++) { + if ((actions[j].type != RTE_FLOW_ACTION_TYPE_DROP) && (!actions[j].conf)) + ret = -EINVAL; + } + return ret; +} + +static +int dpaa2_flow_validate(struct rte_eth_dev *dev, + const struct rte_flow_attr *flow_attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + struct dpaa2_dev_priv *priv = dev->data->dev_private; + struct dpni_attr dpni_attr; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; + uint16_t token = priv->token; + int ret = 0; + + memset(&dpni_attr, 0, sizeof(struct dpni_attr)); + ret = dpni_get_attributes(dpni, CMD_PRI_LOW, token, &dpni_attr); + if (ret < 0) { + DPAA2_PMD_ERR( + "Failure to get dpni@%p attribute, err code %d\n", + dpni, ret); + rte_flow_error_set(error, EPERM, + RTE_FLOW_ERROR_TYPE_ATTR, + flow_attr, "invalid"); + return ret; + } + + /* Verify input attributes */ + ret = dpaa2_dev_verify_attr(&dpni_attr, flow_attr); + if (ret < 0) { + DPAA2_PMD_ERR( + "Invalid attributes are given\n"); + rte_flow_error_set(error, EPERM, + RTE_FLOW_ERROR_TYPE_ATTR, + flow_attr, "invalid"); + goto not_valid_params; + } + /* Verify input pattern list */ + ret = dpaa2_dev_verify_patterns(priv, pattern); + if (ret < 0) { + DPAA2_PMD_ERR( + "Invalid pattern list is given\n"); + rte_flow_error_set(error, EPERM, + RTE_FLOW_ERROR_TYPE_ITEM, + pattern, "invalid"); + goto not_valid_params; + } + /* Verify input action list */ + ret = dpaa2_dev_verify_actions(actions); + if (ret < 0) { + DPAA2_PMD_ERR( + "Invalid action list is given\n"); + rte_flow_error_set(error, EPERM, + RTE_FLOW_ERROR_TYPE_ACTION, + actions, "invalid"); + goto not_valid_params; + } +not_valid_params: + return ret; +} + +static +struct rte_flow *dpaa2_flow_create(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + struct rte_flow *flow = NULL; + size_t key_iova = 0, mask_iova = 0; + int ret; + + flow = rte_malloc(NULL, sizeof(struct rte_flow), RTE_CACHE_LINE_SIZE); + if (!flow) { + DPAA2_PMD_ERR("Failure to allocate memory for flow"); + goto mem_failure; + } + /* Allocate DMA'ble memory to write the rules */ + key_iova = (size_t)rte_malloc(NULL, 256, 64); + if (!key_iova) { + DPAA2_PMD_ERR( + "Memory allocation failure for rule configuration\n"); + goto mem_failure; + } + mask_iova = (size_t)rte_malloc(NULL, 256, 64); + if (!mask_iova) { + DPAA2_PMD_ERR( + "Memory allocation failure for rule configuration\n"); + goto mem_failure; + } + + flow->rule.key_iova = key_iova; + flow->rule.mask_iova = mask_iova; + flow->rule.key_size = 0; + + switch (dpaa2_filter_type) { + case RTE_ETH_FILTER_GENERIC: + ret = 
dpaa2_generic_flow_set(flow, dev, attr, pattern, + actions, error); + if (ret < 0) { + if (error->type > RTE_FLOW_ERROR_TYPE_ACTION) + rte_flow_error_set(error, EPERM, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + attr, "unknown"); + DPAA2_PMD_ERR( + "Failure to create flow, return code (%d)", ret); + goto creation_error; + } + break; + default: + DPAA2_PMD_ERR("Filter type (%d) not supported", + dpaa2_filter_type); + break; + } + + return flow; +mem_failure: + rte_flow_error_set(error, EPERM, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, "memory alloc"); +creation_error: + rte_free((void *)flow); + rte_free((void *)key_iova); + rte_free((void *)mask_iova); + + return NULL; +} + +static +int dpaa2_flow_destroy(struct rte_eth_dev *dev, + struct rte_flow *flow, + struct rte_flow_error *error) +{ + int ret = 0; + struct dpaa2_dev_priv *priv = dev->data->dev_private; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; + + switch (flow->action) { + case RTE_FLOW_ACTION_TYPE_QUEUE: + /* Remove entry from QoS table first */ + ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW, priv->token, + &flow->rule); + if (ret < 0) { + DPAA2_PMD_ERR( + "Error in removing entry from QoS table(%d)", ret); + goto error; + } + + /* Then remove entry from FS table */ + ret = dpni_remove_fs_entry(dpni, CMD_PRI_LOW, priv->token, + flow->tc_id, &flow->rule); + if (ret < 0) { + DPAA2_PMD_ERR( + "Error in removing entry from FS table(%d)", ret); + goto error; + } + break; + case RTE_FLOW_ACTION_TYPE_RSS: + ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW, priv->token, + &flow->rule); + if (ret < 0) { + DPAA2_PMD_ERR( + "Error in removing entry from QoS table(%d)", ret); + goto error; + } + break; + default: + DPAA2_PMD_ERR( + "Action type (%d) is not supported", flow->action); + ret = -ENOTSUP; + break; + } + + LIST_REMOVE(flow, next); + /* Now free the flow */ + rte_free(flow); + +error: + if (ret) + rte_flow_error_set(error, EPERM, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, "unknown"); + return ret; +} + +/** + * Destroy user-configured flow rules. + * + * This function skips internal flow rules. + * + * @see rte_flow_flush() + * @see rte_flow_ops + */ +static int +dpaa2_flow_flush(struct rte_eth_dev *dev, + struct rte_flow_error *error) +{ + struct dpaa2_dev_priv *priv = dev->data->dev_private; + struct rte_flow *flow = LIST_FIRST(&priv->flows); + + while (flow) { + struct rte_flow *next = LIST_NEXT(flow, next); + + dpaa2_flow_destroy(dev, flow, error); + flow = next; + } + return 0; +} + +static int +dpaa2_flow_query(struct rte_eth_dev *dev __rte_unused, + struct rte_flow *flow __rte_unused, + const struct rte_flow_action *actions __rte_unused, + void *data __rte_unused, + struct rte_flow_error *error __rte_unused) +{ + return 0; +} + +/** + * Clean up all flow rules. + * + * Unlike dpaa2_flow_flush(), this function takes care of all remaining flow + * rules regardless of whether they are internal or user-configured. + * + * @param dev + * Pointer to the Ethernet device structure. 
+ */ +void +dpaa2_flow_clean(struct rte_eth_dev *dev) +{ + struct rte_flow *flow; + struct dpaa2_dev_priv *priv = dev->data->dev_private; + + while ((flow = LIST_FIRST(&priv->flows))) + dpaa2_flow_destroy(dev, flow, NULL); +} + +const struct rte_flow_ops dpaa2_flow_ops = { + .create = dpaa2_flow_create, + .validate = dpaa2_flow_validate, + .destroy = dpaa2_flow_destroy, + .flush = dpaa2_flow_flush, + .query = dpaa2_flow_query, +}; diff --git a/src/spdk/dpdk/drivers/net/dpaa2/dpaa2_mux.c b/src/spdk/dpdk/drivers/net/dpaa2/dpaa2_mux.c new file mode 100644 index 000000000..f8366e839 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/dpaa2/dpaa2_mux.c @@ -0,0 +1,269 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2018-2020 NXP + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include + +struct dpaa2_dpdmux_dev { + TAILQ_ENTRY(dpaa2_dpdmux_dev) next; + /**< Pointer to Next device instance */ + struct fsl_mc_io dpdmux; /** handle to DPDMUX portal object */ + uint16_t token; + uint32_t dpdmux_id; /*HW ID for DPDMUX object */ + uint8_t num_ifs; /* Number of interfaces in DPDMUX */ +}; + +struct rte_flow { + struct dpdmux_rule_cfg rule; +}; + +TAILQ_HEAD(dpdmux_dev_list, dpaa2_dpdmux_dev); +static struct dpdmux_dev_list dpdmux_dev_list = + TAILQ_HEAD_INITIALIZER(dpdmux_dev_list); /*!< DPDMUX device list */ + +static struct dpaa2_dpdmux_dev *get_dpdmux_from_id(uint32_t dpdmux_id) +{ + struct dpaa2_dpdmux_dev *dpdmux_dev = NULL; + + /* Get DPBP dev handle from list using index */ + TAILQ_FOREACH(dpdmux_dev, &dpdmux_dev_list, next) { + if (dpdmux_dev->dpdmux_id == dpdmux_id) + break; + } + + return dpdmux_dev; +} + +struct rte_flow * +rte_pmd_dpaa2_mux_flow_create(uint32_t dpdmux_id, + struct rte_flow_item *pattern[], + struct rte_flow_action *actions[]) +{ + struct dpaa2_dpdmux_dev *dpdmux_dev; + struct dpkg_profile_cfg kg_cfg; + const struct rte_flow_action_vf *vf_conf; + struct dpdmux_cls_action dpdmux_action; + struct rte_flow *flow = NULL; + void *key_iova, *mask_iova, *key_cfg_iova = NULL; + uint8_t key_size = 0; + int ret; + + /* Find the DPDMUX from dpdmux_id in our list */ + dpdmux_dev = get_dpdmux_from_id(dpdmux_id); + if (!dpdmux_dev) { + DPAA2_PMD_ERR("Invalid dpdmux_id: %d", dpdmux_id); + return NULL; + } + + key_cfg_iova = rte_zmalloc(NULL, DIST_PARAM_IOVA_SIZE, + RTE_CACHE_LINE_SIZE); + if (!key_cfg_iova) { + DPAA2_PMD_ERR("Unable to allocate flow-dist parameters"); + return NULL; + } + flow = rte_zmalloc(NULL, sizeof(struct rte_flow) + + (2 * DIST_PARAM_IOVA_SIZE), RTE_CACHE_LINE_SIZE); + if (!flow) { + DPAA2_PMD_ERR( + "Memory allocation failure for rule configuration\n"); + goto creation_error; + } + key_iova = (void *)((size_t)flow + sizeof(struct rte_flow)); + mask_iova = (void *)((size_t)key_iova + DIST_PARAM_IOVA_SIZE); + + /* Currently taking only IP protocol as an extract type. + * This can be exended to other fields using pattern->type. 
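+ * Only pattern[0] and actions[0] are consumed here. A minimal usage sketch
+ * (hypothetical values: dpdmux id 0, UDP destination port 2152 steered to
+ * interface 1):
+ *
+ *   struct rte_flow_item_udp spec = { .hdr.dst_port = RTE_BE16(2152) };
+ *   struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_UDP,
+ *                                 .spec = &spec,
+ *                                 .mask = &rte_flow_item_udp_mask };
+ *   struct rte_flow_action_vf vf = { .id = 1 };
+ *   struct rte_flow_action act = { .type = RTE_FLOW_ACTION_TYPE_VF,
+ *                                  .conf = &vf };
+ *   struct rte_flow_item *pattern[] = { &item };
+ *   struct rte_flow_action *actions[] = { &act };
+ *   struct rte_flow *fl = rte_pmd_dpaa2_mux_flow_create(0, pattern, actions);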
+ */ + memset(&kg_cfg, 0, sizeof(struct dpkg_profile_cfg)); + + switch (pattern[0]->type) { + case RTE_FLOW_ITEM_TYPE_IPV4: + { + const struct rte_flow_item_ipv4 *spec; + + kg_cfg.extracts[0].extract.from_hdr.prot = NET_PROT_IP; + kg_cfg.extracts[0].extract.from_hdr.field = NH_FLD_IP_PROTO; + kg_cfg.extracts[0].type = DPKG_EXTRACT_FROM_HDR; + kg_cfg.extracts[0].extract.from_hdr.type = DPKG_FULL_FIELD; + kg_cfg.num_extracts = 1; + + spec = (const struct rte_flow_item_ipv4 *)pattern[0]->spec; + memcpy(key_iova, (const void *)(&spec->hdr.next_proto_id), + sizeof(uint8_t)); + memcpy(mask_iova, pattern[0]->mask, sizeof(uint8_t)); + key_size = sizeof(uint8_t); + } + break; + + case RTE_FLOW_ITEM_TYPE_UDP: + { + const struct rte_flow_item_udp *spec; + uint16_t udp_dst_port; + + kg_cfg.extracts[0].extract.from_hdr.prot = NET_PROT_UDP; + kg_cfg.extracts[0].extract.from_hdr.field = NH_FLD_UDP_PORT_DST; + kg_cfg.extracts[0].type = DPKG_EXTRACT_FROM_HDR; + kg_cfg.extracts[0].extract.from_hdr.type = DPKG_FULL_FIELD; + kg_cfg.num_extracts = 1; + + spec = (const struct rte_flow_item_udp *)pattern[0]->spec; + udp_dst_port = rte_constant_bswap16(spec->hdr.dst_port); + memcpy((void *)key_iova, (const void *)&udp_dst_port, + sizeof(rte_be16_t)); + memcpy(mask_iova, pattern[0]->mask, sizeof(uint16_t)); + key_size = sizeof(uint16_t); + } + break; + + case RTE_FLOW_ITEM_TYPE_ETH: + { + const struct rte_flow_item_eth *spec; + uint16_t eth_type; + + kg_cfg.extracts[0].extract.from_hdr.prot = NET_PROT_ETH; + kg_cfg.extracts[0].extract.from_hdr.field = NH_FLD_ETH_TYPE; + kg_cfg.extracts[0].type = DPKG_EXTRACT_FROM_HDR; + kg_cfg.extracts[0].extract.from_hdr.type = DPKG_FULL_FIELD; + kg_cfg.num_extracts = 1; + + spec = (const struct rte_flow_item_eth *)pattern[0]->spec; + eth_type = rte_constant_bswap16(spec->type); + memcpy((void *)key_iova, (const void *)ð_type, + sizeof(rte_be16_t)); + memcpy(mask_iova, pattern[0]->mask, sizeof(uint16_t)); + key_size = sizeof(uint16_t); + } + break; + + default: + DPAA2_PMD_ERR("Not supported pattern type: %d", + pattern[0]->type); + goto creation_error; + } + + ret = dpkg_prepare_key_cfg(&kg_cfg, key_cfg_iova); + if (ret) { + DPAA2_PMD_ERR("dpkg_prepare_key_cfg failed: err(%d)", ret); + goto creation_error; + } + + ret = dpdmux_set_custom_key(&dpdmux_dev->dpdmux, CMD_PRI_LOW, + dpdmux_dev->token, + (uint64_t)(DPAA2_VADDR_TO_IOVA(key_cfg_iova))); + if (ret) { + DPAA2_PMD_ERR("dpdmux_set_custom_key failed: err(%d)", ret); + goto creation_error; + } + + /* As now our key extract parameters are set, let us configure + * the rule. 
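+ * rule.key_iova/mask_iova point into the flow allocation itself (the two
+ * DIST_PARAM_IOVA_SIZE areas carved out right after struct rte_flow),
+ * translated to IOVA for the hardware.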
+ */ + flow->rule.key_iova = (uint64_t)(DPAA2_VADDR_TO_IOVA(key_iova)); + flow->rule.mask_iova = (uint64_t)(DPAA2_VADDR_TO_IOVA(mask_iova)); + flow->rule.key_size = key_size; + + vf_conf = (const struct rte_flow_action_vf *)(actions[0]->conf); + if (vf_conf->id == 0 || vf_conf->id > dpdmux_dev->num_ifs) { + DPAA2_PMD_ERR("Invalid destination id\n"); + goto creation_error; + } + dpdmux_action.dest_if = vf_conf->id; + + ret = dpdmux_add_custom_cls_entry(&dpdmux_dev->dpdmux, CMD_PRI_LOW, + dpdmux_dev->token, &flow->rule, + &dpdmux_action); + if (ret) { + DPAA2_PMD_ERR("dpdmux_add_custom_cls_entry failed: err(%d)", + ret); + goto creation_error; + } + + return flow; + +creation_error: + rte_free((void *)key_cfg_iova); + rte_free((void *)flow); + return NULL; +} + +static int +dpaa2_create_dpdmux_device(int vdev_fd __rte_unused, + struct vfio_device_info *obj_info __rte_unused, + int dpdmux_id) +{ + struct dpaa2_dpdmux_dev *dpdmux_dev; + struct dpdmux_attr attr; + int ret; + + PMD_INIT_FUNC_TRACE(); + + /* Allocate DPAA2 dpdmux handle */ + dpdmux_dev = rte_malloc(NULL, sizeof(struct dpaa2_dpdmux_dev), 0); + if (!dpdmux_dev) { + DPAA2_PMD_ERR("Memory allocation failed for DPDMUX Device"); + return -1; + } + + /* Open the dpdmux object */ + dpdmux_dev->dpdmux.regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX); + ret = dpdmux_open(&dpdmux_dev->dpdmux, CMD_PRI_LOW, dpdmux_id, + &dpdmux_dev->token); + if (ret) { + DPAA2_PMD_ERR("Unable to open dpdmux object: err(%d)", ret); + goto init_err; + } + + ret = dpdmux_get_attributes(&dpdmux_dev->dpdmux, CMD_PRI_LOW, + dpdmux_dev->token, &attr); + if (ret) { + DPAA2_PMD_ERR("Unable to get dpdmux attr: err(%d)", ret); + goto init_err; + } + + ret = dpdmux_if_set_default(&dpdmux_dev->dpdmux, CMD_PRI_LOW, + dpdmux_dev->token, 1); + if (ret) { + DPAA2_PMD_ERR("setting default interface failed in %s", + __func__); + goto init_err; + } + + dpdmux_dev->dpdmux_id = dpdmux_id; + dpdmux_dev->num_ifs = attr.num_ifs; + + TAILQ_INSERT_TAIL(&dpdmux_dev_list, dpdmux_dev, next); + + return 0; + +init_err: + if (dpdmux_dev) + rte_free(dpdmux_dev); + + return -1; +} + +static struct rte_dpaa2_object rte_dpaa2_dpdmux_obj = { + .dev_type = DPAA2_MUX, + .create = dpaa2_create_dpdmux_device, +}; + +RTE_PMD_REGISTER_DPAA2_OBJECT(dpdmux, rte_dpaa2_dpdmux_obj); diff --git a/src/spdk/dpdk/drivers/net/dpaa2/dpaa2_pmd_logs.h b/src/spdk/dpdk/drivers/net/dpaa2/dpaa2_pmd_logs.h new file mode 100644 index 000000000..c47ba8e10 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/dpaa2/dpaa2_pmd_logs.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2017 NXP + */ + +#ifndef _DPAA2_PMD_LOGS_H_ +#define _DPAA2_PMD_LOGS_H_ + +extern int dpaa2_logtype_pmd; + +#define DPAA2_PMD_LOG(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, dpaa2_logtype_pmd, "dpaa2_net: " \ + fmt "\n", ##args) + +#define DPAA2_PMD_DEBUG(fmt, args...) \ + rte_log(RTE_LOG_DEBUG, dpaa2_logtype_pmd, "dpaa2_net: %s(): "\ + fmt "\n", __func__, ##args) + +#define PMD_INIT_FUNC_TRACE() DPAA2_PMD_DEBUG(">>") + +#define DPAA2_PMD_CRIT(fmt, args...) \ + DPAA2_PMD_LOG(CRIT, fmt, ## args) +#define DPAA2_PMD_INFO(fmt, args...) \ + DPAA2_PMD_LOG(INFO, fmt, ## args) +#define DPAA2_PMD_ERR(fmt, args...) \ + DPAA2_PMD_LOG(ERR, fmt, ## args) +#define DPAA2_PMD_WARN(fmt, args...) \ + DPAA2_PMD_LOG(WARNING, fmt, ## args) + +/* DP Logs, toggled out at compile time if level lower than current level */ +#define DPAA2_PMD_DP_LOG(level, fmt, args...) \ + RTE_LOG_DP(level, PMD, fmt, ## args) + +#define DPAA2_PMD_DP_DEBUG(fmt, args...) 
\ + DPAA2_PMD_DP_LOG(DEBUG, fmt, ## args) +#define DPAA2_PMD_DP_INFO(fmt, args...) \ + DPAA2_PMD_DP_LOG(INFO, fmt, ## args) +#define DPAA2_PMD_DP_WARN(fmt, args...) \ + DPAA2_PMD_DP_LOG(WARNING, fmt, ## args) + +#endif /* _DPAA2_PMD_LOGS_H_ */ diff --git a/src/spdk/dpdk/drivers/net/dpaa2/dpaa2_ptp.c b/src/spdk/dpdk/drivers/net/dpaa2/dpaa2_ptp.c new file mode 100644 index 000000000..f58eedb31 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/dpaa2/dpaa2_ptp.c @@ -0,0 +1,181 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2019 NXP + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include + +struct dpaa2_dprtc_dev { + struct fsl_mc_io dprtc; /** handle to DPRTC portal object */ + uint16_t token; + uint32_t dprtc_id; /*HW ID for DPRTC object */ +}; +static struct dpaa2_dprtc_dev *dprtc_dev; + +int dpaa2_timesync_enable(struct rte_eth_dev *dev __rte_unused) +{ + return 0; +} + +int dpaa2_timesync_disable(struct rte_eth_dev *dev __rte_unused) +{ + return 0; +} + +int dpaa2_timesync_read_time(struct rte_eth_dev *dev, + struct timespec *timestamp) +{ + uint64_t ns; + int ret = 0; + + RTE_SET_USED(dev); + + ret = dprtc_get_time(&dprtc_dev->dprtc, CMD_PRI_LOW, + dprtc_dev->token, &ns); + if (ret) { + DPAA2_PMD_ERR("dprtc_get_time failed ret: %d", ret); + return ret; + } + + *timestamp = rte_ns_to_timespec(ns); + + return 0; +} + +int dpaa2_timesync_write_time(struct rte_eth_dev *dev, + const struct timespec *ts) +{ + uint64_t ns; + int ret = 0; + + RTE_SET_USED(dev); + + ns = rte_timespec_to_ns(ts); + + ret = dprtc_set_time(&dprtc_dev->dprtc, CMD_PRI_LOW, + dprtc_dev->token, ns); + if (ret) { + DPAA2_PMD_ERR("dprtc_set_time failed ret: %d", ret); + return ret; + } + + return 0; +} + +int dpaa2_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta) +{ + uint64_t ns; + int ret = 0; + + RTE_SET_USED(dev); + + ret = dprtc_get_time(&dprtc_dev->dprtc, CMD_PRI_LOW, + dprtc_dev->token, &ns); + if (ret) { + DPAA2_PMD_ERR("dprtc_get_time failed ret: %d", ret); + return ret; + } + + ns += delta; + + ret = dprtc_set_time(&dprtc_dev->dprtc, CMD_PRI_LOW, + dprtc_dev->token, ns); + if (ret) { + DPAA2_PMD_ERR("dprtc_set_time failed ret: %d", ret); + return ret; + } + + return 0; +} + +int dpaa2_timesync_read_tx_timestamp(struct rte_eth_dev *dev, + struct timespec *timestamp) +{ + struct dpaa2_dev_priv *priv = dev->data->dev_private; + + if (priv->next_tx_conf_queue) + dpaa2_dev_tx_conf(priv->next_tx_conf_queue); + else + return -1; + *timestamp = rte_ns_to_timespec(priv->tx_timestamp); + + return 0; +} + +int dpaa2_timesync_read_rx_timestamp(struct rte_eth_dev *dev, + struct timespec *timestamp, + uint32_t flags __rte_unused) +{ + struct dpaa2_dev_priv *priv = dev->data->dev_private; + *timestamp = rte_ns_to_timespec(priv->rx_timestamp); + return 0; +} + +static int +dpaa2_create_dprtc_device(int vdev_fd __rte_unused, + struct vfio_device_info *obj_info __rte_unused, + int dprtc_id) +{ + struct dprtc_attr attr; + int ret; + + PMD_INIT_FUNC_TRACE(); + + /* Allocate DPAA2 dprtc handle */ + dprtc_dev = rte_malloc(NULL, sizeof(struct dpaa2_dprtc_dev), 0); + if (!dprtc_dev) { + DPAA2_PMD_ERR("Memory allocation failed for DPRTC Device"); + return -1; + } + + /* Open the dprtc object */ + dprtc_dev->dprtc.regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX); + ret = dprtc_open(&dprtc_dev->dprtc, CMD_PRI_LOW, dprtc_id, + &dprtc_dev->token); + if (ret) { + DPAA2_PMD_ERR("Unable to open dprtc object: 
err(%d)", ret); + goto init_err; + } + + ret = dprtc_get_attributes(&dprtc_dev->dprtc, CMD_PRI_LOW, + dprtc_dev->token, &attr); + if (ret) { + DPAA2_PMD_ERR("Unable to get dprtc attr: err(%d)", ret); + goto init_err; + } + + dprtc_dev->dprtc_id = dprtc_id; + + return 0; + +init_err: + if (dprtc_dev) + rte_free(dprtc_dev); + + return -1; +} + +static struct rte_dpaa2_object rte_dpaa2_dprtc_obj = { + .dev_type = DPAA2_DPRTC, + .create = dpaa2_create_dprtc_device, +}; + +RTE_PMD_REGISTER_DPAA2_OBJECT(dprtc, rte_dpaa2_dprtc_obj); diff --git a/src/spdk/dpdk/drivers/net/dpaa2/dpaa2_rxtx.c b/src/spdk/dpdk/drivers/net/dpaa2/dpaa2_rxtx.c new file mode 100644 index 000000000..630f8c73c --- /dev/null +++ b/src/spdk/dpdk/drivers/net/dpaa2/dpaa2_rxtx.c @@ -0,0 +1,1643 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved. + * Copyright 2016-2020 NXP + * + */ + +#include +#include + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "dpaa2_pmd_logs.h" +#include "dpaa2_ethdev.h" +#include "base/dpaa2_hw_dpni_annot.h" + +static inline uint32_t __rte_hot +dpaa2_dev_rx_parse_slow(struct rte_mbuf *mbuf, + struct dpaa2_annot_hdr *annotation); + +static void enable_tx_tstamp(struct qbman_fd *fd) __rte_unused; + +#define DPAA2_MBUF_TO_CONTIG_FD(_mbuf, _fd, _bpid) do { \ + DPAA2_SET_FD_ADDR(_fd, DPAA2_MBUF_VADDR_TO_IOVA(_mbuf)); \ + DPAA2_SET_FD_LEN(_fd, _mbuf->data_len); \ + DPAA2_SET_ONLY_FD_BPID(_fd, _bpid); \ + DPAA2_SET_FD_OFFSET(_fd, _mbuf->data_off); \ + DPAA2_SET_FD_FRC(_fd, 0); \ + DPAA2_RESET_FD_CTRL(_fd); \ + DPAA2_RESET_FD_FLC(_fd); \ +} while (0) + +static inline void __rte_hot +dpaa2_dev_rx_parse_new(struct rte_mbuf *m, const struct qbman_fd *fd) +{ + struct dpaa2_annot_hdr *annotation; + uint16_t frc = DPAA2_GET_FD_FRC_PARSE_SUM(fd); + + m->packet_type = RTE_PTYPE_UNKNOWN; + switch (frc) { + case DPAA2_PKT_TYPE_ETHER: + m->packet_type = RTE_PTYPE_L2_ETHER; + break; + case DPAA2_PKT_TYPE_IPV4: + m->packet_type = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4; + break; + case DPAA2_PKT_TYPE_IPV6: + m->packet_type = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6; + break; + case DPAA2_PKT_TYPE_IPV4_EXT: + m->packet_type = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT; + break; + case DPAA2_PKT_TYPE_IPV6_EXT: + m->packet_type = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6_EXT; + break; + case DPAA2_PKT_TYPE_IPV4_TCP: + m->packet_type = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP; + break; + case DPAA2_PKT_TYPE_IPV6_TCP: + m->packet_type = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP; + break; + case DPAA2_PKT_TYPE_IPV4_UDP: + m->packet_type = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP; + break; + case DPAA2_PKT_TYPE_IPV6_UDP: + m->packet_type = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP; + break; + case DPAA2_PKT_TYPE_IPV4_SCTP: + m->packet_type = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP; + break; + case DPAA2_PKT_TYPE_IPV6_SCTP: + m->packet_type = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_SCTP; + break; + case DPAA2_PKT_TYPE_IPV4_ICMP: + m->packet_type = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_ICMP; + break; + case DPAA2_PKT_TYPE_IPV6_ICMP: + m->packet_type = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_ICMP; + break; + default: + m->packet_type = dpaa2_dev_rx_parse_slow(m, + (void *)((size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)) + + 
DPAA2_FD_PTA_SIZE)); + } + m->hash.rss = fd->simple.flc_hi; + m->ol_flags |= PKT_RX_RSS_HASH; + + if (dpaa2_enable_ts == PMD_DPAA2_ENABLE_TS) { + annotation = (struct dpaa2_annot_hdr *) + ((size_t)DPAA2_IOVA_TO_VADDR( + DPAA2_GET_FD_ADDR(fd)) + DPAA2_FD_PTA_SIZE); + m->timestamp = annotation->word2; + m->ol_flags |= PKT_RX_TIMESTAMP; + DPAA2_PMD_DP_DEBUG("pkt timestamp:0x%" PRIx64 "", m->timestamp); + } + + DPAA2_PMD_DP_DEBUG("HW frc = 0x%x\t packet type =0x%x " + "ol_flags =0x%" PRIx64 "", + frc, m->packet_type, m->ol_flags); +} + +static inline uint32_t __rte_hot +dpaa2_dev_rx_parse_slow(struct rte_mbuf *mbuf, + struct dpaa2_annot_hdr *annotation) +{ + uint32_t pkt_type = RTE_PTYPE_UNKNOWN; + uint16_t *vlan_tci; + + DPAA2_PMD_DP_DEBUG("(slow parse)annotation(3)=0x%" PRIx64 "\t" + "(4)=0x%" PRIx64 "\t", + annotation->word3, annotation->word4); + +#if defined(RTE_LIBRTE_IEEE1588) + if (BIT_ISSET_AT_POS(annotation->word1, DPAA2_ETH_FAS_PTP)) + mbuf->ol_flags |= PKT_RX_IEEE1588_PTP; +#endif + + if (BIT_ISSET_AT_POS(annotation->word3, L2_VLAN_1_PRESENT)) { + vlan_tci = rte_pktmbuf_mtod_offset(mbuf, uint16_t *, + (VLAN_TCI_OFFSET_1(annotation->word5) >> 16)); + mbuf->vlan_tci = rte_be_to_cpu_16(*vlan_tci); + mbuf->ol_flags |= PKT_RX_VLAN; + pkt_type |= RTE_PTYPE_L2_ETHER_VLAN; + } else if (BIT_ISSET_AT_POS(annotation->word3, L2_VLAN_N_PRESENT)) { + vlan_tci = rte_pktmbuf_mtod_offset(mbuf, uint16_t *, + (VLAN_TCI_OFFSET_1(annotation->word5) >> 16)); + mbuf->vlan_tci = rte_be_to_cpu_16(*vlan_tci); + mbuf->ol_flags |= PKT_RX_VLAN | PKT_RX_QINQ; + pkt_type |= RTE_PTYPE_L2_ETHER_QINQ; + } + + if (BIT_ISSET_AT_POS(annotation->word3, L2_ARP_PRESENT)) { + pkt_type |= RTE_PTYPE_L2_ETHER_ARP; + goto parse_done; + } else if (BIT_ISSET_AT_POS(annotation->word3, L2_ETH_MAC_PRESENT)) { + pkt_type |= RTE_PTYPE_L2_ETHER; + } else { + goto parse_done; + } + + if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV4_1_PRESENT | + L3_IPV4_N_PRESENT)) { + pkt_type |= RTE_PTYPE_L3_IPV4; + if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT | + L3_IP_N_OPT_PRESENT)) + pkt_type |= RTE_PTYPE_L3_IPV4_EXT; + + } else if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV6_1_PRESENT | + L3_IPV6_N_PRESENT)) { + pkt_type |= RTE_PTYPE_L3_IPV6; + if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT | + L3_IP_N_OPT_PRESENT)) + pkt_type |= RTE_PTYPE_L3_IPV6_EXT; + } else { + goto parse_done; + } + + if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE)) + mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD; + else if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE)) + mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD; + + if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_FIRST_FRAGMENT | + L3_IP_1_MORE_FRAGMENT | + L3_IP_N_FIRST_FRAGMENT | + L3_IP_N_MORE_FRAGMENT)) { + pkt_type |= RTE_PTYPE_L4_FRAG; + goto parse_done; + } else { + pkt_type |= RTE_PTYPE_L4_NONFRAG; + } + + if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_UDP_PRESENT)) + pkt_type |= RTE_PTYPE_L4_UDP; + + else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_TCP_PRESENT)) + pkt_type |= RTE_PTYPE_L4_TCP; + + else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_SCTP_PRESENT)) + pkt_type |= RTE_PTYPE_L4_SCTP; + + else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_ICMP_PRESENT)) + pkt_type |= RTE_PTYPE_L4_ICMP; + + else if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_UNKNOWN_PROTOCOL)) + pkt_type |= RTE_PTYPE_UNKNOWN; + +parse_done: + return pkt_type; +} + +static inline uint32_t __rte_hot +dpaa2_dev_rx_parse(struct rte_mbuf *mbuf, void *hw_annot_addr) +{ + struct dpaa2_annot_hdr 
*annotation = + (struct dpaa2_annot_hdr *)hw_annot_addr; + + DPAA2_PMD_DP_DEBUG("(fast parse) Annotation = 0x%" PRIx64 "\t", + annotation->word4); + + if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE)) + mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD; + else if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE)) + mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD; + + mbuf->ol_flags |= PKT_RX_TIMESTAMP; + mbuf->timestamp = annotation->word2; + DPAA2_PMD_DP_DEBUG("pkt timestamp: 0x%" PRIx64 "", mbuf->timestamp); + + /* Check detailed parsing requirement */ + if (annotation->word3 & 0x7FFFFC3FFFF) + return dpaa2_dev_rx_parse_slow(mbuf, annotation); + + /* Return some common types from parse processing */ + switch (annotation->word4) { + case DPAA2_L3_IPv4: + return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4; + case DPAA2_L3_IPv6: + return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6; + case DPAA2_L3_IPv4_TCP: + return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 | + RTE_PTYPE_L4_TCP; + case DPAA2_L3_IPv4_UDP: + return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 | + RTE_PTYPE_L4_UDP; + case DPAA2_L3_IPv6_TCP: + return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 | + RTE_PTYPE_L4_TCP; + case DPAA2_L3_IPv6_UDP: + return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 | + RTE_PTYPE_L4_UDP; + default: + break; + } + + return dpaa2_dev_rx_parse_slow(mbuf, annotation); +} + +static inline struct rte_mbuf *__rte_hot +eth_sg_fd_to_mbuf(const struct qbman_fd *fd, + int port_id) +{ + struct qbman_sge *sgt, *sge; + size_t sg_addr, fd_addr; + int i = 0; + struct rte_mbuf *first_seg, *next_seg, *cur_seg, *temp; + + fd_addr = (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)); + + /* Get Scatter gather table address */ + sgt = (struct qbman_sge *)(fd_addr + DPAA2_GET_FD_OFFSET(fd)); + + sge = &sgt[i++]; + sg_addr = (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FLE_ADDR(sge)); + + /* First Scatter gather entry */ + first_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr, + rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size); + /* Prepare all the metadata for first segment */ + first_seg->buf_addr = (uint8_t *)sg_addr; + first_seg->ol_flags = 0; + first_seg->data_off = DPAA2_GET_FLE_OFFSET(sge); + first_seg->data_len = sge->length & 0x1FFFF; + first_seg->pkt_len = DPAA2_GET_FD_LEN(fd); + first_seg->nb_segs = 1; + first_seg->next = NULL; + first_seg->port = port_id; + if (dpaa2_svr_family == SVR_LX2160A) + dpaa2_dev_rx_parse_new(first_seg, fd); + else + first_seg->packet_type = dpaa2_dev_rx_parse(first_seg, + (void *)((size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)) + + DPAA2_FD_PTA_SIZE)); + + rte_mbuf_refcnt_set(first_seg, 1); + cur_seg = first_seg; + while (!DPAA2_SG_IS_FINAL(sge)) { + sge = &sgt[i++]; + sg_addr = (size_t)DPAA2_IOVA_TO_VADDR( + DPAA2_GET_FLE_ADDR(sge)); + next_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr, + rte_dpaa2_bpid_info[DPAA2_GET_FLE_BPID(sge)].meta_data_size); + next_seg->buf_addr = (uint8_t *)sg_addr; + next_seg->data_off = DPAA2_GET_FLE_OFFSET(sge); + next_seg->data_len = sge->length & 0x1FFFF; + first_seg->nb_segs += 1; + rte_mbuf_refcnt_set(next_seg, 1); + cur_seg->next = next_seg; + next_seg->next = NULL; + cur_seg = next_seg; + } + temp = DPAA2_INLINE_MBUF_FROM_BUF(fd_addr, + rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size); + rte_mbuf_refcnt_set(temp, 1); + rte_pktmbuf_free_seg(temp); + + return (void *)first_seg; +} + +static inline struct rte_mbuf *__rte_hot +eth_fd_to_mbuf(const struct qbman_fd *fd, + int port_id) +{ + void *iova_addr = DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)); + struct rte_mbuf *mbuf = 
DPAA2_INLINE_MBUF_FROM_BUF(iova_addr, + rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size); + + /* need to repopulated some of the fields, + * as they may have changed in last transmission + */ + mbuf->nb_segs = 1; + mbuf->ol_flags = 0; + mbuf->data_off = DPAA2_GET_FD_OFFSET(fd); + mbuf->data_len = DPAA2_GET_FD_LEN(fd); + mbuf->pkt_len = mbuf->data_len; + mbuf->port = port_id; + mbuf->next = NULL; + rte_mbuf_refcnt_set(mbuf, 1); + + /* Parse the packet */ + /* parse results for LX2 are there in FRC field of FD. + * For other DPAA2 platforms , parse results are after + * the private - sw annotation area + */ + + if (dpaa2_svr_family == SVR_LX2160A) + dpaa2_dev_rx_parse_new(mbuf, fd); + else + mbuf->packet_type = dpaa2_dev_rx_parse(mbuf, + (void *)((size_t)iova_addr + DPAA2_FD_PTA_SIZE)); + + DPAA2_PMD_DP_DEBUG("to mbuf - mbuf =%p, mbuf->buf_addr =%p, off = %d," + "fd_off=%d fd =%" PRIx64 ", meta = %d bpid =%d, len=%d\n", + mbuf, mbuf->buf_addr, mbuf->data_off, + DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd), + rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size, + DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd)); + + return mbuf; +} + +static int __rte_noinline __rte_hot +eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf, + struct qbman_fd *fd, uint16_t bpid) +{ + struct rte_mbuf *cur_seg = mbuf, *prev_seg, *mi, *temp; + struct qbman_sge *sgt, *sge = NULL; + int i; + + temp = rte_pktmbuf_alloc(mbuf->pool); + if (temp == NULL) { + DPAA2_PMD_DP_DEBUG("No memory to allocate S/G table\n"); + return -ENOMEM; + } + + DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(temp)); + DPAA2_SET_FD_LEN(fd, mbuf->pkt_len); + DPAA2_SET_ONLY_FD_BPID(fd, bpid); + DPAA2_SET_FD_OFFSET(fd, temp->data_off); + DPAA2_FD_SET_FORMAT(fd, qbman_fd_sg); + DPAA2_RESET_FD_FRC(fd); + DPAA2_RESET_FD_CTRL(fd); + /*Set Scatter gather table and Scatter gather entries*/ + sgt = (struct qbman_sge *)( + (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)) + + DPAA2_GET_FD_OFFSET(fd)); + + for (i = 0; i < mbuf->nb_segs; i++) { + sge = &sgt[i]; + /*Resetting the buffer pool id and offset field*/ + sge->fin_bpid_offset = 0; + DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(cur_seg)); + DPAA2_SET_FLE_OFFSET(sge, cur_seg->data_off); + sge->length = cur_seg->data_len; + if (RTE_MBUF_DIRECT(cur_seg)) { + if (rte_mbuf_refcnt_read(cur_seg) > 1) { + /* If refcnt > 1, invalid bpid is set to ensure + * buffer is not freed by HW + */ + DPAA2_SET_FLE_IVP(sge); + rte_mbuf_refcnt_update(cur_seg, -1); + } else + DPAA2_SET_FLE_BPID(sge, + mempool_to_bpid(cur_seg->pool)); + cur_seg = cur_seg->next; + } else { + /* Get owner MBUF from indirect buffer */ + mi = rte_mbuf_from_indirect(cur_seg); + if (rte_mbuf_refcnt_read(mi) > 1) { + /* If refcnt > 1, invalid bpid is set to ensure + * owner buffer is not freed by HW + */ + DPAA2_SET_FLE_IVP(sge); + } else { + DPAA2_SET_FLE_BPID(sge, + mempool_to_bpid(mi->pool)); + rte_mbuf_refcnt_update(mi, 1); + } + prev_seg = cur_seg; + cur_seg = cur_seg->next; + prev_seg->next = NULL; + rte_pktmbuf_free(prev_seg); + } + } + DPAA2_SG_SET_FINAL(sge, true); + return 0; +} + +static void +eth_mbuf_to_fd(struct rte_mbuf *mbuf, + struct qbman_fd *fd, uint16_t bpid) __rte_unused; + +static void __rte_noinline __rte_hot +eth_mbuf_to_fd(struct rte_mbuf *mbuf, + struct qbman_fd *fd, uint16_t bpid) +{ + DPAA2_MBUF_TO_CONTIG_FD(mbuf, fd, bpid); + + DPAA2_PMD_DP_DEBUG("mbuf =%p, mbuf->buf_addr =%p, off = %d," + "fd_off=%d fd =%" PRIx64 ", meta = %d bpid =%d, len=%d\n", + mbuf, mbuf->buf_addr, mbuf->data_off, + 
DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd), + rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size, + DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd)); + if (RTE_MBUF_DIRECT(mbuf)) { + if (rte_mbuf_refcnt_read(mbuf) > 1) { + DPAA2_SET_FD_IVP(fd); + rte_mbuf_refcnt_update(mbuf, -1); + } + } else { + struct rte_mbuf *mi; + + mi = rte_mbuf_from_indirect(mbuf); + if (rte_mbuf_refcnt_read(mi) > 1) + DPAA2_SET_FD_IVP(fd); + else + rte_mbuf_refcnt_update(mi, 1); + rte_pktmbuf_free(mbuf); + } +} + +static inline int __rte_hot +eth_copy_mbuf_to_fd(struct rte_mbuf *mbuf, + struct qbman_fd *fd, uint16_t bpid) +{ + struct rte_mbuf *m; + void *mb = NULL; + + if (rte_dpaa2_mbuf_alloc_bulk( + rte_dpaa2_bpid_info[bpid].bp_list->mp, &mb, 1)) { + DPAA2_PMD_DP_DEBUG("Unable to allocated DPAA2 buffer\n"); + return -1; + } + m = (struct rte_mbuf *)mb; + memcpy((char *)m->buf_addr + mbuf->data_off, + (void *)((char *)mbuf->buf_addr + mbuf->data_off), + mbuf->pkt_len); + + /* Copy required fields */ + m->data_off = mbuf->data_off; + m->ol_flags = mbuf->ol_flags; + m->packet_type = mbuf->packet_type; + m->tx_offload = mbuf->tx_offload; + + DPAA2_MBUF_TO_CONTIG_FD(m, fd, bpid); + + DPAA2_PMD_DP_DEBUG( + "mbuf: %p, BMAN buf addr: %p, fdaddr: %" PRIx64 ", bpid: %d," + " meta: %d, off: %d, len: %d\n", + (void *)mbuf, + mbuf->buf_addr, + DPAA2_GET_FD_ADDR(fd), + DPAA2_GET_FD_BPID(fd), + rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size, + DPAA2_GET_FD_OFFSET(fd), + DPAA2_GET_FD_LEN(fd)); + +return 0; +} + +/* This function assumes that caller will be keep the same value for nb_pkts + * across calls per queue, if that is not the case, better use non-prefetch + * version of rx call. + * It will return the packets as requested in previous call without honoring + * the current nb_pkts or bufs space. + */ +uint16_t +dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) +{ + /* Function receive frames for a given device and VQ*/ + struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue; + struct qbman_result *dq_storage, *dq_storage1 = NULL; + uint32_t fqid = dpaa2_q->fqid; + int ret, num_rx = 0, pull_size; + uint8_t pending, status; + struct qbman_swp *swp; + const struct qbman_fd *fd; + struct qbman_pull_desc pulldesc; + struct queue_storage_info_t *q_storage = dpaa2_q->q_storage; + struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data; +#if defined(RTE_LIBRTE_IEEE1588) + struct dpaa2_dev_priv *priv = eth_data->dev_private; +#endif + + if (unlikely(!DPAA2_PER_LCORE_ETHRX_DPIO)) { + ret = dpaa2_affine_qbman_ethrx_swp(); + if (ret) { + DPAA2_PMD_ERR("Failure in affining portal"); + return 0; + } + } + + if (unlikely(!rte_dpaa2_bpid_info && + rte_eal_process_type() == RTE_PROC_SECONDARY)) + rte_dpaa2_bpid_info = dpaa2_q->bp_array; + + swp = DPAA2_PER_LCORE_ETHRX_PORTAL; + pull_size = (nb_pkts > dpaa2_dqrr_size) ? 
dpaa2_dqrr_size : nb_pkts; + if (unlikely(!q_storage->active_dqs)) { + q_storage->toggle = 0; + dq_storage = q_storage->dq_storage[q_storage->toggle]; + q_storage->last_num_pkts = pull_size; + qbman_pull_desc_clear(&pulldesc); + qbman_pull_desc_set_numframes(&pulldesc, + q_storage->last_num_pkts); + qbman_pull_desc_set_fq(&pulldesc, fqid); + qbman_pull_desc_set_storage(&pulldesc, dq_storage, + (uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1); + if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) { + while (!qbman_check_command_complete( + get_swp_active_dqs( + DPAA2_PER_LCORE_ETHRX_DPIO->index))) + ; + clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index); + } + while (1) { + if (qbman_swp_pull(swp, &pulldesc)) { + DPAA2_PMD_DP_DEBUG("VDQ command is not issued." + " QBMAN is busy (1)\n"); + /* Portal was busy, try again */ + continue; + } + break; + } + q_storage->active_dqs = dq_storage; + q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index; + set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index, + dq_storage); + } + + dq_storage = q_storage->active_dqs; + rte_prefetch0((void *)(size_t)(dq_storage)); + rte_prefetch0((void *)(size_t)(dq_storage + 1)); + + /* Prepare next pull descriptor. This will give space for the + * prefething done on DQRR entries + */ + q_storage->toggle ^= 1; + dq_storage1 = q_storage->dq_storage[q_storage->toggle]; + qbman_pull_desc_clear(&pulldesc); + qbman_pull_desc_set_numframes(&pulldesc, pull_size); + qbman_pull_desc_set_fq(&pulldesc, fqid); + qbman_pull_desc_set_storage(&pulldesc, dq_storage1, + (uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1); + + /* Check if the previous issued command is completed. + * Also seems like the SWP is shared between the Ethernet Driver + * and the SEC driver. + */ + while (!qbman_check_command_complete(dq_storage)) + ; + if (dq_storage == get_swp_active_dqs(q_storage->active_dpio_id)) + clear_swp_active_dqs(q_storage->active_dpio_id); + + pending = 1; + + do { + /* Loop until the dq_storage is updated with + * new token by QBMAN + */ + while (!qbman_check_new_result(dq_storage)) + ; + rte_prefetch0((void *)((size_t)(dq_storage + 2))); + /* Check whether Last Pull command is Expired and + * setting Condition for Loop termination + */ + if (qbman_result_DQ_is_pull_complete(dq_storage)) { + pending = 0; + /* Check for valid frame. 
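+ * A pull-complete result without QBMAN_DQ_STAT_VALIDFRAME carries no
+ * frame descriptor, so it is skipped.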
*/ + status = qbman_result_DQ_flags(dq_storage); + if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) + continue; + } + fd = qbman_result_DQ_fd(dq_storage); + +#ifndef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA + if (dpaa2_svr_family != SVR_LX2160A) { + const struct qbman_fd *next_fd = + qbman_result_DQ_fd(dq_storage + 1); + /* Prefetch Annotation address for the parse results */ + rte_prefetch0(DPAA2_IOVA_TO_VADDR((DPAA2_GET_FD_ADDR( + next_fd) + DPAA2_FD_PTA_SIZE + 16))); + } +#endif + + if (unlikely(DPAA2_FD_GET_FORMAT(fd) == qbman_fd_sg)) + bufs[num_rx] = eth_sg_fd_to_mbuf(fd, eth_data->port_id); + else + bufs[num_rx] = eth_fd_to_mbuf(fd, eth_data->port_id); +#if defined(RTE_LIBRTE_IEEE1588) + priv->rx_timestamp = bufs[num_rx]->timestamp; +#endif + + if (eth_data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_VLAN_STRIP) + rte_vlan_strip(bufs[num_rx]); + + dq_storage++; + num_rx++; + } while (pending); + + if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) { + while (!qbman_check_command_complete( + get_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index))) + ; + clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index); + } + /* issue a volatile dequeue command for next pull */ + while (1) { + if (qbman_swp_pull(swp, &pulldesc)) { + DPAA2_PMD_DP_DEBUG("VDQ command is not issued." + "QBMAN is busy (2)\n"); + continue; + } + break; + } + q_storage->active_dqs = dq_storage1; + q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index; + set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index, dq_storage1); + + dpaa2_q->rx_pkts += num_rx; + + return num_rx; +} + +void __rte_hot +dpaa2_dev_process_parallel_event(struct qbman_swp *swp, + const struct qbman_fd *fd, + const struct qbman_result *dq, + struct dpaa2_queue *rxq, + struct rte_event *ev) +{ + rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) + + DPAA2_FD_PTA_SIZE + 16)); + + ev->flow_id = rxq->ev.flow_id; + ev->sub_event_type = rxq->ev.sub_event_type; + ev->event_type = RTE_EVENT_TYPE_ETHDEV; + ev->op = RTE_EVENT_OP_NEW; + ev->sched_type = rxq->ev.sched_type; + ev->queue_id = rxq->ev.queue_id; + ev->priority = rxq->ev.priority; + + ev->mbuf = eth_fd_to_mbuf(fd, rxq->eth_data->port_id); + + qbman_swp_dqrr_consume(swp, dq); +} + +void __rte_hot +dpaa2_dev_process_atomic_event(struct qbman_swp *swp __rte_unused, + const struct qbman_fd *fd, + const struct qbman_result *dq, + struct dpaa2_queue *rxq, + struct rte_event *ev) +{ + uint8_t dqrr_index; + + rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) + + DPAA2_FD_PTA_SIZE + 16)); + + ev->flow_id = rxq->ev.flow_id; + ev->sub_event_type = rxq->ev.sub_event_type; + ev->event_type = RTE_EVENT_TYPE_ETHDEV; + ev->op = RTE_EVENT_OP_NEW; + ev->sched_type = rxq->ev.sched_type; + ev->queue_id = rxq->ev.queue_id; + ev->priority = rxq->ev.priority; + + ev->mbuf = eth_fd_to_mbuf(fd, rxq->eth_data->port_id); + + dqrr_index = qbman_get_dqrr_idx(dq); + ev->mbuf->seqn = dqrr_index + 1; + DPAA2_PER_LCORE_DQRR_SIZE++; + DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index; + DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = ev->mbuf; +} + +void __rte_hot +dpaa2_dev_process_ordered_event(struct qbman_swp *swp, + const struct qbman_fd *fd, + const struct qbman_result *dq, + struct dpaa2_queue *rxq, + struct rte_event *ev) +{ + rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) + + DPAA2_FD_PTA_SIZE + 16)); + + ev->flow_id = rxq->ev.flow_id; + ev->sub_event_type = rxq->ev.sub_event_type; + ev->event_type = RTE_EVENT_TYPE_ETHDEV; + ev->op = RTE_EVENT_OP_NEW; + ev->sched_type = rxq->ev.sched_type; + ev->queue_id = 
rxq->ev.queue_id; + ev->priority = rxq->ev.priority; + + ev->mbuf = eth_fd_to_mbuf(fd, rxq->eth_data->port_id); + + ev->mbuf->seqn = DPAA2_ENQUEUE_FLAG_ORP; + ev->mbuf->seqn |= qbman_result_DQ_odpid(dq) << DPAA2_EQCR_OPRID_SHIFT; + ev->mbuf->seqn |= qbman_result_DQ_seqnum(dq) << DPAA2_EQCR_SEQNUM_SHIFT; + + qbman_swp_dqrr_consume(swp, dq); +} + +uint16_t +dpaa2_dev_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) +{ + /* Function receive frames for a given device and VQ */ + struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue; + struct qbman_result *dq_storage; + uint32_t fqid = dpaa2_q->fqid; + int ret, num_rx = 0, next_pull = nb_pkts, num_pulled; + uint8_t pending, status; + struct qbman_swp *swp; + const struct qbman_fd *fd; + struct qbman_pull_desc pulldesc; + struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data; + + if (unlikely(!DPAA2_PER_LCORE_DPIO)) { + ret = dpaa2_affine_qbman_swp(); + if (ret) { + DPAA2_PMD_ERR( + "Failed to allocate IO portal, tid: %d\n", + rte_gettid()); + return 0; + } + } + swp = DPAA2_PER_LCORE_PORTAL; + + do { + dq_storage = dpaa2_q->q_storage->dq_storage[0]; + qbman_pull_desc_clear(&pulldesc); + qbman_pull_desc_set_fq(&pulldesc, fqid); + qbman_pull_desc_set_storage(&pulldesc, dq_storage, + (size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1); + + if (next_pull > dpaa2_dqrr_size) { + qbman_pull_desc_set_numframes(&pulldesc, + dpaa2_dqrr_size); + next_pull -= dpaa2_dqrr_size; + } else { + qbman_pull_desc_set_numframes(&pulldesc, next_pull); + next_pull = 0; + } + + while (1) { + if (qbman_swp_pull(swp, &pulldesc)) { + DPAA2_PMD_DP_DEBUG( + "VDQ command is not issued.QBMAN is busy\n"); + /* Portal was busy, try again */ + continue; + } + break; + } + + rte_prefetch0((void *)((size_t)(dq_storage + 1))); + /* Check if the previous issued command is completed. */ + while (!qbman_check_command_complete(dq_storage)) + ; + + num_pulled = 0; + pending = 1; + do { + /* Loop until the dq_storage is updated with + * new token by QBMAN + */ + while (!qbman_check_new_result(dq_storage)) + ; + rte_prefetch0((void *)((size_t)(dq_storage + 2))); + /* Check whether Last Pull command is Expired and + * setting Condition for Loop termination + */ + if (qbman_result_DQ_is_pull_complete(dq_storage)) { + pending = 0; + /* Check for valid frame. */ + status = qbman_result_DQ_flags(dq_storage); + if (unlikely((status & + QBMAN_DQ_STAT_VALIDFRAME) == 0)) + continue; + } + fd = qbman_result_DQ_fd(dq_storage); + +#ifndef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA + if (dpaa2_svr_family != SVR_LX2160A) { + const struct qbman_fd *next_fd = + qbman_result_DQ_fd(dq_storage + 1); + + /* Prefetch Annotation address for the parse + * results. 
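+ * The hardware annotation area sits DPAA2_FD_PTA_SIZE bytes past the
+ * frame address; prefetching it for the next FD hides the load latency
+ * of the parse step.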
+ */ + rte_prefetch0((DPAA2_IOVA_TO_VADDR( + DPAA2_GET_FD_ADDR(next_fd) + + DPAA2_FD_PTA_SIZE + 16))); + } +#endif + + if (unlikely(DPAA2_FD_GET_FORMAT(fd) == qbman_fd_sg)) + bufs[num_rx] = eth_sg_fd_to_mbuf(fd, + eth_data->port_id); + else + bufs[num_rx] = eth_fd_to_mbuf(fd, + eth_data->port_id); + + if (eth_data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_VLAN_STRIP) { + rte_vlan_strip(bufs[num_rx]); + } + + dq_storage++; + num_rx++; + num_pulled++; + } while (pending); + /* Last VDQ provided all packets and more packets are requested */ + } while (next_pull && num_pulled == dpaa2_dqrr_size); + + dpaa2_q->rx_pkts += num_rx; + + return num_rx; +} + +uint16_t dpaa2_dev_tx_conf(void *queue) +{ + /* Function receive frames for a given device and VQ */ + struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue; + struct qbman_result *dq_storage; + uint32_t fqid = dpaa2_q->fqid; + int ret, num_tx_conf = 0, num_pulled; + uint8_t pending, status; + struct qbman_swp *swp; + const struct qbman_fd *fd, *next_fd; + struct qbman_pull_desc pulldesc; + struct qbman_release_desc releasedesc; + uint32_t bpid; + uint64_t buf; +#if defined(RTE_LIBRTE_IEEE1588) + struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data; + struct dpaa2_dev_priv *priv = eth_data->dev_private; + struct dpaa2_annot_hdr *annotation; +#endif + + if (unlikely(!DPAA2_PER_LCORE_DPIO)) { + ret = dpaa2_affine_qbman_swp(); + if (ret) { + DPAA2_PMD_ERR( + "Failed to allocate IO portal, tid: %d\n", + rte_gettid()); + return 0; + } + } + swp = DPAA2_PER_LCORE_PORTAL; + + do { + dq_storage = dpaa2_q->q_storage->dq_storage[0]; + qbman_pull_desc_clear(&pulldesc); + qbman_pull_desc_set_fq(&pulldesc, fqid); + qbman_pull_desc_set_storage(&pulldesc, dq_storage, + (size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1); + + qbman_pull_desc_set_numframes(&pulldesc, dpaa2_dqrr_size); + + while (1) { + if (qbman_swp_pull(swp, &pulldesc)) { + DPAA2_PMD_DP_DEBUG("VDQ command is not issued." + "QBMAN is busy\n"); + /* Portal was busy, try again */ + continue; + } + break; + } + + rte_prefetch0((void *)((size_t)(dq_storage + 1))); + /* Check if the previous issued command is completed. */ + while (!qbman_check_command_complete(dq_storage)) + ; + + num_pulled = 0; + pending = 1; + do { + /* Loop until the dq_storage is updated with + * new token by QBMAN + */ + while (!qbman_check_new_result(dq_storage)) + ; + rte_prefetch0((void *)((size_t)(dq_storage + 2))); + /* Check whether Last Pull command is Expired and + * setting Condition for Loop termination + */ + if (qbman_result_DQ_is_pull_complete(dq_storage)) { + pending = 0; + /* Check for valid frame. 
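+ * Tx-confirmation frames carry no payload to deliver; each valid FD
+ * only identifies a buffer to be released back to its pool below.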
*/ + status = qbman_result_DQ_flags(dq_storage); + if (unlikely((status & + QBMAN_DQ_STAT_VALIDFRAME) == 0)) + continue; + } + fd = qbman_result_DQ_fd(dq_storage); + + next_fd = qbman_result_DQ_fd(dq_storage + 1); + /* Prefetch Annotation address for the parse results */ + rte_prefetch0((void *)(size_t) + (DPAA2_GET_FD_ADDR(next_fd) + + DPAA2_FD_PTA_SIZE + 16)); + + bpid = DPAA2_GET_FD_BPID(fd); + + /* Create a release descriptor required for releasing + * buffers into QBMAN + */ + qbman_release_desc_clear(&releasedesc); + qbman_release_desc_set_bpid(&releasedesc, bpid); + + buf = DPAA2_GET_FD_ADDR(fd); + /* feed them to bman */ + do { + ret = qbman_swp_release(swp, &releasedesc, + &buf, 1); + } while (ret == -EBUSY); + + dq_storage++; + num_tx_conf++; + num_pulled++; +#if defined(RTE_LIBRTE_IEEE1588) + annotation = (struct dpaa2_annot_hdr *)((size_t) + DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)) + + DPAA2_FD_PTA_SIZE); + priv->tx_timestamp = annotation->word2; +#endif + } while (pending); + + /* Last VDQ provided all packets and more packets are requested */ + } while (num_pulled == dpaa2_dqrr_size); + + dpaa2_q->rx_pkts += num_tx_conf; + + return num_tx_conf; +} + +/* Configure the egress frame annotation for timestamp update */ +static void enable_tx_tstamp(struct qbman_fd *fd) +{ + struct dpaa2_faead *fd_faead; + + /* Set frame annotation status field as valid */ + (fd)->simple.frc |= DPAA2_FD_FRC_FASV; + + /* Set frame annotation egress action descriptor as valid */ + (fd)->simple.frc |= DPAA2_FD_FRC_FAEADV; + + /* Set Annotation Length as 128B */ + (fd)->simple.ctrl |= DPAA2_FD_CTRL_ASAL; + + /* enable update of confirmation frame annotation */ + fd_faead = (struct dpaa2_faead *)((size_t) + DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)) + + DPAA2_FD_PTA_SIZE + DPAA2_FD_HW_ANNOT_FAEAD_OFFSET); + fd_faead->ctrl = DPAA2_ANNOT_FAEAD_A2V | DPAA2_ANNOT_FAEAD_UPDV | + DPAA2_ANNOT_FAEAD_UPD; +} + +/* + * Callback to handle sending packets through WRIOP based interface + */ +uint16_t +dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) +{ + /* Function to transmit the frames to given device and VQ*/ + uint32_t loop, retry_count; + int32_t ret; + struct qbman_fd fd_arr[MAX_TX_RING_SLOTS]; + struct rte_mbuf *mi; + uint32_t frames_to_send; + struct rte_mempool *mp; + struct qbman_eq_desc eqdesc; + struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue; + struct qbman_swp *swp; + uint16_t num_tx = 0; + uint16_t bpid; + struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data; + struct dpaa2_dev_priv *priv = eth_data->dev_private; + uint32_t flags[MAX_TX_RING_SLOTS] = {0}; + + if (unlikely(!DPAA2_PER_LCORE_DPIO)) { + ret = dpaa2_affine_qbman_swp(); + if (ret) { + DPAA2_PMD_ERR( + "Failed to allocate IO portal, tid: %d\n", + rte_gettid()); + return 0; + } + } + swp = DPAA2_PER_LCORE_PORTAL; + + DPAA2_PMD_DP_DEBUG("===> eth_data =%p, fqid =%d\n", + eth_data, dpaa2_q->fqid); + +#ifdef RTE_LIBRTE_IEEE1588 + /* IEEE1588 driver need pointer to tx confirmation queue + * corresponding to last packet transmitted for reading + * the timestamp + */ + priv->next_tx_conf_queue = dpaa2_q->tx_conf_queue; + dpaa2_dev_tx_conf(dpaa2_q->tx_conf_queue); +#endif + + /*Prepare enqueue descriptor*/ + qbman_eq_desc_clear(&eqdesc); + qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ); + qbman_eq_desc_set_fq(&eqdesc, dpaa2_q->fqid); + + /*Clear the unused FD fields before sending*/ + while (nb_pkts) { + /*Check if the queue is congested*/ + retry_count = 0; + while (qbman_result_SCN_state(dpaa2_q->cscn)) { 
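+ /* dpaa2_q->cscn points to the congestion state notification memory + * for this Tx queue; a set state bit means the egress frame queue is + * currently congested. + */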
+ retry_count++; + /* Retry for some time before giving up */ + if (retry_count > CONG_RETRY_COUNT) + goto skip_tx; + } + + frames_to_send = (nb_pkts > dpaa2_eqcr_size) ? + dpaa2_eqcr_size : nb_pkts; + + for (loop = 0; loop < frames_to_send; loop++) { + if ((*bufs)->seqn) { + uint8_t dqrr_index = (*bufs)->seqn - 1; + + flags[loop] = QBMAN_ENQUEUE_FLAG_DCA | + dqrr_index; + DPAA2_PER_LCORE_DQRR_SIZE--; + DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index); + (*bufs)->seqn = DPAA2_INVALID_MBUF_SEQN; + } + + if (likely(RTE_MBUF_DIRECT(*bufs))) { + mp = (*bufs)->pool; + /* Check the basic scenario and set + * the FD appropriately here itself. + */ + if (likely(mp && mp->ops_index == + priv->bp_list->dpaa2_ops_index && + (*bufs)->nb_segs == 1 && + rte_mbuf_refcnt_read((*bufs)) == 1)) { + if (unlikely(((*bufs)->ol_flags + & PKT_TX_VLAN_PKT) || + (eth_data->dev_conf.txmode.offloads + & DEV_TX_OFFLOAD_VLAN_INSERT))) { + ret = rte_vlan_insert(bufs); + if (ret) + goto send_n_return; + } + DPAA2_MBUF_TO_CONTIG_FD((*bufs), + &fd_arr[loop], mempool_to_bpid(mp)); + bufs++; +#ifdef RTE_LIBRTE_IEEE1588 + enable_tx_tstamp(&fd_arr[loop]); +#endif + continue; + } + } else { + mi = rte_mbuf_from_indirect(*bufs); + mp = mi->pool; + } + /* Not a hw_pkt pool allocated frame */ + if (unlikely(!mp || !priv->bp_list)) { + DPAA2_PMD_ERR("Err: No buffer pool attached"); + goto send_n_return; + } + + if (unlikely(((*bufs)->ol_flags & PKT_TX_VLAN_PKT) || + (eth_data->dev_conf.txmode.offloads + & DEV_TX_OFFLOAD_VLAN_INSERT))) { + int ret = rte_vlan_insert(bufs); + if (ret) + goto send_n_return; + } + if (mp->ops_index != priv->bp_list->dpaa2_ops_index) { + DPAA2_PMD_WARN("Non DPAA2 buffer pool"); + /* alloc should be from the default buffer pool + * attached to this interface + */ + bpid = priv->bp_list->buf_pool.bpid; + + if (unlikely((*bufs)->nb_segs > 1)) { + DPAA2_PMD_ERR("S/G support not added" + " for non hw offload buffer"); + goto send_n_return; + } + if (eth_copy_mbuf_to_fd(*bufs, + &fd_arr[loop], bpid)) { + goto send_n_return; + } + /* free the original packet */ + rte_pktmbuf_free(*bufs); + } else { + bpid = mempool_to_bpid(mp); + if (unlikely((*bufs)->nb_segs > 1)) { + if (eth_mbuf_to_sg_fd(*bufs, + &fd_arr[loop], bpid)) + goto send_n_return; + } else { + eth_mbuf_to_fd(*bufs, + &fd_arr[loop], bpid); + } + } +#ifdef RTE_LIBRTE_IEEE1588 + enable_tx_tstamp(&fd_arr[loop]); +#endif + bufs++; + } + + loop = 0; + retry_count = 0; + while (loop < frames_to_send) { + ret = qbman_swp_enqueue_multiple(swp, &eqdesc, + &fd_arr[loop], &flags[loop], + frames_to_send - loop); + if (unlikely(ret < 0)) { + retry_count++; + if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) { + num_tx += loop; + nb_pkts -= loop; + goto send_n_return; + } + } else { + loop += ret; + retry_count = 0; + } + } + + num_tx += loop; + nb_pkts -= loop; + } + dpaa2_q->tx_pkts += num_tx; + return num_tx; + +send_n_return: + /* send any already prepared fd */ + if (loop) { + unsigned int i = 0; + + retry_count = 0; + while (i < loop) { + ret = qbman_swp_enqueue_multiple(swp, &eqdesc, + &fd_arr[i], + &flags[i], + loop - i); + if (unlikely(ret < 0)) { + retry_count++; + if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) + break; + } else { + i += ret; + retry_count = 0; + } + } + num_tx += i; + } +skip_tx: + dpaa2_q->tx_pkts += num_tx; + return num_tx; +} + +void +dpaa2_dev_free_eqresp_buf(uint16_t eqresp_ci) +{ + struct dpaa2_dpio_dev *dpio_dev = DPAA2_PER_LCORE_DPIO; + struct qbman_fd *fd; + struct rte_mbuf *m; + + fd = 
qbman_result_eqresp_fd(&dpio_dev->eqresp[eqresp_ci]); + + /* Setting port id does not matter as we are to free the mbuf */ + m = eth_fd_to_mbuf(fd, 0); + rte_pktmbuf_free(m); +} + +static void +dpaa2_set_enqueue_descriptor(struct dpaa2_queue *dpaa2_q, + struct rte_mbuf *m, + struct qbman_eq_desc *eqdesc) +{ + struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data; + struct dpaa2_dev_priv *priv = eth_data->dev_private; + struct dpaa2_dpio_dev *dpio_dev = DPAA2_PER_LCORE_DPIO; + struct eqresp_metadata *eqresp_meta; + uint16_t orpid, seqnum; + uint8_t dq_idx; + + qbman_eq_desc_set_fq(eqdesc, dpaa2_q->fqid); + + if (m->seqn & DPAA2_ENQUEUE_FLAG_ORP) { + orpid = (m->seqn & DPAA2_EQCR_OPRID_MASK) >> + DPAA2_EQCR_OPRID_SHIFT; + seqnum = (m->seqn & DPAA2_EQCR_SEQNUM_MASK) >> + DPAA2_EQCR_SEQNUM_SHIFT; + + if (!priv->en_loose_ordered) { + qbman_eq_desc_set_orp(eqdesc, 1, orpid, seqnum, 0); + qbman_eq_desc_set_response(eqdesc, (uint64_t) + DPAA2_VADDR_TO_IOVA(&dpio_dev->eqresp[ + dpio_dev->eqresp_pi]), 1); + qbman_eq_desc_set_token(eqdesc, 1); + + eqresp_meta = &dpio_dev->eqresp_meta[ + dpio_dev->eqresp_pi]; + eqresp_meta->dpaa2_q = dpaa2_q; + eqresp_meta->mp = m->pool; + + dpio_dev->eqresp_pi + 1 < MAX_EQ_RESP_ENTRIES ? + dpio_dev->eqresp_pi++ : + (dpio_dev->eqresp_pi = 0); + } else { + qbman_eq_desc_set_orp(eqdesc, 0, orpid, seqnum, 0); + } + } else { + dq_idx = m->seqn - 1; + qbman_eq_desc_set_dca(eqdesc, 1, dq_idx, 0); + DPAA2_PER_LCORE_DQRR_SIZE--; + DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dq_idx); + } + m->seqn = DPAA2_INVALID_MBUF_SEQN; +} + +/* Callback to handle sending ordered packets through WRIOP based interface */ +uint16_t +dpaa2_dev_tx_ordered(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) +{ + /* Function to transmit the frames to given device and VQ*/ + struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue; + struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data; + struct dpaa2_dev_priv *priv = eth_data->dev_private; + struct dpaa2_queue *order_sendq = (struct dpaa2_queue *)priv->tx_vq[0]; + struct qbman_fd fd_arr[MAX_TX_RING_SLOTS]; + struct rte_mbuf *mi; + struct rte_mempool *mp; + struct qbman_eq_desc eqdesc[MAX_TX_RING_SLOTS]; + struct qbman_swp *swp; + uint32_t frames_to_send, num_free_eq_desc; + uint32_t loop, retry_count; + int32_t ret; + uint16_t num_tx = 0; + uint16_t bpid; + + if (unlikely(!DPAA2_PER_LCORE_DPIO)) { + ret = dpaa2_affine_qbman_swp(); + if (ret) { + DPAA2_PMD_ERR( + "Failed to allocate IO portal, tid: %d\n", + rte_gettid()); + return 0; + } + } + swp = DPAA2_PER_LCORE_PORTAL; + + DPAA2_PMD_DP_DEBUG("===> eth_data =%p, fqid =%d\n", + eth_data, dpaa2_q->fqid); + + /* This would also handle normal and atomic queues as any type + * of packet can be enqueued when ordered queues are being used. + */ + while (nb_pkts) { + /*Check if the queue is congested*/ + retry_count = 0; + while (qbman_result_SCN_state(dpaa2_q->cscn)) { + retry_count++; + /* Retry for some time before giving up */ + if (retry_count > CONG_RETRY_COUNT) + goto skip_tx; + } + + frames_to_send = (nb_pkts > dpaa2_eqcr_size) ? 
+ dpaa2_eqcr_size : nb_pkts; + + if (!priv->en_loose_ordered) { + if ((*bufs)->seqn & DPAA2_ENQUEUE_FLAG_ORP) { + num_free_eq_desc = dpaa2_free_eq_descriptors(); + if (num_free_eq_desc < frames_to_send) + frames_to_send = num_free_eq_desc; + } + } + + for (loop = 0; loop < frames_to_send; loop++) { + /*Prepare enqueue descriptor*/ + qbman_eq_desc_clear(&eqdesc[loop]); + + if ((*bufs)->seqn) { + /* Use only queue 0 for Tx in case of atomic/ + * ordered packets as packets can get unordered + * when being tranmitted out from the interface + */ + dpaa2_set_enqueue_descriptor(order_sendq, + (*bufs), + &eqdesc[loop]); + } else { + qbman_eq_desc_set_no_orp(&eqdesc[loop], + DPAA2_EQ_RESP_ERR_FQ); + qbman_eq_desc_set_fq(&eqdesc[loop], + dpaa2_q->fqid); + } + + if (likely(RTE_MBUF_DIRECT(*bufs))) { + mp = (*bufs)->pool; + /* Check the basic scenario and set + * the FD appropriately here itself. + */ + if (likely(mp && mp->ops_index == + priv->bp_list->dpaa2_ops_index && + (*bufs)->nb_segs == 1 && + rte_mbuf_refcnt_read((*bufs)) == 1)) { + if (unlikely((*bufs)->ol_flags + & PKT_TX_VLAN_PKT)) { + ret = rte_vlan_insert(bufs); + if (ret) + goto send_n_return; + } + DPAA2_MBUF_TO_CONTIG_FD((*bufs), + &fd_arr[loop], + mempool_to_bpid(mp)); + bufs++; + continue; + } + } else { + mi = rte_mbuf_from_indirect(*bufs); + mp = mi->pool; + } + /* Not a hw_pkt pool allocated frame */ + if (unlikely(!mp || !priv->bp_list)) { + DPAA2_PMD_ERR("Err: No buffer pool attached"); + goto send_n_return; + } + + if (mp->ops_index != priv->bp_list->dpaa2_ops_index) { + DPAA2_PMD_WARN("Non DPAA2 buffer pool"); + /* alloc should be from the default buffer pool + * attached to this interface + */ + bpid = priv->bp_list->buf_pool.bpid; + + if (unlikely((*bufs)->nb_segs > 1)) { + DPAA2_PMD_ERR( + "S/G not supp for non hw offload buffer"); + goto send_n_return; + } + if (eth_copy_mbuf_to_fd(*bufs, + &fd_arr[loop], bpid)) { + goto send_n_return; + } + /* free the original packet */ + rte_pktmbuf_free(*bufs); + } else { + bpid = mempool_to_bpid(mp); + if (unlikely((*bufs)->nb_segs > 1)) { + if (eth_mbuf_to_sg_fd(*bufs, + &fd_arr[loop], + bpid)) + goto send_n_return; + } else { + eth_mbuf_to_fd(*bufs, + &fd_arr[loop], bpid); + } + } + bufs++; + } + + loop = 0; + retry_count = 0; + while (loop < frames_to_send) { + ret = qbman_swp_enqueue_multiple_desc(swp, + &eqdesc[loop], &fd_arr[loop], + frames_to_send - loop); + if (unlikely(ret < 0)) { + retry_count++; + if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) { + num_tx += loop; + nb_pkts -= loop; + goto send_n_return; + } + } else { + loop += ret; + retry_count = 0; + } + } + + num_tx += loop; + nb_pkts -= loop; + } + dpaa2_q->tx_pkts += num_tx; + return num_tx; + +send_n_return: + /* send any already prepared fd */ + if (loop) { + unsigned int i = 0; + + retry_count = 0; + while (i < loop) { + ret = qbman_swp_enqueue_multiple_desc(swp, + &eqdesc[loop], &fd_arr[i], loop - i); + if (unlikely(ret < 0)) { + retry_count++; + if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) + break; + } else { + i += ret; + retry_count = 0; + } + } + num_tx += i; + } +skip_tx: + dpaa2_q->tx_pkts += num_tx; + return num_tx; +} + +/** + * Dummy DPDK callback for TX. + * + * This function is used to temporarily replace the real callback during + * unsafe control operations on the queue, or in case of error. + * + * @param dpdk_txq + * Generic pointer to TX queue structure. + * @param[in] pkts + * Packets to transmit. + * @param pkts_n + * Number of packets in array. 
+ * + * @return + * Number of packets successfully transmitted (<= pkts_n). + */ +uint16_t +dummy_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) +{ + (void)queue; + (void)bufs; + (void)nb_pkts; + return 0; +} + +#if defined(RTE_TOOLCHAIN_GCC) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#elif defined(RTE_TOOLCHAIN_CLANG) +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wcast-qual" +#endif + +/* This function loopbacks all the received packets.*/ +uint16_t +dpaa2_dev_loopback_rx(void *queue, + struct rte_mbuf **bufs __rte_unused, + uint16_t nb_pkts) +{ + /* Function receive frames for a given device and VQ*/ + struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue; + struct qbman_result *dq_storage, *dq_storage1 = NULL; + uint32_t fqid = dpaa2_q->fqid; + int ret, num_rx = 0, num_tx = 0, pull_size; + uint8_t pending, status; + struct qbman_swp *swp; + struct qbman_fd *fd[DPAA2_LX2_DQRR_RING_SIZE]; + struct qbman_pull_desc pulldesc; + struct qbman_eq_desc eqdesc; + struct queue_storage_info_t *q_storage = dpaa2_q->q_storage; + struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data; + struct dpaa2_dev_priv *priv = eth_data->dev_private; + struct dpaa2_queue *tx_q = priv->tx_vq[0]; + /* todo - currently we are using 1st TX queue only for loopback*/ + + if (unlikely(!DPAA2_PER_LCORE_ETHRX_DPIO)) { + ret = dpaa2_affine_qbman_ethrx_swp(); + if (ret) { + DPAA2_PMD_ERR("Failure in affining portal"); + return 0; + } + } + swp = DPAA2_PER_LCORE_ETHRX_PORTAL; + pull_size = (nb_pkts > dpaa2_dqrr_size) ? dpaa2_dqrr_size : nb_pkts; + if (unlikely(!q_storage->active_dqs)) { + q_storage->toggle = 0; + dq_storage = q_storage->dq_storage[q_storage->toggle]; + q_storage->last_num_pkts = pull_size; + qbman_pull_desc_clear(&pulldesc); + qbman_pull_desc_set_numframes(&pulldesc, + q_storage->last_num_pkts); + qbman_pull_desc_set_fq(&pulldesc, fqid); + qbman_pull_desc_set_storage(&pulldesc, dq_storage, + (size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1); + if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) { + while (!qbman_check_command_complete( + get_swp_active_dqs( + DPAA2_PER_LCORE_ETHRX_DPIO->index))) + ; + clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index); + } + while (1) { + if (qbman_swp_pull(swp, &pulldesc)) { + DPAA2_PMD_DP_DEBUG( + "VDQ command not issued.QBMAN busy\n"); + /* Portal was busy, try again */ + continue; + } + break; + } + q_storage->active_dqs = dq_storage; + q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index; + set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index, + dq_storage); + } + + dq_storage = q_storage->active_dqs; + rte_prefetch0((void *)(size_t)(dq_storage)); + rte_prefetch0((void *)(size_t)(dq_storage + 1)); + + /* Prepare next pull descriptor. This will give space for the + * prefething done on DQRR entries + */ + q_storage->toggle ^= 1; + dq_storage1 = q_storage->dq_storage[q_storage->toggle]; + qbman_pull_desc_clear(&pulldesc); + qbman_pull_desc_set_numframes(&pulldesc, pull_size); + qbman_pull_desc_set_fq(&pulldesc, fqid); + qbman_pull_desc_set_storage(&pulldesc, dq_storage1, + (size_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1); + + /*Prepare enqueue descriptor*/ + qbman_eq_desc_clear(&eqdesc); + qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ); + qbman_eq_desc_set_response(&eqdesc, 0, 0); + qbman_eq_desc_set_fq(&eqdesc, tx_q->fqid); + + /* Check if the previous issued command is completed. + * Also seems like the SWP is shared between the Ethernet Driver + * and the SEC driver. 
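+ * (If so, the completion wait below may also be draining a command that + * the SEC driver issued on this portal.)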
+ */ + while (!qbman_check_command_complete(dq_storage)) + ; + if (dq_storage == get_swp_active_dqs(q_storage->active_dpio_id)) + clear_swp_active_dqs(q_storage->active_dpio_id); + + pending = 1; + + do { + /* Loop until the dq_storage is updated with + * new token by QBMAN + */ + while (!qbman_check_new_result(dq_storage)) + ; + rte_prefetch0((void *)((size_t)(dq_storage + 2))); + /* Check whether Last Pull command is Expired and + * setting Condition for Loop termination + */ + if (qbman_result_DQ_is_pull_complete(dq_storage)) { + pending = 0; + /* Check for valid frame. */ + status = qbman_result_DQ_flags(dq_storage); + if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) + continue; + } + fd[num_rx] = (struct qbman_fd *)qbman_result_DQ_fd(dq_storage); + + dq_storage++; + num_rx++; + } while (pending); + + while (num_tx < num_rx) { + num_tx += qbman_swp_enqueue_multiple_fd(swp, &eqdesc, + &fd[num_tx], 0, num_rx - num_tx); + } + + if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) { + while (!qbman_check_command_complete( + get_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index))) + ; + clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index); + } + /* issue a volatile dequeue command for next pull */ + while (1) { + if (qbman_swp_pull(swp, &pulldesc)) { + DPAA2_PMD_DP_DEBUG("VDQ command is not issued." + "QBMAN is busy (2)\n"); + continue; + } + break; + } + q_storage->active_dqs = dq_storage1; + q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index; + set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index, dq_storage1); + + dpaa2_q->rx_pkts += num_rx; + dpaa2_q->tx_pkts += num_tx; + + return 0; +} +#if defined(RTE_TOOLCHAIN_GCC) +#pragma GCC diagnostic pop +#elif defined(RTE_TOOLCHAIN_CLANG) +#pragma clang diagnostic pop +#endif diff --git a/src/spdk/dpdk/drivers/net/dpaa2/dpaa2_sparser.c b/src/spdk/dpdk/drivers/net/dpaa2/dpaa2_sparser.c new file mode 100644 index 000000000..7e8fedd81 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/dpaa2/dpaa2_sparser.c @@ -0,0 +1,269 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2018-2019 NXP + */ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "dpaa2_ethdev.h" +#include "dpaa2_sparser.h" +#include "base/dpaa2_hw_dpni_annot.h" +#define __STDC_FORMAT_MACROS +#include +#include + +uint8_t wriop_bytecode[] = { + 0x00, 0x04, 0x29, 0x42, 0x03, 0xe0, 0x12, 0x00, 0x29, 0x02, + 0x18, 0x00, 0x87, 0x3c, 0x00, 0x02, 0x18, 0x00, 0x00, 0x00 +}; + +struct frame_attr frame_attr_arr[] = { + /* Frame Attribute Flags 1 */ + /* 000 */ {"Routing header present in IPv6 header 2 ", 0, 0x80000000}, + /* 001 */ {"GTP Primed was detected ", 0, 0x40000000}, + /* 002 */ {"VLAN with VID = 0 was detected ", 0, 0x20000000}, + /* 003 */ {"A PTP frame was detected ", 0, 0x10000000}, + /* 004 */ {"VXLAN was parsed ", 0, 0x08000000}, + /* 005 */ {"A VXLAN HXS parsing error was detected ", 0, 0x04000000}, + /* 006 */ {"Ethernet control protocol was detected ", 0, 0x02000000}, + /* 007 */ {"IKE was detected at UDP port 4500 ", 0, 0x01000000}, + /* 008 */ {"Shim Shell Soft Parsing Error ", 0, 0x00800000}, + /* 009 */ {"Parsing Error ", 0, 0x00400000}, + /* 010 */ {"Ethernet MAC Present ", 0, 0x00200000}, + /* 011 */ {"Ethernet Unicast ", 0, 0x00100000}, + /* 012 */ {"Ethernet Multicast ", 0, 0x00080000}, + /* 013 */ {"Ethernet Broadcast ", 0, 0x00040000}, + /* 014 */ {"BPDU frame (MAC DA is 01:80:C2:00:00:00)", 0, 0x00020000}, + /* 015 */ {"FCoE detected (Ether type is 
0x8906) ", 0, 0x00010000}, + /* 016 */ {"FIP detected (Ether type is 0x8914) ", 0, 0x00008000}, + /* 017 */ {"Ethernet Parsing Error ", 0, 0x00004000}, + /* 018 */ {"LLC+SNAP Present ", 0, 0x00002000}, + /* 019 */ {"Unknown LLC/OUI ", 0, 0x00001000}, + /* 020 */ {"LLC+SNAP Error ", 0, 0x00000800}, + /* 021 */ {"VLAN 1 Present ", 0, 0x00000400}, + /* 022 */ {"VLAN n Present ", 0, 0x00000200}, + /* 023 */ {"CFI bit in a \"8100\" VLAN tag is set ", 0, 0x00000100}, + /* 024 */ {"VLAN Parsing Error ", 0, 0x00000080}, + /* 025 */ {"PPPoE+PPP Present ", 0, 0x00000040}, + /* 026 */ {"PPPoE+PPP Parsing Error ", 0, 0x00000020}, + /* 027 */ {"MPLS 1 Present ", 0, 0x00000010}, + /* 028 */ {"MPLS n Present ", 0, 0x00000008}, + /* 029 */ {"MPLS Parsing Error ", 0, 0x00000004}, + /* 030 */ {"ARP frame Present (Ethertype 0x0806) ", 0, 0x00000002}, + /* 031 */ {"ARP Parsing Error ", 0, 0x00000001}, + /* Frame Attribute Flags 2 */ + /* 032 */ {"L2 Unknown Protocol ", 1, 0x80000000}, + /* 033 */ {"L2 Soft Parsing Error ", 1, 0x40000000}, + /* 034 */ {"IPv4 1 Present ", 1, 0x20000000}, + /* 035 */ {"IPv4 1 Unicast ", 1, 0x10000000}, + /* 036 */ {"IPv4 1 Multicast ", 1, 0x08000000}, + /* 037 */ {"IPv4 1 Broadcast ", 1, 0x04000000}, + /* 038 */ {"IPv4 n Present ", 1, 0x02000000}, + /* 039 */ {"IPv4 n Unicast ", 1, 0x01000000}, + /* 040 */ {"IPv4 n Multicast ", 1, 0x00800000}, + /* 041 */ {"IPv4 n Broadcast ", 1, 0x00400000}, + /* 042 */ {"IPv6 1 Present ", 1, 0x00200000}, + /* 043 */ {"IPv6 1 Unicast ", 1, 0x00100000}, + /* 044 */ {"IPv6 1 Multicast ", 1, 0x00080000}, + /* 045 */ {"IPv6 n Present ", 1, 0x00040000}, + /* 046 */ {"IPv6 n Unicast ", 1, 0x00020000}, + /* 047 */ {"IPv6 n Multicast ", 1, 0x00010000}, + /* 048 */ {"IP 1 option present ", 1, 0x00008000}, + /* 049 */ {"IP 1 Unknown Protocol ", 1, 0x00004000}, + /* 050 */ {"IP 1 Packet is a fragment ", 1, 0x00002000}, + /* 051 */ {"IP 1 Packet is an initial fragment ", 1, 0x00001000}, + /* 052 */ {"IP 1 Parsing Error ", 1, 0x00000800}, + /* 053 */ {"IP n option present ", 1, 0x00000400}, + /* 054 */ {"IP n Unknown Protocol ", 1, 0x00000200}, + /* 055 */ {"IP n Packet is a fragment ", 1, 0x00000100}, + /* 056 */ {"IP n Packet is an initial fragment ", 1, 0x00000080}, + /* 057 */ {"ICMP detected (IP proto is 1) ", 1, 0x00000040}, + /* 058 */ {"IGMP detected (IP proto is 2) ", 1, 0x00000020}, + /* 059 */ {"ICMPv6 detected (IP proto is 3a) ", 1, 0x00000010}, + /* 060 */ {"UDP Light detected (IP proto is 136) ", 1, 0x00000008}, + /* 061 */ {"IP n Parsing Error ", 1, 0x00000004}, + /* 062 */ {"Min. Encap Present ", 1, 0x00000002}, + /* 063 */ {"Min. Encap S flag set ", 1, 0x00000001}, + /* Frame Attribute Flags 3 */ + /* 064 */ {"Min. 
Encap Parsing Error ", 2, 0x80000000}, + /* 065 */ {"GRE Present ", 2, 0x40000000}, + /* 066 */ {"GRE R bit set ", 2, 0x20000000}, + /* 067 */ {"GRE Parsing Error ", 2, 0x10000000}, + /* 068 */ {"L3 Unknown Protocol ", 2, 0x08000000}, + /* 069 */ {"L3 Soft Parsing Error ", 2, 0x04000000}, + /* 070 */ {"UDP Present ", 2, 0x02000000}, + /* 071 */ {"UDP Parsing Error ", 2, 0x01000000}, + /* 072 */ {"TCP Present ", 2, 0x00800000}, + /* 073 */ {"TCP options present ", 2, 0x00400000}, + /* 074 */ {"TCP Control bits 6-11 set ", 2, 0x00200000}, + /* 075 */ {"TCP Control bits 3-5 set ", 2, 0x00100000}, + /* 076 */ {"TCP Parsing Error ", 2, 0x00080000}, + /* 077 */ {"IPSec Present ", 2, 0x00040000}, + /* 078 */ {"IPSec ESP found ", 2, 0x00020000}, + /* 079 */ {"IPSec AH found ", 2, 0x00010000}, + /* 080 */ {"IPSec Parsing Error ", 2, 0x00008000}, + /* 081 */ {"SCTP Present ", 2, 0x00004000}, + /* 082 */ {"SCTP Parsing Error ", 2, 0x00002000}, + /* 083 */ {"DCCP Present ", 2, 0x00001000}, + /* 084 */ {"DCCP Parsing Error ", 2, 0x00000800}, + /* 085 */ {"L4 Unknown Protocol ", 2, 0x00000400}, + /* 086 */ {"L4 Soft Parsing Error ", 2, 0x00000200}, + /* 087 */ {"GTP Present ", 2, 0x00000100}, + /* 088 */ {"GTP Parsing Error ", 2, 0x00000080}, + /* 089 */ {"ESP Present ", 2, 0x00000040}, + /* 090 */ {"ESP Parsing Error ", 2, 0x00000020}, + /* 091 */ {"iSCSI detected (Port# 860) ", 2, 0x00000010}, + /* 092 */ {"Capwap-control detected (Port# 5246) ", 2, 0x00000008}, + /* 093 */ {"Capwap-data detected (Port# 5247) ", 2, 0x00000004}, + /* 094 */ {"L5 Soft Parsing Error ", 2, 0x00000002}, + /* 095 */ {"IPv6 Route hdr1 present ", 2, 0x00000001}, + /* 096 */ {NULL, 0, 0x00000000} +}; + +struct frame_attr_ext frame_attr_ext_arr[] = { + /* Frame Attribute Flags Extension */ + /* 096 */ {"User defined soft parser bit #0 ", 0, 0x8000}, + /* 096 */ {"User defined soft parser bit #1 ", 0, 0x4000}, + /* 096 */ {"User defined soft parser bit #2 ", 0, 0x2000}, + /* 096 */ {"User defined soft parser bit #3 ", 0, 0x1000}, + /* 096 */ {"User defined soft parser bit #4 ", 0, 0x0800}, + /* 096 */ {"User defined soft parser bit #5 ", 0, 0x0400}, + /* 096 */ {"User defined soft parser bit #6 ", 0, 0x0200}, + /* 096 */ {"User defined soft parser bit #7 ", 0, 0x0100}, + /* 097 */ {"Reserved ", 0, 0x00ff}, + /* 112 */ {NULL, 0, 0x0000} +}; + +#define SWAP_WORD(pr) \ +do { \ + for (int i = 0; i < 4 ; i++) { \ + pr[i] = pr[i] ^ pr[6 - i + 1]; \ + pr[6 - i + 1] = pr[6 - i + 1] ^ pr[i]; \ + pr[i] = pr[i] ^ pr[6 - i + 1]; \ + } \ +} while (0) + +#define fa_print_sb() \ +do { \ + if (rte_cpu_to_be_32(*pdw) & frm_attr->fld_mask) \ + DPAA2_PMD_DP_DEBUG("t %s : Yes", frm_attr->fld_name); \ +} while (0) + +#define fa_print_sb_ext() \ +do { \ + if (rte_cpu_to_be_16(*pw) & frm_attr_ext->fld_mask) \ + DPAA2_PMD_DP_DEBUG("\t %s : Yes", \ + frm_attr_ext->fld_name); \ +} while (0) + +#define fa_print_mb_ext() \ +do { \ + if (rte_cpu_to_be_16(*pw) & frm_attr_ext->fld_mask) \ + DPAA2_PMD_DP_DEBUG("\t %s : 0x%02x", \ + frm_attr_ext->fld_name, \ + rte_cpu_to_be_16(*pw) & frm_attr_ext->fld_mask);\ +} while (0) + +int dpaa2_eth_load_wriop_soft_parser(struct dpaa2_dev_priv *priv, + enum dpni_soft_sequence_dest dest) +{ + struct fsl_mc_io *dpni = priv->hw; + struct dpni_load_ss_cfg cfg; + struct dpni_drv_sparser_param sp_param; + uint8_t *addr; + int ret; + + memset(&sp_param, 0, sizeof(sp_param)); + sp_param.start_pc = priv->ss_offset; + sp_param.byte_code = &wriop_bytecode[0]; + sp_param.size = sizeof(wriop_bytecode); + + cfg.dest = dest; + 
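+ /* ss_offset/ss_size describe where in WRIOP parse-profile memory the + * byte code is placed: priv->ss_offset starts at the base configured by + * WRIOP_SS_INITIALIZER and is advanced below by the size of each + * sequence that gets loaded. + */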
cfg.ss_offset = sp_param.start_pc; + cfg.ss_size = sp_param.size; + + addr = rte_malloc(NULL, sp_param.size, 64); + if (!addr) { + DPAA2_PMD_ERR("Memory unavailable for soft parser param\n"); + return -1; + } + + memcpy(addr, sp_param.byte_code, sp_param.size); + cfg.ss_iova = (uint64_t)(DPAA2_VADDR_TO_IOVA(addr)); + + ret = dpni_load_sw_sequence(dpni, CMD_PRI_LOW, priv->token, &cfg); + if (ret) { + DPAA2_PMD_ERR("dpni_load_sw_sequence failed\n"); + rte_free(addr); + return ret; + } + + priv->ss_iova = (uint64_t)(DPAA2_VADDR_TO_IOVA(addr)); + priv->ss_offset += sp_param.size; + RTE_LOG(INFO, PMD, "Soft parser loaded for dpni@%d\n", priv->hw_id); + + rte_free(addr); + return 0; +} + +int dpaa2_eth_enable_wriop_soft_parser(struct dpaa2_dev_priv *priv, + enum dpni_soft_sequence_dest dest) +{ + struct fsl_mc_io *dpni = priv->hw; + struct dpni_enable_ss_cfg cfg; + uint8_t pa[3]; + struct dpni_drv_sparser_param sp_param; + uint8_t *param_addr = NULL; + int ret; + + memset(&sp_param, 0, sizeof(sp_param)); + pa[0] = 32; /* Custom Header Length in bytes */ + sp_param.custom_header_first = 1; + sp_param.param_offset = 32; + sp_param.param_size = 1; + sp_param.start_pc = priv->ss_offset; + sp_param.param_array = (uint8_t *)&pa[0]; + + cfg.dest = dest; + cfg.ss_offset = sp_param.start_pc; + cfg.set_start = sp_param.custom_header_first; + cfg.hxs = (uint16_t)sp_param.link_to_hard_hxs; + cfg.param_offset = sp_param.param_offset; + cfg.param_size = sp_param.param_size; + if (cfg.param_size) { + param_addr = rte_malloc(NULL, cfg.param_size, 64); + if (!param_addr) { + DPAA2_PMD_ERR("Memory unavailable for soft parser param\n"); + return -1; + } + + memcpy(param_addr, sp_param.param_array, cfg.param_size); + cfg.param_iova = (uint64_t)(DPAA2_VADDR_TO_IOVA(param_addr)); + priv->ss_param_iova = cfg.param_iova; + } else { + cfg.param_iova = 0; + } + + ret = dpni_enable_sw_sequence(dpni, CMD_PRI_LOW, priv->token, &cfg); + if (ret) { + DPAA2_PMD_ERR("dpni_enable_sw_sequence failed for dpni%d\n", + priv->hw_id); + rte_free(param_addr); + return ret; + } + + rte_free(param_addr); + RTE_LOG(INFO, PMD, "Soft parser enabled for dpni@%d\n", priv->hw_id); + return 0; +} diff --git a/src/spdk/dpdk/drivers/net/dpaa2/dpaa2_sparser.h b/src/spdk/dpdk/drivers/net/dpaa2/dpaa2_sparser.h new file mode 100644 index 000000000..365b8062a --- /dev/null +++ b/src/spdk/dpdk/drivers/net/dpaa2/dpaa2_sparser.h @@ -0,0 +1,206 @@ +/* * SPDX-License-Identifier: BSD-3-Clause + * Copyright 2018-2019 NXP + */ + +/** + * @file dpaa2_sparser.h + * + * @brief Soft parser related macros & functions support for DPAA2 device + * framework based applications. 
+ * + */ + +#ifndef _DPAA2_SPARSER_H +#define _DPAA2_SPARSER_H + +#ifdef __cplusplus +extern "C" { +#endif + +#define WRIOP_SS_INITIALIZER(priv) \ +do { \ + /* Base offset of parse profile memory in WRIOP */ \ + (priv)->ss_offset = 0x20; \ + (priv)->ss_iova = (size_t)NULL; \ + (priv)->ss_param_iova = (size_t)NULL; \ +} while (0) + +/**************************************************************************/ +/* + * @enum parser_starting_hxs_code + * @Description PARSER Starting HXS code + */ +/***************************************************************************/ +enum parser_starting_hxs_code { + /** Ethernet Starting HXS coding */ + PARSER_ETH_STARTING_HXS = 0x0000, + /** LLC+SNAP Starting HXS coding */ + PARSER_LLC_SNAP_STARTING_HXS = 0x0001, + /** VLAN Starting HXS coding */ + PARSER_VLAN_STARTING_HXS = 0x0002, + /** PPPoE+PPP Starting HXS coding */ + PARSER_PPPOE_PPP_STARTING_HXS = 0x0003, + /** MPLS Starting HXS coding */ + PARSER_MPLS_STARTING_HXS = 0x0004, + /** ARP Starting HXS coding */ + PARSER_ARP_STARTING_HXS = 0x0005, + /** IP Starting HXS coding */ + PARSER_IP_STARTING_HXS = 0x0006, + /** IPv4 Starting HXS coding */ + PARSER_IPV4_STARTING_HXS = 0x0007, + /** IPv6 Starting HXS coding */ + PARSER_IPV6_STARTING_HXS = 0x0008, + /** GRE Starting HXS coding */ + PARSER_GRE_STARTING_HXS = 0x0009, + /** MinEncap Starting HXS coding */ + PARSER_MINENCAP_STARTING_HXS = 0x000A, + /** Other L3 Shell Starting HXS coding */ + PARSER_OTHER_L3_SHELL_STARTING_HXS = 0x000B, + /** TCP Starting HXS coding */ + PARSER_TCP_STARTING_HXS = 0x000C, + /** UDP Starting HXS coding */ + PARSER_UDP_STARTING_HXS = 0x000D, + /** IPSec Starting HXS coding */ + PARSER_IPSEC_STARTING_HXS = 0x000E, + /** SCTP Starting HXS coding */ + PARSER_SCTP_STARTING_HXS = 0x000F, + /** DCCP Starting HXS coding */ + PARSER_DCCP_STARTING_HXS = 0x0010, + /** Other L4 Shell Starting HXS coding */ + PARSER_OTHER_L4_SHELL_STARTING_HXS = 0x0011, + /** GTP Starting HXS coding */ + PARSER_GTP_STARTING_HXS = 0x0012, + /** ESP Starting HXS coding */ + PARSER_ESP_STARTING_HXS = 0x0013, + /** VXLAN Starting HXS coding */ + PARSER_VXLAN_STARTING_HXS = 0x0014, + /** L5 (and above) Shell Starting HXS coding */ + PARSER_L5_SHELL_STARTING_HXS = 0x001E, + /** Final Shell Starting HXS coding */ + PARSER_FINAL_SHELL_STARTING_HXS = 0x001F +}; + +/**************************************************************************/ +/* + * @Description struct dpni_drv_sparser_param - Structure representing the + * information needed to activate(enable) a Soft Parser. + */ +/***************************************************************************/ + +struct dpni_drv_sparser_param { + /* The "custom_header_first" must be set if the custom header to parse + * is the first header in the packet, otherwise "custom_header_first" + * must be cleared. + */ + uint8_t custom_header_first; + /* Hard HXS on which a soft parser is activated. This must be + * configured. + * if the header to parse is not the first header in the packet. 
+ */ + enum parser_starting_hxs_code link_to_hard_hxs; + /* Soft Sequence Start PC */ + uint16_t start_pc; + /* Soft Sequence byte-code */ + uint8_t *byte_code; + /* Soft Sequence size */ + uint16_t size; + /* Pointer to the Parameters Array of the SP */ + uint8_t *param_array; + /* Parameters offset */ + uint8_t param_offset; + /* Parameters size */ + uint8_t param_size; +}; + +struct sp_parse_result { + /* Next header */ + uint16_t nxt_hdr; + /* Frame Attribute Flags Extension */ + uint16_t frame_attribute_flags_extension; + /* Frame Attribute Flags (part 1) */ + uint32_t frame_attribute_flags_1; + /* Frame Attribute Flags (part 2) */ + uint32_t frame_attribute_flags_2; + /* Frame Attribute Flags (part 3) */ + uint32_t frame_attribute_flags_3; + /* Shim Offset 1 */ + uint8_t shim_offset_1; + /* Shim Offset 2 */ + uint8_t shim_offset_2; + /* Outer IP protocol field offset */ + uint8_t ip_1_pid_offset; + /* Ethernet offset */ + uint8_t eth_offset; + /* LLC+SNAP offset */ + uint8_t llc_snap_offset; + /* First VLAN's TCI field offset*/ + uint8_t vlan_tci1_offset; + /* Last VLAN's TCI field offset*/ + uint8_t vlan_tcin_offset; + /* Last Ethertype offset*/ + uint8_t last_etype_offset; + /* PPPoE offset */ + uint8_t pppoe_offset; + /* First MPLS offset */ + uint8_t mpls_offset_1; + /* Last MPLS offset */ + uint8_t mpls_offset_n; + /* Layer 3 (Outer IP, ARP, FCoE or FIP) offset */ + uint8_t l3_offset; + /* Inner IP or MinEncap offset*/ + uint8_t ipn_or_minencap_offset; + /* GRE offset */ + uint8_t gre_offset; + /* Layer 4 offset*/ + uint8_t l4_offset; + /* Layer 5 offset */ + uint8_t l5_offset; + /* Routing header offset of 1st IPv6 header */ + uint8_t routing_hdr_offset1; + /* Routing header offset of 2nd IPv6 header */ + uint8_t routing_hdr_offset2; + /* Next header offset */ + uint8_t nxt_hdr_offset; + /* IPv6 fragmentable part offset */ + uint8_t ipv6_frag_offset; + /* Frame's untouched running sum, input to parser */ + uint16_t gross_running_sum; + /* Running Sum */ + uint16_t running_sum; + /* Parse Error code */ + uint8_t parse_error_code; + /* Offset to the next header field before IPv6 fragment extension */ + uint8_t nxt_hdr_before_ipv6_frag_ext; + /* Inner IP Protocol field offset */ + uint8_t ip_n_pid_offset; + /* Reserved for Soft parsing context*/ + uint8_t soft_parsing_context[21]; +}; + +struct frame_attr { + const char *fld_name; + uint8_t faf_offset; + uint32_t fld_mask; +}; + +struct frame_attr_ext { + const char *fld_name; + uint8_t faf_ext_offset; + uint16_t fld_mask; +}; + + +struct parse_err { + uint16_t code; + const char *err_name; +}; + +/* Macro definitions */ +#define IS_ONE_BIT_FIELD(_mask) \ +(!((_mask) & ((_mask) - 1)) || (_mask == 1)) + +int dpaa2_eth_load_wriop_soft_parser(struct dpaa2_dev_priv *priv, + enum dpni_soft_sequence_dest dest); +int dpaa2_eth_enable_wriop_soft_parser(struct dpaa2_dev_priv *priv, + enum dpni_soft_sequence_dest dest); +#endif /* _DPAA2_SPARSER_H_ */ diff --git a/src/spdk/dpdk/drivers/net/dpaa2/mc/dpdmux.c b/src/spdk/dpdk/drivers/net/dpaa2/mc/dpdmux.c new file mode 100644 index 000000000..63f1ec7d3 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/dpaa2/mc/dpdmux.c @@ -0,0 +1,929 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) + * + * Copyright 2013-2016 Freescale Semiconductor Inc. 
+ * Copyright 2018-2019 NXP + * + */ +#include +#include +#include +#include + +/** @addtogroup dpdmux + * @{ + */ + +/** + * dpdmux_open() - Open a control session for the specified object + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @dpdmux_id: DPDMUX unique ID + * @token: Returned token; use in subsequent API calls + * + * This function can be used to open a control session for an + * already created object; an object may have been declared in + * the DPL or by calling the dpdmux_create() function. + * This function returns a unique authentication token, + * associated with the specific object ID and the specific MC + * portal; this token must be used in all subsequent commands for + * this specific object. + * + * Return: '0' on Success; Error code otherwise. + */ +int dpdmux_open(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + int dpdmux_id, + uint16_t *token) +{ + struct mc_command cmd = { 0 }; + struct dpdmux_cmd_open *cmd_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_OPEN, + cmd_flags, + 0); + cmd_params = (struct dpdmux_cmd_open *)cmd.params; + cmd_params->dpdmux_id = cpu_to_le32(dpdmux_id); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + *token = mc_cmd_hdr_read_token(&cmd); + + return 0; +} + +/** + * dpdmux_close() - Close the control session of the object + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPDMUX object + * + * After this function is called, no further operations are + * allowed on the object without opening a new control session. + * + * Return: '0' on Success; Error code otherwise. + */ +int dpdmux_close(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token) +{ + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_CLOSE, + cmd_flags, + token); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpdmux_create() - Create the DPDMUX object + * @mc_io: Pointer to MC portal's I/O object + * @dprc_token: Parent container token; '0' for default container + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @cfg: Configuration structure + * @obj_id: returned object id + * + * Create the DPDMUX object, allocate required resources and + * perform required initialization. + * + * The object can be created either by declaring it in the + * DPL file, or by calling this function. + * + * The function accepts an authentication token of a parent + * container that this object should be assigned to. The token + * can be '0' so the object will be assigned to the default container. + * The newly created object can be opened with the returned + * object id and using the container's associated tokens and MC portals. + * + * Return: '0' on Success; Error code otherwise. 
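+ * + * Illustrative call sequence (a sketch only: error handling is trimmed and + * 'mc_io', 'cfg' and the CMD_PRI_LOW flag used elsewhere in this driver are + * assumed to be set up by the caller): + * + *	int err; + *	uint32_t obj_id; + *	uint16_t token; + * + *	err = dpdmux_create(mc_io, 0, CMD_PRI_LOW, &cfg, &obj_id); + *	if (!err) + *		err = dpdmux_open(mc_io, CMD_PRI_LOW, obj_id, &token); + *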
+ */ +int dpdmux_create(struct fsl_mc_io *mc_io, + uint16_t dprc_token, + uint32_t cmd_flags, + const struct dpdmux_cfg *cfg, + uint32_t *obj_id) +{ + struct mc_command cmd = { 0 }; + struct dpdmux_cmd_create *cmd_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_CREATE, + cmd_flags, + dprc_token); + cmd_params = (struct dpdmux_cmd_create *)cmd.params; + cmd_params->method = cfg->method; + cmd_params->manip = cfg->manip; + cmd_params->num_ifs = cpu_to_le16(cfg->num_ifs); + cmd_params->adv_max_dmat_entries = + cpu_to_le16(cfg->adv.max_dmat_entries); + cmd_params->adv_max_mc_groups = cpu_to_le16(cfg->adv.max_mc_groups); + cmd_params->adv_max_vlan_ids = cpu_to_le16(cfg->adv.max_vlan_ids); + cmd_params->options = cpu_to_le64(cfg->adv.options); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + *obj_id = mc_cmd_read_object_id(&cmd); + + return 0; +} + +/** + * dpdmux_destroy() - Destroy the DPDMUX object and release all its resources. + * @mc_io: Pointer to MC portal's I/O object + * @dprc_token: Parent container token; '0' for default container + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @object_id: The object id; it must be a valid id within the container that + * created this object; + * + * The function accepts the authentication token of the parent container that + * created the object (not the one that currently owns the object). The object + * is searched within parent using the provided 'object_id'. + * All tokens to the object must be closed before calling destroy. + * + * Return: '0' on Success; error code otherwise. + */ +int dpdmux_destroy(struct fsl_mc_io *mc_io, + uint16_t dprc_token, + uint32_t cmd_flags, + uint32_t object_id) +{ + struct mc_command cmd = { 0 }; + struct dpdmux_cmd_destroy *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_DESTROY, + cmd_flags, + dprc_token); + cmd_params = (struct dpdmux_cmd_destroy *)cmd.params; + cmd_params->dpdmux_id = cpu_to_le32(object_id); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpdmux_enable() - Enable DPDMUX functionality + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPDMUX object + * + * Return: '0' on Success; Error code otherwise. + */ +int dpdmux_enable(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token) +{ + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_ENABLE, + cmd_flags, + token); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpdmux_disable() - Disable DPDMUX functionality + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPDMUX object + * + * Return: '0' on Success; Error code otherwise. + */ +int dpdmux_disable(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token) +{ + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_DISABLE, + cmd_flags, + token); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpdmux_is_enabled() - Check if the DPDMUX is enabled. 
+ * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPDMUX object + * @en: Returns '1' if object is enabled; '0' otherwise + * + * Return: '0' on Success; Error code otherwise. + */ +int dpdmux_is_enabled(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + int *en) +{ + struct mc_command cmd = { 0 }; + struct dpdmux_rsp_is_enabled *rsp_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IS_ENABLED, + cmd_flags, + token); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpdmux_rsp_is_enabled *)cmd.params; + *en = dpdmux_get_field(rsp_params->en, ENABLE); + + return 0; +} + +/** + * dpdmux_reset() - Reset the DPDMUX, returns the object to initial state. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPDMUX object + * + * Return: '0' on Success; Error code otherwise. + */ +int dpdmux_reset(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token) +{ + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_RESET, + cmd_flags, + token); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpdmux_get_attributes() - Retrieve DPDMUX attributes + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPDMUX object + * @attr: Returned object's attributes + * + * Return: '0' on Success; Error code otherwise. + */ +int dpdmux_get_attributes(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + struct dpdmux_attr *attr) +{ + struct mc_command cmd = { 0 }; + struct dpdmux_rsp_get_attr *rsp_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_GET_ATTR, + cmd_flags, + token); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpdmux_rsp_get_attr *)cmd.params; + attr->id = le32_to_cpu(rsp_params->id); + attr->options = le64_to_cpu(rsp_params->options); + attr->method = rsp_params->method; + attr->manip = rsp_params->manip; + attr->num_ifs = le16_to_cpu(rsp_params->num_ifs); + attr->mem_size = le16_to_cpu(rsp_params->mem_size); + + return 0; +} + +/** + * dpdmux_if_enable() - Enable Interface + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPDMUX object + * @if_id: Interface Identifier + * + * Return: Completion status. '0' on Success; Error code otherwise. + */ +int dpdmux_if_enable(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint16_t if_id) +{ + struct dpdmux_cmd_if *cmd_params; + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_ENABLE, + cmd_flags, + token); + cmd_params = (struct dpdmux_cmd_if *)cmd.params; + cmd_params->if_id = cpu_to_le16(if_id); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpdmux_if_disable() - Disable Interface + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPDMUX object + * @if_id: Interface Identifier + * + * Return: Completion status. '0' on Success; Error code otherwise. 
+ */ +int dpdmux_if_disable(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint16_t if_id) +{ + struct dpdmux_cmd_if *cmd_params; + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_DISABLE, + cmd_flags, + token); + cmd_params = (struct dpdmux_cmd_if *)cmd.params; + cmd_params->if_id = cpu_to_le16(if_id); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpdmux_set_max_frame_length() - Set the maximum frame length in DPDMUX + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPDMUX object + * @max_frame_length: The required maximum frame length + * + * Update the maximum frame length on all DMUX interfaces. + * In case of VEPA, the maximum frame length on all dmux interfaces + * will be updated with the minimum value of the mfls of the connected + * dpnis and the actual value of dmux mfl. + * + * Return: '0' on Success; Error code otherwise. + */ +int dpdmux_set_max_frame_length(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint16_t max_frame_length) +{ + struct mc_command cmd = { 0 }; + struct dpdmux_cmd_set_max_frame_length *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_SET_MAX_FRAME_LENGTH, + cmd_flags, + token); + cmd_params = (struct dpdmux_cmd_set_max_frame_length *)cmd.params; + cmd_params->max_frame_length = cpu_to_le16(max_frame_length); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpdmux_ul_reset_counters() - Function resets the uplink counter + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPDMUX object + * + * Return: '0' on Success; Error code otherwise. + */ +int dpdmux_ul_reset_counters(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token) +{ + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_UL_RESET_COUNTERS, + cmd_flags, + token); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpdmux_if_set_accepted_frames() - Set the accepted frame types + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPDMUX object + * @if_id: Interface ID (0 for uplink, or 1-num_ifs); + * @cfg: Frame types configuration + * + * if 'DPDMUX_ADMIT_ONLY_VLAN_TAGGED' is set - untagged frames or + * priority-tagged frames are discarded. + * if 'DPDMUX_ADMIT_ONLY_UNTAGGED' is set - untagged frames or + * priority-tagged frames are accepted. + * if 'DPDMUX_ADMIT_ALL' is set (default mode) - all VLAN tagged, + * untagged and priority-tagged frame are accepted; + * + * Return: '0' on Success; Error code otherwise. 
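+ * + * For example, to select the default mode on the uplink (a sketch: 'mc_io' + * and 'token' come from a prior dpdmux_open(), and 'unaccept_act' is left + * zeroed since every frame type is admitted here): + * + *	struct dpdmux_accepted_frames cfg = { .type = DPDMUX_ADMIT_ALL }; + * + *	err = dpdmux_if_set_accepted_frames(mc_io, CMD_PRI_LOW, token, 0, &cfg); + *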
+ */ +int dpdmux_if_set_accepted_frames(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint16_t if_id, + const struct dpdmux_accepted_frames *cfg) +{ + struct mc_command cmd = { 0 }; + struct dpdmux_cmd_if_set_accepted_frames *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_SET_ACCEPTED_FRAMES, + cmd_flags, + token); + cmd_params = (struct dpdmux_cmd_if_set_accepted_frames *)cmd.params; + cmd_params->if_id = cpu_to_le16(if_id); + dpdmux_set_field(cmd_params->frames_options, + ACCEPTED_FRAMES_TYPE, + cfg->type); + dpdmux_set_field(cmd_params->frames_options, + UNACCEPTED_FRAMES_ACTION, + cfg->unaccept_act); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpdmux_if_get_attributes() - Obtain DPDMUX interface attributes + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPDMUX object + * @if_id: Interface ID (0 for uplink, or 1-num_ifs); + * @attr: Interface attributes + * + * Return: '0' on Success; Error code otherwise. + */ +int dpdmux_if_get_attributes(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint16_t if_id, + struct dpdmux_if_attr *attr) +{ + struct mc_command cmd = { 0 }; + struct dpdmux_cmd_if *cmd_params; + struct dpdmux_rsp_if_get_attr *rsp_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_GET_ATTR, + cmd_flags, + token); + cmd_params = (struct dpdmux_cmd_if *)cmd.params; + cmd_params->if_id = cpu_to_le16(if_id); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpdmux_rsp_if_get_attr *)cmd.params; + attr->rate = le32_to_cpu(rsp_params->rate); + attr->enabled = dpdmux_get_field(rsp_params->enabled, ENABLE); + attr->is_default = dpdmux_get_field(rsp_params->enabled, IS_DEFAULT); + attr->accept_frame_type = dpdmux_get_field( + rsp_params->accepted_frames_type, + ACCEPTED_FRAMES_TYPE); + + return 0; +} + +/** + * dpdmux_if_remove_l2_rule() - Remove L2 rule from DPDMUX table + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPDMUX object + * @if_id: Destination interface ID + * @rule: L2 rule + * + * Function removes a L2 rule from DPDMUX table + * or adds an interface to an existing multicast address + * + * Return: '0' on Success; Error code otherwise. 
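+ * + * The rule names a (MAC address, VLAN) pair and typically matches one + * installed earlier with dpdmux_if_add_l2_rule(), e.g. (sketch; the values + * are only illustrative): + * + *	struct dpdmux_l2_rule rule = { + *		.mac_addr = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 }, + *		.vlan_id = 100, + *	}; + * + *	err = dpdmux_if_remove_l2_rule(mc_io, CMD_PRI_LOW, token, if_id, &rule); + *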
+ */ +int dpdmux_if_remove_l2_rule(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint16_t if_id, + const struct dpdmux_l2_rule *rule) +{ + struct mc_command cmd = { 0 }; + struct dpdmux_cmd_if_l2_rule *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_REMOVE_L2_RULE, + cmd_flags, + token); + cmd_params = (struct dpdmux_cmd_if_l2_rule *)cmd.params; + cmd_params->if_id = cpu_to_le16(if_id); + cmd_params->vlan_id = cpu_to_le16(rule->vlan_id); + cmd_params->mac_addr5 = rule->mac_addr[5]; + cmd_params->mac_addr4 = rule->mac_addr[4]; + cmd_params->mac_addr3 = rule->mac_addr[3]; + cmd_params->mac_addr2 = rule->mac_addr[2]; + cmd_params->mac_addr1 = rule->mac_addr[1]; + cmd_params->mac_addr0 = rule->mac_addr[0]; + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpdmux_if_add_l2_rule() - Add L2 rule into DPDMUX table + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPDMUX object + * @if_id: Destination interface ID + * @rule: L2 rule + * + * Function adds a L2 rule into DPDMUX table + * or adds an interface to an existing multicast address + * + * Return: '0' on Success; Error code otherwise. + */ +int dpdmux_if_add_l2_rule(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint16_t if_id, + const struct dpdmux_l2_rule *rule) +{ + struct mc_command cmd = { 0 }; + struct dpdmux_cmd_if_l2_rule *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_ADD_L2_RULE, + cmd_flags, + token); + cmd_params = (struct dpdmux_cmd_if_l2_rule *)cmd.params; + cmd_params->if_id = cpu_to_le16(if_id); + cmd_params->vlan_id = cpu_to_le16(rule->vlan_id); + cmd_params->mac_addr5 = rule->mac_addr[5]; + cmd_params->mac_addr4 = rule->mac_addr[4]; + cmd_params->mac_addr3 = rule->mac_addr[3]; + cmd_params->mac_addr2 = rule->mac_addr[2]; + cmd_params->mac_addr1 = rule->mac_addr[1]; + cmd_params->mac_addr0 = rule->mac_addr[0]; + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpdmux_if_get_counter() - Functions obtains specific counter of an interface + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPDMUX object + * @if_id: Interface Id + * @counter_type: counter type + * @counter: Returned specific counter information + * + * Return: '0' on Success; Error code otherwise. + */ +int dpdmux_if_get_counter(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint16_t if_id, + enum dpdmux_counter_type counter_type, + uint64_t *counter) +{ + struct mc_command cmd = { 0 }; + struct dpdmux_cmd_if_get_counter *cmd_params; + struct dpdmux_rsp_if_get_counter *rsp_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_GET_COUNTER, + cmd_flags, + token); + cmd_params = (struct dpdmux_cmd_if_get_counter *)cmd.params; + cmd_params->if_id = cpu_to_le16(if_id); + cmd_params->counter_type = counter_type; + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpdmux_rsp_if_get_counter *)cmd.params; + *counter = le64_to_cpu(rsp_params->counter); + + return 0; +} + +/** + * dpdmux_if_set_link_cfg() - set the link configuration. 
+ * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPSW object + * @if_id: interface id + * @cfg: Link configuration + * + * Return: '0' on Success; Error code otherwise. + */ +int dpdmux_if_set_link_cfg(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint16_t if_id, + struct dpdmux_link_cfg *cfg) +{ + struct mc_command cmd = { 0 }; + struct dpdmux_cmd_if_set_link_cfg *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_SET_LINK_CFG, + cmd_flags, + token); + cmd_params = (struct dpdmux_cmd_if_set_link_cfg *)cmd.params; + cmd_params->if_id = cpu_to_le16(if_id); + cmd_params->rate = cpu_to_le32(cfg->rate); + cmd_params->options = cpu_to_le64(cfg->options); + cmd_params->advertising = cpu_to_le64(cfg->advertising); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpdmux_if_get_link_state - Return the link state + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPSW object + * @if_id: interface id + * @state: link state + * + * @returns '0' on Success; Error code otherwise. + */ +int dpdmux_if_get_link_state(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint16_t if_id, + struct dpdmux_link_state *state) +{ + struct mc_command cmd = { 0 }; + struct dpdmux_cmd_if_get_link_state *cmd_params; + struct dpdmux_rsp_if_get_link_state *rsp_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_GET_LINK_STATE, + cmd_flags, + token); + cmd_params = (struct dpdmux_cmd_if_get_link_state *)cmd.params; + cmd_params->if_id = cpu_to_le16(if_id); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpdmux_rsp_if_get_link_state *)cmd.params; + state->rate = le32_to_cpu(rsp_params->rate); + state->options = le64_to_cpu(rsp_params->options); + state->up = dpdmux_get_field(rsp_params->up, UP); + state->state_valid = dpdmux_get_field(rsp_params->up, STATE_VALID); + state->supported = le64_to_cpu(rsp_params->supported); + state->advertising = le64_to_cpu(rsp_params->advertising); + + return 0; +} + +/** + * dpdmux_if_set_default - Set default interface + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPSW object + * @if_id: interface id + * + * @returns '0' on Success; Error code otherwise. + */ +int dpdmux_if_set_default(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint16_t if_id) +{ + struct dpdmux_cmd_if *cmd_params; + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_SET_DEFAULT, + cmd_flags, + token); + cmd_params = (struct dpdmux_cmd_if *)cmd.params; + cmd_params->if_id = cpu_to_le16(if_id); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpdmux_if_get_default - Get default interface + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPSW object + * @if_id: interface id + * + * @returns '0' on Success; Error code otherwise. 
+ */ +int dpdmux_if_get_default(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint16_t *if_id) +{ + struct dpdmux_cmd_if *rsp_params; + struct mc_command cmd = { 0 }; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_GET_DEFAULT, + cmd_flags, + token); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpdmux_cmd_if *)cmd.params; + *if_id = le16_to_cpu(rsp_params->if_id); + + return 0; +} + +/** + * dpdmux_set_custom_key - Set a custom classification key. + * + * This API is only available for DPDMUX instance created with + * DPDMUX_METHOD_CUSTOM. This API must be called before populating the + * classification table using dpdmux_add_custom_cls_entry. + * + * Calls to dpdmux_set_custom_key remove all existing classification entries + * that may have been added previously using dpdmux_add_custom_cls_entry. + * + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPSW object + * @if_id: Interface id + * @key_cfg_iova: DMA address of a configuration structure set up using + * dpkg_prepare_key_cfg. Maximum key size is 24 bytes + * + * @returns '0' on Success; Error code otherwise. + */ +int dpdmux_set_custom_key(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint64_t key_cfg_iova) +{ + struct dpdmux_set_custom_key *cmd_params; + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_SET_CUSTOM_KEY, + cmd_flags, + token); + cmd_params = (struct dpdmux_set_custom_key *)cmd.params; + cmd_params->key_cfg_iova = cpu_to_le64(key_cfg_iova); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpdmux_add_custom_cls_entry - Adds a custom classification entry. + * + * This API is only available for DPDMUX instances created with + * DPDMUX_METHOD_CUSTOM. Before calling this function a classification key + * composition rule must be set up using dpdmux_set_custom_key. + * + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPSW object + * @rule: Classification rule to insert. Rules cannot be duplicated, if a + * matching rule already exists, the action will be replaced. + * @action: Action to perform for matching traffic. + * + * @returns '0' on Success; Error code otherwise. + */ +int dpdmux_add_custom_cls_entry(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + struct dpdmux_rule_cfg *rule, + struct dpdmux_cls_action *action) +{ + struct dpdmux_cmd_add_custom_cls_entry *cmd_params; + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_ADD_CUSTOM_CLS_ENTRY, + cmd_flags, + token); + + cmd_params = (struct dpdmux_cmd_add_custom_cls_entry *)cmd.params; + cmd_params->key_size = rule->key_size; + cmd_params->dest_if = cpu_to_le16(action->dest_if); + cmd_params->key_iova = cpu_to_le64(rule->key_iova); + cmd_params->mask_iova = cpu_to_le64(rule->mask_iova); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpdmux_remove_custom_cls_entry - Removes a custom classification entry. + * + * This API is only available for DPDMUX instances created with + * DPDMUX_METHOD_CUSTOM. The API can be used to remove classification + * entries previously inserted using dpdmux_add_custom_cls_entry. 
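+ *
+ * The overall sequence is therefore dpdmux_set_custom_key() once, then
+ * dpdmux_add_custom_cls_entry() per rule, with this function undoing a
+ * single rule. Editor's sketch (illustrative only; 'key_cfg_iova',
+ * 'key_iova' and 'mask_iova' stand for DMA-mapped buffers the caller
+ * has prepared, 'mc_io'/'token' for an open DPDMUX session):
+ *
+ *   struct dpdmux_rule_cfg rule = {
+ *           .key_iova = key_iova,    // packed key bytes
+ *           .mask_iova = mask_iova,  // mask, same size as the key
+ *           .key_size = 9,           // must not exceed 24 bytes
+ *   };
+ *   struct dpdmux_cls_action action = { .dest_if = 2 };
+ *   int err;
+ *
+ *   err = dpdmux_set_custom_key(mc_io, 0, token, key_cfg_iova);
+ *   if (!err)
+ *           err = dpdmux_add_custom_cls_entry(mc_io, 0, token,
+ *                                             &rule, &action);
+ *   // ... later, drop just this rule again:
+ *   err = dpdmux_remove_custom_cls_entry(mc_io, 0, token, &rule);
+ *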
+ * + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPSW object + * @rule: Classification rule to remove + * + * @returns '0' on Success; Error code otherwise. + */ +int dpdmux_remove_custom_cls_entry(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + struct dpdmux_rule_cfg *rule) +{ + struct dpdmux_cmd_remove_custom_cls_entry *cmd_params; + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_REMOVE_CUSTOM_CLS_ENTRY, + cmd_flags, + token); + cmd_params = (struct dpdmux_cmd_remove_custom_cls_entry *)cmd.params; + cmd_params->key_size = rule->key_size; + cmd_params->key_iova = cpu_to_le64(rule->key_iova); + cmd_params->mask_iova = cpu_to_le64(rule->mask_iova); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpdmux_get_api_version() - Get Data Path Demux API version + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @major_ver: Major version of data path demux API + * @minor_ver: Minor version of data path demux API + * + * Return: '0' on Success; Error code otherwise. + */ +int dpdmux_get_api_version(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t *major_ver, + uint16_t *minor_ver) +{ + struct mc_command cmd = { 0 }; + struct dpdmux_rsp_get_api_version *rsp_params; + int err; + + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_GET_API_VERSION, + cmd_flags, + 0); + + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + rsp_params = (struct dpdmux_rsp_get_api_version *)cmd.params; + *major_ver = le16_to_cpu(rsp_params->major); + *minor_ver = le16_to_cpu(rsp_params->minor); + + return 0; +} diff --git a/src/spdk/dpdk/drivers/net/dpaa2/mc/dpkg.c b/src/spdk/dpdk/drivers/net/dpaa2/mc/dpkg.c new file mode 100644 index 000000000..1e171eedc --- /dev/null +++ b/src/spdk/dpdk/drivers/net/dpaa2/mc/dpkg.c @@ -0,0 +1,77 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) + * + * Copyright 2017 NXP + * + */ +#include +#include +#include + +/** + * dpkg_prepare_key_cfg() - function prepare extract parameters + * @cfg: defining a full Key Generation profile (rule) + * @key_cfg_buf: Zeroed 256 bytes of memory before mapping it to DMA + * + * This function has to be called before the following functions: + * - dpni_set_rx_tc_dist() + * - dpni_set_qos_table() + * - dpkg_prepare_key_cfg() + */ +int +dpkg_prepare_key_cfg(const struct dpkg_profile_cfg *cfg, uint8_t *key_cfg_buf) +{ + int i, j; + struct dpni_ext_set_rx_tc_dist *dpni_ext; + struct dpni_dist_extract *extr; + + if (cfg->num_extracts > DPKG_MAX_NUM_OF_EXTRACTS) + return -EINVAL; + + dpni_ext = (struct dpni_ext_set_rx_tc_dist *)key_cfg_buf; + dpni_ext->num_extracts = cfg->num_extracts; + + for (i = 0; i < cfg->num_extracts; i++) { + extr = &dpni_ext->extracts[i]; + + switch (cfg->extracts[i].type) { + case DPKG_EXTRACT_FROM_HDR: + extr->prot = cfg->extracts[i].extract.from_hdr.prot; + dpkg_set_field(extr->efh_type, EFH_TYPE, + cfg->extracts[i].extract.from_hdr.type); + extr->size = cfg->extracts[i].extract.from_hdr.size; + extr->offset = cfg->extracts[i].extract.from_hdr.offset; + extr->field = cpu_to_le32( + cfg->extracts[i].extract.from_hdr.field); + extr->hdr_index = + cfg->extracts[i].extract.from_hdr.hdr_index; + break; + case DPKG_EXTRACT_FROM_DATA: + extr->size = cfg->extracts[i].extract.from_data.size; + extr->offset = + cfg->extracts[i].extract.from_data.offset; + break; + case 
DPKG_EXTRACT_FROM_PARSE: + extr->size = cfg->extracts[i].extract.from_parse.size; + extr->offset = + cfg->extracts[i].extract.from_parse.offset; + break; + default: + return -EINVAL; + } + + extr->num_of_byte_masks = cfg->extracts[i].num_of_byte_masks; + dpkg_set_field(extr->extract_type, EXTRACT_TYPE, + cfg->extracts[i].type); + + if (extr->num_of_byte_masks > DPKG_NUM_OF_MASKS) + return -EINVAL; + + for (j = 0; j < extr->num_of_byte_masks; j++) { + extr->masks[j].mask = cfg->extracts[i].masks[j].mask; + extr->masks[j].offset = + cfg->extracts[i].masks[j].offset; + } + } + + return 0; +} diff --git a/src/spdk/dpdk/drivers/net/dpaa2/mc/dpni.c b/src/spdk/dpdk/drivers/net/dpaa2/mc/dpni.c new file mode 100644 index 000000000..683d7bcc1 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/dpaa2/mc/dpni.c @@ -0,0 +1,2639 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) + * + * Copyright 2013-2016 Freescale Semiconductor Inc. + * Copyright 2016-2019 NXP + * + */ +#include +#include +#include +#include + +/** + * dpni_open() - Open a control session for the specified object + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @dpni_id: DPNI unique ID + * @token: Returned token; use in subsequent API calls + * + * This function can be used to open a control session for an + * already created object; an object may have been declared in + * the DPL or by calling the dpni_create() function. + * This function returns a unique authentication token, + * associated with the specific object ID and the specific MC + * portal; this token must be used in all subsequent commands for + * this specific object. + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_open(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + int dpni_id, + uint16_t *token) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_open *cmd_params; + + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_OPEN, + cmd_flags, + 0); + cmd_params = (struct dpni_cmd_open *)cmd.params; + cmd_params->dpni_id = cpu_to_le32(dpni_id); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + *token = mc_cmd_hdr_read_token(&cmd); + + return 0; +} + +/** + * dpni_close() - Close the control session of the object + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * + * After this function is called, no further operations are + * allowed on the object without opening a new control session. + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_close(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token) +{ + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLOSE, + cmd_flags, + token); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_create() - Create the DPNI object + * @mc_io: Pointer to MC portal's I/O object + * @dprc_token: Parent container token; '0' for default container + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @cfg: Configuration structure + * @obj_id: Returned object id + * + * Create the DPNI object, allocate required resources and + * perform required initialization. + * + * The object can be created either by declaring it in the + * DPL file, or by calling this function. 
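+ *
+ * Editor's sketch only (most DPDK drivers rely on the DPL and merely
+ * dpni_open() an existing object; 'mc_io' and 'dprc_token' are assumed
+ * to come from the container setup, and the field values are
+ * arbitrary):
+ *
+ *   struct dpni_cfg cfg = {
+ *           .options = 0,             // default feature set
+ *           .num_queues = 8,          // e.g. one queue per core
+ *           .num_tcs = 1,
+ *           .mac_filter_entries = 16,
+ *   };
+ *   uint32_t obj_id;
+ *   int err = dpni_create(mc_io, dprc_token, 0, &cfg, &obj_id);
+ *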
+ * + * The function accepts an authentication token of a parent + * container that this object should be assigned to. The token + * can be '0' so the object will be assigned to the default container. + * The newly created object can be opened with the returned + * object id and using the container's associated tokens and MC portals. + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_create(struct fsl_mc_io *mc_io, + uint16_t dprc_token, + uint32_t cmd_flags, + const struct dpni_cfg *cfg, + uint32_t *obj_id) +{ + struct dpni_cmd_create *cmd_params; + struct mc_command cmd = { 0 }; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_CREATE, + cmd_flags, + dprc_token); + cmd_params = (struct dpni_cmd_create *)cmd.params; + cmd_params->options = cpu_to_le32(cfg->options); + cmd_params->num_queues = cfg->num_queues; + cmd_params->num_tcs = cfg->num_tcs; + cmd_params->mac_filter_entries = cfg->mac_filter_entries; + cmd_params->num_rx_tcs = cfg->num_rx_tcs; + cmd_params->vlan_filter_entries = cfg->vlan_filter_entries; + cmd_params->qos_entries = cfg->qos_entries; + cmd_params->fs_entries = cpu_to_le16(cfg->fs_entries); + cmd_params->num_cgs = cfg->num_cgs; + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + *obj_id = mc_cmd_read_object_id(&cmd); + + return 0; +} + +/** + * dpni_destroy() - Destroy the DPNI object and release all its resources. + * @mc_io: Pointer to MC portal's I/O object + * @dprc_token: Parent container token; '0' for default container + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @object_id: The object id; it must be a valid id within the container that + * created this object; + * + * The function accepts the authentication token of the parent container that + * created the object (not the one that currently owns the object). The object + * is searched within parent using the provided 'object_id'. + * All tokens to the object must be closed before calling destroy. + * + * Return: '0' on Success; error code otherwise. + */ +int dpni_destroy(struct fsl_mc_io *mc_io, + uint16_t dprc_token, + uint32_t cmd_flags, + uint32_t object_id) +{ + struct dpni_cmd_destroy *cmd_params; + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_DESTROY, + cmd_flags, + dprc_token); + /* set object id to destroy */ + cmd_params = (struct dpni_cmd_destroy *)cmd.params; + cmd_params->dpsw_id = cpu_to_le32(object_id); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_set_pools() - Set buffer pools configuration + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @cfg: Buffer pools configuration + * + * mandatory for DPNI operation + * warning:Allowed only when DPNI is disabled + * + * Return: '0' on Success; Error code otherwise. 
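+ *
+ * Editor's sketch (illustrative; 'dpbp_id' identifies a buffer pool the
+ * caller has already created and seeded, 'mc_io'/'token' come from
+ * dpni_open()):
+ *
+ *   struct dpni_pools_cfg pools = {
+ *           .num_dpbp = 1,
+ *           .pools[0] = {
+ *                   .dpbp_id = dpbp_id,
+ *                   .buffer_size = 2048,  // bytes per buffer
+ *                   .backup_pool = 0,     // this is the primary pool
+ *           },
+ *   };
+ *   int err = dpni_set_pools(mc_io, 0, token, &pools);
+ *   // only after this may the DPNI be enabled with dpni_enable()
+ *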
+ */ +int dpni_set_pools(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + const struct dpni_pools_cfg *cfg) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_set_pools *cmd_params; + int i; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_POOLS, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_pools *)cmd.params; + cmd_params->num_dpbp = cfg->num_dpbp; + cmd_params->pool_options = cfg->pool_options; + for (i = 0; i < cmd_params->num_dpbp; i++) { + cmd_params->pool[i].dpbp_id = + cpu_to_le16(cfg->pools[i].dpbp_id); + cmd_params->pool[i].priority_mask = + cfg->pools[i].priority_mask; + cmd_params->buffer_size[i] = + cpu_to_le16(cfg->pools[i].buffer_size); + cmd_params->backup_pool_mask |= + DPNI_BACKUP_POOL(cfg->pools[i].backup_pool, i); + } + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_enable() - Enable the DPNI, allow sending and receiving frames. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_enable(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token) +{ + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_ENABLE, + cmd_flags, + token); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_disable() - Disable the DPNI, stop sending and receiving frames. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_disable(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token) +{ + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_DISABLE, + cmd_flags, + token); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_is_enabled() - Check if the DPNI is enabled. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @en: Returns '1' if object is enabled; '0' otherwise + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_is_enabled(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + int *en) +{ + struct mc_command cmd = { 0 }; + struct dpni_rsp_is_enabled *rsp_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_IS_ENABLED, + cmd_flags, + token); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_is_enabled *)cmd.params; + *en = dpni_get_field(rsp_params->enabled, ENABLE); + + return 0; +} + +/** + * dpni_reset() - Reset the DPNI, returns the object to initial state. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_reset(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token) +{ + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_RESET, + cmd_flags, + token); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_set_irq_enable() - Set overall interrupt state. 
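+ *
+ * (Editor's note) The usual pattern is to unmask the causes of interest
+ * with dpni_set_irq_mask() and only then enable the interrupt index as
+ * a whole. Sketch, assuming the DPNI_IRQ_INDEX and
+ * DPNI_IRQ_EVENT_LINK_CHANGED names from the DPNI header:
+ *
+ *   dpni_set_irq_mask(mc_io, 0, token, DPNI_IRQ_INDEX,
+ *                     DPNI_IRQ_EVENT_LINK_CHANGED);
+ *   dpni_set_irq_enable(mc_io, 0, token, DPNI_IRQ_INDEX, 1);
+ *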
+ * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @irq_index: The interrupt index to configure + * @en: Interrupt state: - enable = 1, disable = 0 + * + * Allows GPP software to control when interrupts are generated. + * Each interrupt can have up to 32 causes. The enable/disable control's the + * overall interrupt state. if the interrupt is disabled no causes will cause + * an interrupt. + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_set_irq_enable(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t irq_index, + uint8_t en) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_set_irq_enable *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IRQ_ENABLE, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_irq_enable *)cmd.params; + dpni_set_field(cmd_params->enable, ENABLE, en); + cmd_params->irq_index = irq_index; + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_get_irq_enable() - Get overall interrupt state + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @irq_index: The interrupt index to configure + * @en: Returned interrupt state - enable = 1, disable = 0 + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_get_irq_enable(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t irq_index, + uint8_t *en) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_get_irq_enable *cmd_params; + struct dpni_rsp_get_irq_enable *rsp_params; + + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_ENABLE, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_get_irq_enable *)cmd.params; + cmd_params->irq_index = irq_index; + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_irq_enable *)cmd.params; + *en = dpni_get_field(rsp_params->enabled, ENABLE); + + return 0; +} + +/** + * dpni_set_irq_mask() - Set interrupt mask. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @irq_index: The interrupt index to configure + * @mask: Event mask to trigger interrupt; + * each bit: + * 0 = ignore event + * 1 = consider event for asserting IRQ + * + * Every interrupt can have up to 32 causes and the interrupt model supports + * masking/unmasking each cause independently + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_set_irq_mask(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t irq_index, + uint32_t mask) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_set_irq_mask *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IRQ_MASK, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_irq_mask *)cmd.params; + cmd_params->mask = cpu_to_le32(mask); + cmd_params->irq_index = irq_index; + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_get_irq_mask() - Get interrupt mask. 
+ * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @irq_index: The interrupt index to configure + * @mask: Returned event mask to trigger interrupt + * + * Every interrupt can have up to 32 causes and the interrupt model supports + * masking/unmasking each cause independently + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_get_irq_mask(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t irq_index, + uint32_t *mask) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_get_irq_mask *cmd_params; + struct dpni_rsp_get_irq_mask *rsp_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_MASK, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_get_irq_mask *)cmd.params; + cmd_params->irq_index = irq_index; + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_irq_mask *)cmd.params; + *mask = le32_to_cpu(rsp_params->mask); + + return 0; +} + +/** + * dpni_get_irq_status() - Get the current status of any pending interrupts. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @irq_index: The interrupt index to configure + * @status: Returned interrupts status - one bit per cause: + * 0 = no interrupt pending + * 1 = interrupt pending + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_get_irq_status(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t irq_index, + uint32_t *status) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_get_irq_status *cmd_params; + struct dpni_rsp_get_irq_status *rsp_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_STATUS, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_get_irq_status *)cmd.params; + cmd_params->status = cpu_to_le32(*status); + cmd_params->irq_index = irq_index; + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_irq_status *)cmd.params; + *status = le32_to_cpu(rsp_params->status); + + return 0; +} + +/** + * dpni_clear_irq_status() - Clear a pending interrupt's status + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @irq_index: The interrupt index to configure + * @status: bits to clear (W1C) - one bit per cause: + * 0 = don't change + * 1 = clear status bit + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_clear_irq_status(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t irq_index, + uint32_t status) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_clear_irq_status *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLEAR_IRQ_STATUS, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_clear_irq_status *)cmd.params; + cmd_params->irq_index = irq_index; + cmd_params->status = cpu_to_le32(status); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_get_attributes() - Retrieve DPNI attributes. 
+ * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @attr: Object's attributes + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_get_attributes(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + struct dpni_attr *attr) +{ + struct mc_command cmd = { 0 }; + struct dpni_rsp_get_attr *rsp_params; + + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_ATTR, + cmd_flags, + token); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_attr *)cmd.params; + attr->options = le32_to_cpu(rsp_params->options); + attr->num_queues = rsp_params->num_queues; + attr->num_rx_tcs = rsp_params->num_rx_tcs; + attr->num_tx_tcs = rsp_params->num_tx_tcs; + attr->mac_filter_entries = rsp_params->mac_filter_entries; + attr->vlan_filter_entries = rsp_params->vlan_filter_entries; + attr->qos_entries = rsp_params->qos_entries; + attr->fs_entries = le16_to_cpu(rsp_params->fs_entries); + attr->qos_key_size = rsp_params->qos_key_size; + attr->fs_key_size = rsp_params->fs_key_size; + attr->wriop_version = le16_to_cpu(rsp_params->wriop_version); + attr->num_cgs = rsp_params->num_cgs; + + return 0; +} + +/** + * dpni_set_errors_behavior() - Set errors behavior + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @cfg: Errors configuration + * + * This function may be called numerous times with different + * error masks + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_set_errors_behavior(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + struct dpni_error_cfg *cfg) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_set_errors_behavior *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_ERRORS_BEHAVIOR, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_errors_behavior *)cmd.params; + cmd_params->errors = cpu_to_le32(cfg->errors); + dpni_set_field(cmd_params->flags, ERROR_ACTION, cfg->error_action); + dpni_set_field(cmd_params->flags, FRAME_ANN, cfg->set_frame_annotation); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_get_buffer_layout() - Retrieve buffer layout attributes. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @qtype: Type of queue to retrieve configuration for + * @layout: Returns buffer layout attributes + * + * Return: '0' on Success; Error code otherwise. 
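+ *
+ * A common pattern is read-modify-write: fetch the current layout,
+ * adjust a field and push it back with dpni_set_buffer_layout() while
+ * the DPNI is still disabled. Editor's sketch (DPNI_QUEUE_RX and the
+ * DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM option name are assumed from the
+ * DPNI header):
+ *
+ *   struct dpni_buffer_layout layout;
+ *   int err = dpni_get_buffer_layout(mc_io, 0, token, DPNI_QUEUE_RX,
+ *                                    &layout);
+ *   if (!err) {
+ *           layout.data_head_room = 128;  // reserve 128 B before data
+ *           layout.options = DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM;
+ *           err = dpni_set_buffer_layout(mc_io, 0, token,
+ *                                        DPNI_QUEUE_RX, &layout);
+ *   }
+ *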
+ */ +int dpni_get_buffer_layout(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + enum dpni_queue_type qtype, + struct dpni_buffer_layout *layout) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_get_buffer_layout *cmd_params; + struct dpni_rsp_get_buffer_layout *rsp_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_BUFFER_LAYOUT, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_get_buffer_layout *)cmd.params; + cmd_params->qtype = qtype; + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_buffer_layout *)cmd.params; + layout->pass_timestamp = + (int)dpni_get_field(rsp_params->flags, PASS_TS); + layout->pass_parser_result = + (int)dpni_get_field(rsp_params->flags, PASS_PR); + layout->pass_frame_status = + (int)dpni_get_field(rsp_params->flags, PASS_FS); + layout->pass_sw_opaque = + (int)dpni_get_field(rsp_params->flags, PASS_SWO); + layout->private_data_size = le16_to_cpu(rsp_params->private_data_size); + layout->data_align = le16_to_cpu(rsp_params->data_align); + layout->data_head_room = le16_to_cpu(rsp_params->head_room); + layout->data_tail_room = le16_to_cpu(rsp_params->tail_room); + + return 0; +} + +/** + * dpni_set_buffer_layout() - Set buffer layout configuration. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @qtype: Type of queue this configuration applies to + * @layout: Buffer layout configuration + * + * Return: '0' on Success; Error code otherwise. + * + * @warning Allowed only when DPNI is disabled + */ +int dpni_set_buffer_layout(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + enum dpni_queue_type qtype, + const struct dpni_buffer_layout *layout) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_set_buffer_layout *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_BUFFER_LAYOUT, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_buffer_layout *)cmd.params; + cmd_params->qtype = qtype; + cmd_params->options = cpu_to_le16((uint16_t)layout->options); + dpni_set_field(cmd_params->flags, PASS_TS, layout->pass_timestamp); + dpni_set_field(cmd_params->flags, PASS_PR, layout->pass_parser_result); + dpni_set_field(cmd_params->flags, PASS_FS, layout->pass_frame_status); + dpni_set_field(cmd_params->flags, PASS_SWO, layout->pass_sw_opaque); + cmd_params->private_data_size = cpu_to_le16(layout->private_data_size); + cmd_params->data_align = cpu_to_le16(layout->data_align); + cmd_params->head_room = cpu_to_le16(layout->data_head_room); + cmd_params->tail_room = cpu_to_le16(layout->data_tail_room); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_set_offload() - Set DPNI offload configuration. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @type: Type of DPNI offload + * @config: Offload configuration. + * For checksum offloads, non-zero value enables the offload + * + * Return: '0' on Success; Error code otherwise. 
+ * + * @warning Allowed only when DPNI is disabled + */ + +int dpni_set_offload(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + enum dpni_offload type, + uint32_t config) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_set_offload *cmd_params; + + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_OFFLOAD, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_offload *)cmd.params; + cmd_params->dpni_offload = type; + cmd_params->config = cpu_to_le32(config); + + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_get_offload() - Get DPNI offload configuration. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @type: Type of DPNI offload + * @config: Offload configuration. + * For checksum offloads, a value of 1 indicates that the + * offload is enabled. + * + * Return: '0' on Success; Error code otherwise. + * + * @warning Allowed only when DPNI is disabled + */ +int dpni_get_offload(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + enum dpni_offload type, + uint32_t *config) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_get_offload *cmd_params; + struct dpni_rsp_get_offload *rsp_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_OFFLOAD, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_get_offload *)cmd.params; + cmd_params->dpni_offload = type; + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_offload *)cmd.params; + *config = le32_to_cpu(rsp_params->config); + + return 0; +} + +/** + * dpni_get_qdid() - Get the Queuing Destination ID (QDID) that should be used + * for enqueue operations + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @qtype: Type of queue to receive QDID for + * @qdid: Returned virtual QDID value that should be used as an argument + * in all enqueue operations + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_get_qdid(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + enum dpni_queue_type qtype, + uint16_t *qdid) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_get_qdid *cmd_params; + struct dpni_rsp_get_qdid *rsp_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_QDID, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_get_qdid *)cmd.params; + cmd_params->qtype = qtype; + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_qdid *)cmd.params; + *qdid = le16_to_cpu(rsp_params->qdid); + + return 0; +} + +/** + * dpni_get_tx_data_offset() - Get the Tx data offset (from start of buffer) + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @data_offset: Tx data offset (from start of buffer) + * + * Return: '0' on Success; Error code otherwise. 
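+ *
+ * Together with dpni_get_qdid(), this value is normally read once at
+ * setup time and cached for the transmit fast path. Editor's sketch
+ * (DPNI_QUEUE_TX is the queue-type name assumed from the DPNI header):
+ *
+ *   uint16_t qdid, tx_offset;
+ *   int err = dpni_get_qdid(mc_io, 0, token, DPNI_QUEUE_TX, &qdid);
+ *   if (!err)
+ *           err = dpni_get_tx_data_offset(mc_io, 0, token, &tx_offset);
+ *   // Tx frames are then enqueued against 'qdid', with the payload
+ *   // starting 'tx_offset' bytes into the buffer
+ *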
+ */ +int dpni_get_tx_data_offset(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint16_t *data_offset) +{ + struct mc_command cmd = { 0 }; + struct dpni_rsp_get_tx_data_offset *rsp_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TX_DATA_OFFSET, + cmd_flags, + token); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_tx_data_offset *)cmd.params; + *data_offset = le16_to_cpu(rsp_params->data_offset); + + return 0; +} + +/** + * dpni_set_link_cfg() - set the link configuration. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @cfg: Link configuration + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_set_link_cfg(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + const struct dpni_link_cfg *cfg) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_set_link_cfg *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_LINK_CFG, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_link_cfg *)cmd.params; + cmd_params->rate = cpu_to_le32(cfg->rate); + cmd_params->options = cpu_to_le64(cfg->options); + cmd_params->advertising = cpu_to_le64(cfg->advertising); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_get_link_state() - Return the link state (either up or down) + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @state: Returned link state; + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_get_link_state(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + struct dpni_link_state *state) +{ + struct mc_command cmd = { 0 }; + struct dpni_rsp_get_link_state *rsp_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_LINK_STATE, + cmd_flags, + token); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_link_state *)cmd.params; + state->up = dpni_get_field(rsp_params->flags, LINK_STATE); + state->state_valid = dpni_get_field(rsp_params->flags, STATE_VALID); + state->rate = le32_to_cpu(rsp_params->rate); + state->options = le64_to_cpu(rsp_params->options); + state->supported = le64_to_cpu(rsp_params->supported); + state->advertising = le64_to_cpu(rsp_params->advertising); + + return 0; +} + +/** + * dpni_set_max_frame_length() - Set the maximum received frame length. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @max_frame_length: Maximum received frame length (in bytes); + * frame is discarded if its length exceeds this value + * + * Return: '0' on Success; Error code otherwise. 
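+ *
+ * Editor's sketch, mapping an Ethernet MTU to this setting (the extra
+ * 18 bytes cover the L2 header and FCS; VLAN-tagged traffic needs 4
+ * more):
+ *
+ *   uint16_t mtu = 1500;
+ *   int err = dpni_set_max_frame_length(mc_io, 0, token, mtu + 18);
+ *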
+ */ +int dpni_set_max_frame_length(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint16_t max_frame_length) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_set_max_frame_length *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_MAX_FRAME_LENGTH, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_max_frame_length *)cmd.params; + cmd_params->max_frame_length = cpu_to_le16(max_frame_length); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_get_max_frame_length() - Get the maximum received frame length. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @max_frame_length: Maximum received frame length (in bytes); + * frame is discarded if its length exceeds this value + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_get_max_frame_length(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint16_t *max_frame_length) +{ + struct mc_command cmd = { 0 }; + struct dpni_rsp_get_max_frame_length *rsp_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_MAX_FRAME_LENGTH, + cmd_flags, + token); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_max_frame_length *)cmd.params; + *max_frame_length = le16_to_cpu(rsp_params->max_frame_length); + + return 0; +} + +/** + * dpni_set_multicast_promisc() - Enable/disable multicast promiscuous mode + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @en: Set to '1' to enable; '0' to disable + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_set_multicast_promisc(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + int en) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_set_multicast_promisc *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_MCAST_PROMISC, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_multicast_promisc *)cmd.params; + dpni_set_field(cmd_params->enable, ENABLE, en); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_get_multicast_promisc() - Get multicast promiscuous mode + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @en: Returns '1' if enabled; '0' otherwise + * + * Return: '0' on Success; Error code otherwise. 
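+ *
+ * Editor's sketch of the usual enable-and-verify sequence (the unicast
+ * variants below follow the same pattern):
+ *
+ *   int en = 0;
+ *   int err = dpni_set_multicast_promisc(mc_io, 0, token, 1);
+ *   if (!err)
+ *           err = dpni_get_multicast_promisc(mc_io, 0, token, &en);
+ *   // en == 1 on success
+ *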
+ */ +int dpni_get_multicast_promisc(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + int *en) +{ + struct mc_command cmd = { 0 }; + struct dpni_rsp_get_multicast_promisc *rsp_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_MCAST_PROMISC, + cmd_flags, + token); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_multicast_promisc *)cmd.params; + *en = dpni_get_field(rsp_params->enabled, ENABLE); + + return 0; +} + +/** + * dpni_set_unicast_promisc() - Enable/disable unicast promiscuous mode + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @en: Set to '1' to enable; '0' to disable + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_set_unicast_promisc(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + int en) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_set_unicast_promisc *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_UNICAST_PROMISC, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_unicast_promisc *)cmd.params; + dpni_set_field(cmd_params->enable, ENABLE, en); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_get_unicast_promisc() - Get unicast promiscuous mode + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @en: Returns '1' if enabled; '0' otherwise + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_get_unicast_promisc(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + int *en) +{ + struct mc_command cmd = { 0 }; + struct dpni_rsp_get_unicast_promisc *rsp_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_UNICAST_PROMISC, + cmd_flags, + token); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_unicast_promisc *)cmd.params; + *en = dpni_get_field(rsp_params->enabled, ENABLE); + + return 0; +} + +/** + * dpni_set_primary_mac_addr() - Set the primary MAC address + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @mac_addr: MAC address to set as primary address + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_set_primary_mac_addr(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + const uint8_t mac_addr[6]) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_set_primary_mac_addr *cmd_params; + int i; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_PRIM_MAC, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_primary_mac_addr *)cmd.params; + for (i = 0; i < 6; i++) + cmd_params->mac_addr[i] = mac_addr[5 - i]; + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_get_primary_mac_addr() - Get the primary MAC address + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @mac_addr: Returned MAC address + * + * Return: '0' on Success; Error code otherwise. 
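+ *
+ * The byte reversal required by the MC command layout is done inside
+ * these wrappers for both the set and the get direction, so callers use
+ * the same array ordering throughout. Editor's sketch:
+ *
+ *   uint8_t mac[6] = { 0x00, 0x04, 0x9f, 0x01, 0x02, 0x03 };
+ *   int err = dpni_set_primary_mac_addr(mc_io, 0, token, mac);
+ *   if (!err)
+ *           err = dpni_get_primary_mac_addr(mc_io, 0, token, mac);
+ *   // mac[] reads back unchanged on success
+ *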
+ */ +int dpni_get_primary_mac_addr(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t mac_addr[6]) +{ + struct mc_command cmd = { 0 }; + struct dpni_rsp_get_primary_mac_addr *rsp_params; + int i, err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_PRIM_MAC, + cmd_flags, + token); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_primary_mac_addr *)cmd.params; + for (i = 0; i < 6; i++) + mac_addr[5 - i] = rsp_params->mac_addr[i]; + + return 0; +} + +/** + * dpni_add_mac_addr() - Add MAC address filter + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @mac_addr: MAC address to add + * @flags : 0 - tc_id and flow_id will be ignored. + * Pkt with this mac_id will be passed to the next + * classification stages + * DPNI_MAC_SET_QUEUE_ACTION + * Pkt with this mac will be forward directly to + * queue defined by the tc_id and flow_id + * @tc_id : Traffic class selection (0-7) + * @flow_id : Selects the specific queue out of the set allocated for the + * same as tc_id. Value must be in range 0 to NUM_QUEUES - 1 + * Return: '0' on Success; Error code otherwise. + */ +int dpni_add_mac_addr(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + const uint8_t mac_addr[6], + uint8_t flags, + uint8_t tc_id, + uint8_t flow_id) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_add_mac_addr *cmd_params; + int i; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_MAC_ADDR, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_add_mac_addr *)cmd.params; + cmd_params->flags = flags; + cmd_params->tc_id = tc_id; + cmd_params->fq_id = flow_id; + + for (i = 0; i < 6; i++) + cmd_params->mac_addr[i] = mac_addr[5 - i]; + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_remove_mac_addr() - Remove MAC address filter + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @mac_addr: MAC address to remove + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_remove_mac_addr(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + const uint8_t mac_addr[6]) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_remove_mac_addr *cmd_params; + int i; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_MAC_ADDR, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_remove_mac_addr *)cmd.params; + for (i = 0; i < 6; i++) + cmd_params->mac_addr[i] = mac_addr[5 - i]; + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_clear_mac_filters() - Clear all unicast and/or multicast MAC filters + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @unicast: Set to '1' to clear unicast addresses + * @multicast: Set to '1' to clear multicast addresses + * + * The primary MAC address is not cleared by this operation. + * + * Return: '0' on Success; Error code otherwise. 
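+ *
+ * Editor's sketch of a filter rebuild; passing 0 for flags/tc_id/
+ * flow_id keeps the default classification, as described for
+ * dpni_add_mac_addr() above:
+ *
+ *   uint8_t mcast[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
+ *   int err = dpni_clear_mac_filters(mc_io, 0, token, 1, 1);
+ *   if (!err)
+ *           err = dpni_add_mac_addr(mc_io, 0, token, mcast, 0, 0, 0);
+ *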
+ */ +int dpni_clear_mac_filters(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + int unicast, + int multicast) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_clear_mac_filters *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLR_MAC_FILTERS, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_clear_mac_filters *)cmd.params; + dpni_set_field(cmd_params->flags, UNICAST_FILTERS, unicast); + dpni_set_field(cmd_params->flags, MULTICAST_FILTERS, multicast); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_get_port_mac_addr() - Retrieve MAC address associated to the physical + * port the DPNI is attached to + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @mac_addr: MAC address of the physical port, if any, otherwise 0 + * + * The primary MAC address is not cleared by this operation. + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_get_port_mac_addr(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t mac_addr[6]) +{ + struct mc_command cmd = { 0 }; + struct dpni_rsp_get_port_mac_addr *rsp_params; + int i, err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_PORT_MAC_ADDR, + cmd_flags, + token); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_port_mac_addr *)cmd.params; + for (i = 0; i < 6; i++) + mac_addr[5 - i] = rsp_params->mac_addr[i]; + + return 0; +} + +/** + * dpni_enable_vlan_filter() - Enable/disable VLAN filtering mode + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @en: Set to '1' to enable; '0' to disable + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_enable_vlan_filter(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + int en) +{ + struct dpni_cmd_enable_vlan_filter *cmd_params; + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_ENABLE_VLAN_FILTER, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_enable_vlan_filter *)cmd.params; + dpni_set_field(cmd_params->en, ENABLE, en); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_add_vlan_id() - Add VLAN ID filter + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @vlan_id: VLAN ID to add + * @flags: 0 - tc_id and flow_id will be ignored. + * Pkt with this vlan_id will be passed to the next + * classification stages + * DPNI_VLAN_SET_QUEUE_ACTION + * Pkt with this vlan_id will be forward directly to + * queue defined by the tc_id and flow_id + * + * @tc_id: Traffic class selection (0-7) + * @flow_id: Selects the specific queue out of the set allocated for the + * same as tc_id. Value must be in range 0 to NUM_QUEUES - 1 + * + * Return: '0' on Success; Error code otherwise. 
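+ *
+ * Editor's sketch: the filter only takes effect once VLAN filtering is
+ * enabled, so the typical order is dpni_enable_vlan_filter() first and
+ * then one dpni_add_vlan_id() per allowed ID (flags/tc_id/flow_id of 0
+ * keep the default classification):
+ *
+ *   int err = dpni_enable_vlan_filter(mc_io, 0, token, 1);
+ *   if (!err)
+ *           err = dpni_add_vlan_id(mc_io, 0, token, 100, 0, 0, 0);
+ *   if (!err)
+ *           err = dpni_add_vlan_id(mc_io, 0, token, 200, 0, 0, 0);
+ *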
+ */ +int dpni_add_vlan_id(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint16_t vlan_id, + uint8_t flags, + uint8_t tc_id, + uint8_t flow_id) +{ + struct dpni_cmd_vlan_id *cmd_params; + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_VLAN_ID, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_vlan_id *)cmd.params; + cmd_params->flags = flags; + cmd_params->tc_id = tc_id; + cmd_params->flow_id = flow_id; + cmd_params->vlan_id = cpu_to_le16(vlan_id); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_remove_vlan_id() - Remove VLAN ID filter + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @vlan_id: VLAN ID to remove + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_remove_vlan_id(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint16_t vlan_id) +{ + struct dpni_cmd_vlan_id *cmd_params; + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_VLAN_ID, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_vlan_id *)cmd.params; + cmd_params->vlan_id = cpu_to_le16(vlan_id); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_clear_vlan_filters() - Clear all VLAN filters + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_clear_vlan_filters(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token) +{ + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLR_VLAN_FILTERS, + cmd_flags, + token); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_set_rx_tc_dist() - Set Rx traffic class distribution configuration + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @tc_id: Traffic class selection (0-7) + * @cfg: Traffic class distribution configuration + * + * warning: if 'dist_mode != DPNI_DIST_MODE_NONE', call dpkg_prepare_key_cfg() + * first to prepare the key_cfg_iova parameter + * + * Return: '0' on Success; error code otherwise. 
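+ *
+ * Editor's sketch of hash distribution over the IPv4 source address.
+ * The key profile is serialized by dpkg_prepare_key_cfg() into a zeroed
+ * 256-byte DMA-able buffer whose IOVA is then passed here
+ * (NET_PROT_IP, NH_FLD_IP_SRC, DPKG_FULL_FIELD and DPNI_DIST_MODE_HASH
+ * are names assumed from the fsl_net.h/fsl_dpkg.h/fsl_dpni.h headers):
+ *
+ *   struct dpkg_profile_cfg kg = { .num_extracts = 1 };
+ *   kg.extracts[0].type = DPKG_EXTRACT_FROM_HDR;
+ *   kg.extracts[0].extract.from_hdr.prot = NET_PROT_IP;
+ *   kg.extracts[0].extract.from_hdr.type = DPKG_FULL_FIELD;
+ *   kg.extracts[0].extract.from_hdr.field = NH_FLD_IP_SRC;
+ *
+ *   int err = dpkg_prepare_key_cfg(&kg, key_cfg_buf);
+ *   if (!err) {
+ *           struct dpni_rx_tc_dist_cfg dist = {
+ *                   .dist_size = 8,                // queues in TC 0
+ *                   .dist_mode = DPNI_DIST_MODE_HASH,
+ *                   .key_cfg_iova = key_cfg_iova,  // IOVA of key_cfg_buf
+ *           };
+ *           err = dpni_set_rx_tc_dist(mc_io, 0, token, 0, &dist);
+ *   }
+ *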
+ */ +int dpni_set_rx_tc_dist(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t tc_id, + const struct dpni_rx_tc_dist_cfg *cfg) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_set_rx_tc_dist *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_TC_DIST, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_rx_tc_dist *)cmd.params; + cmd_params->dist_size = cpu_to_le16(cfg->dist_size); + cmd_params->tc_id = tc_id; + cmd_params->default_flow_id = cpu_to_le16(cfg->fs_cfg.default_flow_id); + cmd_params->key_cfg_iova = cpu_to_le64(cfg->key_cfg_iova); + dpni_set_field(cmd_params->flags, + DIST_MODE, + cfg->dist_mode); + dpni_set_field(cmd_params->flags, + MISS_ACTION, + cfg->fs_cfg.miss_action); + dpni_set_field(cmd_params->keep_hash_key, + KEEP_HASH_KEY, + cfg->fs_cfg.keep_hash_key); + dpni_set_field(cmd_params->keep_hash_key, + KEEP_ENTRIES, + cfg->fs_cfg.keep_entries); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_set_tx_confirmation_mode() - Tx confirmation mode + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @mode: Tx confirmation mode + * + * This function is useful only when 'DPNI_OPT_TX_CONF_DISABLED' is not + * selected at DPNI creation. + * Calling this function with 'mode' set to DPNI_CONF_DISABLE disables all + * transmit confirmation (including the private confirmation queues), regardless + * of previous settings; Note that in this case, Tx error frames are still + * enqueued to the general transmit errors queue. + * Calling this function with 'mode' set to DPNI_CONF_SINGLE switches all + * Tx confirmations to a shared Tx conf queue. 'index' field in dpni_get_queue + * command will be ignored. + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_set_tx_confirmation_mode(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + enum dpni_confirmation_mode mode) +{ + struct dpni_tx_confirmation_mode *cmd_params; + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_CONFIRMATION_MODE, + cmd_flags, + token); + cmd_params = (struct dpni_tx_confirmation_mode *)cmd.params; + cmd_params->confirmation_mode = mode; + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_set_qos_table() - Set QoS mapping table + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @cfg: QoS table configuration + * + * This function and all QoS-related functions require that + *'max_tcs > 1' was set at DPNI creation. + * + * warning: Before calling this function, call dpkg_prepare_key_cfg() to + * prepare the key_cfg_iova parameter + * + * Return: '0' on Success; Error code otherwise. 
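+ *
+ * Editor's sketch (the profile behind 'qos_key_cfg_iova' is prepared
+ * with dpkg_prepare_key_cfg() exactly as for Rx distribution above):
+ *
+ *   struct dpni_qos_tbl_cfg qos = {
+ *           .key_cfg_iova = qos_key_cfg_iova,
+ *           .discard_on_miss = 0,  // misses fall back to default_tc
+ *           .default_tc = 0,
+ *   };
+ *   int err = dpni_set_qos_table(mc_io, 0, token, &qos);
+ *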
+ */ +int dpni_set_qos_table(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + const struct dpni_qos_tbl_cfg *cfg) +{ + struct dpni_cmd_set_qos_table *cmd_params; + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_QOS_TBL, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_qos_table *)cmd.params; + cmd_params->default_tc = cfg->default_tc; + cmd_params->key_cfg_iova = cpu_to_le64(cfg->key_cfg_iova); + dpni_set_field(cmd_params->discard_on_miss, + ENABLE, + cfg->discard_on_miss); + dpni_set_field(cmd_params->discard_on_miss, + KEEP_QOS_ENTRIES, + cfg->keep_entries); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_add_qos_entry() - Add QoS mapping entry (to select a traffic class) + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @cfg: QoS rule to add + * @tc_id: Traffic class selection (0-7) + * @index: Location in the QoS table where to insert the entry. + * Only relevant if MASKING is enabled for QoS classification on + * this DPNI, it is ignored for exact match. + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_add_qos_entry(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + const struct dpni_rule_cfg *cfg, + uint8_t tc_id, + uint16_t index, + uint8_t flags, + uint8_t flow_id) +{ + struct dpni_cmd_add_qos_entry *cmd_params; + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_QOS_ENT, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_add_qos_entry *)cmd.params; + cmd_params->flags = flags; + cmd_params->flow_id = flow_id; + cmd_params->tc_id = tc_id; + cmd_params->key_size = cfg->key_size; + cmd_params->index = cpu_to_le16(index); + cmd_params->key_iova = cpu_to_le64(cfg->key_iova); + cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_remove_qos_entry() - Remove QoS mapping entry + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @cfg: QoS rule to remove + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_remove_qos_entry(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + const struct dpni_rule_cfg *cfg) +{ + struct dpni_cmd_remove_qos_entry *cmd_params; + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_QOS_ENT, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_remove_qos_entry *)cmd.params; + cmd_params->key_size = cfg->key_size; + cmd_params->key_iova = cpu_to_le64(cfg->key_iova); + cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_clear_qos_table() - Clear all QoS mapping entries + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * + * Following this function call, all frames are directed to + * the default traffic class (0) + * + * Return: '0' on Success; Error code otherwise. 
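+ *
+ * Editor's sketch of the entry lifecycle that this call wipes; the
+ * key/mask buffers are laid out according to the QoS key profile set
+ * above, and index/flags/flow_id of 0 describe a plain exact-match
+ * entry:
+ *
+ *   struct dpni_rule_cfg rule = {
+ *           .key_iova = rule_key_iova,
+ *           .mask_iova = rule_mask_iova,
+ *           .key_size = key_size,
+ *   };
+ *   int err = dpni_add_qos_entry(mc_io, 0, token, &rule, 1, 0, 0, 0);
+ *   // ... remove a single entry again:
+ *   err = dpni_remove_qos_entry(mc_io, 0, token, &rule);
+ *   // or drop the whole table at once:
+ *   err = dpni_clear_qos_table(mc_io, 0, token);
+ *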
+ */ +int dpni_clear_qos_table(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token) +{ + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLR_QOS_TBL, + cmd_flags, + token); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_add_fs_entry() - Add Flow Steering entry for a specific traffic class + * (to select a flow ID) + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @tc_id: Traffic class selection (0-7) + * @index: Location in the QoS table where to insert the entry. + * Only relevant if MASKING is enabled for QoS classification + * on this DPNI, it is ignored for exact match. + * @cfg: Flow steering rule to add + * @action: Action to be taken as result of a classification hit + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_add_fs_entry(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t tc_id, + uint16_t index, + const struct dpni_rule_cfg *cfg, + const struct dpni_fs_action_cfg *action) +{ + struct dpni_cmd_add_fs_entry *cmd_params; + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_FS_ENT, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_add_fs_entry *)cmd.params; + cmd_params->tc_id = tc_id; + cmd_params->key_size = cfg->key_size; + cmd_params->index = cpu_to_le16(index); + cmd_params->key_iova = cpu_to_le64(cfg->key_iova); + cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova); + cmd_params->options = cpu_to_le16(action->options); + cmd_params->flow_id = cpu_to_le16(action->flow_id); + cmd_params->flc = cpu_to_le64(action->flc); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_remove_fs_entry() - Remove Flow Steering entry from a specific + * traffic class + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @tc_id: Traffic class selection (0-7) + * @cfg: Flow steering rule to remove + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_remove_fs_entry(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t tc_id, + const struct dpni_rule_cfg *cfg) +{ + struct dpni_cmd_remove_fs_entry *cmd_params; + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_FS_ENT, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_remove_fs_entry *)cmd.params; + cmd_params->tc_id = tc_id; + cmd_params->key_size = cfg->key_size; + cmd_params->key_iova = cpu_to_le64(cfg->key_iova); + cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_clear_fs_entries() - Clear all Flow Steering entries of a specific + * traffic class + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @tc_id: Traffic class selection (0-7) + * + * Return: '0' on Success; Error code otherwise. 
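+ *
+ * Example (illustrative sketch only; CMD_PRI_LOW is assumed from the MC
+ * command header and 'rule' is a dpni_rule_cfg whose key/mask buffers
+ * were prepared by the caller as for dpni_add_qos_entry()):
+ *
+ *	struct dpni_fs_action_cfg action = { 0 };
+ *	int err;
+ *
+ *	action.flow_id = 2;
+ *	err = dpni_add_fs_entry(mc_io, CMD_PRI_LOW, token, 0, 0,
+ *				&rule, &action);
+ *	if (err)
+ *		return err;
+ *	err = dpni_clear_fs_entries(mc_io, CMD_PRI_LOW, token, 0);
+ *
+ * The last call removes every rule installed on traffic class 0,
+ * including the one added above.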
+ */ +int dpni_clear_fs_entries(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t tc_id) +{ + struct dpni_cmd_clear_fs_entries *cmd_params; + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLR_FS_ENT, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_clear_fs_entries *)cmd.params; + cmd_params->tc_id = tc_id; + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_set_congestion_notification() - Set traffic class congestion + * notification configuration + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @qtype: Type of queue - Rx, Tx and Tx confirm types are supported + * @tc_id: Traffic class selection (0-7) + * @cfg: congestion notification configuration + * + * Return: '0' on Success; error code otherwise. + */ +int dpni_set_congestion_notification(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + enum dpni_queue_type qtype, + uint8_t tc_id, + const struct dpni_congestion_notification_cfg *cfg) +{ + struct dpni_cmd_set_congestion_notification *cmd_params; + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header( + DPNI_CMDID_SET_CONGESTION_NOTIFICATION, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_congestion_notification *)cmd.params; + cmd_params->qtype = qtype; + cmd_params->tc = tc_id; + cmd_params->congestion_point = cfg->cg_point; + cmd_params->cgid = (uint8_t)cfg->cgid; + cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id); + cmd_params->notification_mode = cpu_to_le16(cfg->notification_mode); + cmd_params->dest_priority = cfg->dest_cfg.priority; + cmd_params->message_iova = cpu_to_le64(cfg->message_iova); + cmd_params->message_ctx = cpu_to_le64(cfg->message_ctx); + cmd_params->threshold_entry = cpu_to_le32(cfg->threshold_entry); + cmd_params->threshold_exit = cpu_to_le32(cfg->threshold_exit); + dpni_set_field(cmd_params->type_units, + DEST_TYPE, + cfg->dest_cfg.dest_type); + dpni_set_field(cmd_params->type_units, + CONG_UNITS, + cfg->units); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_get_congestion_notification() - Get traffic class congestion + * notification configuration + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @qtype: Type of queue - Rx, Tx and Tx confirm types are supported + * @tc_id: Traffic class selection (0-7) + * @cfg: congestion notification configuration + * + * Return: '0' on Success; error code otherwise. 
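+ *
+ * Example (illustrative sketch only; note that cfg->cg_point and cfg->cgid
+ * are read as inputs by this command and must be set before the call;
+ * CMD_PRI_LOW is assumed from the MC command header):
+ *
+ *	struct dpni_congestion_notification_cfg cn_cfg = { 0 };
+ *	int err;
+ *
+ *	cn_cfg.cg_point = DPNI_CP_GROUP;
+ *	cn_cfg.cgid = 0;
+ *	err = dpni_get_congestion_notification(mc_io, CMD_PRI_LOW, token,
+ *					       DPNI_QUEUE_RX, 0, &cn_cfg);
+ *	if (err)
+ *		return err;
+ *
+ * On return cn_cfg.threshold_entry and cn_cfg.threshold_exit hold the
+ * thresholds previously programmed with
+ * dpni_set_congestion_notification().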
+ */ +int dpni_get_congestion_notification(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + enum dpni_queue_type qtype, + uint8_t tc_id, + struct dpni_congestion_notification_cfg *cfg) +{ + struct dpni_rsp_get_congestion_notification *rsp_params; + struct dpni_cmd_get_congestion_notification *cmd_params; + struct mc_command cmd = { 0 }; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header( + DPNI_CMDID_GET_CONGESTION_NOTIFICATION, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_get_congestion_notification *)cmd.params; + cmd_params->qtype = qtype; + cmd_params->tc = tc_id; + cmd_params->congestion_point = cfg->cg_point; + cmd_params->cgid = cfg->cgid; + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + rsp_params = (struct dpni_rsp_get_congestion_notification *)cmd.params; + cfg->units = dpni_get_field(rsp_params->type_units, CONG_UNITS); + cfg->threshold_entry = le32_to_cpu(rsp_params->threshold_entry); + cfg->threshold_exit = le32_to_cpu(rsp_params->threshold_exit); + cfg->message_ctx = le64_to_cpu(rsp_params->message_ctx); + cfg->message_iova = le64_to_cpu(rsp_params->message_iova); + cfg->notification_mode = le16_to_cpu(rsp_params->notification_mode); + cfg->dest_cfg.dest_id = le32_to_cpu(rsp_params->dest_id); + cfg->dest_cfg.priority = rsp_params->dest_priority; + cfg->dest_cfg.dest_type = dpni_get_field(rsp_params->type_units, + DEST_TYPE); + + return 0; +} + +/** + * dpni_get_api_version() - Get Data Path Network Interface API version + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @major_ver: Major version of data path network interface API + * @minor_ver: Minor version of data path network interface API + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_get_api_version(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t *major_ver, + uint16_t *minor_ver) +{ + struct dpni_rsp_get_api_version *rsp_params; + struct mc_command cmd = { 0 }; + int err; + + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_API_VERSION, + cmd_flags, + 0); + + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + rsp_params = (struct dpni_rsp_get_api_version *)cmd.params; + *major_ver = le16_to_cpu(rsp_params->major); + *minor_ver = le16_to_cpu(rsp_params->minor); + + return 0; +} + +/** + * dpni_set_queue() - Set queue parameters + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @qtype: Type of queue - all queue types are supported, although + * the command is ignored for Tx + * @tc: Traffic class, in range 0 to NUM_TCS - 1 + * @index: Selects the specific queue out of the set allocated for the + * same TC. Value must be in range 0 to NUM_QUEUES - 1 + * @options: A combination of DPNI_QUEUE_OPT_ values that control what + * configuration options are set on the queue + * @queue: Queue structure + * + * Return: '0' on Success; Error code otherwise. 
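+ *
+ * Example (illustrative sketch only; DPNI_QUEUE_OPT_USER_CTX is assumed
+ * from fsl_dpni.h, CMD_PRI_LOW from the MC command header, and 'rxq_priv'
+ * stands for a driver-private pointer):
+ *
+ *	struct dpni_queue q_cfg = { 0 };
+ *	int err;
+ *
+ *	q_cfg.user_context = (uint64_t)(uintptr_t)rxq_priv;
+ *	err = dpni_set_queue(mc_io, CMD_PRI_LOW, token, DPNI_QUEUE_RX,
+ *			     0, 0, DPNI_QUEUE_OPT_USER_CTX, &q_cfg);
+ *
+ * The user context set here is returned verbatim by dpni_get_queue() and
+ * is typically used to recover the software queue structure for a
+ * dequeued frame.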
+ */ +int dpni_set_queue(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + enum dpni_queue_type qtype, + uint8_t tc, + uint8_t index, + uint8_t options, + const struct dpni_queue *queue) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_set_queue *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_QUEUE, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_queue *)cmd.params; + cmd_params->qtype = qtype; + cmd_params->tc = tc; + cmd_params->index = index; + cmd_params->options = options; + cmd_params->dest_id = cpu_to_le32(queue->destination.id); + cmd_params->dest_prio = queue->destination.priority; + dpni_set_field(cmd_params->flags, DEST_TYPE, queue->destination.type); + dpni_set_field(cmd_params->flags, STASH_CTRL, queue->flc.stash_control); + dpni_set_field(cmd_params->flags, HOLD_ACTIVE, + queue->destination.hold_active); + cmd_params->flc = cpu_to_le64(queue->flc.value); + cmd_params->user_context = cpu_to_le64(queue->user_context); + cmd_params->cgid = queue->cgid; + + /* send command to mc */ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_get_queue() - Get queue parameters + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @qtype: Type of queue - all queue types are supported + * @tc: Traffic class, in range 0 to NUM_TCS - 1 + * @index: Selects the specific queue out of the set allocated for the + * same TC. Value must be in range 0 to NUM_QUEUES - 1 + * @queue: Queue configuration structure + * @qid: Queue identification + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_get_queue(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + enum dpni_queue_type qtype, + uint8_t tc, + uint8_t index, + struct dpni_queue *queue, + struct dpni_queue_id *qid) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_get_queue *cmd_params; + struct dpni_rsp_get_queue *rsp_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_QUEUE, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_get_queue *)cmd.params; + cmd_params->qtype = qtype; + cmd_params->tc = tc; + cmd_params->index = index; + + /* send command to mc */ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_queue *)cmd.params; + queue->destination.id = le32_to_cpu(rsp_params->dest_id); + queue->destination.priority = rsp_params->dest_prio; + queue->destination.type = dpni_get_field(rsp_params->flags, + DEST_TYPE); + queue->flc.stash_control = dpni_get_field(rsp_params->flags, + STASH_CTRL); + queue->destination.hold_active = dpni_get_field(rsp_params->flags, + HOLD_ACTIVE); + queue->flc.value = le64_to_cpu(rsp_params->flc); + queue->user_context = le64_to_cpu(rsp_params->user_context); + qid->fqid = le32_to_cpu(rsp_params->fqid); + qid->qdbin = le16_to_cpu(rsp_params->qdbin); + if (dpni_get_field(rsp_params->flags, CGID_VALID)) + queue->cgid = rsp_params->cgid; + else + queue->cgid = -1; + + return 0; +} + +/** + * dpni_get_statistics() - Get DPNI statistics + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @page: Selects the statistics page to retrieve, see + * DPNI_GET_STATISTICS output. Pages are numbered 0 to 6. 
+ * @param: Custom parameter for some pages used to select + * a certain statistic source, for example the TC. + * @stat: Structure containing the statistics + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_get_statistics(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t page, + uint16_t param, + union dpni_statistics *stat) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_get_statistics *cmd_params; + struct dpni_rsp_get_statistics *rsp_params; + int i, err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_STATISTICS, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_get_statistics *)cmd.params; + cmd_params->page_number = page; + cmd_params->param = param; + + /* send command to mc */ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_statistics *)cmd.params; + for (i = 0; i < DPNI_STATISTICS_CNT; i++) + stat->raw.counter[i] = le64_to_cpu(rsp_params->counter[i]); + + return 0; +} + +/** + * dpni_reset_statistics() - Clears DPNI statistics + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_reset_statistics(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token) +{ + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_RESET_STATISTICS, + cmd_flags, + token); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_set_taildrop() - Set taildrop per queue or TC + * + * Setting a per-TC taildrop (cg_point = DPNI_CP_GROUP) will reset any current + * congestion notification or early drop (WRED) configuration previously applied + * to the same TC. + * + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @cg_point: Congestion point, DPNI_CP_QUEUE is only supported in + * combination with DPNI_QUEUE_RX. + * @q_type: Queue type, can be DPNI_QUEUE_RX or DPNI_QUEUE_TX. + * @tc: Traffic class to apply this taildrop to + * @q_index: Index of the queue if the DPNI supports multiple queues for + * traffic distribution. + * Ignored if CONGESTION_POINT is not DPNI_CP_QUEUE. + * @taildrop: Taildrop structure + * + * Return: '0' on Success; Error code otherwise. 
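+ *
+ * Example (illustrative sketch only; DPNI_CONGESTION_UNIT_BYTES is assumed
+ * from fsl_dpni.h and CMD_PRI_LOW from the MC command header): a 64 KiB,
+ * byte-based taildrop on Rx queue 0 of traffic class 0:
+ *
+ *	struct dpni_taildrop td = { 0 };
+ *	int err;
+ *
+ *	td.enable = 1;
+ *	td.units = DPNI_CONGESTION_UNIT_BYTES;
+ *	td.threshold = 64 * 1024;
+ *	err = dpni_set_taildrop(mc_io, CMD_PRI_LOW, token, DPNI_CP_QUEUE,
+ *				DPNI_QUEUE_RX, 0, 0, &td);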
+ */ +int dpni_set_taildrop(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + enum dpni_congestion_point cg_point, + enum dpni_queue_type qtype, + uint8_t tc, + uint8_t index, + struct dpni_taildrop *taildrop) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_set_taildrop *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TAILDROP, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_taildrop *)cmd.params; + cmd_params->congestion_point = cg_point; + cmd_params->qtype = qtype; + cmd_params->tc = tc; + cmd_params->index = index; + cmd_params->units = taildrop->units; + cmd_params->threshold = cpu_to_le32(taildrop->threshold); + dpni_set_field(cmd_params->enable_oal_lo, ENABLE, taildrop->enable); + dpni_set_field(cmd_params->enable_oal_lo, OAL_LO, taildrop->oal); + dpni_set_field(cmd_params->oal_hi, + OAL_HI, + taildrop->oal >> DPNI_OAL_LO_SIZE); + + /* send command to mc */ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_get_taildrop() - Get taildrop information + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @cg_point: Congestion point + * @q_type: Queue type on which the taildrop is configured. + * Only Rx queues are supported for now + * @tc: Traffic class to apply this taildrop to + * @q_index: Index of the queue if the DPNI supports multiple queues for + * traffic distribution. Ignored if CONGESTION_POINT is not 0. + * @taildrop: Taildrop structure + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_get_taildrop(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + enum dpni_congestion_point cg_point, + enum dpni_queue_type qtype, + uint8_t tc, + uint8_t index, + struct dpni_taildrop *taildrop) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_get_taildrop *cmd_params; + struct dpni_rsp_get_taildrop *rsp_params; + uint8_t oal_lo, oal_hi; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TAILDROP, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_get_taildrop *)cmd.params; + cmd_params->congestion_point = cg_point; + cmd_params->qtype = qtype; + cmd_params->tc = tc; + cmd_params->index = index; + + /* send command to mc */ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_taildrop *)cmd.params; + taildrop->enable = dpni_get_field(rsp_params->enable_oal_lo, ENABLE); + taildrop->units = rsp_params->units; + taildrop->threshold = le32_to_cpu(rsp_params->threshold); + oal_lo = dpni_get_field(rsp_params->enable_oal_lo, OAL_LO); + oal_hi = dpni_get_field(rsp_params->oal_hi, OAL_HI); + taildrop->oal = oal_hi << DPNI_OAL_LO_SIZE | oal_lo; + + /* Fill the first 4 bits, 'oal' is a 2's complement value of 12 bits */ + if (taildrop->oal >= 0x0800) + taildrop->oal |= 0xF000; + + return 0; +} + +/** + * dpni_set_opr() - Set Order Restoration configuration. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @tc: Traffic class, in range 0 to NUM_TCS - 1 + * @index: Selects the specific queue out of the set allocated + * for the same TC. Value must be in range 0 to + * NUM_QUEUES - 1 + * @options: Configuration mode options + * can be OPR_OPT_CREATE or OPR_OPT_RETIRE + * @cfg: Configuration options for the OPR + * + * Return: '0' on Success; Error code otherwise. 
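+ *
+ * Example (illustrative sketch only; OPR_OPT_CREATE and struct opr_cfg come
+ * from the order restoration header, assumed to be fsl_dpopr.h, and the
+ * field values below are merely plausible settings, not recommendations):
+ *
+ *	struct opr_cfg ocfg = { 0 };
+ *	int err;
+ *
+ *	ocfg.oprrws = 3;
+ *	ocfg.oa = 0;
+ *	ocfg.olws = 0;
+ *	ocfg.oeane = 0;
+ *	ocfg.oloe = 0;
+ *	err = dpni_set_opr(mc_io, CMD_PRI_LOW, token, 0, 0,
+ *			   OPR_OPT_CREATE, &ocfg);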
+ */ +int dpni_set_opr(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t tc, + uint8_t index, + uint8_t options, + struct opr_cfg *cfg) +{ + struct dpni_cmd_set_opr *cmd_params; + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header( + DPNI_CMDID_SET_OPR, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_opr *)cmd.params; + cmd_params->tc_id = tc; + cmd_params->index = index; + cmd_params->options = options; + cmd_params->oloe = cfg->oloe; + cmd_params->oeane = cfg->oeane; + cmd_params->olws = cfg->olws; + cmd_params->oa = cfg->oa; + cmd_params->oprrws = cfg->oprrws; + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_get_opr() - Retrieve Order Restoration config and query. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @tc: Traffic class, in range 0 to NUM_TCS - 1 + * @index: Selects the specific queue out of the set allocated + * for the same TC. Value must be in range 0 to + * NUM_QUEUES - 1 + * @cfg: Returned OPR configuration + * @qry: Returned OPR query + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_get_opr(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t tc, + uint8_t index, + struct opr_cfg *cfg, + struct opr_qry *qry) +{ + struct dpni_rsp_get_opr *rsp_params; + struct dpni_cmd_get_opr *cmd_params; + struct mc_command cmd = { 0 }; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_OPR, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_get_opr *)cmd.params; + cmd_params->index = index; + cmd_params->tc_id = tc; + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_opr *)cmd.params; + cfg->oloe = rsp_params->oloe; + cfg->oeane = rsp_params->oeane; + cfg->olws = rsp_params->olws; + cfg->oa = rsp_params->oa; + cfg->oprrws = rsp_params->oprrws; + qry->rip = dpni_get_field(rsp_params->flags, RIP); + qry->enable = dpni_get_field(rsp_params->flags, OPR_ENABLE); + qry->nesn = le16_to_cpu(rsp_params->nesn); + qry->ndsn = le16_to_cpu(rsp_params->ndsn); + qry->ea_tseq = le16_to_cpu(rsp_params->ea_tseq); + qry->tseq_nlis = dpni_get_field(rsp_params->tseq_nlis, TSEQ_NLIS); + qry->ea_hseq = le16_to_cpu(rsp_params->ea_hseq); + qry->hseq_nlis = dpni_get_field(rsp_params->hseq_nlis, HSEQ_NLIS); + qry->ea_hptr = le16_to_cpu(rsp_params->ea_hptr); + qry->ea_tptr = le16_to_cpu(rsp_params->ea_tptr); + qry->opr_vid = le16_to_cpu(rsp_params->opr_vid); + qry->opr_id = le16_to_cpu(rsp_params->opr_id); + + return 0; +} + +/** + * dpni_set_rx_fs_dist() - Set Rx traffic class FS distribution + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @cfg: Distribution configuration + * If the FS is already enabled with a previous call the classification + * key will be changed but all the table rules are kept. If the + * existing rules do not match the key the results will not be + * predictable. It is the user responsibility to keep key integrity + * If cfg.enable is set to 1 the command will create a flow steering table + * and will classify packets according to this table. 
The packets + * that miss all the table rules will be classified according to + * settings made in dpni_set_rx_hash_dist() + * If cfg.enable is set to 0 the command will clear flow steering table. The + * packets will be classified according to settings made in + * dpni_set_rx_hash_dist() + */ +int dpni_set_rx_fs_dist(struct fsl_mc_io *mc_io, uint32_t cmd_flags, + uint16_t token, const struct dpni_rx_dist_cfg *cfg) +{ + struct dpni_cmd_set_rx_fs_dist *cmd_params; + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_FS_DIST, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_rx_fs_dist *)cmd.params; + cmd_params->dist_size = cpu_to_le16(cfg->dist_size); + dpni_set_field(cmd_params->enable, RX_FS_DIST_ENABLE, cfg->enable); + cmd_params->tc = cfg->tc; + cmd_params->miss_flow_id = cpu_to_le16(cfg->fs_miss_flow_id); + cmd_params->key_cfg_iova = cpu_to_le64(cfg->key_cfg_iova); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_set_rx_hash_dist() - Set Rx traffic class HASH distribution + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @cfg: Distribution configuration + * If cfg.enable is set to 1 the packets will be classified using a hash + * function based on the key received in cfg.key_cfg_iova parameter + * If cfg.enable is set to 0 the packets will be sent to the queue configured in + * dpni_set_rx_dist_default_queue() call + */ +int dpni_set_rx_hash_dist(struct fsl_mc_io *mc_io, uint32_t cmd_flags, + uint16_t token, const struct dpni_rx_dist_cfg *cfg) +{ + struct dpni_cmd_set_rx_hash_dist *cmd_params; + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_HASH_DIST, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_rx_hash_dist *)cmd.params; + cmd_params->dist_size = cpu_to_le16(cfg->dist_size); + dpni_set_field(cmd_params->enable, RX_FS_DIST_ENABLE, cfg->enable); + cmd_params->tc_id = cfg->tc; + cmd_params->key_cfg_iova = cpu_to_le64(cfg->key_cfg_iova); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_add_custom_tpid() - Configures a distinct Ethertype value + * (or TPID value) to indicate VLAN tag in addition to the common + * TPID values 0x8100 and 0x88A8 + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @tpid: New value for TPID + * + * Only two custom values are accepted. If the function is called for the third + * time it will return error. + * To replace an existing value use dpni_remove_custom_tpid() to remove + * a previous TPID and after that use again the function. + * + * Return: '0' on Success; Error code otherwise. 
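+ *
+ * Example (illustrative sketch only; CMD_PRI_LOW is assumed from the MC
+ * command header and the TPID values are arbitrary):
+ *
+ *	int err;
+ *
+ *	err = dpni_add_custom_tpid(mc_io, CMD_PRI_LOW, token, 0x9100);
+ *	if (err)
+ *		return err;
+ *	err = dpni_add_custom_tpid(mc_io, CMD_PRI_LOW, token, 0x9200);
+ *	if (err)
+ *		return err;
+ *
+ * A third call fails until one of the two values is freed with
+ * dpni_remove_custom_tpid().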
+ */ +int dpni_add_custom_tpid(struct fsl_mc_io *mc_io, uint32_t cmd_flags, + uint16_t token, uint16_t tpid) +{ + struct dpni_cmd_add_custom_tpid *cmd_params; + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_CUSTOM_TPID, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_add_custom_tpid *)cmd.params; + cmd_params->tpid = cpu_to_le16(tpid); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_remove_custom_tpid() - Removes a distinct Ethertype value added + * previously with dpni_add_custom_tpid() + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @tpid: New value for TPID + * + * Use this function when a TPID value added with dpni_add_custom_tpid() needs + * to be replaced. + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_remove_custom_tpid(struct fsl_mc_io *mc_io, uint32_t cmd_flags, + uint16_t token, uint16_t tpid) +{ + struct dpni_cmd_remove_custom_tpid *cmd_params; + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_CUSTOM_TPID, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_remove_custom_tpid *)cmd.params; + cmd_params->tpid = cpu_to_le16(tpid); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_get_custom_tpid() - Returns custom TPID (vlan tags) values configured + * to detect 802.1q frames + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @tpid: TPID values. Only nonzero members of the structure are valid. + * + * Return: '0' on Success; Error code otherwise. 
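+ *
+ * Example (illustrative sketch only; CMD_PRI_LOW is assumed from the MC
+ * command header):
+ *
+ *	struct dpni_custom_tpid_cfg tpids = { 0 };
+ *	int err;
+ *
+ *	err = dpni_get_custom_tpid(mc_io, CMD_PRI_LOW, token, &tpids);
+ *	if (err)
+ *		return err;
+ *
+ * Only the members of 'tpids' (tpid1, tpid2) that are different from zero
+ * carry a configured value.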
+ */ +int dpni_get_custom_tpid(struct fsl_mc_io *mc_io, uint32_t cmd_flags, + uint16_t token, struct dpni_custom_tpid_cfg *tpid) +{ + struct dpni_rsp_get_custom_tpid *rsp_params; + struct mc_command cmd = { 0 }; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_CUSTOM_TPID, + cmd_flags, + token); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* read command response */ + rsp_params = (struct dpni_rsp_get_custom_tpid *)cmd.params; + tpid->tpid1 = le16_to_cpu(rsp_params->tpid1); + tpid->tpid2 = le16_to_cpu(rsp_params->tpid2); + + return err; +} + +int dpni_load_sw_sequence(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + struct dpni_load_ss_cfg *cfg) +{ + struct dpni_load_sw_sequence *cmd_params; + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_LOAD_SW_SEQUENCE, + cmd_flags, + token); + cmd_params = (struct dpni_load_sw_sequence *)cmd.params; + cmd_params->dest = cfg->dest; + cmd_params->ss_offset = cpu_to_le16(cfg->ss_offset); + cmd_params->ss_size = cpu_to_le16(cfg->ss_size); + cmd_params->ss_iova = cpu_to_le64(cfg->ss_iova); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +int dpni_enable_sw_sequence(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + struct dpni_enable_ss_cfg *cfg) +{ + struct dpni_enable_sw_sequence *cmd_params; + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_ENABLE_SW_SEQUENCE, + cmd_flags, + token); + cmd_params = (struct dpni_enable_sw_sequence *)cmd.params; + cmd_params->dest = cfg->dest; + cmd_params->set_start = cfg->set_start; + cmd_params->hxs = cpu_to_le16(cfg->hxs); + cmd_params->ss_offset = cpu_to_le16(cfg->ss_offset); + cmd_params->param_offset = cfg->param_offset; + cmd_params->param_size = cfg->param_size; + cmd_params->param_iova = cpu_to_le64(cfg->param_iova); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_get_sw_sequence_layout() - Get the soft sequence layout + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @src: Source of the layout (WRIOP Rx or Tx) + * @ss_layout_iova: I/O virtual address of 264 bytes DMA-able memory + * + * warning: After calling this function, call dpni_extract_sw_sequence_layout() + * to get the layout. + * + * Return: '0' on Success; error code otherwise. 
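+ *
+ * Example (illustrative sketch only; DPNI_SS_INGRESS is assumed from the
+ * dpni_soft_sequence_dest enum, CMD_PRI_LOW from the MC command header,
+ * and 'buf'/'buf_iova' are assumed to describe 264 bytes of zeroed,
+ * DMA-able memory and its I/O virtual address):
+ *
+ *	struct dpni_sw_sequence_layout layout = { 0 };
+ *	int err;
+ *
+ *	err = dpni_get_sw_sequence_layout(mc_io, CMD_PRI_LOW, token,
+ *					  DPNI_SS_INGRESS, buf_iova);
+ *	if (err)
+ *		return err;
+ *	dpni_extract_sw_sequence_layout(&layout, buf);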
+ */ +int dpni_get_sw_sequence_layout(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + enum dpni_soft_sequence_dest src, + uint64_t ss_layout_iova) +{ + struct dpni_get_sw_sequence_layout *cmd_params; + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_SW_SEQUENCE_LAYOUT, + cmd_flags, + token); + + cmd_params = (struct dpni_get_sw_sequence_layout *)cmd.params; + cmd_params->src = src; + cmd_params->layout_iova = cpu_to_le64(ss_layout_iova); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_extract_sw_sequence_layout() - extract the software sequence layout + * @layout: software sequence layout + * @sw_sequence_layout_buf: Zeroed 264 bytes of memory before mapping it + * to DMA + * + * This function has to be called after dpni_get_sw_sequence_layout + * + */ +void dpni_extract_sw_sequence_layout(struct dpni_sw_sequence_layout *layout, + const uint8_t *sw_sequence_layout_buf) +{ + const struct dpni_sw_sequence_layout_entry *ext_params; + int i; + uint16_t ss_size, ss_offset; + + ext_params = (const struct dpni_sw_sequence_layout_entry *) + sw_sequence_layout_buf; + + for (i = 0; i < DPNI_SW_SEQUENCE_LAYOUT_SIZE; i++) { + ss_offset = le16_to_cpu(ext_params[i].ss_offset); + ss_size = le16_to_cpu(ext_params[i].ss_size); + + if (ss_offset == 0 && ss_size == 0) { + layout->num_ss = i; + return; + } + + layout->ss[i].ss_offset = ss_offset; + layout->ss[i].ss_size = ss_size; + layout->ss[i].param_offset = ext_params[i].param_offset; + layout->ss[i].param_size = ext_params[i].param_size; + } +} diff --git a/src/spdk/dpdk/drivers/net/dpaa2/mc/dprtc.c b/src/spdk/dpdk/drivers/net/dpaa2/mc/dprtc.c new file mode 100644 index 000000000..42ac89150 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/dpaa2/mc/dprtc.c @@ -0,0 +1,523 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) + * Copyright 2019 NXP + */ +#include +#include +#include +#include + +/** @addtogroup dprtc + * @{ + */ + +/** + * dprtc_open() - Open a control session for the specified object. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @dprtc_id: DPRTC unique ID + * @token: Returned token; use in subsequent API calls + * + * This function can be used to open a control session for an + * already created object; an object may have been declared in + * the DPL or by calling the dprtc_create function. + * This function returns a unique authentication token, + * associated with the specific object ID and the specific MC + * portal; this token must be used in all subsequent commands for + * this specific object + * + * Return: '0' on Success; Error code otherwise. 
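+ *
+ * Example (illustrative sketch only; CMD_PRI_LOW is assumed from the MC
+ * command header and 0 is used as the DPRTC object id):
+ *
+ *	uint16_t token;
+ *	uint64_t ns;
+ *	int err;
+ *
+ *	err = dprtc_open(mc_io, CMD_PRI_LOW, 0, &token);
+ *	if (err)
+ *		return err;
+ *	err = dprtc_get_time(mc_io, CMD_PRI_LOW, token, &ns);
+ *	dprtc_close(mc_io, CMD_PRI_LOW, token);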
+ */ +int dprtc_open(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + int dprtc_id, + uint16_t *token) +{ + struct dprtc_cmd_open *cmd_params; + struct mc_command cmd = { 0 }; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_OPEN, + cmd_flags, + 0); + cmd_params = (struct dprtc_cmd_open *)cmd.params; + cmd_params->dprtc_id = cpu_to_le32(dprtc_id); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + *token = mc_cmd_hdr_read_token(&cmd); + + return err; +} + +/** + * dprtc_close() - Close the control session of the object + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPRTC object + * + * After this function is called, no further operations are + * allowed on the object without opening a new control session. + * + * Return: '0' on Success; Error code otherwise. + */ +int dprtc_close(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token) +{ + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_CLOSE, cmd_flags, + token); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dprtc_create() - Create the DPRTC object. + * @mc_io: Pointer to MC portal's I/O object + * @dprc_token: Parent container token; '0' for default container + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @cfg: Configuration structure + * @obj_id: Returned object id + * + * Create the DPRTC object, allocate required resources and + * perform required initialization. + * + * The function accepts an authentication token of a parent + * container that this object should be assigned to. The token + * can be '0' so the object will be assigned to the default container. + * The newly created object can be opened with the returned + * object id and using the container's associated tokens and MC portals. + * + * Return: '0' on Success; Error code otherwise. + */ +int dprtc_create(struct fsl_mc_io *mc_io, + uint16_t dprc_token, + uint32_t cmd_flags, + const struct dprtc_cfg *cfg, + uint32_t *obj_id) +{ + struct mc_command cmd = { 0 }; + int err; + + (void)(cfg); /* unused */ + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_CREATE, + cmd_flags, + dprc_token); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + *obj_id = mc_cmd_read_object_id(&cmd); + + return 0; +} + +/** + * dprtc_destroy() - Destroy the DPRTC object and release all its resources. + * @mc_io: Pointer to MC portal's I/O object + * @dprc_token: Parent container token; '0' for default container + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @object_id: The object id; it must be a valid id within the container that + * created this object; + * + * The function accepts the authentication token of the parent container that + * created the object (not the one that currently owns the object). The object + * is searched within parent using the provided 'object_id'. + * All tokens to the object must be closed before calling destroy. + * + * Return: '0' on Success; error code otherwise. 
+ */ +int dprtc_destroy(struct fsl_mc_io *mc_io, + uint16_t dprc_token, + uint32_t cmd_flags, + uint32_t object_id) +{ + struct dprtc_cmd_destroy *cmd_params; + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_DESTROY, + cmd_flags, + dprc_token); + cmd_params = (struct dprtc_cmd_destroy *)cmd.params; + cmd_params->object_id = cpu_to_le32(object_id); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dprtc_enable() - Enable the DPRTC. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPRTC object + * + * Return: '0' on Success; Error code otherwise. + */ +int dprtc_enable(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token) +{ + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_ENABLE, cmd_flags, + token); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dprtc_disable() - Disable the DPRTC. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPRTC object + * + * Return: '0' on Success; Error code otherwise. + */ +int dprtc_disable(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token) +{ + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_DISABLE, + cmd_flags, + token); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dprtc_is_enabled() - Check if the DPRTC is enabled. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPRTC object + * @en: Returns '1' if object is enabled; '0' otherwise + * + * Return: '0' on Success; Error code otherwise. + */ +int dprtc_is_enabled(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + int *en) +{ + struct dprtc_rsp_is_enabled *rsp_params; + struct mc_command cmd = { 0 }; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_IS_ENABLED, cmd_flags, + token); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dprtc_rsp_is_enabled *)cmd.params; + *en = dprtc_get_field(rsp_params->en, ENABLE); + + return 0; +} + +/** + * dprtc_reset() - Reset the DPRTC, returns the object to initial state. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPRTC object + * + * Return: '0' on Success; Error code otherwise. + */ +int dprtc_reset(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token) +{ + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_RESET, + cmd_flags, + token); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dprtc_get_attributes - Retrieve DPRTC attributes. + * + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPRTC object + * @attr: Returned object's attributes + * + * Return: '0' on Success; Error code otherwise. 
+ */ +int dprtc_get_attributes(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + struct dprtc_attr *attr) +{ + struct dprtc_rsp_get_attributes *rsp_params; + struct mc_command cmd = { 0 }; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_ATTR, + cmd_flags, + token); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dprtc_rsp_get_attributes *)cmd.params; + attr->id = le32_to_cpu(rsp_params->id); + attr->paddr = le32_to_cpu(rsp_params->paddr); + attr->little_endian = + dprtc_get_field(rsp_params->little_endian, ENDIANNESS); + return 0; +} + +/** + * dprtc_set_clock_offset() - Sets the clock's offset + * (usually relative to another clock). + * + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPRTC object + * @offset: New clock offset (in nanoseconds). + * + * Return: '0' on Success; Error code otherwise. + */ +int dprtc_set_clock_offset(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + int64_t offset) +{ + struct dprtc_cmd_set_clock_offset *cmd_params; + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_CLOCK_OFFSET, + cmd_flags, + token); + cmd_params = (struct dprtc_cmd_set_clock_offset *)cmd.params; + cmd_params->offset = cpu_to_le64(offset); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dprtc_set_freq_compensation() - Sets a new frequency compensation value. + * + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPRTC object + * @freq_compensation: The new frequency compensation value to set. + * + * Return: '0' on Success; Error code otherwise. + */ +int dprtc_set_freq_compensation(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint32_t freq_compensation) +{ + struct dprtc_get_freq_compensation *cmd_params; + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_FREQ_COMPENSATION, + cmd_flags, + token); + cmd_params = (struct dprtc_get_freq_compensation *)cmd.params; + cmd_params->freq_compensation = cpu_to_le32(freq_compensation); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dprtc_get_freq_compensation() - Retrieves the frequency compensation value + * + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPRTC object + * @freq_compensation: Frequency compensation value + * + * Return: '0' on Success; Error code otherwise. + */ +int dprtc_get_freq_compensation(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint32_t *freq_compensation) +{ + struct dprtc_get_freq_compensation *rsp_params; + struct mc_command cmd = { 0 }; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_FREQ_COMPENSATION, + cmd_flags, + token); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dprtc_get_freq_compensation *)cmd.params; + *freq_compensation = le32_to_cpu(rsp_params->freq_compensation); + + return 0; +} + +/** + * dprtc_get_time() - Returns the current RTC time. 
+ * + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPRTC object + * @time: Current RTC time. + * + * Return: '0' on Success; Error code otherwise. + */ +int dprtc_get_time(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint64_t *time) +{ + struct dprtc_time *rsp_params; + struct mc_command cmd = { 0 }; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_TIME, + cmd_flags, + token); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dprtc_time *)cmd.params; + *time = le64_to_cpu(rsp_params->time); + + return 0; +} + +/** + * dprtc_set_time() - Updates current RTC time. + * + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPRTC object + * @time: New RTC time. + * + * Return: '0' on Success; Error code otherwise. + */ +int dprtc_set_time(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint64_t time) +{ + struct dprtc_time *cmd_params; + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_TIME, + cmd_flags, + token); + cmd_params = (struct dprtc_time *)cmd.params; + cmd_params->time = cpu_to_le64(time); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dprtc_set_alarm() - Defines and sets alarm. + * + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPRTC object + * @time: In nanoseconds, the time when the alarm + * should go off - must be a multiple of + * 1 microsecond + * + * Return: '0' on Success; Error code otherwise. + */ +int dprtc_set_alarm(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, uint64_t time) +{ + struct dprtc_time *cmd_params; + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_ALARM, + cmd_flags, + token); + cmd_params = (struct dprtc_time *)cmd.params; + cmd_params->time = cpu_to_le64(time); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dprtc_get_api_version() - Get Data Path Real Time Counter API version + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @major_ver: Major version of data path real time counter API + * @minor_ver: Minor version of data path real time counter API + * + * Return: '0' on Success; Error code otherwise. 
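+ *
+ * Example (illustrative sketch only; DPRTC_VER_MAJOR is assumed to be the
+ * build-time constant from fsl_dprtc_cmd.h and CMD_PRI_LOW comes from the
+ * MC command header):
+ *
+ *	uint16_t major, minor;
+ *	int err;
+ *
+ *	err = dprtc_get_api_version(mc_io, CMD_PRI_LOW, &major, &minor);
+ *	if (err)
+ *		return err;
+ *	if (major != DPRTC_VER_MAJOR)
+ *		return -1;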
+ */ +int dprtc_get_api_version(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t *major_ver, + uint16_t *minor_ver) +{ + struct dprtc_rsp_get_api_version *rsp_params; + struct mc_command cmd = { 0 }; + int err; + + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_API_VERSION, + cmd_flags, + 0); + + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + rsp_params = (struct dprtc_rsp_get_api_version *)cmd.params; + *major_ver = le16_to_cpu(rsp_params->major); + *minor_ver = le16_to_cpu(rsp_params->minor); + + return 0; +} diff --git a/src/spdk/dpdk/drivers/net/dpaa2/mc/fsl_dpdmux.h b/src/spdk/dpdk/drivers/net/dpaa2/mc/fsl_dpdmux.h new file mode 100644 index 000000000..accd1ef5c --- /dev/null +++ b/src/spdk/dpdk/drivers/net/dpaa2/mc/fsl_dpdmux.h @@ -0,0 +1,410 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) + * + * Copyright 2013-2016 Freescale Semiconductor Inc. + * Copyright 2018-2019 NXP + * + */ +#ifndef __FSL_DPDMUX_H +#define __FSL_DPDMUX_H + +#include + +struct fsl_mc_io; + +/** @addtogroup dpdmux Data Path Demux API + * Contains API for handling DPDMUX topology and functionality + * @{ + */ + +int dpdmux_open(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + int dpdmux_id, + uint16_t *token); + +int dpdmux_close(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token); + +/** + * DPDMUX general options + */ + +/** + * Enable bridging between internal interfaces + */ +#define DPDMUX_OPT_BRIDGE_EN 0x0000000000000002ULL + +/** + * Mask support for classification + */ +#define DPDMUX_OPT_CLS_MASK_SUPPORT 0x0000000000000020ULL + +#define DPDMUX_IRQ_INDEX_IF 0x0000 +#define DPDMUX_IRQ_INDEX 0x0001 + +/** + * IRQ event - Indicates that the link state changed + */ +#define DPDMUX_IRQ_EVENT_LINK_CHANGED 0x0001 + +/** + * enum dpdmux_manip - DPDMUX manipulation operations + * @DPDMUX_MANIP_NONE: No manipulation on frames + * @DPDMUX_MANIP_ADD_REMOVE_S_VLAN: Add S-VLAN on egress, remove it on ingress + */ +enum dpdmux_manip { + DPDMUX_MANIP_NONE = 0x0, + DPDMUX_MANIP_ADD_REMOVE_S_VLAN = 0x1 +}; + +/** + * enum dpdmux_method - DPDMUX method options + * @DPDMUX_METHOD_NONE: no DPDMUX method + * @DPDMUX_METHOD_C_VLAN_MAC: DPDMUX based on C-VLAN and MAC address + * @DPDMUX_METHOD_MAC: DPDMUX based on MAC address + * @DPDMUX_METHOD_C_VLAN: DPDMUX based on C-VLAN + * @DPDMUX_METHOD_S_VLAN: DPDMUX based on S-VLAN + */ +enum dpdmux_method { + DPDMUX_METHOD_NONE = 0x0, + DPDMUX_METHOD_C_VLAN_MAC = 0x1, + DPDMUX_METHOD_MAC = 0x2, + DPDMUX_METHOD_C_VLAN = 0x3, + DPDMUX_METHOD_S_VLAN = 0x4, + DPDMUX_METHOD_CUSTOM = 0x5, +}; + +/** + * struct dpdmux_cfg - DPDMUX configuration parameters + * @method: Defines the operation method for the DPDMUX address table + * @manip: Required manipulation operation + * @num_ifs: Number of interfaces (excluding the uplink interface) + * @adv: Advanced parameters; default is all zeros; + * use this structure to change default settings + * @adv.options: DPDMUX options - combination of 'DPDMUX_OPT_' flags. + * @adv.max_dmat_entries: Maximum entries in DPDMUX address table + * 0 - indicates default: 64 entries per interface. + * @adv.max_mc_groups: Number of multicast groups in DPDMUX table + * 0 - indicates default: 32 multicast groups. + * @adv.max_vlan_ids: Maximum vlan ids allowed in the system - + * relevant only case of working in mac+vlan method. + * 0 - indicates default 16 vlan ids. 
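+ *
+ * Example (illustrative sketch only; 'dprc_token' and CMD_PRI_LOW are
+ * assumed from the caller's environment): a demux with one uplink and two
+ * downlink interfaces that classifies frames by MAC address:
+ *
+ *	struct dpdmux_cfg mux_cfg = { 0 };
+ *	uint32_t obj_id;
+ *	int err;
+ *
+ *	mux_cfg.method = DPDMUX_METHOD_MAC;
+ *	mux_cfg.manip = DPDMUX_MANIP_NONE;
+ *	mux_cfg.num_ifs = 2;
+ *	err = dpdmux_create(mc_io, dprc_token, CMD_PRI_LOW, &mux_cfg,
+ *			    &obj_id);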
+ */ +struct dpdmux_cfg { + enum dpdmux_method method; + enum dpdmux_manip manip; + uint16_t num_ifs; + struct { + uint64_t options; + uint16_t max_dmat_entries; + uint16_t max_mc_groups; + uint16_t max_vlan_ids; + } adv; +}; + +int dpdmux_create(struct fsl_mc_io *mc_io, + uint16_t dprc_token, + uint32_t cmd_flags, + const struct dpdmux_cfg *cfg, + uint32_t *obj_id); + +int dpdmux_destroy(struct fsl_mc_io *mc_io, + uint16_t dprc_token, + uint32_t cmd_flags, + uint32_t object_id); + +int dpdmux_enable(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token); + +int dpdmux_disable(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token); + +int dpdmux_is_enabled(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + int *en); + +int dpdmux_reset(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token); + +/** + * struct dpdmux_attr - Structure representing DPDMUX attributes + * @id: DPDMUX object ID + * @options: Configuration options (bitmap) + * @method: DPDMUX address table method + * @manip: DPDMUX manipulation type + * @num_ifs: Number of interfaces (excluding the uplink interface) + * @mem_size: DPDMUX frame storage memory size + */ +struct dpdmux_attr { + int id; + uint64_t options; + enum dpdmux_method method; + enum dpdmux_manip manip; + uint16_t num_ifs; + uint16_t mem_size; +}; + +int dpdmux_get_attributes(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + struct dpdmux_attr *attr); + +int dpdmux_set_max_frame_length(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint16_t max_frame_length); + +/** + * enum dpdmux_counter_type - Counter types + * @DPDMUX_CNT_ING_FRAME: Counts ingress frames + * @DPDMUX_CNT_ING_BYTE: Counts ingress bytes + * @DPDMUX_CNT_ING_FLTR_FRAME: Counts filtered ingress frames + * @DPDMUX_CNT_ING_FRAME_DISCARD: Counts discarded ingress frames + * @DPDMUX_CNT_ING_MCAST_FRAME: Counts ingress multicast frames + * @DPDMUX_CNT_ING_MCAST_BYTE: Counts ingress multicast bytes + * @DPDMUX_CNT_ING_BCAST_FRAME: Counts ingress broadcast frames + * @DPDMUX_CNT_ING_BCAST_BYTES: Counts ingress broadcast bytes + * @DPDMUX_CNT_EGR_FRAME: Counts egress frames + * @DPDMUX_CNT_EGR_BYTE: Counts egress bytes + * @DPDMUX_CNT_EGR_FRAME_DISCARD: Counts discarded egress frames + */ +enum dpdmux_counter_type { + DPDMUX_CNT_ING_FRAME = 0x0, + DPDMUX_CNT_ING_BYTE = 0x1, + DPDMUX_CNT_ING_FLTR_FRAME = 0x2, + DPDMUX_CNT_ING_FRAME_DISCARD = 0x3, + DPDMUX_CNT_ING_MCAST_FRAME = 0x4, + DPDMUX_CNT_ING_MCAST_BYTE = 0x5, + DPDMUX_CNT_ING_BCAST_FRAME = 0x6, + DPDMUX_CNT_ING_BCAST_BYTES = 0x7, + DPDMUX_CNT_EGR_FRAME = 0x8, + DPDMUX_CNT_EGR_BYTE = 0x9, + DPDMUX_CNT_EGR_FRAME_DISCARD = 0xa +}; + +/** + * enum dpdmux_accepted_frames_type - DPDMUX frame types + * @DPDMUX_ADMIT_ALL: The device accepts VLAN tagged, untagged and + * priority-tagged frames + * @DPDMUX_ADMIT_ONLY_VLAN_TAGGED: The device discards untagged frames or + * priority-tagged frames that are received on this + * interface + * @DPDMUX_ADMIT_ONLY_UNTAGGED: Untagged frames or priority-tagged frames + * received on this interface are accepted + */ +enum dpdmux_accepted_frames_type { + DPDMUX_ADMIT_ALL = 0, + DPDMUX_ADMIT_ONLY_VLAN_TAGGED = 1, + DPDMUX_ADMIT_ONLY_UNTAGGED = 2 +}; + +/** + * enum dpdmux_action - DPDMUX action for un-accepted frames + * @DPDMUX_ACTION_DROP: Drop un-accepted frames + * @DPDMUX_ACTION_REDIRECT_TO_CTRL: Redirect un-accepted frames to the + * control interface + */ +enum dpdmux_action { + DPDMUX_ACTION_DROP = 0, + 
DPDMUX_ACTION_REDIRECT_TO_CTRL = 1 +}; + +/** + * struct dpdmux_accepted_frames - Frame types configuration + * @type: Defines ingress accepted frames + * @unaccept_act: Defines action on frames not accepted + */ +struct dpdmux_accepted_frames { + enum dpdmux_accepted_frames_type type; + enum dpdmux_action unaccept_act; +}; + +int dpdmux_if_set_accepted_frames(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint16_t if_id, + const struct dpdmux_accepted_frames *cfg); + +/** + * struct dpdmux_if_attr - Structure representing frame types configuration + * @rate: Configured interface rate (in bits per second) + * @enabled: Indicates if interface is enabled + * @accept_frame_type: Indicates type of accepted frames for the interface + */ +struct dpdmux_if_attr { + uint32_t rate; + int enabled; + int is_default; + enum dpdmux_accepted_frames_type accept_frame_type; +}; + +int dpdmux_if_get_attributes(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint16_t if_id, + struct dpdmux_if_attr *attr); + +int dpdmux_if_enable(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint16_t if_id); + +int dpdmux_if_disable(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint16_t if_id); + +/** + * struct dpdmux_l2_rule - Structure representing L2 rule + * @mac_addr: MAC address + * @vlan_id: VLAN ID + */ +struct dpdmux_l2_rule { + uint8_t mac_addr[6]; + uint16_t vlan_id; +}; + +int dpdmux_if_remove_l2_rule(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint16_t if_id, + const struct dpdmux_l2_rule *rule); + +int dpdmux_if_add_l2_rule(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint16_t if_id, + const struct dpdmux_l2_rule *rule); + +int dpdmux_if_get_counter(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint16_t if_id, + enum dpdmux_counter_type counter_type, + uint64_t *counter); + +int dpdmux_ul_reset_counters(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token); + +/** + * Enable auto-negotiation + */ +#define DPDMUX_LINK_OPT_AUTONEG 0x0000000000000001ULL +/** + * Enable half-duplex mode + */ +#define DPDMUX_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL +/** + * Enable pause frames + */ +#define DPDMUX_LINK_OPT_PAUSE 0x0000000000000004ULL +/** + * Enable a-symmetric pause frames + */ +#define DPDMUX_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL + +/** + * struct dpdmux_link_cfg - Structure representing DPDMUX link configuration + * @rate: Rate + * @options: Mask of available options; use 'DPDMUX_LINK_OPT_' values + */ +struct dpdmux_link_cfg { + uint32_t rate; + uint64_t options; + uint64_t advertising; +}; + +int dpdmux_if_set_link_cfg(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint16_t if_id, + struct dpdmux_link_cfg *cfg); +/** + * struct dpdmux_link_state - Structure representing DPDMUX link state + * @rate: Rate + * @options: Mask of available options; use 'DPDMUX_LINK_OPT_' values + * @up: 0 - down, 1 - up + * @state_valid: Ignore/Update the state of the link + * @supported: Speeds capability of the phy (bitmap) + * @advertising: Speeds that are advertised for autoneg (bitmap) + */ +struct dpdmux_link_state { + uint32_t rate; + uint64_t options; + int up; + int state_valid; + uint64_t supported; + uint64_t advertising; +}; + +int dpdmux_if_get_link_state(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint16_t if_id, + struct dpdmux_link_state *state); + +int dpdmux_if_set_default(struct fsl_mc_io *mc_io, + uint32_t 
cmd_flags, + uint16_t token, + uint16_t if_id); + +int dpdmux_if_get_default(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint16_t *if_id); + +int dpdmux_set_custom_key(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint64_t key_cfg_iova); + +/** + * struct dpdmux_rule_cfg - Custom classification rule. + * + * @key_iova: DMA address of buffer storing the look-up value + * @mask_iova: DMA address of the mask used for TCAM classification + * @key_size: size, in bytes, of the look-up value. This must match the size + * of the look-up key defined using dpdmux_set_custom_key, otherwise the + * entry will never be hit + */ +struct dpdmux_rule_cfg { + uint64_t key_iova; + uint64_t mask_iova; + uint8_t key_size; +}; + +/** + * struct dpdmux_cls_action - Action to execute for frames matching the + * classification entry + * + * @dest_if: Interface to forward the frames to. Port numbering is similar to + * the one used to connect interfaces: + * - 0 is the uplink port, + * - all others are downlink ports. + */ +struct dpdmux_cls_action { + uint16_t dest_if; +}; + +int dpdmux_add_custom_cls_entry(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + struct dpdmux_rule_cfg *rule, + struct dpdmux_cls_action *action); + +int dpdmux_remove_custom_cls_entry(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + struct dpdmux_rule_cfg *rule); + +int dpdmux_get_api_version(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t *major_ver, + uint16_t *minor_ver); + +#endif /* __FSL_DPDMUX_H */ diff --git a/src/spdk/dpdk/drivers/net/dpaa2/mc/fsl_dpdmux_cmd.h b/src/spdk/dpdk/drivers/net/dpaa2/mc/fsl_dpdmux_cmd.h new file mode 100644 index 000000000..a60b2ebe3 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/dpaa2/mc/fsl_dpdmux_cmd.h @@ -0,0 +1,221 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) + * + * Copyright 2013-2016 Freescale Semiconductor Inc. 
+ * Copyright 2018-2019 NXP + * + */ +#ifndef _FSL_DPDMUX_CMD_H +#define _FSL_DPDMUX_CMD_H + +/* DPDMUX Version */ +#define DPDMUX_VER_MAJOR 6 +#define DPDMUX_VER_MINOR 3 + +#define DPDMUX_CMD_BASE_VERSION 1 +#define DPDMUX_CMD_VERSION_2 2 +#define DPDMUX_CMD_ID_OFFSET 4 + +#define DPDMUX_CMD(id) (((id) << DPDMUX_CMD_ID_OFFSET) |\ + DPDMUX_CMD_BASE_VERSION) +#define DPDMUX_CMD_V2(id) (((id) << DPDMUX_CMD_ID_OFFSET) | \ + DPDMUX_CMD_VERSION_2) + +/* Command IDs */ +#define DPDMUX_CMDID_CLOSE DPDMUX_CMD(0x800) +#define DPDMUX_CMDID_OPEN DPDMUX_CMD(0x806) +#define DPDMUX_CMDID_CREATE DPDMUX_CMD(0x906) +#define DPDMUX_CMDID_DESTROY DPDMUX_CMD(0x986) +#define DPDMUX_CMDID_GET_API_VERSION DPDMUX_CMD(0xa06) + +#define DPDMUX_CMDID_ENABLE DPDMUX_CMD(0x002) +#define DPDMUX_CMDID_DISABLE DPDMUX_CMD(0x003) +#define DPDMUX_CMDID_GET_ATTR DPDMUX_CMD(0x004) +#define DPDMUX_CMDID_RESET DPDMUX_CMD(0x005) +#define DPDMUX_CMDID_IS_ENABLED DPDMUX_CMD(0x006) + +#define DPDMUX_CMDID_SET_MAX_FRAME_LENGTH DPDMUX_CMD(0x0a1) + +#define DPDMUX_CMDID_UL_RESET_COUNTERS DPDMUX_CMD(0x0a3) + +#define DPDMUX_CMDID_IF_SET_ACCEPTED_FRAMES DPDMUX_CMD(0x0a7) +#define DPDMUX_CMDID_IF_GET_ATTR DPDMUX_CMD(0x0a8) +#define DPDMUX_CMDID_IF_ENABLE DPDMUX_CMD(0x0a9) +#define DPDMUX_CMDID_IF_DISABLE DPDMUX_CMD(0x0aa) + +#define DPDMUX_CMDID_IF_ADD_L2_RULE DPDMUX_CMD(0x0b0) +#define DPDMUX_CMDID_IF_REMOVE_L2_RULE DPDMUX_CMD(0x0b1) +#define DPDMUX_CMDID_IF_GET_COUNTER DPDMUX_CMD(0x0b2) +#define DPDMUX_CMDID_IF_SET_LINK_CFG DPDMUX_CMD_V2(0x0b3) +#define DPDMUX_CMDID_IF_GET_LINK_STATE DPDMUX_CMD_V2(0x0b4) + +#define DPDMUX_CMDID_SET_CUSTOM_KEY DPDMUX_CMD(0x0b5) +#define DPDMUX_CMDID_ADD_CUSTOM_CLS_ENTRY DPDMUX_CMD(0x0b6) +#define DPDMUX_CMDID_REMOVE_CUSTOM_CLS_ENTRY DPDMUX_CMD(0x0b7) + +#define DPDMUX_CMDID_IF_SET_DEFAULT DPDMUX_CMD(0x0b8) +#define DPDMUX_CMDID_IF_GET_DEFAULT DPDMUX_CMD(0x0b9) + +#define DPDMUX_MASK(field) \ + GENMASK(DPDMUX_##field##_SHIFT + DPDMUX_##field##_SIZE - 1, \ + DPDMUX_##field##_SHIFT) +#define dpdmux_set_field(var, field, val) \ + ((var) |= (((val) << DPDMUX_##field##_SHIFT) & DPDMUX_MASK(field))) +#define dpdmux_get_field(var, field) \ + (((var) & DPDMUX_MASK(field)) >> DPDMUX_##field##_SHIFT) + +#pragma pack(push, 1) +struct dpdmux_cmd_open { + uint32_t dpdmux_id; +}; + +struct dpdmux_cmd_create { + uint8_t method; + uint8_t manip; + uint16_t num_ifs; + uint32_t pad; + + uint16_t adv_max_dmat_entries; + uint16_t adv_max_mc_groups; + uint16_t adv_max_vlan_ids; + uint16_t pad1; + + uint64_t options; +}; + +struct dpdmux_cmd_destroy { + uint32_t dpdmux_id; +}; + +#define DPDMUX_ENABLE_SHIFT 0 +#define DPDMUX_ENABLE_SIZE 1 +#define DPDMUX_IS_DEFAULT_SHIFT 1 +#define DPDMUX_IS_DEFAULT_SIZE 1 + +struct dpdmux_rsp_is_enabled { + uint8_t en; +}; + +struct dpdmux_rsp_get_attr { + uint8_t method; + uint8_t manip; + uint16_t num_ifs; + uint16_t mem_size; + uint16_t pad; + + uint64_t pad1; + + uint32_t id; + uint32_t pad2; + + uint64_t options; +}; + +struct dpdmux_cmd_set_max_frame_length { + uint16_t max_frame_length; +}; + +#define DPDMUX_ACCEPTED_FRAMES_TYPE_SHIFT 0 +#define DPDMUX_ACCEPTED_FRAMES_TYPE_SIZE 4 +#define DPDMUX_UNACCEPTED_FRAMES_ACTION_SHIFT 4 +#define DPDMUX_UNACCEPTED_FRAMES_ACTION_SIZE 4 + +struct dpdmux_cmd_if_set_accepted_frames { + uint16_t if_id; + uint8_t frames_options; +}; + +struct dpdmux_cmd_if { + uint16_t if_id; +}; + +struct dpdmux_rsp_if_get_attr { + uint8_t pad[3]; + uint8_t enabled; + uint8_t pad1[3]; + uint8_t accepted_frames_type; + uint32_t rate; +}; + +struct 
dpdmux_cmd_if_l2_rule { + uint16_t if_id; + uint8_t mac_addr5; + uint8_t mac_addr4; + uint8_t mac_addr3; + uint8_t mac_addr2; + uint8_t mac_addr1; + uint8_t mac_addr0; + + uint32_t pad; + uint16_t vlan_id; +}; + +struct dpdmux_cmd_if_get_counter { + uint16_t if_id; + uint8_t counter_type; +}; + +struct dpdmux_rsp_if_get_counter { + uint64_t pad; + uint64_t counter; +}; + +struct dpdmux_cmd_if_set_link_cfg { + uint16_t if_id; + uint16_t pad[3]; + + uint32_t rate; + uint32_t pad1; + + uint64_t options; + uint64_t advertising; +}; + +struct dpdmux_cmd_if_get_link_state { + uint16_t if_id; +}; + +#define DPDMUX_UP_SHIFT 0 +#define DPDMUX_UP_SIZE 1 +#define DPDMUX_STATE_VALID_SHIFT 1 +#define DPDMUX_STATE_VALID_SIZE 1 +struct dpdmux_rsp_if_get_link_state { + uint32_t pad; + uint8_t up; + uint8_t pad1[3]; + + uint32_t rate; + uint32_t pad2; + + uint64_t options; + uint64_t supported; + uint64_t advertising; +}; + +struct dpdmux_rsp_get_api_version { + uint16_t major; + uint16_t minor; +}; + +struct dpdmux_set_custom_key { + uint64_t pad[6]; + uint64_t key_cfg_iova; +}; + +struct dpdmux_cmd_add_custom_cls_entry { + uint8_t pad[3]; + uint8_t key_size; + uint16_t pad1; + uint16_t dest_if; + uint64_t key_iova; + uint64_t mask_iova; +}; + +struct dpdmux_cmd_remove_custom_cls_entry { + uint8_t pad[3]; + uint8_t key_size; + uint32_t pad1; + uint64_t key_iova; + uint64_t mask_iova; +}; +#pragma pack(pop) +#endif /* _FSL_DPDMUX_CMD_H */ diff --git a/src/spdk/dpdk/drivers/net/dpaa2/mc/fsl_dpkg.h b/src/spdk/dpdk/drivers/net/dpaa2/mc/fsl_dpkg.h new file mode 100644 index 000000000..02fe8d50e --- /dev/null +++ b/src/spdk/dpdk/drivers/net/dpaa2/mc/fsl_dpkg.h @@ -0,0 +1,186 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) + * Copyright 2013-2015 Freescale Semiconductor Inc. + * Copyright 2016-2017 NXP + * + */ +#ifndef __FSL_DPKG_H_ +#define __FSL_DPKG_H_ + +#include + +/* Data Path Key Generator API + * Contains initialization APIs and runtime APIs for the Key Generator + */ + +/** Key Generator properties */ + +/** + * Number of masks per key extraction + */ +#define DPKG_NUM_OF_MASKS 4 +/** + * Number of extractions per key profile + */ +#define DPKG_MAX_NUM_OF_EXTRACTS 10 + +/** + * enum dpkg_extract_from_hdr_type - Selecting extraction by header types + * @DPKG_FROM_HDR: Extract selected bytes from header, by offset + * @DPKG_FROM_FIELD: Extract selected bytes from header, by offset from field + * @DPKG_FULL_FIELD: Extract a full field + */ +enum dpkg_extract_from_hdr_type { + DPKG_FROM_HDR = 0, + DPKG_FROM_FIELD = 1, + DPKG_FULL_FIELD = 2 +}; + +/** + * enum dpkg_extract_type - Enumeration for selecting extraction type + * @DPKG_EXTRACT_FROM_HDR: Extract from the header + * @DPKG_EXTRACT_FROM_DATA: Extract from data not in specific header + * @DPKG_EXTRACT_FROM_PARSE: Extract from parser-result; + * e.g. 
can be used to extract header existence; + * please refer to 'Parse Result definition' section in the parser BG + */ +enum dpkg_extract_type { + DPKG_EXTRACT_FROM_HDR = 0, + DPKG_EXTRACT_FROM_DATA = 1, + DPKG_EXTRACT_FROM_PARSE = 3 +}; + +/** + * struct dpkg_mask - A structure for defining a single extraction mask + * @mask: Byte mask for the extracted content + * @offset: Offset within the extracted content + */ +struct dpkg_mask { + uint8_t mask; + uint8_t offset; +}; + +/* Macros for accessing command fields smaller than 1byte */ +#define DPKG_MASK(field) \ + GENMASK(DPKG_##field##_SHIFT + DPKG_##field##_SIZE - 1, \ + DPKG_##field##_SHIFT) +#define dpkg_set_field(var, field, val) \ + ((var) |= (((val) << DPKG_##field##_SHIFT) & DPKG_MASK(field))) +#define dpkg_get_field(var, field) \ + (((var) & DPKG_MASK(field)) >> DPKG_##field##_SHIFT) + +/** + * struct dpkg_extract - A structure for defining a single extraction + * @type: Determines how the union below is interpreted: + * DPKG_EXTRACT_FROM_HDR: selects 'from_hdr'; + * DPKG_EXTRACT_FROM_DATA: selects 'from_data'; + * DPKG_EXTRACT_FROM_PARSE: selects 'from_parse' + * @extract: Selects extraction method + * @extract.from_hdr: Used when 'type = DPKG_EXTRACT_FROM_HDR' + * @extract.from_data: Used when 'type = DPKG_EXTRACT_FROM_DATA' + * @extract.from_parse: Used when 'type = DPKG_EXTRACT_FROM_PARSE' + * @extract.from_hdr.prot: Any of the supported headers + * @extract.from_hdr.type: Defines the type of header extraction: + * DPKG_FROM_HDR: use size & offset below; + * DPKG_FROM_FIELD: use field, size and offset below; + * DPKG_FULL_FIELD: use field below + * @extract.from_hdr.field: One of the supported fields (NH_FLD_) + * @extract.from_hdr.size: Size in bytes + * @extract.from_hdr.offset: Byte offset + * @extract.from_hdr.hdr_index: Clear for cases not listed below; + * Used for protocols that may have more than a single + * header, 0 indicates an outer header; + * Supported protocols (possible values): + * NET_PROT_VLAN (0, HDR_INDEX_LAST); + * NET_PROT_MPLS (0, 1, HDR_INDEX_LAST); + * NET_PROT_IP(0, HDR_INDEX_LAST); + * NET_PROT_IPv4(0, HDR_INDEX_LAST); + * NET_PROT_IPv6(0, HDR_INDEX_LAST); + * @extract.from_data.size: Size in bytes + * @extract.from_data.offset: Byte offset + * @extract.from_parse.size: Size in bytes + * @extract.from_parse.offset: Byte offset + * @num_of_byte_masks: Defines the number of valid entries in the array below; + * This is also the number of bytes to be used as masks + * @masks: Masks parameters + */ +struct dpkg_extract { + enum dpkg_extract_type type; + union { + struct { + enum net_prot prot; + enum dpkg_extract_from_hdr_type type; + uint32_t field; + uint8_t size; + uint8_t offset; + uint8_t hdr_index; + } from_hdr; + struct { + uint8_t size; + uint8_t offset; + } from_data; + struct { + uint8_t size; + uint8_t offset; + } from_parse; + } extract; + + uint8_t num_of_byte_masks; + struct dpkg_mask masks[DPKG_NUM_OF_MASKS]; +}; + +/** + * struct dpkg_profile_cfg - A structure for defining a full Key Generation + * profile (rule) + * @num_extracts: Defines the number of valid entries in the array below + * @extracts: Array of required extractions + */ +struct dpkg_profile_cfg { + uint8_t num_extracts; + struct dpkg_extract extracts[DPKG_MAX_NUM_OF_EXTRACTS]; +}; + +/* dpni_set_rx_tc_dist extension (structure of the DMA-able memory at + * key_cfg_iova) + */ +struct dpni_mask_cfg { + uint8_t mask; + uint8_t offset; +}; + +#define DPKG_EFH_TYPE_SHIFT 0 +#define DPKG_EFH_TYPE_SIZE 4 +#define 
DPKG_EXTRACT_TYPE_SHIFT 0 +#define DPKG_EXTRACT_TYPE_SIZE 4 + +struct dpni_dist_extract { + /* word 0 */ + uint8_t prot; + /* EFH type stored in the 4 least significant bits */ + uint8_t efh_type; + uint8_t size; + uint8_t offset; + uint32_t field; + /* word 1 */ + uint8_t hdr_index; + uint8_t constant; + uint8_t num_of_repeats; + uint8_t num_of_byte_masks; + /* Extraction type is stored in the 4 LSBs */ + uint8_t extract_type; + uint8_t pad[3]; + /* word 2 */ + struct dpni_mask_cfg masks[4]; +}; + +struct dpni_ext_set_rx_tc_dist { + /* extension word 0 */ + uint8_t num_extracts; + uint8_t pad[7]; + /* words 1..25 */ + struct dpni_dist_extract extracts[10]; +}; + +int dpkg_prepare_key_cfg(const struct dpkg_profile_cfg *cfg, + uint8_t *key_cfg_buf); + +#endif /* __FSL_DPKG_H_ */ diff --git a/src/spdk/dpdk/drivers/net/dpaa2/mc/fsl_dpni.h b/src/spdk/dpdk/drivers/net/dpaa2/mc/fsl_dpni.h new file mode 100644 index 000000000..598911ddd --- /dev/null +++ b/src/spdk/dpdk/drivers/net/dpaa2/mc/fsl_dpni.h @@ -0,0 +1,1584 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) + * + * Copyright 2013-2016 Freescale Semiconductor Inc. + * Copyright 2016-2019 NXP + * + */ +#ifndef __FSL_DPNI_H +#define __FSL_DPNI_H + +#include +#include + +struct fsl_mc_io; + +/** + * Data Path Network Interface API + * Contains initialization APIs and runtime control APIs for DPNI + */ + +/** General DPNI macros */ + +/** + * Maximum number of traffic classes + */ +#define DPNI_MAX_TC 8 +/** + * Maximum number of buffer pools per DPNI + */ +#define DPNI_MAX_DPBP 8 +/** + * Maximum number of storage-profiles per DPNI + */ +#define DPNI_MAX_SP 2 + +/** + * All traffic classes considered; see dpni_set_queue() + */ +#define DPNI_ALL_TCS (uint8_t)(-1) +/** + * All flows within traffic class considered; see dpni_set_queue() + */ +#define DPNI_ALL_TC_FLOWS (uint16_t)(-1) + +/** + * Tx traffic is always released to a buffer pool on transmit, there are no + * resources allocated to have the frames confirmed back to the source after + * transmission. + */ +#define DPNI_OPT_TX_FRM_RELEASE 0x000001 +/** + * Disables support for MAC address filtering for addresses other than primary + * MAC address. This affects both unicast and multicast. Promiscuous mode can + * still be enabled/disabled for both unicast and multicast. If promiscuous mode + * is disabled, only traffic matching the primary MAC address will be accepted. + */ +#define DPNI_OPT_NO_MAC_FILTER 0x000002 +/** + * Allocate policers for this DPNI. They can be used to rate-limit traffic per + * traffic class (TC) basis. + */ +#define DPNI_OPT_HAS_POLICING 0x000004 +/** + * Congestion can be managed in several ways, allowing the buffer pool to + * deplete on ingress, taildrop on each queue or use congestion groups for sets + * of queues. If set, it configures a single congestion groups across all TCs. + * If reset, a congestion group is allocated for each TC. Only relevant if the + * DPNI has multiple traffic classes. + */ +#define DPNI_OPT_SHARED_CONGESTION 0x000008 +/** + * Enables TCAM for Flow Steering and QoS look-ups. If not specified, all + * look-ups are exact match. Note that TCAM is not available on LS1088 and its + * variants. Setting this bit on these SoCs will trigger an error. + */ +#define DPNI_OPT_HAS_KEY_MASKING 0x000010 +/** + * Disables the flow steering table. 
+ */ +#define DPNI_OPT_NO_FS 0x000020 + +/** + * Enable the Order Restoration support + */ +#define DPNI_OPT_HAS_OPR 0x000040 + +/** + * Order Point Records are shared for the entire TC + */ +#define DPNI_OPT_OPR_PER_TC 0x000080 +/** + * All Tx traffic classes will use a single sender (ignore num_queueus for tx) + */ +#define DPNI_OPT_SINGLE_SENDER 0x000100 +/** + * Define a custom number of congestion groups + */ +#define DPNI_OPT_CUSTOM_CG 0x000200 + + +/** + * Software sequence maximum layout size + */ +#define DPNI_SW_SEQUENCE_LAYOUT_SIZE 33 + +int dpni_open(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + int dpni_id, + uint16_t *token); + +int dpni_close(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token); + +/** + * struct dpni_cfg - Structure representing DPNI configuration + * @options: Any combination of the following options: + * DPNI_OPT_TX_FRM_RELEASE + * DPNI_OPT_NO_MAC_FILTER + * DPNI_OPT_HAS_POLICING + * DPNI_OPT_SHARED_CONGESTION + * DPNI_OPT_HAS_KEY_MASKING + * DPNI_OPT_NO_FS + * DPNI_OPT_SINGLE_SENDER + * @fs_entries: Number of entries in the flow steering table. + * This table is used to select the ingress queue for + * ingress traffic, targeting a GPP core or another. + * In addition it can be used to discard traffic that + * matches the set rule. It is either an exact match table + * or a TCAM table, depending on DPNI_OPT_ HAS_KEY_MASKING + * bit in OPTIONS field. This field is ignored if + * DPNI_OPT_NO_FS bit is set in OPTIONS field. Otherwise, + * value 0 defaults to 64. Maximum supported value is 1024. + * Note that the total number of entries is limited on the + * SoC to as low as 512 entries if TCAM is used. + * @vlan_filter_entries: Number of entries in the VLAN address filtering + * table. This is an exact match table used to filter + * ingress traffic based on VLAN IDs. Value 0 disables VLAN + * filtering. Maximum supported value is 16. + * @mac_filter_entries: Number of entries in the MAC address filtering + * table. This is an exact match table and allows both + * unicast and multicast entries. The primary MAC address + * of the network interface is not part of this table, + * this contains only entries in addition to it. This + * field is ignored if DPNI_OPT_ NO_MAC_FILTER is set in + * OPTIONS field. Otherwise, value 0 defaults to 80. + * Maximum supported value is 80. + * @num_queues: Number of Tx and Rx queues used for traffic + * distribution. This is orthogonal to QoS and is only + * used to distribute traffic to multiple GPP cores. + * This configuration affects the number of Tx queues + * (logical FQs, all associated with a single CEETM queue), + * Rx queues and Tx confirmation queues, if applicable. + * Value 0 defaults to one queue. Maximum supported value + * is 8. + * @num_tcs: Number of traffic classes (TCs), reserved for the DPNI. + * TCs can have different priority levels for the purpose + * of Tx scheduling (see DPNI_SET_TX_PRIORITIES), different + * BPs (DPNI_ SET_POOLS), policers. There are dedicated QM + * queues for traffic classes (including class queues on + * Tx). Value 0 defaults to one TC. Maximum supported value + * is 16. There are maximum 16 TCs for Tx and 8 TCs for Rx. + * When num_tcs>8 Tx will use this value but Rx will have + * only 8 traffic classes. + * @num_rx_tcs: if set to other value than zero represents number + * of TCs used for Rx. Maximum value is 8. If set to zero the + * number of Rx TCs will be initialized with the value provided + * in num_tcs parameter. 
+ * @qos_entries: Number of entries in the QoS classification table. This + * table is used to select the TC for ingress traffic. It + * is either an exact match or a TCAM table, depending on + * DPNI_OPT_ HAS_KEY_MASKING bit in OPTIONS field. This + * field is ignored if the DPNI has a single TC. Otherwise, + * a value of 0 defaults to 64. Maximum supported value + * is 64. + */ +struct dpni_cfg { + uint32_t options; + uint16_t fs_entries; + uint8_t vlan_filter_entries; + uint8_t mac_filter_entries; + uint8_t num_queues; + uint8_t num_tcs; + uint8_t num_rx_tcs; + uint8_t qos_entries; + uint8_t num_cgs; +}; + +int dpni_create(struct fsl_mc_io *mc_io, + uint16_t dprc_token, + uint32_t cmd_flags, + const struct dpni_cfg *cfg, + uint32_t *obj_id); + +int dpni_destroy(struct fsl_mc_io *mc_io, + uint16_t dprc_token, + uint32_t cmd_flags, + uint32_t object_id); + +/** + * struct dpni_pools_cfg - Structure representing buffer pools configuration + * @num_dpbp: Number of DPBPs + * @pool_options: Buffer assignment options + * This field is a combination of DPNI_POOL_ASSOC_flags + * @pools: Array of buffer pools parameters; The number of valid entries + * must match 'num_dpbp' value + * @pools.dpbp_id: DPBP object ID + * @pools.priority: Priority mask that indicates TC's used with this buffer. + * I set to 0x00 MC will assume value 0xff. + * @pools.buffer_size: Buffer size + * @pools.backup_pool: Backup pool + */ + +#define DPNI_POOL_ASSOC_QPRI 0 +#define DPNI_POOL_ASSOC_QDBIN 1 + +struct dpni_pools_cfg { + uint8_t num_dpbp; + uint8_t pool_options; + struct { + int dpbp_id; + uint8_t priority_mask; + uint16_t buffer_size; + int backup_pool; + } pools[DPNI_MAX_DPBP]; +}; + +int dpni_set_pools(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + const struct dpni_pools_cfg *cfg); + +int dpni_enable(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token); + +int dpni_disable(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token); + +int dpni_is_enabled(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + int *en); + +int dpni_reset(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token); + +/** + * DPNI IRQ Index and Events + */ + +/** + * IRQ index + */ +#define DPNI_IRQ_INDEX 0 +/** + * IRQ event - indicates a change in link state + */ +#define DPNI_IRQ_EVENT_LINK_CHANGED 0x00000001 + +int dpni_set_irq_enable(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t irq_index, + uint8_t en); + +int dpni_get_irq_enable(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t irq_index, + uint8_t *en); + +int dpni_set_irq_mask(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t irq_index, + uint32_t mask); + +int dpni_get_irq_mask(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t irq_index, + uint32_t *mask); + +int dpni_get_irq_status(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t irq_index, + uint32_t *status); + +int dpni_clear_irq_status(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t irq_index, + uint32_t status); + +/** + * struct dpni_attr - Structure representing DPNI attributes + * @options: Any combination of the following options: + * DPNI_OPT_TX_FRM_RELEASE + * DPNI_OPT_NO_MAC_FILTER + * DPNI_OPT_HAS_POLICING + * DPNI_OPT_SHARED_CONGESTION + * DPNI_OPT_HAS_KEY_MASKING + * DPNI_OPT_NO_FS + * @num_queues: Number of Tx and Rx queues used for traffic distribution. 
+ * @num_rx_tcs: Number of RX traffic classes (TCs), reserved for the DPNI. + * @num_tx_tcs: Number of TX traffic classes (TCs), reserved for the DPNI. + * @mac_filter_entries: Number of entries in the MAC address filtering + * table. + * @vlan_filter_entries: Number of entries in the VLAN address filtering + * table. + * @qos_entries: Number of entries in the QoS classification table. + * @fs_entries: Number of entries in the flow steering table. + * @qos_key_size: Size, in bytes, of the QoS look-up key. Defining a key larger + * than this when adding QoS entries will result + * in an error. + * @fs_key_size: Size, in bytes, of the flow steering look-up key. Defining a + * key larger than this when composing the hash + FS key + * will result in an error. + * @wriop_version: Version of WRIOP HW block. + * The 3 version values are stored on 6, 5, 5 bits + * respectively. + * Values returned: + * - 0x400 - WRIOP version 1.0.0, used on LS2080 and + * variants, + * - 0x421 - WRIOP version 1.1.1, used on LS2088 and + * variants, + * - 0x422 - WRIOP version 1.1.2, used on LS1088 and + * variants. + * - 0xC00 - WRIOP version 3.0.0, used on LX2160 and + * variants. + */ +struct dpni_attr { + uint32_t options; + uint8_t num_queues; + uint8_t num_rx_tcs; + uint8_t num_tx_tcs; + uint8_t mac_filter_entries; + uint8_t vlan_filter_entries; + uint8_t qos_entries; + uint16_t fs_entries; + uint8_t qos_key_size; + uint8_t fs_key_size; + uint16_t wriop_version; + uint8_t num_cgs; +}; + +int dpni_get_attributes(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + struct dpni_attr *attr); + +/** + * DPNI errors + */ + +/** + * Discard error. When set all discarded frames in wriop will be enqueued to + * error queue. To be used in dpni_set_errors_behavior() only if error_action + * parameter is set to DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE. 
+ */ +#define DPNI_ERROR_DISC 0x80000000 + +/** + * Extract out of frame header error + */ +#define DPNI_ERROR_EOFHE 0x00020000 +/** + * Frame length error + */ +#define DPNI_ERROR_FLE 0x00002000 +/** + * Frame physical error + */ +#define DPNI_ERROR_FPE 0x00001000 +/** + * Parsing header error + */ +#define DPNI_ERROR_PHE 0x00000020 +/** + * Parser L3 checksum error + */ +#define DPNI_ERROR_L3CE 0x00000004 +/** + * Parser L4 checksum error + */ +#define DPNI_ERROR_L4CE 0x00000001 + +/** + * enum dpni_error_action - Defines DPNI behavior for errors + * @DPNI_ERROR_ACTION_DISCARD: Discard the frame + * @DPNI_ERROR_ACTION_CONTINUE: Continue with the normal flow + * @DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE: Send the frame to the error queue + */ +enum dpni_error_action { + DPNI_ERROR_ACTION_DISCARD = 0, + DPNI_ERROR_ACTION_CONTINUE = 1, + DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE = 2 +}; + +/** + * struct dpni_error_cfg - Structure representing DPNI errors treatment + * @errors: Errors mask; use 'DPNI_ERROR_' values + * @error_action: The desired action for the errors mask + * @set_frame_annotation: Set to '1' to mark the errors in frame + * annotation status (FAS); relevant only + * for the non-discard action + */ +struct dpni_error_cfg { + uint32_t errors; + enum dpni_error_action error_action; + int set_frame_annotation; +}; + +int dpni_set_errors_behavior(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + struct dpni_error_cfg *cfg); + +/** + * DPNI buffer layout modification options + */ + +/** + * Select to modify the time-stamp setting + */ +#define DPNI_BUF_LAYOUT_OPT_TIMESTAMP 0x00000001 +/** + * Select to modify the parser-result setting; not applicable for Tx + */ +#define DPNI_BUF_LAYOUT_OPT_PARSER_RESULT 0x00000002 +/** + * Select to modify the frame-status setting + */ +#define DPNI_BUF_LAYOUT_OPT_FRAME_STATUS 0x00000004 +/** + * Select to modify the private-data-size setting + */ +#define DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE 0x00000008 +/** + * Select to modify the data-alignment setting + */ +#define DPNI_BUF_LAYOUT_OPT_DATA_ALIGN 0x00000010 +/** + * Select to modify the data-head-room setting + */ +#define DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM 0x00000020 +/** + * Select to modify the data-tail-room setting + */ +#define DPNI_BUF_LAYOUT_OPT_DATA_TAIL_ROOM 0x00000040 +/** + * Select to modify the sw-opaque value setting + */ +#define DPNI_BUF_LAYOUT_OPT_SW_OPAQUE 0x00000080 + +/** + * struct dpni_buffer_layout - Structure representing DPNI buffer layout + * @options: Flags representing the suggested modifications to the + * buffer layout; + * Use any combination of 'DPNI_BUF_LAYOUT_OPT_' flags + * @pass_timestamp: Pass timestamp value + * @pass_parser_result: Pass parser results + * @pass_frame_status: Pass frame status + * @pass_sw_opaque: Pass sw-opaque value + * @private_data_size: Size kept for private data (in bytes) + * @data_align: Data alignment + * @data_head_room: Data head room + * @data_tail_room: Data tail room + */ +struct dpni_buffer_layout { + uint32_t options; + int pass_timestamp; + int pass_parser_result; + int pass_frame_status; + int pass_sw_opaque; + uint16_t private_data_size; + uint16_t data_align; + uint16_t data_head_room; + uint16_t data_tail_room; +}; + +/** + * enum dpni_queue_type - Identifies a type of queue targeted by the command + * @DPNI_QUEUE_RX: Rx queue + * @DPNI_QUEUE_TX: Tx queue + * @DPNI_QUEUE_TX_CONFIRM: Tx confirmation queue + * @DPNI_QUEUE_RX_ERR: Rx error queue + */ +enum dpni_queue_type { + DPNI_QUEUE_RX, + DPNI_QUEUE_TX, + DPNI_QUEUE_TX_CONFIRM, + DPNI_QUEUE_RX_ERR, +};
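+
+/*
+ * Editorial usage sketch, not part of the upstream MC API: a minimal example
+ * of pairing the DPNI_ERROR_* masks above with dpni_set_errors_behavior().
+ * The helper name is hypothetical; 'mc_io' and 'token' are assumed to come
+ * from a prior dpni_open(), 0 is passed for cmd_flags as a placeholder, and
+ * set_frame_annotation stays 0 because it only matters for the non-discard
+ * actions.
+ */
+static inline int dpni_example_drop_csum_errors(struct fsl_mc_io *mc_io,
+						uint16_t token)
+{
+	struct dpni_error_cfg err_cfg = {
+		.errors = DPNI_ERROR_L3CE | DPNI_ERROR_L4CE,
+		.error_action = DPNI_ERROR_ACTION_DISCARD,
+		.set_frame_annotation = 0,
+	};
+
+	/* Discard ingress frames flagged with an L3 or L4 checksum error. */
+	return dpni_set_errors_behavior(mc_io, 0, token, &err_cfg);
+}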
+ +int dpni_get_buffer_layout(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + enum dpni_queue_type qtype, + struct dpni_buffer_layout *layout); + +int dpni_set_buffer_layout(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + enum dpni_queue_type qtype, + const struct dpni_buffer_layout *layout); + +/** + * enum dpni_offload - Identifies a type of offload targeted by the command + * @DPNI_OFF_RX_L3_CSUM: Rx L3 checksum validation + * @DPNI_OFF_RX_L4_CSUM: Rx L4 checksum validation + * @DPNI_OFF_TX_L3_CSUM: Tx L3 checksum generation + * @DPNI_OFF_TX_L4_CSUM: Tx L4 checksum generation + * @DPNI_FLCTYPE_HASH: flow context will be generated by WRIOP for AIOP or + * for CPU + */ +enum dpni_offload { + DPNI_OFF_RX_L3_CSUM, + DPNI_OFF_RX_L4_CSUM, + DPNI_OFF_TX_L3_CSUM, + DPNI_OFF_TX_L4_CSUM, + DPNI_FLCTYPE_HASH, +}; + +int dpni_set_offload(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + enum dpni_offload type, + uint32_t config); + +int dpni_get_offload(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + enum dpni_offload type, + uint32_t *config); + +int dpni_get_qdid(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + enum dpni_queue_type qtype, + uint16_t *qdid); + +int dpni_get_tx_data_offset(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint16_t *data_offset); + +#define DPNI_STATISTICS_CNT 7 + +/** + * union dpni_statistics - Union describing the DPNI statistics + * @page_0: Page_0 statistics structure + * @page_0.ingress_all_frames: Ingress frame count + * @page_0.ingress_all_bytes: Ingress byte count + * @page_0.ingress_multicast_frames: Ingress multicast frame count + * @page_0.ingress_multicast_bytes: Ingress multicast byte count + * @page_0.ingress_broadcast_frames: Ingress broadcast frame count + * @page_0.ingress_broadcast_bytes: Ingress broadcast byte count + * @page_1: Page_1 statistics structure + * @page_1.egress_all_frames: Egress frame count + * @page_1.egress_all_bytes: Egress byte count + * @page_1.egress_multicast_frames: Egress multicast frame count + * @page_1.egress_multicast_bytes: Egress multicast byte count + * @page_1.egress_broadcast_frames: Egress broadcast frame count + * @page_1.egress_broadcast_bytes: Egress broadcast byte count + * @page_2: Page_2 statistics structure + * @page_2.ingress_filtered_frames: Ingress filtered frame count + * @page_2.ingress_discarded_frames: Ingress discarded frame count + * @page_2.ingress_nobuffer_discards: Ingress discarded frame count due to + * lack of buffers + * @page_2.egress_discarded_frames: Egress discarded frame count + * @page_2.egress_confirmed_frames: Egress confirmed frame count + * @page_3: Page_3 statistics structure with values for the selected TC + * @page_3.ceetm_dequeue_bytes: Cumulative count of the number of bytes dequeued + * @page_3.ceetm_dequeue_frames: Cumulative count of the number of frames + * dequeued + * @page_3.ceetm_reject_bytes: Cumulative count of the number of bytes in all + * frames whose enqueue was rejected + * @page_3.ceetm_reject_frames: Cumulative count of all frame enqueues rejected + * @page_4: congestion point drops for selected TC + * @page_4.cgr_reject_frames: number of rejected frames due to congestion point + * @page_4.cgr_reject_bytes: number of rejected bytes due to congestion point + * @page_5: policer statistics per TC + * @page_5.policer_cnt_red: Number of red colored frames + * @page_5.policer_cnt_yellow: number of yellow colored frames + * @page_5.policer_cnt_green:
number of green colored frames + * @page_5.policer_cnt_re_red: number of recolored red frames + * @page_5.policer_cnt_re_yellow: number of recolored yellow frames + * @page_6.tx_pending_frames_cnt: total number of frames pending in Tx queues + * @raw: raw statistics structure, used to index counters + */ +union dpni_statistics { + struct { + uint64_t ingress_all_frames; + uint64_t ingress_all_bytes; + uint64_t ingress_multicast_frames; + uint64_t ingress_multicast_bytes; + uint64_t ingress_broadcast_frames; + uint64_t ingress_broadcast_bytes; + } page_0; + struct { + uint64_t egress_all_frames; + uint64_t egress_all_bytes; + uint64_t egress_multicast_frames; + uint64_t egress_multicast_bytes; + uint64_t egress_broadcast_frames; + uint64_t egress_broadcast_bytes; + } page_1; + struct { + uint64_t ingress_filtered_frames; + uint64_t ingress_discarded_frames; + uint64_t ingress_nobuffer_discards; + uint64_t egress_discarded_frames; + uint64_t egress_confirmed_frames; + } page_2; + struct { + uint64_t ceetm_dequeue_bytes; + uint64_t ceetm_dequeue_frames; + uint64_t ceetm_reject_bytes; + uint64_t ceetm_reject_frames; + } page_3; + struct { + uint64_t cgr_reject_frames; + uint64_t cgr_reject_bytes; + } page_4; + struct { + uint64_t policer_cnt_red; + uint64_t policer_cnt_yellow; + uint64_t policer_cnt_green; + uint64_t policer_cnt_re_red; + uint64_t policer_cnt_re_yellow; + } page_5; + struct { + uint64_t tx_pending_frames_cnt; + } page_6; + struct { + uint64_t counter[DPNI_STATISTICS_CNT]; + } raw; +}; + +/** + * Enable auto-negotiation + */ +#define DPNI_LINK_OPT_AUTONEG 0x0000000000000001ULL +/** + * Enable half-duplex mode + */ +#define DPNI_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL +/** + * Enable pause frames + */ +#define DPNI_LINK_OPT_PAUSE 0x0000000000000004ULL +/** + * Enable a-symmetric pause frames + */ +#define DPNI_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL +/** + * Enable priority flow control pause frames + */ +#define DPNI_LINK_OPT_PFC_PAUSE 0x0000000000000010ULL + +/** + * Advertise 10MB full duplex + */ +#define DPNI_ADVERTISED_10BASET_FULL 0x0000000000000001ULL +/** + * Advertise 100MB full duplex + */ +#define DPNI_ADVERTISED_100BASET_FULL 0x0000000000000002ULL +/** + * Advertise 1GB full duplex + */ +#define DPNI_ADVERTISED_1000BASET_FULL 0x0000000000000004ULL +/** + * Advertise auto-negotiation enable + */ +#define DPNI_ADVERTISED_AUTONEG 0x0000000000000008ULL +/** + * Advertise 10GB full duplex + */ +#define DPNI_ADVERTISED_10000BASET_FULL 0x0000000000000010ULL +/** + * Advertise 2.5GB full duplex + */ +#define DPNI_ADVERTISED_2500BASEX_FULL 0x0000000000000020ULL +/** + * Advertise 5GB full duplex + */ +#define DPNI_ADVERTISED_5000BASET_FULL 0x0000000000000040ULL + + +/** + * struct - Structure representing DPNI link configuration + * @rate: Rate + * @options: Mask of available options; use 'DPNI_LINK_OPT_' values + * @advertising: Speeds that are advertised for autoneg (bitmap) + */ +struct dpni_link_cfg { + uint32_t rate; + uint64_t options; + uint64_t advertising; +}; + +int dpni_set_link_cfg(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + const struct dpni_link_cfg *cfg); + +/** + * struct dpni_link_state - Structure representing DPNI link state + * @rate: Rate + * @options: Mask of available options; use 'DPNI_LINK_OPT_' values + * @up: Link state; '0' for down, '1' for up + * @state_valid: Ignore/Update the state of the link + * @supported: Speeds capability of the phy (bitmap) + * @advertising: Speeds that are advertised for autoneg (bitmap) + */ 
+struct dpni_link_state { + uint32_t rate; + uint64_t options; + int up; + int state_valid; + uint64_t supported; + uint64_t advertising; +}; + +int dpni_get_link_state(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + struct dpni_link_state *state); + +int dpni_set_max_frame_length(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint16_t max_frame_length); + +int dpni_get_max_frame_length(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint16_t *max_frame_length); + +int dpni_set_mtu(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint16_t mtu); + +int dpni_get_mtu(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint16_t *mtu); + +int dpni_set_multicast_promisc(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + int en); + +int dpni_get_multicast_promisc(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + int *en); + +int dpni_set_unicast_promisc(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + int en); + +int dpni_get_unicast_promisc(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + int *en); + +int dpni_set_primary_mac_addr(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + const uint8_t mac_addr[6]); + +int dpni_get_primary_mac_addr(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t mac_addr[6]); + +int dpni_add_mac_addr(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + const uint8_t mac_addr[6], + uint8_t flags, + uint8_t tc_id, + uint8_t flow_id); + +int dpni_remove_mac_addr(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + const uint8_t mac_addr[6]); + +int dpni_clear_mac_filters(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + int unicast, + int multicast); + +int dpni_get_port_mac_addr(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t mac_addr[6]); + +int dpni_enable_vlan_filter(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + int en); + +/** + * Set vlan filter queue action + */ +#define DPNI_VLAN_SET_QUEUE_ACTION 1 + +int dpni_add_vlan_id(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint16_t vlan_id, + uint8_t flags, + uint8_t tc_id, + uint8_t flow_id); + +int dpni_remove_vlan_id(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint16_t vlan_id); + +int dpni_clear_vlan_filters(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token); + +/** + * enum dpni_dist_mode - DPNI distribution mode + * @DPNI_DIST_MODE_NONE: No distribution + * @DPNI_DIST_MODE_HASH: Use hash distribution; only relevant if + * the 'DPNI_OPT_DIST_HASH' option was set at DPNI creation + * @DPNI_DIST_MODE_FS: Use explicit flow steering; only relevant if + * the 'DPNI_OPT_DIST_FS' option was set at DPNI creation + */ +enum dpni_dist_mode { + DPNI_DIST_MODE_NONE = 0, + DPNI_DIST_MODE_HASH = 1, + DPNI_DIST_MODE_FS = 2 +}; + +/** + * enum dpni_fs_miss_action - DPNI Flow Steering miss action + * @DPNI_FS_MISS_DROP: In case of no-match, drop the frame + * @DPNI_FS_MISS_EXPLICIT_FLOWID: In case of no-match, use explicit flow-id + * @DPNI_FS_MISS_HASH: In case of no-match, distribute using hash + */ +enum dpni_fs_miss_action { + DPNI_FS_MISS_DROP = 0, + DPNI_FS_MISS_EXPLICIT_FLOWID = 1, + DPNI_FS_MISS_HASH = 2 +}; + +/** + * struct dpni_fs_tbl_cfg - Flow Steering table configuration + * @miss_action: Miss action selection + * @default_flow_id: Used when 'miss_action = 
DPNI_FS_MISS_EXPLICIT_FLOWID' + * @keep_hash_key: used only when miss_action is set to DPNI_FS_MISS_HASH. When + * set to one unclassified frames will be distributed according to previous + * used hash key. If set to zero hash key will be replaced with the key + * provided for flow steering. + * @keep_entries: if set to one command will not delete the entries that already + * exist into FS table. Use this option with caution: if the table + * entries are not compatible with the distribution key the packets + * will not be classified properly. + */ +struct dpni_fs_tbl_cfg { + enum dpni_fs_miss_action miss_action; + uint16_t default_flow_id; + char keep_hash_key; + uint8_t keep_entries; +}; + +/** + * struct dpni_rx_tc_dist_cfg - Rx traffic class distribution configuration + * @dist_size: Set the distribution size; + * supported values: 1,2,3,4,6,7,8,12,14,16,24,28,32,48,56,64,96, + * 112,128,192,224,256,384,448,512,768,896,1024 + * @dist_mode: Distribution mode + * @key_cfg_iova: I/O virtual address of 256 bytes DMA-able memory filled with + * the extractions to be used for the distribution key by calling + * dpkg_prepare_key_cfg() relevant only when + * 'dist_mode != DPNI_DIST_MODE_NONE', otherwise it can be '0' + * @fs_cfg: Flow Steering table configuration; only relevant if + * 'dist_mode = DPNI_DIST_MODE_FS' + */ +struct dpni_rx_tc_dist_cfg { + uint16_t dist_size; + enum dpni_dist_mode dist_mode; + uint64_t key_cfg_iova; + struct dpni_fs_tbl_cfg fs_cfg; +}; + +int dpni_set_rx_tc_dist(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t tc_id, + const struct dpni_rx_tc_dist_cfg *cfg); + +/** + * enum dpni_congestion_unit - DPNI congestion units + * @DPNI_CONGESTION_UNIT_BYTES: bytes units + * @DPNI_CONGESTION_UNIT_FRAMES: frames units + */ +enum dpni_congestion_unit { + DPNI_CONGESTION_UNIT_BYTES = 0, + DPNI_CONGESTION_UNIT_FRAMES +}; + +/** + * enum dpni_dest - DPNI destination types + * @DPNI_DEST_NONE: Unassigned destination; The queue is set in parked mode and + * does not generate FQDAN notifications; user is expected to + * dequeue from the queue based on polling or other user-defined + * method + * @DPNI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN + * notifications to the specified DPIO; user is expected to dequeue + * from the queue only after notification is received + * @DPNI_DEST_DPCON: The queue is set in schedule mode and does not generate + * FQDAN notifications, but is connected to the specified DPCON + * object; user is expected to dequeue from the DPCON channel + */ +enum dpni_dest { + DPNI_DEST_NONE = 0, + DPNI_DEST_DPIO = 1, + DPNI_DEST_DPCON = 2 +}; + +/** + * struct dpni_dest_cfg - Structure representing DPNI destination parameters + * @dest_type: Destination type + * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type + * @priority: Priority selection within the DPIO or DPCON channel; valid values + * are 0-1 or 0-7, depending on the number of priorities in that + * channel; not relevant for 'DPNI_DEST_NONE' option + */ +struct dpni_dest_cfg { + enum dpni_dest dest_type; + int dest_id; + uint8_t priority; +}; + +/* DPNI congestion options */ + +/** + * CSCN message is written to message_iova once entering a + * congestion state (see 'threshold_entry') + */ +#define DPNI_CONG_OPT_WRITE_MEM_ON_ENTER 0x00000001 +/** + * CSCN message is written to message_iova once exiting a + * congestion state (see 'threshold_exit') + */ +#define DPNI_CONG_OPT_WRITE_MEM_ON_EXIT 0x00000002 +/** + * CSCN write will attempt 
to allocate into a cache (coherent write); + * valid only if 'DPNI_CONG_OPT_WRITE_MEM_' is selected + */ +#define DPNI_CONG_OPT_COHERENT_WRITE 0x00000004 +/** + * if 'dest_cfg.dest_type != DPNI_DEST_NONE' CSCN message is sent to + * DPIO/DPCON's WQ channel once entering a congestion state + * (see 'threshold_entry') + */ +#define DPNI_CONG_OPT_NOTIFY_DEST_ON_ENTER 0x00000008 +/** + * if 'dest_cfg.dest_type != DPNI_DEST_NONE' CSCN message is sent to + * DPIO/DPCON's WQ channel once exiting a congestion state + * (see 'threshold_exit') + */ +#define DPNI_CONG_OPT_NOTIFY_DEST_ON_EXIT 0x00000010 +/** + * if 'dest_cfg.dest_type != DPNI_DEST_NONE' when the CSCN is written to the + * sw-portal's DQRR, the DQRI interrupt is asserted immediately (if enabled) + */ +#define DPNI_CONG_OPT_INTR_COALESCING_DISABLED 0x00000020 +/** + * This congestion will trigger flow control or priority flow control. This + * will have effect only if flow control is enabled with dpni_set_link_cfg() + */ +#define DPNI_CONG_OPT_FLOW_CONTROL 0x00000040 + +/** + * enum dpni_congestion_point - Structure representing congestion point + * @DPNI_CP_QUEUE: Set congestion per queue, identified by QUEUE_TYPE, TC + * and QUEUE_INDEX + * @DPNI_CP_GROUP: Set congestion per queue group. Depending on options + * used to define the DPNI this can be either per + * TC (default) or per interface + * (DPNI_OPT_SHARED_CONGESTION set at DPNI create). + * QUEUE_INDEX is ignored if this type is used. + * @DPNI_CP_CONGESTION_GROUP: Set per congestion group id. This will work + * only if the DPNI is created with DPNI_OPT_CUSTOM_CG option + */ + +enum dpni_congestion_point { + DPNI_CP_QUEUE, + DPNI_CP_GROUP, + DPNI_CP_CONGESTION_GROUP, +}; + +/** + * struct dpni_congestion_notification_cfg - congestion notification + * configuration + * @units: units type + * @threshold_entry: above this threshold we enter a congestion state. + * set it to '0' to disable it + * @threshold_exit: below this threshold we exit the congestion state. + * @message_ctx: The context that will be part of the CSCN message + * @message_iova: I/O virtual address (must be in DMA-able memory), + * must be 16B aligned; valid only if 'DPNI_CONG_OPT_WRITE_MEM_' is + * contained in 'options' + * @dest_cfg: CSCN can be send to either DPIO or DPCON WQ channel + * @notification_mode: Mask of available options; use 'DPNI_CONG_OPT_' values + * @cg_point: Congestion point settings + * @cgid: id of the congestion group. The index is relative to dpni. + */ + +struct dpni_congestion_notification_cfg { + enum dpni_congestion_unit units; + uint32_t threshold_entry; + uint32_t threshold_exit; + uint64_t message_ctx; + uint64_t message_iova; + struct dpni_dest_cfg dest_cfg; + uint16_t notification_mode; + enum dpni_congestion_point cg_point; + int cgid; +}; + +int dpni_set_congestion_notification(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + enum dpni_queue_type qtype, + uint8_t tc_id, + const struct dpni_congestion_notification_cfg *cfg); + +int dpni_get_congestion_notification(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + enum dpni_queue_type qtype, + uint8_t tc_id, + struct dpni_congestion_notification_cfg *cfg); + +/* DPNI FLC stash options */ + +/** + * stashes the whole annotation area (up to 192 bytes) + */ +#define DPNI_FLC_STASH_FRAME_ANNOTATION 0x00000001 + +/** + * struct dpni_queue - Queue structure + * @destination - Destination structure + * @destination.id: ID of the destination, only relevant if DEST_TYPE is > 0. 
+ * Identifies either a DPIO or a DPCON object. + * Not relevant for Tx queues. + * @destination.type: May be one of the following: + * 0 - No destination, queue can be manually + * queried, but will not push traffic or + * notifications to a DPIO; + * 1 - The destination is a DPIO. When traffic + * becomes available in the queue a FQDAN + * (FQ data available notification) will be + * generated to selected DPIO; + * 2 - The destination is a DPCON. The queue is + * associated with a DPCON object for the + * purpose of scheduling between multiple + * queues. The DPCON may be independently + * configured to generate notifications. + * Not relevant for Tx queues. + * @destination.hold_active: Hold active, maintains a queue scheduled for longer + * in a DPIO during dequeue to reduce spread of traffic. + * Only relevant if queues are + * not affined to a single DPIO. + * @user_context: User data, presented to the user along with any frames + * from this queue. Not relevant for Tx queues. + * @flc: FD Flow Context structure + * @flc.value: Default FLC value for traffic dequeued from + * this queue. Please check description of FD + * structure for more information. + * Note that FLC values set using dpni_add_fs_entry, + * if any, take precedence over values per queue. + * @flc.stash_control: Boolean, indicates whether the 6 least + * significant bits are used for stash control. If set, the 6 + * least significant bits in value are interpreted as follows: + * - bits 0-1: indicates the number of 64 byte units of context + * that are stashed. FLC value is interpreted as a memory address + * in this case, excluding the 6 LS bits. + * - bits 2-3: indicates the number of 64 byte units of frame + * annotation to be stashed. Annotation is placed at FD[ADDR]. + * - bits 4-5: indicates the number of 64 byte units of frame + * data to be stashed. Frame data is placed at FD[ADDR] + + * FD[OFFSET]. + * For more details check the Frame Descriptor section in the + * hardware documentation. + * @cgid: indicates the cgid to set, relative to the dpni + */ +struct dpni_queue { + struct { + uint16_t id; + enum dpni_dest type; + char hold_active; + uint8_t priority; + } destination; + uint64_t user_context; + struct { + uint64_t value; + char stash_control; + } flc; + int cgid; +}; + +/** + * struct dpni_queue_id - Queue identification, used for enqueue commands + * or queue control + * @fqid: FQID used for enqueueing to and/or configuration of this + * specific FQ + * @qdbin: Queueing bin, used to enqueue using QDID, DQBIN, QPRI. + * Only relevant for Tx queues. + */ +struct dpni_queue_id { + uint32_t fqid; + uint16_t qdbin; +}; + +/** + * enum dpni_confirmation_mode - Defines DPNI options supported for Tx + * confirmation + * @DPNI_CONF_AFFINE: For each Tx queue set associated with a sender there is + * an affine Tx Confirmation queue + * @DPNI_CONF_SINGLE: All Tx queues are associated with a single Tx + * confirmation queue + * @DPNI_CONF_DISABLE: Tx frames are not confirmed.
This must be associated + * with proper FD set-up to have buffers released to a Buffer Pool, otherwise + * buffers will be leaked + */ +enum dpni_confirmation_mode { + DPNI_CONF_AFFINE, + DPNI_CONF_SINGLE, + DPNI_CONF_DISABLE, +}; + +int dpni_set_tx_confirmation_mode(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + enum dpni_confirmation_mode mode); + +int dpni_get_tx_confirmation_mode(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + enum dpni_confirmation_mode *mode); + +/** + * struct dpni_qos_tbl_cfg - Structure representing QOS table configuration + * @key_cfg_iova: I/O virtual address of 256 bytes DMA-able memory filled with + * key extractions to be used as the QoS criteria by calling + * dpkg_prepare_key_cfg() + * @discard_on_miss: Set to '1' to discard frames in case of no match (miss); + * '0' to use the 'default_tc' in such cases + * @keep_entries: if set to one, the command will not delete existing table + * entries. This option will work properly only for dpni objects created with + * DPNI_OPT_HAS_KEY_MASKING option. All previous QoS entries must + * be compatible with new key composition rule. + * It is the caller's job to delete incompatible entries before + * executing this function. + * @default_tc: Used in case of no-match and 'discard_on_miss' = 0 + */ +struct dpni_qos_tbl_cfg { + uint64_t key_cfg_iova; + int discard_on_miss; + int keep_entries; + uint8_t default_tc; +}; + +int dpni_set_qos_table(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + const struct dpni_qos_tbl_cfg *cfg); + +/** + * struct dpni_rule_cfg - Rule configuration for table lookup + * @key_iova: I/O virtual address of the key (must be in DMA-able memory) + * @mask_iova: I/O virtual address of the mask (must be in DMA-able memory) + * @key_size: key and mask size (in bytes) + */ +struct dpni_rule_cfg { + uint64_t key_iova; + uint64_t mask_iova; + uint8_t key_size; +}; + +int dpni_add_qos_entry(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + const struct dpni_rule_cfg *cfg, + uint8_t tc_id, + uint16_t index, + uint8_t flags, + uint8_t flow_id); + +int dpni_remove_qos_entry(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + const struct dpni_rule_cfg *cfg); + +int dpni_clear_qos_table(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token); + +/** + * Discard matching traffic. If set, this takes precedence over any other + * configuration and matching traffic is always discarded. + */ +#define DPNI_FS_OPT_DISCARD 0x1 + +/** + * Set FLC value. If set, flc member of struct dpni_fs_action_cfg is used to + * override the FLC value set per queue. + * For more details check the Frame Descriptor section in the hardware + * documentation. + */ +#define DPNI_FS_OPT_SET_FLC 0x2 + +/* + * Indicates whether the 6 lowest significant bits of FLC are used for stash + * control. If set, the 6 least significant bits in value are interpreted as + * follows: + * - bits 0-1: indicates the number of 64 byte units of context that are + * stashed. FLC value is interpreted as a memory address in this case, + * excluding the 6 LS bits. + * - bits 2-3: indicates the number of 64 byte units of frame annotation + * to be stashed. Annotation is placed at FD[ADDR]. + * - bits 4-5: indicates the number of 64 byte units of frame data to be + * stashed. Frame data is placed at FD[ADDR] + FD[OFFSET]. + * This flag is ignored if DPNI_FS_OPT_SET_FLC is not specified.
+ */ +#define DPNI_FS_OPT_SET_STASH_CONTROL 0x4 + +/** + * struct dpni_fs_action_cfg - Action configuration for table look-up + * @flc: FLC value for traffic matching this rule. Please check the Frame + * Descriptor section in the hardware documentation for more information. + * @flow_id: Identifies the Rx queue used for matching traffic. Supported + * values are in range 0 to num_queue-1. + * @options: Any combination of DPNI_FS_OPT_ values. + */ +struct dpni_fs_action_cfg { + uint64_t flc; + uint16_t flow_id; + uint16_t options; +}; + +int dpni_add_fs_entry(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t tc_id, + uint16_t index, + const struct dpni_rule_cfg *cfg, + const struct dpni_fs_action_cfg *action); + +int dpni_remove_fs_entry(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t tc_id, + const struct dpni_rule_cfg *cfg); + +int dpni_clear_fs_entries(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t tc_id); + +int dpni_get_api_version(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t *major_ver, + uint16_t *minor_ver); + +/** + * Set User Context + */ +#define DPNI_QUEUE_OPT_USER_CTX 0x00000001 + +/** + * Set queue destination configuration + */ +#define DPNI_QUEUE_OPT_DEST 0x00000002 + +/** + * Set FD[FLC] configuration for traffic on this queue. Note that FLC values + * set with dpni_add_fs_entry, if any, take precedence over values per queue. + */ +#define DPNI_QUEUE_OPT_FLC 0x00000004 + +/** + * Set the queue to hold active mode. This prevents the queue from being + * rescheduled between DPIOs while it carries traffic and is active on one + * DPNI. Can help reduce reordering when servicing one queue on multiple + * CPUs, but the queue is also less likely to push data to multiple CPUs + * especially when congested. + */ +#define DPNI_QUEUE_OPT_HOLD_ACTIVE 0x00000008 + +#define DPNI_QUEUE_OPT_SET_CGID 0x00000040 +#define DPNI_QUEUE_OPT_CLEAR_CGID 0x00000080 + +int dpni_set_queue(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + enum dpni_queue_type qtype, + uint8_t tc, + uint8_t index, + uint8_t options, + const struct dpni_queue *queue); + +int dpni_get_queue(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + enum dpni_queue_type qtype, + uint8_t tc, + uint8_t index, + struct dpni_queue *queue, + struct dpni_queue_id *qid); + +int dpni_get_statistics(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t page, + uint16_t param, + union dpni_statistics *stat); + +int dpni_reset_statistics(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token); + +/** + * struct dpni_taildrop - Structure representing the taildrop + * @enable: Indicates whether the taildrop is active or not. + * @units: Indicates the unit of THRESHOLD. Queue taildrop only + * supports byte units, this field is ignored and + * assumed = 0 if CONGESTION_POINT is 0. + * @threshold: Threshold value, in units identified by UNITS field. Value 0 + * cannot be used as a valid taildrop threshold, + * THRESHOLD must be > 0 if the taildrop is + * enabled. + * @oal : Overhead Accounting Length, a 12-bit, 2's complement value + * with range (-2048 to +2047) representing a fixed per-frame + * overhead to be added to the actual length of a frame when + * performing WRED and tail drop calculations and threshold + * comparisons. 
+ */ +struct dpni_taildrop { + char enable; + enum dpni_congestion_unit units; + uint32_t threshold; + int16_t oal; +}; + +int dpni_set_taildrop(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + enum dpni_congestion_point cg_point, + enum dpni_queue_type q_type, + uint8_t tc, + uint8_t q_index, + struct dpni_taildrop *taildrop); + +int dpni_get_taildrop(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + enum dpni_congestion_point cg_point, + enum dpni_queue_type q_type, + uint8_t tc, + uint8_t q_index, + struct dpni_taildrop *taildrop); + +int dpni_set_opr(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t tc, + uint8_t index, + uint8_t options, + struct opr_cfg *cfg); + +int dpni_get_opr(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t tc, + uint8_t index, + struct opr_cfg *cfg, + struct opr_qry *qry); + +/** + * When used for queue_idx in function dpni_set_rx_dist_default_queue will + * signal to dpni to drop all unclassified frames + */ +#define DPNI_FS_MISS_DROP ((uint16_t)-1) + +/** + * struct dpni_rx_dist_cfg - distribution configuration + * @dist_size: distribution size; supported values: 1,2,3,4,6,7,8, + * 12,14,16,24,28,32,48,56,64,96,112,128,192,224,256,384,448, + * 512,768,896,1024 + * @key_cfg_iova: I/O virtual address of 256 bytes DMA-able memory filled with + * the extractions to be used for the distribution key by calling + * dpkg_prepare_key_cfg() relevant only when enable!=0 otherwise + * it can be '0' + * @enable: enable/disable the distribution. + * @tc: TC id for which distribution is set + * @fs_miss_flow_id: when packet misses all rules from flow steering table and + * hash is disabled it will be put into this queue id; use + * DPNI_FS_MISS_DROP to drop frames. The value of this field is + * used only when flow steering distribution is enabled and hash + * distribution is disabled + */ +struct dpni_rx_dist_cfg { + uint16_t dist_size; + uint64_t key_cfg_iova; + uint8_t enable; + uint8_t tc; + uint16_t fs_miss_flow_id; +}; + +int dpni_set_rx_fs_dist(struct fsl_mc_io *mc_io, uint32_t cmd_flags, + uint16_t token, const struct dpni_rx_dist_cfg *cfg); + +int dpni_set_rx_hash_dist(struct fsl_mc_io *mc_io, uint32_t cmd_flags, + uint16_t token, const struct dpni_rx_dist_cfg *cfg); + +int dpni_add_custom_tpid(struct fsl_mc_io *mc_io, uint32_t cmd_flags, + uint16_t token, uint16_t tpid); + +int dpni_remove_custom_tpid(struct fsl_mc_io *mc_io, uint32_t cmd_flags, + uint16_t token, uint16_t tpid); + +/** + * struct dpni_custom_tpid_cfg - custom TPID configuration. Contains custom TPID + * values used in current dpni object to detect 802.1q frames. + * @tpid1: first tag. Not used if zero. + * @tpid2: second tag. Not used if zero. 
+ */ +struct dpni_custom_tpid_cfg { + uint16_t tpid1; + uint16_t tpid2; +}; + +int dpni_get_custom_tpid(struct fsl_mc_io *mc_io, uint32_t cmd_flags, + uint16_t token, struct dpni_custom_tpid_cfg *tpid); + +/** + * enum dpni_soft_sequence_dest - Enumeration of WRIOP software sequence + * destinations + * @DPNI_SS_INGRESS: Ingress parser + * @DPNI_SS_EGRESS: Egress parser + */ +enum dpni_soft_sequence_dest { + DPNI_SS_INGRESS = 0, + DPNI_SS_EGRESS = 1, +}; + +/** + * struct dpni_load_ss_cfg - Structure for Software Sequence load configuration + * @dest: Destination of the Software Sequence: ingress or egress parser + * @ss_size: Size of the Software Sequence + * @ss_offset: The offset where to load the Software Sequence (0x20-0x7FD) + * @ss_iova: I/O virtual address of the Software Sequence + */ +struct dpni_load_ss_cfg { + enum dpni_soft_sequence_dest dest; + uint16_t ss_size; + uint16_t ss_offset; + uint64_t ss_iova; +}; + +/** + * struct dpni_enable_ss_cfg - Structure for software sequence enable + * configuration + * @dest: Destination of the Software Sequence: ingress or egress parser + * @hxs: HXS to attach the software sequence to + * @set_start: If the Software Sequence or HDR it is attached to is set as + * parser start + * If hxs=DUMMY_LAST_HXS the ss_offset is set directly as parser + * start else the hdr index code is set as parser start + * @ss_offset: The offset of the Software Sequence to enable or set as parse + * start + * @param_size: Size of the software sequence parameters + * @param_offset: Offset in the parameter zone for the software sequence + * parameters + * @param_iova: I/O virtual address of the parameters + */ +struct dpni_enable_ss_cfg { + enum dpni_soft_sequence_dest dest; + uint16_t hxs; + uint8_t set_start; + uint16_t ss_offset; + uint8_t param_size; + uint8_t param_offset; + uint64_t param_iova; +}; + +/** + * dpni_load_sw_sequence() - Loads a software sequence in parser memory. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @cfg: Software sequence load configuration + * Return: '0' on Success; Error code otherwise. + */ +int dpni_load_sw_sequence(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + struct dpni_load_ss_cfg *cfg); + +/** + * dpni_enable_sw_sequence() - Enables a software sequence in the parser + * profile corresponding to the ingress or egress of the DPNI. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @cfg: Software sequence enable configuration + * Return: '0' on Success; Error code otherwise. + */ +int dpni_enable_sw_sequence(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + struct dpni_enable_ss_cfg *cfg); + +/** + * struct dpni_sw_sequence_layout - Structure for the software sequence layout + * @num_ss: Number of software sequences returned + * @ss: Array of software sequence entries.
The number of valid entries + * must match 'num_ss' value + */ +struct dpni_sw_sequence_layout { + uint8_t num_ss; + struct { + uint16_t ss_offset; + uint16_t ss_size; + uint8_t param_offset; + uint8_t param_size; + } ss[DPNI_SW_SEQUENCE_LAYOUT_SIZE]; +}; + +/** + * dpni_get_sw_sequence_layout() - Get the soft sequence layout + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @src: Source of the layout (WRIOP Rx or Tx) + * @ss_layout_iova: I/O virtual address of 264 bytes DMA-able memory + * + * warning: After calling this function, call dpni_extract_sw_sequence_layout() + * to get the layout + * + * Return: '0' on Success; error code otherwise. + */ +int dpni_get_sw_sequence_layout(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + enum dpni_soft_sequence_dest src, + uint64_t ss_layout_iova); + +/** + * dpni_extract_sw_sequence_layout() - extract the software sequence layout + * @layout: software sequence layout + * @sw_sequence_layout_buf: Zeroed 264 bytes of memory before mapping it + * to DMA + * + * This function has to be called after dpni_get_sw_sequence_layout + * + */ +void dpni_extract_sw_sequence_layout(struct dpni_sw_sequence_layout *layout, + const uint8_t *sw_sequence_layout_buf); + +#endif /* __FSL_DPNI_H */ diff --git a/src/spdk/dpdk/drivers/net/dpaa2/mc/fsl_dpni_cmd.h b/src/spdk/dpdk/drivers/net/dpaa2/mc/fsl_dpni_cmd.h new file mode 100644 index 000000000..9e7376200 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/dpaa2/mc/fsl_dpni_cmd.h @@ -0,0 +1,858 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) + * + * Copyright 2013-2016 Freescale Semiconductor Inc. + * Copyright 2016-2019 NXP + * + */ +#ifndef _FSL_DPNI_CMD_H +#define _FSL_DPNI_CMD_H + +/* DPNI Version */ +#define DPNI_VER_MAJOR 7 +#define DPNI_VER_MINOR 13 + +#define DPNI_CMD_BASE_VERSION 1 +#define DPNI_CMD_VERSION_2 2 +#define DPNI_CMD_VERSION_3 3 +#define DPNI_CMD_ID_OFFSET 4 + +#define DPNI_CMD(id) (((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_BASE_VERSION) +#define DPNI_CMD_V2(id) (((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_VERSION_2) +#define DPNI_CMD_V3(id) (((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_VERSION_3) + +/* Command IDs */ +#define DPNI_CMDID_OPEN DPNI_CMD(0x801) +#define DPNI_CMDID_CLOSE DPNI_CMD(0x800) +#define DPNI_CMDID_CREATE DPNI_CMD_V3(0x901) +#define DPNI_CMDID_DESTROY DPNI_CMD(0x981) +#define DPNI_CMDID_GET_API_VERSION DPNI_CMD(0xa01) + +#define DPNI_CMDID_ENABLE DPNI_CMD(0x002) +#define DPNI_CMDID_DISABLE DPNI_CMD(0x003) +#define DPNI_CMDID_GET_ATTR DPNI_CMD_V3(0x004) +#define DPNI_CMDID_RESET DPNI_CMD(0x005) +#define DPNI_CMDID_IS_ENABLED DPNI_CMD(0x006) + +#define DPNI_CMDID_SET_IRQ_ENABLE DPNI_CMD(0x012) +#define DPNI_CMDID_GET_IRQ_ENABLE DPNI_CMD(0x013) +#define DPNI_CMDID_SET_IRQ_MASK DPNI_CMD(0x014) +#define DPNI_CMDID_GET_IRQ_MASK DPNI_CMD(0x015) +#define DPNI_CMDID_GET_IRQ_STATUS DPNI_CMD(0x016) +#define DPNI_CMDID_CLEAR_IRQ_STATUS DPNI_CMD(0x017) + +#define DPNI_CMDID_SET_POOLS DPNI_CMD_V3(0x200) +#define DPNI_CMDID_SET_ERRORS_BEHAVIOR DPNI_CMD(0x20B) + +#define DPNI_CMDID_GET_QDID DPNI_CMD(0x210) +#define DPNI_CMDID_GET_SP_INFO DPNI_CMD(0x211) +#define DPNI_CMDID_GET_TX_DATA_OFFSET DPNI_CMD(0x212) +#define DPNI_CMDID_GET_LINK_STATE DPNI_CMD_V2(0x215) +#define DPNI_CMDID_SET_MAX_FRAME_LENGTH DPNI_CMD(0x216) +#define DPNI_CMDID_GET_MAX_FRAME_LENGTH DPNI_CMD(0x217) +#define DPNI_CMDID_SET_LINK_CFG DPNI_CMD_V2(0x21A) +#define DPNI_CMDID_SET_TX_SHAPING DPNI_CMD_V2(0x21B) + +#define 
DPNI_CMDID_SET_MCAST_PROMISC DPNI_CMD(0x220) +#define DPNI_CMDID_GET_MCAST_PROMISC DPNI_CMD(0x221) +#define DPNI_CMDID_SET_UNICAST_PROMISC DPNI_CMD(0x222) +#define DPNI_CMDID_GET_UNICAST_PROMISC DPNI_CMD(0x223) +#define DPNI_CMDID_SET_PRIM_MAC DPNI_CMD(0x224) +#define DPNI_CMDID_GET_PRIM_MAC DPNI_CMD(0x225) +#define DPNI_CMDID_ADD_MAC_ADDR DPNI_CMD_V2(0x226) +#define DPNI_CMDID_REMOVE_MAC_ADDR DPNI_CMD(0x227) +#define DPNI_CMDID_CLR_MAC_FILTERS DPNI_CMD(0x228) + +#define DPNI_CMDID_ENABLE_VLAN_FILTER DPNI_CMD(0x230) +#define DPNI_CMDID_ADD_VLAN_ID DPNI_CMD_V2(0x231) +#define DPNI_CMDID_REMOVE_VLAN_ID DPNI_CMD(0x232) +#define DPNI_CMDID_CLR_VLAN_FILTERS DPNI_CMD(0x233) + +#define DPNI_CMDID_SET_RX_TC_DIST DPNI_CMD_V3(0x235) + +#define DPNI_CMDID_SET_QOS_TBL DPNI_CMD_V2(0x240) +#define DPNI_CMDID_ADD_QOS_ENT DPNI_CMD_V2(0x241) +#define DPNI_CMDID_REMOVE_QOS_ENT DPNI_CMD(0x242) +#define DPNI_CMDID_CLR_QOS_TBL DPNI_CMD(0x243) +#define DPNI_CMDID_ADD_FS_ENT DPNI_CMD(0x244) +#define DPNI_CMDID_REMOVE_FS_ENT DPNI_CMD(0x245) +#define DPNI_CMDID_CLR_FS_ENT DPNI_CMD(0x246) + +#define DPNI_CMDID_GET_STATISTICS DPNI_CMD_V3(0x25D) +#define DPNI_CMDID_RESET_STATISTICS DPNI_CMD(0x25E) +#define DPNI_CMDID_GET_QUEUE DPNI_CMD_V2(0x25F) +#define DPNI_CMDID_SET_QUEUE DPNI_CMD_V2(0x260) +#define DPNI_CMDID_GET_TAILDROP DPNI_CMD_V2(0x261) +#define DPNI_CMDID_SET_TAILDROP DPNI_CMD_V2(0x262) + +#define DPNI_CMDID_GET_PORT_MAC_ADDR DPNI_CMD(0x263) + +#define DPNI_CMDID_GET_BUFFER_LAYOUT DPNI_CMD_V2(0x264) +#define DPNI_CMDID_SET_BUFFER_LAYOUT DPNI_CMD_V2(0x265) + +#define DPNI_CMDID_SET_CONGESTION_NOTIFICATION DPNI_CMD_V2(0x267) +#define DPNI_CMDID_GET_CONGESTION_NOTIFICATION DPNI_CMD_V2(0x268) +#define DPNI_CMDID_SET_EARLY_DROP DPNI_CMD_V2(0x269) +#define DPNI_CMDID_GET_EARLY_DROP DPNI_CMD_V2(0x26A) +#define DPNI_CMDID_GET_OFFLOAD DPNI_CMD(0x26B) +#define DPNI_CMDID_SET_OFFLOAD DPNI_CMD(0x26C) +#define DPNI_CMDID_SET_TX_CONFIRMATION_MODE DPNI_CMD(0x266) +#define DPNI_CMDID_GET_TX_CONFIRMATION_MODE DPNI_CMD(0x26D) +#define DPNI_CMDID_LOAD_SW_SEQUENCE DPNI_CMD(0x270) +#define DPNI_CMDID_ENABLE_SW_SEQUENCE DPNI_CMD(0x271) +#define DPNI_CMDID_GET_SW_SEQUENCE_LAYOUT DPNI_CMD(0x272) +#define DPNI_CMDID_SET_OPR DPNI_CMD(0x26e) +#define DPNI_CMDID_GET_OPR DPNI_CMD(0x26f) +#define DPNI_CMDID_SET_RX_FS_DIST DPNI_CMD(0x273) +#define DPNI_CMDID_SET_RX_HASH_DIST DPNI_CMD(0x274) +#define DPNI_CMDID_ADD_CUSTOM_TPID DPNI_CMD(0x275) +#define DPNI_CMDID_REMOVE_CUSTOM_TPID DPNI_CMD(0x276) +#define DPNI_CMDID_GET_CUSTOM_TPID DPNI_CMD(0x277) + +/* Macros for accessing command fields smaller than 1byte */ +#define DPNI_MASK(field) \ + GENMASK(DPNI_##field##_SHIFT + DPNI_##field##_SIZE - 1, \ + DPNI_##field##_SHIFT) +#define dpni_set_field(var, field, val) \ + ((var) |= (((val) << DPNI_##field##_SHIFT) & DPNI_MASK(field))) +#define dpni_get_field(var, field) \ + (((var) & DPNI_MASK(field)) >> DPNI_##field##_SHIFT) + +#pragma pack(push, 1) +struct dpni_cmd_open { + uint32_t dpni_id; +}; + +struct dpni_cmd_create { + uint32_t options; + uint8_t num_queues; + uint8_t num_tcs; + uint8_t mac_filter_entries; + uint8_t pad1; + uint8_t vlan_filter_entries; + uint8_t pad2; + uint8_t qos_entries; + uint8_t pad3; + uint16_t fs_entries; + uint8_t num_rx_tcs; + uint8_t pad4; + uint8_t num_cgs; +}; + +struct dpni_cmd_destroy { + uint32_t dpsw_id; +}; + +#define DPNI_BACKUP_POOL(val, order) (((val) & 0x1) << (order)) + +struct dpni_cmd_pool { + uint16_t dpbp_id; + uint8_t priority_mask; + uint8_t pad; +}; + +struct dpni_cmd_set_pools { + uint8_t 
num_dpbp; + uint8_t backup_pool_mask; + uint8_t pad; + uint8_t pool_options; + struct dpni_cmd_pool pool[8]; + uint16_t buffer_size[8]; +}; + +/* The enable indication is always the least significant bit */ +#define DPNI_ENABLE_SHIFT 0 +#define DPNI_ENABLE_SIZE 1 + +struct dpni_rsp_is_enabled { + uint8_t enabled; +}; + +struct dpni_cmd_set_irq_enable { + uint8_t enable; + uint8_t pad[3]; + uint8_t irq_index; +}; + +struct dpni_cmd_get_irq_enable { + uint32_t pad; + uint8_t irq_index; +}; + +struct dpni_rsp_get_irq_enable { + uint8_t enabled; +}; + +struct dpni_cmd_set_irq_mask { + uint32_t mask; + uint8_t irq_index; +}; + +struct dpni_cmd_get_irq_mask { + uint32_t pad; + uint8_t irq_index; +}; + +struct dpni_rsp_get_irq_mask { + uint32_t mask; +}; + +struct dpni_cmd_get_irq_status { + uint32_t status; + uint8_t irq_index; +}; + +struct dpni_rsp_get_irq_status { + uint32_t status; +}; + +struct dpni_cmd_clear_irq_status { + uint32_t status; + uint8_t irq_index; +}; + +struct dpni_rsp_get_attr { + /* response word 0 */ + uint32_t options; + uint8_t num_queues; + uint8_t num_rx_tcs; + uint8_t mac_filter_entries; + uint8_t num_tx_tcs; + /* response word 1 */ + uint8_t vlan_filter_entries; + uint8_t pad1; + uint8_t qos_entries; + uint8_t pad2; + uint16_t fs_entries; + uint16_t pad3; + /* response word 2 */ + uint8_t qos_key_size; + uint8_t fs_key_size; + uint16_t wriop_version; + uint8_t num_cgs; +}; + +#define DPNI_ERROR_ACTION_SHIFT 0 +#define DPNI_ERROR_ACTION_SIZE 4 +#define DPNI_FRAME_ANN_SHIFT 4 +#define DPNI_FRAME_ANN_SIZE 1 + +struct dpni_cmd_set_errors_behavior { + uint32_t errors; + /* from least significant bit: error_action:4, set_frame_annotation:1 */ + uint8_t flags; +}; + +/* There are 3 separate commands for configuring Rx, Tx and Tx confirmation + * buffer layouts, but they all share the same parameters. + * If one of the functions changes, below structure needs to be split. 
+ */ + +#define DPNI_PASS_TS_SHIFT 0 +#define DPNI_PASS_TS_SIZE 1 +#define DPNI_PASS_PR_SHIFT 1 +#define DPNI_PASS_PR_SIZE 1 +#define DPNI_PASS_FS_SHIFT 2 +#define DPNI_PASS_FS_SIZE 1 +#define DPNI_PASS_SWO_SHIFT 3 +#define DPNI_PASS_SWO_SIZE 1 + +struct dpni_cmd_get_buffer_layout { + uint8_t qtype; +}; + +struct dpni_rsp_get_buffer_layout { + /* response word 0 */ + uint8_t pad0[6]; + /* from LSB: pass_timestamp:1, parser_result:1, frame_status:1 */ + uint8_t flags; + uint8_t pad1; + /* response word 1 */ + uint16_t private_data_size; + uint16_t data_align; + uint16_t head_room; + uint16_t tail_room; +}; + +struct dpni_cmd_set_buffer_layout { + /* cmd word 0 */ + uint8_t qtype; + uint8_t pad0[3]; + uint16_t options; + /* from LSB: pass_timestamp:1, parser_result:1, frame_status:1 */ + uint8_t flags; + uint8_t pad1; + /* cmd word 1 */ + uint16_t private_data_size; + uint16_t data_align; + uint16_t head_room; + uint16_t tail_room; +}; + +struct dpni_cmd_set_offload { + uint8_t pad[3]; + uint8_t dpni_offload; + uint32_t config; +}; + +struct dpni_cmd_get_offload { + uint8_t pad[3]; + uint8_t dpni_offload; +}; + +struct dpni_rsp_get_offload { + uint32_t pad; + uint32_t config; +}; + +struct dpni_cmd_get_qdid { + uint8_t qtype; +}; + +struct dpni_rsp_get_qdid { + uint16_t qdid; +}; + +struct dpni_rsp_get_sp_info { + uint16_t spids[2]; +}; + +struct dpni_rsp_get_tx_data_offset { + uint16_t data_offset; +}; + +struct dpni_cmd_get_statistics { + uint8_t page_number; + uint16_t param; +}; + +struct dpni_rsp_get_statistics { + uint64_t counter[7]; +}; + +struct dpni_cmd_set_link_cfg { + uint64_t pad0; + uint32_t rate; + uint32_t pad1; + uint64_t options; + uint64_t advertising; +}; + +#define DPNI_LINK_STATE_SHIFT 0 +#define DPNI_LINK_STATE_SIZE 1 +#define DPNI_STATE_VALID_SHIFT 1 +#define DPNI_STATE_VALID_SIZE 1 + +struct dpni_rsp_get_link_state { + uint32_t pad0; + /* from LSB: up:1 */ + uint8_t flags; + uint8_t pad1[3]; + uint32_t rate; + uint32_t pad2; + uint64_t options; + uint64_t supported; + uint64_t advertising; +}; + +struct dpni_cmd_set_max_frame_length { + uint16_t max_frame_length; +}; + +struct dpni_rsp_get_max_frame_length { + uint16_t max_frame_length; +}; + +struct dpni_cmd_set_multicast_promisc { + uint8_t enable; +}; + +struct dpni_rsp_get_multicast_promisc { + uint8_t enabled; +}; + +struct dpni_cmd_set_unicast_promisc { + uint8_t enable; +}; + +struct dpni_rsp_get_unicast_promisc { + uint8_t enabled; +}; + +struct dpni_cmd_set_primary_mac_addr { + uint16_t pad; + uint8_t mac_addr[6]; +}; + +struct dpni_rsp_get_primary_mac_addr { + uint16_t pad; + uint8_t mac_addr[6]; +}; + +struct dpni_rsp_get_port_mac_addr { + uint16_t pad; + uint8_t mac_addr[6]; +}; + +#define DPNI_MAC_SET_QUEUE_ACTION 1 + +struct dpni_cmd_add_mac_addr { + uint8_t flags; + uint8_t pad; + uint8_t mac_addr[6]; + uint8_t tc_id; + uint8_t fq_id; +}; + +struct dpni_cmd_remove_mac_addr { + uint16_t pad; + uint8_t mac_addr[6]; +}; + +#define DPNI_UNICAST_FILTERS_SHIFT 0 +#define DPNI_UNICAST_FILTERS_SIZE 1 +#define DPNI_MULTICAST_FILTERS_SHIFT 1 +#define DPNI_MULTICAST_FILTERS_SIZE 1 + +struct dpni_cmd_clear_mac_filters { + /* from LSB: unicast:1, multicast:1 */ + uint8_t flags; +}; + +struct dpni_cmd_enable_vlan_filter { + /* only the LSB */ + uint8_t en; +}; + +#define DPNI_VLAN_SET_QUEUE_ACTION 1 + +struct dpni_cmd_vlan_id { + uint8_t flags; + uint8_t tc_id; + uint8_t flow_id; + uint8_t pad; + uint16_t vlan_id; +}; + +#define DPNI_SEPARATE_GRP_SHIFT 0 +#define DPNI_SEPARATE_GRP_SIZE 1 +#define DPNI_MODE_1_SHIFT 0 
+#define DPNI_MODE_1_SIZE 4 +#define DPNI_MODE_2_SHIFT 4 +#define DPNI_MODE_2_SIZE 4 + +struct dpni_cmd_set_tx_priorities { + uint16_t flags; + uint8_t prio_group_A; + uint8_t prio_group_B; + uint32_t pad0; + uint8_t modes[4]; + uint32_t pad1; + uint64_t pad2; + uint16_t delta_bandwidth[8]; +}; + +#define DPNI_DIST_MODE_SHIFT 0 +#define DPNI_DIST_MODE_SIZE 4 +#define DPNI_MISS_ACTION_SHIFT 4 +#define DPNI_MISS_ACTION_SIZE 4 +#define DPNI_KEEP_HASH_KEY_SHIFT 7 +#define DPNI_KEEP_HASH_KEY_SIZE 1 +#define DPNI_KEEP_ENTRIES_SHIFT 6 +#define DPNI_KEEP_ENTRIES_SIZE 1 + +struct dpni_cmd_set_rx_tc_dist { + uint16_t dist_size; + uint8_t tc_id; + /* from LSB: dist_mode:4, miss_action:4 */ + uint8_t flags; + uint8_t pad0; + /* only the LSB */ + uint8_t keep_hash_key; + uint16_t default_flow_id; + uint64_t pad1[5]; + uint64_t key_cfg_iova; +}; + +struct dpni_cmd_get_queue { + uint8_t qtype; + uint8_t tc; + uint8_t index; +}; + +#define DPNI_DEST_TYPE_SHIFT 0 +#define DPNI_DEST_TYPE_SIZE 4 +#define DPNI_CGID_VALID_SHIFT 5 +#define DPNI_CGID_VALID_SIZE 1 +#define DPNI_STASH_CTRL_SHIFT 6 +#define DPNI_STASH_CTRL_SIZE 1 +#define DPNI_HOLD_ACTIVE_SHIFT 7 +#define DPNI_HOLD_ACTIVE_SIZE 1 + +struct dpni_rsp_get_queue { + /* response word 0 */ + uint64_t pad0; + /* response word 1 */ + uint32_t dest_id; + uint16_t pad1; + uint8_t dest_prio; + /* From LSB: + * dest_type:4, pad:1, cgid_valid:1, flc_stash_ctrl:1, hold_active:1 + */ + uint8_t flags; + /* response word 2 */ + uint64_t flc; + /* response word 3 */ + uint64_t user_context; + /* response word 4 */ + uint32_t fqid; + uint16_t qdbin; + uint16_t pad2; + /* response word 5*/ + uint8_t cgid; +}; + +struct dpni_cmd_set_queue { + /* cmd word 0 */ + uint8_t qtype; + uint8_t tc; + uint8_t index; + uint8_t options; + uint32_t pad0; + /* cmd word 1 */ + uint32_t dest_id; + uint16_t pad1; + uint8_t dest_prio; + uint8_t flags; + /* cmd word 2 */ + uint64_t flc; + /* cmd word 3 */ + uint64_t user_context; + /* cmd word 4 */ + uint8_t cgid; +}; + +#define DPNI_DISCARD_ON_MISS_SHIFT 0 +#define DPNI_DISCARD_ON_MISS_SIZE 1 +#define DPNI_KEEP_QOS_ENTRIES_SHIFT 1 +#define DPNI_KEEP_QOS_ENTRIES_SIZE 1 + +struct dpni_cmd_set_qos_table { + uint32_t pad; + uint8_t default_tc; + /* only the LSB */ + uint8_t discard_on_miss; + uint16_t pad1[21]; + uint64_t key_cfg_iova; +}; + +#define DPNI_QOS_OPT_SET_TC_ONLY 0x0 +#define DPNI_QOS_OPT_SET_FLOW_ID 0x1 + +struct dpni_cmd_add_qos_entry { + uint8_t flags; + uint8_t flow_id; + uint8_t tc_id; + uint8_t key_size; + uint16_t index; + uint16_t pad2; + uint64_t key_iova; + uint64_t mask_iova; +}; + +struct dpni_cmd_remove_qos_entry { + uint8_t pad1[3]; + uint8_t key_size; + uint32_t pad2; + uint64_t key_iova; + uint64_t mask_iova; +}; + +struct dpni_cmd_add_fs_entry { + uint16_t options; + uint8_t tc_id; + uint8_t key_size; + uint16_t index; + uint16_t flow_id; + uint64_t key_iova; + uint64_t mask_iova; + uint64_t flc; +}; + +struct dpni_cmd_remove_fs_entry { + uint16_t pad1; + uint8_t tc_id; + uint8_t key_size; + uint32_t pad2; + uint64_t key_iova; + uint64_t mask_iova; +}; + +struct dpni_cmd_clear_fs_entries { + uint16_t pad; + uint8_t tc_id; +}; + +#define DPNI_DROP_ENABLE_SHIFT 0 +#define DPNI_DROP_ENABLE_SIZE 1 +#define DPNI_DROP_UNITS_SHIFT 2 +#define DPNI_DROP_UNITS_SIZE 2 + +struct dpni_early_drop { + /* from LSB: enable:1 units:2 */ + uint8_t flags; + uint8_t pad0[3]; + uint32_t pad1; + uint8_t green_drop_probability; + uint8_t pad2[7]; + uint64_t green_max_threshold; + uint64_t green_min_threshold; + uint64_t pad3; + uint8_t 
yellow_drop_probability; + uint8_t pad4[7]; + uint64_t yellow_max_threshold; + uint64_t yellow_min_threshold; + uint64_t pad5; + uint8_t red_drop_probability; + uint8_t pad6[7]; + uint64_t red_max_threshold; + uint64_t red_min_threshold; +}; + +struct dpni_cmd_early_drop { + uint8_t qtype; + uint8_t tc; + uint8_t pad[6]; + uint64_t early_drop_iova; +}; + +struct dpni_rsp_get_api_version { + uint16_t major; + uint16_t minor; +}; + +struct dpni_cmd_get_taildrop { + uint8_t congestion_point; + uint8_t qtype; + uint8_t tc; + uint8_t index; +}; + +struct dpni_rsp_get_taildrop { + /* cmd word 0 */ + uint64_t pad0; + /* cmd word 1 */ + /* from LSB: enable:1 oal_lo:7 */ + uint8_t enable_oal_lo; + /* from LSB: oal_hi:5 */ + uint8_t oal_hi; + uint8_t units; + uint8_t pad2; + uint32_t threshold; +}; + +#define DPNI_OAL_LO_SHIFT 1 +#define DPNI_OAL_LO_SIZE 7 +#define DPNI_OAL_HI_SHIFT 0 +#define DPNI_OAL_HI_SIZE 5 + +struct dpni_cmd_set_taildrop { + /* cmd word 0 */ + uint8_t congestion_point; + uint8_t qtype; + uint8_t tc; + uint8_t index; + uint32_t pad0; + /* cmd word 1 */ + /* from LSB: enable:1 oal_lo:7 */ + uint8_t enable_oal_lo; + /* from LSB: oal_hi:5 */ + uint8_t oal_hi; + uint8_t units; + uint8_t pad2; + uint32_t threshold; +}; + +struct dpni_tx_confirmation_mode { + uint32_t pad; + uint8_t confirmation_mode; +}; + +#define DPNI_DEST_TYPE_SHIFT 0 +#define DPNI_DEST_TYPE_SIZE 4 +#define DPNI_CONG_UNITS_SHIFT 4 +#define DPNI_CONG_UNITS_SIZE 2 + +struct dpni_cmd_set_congestion_notification { + uint8_t qtype; + uint8_t tc; + uint8_t pad; + uint8_t congestion_point; + uint8_t cgid; + uint8_t pad2[3]; + uint32_t dest_id; + uint16_t notification_mode; + uint8_t dest_priority; + /* from LSB: dest_type: 4 units:2 */ + uint8_t type_units; + uint64_t message_iova; + uint64_t message_ctx; + uint32_t threshold_entry; + uint32_t threshold_exit; +}; + +struct dpni_cmd_get_congestion_notification { + uint8_t qtype; + uint8_t tc; + uint8_t pad; + uint8_t congestion_point; + uint8_t cgid; +}; + +struct dpni_rsp_get_congestion_notification { + uint64_t pad; + uint32_t dest_id; + uint16_t notification_mode; + uint8_t dest_priority; + /* from LSB: dest_type: 4 units:2 */ + uint8_t type_units; + uint64_t message_iova; + uint64_t message_ctx; + uint32_t threshold_entry; + uint32_t threshold_exit; +}; + +struct dpni_cmd_set_opr { + uint8_t pad0; + uint8_t tc_id; + uint8_t index; + uint8_t options; + uint8_t pad1[7]; + uint8_t oloe; + uint8_t oeane; + uint8_t olws; + uint8_t oa; + uint8_t oprrws; +}; + +struct dpni_cmd_get_opr { + uint8_t pad; + uint8_t tc_id; + uint8_t index; +}; + +#define DPNI_RIP_SHIFT 0 +#define DPNI_RIP_SIZE 1 +#define DPNI_OPR_ENABLE_SHIFT 1 +#define DPNI_OPR_ENABLE_SIZE 1 +#define DPNI_TSEQ_NLIS_SHIFT 0 +#define DPNI_TSEQ_NLIS_SIZE 1 +#define DPNI_HSEQ_NLIS_SHIFT 0 +#define DPNI_HSEQ_NLIS_SIZE 1 + +struct dpni_rsp_get_opr { + uint64_t pad0; + /* from LSB: rip:1 enable:1 */ + uint8_t flags; + uint16_t pad1; + uint8_t oloe; + uint8_t oeane; + uint8_t olws; + uint8_t oa; + uint8_t oprrws; + uint16_t nesn; + uint16_t pad8; + uint16_t ndsn; + uint16_t pad2; + uint16_t ea_tseq; + /* only the LSB */ + uint8_t tseq_nlis; + uint8_t pad3; + uint16_t ea_hseq; + /* only the LSB */ + uint8_t hseq_nlis; + uint8_t pad4; + uint16_t ea_hptr; + uint16_t pad5; + uint16_t ea_tptr; + uint16_t pad6; + uint16_t opr_vid; + uint16_t pad7; + uint16_t opr_id; +}; + +struct dpni_cmd_add_custom_tpid { + uint16_t pad; + uint16_t tpid; +}; + +struct dpni_cmd_remove_custom_tpid { + uint16_t pad; + uint16_t tpid; +}; + 
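+/* Illustrative sketch, not part of the MC command set: the taildrop OAL
+ * (overhead accounting length) is a 12-bit signed value split across the
+ * enable_oal_lo and oal_hi bytes of struct dpni_cmd_set_taildrop above.
+ * dpni.c would be expected to pack it with the dpni_set_field() helper and
+ * the DPNI_ENABLE/DPNI_OAL_LO/DPNI_OAL_HI shift/size macros, roughly as:
+ *
+ *	struct dpni_cmd_set_taildrop *cmd_params;  // points into the command
+ *
+ *	dpni_set_field(cmd_params->enable_oal_lo, ENABLE, taildrop->enable);
+ *	dpni_set_field(cmd_params->enable_oal_lo, OAL_LO, taildrop->oal);
+ *	dpni_set_field(cmd_params->oal_hi, OAL_HI,
+ *		       taildrop->oal >> DPNI_OAL_LO_SIZE);
+ */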
+struct dpni_rsp_get_custom_tpid { + uint16_t tpid1; + uint16_t tpid2; +}; + +#define DPNI_RX_FS_DIST_ENABLE_SHIFT 0 +#define DPNI_RX_FS_DIST_ENABLE_SIZE 1 +struct dpni_cmd_set_rx_fs_dist { + uint16_t dist_size; + uint8_t enable; + uint8_t tc; + uint16_t miss_flow_id; + uint16_t pad1; + uint64_t key_cfg_iova; +}; + +#define DPNI_RX_HASH_DIST_ENABLE_SHIFT 0 +#define DPNI_RX_HASH_DIST_ENABLE_SIZE 1 +struct dpni_cmd_set_rx_hash_dist { + uint16_t dist_size; + uint8_t enable; + uint8_t tc_id; + uint32_t pad; + uint64_t key_cfg_iova; +}; + +struct dpni_load_sw_sequence { + uint8_t dest; + uint8_t pad0[7]; + uint16_t ss_offset; + uint16_t pad1; + uint16_t ss_size; + uint16_t pad2; + uint64_t ss_iova; +}; + +struct dpni_enable_sw_sequence { + uint8_t dest; + uint8_t pad0[7]; + uint16_t ss_offset; + uint16_t hxs; + uint8_t set_start; + uint8_t pad1[3]; + uint8_t param_offset; + uint8_t pad2[3]; + uint8_t param_size; + uint8_t pad3[3]; + uint64_t param_iova; +}; + +struct dpni_get_sw_sequence_layout { + uint8_t src; + uint8_t pad0[7]; + uint64_t layout_iova; +}; + +struct dpni_sw_sequence_layout_entry { + uint16_t ss_offset; + uint16_t ss_size; + uint8_t param_offset; + uint8_t param_size; + uint16_t pad; +}; + +#pragma pack(pop) +#endif /* _FSL_DPNI_CMD_H */ diff --git a/src/spdk/dpdk/drivers/net/dpaa2/mc/fsl_dprtc.h b/src/spdk/dpdk/drivers/net/dpaa2/mc/fsl_dprtc.h new file mode 100644 index 000000000..49edb5a05 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/dpaa2/mc/fsl_dprtc.h @@ -0,0 +1,109 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) + * Copyright 2019 NXP + */ +#ifndef __FSL_DPRTC_H +#define __FSL_DPRTC_H + +/** @addtogroup dprtc Data Path Real Time Counter API + * Contains initialization APIs and runtime control APIs for RTC + * @{ + */ + +struct fsl_mc_io; + +int dprtc_open(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + int dprtc_id, + uint16_t *token); + +int dprtc_close(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token); + +/** + * struct dprtc_cfg - Structure representing DPRTC configuration + * @options: place holder + */ +struct dprtc_cfg { + uint32_t options; +}; + +int dprtc_create(struct fsl_mc_io *mc_io, + uint16_t dprc_token, + uint32_t cmd_flags, + const struct dprtc_cfg *cfg, + uint32_t *obj_id); + +int dprtc_destroy(struct fsl_mc_io *mc_io, + uint16_t dprc_token, + uint32_t cmd_flags, + uint32_t object_id); + +int dprtc_enable(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token); + +int dprtc_disable(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token); + +int dprtc_is_enabled(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + int *en); + +int dprtc_reset(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token); + +int dprtc_set_clock_offset(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + int64_t offset); + +int dprtc_set_freq_compensation(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint32_t freq_compensation); + +int dprtc_get_freq_compensation(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint32_t *freq_compensation); + +int dprtc_get_time(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint64_t *time); + +int dprtc_set_time(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint64_t time); + +int dprtc_set_alarm(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint64_t time); + +/** + * struct dprtc_attr - Structure representing DPRTC attributes + * @id: DPRTC object ID + */ +struct 
dprtc_attr { + int id; + int paddr; + int little_endian; +}; + +int dprtc_get_attributes(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + struct dprtc_attr *attr); + +int dprtc_get_api_version(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t *major_ver, + uint16_t *minor_ver); + +#endif /* __FSL_DPRTC_H */ diff --git a/src/spdk/dpdk/drivers/net/dpaa2/mc/fsl_dprtc_cmd.h b/src/spdk/dpdk/drivers/net/dpaa2/mc/fsl_dprtc_cmd.h new file mode 100644 index 000000000..eca12ff5e --- /dev/null +++ b/src/spdk/dpdk/drivers/net/dpaa2/mc/fsl_dprtc_cmd.h @@ -0,0 +1,91 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) + * Copyright 2019 NXP + */ +#include +#ifndef _FSL_DPRTC_CMD_H +#define _FSL_DPRTC_CMD_H + +/* DPRTC Version */ +#define DPRTC_VER_MAJOR 2 +#define DPRTC_VER_MINOR 1 + +/* Command versioning */ +#define DPRTC_CMD_BASE_VERSION 1 +#define DPRTC_CMD_ID_OFFSET 4 + +#define DPRTC_CMD(id) (((id) << DPRTC_CMD_ID_OFFSET) | DPRTC_CMD_BASE_VERSION) + +/* Command IDs */ +#define DPRTC_CMDID_CLOSE DPRTC_CMD(0x800) +#define DPRTC_CMDID_OPEN DPRTC_CMD(0x810) +#define DPRTC_CMDID_CREATE DPRTC_CMD(0x910) +#define DPRTC_CMDID_DESTROY DPRTC_CMD(0x990) +#define DPRTC_CMDID_GET_API_VERSION DPRTC_CMD(0xa10) + +#define DPRTC_CMDID_ENABLE DPRTC_CMD(0x002) +#define DPRTC_CMDID_DISABLE DPRTC_CMD(0x003) +#define DPRTC_CMDID_GET_ATTR DPRTC_CMD(0x004) +#define DPRTC_CMDID_RESET DPRTC_CMD(0x005) +#define DPRTC_CMDID_IS_ENABLED DPRTC_CMD(0x006) + +#define DPRTC_CMDID_SET_CLOCK_OFFSET DPRTC_CMD(0x1d0) +#define DPRTC_CMDID_SET_FREQ_COMPENSATION DPRTC_CMD(0x1d1) +#define DPRTC_CMDID_GET_FREQ_COMPENSATION DPRTC_CMD(0x1d2) +#define DPRTC_CMDID_GET_TIME DPRTC_CMD(0x1d3) +#define DPRTC_CMDID_SET_TIME DPRTC_CMD(0x1d4) +#define DPRTC_CMDID_SET_ALARM DPRTC_CMD(0x1d5) +#define DPRTC_CMDID_SET_PERIODIC_PULSE DPRTC_CMD(0x1d6) +#define DPRTC_CMDID_CLEAR_PERIODIC_PULSE DPRTC_CMD(0x1d7) +#define DPRTC_CMDID_SET_EXT_TRIGGER DPRTC_CMD(0x1d8) +#define DPRTC_CMDID_CLEAR_EXT_TRIGGER DPRTC_CMD(0x1d9) +#define DPRTC_CMDID_GET_EXT_TRIGGER_TIMESTAMP DPRTC_CMD(0x1dA) + +/* Macros for accessing command fields smaller than 1byte */ +#define DPRTC_MASK(field) \ + GENMASK(DPRTC_##field##_SHIFT + DPRTC_##field##_SIZE - 1, \ + DPRTC_##field##_SHIFT) +#define dprtc_get_field(var, field) \ + (((var) & DPRTC_MASK(field)) >> DPRTC_##field##_SHIFT) + +#pragma pack(push, 1) +struct dprtc_cmd_open { + uint32_t dprtc_id; +}; + +struct dprtc_cmd_destroy { + uint32_t object_id; +}; + +#define DPRTC_ENABLE_SHIFT 0 +#define DPRTC_ENABLE_SIZE 1 +#define DPRTC_ENDIANNESS_SHIFT 0 +#define DPRTC_ENDIANNESS_SIZE 1 + +struct dprtc_rsp_is_enabled { + uint8_t en; +}; + +struct dprtc_rsp_get_attributes { + uint32_t paddr; + uint32_t id; + uint8_t little_endian; +}; + +struct dprtc_cmd_set_clock_offset { + uint64_t offset; +}; + +struct dprtc_get_freq_compensation { + uint32_t freq_compensation; +}; + +struct dprtc_time { + uint64_t time; +}; + +struct dprtc_rsp_get_api_version { + uint16_t major; + uint16_t minor; +}; +#pragma pack(pop) +#endif /* _FSL_DPRTC_CMD_H */ diff --git a/src/spdk/dpdk/drivers/net/dpaa2/mc/fsl_net.h b/src/spdk/dpdk/drivers/net/dpaa2/mc/fsl_net.h new file mode 100644 index 000000000..3eaad2f71 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/dpaa2/mc/fsl_net.h @@ -0,0 +1,456 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) + * + * Copyright 2013-2015 Freescale Semiconductor Inc. 
+ * Copyright 2018-2019 NXP + * + */ + +#ifndef __FSL_NET_H +#define __FSL_NET_H + +#define LAST_HDR_INDEX 0xFFFFFFFF + +/*****************************************************************************/ +/* Protocol fields */ +/*****************************************************************************/ + +/************************* Ethernet fields *********************************/ +#define NH_FLD_ETH_DA (1) +#define NH_FLD_ETH_SA (NH_FLD_ETH_DA << 1) +#define NH_FLD_ETH_LENGTH (NH_FLD_ETH_DA << 2) +#define NH_FLD_ETH_TYPE (NH_FLD_ETH_DA << 3) +#define NH_FLD_ETH_FINAL_CKSUM (NH_FLD_ETH_DA << 4) +#define NH_FLD_ETH_PADDING (NH_FLD_ETH_DA << 5) +#define NH_FLD_ETH_ALL_FIELDS ((NH_FLD_ETH_DA << 6) - 1) + +#define NH_FLD_ETH_ADDR_SIZE 6 + +/*************************** VLAN fields ***********************************/ +#define NH_FLD_VLAN_VPRI (1) +#define NH_FLD_VLAN_CFI (NH_FLD_VLAN_VPRI << 1) +#define NH_FLD_VLAN_VID (NH_FLD_VLAN_VPRI << 2) +#define NH_FLD_VLAN_LENGTH (NH_FLD_VLAN_VPRI << 3) +#define NH_FLD_VLAN_TYPE (NH_FLD_VLAN_VPRI << 4) +#define NH_FLD_VLAN_ALL_FIELDS ((NH_FLD_VLAN_VPRI << 5) - 1) + +#define NH_FLD_VLAN_TCI (NH_FLD_VLAN_VPRI | \ + NH_FLD_VLAN_CFI | \ + NH_FLD_VLAN_VID) + +/************************ IP (generic) fields ******************************/ +#define NH_FLD_IP_VER (1) +#define NH_FLD_IP_DSCP (NH_FLD_IP_VER << 2) +#define NH_FLD_IP_ECN (NH_FLD_IP_VER << 3) +#define NH_FLD_IP_PROTO (NH_FLD_IP_VER << 4) +#define NH_FLD_IP_SRC (NH_FLD_IP_VER << 5) +#define NH_FLD_IP_DST (NH_FLD_IP_VER << 6) +#define NH_FLD_IP_TOS_TC (NH_FLD_IP_VER << 7) +#define NH_FLD_IP_ID (NH_FLD_IP_VER << 8) +#define NH_FLD_IP_ALL_FIELDS ((NH_FLD_IP_VER << 9) - 1) + +#define NH_FLD_IP_PROTO_SIZE 1 + +/***************************** IPV4 fields *********************************/ +#define NH_FLD_IPV4_VER (1) +#define NH_FLD_IPV4_HDR_LEN (NH_FLD_IPV4_VER << 1) +#define NH_FLD_IPV4_TOS (NH_FLD_IPV4_VER << 2) +#define NH_FLD_IPV4_TOTAL_LEN (NH_FLD_IPV4_VER << 3) +#define NH_FLD_IPV4_ID (NH_FLD_IPV4_VER << 4) +#define NH_FLD_IPV4_FLAG_D (NH_FLD_IPV4_VER << 5) +#define NH_FLD_IPV4_FLAG_M (NH_FLD_IPV4_VER << 6) +#define NH_FLD_IPV4_OFFSET (NH_FLD_IPV4_VER << 7) +#define NH_FLD_IPV4_TTL (NH_FLD_IPV4_VER << 8) +#define NH_FLD_IPV4_PROTO (NH_FLD_IPV4_VER << 9) +#define NH_FLD_IPV4_CKSUM (NH_FLD_IPV4_VER << 10) +#define NH_FLD_IPV4_SRC_IP (NH_FLD_IPV4_VER << 11) +#define NH_FLD_IPV4_DST_IP (NH_FLD_IPV4_VER << 12) +#define NH_FLD_IPV4_OPTS (NH_FLD_IPV4_VER << 13) +#define NH_FLD_IPV4_OPTS_COUNT (NH_FLD_IPV4_VER << 14) +#define NH_FLD_IPV4_ALL_FIELDS ((NH_FLD_IPV4_VER << 15) - 1) + +#define NH_FLD_IPV4_ADDR_SIZE 4 +#define NH_FLD_IPV4_PROTO_SIZE 1 + +/***************************** IPV6 fields *********************************/ +#define NH_FLD_IPV6_VER (1) +#define NH_FLD_IPV6_TC (NH_FLD_IPV6_VER << 1) +#define NH_FLD_IPV6_SRC_IP (NH_FLD_IPV6_VER << 2) +#define NH_FLD_IPV6_DST_IP (NH_FLD_IPV6_VER << 3) +#define NH_FLD_IPV6_NEXT_HDR (NH_FLD_IPV6_VER << 4) +#define NH_FLD_IPV6_FL (NH_FLD_IPV6_VER << 5) +#define NH_FLD_IPV6_HOP_LIMIT (NH_FLD_IPV6_VER << 6) +#define NH_FLD_IPV6_ID (NH_FLD_IPV6_VER << 7) +#define NH_FLD_IPV6_ALL_FIELDS ((NH_FLD_IPV6_VER << 8) - 1) + +#define NH_FLD_IPV6_ADDR_SIZE 16 +#define NH_FLD_IPV6_NEXT_HDR_SIZE 1 + +/***************************** ICMP fields *********************************/ +#define NH_FLD_ICMP_TYPE (1) +#define NH_FLD_ICMP_CODE (NH_FLD_ICMP_TYPE << 1) +#define NH_FLD_ICMP_CKSUM (NH_FLD_ICMP_TYPE << 2) +#define NH_FLD_ICMP_ID (NH_FLD_ICMP_TYPE << 3) +#define 
NH_FLD_ICMP_SQ_NUM (NH_FLD_ICMP_TYPE << 4) +#define NH_FLD_ICMP_ALL_FIELDS ((NH_FLD_ICMP_TYPE << 5) - 1) + +#define NH_FLD_ICMP_CODE_SIZE 1 +#define NH_FLD_ICMP_TYPE_SIZE 1 + +/***************************** IGMP fields *********************************/ +#define NH_FLD_IGMP_VERSION (1) +#define NH_FLD_IGMP_TYPE (NH_FLD_IGMP_VERSION << 1) +#define NH_FLD_IGMP_CKSUM (NH_FLD_IGMP_VERSION << 2) +#define NH_FLD_IGMP_DATA (NH_FLD_IGMP_VERSION << 3) +#define NH_FLD_IGMP_ALL_FIELDS ((NH_FLD_IGMP_VERSION << 4) - 1) + +/***************************** TCP fields **********************************/ +#define NH_FLD_TCP_PORT_SRC (1) +#define NH_FLD_TCP_PORT_DST (NH_FLD_TCP_PORT_SRC << 1) +#define NH_FLD_TCP_SEQ (NH_FLD_TCP_PORT_SRC << 2) +#define NH_FLD_TCP_ACK (NH_FLD_TCP_PORT_SRC << 3) +#define NH_FLD_TCP_OFFSET (NH_FLD_TCP_PORT_SRC << 4) +#define NH_FLD_TCP_FLAGS (NH_FLD_TCP_PORT_SRC << 5) +#define NH_FLD_TCP_WINDOW (NH_FLD_TCP_PORT_SRC << 6) +#define NH_FLD_TCP_CKSUM (NH_FLD_TCP_PORT_SRC << 7) +#define NH_FLD_TCP_URGPTR (NH_FLD_TCP_PORT_SRC << 8) +#define NH_FLD_TCP_OPTS (NH_FLD_TCP_PORT_SRC << 9) +#define NH_FLD_TCP_OPTS_COUNT (NH_FLD_TCP_PORT_SRC << 10) +#define NH_FLD_TCP_ALL_FIELDS ((NH_FLD_TCP_PORT_SRC << 11) - 1) + +#define NH_FLD_TCP_PORT_SIZE 2 + +/***************************** UDP fields **********************************/ +#define NH_FLD_UDP_PORT_SRC (1) +#define NH_FLD_UDP_PORT_DST (NH_FLD_UDP_PORT_SRC << 1) +#define NH_FLD_UDP_LEN (NH_FLD_UDP_PORT_SRC << 2) +#define NH_FLD_UDP_CKSUM (NH_FLD_UDP_PORT_SRC << 3) +#define NH_FLD_UDP_ALL_FIELDS ((NH_FLD_UDP_PORT_SRC << 4) - 1) + +#define NH_FLD_UDP_PORT_SIZE 2 + +/*************************** UDP-lite fields *******************************/ +#define NH_FLD_UDP_LITE_PORT_SRC (1) +#define NH_FLD_UDP_LITE_PORT_DST (NH_FLD_UDP_LITE_PORT_SRC << 1) +#define NH_FLD_UDP_LITE_ALL_FIELDS \ + ((NH_FLD_UDP_LITE_PORT_SRC << 2) - 1) + +#define NH_FLD_UDP_LITE_PORT_SIZE 2 + +/*************************** UDP-encap-ESP fields **************************/ +#define NH_FLD_UDP_ENC_ESP_PORT_SRC (1) +#define NH_FLD_UDP_ENC_ESP_PORT_DST (NH_FLD_UDP_ENC_ESP_PORT_SRC << 1) +#define NH_FLD_UDP_ENC_ESP_LEN (NH_FLD_UDP_ENC_ESP_PORT_SRC << 2) +#define NH_FLD_UDP_ENC_ESP_CKSUM (NH_FLD_UDP_ENC_ESP_PORT_SRC << 3) +#define NH_FLD_UDP_ENC_ESP_SPI (NH_FLD_UDP_ENC_ESP_PORT_SRC << 4) +#define NH_FLD_UDP_ENC_ESP_SEQUENCE_NUM (NH_FLD_UDP_ENC_ESP_PORT_SRC << 5) +#define NH_FLD_UDP_ENC_ESP_ALL_FIELDS \ + ((NH_FLD_UDP_ENC_ESP_PORT_SRC << 6) - 1) + +#define NH_FLD_UDP_ENC_ESP_PORT_SIZE 2 +#define NH_FLD_UDP_ENC_ESP_SPI_SIZE 4 + +/***************************** SCTP fields *********************************/ +#define NH_FLD_SCTP_PORT_SRC (1) +#define NH_FLD_SCTP_PORT_DST (NH_FLD_SCTP_PORT_SRC << 1) +#define NH_FLD_SCTP_VER_TAG (NH_FLD_SCTP_PORT_SRC << 2) +#define NH_FLD_SCTP_CKSUM (NH_FLD_SCTP_PORT_SRC << 3) +#define NH_FLD_SCTP_ALL_FIELDS ((NH_FLD_SCTP_PORT_SRC << 4) - 1) + +#define NH_FLD_SCTP_PORT_SIZE 2 + +/***************************** DCCP fields *********************************/ +#define NH_FLD_DCCP_PORT_SRC (1) +#define NH_FLD_DCCP_PORT_DST (NH_FLD_DCCP_PORT_SRC << 1) +#define NH_FLD_DCCP_ALL_FIELDS ((NH_FLD_DCCP_PORT_SRC << 2) - 1) + +#define NH_FLD_DCCP_PORT_SIZE 2 + +/***************************** IPHC fields *********************************/ +#define NH_FLD_IPHC_CID (1) +#define NH_FLD_IPHC_CID_TYPE (NH_FLD_IPHC_CID << 1) +#define NH_FLD_IPHC_HCINDEX (NH_FLD_IPHC_CID << 2) +#define NH_FLD_IPHC_GEN (NH_FLD_IPHC_CID << 3) +#define NH_FLD_IPHC_D_BIT (NH_FLD_IPHC_CID << 4) 
+#define NH_FLD_IPHC_ALL_FIELDS ((NH_FLD_IPHC_CID << 5) - 1) + +/***************************** SCTP fields *********************************/ +#define NH_FLD_SCTP_CHUNK_DATA_TYPE (1) +#define NH_FLD_SCTP_CHUNK_DATA_FLAGS (NH_FLD_SCTP_CHUNK_DATA_TYPE << 1) +#define NH_FLD_SCTP_CHUNK_DATA_LENGTH (NH_FLD_SCTP_CHUNK_DATA_TYPE << 2) +#define NH_FLD_SCTP_CHUNK_DATA_TSN (NH_FLD_SCTP_CHUNK_DATA_TYPE << 3) +#define NH_FLD_SCTP_CHUNK_DATA_STREAM_ID (NH_FLD_SCTP_CHUNK_DATA_TYPE << 4) +#define NH_FLD_SCTP_CHUNK_DATA_STREAM_SQN (NH_FLD_SCTP_CHUNK_DATA_TYPE << 5) +#define NH_FLD_SCTP_CHUNK_DATA_PAYLOAD_PID (NH_FLD_SCTP_CHUNK_DATA_TYPE << 6) +#define NH_FLD_SCTP_CHUNK_DATA_UNORDERED (NH_FLD_SCTP_CHUNK_DATA_TYPE << 7) +#define NH_FLD_SCTP_CHUNK_DATA_BEGGINING (NH_FLD_SCTP_CHUNK_DATA_TYPE << 8) +#define NH_FLD_SCTP_CHUNK_DATA_END (NH_FLD_SCTP_CHUNK_DATA_TYPE << 9) +#define NH_FLD_SCTP_CHUNK_DATA_ALL_FIELDS \ + ((NH_FLD_SCTP_CHUNK_DATA_TYPE << 10) - 1) + +/*************************** L2TPV2 fields *********************************/ +#define NH_FLD_L2TPV2_TYPE_BIT (1) +#define NH_FLD_L2TPV2_LENGTH_BIT (NH_FLD_L2TPV2_TYPE_BIT << 1) +#define NH_FLD_L2TPV2_SEQUENCE_BIT (NH_FLD_L2TPV2_TYPE_BIT << 2) +#define NH_FLD_L2TPV2_OFFSET_BIT (NH_FLD_L2TPV2_TYPE_BIT << 3) +#define NH_FLD_L2TPV2_PRIORITY_BIT (NH_FLD_L2TPV2_TYPE_BIT << 4) +#define NH_FLD_L2TPV2_VERSION (NH_FLD_L2TPV2_TYPE_BIT << 5) +#define NH_FLD_L2TPV2_LEN (NH_FLD_L2TPV2_TYPE_BIT << 6) +#define NH_FLD_L2TPV2_TUNNEL_ID (NH_FLD_L2TPV2_TYPE_BIT << 7) +#define NH_FLD_L2TPV2_SESSION_ID (NH_FLD_L2TPV2_TYPE_BIT << 8) +#define NH_FLD_L2TPV2_NS (NH_FLD_L2TPV2_TYPE_BIT << 9) +#define NH_FLD_L2TPV2_NR (NH_FLD_L2TPV2_TYPE_BIT << 10) +#define NH_FLD_L2TPV2_OFFSET_SIZE (NH_FLD_L2TPV2_TYPE_BIT << 11) +#define NH_FLD_L2TPV2_FIRST_BYTE (NH_FLD_L2TPV2_TYPE_BIT << 12) +#define NH_FLD_L2TPV2_ALL_FIELDS \ + ((NH_FLD_L2TPV2_TYPE_BIT << 13) - 1) + +/*************************** L2TPV3 fields *********************************/ +#define NH_FLD_L2TPV3_CTRL_TYPE_BIT (1) +#define NH_FLD_L2TPV3_CTRL_LENGTH_BIT (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 1) +#define NH_FLD_L2TPV3_CTRL_SEQUENCE_BIT (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 2) +#define NH_FLD_L2TPV3_CTRL_VERSION (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 3) +#define NH_FLD_L2TPV3_CTRL_LENGTH (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 4) +#define NH_FLD_L2TPV3_CTRL_CONTROL (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 5) +#define NH_FLD_L2TPV3_CTRL_SENT (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 6) +#define NH_FLD_L2TPV3_CTRL_RECV (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 7) +#define NH_FLD_L2TPV3_CTRL_FIRST_BYTE (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 8) +#define NH_FLD_L2TPV3_CTRL_ALL_FIELDS \ + ((NH_FLD_L2TPV3_CTRL_TYPE_BIT << 9) - 1) + +#define NH_FLD_L2TPV3_SESS_TYPE_BIT (1) +#define NH_FLD_L2TPV3_SESS_VERSION (NH_FLD_L2TPV3_SESS_TYPE_BIT << 1) +#define NH_FLD_L2TPV3_SESS_ID (NH_FLD_L2TPV3_SESS_TYPE_BIT << 2) +#define NH_FLD_L2TPV3_SESS_COOKIE (NH_FLD_L2TPV3_SESS_TYPE_BIT << 3) +#define NH_FLD_L2TPV3_SESS_ALL_FIELDS \ + ((NH_FLD_L2TPV3_SESS_TYPE_BIT << 4) - 1) + +/**************************** PPP fields ***********************************/ +#define NH_FLD_PPP_PID (1) +#define NH_FLD_PPP_COMPRESSED (NH_FLD_PPP_PID << 1) +#define NH_FLD_PPP_ALL_FIELDS ((NH_FLD_PPP_PID << 2) - 1) + +/************************** PPPoE fields ***********************************/ +#define NH_FLD_PPPOE_VER (1) +#define NH_FLD_PPPOE_TYPE (NH_FLD_PPPOE_VER << 1) +#define NH_FLD_PPPOE_CODE (NH_FLD_PPPOE_VER << 2) +#define NH_FLD_PPPOE_SID (NH_FLD_PPPOE_VER << 3) +#define NH_FLD_PPPOE_LEN (NH_FLD_PPPOE_VER << 4) +#define 
NH_FLD_PPPOE_SESSION (NH_FLD_PPPOE_VER << 5) +#define NH_FLD_PPPOE_PID (NH_FLD_PPPOE_VER << 6) +#define NH_FLD_PPPOE_ALL_FIELDS ((NH_FLD_PPPOE_VER << 7) - 1) + +/************************* PPP-Mux fields **********************************/ +#define NH_FLD_PPPMUX_PID (1) +#define NH_FLD_PPPMUX_CKSUM (NH_FLD_PPPMUX_PID << 1) +#define NH_FLD_PPPMUX_COMPRESSED (NH_FLD_PPPMUX_PID << 2) +#define NH_FLD_PPPMUX_ALL_FIELDS ((NH_FLD_PPPMUX_PID << 3) - 1) + +/*********************** PPP-Mux sub-frame fields **************************/ +#define NH_FLD_PPPMUX_SUBFRM_PFF (1) +#define NH_FLD_PPPMUX_SUBFRM_LXT (NH_FLD_PPPMUX_SUBFRM_PFF << 1) +#define NH_FLD_PPPMUX_SUBFRM_LEN (NH_FLD_PPPMUX_SUBFRM_PFF << 2) +#define NH_FLD_PPPMUX_SUBFRM_PID (NH_FLD_PPPMUX_SUBFRM_PFF << 3) +#define NH_FLD_PPPMUX_SUBFRM_USE_PID (NH_FLD_PPPMUX_SUBFRM_PFF << 4) +#define NH_FLD_PPPMUX_SUBFRM_ALL_FIELDS \ + ((NH_FLD_PPPMUX_SUBFRM_PFF << 5) - 1) + +/*************************** LLC fields ************************************/ +#define NH_FLD_LLC_DSAP (1) +#define NH_FLD_LLC_SSAP (NH_FLD_LLC_DSAP << 1) +#define NH_FLD_LLC_CTRL (NH_FLD_LLC_DSAP << 2) +#define NH_FLD_LLC_ALL_FIELDS ((NH_FLD_LLC_DSAP << 3) - 1) + +/*************************** NLPID fields **********************************/ +#define NH_FLD_NLPID_NLPID (1) +#define NH_FLD_NLPID_ALL_FIELDS ((NH_FLD_NLPID_NLPID << 1) - 1) + +/*************************** SNAP fields ***********************************/ +#define NH_FLD_SNAP_OUI (1) +#define NH_FLD_SNAP_PID (NH_FLD_SNAP_OUI << 1) +#define NH_FLD_SNAP_ALL_FIELDS ((NH_FLD_SNAP_OUI << 2) - 1) + +/*************************** LLC SNAP fields *******************************/ +#define NH_FLD_LLC_SNAP_TYPE (1) +#define NH_FLD_LLC_SNAP_ALL_FIELDS ((NH_FLD_LLC_SNAP_TYPE << 1) - 1) + +#define NH_FLD_ARP_HTYPE (1) +#define NH_FLD_ARP_PTYPE (NH_FLD_ARP_HTYPE << 1) +#define NH_FLD_ARP_HLEN (NH_FLD_ARP_HTYPE << 2) +#define NH_FLD_ARP_PLEN (NH_FLD_ARP_HTYPE << 3) +#define NH_FLD_ARP_OPER (NH_FLD_ARP_HTYPE << 4) +#define NH_FLD_ARP_SHA (NH_FLD_ARP_HTYPE << 5) +#define NH_FLD_ARP_SPA (NH_FLD_ARP_HTYPE << 6) +#define NH_FLD_ARP_THA (NH_FLD_ARP_HTYPE << 7) +#define NH_FLD_ARP_TPA (NH_FLD_ARP_HTYPE << 8) +#define NH_FLD_ARP_ALL_FIELDS ((NH_FLD_ARP_HTYPE << 9) - 1) + +/*************************** RFC2684 fields ********************************/ +#define NH_FLD_RFC2684_LLC (1) +#define NH_FLD_RFC2684_NLPID (NH_FLD_RFC2684_LLC << 1) +#define NH_FLD_RFC2684_OUI (NH_FLD_RFC2684_LLC << 2) +#define NH_FLD_RFC2684_PID (NH_FLD_RFC2684_LLC << 3) +#define NH_FLD_RFC2684_VPN_OUI (NH_FLD_RFC2684_LLC << 4) +#define NH_FLD_RFC2684_VPN_IDX (NH_FLD_RFC2684_LLC << 5) +#define NH_FLD_RFC2684_ALL_FIELDS ((NH_FLD_RFC2684_LLC << 6) - 1) + +/*************************** User defined fields ***************************/ +#define NH_FLD_USER_DEFINED_SRCPORT (1) +#define NH_FLD_USER_DEFINED_PCDID (NH_FLD_USER_DEFINED_SRCPORT << 1) +#define NH_FLD_USER_DEFINED_ALL_FIELDS \ + ((NH_FLD_USER_DEFINED_SRCPORT << 2) - 1) + +/*************************** Payload fields ********************************/ +#define NH_FLD_PAYLOAD_BUFFER (1) +#define NH_FLD_PAYLOAD_SIZE (NH_FLD_PAYLOAD_BUFFER << 1) +#define NH_FLD_MAX_FRM_SIZE (NH_FLD_PAYLOAD_BUFFER << 2) +#define NH_FLD_MIN_FRM_SIZE (NH_FLD_PAYLOAD_BUFFER << 3) +#define NH_FLD_PAYLOAD_TYPE (NH_FLD_PAYLOAD_BUFFER << 4) +#define NH_FLD_FRAME_SIZE (NH_FLD_PAYLOAD_BUFFER << 5) +#define NH_FLD_PAYLOAD_ALL_FIELDS ((NH_FLD_PAYLOAD_BUFFER << 6) - 1) + +/*************************** GRE fields ************************************/ +#define 
NH_FLD_GRE_TYPE (1) +#define NH_FLD_GRE_ALL_FIELDS ((NH_FLD_GRE_TYPE << 1) - 1) + +/*************************** MINENCAP fields *******************************/ +#define NH_FLD_MINENCAP_SRC_IP (1) +#define NH_FLD_MINENCAP_DST_IP (NH_FLD_MINENCAP_SRC_IP << 1) +#define NH_FLD_MINENCAP_TYPE (NH_FLD_MINENCAP_SRC_IP << 2) +#define NH_FLD_MINENCAP_ALL_FIELDS \ + ((NH_FLD_MINENCAP_SRC_IP << 3) - 1) + +/*************************** IPSEC AH fields *******************************/ +#define NH_FLD_IPSEC_AH_SPI (1) +#define NH_FLD_IPSEC_AH_NH (NH_FLD_IPSEC_AH_SPI << 1) +#define NH_FLD_IPSEC_AH_ALL_FIELDS ((NH_FLD_IPSEC_AH_SPI << 2) - 1) + +/*************************** IPSEC ESP fields ******************************/ +#define NH_FLD_IPSEC_ESP_SPI (1) +#define NH_FLD_IPSEC_ESP_SEQUENCE_NUM (NH_FLD_IPSEC_ESP_SPI << 1) +#define NH_FLD_IPSEC_ESP_ALL_FIELDS ((NH_FLD_IPSEC_ESP_SPI << 2) - 1) + +#define NH_FLD_IPSEC_ESP_SPI_SIZE 4 + +/*************************** MPLS fields ***********************************/ +#define NH_FLD_MPLS_LABEL_STACK (1) +#define NH_FLD_MPLS_LABEL_STACK_ALL_FIELDS \ + ((NH_FLD_MPLS_LABEL_STACK << 1) - 1) + +/*************************** MACSEC fields *********************************/ +#define NH_FLD_MACSEC_SECTAG (1) +#define NH_FLD_MACSEC_ALL_FIELDS ((NH_FLD_MACSEC_SECTAG << 1) - 1) + +/*************************** GTP fields ************************************/ +#define NH_FLD_GTP_TEID (1) + +/* Protocol options */ + +/* Ethernet options */ +#define NH_OPT_ETH_BROADCAST 1 +#define NH_OPT_ETH_MULTICAST 2 +#define NH_OPT_ETH_UNICAST 3 +#define NH_OPT_ETH_BPDU 4 + +#define NH_ETH_IS_MULTICAST_ADDR(addr) (addr[0] & 0x01) +/* also applicable for broadcast */ + +/* VLAN options */ +#define NH_OPT_VLAN_CFI 1 + +/* IPV4 options */ +#define NH_OPT_IPV4_UNICAST 1 +#define NH_OPT_IPV4_MULTICAST 2 +#define NH_OPT_IPV4_BROADCAST 3 +#define NH_OPT_IPV4_OPTION 4 +#define NH_OPT_IPV4_FRAG 5 +#define NH_OPT_IPV4_INITIAL_FRAG 6 + +/* IPV6 options */ +#define NH_OPT_IPV6_UNICAST 1 +#define NH_OPT_IPV6_MULTICAST 2 +#define NH_OPT_IPV6_OPTION 3 +#define NH_OPT_IPV6_FRAG 4 +#define NH_OPT_IPV6_INITIAL_FRAG 5 + +/* General IP options (may be used for any version) */ +#define NH_OPT_IP_FRAG 1 +#define NH_OPT_IP_INITIAL_FRAG 2 +#define NH_OPT_IP_OPTION 3 + +/* Minenc. options */ +#define NH_OPT_MINENCAP_SRC_ADDR_PRESENT 1 + +/* GRE. 
options */ +#define NH_OPT_GRE_ROUTING_PRESENT 1 + +/* TCP options */ +#define NH_OPT_TCP_OPTIONS 1 +#define NH_OPT_TCP_CONTROL_HIGH_BITS 2 +#define NH_OPT_TCP_CONTROL_LOW_BITS 3 + +/* CAPWAP options */ +#define NH_OPT_CAPWAP_DTLS 1 + +enum net_prot { + NET_PROT_NONE = 0, + NET_PROT_PAYLOAD, + NET_PROT_ETH, + NET_PROT_VLAN, + NET_PROT_IPV4, + NET_PROT_IPV6, + NET_PROT_IP, + NET_PROT_TCP, + NET_PROT_UDP, + NET_PROT_UDP_LITE, + NET_PROT_IPHC, + NET_PROT_SCTP, + NET_PROT_SCTP_CHUNK_DATA, + NET_PROT_PPPOE, + NET_PROT_PPP, + NET_PROT_PPPMUX, + NET_PROT_PPPMUX_SUBFRM, + NET_PROT_L2TPV2, + NET_PROT_L2TPV3_CTRL, + NET_PROT_L2TPV3_SESS, + NET_PROT_LLC, + NET_PROT_LLC_SNAP, + NET_PROT_NLPID, + NET_PROT_SNAP, + NET_PROT_MPLS, + NET_PROT_IPSEC_AH, + NET_PROT_IPSEC_ESP, + NET_PROT_UDP_ENC_ESP, /* RFC 3948 */ + NET_PROT_MACSEC, + NET_PROT_GRE, + NET_PROT_MINENCAP, + NET_PROT_DCCP, + NET_PROT_ICMP, + NET_PROT_IGMP, + NET_PROT_ARP, + NET_PROT_CAPWAP_DATA, + NET_PROT_CAPWAP_CTRL, + NET_PROT_RFC2684, + NET_PROT_ICMPV6, + NET_PROT_FCOE, + NET_PROT_FIP, + NET_PROT_ISCSI, + NET_PROT_GTP, + NET_PROT_USER_DEFINED_L2, + NET_PROT_USER_DEFINED_L3, + NET_PROT_USER_DEFINED_L4, + NET_PROT_USER_DEFINED_L5, + NET_PROT_USER_DEFINED_SHIM1, + NET_PROT_USER_DEFINED_SHIM2, + + NET_PROT_DUMMY_LAST +}; + +/*! IEEE8021.Q */ +#define NH_IEEE8021Q_ETYPE 0x8100 +#define NH_IEEE8021Q_HDR(etype, pcp, dei, vlan_id) \ + ((((uint32_t)(etype & 0xFFFF)) << 16) | \ + (((uint32_t)(pcp & 0x07)) << 13) | \ + (((uint32_t)(dei & 0x01)) << 12) | \ + (((uint32_t)(vlan_id & 0xFFF)))) + +#endif /* __FSL_NET_H */ diff --git a/src/spdk/dpdk/drivers/net/dpaa2/meson.build b/src/spdk/dpdk/drivers/net/dpaa2/meson.build new file mode 100644 index 000000000..6dd0eb274 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/dpaa2/meson.build @@ -0,0 +1,27 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright 2018 NXP + +if not is_linux + build = false + reason = 'only supported on linux' +endif + +deps += ['mempool_dpaa2'] +sources = files('base/dpaa2_hw_dpni.c', + 'dpaa2_mux.c', + 'dpaa2_ethdev.c', + 'dpaa2_flow.c', + 'dpaa2_rxtx.c', + 'dpaa2_sparser.c', + 'mc/dpkg.c', + 'mc/dpdmux.c', + 'mc/dpni.c') + +if dpdk_conf.has('RTE_LIBRTE_IEEE1588') + sources += files('mc/dprtc.c') + sources += files('dpaa2_ptp.c') +endif + +includes += include_directories('base', 'mc') + +install_headers('rte_pmd_dpaa2.h') diff --git a/src/spdk/dpdk/drivers/net/dpaa2/rte_pmd_dpaa2.h b/src/spdk/dpdk/drivers/net/dpaa2/rte_pmd_dpaa2.h new file mode 100644 index 000000000..ca7bf7d46 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/dpaa2/rte_pmd_dpaa2.h @@ -0,0 +1,90 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2018-2019 NXP + */ + +#ifndef _RTE_PMD_DPAA2_H +#define _RTE_PMD_DPAA2_H + +/** + * @file rte_pmd_dpaa2.h + * + * NXP dpaa2 PMD specific functions. + * + * @warning + * @b EXPERIMENTAL: this API may change, or be removed, without prior notice + * + */ + +#include + +enum pmd_dpaa2_ts { + PMD_DPAA2_DISABLE_TS, + PMD_DPAA2_ENABLE_TS +}; + +/** + * @warning + * @b EXPERIMENTAL: this API may change, or be removed, without prior notice + * + * Enable/Disable timestamping update in mbuf for LX2160 kind of devices. + * For LS2088/LS1088 devices, timestamping will be updated in mbuf without + * calling this API. + * + * @param pmd_dpaa2_ts + * Enum to enable/disable timestamp update in mbuf for LX2160 devices. 
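+ *
+ * Illustrative call (a sketch only; an application would typically invoke
+ * this once during initialisation, before the port is started):
+ *
+ *	rte_pmd_dpaa2_set_timestamp(PMD_DPAA2_ENABLE_TS);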
+ */ +__rte_experimental +void rte_pmd_dpaa2_set_timestamp(enum pmd_dpaa2_ts); + +/** + * @warning + * @b EXPERIMENTAL: this API may change, or be removed, without prior notice + * + * Create a flow rule to demultiplex ethernet traffic to separate network + * interfaces. + * + * @param dpdmux_id + * ID of the DPDMUX MC object. + * @param[in] pattern + * Pattern specification. + * @param[in] actions + * Associated actions. + * + * @return + * A valid handle in case of success, NULL otherwise. + */ +__rte_experimental +struct rte_flow * +rte_pmd_dpaa2_mux_flow_create(uint32_t dpdmux_id, + struct rte_flow_item *pattern[], + struct rte_flow_action *actions[]); + +/** + * @warning + * @b EXPERIMENTAL: this API may change, or be removed, without prior notice + * + * Create a custom hash key on basis of offset of start of packet and size. + * for e.g. if we need GRE packets (non-vlan and without any extra headers) + * to be hashed on basis of inner IP header, we will provide offset as: + * 14 (eth) + 20 (IP) + 4 (GRE) + 12 (Inner Src offset) = 50 and size + * as 8 bytes. + * + * @param port_id + * The port identifier of the Ethernet device. + * @param offset + * Offset from the start of packet which needs to be included to + * calculate hash + * @param size + * Size of the hash input key + * + * @return + * - 0 if successful. + * - Negative in case of failure. + */ +__rte_experimental +int +rte_pmd_dpaa2_set_custom_hash(uint16_t port_id, + uint16_t offset, + uint8_t size); + +#endif /* _RTE_PMD_DPAA2_H */ diff --git a/src/spdk/dpdk/drivers/net/dpaa2/rte_pmd_dpaa2_version.map b/src/spdk/dpdk/drivers/net/dpaa2/rte_pmd_dpaa2_version.map new file mode 100644 index 000000000..b633fdc2a --- /dev/null +++ b/src/spdk/dpdk/drivers/net/dpaa2/rte_pmd_dpaa2_version.map @@ -0,0 +1,18 @@ +DPDK_20.0 { + local: *; +}; + +EXPERIMENTAL { + global: + + rte_pmd_dpaa2_mux_flow_create; + rte_pmd_dpaa2_set_custom_hash; + rte_pmd_dpaa2_set_timestamp; +}; + +INTERNAL { + global: + + dpaa2_eth_eventq_attach; + dpaa2_eth_eventq_detach; +}; diff --git a/src/spdk/dpdk/drivers/net/e1000/Makefile b/src/spdk/dpdk/drivers/net/e1000/Makefile new file mode 100644 index 000000000..9fb038cf0 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/e1000/Makefile @@ -0,0 +1,78 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2010-2015 Intel Corporation + +include $(RTE_SDK)/mk/rte.vars.mk + +# +# library name +# +LIB = librte_pmd_e1000.a + +CFLAGS += -O3 +CFLAGS += $(WERROR_FLAGS) +LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring +LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs +LDLIBS += -lrte_bus_pci + +EXPORT_MAP := rte_pmd_e1000_version.map + +ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y) +# +# CFLAGS for icc +# +CFLAGS_BASE_DRIVER = -diag-disable 177 -diag-disable 181 +CFLAGS_BASE_DRIVER += -diag-disable 869 -diag-disable 2259 +else +# +# CFLAGS for gcc/clang +# +CFLAGS_BASE_DRIVER = -Wno-uninitialized -Wno-unused-parameter +CFLAGS_BASE_DRIVER += -Wno-unused-variable +ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y) +ifeq ($(shell test $(GCC_VERSION) -ge 60 && echo 1), 1) +CFLAGS_BASE_DRIVER += -Wno-misleading-indentation +ifeq ($(shell test $(GCC_VERSION) -ge 70 && echo 1), 1) +CFLAGS_BASE_DRIVER += -Wno-implicit-fallthrough +endif +endif +endif +endif + +# +# Add extra flags for base driver files (also known as shared code) +# to disable warnings in them +# +BASE_DRIVER_OBJS=$(sort $(patsubst %.c,%.o,$(notdir $(wildcard $(SRCDIR)/base/*.c)))) +$(foreach obj, $(BASE_DRIVER_OBJS), $(eval CFLAGS_$(obj)+=$(CFLAGS_BASE_DRIVER))) + +VPATH += 
$(SRCDIR)/base + +# +# all source are stored in SRCS-y +# +SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_80003es2lan.c +SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_82540.c +SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_82541.c +SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_82542.c +SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_82543.c +SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_82571.c +SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_82575.c +SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_i210.c +SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_api.c +SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_ich8lan.c +SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_logs.c +SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_mac.c +SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_manage.c +SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_mbx.c +SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_nvm.c +SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_osdep.c +SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_phy.c +SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_vf.c +SRCS-$(CONFIG_RTE_LIBRTE_IGB_PMD) += igb_ethdev.c +SRCS-$(CONFIG_RTE_LIBRTE_IGB_PMD) += igb_rxtx.c +SRCS-$(CONFIG_RTE_LIBRTE_IGB_PMD) += igb_pf.c +SRCS-$(CONFIG_RTE_LIBRTE_IGB_PMD) += igb_flow.c +SRCS-$(CONFIG_RTE_LIBRTE_EM_PMD) += em_ethdev.c +SRCS-$(CONFIG_RTE_LIBRTE_EM_PMD) += em_rxtx.c + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/src/spdk/dpdk/drivers/net/e1000/base/README b/src/spdk/dpdk/drivers/net/e1000/base/README new file mode 100644 index 000000000..56738d001 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/e1000/base/README @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2016 Intel Corporation + */ + +This directory contains source code of FreeBSD em & igb drivers of version +cid-shared-code.2016.11.22 released by ND. The sub-directory of base/ +contains the original source package. 
+ +This driver is valid for the product(s) listed below +* Intel® Ethernet Controller 82540 +* Intel® Ethernet Controller 82545 Series +* Intel® Ethernet Controller 82546 Series +* Intel® Ethernet Controller 82571 Series +* Intel® Ethernet Controller 82572 Series +* Intel® Ethernet Controller 82573 +* Intel® Ethernet Controller 82574 +* Intel® Ethernet Controller 82583 +* Intel® Ethernet Controller I217 Series +* Intel® Ethernet Controller I218 Series +* Intel® Ethernet Controller I219 Series +* Intel® Ethernet Controller 82576 Series +* Intel® Ethernet Controller 82575 Series +* Intel® Ethernet Controller 82580 Series +* Intel® Ethernet Controller I350 Series +* Intel® Ethernet Controller I210 Series +* Intel® Ethernet Controller I211 +* Intel® Ethernet Controller I354 Series +* Intel® Ethernet Controller DH89XXCC Series + +Updating the driver +=================== + +NOTE: The source code in this directory should not be modified apart from +the following file(s): + + e1000_osdep.c + e1000_osdep.h diff --git a/src/spdk/dpdk/drivers/net/e1000/base/e1000_80003es2lan.c b/src/spdk/dpdk/drivers/net/e1000/base/e1000_80003es2lan.c new file mode 100644 index 000000000..bcfda9415 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/e1000/base/e1000_80003es2lan.c @@ -0,0 +1,1496 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001 - 2015 Intel Corporation + */ + +/* 80003ES2LAN Gigabit Ethernet Controller (Copper) + * 80003ES2LAN Gigabit Ethernet Controller (Serdes) + */ + +#include "e1000_api.h" + +STATIC s32 e1000_acquire_phy_80003es2lan(struct e1000_hw *hw); +STATIC void e1000_release_phy_80003es2lan(struct e1000_hw *hw); +STATIC s32 e1000_acquire_nvm_80003es2lan(struct e1000_hw *hw); +STATIC void e1000_release_nvm_80003es2lan(struct e1000_hw *hw); +STATIC s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw, + u32 offset, + u16 *data); +STATIC s32 e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw, + u32 offset, + u16 data); +STATIC s32 e1000_write_nvm_80003es2lan(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data); +STATIC s32 e1000_get_cfg_done_80003es2lan(struct e1000_hw *hw); +STATIC s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw); +STATIC s32 e1000_get_cable_length_80003es2lan(struct e1000_hw *hw); +STATIC s32 e1000_get_link_up_info_80003es2lan(struct e1000_hw *hw, u16 *speed, + u16 *duplex); +STATIC s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw); +STATIC s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw); +STATIC s32 e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw); +STATIC void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw); +STATIC s32 e1000_acquire_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask); +STATIC s32 e1000_cfg_kmrn_10_100_80003es2lan(struct e1000_hw *hw, u16 duplex); +STATIC s32 e1000_cfg_kmrn_1000_80003es2lan(struct e1000_hw *hw); +STATIC s32 e1000_cfg_on_link_up_80003es2lan(struct e1000_hw *hw); +STATIC s32 e1000_read_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset, + u16 *data); +STATIC s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset, + u16 data); +STATIC void e1000_initialize_hw_bits_80003es2lan(struct e1000_hw *hw); +STATIC void e1000_release_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask); +STATIC s32 e1000_read_mac_addr_80003es2lan(struct e1000_hw *hw); +STATIC void e1000_power_down_phy_copper_80003es2lan(struct e1000_hw *hw); + +/* A table for the GG82563 cable length where the range is defined + * with a lower bound at "index" and the upper bound at + * "index + 5". 
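+ *
+ * Worked example (values from the table below, a sketch of how the driver
+ * reads it): a register index of 2 reports a cable length between
+ * e1000_gg82563_cable_length_table[2] = 115 and
+ * e1000_gg82563_cable_length_table[2 + 5] = 150 meters.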
+ */ +STATIC const u16 e1000_gg82563_cable_length_table[] = { + 0, 60, 115, 150, 150, 60, 115, 150, 180, 180, 0xFF }; +#define GG82563_CABLE_LENGTH_TABLE_SIZE \ + (sizeof(e1000_gg82563_cable_length_table) / \ + sizeof(e1000_gg82563_cable_length_table[0])) + +/** + * e1000_init_phy_params_80003es2lan - Init ESB2 PHY func ptrs. + * @hw: pointer to the HW structure + **/ +STATIC s32 e1000_init_phy_params_80003es2lan(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + + DEBUGFUNC("e1000_init_phy_params_80003es2lan"); + + if (hw->phy.media_type != e1000_media_type_copper) { + phy->type = e1000_phy_none; + return E1000_SUCCESS; + } else { + phy->ops.power_up = e1000_power_up_phy_copper; + phy->ops.power_down = e1000_power_down_phy_copper_80003es2lan; + } + + phy->addr = 1; + phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; + phy->reset_delay_us = 100; + phy->type = e1000_phy_gg82563; + + phy->ops.acquire = e1000_acquire_phy_80003es2lan; + phy->ops.check_polarity = e1000_check_polarity_m88; + phy->ops.check_reset_block = e1000_check_reset_block_generic; + phy->ops.commit = e1000_phy_sw_reset_generic; + phy->ops.get_cfg_done = e1000_get_cfg_done_80003es2lan; + phy->ops.get_info = e1000_get_phy_info_m88; + phy->ops.release = e1000_release_phy_80003es2lan; + phy->ops.reset = e1000_phy_hw_reset_generic; + phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_generic; + + phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_80003es2lan; + phy->ops.get_cable_length = e1000_get_cable_length_80003es2lan; + phy->ops.read_reg = e1000_read_phy_reg_gg82563_80003es2lan; + phy->ops.write_reg = e1000_write_phy_reg_gg82563_80003es2lan; + + phy->ops.cfg_on_link_up = e1000_cfg_on_link_up_80003es2lan; + + /* This can only be done after all function pointers are setup. */ + ret_val = e1000_get_phy_id(hw); + + /* Verify phy id */ + if (phy->id != GG82563_E_PHY_ID) + return -E1000_ERR_PHY; + + return ret_val; +} + +/** + * e1000_init_nvm_params_80003es2lan - Init ESB2 NVM func ptrs. + * @hw: pointer to the HW structure + **/ +STATIC s32 e1000_init_nvm_params_80003es2lan(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 eecd = E1000_READ_REG(hw, E1000_EECD); + u16 size; + + DEBUGFUNC("e1000_init_nvm_params_80003es2lan"); + + nvm->opcode_bits = 8; + nvm->delay_usec = 1; + switch (nvm->override) { + case e1000_nvm_override_spi_large: + nvm->page_size = 32; + nvm->address_bits = 16; + break; + case e1000_nvm_override_spi_small: + nvm->page_size = 8; + nvm->address_bits = 8; + break; + default: + nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8; + nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8; + break; + } + + nvm->type = e1000_nvm_eeprom_spi; + + size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >> + E1000_EECD_SIZE_EX_SHIFT); + + /* Added to a constant, "size" becomes the left-shift value + * for setting word_size. + */ + size += NVM_WORD_SIZE_BASE_SHIFT; + + /* EEPROM access above 16k is unsupported */ + if (size > 14) + size = 14; + nvm->word_size = 1 << size; + + /* Function Pointers */ + nvm->ops.acquire = e1000_acquire_nvm_80003es2lan; + nvm->ops.read = e1000_read_nvm_eerd; + nvm->ops.release = e1000_release_nvm_80003es2lan; + nvm->ops.update = e1000_update_nvm_checksum_generic; + nvm->ops.valid_led_default = e1000_valid_led_default_generic; + nvm->ops.validate = e1000_validate_nvm_checksum_generic; + nvm->ops.write = e1000_write_nvm_80003es2lan; + + return E1000_SUCCESS; +} + +/** + * e1000_init_mac_params_80003es2lan - Init ESB2 MAC func ptrs. 
+ * @hw: pointer to the HW structure + **/ +STATIC s32 e1000_init_mac_params_80003es2lan(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + + DEBUGFUNC("e1000_init_mac_params_80003es2lan"); + + /* Set media type and media-dependent function pointers */ + switch (hw->device_id) { + case E1000_DEV_ID_80003ES2LAN_SERDES_DPT: + hw->phy.media_type = e1000_media_type_internal_serdes; + mac->ops.check_for_link = e1000_check_for_serdes_link_generic; + mac->ops.setup_physical_interface = + e1000_setup_fiber_serdes_link_generic; + break; + default: + hw->phy.media_type = e1000_media_type_copper; + mac->ops.check_for_link = e1000_check_for_copper_link_generic; + mac->ops.setup_physical_interface = + e1000_setup_copper_link_80003es2lan; + break; + } + + /* Set mta register count */ + mac->mta_reg_count = 128; + /* Set rar entry count */ + mac->rar_entry_count = E1000_RAR_ENTRIES; + /* Set if part includes ASF firmware */ + mac->asf_firmware_present = true; + /* FWSM register */ + mac->has_fwsm = true; + /* ARC supported; valid only if manageability features are enabled. */ + mac->arc_subsystem_valid = !!(E1000_READ_REG(hw, E1000_FWSM) & + E1000_FWSM_MODE_MASK); + /* Adaptive IFS not supported */ + mac->adaptive_ifs = false; + + /* Function pointers */ + + /* bus type/speed/width */ + mac->ops.get_bus_info = e1000_get_bus_info_pcie_generic; + /* reset */ + mac->ops.reset_hw = e1000_reset_hw_80003es2lan; + /* hw initialization */ + mac->ops.init_hw = e1000_init_hw_80003es2lan; + /* link setup */ + mac->ops.setup_link = e1000_setup_link_generic; + /* check management mode */ + mac->ops.check_mng_mode = e1000_check_mng_mode_generic; + /* multicast address update */ + mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic; + /* writing VFTA */ + mac->ops.write_vfta = e1000_write_vfta_generic; + /* clearing VFTA */ + mac->ops.clear_vfta = e1000_clear_vfta_generic; + /* read mac address */ + mac->ops.read_mac_addr = e1000_read_mac_addr_80003es2lan; + /* ID LED init */ + mac->ops.id_led_init = e1000_id_led_init_generic; + /* blink LED */ + mac->ops.blink_led = e1000_blink_led_generic; + /* setup LED */ + mac->ops.setup_led = e1000_setup_led_generic; + /* cleanup LED */ + mac->ops.cleanup_led = e1000_cleanup_led_generic; + /* turn on/off LED */ + mac->ops.led_on = e1000_led_on_generic; + mac->ops.led_off = e1000_led_off_generic; + /* clear hardware counters */ + mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_80003es2lan; + /* link info */ + mac->ops.get_link_up_info = e1000_get_link_up_info_80003es2lan; + + /* set lan id for port to determine which phy lock to use */ + hw->mac.ops.set_lan_id(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_init_function_pointers_80003es2lan - Init ESB2 func ptrs. + * @hw: pointer to the HW structure + * + * Called to initialize all function pointers and parameters. + **/ +void e1000_init_function_pointers_80003es2lan(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_init_function_pointers_80003es2lan"); + + hw->mac.ops.init_params = e1000_init_mac_params_80003es2lan; + hw->nvm.ops.init_params = e1000_init_nvm_params_80003es2lan; + hw->phy.ops.init_params = e1000_init_phy_params_80003es2lan; +} + +/** + * e1000_acquire_phy_80003es2lan - Acquire rights to access PHY + * @hw: pointer to the HW structure + * + * A wrapper to acquire access rights to the correct PHY. + **/ +STATIC s32 e1000_acquire_phy_80003es2lan(struct e1000_hw *hw) +{ + u16 mask; + + DEBUGFUNC("e1000_acquire_phy_80003es2lan"); + + mask = hw->bus.func ? 
E1000_SWFW_PHY1_SM : E1000_SWFW_PHY0_SM; + return e1000_acquire_swfw_sync_80003es2lan(hw, mask); +} + +/** + * e1000_release_phy_80003es2lan - Release rights to access PHY + * @hw: pointer to the HW structure + * + * A wrapper to release access rights to the correct PHY. + **/ +STATIC void e1000_release_phy_80003es2lan(struct e1000_hw *hw) +{ + u16 mask; + + DEBUGFUNC("e1000_release_phy_80003es2lan"); + + mask = hw->bus.func ? E1000_SWFW_PHY1_SM : E1000_SWFW_PHY0_SM; + e1000_release_swfw_sync_80003es2lan(hw, mask); +} + +/** + * e1000_acquire_mac_csr_80003es2lan - Acquire right to access Kumeran register + * @hw: pointer to the HW structure + * + * Acquire the semaphore to access the Kumeran interface. + * + **/ +STATIC s32 e1000_acquire_mac_csr_80003es2lan(struct e1000_hw *hw) +{ + u16 mask; + + DEBUGFUNC("e1000_acquire_mac_csr_80003es2lan"); + + mask = E1000_SWFW_CSR_SM; + + return e1000_acquire_swfw_sync_80003es2lan(hw, mask); +} + +/** + * e1000_release_mac_csr_80003es2lan - Release right to access Kumeran Register + * @hw: pointer to the HW structure + * + * Release the semaphore used to access the Kumeran interface + **/ +STATIC void e1000_release_mac_csr_80003es2lan(struct e1000_hw *hw) +{ + u16 mask; + + DEBUGFUNC("e1000_release_mac_csr_80003es2lan"); + + mask = E1000_SWFW_CSR_SM; + + e1000_release_swfw_sync_80003es2lan(hw, mask); +} + +/** + * e1000_acquire_nvm_80003es2lan - Acquire rights to access NVM + * @hw: pointer to the HW structure + * + * Acquire the semaphore to access the EEPROM. + **/ +STATIC s32 e1000_acquire_nvm_80003es2lan(struct e1000_hw *hw) +{ + s32 ret_val; + + DEBUGFUNC("e1000_acquire_nvm_80003es2lan"); + + ret_val = e1000_acquire_swfw_sync_80003es2lan(hw, E1000_SWFW_EEP_SM); + if (ret_val) + return ret_val; + + ret_val = e1000_acquire_nvm_generic(hw); + + if (ret_val) + e1000_release_swfw_sync_80003es2lan(hw, E1000_SWFW_EEP_SM); + + return ret_val; +} + +/** + * e1000_release_nvm_80003es2lan - Relinquish rights to access NVM + * @hw: pointer to the HW structure + * + * Release the semaphore used to access the EEPROM. + **/ +STATIC void e1000_release_nvm_80003es2lan(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_release_nvm_80003es2lan"); + + e1000_release_nvm_generic(hw); + e1000_release_swfw_sync_80003es2lan(hw, E1000_SWFW_EEP_SM); +} + +/** + * e1000_acquire_swfw_sync_80003es2lan - Acquire SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to acquire + * + * Acquire the SW/FW semaphore to access the PHY or NVM. The mask + * will also specify which port we're acquiring the lock for. 
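 *
 * Illustrative usage sketch (editorial addition, not from the upstream
 * sources): the caller's mask occupies the software half of SW_FW_SYNC and
 * the same mask shifted left by 16 is treated as the firmware-owned half,
 * so a PHY access on this part is typically bracketed as:
 *
 *	u16 mask = hw->bus.func ? E1000_SWFW_PHY1_SM : E1000_SWFW_PHY0_SM;
 *
 *	if (e1000_acquire_swfw_sync_80003es2lan(hw, mask))
 *		return -E1000_ERR_SWFW_SYNC;
 *	/- MDIC access to the GG82563 PHY goes here -/
 *	e1000_release_swfw_sync_80003es2lan(hw, mask);
 *
 * which is exactly what e1000_acquire_phy_80003es2lan() and
 * e1000_release_phy_80003es2lan() above wrap.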
+ **/ +STATIC s32 e1000_acquire_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask) +{ + u32 swfw_sync; + u32 swmask = mask; + u32 fwmask = mask << 16; + s32 i = 0; + s32 timeout = 50; + + DEBUGFUNC("e1000_acquire_swfw_sync_80003es2lan"); + + while (i < timeout) { + if (e1000_get_hw_semaphore_generic(hw)) + return -E1000_ERR_SWFW_SYNC; + + swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC); + if (!(swfw_sync & (fwmask | swmask))) + break; + + /* Firmware currently using resource (fwmask) + * or other software thread using resource (swmask) + */ + e1000_put_hw_semaphore_generic(hw); + msec_delay_irq(5); + i++; + } + + if (i == timeout) { + DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n"); + return -E1000_ERR_SWFW_SYNC; + } + + swfw_sync |= swmask; + E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync); + + e1000_put_hw_semaphore_generic(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_release_swfw_sync_80003es2lan - Release SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to acquire + * + * Release the SW/FW semaphore used to access the PHY or NVM. The mask + * will also specify which port we're releasing the lock for. + **/ +STATIC void e1000_release_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask) +{ + u32 swfw_sync; + + DEBUGFUNC("e1000_release_swfw_sync_80003es2lan"); + + while (e1000_get_hw_semaphore_generic(hw) != E1000_SUCCESS) + ; /* Empty */ + + swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC); + swfw_sync &= ~mask; + E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync); + + e1000_put_hw_semaphore_generic(hw); +} + +/** + * e1000_read_phy_reg_gg82563_80003es2lan - Read GG82563 PHY register + * @hw: pointer to the HW structure + * @offset: offset of the register to read + * @data: pointer to the data returned from the operation + * + * Read the GG82563 PHY register. + **/ +STATIC s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw, + u32 offset, u16 *data) +{ + s32 ret_val; + u32 page_select; + u16 temp; + + DEBUGFUNC("e1000_read_phy_reg_gg82563_80003es2lan"); + + ret_val = e1000_acquire_phy_80003es2lan(hw); + if (ret_val) + return ret_val; + + /* Select Configuration Page */ + if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) { + page_select = GG82563_PHY_PAGE_SELECT; + } else { + /* Use Alternative Page Select register to access + * registers 30 and 31 + */ + page_select = GG82563_PHY_PAGE_SELECT_ALT; + } + + temp = (u16)((u16)offset >> GG82563_PAGE_SHIFT); + ret_val = e1000_write_phy_reg_mdic(hw, page_select, temp); + if (ret_val) { + e1000_release_phy_80003es2lan(hw); + return ret_val; + } + + if (hw->dev_spec._80003es2lan.mdic_wa_enable) { + /* The "ready" bit in the MDIC register may be incorrectly set + * before the device has completed the "Page Select" MDI + * transaction. So we wait 200us after each MDI command... + */ + usec_delay(200); + + /* ...and verify the command was successful. 
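 * (Clarifying note, not from the upstream file: the page-select value is
 * read back below and compared with the page that was just written; on a
 * mismatch the PHY semaphore is released and -E1000_ERR_PHY is returned
 * rather than risking a read from the wrong register page.)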
*/ + ret_val = e1000_read_phy_reg_mdic(hw, page_select, &temp); + + if (((u16)offset >> GG82563_PAGE_SHIFT) != temp) { + e1000_release_phy_80003es2lan(hw); + return -E1000_ERR_PHY; + } + + usec_delay(200); + + ret_val = e1000_read_phy_reg_mdic(hw, + MAX_PHY_REG_ADDRESS & offset, + data); + + usec_delay(200); + } else { + ret_val = e1000_read_phy_reg_mdic(hw, + MAX_PHY_REG_ADDRESS & offset, + data); + } + + e1000_release_phy_80003es2lan(hw); + + return ret_val; +} + +/** + * e1000_write_phy_reg_gg82563_80003es2lan - Write GG82563 PHY register + * @hw: pointer to the HW structure + * @offset: offset of the register to read + * @data: value to write to the register + * + * Write to the GG82563 PHY register. + **/ +STATIC s32 e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw, + u32 offset, u16 data) +{ + s32 ret_val; + u32 page_select; + u16 temp; + + DEBUGFUNC("e1000_write_phy_reg_gg82563_80003es2lan"); + + ret_val = e1000_acquire_phy_80003es2lan(hw); + if (ret_val) + return ret_val; + + /* Select Configuration Page */ + if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) { + page_select = GG82563_PHY_PAGE_SELECT; + } else { + /* Use Alternative Page Select register to access + * registers 30 and 31 + */ + page_select = GG82563_PHY_PAGE_SELECT_ALT; + } + + temp = (u16)((u16)offset >> GG82563_PAGE_SHIFT); + ret_val = e1000_write_phy_reg_mdic(hw, page_select, temp); + if (ret_val) { + e1000_release_phy_80003es2lan(hw); + return ret_val; + } + + if (hw->dev_spec._80003es2lan.mdic_wa_enable) { + /* The "ready" bit in the MDIC register may be incorrectly set + * before the device has completed the "Page Select" MDI + * transaction. So we wait 200us after each MDI command... + */ + usec_delay(200); + + /* ...and verify the command was successful. */ + ret_val = e1000_read_phy_reg_mdic(hw, page_select, &temp); + + if (((u16)offset >> GG82563_PAGE_SHIFT) != temp) { + e1000_release_phy_80003es2lan(hw); + return -E1000_ERR_PHY; + } + + usec_delay(200); + + ret_val = e1000_write_phy_reg_mdic(hw, + MAX_PHY_REG_ADDRESS & offset, + data); + + usec_delay(200); + } else { + ret_val = e1000_write_phy_reg_mdic(hw, + MAX_PHY_REG_ADDRESS & offset, + data); + } + + e1000_release_phy_80003es2lan(hw); + + return ret_val; +} + +/** + * e1000_write_nvm_80003es2lan - Write to ESB2 NVM + * @hw: pointer to the HW structure + * @offset: offset of the register to read + * @words: number of words to write + * @data: buffer of data to write to the NVM + * + * Write "words" of data to the ESB2 NVM. + **/ +STATIC s32 e1000_write_nvm_80003es2lan(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data) +{ + DEBUGFUNC("e1000_write_nvm_80003es2lan"); + + return e1000_write_nvm_spi(hw, offset, words, data); +} + +/** + * e1000_get_cfg_done_80003es2lan - Wait for configuration to complete + * @hw: pointer to the HW structure + * + * Wait a specific amount of time for manageability processes to complete. + * This is a function pointer entry point called by the phy module. 
+ **/ +STATIC s32 e1000_get_cfg_done_80003es2lan(struct e1000_hw *hw) +{ + s32 timeout = PHY_CFG_TIMEOUT; + u32 mask = E1000_NVM_CFG_DONE_PORT_0; + + DEBUGFUNC("e1000_get_cfg_done_80003es2lan"); + + if (hw->bus.func == 1) + mask = E1000_NVM_CFG_DONE_PORT_1; + + while (timeout) { + if (E1000_READ_REG(hw, E1000_EEMNGCTL) & mask) + break; + msec_delay(1); + timeout--; + } + if (!timeout) { + DEBUGOUT("MNG configuration cycle has not completed.\n"); + return -E1000_ERR_RESET; + } + + return E1000_SUCCESS; +} + +/** + * e1000_phy_force_speed_duplex_80003es2lan - Force PHY speed and duplex + * @hw: pointer to the HW structure + * + * Force the speed and duplex settings onto the PHY. This is a + * function pointer entry point called by the phy module. + **/ +STATIC s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw) +{ + s32 ret_val; + u16 phy_data; + bool link; + + DEBUGFUNC("e1000_phy_force_speed_duplex_80003es2lan"); + + if (!(hw->phy.ops.read_reg)) + return E1000_SUCCESS; + + /* Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI + * forced whenever speed and duplex are forced. + */ + ret_val = hw->phy.ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~GG82563_PSCR_CROSSOVER_MODE_AUTO; + ret_val = hw->phy.ops.write_reg(hw, GG82563_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + DEBUGOUT1("GG82563 PSCR: %X\n", phy_data); + + ret_val = hw->phy.ops.read_reg(hw, PHY_CONTROL, &phy_data); + if (ret_val) + return ret_val; + + e1000_phy_force_speed_duplex_setup(hw, &phy_data); + + /* Reset the phy to commit changes. */ + phy_data |= MII_CR_RESET; + + ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL, phy_data); + if (ret_val) + return ret_val; + + usec_delay(1); + + if (hw->phy.autoneg_wait_to_complete) { + DEBUGOUT("Waiting for forced speed/duplex link on GG82563 phy.\n"); + + ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); + if (ret_val) + return ret_val; + + if (!link) { + /* We didn't get link. + * Reset the DSP and cross our fingers. + */ + ret_val = e1000_phy_reset_dsp_generic(hw); + if (ret_val) + return ret_val; + } + + /* Try once more */ + ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); + if (ret_val) + return ret_val; + } + + ret_val = hw->phy.ops.read_reg(hw, GG82563_PHY_MAC_SPEC_CTRL, + &phy_data); + if (ret_val) + return ret_val; + + /* Resetting the phy means we need to verify the TX_CLK corresponds + * to the link speed. 10Mbps -> 2.5MHz, else 25MHz. + */ + phy_data &= ~GG82563_MSCR_TX_CLK_MASK; + if (hw->mac.forced_speed_duplex & E1000_ALL_10_SPEED) + phy_data |= GG82563_MSCR_TX_CLK_10MBPS_2_5; + else + phy_data |= GG82563_MSCR_TX_CLK_100MBPS_25; + + /* In addition, we must re-enable CRS on Tx for both half and full + * duplex. + */ + phy_data |= GG82563_MSCR_ASSERT_CRS_ON_TX; + ret_val = hw->phy.ops.write_reg(hw, GG82563_PHY_MAC_SPEC_CTRL, + phy_data); + + return ret_val; +} + +/** + * e1000_get_cable_length_80003es2lan - Set approximate cable length + * @hw: pointer to the HW structure + * + * Find the approximate cable length as measured by the GG82563 PHY. + * This is a function pointer entry point called by the phy module. 
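 *
 * Worked example (editorial, derived from e1000_gg82563_cable_length_table[]
 * at the top of this file): the DSP distance bits select a table index and
 * the entry five slots further on supplies the upper bound, e.g. for index 2:
 *
 *	min_cable_length = table[2]     = 115
 *	max_cable_length = table[2 + 5] = 150
 *	cable_length     = (115 + 150) / 2 = 132   (integer metres)
 *
 * Indexes at or beyond GG82563_CABLE_LENGTH_TABLE_SIZE - 5 are rejected with
 * -E1000_ERR_PHY so that table[index + 5] can never run past the end.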
+ **/ +STATIC s32 e1000_get_cable_length_80003es2lan(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, index; + + DEBUGFUNC("e1000_get_cable_length_80003es2lan"); + + if (!(hw->phy.ops.read_reg)) + return E1000_SUCCESS; + + ret_val = hw->phy.ops.read_reg(hw, GG82563_PHY_DSP_DISTANCE, &phy_data); + if (ret_val) + return ret_val; + + index = phy_data & GG82563_DSPD_CABLE_LENGTH; + + if (index >= GG82563_CABLE_LENGTH_TABLE_SIZE - 5) + return -E1000_ERR_PHY; + + phy->min_cable_length = e1000_gg82563_cable_length_table[index]; + phy->max_cable_length = e1000_gg82563_cable_length_table[index + 5]; + + phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; + + return E1000_SUCCESS; +} + +/** + * e1000_get_link_up_info_80003es2lan - Report speed and duplex + * @hw: pointer to the HW structure + * @speed: pointer to speed buffer + * @duplex: pointer to duplex buffer + * + * Retrieve the current speed and duplex configuration. + **/ +STATIC s32 e1000_get_link_up_info_80003es2lan(struct e1000_hw *hw, u16 *speed, + u16 *duplex) +{ + s32 ret_val; + + DEBUGFUNC("e1000_get_link_up_info_80003es2lan"); + + if (hw->phy.media_type == e1000_media_type_copper) { + ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed, + duplex); + hw->phy.ops.cfg_on_link_up(hw); + } else { + ret_val = e1000_get_speed_and_duplex_fiber_serdes_generic(hw, + speed, + duplex); + } + + return ret_val; +} + +/** + * e1000_reset_hw_80003es2lan - Reset the ESB2 controller + * @hw: pointer to the HW structure + * + * Perform a global reset to the ESB2 controller. + **/ +STATIC s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + u16 kum_reg_data; + + DEBUGFUNC("e1000_reset_hw_80003es2lan"); + + /* Prevent the PCI-E bus from sticking if there is no TLP connection + * on the last TLP read/write transaction when MAC is reset. + */ + ret_val = e1000_disable_pcie_master_generic(hw); + if (ret_val) + DEBUGOUT("PCI-E Master disable polling has failed.\n"); + + DEBUGOUT("Masking off all interrupts\n"); + E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); + + E1000_WRITE_REG(hw, E1000_RCTL, 0); + E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP); + E1000_WRITE_FLUSH(hw); + + msec_delay(10); + + ctrl = E1000_READ_REG(hw, E1000_CTRL); + + ret_val = e1000_acquire_phy_80003es2lan(hw); + if (ret_val) + return ret_val; + + DEBUGOUT("Issuing a global reset to MAC\n"); + E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_RST); + e1000_release_phy_80003es2lan(hw); + + /* Disable IBIST slave mode (far-end loopback) */ + ret_val = e1000_read_kmrn_reg_80003es2lan(hw, + E1000_KMRNCTRLSTA_INBAND_PARAM, &kum_reg_data); + if (!ret_val) { + kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE; + ret_val = e1000_write_kmrn_reg_80003es2lan(hw, + E1000_KMRNCTRLSTA_INBAND_PARAM, + kum_reg_data); + if (ret_val) + DEBUGOUT("Error disabling far-end loopback\n"); + } else + DEBUGOUT("Error disabling far-end loopback\n"); + + ret_val = e1000_get_auto_rd_done_generic(hw); + if (ret_val) + /* We don't want to continue accessing MAC registers. */ + return ret_val; + + /* Clear any pending interrupt events. */ + E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); + E1000_READ_REG(hw, E1000_ICR); + + return e1000_check_alt_mac_addr_generic(hw); +} + +/** + * e1000_init_hw_80003es2lan - Initialize the ESB2 controller + * @hw: pointer to the HW structure + * + * Initialize the hw bits, LED, VFTA, MTA, link and hw counters. 
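 *
 * Usage sketch (editorial, assuming the usual e1000 bring-up sequence rather
 * than anything specific to this file): both entry points are reached through
 * the MAC ops table filled in by e1000_init_mac_params_80003es2lan(), e.g.:
 *
 *	ret_val = hw->mac.ops.reset_hw(hw);
 *	if (!ret_val)
 *		ret_val = hw->mac.ops.init_hw(hw);
 *
 * where reset_hw points at e1000_reset_hw_80003es2lan and init_hw at this
 * function.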
+ **/ +STATIC s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 reg_data; + s32 ret_val; + u16 kum_reg_data; + u16 i; + + DEBUGFUNC("e1000_init_hw_80003es2lan"); + + e1000_initialize_hw_bits_80003es2lan(hw); + + /* Initialize identification LED */ + ret_val = mac->ops.id_led_init(hw); + /* An error is not fatal and we should not stop init due to this */ + if (ret_val) + DEBUGOUT("Error initializing identification LED\n"); + + /* Disabling VLAN filtering */ + DEBUGOUT("Initializing the IEEE VLAN\n"); + mac->ops.clear_vfta(hw); + + /* Setup the receive address. */ + e1000_init_rx_addrs_generic(hw, mac->rar_entry_count); + + /* Zero out the Multicast HASH table */ + DEBUGOUT("Zeroing the MTA\n"); + for (i = 0; i < mac->mta_reg_count; i++) + E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); + + /* Setup link and flow control */ + ret_val = mac->ops.setup_link(hw); + if (ret_val) + return ret_val; + + /* Disable IBIST slave mode (far-end loopback) */ + ret_val = + e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, + &kum_reg_data); + if (!ret_val) { + kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE; + ret_val = e1000_write_kmrn_reg_80003es2lan(hw, + E1000_KMRNCTRLSTA_INBAND_PARAM, + kum_reg_data); + if (ret_val) + DEBUGOUT("Error disabling far-end loopback\n"); + } else + DEBUGOUT("Error disabling far-end loopback\n"); + + /* Set the transmit descriptor write-back policy */ + reg_data = E1000_READ_REG(hw, E1000_TXDCTL(0)); + reg_data = ((reg_data & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC); + E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg_data); + + /* ...for both queues. */ + reg_data = E1000_READ_REG(hw, E1000_TXDCTL(1)); + reg_data = ((reg_data & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC); + E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg_data); + + /* Enable retransmit on late collisions */ + reg_data = E1000_READ_REG(hw, E1000_TCTL); + reg_data |= E1000_TCTL_RTLC; + E1000_WRITE_REG(hw, E1000_TCTL, reg_data); + + /* Configure Gigabit Carry Extend Padding */ + reg_data = E1000_READ_REG(hw, E1000_TCTL_EXT); + reg_data &= ~E1000_TCTL_EXT_GCEX_MASK; + reg_data |= DEFAULT_TCTL_EXT_GCEX_80003ES2LAN; + E1000_WRITE_REG(hw, E1000_TCTL_EXT, reg_data); + + /* Configure Transmit Inter-Packet Gap */ + reg_data = E1000_READ_REG(hw, E1000_TIPG); + reg_data &= ~E1000_TIPG_IPGT_MASK; + reg_data |= DEFAULT_TIPG_IPGT_1000_80003ES2LAN; + E1000_WRITE_REG(hw, E1000_TIPG, reg_data); + + reg_data = E1000_READ_REG_ARRAY(hw, E1000_FFLT, 0x0001); + reg_data &= ~0x00100000; + E1000_WRITE_REG_ARRAY(hw, E1000_FFLT, 0x0001, reg_data); + + /* default to true to enable the MDIC W/A */ + hw->dev_spec._80003es2lan.mdic_wa_enable = true; + + ret_val = + e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_OFFSET >> + E1000_KMRNCTRLSTA_OFFSET_SHIFT, &i); + if (!ret_val) { + if ((i & E1000_KMRNCTRLSTA_OPMODE_MASK) == + E1000_KMRNCTRLSTA_OPMODE_INBAND_MDIO) + hw->dev_spec._80003es2lan.mdic_wa_enable = false; + } + + /* Clear all of the statistics registers (clear on read). It is + * important that we do this after we have tried to establish link + * because the symbol error count will increment wildly if there + * is no link. + */ + e1000_clear_hw_cntrs_80003es2lan(hw); + + return ret_val; +} + +/** + * e1000_initialize_hw_bits_80003es2lan - Init hw bits of ESB2 + * @hw: pointer to the HW structure + * + * Initializes required hardware-dependent bits needed for normal operation. 
+ **/ +STATIC void e1000_initialize_hw_bits_80003es2lan(struct e1000_hw *hw) +{ + u32 reg; + + DEBUGFUNC("e1000_initialize_hw_bits_80003es2lan"); + + /* Transmit Descriptor Control 0 */ + reg = E1000_READ_REG(hw, E1000_TXDCTL(0)); + reg |= (1 << 22); + E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg); + + /* Transmit Descriptor Control 1 */ + reg = E1000_READ_REG(hw, E1000_TXDCTL(1)); + reg |= (1 << 22); + E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg); + + /* Transmit Arbitration Control 0 */ + reg = E1000_READ_REG(hw, E1000_TARC(0)); + reg &= ~(0xF << 27); /* 30:27 */ + if (hw->phy.media_type != e1000_media_type_copper) + reg &= ~(1 << 20); + E1000_WRITE_REG(hw, E1000_TARC(0), reg); + + /* Transmit Arbitration Control 1 */ + reg = E1000_READ_REG(hw, E1000_TARC(1)); + if (E1000_READ_REG(hw, E1000_TCTL) & E1000_TCTL_MULR) + reg &= ~(1 << 28); + else + reg |= (1 << 28); + E1000_WRITE_REG(hw, E1000_TARC(1), reg); + + /* Disable IPv6 extension header parsing because some malformed + * IPv6 headers can hang the Rx. + */ + reg = E1000_READ_REG(hw, E1000_RFCTL); + reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS); + E1000_WRITE_REG(hw, E1000_RFCTL, reg); + + return; +} + +/** + * e1000_copper_link_setup_gg82563_80003es2lan - Configure GG82563 Link + * @hw: pointer to the HW structure + * + * Setup some GG82563 PHY registers for obtaining link + **/ +STATIC s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u32 reg; + u16 data; + + DEBUGFUNC("e1000_copper_link_setup_gg82563_80003es2lan"); + + ret_val = hw->phy.ops.read_reg(hw, GG82563_PHY_MAC_SPEC_CTRL, &data); + if (ret_val) + return ret_val; + + data |= GG82563_MSCR_ASSERT_CRS_ON_TX; + /* Use 25MHz for both link down and 1000Base-T for Tx clock. 
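 * (Editorial cross-reference, not from the upstream file: the forced-speed
 * path in e1000_phy_force_speed_duplex_80003es2lan() above instead selects
 * GG82563_MSCR_TX_CLK_10MBPS_2_5 for forced 10 Mb/s and
 * GG82563_MSCR_TX_CLK_100MBPS_25 otherwise.)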
*/ + data |= GG82563_MSCR_TX_CLK_1000MBPS_25; + + ret_val = hw->phy.ops.write_reg(hw, GG82563_PHY_MAC_SPEC_CTRL, data); + if (ret_val) + return ret_val; + + /* Options: + * MDI/MDI-X = 0 (default) + * 0 - Auto for all speeds + * 1 - MDI mode + * 2 - MDI-X mode + * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes) + */ + ret_val = hw->phy.ops.read_reg(hw, GG82563_PHY_SPEC_CTRL, &data); + if (ret_val) + return ret_val; + + data &= ~GG82563_PSCR_CROSSOVER_MODE_MASK; + + switch (phy->mdix) { + case 1: + data |= GG82563_PSCR_CROSSOVER_MODE_MDI; + break; + case 2: + data |= GG82563_PSCR_CROSSOVER_MODE_MDIX; + break; + case 0: + default: + data |= GG82563_PSCR_CROSSOVER_MODE_AUTO; + break; + } + + /* Options: + * disable_polarity_correction = 0 (default) + * Automatic Correction for Reversed Cable Polarity + * 0 - Disabled + * 1 - Enabled + */ + data &= ~GG82563_PSCR_POLARITY_REVERSAL_DISABLE; + if (phy->disable_polarity_correction) + data |= GG82563_PSCR_POLARITY_REVERSAL_DISABLE; + + ret_val = hw->phy.ops.write_reg(hw, GG82563_PHY_SPEC_CTRL, data); + if (ret_val) + return ret_val; + + /* SW Reset the PHY so all changes take effect */ + ret_val = hw->phy.ops.commit(hw); + if (ret_val) { + DEBUGOUT("Error Resetting the PHY\n"); + return ret_val; + } + + /* Bypass Rx and Tx FIFO's */ + reg = E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL; + data = (E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS | + E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS); + ret_val = e1000_write_kmrn_reg_80003es2lan(hw, reg, data); + if (ret_val) + return ret_val; + + reg = E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE; + ret_val = e1000_read_kmrn_reg_80003es2lan(hw, reg, &data); + if (ret_val) + return ret_val; + data |= E1000_KMRNCTRLSTA_OPMODE_E_IDLE; + ret_val = e1000_write_kmrn_reg_80003es2lan(hw, reg, data); + if (ret_val) + return ret_val; + + ret_val = hw->phy.ops.read_reg(hw, GG82563_PHY_SPEC_CTRL_2, &data); + if (ret_val) + return ret_val; + + data &= ~GG82563_PSCR2_REVERSE_AUTO_NEG; + ret_val = hw->phy.ops.write_reg(hw, GG82563_PHY_SPEC_CTRL_2, data); + if (ret_val) + return ret_val; + + reg = E1000_READ_REG(hw, E1000_CTRL_EXT); + reg &= ~E1000_CTRL_EXT_LINK_MODE_MASK; + E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg); + + ret_val = hw->phy.ops.read_reg(hw, GG82563_PHY_PWR_MGMT_CTRL, &data); + if (ret_val) + return ret_val; + + /* Do not init these registers when the HW is in IAMT mode, since the + * firmware will have already initialized them. We only initialize + * them if the HW is not in IAMT mode. + */ + if (!hw->mac.ops.check_mng_mode(hw)) { + /* Enable Electrical Idle on the PHY */ + data |= GG82563_PMCR_ENABLE_ELECTRICAL_IDLE; + ret_val = hw->phy.ops.write_reg(hw, GG82563_PHY_PWR_MGMT_CTRL, + data); + if (ret_val) + return ret_val; + + ret_val = hw->phy.ops.read_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, + &data); + if (ret_val) + return ret_val; + + data &= ~GG82563_KMCR_PASS_FALSE_CARRIER; + ret_val = hw->phy.ops.write_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, + data); + if (ret_val) + return ret_val; + } + + /* Workaround: Disable padding in Kumeran interface in the MAC + * and in the PHY to avoid CRC errors. 
+ */ + ret_val = hw->phy.ops.read_reg(hw, GG82563_PHY_INBAND_CTRL, &data); + if (ret_val) + return ret_val; + + data |= GG82563_ICR_DIS_PADDING; + ret_val = hw->phy.ops.write_reg(hw, GG82563_PHY_INBAND_CTRL, data); + if (ret_val) + return ret_val; + + return E1000_SUCCESS; +} + +/** + * e1000_setup_copper_link_80003es2lan - Setup Copper Link for ESB2 + * @hw: pointer to the HW structure + * + * Essentially a wrapper for setting up all things "copper" related. + * This is a function pointer entry point called by the mac module. + **/ +STATIC s32 e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + u16 reg_data; + + DEBUGFUNC("e1000_setup_copper_link_80003es2lan"); + + ctrl = E1000_READ_REG(hw, E1000_CTRL); + ctrl |= E1000_CTRL_SLU; + ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + + /* Set the mac to wait the maximum time between each + * iteration and increase the max iterations when + * polling the phy; this fixes erroneous timeouts at 10Mbps. + */ + ret_val = e1000_write_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 4), + 0xFFFF); + if (ret_val) + return ret_val; + ret_val = e1000_read_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 9), + &reg_data); + if (ret_val) + return ret_val; + reg_data |= 0x3F; + ret_val = e1000_write_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 9), + reg_data); + if (ret_val) + return ret_val; + ret_val = + e1000_read_kmrn_reg_80003es2lan(hw, + E1000_KMRNCTRLSTA_OFFSET_INB_CTRL, + &reg_data); + if (ret_val) + return ret_val; + reg_data |= E1000_KMRNCTRLSTA_INB_CTRL_DIS_PADDING; + ret_val = + e1000_write_kmrn_reg_80003es2lan(hw, + E1000_KMRNCTRLSTA_OFFSET_INB_CTRL, + reg_data); + if (ret_val) + return ret_val; + + ret_val = e1000_copper_link_setup_gg82563_80003es2lan(hw); + if (ret_val) + return ret_val; + + return e1000_setup_copper_link_generic(hw); +} + +/** + * e1000_cfg_on_link_up_80003es2lan - es2 link configuration after link-up + * @hw: pointer to the HW structure + * @duplex: current duplex setting + * + * Configure the KMRN interface by applying last minute quirks for + * 10/100 operation. + **/ +STATIC s32 e1000_cfg_on_link_up_80003es2lan(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + u16 speed; + u16 duplex; + + DEBUGFUNC("e1000_configure_on_link_up"); + + if (hw->phy.media_type == e1000_media_type_copper) { + ret_val = e1000_get_speed_and_duplex_copper_generic(hw, &speed, + &duplex); + if (ret_val) + return ret_val; + + if (speed == SPEED_1000) + ret_val = e1000_cfg_kmrn_1000_80003es2lan(hw); + else + ret_val = e1000_cfg_kmrn_10_100_80003es2lan(hw, duplex); + } + + return ret_val; +} + +/** + * e1000_cfg_kmrn_10_100_80003es2lan - Apply "quirks" for 10/100 operation + * @hw: pointer to the HW structure + * @duplex: current duplex setting + * + * Configure the KMRN interface by applying last minute quirks for + * 10/100 operation.
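 *
 * (Editorial note on the body below, not from the upstream file:
 * GG82563_PHY_KMRN_MODE_CTRL is read twice in a loop until two consecutive
 * reads agree, bounded by GG82563_MAX_KMRN_RETRY attempts, before
 * GG82563_KMCR_PASS_FALSE_CARRIER is set for half duplex or cleared for
 * full duplex and the value is written back.)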
+ **/ +STATIC s32 e1000_cfg_kmrn_10_100_80003es2lan(struct e1000_hw *hw, u16 duplex) +{ + s32 ret_val; + u32 tipg; + u32 i = 0; + u16 reg_data, reg_data2; + + DEBUGFUNC("e1000_configure_kmrn_for_10_100"); + + reg_data = E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT; + ret_val = + e1000_write_kmrn_reg_80003es2lan(hw, + E1000_KMRNCTRLSTA_OFFSET_HD_CTRL, + reg_data); + if (ret_val) + return ret_val; + + /* Configure Transmit Inter-Packet Gap */ + tipg = E1000_READ_REG(hw, E1000_TIPG); + tipg &= ~E1000_TIPG_IPGT_MASK; + tipg |= DEFAULT_TIPG_IPGT_10_100_80003ES2LAN; + E1000_WRITE_REG(hw, E1000_TIPG, tipg); + + do { + ret_val = hw->phy.ops.read_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, + &reg_data); + if (ret_val) + return ret_val; + + ret_val = hw->phy.ops.read_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, + &reg_data2); + if (ret_val) + return ret_val; + i++; + } while ((reg_data != reg_data2) && (i < GG82563_MAX_KMRN_RETRY)); + + if (duplex == HALF_DUPLEX) + reg_data |= GG82563_KMCR_PASS_FALSE_CARRIER; + else + reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER; + + return hw->phy.ops.write_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data); +} + +/** + * e1000_cfg_kmrn_1000_80003es2lan - Apply "quirks" for gigabit operation + * @hw: pointer to the HW structure + * + * Configure the KMRN interface by applying last minute quirks for + * gigabit operation. + **/ +STATIC s32 e1000_cfg_kmrn_1000_80003es2lan(struct e1000_hw *hw) +{ + s32 ret_val; + u16 reg_data, reg_data2; + u32 tipg; + u32 i = 0; + + DEBUGFUNC("e1000_configure_kmrn_for_1000"); + + reg_data = E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT; + ret_val = + e1000_write_kmrn_reg_80003es2lan(hw, + E1000_KMRNCTRLSTA_OFFSET_HD_CTRL, + reg_data); + if (ret_val) + return ret_val; + + /* Configure Transmit Inter-Packet Gap */ + tipg = E1000_READ_REG(hw, E1000_TIPG); + tipg &= ~E1000_TIPG_IPGT_MASK; + tipg |= DEFAULT_TIPG_IPGT_1000_80003ES2LAN; + E1000_WRITE_REG(hw, E1000_TIPG, tipg); + + do { + ret_val = hw->phy.ops.read_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, + &reg_data); + if (ret_val) + return ret_val; + + ret_val = hw->phy.ops.read_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, + &reg_data2); + if (ret_val) + return ret_val; + i++; + } while ((reg_data != reg_data2) && (i < GG82563_MAX_KMRN_RETRY)); + + reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER; + + return hw->phy.ops.write_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data); +} + +/** + * e1000_read_kmrn_reg_80003es2lan - Read kumeran register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Acquire semaphore, then read the PHY register at offset + * using the kumeran interface. The information retrieved is stored in data. + * Release the semaphore before exiting.
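 *
 * Usage sketch (editorial, not part of the upstream file): the offset is
 * shifted into the offset field of E1000_KMRNCTRLSTA together with the
 * read-enable bit, and the register value comes back in the low 16 bits,
 * so reading the half-duplex control word defined in e1000_80003es2lan.h
 * looks like:
 *
 *	u16 hd_ctrl;
 *
 *	ret_val = e1000_read_kmrn_reg_80003es2lan(hw,
 *					E1000_KMRNCTRLSTA_OFFSET_HD_CTRL,
 *					&hd_ctrl);
 *	if (ret_val)
 *		return ret_val;
 *
 * with e1000_write_kmrn_reg_80003es2lan() below as the symmetric store.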
+ **/ +STATIC s32 e1000_read_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset, + u16 *data) +{ + u32 kmrnctrlsta; + s32 ret_val; + + DEBUGFUNC("e1000_read_kmrn_reg_80003es2lan"); + + ret_val = e1000_acquire_mac_csr_80003es2lan(hw); + if (ret_val) + return ret_val; + + kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & + E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN; + E1000_WRITE_REG(hw, E1000_KMRNCTRLSTA, kmrnctrlsta); + E1000_WRITE_FLUSH(hw); + + usec_delay(2); + + kmrnctrlsta = E1000_READ_REG(hw, E1000_KMRNCTRLSTA); + *data = (u16)kmrnctrlsta; + + e1000_release_mac_csr_80003es2lan(hw); + + return ret_val; +} + +/** + * e1000_write_kmrn_reg_80003es2lan - Write kumeran register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquire semaphore, then write the data to PHY register + * at the offset using the kumeran interface. Release semaphore + * before exiting. + **/ +STATIC s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset, + u16 data) +{ + u32 kmrnctrlsta; + s32 ret_val; + + DEBUGFUNC("e1000_write_kmrn_reg_80003es2lan"); + + ret_val = e1000_acquire_mac_csr_80003es2lan(hw); + if (ret_val) + return ret_val; + + kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & + E1000_KMRNCTRLSTA_OFFSET) | data; + E1000_WRITE_REG(hw, E1000_KMRNCTRLSTA, kmrnctrlsta); + E1000_WRITE_FLUSH(hw); + + usec_delay(2); + + e1000_release_mac_csr_80003es2lan(hw); + + return ret_val; +} + +/** + * e1000_read_mac_addr_80003es2lan - Read device MAC address + * @hw: pointer to the HW structure + **/ +STATIC s32 e1000_read_mac_addr_80003es2lan(struct e1000_hw *hw) +{ + s32 ret_val; + + DEBUGFUNC("e1000_read_mac_addr_80003es2lan"); + + /* If there's an alternate MAC address place it in RAR0 + * so that it will override the Si installed default perm + * address. + */ + ret_val = e1000_check_alt_mac_addr_generic(hw); + if (ret_val) + return ret_val; + + return e1000_read_mac_addr_generic(hw); +} + +/** + * e1000_power_down_phy_copper_80003es2lan - Remove link during PHY power down + * @hw: pointer to the HW structure + * + * In the case of a PHY power down to save power, or to turn off link during a + * driver unload, or wake on lan is not enabled, remove the link. + **/ +STATIC void e1000_power_down_phy_copper_80003es2lan(struct e1000_hw *hw) +{ + /* If the management interface is not enabled, then power down */ + if (!(hw->mac.ops.check_mng_mode(hw) || + hw->phy.ops.check_reset_block(hw))) + e1000_power_down_phy_copper(hw); + + return; +} + +/** + * e1000_clear_hw_cntrs_80003es2lan - Clear device specific hardware counters + * @hw: pointer to the HW structure + * + * Clears the hardware counters by reading the counter registers. 
+ **/ +STATIC void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_clear_hw_cntrs_80003es2lan"); + + e1000_clear_hw_cntrs_base_generic(hw); + + E1000_READ_REG(hw, E1000_PRC64); + E1000_READ_REG(hw, E1000_PRC127); + E1000_READ_REG(hw, E1000_PRC255); + E1000_READ_REG(hw, E1000_PRC511); + E1000_READ_REG(hw, E1000_PRC1023); + E1000_READ_REG(hw, E1000_PRC1522); + E1000_READ_REG(hw, E1000_PTC64); + E1000_READ_REG(hw, E1000_PTC127); + E1000_READ_REG(hw, E1000_PTC255); + E1000_READ_REG(hw, E1000_PTC511); + E1000_READ_REG(hw, E1000_PTC1023); + E1000_READ_REG(hw, E1000_PTC1522); + + E1000_READ_REG(hw, E1000_ALGNERRC); + E1000_READ_REG(hw, E1000_RXERRC); + E1000_READ_REG(hw, E1000_TNCRS); + E1000_READ_REG(hw, E1000_CEXTERR); + E1000_READ_REG(hw, E1000_TSCTC); + E1000_READ_REG(hw, E1000_TSCTFC); + + E1000_READ_REG(hw, E1000_MGTPRC); + E1000_READ_REG(hw, E1000_MGTPDC); + E1000_READ_REG(hw, E1000_MGTPTC); + + E1000_READ_REG(hw, E1000_IAC); + E1000_READ_REG(hw, E1000_ICRXOC); + + E1000_READ_REG(hw, E1000_ICRXPTC); + E1000_READ_REG(hw, E1000_ICRXATC); + E1000_READ_REG(hw, E1000_ICTXPTC); + E1000_READ_REG(hw, E1000_ICTXATC); + E1000_READ_REG(hw, E1000_ICTXQEC); + E1000_READ_REG(hw, E1000_ICTXQMTC); + E1000_READ_REG(hw, E1000_ICRXDMTC); +} diff --git a/src/spdk/dpdk/drivers/net/e1000/base/e1000_80003es2lan.h b/src/spdk/dpdk/drivers/net/e1000/base/e1000_80003es2lan.h new file mode 100644 index 000000000..f77beb253 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/e1000/base/e1000_80003es2lan.h @@ -0,0 +1,71 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001 - 2015 Intel Corporation + */ + +#ifndef _E1000_80003ES2LAN_H_ +#define _E1000_80003ES2LAN_H_ + +#define E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL 0x00 +#define E1000_KMRNCTRLSTA_OFFSET_INB_CTRL 0x02 +#define E1000_KMRNCTRLSTA_OFFSET_HD_CTRL 0x10 +#define E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE 0x1F + +#define E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS 0x0008 +#define E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS 0x0800 +#define E1000_KMRNCTRLSTA_INB_CTRL_DIS_PADDING 0x0010 + +#define E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT 0x0004 +#define E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT 0x0000 +#define E1000_KMRNCTRLSTA_OPMODE_E_IDLE 0x2000 + +#define E1000_KMRNCTRLSTA_OPMODE_MASK 0x000C +#define E1000_KMRNCTRLSTA_OPMODE_INBAND_MDIO 0x0004 + +#define E1000_TCTL_EXT_GCEX_MASK 0x000FFC00 /* Gig Carry Extend Padding */ +#define DEFAULT_TCTL_EXT_GCEX_80003ES2LAN 0x00010000 + +#define DEFAULT_TIPG_IPGT_1000_80003ES2LAN 0x8 +#define DEFAULT_TIPG_IPGT_10_100_80003ES2LAN 0x9 + +/* GG82563 PHY Specific Status Register (Page 0, Register 16 */ +#define GG82563_PSCR_POLARITY_REVERSAL_DISABLE 0x0002 /* 1=Reversal Dis */ +#define GG82563_PSCR_CROSSOVER_MODE_MASK 0x0060 +#define GG82563_PSCR_CROSSOVER_MODE_MDI 0x0000 /* 00=Manual MDI */ +#define GG82563_PSCR_CROSSOVER_MODE_MDIX 0x0020 /* 01=Manual MDIX */ +#define GG82563_PSCR_CROSSOVER_MODE_AUTO 0x0060 /* 11=Auto crossover */ + +/* PHY Specific Control Register 2 (Page 0, Register 26) */ +#define GG82563_PSCR2_REVERSE_AUTO_NEG 0x2000 /* 1=Reverse Auto-Neg */ + +/* MAC Specific Control Register (Page 2, Register 21) */ +/* Tx clock speed for Link Down and 1000BASE-T for the following speeds */ +#define GG82563_MSCR_TX_CLK_MASK 0x0007 +#define GG82563_MSCR_TX_CLK_10MBPS_2_5 0x0004 +#define GG82563_MSCR_TX_CLK_100MBPS_25 0x0005 +#define GG82563_MSCR_TX_CLK_1000MBPS_25 0x0007 + +#define GG82563_MSCR_ASSERT_CRS_ON_TX 0x0010 /* 1=Assert */ + +/* DSP Distance Register (Page 5, Register 26) + * 0 = <50M + * 1 = 
50-80M + * 2 = 80-100M + * 3 = 110-140M + * 4 = >140M + */ +#define GG82563_DSPD_CABLE_LENGTH 0x0007 + +/* Kumeran Mode Control Register (Page 193, Register 16) */ +#define GG82563_KMCR_PASS_FALSE_CARRIER 0x0800 + +/* Max number of times Kumeran read/write should be validated */ +#define GG82563_MAX_KMRN_RETRY 0x5 + +/* Power Management Control Register (Page 193, Register 20) */ +/* 1=Enable SERDES Electrical Idle */ +#define GG82563_PMCR_ENABLE_ELECTRICAL_IDLE 0x0001 + +/* In-Band Control Register (Page 194, Register 18) */ +#define GG82563_ICR_DIS_PADDING 0x0010 /* Disable Padding */ + +#endif diff --git a/src/spdk/dpdk/drivers/net/e1000/base/e1000_82540.c b/src/spdk/dpdk/drivers/net/e1000/base/e1000_82540.c new file mode 100644 index 000000000..0d14f6e07 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/e1000/base/e1000_82540.c @@ -0,0 +1,688 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001 - 2015 Intel Corporation + */ + +/* + * 82540EM Gigabit Ethernet Controller + * 82540EP Gigabit Ethernet Controller + * 82545EM Gigabit Ethernet Controller (Copper) + * 82545EM Gigabit Ethernet Controller (Fiber) + * 82545GM Gigabit Ethernet Controller + * 82546EB Gigabit Ethernet Controller (Copper) + * 82546EB Gigabit Ethernet Controller (Fiber) + * 82546GB Gigabit Ethernet Controller + */ + +#include "e1000_api.h" + +STATIC s32 e1000_init_phy_params_82540(struct e1000_hw *hw); +STATIC s32 e1000_init_nvm_params_82540(struct e1000_hw *hw); +STATIC s32 e1000_init_mac_params_82540(struct e1000_hw *hw); +STATIC s32 e1000_adjust_serdes_amplitude_82540(struct e1000_hw *hw); +STATIC void e1000_clear_hw_cntrs_82540(struct e1000_hw *hw); +STATIC s32 e1000_init_hw_82540(struct e1000_hw *hw); +STATIC s32 e1000_reset_hw_82540(struct e1000_hw *hw); +STATIC s32 e1000_set_phy_mode_82540(struct e1000_hw *hw); +STATIC s32 e1000_set_vco_speed_82540(struct e1000_hw *hw); +STATIC s32 e1000_setup_copper_link_82540(struct e1000_hw *hw); +STATIC s32 e1000_setup_fiber_serdes_link_82540(struct e1000_hw *hw); +STATIC void e1000_power_down_phy_copper_82540(struct e1000_hw *hw); +STATIC s32 e1000_read_mac_addr_82540(struct e1000_hw *hw); + +/** + * e1000_init_phy_params_82540 - Init PHY func ptrs. 
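 *
 * (Editorial sketch, not from the upstream source: as with the ESB2 code
 * above, this is reached through the init_params hooks installed by
 * e1000_init_function_pointers_82540(), e.g.
 *
 *	e1000_init_function_pointers_82540(hw);
 *	ret_val = hw->phy.ops.init_params(hw);
 *
 * after which hw->phy.ops is populated with the M88 PHY helpers set below.)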
+ * @hw: pointer to the HW structure + **/ +STATIC s32 e1000_init_phy_params_82540(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + + phy->addr = 1; + phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; + phy->reset_delay_us = 10000; + phy->type = e1000_phy_m88; + + /* Function Pointers */ + phy->ops.check_polarity = e1000_check_polarity_m88; + phy->ops.commit = e1000_phy_sw_reset_generic; + phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88; + phy->ops.get_cable_length = e1000_get_cable_length_m88; + phy->ops.get_cfg_done = e1000_get_cfg_done_generic; + phy->ops.read_reg = e1000_read_phy_reg_m88; + phy->ops.reset = e1000_phy_hw_reset_generic; + phy->ops.write_reg = e1000_write_phy_reg_m88; + phy->ops.get_info = e1000_get_phy_info_m88; + phy->ops.power_up = e1000_power_up_phy_copper; + phy->ops.power_down = e1000_power_down_phy_copper_82540; + + ret_val = e1000_get_phy_id(hw); + if (ret_val) + goto out; + + /* Verify phy id */ + switch (hw->mac.type) { + case e1000_82540: + case e1000_82545: + case e1000_82545_rev_3: + case e1000_82546: + case e1000_82546_rev_3: + if (phy->id == M88E1011_I_PHY_ID) + break; + /* Fall Through */ + default: + ret_val = -E1000_ERR_PHY; + goto out; + break; + } + +out: + return ret_val; +} + +/** + * e1000_init_nvm_params_82540 - Init NVM func ptrs. + * @hw: pointer to the HW structure + **/ +STATIC s32 e1000_init_nvm_params_82540(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 eecd = E1000_READ_REG(hw, E1000_EECD); + + DEBUGFUNC("e1000_init_nvm_params_82540"); + + nvm->type = e1000_nvm_eeprom_microwire; + nvm->delay_usec = 50; + nvm->opcode_bits = 3; + switch (nvm->override) { + case e1000_nvm_override_microwire_large: + nvm->address_bits = 8; + nvm->word_size = 256; + break; + case e1000_nvm_override_microwire_small: + nvm->address_bits = 6; + nvm->word_size = 64; + break; + default: + nvm->address_bits = eecd & E1000_EECD_SIZE ? 8 : 6; + nvm->word_size = eecd & E1000_EECD_SIZE ? 256 : 64; + break; + } + + /* Function Pointers */ + nvm->ops.acquire = e1000_acquire_nvm_generic; + nvm->ops.read = e1000_read_nvm_microwire; + nvm->ops.release = e1000_release_nvm_generic; + nvm->ops.update = e1000_update_nvm_checksum_generic; + nvm->ops.valid_led_default = e1000_valid_led_default_generic; + nvm->ops.validate = e1000_validate_nvm_checksum_generic; + nvm->ops.write = e1000_write_nvm_microwire; + + return E1000_SUCCESS; +} + +/** + * e1000_init_mac_params_82540 - Init MAC func ptrs. 
+ * @hw: pointer to the HW structure + **/ +STATIC s32 e1000_init_mac_params_82540(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_init_mac_params_82540"); + + /* Set media type */ + switch (hw->device_id) { + case E1000_DEV_ID_82545EM_FIBER: + case E1000_DEV_ID_82545GM_FIBER: + case E1000_DEV_ID_82546EB_FIBER: + case E1000_DEV_ID_82546GB_FIBER: + hw->phy.media_type = e1000_media_type_fiber; + break; + case E1000_DEV_ID_82545GM_SERDES: + case E1000_DEV_ID_82546GB_SERDES: + hw->phy.media_type = e1000_media_type_internal_serdes; + break; + default: + hw->phy.media_type = e1000_media_type_copper; + break; + } + + /* Set mta register count */ + mac->mta_reg_count = 128; + /* Set rar entry count */ + mac->rar_entry_count = E1000_RAR_ENTRIES; + + /* Function pointers */ + + /* bus type/speed/width */ + mac->ops.get_bus_info = e1000_get_bus_info_pci_generic; + /* function id */ + mac->ops.set_lan_id = e1000_set_lan_id_multi_port_pci; + /* reset */ + mac->ops.reset_hw = e1000_reset_hw_82540; + /* hw initialization */ + mac->ops.init_hw = e1000_init_hw_82540; + /* link setup */ + mac->ops.setup_link = e1000_setup_link_generic; + /* physical interface setup */ + mac->ops.setup_physical_interface = + (hw->phy.media_type == e1000_media_type_copper) + ? e1000_setup_copper_link_82540 + : e1000_setup_fiber_serdes_link_82540; + /* check for link */ + switch (hw->phy.media_type) { + case e1000_media_type_copper: + mac->ops.check_for_link = e1000_check_for_copper_link_generic; + break; + case e1000_media_type_fiber: + mac->ops.check_for_link = e1000_check_for_fiber_link_generic; + break; + case e1000_media_type_internal_serdes: + mac->ops.check_for_link = e1000_check_for_serdes_link_generic; + break; + default: + ret_val = -E1000_ERR_CONFIG; + goto out; + break; + } + /* link info */ + mac->ops.get_link_up_info = + (hw->phy.media_type == e1000_media_type_copper) + ? e1000_get_speed_and_duplex_copper_generic + : e1000_get_speed_and_duplex_fiber_serdes_generic; + /* multicast address update */ + mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic; + /* writing VFTA */ + mac->ops.write_vfta = e1000_write_vfta_generic; + /* clearing VFTA */ + mac->ops.clear_vfta = e1000_clear_vfta_generic; + /* read mac address */ + mac->ops.read_mac_addr = e1000_read_mac_addr_82540; + /* ID LED init */ + mac->ops.id_led_init = e1000_id_led_init_generic; + /* setup LED */ + mac->ops.setup_led = e1000_setup_led_generic; + /* cleanup LED */ + mac->ops.cleanup_led = e1000_cleanup_led_generic; + /* turn on/off LED */ + mac->ops.led_on = e1000_led_on_generic; + mac->ops.led_off = e1000_led_off_generic; + /* clear hardware counters */ + mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_82540; + +out: + return ret_val; +} + +/** + * e1000_init_function_pointers_82540 - Init func ptrs. + * @hw: pointer to the HW structure + * + * Called to initialize all function pointers and parameters. + **/ +void e1000_init_function_pointers_82540(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_init_function_pointers_82540"); + + hw->mac.ops.init_params = e1000_init_mac_params_82540; + hw->nvm.ops.init_params = e1000_init_nvm_params_82540; + hw->phy.ops.init_params = e1000_init_phy_params_82540; +} + +/** + * e1000_reset_hw_82540 - Reset hardware + * @hw: pointer to the HW structure + * + * This resets the hardware into a known state. 
+ **/ +STATIC s32 e1000_reset_hw_82540(struct e1000_hw *hw) +{ + u32 ctrl, manc; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_reset_hw_82540"); + + DEBUGOUT("Masking off all interrupts\n"); + E1000_WRITE_REG(hw, E1000_IMC, 0xFFFFFFFF); + + E1000_WRITE_REG(hw, E1000_RCTL, 0); + E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP); + E1000_WRITE_FLUSH(hw); + + /* + * Delay to allow any outstanding PCI transactions to complete + * before resetting the device. + */ + msec_delay(10); + + ctrl = E1000_READ_REG(hw, E1000_CTRL); + + DEBUGOUT("Issuing a global reset to 82540/82545/82546 MAC\n"); + switch (hw->mac.type) { + case e1000_82545_rev_3: + case e1000_82546_rev_3: + E1000_WRITE_REG(hw, E1000_CTRL_DUP, ctrl | E1000_CTRL_RST); + break; + default: + /* + * These controllers can't ack the 64-bit write when + * issuing the reset, so we use IO-mapping as a + * workaround to issue the reset. + */ + E1000_WRITE_REG_IO(hw, E1000_CTRL, ctrl | E1000_CTRL_RST); + break; + } + + /* Wait for EEPROM reload */ + msec_delay(5); + + /* Disable HW ARPs on ASF enabled adapters */ + manc = E1000_READ_REG(hw, E1000_MANC); + manc &= ~E1000_MANC_ARP_EN; + E1000_WRITE_REG(hw, E1000_MANC, manc); + + E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); + E1000_READ_REG(hw, E1000_ICR); + + return ret_val; +} + +/** + * e1000_init_hw_82540 - Initialize hardware + * @hw: pointer to the HW structure + * + * This inits the hardware readying it for operation. + **/ +STATIC s32 e1000_init_hw_82540(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 txdctl, ctrl_ext; + s32 ret_val; + u16 i; + + DEBUGFUNC("e1000_init_hw_82540"); + + /* Initialize identification LED */ + ret_val = mac->ops.id_led_init(hw); + if (ret_val) { + DEBUGOUT("Error initializing identification LED\n"); + /* This is not fatal and we should not stop init due to this */ + } + + /* Disabling VLAN filtering */ + DEBUGOUT("Initializing the IEEE VLAN\n"); + if (mac->type < e1000_82545_rev_3) + E1000_WRITE_REG(hw, E1000_VET, 0); + + mac->ops.clear_vfta(hw); + + /* Setup the receive address. */ + e1000_init_rx_addrs_generic(hw, mac->rar_entry_count); + + /* Zero out the Multicast HASH table */ + DEBUGOUT("Zeroing the MTA\n"); + for (i = 0; i < mac->mta_reg_count; i++) { + E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); + /* + * Avoid back to back register writes by adding the register + * read (flush). This is to protect against some strange + * bridge configurations that may issue Memory Write Block + * (MWB) to our register space. The *_rev_3 hardware at + * least doesn't respond correctly to every other dword in an + * MWB to our register space. + */ + E1000_WRITE_FLUSH(hw); + } + + if (mac->type < e1000_82545_rev_3) + e1000_pcix_mmrbc_workaround_generic(hw); + + /* Setup link and flow control */ + ret_val = mac->ops.setup_link(hw); + + txdctl = E1000_READ_REG(hw, E1000_TXDCTL(0)); + txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB; + E1000_WRITE_REG(hw, E1000_TXDCTL(0), txdctl); + + /* + * Clear all of the statistics registers (clear on read). It is + * important that we do this after we have tried to establish link + * because the symbol error count will increment wildly if there + * is no link. + */ + e1000_clear_hw_cntrs_82540(hw); + + if ((hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER) || + (hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3)) { + ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); + /* + * Relaxed ordering must be disabled to avoid a parity + * error crash in a PCI slot. 
+ */ + ctrl_ext |= E1000_CTRL_EXT_RO_DIS; + E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); + } + + return ret_val; +} + +/** + * e1000_setup_copper_link_82540 - Configure copper link settings + * @hw: pointer to the HW structure + * + * Calls the appropriate function to configure the link for auto-neg or forced + * speed and duplex. Then we check for link, once link is established calls + * to configure collision distance and flow control are called. If link is + * not established, we return -E1000_ERR_PHY (-2). + **/ +STATIC s32 e1000_setup_copper_link_82540(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + u16 data; + + DEBUGFUNC("e1000_setup_copper_link_82540"); + + ctrl = E1000_READ_REG(hw, E1000_CTRL); + ctrl |= E1000_CTRL_SLU; + ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + + ret_val = e1000_set_phy_mode_82540(hw); + if (ret_val) + goto out; + + if (hw->mac.type == e1000_82545_rev_3 || + hw->mac.type == e1000_82546_rev_3) { + ret_val = hw->phy.ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, + &data); + if (ret_val) + goto out; + data |= 0x00000008; + ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, + data); + if (ret_val) + goto out; + } + + ret_val = e1000_copper_link_setup_m88(hw); + if (ret_val) + goto out; + + ret_val = e1000_setup_copper_link_generic(hw); + +out: + return ret_val; +} + +/** + * e1000_setup_fiber_serdes_link_82540 - Setup link for fiber/serdes + * @hw: pointer to the HW structure + * + * Set the output amplitude to the value in the EEPROM and adjust the VCO + * speed to improve Bit Error Rate (BER) performance. Configures collision + * distance and flow control for fiber and serdes links. Upon successful + * setup, poll for link. + **/ +STATIC s32 e1000_setup_fiber_serdes_link_82540(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_setup_fiber_serdes_link_82540"); + + switch (mac->type) { + case e1000_82545_rev_3: + case e1000_82546_rev_3: + if (hw->phy.media_type == e1000_media_type_internal_serdes) { + /* + * If we're on serdes media, adjust the output + * amplitude to value set in the EEPROM. + */ + ret_val = e1000_adjust_serdes_amplitude_82540(hw); + if (ret_val) + goto out; + } + /* Adjust VCO speed to improve BER performance */ + ret_val = e1000_set_vco_speed_82540(hw); + if (ret_val) + goto out; + default: + break; + } + + ret_val = e1000_setup_fiber_serdes_link_generic(hw); + +out: + return ret_val; +} + +/** + * e1000_adjust_serdes_amplitude_82540 - Adjust amplitude based on EEPROM + * @hw: pointer to the HW structure + * + * Adjust the SERDES output amplitude based on the EEPROM settings. + **/ +STATIC s32 e1000_adjust_serdes_amplitude_82540(struct e1000_hw *hw) +{ + s32 ret_val; + u16 nvm_data; + + DEBUGFUNC("e1000_adjust_serdes_amplitude_82540"); + + ret_val = hw->nvm.ops.read(hw, NVM_SERDES_AMPLITUDE, 1, &nvm_data); + if (ret_val) + goto out; + + if (nvm_data != NVM_RESERVED_WORD) { + /* Adjust serdes output amplitude only. */ + nvm_data &= NVM_SERDES_AMPLITUDE_MASK; + ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_EXT_CTRL, + nvm_data); + if (ret_val) + goto out; + } + +out: + return ret_val; +} + +/** + * e1000_set_vco_speed_82540 - Set VCO speed for better performance + * @hw: pointer to the HW structure + * + * Set the VCO speed to improve Bit Error Rate (BER) performance. 
+ **/ +STATIC s32 e1000_set_vco_speed_82540(struct e1000_hw *hw) +{ + s32 ret_val; + u16 default_page = 0; + u16 phy_data; + + DEBUGFUNC("e1000_set_vco_speed_82540"); + + /* Set PHY register 30, page 5, bit 8 to 0 */ + + ret_val = hw->phy.ops.read_reg(hw, M88E1000_PHY_PAGE_SELECT, + &default_page); + if (ret_val) + goto out; + + ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0005); + if (ret_val) + goto out; + + ret_val = hw->phy.ops.read_reg(hw, M88E1000_PHY_GEN_CONTROL, &phy_data); + if (ret_val) + goto out; + + phy_data &= ~M88E1000_PHY_VCO_REG_BIT8; + ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, phy_data); + if (ret_val) + goto out; + + /* Set PHY register 30, page 4, bit 11 to 1 */ + + ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0004); + if (ret_val) + goto out; + + ret_val = hw->phy.ops.read_reg(hw, M88E1000_PHY_GEN_CONTROL, &phy_data); + if (ret_val) + goto out; + + phy_data |= M88E1000_PHY_VCO_REG_BIT11; + ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, phy_data); + if (ret_val) + goto out; + + ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_PAGE_SELECT, + default_page); + +out: + return ret_val; +} + +/** + * e1000_set_phy_mode_82540 - Set PHY to class A mode + * @hw: pointer to the HW structure + * + * Sets the PHY to class A mode and assumes the following operations will + * follow to enable the new class mode: + * 1. Do a PHY soft reset. + * 2. Restart auto-negotiation or force link. + **/ +STATIC s32 e1000_set_phy_mode_82540(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + u16 nvm_data; + + DEBUGFUNC("e1000_set_phy_mode_82540"); + + if (hw->mac.type != e1000_82545_rev_3) + goto out; + + ret_val = hw->nvm.ops.read(hw, NVM_PHY_CLASS_WORD, 1, &nvm_data); + if (ret_val) { + ret_val = -E1000_ERR_PHY; + goto out; + } + + if ((nvm_data != NVM_RESERVED_WORD) && (nvm_data & NVM_PHY_CLASS_A)) { + ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_PAGE_SELECT, + 0x000B); + if (ret_val) { + ret_val = -E1000_ERR_PHY; + goto out; + } + ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, + 0x8104); + if (ret_val) { + ret_val = -E1000_ERR_PHY; + goto out; + } + + } + +out: + return ret_val; +} + +/** + * e1000_power_down_phy_copper_82540 - Remove link in case of PHY power down + * @hw: pointer to the HW structure + * + * In the case of a PHY power down to save power, or to turn off link during a + * driver unload, or wake on lan is not enabled, remove the link. + **/ +STATIC void e1000_power_down_phy_copper_82540(struct e1000_hw *hw) +{ + /* If the management interface is not enabled, then power down */ + if (!(E1000_READ_REG(hw, E1000_MANC) & E1000_MANC_SMBUS_EN)) + e1000_power_down_phy_copper(hw); + + return; +} + +/** + * e1000_clear_hw_cntrs_82540 - Clear device specific hardware counters + * @hw: pointer to the HW structure + * + * Clears the hardware counters by reading the counter registers. 
+ **/ +STATIC void e1000_clear_hw_cntrs_82540(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_clear_hw_cntrs_82540"); + + e1000_clear_hw_cntrs_base_generic(hw); + + E1000_READ_REG(hw, E1000_PRC64); + E1000_READ_REG(hw, E1000_PRC127); + E1000_READ_REG(hw, E1000_PRC255); + E1000_READ_REG(hw, E1000_PRC511); + E1000_READ_REG(hw, E1000_PRC1023); + E1000_READ_REG(hw, E1000_PRC1522); + E1000_READ_REG(hw, E1000_PTC64); + E1000_READ_REG(hw, E1000_PTC127); + E1000_READ_REG(hw, E1000_PTC255); + E1000_READ_REG(hw, E1000_PTC511); + E1000_READ_REG(hw, E1000_PTC1023); + E1000_READ_REG(hw, E1000_PTC1522); + + E1000_READ_REG(hw, E1000_ALGNERRC); + E1000_READ_REG(hw, E1000_RXERRC); + E1000_READ_REG(hw, E1000_TNCRS); + E1000_READ_REG(hw, E1000_CEXTERR); + E1000_READ_REG(hw, E1000_TSCTC); + E1000_READ_REG(hw, E1000_TSCTFC); + + E1000_READ_REG(hw, E1000_MGTPRC); + E1000_READ_REG(hw, E1000_MGTPDC); + E1000_READ_REG(hw, E1000_MGTPTC); +} + +/** + * e1000_read_mac_addr_82540 - Read device MAC address + * @hw: pointer to the HW structure + * + * Reads the device MAC address from the EEPROM and stores the value. + * Since devices with two ports use the same EEPROM, we increment the + * last bit in the MAC address for the second port. + * + * This version is being used over generic because of customer issues + * with VmWare and Virtual Box when using generic. It seems in + * the emulated 82545, RAR[0] does NOT have a valid address after a + * reset, this older method works and using this breaks nothing for + * these legacy adapters. + **/ +s32 e1000_read_mac_addr_82540(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + u16 offset, nvm_data, i; + + DEBUGFUNC("e1000_read_mac_addr"); + + for (i = 0; i < ETH_ADDR_LEN; i += 2) { + offset = i >> 1; + ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + goto out; + } + hw->mac.perm_addr[i] = (u8)(nvm_data & 0xFF); + hw->mac.perm_addr[i+1] = (u8)(nvm_data >> 8); + } + + /* Flip last bit of mac address if we're on second port */ + if (hw->bus.func == E1000_FUNC_1) + hw->mac.perm_addr[5] ^= 1; + + for (i = 0; i < ETH_ADDR_LEN; i++) + hw->mac.addr[i] = hw->mac.perm_addr[i]; + +out: + return ret_val; +} diff --git a/src/spdk/dpdk/drivers/net/e1000/base/e1000_82541.c b/src/spdk/dpdk/drivers/net/e1000/base/e1000_82541.c new file mode 100644 index 000000000..eb80873f5 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/e1000/base/e1000_82541.c @@ -0,0 +1,1239 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001 - 2015 Intel Corporation + */ + +/* + * 82541EI Gigabit Ethernet Controller + * 82541ER Gigabit Ethernet Controller + * 82541GI Gigabit Ethernet Controller + * 82541PI Gigabit Ethernet Controller + * 82547EI Gigabit Ethernet Controller + * 82547GI Gigabit Ethernet Controller + */ + +#include "e1000_api.h" + +STATIC s32 e1000_init_phy_params_82541(struct e1000_hw *hw); +STATIC s32 e1000_init_nvm_params_82541(struct e1000_hw *hw); +STATIC s32 e1000_init_mac_params_82541(struct e1000_hw *hw); +STATIC s32 e1000_reset_hw_82541(struct e1000_hw *hw); +STATIC s32 e1000_init_hw_82541(struct e1000_hw *hw); +STATIC s32 e1000_get_link_up_info_82541(struct e1000_hw *hw, u16 *speed, + u16 *duplex); +STATIC s32 e1000_phy_hw_reset_82541(struct e1000_hw *hw); +STATIC s32 e1000_setup_copper_link_82541(struct e1000_hw *hw); +STATIC s32 e1000_check_for_link_82541(struct e1000_hw *hw); +STATIC s32 e1000_get_cable_length_igp_82541(struct e1000_hw *hw); +STATIC s32 e1000_set_d3_lplu_state_82541(struct e1000_hw *hw, + bool active); 
+STATIC s32 e1000_setup_led_82541(struct e1000_hw *hw); +STATIC s32 e1000_cleanup_led_82541(struct e1000_hw *hw); +STATIC void e1000_clear_hw_cntrs_82541(struct e1000_hw *hw); +STATIC s32 e1000_config_dsp_after_link_change_82541(struct e1000_hw *hw, + bool link_up); +STATIC s32 e1000_phy_init_script_82541(struct e1000_hw *hw); +STATIC void e1000_power_down_phy_copper_82541(struct e1000_hw *hw); + +STATIC const u16 e1000_igp_cable_length_table[] = { + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 10, 10, 10, 10, 10, + 10, 10, 20, 20, 20, 20, 20, 25, 25, 25, 25, 25, 25, 25, 30, 30, 30, 30, + 40, 40, 40, 40, 40, 40, 40, 40, 40, 50, 50, 50, 50, 50, 50, 50, 60, 60, + 60, 60, 60, 60, 60, 60, 60, 70, 70, 70, 70, 70, 70, 80, 80, 80, 80, 80, + 80, 90, 90, 90, 90, 90, 90, 90, 90, 90, 100, 100, 100, 100, 100, 100, + 100, 100, 100, 100, 100, 100, 100, 100, 110, 110, 110, 110, 110, 110, + 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 120, 120, + 120, 120, 120, 120, 120, 120, 120, 120}; +#define IGP01E1000_AGC_LENGTH_TABLE_SIZE \ + (sizeof(e1000_igp_cable_length_table) / \ + sizeof(e1000_igp_cable_length_table[0])) + +/** + * e1000_init_phy_params_82541 - Init PHY func ptrs. + * @hw: pointer to the HW structure + **/ +STATIC s32 e1000_init_phy_params_82541(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + + DEBUGFUNC("e1000_init_phy_params_82541"); + + phy->addr = 1; + phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; + phy->reset_delay_us = 10000; + phy->type = e1000_phy_igp; + + /* Function Pointers */ + phy->ops.check_polarity = e1000_check_polarity_igp; + phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp; + phy->ops.get_cable_length = e1000_get_cable_length_igp_82541; + phy->ops.get_cfg_done = e1000_get_cfg_done_generic; + phy->ops.get_info = e1000_get_phy_info_igp; + phy->ops.read_reg = e1000_read_phy_reg_igp; + phy->ops.reset = e1000_phy_hw_reset_82541; + phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_82541; + phy->ops.write_reg = e1000_write_phy_reg_igp; + phy->ops.power_up = e1000_power_up_phy_copper; + phy->ops.power_down = e1000_power_down_phy_copper_82541; + + ret_val = e1000_get_phy_id(hw); + if (ret_val) + goto out; + + /* Verify phy id */ + if (phy->id != IGP01E1000_I_PHY_ID) { + ret_val = -E1000_ERR_PHY; + goto out; + } + +out: + return ret_val; +} + +/** + * e1000_init_nvm_params_82541 - Init NVM func ptrs. + * @hw: pointer to the HW structure + **/ +STATIC s32 e1000_init_nvm_params_82541(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + s32 ret_val = E1000_SUCCESS; + u32 eecd = E1000_READ_REG(hw, E1000_EECD); + u16 size; + + DEBUGFUNC("e1000_init_nvm_params_82541"); + + switch (nvm->override) { + case e1000_nvm_override_spi_large: + nvm->type = e1000_nvm_eeprom_spi; + eecd |= E1000_EECD_ADDR_BITS; + break; + case e1000_nvm_override_spi_small: + nvm->type = e1000_nvm_eeprom_spi; + eecd &= ~E1000_EECD_ADDR_BITS; + break; + case e1000_nvm_override_microwire_large: + nvm->type = e1000_nvm_eeprom_microwire; + eecd |= E1000_EECD_SIZE; + break; + case e1000_nvm_override_microwire_small: + nvm->type = e1000_nvm_eeprom_microwire; + eecd &= ~E1000_EECD_SIZE; + break; + default: + nvm->type = eecd & E1000_EECD_TYPE ? e1000_nvm_eeprom_spi + : e1000_nvm_eeprom_microwire; + break; + } + + if (nvm->type == e1000_nvm_eeprom_spi) { + nvm->address_bits = (eecd & E1000_EECD_ADDR_BITS) ? 16 : 8; + nvm->delay_usec = 1; + nvm->opcode_bits = 8; + nvm->page_size = (eecd & E1000_EECD_ADDR_BITS) ? 
32 : 8; + + /* Function Pointers */ + nvm->ops.acquire = e1000_acquire_nvm_generic; + nvm->ops.read = e1000_read_nvm_spi; + nvm->ops.release = e1000_release_nvm_generic; + nvm->ops.update = e1000_update_nvm_checksum_generic; + nvm->ops.valid_led_default = e1000_valid_led_default_generic; + nvm->ops.validate = e1000_validate_nvm_checksum_generic; + nvm->ops.write = e1000_write_nvm_spi; + + /* + * nvm->word_size must be discovered after the pointers + * are set so we can verify the size from the nvm image + * itself. Temporarily set it to a dummy value so the + * read will work. + */ + nvm->word_size = 64; + ret_val = nvm->ops.read(hw, NVM_CFG, 1, &size); + if (ret_val) + goto out; + size = (size & NVM_SIZE_MASK) >> NVM_SIZE_SHIFT; + /* + * if size != 0, it can be added to a constant and become + * the left-shift value to set the word_size. Otherwise, + * word_size stays at 64. + */ + if (size) { + size += NVM_WORD_SIZE_BASE_SHIFT_82541; + nvm->word_size = 1 << size; + } + } else { + nvm->address_bits = (eecd & E1000_EECD_ADDR_BITS) ? 8 : 6; + nvm->delay_usec = 50; + nvm->opcode_bits = 3; + nvm->word_size = (eecd & E1000_EECD_ADDR_BITS) ? 256 : 64; + + /* Function Pointers */ + nvm->ops.acquire = e1000_acquire_nvm_generic; + nvm->ops.read = e1000_read_nvm_microwire; + nvm->ops.release = e1000_release_nvm_generic; + nvm->ops.update = e1000_update_nvm_checksum_generic; + nvm->ops.valid_led_default = e1000_valid_led_default_generic; + nvm->ops.validate = e1000_validate_nvm_checksum_generic; + nvm->ops.write = e1000_write_nvm_microwire; + } + +out: + return ret_val; +} + +/** + * e1000_init_mac_params_82541 - Init MAC func ptrs. + * @hw: pointer to the HW structure + **/ +STATIC s32 e1000_init_mac_params_82541(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + + DEBUGFUNC("e1000_init_mac_params_82541"); + + /* Set media type */ + hw->phy.media_type = e1000_media_type_copper; + /* Set mta register count */ + mac->mta_reg_count = 128; + /* Set rar entry count */ + mac->rar_entry_count = E1000_RAR_ENTRIES; + /* Set if part includes ASF firmware */ + mac->asf_firmware_present = true; + + /* Function Pointers */ + + /* bus type/speed/width */ + mac->ops.get_bus_info = e1000_get_bus_info_pci_generic; + /* function id */ + mac->ops.set_lan_id = e1000_set_lan_id_single_port; + /* reset */ + mac->ops.reset_hw = e1000_reset_hw_82541; + /* hw initialization */ + mac->ops.init_hw = e1000_init_hw_82541; + /* link setup */ + mac->ops.setup_link = e1000_setup_link_generic; + /* physical interface link setup */ + mac->ops.setup_physical_interface = e1000_setup_copper_link_82541; + /* check for link */ + mac->ops.check_for_link = e1000_check_for_link_82541; + /* link info */ + mac->ops.get_link_up_info = e1000_get_link_up_info_82541; + /* multicast address update */ + mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic; + /* writing VFTA */ + mac->ops.write_vfta = e1000_write_vfta_generic; + /* clearing VFTA */ + mac->ops.clear_vfta = e1000_clear_vfta_generic; + /* ID LED init */ + mac->ops.id_led_init = e1000_id_led_init_generic; + /* setup LED */ + mac->ops.setup_led = e1000_setup_led_82541; + /* cleanup LED */ + mac->ops.cleanup_led = e1000_cleanup_led_82541; + /* turn on/off LED */ + mac->ops.led_on = e1000_led_on_generic; + mac->ops.led_off = e1000_led_off_generic; + /* clear hardware counters */ + mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_82541; + + return E1000_SUCCESS; +} + +/** + * e1000_init_function_pointers_82541 - Init func ptrs. 
+ * @hw: pointer to the HW structure + * + * Called to initialize all function pointers and parameters. + **/ +void e1000_init_function_pointers_82541(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_init_function_pointers_82541"); + + hw->mac.ops.init_params = e1000_init_mac_params_82541; + hw->nvm.ops.init_params = e1000_init_nvm_params_82541; + hw->phy.ops.init_params = e1000_init_phy_params_82541; +} + +/** + * e1000_reset_hw_82541 - Reset hardware + * @hw: pointer to the HW structure + * + * This resets the hardware into a known state. + **/ +STATIC s32 e1000_reset_hw_82541(struct e1000_hw *hw) +{ + u32 ledctl, ctrl, manc; + + DEBUGFUNC("e1000_reset_hw_82541"); + + DEBUGOUT("Masking off all interrupts\n"); + E1000_WRITE_REG(hw, E1000_IMC, 0xFFFFFFFF); + + E1000_WRITE_REG(hw, E1000_RCTL, 0); + E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP); + E1000_WRITE_FLUSH(hw); + + /* + * Delay to allow any outstanding PCI transactions to complete + * before resetting the device. + */ + msec_delay(10); + + ctrl = E1000_READ_REG(hw, E1000_CTRL); + + /* Must reset the Phy before resetting the MAC */ + if ((hw->mac.type == e1000_82541) || (hw->mac.type == e1000_82547)) { + E1000_WRITE_REG(hw, E1000_CTRL, (ctrl | E1000_CTRL_PHY_RST)); + E1000_WRITE_FLUSH(hw); + msec_delay(5); + } + + DEBUGOUT("Issuing a global reset to 82541/82547 MAC\n"); + switch (hw->mac.type) { + case e1000_82541: + case e1000_82541_rev_2: + /* + * These controllers can't ack the 64-bit write when + * issuing the reset, so we use IO-mapping as a + * workaround to issue the reset. + */ + E1000_WRITE_REG_IO(hw, E1000_CTRL, ctrl | E1000_CTRL_RST); + break; + default: + E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_RST); + break; + } + + /* Wait for NVM reload */ + msec_delay(20); + + /* Disable HW ARPs on ASF enabled adapters */ + manc = E1000_READ_REG(hw, E1000_MANC); + manc &= ~E1000_MANC_ARP_EN; + E1000_WRITE_REG(hw, E1000_MANC, manc); + + if ((hw->mac.type == e1000_82541) || (hw->mac.type == e1000_82547)) { + e1000_phy_init_script_82541(hw); + + /* Configure activity LED after Phy reset */ + ledctl = E1000_READ_REG(hw, E1000_LEDCTL); + ledctl &= IGP_ACTIVITY_LED_MASK; + ledctl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE); + E1000_WRITE_REG(hw, E1000_LEDCTL, ledctl); + } + + /* Once again, mask the interrupts */ + DEBUGOUT("Masking off all interrupts\n"); + E1000_WRITE_REG(hw, E1000_IMC, 0xFFFFFFFF); + + /* Clear any pending interrupt events. */ + E1000_READ_REG(hw, E1000_ICR); + + return E1000_SUCCESS; +} + +/** + * e1000_init_hw_82541 - Initialize hardware + * @hw: pointer to the HW structure + * + * This inits the hardware readying it for operation. + **/ +STATIC s32 e1000_init_hw_82541(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + struct e1000_dev_spec_82541 *dev_spec = &hw->dev_spec._82541; + u32 i, txdctl; + s32 ret_val; + + DEBUGFUNC("e1000_init_hw_82541"); + + /* Initialize identification LED */ + ret_val = mac->ops.id_led_init(hw); + if (ret_val) { + DEBUGOUT("Error initializing identification LED\n"); + /* This is not fatal and we should not stop init due to this */ + } + + /* Storing the Speed Power Down value for later use */ + ret_val = hw->phy.ops.read_reg(hw, IGP01E1000_GMII_FIFO, + &dev_spec->spd_default); + if (ret_val) + goto out; + + /* Disabling VLAN filtering */ + DEBUGOUT("Initializing the IEEE VLAN\n"); + mac->ops.clear_vfta(hw); + + /* Setup the receive address. 
*/ + e1000_init_rx_addrs_generic(hw, mac->rar_entry_count); + + /* Zero out the Multicast HASH table */ + DEBUGOUT("Zeroing the MTA\n"); + for (i = 0; i < mac->mta_reg_count; i++) { + E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); + /* + * Avoid back to back register writes by adding the register + * read (flush). This is to protect against some strange + * bridge configurations that may issue Memory Write Block + * (MWB) to our register space. + */ + E1000_WRITE_FLUSH(hw); + } + + /* Setup link and flow control */ + ret_val = mac->ops.setup_link(hw); + + txdctl = E1000_READ_REG(hw, E1000_TXDCTL(0)); + txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB; + E1000_WRITE_REG(hw, E1000_TXDCTL(0), txdctl); + + /* + * Clear all of the statistics registers (clear on read). It is + * important that we do this after we have tried to establish link + * because the symbol error count will increment wildly if there + * is no link. + */ + e1000_clear_hw_cntrs_82541(hw); + +out: + return ret_val; +} + +/** + * e1000_get_link_up_info_82541 - Report speed and duplex + * @hw: pointer to the HW structure + * @speed: pointer to speed buffer + * @duplex: pointer to duplex buffer + * + * Retrieve the current speed and duplex configuration. + **/ +STATIC s32 e1000_get_link_up_info_82541(struct e1000_hw *hw, u16 *speed, + u16 *duplex) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + + DEBUGFUNC("e1000_get_link_up_info_82541"); + + ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed, duplex); + if (ret_val) + goto out; + + if (!phy->speed_downgraded) + goto out; + + /* + * IGP01 PHY may advertise full duplex operation after speed + * downgrade even if it is operating at half duplex. + * Here we set the duplex settings to match the duplex in the + * link partner's capabilities. + */ + ret_val = phy->ops.read_reg(hw, PHY_AUTONEG_EXP, &data); + if (ret_val) + goto out; + + if (!(data & NWAY_ER_LP_NWAY_CAPS)) { + *duplex = HALF_DUPLEX; + } else { + ret_val = phy->ops.read_reg(hw, PHY_LP_ABILITY, &data); + if (ret_val) + goto out; + + if (*speed == SPEED_100) { + if (!(data & NWAY_LPAR_100TX_FD_CAPS)) + *duplex = HALF_DUPLEX; + } else if (*speed == SPEED_10) { + if (!(data & NWAY_LPAR_10T_FD_CAPS)) + *duplex = HALF_DUPLEX; + } + } + +out: + return ret_val; +} + +/** + * e1000_phy_hw_reset_82541 - PHY hardware reset + * @hw: pointer to the HW structure + * + * Verify the reset block is not blocking us from resetting. Acquire + * semaphore (if necessary) and read/set/write the device control reset + * bit in the PHY. Wait the appropriate delay time for the device to + * reset and release the semaphore (if necessary). + **/ +STATIC s32 e1000_phy_hw_reset_82541(struct e1000_hw *hw) +{ + s32 ret_val; + u32 ledctl; + + DEBUGFUNC("e1000_phy_hw_reset_82541"); + + ret_val = e1000_phy_hw_reset_generic(hw); + if (ret_val) + goto out; + + e1000_phy_init_script_82541(hw); + + if ((hw->mac.type == e1000_82541) || (hw->mac.type == e1000_82547)) { + /* Configure activity LED after PHY reset */ + ledctl = E1000_READ_REG(hw, E1000_LEDCTL); + ledctl &= IGP_ACTIVITY_LED_MASK; + ledctl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE); + E1000_WRITE_REG(hw, E1000_LEDCTL, ledctl); + } + +out: + return ret_val; +} + +/** + * e1000_setup_copper_link_82541 - Configure copper link settings + * @hw: pointer to the HW structure + * + * Calls the appropriate function to configure the link for auto-neg or forced + * speed and duplex. 
Then we check for link, once link is established calls + * to configure collision distance and flow control are called. If link is + * not established, we return -E1000_ERR_PHY (-2). + **/ +STATIC s32 e1000_setup_copper_link_82541(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + struct e1000_dev_spec_82541 *dev_spec = &hw->dev_spec._82541; + s32 ret_val; + u32 ctrl, ledctl; + + DEBUGFUNC("e1000_setup_copper_link_82541"); + + ctrl = E1000_READ_REG(hw, E1000_CTRL); + ctrl |= E1000_CTRL_SLU; + ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + + + /* Earlier revs of the IGP phy require us to force MDI. */ + if (hw->mac.type == e1000_82541 || hw->mac.type == e1000_82547) { + dev_spec->dsp_config = e1000_dsp_config_disabled; + phy->mdix = 1; + } else { + dev_spec->dsp_config = e1000_dsp_config_enabled; + } + + ret_val = e1000_copper_link_setup_igp(hw); + if (ret_val) + goto out; + + if (hw->mac.autoneg) { + if (dev_spec->ffe_config == e1000_ffe_config_active) + dev_spec->ffe_config = e1000_ffe_config_enabled; + } + + /* Configure activity LED after Phy reset */ + ledctl = E1000_READ_REG(hw, E1000_LEDCTL); + ledctl &= IGP_ACTIVITY_LED_MASK; + ledctl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE); + E1000_WRITE_REG(hw, E1000_LEDCTL, ledctl); + + ret_val = e1000_setup_copper_link_generic(hw); + +out: + return ret_val; +} + +/** + * e1000_check_for_link_82541 - Check/Store link connection + * @hw: pointer to the HW structure + * + * This checks the link condition of the adapter and stores the + * results in the hw->mac structure. + **/ +STATIC s32 e1000_check_for_link_82541(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val; + bool link; + + DEBUGFUNC("e1000_check_for_link_82541"); + + /* + * We only want to go out to the PHY registers to see if Auto-Neg + * has completed and/or if our link status has changed. The + * get_link_status flag is set upon receiving a Link Status + * Change or Rx Sequence Error interrupt. + */ + if (!mac->get_link_status) { + ret_val = E1000_SUCCESS; + goto out; + } + + /* + * First we want to see if the MII Status Register reports + * link. If so, then we want to get the current speed/duplex + * of the PHY. + */ + ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link); + if (ret_val) + goto out; + + if (!link) { + ret_val = e1000_config_dsp_after_link_change_82541(hw, false); + goto out; /* No link detected */ + } + + mac->get_link_status = false; + + /* + * Check if there was DownShift, must be checked + * immediately after link-up + */ + e1000_check_downshift_generic(hw); + + /* + * If we are forcing speed/duplex, then we simply return since + * we have already determined whether we have link or not. + */ + if (!mac->autoneg) { + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + ret_val = e1000_config_dsp_after_link_change_82541(hw, true); + + /* + * Auto-Neg is enabled. Auto Speed Detection takes care + * of MAC speed/duplex configuration. So we only need to + * configure Collision Distance in the MAC. + */ + mac->ops.config_collision_dist(hw); + + /* + * Configure Flow Control now that Auto-Neg has completed. + * First, we need to restore the desired flow control + * settings because we may have had to re-autoneg with a + * different link partner. 
+ */ + ret_val = e1000_config_fc_after_link_up_generic(hw); + if (ret_val) + DEBUGOUT("Error configuring flow control\n"); + +out: + return ret_val; +} + +/** + * e1000_config_dsp_after_link_change_82541 - Config DSP after link + * @hw: pointer to the HW structure + * @link_up: boolean flag for link up status + * + * Return E1000_ERR_PHY when failing to read/write the PHY, else E1000_SUCCESS + * at any other case. + * + * 82541_rev_2 & 82547_rev_2 have the capability to configure the DSP when a + * gigabit link is achieved to improve link quality. + **/ +STATIC s32 e1000_config_dsp_after_link_change_82541(struct e1000_hw *hw, + bool link_up) +{ + struct e1000_phy_info *phy = &hw->phy; + struct e1000_dev_spec_82541 *dev_spec = &hw->dev_spec._82541; + s32 ret_val; + u32 idle_errs = 0; + u16 phy_data, phy_saved_data, speed, duplex, i; + u16 ffe_idle_err_timeout = FFE_IDLE_ERR_COUNT_TIMEOUT_20; + u16 dsp_reg_array[IGP01E1000_PHY_CHANNEL_NUM] = { + IGP01E1000_PHY_AGC_PARAM_A, + IGP01E1000_PHY_AGC_PARAM_B, + IGP01E1000_PHY_AGC_PARAM_C, + IGP01E1000_PHY_AGC_PARAM_D}; + + DEBUGFUNC("e1000_config_dsp_after_link_change_82541"); + + if (link_up) { + ret_val = hw->mac.ops.get_link_up_info(hw, &speed, &duplex); + if (ret_val) { + DEBUGOUT("Error getting link speed and duplex\n"); + goto out; + } + + if (speed != SPEED_1000) { + ret_val = E1000_SUCCESS; + goto out; + } + + ret_val = phy->ops.get_cable_length(hw); + if (ret_val) + goto out; + + if ((dev_spec->dsp_config == e1000_dsp_config_enabled) && + phy->min_cable_length >= 50) { + + for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) { + ret_val = phy->ops.read_reg(hw, + dsp_reg_array[i], + &phy_data); + if (ret_val) + goto out; + + phy_data &= ~IGP01E1000_PHY_EDAC_MU_INDEX; + + ret_val = phy->ops.write_reg(hw, + dsp_reg_array[i], + phy_data); + if (ret_val) + goto out; + } + dev_spec->dsp_config = e1000_dsp_config_activated; + } + + if ((dev_spec->ffe_config != e1000_ffe_config_enabled) || + (phy->min_cable_length >= 50)) { + ret_val = E1000_SUCCESS; + goto out; + } + + /* clear previous idle error counts */ + ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &phy_data); + if (ret_val) + goto out; + + for (i = 0; i < ffe_idle_err_timeout; i++) { + usec_delay(1000); + ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, + &phy_data); + if (ret_val) + goto out; + + idle_errs += (phy_data & SR_1000T_IDLE_ERROR_CNT); + if (idle_errs > SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT) { + dev_spec->ffe_config = e1000_ffe_config_active; + + ret_val = phy->ops.write_reg(hw, + IGP01E1000_PHY_DSP_FFE, + IGP01E1000_PHY_DSP_FFE_CM_CP); + if (ret_val) + goto out; + break; + } + + if (idle_errs) + ffe_idle_err_timeout = + FFE_IDLE_ERR_COUNT_TIMEOUT_100; + } + } else { + if (dev_spec->dsp_config == e1000_dsp_config_activated) { + /* + * Save off the current value of register 0x2F5B + * to be restored at the end of the routines. 
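+ * Writing 0x0003 to register 0x2F5B below disables the PHY
+ * transmitter while IGP01E1000_IEEE_FORCE_GIG and the per-channel
+ * DSP registers are rewritten; the saved value is written back at
+ * the end of the sequence to re-enable the transmitter.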
+ */ + ret_val = phy->ops.read_reg(hw, 0x2F5B, + &phy_saved_data); + if (ret_val) + goto out; + + /* Disable the PHY transmitter */ + ret_val = phy->ops.write_reg(hw, 0x2F5B, 0x0003); + if (ret_val) + goto out; + + msec_delay_irq(20); + + ret_val = phy->ops.write_reg(hw, 0x0000, + IGP01E1000_IEEE_FORCE_GIG); + if (ret_val) + goto out; + for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) { + ret_val = phy->ops.read_reg(hw, + dsp_reg_array[i], + &phy_data); + if (ret_val) + goto out; + + phy_data &= ~IGP01E1000_PHY_EDAC_MU_INDEX; + phy_data |= IGP01E1000_PHY_EDAC_SIGN_EXT_9_BITS; + + ret_val = phy->ops.write_reg(hw, + dsp_reg_array[i], + phy_data); + if (ret_val) + goto out; + } + + ret_val = phy->ops.write_reg(hw, 0x0000, + IGP01E1000_IEEE_RESTART_AUTONEG); + if (ret_val) + goto out; + + msec_delay_irq(20); + + /* Now enable the transmitter */ + ret_val = phy->ops.write_reg(hw, 0x2F5B, + phy_saved_data); + if (ret_val) + goto out; + + dev_spec->dsp_config = e1000_dsp_config_enabled; + } + + if (dev_spec->ffe_config != e1000_ffe_config_active) { + ret_val = E1000_SUCCESS; + goto out; + } + + /* + * Save off the current value of register 0x2F5B + * to be restored at the end of the routines. + */ + ret_val = phy->ops.read_reg(hw, 0x2F5B, &phy_saved_data); + if (ret_val) + goto out; + + /* Disable the PHY transmitter */ + ret_val = phy->ops.write_reg(hw, 0x2F5B, 0x0003); + if (ret_val) + goto out; + + msec_delay_irq(20); + + ret_val = phy->ops.write_reg(hw, 0x0000, + IGP01E1000_IEEE_FORCE_GIG); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_DSP_FFE, + IGP01E1000_PHY_DSP_FFE_DEFAULT); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, 0x0000, + IGP01E1000_IEEE_RESTART_AUTONEG); + if (ret_val) + goto out; + + msec_delay_irq(20); + + /* Now enable the transmitter */ + ret_val = phy->ops.write_reg(hw, 0x2F5B, phy_saved_data); + + if (ret_val) + goto out; + + dev_spec->ffe_config = e1000_ffe_config_enabled; + } + +out: + return ret_val; +} + +/** + * e1000_get_cable_length_igp_82541 - Determine cable length for igp PHY + * @hw: pointer to the HW structure + * + * The automatic gain control (agc) normalizes the amplitude of the + * received signal, adjusting for the attenuation produced by the + * cable. By reading the AGC registers, which represent the + * combination of coarse and fine gain value, the value can be put + * into a lookup table to obtain the approximate cable length + * for each channel. 
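+ * Each AGC register value is shifted right by IGP01E1000_AGC_LENGTH_SHIFT
+ * to index e1000_igp_cable_length_table. The four channel indices are
+ * averaged (the lowest channel is dropped for lengths below 50 m), and
+ * the length is reported as a +/- IGP01E1000_AGC_RANGE window around the
+ * table entry; for example, a table entry of 70 meters yields
+ * min_cable_length = 60, max_cable_length = 80 and cable_length = 70.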
+ **/ +STATIC s32 e1000_get_cable_length_igp_82541(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = E1000_SUCCESS; + u16 i, data; + u16 cur_agc_value, agc_value = 0; + u16 min_agc_value = IGP01E1000_AGC_LENGTH_TABLE_SIZE; + u16 agc_reg_array[IGP01E1000_PHY_CHANNEL_NUM] = {IGP01E1000_PHY_AGC_A, + IGP01E1000_PHY_AGC_B, + IGP01E1000_PHY_AGC_C, + IGP01E1000_PHY_AGC_D}; + + DEBUGFUNC("e1000_get_cable_length_igp_82541"); + + /* Read the AGC registers for all channels */ + for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) { + ret_val = phy->ops.read_reg(hw, agc_reg_array[i], &data); + if (ret_val) + goto out; + + cur_agc_value = data >> IGP01E1000_AGC_LENGTH_SHIFT; + + /* Bounds checking */ + if ((cur_agc_value >= IGP01E1000_AGC_LENGTH_TABLE_SIZE - 1) || + (cur_agc_value == 0)) { + ret_val = -E1000_ERR_PHY; + goto out; + } + + agc_value += cur_agc_value; + + if (min_agc_value > cur_agc_value) + min_agc_value = cur_agc_value; + } + + /* Remove the minimal AGC result for length < 50m */ + if (agc_value < IGP01E1000_PHY_CHANNEL_NUM * 50) { + agc_value -= min_agc_value; + /* Average the three remaining channels for the length. */ + agc_value /= (IGP01E1000_PHY_CHANNEL_NUM - 1); + } else { + /* Average the channels for the length. */ + agc_value /= IGP01E1000_PHY_CHANNEL_NUM; + } + + phy->min_cable_length = (e1000_igp_cable_length_table[agc_value] > + IGP01E1000_AGC_RANGE) + ? (e1000_igp_cable_length_table[agc_value] - + IGP01E1000_AGC_RANGE) + : 0; + phy->max_cable_length = e1000_igp_cable_length_table[agc_value] + + IGP01E1000_AGC_RANGE; + + phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; + +out: + return ret_val; +} + +/** + * e1000_set_d3_lplu_state_82541 - Sets low power link up state for D3 + * @hw: pointer to the HW structure + * @active: boolean used to enable/disable lplu + * + * Success returns 0, Failure returns 1 + * + * The low power link up (lplu) state is set to the power management level D3 + * and SmartSpeed is disabled when active is true, else clear lplu for D3 + * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU + * is used during Dx states where the power conservation is most important. + * During driver activity, SmartSpeed should be enabled so performance is + * maintained. + **/ +STATIC s32 e1000_set_d3_lplu_state_82541(struct e1000_hw *hw, bool active) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + + DEBUGFUNC("e1000_set_d3_lplu_state_82541"); + + switch (hw->mac.type) { + case e1000_82541_rev_2: + case e1000_82547_rev_2: + break; + default: + ret_val = e1000_set_d3_lplu_state_generic(hw, active); + goto out; + break; + } + + ret_val = phy->ops.read_reg(hw, IGP01E1000_GMII_FIFO, &data); + if (ret_val) + goto out; + + if (!active) { + data &= ~IGP01E1000_GMII_FLEX_SPD; + ret_val = phy->ops.write_reg(hw, IGP01E1000_GMII_FIFO, data); + if (ret_val) + goto out; + + /* + * LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. 
+ */ + if (phy->smart_speed == e1000_smart_speed_on) { + ret_val = phy->ops.read_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + goto out; + + data |= IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + goto out; + } else if (phy->smart_speed == e1000_smart_speed_off) { + ret_val = phy->ops.read_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + goto out; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + goto out; + } + } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) || + (phy->autoneg_advertised == E1000_ALL_NOT_GIG) || + (phy->autoneg_advertised == E1000_ALL_10_SPEED)) { + data |= IGP01E1000_GMII_FLEX_SPD; + ret_val = phy->ops.write_reg(hw, IGP01E1000_GMII_FIFO, data); + if (ret_val) + goto out; + + /* When LPLU is enabled, we should disable SmartSpeed */ + ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + goto out; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + data); + } + +out: + return ret_val; +} + +/** + * e1000_setup_led_82541 - Configures SW controllable LED + * @hw: pointer to the HW structure + * + * This prepares the SW controllable LED for use and saves the current state + * of the LED so it can be later restored. + **/ +STATIC s32 e1000_setup_led_82541(struct e1000_hw *hw) +{ + struct e1000_dev_spec_82541 *dev_spec = &hw->dev_spec._82541; + s32 ret_val; + + DEBUGFUNC("e1000_setup_led_82541"); + + ret_val = hw->phy.ops.read_reg(hw, IGP01E1000_GMII_FIFO, + &dev_spec->spd_default); + if (ret_val) + goto out; + + ret_val = hw->phy.ops.write_reg(hw, IGP01E1000_GMII_FIFO, + (u16)(dev_spec->spd_default & + ~IGP01E1000_GMII_SPD)); + if (ret_val) + goto out; + + E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1); + +out: + return ret_val; +} + +/** + * e1000_cleanup_led_82541 - Set LED config to default operation + * @hw: pointer to the HW structure + * + * Remove the current LED configuration and set the LED configuration + * to the default value, saved from the EEPROM. + **/ +STATIC s32 e1000_cleanup_led_82541(struct e1000_hw *hw) +{ + struct e1000_dev_spec_82541 *dev_spec = &hw->dev_spec._82541; + s32 ret_val; + + DEBUGFUNC("e1000_cleanup_led_82541"); + + ret_val = hw->phy.ops.write_reg(hw, IGP01E1000_GMII_FIFO, + dev_spec->spd_default); + if (ret_val) + goto out; + + E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default); + +out: + return ret_val; +} + +/** + * e1000_phy_init_script_82541 - Initialize GbE PHY + * @hw: pointer to the HW structure + * + * Initializes the IGP PHY. + **/ +STATIC s32 e1000_phy_init_script_82541(struct e1000_hw *hw) +{ + struct e1000_dev_spec_82541 *dev_spec = &hw->dev_spec._82541; + u32 ret_val; + u16 phy_saved_data; + + DEBUGFUNC("e1000_phy_init_script_82541"); + + if (!dev_spec->phy_init_script) { + ret_val = E1000_SUCCESS; + goto out; + } + + /* Delay after phy reset to enable NVM configuration to load */ + msec_delay(20); + + /* + * Save off the current value of register 0x2F5B to be restored at + * the end of this routine. 
+ */ + ret_val = hw->phy.ops.read_reg(hw, 0x2F5B, &phy_saved_data); + + /* Disabled the PHY transmitter */ + hw->phy.ops.write_reg(hw, 0x2F5B, 0x0003); + + msec_delay(20); + + hw->phy.ops.write_reg(hw, 0x0000, 0x0140); + + msec_delay(5); + + switch (hw->mac.type) { + case e1000_82541: + case e1000_82547: + hw->phy.ops.write_reg(hw, 0x1F95, 0x0001); + + hw->phy.ops.write_reg(hw, 0x1F71, 0xBD21); + + hw->phy.ops.write_reg(hw, 0x1F79, 0x0018); + + hw->phy.ops.write_reg(hw, 0x1F30, 0x1600); + + hw->phy.ops.write_reg(hw, 0x1F31, 0x0014); + + hw->phy.ops.write_reg(hw, 0x1F32, 0x161C); + + hw->phy.ops.write_reg(hw, 0x1F94, 0x0003); + + hw->phy.ops.write_reg(hw, 0x1F96, 0x003F); + + hw->phy.ops.write_reg(hw, 0x2010, 0x0008); + break; + case e1000_82541_rev_2: + case e1000_82547_rev_2: + hw->phy.ops.write_reg(hw, 0x1F73, 0x0099); + break; + default: + break; + } + + hw->phy.ops.write_reg(hw, 0x0000, 0x3300); + + msec_delay(20); + + /* Now enable the transmitter */ + hw->phy.ops.write_reg(hw, 0x2F5B, phy_saved_data); + + if (hw->mac.type == e1000_82547) { + u16 fused, fine, coarse; + + /* Move to analog registers page */ + hw->phy.ops.read_reg(hw, IGP01E1000_ANALOG_SPARE_FUSE_STATUS, + &fused); + + if (!(fused & IGP01E1000_ANALOG_SPARE_FUSE_ENABLED)) { + hw->phy.ops.read_reg(hw, IGP01E1000_ANALOG_FUSE_STATUS, + &fused); + + fine = fused & IGP01E1000_ANALOG_FUSE_FINE_MASK; + coarse = fused & IGP01E1000_ANALOG_FUSE_COARSE_MASK; + + if (coarse > IGP01E1000_ANALOG_FUSE_COARSE_THRESH) { + coarse -= IGP01E1000_ANALOG_FUSE_COARSE_10; + fine -= IGP01E1000_ANALOG_FUSE_FINE_1; + } else if (coarse == + IGP01E1000_ANALOG_FUSE_COARSE_THRESH) + fine -= IGP01E1000_ANALOG_FUSE_FINE_10; + + fused = (fused & IGP01E1000_ANALOG_FUSE_POLY_MASK) | + (fine & IGP01E1000_ANALOG_FUSE_FINE_MASK) | + (coarse & IGP01E1000_ANALOG_FUSE_COARSE_MASK); + + hw->phy.ops.write_reg(hw, + IGP01E1000_ANALOG_FUSE_CONTROL, + fused); + hw->phy.ops.write_reg(hw, + IGP01E1000_ANALOG_FUSE_BYPASS, + IGP01E1000_ANALOG_FUSE_ENABLE_SW_CONTROL); + } + } + +out: + return ret_val; +} + +/** + * e1000_init_script_state_82541 - Enable/Disable PHY init script + * @hw: pointer to the HW structure + * @state: boolean value used to enable/disable PHY init script + * + * Allows the driver to enable/disable the PHY init script, if the PHY is an + * IGP PHY. + **/ +void e1000_init_script_state_82541(struct e1000_hw *hw, bool state) +{ + struct e1000_dev_spec_82541 *dev_spec = &hw->dev_spec._82541; + + DEBUGFUNC("e1000_init_script_state_82541"); + + if (hw->phy.type != e1000_phy_igp) { + DEBUGOUT("Initialization script not necessary.\n"); + goto out; + } + + dev_spec->phy_init_script = state; + +out: + return; +} + +/** + * e1000_power_down_phy_copper_82541 - Remove link in case of PHY power down + * @hw: pointer to the HW structure + * + * In the case of a PHY power down to save power, or to turn off link during a + * driver unload, or wake on lan is not enabled, remove the link. + **/ +STATIC void e1000_power_down_phy_copper_82541(struct e1000_hw *hw) +{ + /* If the management interface is not enabled, then power down */ + if (!(E1000_READ_REG(hw, E1000_MANC) & E1000_MANC_SMBUS_EN)) + e1000_power_down_phy_copper(hw); + + return; +} + +/** + * e1000_clear_hw_cntrs_82541 - Clear device specific hardware counters + * @hw: pointer to the HW structure + * + * Clears the hardware counters by reading the counter registers. 
+ **/ +STATIC void e1000_clear_hw_cntrs_82541(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_clear_hw_cntrs_82541"); + + e1000_clear_hw_cntrs_base_generic(hw); + + E1000_READ_REG(hw, E1000_PRC64); + E1000_READ_REG(hw, E1000_PRC127); + E1000_READ_REG(hw, E1000_PRC255); + E1000_READ_REG(hw, E1000_PRC511); + E1000_READ_REG(hw, E1000_PRC1023); + E1000_READ_REG(hw, E1000_PRC1522); + E1000_READ_REG(hw, E1000_PTC64); + E1000_READ_REG(hw, E1000_PTC127); + E1000_READ_REG(hw, E1000_PTC255); + E1000_READ_REG(hw, E1000_PTC511); + E1000_READ_REG(hw, E1000_PTC1023); + E1000_READ_REG(hw, E1000_PTC1522); + + E1000_READ_REG(hw, E1000_ALGNERRC); + E1000_READ_REG(hw, E1000_RXERRC); + E1000_READ_REG(hw, E1000_TNCRS); + E1000_READ_REG(hw, E1000_CEXTERR); + E1000_READ_REG(hw, E1000_TSCTC); + E1000_READ_REG(hw, E1000_TSCTFC); + + E1000_READ_REG(hw, E1000_MGTPRC); + E1000_READ_REG(hw, E1000_MGTPDC); + E1000_READ_REG(hw, E1000_MGTPTC); +} diff --git a/src/spdk/dpdk/drivers/net/e1000/base/e1000_82541.h b/src/spdk/dpdk/drivers/net/e1000/base/e1000_82541.h new file mode 100644 index 000000000..2f343f182 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/e1000/base/e1000_82541.h @@ -0,0 +1,62 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001 - 2015 Intel Corporation + */ + +#ifndef _E1000_82541_H_ +#define _E1000_82541_H_ + +#define NVM_WORD_SIZE_BASE_SHIFT_82541 (NVM_WORD_SIZE_BASE_SHIFT + 1) + +#define IGP01E1000_PHY_CHANNEL_NUM 4 + +#define IGP01E1000_PHY_AGC_A 0x1172 +#define IGP01E1000_PHY_AGC_B 0x1272 +#define IGP01E1000_PHY_AGC_C 0x1472 +#define IGP01E1000_PHY_AGC_D 0x1872 + +#define IGP01E1000_PHY_AGC_PARAM_A 0x1171 +#define IGP01E1000_PHY_AGC_PARAM_B 0x1271 +#define IGP01E1000_PHY_AGC_PARAM_C 0x1471 +#define IGP01E1000_PHY_AGC_PARAM_D 0x1871 + +#define IGP01E1000_PHY_EDAC_MU_INDEX 0xC000 +#define IGP01E1000_PHY_EDAC_SIGN_EXT_9_BITS 0x8000 + +#define IGP01E1000_PHY_DSP_RESET 0x1F33 + +#define IGP01E1000_PHY_DSP_FFE 0x1F35 +#define IGP01E1000_PHY_DSP_FFE_CM_CP 0x0069 +#define IGP01E1000_PHY_DSP_FFE_DEFAULT 0x002A + +#define IGP01E1000_IEEE_FORCE_GIG 0x0140 +#define IGP01E1000_IEEE_RESTART_AUTONEG 0x3300 + +#define IGP01E1000_AGC_LENGTH_SHIFT 7 +#define IGP01E1000_AGC_RANGE 10 + +#define FFE_IDLE_ERR_COUNT_TIMEOUT_20 20 +#define FFE_IDLE_ERR_COUNT_TIMEOUT_100 100 + +#define IGP01E1000_ANALOG_FUSE_STATUS 0x20D0 +#define IGP01E1000_ANALOG_SPARE_FUSE_STATUS 0x20D1 +#define IGP01E1000_ANALOG_FUSE_CONTROL 0x20DC +#define IGP01E1000_ANALOG_FUSE_BYPASS 0x20DE + +#define IGP01E1000_ANALOG_SPARE_FUSE_ENABLED 0x0100 +#define IGP01E1000_ANALOG_FUSE_FINE_MASK 0x0F80 +#define IGP01E1000_ANALOG_FUSE_COARSE_MASK 0x0070 +#define IGP01E1000_ANALOG_FUSE_COARSE_THRESH 0x0040 +#define IGP01E1000_ANALOG_FUSE_COARSE_10 0x0010 +#define IGP01E1000_ANALOG_FUSE_FINE_1 0x0080 +#define IGP01E1000_ANALOG_FUSE_FINE_10 0x0500 +#define IGP01E1000_ANALOG_FUSE_POLY_MASK 0xF000 +#define IGP01E1000_ANALOG_FUSE_ENABLE_SW_CONTROL 0x0002 + +#define IGP01E1000_MSE_CHANNEL_D 0x000F +#define IGP01E1000_MSE_CHANNEL_C 0x00F0 +#define IGP01E1000_MSE_CHANNEL_B 0x0F00 +#define IGP01E1000_MSE_CHANNEL_A 0xF000 + + +void e1000_init_script_state_82541(struct e1000_hw *hw, bool state); +#endif diff --git a/src/spdk/dpdk/drivers/net/e1000/base/e1000_82542.c b/src/spdk/dpdk/drivers/net/e1000/base/e1000_82542.c new file mode 100644 index 000000000..9351ed722 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/e1000/base/e1000_82542.c @@ -0,0 +1,561 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001 - 2015 Intel Corporation + */ + +/* + * 82542 
Gigabit Ethernet Controller + */ + +#include "e1000_api.h" + +STATIC s32 e1000_init_phy_params_82542(struct e1000_hw *hw); +STATIC s32 e1000_init_nvm_params_82542(struct e1000_hw *hw); +STATIC s32 e1000_init_mac_params_82542(struct e1000_hw *hw); +STATIC s32 e1000_get_bus_info_82542(struct e1000_hw *hw); +STATIC s32 e1000_reset_hw_82542(struct e1000_hw *hw); +STATIC s32 e1000_init_hw_82542(struct e1000_hw *hw); +STATIC s32 e1000_setup_link_82542(struct e1000_hw *hw); +STATIC s32 e1000_led_on_82542(struct e1000_hw *hw); +STATIC s32 e1000_led_off_82542(struct e1000_hw *hw); +STATIC int e1000_rar_set_82542(struct e1000_hw *hw, u8 *addr, u32 index); +STATIC void e1000_clear_hw_cntrs_82542(struct e1000_hw *hw); +STATIC s32 e1000_read_mac_addr_82542(struct e1000_hw *hw); + +/** + * e1000_init_phy_params_82542 - Init PHY func ptrs. + * @hw: pointer to the HW structure + **/ +STATIC s32 e1000_init_phy_params_82542(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_init_phy_params_82542"); + + phy->type = e1000_phy_none; + + return ret_val; +} + +/** + * e1000_init_nvm_params_82542 - Init NVM func ptrs. + * @hw: pointer to the HW structure + **/ +STATIC s32 e1000_init_nvm_params_82542(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + + DEBUGFUNC("e1000_init_nvm_params_82542"); + + nvm->address_bits = 6; + nvm->delay_usec = 50; + nvm->opcode_bits = 3; + nvm->type = e1000_nvm_eeprom_microwire; + nvm->word_size = 64; + + /* Function Pointers */ + nvm->ops.read = e1000_read_nvm_microwire; + nvm->ops.release = e1000_stop_nvm; + nvm->ops.write = e1000_write_nvm_microwire; + nvm->ops.update = e1000_update_nvm_checksum_generic; + nvm->ops.validate = e1000_validate_nvm_checksum_generic; + + return E1000_SUCCESS; +} + +/** + * e1000_init_mac_params_82542 - Init MAC func ptrs. 
+ * @hw: pointer to the HW structure + **/ +STATIC s32 e1000_init_mac_params_82542(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + + DEBUGFUNC("e1000_init_mac_params_82542"); + + /* Set media type */ + hw->phy.media_type = e1000_media_type_fiber; + + /* Set mta register count */ + mac->mta_reg_count = 128; + /* Set rar entry count */ + mac->rar_entry_count = E1000_RAR_ENTRIES; + + /* Function pointers */ + + /* bus type/speed/width */ + mac->ops.get_bus_info = e1000_get_bus_info_82542; + /* function id */ + mac->ops.set_lan_id = e1000_set_lan_id_multi_port_pci; + /* reset */ + mac->ops.reset_hw = e1000_reset_hw_82542; + /* hw initialization */ + mac->ops.init_hw = e1000_init_hw_82542; + /* link setup */ + mac->ops.setup_link = e1000_setup_link_82542; + /* phy/fiber/serdes setup */ + mac->ops.setup_physical_interface = + e1000_setup_fiber_serdes_link_generic; + /* check for link */ + mac->ops.check_for_link = e1000_check_for_fiber_link_generic; + /* multicast address update */ + mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic; + /* writing VFTA */ + mac->ops.write_vfta = e1000_write_vfta_generic; + /* clearing VFTA */ + mac->ops.clear_vfta = e1000_clear_vfta_generic; + /* read mac address */ + mac->ops.read_mac_addr = e1000_read_mac_addr_82542; + /* set RAR */ + mac->ops.rar_set = e1000_rar_set_82542; + /* turn on/off LED */ + mac->ops.led_on = e1000_led_on_82542; + mac->ops.led_off = e1000_led_off_82542; + /* clear hardware counters */ + mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_82542; + /* link info */ + mac->ops.get_link_up_info = + e1000_get_speed_and_duplex_fiber_serdes_generic; + + return E1000_SUCCESS; +} + +/** + * e1000_init_function_pointers_82542 - Init func ptrs. + * @hw: pointer to the HW structure + * + * Called to initialize all function pointers and parameters. + **/ +void e1000_init_function_pointers_82542(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_init_function_pointers_82542"); + + hw->mac.ops.init_params = e1000_init_mac_params_82542; + hw->nvm.ops.init_params = e1000_init_nvm_params_82542; + hw->phy.ops.init_params = e1000_init_phy_params_82542; +} + +/** + * e1000_get_bus_info_82542 - Obtain bus information for adapter + * @hw: pointer to the HW structure + * + * This will obtain information about the HW bus for which the + * adapter is attached and stores it in the hw structure. + **/ +STATIC s32 e1000_get_bus_info_82542(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_get_bus_info_82542"); + + hw->bus.type = e1000_bus_type_pci; + hw->bus.speed = e1000_bus_speed_unknown; + hw->bus.width = e1000_bus_width_unknown; + + return E1000_SUCCESS; +} + +/** + * e1000_reset_hw_82542 - Reset hardware + * @hw: pointer to the HW structure + * + * This resets the hardware into a known state. 
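+ * On E1000_REVISION_2 parts, PCI Memory Write and Invalidate (MWI) is
+ * cleared before the reset and re-enabled afterwards only if the saved
+ * PCI command word has CMD_MEM_WRT_INVALIDATE set.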
+ **/ +STATIC s32 e1000_reset_hw_82542(struct e1000_hw *hw) +{ + struct e1000_bus_info *bus = &hw->bus; + s32 ret_val = E1000_SUCCESS; + u32 ctrl; + + DEBUGFUNC("e1000_reset_hw_82542"); + + if (hw->revision_id == E1000_REVISION_2) { + DEBUGOUT("Disabling MWI on 82542 rev 2\n"); + e1000_pci_clear_mwi(hw); + } + + DEBUGOUT("Masking off all interrupts\n"); + E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); + + E1000_WRITE_REG(hw, E1000_RCTL, 0); + E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP); + E1000_WRITE_FLUSH(hw); + + /* + * Delay to allow any outstanding PCI transactions to complete before + * resetting the device + */ + msec_delay(10); + + ctrl = E1000_READ_REG(hw, E1000_CTRL); + + DEBUGOUT("Issuing a global reset to 82542/82543 MAC\n"); + E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_RST); + + hw->nvm.ops.reload(hw); + msec_delay(2); + + E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); + E1000_READ_REG(hw, E1000_ICR); + + if (hw->revision_id == E1000_REVISION_2) { + if (bus->pci_cmd_word & CMD_MEM_WRT_INVALIDATE) + e1000_pci_set_mwi(hw); + } + + return ret_val; +} + +/** + * e1000_init_hw_82542 - Initialize hardware + * @hw: pointer to the HW structure + * + * This inits the hardware readying it for operation. + **/ +STATIC s32 e1000_init_hw_82542(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + struct e1000_dev_spec_82542 *dev_spec = &hw->dev_spec._82542; + s32 ret_val = E1000_SUCCESS; + u32 ctrl; + u16 i; + + DEBUGFUNC("e1000_init_hw_82542"); + + /* Disabling VLAN filtering */ + E1000_WRITE_REG(hw, E1000_VET, 0); + mac->ops.clear_vfta(hw); + + /* For 82542 (rev 2.0), disable MWI and put the receiver into reset */ + if (hw->revision_id == E1000_REVISION_2) { + DEBUGOUT("Disabling MWI on 82542 rev 2.0\n"); + e1000_pci_clear_mwi(hw); + E1000_WRITE_REG(hw, E1000_RCTL, E1000_RCTL_RST); + E1000_WRITE_FLUSH(hw); + msec_delay(5); + } + + /* Setup the receive address. */ + e1000_init_rx_addrs_generic(hw, mac->rar_entry_count); + + /* For 82542 (rev 2.0), take the receiver out of reset and enable MWI */ + if (hw->revision_id == E1000_REVISION_2) { + E1000_WRITE_REG(hw, E1000_RCTL, 0); + E1000_WRITE_FLUSH(hw); + msec_delay(1); + if (hw->bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE) + e1000_pci_set_mwi(hw); + } + + /* Zero out the Multicast HASH table */ + DEBUGOUT("Zeroing the MTA\n"); + for (i = 0; i < mac->mta_reg_count; i++) + E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); + + /* + * Set the PCI priority bit correctly in the CTRL register. This + * determines if the adapter gives priority to receives, or if it + * gives equal priority to transmits and receives. + */ + if (dev_spec->dma_fairness) { + ctrl = E1000_READ_REG(hw, E1000_CTRL); + E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_PRIOR); + } + + /* Setup link and flow control */ + ret_val = e1000_setup_link_82542(hw); + + /* + * Clear all of the statistics registers (clear on read). It is + * important that we do this after we have tried to establish link + * because the symbol error count will increment wildly if there + * is no link. + */ + e1000_clear_hw_cntrs_82542(hw); + + return ret_val; +} + +/** + * e1000_setup_link_82542 - Setup flow control and link settings + * @hw: pointer to the HW structure + * + * Determines which flow control settings to use, then configures flow + * control. Calls the appropriate media-specific link configuration + * function. Assuming the adapter has a valid link partner, a valid link + * should be established. 
Assumes the hardware has previously been reset + * and the transmitter and receiver are not enabled. + **/ +STATIC s32 e1000_setup_link_82542(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val; + + DEBUGFUNC("e1000_setup_link_82542"); + + ret_val = e1000_set_default_fc_generic(hw); + if (ret_val) + goto out; + + hw->fc.requested_mode &= ~e1000_fc_tx_pause; + + if (mac->report_tx_early) + hw->fc.requested_mode &= ~e1000_fc_rx_pause; + + /* + * Save off the requested flow control mode for use later. Depending + * on the link partner's capabilities, we may or may not use this mode. + */ + hw->fc.current_mode = hw->fc.requested_mode; + + DEBUGOUT1("After fix-ups FlowControl is now = %x\n", + hw->fc.current_mode); + + /* Call the necessary subroutine to configure the link. */ + ret_val = mac->ops.setup_physical_interface(hw); + if (ret_val) + goto out; + + /* + * Initialize the flow control address, type, and PAUSE timer + * registers to their default values. This is done even if flow + * control is disabled, because it does not hurt anything to + * initialize these registers. + */ + DEBUGOUT("Initializing Flow Control address, type and timer regs\n"); + + E1000_WRITE_REG(hw, E1000_FCAL, FLOW_CONTROL_ADDRESS_LOW); + E1000_WRITE_REG(hw, E1000_FCAH, FLOW_CONTROL_ADDRESS_HIGH); + E1000_WRITE_REG(hw, E1000_FCT, FLOW_CONTROL_TYPE); + + E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time); + + ret_val = e1000_set_fc_watermarks_generic(hw); + +out: + return ret_val; +} + +/** + * e1000_led_on_82542 - Turn on SW controllable LED + * @hw: pointer to the HW structure + * + * Turns the SW defined LED on. + **/ +STATIC s32 e1000_led_on_82542(struct e1000_hw *hw) +{ + u32 ctrl = E1000_READ_REG(hw, E1000_CTRL); + + DEBUGFUNC("e1000_led_on_82542"); + + ctrl |= E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + + return E1000_SUCCESS; +} + +/** + * e1000_led_off_82542 - Turn off SW controllable LED + * @hw: pointer to the HW structure + * + * Turns the SW defined LED off. + **/ +STATIC s32 e1000_led_off_82542(struct e1000_hw *hw) +{ + u32 ctrl = E1000_READ_REG(hw, E1000_CTRL); + + DEBUGFUNC("e1000_led_off_82542"); + + ctrl &= ~E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + + return E1000_SUCCESS; +} + +/** + * e1000_rar_set_82542 - Set receive address register + * @hw: pointer to the HW structure + * @addr: pointer to the receive address + * @index: receive address array register + * + * Sets the receive address array register at index to the address passed + * in by addr. 
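+ * The first four address bytes are packed into the low RAR dword and the
+ * last two into the high dword, least-significant byte first; for example,
+ * 00:11:22:33:44:55 is written as rar_low = 0x33221100 and
+ * rar_high = 0x00005544, with E1000_RAH_AV OR'd in because the address is
+ * non-zero.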
+ **/ +STATIC int e1000_rar_set_82542(struct e1000_hw *hw, u8 *addr, u32 index) +{ + u32 rar_low, rar_high; + + DEBUGFUNC("e1000_rar_set_82542"); + + /* + * HW expects these in little endian so we reverse the byte order + * from network order (big endian) to little endian + */ + rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) | + ((u32) addr[2] << 16) | ((u32) addr[3] << 24)); + + rar_high = ((u32) addr[4] | ((u32) addr[5] << 8)); + + /* If MAC address zero, no need to set the AV bit */ + if (rar_low || rar_high) + rar_high |= E1000_RAH_AV; + + E1000_WRITE_REG_ARRAY(hw, E1000_RA, (index << 1), rar_low); + E1000_WRITE_REG_ARRAY(hw, E1000_RA, ((index << 1) + 1), rar_high); + + return E1000_SUCCESS; +} + +/** + * e1000_translate_register_82542 - Translate the proper register offset + * @reg: e1000 register to be read + * + * Registers in 82542 are located in different offsets than other adapters + * even though they function in the same manner. This function takes in + * the name of the register to read and returns the correct offset for + * 82542 silicon. + **/ +u32 e1000_translate_register_82542(u32 reg) +{ + /* + * Some of the 82542 registers are located at different + * offsets than they are in newer adapters. + * Despite the difference in location, the registers + * function in the same manner. + */ + switch (reg) { + case E1000_RA: + reg = 0x00040; + break; + case E1000_RDTR: + reg = 0x00108; + break; + case E1000_RDBAL(0): + reg = 0x00110; + break; + case E1000_RDBAH(0): + reg = 0x00114; + break; + case E1000_RDLEN(0): + reg = 0x00118; + break; + case E1000_RDH(0): + reg = 0x00120; + break; + case E1000_RDT(0): + reg = 0x00128; + break; + case E1000_RDBAL(1): + reg = 0x00138; + break; + case E1000_RDBAH(1): + reg = 0x0013C; + break; + case E1000_RDLEN(1): + reg = 0x00140; + break; + case E1000_RDH(1): + reg = 0x00148; + break; + case E1000_RDT(1): + reg = 0x00150; + break; + case E1000_FCRTH: + reg = 0x00160; + break; + case E1000_FCRTL: + reg = 0x00168; + break; + case E1000_MTA: + reg = 0x00200; + break; + case E1000_TDBAL(0): + reg = 0x00420; + break; + case E1000_TDBAH(0): + reg = 0x00424; + break; + case E1000_TDLEN(0): + reg = 0x00428; + break; + case E1000_TDH(0): + reg = 0x00430; + break; + case E1000_TDT(0): + reg = 0x00438; + break; + case E1000_TIDV: + reg = 0x00440; + break; + case E1000_VFTA: + reg = 0x00600; + break; + case E1000_TDFH: + reg = 0x08010; + break; + case E1000_TDFT: + reg = 0x08018; + break; + default: + break; + } + + return reg; +} + +/** + * e1000_clear_hw_cntrs_82542 - Clear device specific hardware counters + * @hw: pointer to the HW structure + * + * Clears the hardware counters by reading the counter registers. + **/ +STATIC void e1000_clear_hw_cntrs_82542(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_clear_hw_cntrs_82542"); + + e1000_clear_hw_cntrs_base_generic(hw); + + E1000_READ_REG(hw, E1000_PRC64); + E1000_READ_REG(hw, E1000_PRC127); + E1000_READ_REG(hw, E1000_PRC255); + E1000_READ_REG(hw, E1000_PRC511); + E1000_READ_REG(hw, E1000_PRC1023); + E1000_READ_REG(hw, E1000_PRC1522); + E1000_READ_REG(hw, E1000_PTC64); + E1000_READ_REG(hw, E1000_PTC127); + E1000_READ_REG(hw, E1000_PTC255); + E1000_READ_REG(hw, E1000_PTC511); + E1000_READ_REG(hw, E1000_PTC1023); + E1000_READ_REG(hw, E1000_PTC1522); +} + +/** + * e1000_read_mac_addr_82542 - Read device MAC address + * @hw: pointer to the HW structure + * + * Reads the device MAC address from the EEPROM and stores the value. 
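+ * Each 16-bit NVM word supplies two address bytes, low byte first:
+ * word 0 holds bytes 0-1, word 1 holds bytes 2-3 and word 2 holds
+ * bytes 4-5 of the permanent address.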
+ **/ +s32 e1000_read_mac_addr_82542(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + u16 offset, nvm_data, i; + + DEBUGFUNC("e1000_read_mac_addr"); + + for (i = 0; i < ETH_ADDR_LEN; i += 2) { + offset = i >> 1; + ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + goto out; + } + hw->mac.perm_addr[i] = (u8)(nvm_data & 0xFF); + hw->mac.perm_addr[i+1] = (u8)(nvm_data >> 8); + } + + for (i = 0; i < ETH_ADDR_LEN; i++) + hw->mac.addr[i] = hw->mac.perm_addr[i]; + +out: + return ret_val; +} diff --git a/src/spdk/dpdk/drivers/net/e1000/base/e1000_82543.c b/src/spdk/dpdk/drivers/net/e1000/base/e1000_82543.c new file mode 100644 index 000000000..dfde2a8b9 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/e1000/base/e1000_82543.c @@ -0,0 +1,1524 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001 - 2015 Intel Corporation + */ + +/* + * 82543GC Gigabit Ethernet Controller (Fiber) + * 82543GC Gigabit Ethernet Controller (Copper) + * 82544EI Gigabit Ethernet Controller (Copper) + * 82544EI Gigabit Ethernet Controller (Fiber) + * 82544GC Gigabit Ethernet Controller (Copper) + * 82544GC Gigabit Ethernet Controller (LOM) + */ + +#include "e1000_api.h" + +STATIC s32 e1000_init_phy_params_82543(struct e1000_hw *hw); +STATIC s32 e1000_init_nvm_params_82543(struct e1000_hw *hw); +STATIC s32 e1000_init_mac_params_82543(struct e1000_hw *hw); +STATIC s32 e1000_read_phy_reg_82543(struct e1000_hw *hw, u32 offset, + u16 *data); +STATIC s32 e1000_write_phy_reg_82543(struct e1000_hw *hw, u32 offset, + u16 data); +STATIC s32 e1000_phy_force_speed_duplex_82543(struct e1000_hw *hw); +STATIC s32 e1000_phy_hw_reset_82543(struct e1000_hw *hw); +STATIC s32 e1000_reset_hw_82543(struct e1000_hw *hw); +STATIC s32 e1000_init_hw_82543(struct e1000_hw *hw); +STATIC s32 e1000_setup_link_82543(struct e1000_hw *hw); +STATIC s32 e1000_setup_copper_link_82543(struct e1000_hw *hw); +STATIC s32 e1000_setup_fiber_link_82543(struct e1000_hw *hw); +STATIC s32 e1000_check_for_copper_link_82543(struct e1000_hw *hw); +STATIC s32 e1000_check_for_fiber_link_82543(struct e1000_hw *hw); +STATIC s32 e1000_led_on_82543(struct e1000_hw *hw); +STATIC s32 e1000_led_off_82543(struct e1000_hw *hw); +STATIC void e1000_write_vfta_82543(struct e1000_hw *hw, u32 offset, + u32 value); +STATIC void e1000_clear_hw_cntrs_82543(struct e1000_hw *hw); +STATIC s32 e1000_config_mac_to_phy_82543(struct e1000_hw *hw); +STATIC bool e1000_init_phy_disabled_82543(struct e1000_hw *hw); +STATIC void e1000_lower_mdi_clk_82543(struct e1000_hw *hw, u32 *ctrl); +STATIC s32 e1000_polarity_reversal_workaround_82543(struct e1000_hw *hw); +STATIC void e1000_raise_mdi_clk_82543(struct e1000_hw *hw, u32 *ctrl); +STATIC u16 e1000_shift_in_mdi_bits_82543(struct e1000_hw *hw); +STATIC void e1000_shift_out_mdi_bits_82543(struct e1000_hw *hw, u32 data, + u16 count); +STATIC bool e1000_tbi_compatibility_enabled_82543(struct e1000_hw *hw); +STATIC void e1000_set_tbi_sbp_82543(struct e1000_hw *hw, bool state); + +/** + * e1000_init_phy_params_82543 - Init PHY func ptrs. 
+ * @hw: pointer to the HW structure + **/ +STATIC s32 e1000_init_phy_params_82543(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_init_phy_params_82543"); + + if (hw->phy.media_type != e1000_media_type_copper) { + phy->type = e1000_phy_none; + goto out; + } else { + phy->ops.power_up = e1000_power_up_phy_copper; + phy->ops.power_down = e1000_power_down_phy_copper; + } + + phy->addr = 1; + phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; + phy->reset_delay_us = 10000; + phy->type = e1000_phy_m88; + + /* Function Pointers */ + phy->ops.check_polarity = e1000_check_polarity_m88; + phy->ops.commit = e1000_phy_sw_reset_generic; + phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_82543; + phy->ops.get_cable_length = e1000_get_cable_length_m88; + phy->ops.get_cfg_done = e1000_get_cfg_done_generic; + phy->ops.read_reg = (hw->mac.type == e1000_82543) + ? e1000_read_phy_reg_82543 + : e1000_read_phy_reg_m88; + phy->ops.reset = (hw->mac.type == e1000_82543) + ? e1000_phy_hw_reset_82543 + : e1000_phy_hw_reset_generic; + phy->ops.write_reg = (hw->mac.type == e1000_82543) + ? e1000_write_phy_reg_82543 + : e1000_write_phy_reg_m88; + phy->ops.get_info = e1000_get_phy_info_m88; + + /* + * The external PHY of the 82543 can be in a funky state. + * Resetting helps us read the PHY registers for acquiring + * the PHY ID. + */ + if (!e1000_init_phy_disabled_82543(hw)) { + ret_val = phy->ops.reset(hw); + if (ret_val) { + DEBUGOUT("Resetting PHY during init failed.\n"); + goto out; + } + msec_delay(20); + } + + ret_val = e1000_get_phy_id(hw); + if (ret_val) + goto out; + + /* Verify phy id */ + switch (hw->mac.type) { + case e1000_82543: + if (phy->id != M88E1000_E_PHY_ID) { + ret_val = -E1000_ERR_PHY; + goto out; + } + break; + case e1000_82544: + if (phy->id != M88E1000_I_PHY_ID) { + ret_val = -E1000_ERR_PHY; + goto out; + } + break; + default: + ret_val = -E1000_ERR_PHY; + goto out; + break; + } + +out: + return ret_val; +} + +/** + * e1000_init_nvm_params_82543 - Init NVM func ptrs. + * @hw: pointer to the HW structure + **/ +STATIC s32 e1000_init_nvm_params_82543(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + + DEBUGFUNC("e1000_init_nvm_params_82543"); + + nvm->type = e1000_nvm_eeprom_microwire; + nvm->word_size = 64; + nvm->delay_usec = 50; + nvm->address_bits = 6; + nvm->opcode_bits = 3; + + /* Function Pointers */ + nvm->ops.read = e1000_read_nvm_microwire; + nvm->ops.update = e1000_update_nvm_checksum_generic; + nvm->ops.valid_led_default = e1000_valid_led_default_generic; + nvm->ops.validate = e1000_validate_nvm_checksum_generic; + nvm->ops.write = e1000_write_nvm_microwire; + + return E1000_SUCCESS; +} + +/** + * e1000_init_mac_params_82543 - Init MAC func ptrs. 
+ * @hw: pointer to the HW structure + **/ +STATIC s32 e1000_init_mac_params_82543(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + + DEBUGFUNC("e1000_init_mac_params_82543"); + + /* Set media type */ + switch (hw->device_id) { + case E1000_DEV_ID_82543GC_FIBER: + case E1000_DEV_ID_82544EI_FIBER: + hw->phy.media_type = e1000_media_type_fiber; + break; + default: + hw->phy.media_type = e1000_media_type_copper; + break; + } + + /* Set mta register count */ + mac->mta_reg_count = 128; + /* Set rar entry count */ + mac->rar_entry_count = E1000_RAR_ENTRIES; + + /* Function pointers */ + + /* bus type/speed/width */ + mac->ops.get_bus_info = e1000_get_bus_info_pci_generic; + /* function id */ + mac->ops.set_lan_id = e1000_set_lan_id_multi_port_pci; + /* reset */ + mac->ops.reset_hw = e1000_reset_hw_82543; + /* hw initialization */ + mac->ops.init_hw = e1000_init_hw_82543; + /* link setup */ + mac->ops.setup_link = e1000_setup_link_82543; + /* physical interface setup */ + mac->ops.setup_physical_interface = + (hw->phy.media_type == e1000_media_type_copper) + ? e1000_setup_copper_link_82543 : e1000_setup_fiber_link_82543; + /* check for link */ + mac->ops.check_for_link = + (hw->phy.media_type == e1000_media_type_copper) + ? e1000_check_for_copper_link_82543 + : e1000_check_for_fiber_link_82543; + /* link info */ + mac->ops.get_link_up_info = + (hw->phy.media_type == e1000_media_type_copper) + ? e1000_get_speed_and_duplex_copper_generic + : e1000_get_speed_and_duplex_fiber_serdes_generic; + /* multicast address update */ + mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic; + /* writing VFTA */ + mac->ops.write_vfta = e1000_write_vfta_82543; + /* clearing VFTA */ + mac->ops.clear_vfta = e1000_clear_vfta_generic; + /* turn on/off LED */ + mac->ops.led_on = e1000_led_on_82543; + mac->ops.led_off = e1000_led_off_82543; + /* clear hardware counters */ + mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_82543; + + /* Set tbi compatibility */ + if ((hw->mac.type != e1000_82543) || + (hw->phy.media_type == e1000_media_type_fiber)) + e1000_set_tbi_compatibility_82543(hw, false); + + return E1000_SUCCESS; +} + +/** + * e1000_init_function_pointers_82543 - Init func ptrs. + * @hw: pointer to the HW structure + * + * Called to initialize all function pointers and parameters. + **/ +void e1000_init_function_pointers_82543(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_init_function_pointers_82543"); + + hw->mac.ops.init_params = e1000_init_mac_params_82543; + hw->nvm.ops.init_params = e1000_init_nvm_params_82543; + hw->phy.ops.init_params = e1000_init_phy_params_82543; +} + +/** + * e1000_tbi_compatibility_enabled_82543 - Returns TBI compat status + * @hw: pointer to the HW structure + * + * Returns the current status of 10-bit Interface (TBI) compatibility + * (enabled/disabled). + **/ +STATIC bool e1000_tbi_compatibility_enabled_82543(struct e1000_hw *hw) +{ + struct e1000_dev_spec_82543 *dev_spec = &hw->dev_spec._82543; + bool state = false; + + DEBUGFUNC("e1000_tbi_compatibility_enabled_82543"); + + if (hw->mac.type != e1000_82543) { + DEBUGOUT("TBI compatibility workaround for 82543 only.\n"); + goto out; + } + + state = !!(dev_spec->tbi_compatibility & TBI_COMPAT_ENABLED); + +out: + return state; +} + +/** + * e1000_set_tbi_compatibility_82543 - Set TBI compatibility + * @hw: pointer to the HW structure + * @state: enable/disable TBI compatibility + * + * Enables or disabled 10-bit Interface (TBI) compatibility. 
+ **/ +void e1000_set_tbi_compatibility_82543(struct e1000_hw *hw, bool state) +{ + struct e1000_dev_spec_82543 *dev_spec = &hw->dev_spec._82543; + + DEBUGFUNC("e1000_set_tbi_compatibility_82543"); + + if (hw->mac.type != e1000_82543) { + DEBUGOUT("TBI compatibility workaround for 82543 only.\n"); + goto out; + } + + if (state) + dev_spec->tbi_compatibility |= TBI_COMPAT_ENABLED; + else + dev_spec->tbi_compatibility &= ~TBI_COMPAT_ENABLED; + +out: + return; +} + +/** + * e1000_tbi_sbp_enabled_82543 - Returns TBI SBP status + * @hw: pointer to the HW structure + * + * Returns the current status of 10-bit Interface (TBI) store bad packet (SBP) + * (enabled/disabled). + **/ +bool e1000_tbi_sbp_enabled_82543(struct e1000_hw *hw) +{ + struct e1000_dev_spec_82543 *dev_spec = &hw->dev_spec._82543; + bool state = false; + + DEBUGFUNC("e1000_tbi_sbp_enabled_82543"); + + if (hw->mac.type != e1000_82543) { + DEBUGOUT("TBI compatibility workaround for 82543 only.\n"); + goto out; + } + + state = !!(dev_spec->tbi_compatibility & TBI_SBP_ENABLED); + +out: + return state; +} + +/** + * e1000_set_tbi_sbp_82543 - Set TBI SBP + * @hw: pointer to the HW structure + * @state: enable/disable TBI store bad packet + * + * Enables or disabled 10-bit Interface (TBI) store bad packet (SBP). + **/ +STATIC void e1000_set_tbi_sbp_82543(struct e1000_hw *hw, bool state) +{ + struct e1000_dev_spec_82543 *dev_spec = &hw->dev_spec._82543; + + DEBUGFUNC("e1000_set_tbi_sbp_82543"); + + if (state && e1000_tbi_compatibility_enabled_82543(hw)) + dev_spec->tbi_compatibility |= TBI_SBP_ENABLED; + else + dev_spec->tbi_compatibility &= ~TBI_SBP_ENABLED; + + return; +} + +/** + * e1000_init_phy_disabled_82543 - Returns init PHY status + * @hw: pointer to the HW structure + * + * Returns the current status of whether PHY initialization is disabled. + * True if PHY initialization is disabled else false. + **/ +STATIC bool e1000_init_phy_disabled_82543(struct e1000_hw *hw) +{ + struct e1000_dev_spec_82543 *dev_spec = &hw->dev_spec._82543; + bool ret_val; + + DEBUGFUNC("e1000_init_phy_disabled_82543"); + + if (hw->mac.type != e1000_82543) { + ret_val = false; + goto out; + } + + ret_val = dev_spec->init_phy_disabled; + +out: + return ret_val; +} + +/** + * e1000_tbi_adjust_stats_82543 - Adjust stats when TBI enabled + * @hw: pointer to the HW structure + * @stats: Struct containing statistic register values + * @frame_len: The length of the frame in question + * @mac_addr: The Ethernet destination address of the frame in question + * @max_frame_size: The maximum frame size + * + * Adjusts the statistic counters when a frame is accepted by TBI_ACCEPT + **/ +void e1000_tbi_adjust_stats_82543(struct e1000_hw *hw, + struct e1000_hw_stats *stats, u32 frame_len, + u8 *mac_addr, u32 max_frame_size) +{ + if (!(e1000_tbi_sbp_enabled_82543(hw))) + goto out; + + /* First adjust the frame length. */ + frame_len--; + /* + * We need to adjust the statistics counters, since the hardware + * counters overcount this packet as a CRC error and undercount + * the packet as a good packet + */ + /* This packet should not be counted as a CRC error. */ + stats->crcerrs--; + /* This packet does count as a Good Packet Received. */ + stats->gprc++; + + /* Adjust the Good Octets received counters */ + stats->gorc += frame_len; + + /* + * Is this a broadcast or multicast? Check broadcast first, + * since the test for a multicast frame will test positive on + * a broadcast frame. 
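+ * (A broadcast address is all ones, so its first octet also has the
+ * multicast bit set; checking broadcast first keeps broadcast frames
+ * out of the multicast counter.)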
+ */
+ if ((mac_addr[0] == 0xff) && (mac_addr[1] == 0xff))
+ /* Broadcast packet */
+ stats->bprc++;
+ else if (*mac_addr & 0x01)
+ /* Multicast packet */
+ stats->mprc++;
+
+ /*
+ * In this case, the hardware has overcounted the number of
+ * oversize frames.
+ */
+ if ((frame_len == max_frame_size) && (stats->roc > 0))
+ stats->roc--;
+
+ /*
+ * Adjust the bin counters when the extra byte put the frame in the
+ * wrong bin. Remember that the frame_len was adjusted above.
+ */
+ if (frame_len == 64) {
+ stats->prc64++;
+ stats->prc127--;
+ } else if (frame_len == 127) {
+ stats->prc127++;
+ stats->prc255--;
+ } else if (frame_len == 255) {
+ stats->prc255++;
+ stats->prc511--;
+ } else if (frame_len == 511) {
+ stats->prc511++;
+ stats->prc1023--;
+ } else if (frame_len == 1023) {
+ stats->prc1023++;
+ stats->prc1522--;
+ } else if (frame_len == 1522) {
+ stats->prc1522++;
+ }
+
+out:
+ return;
+}
+
+/**
+ * e1000_read_phy_reg_82543 - Read PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Reads the PHY at offset and stores the information read to data.
+ **/
+STATIC s32 e1000_read_phy_reg_82543(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ u32 mdic;
+ s32 ret_val = E1000_SUCCESS;
+
+ DEBUGFUNC("e1000_read_phy_reg_82543");
+
+ if (offset > MAX_PHY_REG_ADDRESS) {
+ DEBUGOUT1("PHY Address %d is out of range\n", offset);
+ ret_val = -E1000_ERR_PARAM;
+ goto out;
+ }
+
+ /*
+ * We must first send a preamble through the MDIO pin to signal the
+ * beginning of an MII instruction. This is done by sending 32
+ * consecutive "1" bits.
+ */
+ e1000_shift_out_mdi_bits_82543(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE);
+
+ /*
+ * Now combine the next few fields that are required for a read
+ * operation. We use this method instead of calling the
+ * e1000_shift_out_mdi_bits routine five different times. The format
+ * of an MII read instruction consists of a shift out of 14 bits and
+ * is defined as follows:
+ * <Preamble><SOF><Op Code><Phy Addr><Reg Addr>
+ * followed by a shift in of 18 bits. The first two bits shifted in
+ * are TurnAround bits used to avoid contention on the MDIO pin when a
+ * READ operation is performed. These two bits are thrown away,
+ * followed by a shift in of 16 bits which contains the desired data.
+ */
+ mdic = (offset | (hw->phy.addr << 5) |
+ (PHY_OP_READ << 10) | (PHY_SOF << 12));
+
+ e1000_shift_out_mdi_bits_82543(hw, mdic, 14);
+
+ /*
+ * Now that we've shifted out the read command to the MII, we need to
+ * "shift in" the 16-bit value (18 total bits) of the requested PHY
+ * register.
+ */
+ *data = e1000_shift_in_mdi_bits_82543(hw);
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_write_phy_reg_82543 - Write PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be written
+ * @data: the data to be written at offset
+ *
+ * Writes data to the PHY at offset.
+ **/
+STATIC s32 e1000_write_phy_reg_82543(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ u32 mdic;
+ s32 ret_val = E1000_SUCCESS;
+
+ DEBUGFUNC("e1000_write_phy_reg_82543");
+
+ if (offset > MAX_PHY_REG_ADDRESS) {
+ DEBUGOUT1("PHY Address %d is out of range\n", offset);
+ ret_val = -E1000_ERR_PARAM;
+ goto out;
+ }
+
+ /*
+ * We'll need to use the SW defined pins to shift the write command
+ * out to the PHY. We first send a preamble to the PHY to signal the
+ * beginning of the MII instruction. This is done by sending 32
+ * consecutive "1" bits.
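+ * (PHY_PREAMBLE is 0xFFFFFFFF and PHY_PREAMBLE_SIZE is 32; both are
+ * defined in e1000_82543.h.)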
+ */
+ e1000_shift_out_mdi_bits_82543(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE);
+
+ /*
+ * Now combine the remaining required fields that will indicate a
+ * write operation. We use this method instead of calling the
+ * e1000_shift_out_mdi_bits routine for each field in the command. The
+ * format of an MII write instruction is as follows:
+ * <Preamble><SOF><Op Code><Phy Addr><Reg Addr><Turnaround><Data>.
+ */
+ mdic = ((PHY_TURNAROUND) | (offset << 2) | (hw->phy.addr << 7) |
+ (PHY_OP_WRITE << 12) | (PHY_SOF << 14));
+ mdic <<= 16;
+ mdic |= (u32)data;
+
+ e1000_shift_out_mdi_bits_82543(hw, mdic, 32);
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_raise_mdi_clk_82543 - Raise Management Data Input clock
+ * @hw: pointer to the HW structure
+ * @ctrl: pointer to the control register
+ *
+ * Raise the management data input clock by setting the MDC bit in the control
+ * register.
+ **/
+STATIC void e1000_raise_mdi_clk_82543(struct e1000_hw *hw, u32 *ctrl)
+{
+ /*
+ * Raise the clock input to the Management Data Clock (by setting the
+ * MDC bit), and then delay a sufficient amount of time.
+ */
+ E1000_WRITE_REG(hw, E1000_CTRL, (*ctrl | E1000_CTRL_MDC));
+ E1000_WRITE_FLUSH(hw);
+ usec_delay(10);
+}
+
+/**
+ * e1000_lower_mdi_clk_82543 - Lower Management Data Input clock
+ * @hw: pointer to the HW structure
+ * @ctrl: pointer to the control register
+ *
+ * Lower the management data input clock by clearing the MDC bit in the
+ * control register.
+ **/
+STATIC void e1000_lower_mdi_clk_82543(struct e1000_hw *hw, u32 *ctrl)
+{
+ /*
+ * Lower the clock input to the Management Data Clock (by clearing the
+ * MDC bit), and then delay a sufficient amount of time.
+ */
+ E1000_WRITE_REG(hw, E1000_CTRL, (*ctrl & ~E1000_CTRL_MDC));
+ E1000_WRITE_FLUSH(hw);
+ usec_delay(10);
+}
+
+/**
+ * e1000_shift_out_mdi_bits_82543 - Shift data bits out to the PHY
+ * @hw: pointer to the HW structure
+ * @data: data to send to the PHY
+ * @count: number of bits to shift out
+ *
+ * We need to shift 'count' bits out to the PHY. So, the value in the
+ * "data" parameter will be shifted out to the PHY one bit at a time.
+ * In order to do this, "data" must be broken down into bits.
+ **/
+STATIC void e1000_shift_out_mdi_bits_82543(struct e1000_hw *hw, u32 data,
+ u16 count)
+{
+ u32 ctrl, mask;
+
+ /*
+ * We need to shift "count" number of bits out to the PHY. So, the
+ * value in the "data" parameter will be shifted out to the PHY one
+ * bit at a time. In order to do this, "data" must be broken down
+ * into bits.
+ */
+ mask = 0x01;
+ mask <<= (count - 1);
+
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+ /* Set MDIO_DIR and MDC_DIR direction bits to be used as output pins. */
+ ctrl |= (E1000_CTRL_MDIO_DIR | E1000_CTRL_MDC_DIR);
+
+ while (mask) {
+ /*
+ * A "1" is shifted out to the PHY by setting the MDIO bit to
+ * "1" and then raising and lowering the Management Data Clock.
+ * A "0" is shifted out to the PHY by setting the MDIO bit to
+ * "0" and then raising and lowering the clock.
+ */
+ if (data & mask)
+ ctrl |= E1000_CTRL_MDIO;
+ else
+ ctrl &= ~E1000_CTRL_MDIO;
+
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+ E1000_WRITE_FLUSH(hw);
+
+ usec_delay(10);
+
+ e1000_raise_mdi_clk_82543(hw, &ctrl);
+ e1000_lower_mdi_clk_82543(hw, &ctrl);
+
+ mask >>= 1;
+ }
+}
+
+/**
+ * e1000_shift_in_mdi_bits_82543 - Shift data bits in from the PHY
+ * @hw: pointer to the HW structure
+ *
+ * In order to read a register from the PHY, we need to shift 18 bits
+ * in from the PHY.
Bits are "shifted in" by raising the clock input to + * the PHY (setting the MDC bit), and then reading the value of the data out + * MDIO bit. + **/ +STATIC u16 e1000_shift_in_mdi_bits_82543(struct e1000_hw *hw) +{ + u32 ctrl; + u16 data = 0; + u8 i; + + /* + * In order to read a register from the PHY, we need to shift in a + * total of 18 bits from the PHY. The first two bit (turnaround) + * times are used to avoid contention on the MDIO pin when a read + * operation is performed. These two bits are ignored by us and + * thrown away. Bits are "shifted in" by raising the input to the + * Management Data Clock (setting the MDC bit) and then reading the + * value of the MDIO bit. + */ + ctrl = E1000_READ_REG(hw, E1000_CTRL); + + /* + * Clear MDIO_DIR (SWDPIO1) to indicate this bit is to be used as + * input. + */ + ctrl &= ~E1000_CTRL_MDIO_DIR; + ctrl &= ~E1000_CTRL_MDIO; + + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + E1000_WRITE_FLUSH(hw); + + /* + * Raise and lower the clock before reading in the data. This accounts + * for the turnaround bits. The first clock occurred when we clocked + * out the last bit of the Register Address. + */ + e1000_raise_mdi_clk_82543(hw, &ctrl); + e1000_lower_mdi_clk_82543(hw, &ctrl); + + for (data = 0, i = 0; i < 16; i++) { + data <<= 1; + e1000_raise_mdi_clk_82543(hw, &ctrl); + ctrl = E1000_READ_REG(hw, E1000_CTRL); + /* Check to see if we shifted in a "1". */ + if (ctrl & E1000_CTRL_MDIO) + data |= 1; + e1000_lower_mdi_clk_82543(hw, &ctrl); + } + + e1000_raise_mdi_clk_82543(hw, &ctrl); + e1000_lower_mdi_clk_82543(hw, &ctrl); + + return data; +} + +/** + * e1000_phy_force_speed_duplex_82543 - Force speed/duplex for PHY + * @hw: pointer to the HW structure + * + * Calls the function to force speed and duplex for the m88 PHY, and + * if the PHY is not auto-negotiating and the speed is forced to 10Mbit, + * then call the function for polarity reversal workaround. + **/ +STATIC s32 e1000_phy_force_speed_duplex_82543(struct e1000_hw *hw) +{ + s32 ret_val; + + DEBUGFUNC("e1000_phy_force_speed_duplex_82543"); + + ret_val = e1000_phy_force_speed_duplex_m88(hw); + if (ret_val) + goto out; + + if (!hw->mac.autoneg && (hw->mac.forced_speed_duplex & + E1000_ALL_10_SPEED)) + ret_val = e1000_polarity_reversal_workaround_82543(hw); + +out: + return ret_val; +} + +/** + * e1000_polarity_reversal_workaround_82543 - Workaround polarity reversal + * @hw: pointer to the HW structure + * + * When forcing link to 10 Full or 10 Half, the PHY can reverse the polarity + * inadvertently. To workaround the issue, we disable the transmitter on + * the PHY until we have established the link partner's link parameters. + **/ +STATIC s32 e1000_polarity_reversal_workaround_82543(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + u16 mii_status_reg; + u16 i; + bool link; + + if (!(hw->phy.ops.write_reg)) + goto out; + + /* Polarity reversal workaround for forced 10F/10H links. */ + + /* Disable the transmitter on the PHY */ + + ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0019); + if (ret_val) + goto out; + ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFFFF); + if (ret_val) + goto out; + + ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0000); + if (ret_val) + goto out; + + /* + * This loop will early-out if the NO link condition has been met. + * In other words, DO NOT use e1000_phy_has_link_generic() here. + */ + for (i = PHY_FORCE_TIME; i > 0; i--) { + /* + * Read the MII Status Register and wait for Link Status bit + * to be clear. 
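+ * The status register is read twice because the link status bit is
+ * sticky (latched); the second read returns the current link state.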
+ */ + + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + goto out; + + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + goto out; + + if (!(mii_status_reg & ~MII_SR_LINK_STATUS)) + break; + msec_delay_irq(100); + } + + /* Recommended delay time after link has been lost */ + msec_delay_irq(1000); + + /* Now we will re-enable the transmitter on the PHY */ + + ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0019); + if (ret_val) + goto out; + msec_delay_irq(50); + ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFFF0); + if (ret_val) + goto out; + msec_delay_irq(50); + ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFF00); + if (ret_val) + goto out; + msec_delay_irq(50); + ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0x0000); + if (ret_val) + goto out; + + ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0000); + if (ret_val) + goto out; + + /* + * Read the MII Status Register and wait for Link Status bit + * to be set. + */ + ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_TIME, 100000, &link); + if (ret_val) + goto out; + +out: + return ret_val; +} + +/** + * e1000_phy_hw_reset_82543 - PHY hardware reset + * @hw: pointer to the HW structure + * + * Sets the PHY_RESET_DIR bit in the extended device control register + * to put the PHY into a reset and waits for completion. Once the reset + * has been accomplished, clear the PHY_RESET_DIR bit to take the PHY out + * of reset. + **/ +STATIC s32 e1000_phy_hw_reset_82543(struct e1000_hw *hw) +{ + u32 ctrl_ext; + s32 ret_val; + + DEBUGFUNC("e1000_phy_hw_reset_82543"); + + /* + * Read the Extended Device Control Register, assert the PHY_RESET_DIR + * bit to put the PHY into reset... + */ + ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_SDP4_DIR; + ctrl_ext &= ~E1000_CTRL_EXT_SDP4_DATA; + E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); + E1000_WRITE_FLUSH(hw); + + msec_delay(10); + + /* ...then take it out of reset. */ + ctrl_ext |= E1000_CTRL_EXT_SDP4_DATA; + E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); + E1000_WRITE_FLUSH(hw); + + usec_delay(150); + + if (!(hw->phy.ops.get_cfg_done)) + return E1000_SUCCESS; + + ret_val = hw->phy.ops.get_cfg_done(hw); + + return ret_val; +} + +/** + * e1000_reset_hw_82543 - Reset hardware + * @hw: pointer to the HW structure + * + * This resets the hardware into a known state. + **/ +STATIC s32 e1000_reset_hw_82543(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_reset_hw_82543"); + + DEBUGOUT("Masking off all interrupts\n"); + E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); + + E1000_WRITE_REG(hw, E1000_RCTL, 0); + E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP); + E1000_WRITE_FLUSH(hw); + + e1000_set_tbi_sbp_82543(hw, false); + + /* + * Delay to allow any outstanding PCI transactions to complete before + * resetting the device + */ + msec_delay(10); + + ctrl = E1000_READ_REG(hw, E1000_CTRL); + + DEBUGOUT("Issuing a global reset to 82543/82544 MAC\n"); + if (hw->mac.type == e1000_82543) { + E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_RST); + } else { + /* + * The 82544 can't ACK the 64-bit write when issuing the + * reset, so use IO-mapping as a workaround. + */ + E1000_WRITE_REG_IO(hw, E1000_CTRL, ctrl | E1000_CTRL_RST); + } + + /* + * After MAC reset, force reload of NVM to restore power-on + * settings to device. 
+ */ + hw->nvm.ops.reload(hw); + msec_delay(2); + + /* Masking off and clearing any pending interrupts */ + E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); + E1000_READ_REG(hw, E1000_ICR); + + return ret_val; +} + +/** + * e1000_init_hw_82543 - Initialize hardware + * @hw: pointer to the HW structure + * + * This inits the hardware readying it for operation. + **/ +STATIC s32 e1000_init_hw_82543(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + struct e1000_dev_spec_82543 *dev_spec = &hw->dev_spec._82543; + u32 ctrl; + s32 ret_val; + u16 i; + + DEBUGFUNC("e1000_init_hw_82543"); + + /* Disabling VLAN filtering */ + E1000_WRITE_REG(hw, E1000_VET, 0); + mac->ops.clear_vfta(hw); + + /* Setup the receive address. */ + e1000_init_rx_addrs_generic(hw, mac->rar_entry_count); + + /* Zero out the Multicast HASH table */ + DEBUGOUT("Zeroing the MTA\n"); + for (i = 0; i < mac->mta_reg_count; i++) { + E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); + E1000_WRITE_FLUSH(hw); + } + + /* + * Set the PCI priority bit correctly in the CTRL register. This + * determines if the adapter gives priority to receives, or if it + * gives equal priority to transmits and receives. + */ + if (hw->mac.type == e1000_82543 && dev_spec->dma_fairness) { + ctrl = E1000_READ_REG(hw, E1000_CTRL); + E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_PRIOR); + } + + e1000_pcix_mmrbc_workaround_generic(hw); + + /* Setup link and flow control */ + ret_val = mac->ops.setup_link(hw); + + /* + * Clear all of the statistics registers (clear on read). It is + * important that we do this after we have tried to establish link + * because the symbol error count will increment wildly if there + * is no link. + */ + e1000_clear_hw_cntrs_82543(hw); + + return ret_val; +} + +/** + * e1000_setup_link_82543 - Setup flow control and link settings + * @hw: pointer to the HW structure + * + * Read the EEPROM to determine the initial polarity value and write the + * extended device control register with the information before calling + * the generic setup link function, which does the following: + * Determines which flow control settings to use, then configures flow + * control. Calls the appropriate media-specific link configuration + * function. Assuming the adapter has a valid link partner, a valid link + * should be established. Assumes the hardware has previously been reset + * and the transmitter and receiver are not enabled. + **/ +STATIC s32 e1000_setup_link_82543(struct e1000_hw *hw) +{ + u32 ctrl_ext; + s32 ret_val; + u16 data; + + DEBUGFUNC("e1000_setup_link_82543"); + + /* + * Take the 4 bits from NVM word 0xF that determine the initial + * polarity value for the SW controlled pins, and setup the + * Extended Device Control reg with that info. + * This is needed because one of the SW controlled pins is used for + * signal detection. So this should be done before phy setup. + */ + if (hw->mac.type == e1000_82543) { + ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG, 1, &data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + ctrl_ext = ((data & NVM_WORD0F_SWPDIO_EXT_MASK) << + NVM_SWDPIO_EXT_SHIFT); + E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); + } + + ret_val = e1000_setup_link_generic(hw); + +out: + return ret_val; +} + +/** + * e1000_setup_copper_link_82543 - Configure copper link settings + * @hw: pointer to the HW structure + * + * Configures the link for auto-neg or forced speed and duplex. 
Then we check + * for link, once link is established calls to configure collision distance + * and flow control are called. + **/ +STATIC s32 e1000_setup_copper_link_82543(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + bool link = true; + + DEBUGFUNC("e1000_setup_copper_link_82543"); + + ctrl = E1000_READ_REG(hw, E1000_CTRL) | E1000_CTRL_SLU; + /* + * With 82543, we need to force speed and duplex on the MAC + * equal to what the PHY speed and duplex configuration is. + * In addition, we need to perform a hardware reset on the + * PHY to take it out of reset. + */ + if (hw->mac.type == e1000_82543) { + ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + ret_val = hw->phy.ops.reset(hw); + if (ret_val) + goto out; + } else { + ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + } + + /* Set MDI/MDI-X, Polarity Reversal, and downshift settings */ + ret_val = e1000_copper_link_setup_m88(hw); + if (ret_val) + goto out; + + if (hw->mac.autoneg) { + /* + * Setup autoneg and flow control advertisement and perform + * autonegotiation. + */ + ret_val = e1000_copper_link_autoneg(hw); + if (ret_val) + goto out; + } else { + /* + * PHY will be set to 10H, 10F, 100H or 100F + * depending on user settings. + */ + DEBUGOUT("Forcing Speed and Duplex\n"); + ret_val = e1000_phy_force_speed_duplex_82543(hw); + if (ret_val) { + DEBUGOUT("Error Forcing Speed and Duplex\n"); + goto out; + } + } + + /* + * Check link status. Wait up to 100 microseconds for link to become + * valid. + */ + ret_val = e1000_phy_has_link_generic(hw, COPPER_LINK_UP_LIMIT, 10, + &link); + if (ret_val) + goto out; + + + if (link) { + DEBUGOUT("Valid link established!!!\n"); + /* Config the MAC and PHY after link is up */ + if (hw->mac.type == e1000_82544) { + hw->mac.ops.config_collision_dist(hw); + } else { + ret_val = e1000_config_mac_to_phy_82543(hw); + if (ret_val) + goto out; + } + ret_val = e1000_config_fc_after_link_up_generic(hw); + } else { + DEBUGOUT("Unable to establish link!!!\n"); + } + +out: + return ret_val; +} + +/** + * e1000_setup_fiber_link_82543 - Setup link for fiber + * @hw: pointer to the HW structure + * + * Configures collision distance and flow control for fiber links. Upon + * successful setup, poll for link. + **/ +STATIC s32 e1000_setup_fiber_link_82543(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + + DEBUGFUNC("e1000_setup_fiber_link_82543"); + + ctrl = E1000_READ_REG(hw, E1000_CTRL); + + /* Take the link out of reset */ + ctrl &= ~E1000_CTRL_LRST; + + hw->mac.ops.config_collision_dist(hw); + + ret_val = e1000_commit_fc_settings_generic(hw); + if (ret_val) + goto out; + + DEBUGOUT("Auto-negotiation enabled\n"); + + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + E1000_WRITE_FLUSH(hw); + msec_delay(1); + + /* + * For these adapters, the SW definable pin 1 is cleared when the + * optics detect a signal. If we have a signal, then poll for a + * "Link-Up" indication. 
+ */
+ if (!(E1000_READ_REG(hw, E1000_CTRL) & E1000_CTRL_SWDPIN1))
+ ret_val = e1000_poll_fiber_serdes_link_generic(hw);
+ else
+ DEBUGOUT("No signal detected\n");
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_check_for_copper_link_82543 - Check for link (Copper)
+ * @hw: pointer to the HW structure
+ *
+ * Checks the PHY for link; if link exists, do the following:
+ * - check for downshift
+ * - do polarity workaround (if necessary)
+ * - configure collision distance
+ * - configure flow control after link up
+ * - configure tbi compatibility
+ **/
+STATIC s32 e1000_check_for_copper_link_82543(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ u32 icr, rctl;
+ s32 ret_val;
+ u16 speed, duplex;
+ bool link;
+
+ DEBUGFUNC("e1000_check_for_copper_link_82543");
+
+ if (!mac->get_link_status) {
+ ret_val = E1000_SUCCESS;
+ goto out;
+ }
+
+ ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
+ if (ret_val)
+ goto out;
+
+ if (!link)
+ goto out; /* No link detected */
+
+ mac->get_link_status = false;
+
+ e1000_check_downshift_generic(hw);
+
+ /*
+ * If we are forcing speed/duplex, then we can return since
+ * we have already determined whether we have link or not.
+ */
+ if (!mac->autoneg) {
+ /*
+ * If speed and duplex are forced to 10H or 10F, then we will
+ * implement the polarity reversal workaround. We disable
+ * interrupts first, and upon returning, restore the device's
+ * interrupt state to its previous value, except for the link
+ * status change interrupt, which will happen due to the
+ * execution of this workaround.
+ */
+ if (mac->forced_speed_duplex & E1000_ALL_10_SPEED) {
+ E1000_WRITE_REG(hw, E1000_IMC, 0xFFFFFFFF);
+ ret_val = e1000_polarity_reversal_workaround_82543(hw);
+ icr = E1000_READ_REG(hw, E1000_ICR);
+ E1000_WRITE_REG(hw, E1000_ICS, (icr & ~E1000_ICS_LSC));
+ E1000_WRITE_REG(hw, E1000_IMS, IMS_ENABLE_MASK);
+ }
+
+ ret_val = -E1000_ERR_CONFIG;
+ goto out;
+ }
+
+ /*
+ * We have an M88E1000 PHY and Auto-Neg is enabled. If we
+ * have Si on board that is 82544 or newer, Auto
+ * Speed Detection takes care of MAC speed/duplex
+ * configuration. So we only need to configure Collision
+ * Distance in the MAC. Otherwise, we need to force
+ * speed/duplex on the MAC to the current PHY speed/duplex
+ * settings.
+ */
+ if (mac->type == e1000_82544)
+ hw->mac.ops.config_collision_dist(hw);
+ else {
+ ret_val = e1000_config_mac_to_phy_82543(hw);
+ if (ret_val) {
+ DEBUGOUT("Error configuring MAC to PHY settings\n");
+ goto out;
+ }
+ }
+
+ /*
+ * Configure Flow Control now that Auto-Neg has completed.
+ * First, we need to restore the desired flow control
+ * settings because we may have had to re-autoneg with a
+ * different link partner.
+ */
+ ret_val = e1000_config_fc_after_link_up_generic(hw);
+ if (ret_val)
+ DEBUGOUT("Error configuring flow control\n");
+
+ /*
+ * At this point we know that we are on copper and we have
+ * auto-negotiated link. These are conditions for checking the link
+ * partner capability register. We use the link speed to determine if
+ * TBI compatibility needs to be turned on or off. If the link is not
+ * at gigabit speed, then TBI compatibility is not needed. If we are
+ * at gigabit speed, we turn on TBI compatibility.
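+ * (A TBI link partner may append an extra byte to otherwise good
+ * frames, which the hardware counts as CRC errors; storing bad
+ * packets lets e1000_tbi_adjust_stats_82543() correct the counters.)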
+ */
+ if (e1000_tbi_compatibility_enabled_82543(hw)) {
+ ret_val = mac->ops.get_link_up_info(hw, &speed, &duplex);
+ if (ret_val) {
+ DEBUGOUT("Error getting link speed and duplex\n");
+ return ret_val;
+ }
+ if (speed != SPEED_1000) {
+ /*
+ * If link speed is not set to gigabit speed,
+ * we do not need to enable TBI compatibility.
+ */
+ if (e1000_tbi_sbp_enabled_82543(hw)) {
+ /*
+ * If we were previously in this mode,
+ * turn it off.
+ */
+ e1000_set_tbi_sbp_82543(hw, false);
+ rctl = E1000_READ_REG(hw, E1000_RCTL);
+ rctl &= ~E1000_RCTL_SBP;
+ E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+ }
+ } else {
+ /*
+ * If TBI compatibility was previously off,
+ * turn it on. For compatibility with a TBI link
+ * partner, we will store bad packets. Some
+ * frames have an additional byte on the end and
+ * will look like CRC errors to the hardware.
+ */
+ if (!e1000_tbi_sbp_enabled_82543(hw)) {
+ e1000_set_tbi_sbp_82543(hw, true);
+ rctl = E1000_READ_REG(hw, E1000_RCTL);
+ rctl |= E1000_RCTL_SBP;
+ E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+ }
+ }
+ }
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_check_for_fiber_link_82543 - Check for link (Fiber)
+ * @hw: pointer to the HW structure
+ *
+ * Checks for link up on the hardware. If link is not up and we have
+ * a signal, then we need to force link up.
+ **/
+STATIC s32 e1000_check_for_fiber_link_82543(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ u32 rxcw, ctrl, status;
+ s32 ret_val = E1000_SUCCESS;
+
+ DEBUGFUNC("e1000_check_for_fiber_link_82543");
+
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+ status = E1000_READ_REG(hw, E1000_STATUS);
+ rxcw = E1000_READ_REG(hw, E1000_RXCW);
+
+ /*
+ * If we don't have link (auto-negotiation failed or link partner
+ * cannot auto-negotiate), the cable is plugged in (we have signal),
+ * and our link partner is not trying to auto-negotiate with us (we
+ * are receiving idles or data), we need to force link up. We also
+ * need to give auto-negotiation time to complete, in case the cable
+ * was just plugged in. The autoneg_failed flag does this.
+ */
+ /* (ctrl & E1000_CTRL_SWDPIN1) == 0 == have signal */
+ if ((!(ctrl & E1000_CTRL_SWDPIN1)) &&
+ (!(status & E1000_STATUS_LU)) &&
+ (!(rxcw & E1000_RXCW_C))) {
+ if (!mac->autoneg_failed) {
+ mac->autoneg_failed = true;
+ ret_val = 0;
+ goto out;
+ }
+ DEBUGOUT("NOT RXing /C/, disable AutoNeg and force link.\n");
+
+ /* Disable auto-negotiation in the TXCW register */
+ E1000_WRITE_REG(hw, E1000_TXCW, (mac->txcw & ~E1000_TXCW_ANE));
+
+ /* Force link-up and also force full-duplex. */
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+ ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+ /* Configure Flow Control after forcing link up. */
+ ret_val = e1000_config_fc_after_link_up_generic(hw);
+ if (ret_val) {
+ DEBUGOUT("Error configuring flow control\n");
+ goto out;
+ }
+ } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
+ /*
+ * If we are forcing link and we are receiving /C/ ordered
+ * sets, re-enable auto-negotiation in the TXCW register
+ * and disable forced link in the Device Control register
+ * in an attempt to auto-negotiate with our link partner.
+ */ + DEBUGOUT("RXing /C/, enable AutoNeg and stop forcing link.\n"); + E1000_WRITE_REG(hw, E1000_TXCW, mac->txcw); + E1000_WRITE_REG(hw, E1000_CTRL, (ctrl & ~E1000_CTRL_SLU)); + + mac->serdes_has_link = true; + } + +out: + return ret_val; +} + +/** + * e1000_config_mac_to_phy_82543 - Configure MAC to PHY settings + * @hw: pointer to the HW structure + * + * For the 82543 silicon, we need to set the MAC to match the settings + * of the PHY, even if the PHY is auto-negotiating. + **/ +STATIC s32 e1000_config_mac_to_phy_82543(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val = E1000_SUCCESS; + u16 phy_data; + + DEBUGFUNC("e1000_config_mac_to_phy_82543"); + + if (!(hw->phy.ops.read_reg)) + goto out; + + /* Set the bits to force speed and duplex */ + ctrl = E1000_READ_REG(hw, E1000_CTRL); + ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + ctrl &= ~(E1000_CTRL_SPD_SEL | E1000_CTRL_ILOS); + + /* + * Set up duplex in the Device Control and Transmit Control + * registers depending on negotiated values. + */ + ret_val = hw->phy.ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); + if (ret_val) + goto out; + + ctrl &= ~E1000_CTRL_FD; + if (phy_data & M88E1000_PSSR_DPLX) + ctrl |= E1000_CTRL_FD; + + hw->mac.ops.config_collision_dist(hw); + + /* + * Set up speed in the Device Control register depending on + * negotiated values. + */ + if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) + ctrl |= E1000_CTRL_SPD_1000; + else if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_100MBS) + ctrl |= E1000_CTRL_SPD_100; + + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + +out: + return ret_val; +} + +/** + * e1000_write_vfta_82543 - Write value to VLAN filter table + * @hw: pointer to the HW structure + * @offset: the 32-bit offset in which to write the value to. + * @value: the 32-bit value to write at location offset. + * + * This writes a 32-bit value to a 32-bit offset in the VLAN filter + * table. + **/ +STATIC void e1000_write_vfta_82543(struct e1000_hw *hw, u32 offset, u32 value) +{ + u32 temp; + + DEBUGFUNC("e1000_write_vfta_82543"); + + if ((hw->mac.type == e1000_82544) && (offset & 1)) { + temp = E1000_READ_REG_ARRAY(hw, E1000_VFTA, offset - 1); + E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value); + E1000_WRITE_FLUSH(hw); + E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset - 1, temp); + E1000_WRITE_FLUSH(hw); + } else { + e1000_write_vfta_generic(hw, offset, value); + } +} + +/** + * e1000_led_on_82543 - Turn on SW controllable LED + * @hw: pointer to the HW structure + * + * Turns the SW defined LED on. + **/ +STATIC s32 e1000_led_on_82543(struct e1000_hw *hw) +{ + u32 ctrl = E1000_READ_REG(hw, E1000_CTRL); + + DEBUGFUNC("e1000_led_on_82543"); + + if (hw->mac.type == e1000_82544 && + hw->phy.media_type == e1000_media_type_copper) { + /* Clear SW-definable Pin 0 to turn on the LED */ + ctrl &= ~E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + } else { + /* Fiber 82544 and all 82543 use this method */ + ctrl |= E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + } + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + + return E1000_SUCCESS; +} + +/** + * e1000_led_off_82543 - Turn off SW controllable LED + * @hw: pointer to the HW structure + * + * Turns the SW defined LED off. 
+ **/ +STATIC s32 e1000_led_off_82543(struct e1000_hw *hw) +{ + u32 ctrl = E1000_READ_REG(hw, E1000_CTRL); + + DEBUGFUNC("e1000_led_off_82543"); + + if (hw->mac.type == e1000_82544 && + hw->phy.media_type == e1000_media_type_copper) { + /* Set SW-definable Pin 0 to turn off the LED */ + ctrl |= E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + } else { + ctrl &= ~E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + } + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + + return E1000_SUCCESS; +} + +/** + * e1000_clear_hw_cntrs_82543 - Clear device specific hardware counters + * @hw: pointer to the HW structure + * + * Clears the hardware counters by reading the counter registers. + **/ +STATIC void e1000_clear_hw_cntrs_82543(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_clear_hw_cntrs_82543"); + + e1000_clear_hw_cntrs_base_generic(hw); + + E1000_READ_REG(hw, E1000_PRC64); + E1000_READ_REG(hw, E1000_PRC127); + E1000_READ_REG(hw, E1000_PRC255); + E1000_READ_REG(hw, E1000_PRC511); + E1000_READ_REG(hw, E1000_PRC1023); + E1000_READ_REG(hw, E1000_PRC1522); + E1000_READ_REG(hw, E1000_PTC64); + E1000_READ_REG(hw, E1000_PTC127); + E1000_READ_REG(hw, E1000_PTC255); + E1000_READ_REG(hw, E1000_PTC511); + E1000_READ_REG(hw, E1000_PTC1023); + E1000_READ_REG(hw, E1000_PTC1522); + + E1000_READ_REG(hw, E1000_ALGNERRC); + E1000_READ_REG(hw, E1000_RXERRC); + E1000_READ_REG(hw, E1000_TNCRS); + E1000_READ_REG(hw, E1000_CEXTERR); + E1000_READ_REG(hw, E1000_TSCTC); + E1000_READ_REG(hw, E1000_TSCTFC); +} diff --git a/src/spdk/dpdk/drivers/net/e1000/base/e1000_82543.h b/src/spdk/dpdk/drivers/net/e1000/base/e1000_82543.h new file mode 100644 index 000000000..51a421190 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/e1000/base/e1000_82543.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001 - 2015 Intel Corporation + */ + +#ifndef _E1000_82543_H_ +#define _E1000_82543_H_ + +#define PHY_PREAMBLE 0xFFFFFFFF +#define PHY_PREAMBLE_SIZE 32 +#define PHY_SOF 0x1 +#define PHY_OP_READ 0x2 +#define PHY_OP_WRITE 0x1 +#define PHY_TURNAROUND 0x2 + +#define TBI_COMPAT_ENABLED 0x1 /* Global "knob" for the workaround */ +/* If TBI_COMPAT_ENABLED, then this is the current state (on/off) */ +#define TBI_SBP_ENABLED 0x2 + +void e1000_tbi_adjust_stats_82543(struct e1000_hw *hw, + struct e1000_hw_stats *stats, + u32 frame_len, u8 *mac_addr, + u32 max_frame_size); +void e1000_set_tbi_compatibility_82543(struct e1000_hw *hw, + bool state); +bool e1000_tbi_sbp_enabled_82543(struct e1000_hw *hw); + +#endif diff --git a/src/spdk/dpdk/drivers/net/e1000/base/e1000_82571.c b/src/spdk/dpdk/drivers/net/e1000/base/e1000_82571.c new file mode 100644 index 000000000..157b953cd --- /dev/null +++ b/src/spdk/dpdk/drivers/net/e1000/base/e1000_82571.c @@ -0,0 +1,2006 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001 - 2015 Intel Corporation + */ + +/* 82571EB Gigabit Ethernet Controller + * 82571EB Gigabit Ethernet Controller (Copper) + * 82571EB Gigabit Ethernet Controller (Fiber) + * 82571EB Dual Port Gigabit Mezzanine Adapter + * 82571EB Quad Port Gigabit Mezzanine Adapter + * 82571PT Gigabit PT Quad Port Server ExpressModule + * 82572EI Gigabit Ethernet Controller (Copper) + * 82572EI Gigabit Ethernet Controller (Fiber) + * 82572EI Gigabit Ethernet Controller + * 82573V Gigabit Ethernet Controller (Copper) + * 82573E Gigabit Ethernet Controller (Copper) + * 82573L Gigabit Ethernet Controller + * 82574L Gigabit Network Connection + * 82583V Gigabit Network Connection + */ + +#include "e1000_api.h" + +STATIC s32 
e1000_acquire_nvm_82571(struct e1000_hw *hw); +STATIC void e1000_release_nvm_82571(struct e1000_hw *hw); +STATIC s32 e1000_write_nvm_82571(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data); +STATIC s32 e1000_update_nvm_checksum_82571(struct e1000_hw *hw); +STATIC s32 e1000_validate_nvm_checksum_82571(struct e1000_hw *hw); +STATIC s32 e1000_get_cfg_done_82571(struct e1000_hw *hw); +STATIC s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, + bool active); +STATIC s32 e1000_reset_hw_82571(struct e1000_hw *hw); +STATIC s32 e1000_init_hw_82571(struct e1000_hw *hw); +STATIC void e1000_clear_vfta_82571(struct e1000_hw *hw); +STATIC bool e1000_check_mng_mode_82574(struct e1000_hw *hw); +STATIC s32 e1000_led_on_82574(struct e1000_hw *hw); +STATIC s32 e1000_setup_link_82571(struct e1000_hw *hw); +STATIC s32 e1000_setup_copper_link_82571(struct e1000_hw *hw); +STATIC s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw); +STATIC s32 e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw); +STATIC s32 e1000_valid_led_default_82571(struct e1000_hw *hw, u16 *data); +STATIC void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw); +STATIC s32 e1000_get_hw_semaphore_82571(struct e1000_hw *hw); +STATIC s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw); +STATIC s32 e1000_get_phy_id_82571(struct e1000_hw *hw); +STATIC void e1000_put_hw_semaphore_82571(struct e1000_hw *hw); +STATIC void e1000_put_hw_semaphore_82573(struct e1000_hw *hw); +STATIC s32 e1000_get_hw_semaphore_82574(struct e1000_hw *hw); +STATIC void e1000_put_hw_semaphore_82574(struct e1000_hw *hw); +STATIC s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw, + bool active); +STATIC s32 e1000_set_d3_lplu_state_82574(struct e1000_hw *hw, + bool active); +STATIC void e1000_initialize_hw_bits_82571(struct e1000_hw *hw); +STATIC s32 e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data); +STATIC s32 e1000_read_mac_addr_82571(struct e1000_hw *hw); +STATIC void e1000_power_down_phy_copper_82571(struct e1000_hw *hw); + +/** + * e1000_init_phy_params_82571 - Init PHY func ptrs. 
+ * @hw: pointer to the HW structure + **/ +STATIC s32 e1000_init_phy_params_82571(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + + DEBUGFUNC("e1000_init_phy_params_82571"); + + if (hw->phy.media_type != e1000_media_type_copper) { + phy->type = e1000_phy_none; + return E1000_SUCCESS; + } + + phy->addr = 1; + phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; + phy->reset_delay_us = 100; + + phy->ops.check_reset_block = e1000_check_reset_block_generic; + phy->ops.reset = e1000_phy_hw_reset_generic; + phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82571; + phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_generic; + phy->ops.power_up = e1000_power_up_phy_copper; + phy->ops.power_down = e1000_power_down_phy_copper_82571; + + switch (hw->mac.type) { + case e1000_82571: + case e1000_82572: + phy->type = e1000_phy_igp_2; + phy->ops.get_cfg_done = e1000_get_cfg_done_82571; + phy->ops.get_info = e1000_get_phy_info_igp; + phy->ops.check_polarity = e1000_check_polarity_igp; + phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp; + phy->ops.get_cable_length = e1000_get_cable_length_igp_2; + phy->ops.read_reg = e1000_read_phy_reg_igp; + phy->ops.write_reg = e1000_write_phy_reg_igp; + phy->ops.acquire = e1000_get_hw_semaphore_82571; + phy->ops.release = e1000_put_hw_semaphore_82571; + break; + case e1000_82573: + phy->type = e1000_phy_m88; + phy->ops.get_cfg_done = e1000_get_cfg_done_generic; + phy->ops.get_info = e1000_get_phy_info_m88; + phy->ops.check_polarity = e1000_check_polarity_m88; + phy->ops.commit = e1000_phy_sw_reset_generic; + phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88; + phy->ops.get_cable_length = e1000_get_cable_length_m88; + phy->ops.read_reg = e1000_read_phy_reg_m88; + phy->ops.write_reg = e1000_write_phy_reg_m88; + phy->ops.acquire = e1000_get_hw_semaphore_82571; + phy->ops.release = e1000_put_hw_semaphore_82571; + break; + case e1000_82574: + case e1000_82583: + E1000_MUTEX_INIT(&hw->dev_spec._82571.swflag_mutex); + + phy->type = e1000_phy_bm; + phy->ops.get_cfg_done = e1000_get_cfg_done_generic; + phy->ops.get_info = e1000_get_phy_info_m88; + phy->ops.check_polarity = e1000_check_polarity_m88; + phy->ops.commit = e1000_phy_sw_reset_generic; + phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88; + phy->ops.get_cable_length = e1000_get_cable_length_m88; + phy->ops.read_reg = e1000_read_phy_reg_bm2; + phy->ops.write_reg = e1000_write_phy_reg_bm2; + phy->ops.acquire = e1000_get_hw_semaphore_82574; + phy->ops.release = e1000_put_hw_semaphore_82574; + phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82574; + phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_82574; + break; + default: + return -E1000_ERR_PHY; + break; + } + + /* This can only be done after all function pointers are setup. */ + ret_val = e1000_get_phy_id_82571(hw); + if (ret_val) { + DEBUGOUT("Error getting PHY ID\n"); + return ret_val; + } + + /* Verify phy id */ + switch (hw->mac.type) { + case e1000_82571: + case e1000_82572: + if (phy->id != IGP01E1000_I_PHY_ID) + ret_val = -E1000_ERR_PHY; + break; + case e1000_82573: + if (phy->id != M88E1111_I_PHY_ID) + ret_val = -E1000_ERR_PHY; + break; + case e1000_82574: + case e1000_82583: + if (phy->id != BME1000_E_PHY_ID_R2) + ret_val = -E1000_ERR_PHY; + break; + default: + ret_val = -E1000_ERR_PHY; + break; + } + + if (ret_val) + DEBUGOUT1("PHY ID unknown: type = 0x%08x\n", phy->id); + + return ret_val; +} + +/** + * e1000_init_nvm_params_82571 - Init NVM func ptrs. 
+ * @hw: pointer to the HW structure + **/ +STATIC s32 e1000_init_nvm_params_82571(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 eecd = E1000_READ_REG(hw, E1000_EECD); + u16 size; + + DEBUGFUNC("e1000_init_nvm_params_82571"); + + nvm->opcode_bits = 8; + nvm->delay_usec = 1; + switch (nvm->override) { + case e1000_nvm_override_spi_large: + nvm->page_size = 32; + nvm->address_bits = 16; + break; + case e1000_nvm_override_spi_small: + nvm->page_size = 8; + nvm->address_bits = 8; + break; + default: + nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8; + nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8; + break; + } + + switch (hw->mac.type) { + case e1000_82573: + case e1000_82574: + case e1000_82583: + if (((eecd >> 15) & 0x3) == 0x3) { + nvm->type = e1000_nvm_flash_hw; + nvm->word_size = 2048; + /* Autonomous Flash update bit must be cleared due + * to Flash update issue. + */ + eecd &= ~E1000_EECD_AUPDEN; + E1000_WRITE_REG(hw, E1000_EECD, eecd); + break; + } + /* Fall Through */ + default: + nvm->type = e1000_nvm_eeprom_spi; + size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >> + E1000_EECD_SIZE_EX_SHIFT); + /* Added to a constant, "size" becomes the left-shift value + * for setting word_size. + */ + size += NVM_WORD_SIZE_BASE_SHIFT; + + /* EEPROM access above 16k is unsupported */ + if (size > 14) + size = 14; + nvm->word_size = 1 << size; + break; + } + + /* Function Pointers */ + switch (hw->mac.type) { + case e1000_82574: + case e1000_82583: + nvm->ops.acquire = e1000_get_hw_semaphore_82574; + nvm->ops.release = e1000_put_hw_semaphore_82574; + break; + default: + nvm->ops.acquire = e1000_acquire_nvm_82571; + nvm->ops.release = e1000_release_nvm_82571; + break; + } + nvm->ops.read = e1000_read_nvm_eerd; + nvm->ops.update = e1000_update_nvm_checksum_82571; + nvm->ops.validate = e1000_validate_nvm_checksum_82571; + nvm->ops.valid_led_default = e1000_valid_led_default_82571; + nvm->ops.write = e1000_write_nvm_82571; + + return E1000_SUCCESS; +} + +/** + * e1000_init_mac_params_82571 - Init MAC func ptrs. 
+ * @hw: pointer to the HW structure + **/ +STATIC s32 e1000_init_mac_params_82571(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 swsm = 0; + u32 swsm2 = 0; + bool force_clear_smbi = false; + + DEBUGFUNC("e1000_init_mac_params_82571"); + + /* Set media type and media-dependent function pointers */ + switch (hw->device_id) { + case E1000_DEV_ID_82571EB_FIBER: + case E1000_DEV_ID_82572EI_FIBER: + case E1000_DEV_ID_82571EB_QUAD_FIBER: + hw->phy.media_type = e1000_media_type_fiber; + mac->ops.setup_physical_interface = + e1000_setup_fiber_serdes_link_82571; + mac->ops.check_for_link = e1000_check_for_fiber_link_generic; + mac->ops.get_link_up_info = + e1000_get_speed_and_duplex_fiber_serdes_generic; + break; + case E1000_DEV_ID_82571EB_SERDES: + case E1000_DEV_ID_82571EB_SERDES_DUAL: + case E1000_DEV_ID_82571EB_SERDES_QUAD: + case E1000_DEV_ID_82572EI_SERDES: + hw->phy.media_type = e1000_media_type_internal_serdes; + mac->ops.setup_physical_interface = + e1000_setup_fiber_serdes_link_82571; + mac->ops.check_for_link = e1000_check_for_serdes_link_82571; + mac->ops.get_link_up_info = + e1000_get_speed_and_duplex_fiber_serdes_generic; + break; + default: + hw->phy.media_type = e1000_media_type_copper; + mac->ops.setup_physical_interface = + e1000_setup_copper_link_82571; + mac->ops.check_for_link = e1000_check_for_copper_link_generic; + mac->ops.get_link_up_info = + e1000_get_speed_and_duplex_copper_generic; + break; + } + + /* Set mta register count */ + mac->mta_reg_count = 128; + /* Set rar entry count */ + mac->rar_entry_count = E1000_RAR_ENTRIES; + /* Set if part includes ASF firmware */ + mac->asf_firmware_present = true; + /* Adaptive IFS supported */ + mac->adaptive_ifs = true; + + /* Function pointers */ + + /* bus type/speed/width */ + mac->ops.get_bus_info = e1000_get_bus_info_pcie_generic; + /* reset */ + mac->ops.reset_hw = e1000_reset_hw_82571; + /* hw initialization */ + mac->ops.init_hw = e1000_init_hw_82571; + /* link setup */ + mac->ops.setup_link = e1000_setup_link_82571; + /* multicast address update */ + mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic; + /* writing VFTA */ + mac->ops.write_vfta = e1000_write_vfta_generic; + /* clearing VFTA */ + mac->ops.clear_vfta = e1000_clear_vfta_82571; + /* read mac address */ + mac->ops.read_mac_addr = e1000_read_mac_addr_82571; + /* ID LED init */ + mac->ops.id_led_init = e1000_id_led_init_generic; + /* setup LED */ + mac->ops.setup_led = e1000_setup_led_generic; + /* cleanup LED */ + mac->ops.cleanup_led = e1000_cleanup_led_generic; + /* turn off LED */ + mac->ops.led_off = e1000_led_off_generic; + /* clear hardware counters */ + mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_82571; + + /* MAC-specific function pointers */ + switch (hw->mac.type) { + case e1000_82573: + mac->ops.set_lan_id = e1000_set_lan_id_single_port; + mac->ops.check_mng_mode = e1000_check_mng_mode_generic; + mac->ops.led_on = e1000_led_on_generic; + mac->ops.blink_led = e1000_blink_led_generic; + + /* FWSM register */ + mac->has_fwsm = true; + /* ARC supported; valid only if manageability features are + * enabled. 
+ */ + mac->arc_subsystem_valid = !!(E1000_READ_REG(hw, E1000_FWSM) & + E1000_FWSM_MODE_MASK); + break; + case e1000_82574: + case e1000_82583: + mac->ops.set_lan_id = e1000_set_lan_id_single_port; + mac->ops.check_mng_mode = e1000_check_mng_mode_82574; + mac->ops.led_on = e1000_led_on_82574; + break; + default: + mac->ops.check_mng_mode = e1000_check_mng_mode_generic; + mac->ops.led_on = e1000_led_on_generic; + mac->ops.blink_led = e1000_blink_led_generic; + + /* FWSM register */ + mac->has_fwsm = true; + break; + } + + /* Ensure that the inter-port SWSM.SMBI lock bit is clear before + * first NVM or PHY access. This should be done for single-port + * devices, and for one port only on dual-port devices so that + * for those devices we can still use the SMBI lock to synchronize + * inter-port accesses to the PHY & NVM. + */ + switch (hw->mac.type) { + case e1000_82571: + case e1000_82572: + swsm2 = E1000_READ_REG(hw, E1000_SWSM2); + + if (!(swsm2 & E1000_SWSM2_LOCK)) { + /* Only do this for the first interface on this card */ + E1000_WRITE_REG(hw, E1000_SWSM2, swsm2 | + E1000_SWSM2_LOCK); + force_clear_smbi = true; + } else { + force_clear_smbi = false; + } + break; + default: + force_clear_smbi = true; + break; + } + + if (force_clear_smbi) { + /* Make sure SWSM.SMBI is clear */ + swsm = E1000_READ_REG(hw, E1000_SWSM); + if (swsm & E1000_SWSM_SMBI) { + /* This bit should not be set on a first interface, and + * indicates that the bootagent or EFI code has + * improperly left this bit enabled + */ + DEBUGOUT("Please update your 82571 Bootagent\n"); + } + E1000_WRITE_REG(hw, E1000_SWSM, swsm & ~E1000_SWSM_SMBI); + } + + /* Initialze device specific counter of SMBI acquisition timeouts. */ + hw->dev_spec._82571.smb_counter = 0; + + return E1000_SUCCESS; +} + +/** + * e1000_init_function_pointers_82571 - Init func ptrs. + * @hw: pointer to the HW structure + * + * Called to initialize all function pointers and parameters. + **/ +void e1000_init_function_pointers_82571(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_init_function_pointers_82571"); + + hw->mac.ops.init_params = e1000_init_mac_params_82571; + hw->nvm.ops.init_params = e1000_init_nvm_params_82571; + hw->phy.ops.init_params = e1000_init_phy_params_82571; +} + +/** + * e1000_get_phy_id_82571 - Retrieve the PHY ID and revision + * @hw: pointer to the HW structure + * + * Reads the PHY registers and stores the PHY ID and possibly the PHY + * revision in the hardware structure. + **/ +STATIC s32 e1000_get_phy_id_82571(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_id = 0; + + DEBUGFUNC("e1000_get_phy_id_82571"); + + switch (hw->mac.type) { + case e1000_82571: + case e1000_82572: + /* The 82571 firmware may still be configuring the PHY. + * In this case, we cannot access the PHY until the + * configuration is done. So we explicitly set the + * PHY ID. 
+ */ + phy->id = IGP01E1000_I_PHY_ID; + break; + case e1000_82573: + return e1000_get_phy_id(hw); + break; + case e1000_82574: + case e1000_82583: + ret_val = phy->ops.read_reg(hw, PHY_ID1, &phy_id); + if (ret_val) + return ret_val; + + phy->id = (u32)(phy_id << 16); + usec_delay(20); + ret_val = phy->ops.read_reg(hw, PHY_ID2, &phy_id); + if (ret_val) + return ret_val; + + phy->id |= (u32)(phy_id); + phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK); + break; + default: + return -E1000_ERR_PHY; + break; + } + + return E1000_SUCCESS; +} + +/** + * e1000_get_hw_semaphore_82571 - Acquire hardware semaphore + * @hw: pointer to the HW structure + * + * Acquire the HW semaphore to access the PHY or NVM + **/ +STATIC s32 e1000_get_hw_semaphore_82571(struct e1000_hw *hw) +{ + u32 swsm; + s32 sw_timeout = hw->nvm.word_size + 1; + s32 fw_timeout = hw->nvm.word_size + 1; + s32 i = 0; + + DEBUGFUNC("e1000_get_hw_semaphore_82571"); + + /* If we have timedout 3 times on trying to acquire + * the inter-port SMBI semaphore, there is old code + * operating on the other port, and it is not + * releasing SMBI. Modify the number of times that + * we try for the semaphore to interwork with this + * older code. + */ + if (hw->dev_spec._82571.smb_counter > 2) + sw_timeout = 1; + + /* Get the SW semaphore */ + while (i < sw_timeout) { + swsm = E1000_READ_REG(hw, E1000_SWSM); + if (!(swsm & E1000_SWSM_SMBI)) + break; + + usec_delay(50); + i++; + } + + if (i == sw_timeout) { + DEBUGOUT("Driver can't access device - SMBI bit is set.\n"); + hw->dev_spec._82571.smb_counter++; + } + /* Get the FW semaphore. */ + for (i = 0; i < fw_timeout; i++) { + swsm = E1000_READ_REG(hw, E1000_SWSM); + E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_SWESMBI); + + /* Semaphore acquired if bit latched */ + if (E1000_READ_REG(hw, E1000_SWSM) & E1000_SWSM_SWESMBI) + break; + + usec_delay(50); + } + + if (i == fw_timeout) { + /* Release semaphores */ + e1000_put_hw_semaphore_82571(hw); + DEBUGOUT("Driver can't access the NVM\n"); + return -E1000_ERR_NVM; + } + + return E1000_SUCCESS; +} + +/** + * e1000_put_hw_semaphore_82571 - Release hardware semaphore + * @hw: pointer to the HW structure + * + * Release hardware semaphore used to access the PHY or NVM + **/ +STATIC void e1000_put_hw_semaphore_82571(struct e1000_hw *hw) +{ + u32 swsm; + + DEBUGFUNC("e1000_put_hw_semaphore_generic"); + + swsm = E1000_READ_REG(hw, E1000_SWSM); + + swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI); + + E1000_WRITE_REG(hw, E1000_SWSM, swsm); +} + +/** + * e1000_get_hw_semaphore_82573 - Acquire hardware semaphore + * @hw: pointer to the HW structure + * + * Acquire the HW semaphore during reset. 
+ * + **/ +STATIC s32 e1000_get_hw_semaphore_82573(struct e1000_hw *hw) +{ + u32 extcnf_ctrl; + s32 i = 0; + + DEBUGFUNC("e1000_get_hw_semaphore_82573"); + + extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL); + do { + extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP; + E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl); + extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL); + + if (extcnf_ctrl & E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP) + break; + + msec_delay(2); + i++; + } while (i < MDIO_OWNERSHIP_TIMEOUT); + + if (i == MDIO_OWNERSHIP_TIMEOUT) { + /* Release semaphores */ + e1000_put_hw_semaphore_82573(hw); + DEBUGOUT("Driver can't access the PHY\n"); + return -E1000_ERR_PHY; + } + + return E1000_SUCCESS; +} + +/** + * e1000_put_hw_semaphore_82573 - Release hardware semaphore + * @hw: pointer to the HW structure + * + * Release hardware semaphore used during reset. + * + **/ +STATIC void e1000_put_hw_semaphore_82573(struct e1000_hw *hw) +{ + u32 extcnf_ctrl; + + DEBUGFUNC("e1000_put_hw_semaphore_82573"); + + extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL); + extcnf_ctrl &= ~E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP; + E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl); +} + +/** + * e1000_get_hw_semaphore_82574 - Acquire hardware semaphore + * @hw: pointer to the HW structure + * + * Acquire the HW semaphore to access the PHY or NVM. + * + **/ +STATIC s32 e1000_get_hw_semaphore_82574(struct e1000_hw *hw) +{ + s32 ret_val; + + DEBUGFUNC("e1000_get_hw_semaphore_82574"); + + E1000_MUTEX_LOCK(&hw->dev_spec._82571.swflag_mutex); + ret_val = e1000_get_hw_semaphore_82573(hw); + if (ret_val) + E1000_MUTEX_UNLOCK(&hw->dev_spec._82571.swflag_mutex); + return ret_val; +} + +/** + * e1000_put_hw_semaphore_82574 - Release hardware semaphore + * @hw: pointer to the HW structure + * + * Release hardware semaphore used to access the PHY or NVM + * + **/ +STATIC void e1000_put_hw_semaphore_82574(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_put_hw_semaphore_82574"); + + e1000_put_hw_semaphore_82573(hw); + E1000_MUTEX_UNLOCK(&hw->dev_spec._82571.swflag_mutex); +} + +/** + * e1000_set_d0_lplu_state_82574 - Set Low Power Linkup D0 state + * @hw: pointer to the HW structure + * @active: true to enable LPLU, false to disable + * + * Sets the LPLU D0 state according to the active flag. + * LPLU will not be activated unless the + * device autonegotiation advertisement meets standards of + * either 10 or 10/100 or 10/100/1000 at all duplexes. + * This is a function pointer entry point only called by + * PHY setup routines. + **/ +STATIC s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw, bool active) +{ + u32 data = E1000_READ_REG(hw, E1000_POEMB); + + DEBUGFUNC("e1000_set_d0_lplu_state_82574"); + + if (active) + data |= E1000_PHY_CTRL_D0A_LPLU; + else + data &= ~E1000_PHY_CTRL_D0A_LPLU; + + E1000_WRITE_REG(hw, E1000_POEMB, data); + return E1000_SUCCESS; +} + +/** + * e1000_set_d3_lplu_state_82574 - Sets low power link up state for D3 + * @hw: pointer to the HW structure + * @active: boolean used to enable/disable lplu + * + * The low power link up (lplu) state is set to the power management level D3 + * when active is true, else clear lplu for D3. LPLU + * is used during Dx states where the power conservation is most important. + * During driver activity, SmartSpeed should be enabled so performance is + * maintained. 
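+ *
+ * Concretely, when @active is true the E1000_PHY_CTRL_NOND0A_LPLU bit
+ * in POEMB is set only if hw->phy.autoneg_advertised is
+ * E1000_ALL_SPEED_DUPLEX, E1000_ALL_NOT_GIG or E1000_ALL_10_SPEED;
+ * otherwise the bit is left unchanged. When @active is false the bit
+ * is always cleared.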
+ **/ +STATIC s32 e1000_set_d3_lplu_state_82574(struct e1000_hw *hw, bool active) +{ + u32 data = E1000_READ_REG(hw, E1000_POEMB); + + DEBUGFUNC("e1000_set_d3_lplu_state_82574"); + + if (!active) { + data &= ~E1000_PHY_CTRL_NOND0A_LPLU; + } else if ((hw->phy.autoneg_advertised == E1000_ALL_SPEED_DUPLEX) || + (hw->phy.autoneg_advertised == E1000_ALL_NOT_GIG) || + (hw->phy.autoneg_advertised == E1000_ALL_10_SPEED)) { + data |= E1000_PHY_CTRL_NOND0A_LPLU; + } + + E1000_WRITE_REG(hw, E1000_POEMB, data); + return E1000_SUCCESS; +} + +/** + * e1000_acquire_nvm_82571 - Request for access to the EEPROM + * @hw: pointer to the HW structure + * + * To gain access to the EEPROM, first we must obtain a hardware semaphore. + * Then for non-82573 hardware, set the EEPROM access request bit and wait + * for EEPROM access grant bit. If the access grant bit is not set, release + * hardware semaphore. + **/ +STATIC s32 e1000_acquire_nvm_82571(struct e1000_hw *hw) +{ + s32 ret_val; + + DEBUGFUNC("e1000_acquire_nvm_82571"); + + ret_val = e1000_get_hw_semaphore_82571(hw); + if (ret_val) + return ret_val; + + switch (hw->mac.type) { + case e1000_82573: + break; + default: + ret_val = e1000_acquire_nvm_generic(hw); + break; + } + + if (ret_val) + e1000_put_hw_semaphore_82571(hw); + + return ret_val; +} + +/** + * e1000_release_nvm_82571 - Release exclusive access to EEPROM + * @hw: pointer to the HW structure + * + * Stop any current commands to the EEPROM and clear the EEPROM request bit. + **/ +STATIC void e1000_release_nvm_82571(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_release_nvm_82571"); + + e1000_release_nvm_generic(hw); + e1000_put_hw_semaphore_82571(hw); +} + +/** + * e1000_write_nvm_82571 - Write to EEPROM using appropriate interface + * @hw: pointer to the HW structure + * @offset: offset within the EEPROM to be written to + * @words: number of words to write + * @data: 16 bit word(s) to be written to the EEPROM + * + * For non-82573 silicon, write data to EEPROM at offset using SPI interface. + * + * If e1000_update_nvm_checksum is not called after this function, the + * EEPROM will most likely contain an invalid checksum. + **/ +STATIC s32 e1000_write_nvm_82571(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + s32 ret_val; + + DEBUGFUNC("e1000_write_nvm_82571"); + + switch (hw->mac.type) { + case e1000_82573: + case e1000_82574: + case e1000_82583: + ret_val = e1000_write_nvm_eewr_82571(hw, offset, words, data); + break; + case e1000_82571: + case e1000_82572: + ret_val = e1000_write_nvm_spi(hw, offset, words, data); + break; + default: + ret_val = -E1000_ERR_NVM; + break; + } + + return ret_val; +} + +/** + * e1000_update_nvm_checksum_82571 - Update EEPROM checksum + * @hw: pointer to the HW structure + * + * Updates the EEPROM checksum by reading/adding each word of the EEPROM + * up to the checksum. Then calculates the EEPROM checksum and writes the + * value to the EEPROM. + **/ +STATIC s32 e1000_update_nvm_checksum_82571(struct e1000_hw *hw) +{ + u32 eecd; + s32 ret_val; + u16 i; + + DEBUGFUNC("e1000_update_nvm_checksum_82571"); + + ret_val = e1000_update_nvm_checksum_generic(hw); + if (ret_val) + return ret_val; + + /* If our nvm is an EEPROM, then we're done + * otherwise, commit the checksum to the flash NVM. + */ + if (hw->nvm.type != e1000_nvm_flash_hw) + return E1000_SUCCESS; + + /* Check for pending operations. 
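+ * The flash update (FLUPD) bit is polled for at most
+ * E1000_FLASH_UPDATES iterations of 1 ms each, both here and again
+ * after the commit below; hitting either limit returns
+ * -E1000_ERR_NVM.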
*/ + for (i = 0; i < E1000_FLASH_UPDATES; i++) { + msec_delay(1); + if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_FLUPD)) + break; + } + + if (i == E1000_FLASH_UPDATES) + return -E1000_ERR_NVM; + + /* Reset the firmware if using STM opcode. */ + if ((E1000_READ_REG(hw, E1000_FLOP) & 0xFF00) == E1000_STM_OPCODE) { + /* The enabling of and the actual reset must be done + * in two write cycles. + */ + E1000_WRITE_REG(hw, E1000_HICR, E1000_HICR_FW_RESET_ENABLE); + E1000_WRITE_FLUSH(hw); + E1000_WRITE_REG(hw, E1000_HICR, E1000_HICR_FW_RESET); + } + + /* Commit the write to flash */ + eecd = E1000_READ_REG(hw, E1000_EECD) | E1000_EECD_FLUPD; + E1000_WRITE_REG(hw, E1000_EECD, eecd); + + for (i = 0; i < E1000_FLASH_UPDATES; i++) { + msec_delay(1); + if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_FLUPD)) + break; + } + + if (i == E1000_FLASH_UPDATES) + return -E1000_ERR_NVM; + + return E1000_SUCCESS; +} + +/** + * e1000_validate_nvm_checksum_82571 - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Calculates the EEPROM checksum by reading/adding each word of the EEPROM + * and then verifies that the sum of the EEPROM is equal to 0xBABA. + **/ +STATIC s32 e1000_validate_nvm_checksum_82571(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_validate_nvm_checksum_82571"); + + if (hw->nvm.type == e1000_nvm_flash_hw) + e1000_fix_nvm_checksum_82571(hw); + + return e1000_validate_nvm_checksum_generic(hw); +} + +/** + * e1000_write_nvm_eewr_82571 - Write to EEPROM for 82573 silicon + * @hw: pointer to the HW structure + * @offset: offset within the EEPROM to be written to + * @words: number of words to write + * @data: 16 bit word(s) to be written to the EEPROM + * + * After checking for invalid values, poll the EEPROM to ensure the previous + * command has completed before trying to write the next word. After write + * poll for completion. + * + * If e1000_update_nvm_checksum is not called after this function, the + * EEPROM will most likely contain an invalid checksum. + **/ +STATIC s32 e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 i, eewr = 0; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_write_nvm_eewr_82571"); + + /* A check for invalid values: offset too large, too many words, + * and not enough words. + */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { + DEBUGOUT("nvm parameter(s) out of bounds\n"); + return -E1000_ERR_NVM; + } + + for (i = 0; i < words; i++) { + eewr = ((data[i] << E1000_NVM_RW_REG_DATA) | + ((offset + i) << E1000_NVM_RW_ADDR_SHIFT) | + E1000_NVM_RW_REG_START); + + ret_val = e1000_poll_eerd_eewr_done(hw, E1000_NVM_POLL_WRITE); + if (ret_val) + break; + + E1000_WRITE_REG(hw, E1000_EEWR, eewr); + + ret_val = e1000_poll_eerd_eewr_done(hw, E1000_NVM_POLL_WRITE); + if (ret_val) + break; + } + + return ret_val; +} + +/** + * e1000_get_cfg_done_82571 - Poll for configuration done + * @hw: pointer to the HW structure + * + * Reads the management control register for the config done bit to be set. 
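+ * Polls E1000_EEMNGCTL for E1000_NVM_CFG_DONE_PORT_0, up to
+ * PHY_CFG_TIMEOUT iterations of 1 ms each, and returns
+ * -E1000_ERR_RESET if the bit never latches.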
+ **/ +STATIC s32 e1000_get_cfg_done_82571(struct e1000_hw *hw) +{ + s32 timeout = PHY_CFG_TIMEOUT; + + DEBUGFUNC("e1000_get_cfg_done_82571"); + + while (timeout) { + if (E1000_READ_REG(hw, E1000_EEMNGCTL) & + E1000_NVM_CFG_DONE_PORT_0) + break; + msec_delay(1); + timeout--; + } + if (!timeout) { + DEBUGOUT("MNG configuration cycle has not completed.\n"); + return -E1000_ERR_RESET; + } + + return E1000_SUCCESS; +} + +/** + * e1000_set_d0_lplu_state_82571 - Set Low Power Linkup D0 state + * @hw: pointer to the HW structure + * @active: true to enable LPLU, false to disable + * + * Sets the LPLU D0 state according to the active flag. When activating LPLU + * this function also disables smart speed and vice versa. LPLU will not be + * activated unless the device autonegotiation advertisement meets standards + * of either 10 or 10/100 or 10/100/1000 at all duplexes. This is a function + * pointer entry point only called by PHY setup routines. + **/ +STATIC s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, bool active) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + + DEBUGFUNC("e1000_set_d0_lplu_state_82571"); + + if (!(phy->ops.read_reg)) + return E1000_SUCCESS; + + ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data); + if (ret_val) + return ret_val; + + if (active) { + data |= IGP02E1000_PM_D0_LPLU; + ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, + data); + if (ret_val) + return ret_val; + + /* When LPLU is enabled, we should disable SmartSpeed */ + ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + } else { + data &= ~IGP02E1000_PM_D0_LPLU; + ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, + data); + /* LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. + */ + if (phy->smart_speed == e1000_smart_speed_on) { + ret_val = phy->ops.read_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data |= IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + } else if (phy->smart_speed == e1000_smart_speed_off) { + ret_val = phy->ops.read_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + } + } + + return E1000_SUCCESS; +} + +/** + * e1000_reset_hw_82571 - Reset hardware + * @hw: pointer to the HW structure + * + * This resets the hardware into a known state. + **/ +STATIC s32 e1000_reset_hw_82571(struct e1000_hw *hw) +{ + u32 ctrl, ctrl_ext, eecd, tctl; + s32 ret_val; + + DEBUGFUNC("e1000_reset_hw_82571"); + + /* Prevent the PCI-E bus from sticking if there is no TLP connection + * on the last TLP read/write transaction when MAC is reset. 
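+ * e1000_disable_pcie_master_generic() requests the master disable and
+ * polls for outstanding requests to drain; if that polling times out
+ * the failure is only logged and the reset continues.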
+ */ + ret_val = e1000_disable_pcie_master_generic(hw); + if (ret_val) + DEBUGOUT("PCI-E Master disable polling has failed.\n"); + + DEBUGOUT("Masking off all interrupts\n"); + E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); + + E1000_WRITE_REG(hw, E1000_RCTL, 0); + tctl = E1000_READ_REG(hw, E1000_TCTL); + tctl &= ~E1000_TCTL_EN; + E1000_WRITE_REG(hw, E1000_TCTL, tctl); + E1000_WRITE_FLUSH(hw); + + msec_delay(10); + + /* Must acquire the MDIO ownership before MAC reset. + * Ownership defaults to firmware after a reset. + */ + switch (hw->mac.type) { + case e1000_82573: + ret_val = e1000_get_hw_semaphore_82573(hw); + break; + case e1000_82574: + case e1000_82583: + ret_val = e1000_get_hw_semaphore_82574(hw); + break; + default: + break; + } + + ctrl = E1000_READ_REG(hw, E1000_CTRL); + + DEBUGOUT("Issuing a global reset to MAC\n"); + E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_RST); + + /* Must release MDIO ownership and mutex after MAC reset. */ + switch (hw->mac.type) { + case e1000_82573: + /* Release mutex only if the hw semaphore is acquired */ + if (!ret_val) + e1000_put_hw_semaphore_82573(hw); + break; + case e1000_82574: + case e1000_82583: + /* Release mutex only if the hw semaphore is acquired */ + if (!ret_val) + e1000_put_hw_semaphore_82574(hw); + break; + default: + break; + } + + if (hw->nvm.type == e1000_nvm_flash_hw) { + usec_delay(10); + ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_EE_RST; + E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); + E1000_WRITE_FLUSH(hw); + } + + ret_val = e1000_get_auto_rd_done_generic(hw); + if (ret_val) + /* We don't want to continue accessing MAC registers. */ + return ret_val; + + /* Phy configuration from NVM just starts after EECD_AUTO_RD is set. + * Need to wait for Phy configuration completion before accessing + * NVM and Phy. + */ + + switch (hw->mac.type) { + case e1000_82571: + case e1000_82572: + /* REQ and GNT bits need to be cleared when using AUTO_RD + * to access the EEPROM. + */ + eecd = E1000_READ_REG(hw, E1000_EECD); + eecd &= ~(E1000_EECD_REQ | E1000_EECD_GNT); + E1000_WRITE_REG(hw, E1000_EECD, eecd); + break; + case e1000_82573: + case e1000_82574: + case e1000_82583: + msec_delay(25); + break; + default: + break; + } + + /* Clear any pending interrupt events. */ + E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); + E1000_READ_REG(hw, E1000_ICR); + + if (hw->mac.type == e1000_82571) { + /* Install any alternate MAC address into RAR0 */ + ret_val = e1000_check_alt_mac_addr_generic(hw); + if (ret_val) + return ret_val; + + e1000_set_laa_state_82571(hw, true); + } + + /* Reinitialize the 82571 serdes link state machine */ + if (hw->phy.media_type == e1000_media_type_internal_serdes) + hw->mac.serdes_link_state = e1000_serdes_link_down; + + return E1000_SUCCESS; +} + +/** + * e1000_init_hw_82571 - Initialize hardware + * @hw: pointer to the HW structure + * + * This inits the hardware readying it for operation. 
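+ *
+ * Illustrative call order (a hypothetical caller, not code taken from
+ * this file): the MAC is normally reset first and then initialized
+ * through the ops table:
+ *
+ *	hw->mac.ops.reset_hw(hw);	-> e1000_reset_hw_82571()
+ *	hw->mac.ops.init_hw(hw);	-> this function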
+ **/ +STATIC s32 e1000_init_hw_82571(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 reg_data; + s32 ret_val; + u16 i, rar_count = mac->rar_entry_count; + + DEBUGFUNC("e1000_init_hw_82571"); + + e1000_initialize_hw_bits_82571(hw); + + /* Initialize identification LED */ + ret_val = mac->ops.id_led_init(hw); + /* An error is not fatal and we should not stop init due to this */ + if (ret_val) + DEBUGOUT("Error initializing identification LED\n"); + + /* Disabling VLAN filtering */ + DEBUGOUT("Initializing the IEEE VLAN\n"); + mac->ops.clear_vfta(hw); + + /* Setup the receive address. + * If, however, a locally administered address was assigned to the + * 82571, we must reserve a RAR for it to work around an issue where + * resetting one port will reload the MAC on the other port. + */ + if (e1000_get_laa_state_82571(hw)) + rar_count--; + e1000_init_rx_addrs_generic(hw, rar_count); + + /* Zero out the Multicast HASH table */ + DEBUGOUT("Zeroing the MTA\n"); + for (i = 0; i < mac->mta_reg_count; i++) + E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); + + /* Setup link and flow control */ + ret_val = mac->ops.setup_link(hw); + + /* Set the transmit descriptor write-back policy */ + reg_data = E1000_READ_REG(hw, E1000_TXDCTL(0)); + reg_data = ((reg_data & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC); + E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg_data); + + /* ...for both queues. */ + switch (mac->type) { + case e1000_82573: + e1000_enable_tx_pkt_filtering_generic(hw); + /* fall through */ + case e1000_82574: + case e1000_82583: + reg_data = E1000_READ_REG(hw, E1000_GCR); + reg_data |= E1000_GCR_L1_ACT_WITHOUT_L0S_RX; + E1000_WRITE_REG(hw, E1000_GCR, reg_data); + break; + default: + reg_data = E1000_READ_REG(hw, E1000_TXDCTL(1)); + reg_data = ((reg_data & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB | + E1000_TXDCTL_COUNT_DESC); + E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg_data); + break; + } + + /* Clear all of the statistics registers (clear on read). It is + * important that we do this after we have tried to establish link + * because the symbol error count will increment wildly if there + * is no link. + */ + e1000_clear_hw_cntrs_82571(hw); + + /* MSI-X configure for 82574 */ + if (mac->type == e1000_82574) + E1000_WRITE_REG(hw, E1000_IVAR, + (E1000_IVAR_INT_ALLOC_VALID << 16)); + + return ret_val; +} + +/** + * e1000_initialize_hw_bits_82571 - Initialize hardware-dependent bits + * @hw: pointer to the HW structure + * + * Initializes required hardware-dependent bits needed for normal operation. 
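+ *
+ * Every block below is a read-modify-write of a single register, e.g.:
+ *
+ *	reg = E1000_READ_REG(hw, E1000_TXDCTL(0));
+ *	reg |= (1 << 22);
+ *	E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg);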
+ **/ +STATIC void e1000_initialize_hw_bits_82571(struct e1000_hw *hw) +{ + u32 reg; + + DEBUGFUNC("e1000_initialize_hw_bits_82571"); + + /* Transmit Descriptor Control 0 */ + reg = E1000_READ_REG(hw, E1000_TXDCTL(0)); + reg |= (1 << 22); + E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg); + + /* Transmit Descriptor Control 1 */ + reg = E1000_READ_REG(hw, E1000_TXDCTL(1)); + reg |= (1 << 22); + E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg); + + /* Transmit Arbitration Control 0 */ + reg = E1000_READ_REG(hw, E1000_TARC(0)); + reg &= ~(0xF << 27); /* 30:27 */ + switch (hw->mac.type) { + case e1000_82571: + case e1000_82572: + reg |= (1 << 23) | (1 << 24) | (1 << 25) | (1 << 26); + break; + case e1000_82574: + case e1000_82583: + reg |= (1 << 26); + break; + default: + break; + } + E1000_WRITE_REG(hw, E1000_TARC(0), reg); + + /* Transmit Arbitration Control 1 */ + reg = E1000_READ_REG(hw, E1000_TARC(1)); + switch (hw->mac.type) { + case e1000_82571: + case e1000_82572: + reg &= ~((1 << 29) | (1 << 30)); + reg |= (1 << 22) | (1 << 24) | (1 << 25) | (1 << 26); + if (E1000_READ_REG(hw, E1000_TCTL) & E1000_TCTL_MULR) + reg &= ~(1 << 28); + else + reg |= (1 << 28); + E1000_WRITE_REG(hw, E1000_TARC(1), reg); + break; + default: + break; + } + + /* Device Control */ + switch (hw->mac.type) { + case e1000_82573: + case e1000_82574: + case e1000_82583: + reg = E1000_READ_REG(hw, E1000_CTRL); + reg &= ~(1 << 29); + E1000_WRITE_REG(hw, E1000_CTRL, reg); + break; + default: + break; + } + + /* Extended Device Control */ + switch (hw->mac.type) { + case e1000_82573: + case e1000_82574: + case e1000_82583: + reg = E1000_READ_REG(hw, E1000_CTRL_EXT); + reg &= ~(1 << 23); + reg |= (1 << 22); + E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg); + break; + default: + break; + } + + if (hw->mac.type == e1000_82571) { + reg = E1000_READ_REG(hw, E1000_PBA_ECC); + reg |= E1000_PBA_ECC_CORR_EN; + E1000_WRITE_REG(hw, E1000_PBA_ECC, reg); + } + + /* Workaround for hardware errata. + * Ensure that DMA Dynamic Clock gating is disabled on 82571 and 82572 + */ + if ((hw->mac.type == e1000_82571) || + (hw->mac.type == e1000_82572)) { + reg = E1000_READ_REG(hw, E1000_CTRL_EXT); + reg &= ~E1000_CTRL_EXT_DMA_DYN_CLK_EN; + E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg); + } + + /* Disable IPv6 extension header parsing because some malformed + * IPv6 headers can hang the Rx. + */ + if (hw->mac.type <= e1000_82573) { + reg = E1000_READ_REG(hw, E1000_RFCTL); + reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS); + E1000_WRITE_REG(hw, E1000_RFCTL, reg); + } + + /* PCI-Ex Control Registers */ + switch (hw->mac.type) { + case e1000_82574: + case e1000_82583: + reg = E1000_READ_REG(hw, E1000_GCR); + reg |= (1 << 22); + E1000_WRITE_REG(hw, E1000_GCR, reg); + + /* Workaround for hardware errata. + * apply workaround for hardware errata documented in errata + * docs Fixes issue where some error prone or unreliable PCIe + * completions are occurring, particularly with ASPM enabled. + * Without fix, issue can cause Tx timeouts. + */ + reg = E1000_READ_REG(hw, E1000_GCR2); + reg |= 1; + E1000_WRITE_REG(hw, E1000_GCR2, reg); + break; + default: + break; + } + + return; +} + +/** + * e1000_clear_vfta_82571 - Clear VLAN filter table + * @hw: pointer to the HW structure + * + * Clears the register array which contains the VLAN filter table by + * setting all the values to 0. 
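+ *
+ * A VLAN ID indexes the 4096-bit table as a (register, bit) pair:
+ * bits 11:5 select one of the 128 32-bit VFTA registers and bits 4:0
+ * select the bit within it, so e.g. VLAN ID 100 maps to register
+ * offset 3, bit 4. The loop below preserves exactly one such bit when
+ * a manageability VLAN is configured.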
+ **/ +STATIC void e1000_clear_vfta_82571(struct e1000_hw *hw) +{ + u32 offset; + u32 vfta_value = 0; + u32 vfta_offset = 0; + u32 vfta_bit_in_reg = 0; + + DEBUGFUNC("e1000_clear_vfta_82571"); + + switch (hw->mac.type) { + case e1000_82573: + case e1000_82574: + case e1000_82583: + if (hw->mng_cookie.vlan_id != 0) { + /* The VFTA is a 4096b bit-field, each identifying + * a single VLAN ID. The following operations + * determine which 32b entry (i.e. offset) into the + * array we want to set the VLAN ID (i.e. bit) of + * the manageability unit. + */ + vfta_offset = (hw->mng_cookie.vlan_id >> + E1000_VFTA_ENTRY_SHIFT) & + E1000_VFTA_ENTRY_MASK; + vfta_bit_in_reg = + 1 << (hw->mng_cookie.vlan_id & + E1000_VFTA_ENTRY_BIT_SHIFT_MASK); + } + break; + default: + break; + } + for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) { + /* If the offset we want to clear is the same offset of the + * manageability VLAN ID, then clear all bits except that of + * the manageability unit. + */ + vfta_value = (offset == vfta_offset) ? vfta_bit_in_reg : 0; + E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, vfta_value); + E1000_WRITE_FLUSH(hw); + } +} + +/** + * e1000_check_mng_mode_82574 - Check manageability is enabled + * @hw: pointer to the HW structure + * + * Reads the NVM Initialization Control Word 2 and returns true + * (>0) if any manageability is enabled, else false (0). + **/ +STATIC bool e1000_check_mng_mode_82574(struct e1000_hw *hw) +{ + u16 data; + s32 ret_val; + + DEBUGFUNC("e1000_check_mng_mode_82574"); + + ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG, 1, &data); + if (ret_val) + return false; + + return (data & E1000_NVM_INIT_CTRL2_MNGM) != 0; +} + +/** + * e1000_led_on_82574 - Turn LED on + * @hw: pointer to the HW structure + * + * Turn LED on. + **/ +STATIC s32 e1000_led_on_82574(struct e1000_hw *hw) +{ + u32 ctrl; + u32 i; + + DEBUGFUNC("e1000_led_on_82574"); + + ctrl = hw->mac.ledctl_mode2; + if (!(E1000_STATUS_LU & E1000_READ_REG(hw, E1000_STATUS))) { + /* If no link, then turn LED on by setting the invert bit + * for each LED that's "on" (0x0E) in ledctl_mode2. + */ + for (i = 0; i < 4; i++) + if (((hw->mac.ledctl_mode2 >> (i * 8)) & 0xFF) == + E1000_LEDCTL_MODE_LED_ON) + ctrl |= (E1000_LEDCTL_LED0_IVRT << (i * 8)); + } + E1000_WRITE_REG(hw, E1000_LEDCTL, ctrl); + + return E1000_SUCCESS; +} + +/** + * e1000_check_phy_82574 - check 82574 phy hung state + * @hw: pointer to the HW structure + * + * Returns whether phy is hung or not + **/ +bool e1000_check_phy_82574(struct e1000_hw *hw) +{ + u16 status_1kbt = 0; + u16 receive_errors = 0; + s32 ret_val; + + DEBUGFUNC("e1000_check_phy_82574"); + + /* Read PHY Receive Error counter first, if its is max - all F's then + * read the Base1000T status register If both are max then PHY is hung. + */ + ret_val = hw->phy.ops.read_reg(hw, E1000_RECEIVE_ERROR_COUNTER, + &receive_errors); + if (ret_val) + return false; + if (receive_errors == E1000_RECEIVE_ERROR_MAX) { + ret_val = hw->phy.ops.read_reg(hw, E1000_BASE1000T_STATUS, + &status_1kbt); + if (ret_val) + return false; + if ((status_1kbt & E1000_IDLE_ERROR_COUNT_MASK) == + E1000_IDLE_ERROR_COUNT_MASK) + return true; + } + + return false; +} + + +/** + * e1000_setup_link_82571 - Setup flow control and link settings + * @hw: pointer to the HW structure + * + * Determines which flow control settings to use, then configures flow + * control. Calls the appropriate media-specific link configuration + * function. 
Assuming the adapter has a valid link partner, a valid link + * should be established. Assumes the hardware has previously been reset + * and the transmitter and receiver are not enabled. + **/ +STATIC s32 e1000_setup_link_82571(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_setup_link_82571"); + + /* 82573 does not have a word in the NVM to determine + * the default flow control setting, so we explicitly + * set it to full. + */ + switch (hw->mac.type) { + case e1000_82573: + case e1000_82574: + case e1000_82583: + if (hw->fc.requested_mode == e1000_fc_default) + hw->fc.requested_mode = e1000_fc_full; + break; + default: + break; + } + + return e1000_setup_link_generic(hw); +} + +/** + * e1000_setup_copper_link_82571 - Configure copper link settings + * @hw: pointer to the HW structure + * + * Configures the link for auto-neg or forced speed and duplex. Then we check + * for link, once link is established calls to configure collision distance + * and flow control are called. + **/ +STATIC s32 e1000_setup_copper_link_82571(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + + DEBUGFUNC("e1000_setup_copper_link_82571"); + + ctrl = E1000_READ_REG(hw, E1000_CTRL); + ctrl |= E1000_CTRL_SLU; + ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + + switch (hw->phy.type) { + case e1000_phy_m88: + case e1000_phy_bm: + ret_val = e1000_copper_link_setup_m88(hw); + break; + case e1000_phy_igp_2: + ret_val = e1000_copper_link_setup_igp(hw); + break; + default: + return -E1000_ERR_PHY; + break; + } + + if (ret_val) + return ret_val; + + return e1000_setup_copper_link_generic(hw); +} + +/** + * e1000_setup_fiber_serdes_link_82571 - Setup link for fiber/serdes + * @hw: pointer to the HW structure + * + * Configures collision distance and flow control for fiber and serdes links. + * Upon successful setup, poll for link. + **/ +STATIC s32 e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_setup_fiber_serdes_link_82571"); + + switch (hw->mac.type) { + case e1000_82571: + case e1000_82572: + /* If SerDes loopback mode is entered, there is no form + * of reset to take the adapter out of that mode. So we + * have to explicitly take the adapter out of loopback + * mode. This prevents drivers from twiddling their thumbs + * if another tool failed to take it out of loopback mode. + */ + E1000_WRITE_REG(hw, E1000_SCTL, + E1000_SCTL_DISABLE_SERDES_LOOPBACK); + break; + default: + break; + } + + return e1000_setup_fiber_serdes_link_generic(hw); +} + +/** + * e1000_check_for_serdes_link_82571 - Check for link (Serdes) + * @hw: pointer to the HW structure + * + * Reports the link state as up or down. + * + * If autonegotiation is supported by the link partner, the link state is + * determined by the result of autonegotiation. This is the most likely case. + * If autonegotiation is not supported by the link partner, and the link + * has a valid signal, force the link up. 
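+ *
+ * In terms of the four states listed below, the transitions
+ * implemented here are: down -> autoneg_progress once the receiver is
+ * synchronized; autoneg_progress -> autoneg_complete, forced_up or
+ * down depending on whether /C/ ordered sets and link-up are seen;
+ * autoneg_complete -> autoneg_progress if link is lost; and
+ * forced_up -> autoneg_progress if /C/ ordered sets reappear.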
+ * + * The link state is represented internally here by 4 states: + * + * 1) down + * 2) autoneg_progress + * 3) autoneg_complete (the link successfully autonegotiated) + * 4) forced_up (the link has been forced up, it did not autonegotiate) + * + **/ +STATIC s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 rxcw; + u32 ctrl; + u32 status; + u32 txcw; + u32 i; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_check_for_serdes_link_82571"); + + ctrl = E1000_READ_REG(hw, E1000_CTRL); + status = E1000_READ_REG(hw, E1000_STATUS); + E1000_READ_REG(hw, E1000_RXCW); + /* SYNCH bit and IV bit are sticky */ + usec_delay(10); + rxcw = E1000_READ_REG(hw, E1000_RXCW); + + if ((rxcw & E1000_RXCW_SYNCH) && !(rxcw & E1000_RXCW_IV)) { + /* Receiver is synchronized with no invalid bits. */ + switch (mac->serdes_link_state) { + case e1000_serdes_link_autoneg_complete: + if (!(status & E1000_STATUS_LU)) { + /* We have lost link, retry autoneg before + * reporting link failure + */ + mac->serdes_link_state = + e1000_serdes_link_autoneg_progress; + mac->serdes_has_link = false; + DEBUGOUT("AN_UP -> AN_PROG\n"); + } else { + mac->serdes_has_link = true; + } + break; + + case e1000_serdes_link_forced_up: + /* If we are receiving /C/ ordered sets, re-enable + * auto-negotiation in the TXCW register and disable + * forced link in the Device Control register in an + * attempt to auto-negotiate with our link partner. + */ + if (rxcw & E1000_RXCW_C) { + /* Enable autoneg, and unforce link up */ + E1000_WRITE_REG(hw, E1000_TXCW, mac->txcw); + E1000_WRITE_REG(hw, E1000_CTRL, + (ctrl & ~E1000_CTRL_SLU)); + mac->serdes_link_state = + e1000_serdes_link_autoneg_progress; + mac->serdes_has_link = false; + DEBUGOUT("FORCED_UP -> AN_PROG\n"); + } else { + mac->serdes_has_link = true; + } + break; + + case e1000_serdes_link_autoneg_progress: + if (rxcw & E1000_RXCW_C) { + /* We received /C/ ordered sets, meaning the + * link partner has autonegotiated, and we can + * trust the Link Up (LU) status bit. + */ + if (status & E1000_STATUS_LU) { + mac->serdes_link_state = + e1000_serdes_link_autoneg_complete; + DEBUGOUT("AN_PROG -> AN_UP\n"); + mac->serdes_has_link = true; + } else { + /* Autoneg completed, but failed. */ + mac->serdes_link_state = + e1000_serdes_link_down; + DEBUGOUT("AN_PROG -> DOWN\n"); + } + } else { + /* The link partner did not autoneg. + * Force link up and full duplex, and change + * state to forced. + */ + E1000_WRITE_REG(hw, E1000_TXCW, + (mac->txcw & ~E1000_TXCW_ANE)); + ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD); + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + + /* Configure Flow Control after link up. */ + ret_val = + e1000_config_fc_after_link_up_generic(hw); + if (ret_val) { + DEBUGOUT("Error config flow control\n"); + break; + } + mac->serdes_link_state = + e1000_serdes_link_forced_up; + mac->serdes_has_link = true; + DEBUGOUT("AN_PROG -> FORCED_UP\n"); + } + break; + + case e1000_serdes_link_down: + default: + /* The link was down but the receiver has now gained + * valid sync, so lets see if we can bring the link + * up. 
+ */ + E1000_WRITE_REG(hw, E1000_TXCW, mac->txcw); + E1000_WRITE_REG(hw, E1000_CTRL, (ctrl & + ~E1000_CTRL_SLU)); + mac->serdes_link_state = + e1000_serdes_link_autoneg_progress; + mac->serdes_has_link = false; + DEBUGOUT("DOWN -> AN_PROG\n"); + break; + } + } else { + if (!(rxcw & E1000_RXCW_SYNCH)) { + mac->serdes_has_link = false; + mac->serdes_link_state = e1000_serdes_link_down; + DEBUGOUT("ANYSTATE -> DOWN\n"); + } else { + /* Check several times, if SYNCH bit and CONFIG + * bit both are consistently 1 then simply ignore + * the IV bit and restart Autoneg + */ + for (i = 0; i < AN_RETRY_COUNT; i++) { + usec_delay(10); + rxcw = E1000_READ_REG(hw, E1000_RXCW); + if ((rxcw & E1000_RXCW_SYNCH) && + (rxcw & E1000_RXCW_C)) + continue; + + if (rxcw & E1000_RXCW_IV) { + mac->serdes_has_link = false; + mac->serdes_link_state = + e1000_serdes_link_down; + DEBUGOUT("ANYSTATE -> DOWN\n"); + break; + } + } + + if (i == AN_RETRY_COUNT) { + txcw = E1000_READ_REG(hw, E1000_TXCW); + txcw |= E1000_TXCW_ANE; + E1000_WRITE_REG(hw, E1000_TXCW, txcw); + mac->serdes_link_state = + e1000_serdes_link_autoneg_progress; + mac->serdes_has_link = false; + DEBUGOUT("ANYSTATE -> AN_PROG\n"); + } + } + } + + return ret_val; +} + +/** + * e1000_valid_led_default_82571 - Verify a valid default LED config + * @hw: pointer to the HW structure + * @data: pointer to the NVM (EEPROM) + * + * Read the EEPROM for the current default LED configuration. If the + * LED configuration is not valid, set to a valid LED configuration. + **/ +STATIC s32 e1000_valid_led_default_82571(struct e1000_hw *hw, u16 *data) +{ + s32 ret_val; + + DEBUGFUNC("e1000_valid_led_default_82571"); + + ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + + switch (hw->mac.type) { + case e1000_82573: + case e1000_82574: + case e1000_82583: + if (*data == ID_LED_RESERVED_F746) + *data = ID_LED_DEFAULT_82573; + break; + default: + if (*data == ID_LED_RESERVED_0000 || + *data == ID_LED_RESERVED_FFFF) + *data = ID_LED_DEFAULT; + break; + } + + return E1000_SUCCESS; +} + +/** + * e1000_get_laa_state_82571 - Get locally administered address state + * @hw: pointer to the HW structure + * + * Retrieve and return the current locally administered address state. + **/ +bool e1000_get_laa_state_82571(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_get_laa_state_82571"); + + if (hw->mac.type != e1000_82571) + return false; + + return hw->dev_spec._82571.laa_is_present; +} + +/** + * e1000_set_laa_state_82571 - Set locally administered address state + * @hw: pointer to the HW structure + * @state: enable/disable locally administered address + * + * Enable/Disable the current locally administered address state. + **/ +void e1000_set_laa_state_82571(struct e1000_hw *hw, bool state) +{ + DEBUGFUNC("e1000_set_laa_state_82571"); + + if (hw->mac.type != e1000_82571) + return; + + hw->dev_spec._82571.laa_is_present = state; + + /* If workaround is activated... */ + if (state) + /* Hold a copy of the LAA in RAR[14] This is done so that + * between the time RAR[0] gets clobbered and the time it + * gets fixed, the actual LAA is in one of the RARs and no + * incoming packets directed to this port are dropped. + * Eventually the LAA will be in RAR[0] and RAR[14]. 
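+ * The write below targets RAR[rar_entry_count - 1], i.e. the RAR[14]
+ * referred to above for this family.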
+ */ + hw->mac.ops.rar_set(hw, hw->mac.addr, + hw->mac.rar_entry_count - 1); + return; +} + +/** + * e1000_fix_nvm_checksum_82571 - Fix EEPROM checksum + * @hw: pointer to the HW structure + * + * Verifies that the EEPROM has completed the update. After updating the + * EEPROM, we need to check bit 15 in work 0x23 for the checksum fix. If + * the checksum fix is not implemented, we need to set the bit and update + * the checksum. Otherwise, if bit 15 is set and the checksum is incorrect, + * we need to return bad checksum. + **/ +STATIC s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + s32 ret_val; + u16 data; + + DEBUGFUNC("e1000_fix_nvm_checksum_82571"); + + if (nvm->type != e1000_nvm_flash_hw) + return E1000_SUCCESS; + + /* Check bit 4 of word 10h. If it is 0, firmware is done updating + * 10h-12h. Checksum may need to be fixed. + */ + ret_val = nvm->ops.read(hw, 0x10, 1, &data); + if (ret_val) + return ret_val; + + if (!(data & 0x10)) { + /* Read 0x23 and check bit 15. This bit is a 1 + * when the checksum has already been fixed. If + * the checksum is still wrong and this bit is a + * 1, we need to return bad checksum. Otherwise, + * we need to set this bit to a 1 and update the + * checksum. + */ + ret_val = nvm->ops.read(hw, 0x23, 1, &data); + if (ret_val) + return ret_val; + + if (!(data & 0x8000)) { + data |= 0x8000; + ret_val = nvm->ops.write(hw, 0x23, 1, &data); + if (ret_val) + return ret_val; + ret_val = nvm->ops.update(hw); + if (ret_val) + return ret_val; + } + } + + return E1000_SUCCESS; +} + + +/** + * e1000_read_mac_addr_82571 - Read device MAC address + * @hw: pointer to the HW structure + **/ +STATIC s32 e1000_read_mac_addr_82571(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_read_mac_addr_82571"); + + if (hw->mac.type == e1000_82571) { + s32 ret_val; + + /* If there's an alternate MAC address place it in RAR0 + * so that it will override the Si installed default perm + * address. + */ + ret_val = e1000_check_alt_mac_addr_generic(hw); + if (ret_val) + return ret_val; + } + + return e1000_read_mac_addr_generic(hw); +} + +/** + * e1000_power_down_phy_copper_82571 - Remove link during PHY power down + * @hw: pointer to the HW structure + * + * In the case of a PHY power down to save power, or to turn off link during a + * driver unload, or wake on lan is not enabled, remove the link. + **/ +STATIC void e1000_power_down_phy_copper_82571(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + struct e1000_mac_info *mac = &hw->mac; + + if (!phy->ops.check_reset_block) + return; + + /* If the management interface is not enabled, then power down */ + if (!(mac->ops.check_mng_mode(hw) || phy->ops.check_reset_block(hw))) + e1000_power_down_phy_copper(hw); + + return; +} + +/** + * e1000_clear_hw_cntrs_82571 - Clear device specific hardware counters + * @hw: pointer to the HW structure + * + * Clears the hardware counters by reading the counter registers. 
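+ * The statistics registers are clear-on-read, so the values read
+ * below are intentionally discarded.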
+ **/ +STATIC void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_clear_hw_cntrs_82571"); + + e1000_clear_hw_cntrs_base_generic(hw); + + E1000_READ_REG(hw, E1000_PRC64); + E1000_READ_REG(hw, E1000_PRC127); + E1000_READ_REG(hw, E1000_PRC255); + E1000_READ_REG(hw, E1000_PRC511); + E1000_READ_REG(hw, E1000_PRC1023); + E1000_READ_REG(hw, E1000_PRC1522); + E1000_READ_REG(hw, E1000_PTC64); + E1000_READ_REG(hw, E1000_PTC127); + E1000_READ_REG(hw, E1000_PTC255); + E1000_READ_REG(hw, E1000_PTC511); + E1000_READ_REG(hw, E1000_PTC1023); + E1000_READ_REG(hw, E1000_PTC1522); + + E1000_READ_REG(hw, E1000_ALGNERRC); + E1000_READ_REG(hw, E1000_RXERRC); + E1000_READ_REG(hw, E1000_TNCRS); + E1000_READ_REG(hw, E1000_CEXTERR); + E1000_READ_REG(hw, E1000_TSCTC); + E1000_READ_REG(hw, E1000_TSCTFC); + + E1000_READ_REG(hw, E1000_MGTPRC); + E1000_READ_REG(hw, E1000_MGTPDC); + E1000_READ_REG(hw, E1000_MGTPTC); + + E1000_READ_REG(hw, E1000_IAC); + E1000_READ_REG(hw, E1000_ICRXOC); + + E1000_READ_REG(hw, E1000_ICRXPTC); + E1000_READ_REG(hw, E1000_ICRXATC); + E1000_READ_REG(hw, E1000_ICTXPTC); + E1000_READ_REG(hw, E1000_ICTXATC); + E1000_READ_REG(hw, E1000_ICTXQEC); + E1000_READ_REG(hw, E1000_ICTXQMTC); + E1000_READ_REG(hw, E1000_ICRXDMTC); +} diff --git a/src/spdk/dpdk/drivers/net/e1000/base/e1000_82571.h b/src/spdk/dpdk/drivers/net/e1000/base/e1000_82571.h new file mode 100644 index 000000000..7cc179b37 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/e1000/base/e1000_82571.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001 - 2015 Intel Corporation + */ + +#ifndef _E1000_82571_H_ +#define _E1000_82571_H_ + +#define ID_LED_RESERVED_F746 0xF746 +#define ID_LED_DEFAULT_82573 ((ID_LED_DEF1_DEF2 << 12) | \ + (ID_LED_OFF1_ON2 << 8) | \ + (ID_LED_DEF1_DEF2 << 4) | \ + (ID_LED_DEF1_DEF2)) + +#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000 +#define AN_RETRY_COUNT 5 /* Autoneg Retry Count value */ + +/* Intr Throttling - RW */ +#define E1000_EITR_82574(_n) (0x000E8 + (0x4 * (_n))) + +#define E1000_EIAC_82574 0x000DC /* Ext. 
Interrupt Auto Clear - RW */ +#define E1000_EIAC_MASK_82574 0x01F00000 + +#define E1000_IVAR_INT_ALLOC_VALID 0x8 + +/* Manageability Operation Mode mask */ +#define E1000_NVM_INIT_CTRL2_MNGM 0x6000 + +#define E1000_BASE1000T_STATUS 10 +#define E1000_IDLE_ERROR_COUNT_MASK 0xFF +#define E1000_RECEIVE_ERROR_COUNTER 21 +#define E1000_RECEIVE_ERROR_MAX 0xFFFF +bool e1000_check_phy_82574(struct e1000_hw *hw); +bool e1000_get_laa_state_82571(struct e1000_hw *hw); +void e1000_set_laa_state_82571(struct e1000_hw *hw, bool state); + +#endif diff --git a/src/spdk/dpdk/drivers/net/e1000/base/e1000_82575.c b/src/spdk/dpdk/drivers/net/e1000/base/e1000_82575.c new file mode 100644 index 000000000..4c3611c6d --- /dev/null +++ b/src/spdk/dpdk/drivers/net/e1000/base/e1000_82575.c @@ -0,0 +1,3753 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001 - 2015 Intel Corporation + */ + +/* + * 82575EB Gigabit Network Connection + * 82575EB Gigabit Backplane Connection + * 82575GB Gigabit Network Connection + * 82576 Gigabit Network Connection + * 82576 Quad Port Gigabit Mezzanine Adapter + * 82580 Gigabit Network Connection + * I350 Gigabit Network Connection + */ + +#include "e1000_api.h" +#include "e1000_i210.h" + +STATIC s32 e1000_init_phy_params_82575(struct e1000_hw *hw); +STATIC s32 e1000_init_mac_params_82575(struct e1000_hw *hw); +STATIC s32 e1000_acquire_phy_82575(struct e1000_hw *hw); +STATIC void e1000_release_phy_82575(struct e1000_hw *hw); +STATIC s32 e1000_acquire_nvm_82575(struct e1000_hw *hw); +STATIC void e1000_release_nvm_82575(struct e1000_hw *hw); +STATIC s32 e1000_check_for_link_82575(struct e1000_hw *hw); +STATIC s32 e1000_check_for_link_media_swap(struct e1000_hw *hw); +STATIC s32 e1000_get_cfg_done_82575(struct e1000_hw *hw); +STATIC s32 e1000_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed, + u16 *duplex); +STATIC s32 e1000_phy_hw_reset_sgmii_82575(struct e1000_hw *hw); +STATIC s32 e1000_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, + u16 *data); +STATIC s32 e1000_reset_hw_82575(struct e1000_hw *hw); +STATIC s32 e1000_reset_hw_82580(struct e1000_hw *hw); +STATIC s32 e1000_read_phy_reg_82580(struct e1000_hw *hw, + u32 offset, u16 *data); +STATIC s32 e1000_write_phy_reg_82580(struct e1000_hw *hw, + u32 offset, u16 data); +STATIC s32 e1000_set_d0_lplu_state_82580(struct e1000_hw *hw, + bool active); +STATIC s32 e1000_set_d3_lplu_state_82580(struct e1000_hw *hw, + bool active); +STATIC s32 e1000_set_d0_lplu_state_82575(struct e1000_hw *hw, + bool active); +STATIC s32 e1000_setup_copper_link_82575(struct e1000_hw *hw); +STATIC s32 e1000_setup_serdes_link_82575(struct e1000_hw *hw); +STATIC s32 e1000_get_media_type_82575(struct e1000_hw *hw); +STATIC s32 e1000_set_sfp_media_type_82575(struct e1000_hw *hw); +STATIC s32 e1000_valid_led_default_82575(struct e1000_hw *hw, u16 *data); +STATIC s32 e1000_write_phy_reg_sgmii_82575(struct e1000_hw *hw, + u32 offset, u16 data); +STATIC void e1000_clear_hw_cntrs_82575(struct e1000_hw *hw); +STATIC s32 e1000_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask); +STATIC s32 e1000_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, + u16 *speed, u16 *duplex); +STATIC s32 e1000_get_phy_id_82575(struct e1000_hw *hw); +STATIC void e1000_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask); +STATIC bool e1000_sgmii_active_82575(struct e1000_hw *hw); +STATIC s32 e1000_reset_init_script_82575(struct e1000_hw *hw); +STATIC s32 e1000_read_mac_addr_82575(struct e1000_hw *hw); +STATIC void e1000_config_collision_dist_82575(struct 
e1000_hw *hw); +STATIC void e1000_power_down_phy_copper_82575(struct e1000_hw *hw); +STATIC void e1000_shutdown_serdes_link_82575(struct e1000_hw *hw); +STATIC void e1000_power_up_serdes_link_82575(struct e1000_hw *hw); +STATIC s32 e1000_set_pcie_completion_timeout(struct e1000_hw *hw); +STATIC s32 e1000_reset_mdicnfg_82580(struct e1000_hw *hw); +STATIC s32 e1000_validate_nvm_checksum_82580(struct e1000_hw *hw); +STATIC s32 e1000_update_nvm_checksum_82580(struct e1000_hw *hw); +STATIC s32 e1000_update_nvm_checksum_with_offset(struct e1000_hw *hw, + u16 offset); +STATIC s32 e1000_validate_nvm_checksum_with_offset(struct e1000_hw *hw, + u16 offset); +STATIC s32 e1000_validate_nvm_checksum_i350(struct e1000_hw *hw); +STATIC s32 e1000_update_nvm_checksum_i350(struct e1000_hw *hw); +STATIC void e1000_clear_vfta_i350(struct e1000_hw *hw); + +STATIC void e1000_i2c_start(struct e1000_hw *hw); +STATIC void e1000_i2c_stop(struct e1000_hw *hw); +STATIC s32 e1000_clock_in_i2c_byte(struct e1000_hw *hw, u8 *data); +STATIC s32 e1000_clock_out_i2c_byte(struct e1000_hw *hw, u8 data); +STATIC s32 e1000_get_i2c_ack(struct e1000_hw *hw); +STATIC s32 e1000_clock_in_i2c_bit(struct e1000_hw *hw, bool *data); +STATIC s32 e1000_clock_out_i2c_bit(struct e1000_hw *hw, bool data); +STATIC void e1000_raise_i2c_clk(struct e1000_hw *hw, u32 *i2cctl); +STATIC void e1000_lower_i2c_clk(struct e1000_hw *hw, u32 *i2cctl); +STATIC s32 e1000_set_i2c_data(struct e1000_hw *hw, u32 *i2cctl, bool data); +STATIC bool e1000_get_i2c_data(u32 *i2cctl); + +STATIC const u16 e1000_82580_rxpbs_table[] = { + 36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140 }; +#define E1000_82580_RXPBS_TABLE_SIZE \ + (sizeof(e1000_82580_rxpbs_table) / \ + sizeof(e1000_82580_rxpbs_table[0])) + + +/** + * e1000_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO + * @hw: pointer to the HW structure + * + * Called to determine if the I2C pins are being used for I2C or as an + * external MDIO interface since the two options are mutually exclusive. + **/ +STATIC bool e1000_sgmii_uses_mdio_82575(struct e1000_hw *hw) +{ + u32 reg = 0; + bool ext_mdio = false; + + DEBUGFUNC("e1000_sgmii_uses_mdio_82575"); + + switch (hw->mac.type) { + case e1000_82575: + case e1000_82576: + reg = E1000_READ_REG(hw, E1000_MDIC); + ext_mdio = !!(reg & E1000_MDIC_DEST); + break; + case e1000_82580: + case e1000_i350: + case e1000_i354: + case e1000_i210: + case e1000_i211: + reg = E1000_READ_REG(hw, E1000_MDICNFG); + ext_mdio = !!(reg & E1000_MDICNFG_EXT_MDIO); + break; + default: + break; + } + return ext_mdio; +} + +/** + * e1000_init_phy_params_82575 - Init PHY func ptrs. 
+ * @hw: pointer to the HW structure + **/ +STATIC s32 e1000_init_phy_params_82575(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = E1000_SUCCESS; + u32 ctrl_ext; + + DEBUGFUNC("e1000_init_phy_params_82575"); + + phy->ops.read_i2c_byte = e1000_read_i2c_byte_generic; + phy->ops.write_i2c_byte = e1000_write_i2c_byte_generic; + + if (hw->phy.media_type != e1000_media_type_copper) { + phy->type = e1000_phy_none; + goto out; + } + + phy->ops.power_up = e1000_power_up_phy_copper; + phy->ops.power_down = e1000_power_down_phy_copper_82575; + + phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; + phy->reset_delay_us = 100; + + phy->ops.acquire = e1000_acquire_phy_82575; + phy->ops.check_reset_block = e1000_check_reset_block_generic; + phy->ops.commit = e1000_phy_sw_reset_generic; + phy->ops.get_cfg_done = e1000_get_cfg_done_82575; + phy->ops.release = e1000_release_phy_82575; + + ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); + + if (e1000_sgmii_active_82575(hw)) { + phy->ops.reset = e1000_phy_hw_reset_sgmii_82575; + ctrl_ext |= E1000_CTRL_I2C_ENA; + } else { + phy->ops.reset = e1000_phy_hw_reset_generic; + ctrl_ext &= ~E1000_CTRL_I2C_ENA; + } + + E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); + e1000_reset_mdicnfg_82580(hw); + + if (e1000_sgmii_active_82575(hw) && !e1000_sgmii_uses_mdio_82575(hw)) { + phy->ops.read_reg = e1000_read_phy_reg_sgmii_82575; + phy->ops.write_reg = e1000_write_phy_reg_sgmii_82575; + } else { + switch (hw->mac.type) { + case e1000_82580: + case e1000_i350: + case e1000_i354: + phy->ops.read_reg = e1000_read_phy_reg_82580; + phy->ops.write_reg = e1000_write_phy_reg_82580; + break; + case e1000_i210: + case e1000_i211: + phy->ops.read_reg = e1000_read_phy_reg_gs40g; + phy->ops.write_reg = e1000_write_phy_reg_gs40g; + break; + default: + phy->ops.read_reg = e1000_read_phy_reg_igp; + phy->ops.write_reg = e1000_write_phy_reg_igp; + } + } + + /* Set phy->phy_addr and phy->id. */ + ret_val = e1000_get_phy_id_82575(hw); + + /* Verify phy id and set remaining function pointers */ + switch (phy->id) { + case M88E1543_E_PHY_ID: + case M88E1512_E_PHY_ID: + case I347AT4_E_PHY_ID: + case M88E1112_E_PHY_ID: + case M88E1340M_E_PHY_ID: + case M88E1111_I_PHY_ID: + phy->type = e1000_phy_m88; + phy->ops.check_polarity = e1000_check_polarity_m88; + phy->ops.get_info = e1000_get_phy_info_m88; + if (phy->id == I347AT4_E_PHY_ID || + phy->id == M88E1112_E_PHY_ID || + phy->id == M88E1340M_E_PHY_ID) + phy->ops.get_cable_length = + e1000_get_cable_length_m88_gen2; + else if (phy->id == M88E1543_E_PHY_ID || + phy->id == M88E1512_E_PHY_ID) + phy->ops.get_cable_length = + e1000_get_cable_length_m88_gen2; + else + phy->ops.get_cable_length = e1000_get_cable_length_m88; + phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88; + /* Check if this PHY is confgured for media swap. 
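+ * The M88E1112 operating mode is read from MAC control register 1 on
+ * page 2; when the PHY reports one of the automatic copper/SGMII or
+ * copper/BASE-X media-swap modes, check_for_link is redirected to
+ * e1000_check_for_link_media_swap().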
*/ + if (phy->id == M88E1112_E_PHY_ID) { + u16 data; + + ret_val = phy->ops.write_reg(hw, + E1000_M88E1112_PAGE_ADDR, + 2); + if (ret_val) + goto out; + + ret_val = phy->ops.read_reg(hw, + E1000_M88E1112_MAC_CTRL_1, + &data); + if (ret_val) + goto out; + + data = (data & E1000_M88E1112_MAC_CTRL_1_MODE_MASK) >> + E1000_M88E1112_MAC_CTRL_1_MODE_SHIFT; + if (data == E1000_M88E1112_AUTO_COPPER_SGMII || + data == E1000_M88E1112_AUTO_COPPER_BASEX) + hw->mac.ops.check_for_link = + e1000_check_for_link_media_swap; + } + if (phy->id == M88E1512_E_PHY_ID) { + ret_val = e1000_initialize_M88E1512_phy(hw); + if (ret_val) + goto out; + } + if (phy->id == M88E1543_E_PHY_ID) { + ret_val = e1000_initialize_M88E1543_phy(hw); + if (ret_val) + goto out; + } + break; + case IGP03E1000_E_PHY_ID: + case IGP04E1000_E_PHY_ID: + phy->type = e1000_phy_igp_3; + phy->ops.check_polarity = e1000_check_polarity_igp; + phy->ops.get_info = e1000_get_phy_info_igp; + phy->ops.get_cable_length = e1000_get_cable_length_igp_2; + phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp; + phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82575; + phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_generic; + break; + case I82580_I_PHY_ID: + case I350_I_PHY_ID: + phy->type = e1000_phy_82580; + phy->ops.check_polarity = e1000_check_polarity_82577; + phy->ops.force_speed_duplex = + e1000_phy_force_speed_duplex_82577; + phy->ops.get_cable_length = e1000_get_cable_length_82577; + phy->ops.get_info = e1000_get_phy_info_82577; + phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82580; + phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_82580; + break; + case I210_I_PHY_ID: + phy->type = e1000_phy_i210; + phy->ops.check_polarity = e1000_check_polarity_m88; + phy->ops.get_info = e1000_get_phy_info_m88; + phy->ops.get_cable_length = e1000_get_cable_length_m88_gen2; + phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82580; + phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_82580; + phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88; + break; + case BCM54616_E_PHY_ID: + phy->type = e1000_phy_none; + break; + default: + ret_val = -E1000_ERR_PHY; + goto out; + } + +out: + return ret_val; +} + +/** + * e1000_init_nvm_params_82575 - Init NVM func ptrs. + * @hw: pointer to the HW structure + **/ +s32 e1000_init_nvm_params_82575(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 eecd = E1000_READ_REG(hw, E1000_EECD); + u16 size; + + DEBUGFUNC("e1000_init_nvm_params_82575"); + + size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >> + E1000_EECD_SIZE_EX_SHIFT); + /* + * Added to a constant, "size" becomes the left-shift value + * for setting word_size. + */ + size += NVM_WORD_SIZE_BASE_SHIFT; + + /* Just in case size is out of range, cap it to the largest + * EEPROM size supported + */ + if (size > 15) + size = 15; + + nvm->word_size = 1 << size; + if (hw->mac.type < e1000_i210) { + nvm->opcode_bits = 8; + nvm->delay_usec = 1; + + switch (nvm->override) { + case e1000_nvm_override_spi_large: + nvm->page_size = 32; + nvm->address_bits = 16; + break; + case e1000_nvm_override_spi_small: + nvm->page_size = 8; + nvm->address_bits = 8; + break; + default: + nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8; + nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 
+ 16 : 8; + break; + } + if (nvm->word_size == (1 << 15)) + nvm->page_size = 128; + + nvm->type = e1000_nvm_eeprom_spi; + } else { + nvm->type = e1000_nvm_flash_hw; + } + + /* Function Pointers */ + nvm->ops.acquire = e1000_acquire_nvm_82575; + nvm->ops.release = e1000_release_nvm_82575; + if (nvm->word_size < (1 << 15)) + nvm->ops.read = e1000_read_nvm_eerd; + else + nvm->ops.read = e1000_read_nvm_spi; + + nvm->ops.write = e1000_write_nvm_spi; + nvm->ops.validate = e1000_validate_nvm_checksum_generic; + nvm->ops.update = e1000_update_nvm_checksum_generic; + nvm->ops.valid_led_default = e1000_valid_led_default_82575; + + /* override generic family function pointers for specific descendants */ + switch (hw->mac.type) { + case e1000_82580: + nvm->ops.validate = e1000_validate_nvm_checksum_82580; + nvm->ops.update = e1000_update_nvm_checksum_82580; + break; + case e1000_i350: + case e1000_i354: + nvm->ops.validate = e1000_validate_nvm_checksum_i350; + nvm->ops.update = e1000_update_nvm_checksum_i350; + break; + default: + break; + } + + return E1000_SUCCESS; +} + +/** + * e1000_init_mac_params_82575 - Init MAC func ptrs. + * @hw: pointer to the HW structure + **/ +STATIC s32 e1000_init_mac_params_82575(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575; + + DEBUGFUNC("e1000_init_mac_params_82575"); + + /* Derives media type */ + e1000_get_media_type_82575(hw); + /* Set mta register count */ + mac->mta_reg_count = 128; + /* Set uta register count */ + mac->uta_reg_count = (hw->mac.type == e1000_82575) ? 0 : 128; + /* Set rar entry count */ + mac->rar_entry_count = E1000_RAR_ENTRIES_82575; + if (mac->type == e1000_82576) + mac->rar_entry_count = E1000_RAR_ENTRIES_82576; + if (mac->type == e1000_82580) + mac->rar_entry_count = E1000_RAR_ENTRIES_82580; + if (mac->type == e1000_i350 || mac->type == e1000_i354) + mac->rar_entry_count = E1000_RAR_ENTRIES_I350; + + /* Enable EEE default settings for EEE supported devices */ + if (mac->type >= e1000_i350) + dev_spec->eee_disable = false; + + /* Allow a single clear of the SW semaphore on I210 and newer */ + if (mac->type >= e1000_i210) + dev_spec->clear_semaphore_once = true; + + /* Set if part includes ASF firmware */ + mac->asf_firmware_present = true; + /* FWSM register */ + mac->has_fwsm = true; + /* ARC supported; valid only if manageability features are enabled. */ + mac->arc_subsystem_valid = + !!(E1000_READ_REG(hw, E1000_FWSM) & E1000_FWSM_MODE_MASK); + + /* Function pointers */ + + /* bus type/speed/width */ + mac->ops.get_bus_info = e1000_get_bus_info_pcie_generic; + /* reset */ + if (mac->type >= e1000_82580) + mac->ops.reset_hw = e1000_reset_hw_82580; + else + mac->ops.reset_hw = e1000_reset_hw_82575; + /* hw initialization */ + if ((mac->type == e1000_i210) || (mac->type == e1000_i211)) + mac->ops.init_hw = e1000_init_hw_i210; + else + mac->ops.init_hw = e1000_init_hw_82575; + /* link setup */ + mac->ops.setup_link = e1000_setup_link_generic; + /* physical interface link setup */ + mac->ops.setup_physical_interface = + (hw->phy.media_type == e1000_media_type_copper) + ? 
e1000_setup_copper_link_82575 : e1000_setup_serdes_link_82575; + /* physical interface shutdown */ + mac->ops.shutdown_serdes = e1000_shutdown_serdes_link_82575; + /* physical interface power up */ + mac->ops.power_up_serdes = e1000_power_up_serdes_link_82575; + /* check for link */ + mac->ops.check_for_link = e1000_check_for_link_82575; + /* read mac address */ + mac->ops.read_mac_addr = e1000_read_mac_addr_82575; + /* configure collision distance */ + mac->ops.config_collision_dist = e1000_config_collision_dist_82575; + /* multicast address update */ + mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic; + if (hw->mac.type == e1000_i350 || mac->type == e1000_i354) { + /* writing VFTA */ + mac->ops.write_vfta = e1000_write_vfta_i350; + /* clearing VFTA */ + mac->ops.clear_vfta = e1000_clear_vfta_i350; + } else { + /* writing VFTA */ + mac->ops.write_vfta = e1000_write_vfta_generic; + /* clearing VFTA */ + mac->ops.clear_vfta = e1000_clear_vfta_generic; + } + if (hw->mac.type >= e1000_82580) + mac->ops.validate_mdi_setting = + e1000_validate_mdi_setting_crossover_generic; + /* ID LED init */ + mac->ops.id_led_init = e1000_id_led_init_generic; + /* blink LED */ + mac->ops.blink_led = e1000_blink_led_generic; + /* setup LED */ + mac->ops.setup_led = e1000_setup_led_generic; + /* cleanup LED */ + mac->ops.cleanup_led = e1000_cleanup_led_generic; + /* turn on/off LED */ + mac->ops.led_on = e1000_led_on_generic; + mac->ops.led_off = e1000_led_off_generic; + /* clear hardware counters */ + mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_82575; + /* link info */ + mac->ops.get_link_up_info = e1000_get_link_up_info_82575; + /* acquire SW_FW sync */ + mac->ops.acquire_swfw_sync = e1000_acquire_swfw_sync_82575; + mac->ops.release_swfw_sync = e1000_release_swfw_sync_82575; + if (mac->type >= e1000_i210) { + mac->ops.acquire_swfw_sync = e1000_acquire_swfw_sync_i210; + mac->ops.release_swfw_sync = e1000_release_swfw_sync_i210; + } + + /* set lan id for port to determine which phy lock to use */ + hw->mac.ops.set_lan_id(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_init_function_pointers_82575 - Init func ptrs. + * @hw: pointer to the HW structure + * + * Called to initialize all function pointers and parameters. + **/ +void e1000_init_function_pointers_82575(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_init_function_pointers_82575"); + + hw->mac.ops.init_params = e1000_init_mac_params_82575; + hw->nvm.ops.init_params = e1000_init_nvm_params_82575; + hw->phy.ops.init_params = e1000_init_phy_params_82575; + hw->mbx.ops.init_params = e1000_init_mbx_params_pf; +} + +/** + * e1000_acquire_phy_82575 - Acquire rights to access PHY + * @hw: pointer to the HW structure + * + * Acquire access rights to the correct PHY. + **/ +STATIC s32 e1000_acquire_phy_82575(struct e1000_hw *hw) +{ + u16 mask = E1000_SWFW_PHY0_SM; + + DEBUGFUNC("e1000_acquire_phy_82575"); + + if (hw->bus.func == E1000_FUNC_1) + mask = E1000_SWFW_PHY1_SM; + else if (hw->bus.func == E1000_FUNC_2) + mask = E1000_SWFW_PHY2_SM; + else if (hw->bus.func == E1000_FUNC_3) + mask = E1000_SWFW_PHY3_SM; + + return hw->mac.ops.acquire_swfw_sync(hw, mask); +} + +/** + * e1000_release_phy_82575 - Release rights to access PHY + * @hw: pointer to the HW structure + * + * A wrapper to release access rights to the correct PHY. 
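+ *
+ * The SW_FW semaphore mask mirrors the one chosen in
+ * e1000_acquire_phy_82575(): E1000_SWFW_PHY0_SM for bus function 0
+ * and E1000_SWFW_PHY1/2/3_SM for functions 1-3.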
+ **/ +STATIC void e1000_release_phy_82575(struct e1000_hw *hw) +{ + u16 mask = E1000_SWFW_PHY0_SM; + + DEBUGFUNC("e1000_release_phy_82575"); + + if (hw->bus.func == E1000_FUNC_1) + mask = E1000_SWFW_PHY1_SM; + else if (hw->bus.func == E1000_FUNC_2) + mask = E1000_SWFW_PHY2_SM; + else if (hw->bus.func == E1000_FUNC_3) + mask = E1000_SWFW_PHY3_SM; + + hw->mac.ops.release_swfw_sync(hw, mask); +} + +/** + * e1000_read_phy_reg_sgmii_82575 - Read PHY register using sgmii + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the PHY register at offset using the serial gigabit media independent + * interface and stores the retrieved information in data. + **/ +STATIC s32 e1000_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, + u16 *data) +{ + s32 ret_val = -E1000_ERR_PARAM; + + DEBUGFUNC("e1000_read_phy_reg_sgmii_82575"); + + if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) { + DEBUGOUT1("PHY Address %u is out of range\n", offset); + goto out; + } + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + + ret_val = e1000_read_phy_reg_i2c(hw, offset, data); + + hw->phy.ops.release(hw); + +out: + return ret_val; +} + +/** + * e1000_write_phy_reg_sgmii_82575 - Write PHY register using sgmii + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Writes the data to PHY register at the offset using the serial gigabit + * media independent interface. + **/ +STATIC s32 e1000_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, + u16 data) +{ + s32 ret_val = -E1000_ERR_PARAM; + + DEBUGFUNC("e1000_write_phy_reg_sgmii_82575"); + + if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) { + DEBUGOUT1("PHY Address %d is out of range\n", offset); + goto out; + } + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + + ret_val = e1000_write_phy_reg_i2c(hw, offset, data); + + hw->phy.ops.release(hw); + +out: + return ret_val; +} + +/** + * e1000_get_phy_id_82575 - Retrieve PHY addr and id + * @hw: pointer to the HW structure + * + * Retrieves the PHY address and ID for both PHY's which do and do not use + * sgmi interface. + **/ +STATIC s32 e1000_get_phy_id_82575(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = E1000_SUCCESS; + u16 phy_id; + u32 ctrl_ext; + u32 mdic; + + DEBUGFUNC("e1000_get_phy_id_82575"); + + /* some i354 devices need an extra read for phy id */ + if (hw->mac.type == e1000_i354) + e1000_get_phy_id(hw); + + /* + * For SGMII PHYs, we try the list of possible addresses until + * we find one that works. For non-SGMII PHYs + * (e.g. integrated copper PHYs), an address of 1 should + * work. The result of this function should mean phy->phy_addr + * and phy->id are set correctly. 
+ */ + if (!e1000_sgmii_active_82575(hw)) { + phy->addr = 1; + ret_val = e1000_get_phy_id(hw); + goto out; + } + + if (e1000_sgmii_uses_mdio_82575(hw)) { + switch (hw->mac.type) { + case e1000_82575: + case e1000_82576: + mdic = E1000_READ_REG(hw, E1000_MDIC); + mdic &= E1000_MDIC_PHY_MASK; + phy->addr = mdic >> E1000_MDIC_PHY_SHIFT; + break; + case e1000_82580: + case e1000_i350: + case e1000_i354: + case e1000_i210: + case e1000_i211: + mdic = E1000_READ_REG(hw, E1000_MDICNFG); + mdic &= E1000_MDICNFG_PHY_MASK; + phy->addr = mdic >> E1000_MDICNFG_PHY_SHIFT; + break; + default: + ret_val = -E1000_ERR_PHY; + goto out; + break; + } + ret_val = e1000_get_phy_id(hw); + goto out; + } + + /* Power on sgmii phy if it is disabled */ + ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); + E1000_WRITE_REG(hw, E1000_CTRL_EXT, + ctrl_ext & ~E1000_CTRL_EXT_SDP3_DATA); + E1000_WRITE_FLUSH(hw); + msec_delay(300); + + /* + * The address field in the I2CCMD register is 3 bits and 0 is invalid. + * Therefore, we need to test 1-7 + */ + for (phy->addr = 1; phy->addr < 8; phy->addr++) { + ret_val = e1000_read_phy_reg_sgmii_82575(hw, PHY_ID1, &phy_id); + if (ret_val == E1000_SUCCESS) { + DEBUGOUT2("Vendor ID 0x%08X read at address %u\n", + phy_id, phy->addr); + /* + * At the time of this writing, The M88 part is + * the only supported SGMII PHY product. + */ + if (phy_id == M88_VENDOR) + break; + } else { + DEBUGOUT1("PHY address %u was unreadable\n", + phy->addr); + } + } + + /* A valid PHY type couldn't be found. */ + if (phy->addr == 8) { + phy->addr = 0; + ret_val = -E1000_ERR_PHY; + } else { + ret_val = e1000_get_phy_id(hw); + } + + /* restore previous sfp cage power state */ + E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); + +out: + return ret_val; +} + +/** + * e1000_phy_hw_reset_sgmii_82575 - Performs a PHY reset + * @hw: pointer to the HW structure + * + * Resets the PHY using the serial gigabit media independent interface. + **/ +STATIC s32 e1000_phy_hw_reset_sgmii_82575(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + struct e1000_phy_info *phy = &hw->phy; + + DEBUGFUNC("e1000_phy_hw_reset_sgmii_82575"); + + /* + * This isn't a true "hard" reset, but is the only reset + * available to us at this time. + */ + + DEBUGOUT("Soft resetting SGMII attached PHY...\n"); + + if (!(hw->phy.ops.write_reg)) + goto out; + + /* + * SFP documentation requires the following to configure the SPF module + * to work on SGMII. No further documentation is given. + */ + ret_val = hw->phy.ops.write_reg(hw, 0x1B, 0x8084); + if (ret_val) + goto out; + + ret_val = hw->phy.ops.commit(hw); + if (ret_val) + goto out; + + if (phy->id == M88E1512_E_PHY_ID) + ret_val = e1000_initialize_M88E1512_phy(hw); +out: + return ret_val; +} + +/** + * e1000_set_d0_lplu_state_82575 - Set Low Power Linkup D0 state + * @hw: pointer to the HW structure + * @active: true to enable LPLU, false to disable + * + * Sets the LPLU D0 state according to the active flag. When + * activating LPLU this function also disables smart speed + * and vice versa. LPLU will not be activated unless the + * device autonegotiation advertisement meets standards of + * either 10 or 10/100 or 10/100/1000 at all duplexes. + * This is a function pointer entry point only called by + * PHY setup routines. 
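+ *
+ * Editor's note: hypothetical call sketch for illustration only.  Being a
+ * function-pointer entry point, this routine would normally be reached
+ * through the PHY ops table rather than called directly, e.g.:
+ *
+ *   if (hw->phy.ops.set_d0_lplu_state)
+ *           ret_val = hw->phy.ops.set_d0_lplu_state(hw, true);
+ *
+ * The set_d0_lplu_state field name is assumed from the generic e1000 PHY
+ * ops layout; the assignment itself is not visible in this hunk.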
+ **/ +STATIC s32 e1000_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = E1000_SUCCESS; + u16 data; + + DEBUGFUNC("e1000_set_d0_lplu_state_82575"); + + if (!(hw->phy.ops.read_reg)) + goto out; + + ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data); + if (ret_val) + goto out; + + if (active) { + data |= IGP02E1000_PM_D0_LPLU; + ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, + data); + if (ret_val) + goto out; + + /* When LPLU is enabled, we should disable SmartSpeed */ + ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + &data); + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + goto out; + } else { + data &= ~IGP02E1000_PM_D0_LPLU; + ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, + data); + /* + * LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. + */ + if (phy->smart_speed == e1000_smart_speed_on) { + ret_val = phy->ops.read_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + goto out; + + data |= IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + goto out; + } else if (phy->smart_speed == e1000_smart_speed_off) { + ret_val = phy->ops.read_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + goto out; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + goto out; + } + } + +out: + return ret_val; +} + +/** + * e1000_set_d0_lplu_state_82580 - Set Low Power Linkup D0 state + * @hw: pointer to the HW structure + * @active: true to enable LPLU, false to disable + * + * Sets the LPLU D0 state according to the active flag. When + * activating LPLU this function also disables smart speed + * and vice versa. LPLU will not be activated unless the + * device autonegotiation advertisement meets standards of + * either 10 or 10/100 or 10/100/1000 at all duplexes. + * This is a function pointer entry point only called by + * PHY setup routines. + **/ +STATIC s32 e1000_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 data; + + DEBUGFUNC("e1000_set_d0_lplu_state_82580"); + + data = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT); + + if (active) { + data |= E1000_82580_PM_D0_LPLU; + + /* When LPLU is enabled, we should disable SmartSpeed */ + data &= ~E1000_82580_PM_SPD; + } else { + data &= ~E1000_82580_PM_D0_LPLU; + + /* + * LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. 
+ */ + if (phy->smart_speed == e1000_smart_speed_on) + data |= E1000_82580_PM_SPD; + else if (phy->smart_speed == e1000_smart_speed_off) + data &= ~E1000_82580_PM_SPD; + } + + E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, data); + return E1000_SUCCESS; +} + +/** + * e1000_set_d3_lplu_state_82580 - Sets low power link up state for D3 + * @hw: pointer to the HW structure + * @active: boolean used to enable/disable lplu + * + * Success returns 0, Failure returns 1 + * + * The low power link up (lplu) state is set to the power management level D3 + * and SmartSpeed is disabled when active is true, else clear lplu for D3 + * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU + * is used during Dx states where the power conservation is most important. + * During driver activity, SmartSpeed should be enabled so performance is + * maintained. + **/ +s32 e1000_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 data; + + DEBUGFUNC("e1000_set_d3_lplu_state_82580"); + + data = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT); + + if (!active) { + data &= ~E1000_82580_PM_D3_LPLU; + /* + * LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. + */ + if (phy->smart_speed == e1000_smart_speed_on) + data |= E1000_82580_PM_SPD; + else if (phy->smart_speed == e1000_smart_speed_off) + data &= ~E1000_82580_PM_SPD; + } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) || + (phy->autoneg_advertised == E1000_ALL_NOT_GIG) || + (phy->autoneg_advertised == E1000_ALL_10_SPEED)) { + data |= E1000_82580_PM_D3_LPLU; + /* When LPLU is enabled, we should disable SmartSpeed */ + data &= ~E1000_82580_PM_SPD; + } + + E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, data); + return E1000_SUCCESS; +} + +/** + * e1000_acquire_nvm_82575 - Request for access to EEPROM + * @hw: pointer to the HW structure + * + * Acquire the necessary semaphores for exclusive access to the EEPROM. + * Set the EEPROM access request bit and wait for EEPROM access grant bit. + * Return successful if access grant bit set, else clear the request for + * EEPROM access and return -E1000_ERR_NVM (-1). 
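+ *
+ * Editor's note (illustration, not upstream code): the NVM params init
+ * routine above installs this function as nvm->ops.acquire and pairs it
+ * with e1000_release_nvm_82575(), so raw EEPROM cycles are expected to
+ * look like:
+ *
+ *   if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
+ *           ... perform EEPROM cycles ...
+ *           hw->nvm.ops.release(hw);
+ *   }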
+ **/ +STATIC s32 e1000_acquire_nvm_82575(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_acquire_nvm_82575"); + + ret_val = e1000_acquire_swfw_sync_82575(hw, E1000_SWFW_EEP_SM); + if (ret_val) + goto out; + + /* + * Check if there is some access + * error this access may hook on + */ + if (hw->mac.type == e1000_i350) { + u32 eecd = E1000_READ_REG(hw, E1000_EECD); + if (eecd & (E1000_EECD_BLOCKED | E1000_EECD_ABORT | + E1000_EECD_TIMEOUT)) { + /* Clear all access error flags */ + E1000_WRITE_REG(hw, E1000_EECD, eecd | + E1000_EECD_ERROR_CLR); + DEBUGOUT("Nvm bit banging access error detected and cleared.\n"); + } + } + + if (hw->mac.type == e1000_82580) { + u32 eecd = E1000_READ_REG(hw, E1000_EECD); + if (eecd & E1000_EECD_BLOCKED) { + /* Clear access error flag */ + E1000_WRITE_REG(hw, E1000_EECD, eecd | + E1000_EECD_BLOCKED); + DEBUGOUT("Nvm bit banging access error detected and cleared.\n"); + } + } + + ret_val = e1000_acquire_nvm_generic(hw); + if (ret_val) + e1000_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM); + +out: + return ret_val; +} + +/** + * e1000_release_nvm_82575 - Release exclusive access to EEPROM + * @hw: pointer to the HW structure + * + * Stop any current commands to the EEPROM and clear the EEPROM request bit, + * then release the semaphores acquired. + **/ +STATIC void e1000_release_nvm_82575(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_release_nvm_82575"); + + e1000_release_nvm_generic(hw); + + e1000_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM); +} + +/** + * e1000_acquire_swfw_sync_82575 - Acquire SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to acquire + * + * Acquire the SW/FW semaphore to access the PHY or NVM. The mask + * will also specify which port we're acquiring the lock for. + **/ +STATIC s32 e1000_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask) +{ + u32 swfw_sync; + u32 swmask = mask; + u32 fwmask = mask << 16; + s32 ret_val = E1000_SUCCESS; + s32 i = 0, timeout = 200; + + DEBUGFUNC("e1000_acquire_swfw_sync_82575"); + + while (i < timeout) { + if (e1000_get_hw_semaphore_generic(hw)) { + ret_val = -E1000_ERR_SWFW_SYNC; + goto out; + } + + swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC); + if (!(swfw_sync & (fwmask | swmask))) + break; + + /* + * Firmware currently using resource (fwmask) + * or other software thread using resource (swmask) + */ + e1000_put_hw_semaphore_generic(hw); + msec_delay_irq(5); + i++; + } + + if (i == timeout) { + DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n"); + ret_val = -E1000_ERR_SWFW_SYNC; + goto out; + } + + swfw_sync |= swmask; + E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync); + + e1000_put_hw_semaphore_generic(hw); + +out: + return ret_val; +} + +/** + * e1000_release_swfw_sync_82575 - Release SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to acquire + * + * Release the SW/FW semaphore used to access the PHY or NVM. The mask + * will also specify which port we're releasing the lock for. 
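+ *
+ * Editor's note: as in the acquire path above, ownership is tracked as a
+ * software bit in the low 16 bits of SW_FW_SYNC plus the matching
+ * firmware bit shifted left by 16.  Acquire waits until
+ * (swfw_sync & (mask | mask << 16)) is clear and then sets the software
+ * bit; this release path only has to clear that software bit again:
+ *
+ *   swfw_sync &= ~mask;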
+ **/ +STATIC void e1000_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask) +{ + u32 swfw_sync; + + DEBUGFUNC("e1000_release_swfw_sync_82575"); + + while (e1000_get_hw_semaphore_generic(hw) != E1000_SUCCESS) + ; /* Empty */ + + swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC); + swfw_sync &= ~mask; + E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync); + + e1000_put_hw_semaphore_generic(hw); +} + +/** + * e1000_get_cfg_done_82575 - Read config done bit + * @hw: pointer to the HW structure + * + * Read the management control register for the config done bit for + * completion status. NOTE: silicon which is EEPROM-less will fail trying + * to read the config done bit, so an error is *ONLY* logged and returns + * E1000_SUCCESS. If we were to return with error, EEPROM-less silicon + * would not be able to be reset or change link. + **/ +STATIC s32 e1000_get_cfg_done_82575(struct e1000_hw *hw) +{ + s32 timeout = PHY_CFG_TIMEOUT; + u32 mask = E1000_NVM_CFG_DONE_PORT_0; + + DEBUGFUNC("e1000_get_cfg_done_82575"); + + if (hw->bus.func == E1000_FUNC_1) + mask = E1000_NVM_CFG_DONE_PORT_1; + else if (hw->bus.func == E1000_FUNC_2) + mask = E1000_NVM_CFG_DONE_PORT_2; + else if (hw->bus.func == E1000_FUNC_3) + mask = E1000_NVM_CFG_DONE_PORT_3; + while (timeout) { + if (E1000_READ_REG(hw, E1000_EEMNGCTL) & mask) + break; + msec_delay(1); + timeout--; + } + if (!timeout) + DEBUGOUT("MNG configuration cycle has not completed.\n"); + + /* If EEPROM is not marked present, init the PHY manually */ + if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) && + (hw->phy.type == e1000_phy_igp_3)) + e1000_phy_init_script_igp3(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_get_link_up_info_82575 - Get link speed/duplex info + * @hw: pointer to the HW structure + * @speed: stores the current speed + * @duplex: stores the current duplex + * + * This is a wrapper function, if using the serial gigabit media independent + * interface, use PCS to retrieve the link speed and duplex information. + * Otherwise, use the generic function to get the link speed and duplex info. + **/ +STATIC s32 e1000_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed, + u16 *duplex) +{ + s32 ret_val; + + DEBUGFUNC("e1000_get_link_up_info_82575"); + + if (hw->phy.media_type != e1000_media_type_copper) + ret_val = e1000_get_pcs_speed_and_duplex_82575(hw, speed, + duplex); + else + ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed, + duplex); + + return ret_val; +} + +/** + * e1000_check_for_link_82575 - Check for link + * @hw: pointer to the HW structure + * + * If sgmii is enabled, then use the pcs register to determine link, otherwise + * use the generic interface for determining link. + **/ +STATIC s32 e1000_check_for_link_82575(struct e1000_hw *hw) +{ + s32 ret_val; + u16 speed, duplex; + + DEBUGFUNC("e1000_check_for_link_82575"); + + if (hw->phy.media_type != e1000_media_type_copper) { + ret_val = e1000_get_pcs_speed_and_duplex_82575(hw, &speed, + &duplex); + /* + * Use this flag to determine if link needs to be checked or + * not. If we have link clear the flag so that we do not + * continue to check for link. + */ + hw->mac.get_link_status = !hw->mac.serdes_has_link; + + /* + * Configure Flow Control now that Auto-Neg has completed. + * First, we need to restore the desired flow control + * settings because we may have had to re-autoneg with a + * different link partner. 
+ */ + ret_val = e1000_config_fc_after_link_up_generic(hw); + if (ret_val) + DEBUGOUT("Error configuring flow control\n"); + } else { + ret_val = e1000_check_for_copper_link_generic(hw); + } + + return ret_val; +} + +/** + * e1000_check_for_link_media_swap - Check which M88E1112 interface linked + * @hw: pointer to the HW structure + * + * Poll the M88E1112 interfaces to see which interface achieved link. + */ +STATIC s32 e1000_check_for_link_media_swap(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + u8 port = 0; + + DEBUGFUNC("e1000_check_for_link_media_swap"); + + /* Check for copper. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0); + if (ret_val) + return ret_val; + + ret_val = phy->ops.read_reg(hw, E1000_M88E1112_STATUS, &data); + if (ret_val) + return ret_val; + + if (data & E1000_M88E1112_STATUS_LINK) + port = E1000_MEDIA_PORT_COPPER; + + /* Check for other. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 1); + if (ret_val) + return ret_val; + + ret_val = phy->ops.read_reg(hw, E1000_M88E1112_STATUS, &data); + if (ret_val) + return ret_val; + + if (data & E1000_M88E1112_STATUS_LINK) + port = E1000_MEDIA_PORT_OTHER; + + /* Determine if a swap needs to happen. */ + if (port && (hw->dev_spec._82575.media_port != port)) { + hw->dev_spec._82575.media_port = port; + hw->dev_spec._82575.media_changed = true; + } + + if (port == E1000_MEDIA_PORT_COPPER) { + /* reset page to 0 */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0); + if (ret_val) + return ret_val; + e1000_check_for_link_82575(hw); + } else { + e1000_check_for_link_82575(hw); + /* reset page to 0 */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0); + if (ret_val) + return ret_val; + } + + return E1000_SUCCESS; +} + +/** + * e1000_power_up_serdes_link_82575 - Power up the serdes link after shutdown + * @hw: pointer to the HW structure + **/ +STATIC void e1000_power_up_serdes_link_82575(struct e1000_hw *hw) +{ + u32 reg; + + DEBUGFUNC("e1000_power_up_serdes_link_82575"); + + if ((hw->phy.media_type != e1000_media_type_internal_serdes) && + !e1000_sgmii_active_82575(hw)) + return; + + /* Enable PCS to turn on link */ + reg = E1000_READ_REG(hw, E1000_PCS_CFG0); + reg |= E1000_PCS_CFG_PCS_EN; + E1000_WRITE_REG(hw, E1000_PCS_CFG0, reg); + + /* Power up the laser */ + reg = E1000_READ_REG(hw, E1000_CTRL_EXT); + reg &= ~E1000_CTRL_EXT_SDP3_DATA; + E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg); + + /* flush the write to verify completion */ + E1000_WRITE_FLUSH(hw); + msec_delay(1); +} + +/** + * e1000_get_pcs_speed_and_duplex_82575 - Retrieve current speed/duplex + * @hw: pointer to the HW structure + * @speed: stores the current speed + * @duplex: stores the current duplex + * + * Using the physical coding sub-layer (PCS), retrieve the current speed and + * duplex, then store the values in the pointers provided. + **/ +STATIC s32 e1000_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, + u16 *speed, u16 *duplex) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 pcs; + u32 status; + + DEBUGFUNC("e1000_get_pcs_speed_and_duplex_82575"); + + /* + * Read the PCS Status register for link state. For non-copper mode, + * the status register is not accurate. The PCS status register is + * used instead. + */ + pcs = E1000_READ_REG(hw, E1000_PCS_LSTAT); + + /* + * The link up bit determines when link is up on autoneg. 
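+ *
+ * Editor's summary of the decode that follows (informational only):
+ *   E1000_PCS_LSTS_LINK_OK set      -> serdes link is up
+ *   E1000_PCS_LSTS_SPEED_1000 set   -> 1000 Mb/s,
+ *   else E1000_PCS_LSTS_SPEED_100   -> 100 Mb/s, otherwise 10 Mb/s
+ *   E1000_PCS_LSTS_DUPLEX_FULL set  -> full duplex, otherwise half
+ * On i354 only, STATUS.2P5_SKU set without 2P5_SKU_OVER upgrades the
+ * result to 2500 Mb/s full duplex.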
+ */ + if (pcs & E1000_PCS_LSTS_LINK_OK) { + mac->serdes_has_link = true; + + /* Detect and store PCS speed */ + if (pcs & E1000_PCS_LSTS_SPEED_1000) + *speed = SPEED_1000; + else if (pcs & E1000_PCS_LSTS_SPEED_100) + *speed = SPEED_100; + else + *speed = SPEED_10; + + /* Detect and store PCS duplex */ + if (pcs & E1000_PCS_LSTS_DUPLEX_FULL) + *duplex = FULL_DUPLEX; + else + *duplex = HALF_DUPLEX; + + /* Check if it is an I354 2.5Gb backplane connection. */ + if (mac->type == e1000_i354) { + status = E1000_READ_REG(hw, E1000_STATUS); + if ((status & E1000_STATUS_2P5_SKU) && + !(status & E1000_STATUS_2P5_SKU_OVER)) { + *speed = SPEED_2500; + *duplex = FULL_DUPLEX; + DEBUGOUT("2500 Mbs, "); + DEBUGOUT("Full Duplex\n"); + } + } + + } else { + mac->serdes_has_link = false; + *speed = 0; + *duplex = 0; + } + + return E1000_SUCCESS; +} + +/** + * e1000_shutdown_serdes_link_82575 - Remove link during power down + * @hw: pointer to the HW structure + * + * In the case of serdes shut down sfp and PCS on driver unload + * when management pass thru is not enabled. + **/ +void e1000_shutdown_serdes_link_82575(struct e1000_hw *hw) +{ + u32 reg; + + DEBUGFUNC("e1000_shutdown_serdes_link_82575"); + + if ((hw->phy.media_type != e1000_media_type_internal_serdes) && + !e1000_sgmii_active_82575(hw)) + return; + + if (!e1000_enable_mng_pass_thru(hw)) { + /* Disable PCS to turn off link */ + reg = E1000_READ_REG(hw, E1000_PCS_CFG0); + reg &= ~E1000_PCS_CFG_PCS_EN; + E1000_WRITE_REG(hw, E1000_PCS_CFG0, reg); + + /* shutdown the laser */ + reg = E1000_READ_REG(hw, E1000_CTRL_EXT); + reg |= E1000_CTRL_EXT_SDP3_DATA; + E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg); + + /* flush the write to verify completion */ + E1000_WRITE_FLUSH(hw); + msec_delay(1); + } + + return; +} + +/** + * e1000_reset_hw_82575 - Reset hardware + * @hw: pointer to the HW structure + * + * This resets the hardware into a known state. + **/ +STATIC s32 e1000_reset_hw_82575(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + + DEBUGFUNC("e1000_reset_hw_82575"); + + /* + * Prevent the PCI-E bus from sticking if there is no TLP connection + * on the last TLP read/write transaction when MAC is reset. + */ + ret_val = e1000_disable_pcie_master_generic(hw); + if (ret_val) + DEBUGOUT("PCI-E Master disable polling has failed.\n"); + + /* set the completion timeout for interface */ + ret_val = e1000_set_pcie_completion_timeout(hw); + if (ret_val) + DEBUGOUT("PCI-E Set completion timeout has failed.\n"); + + DEBUGOUT("Masking off all interrupts\n"); + E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); + + E1000_WRITE_REG(hw, E1000_RCTL, 0); + E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP); + E1000_WRITE_FLUSH(hw); + + msec_delay(10); + + ctrl = E1000_READ_REG(hw, E1000_CTRL); + + DEBUGOUT("Issuing a global reset to MAC\n"); + E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_RST); + + ret_val = e1000_get_auto_rd_done_generic(hw); + if (ret_val) { + /* + * When auto config read does not complete, do not + * return with an error. This can happen in situations + * where there is no eeprom and prevents getting link. + */ + DEBUGOUT("Auto Read Done did not complete\n"); + } + + /* If EEPROM is not present, run manual init scripts */ + if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES)) + e1000_reset_init_script_82575(hw); + + /* Clear any pending interrupt events. 
*/ + E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); + E1000_READ_REG(hw, E1000_ICR); + + /* Install any alternate MAC address into RAR0 */ + ret_val = e1000_check_alt_mac_addr_generic(hw); + + return ret_val; +} + +/** + * e1000_init_hw_82575 - Initialize hardware + * @hw: pointer to the HW structure + * + * This inits the hardware readying it for operation. + **/ +s32 e1000_init_hw_82575(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val; + u16 i, rar_count = mac->rar_entry_count; + + DEBUGFUNC("e1000_init_hw_82575"); + + /* Initialize identification LED */ + ret_val = mac->ops.id_led_init(hw); + if (ret_val) { + DEBUGOUT("Error initializing identification LED\n"); + /* This is not fatal and we should not stop init due to this */ + } + + /* Disabling VLAN filtering */ + DEBUGOUT("Initializing the IEEE VLAN\n"); + mac->ops.clear_vfta(hw); + + /* Setup the receive address */ + e1000_init_rx_addrs_generic(hw, rar_count); + + /* Zero out the Multicast HASH table */ + DEBUGOUT("Zeroing the MTA\n"); + for (i = 0; i < mac->mta_reg_count; i++) + E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); + + /* Zero out the Unicast HASH table */ + DEBUGOUT("Zeroing the UTA\n"); + for (i = 0; i < mac->uta_reg_count; i++) + E1000_WRITE_REG_ARRAY(hw, E1000_UTA, i, 0); + + /* Setup link and flow control */ + ret_val = mac->ops.setup_link(hw); + + /* Set the default MTU size */ + hw->dev_spec._82575.mtu = 1500; + + /* + * Clear all of the statistics registers (clear on read). It is + * important that we do this after we have tried to establish link + * because the symbol error count will increment wildly if there + * is no link. + */ + e1000_clear_hw_cntrs_82575(hw); + + return ret_val; +} + +/** + * e1000_setup_copper_link_82575 - Configure copper link settings + * @hw: pointer to the HW structure + * + * Configures the link for auto-neg or forced speed and duplex. Then we check + * for link, once link is established calls to configure collision distance + * and flow control are called. 
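+ *
+ * Editor's note: this is the copper leg of the
+ * mac->ops.setup_physical_interface pointer assigned in
+ * e1000_init_mac_params_82575() above; the serdes/sgmii leg is
+ * e1000_setup_serdes_link_82575() below, which this routine also calls
+ * directly (that helper returns early unless internal serdes or sgmii is
+ * in use).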
+ **/ +STATIC s32 e1000_setup_copper_link_82575(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + u32 phpm_reg; + + DEBUGFUNC("e1000_setup_copper_link_82575"); + + ctrl = E1000_READ_REG(hw, E1000_CTRL); + ctrl |= E1000_CTRL_SLU; + ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + + /* Clear Go Link Disconnect bit on supported devices */ + switch (hw->mac.type) { + case e1000_82580: + case e1000_i350: + case e1000_i210: + case e1000_i211: + phpm_reg = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT); + phpm_reg &= ~E1000_82580_PM_GO_LINKD; + E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, phpm_reg); + break; + default: + break; + } + + ret_val = e1000_setup_serdes_link_82575(hw); + if (ret_val) + goto out; + + if (e1000_sgmii_active_82575(hw)) { + /* allow time for SFP cage time to power up phy */ + msec_delay(300); + + ret_val = hw->phy.ops.reset(hw); + if (ret_val) { + DEBUGOUT("Error resetting the PHY.\n"); + goto out; + } + } + switch (hw->phy.type) { + case e1000_phy_i210: + case e1000_phy_m88: + switch (hw->phy.id) { + case I347AT4_E_PHY_ID: + case M88E1112_E_PHY_ID: + case M88E1340M_E_PHY_ID: + case M88E1543_E_PHY_ID: + case M88E1512_E_PHY_ID: + case I210_I_PHY_ID: + ret_val = e1000_copper_link_setup_m88_gen2(hw); + break; + default: + ret_val = e1000_copper_link_setup_m88(hw); + break; + } + break; + case e1000_phy_igp_3: + ret_val = e1000_copper_link_setup_igp(hw); + break; + case e1000_phy_82580: + ret_val = e1000_copper_link_setup_82577(hw); + break; + case e1000_phy_none: + break; + default: + ret_val = -E1000_ERR_PHY; + break; + } + + if (ret_val) + goto out; + + ret_val = e1000_setup_copper_link_generic(hw); +out: + return ret_val; +} + +/** + * e1000_setup_serdes_link_82575 - Setup link for serdes + * @hw: pointer to the HW structure + * + * Configure the physical coding sub-layer (PCS) link. The PCS link is + * used on copper connections where the serialized gigabit media independent + * interface (sgmii), or serdes fiber is being used. Configures the link + * for auto-negotiation or forces speed/duplex. + **/ +STATIC s32 e1000_setup_serdes_link_82575(struct e1000_hw *hw) +{ + u32 ctrl_ext, ctrl_reg, reg, anadv_reg; + bool pcs_autoneg; + s32 ret_val = E1000_SUCCESS; + u16 data; + + DEBUGFUNC("e1000_setup_serdes_link_82575"); + + if ((hw->phy.media_type != e1000_media_type_internal_serdes) && + !e1000_sgmii_active_82575(hw)) + return ret_val; + + /* + * On the 82575, SerDes loopback mode persists until it is + * explicitly turned off or a power cycle is performed. A read to + * the register does not indicate its status. Therefore, we ensure + * loopback mode is disabled during initialization. 
+ */ + E1000_WRITE_REG(hw, E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK); + + /* power on the sfp cage if present */ + ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); + ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA; + E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); + + ctrl_reg = E1000_READ_REG(hw, E1000_CTRL); + ctrl_reg |= E1000_CTRL_SLU; + + /* set both sw defined pins on 82575/82576*/ + if (hw->mac.type == e1000_82575 || hw->mac.type == e1000_82576) + ctrl_reg |= E1000_CTRL_SWDPIN0 | E1000_CTRL_SWDPIN1; + + reg = E1000_READ_REG(hw, E1000_PCS_LCTL); + + /* default pcs_autoneg to the same setting as mac autoneg */ + pcs_autoneg = hw->mac.autoneg; + + switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) { + case E1000_CTRL_EXT_LINK_MODE_SGMII: + /* sgmii mode lets the phy handle forcing speed/duplex */ + pcs_autoneg = true; + /* autoneg time out should be disabled for SGMII mode */ + reg &= ~(E1000_PCS_LCTL_AN_TIMEOUT); + break; + case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX: + /* disable PCS autoneg and support parallel detect only */ + pcs_autoneg = false; + /* fall through to default case */ + default: + if (hw->mac.type == e1000_82575 || + hw->mac.type == e1000_82576) { + ret_val = hw->nvm.ops.read(hw, NVM_COMPAT, 1, &data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + + if (data & E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT) + pcs_autoneg = false; + } + + /* + * non-SGMII modes only supports a speed of 1000/Full for the + * link so it is best to just force the MAC and let the pcs + * link either autoneg or be forced to 1000/Full + */ + ctrl_reg |= E1000_CTRL_SPD_1000 | E1000_CTRL_FRCSPD | + E1000_CTRL_FD | E1000_CTRL_FRCDPX; + + /* set speed of 1000/Full if speed/duplex is forced */ + reg |= E1000_PCS_LCTL_FSV_1000 | E1000_PCS_LCTL_FDV_FULL; + break; + } + + E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg); + + /* + * New SerDes mode allows for forcing speed or autonegotiating speed + * at 1gb. Autoneg should be default set by most drivers. This is the + * mode that will be compatible with older link partners and switches. + * However, both are supported by the hardware and some drivers/tools. + */ + reg &= ~(E1000_PCS_LCTL_AN_ENABLE | E1000_PCS_LCTL_FLV_LINK_UP | + E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK); + + if (pcs_autoneg) { + /* Set PCS register for autoneg */ + reg |= E1000_PCS_LCTL_AN_ENABLE | /* Enable Autoneg */ + E1000_PCS_LCTL_AN_RESTART; /* Restart autoneg */ + + /* Disable force flow control for autoneg */ + reg &= ~E1000_PCS_LCTL_FORCE_FCTRL; + + /* Configure flow control advertisement for autoneg */ + anadv_reg = E1000_READ_REG(hw, E1000_PCS_ANADV); + anadv_reg &= ~(E1000_TXCW_ASM_DIR | E1000_TXCW_PAUSE); + + switch (hw->fc.requested_mode) { + case e1000_fc_full: + case e1000_fc_rx_pause: + anadv_reg |= E1000_TXCW_ASM_DIR; + anadv_reg |= E1000_TXCW_PAUSE; + break; + case e1000_fc_tx_pause: + anadv_reg |= E1000_TXCW_ASM_DIR; + break; + default: + break; + } + + E1000_WRITE_REG(hw, E1000_PCS_ANADV, anadv_reg); + + DEBUGOUT1("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg); + } else { + /* Set PCS register for forced link */ + reg |= E1000_PCS_LCTL_FSD; /* Force Speed */ + + /* Force flow control for forced link */ + reg |= E1000_PCS_LCTL_FORCE_FCTRL; + + DEBUGOUT1("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg); + } + + E1000_WRITE_REG(hw, E1000_PCS_LCTL, reg); + + if (!pcs_autoneg && !e1000_sgmii_active_82575(hw)) + e1000_force_mac_fc_generic(hw); + + return ret_val; +} + +/** + * e1000_get_media_type_82575 - derives current media type. 
+ * @hw: pointer to the HW structure + * + * The media type is chosen reflecting few settings. + * The following are taken into account: + * - link mode set in the current port Init Control Word #3 + * - current link mode settings in CSR register + * - MDIO vs. I2C PHY control interface chosen + * - SFP module media type + **/ +STATIC s32 e1000_get_media_type_82575(struct e1000_hw *hw) +{ + struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575; + s32 ret_val = E1000_SUCCESS; + u32 ctrl_ext = 0; + u32 link_mode = 0; + + /* Set internal phy as default */ + dev_spec->sgmii_active = false; + dev_spec->module_plugged = false; + + /* Get CSR setting */ + ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); + + /* extract link mode setting */ + link_mode = ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK; + + switch (link_mode) { + case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX: + hw->phy.media_type = e1000_media_type_internal_serdes; + break; + case E1000_CTRL_EXT_LINK_MODE_GMII: + hw->phy.media_type = e1000_media_type_copper; + break; + case E1000_CTRL_EXT_LINK_MODE_SGMII: + /* Get phy control interface type set (MDIO vs. I2C)*/ + if (e1000_sgmii_uses_mdio_82575(hw)) { + hw->phy.media_type = e1000_media_type_copper; + dev_spec->sgmii_active = true; + break; + } + /* fall through for I2C based SGMII */ + case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES: + /* read media type from SFP EEPROM */ + ret_val = e1000_set_sfp_media_type_82575(hw); + if ((ret_val != E1000_SUCCESS) || + (hw->phy.media_type == e1000_media_type_unknown)) { + /* + * If media type was not identified then return media + * type defined by the CTRL_EXT settings. + */ + hw->phy.media_type = e1000_media_type_internal_serdes; + + if (link_mode == E1000_CTRL_EXT_LINK_MODE_SGMII) { + hw->phy.media_type = e1000_media_type_copper; + dev_spec->sgmii_active = true; + } + + break; + } + + /* do not change link mode for 100BaseFX */ + if (dev_spec->eth_flags.e100_base_fx) + break; + + /* change current link mode setting */ + ctrl_ext &= ~E1000_CTRL_EXT_LINK_MODE_MASK; + + if (hw->phy.media_type == e1000_media_type_copper) + ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_SGMII; + else + ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES; + + E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); + + break; + } + + return ret_val; +} + +/** + * e1000_set_sfp_media_type_82575 - derives SFP module media type. + * @hw: pointer to the HW structure + * + * The media type is chosen based on SFP module. + * compatibility flags retrieved from SFP ID EEPROM. 
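+ *
+ * Editor's summary of the mapping applied below (informational only):
+ *   1000BASE-LX or 1000BASE-SX flag -> internal serdes
+ *   100BASE-FX flag                 -> internal serdes, sgmii_active set
+ *   1000BASE-T flag                 -> copper, sgmii_active set
+ *   no recognized flag              -> media type left unknown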
+ **/ +STATIC s32 e1000_set_sfp_media_type_82575(struct e1000_hw *hw) +{ + s32 ret_val = E1000_ERR_CONFIG; + u32 ctrl_ext = 0; + struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575; + struct sfp_e1000_flags *eth_flags = &dev_spec->eth_flags; + u8 tranceiver_type = 0; + s32 timeout = 3; + + /* Turn I2C interface ON and power on sfp cage */ + ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); + ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA; + E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_I2C_ENA); + + E1000_WRITE_FLUSH(hw); + + /* Read SFP module data */ + while (timeout) { + ret_val = e1000_read_sfp_data_byte(hw, + E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_IDENTIFIER_OFFSET), + &tranceiver_type); + if (ret_val == E1000_SUCCESS) + break; + msec_delay(100); + timeout--; + } + if (ret_val != E1000_SUCCESS) + goto out; + + ret_val = e1000_read_sfp_data_byte(hw, + E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_ETH_FLAGS_OFFSET), + (u8 *)eth_flags); + if (ret_val != E1000_SUCCESS) + goto out; + + /* Check if there is some SFP module plugged and powered */ + if ((tranceiver_type == E1000_SFF_IDENTIFIER_SFP) || + (tranceiver_type == E1000_SFF_IDENTIFIER_SFF)) { + dev_spec->module_plugged = true; + if (eth_flags->e1000_base_lx || eth_flags->e1000_base_sx) { + hw->phy.media_type = e1000_media_type_internal_serdes; + } else if (eth_flags->e100_base_fx) { + dev_spec->sgmii_active = true; + hw->phy.media_type = e1000_media_type_internal_serdes; + } else if (eth_flags->e1000_base_t) { + dev_spec->sgmii_active = true; + hw->phy.media_type = e1000_media_type_copper; + } else { + hw->phy.media_type = e1000_media_type_unknown; + DEBUGOUT("PHY module has not been recognized\n"); + goto out; + } + } else { + hw->phy.media_type = e1000_media_type_unknown; + } + ret_val = E1000_SUCCESS; +out: + /* Restore I2C interface setting */ + E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); + return ret_val; +} + +/** + * e1000_valid_led_default_82575 - Verify a valid default LED config + * @hw: pointer to the HW structure + * @data: pointer to the NVM (EEPROM) + * + * Read the EEPROM for the current default LED configuration. If the + * LED configuration is not valid, set to a valid LED configuration. + **/ +STATIC s32 e1000_valid_led_default_82575(struct e1000_hw *hw, u16 *data) +{ + s32 ret_val; + + DEBUGFUNC("e1000_valid_led_default_82575"); + + ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + goto out; + } + + if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) { + switch (hw->phy.media_type) { + case e1000_media_type_internal_serdes: + *data = ID_LED_DEFAULT_82575_SERDES; + break; + case e1000_media_type_copper: + default: + *data = ID_LED_DEFAULT; + break; + } + } +out: + return ret_val; +} + +/** + * e1000_sgmii_active_82575 - Return sgmii state + * @hw: pointer to the HW structure + * + * 82575 silicon has a serialized gigabit media independent interface (sgmii) + * which can be enabled for use in the embedded applications. Simply + * return the current state of the sgmii interface. + **/ +STATIC bool e1000_sgmii_active_82575(struct e1000_hw *hw) +{ + struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575; + return dev_spec->sgmii_active; +} + +/** + * e1000_reset_init_script_82575 - Inits HW defaults after reset + * @hw: pointer to the HW structure + * + * Inits recommended HW defaults after a reset when there is no EEPROM + * detected. This is only for the 82575. 
+ **/ +STATIC s32 e1000_reset_init_script_82575(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_reset_init_script_82575"); + + if (hw->mac.type == e1000_82575) { + DEBUGOUT("Running reset init script for 82575\n"); + /* SerDes configuration via SERDESCTRL */ + e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x00, 0x0C); + e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x01, 0x78); + e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x1B, 0x23); + e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x23, 0x15); + + /* CCM configuration via CCMCTL register */ + e1000_write_8bit_ctrl_reg_generic(hw, E1000_CCMCTL, 0x14, 0x00); + e1000_write_8bit_ctrl_reg_generic(hw, E1000_CCMCTL, 0x10, 0x00); + + /* PCIe lanes configuration */ + e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x00, 0xEC); + e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x61, 0xDF); + e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x34, 0x05); + e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x2F, 0x81); + + /* PCIe PLL Configuration */ + e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL, 0x02, 0x47); + e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL, 0x14, 0x00); + e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL, 0x10, 0x00); + } + + return E1000_SUCCESS; +} + +/** + * e1000_read_mac_addr_82575 - Read device MAC address + * @hw: pointer to the HW structure + **/ +STATIC s32 e1000_read_mac_addr_82575(struct e1000_hw *hw) +{ + s32 ret_val; + + DEBUGFUNC("e1000_read_mac_addr_82575"); + + /* + * If there's an alternate MAC address place it in RAR0 + * so that it will override the Si installed default perm + * address. + */ + ret_val = e1000_check_alt_mac_addr_generic(hw); + if (ret_val) + goto out; + + ret_val = e1000_read_mac_addr_generic(hw); + +out: + return ret_val; +} + +/** + * e1000_config_collision_dist_82575 - Configure collision distance + * @hw: pointer to the HW structure + * + * Configures the collision distance to the default value and is used + * during link setup. + **/ +STATIC void e1000_config_collision_dist_82575(struct e1000_hw *hw) +{ + u32 tctl_ext; + + DEBUGFUNC("e1000_config_collision_dist_82575"); + + tctl_ext = E1000_READ_REG(hw, E1000_TCTL_EXT); + + tctl_ext &= ~E1000_TCTL_EXT_COLD; + tctl_ext |= E1000_COLLISION_DISTANCE << E1000_TCTL_EXT_COLD_SHIFT; + + E1000_WRITE_REG(hw, E1000_TCTL_EXT, tctl_ext); + E1000_WRITE_FLUSH(hw); +} + +/** + * e1000_power_down_phy_copper_82575 - Remove link during PHY power down + * @hw: pointer to the HW structure + * + * In the case of a PHY power down to save power, or to turn off link during a + * driver unload, or wake on lan is not enabled, remove the link. + **/ +STATIC void e1000_power_down_phy_copper_82575(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + + if (!(phy->ops.check_reset_block)) + return; + + /* If the management interface is not enabled, then power down */ + if (!(e1000_enable_mng_pass_thru(hw) || phy->ops.check_reset_block(hw))) + e1000_power_down_phy_copper(hw); + + return; +} + +/** + * e1000_clear_hw_cntrs_82575 - Clear device specific hardware counters + * @hw: pointer to the HW structure + * + * Clears the hardware counters by reading the counter registers. 
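+ *
+ * Editor's note: as stated in e1000_init_hw_82575() above, these
+ * statistics registers are clear-on-read, so the bare E1000_READ_REG()
+ * calls below are enough to zero them; the values returned are
+ * intentionally discarded.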
+ **/ +STATIC void e1000_clear_hw_cntrs_82575(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_clear_hw_cntrs_82575"); + + e1000_clear_hw_cntrs_base_generic(hw); + + E1000_READ_REG(hw, E1000_PRC64); + E1000_READ_REG(hw, E1000_PRC127); + E1000_READ_REG(hw, E1000_PRC255); + E1000_READ_REG(hw, E1000_PRC511); + E1000_READ_REG(hw, E1000_PRC1023); + E1000_READ_REG(hw, E1000_PRC1522); + E1000_READ_REG(hw, E1000_PTC64); + E1000_READ_REG(hw, E1000_PTC127); + E1000_READ_REG(hw, E1000_PTC255); + E1000_READ_REG(hw, E1000_PTC511); + E1000_READ_REG(hw, E1000_PTC1023); + E1000_READ_REG(hw, E1000_PTC1522); + + E1000_READ_REG(hw, E1000_ALGNERRC); + E1000_READ_REG(hw, E1000_RXERRC); + E1000_READ_REG(hw, E1000_TNCRS); + E1000_READ_REG(hw, E1000_CEXTERR); + E1000_READ_REG(hw, E1000_TSCTC); + E1000_READ_REG(hw, E1000_TSCTFC); + + E1000_READ_REG(hw, E1000_MGTPRC); + E1000_READ_REG(hw, E1000_MGTPDC); + E1000_READ_REG(hw, E1000_MGTPTC); + + E1000_READ_REG(hw, E1000_IAC); + E1000_READ_REG(hw, E1000_ICRXOC); + + E1000_READ_REG(hw, E1000_ICRXPTC); + E1000_READ_REG(hw, E1000_ICRXATC); + E1000_READ_REG(hw, E1000_ICTXPTC); + E1000_READ_REG(hw, E1000_ICTXATC); + E1000_READ_REG(hw, E1000_ICTXQEC); + E1000_READ_REG(hw, E1000_ICTXQMTC); + E1000_READ_REG(hw, E1000_ICRXDMTC); + + E1000_READ_REG(hw, E1000_CBTMPC); + E1000_READ_REG(hw, E1000_HTDPMC); + E1000_READ_REG(hw, E1000_CBRMPC); + E1000_READ_REG(hw, E1000_RPTHC); + E1000_READ_REG(hw, E1000_HGPTC); + E1000_READ_REG(hw, E1000_HTCBDPC); + E1000_READ_REG(hw, E1000_HGORCL); + E1000_READ_REG(hw, E1000_HGORCH); + E1000_READ_REG(hw, E1000_HGOTCL); + E1000_READ_REG(hw, E1000_HGOTCH); + E1000_READ_REG(hw, E1000_LENERRS); + + /* This register should not be read in copper configurations */ + if ((hw->phy.media_type == e1000_media_type_internal_serdes) || + e1000_sgmii_active_82575(hw)) + E1000_READ_REG(hw, E1000_SCVPC); +} + +/** + * e1000_rx_fifo_flush_82575 - Clean rx fifo after Rx enable + * @hw: pointer to the HW structure + * + * After Rx enable, if manageability is enabled then there is likely some + * bad data at the start of the fifo and possibly in the DMA fifo. This + * function clears the fifos and flushes any packets that came in as rx was + * being enabled. + **/ +void e1000_rx_fifo_flush_82575(struct e1000_hw *hw) +{ + u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled; + int i, ms_wait; + + DEBUGFUNC("e1000_rx_fifo_flush_82575"); + + /* disable IPv6 options as per hardware errata */ + rfctl = E1000_READ_REG(hw, E1000_RFCTL); + rfctl |= E1000_RFCTL_IPV6_EX_DIS; + E1000_WRITE_REG(hw, E1000_RFCTL, rfctl); + + if (hw->mac.type != e1000_82575 || + !(E1000_READ_REG(hw, E1000_MANC) & E1000_MANC_RCV_TCO_EN)) + return; + + /* Disable all Rx queues */ + for (i = 0; i < 4; i++) { + rxdctl[i] = E1000_READ_REG(hw, E1000_RXDCTL(i)); + E1000_WRITE_REG(hw, E1000_RXDCTL(i), + rxdctl[i] & ~E1000_RXDCTL_QUEUE_ENABLE); + } + /* Poll all queues to verify they have shut down */ + for (ms_wait = 0; ms_wait < 10; ms_wait++) { + msec_delay(1); + rx_enabled = 0; + for (i = 0; i < 4; i++) + rx_enabled |= E1000_READ_REG(hw, E1000_RXDCTL(i)); + if (!(rx_enabled & E1000_RXDCTL_QUEUE_ENABLE)) + break; + } + + if (ms_wait == 10) + DEBUGOUT("Queue disable timed out after 10ms\n"); + + /* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all + * incoming packets are rejected. 
Set enable and wait 2ms so that + * any packet that was coming in as RCTL.EN was set is flushed + */ + E1000_WRITE_REG(hw, E1000_RFCTL, rfctl & ~E1000_RFCTL_LEF); + + rlpml = E1000_READ_REG(hw, E1000_RLPML); + E1000_WRITE_REG(hw, E1000_RLPML, 0); + + rctl = E1000_READ_REG(hw, E1000_RCTL); + temp_rctl = rctl & ~(E1000_RCTL_EN | E1000_RCTL_SBP); + temp_rctl |= E1000_RCTL_LPE; + + E1000_WRITE_REG(hw, E1000_RCTL, temp_rctl); + E1000_WRITE_REG(hw, E1000_RCTL, temp_rctl | E1000_RCTL_EN); + E1000_WRITE_FLUSH(hw); + msec_delay(2); + + /* Enable Rx queues that were previously enabled and restore our + * previous state + */ + for (i = 0; i < 4; i++) + E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl[i]); + E1000_WRITE_REG(hw, E1000_RCTL, rctl); + E1000_WRITE_FLUSH(hw); + + E1000_WRITE_REG(hw, E1000_RLPML, rlpml); + E1000_WRITE_REG(hw, E1000_RFCTL, rfctl); + + /* Flush receive errors generated by workaround */ + E1000_READ_REG(hw, E1000_ROC); + E1000_READ_REG(hw, E1000_RNBC); + E1000_READ_REG(hw, E1000_MPC); +} + +/** + * e1000_set_pcie_completion_timeout - set pci-e completion timeout + * @hw: pointer to the HW structure + * + * The defaults for 82575 and 82576 should be in the range of 50us to 50ms, + * however the hardware default for these parts is 500us to 1ms which is less + * than the 10ms recommended by the pci-e spec. To address this we need to + * increase the value to either 10ms to 200ms for capability version 1 config, + * or 16ms to 55ms for version 2. + **/ +STATIC s32 e1000_set_pcie_completion_timeout(struct e1000_hw *hw) +{ + u32 gcr = E1000_READ_REG(hw, E1000_GCR); + s32 ret_val = E1000_SUCCESS; + u16 pcie_devctl2; + + /* only take action if timeout value is defaulted to 0 */ + if (gcr & E1000_GCR_CMPL_TMOUT_MASK) + goto out; + + /* + * if capababilities version is type 1 we can write the + * timeout of 10ms to 200ms through the GCR register + */ + if (!(gcr & E1000_GCR_CAP_VER2)) { + gcr |= E1000_GCR_CMPL_TMOUT_10ms; + goto out; + } + + /* + * for version 2 capabilities we need to write the config space + * directly in order to set the completion timeout value for + * 16ms to 55ms + */ + ret_val = e1000_read_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2, + &pcie_devctl2); + if (ret_val) + goto out; + + pcie_devctl2 |= PCIE_DEVICE_CONTROL2_16ms; + + ret_val = e1000_write_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2, + &pcie_devctl2); +out: + /* disable completion timeout resend */ + gcr &= ~E1000_GCR_CMPL_TMOUT_RESEND; + + E1000_WRITE_REG(hw, E1000_GCR, gcr); + return ret_val; +} + +/** + * e1000_vmdq_set_anti_spoofing_pf - enable or disable anti-spoofing + * @hw: pointer to the hardware struct + * @enable: state to enter, either enabled or disabled + * @pf: Physical Function pool - do not set anti-spoofing for the PF + * + * enables/disables L2 switch anti-spoofing functionality. 
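+ *
+ * Editor's note: hypothetical usage sketch, where pf_pool is a
+ * placeholder for the pool index reserved for the PF:
+ *
+ *   e1000_vmdq_set_anti_spoofing_pf(hw, true, pf_pool);
+ *
+ * The implementation below first sets the MAC/VLAN spoof-check bits for
+ * every pool and then flips the PF's own bits back off, since the PF
+ * must remain able to spoof to support emulation mode NICs.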
+ **/ +void e1000_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf) +{ + u32 reg_val, reg_offset; + + switch (hw->mac.type) { + case e1000_82576: + reg_offset = E1000_DTXSWC; + break; + case e1000_i350: + case e1000_i354: + reg_offset = E1000_TXSWC; + break; + default: + return; + } + + reg_val = E1000_READ_REG(hw, reg_offset); + if (enable) { + reg_val |= (E1000_DTXSWC_MAC_SPOOF_MASK | + E1000_DTXSWC_VLAN_SPOOF_MASK); + /* The PF can spoof - it has to in order to + * support emulation mode NICs + */ + reg_val ^= (1 << pf | 1 << (pf + MAX_NUM_VFS)); + } else { + reg_val &= ~(E1000_DTXSWC_MAC_SPOOF_MASK | + E1000_DTXSWC_VLAN_SPOOF_MASK); + } + E1000_WRITE_REG(hw, reg_offset, reg_val); +} + +/** + * e1000_vmdq_set_loopback_pf - enable or disable vmdq loopback + * @hw: pointer to the hardware struct + * @enable: state to enter, either enabled or disabled + * + * enables/disables L2 switch loopback functionality. + **/ +void e1000_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable) +{ + u32 dtxswc; + + switch (hw->mac.type) { + case e1000_82576: + dtxswc = E1000_READ_REG(hw, E1000_DTXSWC); + if (enable) + dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN; + else + dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN; + E1000_WRITE_REG(hw, E1000_DTXSWC, dtxswc); + break; + case e1000_i350: + case e1000_i354: + dtxswc = E1000_READ_REG(hw, E1000_TXSWC); + if (enable) + dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN; + else + dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN; + E1000_WRITE_REG(hw, E1000_TXSWC, dtxswc); + break; + default: + /* Currently no other hardware supports loopback */ + break; + } + + +} + +/** + * e1000_vmdq_set_replication_pf - enable or disable vmdq replication + * @hw: pointer to the hardware struct + * @enable: state to enter, either enabled or disabled + * + * enables/disables replication of packets across multiple pools. + **/ +void e1000_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable) +{ + u32 vt_ctl = E1000_READ_REG(hw, E1000_VT_CTL); + + if (enable) + vt_ctl |= E1000_VT_CTL_VM_REPL_EN; + else + vt_ctl &= ~E1000_VT_CTL_VM_REPL_EN; + + E1000_WRITE_REG(hw, E1000_VT_CTL, vt_ctl); +} + +/** + * e1000_read_phy_reg_82580 - Read 82580 MDI control register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the MDI control register in the PHY at offset and stores the + * information read to data. + **/ +STATIC s32 e1000_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data) +{ + s32 ret_val; + + DEBUGFUNC("e1000_read_phy_reg_82580"); + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + + ret_val = e1000_read_phy_reg_mdic(hw, offset, data); + + hw->phy.ops.release(hw); + +out: + return ret_val; +} + +/** + * e1000_write_phy_reg_82580 - Write 82580 MDI control register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write to register at offset + * + * Writes data to MDI control register in the PHY at offset. 
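+ *
+ * Editor's note: illustrative read-modify-write through the ops table
+ * (offset, data and SOME_BIT are placeholders; on 82580-family parts
+ * phy.ops.read_reg/write_reg are presumed to resolve to this MDIC pair):
+ *
+ *   ret_val = hw->phy.ops.read_reg(hw, offset, &data);
+ *   if (ret_val == E1000_SUCCESS) {
+ *           data |= SOME_BIT;
+ *           ret_val = hw->phy.ops.write_reg(hw, offset, data);
+ *   }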
+ **/ +STATIC s32 e1000_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data) +{ + s32 ret_val; + + DEBUGFUNC("e1000_write_phy_reg_82580"); + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + + ret_val = e1000_write_phy_reg_mdic(hw, offset, data); + + hw->phy.ops.release(hw); + +out: + return ret_val; +} + +/** + * e1000_reset_mdicnfg_82580 - Reset MDICNFG destination and com_mdio bits + * @hw: pointer to the HW structure + * + * This resets the MDICNFG.Destination and MDICNFG.Com_MDIO bits based on + * the values found in the EEPROM. This addresses an issue in which these + * bits are not restored from EEPROM after reset. + **/ +STATIC s32 e1000_reset_mdicnfg_82580(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + u32 mdicnfg; + u16 nvm_data = 0; + + DEBUGFUNC("e1000_reset_mdicnfg_82580"); + + if (hw->mac.type != e1000_82580) + goto out; + if (!e1000_sgmii_active_82575(hw)) + goto out; + + ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A + + NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1, + &nvm_data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + goto out; + } + + mdicnfg = E1000_READ_REG(hw, E1000_MDICNFG); + if (nvm_data & NVM_WORD24_EXT_MDIO) + mdicnfg |= E1000_MDICNFG_EXT_MDIO; + if (nvm_data & NVM_WORD24_COM_MDIO) + mdicnfg |= E1000_MDICNFG_COM_MDIO; + E1000_WRITE_REG(hw, E1000_MDICNFG, mdicnfg); +out: + return ret_val; +} + +/** + * e1000_reset_hw_82580 - Reset hardware + * @hw: pointer to the HW structure + * + * This resets function or entire device (all ports, etc.) + * to a known state. + **/ +STATIC s32 e1000_reset_hw_82580(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + /* BH SW mailbox bit in SW_FW_SYNC */ + u16 swmbsw_mask = E1000_SW_SYNCH_MB; + u32 ctrl; + bool global_device_reset = hw->dev_spec._82575.global_device_reset; + + DEBUGFUNC("e1000_reset_hw_82580"); + + hw->dev_spec._82575.global_device_reset = false; + + /* 82580 does not reliably do global_device_reset due to hw errata */ + if (hw->mac.type == e1000_82580) + global_device_reset = false; + + /* Get current control state. */ + ctrl = E1000_READ_REG(hw, E1000_CTRL); + + /* + * Prevent the PCI-E bus from sticking if there is no TLP connection + * on the last TLP read/write transaction when MAC is reset. + */ + ret_val = e1000_disable_pcie_master_generic(hw); + if (ret_val) + DEBUGOUT("PCI-E Master disable polling has failed.\n"); + + DEBUGOUT("Masking off all interrupts\n"); + E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); + E1000_WRITE_REG(hw, E1000_RCTL, 0); + E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP); + E1000_WRITE_FLUSH(hw); + + msec_delay(10); + + /* Determine whether or not a global dev reset is requested */ + if (global_device_reset && hw->mac.ops.acquire_swfw_sync(hw, + swmbsw_mask)) + global_device_reset = false; + + if (global_device_reset && !(E1000_READ_REG(hw, E1000_STATUS) & + E1000_STAT_DEV_RST_SET)) + ctrl |= E1000_CTRL_DEV_RST; + else + ctrl |= E1000_CTRL_RST; + + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + + switch (hw->device_id) { + case E1000_DEV_ID_DH89XXCC_SGMII: + break; + default: + E1000_WRITE_FLUSH(hw); + break; + } + + /* Add delay to insure DEV_RST or RST has time to complete */ + msec_delay(5); + + ret_val = e1000_get_auto_rd_done_generic(hw); + if (ret_val) { + /* + * When auto config read does not complete, do not + * return with an error. This can happen in situations + * where there is no eeprom and prevents getting link. 
+ */ + DEBUGOUT("Auto Read Done did not complete\n"); + } + + /* clear global device reset status bit */ + E1000_WRITE_REG(hw, E1000_STATUS, E1000_STAT_DEV_RST_SET); + + /* Clear any pending interrupt events. */ + E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); + E1000_READ_REG(hw, E1000_ICR); + + ret_val = e1000_reset_mdicnfg_82580(hw); + if (ret_val) + DEBUGOUT("Could not reset MDICNFG based on EEPROM\n"); + + /* Install any alternate MAC address into RAR0 */ + ret_val = e1000_check_alt_mac_addr_generic(hw); + + /* Release semaphore */ + if (global_device_reset) + hw->mac.ops.release_swfw_sync(hw, swmbsw_mask); + + return ret_val; +} + +/** + * e1000_rxpbs_adjust_82580 - adjust RXPBS value to reflect actual Rx PBA size + * @data: data received by reading RXPBS register + * + * The 82580 uses a table based approach for packet buffer allocation sizes. + * This function converts the retrieved value into the correct table value + * 0x0 0x1 0x2 0x3 0x4 0x5 0x6 0x7 + * 0x0 36 72 144 1 2 4 8 16 + * 0x8 35 70 140 rsv rsv rsv rsv rsv + */ +u16 e1000_rxpbs_adjust_82580(u32 data) +{ + u16 ret_val = 0; + + if (data < E1000_82580_RXPBS_TABLE_SIZE) + ret_val = e1000_82580_rxpbs_table[data]; + + return ret_val; +} + +/** + * e1000_validate_nvm_checksum_with_offset - Validate EEPROM + * checksum + * @hw: pointer to the HW structure + * @offset: offset in words of the checksum protected region + * + * Calculates the EEPROM checksum by reading/adding each word of the EEPROM + * and then verifies that the sum of the EEPROM is equal to 0xBABA. + **/ +s32 e1000_validate_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset) +{ + s32 ret_val = E1000_SUCCESS; + u16 checksum = 0; + u16 i, nvm_data; + + DEBUGFUNC("e1000_validate_nvm_checksum_with_offset"); + + for (i = offset; i < ((NVM_CHECKSUM_REG + offset) + 1); i++) { + ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + goto out; + } + checksum += nvm_data; + } + + if (checksum != (u16) NVM_SUM) { + DEBUGOUT("NVM Checksum Invalid\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + +out: + return ret_val; +} + +/** + * e1000_update_nvm_checksum_with_offset - Update EEPROM + * checksum + * @hw: pointer to the HW structure + * @offset: offset in words of the checksum protected region + * + * Updates the EEPROM checksum by reading/adding each word of the EEPROM + * up to the checksum. Then calculates the EEPROM checksum and writes the + * value to the EEPROM. + **/ +s32 e1000_update_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset) +{ + s32 ret_val; + u16 checksum = 0; + u16 i, nvm_data; + + DEBUGFUNC("e1000_update_nvm_checksum_with_offset"); + + for (i = offset; i < (NVM_CHECKSUM_REG + offset); i++) { + ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); + if (ret_val) { + DEBUGOUT("NVM Read Error while updating checksum.\n"); + goto out; + } + checksum += nvm_data; + } + checksum = (u16) NVM_SUM - checksum; + ret_val = hw->nvm.ops.write(hw, (NVM_CHECKSUM_REG + offset), 1, + &checksum); + if (ret_val) + DEBUGOUT("NVM Write Error while updating checksum.\n"); + +out: + return ret_val; +} + +/** + * e1000_validate_nvm_checksum_82580 - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Calculates the EEPROM section checksum by reading/adding each word of + * the EEPROM and then verifies that the sum of the EEPROM is + * equal to 0xBABA. 
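+ *
+ * Editor's worked example (illustrative numbers only): if the words of a
+ * protected region other than the checksum word sum to 0xA000, the update
+ * helper above stores 0xBABA - 0xA000 = 0x1ABA in the checksum word, so a
+ * later validation pass that sums every word including the checksum sees
+ * 0xA000 + 0x1ABA = 0xBABA (NVM_SUM) and succeeds.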
+ **/ +STATIC s32 e1000_validate_nvm_checksum_82580(struct e1000_hw *hw) +{ + s32 ret_val; + u16 eeprom_regions_count = 1; + u16 j, nvm_data; + u16 nvm_offset; + + DEBUGFUNC("e1000_validate_nvm_checksum_82580"); + + ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + goto out; + } + + if (nvm_data & NVM_COMPATIBILITY_BIT_MASK) { + /* if chekcsums compatibility bit is set validate checksums + * for all 4 ports. */ + eeprom_regions_count = 4; + } + + for (j = 0; j < eeprom_regions_count; j++) { + nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); + ret_val = e1000_validate_nvm_checksum_with_offset(hw, + nvm_offset); + if (ret_val != E1000_SUCCESS) + goto out; + } + +out: + return ret_val; +} + +/** + * e1000_update_nvm_checksum_82580 - Update EEPROM checksum + * @hw: pointer to the HW structure + * + * Updates the EEPROM section checksums for all 4 ports by reading/adding + * each word of the EEPROM up to the checksum. Then calculates the EEPROM + * checksum and writes the value to the EEPROM. + **/ +STATIC s32 e1000_update_nvm_checksum_82580(struct e1000_hw *hw) +{ + s32 ret_val; + u16 j, nvm_data; + u16 nvm_offset; + + DEBUGFUNC("e1000_update_nvm_checksum_82580"); + + ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data); + if (ret_val) { + DEBUGOUT("NVM Read Error while updating checksum compatibility bit.\n"); + goto out; + } + + if (!(nvm_data & NVM_COMPATIBILITY_BIT_MASK)) { + /* set compatibility bit to validate checksums appropriately */ + nvm_data = nvm_data | NVM_COMPATIBILITY_BIT_MASK; + ret_val = hw->nvm.ops.write(hw, NVM_COMPATIBILITY_REG_3, 1, + &nvm_data); + if (ret_val) { + DEBUGOUT("NVM Write Error while updating checksum compatibility bit.\n"); + goto out; + } + } + + for (j = 0; j < 4; j++) { + nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); + ret_val = e1000_update_nvm_checksum_with_offset(hw, nvm_offset); + if (ret_val) + goto out; + } + +out: + return ret_val; +} + +/** + * e1000_validate_nvm_checksum_i350 - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Calculates the EEPROM section checksum by reading/adding each word of + * the EEPROM and then verifies that the sum of the EEPROM is + * equal to 0xBABA. + **/ +STATIC s32 e1000_validate_nvm_checksum_i350(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + u16 j; + u16 nvm_offset; + + DEBUGFUNC("e1000_validate_nvm_checksum_i350"); + + for (j = 0; j < 4; j++) { + nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); + ret_val = e1000_validate_nvm_checksum_with_offset(hw, + nvm_offset); + if (ret_val != E1000_SUCCESS) + goto out; + } + +out: + return ret_val; +} + +/** + * e1000_update_nvm_checksum_i350 - Update EEPROM checksum + * @hw: pointer to the HW structure + * + * Updates the EEPROM section checksums for all 4 ports by reading/adding + * each word of the EEPROM up to the checksum. Then calculates the EEPROM + * checksum and writes the value to the EEPROM. 
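A usage sketch for the per-port variants above: after rewriting a word inside one LAN function's protected region, the per-port checksums are refreshed in one call. The helper name, the port number and the assumption that nvm.ops.update is wired to e1000_update_nvm_checksum_82580 for this family are illustrative, not taken from the hunk above.

/* Illustrative sketch only. */
static s32 example_rewrite_nvm_word(struct e1000_hw *hw, u16 word, u16 value)
{
        u16 offset = NVM_82580_LAN_FUNC_OFFSET(2) + word;
        s32 ret_val;

        ret_val = hw->nvm.ops.write(hw, offset, 1, &value);
        if (ret_val)
                return ret_val;

        /* Assumed to resolve to e1000_update_nvm_checksum_82580 here;
         * it sets the compatibility bit and redoes all four regions.
         */
        return hw->nvm.ops.update(hw);
}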
+ **/ +STATIC s32 e1000_update_nvm_checksum_i350(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + u16 j; + u16 nvm_offset; + + DEBUGFUNC("e1000_update_nvm_checksum_i350"); + + for (j = 0; j < 4; j++) { + nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); + ret_val = e1000_update_nvm_checksum_with_offset(hw, nvm_offset); + if (ret_val != E1000_SUCCESS) + goto out; + } + +out: + return ret_val; +} + +/** + * __e1000_access_emi_reg - Read/write EMI register + * @hw: pointer to the HW structure + * @addr: EMI address to program + * @data: pointer to value to read/write from/to the EMI address + * @read: boolean flag to indicate read or write + **/ +STATIC s32 __e1000_access_emi_reg(struct e1000_hw *hw, u16 address, + u16 *data, bool read) +{ + s32 ret_val; + + DEBUGFUNC("__e1000_access_emi_reg"); + + ret_val = hw->phy.ops.write_reg(hw, E1000_EMIADD, address); + if (ret_val) + return ret_val; + + if (read) + ret_val = hw->phy.ops.read_reg(hw, E1000_EMIDATA, data); + else + ret_val = hw->phy.ops.write_reg(hw, E1000_EMIDATA, *data); + + return ret_val; +} + +/** + * e1000_read_emi_reg - Read Extended Management Interface register + * @hw: pointer to the HW structure + * @addr: EMI address to program + * @data: value to be read from the EMI address + **/ +s32 e1000_read_emi_reg(struct e1000_hw *hw, u16 addr, u16 *data) +{ + DEBUGFUNC("e1000_read_emi_reg"); + + return __e1000_access_emi_reg(hw, addr, data, true); +} + +/** + * e1000_initialize_M88E1512_phy - Initialize M88E1512 PHY + * @hw: pointer to the HW structure + * + * Initialize Marvell 1512 to work correctly with Avoton. + **/ +s32 e1000_initialize_M88E1512_phy(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_initialize_M88E1512_phy"); + + /* Check if this is correct PHY. */ + if (phy->id != M88E1512_E_PHY_ID) + goto out; + + /* Switch to PHY page 0xFF. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FF); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x214B); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2144); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x0C28); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2146); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xB233); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x214D); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xCC0C); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2159); + if (ret_val) + goto out; + + /* Switch to PHY page 0xFB. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FB); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_3, 0x000D); + if (ret_val) + goto out; + + /* Switch to PHY page 0x12. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x12); + if (ret_val) + goto out; + + /* Change mode to SGMII-to-Copper */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_MODE, 0x8001); + if (ret_val) + goto out; + + /* Return the PHY to page 0. 
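Both Marvell init sequences (this one and the M88E1543 variant that follows) rely on the same paged-register convention: write the page number to E1000_M88E1543_PAGE_ADDR, touch registers on that page, then return to page 0 and commit. A hedged helper showing the pattern; the helper name and the page/reg/value parameters are illustrative.

/* Illustrative sketch only -- write one register on a given page and
 * return to page 0, as the init sequences above do.
 */
static s32 example_write_paged_phy_reg(struct e1000_hw *hw, u16 page,
                                       u32 reg, u16 value)
{
        struct e1000_phy_info *phy = &hw->phy;
        s32 ret_val;

        ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, page);
        if (ret_val)
                return ret_val;

        ret_val = phy->ops.write_reg(hw, reg, value);
        if (ret_val)
                return ret_val;

        return phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0);
}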
*/ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0); + if (ret_val) + goto out; + + ret_val = phy->ops.commit(hw); + if (ret_val) { + DEBUGOUT("Error committing the PHY changes\n"); + return ret_val; + } + + msec_delay(1000); +out: + return ret_val; +} + +/** + * e1000_initialize_M88E1543_phy - Initialize M88E1543 PHY + * @hw: pointer to the HW structure + * + * Initialize Marvell 1543 to work correctly with Avoton. + **/ +s32 e1000_initialize_M88E1543_phy(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_initialize_M88E1543_phy"); + + /* Check if this is correct PHY. */ + if (phy->id != M88E1543_E_PHY_ID) + goto out; + + /* Switch to PHY page 0xFF. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FF); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x214B); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2144); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x0C28); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2146); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xB233); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x214D); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xDC0C); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2159); + if (ret_val) + goto out; + + /* Switch to PHY page 0xFB. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FB); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_3, 0xC00D); + if (ret_val) + goto out; + + /* Switch to PHY page 0x12. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x12); + if (ret_val) + goto out; + + /* Change mode to SGMII-to-Copper */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_MODE, 0x8001); + if (ret_val) + goto out; + + /* Switch to PHY page 1. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x1); + if (ret_val) + goto out; + + /* Change mode to 1000BASE-X/SGMII and autoneg enable; reset */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_FIBER_CTRL, 0x9140); + if (ret_val) + goto out; + + /* Return the PHY to page 0. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0); + if (ret_val) + goto out; + + ret_val = phy->ops.commit(hw); + if (ret_val) { + DEBUGOUT("Error committing the PHY changes\n"); + return ret_val; + } + + msec_delay(1000); +out: + return ret_val; +} + +/** + * e1000_set_eee_i350 - Enable/disable EEE support + * @hw: pointer to the HW structure + * @adv1g: boolean flag enabling 1G EEE advertisement + * @adv100m: boolean flag enabling 100M EEE advertisement + * + * Enable/disable EEE based on setting in dev_spec structure. 
+ * + **/ +s32 e1000_set_eee_i350(struct e1000_hw *hw, bool adv1G, bool adv100M) +{ + u32 ipcnfg, eeer; + + DEBUGFUNC("e1000_set_eee_i350"); + + if ((hw->mac.type < e1000_i350) || + (hw->phy.media_type != e1000_media_type_copper)) + goto out; + ipcnfg = E1000_READ_REG(hw, E1000_IPCNFG); + eeer = E1000_READ_REG(hw, E1000_EEER); + + /* enable or disable per user setting */ + if (!(hw->dev_spec._82575.eee_disable)) { + u32 eee_su = E1000_READ_REG(hw, E1000_EEE_SU); + + if (adv100M) + ipcnfg |= E1000_IPCNFG_EEE_100M_AN; + else + ipcnfg &= ~E1000_IPCNFG_EEE_100M_AN; + + if (adv1G) + ipcnfg |= E1000_IPCNFG_EEE_1G_AN; + else + ipcnfg &= ~E1000_IPCNFG_EEE_1G_AN; + + eeer |= (E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN | + E1000_EEER_LPI_FC); + + /* This bit should not be set in normal operation. */ + if (eee_su & E1000_EEE_SU_LPI_CLK_STP) + DEBUGOUT("LPI Clock Stop Bit should not be set!\n"); + } else { + ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN | E1000_IPCNFG_EEE_100M_AN); + eeer &= ~(E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN | + E1000_EEER_LPI_FC); + } + E1000_WRITE_REG(hw, E1000_IPCNFG, ipcnfg); + E1000_WRITE_REG(hw, E1000_EEER, eeer); + E1000_READ_REG(hw, E1000_IPCNFG); + E1000_READ_REG(hw, E1000_EEER); +out: + + return E1000_SUCCESS; +} + +/** + * e1000_set_eee_i354 - Enable/disable EEE support + * @hw: pointer to the HW structure + * @adv1g: boolean flag enabling 1G EEE advertisement + * @adv100m: boolean flag enabling 100M EEE advertisement + * + * Enable/disable EEE legacy mode based on setting in dev_spec structure. + * + **/ +s32 e1000_set_eee_i354(struct e1000_hw *hw, bool adv1G, bool adv100M) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = E1000_SUCCESS; + u16 phy_data; + + DEBUGFUNC("e1000_set_eee_i354"); + + if ((hw->phy.media_type != e1000_media_type_copper) || + ((phy->id != M88E1543_E_PHY_ID) && + (phy->id != M88E1512_E_PHY_ID))) + goto out; + + if (!hw->dev_spec._82575.eee_disable) { + /* Switch to PHY page 18. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 18); + if (ret_val) + goto out; + + ret_val = phy->ops.read_reg(hw, E1000_M88E1543_EEE_CTRL_1, + &phy_data); + if (ret_val) + goto out; + + phy_data |= E1000_M88E1543_EEE_CTRL_1_MS; + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_EEE_CTRL_1, + phy_data); + if (ret_val) + goto out; + + /* Return the PHY to page 0. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0); + if (ret_val) + goto out; + + /* Turn on EEE advertisement. */ + ret_val = e1000_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354, + E1000_EEE_ADV_DEV_I354, + &phy_data); + if (ret_val) + goto out; + + if (adv100M) + phy_data |= E1000_EEE_ADV_100_SUPPORTED; + else + phy_data &= ~E1000_EEE_ADV_100_SUPPORTED; + + if (adv1G) + phy_data |= E1000_EEE_ADV_1000_SUPPORTED; + else + phy_data &= ~E1000_EEE_ADV_1000_SUPPORTED; + + ret_val = e1000_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354, + E1000_EEE_ADV_DEV_I354, + phy_data); + } else { + /* Turn off EEE advertisement. */ + ret_val = e1000_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354, + E1000_EEE_ADV_DEV_I354, + &phy_data); + if (ret_val) + goto out; + + phy_data &= ~(E1000_EEE_ADV_100_SUPPORTED | + E1000_EEE_ADV_1000_SUPPORTED); + ret_val = e1000_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354, + E1000_EEE_ADV_DEV_I354, + phy_data); + } + +out: + return ret_val; +} + +/** + * e1000_get_eee_status_i354 - Get EEE status + * @hw: pointer to the HW structure + * @status: EEE status + * + * Get EEE status by guessing based on whether Tx or Rx LPI indications have + * been received. 
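A usage sketch for the EEE setters above: advertise EEE at 1G but not at 100M on an i354 copper port with one of the Marvell PHYs listed. Only the helper name and the comments are added; the flag and the function are from the code above.

/* Illustrative sketch only. */
static s32 example_enable_1g_eee(struct e1000_hw *hw)
{
        /* Must be clear, otherwise the routine strips the advertisement */
        hw->dev_spec._82575.eee_disable = false;

        return e1000_set_eee_i354(hw, true /* adv1G */, false /* adv100M */);
}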
+ **/ +s32 e1000_get_eee_status_i354(struct e1000_hw *hw, bool *status) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = E1000_SUCCESS; + u16 phy_data; + + DEBUGFUNC("e1000_get_eee_status_i354"); + + /* Check if EEE is supported on this device. */ + if ((hw->phy.media_type != e1000_media_type_copper) || + ((phy->id != M88E1543_E_PHY_ID) && + (phy->id != M88E1512_E_PHY_ID))) + goto out; + + ret_val = e1000_read_xmdio_reg(hw, E1000_PCS_STATUS_ADDR_I354, + E1000_PCS_STATUS_DEV_I354, + &phy_data); + if (ret_val) + goto out; + + *status = phy_data & (E1000_PCS_STATUS_TX_LPI_RCVD | + E1000_PCS_STATUS_RX_LPI_RCVD) ? true : false; + +out: + return ret_val; +} + +/* Due to a hw errata, if the host tries to configure the VFTA register + * while performing queries from the BMC or DMA, then the VFTA in some + * cases won't be written. + */ + +/** + * e1000_clear_vfta_i350 - Clear VLAN filter table + * @hw: pointer to the HW structure + * + * Clears the register array which contains the VLAN filter table by + * setting all the values to 0. + **/ +void e1000_clear_vfta_i350(struct e1000_hw *hw) +{ + u32 offset; + int i; + + DEBUGFUNC("e1000_clear_vfta_350"); + + for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) { + for (i = 0; i < 10; i++) + E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, 0); + + E1000_WRITE_FLUSH(hw); + } +} + +/** + * e1000_write_vfta_i350 - Write value to VLAN filter table + * @hw: pointer to the HW structure + * @offset: register offset in VLAN filter table + * @value: register value written to VLAN filter table + * + * Writes value at the given offset in the register array which stores + * the VLAN filter table. + **/ +void e1000_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value) +{ + int i; + + DEBUGFUNC("e1000_write_vfta_350"); + + for (i = 0; i < 10; i++) + E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value); + + E1000_WRITE_FLUSH(hw); +} + + +/** + * e1000_set_i2c_bb - Enable I2C bit-bang + * @hw: pointer to the HW structure + * + * Enable I2C bit-bang interface + * + **/ +s32 e1000_set_i2c_bb(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + u32 ctrl_ext, i2cparams; + + DEBUGFUNC("e1000_set_i2c_bb"); + + ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); + ctrl_ext |= E1000_CTRL_I2C_ENA; + E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); + E1000_WRITE_FLUSH(hw); + + i2cparams = E1000_READ_REG(hw, E1000_I2CPARAMS); + i2cparams |= E1000_I2CBB_EN; + i2cparams |= E1000_I2C_DATA_OE_N; + i2cparams |= E1000_I2C_CLK_OE_N; + E1000_WRITE_REG(hw, E1000_I2CPARAMS, i2cparams); + E1000_WRITE_FLUSH(hw); + + return ret_val; +} + +/** + * e1000_read_i2c_byte_generic - Reads 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to read + * @dev_addr: device address + * @data: value read + * + * Performs byte read operation over I2C interface at + * a specified device address. 
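The i350 VFTA helpers above repeat every register write ten times to sidestep the BMC/DMA errata noted in the comment. A hedged sketch of how a VLAN id is usually folded into a (register offset, bit) pair before calling e1000_write_vfta_i350; the shadow table and the 32-VLANs-per-entry split are conventional e1000 usage and an assumption here, not something defined in this hunk.

/* Illustrative sketch only -- "shadow_vfta" is a hypothetical software
 * copy of the table (E1000_VLAN_FILTER_TBL_SIZE words) maintained by
 * the caller so no register read is needed.
 */
static void example_vfta_add_vlan(struct e1000_hw *hw, u32 *shadow_vfta,
                                  u16 vlan_id)
{
        u32 index = (vlan_id >> 5) & (E1000_VLAN_FILTER_TBL_SIZE - 1);
        u32 bit = 1u << (vlan_id & 0x1F);

        shadow_vfta[index] |= bit;
        e1000_write_vfta_i350(hw, index, shadow_vfta[index]);
}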
+ **/ +s32 e1000_read_i2c_byte_generic(struct e1000_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data) +{ + s32 status = E1000_SUCCESS; + u32 max_retry = 10; + u32 retry = 1; + u16 swfw_mask = 0; + + bool nack = true; + + DEBUGFUNC("e1000_read_i2c_byte_generic"); + + swfw_mask = E1000_SWFW_PHY0_SM; + + do { + if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) + != E1000_SUCCESS) { + status = E1000_ERR_SWFW_SYNC; + goto read_byte_out; + } + + e1000_i2c_start(hw); + + /* Device Address and write indication */ + status = e1000_clock_out_i2c_byte(hw, dev_addr); + if (status != E1000_SUCCESS) + goto fail; + + status = e1000_get_i2c_ack(hw); + if (status != E1000_SUCCESS) + goto fail; + + status = e1000_clock_out_i2c_byte(hw, byte_offset); + if (status != E1000_SUCCESS) + goto fail; + + status = e1000_get_i2c_ack(hw); + if (status != E1000_SUCCESS) + goto fail; + + e1000_i2c_start(hw); + + /* Device Address and read indication */ + status = e1000_clock_out_i2c_byte(hw, (dev_addr | 0x1)); + if (status != E1000_SUCCESS) + goto fail; + + status = e1000_get_i2c_ack(hw); + if (status != E1000_SUCCESS) + goto fail; + + status = e1000_clock_in_i2c_byte(hw, data); + if (status != E1000_SUCCESS) + goto fail; + + status = e1000_clock_out_i2c_bit(hw, nack); + if (status != E1000_SUCCESS) + goto fail; + + e1000_i2c_stop(hw); + break; + +fail: + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + msec_delay(100); + e1000_i2c_bus_clear(hw); + retry++; + if (retry < max_retry) + DEBUGOUT("I2C byte read error - Retrying.\n"); + else + DEBUGOUT("I2C byte read error.\n"); + + } while (retry < max_retry); + + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + +read_byte_out: + + return status; +} + +/** + * e1000_write_i2c_byte_generic - Writes 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to write + * @dev_addr: device address + * @data: value to write + * + * Performs byte write operation over I2C interface at + * a specified device address. 
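A usage sketch for the bit-banged read above: bring up the bit-bang interface and fetch the identifier byte of an SFP module. The 0xA0 device address and offset 0 follow the usual SFF-8472 layout and are assumptions, not values defined in this file.

/* Illustrative sketch only. */
static s32 example_read_sfp_id(struct e1000_hw *hw, u8 *id)
{
        s32 ret_val;

        ret_val = e1000_set_i2c_bb(hw);         /* enable bit-bang I2C */
        if (ret_val)
                return ret_val;

        return e1000_read_i2c_byte_generic(hw, 0 /* byte offset */,
                                           0xA0 /* device address */, id);
}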
+ **/ +s32 e1000_write_i2c_byte_generic(struct e1000_hw *hw, u8 byte_offset, + u8 dev_addr, u8 data) +{ + s32 status = E1000_SUCCESS; + u32 max_retry = 1; + u32 retry = 0; + u16 swfw_mask = 0; + + DEBUGFUNC("e1000_write_i2c_byte_generic"); + + swfw_mask = E1000_SWFW_PHY0_SM; + + if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != E1000_SUCCESS) { + status = E1000_ERR_SWFW_SYNC; + goto write_byte_out; + } + + do { + e1000_i2c_start(hw); + + status = e1000_clock_out_i2c_byte(hw, dev_addr); + if (status != E1000_SUCCESS) + goto fail; + + status = e1000_get_i2c_ack(hw); + if (status != E1000_SUCCESS) + goto fail; + + status = e1000_clock_out_i2c_byte(hw, byte_offset); + if (status != E1000_SUCCESS) + goto fail; + + status = e1000_get_i2c_ack(hw); + if (status != E1000_SUCCESS) + goto fail; + + status = e1000_clock_out_i2c_byte(hw, data); + if (status != E1000_SUCCESS) + goto fail; + + status = e1000_get_i2c_ack(hw); + if (status != E1000_SUCCESS) + goto fail; + + e1000_i2c_stop(hw); + break; + +fail: + e1000_i2c_bus_clear(hw); + retry++; + if (retry < max_retry) + DEBUGOUT("I2C byte write error - Retrying.\n"); + else + DEBUGOUT("I2C byte write error.\n"); + } while (retry < max_retry); + + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + +write_byte_out: + + return status; +} + +/** + * e1000_i2c_start - Sets I2C start condition + * @hw: pointer to hardware structure + * + * Sets I2C start condition (High -> Low on SDA while SCL is High) + **/ +STATIC void e1000_i2c_start(struct e1000_hw *hw) +{ + u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); + + DEBUGFUNC("e1000_i2c_start"); + + /* Start condition must begin with data and clock high */ + e1000_set_i2c_data(hw, &i2cctl, 1); + e1000_raise_i2c_clk(hw, &i2cctl); + + /* Setup time for start condition (4.7us) */ + usec_delay(E1000_I2C_T_SU_STA); + + e1000_set_i2c_data(hw, &i2cctl, 0); + + /* Hold time for start condition (4us) */ + usec_delay(E1000_I2C_T_HD_STA); + + e1000_lower_i2c_clk(hw, &i2cctl); + + /* Minimum low period of clock is 4.7 us */ + usec_delay(E1000_I2C_T_LOW); + +} + +/** + * e1000_i2c_stop - Sets I2C stop condition + * @hw: pointer to hardware structure + * + * Sets I2C stop condition (Low -> High on SDA while SCL is High) + **/ +STATIC void e1000_i2c_stop(struct e1000_hw *hw) +{ + u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); + + DEBUGFUNC("e1000_i2c_stop"); + + /* Stop condition must begin with data low and clock high */ + e1000_set_i2c_data(hw, &i2cctl, 0); + e1000_raise_i2c_clk(hw, &i2cctl); + + /* Setup time for stop condition (4us) */ + usec_delay(E1000_I2C_T_SU_STO); + + e1000_set_i2c_data(hw, &i2cctl, 1); + + /* bus free time between stop and start (4.7us)*/ + usec_delay(E1000_I2C_T_BUF); +} + +/** + * e1000_clock_in_i2c_byte - Clocks in one byte via I2C + * @hw: pointer to hardware structure + * @data: data byte to clock in + * + * Clocks in one byte data via I2C data/clock + **/ +STATIC s32 e1000_clock_in_i2c_byte(struct e1000_hw *hw, u8 *data) +{ + s32 i; + bool bit = 0; + + DEBUGFUNC("e1000_clock_in_i2c_byte"); + + *data = 0; + for (i = 7; i >= 0; i--) { + e1000_clock_in_i2c_bit(hw, &bit); + *data |= bit << i; + } + + return E1000_SUCCESS; +} + +/** + * e1000_clock_out_i2c_byte - Clocks out one byte via I2C + * @hw: pointer to hardware structure + * @data: data byte clocked out + * + * Clocks out one byte data via I2C data/clock + **/ +STATIC s32 e1000_clock_out_i2c_byte(struct e1000_hw *hw, u8 data) +{ + s32 status = E1000_SUCCESS; + s32 i; + u32 i2cctl; + bool bit = 0; + + 
DEBUGFUNC("e1000_clock_out_i2c_byte"); + + for (i = 7; i >= 0; i--) { + bit = (data >> i) & 0x1; + status = e1000_clock_out_i2c_bit(hw, bit); + + if (status != E1000_SUCCESS) + break; + } + + /* Release SDA line (set high) */ + i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); + + i2cctl |= E1000_I2C_DATA_OE_N; + E1000_WRITE_REG(hw, E1000_I2CPARAMS, i2cctl); + E1000_WRITE_FLUSH(hw); + + return status; +} + +/** + * e1000_get_i2c_ack - Polls for I2C ACK + * @hw: pointer to hardware structure + * + * Clocks in/out one bit via I2C data/clock + **/ +STATIC s32 e1000_get_i2c_ack(struct e1000_hw *hw) +{ + s32 status = E1000_SUCCESS; + u32 i = 0; + u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); + u32 timeout = 10; + bool ack = true; + + DEBUGFUNC("e1000_get_i2c_ack"); + + e1000_raise_i2c_clk(hw, &i2cctl); + + /* Minimum high period of clock is 4us */ + usec_delay(E1000_I2C_T_HIGH); + + /* Wait until SCL returns high */ + for (i = 0; i < timeout; i++) { + usec_delay(1); + i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); + if (i2cctl & E1000_I2C_CLK_IN) + break; + } + if (!(i2cctl & E1000_I2C_CLK_IN)) + return E1000_ERR_I2C; + + ack = e1000_get_i2c_data(&i2cctl); + if (ack) { + DEBUGOUT("I2C ack was not received.\n"); + status = E1000_ERR_I2C; + } + + e1000_lower_i2c_clk(hw, &i2cctl); + + /* Minimum low period of clock is 4.7 us */ + usec_delay(E1000_I2C_T_LOW); + + return status; +} + +/** + * e1000_clock_in_i2c_bit - Clocks in one bit via I2C data/clock + * @hw: pointer to hardware structure + * @data: read data value + * + * Clocks in one bit via I2C data/clock + **/ +STATIC s32 e1000_clock_in_i2c_bit(struct e1000_hw *hw, bool *data) +{ + u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); + + DEBUGFUNC("e1000_clock_in_i2c_bit"); + + e1000_raise_i2c_clk(hw, &i2cctl); + + /* Minimum high period of clock is 4us */ + usec_delay(E1000_I2C_T_HIGH); + + i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); + *data = e1000_get_i2c_data(&i2cctl); + + e1000_lower_i2c_clk(hw, &i2cctl); + + /* Minimum low period of clock is 4.7 us */ + usec_delay(E1000_I2C_T_LOW); + + return E1000_SUCCESS; +} + +/** + * e1000_clock_out_i2c_bit - Clocks in/out one bit via I2C data/clock + * @hw: pointer to hardware structure + * @data: data value to write + * + * Clocks out one bit via I2C data/clock + **/ +STATIC s32 e1000_clock_out_i2c_bit(struct e1000_hw *hw, bool data) +{ + s32 status; + u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); + + DEBUGFUNC("e1000_clock_out_i2c_bit"); + + status = e1000_set_i2c_data(hw, &i2cctl, data); + if (status == E1000_SUCCESS) { + e1000_raise_i2c_clk(hw, &i2cctl); + + /* Minimum high period of clock is 4us */ + usec_delay(E1000_I2C_T_HIGH); + + e1000_lower_i2c_clk(hw, &i2cctl); + + /* Minimum low period of clock is 4.7 us. + * This also takes care of the data hold time. 
+ */ + usec_delay(E1000_I2C_T_LOW); + } else { + status = E1000_ERR_I2C; + DEBUGOUT1("I2C data was not set to %X\n", data); + } + + return status; +} +/** + * e1000_raise_i2c_clk - Raises the I2C SCL clock + * @hw: pointer to hardware structure + * @i2cctl: Current value of I2CCTL register + * + * Raises the I2C clock line '0'->'1' + **/ +STATIC void e1000_raise_i2c_clk(struct e1000_hw *hw, u32 *i2cctl) +{ + DEBUGFUNC("e1000_raise_i2c_clk"); + + *i2cctl |= E1000_I2C_CLK_OUT; + *i2cctl &= ~E1000_I2C_CLK_OE_N; + E1000_WRITE_REG(hw, E1000_I2CPARAMS, *i2cctl); + E1000_WRITE_FLUSH(hw); + + /* SCL rise time (1000ns) */ + usec_delay(E1000_I2C_T_RISE); +} + +/** + * e1000_lower_i2c_clk - Lowers the I2C SCL clock + * @hw: pointer to hardware structure + * @i2cctl: Current value of I2CCTL register + * + * Lowers the I2C clock line '1'->'0' + **/ +STATIC void e1000_lower_i2c_clk(struct e1000_hw *hw, u32 *i2cctl) +{ + + DEBUGFUNC("e1000_lower_i2c_clk"); + + *i2cctl &= ~E1000_I2C_CLK_OUT; + *i2cctl &= ~E1000_I2C_CLK_OE_N; + E1000_WRITE_REG(hw, E1000_I2CPARAMS, *i2cctl); + E1000_WRITE_FLUSH(hw); + + /* SCL fall time (300ns) */ + usec_delay(E1000_I2C_T_FALL); +} + +/** + * e1000_set_i2c_data - Sets the I2C data bit + * @hw: pointer to hardware structure + * @i2cctl: Current value of I2CCTL register + * @data: I2C data value (0 or 1) to set + * + * Sets the I2C data bit + **/ +STATIC s32 e1000_set_i2c_data(struct e1000_hw *hw, u32 *i2cctl, bool data) +{ + s32 status = E1000_SUCCESS; + + DEBUGFUNC("e1000_set_i2c_data"); + + if (data) + *i2cctl |= E1000_I2C_DATA_OUT; + else + *i2cctl &= ~E1000_I2C_DATA_OUT; + + *i2cctl &= ~E1000_I2C_DATA_OE_N; + *i2cctl |= E1000_I2C_CLK_OE_N; + E1000_WRITE_REG(hw, E1000_I2CPARAMS, *i2cctl); + E1000_WRITE_FLUSH(hw); + + /* Data rise/fall (1000ns/300ns) and set-up time (250ns) */ + usec_delay(E1000_I2C_T_RISE + E1000_I2C_T_FALL + E1000_I2C_T_SU_DATA); + + *i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); + if (data != e1000_get_i2c_data(i2cctl)) { + status = E1000_ERR_I2C; + DEBUGOUT1("Error - I2C data was not set to %X.\n", data); + } + + return status; +} + +/** + * e1000_get_i2c_data - Reads the I2C SDA data bit + * @hw: pointer to hardware structure + * @i2cctl: Current value of I2CCTL register + * + * Returns the I2C data bit value + **/ +STATIC bool e1000_get_i2c_data(u32 *i2cctl) +{ + bool data; + + DEBUGFUNC("e1000_get_i2c_data"); + + if (*i2cctl & E1000_I2C_DATA_IN) + data = 1; + else + data = 0; + + return data; +} + +/** + * e1000_i2c_bus_clear - Clears the I2C bus + * @hw: pointer to hardware structure + * + * Clears the I2C bus by sending nine clock pulses. + * Used when data line is stuck low. 
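For reference, the delays used by the bit-bang primitives above land the bus well inside I2C standard mode: clocking one data bit runs through e1000_set_i2c_data (T_RISE + T_FALL + T_SU_DATA), a clock raise (T_RISE), T_HIGH, a clock fall (T_FALL) and T_LOW. With the microsecond constants from e1000_82575.h that is roughly 14 us per bit, about 70 kHz against the 100 kHz standard-mode ceiling. A trivial sketch of that sum; the function itself is illustrative only.

/* Illustrative sketch only -- approximate time to clock out one data
 * bit, in microseconds, built from the constants the delays above use.
 */
static unsigned int example_i2c_bit_time_us(void)
{
        return (E1000_I2C_T_RISE + E1000_I2C_T_FALL + E1000_I2C_T_SU_DATA) +
                E1000_I2C_T_RISE + E1000_I2C_T_HIGH +
                E1000_I2C_T_FALL + E1000_I2C_T_LOW;
}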
+ **/ +void e1000_i2c_bus_clear(struct e1000_hw *hw) +{ + u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); + u32 i; + + DEBUGFUNC("e1000_i2c_bus_clear"); + + e1000_i2c_start(hw); + + e1000_set_i2c_data(hw, &i2cctl, 1); + + for (i = 0; i < 9; i++) { + e1000_raise_i2c_clk(hw, &i2cctl); + + /* Min high period of clock is 4us */ + usec_delay(E1000_I2C_T_HIGH); + + e1000_lower_i2c_clk(hw, &i2cctl); + + /* Min low period of clock is 4.7us*/ + usec_delay(E1000_I2C_T_LOW); + } + + e1000_i2c_start(hw); + + /* Put the i2c bus back to default state */ + e1000_i2c_stop(hw); +} + diff --git a/src/spdk/dpdk/drivers/net/e1000/base/e1000_82575.h b/src/spdk/dpdk/drivers/net/e1000/base/e1000_82575.h new file mode 100644 index 000000000..52dd7a9a1 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/e1000/base/e1000_82575.h @@ -0,0 +1,493 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001 - 2015 Intel Corporation + */ + +#ifndef _E1000_82575_H_ +#define _E1000_82575_H_ + +#define ID_LED_DEFAULT_82575_SERDES ((ID_LED_DEF1_DEF2 << 12) | \ + (ID_LED_DEF1_DEF2 << 8) | \ + (ID_LED_DEF1_DEF2 << 4) | \ + (ID_LED_OFF1_ON2)) +/* + * Receive Address Register Count + * Number of high/low register pairs in the RAR. The RAR (Receive Address + * Registers) holds the directed and multicast addresses that we monitor. + * These entries are also used for MAC-based filtering. + */ +/* + * For 82576, there are an additional set of RARs that begin at an offset + * separate from the first set of RARs. + */ +#define E1000_RAR_ENTRIES_82575 16 +#define E1000_RAR_ENTRIES_82576 24 +#define E1000_RAR_ENTRIES_82580 24 +#define E1000_RAR_ENTRIES_I350 32 +#define E1000_SW_SYNCH_MB 0x00000100 +#define E1000_STAT_DEV_RST_SET 0x00100000 +#define E1000_CTRL_DEV_RST 0x20000000 + +#ifdef E1000_BIT_FIELDS +struct e1000_adv_data_desc { + __le64 buffer_addr; /* Address of the descriptor's data buffer */ + union { + u32 data; + struct { + u32 datalen:16; /* Data buffer length */ + u32 rsvd:4; + u32 dtyp:4; /* Descriptor type */ + u32 dcmd:8; /* Descriptor command */ + } config; + } lower; + union { + u32 data; + struct { + u32 status:4; /* Descriptor status */ + u32 idx:4; + u32 popts:6; /* Packet Options */ + u32 paylen:18; /* Payload length */ + } options; + } upper; +}; + +#define E1000_TXD_DTYP_ADV_C 0x2 /* Advanced Context Descriptor */ +#define E1000_TXD_DTYP_ADV_D 0x3 /* Advanced Data Descriptor */ +#define E1000_ADV_TXD_CMD_DEXT 0x20 /* Descriptor extension (0 = legacy) */ +#define E1000_ADV_TUCMD_IPV4 0x2 /* IP Packet Type: 1=IPv4 */ +#define E1000_ADV_TUCMD_IPV6 0x0 /* IP Packet Type: 0=IPv6 */ +#define E1000_ADV_TUCMD_L4T_UDP 0x0 /* L4 Packet TYPE of UDP */ +#define E1000_ADV_TUCMD_L4T_TCP 0x4 /* L4 Packet TYPE of TCP */ +#define E1000_ADV_TUCMD_MKRREQ 0x10 /* Indicates markers are required */ +#define E1000_ADV_DCMD_EOP 0x1 /* End of Packet */ +#define E1000_ADV_DCMD_IFCS 0x2 /* Insert FCS (Ethernet CRC) */ +#define E1000_ADV_DCMD_RS 0x8 /* Report Status */ +#define E1000_ADV_DCMD_VLE 0x40 /* Add VLAN tag */ +#define E1000_ADV_DCMD_TSE 0x80 /* TCP Seg enable */ +/* Extended Device Control */ +#define E1000_CTRL_EXT_NSICR 0x00000001 /* Disable Intr Clear all on read */ + +struct e1000_adv_context_desc { + union { + u32 ip_config; + struct { + u32 iplen:9; + u32 maclen:7; + u32 vlan_tag:16; + } fields; + } ip_setup; + u32 seq_num; + union { + u64 l4_config; + struct { + u32 mkrloc:9; + u32 tucmd:11; + u32 dtyp:4; + u32 adv:8; + u32 rsvd:4; + u32 idx:4; + u32 l4len:8; + u32 mss:16; + } fields; + } l4_setup; +}; +#endif + +/* 
SRRCTL bit definitions */ +#define E1000_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */ +#define E1000_SRRCTL_BSIZEHDRSIZE_MASK 0x00000F00 +#define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */ +#define E1000_SRRCTL_DESCTYPE_LEGACY 0x00000000 +#define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 +#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000 +#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000 +#define E1000_SRRCTL_DESCTYPE_HDR_REPLICATION 0x06000000 +#define E1000_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000 +#define E1000_SRRCTL_DESCTYPE_MASK 0x0E000000 +#define E1000_SRRCTL_TIMESTAMP 0x40000000 +#define E1000_SRRCTL_DROP_EN 0x80000000 + +#define E1000_SRRCTL_BSIZEPKT_MASK 0x0000007F +#define E1000_SRRCTL_BSIZEHDR_MASK 0x00003F00 + +#define E1000_TX_HEAD_WB_ENABLE 0x1 +#define E1000_TX_SEQNUM_WB_ENABLE 0x2 + +#define E1000_MRQC_ENABLE_RSS_4Q 0x00000002 +#define E1000_MRQC_ENABLE_VMDQ 0x00000003 +#define E1000_MRQC_ENABLE_VMDQ_RSS_2Q 0x00000005 +#define E1000_MRQC_RSS_FIELD_IPV4_UDP 0x00400000 +#define E1000_MRQC_RSS_FIELD_IPV6_UDP 0x00800000 +#define E1000_MRQC_RSS_FIELD_IPV6_UDP_EX 0x01000000 +#define E1000_MRQC_ENABLE_RSS_8Q 0x00000002 + +#define E1000_VMRCTL_MIRROR_PORT_SHIFT 8 +#define E1000_VMRCTL_MIRROR_DSTPORT_MASK (7 << \ + E1000_VMRCTL_MIRROR_PORT_SHIFT) +#define E1000_VMRCTL_POOL_MIRROR_ENABLE (1 << 0) +#define E1000_VMRCTL_UPLINK_MIRROR_ENABLE (1 << 1) +#define E1000_VMRCTL_DOWNLINK_MIRROR_ENABLE (1 << 2) + +#define E1000_EICR_TX_QUEUE ( \ + E1000_EICR_TX_QUEUE0 | \ + E1000_EICR_TX_QUEUE1 | \ + E1000_EICR_TX_QUEUE2 | \ + E1000_EICR_TX_QUEUE3) + +#define E1000_EICR_RX_QUEUE ( \ + E1000_EICR_RX_QUEUE0 | \ + E1000_EICR_RX_QUEUE1 | \ + E1000_EICR_RX_QUEUE2 | \ + E1000_EICR_RX_QUEUE3) + +#define E1000_EIMS_RX_QUEUE E1000_EICR_RX_QUEUE +#define E1000_EIMS_TX_QUEUE E1000_EICR_TX_QUEUE + +#define EIMS_ENABLE_MASK ( \ + E1000_EIMS_RX_QUEUE | \ + E1000_EIMS_TX_QUEUE | \ + E1000_EIMS_TCP_TIMER | \ + E1000_EIMS_OTHER) + +/* Immediate Interrupt Rx (A.K.A. 
Low Latency Interrupt) */ +#define E1000_IMIR_PORT_IM_EN 0x00010000 /* TCP port enable */ +#define E1000_IMIR_PORT_BP 0x00020000 /* TCP port check bypass */ +#define E1000_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */ +#define E1000_IMIREXT_CTRL_URG 0x00002000 /* Check URG bit in header */ +#define E1000_IMIREXT_CTRL_ACK 0x00004000 /* Check ACK bit in header */ +#define E1000_IMIREXT_CTRL_PSH 0x00008000 /* Check PSH bit in header */ +#define E1000_IMIREXT_CTRL_RST 0x00010000 /* Check RST bit in header */ +#define E1000_IMIREXT_CTRL_SYN 0x00020000 /* Check SYN bit in header */ +#define E1000_IMIREXT_CTRL_FIN 0x00040000 /* Check FIN bit in header */ +#define E1000_IMIREXT_CTRL_BP 0x00080000 /* Bypass check of ctrl bits */ + +/* Receive Descriptor - Advanced */ +union e1000_adv_rx_desc { + struct { + __le64 pkt_addr; /* Packet buffer address */ + __le64 hdr_addr; /* Header buffer address */ + } read; + struct { + struct { + union { + __le32 data; + struct { + __le16 pkt_info; /*RSS type, Pkt type*/ + /* Split Header, header buffer len */ + __le16 hdr_info; + } hs_rss; + } lo_dword; + union { + __le32 rss; /* RSS Hash */ + struct { + __le16 ip_id; /* IP id */ + __le16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; /* ext status/error */ + __le16 length; /* Packet length */ + __le16 vlan; /* VLAN tag */ + } upper; + } wb; /* writeback */ +}; + +#define E1000_RXDADV_RSSTYPE_MASK 0x0000000F +#define E1000_RXDADV_RSSTYPE_SHIFT 12 +#define E1000_RXDADV_HDRBUFLEN_MASK 0x7FE0 +#define E1000_RXDADV_HDRBUFLEN_SHIFT 5 +#define E1000_RXDADV_SPLITHEADER_EN 0x00001000 +#define E1000_RXDADV_SPH 0x8000 +#define E1000_RXDADV_STAT_TS 0x10000 /* Pkt was time stamped */ +#define E1000_RXDADV_STAT_TSIP 0x08000 /* timestamp in packet */ +#define E1000_RXDADV_ERR_HBO 0x00800000 + +/* RSS Hash results */ +#define E1000_RXDADV_RSSTYPE_NONE 0x00000000 +#define E1000_RXDADV_RSSTYPE_IPV4_TCP 0x00000001 +#define E1000_RXDADV_RSSTYPE_IPV4 0x00000002 +#define E1000_RXDADV_RSSTYPE_IPV6_TCP 0x00000003 +#define E1000_RXDADV_RSSTYPE_IPV6_EX 0x00000004 +#define E1000_RXDADV_RSSTYPE_IPV6 0x00000005 +#define E1000_RXDADV_RSSTYPE_IPV6_TCP_EX 0x00000006 +#define E1000_RXDADV_RSSTYPE_IPV4_UDP 0x00000007 +#define E1000_RXDADV_RSSTYPE_IPV6_UDP 0x00000008 +#define E1000_RXDADV_RSSTYPE_IPV6_UDP_EX 0x00000009 + +/* RSS Packet Types as indicated in the receive descriptor */ +#define E1000_RXDADV_PKTTYPE_ILMASK 0x000000F0 +#define E1000_RXDADV_PKTTYPE_TLMASK 0x00000F00 +#define E1000_RXDADV_PKTTYPE_NONE 0x00000000 +#define E1000_RXDADV_PKTTYPE_IPV4 0x00000010 /* IPV4 hdr present */ +#define E1000_RXDADV_PKTTYPE_IPV4_EX 0x00000020 /* IPV4 hdr + extensions */ +#define E1000_RXDADV_PKTTYPE_IPV6 0x00000040 /* IPV6 hdr present */ +#define E1000_RXDADV_PKTTYPE_IPV6_EX 0x00000080 /* IPV6 hdr + extensions */ +#define E1000_RXDADV_PKTTYPE_TCP 0x00000100 /* TCP hdr present */ +#define E1000_RXDADV_PKTTYPE_UDP 0x00000200 /* UDP hdr present */ +#define E1000_RXDADV_PKTTYPE_SCTP 0x00000400 /* SCTP hdr present */ +#define E1000_RXDADV_PKTTYPE_NFS 0x00000800 /* NFS hdr present */ + +#define E1000_RXDADV_PKTTYPE_IPSEC_ESP 0x00001000 /* IPSec ESP */ +#define E1000_RXDADV_PKTTYPE_IPSEC_AH 0x00002000 /* IPSec AH */ +#define E1000_RXDADV_PKTTYPE_LINKSEC 0x00004000 /* LinkSec Encap */ +#define E1000_RXDADV_PKTTYPE_ETQF 0x00008000 /* PKTTYPE is ETQF index */ +#define E1000_RXDADV_PKTTYPE_ETQF_MASK 0x00000070 /* ETQF has 8 indices */ +#define E1000_RXDADV_PKTTYPE_ETQF_SHIFT 4 /* Right-shift 4 bits */ + 
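As an illustration of the writeback layout and RSS masks above, a small parser that pulls the commonly used fields out of a completed advanced Rx descriptor. Byte-order conversion of the __le fields is deliberately elided; a real consumer runs them through the platform's le16/le32 helpers.

/* Illustrative sketch only. */
static void example_parse_adv_rx_wb(const union e1000_adv_rx_desc *rxd,
                                    u16 *pkt_len, u16 *vlan_tag,
                                    u32 *rss_hash, u32 *rss_type)
{
        u32 pkt_info = rxd->wb.lower.lo_dword.data;

        *pkt_len = rxd->wb.upper.length;
        *vlan_tag = rxd->wb.upper.vlan;
        *rss_hash = rxd->wb.lower.hi_dword.rss;
        *rss_type = pkt_info & E1000_RXDADV_RSSTYPE_MASK;
}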
+/* LinkSec results */ +/* Security Processing bit Indication */ +#define E1000_RXDADV_LNKSEC_STATUS_SECP 0x00020000 +#define E1000_RXDADV_LNKSEC_ERROR_BIT_MASK 0x18000000 +#define E1000_RXDADV_LNKSEC_ERROR_NO_SA_MATCH 0x08000000 +#define E1000_RXDADV_LNKSEC_ERROR_REPLAY_ERROR 0x10000000 +#define E1000_RXDADV_LNKSEC_ERROR_BAD_SIG 0x18000000 + +#define E1000_RXDADV_IPSEC_STATUS_SECP 0x00020000 +#define E1000_RXDADV_IPSEC_ERROR_BIT_MASK 0x18000000 +#define E1000_RXDADV_IPSEC_ERROR_INVALID_PROTOCOL 0x08000000 +#define E1000_RXDADV_IPSEC_ERROR_INVALID_LENGTH 0x10000000 +#define E1000_RXDADV_IPSEC_ERROR_AUTHENTICATION_FAILED 0x18000000 + +/* Transmit Descriptor - Advanced */ +union e1000_adv_tx_desc { + struct { + __le64 buffer_addr; /* Address of descriptor's data buf */ + __le32 cmd_type_len; + __le32 olinfo_status; + } read; + struct { + __le64 rsvd; /* Reserved */ + __le32 nxtseq_seed; + __le32 status; + } wb; +}; + +/* Adv Transmit Descriptor Config Masks */ +#define E1000_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */ +#define E1000_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ +#define E1000_ADVTXD_DCMD_EOP 0x01000000 /* End of Packet */ +#define E1000_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define E1000_ADVTXD_DCMD_RS 0x08000000 /* Report Status */ +#define E1000_ADVTXD_DCMD_DDTYP_ISCSI 0x10000000 /* DDP hdr type or iSCSI */ +#define E1000_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */ +#define E1000_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */ +#define E1000_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ +#define E1000_ADVTXD_MAC_LINKSEC 0x00040000 /* Apply LinkSec on pkt */ +#define E1000_ADVTXD_MAC_TSTAMP 0x00080000 /* IEEE1588 Timestamp pkt */ +#define E1000_ADVTXD_STAT_SN_CRC 0x00000002 /* NXTSEQ/SEED prsnt in WB */ +#define E1000_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */ +#define E1000_ADVTXD_POPTS_ISCO_1ST 0x00000000 /* 1st TSO of iSCSI PDU */ +#define E1000_ADVTXD_POPTS_ISCO_MDL 0x00000800 /* Middle TSO of iSCSI PDU */ +#define E1000_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */ +/* 1st & Last TSO-full iSCSI PDU*/ +#define E1000_ADVTXD_POPTS_ISCO_FULL 0x00001800 +#define E1000_ADVTXD_POPTS_IPSEC 0x00000400 /* IPSec offload request */ +#define E1000_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ + +/* Context descriptors */ +struct e1000_adv_tx_context_desc { + __le32 vlan_macip_lens; + __le32 seqnum_seed; + __le32 type_tucmd_mlhl; + __le32 mss_l4len_idx; +}; + +#define E1000_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ +#define E1000_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */ +#define E1000_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */ +#define E1000_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */ +#define E1000_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */ +#define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ +#define E1000_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */ +#define E1000_ADVTXD_TUCMD_IPSEC_TYPE_ESP 0x00002000 /* IPSec Type ESP */ +/* IPSec Encrypt Enable for ESP */ +#define E1000_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN 0x00004000 +/* Req requires Markers and CRC */ +#define E1000_ADVTXD_TUCMD_MKRREQ 0x00002000 +#define E1000_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ +#define E1000_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ +/* Adv ctxt IPSec SA IDX mask */ +#define E1000_ADVTXD_IPSEC_SA_INDEX_MASK 0x000000FF +/* Adv ctxt IPSec ESP len mask */ +#define E1000_ADVTXD_IPSEC_ESP_LEN_MASK 
0x000000FF + +/* Additional Transmit Descriptor Control definitions */ +#define E1000_TXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Tx Queue */ +#define E1000_TXDCTL_SWFLSH 0x04000000 /* Tx Desc. wbk flushing */ +/* Tx Queue Arbitration Priority 0=low, 1=high */ +#define E1000_TXDCTL_PRIORITY 0x08000000 + +/* Additional Receive Descriptor Control definitions */ +#define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Rx Queue */ +#define E1000_RXDCTL_SWFLSH 0x04000000 /* Rx Desc. wbk flushing */ + +/* Direct Cache Access (DCA) definitions */ +#define E1000_DCA_CTRL_DCA_ENABLE 0x00000000 /* DCA Enable */ +#define E1000_DCA_CTRL_DCA_DISABLE 0x00000001 /* DCA Disable */ + +#define E1000_DCA_CTRL_DCA_MODE_CB1 0x00 /* DCA Mode CB1 */ +#define E1000_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */ + +#define E1000_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */ +#define E1000_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */ +#define E1000_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header ena */ +#define E1000_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload ena */ +#define E1000_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* DCA Rx Desc Relax Order */ + +#define E1000_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */ +#define E1000_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */ +#define E1000_DCA_TXCTRL_DESC_RRO_EN (1 << 9) /* Tx rd Desc Relax Order */ +#define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */ +#define E1000_DCA_TXCTRL_DATA_RRO_EN (1 << 13) /* Tx rd data Relax Order */ + +#define E1000_DCA_TXCTRL_CPUID_MASK_82576 0xFF000000 /* Tx CPUID Mask */ +#define E1000_DCA_RXCTRL_CPUID_MASK_82576 0xFF000000 /* Rx CPUID Mask */ +#define E1000_DCA_TXCTRL_CPUID_SHIFT_82576 24 /* Tx CPUID */ +#define E1000_DCA_RXCTRL_CPUID_SHIFT_82576 24 /* Rx CPUID */ + +/* Additional interrupt register bit definitions */ +#define E1000_ICR_LSECPNS 0x00000020 /* PN threshold - server */ +#define E1000_IMS_LSECPNS E1000_ICR_LSECPNS /* PN threshold - server */ +#define E1000_ICS_LSECPNS E1000_ICR_LSECPNS /* PN threshold - server */ + +/* ETQF register bit definitions */ +#define E1000_ETQF_FILTER_ENABLE (1 << 26) +#define E1000_ETQF_IMM_INT (1 << 29) +#define E1000_ETQF_1588 (1 << 30) +#define E1000_ETQF_QUEUE_ENABLE (1U << 31) +/* + * ETQF filter list: one static filter per filter consumer. This is + * to avoid filter collisions later. Add new filters + * here!! 
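Tying the advanced Tx descriptor layout and command masks above together, a minimal sketch that fills one data descriptor for a single-buffer packet with CRC insertion and status write-back requested. "buf_dma" and "len" are hypothetical inputs, and cpu-to-le conversion is elided for brevity.

/* Illustrative sketch only. */
static void example_fill_adv_tx_desc(union e1000_adv_tx_desc *txd,
                                     u64 buf_dma, u16 len)
{
        /* data length occupies the low 16 bits of cmd_type_len */
        u32 cmd_type_len = E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_DEXT |
                           E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_EOP |
                           E1000_ADVTXD_DCMD_RS | len;

        txd->read.buffer_addr = buf_dma;
        txd->read.cmd_type_len = cmd_type_len;
        /* payload length only; no checksum/TSO context in this sketch */
        txd->read.olinfo_status = (u32)len << E1000_ADVTXD_PAYLEN_SHIFT;
}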
+ * + * Current filters: + * EAPOL 802.1x (0x888e): Filter 0 + */ +#define E1000_ETQF_FILTER_EAPOL 0 + +#define E1000_FTQF_VF_BP 0x00008000 +#define E1000_FTQF_1588_TIME_STAMP 0x08000000 +#define E1000_FTQF_MASK 0xF0000000 +#define E1000_FTQF_MASK_PROTO_BP 0x10000000 +#define E1000_FTQF_MASK_SOURCE_ADDR_BP 0x20000000 +#define E1000_FTQF_MASK_DEST_ADDR_BP 0x40000000 +#define E1000_FTQF_MASK_SOURCE_PORT_BP 0x80000000 + +#define E1000_NVM_APME_82575 0x0400 +#define MAX_NUM_VFS 7 + +#define E1000_DTXSWC_MAC_SPOOF_MASK 0x000000FF /* Per VF MAC spoof cntrl */ +#define E1000_DTXSWC_VLAN_SPOOF_MASK 0x0000FF00 /* Per VF VLAN spoof cntrl */ +#define E1000_DTXSWC_LLE_MASK 0x00FF0000 /* Per VF Local LB enables */ +#define E1000_DTXSWC_VLAN_SPOOF_SHIFT 8 +#define E1000_DTXSWC_LLE_SHIFT 16 +#define E1000_DTXSWC_VMDQ_LOOPBACK_EN (1U << 31) /* global VF LB enable */ + +/* Easy defines for setting default pool, would normally be left a zero */ +#define E1000_VT_CTL_DEFAULT_POOL_SHIFT 7 +#define E1000_VT_CTL_DEFAULT_POOL_MASK (0x7 << E1000_VT_CTL_DEFAULT_POOL_SHIFT) + +/* Other useful VMD_CTL register defines */ +#define E1000_VT_CTL_IGNORE_MAC (1 << 28) +#define E1000_VT_CTL_DISABLE_DEF_POOL (1 << 29) +#define E1000_VT_CTL_VM_REPL_EN (1 << 30) + +/* Per VM Offload register setup */ +#define E1000_VMOLR_RLPML_MASK 0x00003FFF /* Long Packet Maximum Length mask */ +#define E1000_VMOLR_LPE 0x00010000 /* Accept Long packet */ +#define E1000_VMOLR_RSSE 0x00020000 /* Enable RSS */ +#define E1000_VMOLR_AUPE 0x01000000 /* Accept untagged packets */ +#define E1000_VMOLR_ROMPE 0x02000000 /* Accept overflow multicast */ +#define E1000_VMOLR_ROPE 0x04000000 /* Accept overflow unicast */ +#define E1000_VMOLR_BAM 0x08000000 /* Accept Broadcast packets */ +#define E1000_VMOLR_MPME 0x10000000 /* Multicast promiscuous mode */ +#define E1000_VMOLR_STRVLAN 0x40000000 /* Vlan stripping enable */ +#define E1000_VMOLR_STRCRC 0x80000000 /* CRC stripping enable */ + +#define E1000_VMOLR_VPE 0x00800000 /* VLAN promiscuous enable */ +#define E1000_VMOLR_UPE 0x20000000 /* Unicast promisuous enable */ +#define E1000_DVMOLR_HIDVLAN 0x20000000 /* Vlan hiding enable */ +#define E1000_DVMOLR_STRVLAN 0x40000000 /* Vlan stripping enable */ +#define E1000_DVMOLR_STRCRC 0x80000000 /* CRC stripping enable */ + +#define E1000_PBRWAC_WALPB 0x00000007 /* Wrap around event on LAN Rx PB */ +#define E1000_PBRWAC_PBE 0x00000008 /* Rx packet buffer empty */ + +#define E1000_VLVF_ARRAY_SIZE 32 +#define E1000_VLVF_VLANID_MASK 0x00000FFF +#define E1000_VLVF_POOLSEL_SHIFT 12 +#define E1000_VLVF_POOLSEL_MASK (0xFF << E1000_VLVF_POOLSEL_SHIFT) +#define E1000_VLVF_LVLAN 0x00100000 +#define E1000_VLVF_VLANID_ENABLE 0x80000000 + +#define E1000_VMVIR_VLANA_DEFAULT 0x40000000 /* Always use default VLAN */ +#define E1000_VMVIR_VLANA_NEVER 0x80000000 /* Never insert VLAN tag */ + +#define E1000_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */ + +#define E1000_IOVCTL 0x05BBC +#define E1000_IOVCTL_REUSE_VFQ 0x00000001 + +#define E1000_RPLOLR_STRVLAN 0x40000000 +#define E1000_RPLOLR_STRCRC 0x80000000 + +#define E1000_TCTL_EXT_COLD 0x000FFC00 +#define E1000_TCTL_EXT_COLD_SHIFT 10 + +#define E1000_DTXCTL_8023LL 0x0004 +#define E1000_DTXCTL_VLAN_ADDED 0x0008 +#define E1000_DTXCTL_OOS_ENABLE 0x0010 +#define E1000_DTXCTL_MDP_EN 0x0020 +#define E1000_DTXCTL_SPOOF_INT 0x0040 + +#define E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT (1 << 14) + +#define ALL_QUEUES 0xFFFF + +/* Rx packet buffer size defines */ +#define E1000_RXPBS_SIZE_MASK_82576 0x0000007F +void 
e1000_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable); +void e1000_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf); +void e1000_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable); +s32 e1000_init_nvm_params_82575(struct e1000_hw *hw); +s32 e1000_init_hw_82575(struct e1000_hw *hw); + +enum e1000_promisc_type { + e1000_promisc_disabled = 0, /* all promisc modes disabled */ + e1000_promisc_unicast = 1, /* unicast promiscuous enabled */ + e1000_promisc_multicast = 2, /* multicast promiscuous enabled */ + e1000_promisc_enabled = 3, /* both uni and multicast promisc */ + e1000_num_promisc_types +}; + +void e1000_vfta_set_vf(struct e1000_hw *, u16, bool); +void e1000_rlpml_set_vf(struct e1000_hw *, u16); +s32 e1000_promisc_set_vf(struct e1000_hw *, enum e1000_promisc_type type); +void e1000_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value); +u16 e1000_rxpbs_adjust_82580(u32 data); +s32 e1000_read_emi_reg(struct e1000_hw *hw, u16 addr, u16 *data); +s32 e1000_set_eee_i350(struct e1000_hw *hw, bool adv1G, bool adv100M); +s32 e1000_set_eee_i354(struct e1000_hw *hw, bool adv1G, bool adv100M); +s32 e1000_get_eee_status_i354(struct e1000_hw *, bool *); +s32 e1000_initialize_M88E1512_phy(struct e1000_hw *hw); +s32 e1000_initialize_M88E1543_phy(struct e1000_hw *hw); + +/* I2C SDA and SCL timing parameters for standard mode */ +#define E1000_I2C_T_HD_STA 4 +#define E1000_I2C_T_LOW 5 +#define E1000_I2C_T_HIGH 4 +#define E1000_I2C_T_SU_STA 5 +#define E1000_I2C_T_HD_DATA 5 +#define E1000_I2C_T_SU_DATA 1 +#define E1000_I2C_T_RISE 1 +#define E1000_I2C_T_FALL 1 +#define E1000_I2C_T_SU_STO 4 +#define E1000_I2C_T_BUF 5 + +s32 e1000_set_i2c_bb(struct e1000_hw *hw); +s32 e1000_read_i2c_byte_generic(struct e1000_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data); +s32 e1000_write_i2c_byte_generic(struct e1000_hw *hw, u8 byte_offset, + u8 dev_addr, u8 data); +void e1000_i2c_bus_clear(struct e1000_hw *hw); +#endif /* _E1000_82575_H_ */ diff --git a/src/spdk/dpdk/drivers/net/e1000/base/e1000_api.c b/src/spdk/dpdk/drivers/net/e1000/base/e1000_api.c new file mode 100644 index 000000000..718952801 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/e1000/base/e1000_api.c @@ -0,0 +1,1353 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001 - 2015 Intel Corporation + */ + +#include "e1000_api.h" + +/** + * e1000_init_mac_params - Initialize MAC function pointers + * @hw: pointer to the HW structure + * + * This function initializes the function pointers for the MAC + * set of functions. Called by drivers or by e1000_setup_init_funcs. + **/ +s32 e1000_init_mac_params(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + + if (hw->mac.ops.init_params) { + ret_val = hw->mac.ops.init_params(hw); + if (ret_val) { + DEBUGOUT("MAC Initialization Error\n"); + goto out; + } + } else { + DEBUGOUT("mac.init_mac_params was NULL\n"); + ret_val = -E1000_ERR_CONFIG; + } + +out: + return ret_val; +} + +/** + * e1000_init_nvm_params - Initialize NVM function pointers + * @hw: pointer to the HW structure + * + * This function initializes the function pointers for the NVM + * set of functions. Called by drivers or by e1000_setup_init_funcs. 
+ **/ +s32 e1000_init_nvm_params(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + + if (hw->nvm.ops.init_params) { + ret_val = hw->nvm.ops.init_params(hw); + if (ret_val) { + DEBUGOUT("NVM Initialization Error\n"); + goto out; + } + } else { + DEBUGOUT("nvm.init_nvm_params was NULL\n"); + ret_val = -E1000_ERR_CONFIG; + } + +out: + return ret_val; +} + +/** + * e1000_init_phy_params - Initialize PHY function pointers + * @hw: pointer to the HW structure + * + * This function initializes the function pointers for the PHY + * set of functions. Called by drivers or by e1000_setup_init_funcs. + **/ +s32 e1000_init_phy_params(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + + if (hw->phy.ops.init_params) { + ret_val = hw->phy.ops.init_params(hw); + if (ret_val) { + DEBUGOUT("PHY Initialization Error\n"); + goto out; + } + } else { + DEBUGOUT("phy.init_phy_params was NULL\n"); + ret_val = -E1000_ERR_CONFIG; + } + +out: + return ret_val; +} + +/** + * e1000_init_mbx_params - Initialize mailbox function pointers + * @hw: pointer to the HW structure + * + * This function initializes the function pointers for the PHY + * set of functions. Called by drivers or by e1000_setup_init_funcs. + **/ +s32 e1000_init_mbx_params(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + + if (hw->mbx.ops.init_params) { + ret_val = hw->mbx.ops.init_params(hw); + if (ret_val) { + DEBUGOUT("Mailbox Initialization Error\n"); + goto out; + } + } else { + DEBUGOUT("mbx.init_mbx_params was NULL\n"); + ret_val = -E1000_ERR_CONFIG; + } + +out: + return ret_val; +} + +/** + * e1000_set_mac_type - Sets MAC type + * @hw: pointer to the HW structure + * + * This function sets the mac type of the adapter based on the + * device ID stored in the hw structure. + * MUST BE FIRST FUNCTION CALLED (explicitly or through + * e1000_setup_init_funcs()). 
+ **/ +s32 e1000_set_mac_type(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_set_mac_type"); + + switch (hw->device_id) { + case E1000_DEV_ID_82542: + mac->type = e1000_82542; + break; + case E1000_DEV_ID_82543GC_FIBER: + case E1000_DEV_ID_82543GC_COPPER: + mac->type = e1000_82543; + break; + case E1000_DEV_ID_82544EI_COPPER: + case E1000_DEV_ID_82544EI_FIBER: + case E1000_DEV_ID_82544GC_COPPER: + case E1000_DEV_ID_82544GC_LOM: + mac->type = e1000_82544; + break; + case E1000_DEV_ID_82540EM: + case E1000_DEV_ID_82540EM_LOM: + case E1000_DEV_ID_82540EP: + case E1000_DEV_ID_82540EP_LOM: + case E1000_DEV_ID_82540EP_LP: + mac->type = e1000_82540; + break; + case E1000_DEV_ID_82545EM_COPPER: + case E1000_DEV_ID_82545EM_FIBER: + mac->type = e1000_82545; + break; + case E1000_DEV_ID_82545GM_COPPER: + case E1000_DEV_ID_82545GM_FIBER: + case E1000_DEV_ID_82545GM_SERDES: + mac->type = e1000_82545_rev_3; + break; + case E1000_DEV_ID_82546EB_COPPER: + case E1000_DEV_ID_82546EB_FIBER: + case E1000_DEV_ID_82546EB_QUAD_COPPER: + mac->type = e1000_82546; + break; + case E1000_DEV_ID_82546GB_COPPER: + case E1000_DEV_ID_82546GB_FIBER: + case E1000_DEV_ID_82546GB_SERDES: + case E1000_DEV_ID_82546GB_PCIE: + case E1000_DEV_ID_82546GB_QUAD_COPPER: + case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: + mac->type = e1000_82546_rev_3; + break; + case E1000_DEV_ID_82541EI: + case E1000_DEV_ID_82541EI_MOBILE: + case E1000_DEV_ID_82541ER_LOM: + mac->type = e1000_82541; + break; + case E1000_DEV_ID_82541ER: + case E1000_DEV_ID_82541GI: + case E1000_DEV_ID_82541GI_LF: + case E1000_DEV_ID_82541GI_MOBILE: + mac->type = e1000_82541_rev_2; + break; + case E1000_DEV_ID_82547EI: + case E1000_DEV_ID_82547EI_MOBILE: + mac->type = e1000_82547; + break; + case E1000_DEV_ID_82547GI: + mac->type = e1000_82547_rev_2; + break; + case E1000_DEV_ID_82571EB_COPPER: + case E1000_DEV_ID_82571EB_FIBER: + case E1000_DEV_ID_82571EB_SERDES: + case E1000_DEV_ID_82571EB_SERDES_DUAL: + case E1000_DEV_ID_82571EB_SERDES_QUAD: + case E1000_DEV_ID_82571EB_QUAD_COPPER: + case E1000_DEV_ID_82571PT_QUAD_COPPER: + case E1000_DEV_ID_82571EB_QUAD_FIBER: + case E1000_DEV_ID_82571EB_QUAD_COPPER_LP: + mac->type = e1000_82571; + break; + case E1000_DEV_ID_82572EI: + case E1000_DEV_ID_82572EI_COPPER: + case E1000_DEV_ID_82572EI_FIBER: + case E1000_DEV_ID_82572EI_SERDES: + mac->type = e1000_82572; + break; + case E1000_DEV_ID_82573E: + case E1000_DEV_ID_82573E_IAMT: + case E1000_DEV_ID_82573L: + mac->type = e1000_82573; + break; + case E1000_DEV_ID_82574L: + case E1000_DEV_ID_82574LA: + mac->type = e1000_82574; + break; + case E1000_DEV_ID_82583V: + mac->type = e1000_82583; + break; + case E1000_DEV_ID_80003ES2LAN_COPPER_DPT: + case E1000_DEV_ID_80003ES2LAN_SERDES_DPT: + case E1000_DEV_ID_80003ES2LAN_COPPER_SPT: + case E1000_DEV_ID_80003ES2LAN_SERDES_SPT: + mac->type = e1000_80003es2lan; + break; + case E1000_DEV_ID_ICH8_IFE: + case E1000_DEV_ID_ICH8_IFE_GT: + case E1000_DEV_ID_ICH8_IFE_G: + case E1000_DEV_ID_ICH8_IGP_M: + case E1000_DEV_ID_ICH8_IGP_M_AMT: + case E1000_DEV_ID_ICH8_IGP_AMT: + case E1000_DEV_ID_ICH8_IGP_C: + case E1000_DEV_ID_ICH8_82567V_3: + mac->type = e1000_ich8lan; + break; + case E1000_DEV_ID_ICH9_IFE: + case E1000_DEV_ID_ICH9_IFE_GT: + case E1000_DEV_ID_ICH9_IFE_G: + case E1000_DEV_ID_ICH9_IGP_M: + case E1000_DEV_ID_ICH9_IGP_M_AMT: + case E1000_DEV_ID_ICH9_IGP_M_V: + case E1000_DEV_ID_ICH9_IGP_AMT: + case E1000_DEV_ID_ICH9_BM: + case E1000_DEV_ID_ICH9_IGP_C: + case 
E1000_DEV_ID_ICH10_R_BM_LM: + case E1000_DEV_ID_ICH10_R_BM_LF: + case E1000_DEV_ID_ICH10_R_BM_V: + mac->type = e1000_ich9lan; + break; + case E1000_DEV_ID_ICH10_D_BM_LM: + case E1000_DEV_ID_ICH10_D_BM_LF: + case E1000_DEV_ID_ICH10_D_BM_V: + mac->type = e1000_ich10lan; + break; + case E1000_DEV_ID_PCH_D_HV_DM: + case E1000_DEV_ID_PCH_D_HV_DC: + case E1000_DEV_ID_PCH_M_HV_LM: + case E1000_DEV_ID_PCH_M_HV_LC: + mac->type = e1000_pchlan; + break; + case E1000_DEV_ID_PCH2_LV_LM: + case E1000_DEV_ID_PCH2_LV_V: + mac->type = e1000_pch2lan; + break; + case E1000_DEV_ID_PCH_LPT_I217_LM: + case E1000_DEV_ID_PCH_LPT_I217_V: + case E1000_DEV_ID_PCH_LPTLP_I218_LM: + case E1000_DEV_ID_PCH_LPTLP_I218_V: + case E1000_DEV_ID_PCH_I218_LM2: + case E1000_DEV_ID_PCH_I218_V2: + case E1000_DEV_ID_PCH_I218_LM3: + case E1000_DEV_ID_PCH_I218_V3: + mac->type = e1000_pch_lpt; + break; + case E1000_DEV_ID_PCH_SPT_I219_LM: + case E1000_DEV_ID_PCH_SPT_I219_V: + case E1000_DEV_ID_PCH_SPT_I219_LM2: + case E1000_DEV_ID_PCH_SPT_I219_V2: + case E1000_DEV_ID_PCH_LBG_I219_LM3: + case E1000_DEV_ID_PCH_SPT_I219_LM4: + case E1000_DEV_ID_PCH_SPT_I219_V4: + case E1000_DEV_ID_PCH_SPT_I219_LM5: + case E1000_DEV_ID_PCH_SPT_I219_V5: + mac->type = e1000_pch_spt; + break; + case E1000_DEV_ID_PCH_CNP_I219_LM6: + case E1000_DEV_ID_PCH_CNP_I219_V6: + case E1000_DEV_ID_PCH_CNP_I219_LM7: + case E1000_DEV_ID_PCH_CNP_I219_V7: + mac->type = e1000_pch_cnp; + break; + case E1000_DEV_ID_82575EB_COPPER: + case E1000_DEV_ID_82575EB_FIBER_SERDES: + case E1000_DEV_ID_82575GB_QUAD_COPPER: + mac->type = e1000_82575; + break; + case E1000_DEV_ID_82576: + case E1000_DEV_ID_82576_FIBER: + case E1000_DEV_ID_82576_SERDES: + case E1000_DEV_ID_82576_QUAD_COPPER: + case E1000_DEV_ID_82576_QUAD_COPPER_ET2: + case E1000_DEV_ID_82576_NS: + case E1000_DEV_ID_82576_NS_SERDES: + case E1000_DEV_ID_82576_SERDES_QUAD: + mac->type = e1000_82576; + break; + case E1000_DEV_ID_82580_COPPER: + case E1000_DEV_ID_82580_FIBER: + case E1000_DEV_ID_82580_SERDES: + case E1000_DEV_ID_82580_SGMII: + case E1000_DEV_ID_82580_COPPER_DUAL: + case E1000_DEV_ID_82580_QUAD_FIBER: + case E1000_DEV_ID_DH89XXCC_SGMII: + case E1000_DEV_ID_DH89XXCC_SERDES: + case E1000_DEV_ID_DH89XXCC_BACKPLANE: + case E1000_DEV_ID_DH89XXCC_SFP: + mac->type = e1000_82580; + break; + case E1000_DEV_ID_I350_COPPER: + case E1000_DEV_ID_I350_FIBER: + case E1000_DEV_ID_I350_SERDES: + case E1000_DEV_ID_I350_SGMII: + case E1000_DEV_ID_I350_DA4: + mac->type = e1000_i350; + break; + case E1000_DEV_ID_I210_COPPER_FLASHLESS: + case E1000_DEV_ID_I210_SERDES_FLASHLESS: + case E1000_DEV_ID_I210_COPPER: + case E1000_DEV_ID_I210_COPPER_OEM1: + case E1000_DEV_ID_I210_COPPER_IT: + case E1000_DEV_ID_I210_FIBER: + case E1000_DEV_ID_I210_SERDES: + case E1000_DEV_ID_I210_SGMII: + mac->type = e1000_i210; + break; + case E1000_DEV_ID_I211_COPPER: + mac->type = e1000_i211; + break; + case E1000_DEV_ID_82576_VF: + case E1000_DEV_ID_82576_VF_HV: + mac->type = e1000_vfadapt; + break; + case E1000_DEV_ID_I350_VF: + case E1000_DEV_ID_I350_VF_HV: + mac->type = e1000_vfadapt_i350; + break; + + case E1000_DEV_ID_I354_BACKPLANE_1GBPS: + case E1000_DEV_ID_I354_SGMII: + case E1000_DEV_ID_I354_BACKPLANE_2_5GBPS: + mac->type = e1000_i354; + break; + default: + /* Should never have loaded on this device */ + ret_val = -E1000_ERR_MAC_INIT; + break; + } + + return ret_val; +} + +/** + * e1000_setup_init_funcs - Initializes function pointers + * @hw: pointer to the HW structure + * @init_device: true will initialize the rest of the function pointers + * 
getting the device ready for use. false will only set + * MAC type and the function pointers for the other init + * functions. Passing false will not generate any hardware + * reads or writes. + * + * This function must be called by a driver in order to use the rest + * of the 'shared' code files. Called by drivers only. + **/ +s32 e1000_setup_init_funcs(struct e1000_hw *hw, bool init_device) +{ + s32 ret_val; + + /* Can't do much good without knowing the MAC type. */ + ret_val = e1000_set_mac_type(hw); + if (ret_val) { + DEBUGOUT("ERROR: MAC type could not be set properly.\n"); + goto out; + } + + if (!hw->hw_addr) { + DEBUGOUT("ERROR: Registers not mapped\n"); + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + /* + * Init function pointers to generic implementations. We do this first + * allowing a driver module to override it afterward. + */ + e1000_init_mac_ops_generic(hw); + e1000_init_phy_ops_generic(hw); + e1000_init_nvm_ops_generic(hw); + e1000_init_mbx_ops_generic(hw); + + /* + * Set up the init function pointers. These are functions within the + * adapter family file that sets up function pointers for the rest of + * the functions in that family. + */ + switch (hw->mac.type) { + case e1000_82542: + e1000_init_function_pointers_82542(hw); + break; + case e1000_82543: + case e1000_82544: + e1000_init_function_pointers_82543(hw); + break; + case e1000_82540: + case e1000_82545: + case e1000_82545_rev_3: + case e1000_82546: + case e1000_82546_rev_3: + e1000_init_function_pointers_82540(hw); + break; + case e1000_82541: + case e1000_82541_rev_2: + case e1000_82547: + case e1000_82547_rev_2: + e1000_init_function_pointers_82541(hw); + break; + case e1000_82571: + case e1000_82572: + case e1000_82573: + case e1000_82574: + case e1000_82583: + e1000_init_function_pointers_82571(hw); + break; + case e1000_80003es2lan: + e1000_init_function_pointers_80003es2lan(hw); + break; + case e1000_ich8lan: + case e1000_ich9lan: + case e1000_ich10lan: + case e1000_pchlan: + case e1000_pch2lan: + case e1000_pch_lpt: + case e1000_pch_spt: + case e1000_pch_cnp: + e1000_init_function_pointers_ich8lan(hw); + break; + case e1000_82575: + case e1000_82576: + case e1000_82580: + case e1000_i350: + case e1000_i354: + e1000_init_function_pointers_82575(hw); + break; + case e1000_i210: + case e1000_i211: + e1000_init_function_pointers_i210(hw); + break; + case e1000_vfadapt: + e1000_init_function_pointers_vf(hw); + break; + case e1000_vfadapt_i350: + e1000_init_function_pointers_vf(hw); + break; + default: + DEBUGOUT("Hardware not supported\n"); + ret_val = -E1000_ERR_CONFIG; + break; + } + + /* + * Initialize the rest of the function pointers. These require some + * register reads/writes in some cases. + */ + if (!(ret_val) && init_device) { + ret_val = e1000_init_mac_params(hw); + if (ret_val) + goto out; + + ret_val = e1000_init_nvm_params(hw); + if (ret_val) + goto out; + + ret_val = e1000_init_phy_params(hw); + if (ret_val) + goto out; + + ret_val = e1000_init_mbx_params(hw); + if (ret_val) + goto out; + } + +out: + return ret_val; +} + +/** + * e1000_get_bus_info - Obtain bus information for adapter + * @hw: pointer to the HW structure + * + * This will obtain information about the HW bus for which the + * adapter is attached and stores it in the hw structure. This is a + * function pointer entry point called by drivers. 
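+ *
+ * Illustrative bring-up order for these entry points; the -EIO mapping and
+ * early returns are driver-side placeholders, not part of this API:
+ *
+ *        if (e1000_setup_init_funcs(hw, true) != E1000_SUCCESS)
+ *                return -EIO;
+ *        e1000_get_bus_info(hw);
+ *        if (e1000_reset_hw(hw) != E1000_SUCCESS ||
+ *            e1000_init_hw(hw) != E1000_SUCCESS)
+ *                return -EIO;
+ *        e1000_setup_link(hw);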
+ **/ +s32 e1000_get_bus_info(struct e1000_hw *hw) +{ + if (hw->mac.ops.get_bus_info) + return hw->mac.ops.get_bus_info(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_clear_vfta - Clear VLAN filter table + * @hw: pointer to the HW structure + * + * This clears the VLAN filter table on the adapter. This is a function + * pointer entry point called by drivers. + **/ +void e1000_clear_vfta(struct e1000_hw *hw) +{ + if (hw->mac.ops.clear_vfta) + hw->mac.ops.clear_vfta(hw); +} + +/** + * e1000_write_vfta - Write value to VLAN filter table + * @hw: pointer to the HW structure + * @offset: the 32-bit offset in which to write the value to. + * @value: the 32-bit value to write at location offset. + * + * This writes a 32-bit value to a 32-bit offset in the VLAN filter + * table. This is a function pointer entry point called by drivers. + **/ +void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value) +{ + if (hw->mac.ops.write_vfta) + hw->mac.ops.write_vfta(hw, offset, value); +} + +/** + * e1000_update_mc_addr_list - Update Multicast addresses + * @hw: pointer to the HW structure + * @mc_addr_list: array of multicast addresses to program + * @mc_addr_count: number of multicast addresses to program + * + * Updates the Multicast Table Array. + * The caller must have a packed mc_addr_list of multicast addresses. + **/ +void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list, + u32 mc_addr_count) +{ + if (hw->mac.ops.update_mc_addr_list) + hw->mac.ops.update_mc_addr_list(hw, mc_addr_list, + mc_addr_count); +} + +/** + * e1000_force_mac_fc - Force MAC flow control + * @hw: pointer to the HW structure + * + * Force the MAC's flow control settings. Currently no func pointer exists + * and all implementations are handled in the generic version of this + * function. + **/ +s32 e1000_force_mac_fc(struct e1000_hw *hw) +{ + return e1000_force_mac_fc_generic(hw); +} + +/** + * e1000_check_for_link - Check/Store link connection + * @hw: pointer to the HW structure + * + * This checks the link condition of the adapter and stores the + * results in the hw->mac structure. This is a function pointer entry + * point called by drivers. + **/ +s32 e1000_check_for_link(struct e1000_hw *hw) +{ + if (hw->mac.ops.check_for_link) + return hw->mac.ops.check_for_link(hw); + + return -E1000_ERR_CONFIG; +} + +/** + * e1000_check_mng_mode - Check management mode + * @hw: pointer to the HW structure + * + * This checks if the adapter has manageability enabled. + * This is a function pointer entry point called by drivers. + **/ +bool e1000_check_mng_mode(struct e1000_hw *hw) +{ + if (hw->mac.ops.check_mng_mode) + return hw->mac.ops.check_mng_mode(hw); + + return false; +} + +/** + * e1000_mng_write_dhcp_info - Writes DHCP info to host interface + * @hw: pointer to the HW structure + * @buffer: pointer to the host interface + * @length: size of the buffer + * + * Writes the DHCP information to the host interface. + **/ +s32 e1000_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length) +{ + return e1000_mng_write_dhcp_info_generic(hw, buffer, length); +} + +/** + * e1000_reset_hw - Reset hardware + * @hw: pointer to the HW structure + * + * This resets the hardware into a known state. This is a function pointer + * entry point called by drivers. 
+ **/ +s32 e1000_reset_hw(struct e1000_hw *hw) +{ + if (hw->mac.ops.reset_hw) + return hw->mac.ops.reset_hw(hw); + + return -E1000_ERR_CONFIG; +} + +/** + * e1000_init_hw - Initialize hardware + * @hw: pointer to the HW structure + * + * This inits the hardware readying it for operation. This is a function + * pointer entry point called by drivers. + **/ +s32 e1000_init_hw(struct e1000_hw *hw) +{ + if (hw->mac.ops.init_hw) + return hw->mac.ops.init_hw(hw); + + return -E1000_ERR_CONFIG; +} + +/** + * e1000_setup_link - Configures link and flow control + * @hw: pointer to the HW structure + * + * This configures link and flow control settings for the adapter. This + * is a function pointer entry point called by drivers. While modules can + * also call this, they probably call their own version of this function. + **/ +s32 e1000_setup_link(struct e1000_hw *hw) +{ + if (hw->mac.ops.setup_link) + return hw->mac.ops.setup_link(hw); + + return -E1000_ERR_CONFIG; +} + +/** + * e1000_get_speed_and_duplex - Returns current speed and duplex + * @hw: pointer to the HW structure + * @speed: pointer to a 16-bit value to store the speed + * @duplex: pointer to a 16-bit value to store the duplex. + * + * This returns the speed and duplex of the adapter in the two 'out' + * variables passed in. This is a function pointer entry point called + * by drivers. + **/ +s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex) +{ + if (hw->mac.ops.get_link_up_info) + return hw->mac.ops.get_link_up_info(hw, speed, duplex); + + return -E1000_ERR_CONFIG; +} + +/** + * e1000_setup_led - Configures SW controllable LED + * @hw: pointer to the HW structure + * + * This prepares the SW controllable LED for use and saves the current state + * of the LED so it can be later restored. This is a function pointer entry + * point called by drivers. + **/ +s32 e1000_setup_led(struct e1000_hw *hw) +{ + if (hw->mac.ops.setup_led) + return hw->mac.ops.setup_led(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_cleanup_led - Restores SW controllable LED + * @hw: pointer to the HW structure + * + * This restores the SW controllable LED to the value saved off by + * e1000_setup_led. This is a function pointer entry point called by drivers. + **/ +s32 e1000_cleanup_led(struct e1000_hw *hw) +{ + if (hw->mac.ops.cleanup_led) + return hw->mac.ops.cleanup_led(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_blink_led - Blink SW controllable LED + * @hw: pointer to the HW structure + * + * This starts the adapter LED blinking. Request the LED to be setup first + * and cleaned up after. This is a function pointer entry point called by + * drivers. + **/ +s32 e1000_blink_led(struct e1000_hw *hw) +{ + if (hw->mac.ops.blink_led) + return hw->mac.ops.blink_led(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_id_led_init - store LED configurations in SW + * @hw: pointer to the HW structure + * + * Initializes the LED config in SW. This is a function pointer entry point + * called by drivers. + **/ +s32 e1000_id_led_init(struct e1000_hw *hw) +{ + if (hw->mac.ops.id_led_init) + return hw->mac.ops.id_led_init(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_led_on - Turn on SW controllable LED + * @hw: pointer to the HW structure + * + * Turns the SW defined LED on. This is a function pointer entry point + * called by drivers. 
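+ *
+ * Illustrative "identify adapter" sequence; the wait between on and off is
+ * a driver-side placeholder:
+ *
+ *        e1000_setup_led(hw);
+ *        e1000_led_on(hw);        ... or e1000_blink_led(hw) ...
+ *        ... wait for the desired identify period ...
+ *        e1000_led_off(hw);
+ *        e1000_cleanup_led(hw);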
+ **/ +s32 e1000_led_on(struct e1000_hw *hw) +{ + if (hw->mac.ops.led_on) + return hw->mac.ops.led_on(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_led_off - Turn off SW controllable LED + * @hw: pointer to the HW structure + * + * Turns the SW defined LED off. This is a function pointer entry point + * called by drivers. + **/ +s32 e1000_led_off(struct e1000_hw *hw) +{ + if (hw->mac.ops.led_off) + return hw->mac.ops.led_off(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_reset_adaptive - Reset adaptive IFS + * @hw: pointer to the HW structure + * + * Resets the adaptive IFS. Currently no func pointer exists and all + * implementations are handled in the generic version of this function. + **/ +void e1000_reset_adaptive(struct e1000_hw *hw) +{ + e1000_reset_adaptive_generic(hw); +} + +/** + * e1000_update_adaptive - Update adaptive IFS + * @hw: pointer to the HW structure + * + * Updates adapter IFS. Currently no func pointer exists and all + * implementations are handled in the generic version of this function. + **/ +void e1000_update_adaptive(struct e1000_hw *hw) +{ + e1000_update_adaptive_generic(hw); +} + +/** + * e1000_disable_pcie_master - Disable PCI-Express master access + * @hw: pointer to the HW structure + * + * Disables PCI-Express master access and verifies there are no pending + * requests. Currently no func pointer exists and all implementations are + * handled in the generic version of this function. + **/ +s32 e1000_disable_pcie_master(struct e1000_hw *hw) +{ + return e1000_disable_pcie_master_generic(hw); +} + +/** + * e1000_config_collision_dist - Configure collision distance + * @hw: pointer to the HW structure + * + * Configures the collision distance to the default value and is used + * during link setup. + **/ +void e1000_config_collision_dist(struct e1000_hw *hw) +{ + if (hw->mac.ops.config_collision_dist) + hw->mac.ops.config_collision_dist(hw); +} + +/** + * e1000_rar_set - Sets a receive address register + * @hw: pointer to the HW structure + * @addr: address to set the RAR to + * @index: the RAR to set + * + * Sets a Receive Address Register (RAR) to the specified address. + **/ +int e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index) +{ + if (hw->mac.ops.rar_set) + return hw->mac.ops.rar_set(hw, addr, index); + + return E1000_SUCCESS; +} + +/** + * e1000_validate_mdi_setting - Ensures valid MDI/MDIX SW state + * @hw: pointer to the HW structure + * + * Ensures that the MDI/MDIX SW state is valid. + **/ +s32 e1000_validate_mdi_setting(struct e1000_hw *hw) +{ + if (hw->mac.ops.validate_mdi_setting) + return hw->mac.ops.validate_mdi_setting(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_hash_mc_addr - Determines address location in multicast table + * @hw: pointer to the HW structure + * @mc_addr: Multicast address to hash. + * + * This hashes an address to determine its location in the multicast + * table. Currently no func pointer exists and all implementations + * are handled in the generic version of this function. + **/ +u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr) +{ + return e1000_hash_mc_addr_generic(hw, mc_addr); +} + +/** + * e1000_enable_tx_pkt_filtering - Enable packet filtering on TX + * @hw: pointer to the HW structure + * + * Enables packet filtering on transmit packets if manageability is enabled + * and host interface is enabled. + * Currently no func pointer exists and all implementations are handled in the + * generic version of this function. 
+ **/ +bool e1000_enable_tx_pkt_filtering(struct e1000_hw *hw) +{ + return e1000_enable_tx_pkt_filtering_generic(hw); +} + +/** + * e1000_mng_host_if_write - Writes to the manageability host interface + * @hw: pointer to the HW structure + * @buffer: pointer to the host interface buffer + * @length: size of the buffer + * @offset: location in the buffer to write to + * @sum: sum of the data (not checksum) + * + * This function writes the buffer content at the offset given on the host if. + * It also does alignment considerations to do the writes in most efficient + * way. Also fills up the sum of the buffer in *buffer parameter. + **/ +s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer, u16 length, + u16 offset, u8 *sum) +{ + return e1000_mng_host_if_write_generic(hw, buffer, length, offset, sum); +} + +/** + * e1000_mng_write_cmd_header - Writes manageability command header + * @hw: pointer to the HW structure + * @hdr: pointer to the host interface command header + * + * Writes the command header after does the checksum calculation. + **/ +s32 e1000_mng_write_cmd_header(struct e1000_hw *hw, + struct e1000_host_mng_command_header *hdr) +{ + return e1000_mng_write_cmd_header_generic(hw, hdr); +} + +/** + * e1000_mng_enable_host_if - Checks host interface is enabled + * @hw: pointer to the HW structure + * + * Returns E1000_success upon success, else E1000_ERR_HOST_INTERFACE_COMMAND + * + * This function checks whether the HOST IF is enabled for command operation + * and also checks whether the previous command is completed. It busy waits + * in case of previous command is not completed. + **/ +s32 e1000_mng_enable_host_if(struct e1000_hw *hw) +{ + return e1000_mng_enable_host_if_generic(hw); +} + +/** + * e1000_check_reset_block - Verifies PHY can be reset + * @hw: pointer to the HW structure + * + * Checks if the PHY is in a state that can be reset or if manageability + * has it tied up. This is a function pointer entry point called by drivers. + **/ +s32 e1000_check_reset_block(struct e1000_hw *hw) +{ + if (hw->phy.ops.check_reset_block) + return hw->phy.ops.check_reset_block(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_read_phy_reg - Reads PHY register + * @hw: pointer to the HW structure + * @offset: the register to read + * @data: the buffer to store the 16-bit read. + * + * Reads the PHY register and returns the value in data. + * This is a function pointer entry point called by drivers. + **/ +s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data) +{ + if (hw->phy.ops.read_reg) + return hw->phy.ops.read_reg(hw, offset, data); + + return E1000_SUCCESS; +} + +/** + * e1000_write_phy_reg - Writes PHY register + * @hw: pointer to the HW structure + * @offset: the register to write + * @data: the value to write. + * + * Writes the PHY register at offset with the value in data. + * This is a function pointer entry point called by drivers. + **/ +s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 offset, u16 data) +{ + if (hw->phy.ops.write_reg) + return hw->phy.ops.write_reg(hw, offset, data); + + return E1000_SUCCESS; +} + +/** + * e1000_release_phy - Generic release PHY + * @hw: pointer to the HW structure + * + * Return if silicon family does not require a semaphore when accessing the + * PHY. 
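+ *
+ * Illustrative pairing with e1000_acquire_phy(); the guarded accesses are
+ * placeholders:
+ *
+ *        if (e1000_acquire_phy(hw) == E1000_SUCCESS) {
+ *                ... PHY accesses needing exclusive ownership ...
+ *                e1000_release_phy(hw);
+ *        }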
+ **/ +void e1000_release_phy(struct e1000_hw *hw) +{ + if (hw->phy.ops.release) + hw->phy.ops.release(hw); +} + +/** + * e1000_acquire_phy - Generic acquire PHY + * @hw: pointer to the HW structure + * + * Return success if silicon family does not require a semaphore when + * accessing the PHY. + **/ +s32 e1000_acquire_phy(struct e1000_hw *hw) +{ + if (hw->phy.ops.acquire) + return hw->phy.ops.acquire(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_cfg_on_link_up - Configure PHY upon link up + * @hw: pointer to the HW structure + **/ +s32 e1000_cfg_on_link_up(struct e1000_hw *hw) +{ + if (hw->phy.ops.cfg_on_link_up) + return hw->phy.ops.cfg_on_link_up(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_read_kmrn_reg - Reads register using Kumeran interface + * @hw: pointer to the HW structure + * @offset: the register to read + * @data: the location to store the 16-bit value read. + * + * Reads a register out of the Kumeran interface. Currently no func pointer + * exists and all implementations are handled in the generic version of + * this function. + **/ +s32 e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data) +{ + return e1000_read_kmrn_reg_generic(hw, offset, data); +} + +/** + * e1000_write_kmrn_reg - Writes register using Kumeran interface + * @hw: pointer to the HW structure + * @offset: the register to write + * @data: the value to write. + * + * Writes a register to the Kumeran interface. Currently no func pointer + * exists and all implementations are handled in the generic version of + * this function. + **/ +s32 e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data) +{ + return e1000_write_kmrn_reg_generic(hw, offset, data); +} + +/** + * e1000_get_cable_length - Retrieves cable length estimation + * @hw: pointer to the HW structure + * + * This function estimates the cable length and stores them in + * hw->phy.min_length and hw->phy.max_length. This is a function pointer + * entry point called by drivers. + **/ +s32 e1000_get_cable_length(struct e1000_hw *hw) +{ + if (hw->phy.ops.get_cable_length) + return hw->phy.ops.get_cable_length(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_get_phy_info - Retrieves PHY information from registers + * @hw: pointer to the HW structure + * + * This function gets some information from various PHY registers and + * populates hw->phy values with it. This is a function pointer entry + * point called by drivers. + **/ +s32 e1000_get_phy_info(struct e1000_hw *hw) +{ + if (hw->phy.ops.get_info) + return hw->phy.ops.get_info(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_phy_hw_reset - Hard PHY reset + * @hw: pointer to the HW structure + * + * Performs a hard PHY reset. This is a function pointer entry point called + * by drivers. + **/ +s32 e1000_phy_hw_reset(struct e1000_hw *hw) +{ + if (hw->phy.ops.reset) + return hw->phy.ops.reset(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_phy_commit - Soft PHY reset + * @hw: pointer to the HW structure + * + * Performs a soft PHY reset on those that apply. This is a function pointer + * entry point called by drivers. 
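+ *
+ * Illustrative pattern for applying a PHY register change; OFFSET, SOME_BIT
+ * and the u16 local are placeholders:
+ *
+ *        u16 val;
+ *        e1000_read_phy_reg(hw, OFFSET, &val);
+ *        e1000_write_phy_reg(hw, OFFSET, val | SOME_BIT);
+ *        e1000_phy_commit(hw);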
+ **/ +s32 e1000_phy_commit(struct e1000_hw *hw) +{ + if (hw->phy.ops.commit) + return hw->phy.ops.commit(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_set_d0_lplu_state - Sets low power link up state for D0 + * @hw: pointer to the HW structure + * @active: boolean used to enable/disable lplu + * + * Success returns 0, Failure returns 1 + * + * The low power link up (lplu) state is set to the power management level D0 + * and SmartSpeed is disabled when active is true, else clear lplu for D0 + * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU + * is used during Dx states where the power conservation is most important. + * During driver activity, SmartSpeed should be enabled so performance is + * maintained. This is a function pointer entry point called by drivers. + **/ +s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active) +{ + if (hw->phy.ops.set_d0_lplu_state) + return hw->phy.ops.set_d0_lplu_state(hw, active); + + return E1000_SUCCESS; +} + +/** + * e1000_set_d3_lplu_state - Sets low power link up state for D3 + * @hw: pointer to the HW structure + * @active: boolean used to enable/disable lplu + * + * Success returns 0, Failure returns 1 + * + * The low power link up (lplu) state is set to the power management level D3 + * and SmartSpeed is disabled when active is true, else clear lplu for D3 + * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU + * is used during Dx states where the power conservation is most important. + * During driver activity, SmartSpeed should be enabled so performance is + * maintained. This is a function pointer entry point called by drivers. + **/ +s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active) +{ + if (hw->phy.ops.set_d3_lplu_state) + return hw->phy.ops.set_d3_lplu_state(hw, active); + + return E1000_SUCCESS; +} + +/** + * e1000_read_mac_addr - Reads MAC address + * @hw: pointer to the HW structure + * + * Reads the MAC address out of the adapter and stores it in the HW structure. + * Currently no func pointer exists and all implementations are handled in the + * generic version of this function. + **/ +s32 e1000_read_mac_addr(struct e1000_hw *hw) +{ + if (hw->mac.ops.read_mac_addr) + return hw->mac.ops.read_mac_addr(hw); + + return e1000_read_mac_addr_generic(hw); +} + +/** + * e1000_read_pba_string - Read device part number string + * @hw: pointer to the HW structure + * @pba_num: pointer to device part number + * @pba_num_size: size of part number buffer + * + * Reads the product board assembly (PBA) number from the EEPROM and stores + * the value in pba_num. + * Currently no func pointer exists and all implementations are handled in the + * generic version of this function. + **/ +s32 e1000_read_pba_string(struct e1000_hw *hw, u8 *pba_num, u32 pba_num_size) +{ + return e1000_read_pba_string_generic(hw, pba_num, pba_num_size); +} + +/** + * e1000_read_pba_length - Read device part number string length + * @hw: pointer to the HW structure + * @pba_num_size: size of part number buffer + * + * Reads the product board assembly (PBA) number length from the EEPROM and + * stores the value in pba_num. + * Currently no func pointer exists and all implementations are handled in the + * generic version of this function. 
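+ *
+ * Illustrative use together with e1000_read_pba_string(); the fixed-size
+ * local buffer is a placeholder and real callers should bound it:
+ *
+ *        u32 pba_size;
+ *        u8 pba[32];
+ *        if (e1000_read_pba_length(hw, &pba_size) == E1000_SUCCESS &&
+ *            pba_size <= sizeof(pba))
+ *                e1000_read_pba_string(hw, pba, pba_size);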
+ **/ +s32 e1000_read_pba_length(struct e1000_hw *hw, u32 *pba_num_size) +{ + return e1000_read_pba_length_generic(hw, pba_num_size); +} + +/** + * e1000_read_pba_num - Read device part number + * @hw: pointer to the HW structure + * @pba_num: pointer to device part number + * + * Reads the product board assembly (PBA) number from the EEPROM and stores + * the value in pba_num. + * Currently no func pointer exists and all implementations are handled in the + * generic version of this function. + **/ +s32 e1000_read_pba_num(struct e1000_hw *hw, u32 *pba_num) +{ + return e1000_read_pba_num_generic(hw, pba_num); +} + +/** + * e1000_validate_nvm_checksum - Verifies NVM (EEPROM) checksum + * @hw: pointer to the HW structure + * + * Validates the NVM checksum is correct. This is a function pointer entry + * point called by drivers. + **/ +s32 e1000_validate_nvm_checksum(struct e1000_hw *hw) +{ + if (hw->nvm.ops.validate) + return hw->nvm.ops.validate(hw); + + return -E1000_ERR_CONFIG; +} + +/** + * e1000_update_nvm_checksum - Updates NVM (EEPROM) checksum + * @hw: pointer to the HW structure + * + * Updates the NVM checksum. Currently no func pointer exists and all + * implementations are handled in the generic version of this function. + **/ +s32 e1000_update_nvm_checksum(struct e1000_hw *hw) +{ + if (hw->nvm.ops.update) + return hw->nvm.ops.update(hw); + + return -E1000_ERR_CONFIG; +} + +/** + * e1000_reload_nvm - Reloads EEPROM + * @hw: pointer to the HW structure + * + * Reloads the EEPROM by setting the "Reinitialize from EEPROM" bit in the + * extended control register. + **/ +void e1000_reload_nvm(struct e1000_hw *hw) +{ + if (hw->nvm.ops.reload) + hw->nvm.ops.reload(hw); +} + +/** + * e1000_read_nvm - Reads NVM (EEPROM) + * @hw: pointer to the HW structure + * @offset: the word offset to read + * @words: number of 16-bit words to read + * @data: pointer to the properly sized buffer for the data. + * + * Reads 16-bit chunks of data from the NVM (EEPROM). This is a function + * pointer entry point called by drivers. + **/ +s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + if (hw->nvm.ops.read) + return hw->nvm.ops.read(hw, offset, words, data); + + return -E1000_ERR_CONFIG; +} + +/** + * e1000_write_nvm - Writes to NVM (EEPROM) + * @hw: pointer to the HW structure + * @offset: the word offset to read + * @words: number of 16-bit words to write + * @data: pointer to the properly sized buffer for the data. + * + * Writes 16-bit chunks of data to the NVM (EEPROM). This is a function + * pointer entry point called by drivers. + **/ +s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + if (hw->nvm.ops.write) + return hw->nvm.ops.write(hw, offset, words, data); + + return E1000_SUCCESS; +} + +/** + * e1000_write_8bit_ctrl_reg - Writes 8bit Control register + * @hw: pointer to the HW structure + * @reg: 32bit register offset + * @offset: the register to write + * @data: the value to write. + * + * Writes the PHY register at offset with the value in data. + * This is a function pointer entry point called by drivers. + **/ +s32 e1000_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, u32 offset, + u8 data) +{ + return e1000_write_8bit_ctrl_reg_generic(hw, reg, offset, data); +} + +/** + * e1000_power_up_phy - Restores link in case of PHY power down + * @hw: pointer to the HW structure + * + * The phy may be powered down to save power, to turn off link when the + * driver is unloaded, or wake on lan is not enabled (among others). 
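+ *
+ * Illustrative suspend/resume pairing; whether and when to power the PHY
+ * down is driver policy:
+ *
+ *        on suspend/stop:  e1000_power_down_phy(hw);
+ *        on resume/start:  e1000_power_up_phy(hw);
+ *        (e1000_setup_link() is invoked internally after powering the PHY up)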
+ **/ +void e1000_power_up_phy(struct e1000_hw *hw) +{ + if (hw->phy.ops.power_up) + hw->phy.ops.power_up(hw); + + e1000_setup_link(hw); +} + +/** + * e1000_power_down_phy - Power down PHY + * @hw: pointer to the HW structure + * + * The phy may be powered down to save power, to turn off link when the + * driver is unloaded, or wake on lan is not enabled (among others). + **/ +void e1000_power_down_phy(struct e1000_hw *hw) +{ + if (hw->phy.ops.power_down) + hw->phy.ops.power_down(hw); +} + +/** + * e1000_power_up_fiber_serdes_link - Power up serdes link + * @hw: pointer to the HW structure + * + * Power on the optics and PCS. + **/ +void e1000_power_up_fiber_serdes_link(struct e1000_hw *hw) +{ + if (hw->mac.ops.power_up_serdes) + hw->mac.ops.power_up_serdes(hw); +} + +/** + * e1000_shutdown_fiber_serdes_link - Remove link during power down + * @hw: pointer to the HW structure + * + * Shutdown the optics and PCS on driver unload. + **/ +void e1000_shutdown_fiber_serdes_link(struct e1000_hw *hw) +{ + if (hw->mac.ops.shutdown_serdes) + hw->mac.ops.shutdown_serdes(hw); +} + diff --git a/src/spdk/dpdk/drivers/net/e1000/base/e1000_api.h b/src/spdk/dpdk/drivers/net/e1000/base/e1000_api.h new file mode 100644 index 000000000..3054d5b9d --- /dev/null +++ b/src/spdk/dpdk/drivers/net/e1000/base/e1000_api.h @@ -0,0 +1,138 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001 - 2015 Intel Corporation + */ + +#ifndef _E1000_API_H_ +#define _E1000_API_H_ + +#include "e1000_hw.h" + +extern void e1000_init_function_pointers_82542(struct e1000_hw *hw); +extern void e1000_init_function_pointers_82543(struct e1000_hw *hw); +extern void e1000_init_function_pointers_82540(struct e1000_hw *hw); +extern void e1000_init_function_pointers_82571(struct e1000_hw *hw); +extern void e1000_init_function_pointers_82541(struct e1000_hw *hw); +extern void e1000_init_function_pointers_80003es2lan(struct e1000_hw *hw); +extern void e1000_init_function_pointers_ich8lan(struct e1000_hw *hw); +extern void e1000_init_function_pointers_82575(struct e1000_hw *hw); +extern void e1000_rx_fifo_flush_82575(struct e1000_hw *hw); +extern void e1000_init_function_pointers_vf(struct e1000_hw *hw); +extern void e1000_power_up_fiber_serdes_link(struct e1000_hw *hw); +extern void e1000_shutdown_fiber_serdes_link(struct e1000_hw *hw); +extern void e1000_init_function_pointers_i210(struct e1000_hw *hw); + +s32 e1000_set_obff_timer(struct e1000_hw *hw, u32 itr); +s32 e1000_set_mac_type(struct e1000_hw *hw); +s32 e1000_setup_init_funcs(struct e1000_hw *hw, bool init_device); +s32 e1000_init_mac_params(struct e1000_hw *hw); +s32 e1000_init_nvm_params(struct e1000_hw *hw); +s32 e1000_init_phy_params(struct e1000_hw *hw); +s32 e1000_init_mbx_params(struct e1000_hw *hw); +s32 e1000_get_bus_info(struct e1000_hw *hw); +void e1000_clear_vfta(struct e1000_hw *hw); +void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value); +s32 e1000_force_mac_fc(struct e1000_hw *hw); +s32 e1000_check_for_link(struct e1000_hw *hw); +s32 e1000_reset_hw(struct e1000_hw *hw); +s32 e1000_init_hw(struct e1000_hw *hw); +s32 e1000_setup_link(struct e1000_hw *hw); +s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex); +s32 e1000_disable_pcie_master(struct e1000_hw *hw); +void e1000_config_collision_dist(struct e1000_hw *hw); +int e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index); +u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr); +void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list, + u32 
mc_addr_count); +s32 e1000_setup_led(struct e1000_hw *hw); +s32 e1000_cleanup_led(struct e1000_hw *hw); +s32 e1000_check_reset_block(struct e1000_hw *hw); +s32 e1000_blink_led(struct e1000_hw *hw); +s32 e1000_led_on(struct e1000_hw *hw); +s32 e1000_led_off(struct e1000_hw *hw); +s32 e1000_id_led_init(struct e1000_hw *hw); +void e1000_reset_adaptive(struct e1000_hw *hw); +void e1000_update_adaptive(struct e1000_hw *hw); +s32 e1000_get_cable_length(struct e1000_hw *hw); +s32 e1000_validate_mdi_setting(struct e1000_hw *hw); +s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, u32 offset, + u8 data); +s32 e1000_get_phy_info(struct e1000_hw *hw); +void e1000_release_phy(struct e1000_hw *hw); +s32 e1000_acquire_phy(struct e1000_hw *hw); +s32 e1000_cfg_on_link_up(struct e1000_hw *hw); +s32 e1000_phy_hw_reset(struct e1000_hw *hw); +s32 e1000_phy_commit(struct e1000_hw *hw); +void e1000_power_up_phy(struct e1000_hw *hw); +void e1000_power_down_phy(struct e1000_hw *hw); +s32 e1000_read_mac_addr(struct e1000_hw *hw); +s32 e1000_read_pba_num(struct e1000_hw *hw, u32 *part_num); +s32 e1000_read_pba_string(struct e1000_hw *hw, u8 *pba_num, u32 pba_num_size); +s32 e1000_read_pba_length(struct e1000_hw *hw, u32 *pba_num_size); +void e1000_reload_nvm(struct e1000_hw *hw); +s32 e1000_update_nvm_checksum(struct e1000_hw *hw); +s32 e1000_validate_nvm_checksum(struct e1000_hw *hw); +s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); +s32 e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); +s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active); +s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active); +bool e1000_check_mng_mode(struct e1000_hw *hw); +bool e1000_enable_tx_pkt_filtering(struct e1000_hw *hw); +s32 e1000_mng_enable_host_if(struct e1000_hw *hw); +s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer, u16 length, + u16 offset, u8 *sum); +s32 e1000_mng_write_cmd_header(struct e1000_hw *hw, + struct e1000_host_mng_command_header *hdr); +s32 e1000_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length); +u32 e1000_translate_register_82542(u32 reg); + + + +/* + * TBI_ACCEPT macro definition: + * + * This macro requires: + * a = a pointer to struct e1000_hw + * status = the 8 bit status field of the Rx descriptor with EOP set + * errors = the 8 bit error field of the Rx descriptor with EOP set + * length = the sum of all the length fields of the Rx descriptors that + * make up the current frame + * last_byte = the last byte of the frame DMAed by the hardware + * min_frame_size = the minimum frame length we want to accept. + * max_frame_size = the maximum frame length we want to accept. + * + * This macro is a conditional that should be used in the interrupt + * handler's Rx processing routine when RxErrors have been detected. + * + * Typical use: + * ... + * if (TBI_ACCEPT) { + * accept_frame = true; + * e1000_tbi_adjust_stats(adapter, MacAddress); + * frame_length--; + * } else { + * accept_frame = false; + * } + * ... + */ + +/* The carrier extension symbol, as received by the NIC. 
*/ +#define CARRIER_EXTENSION 0x0F + +#define TBI_ACCEPT(a, status, errors, length, last_byte, \ + min_frame_size, max_frame_size) \ + (e1000_tbi_sbp_enabled_82543(a) && \ + (((errors) & E1000_RXD_ERR_FRAME_ERR_MASK) == E1000_RXD_ERR_CE) && \ + ((last_byte) == CARRIER_EXTENSION) && \ + (((status) & E1000_RXD_STAT_VP) ? \ + (((length) > ((min_frame_size) - VLAN_TAG_SIZE)) && \ + ((length) <= ((max_frame_size) + 1))) : \ + (((length) > (min_frame_size)) && \ + ((length) <= ((max_frame_size) + VLAN_TAG_SIZE + 1))))) + +#define E1000_MAX(a, b) ((a) > (b) ? (a) : (b)) +#define E1000_DIVIDE_ROUND_UP(a, b) (((a) + (b) - 1) / (b)) /* ceil(a/b) */ +#endif /* _E1000_API_H_ */ diff --git a/src/spdk/dpdk/drivers/net/e1000/base/e1000_defines.h b/src/spdk/dpdk/drivers/net/e1000/base/e1000_defines.h new file mode 100644 index 000000000..8831da7ca --- /dev/null +++ b/src/spdk/dpdk/drivers/net/e1000/base/e1000_defines.h @@ -0,0 +1,1485 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001 - 2015 Intel Corporation + */ + +#ifndef _E1000_DEFINES_H_ +#define _E1000_DEFINES_H_ + +/* Number of Transmit and Receive Descriptors must be a multiple of 8 */ +#define REQ_TX_DESCRIPTOR_MULTIPLE 8 +#define REQ_RX_DESCRIPTOR_MULTIPLE 8 + +/* Definitions for power management and wakeup registers */ +/* Wake Up Control */ +#define E1000_WUC_APME 0x00000001 /* APM Enable */ +#define E1000_WUC_PME_EN 0x00000002 /* PME Enable */ +#define E1000_WUC_PME_STATUS 0x00000004 /* PME Status */ +#define E1000_WUC_APMPME 0x00000008 /* Assert PME on APM Wakeup */ +#define E1000_WUC_PHY_WAKE 0x00000100 /* if PHY supports wakeup */ + +/* Wake Up Filter Control */ +#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ +#define E1000_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */ +#define E1000_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ +#define E1000_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */ +#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ +#define E1000_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */ +#define E1000_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */ +#define E1000_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */ + +/* Wake Up Status */ +#define E1000_WUS_LNKC E1000_WUFC_LNKC +#define E1000_WUS_MAG E1000_WUFC_MAG +#define E1000_WUS_EX E1000_WUFC_EX +#define E1000_WUS_MC E1000_WUFC_MC +#define E1000_WUS_BC E1000_WUFC_BC + +/* Extended Device Control */ +#define E1000_CTRL_EXT_LPCD 0x00000004 /* LCD Power Cycle Done */ +#define E1000_CTRL_EXT_SDP4_DATA 0x00000010 /* SW Definable Pin 4 data */ +#define E1000_CTRL_EXT_SDP6_DATA 0x00000040 /* SW Definable Pin 6 data */ +#define E1000_CTRL_EXT_SDP3_DATA 0x00000080 /* SW Definable Pin 3 data */ +/* SDP 4/5 (bits 8,9) are reserved in >= 82575 */ +#define E1000_CTRL_EXT_SDP4_DIR 0x00000100 /* Direction of SDP4 0=in 1=out */ +#define E1000_CTRL_EXT_SDP6_DIR 0x00000400 /* Direction of SDP6 0=in 1=out */ +#define E1000_CTRL_EXT_SDP3_DIR 0x00000800 /* Direction of SDP3 0=in 1=out */ +#define E1000_CTRL_EXT_FORCE_SMBUS 0x00000800 /* Force SMBus mode */ +#define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */ +/* Physical Func Reset Done Indication */ +#define E1000_CTRL_EXT_PFRSTD 0x00004000 +#define E1000_CTRL_EXT_SDLPE 0X00040000 /* SerDes Low Power Enable */ +#define E1000_CTRL_EXT_SPD_BYPS 0x00008000 /* Speed Select Bypass */ +#define E1000_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */ +#define E1000_CTRL_EXT_DMA_DYN_CLK_EN 0x00080000 /* DMA Dynamic 
Clk Gating */ +#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000 +/* Offset of the link mode field in Ctrl Ext register */ +#define E1000_CTRL_EXT_LINK_MODE_OFFSET 22 +#define E1000_CTRL_EXT_LINK_MODE_1000BASE_KX 0x00400000 +#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000 +#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000 +#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000 +#define E1000_CTRL_EXT_EIAME 0x01000000 +#define E1000_CTRL_EXT_IRCA 0x00000001 +#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Drv loaded bit for FW */ +#define E1000_CTRL_EXT_IAME 0x08000000 /* Int ACK Auto-mask */ +#define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */ +#define E1000_CTRL_EXT_LSECCK 0x00001000 +#define E1000_CTRL_EXT_PHYPDEN 0x00100000 +#define E1000_I2CCMD_REG_ADDR_SHIFT 16 +#define E1000_I2CCMD_PHY_ADDR_SHIFT 24 +#define E1000_I2CCMD_OPCODE_READ 0x08000000 +#define E1000_I2CCMD_OPCODE_WRITE 0x00000000 +#define E1000_I2CCMD_READY 0x20000000 +#define E1000_I2CCMD_ERROR 0x80000000 +#define E1000_I2CCMD_SFP_DATA_ADDR(a) (0x0000 + (a)) +#define E1000_I2CCMD_SFP_DIAG_ADDR(a) (0x0100 + (a)) +#define E1000_MAX_SGMII_PHY_REG_ADDR 255 +#define E1000_I2CCMD_PHY_TIMEOUT 200 +#define E1000_IVAR_VALID 0x80 +#define E1000_GPIE_NSICR 0x00000001 +#define E1000_GPIE_MSIX_MODE 0x00000010 +#define E1000_GPIE_EIAME 0x40000000 +#define E1000_GPIE_PBA 0x80000000 + +/* Receive Descriptor bit definitions */ +#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */ +#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */ +#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */ +#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ +#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ +#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ +#define E1000_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ +#define E1000_RXD_STAT_PIF 0x80 /* passed in-exact filter */ +#define E1000_RXD_STAT_IPIDV 0x200 /* IP identification valid */ +#define E1000_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */ +#define E1000_RXD_STAT_DYNINT 0x800 /* Pkt caused INT via DYNINT */ +#define E1000_RXD_ERR_CE 0x01 /* CRC Error */ +#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */ +#define E1000_RXD_ERR_SEQ 0x04 /* Sequence Error */ +#define E1000_RXD_ERR_CXE 0x10 /* Carrier Extension Error */ +#define E1000_RXD_ERR_TCPE 0x20 /* TCP/UDP Checksum Error */ +#define E1000_RXD_ERR_IPE 0x40 /* IP Checksum Error */ +#define E1000_RXD_ERR_RXE 0x80 /* Rx Data Error */ +#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ + +#define E1000_RXDEXT_STATERR_TST 0x00000100 /* Time Stamp taken */ +#define E1000_RXDEXT_STATERR_LB 0x00040000 +#define E1000_RXDEXT_STATERR_CE 0x01000000 +#define E1000_RXDEXT_STATERR_SE 0x02000000 +#define E1000_RXDEXT_STATERR_SEQ 0x04000000 +#define E1000_RXDEXT_STATERR_CXE 0x10000000 +#define E1000_RXDEXT_STATERR_TCPE 0x20000000 +#define E1000_RXDEXT_STATERR_IPE 0x40000000 +#define E1000_RXDEXT_STATERR_RXE 0x80000000 + +/* mask to determine if packets should be dropped due to frame errors */ +#define E1000_RXD_ERR_FRAME_ERR_MASK ( \ + E1000_RXD_ERR_CE | \ + E1000_RXD_ERR_SE | \ + E1000_RXD_ERR_SEQ | \ + E1000_RXD_ERR_CXE | \ + E1000_RXD_ERR_RXE) + +/* Same mask, but for extended and packet split descriptors */ +#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \ + E1000_RXDEXT_STATERR_CE | \ + E1000_RXDEXT_STATERR_SE | \ + E1000_RXDEXT_STATERR_SEQ | \ + E1000_RXDEXT_STATERR_CXE | \ + E1000_RXDEXT_STATERR_RXE) + +#if !defined(EXTERNAL_RELEASE) || defined(E1000E_MQ) +#define E1000_MRQC_ENABLE_RSS_2Q 0x00000001 +#endif /* 
!EXTERNAL_RELEASE || E1000E_MQ */ +#define E1000_MRQC_RSS_FIELD_MASK 0xFFFF0000 +#define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000 +#define E1000_MRQC_RSS_FIELD_IPV4 0x00020000 +#define E1000_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000 +#define E1000_MRQC_RSS_FIELD_IPV6 0x00100000 +#define E1000_MRQC_RSS_FIELD_IPV6_TCP 0x00200000 + +#define E1000_RXDPS_HDRSTAT_HDRSP 0x00008000 + +/* Management Control */ +#define E1000_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */ +#define E1000_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */ +#define E1000_MANC_ARP_EN 0x00002000 /* Enable ARP Request Filtering */ +#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */ +#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */ +/* Enable MAC address filtering */ +#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000 +/* Enable MNG packets to host memory */ +#define E1000_MANC_EN_MNG2HOST 0x00200000 + +#define E1000_MANC2H_PORT_623 0x00000020 /* Port 0x26f */ +#define E1000_MANC2H_PORT_664 0x00000040 /* Port 0x298 */ +#define E1000_MDEF_PORT_623 0x00000800 /* Port 0x26f */ +#define E1000_MDEF_PORT_664 0x00000400 /* Port 0x298 */ + +/* Receive Control */ +#define E1000_RCTL_RST 0x00000001 /* Software reset */ +#define E1000_RCTL_EN 0x00000002 /* enable */ +#define E1000_RCTL_SBP 0x00000004 /* store bad packet */ +#define E1000_RCTL_UPE 0x00000008 /* unicast promisc enable */ +#define E1000_RCTL_MPE 0x00000010 /* multicast promisc enable */ +#define E1000_RCTL_LPE 0x00000020 /* long packet enable */ +#define E1000_RCTL_LBM_NO 0x00000000 /* no loopback mode */ +#define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */ +#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */ +#define E1000_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */ +#define E1000_RCTL_RDMTS_HALF 0x00000000 /* Rx desc min thresh size */ +#define E1000_RCTL_RDMTS_HEX 0x00010000 +#define E1000_RCTL_RDMTS1_HEX E1000_RCTL_RDMTS_HEX +#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */ +#define E1000_RCTL_MO_3 0x00003000 /* multicast offset 15:4 */ +#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */ +/* these buffer sizes are valid if E1000_RCTL_BSEX is 0 */ +#define E1000_RCTL_SZ_2048 0x00000000 /* Rx buffer size 2048 */ +#define E1000_RCTL_SZ_1024 0x00010000 /* Rx buffer size 1024 */ +#define E1000_RCTL_SZ_512 0x00020000 /* Rx buffer size 512 */ +#define E1000_RCTL_SZ_256 0x00030000 /* Rx buffer size 256 */ +/* these buffer sizes are valid if E1000_RCTL_BSEX is 1 */ +#define E1000_RCTL_SZ_16384 0x00010000 /* Rx buffer size 16384 */ +#define E1000_RCTL_SZ_8192 0x00020000 /* Rx buffer size 8192 */ +#define E1000_RCTL_SZ_4096 0x00030000 /* Rx buffer size 4096 */ +#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */ +#define E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */ +#define E1000_RCTL_CFI 0x00100000 /* canonical form indicator */ +#define E1000_RCTL_DPF 0x00400000 /* discard pause frames */ +#define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */ +#define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */ +#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */ + +/* Use byte values for the following shift parameters + * Usage: + * psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) & + * E1000_PSRCTL_BSIZE0_MASK) | + * ((ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) & + * E1000_PSRCTL_BSIZE1_MASK) | + * ((ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) & + * E1000_PSRCTL_BSIZE2_MASK) | + * ((ROUNDUP(value3, 1024) << 
E1000_PSRCTL_BSIZE3_SHIFT) |; + * E1000_PSRCTL_BSIZE3_MASK)) + * where value0 = [128..16256], default=256 + * value1 = [1024..64512], default=4096 + * value2 = [0..64512], default=4096 + * value3 = [0..64512], default=0 + */ + +#define E1000_PSRCTL_BSIZE0_MASK 0x0000007F +#define E1000_PSRCTL_BSIZE1_MASK 0x00003F00 +#define E1000_PSRCTL_BSIZE2_MASK 0x003F0000 +#define E1000_PSRCTL_BSIZE3_MASK 0x3F000000 + +#define E1000_PSRCTL_BSIZE0_SHIFT 7 /* Shift _right_ 7 */ +#define E1000_PSRCTL_BSIZE1_SHIFT 2 /* Shift _right_ 2 */ +#define E1000_PSRCTL_BSIZE2_SHIFT 6 /* Shift _left_ 6 */ +#define E1000_PSRCTL_BSIZE3_SHIFT 14 /* Shift _left_ 14 */ + +/* SWFW_SYNC Definitions */ +#define E1000_SWFW_EEP_SM 0x01 +#define E1000_SWFW_PHY0_SM 0x02 +#define E1000_SWFW_PHY1_SM 0x04 +#define E1000_SWFW_CSR_SM 0x08 +#define E1000_SWFW_PHY2_SM 0x20 +#define E1000_SWFW_PHY3_SM 0x40 +#define E1000_SWFW_SW_MNG_SM 0x400 + +/* Device Control */ +#define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */ +#define E1000_CTRL_PRIOR 0x00000004 /* Priority on PCI. 0=rx,1=fair */ +#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /*Blocks new Master reqs */ +#define E1000_CTRL_LRST 0x00000008 /* Link reset. 0=normal,1=reset */ +#define E1000_CTRL_ASDE 0x00000020 /* Auto-speed detect enable */ +#define E1000_CTRL_SLU 0x00000040 /* Set link up (Force Link) */ +#define E1000_CTRL_ILOS 0x00000080 /* Invert Loss-Of Signal */ +#define E1000_CTRL_SPD_SEL 0x00000300 /* Speed Select Mask */ +#define E1000_CTRL_SPD_10 0x00000000 /* Force 10Mb */ +#define E1000_CTRL_SPD_100 0x00000100 /* Force 100Mb */ +#define E1000_CTRL_SPD_1000 0x00000200 /* Force 1Gb */ +#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */ +#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */ +#define E1000_CTRL_LANPHYPC_OVERRIDE 0x00010000 /* SW control of LANPHYPC */ +#define E1000_CTRL_LANPHYPC_VALUE 0x00020000 /* SW value of LANPHYPC */ +#define E1000_CTRL_MEHE 0x00080000 /* Memory Error Handling Enable */ +#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ +#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ +#define E1000_CTRL_SWDPIN2 0x00100000 /* SWDPIN 2 value */ +#define E1000_CTRL_ADVD3WUC 0x00100000 /* D3 WUC */ +#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 /* PHY PM enable */ +#define E1000_CTRL_SWDPIN3 0x00200000 /* SWDPIN 3 value */ +#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */ +#define E1000_CTRL_SWDPIO2 0x01000000 /* SWDPIN 2 input or output */ +#define E1000_CTRL_SWDPIO3 0x02000000 /* SWDPIN 3 input or output */ +#define E1000_CTRL_RST 0x04000000 /* Global reset */ +#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */ +#define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable */ +#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */ +#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */ +#define E1000_CTRL_I2C_ENA 0x02000000 /* I2C enable */ + +#define E1000_CTRL_MDIO_DIR E1000_CTRL_SWDPIO2 +#define E1000_CTRL_MDIO E1000_CTRL_SWDPIN2 +#define E1000_CTRL_MDC_DIR E1000_CTRL_SWDPIO3 +#define E1000_CTRL_MDC E1000_CTRL_SWDPIN3 + +#define E1000_CONNSW_ENRGSRC 0x4 +#define E1000_CONNSW_PHYSD 0x400 +#define E1000_CONNSW_PHY_PDN 0x800 +#define E1000_CONNSW_SERDESD 0x200 +#define E1000_CONNSW_AUTOSENSE_CONF 0x2 +#define E1000_CONNSW_AUTOSENSE_EN 0x1 +#define E1000_PCS_CFG_PCS_EN 8 +#define E1000_PCS_LCTL_FLV_LINK_UP 1 +#define E1000_PCS_LCTL_FSV_10 0 +#define E1000_PCS_LCTL_FSV_100 2 +#define E1000_PCS_LCTL_FSV_1000 4 +#define E1000_PCS_LCTL_FDV_FULL 8 +#define 
E1000_PCS_LCTL_FSD 0x10 +#define E1000_PCS_LCTL_FORCE_LINK 0x20 +#define E1000_PCS_LCTL_FORCE_FCTRL 0x80 +#define E1000_PCS_LCTL_AN_ENABLE 0x10000 +#define E1000_PCS_LCTL_AN_RESTART 0x20000 +#define E1000_PCS_LCTL_AN_TIMEOUT 0x40000 +#define E1000_ENABLE_SERDES_LOOPBACK 0x0410 + +#define E1000_PCS_LSTS_LINK_OK 1 +#define E1000_PCS_LSTS_SPEED_100 2 +#define E1000_PCS_LSTS_SPEED_1000 4 +#define E1000_PCS_LSTS_DUPLEX_FULL 8 +#define E1000_PCS_LSTS_SYNK_OK 0x10 +#define E1000_PCS_LSTS_AN_COMPLETE 0x10000 + +/* Device Status */ +#define E1000_STATUS_FD 0x00000001 /* Duplex 0=half 1=full */ +#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */ +#define E1000_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */ +#define E1000_STATUS_FUNC_SHIFT 2 +#define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */ +#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */ +#define E1000_STATUS_SPEED_MASK 0x000000C0 +#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */ +#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ +#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ +#define E1000_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Compltn by NVM */ +#define E1000_STATUS_PHYRA 0x00000400 /* PHY Reset Asserted */ +#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Master request status */ +#define E1000_STATUS_PCI66 0x00000800 /* In 66Mhz slot */ +#define E1000_STATUS_BUS64 0x00001000 /* In 64 bit slot */ +#define E1000_STATUS_2P5_SKU 0x00001000 /* Val of 2.5GBE SKU strap */ +#define E1000_STATUS_2P5_SKU_OVER 0x00002000 /* Val of 2.5GBE SKU Over */ +#define E1000_STATUS_PCIX_MODE 0x00002000 /* PCI-X mode */ +#define E1000_STATUS_PCIX_SPEED 0x0000C000 /* PCI-X bus speed */ + +/* Constants used to interpret the masked PCI-X bus speed. */ +#define E1000_STATUS_PCIX_SPEED_66 0x00000000 /* PCI-X bus spd 50-66MHz */ +#define E1000_STATUS_PCIX_SPEED_100 0x00004000 /* PCI-X bus spd 66-100MHz */ +#define E1000_STATUS_PCIX_SPEED_133 0x00008000 /* PCI-X bus spd 100-133MHz*/ + +#define SPEED_10 10 +#define SPEED_100 100 +#define SPEED_1000 1000 +#define SPEED_2500 2500 +#define HALF_DUPLEX 1 +#define FULL_DUPLEX 2 + +#define PHY_FORCE_TIME 20 + +#define ADVERTISE_10_HALF 0x0001 +#define ADVERTISE_10_FULL 0x0002 +#define ADVERTISE_100_HALF 0x0004 +#define ADVERTISE_100_FULL 0x0008 +#define ADVERTISE_1000_HALF 0x0010 /* Not used, just FYI */ +#define ADVERTISE_1000_FULL 0x0020 + +/* 1000/H is not supported, nor spec-compliant. 
*/ +#define E1000_ALL_SPEED_DUPLEX ( \ + ADVERTISE_10_HALF | ADVERTISE_10_FULL | ADVERTISE_100_HALF | \ + ADVERTISE_100_FULL | ADVERTISE_1000_FULL) +#define E1000_ALL_NOT_GIG ( \ + ADVERTISE_10_HALF | ADVERTISE_10_FULL | ADVERTISE_100_HALF | \ + ADVERTISE_100_FULL) +#define E1000_ALL_100_SPEED (ADVERTISE_100_HALF | ADVERTISE_100_FULL) +#define E1000_ALL_10_SPEED (ADVERTISE_10_HALF | ADVERTISE_10_FULL) +#define E1000_ALL_HALF_DUPLEX (ADVERTISE_10_HALF | ADVERTISE_100_HALF) + +#define AUTONEG_ADVERTISE_SPEED_DEFAULT E1000_ALL_SPEED_DUPLEX + +/* LED Control */ +#define E1000_PHY_LED0_MODE_MASK 0x00000007 +#define E1000_PHY_LED0_IVRT 0x00000008 +#define E1000_PHY_LED0_MASK 0x0000001F + +#define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F +#define E1000_LEDCTL_LED0_MODE_SHIFT 0 +#define E1000_LEDCTL_LED0_IVRT 0x00000040 +#define E1000_LEDCTL_LED0_BLINK 0x00000080 + +#define E1000_LEDCTL_MODE_LINK_UP 0x2 +#define E1000_LEDCTL_MODE_LED_ON 0xE +#define E1000_LEDCTL_MODE_LED_OFF 0xF + +/* Transmit Descriptor bit definitions */ +#define E1000_TXD_DTYP_D 0x00100000 /* Data Descriptor */ +#define E1000_TXD_DTYP_C 0x00000000 /* Context Descriptor */ +#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ +#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ +#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */ +#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define E1000_TXD_CMD_IC 0x04000000 /* Insert Checksum */ +#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */ +#define E1000_TXD_CMD_RPS 0x10000000 /* Report Packet Sent */ +#define E1000_TXD_CMD_DEXT 0x20000000 /* Desc extension (0 = legacy) */ +#define E1000_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */ +#define E1000_TXD_CMD_IDE 0x80000000 /* Enable Tidv register */ +#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */ +#define E1000_TXD_STAT_EC 0x00000002 /* Excess Collisions */ +#define E1000_TXD_STAT_LC 0x00000004 /* Late Collisions */ +#define E1000_TXD_STAT_TU 0x00000008 /* Transmit underrun */ +#define E1000_TXD_CMD_TCP 0x01000000 /* TCP packet */ +#define E1000_TXD_CMD_IP 0x02000000 /* IP packet */ +#define E1000_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */ +#define E1000_TXD_STAT_TC 0x00000004 /* Tx Underrun */ +#define E1000_TXD_EXTCMD_TSTAMP 0x00000010 /* IEEE1588 Timestamp packet */ + +/* Transmit Control */ +#define E1000_TCTL_EN 0x00000002 /* enable Tx */ +#define E1000_TCTL_PSP 0x00000008 /* pad short packets */ +#define E1000_TCTL_CT 0x00000ff0 /* collision threshold */ +#define E1000_TCTL_COLD 0x003ff000 /* collision distance */ +#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */ +#define E1000_TCTL_MULR 0x10000000 /* Multiple request support */ + +/* Transmit Arbitration Count */ +#define E1000_TARC0_ENABLE 0x00000400 /* Enable Tx Queue 0 */ + +/* SerDes Control */ +#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400 +#define E1000_SCTL_ENABLE_SERDES_LOOPBACK 0x0410 + +/* Receive Checksum Control */ +#define E1000_RXCSUM_IPOFL 0x00000100 /* IPv4 checksum offload */ +#define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */ +#define E1000_RXCSUM_CRCOFL 0x00000800 /* CRC32 offload enable */ +#define E1000_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */ +#define E1000_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */ + +/* Header split receive */ +#define E1000_RFCTL_NFSW_DIS 0x00000040 +#define E1000_RFCTL_NFSR_DIS 0x00000080 +#define E1000_RFCTL_ACK_DIS 0x00001000 +#define E1000_RFCTL_EXTEN 0x00008000 +#define E1000_RFCTL_IPV6_EX_DIS 
0x00010000 +#define E1000_RFCTL_NEW_IPV6_EXT_DIS 0x00020000 +#define E1000_RFCTL_LEF 0x00040000 + +/* Collision related configuration parameters */ +#define E1000_COLLISION_THRESHOLD 15 +#define E1000_CT_SHIFT 4 +#define E1000_COLLISION_DISTANCE 63 +#define E1000_COLD_SHIFT 12 + +/* Default values for the transmit IPG register */ +#define DEFAULT_82542_TIPG_IPGT 10 +#define DEFAULT_82543_TIPG_IPGT_FIBER 9 +#define DEFAULT_82543_TIPG_IPGT_COPPER 8 + +#define E1000_TIPG_IPGT_MASK 0x000003FF + +#define DEFAULT_82542_TIPG_IPGR1 2 +#define DEFAULT_82543_TIPG_IPGR1 8 +#define E1000_TIPG_IPGR1_SHIFT 10 + +#define DEFAULT_82542_TIPG_IPGR2 10 +#define DEFAULT_82543_TIPG_IPGR2 6 +#define DEFAULT_80003ES2LAN_TIPG_IPGR2 7 +#define E1000_TIPG_IPGR2_SHIFT 20 + +/* Ethertype field values */ +#define ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.3ac packet */ + +#define ETHERNET_FCS_SIZE 4 +#define MAX_JUMBO_FRAME_SIZE 0x3F00 +/* The datasheet maximum supported RX size is 9.5KB (9728 bytes) */ +#define MAX_RX_JUMBO_FRAME_SIZE 0x2600 +#define E1000_TX_PTR_GAP 0x1F + +/* Extended Configuration Control and Size */ +#define E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP 0x00000020 +#define E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE 0x00000001 +#define E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE 0x00000008 +#define E1000_EXTCNF_CTRL_SWFLAG 0x00000020 +#define E1000_EXTCNF_CTRL_GATE_PHY_CFG 0x00000080 +#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK 0x00FF0000 +#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT 16 +#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK 0x0FFF0000 +#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT 16 + +#define E1000_PHY_CTRL_D0A_LPLU 0x00000002 +#define E1000_PHY_CTRL_NOND0A_LPLU 0x00000004 +#define E1000_PHY_CTRL_NOND0A_GBE_DISABLE 0x00000008 +#define E1000_PHY_CTRL_GBE_DISABLE 0x00000040 + +#define E1000_KABGTXD_BGSQLBIAS 0x00050000 + +/* Low Power IDLE Control */ +#define E1000_LPIC_LPIET_SHIFT 24 /* Low Power Idle Entry Time */ + +/* PBA constants */ +#define E1000_PBA_8K 0x0008 /* 8KB */ +#define E1000_PBA_10K 0x000A /* 10KB */ +#define E1000_PBA_12K 0x000C /* 12KB */ +#define E1000_PBA_14K 0x000E /* 14KB */ +#define E1000_PBA_16K 0x0010 /* 16KB */ +#define E1000_PBA_18K 0x0012 +#define E1000_PBA_20K 0x0014 +#define E1000_PBA_22K 0x0016 +#define E1000_PBA_24K 0x0018 +#define E1000_PBA_26K 0x001A +#define E1000_PBA_30K 0x001E +#define E1000_PBA_32K 0x0020 +#define E1000_PBA_34K 0x0022 +#define E1000_PBA_35K 0x0023 +#define E1000_PBA_38K 0x0026 +#define E1000_PBA_40K 0x0028 +#define E1000_PBA_48K 0x0030 /* 48KB */ +#define E1000_PBA_64K 0x0040 /* 64KB */ + +#define E1000_PBA_RXA_MASK 0xFFFF + +#define E1000_PBS_16K E1000_PBA_16K + +/* Uncorrectable/correctable ECC Error counts and enable bits */ +#define E1000_PBECCSTS_CORR_ERR_CNT_MASK 0x000000FF +#define E1000_PBECCSTS_UNCORR_ERR_CNT_MASK 0x0000FF00 +#define E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT 8 +#define E1000_PBECCSTS_ECC_ENABLE 0x00010000 + +#define IFS_MAX 80 +#define IFS_MIN 40 +#define IFS_RATIO 4 +#define IFS_STEP 10 +#define MIN_NUM_XMITS 1000 + +/* SW Semaphore Register */ +#define E1000_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ +#define E1000_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ +#define E1000_SWSM_DRV_LOAD 0x00000008 /* Driver Loaded Bit */ + +#define E1000_SWSM2_LOCK 0x00000002 /* Secondary driver semaphore bit */ + +/* Interrupt Cause Read */ +#define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */ +#define E1000_ICR_TXQE 0x00000002 /* Transmit Queue empty */ +#define E1000_ICR_LSC 0x00000004 /* Link Status Change */ +#define 
E1000_ICR_RXSEQ 0x00000008 /* Rx sequence error */ +#define E1000_ICR_RXDMT0 0x00000010 /* Rx desc min. threshold (0) */ +#define E1000_ICR_RXO 0x00000040 /* Rx overrun */ +#define E1000_ICR_RXT0 0x00000080 /* Rx timer intr (ring 0) */ +#define E1000_ICR_VMMB 0x00000100 /* VM MB event */ +#define E1000_ICR_RXCFG 0x00000400 /* Rx /c/ ordered set */ +#define E1000_ICR_GPI_EN0 0x00000800 /* GP Int 0 */ +#define E1000_ICR_GPI_EN1 0x00001000 /* GP Int 1 */ +#define E1000_ICR_GPI_EN2 0x00002000 /* GP Int 2 */ +#define E1000_ICR_GPI_EN3 0x00004000 /* GP Int 3 */ +#define E1000_ICR_TXD_LOW 0x00008000 +#define E1000_ICR_MNG 0x00040000 /* Manageability event */ +#define E1000_ICR_ECCER 0x00400000 /* Uncorrectable ECC Error */ +#define E1000_ICR_TS 0x00080000 /* Time Sync Interrupt */ +#define E1000_ICR_DRSTA 0x40000000 /* Device Reset Asserted */ +/* If this bit asserted, the driver should claim the interrupt */ +#define E1000_ICR_INT_ASSERTED 0x80000000 +#define E1000_ICR_DOUTSYNC 0x10000000 /* NIC DMA out of sync */ +#define E1000_ICR_RXQ0 0x00100000 /* Rx Queue 0 Interrupt */ +#define E1000_ICR_RXQ1 0x00200000 /* Rx Queue 1 Interrupt */ +#define E1000_ICR_TXQ0 0x00400000 /* Tx Queue 0 Interrupt */ +#define E1000_ICR_TXQ1 0x00800000 /* Tx Queue 1 Interrupt */ +#define E1000_ICR_OTHER 0x01000000 /* Other Interrupts */ +#define E1000_ICR_FER 0x00400000 /* Fatal Error */ + +#define E1000_ICR_THS 0x00800000 /* ICR.THS: Thermal Sensor Event*/ +#define E1000_ICR_MDDET 0x10000000 /* Malicious Driver Detect */ + +/* PBA ECC Register */ +#define E1000_PBA_ECC_COUNTER_MASK 0xFFF00000 /* ECC counter mask */ +#define E1000_PBA_ECC_COUNTER_SHIFT 20 /* ECC counter shift value */ +#define E1000_PBA_ECC_CORR_EN 0x00000001 /* Enable ECC error correction */ +#define E1000_PBA_ECC_STAT_CLR 0x00000002 /* Clear ECC error counter */ +#define E1000_PBA_ECC_INT_EN 0x00000004 /* Enable ICR bit 5 on ECC error */ + +/* Extended Interrupt Cause Read */ +#define E1000_EICR_RX_QUEUE0 0x00000001 /* Rx Queue 0 Interrupt */ +#define E1000_EICR_RX_QUEUE1 0x00000002 /* Rx Queue 1 Interrupt */ +#define E1000_EICR_RX_QUEUE2 0x00000004 /* Rx Queue 2 Interrupt */ +#define E1000_EICR_RX_QUEUE3 0x00000008 /* Rx Queue 3 Interrupt */ +#define E1000_EICR_TX_QUEUE0 0x00000100 /* Tx Queue 0 Interrupt */ +#define E1000_EICR_TX_QUEUE1 0x00000200 /* Tx Queue 1 Interrupt */ +#define E1000_EICR_TX_QUEUE2 0x00000400 /* Tx Queue 2 Interrupt */ +#define E1000_EICR_TX_QUEUE3 0x00000800 /* Tx Queue 3 Interrupt */ +#define E1000_EICR_TCP_TIMER 0x40000000 /* TCP Timer */ +#define E1000_EICR_OTHER 0x80000000 /* Interrupt Cause Active */ +/* TCP Timer */ +#define E1000_TCPTIMER_KS 0x00000100 /* KickStart */ +#define E1000_TCPTIMER_COUNT_ENABLE 0x00000200 /* Count Enable */ +#define E1000_TCPTIMER_COUNT_FINISH 0x00000400 /* Count finish */ +#define E1000_TCPTIMER_LOOP 0x00000800 /* Loop */ + +/* This defines the bits that are set in the Interrupt Mask + * Set/Read Register. 
Each bit is documented below: + * o RXT0 = Receiver Timer Interrupt (ring 0) + * o TXDW = Transmit Descriptor Written Back + * o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0) + * o RXSEQ = Receive Sequence Error + * o LSC = Link Status Change + */ +#define IMS_ENABLE_MASK ( \ + E1000_IMS_RXT0 | \ + E1000_IMS_TXDW | \ + E1000_IMS_RXDMT0 | \ + E1000_IMS_RXSEQ | \ + E1000_IMS_LSC) + +/* Interrupt Mask Set */ +#define E1000_IMS_TXDW E1000_ICR_TXDW /* Tx desc written back */ +#define E1000_IMS_TXQE E1000_ICR_TXQE /* Transmit Queue empty */ +#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */ +#define E1000_IMS_VMMB E1000_ICR_VMMB /* Mail box activity */ +#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */ +#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. threshold */ +#define E1000_IMS_RXO E1000_ICR_RXO /* Rx overrun */ +#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* Rx timer intr */ +#define E1000_IMS_TXD_LOW E1000_ICR_TXD_LOW +#define E1000_IMS_ECCER E1000_ICR_ECCER /* Uncorrectable ECC Error */ +#define E1000_IMS_TS E1000_ICR_TS /* Time Sync Interrupt */ +#define E1000_IMS_DRSTA E1000_ICR_DRSTA /* Device Reset Asserted */ +#define E1000_IMS_DOUTSYNC E1000_ICR_DOUTSYNC /* NIC DMA out of sync */ +#define E1000_IMS_RXQ0 E1000_ICR_RXQ0 /* Rx Queue 0 Interrupt */ +#define E1000_IMS_RXQ1 E1000_ICR_RXQ1 /* Rx Queue 1 Interrupt */ +#define E1000_IMS_TXQ0 E1000_ICR_TXQ0 /* Tx Queue 0 Interrupt */ +#define E1000_IMS_TXQ1 E1000_ICR_TXQ1 /* Tx Queue 1 Interrupt */ +#define E1000_IMS_OTHER E1000_ICR_OTHER /* Other Interrupts */ +#define E1000_IMS_FER E1000_ICR_FER /* Fatal Error */ + +#define E1000_IMS_THS E1000_ICR_THS /* ICR.TS: Thermal Sensor Event*/ +#define E1000_IMS_MDDET E1000_ICR_MDDET /* Malicious Driver Detect */ +/* Extended Interrupt Mask Set */ +#define E1000_EIMS_RX_QUEUE0 E1000_EICR_RX_QUEUE0 /* Rx Queue 0 Interrupt */ +#define E1000_EIMS_RX_QUEUE1 E1000_EICR_RX_QUEUE1 /* Rx Queue 1 Interrupt */ +#define E1000_EIMS_RX_QUEUE2 E1000_EICR_RX_QUEUE2 /* Rx Queue 2 Interrupt */ +#define E1000_EIMS_RX_QUEUE3 E1000_EICR_RX_QUEUE3 /* Rx Queue 3 Interrupt */ +#define E1000_EIMS_TX_QUEUE0 E1000_EICR_TX_QUEUE0 /* Tx Queue 0 Interrupt */ +#define E1000_EIMS_TX_QUEUE1 E1000_EICR_TX_QUEUE1 /* Tx Queue 1 Interrupt */ +#define E1000_EIMS_TX_QUEUE2 E1000_EICR_TX_QUEUE2 /* Tx Queue 2 Interrupt */ +#define E1000_EIMS_TX_QUEUE3 E1000_EICR_TX_QUEUE3 /* Tx Queue 3 Interrupt */ +#define E1000_EIMS_TCP_TIMER E1000_EICR_TCP_TIMER /* TCP Timer */ +#define E1000_EIMS_OTHER E1000_EICR_OTHER /* Interrupt Cause Active */ + +/* Interrupt Cause Set */ +#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */ +#define E1000_ICS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */ +#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. 
threshold */ + +/* Extended Interrupt Cause Set */ +#define E1000_EICS_RX_QUEUE0 E1000_EICR_RX_QUEUE0 /* Rx Queue 0 Interrupt */ +#define E1000_EICS_RX_QUEUE1 E1000_EICR_RX_QUEUE1 /* Rx Queue 1 Interrupt */ +#define E1000_EICS_RX_QUEUE2 E1000_EICR_RX_QUEUE2 /* Rx Queue 2 Interrupt */ +#define E1000_EICS_RX_QUEUE3 E1000_EICR_RX_QUEUE3 /* Rx Queue 3 Interrupt */ +#define E1000_EICS_TX_QUEUE0 E1000_EICR_TX_QUEUE0 /* Tx Queue 0 Interrupt */ +#define E1000_EICS_TX_QUEUE1 E1000_EICR_TX_QUEUE1 /* Tx Queue 1 Interrupt */ +#define E1000_EICS_TX_QUEUE2 E1000_EICR_TX_QUEUE2 /* Tx Queue 2 Interrupt */ +#define E1000_EICS_TX_QUEUE3 E1000_EICR_TX_QUEUE3 /* Tx Queue 3 Interrupt */ +#define E1000_EICS_TCP_TIMER E1000_EICR_TCP_TIMER /* TCP Timer */ +#define E1000_EICS_OTHER E1000_EICR_OTHER /* Interrupt Cause Active */ + +#define E1000_EITR_ITR_INT_MASK 0x0000FFFF +/* E1000_EITR_CNT_IGNR is only for 82576 and newer */ +#define E1000_EITR_CNT_IGNR 0x80000000 /* Don't reset counters on write */ +#define E1000_EITR_INTERVAL 0x00007FFC + +/* Transmit Descriptor Control */ +#define E1000_TXDCTL_PTHRESH 0x0000003F /* TXDCTL Prefetch Threshold */ +#define E1000_TXDCTL_HTHRESH 0x00003F00 /* TXDCTL Host Threshold */ +#define E1000_TXDCTL_WTHRESH 0x003F0000 /* TXDCTL Writeback Threshold */ +#define E1000_TXDCTL_GRAN 0x01000000 /* TXDCTL Granularity */ +#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */ +#define E1000_TXDCTL_MAX_TX_DESC_PREFETCH 0x0100001F /* GRAN=1, PTHRESH=31 */ +/* Enable the counting of descriptors still to be processed. */ +#define E1000_TXDCTL_COUNT_DESC 0x00400000 + +/* Flow Control Constants */ +#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001 +#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100 +#define FLOW_CONTROL_TYPE 0x8808 + +/* 802.1q VLAN Packet Size */ +#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMA'd) */ +#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */ + +/* Receive Address + * Number of high/low register pairs in the RAR. The RAR (Receive Address + * Registers) holds the directed and multicast addresses that we monitor. + * Technically, we have 16 spots. However, we reserve one of these spots + * (RAR[15]) for our directed address used by controllers with + * manageability enabled, allowing us room for 15 multicast addresses. 
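+ *
+ * Illustrative sketch only (not part of the upstream comment): the generic
+ * RAR-set helper in this driver family programs one RAL/RAH pair roughly as
+ * shown below, assuming the E1000_RAL()/E1000_RAH() register index macros
+ * from e1000_regs.h and the E1000_WRITE_REG() accessor from e1000_osdep.h.
+ *
+ *   rar_low  = addr[0] | (addr[1] << 8) | (addr[2] << 16) | (addr[3] << 24);
+ *   rar_high = addr[4] | (addr[5] << 8);
+ *   if (rar_low || rar_high)
+ *           rar_high |= E1000_RAH_AV;        (mark the entry as valid)
+ *   E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
+ *   E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);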
+ */ +#define E1000_RAR_ENTRIES 15 +#define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */ +#define E1000_RAL_MAC_ADDR_LEN 4 +#define E1000_RAH_MAC_ADDR_LEN 2 +#define E1000_RAH_QUEUE_MASK_82575 0x000C0000 +#define E1000_RAH_POOL_1 0x00040000 + +/* Error Codes */ +#define E1000_SUCCESS 0 +#define E1000_ERR_NVM 1 +#define E1000_ERR_PHY 2 +#define E1000_ERR_CONFIG 3 +#define E1000_ERR_PARAM 4 +#define E1000_ERR_MAC_INIT 5 +#define E1000_ERR_PHY_TYPE 6 +#define E1000_ERR_RESET 9 +#define E1000_ERR_MASTER_REQUESTS_PENDING 10 +#define E1000_ERR_HOST_INTERFACE_COMMAND 11 +#define E1000_BLK_PHY_RESET 12 +#define E1000_ERR_SWFW_SYNC 13 +#define E1000_NOT_IMPLEMENTED 14 +#define E1000_ERR_MBX 15 +#define E1000_ERR_INVALID_ARGUMENT 16 +#define E1000_ERR_NO_SPACE 17 +#define E1000_ERR_NVM_PBA_SECTION 18 +#define E1000_ERR_I2C 19 +#define E1000_ERR_INVM_VALUE_NOT_FOUND 20 + +/* Loop limit on how long we wait for auto-negotiation to complete */ +#define FIBER_LINK_UP_LIMIT 50 +#define COPPER_LINK_UP_LIMIT 10 +#define PHY_AUTO_NEG_LIMIT 45 +#define PHY_FORCE_LIMIT 20 +/* Number of 100 microseconds we wait for PCI Express master disable */ +#define MASTER_DISABLE_TIMEOUT 800 +/* Number of milliseconds we wait for PHY configuration done after MAC reset */ +#define PHY_CFG_TIMEOUT 100 +/* Number of 2 milliseconds we wait for acquiring MDIO ownership. */ +#define MDIO_OWNERSHIP_TIMEOUT 10 +/* Number of milliseconds for NVM auto read done after MAC reset. */ +#define AUTO_READ_DONE_TIMEOUT 10 + +/* Flow Control */ +#define E1000_FCRTH_RTH 0x0000FFF8 /* Mask Bits[15:3] for RTH */ +#define E1000_FCRTL_RTL 0x0000FFF8 /* Mask Bits[15:3] for RTL */ +#define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */ + +/* Transmit Configuration Word */ +#define E1000_TXCW_FD 0x00000020 /* TXCW full duplex */ +#define E1000_TXCW_PAUSE 0x00000080 /* TXCW sym pause request */ +#define E1000_TXCW_ASM_DIR 0x00000100 /* TXCW astm pause direction */ +#define E1000_TXCW_PAUSE_MASK 0x00000180 /* TXCW pause request mask */ +#define E1000_TXCW_ANE 0x80000000 /* Auto-neg enable */ + +/* Receive Configuration Word */ +#define E1000_RXCW_CW 0x0000ffff /* RxConfigWord mask */ +#define E1000_RXCW_IV 0x08000000 /* Receive config invalid */ +#define E1000_RXCW_C 0x20000000 /* Receive config */ +#define E1000_RXCW_SYNCH 0x40000000 /* Receive config synch */ + +#define E1000_TSYNCTXCTL_VALID 0x00000001 /* Tx timestamp valid */ +#define E1000_TSYNCTXCTL_ENABLED 0x00000010 /* enable Tx timestamping */ + +/* HH Time Sync */ +#define E1000_TSYNCTXCTL_MAX_ALLOWED_DLY_MASK 0x0000F000 /* max delay */ +#define E1000_TSYNCTXCTL_SYNC_COMP_ERR 0x20000000 /* sync err */ +#define E1000_TSYNCTXCTL_SYNC_COMP 0x40000000 /* sync complete */ +#define E1000_TSYNCTXCTL_START_SYNC 0x80000000 /* initiate sync */ + +#define E1000_TSYNCRXCTL_VALID 0x00000001 /* Rx timestamp valid */ +#define E1000_TSYNCRXCTL_TYPE_MASK 0x0000000E /* Rx type mask */ +#define E1000_TSYNCRXCTL_TYPE_L2_V2 0x00 +#define E1000_TSYNCRXCTL_TYPE_L4_V1 0x02 +#define E1000_TSYNCRXCTL_TYPE_L2_L4_V2 0x04 +#define E1000_TSYNCRXCTL_TYPE_ALL 0x08 +#define E1000_TSYNCRXCTL_TYPE_EVENT_V2 0x0A +#define E1000_TSYNCRXCTL_ENABLED 0x00000010 /* enable Rx timestamping */ +#define E1000_TSYNCRXCTL_SYSCFI 0x00000020 /* Sys clock frequency */ + +#define E1000_RXMTRL_PTP_V1_SYNC_MESSAGE 0x00000000 +#define E1000_RXMTRL_PTP_V1_DELAY_REQ_MESSAGE 0x00010000 + +#define E1000_RXMTRL_PTP_V2_SYNC_MESSAGE 0x00000000 +#define E1000_RXMTRL_PTP_V2_DELAY_REQ_MESSAGE 0x01000000 + +#define 
E1000_TSYNCRXCFG_PTP_V1_CTRLT_MASK 0x000000FF +#define E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE 0x00 +#define E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE 0x01 +#define E1000_TSYNCRXCFG_PTP_V1_FOLLOWUP_MESSAGE 0x02 +#define E1000_TSYNCRXCFG_PTP_V1_DELAY_RESP_MESSAGE 0x03 +#define E1000_TSYNCRXCFG_PTP_V1_MANAGEMENT_MESSAGE 0x04 + +#define E1000_TSYNCRXCFG_PTP_V2_MSGID_MASK 0x00000F00 +#define E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE 0x0000 +#define E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE 0x0100 +#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_REQ_MESSAGE 0x0200 +#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_RESP_MESSAGE 0x0300 +#define E1000_TSYNCRXCFG_PTP_V2_FOLLOWUP_MESSAGE 0x0800 +#define E1000_TSYNCRXCFG_PTP_V2_DELAY_RESP_MESSAGE 0x0900 +#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_FOLLOWUP_MESSAGE 0x0A00 +#define E1000_TSYNCRXCFG_PTP_V2_ANNOUNCE_MESSAGE 0x0B00 +#define E1000_TSYNCRXCFG_PTP_V2_SIGNALLING_MESSAGE 0x0C00 +#define E1000_TSYNCRXCFG_PTP_V2_MANAGEMENT_MESSAGE 0x0D00 + +#define E1000_TIMINCA_16NS_SHIFT 24 +#define E1000_TIMINCA_INCPERIOD_SHIFT 24 +#define E1000_TIMINCA_INCVALUE_MASK 0x00FFFFFF + +#define E1000_TSICR_TXTS 0x00000002 +#define E1000_TSIM_TXTS 0x00000002 +/* TUPLE Filtering Configuration */ +#define E1000_TTQF_DISABLE_MASK 0xF0008000 /* TTQF Disable Mask */ +#define E1000_TTQF_QUEUE_ENABLE 0x100 /* TTQF Queue Enable Bit */ +#define E1000_TTQF_PROTOCOL_MASK 0xFF /* TTQF Protocol Mask */ +/* TTQF TCP Bit, shift with E1000_TTQF_PROTOCOL SHIFT */ +#define E1000_TTQF_PROTOCOL_TCP 0x0 +/* TTQF UDP Bit, shift with E1000_TTQF_PROTOCOL_SHIFT */ +#define E1000_TTQF_PROTOCOL_UDP 0x1 +/* TTQF SCTP Bit, shift with E1000_TTQF_PROTOCOL_SHIFT */ +#define E1000_TTQF_PROTOCOL_SCTP 0x2 +#define E1000_TTQF_PROTOCOL_SHIFT 5 /* TTQF Protocol Shift */ +#define E1000_TTQF_QUEUE_SHIFT 16 /* TTQF Queue Shfit */ +#define E1000_TTQF_RX_QUEUE_MASK 0x70000 /* TTQF Queue Mask */ +#define E1000_TTQF_MASK_ENABLE 0x10000000 /* TTQF Mask Enable Bit */ +#define E1000_IMIR_CLEAR_MASK 0xF001FFFF /* IMIR Reg Clear Mask */ +#define E1000_IMIR_PORT_BYPASS 0x20000 /* IMIR Port Bypass Bit */ +#define E1000_IMIR_PRIORITY_SHIFT 29 /* IMIR Priority Shift */ +#define E1000_IMIREXT_CLEAR_MASK 0x7FFFF /* IMIREXT Reg Clear Mask */ + +#define E1000_MDICNFG_EXT_MDIO 0x80000000 /* MDI ext/int destination */ +#define E1000_MDICNFG_COM_MDIO 0x40000000 /* MDI shared w/ lan 0 */ +#define E1000_MDICNFG_PHY_MASK 0x03E00000 +#define E1000_MDICNFG_PHY_SHIFT 21 + +#define E1000_MEDIA_PORT_COPPER 1 +#define E1000_MEDIA_PORT_OTHER 2 +#define E1000_M88E1112_AUTO_COPPER_SGMII 0x2 +#define E1000_M88E1112_AUTO_COPPER_BASEX 0x3 +#define E1000_M88E1112_STATUS_LINK 0x0004 /* Interface Link Bit */ +#define E1000_M88E1112_MAC_CTRL_1 0x10 +#define E1000_M88E1112_MAC_CTRL_1_MODE_MASK 0x0380 /* Mode Select */ +#define E1000_M88E1112_MAC_CTRL_1_MODE_SHIFT 7 +#define E1000_M88E1112_PAGE_ADDR 0x16 +#define E1000_M88E1112_STATUS 0x01 + +#define E1000_THSTAT_LOW_EVENT 0x20000000 /* Low thermal threshold */ +#define E1000_THSTAT_MID_EVENT 0x00200000 /* Mid thermal threshold */ +#define E1000_THSTAT_HIGH_EVENT 0x00002000 /* High thermal threshold */ +#define E1000_THSTAT_PWR_DOWN 0x00000001 /* Power Down Event */ +#define E1000_THSTAT_LINK_THROTTLE 0x00000002 /* Link Spd Throttle Event */ + +/* I350 EEE defines */ +#define E1000_IPCNFG_EEE_1G_AN 0x00000008 /* IPCNFG EEE Ena 1G AN */ +#define E1000_IPCNFG_EEE_100M_AN 0x00000004 /* IPCNFG EEE Ena 100M AN */ +#define E1000_EEER_TX_LPI_EN 0x00010000 /* EEER Tx LPI Enable */ +#define E1000_EEER_RX_LPI_EN 0x00020000 /* 
EEER Rx LPI Enable */ +#define E1000_EEER_LPI_FC 0x00040000 /* EEER Ena on Flow Cntrl */ +/* EEE status */ +#define E1000_EEER_EEE_NEG 0x20000000 /* EEE capability nego */ +#define E1000_EEER_RX_LPI_STATUS 0x40000000 /* Rx in LPI state */ +#define E1000_EEER_TX_LPI_STATUS 0x80000000 /* Tx in LPI state */ +#define E1000_EEE_LP_ADV_ADDR_I350 0x040F /* EEE LP Advertisement */ +#define E1000_M88E1543_PAGE_ADDR 0x16 /* Page Offset Register */ +#define E1000_M88E1543_EEE_CTRL_1 0x0 +#define E1000_M88E1543_EEE_CTRL_1_MS 0x0001 /* EEE Master/Slave */ +#define E1000_M88E1543_FIBER_CTRL 0x0 /* Fiber Control Register */ +#define E1000_EEE_ADV_DEV_I354 7 +#define E1000_EEE_ADV_ADDR_I354 60 +#define E1000_EEE_ADV_100_SUPPORTED (1 << 1) /* 100BaseTx EEE Supported */ +#define E1000_EEE_ADV_1000_SUPPORTED (1 << 2) /* 1000BaseT EEE Supported */ +#define E1000_PCS_STATUS_DEV_I354 3 +#define E1000_PCS_STATUS_ADDR_I354 1 +#define E1000_PCS_STATUS_RX_LPI_RCVD 0x0400 +#define E1000_PCS_STATUS_TX_LPI_RCVD 0x0800 +#define E1000_M88E1512_CFG_REG_1 0x0010 +#define E1000_M88E1512_CFG_REG_2 0x0011 +#define E1000_M88E1512_CFG_REG_3 0x0007 +#define E1000_M88E1512_MODE 0x0014 +#define E1000_EEE_SU_LPI_CLK_STP 0x00800000 /* EEE LPI Clock Stop */ +#define E1000_EEE_LP_ADV_DEV_I210 7 /* EEE LP Adv Device */ +#define E1000_EEE_LP_ADV_ADDR_I210 61 /* EEE LP Adv Register */ +/* PCI Express Control */ +#define E1000_GCR_RXD_NO_SNOOP 0x00000001 +#define E1000_GCR_RXDSCW_NO_SNOOP 0x00000002 +#define E1000_GCR_RXDSCR_NO_SNOOP 0x00000004 +#define E1000_GCR_TXD_NO_SNOOP 0x00000008 +#define E1000_GCR_TXDSCW_NO_SNOOP 0x00000010 +#define E1000_GCR_TXDSCR_NO_SNOOP 0x00000020 +#define E1000_GCR_CMPL_TMOUT_MASK 0x0000F000 +#define E1000_GCR_CMPL_TMOUT_10ms 0x00001000 +#define E1000_GCR_CMPL_TMOUT_RESEND 0x00010000 +#define E1000_GCR_CAP_VER2 0x00040000 + +#define PCIE_NO_SNOOP_ALL (E1000_GCR_RXD_NO_SNOOP | \ + E1000_GCR_RXDSCW_NO_SNOOP | \ + E1000_GCR_RXDSCR_NO_SNOOP | \ + E1000_GCR_TXD_NO_SNOOP | \ + E1000_GCR_TXDSCW_NO_SNOOP | \ + E1000_GCR_TXDSCR_NO_SNOOP) + +#define E1000_MMDAC_FUNC_DATA 0x4000 /* Data, no post increment */ + +/* mPHY address control and data registers */ +#define E1000_MPHY_ADDR_CTL 0x0024 /* Address Control Reg */ +#define E1000_MPHY_ADDR_CTL_OFFSET_MASK 0xFFFF0000 +#define E1000_MPHY_DATA 0x0E10 /* Data Register */ + +/* AFE CSR Offset for PCS CLK */ +#define E1000_MPHY_PCS_CLK_REG_OFFSET 0x0004 +/* Override for near end digital loopback. 
*/ +#define E1000_MPHY_PCS_CLK_REG_DIGINELBEN 0x10 + +/* PHY Control Register */ +#define MII_CR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */ +#define MII_CR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */ +#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */ +#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */ +#define MII_CR_ISOLATE 0x0400 /* Isolate PHY from MII */ +#define MII_CR_POWER_DOWN 0x0800 /* Power down */ +#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */ +#define MII_CR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, 00=10 */ +#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */ +#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */ +#define MII_CR_SPEED_1000 0x0040 +#define MII_CR_SPEED_100 0x2000 +#define MII_CR_SPEED_10 0x0000 + +/* PHY Status Register */ +#define MII_SR_EXTENDED_CAPS 0x0001 /* Extended register capabilities */ +#define MII_SR_JABBER_DETECT 0x0002 /* Jabber Detected */ +#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */ +#define MII_SR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */ +#define MII_SR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */ +#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */ +#define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */ +#define MII_SR_EXTENDED_STATUS 0x0100 /* Ext. status info in Reg 0x0F */ +#define MII_SR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */ +#define MII_SR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */ +#define MII_SR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */ +#define MII_SR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */ +#define MII_SR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */ +#define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */ +#define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */ + +/* Autoneg Advertisement Register */ +#define NWAY_AR_SELECTOR_FIELD 0x0001 /* indicates IEEE 802.3 CSMA/CD */ +#define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */ +#define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */ +#define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */ +#define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */ +#define NWAY_AR_100T4_CAPS 0x0200 /* 100T4 Capable */ +#define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */ +#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */ +#define NWAY_AR_REMOTE_FAULT 0x2000 /* Remote Fault detected */ +#define NWAY_AR_NEXT_PAGE 0x8000 /* Next Page ability supported */ + +/* Link Partner Ability Register (Base Page) */ +#define NWAY_LPAR_SELECTOR_FIELD 0x0000 /* LP protocol selector field */ +#define NWAY_LPAR_10T_HD_CAPS 0x0020 /* LP 10T Half Dplx Capable */ +#define NWAY_LPAR_10T_FD_CAPS 0x0040 /* LP 10T Full Dplx Capable */ +#define NWAY_LPAR_100TX_HD_CAPS 0x0080 /* LP 100TX Half Dplx Capable */ +#define NWAY_LPAR_100TX_FD_CAPS 0x0100 /* LP 100TX Full Dplx Capable */ +#define NWAY_LPAR_100T4_CAPS 0x0200 /* LP is 100T4 Capable */ +#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */ +#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asym Pause Direction bit */ +#define NWAY_LPAR_REMOTE_FAULT 0x2000 /* LP detected Remote Fault */ +#define NWAY_LPAR_ACKNOWLEDGE 0x4000 /* LP rx'd link code word */ +#define NWAY_LPAR_NEXT_PAGE 0x8000 /* Next Page ability supported */ + +/* Autoneg Expansion Register */ +#define NWAY_ER_LP_NWAY_CAPS 0x0001 /* LP has Auto Neg Capability */ +#define NWAY_ER_PAGE_RXD 0x0002 /* LP 10T Half Dplx Capable */ +#define 
NWAY_ER_NEXT_PAGE_CAPS 0x0004 /* LP 10T Full Dplx Capable */ +#define NWAY_ER_LP_NEXT_PAGE_CAPS 0x0008 /* LP 100TX Half Dplx Capable */ +#define NWAY_ER_PAR_DETECT_FAULT 0x0010 /* LP 100TX Full Dplx Capable */ + +/* 1000BASE-T Control Register */ +#define CR_1000T_ASYM_PAUSE 0x0080 /* Advertise asymmetric pause bit */ +#define CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */ +#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */ +/* 1=Repeater/switch device port 0=DTE device */ +#define CR_1000T_REPEATER_DTE 0x0400 +/* 1=Configure PHY as Master 0=Configure PHY as Slave */ +#define CR_1000T_MS_VALUE 0x0800 +/* 1=Master/Slave manual config value 0=Automatic Master/Slave config */ +#define CR_1000T_MS_ENABLE 0x1000 +#define CR_1000T_TEST_MODE_NORMAL 0x0000 /* Normal Operation */ +#define CR_1000T_TEST_MODE_1 0x2000 /* Transmit Waveform test */ +#define CR_1000T_TEST_MODE_2 0x4000 /* Master Transmit Jitter test */ +#define CR_1000T_TEST_MODE_3 0x6000 /* Slave Transmit Jitter test */ +#define CR_1000T_TEST_MODE_4 0x8000 /* Transmitter Distortion test */ + +/* 1000BASE-T Status Register */ +#define SR_1000T_IDLE_ERROR_CNT 0x00FF /* Num idle err since last rd */ +#define SR_1000T_ASYM_PAUSE_DIR 0x0100 /* LP asym pause direction bit */ +#define SR_1000T_LP_HD_CAPS 0x0400 /* LP is 1000T HD capable */ +#define SR_1000T_LP_FD_CAPS 0x0800 /* LP is 1000T FD capable */ +#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */ +#define SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */ +#define SR_1000T_MS_CONFIG_RES 0x4000 /* 1=Local Tx Master, 0=Slave */ +#define SR_1000T_MS_CONFIG_FAULT 0x8000 /* Master/Slave config fault */ + +#define SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT 5 + +/* PHY 1000 MII Register/Bit Definitions */ +/* PHY Registers defined by IEEE */ +#define PHY_CONTROL 0x00 /* Control Register */ +#define PHY_STATUS 0x01 /* Status Register */ +#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */ +#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */ +#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */ +#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */ +#define PHY_AUTONEG_EXP 0x06 /* Autoneg Expansion Reg */ +#define PHY_NEXT_PAGE_TX 0x07 /* Next Page Tx */ +#define PHY_LP_NEXT_PAGE 0x08 /* Link Partner Next Page */ +#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */ +#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */ +#define PHY_EXT_STATUS 0x0F /* Extended Status Reg */ + +#define PHY_CONTROL_LB 0x4000 /* PHY Loopback bit */ + +/* NVM Control */ +#define E1000_EECD_SK 0x00000001 /* NVM Clock */ +#define E1000_EECD_CS 0x00000002 /* NVM Chip Select */ +#define E1000_EECD_DI 0x00000004 /* NVM Data In */ +#define E1000_EECD_DO 0x00000008 /* NVM Data Out */ +#define E1000_EECD_REQ 0x00000040 /* NVM Access Request */ +#define E1000_EECD_GNT 0x00000080 /* NVM Access Grant */ +#define E1000_EECD_PRES 0x00000100 /* NVM Present */ +#define E1000_EECD_SIZE 0x00000200 /* NVM Size (0=64 word 1=256 word) */ +#define E1000_EECD_BLOCKED 0x00008000 /* Bit banging access blocked flag */ +#define E1000_EECD_ABORT 0x00010000 /* NVM operation aborted flag */ +#define E1000_EECD_TIMEOUT 0x00020000 /* NVM read operation timeout flag */ +#define E1000_EECD_ERROR_CLR 0x00040000 /* NVM error status clear bit */ +/* NVM Addressing bits based on type 0=small, 1=large */ +#define E1000_EECD_ADDR_BITS 0x00000400 +#define E1000_EECD_TYPE 0x00002000 /* NVM Type (1-SPI, 0-Microwire) */ +#ifndef E1000_NVM_GRANT_ATTEMPTS +#define E1000_NVM_GRANT_ATTEMPTS 1000 /* 
NVM # attempts to gain grant */ +#endif +#define E1000_EECD_AUTO_RD 0x00000200 /* NVM Auto Read done */ +#define E1000_EECD_SIZE_EX_MASK 0x00007800 /* NVM Size */ +#define E1000_EECD_SIZE_EX_SHIFT 11 +#define E1000_EECD_FLUPD 0x00080000 /* Update FLASH */ +#define E1000_EECD_AUPDEN 0x00100000 /* Ena Auto FLASH update */ +#define E1000_EECD_SEC1VAL 0x00400000 /* Sector One Valid */ +#define E1000_EECD_SEC1VAL_VALID_MASK (E1000_EECD_AUTO_RD | E1000_EECD_PRES) +#define E1000_EECD_FLUPD_I210 0x00800000 /* Update FLASH */ +#define E1000_EECD_FLUDONE_I210 0x04000000 /* Update FLASH done */ +#define E1000_EECD_FLASH_DETECTED_I210 0x00080000 /* FLASH detected */ +#define E1000_EECD_SEC1VAL_I210 0x02000000 /* Sector One Valid */ +#define E1000_FLUDONE_ATTEMPTS 20000 +#define E1000_EERD_EEWR_MAX_COUNT 512 /* buffered EEPROM words rw */ +#define E1000_I210_FIFO_SEL_RX 0x00 +#define E1000_I210_FIFO_SEL_TX_QAV(_i) (0x02 + (_i)) +#define E1000_I210_FIFO_SEL_TX_LEGACY E1000_I210_FIFO_SEL_TX_QAV(0) +#define E1000_I210_FIFO_SEL_BMC2OS_TX 0x06 +#define E1000_I210_FIFO_SEL_BMC2OS_RX 0x01 + +#define E1000_I210_FLASH_SECTOR_SIZE 0x1000 /* 4KB FLASH sector unit size */ +/* Secure FLASH mode requires removing MSb */ +#define E1000_I210_FW_PTR_MASK 0x7FFF +/* Firmware code revision field word offset*/ +#define E1000_I210_FW_VER_OFFSET 328 + +#define E1000_NVM_RW_REG_DATA 16 /* Offset to data in NVM read/write regs */ +#define E1000_NVM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */ +#define E1000_NVM_RW_REG_START 1 /* Start operation */ +#define E1000_NVM_RW_ADDR_SHIFT 2 /* Shift to the address bits */ +#define E1000_NVM_POLL_WRITE 1 /* Flag for polling for write complete */ +#define E1000_NVM_POLL_READ 0 /* Flag for polling for read complete */ +#define E1000_FLASH_UPDATES 2000 + +/* NVM Word Offsets */ +#define NVM_COMPAT 0x0003 +#define NVM_ID_LED_SETTINGS 0x0004 +#define NVM_VERSION 0x0005 +#define NVM_SERDES_AMPLITUDE 0x0006 /* SERDES output amplitude */ +#define NVM_PHY_CLASS_WORD 0x0007 +#define E1000_I210_NVM_FW_MODULE_PTR 0x0010 +#define E1000_I350_NVM_FW_MODULE_PTR 0x0051 +#define NVM_FUTURE_INIT_WORD1 0x0019 +#define NVM_ETRACK_WORD 0x0042 +#define NVM_ETRACK_HIWORD 0x0043 +#define NVM_COMB_VER_OFF 0x0083 +#define NVM_COMB_VER_PTR 0x003d + +/* NVM version defines */ +#define NVM_MAJOR_MASK 0xF000 +#define NVM_MINOR_MASK 0x0FF0 +#define NVM_IMAGE_ID_MASK 0x000F +#define NVM_COMB_VER_MASK 0x00FF +#define NVM_MAJOR_SHIFT 12 +#define NVM_MINOR_SHIFT 4 +#define NVM_COMB_VER_SHFT 8 +#define NVM_VER_INVALID 0xFFFF +#define NVM_ETRACK_SHIFT 16 +#define NVM_ETRACK_VALID 0x8000 +#define NVM_NEW_DEC_MASK 0x0F00 +#define NVM_HEX_CONV 16 +#define NVM_HEX_TENS 10 + +/* FW version defines */ +/* Offset of "Loader patch ptr" in Firmware Header */ +#define E1000_I350_NVM_FW_LOADER_PATCH_PTR_OFFSET 0x01 +/* Patch generation hour & minutes */ +#define E1000_I350_NVM_FW_VER_WORD1_OFFSET 0x04 +/* Patch generation month & day */ +#define E1000_I350_NVM_FW_VER_WORD2_OFFSET 0x05 +/* Patch generation year */ +#define E1000_I350_NVM_FW_VER_WORD3_OFFSET 0x06 +/* Patch major & minor numbers */ +#define E1000_I350_NVM_FW_VER_WORD4_OFFSET 0x07 + +#define NVM_MAC_ADDR 0x0000 +#define NVM_SUB_DEV_ID 0x000B +#define NVM_SUB_VEN_ID 0x000C +#define NVM_DEV_ID 0x000D +#define NVM_VEN_ID 0x000E +#define NVM_INIT_CTRL_2 0x000F +#define NVM_INIT_CTRL_4 0x0013 +#define NVM_LED_1_CFG 0x001C +#define NVM_LED_0_2_CFG 0x001F + +#define NVM_COMPAT_VALID_CSUM 0x0001 +#define NVM_FUTURE_INIT_WORD1_VALID_CSUM 0x0040 + +#define 
NVM_INIT_CONTROL2_REG 0x000F +#define NVM_INIT_CONTROL3_PORT_B 0x0014 +#define NVM_INIT_3GIO_3 0x001A +#define NVM_SWDEF_PINS_CTRL_PORT_0 0x0020 +#define NVM_INIT_CONTROL3_PORT_A 0x0024 +#define NVM_CFG 0x0012 +#define NVM_ALT_MAC_ADDR_PTR 0x0037 +#define NVM_CHECKSUM_REG 0x003F +#define NVM_COMPATIBILITY_REG_3 0x0003 +#define NVM_COMPATIBILITY_BIT_MASK 0x8000 + +#define E1000_NVM_CFG_DONE_PORT_0 0x040000 /* MNG config cycle done */ +#define E1000_NVM_CFG_DONE_PORT_1 0x080000 /* ...for second port */ +#define E1000_NVM_CFG_DONE_PORT_2 0x100000 /* ...for third port */ +#define E1000_NVM_CFG_DONE_PORT_3 0x200000 /* ...for fourth port */ + +#define NVM_82580_LAN_FUNC_OFFSET(a) ((a) ? (0x40 + (0x40 * (a))) : 0) + +/* Mask bits for fields in Word 0x24 of the NVM */ +#define NVM_WORD24_COM_MDIO 0x0008 /* MDIO interface shared */ +#define NVM_WORD24_EXT_MDIO 0x0004 /* MDIO accesses routed extrnl */ +/* Offset of Link Mode bits for 82575/82576 */ +#define NVM_WORD24_LNK_MODE_OFFSET 8 +/* Offset of Link Mode bits for 82580 up */ +#define NVM_WORD24_82580_LNK_MODE_OFFSET 4 + + +/* Mask bits for fields in Word 0x0f of the NVM */ +#define NVM_WORD0F_PAUSE_MASK 0x3000 +#define NVM_WORD0F_PAUSE 0x1000 +#define NVM_WORD0F_ASM_DIR 0x2000 +#define NVM_WORD0F_SWPDIO_EXT_MASK 0x00F0 + +/* Mask bits for fields in Word 0x1a of the NVM */ +#define NVM_WORD1A_ASPM_MASK 0x000C + +/* Mask bits for fields in Word 0x03 of the EEPROM */ +#define NVM_COMPAT_LOM 0x0800 + +/* length of string needed to store PBA number */ +#define E1000_PBANUM_LENGTH 11 + +/* For checksumming, the sum of all words in the NVM should equal 0xBABA. */ +#define NVM_SUM 0xBABA + +/* PBA (printed board assembly) number words */ +#define NVM_PBA_OFFSET_0 8 +#define NVM_PBA_OFFSET_1 9 +#define NVM_PBA_PTR_GUARD 0xFAFA +#define NVM_RESERVED_WORD 0xFFFF +#define NVM_PHY_CLASS_A 0x8000 +#define NVM_SERDES_AMPLITUDE_MASK 0x000F +#define NVM_SIZE_MASK 0x1C00 +#define NVM_SIZE_SHIFT 10 +#define NVM_WORD_SIZE_BASE_SHIFT 6 +#define NVM_SWDPIO_EXT_SHIFT 4 + +/* NVM Commands - Microwire */ +#define NVM_READ_OPCODE_MICROWIRE 0x6 /* NVM read opcode */ +#define NVM_WRITE_OPCODE_MICROWIRE 0x5 /* NVM write opcode */ +#define NVM_ERASE_OPCODE_MICROWIRE 0x7 /* NVM erase opcode */ +#define NVM_EWEN_OPCODE_MICROWIRE 0x13 /* NVM erase/write enable */ +#define NVM_EWDS_OPCODE_MICROWIRE 0x10 /* NVM erase/write disable */ + +/* NVM Commands - SPI */ +#define NVM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */ +#define NVM_READ_OPCODE_SPI 0x03 /* NVM read opcode */ +#define NVM_WRITE_OPCODE_SPI 0x02 /* NVM write opcode */ +#define NVM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */ +#define NVM_WREN_OPCODE_SPI 0x06 /* NVM set Write Enable latch */ +#define NVM_RDSR_OPCODE_SPI 0x05 /* NVM read Status register */ + +/* SPI NVM Status Register */ +#define NVM_STATUS_RDY_SPI 0x01 + +/* Word definitions for ID LED Settings */ +#define ID_LED_RESERVED_0000 0x0000 +#define ID_LED_RESERVED_FFFF 0xFFFF +#define ID_LED_DEFAULT ((ID_LED_OFF1_ON2 << 12) | \ + (ID_LED_OFF1_OFF2 << 8) | \ + (ID_LED_DEF1_DEF2 << 4) | \ + (ID_LED_DEF1_DEF2)) +#define ID_LED_DEF1_DEF2 0x1 +#define ID_LED_DEF1_ON2 0x2 +#define ID_LED_DEF1_OFF2 0x3 +#define ID_LED_ON1_DEF2 0x4 +#define ID_LED_ON1_ON2 0x5 +#define ID_LED_ON1_OFF2 0x6 +#define ID_LED_OFF1_DEF2 0x7 +#define ID_LED_OFF1_ON2 0x8 +#define ID_LED_OFF1_OFF2 0x9 + +#define IGP_ACTIVITY_LED_MASK 0xFFFFF0FF +#define IGP_ACTIVITY_LED_ENABLE 0x0300 +#define IGP_LED3_MODE 0x07000000 + +/* PCI/PCI-X/PCI-EX Config space */ +#define 
PCIX_COMMAND_REGISTER 0xE6 +#define PCIX_STATUS_REGISTER_LO 0xE8 +#define PCIX_STATUS_REGISTER_HI 0xEA +#define PCI_HEADER_TYPE_REGISTER 0x0E +#define PCIE_LINK_STATUS 0x12 +#define PCIE_DEVICE_CONTROL2 0x28 + +#define PCIX_COMMAND_MMRBC_MASK 0x000C +#define PCIX_COMMAND_MMRBC_SHIFT 0x2 +#define PCIX_STATUS_HI_MMRBC_MASK 0x0060 +#define PCIX_STATUS_HI_MMRBC_SHIFT 0x5 +#define PCIX_STATUS_HI_MMRBC_4K 0x3 +#define PCIX_STATUS_HI_MMRBC_2K 0x2 +#define PCIX_STATUS_LO_FUNC_MASK 0x7 +#define PCI_HEADER_TYPE_MULTIFUNC 0x80 +#define PCIE_LINK_WIDTH_MASK 0x3F0 +#define PCIE_LINK_WIDTH_SHIFT 4 +#define PCIE_LINK_SPEED_MASK 0x0F +#define PCIE_LINK_SPEED_2500 0x01 +#define PCIE_LINK_SPEED_5000 0x02 +#define PCIE_DEVICE_CONTROL2_16ms 0x0005 + +#ifndef ETH_ADDR_LEN +#define ETH_ADDR_LEN 6 +#endif + +#define PHY_REVISION_MASK 0xFFFFFFF0 +#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */ +#define MAX_PHY_MULTI_PAGE_REG 0xF + +/* Bit definitions for valid PHY IDs. + * I = Integrated + * E = External + */ +#define M88E1000_E_PHY_ID 0x01410C50 +#define M88E1000_I_PHY_ID 0x01410C30 +#define M88E1011_I_PHY_ID 0x01410C20 +#define IGP01E1000_I_PHY_ID 0x02A80380 +#define M88E1111_I_PHY_ID 0x01410CC0 +#define M88E1543_E_PHY_ID 0x01410EA0 +#define M88E1512_E_PHY_ID 0x01410DD0 +#define M88E1112_E_PHY_ID 0x01410C90 +#define I347AT4_E_PHY_ID 0x01410DC0 +#define M88E1340M_E_PHY_ID 0x01410DF0 +#define GG82563_E_PHY_ID 0x01410CA0 +#define IGP03E1000_E_PHY_ID 0x02A80390 +#define IFE_E_PHY_ID 0x02A80330 +#define IFE_PLUS_E_PHY_ID 0x02A80320 +#define IFE_C_E_PHY_ID 0x02A80310 +#define BME1000_E_PHY_ID 0x01410CB0 +#define BME1000_E_PHY_ID_R2 0x01410CB1 +#define I82577_E_PHY_ID 0x01540050 +#define I82578_E_PHY_ID 0x004DD040 +#define I82579_E_PHY_ID 0x01540090 +#define I217_E_PHY_ID 0x015400A0 +#define I82580_I_PHY_ID 0x015403A0 +#define I350_I_PHY_ID 0x015403B0 +#define I210_I_PHY_ID 0x01410C00 +#define IGP04E1000_E_PHY_ID 0x02A80391 +#define BCM54616_E_PHY_ID 0x03625D10 +#define M88_VENDOR 0x0141 + +/* M88E1000 Specific Registers */ +#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Reg */ +#define M88E1000_PHY_SPEC_STATUS 0x11 /* PHY Specific Status Reg */ +#define M88E1000_EXT_PHY_SPEC_CTRL 0x14 /* Extended PHY Specific Cntrl */ +#define M88E1000_RX_ERR_CNTR 0x15 /* Receive Error Counter */ + +#define M88E1000_PHY_EXT_CTRL 0x1A /* PHY extend control register */ +#define M88E1000_PHY_PAGE_SELECT 0x1D /* Reg 29 for pg number setting */ +#define M88E1000_PHY_GEN_CONTROL 0x1E /* meaning depends on reg 29 */ +#define M88E1000_PHY_VCO_REG_BIT8 0x100 /* Bits 8 & 11 are adjusted for */ +#define M88E1000_PHY_VCO_REG_BIT11 0x800 /* improved BER performance */ + +/* M88E1000 PHY Specific Control Register */ +#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reverse enabled */ +/* MDI Crossover Mode bits 6:5 Manual MDI configuration */ +#define M88E1000_PSCR_MDI_MANUAL_MODE 0x0000 +#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */ +/* 1000BASE-T: Auto crossover, 100BASE-TX/10BASE-T: MDI Mode */ +#define M88E1000_PSCR_AUTO_X_1000T 0x0040 +/* Auto crossover enabled all speeds */ +#define M88E1000_PSCR_AUTO_X_MODE 0x0060 +#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Tx */ + +/* M88E1000 PHY Specific Status Register */ +#define M88E1000_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */ +#define M88E1000_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */ +#define M88E1000_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */ +/* 0 = <50M + * 1 = 50-80M + * 2 = 80-110M + * 3 = 
110-140M + * 4 = >140M + */ +#define M88E1000_PSSR_CABLE_LENGTH 0x0380 +#define M88E1000_PSSR_LINK 0x0400 /* 1=Link up, 0=Link down */ +#define M88E1000_PSSR_SPD_DPLX_RESOLVED 0x0800 /* 1=Speed & Duplex resolved */ +#define M88E1000_PSSR_DPLX 0x2000 /* 1=Duplex 0=Half Duplex */ +#define M88E1000_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */ +#define M88E1000_PSSR_100MBS 0x4000 /* 01=100Mbs */ +#define M88E1000_PSSR_1000MBS 0x8000 /* 10=1000Mbs */ + +#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7 + +/* Number of times we will attempt to autonegotiate before downshifting if we + * are the master + */ +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X 0x0000 +/* Number of times we will attempt to autonegotiate before downshifting if we + * are the slave + */ +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X 0x0100 +#define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */ + +/* Intel I347AT4 Registers */ +#define I347AT4_PCDL 0x10 /* PHY Cable Diagnostics Length */ +#define I347AT4_PCDC 0x15 /* PHY Cable Diagnostics Control */ +#define I347AT4_PAGE_SELECT 0x16 + +/* I347AT4 Extended PHY Specific Control Register */ + +/* Number of times we will attempt to autonegotiate before downshifting if we + * are the master + */ +#define I347AT4_PSCR_DOWNSHIFT_ENABLE 0x0800 +#define I347AT4_PSCR_DOWNSHIFT_MASK 0x7000 +#define I347AT4_PSCR_DOWNSHIFT_1X 0x0000 +#define I347AT4_PSCR_DOWNSHIFT_2X 0x1000 +#define I347AT4_PSCR_DOWNSHIFT_3X 0x2000 +#define I347AT4_PSCR_DOWNSHIFT_4X 0x3000 +#define I347AT4_PSCR_DOWNSHIFT_5X 0x4000 +#define I347AT4_PSCR_DOWNSHIFT_6X 0x5000 +#define I347AT4_PSCR_DOWNSHIFT_7X 0x6000 +#define I347AT4_PSCR_DOWNSHIFT_8X 0x7000 + +/* I347AT4 PHY Cable Diagnostics Control */ +#define I347AT4_PCDC_CABLE_LENGTH_UNIT 0x0400 /* 0=cm 1=meters */ + +/* M88E1112 only registers */ +#define M88E1112_VCT_DSP_DISTANCE 0x001A + +/* M88EC018 Rev 2 specific DownShift settings */ +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800 + +#define I82578_EPSCR_DOWNSHIFT_ENABLE 0x0020 +#define I82578_EPSCR_DOWNSHIFT_COUNTER_MASK 0x001C + +/* BME1000 PHY Specific Control Register */ +#define BME1000_PSCR_ENABLE_DOWNSHIFT 0x0800 /* 1 = enable downshift */ + +/* Bits... 
+ * 15-5: page + * 4-0: register offset + */ +#define GG82563_PAGE_SHIFT 5 +#define GG82563_REG(page, reg) \ + (((page) << GG82563_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS)) +#define GG82563_MIN_ALT_REG 30 + +/* GG82563 Specific Registers */ +#define GG82563_PHY_SPEC_CTRL GG82563_REG(0, 16) /* PHY Spec Cntrl */ +#define GG82563_PHY_PAGE_SELECT GG82563_REG(0, 22) /* Page Select */ +#define GG82563_PHY_SPEC_CTRL_2 GG82563_REG(0, 26) /* PHY Spec Cntrl2 */ +#define GG82563_PHY_PAGE_SELECT_ALT GG82563_REG(0, 29) /* Alt Page Select */ + +/* MAC Specific Control Register */ +#define GG82563_PHY_MAC_SPEC_CTRL GG82563_REG(2, 21) + +#define GG82563_PHY_DSP_DISTANCE GG82563_REG(5, 26) /* DSP Distance */ + +/* Page 193 - Port Control Registers */ +/* Kumeran Mode Control */ +#define GG82563_PHY_KMRN_MODE_CTRL GG82563_REG(193, 16) +#define GG82563_PHY_PWR_MGMT_CTRL GG82563_REG(193, 20) /* Pwr Mgt Ctrl */ + +/* Page 194 - KMRN Registers */ +#define GG82563_PHY_INBAND_CTRL GG82563_REG(194, 18) /* Inband Ctrl */ + +/* MDI Control */ +#define E1000_MDIC_REG_MASK 0x001F0000 +#define E1000_MDIC_REG_SHIFT 16 +#define E1000_MDIC_PHY_MASK 0x03E00000 +#define E1000_MDIC_PHY_SHIFT 21 +#define E1000_MDIC_OP_WRITE 0x04000000 +#define E1000_MDIC_OP_READ 0x08000000 +#define E1000_MDIC_READY 0x10000000 +#define E1000_MDIC_ERROR 0x40000000 +#define E1000_MDIC_DEST 0x80000000 + +/* SerDes Control */ +#define E1000_GEN_CTL_READY 0x80000000 +#define E1000_GEN_CTL_ADDRESS_SHIFT 8 +#define E1000_GEN_POLL_TIMEOUT 640 + +/* LinkSec register fields */ +#define E1000_LSECTXCAP_SUM_MASK 0x00FF0000 +#define E1000_LSECTXCAP_SUM_SHIFT 16 +#define E1000_LSECRXCAP_SUM_MASK 0x00FF0000 +#define E1000_LSECRXCAP_SUM_SHIFT 16 + +#define E1000_LSECTXCTRL_EN_MASK 0x00000003 +#define E1000_LSECTXCTRL_DISABLE 0x0 +#define E1000_LSECTXCTRL_AUTH 0x1 +#define E1000_LSECTXCTRL_AUTH_ENCRYPT 0x2 +#define E1000_LSECTXCTRL_AISCI 0x00000020 +#define E1000_LSECTXCTRL_PNTHRSH_MASK 0xFFFFFF00 +#define E1000_LSECTXCTRL_RSV_MASK 0x000000D8 + +#define E1000_LSECRXCTRL_EN_MASK 0x0000000C +#define E1000_LSECRXCTRL_EN_SHIFT 2 +#define E1000_LSECRXCTRL_DISABLE 0x0 +#define E1000_LSECRXCTRL_CHECK 0x1 +#define E1000_LSECRXCTRL_STRICT 0x2 +#define E1000_LSECRXCTRL_DROP 0x3 +#define E1000_LSECRXCTRL_PLSH 0x00000040 +#define E1000_LSECRXCTRL_RP 0x00000080 +#define E1000_LSECRXCTRL_RSV_MASK 0xFFFFFF33 + +/* Tx Rate-Scheduler Config fields */ +#define E1000_RTTBCNRC_RS_ENA 0x80000000 +#define E1000_RTTBCNRC_RF_DEC_MASK 0x00003FFF +#define E1000_RTTBCNRC_RF_INT_SHIFT 14 +#define E1000_RTTBCNRC_RF_INT_MASK \ + (E1000_RTTBCNRC_RF_DEC_MASK << E1000_RTTBCNRC_RF_INT_SHIFT) + +/* DMA Coalescing register fields */ +/* DMA Coalescing Watchdog Timer */ +#define E1000_DMACR_DMACWT_MASK 0x00003FFF +/* DMA Coalescing Rx Threshold */ +#define E1000_DMACR_DMACTHR_MASK 0x00FF0000 +#define E1000_DMACR_DMACTHR_SHIFT 16 +/* Lx when no PCIe transactions */ +#define E1000_DMACR_DMAC_LX_MASK 0x30000000 +#define E1000_DMACR_DMAC_LX_SHIFT 28 +#define E1000_DMACR_DMAC_EN 0x80000000 /* Enable DMA Coalescing */ +/* DMA Coalescing BMC-to-OS Watchdog Enable */ +#define E1000_DMACR_DC_BMC2OSW_EN 0x00008000 + +/* DMA Coalescing Transmit Threshold */ +#define E1000_DMCTXTH_DMCTTHR_MASK 0x00000FFF + +#define E1000_DMCTLX_TTLX_MASK 0x00000FFF /* Time to LX request */ + +/* Rx Traffic Rate Threshold */ +#define E1000_DMCRTRH_UTRESH_MASK 0x0007FFFF +/* Rx packet rate in current window */ +#define E1000_DMCRTRH_LRPRCW 0x80000000 + +/* DMA Coal Rx Traffic Current Count */ +#define 
E1000_DMCCNT_CCOUNT_MASK 0x01FFFFFF + +/* Flow ctrl Rx Threshold High val */ +#define E1000_FCRTC_RTH_COAL_MASK 0x0003FFF0 +#define E1000_FCRTC_RTH_COAL_SHIFT 4 +/* Lx power decision based on DMA coal */ +#define E1000_PCIEMISC_LX_DECISION 0x00000080 + +#define E1000_RXPBS_CFG_TS_EN 0x80000000 /* Timestamp in Rx buffer */ +#define E1000_RXPBS_SIZE_I210_MASK 0x0000003F /* Rx packet buffer size */ +#define E1000_TXPB0S_SIZE_I210_MASK 0x0000003F /* Tx packet buffer 0 size */ +#define I210_RXPBSIZE_DEFAULT 0x000000A2 /* RXPBSIZE default */ +#define I210_TXPBSIZE_DEFAULT 0x04000014 /* TXPBSIZE default */ + + +/* Proxy Filter Control */ +#define E1000_PROXYFC_D0 0x00000001 /* Enable offload in D0 */ +#define E1000_PROXYFC_EX 0x00000004 /* Directed exact proxy */ +#define E1000_PROXYFC_MC 0x00000008 /* Directed MC Proxy */ +#define E1000_PROXYFC_BC 0x00000010 /* Broadcast Proxy Enable */ +#define E1000_PROXYFC_ARP_DIRECTED 0x00000020 /* Directed ARP Proxy Ena */ +#define E1000_PROXYFC_IPV4 0x00000040 /* Directed IPv4 Enable */ +#define E1000_PROXYFC_IPV6 0x00000080 /* Directed IPv6 Enable */ +#define E1000_PROXYFC_NS 0x00000200 /* IPv6 Neighbor Solicitation */ +#define E1000_PROXYFC_ARP 0x00000800 /* ARP Request Proxy Ena */ +/* Proxy Status */ +#define E1000_PROXYS_CLEAR 0xFFFFFFFF /* Clear */ + +/* Firmware Status */ +#define E1000_FWSTS_FWRI 0x80000000 /* FW Reset Indication */ +/* VF Control */ +#define E1000_VTCTRL_RST 0x04000000 /* Reset VF */ + +#define E1000_STATUS_LAN_ID_MASK 0x00000000C /* Mask for Lan ID field */ +/* Lan ID bit field offset in status register */ +#define E1000_STATUS_LAN_ID_OFFSET 2 +#define E1000_VFTA_ENTRIES 128 +#ifndef E1000_UNUSEDARG +#define E1000_UNUSEDARG +#endif /* E1000_UNUSEDARG */ +#ifndef ERROR_REPORT +#define ERROR_REPORT(fmt) do { } while (0) +#endif /* ERROR_REPORT */ +#endif /* _E1000_DEFINES_H_ */ diff --git a/src/spdk/dpdk/drivers/net/e1000/base/e1000_hw.h b/src/spdk/dpdk/drivers/net/e1000/base/e1000_hw.h new file mode 100644 index 000000000..9793b724e --- /dev/null +++ b/src/spdk/dpdk/drivers/net/e1000/base/e1000_hw.h @@ -0,0 +1,1020 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001 - 2015 Intel Corporation + */ + +#ifndef _E1000_HW_H_ +#define _E1000_HW_H_ + +#include "e1000_osdep.h" +#include "e1000_regs.h" +#include "e1000_defines.h" + +struct e1000_hw; + +#define E1000_DEV_ID_82542 0x1000 +#define E1000_DEV_ID_82543GC_FIBER 0x1001 +#define E1000_DEV_ID_82543GC_COPPER 0x1004 +#define E1000_DEV_ID_82544EI_COPPER 0x1008 +#define E1000_DEV_ID_82544EI_FIBER 0x1009 +#define E1000_DEV_ID_82544GC_COPPER 0x100C +#define E1000_DEV_ID_82544GC_LOM 0x100D +#define E1000_DEV_ID_82540EM 0x100E +#define E1000_DEV_ID_82540EM_LOM 0x1015 +#define E1000_DEV_ID_82540EP_LOM 0x1016 +#define E1000_DEV_ID_82540EP 0x1017 +#define E1000_DEV_ID_82540EP_LP 0x101E +#define E1000_DEV_ID_82545EM_COPPER 0x100F +#define E1000_DEV_ID_82545EM_FIBER 0x1011 +#define E1000_DEV_ID_82545GM_COPPER 0x1026 +#define E1000_DEV_ID_82545GM_FIBER 0x1027 +#define E1000_DEV_ID_82545GM_SERDES 0x1028 +#define E1000_DEV_ID_82546EB_COPPER 0x1010 +#define E1000_DEV_ID_82546EB_FIBER 0x1012 +#define E1000_DEV_ID_82546EB_QUAD_COPPER 0x101D +#define E1000_DEV_ID_82546GB_COPPER 0x1079 +#define E1000_DEV_ID_82546GB_FIBER 0x107A +#define E1000_DEV_ID_82546GB_SERDES 0x107B +#define E1000_DEV_ID_82546GB_PCIE 0x108A +#define E1000_DEV_ID_82546GB_QUAD_COPPER 0x1099 +#define E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 0x10B5 +#define E1000_DEV_ID_82541EI 0x1013 +#define E1000_DEV_ID_82541EI_MOBILE 
0x1018 +#define E1000_DEV_ID_82541ER_LOM 0x1014 +#define E1000_DEV_ID_82541ER 0x1078 +#define E1000_DEV_ID_82541GI 0x1076 +#define E1000_DEV_ID_82541GI_LF 0x107C +#define E1000_DEV_ID_82541GI_MOBILE 0x1077 +#define E1000_DEV_ID_82547EI 0x1019 +#define E1000_DEV_ID_82547EI_MOBILE 0x101A +#define E1000_DEV_ID_82547GI 0x1075 +#define E1000_DEV_ID_82571EB_COPPER 0x105E +#define E1000_DEV_ID_82571EB_FIBER 0x105F +#define E1000_DEV_ID_82571EB_SERDES 0x1060 +#define E1000_DEV_ID_82571EB_SERDES_DUAL 0x10D9 +#define E1000_DEV_ID_82571EB_SERDES_QUAD 0x10DA +#define E1000_DEV_ID_82571EB_QUAD_COPPER 0x10A4 +#define E1000_DEV_ID_82571PT_QUAD_COPPER 0x10D5 +#define E1000_DEV_ID_82571EB_QUAD_FIBER 0x10A5 +#define E1000_DEV_ID_82571EB_QUAD_COPPER_LP 0x10BC +#define E1000_DEV_ID_82572EI_COPPER 0x107D +#define E1000_DEV_ID_82572EI_FIBER 0x107E +#define E1000_DEV_ID_82572EI_SERDES 0x107F +#define E1000_DEV_ID_82572EI 0x10B9 +#define E1000_DEV_ID_82573E 0x108B +#define E1000_DEV_ID_82573E_IAMT 0x108C +#define E1000_DEV_ID_82573L 0x109A +#define E1000_DEV_ID_82574L 0x10D3 +#define E1000_DEV_ID_82574LA 0x10F6 +#define E1000_DEV_ID_82583V 0x150C +#define E1000_DEV_ID_80003ES2LAN_COPPER_DPT 0x1096 +#define E1000_DEV_ID_80003ES2LAN_SERDES_DPT 0x1098 +#define E1000_DEV_ID_80003ES2LAN_COPPER_SPT 0x10BA +#define E1000_DEV_ID_80003ES2LAN_SERDES_SPT 0x10BB +#define E1000_DEV_ID_ICH8_82567V_3 0x1501 +#define E1000_DEV_ID_ICH8_IGP_M_AMT 0x1049 +#define E1000_DEV_ID_ICH8_IGP_AMT 0x104A +#define E1000_DEV_ID_ICH8_IGP_C 0x104B +#define E1000_DEV_ID_ICH8_IFE 0x104C +#define E1000_DEV_ID_ICH8_IFE_GT 0x10C4 +#define E1000_DEV_ID_ICH8_IFE_G 0x10C5 +#define E1000_DEV_ID_ICH8_IGP_M 0x104D +#define E1000_DEV_ID_ICH9_IGP_M 0x10BF +#define E1000_DEV_ID_ICH9_IGP_M_AMT 0x10F5 +#define E1000_DEV_ID_ICH9_IGP_M_V 0x10CB +#define E1000_DEV_ID_ICH9_IGP_AMT 0x10BD +#define E1000_DEV_ID_ICH9_BM 0x10E5 +#define E1000_DEV_ID_ICH9_IGP_C 0x294C +#define E1000_DEV_ID_ICH9_IFE 0x10C0 +#define E1000_DEV_ID_ICH9_IFE_GT 0x10C3 +#define E1000_DEV_ID_ICH9_IFE_G 0x10C2 +#define E1000_DEV_ID_ICH10_R_BM_LM 0x10CC +#define E1000_DEV_ID_ICH10_R_BM_LF 0x10CD +#define E1000_DEV_ID_ICH10_R_BM_V 0x10CE +#define E1000_DEV_ID_ICH10_D_BM_LM 0x10DE +#define E1000_DEV_ID_ICH10_D_BM_LF 0x10DF +#define E1000_DEV_ID_ICH10_D_BM_V 0x1525 +#define E1000_DEV_ID_PCH_M_HV_LM 0x10EA +#define E1000_DEV_ID_PCH_M_HV_LC 0x10EB +#define E1000_DEV_ID_PCH_D_HV_DM 0x10EF +#define E1000_DEV_ID_PCH_D_HV_DC 0x10F0 +#define E1000_DEV_ID_PCH2_LV_LM 0x1502 +#define E1000_DEV_ID_PCH2_LV_V 0x1503 +#define E1000_DEV_ID_PCH_LPT_I217_LM 0x153A +#define E1000_DEV_ID_PCH_LPT_I217_V 0x153B +#define E1000_DEV_ID_PCH_LPTLP_I218_LM 0x155A +#define E1000_DEV_ID_PCH_LPTLP_I218_V 0x1559 +#define E1000_DEV_ID_PCH_I218_LM2 0x15A0 +#define E1000_DEV_ID_PCH_I218_V2 0x15A1 +#define E1000_DEV_ID_PCH_I218_LM3 0x15A2 /* Wildcat Point PCH */ +#define E1000_DEV_ID_PCH_I218_V3 0x15A3 /* Wildcat Point PCH */ +#define E1000_DEV_ID_PCH_SPT_I219_LM 0x156F /* Sunrise Point PCH */ +#define E1000_DEV_ID_PCH_SPT_I219_V 0x1570 /* Sunrise Point PCH */ +#define E1000_DEV_ID_PCH_SPT_I219_LM2 0x15B7 /* Sunrise Point-H PCH */ +#define E1000_DEV_ID_PCH_SPT_I219_V2 0x15B8 /* Sunrise Point-H PCH */ +#define E1000_DEV_ID_PCH_LBG_I219_LM3 0x15B9 /* LEWISBURG PCH */ +#define E1000_DEV_ID_PCH_SPT_I219_LM4 0x15D7 +#define E1000_DEV_ID_PCH_SPT_I219_V4 0x15D8 +#define E1000_DEV_ID_PCH_SPT_I219_LM5 0x15E3 +#define E1000_DEV_ID_PCH_SPT_I219_V5 0x15D6 +#define E1000_DEV_ID_PCH_CNP_I219_LM6 0x15BD +#define E1000_DEV_ID_PCH_CNP_I219_V6 0x15BE 
+#define E1000_DEV_ID_PCH_CNP_I219_LM7 0x15BB +#define E1000_DEV_ID_PCH_CNP_I219_V7 0x15BC +#define E1000_DEV_ID_82576 0x10C9 +#define E1000_DEV_ID_82576_FIBER 0x10E6 +#define E1000_DEV_ID_82576_SERDES 0x10E7 +#define E1000_DEV_ID_82576_QUAD_COPPER 0x10E8 +#define E1000_DEV_ID_82576_QUAD_COPPER_ET2 0x1526 +#define E1000_DEV_ID_82576_NS 0x150A +#define E1000_DEV_ID_82576_NS_SERDES 0x1518 +#define E1000_DEV_ID_82576_SERDES_QUAD 0x150D +#define E1000_DEV_ID_82576_VF 0x10CA +#define E1000_DEV_ID_82576_VF_HV 0x152D +#define E1000_DEV_ID_I350_VF 0x1520 +#define E1000_DEV_ID_I350_VF_HV 0x152F +#define E1000_DEV_ID_82575EB_COPPER 0x10A7 +#define E1000_DEV_ID_82575EB_FIBER_SERDES 0x10A9 +#define E1000_DEV_ID_82575GB_QUAD_COPPER 0x10D6 +#define E1000_DEV_ID_82580_COPPER 0x150E +#define E1000_DEV_ID_82580_FIBER 0x150F +#define E1000_DEV_ID_82580_SERDES 0x1510 +#define E1000_DEV_ID_82580_SGMII 0x1511 +#define E1000_DEV_ID_82580_COPPER_DUAL 0x1516 +#define E1000_DEV_ID_82580_QUAD_FIBER 0x1527 +#define E1000_DEV_ID_I350_COPPER 0x1521 +#define E1000_DEV_ID_I350_FIBER 0x1522 +#define E1000_DEV_ID_I350_SERDES 0x1523 +#define E1000_DEV_ID_I350_SGMII 0x1524 +#define E1000_DEV_ID_I350_DA4 0x1546 +#define E1000_DEV_ID_I210_COPPER 0x1533 +#define E1000_DEV_ID_I210_COPPER_OEM1 0x1534 +#define E1000_DEV_ID_I210_COPPER_IT 0x1535 +#define E1000_DEV_ID_I210_FIBER 0x1536 +#define E1000_DEV_ID_I210_SERDES 0x1537 +#define E1000_DEV_ID_I210_SGMII 0x1538 +#define E1000_DEV_ID_I210_COPPER_FLASHLESS 0x157B +#define E1000_DEV_ID_I210_SERDES_FLASHLESS 0x157C +#define E1000_DEV_ID_I211_COPPER 0x1539 +#define E1000_DEV_ID_I354_BACKPLANE_1GBPS 0x1F40 +#define E1000_DEV_ID_I354_SGMII 0x1F41 +#define E1000_DEV_ID_I354_BACKPLANE_2_5GBPS 0x1F45 +#define E1000_DEV_ID_DH89XXCC_SGMII 0x0438 +#define E1000_DEV_ID_DH89XXCC_SERDES 0x043A +#define E1000_DEV_ID_DH89XXCC_BACKPLANE 0x043C +#define E1000_DEV_ID_DH89XXCC_SFP 0x0440 + +#define E1000_REVISION_0 0 +#define E1000_REVISION_1 1 +#define E1000_REVISION_2 2 +#define E1000_REVISION_3 3 +#define E1000_REVISION_4 4 + +#define E1000_FUNC_0 0 +#define E1000_FUNC_1 1 +#define E1000_FUNC_2 2 +#define E1000_FUNC_3 3 + +#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN0 0 +#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN1 3 +#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN2 6 +#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN3 9 + +enum e1000_mac_type { + e1000_undefined = 0, + e1000_82542, + e1000_82543, + e1000_82544, + e1000_82540, + e1000_82545, + e1000_82545_rev_3, + e1000_82546, + e1000_82546_rev_3, + e1000_82541, + e1000_82541_rev_2, + e1000_82547, + e1000_82547_rev_2, + e1000_82571, + e1000_82572, + e1000_82573, + e1000_82574, + e1000_82583, + e1000_80003es2lan, + e1000_ich8lan, + e1000_ich9lan, + e1000_ich10lan, + e1000_pchlan, + e1000_pch2lan, + e1000_pch_lpt, + e1000_pch_spt, + e1000_pch_cnp, + e1000_82575, + e1000_82576, + e1000_82580, + e1000_i350, + e1000_i354, + e1000_i210, + e1000_i211, + e1000_vfadapt, + e1000_vfadapt_i350, + e1000_num_macs /* List is 1-based, so subtract 1 for true count. 
*/ +}; + +enum e1000_media_type { + e1000_media_type_unknown = 0, + e1000_media_type_copper = 1, + e1000_media_type_fiber = 2, + e1000_media_type_internal_serdes = 3, + e1000_num_media_types +}; + +enum e1000_nvm_type { + e1000_nvm_unknown = 0, + e1000_nvm_none, + e1000_nvm_eeprom_spi, + e1000_nvm_eeprom_microwire, + e1000_nvm_flash_hw, + e1000_nvm_invm, + e1000_nvm_flash_sw +}; + +enum e1000_nvm_override { + e1000_nvm_override_none = 0, + e1000_nvm_override_spi_small, + e1000_nvm_override_spi_large, + e1000_nvm_override_microwire_small, + e1000_nvm_override_microwire_large +}; + +enum e1000_phy_type { + e1000_phy_unknown = 0, + e1000_phy_none, + e1000_phy_m88, + e1000_phy_igp, + e1000_phy_igp_2, + e1000_phy_gg82563, + e1000_phy_igp_3, + e1000_phy_ife, + e1000_phy_bm, + e1000_phy_82578, + e1000_phy_82577, + e1000_phy_82579, + e1000_phy_i217, + e1000_phy_82580, + e1000_phy_vf, + e1000_phy_i210, +}; + +enum e1000_bus_type { + e1000_bus_type_unknown = 0, + e1000_bus_type_pci, + e1000_bus_type_pcix, + e1000_bus_type_pci_express, + e1000_bus_type_reserved +}; + +enum e1000_bus_speed { + e1000_bus_speed_unknown = 0, + e1000_bus_speed_33, + e1000_bus_speed_66, + e1000_bus_speed_100, + e1000_bus_speed_120, + e1000_bus_speed_133, + e1000_bus_speed_2500, + e1000_bus_speed_5000, + e1000_bus_speed_reserved +}; + +enum e1000_bus_width { + e1000_bus_width_unknown = 0, + e1000_bus_width_pcie_x1, + e1000_bus_width_pcie_x2, + e1000_bus_width_pcie_x4 = 4, + e1000_bus_width_pcie_x8 = 8, + e1000_bus_width_32, + e1000_bus_width_64, + e1000_bus_width_reserved +}; + +enum e1000_1000t_rx_status { + e1000_1000t_rx_status_not_ok = 0, + e1000_1000t_rx_status_ok, + e1000_1000t_rx_status_undefined = 0xFF +}; + +enum e1000_rev_polarity { + e1000_rev_polarity_normal = 0, + e1000_rev_polarity_reversed, + e1000_rev_polarity_undefined = 0xFF +}; + +enum e1000_fc_mode { + e1000_fc_none = 0, + e1000_fc_rx_pause, + e1000_fc_tx_pause, + e1000_fc_full, + e1000_fc_default = 0xFF +}; + +enum e1000_ffe_config { + e1000_ffe_config_enabled = 0, + e1000_ffe_config_active, + e1000_ffe_config_blocked +}; + +enum e1000_dsp_config { + e1000_dsp_config_disabled = 0, + e1000_dsp_config_enabled, + e1000_dsp_config_activated, + e1000_dsp_config_undefined = 0xFF +}; + +enum e1000_ms_type { + e1000_ms_hw_default = 0, + e1000_ms_force_master, + e1000_ms_force_slave, + e1000_ms_auto +}; + +enum e1000_smart_speed { + e1000_smart_speed_default = 0, + e1000_smart_speed_on, + e1000_smart_speed_off +}; + +enum e1000_serdes_link_state { + e1000_serdes_link_down = 0, + e1000_serdes_link_autoneg_progress, + e1000_serdes_link_autoneg_complete, + e1000_serdes_link_forced_up +}; + +#define __le16 u16 +#define __le32 u32 +#define __le64 u64 +/* Receive Descriptor */ +struct e1000_rx_desc { + __le64 buffer_addr; /* Address of the descriptor's data buffer */ + __le16 length; /* Length of data DMAed into data buffer */ + __le16 csum; /* Packet checksum */ + u8 status; /* Descriptor status */ + u8 errors; /* Descriptor Errors */ + __le16 special; +}; + +/* Receive Descriptor - Extended */ +union e1000_rx_desc_extended { + struct { + __le64 buffer_addr; + __le64 reserved; + } read; + struct { + struct { + __le32 mrq; /* Multiple Rx Queues */ + union { + __le32 rss; /* RSS Hash */ + struct { + __le16 ip_id; /* IP id */ + __le16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; /* ext status/error */ + __le16 length; + __le16 vlan; /* VLAN tag */ + } upper; + } wb; /* writeback */ +}; + +#define MAX_PS_BUFFERS 4 + 
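+/* Illustrative sketch only, not part of the upstream header: a minimal
+ * example (the name example_rx_desc_done is hypothetical) of how a polling
+ * receive path consumes the extended descriptor write-back layout defined
+ * above.  It assumes the E1000_RXD_STAT_DD and E1000_RXD_STAT_EOP bits
+ * defined earlier in e1000_defines.h; byte-order conversion of the __le*
+ * fields is elided because this header aliases them to plain u16/u32/u64.
+ */
+static inline int
+example_rx_desc_done(volatile union e1000_rx_desc_extended *rxd, u16 *pkt_len)
+{
+        u32 staterr = rxd->wb.upper.status_error;
+
+        if (!(staterr & E1000_RXD_STAT_DD))
+                return 0;       /* hardware has not written the descriptor back yet */
+        if (staterr & E1000_RXD_STAT_EOP)
+                *pkt_len = rxd->wb.upper.length;        /* single-buffer frame */
+        return 1;
+}
+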
+/* Number of packet split data buffers (not including the header buffer) */ +#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1) + +/* Receive Descriptor - Packet Split */ +union e1000_rx_desc_packet_split { + struct { + /* one buffer for protocol header(s), three data buffers */ + __le64 buffer_addr[MAX_PS_BUFFERS]; + } read; + struct { + struct { + __le32 mrq; /* Multiple Rx Queues */ + union { + __le32 rss; /* RSS Hash */ + struct { + __le16 ip_id; /* IP id */ + __le16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; /* ext status/error */ + __le16 length0; /* length of buffer 0 */ + __le16 vlan; /* VLAN tag */ + } middle; + struct { + __le16 header_status; + /* length of buffers 1-3 */ + __le16 length[PS_PAGE_BUFFERS]; + } upper; + __le64 reserved; + } wb; /* writeback */ +}; + +/* Transmit Descriptor */ +struct e1000_tx_desc { + __le64 buffer_addr; /* Address of the descriptor's data buffer */ + union { + __le32 data; + struct { + __le16 length; /* Data buffer length */ + u8 cso; /* Checksum offset */ + u8 cmd; /* Descriptor control */ + } flags; + } lower; + union { + __le32 data; + struct { + u8 status; /* Descriptor status */ + u8 css; /* Checksum start */ + __le16 special; + } fields; + } upper; +}; + +/* Offload Context Descriptor */ +struct e1000_context_desc { + union { + __le32 ip_config; + struct { + u8 ipcss; /* IP checksum start */ + u8 ipcso; /* IP checksum offset */ + __le16 ipcse; /* IP checksum end */ + } ip_fields; + } lower_setup; + union { + __le32 tcp_config; + struct { + u8 tucss; /* TCP checksum start */ + u8 tucso; /* TCP checksum offset */ + __le16 tucse; /* TCP checksum end */ + } tcp_fields; + } upper_setup; + __le32 cmd_and_length; + union { + __le32 data; + struct { + u8 status; /* Descriptor status */ + u8 hdr_len; /* Header length */ + __le16 mss; /* Maximum segment size */ + } fields; + } tcp_seg_setup; +}; + +/* Offload data descriptor */ +struct e1000_data_desc { + __le64 buffer_addr; /* Address of the descriptor's buffer address */ + union { + __le32 data; + struct { + __le16 length; /* Data buffer length */ + u8 typ_len_ext; + u8 cmd; + } flags; + } lower; + union { + __le32 data; + struct { + u8 status; /* Descriptor status */ + u8 popts; /* Packet Options */ + __le16 special; + } fields; + } upper; +}; + +/* Statistics counters collected by the MAC */ +struct e1000_hw_stats { + u64 crcerrs; + u64 algnerrc; + u64 symerrs; + u64 rxerrc; + u64 mpc; + u64 scc; + u64 ecol; + u64 mcc; + u64 latecol; + u64 colc; + u64 dc; + u64 tncrs; + u64 sec; + u64 cexterr; + u64 rlec; + u64 xonrxc; + u64 xontxc; + u64 xoffrxc; + u64 xofftxc; + u64 fcruc; + u64 prc64; + u64 prc127; + u64 prc255; + u64 prc511; + u64 prc1023; + u64 prc1522; + u64 gprc; + u64 bprc; + u64 mprc; + u64 gptc; + u64 gorc; + u64 gotc; + u64 rnbc; + u64 ruc; + u64 rfc; + u64 roc; + u64 rjc; + u64 mgprc; + u64 mgpdc; + u64 mgptc; + u64 tor; + u64 tot; + u64 tpr; + u64 tpt; + u64 ptc64; + u64 ptc127; + u64 ptc255; + u64 ptc511; + u64 ptc1023; + u64 ptc1522; + u64 mptc; + u64 bptc; + u64 tsctc; + u64 tsctfc; + u64 iac; + u64 icrxptc; + u64 icrxatc; + u64 ictxptc; + u64 ictxatc; + u64 ictxqec; + u64 ictxqmtc; + u64 icrxdmtc; + u64 icrxoc; + u64 cbtmpc; + u64 htdpmc; + u64 cbrdpc; + u64 cbrmpc; + u64 rpthc; + u64 hgptc; + u64 htcbdpc; + u64 hgorc; + u64 hgotc; + u64 lenerrs; + u64 scvpc; + u64 hrmpc; + u64 doosync; + u64 o2bgptc; + u64 o2bspc; + u64 b2ospc; + u64 b2ogprc; +}; + +struct e1000_vf_stats { + u64 base_gprc; + u64 base_gptc; + u64 base_gorc; + 
u64 base_gotc; + u64 base_mprc; + u64 base_gotlbc; + u64 base_gptlbc; + u64 base_gorlbc; + u64 base_gprlbc; + + u32 last_gprc; + u32 last_gptc; + u32 last_gorc; + u32 last_gotc; + u32 last_mprc; + u32 last_gotlbc; + u32 last_gptlbc; + u32 last_gorlbc; + u32 last_gprlbc; + + u64 gprc; + u64 gptc; + u64 gorc; + u64 gotc; + u64 mprc; + u64 gotlbc; + u64 gptlbc; + u64 gorlbc; + u64 gprlbc; +}; + +struct e1000_phy_stats { + u32 idle_errors; + u32 receive_errors; +}; + +struct e1000_host_mng_dhcp_cookie { + u32 signature; + u8 status; + u8 reserved0; + u16 vlan_id; + u32 reserved1; + u16 reserved2; + u8 reserved3; + u8 checksum; +}; + +/* Host Interface "Rev 1" */ +struct e1000_host_command_header { + u8 command_id; + u8 command_length; + u8 command_options; + u8 checksum; +}; + +#define E1000_HI_MAX_DATA_LENGTH 252 +struct e1000_host_command_info { + struct e1000_host_command_header command_header; + u8 command_data[E1000_HI_MAX_DATA_LENGTH]; +}; + +/* Host Interface "Rev 2" */ +struct e1000_host_mng_command_header { + u8 command_id; + u8 checksum; + u16 reserved1; + u16 reserved2; + u16 command_length; +}; + +#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8 +struct e1000_host_mng_command_info { + struct e1000_host_mng_command_header command_header; + u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH]; +}; + +#include "e1000_mac.h" +#include "e1000_phy.h" +#include "e1000_nvm.h" +#include "e1000_manage.h" +#include "e1000_mbx.h" + +/* Function pointers for the MAC. */ +struct e1000_mac_operations { + s32 (*init_params)(struct e1000_hw *); + s32 (*id_led_init)(struct e1000_hw *); + s32 (*blink_led)(struct e1000_hw *); + bool (*check_mng_mode)(struct e1000_hw *); + s32 (*check_for_link)(struct e1000_hw *); + s32 (*cleanup_led)(struct e1000_hw *); + void (*clear_hw_cntrs)(struct e1000_hw *); + void (*clear_vfta)(struct e1000_hw *); + s32 (*get_bus_info)(struct e1000_hw *); + void (*set_lan_id)(struct e1000_hw *); + s32 (*get_link_up_info)(struct e1000_hw *, u16 *, u16 *); + s32 (*led_on)(struct e1000_hw *); + s32 (*led_off)(struct e1000_hw *); + void (*update_mc_addr_list)(struct e1000_hw *, u8 *, u32); + s32 (*reset_hw)(struct e1000_hw *); + s32 (*init_hw)(struct e1000_hw *); + void (*shutdown_serdes)(struct e1000_hw *); + void (*power_up_serdes)(struct e1000_hw *); + s32 (*setup_link)(struct e1000_hw *); + s32 (*setup_physical_interface)(struct e1000_hw *); + s32 (*setup_led)(struct e1000_hw *); + void (*write_vfta)(struct e1000_hw *, u32, u32); + void (*config_collision_dist)(struct e1000_hw *); + int (*rar_set)(struct e1000_hw *, u8*, u32); + s32 (*read_mac_addr)(struct e1000_hw *); + s32 (*validate_mdi_setting)(struct e1000_hw *); + s32 (*acquire_swfw_sync)(struct e1000_hw *, u16); + void (*release_swfw_sync)(struct e1000_hw *, u16); +}; + +/* When to use various PHY register access functions: + * + * Func Caller + * Function Does Does When to use + * ~~~~~~~~~~~~ ~~~~~ ~~~~~~ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * X_reg L,P,A n/a for simple PHY reg accesses + * X_reg_locked P,A L for multiple accesses of different regs + * on different pages + * X_reg_page A L,P for multiple accesses of different regs + * on the same page + * + * Where X=[read|write], L=locking, P=sets page, A=register access + * + */ +struct e1000_phy_operations { + s32 (*init_params)(struct e1000_hw *); + s32 (*acquire)(struct e1000_hw *); + s32 (*cfg_on_link_up)(struct e1000_hw *); + s32 (*check_polarity)(struct e1000_hw *); + s32 (*check_reset_block)(struct e1000_hw *); + s32 (*commit)(struct e1000_hw *); + s32 
(*force_speed_duplex)(struct e1000_hw *); + s32 (*get_cfg_done)(struct e1000_hw *hw); + s32 (*get_cable_length)(struct e1000_hw *); + s32 (*get_info)(struct e1000_hw *); + s32 (*set_page)(struct e1000_hw *, u16); + s32 (*read_reg)(struct e1000_hw *, u32, u16 *); + s32 (*read_reg_locked)(struct e1000_hw *, u32, u16 *); + s32 (*read_reg_page)(struct e1000_hw *, u32, u16 *); + void (*release)(struct e1000_hw *); + s32 (*reset)(struct e1000_hw *); + s32 (*set_d0_lplu_state)(struct e1000_hw *, bool); + s32 (*set_d3_lplu_state)(struct e1000_hw *, bool); + s32 (*write_reg)(struct e1000_hw *, u32, u16); + s32 (*write_reg_locked)(struct e1000_hw *, u32, u16); + s32 (*write_reg_page)(struct e1000_hw *, u32, u16); + void (*power_up)(struct e1000_hw *); + void (*power_down)(struct e1000_hw *); + s32 (*read_i2c_byte)(struct e1000_hw *, u8, u8, u8 *); + s32 (*write_i2c_byte)(struct e1000_hw *, u8, u8, u8); +}; + +/* Function pointers for the NVM. */ +struct e1000_nvm_operations { + s32 (*init_params)(struct e1000_hw *); + s32 (*acquire)(struct e1000_hw *); + s32 (*read)(struct e1000_hw *, u16, u16, u16 *); + void (*release)(struct e1000_hw *); + void (*reload)(struct e1000_hw *); + s32 (*update)(struct e1000_hw *); + s32 (*valid_led_default)(struct e1000_hw *, u16 *); + s32 (*validate)(struct e1000_hw *); + s32 (*write)(struct e1000_hw *, u16, u16, u16 *); +}; + +struct e1000_mac_info { + struct e1000_mac_operations ops; + u8 addr[ETH_ADDR_LEN]; + u8 perm_addr[ETH_ADDR_LEN]; + + enum e1000_mac_type type; + + u32 collision_delta; + u32 ledctl_default; + u32 ledctl_mode1; + u32 ledctl_mode2; + u32 mc_filter_type; + u32 tx_packet_delta; + u32 txcw; + + u16 current_ifs_val; + u16 ifs_max_val; + u16 ifs_min_val; + u16 ifs_ratio; + u16 ifs_step_size; + u16 mta_reg_count; + u16 uta_reg_count; + + /* Maximum size of the MTA register table in all supported adapters */ +#define MAX_MTA_REG 128 + u32 mta_shadow[MAX_MTA_REG]; + u16 rar_entry_count; + + u8 forced_speed_duplex; + + bool adaptive_ifs; + bool has_fwsm; + bool arc_subsystem_valid; + bool asf_firmware_present; + bool autoneg; + bool autoneg_failed; + bool get_link_status; + bool in_ifs_mode; + bool report_tx_early; + enum e1000_serdes_link_state serdes_link_state; + bool serdes_has_link; + bool tx_pkt_filtering; +}; + +struct e1000_phy_info { + struct e1000_phy_operations ops; + enum e1000_phy_type type; + + enum e1000_1000t_rx_status local_rx; + enum e1000_1000t_rx_status remote_rx; + enum e1000_ms_type ms_type; + enum e1000_ms_type original_ms_type; + enum e1000_rev_polarity cable_polarity; + enum e1000_smart_speed smart_speed; + + u32 addr; + u32 id; + u32 reset_delay_us; /* in usec */ + u32 revision; + + enum e1000_media_type media_type; + + u16 autoneg_advertised; + u16 autoneg_mask; + u16 cable_length; + u16 max_cable_length; + u16 min_cable_length; + + u8 mdix; + + bool disable_polarity_correction; + bool is_mdix; + bool polarity_correction; + bool speed_downgraded; + bool autoneg_wait_to_complete; +}; + +struct e1000_nvm_info { + struct e1000_nvm_operations ops; + enum e1000_nvm_type type; + enum e1000_nvm_override override; + + u32 flash_bank_size; + u32 flash_base_addr; + + u16 word_size; + u16 delay_usec; + u16 address_bits; + u16 opcode_bits; + u16 page_size; +}; + +struct e1000_bus_info { + enum e1000_bus_type type; + enum e1000_bus_speed speed; + enum e1000_bus_width width; + + u16 func; + u16 pci_cmd_word; +}; + +struct e1000_fc_info { + u32 high_water; /* Flow control high-water mark */ + u32 low_water; /* Flow control low-water mark */ + 
u16 pause_time; /* Flow control pause timer */ + u16 refresh_time; /* Flow control refresh timer */ + bool send_xon; /* Flow control send XON */ + bool strict_ieee; /* Strict IEEE mode */ + enum e1000_fc_mode current_mode; /* FC mode in effect */ + enum e1000_fc_mode requested_mode; /* FC mode requested by caller */ +}; + +struct e1000_mbx_operations { + s32 (*init_params)(struct e1000_hw *hw); + s32 (*read)(struct e1000_hw *, u32 *, u16, u16); + s32 (*write)(struct e1000_hw *, u32 *, u16, u16); + s32 (*read_posted)(struct e1000_hw *, u32 *, u16, u16); + s32 (*write_posted)(struct e1000_hw *, u32 *, u16, u16); + s32 (*check_for_msg)(struct e1000_hw *, u16); + s32 (*check_for_ack)(struct e1000_hw *, u16); + s32 (*check_for_rst)(struct e1000_hw *, u16); +}; + +struct e1000_mbx_stats { + u32 msgs_tx; + u32 msgs_rx; + + u32 acks; + u32 reqs; + u32 rsts; +}; + +struct e1000_mbx_info { + struct e1000_mbx_operations ops; + struct e1000_mbx_stats stats; + u32 timeout; + u32 usec_delay; + u16 size; +}; + +struct e1000_dev_spec_82541 { + enum e1000_dsp_config dsp_config; + enum e1000_ffe_config ffe_config; + u16 spd_default; + bool phy_init_script; +}; + +struct e1000_dev_spec_82542 { + bool dma_fairness; +}; + +struct e1000_dev_spec_82543 { + u32 tbi_compatibility; + bool dma_fairness; + bool init_phy_disabled; +}; + +struct e1000_dev_spec_82571 { + bool laa_is_present; + u32 smb_counter; + E1000_MUTEX swflag_mutex; +}; + +struct e1000_dev_spec_80003es2lan { + bool mdic_wa_enable; +}; + +struct e1000_shadow_ram { + u16 value; + bool modified; +}; + +#define E1000_SHADOW_RAM_WORDS 2048 + +#ifdef ULP_SUPPORT +/* I218 PHY Ultra Low Power (ULP) states */ +enum e1000_ulp_state { + e1000_ulp_state_unknown, + e1000_ulp_state_off, + e1000_ulp_state_on, +}; + +#endif /* ULP_SUPPORT */ +struct e1000_dev_spec_ich8lan { + bool kmrn_lock_loss_workaround_enabled; + struct e1000_shadow_ram shadow_ram[E1000_SHADOW_RAM_WORDS]; + E1000_MUTEX nvm_mutex; + E1000_MUTEX swflag_mutex; + bool nvm_k1_enabled; + bool disable_k1_off; + bool eee_disable; + u16 eee_lp_ability; +#ifdef ULP_SUPPORT + enum e1000_ulp_state ulp_state; + bool ulp_capability_disabled; + bool during_suspend_flow; + bool during_dpg_exit; +#endif /* ULP_SUPPORT */ + u16 lat_enc; + u16 max_ltr_enc; + bool smbus_disable; +}; + +struct e1000_dev_spec_82575 { + bool sgmii_active; + bool global_device_reset; + bool eee_disable; + bool module_plugged; + bool clear_semaphore_once; + u32 mtu; + struct sfp_e1000_flags eth_flags; + u8 media_port; + bool media_changed; +}; + +struct e1000_dev_spec_vf { + u32 vf_number; + u32 v2p_mailbox; +}; + +struct e1000_hw { + void *back; + + u8 *hw_addr; + u8 *flash_address; + unsigned long io_base; + + struct e1000_mac_info mac; + struct e1000_fc_info fc; + struct e1000_phy_info phy; + struct e1000_nvm_info nvm; + struct e1000_bus_info bus; + struct e1000_mbx_info mbx; + struct e1000_host_mng_dhcp_cookie mng_cookie; + + union { + struct e1000_dev_spec_82541 _82541; + struct e1000_dev_spec_82542 _82542; + struct e1000_dev_spec_82543 _82543; + struct e1000_dev_spec_82571 _82571; + struct e1000_dev_spec_80003es2lan _80003es2lan; + struct e1000_dev_spec_ich8lan ich8lan; + struct e1000_dev_spec_82575 _82575; + struct e1000_dev_spec_vf vf; + } dev_spec; + + u16 device_id; + u16 subsystem_vendor_id; + u16 subsystem_device_id; + u16 vendor_id; + + u8 revision_id; +}; + +#include "e1000_82541.h" +#include "e1000_82543.h" +#include "e1000_82571.h" +#include "e1000_80003es2lan.h" +#include "e1000_ich8lan.h" +#include "e1000_82575.h" 
+#include "e1000_i210.h" + +/* These functions must be implemented by drivers */ +void e1000_pci_clear_mwi(struct e1000_hw *hw); +void e1000_pci_set_mwi(struct e1000_hw *hw); +s32 e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value); +s32 e1000_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value); +void e1000_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value); +void e1000_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value); + +#endif diff --git a/src/spdk/dpdk/drivers/net/e1000/base/e1000_i210.c b/src/spdk/dpdk/drivers/net/e1000/base/e1000_i210.c new file mode 100644 index 000000000..9298223c3 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/e1000/base/e1000_i210.c @@ -0,0 +1,1005 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001 - 2015 Intel Corporation + */ + +#include "e1000_api.h" + + +STATIC s32 e1000_acquire_nvm_i210(struct e1000_hw *hw); +STATIC void e1000_release_nvm_i210(struct e1000_hw *hw); +STATIC s32 e1000_get_hw_semaphore_i210(struct e1000_hw *hw); +STATIC s32 e1000_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data); +STATIC s32 e1000_pool_flash_update_done_i210(struct e1000_hw *hw); +STATIC s32 e1000_valid_led_default_i210(struct e1000_hw *hw, u16 *data); + +/** + * e1000_acquire_nvm_i210 - Request for access to EEPROM + * @hw: pointer to the HW structure + * + * Acquire the necessary semaphores for exclusive access to the EEPROM. + * Set the EEPROM access request bit and wait for EEPROM access grant bit. + * Return successful if access grant bit set, else clear the request for + * EEPROM access and return -E1000_ERR_NVM (-1). + **/ +STATIC s32 e1000_acquire_nvm_i210(struct e1000_hw *hw) +{ + s32 ret_val; + + DEBUGFUNC("e1000_acquire_nvm_i210"); + + ret_val = e1000_acquire_swfw_sync_i210(hw, E1000_SWFW_EEP_SM); + + return ret_val; +} + +/** + * e1000_release_nvm_i210 - Release exclusive access to EEPROM + * @hw: pointer to the HW structure + * + * Stop any current commands to the EEPROM and clear the EEPROM request bit, + * then release the semaphores acquired. + **/ +STATIC void e1000_release_nvm_i210(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_release_nvm_i210"); + + e1000_release_swfw_sync_i210(hw, E1000_SWFW_EEP_SM); +} + +/** + * e1000_acquire_swfw_sync_i210 - Acquire SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to acquire + * + * Acquire the SW/FW semaphore to access the PHY or NVM. The mask + * will also specify which port we're acquiring the lock for. 
+ **/ +s32 e1000_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask) +{ + u32 swfw_sync; + u32 swmask = mask; + u32 fwmask = mask << 16; + s32 ret_val = E1000_SUCCESS; + s32 i = 0, timeout = 200; /* FIXME: find real value to use here */ + + DEBUGFUNC("e1000_acquire_swfw_sync_i210"); + + while (i < timeout) { + if (e1000_get_hw_semaphore_i210(hw)) { + ret_val = -E1000_ERR_SWFW_SYNC; + goto out; + } + + swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC); + if (!(swfw_sync & (fwmask | swmask))) + break; + + /* + * Firmware currently using resource (fwmask) + * or other software thread using resource (swmask) + */ + e1000_put_hw_semaphore_generic(hw); + msec_delay_irq(5); + i++; + } + + if (i == timeout) { + DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n"); + ret_val = -E1000_ERR_SWFW_SYNC; + goto out; + } + + swfw_sync |= swmask; + E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync); + + e1000_put_hw_semaphore_generic(hw); + +out: + return ret_val; +} + +/** + * e1000_release_swfw_sync_i210 - Release SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to acquire + * + * Release the SW/FW semaphore used to access the PHY or NVM. The mask + * will also specify which port we're releasing the lock for. + **/ +void e1000_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask) +{ + u32 swfw_sync; + + DEBUGFUNC("e1000_release_swfw_sync_i210"); + + while (e1000_get_hw_semaphore_i210(hw) != E1000_SUCCESS) + ; /* Empty */ + + swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC); + swfw_sync &= ~mask; + E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync); + + e1000_put_hw_semaphore_generic(hw); +} + +/** + * e1000_get_hw_semaphore_i210 - Acquire hardware semaphore + * @hw: pointer to the HW structure + * + * Acquire the HW semaphore to access the PHY or NVM + **/ +STATIC s32 e1000_get_hw_semaphore_i210(struct e1000_hw *hw) +{ + u32 swsm; + s32 timeout = hw->nvm.word_size + 1; + s32 i = 0; + + DEBUGFUNC("e1000_get_hw_semaphore_i210"); + + /* Get the SW semaphore */ + while (i < timeout) { + swsm = E1000_READ_REG(hw, E1000_SWSM); + if (!(swsm & E1000_SWSM_SMBI)) + break; + + usec_delay(50); + i++; + } + + if (i == timeout) { + /* In rare circumstances, the SW semaphore may already be held + * unintentionally. Clear the semaphore once before giving up. + */ + if (hw->dev_spec._82575.clear_semaphore_once) { + hw->dev_spec._82575.clear_semaphore_once = false; + e1000_put_hw_semaphore_generic(hw); + for (i = 0; i < timeout; i++) { + swsm = E1000_READ_REG(hw, E1000_SWSM); + if (!(swsm & E1000_SWSM_SMBI)) + break; + + usec_delay(50); + } + } + + /* If we do not have the semaphore here, we have to give up. */ + if (i == timeout) { + DEBUGOUT("Driver can't access device - SMBI bit is set.\n"); + return -E1000_ERR_NVM; + } + } + + /* Get the FW semaphore. 
*/ + for (i = 0; i < timeout; i++) { + swsm = E1000_READ_REG(hw, E1000_SWSM); + E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_SWESMBI); + + /* Semaphore acquired if bit latched */ + if (E1000_READ_REG(hw, E1000_SWSM) & E1000_SWSM_SWESMBI) + break; + + usec_delay(50); + } + + if (i == timeout) { + /* Release semaphores */ + e1000_put_hw_semaphore_generic(hw); + DEBUGOUT("Driver can't access the NVM\n"); + return -E1000_ERR_NVM; + } + + return E1000_SUCCESS; +} + +/** + * e1000_read_nvm_srrd_i210 - Reads Shadow Ram using EERD register + * @hw: pointer to the HW structure + * @offset: offset of word in the Shadow Ram to read + * @words: number of words to read + * @data: word read from the Shadow Ram + * + * Reads a 16 bit word from the Shadow Ram using the EERD register. + * Uses necessary synchronization semaphores. + **/ +s32 e1000_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + s32 status = E1000_SUCCESS; + u16 i, count; + + DEBUGFUNC("e1000_read_nvm_srrd_i210"); + + /* We cannot hold synchronization semaphores for too long, + * because of forceful takeover procedure. However it is more efficient + * to read in bursts than synchronizing access for each word. */ + for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) { + count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ? + E1000_EERD_EEWR_MAX_COUNT : (words - i); + if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) { + status = e1000_read_nvm_eerd(hw, offset, count, + data + i); + hw->nvm.ops.release(hw); + } else { + status = E1000_ERR_SWFW_SYNC; + } + + if (status != E1000_SUCCESS) + break; + } + + return status; +} + +/** + * e1000_write_nvm_srwr_i210 - Write to Shadow RAM using EEWR + * @hw: pointer to the HW structure + * @offset: offset within the Shadow RAM to be written to + * @words: number of words to write + * @data: 16 bit word(s) to be written to the Shadow RAM + * + * Writes data to Shadow RAM at offset using EEWR register. + * + * If e1000_update_nvm_checksum is not called after this function , the + * data will not be committed to FLASH and also Shadow RAM will most likely + * contain an invalid checksum. + * + * If error code is returned, data and Shadow RAM may be inconsistent - buffer + * partially written. + **/ +s32 e1000_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + s32 status = E1000_SUCCESS; + u16 i, count; + + DEBUGFUNC("e1000_write_nvm_srwr_i210"); + + /* We cannot hold synchronization semaphores for too long, + * because of forceful takeover procedure. However it is more efficient + * to write in bursts than synchronizing access for each word. */ + for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) { + count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ? + E1000_EERD_EEWR_MAX_COUNT : (words - i); + if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) { + status = e1000_write_nvm_srwr(hw, offset, count, + data + i); + hw->nvm.ops.release(hw); + } else { + status = E1000_ERR_SWFW_SYNC; + } + + if (status != E1000_SUCCESS) + break; + } + + return status; +} + +/** + * e1000_write_nvm_srwr - Write to Shadow Ram using EEWR + * @hw: pointer to the HW structure + * @offset: offset within the Shadow Ram to be written to + * @words: number of words to write + * @data: 16 bit word(s) to be written to the Shadow Ram + * + * Writes data to Shadow Ram at offset using EEWR register. + * + * If e1000_update_nvm_checksum is not called after this function , the + * Shadow Ram will most likely contain an invalid checksum. 
+ **/ +STATIC s32 e1000_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 i, k, eewr = 0; + u32 attempts = 100000; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_write_nvm_srwr"); + + /* + * A check for invalid values: offset too large, too many words, + * too many words for the offset, and not enough words. + */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { + DEBUGOUT("nvm parameter(s) out of bounds\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + + for (i = 0; i < words; i++) { + eewr = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) | + (data[i] << E1000_NVM_RW_REG_DATA) | + E1000_NVM_RW_REG_START; + + E1000_WRITE_REG(hw, E1000_SRWR, eewr); + + for (k = 0; k < attempts; k++) { + if (E1000_NVM_RW_REG_DONE & + E1000_READ_REG(hw, E1000_SRWR)) { + ret_val = E1000_SUCCESS; + break; + } + usec_delay(5); + } + + if (ret_val != E1000_SUCCESS) { + DEBUGOUT("Shadow RAM write EEWR timed out\n"); + break; + } + } + +out: + return ret_val; +} + +/** e1000_read_invm_word_i210 - Reads OTP + * @hw: pointer to the HW structure + * @address: the word address (aka eeprom offset) to read + * @data: pointer to the data read + * + * Reads 16-bit words from the OTP. Return error when the word is not + * stored in OTP. + **/ +STATIC s32 e1000_read_invm_word_i210(struct e1000_hw *hw, u8 address, u16 *data) +{ + s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND; + u32 invm_dword; + u16 i; + u8 record_type, word_address; + + DEBUGFUNC("e1000_read_invm_word_i210"); + + for (i = 0; i < E1000_INVM_SIZE; i++) { + invm_dword = E1000_READ_REG(hw, E1000_INVM_DATA_REG(i)); + /* Get record type */ + record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword); + if (record_type == E1000_INVM_UNINITIALIZED_STRUCTURE) + break; + if (record_type == E1000_INVM_CSR_AUTOLOAD_STRUCTURE) + i += E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS; + if (record_type == E1000_INVM_RSA_KEY_SHA256_STRUCTURE) + i += E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS; + if (record_type == E1000_INVM_WORD_AUTOLOAD_STRUCTURE) { + word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword); + if (word_address == address) { + *data = INVM_DWORD_TO_WORD_DATA(invm_dword); + DEBUGOUT2("Read INVM Word 0x%02x = %x", + address, *data); + status = E1000_SUCCESS; + break; + } + } + } + if (status != E1000_SUCCESS) + DEBUGOUT1("Requested word 0x%02x not found in OTP\n", address); + return status; +} + +/** e1000_read_invm_i210 - Read invm wrapper function for I210/I211 + * @hw: pointer to the HW structure + * @address: the word address (aka eeprom offset) to read + * @data: pointer to the data read + * + * Wrapper function to return data formerly found in the NVM. 
+ **/ +STATIC s32 e1000_read_invm_i210(struct e1000_hw *hw, u16 offset, + u16 E1000_UNUSEDARG words, u16 *data) +{ + s32 ret_val = E1000_SUCCESS; + UNREFERENCED_1PARAMETER(words); + + DEBUGFUNC("e1000_read_invm_i210"); + + /* Only the MAC addr is required to be present in the iNVM */ + switch (offset) { + case NVM_MAC_ADDR: + ret_val = e1000_read_invm_word_i210(hw, (u8)offset, &data[0]); + ret_val |= e1000_read_invm_word_i210(hw, (u8)offset+1, + &data[1]); + ret_val |= e1000_read_invm_word_i210(hw, (u8)offset+2, + &data[2]); + if (ret_val != E1000_SUCCESS) + DEBUGOUT("MAC Addr not found in iNVM\n"); + break; + case NVM_INIT_CTRL_2: + ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data); + if (ret_val != E1000_SUCCESS) { + *data = NVM_INIT_CTRL_2_DEFAULT_I211; + ret_val = E1000_SUCCESS; + } + break; + case NVM_INIT_CTRL_4: + ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data); + if (ret_val != E1000_SUCCESS) { + *data = NVM_INIT_CTRL_4_DEFAULT_I211; + ret_val = E1000_SUCCESS; + } + break; + case NVM_LED_1_CFG: + ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data); + if (ret_val != E1000_SUCCESS) { + *data = NVM_LED_1_CFG_DEFAULT_I211; + ret_val = E1000_SUCCESS; + } + break; + case NVM_LED_0_2_CFG: + ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data); + if (ret_val != E1000_SUCCESS) { + *data = NVM_LED_0_2_CFG_DEFAULT_I211; + ret_val = E1000_SUCCESS; + } + break; + case NVM_ID_LED_SETTINGS: + ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data); + if (ret_val != E1000_SUCCESS) { + *data = ID_LED_RESERVED_FFFF; + ret_val = E1000_SUCCESS; + } + break; + case NVM_SUB_DEV_ID: + *data = hw->subsystem_device_id; + break; + case NVM_SUB_VEN_ID: + *data = hw->subsystem_vendor_id; + break; + case NVM_DEV_ID: + *data = hw->device_id; + break; + case NVM_VEN_ID: + *data = hw->vendor_id; + break; + default: + DEBUGOUT1("NVM word 0x%02x is not mapped.\n", offset); + *data = NVM_RESERVED_WORD; + break; + } + return ret_val; +} + +/** + * e1000_read_invm_version - Reads iNVM version and image type + * @hw: pointer to the HW structure + * @invm_ver: version structure for the version read + * + * Reads iNVM version and image type. 
+ **/ +s32 e1000_read_invm_version(struct e1000_hw *hw, + struct e1000_fw_version *invm_ver) +{ + u32 *record = NULL; + u32 *next_record = NULL; + u32 i = 0; + u32 invm_dword = 0; + u32 invm_blocks = E1000_INVM_SIZE - (E1000_INVM_ULT_BYTES_SIZE / + E1000_INVM_RECORD_SIZE_IN_BYTES); + u32 buffer[E1000_INVM_SIZE]; + s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND; + u16 version = 0; + + DEBUGFUNC("e1000_read_invm_version"); + + /* Read iNVM memory */ + for (i = 0; i < E1000_INVM_SIZE; i++) { + invm_dword = E1000_READ_REG(hw, E1000_INVM_DATA_REG(i)); + buffer[i] = invm_dword; + } + + /* Read version number */ + for (i = 1; i < invm_blocks; i++) { + record = &buffer[invm_blocks - i]; + next_record = &buffer[invm_blocks - i + 1]; + + /* Check if we have first version location used */ + if ((i == 1) && ((*record & E1000_INVM_VER_FIELD_ONE) == 0)) { + version = 0; + status = E1000_SUCCESS; + break; + } + /* Check if we have second version location used */ + else if ((i == 1) && + ((*record & E1000_INVM_VER_FIELD_TWO) == 0)) { + version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3; + status = E1000_SUCCESS; + break; + } + /* + * Check if we have odd version location + * used and it is the last one used + */ + else if ((((*record & E1000_INVM_VER_FIELD_ONE) == 0) && + ((*record & 0x3) == 0)) || (((*record & 0x3) != 0) && + (i != 1))) { + version = (*next_record & E1000_INVM_VER_FIELD_TWO) + >> 13; + status = E1000_SUCCESS; + break; + } + /* + * Check if we have even version location + * used and it is the last one used + */ + else if (((*record & E1000_INVM_VER_FIELD_TWO) == 0) && + ((*record & 0x3) == 0)) { + version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3; + status = E1000_SUCCESS; + break; + } + } + + if (status == E1000_SUCCESS) { + invm_ver->invm_major = (version & E1000_INVM_MAJOR_MASK) + >> E1000_INVM_MAJOR_SHIFT; + invm_ver->invm_minor = version & E1000_INVM_MINOR_MASK; + } + /* Read Image Type */ + for (i = 1; i < invm_blocks; i++) { + record = &buffer[invm_blocks - i]; + next_record = &buffer[invm_blocks - i + 1]; + + /* Check if we have image type in first location used */ + if ((i == 1) && ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) { + invm_ver->invm_img_type = 0; + status = E1000_SUCCESS; + break; + } + /* Check if we have image type in first location used */ + else if ((((*record & 0x3) == 0) && + ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) || + ((((*record & 0x3) != 0) && (i != 1)))) { + invm_ver->invm_img_type = + (*next_record & E1000_INVM_IMGTYPE_FIELD) >> 23; + status = E1000_SUCCESS; + break; + } + } + return status; +} + +/** + * e1000_validate_nvm_checksum_i210 - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Calculates the EEPROM checksum by reading/adding each word of the EEPROM + * and then verifies that the sum of the EEPROM is equal to 0xBABA. + **/ +s32 e1000_validate_nvm_checksum_i210(struct e1000_hw *hw) +{ + s32 status = E1000_SUCCESS; + s32 (*read_op_ptr)(struct e1000_hw *, u16, u16, u16 *); + + DEBUGFUNC("e1000_validate_nvm_checksum_i210"); + + if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) { + + /* + * Replace the read function with semaphore grabbing with + * the one that skips this for a while. + * We have semaphore taken already here. + */ + read_op_ptr = hw->nvm.ops.read; + hw->nvm.ops.read = e1000_read_nvm_eerd; + + status = e1000_validate_nvm_checksum_generic(hw); + + /* Revert original read operation. 
*/ + hw->nvm.ops.read = read_op_ptr; + + hw->nvm.ops.release(hw); + } else { + status = E1000_ERR_SWFW_SYNC; + } + + return status; +} + + +/** + * e1000_update_nvm_checksum_i210 - Update EEPROM checksum + * @hw: pointer to the HW structure + * + * Updates the EEPROM checksum by reading/adding each word of the EEPROM + * up to the checksum. Then calculates the EEPROM checksum and writes the + * value to the EEPROM. Next commit EEPROM data onto the Flash. + **/ +s32 e1000_update_nvm_checksum_i210(struct e1000_hw *hw) +{ + s32 ret_val; + u16 checksum = 0; + u16 i, nvm_data; + + DEBUGFUNC("e1000_update_nvm_checksum_i210"); + + /* + * Read the first word from the EEPROM. If this times out or fails, do + * not continue or we could be in for a very long wait while every + * EEPROM read fails + */ + ret_val = e1000_read_nvm_eerd(hw, 0, 1, &nvm_data); + if (ret_val != E1000_SUCCESS) { + DEBUGOUT("EEPROM read failed\n"); + goto out; + } + + if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) { + /* + * Do not use hw->nvm.ops.write, hw->nvm.ops.read + * because we do not want to take the synchronization + * semaphores twice here. + */ + + for (i = 0; i < NVM_CHECKSUM_REG; i++) { + ret_val = e1000_read_nvm_eerd(hw, i, 1, &nvm_data); + if (ret_val) { + hw->nvm.ops.release(hw); + DEBUGOUT("NVM Read Error while updating checksum.\n"); + goto out; + } + checksum += nvm_data; + } + checksum = (u16) NVM_SUM - checksum; + ret_val = e1000_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1, + &checksum); + if (ret_val != E1000_SUCCESS) { + hw->nvm.ops.release(hw); + DEBUGOUT("NVM Write Error while updating checksum.\n"); + goto out; + } + + hw->nvm.ops.release(hw); + + ret_val = e1000_update_flash_i210(hw); + } else { + ret_val = E1000_ERR_SWFW_SYNC; + } +out: + return ret_val; +} + +/** + * e1000_get_flash_presence_i210 - Check if flash device is detected. + * @hw: pointer to the HW structure + * + **/ +bool e1000_get_flash_presence_i210(struct e1000_hw *hw) +{ + u32 eec = 0; + bool ret_val = false; + + DEBUGFUNC("e1000_get_flash_presence_i210"); + + eec = E1000_READ_REG(hw, E1000_EECD); + + if (eec & E1000_EECD_FLASH_DETECTED_I210) + ret_val = true; + + return ret_val; +} + +/** + * e1000_update_flash_i210 - Commit EEPROM to the flash + * @hw: pointer to the HW structure + * + **/ +s32 e1000_update_flash_i210(struct e1000_hw *hw) +{ + s32 ret_val; + u32 flup; + + DEBUGFUNC("e1000_update_flash_i210"); + + ret_val = e1000_pool_flash_update_done_i210(hw); + if (ret_val == -E1000_ERR_NVM) { + DEBUGOUT("Flash update time out\n"); + goto out; + } + + flup = E1000_READ_REG(hw, E1000_EECD) | E1000_EECD_FLUPD_I210; + E1000_WRITE_REG(hw, E1000_EECD, flup); + + ret_val = e1000_pool_flash_update_done_i210(hw); + if (ret_val == E1000_SUCCESS) + DEBUGOUT("Flash update complete\n"); + else + DEBUGOUT("Flash update time out\n"); + +out: + return ret_val; +} + +/** + * e1000_pool_flash_update_done_i210 - Pool FLUDONE status. + * @hw: pointer to the HW structure + * + **/ +s32 e1000_pool_flash_update_done_i210(struct e1000_hw *hw) +{ + s32 ret_val = -E1000_ERR_NVM; + u32 i, reg; + + DEBUGFUNC("e1000_pool_flash_update_done_i210"); + + for (i = 0; i < E1000_FLUDONE_ATTEMPTS; i++) { + reg = E1000_READ_REG(hw, E1000_EECD); + if (reg & E1000_EECD_FLUDONE_I210) { + ret_val = E1000_SUCCESS; + break; + } + usec_delay(5); + } + + return ret_val; +} + +/** + * e1000_init_nvm_params_i210 - Initialize i210 NVM function pointers + * @hw: pointer to the HW structure + * + * Initialize the i210/i211 NVM parameters and function pointers. 
+ **/ +STATIC s32 e1000_init_nvm_params_i210(struct e1000_hw *hw) +{ + s32 ret_val; + struct e1000_nvm_info *nvm = &hw->nvm; + + DEBUGFUNC("e1000_init_nvm_params_i210"); + + ret_val = e1000_init_nvm_params_82575(hw); + nvm->ops.acquire = e1000_acquire_nvm_i210; + nvm->ops.release = e1000_release_nvm_i210; + nvm->ops.valid_led_default = e1000_valid_led_default_i210; + if (e1000_get_flash_presence_i210(hw)) { + hw->nvm.type = e1000_nvm_flash_hw; + nvm->ops.read = e1000_read_nvm_srrd_i210; + nvm->ops.write = e1000_write_nvm_srwr_i210; + nvm->ops.validate = e1000_validate_nvm_checksum_i210; + nvm->ops.update = e1000_update_nvm_checksum_i210; + } else { + hw->nvm.type = e1000_nvm_invm; + nvm->ops.read = e1000_read_invm_i210; + nvm->ops.write = e1000_null_write_nvm; + nvm->ops.validate = e1000_null_ops_generic; + nvm->ops.update = e1000_null_ops_generic; + } + return ret_val; +} + +/** + * e1000_init_function_pointers_i210 - Init func ptrs. + * @hw: pointer to the HW structure + * + * Called to initialize all function pointers and parameters. + **/ +void e1000_init_function_pointers_i210(struct e1000_hw *hw) +{ + e1000_init_function_pointers_82575(hw); + hw->nvm.ops.init_params = e1000_init_nvm_params_i210; + + return; +} + +/** + * e1000_valid_led_default_i210 - Verify a valid default LED config + * @hw: pointer to the HW structure + * @data: pointer to the NVM (EEPROM) + * + * Read the EEPROM for the current default LED configuration. If the + * LED configuration is not valid, set to a valid LED configuration. + **/ +STATIC s32 e1000_valid_led_default_i210(struct e1000_hw *hw, u16 *data) +{ + s32 ret_val; + + DEBUGFUNC("e1000_valid_led_default_i210"); + + ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + goto out; + } + + if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) { + switch (hw->phy.media_type) { + case e1000_media_type_internal_serdes: + *data = ID_LED_DEFAULT_I210_SERDES; + break; + case e1000_media_type_copper: + default: + *data = ID_LED_DEFAULT_I210; + break; + } + } +out: + return ret_val; +} + +/** + * __e1000_access_xmdio_reg - Read/write XMDIO register + * @hw: pointer to the HW structure + * @address: XMDIO address to program + * @dev_addr: device address to program + * @data: pointer to value to read/write from/to the XMDIO address + * @read: boolean flag to indicate read or write + **/ +STATIC s32 __e1000_access_xmdio_reg(struct e1000_hw *hw, u16 address, + u8 dev_addr, u16 *data, bool read) +{ + s32 ret_val; + + DEBUGFUNC("__e1000_access_xmdio_reg"); + + ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, dev_addr); + if (ret_val) + return ret_val; + + ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAAD, address); + if (ret_val) + return ret_val; + + ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, E1000_MMDAC_FUNC_DATA | + dev_addr); + if (ret_val) + return ret_val; + + if (read) + ret_val = hw->phy.ops.read_reg(hw, E1000_MMDAAD, data); + else + ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAAD, *data); + if (ret_val) + return ret_val; + + /* Recalibrate the device back to 0 */ + ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, 0); + if (ret_val) + return ret_val; + + return ret_val; +} + +/** + * e1000_read_xmdio_reg - Read XMDIO register + * @hw: pointer to the HW structure + * @addr: XMDIO address to program + * @dev_addr: device address to program + * @data: value to be read from the EMI address + **/ +s32 e1000_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 *data) +{ + 
DEBUGFUNC("e1000_read_xmdio_reg"); + + return __e1000_access_xmdio_reg(hw, addr, dev_addr, data, true); +} + +/** + * e1000_write_xmdio_reg - Write XMDIO register + * @hw: pointer to the HW structure + * @addr: XMDIO address to program + * @dev_addr: device address to program + * @data: value to be written to the XMDIO address + **/ +s32 e1000_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data) +{ + DEBUGFUNC("e1000_read_xmdio_reg"); + + return __e1000_access_xmdio_reg(hw, addr, dev_addr, &data, false); +} + +/** + * e1000_pll_workaround_i210 + * @hw: pointer to the HW structure + * + * Works around an errata in the PLL circuit where it occasionally + * provides the wrong clock frequency after power up. + **/ +STATIC s32 e1000_pll_workaround_i210(struct e1000_hw *hw) +{ + s32 ret_val; + u32 wuc, mdicnfg, ctrl, ctrl_ext, reg_val; + u16 nvm_word, phy_word, pci_word, tmp_nvm; + int i; + + /* Get and set needed register values */ + wuc = E1000_READ_REG(hw, E1000_WUC); + mdicnfg = E1000_READ_REG(hw, E1000_MDICNFG); + reg_val = mdicnfg & ~E1000_MDICNFG_EXT_MDIO; + E1000_WRITE_REG(hw, E1000_MDICNFG, reg_val); + + /* Get data from NVM, or set default */ + ret_val = e1000_read_invm_word_i210(hw, E1000_INVM_AUTOLOAD, + &nvm_word); + if (ret_val != E1000_SUCCESS) + nvm_word = E1000_INVM_DEFAULT_AL; + tmp_nvm = nvm_word | E1000_INVM_PLL_WO_VAL; + phy_word = E1000_PHY_PLL_UNCONF; + for (i = 0; i < E1000_MAX_PLL_TRIES; i++) { + /* check current state directly from internal PHY */ + e1000_read_phy_reg_gs40g(hw, (E1000_PHY_PLL_FREQ_PAGE | + E1000_PHY_PLL_FREQ_REG), &phy_word); + if ((phy_word & E1000_PHY_PLL_UNCONF) + != E1000_PHY_PLL_UNCONF) { + ret_val = E1000_SUCCESS; + break; + } else { + ret_val = -E1000_ERR_PHY; + } + /* directly reset the internal PHY */ + ctrl = E1000_READ_REG(hw, E1000_CTRL); + E1000_WRITE_REG(hw, E1000_CTRL, ctrl|E1000_CTRL_PHY_RST); + + ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); + ctrl_ext |= (E1000_CTRL_EXT_PHYPDEN | E1000_CTRL_EXT_SDLPE); + E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); + + E1000_WRITE_REG(hw, E1000_WUC, 0); + reg_val = (E1000_INVM_AUTOLOAD << 4) | (tmp_nvm << 16); + E1000_WRITE_REG(hw, E1000_EEARBC_I210, reg_val); + + e1000_read_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word); + pci_word |= E1000_PCI_PMCSR_D3; + e1000_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word); + msec_delay(1); + pci_word &= ~E1000_PCI_PMCSR_D3; + e1000_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word); + reg_val = (E1000_INVM_AUTOLOAD << 4) | (nvm_word << 16); + E1000_WRITE_REG(hw, E1000_EEARBC_I210, reg_val); + + /* restore WUC register */ + E1000_WRITE_REG(hw, E1000_WUC, wuc); + } + /* restore MDICNFG setting */ + E1000_WRITE_REG(hw, E1000_MDICNFG, mdicnfg); + return ret_val; +} + +/** + * e1000_get_cfg_done_i210 - Read config done bit + * @hw: pointer to the HW structure + * + * Read the management control register for the config done bit for + * completion status. NOTE: silicon which is EEPROM-less will fail trying + * to read the config done bit, so an error is *ONLY* logged and returns + * E1000_SUCCESS. If we were to return with error, EEPROM-less silicon + * would not be able to be reset or change link. 
+ **/ +STATIC s32 e1000_get_cfg_done_i210(struct e1000_hw *hw) +{ + s32 timeout = PHY_CFG_TIMEOUT; + u32 mask = E1000_NVM_CFG_DONE_PORT_0; + + DEBUGFUNC("e1000_get_cfg_done_i210"); + + while (timeout) { + if (E1000_READ_REG(hw, E1000_EEMNGCTL_I210) & mask) + break; + msec_delay(1); + timeout--; + } + if (!timeout) + DEBUGOUT("MNG configuration cycle has not completed.\n"); + + return E1000_SUCCESS; +} + +/** + * e1000_init_hw_i210 - Init hw for I210/I211 + * @hw: pointer to the HW structure + * + * Called to initialize hw for i210 hw family. + **/ +s32 e1000_init_hw_i210(struct e1000_hw *hw) +{ + s32 ret_val; + + DEBUGFUNC("e1000_init_hw_i210"); + if ((hw->mac.type >= e1000_i210) && + !(e1000_get_flash_presence_i210(hw))) { + ret_val = e1000_pll_workaround_i210(hw); + if (ret_val != E1000_SUCCESS) + return ret_val; + } + hw->phy.ops.get_cfg_done = e1000_get_cfg_done_i210; + ret_val = e1000_init_hw_82575(hw); + return ret_val; +} diff --git a/src/spdk/dpdk/drivers/net/e1000/base/e1000_i210.h b/src/spdk/dpdk/drivers/net/e1000/base/e1000_i210.h new file mode 100644 index 000000000..c6aa2a17b --- /dev/null +++ b/src/spdk/dpdk/drivers/net/e1000/base/e1000_i210.h @@ -0,0 +1,81 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001 - 2015 Intel Corporation + */ + +#ifndef _E1000_I210_H_ +#define _E1000_I210_H_ + +bool e1000_get_flash_presence_i210(struct e1000_hw *hw); +s32 e1000_update_flash_i210(struct e1000_hw *hw); +s32 e1000_update_nvm_checksum_i210(struct e1000_hw *hw); +s32 e1000_validate_nvm_checksum_i210(struct e1000_hw *hw); +s32 e1000_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data); +s32 e1000_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data); +s32 e1000_read_invm_version(struct e1000_hw *hw, + struct e1000_fw_version *invm_ver); +s32 e1000_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask); +void e1000_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask); +s32 e1000_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, + u16 *data); +s32 e1000_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, + u16 data); +s32 e1000_init_hw_i210(struct e1000_hw *hw); + +#define E1000_STM_OPCODE 0xDB00 +#define E1000_EEPROM_FLASH_SIZE_WORD 0x11 + +#define INVM_DWORD_TO_RECORD_TYPE(invm_dword) \ + (u8)((invm_dword) & 0x7) +#define INVM_DWORD_TO_WORD_ADDRESS(invm_dword) \ + (u8)(((invm_dword) & 0x0000FE00) >> 9) +#define INVM_DWORD_TO_WORD_DATA(invm_dword) \ + (u16)(((invm_dword) & 0xFFFF0000) >> 16) + +enum E1000_INVM_STRUCTURE_TYPE { + E1000_INVM_UNINITIALIZED_STRUCTURE = 0x00, + E1000_INVM_WORD_AUTOLOAD_STRUCTURE = 0x01, + E1000_INVM_CSR_AUTOLOAD_STRUCTURE = 0x02, + E1000_INVM_PHY_REGISTER_AUTOLOAD_STRUCTURE = 0x03, + E1000_INVM_RSA_KEY_SHA256_STRUCTURE = 0x04, + E1000_INVM_INVALIDATED_STRUCTURE = 0x0F, +}; + +#define E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS 8 +#define E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS 1 +#define E1000_INVM_ULT_BYTES_SIZE 8 +#define E1000_INVM_RECORD_SIZE_IN_BYTES 4 +#define E1000_INVM_VER_FIELD_ONE 0x1FF8 +#define E1000_INVM_VER_FIELD_TWO 0x7FE000 +#define E1000_INVM_IMGTYPE_FIELD 0x1F800000 + +#define E1000_INVM_MAJOR_MASK 0x3F0 +#define E1000_INVM_MINOR_MASK 0xF +#define E1000_INVM_MAJOR_SHIFT 4 + +#define ID_LED_DEFAULT_I210 ((ID_LED_OFF1_ON2 << 8) | \ + (ID_LED_DEF1_DEF2 << 4) | \ + (ID_LED_OFF1_OFF2)) +#define ID_LED_DEFAULT_I210_SERDES ((ID_LED_DEF1_DEF2 << 8) | \ + (ID_LED_DEF1_DEF2 << 4) | \ + (ID_LED_OFF1_ON2)) + +/* NVM offset defaults for I211 devices */ +#define 
NVM_INIT_CTRL_2_DEFAULT_I211 0X7243 +#define NVM_INIT_CTRL_4_DEFAULT_I211 0x00C1 +#define NVM_LED_1_CFG_DEFAULT_I211 0x0184 +#define NVM_LED_0_2_CFG_DEFAULT_I211 0x200C + +/* PLL Defines */ +#define E1000_PCI_PMCSR 0x44 +#define E1000_PCI_PMCSR_D3 0x03 +#define E1000_MAX_PLL_TRIES 5 +#define E1000_PHY_PLL_UNCONF 0xFF +#define E1000_PHY_PLL_FREQ_PAGE 0xFC0000 +#define E1000_PHY_PLL_FREQ_REG 0x000E +#define E1000_INVM_DEFAULT_AL 0x202F +#define E1000_INVM_AUTOLOAD 0x0A +#define E1000_INVM_PLL_WO_VAL 0x0010 + +#endif diff --git a/src/spdk/dpdk/drivers/net/e1000/base/e1000_ich8lan.c b/src/spdk/dpdk/drivers/net/e1000/base/e1000_ich8lan.c new file mode 100644 index 000000000..5241cf698 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/e1000/base/e1000_ich8lan.c @@ -0,0 +1,6096 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001 - 2015 Intel Corporation + */ + +/* 82562G 10/100 Network Connection + * 82562G-2 10/100 Network Connection + * 82562GT 10/100 Network Connection + * 82562GT-2 10/100 Network Connection + * 82562V 10/100 Network Connection + * 82562V-2 10/100 Network Connection + * 82566DC-2 Gigabit Network Connection + * 82566DC Gigabit Network Connection + * 82566DM-2 Gigabit Network Connection + * 82566DM Gigabit Network Connection + * 82566MC Gigabit Network Connection + * 82566MM Gigabit Network Connection + * 82567LM Gigabit Network Connection + * 82567LF Gigabit Network Connection + * 82567V Gigabit Network Connection + * 82567LM-2 Gigabit Network Connection + * 82567LF-2 Gigabit Network Connection + * 82567V-2 Gigabit Network Connection + * 82567LF-3 Gigabit Network Connection + * 82567LM-3 Gigabit Network Connection + * 82567LM-4 Gigabit Network Connection + * 82577LM Gigabit Network Connection + * 82577LC Gigabit Network Connection + * 82578DM Gigabit Network Connection + * 82578DC Gigabit Network Connection + * 82579LM Gigabit Network Connection + * 82579V Gigabit Network Connection + * Ethernet Connection I217-LM + * Ethernet Connection I217-V + * Ethernet Connection I218-V + * Ethernet Connection I218-LM + * Ethernet Connection (2) I218-LM + * Ethernet Connection (2) I218-V + * Ethernet Connection (3) I218-LM + * Ethernet Connection (3) I218-V + */ + +#include "e1000_api.h" + +STATIC s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state); +STATIC s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw); +STATIC void e1000_release_swflag_ich8lan(struct e1000_hw *hw); +STATIC s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw); +STATIC void e1000_release_nvm_ich8lan(struct e1000_hw *hw); +STATIC bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw); +STATIC bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw); +STATIC int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index); +STATIC int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index); +STATIC s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw); +#ifndef NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT +STATIC void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw, + u8 *mc_addr_list, + u32 mc_addr_count); +#endif /* NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT */ +STATIC s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw); +STATIC s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw); +STATIC s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active); +STATIC s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, + bool active); +STATIC s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, + bool active); +STATIC s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, + 
u16 words, u16 *data); +STATIC s32 e1000_read_nvm_spt(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data); +STATIC s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data); +STATIC s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw); +STATIC s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw); +STATIC s32 e1000_update_nvm_checksum_spt(struct e1000_hw *hw); +STATIC s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, + u16 *data); +STATIC s32 e1000_id_led_init_pchlan(struct e1000_hw *hw); +STATIC s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw); +STATIC s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw); +STATIC s32 e1000_init_hw_ich8lan(struct e1000_hw *hw); +STATIC s32 e1000_setup_link_ich8lan(struct e1000_hw *hw); +STATIC s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw); +STATIC s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw); +STATIC s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, + u16 *speed, u16 *duplex); +STATIC s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw); +STATIC s32 e1000_led_on_ich8lan(struct e1000_hw *hw); +STATIC s32 e1000_led_off_ich8lan(struct e1000_hw *hw); +STATIC s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link); +STATIC s32 e1000_setup_led_pchlan(struct e1000_hw *hw); +STATIC s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw); +STATIC s32 e1000_led_on_pchlan(struct e1000_hw *hw); +STATIC s32 e1000_led_off_pchlan(struct e1000_hw *hw); +STATIC void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw); +STATIC s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank); +STATIC void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw); +STATIC s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw); +STATIC s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, + u32 offset, u8 *data); +STATIC s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, + u8 size, u16 *data); +STATIC s32 e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset, + u32 *data); +STATIC s32 e1000_read_flash_dword_ich8lan(struct e1000_hw *hw, + u32 offset, u32 *data); +STATIC s32 e1000_write_flash_data32_ich8lan(struct e1000_hw *hw, + u32 offset, u32 data); +STATIC s32 e1000_retry_write_flash_dword_ich8lan(struct e1000_hw *hw, + u32 offset, u32 dword); +STATIC s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, + u32 offset, u16 *data); +STATIC s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw, + u32 offset, u8 byte); +STATIC s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw); +STATIC void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw); +STATIC s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw); +STATIC s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw); +STATIC s32 e1000_k1_workaround_lv(struct e1000_hw *hw); +STATIC void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate); + +/* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */ +/* Offset 04h HSFSTS */ +union ich8_hws_flash_status { + struct ich8_hsfsts { + u16 flcdone:1; /* bit 0 Flash Cycle Done */ + u16 flcerr:1; /* bit 1 Flash Cycle Error */ + u16 dael:1; /* bit 2 Direct Access error Log */ + u16 berasesz:2; /* bit 4:3 Sector Erase Size */ + u16 flcinprog:1; /* bit 5 flash cycle in Progress */ + u16 reserved1:2; /* bit 13:6 Reserved */ + u16 reserved2:6; /* bit 13:6 Reserved */ + u16 fldesvalid:1; /* bit 14 Flash Descriptor Valid */ + u16 flockdn:1; /* bit 15 Flash Config Lock-Down */ + } hsf_status; + u16 regval; +}; + +/* ICH GbE Flash Hardware 
Sequencing Flash control Register bit breakdown */ +/* Offset 06h FLCTL */ +union ich8_hws_flash_ctrl { + struct ich8_hsflctl { + u16 flcgo:1; /* 0 Flash Cycle Go */ + u16 flcycle:2; /* 2:1 Flash Cycle */ + u16 reserved:5; /* 7:3 Reserved */ + u16 fldbcount:2; /* 9:8 Flash Data Byte Count */ + u16 flockdn:6; /* 15:10 Reserved */ + } hsf_ctrl; + u16 regval; +}; + +/* ICH Flash Region Access Permissions */ +union ich8_hws_flash_regacc { + struct ich8_flracc { + u32 grra:8; /* 0:7 GbE region Read Access */ + u32 grwa:8; /* 8:15 GbE region Write Access */ + u32 gmrag:8; /* 23:16 GbE Master Read Access Grant */ + u32 gmwag:8; /* 31:24 GbE Master Write Access Grant */ + } hsf_flregacc; + u16 regval; +}; + +/** + * e1000_phy_is_accessible_pchlan - Check if able to access PHY registers + * @hw: pointer to the HW structure + * + * Test access to the PHY registers by reading the PHY ID registers. If + * the PHY ID is already known (e.g. resume path) compare it with known ID, + * otherwise assume the read PHY ID is correct if it is valid. + * + * Assumes the sw/fw/hw semaphore is already acquired. + **/ +STATIC bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw) +{ + u16 phy_reg = 0; + u32 phy_id = 0; + s32 ret_val = 0; + u16 retry_count; + u32 mac_reg = 0; + + for (retry_count = 0; retry_count < 2; retry_count++) { + ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID1, &phy_reg); + if (ret_val || (phy_reg == 0xFFFF)) + continue; + phy_id = (u32)(phy_reg << 16); + + ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID2, &phy_reg); + if (ret_val || (phy_reg == 0xFFFF)) { + phy_id = 0; + continue; + } + phy_id |= (u32)(phy_reg & PHY_REVISION_MASK); + break; + } + + if (hw->phy.id) { + if (hw->phy.id == phy_id) + goto out; + } else if (phy_id) { + hw->phy.id = phy_id; + hw->phy.revision = (u32)(phy_reg & ~PHY_REVISION_MASK); + goto out; + } + + /* In case the PHY needs to be in mdio slow mode, + * set slow mode and try to get the PHY id again. + */ + if (hw->mac.type < e1000_pch_lpt) { + hw->phy.ops.release(hw); + ret_val = e1000_set_mdio_slow_mode_hv(hw); + if (!ret_val) + ret_val = e1000_get_phy_id(hw); + hw->phy.ops.acquire(hw); + } + + if (ret_val) + return false; +out: + if (hw->mac.type >= e1000_pch_lpt) { + /* Only unforce SMBus if ME is not active */ + if (!(E1000_READ_REG(hw, E1000_FWSM) & + E1000_ICH_FWSM_FW_VALID)) { + /* Unforce SMBus mode in PHY */ + hw->phy.ops.read_reg_locked(hw, CV_SMB_CTRL, &phy_reg); + phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS; + hw->phy.ops.write_reg_locked(hw, CV_SMB_CTRL, phy_reg); + + /* Unforce SMBus mode in MAC */ + mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT); + mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS; + E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg); + } + } + + return true; +} + +/** + * e1000_toggle_lanphypc_pch_lpt - toggle the LANPHYPC pin value + * @hw: pointer to the HW structure + * + * Toggling the LANPHYPC pin value fully power-cycles the PHY and is + * used to reset the PHY to a quiescent state when necessary. 
+ **/ +STATIC void e1000_toggle_lanphypc_pch_lpt(struct e1000_hw *hw) +{ + u32 mac_reg; + + DEBUGFUNC("e1000_toggle_lanphypc_pch_lpt"); + + /* Set Phy Config Counter to 50msec */ + mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM3); + mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK; + mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC; + E1000_WRITE_REG(hw, E1000_FEXTNVM3, mac_reg); + + /* Toggle LANPHYPC Value bit */ + mac_reg = E1000_READ_REG(hw, E1000_CTRL); + mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE; + mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE; + E1000_WRITE_REG(hw, E1000_CTRL, mac_reg); + E1000_WRITE_FLUSH(hw); + msec_delay(1); + mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE; + E1000_WRITE_REG(hw, E1000_CTRL, mac_reg); + E1000_WRITE_FLUSH(hw); + + if (hw->mac.type < e1000_pch_lpt) { + msec_delay(50); + } else { + u16 count = 20; + + do { + msec_delay(5); + } while (!(E1000_READ_REG(hw, E1000_CTRL_EXT) & + E1000_CTRL_EXT_LPCD) && count--); + + msec_delay(30); + } +} + +/** + * e1000_init_phy_workarounds_pchlan - PHY initialization workarounds + * @hw: pointer to the HW structure + * + * Workarounds/flow necessary for PHY initialization during driver load + * and resume paths. + **/ +STATIC s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw) +{ + u32 mac_reg, fwsm = E1000_READ_REG(hw, E1000_FWSM); + s32 ret_val; + + DEBUGFUNC("e1000_init_phy_workarounds_pchlan"); + + /* Gate automatic PHY configuration by hardware on managed and + * non-managed 82579 and newer adapters. + */ + e1000_gate_hw_phy_config_ich8lan(hw, true); + +#ifdef ULP_SUPPORT + /* It is not possible to be certain of the current state of ULP + * so forcibly disable it. + */ + hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_unknown; + +#endif /* ULP_SUPPORT */ + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) { + DEBUGOUT("Failed to initialize PHY flow\n"); + goto out; + } + + /* The MAC-PHY interconnect may be in SMBus mode. If the PHY is + * inaccessible and resetting the PHY is not blocked, toggle the + * LANPHYPC Value bit to force the interconnect to PCIe mode. + */ + switch (hw->mac.type) { + case e1000_pch_lpt: + case e1000_pch_spt: + case e1000_pch_cnp: + if (e1000_phy_is_accessible_pchlan(hw)) + break; + + /* Before toggling LANPHYPC, see if PHY is accessible by + * forcing MAC to SMBus mode first. + */ + mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT); + mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS; + E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg); + + /* Wait 50 milliseconds for MAC to finish any retries + * that it might be trying to perform from previous + * attempts to acknowledge any phy read requests. 
+ */ + msec_delay(50); + + /* fall-through */ + case e1000_pch2lan: + if (e1000_phy_is_accessible_pchlan(hw)) + break; + + /* fall-through */ + case e1000_pchlan: + if ((hw->mac.type == e1000_pchlan) && + (fwsm & E1000_ICH_FWSM_FW_VALID)) + break; + + if (hw->phy.ops.check_reset_block(hw)) { + DEBUGOUT("Required LANPHYPC toggle blocked by ME\n"); + ret_val = -E1000_ERR_PHY; + break; + } + + /* Toggle LANPHYPC Value bit */ + e1000_toggle_lanphypc_pch_lpt(hw); + if (hw->mac.type >= e1000_pch_lpt) { + if (e1000_phy_is_accessible_pchlan(hw)) + break; + + /* Toggling LANPHYPC brings the PHY out of SMBus mode + * so ensure that the MAC is also out of SMBus mode + */ + mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT); + mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS; + E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg); + + if (e1000_phy_is_accessible_pchlan(hw)) + break; + + ret_val = -E1000_ERR_PHY; + } + break; + default: + break; + } + + hw->phy.ops.release(hw); + if (!ret_val) { + + /* Check to see if able to reset PHY. Print error if not */ + if (hw->phy.ops.check_reset_block(hw)) { + ERROR_REPORT("Reset blocked by ME\n"); + goto out; + } + + /* Reset the PHY before any access to it. Doing so, ensures + * that the PHY is in a known good state before we read/write + * PHY registers. The generic reset is sufficient here, + * because we haven't determined the PHY type yet. + */ + ret_val = e1000_phy_hw_reset_generic(hw); + if (ret_val) + goto out; + + /* On a successful reset, possibly need to wait for the PHY + * to quiesce to an accessible state before returning control + * to the calling function. If the PHY does not quiesce, then + * return E1000E_BLK_PHY_RESET, as this is the condition that + * the PHY is in. + */ + ret_val = hw->phy.ops.check_reset_block(hw); + if (ret_val) + ERROR_REPORT("ME blocked access to PHY after reset\n"); + } + +out: + /* Ungate automatic PHY configuration on non-managed 82579 */ + if ((hw->mac.type == e1000_pch2lan) && + !(fwsm & E1000_ICH_FWSM_FW_VALID)) { + msec_delay(10); + e1000_gate_hw_phy_config_ich8lan(hw, false); + } + + return ret_val; +} + +/** + * e1000_init_phy_params_pchlan - Initialize PHY function pointers + * @hw: pointer to the HW structure + * + * Initialize family-specific PHY parameters and function pointers. 
+ **/ +STATIC s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + + DEBUGFUNC("e1000_init_phy_params_pchlan"); + + phy->addr = 1; + phy->reset_delay_us = 100; + + phy->ops.acquire = e1000_acquire_swflag_ich8lan; + phy->ops.check_reset_block = e1000_check_reset_block_ich8lan; + phy->ops.get_cfg_done = e1000_get_cfg_done_ich8lan; + phy->ops.set_page = e1000_set_page_igp; + phy->ops.read_reg = e1000_read_phy_reg_hv; + phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked; + phy->ops.read_reg_page = e1000_read_phy_reg_page_hv; + phy->ops.release = e1000_release_swflag_ich8lan; + phy->ops.reset = e1000_phy_hw_reset_ich8lan; + phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan; + phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan; + phy->ops.write_reg = e1000_write_phy_reg_hv; + phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked; + phy->ops.write_reg_page = e1000_write_phy_reg_page_hv; + phy->ops.power_up = e1000_power_up_phy_copper; + phy->ops.power_down = e1000_power_down_phy_copper_ich8lan; + phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; + + phy->id = e1000_phy_unknown; + + ret_val = e1000_init_phy_workarounds_pchlan(hw); + if (ret_val) + return ret_val; + + if (phy->id == e1000_phy_unknown) + switch (hw->mac.type) { + default: + ret_val = e1000_get_phy_id(hw); + if (ret_val) + return ret_val; + if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK)) + break; + /* fall-through */ + case e1000_pch2lan: + case e1000_pch_lpt: + case e1000_pch_spt: + case e1000_pch_cnp: + /* In case the PHY needs to be in mdio slow mode, + * set slow mode and try to get the PHY id again. + */ + ret_val = e1000_set_mdio_slow_mode_hv(hw); + if (ret_val) + return ret_val; + ret_val = e1000_get_phy_id(hw); + if (ret_val) + return ret_val; + break; + } + phy->type = e1000_get_phy_type_from_id(phy->id); + + switch (phy->type) { + case e1000_phy_82577: + case e1000_phy_82579: + case e1000_phy_i217: + phy->ops.check_polarity = e1000_check_polarity_82577; + phy->ops.force_speed_duplex = + e1000_phy_force_speed_duplex_82577; + phy->ops.get_cable_length = e1000_get_cable_length_82577; + phy->ops.get_info = e1000_get_phy_info_82577; + phy->ops.commit = e1000_phy_sw_reset_generic; + break; + case e1000_phy_82578: + phy->ops.check_polarity = e1000_check_polarity_m88; + phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88; + phy->ops.get_cable_length = e1000_get_cable_length_m88; + phy->ops.get_info = e1000_get_phy_info_m88; + break; + default: + ret_val = -E1000_ERR_PHY; + break; + } + + return ret_val; +} + +/** + * e1000_init_phy_params_ich8lan - Initialize PHY function pointers + * @hw: pointer to the HW structure + * + * Initialize family-specific PHY parameters and function pointers. 
+ **/ +STATIC s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 i = 0; + + DEBUGFUNC("e1000_init_phy_params_ich8lan"); + + phy->addr = 1; + phy->reset_delay_us = 100; + + phy->ops.acquire = e1000_acquire_swflag_ich8lan; + phy->ops.check_reset_block = e1000_check_reset_block_ich8lan; + phy->ops.get_cable_length = e1000_get_cable_length_igp_2; + phy->ops.get_cfg_done = e1000_get_cfg_done_ich8lan; + phy->ops.read_reg = e1000_read_phy_reg_igp; + phy->ops.release = e1000_release_swflag_ich8lan; + phy->ops.reset = e1000_phy_hw_reset_ich8lan; + phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_ich8lan; + phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_ich8lan; + phy->ops.write_reg = e1000_write_phy_reg_igp; + phy->ops.power_up = e1000_power_up_phy_copper; + phy->ops.power_down = e1000_power_down_phy_copper_ich8lan; + + /* We may need to do this twice - once for IGP and if that fails, + * we'll set BM func pointers and try again + */ + ret_val = e1000_determine_phy_address(hw); + if (ret_val) { + phy->ops.write_reg = e1000_write_phy_reg_bm; + phy->ops.read_reg = e1000_read_phy_reg_bm; + ret_val = e1000_determine_phy_address(hw); + if (ret_val) { + DEBUGOUT("Cannot determine PHY addr. Erroring out\n"); + return ret_val; + } + } + + phy->id = 0; + while ((e1000_phy_unknown == e1000_get_phy_type_from_id(phy->id)) && + (i++ < 100)) { + msec_delay(1); + ret_val = e1000_get_phy_id(hw); + if (ret_val) + return ret_val; + } + + /* Verify phy id */ + switch (phy->id) { + case IGP03E1000_E_PHY_ID: + phy->type = e1000_phy_igp_3; + phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; + phy->ops.read_reg_locked = e1000_read_phy_reg_igp_locked; + phy->ops.write_reg_locked = e1000_write_phy_reg_igp_locked; + phy->ops.get_info = e1000_get_phy_info_igp; + phy->ops.check_polarity = e1000_check_polarity_igp; + phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp; + break; + case IFE_E_PHY_ID: + case IFE_PLUS_E_PHY_ID: + case IFE_C_E_PHY_ID: + phy->type = e1000_phy_ife; + phy->autoneg_mask = E1000_ALL_NOT_GIG; + phy->ops.get_info = e1000_get_phy_info_ife; + phy->ops.check_polarity = e1000_check_polarity_ife; + phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife; + break; + case BME1000_E_PHY_ID: + phy->type = e1000_phy_bm; + phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; + phy->ops.read_reg = e1000_read_phy_reg_bm; + phy->ops.write_reg = e1000_write_phy_reg_bm; + phy->ops.commit = e1000_phy_sw_reset_generic; + phy->ops.get_info = e1000_get_phy_info_m88; + phy->ops.check_polarity = e1000_check_polarity_m88; + phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88; + break; + default: + return -E1000_ERR_PHY; + break; + } + + return E1000_SUCCESS; +} + +/** + * e1000_init_nvm_params_ich8lan - Initialize NVM function pointers + * @hw: pointer to the HW structure + * + * Initialize family-specific NVM parameters and function + * pointers. + **/ +STATIC s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; + u32 gfpreg, sector_base_addr, sector_end_addr; + u16 i; + u32 nvm_size; + + DEBUGFUNC("e1000_init_nvm_params_ich8lan"); + + nvm->type = e1000_nvm_flash_sw; + + if (hw->mac.type >= e1000_pch_spt) { + /* in SPT, gfpreg doesn't exist. NVM size is taken from the + * STRAP register. This is because in SPT the GbE Flash region + * is no longer accessed through the flash registers. 
Instead, + * the mechanism has changed, and the Flash region access + * registers are now implemented in GbE memory space. + */ + nvm->flash_base_addr = 0; + nvm_size = + (((E1000_READ_REG(hw, E1000_STRAP) >> 1) & 0x1F) + 1) + * NVM_SIZE_MULTIPLIER; + nvm->flash_bank_size = nvm_size / 2; + /* Adjust to word count */ + nvm->flash_bank_size /= sizeof(u16); + /* Set the base address for flash register access */ + hw->flash_address = hw->hw_addr + E1000_FLASH_BASE_ADDR; + } else { + /* Can't read flash registers if register set isn't mapped. */ + if (!hw->flash_address) { + DEBUGOUT("ERROR: Flash registers not mapped\n"); + return -E1000_ERR_CONFIG; + } + + gfpreg = E1000_READ_FLASH_REG(hw, ICH_FLASH_GFPREG); + + /* sector_X_addr is a "sector"-aligned address (4096 bytes) + * Add 1 to sector_end_addr since this sector is included in + * the overall size. + */ + sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK; + sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1; + + /* flash_base_addr is byte-aligned */ + nvm->flash_base_addr = sector_base_addr + << FLASH_SECTOR_ADDR_SHIFT; + + /* find total size of the NVM, then cut in half since the total + * size represents two separate NVM banks. + */ + nvm->flash_bank_size = ((sector_end_addr - sector_base_addr) + << FLASH_SECTOR_ADDR_SHIFT); + nvm->flash_bank_size /= 2; + /* Adjust to word count */ + nvm->flash_bank_size /= sizeof(u16); + } + + nvm->word_size = E1000_SHADOW_RAM_WORDS; + + /* Clear shadow ram */ + for (i = 0; i < nvm->word_size; i++) { + dev_spec->shadow_ram[i].modified = false; + dev_spec->shadow_ram[i].value = 0xFFFF; + } + + E1000_MUTEX_INIT(&dev_spec->nvm_mutex); + E1000_MUTEX_INIT(&dev_spec->swflag_mutex); + + /* Function Pointers */ + nvm->ops.acquire = e1000_acquire_nvm_ich8lan; + nvm->ops.release = e1000_release_nvm_ich8lan; + if (hw->mac.type >= e1000_pch_spt) { + nvm->ops.read = e1000_read_nvm_spt; + nvm->ops.update = e1000_update_nvm_checksum_spt; + } else { + nvm->ops.read = e1000_read_nvm_ich8lan; + nvm->ops.update = e1000_update_nvm_checksum_ich8lan; + } + nvm->ops.valid_led_default = e1000_valid_led_default_ich8lan; + nvm->ops.validate = e1000_validate_nvm_checksum_ich8lan; + nvm->ops.write = e1000_write_nvm_ich8lan; + + return E1000_SUCCESS; +} + +/** + * e1000_init_mac_params_ich8lan - Initialize MAC function pointers + * @hw: pointer to the HW structure + * + * Initialize family-specific MAC parameters and function + * pointers. 
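+ *
+ * Receive address register counts differ per family (see below): ICH8 uses
+ * E1000_ICH_RAR_ENTRIES minus one, ICH9/ICH10/PCH keep the full
+ * E1000_ICH_RAR_ENTRIES, 82579 (PCH2) uses E1000_PCH2_RAR_ENTRIES, and LPT
+ * and newer parts use E1000_PCH_LPT_RAR_ENTRIES.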
+ **/ +STATIC s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; +#if defined(QV_RELEASE) || !defined(NO_PCH_LPT_B0_SUPPORT) + u16 pci_cfg; +#endif /* QV_RELEASE || !defined(NO_PCH_LPT_B0_SUPPORT) */ + + DEBUGFUNC("e1000_init_mac_params_ich8lan"); + + /* Set media type function pointer */ + hw->phy.media_type = e1000_media_type_copper; + + /* Set mta register count */ + mac->mta_reg_count = 32; + /* Set rar entry count */ + mac->rar_entry_count = E1000_ICH_RAR_ENTRIES; + if (mac->type == e1000_ich8lan) + mac->rar_entry_count--; + /* Set if part includes ASF firmware */ + mac->asf_firmware_present = true; + /* FWSM register */ + mac->has_fwsm = true; + /* ARC subsystem not supported */ + mac->arc_subsystem_valid = false; + /* Adaptive IFS supported */ + mac->adaptive_ifs = true; + + /* Function pointers */ + + /* bus type/speed/width */ + mac->ops.get_bus_info = e1000_get_bus_info_ich8lan; + /* function id */ + mac->ops.set_lan_id = e1000_set_lan_id_single_port; + /* reset */ + mac->ops.reset_hw = e1000_reset_hw_ich8lan; + /* hw initialization */ + mac->ops.init_hw = e1000_init_hw_ich8lan; + /* link setup */ + mac->ops.setup_link = e1000_setup_link_ich8lan; + /* physical interface setup */ + mac->ops.setup_physical_interface = e1000_setup_copper_link_ich8lan; + /* check for link */ + mac->ops.check_for_link = e1000_check_for_copper_link_ich8lan; + /* link info */ + mac->ops.get_link_up_info = e1000_get_link_up_info_ich8lan; + /* multicast address update */ + mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic; + /* clear hardware counters */ + mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan; + + /* LED and other operations */ + switch (mac->type) { + case e1000_ich8lan: + case e1000_ich9lan: + case e1000_ich10lan: + /* check management mode */ + mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan; + /* ID LED init */ + mac->ops.id_led_init = e1000_id_led_init_generic; + /* blink LED */ + mac->ops.blink_led = e1000_blink_led_generic; + /* setup LED */ + mac->ops.setup_led = e1000_setup_led_generic; + /* cleanup LED */ + mac->ops.cleanup_led = e1000_cleanup_led_ich8lan; + /* turn on/off LED */ + mac->ops.led_on = e1000_led_on_ich8lan; + mac->ops.led_off = e1000_led_off_ich8lan; + break; + case e1000_pch2lan: + mac->rar_entry_count = E1000_PCH2_RAR_ENTRIES; + mac->ops.rar_set = e1000_rar_set_pch2lan; + /* fall-through */ + case e1000_pch_lpt: + case e1000_pch_spt: + case e1000_pch_cnp: +#ifndef NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT + /* multicast address update for pch2 */ + mac->ops.update_mc_addr_list = + e1000_update_mc_addr_list_pch2lan; + /* fall-through */ +#endif + case e1000_pchlan: +#if defined(QV_RELEASE) || !defined(NO_PCH_LPT_B0_SUPPORT) + /* save PCH revision_id */ + e1000_read_pci_cfg(hw, E1000_PCI_REVISION_ID_REG, &pci_cfg); + /* SPT uses full byte for revision ID, + * as opposed to previous generations + */ + if (hw->mac.type >= e1000_pch_spt) + hw->revision_id = (u8)(pci_cfg &= 0x00FF); + else + hw->revision_id = (u8)(pci_cfg &= 0x000F); +#endif /* QV_RELEASE || !defined(NO_PCH_LPT_B0_SUPPORT) */ + /* check management mode */ + mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan; + /* ID LED init */ + mac->ops.id_led_init = e1000_id_led_init_pchlan; + /* setup LED */ + mac->ops.setup_led = e1000_setup_led_pchlan; + /* cleanup LED */ + mac->ops.cleanup_led = e1000_cleanup_led_pchlan; + /* turn on/off LED */ + mac->ops.led_on = e1000_led_on_pchlan; + mac->ops.led_off = e1000_led_off_pchlan; + break; + 
default: + break; + } + + if (mac->type >= e1000_pch_lpt) { + mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES; + mac->ops.rar_set = e1000_rar_set_pch_lpt; + mac->ops.setup_physical_interface = e1000_setup_copper_link_pch_lpt; + } + + /* Enable PCS Lock-loss workaround for ICH8 */ + if (mac->type == e1000_ich8lan) + e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, true); + + return E1000_SUCCESS; +} + +/** + * __e1000_access_emi_reg_locked - Read/write EMI register + * @hw: pointer to the HW structure + * @addr: EMI address to program + * @data: pointer to value to read/write from/to the EMI address + * @read: boolean flag to indicate read or write + * + * This helper function assumes the SW/FW/HW Semaphore is already acquired. + **/ +STATIC s32 __e1000_access_emi_reg_locked(struct e1000_hw *hw, u16 address, + u16 *data, bool read) +{ + s32 ret_val; + + DEBUGFUNC("__e1000_access_emi_reg_locked"); + + ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR, address); + if (ret_val) + return ret_val; + + if (read) + ret_val = hw->phy.ops.read_reg_locked(hw, I82579_EMI_DATA, + data); + else + ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA, + *data); + + return ret_val; +} + +/** + * e1000_read_emi_reg_locked - Read Extended Management Interface register + * @hw: pointer to the HW structure + * @addr: EMI address to program + * @data: value to be read from the EMI address + * + * Assumes the SW/FW/HW Semaphore is already acquired. + **/ +s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data) +{ + DEBUGFUNC("e1000_read_emi_reg_locked"); + + return __e1000_access_emi_reg_locked(hw, addr, data, true); +} + +/** + * e1000_write_emi_reg_locked - Write Extended Management Interface register + * @hw: pointer to the HW structure + * @addr: EMI address to program + * @data: value to be written to the EMI address + * + * Assumes the SW/FW/HW Semaphore is already acquired. + **/ +s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data) +{ + DEBUGFUNC("e1000_read_emi_reg_locked"); + + return __e1000_access_emi_reg_locked(hw, addr, &data, false); +} + +/** + * e1000_set_eee_pchlan - Enable/disable EEE support + * @hw: pointer to the HW structure + * + * Enable/disable EEE based on setting in dev_spec structure, the duplex of + * the link and the EEE capabilities of the link partner. The LPI Control + * register bits will remain set only if/when link is up. + * + * EEE LPI must not be asserted earlier than one second after link is up. + * On 82579, EEE LPI should not be enabled until such time otherwise there + * can be link issues with some switches. Other devices can have EEE LPI + * enabled immediately upon link up since they have a timer in hardware which + * prevents LPI from being asserted too early. 
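+ *
+ * As a concrete example of the gating below, 1000BASE-T LPI is enabled only
+ * when I82579_EEE_1000_SUPPORTED is set in both the local advertisement and
+ * the link partner ability, while 100BASE-TX LPI additionally requires the
+ * partner to advertise 100TX full duplex, since EEE is not defined for
+ * 100Half.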
+ **/ +s32 e1000_set_eee_pchlan(struct e1000_hw *hw) +{ + struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; + s32 ret_val; + u16 lpa, pcs_status, adv, adv_addr, lpi_ctrl, data; + + DEBUGFUNC("e1000_set_eee_pchlan"); + + switch (hw->phy.type) { + case e1000_phy_82579: + lpa = I82579_EEE_LP_ABILITY; + pcs_status = I82579_EEE_PCS_STATUS; + adv_addr = I82579_EEE_ADVERTISEMENT; + break; + case e1000_phy_i217: + lpa = I217_EEE_LP_ABILITY; + pcs_status = I217_EEE_PCS_STATUS; + adv_addr = I217_EEE_ADVERTISEMENT; + break; + default: + return E1000_SUCCESS; + } + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + ret_val = hw->phy.ops.read_reg_locked(hw, I82579_LPI_CTRL, &lpi_ctrl); + if (ret_val) + goto release; + + /* Clear bits that enable EEE in various speeds */ + lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE_MASK; + + /* Enable EEE if not disabled by user */ + if (!dev_spec->eee_disable) { + /* Save off link partner's EEE ability */ + ret_val = e1000_read_emi_reg_locked(hw, lpa, + &dev_spec->eee_lp_ability); + if (ret_val) + goto release; + + /* Read EEE advertisement */ + ret_val = e1000_read_emi_reg_locked(hw, adv_addr, &adv); + if (ret_val) + goto release; + + /* Enable EEE only for speeds in which the link partner is + * EEE capable and for which we advertise EEE. + */ + if (adv & dev_spec->eee_lp_ability & I82579_EEE_1000_SUPPORTED) + lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE; + + if (adv & dev_spec->eee_lp_ability & I82579_EEE_100_SUPPORTED) { + hw->phy.ops.read_reg_locked(hw, PHY_LP_ABILITY, &data); + if (data & NWAY_LPAR_100TX_FD_CAPS) + lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE; + else + /* EEE is not supported in 100Half, so ignore + * partner's EEE in 100 ability if full-duplex + * is not advertised. + */ + dev_spec->eee_lp_ability &= + ~I82579_EEE_100_SUPPORTED; + } + } + + if (hw->phy.type == e1000_phy_82579) { + ret_val = e1000_read_emi_reg_locked(hw, I82579_LPI_PLL_SHUT, + &data); + if (ret_val) + goto release; + + data &= ~I82579_LPI_100_PLL_SHUT; + ret_val = e1000_write_emi_reg_locked(hw, I82579_LPI_PLL_SHUT, + data); + } + + /* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */ + ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data); + if (ret_val) + goto release; + + ret_val = hw->phy.ops.write_reg_locked(hw, I82579_LPI_CTRL, lpi_ctrl); +release: + hw->phy.ops.release(hw); + + return ret_val; +} + +/** + * e1000_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP + * @hw: pointer to the HW structure + * @link: link up bool flag + * + * When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications + * preventing further DMA write requests. Workaround the issue by disabling + * the de-assertion of the clock request when in 1Gpbs mode. + * Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link + * speeds in order to avoid Tx hangs. 
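+ *
+ * The inband link-status Tx timeout programmed below uses 10 usec units
+ * (5 x 10 usec for 100Half, 50 x 10 usec for 10 Mb/s), and the K1 entry
+ * latency extension in FEXTNVM6 is applied only for the 10 Mb/s case.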
+ **/ +STATIC s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link) +{ + u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6); + u32 status = E1000_READ_REG(hw, E1000_STATUS); + s32 ret_val = E1000_SUCCESS; + u16 reg; + + if (link && (status & E1000_STATUS_SPEED_1000)) { + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + ret_val = + e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG, + ®); + if (ret_val) + goto release; + + ret_val = + e1000_write_kmrn_reg_locked(hw, + E1000_KMRNCTRLSTA_K1_CONFIG, + reg & + ~E1000_KMRNCTRLSTA_K1_ENABLE); + if (ret_val) + goto release; + + usec_delay(10); + + E1000_WRITE_REG(hw, E1000_FEXTNVM6, + fextnvm6 | E1000_FEXTNVM6_REQ_PLL_CLK); + + ret_val = + e1000_write_kmrn_reg_locked(hw, + E1000_KMRNCTRLSTA_K1_CONFIG, + reg); +release: + hw->phy.ops.release(hw); + } else { + /* clear FEXTNVM6 bit 8 on link down or 10/100 */ + fextnvm6 &= ~E1000_FEXTNVM6_REQ_PLL_CLK; + + if ((hw->phy.revision > 5) || !link || + ((status & E1000_STATUS_SPEED_100) && + (status & E1000_STATUS_FD))) + goto update_fextnvm6; + + ret_val = hw->phy.ops.read_reg(hw, I217_INBAND_CTRL, ®); + if (ret_val) + return ret_val; + + /* Clear link status transmit timeout */ + reg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK; + + if (status & E1000_STATUS_SPEED_100) { + /* Set inband Tx timeout to 5x10us for 100Half */ + reg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT; + + /* Do not extend the K1 entry latency for 100Half */ + fextnvm6 &= ~E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION; + } else { + /* Set inband Tx timeout to 50x10us for 10Full/Half */ + reg |= 50 << + I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT; + + /* Extend the K1 entry latency for 10 Mbps */ + fextnvm6 |= E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION; + } + + ret_val = hw->phy.ops.write_reg(hw, I217_INBAND_CTRL, reg); + if (ret_val) + return ret_val; + +update_fextnvm6: + E1000_WRITE_REG(hw, E1000_FEXTNVM6, fextnvm6); + } + + return ret_val; +} + +#ifdef ULP_SUPPORT +/** + * e1000_enable_ulp_lpt_lp - configure Ultra Low Power mode for LynxPoint-LP + * @hw: pointer to the HW structure + * @to_sx: boolean indicating a system power state transition to Sx + * + * When link is down, configure ULP mode to significantly reduce the power + * to the PHY. If on a Manageability Engine (ME) enabled system, tell the + * ME firmware to start the ULP configuration. If not on an ME enabled + * system, configure the ULP mode by software. + */ +s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx) +{ + u32 mac_reg; + s32 ret_val = E1000_SUCCESS; + u16 phy_reg; + u16 oem_reg = 0; + + if ((hw->mac.type < e1000_pch_lpt) || + (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) || + (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V) || + (hw->device_id == E1000_DEV_ID_PCH_I218_LM2) || + (hw->device_id == E1000_DEV_ID_PCH_I218_V2) || + (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_on)) + return 0; + + if (!to_sx) { + int i = 0; + /* Poll up to 5 seconds for Cable Disconnected indication */ + while (!(E1000_READ_REG(hw, E1000_FEXT) & + E1000_FEXT_PHY_CABLE_DISCONNECTED)) { + /* Bail if link is re-acquired */ + if (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU) + return -E1000_ERR_PHY; + if (i++ == 100) + break; + + msec_delay(50); + } + DEBUGOUT2("CABLE_DISCONNECTED %s set after %dmsec\n", + (E1000_READ_REG(hw, E1000_FEXT) & + E1000_FEXT_PHY_CABLE_DISCONNECTED) ? 
"" : "not", + i * 50); + if (!(E1000_READ_REG(hw, E1000_FEXT) & + E1000_FEXT_PHY_CABLE_DISCONNECTED)) + return 0; + } + + if (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID) { + /* Request ME configure ULP mode in the PHY */ + mac_reg = E1000_READ_REG(hw, E1000_H2ME); + mac_reg |= E1000_H2ME_ULP | E1000_H2ME_ENFORCE_SETTINGS; + E1000_WRITE_REG(hw, E1000_H2ME, mac_reg); + + goto out; + } + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + + /* During S0 Idle keep the phy in PCI-E mode */ + if (hw->dev_spec.ich8lan.smbus_disable) + goto skip_smbus; + + /* Force SMBus mode in PHY */ + ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg); + if (ret_val) + goto release; + phy_reg |= CV_SMB_CTRL_FORCE_SMBUS; + e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg); + + /* Force SMBus mode in MAC */ + mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT); + mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS; + E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg); + + /* Si workaround for ULP entry flow on i127/rev6 h/w. Enable + * LPLU and disable Gig speed when entering ULP + */ + if ((hw->phy.type == e1000_phy_i217) && (hw->phy.revision == 6)) { + ret_val = e1000_read_phy_reg_hv_locked(hw, HV_OEM_BITS, + &oem_reg); + if (ret_val) + goto release; + + phy_reg = oem_reg; + phy_reg |= HV_OEM_BITS_LPLU | HV_OEM_BITS_GBE_DIS; + + ret_val = e1000_write_phy_reg_hv_locked(hw, HV_OEM_BITS, + phy_reg); + + if (ret_val) + goto release; + } + +skip_smbus: + if (!to_sx) { + /* Change the 'Link Status Change' interrupt to trigger + * on 'Cable Status Change' + */ + ret_val = e1000_read_kmrn_reg_locked(hw, + E1000_KMRNCTRLSTA_OP_MODES, + &phy_reg); + if (ret_val) + goto release; + phy_reg |= E1000_KMRNCTRLSTA_OP_MODES_LSC2CSC; + e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_OP_MODES, + phy_reg); + } + + /* Set Inband ULP Exit, Reset to SMBus mode and + * Disable SMBus Release on PERST# in PHY + */ + ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg); + if (ret_val) + goto release; + phy_reg |= (I218_ULP_CONFIG1_RESET_TO_SMBUS | + I218_ULP_CONFIG1_DISABLE_SMB_PERST); + if (to_sx) { + if (E1000_READ_REG(hw, E1000_WUFC) & E1000_WUFC_LNKC) + phy_reg |= I218_ULP_CONFIG1_WOL_HOST; + else + phy_reg &= ~I218_ULP_CONFIG1_WOL_HOST; + + phy_reg |= I218_ULP_CONFIG1_STICKY_ULP; + phy_reg &= ~I218_ULP_CONFIG1_INBAND_EXIT; + } else { + phy_reg |= I218_ULP_CONFIG1_INBAND_EXIT; + phy_reg &= ~I218_ULP_CONFIG1_STICKY_ULP; + phy_reg &= ~I218_ULP_CONFIG1_WOL_HOST; + } + e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg); + + /* Set Disable SMBus Release on PERST# in MAC */ + mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM7); + mac_reg |= E1000_FEXTNVM7_DISABLE_SMB_PERST; + E1000_WRITE_REG(hw, E1000_FEXTNVM7, mac_reg); + + /* Commit ULP changes in PHY by starting auto ULP configuration */ + phy_reg |= I218_ULP_CONFIG1_START; + e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg); + + if (!to_sx) { + /* Disable Tx so that the MAC doesn't send any (buffered) + * packets to the PHY. 
+ */ + mac_reg = E1000_READ_REG(hw, E1000_TCTL); + mac_reg &= ~E1000_TCTL_EN; + E1000_WRITE_REG(hw, E1000_TCTL, mac_reg); + } + + if ((hw->phy.type == e1000_phy_i217) && (hw->phy.revision == 6) && + to_sx && (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) { + ret_val = e1000_write_phy_reg_hv_locked(hw, HV_OEM_BITS, + oem_reg); + if (ret_val) + goto release; + } + +release: + hw->phy.ops.release(hw); +out: + if (ret_val) + DEBUGOUT1("Error in ULP enable flow: %d\n", ret_val); + else + hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_on; + + return ret_val; +} + +/** + * e1000_disable_ulp_lpt_lp - unconfigure Ultra Low Power mode for LynxPoint-LP + * @hw: pointer to the HW structure + * @force: boolean indicating whether or not to force disabling ULP + * + * Un-configure ULP mode when link is up, the system is transitioned from + * Sx or the driver is unloaded. If on a Manageability Engine (ME) enabled + * system, poll for an indication from ME that ULP has been un-configured. + * If not on an ME enabled system, un-configure the ULP mode by software. + * + * During nominal operation, this function is called when link is acquired + * to disable ULP mode (force=false); otherwise, for example when unloading + * the driver or during Sx->S0 transitions, this is called with force=true + * to forcibly disable ULP. + + * When the cable is plugged in while the device is in D0, a Cable Status + * Change interrupt is generated which causes this function to be called + * to partially disable ULP mode and restart autonegotiation. This function + * is then called again due to the resulting Link Status Change interrupt + * to finish cleaning up after the ULP flow. + */ +s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force) +{ + s32 ret_val = E1000_SUCCESS; + u32 mac_reg; + u16 phy_reg; + int i = 0; + + if ((hw->mac.type < e1000_pch_lpt) || + (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) || + (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V) || + (hw->device_id == E1000_DEV_ID_PCH_I218_LM2) || + (hw->device_id == E1000_DEV_ID_PCH_I218_V2) || + (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_off)) + return 0; + + if (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID) { + if (force) { + /* Request ME un-configure ULP mode in the PHY */ + mac_reg = E1000_READ_REG(hw, E1000_H2ME); + mac_reg &= ~E1000_H2ME_ULP; + mac_reg |= E1000_H2ME_ENFORCE_SETTINGS; + E1000_WRITE_REG(hw, E1000_H2ME, mac_reg); + } + + /* Poll up to 300msec for ME to clear ULP_CFG_DONE. 
*/ + while (E1000_READ_REG(hw, E1000_FWSM) & + E1000_FWSM_ULP_CFG_DONE) { + if (i++ == 30) { + ret_val = -E1000_ERR_PHY; + goto out; + } + + msec_delay(10); + } + DEBUGOUT1("ULP_CONFIG_DONE cleared after %dmsec\n", i * 10); + + if (force) { + mac_reg = E1000_READ_REG(hw, E1000_H2ME); + mac_reg &= ~E1000_H2ME_ENFORCE_SETTINGS; + E1000_WRITE_REG(hw, E1000_H2ME, mac_reg); + } else { + /* Clear H2ME.ULP after ME ULP configuration */ + mac_reg = E1000_READ_REG(hw, E1000_H2ME); + mac_reg &= ~E1000_H2ME_ULP; + E1000_WRITE_REG(hw, E1000_H2ME, mac_reg); + + /* Restore link speed advertisements and restart + * Auto-negotiation + */ + if (hw->mac.autoneg) { + ret_val = e1000_phy_setup_autoneg(hw); + if (ret_val) + goto out; + } else { + ret_val = e1000_setup_copper_link_generic(hw); + if (ret_val) + goto out; + } + ret_val = e1000_oem_bits_config_ich8lan(hw, true); + } + + goto out; + } + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + + /* Revert the change to the 'Link Status Change' + * interrupt to trigger on 'Cable Status Change' + */ + ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_OP_MODES, + &phy_reg); + if (ret_val) + goto release; + phy_reg &= ~E1000_KMRNCTRLSTA_OP_MODES_LSC2CSC; + e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_OP_MODES, phy_reg); + + if (force) + /* Toggle LANPHYPC Value bit */ + e1000_toggle_lanphypc_pch_lpt(hw); + + /* Unforce SMBus mode in PHY */ + ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg); + if (ret_val) { + /* The MAC might be in PCIe mode, so temporarily force to + * SMBus mode in order to access the PHY. + */ + mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT); + mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS; + E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg); + + msec_delay(50); + + ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, + &phy_reg); + if (ret_val) + goto release; + } + phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS; + e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg); + + /* Unforce SMBus mode in MAC */ + mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT); + mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS; + E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg); + + /* When ULP mode was previously entered, K1 was disabled by the + * hardware. Re-Enable K1 in the PHY when exiting ULP. 
+ */ + ret_val = e1000_read_phy_reg_hv_locked(hw, HV_PM_CTRL, &phy_reg); + if (ret_val) + goto release; + phy_reg |= HV_PM_CTRL_K1_ENABLE; + e1000_write_phy_reg_hv_locked(hw, HV_PM_CTRL, phy_reg); + + /* Clear ULP enabled configuration */ + ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg); + if (ret_val) + goto release; + /* CSC interrupt received due to ULP Indication */ + if ((phy_reg & I218_ULP_CONFIG1_IND) || force) { + phy_reg &= ~(I218_ULP_CONFIG1_IND | + I218_ULP_CONFIG1_STICKY_ULP | + I218_ULP_CONFIG1_RESET_TO_SMBUS | + I218_ULP_CONFIG1_WOL_HOST | + I218_ULP_CONFIG1_INBAND_EXIT | + I218_ULP_CONFIG1_EN_ULP_LANPHYPC | + I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST | + I218_ULP_CONFIG1_DISABLE_SMB_PERST); + e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg); + + /* Commit ULP changes by starting auto ULP configuration */ + phy_reg |= I218_ULP_CONFIG1_START; + e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg); + + /* Clear Disable SMBus Release on PERST# in MAC */ + mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM7); + mac_reg &= ~E1000_FEXTNVM7_DISABLE_SMB_PERST; + E1000_WRITE_REG(hw, E1000_FEXTNVM7, mac_reg); + + if (!force) { + hw->phy.ops.release(hw); + + if (hw->mac.autoneg) + e1000_phy_setup_autoneg(hw); + else + e1000_setup_copper_link_generic(hw); + + e1000_sw_lcd_config_ich8lan(hw); + + e1000_oem_bits_config_ich8lan(hw, true); + + /* Set ULP state to unknown and return non-zero to + * indicate no link (yet) and re-enter on the next LSC + * to finish disabling ULP flow. + */ + hw->dev_spec.ich8lan.ulp_state = + e1000_ulp_state_unknown; + + return 1; + } + } + + /* Re-enable Tx */ + mac_reg = E1000_READ_REG(hw, E1000_TCTL); + mac_reg |= E1000_TCTL_EN; + E1000_WRITE_REG(hw, E1000_TCTL, mac_reg); + +release: + hw->phy.ops.release(hw); + if (force) { + hw->phy.ops.reset(hw); + msec_delay(50); + } +out: + if (ret_val) + DEBUGOUT1("Error in ULP disable flow: %d\n", ret_val); + else + hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_off; + + return ret_val; +} + +#endif /* ULP_SUPPORT */ + + +/** + * e1000_check_for_copper_link_ich8lan - Check for link (Copper) + * @hw: pointer to the HW structure + * + * Checks to see of the link status of the hardware has changed. If a + * change in link status has been detected, then we read the PHY registers + * to get the current speed/duplex if link exists. + **/ +STATIC s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val, tipg_reg = 0; + u16 emi_addr, emi_val = 0; + bool link = false; + u16 phy_reg; + + DEBUGFUNC("e1000_check_for_copper_link_ich8lan"); + + /* We only want to go out to the PHY registers to see if Auto-Neg + * has completed and/or if our link status has changed. The + * get_link_status flag is set upon receiving a Link Status + * Change or Rx Sequence Error interrupt. + */ + if (!mac->get_link_status) + return E1000_SUCCESS; + + if ((hw->mac.type < e1000_pch_lpt) || + (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) || + (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V)) { + /* First we want to see if the MII Status Register reports + * link. If so, then we want to get the current speed/duplex + * of the PHY. + */ + ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link); + if (ret_val) + return ret_val; + } else { + /* Check the MAC's STATUS register to determine link state + * since the PHY could be inaccessible while in ULP mode. 
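+		 * A link-up reading of STATUS.LU here feeds the ULP disable
+		 * flow, while link-down re-arms ULP entry, so the PHY
+		 * registers are not touched while the PHY may still be
+		 * asleep.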
+ */ + link = !!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU); + if (link) + ret_val = e1000_disable_ulp_lpt_lp(hw, false); + else + ret_val = e1000_enable_ulp_lpt_lp(hw, false); + if (ret_val) + return ret_val; + } + + if (hw->mac.type == e1000_pchlan) { + ret_val = e1000_k1_gig_workaround_hv(hw, link); + if (ret_val) + return ret_val; + } + + /* When connected at 10Mbps half-duplex, some parts are excessively + * aggressive resulting in many collisions. To avoid this, increase + * the IPG and reduce Rx latency in the PHY. + */ + if ((hw->mac.type >= e1000_pch2lan) && link) { + u16 speed, duplex; + + e1000_get_speed_and_duplex_copper_generic(hw, &speed, &duplex); + tipg_reg = E1000_READ_REG(hw, E1000_TIPG); + tipg_reg &= ~E1000_TIPG_IPGT_MASK; + + if (duplex == HALF_DUPLEX && speed == SPEED_10) { + tipg_reg |= 0xFF; + /* Reduce Rx latency in analog PHY */ + emi_val = 0; + } else if (hw->mac.type >= e1000_pch_spt && + duplex == FULL_DUPLEX && speed != SPEED_1000) { + tipg_reg |= 0xC; + emi_val = 1; + } else { + /* Roll back the default values */ + tipg_reg |= 0x08; + emi_val = 1; + } + + E1000_WRITE_REG(hw, E1000_TIPG, tipg_reg); + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + if (hw->mac.type == e1000_pch2lan) + emi_addr = I82579_RX_CONFIG; + else + emi_addr = I217_RX_CONFIG; + ret_val = e1000_write_emi_reg_locked(hw, emi_addr, emi_val); + + + if (hw->mac.type >= e1000_pch_lpt) { + u16 phy_reg; + + hw->phy.ops.read_reg_locked(hw, I217_PLL_CLOCK_GATE_REG, + &phy_reg); + phy_reg &= ~I217_PLL_CLOCK_GATE_MASK; + if (speed == SPEED_100 || speed == SPEED_10) + phy_reg |= 0x3E8; + else + phy_reg |= 0xFA; + hw->phy.ops.write_reg_locked(hw, + I217_PLL_CLOCK_GATE_REG, + phy_reg); + + if (speed == SPEED_1000) { + hw->phy.ops.read_reg_locked(hw, HV_PM_CTRL, + &phy_reg); + + phy_reg |= HV_PM_CTRL_K1_CLK_REQ; + + hw->phy.ops.write_reg_locked(hw, HV_PM_CTRL, + phy_reg); + } + } + hw->phy.ops.release(hw); + + if (ret_val) + return ret_val; + + if (hw->mac.type >= e1000_pch_spt) { + u16 data; + u16 ptr_gap; + + if (speed == SPEED_1000) { + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + ret_val = hw->phy.ops.read_reg_locked(hw, + PHY_REG(776, 20), + &data); + if (ret_val) { + hw->phy.ops.release(hw); + return ret_val; + } + + ptr_gap = (data & (0x3FF << 2)) >> 2; + if (ptr_gap < 0x18) { + data &= ~(0x3FF << 2); + data |= (0x18 << 2); + ret_val = + hw->phy.ops.write_reg_locked(hw, + PHY_REG(776, 20), data); + } + hw->phy.ops.release(hw); + if (ret_val) + return ret_val; + } else { + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + ret_val = hw->phy.ops.write_reg_locked(hw, + PHY_REG(776, 20), + 0xC023); + hw->phy.ops.release(hw); + if (ret_val) + return ret_val; + + } + } + } + + /* I217 Packet Loss issue: + * ensure that FEXTNVM4 Beacon Duration is set correctly + * on power up. 
+ * Set the Beacon Duration for I217 to 8 usec + */ + if (hw->mac.type >= e1000_pch_lpt) { + u32 mac_reg; + + mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4); + mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK; + mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC; + E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg); + } + + /* Work-around I218 hang issue */ + if ((hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) || + (hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) || + (hw->device_id == E1000_DEV_ID_PCH_I218_LM3) || + (hw->device_id == E1000_DEV_ID_PCH_I218_V3)) { + ret_val = e1000_k1_workaround_lpt_lp(hw, link); + if (ret_val) + return ret_val; + } + /* Clear link partner's EEE ability */ + hw->dev_spec.ich8lan.eee_lp_ability = 0; + + /* Configure K0s minimum time */ + if (hw->mac.type >= e1000_pch_lpt) { + e1000_configure_k0s_lpt(hw, K1_ENTRY_LATENCY, K1_MIN_TIME); + } + + if (hw->mac.type >= e1000_pch_lpt) { + u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6); + + if (hw->mac.type == e1000_pch_spt) { + /* FEXTNVM6 K1-off workaround - for SPT only */ + u32 pcieanacfg = E1000_READ_REG(hw, E1000_PCIEANACFG); + + if (pcieanacfg & E1000_FEXTNVM6_K1_OFF_ENABLE) + fextnvm6 |= E1000_FEXTNVM6_K1_OFF_ENABLE; + else + fextnvm6 &= ~E1000_FEXTNVM6_K1_OFF_ENABLE; + } + + if (hw->dev_spec.ich8lan.disable_k1_off == true) + fextnvm6 &= ~E1000_FEXTNVM6_K1_OFF_ENABLE; + + E1000_WRITE_REG(hw, E1000_FEXTNVM6, fextnvm6); + } + + if (!link) + return E1000_SUCCESS; /* No link detected */ + + mac->get_link_status = false; + + switch (hw->mac.type) { + case e1000_pch2lan: + ret_val = e1000_k1_workaround_lv(hw); + if (ret_val) + return ret_val; + /* fall-thru */ + case e1000_pchlan: + if (hw->phy.type == e1000_phy_82578) { + ret_val = e1000_link_stall_workaround_hv(hw); + if (ret_val) + return ret_val; + } + + /* Workaround for PCHx parts in half-duplex: + * Set the number of preambles removed from the packet + * when it is passed from the PHY to the MAC to prevent + * the MAC from misinterpreting the packet type. + */ + hw->phy.ops.read_reg(hw, HV_KMRN_FIFO_CTRLSTA, &phy_reg); + phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK; + + if ((E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FD) != + E1000_STATUS_FD) + phy_reg |= (1 << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT); + + hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg); + break; + default: + break; + } + + /* Check if there was DownShift, must be checked + * immediately after link-up + */ + e1000_check_downshift_generic(hw); + + /* Enable/Disable EEE after link up */ + if (hw->phy.type > e1000_phy_82579) { + ret_val = e1000_set_eee_pchlan(hw); + if (ret_val) + return ret_val; + } + + /* If we are forcing speed/duplex, then we simply return since + * we have already determined whether we have link or not. + */ + if (!mac->autoneg) + return -E1000_ERR_CONFIG; + + /* Auto-Neg is enabled. Auto Speed Detection takes care + * of MAC speed/duplex configuration. So we only need to + * configure Collision Distance in the MAC. + */ + mac->ops.config_collision_dist(hw); + + /* Configure Flow Control now that Auto-Neg has completed. + * First, we need to restore the desired flow control + * settings because we may have had to re-autoneg with a + * different link partner. 
+ */ + ret_val = e1000_config_fc_after_link_up_generic(hw); + if (ret_val) + DEBUGOUT("Error configuring flow control\n"); + + return ret_val; +} + +/** + * e1000_init_function_pointers_ich8lan - Initialize ICH8 function pointers + * @hw: pointer to the HW structure + * + * Initialize family-specific function pointers for PHY, MAC, and NVM. + **/ +void e1000_init_function_pointers_ich8lan(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_init_function_pointers_ich8lan"); + + hw->mac.ops.init_params = e1000_init_mac_params_ich8lan; + hw->nvm.ops.init_params = e1000_init_nvm_params_ich8lan; + switch (hw->mac.type) { + case e1000_ich8lan: + case e1000_ich9lan: + case e1000_ich10lan: + hw->phy.ops.init_params = e1000_init_phy_params_ich8lan; + break; + case e1000_pchlan: + case e1000_pch2lan: + case e1000_pch_lpt: + case e1000_pch_spt: + case e1000_pch_cnp: + hw->phy.ops.init_params = e1000_init_phy_params_pchlan; + break; + default: + break; + } +} + +/** + * e1000_acquire_nvm_ich8lan - Acquire NVM mutex + * @hw: pointer to the HW structure + * + * Acquires the mutex for performing NVM operations. + **/ +STATIC s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_acquire_nvm_ich8lan"); + + E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.nvm_mutex); + + return E1000_SUCCESS; +} + +/** + * e1000_release_nvm_ich8lan - Release NVM mutex + * @hw: pointer to the HW structure + * + * Releases the mutex used while performing NVM operations. + **/ +STATIC void e1000_release_nvm_ich8lan(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_release_nvm_ich8lan"); + + E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.nvm_mutex); + + return; +} + +/** + * e1000_acquire_swflag_ich8lan - Acquire software control flag + * @hw: pointer to the HW structure + * + * Acquires the software control flag for performing PHY and select + * MAC CSR accesses. + **/ +STATIC s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw) +{ + u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_acquire_swflag_ich8lan"); + + E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.swflag_mutex); + + while (timeout) { + extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL); + if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)) + break; + + msec_delay_irq(1); + timeout--; + } + + if (!timeout) { + DEBUGOUT("SW has already locked the resource.\n"); + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + timeout = SW_FLAG_TIMEOUT; + + extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG; + E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl); + + while (timeout) { + extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL); + if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) + break; + + msec_delay_irq(1); + timeout--; + } + + if (!timeout) { + DEBUGOUT2("Failed to acquire the semaphore, FW or HW has it: FWSM=0x%8.8x EXTCNF_CTRL=0x%8.8x)\n", + E1000_READ_REG(hw, E1000_FWSM), extcnf_ctrl); + extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG; + E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl); + ret_val = -E1000_ERR_CONFIG; + goto out; + } + +out: + if (ret_val) + E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex); + + return ret_val; +} + +/** + * e1000_release_swflag_ich8lan - Release software control flag + * @hw: pointer to the HW structure + * + * Releases the software control flag for performing PHY and select + * MAC CSR accesses. 
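+ *
+ * Intended to balance a successful e1000_acquire_swflag_ich8lan() call: the
+ * EXTCNF_CTRL software flag is cleared (with a debug message if it was
+ * unexpectedly released already) and the swflag mutex is dropped.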
+ **/ +STATIC void e1000_release_swflag_ich8lan(struct e1000_hw *hw) +{ + u32 extcnf_ctrl; + + DEBUGFUNC("e1000_release_swflag_ich8lan"); + + extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL); + + if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) { + extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG; + E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl); + } else { + DEBUGOUT("Semaphore unexpectedly released by sw/fw/hw\n"); + } + + E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex); + + return; +} + +/** + * e1000_check_mng_mode_ich8lan - Checks management mode + * @hw: pointer to the HW structure + * + * This checks if the adapter has any manageability enabled. + * This is a function pointer entry point only called by read/write + * routines for the PHY and NVM parts. + **/ +STATIC bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw) +{ + u32 fwsm; + + DEBUGFUNC("e1000_check_mng_mode_ich8lan"); + + fwsm = E1000_READ_REG(hw, E1000_FWSM); + + return (fwsm & E1000_ICH_FWSM_FW_VALID) && + ((fwsm & E1000_FWSM_MODE_MASK) == + (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT)); +} + +/** + * e1000_check_mng_mode_pchlan - Checks management mode + * @hw: pointer to the HW structure + * + * This checks if the adapter has iAMT enabled. + * This is a function pointer entry point only called by read/write + * routines for the PHY and NVM parts. + **/ +STATIC bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw) +{ + u32 fwsm; + + DEBUGFUNC("e1000_check_mng_mode_pchlan"); + + fwsm = E1000_READ_REG(hw, E1000_FWSM); + + return (fwsm & E1000_ICH_FWSM_FW_VALID) && + (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT)); +} + +/** + * e1000_rar_set_pch2lan - Set receive address register + * @hw: pointer to the HW structure + * @addr: pointer to the receive address + * @index: receive address array register + * + * Sets the receive address array register at index to the address passed + * in by addr. For 82579, RAR[0] is the base address register that is to + * contain the MAC address but RAR[1-6] are reserved for manageability (ME). + * Use SHRA[0-3] in place of those reserved for ME. + **/ +STATIC int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index) +{ + u32 rar_low, rar_high; + + DEBUGFUNC("e1000_rar_set_pch2lan"); + + /* HW expects these in little endian so we reverse the byte order + * from network order (big endian) to little endian + */ + rar_low = ((u32) addr[0] | + ((u32) addr[1] << 8) | + ((u32) addr[2] << 16) | ((u32) addr[3] << 24)); + + rar_high = ((u32) addr[4] | ((u32) addr[5] << 8)); + + /* If MAC address zero, no need to set the AV bit */ + if (rar_low || rar_high) + rar_high |= E1000_RAH_AV; + + if (index == 0) { + E1000_WRITE_REG(hw, E1000_RAL(index), rar_low); + E1000_WRITE_FLUSH(hw); + E1000_WRITE_REG(hw, E1000_RAH(index), rar_high); + E1000_WRITE_FLUSH(hw); + return E1000_SUCCESS; + } + + /* RAR[1-6] are owned by manageability. Skip those and program the + * next address into the SHRA register array. 
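+	 * Hence the "index - 1" below: a requested RAR index of 1 lands in
+	 * SHRAL/SHRAH[0], index 2 in SHRAL/SHRAH[1], and so on, written under
+	 * the software control flag and read back to verify.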
+ */ + if (index < (u32) (hw->mac.rar_entry_count)) { + s32 ret_val; + + ret_val = e1000_acquire_swflag_ich8lan(hw); + if (ret_val) + goto out; + + E1000_WRITE_REG(hw, E1000_SHRAL(index - 1), rar_low); + E1000_WRITE_FLUSH(hw); + E1000_WRITE_REG(hw, E1000_SHRAH(index - 1), rar_high); + E1000_WRITE_FLUSH(hw); + + e1000_release_swflag_ich8lan(hw); + + /* verify the register updates */ + if ((E1000_READ_REG(hw, E1000_SHRAL(index - 1)) == rar_low) && + (E1000_READ_REG(hw, E1000_SHRAH(index - 1)) == rar_high)) + return E1000_SUCCESS; + + DEBUGOUT2("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n", + (index - 1), E1000_READ_REG(hw, E1000_FWSM)); + } + +out: + DEBUGOUT1("Failed to write receive address at index %d\n", index); + return -E1000_ERR_CONFIG; +} + +/** + * e1000_rar_set_pch_lpt - Set receive address registers + * @hw: pointer to the HW structure + * @addr: pointer to the receive address + * @index: receive address array register + * + * Sets the receive address register array at index to the address passed + * in by addr. For LPT, RAR[0] is the base address register that is to + * contain the MAC address. SHRA[0-10] are the shared receive address + * registers that are shared between the Host and manageability engine (ME). + **/ +STATIC int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index) +{ + u32 rar_low, rar_high; + u32 wlock_mac; + + DEBUGFUNC("e1000_rar_set_pch_lpt"); + + /* HW expects these in little endian so we reverse the byte order + * from network order (big endian) to little endian + */ + rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) | + ((u32) addr[2] << 16) | ((u32) addr[3] << 24)); + + rar_high = ((u32) addr[4] | ((u32) addr[5] << 8)); + + /* If MAC address zero, no need to set the AV bit */ + if (rar_low || rar_high) + rar_high |= E1000_RAH_AV; + + if (index == 0) { + E1000_WRITE_REG(hw, E1000_RAL(index), rar_low); + E1000_WRITE_FLUSH(hw); + E1000_WRITE_REG(hw, E1000_RAH(index), rar_high); + E1000_WRITE_FLUSH(hw); + return E1000_SUCCESS; + } + + /* The manageability engine (ME) can lock certain SHRAR registers that + * it is using - those registers are unavailable for use. + */ + if (index < hw->mac.rar_entry_count) { + wlock_mac = E1000_READ_REG(hw, E1000_FWSM) & + E1000_FWSM_WLOCK_MAC_MASK; + wlock_mac >>= E1000_FWSM_WLOCK_MAC_SHIFT; + + /* Check if all SHRAR registers are locked */ + if (wlock_mac == 1) + goto out; + + if ((wlock_mac == 0) || (index <= wlock_mac)) { + s32 ret_val; + + ret_val = e1000_acquire_swflag_ich8lan(hw); + + if (ret_val) + goto out; + + E1000_WRITE_REG(hw, E1000_SHRAL_PCH_LPT(index - 1), + rar_low); + E1000_WRITE_FLUSH(hw); + E1000_WRITE_REG(hw, E1000_SHRAH_PCH_LPT(index - 1), + rar_high); + E1000_WRITE_FLUSH(hw); + + e1000_release_swflag_ich8lan(hw); + + /* verify the register updates */ + if ((E1000_READ_REG(hw, E1000_SHRAL_PCH_LPT(index - 1)) == rar_low) && + (E1000_READ_REG(hw, E1000_SHRAH_PCH_LPT(index - 1)) == rar_high)) + return E1000_SUCCESS; + } + } + +out: + DEBUGOUT1("Failed to write receive address at index %d\n", index); + return -E1000_ERR_CONFIG; +} + +#ifndef NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT +/** + * e1000_update_mc_addr_list_pch2lan - Update Multicast addresses + * @hw: pointer to the HW structure + * @mc_addr_list: array of multicast addresses to program + * @mc_addr_count: number of multicast addresses to program + * + * Updates entire Multicast Table Array of the PCH2 MAC and PHY. + * The caller must have a packed mc_addr_list of multicast addresses. 
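+ *
+ * After the generic MTA update, each 32-bit mta_shadow word is mirrored
+ * into the PHY through the BM wakeup-register window as two 16-bit writes:
+ * BM_MTA(i) for the low half and BM_MTA(i) + 1 for the high half.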
+ **/ +STATIC void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw, + u8 *mc_addr_list, + u32 mc_addr_count) +{ + u16 phy_reg = 0; + int i; + s32 ret_val; + + DEBUGFUNC("e1000_update_mc_addr_list_pch2lan"); + + e1000_update_mc_addr_list_generic(hw, mc_addr_list, mc_addr_count); + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return; + + ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg); + if (ret_val) + goto release; + + for (i = 0; i < hw->mac.mta_reg_count; i++) { + hw->phy.ops.write_reg_page(hw, BM_MTA(i), + (u16)(hw->mac.mta_shadow[i] & + 0xFFFF)); + hw->phy.ops.write_reg_page(hw, (BM_MTA(i) + 1), + (u16)((hw->mac.mta_shadow[i] >> 16) & + 0xFFFF)); + } + + e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg); + +release: + hw->phy.ops.release(hw); +} + +#endif /* NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT */ +/** + * e1000_check_reset_block_ich8lan - Check if PHY reset is blocked + * @hw: pointer to the HW structure + * + * Checks if firmware is blocking the reset of the PHY. + * This is a function pointer entry point only called by + * reset routines. + **/ +STATIC s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw) +{ + u32 fwsm; + bool blocked = false; + int i = 0; + + DEBUGFUNC("e1000_check_reset_block_ich8lan"); + + do { + fwsm = E1000_READ_REG(hw, E1000_FWSM); + if (!(fwsm & E1000_ICH_FWSM_RSPCIPHY)) { + blocked = true; + msec_delay(10); + continue; + } + blocked = false; + } while (blocked && (i++ < 30)); + return blocked ? E1000_BLK_PHY_RESET : E1000_SUCCESS; +} + +/** + * e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states + * @hw: pointer to the HW structure + * + * Assumes semaphore already acquired. + * + **/ +STATIC s32 e1000_write_smbus_addr(struct e1000_hw *hw) +{ + u16 phy_data; + u32 strap = E1000_READ_REG(hw, E1000_STRAP); + u32 freq = (strap & E1000_STRAP_SMT_FREQ_MASK) >> + E1000_STRAP_SMT_FREQ_SHIFT; + s32 ret_val; + + strap &= E1000_STRAP_SMBUS_ADDRESS_MASK; + + ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~HV_SMB_ADDR_MASK; + phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT); + phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID; + + if (hw->phy.type == e1000_phy_i217) { + /* Restore SMBus frequency */ + if (freq--) { + phy_data &= ~HV_SMB_ADDR_FREQ_MASK; + phy_data |= (freq & (1 << 0)) << + HV_SMB_ADDR_FREQ_LOW_SHIFT; + phy_data |= (freq & (1 << 1)) << + (HV_SMB_ADDR_FREQ_HIGH_SHIFT - 1); + } else { + DEBUGOUT("Unsupported SMB frequency in PHY\n"); + } + } + + return e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data); +} + +/** + * e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration + * @hw: pointer to the HW structure + * + * SW should configure the LCD from the NVM extended configuration region + * as a workaround for certain parts. + **/ +STATIC s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask; + s32 ret_val = E1000_SUCCESS; + u16 word_addr, reg_data, reg_addr, phy_page = 0; + + DEBUGFUNC("e1000_sw_lcd_config_ich8lan"); + + /* Initialize the PHY from the NVM on ICH platforms. This + * is needed due to an issue where the NVM configuration is + * not properly autoloaded after power transitions. + * Therefore, after each PHY reset, we will load the + * configuration data out of the NVM manually. 
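+	 * The extended configuration region is read as cnf_size pairs of
+	 * words (data first, PHY register address second); entries whose
+	 * address is IGP01E1000_PHY_PAGE_SELECT only update the page used
+	 * for the subsequent writes.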
+ */ + switch (hw->mac.type) { + case e1000_ich8lan: + if (phy->type != e1000_phy_igp_3) + return ret_val; + + if ((hw->device_id == E1000_DEV_ID_ICH8_IGP_AMT) || + (hw->device_id == E1000_DEV_ID_ICH8_IGP_C)) { + sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG; + break; + } + /* Fall-thru */ + case e1000_pchlan: + case e1000_pch2lan: + case e1000_pch_lpt: + case e1000_pch_spt: + case e1000_pch_cnp: + sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M; + break; + default: + return ret_val; + } + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + data = E1000_READ_REG(hw, E1000_FEXTNVM); + if (!(data & sw_cfg_mask)) + goto release; + + /* Make sure HW does not configure LCD from PHY + * extended configuration before SW configuration + */ + data = E1000_READ_REG(hw, E1000_EXTCNF_CTRL); + if ((hw->mac.type < e1000_pch2lan) && + (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE)) + goto release; + + cnf_size = E1000_READ_REG(hw, E1000_EXTCNF_SIZE); + cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK; + cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT; + if (!cnf_size) + goto release; + + cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK; + cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT; + + if (((hw->mac.type == e1000_pchlan) && + !(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)) || + (hw->mac.type > e1000_pchlan)) { + /* HW configures the SMBus address and LEDs when the + * OEM and LCD Write Enable bits are set in the NVM. + * When both NVM bits are cleared, SW will configure + * them instead. + */ + ret_val = e1000_write_smbus_addr(hw); + if (ret_val) + goto release; + + data = E1000_READ_REG(hw, E1000_LEDCTL); + ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG, + (u16)data); + if (ret_val) + goto release; + } + + /* Configure LCD from extended configuration region. */ + + /* cnf_base_addr is in DWORD */ + word_addr = (u16)(cnf_base_addr << 1); + + for (i = 0; i < cnf_size; i++) { + ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2), 1, + ®_data); + if (ret_val) + goto release; + + ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2 + 1), + 1, ®_addr); + if (ret_val) + goto release; + + /* Save off the PHY page for future writes. */ + if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) { + phy_page = reg_data; + continue; + } + + reg_addr &= PHY_REG_MASK; + reg_addr |= phy_page; + + ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr, + reg_data); + if (ret_val) + goto release; + } + +release: + hw->phy.ops.release(hw); + return ret_val; +} + +/** + * e1000_k1_gig_workaround_hv - K1 Si workaround + * @hw: pointer to the HW structure + * @link: link up bool flag + * + * If K1 is enabled for 1Gbps, the MAC might stall when transitioning + * from a lower speed. This workaround disables K1 whenever link is at 1Gig + * If link is down, the function will restore the default K1 setting located + * in the NVM. 
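+ *
+ * K1 is dropped only when the PHY status register (BM_CS_STATUS on 82578,
+ * HV_M_STATUS on 82577) reports link up, speed resolved or autoneg
+ * complete, and 1000 Mb/s; otherwise the NVM default in nvm_k1_enabled is
+ * kept.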
+ **/ +STATIC s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link) +{ + s32 ret_val = E1000_SUCCESS; + u16 status_reg = 0; + bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled; + + DEBUGFUNC("e1000_k1_gig_workaround_hv"); + + if (hw->mac.type != e1000_pchlan) + return E1000_SUCCESS; + + /* Wrap the whole flow with the sw flag */ + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + /* Disable K1 when link is 1Gbps, otherwise use the NVM setting */ + if (link) { + if (hw->phy.type == e1000_phy_82578) { + ret_val = hw->phy.ops.read_reg_locked(hw, BM_CS_STATUS, + &status_reg); + if (ret_val) + goto release; + + status_reg &= (BM_CS_STATUS_LINK_UP | + BM_CS_STATUS_RESOLVED | + BM_CS_STATUS_SPEED_MASK); + + if (status_reg == (BM_CS_STATUS_LINK_UP | + BM_CS_STATUS_RESOLVED | + BM_CS_STATUS_SPEED_1000)) + k1_enable = false; + } + + if (hw->phy.type == e1000_phy_82577) { + ret_val = hw->phy.ops.read_reg_locked(hw, HV_M_STATUS, + &status_reg); + if (ret_val) + goto release; + + status_reg &= (HV_M_STATUS_LINK_UP | + HV_M_STATUS_AUTONEG_COMPLETE | + HV_M_STATUS_SPEED_MASK); + + if (status_reg == (HV_M_STATUS_LINK_UP | + HV_M_STATUS_AUTONEG_COMPLETE | + HV_M_STATUS_SPEED_1000)) + k1_enable = false; + } + + /* Link stall fix for link up */ + ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19), + 0x0100); + if (ret_val) + goto release; + + } else { + /* Link stall fix for link down */ + ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19), + 0x4100); + if (ret_val) + goto release; + } + + ret_val = e1000_configure_k1_ich8lan(hw, k1_enable); + +release: + hw->phy.ops.release(hw); + + return ret_val; +} + +/** + * e1000_configure_k1_ich8lan - Configure K1 power state + * @hw: pointer to the HW structure + * @enable: K1 state to configure + * + * Configure the K1 power state based on the provided parameter. + * Assumes semaphore already acquired. + * + * Success returns 0, Failure returns -E1000_ERR_PHY (-2) + **/ +s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable) +{ + s32 ret_val; + u32 ctrl_reg = 0; + u32 ctrl_ext = 0; + u32 reg = 0; + u16 kmrn_reg = 0; + + DEBUGFUNC("e1000_configure_k1_ich8lan"); + + ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG, + &kmrn_reg); + if (ret_val) + return ret_val; + + if (k1_enable) + kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE; + else + kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE; + + ret_val = e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG, + kmrn_reg); + if (ret_val) + return ret_val; + + usec_delay(20); + ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); + ctrl_reg = E1000_READ_REG(hw, E1000_CTRL); + + reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100); + reg |= E1000_CTRL_FRCSPD; + E1000_WRITE_REG(hw, E1000_CTRL, reg); + + E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS); + E1000_WRITE_FLUSH(hw); + usec_delay(20); + E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg); + E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); + E1000_WRITE_FLUSH(hw); + usec_delay(20); + + return E1000_SUCCESS; +} + +/** + * e1000_oem_bits_config_ich8lan - SW-based LCD Configuration + * @hw: pointer to the HW structure + * @d0_state: boolean if entering d0 or d3 device state + * + * SW will configure Gbe Disable and LPLU based on the NVM. The four bits are + * collectively called OEM bits. The OEM Write Enable bit and SW Config bit + * in NVM determines whether HW should configure LPLU and Gbe Disable. 
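+ *
+ * Sketch of the mapping applied below: in D0, PHY_CTRL.GBE_DISABLE maps to
+ * HV_OEM_BITS_GBE_DIS and PHY_CTRL.D0A_LPLU to HV_OEM_BITS_LPLU; for D3 the
+ * NOND0A variants are honoured as well, and Restart Auto-negotiation is
+ * requested (where allowed) to latch the new bits.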
+ **/ +STATIC s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state) +{ + s32 ret_val = 0; + u32 mac_reg; + u16 oem_reg; + + DEBUGFUNC("e1000_oem_bits_config_ich8lan"); + + if (hw->mac.type < e1000_pchlan) + return ret_val; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + if (hw->mac.type == e1000_pchlan) { + mac_reg = E1000_READ_REG(hw, E1000_EXTCNF_CTRL); + if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) + goto release; + } + + mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM); + if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M)) + goto release; + + mac_reg = E1000_READ_REG(hw, E1000_PHY_CTRL); + + ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg); + if (ret_val) + goto release; + + oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU); + + if (d0_state) { + if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE) + oem_reg |= HV_OEM_BITS_GBE_DIS; + + if (mac_reg & E1000_PHY_CTRL_D0A_LPLU) + oem_reg |= HV_OEM_BITS_LPLU; + } else { + if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE | + E1000_PHY_CTRL_NOND0A_GBE_DISABLE)) + oem_reg |= HV_OEM_BITS_GBE_DIS; + + if (mac_reg & (E1000_PHY_CTRL_D0A_LPLU | + E1000_PHY_CTRL_NOND0A_LPLU)) + oem_reg |= HV_OEM_BITS_LPLU; + } + + /* Set Restart auto-neg to activate the bits */ + if ((d0_state || (hw->mac.type != e1000_pchlan)) && + !hw->phy.ops.check_reset_block(hw)) + oem_reg |= HV_OEM_BITS_RESTART_AN; + + ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg); + +release: + hw->phy.ops.release(hw); + + return ret_val; +} + + +/** + * e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode + * @hw: pointer to the HW structure + **/ +STATIC s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw) +{ + s32 ret_val; + u16 data; + + DEBUGFUNC("e1000_set_mdio_slow_mode_hv"); + + ret_val = hw->phy.ops.read_reg(hw, HV_KMRN_MODE_CTRL, &data); + if (ret_val) + return ret_val; + + data |= HV_KMRN_MDIO_SLOW; + + ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_MODE_CTRL, data); + + return ret_val; +} + +/** + * e1000_hv_phy_workarounds_ich8lan - A series of Phy workarounds to be + * done after every PHY reset. + **/ +STATIC s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + u16 phy_data; + + DEBUGFUNC("e1000_hv_phy_workarounds_ich8lan"); + + if (hw->mac.type != e1000_pchlan) + return E1000_SUCCESS; + + /* Set MDIO slow mode before any other MDIO access */ + if (hw->phy.type == e1000_phy_82577) { + ret_val = e1000_set_mdio_slow_mode_hv(hw); + if (ret_val) + return ret_val; + } + + if (((hw->phy.type == e1000_phy_82577) && + ((hw->phy.revision == 1) || (hw->phy.revision == 2))) || + ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) { + /* Disable generation of early preamble */ + ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 25), 0x4431); + if (ret_val) + return ret_val; + + /* Preamble tuning for SSC */ + ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA, + 0xA204); + if (ret_val) + return ret_val; + } + + if (hw->phy.type == e1000_phy_82578) { + /* Return registers to default by doing a soft reset then + * writing 0x3140 to the control register. 
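+		 * Read against the standard MII control register layout,
+		 * 0x3140 selects 1000 Mb/s, full duplex and auto-negotiation
+		 * enabled (a decoding note, not taken from the 82578
+		 * datasheet); this path is only taken for PHY revision < 2.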
+ */ + if (hw->phy.revision < 2) { + e1000_phy_sw_reset_generic(hw); + ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL, + 0x3140); + } + } + + /* Select page 0 */ + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + hw->phy.addr = 1; + ret_val = e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0); + hw->phy.ops.release(hw); + if (ret_val) + return ret_val; + + /* Configure the K1 Si workaround during phy reset assuming there is + * link so that it disables K1 if link is in 1Gbps. + */ + ret_val = e1000_k1_gig_workaround_hv(hw, true); + if (ret_val) + return ret_val; + + /* Workaround for link disconnects on a busy hub in half duplex */ + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + ret_val = hw->phy.ops.read_reg_locked(hw, BM_PORT_GEN_CFG, &phy_data); + if (ret_val) + goto release; + ret_val = hw->phy.ops.write_reg_locked(hw, BM_PORT_GEN_CFG, + phy_data & 0x00FF); + if (ret_val) + goto release; + + /* set MSE higher to enable link to stay up when noise is high */ + ret_val = e1000_write_emi_reg_locked(hw, I82577_MSE_THRESHOLD, 0x0034); +release: + hw->phy.ops.release(hw); + + return ret_val; +} + +/** + * e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY + * @hw: pointer to the HW structure + **/ +void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw) +{ + u32 mac_reg; + u16 i, phy_reg = 0; + s32 ret_val; + + DEBUGFUNC("e1000_copy_rx_addrs_to_phy_ich8lan"); + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return; + ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg); + if (ret_val) + goto release; + + /* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */ + for (i = 0; i < (hw->mac.rar_entry_count); i++) { + mac_reg = E1000_READ_REG(hw, E1000_RAL(i)); + hw->phy.ops.write_reg_page(hw, BM_RAR_L(i), + (u16)(mac_reg & 0xFFFF)); + hw->phy.ops.write_reg_page(hw, BM_RAR_M(i), + (u16)((mac_reg >> 16) & 0xFFFF)); + + mac_reg = E1000_READ_REG(hw, E1000_RAH(i)); + hw->phy.ops.write_reg_page(hw, BM_RAR_H(i), + (u16)(mac_reg & 0xFFFF)); + hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i), + (u16)((mac_reg & E1000_RAH_AV) + >> 16)); + } + + e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg); + +release: + hw->phy.ops.release(hw); +} + +#ifndef CRC32_OS_SUPPORT +STATIC u32 e1000_calc_rx_da_crc(u8 mac[]) +{ + u32 poly = 0xEDB88320; /* Polynomial for 802.3 CRC calculation */ + u32 i, j, mask, crc; + + DEBUGFUNC("e1000_calc_rx_da_crc"); + + crc = 0xffffffff; + for (i = 0; i < 6; i++) { + crc = crc ^ mac[i]; + for (j = 8; j > 0; j--) { + mask = (crc & 1) * (-1); + crc = (crc >> 1) ^ (poly & mask); + } + } + return ~crc; +} + +#endif /* CRC32_OS_SUPPORT */ +/** + * e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation + * with 82579 PHY + * @hw: pointer to the HW structure + * @enable: flag to enable/disable workaround when enabling/disabling jumbos + **/ +s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable) +{ + s32 ret_val = E1000_SUCCESS; + u16 phy_reg, data; + u32 mac_reg; + u16 i; + + DEBUGFUNC("e1000_lv_jumbo_workaround_ich8lan"); + + if (hw->mac.type < e1000_pch2lan) + return E1000_SUCCESS; + + /* disable Rx path while enabling/disabling workaround */ + hw->phy.ops.read_reg(hw, PHY_REG(769, 20), &phy_reg); + ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 20), + phy_reg | (1 << 14)); + if (ret_val) + return ret_val; + + if (enable) { + /* Write Rx addresses (rar_entry_count for RAL/H, and + * SHRAL/H) and initial CRC values to the MAC + */ + for (i = 0; i < 
hw->mac.rar_entry_count; i++) { + u8 mac_addr[ETH_ADDR_LEN] = {0}; + u32 addr_high, addr_low; + + addr_high = E1000_READ_REG(hw, E1000_RAH(i)); + if (!(addr_high & E1000_RAH_AV)) + continue; + addr_low = E1000_READ_REG(hw, E1000_RAL(i)); + mac_addr[0] = (addr_low & 0xFF); + mac_addr[1] = ((addr_low >> 8) & 0xFF); + mac_addr[2] = ((addr_low >> 16) & 0xFF); + mac_addr[3] = ((addr_low >> 24) & 0xFF); + mac_addr[4] = (addr_high & 0xFF); + mac_addr[5] = ((addr_high >> 8) & 0xFF); + +#ifndef CRC32_OS_SUPPORT + E1000_WRITE_REG(hw, E1000_PCH_RAICC(i), + e1000_calc_rx_da_crc(mac_addr)); +#else /* CRC32_OS_SUPPORT */ + E1000_WRITE_REG(hw, E1000_PCH_RAICC(i), + E1000_CRC32(ETH_ADDR_LEN, mac_addr)); +#endif /* CRC32_OS_SUPPORT */ + } + + /* Write Rx addresses to the PHY */ + e1000_copy_rx_addrs_to_phy_ich8lan(hw); + + /* Enable jumbo frame workaround in the MAC */ + mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG); + mac_reg &= ~(1 << 14); + mac_reg |= (7 << 15); + E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg); + + mac_reg = E1000_READ_REG(hw, E1000_RCTL); + mac_reg |= E1000_RCTL_SECRC; + E1000_WRITE_REG(hw, E1000_RCTL, mac_reg); + + ret_val = e1000_read_kmrn_reg_generic(hw, + E1000_KMRNCTRLSTA_CTRL_OFFSET, + &data); + if (ret_val) + return ret_val; + ret_val = e1000_write_kmrn_reg_generic(hw, + E1000_KMRNCTRLSTA_CTRL_OFFSET, + data | (1 << 0)); + if (ret_val) + return ret_val; + ret_val = e1000_read_kmrn_reg_generic(hw, + E1000_KMRNCTRLSTA_HD_CTRL, + &data); + if (ret_val) + return ret_val; + data &= ~(0xF << 8); + data |= (0xB << 8); + ret_val = e1000_write_kmrn_reg_generic(hw, + E1000_KMRNCTRLSTA_HD_CTRL, + data); + if (ret_val) + return ret_val; + + /* Enable jumbo frame workaround in the PHY */ + hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data); + data &= ~(0x7F << 5); + data |= (0x37 << 5); + ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data); + if (ret_val) + return ret_val; + hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data); + data &= ~(1 << 13); + ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data); + if (ret_val) + return ret_val; + hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data); + data &= ~(0x3FF << 2); + data |= (E1000_TX_PTR_GAP << 2); + ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data); + if (ret_val) + return ret_val; + ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0xF100); + if (ret_val) + return ret_val; + hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data); + ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data | + (1 << 10)); + if (ret_val) + return ret_val; + } else { + /* Write MAC register values back to h/w defaults */ + mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG); + mac_reg &= ~(0xF << 14); + E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg); + + mac_reg = E1000_READ_REG(hw, E1000_RCTL); + mac_reg &= ~E1000_RCTL_SECRC; + E1000_WRITE_REG(hw, E1000_RCTL, mac_reg); + + ret_val = e1000_read_kmrn_reg_generic(hw, + E1000_KMRNCTRLSTA_CTRL_OFFSET, + &data); + if (ret_val) + return ret_val; + ret_val = e1000_write_kmrn_reg_generic(hw, + E1000_KMRNCTRLSTA_CTRL_OFFSET, + data & ~(1 << 0)); + if (ret_val) + return ret_val; + ret_val = e1000_read_kmrn_reg_generic(hw, + E1000_KMRNCTRLSTA_HD_CTRL, + &data); + if (ret_val) + return ret_val; + data &= ~(0xF << 8); + data |= (0xB << 8); + ret_val = e1000_write_kmrn_reg_generic(hw, + E1000_KMRNCTRLSTA_HD_CTRL, + data); + if (ret_val) + return ret_val; + + /* Write PHY register values back to h/w defaults */ + hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data); + data &= ~(0x7F << 5); + ret_val = hw->phy.ops.write_reg(hw, 
PHY_REG(769, 23), data); + if (ret_val) + return ret_val; + hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data); + data |= (1 << 13); + ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data); + if (ret_val) + return ret_val; + hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data); + data &= ~(0x3FF << 2); + data |= (0x8 << 2); + ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data); + if (ret_val) + return ret_val; + ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0x7E00); + if (ret_val) + return ret_val; + hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data); + ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data & + ~(1 << 10)); + if (ret_val) + return ret_val; + } + + /* re-enable Rx path after enabling/disabling workaround */ + return hw->phy.ops.write_reg(hw, PHY_REG(769, 20), phy_reg & + ~(1 << 14)); +} + +/** + * e1000_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be + * done after every PHY reset. + **/ +STATIC s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_lv_phy_workarounds_ich8lan"); + + if (hw->mac.type != e1000_pch2lan) + return E1000_SUCCESS; + + /* Set MDIO slow mode before any other MDIO access */ + ret_val = e1000_set_mdio_slow_mode_hv(hw); + if (ret_val) + return ret_val; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + /* set MSE higher to enable link to stay up when noise is high */ + ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_THRESHOLD, 0x0034); + if (ret_val) + goto release; + /* drop link after 5 times MSE threshold was reached */ + ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_LINK_DOWN, 0x0005); +release: + hw->phy.ops.release(hw); + + return ret_val; +} + +/** + * e1000_k1_gig_workaround_lv - K1 Si workaround + * @hw: pointer to the HW structure + * + * Workaround to set the K1 beacon duration for 82579 parts in 10Mbps + * Disable K1 for 1000 and 100 speeds + **/ +STATIC s32 e1000_k1_workaround_lv(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + u16 status_reg = 0; + + DEBUGFUNC("e1000_k1_workaround_lv"); + + if (hw->mac.type != e1000_pch2lan) + return E1000_SUCCESS; + + /* Set K1 beacon duration based on 10Mbs speed */ + ret_val = hw->phy.ops.read_reg(hw, HV_M_STATUS, &status_reg); + if (ret_val) + return ret_val; + + if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) + == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) { + if (status_reg & + (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) { + u16 pm_phy_reg; + + /* LV 1G/100 Packet drop issue wa */ + ret_val = hw->phy.ops.read_reg(hw, HV_PM_CTRL, + &pm_phy_reg); + if (ret_val) + return ret_val; + pm_phy_reg &= ~HV_PM_CTRL_K1_ENABLE; + ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, + pm_phy_reg); + if (ret_val) + return ret_val; + } else { + u32 mac_reg; + mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4); + mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK; + mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC; + E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg); + } + } + + return ret_val; +} + +/** + * e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware + * @hw: pointer to the HW structure + * @gate: boolean set to true to gate, false to ungate + * + * Gate/ungate the automatic PHY configuration via hardware; perform + * the configuration via software instead. 
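+ *
+ * Gating sets E1000_EXTCNF_CTRL_GATE_PHY_CFG in the EXTCNF_CTRL
+ * register and ungating clears it; MACs older than pch2lan return
+ * without touching the register.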
+ **/ +STATIC void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate) +{ + u32 extcnf_ctrl; + + DEBUGFUNC("e1000_gate_hw_phy_config_ich8lan"); + + if (hw->mac.type < e1000_pch2lan) + return; + + extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL); + + if (gate) + extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG; + else + extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG; + + E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl); +} + +/** + * e1000_lan_init_done_ich8lan - Check for PHY config completion + * @hw: pointer to the HW structure + * + * Check the appropriate indication the MAC has finished configuring the + * PHY after a software reset. + **/ +STATIC void e1000_lan_init_done_ich8lan(struct e1000_hw *hw) +{ + u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT; + + DEBUGFUNC("e1000_lan_init_done_ich8lan"); + + /* Wait for basic configuration completes before proceeding */ + do { + data = E1000_READ_REG(hw, E1000_STATUS); + data &= E1000_STATUS_LAN_INIT_DONE; + usec_delay(100); + } while ((!data) && --loop); + + /* If basic configuration is incomplete before the above loop + * count reaches 0, loading the configuration from NVM will + * leave the PHY in a bad state possibly resulting in no link. + */ + if (loop == 0) + DEBUGOUT("LAN_INIT_DONE not set, increase timeout\n"); + + /* Clear the Init Done bit for the next init event */ + data = E1000_READ_REG(hw, E1000_STATUS); + data &= ~E1000_STATUS_LAN_INIT_DONE; + E1000_WRITE_REG(hw, E1000_STATUS, data); +} + +/** + * e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset + * @hw: pointer to the HW structure + **/ +STATIC s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + u16 reg; + + DEBUGFUNC("e1000_post_phy_reset_ich8lan"); + + if (hw->phy.ops.check_reset_block(hw)) + return E1000_SUCCESS; + + /* Allow time for h/w to get to quiescent state after reset */ + msec_delay(10); + + /* Perform any necessary post-reset workarounds */ + switch (hw->mac.type) { + case e1000_pchlan: + ret_val = e1000_hv_phy_workarounds_ich8lan(hw); + if (ret_val) + return ret_val; + break; + case e1000_pch2lan: + ret_val = e1000_lv_phy_workarounds_ich8lan(hw); + if (ret_val) + return ret_val; + break; + default: + break; + } + + /* Clear the host wakeup bit after lcd reset */ + if (hw->mac.type >= e1000_pchlan) { + hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, ®); + reg &= ~BM_WUC_HOST_WU_BIT; + hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, reg); + } + + /* Configure the LCD with the extended configuration region in NVM */ + ret_val = e1000_sw_lcd_config_ich8lan(hw); + if (ret_val) + return ret_val; + + /* Configure the LCD with the OEM bits in NVM */ + ret_val = e1000_oem_bits_config_ich8lan(hw, true); + + if (hw->mac.type == e1000_pch2lan) { + /* Ungate automatic PHY configuration on non-managed 82579 */ + if (!(E1000_READ_REG(hw, E1000_FWSM) & + E1000_ICH_FWSM_FW_VALID)) { + msec_delay(10); + e1000_gate_hw_phy_config_ich8lan(hw, false); + } + + /* Set EEE LPI Update Timer to 200usec */ + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + ret_val = e1000_write_emi_reg_locked(hw, + I82579_LPI_UPDATE_TIMER, + 0x1387); + hw->phy.ops.release(hw); + } + + return ret_val; +} + +/** + * e1000_phy_hw_reset_ich8lan - Performs a PHY reset + * @hw: pointer to the HW structure + * + * Resets the PHY + * This is a function pointer entry point called by drivers + * or other shared routines. 
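+ *
+ * On a non-managed 82579 (no valid firmware indication in FWSM) the
+ * automatic PHY configuration is gated before the generic reset and is
+ * ungated again by e1000_post_phy_reset_ich8lan once the LCD and OEM
+ * bit configuration has been reloaded.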
+ **/ +STATIC s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_phy_hw_reset_ich8lan"); + + /* Gate automatic PHY configuration by hardware on non-managed 82579 */ + if ((hw->mac.type == e1000_pch2lan) && + !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID)) + e1000_gate_hw_phy_config_ich8lan(hw, true); + + ret_val = e1000_phy_hw_reset_generic(hw); + if (ret_val) + return ret_val; + + return e1000_post_phy_reset_ich8lan(hw); +} + +/** + * e1000_set_lplu_state_pchlan - Set Low Power Link Up state + * @hw: pointer to the HW structure + * @active: true to enable LPLU, false to disable + * + * Sets the LPLU state according to the active flag. For PCH, if OEM write + * bit are disabled in the NVM, writing the LPLU bits in the MAC will not set + * the phy speed. This function will manually set the LPLU bit and restart + * auto-neg as hw would do. D3 and D0 LPLU will call the same function + * since it configures the same bit. + **/ +STATIC s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active) +{ + s32 ret_val; + u16 oem_reg; + + DEBUGFUNC("e1000_set_lplu_state_pchlan"); + ret_val = hw->phy.ops.read_reg(hw, HV_OEM_BITS, &oem_reg); + if (ret_val) + return ret_val; + + if (active) + oem_reg |= HV_OEM_BITS_LPLU; + else + oem_reg &= ~HV_OEM_BITS_LPLU; + + if (!hw->phy.ops.check_reset_block(hw)) + oem_reg |= HV_OEM_BITS_RESTART_AN; + + return hw->phy.ops.write_reg(hw, HV_OEM_BITS, oem_reg); +} + +/** + * e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state + * @hw: pointer to the HW structure + * @active: true to enable LPLU, false to disable + * + * Sets the LPLU D0 state according to the active flag. When + * activating LPLU this function also disables smart speed + * and vice versa. LPLU will not be activated unless the + * device autonegotiation advertisement meets standards of + * either 10 or 10/100 or 10/100/1000 at all duplexes. + * This is a function pointer entry point only called by + * PHY setup routines. + **/ +STATIC s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 phy_ctrl; + s32 ret_val = E1000_SUCCESS; + u16 data; + + DEBUGFUNC("e1000_set_d0_lplu_state_ich8lan"); + + if (phy->type == e1000_phy_ife) + return E1000_SUCCESS; + + phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL); + + if (active) { + phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU; + E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl); + + if (phy->type != e1000_phy_igp_3) + return E1000_SUCCESS; + + /* Call gig speed drop workaround on LPLU before accessing + * any PHY registers + */ + if (hw->mac.type == e1000_ich8lan) + e1000_gig_downshift_workaround_ich8lan(hw); + + /* When LPLU is enabled, we should disable SmartSpeed */ + ret_val = phy->ops.read_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + } else { + phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU; + E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl); + + if (phy->type != e1000_phy_igp_3) + return E1000_SUCCESS; + + /* LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. 
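+	 * The block below therefore restores the SmartSpeed setting
+	 * requested in phy->smart_speed now that D0 LPLU has been
+	 * turned off.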
+ */ + if (phy->smart_speed == e1000_smart_speed_on) { + ret_val = phy->ops.read_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data |= IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + } else if (phy->smart_speed == e1000_smart_speed_off) { + ret_val = phy->ops.read_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + } + } + + return E1000_SUCCESS; +} + +/** + * e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state + * @hw: pointer to the HW structure + * @active: true to enable LPLU, false to disable + * + * Sets the LPLU D3 state according to the active flag. When + * activating LPLU this function also disables smart speed + * and vice versa. LPLU will not be activated unless the + * device autonegotiation advertisement meets standards of + * either 10 or 10/100 or 10/100/1000 at all duplexes. + * This is a function pointer entry point only called by + * PHY setup routines. + **/ +STATIC s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 phy_ctrl; + s32 ret_val = E1000_SUCCESS; + u16 data; + + DEBUGFUNC("e1000_set_d3_lplu_state_ich8lan"); + + phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL); + + if (!active) { + phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU; + E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl); + + if (phy->type != e1000_phy_igp_3) + return E1000_SUCCESS; + + /* LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. 
+ */ + if (phy->smart_speed == e1000_smart_speed_on) { + ret_val = phy->ops.read_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data |= IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + } else if (phy->smart_speed == e1000_smart_speed_off) { + ret_val = phy->ops.read_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + } + } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) || + (phy->autoneg_advertised == E1000_ALL_NOT_GIG) || + (phy->autoneg_advertised == E1000_ALL_10_SPEED)) { + phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU; + E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl); + + if (phy->type != e1000_phy_igp_3) + return E1000_SUCCESS; + + /* Call gig speed drop workaround on LPLU before accessing + * any PHY registers + */ + if (hw->mac.type == e1000_ich8lan) + e1000_gig_downshift_workaround_ich8lan(hw); + + /* When LPLU is enabled, we should disable SmartSpeed */ + ret_val = phy->ops.read_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); + } + + return ret_val; +} + +/** + * e1000_valid_nvm_bank_detect_ich8lan - finds out the valid bank 0 or 1 + * @hw: pointer to the HW structure + * @bank: pointer to the variable that returns the active bank + * + * Reads signature byte from the NVM using the flash access registers. + * Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank. + **/ +STATIC s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank) +{ + u32 eecd; + struct e1000_nvm_info *nvm = &hw->nvm; + u32 bank1_offset = nvm->flash_bank_size * sizeof(u16); + u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1; + u32 nvm_dword = 0; + u8 sig_byte = 0; + s32 ret_val; + + DEBUGFUNC("e1000_valid_nvm_bank_detect_ich8lan"); + + switch (hw->mac.type) { + case e1000_pch_spt: + case e1000_pch_cnp: + bank1_offset = nvm->flash_bank_size; + act_offset = E1000_ICH_NVM_SIG_WORD; + + /* set bank to 0 in case flash read fails */ + *bank = 0; + + /* Check bank 0 */ + ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, + &nvm_dword); + if (ret_val) + return ret_val; + sig_byte = (u8)((nvm_dword & 0xFF00) >> 8); + if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) == + E1000_ICH_NVM_SIG_VALUE) { + *bank = 0; + return E1000_SUCCESS; + } + + /* Check bank 1 */ + ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset + + bank1_offset, + &nvm_dword); + if (ret_val) + return ret_val; + sig_byte = (u8)((nvm_dword & 0xFF00) >> 8); + if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) == + E1000_ICH_NVM_SIG_VALUE) { + *bank = 1; + return E1000_SUCCESS; + } + + DEBUGOUT("ERROR: No valid NVM bank present\n"); + return -E1000_ERR_NVM; + case e1000_ich8lan: + case e1000_ich9lan: + eecd = E1000_READ_REG(hw, E1000_EECD); + if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) == + E1000_EECD_SEC1VAL_VALID_MASK) { + if (eecd & E1000_EECD_SEC1VAL) + *bank = 1; + else + *bank = 0; + + return E1000_SUCCESS; + } + DEBUGOUT("Unable to determine valid NVM bank via EEC - reading flash signature\n"); + /* fall-thru */ + default: + /* set bank to 0 in case flash read fails */ + *bank = 0; + + /* Check bank 0 */ + ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset, + 
&sig_byte); + if (ret_val) + return ret_val; + if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) == + E1000_ICH_NVM_SIG_VALUE) { + *bank = 0; + return E1000_SUCCESS; + } + + /* Check bank 1 */ + ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset + + bank1_offset, + &sig_byte); + if (ret_val) + return ret_val; + if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) == + E1000_ICH_NVM_SIG_VALUE) { + *bank = 1; + return E1000_SUCCESS; + } + + DEBUGOUT("ERROR: No valid NVM bank present\n"); + return -E1000_ERR_NVM; + } +} + +/** + * e1000_read_nvm_spt - NVM access for SPT + * @hw: pointer to the HW structure + * @offset: The offset (in bytes) of the word(s) to read. + * @words: Size of data to read in words. + * @data: pointer to the word(s) to read at offset. + * + * Reads a word(s) from the NVM + **/ +STATIC s32 e1000_read_nvm_spt(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; + u32 act_offset; + s32 ret_val = E1000_SUCCESS; + u32 bank = 0; + u32 dword = 0; + u16 offset_to_read; + u16 i; + + DEBUGFUNC("e1000_read_nvm_spt"); + + if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) || + (words == 0)) { + DEBUGOUT("nvm parameter(s) out of bounds\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + + nvm->ops.acquire(hw); + + ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); + if (ret_val != E1000_SUCCESS) { + DEBUGOUT("Could not detect valid bank, assuming bank 0\n"); + bank = 0; + } + + act_offset = (bank) ? nvm->flash_bank_size : 0; + act_offset += offset; + + ret_val = E1000_SUCCESS; + + for (i = 0; i < words; i += 2) { + if (words - i == 1) { + if (dev_spec->shadow_ram[offset+i].modified) { + data[i] = dev_spec->shadow_ram[offset+i].value; + } else { + offset_to_read = act_offset + i - + ((act_offset + i) % 2); + ret_val = + e1000_read_flash_dword_ich8lan(hw, + offset_to_read, + &dword); + if (ret_val) + break; + if ((act_offset + i) % 2 == 0) + data[i] = (u16)(dword & 0xFFFF); + else + data[i] = (u16)((dword >> 16) & 0xFFFF); + } + } else { + offset_to_read = act_offset + i; + if (!(dev_spec->shadow_ram[offset+i].modified) || + !(dev_spec->shadow_ram[offset+i+1].modified)) { + ret_val = + e1000_read_flash_dword_ich8lan(hw, + offset_to_read, + &dword); + if (ret_val) + break; + } + if (dev_spec->shadow_ram[offset+i].modified) + data[i] = dev_spec->shadow_ram[offset+i].value; + else + data[i] = (u16) (dword & 0xFFFF); + if (dev_spec->shadow_ram[offset+i].modified) + data[i+1] = + dev_spec->shadow_ram[offset+i+1].value; + else + data[i+1] = (u16) (dword >> 16 & 0xFFFF); + } + } + + nvm->ops.release(hw); + +out: + if (ret_val) + DEBUGOUT1("NVM read error: %d\n", ret_val); + + return ret_val; +} + +/** + * e1000_read_nvm_ich8lan - Read word(s) from the NVM + * @hw: pointer to the HW structure + * @offset: The offset (in bytes) of the word(s) to read. + * @words: Size of data to read in words + * @data: Pointer to the word(s) to read at offset. + * + * Reads a word(s) from the NVM using the flash access registers. 
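+ *
+ * Words previously modified through e1000_write_nvm_ich8lan are
+ * returned from the shadow RAM; all other words are read from the
+ * active flash bank at (bank ? flash_bank_size : 0) + offset.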
+ **/ +STATIC s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; + u32 act_offset; + s32 ret_val = E1000_SUCCESS; + u32 bank = 0; + u16 i, word; + + DEBUGFUNC("e1000_read_nvm_ich8lan"); + + if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) || + (words == 0)) { + DEBUGOUT("nvm parameter(s) out of bounds\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + + nvm->ops.acquire(hw); + + ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); + if (ret_val != E1000_SUCCESS) { + DEBUGOUT("Could not detect valid bank, assuming bank 0\n"); + bank = 0; + } + + act_offset = (bank) ? nvm->flash_bank_size : 0; + act_offset += offset; + + ret_val = E1000_SUCCESS; + for (i = 0; i < words; i++) { + if (dev_spec->shadow_ram[offset+i].modified) { + data[i] = dev_spec->shadow_ram[offset+i].value; + } else { + ret_val = e1000_read_flash_word_ich8lan(hw, + act_offset + i, + &word); + if (ret_val) + break; + data[i] = word; + } + } + + nvm->ops.release(hw); + +out: + if (ret_val) + DEBUGOUT1("NVM read error: %d\n", ret_val); + + return ret_val; +} + +/** + * e1000_flash_cycle_init_ich8lan - Initialize flash + * @hw: pointer to the HW structure + * + * This function does initial flash setup so that a new read/write/erase cycle + * can be started. + **/ +STATIC s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw) +{ + union ich8_hws_flash_status hsfsts; + s32 ret_val = -E1000_ERR_NVM; + + DEBUGFUNC("e1000_flash_cycle_init_ich8lan"); + + hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS); + + /* Check if the flash descriptor is valid */ + if (!hsfsts.hsf_status.fldesvalid) { + DEBUGOUT("Flash descriptor invalid. SW Sequencing must be used.\n"); + return -E1000_ERR_NVM; + } + + /* Clear FCERR and DAEL in hw status by writing 1 */ + hsfsts.hsf_status.flcerr = 1; + hsfsts.hsf_status.dael = 1; + if (hw->mac.type >= e1000_pch_spt) + E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS, + hsfsts.regval & 0xFFFF); + else + E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval); + + /* Either we should have a hardware SPI cycle in progress + * bit to check against, in order to start a new cycle or + * FDONE bit should be changed in the hardware so that it + * is 1 after hardware reset, which can then be used as an + * indication whether a cycle is in progress or has been + * completed. + */ + + if (!hsfsts.hsf_status.flcinprog) { + /* There is no cycle running at present, + * so we can start a cycle. + * Begin by setting Flash Cycle Done. + */ + hsfsts.hsf_status.flcdone = 1; + if (hw->mac.type >= e1000_pch_spt) + E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS, + hsfsts.regval & 0xFFFF); + else + E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, + hsfsts.regval); + ret_val = E1000_SUCCESS; + } else { + s32 i; + + /* Otherwise poll for sometime so the current + * cycle has a chance to end before giving up. + */ + for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) { + hsfsts.regval = E1000_READ_FLASH_REG16(hw, + ICH_FLASH_HSFSTS); + if (!hsfsts.hsf_status.flcinprog) { + ret_val = E1000_SUCCESS; + break; + } + usec_delay(1); + } + if (ret_val == E1000_SUCCESS) { + /* Successful in waiting for previous cycle to timeout, + * now set the Flash Cycle Done. 
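+			 * (i.e. the in-progress bit cleared within the
+			 * polling window above).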
+ */ + hsfsts.hsf_status.flcdone = 1; + if (hw->mac.type >= e1000_pch_spt) + E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS, + hsfsts.regval & 0xFFFF); + else + E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, + hsfsts.regval); + } else { + DEBUGOUT("Flash controller busy, cannot get access\n"); + } + } + + return ret_val; +} + +/** + * e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase) + * @hw: pointer to the HW structure + * @timeout: maximum time to wait for completion + * + * This function starts a flash cycle and waits for its completion. + **/ +STATIC s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout) +{ + union ich8_hws_flash_ctrl hsflctl; + union ich8_hws_flash_status hsfsts; + u32 i = 0; + + DEBUGFUNC("e1000_flash_cycle_ich8lan"); + + /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */ + if (hw->mac.type >= e1000_pch_spt) + hsflctl.regval = E1000_READ_FLASH_REG(hw, ICH_FLASH_HSFSTS)>>16; + else + hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL); + hsflctl.hsf_ctrl.flcgo = 1; + + if (hw->mac.type >= e1000_pch_spt) + E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS, + hsflctl.regval << 16); + else + E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval); + + /* wait till FDONE bit is set to 1 */ + do { + hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS); + if (hsfsts.hsf_status.flcdone) + break; + usec_delay(1); + } while (i++ < timeout); + + if (hsfsts.hsf_status.flcdone && !hsfsts.hsf_status.flcerr) + return E1000_SUCCESS; + + return -E1000_ERR_NVM; +} + +/** + * e1000_read_flash_dword_ich8lan - Read dword from flash + * @hw: pointer to the HW structure + * @offset: offset to data location + * @data: pointer to the location for storing the data + * + * Reads the flash dword at offset into data. Offset is converted + * to bytes before read. + **/ +STATIC s32 e1000_read_flash_dword_ich8lan(struct e1000_hw *hw, u32 offset, + u32 *data) +{ + DEBUGFUNC("e1000_read_flash_dword_ich8lan"); + + if (!data) + return -E1000_ERR_NVM; + + /* Must convert word offset into bytes. */ + offset <<= 1; + + return e1000_read_flash_data32_ich8lan(hw, offset, data); +} + +/** + * e1000_read_flash_word_ich8lan - Read word from flash + * @hw: pointer to the HW structure + * @offset: offset to data location + * @data: pointer to the location for storing the data + * + * Reads the flash word at offset into data. Offset is converted + * to bytes before read. + **/ +STATIC s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset, + u16 *data) +{ + DEBUGFUNC("e1000_read_flash_word_ich8lan"); + + if (!data) + return -E1000_ERR_NVM; + + /* Must convert offset into bytes. */ + offset <<= 1; + + return e1000_read_flash_data_ich8lan(hw, offset, 2, data); +} + +/** + * e1000_read_flash_byte_ich8lan - Read byte from flash + * @hw: pointer to the HW structure + * @offset: The offset of the byte to read. + * @data: Pointer to a byte to store the value read. + * + * Reads a single byte from the NVM using the flash access registers. + **/ +STATIC s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset, + u8 *data) +{ + s32 ret_val; + u16 word = 0; + + /* In SPT, only 32 bits access is supported, + * so this function should not be called. 
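+	 * Returning -E1000_ERR_NVM below keeps callers from issuing an
+	 * unsupported byte-wide cycle on pch_spt and newer parts.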
+ */ + if (hw->mac.type >= e1000_pch_spt) + return -E1000_ERR_NVM; + else + ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word); + + if (ret_val) + return ret_val; + + *data = (u8)word; + + return E1000_SUCCESS; +} + +/** + * e1000_read_flash_data_ich8lan - Read byte or word from NVM + * @hw: pointer to the HW structure + * @offset: The offset (in bytes) of the byte or word to read. + * @size: Size of data to read, 1=byte 2=word + * @data: Pointer to the word to store the value read. + * + * Reads a byte or word from the NVM using the flash access registers. + **/ +STATIC s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, + u8 size, u16 *data) +{ + union ich8_hws_flash_status hsfsts; + union ich8_hws_flash_ctrl hsflctl; + u32 flash_linear_addr; + u32 flash_data = 0; + s32 ret_val = -E1000_ERR_NVM; + u8 count = 0; + + DEBUGFUNC("e1000_read_flash_data_ich8lan"); + + if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK) + return -E1000_ERR_NVM; + flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) + + hw->nvm.flash_base_addr); + + do { + usec_delay(1); + /* Steps */ + ret_val = e1000_flash_cycle_init_ich8lan(hw); + if (ret_val != E1000_SUCCESS) + break; + hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL); + + /* 0b/1b corresponds to 1 or 2 byte size, respectively. */ + hsflctl.hsf_ctrl.fldbcount = size - 1; + hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ; + E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval); + E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr); + + ret_val = e1000_flash_cycle_ich8lan(hw, + ICH_FLASH_READ_COMMAND_TIMEOUT); + + /* Check if FCERR is set to 1, if set to 1, clear it + * and try the whole sequence a few more times, else + * read in (shift in) the Flash Data0, the order is + * least significant byte first msb to lsb + */ + if (ret_val == E1000_SUCCESS) { + flash_data = E1000_READ_FLASH_REG(hw, ICH_FLASH_FDATA0); + if (size == 1) + *data = (u8)(flash_data & 0x000000FF); + else if (size == 2) + *data = (u16)(flash_data & 0x0000FFFF); + break; + } else { + /* If we've gotten here, then things are probably + * completely hosed, but if the error condition is + * detected, it won't hurt to give it another try... + * ICH_FLASH_CYCLE_REPEAT_COUNT times. + */ + hsfsts.regval = E1000_READ_FLASH_REG16(hw, + ICH_FLASH_HSFSTS); + if (hsfsts.hsf_status.flcerr) { + /* Repeat for some time before giving up. */ + continue; + } else if (!hsfsts.hsf_status.flcdone) { + DEBUGOUT("Timeout error - flash cycle did not complete.\n"); + break; + } + } + } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT); + + return ret_val; +} + +/** + * e1000_read_flash_data32_ich8lan - Read dword from NVM + * @hw: pointer to the HW structure + * @offset: The offset (in bytes) of the dword to read. + * @data: Pointer to the dword to store the value read. + * + * Reads a byte or word from the NVM using the flash access registers. 
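+ *
+ * (Despite the wording above, this helper always transfers a full
+ * 32-bit dword and is only valid on pch_spt and newer parts; older
+ * MACs return -E1000_ERR_NVM.)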
+ **/ +STATIC s32 e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset, + u32 *data) +{ + union ich8_hws_flash_status hsfsts; + union ich8_hws_flash_ctrl hsflctl; + u32 flash_linear_addr; + s32 ret_val = -E1000_ERR_NVM; + u8 count = 0; + + DEBUGFUNC("e1000_read_flash_data_ich8lan"); + + if (offset > ICH_FLASH_LINEAR_ADDR_MASK || + hw->mac.type < e1000_pch_spt) + return -E1000_ERR_NVM; + flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) + + hw->nvm.flash_base_addr); + + do { + usec_delay(1); + /* Steps */ + ret_val = e1000_flash_cycle_init_ich8lan(hw); + if (ret_val != E1000_SUCCESS) + break; + /* In SPT, This register is in Lan memory space, not flash. + * Therefore, only 32 bit access is supported + */ + hsflctl.regval = E1000_READ_FLASH_REG(hw, ICH_FLASH_HSFSTS)>>16; + + /* 0b/1b corresponds to 1 or 2 byte size, respectively. */ + hsflctl.hsf_ctrl.fldbcount = sizeof(u32) - 1; + hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ; + /* In SPT, This register is in Lan memory space, not flash. + * Therefore, only 32 bit access is supported + */ + E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS, + (u32)hsflctl.regval << 16); + E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr); + + ret_val = e1000_flash_cycle_ich8lan(hw, + ICH_FLASH_READ_COMMAND_TIMEOUT); + + /* Check if FCERR is set to 1, if set to 1, clear it + * and try the whole sequence a few more times, else + * read in (shift in) the Flash Data0, the order is + * least significant byte first msb to lsb + */ + if (ret_val == E1000_SUCCESS) { + *data = E1000_READ_FLASH_REG(hw, ICH_FLASH_FDATA0); + break; + } else { + /* If we've gotten here, then things are probably + * completely hosed, but if the error condition is + * detected, it won't hurt to give it another try... + * ICH_FLASH_CYCLE_REPEAT_COUNT times. + */ + hsfsts.regval = E1000_READ_FLASH_REG16(hw, + ICH_FLASH_HSFSTS); + if (hsfsts.hsf_status.flcerr) { + /* Repeat for some time before giving up. */ + continue; + } else if (!hsfsts.hsf_status.flcdone) { + DEBUGOUT("Timeout error - flash cycle did not complete.\n"); + break; + } + } + } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT); + + return ret_val; +} + +/** + * e1000_write_nvm_ich8lan - Write word(s) to the NVM + * @hw: pointer to the HW structure + * @offset: The offset (in bytes) of the word(s) to write. + * @words: Size of data to write in words + * @data: Pointer to the word(s) to write at offset. + * + * Writes a byte or word to the NVM using the flash access registers. + **/ +STATIC s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; + u16 i; + + DEBUGFUNC("e1000_write_nvm_ich8lan"); + + if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) || + (words == 0)) { + DEBUGOUT("nvm parameter(s) out of bounds\n"); + return -E1000_ERR_NVM; + } + + nvm->ops.acquire(hw); + + for (i = 0; i < words; i++) { + dev_spec->shadow_ram[offset+i].modified = true; + dev_spec->shadow_ram[offset+i].value = data[i]; + } + + nvm->ops.release(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_update_nvm_checksum_spt - Update the checksum for NVM + * @hw: pointer to the HW structure + * + * The NVM checksum is updated by calling the generic update_nvm_checksum, + * which writes the checksum to the shadow ram. 
The changes in the shadow + * ram are then committed to the EEPROM by processing each bank at a time + * checking for the modified bit and writing only the pending changes. + * After a successful commit, the shadow ram is cleared and is ready for + * future writes. + **/ +STATIC s32 e1000_update_nvm_checksum_spt(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; + u32 i, act_offset, new_bank_offset, old_bank_offset, bank; + s32 ret_val; + u32 dword = 0; + + DEBUGFUNC("e1000_update_nvm_checksum_spt"); + + ret_val = e1000_update_nvm_checksum_generic(hw); + if (ret_val) + goto out; + + if (nvm->type != e1000_nvm_flash_sw) + goto out; + + nvm->ops.acquire(hw); + + /* We're writing to the opposite bank so if we're on bank 1, + * write to bank 0 etc. We also need to erase the segment that + * is going to be written + */ + ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); + if (ret_val != E1000_SUCCESS) { + DEBUGOUT("Could not detect valid bank, assuming bank 0\n"); + bank = 0; + } + + if (bank == 0) { + new_bank_offset = nvm->flash_bank_size; + old_bank_offset = 0; + ret_val = e1000_erase_flash_bank_ich8lan(hw, 1); + if (ret_val) + goto release; + } else { + old_bank_offset = nvm->flash_bank_size; + new_bank_offset = 0; + ret_val = e1000_erase_flash_bank_ich8lan(hw, 0); + if (ret_val) + goto release; + } + for (i = 0; i < E1000_SHADOW_RAM_WORDS; i += 2) { + /* Determine whether to write the value stored + * in the other NVM bank or a modified value stored + * in the shadow RAM + */ + ret_val = e1000_read_flash_dword_ich8lan(hw, + i + old_bank_offset, + &dword); + + if (dev_spec->shadow_ram[i].modified) { + dword &= 0xffff0000; + dword |= (dev_spec->shadow_ram[i].value & 0xffff); + } + if (dev_spec->shadow_ram[i + 1].modified) { + dword &= 0x0000ffff; + dword |= ((dev_spec->shadow_ram[i + 1].value & 0xffff) + << 16); + } + if (ret_val) + break; + + /* If the word is 0x13, then make sure the signature bits + * (15:14) are 11b until the commit has completed. + * This will allow us to write 10b which indicates the + * signature is valid. We want to do this after the write + * has completed so that we don't mark the segment valid + * while the write is still in progress + */ + if (i == E1000_ICH_NVM_SIG_WORD - 1) + dword |= E1000_ICH_NVM_SIG_MASK << 16; + + /* Convert offset to bytes. */ + act_offset = (i + new_bank_offset) << 1; + + usec_delay(100); + + /* Write the data to the new bank. Offset in words*/ + act_offset = i + new_bank_offset; + ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, + dword); + if (ret_val) + break; + } + + /* Don't bother writing the segment valid bits if sector + * programming failed. + */ + if (ret_val) { + DEBUGOUT("Flash commit failed.\n"); + goto release; + } + + /* Finally validate the new segment by setting bit 15:14 + * to 10b in word 0x13 , this can be done without an + * erase as well since these bits are 11 to start with + * and we need to change bit 14 to 0b + */ + act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD; + + /*offset in words but we read dword*/ + --act_offset; + ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, &dword); + + if (ret_val) + goto release; + + dword &= 0xBFFFFFFF; + ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, dword); + + if (ret_val) + goto release; + + /* And invalidate the previously valid segment by setting + * its signature word (0x13) high_byte to 0b. 
This can be + * done without an erase because flash erase sets all bits + * to 1's. We can write 1's to 0's without an erase + */ + act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1; + + /* offset in words but we read dword*/ + act_offset = old_bank_offset + E1000_ICH_NVM_SIG_WORD - 1; + ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, &dword); + + if (ret_val) + goto release; + + dword &= 0x00FFFFFF; + ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, dword); + + if (ret_val) + goto release; + + /* Great! Everything worked, we can now clear the cached entries. */ + for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) { + dev_spec->shadow_ram[i].modified = false; + dev_spec->shadow_ram[i].value = 0xFFFF; + } + +release: + nvm->ops.release(hw); + + /* Reload the EEPROM, or else modifications will not appear + * until after the next adapter reset. + */ + if (!ret_val) { + nvm->ops.reload(hw); + msec_delay(10); + } + +out: + if (ret_val) + DEBUGOUT1("NVM update error: %d\n", ret_val); + + return ret_val; +} + +/** + * e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM + * @hw: pointer to the HW structure + * + * The NVM checksum is updated by calling the generic update_nvm_checksum, + * which writes the checksum to the shadow ram. The changes in the shadow + * ram are then committed to the EEPROM by processing each bank at a time + * checking for the modified bit and writing only the pending changes. + * After a successful commit, the shadow ram is cleared and is ready for + * future writes. + **/ +STATIC s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; + u32 i, act_offset, new_bank_offset, old_bank_offset, bank; + s32 ret_val; + u16 data = 0; + + DEBUGFUNC("e1000_update_nvm_checksum_ich8lan"); + + ret_val = e1000_update_nvm_checksum_generic(hw); + if (ret_val) + goto out; + + if (nvm->type != e1000_nvm_flash_sw) + goto out; + + nvm->ops.acquire(hw); + + /* We're writing to the opposite bank so if we're on bank 1, + * write to bank 0 etc. We also need to erase the segment that + * is going to be written + */ + ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); + if (ret_val != E1000_SUCCESS) { + DEBUGOUT("Could not detect valid bank, assuming bank 0\n"); + bank = 0; + } + + if (bank == 0) { + new_bank_offset = nvm->flash_bank_size; + old_bank_offset = 0; + ret_val = e1000_erase_flash_bank_ich8lan(hw, 1); + if (ret_val) + goto release; + } else { + old_bank_offset = nvm->flash_bank_size; + new_bank_offset = 0; + ret_val = e1000_erase_flash_bank_ich8lan(hw, 0); + if (ret_val) + goto release; + } + for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) { + if (dev_spec->shadow_ram[i].modified) { + data = dev_spec->shadow_ram[i].value; + } else { + ret_val = e1000_read_flash_word_ich8lan(hw, i + + old_bank_offset, + &data); + if (ret_val) + break; + } + /* If the word is 0x13, then make sure the signature bits + * (15:14) are 11b until the commit has completed. + * This will allow us to write 10b which indicates the + * signature is valid. We want to do this after the write + * has completed so that we don't mark the segment valid + * while the write is still in progress + */ + if (i == E1000_ICH_NVM_SIG_WORD) + data |= E1000_ICH_NVM_SIG_MASK; + + /* Convert offset to bytes. */ + act_offset = (i + new_bank_offset) << 1; + + usec_delay(100); + + /* Write the bytes to the new bank. 
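+		 * Each word is committed as two byte writes: the low byte
+		 * at act_offset and the high byte at act_offset + 1.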
*/ + ret_val = e1000_retry_write_flash_byte_ich8lan(hw, + act_offset, + (u8)data); + if (ret_val) + break; + + usec_delay(100); + ret_val = e1000_retry_write_flash_byte_ich8lan(hw, + act_offset + 1, + (u8)(data >> 8)); + if (ret_val) + break; + } + + /* Don't bother writing the segment valid bits if sector + * programming failed. + */ + if (ret_val) { + DEBUGOUT("Flash commit failed.\n"); + goto release; + } + + /* Finally validate the new segment by setting bit 15:14 + * to 10b in word 0x13 , this can be done without an + * erase as well since these bits are 11 to start with + * and we need to change bit 14 to 0b + */ + act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD; + ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data); + if (ret_val) + goto release; + + data &= 0xBFFF; + ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset * 2 + 1, + (u8)(data >> 8)); + if (ret_val) + goto release; + + /* And invalidate the previously valid segment by setting + * its signature word (0x13) high_byte to 0b. This can be + * done without an erase because flash erase sets all bits + * to 1's. We can write 1's to 0's without an erase + */ + act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1; + + ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0); + + if (ret_val) + goto release; + + /* Great! Everything worked, we can now clear the cached entries. */ + for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) { + dev_spec->shadow_ram[i].modified = false; + dev_spec->shadow_ram[i].value = 0xFFFF; + } + +release: + nvm->ops.release(hw); + + /* Reload the EEPROM, or else modifications will not appear + * until after the next adapter reset. + */ + if (!ret_val) { + nvm->ops.reload(hw); + msec_delay(10); + } + +out: + if (ret_val) + DEBUGOUT1("NVM update error: %d\n", ret_val); + + return ret_val; +} + +/** + * e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Check to see if checksum needs to be fixed by reading bit 6 in word 0x19. + * If the bit is 0, that the EEPROM had been modified, but the checksum was not + * calculated, in which case we need to calculate the checksum and set bit 6. + **/ +STATIC s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw) +{ + s32 ret_val; + u16 data; + u16 word; + u16 valid_csum_mask; + + DEBUGFUNC("e1000_validate_nvm_checksum_ich8lan"); + + /* Read NVM and check Invalid Image CSUM bit. If this bit is 0, + * the checksum needs to be fixed. This bit is an indication that + * the NVM was prepared by OEM software and did not calculate + * the checksum...a likely scenario. + */ + switch (hw->mac.type) { + case e1000_pch_lpt: + case e1000_pch_spt: + case e1000_pch_cnp: + word = NVM_COMPAT; + valid_csum_mask = NVM_COMPAT_VALID_CSUM; + break; + default: + word = NVM_FUTURE_INIT_WORD1; + valid_csum_mask = NVM_FUTURE_INIT_WORD1_VALID_CSUM; + break; + } + + ret_val = hw->nvm.ops.read(hw, word, 1, &data); + if (ret_val) + return ret_val; + + if (!(data & valid_csum_mask)) { + data |= valid_csum_mask; + ret_val = hw->nvm.ops.write(hw, word, 1, &data); + if (ret_val) + return ret_val; + ret_val = hw->nvm.ops.update(hw); + if (ret_val) + return ret_val; + } + + return e1000_validate_nvm_checksum_generic(hw); +} + +/** + * e1000_write_flash_data_ich8lan - Writes bytes to the NVM + * @hw: pointer to the HW structure + * @offset: The offset (in bytes) of the byte/word to read. + * @size: Size of data to read, 1=byte 2=word + * @data: The byte(s) to write to the NVM. 
+ * + * Writes one/two bytes to the NVM using the flash access registers. + **/ +STATIC s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, + u8 size, u16 data) +{ + union ich8_hws_flash_status hsfsts; + union ich8_hws_flash_ctrl hsflctl; + u32 flash_linear_addr; + u32 flash_data = 0; + s32 ret_val; + u8 count = 0; + + DEBUGFUNC("e1000_write_ich8_data"); + + if (hw->mac.type >= e1000_pch_spt) { + if (size != 4 || offset > ICH_FLASH_LINEAR_ADDR_MASK) + return -E1000_ERR_NVM; + } else { + if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK) + return -E1000_ERR_NVM; + } + + flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) + + hw->nvm.flash_base_addr); + + do { + usec_delay(1); + /* Steps */ + ret_val = e1000_flash_cycle_init_ich8lan(hw); + if (ret_val != E1000_SUCCESS) + break; + /* In SPT, This register is in Lan memory space, not + * flash. Therefore, only 32 bit access is supported + */ + if (hw->mac.type >= e1000_pch_spt) + hsflctl.regval = + E1000_READ_FLASH_REG(hw, ICH_FLASH_HSFSTS)>>16; + else + hsflctl.regval = + E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL); + + /* 0b/1b corresponds to 1 or 2 byte size, respectively. */ + hsflctl.hsf_ctrl.fldbcount = size - 1; + hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE; + /* In SPT, This register is in Lan memory space, + * not flash. Therefore, only 32 bit access is + * supported + */ + if (hw->mac.type >= e1000_pch_spt) + E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS, + hsflctl.regval << 16); + else + E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, + hsflctl.regval); + + E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr); + + if (size == 1) + flash_data = (u32)data & 0x00FF; + else + flash_data = (u32)data; + + E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, flash_data); + + /* check if FCERR is set to 1 , if set to 1, clear it + * and try the whole sequence a few more times else done + */ + ret_val = + e1000_flash_cycle_ich8lan(hw, + ICH_FLASH_WRITE_COMMAND_TIMEOUT); + if (ret_val == E1000_SUCCESS) + break; + + /* If we're here, then things are most likely + * completely hosed, but if the error condition + * is detected, it won't hurt to give it another + * try...ICH_FLASH_CYCLE_REPEAT_COUNT times. + */ + hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS); + if (hsfsts.hsf_status.flcerr) + /* Repeat for some time before giving up. */ + continue; + if (!hsfsts.hsf_status.flcdone) { + DEBUGOUT("Timeout error - flash cycle did not complete.\n"); + break; + } + } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT); + + return ret_val; +} + +/** +* e1000_write_flash_data32_ich8lan - Writes 4 bytes to the NVM +* @hw: pointer to the HW structure +* @offset: The offset (in bytes) of the dwords to read. +* @data: The 4 bytes to write to the NVM. +* +* Writes one/two/four bytes to the NVM using the flash access registers. +**/ +STATIC s32 e1000_write_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset, + u32 data) +{ + union ich8_hws_flash_status hsfsts; + union ich8_hws_flash_ctrl hsflctl; + u32 flash_linear_addr; + s32 ret_val; + u8 count = 0; + + DEBUGFUNC("e1000_write_flash_data32_ich8lan"); + + if (hw->mac.type >= e1000_pch_spt) { + if (offset > ICH_FLASH_LINEAR_ADDR_MASK) + return -E1000_ERR_NVM; + } + flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) + + hw->nvm.flash_base_addr); + do { + usec_delay(1); + /* Steps */ + ret_val = e1000_flash_cycle_init_ich8lan(hw); + if (ret_val != E1000_SUCCESS) + break; + + /* In SPT, This register is in Lan memory space, not + * flash. 
Therefore, only 32 bit access is supported + */ + if (hw->mac.type >= e1000_pch_spt) + hsflctl.regval = E1000_READ_FLASH_REG(hw, + ICH_FLASH_HSFSTS) + >> 16; + else + hsflctl.regval = E1000_READ_FLASH_REG16(hw, + ICH_FLASH_HSFCTL); + + hsflctl.hsf_ctrl.fldbcount = sizeof(u32) - 1; + hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE; + + /* In SPT, This register is in Lan memory space, + * not flash. Therefore, only 32 bit access is + * supported + */ + if (hw->mac.type >= e1000_pch_spt) + E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS, + hsflctl.regval << 16); + else + E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, + hsflctl.regval); + + E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr); + + E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, data); + + /* check if FCERR is set to 1 , if set to 1, clear it + * and try the whole sequence a few more times else done + */ + ret_val = e1000_flash_cycle_ich8lan(hw, + ICH_FLASH_WRITE_COMMAND_TIMEOUT); + + if (ret_val == E1000_SUCCESS) + break; + + /* If we're here, then things are most likely + * completely hosed, but if the error condition + * is detected, it won't hurt to give it another + * try...ICH_FLASH_CYCLE_REPEAT_COUNT times. + */ + hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS); + + if (hsfsts.hsf_status.flcerr) + /* Repeat for some time before giving up. */ + continue; + if (!hsfsts.hsf_status.flcdone) { + DEBUGOUT("Timeout error - flash cycle did not complete.\n"); + break; + } + } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT); + + return ret_val; +} + +/** + * e1000_write_flash_byte_ich8lan - Write a single byte to NVM + * @hw: pointer to the HW structure + * @offset: The index of the byte to read. + * @data: The byte to write to the NVM. + * + * Writes a single byte to the NVM using the flash access registers. + **/ +STATIC s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset, + u8 data) +{ + u16 word = (u16)data; + + DEBUGFUNC("e1000_write_flash_byte_ich8lan"); + + return e1000_write_flash_data_ich8lan(hw, offset, 1, word); +} + +/** +* e1000_retry_write_flash_dword_ich8lan - Writes a dword to NVM +* @hw: pointer to the HW structure +* @offset: The offset of the word to write. +* @dword: The dword to write to the NVM. +* +* Writes a single dword to the NVM using the flash access registers. +* Goes through a retry algorithm before giving up. +**/ +STATIC s32 e1000_retry_write_flash_dword_ich8lan(struct e1000_hw *hw, + u32 offset, u32 dword) +{ + s32 ret_val; + u16 program_retries; + + DEBUGFUNC("e1000_retry_write_flash_dword_ich8lan"); + + /* Must convert word offset into bytes. */ + offset <<= 1; + + ret_val = e1000_write_flash_data32_ich8lan(hw, offset, dword); + + if (!ret_val) + return ret_val; + for (program_retries = 0; program_retries < 100; program_retries++) { + DEBUGOUT2("Retrying Byte %8.8X at offset %u\n", dword, offset); + usec_delay(100); + ret_val = e1000_write_flash_data32_ich8lan(hw, offset, dword); + if (ret_val == E1000_SUCCESS) + break; + } + if (program_retries == 100) + return -E1000_ERR_NVM; + + return E1000_SUCCESS; +} + +/** + * e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM + * @hw: pointer to the HW structure + * @offset: The offset of the byte to write. + * @byte: The byte to write to the NVM. + * + * Writes a single byte to the NVM using the flash access registers. + * Goes through a retry algorithm before giving up. 
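+ *
+ * The initial write is retried up to 100 times with a 100 usec delay
+ * between attempts before -E1000_ERR_NVM is returned.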
+ **/ +STATIC s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw, + u32 offset, u8 byte) +{ + s32 ret_val; + u16 program_retries; + + DEBUGFUNC("e1000_retry_write_flash_byte_ich8lan"); + + ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte); + if (!ret_val) + return ret_val; + + for (program_retries = 0; program_retries < 100; program_retries++) { + DEBUGOUT2("Retrying Byte %2.2X at offset %u\n", byte, offset); + usec_delay(100); + ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte); + if (ret_val == E1000_SUCCESS) + break; + } + if (program_retries == 100) + return -E1000_ERR_NVM; + + return E1000_SUCCESS; +} + +/** + * e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM + * @hw: pointer to the HW structure + * @bank: 0 for first bank, 1 for second bank, etc. + * + * Erases the bank specified. Each bank is a 4k block. Banks are 0 based. + * bank N is 4096 * N + flash_reg_addr. + **/ +STATIC s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + union ich8_hws_flash_status hsfsts; + union ich8_hws_flash_ctrl hsflctl; + u32 flash_linear_addr; + /* bank size is in 16bit words - adjust to bytes */ + u32 flash_bank_size = nvm->flash_bank_size * 2; + s32 ret_val; + s32 count = 0; + s32 j, iteration, sector_size; + + DEBUGFUNC("e1000_erase_flash_bank_ich8lan"); + + hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS); + + /* Determine HW Sector size: Read BERASE bits of hw flash status + * register + * 00: The Hw sector is 256 bytes, hence we need to erase 16 + * consecutive sectors. The start index for the nth Hw sector + * can be calculated as = bank * 4096 + n * 256 + * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector. + * The start index for the nth Hw sector can be calculated + * as = bank * 4096 + * 10: The Hw sector is 8K bytes, nth sector = bank * 8192 + * (ich9 only, otherwise error condition) + * 11: The Hw sector is 64K bytes, nth sector = bank * 65536 + */ + switch (hsfsts.hsf_status.berasesz) { + case 0: + /* Hw sector size 256 */ + sector_size = ICH_FLASH_SEG_SIZE_256; + iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256; + break; + case 1: + sector_size = ICH_FLASH_SEG_SIZE_4K; + iteration = 1; + break; + case 2: + sector_size = ICH_FLASH_SEG_SIZE_8K; + iteration = 1; + break; + case 3: + sector_size = ICH_FLASH_SEG_SIZE_64K; + iteration = 1; + break; + default: + return -E1000_ERR_NVM; + } + + /* Start with the base address, then add the sector offset. */ + flash_linear_addr = hw->nvm.flash_base_addr; + flash_linear_addr += (bank) ? flash_bank_size : 0; + + for (j = 0; j < iteration; j++) { + do { + u32 timeout = ICH_FLASH_ERASE_COMMAND_TIMEOUT; + + /* Steps */ + ret_val = e1000_flash_cycle_init_ich8lan(hw); + if (ret_val) + return ret_val; + + /* Write a value 11 (block Erase) in Flash + * Cycle field in hw flash control + */ + if (hw->mac.type >= e1000_pch_spt) + hsflctl.regval = + E1000_READ_FLASH_REG(hw, + ICH_FLASH_HSFSTS)>>16; + else + hsflctl.regval = + E1000_READ_FLASH_REG16(hw, + ICH_FLASH_HSFCTL); + + hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE; + if (hw->mac.type >= e1000_pch_spt) + E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS, + hsflctl.regval << 16); + else + E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, + hsflctl.regval); + + /* Write the last 24 bits of an index within the + * block into Flash Linear address field in Flash + * Address. 
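+			 * flash_linear_addr was seeded above with the flash
+			 * base address plus the bank offset, so only the
+			 * per-sector offset (j * sector_size) is added here.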
+ */ + flash_linear_addr += (j * sector_size); + E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, + flash_linear_addr); + + ret_val = e1000_flash_cycle_ich8lan(hw, timeout); + if (ret_val == E1000_SUCCESS) + break; + + /* Check if FCERR is set to 1. If 1, + * clear it and try the whole sequence + * a few more times else Done + */ + hsfsts.regval = E1000_READ_FLASH_REG16(hw, + ICH_FLASH_HSFSTS); + if (hsfsts.hsf_status.flcerr) + /* repeat for some time before giving up */ + continue; + else if (!hsfsts.hsf_status.flcdone) + return ret_val; + } while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT); + } + + return E1000_SUCCESS; +} + +/** + * e1000_valid_led_default_ich8lan - Set the default LED settings + * @hw: pointer to the HW structure + * @data: Pointer to the LED settings + * + * Reads the LED default settings from the NVM to data. If the NVM LED + * settings is all 0's or F's, set the LED default to a valid LED default + * setting. + **/ +STATIC s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data) +{ + s32 ret_val; + + DEBUGFUNC("e1000_valid_led_default_ich8lan"); + + ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + + if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) + *data = ID_LED_DEFAULT_ICH8LAN; + + return E1000_SUCCESS; +} + +/** + * e1000_id_led_init_pchlan - store LED configurations + * @hw: pointer to the HW structure + * + * PCH does not control LEDs via the LEDCTL register, rather it uses + * the PHY LED configuration register. + * + * PCH also does not have an "always on" or "always off" mode which + * complicates the ID feature. Instead of using the "on" mode to indicate + * in ledctl_mode2 the LEDs to use for ID (see e1000_id_led_init_generic()), + * use "link_up" mode. The LEDs will still ID on request if there is no + * link based on logic in e1000_led_[on|off]_pchlan(). 
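 *
 * Editor's note (illustration of the bit layout implied by the shifts in
 * the loop below, not additional behaviour): the NVM ID-LED word is
 * consumed four bits per LED, while the PHY LED configuration image kept
 * in ledctl_mode1/2 uses five bits per LED.  For LED 1 (i == 1) the loop
 * is effectively:
 *
 *     temp  = (data >> 4) & E1000_LEDCTL_LED0_MODE_MASK;
 *     shift = 5;
 *     mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
 *     mac->ledctl_mode1 |= ledctl_on << shift;
 *
 * with ledctl_on replaced by ledctl_off when temp selects an "off" mode.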
+ **/ +STATIC s32 e1000_id_led_init_pchlan(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val; + const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP; + const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT; + u16 data, i, temp, shift; + + DEBUGFUNC("e1000_id_led_init_pchlan"); + + /* Get default ID LED modes */ + ret_val = hw->nvm.ops.valid_led_default(hw, &data); + if (ret_val) + return ret_val; + + mac->ledctl_default = E1000_READ_REG(hw, E1000_LEDCTL); + mac->ledctl_mode1 = mac->ledctl_default; + mac->ledctl_mode2 = mac->ledctl_default; + + for (i = 0; i < 4; i++) { + temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK; + shift = (i * 5); + switch (temp) { + case ID_LED_ON1_DEF2: + case ID_LED_ON1_ON2: + case ID_LED_ON1_OFF2: + mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift); + mac->ledctl_mode1 |= (ledctl_on << shift); + break; + case ID_LED_OFF1_DEF2: + case ID_LED_OFF1_ON2: + case ID_LED_OFF1_OFF2: + mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift); + mac->ledctl_mode1 |= (ledctl_off << shift); + break; + default: + /* Do nothing */ + break; + } + switch (temp) { + case ID_LED_DEF1_ON2: + case ID_LED_ON1_ON2: + case ID_LED_OFF1_ON2: + mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift); + mac->ledctl_mode2 |= (ledctl_on << shift); + break; + case ID_LED_DEF1_OFF2: + case ID_LED_ON1_OFF2: + case ID_LED_OFF1_OFF2: + mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift); + mac->ledctl_mode2 |= (ledctl_off << shift); + break; + default: + /* Do nothing */ + break; + } + } + + return E1000_SUCCESS; +} + +/** + * e1000_get_bus_info_ich8lan - Get/Set the bus type and width + * @hw: pointer to the HW structure + * + * ICH8 use the PCI Express bus, but does not contain a PCI Express Capability + * register, so the bus width is hard coded. + **/ +STATIC s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw) +{ + struct e1000_bus_info *bus = &hw->bus; + s32 ret_val; + + DEBUGFUNC("e1000_get_bus_info_ich8lan"); + + ret_val = e1000_get_bus_info_pcie_generic(hw); + + /* ICH devices are "PCI Express"-ish. They have + * a configuration space, but do not contain + * PCI Express Capability registers, so bus width + * must be hardcoded. + */ + if (bus->width == e1000_bus_width_unknown) + bus->width = e1000_bus_width_pcie_x1; + + return ret_val; +} + +/** + * e1000_reset_hw_ich8lan - Reset the hardware + * @hw: pointer to the HW structure + * + * Does a full reset of the hardware which includes a reset of the PHY and + * MAC. + **/ +STATIC s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw) +{ + struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; + u16 kum_cfg; + u32 ctrl, reg; + s32 ret_val; + + DEBUGFUNC("e1000_reset_hw_ich8lan"); + + /* Prevent the PCI-E bus from sticking if there is no TLP connection + * on the last TLP read/write transaction when MAC is reset. + */ + ret_val = e1000_disable_pcie_master_generic(hw); + if (ret_val) + DEBUGOUT("PCI-E Master disable polling has failed.\n"); + + DEBUGOUT("Masking off all interrupts\n"); + E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); + + /* Disable the Transmit and Receive units. Then delay to allow + * any pending transactions to complete before we hit the MAC + * with the global reset. + */ + E1000_WRITE_REG(hw, E1000_RCTL, 0); + E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP); + E1000_WRITE_FLUSH(hw); + + msec_delay(10); + + /* Workaround for ICH8 bit corruption issue in FIFO memory */ + if (hw->mac.type == e1000_ich8lan) { + /* Set Tx and Rx buffer allocation to 8k apiece. 
*/ + E1000_WRITE_REG(hw, E1000_PBA, E1000_PBA_8K); + /* Set Packet Buffer Size to 16k. */ + E1000_WRITE_REG(hw, E1000_PBS, E1000_PBS_16K); + } + + if (hw->mac.type == e1000_pchlan) { + /* Save the NVM K1 bit setting*/ + ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &kum_cfg); + if (ret_val) + return ret_val; + + if (kum_cfg & E1000_NVM_K1_ENABLE) + dev_spec->nvm_k1_enabled = true; + else + dev_spec->nvm_k1_enabled = false; + } + + ctrl = E1000_READ_REG(hw, E1000_CTRL); + + if (!hw->phy.ops.check_reset_block(hw)) { + /* Full-chip reset requires MAC and PHY reset at the same + * time to make sure the interface between MAC and the + * external PHY is reset. + */ + ctrl |= E1000_CTRL_PHY_RST; + + /* Gate automatic PHY configuration by hardware on + * non-managed 82579 + */ + if ((hw->mac.type == e1000_pch2lan) && + !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID)) + e1000_gate_hw_phy_config_ich8lan(hw, true); + } + ret_val = e1000_acquire_swflag_ich8lan(hw); + DEBUGOUT("Issuing a global reset to ich8lan\n"); + E1000_WRITE_REG(hw, E1000_CTRL, (ctrl | E1000_CTRL_RST)); + /* cannot issue a flush here because it hangs the hardware */ + msec_delay(20); + + /* Set Phy Config Counter to 50msec */ + if (hw->mac.type == e1000_pch2lan) { + reg = E1000_READ_REG(hw, E1000_FEXTNVM3); + reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK; + reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC; + E1000_WRITE_REG(hw, E1000_FEXTNVM3, reg); + } + + if (!ret_val) + E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex); + + if (ctrl & E1000_CTRL_PHY_RST) { + ret_val = hw->phy.ops.get_cfg_done(hw); + if (ret_val) + return ret_val; + + ret_val = e1000_post_phy_reset_ich8lan(hw); + if (ret_val) + return ret_val; + } + + /* For PCH, this write will make sure that any noise + * will be detected as a CRC error and be dropped rather than show up + * as a bad packet to the DMA engine. + */ + if (hw->mac.type == e1000_pchlan) + E1000_WRITE_REG(hw, E1000_CRC_OFFSET, 0x65656565); + + E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); + E1000_READ_REG(hw, E1000_ICR); + + reg = E1000_READ_REG(hw, E1000_KABGTXD); + reg |= E1000_KABGTXD_BGSQLBIAS; + E1000_WRITE_REG(hw, E1000_KABGTXD, reg); + + return E1000_SUCCESS; +} + +/** + * e1000_init_hw_ich8lan - Initialize the hardware + * @hw: pointer to the HW structure + * + * Prepares the hardware for transmit and receive by doing the following: + * - initialize hardware bits + * - initialize LED identification + * - setup receive address registers + * - setup flow control + * - setup transmit descriptors + * - clear statistics + **/ +STATIC s32 e1000_init_hw_ich8lan(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 ctrl_ext, txdctl, snoop; + s32 ret_val; + u16 i; + + DEBUGFUNC("e1000_init_hw_ich8lan"); + + e1000_initialize_hw_bits_ich8lan(hw); + + /* Initialize identification LED */ + ret_val = mac->ops.id_led_init(hw); + /* An error is not fatal and we should not stop init due to this */ + if (ret_val) + DEBUGOUT("Error initializing identification LED\n"); + + /* Setup the receive address. */ + e1000_init_rx_addrs_generic(hw, mac->rar_entry_count); + + /* Zero out the Multicast HASH table */ + DEBUGOUT("Zeroing the MTA\n"); + for (i = 0; i < mac->mta_reg_count; i++) + E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); + + /* The 82578 Rx buffer will stall if wakeup is enabled in host and + * the ME. Disable wakeup by clearing the host wakeup bit. + * Reset the phy after disabling host wakeup to reset the Rx buffer. 
+ */ + if (hw->phy.type == e1000_phy_82578) { + hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &i); + i &= ~BM_WUC_HOST_WU_BIT; + hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, i); + ret_val = e1000_phy_hw_reset_ich8lan(hw); + if (ret_val) + return ret_val; + } + + /* Setup link and flow control */ + ret_val = mac->ops.setup_link(hw); + + /* Set the transmit descriptor write-back policy for both queues */ + txdctl = E1000_READ_REG(hw, E1000_TXDCTL(0)); + txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB); + txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) | + E1000_TXDCTL_MAX_TX_DESC_PREFETCH); + E1000_WRITE_REG(hw, E1000_TXDCTL(0), txdctl); + txdctl = E1000_READ_REG(hw, E1000_TXDCTL(1)); + txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB); + txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) | + E1000_TXDCTL_MAX_TX_DESC_PREFETCH); + E1000_WRITE_REG(hw, E1000_TXDCTL(1), txdctl); + + /* ICH8 has opposite polarity of no_snoop bits. + * By default, we should use snoop behavior. + */ + if (mac->type == e1000_ich8lan) + snoop = PCIE_ICH8_SNOOP_ALL; + else + snoop = (u32) ~(PCIE_NO_SNOOP_ALL); + e1000_set_pcie_no_snoop_generic(hw, snoop); + + ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_RO_DIS; + E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); + + /* Clear all of the statistics registers (clear on read). It is + * important that we do this after we have tried to establish link + * because the symbol error count will increment wildly if there + * is no link. + */ + e1000_clear_hw_cntrs_ich8lan(hw); + + return ret_val; +} + +/** + * e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits + * @hw: pointer to the HW structure + * + * Sets/Clears required hardware bits necessary for correctly setting up the + * hardware for transmit and receive. 
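 *
 * Editor's note (pattern illustration only, mirrored from the code above):
 * multi-bit register fields in this file are updated with the usual
 * clear-then-set idiom so unrelated bits are preserved, e.g. the TXDCTL
 * write-back threshold programmed in e1000_init_hw_ich8lan():
 *
 *     txdctl = E1000_READ_REG(hw, E1000_TXDCTL(0));
 *     txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
 *              E1000_TXDCTL_FULL_TX_DESC_WB;
 *     E1000_WRITE_REG(hw, E1000_TXDCTL(0), txdctl);
 *
 * The single-bit writes below use the same read-modify-write shape.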
+ **/ +STATIC void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw) +{ + u32 reg; + + DEBUGFUNC("e1000_initialize_hw_bits_ich8lan"); + + /* Extended Device Control */ + reg = E1000_READ_REG(hw, E1000_CTRL_EXT); + reg |= (1 << 22); + /* Enable PHY low-power state when MAC is at D3 w/o WoL */ + if (hw->mac.type >= e1000_pchlan) + reg |= E1000_CTRL_EXT_PHYPDEN; + E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg); + + /* Transmit Descriptor Control 0 */ + reg = E1000_READ_REG(hw, E1000_TXDCTL(0)); + reg |= (1 << 22); + E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg); + + /* Transmit Descriptor Control 1 */ + reg = E1000_READ_REG(hw, E1000_TXDCTL(1)); + reg |= (1 << 22); + E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg); + + /* Transmit Arbitration Control 0 */ + reg = E1000_READ_REG(hw, E1000_TARC(0)); + if (hw->mac.type == e1000_ich8lan) + reg |= (1 << 28) | (1 << 29); + reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27); + E1000_WRITE_REG(hw, E1000_TARC(0), reg); + + /* Transmit Arbitration Control 1 */ + reg = E1000_READ_REG(hw, E1000_TARC(1)); + if (E1000_READ_REG(hw, E1000_TCTL) & E1000_TCTL_MULR) + reg &= ~(1 << 28); + else + reg |= (1 << 28); + reg |= (1 << 24) | (1 << 26) | (1 << 30); + E1000_WRITE_REG(hw, E1000_TARC(1), reg); + + /* Device Status */ + if (hw->mac.type == e1000_ich8lan) { + reg = E1000_READ_REG(hw, E1000_STATUS); + reg &= ~(1U << 31); + E1000_WRITE_REG(hw, E1000_STATUS, reg); + } + + /* work-around descriptor data corruption issue during nfs v2 udp + * traffic, just disable the nfs filtering capability + */ + reg = E1000_READ_REG(hw, E1000_RFCTL); + reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS); + + /* Disable IPv6 extension header parsing because some malformed + * IPv6 headers can hang the Rx. + */ + if (hw->mac.type == e1000_ich8lan) + reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS); + E1000_WRITE_REG(hw, E1000_RFCTL, reg); + + /* Enable ECC on Lynxpoint */ + if (hw->mac.type >= e1000_pch_lpt) { + reg = E1000_READ_REG(hw, E1000_PBECCSTS); + reg |= E1000_PBECCSTS_ECC_ENABLE; + E1000_WRITE_REG(hw, E1000_PBECCSTS, reg); + + reg = E1000_READ_REG(hw, E1000_CTRL); + reg |= E1000_CTRL_MEHE; + E1000_WRITE_REG(hw, E1000_CTRL, reg); + } + + return; +} + +/** + * e1000_setup_link_ich8lan - Setup flow control and link settings + * @hw: pointer to the HW structure + * + * Determines which flow control settings to use, then configures flow + * control. Calls the appropriate media-specific link configuration + * function. Assuming the adapter has a valid link partner, a valid link + * should be established. Assumes the hardware has previously been reset + * and the transmitter and receiver are not enabled. + **/ +STATIC s32 e1000_setup_link_ich8lan(struct e1000_hw *hw) +{ + s32 ret_val; + + DEBUGFUNC("e1000_setup_link_ich8lan"); + + if (hw->phy.ops.check_reset_block(hw)) + return E1000_SUCCESS; + + /* ICH parts do not have a word in the NVM to determine + * the default flow control setting, so we explicitly + * set it to full. + */ + if (hw->fc.requested_mode == e1000_fc_default) + hw->fc.requested_mode = e1000_fc_full; + + /* Save off the requested flow control mode for use later. Depending + * on the link partner's capabilities, we may or may not use this mode. + */ + hw->fc.current_mode = hw->fc.requested_mode; + + DEBUGOUT1("After fix-ups FlowControl is now = %x\n", + hw->fc.current_mode); + + /* Continue to configure the copper link. 
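 *
 * Editor's note (usage sketch; the enum members come from the shared e1000
 * base code rather than from this file): e1000_fc_full requests pause in
 * both directions.  A caller that wants receive-only or transmit-only flow
 * control can override the default before link setup, for example:
 *
 *     hw->fc.requested_mode = e1000_fc_rx_pause;
 *     ret_val = hw->mac.ops.setup_link(hw);
 *
 * e1000_fc_none disables flow control entirely.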
*/ + ret_val = hw->mac.ops.setup_physical_interface(hw); + if (ret_val) + return ret_val; + + E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time); + if ((hw->phy.type == e1000_phy_82578) || + (hw->phy.type == e1000_phy_82579) || + (hw->phy.type == e1000_phy_i217) || + (hw->phy.type == e1000_phy_82577)) { + E1000_WRITE_REG(hw, E1000_FCRTV_PCH, hw->fc.refresh_time); + + ret_val = hw->phy.ops.write_reg(hw, + PHY_REG(BM_PORT_CTRL_PAGE, 27), + hw->fc.pause_time); + if (ret_val) + return ret_val; + } + + return e1000_set_fc_watermarks_generic(hw); +} + +/** + * e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface + * @hw: pointer to the HW structure + * + * Configures the kumeran interface to the PHY to wait the appropriate time + * when polling the PHY, then call the generic setup_copper_link to finish + * configuring the copper link. + **/ +STATIC s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + u16 reg_data; + + DEBUGFUNC("e1000_setup_copper_link_ich8lan"); + + ctrl = E1000_READ_REG(hw, E1000_CTRL); + ctrl |= E1000_CTRL_SLU; + ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + + /* Set the mac to wait the maximum time between each iteration + * and increase the max iterations when polling the phy; + * this fixes erroneous timeouts at 10Mbps. + */ + ret_val = e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_TIMEOUTS, + 0xFFFF); + if (ret_val) + return ret_val; + ret_val = e1000_read_kmrn_reg_generic(hw, + E1000_KMRNCTRLSTA_INBAND_PARAM, + ®_data); + if (ret_val) + return ret_val; + reg_data |= 0x3F; + ret_val = e1000_write_kmrn_reg_generic(hw, + E1000_KMRNCTRLSTA_INBAND_PARAM, + reg_data); + if (ret_val) + return ret_val; + + switch (hw->phy.type) { + case e1000_phy_igp_3: + ret_val = e1000_copper_link_setup_igp(hw); + if (ret_val) + return ret_val; + break; + case e1000_phy_bm: + case e1000_phy_82578: + ret_val = e1000_copper_link_setup_m88(hw); + if (ret_val) + return ret_val; + break; + case e1000_phy_82577: + case e1000_phy_82579: + ret_val = e1000_copper_link_setup_82577(hw); + if (ret_val) + return ret_val; + break; + case e1000_phy_ife: + ret_val = hw->phy.ops.read_reg(hw, IFE_PHY_MDIX_CONTROL, + ®_data); + if (ret_val) + return ret_val; + + reg_data &= ~IFE_PMC_AUTO_MDIX; + + switch (hw->phy.mdix) { + case 1: + reg_data &= ~IFE_PMC_FORCE_MDIX; + break; + case 2: + reg_data |= IFE_PMC_FORCE_MDIX; + break; + case 0: + default: + reg_data |= IFE_PMC_AUTO_MDIX; + break; + } + ret_val = hw->phy.ops.write_reg(hw, IFE_PHY_MDIX_CONTROL, + reg_data); + if (ret_val) + return ret_val; + break; + default: + break; + } + + return e1000_setup_copper_link_generic(hw); +} + +/** + * e1000_setup_copper_link_pch_lpt - Configure MAC/PHY interface + * @hw: pointer to the HW structure + * + * Calls the PHY specific link setup function and then calls the + * generic setup_copper_link to finish configuring the link for + * Lynxpoint PCH devices + **/ +STATIC s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + + DEBUGFUNC("e1000_setup_copper_link_pch_lpt"); + + ctrl = E1000_READ_REG(hw, E1000_CTRL); + ctrl |= E1000_CTRL_SLU; + ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + + ret_val = e1000_copper_link_setup_82577(hw); + if (ret_val) + return ret_val; + + return e1000_setup_copper_link_generic(hw); +} + +/** + * e1000_get_link_up_info_ich8lan - Get current link speed and duplex + * @hw: pointer to the HW structure + * @speed: pointer to 
store current link speed + * @duplex: pointer to store the current link duplex + * + * Calls the generic get_speed_and_duplex to retrieve the current link + * information and then calls the Kumeran lock loss workaround for links at + * gigabit speeds. + **/ +STATIC s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed, + u16 *duplex) +{ + s32 ret_val; + + DEBUGFUNC("e1000_get_link_up_info_ich8lan"); + + ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed, duplex); + if (ret_val) + return ret_val; + + if ((hw->mac.type == e1000_ich8lan) && + (hw->phy.type == e1000_phy_igp_3) && + (*speed == SPEED_1000)) { + ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw); + } + + return ret_val; +} + +/** + * e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround + * @hw: pointer to the HW structure + * + * Work-around for 82566 Kumeran PCS lock loss: + * On link status change (i.e. PCI reset, speed change) and link is up and + * speed is gigabit- + * 0) if workaround is optionally disabled do nothing + * 1) wait 1ms for Kumeran link to come up + * 2) check Kumeran Diagnostic register PCS lock loss bit + * 3) if not set the link is locked (all is good), otherwise... + * 4) reset the PHY + * 5) repeat up to 10 times + * Note: this is only called for IGP3 copper when speed is 1gb. + **/ +STATIC s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw) +{ + struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; + u32 phy_ctrl; + s32 ret_val; + u16 i, data; + bool link; + + DEBUGFUNC("e1000_kmrn_lock_loss_workaround_ich8lan"); + + if (!dev_spec->kmrn_lock_loss_workaround_enabled) + return E1000_SUCCESS; + + /* Make sure link is up before proceeding. If not just return. + * Attempting this while link is negotiating fouled up link + * stability + */ + ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link); + if (!link) + return E1000_SUCCESS; + + for (i = 0; i < 10; i++) { + /* read once to clear */ + ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data); + if (ret_val) + return ret_val; + /* and again to get new status */ + ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data); + if (ret_val) + return ret_val; + + /* check for PCS lock */ + if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS)) + return E1000_SUCCESS; + + /* Issue PHY reset */ + hw->phy.ops.reset(hw); + msec_delay_irq(5); + } + /* Disable GigE link negotiation */ + phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL); + phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE | + E1000_PHY_CTRL_NOND0A_GBE_DISABLE); + E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl); + + /* Call gig speed drop workaround on Gig disable before accessing + * any PHY registers + */ + e1000_gig_downshift_workaround_ich8lan(hw); + + /* unable to acquire PCS lock */ + return -E1000_ERR_PHY; +} + +/** + * e1000_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state + * @hw: pointer to the HW structure + * @state: boolean value used to set the current Kumeran workaround state + * + * If ICH8, set the current Kumeran workaround state (enabled - true + * /disabled - false). 
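 *
 * Editor's note (usage sketch, the caller shown is hypothetical): this
 * setter only records the flag tested at the top of
 * e1000_kmrn_lock_loss_workaround_ich8lan() above.  A driver that wants
 * the 82566/IGP3 PCS lock-loss handling active would typically call
 *
 *     if (hw->mac.type == e1000_ich8lan)
 *         e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, true);
 *
 * once after initialization; passing false disables the check again.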
+ **/ +void e1000_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw, + bool state) +{ + struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; + + DEBUGFUNC("e1000_set_kmrn_lock_loss_workaround_ich8lan"); + + if (hw->mac.type != e1000_ich8lan) { + DEBUGOUT("Workaround applies to ICH8 only.\n"); + return; + } + + dev_spec->kmrn_lock_loss_workaround_enabled = state; + + return; +} + +/** + * e1000_ipg3_phy_powerdown_workaround_ich8lan - Power down workaround on D3 + * @hw: pointer to the HW structure + * + * Workaround for 82566 power-down on D3 entry: + * 1) disable gigabit link + * 2) write VR power-down enable + * 3) read it back + * Continue if successful, else issue LCD reset and repeat + **/ +void e1000_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw) +{ + u32 reg; + u16 data; + u8 retry = 0; + + DEBUGFUNC("e1000_igp3_phy_powerdown_workaround_ich8lan"); + + if (hw->phy.type != e1000_phy_igp_3) + return; + + /* Try the workaround twice (if needed) */ + do { + /* Disable link */ + reg = E1000_READ_REG(hw, E1000_PHY_CTRL); + reg |= (E1000_PHY_CTRL_GBE_DISABLE | + E1000_PHY_CTRL_NOND0A_GBE_DISABLE); + E1000_WRITE_REG(hw, E1000_PHY_CTRL, reg); + + /* Call gig speed drop workaround on Gig disable before + * accessing any PHY registers + */ + if (hw->mac.type == e1000_ich8lan) + e1000_gig_downshift_workaround_ich8lan(hw); + + /* Write VR power-down enable */ + hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data); + data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK; + hw->phy.ops.write_reg(hw, IGP3_VR_CTRL, + data | IGP3_VR_CTRL_MODE_SHUTDOWN); + + /* Read it back and test */ + hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data); + data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK; + if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry) + break; + + /* Issue PHY reset and repeat at most one more time */ + reg = E1000_READ_REG(hw, E1000_CTRL); + E1000_WRITE_REG(hw, E1000_CTRL, reg | E1000_CTRL_PHY_RST); + retry++; + } while (retry); +} + +/** + * e1000_gig_downshift_workaround_ich8lan - WoL from S5 stops working + * @hw: pointer to the HW structure + * + * Steps to take when dropping from 1Gb/s (eg. link cable removal (LSC), + * LPLU, Gig disable, MDIC PHY reset): + * 1) Set Kumeran Near-end loopback + * 2) Clear Kumeran Near-end loopback + * Should only be called for ICH8[m] devices with any 1G Phy. + **/ +void e1000_gig_downshift_workaround_ich8lan(struct e1000_hw *hw) +{ + s32 ret_val; + u16 reg_data = 0; + + DEBUGFUNC("e1000_gig_downshift_workaround_ich8lan"); + + if ((hw->mac.type != e1000_ich8lan) || + (hw->phy.type == e1000_phy_ife)) + return; + + ret_val = e1000_read_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET, + ®_data); + if (ret_val) + return; + reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK; + ret_val = e1000_write_kmrn_reg_generic(hw, + E1000_KMRNCTRLSTA_DIAG_OFFSET, + reg_data); + if (ret_val) + return; + reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK; + e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET, + reg_data); +} + +/** + * e1000_suspend_workarounds_ich8lan - workarounds needed during S0->Sx + * @hw: pointer to the HW structure + * + * During S0 to Sx transition, it is possible the link remains at gig + * instead of negotiating to a lower speed. Before going to Sx, set + * 'Gig Disable' to force link speed negotiation to a lower speed based on + * the LPLU setting in the NVM or custom setting. For PCH and newer parts, + * the OEM bits PHY register (LED, GbE disable and LPLU configurations) also + * needs to be written. 
+ * Parts that support (and are linked to a partner which support) EEE in + * 100Mbps should disable LPLU since 100Mbps w/ EEE requires less power + * than 10Mbps w/o EEE. + **/ +void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw) +{ + struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; + u32 phy_ctrl; + s32 ret_val; + + DEBUGFUNC("e1000_suspend_workarounds_ich8lan"); + + phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL); + phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE; + + if (hw->phy.type == e1000_phy_i217) { + u16 phy_reg, device_id = hw->device_id; + + if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) || + (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) || + (device_id == E1000_DEV_ID_PCH_I218_LM3) || + (device_id == E1000_DEV_ID_PCH_I218_V3) || + (hw->mac.type >= e1000_pch_spt)) { + u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6); + + E1000_WRITE_REG(hw, E1000_FEXTNVM6, + fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK); + } + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + + if (!dev_spec->eee_disable) { + u16 eee_advert; + + ret_val = + e1000_read_emi_reg_locked(hw, + I217_EEE_ADVERTISEMENT, + &eee_advert); + if (ret_val) + goto release; + + /* Disable LPLU if both link partners support 100BaseT + * EEE and 100Full is advertised on both ends of the + * link, and enable Auto Enable LPI since there will + * be no driver to enable LPI while in Sx. + */ + if ((eee_advert & I82579_EEE_100_SUPPORTED) && + (dev_spec->eee_lp_ability & + I82579_EEE_100_SUPPORTED) && + (hw->phy.autoneg_advertised & ADVERTISE_100_FULL)) { + phy_ctrl &= ~(E1000_PHY_CTRL_D0A_LPLU | + E1000_PHY_CTRL_NOND0A_LPLU); + + /* Set Auto Enable LPI after link up */ + hw->phy.ops.read_reg_locked(hw, + I217_LPI_GPIO_CTRL, + &phy_reg); + phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI; + hw->phy.ops.write_reg_locked(hw, + I217_LPI_GPIO_CTRL, + phy_reg); + } + } + + /* For i217 Intel Rapid Start Technology support, + * when the system is going into Sx and no manageability engine + * is present, the driver must configure proxy to reset only on + * power good. LPI (Low Power Idle) state must also reset only + * on power good, as well as the MTA (Multicast table array). + * The SMBus release must also be disabled on LCD reset. + */ + if (!(E1000_READ_REG(hw, E1000_FWSM) & + E1000_ICH_FWSM_FW_VALID)) { + /* Enable proxy to reset only on power good. */ + hw->phy.ops.read_reg_locked(hw, I217_PROXY_CTRL, + &phy_reg); + phy_reg |= I217_PROXY_CTRL_AUTO_DISABLE; + hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL, + phy_reg); + + /* Set bit enable LPI (EEE) to reset only on + * power good. + */ + hw->phy.ops.read_reg_locked(hw, I217_SxCTRL, &phy_reg); + phy_reg |= I217_SxCTRL_ENABLE_LPI_RESET; + hw->phy.ops.write_reg_locked(hw, I217_SxCTRL, phy_reg); + + /* Disable the SMB release on LCD reset. 
*/ + hw->phy.ops.read_reg_locked(hw, I217_MEMPWR, &phy_reg); + phy_reg &= ~I217_MEMPWR_DISABLE_SMB_RELEASE; + hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg); + } + + /* Enable MTA to reset for Intel Rapid Start Technology + * Support + */ + hw->phy.ops.read_reg_locked(hw, I217_CGFREG, &phy_reg); + phy_reg |= I217_CGFREG_ENABLE_MTA_RESET; + hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg); + +release: + hw->phy.ops.release(hw); + } +out: + E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl); + + if (hw->mac.type == e1000_ich8lan) + e1000_gig_downshift_workaround_ich8lan(hw); + + if (hw->mac.type >= e1000_pchlan) { + e1000_oem_bits_config_ich8lan(hw, false); + + /* Reset PHY to activate OEM bits on 82577/8 */ + if (hw->mac.type == e1000_pchlan) + e1000_phy_hw_reset_generic(hw); + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return; + e1000_write_smbus_addr(hw); + hw->phy.ops.release(hw); + } + + return; +} + +/** + * e1000_resume_workarounds_pchlan - workarounds needed during Sx->S0 + * @hw: pointer to the HW structure + * + * During Sx to S0 transitions on non-managed devices or managed devices + * on which PHY resets are not blocked, if the PHY registers cannot be + * accessed properly by the s/w toggle the LANPHYPC value to power cycle + * the PHY. + * On i217, setup Intel Rapid Start Technology. + **/ +u32 e1000_resume_workarounds_pchlan(struct e1000_hw *hw) +{ + s32 ret_val; + + DEBUGFUNC("e1000_resume_workarounds_pchlan"); + if (hw->mac.type < e1000_pch2lan) + return E1000_SUCCESS; + + ret_val = e1000_init_phy_workarounds_pchlan(hw); + if (ret_val) { + DEBUGOUT1("Failed to init PHY flow ret_val=%d\n", ret_val); + return ret_val; + } + + /* For i217 Intel Rapid Start Technology support when the system + * is transitioning from Sx and no manageability engine is present + * configure SMBus to restore on reset, disable proxy, and enable + * the reset on MTA (Multicast table array). + */ + if (hw->phy.type == e1000_phy_i217) { + u16 phy_reg; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) { + DEBUGOUT("Failed to setup iRST\n"); + return ret_val; + } + + /* Clear Auto Enable LPI after link up */ + hw->phy.ops.read_reg_locked(hw, I217_LPI_GPIO_CTRL, &phy_reg); + phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI; + hw->phy.ops.write_reg_locked(hw, I217_LPI_GPIO_CTRL, phy_reg); + + if (!(E1000_READ_REG(hw, E1000_FWSM) & + E1000_ICH_FWSM_FW_VALID)) { + /* Restore clear on SMB if no manageability engine + * is present + */ + ret_val = hw->phy.ops.read_reg_locked(hw, I217_MEMPWR, + &phy_reg); + if (ret_val) + goto release; + phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE; + hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg); + + /* Disable Proxy */ + hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL, 0); + } + /* Enable reset on MTA */ + ret_val = hw->phy.ops.read_reg_locked(hw, I217_CGFREG, + &phy_reg); + if (ret_val) + goto release; + phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET; + hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg); +release: + if (ret_val) + DEBUGOUT1("Error %d in resume workarounds\n", ret_val); + hw->phy.ops.release(hw); + return ret_val; + } + return E1000_SUCCESS; +} + +/** + * e1000_cleanup_led_ich8lan - Restore the default LED operation + * @hw: pointer to the HW structure + * + * Return the LED back to the default configuration. 
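 *
 * Editor's note (usage sketch, caller code hypothetical): the LED helpers
 * in this block are normally driven through the mac->ops table filled in
 * by the family init code, e.g. for a port identify sequence:
 *
 *     ret_val = hw->mac.ops.setup_led(hw);
 *     hw->mac.ops.led_on(hw);
 *     hw->mac.ops.led_off(hw);
 *     ret_val = hw->mac.ops.cleanup_led(hw);
 *
 * cleanup_led below restores the LEDCTL value captured in ledctl_default,
 * or the IFE PHY LED register for PHY-controlled LEDs.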
+ **/ +STATIC s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_cleanup_led_ich8lan"); + + if (hw->phy.type == e1000_phy_ife) + return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED, + 0); + + E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default); + return E1000_SUCCESS; +} + +/** + * e1000_led_on_ich8lan - Turn LEDs on + * @hw: pointer to the HW structure + * + * Turn on the LEDs. + **/ +STATIC s32 e1000_led_on_ich8lan(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_led_on_ich8lan"); + + if (hw->phy.type == e1000_phy_ife) + return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED, + (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON)); + + E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2); + return E1000_SUCCESS; +} + +/** + * e1000_led_off_ich8lan - Turn LEDs off + * @hw: pointer to the HW structure + * + * Turn off the LEDs. + **/ +STATIC s32 e1000_led_off_ich8lan(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_led_off_ich8lan"); + + if (hw->phy.type == e1000_phy_ife) + return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED, + (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF)); + + E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1); + return E1000_SUCCESS; +} + +/** + * e1000_setup_led_pchlan - Configures SW controllable LED + * @hw: pointer to the HW structure + * + * This prepares the SW controllable LED for use. + **/ +STATIC s32 e1000_setup_led_pchlan(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_setup_led_pchlan"); + + return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, + (u16)hw->mac.ledctl_mode1); +} + +/** + * e1000_cleanup_led_pchlan - Restore the default LED operation + * @hw: pointer to the HW structure + * + * Return the LED back to the default configuration. + **/ +STATIC s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_cleanup_led_pchlan"); + + return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, + (u16)hw->mac.ledctl_default); +} + +/** + * e1000_led_on_pchlan - Turn LEDs on + * @hw: pointer to the HW structure + * + * Turn on the LEDs. + **/ +STATIC s32 e1000_led_on_pchlan(struct e1000_hw *hw) +{ + u16 data = (u16)hw->mac.ledctl_mode2; + u32 i, led; + + DEBUGFUNC("e1000_led_on_pchlan"); + + /* If no link, then turn LED on by setting the invert bit + * for each LED that's mode is "link_up" in ledctl_mode2. + */ + if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) { + for (i = 0; i < 3; i++) { + led = (data >> (i * 5)) & E1000_PHY_LED0_MASK; + if ((led & E1000_PHY_LED0_MODE_MASK) != + E1000_LEDCTL_MODE_LINK_UP) + continue; + if (led & E1000_PHY_LED0_IVRT) + data &= ~(E1000_PHY_LED0_IVRT << (i * 5)); + else + data |= (E1000_PHY_LED0_IVRT << (i * 5)); + } + } + + return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data); +} + +/** + * e1000_led_off_pchlan - Turn LEDs off + * @hw: pointer to the HW structure + * + * Turn off the LEDs. + **/ +STATIC s32 e1000_led_off_pchlan(struct e1000_hw *hw) +{ + u16 data = (u16)hw->mac.ledctl_mode1; + u32 i, led; + + DEBUGFUNC("e1000_led_off_pchlan"); + + /* If no link, then turn LED off by clearing the invert bit + * for each LED that's mode is "link_up" in ledctl_mode1. 
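 *
 * Editor's note (bit-layout illustration derived from the shifts used in
 * these loops, not from a datasheet): HV_LED_CONFIG packs one 5-bit field
 * per LED, so LED i occupies bits [i * 5 + 4 : i * 5].  For LED 2 the loop
 * body is effectively:
 *
 *     led = (data >> 10) & E1000_PHY_LED0_MASK;
 *     if (led & E1000_PHY_LED0_IVRT)
 *         data &= ~(E1000_PHY_LED0_IVRT << 10);
 *     else
 *         data |= (E1000_PHY_LED0_IVRT << 10);
 *
 * i.e. the invert bit is flipped for every LED whose mode is "link_up" so
 * the requested on/off state is visible even without link.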
+ */ + if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) { + for (i = 0; i < 3; i++) { + led = (data >> (i * 5)) & E1000_PHY_LED0_MASK; + if ((led & E1000_PHY_LED0_MODE_MASK) != + E1000_LEDCTL_MODE_LINK_UP) + continue; + if (led & E1000_PHY_LED0_IVRT) + data &= ~(E1000_PHY_LED0_IVRT << (i * 5)); + else + data |= (E1000_PHY_LED0_IVRT << (i * 5)); + } + } + + return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data); +} + +/** + * e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset + * @hw: pointer to the HW structure + * + * Read appropriate register for the config done bit for completion status + * and configure the PHY through s/w for EEPROM-less parts. + * + * NOTE: some silicon which is EEPROM-less will fail trying to read the + * config done bit, so only an error is logged and continues. If we were + * to return with error, EEPROM-less silicon would not be able to be reset + * or change link. + **/ +STATIC s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + u32 bank = 0; + u32 status; + + DEBUGFUNC("e1000_get_cfg_done_ich8lan"); + + e1000_get_cfg_done_generic(hw); + + /* Wait for indication from h/w that it has completed basic config */ + if (hw->mac.type >= e1000_ich10lan) { + e1000_lan_init_done_ich8lan(hw); + } else { + ret_val = e1000_get_auto_rd_done_generic(hw); + if (ret_val) { + /* When auto config read does not complete, do not + * return with an error. This can happen in situations + * where there is no eeprom and prevents getting link. + */ + DEBUGOUT("Auto Read Done did not complete\n"); + ret_val = E1000_SUCCESS; + } + } + + /* Clear PHY Reset Asserted bit */ + status = E1000_READ_REG(hw, E1000_STATUS); + if (status & E1000_STATUS_PHYRA) + E1000_WRITE_REG(hw, E1000_STATUS, status & ~E1000_STATUS_PHYRA); + else + DEBUGOUT("PHY Reset Asserted not set - needs delay\n"); + + /* If EEPROM is not marked present, init the IGP 3 PHY manually */ + if (hw->mac.type <= e1000_ich9lan) { + if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) && + (hw->phy.type == e1000_phy_igp_3)) { + e1000_phy_init_script_igp3(hw); + } + } else { + if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) { + /* Maybe we should do a basic PHY config */ + DEBUGOUT("EEPROM not present\n"); + ret_val = -E1000_ERR_CONFIG; + } + } + + return ret_val; +} + +/** + * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down + * @hw: pointer to the HW structure + * + * In the case of a PHY power down to save power, or to turn off link during a + * driver unload, or wake on lan is not enabled, remove the link. + **/ +STATIC void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw) +{ + /* If the management interface is not enabled, then power down */ + if (!(hw->mac.ops.check_mng_mode(hw) || + hw->phy.ops.check_reset_block(hw))) + e1000_power_down_phy_copper(hw); + + return; +} + +/** + * e1000_clear_hw_cntrs_ich8lan - Clear statistical counters + * @hw: pointer to the HW structure + * + * Clears hardware counters specific to the silicon family and calls + * clear_hw_cntrs_generic to clear all general purpose counters. 
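 *
 * Editor's note (idiom note, no new behaviour): the MAC statistics
 * registers touched below are clear-on-read, so simply reading them is the
 * clearing operation and the return value is intentionally discarded,
 * e.g.:
 *
 *     (void)E1000_READ_REG(hw, E1000_ALGNERRC);
 *
 * The PHY-side collision counters need the explicit page select and
 * read_reg_page() sequence shown at the end of the function.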
+ **/ +STATIC void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw) +{ + u16 phy_data; + s32 ret_val; + + DEBUGFUNC("e1000_clear_hw_cntrs_ich8lan"); + + e1000_clear_hw_cntrs_base_generic(hw); + + E1000_READ_REG(hw, E1000_ALGNERRC); + E1000_READ_REG(hw, E1000_RXERRC); + E1000_READ_REG(hw, E1000_TNCRS); + E1000_READ_REG(hw, E1000_CEXTERR); + E1000_READ_REG(hw, E1000_TSCTC); + E1000_READ_REG(hw, E1000_TSCTFC); + + E1000_READ_REG(hw, E1000_MGTPRC); + E1000_READ_REG(hw, E1000_MGTPDC); + E1000_READ_REG(hw, E1000_MGTPTC); + + E1000_READ_REG(hw, E1000_IAC); + E1000_READ_REG(hw, E1000_ICRXOC); + + /* Clear PHY statistics registers */ + if ((hw->phy.type == e1000_phy_82578) || + (hw->phy.type == e1000_phy_82579) || + (hw->phy.type == e1000_phy_i217) || + (hw->phy.type == e1000_phy_82577)) { + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return; + ret_val = hw->phy.ops.set_page(hw, + HV_STATS_PAGE << IGP_PAGE_SHIFT); + if (ret_val) + goto release; + hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data); +release: + hw->phy.ops.release(hw); + } +} + +/** + * e1000_configure_k0s_lpt - Configure K0s power state + * @hw: pointer to the HW structure + * @entry_latency: Tx idle period for entering K0s - valid values are 0 to 3. + * 0 corresponds to 128ns, each value over 0 doubles the duration. + * @min_time: Minimum Tx idle period allowed - valid values are 0 to 4. + * 0 corresponds to 128ns, each value over 0 doubles the duration. + * + * Configure the K1 power state based on the provided parameter. + * Assumes semaphore already acquired. 
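 *
 * Editor's note (worked example from the encoding described above): both
 * parameters are power-of-two multipliers of a 128 ns base, so a value v
 * corresponds to 128 ns << v.  min_time = 3 therefore requests a minimum
 * Tx idle period of 128 ns * 8 = 1024 ns, and the largest allowed value,
 * 4, requests 2048 ns.  A typical call using the defaults from
 * e1000_ich8lan.h (K1_ENTRY_LATENCY = 0, K1_MIN_TIME = 1) looks like:
 *
 *     ret_val = e1000_configure_k0s_lpt(hw, K1_ENTRY_LATENCY, K1_MIN_TIME);
 *
 * and, per the note above, must be made with the semaphore already held.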
+ * + * Success returns 0, Failure returns: + * -E1000_ERR_PHY (-2) in case of access error + * -E1000_ERR_PARAM (-4) in case of parameters error + **/ +s32 e1000_configure_k0s_lpt(struct e1000_hw *hw, u8 entry_latency, u8 min_time) +{ + s32 ret_val; + u16 kmrn_reg = 0; + + DEBUGFUNC("e1000_configure_k0s_lpt"); + + if (entry_latency > 3 || min_time > 4) + return -E1000_ERR_PARAM; + + ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K0S_CTRL, + &kmrn_reg); + if (ret_val) + return ret_val; + + /* for now don't touch the latency */ + kmrn_reg &= ~(E1000_KMRNCTRLSTA_K0S_CTRL_MIN_TIME_MASK); + kmrn_reg |= ((min_time << E1000_KMRNCTRLSTA_K0S_CTRL_MIN_TIME_SHIFT)); + + ret_val = e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K0S_CTRL, + kmrn_reg); + if (ret_val) + return ret_val; + + return E1000_SUCCESS; +} diff --git a/src/spdk/dpdk/drivers/net/e1000/base/e1000_ich8lan.h b/src/spdk/dpdk/drivers/net/e1000/base/e1000_ich8lan.h new file mode 100644 index 000000000..699a92c4b --- /dev/null +++ b/src/spdk/dpdk/drivers/net/e1000/base/e1000_ich8lan.h @@ -0,0 +1,311 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001 - 2015 Intel Corporation + */ + +#ifndef _E1000_ICH8LAN_H_ +#define _E1000_ICH8LAN_H_ + +#define ICH_FLASH_GFPREG 0x0000 +#define ICH_FLASH_HSFSTS 0x0004 +#define ICH_FLASH_HSFCTL 0x0006 +#define ICH_FLASH_FADDR 0x0008 +#define ICH_FLASH_FDATA0 0x0010 + +/* Requires up to 10 seconds when MNG might be accessing part. */ +#define ICH_FLASH_READ_COMMAND_TIMEOUT 10000000 +#define ICH_FLASH_WRITE_COMMAND_TIMEOUT 10000000 +#define ICH_FLASH_ERASE_COMMAND_TIMEOUT 10000000 +#define ICH_FLASH_LINEAR_ADDR_MASK 0x00FFFFFF +#define ICH_FLASH_CYCLE_REPEAT_COUNT 10 + +#define ICH_CYCLE_READ 0 +#define ICH_CYCLE_WRITE 2 +#define ICH_CYCLE_ERASE 3 + +#define FLASH_GFPREG_BASE_MASK 0x1FFF +#define FLASH_SECTOR_ADDR_SHIFT 12 + +#define ICH_FLASH_SEG_SIZE_256 256 +#define ICH_FLASH_SEG_SIZE_4K 4096 +#define ICH_FLASH_SEG_SIZE_8K 8192 +#define ICH_FLASH_SEG_SIZE_64K 65536 + +#define E1000_ICH_FWSM_RSPCIPHY 0x00000040 /* Reset PHY on PCI Reset */ +/* FW established a valid mode */ +#define E1000_ICH_FWSM_FW_VALID 0x00008000 +#define E1000_ICH_FWSM_PCIM2PCI 0x01000000 /* ME PCIm-to-PCI active */ +#define E1000_ICH_FWSM_PCIM2PCI_COUNT 2000 + +#define E1000_ICH_MNG_IAMT_MODE 0x2 + +#define E1000_FWSM_WLOCK_MAC_MASK 0x0380 +#define E1000_FWSM_WLOCK_MAC_SHIFT 7 +#if !defined(EXTERNAL_RELEASE) || defined(ULP_SUPPORT) +#define E1000_FWSM_ULP_CFG_DONE 0x00000400 /* Low power cfg done */ +#endif /* !EXTERNAL_RELEASE || ULP_SUPPORT */ + +/* Shared Receive Address Registers */ +#define E1000_SHRAL_PCH_LPT(_i) (0x05408 + ((_i) * 8)) +#define E1000_SHRAH_PCH_LPT(_i) (0x0540C + ((_i) * 8)) + +#if !defined(EXTERNAL_RELEASE) || defined(ULP_SUPPORT) +#define E1000_H2ME 0x05B50 /* Host to ME */ +#endif /* !EXTERNAL_RELEASE || ULP_SUPPORT */ +#if !defined(EXTERNAL_RELEASE) || defined(ULP_SUPPORT) +#define E1000_H2ME_ULP 0x00000800 /* ULP Indication Bit */ +#define E1000_H2ME_ENFORCE_SETTINGS 0x00001000 /* Enforce Settings */ + +#endif /* !EXTERNAL_RELEASE || ULP_SUPPORT */ +#define ID_LED_DEFAULT_ICH8LAN ((ID_LED_DEF1_DEF2 << 12) | \ + (ID_LED_OFF1_OFF2 << 8) | \ + (ID_LED_OFF1_ON2 << 4) | \ + (ID_LED_DEF1_DEF2)) + +#define E1000_ICH_NVM_SIG_WORD 0x13 +#define E1000_ICH_NVM_SIG_MASK 0xC000 +#define E1000_ICH_NVM_VALID_SIG_MASK 0xC0 +#define E1000_ICH_NVM_SIG_VALUE 0x80 + +#define E1000_ICH8_LAN_INIT_TIMEOUT 1500 + +#if !defined(EXTERNAL_RELEASE) || defined(ULP_SUPPORT) +/* FEXT register bit definition 
*/ +#define E1000_FEXT_PHY_CABLE_DISCONNECTED 0x00000004 + +#endif /* !EXTERNAL_RELEASE || ULP_SUPPORT */ +#define E1000_FEXTNVM_SW_CONFIG 1 +#define E1000_FEXTNVM_SW_CONFIG_ICH8M (1 << 27) /* different on ICH8M */ + +#define E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK 0x0C000000 +#define E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC 0x08000000 + +#define E1000_FEXTNVM4_BEACON_DURATION_MASK 0x7 +#define E1000_FEXTNVM4_BEACON_DURATION_8USEC 0x7 +#define E1000_FEXTNVM4_BEACON_DURATION_16USEC 0x3 + +#define E1000_FEXTNVM6_REQ_PLL_CLK 0x00000100 +#define E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION 0x00000200 +#define E1000_FEXTNVM6_K1_OFF_ENABLE 0x80000000 +/* bit for disabling packet buffer read */ +#define E1000_FEXTNVM7_DISABLE_PB_READ 0x00040000 +#define E1000_FEXTNVM7_SIDE_CLK_UNGATE 0x00000004 +#if !defined(EXTERNAL_RELEASE) || defined(ULP_SUPPORT) +#define E1000_FEXTNVM7_DISABLE_SMB_PERST 0x00000020 +#endif /* !EXTERNAL_RELEASE || ULP_SUPPORT */ +#define E1000_FEXTNVM9_IOSFSB_CLKGATE_DIS 0x00000800 +#define E1000_FEXTNVM9_IOSFSB_CLKREQ_DIS 0x00001000 +#define E1000_FEXTNVM11_DISABLE_PB_READ 0x00000200 +#define E1000_FEXTNVM11_DISABLE_MULR_FIX 0x00002000 + +/* bit24: RXDCTL thresholds granularity: 0 - cache lines, 1 - descriptors */ +#define E1000_RXDCTL_THRESH_UNIT_DESC 0x01000000 + +#define NVM_SIZE_MULTIPLIER 4096 /*multiplier for NVMS field*/ +#define E1000_FLASH_BASE_ADDR 0xE000 /*offset of NVM access regs*/ +#define E1000_CTRL_EXT_NVMVS 0x3 /*NVM valid sector */ +#define E1000_TARC0_CB_MULTIQ_3_REQ (1 << 28 | 1 << 29) +#define E1000_TARC0_CB_MULTIQ_2_REQ (1 << 29) +#define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL + +#define E1000_ICH_RAR_ENTRIES 7 +#define E1000_PCH2_RAR_ENTRIES 5 /* RAR[0], SHRA[0-3] */ +#define E1000_PCH_LPT_RAR_ENTRIES 12 /* RAR[0], SHRA[0-10] */ + +#define PHY_PAGE_SHIFT 5 +#define PHY_REG(page, reg) (((page) << PHY_PAGE_SHIFT) | \ + ((reg) & MAX_PHY_REG_ADDRESS)) +#define IGP3_KMRN_DIAG PHY_REG(770, 19) /* KMRN Diagnostic */ +#define IGP3_VR_CTRL PHY_REG(776, 18) /* Voltage Regulator Control */ + +#define IGP3_KMRN_DIAG_PCS_LOCK_LOSS 0x0002 +#define IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK 0x0300 +#define IGP3_VR_CTRL_MODE_SHUTDOWN 0x0200 + +/* PHY Wakeup Registers and defines */ +#define BM_PORT_GEN_CFG PHY_REG(BM_PORT_CTRL_PAGE, 17) +#define BM_RCTL PHY_REG(BM_WUC_PAGE, 0) +#define BM_WUC PHY_REG(BM_WUC_PAGE, 1) +#define BM_WUFC PHY_REG(BM_WUC_PAGE, 2) +#define BM_WUS PHY_REG(BM_WUC_PAGE, 3) +#define BM_RAR_L(_i) (BM_PHY_REG(BM_WUC_PAGE, 16 + ((_i) << 2))) +#define BM_RAR_M(_i) (BM_PHY_REG(BM_WUC_PAGE, 17 + ((_i) << 2))) +#define BM_RAR_H(_i) (BM_PHY_REG(BM_WUC_PAGE, 18 + ((_i) << 2))) +#define BM_RAR_CTRL(_i) (BM_PHY_REG(BM_WUC_PAGE, 19 + ((_i) << 2))) +#define BM_MTA(_i) (BM_PHY_REG(BM_WUC_PAGE, 128 + ((_i) << 1))) + +#define BM_RCTL_UPE 0x0001 /* Unicast Promiscuous Mode */ +#define BM_RCTL_MPE 0x0002 /* Multicast Promiscuous Mode */ +#define BM_RCTL_MO_SHIFT 3 /* Multicast Offset Shift */ +#define BM_RCTL_MO_MASK (3 << 3) /* Multicast Offset Mask */ +#define BM_RCTL_BAM 0x0020 /* Broadcast Accept Mode */ +#define BM_RCTL_PMCF 0x0040 /* Pass MAC Control Frames */ +#define BM_RCTL_RFCE 0x0080 /* Rx Flow Control Enable */ + +#define HV_LED_CONFIG PHY_REG(768, 30) /* LED Configuration */ +#define HV_MUX_DATA_CTRL PHY_REG(776, 16) +#define HV_MUX_DATA_CTRL_GEN_TO_MAC 0x0400 +#define HV_MUX_DATA_CTRL_FORCE_SPEED 0x0004 +#define HV_STATS_PAGE 778 +/* Half-duplex collision counts */ +#define HV_SCC_UPPER PHY_REG(HV_STATS_PAGE, 16) /* Single Collision */ +#define HV_SCC_LOWER 
PHY_REG(HV_STATS_PAGE, 17) +#define HV_ECOL_UPPER PHY_REG(HV_STATS_PAGE, 18) /* Excessive Coll. */ +#define HV_ECOL_LOWER PHY_REG(HV_STATS_PAGE, 19) +#define HV_MCC_UPPER PHY_REG(HV_STATS_PAGE, 20) /* Multiple Collision */ +#define HV_MCC_LOWER PHY_REG(HV_STATS_PAGE, 21) +#define HV_LATECOL_UPPER PHY_REG(HV_STATS_PAGE, 23) /* Late Collision */ +#define HV_LATECOL_LOWER PHY_REG(HV_STATS_PAGE, 24) +#define HV_COLC_UPPER PHY_REG(HV_STATS_PAGE, 25) /* Collision */ +#define HV_COLC_LOWER PHY_REG(HV_STATS_PAGE, 26) +#define HV_DC_UPPER PHY_REG(HV_STATS_PAGE, 27) /* Defer Count */ +#define HV_DC_LOWER PHY_REG(HV_STATS_PAGE, 28) +#define HV_TNCRS_UPPER PHY_REG(HV_STATS_PAGE, 29) /* Tx with no CRS */ +#define HV_TNCRS_LOWER PHY_REG(HV_STATS_PAGE, 30) + +#define E1000_FCRTV_PCH 0x05F40 /* PCH Flow Control Refresh Timer Value */ + +#define E1000_NVM_K1_CONFIG 0x1B /* NVM K1 Config Word */ +#define E1000_NVM_K1_ENABLE 0x1 /* NVM Enable K1 bit */ +#define K1_ENTRY_LATENCY 0 +#define K1_MIN_TIME 1 + +/* SMBus Control Phy Register */ +#define CV_SMB_CTRL PHY_REG(769, 23) +#define CV_SMB_CTRL_FORCE_SMBUS 0x0001 + +#if !defined(EXTERNAL_RELEASE) || defined(ULP_SUPPORT) +/* I218 Ultra Low Power Configuration 1 Register */ +#define I218_ULP_CONFIG1 PHY_REG(779, 16) +#define I218_ULP_CONFIG1_START 0x0001 /* Start auto ULP config */ +#define I218_ULP_CONFIG1_IND 0x0004 /* Pwr up from ULP indication */ +#define I218_ULP_CONFIG1_STICKY_ULP 0x0010 /* Set sticky ULP mode */ +#define I218_ULP_CONFIG1_INBAND_EXIT 0x0020 /* Inband on ULP exit */ +#define I218_ULP_CONFIG1_WOL_HOST 0x0040 /* WoL Host on ULP exit */ +#define I218_ULP_CONFIG1_RESET_TO_SMBUS 0x0100 /* Reset to SMBus mode */ +/* enable ULP even if when phy powered down via lanphypc */ +#define I218_ULP_CONFIG1_EN_ULP_LANPHYPC 0x0400 +/* disable clear of sticky ULP on PERST */ +#define I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST 0x0800 +#define I218_ULP_CONFIG1_DISABLE_SMB_PERST 0x1000 /* Disable on PERST# */ + +#endif /* !EXTERNAL_RELEASE || ULP_SUPPORT */ +/* SMBus Address Phy Register */ +#define HV_SMB_ADDR PHY_REG(768, 26) +#define HV_SMB_ADDR_MASK 0x007F +#define HV_SMB_ADDR_PEC_EN 0x0200 +#define HV_SMB_ADDR_VALID 0x0080 +#define HV_SMB_ADDR_FREQ_MASK 0x1100 +#define HV_SMB_ADDR_FREQ_LOW_SHIFT 8 +#define HV_SMB_ADDR_FREQ_HIGH_SHIFT 12 + +/* Strapping Option Register - RO */ +#define E1000_STRAP 0x0000C +#define E1000_STRAP_SMBUS_ADDRESS_MASK 0x00FE0000 +#define E1000_STRAP_SMBUS_ADDRESS_SHIFT 17 +#define E1000_STRAP_SMT_FREQ_MASK 0x00003000 +#define E1000_STRAP_SMT_FREQ_SHIFT 12 + +/* OEM Bits Phy Register */ +#define HV_OEM_BITS PHY_REG(768, 25) +#define HV_OEM_BITS_LPLU 0x0004 /* Low Power Link Up */ +#define HV_OEM_BITS_GBE_DIS 0x0040 /* Gigabit Disable */ +#define HV_OEM_BITS_RESTART_AN 0x0400 /* Restart Auto-negotiation */ + +/* KMRN Mode Control */ +#define HV_KMRN_MODE_CTRL PHY_REG(769, 16) +#define HV_KMRN_MDIO_SLOW 0x0400 + +/* KMRN FIFO Control and Status */ +#define HV_KMRN_FIFO_CTRLSTA PHY_REG(770, 16) +#define HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK 0x7000 +#define HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT 12 + +/* PHY Power Management Control */ +#define HV_PM_CTRL PHY_REG(770, 17) +#define HV_PM_CTRL_K1_CLK_REQ 0x200 +#define HV_PM_CTRL_K1_ENABLE 0x4000 + +#define I217_PLL_CLOCK_GATE_REG PHY_REG(772, 28) +#define I217_PLL_CLOCK_GATE_MASK 0x07FF + +#define SW_FLAG_TIMEOUT 1000 /* SW Semaphore flag timeout in ms */ + +/* Inband Control */ +#define I217_INBAND_CTRL PHY_REG(770, 18) +#define I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK 0x3F00 +#define 
I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT 8 + +/* Low Power Idle GPIO Control */ +#define I217_LPI_GPIO_CTRL PHY_REG(772, 18) +#define I217_LPI_GPIO_CTRL_AUTO_EN_LPI 0x0800 + +/* PHY Low Power Idle Control */ +#define I82579_LPI_CTRL PHY_REG(772, 20) +#define I82579_LPI_CTRL_100_ENABLE 0x2000 +#define I82579_LPI_CTRL_1000_ENABLE 0x4000 +#define I82579_LPI_CTRL_ENABLE_MASK 0x6000 + +/* 82579 DFT Control */ +#define I82579_DFT_CTRL PHY_REG(769, 20) +#define I82579_DFT_CTRL_GATE_PHY_RESET 0x0040 /* Gate PHY Reset on MAC Reset */ + +/* Extended Management Interface (EMI) Registers */ +#define I82579_EMI_ADDR 0x10 +#define I82579_EMI_DATA 0x11 +#define I82579_LPI_UPDATE_TIMER 0x4805 /* in 40ns units + 40 ns base value */ +#define I82579_MSE_THRESHOLD 0x084F /* 82579 Mean Square Error Threshold */ +#define I82577_MSE_THRESHOLD 0x0887 /* 82577 Mean Square Error Threshold */ +#define I82579_MSE_LINK_DOWN 0x2411 /* MSE count before dropping link */ +#define I82579_RX_CONFIG 0x3412 /* Receive configuration */ +#define I82579_LPI_PLL_SHUT 0x4412 /* LPI PLL Shut Enable */ +#define I82579_EEE_PCS_STATUS 0x182E /* IEEE MMD Register 3.1 >> 8 */ +#define I82579_EEE_CAPABILITY 0x0410 /* IEEE MMD Register 3.20 */ +#define I82579_EEE_ADVERTISEMENT 0x040E /* IEEE MMD Register 7.60 */ +#define I82579_EEE_LP_ABILITY 0x040F /* IEEE MMD Register 7.61 */ +#define I82579_EEE_100_SUPPORTED (1 << 1) /* 100BaseTx EEE */ +#define I82579_EEE_1000_SUPPORTED (1 << 2) /* 1000BaseTx EEE */ +#define I82579_LPI_100_PLL_SHUT (1 << 2) /* 100M LPI PLL Shut Enabled */ +#define I217_EEE_PCS_STATUS 0x9401 /* IEEE MMD Register 3.1 */ +#define I217_EEE_CAPABILITY 0x8000 /* IEEE MMD Register 3.20 */ +#define I217_EEE_ADVERTISEMENT 0x8001 /* IEEE MMD Register 7.60 */ +#define I217_EEE_LP_ABILITY 0x8002 /* IEEE MMD Register 7.61 */ +#define I217_RX_CONFIG 0xB20C /* Receive configuration */ + +#define E1000_EEE_RX_LPI_RCVD 0x0400 /* Tx LP idle received */ +#define E1000_EEE_TX_LPI_RCVD 0x0800 /* Rx LP idle received */ + +/* Intel Rapid Start Technology Support */ +#define I217_PROXY_CTRL BM_PHY_REG(BM_WUC_PAGE, 70) +#define I217_PROXY_CTRL_AUTO_DISABLE 0x0080 +#define I217_SxCTRL PHY_REG(BM_PORT_CTRL_PAGE, 28) +#define I217_SxCTRL_ENABLE_LPI_RESET 0x1000 +#define I217_CGFREG PHY_REG(772, 29) +#define I217_CGFREG_ENABLE_MTA_RESET 0x0002 +#define I217_MEMPWR PHY_REG(772, 26) +#define I217_MEMPWR_DISABLE_SMB_RELEASE 0x0010 + +/* Receive Address Initial CRC Calculation */ +#define E1000_PCH_RAICC(_n) (0x05F50 + ((_n) * 4)) + +#if defined(QV_RELEASE) || !defined(NO_PCH_LPT_B0_SUPPORT) +#define E1000_PCI_REVISION_ID_REG 0x08 +#endif /* defined(QV_RELEASE) || !defined(NO_PCH_LPT_B0_SUPPORT) */ +void e1000_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw, + bool state); +void e1000_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw); +void e1000_gig_downshift_workaround_ich8lan(struct e1000_hw *hw); +void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw); +u32 e1000_resume_workarounds_pchlan(struct e1000_hw *hw); +s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable); +s32 e1000_configure_k0s_lpt(struct e1000_hw *hw, u8 entry_latency, u8 min_time); +void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw); +s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable); +s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data); +s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data); +s32 e1000_set_eee_pchlan(struct e1000_hw *hw); +#ifdef ULP_SUPPORT +s32 
e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx); +s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force); +#endif /* ULP_SUPPORT */ +#endif /* _E1000_ICH8LAN_H_ */ +void e1000_demote_ltr(struct e1000_hw *hw, bool demote, bool link); diff --git a/src/spdk/dpdk/drivers/net/e1000/base/e1000_mac.c b/src/spdk/dpdk/drivers/net/e1000/base/e1000_mac.c new file mode 100644 index 000000000..d0e1fa58b --- /dev/null +++ b/src/spdk/dpdk/drivers/net/e1000/base/e1000_mac.c @@ -0,0 +1,2220 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001 - 2015 Intel Corporation + */ + +#include "e1000_api.h" + +STATIC s32 e1000_validate_mdi_setting_generic(struct e1000_hw *hw); +STATIC void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw); +STATIC void e1000_config_collision_dist_generic(struct e1000_hw *hw); +STATIC int e1000_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index); + +/** + * e1000_init_mac_ops_generic - Initialize MAC function pointers + * @hw: pointer to the HW structure + * + * Setups up the function pointers to no-op functions + **/ +void e1000_init_mac_ops_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + DEBUGFUNC("e1000_init_mac_ops_generic"); + + /* General Setup */ + mac->ops.init_params = e1000_null_ops_generic; + mac->ops.init_hw = e1000_null_ops_generic; + mac->ops.reset_hw = e1000_null_ops_generic; + mac->ops.setup_physical_interface = e1000_null_ops_generic; + mac->ops.get_bus_info = e1000_null_ops_generic; + mac->ops.set_lan_id = e1000_set_lan_id_multi_port_pcie; + mac->ops.read_mac_addr = e1000_read_mac_addr_generic; + mac->ops.config_collision_dist = e1000_config_collision_dist_generic; + mac->ops.clear_hw_cntrs = e1000_null_mac_generic; + /* LED */ + mac->ops.cleanup_led = e1000_null_ops_generic; + mac->ops.setup_led = e1000_null_ops_generic; + mac->ops.blink_led = e1000_null_ops_generic; + mac->ops.led_on = e1000_null_ops_generic; + mac->ops.led_off = e1000_null_ops_generic; + /* LINK */ + mac->ops.setup_link = e1000_null_ops_generic; + mac->ops.get_link_up_info = e1000_null_link_info; + mac->ops.check_for_link = e1000_null_ops_generic; + /* Management */ + mac->ops.check_mng_mode = e1000_null_mng_mode; + /* VLAN, MC, etc. 
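 *
 * Editor's note (design note; the override example is hypothetical but
 * mirrors how the family-specific init code in this driver behaves): every
 * mac->ops member is first pointed at a harmless stub
 * (e1000_null_ops_generic and friends below), and the per-family
 * init_params routines then replace only the callbacks they actually
 * implement, roughly:
 *
 *     e1000_init_mac_ops_generic(hw);
 *     mac->ops.reset_hw = e1000_reset_hw_ich8lan;
 *     mac->ops.init_hw  = e1000_init_hw_ich8lan;
 *
 * so callers can always invoke an ops pointer without a NULL check.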
*/ + mac->ops.update_mc_addr_list = e1000_null_update_mc; + mac->ops.clear_vfta = e1000_null_mac_generic; + mac->ops.write_vfta = e1000_null_write_vfta; + mac->ops.rar_set = e1000_rar_set_generic; + mac->ops.validate_mdi_setting = e1000_validate_mdi_setting_generic; +} + +/** + * e1000_null_ops_generic - No-op function, returns 0 + * @hw: pointer to the HW structure + **/ +s32 e1000_null_ops_generic(struct e1000_hw E1000_UNUSEDARG *hw) +{ + DEBUGFUNC("e1000_null_ops_generic"); + UNREFERENCED_1PARAMETER(hw); + return E1000_SUCCESS; +} + +/** + * e1000_null_mac_generic - No-op function, return void + * @hw: pointer to the HW structure + **/ +void e1000_null_mac_generic(struct e1000_hw E1000_UNUSEDARG *hw) +{ + DEBUGFUNC("e1000_null_mac_generic"); + UNREFERENCED_1PARAMETER(hw); + return; +} + +/** + * e1000_null_link_info - No-op function, return 0 + * @hw: pointer to the HW structure + **/ +s32 e1000_null_link_info(struct e1000_hw E1000_UNUSEDARG *hw, + u16 E1000_UNUSEDARG *s, u16 E1000_UNUSEDARG *d) +{ + DEBUGFUNC("e1000_null_link_info"); + UNREFERENCED_3PARAMETER(hw, s, d); + return E1000_SUCCESS; +} + +/** + * e1000_null_mng_mode - No-op function, return false + * @hw: pointer to the HW structure + **/ +bool e1000_null_mng_mode(struct e1000_hw E1000_UNUSEDARG *hw) +{ + DEBUGFUNC("e1000_null_mng_mode"); + UNREFERENCED_1PARAMETER(hw); + return false; +} + +/** + * e1000_null_update_mc - No-op function, return void + * @hw: pointer to the HW structure + **/ +void e1000_null_update_mc(struct e1000_hw E1000_UNUSEDARG *hw, + u8 E1000_UNUSEDARG *h, u32 E1000_UNUSEDARG a) +{ + DEBUGFUNC("e1000_null_update_mc"); + UNREFERENCED_3PARAMETER(hw, h, a); + return; +} + +/** + * e1000_null_write_vfta - No-op function, return void + * @hw: pointer to the HW structure + **/ +void e1000_null_write_vfta(struct e1000_hw E1000_UNUSEDARG *hw, + u32 E1000_UNUSEDARG a, u32 E1000_UNUSEDARG b) +{ + DEBUGFUNC("e1000_null_write_vfta"); + UNREFERENCED_3PARAMETER(hw, a, b); + return; +} + +/** + * e1000_null_rar_set - No-op function, return 0 + * @hw: pointer to the HW structure + **/ +int e1000_null_rar_set(struct e1000_hw E1000_UNUSEDARG *hw, + u8 E1000_UNUSEDARG *h, u32 E1000_UNUSEDARG a) +{ + DEBUGFUNC("e1000_null_rar_set"); + UNREFERENCED_3PARAMETER(hw, h, a); + return E1000_SUCCESS; +} + +/** + * e1000_get_bus_info_pci_generic - Get PCI(x) bus information + * @hw: pointer to the HW structure + * + * Determines and stores the system bus information for a particular + * network interface. The following bus information is determined and stored: + * bus speed, bus width, type (PCI/PCIx), and PCI(-x) function. + **/ +s32 e1000_get_bus_info_pci_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + struct e1000_bus_info *bus = &hw->bus; + u32 status = E1000_READ_REG(hw, E1000_STATUS); + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_get_bus_info_pci_generic"); + + /* PCI or PCI-X? */ + bus->type = (status & E1000_STATUS_PCIX_MODE) + ? e1000_bus_type_pcix + : e1000_bus_type_pci; + + /* Bus speed */ + if (bus->type == e1000_bus_type_pci) { + bus->speed = (status & E1000_STATUS_PCI66) + ? 
e1000_bus_speed_66 + : e1000_bus_speed_33; + } else { + switch (status & E1000_STATUS_PCIX_SPEED) { + case E1000_STATUS_PCIX_SPEED_66: + bus->speed = e1000_bus_speed_66; + break; + case E1000_STATUS_PCIX_SPEED_100: + bus->speed = e1000_bus_speed_100; + break; + case E1000_STATUS_PCIX_SPEED_133: + bus->speed = e1000_bus_speed_133; + break; + default: + bus->speed = e1000_bus_speed_reserved; + break; + } + } + + /* Bus width */ + bus->width = (status & E1000_STATUS_BUS64) + ? e1000_bus_width_64 + : e1000_bus_width_32; + + /* Which PCI(-X) function? */ + mac->ops.set_lan_id(hw); + + return ret_val; +} + +/** + * e1000_get_bus_info_pcie_generic - Get PCIe bus information + * @hw: pointer to the HW structure + * + * Determines and stores the system bus information for a particular + * network interface. The following bus information is determined and stored: + * bus speed, bus width, type (PCIe), and PCIe function. + **/ +s32 e1000_get_bus_info_pcie_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + struct e1000_bus_info *bus = &hw->bus; + s32 ret_val; + u16 pcie_link_status; + + DEBUGFUNC("e1000_get_bus_info_pcie_generic"); + + bus->type = e1000_bus_type_pci_express; + + ret_val = e1000_read_pcie_cap_reg(hw, PCIE_LINK_STATUS, + &pcie_link_status); + if (ret_val) { + bus->width = e1000_bus_width_unknown; + bus->speed = e1000_bus_speed_unknown; + } else { + switch (pcie_link_status & PCIE_LINK_SPEED_MASK) { + case PCIE_LINK_SPEED_2500: + bus->speed = e1000_bus_speed_2500; + break; + case PCIE_LINK_SPEED_5000: + bus->speed = e1000_bus_speed_5000; + break; + default: + bus->speed = e1000_bus_speed_unknown; + break; + } + + bus->width = (enum e1000_bus_width)((pcie_link_status & + PCIE_LINK_WIDTH_MASK) >> PCIE_LINK_WIDTH_SHIFT); + } + + mac->ops.set_lan_id(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices + * + * @hw: pointer to the HW structure + * + * Determines the LAN function id by reading memory-mapped registers + * and swaps the port value if requested. + **/ +STATIC void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw) +{ + struct e1000_bus_info *bus = &hw->bus; + u32 reg; + + /* The status register reports the correct function number + * for the device regardless of function swap state. + */ + reg = E1000_READ_REG(hw, E1000_STATUS); + bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT; +} + +/** + * e1000_set_lan_id_multi_port_pci - Set LAN id for PCI multiple port devices + * @hw: pointer to the HW structure + * + * Determines the LAN function id by reading PCI config space. + **/ +void e1000_set_lan_id_multi_port_pci(struct e1000_hw *hw) +{ + struct e1000_bus_info *bus = &hw->bus; + u16 pci_header_type; + u32 status; + + e1000_read_pci_cfg(hw, PCI_HEADER_TYPE_REGISTER, &pci_header_type); + if (pci_header_type & PCI_HEADER_TYPE_MULTIFUNC) { + status = E1000_READ_REG(hw, E1000_STATUS); + bus->func = (status & E1000_STATUS_FUNC_MASK) + >> E1000_STATUS_FUNC_SHIFT; + } else { + bus->func = 0; + } +} + +/** + * e1000_set_lan_id_single_port - Set LAN id for a single port device + * @hw: pointer to the HW structure + * + * Sets the LAN function id to zero for a single port device. 
+ **/ +void e1000_set_lan_id_single_port(struct e1000_hw *hw) +{ + struct e1000_bus_info *bus = &hw->bus; + + bus->func = 0; +} + +/** + * e1000_clear_vfta_generic - Clear VLAN filter table + * @hw: pointer to the HW structure + * + * Clears the register array which contains the VLAN filter table by + * setting all the values to 0. + **/ +void e1000_clear_vfta_generic(struct e1000_hw *hw) +{ + u32 offset; + + DEBUGFUNC("e1000_clear_vfta_generic"); + + for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) { + E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, 0); + E1000_WRITE_FLUSH(hw); + } +} + +/** + * e1000_write_vfta_generic - Write value to VLAN filter table + * @hw: pointer to the HW structure + * @offset: register offset in VLAN filter table + * @value: register value written to VLAN filter table + * + * Writes value at the given offset in the register array which stores + * the VLAN filter table. + **/ +void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value) +{ + DEBUGFUNC("e1000_write_vfta_generic"); + + E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value); + E1000_WRITE_FLUSH(hw); +} + +/** + * e1000_init_rx_addrs_generic - Initialize receive address's + * @hw: pointer to the HW structure + * @rar_count: receive address registers + * + * Setup the receive address registers by setting the base receive address + * register to the devices MAC address and clearing all the other receive + * address registers to 0. + **/ +void e1000_init_rx_addrs_generic(struct e1000_hw *hw, u16 rar_count) +{ + u32 i; + u8 mac_addr[ETH_ADDR_LEN] = {0}; + + DEBUGFUNC("e1000_init_rx_addrs_generic"); + + /* Setup the receive address */ + DEBUGOUT("Programming MAC Address into RAR[0]\n"); + + hw->mac.ops.rar_set(hw, hw->mac.addr, 0); + + /* Zero out the other (rar_entry_count - 1) receive addresses */ + DEBUGOUT1("Clearing RAR[1-%u]\n", rar_count-1); + for (i = 1; i < rar_count; i++) + hw->mac.ops.rar_set(hw, mac_addr, i); +} + +/** + * e1000_check_alt_mac_addr_generic - Check for alternate MAC addr + * @hw: pointer to the HW structure + * + * Checks the nvm for an alternate MAC address. An alternate MAC address + * can be setup by pre-boot software and must be treated like a permanent + * address and must override the actual permanent MAC address. If an + * alternate MAC address is found it is programmed into RAR0, replacing + * the permanent address that was installed into RAR0 by the Si on reset. + * This function will return SUCCESS unless it encounters an error while + * reading the EEPROM. + **/ +s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw) +{ + u32 i; + s32 ret_val; + u16 offset, nvm_alt_mac_addr_offset, nvm_data; + u8 alt_mac_addr[ETH_ADDR_LEN]; + + DEBUGFUNC("e1000_check_alt_mac_addr_generic"); + + ret_val = hw->nvm.ops.read(hw, NVM_COMPAT, 1, &nvm_data); + if (ret_val) + return ret_val; + + /* not supported on older hardware or 82573 */ + if ((hw->mac.type < e1000_82571) || (hw->mac.type == e1000_82573)) + return E1000_SUCCESS; + + /* Alternate MAC address is handled by the option ROM for 82580 + * and newer. SW support not required. 
+ */ + if (hw->mac.type >= e1000_82580) + return E1000_SUCCESS; + + ret_val = hw->nvm.ops.read(hw, NVM_ALT_MAC_ADDR_PTR, 1, + &nvm_alt_mac_addr_offset); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + + if ((nvm_alt_mac_addr_offset == 0xFFFF) || + (nvm_alt_mac_addr_offset == 0x0000)) + /* There is no Alternate MAC Address */ + return E1000_SUCCESS; + + if (hw->bus.func == E1000_FUNC_1) + nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1; + if (hw->bus.func == E1000_FUNC_2) + nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN2; + + if (hw->bus.func == E1000_FUNC_3) + nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN3; + for (i = 0; i < ETH_ADDR_LEN; i += 2) { + offset = nvm_alt_mac_addr_offset + (i >> 1); + ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + + alt_mac_addr[i] = (u8)(nvm_data & 0xFF); + alt_mac_addr[i + 1] = (u8)(nvm_data >> 8); + } + + /* if multicast bit is set, the alternate address will not be used */ + if (alt_mac_addr[0] & 0x01) { + DEBUGOUT("Ignoring Alternate Mac Address with MC bit set\n"); + return E1000_SUCCESS; + } + + /* We have a valid alternate MAC address, and we want to treat it the + * same as the normal permanent MAC address stored by the HW into the + * RAR. Do this by mapping this address into RAR0. + */ + hw->mac.ops.rar_set(hw, alt_mac_addr, 0); + + return E1000_SUCCESS; +} + +/** + * e1000_rar_set_generic - Set receive address register + * @hw: pointer to the HW structure + * @addr: pointer to the receive address + * @index: receive address array register + * + * Sets the receive address array register at index to the address passed + * in by addr. + **/ +STATIC int e1000_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index) +{ + u32 rar_low, rar_high; + + DEBUGFUNC("e1000_rar_set_generic"); + + /* HW expects these in little endian so we reverse the byte order + * from network order (big endian) to little endian + */ + rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) | + ((u32) addr[2] << 16) | ((u32) addr[3] << 24)); + + rar_high = ((u32) addr[4] | ((u32) addr[5] << 8)); + + /* If MAC address zero, no need to set the AV bit */ + if (rar_low || rar_high) + rar_high |= E1000_RAH_AV; + + /* Some bridges will combine consecutive 32-bit writes into + * a single burst write, which will malfunction on some parts. + * The flushes avoid this. + */ + E1000_WRITE_REG(hw, E1000_RAL(index), rar_low); + E1000_WRITE_FLUSH(hw); + E1000_WRITE_REG(hw, E1000_RAH(index), rar_high); + E1000_WRITE_FLUSH(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_hash_mc_addr_generic - Generate a multicast hash value + * @hw: pointer to the HW structure + * @mc_addr: pointer to a multicast address + * + * Generates a multicast address hash value which is used to determine + * the multicast filter table array address and new table value. + **/ +u32 e1000_hash_mc_addr_generic(struct e1000_hw *hw, u8 *mc_addr) +{ + u32 hash_value, hash_mask; + u8 bit_shift = 0; + + DEBUGFUNC("e1000_hash_mc_addr_generic"); + + /* Register count multiplied by bits per register */ + hash_mask = (hw->mac.mta_reg_count * 32) - 1; + + /* For a mc_filter_type of 0, bit_shift is the number of left-shifts + * where 0xFF would still fall within the hash mask. + */ + while (hash_mask >> bit_shift != 0xFF) + bit_shift++; + + /* The portion of the address that is used for the hash table + * is determined by the mc_filter_type setting. 
+ * The algorithm is such that there is a total of 8 bits of shifting. + * The bit_shift for a mc_filter_type of 0 represents the number of + * left-shifts where the MSB of mc_addr[5] would still fall within + * the hash_mask. Case 0 does this exactly. Since there are a total + * of 8 bits of shifting, then mc_addr[4] will shift right the + * remaining number of bits. Thus 8 - bit_shift. The rest of the + * cases are a variation of this algorithm...essentially raising the + * number of bits to shift mc_addr[5] left, while still keeping the + * 8-bit shifting total. + * + * For example, given the following Destination MAC Address and an + * mta register count of 128 (thus a 4096-bit vector and 0xFFF mask), + * we can see that the bit_shift for case 0 is 4. These are the hash + * values resulting from each mc_filter_type... + * [0] [1] [2] [3] [4] [5] + * 01 AA 00 12 34 56 + * LSB MSB + * + * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563 + * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6 + * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x163 + * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634 + */ + switch (hw->mac.mc_filter_type) { + default: + case 0: + break; + case 1: + bit_shift += 1; + break; + case 2: + bit_shift += 2; + break; + case 3: + bit_shift += 4; + break; + } + + hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) | + (((u16) mc_addr[5]) << bit_shift))); + + return hash_value; +} + +/** + * e1000_update_mc_addr_list_generic - Update Multicast addresses + * @hw: pointer to the HW structure + * @mc_addr_list: array of multicast addresses to program + * @mc_addr_count: number of multicast addresses to program + * + * Updates entire Multicast Table Array. + * The caller must have a packed mc_addr_list of multicast addresses. + **/ +void e1000_update_mc_addr_list_generic(struct e1000_hw *hw, + u8 *mc_addr_list, u32 mc_addr_count) +{ + u32 hash_value, hash_bit, hash_reg; + int i; + + DEBUGFUNC("e1000_update_mc_addr_list_generic"); + + /* clear mta_shadow */ + memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow)); + + /* update mta_shadow from mc_addr_list */ + for (i = 0; (u32) i < mc_addr_count; i++) { + hash_value = e1000_hash_mc_addr_generic(hw, mc_addr_list); + + hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1); + hash_bit = hash_value & 0x1F; + + hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit); + mc_addr_list += (ETH_ADDR_LEN); + } + + /* replace the entire MTA table */ + for (i = hw->mac.mta_reg_count - 1; i >= 0; i--) + E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, hw->mac.mta_shadow[i]); + E1000_WRITE_FLUSH(hw); +} + +/** + * e1000_pcix_mmrbc_workaround_generic - Fix incorrect MMRBC value + * @hw: pointer to the HW structure + * + * In certain situations, a system BIOS may report that the PCIx maximum + * memory read byte count (MMRBC) value is higher than than the actual + * value. We check the PCIx command register with the current PCIx status + * register. 
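Editor's note: the multicast hash that e1000_hash_mc_addr_generic documents above can be checked with a small standalone program. The sketch below assumes the same 128-register MTA (hash_mask 0xFFF) and the example address 01:AA:00:12:34:56 used in the comment; the function and variable names are illustrative and nothing here is part of the patch. As computed here, the filter-type-2 expression quoted in the comment evaluates to 0x58D rather than the 0x163 shown.

/* Standalone sketch of the multicast hash described above; compiles on
 * its own with a C99 compiler, no driver headers needed.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t hash_mc_addr(const uint8_t *mc, uint32_t mta_reg_count,
			     unsigned int filter_type)
{
	uint32_t hash_mask = (mta_reg_count * 32) - 1;	/* 0xFFF for 128 regs */
	unsigned int bit_shift = 0;

	/* number of left-shifts where 0xFF still fits inside the mask */
	while (hash_mask >> bit_shift != 0xFF)
		bit_shift++;

	switch (filter_type) {
	case 1: bit_shift += 1; break;
	case 2: bit_shift += 2; break;
	case 3: bit_shift += 4; break;
	default: break;					/* filter type 0 */
	}

	return hash_mask & ((mc[4] >> (8 - bit_shift)) |
			    ((uint16_t)mc[5] << bit_shift));
}

int main(void)
{
	const uint8_t mc[6] = { 0x01, 0xAA, 0x00, 0x12, 0x34, 0x56 };
	unsigned int type;

	for (type = 0; type < 4; type++)
		printf("filter type %u: hash = 0x%03X\n", type,
		       (unsigned int)hash_mc_addr(mc, 128, type));
	/* prints 0x563, 0xAC6, 0x58D and 0x634; e1000_update_mc_addr_list_generic
	 * then maps a hash such as 0x563 to MTA register (0x563 >> 5) & 0x7F = 0x2B,
	 * bit 0x563 & 0x1F = 3.
	 */
	return 0;
}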
+ **/ +void e1000_pcix_mmrbc_workaround_generic(struct e1000_hw *hw) +{ + u16 cmd_mmrbc; + u16 pcix_cmd; + u16 pcix_stat_hi_word; + u16 stat_mmrbc; + + DEBUGFUNC("e1000_pcix_mmrbc_workaround_generic"); + + /* Workaround for PCI-X issue when BIOS sets MMRBC incorrectly */ + if (hw->bus.type != e1000_bus_type_pcix) + return; + + e1000_read_pci_cfg(hw, PCIX_COMMAND_REGISTER, &pcix_cmd); + e1000_read_pci_cfg(hw, PCIX_STATUS_REGISTER_HI, &pcix_stat_hi_word); + cmd_mmrbc = (pcix_cmd & PCIX_COMMAND_MMRBC_MASK) >> + PCIX_COMMAND_MMRBC_SHIFT; + stat_mmrbc = (pcix_stat_hi_word & PCIX_STATUS_HI_MMRBC_MASK) >> + PCIX_STATUS_HI_MMRBC_SHIFT; + if (stat_mmrbc == PCIX_STATUS_HI_MMRBC_4K) + stat_mmrbc = PCIX_STATUS_HI_MMRBC_2K; + if (cmd_mmrbc > stat_mmrbc) { + pcix_cmd &= ~PCIX_COMMAND_MMRBC_MASK; + pcix_cmd |= stat_mmrbc << PCIX_COMMAND_MMRBC_SHIFT; + e1000_write_pci_cfg(hw, PCIX_COMMAND_REGISTER, &pcix_cmd); + } +} + +/** + * e1000_clear_hw_cntrs_base_generic - Clear base hardware counters + * @hw: pointer to the HW structure + * + * Clears the base hardware counters by reading the counter registers. + **/ +void e1000_clear_hw_cntrs_base_generic(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_clear_hw_cntrs_base_generic"); + + E1000_READ_REG(hw, E1000_CRCERRS); + E1000_READ_REG(hw, E1000_SYMERRS); + E1000_READ_REG(hw, E1000_MPC); + E1000_READ_REG(hw, E1000_SCC); + E1000_READ_REG(hw, E1000_ECOL); + E1000_READ_REG(hw, E1000_MCC); + E1000_READ_REG(hw, E1000_LATECOL); + E1000_READ_REG(hw, E1000_COLC); + E1000_READ_REG(hw, E1000_DC); + E1000_READ_REG(hw, E1000_SEC); + E1000_READ_REG(hw, E1000_RLEC); + E1000_READ_REG(hw, E1000_XONRXC); + E1000_READ_REG(hw, E1000_XONTXC); + E1000_READ_REG(hw, E1000_XOFFRXC); + E1000_READ_REG(hw, E1000_XOFFTXC); + E1000_READ_REG(hw, E1000_FCRUC); + E1000_READ_REG(hw, E1000_GPRC); + E1000_READ_REG(hw, E1000_BPRC); + E1000_READ_REG(hw, E1000_MPRC); + E1000_READ_REG(hw, E1000_GPTC); + E1000_READ_REG(hw, E1000_GORCL); + E1000_READ_REG(hw, E1000_GORCH); + E1000_READ_REG(hw, E1000_GOTCL); + E1000_READ_REG(hw, E1000_GOTCH); + E1000_READ_REG(hw, E1000_RNBC); + E1000_READ_REG(hw, E1000_RUC); + E1000_READ_REG(hw, E1000_RFC); + E1000_READ_REG(hw, E1000_ROC); + E1000_READ_REG(hw, E1000_RJC); + E1000_READ_REG(hw, E1000_TORL); + E1000_READ_REG(hw, E1000_TORH); + E1000_READ_REG(hw, E1000_TOTL); + E1000_READ_REG(hw, E1000_TOTH); + E1000_READ_REG(hw, E1000_TPR); + E1000_READ_REG(hw, E1000_TPT); + E1000_READ_REG(hw, E1000_MPTC); + E1000_READ_REG(hw, E1000_BPTC); +} + +/** + * e1000_check_for_copper_link_generic - Check for link (Copper) + * @hw: pointer to the HW structure + * + * Checks to see of the link status of the hardware has changed. If a + * change in link status has been detected, then we read the PHY registers + * to get the current speed/duplex if link exists. + **/ +s32 e1000_check_for_copper_link_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val; + bool link; + + DEBUGFUNC("e1000_check_for_copper_link"); + + /* We only want to go out to the PHY registers to see if Auto-Neg + * has completed and/or if our link status has changed. The + * get_link_status flag is set upon receiving a Link Status + * Change or Rx Sequence Error interrupt. + */ + if (!mac->get_link_status) + return E1000_SUCCESS; + + /* First we want to see if the MII Status Register reports + * link. If so, then we want to get the current speed/duplex + * of the PHY. 
+ */ + ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link); + if (ret_val) + return ret_val; + + if (!link) + return E1000_SUCCESS; /* No link detected */ + + mac->get_link_status = false; + + /* Check if there was DownShift, must be checked + * immediately after link-up + */ + e1000_check_downshift_generic(hw); + + /* If we are forcing speed/duplex, then we simply return since + * we have already determined whether we have link or not. + */ + if (!mac->autoneg) + return -E1000_ERR_CONFIG; + + /* Auto-Neg is enabled. Auto Speed Detection takes care + * of MAC speed/duplex configuration. So we only need to + * configure Collision Distance in the MAC. + */ + mac->ops.config_collision_dist(hw); + + /* Configure Flow Control now that Auto-Neg has completed. + * First, we need to restore the desired flow control + * settings because we may have had to re-autoneg with a + * different link partner. + */ + ret_val = e1000_config_fc_after_link_up_generic(hw); + if (ret_val) + DEBUGOUT("Error configuring flow control\n"); + + return ret_val; +} + +/** + * e1000_check_for_fiber_link_generic - Check for link (Fiber) + * @hw: pointer to the HW structure + * + * Checks for link up on the hardware. If link is not up and we have + * a signal, then we need to force link up. + **/ +s32 e1000_check_for_fiber_link_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 rxcw; + u32 ctrl; + u32 status; + s32 ret_val; + + DEBUGFUNC("e1000_check_for_fiber_link_generic"); + + ctrl = E1000_READ_REG(hw, E1000_CTRL); + status = E1000_READ_REG(hw, E1000_STATUS); + rxcw = E1000_READ_REG(hw, E1000_RXCW); + + /* If we don't have link (auto-negotiation failed or link partner + * cannot auto-negotiate), the cable is plugged in (we have signal), + * and our link partner is not trying to auto-negotiate with us (we + * are receiving idles or data), we need to force link up. We also + * need to give auto-negotiation time to complete, in case the cable + * was just plugged in. The autoneg_failed flag does this. + */ + /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */ + if ((ctrl & E1000_CTRL_SWDPIN1) && !(status & E1000_STATUS_LU) && + !(rxcw & E1000_RXCW_C)) { + if (!mac->autoneg_failed) { + mac->autoneg_failed = true; + return E1000_SUCCESS; + } + DEBUGOUT("NOT Rx'ing /C/, disable AutoNeg and force link.\n"); + + /* Disable auto-negotiation in the TXCW register */ + E1000_WRITE_REG(hw, E1000_TXCW, (mac->txcw & ~E1000_TXCW_ANE)); + + /* Force link-up and also force full-duplex. */ + ctrl = E1000_READ_REG(hw, E1000_CTRL); + ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD); + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + + /* Configure Flow Control after forcing link up. */ + ret_val = e1000_config_fc_after_link_up_generic(hw); + if (ret_val) { + DEBUGOUT("Error configuring flow control\n"); + return ret_val; + } + } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { + /* If we are forcing link and we are receiving /C/ ordered + * sets, re-enable auto-negotiation in the TXCW register + * and disable forced link in the Device Control register + * in an attempt to auto-negotiate with our link partner. + */ + DEBUGOUT("Rx'ing /C/, enable AutoNeg and stop forcing link.\n"); + E1000_WRITE_REG(hw, E1000_TXCW, mac->txcw); + E1000_WRITE_REG(hw, E1000_CTRL, (ctrl & ~E1000_CTRL_SLU)); + + mac->serdes_has_link = true; + } + + return E1000_SUCCESS; +} + +/** + * e1000_check_for_serdes_link_generic - Check for link (Serdes) + * @hw: pointer to the HW structure + * + * Checks for link up on the hardware. 
If link is not up and we have + * a signal, then we need to force link up. + **/ +s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 rxcw; + u32 ctrl; + u32 status; + s32 ret_val; + + DEBUGFUNC("e1000_check_for_serdes_link_generic"); + + ctrl = E1000_READ_REG(hw, E1000_CTRL); + status = E1000_READ_REG(hw, E1000_STATUS); + rxcw = E1000_READ_REG(hw, E1000_RXCW); + + /* If we don't have link (auto-negotiation failed or link partner + * cannot auto-negotiate), and our link partner is not trying to + * auto-negotiate with us (we are receiving idles or data), + * we need to force link up. We also need to give auto-negotiation + * time to complete. + */ + /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */ + if (!(status & E1000_STATUS_LU) && !(rxcw & E1000_RXCW_C)) { + if (!mac->autoneg_failed) { + mac->autoneg_failed = true; + return E1000_SUCCESS; + } + DEBUGOUT("NOT Rx'ing /C/, disable AutoNeg and force link.\n"); + + /* Disable auto-negotiation in the TXCW register */ + E1000_WRITE_REG(hw, E1000_TXCW, (mac->txcw & ~E1000_TXCW_ANE)); + + /* Force link-up and also force full-duplex. */ + ctrl = E1000_READ_REG(hw, E1000_CTRL); + ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD); + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + + /* Configure Flow Control after forcing link up. */ + ret_val = e1000_config_fc_after_link_up_generic(hw); + if (ret_val) { + DEBUGOUT("Error configuring flow control\n"); + return ret_val; + } + } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { + /* If we are forcing link and we are receiving /C/ ordered + * sets, re-enable auto-negotiation in the TXCW register + * and disable forced link in the Device Control register + * in an attempt to auto-negotiate with our link partner. + */ + DEBUGOUT("Rx'ing /C/, enable AutoNeg and stop forcing link.\n"); + E1000_WRITE_REG(hw, E1000_TXCW, mac->txcw); + E1000_WRITE_REG(hw, E1000_CTRL, (ctrl & ~E1000_CTRL_SLU)); + + mac->serdes_has_link = true; + } else if (!(E1000_TXCW_ANE & E1000_READ_REG(hw, E1000_TXCW))) { + /* If we force link for non-auto-negotiation switch, check + * link status based on MAC synchronization for internal + * serdes media type. + */ + /* SYNCH bit and IV bit are sticky. */ + usec_delay(10); + rxcw = E1000_READ_REG(hw, E1000_RXCW); + if (rxcw & E1000_RXCW_SYNCH) { + if (!(rxcw & E1000_RXCW_IV)) { + mac->serdes_has_link = true; + DEBUGOUT("SERDES: Link up - forced.\n"); + } + } else { + mac->serdes_has_link = false; + DEBUGOUT("SERDES: Link down - force failed.\n"); + } + } + + if (E1000_TXCW_ANE & E1000_READ_REG(hw, E1000_TXCW)) { + status = E1000_READ_REG(hw, E1000_STATUS); + if (status & E1000_STATUS_LU) { + /* SYNCH bit and IV bit are sticky, so reread rxcw. */ + usec_delay(10); + rxcw = E1000_READ_REG(hw, E1000_RXCW); + if (rxcw & E1000_RXCW_SYNCH) { + if (!(rxcw & E1000_RXCW_IV)) { + mac->serdes_has_link = true; + DEBUGOUT("SERDES: Link up - autoneg completed successfully.\n"); + } else { + mac->serdes_has_link = false; + DEBUGOUT("SERDES: Link down - invalid codewords detected in autoneg.\n"); + } + } else { + mac->serdes_has_link = false; + DEBUGOUT("SERDES: Link down - no sync.\n"); + } + } else { + mac->serdes_has_link = false; + DEBUGOUT("SERDES: Link down - autoneg failed\n"); + } + } + + return E1000_SUCCESS; +} + +/** + * e1000_set_default_fc_generic - Set flow control default values + * @hw: pointer to the HW structure + * + * Read the EEPROM for the default values for flow control and store the + * values. 
+ **/ +s32 e1000_set_default_fc_generic(struct e1000_hw *hw) +{ + s32 ret_val; + u16 nvm_data; + u16 nvm_offset = 0; + + DEBUGFUNC("e1000_set_default_fc_generic"); + + /* Read and store word 0x0F of the EEPROM. This word contains bits + * that determine the hardware's default PAUSE (flow control) mode, + * a bit that determines whether the HW defaults to enabling or + * disabling auto-negotiation, and the direction of the + * SW defined pins. If there is no SW over-ride of the flow + * control setting, then the variable hw->fc will + * be initialized based on a value in the EEPROM. + */ + if (hw->mac.type == e1000_i350) { + nvm_offset = NVM_82580_LAN_FUNC_OFFSET(hw->bus.func); + ret_val = hw->nvm.ops.read(hw, + NVM_INIT_CONTROL2_REG + + nvm_offset, + 1, &nvm_data); + } else { + ret_val = hw->nvm.ops.read(hw, + NVM_INIT_CONTROL2_REG, + 1, &nvm_data); + } + + + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + + if (!(nvm_data & NVM_WORD0F_PAUSE_MASK)) + hw->fc.requested_mode = e1000_fc_none; + else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == + NVM_WORD0F_ASM_DIR) + hw->fc.requested_mode = e1000_fc_tx_pause; + else + hw->fc.requested_mode = e1000_fc_full; + + return E1000_SUCCESS; +} + +/** + * e1000_setup_link_generic - Setup flow control and link settings + * @hw: pointer to the HW structure + * + * Determines which flow control settings to use, then configures flow + * control. Calls the appropriate media-specific link configuration + * function. Assuming the adapter has a valid link partner, a valid link + * should be established. Assumes the hardware has previously been reset + * and the transmitter and receiver are not enabled. + **/ +s32 e1000_setup_link_generic(struct e1000_hw *hw) +{ + s32 ret_val; + + DEBUGFUNC("e1000_setup_link_generic"); + + /* In the case of the phy reset being blocked, we already have a link. + * We do not need to set it up again. + */ + if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw)) + return E1000_SUCCESS; + + /* If requested flow control is set to default, set flow control + * based on the EEPROM flow control settings. + */ + if (hw->fc.requested_mode == e1000_fc_default) { + ret_val = e1000_set_default_fc_generic(hw); + if (ret_val) + return ret_val; + } + + /* Save off the requested flow control mode for use later. Depending + * on the link partner's capabilities, we may or may not use this mode. + */ + hw->fc.current_mode = hw->fc.requested_mode; + + DEBUGOUT1("After fix-ups FlowControl is now = %x\n", + hw->fc.current_mode); + + /* Call the necessary media_type subroutine to configure the link. */ + ret_val = hw->mac.ops.setup_physical_interface(hw); + if (ret_val) + return ret_val; + + /* Initialize the flow control address, type, and PAUSE timer + * registers to their default values. This is done even if flow + * control is disabled, because it does not hurt anything to + * initialize these registers. + */ + DEBUGOUT("Initializing the Flow Control address, type and timer regs\n"); + E1000_WRITE_REG(hw, E1000_FCT, FLOW_CONTROL_TYPE); + E1000_WRITE_REG(hw, E1000_FCAH, FLOW_CONTROL_ADDRESS_HIGH); + E1000_WRITE_REG(hw, E1000_FCAL, FLOW_CONTROL_ADDRESS_LOW); + + E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time); + + return e1000_set_fc_watermarks_generic(hw); +} + +/** + * e1000_commit_fc_settings_generic - Configure flow control + * @hw: pointer to the HW structure + * + * Write the flow control settings to the Transmit Config Word Register (TXCW) + * base on the flow control settings in e1000_mac_info. 
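Editor's note: a minimal standalone sketch of the word-0x0F decode performed by e1000_set_default_fc_generic above. The PAUSE-mask and ASM_DIR bit values (0x3000 and 0x2000) are assumptions about NVM_WORD0F_PAUSE_MASK and NVM_WORD0F_ASM_DIR and should be checked against the defines elsewhere in this base code; everything else here is illustrative.

/* Decode the EEPROM word-0x0F PAUSE bits into a default flow-control mode,
 * mirroring the three-way test above.  Assumed bit values are flagged.
 */
#include <stdint.h>
#include <stdio.h>

static const char *default_fc_from_word0f(uint16_t w)
{
	const uint16_t pause_mask = 0x3000;	/* assumed NVM_WORD0F_PAUSE_MASK */
	const uint16_t asm_dir    = 0x2000;	/* assumed NVM_WORD0F_ASM_DIR */

	if (!(w & pause_mask))
		return "none";			/* neither PAUSE bit set */
	if ((w & pause_mask) == asm_dir)
		return "tx-pause";		/* ASM_DIR only */
	return "full";				/* PAUSE set, with or without ASM_DIR */
}

int main(void)
{
	printf("%s %s %s\n", default_fc_from_word0f(0x0000),
	       default_fc_from_word0f(0x2000), default_fc_from_word0f(0x3000));
	/* prints: none tx-pause full */
	return 0;
}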
+ **/ +s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 txcw; + + DEBUGFUNC("e1000_commit_fc_settings_generic"); + + /* Check for a software override of the flow control settings, and + * setup the device accordingly. If auto-negotiation is enabled, then + * software will have to set the "PAUSE" bits to the correct value in + * the Transmit Config Word Register (TXCW) and re-start auto- + * negotiation. However, if auto-negotiation is disabled, then + * software will have to manually configure the two flow control enable + * bits in the CTRL register. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames, + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames but we + * do not support receiving pause frames). + * 3: Both Rx and Tx flow control (symmetric) are enabled. + */ + switch (hw->fc.current_mode) { + case e1000_fc_none: + /* Flow control completely disabled by a software over-ride. */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD); + break; + case e1000_fc_rx_pause: + /* Rx Flow control is enabled and Tx Flow control is disabled + * by a software over-ride. Since there really isn't a way to + * advertise that we are capable of Rx Pause ONLY, we will + * advertise that we support both symmetric and asymmetric Rx + * PAUSE. Later, we will disable the adapter's ability to send + * PAUSE frames. + */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); + break; + case e1000_fc_tx_pause: + /* Tx Flow control is enabled, and Rx Flow control is disabled, + * by a software over-ride. + */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR); + break; + case e1000_fc_full: + /* Flow control (both Rx and Tx) is enabled by a software + * over-ride. + */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); + break; + default: + DEBUGOUT("Flow control param set incorrectly\n"); + return -E1000_ERR_CONFIG; + break; + } + + E1000_WRITE_REG(hw, E1000_TXCW, txcw); + mac->txcw = txcw; + + return E1000_SUCCESS; +} + +/** + * e1000_poll_fiber_serdes_link_generic - Poll for link up + * @hw: pointer to the HW structure + * + * Polls for link up by reading the status register, if link fails to come + * up with auto-negotiation, then the link is forced if a signal is detected. + **/ +s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 i, status; + s32 ret_val; + + DEBUGFUNC("e1000_poll_fiber_serdes_link_generic"); + + /* If we have a signal (the cable is plugged in, or assumed true for + * serdes media) then poll for a "Link-Up" indication in the Device + * Status Register. Time-out if a link isn't seen in 500 milliseconds + * seconds (Auto-negotiation should complete in less than 500 + * milliseconds even if the other end is doing it in SW). + */ + for (i = 0; i < FIBER_LINK_UP_LIMIT; i++) { + msec_delay(10); + status = E1000_READ_REG(hw, E1000_STATUS); + if (status & E1000_STATUS_LU) + break; + } + if (i == FIBER_LINK_UP_LIMIT) { + DEBUGOUT("Never got a valid link from auto-neg!!!\n"); + mac->autoneg_failed = true; + /* AutoNeg failed to achieve a link, so we'll call + * mac->check_for_link. This routine will force the + * link up if we detect a signal. This will allow us to + * communicate with non-autonegotiating link partners. 
+ */ + ret_val = mac->ops.check_for_link(hw); + if (ret_val) { + DEBUGOUT("Error while checking for link\n"); + return ret_val; + } + mac->autoneg_failed = false; + } else { + mac->autoneg_failed = false; + DEBUGOUT("Valid Link Found\n"); + } + + return E1000_SUCCESS; +} + +/** + * e1000_setup_fiber_serdes_link_generic - Setup link for fiber/serdes + * @hw: pointer to the HW structure + * + * Configures collision distance and flow control for fiber and serdes + * links. Upon successful setup, poll for link. + **/ +s32 e1000_setup_fiber_serdes_link_generic(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + + DEBUGFUNC("e1000_setup_fiber_serdes_link_generic"); + + ctrl = E1000_READ_REG(hw, E1000_CTRL); + + /* Take the link out of reset */ + ctrl &= ~E1000_CTRL_LRST; + + hw->mac.ops.config_collision_dist(hw); + + ret_val = e1000_commit_fc_settings_generic(hw); + if (ret_val) + return ret_val; + + /* Since auto-negotiation is enabled, take the link out of reset (the + * link will be in reset, because we previously reset the chip). This + * will restart auto-negotiation. If auto-negotiation is successful + * then the link-up status bit will be set and the flow control enable + * bits (RFCE and TFCE) will be set according to their negotiated value. + */ + DEBUGOUT("Auto-negotiation enabled\n"); + + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + E1000_WRITE_FLUSH(hw); + msec_delay(1); + + /* For these adapters, the SW definable pin 1 is set when the optics + * detect a signal. If we have a signal, then poll for a "Link-Up" + * indication. + */ + if (hw->phy.media_type == e1000_media_type_internal_serdes || + (E1000_READ_REG(hw, E1000_CTRL) & E1000_CTRL_SWDPIN1)) { + ret_val = e1000_poll_fiber_serdes_link_generic(hw); + } else { + DEBUGOUT("No signal detected\n"); + } + + return ret_val; +} + +/** + * e1000_config_collision_dist_generic - Configure collision distance + * @hw: pointer to the HW structure + * + * Configures the collision distance to the default value and is used + * during link setup. + **/ +STATIC void e1000_config_collision_dist_generic(struct e1000_hw *hw) +{ + u32 tctl; + + DEBUGFUNC("e1000_config_collision_dist_generic"); + + tctl = E1000_READ_REG(hw, E1000_TCTL); + + tctl &= ~E1000_TCTL_COLD; + tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT; + + E1000_WRITE_REG(hw, E1000_TCTL, tctl); + E1000_WRITE_FLUSH(hw); +} + +/** + * e1000_set_fc_watermarks_generic - Set flow control high/low watermarks + * @hw: pointer to the HW structure + * + * Sets the flow control high/low threshold (watermark) registers. If + * flow control XON frame transmission is enabled, then set XON frame + * transmission as well. + **/ +s32 e1000_set_fc_watermarks_generic(struct e1000_hw *hw) +{ + u32 fcrtl = 0, fcrth = 0; + + DEBUGFUNC("e1000_set_fc_watermarks_generic"); + + /* Set the flow control receive threshold registers. Normally, + * these registers will be set to a default threshold that may be + * adjusted later by the driver's runtime code. However, if the + * ability to transmit pause frames is not enabled, then these + * registers will be set to 0. + */ + if (hw->fc.current_mode & e1000_fc_tx_pause) { + /* We need to set up the Receive Threshold high and low water + * marks as well as (optionally) enabling the transmission of + * XON frames. 
+ */ + fcrtl = hw->fc.low_water; + if (hw->fc.send_xon) + fcrtl |= E1000_FCRTL_XONE; + + fcrth = hw->fc.high_water; + } + E1000_WRITE_REG(hw, E1000_FCRTL, fcrtl); + E1000_WRITE_REG(hw, E1000_FCRTH, fcrth); + + return E1000_SUCCESS; +} + +/** + * e1000_force_mac_fc_generic - Force the MAC's flow control settings + * @hw: pointer to the HW structure + * + * Force the MAC's flow control settings. Sets the TFCE and RFCE bits in the + * device control register to reflect the adapter settings. TFCE and RFCE + * need to be explicitly set by software when a copper PHY is used because + * autonegotiation is managed by the PHY rather than the MAC. Software must + * also configure these bits when link is forced on a fiber connection. + **/ +s32 e1000_force_mac_fc_generic(struct e1000_hw *hw) +{ + u32 ctrl; + + DEBUGFUNC("e1000_force_mac_fc_generic"); + + ctrl = E1000_READ_REG(hw, E1000_CTRL); + + /* Because we didn't get link via the internal auto-negotiation + * mechanism (we either forced link or we got link via PHY + * auto-neg), we have to manually enable/disable transmit an + * receive flow control. + * + * The "Case" statement below enables/disable flow control + * according to the "hw->fc.current_mode" parameter. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause + * frames but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames + * frames but we do not receive pause frames). + * 3: Both Rx and Tx flow control (symmetric) is enabled. + * other: No other values should be possible at this point. + */ + DEBUGOUT1("hw->fc.current_mode = %u\n", hw->fc.current_mode); + + switch (hw->fc.current_mode) { + case e1000_fc_none: + ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE)); + break; + case e1000_fc_rx_pause: + ctrl &= (~E1000_CTRL_TFCE); + ctrl |= E1000_CTRL_RFCE; + break; + case e1000_fc_tx_pause: + ctrl &= (~E1000_CTRL_RFCE); + ctrl |= E1000_CTRL_TFCE; + break; + case e1000_fc_full: + ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE); + break; + default: + DEBUGOUT("Flow control param set incorrectly\n"); + return -E1000_ERR_CONFIG; + } + + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + + return E1000_SUCCESS; +} + +/** + * e1000_config_fc_after_link_up_generic - Configures flow control after link + * @hw: pointer to the HW structure + * + * Checks the status of auto-negotiation after link up to ensure that the + * speed and duplex were not forced. If the link needed to be forced, then + * flow control needs to be forced also. If auto-negotiation is enabled + * and did not fail, then we configure flow control based on our link + * partner. + **/ +s32 e1000_config_fc_after_link_up_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val = E1000_SUCCESS; + u32 pcs_status_reg, pcs_adv_reg, pcs_lp_ability_reg, pcs_ctrl_reg; + u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg; + u16 speed, duplex; + + DEBUGFUNC("e1000_config_fc_after_link_up_generic"); + + /* Check for the case where we have fiber media and auto-neg failed + * so we had to force link. In this case, we need to force the + * configuration of the MAC to match the "fc" parameter. 
+ */ + if (mac->autoneg_failed) { + if (hw->phy.media_type == e1000_media_type_fiber || + hw->phy.media_type == e1000_media_type_internal_serdes) + ret_val = e1000_force_mac_fc_generic(hw); + } else { + if (hw->phy.media_type == e1000_media_type_copper) + ret_val = e1000_force_mac_fc_generic(hw); + } + + if (ret_val) { + DEBUGOUT("Error forcing flow control settings\n"); + return ret_val; + } + + /* Check for the case where we have copper media and auto-neg is + * enabled. In this case, we need to check and see if Auto-Neg + * has completed, and if so, how the PHY and link partner has + * flow control configured. + */ + if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) { + /* Read the MII Status Register and check to see if AutoNeg + * has completed. We read this twice because this reg has + * some "sticky" (latched) bits. + */ + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) { + DEBUGOUT("Copper PHY and Auto Neg has not completed.\n"); + return ret_val; + } + + /* The AutoNeg process has completed, so we now need to + * read both the Auto Negotiation Advertisement + * Register (Address 4) and the Auto_Negotiation Base + * Page Ability Register (Address 5) to determine how + * flow control was negotiated. + */ + ret_val = hw->phy.ops.read_reg(hw, PHY_AUTONEG_ADV, + &mii_nway_adv_reg); + if (ret_val) + return ret_val; + ret_val = hw->phy.ops.read_reg(hw, PHY_LP_ABILITY, + &mii_nway_lp_ability_reg); + if (ret_val) + return ret_val; + + /* Two bits in the Auto Negotiation Advertisement Register + * (Address 4) and two bits in the Auto Negotiation Base + * Page Ability Register (Address 5) determine flow control + * for both the PHY and the link partner. The following + * table, taken out of the IEEE 802.3ab/D6.0 dated March 25, + * 1999, describes these PAUSE resolution bits and how flow + * control is determined based upon these settings. + * NOTE: DC = Don't Care + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution + *-------|---------|-------|---------|-------------------- + * 0 | 0 | DC | DC | e1000_fc_none + * 0 | 1 | 0 | DC | e1000_fc_none + * 0 | 1 | 1 | 0 | e1000_fc_none + * 0 | 1 | 1 | 1 | e1000_fc_tx_pause + * 1 | 0 | 0 | DC | e1000_fc_none + * 1 | DC | 1 | DC | e1000_fc_full + * 1 | 1 | 0 | 0 | e1000_fc_none + * 1 | 1 | 0 | 1 | e1000_fc_rx_pause + * + * Are both PAUSE bits set to 1? If so, this implies + * Symmetric Flow Control is enabled at both ends. The + * ASM_DIR bits are irrelevant per the spec. + * + * For Symmetric Flow Control: + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 1 | DC | 1 | DC | E1000_fc_full + * + */ + if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) { + /* Now we need to check if the user selected Rx ONLY + * of pause frames. In this case, we had to advertise + * FULL flow control because we could not advertise Rx + * ONLY. Hence, we must now check to see if we need to + * turn OFF the TRANSMISSION of PAUSE frames. 
+ */ + if (hw->fc.requested_mode == e1000_fc_full) { + hw->fc.current_mode = e1000_fc_full; + DEBUGOUT("Flow Control = FULL.\n"); + } else { + hw->fc.current_mode = e1000_fc_rx_pause; + DEBUGOUT("Flow Control = Rx PAUSE frames only.\n"); + } + } + /* For receiving PAUSE frames ONLY. + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 0 | 1 | 1 | 1 | e1000_fc_tx_pause + */ + else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && + (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { + hw->fc.current_mode = e1000_fc_tx_pause; + DEBUGOUT("Flow Control = Tx PAUSE frames only.\n"); + } + /* For transmitting PAUSE frames ONLY. + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 1 | 1 | 0 | 1 | e1000_fc_rx_pause + */ + else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && + !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { + hw->fc.current_mode = e1000_fc_rx_pause; + DEBUGOUT("Flow Control = Rx PAUSE frames only.\n"); + } else { + /* Per the IEEE spec, at this point flow control + * should be disabled. + */ + hw->fc.current_mode = e1000_fc_none; + DEBUGOUT("Flow Control = NONE.\n"); + } + + /* Now we need to do one last check... If we auto- + * negotiated to HALF DUPLEX, flow control should not be + * enabled per IEEE 802.3 spec. + */ + ret_val = mac->ops.get_link_up_info(hw, &speed, &duplex); + if (ret_val) { + DEBUGOUT("Error getting link speed and duplex\n"); + return ret_val; + } + + if (duplex == HALF_DUPLEX) + hw->fc.current_mode = e1000_fc_none; + + /* Now we call a subroutine to actually force the MAC + * controller to use the correct flow control settings. + */ + ret_val = e1000_force_mac_fc_generic(hw); + if (ret_val) { + DEBUGOUT("Error forcing flow control settings\n"); + return ret_val; + } + } + + /* Check for the case where we have SerDes media and auto-neg is + * enabled. In this case, we need to check and see if Auto-Neg + * has completed, and if so, how the PHY and link partner has + * flow control configured. + */ + if ((hw->phy.media_type == e1000_media_type_internal_serdes) && + mac->autoneg) { + /* Read the PCS_LSTS and check to see if AutoNeg + * has completed. + */ + pcs_status_reg = E1000_READ_REG(hw, E1000_PCS_LSTAT); + + if (!(pcs_status_reg & E1000_PCS_LSTS_AN_COMPLETE)) { + DEBUGOUT("PCS Auto Neg has not completed.\n"); + return ret_val; + } + + /* The AutoNeg process has completed, so we now need to + * read both the Auto Negotiation Advertisement + * Register (PCS_ANADV) and the Auto_Negotiation Base + * Page Ability Register (PCS_LPAB) to determine how + * flow control was negotiated. + */ + pcs_adv_reg = E1000_READ_REG(hw, E1000_PCS_ANADV); + pcs_lp_ability_reg = E1000_READ_REG(hw, E1000_PCS_LPAB); + + /* Two bits in the Auto Negotiation Advertisement Register + * (PCS_ANADV) and two bits in the Auto Negotiation Base + * Page Ability Register (PCS_LPAB) determine flow control + * for both the PHY and the link partner. The following + * table, taken out of the IEEE 802.3ab/D6.0 dated March 25, + * 1999, describes these PAUSE resolution bits and how flow + * control is determined based upon these settings. 
+ * NOTE: DC = Don't Care + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution + *-------|---------|-------|---------|-------------------- + * 0 | 0 | DC | DC | e1000_fc_none + * 0 | 1 | 0 | DC | e1000_fc_none + * 0 | 1 | 1 | 0 | e1000_fc_none + * 0 | 1 | 1 | 1 | e1000_fc_tx_pause + * 1 | 0 | 0 | DC | e1000_fc_none + * 1 | DC | 1 | DC | e1000_fc_full + * 1 | 1 | 0 | 0 | e1000_fc_none + * 1 | 1 | 0 | 1 | e1000_fc_rx_pause + * + * Are both PAUSE bits set to 1? If so, this implies + * Symmetric Flow Control is enabled at both ends. The + * ASM_DIR bits are irrelevant per the spec. + * + * For Symmetric Flow Control: + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 1 | DC | 1 | DC | e1000_fc_full + * + */ + if ((pcs_adv_reg & E1000_TXCW_PAUSE) && + (pcs_lp_ability_reg & E1000_TXCW_PAUSE)) { + /* Now we need to check if the user selected Rx ONLY + * of pause frames. In this case, we had to advertise + * FULL flow control because we could not advertise Rx + * ONLY. Hence, we must now check to see if we need to + * turn OFF the TRANSMISSION of PAUSE frames. + */ + if (hw->fc.requested_mode == e1000_fc_full) { + hw->fc.current_mode = e1000_fc_full; + DEBUGOUT("Flow Control = FULL.\n"); + } else { + hw->fc.current_mode = e1000_fc_rx_pause; + DEBUGOUT("Flow Control = Rx PAUSE frames only.\n"); + } + } + /* For receiving PAUSE frames ONLY. + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 0 | 1 | 1 | 1 | e1000_fc_tx_pause + */ + else if (!(pcs_adv_reg & E1000_TXCW_PAUSE) && + (pcs_adv_reg & E1000_TXCW_ASM_DIR) && + (pcs_lp_ability_reg & E1000_TXCW_PAUSE) && + (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) { + hw->fc.current_mode = e1000_fc_tx_pause; + DEBUGOUT("Flow Control = Tx PAUSE frames only.\n"); + } + /* For transmitting PAUSE frames ONLY. + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 1 | 1 | 0 | 1 | e1000_fc_rx_pause + */ + else if ((pcs_adv_reg & E1000_TXCW_PAUSE) && + (pcs_adv_reg & E1000_TXCW_ASM_DIR) && + !(pcs_lp_ability_reg & E1000_TXCW_PAUSE) && + (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) { + hw->fc.current_mode = e1000_fc_rx_pause; + DEBUGOUT("Flow Control = Rx PAUSE frames only.\n"); + } else { + /* Per the IEEE spec, at this point flow control + * should be disabled. + */ + hw->fc.current_mode = e1000_fc_none; + DEBUGOUT("Flow Control = NONE.\n"); + } + + /* Now we call a subroutine to actually force the MAC + * controller to use the correct flow control settings. + */ + pcs_ctrl_reg = E1000_READ_REG(hw, E1000_PCS_LCTL); + pcs_ctrl_reg |= E1000_PCS_LCTL_FORCE_FCTRL; + E1000_WRITE_REG(hw, E1000_PCS_LCTL, pcs_ctrl_reg); + + ret_val = e1000_force_mac_fc_generic(hw); + if (ret_val) { + DEBUGOUT("Error forcing flow control settings\n"); + return ret_val; + } + } + + return E1000_SUCCESS; +} + +/** + * e1000_get_speed_and_duplex_copper_generic - Retrieve current speed/duplex + * @hw: pointer to the HW structure + * @speed: stores the current speed + * @duplex: stores the current duplex + * + * Read the status register for the current speed/duplex and store the current + * speed and duplex for copper connections. 
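Editor's note: the PAUSE/ASM_DIR resolution tables in e1000_config_fc_after_link_up_generic above reduce to one small decision function. The sketch below is a hedged restatement of that logic, not driver code: the enum and names are illustrative, and req_full stands for "the local side originally requested full flow control". The additional half-duplex override the driver applies afterwards is not shown.

/* IEEE 802.3 PAUSE resolution as applied above, written as a standalone
 * helper.  Compiles on its own.
 */
#include <stdbool.h>
#include <stdio.h>

enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

static enum fc_mode resolve_fc(bool loc_pause, bool loc_asm,
			       bool lp_pause, bool lp_asm, bool req_full)
{
	if (loc_pause && lp_pause)
		/* both advertise PAUSE: symmetric, unless we only wanted Rx */
		return req_full ? FC_FULL : FC_RX_PAUSE;
	if (!loc_pause && loc_asm && lp_pause && lp_asm)
		return FC_TX_PAUSE;		/* table row 0|1|1|1 */
	if (loc_pause && loc_asm && !lp_pause && lp_asm)
		return FC_RX_PAUSE;		/* table row 1|1|0|1 */
	return FC_NONE;				/* all remaining rows */
}

int main(void)
{
	/* both ends advertise symmetric PAUSE and we asked for full */
	printf("%d\n", resolve_fc(true, false, true, true, true)); /* 3 = FC_FULL */
	return 0;
}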
+ **/ +s32 e1000_get_speed_and_duplex_copper_generic(struct e1000_hw *hw, u16 *speed, + u16 *duplex) +{ + u32 status; + + DEBUGFUNC("e1000_get_speed_and_duplex_copper_generic"); + + status = E1000_READ_REG(hw, E1000_STATUS); + if (status & E1000_STATUS_SPEED_1000) { + *speed = SPEED_1000; + DEBUGOUT("1000 Mbs, "); + } else if (status & E1000_STATUS_SPEED_100) { + *speed = SPEED_100; + DEBUGOUT("100 Mbs, "); + } else { + *speed = SPEED_10; + DEBUGOUT("10 Mbs, "); + } + + if (status & E1000_STATUS_FD) { + *duplex = FULL_DUPLEX; + DEBUGOUT("Full Duplex\n"); + } else { + *duplex = HALF_DUPLEX; + DEBUGOUT("Half Duplex\n"); + } + + return E1000_SUCCESS; +} + +/** + * e1000_get_speed_and_duplex_fiber_generic - Retrieve current speed/duplex + * @hw: pointer to the HW structure + * @speed: stores the current speed + * @duplex: stores the current duplex + * + * Sets the speed and duplex to gigabit full duplex (the only possible option) + * for fiber/serdes links. + **/ +s32 e1000_get_speed_and_duplex_fiber_serdes_generic(struct e1000_hw E1000_UNUSEDARG *hw, + u16 *speed, u16 *duplex) +{ + DEBUGFUNC("e1000_get_speed_and_duplex_fiber_serdes_generic"); + UNREFERENCED_1PARAMETER(hw); + + *speed = SPEED_1000; + *duplex = FULL_DUPLEX; + + return E1000_SUCCESS; +} + +/** + * e1000_get_hw_semaphore_generic - Acquire hardware semaphore + * @hw: pointer to the HW structure + * + * Acquire the HW semaphore to access the PHY or NVM + **/ +s32 e1000_get_hw_semaphore_generic(struct e1000_hw *hw) +{ + u32 swsm; + s32 timeout = hw->nvm.word_size + 1; + s32 i = 0; + + DEBUGFUNC("e1000_get_hw_semaphore_generic"); + + /* Get the SW semaphore */ + while (i < timeout) { + swsm = E1000_READ_REG(hw, E1000_SWSM); + if (!(swsm & E1000_SWSM_SMBI)) + break; + + usec_delay(50); + i++; + } + + if (i == timeout) { + DEBUGOUT("Driver can't access device - SMBI bit is set.\n"); + return -E1000_ERR_NVM; + } + + /* Get the FW semaphore. */ + for (i = 0; i < timeout; i++) { + swsm = E1000_READ_REG(hw, E1000_SWSM); + E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_SWESMBI); + + /* Semaphore acquired if bit latched */ + if (E1000_READ_REG(hw, E1000_SWSM) & E1000_SWSM_SWESMBI) + break; + + usec_delay(50); + } + + if (i == timeout) { + /* Release semaphores */ + e1000_put_hw_semaphore_generic(hw); + DEBUGOUT("Driver can't access the NVM\n"); + return -E1000_ERR_NVM; + } + + return E1000_SUCCESS; +} + +/** + * e1000_put_hw_semaphore_generic - Release hardware semaphore + * @hw: pointer to the HW structure + * + * Release hardware semaphore used to access the PHY or NVM + **/ +void e1000_put_hw_semaphore_generic(struct e1000_hw *hw) +{ + u32 swsm; + + DEBUGFUNC("e1000_put_hw_semaphore_generic"); + + swsm = E1000_READ_REG(hw, E1000_SWSM); + + swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI); + + E1000_WRITE_REG(hw, E1000_SWSM, swsm); +} + +/** + * e1000_get_auto_rd_done_generic - Check for auto read completion + * @hw: pointer to the HW structure + * + * Check EEPROM for Auto Read done bit. 
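Editor's note: e1000_get_hw_semaphore_generic and e1000_put_hw_semaphore_generic above implement the two-stage SMBI/SWESMBI handshake, and callers are expected to bracket NVM or PHY accesses with that pair. The sketch below shows only the calling pattern; it assumes the surrounding driver headers (struct e1000_hw, s32/u16, hw->nvm.ops.read), the wrapper name is invented, and NVM_CHECKSUM_REG is used merely as a convenient existing word offset.

/* Hedged usage sketch, not part of the patch: read one NVM word while
 * holding the hardware semaphore acquired above.
 */
static s32 example_read_nvm_word_locked(struct e1000_hw *hw, u16 *data)
{
	s32 ret_val;

	ret_val = e1000_get_hw_semaphore_generic(hw);
	if (ret_val)
		return ret_val;		/* SMBI or SWESMBI never latched */

	ret_val = hw->nvm.ops.read(hw, NVM_CHECKSUM_REG, 1, data);

	e1000_put_hw_semaphore_generic(hw);	/* always release */
	return ret_val;
}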
+ **/ +s32 e1000_get_auto_rd_done_generic(struct e1000_hw *hw) +{ + s32 i = 0; + + DEBUGFUNC("e1000_get_auto_rd_done_generic"); + + while (i < AUTO_READ_DONE_TIMEOUT) { + if (E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_AUTO_RD) + break; + msec_delay(1); + i++; + } + + if (i == AUTO_READ_DONE_TIMEOUT) { + DEBUGOUT("Auto read by HW from NVM has not completed.\n"); + return -E1000_ERR_RESET; + } + + return E1000_SUCCESS; +} + +/** + * e1000_valid_led_default_generic - Verify a valid default LED config + * @hw: pointer to the HW structure + * @data: pointer to the NVM (EEPROM) + * + * Read the EEPROM for the current default LED configuration. If the + * LED configuration is not valid, set to a valid LED configuration. + **/ +s32 e1000_valid_led_default_generic(struct e1000_hw *hw, u16 *data) +{ + s32 ret_val; + + DEBUGFUNC("e1000_valid_led_default_generic"); + + ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + + if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) + *data = ID_LED_DEFAULT; + + return E1000_SUCCESS; +} + +/** + * e1000_id_led_init_generic - + * @hw: pointer to the HW structure + * + **/ +s32 e1000_id_led_init_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val; + const u32 ledctl_mask = 0x000000FF; + const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON; + const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF; + u16 data, i, temp; + const u16 led_mask = 0x0F; + + DEBUGFUNC("e1000_id_led_init_generic"); + + ret_val = hw->nvm.ops.valid_led_default(hw, &data); + if (ret_val) + return ret_val; + + mac->ledctl_default = E1000_READ_REG(hw, E1000_LEDCTL); + mac->ledctl_mode1 = mac->ledctl_default; + mac->ledctl_mode2 = mac->ledctl_default; + + for (i = 0; i < 4; i++) { + temp = (data >> (i << 2)) & led_mask; + switch (temp) { + case ID_LED_ON1_DEF2: + case ID_LED_ON1_ON2: + case ID_LED_ON1_OFF2: + mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3)); + mac->ledctl_mode1 |= ledctl_on << (i << 3); + break; + case ID_LED_OFF1_DEF2: + case ID_LED_OFF1_ON2: + case ID_LED_OFF1_OFF2: + mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3)); + mac->ledctl_mode1 |= ledctl_off << (i << 3); + break; + default: + /* Do nothing */ + break; + } + switch (temp) { + case ID_LED_DEF1_ON2: + case ID_LED_ON1_ON2: + case ID_LED_OFF1_ON2: + mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3)); + mac->ledctl_mode2 |= ledctl_on << (i << 3); + break; + case ID_LED_DEF1_OFF2: + case ID_LED_ON1_OFF2: + case ID_LED_OFF1_OFF2: + mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3)); + mac->ledctl_mode2 |= ledctl_off << (i << 3); + break; + default: + /* Do nothing */ + break; + } + } + + return E1000_SUCCESS; +} + +/** + * e1000_setup_led_generic - Configures SW controllable LED + * @hw: pointer to the HW structure + * + * This prepares the SW controllable LED for use and saves the current state + * of the LED so it can be later restored. 
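Editor's note: the LED helpers that follow (setup, blink, on/off, cleanup) are normally driven through the mac.ops function pointers installed by e1000_init_mac_ops_generic or the family-specific init code. A hedged sketch of the usual identify-port sequence, assuming the surrounding driver headers; the function name is invented.

/* Illustrative only: blink the port LED while `start' is true, then
 * restore the saved LEDCTL value.
 */
static void example_identify_port(struct e1000_hw *hw, bool start)
{
	if (start) {
		hw->mac.ops.setup_led(hw);	/* saves the current LEDCTL */
		hw->mac.ops.blink_led(hw);
	} else {
		hw->mac.ops.cleanup_led(hw);	/* restores the saved LEDCTL */
	}
}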
+ **/ +s32 e1000_setup_led_generic(struct e1000_hw *hw) +{ + u32 ledctl; + + DEBUGFUNC("e1000_setup_led_generic"); + + if (hw->mac.ops.setup_led != e1000_setup_led_generic) + return -E1000_ERR_CONFIG; + + if (hw->phy.media_type == e1000_media_type_fiber) { + ledctl = E1000_READ_REG(hw, E1000_LEDCTL); + hw->mac.ledctl_default = ledctl; + /* Turn off LED0 */ + ledctl &= ~(E1000_LEDCTL_LED0_IVRT | E1000_LEDCTL_LED0_BLINK | + E1000_LEDCTL_LED0_MODE_MASK); + ledctl |= (E1000_LEDCTL_MODE_LED_OFF << + E1000_LEDCTL_LED0_MODE_SHIFT); + E1000_WRITE_REG(hw, E1000_LEDCTL, ledctl); + } else if (hw->phy.media_type == e1000_media_type_copper) { + E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1); + } + + return E1000_SUCCESS; +} + +/** + * e1000_cleanup_led_generic - Set LED config to default operation + * @hw: pointer to the HW structure + * + * Remove the current LED configuration and set the LED configuration + * to the default value, saved from the EEPROM. + **/ +s32 e1000_cleanup_led_generic(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_cleanup_led_generic"); + + E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default); + return E1000_SUCCESS; +} + +/** + * e1000_blink_led_generic - Blink LED + * @hw: pointer to the HW structure + * + * Blink the LEDs which are set to be on. + **/ +s32 e1000_blink_led_generic(struct e1000_hw *hw) +{ + u32 ledctl_blink = 0; + u32 i; + + DEBUGFUNC("e1000_blink_led_generic"); + + if (hw->phy.media_type == e1000_media_type_fiber) { + /* always blink LED0 for PCI-E fiber */ + ledctl_blink = E1000_LEDCTL_LED0_BLINK | + (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT); + } else { + /* Set the blink bit for each LED that's "on" (0x0E) + * (or "off" if inverted) in ledctl_mode2. The blink + * logic in hardware only works when mode is set to "on" + * so it must be changed accordingly when the mode is + * "off" and inverted. + */ + ledctl_blink = hw->mac.ledctl_mode2; + for (i = 0; i < 32; i += 8) { + u32 mode = (hw->mac.ledctl_mode2 >> i) & + E1000_LEDCTL_LED0_MODE_MASK; + u32 led_default = hw->mac.ledctl_default >> i; + + if ((!(led_default & E1000_LEDCTL_LED0_IVRT) && + (mode == E1000_LEDCTL_MODE_LED_ON)) || + ((led_default & E1000_LEDCTL_LED0_IVRT) && + (mode == E1000_LEDCTL_MODE_LED_OFF))) { + ledctl_blink &= + ~(E1000_LEDCTL_LED0_MODE_MASK << i); + ledctl_blink |= (E1000_LEDCTL_LED0_BLINK | + E1000_LEDCTL_MODE_LED_ON) << i; + } + } + } + + E1000_WRITE_REG(hw, E1000_LEDCTL, ledctl_blink); + + return E1000_SUCCESS; +} + +/** + * e1000_led_on_generic - Turn LED on + * @hw: pointer to the HW structure + * + * Turn LED on. + **/ +s32 e1000_led_on_generic(struct e1000_hw *hw) +{ + u32 ctrl; + + DEBUGFUNC("e1000_led_on_generic"); + + switch (hw->phy.media_type) { + case e1000_media_type_fiber: + ctrl = E1000_READ_REG(hw, E1000_CTRL); + ctrl &= ~E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + break; + case e1000_media_type_copper: + E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2); + break; + default: + break; + } + + return E1000_SUCCESS; +} + +/** + * e1000_led_off_generic - Turn LED off + * @hw: pointer to the HW structure + * + * Turn LED off. 
+ **/ +s32 e1000_led_off_generic(struct e1000_hw *hw) +{ + u32 ctrl; + + DEBUGFUNC("e1000_led_off_generic"); + + switch (hw->phy.media_type) { + case e1000_media_type_fiber: + ctrl = E1000_READ_REG(hw, E1000_CTRL); + ctrl |= E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + break; + case e1000_media_type_copper: + E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1); + break; + default: + break; + } + + return E1000_SUCCESS; +} + +/** + * e1000_set_pcie_no_snoop_generic - Set PCI-express capabilities + * @hw: pointer to the HW structure + * @no_snoop: bitmap of snoop events + * + * Set the PCI-express register to snoop for events enabled in 'no_snoop'. + **/ +void e1000_set_pcie_no_snoop_generic(struct e1000_hw *hw, u32 no_snoop) +{ + u32 gcr; + + DEBUGFUNC("e1000_set_pcie_no_snoop_generic"); + + if (hw->bus.type != e1000_bus_type_pci_express) + return; + + if (no_snoop) { + gcr = E1000_READ_REG(hw, E1000_GCR); + gcr &= ~(PCIE_NO_SNOOP_ALL); + gcr |= no_snoop; + E1000_WRITE_REG(hw, E1000_GCR, gcr); + } +} + +/** + * e1000_disable_pcie_master_generic - Disables PCI-express master access + * @hw: pointer to the HW structure + * + * Returns E1000_SUCCESS if successful, else returns -10 + * (-E1000_ERR_MASTER_REQUESTS_PENDING) if master disable bit has not caused + * the master requests to be disabled. + * + * Disables PCI-Express master access and verifies there are no pending + * requests. + **/ +s32 e1000_disable_pcie_master_generic(struct e1000_hw *hw) +{ + u32 ctrl; + s32 timeout = MASTER_DISABLE_TIMEOUT; + + DEBUGFUNC("e1000_disable_pcie_master_generic"); + + if (hw->bus.type != e1000_bus_type_pci_express) + return E1000_SUCCESS; + + ctrl = E1000_READ_REG(hw, E1000_CTRL); + ctrl |= E1000_CTRL_GIO_MASTER_DISABLE; + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + + while (timeout) { + if (!(E1000_READ_REG(hw, E1000_STATUS) & + E1000_STATUS_GIO_MASTER_ENABLE) || + E1000_REMOVED(hw->hw_addr)) + break; + usec_delay(100); + timeout--; + } + + if (!timeout) { + DEBUGOUT("Master requests are pending.\n"); + return -E1000_ERR_MASTER_REQUESTS_PENDING; + } + + return E1000_SUCCESS; +} + +/** + * e1000_reset_adaptive_generic - Reset Adaptive Interframe Spacing + * @hw: pointer to the HW structure + * + * Reset the Adaptive Interframe Spacing throttle to default values. + **/ +void e1000_reset_adaptive_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + + DEBUGFUNC("e1000_reset_adaptive_generic"); + + if (!mac->adaptive_ifs) { + DEBUGOUT("Not in Adaptive IFS mode!\n"); + return; + } + + mac->current_ifs_val = 0; + mac->ifs_min_val = IFS_MIN; + mac->ifs_max_val = IFS_MAX; + mac->ifs_step_size = IFS_STEP; + mac->ifs_ratio = IFS_RATIO; + + mac->in_ifs_mode = false; + E1000_WRITE_REG(hw, E1000_AIT, 0); +} + +/** + * e1000_update_adaptive_generic - Update Adaptive Interframe Spacing + * @hw: pointer to the HW structure + * + * Update the Adaptive Interframe Spacing Throttle value based on the + * time between transmitted packets and time between collisions. 
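Editor's note: a standalone worked example of the adaptive-IFS update rule implemented just below. The threshold constants (IFS_MIN 40, IFS_MAX 80, IFS_STEP 10, IFS_RATIO 4, MIN_NUM_XMITS 1000) are the usual defaults in this base code but are restated here as plain numbers and should be treated as assumptions.

/* One update step: 2000 packets sent in the interval, 600 of them
 * collided, so collisions dominate and the throttle engages at its
 * minimum value.
 */
#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	const unsigned int ifs_min = 40, ifs_max = 80, ifs_step = 10;
	const unsigned int ifs_ratio = 4, min_xmits = 1000;
	unsigned int current_ifs = 0;
	bool in_ifs_mode = false;

	unsigned int tx_packet_delta = 2000, collision_delta = 600;

	if (collision_delta * ifs_ratio > tx_packet_delta) {
		if (tx_packet_delta > min_xmits) {
			in_ifs_mode = true;
			if (current_ifs < ifs_max)
				current_ifs = current_ifs ?
					      current_ifs + ifs_step : ifs_min;
		}
	} else if (in_ifs_mode && tx_packet_delta <= min_xmits) {
		current_ifs = 0;	/* corresponds to writing 0 to AIT */
		in_ifs_mode = false;
	}

	printf("AIT would now be programmed to %u\n", current_ifs); /* 40 */
	return 0;
}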
+ **/ +void e1000_update_adaptive_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + + DEBUGFUNC("e1000_update_adaptive_generic"); + + if (!mac->adaptive_ifs) { + DEBUGOUT("Not in Adaptive IFS mode!\n"); + return; + } + + if ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) { + if (mac->tx_packet_delta > MIN_NUM_XMITS) { + mac->in_ifs_mode = true; + if (mac->current_ifs_val < mac->ifs_max_val) { + if (!mac->current_ifs_val) + mac->current_ifs_val = mac->ifs_min_val; + else + mac->current_ifs_val += + mac->ifs_step_size; + E1000_WRITE_REG(hw, E1000_AIT, + mac->current_ifs_val); + } + } + } else { + if (mac->in_ifs_mode && + (mac->tx_packet_delta <= MIN_NUM_XMITS)) { + mac->current_ifs_val = 0; + mac->in_ifs_mode = false; + E1000_WRITE_REG(hw, E1000_AIT, 0); + } + } +} + +/** + * e1000_validate_mdi_setting_generic - Verify MDI/MDIx settings + * @hw: pointer to the HW structure + * + * Verify that when not using auto-negotiation that MDI/MDIx is correctly + * set, which is forced to MDI mode only. + **/ +STATIC s32 e1000_validate_mdi_setting_generic(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_validate_mdi_setting_generic"); + + if (!hw->mac.autoneg && (hw->phy.mdix == 0 || hw->phy.mdix == 3)) { + DEBUGOUT("Invalid MDI setting detected\n"); + hw->phy.mdix = 1; + return -E1000_ERR_CONFIG; + } + + return E1000_SUCCESS; +} + +/** + * e1000_validate_mdi_setting_crossover_generic - Verify MDI/MDIx settings + * @hw: pointer to the HW structure + * + * Validate the MDI/MDIx setting, allowing for auto-crossover during forced + * operation. + **/ +s32 e1000_validate_mdi_setting_crossover_generic(struct e1000_hw E1000_UNUSEDARG *hw) +{ + DEBUGFUNC("e1000_validate_mdi_setting_crossover_generic"); + UNREFERENCED_1PARAMETER(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_write_8bit_ctrl_reg_generic - Write a 8bit CTRL register + * @hw: pointer to the HW structure + * @reg: 32bit register offset such as E1000_SCTL + * @offset: register offset to write to + * @data: data to write at register offset + * + * Writes an address/data control type register. There are several of these + * and they all have the format address << 8 | data and bit 31 is polled for + * completion. 
+ **/ +s32 e1000_write_8bit_ctrl_reg_generic(struct e1000_hw *hw, u32 reg, + u32 offset, u8 data) +{ + u32 i, regvalue = 0; + + DEBUGFUNC("e1000_write_8bit_ctrl_reg_generic"); + + /* Set up the address and data */ + regvalue = ((u32)data) | (offset << E1000_GEN_CTL_ADDRESS_SHIFT); + E1000_WRITE_REG(hw, reg, regvalue); + + /* Poll the ready bit to see if the MDI read completed */ + for (i = 0; i < E1000_GEN_POLL_TIMEOUT; i++) { + usec_delay(5); + regvalue = E1000_READ_REG(hw, reg); + if (regvalue & E1000_GEN_CTL_READY) + break; + } + if (!(regvalue & E1000_GEN_CTL_READY)) { + DEBUGOUT1("Reg %08x did not indicate ready\n", reg); + return -E1000_ERR_PHY; + } + + return E1000_SUCCESS; +} diff --git a/src/spdk/dpdk/drivers/net/e1000/base/e1000_mac.h b/src/spdk/dpdk/drivers/net/e1000/base/e1000_mac.h new file mode 100644 index 000000000..bbd2a7388 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/e1000/base/e1000_mac.h @@ -0,0 +1,66 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001 - 2015 Intel Corporation + */ + +#ifndef _E1000_MAC_H_ +#define _E1000_MAC_H_ + +void e1000_init_mac_ops_generic(struct e1000_hw *hw); +#ifndef E1000_REMOVED +#define E1000_REMOVED(a) (0) +#endif /* E1000_REMOVED */ +void e1000_null_mac_generic(struct e1000_hw *hw); +s32 e1000_null_ops_generic(struct e1000_hw *hw); +s32 e1000_null_link_info(struct e1000_hw *hw, u16 *s, u16 *d); +bool e1000_null_mng_mode(struct e1000_hw *hw); +void e1000_null_update_mc(struct e1000_hw *hw, u8 *h, u32 a); +void e1000_null_write_vfta(struct e1000_hw *hw, u32 a, u32 b); +int e1000_null_rar_set(struct e1000_hw *hw, u8 *h, u32 a); +s32 e1000_blink_led_generic(struct e1000_hw *hw); +s32 e1000_check_for_copper_link_generic(struct e1000_hw *hw); +s32 e1000_check_for_fiber_link_generic(struct e1000_hw *hw); +s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw); +s32 e1000_cleanup_led_generic(struct e1000_hw *hw); +s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw); +s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw); +s32 e1000_config_fc_after_link_up_generic(struct e1000_hw *hw); +s32 e1000_disable_pcie_master_generic(struct e1000_hw *hw); +s32 e1000_force_mac_fc_generic(struct e1000_hw *hw); +s32 e1000_get_auto_rd_done_generic(struct e1000_hw *hw); +s32 e1000_get_bus_info_pci_generic(struct e1000_hw *hw); +s32 e1000_get_bus_info_pcie_generic(struct e1000_hw *hw); +void e1000_set_lan_id_single_port(struct e1000_hw *hw); +void e1000_set_lan_id_multi_port_pci(struct e1000_hw *hw); +s32 e1000_get_hw_semaphore_generic(struct e1000_hw *hw); +s32 e1000_get_speed_and_duplex_copper_generic(struct e1000_hw *hw, u16 *speed, + u16 *duplex); +s32 e1000_get_speed_and_duplex_fiber_serdes_generic(struct e1000_hw *hw, + u16 *speed, u16 *duplex); +s32 e1000_id_led_init_generic(struct e1000_hw *hw); +s32 e1000_led_on_generic(struct e1000_hw *hw); +s32 e1000_led_off_generic(struct e1000_hw *hw); +void e1000_update_mc_addr_list_generic(struct e1000_hw *hw, + u8 *mc_addr_list, u32 mc_addr_count); +s32 e1000_set_default_fc_generic(struct e1000_hw *hw); +s32 e1000_set_fc_watermarks_generic(struct e1000_hw *hw); +s32 e1000_setup_fiber_serdes_link_generic(struct e1000_hw *hw); +s32 e1000_setup_led_generic(struct e1000_hw *hw); +s32 e1000_setup_link_generic(struct e1000_hw *hw); +s32 e1000_validate_mdi_setting_crossover_generic(struct e1000_hw *hw); +s32 e1000_write_8bit_ctrl_reg_generic(struct e1000_hw *hw, u32 reg, + u32 offset, u8 data); + +u32 e1000_hash_mc_addr_generic(struct e1000_hw *hw, u8 *mc_addr); + +void 
e1000_clear_hw_cntrs_base_generic(struct e1000_hw *hw); +void e1000_clear_vfta_generic(struct e1000_hw *hw); +void e1000_init_rx_addrs_generic(struct e1000_hw *hw, u16 rar_count); +void e1000_pcix_mmrbc_workaround_generic(struct e1000_hw *hw); +void e1000_put_hw_semaphore_generic(struct e1000_hw *hw); +s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw); +void e1000_reset_adaptive_generic(struct e1000_hw *hw); +void e1000_set_pcie_no_snoop_generic(struct e1000_hw *hw, u32 no_snoop); +void e1000_update_adaptive_generic(struct e1000_hw *hw); +void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value); + +#endif diff --git a/src/spdk/dpdk/drivers/net/e1000/base/e1000_manage.c b/src/spdk/dpdk/drivers/net/e1000/base/e1000_manage.c new file mode 100644 index 000000000..7fb63dd72 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/e1000/base/e1000_manage.c @@ -0,0 +1,547 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001 - 2015 Intel Corporation + */ + +#include "e1000_api.h" + +/** + * e1000_calculate_checksum - Calculate checksum for buffer + * @buffer: pointer to EEPROM + * @length: size of EEPROM to calculate a checksum for + * + * Calculates the checksum for some buffer on a specified length. The + * checksum calculated is returned. + **/ +u8 e1000_calculate_checksum(u8 *buffer, u32 length) +{ + u32 i; + u8 sum = 0; + + DEBUGFUNC("e1000_calculate_checksum"); + + if (!buffer) + return 0; + + for (i = 0; i < length; i++) + sum += buffer[i]; + + return (u8) (0 - sum); +} + +/** + * e1000_mng_enable_host_if_generic - Checks host interface is enabled + * @hw: pointer to the HW structure + * + * Returns E1000_success upon success, else E1000_ERR_HOST_INTERFACE_COMMAND + * + * This function checks whether the HOST IF is enabled for command operation + * and also checks whether the previous command is completed. It busy waits + * in case of previous command is not completed. + **/ +s32 e1000_mng_enable_host_if_generic(struct e1000_hw *hw) +{ + u32 hicr; + u8 i; + + DEBUGFUNC("e1000_mng_enable_host_if_generic"); + + if (!hw->mac.arc_subsystem_valid) { + DEBUGOUT("ARC subsystem not valid.\n"); + return -E1000_ERR_HOST_INTERFACE_COMMAND; + } + + /* Check that the host interface is enabled. */ + hicr = E1000_READ_REG(hw, E1000_HICR); + if (!(hicr & E1000_HICR_EN)) { + DEBUGOUT("E1000_HOST_EN bit disabled.\n"); + return -E1000_ERR_HOST_INTERFACE_COMMAND; + } + /* check the previous command is completed */ + for (i = 0; i < E1000_MNG_DHCP_COMMAND_TIMEOUT; i++) { + hicr = E1000_READ_REG(hw, E1000_HICR); + if (!(hicr & E1000_HICR_C)) + break; + msec_delay_irq(1); + } + + if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) { + DEBUGOUT("Previous command timeout failed .\n"); + return -E1000_ERR_HOST_INTERFACE_COMMAND; + } + + return E1000_SUCCESS; +} + +/** + * e1000_check_mng_mode_generic - Generic check management mode + * @hw: pointer to the HW structure + * + * Reads the firmware semaphore register and returns true (>0) if + * manageability is enabled, else false (0). + **/ +bool e1000_check_mng_mode_generic(struct e1000_hw *hw) +{ + u32 fwsm = E1000_READ_REG(hw, E1000_FWSM); + + DEBUGFUNC("e1000_check_mng_mode_generic"); + + + return (fwsm & E1000_FWSM_MODE_MASK) == + (E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT); +} + +/** + * e1000_enable_tx_pkt_filtering_generic - Enable packet filtering on Tx + * @hw: pointer to the HW structure + * + * Enables packet filtering on transmit packets if manageability is enabled + * and host interface is enabled. 
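+ *
+ * For example, if the cookie read back from E1000_HOST_IF carries a
+ * signature other than E1000_IAMT_SIGNATURE, or its stored checksum does
+ * not match the value recomputed by e1000_calculate_checksum(), the cookie
+ * is treated as invalid and tx_pkt_filtering stays true as the safe default.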
+ **/ +bool e1000_enable_tx_pkt_filtering_generic(struct e1000_hw *hw) +{ + struct e1000_host_mng_dhcp_cookie *hdr = &hw->mng_cookie; + u32 *buffer = (u32 *)&hw->mng_cookie; + u32 offset; + s32 ret_val, hdr_csum, csum; + u8 i, len; + + DEBUGFUNC("e1000_enable_tx_pkt_filtering_generic"); + + hw->mac.tx_pkt_filtering = true; + + /* No manageability, no filtering */ + if (!hw->mac.ops.check_mng_mode(hw)) { + hw->mac.tx_pkt_filtering = false; + return hw->mac.tx_pkt_filtering; + } + + /* If we can't read from the host interface for whatever + * reason, disable filtering. + */ + ret_val = e1000_mng_enable_host_if_generic(hw); + if (ret_val != E1000_SUCCESS) { + hw->mac.tx_pkt_filtering = false; + return hw->mac.tx_pkt_filtering; + } + + /* Read in the header. Length and offset are in dwords. */ + len = E1000_MNG_DHCP_COOKIE_LENGTH >> 2; + offset = E1000_MNG_DHCP_COOKIE_OFFSET >> 2; + for (i = 0; i < len; i++) + *(buffer + i) = E1000_READ_REG_ARRAY_DWORD(hw, E1000_HOST_IF, + offset + i); + hdr_csum = hdr->checksum; + hdr->checksum = 0; + csum = e1000_calculate_checksum((u8 *)hdr, + E1000_MNG_DHCP_COOKIE_LENGTH); + /* If either the checksums or signature don't match, then + * the cookie area isn't considered valid, in which case we + * take the safe route of assuming Tx filtering is enabled. + */ + if ((hdr_csum != csum) || (hdr->signature != E1000_IAMT_SIGNATURE)) { + hw->mac.tx_pkt_filtering = true; + return hw->mac.tx_pkt_filtering; + } + + /* Cookie area is valid, make the final check for filtering. */ + if (!(hdr->status & E1000_MNG_DHCP_COOKIE_STATUS_PARSING)) + hw->mac.tx_pkt_filtering = false; + + return hw->mac.tx_pkt_filtering; +} + +/** + * e1000_mng_write_cmd_header_generic - Writes manageability command header + * @hw: pointer to the HW structure + * @hdr: pointer to the host interface command header + * + * Writes the command header after does the checksum calculation. + **/ +s32 e1000_mng_write_cmd_header_generic(struct e1000_hw *hw, + struct e1000_host_mng_command_header *hdr) +{ + u16 i, length = sizeof(struct e1000_host_mng_command_header); + + DEBUGFUNC("e1000_mng_write_cmd_header_generic"); + + /* Write the whole command header structure with new checksum. */ + + hdr->checksum = e1000_calculate_checksum((u8 *)hdr, length); + + length >>= 2; + /* Write the relevant command block into the ram area. */ + for (i = 0; i < length; i++) { + E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, i, + *((u32 *) hdr + i)); + E1000_WRITE_FLUSH(hw); + } + + return E1000_SUCCESS; +} + +/** + * e1000_mng_host_if_write_generic - Write to the manageability host interface + * @hw: pointer to the HW structure + * @buffer: pointer to the host interface buffer + * @length: size of the buffer + * @offset: location in the buffer to write to + * @sum: sum of the data (not checksum) + * + * This function writes the buffer content at the offset given on the host if. + * It also does alignment considerations to do the writes in most efficient + * way. Also fills up the sum of the buffer in *buffer parameter. 
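+ *
+ * For example, a write of length 10 at offset 6 first merges two bytes
+ * into bytes 2-3 of host-interface dword 1, then writes the remaining
+ * eight bytes as dwords 2 and 3, while *sum accumulates all ten data
+ * bytes along the way.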
+ **/ +s32 e1000_mng_host_if_write_generic(struct e1000_hw *hw, u8 *buffer, + u16 length, u16 offset, u8 *sum) +{ + u8 *tmp; + u8 *bufptr = buffer; + u32 data = 0; + u16 remaining, i, j, prev_bytes; + + DEBUGFUNC("e1000_mng_host_if_write_generic"); + + /* sum = only sum of the data and it is not checksum */ + + if (length == 0 || offset + length > E1000_HI_MAX_MNG_DATA_LENGTH) + return -E1000_ERR_PARAM; + + tmp = (u8 *)&data; + prev_bytes = offset & 0x3; + offset >>= 2; + + if (prev_bytes) { + data = E1000_READ_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset); + for (j = prev_bytes; j < sizeof(u32); j++) { + *(tmp + j) = *bufptr++; + *sum += *(tmp + j); + } + E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset, data); + length -= j - prev_bytes; + offset++; + } + + remaining = length & 0x3; + length -= remaining; + + /* Calculate length in DWORDs */ + length >>= 2; + + /* The device driver writes the relevant command block into the + * ram area. + */ + for (i = 0; i < length; i++) { + for (j = 0; j < sizeof(u32); j++) { + *(tmp + j) = *bufptr++; + *sum += *(tmp + j); + } + + E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset + i, + data); + } + if (remaining) { + for (j = 0; j < sizeof(u32); j++) { + if (j < remaining) + *(tmp + j) = *bufptr++; + else + *(tmp + j) = 0; + + *sum += *(tmp + j); + } + E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset + i, + data); + } + + return E1000_SUCCESS; +} + +/** + * e1000_mng_write_dhcp_info_generic - Writes DHCP info to host interface + * @hw: pointer to the HW structure + * @buffer: pointer to the host interface + * @length: size of the buffer + * + * Writes the DHCP information to the host interface. + **/ +s32 e1000_mng_write_dhcp_info_generic(struct e1000_hw *hw, u8 *buffer, + u16 length) +{ + struct e1000_host_mng_command_header hdr; + s32 ret_val; + u32 hicr; + + DEBUGFUNC("e1000_mng_write_dhcp_info_generic"); + + hdr.command_id = E1000_MNG_DHCP_TX_PAYLOAD_CMD; + hdr.command_length = length; + hdr.reserved1 = 0; + hdr.reserved2 = 0; + hdr.checksum = 0; + + /* Enable the host interface */ + ret_val = e1000_mng_enable_host_if_generic(hw); + if (ret_val) + return ret_val; + + /* Populate the host interface with the contents of "buffer". */ + ret_val = e1000_mng_host_if_write_generic(hw, buffer, length, + sizeof(hdr), &(hdr.checksum)); + if (ret_val) + return ret_val; + + /* Write the manageability command header */ + ret_val = e1000_mng_write_cmd_header_generic(hw, &hdr); + if (ret_val) + return ret_val; + + /* Tell the ARC a new command is pending. */ + hicr = E1000_READ_REG(hw, E1000_HICR); + E1000_WRITE_REG(hw, E1000_HICR, hicr | E1000_HICR_C); + + return E1000_SUCCESS; +} + +/** + * e1000_enable_mng_pass_thru - Check if management passthrough is needed + * @hw: pointer to the HW structure + * + * Verifies the hardware needs to leave interface enabled so that frames can + * be directed to and from the management interface. 
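+ *
+ * For example, on a part with a valid FWSM register this returns true
+ * only when ASF firmware is present, MANC.RCV_TCO_EN is set,
+ * FACTPS_MNGCG is clear and the FWSM mode field reports
+ * e1000_mng_mode_pt; 82574/82583 consult NVM_INIT_CONTROL2_REG instead.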
+ **/ +bool e1000_enable_mng_pass_thru(struct e1000_hw *hw) +{ + u32 manc; + u32 fwsm, factps; + + DEBUGFUNC("e1000_enable_mng_pass_thru"); + + if (!hw->mac.asf_firmware_present) + return false; + + manc = E1000_READ_REG(hw, E1000_MANC); + + if (!(manc & E1000_MANC_RCV_TCO_EN)) + return false; + + if (hw->mac.has_fwsm) { + fwsm = E1000_READ_REG(hw, E1000_FWSM); + factps = E1000_READ_REG(hw, E1000_FACTPS); + + if (!(factps & E1000_FACTPS_MNGCG) && + ((fwsm & E1000_FWSM_MODE_MASK) == + (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) + return true; + } else if ((hw->mac.type == e1000_82574) || + (hw->mac.type == e1000_82583)) { + u16 data; + s32 ret_val; + + factps = E1000_READ_REG(hw, E1000_FACTPS); + ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &data); + if (ret_val) + return false; + + if (!(factps & E1000_FACTPS_MNGCG) && + ((data & E1000_NVM_INIT_CTRL2_MNGM) == + (e1000_mng_mode_pt << 13))) + return true; + } else if ((manc & E1000_MANC_SMBUS_EN) && + !(manc & E1000_MANC_ASF_EN)) { + return true; + } + + return false; +} + +/** + * e1000_host_interface_command - Writes buffer to host interface + * @hw: pointer to the HW structure + * @buffer: contains a command to write + * @length: the byte length of the buffer, must be multiple of 4 bytes + * + * Writes a buffer to the Host Interface. Upon success, returns E1000_SUCCESS + * else returns E1000_ERR_HOST_INTERFACE_COMMAND. + **/ +s32 e1000_host_interface_command(struct e1000_hw *hw, u8 *buffer, u32 length) +{ + u32 hicr, i; + + DEBUGFUNC("e1000_host_interface_command"); + + if (!(hw->mac.arc_subsystem_valid)) { + DEBUGOUT("Hardware doesn't support host interface command.\n"); + return E1000_SUCCESS; + } + + if (!hw->mac.asf_firmware_present) { + DEBUGOUT("Firmware is not present.\n"); + return E1000_SUCCESS; + } + + if (length == 0 || length & 0x3 || + length > E1000_HI_MAX_BLOCK_BYTE_LENGTH) { + DEBUGOUT("Buffer length failure.\n"); + return -E1000_ERR_HOST_INTERFACE_COMMAND; + } + + /* Check that the host interface is enabled. */ + hicr = E1000_READ_REG(hw, E1000_HICR); + if (!(hicr & E1000_HICR_EN)) { + DEBUGOUT("E1000_HOST_EN bit disabled.\n"); + return -E1000_ERR_HOST_INTERFACE_COMMAND; + } + + /* Calculate length in DWORDs */ + length >>= 2; + + /* The device driver writes the relevant command block + * into the ram area. + */ + for (i = 0; i < length; i++) + E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, i, + *((u32 *)buffer + i)); + + /* Setting this bit tells the ARC that a new command is pending. */ + E1000_WRITE_REG(hw, E1000_HICR, hicr | E1000_HICR_C); + + for (i = 0; i < E1000_HI_COMMAND_TIMEOUT; i++) { + hicr = E1000_READ_REG(hw, E1000_HICR); + if (!(hicr & E1000_HICR_C)) + break; + msec_delay(1); + } + + /* Check command successful completion. */ + if (i == E1000_HI_COMMAND_TIMEOUT || + (!(E1000_READ_REG(hw, E1000_HICR) & E1000_HICR_SV))) { + DEBUGOUT("Command has failed with no status valid.\n"); + return -E1000_ERR_HOST_INTERFACE_COMMAND; + } + + for (i = 0; i < length; i++) + *((u32 *)buffer + i) = E1000_READ_REG_ARRAY_DWORD(hw, + E1000_HOST_IF, + i); + + return E1000_SUCCESS; +} +/** + * e1000_load_firmware - Writes proxy FW code buffer to host interface + * and execute. + * @hw: pointer to the HW structure + * @buffer: contains a firmware to write + * @length: the byte length of the buffer, must be multiple of 4 bytes + * + * Upon success returns E1000_SUCCESS, returns E1000_ERR_CONFIG if not enabled + * in HW else returns E1000_ERR_HOST_INTERFACE_COMMAND. 
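+ *
+ * Illustrative call (fw_image and fw_len are caller-supplied and not
+ * defined here):
+ *
+ *	ret = e1000_load_firmware(hw, fw_image, fw_len);
+ *
+ * fw_len must be a non-zero multiple of 4 bytes no larger than
+ * E1000_HI_FW_MAX_LENGTH, and MAC types older than i210 are rejected
+ * with -E1000_ERR_CONFIG.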
+ **/ +s32 e1000_load_firmware(struct e1000_hw *hw, u8 *buffer, u32 length) +{ + u32 hicr, hibba, fwsm, icr, i; + + DEBUGFUNC("e1000_load_firmware"); + + if (hw->mac.type < e1000_i210) { + DEBUGOUT("Hardware doesn't support loading FW by the driver\n"); + return -E1000_ERR_CONFIG; + } + + /* Check that the host interface is enabled. */ + hicr = E1000_READ_REG(hw, E1000_HICR); + if (!(hicr & E1000_HICR_EN)) { + DEBUGOUT("E1000_HOST_EN bit disabled.\n"); + return -E1000_ERR_CONFIG; + } + if (!(hicr & E1000_HICR_MEMORY_BASE_EN)) { + DEBUGOUT("E1000_HICR_MEMORY_BASE_EN bit disabled.\n"); + return -E1000_ERR_CONFIG; + } + + if (length == 0 || length & 0x3 || length > E1000_HI_FW_MAX_LENGTH) { + DEBUGOUT("Buffer length failure.\n"); + return -E1000_ERR_INVALID_ARGUMENT; + } + + /* Clear notification from ROM-FW by reading ICR register */ + icr = E1000_READ_REG(hw, E1000_ICR_V2); + + /* Reset ROM-FW */ + hicr = E1000_READ_REG(hw, E1000_HICR); + hicr |= E1000_HICR_FW_RESET_ENABLE; + E1000_WRITE_REG(hw, E1000_HICR, hicr); + hicr |= E1000_HICR_FW_RESET; + E1000_WRITE_REG(hw, E1000_HICR, hicr); + E1000_WRITE_FLUSH(hw); + + /* Wait till MAC notifies about its readiness after ROM-FW reset */ + for (i = 0; i < (E1000_HI_COMMAND_TIMEOUT * 2); i++) { + icr = E1000_READ_REG(hw, E1000_ICR_V2); + if (icr & E1000_ICR_MNG) + break; + msec_delay(1); + } + + /* Check for timeout */ + if (i == E1000_HI_COMMAND_TIMEOUT) { + DEBUGOUT("FW reset failed.\n"); + return -E1000_ERR_HOST_INTERFACE_COMMAND; + } + + /* Wait till MAC is ready to accept new FW code */ + for (i = 0; i < E1000_HI_COMMAND_TIMEOUT; i++) { + fwsm = E1000_READ_REG(hw, E1000_FWSM); + if ((fwsm & E1000_FWSM_FW_VALID) && + ((fwsm & E1000_FWSM_MODE_MASK) >> E1000_FWSM_MODE_SHIFT == + E1000_FWSM_HI_EN_ONLY_MODE)) + break; + msec_delay(1); + } + + /* Check for timeout */ + if (i == E1000_HI_COMMAND_TIMEOUT) { + DEBUGOUT("FW reset failed.\n"); + return -E1000_ERR_HOST_INTERFACE_COMMAND; + } + + /* Calculate length in DWORDs */ + length >>= 2; + + /* The device driver writes the relevant FW code block + * into the ram area in DWORDs via 1kB ram addressing window. + */ + for (i = 0; i < length; i++) { + if (!(i % E1000_HI_FW_BLOCK_DWORD_LENGTH)) { + /* Point to correct 1kB ram window */ + hibba = E1000_HI_FW_BASE_ADDRESS + + ((E1000_HI_FW_BLOCK_DWORD_LENGTH << 2) * + (i / E1000_HI_FW_BLOCK_DWORD_LENGTH)); + + E1000_WRITE_REG(hw, E1000_HIBBA, hibba); + } + + E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, + i % E1000_HI_FW_BLOCK_DWORD_LENGTH, + *((u32 *)buffer + i)); + } + + /* Setting this bit tells the ARC that a new FW is ready to execute. */ + hicr = E1000_READ_REG(hw, E1000_HICR); + E1000_WRITE_REG(hw, E1000_HICR, hicr | E1000_HICR_C); + + for (i = 0; i < E1000_HI_COMMAND_TIMEOUT; i++) { + hicr = E1000_READ_REG(hw, E1000_HICR); + if (!(hicr & E1000_HICR_C)) + break; + msec_delay(1); + } + + /* Check for successful FW start. 
*/ + if (i == E1000_HI_COMMAND_TIMEOUT) { + DEBUGOUT("New FW did not start within timeout period.\n"); + return -E1000_ERR_HOST_INTERFACE_COMMAND; + } + + return E1000_SUCCESS; +} + + diff --git a/src/spdk/dpdk/drivers/net/e1000/base/e1000_manage.h b/src/spdk/dpdk/drivers/net/e1000/base/e1000_manage.h new file mode 100644 index 000000000..45dbb0069 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/e1000/base/e1000_manage.h @@ -0,0 +1,66 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001 - 2015 Intel Corporation + */ + +#ifndef _E1000_MANAGE_H_ +#define _E1000_MANAGE_H_ + +bool e1000_check_mng_mode_generic(struct e1000_hw *hw); +bool e1000_enable_tx_pkt_filtering_generic(struct e1000_hw *hw); +s32 e1000_mng_enable_host_if_generic(struct e1000_hw *hw); +s32 e1000_mng_host_if_write_generic(struct e1000_hw *hw, u8 *buffer, + u16 length, u16 offset, u8 *sum); +s32 e1000_mng_write_cmd_header_generic(struct e1000_hw *hw, + struct e1000_host_mng_command_header *hdr); +s32 e1000_mng_write_dhcp_info_generic(struct e1000_hw *hw, + u8 *buffer, u16 length); +bool e1000_enable_mng_pass_thru(struct e1000_hw *hw); +u8 e1000_calculate_checksum(u8 *buffer, u32 length); +s32 e1000_host_interface_command(struct e1000_hw *hw, u8 *buffer, u32 length); +s32 e1000_load_firmware(struct e1000_hw *hw, u8 *buffer, u32 length); + +enum e1000_mng_mode { + e1000_mng_mode_none = 0, + e1000_mng_mode_asf, + e1000_mng_mode_pt, + e1000_mng_mode_ipmi, + e1000_mng_mode_host_if_only +}; + +#define E1000_FACTPS_MNGCG 0x20000000 + +#define E1000_FWSM_MODE_MASK 0xE +#define E1000_FWSM_MODE_SHIFT 1 +#define E1000_FWSM_FW_VALID 0x00008000 +#define E1000_FWSM_HI_EN_ONLY_MODE 0x4 + +#define E1000_MNG_IAMT_MODE 0x3 +#define E1000_MNG_DHCP_COOKIE_LENGTH 0x10 +#define E1000_MNG_DHCP_COOKIE_OFFSET 0x6F0 +#define E1000_MNG_DHCP_COMMAND_TIMEOUT 10 +#define E1000_MNG_DHCP_TX_PAYLOAD_CMD 64 +#define E1000_MNG_DHCP_COOKIE_STATUS_PARSING 0x1 +#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN 0x2 + +#define E1000_VFTA_ENTRY_SHIFT 5 +#define E1000_VFTA_ENTRY_MASK 0x7F +#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F + +#define E1000_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Num of bytes in range */ +#define E1000_HI_MAX_BLOCK_DWORD_LENGTH 448 /* Num of dwords in range */ +#define E1000_HI_COMMAND_TIMEOUT 500 /* Process HI cmd limit */ +#define E1000_HI_FW_BASE_ADDRESS 0x10000 +#define E1000_HI_FW_MAX_LENGTH (64 * 1024) /* Num of bytes */ +#define E1000_HI_FW_BLOCK_DWORD_LENGTH 256 /* Num of DWORDs per page */ +#define E1000_HICR_MEMORY_BASE_EN 0x200 /* MB Enable bit - RO */ +#define E1000_HICR_EN 0x01 /* Enable bit - RO */ +/* Driver sets this bit when done to put command in RAM */ +#define E1000_HICR_C 0x02 +#define E1000_HICR_SV 0x04 /* Status Validity */ +#define E1000_HICR_FW_RESET_ENABLE 0x40 +#define E1000_HICR_FW_RESET 0x80 + +/* Intel(R) Active Management Technology signature */ +#define E1000_IAMT_SIGNATURE 0x544D4149 + +#endif diff --git a/src/spdk/dpdk/drivers/net/e1000/base/e1000_mbx.c b/src/spdk/dpdk/drivers/net/e1000/base/e1000_mbx.c new file mode 100644 index 000000000..6fae6767f --- /dev/null +++ b/src/spdk/dpdk/drivers/net/e1000/base/e1000_mbx.c @@ -0,0 +1,762 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001 - 2015 Intel Corporation + */ + +#include "e1000_mbx.h" + +/** + * e1000_null_mbx_check_for_flag - No-op function, return 0 + * @hw: pointer to the HW structure + **/ +STATIC s32 e1000_null_mbx_check_for_flag(struct e1000_hw E1000_UNUSEDARG *hw, + u16 E1000_UNUSEDARG mbx_id) +{ + 
DEBUGFUNC("e1000_null_mbx_check_flag"); + UNREFERENCED_2PARAMETER(hw, mbx_id); + + return E1000_SUCCESS; +} + +/** + * e1000_null_mbx_transact - No-op function, return 0 + * @hw: pointer to the HW structure + **/ +STATIC s32 e1000_null_mbx_transact(struct e1000_hw E1000_UNUSEDARG *hw, + u32 E1000_UNUSEDARG *msg, + u16 E1000_UNUSEDARG size, + u16 E1000_UNUSEDARG mbx_id) +{ + DEBUGFUNC("e1000_null_mbx_rw_msg"); + UNREFERENCED_4PARAMETER(hw, msg, size, mbx_id); + + return E1000_SUCCESS; +} + +/** + * e1000_read_mbx - Reads a message from the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to read + * + * returns SUCCESS if it successfully read message from buffer + **/ +s32 e1000_read_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + + DEBUGFUNC("e1000_read_mbx"); + + /* limit read to size of mailbox */ + if (size > mbx->size) + size = mbx->size; + + if (mbx->ops.read) + ret_val = mbx->ops.read(hw, msg, size, mbx_id); + + return ret_val; +} + +/** + * e1000_write_mbx - Write a message to the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully copied message into the buffer + **/ +s32 e1000_write_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_write_mbx"); + + if (size > mbx->size) + ret_val = -E1000_ERR_MBX; + + else if (mbx->ops.write) + ret_val = mbx->ops.write(hw, msg, size, mbx_id); + + return ret_val; +} + +/** + * e1000_check_for_msg - checks to see if someone sent us mail + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the Status bit was found or else ERR_MBX + **/ +s32 e1000_check_for_msg(struct e1000_hw *hw, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + + DEBUGFUNC("e1000_check_for_msg"); + + if (mbx->ops.check_for_msg) + ret_val = mbx->ops.check_for_msg(hw, mbx_id); + + return ret_val; +} + +/** + * e1000_check_for_ack - checks to see if someone sent us ACK + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the Status bit was found or else ERR_MBX + **/ +s32 e1000_check_for_ack(struct e1000_hw *hw, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + + DEBUGFUNC("e1000_check_for_ack"); + + if (mbx->ops.check_for_ack) + ret_val = mbx->ops.check_for_ack(hw, mbx_id); + + return ret_val; +} + +/** + * e1000_check_for_rst - checks to see if other side has reset + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the Status bit was found or else ERR_MBX + **/ +s32 e1000_check_for_rst(struct e1000_hw *hw, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + + DEBUGFUNC("e1000_check_for_rst"); + + if (mbx->ops.check_for_rst) + ret_val = mbx->ops.check_for_rst(hw, mbx_id); + + return ret_val; +} + +/** + * e1000_poll_for_msg - Wait for message notification + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully received a message notification + **/ +STATIC s32 e1000_poll_for_msg(struct e1000_hw *hw, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + int countdown = 
mbx->timeout; + + DEBUGFUNC("e1000_poll_for_msg"); + + if (!countdown || !mbx->ops.check_for_msg) + goto out; + + while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) { + countdown--; + if (!countdown) + break; + usec_delay(mbx->usec_delay); + } + + /* if we failed, all future posted messages fail until reset */ + if (!countdown) + mbx->timeout = 0; +out: + return countdown ? E1000_SUCCESS : -E1000_ERR_MBX; +} + +/** + * e1000_poll_for_ack - Wait for message acknowledgement + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully received a message acknowledgement + **/ +STATIC s32 e1000_poll_for_ack(struct e1000_hw *hw, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + int countdown = mbx->timeout; + + DEBUGFUNC("e1000_poll_for_ack"); + + if (!countdown || !mbx->ops.check_for_ack) + goto out; + + while (countdown && mbx->ops.check_for_ack(hw, mbx_id)) { + countdown--; + if (!countdown) + break; + usec_delay(mbx->usec_delay); + } + + /* if we failed, all future posted messages fail until reset */ + if (!countdown) + mbx->timeout = 0; +out: + return countdown ? E1000_SUCCESS : -E1000_ERR_MBX; +} + +/** + * e1000_read_posted_mbx - Wait for message notification and receive message + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully received a message notification and + * copied it into the receive buffer. + **/ +s32 e1000_read_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + + DEBUGFUNC("e1000_read_posted_mbx"); + + if (!mbx->ops.read) + goto out; + + ret_val = e1000_poll_for_msg(hw, mbx_id); + + /* if ack received read message, otherwise we timed out */ + if (!ret_val) + ret_val = mbx->ops.read(hw, msg, size, mbx_id); +out: + return ret_val; +} + +/** + * e1000_write_posted_mbx - Write a message to the mailbox, wait for ack + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully copied message into the buffer and + * received an ack to that message within delay * timeout period + **/ +s32 e1000_write_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + + DEBUGFUNC("e1000_write_posted_mbx"); + + /* exit if either we can't write or there isn't a defined timeout */ + if (!mbx->ops.write || !mbx->timeout) + goto out; + + /* send msg */ + ret_val = mbx->ops.write(hw, msg, size, mbx_id); + + /* if msg sent wait until we receive an ack */ + if (!ret_val) + ret_val = e1000_poll_for_ack(hw, mbx_id); +out: + return ret_val; +} + +/** + * e1000_init_mbx_ops_generic - Initialize mbx function pointers + * @hw: pointer to the HW structure + * + * Sets the function pointers to no-op functions + **/ +void e1000_init_mbx_ops_generic(struct e1000_hw *hw) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + mbx->ops.init_params = e1000_null_ops_generic; + mbx->ops.read = e1000_null_mbx_transact; + mbx->ops.write = e1000_null_mbx_transact; + mbx->ops.check_for_msg = e1000_null_mbx_check_for_flag; + mbx->ops.check_for_ack = e1000_null_mbx_check_for_flag; + mbx->ops.check_for_rst = e1000_null_mbx_check_for_flag; + mbx->ops.read_posted = e1000_read_posted_mbx; + mbx->ops.write_posted = e1000_write_posted_mbx; +} + +/** + * 
e1000_read_v2p_mailbox - read v2p mailbox + * @hw: pointer to the HW structure + * + * This function is used to read the v2p mailbox without losing the read to + * clear status bits. + **/ +STATIC u32 e1000_read_v2p_mailbox(struct e1000_hw *hw) +{ + u32 v2p_mailbox = E1000_READ_REG(hw, E1000_V2PMAILBOX(0)); + + v2p_mailbox |= hw->dev_spec.vf.v2p_mailbox; + hw->dev_spec.vf.v2p_mailbox |= v2p_mailbox & E1000_V2PMAILBOX_R2C_BITS; + + return v2p_mailbox; +} + +/** + * e1000_check_for_bit_vf - Determine if a status bit was set + * @hw: pointer to the HW structure + * @mask: bitmask for bits to be tested and cleared + * + * This function is used to check for the read to clear bits within + * the V2P mailbox. + **/ +STATIC s32 e1000_check_for_bit_vf(struct e1000_hw *hw, u32 mask) +{ + u32 v2p_mailbox = e1000_read_v2p_mailbox(hw); + s32 ret_val = -E1000_ERR_MBX; + + if (v2p_mailbox & mask) + ret_val = E1000_SUCCESS; + + hw->dev_spec.vf.v2p_mailbox &= ~mask; + + return ret_val; +} + +/** + * e1000_check_for_msg_vf - checks to see if the PF has sent mail + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the PF has set the Status bit or else ERR_MBX + **/ +STATIC s32 e1000_check_for_msg_vf(struct e1000_hw *hw, + u16 E1000_UNUSEDARG mbx_id) +{ + s32 ret_val = -E1000_ERR_MBX; + + UNREFERENCED_1PARAMETER(mbx_id); + DEBUGFUNC("e1000_check_for_msg_vf"); + + if (!e1000_check_for_bit_vf(hw, E1000_V2PMAILBOX_PFSTS)) { + ret_val = E1000_SUCCESS; + hw->mbx.stats.reqs++; + } + + return ret_val; +} + +/** + * e1000_check_for_ack_vf - checks to see if the PF has ACK'd + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the PF has set the ACK bit or else ERR_MBX + **/ +STATIC s32 e1000_check_for_ack_vf(struct e1000_hw *hw, + u16 E1000_UNUSEDARG mbx_id) +{ + s32 ret_val = -E1000_ERR_MBX; + + UNREFERENCED_1PARAMETER(mbx_id); + DEBUGFUNC("e1000_check_for_ack_vf"); + + if (!e1000_check_for_bit_vf(hw, E1000_V2PMAILBOX_PFACK)) { + ret_val = E1000_SUCCESS; + hw->mbx.stats.acks++; + } + + return ret_val; +} + +/** + * e1000_check_for_rst_vf - checks to see if the PF has reset + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns true if the PF has set the reset done bit or else false + **/ +STATIC s32 e1000_check_for_rst_vf(struct e1000_hw *hw, + u16 E1000_UNUSEDARG mbx_id) +{ + s32 ret_val = -E1000_ERR_MBX; + + UNREFERENCED_1PARAMETER(mbx_id); + DEBUGFUNC("e1000_check_for_rst_vf"); + + if (!e1000_check_for_bit_vf(hw, (E1000_V2PMAILBOX_RSTD | + E1000_V2PMAILBOX_RSTI))) { + ret_val = E1000_SUCCESS; + hw->mbx.stats.rsts++; + } + + return ret_val; +} + +/** + * e1000_obtain_mbx_lock_vf - obtain mailbox lock + * @hw: pointer to the HW structure + * + * return SUCCESS if we obtained the mailbox lock + **/ +STATIC s32 e1000_obtain_mbx_lock_vf(struct e1000_hw *hw) +{ + s32 ret_val = -E1000_ERR_MBX; + int count = 10; + + DEBUGFUNC("e1000_obtain_mbx_lock_vf"); + + do { + /* Take ownership of the buffer */ + E1000_WRITE_REG(hw, E1000_V2PMAILBOX(0), E1000_V2PMAILBOX_VFU); + + /* reserve mailbox for vf use */ + if (e1000_read_v2p_mailbox(hw) & E1000_V2PMAILBOX_VFU) { + ret_val = E1000_SUCCESS; + break; + } + usec_delay(1000); + } while (count-- > 0); + + return ret_val; +} + +/** + * e1000_write_mbx_vf - Write a message to the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it 
successfully copied message into the buffer + **/ +STATIC s32 e1000_write_mbx_vf(struct e1000_hw *hw, u32 *msg, u16 size, + u16 E1000_UNUSEDARG mbx_id) +{ + s32 ret_val; + u16 i; + + UNREFERENCED_1PARAMETER(mbx_id); + + DEBUGFUNC("e1000_write_mbx_vf"); + + /* lock the mailbox to prevent pf/vf race condition */ + ret_val = e1000_obtain_mbx_lock_vf(hw); + if (ret_val) + goto out_no_write; + + /* flush msg and acks as we are overwriting the message buffer */ + e1000_check_for_msg_vf(hw, 0); + e1000_check_for_ack_vf(hw, 0); + + /* copy the caller specified message to the mailbox memory buffer */ + for (i = 0; i < size; i++) + E1000_WRITE_REG_ARRAY(hw, E1000_VMBMEM(0), i, msg[i]); + + /* update stats */ + hw->mbx.stats.msgs_tx++; + + /* Drop VFU and interrupt the PF to tell it a message has been sent */ + E1000_WRITE_REG(hw, E1000_V2PMAILBOX(0), E1000_V2PMAILBOX_REQ); + +out_no_write: + return ret_val; +} + +/** + * e1000_read_mbx_vf - Reads a message from the inbox intended for vf + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to read + * + * returns SUCCESS if it successfully read message from buffer + **/ +STATIC s32 e1000_read_mbx_vf(struct e1000_hw *hw, u32 *msg, u16 size, + u16 E1000_UNUSEDARG mbx_id) +{ + s32 ret_val = E1000_SUCCESS; + u16 i; + + DEBUGFUNC("e1000_read_mbx_vf"); + UNREFERENCED_1PARAMETER(mbx_id); + + /* lock the mailbox to prevent pf/vf race condition */ + ret_val = e1000_obtain_mbx_lock_vf(hw); + if (ret_val) + goto out_no_read; + + /* copy the message from the mailbox memory buffer */ + for (i = 0; i < size; i++) + msg[i] = E1000_READ_REG_ARRAY(hw, E1000_VMBMEM(0), i); + + /* Acknowledge receipt and release mailbox, then we're done */ + E1000_WRITE_REG(hw, E1000_V2PMAILBOX(0), E1000_V2PMAILBOX_ACK); + + /* update stats */ + hw->mbx.stats.msgs_rx++; + +out_no_read: + return ret_val; +} + +/** + * e1000_init_mbx_params_vf - set initial values for vf mailbox + * @hw: pointer to the HW structure + * + * Initializes the hw->mbx struct to correct values for vf mailbox + */ +s32 e1000_init_mbx_params_vf(struct e1000_hw *hw) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + + /* start mailbox as timed out and let the reset_hw call set the timeout + * value to begin communications */ + mbx->timeout = 0; + mbx->usec_delay = E1000_VF_MBX_INIT_DELAY; + + mbx->size = E1000_VFMAILBOX_SIZE; + + mbx->ops.read = e1000_read_mbx_vf; + mbx->ops.write = e1000_write_mbx_vf; + mbx->ops.read_posted = e1000_read_posted_mbx; + mbx->ops.write_posted = e1000_write_posted_mbx; + mbx->ops.check_for_msg = e1000_check_for_msg_vf; + mbx->ops.check_for_ack = e1000_check_for_ack_vf; + mbx->ops.check_for_rst = e1000_check_for_rst_vf; + + mbx->stats.msgs_tx = 0; + mbx->stats.msgs_rx = 0; + mbx->stats.reqs = 0; + mbx->stats.acks = 0; + mbx->stats.rsts = 0; + + return E1000_SUCCESS; +} + +STATIC s32 e1000_check_for_bit_pf(struct e1000_hw *hw, u32 mask) +{ + u32 mbvficr = E1000_READ_REG(hw, E1000_MBVFICR); + s32 ret_val = -E1000_ERR_MBX; + + if (mbvficr & mask) { + ret_val = E1000_SUCCESS; + E1000_WRITE_REG(hw, E1000_MBVFICR, mask); + } + + return ret_val; +} + +/** + * e1000_check_for_msg_pf - checks to see if the VF has sent mail + * @hw: pointer to the HW structure + * @vf_number: the VF index + * + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX + **/ +STATIC s32 e1000_check_for_msg_pf(struct e1000_hw *hw, u16 vf_number) +{ + s32 ret_val = -E1000_ERR_MBX; + + DEBUGFUNC("e1000_check_for_msg_pf"); + + if 
(!e1000_check_for_bit_pf(hw, E1000_MBVFICR_VFREQ_VF1 << vf_number)) { + ret_val = E1000_SUCCESS; + hw->mbx.stats.reqs++; + } + + return ret_val; +} + +/** + * e1000_check_for_ack_pf - checks to see if the VF has ACKed + * @hw: pointer to the HW structure + * @vf_number: the VF index + * + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX + **/ +STATIC s32 e1000_check_for_ack_pf(struct e1000_hw *hw, u16 vf_number) +{ + s32 ret_val = -E1000_ERR_MBX; + + DEBUGFUNC("e1000_check_for_ack_pf"); + + if (!e1000_check_for_bit_pf(hw, E1000_MBVFICR_VFACK_VF1 << vf_number)) { + ret_val = E1000_SUCCESS; + hw->mbx.stats.acks++; + } + + return ret_val; +} + +/** + * e1000_check_for_rst_pf - checks to see if the VF has reset + * @hw: pointer to the HW structure + * @vf_number: the VF index + * + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX + **/ +STATIC s32 e1000_check_for_rst_pf(struct e1000_hw *hw, u16 vf_number) +{ + u32 vflre = E1000_READ_REG(hw, E1000_VFLRE); + s32 ret_val = -E1000_ERR_MBX; + + DEBUGFUNC("e1000_check_for_rst_pf"); + + if (vflre & (1 << vf_number)) { + ret_val = E1000_SUCCESS; + E1000_WRITE_REG(hw, E1000_VFLRE, (1 << vf_number)); + hw->mbx.stats.rsts++; + } + + return ret_val; +} + +/** + * e1000_obtain_mbx_lock_pf - obtain mailbox lock + * @hw: pointer to the HW structure + * @vf_number: the VF index + * + * return SUCCESS if we obtained the mailbox lock + **/ +STATIC s32 e1000_obtain_mbx_lock_pf(struct e1000_hw *hw, u16 vf_number) +{ + s32 ret_val = -E1000_ERR_MBX; + u32 p2v_mailbox; + int count = 10; + + DEBUGFUNC("e1000_obtain_mbx_lock_pf"); + + do { + /* Take ownership of the buffer */ + E1000_WRITE_REG(hw, E1000_P2VMAILBOX(vf_number), + E1000_P2VMAILBOX_PFU); + + /* reserve mailbox for pf use */ + p2v_mailbox = E1000_READ_REG(hw, E1000_P2VMAILBOX(vf_number)); + if (p2v_mailbox & E1000_P2VMAILBOX_PFU) { + ret_val = E1000_SUCCESS; + break; + } + usec_delay(1000); + } while (count-- > 0); + + return ret_val; + +} + +/** + * e1000_write_mbx_pf - Places a message in the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @vf_number: the VF index + * + * returns SUCCESS if it successfully copied message into the buffer + **/ +STATIC s32 e1000_write_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size, + u16 vf_number) +{ + s32 ret_val; + u16 i; + + DEBUGFUNC("e1000_write_mbx_pf"); + + /* lock the mailbox to prevent pf/vf race condition */ + ret_val = e1000_obtain_mbx_lock_pf(hw, vf_number); + if (ret_val) + goto out_no_write; + + /* flush msg and acks as we are overwriting the message buffer */ + e1000_check_for_msg_pf(hw, vf_number); + e1000_check_for_ack_pf(hw, vf_number); + + /* copy the caller specified message to the mailbox memory buffer */ + for (i = 0; i < size; i++) + E1000_WRITE_REG_ARRAY(hw, E1000_VMBMEM(vf_number), i, msg[i]); + + /* Interrupt VF to tell it a message has been sent and release buffer*/ + E1000_WRITE_REG(hw, E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_STS); + + /* update stats */ + hw->mbx.stats.msgs_tx++; + +out_no_write: + return ret_val; + +} + +/** + * e1000_read_mbx_pf - Read a message from the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @vf_number: the VF index + * + * This function copies a message from the mailbox buffer to the caller's + * memory buffer. The presumption is that the caller knows that there was + * a message due to a VF request so no polling for message is needed. 
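+ *
+ * For example, a PF host that saw e1000_check_for_msg() succeed for a
+ * given vf_number would typically fetch the request with
+ * mbx->ops.read(hw, msg, size, vf_number), which resolves to this
+ * function on 82576/i350/i354 (see e1000_init_mbx_params_pf()).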
+ **/ +STATIC s32 e1000_read_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size, + u16 vf_number) +{ + s32 ret_val; + u16 i; + + DEBUGFUNC("e1000_read_mbx_pf"); + + /* lock the mailbox to prevent pf/vf race condition */ + ret_val = e1000_obtain_mbx_lock_pf(hw, vf_number); + if (ret_val) + goto out_no_read; + + /* copy the message to the mailbox memory buffer */ + for (i = 0; i < size; i++) + msg[i] = E1000_READ_REG_ARRAY(hw, E1000_VMBMEM(vf_number), i); + + /* Acknowledge the message and release buffer */ + E1000_WRITE_REG(hw, E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_ACK); + + /* update stats */ + hw->mbx.stats.msgs_rx++; + +out_no_read: + return ret_val; +} + +/** + * e1000_init_mbx_params_pf - set initial values for pf mailbox + * @hw: pointer to the HW structure + * + * Initializes the hw->mbx struct to correct values for pf mailbox + */ +s32 e1000_init_mbx_params_pf(struct e1000_hw *hw) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + + switch (hw->mac.type) { + case e1000_82576: + case e1000_i350: + case e1000_i354: + mbx->timeout = 0; + mbx->usec_delay = 0; + + mbx->size = E1000_VFMAILBOX_SIZE; + + mbx->ops.read = e1000_read_mbx_pf; + mbx->ops.write = e1000_write_mbx_pf; + mbx->ops.read_posted = e1000_read_posted_mbx; + mbx->ops.write_posted = e1000_write_posted_mbx; + mbx->ops.check_for_msg = e1000_check_for_msg_pf; + mbx->ops.check_for_ack = e1000_check_for_ack_pf; + mbx->ops.check_for_rst = e1000_check_for_rst_pf; + + mbx->stats.msgs_tx = 0; + mbx->stats.msgs_rx = 0; + mbx->stats.reqs = 0; + mbx->stats.acks = 0; + mbx->stats.rsts = 0; + default: + return E1000_SUCCESS; + } +} + diff --git a/src/spdk/dpdk/drivers/net/e1000/base/e1000_mbx.h b/src/spdk/dpdk/drivers/net/e1000/base/e1000_mbx.h new file mode 100644 index 000000000..683781e21 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/e1000/base/e1000_mbx.h @@ -0,0 +1,76 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001 - 2015 Intel Corporation + */ + +#ifndef _E1000_MBX_H_ +#define _E1000_MBX_H_ + +#include "e1000_api.h" + +/* Define mailbox register bits */ +#define E1000_V2PMAILBOX_REQ 0x00000001 /* Request for PF Ready bit */ +#define E1000_V2PMAILBOX_ACK 0x00000002 /* Ack PF message received */ +#define E1000_V2PMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ +#define E1000_V2PMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ +#define E1000_V2PMAILBOX_PFSTS 0x00000010 /* PF wrote a message in the MB */ +#define E1000_V2PMAILBOX_PFACK 0x00000020 /* PF ack the previous VF msg */ +#define E1000_V2PMAILBOX_RSTI 0x00000040 /* PF has reset indication */ +#define E1000_V2PMAILBOX_RSTD 0x00000080 /* PF has indicated reset done */ +#define E1000_V2PMAILBOX_R2C_BITS 0x000000B0 /* All read to clear bits */ + +#define E1000_P2VMAILBOX_STS 0x00000001 /* Initiate message send to VF */ +#define E1000_P2VMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */ +#define E1000_P2VMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ +#define E1000_P2VMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ +#define E1000_P2VMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */ + +#define E1000_MBVFICR_VFREQ_MASK 0x000000FF /* bits for VF messages */ +#define E1000_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */ +#define E1000_MBVFICR_VFACK_MASK 0x00FF0000 /* bits for VF acks */ +#define E1000_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */ + +#define E1000_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */ + +/* If it's a E1000_VF_* msg then it originates in the VF and is sent to the + * PF. 
The reverse is true if it is E1000_PF_*. + * Message ACK's are the value or'd with 0xF0000000 + */ +/* Msgs below or'd with this are the ACK */ +#define E1000_VT_MSGTYPE_ACK 0x80000000 +/* Msgs below or'd with this are the NACK */ +#define E1000_VT_MSGTYPE_NACK 0x40000000 +/* Indicates that VF is still clear to send requests */ +#define E1000_VT_MSGTYPE_CTS 0x20000000 +#define E1000_VT_MSGINFO_SHIFT 16 +/* bits 23:16 are used for extra info for certain messages */ +#define E1000_VT_MSGINFO_MASK (0xFF << E1000_VT_MSGINFO_SHIFT) + +#define E1000_VF_RESET 0x01 /* VF requests reset */ +#define E1000_VF_SET_MAC_ADDR 0x02 /* VF requests to set MAC addr */ +#define E1000_VF_SET_MULTICAST 0x03 /* VF requests to set MC addr */ +#define E1000_VF_SET_MULTICAST_COUNT_MASK (0x1F << E1000_VT_MSGINFO_SHIFT) +#define E1000_VF_SET_MULTICAST_OVERFLOW (0x80 << E1000_VT_MSGINFO_SHIFT) +#define E1000_VF_SET_VLAN 0x04 /* VF requests to set VLAN */ +#define E1000_VF_SET_VLAN_ADD (0x01 << E1000_VT_MSGINFO_SHIFT) +#define E1000_VF_SET_LPE 0x05 /* reqs to set VMOLR.LPE */ +#define E1000_VF_SET_PROMISC 0x06 /* reqs to clear VMOLR.ROPE/MPME*/ +#define E1000_VF_SET_PROMISC_UNICAST (0x01 << E1000_VT_MSGINFO_SHIFT) +#define E1000_VF_SET_PROMISC_MULTICAST (0x02 << E1000_VT_MSGINFO_SHIFT) + +#define E1000_PF_CONTROL_MSG 0x0100 /* PF control message */ + +#define E1000_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */ +#define E1000_VF_MBX_INIT_DELAY 500 /* microseconds between retries */ + +s32 e1000_read_mbx(struct e1000_hw *, u32 *, u16, u16); +s32 e1000_write_mbx(struct e1000_hw *, u32 *, u16, u16); +s32 e1000_read_posted_mbx(struct e1000_hw *, u32 *, u16, u16); +s32 e1000_write_posted_mbx(struct e1000_hw *, u32 *, u16, u16); +s32 e1000_check_for_msg(struct e1000_hw *, u16); +s32 e1000_check_for_ack(struct e1000_hw *, u16); +s32 e1000_check_for_rst(struct e1000_hw *, u16); +void e1000_init_mbx_ops_generic(struct e1000_hw *hw); +s32 e1000_init_mbx_params_vf(struct e1000_hw *); +s32 e1000_init_mbx_params_pf(struct e1000_hw *); + +#endif /* _E1000_MBX_H_ */ diff --git a/src/spdk/dpdk/drivers/net/e1000/base/e1000_nvm.c b/src/spdk/dpdk/drivers/net/e1000/base/e1000_nvm.c new file mode 100644 index 000000000..56e2db122 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/e1000/base/e1000_nvm.c @@ -0,0 +1,1356 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001 - 2015 Intel Corporation + */ + +#include "e1000_api.h" + +STATIC void e1000_reload_nvm_generic(struct e1000_hw *hw); + +/** + * e1000_init_nvm_ops_generic - Initialize NVM function pointers + * @hw: pointer to the HW structure + * + * Setups up the function pointers to no-op functions + **/ +void e1000_init_nvm_ops_generic(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + DEBUGFUNC("e1000_init_nvm_ops_generic"); + + /* Initialize function pointers */ + nvm->ops.init_params = e1000_null_ops_generic; + nvm->ops.acquire = e1000_null_ops_generic; + nvm->ops.read = e1000_null_read_nvm; + nvm->ops.release = e1000_null_nvm_generic; + nvm->ops.reload = e1000_reload_nvm_generic; + nvm->ops.update = e1000_null_ops_generic; + nvm->ops.valid_led_default = e1000_null_led_default; + nvm->ops.validate = e1000_null_ops_generic; + nvm->ops.write = e1000_null_write_nvm; +} + +/** + * e1000_null_nvm_read - No-op function, return 0 + * @hw: pointer to the HW structure + **/ +s32 e1000_null_read_nvm(struct e1000_hw E1000_UNUSEDARG *hw, + u16 E1000_UNUSEDARG a, u16 E1000_UNUSEDARG b, + u16 E1000_UNUSEDARG *c) +{ + DEBUGFUNC("e1000_null_read_nvm"); + 
UNREFERENCED_4PARAMETER(hw, a, b, c); + return E1000_SUCCESS; +} + +/** + * e1000_null_nvm_generic - No-op function, return void + * @hw: pointer to the HW structure + **/ +void e1000_null_nvm_generic(struct e1000_hw E1000_UNUSEDARG *hw) +{ + DEBUGFUNC("e1000_null_nvm_generic"); + UNREFERENCED_1PARAMETER(hw); + return; +} + +/** + * e1000_null_led_default - No-op function, return 0 + * @hw: pointer to the HW structure + **/ +s32 e1000_null_led_default(struct e1000_hw E1000_UNUSEDARG *hw, + u16 E1000_UNUSEDARG *data) +{ + DEBUGFUNC("e1000_null_led_default"); + UNREFERENCED_2PARAMETER(hw, data); + return E1000_SUCCESS; +} + +/** + * e1000_null_write_nvm - No-op function, return 0 + * @hw: pointer to the HW structure + **/ +s32 e1000_null_write_nvm(struct e1000_hw E1000_UNUSEDARG *hw, + u16 E1000_UNUSEDARG a, u16 E1000_UNUSEDARG b, + u16 E1000_UNUSEDARG *c) +{ + DEBUGFUNC("e1000_null_write_nvm"); + UNREFERENCED_4PARAMETER(hw, a, b, c); + return E1000_SUCCESS; +} + +/** + * e1000_raise_eec_clk - Raise EEPROM clock + * @hw: pointer to the HW structure + * @eecd: pointer to the EEPROM + * + * Enable/Raise the EEPROM clock bit. + **/ +STATIC void e1000_raise_eec_clk(struct e1000_hw *hw, u32 *eecd) +{ + *eecd = *eecd | E1000_EECD_SK; + E1000_WRITE_REG(hw, E1000_EECD, *eecd); + E1000_WRITE_FLUSH(hw); + usec_delay(hw->nvm.delay_usec); +} + +/** + * e1000_lower_eec_clk - Lower EEPROM clock + * @hw: pointer to the HW structure + * @eecd: pointer to the EEPROM + * + * Clear/Lower the EEPROM clock bit. + **/ +STATIC void e1000_lower_eec_clk(struct e1000_hw *hw, u32 *eecd) +{ + *eecd = *eecd & ~E1000_EECD_SK; + E1000_WRITE_REG(hw, E1000_EECD, *eecd); + E1000_WRITE_FLUSH(hw); + usec_delay(hw->nvm.delay_usec); +} + +/** + * e1000_shift_out_eec_bits - Shift data bits our to the EEPROM + * @hw: pointer to the HW structure + * @data: data to send to the EEPROM + * @count: number of bits to shift out + * + * We need to shift 'count' bits out to the EEPROM. So, the value in the + * "data" parameter will be shifted out to the EEPROM one bit at a time. + * In order to do this, "data" must be broken down into bits. + **/ +STATIC void e1000_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 eecd = E1000_READ_REG(hw, E1000_EECD); + u32 mask; + + DEBUGFUNC("e1000_shift_out_eec_bits"); + + mask = 0x01 << (count - 1); + if (nvm->type == e1000_nvm_eeprom_microwire) + eecd &= ~E1000_EECD_DO; + else + if (nvm->type == e1000_nvm_eeprom_spi) + eecd |= E1000_EECD_DO; + + do { + eecd &= ~E1000_EECD_DI; + + if (data & mask) + eecd |= E1000_EECD_DI; + + E1000_WRITE_REG(hw, E1000_EECD, eecd); + E1000_WRITE_FLUSH(hw); + + usec_delay(nvm->delay_usec); + + e1000_raise_eec_clk(hw, &eecd); + e1000_lower_eec_clk(hw, &eecd); + + mask >>= 1; + } while (mask); + + eecd &= ~E1000_EECD_DI; + E1000_WRITE_REG(hw, E1000_EECD, eecd); +} + +/** + * e1000_shift_in_eec_bits - Shift data bits in from the EEPROM + * @hw: pointer to the HW structure + * @count: number of bits to shift in + * + * In order to read a register from the EEPROM, we need to shift 'count' bits + * in from the EEPROM. Bits are "shifted in" by raising the clock input to + * the EEPROM (setting the SK bit), and then reading the value of the data out + * "DO" bit. During this "shifting in" process the data in "DI" bit should + * always be clear. 
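+ *
+ * For example, e1000_shift_in_eec_bits(hw, 16) pulses SK sixteen times
+ * and assembles the sampled DO bits MSB-first into the returned word.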
+ **/ +STATIC u16 e1000_shift_in_eec_bits(struct e1000_hw *hw, u16 count) +{ + u32 eecd; + u32 i; + u16 data; + + DEBUGFUNC("e1000_shift_in_eec_bits"); + + eecd = E1000_READ_REG(hw, E1000_EECD); + + eecd &= ~(E1000_EECD_DO | E1000_EECD_DI); + data = 0; + + for (i = 0; i < count; i++) { + data <<= 1; + e1000_raise_eec_clk(hw, &eecd); + + eecd = E1000_READ_REG(hw, E1000_EECD); + + eecd &= ~E1000_EECD_DI; + if (eecd & E1000_EECD_DO) + data |= 1; + + e1000_lower_eec_clk(hw, &eecd); + } + + return data; +} + +/** + * e1000_poll_eerd_eewr_done - Poll for EEPROM read/write completion + * @hw: pointer to the HW structure + * @ee_reg: EEPROM flag for polling + * + * Polls the EEPROM status bit for either read or write completion based + * upon the value of 'ee_reg'. + **/ +s32 e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg) +{ + u32 attempts = 100000; + u32 i, reg = 0; + + DEBUGFUNC("e1000_poll_eerd_eewr_done"); + + for (i = 0; i < attempts; i++) { + if (ee_reg == E1000_NVM_POLL_READ) + reg = E1000_READ_REG(hw, E1000_EERD); + else + reg = E1000_READ_REG(hw, E1000_EEWR); + + if (reg & E1000_NVM_RW_REG_DONE) + return E1000_SUCCESS; + + usec_delay(5); + } + + return -E1000_ERR_NVM; +} + +/** + * e1000_acquire_nvm_generic - Generic request for access to EEPROM + * @hw: pointer to the HW structure + * + * Set the EEPROM access request bit and wait for EEPROM access grant bit. + * Return successful if access grant bit set, else clear the request for + * EEPROM access and return -E1000_ERR_NVM (-1). + **/ +s32 e1000_acquire_nvm_generic(struct e1000_hw *hw) +{ + u32 eecd = E1000_READ_REG(hw, E1000_EECD); + s32 timeout = E1000_NVM_GRANT_ATTEMPTS; + + DEBUGFUNC("e1000_acquire_nvm_generic"); + + E1000_WRITE_REG(hw, E1000_EECD, eecd | E1000_EECD_REQ); + eecd = E1000_READ_REG(hw, E1000_EECD); + + while (timeout) { + if (eecd & E1000_EECD_GNT) + break; + usec_delay(5); + eecd = E1000_READ_REG(hw, E1000_EECD); + timeout--; + } + + if (!timeout) { + eecd &= ~E1000_EECD_REQ; + E1000_WRITE_REG(hw, E1000_EECD, eecd); + DEBUGOUT("Could not acquire NVM grant\n"); + return -E1000_ERR_NVM; + } + + return E1000_SUCCESS; +} + +/** + * e1000_standby_nvm - Return EEPROM to standby state + * @hw: pointer to the HW structure + * + * Return the EEPROM to a standby state. + **/ +STATIC void e1000_standby_nvm(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 eecd = E1000_READ_REG(hw, E1000_EECD); + + DEBUGFUNC("e1000_standby_nvm"); + + if (nvm->type == e1000_nvm_eeprom_microwire) { + eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); + E1000_WRITE_REG(hw, E1000_EECD, eecd); + E1000_WRITE_FLUSH(hw); + usec_delay(nvm->delay_usec); + + e1000_raise_eec_clk(hw, &eecd); + + /* Select EEPROM */ + eecd |= E1000_EECD_CS; + E1000_WRITE_REG(hw, E1000_EECD, eecd); + E1000_WRITE_FLUSH(hw); + usec_delay(nvm->delay_usec); + + e1000_lower_eec_clk(hw, &eecd); + } else if (nvm->type == e1000_nvm_eeprom_spi) { + /* Toggle CS to flush commands */ + eecd |= E1000_EECD_CS; + E1000_WRITE_REG(hw, E1000_EECD, eecd); + E1000_WRITE_FLUSH(hw); + usec_delay(nvm->delay_usec); + eecd &= ~E1000_EECD_CS; + E1000_WRITE_REG(hw, E1000_EECD, eecd); + E1000_WRITE_FLUSH(hw); + usec_delay(nvm->delay_usec); + } +} + +/** + * e1000_stop_nvm - Terminate EEPROM command + * @hw: pointer to the HW structure + * + * Terminates the current command by inverting the EEPROM's chip select pin. 
+ **/ +void e1000_stop_nvm(struct e1000_hw *hw) +{ + u32 eecd; + + DEBUGFUNC("e1000_stop_nvm"); + + eecd = E1000_READ_REG(hw, E1000_EECD); + if (hw->nvm.type == e1000_nvm_eeprom_spi) { + /* Pull CS high */ + eecd |= E1000_EECD_CS; + e1000_lower_eec_clk(hw, &eecd); + } else if (hw->nvm.type == e1000_nvm_eeprom_microwire) { + /* CS on Microwire is active-high */ + eecd &= ~(E1000_EECD_CS | E1000_EECD_DI); + E1000_WRITE_REG(hw, E1000_EECD, eecd); + e1000_raise_eec_clk(hw, &eecd); + e1000_lower_eec_clk(hw, &eecd); + } +} + +/** + * e1000_release_nvm_generic - Release exclusive access to EEPROM + * @hw: pointer to the HW structure + * + * Stop any current commands to the EEPROM and clear the EEPROM request bit. + **/ +void e1000_release_nvm_generic(struct e1000_hw *hw) +{ + u32 eecd; + + DEBUGFUNC("e1000_release_nvm_generic"); + + e1000_stop_nvm(hw); + + eecd = E1000_READ_REG(hw, E1000_EECD); + eecd &= ~E1000_EECD_REQ; + E1000_WRITE_REG(hw, E1000_EECD, eecd); +} + +/** + * e1000_ready_nvm_eeprom - Prepares EEPROM for read/write + * @hw: pointer to the HW structure + * + * Setups the EEPROM for reading and writing. + **/ +STATIC s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 eecd = E1000_READ_REG(hw, E1000_EECD); + u8 spi_stat_reg; + + DEBUGFUNC("e1000_ready_nvm_eeprom"); + + if (nvm->type == e1000_nvm_eeprom_microwire) { + /* Clear SK and DI */ + eecd &= ~(E1000_EECD_DI | E1000_EECD_SK); + E1000_WRITE_REG(hw, E1000_EECD, eecd); + /* Set CS */ + eecd |= E1000_EECD_CS; + E1000_WRITE_REG(hw, E1000_EECD, eecd); + } else if (nvm->type == e1000_nvm_eeprom_spi) { + u16 timeout = NVM_MAX_RETRY_SPI; + + /* Clear SK and CS */ + eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); + E1000_WRITE_REG(hw, E1000_EECD, eecd); + E1000_WRITE_FLUSH(hw); + usec_delay(1); + + /* Read "Status Register" repeatedly until the LSB is cleared. + * The EEPROM will signal that the command has been completed + * by clearing bit 0 of the internal status register. If it's + * not cleared within 'timeout', then error out. + */ + while (timeout) { + e1000_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI, + hw->nvm.opcode_bits); + spi_stat_reg = (u8)e1000_shift_in_eec_bits(hw, 8); + if (!(spi_stat_reg & NVM_STATUS_RDY_SPI)) + break; + + usec_delay(5); + e1000_standby_nvm(hw); + timeout--; + } + + if (!timeout) { + DEBUGOUT("SPI NVM Status error\n"); + return -E1000_ERR_NVM; + } + } + + return E1000_SUCCESS; +} + +/** + * e1000_read_nvm_spi - Read EEPROM's using SPI + * @hw: pointer to the HW structure + * @offset: offset of word in the EEPROM to read + * @words: number of words to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM. + **/ +s32 e1000_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 i = 0; + s32 ret_val; + u16 word_in; + u8 read_opcode = NVM_READ_OPCODE_SPI; + + DEBUGFUNC("e1000_read_nvm_spi"); + + /* A check for invalid values: offset too large, too many words, + * and not enough words. 
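+	 * For example, with a 64-word EEPROM a request for 8 words at
+	 * offset 60 is rejected, since only 4 words remain past the offset.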
+ */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { + DEBUGOUT("nvm parameter(s) out of bounds\n"); + return -E1000_ERR_NVM; + } + + ret_val = nvm->ops.acquire(hw); + if (ret_val) + return ret_val; + + ret_val = e1000_ready_nvm_eeprom(hw); + if (ret_val) + goto release; + + e1000_standby_nvm(hw); + + if ((nvm->address_bits == 8) && (offset >= 128)) + read_opcode |= NVM_A8_OPCODE_SPI; + + /* Send the READ command (opcode + addr) */ + e1000_shift_out_eec_bits(hw, read_opcode, nvm->opcode_bits); + e1000_shift_out_eec_bits(hw, (u16)(offset*2), nvm->address_bits); + + /* Read the data. SPI NVMs increment the address with each byte + * read and will roll over if reading beyond the end. This allows + * us to read the whole NVM from any offset + */ + for (i = 0; i < words; i++) { + word_in = e1000_shift_in_eec_bits(hw, 16); + data[i] = (word_in >> 8) | (word_in << 8); + } + +release: + nvm->ops.release(hw); + + return ret_val; +} + +/** + * e1000_read_nvm_microwire - Reads EEPROM's using microwire + * @hw: pointer to the HW structure + * @offset: offset of word in the EEPROM to read + * @words: number of words to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM. + **/ +s32 e1000_read_nvm_microwire(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 i = 0; + s32 ret_val; + u8 read_opcode = NVM_READ_OPCODE_MICROWIRE; + + DEBUGFUNC("e1000_read_nvm_microwire"); + + /* A check for invalid values: offset too large, too many words, + * and not enough words. + */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { + DEBUGOUT("nvm parameter(s) out of bounds\n"); + return -E1000_ERR_NVM; + } + + ret_val = nvm->ops.acquire(hw); + if (ret_val) + return ret_val; + + ret_val = e1000_ready_nvm_eeprom(hw); + if (ret_val) + goto release; + + for (i = 0; i < words; i++) { + /* Send the READ command (opcode + addr) */ + e1000_shift_out_eec_bits(hw, read_opcode, nvm->opcode_bits); + e1000_shift_out_eec_bits(hw, (u16)(offset + i), + nvm->address_bits); + + /* Read the data. For microwire, each word requires the + * overhead of setup and tear-down. + */ + data[i] = e1000_shift_in_eec_bits(hw, 16); + e1000_standby_nvm(hw); + } + +release: + nvm->ops.release(hw); + + return ret_val; +} + +/** + * e1000_read_nvm_eerd - Reads EEPROM using EERD register + * @hw: pointer to the HW structure + * @offset: offset of word in the EEPROM to read + * @words: number of words to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM using the EERD register. + **/ +s32 e1000_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 i, eerd = 0; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_read_nvm_eerd"); + + /* A check for invalid values: offset too large, too many words, + * too many words for the offset, and not enough words. 
+ */
+	if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
+	    (words == 0)) {
+		DEBUGOUT("nvm parameter(s) out of bounds\n");
+		return -E1000_ERR_NVM;
+	}
+
+	for (i = 0; i < words; i++) {
+		eerd = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) +
+		       E1000_NVM_RW_REG_START;
+
+		E1000_WRITE_REG(hw, E1000_EERD, eerd);
+		ret_val = e1000_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ);
+		if (ret_val)
+			break;
+
+		data[i] = (E1000_READ_REG(hw, E1000_EERD) >>
+			   E1000_NVM_RW_REG_DATA);
+	}
+
+	if (ret_val)
+		DEBUGOUT1("NVM read error: %d\n", ret_val);
+
+	return ret_val;
+}
+
+/**
+ * e1000_write_nvm_spi - Write to EEPROM using SPI
+ * @hw: pointer to the HW structure
+ * @offset: offset within the EEPROM to be written to
+ * @words: number of words to write
+ * @data: 16 bit word(s) to be written to the EEPROM
+ *
+ * Writes data to EEPROM at offset using SPI interface.
+ *
+ * If e1000_update_nvm_checksum is not called after this function, the
+ * EEPROM will most likely contain an invalid checksum.
+ **/
+s32 e1000_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	s32 ret_val = -E1000_ERR_NVM;
+	u16 widx = 0;
+
+	DEBUGFUNC("e1000_write_nvm_spi");
+
+	/* A check for invalid values: offset too large, too many words,
+	 * and not enough words.
+	 */
+	if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
+	    (words == 0)) {
+		DEBUGOUT("nvm parameter(s) out of bounds\n");
+		return -E1000_ERR_NVM;
+	}
+
+	while (widx < words) {
+		u8 write_opcode = NVM_WRITE_OPCODE_SPI;
+
+		ret_val = nvm->ops.acquire(hw);
+		if (ret_val)
+			return ret_val;
+
+		ret_val = e1000_ready_nvm_eeprom(hw);
+		if (ret_val) {
+			nvm->ops.release(hw);
+			return ret_val;
+		}
+
+		e1000_standby_nvm(hw);
+
+		/* Send the WRITE ENABLE command (8 bit opcode) */
+		e1000_shift_out_eec_bits(hw, NVM_WREN_OPCODE_SPI,
+					 nvm->opcode_bits);
+
+		e1000_standby_nvm(hw);
+
+		/* Some SPI EEPROMs use the 8th address bit embedded in the
+		 * opcode
+		 */
+		if ((nvm->address_bits == 8) && (offset >= 128))
+			write_opcode |= NVM_A8_OPCODE_SPI;
+
+		/* Send the Write command (8-bit opcode + addr) */
+		e1000_shift_out_eec_bits(hw, write_opcode, nvm->opcode_bits);
+		e1000_shift_out_eec_bits(hw, (u16)((offset + widx) * 2),
+					 nvm->address_bits);
+
+		/* Loop to allow for up to a whole page write of the EEPROM */
+		while (widx < words) {
+			u16 word_out = data[widx];
+			word_out = (word_out >> 8) | (word_out << 8);
+			e1000_shift_out_eec_bits(hw, word_out, 16);
+			widx++;
+
+			if ((((offset + widx) * 2) % nvm->page_size) == 0) {
+				e1000_standby_nvm(hw);
+				break;
+			}
+		}
+		msec_delay(10);
+		nvm->ops.release(hw);
+	}
+
+	return ret_val;
+}
+
+/**
+ * e1000_write_nvm_microwire - Writes EEPROM using microwire
+ * @hw: pointer to the HW structure
+ * @offset: offset within the EEPROM to be written to
+ * @words: number of words to write
+ * @data: 16 bit word(s) to be written to the EEPROM
+ *
+ * Writes data to EEPROM at offset using microwire interface.
+ *
+ * If e1000_update_nvm_checksum is not called after this function, the
+ * EEPROM will most likely contain an invalid checksum.
+ **/
+s32 e1000_write_nvm_microwire(struct e1000_hw *hw, u16 offset, u16 words,
+			      u16 *data)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	s32 ret_val;
+	u32 eecd;
+	u16 words_written = 0;
+	u16 widx = 0;
+
+	DEBUGFUNC("e1000_write_nvm_microwire");
+
+	/* A check for invalid values: offset too large, too many words,
+	 * and not enough words.
+ */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { + DEBUGOUT("nvm parameter(s) out of bounds\n"); + return -E1000_ERR_NVM; + } + + ret_val = nvm->ops.acquire(hw); + if (ret_val) + return ret_val; + + ret_val = e1000_ready_nvm_eeprom(hw); + if (ret_val) + goto release; + + e1000_shift_out_eec_bits(hw, NVM_EWEN_OPCODE_MICROWIRE, + (u16)(nvm->opcode_bits + 2)); + + e1000_shift_out_eec_bits(hw, 0, (u16)(nvm->address_bits - 2)); + + e1000_standby_nvm(hw); + + while (words_written < words) { + e1000_shift_out_eec_bits(hw, NVM_WRITE_OPCODE_MICROWIRE, + nvm->opcode_bits); + + e1000_shift_out_eec_bits(hw, (u16)(offset + words_written), + nvm->address_bits); + + e1000_shift_out_eec_bits(hw, data[words_written], 16); + + e1000_standby_nvm(hw); + + for (widx = 0; widx < 200; widx++) { + eecd = E1000_READ_REG(hw, E1000_EECD); + if (eecd & E1000_EECD_DO) + break; + usec_delay(50); + } + + if (widx == 200) { + DEBUGOUT("NVM Write did not complete\n"); + ret_val = -E1000_ERR_NVM; + goto release; + } + + e1000_standby_nvm(hw); + + words_written++; + } + + e1000_shift_out_eec_bits(hw, NVM_EWDS_OPCODE_MICROWIRE, + (u16)(nvm->opcode_bits + 2)); + + e1000_shift_out_eec_bits(hw, 0, (u16)(nvm->address_bits - 2)); + +release: + nvm->ops.release(hw); + + return ret_val; +} + +/** + * e1000_read_pba_string_generic - Read device part number + * @hw: pointer to the HW structure + * @pba_num: pointer to device part number + * @pba_num_size: size of part number buffer + * + * Reads the product board assembly (PBA) number from the EEPROM and stores + * the value in pba_num. + **/ +s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num, + u32 pba_num_size) +{ + s32 ret_val; + u16 nvm_data; + u16 pba_ptr; + u16 offset; + u16 length; + + DEBUGFUNC("e1000_read_pba_string_generic"); + + if ((hw->mac.type >= e1000_i210) && + !e1000_get_flash_presence_i210(hw)) { + DEBUGOUT("Flashless no PBA string\n"); + return -E1000_ERR_NVM_PBA_SECTION; + } + + if (pba_num == NULL) { + DEBUGOUT("PBA string buffer was null\n"); + return -E1000_ERR_INVALID_ARGUMENT; + } + + ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_0, 1, &nvm_data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + + ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &pba_ptr); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + + /* if nvm_data is not ptr guard the PBA must be in legacy format which + * means pba_ptr is actually our second data word for the PBA number + * and we can decode it into an ascii string + */ + if (nvm_data != NVM_PBA_PTR_GUARD) { + DEBUGOUT("NVM PBA number is not stored as string\n"); + + /* make sure callers buffer is big enough to store the PBA */ + if (pba_num_size < E1000_PBANUM_LENGTH) { + DEBUGOUT("PBA string buffer too small\n"); + return E1000_ERR_NO_SPACE; + } + + /* extract hex string from data and pba_ptr */ + pba_num[0] = (nvm_data >> 12) & 0xF; + pba_num[1] = (nvm_data >> 8) & 0xF; + pba_num[2] = (nvm_data >> 4) & 0xF; + pba_num[3] = nvm_data & 0xF; + pba_num[4] = (pba_ptr >> 12) & 0xF; + pba_num[5] = (pba_ptr >> 8) & 0xF; + pba_num[6] = '-'; + pba_num[7] = 0; + pba_num[8] = (pba_ptr >> 4) & 0xF; + pba_num[9] = pba_ptr & 0xF; + + /* put a null character on the end of our string */ + pba_num[10] = '\0'; + + /* switch all the data but the '-' to hex char */ + for (offset = 0; offset < 10; offset++) { + if (pba_num[offset] < 0xA) + pba_num[offset] += '0'; + else if (pba_num[offset] < 0x10) + pba_num[offset] += 'A' - 0xA; + } + + 
return E1000_SUCCESS; + } + + ret_val = hw->nvm.ops.read(hw, pba_ptr, 1, &length); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + + if (length == 0xFFFF || length == 0) { + DEBUGOUT("NVM PBA number section invalid length\n"); + return -E1000_ERR_NVM_PBA_SECTION; + } + /* check if pba_num buffer is big enough */ + if (pba_num_size < (((u32)length * 2) - 1)) { + DEBUGOUT("PBA string buffer too small\n"); + return -E1000_ERR_NO_SPACE; + } + + /* trim pba length from start of string */ + pba_ptr++; + length--; + + for (offset = 0; offset < length; offset++) { + ret_val = hw->nvm.ops.read(hw, pba_ptr + offset, 1, &nvm_data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + pba_num[offset * 2] = (u8)(nvm_data >> 8); + pba_num[(offset * 2) + 1] = (u8)(nvm_data & 0xFF); + } + pba_num[offset * 2] = '\0'; + + return E1000_SUCCESS; +} + +/** + * e1000_read_pba_length_generic - Read device part number length + * @hw: pointer to the HW structure + * @pba_num_size: size of part number buffer + * + * Reads the product board assembly (PBA) number length from the EEPROM and + * stores the value in pba_num_size. + **/ +s32 e1000_read_pba_length_generic(struct e1000_hw *hw, u32 *pba_num_size) +{ + s32 ret_val; + u16 nvm_data; + u16 pba_ptr; + u16 length; + + DEBUGFUNC("e1000_read_pba_length_generic"); + + if (pba_num_size == NULL) { + DEBUGOUT("PBA buffer size was null\n"); + return -E1000_ERR_INVALID_ARGUMENT; + } + + ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_0, 1, &nvm_data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + + ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &pba_ptr); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + + /* if data is not ptr guard the PBA must be in legacy format */ + if (nvm_data != NVM_PBA_PTR_GUARD) { + *pba_num_size = E1000_PBANUM_LENGTH; + return E1000_SUCCESS; + } + + ret_val = hw->nvm.ops.read(hw, pba_ptr, 1, &length); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + + if (length == 0xFFFF || length == 0) { + DEBUGOUT("NVM PBA number section invalid length\n"); + return -E1000_ERR_NVM_PBA_SECTION; + } + + /* Convert from length in u16 values to u8 chars, add 1 for NULL, + * and subtract 2 because length field is included in length. + */ + *pba_num_size = ((u32)length * 2) - 1; + + return E1000_SUCCESS; +} + +/** + * e1000_read_pba_num_generic - Read device part number + * @hw: pointer to the HW structure + * @pba_num: pointer to device part number + * + * Reads the product board assembly (PBA) number from the EEPROM and stores + * the value in pba_num. 
+ **/ +s32 e1000_read_pba_num_generic(struct e1000_hw *hw, u32 *pba_num) +{ + s32 ret_val; + u16 nvm_data; + + DEBUGFUNC("e1000_read_pba_num_generic"); + + ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_0, 1, &nvm_data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } else if (nvm_data == NVM_PBA_PTR_GUARD) { + DEBUGOUT("NVM Not Supported\n"); + return -E1000_NOT_IMPLEMENTED; + } + *pba_num = (u32)(nvm_data << 16); + + ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &nvm_data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + *pba_num |= nvm_data; + + return E1000_SUCCESS; +} + + +/** + * e1000_read_pba_raw + * @hw: pointer to the HW structure + * @eeprom_buf: optional pointer to EEPROM image + * @eeprom_buf_size: size of EEPROM image in words + * @max_pba_block_size: PBA block size limit + * @pba: pointer to output PBA structure + * + * Reads PBA from EEPROM image when eeprom_buf is not NULL. + * Reads PBA from physical EEPROM device when eeprom_buf is NULL. + * + **/ +s32 e1000_read_pba_raw(struct e1000_hw *hw, u16 *eeprom_buf, + u32 eeprom_buf_size, u16 max_pba_block_size, + struct e1000_pba *pba) +{ + s32 ret_val; + u16 pba_block_size; + + if (pba == NULL) + return -E1000_ERR_PARAM; + + if (eeprom_buf == NULL) { + ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_0, 2, + &pba->word[0]); + if (ret_val) + return ret_val; + } else { + if (eeprom_buf_size > NVM_PBA_OFFSET_1) { + pba->word[0] = eeprom_buf[NVM_PBA_OFFSET_0]; + pba->word[1] = eeprom_buf[NVM_PBA_OFFSET_1]; + } else { + return -E1000_ERR_PARAM; + } + } + + if (pba->word[0] == NVM_PBA_PTR_GUARD) { + if (pba->pba_block == NULL) + return -E1000_ERR_PARAM; + + ret_val = e1000_get_pba_block_size(hw, eeprom_buf, + eeprom_buf_size, + &pba_block_size); + if (ret_val) + return ret_val; + + if (pba_block_size > max_pba_block_size) + return -E1000_ERR_PARAM; + + if (eeprom_buf == NULL) { + ret_val = e1000_read_nvm(hw, pba->word[1], + pba_block_size, + pba->pba_block); + if (ret_val) + return ret_val; + } else { + if (eeprom_buf_size > (u32)(pba->word[1] + + pba_block_size)) { + memcpy(pba->pba_block, + &eeprom_buf[pba->word[1]], + pba_block_size * sizeof(u16)); + } else { + return -E1000_ERR_PARAM; + } + } + } + + return E1000_SUCCESS; +} + +/** + * e1000_write_pba_raw + * @hw: pointer to the HW structure + * @eeprom_buf: optional pointer to EEPROM image + * @eeprom_buf_size: size of EEPROM image in words + * @pba: pointer to PBA structure + * + * Writes PBA to EEPROM image when eeprom_buf is not NULL. + * Writes PBA to physical EEPROM device when eeprom_buf is NULL. 
+ * + **/ +s32 e1000_write_pba_raw(struct e1000_hw *hw, u16 *eeprom_buf, + u32 eeprom_buf_size, struct e1000_pba *pba) +{ + s32 ret_val; + + if (pba == NULL) + return -E1000_ERR_PARAM; + + if (eeprom_buf == NULL) { + ret_val = e1000_write_nvm(hw, NVM_PBA_OFFSET_0, 2, + &pba->word[0]); + if (ret_val) + return ret_val; + } else { + if (eeprom_buf_size > NVM_PBA_OFFSET_1) { + eeprom_buf[NVM_PBA_OFFSET_0] = pba->word[0]; + eeprom_buf[NVM_PBA_OFFSET_1] = pba->word[1]; + } else { + return -E1000_ERR_PARAM; + } + } + + if (pba->word[0] == NVM_PBA_PTR_GUARD) { + if (pba->pba_block == NULL) + return -E1000_ERR_PARAM; + + if (eeprom_buf == NULL) { + ret_val = e1000_write_nvm(hw, pba->word[1], + pba->pba_block[0], + pba->pba_block); + if (ret_val) + return ret_val; + } else { + if (eeprom_buf_size > (u32)(pba->word[1] + + pba->pba_block[0])) { + memcpy(&eeprom_buf[pba->word[1]], + pba->pba_block, + pba->pba_block[0] * sizeof(u16)); + } else { + return -E1000_ERR_PARAM; + } + } + } + + return E1000_SUCCESS; +} + +/** + * e1000_get_pba_block_size + * @hw: pointer to the HW structure + * @eeprom_buf: optional pointer to EEPROM image + * @eeprom_buf_size: size of EEPROM image in words + * @pba_data_size: pointer to output variable + * + * Returns the size of the PBA block in words. Function operates on EEPROM + * image if the eeprom_buf pointer is not NULL otherwise it accesses physical + * EEPROM device. + * + **/ +s32 e1000_get_pba_block_size(struct e1000_hw *hw, u16 *eeprom_buf, + u32 eeprom_buf_size, u16 *pba_block_size) +{ + s32 ret_val; + u16 pba_word[2]; + u16 length; + + DEBUGFUNC("e1000_get_pba_block_size"); + + if (eeprom_buf == NULL) { + ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_0, 2, &pba_word[0]); + if (ret_val) + return ret_val; + } else { + if (eeprom_buf_size > NVM_PBA_OFFSET_1) { + pba_word[0] = eeprom_buf[NVM_PBA_OFFSET_0]; + pba_word[1] = eeprom_buf[NVM_PBA_OFFSET_1]; + } else { + return -E1000_ERR_PARAM; + } + } + + if (pba_word[0] == NVM_PBA_PTR_GUARD) { + if (eeprom_buf == NULL) { + ret_val = e1000_read_nvm(hw, pba_word[1] + 0, 1, + &length); + if (ret_val) + return ret_val; + } else { + if (eeprom_buf_size > pba_word[1]) + length = eeprom_buf[pba_word[1] + 0]; + else + return -E1000_ERR_PARAM; + } + + if (length == 0xFFFF || length == 0) + return -E1000_ERR_NVM_PBA_SECTION; + } else { + /* PBA number in legacy format, there is no PBA Block. */ + length = 0; + } + + if (pba_block_size != NULL) + *pba_block_size = length; + + return E1000_SUCCESS; +} + +/** + * e1000_read_mac_addr_generic - Read device MAC address + * @hw: pointer to the HW structure + * + * Reads the device MAC address from the EEPROM and stores the value. + * Since devices with two ports use the same EEPROM, we increment the + * last bit in the MAC address for the second port. 
+ **/ +s32 e1000_read_mac_addr_generic(struct e1000_hw *hw) +{ + u32 rar_high; + u32 rar_low; + u16 i; + + rar_high = E1000_READ_REG(hw, E1000_RAH(0)); + rar_low = E1000_READ_REG(hw, E1000_RAL(0)); + + for (i = 0; i < E1000_RAL_MAC_ADDR_LEN; i++) + hw->mac.perm_addr[i] = (u8)(rar_low >> (i*8)); + + for (i = 0; i < E1000_RAH_MAC_ADDR_LEN; i++) + hw->mac.perm_addr[i+4] = (u8)(rar_high >> (i*8)); + + for (i = 0; i < ETH_ADDR_LEN; i++) + hw->mac.addr[i] = hw->mac.perm_addr[i]; + + return E1000_SUCCESS; +} + +/** + * e1000_validate_nvm_checksum_generic - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Calculates the EEPROM checksum by reading/adding each word of the EEPROM + * and then verifies that the sum of the EEPROM is equal to 0xBABA. + **/ +s32 e1000_validate_nvm_checksum_generic(struct e1000_hw *hw) +{ + s32 ret_val; + u16 checksum = 0; + u16 i, nvm_data; + + DEBUGFUNC("e1000_validate_nvm_checksum_generic"); + + for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) { + ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + checksum += nvm_data; + } + + if (checksum != (u16) NVM_SUM) { + DEBUGOUT("NVM Checksum Invalid\n"); + return -E1000_ERR_NVM; + } + + return E1000_SUCCESS; +} + +/** + * e1000_update_nvm_checksum_generic - Update EEPROM checksum + * @hw: pointer to the HW structure + * + * Updates the EEPROM checksum by reading/adding each word of the EEPROM + * up to the checksum. Then calculates the EEPROM checksum and writes the + * value to the EEPROM. + **/ +s32 e1000_update_nvm_checksum_generic(struct e1000_hw *hw) +{ + s32 ret_val; + u16 checksum = 0; + u16 i, nvm_data; + + DEBUGFUNC("e1000_update_nvm_checksum"); + + for (i = 0; i < NVM_CHECKSUM_REG; i++) { + ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); + if (ret_val) { + DEBUGOUT("NVM Read Error while updating checksum.\n"); + return ret_val; + } + checksum += nvm_data; + } + checksum = (u16) NVM_SUM - checksum; + ret_val = hw->nvm.ops.write(hw, NVM_CHECKSUM_REG, 1, &checksum); + if (ret_val) + DEBUGOUT("NVM Write Error while updating checksum.\n"); + + return ret_val; +} + +/** + * e1000_reload_nvm_generic - Reloads EEPROM + * @hw: pointer to the HW structure + * + * Reloads the EEPROM by setting the "Reinitialize from EEPROM" bit in the + * extended control register. 
+ **/ +STATIC void e1000_reload_nvm_generic(struct e1000_hw *hw) +{ + u32 ctrl_ext; + + DEBUGFUNC("e1000_reload_nvm_generic"); + + usec_delay(10); + ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_EE_RST; + E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); + E1000_WRITE_FLUSH(hw); +} + +/** + * e1000_get_fw_version - Get firmware version information + * @hw: pointer to the HW structure + * @fw_vers: pointer to output version structure + * + * unsupported/not present features return 0 in version structure + **/ +void e1000_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers) +{ + u16 eeprom_verh, eeprom_verl, etrack_test, fw_version; + u8 q, hval, rem, result; + u16 comb_verh, comb_verl, comb_offset; + + memset(fw_vers, 0, sizeof(struct e1000_fw_version)); + + /* basic eeprom version numbers, bits used vary by part and by tool + * used to create the nvm images */ + /* Check which data format we have */ + switch (hw->mac.type) { + case e1000_i211: + e1000_read_invm_version(hw, fw_vers); + return; + case e1000_82575: + case e1000_82576: + case e1000_82580: + case e1000_i354: + hw->nvm.ops.read(hw, NVM_ETRACK_HIWORD, 1, &etrack_test); + /* Use this format, unless EETRACK ID exists, + * then use alternate format + */ + if ((etrack_test & NVM_MAJOR_MASK) != NVM_ETRACK_VALID) { + hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version); + fw_vers->eep_major = (fw_version & NVM_MAJOR_MASK) + >> NVM_MAJOR_SHIFT; + fw_vers->eep_minor = (fw_version & NVM_MINOR_MASK) + >> NVM_MINOR_SHIFT; + fw_vers->eep_build = (fw_version & NVM_IMAGE_ID_MASK); + goto etrack_id; + } + break; + case e1000_i210: + if (!(e1000_get_flash_presence_i210(hw))) { + e1000_read_invm_version(hw, fw_vers); + return; + } + /* fall through */ + case e1000_i350: + hw->nvm.ops.read(hw, NVM_ETRACK_HIWORD, 1, &etrack_test); + /* find combo image version */ + hw->nvm.ops.read(hw, NVM_COMB_VER_PTR, 1, &comb_offset); + if ((comb_offset != 0x0) && + (comb_offset != NVM_VER_INVALID)) { + + hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset + + 1), 1, &comb_verh); + hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset), + 1, &comb_verl); + + /* get Option Rom version if it exists and is valid */ + if ((comb_verh && comb_verl) && + ((comb_verh != NVM_VER_INVALID) && + (comb_verl != NVM_VER_INVALID))) { + + fw_vers->or_valid = true; + fw_vers->or_major = + comb_verl >> NVM_COMB_VER_SHFT; + fw_vers->or_build = + (comb_verl << NVM_COMB_VER_SHFT) + | (comb_verh >> NVM_COMB_VER_SHFT); + fw_vers->or_patch = + comb_verh & NVM_COMB_VER_MASK; + } + } + break; + default: + hw->nvm.ops.read(hw, NVM_ETRACK_HIWORD, 1, &etrack_test); + return; + } + hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version); + fw_vers->eep_major = (fw_version & NVM_MAJOR_MASK) + >> NVM_MAJOR_SHIFT; + + /* check for old style version format in newer images*/ + if ((fw_version & NVM_NEW_DEC_MASK) == 0x0) { + eeprom_verl = (fw_version & NVM_COMB_VER_MASK); + } else { + eeprom_verl = (fw_version & NVM_MINOR_MASK) + >> NVM_MINOR_SHIFT; + } + /* Convert minor value to hex before assigning to output struct + * Val to be converted will not be higher than 99, per tool output + */ + q = eeprom_verl / NVM_HEX_CONV; + hval = q * NVM_HEX_TENS; + rem = eeprom_verl % NVM_HEX_CONV; + result = hval + rem; + fw_vers->eep_minor = result; + +etrack_id: + if ((etrack_test & NVM_MAJOR_MASK) == NVM_ETRACK_VALID) { + hw->nvm.ops.read(hw, NVM_ETRACK_WORD, 1, &eeprom_verl); + hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verh); + fw_vers->etrack_id = 
(eeprom_verh << NVM_ETRACK_SHIFT) + | eeprom_verl; + } else if ((etrack_test & NVM_ETRACK_VALID) == 0) { + hw->nvm.ops.read(hw, NVM_ETRACK_WORD, 1, &eeprom_verh); + hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verl); + fw_vers->etrack_id = (eeprom_verh << NVM_ETRACK_SHIFT) | + eeprom_verl; + } +} + + diff --git a/src/spdk/dpdk/drivers/net/e1000/base/e1000_nvm.h b/src/spdk/dpdk/drivers/net/e1000/base/e1000_nvm.h new file mode 100644 index 000000000..38adab528 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/e1000/base/e1000_nvm.h @@ -0,0 +1,69 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001 - 2015 Intel Corporation + */ + +#ifndef _E1000_NVM_H_ +#define _E1000_NVM_H_ + +struct e1000_pba { + u16 word[2]; + u16 *pba_block; +}; + +struct e1000_fw_version { + u32 etrack_id; + u16 eep_major; + u16 eep_minor; + u16 eep_build; + + u8 invm_major; + u8 invm_minor; + u8 invm_img_type; + + bool or_valid; + u16 or_major; + u16 or_build; + u16 or_patch; +}; + + +void e1000_init_nvm_ops_generic(struct e1000_hw *hw); +s32 e1000_null_read_nvm(struct e1000_hw *hw, u16 a, u16 b, u16 *c); +void e1000_null_nvm_generic(struct e1000_hw *hw); +s32 e1000_null_led_default(struct e1000_hw *hw, u16 *data); +s32 e1000_null_write_nvm(struct e1000_hw *hw, u16 a, u16 b, u16 *c); +s32 e1000_acquire_nvm_generic(struct e1000_hw *hw); + +s32 e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg); +s32 e1000_read_mac_addr_generic(struct e1000_hw *hw); +s32 e1000_read_pba_num_generic(struct e1000_hw *hw, u32 *pba_num); +s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num, + u32 pba_num_size); +s32 e1000_read_pba_length_generic(struct e1000_hw *hw, u32 *pba_num_size); +s32 e1000_read_pba_raw(struct e1000_hw *hw, u16 *eeprom_buf, + u32 eeprom_buf_size, u16 max_pba_block_size, + struct e1000_pba *pba); +s32 e1000_write_pba_raw(struct e1000_hw *hw, u16 *eeprom_buf, + u32 eeprom_buf_size, struct e1000_pba *pba); +s32 e1000_get_pba_block_size(struct e1000_hw *hw, u16 *eeprom_buf, + u32 eeprom_buf_size, u16 *pba_block_size); +s32 e1000_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); +s32 e1000_read_nvm_microwire(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data); +s32 e1000_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data); +s32 e1000_valid_led_default_generic(struct e1000_hw *hw, u16 *data); +s32 e1000_validate_nvm_checksum_generic(struct e1000_hw *hw); +s32 e1000_write_nvm_microwire(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data); +s32 e1000_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data); +s32 e1000_update_nvm_checksum_generic(struct e1000_hw *hw); +void e1000_stop_nvm(struct e1000_hw *hw); +void e1000_release_nvm_generic(struct e1000_hw *hw); +void e1000_get_fw_version(struct e1000_hw *hw, + struct e1000_fw_version *fw_vers); + +#define E1000_STM_OPCODE 0xDB00 + +#endif diff --git a/src/spdk/dpdk/drivers/net/e1000/base/e1000_osdep.c b/src/spdk/dpdk/drivers/net/e1000/base/e1000_osdep.c new file mode 100644 index 000000000..55265931b --- /dev/null +++ b/src/spdk/dpdk/drivers/net/e1000/base/e1000_osdep.c @@ -0,0 +1,54 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001 - 2015 Intel Corporation + */ +/*$FreeBSD$*/ + +#include "e1000_api.h" + +/* + * NOTE: the following routines using the e1000 + * naming style are provided to the shared + * code but are OS specific + */ + +void +e1000_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value) +{ + return; +} + +void +e1000_read_pci_cfg(struct 
e1000_hw *hw, u32 reg, u16 *value) +{ + *value = 0; + return; +} + +void +e1000_pci_set_mwi(struct e1000_hw *hw) +{ +} + +void +e1000_pci_clear_mwi(struct e1000_hw *hw) +{ +} + + +/* + * Read the PCI Express capabilities + */ +int32_t +e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value) +{ + return E1000_NOT_IMPLEMENTED; +} + +/* + * Write the PCI Express capabilities + */ +int32_t +e1000_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value) +{ + return E1000_NOT_IMPLEMENTED; +} diff --git a/src/spdk/dpdk/drivers/net/e1000/base/e1000_osdep.h b/src/spdk/dpdk/drivers/net/e1000/base/e1000_osdep.h new file mode 100644 index 000000000..94a49f340 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/e1000/base/e1000_osdep.h @@ -0,0 +1,166 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001 - 2015 Intel Corporation + */ +/*$FreeBSD$*/ + +#ifndef _E1000_OSDEP_H_ +#define _E1000_OSDEP_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "../e1000_logs.h" + +#define DELAY(x) rte_delay_us_sleep(x) +#define usec_delay(x) DELAY(x) +#define usec_delay_irq(x) DELAY(x) +#define msec_delay(x) DELAY(1000*(x)) +#define msec_delay_irq(x) DELAY(1000*(x)) + +#define DEBUGFUNC(F) DEBUGOUT(F "\n"); +#define DEBUGOUT(S, args...) PMD_DRV_LOG_RAW(DEBUG, S, ##args) +#define DEBUGOUT1(S, args...) DEBUGOUT(S, ##args) +#define DEBUGOUT2(S, args...) DEBUGOUT(S, ##args) +#define DEBUGOUT3(S, args...) DEBUGOUT(S, ##args) +#define DEBUGOUT6(S, args...) DEBUGOUT(S, ##args) +#define DEBUGOUT7(S, args...) DEBUGOUT(S, ##args) + +#define UNREFERENCED_PARAMETER(_p) +#define UNREFERENCED_1PARAMETER(_p) +#define UNREFERENCED_2PARAMETER(_p, _q) +#define UNREFERENCED_3PARAMETER(_p, _q, _r) +#define UNREFERENCED_4PARAMETER(_p, _q, _r, _s) + +#define FALSE 0 +#define TRUE 1 + +#define CMD_MEM_WRT_INVALIDATE 0x0010 /* BIT_4 */ + +/* Mutex used in the shared code */ +#define E1000_MUTEX uintptr_t +#define E1000_MUTEX_INIT(mutex) (*(mutex) = 0) +#define E1000_MUTEX_LOCK(mutex) (*(mutex) = 1) +#define E1000_MUTEX_UNLOCK(mutex) (*(mutex) = 0) + +typedef uint64_t u64; +typedef uint32_t u32; +typedef uint16_t u16; +typedef uint8_t u8; +typedef int64_t s64; +typedef int32_t s32; +typedef int16_t s16; +typedef int8_t s8; + +#define __le16 u16 +#define __le32 u32 +#define __le64 u64 + +#define E1000_WRITE_FLUSH(a) E1000_READ_REG(a, E1000_STATUS) + +#define E1000_PCI_REG(reg) rte_read32(reg) + +#define E1000_PCI_REG16(reg) rte_read16(reg) + +#define E1000_PCI_REG_WRITE(reg, value) \ + rte_write32((rte_cpu_to_le_32(value)), reg) + +#define E1000_PCI_REG_WRITE_RELAXED(reg, value) \ + rte_write32_relaxed((rte_cpu_to_le_32(value)), reg) + +#define E1000_PCI_REG_WRITE16(reg, value) \ + rte_write16((rte_cpu_to_le_16(value)), reg) + +#define E1000_PCI_REG_ADDR(hw, reg) \ + ((volatile uint32_t *)((char *)(hw)->hw_addr + (reg))) + +#define E1000_PCI_REG_ARRAY_ADDR(hw, reg, index) \ + E1000_PCI_REG_ADDR((hw), (reg) + ((index) << 2)) + +#define E1000_PCI_REG_FLASH_ADDR(hw, reg) \ + ((volatile uint32_t *)((char *)(hw)->flash_address + (reg))) + +static inline uint32_t e1000_read_addr(volatile void *addr) +{ + return rte_le_to_cpu_32(E1000_PCI_REG(addr)); +} + +static inline uint16_t e1000_read_addr16(volatile void *addr) +{ + return rte_le_to_cpu_16(E1000_PCI_REG16(addr)); +} + +/* Necessary defines */ +#define E1000_MRQC_ENABLE_MASK 0x00000007 +#define E1000_MRQC_RSS_FIELD_IPV6_EX 0x00080000 +#define E1000_ALL_FULL_DUPLEX ( \ + ADVERTISE_10_FULL | 
ADVERTISE_100_FULL | ADVERTISE_1000_FULL) + +#define M88E1543_E_PHY_ID 0x01410EA0 +#define ULP_SUPPORT + +#define E1000_RCTL_DTYP_MASK 0x00000C00 /* Descriptor type mask */ +#define E1000_MRQC_RSS_FIELD_IPV6_EX 0x00080000 + +/* Register READ/WRITE macros */ + +#define E1000_READ_REG(hw, reg) \ + e1000_read_addr(E1000_PCI_REG_ADDR((hw), (reg))) + +#define E1000_WRITE_REG(hw, reg, value) \ + E1000_PCI_REG_WRITE(E1000_PCI_REG_ADDR((hw), (reg)), (value)) + +#define E1000_READ_REG_ARRAY(hw, reg, index) \ + E1000_PCI_REG(E1000_PCI_REG_ARRAY_ADDR((hw), (reg), (index))) + +#define E1000_WRITE_REG_ARRAY(hw, reg, index, value) \ + E1000_PCI_REG_WRITE(E1000_PCI_REG_ARRAY_ADDR((hw), (reg), (index)), (value)) + +#define E1000_READ_REG_ARRAY_DWORD E1000_READ_REG_ARRAY +#define E1000_WRITE_REG_ARRAY_DWORD E1000_WRITE_REG_ARRAY + +#define E1000_ACCESS_PANIC(x, hw, reg, value) \ + rte_panic("%s:%u\t" RTE_STR(x) "(%p, 0x%x, 0x%x)", \ + __FILE__, __LINE__, (hw), (reg), (unsigned int)(value)) + +/* + * To be able to do IO write, we need to map IO BAR + * (bar 2/4 depending on device). + * Right now mapping multiple BARs is not supported by DPDK. + * Fortunatelly we need it only for legacy hw support. + */ + +#define E1000_WRITE_REG_IO(hw, reg, value) \ + E1000_WRITE_REG(hw, reg, value) + +/* + * Tested on I217/I218 chipset. + */ + +#define E1000_READ_FLASH_REG(hw, reg) \ + e1000_read_addr(E1000_PCI_REG_FLASH_ADDR((hw), (reg))) + +#define E1000_READ_FLASH_REG16(hw, reg) \ + e1000_read_addr16(E1000_PCI_REG_FLASH_ADDR((hw), (reg))) + +#define E1000_WRITE_FLASH_REG(hw, reg, value) \ + E1000_PCI_REG_WRITE(E1000_PCI_REG_FLASH_ADDR((hw), (reg)), (value)) + +#define E1000_WRITE_FLASH_REG16(hw, reg, value) \ + E1000_PCI_REG_WRITE16(E1000_PCI_REG_FLASH_ADDR((hw), (reg)), (value)) + +#define STATIC static + +#ifndef ETH_ADDR_LEN +#define ETH_ADDR_LEN 6 +#endif + +#endif /* _E1000_OSDEP_H_ */ diff --git a/src/spdk/dpdk/drivers/net/e1000/base/e1000_phy.c b/src/spdk/dpdk/drivers/net/e1000/base/e1000_phy.c new file mode 100644 index 000000000..956c06747 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/e1000/base/e1000_phy.c @@ -0,0 +1,4231 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001 - 2015 Intel Corporation + */ + +#include "e1000_api.h" + +STATIC s32 e1000_wait_autoneg(struct e1000_hw *hw); +STATIC s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset, + u16 *data, bool read, bool page_set); +STATIC u32 e1000_get_phy_addr_for_hv_page(u32 page); +STATIC s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset, + u16 *data, bool read); + +/* Cable length tables */ +STATIC const u16 e1000_m88_cable_length_table[] = { + 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED }; +#define M88E1000_CABLE_LENGTH_TABLE_SIZE \ + (sizeof(e1000_m88_cable_length_table) / \ + sizeof(e1000_m88_cable_length_table[0])) + +STATIC const u16 e1000_igp_2_cable_length_table[] = { + 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21, 0, 0, 0, 3, + 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41, 6, 10, 14, 18, 22, + 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61, 21, 26, 31, 35, 40, + 44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82, 40, 45, 51, 56, 61, + 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104, 60, 66, 72, 77, 82, + 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121, 83, 89, 95, + 100, 105, 109, 113, 116, 119, 122, 124, 104, 109, 114, 118, 121, + 124}; +#define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \ + (sizeof(e1000_igp_2_cable_length_table) / \ + sizeof(e1000_igp_2_cable_length_table[0])) + +/** + * 
e1000_init_phy_ops_generic - Initialize PHY function pointers + * @hw: pointer to the HW structure + * + * Setups up the function pointers to no-op functions + **/ +void e1000_init_phy_ops_generic(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + DEBUGFUNC("e1000_init_phy_ops_generic"); + + /* Initialize function pointers */ + phy->ops.init_params = e1000_null_ops_generic; + phy->ops.acquire = e1000_null_ops_generic; + phy->ops.check_polarity = e1000_null_ops_generic; + phy->ops.check_reset_block = e1000_null_ops_generic; + phy->ops.commit = e1000_null_ops_generic; + phy->ops.force_speed_duplex = e1000_null_ops_generic; + phy->ops.get_cfg_done = e1000_null_ops_generic; + phy->ops.get_cable_length = e1000_null_ops_generic; + phy->ops.get_info = e1000_null_ops_generic; + phy->ops.set_page = e1000_null_set_page; + phy->ops.read_reg = e1000_null_read_reg; + phy->ops.read_reg_locked = e1000_null_read_reg; + phy->ops.read_reg_page = e1000_null_read_reg; + phy->ops.release = e1000_null_phy_generic; + phy->ops.reset = e1000_null_ops_generic; + phy->ops.set_d0_lplu_state = e1000_null_lplu_state; + phy->ops.set_d3_lplu_state = e1000_null_lplu_state; + phy->ops.write_reg = e1000_null_write_reg; + phy->ops.write_reg_locked = e1000_null_write_reg; + phy->ops.write_reg_page = e1000_null_write_reg; + phy->ops.power_up = e1000_null_phy_generic; + phy->ops.power_down = e1000_null_phy_generic; + phy->ops.read_i2c_byte = e1000_read_i2c_byte_null; + phy->ops.write_i2c_byte = e1000_write_i2c_byte_null; + phy->ops.cfg_on_link_up = e1000_null_ops_generic; +} + +/** + * e1000_null_set_page - No-op function, return 0 + * @hw: pointer to the HW structure + **/ +s32 e1000_null_set_page(struct e1000_hw E1000_UNUSEDARG *hw, + u16 E1000_UNUSEDARG data) +{ + DEBUGFUNC("e1000_null_set_page"); + UNREFERENCED_2PARAMETER(hw, data); + return E1000_SUCCESS; +} + +/** + * e1000_null_read_reg - No-op function, return 0 + * @hw: pointer to the HW structure + **/ +s32 e1000_null_read_reg(struct e1000_hw E1000_UNUSEDARG *hw, + u32 E1000_UNUSEDARG offset, u16 E1000_UNUSEDARG *data) +{ + DEBUGFUNC("e1000_null_read_reg"); + UNREFERENCED_3PARAMETER(hw, offset, data); + return E1000_SUCCESS; +} + +/** + * e1000_null_phy_generic - No-op function, return void + * @hw: pointer to the HW structure + **/ +void e1000_null_phy_generic(struct e1000_hw E1000_UNUSEDARG *hw) +{ + DEBUGFUNC("e1000_null_phy_generic"); + UNREFERENCED_1PARAMETER(hw); + return; +} + +/** + * e1000_null_lplu_state - No-op function, return 0 + * @hw: pointer to the HW structure + **/ +s32 e1000_null_lplu_state(struct e1000_hw E1000_UNUSEDARG *hw, + bool E1000_UNUSEDARG active) +{ + DEBUGFUNC("e1000_null_lplu_state"); + UNREFERENCED_2PARAMETER(hw, active); + return E1000_SUCCESS; +} + +/** + * e1000_null_write_reg - No-op function, return 0 + * @hw: pointer to the HW structure + **/ +s32 e1000_null_write_reg(struct e1000_hw E1000_UNUSEDARG *hw, + u32 E1000_UNUSEDARG offset, u16 E1000_UNUSEDARG data) +{ + DEBUGFUNC("e1000_null_write_reg"); + UNREFERENCED_3PARAMETER(hw, offset, data); + return E1000_SUCCESS; +} + +/** + * e1000_read_i2c_byte_null - No-op function, return 0 + * @hw: pointer to hardware structure + * @byte_offset: byte offset to write + * @dev_addr: device address + * @data: data value read + * + **/ +s32 e1000_read_i2c_byte_null(struct e1000_hw E1000_UNUSEDARG *hw, + u8 E1000_UNUSEDARG byte_offset, + u8 E1000_UNUSEDARG dev_addr, + u8 E1000_UNUSEDARG *data) +{ + DEBUGFUNC("e1000_read_i2c_byte_null"); + UNREFERENCED_4PARAMETER(hw, 
byte_offset, dev_addr, data); + return E1000_SUCCESS; +} + +/** + * e1000_write_i2c_byte_null - No-op function, return 0 + * @hw: pointer to hardware structure + * @byte_offset: byte offset to write + * @dev_addr: device address + * @data: data value to write + * + **/ +s32 e1000_write_i2c_byte_null(struct e1000_hw E1000_UNUSEDARG *hw, + u8 E1000_UNUSEDARG byte_offset, + u8 E1000_UNUSEDARG dev_addr, + u8 E1000_UNUSEDARG data) +{ + DEBUGFUNC("e1000_write_i2c_byte_null"); + UNREFERENCED_4PARAMETER(hw, byte_offset, dev_addr, data); + return E1000_SUCCESS; +} + +/** + * e1000_check_reset_block_generic - Check if PHY reset is blocked + * @hw: pointer to the HW structure + * + * Read the PHY management control register and check whether a PHY reset + * is blocked. If a reset is not blocked return E1000_SUCCESS, otherwise + * return E1000_BLK_PHY_RESET (12). + **/ +s32 e1000_check_reset_block_generic(struct e1000_hw *hw) +{ + u32 manc; + + DEBUGFUNC("e1000_check_reset_block"); + + manc = E1000_READ_REG(hw, E1000_MANC); + + return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ? + E1000_BLK_PHY_RESET : E1000_SUCCESS; +} + +/** + * e1000_get_phy_id - Retrieve the PHY ID and revision + * @hw: pointer to the HW structure + * + * Reads the PHY registers and stores the PHY ID and possibly the PHY + * revision in the hardware structure. + **/ +s32 e1000_get_phy_id(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = E1000_SUCCESS; + u16 phy_id; + u16 retry_count = 0; + + DEBUGFUNC("e1000_get_phy_id"); + + if (!phy->ops.read_reg) + return E1000_SUCCESS; + + while (retry_count < 2) { + ret_val = phy->ops.read_reg(hw, PHY_ID1, &phy_id); + if (ret_val) + return ret_val; + + phy->id = (u32)(phy_id << 16); + usec_delay(20); + ret_val = phy->ops.read_reg(hw, PHY_ID2, &phy_id); + if (ret_val) + return ret_val; + + phy->id |= (u32)(phy_id & PHY_REVISION_MASK); + phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK); + + if (phy->id != 0 && phy->id != PHY_REVISION_MASK) + return E1000_SUCCESS; + + retry_count++; + } + + return E1000_SUCCESS; +} + +/** + * e1000_phy_reset_dsp_generic - Reset PHY DSP + * @hw: pointer to the HW structure + * + * Reset the digital signal processor. + **/ +s32 e1000_phy_reset_dsp_generic(struct e1000_hw *hw) +{ + s32 ret_val; + + DEBUGFUNC("e1000_phy_reset_dsp_generic"); + + if (!hw->phy.ops.write_reg) + return E1000_SUCCESS; + + ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xC1); + if (ret_val) + return ret_val; + + return hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0); +} + +/** + * e1000_read_phy_reg_mdic - Read MDI control register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the MDI control register in the PHY at offset and stores the + * information read to data. + **/ +s32 e1000_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 i, mdic = 0; + + DEBUGFUNC("e1000_read_phy_reg_mdic"); + + if (offset > MAX_PHY_REG_ADDRESS) { + DEBUGOUT1("PHY Address %d is out of range\n", offset); + return -E1000_ERR_PARAM; + } + + /* Set up Op-code, Phy Address, and register offset in the MDI + * Control register. The MAC will take care of interfacing with the + * PHY to retrieve the desired data. 
+ */ + mdic = ((offset << E1000_MDIC_REG_SHIFT) | + (phy->addr << E1000_MDIC_PHY_SHIFT) | + (E1000_MDIC_OP_READ)); + + E1000_WRITE_REG(hw, E1000_MDIC, mdic); + + /* Poll the ready bit to see if the MDI read completed + * Increasing the time out as testing showed failures with + * the lower time out + */ + for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) { + usec_delay_irq(50); + mdic = E1000_READ_REG(hw, E1000_MDIC); + if (mdic & E1000_MDIC_READY) + break; + } + if (!(mdic & E1000_MDIC_READY)) { + DEBUGOUT("MDI Read did not complete\n"); + return -E1000_ERR_PHY; + } + if (mdic & E1000_MDIC_ERROR) { + DEBUGOUT("MDI Error\n"); + return -E1000_ERR_PHY; + } + if (((mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT) != offset) { + DEBUGOUT2("MDI Read offset error - requested %d, returned %d\n", + offset, + (mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT); + return -E1000_ERR_PHY; + } + *data = (u16) mdic; + + /* Allow some time after each MDIC transaction to avoid + * reading duplicate data in the next MDIC transaction. + */ + if (hw->mac.type == e1000_pch2lan) + usec_delay_irq(100); + + return E1000_SUCCESS; +} + +/** + * e1000_write_phy_reg_mdic - Write MDI control register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write to register at offset + * + * Writes data to MDI control register in the PHY at offset. + **/ +s32 e1000_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 i, mdic = 0; + + DEBUGFUNC("e1000_write_phy_reg_mdic"); + + if (offset > MAX_PHY_REG_ADDRESS) { + DEBUGOUT1("PHY Address %d is out of range\n", offset); + return -E1000_ERR_PARAM; + } + + /* Set up Op-code, Phy Address, and register offset in the MDI + * Control register. The MAC will take care of interfacing with the + * PHY to retrieve the desired data. + */ + mdic = (((u32)data) | + (offset << E1000_MDIC_REG_SHIFT) | + (phy->addr << E1000_MDIC_PHY_SHIFT) | + (E1000_MDIC_OP_WRITE)); + + E1000_WRITE_REG(hw, E1000_MDIC, mdic); + + /* Poll the ready bit to see if the MDI read completed + * Increasing the time out as testing showed failures with + * the lower time out + */ + for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) { + usec_delay_irq(50); + mdic = E1000_READ_REG(hw, E1000_MDIC); + if (mdic & E1000_MDIC_READY) + break; + } + if (!(mdic & E1000_MDIC_READY)) { + DEBUGOUT("MDI Write did not complete\n"); + return -E1000_ERR_PHY; + } + if (mdic & E1000_MDIC_ERROR) { + DEBUGOUT("MDI Error\n"); + return -E1000_ERR_PHY; + } + if (((mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT) != offset) { + DEBUGOUT2("MDI Write offset error - requested %d, returned %d\n", + offset, + (mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT); + return -E1000_ERR_PHY; + } + + /* Allow some time after each MDIC transaction to avoid + * reading duplicate data in the next MDIC transaction. + */ + if (hw->mac.type == e1000_pch2lan) + usec_delay_irq(100); + + return E1000_SUCCESS; +} + +/** + * e1000_read_phy_reg_i2c - Read PHY register using i2c + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the PHY register at offset using the i2c interface and stores the + * retrieved information in data. 
+ **/ +s32 e1000_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 i, i2ccmd = 0; + + DEBUGFUNC("e1000_read_phy_reg_i2c"); + + /* Set up Op-code, Phy Address, and register address in the I2CCMD + * register. The MAC will take care of interfacing with the + * PHY to retrieve the desired data. + */ + i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) | + (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) | + (E1000_I2CCMD_OPCODE_READ)); + + E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd); + + /* Poll the ready bit to see if the I2C read completed */ + for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) { + usec_delay(50); + i2ccmd = E1000_READ_REG(hw, E1000_I2CCMD); + if (i2ccmd & E1000_I2CCMD_READY) + break; + } + if (!(i2ccmd & E1000_I2CCMD_READY)) { + DEBUGOUT("I2CCMD Read did not complete\n"); + return -E1000_ERR_PHY; + } + if (i2ccmd & E1000_I2CCMD_ERROR) { + DEBUGOUT("I2CCMD Error bit set\n"); + return -E1000_ERR_PHY; + } + + /* Need to byte-swap the 16-bit value. */ + *data = ((i2ccmd >> 8) & 0x00FF) | ((i2ccmd << 8) & 0xFF00); + + return E1000_SUCCESS; +} + +/** + * e1000_write_phy_reg_i2c - Write PHY register using i2c + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Writes the data to PHY register at the offset using the i2c interface. + **/ +s32 e1000_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 i, i2ccmd = 0; + u16 phy_data_swapped; + + DEBUGFUNC("e1000_write_phy_reg_i2c"); + + /* Prevent overwritting SFP I2C EEPROM which is at A0 address.*/ + if ((hw->phy.addr == 0) || (hw->phy.addr > 7)) { + DEBUGOUT1("PHY I2C Address %d is out of range.\n", + hw->phy.addr); + return -E1000_ERR_CONFIG; + } + + /* Swap the data bytes for the I2C interface */ + phy_data_swapped = ((data >> 8) & 0x00FF) | ((data << 8) & 0xFF00); + + /* Set up Op-code, Phy Address, and register address in the I2CCMD + * register. The MAC will take care of interfacing with the + * PHY to retrieve the desired data. + */ + i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) | + (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) | + E1000_I2CCMD_OPCODE_WRITE | + phy_data_swapped); + + E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd); + + /* Poll the ready bit to see if the I2C read completed */ + for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) { + usec_delay(50); + i2ccmd = E1000_READ_REG(hw, E1000_I2CCMD); + if (i2ccmd & E1000_I2CCMD_READY) + break; + } + if (!(i2ccmd & E1000_I2CCMD_READY)) { + DEBUGOUT("I2CCMD Write did not complete\n"); + return -E1000_ERR_PHY; + } + if (i2ccmd & E1000_I2CCMD_ERROR) { + DEBUGOUT("I2CCMD Error bit set\n"); + return -E1000_ERR_PHY; + } + + return E1000_SUCCESS; +} + +/** + * e1000_read_sfp_data_byte - Reads SFP module data. + * @hw: pointer to the HW structure + * @offset: byte location offset to be read + * @data: read data buffer pointer + * + * Reads one byte from SFP module data stored + * in SFP resided EEPROM memory or SFP diagnostic area. 
+ * Function should be called with + * E1000_I2CCMD_SFP_DATA_ADDR() for SFP module database access + * E1000_I2CCMD_SFP_DIAG_ADDR() for SFP diagnostics parameters + * access + **/ +s32 e1000_read_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 *data) +{ + u32 i = 0; + u32 i2ccmd = 0; + u32 data_local = 0; + + DEBUGFUNC("e1000_read_sfp_data_byte"); + + if (offset > E1000_I2CCMD_SFP_DIAG_ADDR(255)) { + DEBUGOUT("I2CCMD command address exceeds upper limit\n"); + return -E1000_ERR_PHY; + } + + /* Set up Op-code, EEPROM Address,in the I2CCMD + * register. The MAC will take care of interfacing with the + * EEPROM to retrieve the desired data. + */ + i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) | + E1000_I2CCMD_OPCODE_READ); + + E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd); + + /* Poll the ready bit to see if the I2C read completed */ + for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) { + usec_delay(50); + data_local = E1000_READ_REG(hw, E1000_I2CCMD); + if (data_local & E1000_I2CCMD_READY) + break; + } + if (!(data_local & E1000_I2CCMD_READY)) { + DEBUGOUT("I2CCMD Read did not complete\n"); + return -E1000_ERR_PHY; + } + if (data_local & E1000_I2CCMD_ERROR) { + DEBUGOUT("I2CCMD Error bit set\n"); + return -E1000_ERR_PHY; + } + *data = (u8) data_local & 0xFF; + + return E1000_SUCCESS; +} + +/** + * e1000_write_sfp_data_byte - Writes SFP module data. + * @hw: pointer to the HW structure + * @offset: byte location offset to write to + * @data: data to write + * + * Writes one byte to SFP module data stored + * in SFP resided EEPROM memory or SFP diagnostic area. + * Function should be called with + * E1000_I2CCMD_SFP_DATA_ADDR() for SFP module database access + * E1000_I2CCMD_SFP_DIAG_ADDR() for SFP diagnostics parameters + * access + **/ +s32 e1000_write_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 data) +{ + u32 i = 0; + u32 i2ccmd = 0; + u32 data_local = 0; + + DEBUGFUNC("e1000_write_sfp_data_byte"); + + if (offset > E1000_I2CCMD_SFP_DIAG_ADDR(255)) { + DEBUGOUT("I2CCMD command address exceeds upper limit\n"); + return -E1000_ERR_PHY; + } + /* The programming interface is 16 bits wide + * so we need to read the whole word first + * then update appropriate byte lane and write + * the updated word back. + */ + /* Set up Op-code, EEPROM Address,in the I2CCMD + * register. The MAC will take care of interfacing + * with an EEPROM to write the data given. 
+ */ + i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) | + E1000_I2CCMD_OPCODE_READ); + /* Set a command to read single word */ + E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd); + for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) { + usec_delay(50); + /* Poll the ready bit to see if lastly + * launched I2C operation completed + */ + i2ccmd = E1000_READ_REG(hw, E1000_I2CCMD); + if (i2ccmd & E1000_I2CCMD_READY) { + /* Check if this is READ or WRITE phase */ + if ((i2ccmd & E1000_I2CCMD_OPCODE_READ) == + E1000_I2CCMD_OPCODE_READ) { + /* Write the selected byte + * lane and update whole word + */ + data_local = i2ccmd & 0xFF00; + data_local |= data; + i2ccmd = ((offset << + E1000_I2CCMD_REG_ADDR_SHIFT) | + E1000_I2CCMD_OPCODE_WRITE | data_local); + E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd); + } else { + break; + } + } + } + if (!(i2ccmd & E1000_I2CCMD_READY)) { + DEBUGOUT("I2CCMD Write did not complete\n"); + return -E1000_ERR_PHY; + } + if (i2ccmd & E1000_I2CCMD_ERROR) { + DEBUGOUT("I2CCMD Error bit set\n"); + return -E1000_ERR_PHY; + } + return E1000_SUCCESS; +} + +/** + * e1000_read_phy_reg_m88 - Read m88 PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Acquires semaphore, if necessary, then reads the PHY register at offset + * and storing the retrieved information in data. Release any acquired + * semaphores before exiting. + **/ +s32 e1000_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data) +{ + s32 ret_val; + + DEBUGFUNC("e1000_read_phy_reg_m88"); + + if (!hw->phy.ops.acquire) + return E1000_SUCCESS; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + ret_val = e1000_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, + data); + + hw->phy.ops.release(hw); + + return ret_val; +} + +/** + * e1000_write_phy_reg_m88 - Write m88 PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore, if necessary, then writes the data to PHY register + * at the offset. Release any acquired semaphores before exiting. + **/ +s32 e1000_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data) +{ + s32 ret_val; + + DEBUGFUNC("e1000_write_phy_reg_m88"); + + if (!hw->phy.ops.acquire) + return E1000_SUCCESS; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + ret_val = e1000_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, + data); + + hw->phy.ops.release(hw); + + return ret_val; +} + +/** + * e1000_set_page_igp - Set page as on IGP-like PHY(s) + * @hw: pointer to the HW structure + * @page: page to set (shifted left when necessary) + * + * Sets PHY page required for PHY register access. Assumes semaphore is + * already acquired. Note, this function sets phy.addr to 1 so the caller + * must set it appropriately (if necessary) after this function returns. + **/ +s32 e1000_set_page_igp(struct e1000_hw *hw, u16 page) +{ + DEBUGFUNC("e1000_set_page_igp"); + + DEBUGOUT1("Setting page 0x%x\n", page); + + hw->phy.addr = 1; + + return e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, page); +} + +/** + * __e1000_read_phy_reg_igp - Read igp PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * @locked: semaphore has already been acquired or not + * + * Acquires semaphore, if necessary, then reads the PHY register at offset + * and stores the retrieved information in data. 
Release any acquired + * semaphores before exiting. + **/ +STATIC s32 __e1000_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data, + bool locked) +{ + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("__e1000_read_phy_reg_igp"); + + if (!locked) { + if (!hw->phy.ops.acquire) + return E1000_SUCCESS; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + } + + if (offset > MAX_PHY_MULTI_PAGE_REG) + ret_val = e1000_write_phy_reg_mdic(hw, + IGP01E1000_PHY_PAGE_SELECT, + (u16)offset); + if (!ret_val) + ret_val = e1000_read_phy_reg_mdic(hw, + MAX_PHY_REG_ADDRESS & offset, + data); + if (!locked) + hw->phy.ops.release(hw); + + return ret_val; +} + +/** + * e1000_read_phy_reg_igp - Read igp PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Acquires semaphore then reads the PHY register at offset and stores the + * retrieved information in data. + * Release the acquired semaphore before exiting. + **/ +s32 e1000_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data) +{ + return __e1000_read_phy_reg_igp(hw, offset, data, false); +} + +/** + * e1000_read_phy_reg_igp_locked - Read igp PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the PHY register at offset and stores the retrieved information + * in data. Assumes semaphore already acquired. + **/ +s32 e1000_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data) +{ + return __e1000_read_phy_reg_igp(hw, offset, data, true); +} + +/** + * e1000_write_phy_reg_igp - Write igp PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * @locked: semaphore has already been acquired or not + * + * Acquires semaphore, if necessary, then writes the data to PHY register + * at the offset. Release any acquired semaphores before exiting. + **/ +STATIC s32 __e1000_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data, + bool locked) +{ + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_write_phy_reg_igp"); + + if (!locked) { + if (!hw->phy.ops.acquire) + return E1000_SUCCESS; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + } + + if (offset > MAX_PHY_MULTI_PAGE_REG) + ret_val = e1000_write_phy_reg_mdic(hw, + IGP01E1000_PHY_PAGE_SELECT, + (u16)offset); + if (!ret_val) + ret_val = e1000_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & + offset, + data); + if (!locked) + hw->phy.ops.release(hw); + + return ret_val; +} + +/** + * e1000_write_phy_reg_igp - Write igp PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore then writes the data to PHY register + * at the offset. Release any acquired semaphores before exiting. + **/ +s32 e1000_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data) +{ + return __e1000_write_phy_reg_igp(hw, offset, data, false); +} + +/** + * e1000_write_phy_reg_igp_locked - Write igp PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Writes the data to PHY register at the offset. + * Assumes semaphore already acquired. 
+ **/ +s32 e1000_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 data) +{ + return __e1000_write_phy_reg_igp(hw, offset, data, true); +} + +/** + * __e1000_read_kmrn_reg - Read kumeran register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * @locked: semaphore has already been acquired or not + * + * Acquires semaphore, if necessary. Then reads the PHY register at offset + * using the kumeran interface. The information retrieved is stored in data. + * Release any acquired semaphores before exiting. + **/ +STATIC s32 __e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data, + bool locked) +{ + u32 kmrnctrlsta; + + DEBUGFUNC("__e1000_read_kmrn_reg"); + + if (!locked) { + s32 ret_val = E1000_SUCCESS; + + if (!hw->phy.ops.acquire) + return E1000_SUCCESS; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + } + + kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & + E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN; + E1000_WRITE_REG(hw, E1000_KMRNCTRLSTA, kmrnctrlsta); + E1000_WRITE_FLUSH(hw); + + usec_delay(2); + + kmrnctrlsta = E1000_READ_REG(hw, E1000_KMRNCTRLSTA); + *data = (u16)kmrnctrlsta; + + if (!locked) + hw->phy.ops.release(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_read_kmrn_reg_generic - Read kumeran register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Acquires semaphore then reads the PHY register at offset using the + * kumeran interface. The information retrieved is stored in data. + * Release the acquired semaphore before exiting. + **/ +s32 e1000_read_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 *data) +{ + return __e1000_read_kmrn_reg(hw, offset, data, false); +} + +/** + * e1000_read_kmrn_reg_locked - Read kumeran register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the PHY register at offset using the kumeran interface. The + * information retrieved is stored in data. + * Assumes semaphore already acquired. + **/ +s32 e1000_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data) +{ + return __e1000_read_kmrn_reg(hw, offset, data, true); +} + +/** + * __e1000_write_kmrn_reg - Write kumeran register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * @locked: semaphore has already been acquired or not + * + * Acquires semaphore, if necessary. Then write the data to PHY register + * at the offset using the kumeran interface. Release any acquired semaphores + * before exiting. 
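+ *
+ * Callers normally go through the wrappers below rather than this helper.
+ * Minimal sketch (illustration only; the offset and value are placeholders):
+ *
+ *	u32 kmrn_off = 0;
+ *	u16 kmrn_val = 0;
+ *
+ *	if (!e1000_write_kmrn_reg_generic(hw, kmrn_off, kmrn_val))
+ *		(void)e1000_read_kmrn_reg_generic(hw, kmrn_off, &kmrn_val);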
+ **/ +STATIC s32 __e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data, + bool locked) +{ + u32 kmrnctrlsta; + + DEBUGFUNC("e1000_write_kmrn_reg_generic"); + + if (!locked) { + s32 ret_val = E1000_SUCCESS; + + if (!hw->phy.ops.acquire) + return E1000_SUCCESS; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + } + + kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & + E1000_KMRNCTRLSTA_OFFSET) | data; + E1000_WRITE_REG(hw, E1000_KMRNCTRLSTA, kmrnctrlsta); + E1000_WRITE_FLUSH(hw); + + usec_delay(2); + + if (!locked) + hw->phy.ops.release(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_write_kmrn_reg_generic - Write kumeran register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore then writes the data to the PHY register at the offset + * using the kumeran interface. Release the acquired semaphore before exiting. + **/ +s32 e1000_write_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 data) +{ + return __e1000_write_kmrn_reg(hw, offset, data, false); +} + +/** + * e1000_write_kmrn_reg_locked - Write kumeran register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Write the data to PHY register at the offset using the kumeran interface. + * Assumes semaphore already acquired. + **/ +s32 e1000_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data) +{ + return __e1000_write_kmrn_reg(hw, offset, data, true); +} + +/** + * e1000_set_master_slave_mode - Setup PHY for Master/slave mode + * @hw: pointer to the HW structure + * + * Sets up Master/slave mode + **/ +STATIC s32 e1000_set_master_slave_mode(struct e1000_hw *hw) +{ + s32 ret_val; + u16 phy_data; + + /* Resolve Master/Slave mode */ + ret_val = hw->phy.ops.read_reg(hw, PHY_1000T_CTRL, &phy_data); + if (ret_val) + return ret_val; + + /* load defaults for future use */ + hw->phy.original_ms_type = (phy_data & CR_1000T_MS_ENABLE) ? + ((phy_data & CR_1000T_MS_VALUE) ? + e1000_ms_force_master : + e1000_ms_force_slave) : e1000_ms_auto; + + switch (hw->phy.ms_type) { + case e1000_ms_force_master: + phy_data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE); + break; + case e1000_ms_force_slave: + phy_data |= CR_1000T_MS_ENABLE; + phy_data &= ~(CR_1000T_MS_VALUE); + break; + case e1000_ms_auto: + phy_data &= ~CR_1000T_MS_ENABLE; + /* fall-through */ + default: + break; + } + + return hw->phy.ops.write_reg(hw, PHY_1000T_CTRL, phy_data); +} + +/** + * e1000_copper_link_setup_82577 - Setup 82577 PHY for copper link + * @hw: pointer to the HW structure + * + * Sets up Carrier-sense on Transmit and downshift values. + **/ +s32 e1000_copper_link_setup_82577(struct e1000_hw *hw) +{ + s32 ret_val; + u16 phy_data; + + DEBUGFUNC("e1000_copper_link_setup_82577"); + + if (hw->phy.type == e1000_phy_82580) { + ret_val = hw->phy.ops.reset(hw); + if (ret_val) { + DEBUGOUT("Error resetting the PHY.\n"); + return ret_val; + } + } + + /* Enable CRS on Tx. This must be set for half-duplex operation. 
*/ + ret_val = hw->phy.ops.read_reg(hw, I82577_CFG_REG, &phy_data); + if (ret_val) + return ret_val; + + phy_data |= I82577_CFG_ASSERT_CRS_ON_TX; + + /* Enable downshift */ + phy_data |= I82577_CFG_ENABLE_DOWNSHIFT; + + ret_val = hw->phy.ops.write_reg(hw, I82577_CFG_REG, phy_data); + if (ret_val) + return ret_val; + + /* Set MDI/MDIX mode */ + ret_val = hw->phy.ops.read_reg(hw, I82577_PHY_CTRL_2, &phy_data); + if (ret_val) + return ret_val; + phy_data &= ~I82577_PHY_CTRL2_MDIX_CFG_MASK; + /* Options: + * 0 - Auto (default) + * 1 - MDI mode + * 2 - MDI-X mode + */ + switch (hw->phy.mdix) { + case 1: + break; + case 2: + phy_data |= I82577_PHY_CTRL2_MANUAL_MDIX; + break; + case 0: + default: + phy_data |= I82577_PHY_CTRL2_AUTO_MDI_MDIX; + break; + } + ret_val = hw->phy.ops.write_reg(hw, I82577_PHY_CTRL_2, phy_data); + if (ret_val) + return ret_val; + + return e1000_set_master_slave_mode(hw); +} + +/** + * e1000_copper_link_setup_m88 - Setup m88 PHY's for copper link + * @hw: pointer to the HW structure + * + * Sets up MDI/MDI-X and polarity for m88 PHY's. If necessary, transmit clock + * and downshift values are set also. + **/ +s32 e1000_copper_link_setup_m88(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + + DEBUGFUNC("e1000_copper_link_setup_m88"); + + + /* Enable CRS on Tx. This must be set for half-duplex operation. */ + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + /* For BM PHY this bit is downshift enable */ + if (phy->type != e1000_phy_bm) + phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; + + /* Options: + * MDI/MDI-X = 0 (default) + * 0 - Auto for all speeds + * 1 - MDI mode + * 2 - MDI-X mode + * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes) + */ + phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; + + switch (phy->mdix) { + case 1: + phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE; + break; + case 2: + phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE; + break; + case 3: + phy_data |= M88E1000_PSCR_AUTO_X_1000T; + break; + case 0: + default: + phy_data |= M88E1000_PSCR_AUTO_X_MODE; + break; + } + + /* Options: + * disable_polarity_correction = 0 (default) + * Automatic Correction for Reversed Cable Polarity + * 0 - Disabled + * 1 - Enabled + */ + phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL; + if (phy->disable_polarity_correction) + phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; + + /* Enable downshift on BM (disabled by default) */ + if (phy->type == e1000_phy_bm) { + /* For 82574/82583, first disable then enable downshift */ + if (phy->id == BME1000_E_PHY_ID_R2) { + phy_data &= ~BME1000_PSCR_ENABLE_DOWNSHIFT; + ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, + phy_data); + if (ret_val) + return ret_val; + /* Commit the changes. */ + ret_val = phy->ops.commit(hw); + if (ret_val) { + DEBUGOUT("Error committing the PHY changes\n"); + return ret_val; + } + } + + phy_data |= BME1000_PSCR_ENABLE_DOWNSHIFT; + } + + ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + if ((phy->type == e1000_phy_m88) && + (phy->revision < E1000_REVISION_4) && + (phy->id != BME1000_E_PHY_ID_R2)) { + /* Force TX_CLK in the Extended PHY Specific Control Register + * to 25MHz clock. 
+ */ + ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, + &phy_data); + if (ret_val) + return ret_val; + + phy_data |= M88E1000_EPSCR_TX_CLK_25; + + if ((phy->revision == E1000_REVISION_2) && + (phy->id == M88E1111_I_PHY_ID)) { + /* 82573L PHY - set the downshift counter to 5x. */ + phy_data &= ~M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK; + phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X; + } else { + /* Configure Master and Slave downshift values */ + phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK | + M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK); + phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X | + M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X); + } + ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, + phy_data); + if (ret_val) + return ret_val; + } + + if ((phy->type == e1000_phy_bm) && (phy->id == BME1000_E_PHY_ID_R2)) { + /* Set PHY page 0, register 29 to 0x0003 */ + ret_val = phy->ops.write_reg(hw, 29, 0x0003); + if (ret_val) + return ret_val; + + /* Set PHY page 0, register 30 to 0x0000 */ + ret_val = phy->ops.write_reg(hw, 30, 0x0000); + if (ret_val) + return ret_val; + } + + /* Commit the changes. */ + ret_val = phy->ops.commit(hw); + if (ret_val) { + DEBUGOUT("Error committing the PHY changes\n"); + return ret_val; + } + + if (phy->type == e1000_phy_82578) { + ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, + &phy_data); + if (ret_val) + return ret_val; + + /* 82578 PHY - set the downshift count to 1x. */ + phy_data |= I82578_EPSCR_DOWNSHIFT_ENABLE; + phy_data &= ~I82578_EPSCR_DOWNSHIFT_COUNTER_MASK; + ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, + phy_data); + if (ret_val) + return ret_val; + } + + return E1000_SUCCESS; +} + +/** + * e1000_copper_link_setup_m88_gen2 - Setup m88 PHY's for copper link + * @hw: pointer to the HW structure + * + * Sets up MDI/MDI-X and polarity for i347-AT4, m88e1322 and m88e1112 PHY's. + * Also enables and sets the downshift parameters. + **/ +s32 e1000_copper_link_setup_m88_gen2(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + + DEBUGFUNC("e1000_copper_link_setup_m88_gen2"); + + + /* Enable CRS on Tx. This must be set for half-duplex operation. 
*/ + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + /* Options: + * MDI/MDI-X = 0 (default) + * 0 - Auto for all speeds + * 1 - MDI mode + * 2 - MDI-X mode + * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes) + */ + phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; + + switch (phy->mdix) { + case 1: + phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE; + break; + case 2: + phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE; + break; + case 3: + /* M88E1112 does not support this mode) */ + if (phy->id != M88E1112_E_PHY_ID) { + phy_data |= M88E1000_PSCR_AUTO_X_1000T; + break; + } + case 0: + default: + phy_data |= M88E1000_PSCR_AUTO_X_MODE; + break; + } + + /* Options: + * disable_polarity_correction = 0 (default) + * Automatic Correction for Reversed Cable Polarity + * 0 - Disabled + * 1 - Enabled + */ + phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL; + if (phy->disable_polarity_correction) + phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; + + /* Enable downshift and setting it to X6 */ + if (phy->id == M88E1543_E_PHY_ID) { + phy_data &= ~I347AT4_PSCR_DOWNSHIFT_ENABLE; + ret_val = + phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + ret_val = phy->ops.commit(hw); + if (ret_val) { + DEBUGOUT("Error committing the PHY changes\n"); + return ret_val; + } + } + + phy_data &= ~I347AT4_PSCR_DOWNSHIFT_MASK; + phy_data |= I347AT4_PSCR_DOWNSHIFT_6X; + phy_data |= I347AT4_PSCR_DOWNSHIFT_ENABLE; + + ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + /* Commit the changes. */ + ret_val = phy->ops.commit(hw); + if (ret_val) { + DEBUGOUT("Error committing the PHY changes\n"); + return ret_val; + } + + ret_val = e1000_set_master_slave_mode(hw); + if (ret_val) + return ret_val; + + return E1000_SUCCESS; +} + +/** + * e1000_copper_link_setup_igp - Setup igp PHY's for copper link + * @hw: pointer to the HW structure + * + * Sets up LPLU, MDI/MDI-X, polarity, Smartspeed and Master/Slave config for + * igp PHY's. + **/ +s32 e1000_copper_link_setup_igp(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + + DEBUGFUNC("e1000_copper_link_setup_igp"); + + + ret_val = hw->phy.ops.reset(hw); + if (ret_val) { + DEBUGOUT("Error resetting the PHY.\n"); + return ret_val; + } + + /* Wait 100ms for MAC to configure PHY from NVM settings, to avoid + * timeout issues when LFS is enabled. + */ + msec_delay(100); + + /* The NVM settings will configure LPLU in D3 for + * non-IGP1 PHYs. 
+ */ + if (phy->type == e1000_phy_igp) { + /* disable lplu d3 during driver init */ + ret_val = hw->phy.ops.set_d3_lplu_state(hw, false); + if (ret_val) { + DEBUGOUT("Error Disabling LPLU D3\n"); + return ret_val; + } + } + + /* disable lplu d0 during driver init */ + if (hw->phy.ops.set_d0_lplu_state) { + ret_val = hw->phy.ops.set_d0_lplu_state(hw, false); + if (ret_val) { + DEBUGOUT("Error Disabling LPLU D0\n"); + return ret_val; + } + } + /* Configure mdi-mdix settings */ + ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CTRL, &data); + if (ret_val) + return ret_val; + + data &= ~IGP01E1000_PSCR_AUTO_MDIX; + + switch (phy->mdix) { + case 1: + data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; + break; + case 2: + data |= IGP01E1000_PSCR_FORCE_MDI_MDIX; + break; + case 0: + default: + data |= IGP01E1000_PSCR_AUTO_MDIX; + break; + } + ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CTRL, data); + if (ret_val) + return ret_val; + + /* set auto-master slave resolution settings */ + if (hw->mac.autoneg) { + /* when autonegotiation advertisement is only 1000Mbps then we + * should disable SmartSpeed and enable Auto MasterSlave + * resolution as hardware default. + */ + if (phy->autoneg_advertised == ADVERTISE_1000_FULL) { + /* Disable SmartSpeed */ + ret_val = phy->ops.read_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + + /* Set auto Master/Slave resolution process */ + ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, &data); + if (ret_val) + return ret_val; + + data &= ~CR_1000T_MS_ENABLE; + ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL, data); + if (ret_val) + return ret_val; + } + + ret_val = e1000_set_master_slave_mode(hw); + } + + return ret_val; +} + +/** + * e1000_phy_setup_autoneg - Configure PHY for auto-negotiation + * @hw: pointer to the HW structure + * + * Reads the MII auto-neg advertisement register and/or the 1000T control + * register and if the PHY is already setup for auto-negotiation, then + * return successful. Otherwise, setup advertisement and flow control to + * the appropriate values for the wanted auto-negotiation. + **/ +s32 e1000_phy_setup_autoneg(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 mii_autoneg_adv_reg; + u16 mii_1000t_ctrl_reg = 0; + + DEBUGFUNC("e1000_phy_setup_autoneg"); + + phy->autoneg_advertised &= phy->autoneg_mask; + + /* Read the MII Auto-Neg Advertisement Register (Address 4). */ + ret_val = phy->ops.read_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg); + if (ret_val) + return ret_val; + + if (phy->autoneg_mask & ADVERTISE_1000_FULL) { + /* Read the MII 1000Base-T Control Register (Address 9). */ + ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, + &mii_1000t_ctrl_reg); + if (ret_val) + return ret_val; + } + + /* Need to parse both autoneg_advertised and fc and set up + * the appropriate PHY registers. First we will parse for + * autoneg_advertised software override. Since we can advertise + * a plethora of combinations, we need to check each bit + * individually. + */ + + /* First we clear all the 10/100 mb speed bits in the Auto-Neg + * Advertisement Register (Address 4) and the 1000 mb speed bits in + * the 1000Base-T Control Register (Address 9). 
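+ *
+ * For reference, the capability bits cleared and re-set below map to the
+ * advertised modes as follows:
+ *	10 Mb half/full   - NWAY_AR_10T_HD_CAPS / NWAY_AR_10T_FD_CAPS
+ *	100 Mb half/full  - NWAY_AR_100TX_HD_CAPS / NWAY_AR_100TX_FD_CAPS
+ *	1000 Mb full      - CR_1000T_FD_CAPS (1000 Mb half is never advertised)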
+ */ + mii_autoneg_adv_reg &= ~(NWAY_AR_100TX_FD_CAPS | + NWAY_AR_100TX_HD_CAPS | + NWAY_AR_10T_FD_CAPS | + NWAY_AR_10T_HD_CAPS); + mii_1000t_ctrl_reg &= ~(CR_1000T_HD_CAPS | CR_1000T_FD_CAPS); + + DEBUGOUT1("autoneg_advertised %x\n", phy->autoneg_advertised); + + /* Do we want to advertise 10 Mb Half Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_10_HALF) { + DEBUGOUT("Advertise 10mb Half duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS; + } + + /* Do we want to advertise 10 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_10_FULL) { + DEBUGOUT("Advertise 10mb Full duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS; + } + + /* Do we want to advertise 100 Mb Half Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_100_HALF) { + DEBUGOUT("Advertise 100mb Half duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS; + } + + /* Do we want to advertise 100 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_100_FULL) { + DEBUGOUT("Advertise 100mb Full duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS; + } + + /* We do not allow the Phy to advertise 1000 Mb Half Duplex */ + if (phy->autoneg_advertised & ADVERTISE_1000_HALF) + DEBUGOUT("Advertise 1000mb Half duplex request denied!\n"); + + /* Do we want to advertise 1000 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_1000_FULL) { + DEBUGOUT("Advertise 1000mb Full duplex\n"); + mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS; + } + + /* Check for a software override of the flow control settings, and + * setup the PHY advertisement registers accordingly. If + * auto-negotiation is enabled, then software will have to set the + * "PAUSE" bits to the correct value in the Auto-Negotiation + * Advertisement Register (PHY_AUTONEG_ADV) and re-start auto- + * negotiation. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames + * but we do not support receiving pause frames). + * 3: Both Rx and Tx flow control (symmetric) are enabled. + * other: No software override. The flow control configuration + * in the EEPROM is used. + */ + switch (hw->fc.current_mode) { + case e1000_fc_none: + /* Flow control (Rx & Tx) is completely disabled by a + * software over-ride. + */ + mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + case e1000_fc_rx_pause: + /* Rx Flow control is enabled, and Tx Flow control is + * disabled, by a software over-ride. + * + * Since there really isn't a way to advertise that we are + * capable of Rx Pause ONLY, we will advertise that we + * support both symmetric and asymmetric Rx PAUSE. Later + * (in e1000_config_fc_after_link_up) we will disable the + * hw's ability to send PAUSE frames. + */ + mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + case e1000_fc_tx_pause: + /* Tx Flow control is enabled, and Rx Flow control is + * disabled, by a software over-ride. + */ + mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR; + mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE; + break; + case e1000_fc_full: + /* Flow control (both Rx and Tx) is enabled by a software + * over-ride. 
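+ *
+ * Note the advertisement bits set here are identical to those in the
+ * e1000_fc_rx_pause case above; the two modes only diverge after link-up,
+ * when e1000_config_fc_after_link_up decides whether Tx PAUSE frames are
+ * actually sent.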
+ */ + mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + default: + DEBUGOUT("Flow control param set incorrectly\n"); + return -E1000_ERR_CONFIG; + } + + ret_val = phy->ops.write_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg); + if (ret_val) + return ret_val; + + DEBUGOUT1("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); + + if (phy->autoneg_mask & ADVERTISE_1000_FULL) + ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL, + mii_1000t_ctrl_reg); + + return ret_val; +} + +/** + * e1000_copper_link_autoneg - Setup/Enable autoneg for copper link + * @hw: pointer to the HW structure + * + * Performs initial bounds checking on autoneg advertisement parameter, then + * configure to advertise the full capability. Setup the PHY to autoneg + * and restart the negotiation process between the link partner. If + * autoneg_wait_to_complete, then wait for autoneg to complete before exiting. + **/ +s32 e1000_copper_link_autoneg(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_ctrl; + + DEBUGFUNC("e1000_copper_link_autoneg"); + + /* Perform some bounds checking on the autoneg advertisement + * parameter. + */ + phy->autoneg_advertised &= phy->autoneg_mask; + + /* If autoneg_advertised is zero, we assume it was not defaulted + * by the calling code so we set to advertise full capability. + */ + if (!phy->autoneg_advertised) + phy->autoneg_advertised = phy->autoneg_mask; + + DEBUGOUT("Reconfiguring auto-neg advertisement params\n"); + ret_val = e1000_phy_setup_autoneg(hw); + if (ret_val) { + DEBUGOUT("Error Setting up Auto-Negotiation\n"); + return ret_val; + } + DEBUGOUT("Restarting Auto-Neg\n"); + + /* Restart auto-negotiation by setting the Auto Neg Enable bit and + * the Auto Neg Restart bit in the PHY control register. + */ + ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_ctrl); + if (ret_val) + return ret_val; + + phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG); + ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_ctrl); + if (ret_val) + return ret_val; + + /* Does the user want to wait for Auto-Neg to complete here, or + * check at a later time (for example, callback routine). + */ + if (phy->autoneg_wait_to_complete) { + ret_val = e1000_wait_autoneg(hw); + if (ret_val) { + DEBUGOUT("Error while waiting for autoneg to complete\n"); + return ret_val; + } + } + + hw->mac.get_link_status = true; + + return ret_val; +} + +/** + * e1000_setup_copper_link_generic - Configure copper link settings + * @hw: pointer to the HW structure + * + * Calls the appropriate function to configure the link for auto-neg or forced + * speed and duplex. Then we check for link, once link is established calls + * to configure collision distance and flow control are called. If link is + * not established, we return -E1000_ERR_PHY (-2). + **/ +s32 e1000_setup_copper_link_generic(struct e1000_hw *hw) +{ + s32 ret_val; + bool link = true; + + DEBUGFUNC("e1000_setup_copper_link_generic"); + + if (hw->mac.autoneg) { + /* Setup autoneg and flow control advertisement and perform + * autonegotiation. + */ + ret_val = e1000_copper_link_autoneg(hw); + if (ret_val) + return ret_val; + } else { + /* PHY will be set to 10H, 10F, 100H or 100F + * depending on user settings. + */ + DEBUGOUT("Forcing Speed and Duplex\n"); + ret_val = hw->phy.ops.force_speed_duplex(hw); + if (ret_val) { + DEBUGOUT("Error Forcing Speed and Duplex\n"); + return ret_val; + } + } + + /* Check link status. Wait up to 100 microseconds for link to become + * valid. 
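+ *
+ * (The call below makes COPPER_LINK_UP_LIMIT attempts at a 10 microsecond
+ * interval, which is where the roughly 100 microsecond bound above comes
+ * from.)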
+ */ + ret_val = e1000_phy_has_link_generic(hw, COPPER_LINK_UP_LIMIT, 10, + &link); + if (ret_val) + return ret_val; + + if (link) { + DEBUGOUT("Valid link established!!!\n"); + hw->mac.ops.config_collision_dist(hw); + ret_val = e1000_config_fc_after_link_up_generic(hw); + } else { + DEBUGOUT("Unable to establish link!!!\n"); + } + + return ret_val; +} + +/** + * e1000_phy_force_speed_duplex_igp - Force speed/duplex for igp PHY + * @hw: pointer to the HW structure + * + * Calls the PHY setup function to force speed and duplex. Clears the + * auto-crossover to force MDI manually. Waits for link and returns + * successful if link up is successful, else -E1000_ERR_PHY (-2). + **/ +s32 e1000_phy_force_speed_duplex_igp(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + bool link; + + DEBUGFUNC("e1000_phy_force_speed_duplex_igp"); + + ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data); + if (ret_val) + return ret_val; + + e1000_phy_force_speed_duplex_setup(hw, &phy_data); + + ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data); + if (ret_val) + return ret_val; + + /* Clear Auto-Crossover to force MDI manually. IGP requires MDI + * forced whenever speed and duplex are forced. + */ + ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX; + phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; + + ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data); + if (ret_val) + return ret_val; + + DEBUGOUT1("IGP PSCR: %X\n", phy_data); + + usec_delay(1); + + if (phy->autoneg_wait_to_complete) { + DEBUGOUT("Waiting for forced speed/duplex link on IGP phy.\n"); + + ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); + if (ret_val) + return ret_val; + + if (!link) + DEBUGOUT("Link taking longer than expected.\n"); + + /* Try once more */ + ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); + } + + return ret_val; +} + +/** + * e1000_phy_force_speed_duplex_m88 - Force speed/duplex for m88 PHY + * @hw: pointer to the HW structure + * + * Calls the PHY setup function to force speed and duplex. Clears the + * auto-crossover to force MDI manually. Resets the PHY to commit the + * changes. If time expires while waiting for link up, we reset the DSP. + * After reset, TX_CLK and CRS on Tx must be set. Return successful upon + * successful completion, else return corresponding error code. + **/ +s32 e1000_phy_force_speed_duplex_m88(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + bool link; + + DEBUGFUNC("e1000_phy_force_speed_duplex_m88"); + + /* I210 and I211 devices support Auto-Crossover in forced operation. */ + if (phy->type != e1000_phy_i210) { + /* Clear Auto-Crossover to force MDI manually. M88E1000 + * requires MDI forced whenever speed and duplex are forced. + */ + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, + &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; + ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, + phy_data); + if (ret_val) + return ret_val; + + DEBUGOUT1("M88E1000 PSCR: %X\n", phy_data); + } + + ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data); + if (ret_val) + return ret_val; + + e1000_phy_force_speed_duplex_setup(hw, &phy_data); + + ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data); + if (ret_val) + return ret_val; + + /* Reset the phy to commit changes. 
*/ + ret_val = hw->phy.ops.commit(hw); + if (ret_val) + return ret_val; + + if (phy->autoneg_wait_to_complete) { + DEBUGOUT("Waiting for forced speed/duplex link on M88 phy.\n"); + + ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); + if (ret_val) + return ret_val; + + if (!link) { + bool reset_dsp = true; + + switch (hw->phy.id) { + case I347AT4_E_PHY_ID: + case M88E1340M_E_PHY_ID: + case M88E1112_E_PHY_ID: + case M88E1543_E_PHY_ID: + case M88E1512_E_PHY_ID: + case I210_I_PHY_ID: + reset_dsp = false; + break; + default: + if (hw->phy.type != e1000_phy_m88) + reset_dsp = false; + break; + } + + if (!reset_dsp) { + DEBUGOUT("Link taking longer than expected.\n"); + } else { + /* We didn't get link. + * Reset the DSP and cross our fingers. + */ + ret_val = phy->ops.write_reg(hw, + M88E1000_PHY_PAGE_SELECT, + 0x001d); + if (ret_val) + return ret_val; + ret_val = e1000_phy_reset_dsp_generic(hw); + if (ret_val) + return ret_val; + } + } + + /* Try once more */ + ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); + if (ret_val) + return ret_val; + } + + if (hw->phy.type != e1000_phy_m88) + return E1000_SUCCESS; + + if (hw->phy.id == I347AT4_E_PHY_ID || + hw->phy.id == M88E1340M_E_PHY_ID || + hw->phy.id == M88E1112_E_PHY_ID) + return E1000_SUCCESS; + if (hw->phy.id == I210_I_PHY_ID) + return E1000_SUCCESS; + if ((hw->phy.id == M88E1543_E_PHY_ID) || + (hw->phy.id == M88E1512_E_PHY_ID)) + return E1000_SUCCESS; + ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + /* Resetting the phy means we need to re-force TX_CLK in the + * Extended PHY Specific Control Register to 25MHz clock from + * the reset value of 2.5MHz. + */ + phy_data |= M88E1000_EPSCR_TX_CLK_25; + ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + /* In addition, we must re-enable CRS on Tx for both half and full + * duplex. + */ + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; + ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + + return ret_val; +} + +/** + * e1000_phy_force_speed_duplex_ife - Force PHY speed & duplex + * @hw: pointer to the HW structure + * + * Forces the speed and duplex settings of the PHY. + * This is a function pointer entry point only called by + * PHY setup routines. 
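+ *
+ * Reached indirectly, e.g. (sketch only; the forced value is just an
+ * example and this assumes the ops table routes force_speed_duplex here
+ * when an IFE PHY is in use):
+ *
+ *	hw->mac.autoneg = false;
+ *	hw->mac.forced_speed_duplex = ADVERTISE_100_FULL;
+ *	e1000_setup_copper_link_generic(hw);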
+ **/ +s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + bool link; + + DEBUGFUNC("e1000_phy_force_speed_duplex_ife"); + + ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &data); + if (ret_val) + return ret_val; + + e1000_phy_force_speed_duplex_setup(hw, &data); + + ret_val = phy->ops.write_reg(hw, PHY_CONTROL, data); + if (ret_val) + return ret_val; + + /* Disable MDI-X support for 10/100 */ + ret_val = phy->ops.read_reg(hw, IFE_PHY_MDIX_CONTROL, &data); + if (ret_val) + return ret_val; + + data &= ~IFE_PMC_AUTO_MDIX; + data &= ~IFE_PMC_FORCE_MDIX; + + ret_val = phy->ops.write_reg(hw, IFE_PHY_MDIX_CONTROL, data); + if (ret_val) + return ret_val; + + DEBUGOUT1("IFE PMC: %X\n", data); + + usec_delay(1); + + if (phy->autoneg_wait_to_complete) { + DEBUGOUT("Waiting for forced speed/duplex link on IFE phy.\n"); + + ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); + if (ret_val) + return ret_val; + + if (!link) + DEBUGOUT("Link taking longer than expected.\n"); + + /* Try once more */ + ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); + if (ret_val) + return ret_val; + } + + return E1000_SUCCESS; +} + +/** + * e1000_phy_force_speed_duplex_setup - Configure forced PHY speed/duplex + * @hw: pointer to the HW structure + * @phy_ctrl: pointer to current value of PHY_CONTROL + * + * Forces speed and duplex on the PHY by doing the following: disable flow + * control, force speed/duplex on the MAC, disable auto speed detection, + * disable auto-negotiation, configure duplex, configure speed, configure + * the collision distance, write configuration to CTRL register. The + * caller must write to the PHY_CONTROL register for these settings to + * take affect. + **/ +void e1000_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 ctrl; + + DEBUGFUNC("e1000_phy_force_speed_duplex_setup"); + + /* Turn off flow control when forcing speed/duplex */ + hw->fc.current_mode = e1000_fc_none; + + /* Force speed/duplex on the mac */ + ctrl = E1000_READ_REG(hw, E1000_CTRL); + ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + ctrl &= ~E1000_CTRL_SPD_SEL; + + /* Disable Auto Speed Detection */ + ctrl &= ~E1000_CTRL_ASDE; + + /* Disable autoneg on the phy */ + *phy_ctrl &= ~MII_CR_AUTO_NEG_EN; + + /* Forcing Full or Half Duplex? */ + if (mac->forced_speed_duplex & E1000_ALL_HALF_DUPLEX) { + ctrl &= ~E1000_CTRL_FD; + *phy_ctrl &= ~MII_CR_FULL_DUPLEX; + DEBUGOUT("Half Duplex\n"); + } else { + ctrl |= E1000_CTRL_FD; + *phy_ctrl |= MII_CR_FULL_DUPLEX; + DEBUGOUT("Full Duplex\n"); + } + + /* Forcing 10mb or 100mb? */ + if (mac->forced_speed_duplex & E1000_ALL_100_SPEED) { + ctrl |= E1000_CTRL_SPD_100; + *phy_ctrl |= MII_CR_SPEED_100; + *phy_ctrl &= ~MII_CR_SPEED_1000; + DEBUGOUT("Forcing 100mb\n"); + } else { + ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100); + *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100); + DEBUGOUT("Forcing 10mb\n"); + } + + hw->mac.ops.config_collision_dist(hw); + + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); +} + +/** + * e1000_set_d3_lplu_state_generic - Sets low power link up state for D3 + * @hw: pointer to the HW structure + * @active: boolean used to enable/disable lplu + * + * Success returns 0, Failure returns 1 + * + * The low power link up (lplu) state is set to the power management level D3 + * and SmartSpeed is disabled when active is true, else clear lplu for D3 + * and enable Smartspeed. 
LPLU and Smartspeed are mutually exclusive. LPLU + * is used during Dx states where the power conservation is most important. + * During driver activity, SmartSpeed should be enabled so performance is + * maintained. + **/ +s32 e1000_set_d3_lplu_state_generic(struct e1000_hw *hw, bool active) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + + DEBUGFUNC("e1000_set_d3_lplu_state_generic"); + + if (!hw->phy.ops.read_reg) + return E1000_SUCCESS; + + ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data); + if (ret_val) + return ret_val; + + if (!active) { + data &= ~IGP02E1000_PM_D3_LPLU; + ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, + data); + if (ret_val) + return ret_val; + /* LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. + */ + if (phy->smart_speed == e1000_smart_speed_on) { + ret_val = phy->ops.read_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data |= IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + } else if (phy->smart_speed == e1000_smart_speed_off) { + ret_val = phy->ops.read_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + } + } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) || + (phy->autoneg_advertised == E1000_ALL_NOT_GIG) || + (phy->autoneg_advertised == E1000_ALL_10_SPEED)) { + data |= IGP02E1000_PM_D3_LPLU; + ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, + data); + if (ret_val) + return ret_val; + + /* When LPLU is enabled, we should disable SmartSpeed */ + ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + data); + } + + return ret_val; +} + +/** + * e1000_check_downshift_generic - Checks whether a downshift in speed occurred + * @hw: pointer to the HW structure + * + * Success returns 0, Failure returns 1 + * + * A downshift is detected by querying the PHY link health. + **/ +s32 e1000_check_downshift_generic(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, offset, mask; + + DEBUGFUNC("e1000_check_downshift_generic"); + + switch (phy->type) { + case e1000_phy_i210: + case e1000_phy_m88: + case e1000_phy_gg82563: + case e1000_phy_bm: + case e1000_phy_82578: + offset = M88E1000_PHY_SPEC_STATUS; + mask = M88E1000_PSSR_DOWNSHIFT; + break; + case e1000_phy_igp: + case e1000_phy_igp_2: + case e1000_phy_igp_3: + offset = IGP01E1000_PHY_LINK_HEALTH; + mask = IGP01E1000_PLHR_SS_DOWNGRADE; + break; + default: + /* speed downshift not supported */ + phy->speed_downgraded = false; + return E1000_SUCCESS; + } + + ret_val = phy->ops.read_reg(hw, offset, &phy_data); + + if (!ret_val) + phy->speed_downgraded = !!(phy_data & mask); + + return ret_val; +} + +/** + * e1000_check_polarity_m88 - Checks the polarity. + * @hw: pointer to the HW structure + * + * Success returns 0, Failure returns -E1000_ERR_PHY (-2) + * + * Polarity is determined based on the PHY specific status register. 
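+ *
+ * The result is latched in phy->cable_polarity rather than returned;
+ * illustrative check (sketch only, reversed_pair being a caller-side flag):
+ *
+ *	ret_val = e1000_check_polarity_m88(hw);
+ *	if (!ret_val &&
+ *	    hw->phy.cable_polarity == e1000_rev_polarity_reversed)
+ *		reversed_pair = true;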
+ **/ +s32 e1000_check_polarity_m88(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + + DEBUGFUNC("e1000_check_polarity_m88"); + + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &data); + + if (!ret_val) + phy->cable_polarity = ((data & M88E1000_PSSR_REV_POLARITY) + ? e1000_rev_polarity_reversed + : e1000_rev_polarity_normal); + + return ret_val; +} + +/** + * e1000_check_polarity_igp - Checks the polarity. + * @hw: pointer to the HW structure + * + * Success returns 0, Failure returns -E1000_ERR_PHY (-2) + * + * Polarity is determined based on the PHY port status register, and the + * current speed (since there is no polarity at 100Mbps). + **/ +s32 e1000_check_polarity_igp(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data, offset, mask; + + DEBUGFUNC("e1000_check_polarity_igp"); + + /* Polarity is determined based on the speed of + * our connection. + */ + ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data); + if (ret_val) + return ret_val; + + if ((data & IGP01E1000_PSSR_SPEED_MASK) == + IGP01E1000_PSSR_SPEED_1000MBPS) { + offset = IGP01E1000_PHY_PCS_INIT_REG; + mask = IGP01E1000_PHY_POLARITY_MASK; + } else { + /* This really only applies to 10Mbps since + * there is no polarity for 100Mbps (always 0). + */ + offset = IGP01E1000_PHY_PORT_STATUS; + mask = IGP01E1000_PSSR_POLARITY_REVERSED; + } + + ret_val = phy->ops.read_reg(hw, offset, &data); + + if (!ret_val) + phy->cable_polarity = ((data & mask) + ? e1000_rev_polarity_reversed + : e1000_rev_polarity_normal); + + return ret_val; +} + +/** + * e1000_check_polarity_ife - Check cable polarity for IFE PHY + * @hw: pointer to the HW structure + * + * Polarity is determined on the polarity reversal feature being enabled. + **/ +s32 e1000_check_polarity_ife(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, offset, mask; + + DEBUGFUNC("e1000_check_polarity_ife"); + + /* Polarity is determined based on the reversal feature being enabled. + */ + if (phy->polarity_correction) { + offset = IFE_PHY_EXTENDED_STATUS_CONTROL; + mask = IFE_PESC_POLARITY_REVERSED; + } else { + offset = IFE_PHY_SPECIAL_CONTROL; + mask = IFE_PSC_FORCE_POLARITY; + } + + ret_val = phy->ops.read_reg(hw, offset, &phy_data); + + if (!ret_val) + phy->cable_polarity = ((phy_data & mask) + ? e1000_rev_polarity_reversed + : e1000_rev_polarity_normal); + + return ret_val; +} + +/** + * e1000_wait_autoneg - Wait for auto-neg completion + * @hw: pointer to the HW structure + * + * Waits for auto-negotiation to complete or for the auto-negotiation time + * limit to expire, which ever happens first. + **/ +STATIC s32 e1000_wait_autoneg(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + u16 i, phy_status; + + DEBUGFUNC("e1000_wait_autoneg"); + + if (!hw->phy.ops.read_reg) + return E1000_SUCCESS; + + /* Break after autoneg completes or PHY_AUTO_NEG_LIMIT expires. */ + for (i = PHY_AUTO_NEG_LIMIT; i > 0; i--) { + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); + if (ret_val) + break; + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); + if (ret_val) + break; + if (phy_status & MII_SR_AUTONEG_COMPLETE) + break; + msec_delay(100); + } + + /* PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation + * has completed. 
+ */ + return ret_val; +} + +/** + * e1000_phy_has_link_generic - Polls PHY for link + * @hw: pointer to the HW structure + * @iterations: number of times to poll for link + * @usec_interval: delay between polling attempts + * @success: pointer to whether polling was successful or not + * + * Polls the PHY status register for link, 'iterations' number of times. + **/ +s32 e1000_phy_has_link_generic(struct e1000_hw *hw, u32 iterations, + u32 usec_interval, bool *success) +{ + s32 ret_val = E1000_SUCCESS; + u16 i, phy_status; + + DEBUGFUNC("e1000_phy_has_link_generic"); + + if (!hw->phy.ops.read_reg) + return E1000_SUCCESS; + + for (i = 0; i < iterations; i++) { + /* Some PHYs require the PHY_STATUS register to be read + * twice due to the link bit being sticky. No harm doing + * it across the board. + */ + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); + if (ret_val) { + /* If the first read fails, another entity may have + * ownership of the resources, wait and try again to + * see if they have relinquished the resources yet. + */ + if (usec_interval >= 1000) + msec_delay(usec_interval/1000); + else + usec_delay(usec_interval); + } + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); + if (ret_val) + break; + if (phy_status & MII_SR_LINK_STATUS) + break; + if (usec_interval >= 1000) + msec_delay(usec_interval/1000); + else + usec_delay(usec_interval); + } + + *success = (i < iterations); + + return ret_val; +} + +/** + * e1000_get_cable_length_m88 - Determine cable length for m88 PHY + * @hw: pointer to the HW structure + * + * Reads the PHY specific status register to retrieve the cable length + * information. The cable length is determined by averaging the minimum and + * maximum values to get the "average" cable length. The m88 PHY has four + * possible cable length values, which are: + * Register Value Cable Length + * 0 < 50 meters + * 1 50 - 80 meters + * 2 80 - 110 meters + * 3 110 - 140 meters + * 4 > 140 meters + **/ +s32 e1000_get_cable_length_m88(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, index; + + DEBUGFUNC("e1000_get_cable_length_m88"); + + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); + if (ret_val) + return ret_val; + + index = ((phy_data & M88E1000_PSSR_CABLE_LENGTH) >> + M88E1000_PSSR_CABLE_LENGTH_SHIFT); + + if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) + return -E1000_ERR_PHY; + + phy->min_cable_length = e1000_m88_cable_length_table[index]; + phy->max_cable_length = e1000_m88_cable_length_table[index + 1]; + + phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; + + return E1000_SUCCESS; +} + +s32 e1000_get_cable_length_m88_gen2(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, phy_data2, is_cm; + u16 index, default_page; + + DEBUGFUNC("e1000_get_cable_length_m88_gen2"); + + switch (hw->phy.id) { + case I210_I_PHY_ID: + /* Get cable length from PHY Cable Diagnostics Control Reg */ + ret_val = phy->ops.read_reg(hw, (0x7 << GS40G_PAGE_SHIFT) + + (I347AT4_PCDL + phy->addr), + &phy_data); + if (ret_val) + return ret_val; + + /* Check if the unit of cable length is meters or cm */ + ret_val = phy->ops.read_reg(hw, (0x7 << GS40G_PAGE_SHIFT) + + I347AT4_PCDC, &phy_data2); + if (ret_val) + return ret_val; + + is_cm = !(phy_data2 & I347AT4_PCDC_CABLE_LENGTH_UNIT); + + /* Populate the phy structure with cable length in meters */ + phy->min_cable_length = phy_data / (is_cm ? 
100 : 1); + phy->max_cable_length = phy_data / (is_cm ? 100 : 1); + phy->cable_length = phy_data / (is_cm ? 100 : 1); + break; + case M88E1543_E_PHY_ID: + case M88E1512_E_PHY_ID: + case M88E1340M_E_PHY_ID: + case I347AT4_E_PHY_ID: + /* Remember the original page select and set it to 7 */ + ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT, + &default_page); + if (ret_val) + return ret_val; + + ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0x07); + if (ret_val) + return ret_val; + + /* Get cable length from PHY Cable Diagnostics Control Reg */ + ret_val = phy->ops.read_reg(hw, (I347AT4_PCDL + phy->addr), + &phy_data); + if (ret_val) + return ret_val; + + /* Check if the unit of cable length is meters or cm */ + ret_val = phy->ops.read_reg(hw, I347AT4_PCDC, &phy_data2); + if (ret_val) + return ret_val; + + is_cm = !(phy_data2 & I347AT4_PCDC_CABLE_LENGTH_UNIT); + + /* Populate the phy structure with cable length in meters */ + phy->min_cable_length = phy_data / (is_cm ? 100 : 1); + phy->max_cable_length = phy_data / (is_cm ? 100 : 1); + phy->cable_length = phy_data / (is_cm ? 100 : 1); + + /* Reset the page select to its original value */ + ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, + default_page); + if (ret_val) + return ret_val; + break; + + case M88E1112_E_PHY_ID: + /* Remember the original page select and set it to 5 */ + ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT, + &default_page); + if (ret_val) + return ret_val; + + ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0x05); + if (ret_val) + return ret_val; + + ret_val = phy->ops.read_reg(hw, M88E1112_VCT_DSP_DISTANCE, + &phy_data); + if (ret_val) + return ret_val; + + index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >> + M88E1000_PSSR_CABLE_LENGTH_SHIFT; + + if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) + return -E1000_ERR_PHY; + + phy->min_cable_length = e1000_m88_cable_length_table[index]; + phy->max_cable_length = e1000_m88_cable_length_table[index + 1]; + + phy->cable_length = (phy->min_cable_length + + phy->max_cable_length) / 2; + + /* Reset the page select to its original value */ + ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, + default_page); + if (ret_val) + return ret_val; + + break; + default: + return -E1000_ERR_PHY; + } + + return ret_val; +} + +/** + * e1000_get_cable_length_igp_2 - Determine cable length for igp2 PHY + * @hw: pointer to the HW structure + * + * The automatic gain control (agc) normalizes the amplitude of the + * received signal, adjusting for the attenuation produced by the + * cable. By reading the AGC registers, which represent the + * combination of coarse and fine gain value, the value can be put + * into a lookup table to obtain the approximate cable length + * for each channel. + **/ +s32 e1000_get_cable_length_igp_2(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, i, agc_value = 0; + u16 cur_agc_index, max_agc_index = 0; + u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1; + static const u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = { + IGP02E1000_PHY_AGC_A, + IGP02E1000_PHY_AGC_B, + IGP02E1000_PHY_AGC_C, + IGP02E1000_PHY_AGC_D + }; + + DEBUGFUNC("e1000_get_cable_length_igp_2"); + + /* Read the AGC registers for all channels */ + for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) { + ret_val = phy->ops.read_reg(hw, agc_reg_array[i], &phy_data); + if (ret_val) + return ret_val; + + /* Getting bits 15:9, which represent the combination of + * coarse and fine gain values. 
The result is a number + * that can be put into the lookup table to obtain the + * approximate cable length. + */ + cur_agc_index = ((phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) & + IGP02E1000_AGC_LENGTH_MASK); + + /* Array index bound check. */ + if ((cur_agc_index >= IGP02E1000_CABLE_LENGTH_TABLE_SIZE) || + (cur_agc_index == 0)) + return -E1000_ERR_PHY; + + /* Remove min & max AGC values from calculation. */ + if (e1000_igp_2_cable_length_table[min_agc_index] > + e1000_igp_2_cable_length_table[cur_agc_index]) + min_agc_index = cur_agc_index; + if (e1000_igp_2_cable_length_table[max_agc_index] < + e1000_igp_2_cable_length_table[cur_agc_index]) + max_agc_index = cur_agc_index; + + agc_value += e1000_igp_2_cable_length_table[cur_agc_index]; + } + + agc_value -= (e1000_igp_2_cable_length_table[min_agc_index] + + e1000_igp_2_cable_length_table[max_agc_index]); + agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2); + + /* Calculate cable length with the error range of +/- 10 meters. */ + phy->min_cable_length = (((agc_value - IGP02E1000_AGC_RANGE) > 0) ? + (agc_value - IGP02E1000_AGC_RANGE) : 0); + phy->max_cable_length = agc_value + IGP02E1000_AGC_RANGE; + + phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; + + return E1000_SUCCESS; +} + +/** + * e1000_get_phy_info_m88 - Retrieve PHY information + * @hw: pointer to the HW structure + * + * Valid for only copper links. Read the PHY status register (sticky read) + * to verify that link is up. Read the PHY special control register to + * determine the polarity and 10base-T extended distance. Read the PHY + * special status register to determine MDI/MDIx and current speed. If + * speed is 1000, then determine cable length, local and remote receiver. + **/ +s32 e1000_get_phy_info_m88(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + bool link; + + DEBUGFUNC("e1000_get_phy_info_m88"); + + if (phy->media_type != e1000_media_type_copper) { + DEBUGOUT("Phy info is only valid for copper media\n"); + return -E1000_ERR_CONFIG; + } + + ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link); + if (ret_val) + return ret_val; + + if (!link) { + DEBUGOUT("Phy info is only valid if link is up\n"); + return -E1000_ERR_CONFIG; + } + + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy->polarity_correction = !!(phy_data & + M88E1000_PSCR_POLARITY_REVERSAL); + + ret_val = e1000_check_polarity_m88(hw); + if (ret_val) + return ret_val; + + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); + if (ret_val) + return ret_val; + + phy->is_mdix = !!(phy_data & M88E1000_PSSR_MDIX); + + if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) { + ret_val = hw->phy.ops.get_cable_length(hw); + if (ret_val) + return ret_val; + + ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &phy_data); + if (ret_val) + return ret_val; + + phy->local_rx = (phy_data & SR_1000T_LOCAL_RX_STATUS) + ? e1000_1000t_rx_status_ok + : e1000_1000t_rx_status_not_ok; + + phy->remote_rx = (phy_data & SR_1000T_REMOTE_RX_STATUS) + ? e1000_1000t_rx_status_ok + : e1000_1000t_rx_status_not_ok; + } else { + /* Set values to "undefined" */ + phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; + phy->local_rx = e1000_1000t_rx_status_undefined; + phy->remote_rx = e1000_1000t_rx_status_undefined; + } + + return ret_val; +} + +/** + * e1000_get_phy_info_igp - Retrieve igp PHY information + * @hw: pointer to the HW structure + * + * Read PHY status to determine if link is up. 
If link is up, then + * set/determine 10base-T extended distance and polarity correction. Read + * PHY port status to determine MDI/MDIx and speed. Based on the speed, + * determine on the cable length, local and remote receiver. + **/ +s32 e1000_get_phy_info_igp(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + bool link; + + DEBUGFUNC("e1000_get_phy_info_igp"); + + ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link); + if (ret_val) + return ret_val; + + if (!link) { + DEBUGOUT("Phy info is only valid if link is up\n"); + return -E1000_ERR_CONFIG; + } + + phy->polarity_correction = true; + + ret_val = e1000_check_polarity_igp(hw); + if (ret_val) + return ret_val; + + ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data); + if (ret_val) + return ret_val; + + phy->is_mdix = !!(data & IGP01E1000_PSSR_MDIX); + + if ((data & IGP01E1000_PSSR_SPEED_MASK) == + IGP01E1000_PSSR_SPEED_1000MBPS) { + ret_val = phy->ops.get_cable_length(hw); + if (ret_val) + return ret_val; + + ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data); + if (ret_val) + return ret_val; + + phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS) + ? e1000_1000t_rx_status_ok + : e1000_1000t_rx_status_not_ok; + + phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS) + ? e1000_1000t_rx_status_ok + : e1000_1000t_rx_status_not_ok; + } else { + phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; + phy->local_rx = e1000_1000t_rx_status_undefined; + phy->remote_rx = e1000_1000t_rx_status_undefined; + } + + return ret_val; +} + +/** + * e1000_get_phy_info_ife - Retrieves various IFE PHY states + * @hw: pointer to the HW structure + * + * Populates "phy" structure with various feature states. + **/ +s32 e1000_get_phy_info_ife(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + bool link; + + DEBUGFUNC("e1000_get_phy_info_ife"); + + ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link); + if (ret_val) + return ret_val; + + if (!link) { + DEBUGOUT("Phy info is only valid if link is up\n"); + return -E1000_ERR_CONFIG; + } + + ret_val = phy->ops.read_reg(hw, IFE_PHY_SPECIAL_CONTROL, &data); + if (ret_val) + return ret_val; + phy->polarity_correction = !(data & IFE_PSC_AUTO_POLARITY_DISABLE); + + if (phy->polarity_correction) { + ret_val = e1000_check_polarity_ife(hw); + if (ret_val) + return ret_val; + } else { + /* Polarity is forced */ + phy->cable_polarity = ((data & IFE_PSC_FORCE_POLARITY) + ? e1000_rev_polarity_reversed + : e1000_rev_polarity_normal); + } + + ret_val = phy->ops.read_reg(hw, IFE_PHY_MDIX_CONTROL, &data); + if (ret_val) + return ret_val; + + phy->is_mdix = !!(data & IFE_PMC_MDIX_STATUS); + + /* The following parameters are undefined for 10/100 operation. */ + phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; + phy->local_rx = e1000_1000t_rx_status_undefined; + phy->remote_rx = e1000_1000t_rx_status_undefined; + + return E1000_SUCCESS; +} + +/** + * e1000_phy_sw_reset_generic - PHY software reset + * @hw: pointer to the HW structure + * + * Does a software reset of the PHY by reading the PHY control register and + * setting/write the control register reset bit to the PHY. 
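+ *
+ * Minimal usage sketch (illustration only; the call returns success without
+ * doing anything when phy.ops.read_reg is not populated, as checked below):
+ *
+ *	ret_val = e1000_phy_sw_reset_generic(hw);
+ *	if (ret_val)
+ *		DEBUGOUT("PHY soft reset failed\n");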
+ **/ +s32 e1000_phy_sw_reset_generic(struct e1000_hw *hw) +{ + s32 ret_val; + u16 phy_ctrl; + + DEBUGFUNC("e1000_phy_sw_reset_generic"); + + if (!hw->phy.ops.read_reg) + return E1000_SUCCESS; + + ret_val = hw->phy.ops.read_reg(hw, PHY_CONTROL, &phy_ctrl); + if (ret_val) + return ret_val; + + phy_ctrl |= MII_CR_RESET; + ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL, phy_ctrl); + if (ret_val) + return ret_val; + + usec_delay(1); + + return ret_val; +} + +/** + * e1000_phy_hw_reset_generic - PHY hardware reset + * @hw: pointer to the HW structure + * + * Verify the reset block is not blocking us from resetting. Acquire + * semaphore (if necessary) and read/set/write the device control reset + * bit in the PHY. Wait the appropriate delay time for the device to + * reset and release the semaphore (if necessary). + **/ +s32 e1000_phy_hw_reset_generic(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u32 ctrl; + + DEBUGFUNC("e1000_phy_hw_reset_generic"); + + if (phy->ops.check_reset_block) { + ret_val = phy->ops.check_reset_block(hw); + if (ret_val) + return E1000_SUCCESS; + } + + ret_val = phy->ops.acquire(hw); + if (ret_val) + return ret_val; + + ctrl = E1000_READ_REG(hw, E1000_CTRL); + E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_PHY_RST); + E1000_WRITE_FLUSH(hw); + + usec_delay(phy->reset_delay_us); + + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + E1000_WRITE_FLUSH(hw); + + usec_delay(150); + + phy->ops.release(hw); + + return phy->ops.get_cfg_done(hw); +} + +/** + * e1000_get_cfg_done_generic - Generic configuration done + * @hw: pointer to the HW structure + * + * Generic function to wait 10 milli-seconds for configuration to complete + * and return success. + **/ +s32 e1000_get_cfg_done_generic(struct e1000_hw E1000_UNUSEDARG *hw) +{ + DEBUGFUNC("e1000_get_cfg_done_generic"); + UNREFERENCED_1PARAMETER(hw); + + msec_delay_irq(10); + + return E1000_SUCCESS; +} + +/** + * e1000_phy_init_script_igp3 - Inits the IGP3 PHY + * @hw: pointer to the HW structure + * + * Initializes a Intel Gigabit PHY3 when an EEPROM is not present. 
+ **/ +s32 e1000_phy_init_script_igp3(struct e1000_hw *hw) +{ + DEBUGOUT("Running IGP 3 PHY init script\n"); + + /* PHY init IGP 3 */ + /* Enable rise/fall, 10-mode work in class-A */ + hw->phy.ops.write_reg(hw, 0x2F5B, 0x9018); + /* Remove all caps from Replica path filter */ + hw->phy.ops.write_reg(hw, 0x2F52, 0x0000); + /* Bias trimming for ADC, AFE and Driver (Default) */ + hw->phy.ops.write_reg(hw, 0x2FB1, 0x8B24); + /* Increase Hybrid poly bias */ + hw->phy.ops.write_reg(hw, 0x2FB2, 0xF8F0); + /* Add 4% to Tx amplitude in Gig mode */ + hw->phy.ops.write_reg(hw, 0x2010, 0x10B0); + /* Disable trimming (TTT) */ + hw->phy.ops.write_reg(hw, 0x2011, 0x0000); + /* Poly DC correction to 94.6% + 2% for all channels */ + hw->phy.ops.write_reg(hw, 0x20DD, 0x249A); + /* ABS DC correction to 95.9% */ + hw->phy.ops.write_reg(hw, 0x20DE, 0x00D3); + /* BG temp curve trim */ + hw->phy.ops.write_reg(hw, 0x28B4, 0x04CE); + /* Increasing ADC OPAMP stage 1 currents to max */ + hw->phy.ops.write_reg(hw, 0x2F70, 0x29E4); + /* Force 1000 ( required for enabling PHY regs configuration) */ + hw->phy.ops.write_reg(hw, 0x0000, 0x0140); + /* Set upd_freq to 6 */ + hw->phy.ops.write_reg(hw, 0x1F30, 0x1606); + /* Disable NPDFE */ + hw->phy.ops.write_reg(hw, 0x1F31, 0xB814); + /* Disable adaptive fixed FFE (Default) */ + hw->phy.ops.write_reg(hw, 0x1F35, 0x002A); + /* Enable FFE hysteresis */ + hw->phy.ops.write_reg(hw, 0x1F3E, 0x0067); + /* Fixed FFE for short cable lengths */ + hw->phy.ops.write_reg(hw, 0x1F54, 0x0065); + /* Fixed FFE for medium cable lengths */ + hw->phy.ops.write_reg(hw, 0x1F55, 0x002A); + /* Fixed FFE for long cable lengths */ + hw->phy.ops.write_reg(hw, 0x1F56, 0x002A); + /* Enable Adaptive Clip Threshold */ + hw->phy.ops.write_reg(hw, 0x1F72, 0x3FB0); + /* AHT reset limit to 1 */ + hw->phy.ops.write_reg(hw, 0x1F76, 0xC0FF); + /* Set AHT master delay to 127 msec */ + hw->phy.ops.write_reg(hw, 0x1F77, 0x1DEC); + /* Set scan bits for AHT */ + hw->phy.ops.write_reg(hw, 0x1F78, 0xF9EF); + /* Set AHT Preset bits */ + hw->phy.ops.write_reg(hw, 0x1F79, 0x0210); + /* Change integ_factor of channel A to 3 */ + hw->phy.ops.write_reg(hw, 0x1895, 0x0003); + /* Change prop_factor of channels BCD to 8 */ + hw->phy.ops.write_reg(hw, 0x1796, 0x0008); + /* Change cg_icount + enable integbp for channels BCD */ + hw->phy.ops.write_reg(hw, 0x1798, 0xD008); + /* Change cg_icount + enable integbp + change prop_factor_master + * to 8 for channel A + */ + hw->phy.ops.write_reg(hw, 0x1898, 0xD918); + /* Disable AHT in Slave mode on channel A */ + hw->phy.ops.write_reg(hw, 0x187A, 0x0800); + /* Enable LPLU and disable AN to 1000 in non-D0a states, + * Enable SPD+B2B + */ + hw->phy.ops.write_reg(hw, 0x0019, 0x008D); + /* Enable restart AN on an1000_dis change */ + hw->phy.ops.write_reg(hw, 0x001B, 0x2080); + /* Enable wh_fifo read clock in 10/100 modes */ + hw->phy.ops.write_reg(hw, 0x0014, 0x0045); + /* Restart AN, Speed selection is 1000 */ + hw->phy.ops.write_reg(hw, 0x0000, 0x1340); + + return E1000_SUCCESS; +} + +/** + * e1000_get_phy_type_from_id - Get PHY type from id + * @phy_id: phy_id read from the phy + * + * Returns the phy type from the id. 
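+ *
+ * Typical use (sketch; mirrors e1000_determine_phy_address below):
+ *
+ *	if (e1000_get_phy_type_from_id(hw->phy.id) != e1000_phy_unknown)
+ *		return E1000_SUCCESS;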
+ **/ +enum e1000_phy_type e1000_get_phy_type_from_id(u32 phy_id) +{ + enum e1000_phy_type phy_type = e1000_phy_unknown; + + switch (phy_id) { + case M88E1000_I_PHY_ID: + case M88E1000_E_PHY_ID: + case M88E1111_I_PHY_ID: + case M88E1011_I_PHY_ID: + case M88E1543_E_PHY_ID: + case M88E1512_E_PHY_ID: + case I347AT4_E_PHY_ID: + case M88E1112_E_PHY_ID: + case M88E1340M_E_PHY_ID: + phy_type = e1000_phy_m88; + break; + case IGP01E1000_I_PHY_ID: /* IGP 1 & 2 share this */ + phy_type = e1000_phy_igp_2; + break; + case GG82563_E_PHY_ID: + phy_type = e1000_phy_gg82563; + break; + case IGP03E1000_E_PHY_ID: + phy_type = e1000_phy_igp_3; + break; + case IFE_E_PHY_ID: + case IFE_PLUS_E_PHY_ID: + case IFE_C_E_PHY_ID: + phy_type = e1000_phy_ife; + break; + case BME1000_E_PHY_ID: + case BME1000_E_PHY_ID_R2: + phy_type = e1000_phy_bm; + break; + case I82578_E_PHY_ID: + phy_type = e1000_phy_82578; + break; + case I82577_E_PHY_ID: + phy_type = e1000_phy_82577; + break; + case I82579_E_PHY_ID: + phy_type = e1000_phy_82579; + break; + case I217_E_PHY_ID: + phy_type = e1000_phy_i217; + break; + case I82580_I_PHY_ID: + phy_type = e1000_phy_82580; + break; + case I210_I_PHY_ID: + phy_type = e1000_phy_i210; + break; + default: + phy_type = e1000_phy_unknown; + break; + } + return phy_type; +} + +/** + * e1000_determine_phy_address - Determines PHY address. + * @hw: pointer to the HW structure + * + * This uses a trial and error method to loop through possible PHY + * addresses. It tests each by reading the PHY ID registers and + * checking for a match. + **/ +s32 e1000_determine_phy_address(struct e1000_hw *hw) +{ + u32 phy_addr = 0; + u32 i; + enum e1000_phy_type phy_type = e1000_phy_unknown; + + hw->phy.id = phy_type; + + for (phy_addr = 0; phy_addr < E1000_MAX_PHY_ADDR; phy_addr++) { + hw->phy.addr = phy_addr; + i = 0; + + do { + e1000_get_phy_id(hw); + phy_type = e1000_get_phy_type_from_id(hw->phy.id); + + /* If phy_type is valid, break - we found our + * PHY address + */ + if (phy_type != e1000_phy_unknown) + return E1000_SUCCESS; + + msec_delay(1); + i++; + } while (i < 10); + } + + return -E1000_ERR_PHY_TYPE; +} + +/** + * e1000_get_phy_addr_for_bm_page - Retrieve PHY page address + * @page: page to access + * + * Returns the phy address for the page requested. + **/ +STATIC u32 e1000_get_phy_addr_for_bm_page(u32 page, u32 reg) +{ + u32 phy_addr = 2; + + if ((page >= 768) || (page == 0 && reg == 25) || (reg == 31)) + phy_addr = 1; + + return phy_addr; +} + +/** + * e1000_write_phy_reg_bm - Write BM PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore, if necessary, then writes the data to PHY register + * at the offset. Release any acquired semaphores before exiting. + **/ +s32 e1000_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data) +{ + s32 ret_val; + u32 page = offset >> IGP_PAGE_SHIFT; + + DEBUGFUNC("e1000_write_phy_reg_bm"); + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + /* Page 800 works differently than the rest so it has its own func */ + if (page == BM_WUC_PAGE) { + ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data, + false, false); + goto release; + } + + hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset); + + if (offset > MAX_PHY_MULTI_PAGE_REG) { + u32 page_shift, page_select; + + /* Page select is register 31 for phy address 1 and 22 for + * phy address 2 and 3. Page select is shifted only for + * phy address 1. 
+ */ + if (hw->phy.addr == 1) { + page_shift = IGP_PAGE_SHIFT; + page_select = IGP01E1000_PHY_PAGE_SELECT; + } else { + page_shift = 0; + page_select = BM_PHY_PAGE_SELECT; + } + + /* Page is shifted left, PHY expects (page x 32) */ + ret_val = e1000_write_phy_reg_mdic(hw, page_select, + (page << page_shift)); + if (ret_val) + goto release; + } + + ret_val = e1000_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, + data); + +release: + hw->phy.ops.release(hw); + return ret_val; +} + +/** + * e1000_read_phy_reg_bm - Read BM PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Acquires semaphore, if necessary, then reads the PHY register at offset + * and storing the retrieved information in data. Release any acquired + * semaphores before exiting. + **/ +s32 e1000_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data) +{ + s32 ret_val; + u32 page = offset >> IGP_PAGE_SHIFT; + + DEBUGFUNC("e1000_read_phy_reg_bm"); + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + /* Page 800 works differently than the rest so it has its own func */ + if (page == BM_WUC_PAGE) { + ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data, + true, false); + goto release; + } + + hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset); + + if (offset > MAX_PHY_MULTI_PAGE_REG) { + u32 page_shift, page_select; + + /* Page select is register 31 for phy address 1 and 22 for + * phy address 2 and 3. Page select is shifted only for + * phy address 1. + */ + if (hw->phy.addr == 1) { + page_shift = IGP_PAGE_SHIFT; + page_select = IGP01E1000_PHY_PAGE_SELECT; + } else { + page_shift = 0; + page_select = BM_PHY_PAGE_SELECT; + } + + /* Page is shifted left, PHY expects (page x 32) */ + ret_val = e1000_write_phy_reg_mdic(hw, page_select, + (page << page_shift)); + if (ret_val) + goto release; + } + + ret_val = e1000_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, + data); +release: + hw->phy.ops.release(hw); + return ret_val; +} + +/** + * e1000_read_phy_reg_bm2 - Read BM PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Acquires semaphore, if necessary, then reads the PHY register at offset + * and storing the retrieved information in data. Release any acquired + * semaphores before exiting. + **/ +s32 e1000_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data) +{ + s32 ret_val; + u16 page = (u16)(offset >> IGP_PAGE_SHIFT); + + DEBUGFUNC("e1000_read_phy_reg_bm2"); + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + /* Page 800 works differently than the rest so it has its own func */ + if (page == BM_WUC_PAGE) { + ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data, + true, false); + goto release; + } + + hw->phy.addr = 1; + + if (offset > MAX_PHY_MULTI_PAGE_REG) { + /* Page is shifted left, PHY expects (page x 32) */ + ret_val = e1000_write_phy_reg_mdic(hw, BM_PHY_PAGE_SELECT, + page); + + if (ret_val) + goto release; + } + + ret_val = e1000_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, + data); +release: + hw->phy.ops.release(hw); + return ret_val; +} + +/** + * e1000_write_phy_reg_bm2 - Write BM PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore, if necessary, then writes the data to PHY register + * at the offset. Release any acquired semaphores before exiting. 
+ **/ +s32 e1000_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data) +{ + s32 ret_val; + u16 page = (u16)(offset >> IGP_PAGE_SHIFT); + + DEBUGFUNC("e1000_write_phy_reg_bm2"); + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + /* Page 800 works differently than the rest so it has its own func */ + if (page == BM_WUC_PAGE) { + ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data, + false, false); + goto release; + } + + hw->phy.addr = 1; + + if (offset > MAX_PHY_MULTI_PAGE_REG) { + /* Page is shifted left, PHY expects (page x 32) */ + ret_val = e1000_write_phy_reg_mdic(hw, BM_PHY_PAGE_SELECT, + page); + + if (ret_val) + goto release; + } + + ret_val = e1000_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, + data); + +release: + hw->phy.ops.release(hw); + return ret_val; +} + +/** + * e1000_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers + * @hw: pointer to the HW structure + * @phy_reg: pointer to store original contents of BM_WUC_ENABLE_REG + * + * Assumes semaphore already acquired and phy_reg points to a valid memory + * address to store contents of the BM_WUC_ENABLE_REG register. + **/ +s32 e1000_enable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg) +{ + s32 ret_val; + u16 temp; + + DEBUGFUNC("e1000_enable_phy_wakeup_reg_access_bm"); + + if (!phy_reg) + return -E1000_ERR_PARAM; + + /* All page select, port ctrl and wakeup registers use phy address 1 */ + hw->phy.addr = 1; + + /* Select Port Control Registers page */ + ret_val = e1000_set_page_igp(hw, (BM_PORT_CTRL_PAGE << IGP_PAGE_SHIFT)); + if (ret_val) { + DEBUGOUT("Could not set Port Control page\n"); + return ret_val; + } + + ret_val = e1000_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, phy_reg); + if (ret_val) { + DEBUGOUT2("Could not read PHY register %d.%d\n", + BM_PORT_CTRL_PAGE, BM_WUC_ENABLE_REG); + return ret_val; + } + + /* Enable both PHY wakeup mode and Wakeup register page writes. + * Prevent a power state change by disabling ME and Host PHY wakeup. + */ + temp = *phy_reg; + temp |= BM_WUC_ENABLE_BIT; + temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT); + + ret_val = e1000_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, temp); + if (ret_val) { + DEBUGOUT2("Could not write PHY register %d.%d\n", + BM_PORT_CTRL_PAGE, BM_WUC_ENABLE_REG); + return ret_val; + } + + /* Select Host Wakeup Registers page - caller now able to write + * registers on the Wakeup registers page + */ + return e1000_set_page_igp(hw, (BM_WUC_PAGE << IGP_PAGE_SHIFT)); +} + +/** + * e1000_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs + * @hw: pointer to the HW structure + * @phy_reg: pointer to original contents of BM_WUC_ENABLE_REG + * + * Restore BM_WUC_ENABLE_REG to its original value. + * + * Assumes semaphore already acquired and *phy_reg is the contents of the + * BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by + * caller. 
+ **/ +s32 e1000_disable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg) +{ + s32 ret_val; + + DEBUGFUNC("e1000_disable_phy_wakeup_reg_access_bm"); + + if (!phy_reg) + return -E1000_ERR_PARAM; + + /* Select Port Control Registers page */ + ret_val = e1000_set_page_igp(hw, (BM_PORT_CTRL_PAGE << IGP_PAGE_SHIFT)); + if (ret_val) { + DEBUGOUT("Could not set Port Control page\n"); + return ret_val; + } + + /* Restore 769.17 to its original value */ + ret_val = e1000_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, *phy_reg); + if (ret_val) + DEBUGOUT2("Could not restore PHY register %d.%d\n", + BM_PORT_CTRL_PAGE, BM_WUC_ENABLE_REG); + + return ret_val; +} + +/** + * e1000_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register + * @hw: pointer to the HW structure + * @offset: register offset to be read or written + * @data: pointer to the data to read or write + * @read: determines if operation is read or write + * @page_set: BM_WUC_PAGE already set and access enabled + * + * Read the PHY register at offset and store the retrieved information in + * data, or write data to PHY register at offset. Note the procedure to + * access the PHY wakeup registers is different than reading the other PHY + * registers. It works as such: + * 1) Set 769.17.2 (page 769, register 17, bit 2) = 1 + * 2) Set page to 800 for host (801 if we were manageability) + * 3) Write the address using the address opcode (0x11) + * 4) Read or write the data using the data opcode (0x12) + * 5) Restore 769.17.2 to its original value + * + * Steps 1 and 2 are done by e1000_enable_phy_wakeup_reg_access_bm() and + * step 5 is done by e1000_disable_phy_wakeup_reg_access_bm(). + * + * Assumes semaphore is already acquired. When page_set==true, assumes + * the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack + * is responsible for calls to e1000_[enable|disable]_phy_wakeup_reg_bm()). 
+ **/
+STATIC s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
+                                          u16 *data, bool read, bool page_set)
+{
+        s32 ret_val;
+        u16 reg = BM_PHY_REG_NUM(offset);
+        u16 page = BM_PHY_REG_PAGE(offset);
+        u16 phy_reg = 0;
+
+        DEBUGFUNC("e1000_access_phy_wakeup_reg_bm");
+
+        /* Gig must be disabled for MDIO accesses to Host Wakeup reg page */
+        if ((hw->mac.type == e1000_pchlan) &&
+            (!(E1000_READ_REG(hw, E1000_PHY_CTRL) & E1000_PHY_CTRL_GBE_DISABLE)))
+                DEBUGOUT1("Attempting to access page %d while gig enabled.\n",
+                          page);
+
+        if (!page_set) {
+                /* Enable access to PHY wakeup registers */
+                ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
+                if (ret_val) {
+                        DEBUGOUT("Could not enable PHY wakeup reg access\n");
+                        return ret_val;
+                }
+        }
+
+        DEBUGOUT2("Accessing PHY page %d reg 0x%x\n", page, reg);
+
+        /* Write the Wakeup register page offset value using opcode 0x11 */
+        ret_val = e1000_write_phy_reg_mdic(hw, BM_WUC_ADDRESS_OPCODE, reg);
+        if (ret_val) {
+                DEBUGOUT1("Could not write address opcode to page %d\n", page);
+                return ret_val;
+        }
+
+        if (read) {
+                /* Read the Wakeup register page value using opcode 0x12 */
+                ret_val = e1000_read_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE,
+                                                  data);
+        } else {
+                /* Write the Wakeup register page value using opcode 0x12 */
+                ret_val = e1000_write_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE,
+                                                   *data);
+        }
+
+        if (ret_val) {
+                DEBUGOUT2("Could not access PHY reg %d.%d\n", page, reg);
+                return ret_val;
+        }
+
+        if (!page_set)
+                ret_val = e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
+
+        return ret_val;
+}
+
+/**
+ * e1000_power_up_phy_copper - Restore copper link in case of PHY power down
+ * @hw: pointer to the HW structure
+ *
+ * In the case of a PHY power down to save power, or to turn off link during a
+ * driver unload, or wake on lan is not enabled, restore the link to previous
+ * settings.
+ **/
+void e1000_power_up_phy_copper(struct e1000_hw *hw)
+{
+        u16 mii_reg = 0;
+
+        /* The PHY will retain its settings across a power down/up cycle */
+        hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
+        mii_reg &= ~MII_CR_POWER_DOWN;
+        hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
+}
+
+/**
+ * e1000_power_down_phy_copper - Power down copper PHY
+ * @hw: pointer to the HW structure
+ *
+ * In the case of a PHY power down to save power, to turn off link during a
+ * driver unload, or when wake on lan is not enabled, power down the PHY.
+ **/
+void e1000_power_down_phy_copper(struct e1000_hw *hw)
+{
+        u16 mii_reg = 0;
+
+        /* The PHY will retain its settings across a power down/up cycle */
+        hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
+        mii_reg |= MII_CR_POWER_DOWN;
+        hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
+        msec_delay(1);
+}
+
+/**
+ * __e1000_read_phy_reg_hv - Read HV PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ * @locked: semaphore has already been acquired or not
+ *
+ * Acquires semaphore, if necessary, then reads the PHY register at offset
+ * and stores the retrieved information in data. Release any acquired
+ * semaphore before exiting.
+ **/ +STATIC s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data, + bool locked, bool page_set) +{ + s32 ret_val; + u16 page = BM_PHY_REG_PAGE(offset); + u16 reg = BM_PHY_REG_NUM(offset); + u32 phy_addr = hw->phy.addr = e1000_get_phy_addr_for_hv_page(page); + + DEBUGFUNC("__e1000_read_phy_reg_hv"); + + if (!locked) { + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + } + /* Page 800 works differently than the rest so it has its own func */ + if (page == BM_WUC_PAGE) { + ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data, + true, page_set); + goto out; + } + + if (page > 0 && page < HV_INTC_FC_PAGE_START) { + ret_val = e1000_access_phy_debug_regs_hv(hw, offset, + data, true); + goto out; + } + + if (!page_set) { + if (page == HV_INTC_FC_PAGE_START) + page = 0; + + if (reg > MAX_PHY_MULTI_PAGE_REG) { + /* Page is shifted left, PHY expects (page x 32) */ + ret_val = e1000_set_page_igp(hw, + (page << IGP_PAGE_SHIFT)); + + hw->phy.addr = phy_addr; + + if (ret_val) + goto out; + } + } + + DEBUGOUT3("reading PHY page %d (or 0x%x shifted) reg 0x%x\n", page, + page << IGP_PAGE_SHIFT, reg); + + ret_val = e1000_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg, + data); +out: + if (!locked) + hw->phy.ops.release(hw); + + return ret_val; +} + +/** + * e1000_read_phy_reg_hv - Read HV PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Acquires semaphore then reads the PHY register at offset and stores + * the retrieved information in data. Release the acquired semaphore + * before exiting. + **/ +s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data) +{ + return __e1000_read_phy_reg_hv(hw, offset, data, false, false); +} + +/** + * e1000_read_phy_reg_hv_locked - Read HV PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the PHY register at offset and stores the retrieved information + * in data. Assumes semaphore already acquired. + **/ +s32 e1000_read_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 *data) +{ + return __e1000_read_phy_reg_hv(hw, offset, data, true, false); +} + +/** + * e1000_read_phy_reg_page_hv - Read HV PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Reads the PHY register at offset and stores the retrieved information + * in data. Assumes semaphore already acquired and page already set. + **/ +s32 e1000_read_phy_reg_page_hv(struct e1000_hw *hw, u32 offset, u16 *data) +{ + return __e1000_read_phy_reg_hv(hw, offset, data, true, true); +} + +/** + * __e1000_write_phy_reg_hv - Write HV PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * @locked: semaphore has already been acquired or not + * + * Acquires semaphore, if necessary, then writes the data to PHY register + * at the offset. Release any acquired semaphores before exiting. 
+ **/ +STATIC s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data, + bool locked, bool page_set) +{ + s32 ret_val; + u16 page = BM_PHY_REG_PAGE(offset); + u16 reg = BM_PHY_REG_NUM(offset); + u32 phy_addr = hw->phy.addr = e1000_get_phy_addr_for_hv_page(page); + + DEBUGFUNC("__e1000_write_phy_reg_hv"); + + if (!locked) { + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + } + /* Page 800 works differently than the rest so it has its own func */ + if (page == BM_WUC_PAGE) { + ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data, + false, page_set); + goto out; + } + + if (page > 0 && page < HV_INTC_FC_PAGE_START) { + ret_val = e1000_access_phy_debug_regs_hv(hw, offset, + &data, false); + goto out; + } + + if (!page_set) { + if (page == HV_INTC_FC_PAGE_START) + page = 0; + + /* Workaround MDIO accesses being disabled after entering IEEE + * Power Down (when bit 11 of the PHY Control register is set) + */ + if ((hw->phy.type == e1000_phy_82578) && + (hw->phy.revision >= 1) && + (hw->phy.addr == 2) && + !(MAX_PHY_REG_ADDRESS & reg) && + (data & (1 << 11))) { + u16 data2 = 0x7EFF; + ret_val = e1000_access_phy_debug_regs_hv(hw, + (1 << 6) | 0x3, + &data2, false); + if (ret_val) + goto out; + } + + if (reg > MAX_PHY_MULTI_PAGE_REG) { + /* Page is shifted left, PHY expects (page x 32) */ + ret_val = e1000_set_page_igp(hw, + (page << IGP_PAGE_SHIFT)); + + hw->phy.addr = phy_addr; + + if (ret_val) + goto out; + } + } + + DEBUGOUT3("writing PHY page %d (or 0x%x shifted) reg 0x%x\n", page, + page << IGP_PAGE_SHIFT, reg); + + ret_val = e1000_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg, + data); + +out: + if (!locked) + hw->phy.ops.release(hw); + + return ret_val; +} + +/** + * e1000_write_phy_reg_hv - Write HV PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore then writes the data to PHY register at the offset. + * Release the acquired semaphores before exiting. + **/ +s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data) +{ + return __e1000_write_phy_reg_hv(hw, offset, data, false, false); +} + +/** + * e1000_write_phy_reg_hv_locked - Write HV PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Writes the data to PHY register at the offset. Assumes semaphore + * already acquired. + **/ +s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 data) +{ + return __e1000_write_phy_reg_hv(hw, offset, data, true, false); +} + +/** + * e1000_write_phy_reg_page_hv - Write HV PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Writes the data to PHY register at the offset. Assumes semaphore + * already acquired and page already set. 
+ **/
+s32 e1000_write_phy_reg_page_hv(struct e1000_hw *hw, u32 offset, u16 data)
+{
+        return __e1000_write_phy_reg_hv(hw, offset, data, true, true);
+}
+
+/**
+ * e1000_get_phy_addr_for_hv_page - Get PHY address based on page
+ * @page: page to be accessed
+ **/
+STATIC u32 e1000_get_phy_addr_for_hv_page(u32 page)
+{
+        u32 phy_addr = 2;
+
+        if (page >= HV_INTC_FC_PAGE_START)
+                phy_addr = 1;
+
+        return phy_addr;
+}
+
+/**
+ * e1000_access_phy_debug_regs_hv - Read HV PHY vendor specific high registers
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read or written
+ * @data: pointer to the data to be read or written
+ * @read: determines if operation is read or write
+ *
+ * Reads the PHY register at offset and stores the retrieved information
+ * in data. Assumes semaphore already acquired. Note that the procedure
+ * to access these regs uses the address port and data port to read/write.
+ * These accesses are done with PHY address 2 and without using pages.
+ **/
+STATIC s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset,
+                                          u16 *data, bool read)
+{
+        s32 ret_val;
+        u32 addr_reg;
+        u32 data_reg;
+
+        DEBUGFUNC("e1000_access_phy_debug_regs_hv");
+
+        /* This takes care of the difference with desktop vs mobile phy */
+        addr_reg = ((hw->phy.type == e1000_phy_82578) ?
+                    I82578_ADDR_REG : I82577_ADDR_REG);
+        data_reg = addr_reg + 1;
+
+        /* All operations in this function are phy address 2 */
+        hw->phy.addr = 2;
+
+        /* masking with 0x3F to remove the page from offset */
+        ret_val = e1000_write_phy_reg_mdic(hw, addr_reg, (u16)offset & 0x3F);
+        if (ret_val) {
+                DEBUGOUT("Could not write the Address Offset port register\n");
+                return ret_val;
+        }
+
+        /* Read or write the data value next */
+        if (read)
+                ret_val = e1000_read_phy_reg_mdic(hw, data_reg, data);
+        else
+                ret_val = e1000_write_phy_reg_mdic(hw, data_reg, *data);
+
+        if (ret_val)
+                DEBUGOUT("Could not access the Data port register\n");
+
+        return ret_val;
+}
+
+/**
+ * e1000_link_stall_workaround_hv - Si workaround
+ * @hw: pointer to the HW structure
+ *
+ * This function works around a Si bug where the link partner can get
+ * a link up indication before the PHY does. If small packets are sent
+ * by the link partner they can be placed in the packet buffer without
+ * being properly accounted for by the PHY and will stall, preventing
+ * further packets from being received. The workaround is to clear the
+ * packet buffer after the PHY detects link up.
+ **/ +s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + u16 data; + + DEBUGFUNC("e1000_link_stall_workaround_hv"); + + if (hw->phy.type != e1000_phy_82578) + return E1000_SUCCESS; + + /* Do not apply workaround if in PHY loopback bit 14 set */ + hw->phy.ops.read_reg(hw, PHY_CONTROL, &data); + if (data & PHY_CONTROL_LB) + return E1000_SUCCESS; + + /* check if link is up and at 1Gbps */ + ret_val = hw->phy.ops.read_reg(hw, BM_CS_STATUS, &data); + if (ret_val) + return ret_val; + + data &= (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED | + BM_CS_STATUS_SPEED_MASK); + + if (data != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED | + BM_CS_STATUS_SPEED_1000)) + return E1000_SUCCESS; + + msec_delay(200); + + /* flush the packets in the fifo buffer */ + ret_val = hw->phy.ops.write_reg(hw, HV_MUX_DATA_CTRL, + (HV_MUX_DATA_CTRL_GEN_TO_MAC | + HV_MUX_DATA_CTRL_FORCE_SPEED)); + if (ret_val) + return ret_val; + + return hw->phy.ops.write_reg(hw, HV_MUX_DATA_CTRL, + HV_MUX_DATA_CTRL_GEN_TO_MAC); +} + +/** + * e1000_check_polarity_82577 - Checks the polarity. + * @hw: pointer to the HW structure + * + * Success returns 0, Failure returns -E1000_ERR_PHY (-2) + * + * Polarity is determined based on the PHY specific status register. + **/ +s32 e1000_check_polarity_82577(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + + DEBUGFUNC("e1000_check_polarity_82577"); + + ret_val = phy->ops.read_reg(hw, I82577_PHY_STATUS_2, &data); + + if (!ret_val) + phy->cable_polarity = ((data & I82577_PHY_STATUS2_REV_POLARITY) + ? e1000_rev_polarity_reversed + : e1000_rev_polarity_normal); + + return ret_val; +} + +/** + * e1000_phy_force_speed_duplex_82577 - Force speed/duplex for I82577 PHY + * @hw: pointer to the HW structure + * + * Calls the PHY setup function to force speed and duplex. + **/ +s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + bool link; + + DEBUGFUNC("e1000_phy_force_speed_duplex_82577"); + + ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data); + if (ret_val) + return ret_val; + + e1000_phy_force_speed_duplex_setup(hw, &phy_data); + + ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data); + if (ret_val) + return ret_val; + + usec_delay(1); + + if (phy->autoneg_wait_to_complete) { + DEBUGOUT("Waiting for forced speed/duplex link on 82577 phy\n"); + + ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); + if (ret_val) + return ret_val; + + if (!link) + DEBUGOUT("Link taking longer than expected.\n"); + + /* Try once more */ + ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); + } + + return ret_val; +} + +/** + * e1000_get_phy_info_82577 - Retrieve I82577 PHY information + * @hw: pointer to the HW structure + * + * Read PHY status to determine if link is up. If link is up, then + * set/determine 10base-T extended distance and polarity correction. Read + * PHY port status to determine MDI/MDIx and speed. Based on the speed, + * determine on the cable length, local and remote receiver. 
+ **/ +s32 e1000_get_phy_info_82577(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + bool link; + + DEBUGFUNC("e1000_get_phy_info_82577"); + + ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link); + if (ret_val) + return ret_val; + + if (!link) { + DEBUGOUT("Phy info is only valid if link is up\n"); + return -E1000_ERR_CONFIG; + } + + phy->polarity_correction = true; + + ret_val = e1000_check_polarity_82577(hw); + if (ret_val) + return ret_val; + + ret_val = phy->ops.read_reg(hw, I82577_PHY_STATUS_2, &data); + if (ret_val) + return ret_val; + + phy->is_mdix = !!(data & I82577_PHY_STATUS2_MDIX); + + if ((data & I82577_PHY_STATUS2_SPEED_MASK) == + I82577_PHY_STATUS2_SPEED_1000MBPS) { + ret_val = hw->phy.ops.get_cable_length(hw); + if (ret_val) + return ret_val; + + ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data); + if (ret_val) + return ret_val; + + phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS) + ? e1000_1000t_rx_status_ok + : e1000_1000t_rx_status_not_ok; + + phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS) + ? e1000_1000t_rx_status_ok + : e1000_1000t_rx_status_not_ok; + } else { + phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; + phy->local_rx = e1000_1000t_rx_status_undefined; + phy->remote_rx = e1000_1000t_rx_status_undefined; + } + + return E1000_SUCCESS; +} + +/** + * e1000_get_cable_length_82577 - Determine cable length for 82577 PHY + * @hw: pointer to the HW structure + * + * Reads the diagnostic status register and verifies result is valid before + * placing it in the phy_cable_length field. + **/ +s32 e1000_get_cable_length_82577(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, length; + + DEBUGFUNC("e1000_get_cable_length_82577"); + + ret_val = phy->ops.read_reg(hw, I82577_PHY_DIAG_STATUS, &phy_data); + if (ret_val) + return ret_val; + + length = ((phy_data & I82577_DSTATUS_CABLE_LENGTH) >> + I82577_DSTATUS_CABLE_LENGTH_SHIFT); + + if (length == E1000_CABLE_LENGTH_UNDEFINED) + return -E1000_ERR_PHY; + + phy->cable_length = length; + + return E1000_SUCCESS; +} + +/** + * e1000_write_phy_reg_gs40g - Write GS40G PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore, if necessary, then writes the data to PHY register + * at the offset. Release any acquired semaphores before exiting. + **/ +s32 e1000_write_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 data) +{ + s32 ret_val; + u16 page = offset >> GS40G_PAGE_SHIFT; + + DEBUGFUNC("e1000_write_phy_reg_gs40g"); + + offset = offset & GS40G_OFFSET_MASK; + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + ret_val = e1000_write_phy_reg_mdic(hw, GS40G_PAGE_SELECT, page); + if (ret_val) + goto release; + ret_val = e1000_write_phy_reg_mdic(hw, offset, data); + +release: + hw->phy.ops.release(hw); + return ret_val; +} + +/** + * e1000_read_phy_reg_gs40g - Read GS40G PHY register + * @hw: pointer to the HW structure + * @offset: lower half is register offset to read to + * upper half is page to use. + * @data: data to read at register offset + * + * Acquires semaphore, if necessary, then reads the data in the PHY register + * at the offset. Release any acquired semaphores before exiting. 
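+ *
+ * Illustrative encoding (added annotation, not original Intel text):
+ * with GS40G_PAGE_SHIFT of 16 and GS40G_OFFSET_MASK of 0xFFFF, an
+ * offset of 0x00020010 selects page 2, register 0x10.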
+ **/ +s32 e1000_read_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 *data) +{ + s32 ret_val; + u16 page = offset >> GS40G_PAGE_SHIFT; + + DEBUGFUNC("e1000_read_phy_reg_gs40g"); + + offset = offset & GS40G_OFFSET_MASK; + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + ret_val = e1000_write_phy_reg_mdic(hw, GS40G_PAGE_SELECT, page); + if (ret_val) + goto release; + ret_val = e1000_read_phy_reg_mdic(hw, offset, data); + +release: + hw->phy.ops.release(hw); + return ret_val; +} + +/** + * e1000_read_phy_reg_mphy - Read mPHY control register + * @hw: pointer to the HW structure + * @address: address to be read + * @data: pointer to the read data + * + * Reads the mPHY control register in the PHY at offset and stores the + * information read to data. + **/ +s32 e1000_read_phy_reg_mphy(struct e1000_hw *hw, u32 address, u32 *data) +{ + u32 mphy_ctrl = 0; + bool locked = false; + bool ready; + + DEBUGFUNC("e1000_read_phy_reg_mphy"); + + /* Check if mPHY is ready to read/write operations */ + ready = e1000_is_mphy_ready(hw); + if (!ready) + return -E1000_ERR_PHY; + + /* Check if mPHY access is disabled and enable it if so */ + mphy_ctrl = E1000_READ_REG(hw, E1000_MPHY_ADDR_CTRL); + if (mphy_ctrl & E1000_MPHY_DIS_ACCESS) { + locked = true; + ready = e1000_is_mphy_ready(hw); + if (!ready) + return -E1000_ERR_PHY; + mphy_ctrl |= E1000_MPHY_ENA_ACCESS; + E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL, mphy_ctrl); + } + + /* Set the address that we want to read */ + ready = e1000_is_mphy_ready(hw); + if (!ready) + return -E1000_ERR_PHY; + + /* We mask address, because we want to use only current lane */ + mphy_ctrl = (mphy_ctrl & ~E1000_MPHY_ADDRESS_MASK & + ~E1000_MPHY_ADDRESS_FNC_OVERRIDE) | + (address & E1000_MPHY_ADDRESS_MASK); + E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL, mphy_ctrl); + + /* Read data from the address */ + ready = e1000_is_mphy_ready(hw); + if (!ready) + return -E1000_ERR_PHY; + *data = E1000_READ_REG(hw, E1000_MPHY_DATA); + + /* Disable access to mPHY if it was originally disabled */ + if (locked) { + ready = e1000_is_mphy_ready(hw); + if (!ready) + return -E1000_ERR_PHY; + E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL, + E1000_MPHY_DIS_ACCESS); + } + + return E1000_SUCCESS; +} + +/** + * e1000_write_phy_reg_mphy - Write mPHY control register + * @hw: pointer to the HW structure + * @address: address to write to + * @data: data to write to register at offset + * @line_override: used when we want to use different line than default one + * + * Writes data to mPHY control register. 
+ **/ +s32 e1000_write_phy_reg_mphy(struct e1000_hw *hw, u32 address, u32 data, + bool line_override) +{ + u32 mphy_ctrl = 0; + bool locked = false; + bool ready; + + DEBUGFUNC("e1000_write_phy_reg_mphy"); + + /* Check if mPHY is ready to read/write operations */ + ready = e1000_is_mphy_ready(hw); + if (!ready) + return -E1000_ERR_PHY; + + /* Check if mPHY access is disabled and enable it if so */ + mphy_ctrl = E1000_READ_REG(hw, E1000_MPHY_ADDR_CTRL); + if (mphy_ctrl & E1000_MPHY_DIS_ACCESS) { + locked = true; + ready = e1000_is_mphy_ready(hw); + if (!ready) + return -E1000_ERR_PHY; + mphy_ctrl |= E1000_MPHY_ENA_ACCESS; + E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL, mphy_ctrl); + } + + /* Set the address that we want to read */ + ready = e1000_is_mphy_ready(hw); + if (!ready) + return -E1000_ERR_PHY; + + /* We mask address, because we want to use only current lane */ + if (line_override) + mphy_ctrl |= E1000_MPHY_ADDRESS_FNC_OVERRIDE; + else + mphy_ctrl &= ~E1000_MPHY_ADDRESS_FNC_OVERRIDE; + mphy_ctrl = (mphy_ctrl & ~E1000_MPHY_ADDRESS_MASK) | + (address & E1000_MPHY_ADDRESS_MASK); + E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL, mphy_ctrl); + + /* Read data from the address */ + ready = e1000_is_mphy_ready(hw); + if (!ready) + return -E1000_ERR_PHY; + E1000_WRITE_REG(hw, E1000_MPHY_DATA, data); + + /* Disable access to mPHY if it was originally disabled */ + if (locked) { + ready = e1000_is_mphy_ready(hw); + if (!ready) + return -E1000_ERR_PHY; + E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL, + E1000_MPHY_DIS_ACCESS); + } + + return E1000_SUCCESS; +} + +/** + * e1000_is_mphy_ready - Check if mPHY control register is not busy + * @hw: pointer to the HW structure + * + * Returns mPHY control register status. + **/ +bool e1000_is_mphy_ready(struct e1000_hw *hw) +{ + u16 retry_count = 0; + u32 mphy_ctrl = 0; + bool ready = false; + + while (retry_count < 2) { + mphy_ctrl = E1000_READ_REG(hw, E1000_MPHY_ADDR_CTRL); + if (mphy_ctrl & E1000_MPHY_BUSY) { + usec_delay(20); + retry_count++; + continue; + } + ready = true; + break; + } + + if (!ready) + DEBUGOUT("ERROR READING mPHY control register, phy is busy.\n"); + + return ready; +} diff --git a/src/spdk/dpdk/drivers/net/e1000/base/e1000_phy.h b/src/spdk/dpdk/drivers/net/e1000/base/e1000_phy.h new file mode 100644 index 000000000..2c71e64c8 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/e1000/base/e1000_phy.h @@ -0,0 +1,312 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001 - 2015 Intel Corporation + */ + +#ifndef _E1000_PHY_H_ +#define _E1000_PHY_H_ + +void e1000_init_phy_ops_generic(struct e1000_hw *hw); +s32 e1000_null_read_reg(struct e1000_hw *hw, u32 offset, u16 *data); +void e1000_null_phy_generic(struct e1000_hw *hw); +s32 e1000_null_lplu_state(struct e1000_hw *hw, bool active); +s32 e1000_null_write_reg(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_null_set_page(struct e1000_hw *hw, u16 data); +s32 e1000_read_i2c_byte_null(struct e1000_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data); +s32 e1000_write_i2c_byte_null(struct e1000_hw *hw, u8 byte_offset, + u8 dev_addr, u8 data); +s32 e1000_check_downshift_generic(struct e1000_hw *hw); +s32 e1000_check_polarity_m88(struct e1000_hw *hw); +s32 e1000_check_polarity_igp(struct e1000_hw *hw); +s32 e1000_check_polarity_ife(struct e1000_hw *hw); +s32 e1000_check_reset_block_generic(struct e1000_hw *hw); +s32 e1000_phy_setup_autoneg(struct e1000_hw *hw); +s32 e1000_copper_link_autoneg(struct e1000_hw *hw); +s32 e1000_copper_link_setup_igp(struct e1000_hw *hw); +s32 
e1000_copper_link_setup_m88(struct e1000_hw *hw); +s32 e1000_copper_link_setup_m88_gen2(struct e1000_hw *hw); +s32 e1000_phy_force_speed_duplex_igp(struct e1000_hw *hw); +s32 e1000_phy_force_speed_duplex_m88(struct e1000_hw *hw); +s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw); +s32 e1000_get_cable_length_m88(struct e1000_hw *hw); +s32 e1000_get_cable_length_m88_gen2(struct e1000_hw *hw); +s32 e1000_get_cable_length_igp_2(struct e1000_hw *hw); +s32 e1000_get_cfg_done_generic(struct e1000_hw *hw); +s32 e1000_get_phy_id(struct e1000_hw *hw); +s32 e1000_get_phy_info_igp(struct e1000_hw *hw); +s32 e1000_get_phy_info_m88(struct e1000_hw *hw); +s32 e1000_get_phy_info_ife(struct e1000_hw *hw); +s32 e1000_phy_sw_reset_generic(struct e1000_hw *hw); +void e1000_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl); +s32 e1000_phy_hw_reset_generic(struct e1000_hw *hw); +s32 e1000_phy_reset_dsp_generic(struct e1000_hw *hw); +s32 e1000_read_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_set_page_igp(struct e1000_hw *hw, u16 page); +s32 e1000_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_set_d3_lplu_state_generic(struct e1000_hw *hw, bool active); +s32 e1000_setup_copper_link_generic(struct e1000_hw *hw); +s32 e1000_write_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_phy_has_link_generic(struct e1000_hw *hw, u32 iterations, + u32 usec_interval, bool *success); +s32 e1000_phy_init_script_igp3(struct e1000_hw *hw); +enum e1000_phy_type e1000_get_phy_type_from_id(u32 phy_id); +s32 e1000_determine_phy_address(struct e1000_hw *hw); +s32 e1000_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_enable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg); +s32 e1000_disable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg); +s32 e1000_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data); +void e1000_power_up_phy_copper(struct e1000_hw *hw); +void e1000_power_down_phy_copper(struct e1000_hw *hw); +s32 e1000_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_read_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 *data); +s32 e1000_write_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 data); +s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_read_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_read_phy_reg_page_hv(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 data); 
+s32 e1000_write_phy_reg_page_hv(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw); +s32 e1000_copper_link_setup_82577(struct e1000_hw *hw); +s32 e1000_check_polarity_82577(struct e1000_hw *hw); +s32 e1000_get_phy_info_82577(struct e1000_hw *hw); +s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw); +s32 e1000_get_cable_length_82577(struct e1000_hw *hw); +s32 e1000_write_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_read_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_read_phy_reg_mphy(struct e1000_hw *hw, u32 address, u32 *data); +s32 e1000_write_phy_reg_mphy(struct e1000_hw *hw, u32 address, u32 data, + bool line_override); +bool e1000_is_mphy_ready(struct e1000_hw *hw); + +#define E1000_MAX_PHY_ADDR 8 + +/* IGP01E1000 Specific Registers */ +#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* Port Config */ +#define IGP01E1000_PHY_PORT_STATUS 0x11 /* Status */ +#define IGP01E1000_PHY_PORT_CTRL 0x12 /* Control */ +#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health */ +#define IGP01E1000_GMII_FIFO 0x14 /* GMII FIFO */ +#define IGP02E1000_PHY_POWER_MGMT 0x19 /* Power Management */ +#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* Page Select */ +#define BM_PHY_PAGE_SELECT 22 /* Page Select for BM */ +#define IGP_PAGE_SHIFT 5 +#define PHY_REG_MASK 0x1F + +/* GS40G - I210 PHY defines */ +#define GS40G_PAGE_SELECT 0x16 +#define GS40G_PAGE_SHIFT 16 +#define GS40G_OFFSET_MASK 0xFFFF +#define GS40G_PAGE_2 0x20000 +#define GS40G_MAC_REG2 0x15 +#define GS40G_MAC_LB 0x4140 +#define GS40G_MAC_SPEED_1G 0X0006 +#define GS40G_COPPER_SPEC 0x0010 + +/* BM/HV Specific Registers */ +#define BM_PORT_CTRL_PAGE 769 +#define BM_WUC_PAGE 800 +#define BM_WUC_ADDRESS_OPCODE 0x11 +#define BM_WUC_DATA_OPCODE 0x12 +#define BM_WUC_ENABLE_PAGE BM_PORT_CTRL_PAGE +#define BM_WUC_ENABLE_REG 17 +#define BM_WUC_ENABLE_BIT (1 << 2) +#define BM_WUC_HOST_WU_BIT (1 << 4) +#define BM_WUC_ME_WU_BIT (1 << 5) + +#define PHY_UPPER_SHIFT 21 +#define BM_PHY_REG(page, reg) \ + (((reg) & MAX_PHY_REG_ADDRESS) |\ + (((page) & 0xFFFF) << PHY_PAGE_SHIFT) |\ + (((reg) & ~MAX_PHY_REG_ADDRESS) << (PHY_UPPER_SHIFT - PHY_PAGE_SHIFT))) +#define BM_PHY_REG_PAGE(offset) \ + ((u16)(((offset) >> PHY_PAGE_SHIFT) & 0xFFFF)) +#define BM_PHY_REG_NUM(offset) \ + ((u16)(((offset) & MAX_PHY_REG_ADDRESS) |\ + (((offset) >> (PHY_UPPER_SHIFT - PHY_PAGE_SHIFT)) &\ + ~MAX_PHY_REG_ADDRESS))) + +#define HV_INTC_FC_PAGE_START 768 +#define I82578_ADDR_REG 29 +#define I82577_ADDR_REG 16 +#define I82577_CFG_REG 22 +#define I82577_CFG_ASSERT_CRS_ON_TX (1 << 15) +#define I82577_CFG_ENABLE_DOWNSHIFT (3 << 10) /* auto downshift */ +#define I82577_CTRL_REG 23 + +/* 82577 specific PHY registers */ +#define I82577_PHY_CTRL_2 18 +#define I82577_PHY_LBK_CTRL 19 +#define I82577_PHY_STATUS_2 26 +#define I82577_PHY_DIAG_STATUS 31 + +/* I82577 PHY Status 2 */ +#define I82577_PHY_STATUS2_REV_POLARITY 0x0400 +#define I82577_PHY_STATUS2_MDIX 0x0800 +#define I82577_PHY_STATUS2_SPEED_MASK 0x0300 +#define I82577_PHY_STATUS2_SPEED_1000MBPS 0x0200 + +/* I82577 PHY Control 2 */ +#define I82577_PHY_CTRL2_MANUAL_MDIX 0x0200 +#define I82577_PHY_CTRL2_AUTO_MDI_MDIX 0x0400 +#define I82577_PHY_CTRL2_MDIX_CFG_MASK 0x0600 + +/* I82577 PHY Diagnostics Status */ +#define I82577_DSTATUS_CABLE_LENGTH 0x03FC +#define I82577_DSTATUS_CABLE_LENGTH_SHIFT 2 + +/* 82580 PHY Power Management */ +#define E1000_82580_PHY_POWER_MGMT 0xE14 +#define E1000_82580_PM_SPD 0x0001 /* Smart Power Down */ +#define 
E1000_82580_PM_D0_LPLU 0x0002 /* For D0a states */ +#define E1000_82580_PM_D3_LPLU 0x0004 /* For all other states */ +#define E1000_82580_PM_GO_LINKD 0x0020 /* Go Link Disconnect */ + +#define E1000_MPHY_DIS_ACCESS 0x80000000 /* disable_access bit */ +#define E1000_MPHY_ENA_ACCESS 0x40000000 /* enable_access bit */ +#define E1000_MPHY_BUSY 0x00010000 /* busy bit */ +#define E1000_MPHY_ADDRESS_FNC_OVERRIDE 0x20000000 /* fnc_override bit */ +#define E1000_MPHY_ADDRESS_MASK 0x0000FFFF /* address mask */ + +/* BM PHY Copper Specific Control 1 */ +#define BM_CS_CTRL1 16 + +/* BM PHY Copper Specific Status */ +#define BM_CS_STATUS 17 +#define BM_CS_STATUS_LINK_UP 0x0400 +#define BM_CS_STATUS_RESOLVED 0x0800 +#define BM_CS_STATUS_SPEED_MASK 0xC000 +#define BM_CS_STATUS_SPEED_1000 0x8000 + +/* 82577 Mobile Phy Status Register */ +#define HV_M_STATUS 26 +#define HV_M_STATUS_AUTONEG_COMPLETE 0x1000 +#define HV_M_STATUS_SPEED_MASK 0x0300 +#define HV_M_STATUS_SPEED_1000 0x0200 +#define HV_M_STATUS_SPEED_100 0x0100 +#define HV_M_STATUS_LINK_UP 0x0040 + +#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4 +#define IGP01E1000_PHY_POLARITY_MASK 0x0078 + +#define IGP01E1000_PSCR_AUTO_MDIX 0x1000 +#define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0=MDI, 1=MDIX */ + +#define IGP01E1000_PSCFR_SMART_SPEED 0x0080 + +/* Enable flexible speed on link-up */ +#define IGP01E1000_GMII_FLEX_SPD 0x0010 +#define IGP01E1000_GMII_SPD 0x0020 /* Enable SPD */ + +#define IGP02E1000_PM_SPD 0x0001 /* Smart Power Down */ +#define IGP02E1000_PM_D0_LPLU 0x0002 /* For D0a states */ +#define IGP02E1000_PM_D3_LPLU 0x0004 /* For all other states */ + +#define IGP01E1000_PLHR_SS_DOWNGRADE 0x8000 + +#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002 +#define IGP01E1000_PSSR_MDIX 0x0800 +#define IGP01E1000_PSSR_SPEED_MASK 0xC000 +#define IGP01E1000_PSSR_SPEED_1000MBPS 0xC000 + +#define IGP02E1000_PHY_CHANNEL_NUM 4 +#define IGP02E1000_PHY_AGC_A 0x11B1 +#define IGP02E1000_PHY_AGC_B 0x12B1 +#define IGP02E1000_PHY_AGC_C 0x14B1 +#define IGP02E1000_PHY_AGC_D 0x18B1 + +#define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Course=15:13, Fine=12:9 */ +#define IGP02E1000_AGC_LENGTH_MASK 0x7F +#define IGP02E1000_AGC_RANGE 15 + +#define E1000_CABLE_LENGTH_UNDEFINED 0xFF + +#define E1000_KMRNCTRLSTA_OFFSET 0x001F0000 +#define E1000_KMRNCTRLSTA_OFFSET_SHIFT 16 +#define E1000_KMRNCTRLSTA_REN 0x00200000 +#define E1000_KMRNCTRLSTA_CTRL_OFFSET 0x1 /* Kumeran Control */ +#define E1000_KMRNCTRLSTA_DIAG_OFFSET 0x3 /* Kumeran Diagnostic */ +#define E1000_KMRNCTRLSTA_TIMEOUTS 0x4 /* Kumeran Timeouts */ +#define E1000_KMRNCTRLSTA_INBAND_PARAM 0x9 /* Kumeran InBand Parameters */ +#define E1000_KMRNCTRLSTA_IBIST_DISABLE 0x0200 /* Kumeran IBIST Disable */ +#define E1000_KMRNCTRLSTA_DIAG_NELPBK 0x1000 /* Nearend Loopback mode */ +#define E1000_KMRNCTRLSTA_K1_CONFIG 0x7 +#define E1000_KMRNCTRLSTA_K1_ENABLE 0x0002 /* enable K1 */ +#define E1000_KMRNCTRLSTA_HD_CTRL 0x10 /* Kumeran HD Control */ +#define E1000_KMRNCTRLSTA_K0S_CTRL 0x1E /* Kumeran K0s Control */ +#define E1000_KMRNCTRLSTA_K0S_CTRL_ENTRY_LTNCY_SHIFT 0 +#define E1000_KMRNCTRLSTA_K0S_CTRL_MIN_TIME_SHIFT 4 +#define E1000_KMRNCTRLSTA_K0S_CTRL_ENTRY_LTNCY_MASK \ + (3 << E1000_KMRNCTRLSTA_K0S_CTRL_ENTRY_LTNCY_SHIFT) +#define E1000_KMRNCTRLSTA_K0S_CTRL_MIN_TIME_MASK \ + (7 << E1000_KMRNCTRLSTA_K0S_CTRL_MIN_TIME_SHIFT) +#define E1000_KMRNCTRLSTA_OP_MODES 0x1F /* Kumeran Modes of Operation */ +#define E1000_KMRNCTRLSTA_OP_MODES_LSC2CSC 0x0002 /* change LSC to CSC */ + +#define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10 +#define 
IFE_PHY_SPECIAL_CONTROL 0x11 /* 100BaseTx PHY Special Ctrl */ +#define IFE_PHY_SPECIAL_CONTROL_LED 0x1B /* PHY Special and LED Ctrl */ +#define IFE_PHY_MDIX_CONTROL 0x1C /* MDI/MDI-X Control */ + +/* IFE PHY Extended Status Control */ +#define IFE_PESC_POLARITY_REVERSED 0x0100 + +/* IFE PHY Special Control */ +#define IFE_PSC_AUTO_POLARITY_DISABLE 0x0010 +#define IFE_PSC_FORCE_POLARITY 0x0020 + +/* IFE PHY Special Control and LED Control */ +#define IFE_PSCL_PROBE_MODE 0x0020 +#define IFE_PSCL_PROBE_LEDS_OFF 0x0006 /* Force LEDs 0 and 2 off */ +#define IFE_PSCL_PROBE_LEDS_ON 0x0007 /* Force LEDs 0 and 2 on */ + +/* IFE PHY MDIX Control */ +#define IFE_PMC_MDIX_STATUS 0x0020 /* 1=MDI-X, 0=MDI */ +#define IFE_PMC_FORCE_MDIX 0x0040 /* 1=force MDI-X, 0=force MDI */ +#define IFE_PMC_AUTO_MDIX 0x0080 /* 1=enable auto, 0=disable */ + +/* SFP modules ID memory locations */ +#define E1000_SFF_IDENTIFIER_OFFSET 0x00 +#define E1000_SFF_IDENTIFIER_SFF 0x02 +#define E1000_SFF_IDENTIFIER_SFP 0x03 + +#define E1000_SFF_ETH_FLAGS_OFFSET 0x06 +/* Flags for SFP modules compatible with ETH up to 1Gb */ +struct sfp_e1000_flags { + u8 e1000_base_sx:1; + u8 e1000_base_lx:1; + u8 e1000_base_cx:1; + u8 e1000_base_t:1; + u8 e100_base_lx:1; + u8 e100_base_fx:1; + u8 e10_base_bx10:1; + u8 e10_base_px:1; +}; + +/* Vendor OUIs: format of OUI is 0x[byte0][byte1][byte2][00] */ +#define E1000_SFF_VENDOR_OUI_TYCO 0x00407600 +#define E1000_SFF_VENDOR_OUI_FTL 0x00906500 +#define E1000_SFF_VENDOR_OUI_AVAGO 0x00176A00 +#define E1000_SFF_VENDOR_OUI_INTEL 0x001B2100 + +/* EEPROM byte offsets */ +#define IGB_SFF_8472_SWAP 0x5C +#define IGB_SFF_8472_COMP 0x5E + +/* Bitmasks */ +#define IGB_SFF_ADDRESSING_MODE 0x4 +#define IGB_SFF_8472_UNSUP 0x00 + +#endif diff --git a/src/spdk/dpdk/drivers/net/e1000/base/e1000_regs.h b/src/spdk/dpdk/drivers/net/e1000/base/e1000_regs.h new file mode 100644 index 000000000..b072c5c1d --- /dev/null +++ b/src/spdk/dpdk/drivers/net/e1000/base/e1000_regs.h @@ -0,0 +1,666 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001 - 2015 Intel Corporation + */ + +#ifndef _E1000_REGS_H_ +#define _E1000_REGS_H_ + +#define E1000_CTRL 0x00000 /* Device Control - RW */ +#define E1000_CTRL_DUP 0x00004 /* Device Control Duplicate (Shadow) - RW */ +#define E1000_STATUS 0x00008 /* Device Status - RO */ +#define E1000_EECD 0x00010 /* EEPROM/Flash Control - RW */ +#define E1000_EERD 0x00014 /* EEPROM Read - RW */ +#define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */ +#define E1000_FLA 0x0001C /* Flash Access - RW */ +#define E1000_MDIC 0x00020 /* MDI Control - RW */ +#define E1000_MDICNFG 0x00E04 /* MDI Config - RW */ +#define E1000_REGISTER_SET_SIZE 0x20000 /* CSR Size */ +#define E1000_EEPROM_INIT_CTRL_WORD_2 0x0F /* EEPROM Init Ctrl Word 2 */ +#define E1000_EEPROM_PCIE_CTRL_WORD_2 0x28 /* EEPROM PCIe Ctrl Word 2 */ +#define E1000_BARCTRL 0x5BBC /* BAR ctrl reg */ +#define E1000_BARCTRL_FLSIZE 0x0700 /* BAR ctrl Flsize */ +#define E1000_BARCTRL_CSRSIZE 0x2000 /* BAR ctrl CSR size */ +#define E1000_MPHY_ADDR_CTRL 0x0024 /* GbE MPHY Address Control */ +#define E1000_MPHY_DATA 0x0E10 /* GBE MPHY Data */ +#define E1000_MPHY_STAT 0x0E0C /* GBE MPHY Statistics */ +#define E1000_PPHY_CTRL 0x5b48 /* PCIe PHY Control */ +#define E1000_I350_BARCTRL 0x5BFC /* BAR ctrl reg */ +#define E1000_I350_DTXMXPKTSZ 0x355C /* Maximum sent packet size reg*/ +#define E1000_SCTL 0x00024 /* SerDes Control - RW */ +#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */ +#define E1000_FCAH 0x0002C /* Flow Control 
Address High -RW */ +#if !defined(EXTERNAL_RELEASE) || defined(ULP_SUPPORT) +#define E1000_FEXT 0x0002C /* Future Extended - RW */ +#endif /* !EXTERNAL_RELEASE || ULP_SUPPORT */ +#define E1000_FEXTNVM 0x00028 /* Future Extended NVM - RW */ +#define E1000_FEXTNVM3 0x0003C /* Future Extended NVM 3 - RW */ +#define E1000_FEXTNVM4 0x00024 /* Future Extended NVM 4 - RW */ +#define E1000_FEXTNVM6 0x00010 /* Future Extended NVM 6 - RW */ +#define E1000_FEXTNVM7 0x000E4 /* Future Extended NVM 7 - RW */ +#define E1000_FEXTNVM9 0x5BB4 /* Future Extended NVM 9 - RW */ +#define E1000_FEXTNVM11 0x5BBC /* Future Extended NVM 11 - RW */ +#define E1000_PCIEANACFG 0x00F18 /* PCIE Analog Config */ +#define E1000_FCT 0x00030 /* Flow Control Type - RW */ +#define E1000_CONNSW 0x00034 /* Copper/Fiber switch control - RW */ +#define E1000_VET 0x00038 /* VLAN Ether Type - RW */ +#define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */ +#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */ +#define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */ +#define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */ +#define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */ +#define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */ +#define E1000_IVAR 0x000E4 /* Interrupt Vector Allocation Register - RW */ +#define E1000_SVCR 0x000F0 +#define E1000_SVT 0x000F4 +#define E1000_LPIC 0x000FC /* Low Power IDLE control */ +#define E1000_RCTL 0x00100 /* Rx Control - RW */ +#define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */ +#define E1000_TXCW 0x00178 /* Tx Configuration Word - RW */ +#define E1000_RXCW 0x00180 /* Rx Configuration Word - RO */ +#define E1000_PBA_ECC 0x01100 /* PBA ECC Register */ +#define E1000_EICR 0x01580 /* Ext. Interrupt Cause Read - R/clr */ +#define E1000_EITR(_n) (0x01680 + (0x4 * (_n))) +#define E1000_EICS 0x01520 /* Ext. Interrupt Cause Set - W0 */ +#define E1000_EIMS 0x01524 /* Ext. Interrupt Mask Set/Read - RW */ +#define E1000_EIMC 0x01528 /* Ext. Interrupt Mask Clear - WO */ +#define E1000_EIAC 0x0152C /* Ext. Interrupt Auto Clear - RW */ +#define E1000_EIAM 0x01530 /* Ext. 
Interrupt Ack Auto Clear Mask - RW */ +#define E1000_GPIE 0x01514 /* General Purpose Interrupt Enable - RW */ +#define E1000_IVAR0 0x01700 /* Interrupt Vector Allocation (array) - RW */ +#define E1000_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */ +#define E1000_TCTL 0x00400 /* Tx Control - RW */ +#define E1000_TCTL_EXT 0x00404 /* Extended Tx Control - RW */ +#define E1000_TIPG 0x00410 /* Tx Inter-packet gap -RW */ +#define E1000_TBT 0x00448 /* Tx Burst Timer - RW */ +#define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */ +#define E1000_LEDCTL 0x00E00 /* LED Control - RW */ +#define E1000_LEDMUX 0x08130 /* LED MUX Control */ +#define E1000_EXTCNF_CTRL 0x00F00 /* Extended Configuration Control */ +#define E1000_EXTCNF_SIZE 0x00F08 /* Extended Configuration Size */ +#define E1000_PHY_CTRL 0x00F10 /* PHY Control Register in CSR */ +#define E1000_POEMB E1000_PHY_CTRL /* PHY OEM Bits */ +#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */ +#define E1000_PBS 0x01008 /* Packet Buffer Size */ +#define E1000_PBECCSTS 0x0100C /* Packet Buffer ECC Status - RW */ +#define E1000_IOSFPC 0x00F28 /* TX corrupted data */ +#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */ +#define E1000_EEMNGCTL_I210 0x01010 /* i210 MNG EEprom Mode Control */ +#define E1000_EEARBC 0x01024 /* EEPROM Auto Read Bus Control */ +#define E1000_EEARBC_I210 0x12024 /* EEPROM Auto Read Bus Control */ +#define E1000_FLASHT 0x01028 /* FLASH Timer Register */ +#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */ +#define E1000_FLSWCTL 0x01030 /* FLASH control register */ +#define E1000_FLSWDATA 0x01034 /* FLASH data register */ +#define E1000_FLSWCNT 0x01038 /* FLASH Access Counter */ +#define E1000_FLOP 0x0103C /* FLASH Opcode Register */ +#define E1000_I2CCMD 0x01028 /* SFPI2C Command Register - RW */ +#define E1000_I2CPARAMS 0x0102C /* SFPI2C Parameters Register - RW */ +#define E1000_I2CBB_EN 0x00000100 /* I2C - Bit Bang Enable */ +#define E1000_I2C_CLK_OUT 0x00000200 /* I2C- Clock */ +#define E1000_I2C_DATA_OUT 0x00000400 /* I2C- Data Out */ +#define E1000_I2C_DATA_OE_N 0x00000800 /* I2C- Data Output Enable */ +#define E1000_I2C_DATA_IN 0x00001000 /* I2C- Data In */ +#define E1000_I2C_CLK_OE_N 0x00002000 /* I2C- Clock Output Enable */ +#define E1000_I2C_CLK_IN 0x00004000 /* I2C- Clock In */ +#define E1000_I2C_CLK_STRETCH_DIS 0x00008000 /* I2C- Dis Clk Stretching */ +#define E1000_WDSTP 0x01040 /* Watchdog Setup - RW */ +#define E1000_SWDSTS 0x01044 /* SW Device Status - RW */ +#define E1000_FRTIMER 0x01048 /* Free Running Timer - RW */ +#define E1000_TCPTIMER 0x0104C /* TCP Timer - RW */ +#define E1000_VPDDIAG 0x01060 /* VPD Diagnostic - RO */ +#define E1000_ICR_V2 0x01500 /* Intr Cause - new location - RC */ +#define E1000_ICS_V2 0x01504 /* Intr Cause Set - new location - WO */ +#define E1000_IMS_V2 0x01508 /* Intr Mask Set/Read - new location - RW */ +#define E1000_IMC_V2 0x0150C /* Intr Mask Clear - new location - WO */ +#define E1000_IAM_V2 0x01510 /* Intr Ack Auto Mask - new location - RW */ +#define E1000_ERT 0x02008 /* Early Rx Threshold - RW */ +#define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */ +#define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */ +#define E1000_PSRCTL 0x02170 /* Packet Split Receive Control - RW */ +#define E1000_RDFH 0x02410 /* Rx Data FIFO Head - RW */ +#define E1000_RDFT 0x02418 /* Rx Data FIFO Tail - RW */ +#define E1000_RDFHS 0x02420 /* Rx Data FIFO Head Saved - RW */ +#define E1000_RDFTS 0x02428 /* Rx Data 
FIFO Tail Saved - RW */ +#define E1000_RDFPC 0x02430 /* Rx Data FIFO Packet Count - RW */ +#define E1000_PBRTH 0x02458 /* PB Rx Arbitration Threshold - RW */ +#define E1000_FCRTV 0x02460 /* Flow Control Refresh Timer Value - RW */ +/* Split and Replication Rx Control - RW */ +#define E1000_RDPUMB 0x025CC /* DMA Rx Descriptor uC Mailbox - RW */ +#define E1000_RDPUAD 0x025D0 /* DMA Rx Descriptor uC Addr Command - RW */ +#define E1000_RDPUWD 0x025D4 /* DMA Rx Descriptor uC Data Write - RW */ +#define E1000_RDPURD 0x025D8 /* DMA Rx Descriptor uC Data Read - RW */ +#define E1000_RDPUCTL 0x025DC /* DMA Rx Descriptor uC Control - RW */ +#define E1000_PBDIAG 0x02458 /* Packet Buffer Diagnostic - RW */ +#define E1000_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */ +#define E1000_IRPBS 0x02404 /* Same as RXPBS, renamed for newer Si - RW */ +#define E1000_PBRWAC 0x024E8 /* Rx packet buffer wrap around counter - RO */ +#define E1000_RDTR 0x02820 /* Rx Delay Timer - RW */ +#define E1000_RADV 0x0282C /* Rx Interrupt Absolute Delay Timer - RW */ +#define E1000_EMIADD 0x10 /* Extended Memory Indirect Address */ +#define E1000_EMIDATA 0x11 /* Extended Memory Indirect Data */ +#define E1000_SRWR 0x12018 /* Shadow Ram Write Register - RW */ +#define E1000_I210_FLMNGCTL 0x12038 +#define E1000_I210_FLMNGDATA 0x1203C +#define E1000_I210_FLMNGCNT 0x12040 + +#define E1000_I210_FLSWCTL 0x12048 +#define E1000_I210_FLSWDATA 0x1204C +#define E1000_I210_FLSWCNT 0x12050 + +#define E1000_I210_FLA 0x1201C + +#define E1000_INVM_DATA_REG(_n) (0x12120 + 4*(_n)) +#define E1000_INVM_SIZE 64 /* Number of INVM Data Registers */ + +/* QAV Tx mode control register */ +#define E1000_I210_TQAVCTRL 0x3570 + +/* QAV Tx mode control register bitfields masks */ +/* QAV enable */ +#define E1000_TQAVCTRL_MODE (1 << 0) +/* Fetching arbitration type */ +#define E1000_TQAVCTRL_FETCH_ARB (1 << 4) +/* Fetching timer enable */ +#define E1000_TQAVCTRL_FETCH_TIMER_ENABLE (1 << 5) +/* Launch arbitration type */ +#define E1000_TQAVCTRL_LAUNCH_ARB (1 << 8) +/* Launch timer enable */ +#define E1000_TQAVCTRL_LAUNCH_TIMER_ENABLE (1 << 9) +/* SP waits for SR enable */ +#define E1000_TQAVCTRL_SP_WAIT_SR (1 << 10) +/* Fetching timer correction */ +#define E1000_TQAVCTRL_FETCH_TIMER_DELTA_OFFSET 16 +#define E1000_TQAVCTRL_FETCH_TIMER_DELTA \ + (0xFFFF << E1000_TQAVCTRL_FETCH_TIMER_DELTA_OFFSET) + +/* High credit registers where _n can be 0 or 1. */ +#define E1000_I210_TQAVHC(_n) (0x300C + 0x40 * (_n)) + +/* Queues fetch arbitration priority control register */ +#define E1000_I210_TQAVARBCTRL 0x3574 +/* Queues priority masks where _n and _p can be 0-3. */ +#define E1000_TQAVARBCTRL_QUEUE_PRI(_n, _p) ((_p) << (2 * (_n))) +/* QAV Tx mode control registers where _n can be 0 or 1. */ +#define E1000_I210_TQAVCC(_n) (0x3004 + 0x40 * (_n)) + +/* QAV Tx mode control register bitfields masks */ +#define E1000_TQAVCC_IDLE_SLOPE 0xFFFF /* Idle slope */ +#define E1000_TQAVCC_KEEP_CREDITS (1 << 30) /* Keep credits opt enable */ +#define E1000_TQAVCC_QUEUE_MODE (1 << 31) /* SP vs. SR Tx mode */ + +/* Good transmitted packets counter registers */ +#define E1000_PQGPTC(_n) (0x010014 + (0x100 * (_n))) + +/* Queues packet buffer size masks where _n can be 0-3 and _s 0-63 [kB] */ +#define E1000_I210_TXPBS_SIZE(_n, _s) ((_s) << (6 * (_n))) + +#define E1000_MMDAC 13 /* MMD Access Control */ +#define E1000_MMDAAD 14 /* MMD Access Address/Data */ + +/* Convenience macros + * + * Note: "_n" is the queue number of the register to be written to. 
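+ *
+ * For reference (added annotation, not original Intel text): queues 0-3
+ * use the legacy 0x100-byte register stride, while queues 4 and above
+ * use the 0x40-byte stride ranges based at 0x0C000 (Rx) and 0x0E000 (Tx).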
+ * + * Example usage: + * E1000_RDBAL_REG(current_rx_queue) + */ +#define E1000_RDBAL(_n) ((_n) < 4 ? (0x02800 + ((_n) * 0x100)) : \ + (0x0C000 + ((_n) * 0x40))) +#define E1000_RDBAH(_n) ((_n) < 4 ? (0x02804 + ((_n) * 0x100)) : \ + (0x0C004 + ((_n) * 0x40))) +#define E1000_RDLEN(_n) ((_n) < 4 ? (0x02808 + ((_n) * 0x100)) : \ + (0x0C008 + ((_n) * 0x40))) +#define E1000_SRRCTL(_n) ((_n) < 4 ? (0x0280C + ((_n) * 0x100)) : \ + (0x0C00C + ((_n) * 0x40))) +#define E1000_RDH(_n) ((_n) < 4 ? (0x02810 + ((_n) * 0x100)) : \ + (0x0C010 + ((_n) * 0x40))) +#define E1000_RXCTL(_n) ((_n) < 4 ? (0x02814 + ((_n) * 0x100)) : \ + (0x0C014 + ((_n) * 0x40))) +#define E1000_DCA_RXCTRL(_n) E1000_RXCTL(_n) +#define E1000_RDT(_n) ((_n) < 4 ? (0x02818 + ((_n) * 0x100)) : \ + (0x0C018 + ((_n) * 0x40))) +#define E1000_RXDCTL(_n) ((_n) < 4 ? (0x02828 + ((_n) * 0x100)) : \ + (0x0C028 + ((_n) * 0x40))) +#define E1000_RQDPC(_n) ((_n) < 4 ? (0x02830 + ((_n) * 0x100)) : \ + (0x0C030 + ((_n) * 0x40))) +#define E1000_TDBAL(_n) ((_n) < 4 ? (0x03800 + ((_n) * 0x100)) : \ + (0x0E000 + ((_n) * 0x40))) +#define E1000_TDBAH(_n) ((_n) < 4 ? (0x03804 + ((_n) * 0x100)) : \ + (0x0E004 + ((_n) * 0x40))) +#define E1000_TDLEN(_n) ((_n) < 4 ? (0x03808 + ((_n) * 0x100)) : \ + (0x0E008 + ((_n) * 0x40))) +#define E1000_TDH(_n) ((_n) < 4 ? (0x03810 + ((_n) * 0x100)) : \ + (0x0E010 + ((_n) * 0x40))) +#define E1000_TXCTL(_n) ((_n) < 4 ? (0x03814 + ((_n) * 0x100)) : \ + (0x0E014 + ((_n) * 0x40))) +#define E1000_DCA_TXCTRL(_n) E1000_TXCTL(_n) +#define E1000_TDT(_n) ((_n) < 4 ? (0x03818 + ((_n) * 0x100)) : \ + (0x0E018 + ((_n) * 0x40))) +#define E1000_TXDCTL(_n) ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) : \ + (0x0E028 + ((_n) * 0x40))) +#define E1000_TDWBAL(_n) ((_n) < 4 ? (0x03838 + ((_n) * 0x100)) : \ + (0x0E038 + ((_n) * 0x40))) +#define E1000_TDWBAH(_n) ((_n) < 4 ? (0x0383C + ((_n) * 0x100)) : \ + (0x0E03C + ((_n) * 0x40))) +#define E1000_TARC(_n) (0x03840 + ((_n) * 0x100)) +#define E1000_RSRPD 0x02C00 /* Rx Small Packet Detect - RW */ +#define E1000_RAID 0x02C08 /* Receive Ack Interrupt Delay - RW */ +#define E1000_TXDMAC 0x03000 /* Tx DMA Control - RW */ +#define E1000_KABGTXD 0x03004 /* AFE Band Gap Transmit Ref Data */ +#define E1000_PSRTYPE(_i) (0x05480 + ((_i) * 4)) +#define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \ + (0x054E0 + ((_i - 16) * 8))) +#define E1000_RAH(_i) (((_i) <= 15) ? 
(0x05404 + ((_i) * 8)) : \ + (0x054E4 + ((_i - 16) * 8))) +#define E1000_SHRAL(_i) (0x05438 + ((_i) * 8)) +#define E1000_SHRAH(_i) (0x0543C + ((_i) * 8)) +#define E1000_IP4AT_REG(_i) (0x05840 + ((_i) * 8)) +#define E1000_IP6AT_REG(_i) (0x05880 + ((_i) * 4)) +#define E1000_WUPM_REG(_i) (0x05A00 + ((_i) * 4)) +#define E1000_FFMT_REG(_i) (0x09000 + ((_i) * 8)) +#define E1000_FFVT_REG(_i) (0x09800 + ((_i) * 8)) +#define E1000_FFLT_REG(_i) (0x05F00 + ((_i) * 8)) +#define E1000_PBSLAC 0x03100 /* Pkt Buffer Slave Access Control */ +#define E1000_PBSLAD(_n) (0x03110 + (0x4 * (_n))) /* Pkt Buffer DWORD */ +#define E1000_TXPBS 0x03404 /* Tx Packet Buffer Size - RW */ +/* Same as TXPBS, renamed for newer Si - RW */ +#define E1000_ITPBS 0x03404 +#define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */ +#define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */ +#define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */ +#define E1000_TDFTS 0x03428 /* Tx Data FIFO Tail Saved - RW */ +#define E1000_TDFPC 0x03430 /* Tx Data FIFO Packet Count - RW */ +#define E1000_TDPUMB 0x0357C /* DMA Tx Desc uC Mail Box - RW */ +#define E1000_TDPUAD 0x03580 /* DMA Tx Desc uC Addr Command - RW */ +#define E1000_TDPUWD 0x03584 /* DMA Tx Desc uC Data Write - RW */ +#define E1000_TDPURD 0x03588 /* DMA Tx Desc uC Data Read - RW */ +#define E1000_TDPUCTL 0x0358C /* DMA Tx Desc uC Control - RW */ +#define E1000_DTXCTL 0x03590 /* DMA Tx Control - RW */ +#define E1000_DTXTCPFLGL 0x0359C /* DMA Tx Control flag low - RW */ +#define E1000_DTXTCPFLGH 0x035A0 /* DMA Tx Control flag high - RW */ +/* DMA Tx Max Total Allow Size Reqs - RW */ +#define E1000_DTXMXSZRQ 0x03540 +#define E1000_TIDV 0x03820 /* Tx Interrupt Delay Value - RW */ +#define E1000_TADV 0x0382C /* Tx Interrupt Absolute Delay Val - RW */ +#define E1000_TSPMT 0x03830 /* TCP Segmentation PAD & Min Threshold - RW */ +#define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */ +#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */ +#define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */ +#define E1000_RXERRC 0x0400C /* Receive Error Count - R/clr */ +#define E1000_MPC 0x04010 /* Missed Packet Count - R/clr */ +#define E1000_SCC 0x04014 /* Single Collision Count - R/clr */ +#define E1000_ECOL 0x04018 /* Excessive Collision Count - R/clr */ +#define E1000_MCC 0x0401C /* Multiple Collision Count - R/clr */ +#define E1000_LATECOL 0x04020 /* Late Collision Count - R/clr */ +#define E1000_COLC 0x04028 /* Collision Count - R/clr */ +#define E1000_DC 0x04030 /* Defer Count - R/clr */ +#define E1000_TNCRS 0x04034 /* Tx-No CRS - R/clr */ +#define E1000_SEC 0x04038 /* Sequence Error Count - R/clr */ +#define E1000_CEXTERR 0x0403C /* Carrier Extension Error Count - R/clr */ +#define E1000_RLEC 0x04040 /* Receive Length Error Count - R/clr */ +#define E1000_XONRXC 0x04048 /* XON Rx Count - R/clr */ +#define E1000_XONTXC 0x0404C /* XON Tx Count - R/clr */ +#define E1000_XOFFRXC 0x04050 /* XOFF Rx Count - R/clr */ +#define E1000_XOFFTXC 0x04054 /* XOFF Tx Count - R/clr */ +#define E1000_FCRUC 0x04058 /* Flow Control Rx Unsupported Count- R/clr */ +#define E1000_PRC64 0x0405C /* Packets Rx (64 bytes) - R/clr */ +#define E1000_PRC127 0x04060 /* Packets Rx (65-127 bytes) - R/clr */ +#define E1000_PRC255 0x04064 /* Packets Rx (128-255 bytes) - R/clr */ +#define E1000_PRC511 0x04068 /* Packets Rx (255-511 bytes) - R/clr */ +#define E1000_PRC1023 0x0406C /* Packets Rx (512-1023 bytes) - R/clr */ +#define E1000_PRC1522 0x04070 /* Packets Rx (1024-1522 bytes) - R/clr */ 
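The queue-indexed convenience macros above (E1000_RDBAL(_n), E1000_RDT(_n), E1000_TXDCTL(_n) and friends) encode a split register map: queues 0-3 sit in the legacy CSR block at a 0x100 stride, while queue 4 and up sit in an extended block (0x0C000 for Rx, 0x0E000 for Tx) at a 0x40 stride. The standalone sketch below is editorial, not part of the patch; it simply re-derives the Rx tail offsets the same way E1000_RDT(_n) does, so the layout can be checked at a glance. The helper name rdt_offset() is invented for illustration.

/* Editorial sketch: mirrors the E1000_RDT(_n) macro defined above. */
#include <stdint.h>
#include <stdio.h>

static uint32_t rdt_offset(unsigned int q)
{
        /* Queues 0-3: legacy block at 0x02818, 0x100 apart.
         * Queues 4+ : extended block at 0x0C018, 0x40 apart. */
        return q < 4 ? 0x02818 + q * 0x100 : 0x0C018 + q * 0x40;
}

int main(void)
{
        for (unsigned int q = 0; q < 8; q++)
                printf("RDT(%u) = 0x%05X\n", q, (unsigned)rdt_offset(q));
        return 0;
}

Run as-is, this prints 0x02818, 0x02918, 0x02A18 and 0x02B18 for queues 0-3, then 0x0C118 onwards for queue 4 and up, matching the conditional in the macro.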
+#define E1000_GPRC 0x04074 /* Good Packets Rx Count - R/clr */ +#define E1000_BPRC 0x04078 /* Broadcast Packets Rx Count - R/clr */ +#define E1000_MPRC 0x0407C /* Multicast Packets Rx Count - R/clr */ +#define E1000_GPTC 0x04080 /* Good Packets Tx Count - R/clr */ +#define E1000_GORCL 0x04088 /* Good Octets Rx Count Low - R/clr */ +#define E1000_GORCH 0x0408C /* Good Octets Rx Count High - R/clr */ +#define E1000_GOTCL 0x04090 /* Good Octets Tx Count Low - R/clr */ +#define E1000_GOTCH 0x04094 /* Good Octets Tx Count High - R/clr */ +#define E1000_RNBC 0x040A0 /* Rx No Buffers Count - R/clr */ +#define E1000_RUC 0x040A4 /* Rx Undersize Count - R/clr */ +#define E1000_RFC 0x040A8 /* Rx Fragment Count - R/clr */ +#define E1000_ROC 0x040AC /* Rx Oversize Count - R/clr */ +#define E1000_RJC 0x040B0 /* Rx Jabber Count - R/clr */ +#define E1000_MGTPRC 0x040B4 /* Management Packets Rx Count - R/clr */ +#define E1000_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */ +#define E1000_MGTPTC 0x040BC /* Management Packets Tx Count - R/clr */ +#define E1000_TORL 0x040C0 /* Total Octets Rx Low - R/clr */ +#define E1000_TORH 0x040C4 /* Total Octets Rx High - R/clr */ +#define E1000_TOTL 0x040C8 /* Total Octets Tx Low - R/clr */ +#define E1000_TOTH 0x040CC /* Total Octets Tx High - R/clr */ +#define E1000_TPR 0x040D0 /* Total Packets Rx - R/clr */ +#define E1000_TPT 0x040D4 /* Total Packets Tx - R/clr */ +#define E1000_PTC64 0x040D8 /* Packets Tx (64 bytes) - R/clr */ +#define E1000_PTC127 0x040DC /* Packets Tx (65-127 bytes) - R/clr */ +#define E1000_PTC255 0x040E0 /* Packets Tx (128-255 bytes) - R/clr */ +#define E1000_PTC511 0x040E4 /* Packets Tx (256-511 bytes) - R/clr */ +#define E1000_PTC1023 0x040E8 /* Packets Tx (512-1023 bytes) - R/clr */ +#define E1000_PTC1522 0x040EC /* Packets Tx (1024-1522 Bytes) - R/clr */ +#define E1000_MPTC 0x040F0 /* Multicast Packets Tx Count - R/clr */ +#define E1000_BPTC 0x040F4 /* Broadcast Packets Tx Count - R/clr */ +#define E1000_TSCTC 0x040F8 /* TCP Segmentation Context Tx - R/clr */ +#define E1000_TSCTFC 0x040FC /* TCP Segmentation Context Tx Fail - R/clr */ +#define E1000_IAC 0x04100 /* Interrupt Assertion Count */ +#define E1000_ICRXPTC 0x04104 /* Interrupt Cause Rx Pkt Timer Expire Count */ +#define E1000_ICRXATC 0x04108 /* Interrupt Cause Rx Abs Timer Expire Count */ +#define E1000_ICTXPTC 0x0410C /* Interrupt Cause Tx Pkt Timer Expire Count */ +#define E1000_ICTXATC 0x04110 /* Interrupt Cause Tx Abs Timer Expire Count */ +#define E1000_ICTXQEC 0x04118 /* Interrupt Cause Tx Queue Empty Count */ +#define E1000_ICTXQMTC 0x0411C /* Interrupt Cause Tx Queue Min Thresh Count */ +#define E1000_ICRXDMTC 0x04120 /* Interrupt Cause Rx Desc Min Thresh Count */ +#define E1000_ICRXOC 0x04124 /* Interrupt Cause Receiver Overrun Count */ +#define E1000_CRC_OFFSET 0x05F50 /* CRC Offset register */ + +#define E1000_VFGPRC 0x00F10 +#define E1000_VFGORC 0x00F18 +#define E1000_VFMPRC 0x00F3C +#define E1000_VFGPTC 0x00F14 +#define E1000_VFGOTC 0x00F34 +#define E1000_VFGOTLBC 0x00F50 +#define E1000_VFGPTLBC 0x00F44 +#define E1000_VFGORLBC 0x00F48 +#define E1000_VFGPRLBC 0x00F40 +/* Virtualization statistical counters */ +#define E1000_PFVFGPRC(_n) (0x010010 + (0x100 * (_n))) +#define E1000_PFVFGPTC(_n) (0x010014 + (0x100 * (_n))) +#define E1000_PFVFGORC(_n) (0x010018 + (0x100 * (_n))) +#define E1000_PFVFGOTC(_n) (0x010034 + (0x100 * (_n))) +#define E1000_PFVFMPRC(_n) (0x010038 + (0x100 * (_n))) +#define E1000_PFVFGPRLBC(_n) (0x010040 + (0x100 * (_n))) +#define 
E1000_PFVFGPTLBC(_n) (0x010044 + (0x100 * (_n))) +#define E1000_PFVFGORLBC(_n) (0x010048 + (0x100 * (_n))) +#define E1000_PFVFGOTLBC(_n) (0x010050 + (0x100 * (_n))) + +/* LinkSec */ +#define E1000_LSECTXUT 0x04300 /* Tx Untagged Pkt Cnt */ +#define E1000_LSECTXPKTE 0x04304 /* Encrypted Tx Pkts Cnt */ +#define E1000_LSECTXPKTP 0x04308 /* Protected Tx Pkt Cnt */ +#define E1000_LSECTXOCTE 0x0430C /* Encrypted Tx Octets Cnt */ +#define E1000_LSECTXOCTP 0x04310 /* Protected Tx Octets Cnt */ +#define E1000_LSECRXUT 0x04314 /* Untagged non-Strict Rx Pkt Cnt */ +#define E1000_LSECRXOCTD 0x0431C /* Rx Octets Decrypted Count */ +#define E1000_LSECRXOCTV 0x04320 /* Rx Octets Validated */ +#define E1000_LSECRXBAD 0x04324 /* Rx Bad Tag */ +#define E1000_LSECRXNOSCI 0x04328 /* Rx Packet No SCI Count */ +#define E1000_LSECRXUNSCI 0x0432C /* Rx Packet Unknown SCI Count */ +#define E1000_LSECRXUNCH 0x04330 /* Rx Unchecked Packets Count */ +#define E1000_LSECRXDELAY 0x04340 /* Rx Delayed Packet Count */ +#define E1000_LSECRXLATE 0x04350 /* Rx Late Packets Count */ +#define E1000_LSECRXOK(_n) (0x04360 + (0x04 * (_n))) /* Rx Pkt OK Cnt */ +#define E1000_LSECRXINV(_n) (0x04380 + (0x04 * (_n))) /* Rx Invalid Cnt */ +#define E1000_LSECRXNV(_n) (0x043A0 + (0x04 * (_n))) /* Rx Not Valid Cnt */ +#define E1000_LSECRXUNSA 0x043C0 /* Rx Unused SA Count */ +#define E1000_LSECRXNUSA 0x043D0 /* Rx Not Using SA Count */ +#define E1000_LSECTXCAP 0x0B000 /* Tx Capabilities Register - RO */ +#define E1000_LSECRXCAP 0x0B300 /* Rx Capabilities Register - RO */ +#define E1000_LSECTXCTRL 0x0B004 /* Tx Control - RW */ +#define E1000_LSECRXCTRL 0x0B304 /* Rx Control - RW */ +#define E1000_LSECTXSCL 0x0B008 /* Tx SCI Low - RW */ +#define E1000_LSECTXSCH 0x0B00C /* Tx SCI High - RW */ +#define E1000_LSECTXSA 0x0B010 /* Tx SA0 - RW */ +#define E1000_LSECTXPN0 0x0B018 /* Tx SA PN 0 - RW */ +#define E1000_LSECTXPN1 0x0B01C /* Tx SA PN 1 - RW */ +#define E1000_LSECRXSCL 0x0B3D0 /* Rx SCI Low - RW */ +#define E1000_LSECRXSCH 0x0B3E0 /* Rx SCI High - RW */ +/* LinkSec Tx 128-bit Key 0 - WO */ +#define E1000_LSECTXKEY0(_n) (0x0B020 + (0x04 * (_n))) +/* LinkSec Tx 128-bit Key 1 - WO */ +#define E1000_LSECTXKEY1(_n) (0x0B030 + (0x04 * (_n))) +#define E1000_LSECRXSA(_n) (0x0B310 + (0x04 * (_n))) /* Rx SAs - RW */ +#define E1000_LSECRXPN(_n) (0x0B330 + (0x04 * (_n))) /* Rx SAs - RW */ +/* LinkSec Rx Keys - where _n is the SA no. and _m the 4 dwords of the 128 bit + * key - RW. 
+ */ +#define E1000_LSECRXKEY(_n, _m) (0x0B350 + (0x10 * (_n)) + (0x04 * (_m))) + +#define E1000_SSVPC 0x041A0 /* Switch Security Violation Pkt Cnt */ +#define E1000_IPSCTRL 0xB430 /* IpSec Control Register */ +#define E1000_IPSRXCMD 0x0B408 /* IPSec Rx Command Register - RW */ +#define E1000_IPSRXIDX 0x0B400 /* IPSec Rx Index - RW */ +/* IPSec Rx IPv4/v6 Address - RW */ +#define E1000_IPSRXIPADDR(_n) (0x0B420 + (0x04 * (_n))) +/* IPSec Rx 128-bit Key - RW */ +#define E1000_IPSRXKEY(_n) (0x0B410 + (0x04 * (_n))) +#define E1000_IPSRXSALT 0x0B404 /* IPSec Rx Salt - RW */ +#define E1000_IPSRXSPI 0x0B40C /* IPSec Rx SPI - RW */ +/* IPSec Tx 128-bit Key - RW */ +#define E1000_IPSTXKEY(_n) (0x0B460 + (0x04 * (_n))) +#define E1000_IPSTXSALT 0x0B454 /* IPSec Tx Salt - RW */ +#define E1000_IPSTXIDX 0x0B450 /* IPSec Tx SA IDX - RW */ +#define E1000_PCS_CFG0 0x04200 /* PCS Configuration 0 - RW */ +#define E1000_PCS_LCTL 0x04208 /* PCS Link Control - RW */ +#define E1000_PCS_LSTAT 0x0420C /* PCS Link Status - RO */ +#define E1000_CBTMPC 0x0402C /* Circuit Breaker Tx Packet Count */ +#define E1000_HTDPMC 0x0403C /* Host Transmit Discarded Packets */ +#define E1000_CBRDPC 0x04044 /* Circuit Breaker Rx Dropped Count */ +#define E1000_CBRMPC 0x040FC /* Circuit Breaker Rx Packet Count */ +#define E1000_RPTHC 0x04104 /* Rx Packets To Host */ +#define E1000_HGPTC 0x04118 /* Host Good Packets Tx Count */ +#define E1000_HTCBDPC 0x04124 /* Host Tx Circuit Breaker Dropped Count */ +#define E1000_HGORCL 0x04128 /* Host Good Octets Received Count Low */ +#define E1000_HGORCH 0x0412C /* Host Good Octets Received Count High */ +#define E1000_HGOTCL 0x04130 /* Host Good Octets Transmit Count Low */ +#define E1000_HGOTCH 0x04134 /* Host Good Octets Transmit Count High */ +#define E1000_LENERRS 0x04138 /* Length Errors Count */ +#define E1000_SCVPC 0x04228 /* SerDes/SGMII Code Violation Pkt Count */ +#define E1000_HRMPC 0x0A018 /* Header Redirection Missed Packet Count */ +#define E1000_PCS_ANADV 0x04218 /* AN advertisement - RW */ +#define E1000_PCS_LPAB 0x0421C /* Link Partner Ability - RW */ +#define E1000_PCS_NPTX 0x04220 /* AN Next Page Transmit - RW */ +#define E1000_PCS_LPABNP 0x04224 /* Link Partner Ability Next Pg - RW */ +#define E1000_RXCSUM 0x05000 /* Rx Checksum Control - RW */ +#define E1000_RLPML 0x05004 /* Rx Long Packet Max Length */ +#define E1000_RFCTL 0x05008 /* Receive Filter Control*/ +#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */ +#define E1000_RA 0x05400 /* Receive Address - RW Array */ +#define E1000_RA2 0x054E0 /* 2nd half of Rx address array - RW Array */ +#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */ +#define E1000_VT_CTL 0x0581C /* VMDq Control - RW */ +#define E1000_CIAA 0x05B88 /* Config Indirect Access Address - RW */ +#define E1000_CIAD 0x05B8C /* Config Indirect Access Data - RW */ +#define E1000_VFQA0 0x0B000 /* VLAN Filter Queue Array 0 - RW Array */ +#define E1000_VFQA1 0x0B200 /* VLAN Filter Queue Array 1 - RW Array */ +#define E1000_WUC 0x05800 /* Wakeup Control - RW */ +#define E1000_WUFC 0x05808 /* Wakeup Filter Control - RW */ +#define E1000_WUS 0x05810 /* Wakeup Status - RO */ +#define E1000_MANC 0x05820 /* Management Control - RW */ +#define E1000_IPAV 0x05838 /* IP Address Valid - RW */ +#define E1000_IP4AT 0x05840 /* IPv4 Address Table - RW Array */ +#define E1000_IP6AT 0x05880 /* IPv6 Address Table - RW Array */ +#define E1000_WUPL 0x05900 /* Wakeup Packet Length - RW */ +#define E1000_WUPM 0x05A00 /* Wakeup Packet Memory - RO A */ 
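E1000_VFTA above is the base of the 4,096-bit VLAN Filter Table Array that the igb code later in this patch shadows as struct e1000_vfta (IGB_VFTA_SIZE = 128 dwords). The sketch below is editorial and assumes the conventional e1000/igb mapping of a VLAN ID onto that array (bits 11:5 select the dword, bits 4:0 select the bit within it); the helper vfta_locate() is invented for illustration and is not part of the patch.

/* Editorial sketch: conventional VLAN-ID to VFTA register/bit mapping. */
#include <stdint.h>
#include <stdio.h>

#define E1000_VFTA_BASE 0x05600u /* matches E1000_VFTA above */
#define IGB_VFTA_SIZE   128      /* 128 x 32 bits = 4096 VLAN IDs */

static void vfta_locate(uint16_t vid, uint32_t *reg_offset, uint32_t *bit_mask)
{
        /* Bits [11:5] of the VLAN ID pick one of the 128 dwords,
         * bits [4:0] pick the bit inside that dword. */
        uint32_t index = (vid >> 5) & (IGB_VFTA_SIZE - 1);

        *reg_offset = E1000_VFTA_BASE + index * 4;
        *bit_mask = 1u << (vid & 0x1F);
}

int main(void)
{
        uint32_t off, mask;

        vfta_locate(100, &off, &mask);
        printf("VLAN 100: reg 0x%05X, mask 0x%08X\n",
               (unsigned)off, (unsigned)mask);
        return 0;
}

For VLAN 100 this yields register offset 0x0560C and bit mask 0x00000010, i.e. dword 3, bit 4 of the array.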
+#define E1000_PBACL 0x05B68 /* MSIx PBA Clear - Read/Write 1's to clear */ +#define E1000_FFLT 0x05F00 /* Flexible Filter Length Table - RW Array */ +#define E1000_HOST_IF 0x08800 /* Host Interface */ +#define E1000_HIBBA 0x8F40 /* Host Interface Buffer Base Address */ +/* Flexible Host Filter Table */ +#define E1000_FHFT(_n) (0x09000 + ((_n) * 0x100)) +/* Ext Flexible Host Filter Table */ +#define E1000_FHFT_EXT(_n) (0x09A00 + ((_n) * 0x100)) + + +#define E1000_KMRNCTRLSTA 0x00034 /* MAC-PHY interface - RW */ +#define E1000_MANC2H 0x05860 /* Management Control To Host - RW */ +/* Management Decision Filters */ +#define E1000_MDEF(_n) (0x05890 + (4 * (_n))) +#define E1000_SW_FW_SYNC 0x05B5C /* SW-FW Synchronization - RW */ +#define E1000_CCMCTL 0x05B48 /* CCM Control Register */ +#define E1000_GIOCTL 0x05B44 /* GIO Analog Control Register */ +#define E1000_SCCTL 0x05B4C /* PCIc PLL Configuration Register */ +#define E1000_GCR 0x05B00 /* PCI-Ex Control */ +#define E1000_GCR2 0x05B64 /* PCI-Ex Control #2 */ +#define E1000_GSCL_1 0x05B10 /* PCI-Ex Statistic Control #1 */ +#define E1000_GSCL_2 0x05B14 /* PCI-Ex Statistic Control #2 */ +#define E1000_GSCL_3 0x05B18 /* PCI-Ex Statistic Control #3 */ +#define E1000_GSCL_4 0x05B1C /* PCI-Ex Statistic Control #4 */ +#define E1000_FACTPS 0x05B30 /* Function Active and Power State to MNG */ +#define E1000_SWSM 0x05B50 /* SW Semaphore */ +#define E1000_FWSM 0x05B54 /* FW Semaphore */ +/* Driver-only SW semaphore (not used by BOOT agents) */ +#define E1000_SWSM2 0x05B58 +#define E1000_DCA_ID 0x05B70 /* DCA Requester ID Information - RO */ +#define E1000_DCA_CTRL 0x05B74 /* DCA Control - RW */ +#define E1000_UFUSE 0x05B78 /* UFUSE - RO */ +#define E1000_FFLT_DBG 0x05F04 /* Debug Register */ +#define E1000_HICR 0x08F00 /* Host Interface Control */ +#define E1000_FWSTS 0x08F0C /* FW Status */ + +/* RSS registers */ +#define E1000_CPUVEC 0x02C10 /* CPU Vector Register - RW */ +#define E1000_MRQC 0x05818 /* Multiple Receive Control - RW */ +#define E1000_IMIR(_i) (0x05A80 + ((_i) * 4)) /* Immediate Interrupt */ +#define E1000_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* Immediate INTR Ext*/ +#define E1000_IMIRVP 0x05AC0 /* Immediate INT Rx VLAN Priority -RW */ +#define E1000_MSIXBM(_i) (0x01600 + ((_i) * 4)) /* MSI-X Alloc Reg -RW */ +#define E1000_RETA(_i) (0x05C00 + ((_i) * 4)) /* Redirection Table - RW */ +#define E1000_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* RSS Random Key - RW */ +#define E1000_RSSIM 0x05864 /* RSS Interrupt Mask */ +#define E1000_RSSIR 0x05868 /* RSS Interrupt Request */ +/* VT Registers */ +#define E1000_SWPBS 0x03004 /* Switch Packet Buffer Size - RW */ +#define E1000_MBVFICR 0x00C80 /* Mailbox VF Cause - RWC */ +#define E1000_MBVFIMR 0x00C84 /* Mailbox VF int Mask - RW */ +#define E1000_VFLRE 0x00C88 /* VF Register Events - RWC */ +#define E1000_VFRE 0x00C8C /* VF Receive Enables */ +#define E1000_VFTE 0x00C90 /* VF Transmit Enables */ +#define E1000_QDE 0x02408 /* Queue Drop Enable - RW */ +#define E1000_DTXSWC 0x03500 /* DMA Tx Switch Control - RW */ +#define E1000_WVBR 0x03554 /* VM Wrong Behavior - RWS */ +#define E1000_RPLOLR 0x05AF0 /* Replication Offload - RW */ +#define E1000_UTA 0x0A000 /* Unicast Table Array - RW */ +#define E1000_IOVTCL 0x05BBC /* IOV Control Register */ +#define E1000_VMRCTL 0X05D80 /* Virtual Mirror Rule Control */ +#define E1000_VMRVLAN 0x05D90 /* Virtual Mirror Rule VLAN */ +#define E1000_VMRVM 0x05DA0 /* Virtual Mirror Rule VM */ +#define E1000_MDFB 0x03558 /* Malicious Driver free block */ +#define 
E1000_LVMMC 0x03548 /* Last VM Misbehavior cause */ +#define E1000_TXSWC 0x05ACC /* Tx Switch Control */ +#define E1000_SCCRL 0x05DB0 /* Storm Control Control */ +#define E1000_BSCTRH 0x05DB8 /* Broadcast Storm Control Threshold */ +#define E1000_MSCTRH 0x05DBC /* Multicast Storm Control Threshold */ +/* These act per VF so an array friendly macro is used */ +#define E1000_V2PMAILBOX(_n) (0x00C40 + (4 * (_n))) +#define E1000_P2VMAILBOX(_n) (0x00C00 + (4 * (_n))) +#define E1000_VMBMEM(_n) (0x00800 + (64 * (_n))) +#define E1000_VFVMBMEM(_n) (0x00800 + (_n)) +#define E1000_VMOLR(_n) (0x05AD0 + (4 * (_n))) +/* VLAN Virtual Machine Filter - RW */ +#define E1000_VLVF(_n) (0x05D00 + (4 * (_n))) +#define E1000_VMVIR(_n) (0x03700 + (4 * (_n))) +#define E1000_DVMOLR(_n) (0x0C038 + (0x40 * (_n))) /* DMA VM offload */ +#define E1000_VTCTRL(_n) (0x10000 + (0x100 * (_n))) /* VT Control */ +#define E1000_TSYNCRXCTL 0x0B620 /* Rx Time Sync Control register - RW */ +#define E1000_TSYNCTXCTL 0x0B614 /* Tx Time Sync Control register - RW */ +#define E1000_TSYNCRXCFG 0x05F50 /* Time Sync Rx Configuration - RW */ +#define E1000_RXSTMPL 0x0B624 /* Rx timestamp Low - RO */ +#define E1000_RXSTMPH 0x0B628 /* Rx timestamp High - RO */ +#define E1000_RXSATRL 0x0B62C /* Rx timestamp attribute low - RO */ +#define E1000_RXSATRH 0x0B630 /* Rx timestamp attribute high - RO */ +#define E1000_TXSTMPL 0x0B618 /* Tx timestamp value Low - RO */ +#define E1000_TXSTMPH 0x0B61C /* Tx timestamp value High - RO */ +#define E1000_SYSTIML 0x0B600 /* System time register Low - RO */ +#define E1000_SYSTIMH 0x0B604 /* System time register High - RO */ +#define E1000_TIMINCA 0x0B608 /* Increment attributes register - RW */ +#define E1000_TIMADJL 0x0B60C /* Time sync time adjustment offset Low - RW */ +#define E1000_TIMADJH 0x0B610 /* Time sync time adjustment offset High - RW */ +#define E1000_TSAUXC 0x0B640 /* Timesync Auxiliary Control register */ +#define E1000_SYSSTMPL 0x0B648 /* HH Timesync system stamp low register */ +#define E1000_SYSSTMPH 0x0B64C /* HH Timesync system stamp hi register */ +#define E1000_PLTSTMPL 0x0B640 /* HH Timesync platform stamp low register */ +#define E1000_PLTSTMPH 0x0B644 /* HH Timesync platform stamp hi register */ +#define E1000_SYSTIMR 0x0B6F8 /* System time register Residue */ +#define E1000_TSICR 0x0B66C /* Interrupt Cause Register */ +#define E1000_TSIM 0x0B674 /* Interrupt Mask Register */ +#define E1000_RXMTRL 0x0B634 /* Time sync Rx EtherType and Msg Type - RW */ +#define E1000_RXUDP 0x0B638 /* Time Sync Rx UDP Port - RW */ + +/* Filtering Registers */ +#define E1000_SAQF(_n) (0x05980 + (4 * (_n))) /* Source Address Queue Fltr */ +#define E1000_DAQF(_n) (0x059A0 + (4 * (_n))) /* Dest Address Queue Fltr */ +#define E1000_SPQF(_n) (0x059C0 + (4 * (_n))) /* Source Port Queue Fltr */ +#define E1000_FTQF(_n) (0x059E0 + (4 * (_n))) /* 5-tuple Queue Fltr */ +#define E1000_TTQF(_n) (0x059E0 + (4 * (_n))) /* 2-tuple Queue Fltr */ +#define E1000_SYNQF(_n) (0x055FC + (4 * (_n))) /* SYN Packet Queue Fltr */ +#define E1000_ETQF(_n) (0x05CB0 + (4 * (_n))) /* EType Queue Fltr */ + +#define E1000_RTTDCS 0x3600 /* Reedtown Tx Desc plane control and status */ +#define E1000_RTTPCS 0x3474 /* Reedtown Tx Packet Plane control and status */ +#define E1000_RTRPCS 0x2474 /* Rx packet plane control and status */ +#define E1000_RTRUP2TC 0x05AC4 /* Rx User Priority to Traffic Class */ +#define E1000_RTTUP2TC 0x0418 /* Transmit User Priority to Traffic Class */ +/* Tx Desc plane TC Rate-scheduler config */ +#define 
E1000_RTTDTCRC(_n) (0x3610 + ((_n) * 4)) +/* Tx Packet plane TC Rate-Scheduler Config */ +#define E1000_RTTPTCRC(_n) (0x3480 + ((_n) * 4)) +/* Rx Packet plane TC Rate-Scheduler Config */ +#define E1000_RTRPTCRC(_n) (0x2480 + ((_n) * 4)) +/* Tx Desc Plane TC Rate-Scheduler Status */ +#define E1000_RTTDTCRS(_n) (0x3630 + ((_n) * 4)) +/* Tx Desc Plane TC Rate-Scheduler MMW */ +#define E1000_RTTDTCRM(_n) (0x3650 + ((_n) * 4)) +/* Tx Packet plane TC Rate-Scheduler Status */ +#define E1000_RTTPTCRS(_n) (0x34A0 + ((_n) * 4)) +/* Tx Packet plane TC Rate-scheduler MMW */ +#define E1000_RTTPTCRM(_n) (0x34C0 + ((_n) * 4)) +/* Rx Packet plane TC Rate-Scheduler Status */ +#define E1000_RTRPTCRS(_n) (0x24A0 + ((_n) * 4)) +/* Rx Packet plane TC Rate-Scheduler MMW */ +#define E1000_RTRPTCRM(_n) (0x24C0 + ((_n) * 4)) +/* Tx Desc plane VM Rate-Scheduler MMW*/ +#define E1000_RTTDVMRM(_n) (0x3670 + ((_n) * 4)) +/* Tx BCN Rate-Scheduler MMW */ +#define E1000_RTTBCNRM(_n) (0x3690 + ((_n) * 4)) +#define E1000_RTTDQSEL 0x3604 /* Tx Desc Plane Queue Select */ +#define E1000_RTTDVMRC 0x3608 /* Tx Desc Plane VM Rate-Scheduler Config */ +#define E1000_RTTDVMRS 0x360C /* Tx Desc Plane VM Rate-Scheduler Status */ +#define E1000_RTTBCNRC 0x36B0 /* Tx BCN Rate-Scheduler Config */ +#define E1000_RTTBCNRS 0x36B4 /* Tx BCN Rate-Scheduler Status */ +#define E1000_RTTBCNCR 0xB200 /* Tx BCN Control Register */ +#define E1000_RTTBCNTG 0x35A4 /* Tx BCN Tagging */ +#define E1000_RTTBCNCP 0xB208 /* Tx BCN Congestion point */ +#define E1000_RTRBCNCR 0xB20C /* Rx BCN Control Register */ +#define E1000_RTTBCNRD 0x36B8 /* Tx BCN Rate Drift */ +#define E1000_PFCTOP 0x1080 /* Priority Flow Control Type and Opcode */ +#define E1000_RTTBCNIDX 0xB204 /* Tx BCN Congestion Point */ +#define E1000_RTTBCNACH 0x0B214 /* Tx BCN Control High */ +#define E1000_RTTBCNACL 0x0B210 /* Tx BCN Control Low */ + +/* DMA Coalescing registers */ +#define E1000_DMACR 0x02508 /* Control Register */ +#define E1000_DMCTXTH 0x03550 /* Transmit Threshold */ +#define E1000_DMCTLX 0x02514 /* Time to Lx Request */ +#define E1000_DMCRTRH 0x05DD0 /* Receive Packet Rate Threshold */ +#define E1000_DMCCNT 0x05DD4 /* Current Rx Count */ +#define E1000_FCRTC 0x02170 /* Flow Control Rx high watermark */ +#define E1000_PCIEMISC 0x05BB8 /* PCIE misc config register */ + +/* PCIe Parity Status Register */ +#define E1000_PCIEERRSTS 0x05BA8 + +#define E1000_PROXYS 0x5F64 /* Proxying Status */ +#define E1000_PROXYFC 0x5F60 /* Proxying Filter Control */ +/* Thermal sensor configuration and status registers */ +#define E1000_THMJT 0x08100 /* Junction Temperature */ +#define E1000_THLOWTC 0x08104 /* Low Threshold Control */ +#define E1000_THMIDTC 0x08108 /* Mid Threshold Control */ +#define E1000_THHIGHTC 0x0810C /* High Threshold Control */ +#define E1000_THSTAT 0x08110 /* Thermal Sensor Status */ + +/* Energy Efficient Ethernet "EEE" registers */ +#define E1000_IPCNFG 0x0E38 /* Internal PHY Configuration */ +#define E1000_LTRC 0x01A0 /* Latency Tolerance Reporting Control */ +#define E1000_EEER 0x0E30 /* Energy Efficient Ethernet "EEE"*/ +#define E1000_EEE_SU 0x0E34 /* EEE Setup */ +#define E1000_TLPIC 0x4148 /* EEE Tx LPI Count - TLPIC */ +#define E1000_RLPIC 0x414C /* EEE Rx LPI Count - RLPIC */ + +/* OS2BMC Registers */ +#define E1000_B2OSPC 0x08FE0 /* BMC2OS packets sent by BMC */ +#define E1000_B2OGPRC 0x04158 /* BMC2OS packets received by host */ +#define E1000_O2BGPTC 0x08FE4 /* OS2BMC packets received by BMC */ +#define E1000_O2BSPC 0x0415C /* OS2BMC packets transmitted 
by host */ + + + +#endif diff --git a/src/spdk/dpdk/drivers/net/e1000/base/e1000_vf.c b/src/spdk/dpdk/drivers/net/e1000/base/e1000_vf.c new file mode 100644 index 000000000..543fa7741 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/e1000/base/e1000_vf.c @@ -0,0 +1,560 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001 - 2015 Intel Corporation + */ + + +#include "e1000_api.h" + + +STATIC s32 e1000_init_phy_params_vf(struct e1000_hw *hw); +STATIC s32 e1000_init_nvm_params_vf(struct e1000_hw *hw); +STATIC void e1000_release_vf(struct e1000_hw *hw); +STATIC s32 e1000_acquire_vf(struct e1000_hw *hw); +STATIC s32 e1000_setup_link_vf(struct e1000_hw *hw); +STATIC s32 e1000_get_bus_info_pcie_vf(struct e1000_hw *hw); +STATIC s32 e1000_init_mac_params_vf(struct e1000_hw *hw); +STATIC s32 e1000_check_for_link_vf(struct e1000_hw *hw); +STATIC s32 e1000_get_link_up_info_vf(struct e1000_hw *hw, u16 *speed, + u16 *duplex); +STATIC s32 e1000_init_hw_vf(struct e1000_hw *hw); +STATIC s32 e1000_reset_hw_vf(struct e1000_hw *hw); +STATIC void e1000_update_mc_addr_list_vf(struct e1000_hw *hw, u8 *, u32); +STATIC int e1000_rar_set_vf(struct e1000_hw *, u8 *, u32); +STATIC s32 e1000_read_mac_addr_vf(struct e1000_hw *); + +/** + * e1000_init_phy_params_vf - Inits PHY params + * @hw: pointer to the HW structure + * + * Doesn't do much - there's no PHY available to the VF. + **/ +STATIC s32 e1000_init_phy_params_vf(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_init_phy_params_vf"); + hw->phy.type = e1000_phy_vf; + hw->phy.ops.acquire = e1000_acquire_vf; + hw->phy.ops.release = e1000_release_vf; + + return E1000_SUCCESS; +} + +/** + * e1000_init_nvm_params_vf - Inits NVM params + * @hw: pointer to the HW structure + * + * Doesn't do much - there's no NVM available to the VF. + **/ +STATIC s32 e1000_init_nvm_params_vf(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_init_nvm_params_vf"); + hw->nvm.type = e1000_nvm_none; + hw->nvm.ops.acquire = e1000_acquire_vf; + hw->nvm.ops.release = e1000_release_vf; + + return E1000_SUCCESS; +} + +/** + * e1000_init_mac_params_vf - Inits MAC params + * @hw: pointer to the HW structure + **/ +STATIC s32 e1000_init_mac_params_vf(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + + DEBUGFUNC("e1000_init_mac_params_vf"); + + /* Set media type */ + /* + * Virtual functions don't care what they're media type is as they + * have no direct access to the PHY, or the media. That is handled + * by the physical function driver. 
+ */ + hw->phy.media_type = e1000_media_type_unknown; + + /* No ASF features for the VF driver */ + mac->asf_firmware_present = false; + /* ARC subsystem not supported */ + mac->arc_subsystem_valid = false; + /* Disable adaptive IFS mode so the generic funcs don't do anything */ + mac->adaptive_ifs = false; + /* VF's have no MTA Registers - PF feature only */ + mac->mta_reg_count = 128; + /* VF's have no access to RAR entries */ + mac->rar_entry_count = 1; + + /* Function pointers */ + /* link setup */ + mac->ops.setup_link = e1000_setup_link_vf; + /* bus type/speed/width */ + mac->ops.get_bus_info = e1000_get_bus_info_pcie_vf; + /* reset */ + mac->ops.reset_hw = e1000_reset_hw_vf; + /* hw initialization */ + mac->ops.init_hw = e1000_init_hw_vf; + /* check for link */ + mac->ops.check_for_link = e1000_check_for_link_vf; + /* link info */ + mac->ops.get_link_up_info = e1000_get_link_up_info_vf; + /* multicast address update */ + mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_vf; + /* set mac address */ + mac->ops.rar_set = e1000_rar_set_vf; + /* read mac address */ + mac->ops.read_mac_addr = e1000_read_mac_addr_vf; + + + return E1000_SUCCESS; +} + +/** + * e1000_init_function_pointers_vf - Inits function pointers + * @hw: pointer to the HW structure + **/ +void e1000_init_function_pointers_vf(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_init_function_pointers_vf"); + + hw->mac.ops.init_params = e1000_init_mac_params_vf; + hw->nvm.ops.init_params = e1000_init_nvm_params_vf; + hw->phy.ops.init_params = e1000_init_phy_params_vf; + hw->mbx.ops.init_params = e1000_init_mbx_params_vf; +} + +/** + * e1000_acquire_vf - Acquire rights to access PHY or NVM. + * @hw: pointer to the HW structure + * + * There is no PHY or NVM so we want all attempts to acquire these to fail. + * In addition, the MAC registers to access PHY/NVM don't exist so we don't + * even want any SW to attempt to use them. + **/ +STATIC s32 e1000_acquire_vf(struct e1000_hw E1000_UNUSEDARG *hw) +{ + UNREFERENCED_1PARAMETER(hw); + return -E1000_ERR_PHY; +} + +/** + * e1000_release_vf - Release PHY or NVM + * @hw: pointer to the HW structure + * + * There is no PHY or NVM so we want all attempts to acquire these to fail. + * In addition, the MAC registers to access PHY/NVM don't exist so we don't + * even want any SW to attempt to use them. + **/ +STATIC void e1000_release_vf(struct e1000_hw E1000_UNUSEDARG *hw) +{ + UNREFERENCED_1PARAMETER(hw); + return; +} + +/** + * e1000_setup_link_vf - Sets up link. + * @hw: pointer to the HW structure + * + * Virtual functions cannot change link. + **/ +STATIC s32 e1000_setup_link_vf(struct e1000_hw E1000_UNUSEDARG *hw) +{ + DEBUGFUNC("e1000_setup_link_vf"); + UNREFERENCED_1PARAMETER(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_get_bus_info_pcie_vf - Gets the bus info. + * @hw: pointer to the HW structure + * + * Virtual functions are not really on their own bus. + **/ +STATIC s32 e1000_get_bus_info_pcie_vf(struct e1000_hw *hw) +{ + struct e1000_bus_info *bus = &hw->bus; + + DEBUGFUNC("e1000_get_bus_info_pcie_vf"); + + /* Do not set type PCI-E because we don't want disable master to run */ + bus->type = e1000_bus_type_reserved; + bus->speed = e1000_bus_speed_2500; + + return 0; +} + +/** + * e1000_get_link_up_info_vf - Gets link info. + * @hw: pointer to the HW structure + * @speed: pointer to 16 bit value to store link speed. + * @duplex: pointer to 16 bit value to store duplex. 
+ * + * Since we cannot read the PHY and get accurate link info, we must rely upon + * the status register's data which is often stale and inaccurate. + **/ +STATIC s32 e1000_get_link_up_info_vf(struct e1000_hw *hw, u16 *speed, + u16 *duplex) +{ + s32 status; + + DEBUGFUNC("e1000_get_link_up_info_vf"); + + status = E1000_READ_REG(hw, E1000_STATUS); + if (status & E1000_STATUS_SPEED_1000) { + *speed = SPEED_1000; + DEBUGOUT("1000 Mbs, "); + } else if (status & E1000_STATUS_SPEED_100) { + *speed = SPEED_100; + DEBUGOUT("100 Mbs, "); + } else { + *speed = SPEED_10; + DEBUGOUT("10 Mbs, "); + } + + if (status & E1000_STATUS_FD) { + *duplex = FULL_DUPLEX; + DEBUGOUT("Full Duplex\n"); + } else { + *duplex = HALF_DUPLEX; + DEBUGOUT("Half Duplex\n"); + } + + return E1000_SUCCESS; +} + +/** + * e1000_reset_hw_vf - Resets the HW + * @hw: pointer to the HW structure + * + * VF's provide a function level reset. This is done using bit 26 of ctrl_reg. + * This is all the reset we can perform on a VF. + **/ +STATIC s32 e1000_reset_hw_vf(struct e1000_hw *hw) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + u32 timeout = E1000_VF_INIT_TIMEOUT; + s32 ret_val = -E1000_ERR_MAC_INIT; + u32 ctrl, msgbuf[3]; + u8 *addr = (u8 *)(&msgbuf[1]); + + DEBUGFUNC("e1000_reset_hw_vf"); + + DEBUGOUT("Issuing a function level reset to MAC\n"); + ctrl = E1000_READ_REG(hw, E1000_CTRL); + E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_RST); + + /* we cannot reset while the RSTI / RSTD bits are asserted */ + while (!mbx->ops.check_for_rst(hw, 0) && timeout) { + timeout--; + usec_delay(5); + } + + if (timeout) { + /* mailbox timeout can now become active */ + mbx->timeout = E1000_VF_MBX_INIT_TIMEOUT; + + msgbuf[0] = E1000_VF_RESET; + mbx->ops.write_posted(hw, msgbuf, 1, 0); + + msec_delay(10); + + /* set our "perm_addr" based on info provided by PF */ + ret_val = mbx->ops.read_posted(hw, msgbuf, 3, 0); + if (!ret_val) { + if (msgbuf[0] == (E1000_VF_RESET | + E1000_VT_MSGTYPE_ACK)) + memcpy(hw->mac.perm_addr, addr, 6); + else + ret_val = -E1000_ERR_MAC_INIT; + } + } + + return ret_val; +} + +/** + * e1000_init_hw_vf - Inits the HW + * @hw: pointer to the HW structure + * + * Not much to do here except clear the PF Reset indication if there is one. 
+ **/ +STATIC s32 e1000_init_hw_vf(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_init_hw_vf"); + + /* attempt to set and restore our mac address */ + e1000_rar_set_vf(hw, hw->mac.addr, 0); + + return E1000_SUCCESS; +} + +/** + * e1000_rar_set_vf - set device MAC address + * @hw: pointer to the HW structure + * @addr: pointer to the receive address + * @index receive address array register + **/ +STATIC int e1000_rar_set_vf(struct e1000_hw *hw, u8 *addr, + u32 E1000_UNUSEDARG index) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + u32 msgbuf[3]; + u8 *msg_addr = (u8 *)(&msgbuf[1]); + s32 ret_val; + + UNREFERENCED_1PARAMETER(index); + memset(msgbuf, 0, 12); + msgbuf[0] = E1000_VF_SET_MAC_ADDR; + memcpy(msg_addr, addr, 6); + ret_val = mbx->ops.write_posted(hw, msgbuf, 3, 0); + + if (!ret_val) + ret_val = mbx->ops.read_posted(hw, msgbuf, 3, 0); + + msgbuf[0] &= ~E1000_VT_MSGTYPE_CTS; + + /* if nacked the address was rejected, use "perm_addr" */ + if (!ret_val && + (msgbuf[0] == (E1000_VF_SET_MAC_ADDR | E1000_VT_MSGTYPE_NACK))) + e1000_read_mac_addr_vf(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_hash_mc_addr_vf - Generate a multicast hash value + * @hw: pointer to the HW structure + * @mc_addr: pointer to a multicast address + * + * Generates a multicast address hash value which is used to determine + * the multicast filter table array address and new table value. + **/ +STATIC u32 e1000_hash_mc_addr_vf(struct e1000_hw *hw, u8 *mc_addr) +{ + u32 hash_value, hash_mask; + u8 bit_shift = 0; + + DEBUGFUNC("e1000_hash_mc_addr_generic"); + + /* Register count multiplied by bits per register */ + hash_mask = (hw->mac.mta_reg_count * 32) - 1; + + /* + * The bit_shift is the number of left-shifts + * where 0xFF would still fall within the hash mask. + */ + while (hash_mask >> bit_shift != 0xFF) + bit_shift++; + + hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) | + (((u16) mc_addr[5]) << bit_shift))); + + return hash_value; +} + +STATIC void e1000_write_msg_read_ack(struct e1000_hw *hw, + u32 *msg, u16 size) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + u32 retmsg[E1000_VFMAILBOX_SIZE]; + s32 retval = mbx->ops.write_posted(hw, msg, size, 0); + + if (!retval) + mbx->ops.read_posted(hw, retmsg, E1000_VFMAILBOX_SIZE, 0); +} + +/** + * e1000_update_mc_addr_list_vf - Update Multicast addresses + * @hw: pointer to the HW structure + * @mc_addr_list: array of multicast addresses to program + * @mc_addr_count: number of multicast addresses to program + * + * Updates the Multicast Table Array. + * The caller must have a packed mc_addr_list of multicast addresses. + **/ +void e1000_update_mc_addr_list_vf(struct e1000_hw *hw, + u8 *mc_addr_list, u32 mc_addr_count) +{ + u32 msgbuf[E1000_VFMAILBOX_SIZE]; + u16 *hash_list = (u16 *)&msgbuf[1]; + u32 hash_value; + u32 i; + + DEBUGFUNC("e1000_update_mc_addr_list_vf"); + + /* Each entry in the list uses 1 16 bit word. We have 30 + * 16 bit words available in our HW msg buffer (minus 1 for the + * msg type). That's 30 hash values if we pack 'em right. If + * there are more than 30 MC addresses to add then punt the + * extras for now and then add code to handle more than 30 later. + * It would be unusual for a server to request that many multi-cast + * addresses except for in large enterprise network environments. 
+ */ + + DEBUGOUT1("MC Addr Count = %d\n", mc_addr_count); + + msgbuf[0] = E1000_VF_SET_MULTICAST; + + if (mc_addr_count > 30) { + msgbuf[0] |= E1000_VF_SET_MULTICAST_OVERFLOW; + mc_addr_count = 30; + } + + msgbuf[0] |= mc_addr_count << E1000_VT_MSGINFO_SHIFT; + + for (i = 0; i < mc_addr_count; i++) { + hash_value = e1000_hash_mc_addr_vf(hw, mc_addr_list); + DEBUGOUT1("Hash value = 0x%03X\n", hash_value); + hash_list[i] = hash_value & 0x0FFF; + mc_addr_list += ETH_ADDR_LEN; + } + + e1000_write_msg_read_ack(hw, msgbuf, E1000_VFMAILBOX_SIZE); +} + +/** + * e1000_vfta_set_vf - Set/Unset vlan filter table address + * @hw: pointer to the HW structure + * @vid: determines the vfta register and bit to set/unset + * @set: if true then set bit, else clear bit + **/ +void e1000_vfta_set_vf(struct e1000_hw *hw, u16 vid, bool set) +{ + u32 msgbuf[2]; + + msgbuf[0] = E1000_VF_SET_VLAN; + msgbuf[1] = vid; + /* Setting the 8 bit field MSG INFO to TRUE indicates "add" */ + if (set) + msgbuf[0] |= E1000_VF_SET_VLAN_ADD; + + e1000_write_msg_read_ack(hw, msgbuf, 2); +} + +/** e1000_rlpml_set_vf - Set the maximum receive packet length + * @hw: pointer to the HW structure + * @max_size: value to assign to max frame size + **/ +void e1000_rlpml_set_vf(struct e1000_hw *hw, u16 max_size) +{ + u32 msgbuf[2]; + + msgbuf[0] = E1000_VF_SET_LPE; + msgbuf[1] = max_size; + + e1000_write_msg_read_ack(hw, msgbuf, 2); +} + +/** + * e1000_promisc_set_vf - Set flags for Unicast or Multicast promisc + * @hw: pointer to the HW structure + * @uni: boolean indicating unicast promisc status + * @multi: boolean indicating multicast promisc status + **/ +s32 e1000_promisc_set_vf(struct e1000_hw *hw, enum e1000_promisc_type type) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + u32 msgbuf = E1000_VF_SET_PROMISC; + s32 ret_val; + + switch (type) { + case e1000_promisc_multicast: + msgbuf |= E1000_VF_SET_PROMISC_MULTICAST; + break; + case e1000_promisc_enabled: + msgbuf |= E1000_VF_SET_PROMISC_MULTICAST; + case e1000_promisc_unicast: + msgbuf |= E1000_VF_SET_PROMISC_UNICAST; + case e1000_promisc_disabled: + break; + default: + return -E1000_ERR_MAC_INIT; + } + + ret_val = mbx->ops.write_posted(hw, &msgbuf, 1, 0); + + if (!ret_val) + ret_val = mbx->ops.read_posted(hw, &msgbuf, 1, 0); + + if (!ret_val && !(msgbuf & E1000_VT_MSGTYPE_ACK)) + ret_val = -E1000_ERR_MAC_INIT; + + return ret_val; +} + +/** + * e1000_read_mac_addr_vf - Read device MAC address + * @hw: pointer to the HW structure + **/ +STATIC s32 e1000_read_mac_addr_vf(struct e1000_hw *hw) +{ + int i; + + for (i = 0; i < ETH_ADDR_LEN; i++) + hw->mac.addr[i] = hw->mac.perm_addr[i]; + + return E1000_SUCCESS; +} + +/** + * e1000_check_for_link_vf - Check for link for a virtual interface + * @hw: pointer to the HW structure + * + * Checks to see if the underlying PF is still talking to the VF and + * if it is then it reports the link state to the hardware, otherwise + * it reports link down and returns an error. + **/ +STATIC s32 e1000_check_for_link_vf(struct e1000_hw *hw) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val = E1000_SUCCESS; + u32 in_msg = 0; + + DEBUGFUNC("e1000_check_for_link_vf"); + + /* + * We only want to run this if there has been a rst asserted. 
+ * in this case that could mean a link change, device reset, + * or a virtual function reset + */ + + /* If we were hit with a reset or timeout drop the link */ + if (!mbx->ops.check_for_rst(hw, 0) || !mbx->timeout) + mac->get_link_status = true; + + if (!mac->get_link_status) + goto out; + + /* if link status is down no point in checking to see if pf is up */ + if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) + goto out; + + /* if the read failed it could just be a mailbox collision, best wait + * until we are called again and don't report an error */ + if (mbx->ops.read(hw, &in_msg, 1, 0)) + goto out; + + /* if incoming message isn't clear to send we are waiting on response */ + if (!(in_msg & E1000_VT_MSGTYPE_CTS)) { + /* message is not CTS and is NACK we have lost CTS status */ + if (in_msg & E1000_VT_MSGTYPE_NACK) + ret_val = -E1000_ERR_MAC_INIT; + goto out; + } + + /* at this point we know the PF is talking to us, check and see if + * we are still accepting timeout or if we had a timeout failure. + * if we failed then we will need to reinit */ + if (!mbx->timeout) { + ret_val = -E1000_ERR_MAC_INIT; + goto out; + } + + /* if we passed all the tests above then the link is up and we no + * longer need to check for link */ + mac->get_link_status = false; + +out: + return ret_val; +} + diff --git a/src/spdk/dpdk/drivers/net/e1000/base/e1000_vf.h b/src/spdk/dpdk/drivers/net/e1000/base/e1000_vf.h new file mode 100644 index 000000000..c05d557f0 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/e1000/base/e1000_vf.h @@ -0,0 +1,266 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001 - 2015 Intel Corporation + */ + +#ifndef _E1000_VF_H_ +#define _E1000_VF_H_ + +#include "e1000_osdep.h" +#include "e1000_regs.h" +#include "e1000_defines.h" + +struct e1000_hw; + +#define E1000_DEV_ID_82576_VF 0x10CA +#define E1000_DEV_ID_I350_VF 0x1520 + +#define E1000_VF_INIT_TIMEOUT 200 /* Num of retries to clear RSTI */ + +/* Additional Descriptor Control definitions */ +#define E1000_TXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Tx Queue */ +#define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Rx Queue */ + +/* SRRCTL bit definitions */ +#define E1000_SRRCTL(_n) ((_n) < 4 ? (0x0280C + ((_n) * 0x100)) : \ + (0x0C00C + ((_n) * 0x40))) +#define E1000_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */ +#define E1000_SRRCTL_BSIZEHDRSIZE_MASK 0x00000F00 +#define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */ +#define E1000_SRRCTL_DESCTYPE_LEGACY 0x00000000 +#define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 +#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000 +#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000 +#define E1000_SRRCTL_DESCTYPE_HDR_REPLICATION 0x06000000 +#define E1000_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000 +#define E1000_SRRCTL_DESCTYPE_MASK 0x0E000000 +#define E1000_SRRCTL_DROP_EN 0x80000000 + +#define E1000_SRRCTL_BSIZEPKT_MASK 0x0000007F +#define E1000_SRRCTL_BSIZEHDR_MASK 0x00003F00 + +/* Interrupt Defines */ +#define E1000_EICR 0x01580 /* Ext. Interrupt Cause Read - R/clr */ +#define E1000_EITR(_n) (0x01680 + ((_n) << 2)) +#define E1000_EICS 0x01520 /* Ext. Intr Cause Set -W0 */ +#define E1000_EIMS 0x01524 /* Ext. Intr Mask Set/Read -RW */ +#define E1000_EIMC 0x01528 /* Ext. Intr Mask Clear -WO */ +#define E1000_EIAC 0x0152C /* Ext. Intr Auto Clear -RW */ +#define E1000_EIAM 0x01530 /* Ext. 
Intr Ack Auto Clear Mask -RW */ +#define E1000_IVAR0 0x01700 /* Intr Vector Alloc (array) -RW */ +#define E1000_IVAR_MISC 0x01740 /* IVAR for "other" causes -RW */ +#define E1000_IVAR_VALID 0x80 + +/* Receive Descriptor - Advanced */ +union e1000_adv_rx_desc { + struct { + u64 pkt_addr; /* Packet buffer address */ + u64 hdr_addr; /* Header buffer address */ + } read; + struct { + struct { + union { + u32 data; + struct { + /* RSS type, Packet type */ + u16 pkt_info; + /* Split Header, header buffer len */ + u16 hdr_info; + } hs_rss; + } lo_dword; + union { + u32 rss; /* RSS Hash */ + struct { + u16 ip_id; /* IP id */ + u16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + u32 status_error; /* ext status/error */ + u16 length; /* Packet length */ + u16 vlan; /* VLAN tag */ + } upper; + } wb; /* writeback */ +}; + +#define E1000_RXDADV_HDRBUFLEN_MASK 0x7FE0 +#define E1000_RXDADV_HDRBUFLEN_SHIFT 5 + +/* Transmit Descriptor - Advanced */ +union e1000_adv_tx_desc { + struct { + u64 buffer_addr; /* Address of descriptor's data buf */ + u32 cmd_type_len; + u32 olinfo_status; + } read; + struct { + u64 rsvd; /* Reserved */ + u32 nxtseq_seed; + u32 status; + } wb; +}; + +/* Adv Transmit Descriptor Config Masks */ +#define E1000_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */ +#define E1000_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ +#define E1000_ADVTXD_DCMD_EOP 0x01000000 /* End of Packet */ +#define E1000_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define E1000_ADVTXD_DCMD_RS 0x08000000 /* Report Status */ +#define E1000_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */ +#define E1000_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */ +#define E1000_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ +#define E1000_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ + +/* Context descriptors */ +struct e1000_adv_tx_context_desc { + u32 vlan_macip_lens; + u32 seqnum_seed; + u32 type_tucmd_mlhl; + u32 mss_l4len_idx; +}; + +#define E1000_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ +#define E1000_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */ +#define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ +#define E1000_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ +#define E1000_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ + +enum e1000_mac_type { + e1000_undefined = 0, + e1000_vfadapt, + e1000_vfadapt_i350, + e1000_num_macs /* List is 1-based, so subtract 1 for true count. */ +}; + +struct e1000_vf_stats { + u64 base_gprc; + u64 base_gptc; + u64 base_gorc; + u64 base_gotc; + u64 base_mprc; + u64 base_gotlbc; + u64 base_gptlbc; + u64 base_gorlbc; + u64 base_gprlbc; + + u32 last_gprc; + u32 last_gptc; + u32 last_gorc; + u32 last_gotc; + u32 last_mprc; + u32 last_gotlbc; + u32 last_gptlbc; + u32 last_gorlbc; + u32 last_gprlbc; + + u64 gprc; + u64 gptc; + u64 gorc; + u64 gotc; + u64 mprc; + u64 gotlbc; + u64 gptlbc; + u64 gorlbc; + u64 gprlbc; +}; + +#include "e1000_mbx.h" + +struct e1000_mac_operations { + /* Function pointers for the MAC. 
*/ + s32 (*init_params)(struct e1000_hw *); + s32 (*check_for_link)(struct e1000_hw *); + void (*clear_vfta)(struct e1000_hw *); + s32 (*get_bus_info)(struct e1000_hw *); + s32 (*get_link_up_info)(struct e1000_hw *, u16 *, u16 *); + void (*update_mc_addr_list)(struct e1000_hw *, u8 *, u32); + s32 (*reset_hw)(struct e1000_hw *); + s32 (*init_hw)(struct e1000_hw *); + s32 (*setup_link)(struct e1000_hw *); + void (*write_vfta)(struct e1000_hw *, u32, u32); + int (*rar_set)(struct e1000_hw *, u8*, u32); + s32 (*read_mac_addr)(struct e1000_hw *); +}; + +struct e1000_mac_info { + struct e1000_mac_operations ops; + u8 addr[6]; + u8 perm_addr[6]; + + enum e1000_mac_type type; + + u16 mta_reg_count; + u16 rar_entry_count; + + bool get_link_status; +}; + +struct e1000_mbx_operations { + s32 (*init_params)(struct e1000_hw *hw); + s32 (*read)(struct e1000_hw *, u32 *, u16, u16); + s32 (*write)(struct e1000_hw *, u32 *, u16, u16); + s32 (*read_posted)(struct e1000_hw *, u32 *, u16, u16); + s32 (*write_posted)(struct e1000_hw *, u32 *, u16, u16); + s32 (*check_for_msg)(struct e1000_hw *, u16); + s32 (*check_for_ack)(struct e1000_hw *, u16); + s32 (*check_for_rst)(struct e1000_hw *, u16); +}; + +struct e1000_mbx_stats { + u32 msgs_tx; + u32 msgs_rx; + + u32 acks; + u32 reqs; + u32 rsts; +}; + +struct e1000_mbx_info { + struct e1000_mbx_operations ops; + struct e1000_mbx_stats stats; + u32 timeout; + u32 usec_delay; + u16 size; +}; + +struct e1000_dev_spec_vf { + u32 vf_number; + u32 v2p_mailbox; +}; + +struct e1000_hw { + void *back; + + u8 *hw_addr; + u8 *flash_address; + unsigned long io_base; + + struct e1000_mac_info mac; + struct e1000_mbx_info mbx; + + union { + struct e1000_dev_spec_vf vf; + } dev_spec; + + u16 device_id; + u16 subsystem_vendor_id; + u16 subsystem_device_id; + u16 vendor_id; + + u8 revision_id; +}; + +enum e1000_promisc_type { + e1000_promisc_disabled = 0, /* all promisc modes disabled */ + e1000_promisc_unicast = 1, /* unicast promiscuous enabled */ + e1000_promisc_multicast = 2, /* multicast promiscuous enabled */ + e1000_promisc_enabled = 3, /* both uni and multicast promisc */ + e1000_num_promisc_types +}; + +/* These functions must be implemented by drivers */ +s32 e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value); +void e1000_vfta_set_vf(struct e1000_hw *, u16, bool); +void e1000_rlpml_set_vf(struct e1000_hw *, u16); +s32 e1000_promisc_set_vf(struct e1000_hw *, enum e1000_promisc_type); +#endif /* _E1000_VF_H_ */ diff --git a/src/spdk/dpdk/drivers/net/e1000/base/meson.build b/src/spdk/dpdk/drivers/net/e1000/base/meson.build new file mode 100644 index 000000000..5e1716def --- /dev/null +++ b/src/spdk/dpdk/drivers/net/e1000/base/meson.build @@ -0,0 +1,37 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2017 Intel Corporation + +sources = [ + 'e1000_80003es2lan.c', + 'e1000_82540.c', + 'e1000_82541.c', + 'e1000_82542.c', + 'e1000_82543.c', + 'e1000_82571.c', + 'e1000_82575.c', + 'e1000_api.c', + 'e1000_i210.c', + 'e1000_ich8lan.c', + 'e1000_mac.c', + 'e1000_manage.c', + 'e1000_mbx.c', + 'e1000_nvm.c', + 'e1000_osdep.c', + 'e1000_phy.c', + 'e1000_vf.c' +] + +error_cflags = ['-Wno-uninitialized', '-Wno-unused-parameter', + '-Wno-unused-variable', '-Wno-misleading-indentation', + '-Wno-implicit-fallthrough'] +c_args = cflags +foreach flag: error_cflags + if cc.has_argument(flag) + c_args += flag + endif +endforeach + +base_lib = static_library('e1000_base', sources, + dependencies: static_rte_eal, + c_args: c_args) +base_objs = 
base_lib.extract_all_objects() diff --git a/src/spdk/dpdk/drivers/net/e1000/e1000_ethdev.h b/src/spdk/dpdk/drivers/net/e1000/e1000_ethdev.h new file mode 100644 index 000000000..1e41ae9de --- /dev/null +++ b/src/spdk/dpdk/drivers/net/e1000/e1000_ethdev.h @@ -0,0 +1,530 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2015 Intel Corporation + */ + +#ifndef _E1000_ETHDEV_H_ +#define _E1000_ETHDEV_H_ + +#include + +#include +#include +#include + +#define E1000_INTEL_VENDOR_ID 0x8086 + +/* need update link, bit flag */ +#define E1000_FLAG_NEED_LINK_UPDATE (uint32_t)(1 << 0) +#define E1000_FLAG_MAILBOX (uint32_t)(1 << 1) + +/* + * Defines that were not part of e1000_hw.h as they are not used by the FreeBSD + * driver. + */ +#define E1000_ADVTXD_POPTS_TXSM 0x00000200 /* L4 Checksum offload request */ +#define E1000_ADVTXD_POPTS_IXSM 0x00000100 /* IP Checksum offload request */ +#define E1000_ADVTXD_TUCMD_L4T_RSV 0x00001800 /* L4 Packet TYPE of Reserved */ +#define E1000_RXD_STAT_TMST 0x10000 /* Timestamped Packet indication */ +#define E1000_RXD_ERR_CKSUM_BIT 29 +#define E1000_RXD_ERR_CKSUM_MSK 3 +#define E1000_ADVTXD_MACLEN_SHIFT 9 /* Bit shift for l2_len */ +#define E1000_CTRL_EXT_EXTEND_VLAN (1<<26) /* EXTENDED VLAN */ +#define IGB_VFTA_SIZE 128 + +#define IGB_HKEY_MAX_INDEX 10 +#define IGB_MAX_RX_QUEUE_NUM 8 +#define IGB_MAX_RX_QUEUE_NUM_82576 16 + +#define E1000_I219_MAX_RX_QUEUE_NUM 2 +#define E1000_I219_MAX_TX_QUEUE_NUM 2 + +#define E1000_SYN_FILTER_ENABLE 0x00000001 /* syn filter enable field */ +#define E1000_SYN_FILTER_QUEUE 0x0000000E /* syn filter queue field */ +#define E1000_SYN_FILTER_QUEUE_SHIFT 1 /* syn filter queue field */ +#define E1000_RFCTL_SYNQFP 0x00080000 /* SYNQFP in RFCTL register */ + +#define E1000_ETQF_ETHERTYPE 0x0000FFFF +#define E1000_ETQF_QUEUE 0x00070000 +#define E1000_ETQF_QUEUE_SHIFT 16 +#define E1000_MAX_ETQF_FILTERS 8 + +#define E1000_IMIR_DSTPORT 0x0000FFFF +#define E1000_IMIR_PRIORITY 0xE0000000 +#define E1000_MAX_TTQF_FILTERS 8 +#define E1000_2TUPLE_MAX_PRI 7 + +#define E1000_MAX_FLEX_FILTERS 8 +#define E1000_MAX_FHFT 4 +#define E1000_MAX_FHFT_EXT 4 +#define E1000_FHFT_SIZE_IN_DWD 64 +#define E1000_MAX_FLEX_FILTER_PRI 7 +#define E1000_MAX_FLEX_FILTER_LEN 128 +#define E1000_MAX_FLEX_FILTER_DWDS \ + (E1000_MAX_FLEX_FILTER_LEN / sizeof(uint32_t)) +#define E1000_FLEX_FILTERS_MASK_SIZE \ + (E1000_MAX_FLEX_FILTER_DWDS / 2) +#define E1000_FHFT_QUEUEING_LEN 0x0000007F +#define E1000_FHFT_QUEUEING_QUEUE 0x00000700 +#define E1000_FHFT_QUEUEING_PRIO 0x00070000 +#define E1000_FHFT_QUEUEING_OFFSET 0xFC +#define E1000_FHFT_QUEUEING_QUEUE_SHIFT 8 +#define E1000_FHFT_QUEUEING_PRIO_SHIFT 16 +#define E1000_WUFC_FLEX_HQ 0x00004000 + +#define E1000_SPQF_SRCPORT 0x0000FFFF + +#define E1000_MAX_FTQF_FILTERS 8 +#define E1000_FTQF_PROTOCOL_MASK 0x000000FF +#define E1000_FTQF_5TUPLE_MASK_SHIFT 28 +#define E1000_FTQF_QUEUE_MASK 0x03ff0000 +#define E1000_FTQF_QUEUE_SHIFT 16 +#define E1000_FTQF_QUEUE_ENABLE 0x00000100 + +#define IGB_RSS_OFFLOAD_ALL ( \ + ETH_RSS_IPV4 | \ + ETH_RSS_NONFRAG_IPV4_TCP | \ + ETH_RSS_NONFRAG_IPV4_UDP | \ + ETH_RSS_IPV6 | \ + ETH_RSS_NONFRAG_IPV6_TCP | \ + ETH_RSS_NONFRAG_IPV6_UDP | \ + ETH_RSS_IPV6_EX | \ + ETH_RSS_IPV6_TCP_EX | \ + ETH_RSS_IPV6_UDP_EX) + +/* + * The overhead from MTU to max frame size. + * Considering VLAN so a tag needs to be counted. + */ +#define E1000_ETH_OVERHEAD (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + \ + VLAN_TAG_SIZE) + +/* + * Maximum number of Ring Descriptors. 
+ * + * Since RDLEN/TDLEN should be multiple of 128 bytes, the number of ring + * desscriptors should meet the following condition: + * (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0 + */ +#define E1000_MIN_RING_DESC 32 +#define E1000_MAX_RING_DESC 4096 + +/* + * TDBA/RDBA should be aligned on 16 byte boundary. But TDLEN/RDLEN should be + * multiple of 128 bytes. So we align TDBA/RDBA on 128 byte boundary. + * This will also optimize cache line size effect. + * H/W supports up to cache line size 128. + */ +#define E1000_ALIGN 128 + +#define IGB_RXD_ALIGN (E1000_ALIGN / sizeof(union e1000_adv_rx_desc)) +#define IGB_TXD_ALIGN (E1000_ALIGN / sizeof(union e1000_adv_tx_desc)) + +#define EM_RXD_ALIGN (E1000_ALIGN / sizeof(struct e1000_rx_desc)) +#define EM_TXD_ALIGN (E1000_ALIGN / sizeof(struct e1000_data_desc)) + +#define E1000_MISC_VEC_ID RTE_INTR_VEC_ZERO_OFFSET +#define E1000_RX_VEC_START RTE_INTR_VEC_RXTX_OFFSET + +#define IGB_TX_MAX_SEG UINT8_MAX +#define IGB_TX_MAX_MTU_SEG UINT8_MAX +#define EM_TX_MAX_SEG UINT8_MAX +#define EM_TX_MAX_MTU_SEG UINT8_MAX + +#define MAC_TYPE_FILTER_SUP(type) do {\ + if ((type) != e1000_82580 && (type) != e1000_i350 &&\ + (type) != e1000_82576 && (type) != e1000_i210 &&\ + (type) != e1000_i211)\ + return -ENOTSUP;\ +} while (0) + +#define MAC_TYPE_FILTER_SUP_EXT(type) do {\ + if ((type) != e1000_82580 && (type) != e1000_i350 &&\ + (type) != e1000_i210 && (type) != e1000_i211)\ + return -ENOTSUP; \ +} while (0) + +/* structure for interrupt relative data */ +struct e1000_interrupt { + uint32_t flags; + uint32_t mask; +}; + +/* local vfta copy */ +struct e1000_vfta { + uint32_t vfta[IGB_VFTA_SIZE]; +}; + +/* + * VF data which used by PF host only + */ +#define E1000_MAX_VF_MC_ENTRIES 30 +struct e1000_vf_info { + uint8_t vf_mac_addresses[RTE_ETHER_ADDR_LEN]; + uint16_t vf_mc_hashes[E1000_MAX_VF_MC_ENTRIES]; + uint16_t num_vf_mc_hashes; + uint16_t default_vf_vlan_id; + uint16_t vlans_enabled; + uint16_t pf_qos; + uint16_t vlan_count; + uint16_t tx_rate; +}; + +TAILQ_HEAD(e1000_flex_filter_list, e1000_flex_filter); + +struct e1000_flex_filter_info { + uint16_t len; + uint32_t dwords[E1000_MAX_FLEX_FILTER_DWDS]; /* flex bytes in dword. */ + /* if mask bit is 1b, do not compare corresponding byte in dwords. */ + uint8_t mask[E1000_FLEX_FILTERS_MASK_SIZE]; + uint8_t priority; +}; + +/* Flex filter structure */ +struct e1000_flex_filter { + TAILQ_ENTRY(e1000_flex_filter) entries; + uint16_t index; /* index of flex filter */ + struct e1000_flex_filter_info filter_info; + uint16_t queue; /* rx queue assigned to */ +}; + +TAILQ_HEAD(e1000_5tuple_filter_list, e1000_5tuple_filter); +TAILQ_HEAD(e1000_2tuple_filter_list, e1000_2tuple_filter); + +struct e1000_5tuple_filter_info { + uint32_t dst_ip; + uint32_t src_ip; + uint16_t dst_port; + uint16_t src_port; + uint8_t proto; /* l4 protocol. */ + /* the packet matched above 5tuple and contain any set bit will hit this filter. */ + uint8_t tcp_flags; + uint8_t priority; /* seven levels (001b-111b), 111b is highest, + used when more than one filter matches. */ + uint8_t dst_ip_mask:1, /* if mask is 1b, do not compare dst ip. */ + src_ip_mask:1, /* if mask is 1b, do not compare src ip. */ + dst_port_mask:1, /* if mask is 1b, do not compare dst port. */ + src_port_mask:1, /* if mask is 1b, do not compare src port. */ + proto_mask:1; /* if mask is 1b, do not compare protocol. */ +}; + +struct e1000_2tuple_filter_info { + uint16_t dst_port; + uint8_t proto; /* l4 protocol. 
*/ + /* the packet matched above 2tuple and contain any set bit will hit this filter. */ + uint8_t tcp_flags; + uint8_t priority; /* seven levels (001b-111b), 111b is highest, + used when more than one filter matches. */ + uint8_t dst_ip_mask:1, /* if mask is 1b, do not compare dst ip. */ + src_ip_mask:1, /* if mask is 1b, do not compare src ip. */ + dst_port_mask:1, /* if mask is 1b, do not compare dst port. */ + src_port_mask:1, /* if mask is 1b, do not compare src port. */ + proto_mask:1; /* if mask is 1b, do not compare protocol. */ +}; + +/* 5tuple filter structure */ +struct e1000_5tuple_filter { + TAILQ_ENTRY(e1000_5tuple_filter) entries; + uint16_t index; /* the index of 5tuple filter */ + struct e1000_5tuple_filter_info filter_info; + uint16_t queue; /* rx queue assigned to */ +}; + +/* 2tuple filter structure */ +struct e1000_2tuple_filter { + TAILQ_ENTRY(e1000_2tuple_filter) entries; + uint16_t index; /* the index of 2tuple filter */ + struct e1000_2tuple_filter_info filter_info; + uint16_t queue; /* rx queue assigned to */ +}; + +/* ethertype filter structure */ +struct igb_ethertype_filter { + uint16_t ethertype; + uint32_t etqf; +}; + +struct igb_rte_flow_rss_conf { + struct rte_flow_action_rss conf; /**< RSS parameters. */ + uint8_t key[IGB_HKEY_MAX_INDEX * sizeof(uint32_t)]; /* Hash key. */ + /* Queues indices to use. */ + uint16_t queue[IGB_MAX_RX_QUEUE_NUM_82576]; +}; + +/* + * Structure to store filters'info. + */ +struct e1000_filter_info { + uint8_t ethertype_mask; /* Bit mask for every used ethertype filter */ + /* store used ethertype filters*/ + struct igb_ethertype_filter ethertype_filters[E1000_MAX_ETQF_FILTERS]; + uint8_t flex_mask; /* Bit mask for every used flex filter */ + struct e1000_flex_filter_list flex_list; + /* Bit mask for every used 5tuple filter */ + uint8_t fivetuple_mask; + struct e1000_5tuple_filter_list fivetuple_list; + /* Bit mask for every used 2tuple filter */ + uint8_t twotuple_mask; + struct e1000_2tuple_filter_list twotuple_list; + /* store the SYN filter info */ + uint32_t syn_info; + /* store the rss filter info */ + struct igb_rte_flow_rss_conf rss_info; +}; + +/* + * Structure to store private data for each driver instance (for each port). 
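struct e1000_filter_info above tracks which ethertype, flex, 2-tuple and 5-tuple hardware slots are in use with small per-type bitmasks (one bit per slot). A generic sketch of that bookkeeping, independent of the driver's own helpers:

    #include <stdint.h>

    /* Find the first free slot in an up-to-8-slot bitmask; -1 when full. */
    static int filter_slot_alloc(uint8_t *mask, unsigned int nb_slots)
    {
        unsigned int i;

        for (i = 0; i < nb_slots && i < 8; i++) {
            if (!(*mask & (1u << i))) {
                *mask |= (uint8_t)(1u << i);   /* mark slot i as used */
                return (int)i;
            }
        }
        return -1;
    }

    static void filter_slot_free(uint8_t *mask, unsigned int idx)
    {
        *mask &= (uint8_t)~(1u << idx);        /* mark slot idx as free again */
    }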
+ */ +struct e1000_adapter { + struct e1000_hw hw; + struct e1000_hw_stats stats; + struct e1000_interrupt intr; + struct e1000_vfta shadow_vfta; + struct e1000_vf_info *vfdata; + struct e1000_filter_info filter; + bool stopped; + struct rte_timecounter systime_tc; + struct rte_timecounter rx_tstamp_tc; + struct rte_timecounter tx_tstamp_tc; +}; + +#define E1000_DEV_PRIVATE(adapter) \ + ((struct e1000_adapter *)adapter) + +#define E1000_DEV_PRIVATE_TO_HW(adapter) \ + (&((struct e1000_adapter *)adapter)->hw) + +#define E1000_DEV_PRIVATE_TO_STATS(adapter) \ + (&((struct e1000_adapter *)adapter)->stats) + +#define E1000_DEV_PRIVATE_TO_INTR(adapter) \ + (&((struct e1000_adapter *)adapter)->intr) + +#define E1000_DEV_PRIVATE_TO_VFTA(adapter) \ + (&((struct e1000_adapter *)adapter)->shadow_vfta) + +#define E1000_DEV_PRIVATE_TO_P_VFDATA(adapter) \ + (&((struct e1000_adapter *)adapter)->vfdata) + +#define E1000_DEV_PRIVATE_TO_FILTER_INFO(adapter) \ + (&((struct e1000_adapter *)adapter)->filter) + +struct rte_flow { + enum rte_filter_type filter_type; + void *rule; +}; + +/* ntuple filter list structure */ +struct igb_ntuple_filter_ele { + TAILQ_ENTRY(igb_ntuple_filter_ele) entries; + struct rte_eth_ntuple_filter filter_info; +}; + +/* ethertype filter list structure */ +struct igb_ethertype_filter_ele { + TAILQ_ENTRY(igb_ethertype_filter_ele) entries; + struct rte_eth_ethertype_filter filter_info; +}; + +/* syn filter list structure */ +struct igb_eth_syn_filter_ele { + TAILQ_ENTRY(igb_eth_syn_filter_ele) entries; + struct rte_eth_syn_filter filter_info; +}; + +/* flex filter list structure */ +struct igb_flex_filter_ele { + TAILQ_ENTRY(igb_flex_filter_ele) entries; + struct rte_eth_flex_filter filter_info; +}; + +/* rss filter list structure */ +struct igb_rss_conf_ele { + TAILQ_ENTRY(igb_rss_conf_ele) entries; + struct igb_rte_flow_rss_conf filter_info; +}; + +/* igb_flow memory list structure */ +struct igb_flow_mem { + TAILQ_ENTRY(igb_flow_mem) entries; + struct rte_flow *flow; + struct rte_eth_dev *dev; +}; + +TAILQ_HEAD(igb_ntuple_filter_list, igb_ntuple_filter_ele); +extern struct igb_ntuple_filter_list igb_filter_ntuple_list; +TAILQ_HEAD(igb_ethertype_filter_list, igb_ethertype_filter_ele); +extern struct igb_ethertype_filter_list igb_filter_ethertype_list; +TAILQ_HEAD(igb_syn_filter_list, igb_eth_syn_filter_ele); +extern struct igb_syn_filter_list igb_filter_syn_list; +TAILQ_HEAD(igb_flex_filter_list, igb_flex_filter_ele); +extern struct igb_flex_filter_list igb_filter_flex_list; +TAILQ_HEAD(igb_rss_filter_list, igb_rss_conf_ele); +extern struct igb_rss_filter_list igb_filter_rss_list; +TAILQ_HEAD(igb_flow_mem_list, igb_flow_mem); +extern struct igb_flow_mem_list igb_flow_list; + +extern const struct rte_flow_ops igb_flow_ops; + +/* + * RX/TX IGB function prototypes + */ +void eth_igb_tx_queue_release(void *txq); +void eth_igb_rx_queue_release(void *rxq); +void igb_dev_clear_queues(struct rte_eth_dev *dev); +void igb_dev_free_queues(struct rte_eth_dev *dev); + +uint64_t igb_get_rx_port_offloads_capa(struct rte_eth_dev *dev); +uint64_t igb_get_rx_queue_offloads_capa(struct rte_eth_dev *dev); + +int eth_igb_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id, + uint16_t nb_rx_desc, unsigned int socket_id, + const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *mb_pool); + +uint32_t eth_igb_rx_queue_count(struct rte_eth_dev *dev, + uint16_t rx_queue_id); + +int eth_igb_rx_descriptor_done(void *rx_queue, uint16_t offset); + +int eth_igb_rx_descriptor_status(void *rx_queue, 
uint16_t offset); +int eth_igb_tx_descriptor_status(void *tx_queue, uint16_t offset); + +uint64_t igb_get_tx_port_offloads_capa(struct rte_eth_dev *dev); +uint64_t igb_get_tx_queue_offloads_capa(struct rte_eth_dev *dev); + +int eth_igb_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id, + uint16_t nb_tx_desc, unsigned int socket_id, + const struct rte_eth_txconf *tx_conf); + +int eth_igb_tx_done_cleanup(void *txq, uint32_t free_cnt); + +int eth_igb_rx_init(struct rte_eth_dev *dev); + +void eth_igb_tx_init(struct rte_eth_dev *dev); + +uint16_t eth_igb_xmit_pkts(void *txq, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); + +uint16_t eth_igb_prep_pkts(void *txq, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); + +uint16_t eth_igb_recv_pkts(void *rxq, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); + +uint16_t eth_igb_recv_scattered_pkts(void *rxq, + struct rte_mbuf **rx_pkts, uint16_t nb_pkts); + +int eth_igb_rss_hash_update(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf); + +int eth_igb_rss_hash_conf_get(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf); + +int eth_igbvf_rx_init(struct rte_eth_dev *dev); + +void eth_igbvf_tx_init(struct rte_eth_dev *dev); + +/* + * misc function prototypes + */ +void igb_pf_host_init(struct rte_eth_dev *eth_dev); + +void igb_pf_mbx_process(struct rte_eth_dev *eth_dev); + +int igb_pf_host_configure(struct rte_eth_dev *eth_dev); + +void igb_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, + struct rte_eth_rxq_info *qinfo); + +void igb_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, + struct rte_eth_txq_info *qinfo); + +uint32_t em_get_max_pktlen(struct rte_eth_dev *dev); + +/* + * RX/TX EM function prototypes + */ +void eth_em_tx_queue_release(void *txq); +void eth_em_rx_queue_release(void *rxq); + +void em_dev_clear_queues(struct rte_eth_dev *dev); +void em_dev_free_queues(struct rte_eth_dev *dev); + +uint64_t em_get_rx_port_offloads_capa(struct rte_eth_dev *dev); +uint64_t em_get_rx_queue_offloads_capa(struct rte_eth_dev *dev); + +int eth_em_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id, + uint16_t nb_rx_desc, unsigned int socket_id, + const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *mb_pool); + +uint32_t eth_em_rx_queue_count(struct rte_eth_dev *dev, + uint16_t rx_queue_id); + +int eth_em_rx_descriptor_done(void *rx_queue, uint16_t offset); + +int eth_em_rx_descriptor_status(void *rx_queue, uint16_t offset); +int eth_em_tx_descriptor_status(void *tx_queue, uint16_t offset); + +uint64_t em_get_tx_port_offloads_capa(struct rte_eth_dev *dev); +uint64_t em_get_tx_queue_offloads_capa(struct rte_eth_dev *dev); + +int eth_em_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id, + uint16_t nb_tx_desc, unsigned int socket_id, + const struct rte_eth_txconf *tx_conf); + +int eth_em_rx_init(struct rte_eth_dev *dev); + +void eth_em_tx_init(struct rte_eth_dev *dev); + +uint16_t eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); + +uint16_t eth_em_prep_pkts(void *txq, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); + +uint16_t eth_em_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); + +uint16_t eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); + +void em_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, + struct rte_eth_rxq_info *qinfo); + +void em_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, + struct rte_eth_txq_info *qinfo); + +void igb_pf_host_uninit(struct rte_eth_dev 
*dev); + +void igb_filterlist_flush(struct rte_eth_dev *dev); +int igb_delete_5tuple_filter_82576(struct rte_eth_dev *dev, + struct e1000_5tuple_filter *filter); +int igb_delete_2tuple_filter(struct rte_eth_dev *dev, + struct e1000_2tuple_filter *filter); +void igb_remove_flex_filter(struct rte_eth_dev *dev, + struct e1000_flex_filter *filter); +int igb_ethertype_filter_remove(struct e1000_filter_info *filter_info, + uint8_t idx); +int igb_add_del_ntuple_filter(struct rte_eth_dev *dev, + struct rte_eth_ntuple_filter *ntuple_filter, bool add); +int igb_add_del_ethertype_filter(struct rte_eth_dev *dev, + struct rte_eth_ethertype_filter *filter, + bool add); +int eth_igb_syn_filter_set(struct rte_eth_dev *dev, + struct rte_eth_syn_filter *filter, + bool add); +int eth_igb_add_del_flex_filter(struct rte_eth_dev *dev, + struct rte_eth_flex_filter *filter, + bool add); +int igb_rss_conf_init(struct rte_eth_dev *dev, + struct igb_rte_flow_rss_conf *out, + const struct rte_flow_action_rss *in); +int igb_action_rss_same(const struct rte_flow_action_rss *comp, + const struct rte_flow_action_rss *with); +int igb_config_rss_filter(struct rte_eth_dev *dev, + struct igb_rte_flow_rss_conf *conf, + bool add); +void em_flush_desc_rings(struct rte_eth_dev *dev); + +#endif /* _E1000_ETHDEV_H_ */ diff --git a/src/spdk/dpdk/drivers/net/e1000/e1000_logs.c b/src/spdk/dpdk/drivers/net/e1000/e1000_logs.c new file mode 100644 index 000000000..231f5c03e --- /dev/null +++ b/src/spdk/dpdk/drivers/net/e1000/e1000_logs.c @@ -0,0 +1,56 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Intel Corporation + */ + +#include "e1000_logs.h" + +/* declared as extern in e1000_logs.h */ +int e1000_logtype_init; +int e1000_logtype_driver; + +#ifdef RTE_LIBRTE_E1000_DEBUG_RX +int e1000_logtype_rx; +#endif +#ifdef RTE_LIBRTE_E1000_DEBUG_TX +int e1000_logtype_tx; +#endif +#ifdef RTE_LIBRTE_E1000_DEBUG_TX_FREE +int e1000_logtype_tx_free; +#endif + +/* avoids double registering of logs if EM and IGB drivers are in use */ +static int e1000_log_initialized; + +void +e1000_igb_init_log(void) +{ + if (e1000_log_initialized) + return; + + e1000_logtype_init = rte_log_register("pmd.net.e1000.init"); + if (e1000_logtype_init >= 0) + rte_log_set_level(e1000_logtype_init, RTE_LOG_NOTICE); + e1000_logtype_driver = rte_log_register("pmd.net.e1000.driver"); + if (e1000_logtype_driver >= 0) + rte_log_set_level(e1000_logtype_driver, RTE_LOG_NOTICE); + +#ifdef RTE_LIBRTE_E1000_DEBUG_RX + e1000_logtype_rx = rte_log_register("pmd.net.e1000.rx"); + if (e1000_logtype_rx >= 0) + rte_log_set_level(e1000_logtype_rx, RTE_LOG_DEBUG); +#endif + +#ifdef RTE_LIBRTE_E1000_DEBUG_TX + e1000_logtype_tx = rte_log_register("pmd.net.e1000.tx"); + if (e1000_logtype_tx >= 0) + rte_log_set_level(e1000_logtype_tx, RTE_LOG_DEBUG); +#endif + +#ifdef RTE_LIBRTE_E1000_DEBUG_TX_FREE + e1000_logtype_tx_free = rte_log_register("pmd.net.e1000.tx_free"); + if (e1000_logtype_tx_free >= 0) + rte_log_set_level(e1000_logtype_tx_free, RTE_LOG_DEBUG); +#endif + + e1000_log_initialized = 1; +} diff --git a/src/spdk/dpdk/drivers/net/e1000/e1000_logs.h b/src/spdk/dpdk/drivers/net/e1000/e1000_logs.h new file mode 100644 index 000000000..2612134f3 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/e1000/e1000_logs.h @@ -0,0 +1,57 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2014 Intel Corporation + */ + +#ifndef _E1000_LOGS_H_ +#define _E1000_LOGS_H_ + +#include + +extern int e1000_logtype_init; + +#define PMD_INIT_LOG(level, fmt, args...) 
\ + rte_log(RTE_LOG_ ## level, e1000_logtype_init, \ + "%s(): " fmt "\n", __func__, ##args) + +#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>") + +#ifdef RTE_LIBRTE_E1000_DEBUG_RX +extern int e1000_logtype_rx; +#define PMD_RX_LOG(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, e1000_logtype_rx, \ + "%s(): " fmt "\n", __func__, ## args) +#else +#define PMD_RX_LOG(level, fmt, args...) do { } while (0) +#endif + +#ifdef RTE_LIBRTE_E1000_DEBUG_TX +extern int e1000_logtype_tx; +#define PMD_TX_LOG(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, e1000_logtype_tx, \ + "%s(): " fmt "\n", __func__, ## args) +#else +#define PMD_TX_LOG(level, fmt, args...) do { } while (0) +#endif + +#ifdef RTE_LIBRTE_E1000_DEBUG_TX_FREE +extern int e1000_logtype_tx_free; +#define PMD_TX_FREE_LOG(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, e1000_logtype_tx_free, \ + "%s(): " fmt "\n", __func__, ## args) +#else +#define PMD_TX_FREE_LOG(level, fmt, args...) do { } while (0) +#endif + +extern int e1000_logtype_driver; +#define PMD_DRV_LOG_RAW(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, e1000_logtype_driver, "%s(): " fmt, \ + __func__, ## args) + +#define PMD_DRV_LOG(level, fmt, args...) \ + PMD_DRV_LOG_RAW(level, fmt "\n", ## args) + + +/* log init function shared by e1000 and igb drivers */ +void e1000_igb_init_log(void); + +#endif /* _E1000_LOGS_H_ */ diff --git a/src/spdk/dpdk/drivers/net/e1000/em_ethdev.c b/src/spdk/dpdk/drivers/net/e1000/em_ethdev.c new file mode 100644 index 000000000..902b1cdca --- /dev/null +++ b/src/spdk/dpdk/drivers/net/e1000/em_ethdev.c @@ -0,0 +1,1851 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2016 Intel Corporation + */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "e1000_logs.h" +#include "base/e1000_api.h" +#include "e1000_ethdev.h" + +#define EM_EIAC 0x000DC + +#define PMD_ROUNDUP(x,y) (((x) + (y) - 1)/(y) * (y)) + + +static int eth_em_configure(struct rte_eth_dev *dev); +static int eth_em_start(struct rte_eth_dev *dev); +static void eth_em_stop(struct rte_eth_dev *dev); +static void eth_em_close(struct rte_eth_dev *dev); +static int eth_em_promiscuous_enable(struct rte_eth_dev *dev); +static int eth_em_promiscuous_disable(struct rte_eth_dev *dev); +static int eth_em_allmulticast_enable(struct rte_eth_dev *dev); +static int eth_em_allmulticast_disable(struct rte_eth_dev *dev); +static int eth_em_link_update(struct rte_eth_dev *dev, + int wait_to_complete); +static int eth_em_stats_get(struct rte_eth_dev *dev, + struct rte_eth_stats *rte_stats); +static int eth_em_stats_reset(struct rte_eth_dev *dev); +static int eth_em_infos_get(struct rte_eth_dev *dev, + struct rte_eth_dev_info *dev_info); +static int eth_em_flow_ctrl_get(struct rte_eth_dev *dev, + struct rte_eth_fc_conf *fc_conf); +static int eth_em_flow_ctrl_set(struct rte_eth_dev *dev, + struct rte_eth_fc_conf *fc_conf); +static int eth_em_interrupt_setup(struct rte_eth_dev *dev); +static int eth_em_rxq_interrupt_setup(struct rte_eth_dev *dev); +static int eth_em_interrupt_get_status(struct rte_eth_dev *dev); +static int eth_em_interrupt_action(struct rte_eth_dev *dev, + struct rte_intr_handle *handle); +static void eth_em_interrupt_handler(void *param); + +static int em_hw_init(struct e1000_hw *hw); +static int em_hardware_init(struct e1000_hw *hw); +static void em_hw_control_acquire(struct e1000_hw *hw); +static void 
em_hw_control_release(struct e1000_hw *hw); +static void em_init_manageability(struct e1000_hw *hw); +static void em_release_manageability(struct e1000_hw *hw); + +static int eth_em_mtu_set(struct rte_eth_dev *dev, uint16_t mtu); + +static int eth_em_vlan_filter_set(struct rte_eth_dev *dev, + uint16_t vlan_id, int on); +static int eth_em_vlan_offload_set(struct rte_eth_dev *dev, int mask); +static void em_vlan_hw_filter_enable(struct rte_eth_dev *dev); +static void em_vlan_hw_filter_disable(struct rte_eth_dev *dev); +static void em_vlan_hw_strip_enable(struct rte_eth_dev *dev); +static void em_vlan_hw_strip_disable(struct rte_eth_dev *dev); + +/* +static void eth_em_vlan_filter_set(struct rte_eth_dev *dev, + uint16_t vlan_id, int on); +*/ + +static int eth_em_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id); +static int eth_em_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id); +static void em_lsc_intr_disable(struct e1000_hw *hw); +static void em_rxq_intr_enable(struct e1000_hw *hw); +static void em_rxq_intr_disable(struct e1000_hw *hw); + +static int eth_em_led_on(struct rte_eth_dev *dev); +static int eth_em_led_off(struct rte_eth_dev *dev); + +static int em_get_rx_buffer_size(struct e1000_hw *hw); +static int eth_em_rar_set(struct rte_eth_dev *dev, + struct rte_ether_addr *mac_addr, + uint32_t index, uint32_t pool); +static void eth_em_rar_clear(struct rte_eth_dev *dev, uint32_t index); +static int eth_em_default_mac_addr_set(struct rte_eth_dev *dev, + struct rte_ether_addr *addr); + +static int eth_em_set_mc_addr_list(struct rte_eth_dev *dev, + struct rte_ether_addr *mc_addr_set, + uint32_t nb_mc_addr); + +#define EM_FC_PAUSE_TIME 0x0680 +#define EM_LINK_UPDATE_CHECK_TIMEOUT 90 /* 9s */ +#define EM_LINK_UPDATE_CHECK_INTERVAL 100 /* ms */ + +static enum e1000_fc_mode em_fc_setting = e1000_fc_full; + +/* + * The set of PCI devices this driver supports + */ +static const struct rte_pci_id pci_id_em_map[] = { + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82540EM) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82545EM_COPPER) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82545EM_FIBER) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82546EB_COPPER) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82546EB_FIBER) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82546EB_QUAD_COPPER) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82571EB_COPPER) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82571EB_FIBER) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82571EB_SERDES) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82571EB_SERDES_DUAL) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82571EB_SERDES_QUAD) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82571EB_QUAD_COPPER) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82571PT_QUAD_COPPER) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82571EB_QUAD_FIBER) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82571EB_QUAD_COPPER_LP) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82572EI_COPPER) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82572EI_FIBER) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82572EI_SERDES) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82572EI) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82573L) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82574L) }, + { 
RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82574LA) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82583V) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH2_LV_LM) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_LPT_I217_LM) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_LPT_I217_V) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_LPTLP_I218_LM) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_LPTLP_I218_V) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_I218_LM2) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_I218_V2) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_I218_LM3) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_I218_V3) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_SPT_I219_LM) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_SPT_I219_V) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_SPT_I219_LM2) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_SPT_I219_V2) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_LBG_I219_LM3) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_SPT_I219_LM4) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_SPT_I219_V4) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_SPT_I219_LM5) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_SPT_I219_V5) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_CNP_I219_LM6) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_CNP_I219_V6) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_CNP_I219_LM7) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_CNP_I219_V7) }, + { .vendor_id = 0, /* sentinel */ }, +}; + +static const struct eth_dev_ops eth_em_ops = { + .dev_configure = eth_em_configure, + .dev_start = eth_em_start, + .dev_stop = eth_em_stop, + .dev_close = eth_em_close, + .promiscuous_enable = eth_em_promiscuous_enable, + .promiscuous_disable = eth_em_promiscuous_disable, + .allmulticast_enable = eth_em_allmulticast_enable, + .allmulticast_disable = eth_em_allmulticast_disable, + .link_update = eth_em_link_update, + .stats_get = eth_em_stats_get, + .stats_reset = eth_em_stats_reset, + .dev_infos_get = eth_em_infos_get, + .mtu_set = eth_em_mtu_set, + .vlan_filter_set = eth_em_vlan_filter_set, + .vlan_offload_set = eth_em_vlan_offload_set, + .rx_queue_setup = eth_em_rx_queue_setup, + .rx_queue_release = eth_em_rx_queue_release, + .rx_queue_count = eth_em_rx_queue_count, + .rx_descriptor_done = eth_em_rx_descriptor_done, + .rx_descriptor_status = eth_em_rx_descriptor_status, + .tx_descriptor_status = eth_em_tx_descriptor_status, + .tx_queue_setup = eth_em_tx_queue_setup, + .tx_queue_release = eth_em_tx_queue_release, + .rx_queue_intr_enable = eth_em_rx_queue_intr_enable, + .rx_queue_intr_disable = eth_em_rx_queue_intr_disable, + .dev_led_on = eth_em_led_on, + .dev_led_off = eth_em_led_off, + .flow_ctrl_get = eth_em_flow_ctrl_get, + .flow_ctrl_set = eth_em_flow_ctrl_set, + .mac_addr_set = eth_em_default_mac_addr_set, + .mac_addr_add = eth_em_rar_set, + .mac_addr_remove = eth_em_rar_clear, + .set_mc_addr_list = eth_em_set_mc_addr_list, + .rxq_info_get = em_rxq_info_get, + .txq_info_get = em_txq_info_get, +}; + + +/** + * eth_em_dev_is_ich8 - Check for ICH8 device + * @hw: pointer to the HW structure + * + * return TRUE for ICH8, otherwise FALSE + **/ +static bool +eth_em_dev_is_ich8(struct e1000_hw *hw) +{ + 
DEBUGFUNC("eth_em_dev_is_ich8"); + + switch (hw->device_id) { + case E1000_DEV_ID_PCH2_LV_LM: + case E1000_DEV_ID_PCH_LPT_I217_LM: + case E1000_DEV_ID_PCH_LPT_I217_V: + case E1000_DEV_ID_PCH_LPTLP_I218_LM: + case E1000_DEV_ID_PCH_LPTLP_I218_V: + case E1000_DEV_ID_PCH_I218_V2: + case E1000_DEV_ID_PCH_I218_LM2: + case E1000_DEV_ID_PCH_I218_V3: + case E1000_DEV_ID_PCH_I218_LM3: + case E1000_DEV_ID_PCH_SPT_I219_LM: + case E1000_DEV_ID_PCH_SPT_I219_V: + case E1000_DEV_ID_PCH_SPT_I219_LM2: + case E1000_DEV_ID_PCH_SPT_I219_V2: + case E1000_DEV_ID_PCH_LBG_I219_LM3: + case E1000_DEV_ID_PCH_SPT_I219_LM4: + case E1000_DEV_ID_PCH_SPT_I219_V4: + case E1000_DEV_ID_PCH_SPT_I219_LM5: + case E1000_DEV_ID_PCH_SPT_I219_V5: + case E1000_DEV_ID_PCH_CNP_I219_LM6: + case E1000_DEV_ID_PCH_CNP_I219_V6: + case E1000_DEV_ID_PCH_CNP_I219_LM7: + case E1000_DEV_ID_PCH_CNP_I219_V7: + return 1; + default: + return 0; + } +} + +static int +eth_em_dev_init(struct rte_eth_dev *eth_dev) +{ + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + struct e1000_adapter *adapter = + E1000_DEV_PRIVATE(eth_dev->data->dev_private); + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); + struct e1000_vfta * shadow_vfta = + E1000_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private); + + eth_dev->dev_ops = ð_em_ops; + eth_dev->rx_pkt_burst = (eth_rx_burst_t)ð_em_recv_pkts; + eth_dev->tx_pkt_burst = (eth_tx_burst_t)ð_em_xmit_pkts; + eth_dev->tx_pkt_prepare = (eth_tx_prep_t)ð_em_prep_pkts; + + /* for secondary processes, we don't initialise any further as primary + * has already done this work. Only check we don't need a different + * RX function */ + if (rte_eal_process_type() != RTE_PROC_PRIMARY){ + if (eth_dev->data->scattered_rx) + eth_dev->rx_pkt_burst = + (eth_rx_burst_t)ð_em_recv_scattered_pkts; + return 0; + } + + rte_eth_copy_pci_info(eth_dev, pci_dev); + + hw->hw_addr = (void *)pci_dev->mem_resource[0].addr; + hw->device_id = pci_dev->id.device_id; + adapter->stopped = 0; + + /* For ICH8 support we'll need to map the flash memory BAR */ + if (eth_em_dev_is_ich8(hw)) + hw->flash_address = (void *)pci_dev->mem_resource[1].addr; + + if (e1000_setup_init_funcs(hw, TRUE) != E1000_SUCCESS || + em_hw_init(hw) != 0) { + PMD_INIT_LOG(ERR, "port_id %d vendorID=0x%x deviceID=0x%x: " + "failed to init HW", + eth_dev->data->port_id, pci_dev->id.vendor_id, + pci_dev->id.device_id); + return -ENODEV; + } + + /* Allocate memory for storing MAC addresses */ + eth_dev->data->mac_addrs = rte_zmalloc("e1000", RTE_ETHER_ADDR_LEN * + hw->mac.rar_entry_count, 0); + if (eth_dev->data->mac_addrs == NULL) { + PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to " + "store MAC addresses", + RTE_ETHER_ADDR_LEN * hw->mac.rar_entry_count); + return -ENOMEM; + } + + /* Copy the permanent MAC address */ + rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr, + eth_dev->data->mac_addrs); + + /* Pass the information to the rte_eth_dev_close() that it should also + * release the private port resources. 
+ */ + eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE; + + /* initialize the vfta */ + memset(shadow_vfta, 0, sizeof(*shadow_vfta)); + + PMD_INIT_LOG(DEBUG, "port_id %d vendorID=0x%x deviceID=0x%x", + eth_dev->data->port_id, pci_dev->id.vendor_id, + pci_dev->id.device_id); + + rte_intr_callback_register(intr_handle, + eth_em_interrupt_handler, eth_dev); + + return 0; +} + +static int +eth_em_dev_uninit(struct rte_eth_dev *eth_dev) +{ + PMD_INIT_FUNC_TRACE(); + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; + + eth_em_close(eth_dev); + + return 0; +} + +static int eth_em_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, + struct rte_pci_device *pci_dev) +{ + return rte_eth_dev_pci_generic_probe(pci_dev, + sizeof(struct e1000_adapter), eth_em_dev_init); +} + +static int eth_em_pci_remove(struct rte_pci_device *pci_dev) +{ + return rte_eth_dev_pci_generic_remove(pci_dev, eth_em_dev_uninit); +} + +static struct rte_pci_driver rte_em_pmd = { + .id_table = pci_id_em_map, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, + .probe = eth_em_pci_probe, + .remove = eth_em_pci_remove, +}; + +static int +em_hw_init(struct e1000_hw *hw) +{ + int diag; + + diag = hw->mac.ops.init_params(hw); + if (diag != 0) { + PMD_INIT_LOG(ERR, "MAC Initialization Error"); + return diag; + } + diag = hw->nvm.ops.init_params(hw); + if (diag != 0) { + PMD_INIT_LOG(ERR, "NVM Initialization Error"); + return diag; + } + diag = hw->phy.ops.init_params(hw); + if (diag != 0) { + PMD_INIT_LOG(ERR, "PHY Initialization Error"); + return diag; + } + (void) e1000_get_bus_info(hw); + + hw->mac.autoneg = 1; + hw->phy.autoneg_wait_to_complete = 0; + hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX; + + e1000_init_script_state_82541(hw, TRUE); + e1000_set_tbi_compatibility_82543(hw, TRUE); + + /* Copper options */ + if (hw->phy.media_type == e1000_media_type_copper) { + hw->phy.mdix = 0; /* AUTO_ALL_MODES */ + hw->phy.disable_polarity_correction = 0; + hw->phy.ms_type = e1000_ms_hw_default; + } + + /* + * Start from a known state, this is important in reading the nvm + * and mac from that. + */ + e1000_reset_hw(hw); + + /* Make sure we have a good EEPROM before we read from it */ + if (e1000_validate_nvm_checksum(hw) < 0) { + /* + * Some PCI-E parts fail the first check due to + * the link being in sleep state, call it again, + * if it fails a second time its a real issue. 
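The comment above describes the retry-once pattern used around e1000_validate_nvm_checksum(): some PCIe parts fail the first check while the link is still waking from sleep, so only a second failure is treated as real. A hedged, generic sketch of that pattern (the validate callback is a stand-in, not the base-code function):

    /* Run a check twice before declaring a hard failure; negative means error. */
    static int validate_with_retry(int (*validate)(void *ctx), void *ctx)
    {
        if (validate(ctx) >= 0)
            return 0;
        /* First failure may be transient (e.g. link still in sleep state). */
        return validate(ctx) < 0 ? -1 : 0;
    }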
+ */ + diag = e1000_validate_nvm_checksum(hw); + if (diag < 0) { + PMD_INIT_LOG(ERR, "EEPROM checksum invalid"); + goto error; + } + } + + /* Read the permanent MAC address out of the EEPROM */ + diag = e1000_read_mac_addr(hw); + if (diag != 0) { + PMD_INIT_LOG(ERR, "EEPROM error while reading MAC address"); + goto error; + } + + /* Now initialize the hardware */ + diag = em_hardware_init(hw); + if (diag != 0) { + PMD_INIT_LOG(ERR, "Hardware initialization failed"); + goto error; + } + + hw->mac.get_link_status = 1; + + /* Indicate SOL/IDER usage */ + diag = e1000_check_reset_block(hw); + if (diag < 0) { + PMD_INIT_LOG(ERR, "PHY reset is blocked due to " + "SOL/IDER session"); + } + return 0; + +error: + em_hw_control_release(hw); + return diag; +} + +static int +eth_em_configure(struct rte_eth_dev *dev) +{ + struct e1000_interrupt *intr = + E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + + PMD_INIT_FUNC_TRACE(); + intr->flags |= E1000_FLAG_NEED_LINK_UPDATE; + + PMD_INIT_FUNC_TRACE(); + + return 0; +} + +static void +em_set_pba(struct e1000_hw *hw) +{ + uint32_t pba; + + /* + * Packet Buffer Allocation (PBA) + * Writing PBA sets the receive portion of the buffer + * the remainder is used for the transmit buffer. + * Devices before the 82547 had a Packet Buffer of 64K. + * After the 82547 the buffer was reduced to 40K. + */ + switch (hw->mac.type) { + case e1000_82547: + case e1000_82547_rev_2: + /* 82547: Total Packet Buffer is 40K */ + pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */ + break; + case e1000_82571: + case e1000_82572: + case e1000_80003es2lan: + pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */ + break; + case e1000_82573: /* 82573: Total Packet Buffer is 32K */ + pba = E1000_PBA_12K; /* 12K for Rx, 20K for Tx */ + break; + case e1000_82574: + case e1000_82583: + pba = E1000_PBA_20K; /* 20K for Rx, 20K for Tx */ + break; + case e1000_ich8lan: + pba = E1000_PBA_8K; + break; + case e1000_ich9lan: + case e1000_ich10lan: + pba = E1000_PBA_10K; + break; + case e1000_pchlan: + case e1000_pch2lan: + case e1000_pch_lpt: + case e1000_pch_spt: + case e1000_pch_cnp: + pba = E1000_PBA_26K; + break; + default: + pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */ + } + + E1000_WRITE_REG(hw, E1000_PBA, pba); +} + +static void +eth_em_rxtx_control(struct rte_eth_dev *dev, + bool enable) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t tctl, rctl; + + tctl = E1000_READ_REG(hw, E1000_TCTL); + rctl = E1000_READ_REG(hw, E1000_RCTL); + if (enable) { + /* enable Tx/Rx */ + tctl |= E1000_TCTL_EN; + rctl |= E1000_RCTL_EN; + } else { + /* disable Tx/Rx */ + tctl &= ~E1000_TCTL_EN; + rctl &= ~E1000_RCTL_EN; + } + E1000_WRITE_REG(hw, E1000_TCTL, tctl); + E1000_WRITE_REG(hw, E1000_RCTL, rctl); + E1000_WRITE_FLUSH(hw); +} + +static int +eth_em_start(struct rte_eth_dev *dev) +{ + struct e1000_adapter *adapter = + E1000_DEV_PRIVATE(dev->data->dev_private); + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + int ret, mask; + uint32_t intr_vector = 0; + uint32_t *speeds; + int num_speeds; + bool autoneg; + + PMD_INIT_FUNC_TRACE(); + + eth_em_stop(dev); + + e1000_power_up_phy(hw); + + /* Set default PBA value */ + em_set_pba(hw); + + /* Put the address into the Receive Address Array */ + e1000_rar_set(hw, hw->mac.addr, 0); + + /* + * With the 82571 adapter, RAR[0] may be overwritten + * when the other port is reset, we 
make a duplicate + * in RAR[14] for that eventuality, this assures + * the interface continues to function. + */ + if (hw->mac.type == e1000_82571) { + e1000_set_laa_state_82571(hw, TRUE); + e1000_rar_set(hw, hw->mac.addr, E1000_RAR_ENTRIES - 1); + } + + /* Initialize the hardware */ + if (em_hardware_init(hw)) { + PMD_INIT_LOG(ERR, "Unable to initialize the hardware"); + return -EIO; + } + + E1000_WRITE_REG(hw, E1000_VET, RTE_ETHER_TYPE_VLAN); + + /* Configure for OS presence */ + em_init_manageability(hw); + + if (dev->data->dev_conf.intr_conf.rxq != 0) { + intr_vector = dev->data->nb_rx_queues; + if (rte_intr_efd_enable(intr_handle, intr_vector)) + return -1; + } + + if (rte_intr_dp_is_en(intr_handle)) { + intr_handle->intr_vec = + rte_zmalloc("intr_vec", + dev->data->nb_rx_queues * sizeof(int), 0); + if (intr_handle->intr_vec == NULL) { + PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues" + " intr_vec", dev->data->nb_rx_queues); + return -ENOMEM; + } + + /* enable rx interrupt */ + em_rxq_intr_enable(hw); + } + + eth_em_tx_init(dev); + + ret = eth_em_rx_init(dev); + if (ret) { + PMD_INIT_LOG(ERR, "Unable to initialize RX hardware"); + em_dev_clear_queues(dev); + return ret; + } + + e1000_clear_hw_cntrs_base_generic(hw); + + mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \ + ETH_VLAN_EXTEND_MASK; + ret = eth_em_vlan_offload_set(dev, mask); + if (ret) { + PMD_INIT_LOG(ERR, "Unable to update vlan offload"); + em_dev_clear_queues(dev); + return ret; + } + + /* Set Interrupt Throttling Rate to maximum allowed value. */ + E1000_WRITE_REG(hw, E1000_ITR, UINT16_MAX); + + /* Setup link speed and duplex */ + speeds = &dev->data->dev_conf.link_speeds; + if (*speeds == ETH_LINK_SPEED_AUTONEG) { + hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX; + hw->mac.autoneg = 1; + } else { + num_speeds = 0; + autoneg = (*speeds & ETH_LINK_SPEED_FIXED) == 0; + + /* Reset */ + hw->phy.autoneg_advertised = 0; + + if (*speeds & ~(ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M | + ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M | + ETH_LINK_SPEED_1G | ETH_LINK_SPEED_FIXED)) { + num_speeds = -1; + goto error_invalid_config; + } + if (*speeds & ETH_LINK_SPEED_10M_HD) { + hw->phy.autoneg_advertised |= ADVERTISE_10_HALF; + num_speeds++; + } + if (*speeds & ETH_LINK_SPEED_10M) { + hw->phy.autoneg_advertised |= ADVERTISE_10_FULL; + num_speeds++; + } + if (*speeds & ETH_LINK_SPEED_100M_HD) { + hw->phy.autoneg_advertised |= ADVERTISE_100_HALF; + num_speeds++; + } + if (*speeds & ETH_LINK_SPEED_100M) { + hw->phy.autoneg_advertised |= ADVERTISE_100_FULL; + num_speeds++; + } + if (*speeds & ETH_LINK_SPEED_1G) { + hw->phy.autoneg_advertised |= ADVERTISE_1000_FULL; + num_speeds++; + } + if (num_speeds == 0 || (!autoneg && (num_speeds > 1))) + goto error_invalid_config; + + /* Set/reset the mac.autoneg based on the link speed, + * fixed or not + */ + if (!autoneg) { + hw->mac.autoneg = 0; + hw->mac.forced_speed_duplex = + hw->phy.autoneg_advertised; + } else { + hw->mac.autoneg = 1; + } + } + + e1000_setup_link(hw); + + if (rte_intr_allow_others(intr_handle)) { + /* check if lsc interrupt is enabled */ + if (dev->data->dev_conf.intr_conf.lsc != 0) { + ret = eth_em_interrupt_setup(dev); + if (ret) { + PMD_INIT_LOG(ERR, "Unable to setup interrupts"); + em_dev_clear_queues(dev); + return ret; + } + } + } else { + rte_intr_callback_unregister(intr_handle, + eth_em_interrupt_handler, + (void *)dev); + if (dev->data->dev_conf.intr_conf.lsc != 0) + PMD_INIT_LOG(INFO, "lsc won't enable because of" + " no intr multiplexn"); + } + /* 
check if rxq interrupt is enabled */ + if (dev->data->dev_conf.intr_conf.rxq != 0) + eth_em_rxq_interrupt_setup(dev); + + rte_intr_enable(intr_handle); + + adapter->stopped = 0; + + eth_em_rxtx_control(dev, true); + eth_em_link_update(dev, 0); + + PMD_INIT_LOG(DEBUG, "<<"); + + return 0; + +error_invalid_config: + PMD_INIT_LOG(ERR, "Invalid advertised speeds (%u) for port %u", + dev->data->dev_conf.link_speeds, dev->data->port_id); + em_dev_clear_queues(dev); + return -EINVAL; +} + +/********************************************************************* + * + * This routine disables all traffic on the adapter by issuing a + * global reset on the MAC. + * + **********************************************************************/ +static void +eth_em_stop(struct rte_eth_dev *dev) +{ + struct rte_eth_link link; + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + + eth_em_rxtx_control(dev, false); + em_rxq_intr_disable(hw); + em_lsc_intr_disable(hw); + + e1000_reset_hw(hw); + + /* Flush desc rings for i219 */ + if (hw->mac.type == e1000_pch_spt || hw->mac.type == e1000_pch_cnp) + em_flush_desc_rings(dev); + + if (hw->mac.type >= e1000_82544) + E1000_WRITE_REG(hw, E1000_WUC, 0); + + /* Power down the phy. Needed to make the link go down */ + e1000_power_down_phy(hw); + + em_dev_clear_queues(dev); + + /* clear the recorded link status */ + memset(&link, 0, sizeof(link)); + rte_eth_linkstatus_set(dev, &link); + + if (!rte_intr_allow_others(intr_handle)) + /* resume to the default handler */ + rte_intr_callback_register(intr_handle, + eth_em_interrupt_handler, + (void *)dev); + + /* Clean datapath event and queue/vec mapping */ + rte_intr_efd_disable(intr_handle); + if (intr_handle->intr_vec != NULL) { + rte_free(intr_handle->intr_vec); + intr_handle->intr_vec = NULL; + } +} + +static void +eth_em_close(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_adapter *adapter = + E1000_DEV_PRIVATE(dev->data->dev_private); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + + eth_em_stop(dev); + adapter->stopped = 1; + em_dev_free_queues(dev); + e1000_phy_hw_reset(hw); + em_release_manageability(hw); + em_hw_control_release(hw); + + dev->dev_ops = NULL; + dev->rx_pkt_burst = NULL; + dev->tx_pkt_burst = NULL; + + /* disable uio intr before callback unregister */ + rte_intr_disable(intr_handle); + rte_intr_callback_unregister(intr_handle, + eth_em_interrupt_handler, dev); +} + +static int +em_get_rx_buffer_size(struct e1000_hw *hw) +{ + uint32_t rx_buf_size; + + rx_buf_size = ((E1000_READ_REG(hw, E1000_PBA) & UINT16_MAX) << 10); + return rx_buf_size; +} + +/********************************************************************* + * + * Initialize the hardware + * + **********************************************************************/ +static int +em_hardware_init(struct e1000_hw *hw) +{ + uint32_t rx_buf_size; + int diag; + + /* Issue a global reset */ + e1000_reset_hw(hw); + + /* Let the firmware know the OS is in control */ + em_hw_control_acquire(hw); + + /* + * These parameters control the automatic generation (Tx) and + * response (Rx) to Ethernet PAUSE frames. + * - High water mark should allow for at least two standard size (1518) + * frames to be received after sending an XOFF. 
+ * - Low water mark works best when it is very near the high water mark. + * This allows the receiver to restart by sending XON when it has + * drained a bit. Here we use an arbitrary value of 1500 which will + * restart after one full frame is pulled from the buffer. There + * could be several smaller frames in the buffer and if so they will + * not trigger the XON until their total number reduces the buffer + * by 1500. + * - The pause time is fairly large at 1000 x 512ns = 512 usec. + */ + rx_buf_size = em_get_rx_buffer_size(hw); + + hw->fc.high_water = rx_buf_size - + PMD_ROUNDUP(RTE_ETHER_MAX_LEN * 2, 1024); + hw->fc.low_water = hw->fc.high_water - 1500; + + if (hw->mac.type == e1000_80003es2lan) + hw->fc.pause_time = UINT16_MAX; + else + hw->fc.pause_time = EM_FC_PAUSE_TIME; + + hw->fc.send_xon = 1; + + /* Set Flow control, use the tunable location if sane */ + if (em_fc_setting <= e1000_fc_full) + hw->fc.requested_mode = em_fc_setting; + else + hw->fc.requested_mode = e1000_fc_none; + + /* Workaround: no TX flow ctrl for PCH */ + if (hw->mac.type == e1000_pchlan) + hw->fc.requested_mode = e1000_fc_rx_pause; + + /* Override - settings for PCH2LAN, ya its magic :) */ + if (hw->mac.type == e1000_pch2lan) { + hw->fc.high_water = 0x5C20; + hw->fc.low_water = 0x5048; + hw->fc.pause_time = 0x0650; + hw->fc.refresh_time = 0x0400; + } else if (hw->mac.type == e1000_pch_lpt || + hw->mac.type == e1000_pch_spt || + hw->mac.type == e1000_pch_cnp) { + hw->fc.requested_mode = e1000_fc_full; + } + + diag = e1000_init_hw(hw); + if (diag < 0) + return diag; + e1000_check_for_link(hw); + return 0; +} + +/* This function is based on em_update_stats_counters() in e1000/if_em.c */ +static int +eth_em_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_hw_stats *stats = + E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private); + int pause_frames; + + if(hw->phy.media_type == e1000_media_type_copper || + (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) { + stats->symerrs += E1000_READ_REG(hw,E1000_SYMERRS); + stats->sec += E1000_READ_REG(hw, E1000_SEC); + } + + stats->crcerrs += E1000_READ_REG(hw, E1000_CRCERRS); + stats->mpc += E1000_READ_REG(hw, E1000_MPC); + stats->scc += E1000_READ_REG(hw, E1000_SCC); + stats->ecol += E1000_READ_REG(hw, E1000_ECOL); + + stats->mcc += E1000_READ_REG(hw, E1000_MCC); + stats->latecol += E1000_READ_REG(hw, E1000_LATECOL); + stats->colc += E1000_READ_REG(hw, E1000_COLC); + stats->dc += E1000_READ_REG(hw, E1000_DC); + stats->rlec += E1000_READ_REG(hw, E1000_RLEC); + stats->xonrxc += E1000_READ_REG(hw, E1000_XONRXC); + stats->xontxc += E1000_READ_REG(hw, E1000_XONTXC); + + /* + * For watchdog management we need to know if we have been + * paused during the last interval, so capture that here. 
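The watermark setup above can be reproduced in isolation: the RX packet-buffer size comes from the low 16 bits of PBA in kilobytes, the high water mark leaves room for two maximum-size frames rounded up to 1 KB, and the low water mark sits 1500 bytes below it. A standalone sketch of that arithmetic (RTE_ETHER_MAX_LEN is 1518; the PBA value here is only an example):

    #include <stdint.h>
    #include <stdio.h>

    #define ROUNDUP(x, y)   ((((x) + (y) - 1) / (y)) * (y))   /* as PMD_ROUNDUP */
    #define ETHER_MAX_LEN   1518u                             /* RTE_ETHER_MAX_LEN */

    int main(void)
    {
        uint32_t pba_reg     = 0x0014;                   /* example: 20 KB RX buffer */
        uint32_t rx_buf_size = (pba_reg & 0xFFFFu) << 10;              /* KB -> bytes */
        uint32_t high_water  = rx_buf_size - ROUNDUP(ETHER_MAX_LEN * 2, 1024);
        uint32_t low_water   = high_water - 1500;

        printf("rx_buf=%u high=%u low=%u\n", rx_buf_size, high_water, low_water);
        return 0;
    }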
+ */ + pause_frames = E1000_READ_REG(hw, E1000_XOFFRXC); + stats->xoffrxc += pause_frames; + stats->xofftxc += E1000_READ_REG(hw, E1000_XOFFTXC); + stats->fcruc += E1000_READ_REG(hw, E1000_FCRUC); + stats->prc64 += E1000_READ_REG(hw, E1000_PRC64); + stats->prc127 += E1000_READ_REG(hw, E1000_PRC127); + stats->prc255 += E1000_READ_REG(hw, E1000_PRC255); + stats->prc511 += E1000_READ_REG(hw, E1000_PRC511); + stats->prc1023 += E1000_READ_REG(hw, E1000_PRC1023); + stats->prc1522 += E1000_READ_REG(hw, E1000_PRC1522); + stats->gprc += E1000_READ_REG(hw, E1000_GPRC); + stats->bprc += E1000_READ_REG(hw, E1000_BPRC); + stats->mprc += E1000_READ_REG(hw, E1000_MPRC); + stats->gptc += E1000_READ_REG(hw, E1000_GPTC); + + /* + * For the 64-bit byte counters the low dword must be read first. + * Both registers clear on the read of the high dword. + */ + + stats->gorc += E1000_READ_REG(hw, E1000_GORCL); + stats->gorc += ((uint64_t)E1000_READ_REG(hw, E1000_GORCH) << 32); + stats->gotc += E1000_READ_REG(hw, E1000_GOTCL); + stats->gotc += ((uint64_t)E1000_READ_REG(hw, E1000_GOTCH) << 32); + + stats->rnbc += E1000_READ_REG(hw, E1000_RNBC); + stats->ruc += E1000_READ_REG(hw, E1000_RUC); + stats->rfc += E1000_READ_REG(hw, E1000_RFC); + stats->roc += E1000_READ_REG(hw, E1000_ROC); + stats->rjc += E1000_READ_REG(hw, E1000_RJC); + + stats->tor += E1000_READ_REG(hw, E1000_TORH); + stats->tot += E1000_READ_REG(hw, E1000_TOTH); + + stats->tpr += E1000_READ_REG(hw, E1000_TPR); + stats->tpt += E1000_READ_REG(hw, E1000_TPT); + stats->ptc64 += E1000_READ_REG(hw, E1000_PTC64); + stats->ptc127 += E1000_READ_REG(hw, E1000_PTC127); + stats->ptc255 += E1000_READ_REG(hw, E1000_PTC255); + stats->ptc511 += E1000_READ_REG(hw, E1000_PTC511); + stats->ptc1023 += E1000_READ_REG(hw, E1000_PTC1023); + stats->ptc1522 += E1000_READ_REG(hw, E1000_PTC1522); + stats->mptc += E1000_READ_REG(hw, E1000_MPTC); + stats->bptc += E1000_READ_REG(hw, E1000_BPTC); + + /* Interrupt Counts */ + + if (hw->mac.type >= e1000_82571) { + stats->iac += E1000_READ_REG(hw, E1000_IAC); + stats->icrxptc += E1000_READ_REG(hw, E1000_ICRXPTC); + stats->icrxatc += E1000_READ_REG(hw, E1000_ICRXATC); + stats->ictxptc += E1000_READ_REG(hw, E1000_ICTXPTC); + stats->ictxatc += E1000_READ_REG(hw, E1000_ICTXATC); + stats->ictxqec += E1000_READ_REG(hw, E1000_ICTXQEC); + stats->ictxqmtc += E1000_READ_REG(hw, E1000_ICTXQMTC); + stats->icrxdmtc += E1000_READ_REG(hw, E1000_ICRXDMTC); + stats->icrxoc += E1000_READ_REG(hw, E1000_ICRXOC); + } + + if (hw->mac.type >= e1000_82543) { + stats->algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC); + stats->rxerrc += E1000_READ_REG(hw, E1000_RXERRC); + stats->tncrs += E1000_READ_REG(hw, E1000_TNCRS); + stats->cexterr += E1000_READ_REG(hw, E1000_CEXTERR); + stats->tsctc += E1000_READ_REG(hw, E1000_TSCTC); + stats->tsctfc += E1000_READ_REG(hw, E1000_TSCTFC); + } + + if (rte_stats == NULL) + return -EINVAL; + + /* Rx Errors */ + rte_stats->imissed = stats->mpc; + rte_stats->ierrors = stats->crcerrs + + stats->rlec + stats->ruc + stats->roc + + stats->rxerrc + stats->algnerrc + stats->cexterr; + + /* Tx Errors */ + rte_stats->oerrors = stats->ecol + stats->latecol; + + rte_stats->ipackets = stats->gprc; + rte_stats->opackets = stats->gptc; + rte_stats->ibytes = stats->gorc; + rte_stats->obytes = stats->gotc; + return 0; +} + +static int +eth_em_stats_reset(struct rte_eth_dev *dev) +{ + struct e1000_hw_stats *hw_stats = + E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private); + + /* HW registers are cleared on read */ + eth_em_stats_get(dev, NULL); + 
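The GORC/GOTC handling above depends on reading the low dword before the high dword, since the pair clears on the high read. A minimal sketch of combining such a split counter, with the register reads left as plain values:

    #include <stdint.h>

    /* Combine a 64-bit statistic exposed as two 32-bit halves. In the driver
     * the reads would be E1000_READ_REG(hw, ..L) followed by
     * E1000_READ_REG(hw, ..H); the order matters because the high read clears
     * both registers.
     */
    static uint64_t combine_split_counter(uint32_t lo, uint32_t hi)
    {
        return (uint64_t)lo | ((uint64_t)hi << 32);
    }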
+ /* Reset software totals */ + memset(hw_stats, 0, sizeof(*hw_stats)); + + return 0; +} + +static int +eth_em_rx_queue_intr_enable(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + + em_rxq_intr_enable(hw); + rte_intr_ack(intr_handle); + + return 0; +} + +static int +eth_em_rx_queue_intr_disable(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + em_rxq_intr_disable(hw); + + return 0; +} + +uint32_t +em_get_max_pktlen(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + switch (hw->mac.type) { + case e1000_82571: + case e1000_82572: + case e1000_ich9lan: + case e1000_ich10lan: + case e1000_pch2lan: + case e1000_pch_lpt: + case e1000_pch_spt: + case e1000_pch_cnp: + case e1000_82574: + case e1000_80003es2lan: /* 9K Jumbo Frame size */ + case e1000_82583: + return 0x2412; + case e1000_pchlan: + return 0x1000; + /* Adapters that do not support jumbo frames */ + case e1000_ich8lan: + return RTE_ETHER_MAX_LEN; + default: + return MAX_JUMBO_FRAME_SIZE; + } +} + +static int +eth_em_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */ + dev_info->max_rx_pktlen = em_get_max_pktlen(dev); + dev_info->max_mac_addrs = hw->mac.rar_entry_count; + + /* + * Starting with 631xESB hw supports 2 TX/RX queues per port. + * Unfortunatelly, all these nics have just one TX context. + * So we have few choises for TX: + * - Use just one TX queue. + * - Allow cksum offload only for one TX queue. + * - Don't allow TX cksum offload at all. + * For now, option #1 was chosen. + * To use second RX queue we have to use extended RX descriptor + * (Multiple Receive Queues are mutually exclusive with UDP + * fragmentation and are not supported when a legacy receive + * descriptor format is used). + * Which means separate RX routinies - as legacy nics (82540, 82545) + * don't support extended RXD. + * To avoid it we support just one RX queue for now (no RSS). 
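em_get_max_pktlen() above gives the per-device frame-size ceiling; combined with the overhead constant from the header, a generic MTU sanity check looks like the sketch below. This is an illustration only, not the driver's eth_em_mtu_set(), and the 68-byte minimum is an assumed lower bound:

    #include <stdint.h>

    #define ETH_OVERHEAD_BYTES 22u   /* header + CRC + one VLAN tag, see header */
    #define ETHER_MIN_MTU      68u   /* assumed minimum MTU */

    /* Return 0 when the requested MTU fits the device limit, -1 otherwise. */
    static int mtu_is_valid(uint32_t mtu, uint32_t max_pktlen)
    {
        uint32_t frame_size = mtu + ETH_OVERHEAD_BYTES;

        if (mtu < ETHER_MIN_MTU || frame_size > max_pktlen)
            return -1;
        return 0;
    }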
+ */ + + dev_info->max_rx_queues = 1; + dev_info->max_tx_queues = 1; + + dev_info->rx_queue_offload_capa = em_get_rx_queue_offloads_capa(dev); + dev_info->rx_offload_capa = em_get_rx_port_offloads_capa(dev) | + dev_info->rx_queue_offload_capa; + dev_info->tx_queue_offload_capa = em_get_tx_queue_offloads_capa(dev); + dev_info->tx_offload_capa = em_get_tx_port_offloads_capa(dev) | + dev_info->tx_queue_offload_capa; + + dev_info->rx_desc_lim = (struct rte_eth_desc_lim) { + .nb_max = E1000_MAX_RING_DESC, + .nb_min = E1000_MIN_RING_DESC, + .nb_align = EM_RXD_ALIGN, + }; + + dev_info->tx_desc_lim = (struct rte_eth_desc_lim) { + .nb_max = E1000_MAX_RING_DESC, + .nb_min = E1000_MIN_RING_DESC, + .nb_align = EM_TXD_ALIGN, + .nb_seg_max = EM_TX_MAX_SEG, + .nb_mtu_seg_max = EM_TX_MAX_MTU_SEG, + }; + + dev_info->speed_capa = ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M | + ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M | + ETH_LINK_SPEED_1G; + + /* Preferred queue parameters */ + dev_info->default_rxportconf.nb_queues = 1; + dev_info->default_txportconf.nb_queues = 1; + dev_info->default_txportconf.ring_size = 256; + dev_info->default_rxportconf.ring_size = 256; + + return 0; +} + +/* return 0 means link status changed, -1 means not changed */ +static int +eth_em_link_update(struct rte_eth_dev *dev, int wait_to_complete) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_eth_link link; + int link_up, count; + + link_up = 0; + hw->mac.get_link_status = 1; + + /* possible wait-to-complete in up to 9 seconds */ + for (count = 0; count < EM_LINK_UPDATE_CHECK_TIMEOUT; count ++) { + /* Read the real link status */ + switch (hw->phy.media_type) { + case e1000_media_type_copper: + /* Do the work to read phy */ + e1000_check_for_link(hw); + link_up = !hw->mac.get_link_status; + break; + + case e1000_media_type_fiber: + e1000_check_for_link(hw); + link_up = (E1000_READ_REG(hw, E1000_STATUS) & + E1000_STATUS_LU); + break; + + case e1000_media_type_internal_serdes: + e1000_check_for_link(hw); + link_up = hw->mac.serdes_has_link; + break; + + default: + break; + } + if (link_up || wait_to_complete == 0) + break; + rte_delay_ms(EM_LINK_UPDATE_CHECK_INTERVAL); + } + memset(&link, 0, sizeof(link)); + + /* Now we check if a transition has happened */ + if (link_up) { + uint16_t duplex, speed; + hw->mac.ops.get_link_up_info(hw, &speed, &duplex); + link.link_duplex = (duplex == FULL_DUPLEX) ? + ETH_LINK_FULL_DUPLEX : + ETH_LINK_HALF_DUPLEX; + link.link_speed = speed; + link.link_status = ETH_LINK_UP; + link.link_autoneg = !(dev->data->dev_conf.link_speeds & + ETH_LINK_SPEED_FIXED); + } else { + link.link_speed = ETH_SPEED_NUM_NONE; + link.link_duplex = ETH_LINK_HALF_DUPLEX; + link.link_status = ETH_LINK_DOWN; + link.link_autoneg = ETH_LINK_FIXED; + } + + return rte_eth_linkstatus_set(dev, &link); +} + +/* + * em_hw_control_acquire sets {CTRL_EXT|FWSM}:DRV_LOAD bit. + * For ASF and Pass Through versions of f/w this means + * that the driver is loaded. For AMT version type f/w + * this means that the network i/f is open. 
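eth_em_link_update() above polls the MAC/PHY up to 90 times with a 100 ms delay (roughly 9 s) when wait_to_complete is set, and probes only once otherwise. A stripped-down sketch of that poll-with-timeout shape, with the hardware check and delay replaced by stand-ins rather than real DPDK calls:

    #include <stdbool.h>

    #define LINK_CHECK_TIMEOUT  90     /* iterations, ~9 s total */
    #define LINK_CHECK_DELAY_MS 100

    /* check_link() and delay_ms() stand in for the driver's hardware access
     * (e1000_check_for_link) and rte_delay_ms(); they are placeholders here. */
    static bool poll_link_up(bool (*check_link)(void *), void (*delay_ms)(unsigned int),
                             void *hw, int wait_to_complete)
    {
        int count;

        for (count = 0; count < LINK_CHECK_TIMEOUT; count++) {
            if (check_link(hw))
                return true;
            if (!wait_to_complete)     /* single non-blocking probe */
                break;
            delay_ms(LINK_CHECK_DELAY_MS);
        }
        return false;
    }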
+ */ +static void +em_hw_control_acquire(struct e1000_hw *hw) +{ + uint32_t ctrl_ext, swsm; + + /* Let firmware know the driver has taken over */ + if (hw->mac.type == e1000_82573) { + swsm = E1000_READ_REG(hw, E1000_SWSM); + E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_DRV_LOAD); + + } else { + ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); + E1000_WRITE_REG(hw, E1000_CTRL_EXT, + ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); + } +} + +/* + * em_hw_control_release resets {CTRL_EXTT|FWSM}:DRV_LOAD bit. + * For ASF and Pass Through versions of f/w this means that the + * driver is no longer loaded. For AMT versions of the + * f/w this means that the network i/f is closed. + */ +static void +em_hw_control_release(struct e1000_hw *hw) +{ + uint32_t ctrl_ext, swsm; + + /* Let firmware taken over control of h/w */ + if (hw->mac.type == e1000_82573) { + swsm = E1000_READ_REG(hw, E1000_SWSM); + E1000_WRITE_REG(hw, E1000_SWSM, swsm & ~E1000_SWSM_DRV_LOAD); + } else { + ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); + E1000_WRITE_REG(hw, E1000_CTRL_EXT, + ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); + } +} + +/* + * Bit of a misnomer, what this really means is + * to enable OS management of the system... aka + * to disable special hardware management features. + */ +static void +em_init_manageability(struct e1000_hw *hw) +{ + if (e1000_enable_mng_pass_thru(hw)) { + uint32_t manc2h = E1000_READ_REG(hw, E1000_MANC2H); + uint32_t manc = E1000_READ_REG(hw, E1000_MANC); + + /* disable hardware interception of ARP */ + manc &= ~(E1000_MANC_ARP_EN); + + /* enable receiving management packets to the host */ + manc |= E1000_MANC_EN_MNG2HOST; + manc2h |= 1 << 5; /* Mng Port 623 */ + manc2h |= 1 << 6; /* Mng Port 664 */ + E1000_WRITE_REG(hw, E1000_MANC2H, manc2h); + E1000_WRITE_REG(hw, E1000_MANC, manc); + } +} + +/* + * Give control back to hardware management + * controller if there is one. 
+ */ +static void +em_release_manageability(struct e1000_hw *hw) +{ + uint32_t manc; + + if (e1000_enable_mng_pass_thru(hw)) { + manc = E1000_READ_REG(hw, E1000_MANC); + + /* re-enable hardware interception of ARP */ + manc |= E1000_MANC_ARP_EN; + manc &= ~E1000_MANC_EN_MNG2HOST; + + E1000_WRITE_REG(hw, E1000_MANC, manc); + } +} + +static int +eth_em_promiscuous_enable(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t rctl; + + rctl = E1000_READ_REG(hw, E1000_RCTL); + rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); + E1000_WRITE_REG(hw, E1000_RCTL, rctl); + + return 0; +} + +static int +eth_em_promiscuous_disable(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t rctl; + + rctl = E1000_READ_REG(hw, E1000_RCTL); + rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_SBP); + if (dev->data->all_multicast == 1) + rctl |= E1000_RCTL_MPE; + else + rctl &= (~E1000_RCTL_MPE); + E1000_WRITE_REG(hw, E1000_RCTL, rctl); + + return 0; +} + +static int +eth_em_allmulticast_enable(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t rctl; + + rctl = E1000_READ_REG(hw, E1000_RCTL); + rctl |= E1000_RCTL_MPE; + E1000_WRITE_REG(hw, E1000_RCTL, rctl); + + return 0; +} + +static int +eth_em_allmulticast_disable(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t rctl; + + if (dev->data->promiscuous == 1) + return 0; /* must remain in all_multicast mode */ + rctl = E1000_READ_REG(hw, E1000_RCTL); + rctl &= (~E1000_RCTL_MPE); + E1000_WRITE_REG(hw, E1000_RCTL, rctl); + + return 0; +} + +static int +eth_em_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_vfta * shadow_vfta = + E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); + uint32_t vfta; + uint32_t vid_idx; + uint32_t vid_bit; + + vid_idx = (uint32_t) ((vlan_id >> E1000_VFTA_ENTRY_SHIFT) & + E1000_VFTA_ENTRY_MASK); + vid_bit = (uint32_t) (1 << (vlan_id & E1000_VFTA_ENTRY_BIT_SHIFT_MASK)); + vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, vid_idx); + if (on) + vfta |= vid_bit; + else + vfta &= ~vid_bit; + E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, vid_idx, vfta); + + /* update local VFTA copy */ + shadow_vfta->vfta[vid_idx] = vfta; + + return 0; +} + +static void +em_vlan_hw_filter_disable(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t reg; + + /* Filter Table Disable */ + reg = E1000_READ_REG(hw, E1000_RCTL); + reg &= ~E1000_RCTL_CFIEN; + reg &= ~E1000_RCTL_VFE; + E1000_WRITE_REG(hw, E1000_RCTL, reg); +} + +static void +em_vlan_hw_filter_enable(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_vfta * shadow_vfta = + E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); + uint32_t reg; + int i; + + /* Filter Table Enable, CFI not used for packet acceptance */ + reg = E1000_READ_REG(hw, E1000_RCTL); + reg &= ~E1000_RCTL_CFIEN; + reg |= E1000_RCTL_VFE; + E1000_WRITE_REG(hw, E1000_RCTL, reg); + + /* restore vfta from local copy */ + for (i = 0; i < IGB_VFTA_SIZE; i++) + E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, i, shadow_vfta->vfta[i]); +} + +static void +em_vlan_hw_strip_disable(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t reg; + + /* VLAN 
Mode Disable */ + reg = E1000_READ_REG(hw, E1000_CTRL); + reg &= ~E1000_CTRL_VME; + E1000_WRITE_REG(hw, E1000_CTRL, reg); + +} + +static void +em_vlan_hw_strip_enable(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t reg; + + /* VLAN Mode Enable */ + reg = E1000_READ_REG(hw, E1000_CTRL); + reg |= E1000_CTRL_VME; + E1000_WRITE_REG(hw, E1000_CTRL, reg); +} + +static int +eth_em_vlan_offload_set(struct rte_eth_dev *dev, int mask) +{ + struct rte_eth_rxmode *rxmode; + + rxmode = &dev->data->dev_conf.rxmode; + if(mask & ETH_VLAN_STRIP_MASK){ + if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) + em_vlan_hw_strip_enable(dev); + else + em_vlan_hw_strip_disable(dev); + } + + if(mask & ETH_VLAN_FILTER_MASK){ + if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) + em_vlan_hw_filter_enable(dev); + else + em_vlan_hw_filter_disable(dev); + } + + return 0; +} + +/* + * It enables the interrupt mask and then enable the interrupt. + * + * @param dev + * Pointer to struct rte_eth_dev. + * + * @return + * - On success, zero. + * - On failure, a negative value. + */ +static int +eth_em_interrupt_setup(struct rte_eth_dev *dev) +{ + uint32_t regval; + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* clear interrupt */ + E1000_READ_REG(hw, E1000_ICR); + regval = E1000_READ_REG(hw, E1000_IMS); + E1000_WRITE_REG(hw, E1000_IMS, + regval | E1000_ICR_LSC | E1000_ICR_OTHER); + return 0; +} + +/* + * It clears the interrupt causes and enables the interrupt. + * It will be called once only during nic initialized. + * + * @param dev + * Pointer to struct rte_eth_dev. + * + * @return + * - On success, zero. + * - On failure, a negative value. + */ +static int +eth_em_rxq_interrupt_setup(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + E1000_READ_REG(hw, E1000_ICR); + em_rxq_intr_enable(hw); + return 0; +} + +/* + * It enable receive packet interrupt. + * @param hw + * Pointer to struct e1000_hw + * + * @return + */ +static void +em_rxq_intr_enable(struct e1000_hw *hw) +{ + E1000_WRITE_REG(hw, E1000_IMS, E1000_IMS_RXT0); + E1000_WRITE_FLUSH(hw); +} + +/* + * It disabled lsc interrupt. + * @param hw + * Pointer to struct e1000_hw + * + * @return + */ +static void +em_lsc_intr_disable(struct e1000_hw *hw) +{ + E1000_WRITE_REG(hw, E1000_IMC, E1000_IMS_LSC | E1000_IMS_OTHER); + E1000_WRITE_FLUSH(hw); +} + +/* + * It disabled receive packet interrupt. + * @param hw + * Pointer to struct e1000_hw + * + * @return + */ +static void +em_rxq_intr_disable(struct e1000_hw *hw) +{ + E1000_READ_REG(hw, E1000_ICR); + E1000_WRITE_REG(hw, E1000_IMC, E1000_IMS_RXT0); + E1000_WRITE_FLUSH(hw); +} + +/* + * It reads ICR and gets interrupt causes, check it and set a bit flag + * to update link status. + * + * @param dev + * Pointer to struct rte_eth_dev. + * + * @return + * - On success, zero. + * - On failure, a negative value. + */ +static int +eth_em_interrupt_get_status(struct rte_eth_dev *dev) +{ + uint32_t icr; + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_interrupt *intr = + E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + + /* read-on-clear nic registers here */ + icr = E1000_READ_REG(hw, E1000_ICR); + if (icr & E1000_ICR_LSC) { + intr->flags |= E1000_FLAG_NEED_LINK_UPDATE; + } + + return 0; +} + +/* + * It executes link_update after knowing an interrupt is prsent. + * + * @param dev + * Pointer to struct rte_eth_dev. 
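The VFTA update in eth_em_vlan_filter_set() above splits a 12-bit VLAN ID into a table index and a bit position within that 32-bit entry. The standalone sketch below reproduces that arithmetic with locally defined constants, assumed to mirror the usual e1000 layout of 128 x 32-bit VFTA entries; it touches no hardware.

#include <stdint.h>
#include <stdio.h>

/* Assumed to mirror the e1000 definitions: 128 entries x 32 bits = 4096 VLAN IDs. */
#define VFTA_ENTRY_SHIFT          5
#define VFTA_ENTRY_MASK           0x7F
#define VFTA_ENTRY_BIT_SHIFT_MASK 0x1F

int main(void)
{
        uint16_t vlan_id = 100;
        uint32_t vid_idx = (vlan_id >> VFTA_ENTRY_SHIFT) & VFTA_ENTRY_MASK;
        uint32_t vid_bit = 1u << (vlan_id & VFTA_ENTRY_BIT_SHIFT_MASK);

        /* VLAN 100 -> entry 3, bit 4 (mask 0x00000010). */
        printf("vlan %u -> VFTA[%u] |= 0x%08x\n", vlan_id, vid_idx, vid_bit);
        return 0;
}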
+ * + * @return + * - On success, zero. + * - On failure, a negative value. + */ +static int +eth_em_interrupt_action(struct rte_eth_dev *dev, + struct rte_intr_handle *intr_handle) +{ + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_interrupt *intr = + E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + struct rte_eth_link link; + int ret; + + if (!(intr->flags & E1000_FLAG_NEED_LINK_UPDATE)) + return -1; + + intr->flags &= ~E1000_FLAG_NEED_LINK_UPDATE; + rte_intr_ack(intr_handle); + + /* set get_link_status to check register later */ + hw->mac.get_link_status = 1; + ret = eth_em_link_update(dev, 0); + + /* check if link has changed */ + if (ret < 0) + return 0; + + rte_eth_linkstatus_get(dev, &link); + + if (link.link_status) { + PMD_INIT_LOG(INFO, " Port %d: Link Up - speed %u Mbps - %s", + dev->data->port_id, link.link_speed, + link.link_duplex == ETH_LINK_FULL_DUPLEX ? + "full-duplex" : "half-duplex"); + } else { + PMD_INIT_LOG(INFO, " Port %d: Link Down", dev->data->port_id); + } + PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT, + pci_dev->addr.domain, pci_dev->addr.bus, + pci_dev->addr.devid, pci_dev->addr.function); + + return 0; +} + +/** + * Interrupt handler which shall be registered at first. + * + * @param handle + * Pointer to interrupt handle. + * @param param + * The address of parameter (struct rte_eth_dev *) regsitered before. + * + * @return + * void + */ +static void +eth_em_interrupt_handler(void *param) +{ + struct rte_eth_dev *dev = (struct rte_eth_dev *)param; + + eth_em_interrupt_get_status(dev); + eth_em_interrupt_action(dev, dev->intr_handle); + _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL); +} + +static int +eth_em_led_on(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw; + + hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + return e1000_led_on(hw) == E1000_SUCCESS ? 0 : -ENOTSUP; +} + +static int +eth_em_led_off(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw; + + hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + return e1000_led_off(hw) == E1000_SUCCESS ? 0 : -ENOTSUP; +} + +static int +eth_em_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) +{ + struct e1000_hw *hw; + uint32_t ctrl; + int tx_pause; + int rx_pause; + + hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + fc_conf->pause_time = hw->fc.pause_time; + fc_conf->high_water = hw->fc.high_water; + fc_conf->low_water = hw->fc.low_water; + fc_conf->send_xon = hw->fc.send_xon; + fc_conf->autoneg = hw->mac.autoneg; + + /* + * Return rx_pause and tx_pause status according to actual setting of + * the TFCE and RFCE bits in the CTRL register. 
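The link-status-change path above (eth_em_interrupt_get_status(), eth_em_interrupt_action() and eth_em_interrupt_handler()) is only exercised when the application arms LSC interrupts and registers an ethdev callback. A hedged sketch of that application side, assuming the generic DPDK ethdev API of this era:

#include <stdio.h>
#include <rte_ethdev.h>

/* Invoked from the interrupt thread when eth_em_interrupt_handler() raises
 * RTE_ETH_EVENT_INTR_LSC via _rte_eth_dev_callback_process(). */
static int
lsc_event_cb(uint16_t port_id, enum rte_eth_event_type event,
             void *cb_arg, void *ret_param)
{
        (void)cb_arg; (void)ret_param;
        if (event == RTE_ETH_EVENT_INTR_LSC)
                printf("port %u: link status changed\n", port_id);
        return 0;
}

/* Call before rte_eth_dev_configure(): intr_conf.lsc asks the PMD to arm
 * the LSC interrupt, and the callback delivers the resulting events. */
static int
enable_lsc(uint16_t port_id, struct rte_eth_conf *conf)
{
        conf->intr_conf.lsc = 1;
        return rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
                                             lsc_event_cb, NULL);
}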
+ */ + ctrl = E1000_READ_REG(hw, E1000_CTRL); + if (ctrl & E1000_CTRL_TFCE) + tx_pause = 1; + else + tx_pause = 0; + + if (ctrl & E1000_CTRL_RFCE) + rx_pause = 1; + else + rx_pause = 0; + + if (rx_pause && tx_pause) + fc_conf->mode = RTE_FC_FULL; + else if (rx_pause) + fc_conf->mode = RTE_FC_RX_PAUSE; + else if (tx_pause) + fc_conf->mode = RTE_FC_TX_PAUSE; + else + fc_conf->mode = RTE_FC_NONE; + + return 0; +} + +static int +eth_em_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) +{ + struct e1000_hw *hw; + int err; + enum e1000_fc_mode rte_fcmode_2_e1000_fcmode[] = { + e1000_fc_none, + e1000_fc_rx_pause, + e1000_fc_tx_pause, + e1000_fc_full + }; + uint32_t rx_buf_size; + uint32_t max_high_water; + uint32_t rctl; + + hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + if (fc_conf->autoneg != hw->mac.autoneg) + return -ENOTSUP; + rx_buf_size = em_get_rx_buffer_size(hw); + PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size); + + /* At least reserve one Ethernet frame for watermark */ + max_high_water = rx_buf_size - RTE_ETHER_MAX_LEN; + if ((fc_conf->high_water > max_high_water) || + (fc_conf->high_water < fc_conf->low_water)) { + PMD_INIT_LOG(ERR, "e1000 incorrect high/low water value"); + PMD_INIT_LOG(ERR, "high water must <= 0x%x", max_high_water); + return -EINVAL; + } + + hw->fc.requested_mode = rte_fcmode_2_e1000_fcmode[fc_conf->mode]; + hw->fc.pause_time = fc_conf->pause_time; + hw->fc.high_water = fc_conf->high_water; + hw->fc.low_water = fc_conf->low_water; + hw->fc.send_xon = fc_conf->send_xon; + + err = e1000_setup_link_generic(hw); + if (err == E1000_SUCCESS) { + + /* check if we want to forward MAC frames - driver doesn't have native + * capability to do that, so we'll write the registers ourselves */ + + rctl = E1000_READ_REG(hw, E1000_RCTL); + + /* set or clear MFLCN.PMCF bit depending on configuration */ + if (fc_conf->mac_ctrl_frame_fwd != 0) + rctl |= E1000_RCTL_PMCF; + else + rctl &= ~E1000_RCTL_PMCF; + + E1000_WRITE_REG(hw, E1000_RCTL, rctl); + E1000_WRITE_FLUSH(hw); + + return 0; + } + + PMD_INIT_LOG(ERR, "e1000_setup_link_generic = 0x%x", err); + return -EIO; +} + +static int +eth_em_rar_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr, + uint32_t index, __rte_unused uint32_t pool) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + return e1000_rar_set(hw, mac_addr->addr_bytes, index); +} + +static void +eth_em_rar_clear(struct rte_eth_dev *dev, uint32_t index) +{ + uint8_t addr[RTE_ETHER_ADDR_LEN]; + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + memset(addr, 0, sizeof(addr)); + + e1000_rar_set(hw, addr, index); +} + +static int +eth_em_default_mac_addr_set(struct rte_eth_dev *dev, + struct rte_ether_addr *addr) +{ + eth_em_rar_clear(dev, 0); + + return eth_em_rar_set(dev, (void *)addr, 0, 0); +} + +static int +eth_em_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) +{ + struct rte_eth_dev_info dev_info; + struct e1000_hw *hw; + uint32_t frame_size; + uint32_t rctl; + int ret; + + ret = eth_em_infos_get(dev, &dev_info); + if (ret != 0) + return ret; + + frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + + VLAN_TAG_SIZE; + + /* check that mtu is within the allowed range */ + if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen) + return -EINVAL; + + /* refuse mtu that requires the support of scattered packets when this + * feature has not been enabled before. 
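Applications reach eth_em_flow_ctrl_get()/eth_em_flow_ctrl_set() above through the generic rte_eth_dev_flow_ctrl_get()/rte_eth_dev_flow_ctrl_set() calls. A hedged usage sketch follows (error handling trimmed); the watermarks reported by the PMD are left unchanged so the high_water check in eth_em_flow_ctrl_set() is not tripped.

#include <rte_ethdev.h>

/* Query the current flow-control state and re-apply it with both RX and
 * TX pause requested, keeping the watermarks the PMD reported. */
static int
force_full_flow_ctrl(uint16_t port_id)
{
        struct rte_eth_fc_conf fc;
        int ret;

        ret = rte_eth_dev_flow_ctrl_get(port_id, &fc);
        if (ret != 0)
                return ret;

        fc.mode = RTE_FC_FULL;          /* request both RX and TX pause */
        return rte_eth_dev_flow_ctrl_set(port_id, &fc);
}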
*/ + if (!dev->data->scattered_rx && + frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) + return -EINVAL; + + hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + rctl = E1000_READ_REG(hw, E1000_RCTL); + + /* switch to jumbo mode if needed */ + if (frame_size > RTE_ETHER_MAX_LEN) { + dev->data->dev_conf.rxmode.offloads |= + DEV_RX_OFFLOAD_JUMBO_FRAME; + rctl |= E1000_RCTL_LPE; + } else { + dev->data->dev_conf.rxmode.offloads &= + ~DEV_RX_OFFLOAD_JUMBO_FRAME; + rctl &= ~E1000_RCTL_LPE; + } + E1000_WRITE_REG(hw, E1000_RCTL, rctl); + + /* update max frame size */ + dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size; + return 0; +} + +static int +eth_em_set_mc_addr_list(struct rte_eth_dev *dev, + struct rte_ether_addr *mc_addr_set, + uint32_t nb_mc_addr) +{ + struct e1000_hw *hw; + + hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + e1000_update_mc_addr_list(hw, (u8 *)mc_addr_set, nb_mc_addr); + return 0; +} + +RTE_PMD_REGISTER_PCI(net_e1000_em, rte_em_pmd); +RTE_PMD_REGISTER_PCI_TABLE(net_e1000_em, pci_id_em_map); +RTE_PMD_REGISTER_KMOD_DEP(net_e1000_em, "* igb_uio | uio_pci_generic | vfio-pci"); + +/* see e1000_logs.c */ +RTE_INIT(igb_init_log) +{ + e1000_igb_init_log(); +} diff --git a/src/spdk/dpdk/drivers/net/e1000/em_rxtx.c b/src/spdk/dpdk/drivers/net/e1000/em_rxtx.c new file mode 100644 index 000000000..49c53712a --- /dev/null +++ b/src/spdk/dpdk/drivers/net/e1000/em_rxtx.c @@ -0,0 +1,2140 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2016 Intel Corporation + */ + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "e1000_logs.h" +#include "base/e1000_api.h" +#include "e1000_ethdev.h" +#include "base/e1000_osdep.h" + +#define E1000_TXD_VLAN_SHIFT 16 + +#define E1000_RXDCTL_GRAN 0x01000000 /* RXDCTL Granularity */ + +#define E1000_TX_OFFLOAD_MASK ( \ + PKT_TX_IPV6 | \ + PKT_TX_IPV4 | \ + PKT_TX_IP_CKSUM | \ + PKT_TX_L4_MASK | \ + PKT_TX_VLAN_PKT) + +#define E1000_TX_OFFLOAD_NOTSUP_MASK \ + (PKT_TX_OFFLOAD_MASK ^ E1000_TX_OFFLOAD_MASK) + +/* PCI offset for querying configuration status register */ +#define PCI_CFG_STATUS_REG 0x06 +#define FLUSH_DESC_REQUIRED 0x100 + + +/** + * Structure associated with each descriptor of the RX ring of a RX queue. + */ +struct em_rx_entry { + struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */ +}; + +/** + * Structure associated with each descriptor of the TX ring of a TX queue. + */ +struct em_tx_entry { + struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */ + uint16_t next_id; /**< Index of next descriptor in ring. */ + uint16_t last_id; /**< Index of last scattered descriptor. */ +}; + +/** + * Structure associated with each RX queue. + */ +struct em_rx_queue { + struct rte_mempool *mb_pool; /**< mbuf pool to populate RX ring. */ + volatile struct e1000_rx_desc *rx_ring; /**< RX ring virtual address. */ + uint64_t rx_ring_phys_addr; /**< RX ring DMA address. */ + volatile uint32_t *rdt_reg_addr; /**< RDT register address. */ + volatile uint32_t *rdh_reg_addr; /**< RDH register address. */ + struct em_rx_entry *sw_ring; /**< address of RX software ring. */ + struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. 
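The frame-size check in eth_em_mtu_set() above is plain arithmetic: the on-wire frame is the MTU plus the Ethernet header, CRC and one VLAN tag, and anything larger than the 1518-byte RTE_ETHER_MAX_LEN flips the port into jumbo mode. A standalone worked example, with the constants defined locally (assumed to match the DPDK values):

#include <stdint.h>
#include <stdio.h>

#define ETHER_HDR_LEN   14      /* assumed RTE_ETHER_HDR_LEN */
#define ETHER_CRC_LEN    4      /* assumed RTE_ETHER_CRC_LEN */
#define VLAN_TAG_SIZE    4
#define ETHER_MAX_LEN 1518      /* assumed RTE_ETHER_MAX_LEN */

int main(void)
{
        unsigned int mtu[] = { 1500, 9000 };

        for (unsigned int i = 0; i < 2; i++) {
                uint32_t frame = mtu[i] + ETHER_HDR_LEN + ETHER_CRC_LEN +
                                 VLAN_TAG_SIZE;
                /* 1500 -> 1522 (already above 1518 because the VLAN tag is
                 * counted), 9000 -> 9022. */
                printf("mtu %u -> frame %u -> %s\n", mtu[i], frame,
                       frame > ETHER_MAX_LEN ? "jumbo mode" : "normal mode");
        }
        return 0;
}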
*/ + struct rte_mbuf *pkt_last_seg; /**< Last segment of current packet. */ + uint64_t offloads; /**< Offloads of DEV_RX_OFFLOAD_* */ + uint16_t nb_rx_desc; /**< number of RX descriptors. */ + uint16_t rx_tail; /**< current value of RDT register. */ + uint16_t nb_rx_hold; /**< number of held free RX desc. */ + uint16_t rx_free_thresh; /**< max free RX desc to hold. */ + uint16_t queue_id; /**< RX queue index. */ + uint16_t port_id; /**< Device port identifier. */ + uint8_t pthresh; /**< Prefetch threshold register. */ + uint8_t hthresh; /**< Host threshold register. */ + uint8_t wthresh; /**< Write-back threshold register. */ + uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise. */ +}; + +/** + * Hardware context number + */ +enum { + EM_CTX_0 = 0, /**< CTX0 */ + EM_CTX_NUM = 1, /**< CTX NUM */ +}; + +/** Offload features */ +union em_vlan_macip { + uint32_t data; + struct { + uint16_t l3_len:9; /**< L3 (IP) Header Length. */ + uint16_t l2_len:7; /**< L2 (MAC) Header Length. */ + uint16_t vlan_tci; + /**< VLAN Tag Control Identifier (CPU order). */ + } f; +}; + +/* + * Compare mask for vlan_macip_len.data, + * should be in sync with em_vlan_macip.f layout. + * */ +#define TX_VLAN_CMP_MASK 0xFFFF0000 /**< VLAN length - 16-bits. */ +#define TX_MAC_LEN_CMP_MASK 0x0000FE00 /**< MAC length - 7-bits. */ +#define TX_IP_LEN_CMP_MASK 0x000001FF /**< IP length - 9-bits. */ +/** MAC+IP length. */ +#define TX_MACIP_LEN_CMP_MASK (TX_MAC_LEN_CMP_MASK | TX_IP_LEN_CMP_MASK) + +/** + * Structure to check if new context need be built + */ +struct em_ctx_info { + uint64_t flags; /**< ol_flags related to context build. */ + uint32_t cmp_mask; /**< compare mask */ + union em_vlan_macip hdrlen; /**< L2 and L3 header lenghts */ +}; + +/** + * Structure associated with each TX queue. + */ +struct em_tx_queue { + volatile struct e1000_data_desc *tx_ring; /**< TX ring address */ + uint64_t tx_ring_phys_addr; /**< TX ring DMA address. */ + struct em_tx_entry *sw_ring; /**< virtual address of SW ring. */ + volatile uint32_t *tdt_reg_addr; /**< Address of TDT register. */ + uint16_t nb_tx_desc; /**< number of TX descriptors. */ + uint16_t tx_tail; /**< Current value of TDT register. */ + /**< Start freeing TX buffers if there are less free descriptors than + this value. */ + uint16_t tx_free_thresh; + /**< Number of TX descriptors to use before RS bit is set. */ + uint16_t tx_rs_thresh; + /** Number of TX descriptors used since RS bit was set. */ + uint16_t nb_tx_used; + /** Index to last TX descriptor to have been cleaned. */ + uint16_t last_desc_cleaned; + /** Total number of TX descriptors ready to be allocated. */ + uint16_t nb_tx_free; + uint16_t queue_id; /**< TX queue index. */ + uint16_t port_id; /**< Device port identifier. */ + uint8_t pthresh; /**< Prefetch threshold register. */ + uint8_t hthresh; /**< Host threshold register. */ + uint8_t wthresh; /**< Write-back threshold register. 
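The em_vlan_macip union above packs the L3 length (9 bits), the L2 length (7 bits) and the VLAN TCI into a single 32-bit word, so one masked XOR can decide whether a cached TX context descriptor is still valid. A standalone illustration follows; the union is copied from the code above, and since bit-field layout is compiler-dependent the packed values should be read as typical for little-endian GCC/Clang builds.

#include <stdint.h>
#include <stdio.h>

#define TX_MACIP_LEN_CMP_MASK 0x0000FFFF /* selects the l2_len + l3_len fields */

union em_vlan_macip {
        uint32_t data;
        struct {
                uint16_t l3_len:9;  /* L3 (IP) header length  */
                uint16_t l2_len:7;  /* L2 (MAC) header length */
                uint16_t vlan_tci;  /* VLAN tag (CPU order)   */
        } f;
};

int main(void)
{
        union em_vlan_macip a = { .f = { .l3_len = 20, .l2_len = 14, .vlan_tci = 0 } };
        union em_vlan_macip b = { .f = { .l3_len = 40, .l2_len = 14, .vlan_tci = 0 } };

        /* Different L3 length (e.g. IPv4 with options): the masked XOR is
         * non-zero, so a new context descriptor must be built. */
        printf("a.data=0x%08x b.data=0x%08x new context needed: %s\n",
               a.data, b.data,
               ((a.data ^ b.data) & TX_MACIP_LEN_CMP_MASK) ? "yes" : "no");
        return 0;
}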
*/ + struct em_ctx_info ctx_cache; + /**< Hardware context history.*/ + uint64_t offloads; /**< offloads of DEV_TX_OFFLOAD_* */ +}; + +#if 1 +#define RTE_PMD_USE_PREFETCH +#endif + +#ifdef RTE_PMD_USE_PREFETCH +#define rte_em_prefetch(p) rte_prefetch0(p) +#else +#define rte_em_prefetch(p) do {} while(0) +#endif + +#ifdef RTE_PMD_PACKET_PREFETCH +#define rte_packet_prefetch(p) rte_prefetch1(p) +#else +#define rte_packet_prefetch(p) do {} while(0) +#endif + +#ifndef DEFAULT_TX_FREE_THRESH +#define DEFAULT_TX_FREE_THRESH 32 +#endif /* DEFAULT_TX_FREE_THRESH */ + +#ifndef DEFAULT_TX_RS_THRESH +#define DEFAULT_TX_RS_THRESH 32 +#endif /* DEFAULT_TX_RS_THRESH */ + + +/********************************************************************* + * + * TX function + * + **********************************************************************/ + +/* + * Populates TX context descriptor. + */ +static inline void +em_set_xmit_ctx(struct em_tx_queue* txq, + volatile struct e1000_context_desc *ctx_txd, + uint64_t flags, + union em_vlan_macip hdrlen) +{ + uint32_t cmp_mask, cmd_len; + uint16_t ipcse, l2len; + struct e1000_context_desc ctx; + + cmp_mask = 0; + cmd_len = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_C; + + l2len = hdrlen.f.l2_len; + ipcse = (uint16_t)(l2len + hdrlen.f.l3_len); + + /* setup IPCS* fields */ + ctx.lower_setup.ip_fields.ipcss = (uint8_t)l2len; + ctx.lower_setup.ip_fields.ipcso = (uint8_t)(l2len + + offsetof(struct rte_ipv4_hdr, hdr_checksum)); + + /* + * When doing checksum or TCP segmentation with IPv6 headers, + * IPCSE field should be set t0 0. + */ + if (flags & PKT_TX_IP_CKSUM) { + ctx.lower_setup.ip_fields.ipcse = + (uint16_t)rte_cpu_to_le_16(ipcse - 1); + cmd_len |= E1000_TXD_CMD_IP; + cmp_mask |= TX_MACIP_LEN_CMP_MASK; + } else { + ctx.lower_setup.ip_fields.ipcse = 0; + } + + /* setup TUCS* fields */ + ctx.upper_setup.tcp_fields.tucss = (uint8_t)ipcse; + ctx.upper_setup.tcp_fields.tucse = 0; + + switch (flags & PKT_TX_L4_MASK) { + case PKT_TX_UDP_CKSUM: + ctx.upper_setup.tcp_fields.tucso = (uint8_t)(ipcse + + offsetof(struct rte_udp_hdr, dgram_cksum)); + cmp_mask |= TX_MACIP_LEN_CMP_MASK; + break; + case PKT_TX_TCP_CKSUM: + ctx.upper_setup.tcp_fields.tucso = (uint8_t)(ipcse + + offsetof(struct rte_tcp_hdr, cksum)); + cmd_len |= E1000_TXD_CMD_TCP; + cmp_mask |= TX_MACIP_LEN_CMP_MASK; + break; + default: + ctx.upper_setup.tcp_fields.tucso = 0; + } + + ctx.cmd_and_length = rte_cpu_to_le_32(cmd_len); + ctx.tcp_seg_setup.data = 0; + + *ctx_txd = ctx; + + txq->ctx_cache.flags = flags; + txq->ctx_cache.cmp_mask = cmp_mask; + txq->ctx_cache.hdrlen = hdrlen; +} + +/* + * Check which hardware context can be used. Use the existing match + * or create a new context descriptor. 
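em_set_xmit_ctx() above fills the context descriptor with byte offsets into the frame: IPCSS/IPCSO/IPCSE bound the IP header checksum and TUCSS/TUCSO locate the L4 checksum. The worked example below spells those offsets out for a plain TCP/IPv4 frame (14-byte Ethernet header, 20-byte IPv4 header); the two hard-coded offsetof() values are the standard IPv4/TCP checksum field positions and are assumed to match the rte_ipv4_hdr/rte_tcp_hdr layouts.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint16_t l2_len = 14;   /* Ethernet header             */
        uint16_t l3_len = 20;   /* IPv4 header without options */

        uint16_t ipcss = l2_len;                /* IP checksum start                   */
        uint16_t ipcso = l2_len + 10;           /* offsetof(ipv4, hdr_checksum) == 10  */
        uint16_t ipcse = l2_len + l3_len - 1;   /* last byte covered by the IP csum    */
        uint16_t tucss = l2_len + l3_len;       /* L4 checksum start                   */
        uint16_t tucso = tucss + 16;            /* offsetof(tcp, cksum) == 16          */

        /* Expected: ipcss=14 ipcso=24 ipcse=33 tucss=34 tucso=50 */
        printf("ipcss=%u ipcso=%u ipcse=%u tucss=%u tucso=%u\n",
               ipcss, ipcso, ipcse, tucss, tucso);
        return 0;
}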
+ */ +static inline uint32_t +what_ctx_update(struct em_tx_queue *txq, uint64_t flags, + union em_vlan_macip hdrlen) +{ + /* If match with the current context */ + if (likely (txq->ctx_cache.flags == flags && + ((txq->ctx_cache.hdrlen.data ^ hdrlen.data) & + txq->ctx_cache.cmp_mask) == 0)) + return EM_CTX_0; + + /* Mismatch */ + return EM_CTX_NUM; +} + +/* Reset transmit descriptors after they have been used */ +static inline int +em_xmit_cleanup(struct em_tx_queue *txq) +{ + struct em_tx_entry *sw_ring = txq->sw_ring; + volatile struct e1000_data_desc *txr = txq->tx_ring; + uint16_t last_desc_cleaned = txq->last_desc_cleaned; + uint16_t nb_tx_desc = txq->nb_tx_desc; + uint16_t desc_to_clean_to; + uint16_t nb_tx_to_clean; + + /* Determine the last descriptor needing to be cleaned */ + desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh); + if (desc_to_clean_to >= nb_tx_desc) + desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc); + + /* Check to make sure the last descriptor to clean is done */ + desc_to_clean_to = sw_ring[desc_to_clean_to].last_id; + if (! (txr[desc_to_clean_to].upper.fields.status & E1000_TXD_STAT_DD)) + { + PMD_TX_FREE_LOG(DEBUG, + "TX descriptor %4u is not done" + "(port=%d queue=%d)", desc_to_clean_to, + txq->port_id, txq->queue_id); + /* Failed to clean any descriptors, better luck next time */ + return -(1); + } + + /* Figure out how many descriptors will be cleaned */ + if (last_desc_cleaned > desc_to_clean_to) + nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) + + desc_to_clean_to); + else + nb_tx_to_clean = (uint16_t)(desc_to_clean_to - + last_desc_cleaned); + + PMD_TX_FREE_LOG(DEBUG, + "Cleaning %4u TX descriptors: %4u to %4u " + "(port=%d queue=%d)", nb_tx_to_clean, + last_desc_cleaned, desc_to_clean_to, txq->port_id, + txq->queue_id); + + /* + * The last descriptor to clean is done, so that means all the + * descriptors from the last descriptor that was cleaned + * up to the last descriptor with the RS bit set + * are done. Only reset the threshold descriptor. + */ + txr[desc_to_clean_to].upper.fields.status = 0; + + /* Update the txq to reflect the last descriptor that was cleaned */ + txq->last_desc_cleaned = desc_to_clean_to; + txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean); + + /* No Error */ + return 0; +} + +static inline uint32_t +tx_desc_cksum_flags_to_upper(uint64_t ol_flags) +{ + static const uint32_t l4_olinfo[2] = {0, E1000_TXD_POPTS_TXSM << 8}; + static const uint32_t l3_olinfo[2] = {0, E1000_TXD_POPTS_IXSM << 8}; + uint32_t tmp; + + tmp = l4_olinfo[(ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM]; + tmp |= l3_olinfo[(ol_flags & PKT_TX_IP_CKSUM) != 0]; + return tmp; +} + +uint16_t +eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + struct em_tx_queue *txq; + struct em_tx_entry *sw_ring; + struct em_tx_entry *txe, *txn; + volatile struct e1000_data_desc *txr; + volatile struct e1000_data_desc *txd; + struct rte_mbuf *tx_pkt; + struct rte_mbuf *m_seg; + uint64_t buf_dma_addr; + uint32_t popts_spec; + uint32_t cmd_type_len; + uint16_t slen; + uint64_t ol_flags; + uint16_t tx_id; + uint16_t tx_last; + uint16_t nb_tx; + uint16_t nb_used; + uint64_t tx_ol_req; + uint32_t ctx; + uint32_t new_ctx; + union em_vlan_macip hdrlen; + + txq = tx_queue; + sw_ring = txq->sw_ring; + txr = txq->tx_ring; + tx_id = txq->tx_tail; + txe = &sw_ring[tx_id]; + + /* Determine if the descriptor ring needs to be cleaned. 
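em_xmit_cleanup() above advances by tx_rs_thresh descriptors per call and has to handle ring wrap-around in 16-bit arithmetic. The standalone worked example below isolates just that index math (the sw_ring last_id indirection and the DD status check are omitted); the ring size and thresholds are illustrative values.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint16_t nb_tx_desc = 512;
        uint16_t tx_rs_thresh = 32;
        uint16_t last_desc_cleaned = 500;       /* near the end of the ring */

        uint16_t desc_to_clean_to = (uint16_t)(last_desc_cleaned + tx_rs_thresh);
        if (desc_to_clean_to >= nb_tx_desc)
                desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc); /* 532 -> 20 */

        uint16_t nb_tx_to_clean;
        if (last_desc_cleaned > desc_to_clean_to)       /* wrapped */
                nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
                                            desc_to_clean_to);               /* 12 + 20 = 32 */
        else
                nb_tx_to_clean = (uint16_t)(desc_to_clean_to - last_desc_cleaned);

        printf("clean up to descriptor %u, %u descriptors freed\n",
               desc_to_clean_to, nb_tx_to_clean);
        return 0;
}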
*/ + if (txq->nb_tx_free < txq->tx_free_thresh) + em_xmit_cleanup(txq); + + /* TX loop */ + for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) { + new_ctx = 0; + tx_pkt = *tx_pkts++; + + RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf); + + /* + * Determine how many (if any) context descriptors + * are needed for offload functionality. + */ + ol_flags = tx_pkt->ol_flags; + + /* If hardware offload required */ + tx_ol_req = (ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK)); + if (tx_ol_req) { + hdrlen.f.vlan_tci = tx_pkt->vlan_tci; + hdrlen.f.l2_len = tx_pkt->l2_len; + hdrlen.f.l3_len = tx_pkt->l3_len; + /* If new context to be built or reuse the exist ctx. */ + ctx = what_ctx_update(txq, tx_ol_req, hdrlen); + + /* Only allocate context descriptor if required*/ + new_ctx = (ctx == EM_CTX_NUM); + } + + /* + * Keep track of how many descriptors are used this loop + * This will always be the number of segments + the number of + * Context descriptors required to transmit the packet + */ + nb_used = (uint16_t)(tx_pkt->nb_segs + new_ctx); + + /* + * The number of descriptors that must be allocated for a + * packet is the number of segments of that packet, plus 1 + * Context Descriptor for the hardware offload, if any. + * Determine the last TX descriptor to allocate in the TX ring + * for the packet, starting from the current position (tx_id) + * in the ring. + */ + tx_last = (uint16_t) (tx_id + nb_used - 1); + + /* Circular ring */ + if (tx_last >= txq->nb_tx_desc) + tx_last = (uint16_t) (tx_last - txq->nb_tx_desc); + + PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u" + " tx_first=%u tx_last=%u", + (unsigned) txq->port_id, + (unsigned) txq->queue_id, + (unsigned) tx_pkt->pkt_len, + (unsigned) tx_id, + (unsigned) tx_last); + + /* + * Make sure there are enough TX descriptors available to + * transmit the entire packet. + * nb_used better be less than or equal to txq->tx_rs_thresh + */ + while (unlikely (nb_used > txq->nb_tx_free)) { + PMD_TX_FREE_LOG(DEBUG, "Not enough free TX descriptors " + "nb_used=%4u nb_free=%4u " + "(port=%d queue=%d)", + nb_used, txq->nb_tx_free, + txq->port_id, txq->queue_id); + + if (em_xmit_cleanup(txq) != 0) { + /* Could not clean any descriptors */ + if (nb_tx == 0) + return 0; + goto end_of_tx; + } + } + + /* + * By now there are enough free TX descriptors to transmit + * the packet. + */ + + /* + * Set common flags of all TX Data Descriptors. + * + * The following bits must be set in all Data Descriptors: + * - E1000_TXD_DTYP_DATA + * - E1000_TXD_DTYP_DEXT + * + * The following bits must be set in the first Data Descriptor + * and are ignored in the other ones: + * - E1000_TXD_POPTS_IXSM + * - E1000_TXD_POPTS_TXSM + * + * The following bits must be set in the last Data Descriptor + * and are ignored in the other ones: + * - E1000_TXD_CMD_VLE + * - E1000_TXD_CMD_IFCS + * + * The following bits must only be set in the last Data + * Descriptor: + * - E1000_TXD_CMD_EOP + * + * The following bits can be set in any Data Descriptor, but + * are only set in the last Data Descriptor: + * - E1000_TXD_CMD_RS + */ + cmd_type_len = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D | + E1000_TXD_CMD_IFCS; + popts_spec = 0; + + /* Set VLAN Tag offload fields. 
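On the application side, the offload decisions taken in this transmit loop are driven entirely by the mbuf's ol_flags and l2_len/l3_len fields, which eth_em_prep_pkts() further down in this file also validates. A hedged sketch of that usage, assuming the standard DPDK mbuf/ethdev API:

#include <rte_ethdev.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_mbuf.h>

/* Request IPv4 header checksum offload for one packet and send it.
 * Returns the number of packets actually transmitted (0 or 1). */
static uint16_t
send_with_ip_cksum(uint16_t port_id, uint16_t queue_id, struct rte_mbuf *m)
{
        m->l2_len = sizeof(struct rte_ether_hdr);
        m->l3_len = sizeof(struct rte_ipv4_hdr);
        m->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM;

        /* eth_em_prep_pkts() rejects unsupported flags and, via
         * rte_net_intel_cksum_prepare(), readies the headers (for example
         * clearing the IPv4 checksum field) before the hardware fills them. */
        if (rte_eth_tx_prepare(port_id, queue_id, &m, 1) != 1)
                return 0;

        return rte_eth_tx_burst(port_id, queue_id, &m, 1);
}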
*/ + if (ol_flags & PKT_TX_VLAN_PKT) { + cmd_type_len |= E1000_TXD_CMD_VLE; + popts_spec = tx_pkt->vlan_tci << E1000_TXD_VLAN_SHIFT; + } + + if (tx_ol_req) { + /* + * Setup the TX Context Descriptor if required + */ + if (new_ctx) { + volatile struct e1000_context_desc *ctx_txd; + + ctx_txd = (volatile struct e1000_context_desc *) + &txr[tx_id]; + + txn = &sw_ring[txe->next_id]; + RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf); + + if (txe->mbuf != NULL) { + rte_pktmbuf_free_seg(txe->mbuf); + txe->mbuf = NULL; + } + + em_set_xmit_ctx(txq, ctx_txd, tx_ol_req, + hdrlen); + + txe->last_id = tx_last; + tx_id = txe->next_id; + txe = txn; + } + + /* + * Setup the TX Data Descriptor, + * This path will go through + * whatever new/reuse the context descriptor + */ + popts_spec |= tx_desc_cksum_flags_to_upper(ol_flags); + } + + m_seg = tx_pkt; + do { + txd = &txr[tx_id]; + txn = &sw_ring[txe->next_id]; + + if (txe->mbuf != NULL) + rte_pktmbuf_free_seg(txe->mbuf); + txe->mbuf = m_seg; + + /* + * Set up Transmit Data Descriptor. + */ + slen = m_seg->data_len; + buf_dma_addr = rte_mbuf_data_iova(m_seg); + + txd->buffer_addr = rte_cpu_to_le_64(buf_dma_addr); + txd->lower.data = rte_cpu_to_le_32(cmd_type_len | slen); + txd->upper.data = rte_cpu_to_le_32(popts_spec); + + txe->last_id = tx_last; + tx_id = txe->next_id; + txe = txn; + m_seg = m_seg->next; + } while (m_seg != NULL); + + /* + * The last packet data descriptor needs End Of Packet (EOP) + */ + cmd_type_len |= E1000_TXD_CMD_EOP; + txq->nb_tx_used = (uint16_t)(txq->nb_tx_used + nb_used); + txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used); + + /* Set RS bit only on threshold packets' last descriptor */ + if (txq->nb_tx_used >= txq->tx_rs_thresh) { + PMD_TX_FREE_LOG(DEBUG, + "Setting RS bit on TXD id=%4u " + "(port=%d queue=%d)", + tx_last, txq->port_id, txq->queue_id); + + cmd_type_len |= E1000_TXD_CMD_RS; + + /* Update txq RS bit counters */ + txq->nb_tx_used = 0; + } + txd->lower.data |= rte_cpu_to_le_32(cmd_type_len); + } +end_of_tx: + rte_wmb(); + + /* + * Set the Transmit Descriptor Tail (TDT) + */ + PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u", + (unsigned) txq->port_id, (unsigned) txq->queue_id, + (unsigned) tx_id, (unsigned) nb_tx); + E1000_PCI_REG_WRITE_RELAXED(txq->tdt_reg_addr, tx_id); + txq->tx_tail = tx_id; + + return nb_tx; +} + +/********************************************************************* + * + * TX prep functions + * + **********************************************************************/ +uint16_t +eth_em_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + int i, ret; + struct rte_mbuf *m; + + for (i = 0; i < nb_pkts; i++) { + m = tx_pkts[i]; + + if (m->ol_flags & E1000_TX_OFFLOAD_NOTSUP_MASK) { + rte_errno = ENOTSUP; + return i; + } + +#ifdef RTE_LIBRTE_ETHDEV_DEBUG + ret = rte_validate_tx_offload(m); + if (ret != 0) { + rte_errno = -ret; + return i; + } +#endif + ret = rte_net_intel_cksum_prepare(m); + if (ret != 0) { + rte_errno = -ret; + return i; + } + } + + return i; +} + +/********************************************************************* + * + * RX functions + * + **********************************************************************/ + +static inline uint64_t +rx_desc_status_to_pkt_flags(uint32_t rx_status) +{ + uint64_t pkt_flags; + + /* Check if VLAN present */ + pkt_flags = ((rx_status & E1000_RXD_STAT_VP) ? 
+ PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED : 0); + + return pkt_flags; +} + +static inline uint64_t +rx_desc_error_to_pkt_flags(uint32_t rx_error) +{ + uint64_t pkt_flags = 0; + + if (rx_error & E1000_RXD_ERR_IPE) + pkt_flags |= PKT_RX_IP_CKSUM_BAD; + if (rx_error & E1000_RXD_ERR_TCPE) + pkt_flags |= PKT_RX_L4_CKSUM_BAD; + return pkt_flags; +} + +uint16_t +eth_em_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + volatile struct e1000_rx_desc *rx_ring; + volatile struct e1000_rx_desc *rxdp; + struct em_rx_queue *rxq; + struct em_rx_entry *sw_ring; + struct em_rx_entry *rxe; + struct rte_mbuf *rxm; + struct rte_mbuf *nmb; + struct e1000_rx_desc rxd; + uint64_t dma_addr; + uint16_t pkt_len; + uint16_t rx_id; + uint16_t nb_rx; + uint16_t nb_hold; + uint8_t status; + + rxq = rx_queue; + + nb_rx = 0; + nb_hold = 0; + rx_id = rxq->rx_tail; + rx_ring = rxq->rx_ring; + sw_ring = rxq->sw_ring; + while (nb_rx < nb_pkts) { + /* + * The order of operations here is important as the DD status + * bit must not be read after any other descriptor fields. + * rx_ring and rxdp are pointing to volatile data so the order + * of accesses cannot be reordered by the compiler. If they were + * not volatile, they could be reordered which could lead to + * using invalid descriptor fields when read from rxd. + */ + rxdp = &rx_ring[rx_id]; + status = rxdp->status; + if (! (status & E1000_RXD_STAT_DD)) + break; + rxd = *rxdp; + + /* + * End of packet. + * + * If the E1000_RXD_STAT_EOP flag is not set, the RX packet is + * likely to be invalid and to be dropped by the various + * validation checks performed by the network stack. + * + * Allocate a new mbuf to replenish the RX ring descriptor. + * If the allocation fails: + * - arrange for that RX descriptor to be the first one + * being parsed the next time the receive function is + * invoked [on the same queue]. + * + * - Stop parsing the RX ring and return immediately. + * + * This policy do not drop the packet received in the RX + * descriptor for which the allocation of a new mbuf failed. + * Thus, it allows that packet to be later retrieved if + * mbuf have been freed in the mean time. + * As a side effect, holding RX descriptors instead of + * systematically giving them back to the NIC may lead to + * RX ring exhaustion situations. + * However, the NIC can gracefully prevent such situations + * to happen by sending specific "back-pressure" flow control + * frames to its peer(s). + */ + PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u " + "status=0x%x pkt_len=%u", + (unsigned) rxq->port_id, (unsigned) rxq->queue_id, + (unsigned) rx_id, (unsigned) status, + (unsigned) rte_le_to_cpu_16(rxd.length)); + + nmb = rte_mbuf_raw_alloc(rxq->mb_pool); + if (nmb == NULL) { + PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u " + "queue_id=%u", + (unsigned) rxq->port_id, + (unsigned) rxq->queue_id); + rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++; + break; + } + + nb_hold++; + rxe = &sw_ring[rx_id]; + rx_id++; + if (rx_id == rxq->nb_rx_desc) + rx_id = 0; + + /* Prefetch next mbuf while processing current one. */ + rte_em_prefetch(sw_ring[rx_id].mbuf); + + /* + * When next RX descriptor is on a cache-line boundary, + * prefetch the next 4 RX descriptors and the next 8 pointers + * to mbufs. + */ + if ((rx_id & 0x3) == 0) { + rte_em_prefetch(&rx_ring[rx_id]); + rte_em_prefetch(&sw_ring[rx_id]); + } + + /* Rearm RXD: attach new mbuf and reset status to zero. 
*/ + + rxm = rxe->mbuf; + rxe->mbuf = nmb; + dma_addr = + rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb)); + rxdp->buffer_addr = dma_addr; + rxdp->status = 0; + + /* + * Initialize the returned mbuf. + * 1) setup generic mbuf fields: + * - number of segments, + * - next segment, + * - packet length, + * - RX port identifier. + * 2) integrate hardware offload data, if any: + * - RSS flag & hash, + * - IP checksum flag, + * - VLAN TCI, if any, + * - error flags. + */ + pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.length) - + rxq->crc_len); + rxm->data_off = RTE_PKTMBUF_HEADROOM; + rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off); + rxm->nb_segs = 1; + rxm->next = NULL; + rxm->pkt_len = pkt_len; + rxm->data_len = pkt_len; + rxm->port = rxq->port_id; + + rxm->ol_flags = rx_desc_status_to_pkt_flags(status); + rxm->ol_flags = rxm->ol_flags | + rx_desc_error_to_pkt_flags(rxd.errors); + + /* Only valid if PKT_RX_VLAN set in pkt_flags */ + rxm->vlan_tci = rte_le_to_cpu_16(rxd.special); + + /* + * Store the mbuf address into the next entry of the array + * of returned packets. + */ + rx_pkts[nb_rx++] = rxm; + } + rxq->rx_tail = rx_id; + + /* + * If the number of free RX descriptors is greater than the RX free + * threshold of the queue, advance the Receive Descriptor Tail (RDT) + * register. + * Update the RDT with the value of the last processed RX descriptor + * minus 1, to guarantee that the RDT register is never equal to the + * RDH register, which creates a "full" ring situtation from the + * hardware point of view... + */ + nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold); + if (nb_hold > rxq->rx_free_thresh) { + PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u " + "nb_hold=%u nb_rx=%u", + (unsigned) rxq->port_id, (unsigned) rxq->queue_id, + (unsigned) rx_id, (unsigned) nb_hold, + (unsigned) nb_rx); + rx_id = (uint16_t) ((rx_id == 0) ? + (rxq->nb_rx_desc - 1) : (rx_id - 1)); + E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id); + nb_hold = 0; + } + rxq->nb_rx_hold = nb_hold; + return nb_rx; +} + +uint16_t +eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + struct em_rx_queue *rxq; + volatile struct e1000_rx_desc *rx_ring; + volatile struct e1000_rx_desc *rxdp; + struct em_rx_entry *sw_ring; + struct em_rx_entry *rxe; + struct rte_mbuf *first_seg; + struct rte_mbuf *last_seg; + struct rte_mbuf *rxm; + struct rte_mbuf *nmb; + struct e1000_rx_desc rxd; + uint64_t dma; /* Physical address of mbuf data buffer */ + uint16_t rx_id; + uint16_t nb_rx; + uint16_t nb_hold; + uint16_t data_len; + uint8_t status; + + rxq = rx_queue; + + nb_rx = 0; + nb_hold = 0; + rx_id = rxq->rx_tail; + rx_ring = rxq->rx_ring; + sw_ring = rxq->sw_ring; + + /* + * Retrieve RX context of current packet, if any. + */ + first_seg = rxq->pkt_first_seg; + last_seg = rxq->pkt_last_seg; + + while (nb_rx < nb_pkts) { + next_desc: + /* + * The order of operations here is important as the DD status + * bit must not be read after any other descriptor fields. + * rx_ring and rxdp are pointing to volatile data so the order + * of accesses cannot be reordered by the compiler. If they were + * not volatile, they could be reordered which could lead to + * using invalid descriptor fields when read from rxd. + */ + rxdp = &rx_ring[rx_id]; + status = rxdp->status; + if (! (status & E1000_RXD_STAT_DD)) + break; + rxd = *rxdp; + + /* + * Descriptor done. + * + * Allocate a new mbuf to replenish the RX ring descriptor. 
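The receive routine above is what an application reaches through rte_eth_rx_burst(), and the per-packet ol_flags it fills in (VLAN and checksum status) are meant to be checked before the mbuf is used. A minimal polling sketch, assuming the standard DPDK ethdev/mbuf API:

#include <rte_ethdev.h>
#include <rte_mbuf.h>

#define BURST_SIZE 32

/* Poll one RX queue once, drop packets flagged with a bad IP checksum and
 * return how many packets were kept; "kept" must hold BURST_SIZE entries. */
static uint16_t
poll_rx_once(uint16_t port_id, uint16_t queue_id, struct rte_mbuf **kept)
{
        struct rte_mbuf *pkts[BURST_SIZE];
        uint16_t nb_rx, i, nb_kept = 0;

        nb_rx = rte_eth_rx_burst(port_id, queue_id, pkts, BURST_SIZE);
        for (i = 0; i < nb_rx; i++) {
                if (pkts[i]->ol_flags & PKT_RX_IP_CKSUM_BAD) {
                        /* flag set by rx_desc_error_to_pkt_flags() above */
                        rte_pktmbuf_free(pkts[i]);
                        continue;
                }
                /* A stripped VLAN tag, if any, is available in vlan_tci. */
                kept[nb_kept++] = pkts[i];
        }
        return nb_kept;
}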
+ * If the allocation fails: + * - arrange for that RX descriptor to be the first one + * being parsed the next time the receive function is + * invoked [on the same queue]. + * + * - Stop parsing the RX ring and return immediately. + * + * This policy does not drop the packet received in the RX + * descriptor for which the allocation of a new mbuf failed. + * Thus, it allows that packet to be later retrieved if + * mbuf have been freed in the mean time. + * As a side effect, holding RX descriptors instead of + * systematically giving them back to the NIC may lead to + * RX ring exhaustion situations. + * However, the NIC can gracefully prevent such situations + * to happen by sending specific "back-pressure" flow control + * frames to its peer(s). + */ + PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u " + "status=0x%x data_len=%u", + (unsigned) rxq->port_id, (unsigned) rxq->queue_id, + (unsigned) rx_id, (unsigned) status, + (unsigned) rte_le_to_cpu_16(rxd.length)); + + nmb = rte_mbuf_raw_alloc(rxq->mb_pool); + if (nmb == NULL) { + PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u " + "queue_id=%u", (unsigned) rxq->port_id, + (unsigned) rxq->queue_id); + rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++; + break; + } + + nb_hold++; + rxe = &sw_ring[rx_id]; + rx_id++; + if (rx_id == rxq->nb_rx_desc) + rx_id = 0; + + /* Prefetch next mbuf while processing current one. */ + rte_em_prefetch(sw_ring[rx_id].mbuf); + + /* + * When next RX descriptor is on a cache-line boundary, + * prefetch the next 4 RX descriptors and the next 8 pointers + * to mbufs. + */ + if ((rx_id & 0x3) == 0) { + rte_em_prefetch(&rx_ring[rx_id]); + rte_em_prefetch(&sw_ring[rx_id]); + } + + /* + * Update RX descriptor with the physical address of the new + * data buffer of the new allocated mbuf. + */ + rxm = rxe->mbuf; + rxe->mbuf = nmb; + dma = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb)); + rxdp->buffer_addr = dma; + rxdp->status = 0; + + /* + * Set data length & data buffer address of mbuf. + */ + data_len = rte_le_to_cpu_16(rxd.length); + rxm->data_len = data_len; + rxm->data_off = RTE_PKTMBUF_HEADROOM; + + /* + * If this is the first buffer of the received packet, + * set the pointer to the first mbuf of the packet and + * initialize its context. + * Otherwise, update the total length and the number of segments + * of the current scattered packet, and update the pointer to + * the last mbuf of the current packet. + */ + if (first_seg == NULL) { + first_seg = rxm; + first_seg->pkt_len = data_len; + first_seg->nb_segs = 1; + } else { + first_seg->pkt_len += data_len; + first_seg->nb_segs++; + last_seg->next = rxm; + } + + /* + * If this is not the last buffer of the received packet, + * update the pointer to the last mbuf of the current scattered + * packet and continue to parse the RX ring. + */ + if (! (status & E1000_RXD_STAT_EOP)) { + last_seg = rxm; + goto next_desc; + } + + /* + * This is the last buffer of the received packet. + * If the CRC is not stripped by the hardware: + * - Subtract the CRC length from the total packet length. + * - If the last buffer only contains the whole CRC or a part + * of it, free the mbuf associated to the last buffer. + * If part of the CRC is also contained in the previous + * mbuf, subtract the length of that CRC part from the + * data length of the previous mbuf. 
+ */ + rxm->next = NULL; + if (unlikely(rxq->crc_len > 0)) { + first_seg->pkt_len -= RTE_ETHER_CRC_LEN; + if (data_len <= RTE_ETHER_CRC_LEN) { + rte_pktmbuf_free_seg(rxm); + first_seg->nb_segs--; + last_seg->data_len = (uint16_t) + (last_seg->data_len - + (RTE_ETHER_CRC_LEN - data_len)); + last_seg->next = NULL; + } else + rxm->data_len = (uint16_t) + (data_len - RTE_ETHER_CRC_LEN); + } + + /* + * Initialize the first mbuf of the returned packet: + * - RX port identifier, + * - hardware offload data, if any: + * - IP checksum flag, + * - error flags. + */ + first_seg->port = rxq->port_id; + + first_seg->ol_flags = rx_desc_status_to_pkt_flags(status); + first_seg->ol_flags = first_seg->ol_flags | + rx_desc_error_to_pkt_flags(rxd.errors); + + /* Only valid if PKT_RX_VLAN set in pkt_flags */ + rxm->vlan_tci = rte_le_to_cpu_16(rxd.special); + + /* Prefetch data of first segment, if configured to do so. */ + rte_packet_prefetch((char *)first_seg->buf_addr + + first_seg->data_off); + + /* + * Store the mbuf address into the next entry of the array + * of returned packets. + */ + rx_pkts[nb_rx++] = first_seg; + + /* + * Setup receipt context for a new packet. + */ + first_seg = NULL; + } + + /* + * Record index of the next RX descriptor to probe. + */ + rxq->rx_tail = rx_id; + + /* + * Save receive context. + */ + rxq->pkt_first_seg = first_seg; + rxq->pkt_last_seg = last_seg; + + /* + * If the number of free RX descriptors is greater than the RX free + * threshold of the queue, advance the Receive Descriptor Tail (RDT) + * register. + * Update the RDT with the value of the last processed RX descriptor + * minus 1, to guarantee that the RDT register is never equal to the + * RDH register, which creates a "full" ring situtation from the + * hardware point of view... + */ + nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold); + if (nb_hold > rxq->rx_free_thresh) { + PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u " + "nb_hold=%u nb_rx=%u", + (unsigned) rxq->port_id, (unsigned) rxq->queue_id, + (unsigned) rx_id, (unsigned) nb_hold, + (unsigned) nb_rx); + rx_id = (uint16_t) ((rx_id == 0) ? 
+ (rxq->nb_rx_desc - 1) : (rx_id - 1)); + E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id); + nb_hold = 0; + } + rxq->nb_rx_hold = nb_hold; + return nb_rx; +} + +#define EM_MAX_BUF_SIZE 16384 +#define EM_RCTL_FLXBUF_STEP 1024 + +static void +em_tx_queue_release_mbufs(struct em_tx_queue *txq) +{ + unsigned i; + + if (txq->sw_ring != NULL) { + for (i = 0; i != txq->nb_tx_desc; i++) { + if (txq->sw_ring[i].mbuf != NULL) { + rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf); + txq->sw_ring[i].mbuf = NULL; + } + } + } +} + +static void +em_tx_queue_release(struct em_tx_queue *txq) +{ + if (txq != NULL) { + em_tx_queue_release_mbufs(txq); + rte_free(txq->sw_ring); + rte_free(txq); + } +} + +void +eth_em_tx_queue_release(void *txq) +{ + em_tx_queue_release(txq); +} + +/* (Re)set dynamic em_tx_queue fields to defaults */ +static void +em_reset_tx_queue(struct em_tx_queue *txq) +{ + uint16_t i, nb_desc, prev; + static const struct e1000_data_desc txd_init = { + .upper.fields = {.status = E1000_TXD_STAT_DD}, + }; + + nb_desc = txq->nb_tx_desc; + + /* Initialize ring entries */ + + prev = (uint16_t) (nb_desc - 1); + + for (i = 0; i < nb_desc; i++) { + txq->tx_ring[i] = txd_init; + txq->sw_ring[i].mbuf = NULL; + txq->sw_ring[i].last_id = i; + txq->sw_ring[prev].next_id = i; + prev = i; + } + + /* + * Always allow 1 descriptor to be un-allocated to avoid + * a H/W race condition + */ + txq->nb_tx_free = (uint16_t)(nb_desc - 1); + txq->last_desc_cleaned = (uint16_t)(nb_desc - 1); + txq->nb_tx_used = 0; + txq->tx_tail = 0; + + memset((void*)&txq->ctx_cache, 0, sizeof (txq->ctx_cache)); +} + +uint64_t +em_get_tx_port_offloads_capa(struct rte_eth_dev *dev) +{ + uint64_t tx_offload_capa; + + RTE_SET_USED(dev); + tx_offload_capa = + DEV_TX_OFFLOAD_MULTI_SEGS | + DEV_TX_OFFLOAD_VLAN_INSERT | + DEV_TX_OFFLOAD_IPV4_CKSUM | + DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_TCP_CKSUM; + + return tx_offload_capa; +} + +uint64_t +em_get_tx_queue_offloads_capa(struct rte_eth_dev *dev) +{ + uint64_t tx_queue_offload_capa; + + /* + * As only one Tx queue can be used, let per queue offloading + * capability be same to per port queue offloading capability + * for better convenience. + */ + tx_queue_offload_capa = em_get_tx_port_offloads_capa(dev); + + return tx_queue_offload_capa; +} + +int +eth_em_tx_queue_setup(struct rte_eth_dev *dev, + uint16_t queue_idx, + uint16_t nb_desc, + unsigned int socket_id, + const struct rte_eth_txconf *tx_conf) +{ + const struct rte_memzone *tz; + struct em_tx_queue *txq; + struct e1000_hw *hw; + uint32_t tsize; + uint16_t tx_rs_thresh, tx_free_thresh; + uint64_t offloads; + + hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads; + + /* + * Validate number of transmit descriptors. + * It must not exceed hardware maximum, and must be multiple + * of E1000_ALIGN. + */ + if (nb_desc % EM_TXD_ALIGN != 0 || + (nb_desc > E1000_MAX_RING_DESC) || + (nb_desc < E1000_MIN_RING_DESC)) { + return -(EINVAL); + } + + tx_free_thresh = tx_conf->tx_free_thresh; + if (tx_free_thresh == 0) + tx_free_thresh = (uint16_t)RTE_MIN(nb_desc / 4, + DEFAULT_TX_FREE_THRESH); + + tx_rs_thresh = tx_conf->tx_rs_thresh; + if (tx_rs_thresh == 0) + tx_rs_thresh = (uint16_t)RTE_MIN(tx_free_thresh, + DEFAULT_TX_RS_THRESH); + + if (tx_free_thresh >= (nb_desc - 3)) { + PMD_INIT_LOG(ERR, "tx_free_thresh must be less than the " + "number of TX descriptors minus 3. 
" + "(tx_free_thresh=%u port=%d queue=%d)", + (unsigned int)tx_free_thresh, + (int)dev->data->port_id, (int)queue_idx); + return -(EINVAL); + } + if (tx_rs_thresh > tx_free_thresh) { + PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than or equal to " + "tx_free_thresh. (tx_free_thresh=%u " + "tx_rs_thresh=%u port=%d queue=%d)", + (unsigned int)tx_free_thresh, + (unsigned int)tx_rs_thresh, + (int)dev->data->port_id, + (int)queue_idx); + return -(EINVAL); + } + + /* + * If rs_bit_thresh is greater than 1, then TX WTHRESH should be + * set to 0. If WTHRESH is greater than zero, the RS bit is ignored + * by the NIC and all descriptors are written back after the NIC + * accumulates WTHRESH descriptors. + */ + if (tx_conf->tx_thresh.wthresh != 0 && tx_rs_thresh != 1) { + PMD_INIT_LOG(ERR, "TX WTHRESH must be set to 0 if " + "tx_rs_thresh is greater than 1. (tx_rs_thresh=%u " + "port=%d queue=%d)", (unsigned int)tx_rs_thresh, + (int)dev->data->port_id, (int)queue_idx); + return -(EINVAL); + } + + /* Free memory prior to re-allocation if needed... */ + if (dev->data->tx_queues[queue_idx] != NULL) { + em_tx_queue_release(dev->data->tx_queues[queue_idx]); + dev->data->tx_queues[queue_idx] = NULL; + } + + /* + * Allocate TX ring hardware descriptors. A memzone large enough to + * handle the maximum ring size is allocated in order to allow for + * resizing in later calls to the queue setup function. + */ + tsize = sizeof(txq->tx_ring[0]) * E1000_MAX_RING_DESC; + tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx, tsize, + RTE_CACHE_LINE_SIZE, socket_id); + if (tz == NULL) + return -ENOMEM; + + /* Allocate the tx queue data structure. */ + if ((txq = rte_zmalloc("ethdev TX queue", sizeof(*txq), + RTE_CACHE_LINE_SIZE)) == NULL) + return -ENOMEM; + + /* Allocate software ring */ + if ((txq->sw_ring = rte_zmalloc("txq->sw_ring", + sizeof(txq->sw_ring[0]) * nb_desc, + RTE_CACHE_LINE_SIZE)) == NULL) { + em_tx_queue_release(txq); + return -ENOMEM; + } + + txq->nb_tx_desc = nb_desc; + txq->tx_free_thresh = tx_free_thresh; + txq->tx_rs_thresh = tx_rs_thresh; + txq->pthresh = tx_conf->tx_thresh.pthresh; + txq->hthresh = tx_conf->tx_thresh.hthresh; + txq->wthresh = tx_conf->tx_thresh.wthresh; + txq->queue_id = queue_idx; + txq->port_id = dev->data->port_id; + + txq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(queue_idx)); + txq->tx_ring_phys_addr = tz->iova; + txq->tx_ring = (struct e1000_data_desc *) tz->addr; + + PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64, + txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr); + + em_reset_tx_queue(txq); + + dev->data->tx_queues[queue_idx] = txq; + txq->offloads = offloads; + return 0; +} + +static void +em_rx_queue_release_mbufs(struct em_rx_queue *rxq) +{ + unsigned i; + + if (rxq->sw_ring != NULL) { + for (i = 0; i != rxq->nb_rx_desc; i++) { + if (rxq->sw_ring[i].mbuf != NULL) { + rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf); + rxq->sw_ring[i].mbuf = NULL; + } + } + } +} + +static void +em_rx_queue_release(struct em_rx_queue *rxq) +{ + if (rxq != NULL) { + em_rx_queue_release_mbufs(rxq); + rte_free(rxq->sw_ring); + rte_free(rxq); + } +} + +void +eth_em_rx_queue_release(void *rxq) +{ + em_rx_queue_release(rxq); +} + +/* Reset dynamic em_rx_queue fields back to defaults */ +static void +em_reset_rx_queue(struct em_rx_queue *rxq) +{ + rxq->rx_tail = 0; + rxq->nb_rx_hold = 0; + rxq->pkt_first_seg = NULL; + rxq->pkt_last_seg = NULL; +} + +uint64_t +em_get_rx_port_offloads_capa(struct rte_eth_dev *dev) +{ + uint64_t rx_offload_capa; + uint32_t 
max_rx_pktlen; + + max_rx_pktlen = em_get_max_pktlen(dev); + + rx_offload_capa = + DEV_RX_OFFLOAD_VLAN_STRIP | + DEV_RX_OFFLOAD_VLAN_FILTER | + DEV_RX_OFFLOAD_IPV4_CKSUM | + DEV_RX_OFFLOAD_UDP_CKSUM | + DEV_RX_OFFLOAD_TCP_CKSUM | + DEV_RX_OFFLOAD_KEEP_CRC | + DEV_RX_OFFLOAD_SCATTER; + if (max_rx_pktlen > RTE_ETHER_MAX_LEN) + rx_offload_capa |= DEV_RX_OFFLOAD_JUMBO_FRAME; + + return rx_offload_capa; +} + +uint64_t +em_get_rx_queue_offloads_capa(struct rte_eth_dev *dev) +{ + uint64_t rx_queue_offload_capa; + + /* + * As only one Rx queue can be used, let per queue offloading + * capability be same to per port queue offloading capability + * for better convenience. + */ + rx_queue_offload_capa = em_get_rx_port_offloads_capa(dev); + + return rx_queue_offload_capa; +} + +int +eth_em_rx_queue_setup(struct rte_eth_dev *dev, + uint16_t queue_idx, + uint16_t nb_desc, + unsigned int socket_id, + const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *mp) +{ + const struct rte_memzone *rz; + struct em_rx_queue *rxq; + struct e1000_hw *hw; + uint32_t rsize; + uint64_t offloads; + + hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads; + + /* + * Validate number of receive descriptors. + * It must not exceed hardware maximum, and must be multiple + * of E1000_ALIGN. + */ + if (nb_desc % EM_RXD_ALIGN != 0 || + (nb_desc > E1000_MAX_RING_DESC) || + (nb_desc < E1000_MIN_RING_DESC)) { + return -EINVAL; + } + + /* + * EM devices don't support drop_en functionality. + * It's an optimization that does nothing on single-queue devices, + * so just log the issue and carry on. + */ + if (rx_conf->rx_drop_en) { + PMD_INIT_LOG(NOTICE, "drop_en functionality not supported by " + "device"); + } + + /* Free memory prior to re-allocation if needed. */ + if (dev->data->rx_queues[queue_idx] != NULL) { + em_rx_queue_release(dev->data->rx_queues[queue_idx]); + dev->data->rx_queues[queue_idx] = NULL; + } + + /* Allocate RX ring for max possible mumber of hardware descriptors. */ + rsize = sizeof(rxq->rx_ring[0]) * E1000_MAX_RING_DESC; + rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx, rsize, + RTE_CACHE_LINE_SIZE, socket_id); + if (rz == NULL) + return -ENOMEM; + + /* Allocate the RX queue data structure. */ + if ((rxq = rte_zmalloc("ethdev RX queue", sizeof(*rxq), + RTE_CACHE_LINE_SIZE)) == NULL) + return -ENOMEM; + + /* Allocate software ring. 
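eth_em_rx_queue_setup() above is reached through the generic rte_eth_rx_queue_setup() call during device bring-up, with an mbuf pool supplying the receive buffers. A hedged sketch of that application-side sequence (pool sizing and error handling trimmed), assuming the standard DPDK APIs; the pool name and descriptor count are illustrative.

#include <rte_ethdev.h>
#include <rte_mbuf.h>

#define NB_RXD  256     /* must respect EM_RXD_ALIGN and the ring size limits */
#define NB_MBUF 4096

/* Create a packet mbuf pool and attach it to RX queue 0 of the port. */
static int
setup_rx_queue(uint16_t port_id, unsigned int socket_id)
{
        struct rte_mempool *mp;

        mp = rte_pktmbuf_pool_create("em_rx_pool", NB_MBUF, 256 /* cache */,
                                     0 /* priv */, RTE_MBUF_DEFAULT_BUF_SIZE,
                                     socket_id);
        if (mp == NULL)
                return -1;

        /* A NULL rx_conf selects the defaults reported by the PMD; the em
         * PMD copies rx_free_thresh and the prefetch thresholds from this
         * structure into struct em_rx_queue. */
        return rte_eth_rx_queue_setup(port_id, 0, NB_RXD, socket_id,
                                      NULL, mp);
}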
*/ + if ((rxq->sw_ring = rte_zmalloc("rxq->sw_ring", + sizeof (rxq->sw_ring[0]) * nb_desc, + RTE_CACHE_LINE_SIZE)) == NULL) { + em_rx_queue_release(rxq); + return -ENOMEM; + } + + rxq->mb_pool = mp; + rxq->nb_rx_desc = nb_desc; + rxq->pthresh = rx_conf->rx_thresh.pthresh; + rxq->hthresh = rx_conf->rx_thresh.hthresh; + rxq->wthresh = rx_conf->rx_thresh.wthresh; + rxq->rx_free_thresh = rx_conf->rx_free_thresh; + rxq->queue_id = queue_idx; + rxq->port_id = dev->data->port_id; + if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) + rxq->crc_len = RTE_ETHER_CRC_LEN; + else + rxq->crc_len = 0; + + rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(queue_idx)); + rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(queue_idx)); + rxq->rx_ring_phys_addr = rz->iova; + rxq->rx_ring = (struct e1000_rx_desc *) rz->addr; + + PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64, + rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr); + + dev->data->rx_queues[queue_idx] = rxq; + em_reset_rx_queue(rxq); + rxq->offloads = offloads; + + return 0; +} + +uint32_t +eth_em_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id) +{ +#define EM_RXQ_SCAN_INTERVAL 4 + volatile struct e1000_rx_desc *rxdp; + struct em_rx_queue *rxq; + uint32_t desc = 0; + + rxq = dev->data->rx_queues[rx_queue_id]; + rxdp = &(rxq->rx_ring[rxq->rx_tail]); + + while ((desc < rxq->nb_rx_desc) && + (rxdp->status & E1000_RXD_STAT_DD)) { + desc += EM_RXQ_SCAN_INTERVAL; + rxdp += EM_RXQ_SCAN_INTERVAL; + if (rxq->rx_tail + desc >= rxq->nb_rx_desc) + rxdp = &(rxq->rx_ring[rxq->rx_tail + + desc - rxq->nb_rx_desc]); + } + + return desc; +} + +int +eth_em_rx_descriptor_done(void *rx_queue, uint16_t offset) +{ + volatile struct e1000_rx_desc *rxdp; + struct em_rx_queue *rxq = rx_queue; + uint32_t desc; + + if (unlikely(offset >= rxq->nb_rx_desc)) + return 0; + desc = rxq->rx_tail + offset; + if (desc >= rxq->nb_rx_desc) + desc -= rxq->nb_rx_desc; + + rxdp = &rxq->rx_ring[desc]; + return !!(rxdp->status & E1000_RXD_STAT_DD); +} + +int +eth_em_rx_descriptor_status(void *rx_queue, uint16_t offset) +{ + struct em_rx_queue *rxq = rx_queue; + volatile uint8_t *status; + uint32_t desc; + + if (unlikely(offset >= rxq->nb_rx_desc)) + return -EINVAL; + + if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold) + return RTE_ETH_RX_DESC_UNAVAIL; + + desc = rxq->rx_tail + offset; + if (desc >= rxq->nb_rx_desc) + desc -= rxq->nb_rx_desc; + + status = &rxq->rx_ring[desc].status; + if (*status & E1000_RXD_STAT_DD) + return RTE_ETH_RX_DESC_DONE; + + return RTE_ETH_RX_DESC_AVAIL; +} + +int +eth_em_tx_descriptor_status(void *tx_queue, uint16_t offset) +{ + struct em_tx_queue *txq = tx_queue; + volatile uint8_t *status; + uint32_t desc; + + if (unlikely(offset >= txq->nb_tx_desc)) + return -EINVAL; + + desc = txq->tx_tail + offset; + /* go to next desc that has the RS bit */ + desc = ((desc + txq->tx_rs_thresh - 1) / txq->tx_rs_thresh) * + txq->tx_rs_thresh; + if (desc >= txq->nb_tx_desc) { + desc -= txq->nb_tx_desc; + if (desc >= txq->nb_tx_desc) + desc -= txq->nb_tx_desc; + } + + status = &txq->tx_ring[desc].upper.fields.status; + if (*status & E1000_TXD_STAT_DD) + return RTE_ETH_TX_DESC_DONE; + + return RTE_ETH_TX_DESC_FULL; +} + +void +em_dev_clear_queues(struct rte_eth_dev *dev) +{ + uint16_t i; + struct em_tx_queue *txq; + struct em_rx_queue *rxq; + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + txq = dev->data->tx_queues[i]; + if (txq != NULL) { + em_tx_queue_release_mbufs(txq); + em_reset_tx_queue(txq); + } + } + + for (i = 0; i < 
dev->data->nb_rx_queues; i++) { + rxq = dev->data->rx_queues[i]; + if (rxq != NULL) { + em_rx_queue_release_mbufs(rxq); + em_reset_rx_queue(rxq); + } + } +} + +void +em_dev_free_queues(struct rte_eth_dev *dev) +{ + uint16_t i; + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + eth_em_rx_queue_release(dev->data->rx_queues[i]); + dev->data->rx_queues[i] = NULL; + } + dev->data->nb_rx_queues = 0; + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + eth_em_tx_queue_release(dev->data->tx_queues[i]); + dev->data->tx_queues[i] = NULL; + } + dev->data->nb_tx_queues = 0; +} + +/* + * Takes as input/output parameter RX buffer size. + * Returns (BSIZE | BSEX | FLXBUF) fields of RCTL register. + */ +static uint32_t +em_rctl_bsize(__rte_unused enum e1000_mac_type hwtyp, uint32_t *bufsz) +{ + /* + * For BSIZE & BSEX all configurable sizes are: + * 16384: rctl |= (E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX); + * 8192: rctl |= (E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX); + * 4096: rctl |= (E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX); + * 2048: rctl |= E1000_RCTL_SZ_2048; + * 1024: rctl |= E1000_RCTL_SZ_1024; + * 512: rctl |= E1000_RCTL_SZ_512; + * 256: rctl |= E1000_RCTL_SZ_256; + */ + static const struct { + uint32_t bufsz; + uint32_t rctl; + } bufsz_to_rctl[] = { + {16384, (E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX)}, + {8192, (E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX)}, + {4096, (E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX)}, + {2048, E1000_RCTL_SZ_2048}, + {1024, E1000_RCTL_SZ_1024}, + {512, E1000_RCTL_SZ_512}, + {256, E1000_RCTL_SZ_256}, + }; + + int i; + uint32_t rctl_bsize; + + rctl_bsize = *bufsz; + + /* + * Starting from 82571 it is possible to specify RX buffer size + * by RCTL.FLXBUF. When this field is different from zero, the + * RX buffer size = RCTL.FLXBUF * 1K + * (e.g. t is possible to specify RX buffer size 1,2,...,15KB). + * It is working ok on real HW, but by some reason doesn't work + * on VMware emulated 82574L. + * So for now, always use BSIZE/BSEX to setup RX buffer size. + * If you don't plan to use it on VMware emulated 82574L and + * would like to specify RX buffer size in 1K granularity, + * uncomment the following lines: + * *************************************************************** + * if (hwtyp >= e1000_82571 && hwtyp <= e1000_82574 && + * rctl_bsize >= EM_RCTL_FLXBUF_STEP) { + * rctl_bsize /= EM_RCTL_FLXBUF_STEP; + * *bufsz = rctl_bsize; + * return (rctl_bsize << E1000_RCTL_FLXBUF_SHIFT & + * E1000_RCTL_FLXBUF_MASK); + * } + * *************************************************************** + */ + + for (i = 0; i != sizeof(bufsz_to_rctl) / sizeof(bufsz_to_rctl[0]); + i++) { + if (rctl_bsize >= bufsz_to_rctl[i].bufsz) { + *bufsz = bufsz_to_rctl[i].bufsz; + return bufsz_to_rctl[i].rctl; + } + } + + /* Should never happen. 
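em_rctl_bsize() above rounds the available mbuf data room down to the nearest size the RCTL BSIZE/BSEX encoding can express. The standalone sketch below walks the same table with the RCTL bit patterns dropped (they live in the e1000 headers), keeping only the size rounding.

#include <stdint.h>
#include <stdio.h>

/* Same search order as em_rctl_bsize(): the first table entry <= bufsz wins. */
static const uint32_t sizes[] = { 16384, 8192, 4096, 2048, 1024, 512, 256 };

static uint32_t rctl_bsize(uint32_t bufsz)
{
        for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
                if (bufsz >= sizes[i])
                        return sizes[i];
        return 0;       /* smaller than 256 bytes: not representable */
}

int main(void)
{
        /* A default-sized mbuf pool leaves 2048 usable bytes after the
         * headroom, so it maps to the 2048-byte setting; odd sizes are
         * rounded down to the next supported value. */
        printf("2048 -> %u\n", rctl_bsize(2048));       /* 2048 */
        printf("3000 -> %u\n", rctl_bsize(3000));       /* 2048 */
        printf("9216 -> %u\n", rctl_bsize(9216));       /* 8192 */
        return 0;
}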
*/ + return -EINVAL; +} + +static int +em_alloc_rx_queue_mbufs(struct em_rx_queue *rxq) +{ + struct em_rx_entry *rxe = rxq->sw_ring; + uint64_t dma_addr; + unsigned i; + static const struct e1000_rx_desc rxd_init = { + .buffer_addr = 0, + }; + + /* Initialize software ring entries */ + for (i = 0; i < rxq->nb_rx_desc; i++) { + volatile struct e1000_rx_desc *rxd; + struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool); + + if (mbuf == NULL) { + PMD_INIT_LOG(ERR, "RX mbuf alloc failed " + "queue_id=%hu", rxq->queue_id); + return -ENOMEM; + } + + dma_addr = + rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf)); + + /* Clear HW ring memory */ + rxq->rx_ring[i] = rxd_init; + + rxd = &rxq->rx_ring[i]; + rxd->buffer_addr = dma_addr; + rxe[i].mbuf = mbuf; + } + + return 0; +} + +/********************************************************************* + * + * Enable receive unit. + * + **********************************************************************/ +int +eth_em_rx_init(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw; + struct em_rx_queue *rxq; + struct rte_eth_rxmode *rxmode; + uint32_t rctl; + uint32_t rfctl; + uint32_t rxcsum; + uint32_t rctl_bsize; + uint16_t i; + int ret; + + hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + rxmode = &dev->data->dev_conf.rxmode; + + /* + * Make sure receives are disabled while setting + * up the descriptor ring. + */ + rctl = E1000_READ_REG(hw, E1000_RCTL); + E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN); + + rfctl = E1000_READ_REG(hw, E1000_RFCTL); + + /* Disable extended descriptor type. */ + rfctl &= ~E1000_RFCTL_EXTEN; + /* Disable accelerated acknowledge */ + if (hw->mac.type == e1000_82574) + rfctl |= E1000_RFCTL_ACK_DIS; + + E1000_WRITE_REG(hw, E1000_RFCTL, rfctl); + + /* + * XXX TEMPORARY WORKAROUND: on some systems with 82573 + * long latencies are observed, like Lenovo X60. This + * change eliminates the problem, but since having positive + * values in RDTR is a known source of problems on other + * platforms another solution is being sought. + */ + if (hw->mac.type == e1000_82573) + E1000_WRITE_REG(hw, E1000_RDTR, 0x20); + + dev->rx_pkt_burst = (eth_rx_burst_t)eth_em_recv_pkts; + + /* Determine RX bufsize. */ + rctl_bsize = EM_MAX_BUF_SIZE; + for (i = 0; i < dev->data->nb_rx_queues; i++) { + uint32_t buf_size; + + rxq = dev->data->rx_queues[i]; + buf_size = rte_pktmbuf_data_room_size(rxq->mb_pool) - + RTE_PKTMBUF_HEADROOM; + rctl_bsize = RTE_MIN(rctl_bsize, buf_size); + } + + rctl |= em_rctl_bsize(hw->mac.type, &rctl_bsize); + + /* Configure and enable each RX queue. 
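+ * Each queue is programmed with its ring size in bytes (RDLEN), the
+ * 64-bit bus address of the descriptor ring split across two 32-bit
+ * registers, and head/tail pointers initialised so that virtually the
+ * whole ring is available to hardware (RDH = 0, RDT = nb_rx_desc - 1).
+ * As a small sketch of the address split performed in the loop below:
+ *
+ *   uint64_t bus_addr = rxq->rx_ring_phys_addr;
+ *   uint32_t hi = (uint32_t)(bus_addr >> 32);   // written to E1000_RDBAH(i)
+ *   uint32_t lo = (uint32_t)bus_addr;           // written to E1000_RDBAL(i)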
*/ + for (i = 0; i < dev->data->nb_rx_queues; i++) { + uint64_t bus_addr; + uint32_t rxdctl; + + rxq = dev->data->rx_queues[i]; + + /* Allocate buffers for descriptor rings and setup queue */ + ret = em_alloc_rx_queue_mbufs(rxq); + if (ret) + return ret; + + /* + * Reset crc_len in case it was changed after queue setup by a + * call to configure + */ + if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) + rxq->crc_len = RTE_ETHER_CRC_LEN; + else + rxq->crc_len = 0; + + bus_addr = rxq->rx_ring_phys_addr; + E1000_WRITE_REG(hw, E1000_RDLEN(i), + rxq->nb_rx_desc * + sizeof(*rxq->rx_ring)); + E1000_WRITE_REG(hw, E1000_RDBAH(i), + (uint32_t)(bus_addr >> 32)); + E1000_WRITE_REG(hw, E1000_RDBAL(i), (uint32_t)bus_addr); + + E1000_WRITE_REG(hw, E1000_RDH(i), 0); + E1000_WRITE_REG(hw, E1000_RDT(i), rxq->nb_rx_desc - 1); + + rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(0)); + rxdctl &= 0xFE000000; + rxdctl |= rxq->pthresh & 0x3F; + rxdctl |= (rxq->hthresh & 0x3F) << 8; + rxdctl |= (rxq->wthresh & 0x3F) << 16; + rxdctl |= E1000_RXDCTL_GRAN; + E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl); + + /* + * Due to EM devices not having any sort of hardware + * limit for packet length, jumbo frame of any size + * can be accepted, thus we have to enable scattered + * rx if jumbo frames are enabled (or if buffer size + * is too small to accommodate non-jumbo packets) + * to avoid splitting packets that don't fit into + * one buffer. + */ + if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME || + rctl_bsize < RTE_ETHER_MAX_LEN) { + if (!dev->data->scattered_rx) + PMD_INIT_LOG(DEBUG, "forcing scatter mode"); + dev->rx_pkt_burst = + (eth_rx_burst_t)eth_em_recv_scattered_pkts; + dev->data->scattered_rx = 1; + } + } + + if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) { + if (!dev->data->scattered_rx) + PMD_INIT_LOG(DEBUG, "forcing scatter mode"); + dev->rx_pkt_burst = eth_em_recv_scattered_pkts; + dev->data->scattered_rx = 1; + } + + /* + * Setup the Checksum Register. + * Receive Full-Packet Checksum Offload is mutually exclusive with RSS. + */ + rxcsum = E1000_READ_REG(hw, E1000_RXCSUM); + + if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM) + rxcsum |= E1000_RXCSUM_IPOFL; + else + rxcsum &= ~E1000_RXCSUM_IPOFL; + E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum); + + /* No MRQ or RSS support for now */ + + /* Set early receive threshold on appropriate hw */ + if ((hw->mac.type == e1000_ich9lan || + hw->mac.type == e1000_pch2lan || + hw->mac.type == e1000_ich10lan) && + rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) { + u32 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(0)); + E1000_WRITE_REG(hw, E1000_RXDCTL(0), rxdctl | 3); + E1000_WRITE_REG(hw, E1000_ERT, 0x100 | (1 << 13)); + } + + if (hw->mac.type == e1000_pch2lan) { + if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) + e1000_lv_jumbo_workaround_ich8lan(hw, TRUE); + else + e1000_lv_jumbo_workaround_ich8lan(hw, FALSE); + } + + /* Setup the Receive Control Register. */ + if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) + rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */ + else + rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */ + + rctl &= ~(3 << E1000_RCTL_MO_SHIFT); + rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO | + E1000_RCTL_RDMTS_HALF | + (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT); + + /* Make sure VLAN Filters are off. */ + rctl &= ~E1000_RCTL_VFE; + /* Don't store bad packets. */ + rctl &= ~E1000_RCTL_SBP; + /* Legacy descriptor type. 
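+ * (On the CRC handling just below: the same DEV_RX_OFFLOAD_KEEP_CRC
+ * test that set rxq->crc_len per queue earlier decides whether
+ * E1000_RCTL_SECRC is set, so either the MAC strips the CRC and
+ * crc_len is 0, or the CRC is kept in the buffer and crc_len is
+ * RTE_ETHER_CRC_LEN, and the two settings cannot disagree.)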
*/ + rctl &= ~E1000_RCTL_DTYP_MASK; + + /* + * Configure support of jumbo frames, if any. + */ + if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) + rctl |= E1000_RCTL_LPE; + else + rctl &= ~E1000_RCTL_LPE; + + /* Enable Receives. */ + E1000_WRITE_REG(hw, E1000_RCTL, rctl); + + return 0; +} + +/********************************************************************* + * + * Enable transmit unit. + * + **********************************************************************/ +void +eth_em_tx_init(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw; + struct em_tx_queue *txq; + uint32_t tctl; + uint32_t txdctl; + uint16_t i; + + hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* Setup the Base and Length of the Tx Descriptor Rings. */ + for (i = 0; i < dev->data->nb_tx_queues; i++) { + uint64_t bus_addr; + + txq = dev->data->tx_queues[i]; + bus_addr = txq->tx_ring_phys_addr; + E1000_WRITE_REG(hw, E1000_TDLEN(i), + txq->nb_tx_desc * + sizeof(*txq->tx_ring)); + E1000_WRITE_REG(hw, E1000_TDBAH(i), + (uint32_t)(bus_addr >> 32)); + E1000_WRITE_REG(hw, E1000_TDBAL(i), (uint32_t)bus_addr); + + /* Setup the HW Tx Head and Tail descriptor pointers. */ + E1000_WRITE_REG(hw, E1000_TDT(i), 0); + E1000_WRITE_REG(hw, E1000_TDH(i), 0); + + /* Setup Transmit threshold registers. */ + txdctl = E1000_READ_REG(hw, E1000_TXDCTL(i)); + /* + * bit 22 is reserved, on some models should always be 0, + * on others - always 1. + */ + txdctl &= E1000_TXDCTL_COUNT_DESC; + txdctl |= txq->pthresh & 0x3F; + txdctl |= (txq->hthresh & 0x3F) << 8; + txdctl |= (txq->wthresh & 0x3F) << 16; + txdctl |= E1000_TXDCTL_GRAN; + E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl); + } + + /* Program the Transmit Control Register. */ + tctl = E1000_READ_REG(hw, E1000_TCTL); + tctl &= ~E1000_TCTL_CT; + tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN | + (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT)); + + /* SPT and CNP Si errata workaround to avoid data corruption */ + if (hw->mac.type == e1000_pch_spt) { + uint32_t reg_val; + reg_val = E1000_READ_REG(hw, E1000_IOSFPC); + reg_val |= E1000_RCTL_RDMTS_HEX; + E1000_WRITE_REG(hw, E1000_IOSFPC, reg_val); + + /* Dropping the number of outstanding requests from + * 3 to 2 in order to avoid a buffer overrun. + */ + reg_val = E1000_READ_REG(hw, E1000_TARC(0)); + reg_val &= ~E1000_TARC0_CB_MULTIQ_3_REQ; + reg_val |= E1000_TARC0_CB_MULTIQ_2_REQ; + E1000_WRITE_REG(hw, E1000_TARC(0), reg_val); + } + + /* This write will effectively turn on the transmit unit. 
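+ * (For reference, the per-queue TXDCTL value assembled in the loop
+ * above packs three 6-bit thresholds into one register; a standalone
+ * sketch, with an illustrative helper name and threshold values:
+ *
+ *   uint32_t pack_txdctl(uint8_t pthresh, uint8_t hthresh, uint8_t wthresh)
+ *   {
+ *       return  (uint32_t)(pthresh & 0x3F)
+ *             | ((uint32_t)(hthresh & 0x3F) << 8)
+ *             | ((uint32_t)(wthresh & 0x3F) << 16);
+ *   }
+ *
+ *   // pack_txdctl(8, 1, 16) == 0x00100108; the driver then ORs in
+ *   // E1000_TXDCTL_GRAN and keeps the preserved bit 22 read from the
+ *   // original register value.)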
*/ + E1000_WRITE_REG(hw, E1000_TCTL, tctl); +} + +void +em_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, + struct rte_eth_rxq_info *qinfo) +{ + struct em_rx_queue *rxq; + + rxq = dev->data->rx_queues[queue_id]; + + qinfo->mp = rxq->mb_pool; + qinfo->scattered_rx = dev->data->scattered_rx; + qinfo->nb_desc = rxq->nb_rx_desc; + qinfo->conf.rx_free_thresh = rxq->rx_free_thresh; + qinfo->conf.offloads = rxq->offloads; +} + +void +em_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, + struct rte_eth_txq_info *qinfo) +{ + struct em_tx_queue *txq; + + txq = dev->data->tx_queues[queue_id]; + + qinfo->nb_desc = txq->nb_tx_desc; + + qinfo->conf.tx_thresh.pthresh = txq->pthresh; + qinfo->conf.tx_thresh.hthresh = txq->hthresh; + qinfo->conf.tx_thresh.wthresh = txq->wthresh; + qinfo->conf.tx_free_thresh = txq->tx_free_thresh; + qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh; + qinfo->conf.offloads = txq->offloads; +} + +static void +e1000_flush_tx_ring(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + volatile struct e1000_data_desc *tx_desc; + volatile uint32_t *tdt_reg_addr; + uint32_t tdt, tctl, txd_lower = E1000_TXD_CMD_IFCS; + uint16_t size = 512; + struct em_tx_queue *txq; + int i; + + if (dev->data->tx_queues == NULL) + return; + tctl = E1000_READ_REG(hw, E1000_TCTL); + E1000_WRITE_REG(hw, E1000_TCTL, tctl | E1000_TCTL_EN); + for (i = 0; i < dev->data->nb_tx_queues && + i < E1000_I219_MAX_TX_QUEUE_NUM; i++) { + txq = dev->data->tx_queues[i]; + tdt = E1000_READ_REG(hw, E1000_TDT(i)); + if (tdt != txq->tx_tail) + return; + tx_desc = &txq->tx_ring[txq->tx_tail]; + tx_desc->buffer_addr = rte_cpu_to_le_64(txq->tx_ring_phys_addr); + tx_desc->lower.data = rte_cpu_to_le_32(txd_lower | size); + tx_desc->upper.data = 0; + + rte_cio_wmb(); + txq->tx_tail++; + if (txq->tx_tail == txq->nb_tx_desc) + txq->tx_tail = 0; + tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(i)); + E1000_PCI_REG_WRITE(tdt_reg_addr, txq->tx_tail); + usec_delay(250); + } +} + +static void +e1000_flush_rx_ring(struct rte_eth_dev *dev) +{ + uint32_t rctl, rxdctl; + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int i; + + rctl = E1000_READ_REG(hw, E1000_RCTL); + E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN); + E1000_WRITE_FLUSH(hw); + usec_delay(150); + + for (i = 0; i < dev->data->nb_rx_queues && + i < E1000_I219_MAX_RX_QUEUE_NUM; i++) { + rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i)); + /* zero the lower 14 bits (prefetch and host thresholds) */ + rxdctl &= 0xffffc000; + + /* update thresholds: prefetch threshold to 31, + * host threshold to 1 and make sure the granularity + * is "descriptors" and not "cache lines" + */ + rxdctl |= (0x1F | (1UL << 8) | E1000_RXDCTL_THRESH_UNIT_DESC); + + E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl); + } + /* momentarily enable the RX ring for the changes to take effect */ + E1000_WRITE_REG(hw, E1000_RCTL, rctl | E1000_RCTL_EN); + E1000_WRITE_FLUSH(hw); + usec_delay(150); + E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN); +} + +/** + * em_flush_desc_rings - remove all descriptors from the descriptor rings + * + * In i219, the descriptor rings must be emptied before resetting/closing the + * HW. 
Failure to do this will cause the HW to enter a unit hang state which + * can only be released by PCI reset on the device + * + */ + +void +em_flush_desc_rings(struct rte_eth_dev *dev) +{ + uint32_t fextnvm11, tdlen; + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + uint16_t pci_cfg_status = 0; + int ret; + + fextnvm11 = E1000_READ_REG(hw, E1000_FEXTNVM11); + E1000_WRITE_REG(hw, E1000_FEXTNVM11, + fextnvm11 | E1000_FEXTNVM11_DISABLE_MULR_FIX); + tdlen = E1000_READ_REG(hw, E1000_TDLEN(0)); + ret = rte_pci_read_config(pci_dev, &pci_cfg_status, + sizeof(pci_cfg_status), PCI_CFG_STATUS_REG); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x", + PCI_CFG_STATUS_REG); + return; + } + + /* do nothing if we're not in faulty state, or if the queue is empty */ + if ((pci_cfg_status & FLUSH_DESC_REQUIRED) && tdlen) { + /* flush desc ring */ + e1000_flush_tx_ring(dev); + ret = rte_pci_read_config(pci_dev, &pci_cfg_status, + sizeof(pci_cfg_status), PCI_CFG_STATUS_REG); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x", + PCI_CFG_STATUS_REG); + return; + } + + if (pci_cfg_status & FLUSH_DESC_REQUIRED) + e1000_flush_rx_ring(dev); + } +} diff --git a/src/spdk/dpdk/drivers/net/e1000/igb_ethdev.c b/src/spdk/dpdk/drivers/net/e1000/igb_ethdev.c new file mode 100644 index 000000000..a5551e817 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/e1000/igb_ethdev.c @@ -0,0 +1,5792 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2016 Intel Corporation + */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "e1000_logs.h" +#include "base/e1000_api.h" +#include "e1000_ethdev.h" +#include "igb_regs.h" + +/* + * Default values for port configuration + */ +#define IGB_DEFAULT_RX_FREE_THRESH 32 + +#define IGB_DEFAULT_RX_PTHRESH ((hw->mac.type == e1000_i354) ? 12 : 8) +#define IGB_DEFAULT_RX_HTHRESH 8 +#define IGB_DEFAULT_RX_WTHRESH ((hw->mac.type == e1000_82576) ? 1 : 4) + +#define IGB_DEFAULT_TX_PTHRESH ((hw->mac.type == e1000_i354) ? 20 : 8) +#define IGB_DEFAULT_TX_HTHRESH 1 +#define IGB_DEFAULT_TX_WTHRESH ((hw->mac.type == e1000_82576) ? 1 : 16) + +/* Bit shift and mask */ +#define IGB_4_BIT_WIDTH (CHAR_BIT / 2) +#define IGB_4_BIT_MASK RTE_LEN2MASK(IGB_4_BIT_WIDTH, uint8_t) +#define IGB_8_BIT_WIDTH CHAR_BIT +#define IGB_8_BIT_MASK UINT8_MAX + +/* Additional timesync values. 
*/ +#define E1000_CYCLECOUNTER_MASK 0xffffffffffffffffULL +#define E1000_ETQF_FILTER_1588 3 +#define IGB_82576_TSYNC_SHIFT 16 +#define E1000_INCPERIOD_82576 (1 << E1000_TIMINCA_16NS_SHIFT) +#define E1000_INCVALUE_82576 (16 << IGB_82576_TSYNC_SHIFT) +#define E1000_TSAUXC_DISABLE_SYSTIME 0x80000000 + +#define E1000_VTIVAR_MISC 0x01740 +#define E1000_VTIVAR_MISC_MASK 0xFF +#define E1000_VTIVAR_VALID 0x80 +#define E1000_VTIVAR_MISC_MAILBOX 0 +#define E1000_VTIVAR_MISC_INTR_MASK 0x3 + +/* External VLAN Enable bit mask */ +#define E1000_CTRL_EXT_EXT_VLAN (1 << 26) + +/* External VLAN Ether Type bit mask and shift */ +#define E1000_VET_VET_EXT 0xFFFF0000 +#define E1000_VET_VET_EXT_SHIFT 16 + +/* MSI-X other interrupt vector */ +#define IGB_MSIX_OTHER_INTR_VEC 0 + +static int eth_igb_configure(struct rte_eth_dev *dev); +static int eth_igb_start(struct rte_eth_dev *dev); +static void eth_igb_stop(struct rte_eth_dev *dev); +static int eth_igb_dev_set_link_up(struct rte_eth_dev *dev); +static int eth_igb_dev_set_link_down(struct rte_eth_dev *dev); +static void eth_igb_close(struct rte_eth_dev *dev); +static int eth_igb_reset(struct rte_eth_dev *dev); +static int eth_igb_promiscuous_enable(struct rte_eth_dev *dev); +static int eth_igb_promiscuous_disable(struct rte_eth_dev *dev); +static int eth_igb_allmulticast_enable(struct rte_eth_dev *dev); +static int eth_igb_allmulticast_disable(struct rte_eth_dev *dev); +static int eth_igb_link_update(struct rte_eth_dev *dev, + int wait_to_complete); +static int eth_igb_stats_get(struct rte_eth_dev *dev, + struct rte_eth_stats *rte_stats); +static int eth_igb_xstats_get(struct rte_eth_dev *dev, + struct rte_eth_xstat *xstats, unsigned n); +static int eth_igb_xstats_get_by_id(struct rte_eth_dev *dev, + const uint64_t *ids, + uint64_t *values, unsigned int n); +static int eth_igb_xstats_get_names(struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, + unsigned int size); +static int eth_igb_xstats_get_names_by_id(struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, const uint64_t *ids, + unsigned int limit); +static int eth_igb_stats_reset(struct rte_eth_dev *dev); +static int eth_igb_xstats_reset(struct rte_eth_dev *dev); +static int eth_igb_fw_version_get(struct rte_eth_dev *dev, + char *fw_version, size_t fw_size); +static int eth_igb_infos_get(struct rte_eth_dev *dev, + struct rte_eth_dev_info *dev_info); +static const uint32_t *eth_igb_supported_ptypes_get(struct rte_eth_dev *dev); +static int eth_igbvf_infos_get(struct rte_eth_dev *dev, + struct rte_eth_dev_info *dev_info); +static int eth_igb_flow_ctrl_get(struct rte_eth_dev *dev, + struct rte_eth_fc_conf *fc_conf); +static int eth_igb_flow_ctrl_set(struct rte_eth_dev *dev, + struct rte_eth_fc_conf *fc_conf); +static int eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on); +static int eth_igb_rxq_interrupt_setup(struct rte_eth_dev *dev); +static int eth_igb_interrupt_get_status(struct rte_eth_dev *dev); +static int eth_igb_interrupt_action(struct rte_eth_dev *dev, + struct rte_intr_handle *handle); +static void eth_igb_interrupt_handler(void *param); +static int igb_hardware_init(struct e1000_hw *hw); +static void igb_hw_control_acquire(struct e1000_hw *hw); +static void igb_hw_control_release(struct e1000_hw *hw); +static void igb_init_manageability(struct e1000_hw *hw); +static void igb_release_manageability(struct e1000_hw *hw); + +static int eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu); + +static int eth_igb_vlan_filter_set(struct rte_eth_dev *dev, + 
uint16_t vlan_id, int on); +static int eth_igb_vlan_tpid_set(struct rte_eth_dev *dev, + enum rte_vlan_type vlan_type, + uint16_t tpid_id); +static int eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask); + +static void igb_vlan_hw_filter_enable(struct rte_eth_dev *dev); +static void igb_vlan_hw_filter_disable(struct rte_eth_dev *dev); +static void igb_vlan_hw_strip_enable(struct rte_eth_dev *dev); +static void igb_vlan_hw_strip_disable(struct rte_eth_dev *dev); +static void igb_vlan_hw_extend_enable(struct rte_eth_dev *dev); +static void igb_vlan_hw_extend_disable(struct rte_eth_dev *dev); + +static int eth_igb_led_on(struct rte_eth_dev *dev); +static int eth_igb_led_off(struct rte_eth_dev *dev); + +static void igb_intr_disable(struct rte_eth_dev *dev); +static int igb_get_rx_buffer_size(struct e1000_hw *hw); +static int eth_igb_rar_set(struct rte_eth_dev *dev, + struct rte_ether_addr *mac_addr, + uint32_t index, uint32_t pool); +static void eth_igb_rar_clear(struct rte_eth_dev *dev, uint32_t index); +static int eth_igb_default_mac_addr_set(struct rte_eth_dev *dev, + struct rte_ether_addr *addr); + +static void igbvf_intr_disable(struct e1000_hw *hw); +static int igbvf_dev_configure(struct rte_eth_dev *dev); +static int igbvf_dev_start(struct rte_eth_dev *dev); +static void igbvf_dev_stop(struct rte_eth_dev *dev); +static void igbvf_dev_close(struct rte_eth_dev *dev); +static int igbvf_promiscuous_enable(struct rte_eth_dev *dev); +static int igbvf_promiscuous_disable(struct rte_eth_dev *dev); +static int igbvf_allmulticast_enable(struct rte_eth_dev *dev); +static int igbvf_allmulticast_disable(struct rte_eth_dev *dev); +static int eth_igbvf_link_update(struct e1000_hw *hw); +static int eth_igbvf_stats_get(struct rte_eth_dev *dev, + struct rte_eth_stats *rte_stats); +static int eth_igbvf_xstats_get(struct rte_eth_dev *dev, + struct rte_eth_xstat *xstats, unsigned n); +static int eth_igbvf_xstats_get_names(struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, + unsigned limit); +static int eth_igbvf_stats_reset(struct rte_eth_dev *dev); +static int igbvf_vlan_filter_set(struct rte_eth_dev *dev, + uint16_t vlan_id, int on); +static int igbvf_set_vfta(struct e1000_hw *hw, uint16_t vid, bool on); +static void igbvf_set_vfta_all(struct rte_eth_dev *dev, bool on); +static int igbvf_default_mac_addr_set(struct rte_eth_dev *dev, + struct rte_ether_addr *addr); +static int igbvf_get_reg_length(struct rte_eth_dev *dev); +static int igbvf_get_regs(struct rte_eth_dev *dev, + struct rte_dev_reg_info *regs); + +static int eth_igb_rss_reta_update(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size); +static int eth_igb_rss_reta_query(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size); + +static int eth_igb_syn_filter_get(struct rte_eth_dev *dev, + struct rte_eth_syn_filter *filter); +static int eth_igb_syn_filter_handle(struct rte_eth_dev *dev, + enum rte_filter_op filter_op, + void *arg); +static int igb_add_2tuple_filter(struct rte_eth_dev *dev, + struct rte_eth_ntuple_filter *ntuple_filter); +static int igb_remove_2tuple_filter(struct rte_eth_dev *dev, + struct rte_eth_ntuple_filter *ntuple_filter); +static int eth_igb_get_flex_filter(struct rte_eth_dev *dev, + struct rte_eth_flex_filter *filter); +static int eth_igb_flex_filter_handle(struct rte_eth_dev *dev, + enum rte_filter_op filter_op, + void *arg); +static int igb_add_5tuple_filter_82576(struct rte_eth_dev *dev, + struct rte_eth_ntuple_filter 
*ntuple_filter); +static int igb_remove_5tuple_filter_82576(struct rte_eth_dev *dev, + struct rte_eth_ntuple_filter *ntuple_filter); +static int igb_get_ntuple_filter(struct rte_eth_dev *dev, + struct rte_eth_ntuple_filter *filter); +static int igb_ntuple_filter_handle(struct rte_eth_dev *dev, + enum rte_filter_op filter_op, + void *arg); +static int igb_ethertype_filter_handle(struct rte_eth_dev *dev, + enum rte_filter_op filter_op, + void *arg); +static int igb_get_ethertype_filter(struct rte_eth_dev *dev, + struct rte_eth_ethertype_filter *filter); +static int eth_igb_filter_ctrl(struct rte_eth_dev *dev, + enum rte_filter_type filter_type, + enum rte_filter_op filter_op, + void *arg); +static int eth_igb_get_reg_length(struct rte_eth_dev *dev); +static int eth_igb_get_regs(struct rte_eth_dev *dev, + struct rte_dev_reg_info *regs); +static int eth_igb_get_eeprom_length(struct rte_eth_dev *dev); +static int eth_igb_get_eeprom(struct rte_eth_dev *dev, + struct rte_dev_eeprom_info *eeprom); +static int eth_igb_set_eeprom(struct rte_eth_dev *dev, + struct rte_dev_eeprom_info *eeprom); +static int eth_igb_get_module_info(struct rte_eth_dev *dev, + struct rte_eth_dev_module_info *modinfo); +static int eth_igb_get_module_eeprom(struct rte_eth_dev *dev, + struct rte_dev_eeprom_info *info); +static int eth_igb_set_mc_addr_list(struct rte_eth_dev *dev, + struct rte_ether_addr *mc_addr_set, + uint32_t nb_mc_addr); +static int igb_timesync_enable(struct rte_eth_dev *dev); +static int igb_timesync_disable(struct rte_eth_dev *dev); +static int igb_timesync_read_rx_timestamp(struct rte_eth_dev *dev, + struct timespec *timestamp, + uint32_t flags); +static int igb_timesync_read_tx_timestamp(struct rte_eth_dev *dev, + struct timespec *timestamp); +static int igb_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta); +static int igb_timesync_read_time(struct rte_eth_dev *dev, + struct timespec *timestamp); +static int igb_timesync_write_time(struct rte_eth_dev *dev, + const struct timespec *timestamp); +static int eth_igb_rx_queue_intr_enable(struct rte_eth_dev *dev, + uint16_t queue_id); +static int eth_igb_rx_queue_intr_disable(struct rte_eth_dev *dev, + uint16_t queue_id); +static void eth_igb_assign_msix_vector(struct e1000_hw *hw, int8_t direction, + uint8_t queue, uint8_t msix_vector); +static void eth_igb_write_ivar(struct e1000_hw *hw, uint8_t msix_vector, + uint8_t index, uint8_t offset); +static void eth_igb_configure_msix_intr(struct rte_eth_dev *dev); +static void eth_igbvf_interrupt_handler(void *param); +static void igbvf_mbx_process(struct rte_eth_dev *dev); +static int igb_filter_restore(struct rte_eth_dev *dev); + +/* + * Define VF Stats MACRO for Non "cleared on read" register + */ +#define UPDATE_VF_STAT(reg, last, cur) \ +{ \ + u32 latest = E1000_READ_REG(hw, reg); \ + cur += (latest - last) & UINT_MAX; \ + last = latest; \ +} + +#define IGB_FC_PAUSE_TIME 0x0680 +#define IGB_LINK_UPDATE_CHECK_TIMEOUT 90 /* 9s */ +#define IGB_LINK_UPDATE_CHECK_INTERVAL 100 /* ms */ + +#define IGBVF_PMD_NAME "rte_igbvf_pmd" /* PMD name */ + +static enum e1000_fc_mode igb_fc_setting = e1000_fc_full; + +/* + * The set of PCI devices this driver supports + */ +static const struct rte_pci_id pci_id_igb_map[] = { + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_FIBER) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_SERDES) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_QUAD_COPPER) }, + { 
RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_QUAD_COPPER_ET2) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_NS) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_NS_SERDES) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_SERDES_QUAD) }, + + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82575EB_COPPER) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82575EB_FIBER_SERDES) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82575GB_QUAD_COPPER) }, + + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_COPPER) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_FIBER) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_SERDES) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_SGMII) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_COPPER_DUAL) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_QUAD_FIBER) }, + + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_COPPER) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_FIBER) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_SERDES) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_SGMII) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_DA4) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_COPPER) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_COPPER_OEM1) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_COPPER_IT) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_FIBER) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_SERDES) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_SGMII) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_COPPER_FLASHLESS) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_SERDES_FLASHLESS) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I211_COPPER) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I354_BACKPLANE_1GBPS) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I354_SGMII) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I354_BACKPLANE_2_5GBPS) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_DH89XXCC_SGMII) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_DH89XXCC_SERDES) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_DH89XXCC_BACKPLANE) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_DH89XXCC_SFP) }, + { .vendor_id = 0, /* sentinel */ }, +}; + +/* + * The set of PCI devices this driver supports (for 82576&I350 VF) + */ +static const struct rte_pci_id pci_id_igbvf_map[] = { + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_VF) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_VF_HV) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_VF) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_VF_HV) }, + { .vendor_id = 0, /* sentinel */ }, +}; + +static const struct rte_eth_desc_lim rx_desc_lim = { + .nb_max = E1000_MAX_RING_DESC, + .nb_min = E1000_MIN_RING_DESC, + .nb_align = IGB_RXD_ALIGN, +}; + +static const struct rte_eth_desc_lim tx_desc_lim = { + .nb_max = E1000_MAX_RING_DESC, + .nb_min = E1000_MIN_RING_DESC, + .nb_align = IGB_RXD_ALIGN, + .nb_seg_max = IGB_TX_MAX_SEG, + .nb_mtu_seg_max = IGB_TX_MAX_MTU_SEG, +}; + +static const struct eth_dev_ops eth_igb_ops = { + .dev_configure = eth_igb_configure, + .dev_start = eth_igb_start, + .dev_stop = eth_igb_stop, + 
.dev_set_link_up = eth_igb_dev_set_link_up, + .dev_set_link_down = eth_igb_dev_set_link_down, + .dev_close = eth_igb_close, + .dev_reset = eth_igb_reset, + .promiscuous_enable = eth_igb_promiscuous_enable, + .promiscuous_disable = eth_igb_promiscuous_disable, + .allmulticast_enable = eth_igb_allmulticast_enable, + .allmulticast_disable = eth_igb_allmulticast_disable, + .link_update = eth_igb_link_update, + .stats_get = eth_igb_stats_get, + .xstats_get = eth_igb_xstats_get, + .xstats_get_by_id = eth_igb_xstats_get_by_id, + .xstats_get_names_by_id = eth_igb_xstats_get_names_by_id, + .xstats_get_names = eth_igb_xstats_get_names, + .stats_reset = eth_igb_stats_reset, + .xstats_reset = eth_igb_xstats_reset, + .fw_version_get = eth_igb_fw_version_get, + .dev_infos_get = eth_igb_infos_get, + .dev_supported_ptypes_get = eth_igb_supported_ptypes_get, + .mtu_set = eth_igb_mtu_set, + .vlan_filter_set = eth_igb_vlan_filter_set, + .vlan_tpid_set = eth_igb_vlan_tpid_set, + .vlan_offload_set = eth_igb_vlan_offload_set, + .rx_queue_setup = eth_igb_rx_queue_setup, + .rx_queue_intr_enable = eth_igb_rx_queue_intr_enable, + .rx_queue_intr_disable = eth_igb_rx_queue_intr_disable, + .rx_queue_release = eth_igb_rx_queue_release, + .rx_queue_count = eth_igb_rx_queue_count, + .rx_descriptor_done = eth_igb_rx_descriptor_done, + .rx_descriptor_status = eth_igb_rx_descriptor_status, + .tx_descriptor_status = eth_igb_tx_descriptor_status, + .tx_queue_setup = eth_igb_tx_queue_setup, + .tx_queue_release = eth_igb_tx_queue_release, + .tx_done_cleanup = eth_igb_tx_done_cleanup, + .dev_led_on = eth_igb_led_on, + .dev_led_off = eth_igb_led_off, + .flow_ctrl_get = eth_igb_flow_ctrl_get, + .flow_ctrl_set = eth_igb_flow_ctrl_set, + .mac_addr_add = eth_igb_rar_set, + .mac_addr_remove = eth_igb_rar_clear, + .mac_addr_set = eth_igb_default_mac_addr_set, + .reta_update = eth_igb_rss_reta_update, + .reta_query = eth_igb_rss_reta_query, + .rss_hash_update = eth_igb_rss_hash_update, + .rss_hash_conf_get = eth_igb_rss_hash_conf_get, + .filter_ctrl = eth_igb_filter_ctrl, + .set_mc_addr_list = eth_igb_set_mc_addr_list, + .rxq_info_get = igb_rxq_info_get, + .txq_info_get = igb_txq_info_get, + .timesync_enable = igb_timesync_enable, + .timesync_disable = igb_timesync_disable, + .timesync_read_rx_timestamp = igb_timesync_read_rx_timestamp, + .timesync_read_tx_timestamp = igb_timesync_read_tx_timestamp, + .get_reg = eth_igb_get_regs, + .get_eeprom_length = eth_igb_get_eeprom_length, + .get_eeprom = eth_igb_get_eeprom, + .set_eeprom = eth_igb_set_eeprom, + .get_module_info = eth_igb_get_module_info, + .get_module_eeprom = eth_igb_get_module_eeprom, + .timesync_adjust_time = igb_timesync_adjust_time, + .timesync_read_time = igb_timesync_read_time, + .timesync_write_time = igb_timesync_write_time, +}; + +/* + * dev_ops for virtual function, bare necessities for basic vf + * operation have been implemented + */ +static const struct eth_dev_ops igbvf_eth_dev_ops = { + .dev_configure = igbvf_dev_configure, + .dev_start = igbvf_dev_start, + .dev_stop = igbvf_dev_stop, + .dev_close = igbvf_dev_close, + .promiscuous_enable = igbvf_promiscuous_enable, + .promiscuous_disable = igbvf_promiscuous_disable, + .allmulticast_enable = igbvf_allmulticast_enable, + .allmulticast_disable = igbvf_allmulticast_disable, + .link_update = eth_igb_link_update, + .stats_get = eth_igbvf_stats_get, + .xstats_get = eth_igbvf_xstats_get, + .xstats_get_names = eth_igbvf_xstats_get_names, + .stats_reset = eth_igbvf_stats_reset, + .xstats_reset = eth_igbvf_stats_reset, + 
.vlan_filter_set = igbvf_vlan_filter_set, + .dev_infos_get = eth_igbvf_infos_get, + .dev_supported_ptypes_get = eth_igb_supported_ptypes_get, + .rx_queue_setup = eth_igb_rx_queue_setup, + .rx_queue_release = eth_igb_rx_queue_release, + .rx_descriptor_done = eth_igb_rx_descriptor_done, + .rx_descriptor_status = eth_igb_rx_descriptor_status, + .tx_descriptor_status = eth_igb_tx_descriptor_status, + .tx_queue_setup = eth_igb_tx_queue_setup, + .tx_queue_release = eth_igb_tx_queue_release, + .tx_done_cleanup = eth_igb_tx_done_cleanup, + .set_mc_addr_list = eth_igb_set_mc_addr_list, + .rxq_info_get = igb_rxq_info_get, + .txq_info_get = igb_txq_info_get, + .mac_addr_set = igbvf_default_mac_addr_set, + .get_reg = igbvf_get_regs, +}; + +/* store statistics names and its offset in stats structure */ +struct rte_igb_xstats_name_off { + char name[RTE_ETH_XSTATS_NAME_SIZE]; + unsigned offset; +}; + +static const struct rte_igb_xstats_name_off rte_igb_stats_strings[] = { + {"rx_crc_errors", offsetof(struct e1000_hw_stats, crcerrs)}, + {"rx_align_errors", offsetof(struct e1000_hw_stats, algnerrc)}, + {"rx_symbol_errors", offsetof(struct e1000_hw_stats, symerrs)}, + {"rx_missed_packets", offsetof(struct e1000_hw_stats, mpc)}, + {"tx_single_collision_packets", offsetof(struct e1000_hw_stats, scc)}, + {"tx_multiple_collision_packets", offsetof(struct e1000_hw_stats, mcc)}, + {"tx_excessive_collision_packets", offsetof(struct e1000_hw_stats, + ecol)}, + {"tx_late_collisions", offsetof(struct e1000_hw_stats, latecol)}, + {"tx_total_collisions", offsetof(struct e1000_hw_stats, colc)}, + {"tx_deferred_packets", offsetof(struct e1000_hw_stats, dc)}, + {"tx_no_carrier_sense_packets", offsetof(struct e1000_hw_stats, tncrs)}, + {"rx_carrier_ext_errors", offsetof(struct e1000_hw_stats, cexterr)}, + {"rx_length_errors", offsetof(struct e1000_hw_stats, rlec)}, + {"rx_xon_packets", offsetof(struct e1000_hw_stats, xonrxc)}, + {"tx_xon_packets", offsetof(struct e1000_hw_stats, xontxc)}, + {"rx_xoff_packets", offsetof(struct e1000_hw_stats, xoffrxc)}, + {"tx_xoff_packets", offsetof(struct e1000_hw_stats, xofftxc)}, + {"rx_flow_control_unsupported_packets", offsetof(struct e1000_hw_stats, + fcruc)}, + {"rx_size_64_packets", offsetof(struct e1000_hw_stats, prc64)}, + {"rx_size_65_to_127_packets", offsetof(struct e1000_hw_stats, prc127)}, + {"rx_size_128_to_255_packets", offsetof(struct e1000_hw_stats, prc255)}, + {"rx_size_256_to_511_packets", offsetof(struct e1000_hw_stats, prc511)}, + {"rx_size_512_to_1023_packets", offsetof(struct e1000_hw_stats, + prc1023)}, + {"rx_size_1024_to_max_packets", offsetof(struct e1000_hw_stats, + prc1522)}, + {"rx_broadcast_packets", offsetof(struct e1000_hw_stats, bprc)}, + {"rx_multicast_packets", offsetof(struct e1000_hw_stats, mprc)}, + {"rx_undersize_errors", offsetof(struct e1000_hw_stats, ruc)}, + {"rx_fragment_errors", offsetof(struct e1000_hw_stats, rfc)}, + {"rx_oversize_errors", offsetof(struct e1000_hw_stats, roc)}, + {"rx_jabber_errors", offsetof(struct e1000_hw_stats, rjc)}, + {"rx_management_packets", offsetof(struct e1000_hw_stats, mgprc)}, + {"rx_management_dropped", offsetof(struct e1000_hw_stats, mgpdc)}, + {"tx_management_packets", offsetof(struct e1000_hw_stats, mgptc)}, + {"rx_total_packets", offsetof(struct e1000_hw_stats, tpr)}, + {"tx_total_packets", offsetof(struct e1000_hw_stats, tpt)}, + {"rx_total_bytes", offsetof(struct e1000_hw_stats, tor)}, + {"tx_total_bytes", offsetof(struct e1000_hw_stats, tot)}, + {"tx_size_64_packets", offsetof(struct e1000_hw_stats, 
ptc64)}, + {"tx_size_65_to_127_packets", offsetof(struct e1000_hw_stats, ptc127)}, + {"tx_size_128_to_255_packets", offsetof(struct e1000_hw_stats, ptc255)}, + {"tx_size_256_to_511_packets", offsetof(struct e1000_hw_stats, ptc511)}, + {"tx_size_512_to_1023_packets", offsetof(struct e1000_hw_stats, + ptc1023)}, + {"tx_size_1023_to_max_packets", offsetof(struct e1000_hw_stats, + ptc1522)}, + {"tx_multicast_packets", offsetof(struct e1000_hw_stats, mptc)}, + {"tx_broadcast_packets", offsetof(struct e1000_hw_stats, bptc)}, + {"tx_tso_packets", offsetof(struct e1000_hw_stats, tsctc)}, + {"tx_tso_errors", offsetof(struct e1000_hw_stats, tsctfc)}, + {"rx_sent_to_host_packets", offsetof(struct e1000_hw_stats, rpthc)}, + {"tx_sent_by_host_packets", offsetof(struct e1000_hw_stats, hgptc)}, + {"rx_code_violation_packets", offsetof(struct e1000_hw_stats, scvpc)}, + + {"interrupt_assert_count", offsetof(struct e1000_hw_stats, iac)}, +}; + +#define IGB_NB_XSTATS (sizeof(rte_igb_stats_strings) / \ + sizeof(rte_igb_stats_strings[0])) + +static const struct rte_igb_xstats_name_off rte_igbvf_stats_strings[] = { + {"rx_multicast_packets", offsetof(struct e1000_vf_stats, mprc)}, + {"rx_good_loopback_packets", offsetof(struct e1000_vf_stats, gprlbc)}, + {"tx_good_loopback_packets", offsetof(struct e1000_vf_stats, gptlbc)}, + {"rx_good_loopback_bytes", offsetof(struct e1000_vf_stats, gorlbc)}, + {"tx_good_loopback_bytes", offsetof(struct e1000_vf_stats, gotlbc)}, +}; + +#define IGBVF_NB_XSTATS (sizeof(rte_igbvf_stats_strings) / \ + sizeof(rte_igbvf_stats_strings[0])) + + +static inline void +igb_intr_enable(struct rte_eth_dev *dev) +{ + struct e1000_interrupt *intr = + E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + + if (rte_intr_allow_others(intr_handle) && + dev->data->dev_conf.intr_conf.lsc != 0) { + E1000_WRITE_REG(hw, E1000_EIMS, 1 << IGB_MSIX_OTHER_INTR_VEC); + } + + E1000_WRITE_REG(hw, E1000_IMS, intr->mask); + E1000_WRITE_FLUSH(hw); +} + +static void +igb_intr_disable(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + + if (rte_intr_allow_others(intr_handle) && + dev->data->dev_conf.intr_conf.lsc != 0) { + E1000_WRITE_REG(hw, E1000_EIMC, 1 << IGB_MSIX_OTHER_INTR_VEC); + } + + E1000_WRITE_REG(hw, E1000_IMC, ~0); + E1000_WRITE_FLUSH(hw); +} + +static inline void +igbvf_intr_enable(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* only for mailbox */ + E1000_WRITE_REG(hw, E1000_EIAM, 1 << E1000_VTIVAR_MISC_MAILBOX); + E1000_WRITE_REG(hw, E1000_EIAC, 1 << E1000_VTIVAR_MISC_MAILBOX); + E1000_WRITE_REG(hw, E1000_EIMS, 1 << E1000_VTIVAR_MISC_MAILBOX); + E1000_WRITE_FLUSH(hw); +} + +/* only for mailbox now. If RX/TX needed, should extend this function. 
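+ * With the register defines above, mapping the mailbox cause to MSI-X
+ * vector 0 therefore writes
+ *
+ *   (E1000_VTIVAR_MISC_MAILBOX & E1000_VTIVAR_MISC_INTR_MASK)
+ *       | E1000_VTIVAR_VALID  ==  (0 & 0x3) | 0x80  ==  0x80
+ *
+ * into E1000_VTIVAR_MISC: just the valid bit plus a zero vector index.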
*/ +static void +igbvf_set_ivar_map(struct e1000_hw *hw, uint8_t msix_vector) +{ + uint32_t tmp = 0; + + /* mailbox */ + tmp |= (msix_vector & E1000_VTIVAR_MISC_INTR_MASK); + tmp |= E1000_VTIVAR_VALID; + E1000_WRITE_REG(hw, E1000_VTIVAR_MISC, tmp); +} + +static void +eth_igbvf_configure_msix_intr(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* Configure VF other cause ivar */ + igbvf_set_ivar_map(hw, E1000_VTIVAR_MISC_MAILBOX); +} + +static inline int32_t +igb_pf_reset_hw(struct e1000_hw *hw) +{ + uint32_t ctrl_ext; + int32_t status; + + status = e1000_reset_hw(hw); + + ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); + /* Set PF Reset Done bit so PF/VF Mail Ops can work */ + ctrl_ext |= E1000_CTRL_EXT_PFRSTD; + E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); + E1000_WRITE_FLUSH(hw); + + return status; +} + +static void +igb_identify_hardware(struct rte_eth_dev *dev, struct rte_pci_device *pci_dev) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + + hw->vendor_id = pci_dev->id.vendor_id; + hw->device_id = pci_dev->id.device_id; + hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id; + hw->subsystem_device_id = pci_dev->id.subsystem_device_id; + + e1000_set_mac_type(hw); + + /* need to check if it is a vf device below */ +} + +static int +igb_reset_swfw_lock(struct e1000_hw *hw) +{ + int ret_val; + + /* + * Do mac ops initialization manually here, since we will need + * some function pointers set by this call. + */ + ret_val = e1000_init_mac_params(hw); + if (ret_val) + return ret_val; + + /* + * SMBI lock should not fail in this early stage. If this is the case, + * it is due to an improper exit of the application. + * So force the release of the faulty lock. + */ + if (e1000_get_hw_semaphore_generic(hw) < 0) { + PMD_DRV_LOG(DEBUG, "SMBI lock released"); + } + e1000_put_hw_semaphore_generic(hw); + + if (hw->mac.ops.acquire_swfw_sync != NULL) { + uint16_t mask; + + /* + * Phy lock should not fail in this early stage. If this is the case, + * it is due to an improper exit of the application. + * So force the release of the faulty lock. + */ + mask = E1000_SWFW_PHY0_SM << hw->bus.func; + if (hw->bus.func > E1000_FUNC_1) + mask <<= 2; + if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0) { + PMD_DRV_LOG(DEBUG, "SWFW phy%d lock released", + hw->bus.func); + } + hw->mac.ops.release_swfw_sync(hw, mask); + + /* + * This one is more tricky since it is common to all ports; but + * swfw_sync retries last long enough (1s) to be almost sure that if + * lock can not be taken it is due to an improper lock of the + * semaphore. 
+ */ + mask = E1000_SWFW_EEP_SM; + if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0) { + PMD_DRV_LOG(DEBUG, "SWFW common locks released"); + } + hw->mac.ops.release_swfw_sync(hw, mask); + } + + return E1000_SUCCESS; +} + +/* Remove all ntuple filters of the device */ +static int igb_ntuple_filter_uninit(struct rte_eth_dev *eth_dev) +{ + struct e1000_filter_info *filter_info = + E1000_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private); + struct e1000_5tuple_filter *p_5tuple; + struct e1000_2tuple_filter *p_2tuple; + + while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list))) { + TAILQ_REMOVE(&filter_info->fivetuple_list, + p_5tuple, entries); + rte_free(p_5tuple); + } + filter_info->fivetuple_mask = 0; + while ((p_2tuple = TAILQ_FIRST(&filter_info->twotuple_list))) { + TAILQ_REMOVE(&filter_info->twotuple_list, + p_2tuple, entries); + rte_free(p_2tuple); + } + filter_info->twotuple_mask = 0; + + return 0; +} + +/* Remove all flex filters of the device */ +static int igb_flex_filter_uninit(struct rte_eth_dev *eth_dev) +{ + struct e1000_filter_info *filter_info = + E1000_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private); + struct e1000_flex_filter *p_flex; + + while ((p_flex = TAILQ_FIRST(&filter_info->flex_list))) { + TAILQ_REMOVE(&filter_info->flex_list, p_flex, entries); + rte_free(p_flex); + } + filter_info->flex_mask = 0; + + return 0; +} + +static int +eth_igb_dev_init(struct rte_eth_dev *eth_dev) +{ + int error = 0; + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); + struct e1000_vfta * shadow_vfta = + E1000_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private); + struct e1000_filter_info *filter_info = + E1000_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private); + struct e1000_adapter *adapter = + E1000_DEV_PRIVATE(eth_dev->data->dev_private); + + uint32_t ctrl_ext; + + eth_dev->dev_ops = ð_igb_ops; + eth_dev->rx_pkt_burst = ð_igb_recv_pkts; + eth_dev->tx_pkt_burst = ð_igb_xmit_pkts; + eth_dev->tx_pkt_prepare = ð_igb_prep_pkts; + + /* for secondary processes, we don't initialise any further as primary + * has already done this work. Only check we don't need a different + * RX function */ + if (rte_eal_process_type() != RTE_PROC_PRIMARY){ + if (eth_dev->data->scattered_rx) + eth_dev->rx_pkt_burst = ð_igb_recv_scattered_pkts; + return 0; + } + + rte_eth_copy_pci_info(eth_dev, pci_dev); + + hw->hw_addr= (void *)pci_dev->mem_resource[0].addr; + + igb_identify_hardware(eth_dev, pci_dev); + if (e1000_setup_init_funcs(hw, FALSE) != E1000_SUCCESS) { + error = -EIO; + goto err_late; + } + + e1000_get_bus_info(hw); + + /* Reset any pending lock */ + if (igb_reset_swfw_lock(hw) != E1000_SUCCESS) { + error = -EIO; + goto err_late; + } + + /* Finish initialization */ + if (e1000_setup_init_funcs(hw, TRUE) != E1000_SUCCESS) { + error = -EIO; + goto err_late; + } + + hw->mac.autoneg = 1; + hw->phy.autoneg_wait_to_complete = 0; + hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX; + + /* Copper options */ + if (hw->phy.media_type == e1000_media_type_copper) { + hw->phy.mdix = 0; /* AUTO_ALL_MODES */ + hw->phy.disable_polarity_correction = 0; + hw->phy.ms_type = e1000_ms_hw_default; + } + + /* + * Start from a known state, this is important in reading the nvm + * and mac from that. 
+ */ + igb_pf_reset_hw(hw); + + /* Make sure we have a good EEPROM before we read from it */ + if (e1000_validate_nvm_checksum(hw) < 0) { + /* + * Some PCI-E parts fail the first check due to + * the link being in sleep state, call it again, + * if it fails a second time its a real issue. + */ + if (e1000_validate_nvm_checksum(hw) < 0) { + PMD_INIT_LOG(ERR, "EEPROM checksum invalid"); + error = -EIO; + goto err_late; + } + } + + /* Read the permanent MAC address out of the EEPROM */ + if (e1000_read_mac_addr(hw) != 0) { + PMD_INIT_LOG(ERR, "EEPROM error while reading MAC address"); + error = -EIO; + goto err_late; + } + + /* Allocate memory for storing MAC addresses */ + eth_dev->data->mac_addrs = rte_zmalloc("e1000", + RTE_ETHER_ADDR_LEN * hw->mac.rar_entry_count, 0); + if (eth_dev->data->mac_addrs == NULL) { + PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to " + "store MAC addresses", + RTE_ETHER_ADDR_LEN * hw->mac.rar_entry_count); + error = -ENOMEM; + goto err_late; + } + + /* Copy the permanent MAC address */ + rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr, + ð_dev->data->mac_addrs[0]); + + /* Pass the information to the rte_eth_dev_close() that it should also + * release the private port resources. + */ + eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE; + + /* initialize the vfta */ + memset(shadow_vfta, 0, sizeof(*shadow_vfta)); + + /* Now initialize the hardware */ + if (igb_hardware_init(hw) != 0) { + PMD_INIT_LOG(ERR, "Hardware initialization failed"); + rte_free(eth_dev->data->mac_addrs); + eth_dev->data->mac_addrs = NULL; + error = -ENODEV; + goto err_late; + } + hw->mac.get_link_status = 1; + adapter->stopped = 0; + + /* Indicate SOL/IDER usage */ + if (e1000_check_reset_block(hw) < 0) { + PMD_INIT_LOG(ERR, "PHY reset is blocked due to" + "SOL/IDER session"); + } + + /* initialize PF if max_vfs not zero */ + igb_pf_host_init(eth_dev); + + ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); + /* Set PF Reset Done bit so PF/VF Mail Ops can work */ + ctrl_ext |= E1000_CTRL_EXT_PFRSTD; + E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); + E1000_WRITE_FLUSH(hw); + + PMD_INIT_LOG(DEBUG, "port_id %d vendorID=0x%x deviceID=0x%x", + eth_dev->data->port_id, pci_dev->id.vendor_id, + pci_dev->id.device_id); + + rte_intr_callback_register(&pci_dev->intr_handle, + eth_igb_interrupt_handler, + (void *)eth_dev); + + /* enable uio/vfio intr/eventfd mapping */ + rte_intr_enable(&pci_dev->intr_handle); + + /* enable support intr */ + igb_intr_enable(eth_dev); + + eth_igb_dev_set_link_down(eth_dev); + + /* initialize filter info */ + memset(filter_info, 0, + sizeof(struct e1000_filter_info)); + + TAILQ_INIT(&filter_info->flex_list); + TAILQ_INIT(&filter_info->twotuple_list); + TAILQ_INIT(&filter_info->fivetuple_list); + + TAILQ_INIT(&igb_filter_ntuple_list); + TAILQ_INIT(&igb_filter_ethertype_list); + TAILQ_INIT(&igb_filter_syn_list); + TAILQ_INIT(&igb_filter_flex_list); + TAILQ_INIT(&igb_filter_rss_list); + TAILQ_INIT(&igb_flow_list); + + return 0; + +err_late: + igb_hw_control_release(hw); + + return error; +} + +static int +eth_igb_dev_uninit(struct rte_eth_dev *eth_dev) +{ + PMD_INIT_FUNC_TRACE(); + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; + + eth_igb_close(eth_dev); + + return 0; +} + +/* + * Virtual Function device init + */ +static int +eth_igbvf_dev_init(struct rte_eth_dev *eth_dev) +{ + struct rte_pci_device *pci_dev; + struct rte_intr_handle *intr_handle; + struct e1000_adapter *adapter = + E1000_DEV_PRIVATE(eth_dev->data->dev_private); + struct 
e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); + int diag; + struct rte_ether_addr *perm_addr = + (struct rte_ether_addr *)hw->mac.perm_addr; + + PMD_INIT_FUNC_TRACE(); + + eth_dev->dev_ops = &igbvf_eth_dev_ops; + eth_dev->rx_pkt_burst = ð_igb_recv_pkts; + eth_dev->tx_pkt_burst = ð_igb_xmit_pkts; + eth_dev->tx_pkt_prepare = ð_igb_prep_pkts; + + /* for secondary processes, we don't initialise any further as primary + * has already done this work. Only check we don't need a different + * RX function */ + if (rte_eal_process_type() != RTE_PROC_PRIMARY){ + if (eth_dev->data->scattered_rx) + eth_dev->rx_pkt_burst = ð_igb_recv_scattered_pkts; + return 0; + } + + pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); + rte_eth_copy_pci_info(eth_dev, pci_dev); + + hw->device_id = pci_dev->id.device_id; + hw->vendor_id = pci_dev->id.vendor_id; + hw->hw_addr = (void *)pci_dev->mem_resource[0].addr; + adapter->stopped = 0; + + /* Initialize the shared code (base driver) */ + diag = e1000_setup_init_funcs(hw, TRUE); + if (diag != 0) { + PMD_INIT_LOG(ERR, "Shared code init failed for igbvf: %d", + diag); + return -EIO; + } + + /* init_mailbox_params */ + hw->mbx.ops.init_params(hw); + + /* Disable the interrupts for VF */ + igbvf_intr_disable(hw); + + diag = hw->mac.ops.reset_hw(hw); + + /* Allocate memory for storing MAC addresses */ + eth_dev->data->mac_addrs = rte_zmalloc("igbvf", RTE_ETHER_ADDR_LEN * + hw->mac.rar_entry_count, 0); + if (eth_dev->data->mac_addrs == NULL) { + PMD_INIT_LOG(ERR, + "Failed to allocate %d bytes needed to store MAC " + "addresses", + RTE_ETHER_ADDR_LEN * hw->mac.rar_entry_count); + return -ENOMEM; + } + + /* Pass the information to the rte_eth_dev_close() that it should also + * release the private port resources. + */ + eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE; + + /* Generate a random MAC address, if none was assigned by PF. 
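+ * A usable random address must be unicast and should be flagged as
+ * locally administered, which is the convention rte_eth_random_addr()
+ * is relied on to follow here. A minimal, self-contained sketch of
+ * that convention (illustrative only, not the DPDK implementation):
+ *
+ *   void random_unicast_mac(uint8_t mac[6])
+ *   {
+ *       for (int i = 0; i < 6; i++)
+ *           mac[i] = (uint8_t)rand();   // seeding/RNG quality left aside
+ *       mac[0] &= 0xFE;   // clear the group (multicast) bit
+ *       mac[0] |= 0x02;   // set the locally-administered bit
+ *   }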
*/ + if (rte_is_zero_ether_addr(perm_addr)) { + rte_eth_random_addr(perm_addr->addr_bytes); + PMD_INIT_LOG(INFO, "\tVF MAC address not assigned by Host PF"); + PMD_INIT_LOG(INFO, "\tAssign randomly generated MAC address " + "%02x:%02x:%02x:%02x:%02x:%02x", + perm_addr->addr_bytes[0], + perm_addr->addr_bytes[1], + perm_addr->addr_bytes[2], + perm_addr->addr_bytes[3], + perm_addr->addr_bytes[4], + perm_addr->addr_bytes[5]); + } + + diag = e1000_rar_set(hw, perm_addr->addr_bytes, 0); + if (diag) { + rte_free(eth_dev->data->mac_addrs); + eth_dev->data->mac_addrs = NULL; + return diag; + } + /* Copy the permanent MAC address */ + rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr, + ð_dev->data->mac_addrs[0]); + + PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x " + "mac.type=%s", + eth_dev->data->port_id, pci_dev->id.vendor_id, + pci_dev->id.device_id, "igb_mac_82576_vf"); + + intr_handle = &pci_dev->intr_handle; + rte_intr_callback_register(intr_handle, + eth_igbvf_interrupt_handler, eth_dev); + + return 0; +} + +static int +eth_igbvf_dev_uninit(struct rte_eth_dev *eth_dev) +{ + PMD_INIT_FUNC_TRACE(); + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; + + igbvf_dev_close(eth_dev); + + return 0; +} + +static int eth_igb_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, + struct rte_pci_device *pci_dev) +{ + return rte_eth_dev_pci_generic_probe(pci_dev, + sizeof(struct e1000_adapter), eth_igb_dev_init); +} + +static int eth_igb_pci_remove(struct rte_pci_device *pci_dev) +{ + return rte_eth_dev_pci_generic_remove(pci_dev, eth_igb_dev_uninit); +} + +static struct rte_pci_driver rte_igb_pmd = { + .id_table = pci_id_igb_map, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, + .probe = eth_igb_pci_probe, + .remove = eth_igb_pci_remove, +}; + + +static int eth_igbvf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, + struct rte_pci_device *pci_dev) +{ + return rte_eth_dev_pci_generic_probe(pci_dev, + sizeof(struct e1000_adapter), eth_igbvf_dev_init); +} + +static int eth_igbvf_pci_remove(struct rte_pci_device *pci_dev) +{ + return rte_eth_dev_pci_generic_remove(pci_dev, eth_igbvf_dev_uninit); +} + +/* + * virtual function driver struct + */ +static struct rte_pci_driver rte_igbvf_pmd = { + .id_table = pci_id_igbvf_map, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING, + .probe = eth_igbvf_pci_probe, + .remove = eth_igbvf_pci_remove, +}; + +static void +igb_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + /* RCTL: enable VLAN filter since VMDq always use VLAN filter */ + uint32_t rctl = E1000_READ_REG(hw, E1000_RCTL); + rctl |= E1000_RCTL_VFE; + E1000_WRITE_REG(hw, E1000_RCTL, rctl); +} + +static int +igb_check_mq_mode(struct rte_eth_dev *dev) +{ + enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode; + enum rte_eth_tx_mq_mode tx_mq_mode = dev->data->dev_conf.txmode.mq_mode; + uint16_t nb_rx_q = dev->data->nb_rx_queues; + uint16_t nb_tx_q = dev->data->nb_tx_queues; + + if ((rx_mq_mode & ETH_MQ_RX_DCB_FLAG) || + tx_mq_mode == ETH_MQ_TX_DCB || + tx_mq_mode == ETH_MQ_TX_VMDQ_DCB) { + PMD_INIT_LOG(ERR, "DCB mode is not supported."); + return -EINVAL; + } + if (RTE_ETH_DEV_SRIOV(dev).active != 0) { + /* Check multi-queue mode. + * To no break software we accept ETH_MQ_RX_NONE as this might + * be used to turn off VLAN filter. 
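+ * In short, with SR-IOV active the PF is restricted to a single
+ * RX/TX queue pair: ETH_MQ_RX_NONE and ETH_MQ_RX_VMDQ_ONLY are both
+ * coerced to VMDQ_ONLY with one queue per pool, any other RX mode
+ * (RSS included) is rejected, and a TX mode other than VMDQ_ONLY only
+ * produces a warning.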
+ */ + + if (rx_mq_mode == ETH_MQ_RX_NONE || + rx_mq_mode == ETH_MQ_RX_VMDQ_ONLY) { + dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY; + RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1; + } else { + /* Only support one queue on VFs. + * RSS together with SRIOV is not supported. + */ + PMD_INIT_LOG(ERR, "SRIOV is active," + " wrong mq_mode rx %d.", + rx_mq_mode); + return -EINVAL; + } + /* TX mode is not used here, so mode might be ignored.*/ + if (tx_mq_mode != ETH_MQ_TX_VMDQ_ONLY) { + /* SRIOV only works in VMDq enable mode */ + PMD_INIT_LOG(WARNING, "SRIOV is active," + " TX mode %d is not supported. " + " Driver will behave as %d mode.", + tx_mq_mode, ETH_MQ_TX_VMDQ_ONLY); + } + + /* check valid queue number */ + if ((nb_rx_q > 1) || (nb_tx_q > 1)) { + PMD_INIT_LOG(ERR, "SRIOV is active," + " only support one queue on VFs."); + return -EINVAL; + } + } else { + /* To no break software that set invalid mode, only display + * warning if invalid mode is used. + */ + if (rx_mq_mode != ETH_MQ_RX_NONE && + rx_mq_mode != ETH_MQ_RX_VMDQ_ONLY && + rx_mq_mode != ETH_MQ_RX_RSS) { + /* RSS together with VMDq not supported*/ + PMD_INIT_LOG(ERR, "RX mode %d is not supported.", + rx_mq_mode); + return -EINVAL; + } + + if (tx_mq_mode != ETH_MQ_TX_NONE && + tx_mq_mode != ETH_MQ_TX_VMDQ_ONLY) { + PMD_INIT_LOG(WARNING, "TX mode %d is not supported." + " Due to txmode is meaningless in this" + " driver, just ignore.", + tx_mq_mode); + } + } + return 0; +} + +static int +eth_igb_configure(struct rte_eth_dev *dev) +{ + struct e1000_interrupt *intr = + E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + int ret; + + PMD_INIT_FUNC_TRACE(); + + if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) + dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH; + + /* multipe queue mode checking */ + ret = igb_check_mq_mode(dev); + if (ret != 0) { + PMD_DRV_LOG(ERR, "igb_check_mq_mode fails with %d.", + ret); + return ret; + } + + intr->flags |= E1000_FLAG_NEED_LINK_UPDATE; + PMD_INIT_FUNC_TRACE(); + + return 0; +} + +static void +eth_igb_rxtx_control(struct rte_eth_dev *dev, + bool enable) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t tctl, rctl; + + tctl = E1000_READ_REG(hw, E1000_TCTL); + rctl = E1000_READ_REG(hw, E1000_RCTL); + + if (enable) { + /* enable Tx/Rx */ + tctl |= E1000_TCTL_EN; + rctl |= E1000_RCTL_EN; + } else { + /* disable Tx/Rx */ + tctl &= ~E1000_TCTL_EN; + rctl &= ~E1000_RCTL_EN; + } + E1000_WRITE_REG(hw, E1000_TCTL, tctl); + E1000_WRITE_REG(hw, E1000_RCTL, rctl); + E1000_WRITE_FLUSH(hw); +} + +static int +eth_igb_start(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_adapter *adapter = + E1000_DEV_PRIVATE(dev->data->dev_private); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + int ret, mask; + uint32_t intr_vector = 0; + uint32_t ctrl_ext; + uint32_t *speeds; + int num_speeds; + bool autoneg; + + PMD_INIT_FUNC_TRACE(); + + /* disable uio/vfio intr/eventfd mapping */ + rte_intr_disable(intr_handle); + + /* Power up the phy. Needed to make the link go Up */ + eth_igb_dev_set_link_up(dev); + + /* + * Packet Buffer Allocation (PBA) + * Writing PBA sets the receive portion of the buffer + * the remainder is used for the transmit buffer. 
+ */ + if (hw->mac.type == e1000_82575) { + uint32_t pba; + + pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */ + E1000_WRITE_REG(hw, E1000_PBA, pba); + } + + /* Put the address into the Receive Address Array */ + e1000_rar_set(hw, hw->mac.addr, 0); + + /* Initialize the hardware */ + if (igb_hardware_init(hw)) { + PMD_INIT_LOG(ERR, "Unable to initialize the hardware"); + return -EIO; + } + adapter->stopped = 0; + + E1000_WRITE_REG(hw, E1000_VET, + RTE_ETHER_TYPE_VLAN << 16 | RTE_ETHER_TYPE_VLAN); + + ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); + /* Set PF Reset Done bit so PF/VF Mail Ops can work */ + ctrl_ext |= E1000_CTRL_EXT_PFRSTD; + E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); + E1000_WRITE_FLUSH(hw); + + /* configure PF module if SRIOV enabled */ + igb_pf_host_configure(dev); + + /* check and configure queue intr-vector mapping */ + if ((rte_intr_cap_multiple(intr_handle) || + !RTE_ETH_DEV_SRIOV(dev).active) && + dev->data->dev_conf.intr_conf.rxq != 0) { + intr_vector = dev->data->nb_rx_queues; + if (rte_intr_efd_enable(intr_handle, intr_vector)) + return -1; + } + + if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) { + intr_handle->intr_vec = + rte_zmalloc("intr_vec", + dev->data->nb_rx_queues * sizeof(int), 0); + if (intr_handle->intr_vec == NULL) { + PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues" + " intr_vec", dev->data->nb_rx_queues); + return -ENOMEM; + } + } + + /* confiugre msix for rx interrupt */ + eth_igb_configure_msix_intr(dev); + + /* Configure for OS presence */ + igb_init_manageability(hw); + + eth_igb_tx_init(dev); + + /* This can fail when allocating mbufs for descriptor rings */ + ret = eth_igb_rx_init(dev); + if (ret) { + PMD_INIT_LOG(ERR, "Unable to initialize RX hardware"); + igb_dev_clear_queues(dev); + return ret; + } + + e1000_clear_hw_cntrs_base_generic(hw); + + /* + * VLAN Offload Settings + */ + mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \ + ETH_VLAN_EXTEND_MASK; + ret = eth_igb_vlan_offload_set(dev, mask); + if (ret) { + PMD_INIT_LOG(ERR, "Unable to set vlan offload"); + igb_dev_clear_queues(dev); + return ret; + } + + if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) { + /* Enable VLAN filter since VMDq always use VLAN filter */ + igb_vmdq_vlan_hw_filter_enable(dev); + } + + if ((hw->mac.type == e1000_82576) || (hw->mac.type == e1000_82580) || + (hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i210) || + (hw->mac.type == e1000_i211)) { + /* Configure EITR with the maximum possible value (0xFFFF) */ + E1000_WRITE_REG(hw, E1000_EITR(0), 0xFFFF); + } + + /* Setup link speed and duplex */ + speeds = &dev->data->dev_conf.link_speeds; + if (*speeds == ETH_LINK_SPEED_AUTONEG) { + hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX; + hw->mac.autoneg = 1; + } else { + num_speeds = 0; + autoneg = (*speeds & ETH_LINK_SPEED_FIXED) == 0; + + /* Reset */ + hw->phy.autoneg_advertised = 0; + + if (*speeds & ~(ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M | + ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M | + ETH_LINK_SPEED_1G | ETH_LINK_SPEED_FIXED)) { + num_speeds = -1; + goto error_invalid_config; + } + if (*speeds & ETH_LINK_SPEED_10M_HD) { + hw->phy.autoneg_advertised |= ADVERTISE_10_HALF; + num_speeds++; + } + if (*speeds & ETH_LINK_SPEED_10M) { + hw->phy.autoneg_advertised |= ADVERTISE_10_FULL; + num_speeds++; + } + if (*speeds & ETH_LINK_SPEED_100M_HD) { + hw->phy.autoneg_advertised |= ADVERTISE_100_HALF; + num_speeds++; + } + if (*speeds & ETH_LINK_SPEED_100M) { + hw->phy.autoneg_advertised |= ADVERTISE_100_FULL; + 
num_speeds++; + } + if (*speeds & ETH_LINK_SPEED_1G) { + hw->phy.autoneg_advertised |= ADVERTISE_1000_FULL; + num_speeds++; + } + if (num_speeds == 0 || (!autoneg && (num_speeds > 1))) + goto error_invalid_config; + + /* Set/reset the mac.autoneg based on the link speed, + * fixed or not + */ + if (!autoneg) { + hw->mac.autoneg = 0; + hw->mac.forced_speed_duplex = + hw->phy.autoneg_advertised; + } else { + hw->mac.autoneg = 1; + } + } + + e1000_setup_link(hw); + + if (rte_intr_allow_others(intr_handle)) { + /* check if lsc interrupt is enabled */ + if (dev->data->dev_conf.intr_conf.lsc != 0) + eth_igb_lsc_interrupt_setup(dev, TRUE); + else + eth_igb_lsc_interrupt_setup(dev, FALSE); + } else { + rte_intr_callback_unregister(intr_handle, + eth_igb_interrupt_handler, + (void *)dev); + if (dev->data->dev_conf.intr_conf.lsc != 0) + PMD_INIT_LOG(INFO, "lsc won't enable because of" + " no intr multiplex"); + } + + /* check if rxq interrupt is enabled */ + if (dev->data->dev_conf.intr_conf.rxq != 0 && + rte_intr_dp_is_en(intr_handle)) + eth_igb_rxq_interrupt_setup(dev); + + /* enable uio/vfio intr/eventfd mapping */ + rte_intr_enable(intr_handle); + + /* resume enabled intr since hw reset */ + igb_intr_enable(dev); + + /* restore all types filter */ + igb_filter_restore(dev); + + eth_igb_rxtx_control(dev, true); + eth_igb_link_update(dev, 0); + + PMD_INIT_LOG(DEBUG, "<<"); + + return 0; + +error_invalid_config: + PMD_INIT_LOG(ERR, "Invalid advertised speeds (%u) for port %u", + dev->data->dev_conf.link_speeds, dev->data->port_id); + igb_dev_clear_queues(dev); + return -EINVAL; +} + +/********************************************************************* + * + * This routine disables all traffic on the adapter by issuing a + * global reset on the MAC. + * + **********************************************************************/ +static void +eth_igb_stop(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_eth_link link; + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + struct e1000_adapter *adapter = + E1000_DEV_PRIVATE(dev->data->dev_private); + + if (adapter->stopped) + return; + + eth_igb_rxtx_control(dev, false); + + igb_intr_disable(dev); + + /* disable intr eventfd mapping */ + rte_intr_disable(intr_handle); + + igb_pf_reset_hw(hw); + E1000_WRITE_REG(hw, E1000_WUC, 0); + + /* Set bit for Go Link disconnect if PHY reset is not blocked */ + if (hw->mac.type >= e1000_82580 && + (e1000_check_reset_block(hw) != E1000_BLK_PHY_RESET)) { + uint32_t phpm_reg; + + phpm_reg = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT); + phpm_reg |= E1000_82580_PM_GO_LINKD; + E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, phpm_reg); + } + + /* Power down the phy. 
Needed to make the link go Down */ + eth_igb_dev_set_link_down(dev); + + igb_dev_clear_queues(dev); + + /* clear the recorded link status */ + memset(&link, 0, sizeof(link)); + rte_eth_linkstatus_set(dev, &link); + + if (!rte_intr_allow_others(intr_handle)) + /* resume to the default handler */ + rte_intr_callback_register(intr_handle, + eth_igb_interrupt_handler, + (void *)dev); + + /* Clean datapath event and queue/vec mapping */ + rte_intr_efd_disable(intr_handle); + if (intr_handle->intr_vec != NULL) { + rte_free(intr_handle->intr_vec); + intr_handle->intr_vec = NULL; + } + + adapter->stopped = true; +} + +static int +eth_igb_dev_set_link_up(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (hw->phy.media_type == e1000_media_type_copper) + e1000_power_up_phy(hw); + else + e1000_power_up_fiber_serdes_link(hw); + + return 0; +} + +static int +eth_igb_dev_set_link_down(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (hw->phy.media_type == e1000_media_type_copper) + e1000_power_down_phy(hw); + else + e1000_shutdown_fiber_serdes_link(hw); + + return 0; +} + +static void +eth_igb_close(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_eth_link link; + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + struct e1000_filter_info *filter_info = + E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + + eth_igb_stop(dev); + + e1000_phy_hw_reset(hw); + igb_release_manageability(hw); + igb_hw_control_release(hw); + + /* Clear bit for Go Link disconnect if PHY reset is not blocked */ + if (hw->mac.type >= e1000_82580 && + (e1000_check_reset_block(hw) != E1000_BLK_PHY_RESET)) { + uint32_t phpm_reg; + + phpm_reg = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT); + phpm_reg &= ~E1000_82580_PM_GO_LINKD; + E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, phpm_reg); + } + + igb_dev_free_queues(dev); + + if (intr_handle->intr_vec) { + rte_free(intr_handle->intr_vec); + intr_handle->intr_vec = NULL; + } + + memset(&link, 0, sizeof(link)); + rte_eth_linkstatus_set(dev, &link); + + dev->dev_ops = NULL; + dev->rx_pkt_burst = NULL; + dev->tx_pkt_burst = NULL; + + /* Reset any pending lock */ + igb_reset_swfw_lock(hw); + + /* uninitialize PF if max_vfs not zero */ + igb_pf_host_uninit(dev); + + rte_intr_callback_unregister(intr_handle, + eth_igb_interrupt_handler, dev); + + /* clear the SYN filter info */ + filter_info->syn_info = 0; + + /* clear the ethertype filters info */ + filter_info->ethertype_mask = 0; + memset(filter_info->ethertype_filters, 0, + E1000_MAX_ETQF_FILTERS * sizeof(struct igb_ethertype_filter)); + + /* clear the rss filter info */ + memset(&filter_info->rss_info, 0, + sizeof(struct igb_rte_flow_rss_conf)); + + /* remove all ntuple filters of the device */ + igb_ntuple_filter_uninit(dev); + + /* remove all flex filters of the device */ + igb_flex_filter_uninit(dev); + + /* clear all the filters list */ + igb_filterlist_flush(dev); +} + +/* + * Reset PF device. + */ +static int +eth_igb_reset(struct rte_eth_dev *dev) +{ + int ret; + + /* When a DPDK PMD PF begin to reset PF port, it should notify all + * its VF to make them align with it. The detailed notification + * mechanism is PMD specific and is currently not implemented. + * To avoid unexpected behavior in VF, currently reset of PF with + * SR-IOV activation is not supported. 
It might be supported later. + */ + if (dev->data->sriov.active) + return -ENOTSUP; + + ret = eth_igb_dev_uninit(dev); + if (ret) + return ret; + + ret = eth_igb_dev_init(dev); + + return ret; +} + + +static int +igb_get_rx_buffer_size(struct e1000_hw *hw) +{ + uint32_t rx_buf_size; + if (hw->mac.type == e1000_82576) { + rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0xffff) << 10; + } else if (hw->mac.type == e1000_82580 || hw->mac.type == e1000_i350) { + /* PBS needs to be translated according to a lookup table */ + rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0xf); + rx_buf_size = (uint32_t) e1000_rxpbs_adjust_82580(rx_buf_size); + rx_buf_size = (rx_buf_size << 10); + } else if (hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211) { + rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0x3f) << 10; + } else { + rx_buf_size = (E1000_READ_REG(hw, E1000_PBA) & 0xffff) << 10; + } + + return rx_buf_size; +} + +/********************************************************************* + * + * Initialize the hardware + * + **********************************************************************/ +static int +igb_hardware_init(struct e1000_hw *hw) +{ + uint32_t rx_buf_size; + int diag; + + /* Let the firmware know the OS is in control */ + igb_hw_control_acquire(hw); + + /* + * These parameters control the automatic generation (Tx) and + * response (Rx) to Ethernet PAUSE frames. + * - High water mark should allow for at least two standard size (1518) + * frames to be received after sending an XOFF. + * - Low water mark works best when it is very near the high water mark. + * This allows the receiver to restart by sending XON when it has + * drained a bit. Here we use an arbitrary value of 1500 which will + * restart after one full frame is pulled from the buffer. There + * could be several smaller frames in the buffer and if so they will + * not trigger the XON until their total number reduces the buffer + * by 1500. + * - The pause time is fairly large at 1000 x 512ns = 512 usec. 
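+ * Worked example (assuming, purely for illustration, a 64 KB Rx
+ * packet buffer reported by igb_get_rx_buffer_size()):
+ *   high_water = 65536 - 2 * 1518 = 62500 bytes
+ *   low_water  = 62500 - 1500     = 61000 bytes
+ * i.e. two maximum-size frames still fit above the XOFF threshold
+ * (high_water), matching the rule described above.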
+ */ + rx_buf_size = igb_get_rx_buffer_size(hw); + + hw->fc.high_water = rx_buf_size - (RTE_ETHER_MAX_LEN * 2); + hw->fc.low_water = hw->fc.high_water - 1500; + hw->fc.pause_time = IGB_FC_PAUSE_TIME; + hw->fc.send_xon = 1; + + /* Set Flow control, use the tunable location if sane */ + if ((igb_fc_setting != e1000_fc_none) && (igb_fc_setting < 4)) + hw->fc.requested_mode = igb_fc_setting; + else + hw->fc.requested_mode = e1000_fc_none; + + /* Issue a global reset */ + igb_pf_reset_hw(hw); + E1000_WRITE_REG(hw, E1000_WUC, 0); + + diag = e1000_init_hw(hw); + if (diag < 0) + return diag; + + E1000_WRITE_REG(hw, E1000_VET, + RTE_ETHER_TYPE_VLAN << 16 | RTE_ETHER_TYPE_VLAN); + e1000_get_phy_info(hw); + e1000_check_for_link(hw); + + return 0; +} + +/* This function is based on igb_update_stats_counters() in igb/if_igb.c */ +static void +igb_read_stats_registers(struct e1000_hw *hw, struct e1000_hw_stats *stats) +{ + int pause_frames; + + uint64_t old_gprc = stats->gprc; + uint64_t old_gptc = stats->gptc; + uint64_t old_tpr = stats->tpr; + uint64_t old_tpt = stats->tpt; + uint64_t old_rpthc = stats->rpthc; + uint64_t old_hgptc = stats->hgptc; + + if(hw->phy.media_type == e1000_media_type_copper || + (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) { + stats->symerrs += + E1000_READ_REG(hw,E1000_SYMERRS); + stats->sec += E1000_READ_REG(hw, E1000_SEC); + } + + stats->crcerrs += E1000_READ_REG(hw, E1000_CRCERRS); + stats->mpc += E1000_READ_REG(hw, E1000_MPC); + stats->scc += E1000_READ_REG(hw, E1000_SCC); + stats->ecol += E1000_READ_REG(hw, E1000_ECOL); + + stats->mcc += E1000_READ_REG(hw, E1000_MCC); + stats->latecol += E1000_READ_REG(hw, E1000_LATECOL); + stats->colc += E1000_READ_REG(hw, E1000_COLC); + stats->dc += E1000_READ_REG(hw, E1000_DC); + stats->rlec += E1000_READ_REG(hw, E1000_RLEC); + stats->xonrxc += E1000_READ_REG(hw, E1000_XONRXC); + stats->xontxc += E1000_READ_REG(hw, E1000_XONTXC); + /* + ** For watchdog management we need to know if we have been + ** paused during the last interval, so capture that here. + */ + pause_frames = E1000_READ_REG(hw, E1000_XOFFRXC); + stats->xoffrxc += pause_frames; + stats->xofftxc += E1000_READ_REG(hw, E1000_XOFFTXC); + stats->fcruc += E1000_READ_REG(hw, E1000_FCRUC); + stats->prc64 += E1000_READ_REG(hw, E1000_PRC64); + stats->prc127 += E1000_READ_REG(hw, E1000_PRC127); + stats->prc255 += E1000_READ_REG(hw, E1000_PRC255); + stats->prc511 += E1000_READ_REG(hw, E1000_PRC511); + stats->prc1023 += E1000_READ_REG(hw, E1000_PRC1023); + stats->prc1522 += E1000_READ_REG(hw, E1000_PRC1522); + stats->gprc += E1000_READ_REG(hw, E1000_GPRC); + stats->bprc += E1000_READ_REG(hw, E1000_BPRC); + stats->mprc += E1000_READ_REG(hw, E1000_MPRC); + stats->gptc += E1000_READ_REG(hw, E1000_GPTC); + + /* For the 64-bit byte counters the low dword must be read first. 
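+ * Concretely, each 64-bit counter below is accumulated as, e.g.:
+ *   gorc += GORCL;  gorc += (uint64_t)GORCH << 32;
+ * after which RTE_ETHER_CRC_LEN (4 bytes) is subtracted once per good
+ * packet received in this interval, using the old_gprc snapshot taken
+ * at the top of the function.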
*/ + /* Both registers clear on the read of the high dword */ + + /* Workaround CRC bytes included in size, take away 4 bytes/packet */ + stats->gorc += E1000_READ_REG(hw, E1000_GORCL); + stats->gorc += ((uint64_t)E1000_READ_REG(hw, E1000_GORCH) << 32); + stats->gorc -= (stats->gprc - old_gprc) * RTE_ETHER_CRC_LEN; + stats->gotc += E1000_READ_REG(hw, E1000_GOTCL); + stats->gotc += ((uint64_t)E1000_READ_REG(hw, E1000_GOTCH) << 32); + stats->gotc -= (stats->gptc - old_gptc) * RTE_ETHER_CRC_LEN; + + stats->rnbc += E1000_READ_REG(hw, E1000_RNBC); + stats->ruc += E1000_READ_REG(hw, E1000_RUC); + stats->rfc += E1000_READ_REG(hw, E1000_RFC); + stats->roc += E1000_READ_REG(hw, E1000_ROC); + stats->rjc += E1000_READ_REG(hw, E1000_RJC); + + stats->tpr += E1000_READ_REG(hw, E1000_TPR); + stats->tpt += E1000_READ_REG(hw, E1000_TPT); + + stats->tor += E1000_READ_REG(hw, E1000_TORL); + stats->tor += ((uint64_t)E1000_READ_REG(hw, E1000_TORH) << 32); + stats->tor -= (stats->tpr - old_tpr) * RTE_ETHER_CRC_LEN; + stats->tot += E1000_READ_REG(hw, E1000_TOTL); + stats->tot += ((uint64_t)E1000_READ_REG(hw, E1000_TOTH) << 32); + stats->tot -= (stats->tpt - old_tpt) * RTE_ETHER_CRC_LEN; + + stats->ptc64 += E1000_READ_REG(hw, E1000_PTC64); + stats->ptc127 += E1000_READ_REG(hw, E1000_PTC127); + stats->ptc255 += E1000_READ_REG(hw, E1000_PTC255); + stats->ptc511 += E1000_READ_REG(hw, E1000_PTC511); + stats->ptc1023 += E1000_READ_REG(hw, E1000_PTC1023); + stats->ptc1522 += E1000_READ_REG(hw, E1000_PTC1522); + stats->mptc += E1000_READ_REG(hw, E1000_MPTC); + stats->bptc += E1000_READ_REG(hw, E1000_BPTC); + + /* Interrupt Counts */ + + stats->iac += E1000_READ_REG(hw, E1000_IAC); + stats->icrxptc += E1000_READ_REG(hw, E1000_ICRXPTC); + stats->icrxatc += E1000_READ_REG(hw, E1000_ICRXATC); + stats->ictxptc += E1000_READ_REG(hw, E1000_ICTXPTC); + stats->ictxatc += E1000_READ_REG(hw, E1000_ICTXATC); + stats->ictxqec += E1000_READ_REG(hw, E1000_ICTXQEC); + stats->ictxqmtc += E1000_READ_REG(hw, E1000_ICTXQMTC); + stats->icrxdmtc += E1000_READ_REG(hw, E1000_ICRXDMTC); + stats->icrxoc += E1000_READ_REG(hw, E1000_ICRXOC); + + /* Host to Card Statistics */ + + stats->cbtmpc += E1000_READ_REG(hw, E1000_CBTMPC); + stats->htdpmc += E1000_READ_REG(hw, E1000_HTDPMC); + stats->cbrdpc += E1000_READ_REG(hw, E1000_CBRDPC); + stats->cbrmpc += E1000_READ_REG(hw, E1000_CBRMPC); + stats->rpthc += E1000_READ_REG(hw, E1000_RPTHC); + stats->hgptc += E1000_READ_REG(hw, E1000_HGPTC); + stats->htcbdpc += E1000_READ_REG(hw, E1000_HTCBDPC); + stats->hgorc += E1000_READ_REG(hw, E1000_HGORCL); + stats->hgorc += ((uint64_t)E1000_READ_REG(hw, E1000_HGORCH) << 32); + stats->hgorc -= (stats->rpthc - old_rpthc) * RTE_ETHER_CRC_LEN; + stats->hgotc += E1000_READ_REG(hw, E1000_HGOTCL); + stats->hgotc += ((uint64_t)E1000_READ_REG(hw, E1000_HGOTCH) << 32); + stats->hgotc -= (stats->hgptc - old_hgptc) * RTE_ETHER_CRC_LEN; + stats->lenerrs += E1000_READ_REG(hw, E1000_LENERRS); + stats->scvpc += E1000_READ_REG(hw, E1000_SCVPC); + stats->hrmpc += E1000_READ_REG(hw, E1000_HRMPC); + + stats->algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC); + stats->rxerrc += E1000_READ_REG(hw, E1000_RXERRC); + stats->tncrs += E1000_READ_REG(hw, E1000_TNCRS); + stats->cexterr += E1000_READ_REG(hw, E1000_CEXTERR); + stats->tsctc += E1000_READ_REG(hw, E1000_TSCTC); + stats->tsctfc += E1000_READ_REG(hw, E1000_TSCTFC); +} + +static int +eth_igb_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + 
struct e1000_hw_stats *stats = + E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private); + + igb_read_stats_registers(hw, stats); + + if (rte_stats == NULL) + return -EINVAL; + + /* Rx Errors */ + rte_stats->imissed = stats->mpc; + rte_stats->ierrors = stats->crcerrs + + stats->rlec + stats->ruc + stats->roc + + stats->rxerrc + stats->algnerrc + stats->cexterr; + + /* Tx Errors */ + rte_stats->oerrors = stats->ecol + stats->latecol; + + rte_stats->ipackets = stats->gprc; + rte_stats->opackets = stats->gptc; + rte_stats->ibytes = stats->gorc; + rte_stats->obytes = stats->gotc; + return 0; +} + +static int +eth_igb_stats_reset(struct rte_eth_dev *dev) +{ + struct e1000_hw_stats *hw_stats = + E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private); + + /* HW registers are cleared on read */ + eth_igb_stats_get(dev, NULL); + + /* Reset software totals */ + memset(hw_stats, 0, sizeof(*hw_stats)); + + return 0; +} + +static int +eth_igb_xstats_reset(struct rte_eth_dev *dev) +{ + struct e1000_hw_stats *stats = + E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private); + + /* HW registers are cleared on read */ + eth_igb_xstats_get(dev, NULL, IGB_NB_XSTATS); + + /* Reset software totals */ + memset(stats, 0, sizeof(*stats)); + + return 0; +} + +static int eth_igb_xstats_get_names(__rte_unused struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, + __rte_unused unsigned int size) +{ + unsigned i; + + if (xstats_names == NULL) + return IGB_NB_XSTATS; + + /* Note: limit checked in rte_eth_xstats_names() */ + + for (i = 0; i < IGB_NB_XSTATS; i++) { + strlcpy(xstats_names[i].name, rte_igb_stats_strings[i].name, + sizeof(xstats_names[i].name)); + } + + return IGB_NB_XSTATS; +} + +static int eth_igb_xstats_get_names_by_id(struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, const uint64_t *ids, + unsigned int limit) +{ + unsigned int i; + + if (!ids) { + if (xstats_names == NULL) + return IGB_NB_XSTATS; + + for (i = 0; i < IGB_NB_XSTATS; i++) + strlcpy(xstats_names[i].name, + rte_igb_stats_strings[i].name, + sizeof(xstats_names[i].name)); + + return IGB_NB_XSTATS; + + } else { + struct rte_eth_xstat_name xstats_names_copy[IGB_NB_XSTATS]; + + eth_igb_xstats_get_names_by_id(dev, xstats_names_copy, NULL, + IGB_NB_XSTATS); + + for (i = 0; i < limit; i++) { + if (ids[i] >= IGB_NB_XSTATS) { + PMD_INIT_LOG(ERR, "id value isn't valid"); + return -1; + } + strcpy(xstats_names[i].name, + xstats_names_copy[ids[i]].name); + } + return limit; + } +} + +static int +eth_igb_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, + unsigned n) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_hw_stats *hw_stats = + E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private); + unsigned i; + + if (n < IGB_NB_XSTATS) + return IGB_NB_XSTATS; + + igb_read_stats_registers(hw, hw_stats); + + /* If this is a reset xstats is NULL, and we have cleared the + * registers by reading them. 
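+ * In other words, eth_igb_xstats_reset() calls this helper with a NULL
+ * xstats array purely for that read side effect, while a regular
+ * rte_eth_xstats_get() call supplies an array of at least
+ * IGB_NB_XSTATS entries (a smaller n just returns the required size).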
+ */ + if (!xstats) + return 0; + + /* Extended stats */ + for (i = 0; i < IGB_NB_XSTATS; i++) { + xstats[i].id = i; + xstats[i].value = *(uint64_t *)(((char *)hw_stats) + + rte_igb_stats_strings[i].offset); + } + + return IGB_NB_XSTATS; +} + +static int +eth_igb_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids, + uint64_t *values, unsigned int n) +{ + unsigned int i; + + if (!ids) { + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_hw_stats *hw_stats = + E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private); + + if (n < IGB_NB_XSTATS) + return IGB_NB_XSTATS; + + igb_read_stats_registers(hw, hw_stats); + + /* If this is a reset xstats is NULL, and we have cleared the + * registers by reading them. + */ + if (!values) + return 0; + + /* Extended stats */ + for (i = 0; i < IGB_NB_XSTATS; i++) + values[i] = *(uint64_t *)(((char *)hw_stats) + + rte_igb_stats_strings[i].offset); + + return IGB_NB_XSTATS; + + } else { + uint64_t values_copy[IGB_NB_XSTATS]; + + eth_igb_xstats_get_by_id(dev, NULL, values_copy, + IGB_NB_XSTATS); + + for (i = 0; i < n; i++) { + if (ids[i] >= IGB_NB_XSTATS) { + PMD_INIT_LOG(ERR, "id value isn't valid"); + return -1; + } + values[i] = values_copy[ids[i]]; + } + return n; + } +} + +static void +igbvf_read_stats_registers(struct e1000_hw *hw, struct e1000_vf_stats *hw_stats) +{ + /* Good Rx packets, include VF loopback */ + UPDATE_VF_STAT(E1000_VFGPRC, + hw_stats->last_gprc, hw_stats->gprc); + + /* Good Rx octets, include VF loopback */ + UPDATE_VF_STAT(E1000_VFGORC, + hw_stats->last_gorc, hw_stats->gorc); + + /* Good Tx packets, include VF loopback */ + UPDATE_VF_STAT(E1000_VFGPTC, + hw_stats->last_gptc, hw_stats->gptc); + + /* Good Tx octets, include VF loopback */ + UPDATE_VF_STAT(E1000_VFGOTC, + hw_stats->last_gotc, hw_stats->gotc); + + /* Rx Multicst packets */ + UPDATE_VF_STAT(E1000_VFMPRC, + hw_stats->last_mprc, hw_stats->mprc); + + /* Good Rx loopback packets */ + UPDATE_VF_STAT(E1000_VFGPRLBC, + hw_stats->last_gprlbc, hw_stats->gprlbc); + + /* Good Rx loopback octets */ + UPDATE_VF_STAT(E1000_VFGORLBC, + hw_stats->last_gorlbc, hw_stats->gorlbc); + + /* Good Tx loopback packets */ + UPDATE_VF_STAT(E1000_VFGPTLBC, + hw_stats->last_gptlbc, hw_stats->gptlbc); + + /* Good Tx loopback octets */ + UPDATE_VF_STAT(E1000_VFGOTLBC, + hw_stats->last_gotlbc, hw_stats->gotlbc); +} + +static int eth_igbvf_xstats_get_names(__rte_unused struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, + __rte_unused unsigned limit) +{ + unsigned i; + + if (xstats_names != NULL) + for (i = 0; i < IGBVF_NB_XSTATS; i++) { + strlcpy(xstats_names[i].name, + rte_igbvf_stats_strings[i].name, + sizeof(xstats_names[i].name)); + } + return IGBVF_NB_XSTATS; +} + +static int +eth_igbvf_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, + unsigned n) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_vf_stats *hw_stats = (struct e1000_vf_stats *) + E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private); + unsigned i; + + if (n < IGBVF_NB_XSTATS) + return IGBVF_NB_XSTATS; + + igbvf_read_stats_registers(hw, hw_stats); + + if (!xstats) + return 0; + + for (i = 0; i < IGBVF_NB_XSTATS; i++) { + xstats[i].id = i; + xstats[i].value = *(uint64_t *)(((char *)hw_stats) + + rte_igbvf_stats_strings[i].offset); + } + + return IGBVF_NB_XSTATS; +} + +static int +eth_igbvf_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats) +{ + struct e1000_hw *hw = 
E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_vf_stats *hw_stats = (struct e1000_vf_stats *) + E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private); + + igbvf_read_stats_registers(hw, hw_stats); + + if (rte_stats == NULL) + return -EINVAL; + + rte_stats->ipackets = hw_stats->gprc; + rte_stats->ibytes = hw_stats->gorc; + rte_stats->opackets = hw_stats->gptc; + rte_stats->obytes = hw_stats->gotc; + return 0; +} + +static int +eth_igbvf_stats_reset(struct rte_eth_dev *dev) +{ + struct e1000_vf_stats *hw_stats = (struct e1000_vf_stats*) + E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private); + + /* Sync HW register to the last stats */ + eth_igbvf_stats_get(dev, NULL); + + /* reset HW current stats*/ + memset(&hw_stats->gprc, 0, sizeof(*hw_stats) - + offsetof(struct e1000_vf_stats, gprc)); + + return 0; +} + +static int +eth_igb_fw_version_get(struct rte_eth_dev *dev, char *fw_version, + size_t fw_size) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_fw_version fw; + int ret; + + e1000_get_fw_version(hw, &fw); + + switch (hw->mac.type) { + case e1000_i210: + case e1000_i211: + if (!(e1000_get_flash_presence_i210(hw))) { + ret = snprintf(fw_version, fw_size, + "%2d.%2d-%d", + fw.invm_major, fw.invm_minor, + fw.invm_img_type); + break; + } + /* fall through */ + default: + /* if option rom is valid, display its version too */ + if (fw.or_valid) { + ret = snprintf(fw_version, fw_size, + "%d.%d, 0x%08x, %d.%d.%d", + fw.eep_major, fw.eep_minor, fw.etrack_id, + fw.or_major, fw.or_build, fw.or_patch); + /* no option rom */ + } else { + if (fw.etrack_id != 0X0000) { + ret = snprintf(fw_version, fw_size, + "%d.%d, 0x%08x", + fw.eep_major, fw.eep_minor, + fw.etrack_id); + } else { + ret = snprintf(fw_version, fw_size, + "%d.%d.%d", + fw.eep_major, fw.eep_minor, + fw.eep_build); + } + } + break; + } + + ret += 1; /* add the size of '\0' */ + if (fw_size < (u32)ret) + return ret; + else + return 0; +} + +static int +eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */ + dev_info->max_rx_pktlen = 0x3FFF; /* See RLPML register. 
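+ * (0x3FFF = 16383 bytes; the max_mtu reported further down is derived
+ * from this value minus E1000_ETH_OVERHEAD.)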
*/ + dev_info->max_mac_addrs = hw->mac.rar_entry_count; + dev_info->rx_queue_offload_capa = igb_get_rx_queue_offloads_capa(dev); + dev_info->rx_offload_capa = igb_get_rx_port_offloads_capa(dev) | + dev_info->rx_queue_offload_capa; + dev_info->tx_queue_offload_capa = igb_get_tx_queue_offloads_capa(dev); + dev_info->tx_offload_capa = igb_get_tx_port_offloads_capa(dev) | + dev_info->tx_queue_offload_capa; + + switch (hw->mac.type) { + case e1000_82575: + dev_info->max_rx_queues = 4; + dev_info->max_tx_queues = 4; + dev_info->max_vmdq_pools = 0; + break; + + case e1000_82576: + dev_info->max_rx_queues = 16; + dev_info->max_tx_queues = 16; + dev_info->max_vmdq_pools = ETH_8_POOLS; + dev_info->vmdq_queue_num = 16; + break; + + case e1000_82580: + dev_info->max_rx_queues = 8; + dev_info->max_tx_queues = 8; + dev_info->max_vmdq_pools = ETH_8_POOLS; + dev_info->vmdq_queue_num = 8; + break; + + case e1000_i350: + dev_info->max_rx_queues = 8; + dev_info->max_tx_queues = 8; + dev_info->max_vmdq_pools = ETH_8_POOLS; + dev_info->vmdq_queue_num = 8; + break; + + case e1000_i354: + dev_info->max_rx_queues = 8; + dev_info->max_tx_queues = 8; + break; + + case e1000_i210: + dev_info->max_rx_queues = 4; + dev_info->max_tx_queues = 4; + dev_info->max_vmdq_pools = 0; + break; + + case e1000_i211: + dev_info->max_rx_queues = 2; + dev_info->max_tx_queues = 2; + dev_info->max_vmdq_pools = 0; + break; + + default: + /* Should not happen */ + return -EINVAL; + } + dev_info->hash_key_size = IGB_HKEY_MAX_INDEX * sizeof(uint32_t); + dev_info->reta_size = ETH_RSS_RETA_SIZE_128; + dev_info->flow_type_rss_offloads = IGB_RSS_OFFLOAD_ALL; + + dev_info->default_rxconf = (struct rte_eth_rxconf) { + .rx_thresh = { + .pthresh = IGB_DEFAULT_RX_PTHRESH, + .hthresh = IGB_DEFAULT_RX_HTHRESH, + .wthresh = IGB_DEFAULT_RX_WTHRESH, + }, + .rx_free_thresh = IGB_DEFAULT_RX_FREE_THRESH, + .rx_drop_en = 0, + .offloads = 0, + }; + + dev_info->default_txconf = (struct rte_eth_txconf) { + .tx_thresh = { + .pthresh = IGB_DEFAULT_TX_PTHRESH, + .hthresh = IGB_DEFAULT_TX_HTHRESH, + .wthresh = IGB_DEFAULT_TX_WTHRESH, + }, + .offloads = 0, + }; + + dev_info->rx_desc_lim = rx_desc_lim; + dev_info->tx_desc_lim = tx_desc_lim; + + dev_info->speed_capa = ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M | + ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M | + ETH_LINK_SPEED_1G; + + dev_info->max_mtu = dev_info->max_rx_pktlen - E1000_ETH_OVERHEAD; + dev_info->min_mtu = RTE_ETHER_MIN_MTU; + + return 0; +} + +static const uint32_t * +eth_igb_supported_ptypes_get(struct rte_eth_dev *dev) +{ + static const uint32_t ptypes[] = { + /* refers to igb_rxd_pkt_info_to_pkt_type() */ + RTE_PTYPE_L2_ETHER, + RTE_PTYPE_L3_IPV4, + RTE_PTYPE_L3_IPV4_EXT, + RTE_PTYPE_L3_IPV6, + RTE_PTYPE_L3_IPV6_EXT, + RTE_PTYPE_L4_TCP, + RTE_PTYPE_L4_UDP, + RTE_PTYPE_L4_SCTP, + RTE_PTYPE_TUNNEL_IP, + RTE_PTYPE_INNER_L3_IPV6, + RTE_PTYPE_INNER_L3_IPV6_EXT, + RTE_PTYPE_INNER_L4_TCP, + RTE_PTYPE_INNER_L4_UDP, + RTE_PTYPE_UNKNOWN + }; + + if (dev->rx_pkt_burst == eth_igb_recv_pkts || + dev->rx_pkt_burst == eth_igb_recv_scattered_pkts) + return ptypes; + return NULL; +} + +static int +eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */ + dev_info->max_rx_pktlen = 0x3FFF; /* See RLPML register. 
*/ + dev_info->max_mac_addrs = hw->mac.rar_entry_count; + dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT | + DEV_TX_OFFLOAD_IPV4_CKSUM | + DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_TCP_CKSUM | + DEV_TX_OFFLOAD_SCTP_CKSUM | + DEV_TX_OFFLOAD_TCP_TSO; + switch (hw->mac.type) { + case e1000_vfadapt: + dev_info->max_rx_queues = 2; + dev_info->max_tx_queues = 2; + break; + case e1000_vfadapt_i350: + dev_info->max_rx_queues = 1; + dev_info->max_tx_queues = 1; + break; + default: + /* Should not happen */ + return -EINVAL; + } + + dev_info->rx_queue_offload_capa = igb_get_rx_queue_offloads_capa(dev); + dev_info->rx_offload_capa = igb_get_rx_port_offloads_capa(dev) | + dev_info->rx_queue_offload_capa; + dev_info->tx_queue_offload_capa = igb_get_tx_queue_offloads_capa(dev); + dev_info->tx_offload_capa = igb_get_tx_port_offloads_capa(dev) | + dev_info->tx_queue_offload_capa; + + dev_info->default_rxconf = (struct rte_eth_rxconf) { + .rx_thresh = { + .pthresh = IGB_DEFAULT_RX_PTHRESH, + .hthresh = IGB_DEFAULT_RX_HTHRESH, + .wthresh = IGB_DEFAULT_RX_WTHRESH, + }, + .rx_free_thresh = IGB_DEFAULT_RX_FREE_THRESH, + .rx_drop_en = 0, + .offloads = 0, + }; + + dev_info->default_txconf = (struct rte_eth_txconf) { + .tx_thresh = { + .pthresh = IGB_DEFAULT_TX_PTHRESH, + .hthresh = IGB_DEFAULT_TX_HTHRESH, + .wthresh = IGB_DEFAULT_TX_WTHRESH, + }, + .offloads = 0, + }; + + dev_info->rx_desc_lim = rx_desc_lim; + dev_info->tx_desc_lim = tx_desc_lim; + + return 0; +} + +/* return 0 means link status changed, -1 means not changed */ +static int +eth_igb_link_update(struct rte_eth_dev *dev, int wait_to_complete) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_eth_link link; + int link_check, count; + + link_check = 0; + hw->mac.get_link_status = 1; + + /* possible wait-to-complete in up to 9 seconds */ + for (count = 0; count < IGB_LINK_UPDATE_CHECK_TIMEOUT; count ++) { + /* Read the real link status */ + switch (hw->phy.media_type) { + case e1000_media_type_copper: + /* Do the work to read phy */ + e1000_check_for_link(hw); + link_check = !hw->mac.get_link_status; + break; + + case e1000_media_type_fiber: + e1000_check_for_link(hw); + link_check = (E1000_READ_REG(hw, E1000_STATUS) & + E1000_STATUS_LU); + break; + + case e1000_media_type_internal_serdes: + e1000_check_for_link(hw); + link_check = hw->mac.serdes_has_link; + break; + + /* VF device is type_unknown */ + case e1000_media_type_unknown: + eth_igbvf_link_update(hw); + link_check = !hw->mac.get_link_status; + break; + + default: + break; + } + if (link_check || wait_to_complete == 0) + break; + rte_delay_ms(IGB_LINK_UPDATE_CHECK_INTERVAL); + } + memset(&link, 0, sizeof(link)); + + /* Now we check if a transition has happened */ + if (link_check) { + uint16_t duplex, speed; + hw->mac.ops.get_link_up_info(hw, &speed, &duplex); + link.link_duplex = (duplex == FULL_DUPLEX) ? + ETH_LINK_FULL_DUPLEX : + ETH_LINK_HALF_DUPLEX; + link.link_speed = speed; + link.link_status = ETH_LINK_UP; + link.link_autoneg = !(dev->data->dev_conf.link_speeds & + ETH_LINK_SPEED_FIXED); + } else if (!link_check) { + link.link_speed = 0; + link.link_duplex = ETH_LINK_HALF_DUPLEX; + link.link_status = ETH_LINK_DOWN; + link.link_autoneg = ETH_LINK_FIXED; + } + + return rte_eth_linkstatus_set(dev, &link); +} + +/* + * igb_hw_control_acquire sets CTRL_EXT:DRV_LOAD bit. + * For ASF and Pass Through versions of f/w this means + * that the driver is loaded. 
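+ *
+ * Its counterpart, igb_hw_control_release() below, clears DRV_LOAD
+ * again from eth_igb_close() so the firmware can resume managing the
+ * hardware once the port is closed.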
+ */ +static void +igb_hw_control_acquire(struct e1000_hw *hw) +{ + uint32_t ctrl_ext; + + /* Let firmware know the driver has taken over */ + ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); + E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); +} + +/* + * igb_hw_control_release resets CTRL_EXT:DRV_LOAD bit. + * For ASF and Pass Through versions of f/w this means that the + * driver is no longer loaded. + */ +static void +igb_hw_control_release(struct e1000_hw *hw) +{ + uint32_t ctrl_ext; + + /* Let firmware taken over control of h/w */ + ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); + E1000_WRITE_REG(hw, E1000_CTRL_EXT, + ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); +} + +/* + * Bit of a misnomer, what this really means is + * to enable OS management of the system... aka + * to disable special hardware management features. + */ +static void +igb_init_manageability(struct e1000_hw *hw) +{ + if (e1000_enable_mng_pass_thru(hw)) { + uint32_t manc2h = E1000_READ_REG(hw, E1000_MANC2H); + uint32_t manc = E1000_READ_REG(hw, E1000_MANC); + + /* disable hardware interception of ARP */ + manc &= ~(E1000_MANC_ARP_EN); + + /* enable receiving management packets to the host */ + manc |= E1000_MANC_EN_MNG2HOST; + manc2h |= 1 << 5; /* Mng Port 623 */ + manc2h |= 1 << 6; /* Mng Port 664 */ + E1000_WRITE_REG(hw, E1000_MANC2H, manc2h); + E1000_WRITE_REG(hw, E1000_MANC, manc); + } +} + +static void +igb_release_manageability(struct e1000_hw *hw) +{ + if (e1000_enable_mng_pass_thru(hw)) { + uint32_t manc = E1000_READ_REG(hw, E1000_MANC); + + manc |= E1000_MANC_ARP_EN; + manc &= ~E1000_MANC_EN_MNG2HOST; + + E1000_WRITE_REG(hw, E1000_MANC, manc); + } +} + +static int +eth_igb_promiscuous_enable(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t rctl; + + rctl = E1000_READ_REG(hw, E1000_RCTL); + rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); + E1000_WRITE_REG(hw, E1000_RCTL, rctl); + + return 0; +} + +static int +eth_igb_promiscuous_disable(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t rctl; + + rctl = E1000_READ_REG(hw, E1000_RCTL); + rctl &= (~E1000_RCTL_UPE); + if (dev->data->all_multicast == 1) + rctl |= E1000_RCTL_MPE; + else + rctl &= (~E1000_RCTL_MPE); + E1000_WRITE_REG(hw, E1000_RCTL, rctl); + + return 0; +} + +static int +eth_igb_allmulticast_enable(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t rctl; + + rctl = E1000_READ_REG(hw, E1000_RCTL); + rctl |= E1000_RCTL_MPE; + E1000_WRITE_REG(hw, E1000_RCTL, rctl); + + return 0; +} + +static int +eth_igb_allmulticast_disable(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t rctl; + + if (dev->data->promiscuous == 1) + return 0; /* must remain in all_multicast mode */ + rctl = E1000_READ_REG(hw, E1000_RCTL); + rctl &= (~E1000_RCTL_MPE); + E1000_WRITE_REG(hw, E1000_RCTL, rctl); + + return 0; +} + +static int +eth_igb_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_vfta * shadow_vfta = + E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); + uint32_t vfta; + uint32_t vid_idx; + uint32_t vid_bit; + + vid_idx = (uint32_t) ((vlan_id >> E1000_VFTA_ENTRY_SHIFT) & + E1000_VFTA_ENTRY_MASK); + vid_bit = (uint32_t) (1 << (vlan_id & E1000_VFTA_ENTRY_BIT_SHIFT_MASK)); + vfta = 
E1000_READ_REG_ARRAY(hw, E1000_VFTA, vid_idx); + if (on) + vfta |= vid_bit; + else + vfta &= ~vid_bit; + E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, vid_idx, vfta); + + /* update local VFTA copy */ + shadow_vfta->vfta[vid_idx] = vfta; + + return 0; +} + +static int +eth_igb_vlan_tpid_set(struct rte_eth_dev *dev, + enum rte_vlan_type vlan_type, + uint16_t tpid) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t reg, qinq; + + qinq = E1000_READ_REG(hw, E1000_CTRL_EXT); + qinq &= E1000_CTRL_EXT_EXT_VLAN; + + /* only outer TPID of double VLAN can be configured*/ + if (qinq && vlan_type == ETH_VLAN_TYPE_OUTER) { + reg = E1000_READ_REG(hw, E1000_VET); + reg = (reg & (~E1000_VET_VET_EXT)) | + ((uint32_t)tpid << E1000_VET_VET_EXT_SHIFT); + E1000_WRITE_REG(hw, E1000_VET, reg); + + return 0; + } + + /* all other TPID values are read-only*/ + PMD_DRV_LOG(ERR, "Not supported"); + + return -ENOTSUP; +} + +static void +igb_vlan_hw_filter_disable(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t reg; + + /* Filter Table Disable */ + reg = E1000_READ_REG(hw, E1000_RCTL); + reg &= ~E1000_RCTL_CFIEN; + reg &= ~E1000_RCTL_VFE; + E1000_WRITE_REG(hw, E1000_RCTL, reg); +} + +static void +igb_vlan_hw_filter_enable(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_vfta * shadow_vfta = + E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); + uint32_t reg; + int i; + + /* Filter Table Enable, CFI not used for packet acceptance */ + reg = E1000_READ_REG(hw, E1000_RCTL); + reg &= ~E1000_RCTL_CFIEN; + reg |= E1000_RCTL_VFE; + E1000_WRITE_REG(hw, E1000_RCTL, reg); + + /* restore VFTA table */ + for (i = 0; i < IGB_VFTA_SIZE; i++) + E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, i, shadow_vfta->vfta[i]); +} + +static void +igb_vlan_hw_strip_disable(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t reg; + + /* VLAN Mode Disable */ + reg = E1000_READ_REG(hw, E1000_CTRL); + reg &= ~E1000_CTRL_VME; + E1000_WRITE_REG(hw, E1000_CTRL, reg); +} + +static void +igb_vlan_hw_strip_enable(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t reg; + + /* VLAN Mode Enable */ + reg = E1000_READ_REG(hw, E1000_CTRL); + reg |= E1000_CTRL_VME; + E1000_WRITE_REG(hw, E1000_CTRL, reg); +} + +static void +igb_vlan_hw_extend_disable(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t reg; + + /* CTRL_EXT: Extended VLAN */ + reg = E1000_READ_REG(hw, E1000_CTRL_EXT); + reg &= ~E1000_CTRL_EXT_EXTEND_VLAN; + E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg); + + /* Update maximum packet length */ + if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) + E1000_WRITE_REG(hw, E1000_RLPML, + dev->data->dev_conf.rxmode.max_rx_pkt_len + + VLAN_TAG_SIZE); +} + +static void +igb_vlan_hw_extend_enable(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t reg; + + /* CTRL_EXT: Extended VLAN */ + reg = E1000_READ_REG(hw, E1000_CTRL_EXT); + reg |= E1000_CTRL_EXT_EXTEND_VLAN; + E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg); + + /* Update maximum packet length */ + if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) + E1000_WRITE_REG(hw, E1000_RLPML, + dev->data->dev_conf.rxmode.max_rx_pkt_len + + 2 * VLAN_TAG_SIZE); +} + +static int 
+eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask) +{ + struct rte_eth_rxmode *rxmode; + + rxmode = &dev->data->dev_conf.rxmode; + if(mask & ETH_VLAN_STRIP_MASK){ + if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) + igb_vlan_hw_strip_enable(dev); + else + igb_vlan_hw_strip_disable(dev); + } + + if(mask & ETH_VLAN_FILTER_MASK){ + if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) + igb_vlan_hw_filter_enable(dev); + else + igb_vlan_hw_filter_disable(dev); + } + + if(mask & ETH_VLAN_EXTEND_MASK){ + if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) + igb_vlan_hw_extend_enable(dev); + else + igb_vlan_hw_extend_disable(dev); + } + + return 0; +} + + +/** + * It enables the interrupt mask and then enable the interrupt. + * + * @param dev + * Pointer to struct rte_eth_dev. + * @param on + * Enable or Disable + * + * @return + * - On success, zero. + * - On failure, a negative value. + */ +static int +eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on) +{ + struct e1000_interrupt *intr = + E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + + if (on) + intr->mask |= E1000_ICR_LSC; + else + intr->mask &= ~E1000_ICR_LSC; + + return 0; +} + +/* It clears the interrupt causes and enables the interrupt. + * It will be called once only during nic initialized. + * + * @param dev + * Pointer to struct rte_eth_dev. + * + * @return + * - On success, zero. + * - On failure, a negative value. + */ +static int eth_igb_rxq_interrupt_setup(struct rte_eth_dev *dev) +{ + uint32_t mask, regval; + int ret; + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + int misc_shift = rte_intr_allow_others(intr_handle) ? 1 : 0; + struct rte_eth_dev_info dev_info; + + memset(&dev_info, 0, sizeof(dev_info)); + ret = eth_igb_infos_get(dev, &dev_info); + if (ret != 0) + return ret; + + mask = (0xFFFFFFFF >> (32 - dev_info.max_rx_queues)) << misc_shift; + regval = E1000_READ_REG(hw, E1000_EIMS); + E1000_WRITE_REG(hw, E1000_EIMS, regval | mask); + + return 0; +} + +/* + * It reads ICR and gets interrupt causes, check it and set a bit flag + * to update link status. + * + * @param dev + * Pointer to struct rte_eth_dev. + * + * @return + * - On success, zero. + * - On failure, a negative value. + */ +static int +eth_igb_interrupt_get_status(struct rte_eth_dev *dev) +{ + uint32_t icr; + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_interrupt *intr = + E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + + igb_intr_disable(dev); + + /* read-on-clear nic registers here */ + icr = E1000_READ_REG(hw, E1000_ICR); + + intr->flags = 0; + if (icr & E1000_ICR_LSC) { + intr->flags |= E1000_FLAG_NEED_LINK_UPDATE; + } + + if (icr & E1000_ICR_VMMB) + intr->flags |= E1000_FLAG_MAILBOX; + + return 0; +} + +/* + * It executes link_update after knowing an interrupt is prsent. + * + * @param dev + * Pointer to struct rte_eth_dev. + * + * @return + * - On success, zero. + * - On failure, a negative value. 
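+ *
+ * Link-state changes detected here are delivered to the application as
+ * RTE_ETH_EVENT_INTR_LSC events; a callback registered, for example, with
+ *   rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC, cb, NULL);
+ * (port_id and cb being application-side placeholders) is invoked via
+ * _rte_eth_dev_callback_process() below.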
+ */ +static int +eth_igb_interrupt_action(struct rte_eth_dev *dev, + struct rte_intr_handle *intr_handle) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_interrupt *intr = + E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_eth_link link; + int ret; + + if (intr->flags & E1000_FLAG_MAILBOX) { + igb_pf_mbx_process(dev); + intr->flags &= ~E1000_FLAG_MAILBOX; + } + + igb_intr_enable(dev); + rte_intr_ack(intr_handle); + + if (intr->flags & E1000_FLAG_NEED_LINK_UPDATE) { + intr->flags &= ~E1000_FLAG_NEED_LINK_UPDATE; + + /* set get_link_status to check register later */ + hw->mac.get_link_status = 1; + ret = eth_igb_link_update(dev, 0); + + /* check if link has changed */ + if (ret < 0) + return 0; + + rte_eth_linkstatus_get(dev, &link); + if (link.link_status) { + PMD_INIT_LOG(INFO, + " Port %d: Link Up - speed %u Mbps - %s", + dev->data->port_id, + (unsigned)link.link_speed, + link.link_duplex == ETH_LINK_FULL_DUPLEX ? + "full-duplex" : "half-duplex"); + } else { + PMD_INIT_LOG(INFO, " Port %d: Link Down", + dev->data->port_id); + } + + PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT, + pci_dev->addr.domain, + pci_dev->addr.bus, + pci_dev->addr.devid, + pci_dev->addr.function); + _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, + NULL); + } + + return 0; +} + +/** + * Interrupt handler which shall be registered at first. + * + * @param handle + * Pointer to interrupt handle. + * @param param + * The address of parameter (struct rte_eth_dev *) regsitered before. + * + * @return + * void + */ +static void +eth_igb_interrupt_handler(void *param) +{ + struct rte_eth_dev *dev = (struct rte_eth_dev *)param; + + eth_igb_interrupt_get_status(dev); + eth_igb_interrupt_action(dev, dev->intr_handle); +} + +static int +eth_igbvf_interrupt_get_status(struct rte_eth_dev *dev) +{ + uint32_t eicr; + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_interrupt *intr = + E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + + igbvf_intr_disable(hw); + + /* read-on-clear nic registers here */ + eicr = E1000_READ_REG(hw, E1000_EICR); + intr->flags = 0; + + if (eicr == E1000_VTIVAR_MISC_MAILBOX) + intr->flags |= E1000_FLAG_MAILBOX; + + return 0; +} + +void igbvf_mbx_process(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_mbx_info *mbx = &hw->mbx; + u32 in_msg = 0; + + /* peek the message first */ + in_msg = E1000_READ_REG(hw, E1000_VMBMEM(0)); + + /* PF reset VF event */ + if (in_msg == E1000_PF_CONTROL_MSG) { + /* dummy mbx read to ack pf */ + if (mbx->ops.read(hw, &in_msg, 1, 0)) + return; + _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET, + NULL); + } +} + +static int +eth_igbvf_interrupt_action(struct rte_eth_dev *dev, struct rte_intr_handle *intr_handle) +{ + struct e1000_interrupt *intr = + E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + + if (intr->flags & E1000_FLAG_MAILBOX) { + igbvf_mbx_process(dev); + intr->flags &= ~E1000_FLAG_MAILBOX; + } + + igbvf_intr_enable(dev); + rte_intr_ack(intr_handle); + + return 0; +} + +static void +eth_igbvf_interrupt_handler(void *param) +{ + struct rte_eth_dev *dev = (struct rte_eth_dev *)param; + + eth_igbvf_interrupt_get_status(dev); + eth_igbvf_interrupt_action(dev, dev->intr_handle); +} + +static int +eth_igb_led_on(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw; + + hw = 
E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + return e1000_led_on(hw) == E1000_SUCCESS ? 0 : -ENOTSUP; +} + +static int +eth_igb_led_off(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw; + + hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + return e1000_led_off(hw) == E1000_SUCCESS ? 0 : -ENOTSUP; +} + +static int +eth_igb_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) +{ + struct e1000_hw *hw; + uint32_t ctrl; + int tx_pause; + int rx_pause; + + hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + fc_conf->pause_time = hw->fc.pause_time; + fc_conf->high_water = hw->fc.high_water; + fc_conf->low_water = hw->fc.low_water; + fc_conf->send_xon = hw->fc.send_xon; + fc_conf->autoneg = hw->mac.autoneg; + + /* + * Return rx_pause and tx_pause status according to actual setting of + * the TFCE and RFCE bits in the CTRL register. + */ + ctrl = E1000_READ_REG(hw, E1000_CTRL); + if (ctrl & E1000_CTRL_TFCE) + tx_pause = 1; + else + tx_pause = 0; + + if (ctrl & E1000_CTRL_RFCE) + rx_pause = 1; + else + rx_pause = 0; + + if (rx_pause && tx_pause) + fc_conf->mode = RTE_FC_FULL; + else if (rx_pause) + fc_conf->mode = RTE_FC_RX_PAUSE; + else if (tx_pause) + fc_conf->mode = RTE_FC_TX_PAUSE; + else + fc_conf->mode = RTE_FC_NONE; + + return 0; +} + +static int +eth_igb_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) +{ + struct e1000_hw *hw; + int err; + enum e1000_fc_mode rte_fcmode_2_e1000_fcmode[] = { + e1000_fc_none, + e1000_fc_rx_pause, + e1000_fc_tx_pause, + e1000_fc_full + }; + uint32_t rx_buf_size; + uint32_t max_high_water; + uint32_t rctl; + + hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + if (fc_conf->autoneg != hw->mac.autoneg) + return -ENOTSUP; + rx_buf_size = igb_get_rx_buffer_size(hw); + PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size); + + /* At least reserve one Ethernet frame for watermark */ + max_high_water = rx_buf_size - RTE_ETHER_MAX_LEN; + if ((fc_conf->high_water > max_high_water) || + (fc_conf->high_water < fc_conf->low_water)) { + PMD_INIT_LOG(ERR, "e1000 incorrect high/low water value"); + PMD_INIT_LOG(ERR, "high water must <= 0x%x", max_high_water); + return -EINVAL; + } + + hw->fc.requested_mode = rte_fcmode_2_e1000_fcmode[fc_conf->mode]; + hw->fc.pause_time = fc_conf->pause_time; + hw->fc.high_water = fc_conf->high_water; + hw->fc.low_water = fc_conf->low_water; + hw->fc.send_xon = fc_conf->send_xon; + + err = e1000_setup_link_generic(hw); + if (err == E1000_SUCCESS) { + + /* check if we want to forward MAC frames - driver doesn't have native + * capability to do that, so we'll write the registers ourselves */ + + rctl = E1000_READ_REG(hw, E1000_RCTL); + + /* set or clear MFLCN.PMCF bit depending on configuration */ + if (fc_conf->mac_ctrl_frame_fwd != 0) + rctl |= E1000_RCTL_PMCF; + else + rctl &= ~E1000_RCTL_PMCF; + + E1000_WRITE_REG(hw, E1000_RCTL, rctl); + E1000_WRITE_FLUSH(hw); + + return 0; + } + + PMD_INIT_LOG(ERR, "e1000_setup_link_generic = 0x%x", err); + return -EIO; +} + +#define E1000_RAH_POOLSEL_SHIFT (18) +static int +eth_igb_rar_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr, + uint32_t index, uint32_t pool) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t rah; + + e1000_rar_set(hw, mac_addr->addr_bytes, index); + rah = E1000_READ_REG(hw, E1000_RAH(index)); + rah |= (0x1 << (E1000_RAH_POOLSEL_SHIFT + pool)); + E1000_WRITE_REG(hw, E1000_RAH(index), rah); + return 0; +} + +static void +eth_igb_rar_clear(struct rte_eth_dev 
*dev, uint32_t index) +{ + uint8_t addr[RTE_ETHER_ADDR_LEN]; + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + memset(addr, 0, sizeof(addr)); + + e1000_rar_set(hw, addr, index); +} + +static int +eth_igb_default_mac_addr_set(struct rte_eth_dev *dev, + struct rte_ether_addr *addr) +{ + eth_igb_rar_clear(dev, 0); + eth_igb_rar_set(dev, (void *)addr, 0, 0); + + return 0; +} +/* + * Virtual Function operations + */ +static void +igbvf_intr_disable(struct e1000_hw *hw) +{ + PMD_INIT_FUNC_TRACE(); + + /* Clear interrupt mask to stop from interrupts being generated */ + E1000_WRITE_REG(hw, E1000_EIMC, 0xFFFF); + + E1000_WRITE_FLUSH(hw); +} + +static void +igbvf_stop_adapter(struct rte_eth_dev *dev) +{ + u32 reg_val; + u16 i; + struct rte_eth_dev_info dev_info; + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int ret; + + memset(&dev_info, 0, sizeof(dev_info)); + ret = eth_igbvf_infos_get(dev, &dev_info); + if (ret != 0) + return; + + /* Clear interrupt mask to stop from interrupts being generated */ + igbvf_intr_disable(hw); + + /* Clear any pending interrupts, flush previous writes */ + E1000_READ_REG(hw, E1000_EICR); + + /* Disable the transmit unit. Each queue must be disabled. */ + for (i = 0; i < dev_info.max_tx_queues; i++) + E1000_WRITE_REG(hw, E1000_TXDCTL(i), E1000_TXDCTL_SWFLSH); + + /* Disable the receive unit by stopping each queue */ + for (i = 0; i < dev_info.max_rx_queues; i++) { + reg_val = E1000_READ_REG(hw, E1000_RXDCTL(i)); + reg_val &= ~E1000_RXDCTL_QUEUE_ENABLE; + E1000_WRITE_REG(hw, E1000_RXDCTL(i), reg_val); + while (E1000_READ_REG(hw, E1000_RXDCTL(i)) & E1000_RXDCTL_QUEUE_ENABLE) + ; + } + + /* flush all queues disables */ + E1000_WRITE_FLUSH(hw); + msec_delay(2); +} + +static int eth_igbvf_link_update(struct e1000_hw *hw) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + struct e1000_mac_info *mac = &hw->mac; + int ret_val = E1000_SUCCESS; + + PMD_INIT_LOG(DEBUG, "e1000_check_for_link_vf"); + + /* + * We only want to run this if there has been a rst asserted. 
+ * in this case that could mean a link change, device reset, + * or a virtual function reset + */ + + /* If we were hit with a reset or timeout drop the link */ + if (!e1000_check_for_rst(hw, 0) || !mbx->timeout) + mac->get_link_status = TRUE; + + if (!mac->get_link_status) + goto out; + + /* if link status is down no point in checking to see if pf is up */ + if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) + goto out; + + /* if we passed all the tests above then the link is up and we no + * longer need to check for link */ + mac->get_link_status = FALSE; + +out: + return ret_val; +} + + +static int +igbvf_dev_configure(struct rte_eth_dev *dev) +{ + struct rte_eth_conf* conf = &dev->data->dev_conf; + + PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d", + dev->data->port_id); + + if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) + dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH; + + /* + * VF has no ability to enable/disable HW CRC + * Keep the persistent behavior the same as Host PF + */ +#ifndef RTE_LIBRTE_E1000_PF_DISABLE_STRIP_CRC + if (conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) { + PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip"); + conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_KEEP_CRC; + } +#else + if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)) { + PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip"); + conf->rxmode.offloads |= DEV_RX_OFFLOAD_KEEP_CRC; + } +#endif + + return 0; +} + +static int +igbvf_dev_start(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_adapter *adapter = + E1000_DEV_PRIVATE(dev->data->dev_private); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + int ret; + uint32_t intr_vector = 0; + + PMD_INIT_FUNC_TRACE(); + + hw->mac.ops.reset_hw(hw); + adapter->stopped = 0; + + /* Set all vfta */ + igbvf_set_vfta_all(dev,1); + + eth_igbvf_tx_init(dev); + + /* This can fail when allocating mbufs for descriptor rings */ + ret = eth_igbvf_rx_init(dev); + if (ret) { + PMD_INIT_LOG(ERR, "Unable to initialize RX hardware"); + igb_dev_clear_queues(dev); + return ret; + } + + /* check and configure queue intr-vector mapping */ + if (rte_intr_cap_multiple(intr_handle) && + dev->data->dev_conf.intr_conf.rxq) { + intr_vector = dev->data->nb_rx_queues; + ret = rte_intr_efd_enable(intr_handle, intr_vector); + if (ret) + return ret; + } + + if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) { + intr_handle->intr_vec = + rte_zmalloc("intr_vec", + dev->data->nb_rx_queues * sizeof(int), 0); + if (!intr_handle->intr_vec) { + PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues" + " intr_vec", dev->data->nb_rx_queues); + return -ENOMEM; + } + } + + eth_igbvf_configure_msix_intr(dev); + + /* enable uio/vfio intr/eventfd mapping */ + rte_intr_enable(intr_handle); + + /* resume enabled intr since hw reset */ + igbvf_intr_enable(dev); + + return 0; +} + +static void +igbvf_dev_stop(struct rte_eth_dev *dev) +{ + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + struct e1000_adapter *adapter = + E1000_DEV_PRIVATE(dev->data->dev_private); + + if (adapter->stopped) + return; + + PMD_INIT_FUNC_TRACE(); + + igbvf_stop_adapter(dev); + + /* + * Clear what we set, but we still keep shadow_vfta to + * restore after device starts + */ + igbvf_set_vfta_all(dev,0); + + igb_dev_clear_queues(dev); + + /* disable intr 
eventfd mapping */ + rte_intr_disable(intr_handle); + + /* Clean datapath event and queue/vec mapping */ + rte_intr_efd_disable(intr_handle); + if (intr_handle->intr_vec) { + rte_free(intr_handle->intr_vec); + intr_handle->intr_vec = NULL; + } + + adapter->stopped = true; +} + +static void +igbvf_dev_close(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_ether_addr addr; + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + + PMD_INIT_FUNC_TRACE(); + + e1000_reset_hw(hw); + + igbvf_dev_stop(dev); + + igb_dev_free_queues(dev); + + /** + * reprogram the RAR with a zero mac address, + * to ensure that the VF traffic goes to the PF + * after stop, close and detach of the VF. + **/ + + memset(&addr, 0, sizeof(addr)); + igbvf_default_mac_addr_set(dev, &addr); + + dev->dev_ops = NULL; + dev->rx_pkt_burst = NULL; + dev->tx_pkt_burst = NULL; + + rte_intr_callback_unregister(&pci_dev->intr_handle, + eth_igbvf_interrupt_handler, + (void *)dev); +} + +static int +igbvf_promiscuous_enable(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* Set both unicast and multicast promisc */ + e1000_promisc_set_vf(hw, e1000_promisc_enabled); + + return 0; +} + +static int +igbvf_promiscuous_disable(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* If in allmulticast mode leave multicast promisc */ + if (dev->data->all_multicast == 1) + e1000_promisc_set_vf(hw, e1000_promisc_multicast); + else + e1000_promisc_set_vf(hw, e1000_promisc_disabled); + + return 0; +} + +static int +igbvf_allmulticast_enable(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* In promiscuous mode multicast promisc already set */ + if (dev->data->promiscuous == 0) + e1000_promisc_set_vf(hw, e1000_promisc_multicast); + + return 0; +} + +static int +igbvf_allmulticast_disable(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* In promiscuous mode leave multicast promisc enabled */ + if (dev->data->promiscuous == 0) + e1000_promisc_set_vf(hw, e1000_promisc_disabled); + + return 0; +} + +static int igbvf_set_vfta(struct e1000_hw *hw, uint16_t vid, bool on) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + uint32_t msgbuf[2]; + s32 err; + + /* After set vlan, vlan strip will also be enabled in igb driver*/ + msgbuf[0] = E1000_VF_SET_VLAN; + msgbuf[1] = vid; + /* Setting the 8 bit field MSG INFO to TRUE indicates "add" */ + if (on) + msgbuf[0] |= E1000_VF_SET_VLAN_ADD; + + err = mbx->ops.write_posted(hw, msgbuf, 2, 0); + if (err) + goto mbx_err; + + err = mbx->ops.read_posted(hw, msgbuf, 2, 0); + if (err) + goto mbx_err; + + msgbuf[0] &= ~E1000_VT_MSGTYPE_CTS; + if (msgbuf[0] == (E1000_VF_SET_VLAN | E1000_VT_MSGTYPE_NACK)) + err = -EINVAL; + +mbx_err: + return err; +} + +static void igbvf_set_vfta_all(struct rte_eth_dev *dev, bool on) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_vfta * shadow_vfta = + E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); + int i = 0, j = 0, vfta = 0, mask = 1; + + for (i = 0; i < IGB_VFTA_SIZE; i++){ + vfta = shadow_vfta->vfta[i]; + if(vfta){ + mask = 1; + for (j = 0; j < 32; j++){ + if(vfta & mask) + igbvf_set_vfta(hw, + (uint16_t)((i<<5)+j), on); + mask<<=1; + } + } + } + +} + +static int +igbvf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) +{ + struct 
e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_vfta * shadow_vfta = + E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); + uint32_t vid_idx = 0; + uint32_t vid_bit = 0; + int ret = 0; + + PMD_INIT_FUNC_TRACE(); + + /*vind is not used in VF driver, set to 0, check ixgbe_set_vfta_vf*/ + ret = igbvf_set_vfta(hw, vlan_id, !!on); + if(ret){ + PMD_INIT_LOG(ERR, "Unable to set VF vlan"); + return ret; + } + vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F); + vid_bit = (uint32_t) (1 << (vlan_id & 0x1F)); + + /*Save what we set and retore it after device reset*/ + if (on) + shadow_vfta->vfta[vid_idx] |= vid_bit; + else + shadow_vfta->vfta[vid_idx] &= ~vid_bit; + + return 0; +} + +static int +igbvf_default_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *addr) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* index is not used by rar_set() */ + hw->mac.ops.rar_set(hw, (void *)addr, 0); + return 0; +} + + +static int +eth_igb_rss_reta_update(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size) +{ + uint8_t i, j, mask; + uint32_t reta, r; + uint16_t idx, shift; + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (reta_size != ETH_RSS_RETA_SIZE_128) { + PMD_DRV_LOG(ERR, "The size of hash lookup table configured " + "(%d) doesn't match the number hardware can supported " + "(%d)", reta_size, ETH_RSS_RETA_SIZE_128); + return -EINVAL; + } + + for (i = 0; i < reta_size; i += IGB_4_BIT_WIDTH) { + idx = i / RTE_RETA_GROUP_SIZE; + shift = i % RTE_RETA_GROUP_SIZE; + mask = (uint8_t)((reta_conf[idx].mask >> shift) & + IGB_4_BIT_MASK); + if (!mask) + continue; + if (mask == IGB_4_BIT_MASK) + r = 0; + else + r = E1000_READ_REG(hw, E1000_RETA(i >> 2)); + for (j = 0, reta = 0; j < IGB_4_BIT_WIDTH; j++) { + if (mask & (0x1 << j)) + reta |= reta_conf[idx].reta[shift + j] << + (CHAR_BIT * j); + else + reta |= r & (IGB_8_BIT_MASK << (CHAR_BIT * j)); + } + E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta); + } + + return 0; +} + +static int +eth_igb_rss_reta_query(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size) +{ + uint8_t i, j, mask; + uint32_t reta; + uint16_t idx, shift; + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (reta_size != ETH_RSS_RETA_SIZE_128) { + PMD_DRV_LOG(ERR, "The size of hash lookup table configured " + "(%d) doesn't match the number hardware can supported " + "(%d)", reta_size, ETH_RSS_RETA_SIZE_128); + return -EINVAL; + } + + for (i = 0; i < reta_size; i += IGB_4_BIT_WIDTH) { + idx = i / RTE_RETA_GROUP_SIZE; + shift = i % RTE_RETA_GROUP_SIZE; + mask = (uint8_t)((reta_conf[idx].mask >> shift) & + IGB_4_BIT_MASK); + if (!mask) + continue; + reta = E1000_READ_REG(hw, E1000_RETA(i >> 2)); + for (j = 0; j < IGB_4_BIT_WIDTH; j++) { + if (mask & (0x1 << j)) + reta_conf[idx].reta[shift + j] = + ((reta >> (CHAR_BIT * j)) & + IGB_8_BIT_MASK); + } + } + + return 0; +} + +int +eth_igb_syn_filter_set(struct rte_eth_dev *dev, + struct rte_eth_syn_filter *filter, + bool add) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_filter_info *filter_info = + E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + uint32_t synqf, rfctl; + + if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) + return -EINVAL; + + synqf = E1000_READ_REG(hw, E1000_SYNQF(0)); + + if (add) { + if (synqf & E1000_SYN_FILTER_ENABLE) + return -EINVAL; + + synqf = (uint32_t)(((filter->queue << 
E1000_SYN_FILTER_QUEUE_SHIFT) & + E1000_SYN_FILTER_QUEUE) | E1000_SYN_FILTER_ENABLE); + + rfctl = E1000_READ_REG(hw, E1000_RFCTL); + if (filter->hig_pri) + rfctl |= E1000_RFCTL_SYNQFP; + else + rfctl &= ~E1000_RFCTL_SYNQFP; + + E1000_WRITE_REG(hw, E1000_RFCTL, rfctl); + } else { + if (!(synqf & E1000_SYN_FILTER_ENABLE)) + return -ENOENT; + synqf = 0; + } + + filter_info->syn_info = synqf; + E1000_WRITE_REG(hw, E1000_SYNQF(0), synqf); + E1000_WRITE_FLUSH(hw); + return 0; +} + +static int +eth_igb_syn_filter_get(struct rte_eth_dev *dev, + struct rte_eth_syn_filter *filter) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t synqf, rfctl; + + synqf = E1000_READ_REG(hw, E1000_SYNQF(0)); + if (synqf & E1000_SYN_FILTER_ENABLE) { + rfctl = E1000_READ_REG(hw, E1000_RFCTL); + filter->hig_pri = (rfctl & E1000_RFCTL_SYNQFP) ? 1 : 0; + filter->queue = (uint8_t)((synqf & E1000_SYN_FILTER_QUEUE) >> + E1000_SYN_FILTER_QUEUE_SHIFT); + return 0; + } + + return -ENOENT; +} + +static int +eth_igb_syn_filter_handle(struct rte_eth_dev *dev, + enum rte_filter_op filter_op, + void *arg) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int ret; + + MAC_TYPE_FILTER_SUP(hw->mac.type); + + if (filter_op == RTE_ETH_FILTER_NOP) + return 0; + + if (arg == NULL) { + PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u", + filter_op); + return -EINVAL; + } + + switch (filter_op) { + case RTE_ETH_FILTER_ADD: + ret = eth_igb_syn_filter_set(dev, + (struct rte_eth_syn_filter *)arg, + TRUE); + break; + case RTE_ETH_FILTER_DELETE: + ret = eth_igb_syn_filter_set(dev, + (struct rte_eth_syn_filter *)arg, + FALSE); + break; + case RTE_ETH_FILTER_GET: + ret = eth_igb_syn_filter_get(dev, + (struct rte_eth_syn_filter *)arg); + break; + default: + PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op); + ret = -EINVAL; + break; + } + + return ret; +} + +/* translate elements in struct rte_eth_ntuple_filter to struct e1000_2tuple_filter_info*/ +static inline int +ntuple_filter_to_2tuple(struct rte_eth_ntuple_filter *filter, + struct e1000_2tuple_filter_info *filter_info) +{ + if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) + return -EINVAL; + if (filter->priority > E1000_2TUPLE_MAX_PRI) + return -EINVAL; /* filter index is out of range. */ + if (filter->tcp_flags > RTE_NTUPLE_TCP_FLAGS_MASK) + return -EINVAL; /* flags is invalid. 
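+ *
+ * Note (illustrative): the switches below translate between two opposite
+ * mask conventions. In struct rte_eth_ntuple_filter a field mask of
+ * UINT16_MAX/UINT8_MAX means "compare this field" and 0 means "ignore it";
+ * in e1000_2tuple_filter_info a *_mask of 0 means "compare" and 1 means
+ * "bypass", matching the hardware mask bits. For example:
+ *
+ *   filter->dst_port_mask == UINT16_MAX   ->   filter_info->dst_port_mask = 0
+ *   filter->proto_mask    == 0            ->   filter_info->proto_mask    = 1
+ *
+ * Any other mask value is rejected with -EINVAL.
+ *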
*/ + + switch (filter->dst_port_mask) { + case UINT16_MAX: + filter_info->dst_port_mask = 0; + filter_info->dst_port = filter->dst_port; + break; + case 0: + filter_info->dst_port_mask = 1; + break; + default: + PMD_DRV_LOG(ERR, "invalid dst_port mask."); + return -EINVAL; + } + + switch (filter->proto_mask) { + case UINT8_MAX: + filter_info->proto_mask = 0; + filter_info->proto = filter->proto; + break; + case 0: + filter_info->proto_mask = 1; + break; + default: + PMD_DRV_LOG(ERR, "invalid protocol mask."); + return -EINVAL; + } + + filter_info->priority = (uint8_t)filter->priority; + if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) + filter_info->tcp_flags = filter->tcp_flags; + else + filter_info->tcp_flags = 0; + + return 0; +} + +static inline struct e1000_2tuple_filter * +igb_2tuple_filter_lookup(struct e1000_2tuple_filter_list *filter_list, + struct e1000_2tuple_filter_info *key) +{ + struct e1000_2tuple_filter *it; + + TAILQ_FOREACH(it, filter_list, entries) { + if (memcmp(key, &it->filter_info, + sizeof(struct e1000_2tuple_filter_info)) == 0) { + return it; + } + } + return NULL; +} + +/* inject a igb 2tuple filter to HW */ +static inline void +igb_inject_2uple_filter(struct rte_eth_dev *dev, + struct e1000_2tuple_filter *filter) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t ttqf = E1000_TTQF_DISABLE_MASK; + uint32_t imir, imir_ext = E1000_IMIREXT_SIZE_BP; + int i; + + i = filter->index; + imir = (uint32_t)(filter->filter_info.dst_port & E1000_IMIR_DSTPORT); + if (filter->filter_info.dst_port_mask == 1) /* 1b means not compare. */ + imir |= E1000_IMIR_PORT_BP; + else + imir &= ~E1000_IMIR_PORT_BP; + + imir |= filter->filter_info.priority << E1000_IMIR_PRIORITY_SHIFT; + + ttqf |= E1000_TTQF_QUEUE_ENABLE; + ttqf |= (uint32_t)(filter->queue << E1000_TTQF_QUEUE_SHIFT); + ttqf |= (uint32_t)(filter->filter_info.proto & + E1000_TTQF_PROTOCOL_MASK); + if (filter->filter_info.proto_mask == 0) + ttqf &= ~E1000_TTQF_MASK_ENABLE; + + /* tcp flags bits setting. */ + if (filter->filter_info.tcp_flags & RTE_NTUPLE_TCP_FLAGS_MASK) { + if (filter->filter_info.tcp_flags & RTE_TCP_URG_FLAG) + imir_ext |= E1000_IMIREXT_CTRL_URG; + if (filter->filter_info.tcp_flags & RTE_TCP_ACK_FLAG) + imir_ext |= E1000_IMIREXT_CTRL_ACK; + if (filter->filter_info.tcp_flags & RTE_TCP_PSH_FLAG) + imir_ext |= E1000_IMIREXT_CTRL_PSH; + if (filter->filter_info.tcp_flags & RTE_TCP_RST_FLAG) + imir_ext |= E1000_IMIREXT_CTRL_RST; + if (filter->filter_info.tcp_flags & RTE_TCP_SYN_FLAG) + imir_ext |= E1000_IMIREXT_CTRL_SYN; + if (filter->filter_info.tcp_flags & RTE_TCP_FIN_FLAG) + imir_ext |= E1000_IMIREXT_CTRL_FIN; + } else { + imir_ext |= E1000_IMIREXT_CTRL_BP; + } + E1000_WRITE_REG(hw, E1000_IMIR(i), imir); + E1000_WRITE_REG(hw, E1000_TTQF(i), ttqf); + E1000_WRITE_REG(hw, E1000_IMIREXT(i), imir_ext); +} + +/* + * igb_add_2tuple_filter - add a 2tuple filter + * + * @param + * dev: Pointer to struct rte_eth_dev. + * ntuple_filter: ponter to the filter that will be added. + * + * @return + * - On success, zero. + * - On failure, a negative value. 
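+ *
+ * Illustrative usage sketch (not a verbatim excerpt). The non-static entry
+ * point is igb_add_del_ntuple_filter(); field names follow the
+ * struct rte_eth_ntuple_filter accesses in this file, and the byte order
+ * expected for the port value is an assumption of this sketch:
+ *
+ *   struct rte_eth_ntuple_filter f = {
+ *           .flags         = RTE_2TUPLE_FLAGS,
+ *           .proto         = 17,           // UDP
+ *           .proto_mask    = UINT8_MAX,    // compare the protocol
+ *           .dst_port      = 319,          // e.g. the PTP event port
+ *           .dst_port_mask = UINT16_MAX,   // compare the port
+ *           .priority      = 1,
+ *           .queue         = 1,
+ *   };
+ *   int rc = igb_add_del_ntuple_filter(dev, &f, 1);   // steer matches to queue 1
+ *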
+ */ +static int +igb_add_2tuple_filter(struct rte_eth_dev *dev, + struct rte_eth_ntuple_filter *ntuple_filter) +{ + struct e1000_filter_info *filter_info = + E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + struct e1000_2tuple_filter *filter; + int i, ret; + + filter = rte_zmalloc("e1000_2tuple_filter", + sizeof(struct e1000_2tuple_filter), 0); + if (filter == NULL) + return -ENOMEM; + + ret = ntuple_filter_to_2tuple(ntuple_filter, + &filter->filter_info); + if (ret < 0) { + rte_free(filter); + return ret; + } + if (igb_2tuple_filter_lookup(&filter_info->twotuple_list, + &filter->filter_info) != NULL) { + PMD_DRV_LOG(ERR, "filter exists."); + rte_free(filter); + return -EEXIST; + } + filter->queue = ntuple_filter->queue; + + /* + * look for an unused 2tuple filter index, + * and insert the filter to list. + */ + for (i = 0; i < E1000_MAX_TTQF_FILTERS; i++) { + if (!(filter_info->twotuple_mask & (1 << i))) { + filter_info->twotuple_mask |= 1 << i; + filter->index = i; + TAILQ_INSERT_TAIL(&filter_info->twotuple_list, + filter, + entries); + break; + } + } + if (i >= E1000_MAX_TTQF_FILTERS) { + PMD_DRV_LOG(ERR, "2tuple filters are full."); + rte_free(filter); + return -ENOSYS; + } + + igb_inject_2uple_filter(dev, filter); + return 0; +} + +int +igb_delete_2tuple_filter(struct rte_eth_dev *dev, + struct e1000_2tuple_filter *filter) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_filter_info *filter_info = + E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + + filter_info->twotuple_mask &= ~(1 << filter->index); + TAILQ_REMOVE(&filter_info->twotuple_list, filter, entries); + rte_free(filter); + + E1000_WRITE_REG(hw, E1000_TTQF(filter->index), E1000_TTQF_DISABLE_MASK); + E1000_WRITE_REG(hw, E1000_IMIR(filter->index), 0); + E1000_WRITE_REG(hw, E1000_IMIREXT(filter->index), 0); + return 0; +} + +/* + * igb_remove_2tuple_filter - remove a 2tuple filter + * + * @param + * dev: Pointer to struct rte_eth_dev. + * ntuple_filter: ponter to the filter that will be removed. + * + * @return + * - On success, zero. + * - On failure, a negative value. 
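+ *
+ * Illustrative removal sketch: deletion goes through the same helper with
+ * add == 0, and the entry is looked up by memcmp() of the converted
+ * e1000_2tuple_filter_info, so the match fields must be identical to the
+ * ones used at add time (the queue number is not part of the lookup key):
+ *
+ *   igb_add_del_ntuple_filter(dev, &f, 0);   // f filled exactly as when added
+ *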
+ */ +static int +igb_remove_2tuple_filter(struct rte_eth_dev *dev, + struct rte_eth_ntuple_filter *ntuple_filter) +{ + struct e1000_filter_info *filter_info = + E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + struct e1000_2tuple_filter_info filter_2tuple; + struct e1000_2tuple_filter *filter; + int ret; + + memset(&filter_2tuple, 0, sizeof(struct e1000_2tuple_filter_info)); + ret = ntuple_filter_to_2tuple(ntuple_filter, + &filter_2tuple); + if (ret < 0) + return ret; + + filter = igb_2tuple_filter_lookup(&filter_info->twotuple_list, + &filter_2tuple); + if (filter == NULL) { + PMD_DRV_LOG(ERR, "filter doesn't exist."); + return -ENOENT; + } + + igb_delete_2tuple_filter(dev, filter); + + return 0; +} + +/* inject a igb flex filter to HW */ +static inline void +igb_inject_flex_filter(struct rte_eth_dev *dev, + struct e1000_flex_filter *filter) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t wufc, queueing; + uint32_t reg_off; + uint8_t i, j = 0; + + wufc = E1000_READ_REG(hw, E1000_WUFC); + if (filter->index < E1000_MAX_FHFT) + reg_off = E1000_FHFT(filter->index); + else + reg_off = E1000_FHFT_EXT(filter->index - E1000_MAX_FHFT); + + E1000_WRITE_REG(hw, E1000_WUFC, wufc | E1000_WUFC_FLEX_HQ | + (E1000_WUFC_FLX0 << filter->index)); + queueing = filter->filter_info.len | + (filter->queue << E1000_FHFT_QUEUEING_QUEUE_SHIFT) | + (filter->filter_info.priority << + E1000_FHFT_QUEUEING_PRIO_SHIFT); + E1000_WRITE_REG(hw, reg_off + E1000_FHFT_QUEUEING_OFFSET, + queueing); + + for (i = 0; i < E1000_FLEX_FILTERS_MASK_SIZE; i++) { + E1000_WRITE_REG(hw, reg_off, + filter->filter_info.dwords[j]); + reg_off += sizeof(uint32_t); + E1000_WRITE_REG(hw, reg_off, + filter->filter_info.dwords[++j]); + reg_off += sizeof(uint32_t); + E1000_WRITE_REG(hw, reg_off, + (uint32_t)filter->filter_info.mask[i]); + reg_off += sizeof(uint32_t) * 2; + ++j; + } +} + +static inline struct e1000_flex_filter * +eth_igb_flex_filter_lookup(struct e1000_flex_filter_list *filter_list, + struct e1000_flex_filter_info *key) +{ + struct e1000_flex_filter *it; + + TAILQ_FOREACH(it, filter_list, entries) { + if (memcmp(key, &it->filter_info, + sizeof(struct e1000_flex_filter_info)) == 0) + return it; + } + + return NULL; +} + +/* remove a flex byte filter + * @param + * dev: Pointer to struct rte_eth_dev. + * filter: the pointer of the filter will be removed. 
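+ *
+ * Illustrative sketch of the corresponding add/remove calls (not a verbatim
+ * excerpt). A flex filter matches a byte pattern at the start of the frame;
+ * len must be a non-zero multiple of 8 and each bit of mask[] selects one
+ * pattern byte to compare. Field names are taken from the
+ * struct rte_eth_flex_filter accesses in this file; pattern[] is a
+ * caller-supplied buffer:
+ *
+ *   struct rte_eth_flex_filter f = { 0 };
+ *
+ *   f.len = 16;                      // match the first 16 bytes of the frame
+ *   memcpy(f.bytes, pattern, f.len);
+ *   f.mask[0] = 0xFF;                // compare bytes 0..7
+ *   f.mask[1] = 0xFF;                // compare bytes 8..15
+ *   f.priority = 0;
+ *   f.queue = 2;
+ *
+ *   eth_igb_add_del_flex_filter(dev, &f, 1);   // add
+ *   eth_igb_add_del_flex_filter(dev, &f, 0);   // later: remove, which ends up here
+ *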
+ */ +void +igb_remove_flex_filter(struct rte_eth_dev *dev, + struct e1000_flex_filter *filter) +{ + struct e1000_filter_info *filter_info = + E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t wufc, i; + uint32_t reg_off; + + wufc = E1000_READ_REG(hw, E1000_WUFC); + if (filter->index < E1000_MAX_FHFT) + reg_off = E1000_FHFT(filter->index); + else + reg_off = E1000_FHFT_EXT(filter->index - E1000_MAX_FHFT); + + for (i = 0; i < E1000_FHFT_SIZE_IN_DWD; i++) + E1000_WRITE_REG(hw, reg_off + i * sizeof(uint32_t), 0); + + E1000_WRITE_REG(hw, E1000_WUFC, wufc & + (~(E1000_WUFC_FLX0 << filter->index))); + + filter_info->flex_mask &= ~(1 << filter->index); + TAILQ_REMOVE(&filter_info->flex_list, filter, entries); + rte_free(filter); +} + +int +eth_igb_add_del_flex_filter(struct rte_eth_dev *dev, + struct rte_eth_flex_filter *filter, + bool add) +{ + struct e1000_filter_info *filter_info = + E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + struct e1000_flex_filter *flex_filter, *it; + uint32_t mask; + uint8_t shift, i; + + flex_filter = rte_zmalloc("e1000_flex_filter", + sizeof(struct e1000_flex_filter), 0); + if (flex_filter == NULL) + return -ENOMEM; + + flex_filter->filter_info.len = filter->len; + flex_filter->filter_info.priority = filter->priority; + memcpy(flex_filter->filter_info.dwords, filter->bytes, filter->len); + for (i = 0; i < RTE_ALIGN(filter->len, CHAR_BIT) / CHAR_BIT; i++) { + mask = 0; + /* reverse bits in flex filter's mask*/ + for (shift = 0; shift < CHAR_BIT; shift++) { + if (filter->mask[i] & (0x01 << shift)) + mask |= (0x80 >> shift); + } + flex_filter->filter_info.mask[i] = mask; + } + + it = eth_igb_flex_filter_lookup(&filter_info->flex_list, + &flex_filter->filter_info); + if (it == NULL && !add) { + PMD_DRV_LOG(ERR, "filter doesn't exist."); + rte_free(flex_filter); + return -ENOENT; + } + if (it != NULL && add) { + PMD_DRV_LOG(ERR, "filter exists."); + rte_free(flex_filter); + return -EEXIST; + } + + if (add) { + flex_filter->queue = filter->queue; + /* + * look for an unused flex filter index + * and insert the filter into the list. 
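+ *
+ * Note on the mask layout: the bit-reversal loop earlier in this function
+ * flips the bit order inside every mask byte before it is handed to the
+ * hardware. An equivalent stand-alone helper (illustrative only) is:
+ *
+ *   static uint8_t flex_mask_reverse_byte(uint8_t b)
+ *   {
+ *           uint8_t out = 0;
+ *           int shift;
+ *
+ *           for (shift = 0; shift < CHAR_BIT; shift++)
+ *                   if (b & (0x01 << shift))
+ *                           out |= 0x80 >> shift;
+ *           return out;
+ *   }
+ *
+ * For example 0x05 (bytes 0 and 2 selected) becomes 0xA0.
+ *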
+ */ + for (i = 0; i < E1000_MAX_FLEX_FILTERS; i++) { + if (!(filter_info->flex_mask & (1 << i))) { + filter_info->flex_mask |= 1 << i; + flex_filter->index = i; + TAILQ_INSERT_TAIL(&filter_info->flex_list, + flex_filter, + entries); + break; + } + } + if (i >= E1000_MAX_FLEX_FILTERS) { + PMD_DRV_LOG(ERR, "flex filters are full."); + rte_free(flex_filter); + return -ENOSYS; + } + + igb_inject_flex_filter(dev, flex_filter); + + } else { + igb_remove_flex_filter(dev, it); + rte_free(flex_filter); + } + + return 0; +} + +static int +eth_igb_get_flex_filter(struct rte_eth_dev *dev, + struct rte_eth_flex_filter *filter) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_filter_info *filter_info = + E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + struct e1000_flex_filter flex_filter, *it; + uint32_t wufc, queueing, wufc_en = 0; + + memset(&flex_filter, 0, sizeof(struct e1000_flex_filter)); + flex_filter.filter_info.len = filter->len; + flex_filter.filter_info.priority = filter->priority; + memcpy(flex_filter.filter_info.dwords, filter->bytes, filter->len); + memcpy(flex_filter.filter_info.mask, filter->mask, + RTE_ALIGN(filter->len, CHAR_BIT) / CHAR_BIT); + + it = eth_igb_flex_filter_lookup(&filter_info->flex_list, + &flex_filter.filter_info); + if (it == NULL) { + PMD_DRV_LOG(ERR, "filter doesn't exist."); + return -ENOENT; + } + + wufc = E1000_READ_REG(hw, E1000_WUFC); + wufc_en = E1000_WUFC_FLEX_HQ | (E1000_WUFC_FLX0 << it->index); + + if ((wufc & wufc_en) == wufc_en) { + uint32_t reg_off = 0; + if (it->index < E1000_MAX_FHFT) + reg_off = E1000_FHFT(it->index); + else + reg_off = E1000_FHFT_EXT(it->index - E1000_MAX_FHFT); + + queueing = E1000_READ_REG(hw, + reg_off + E1000_FHFT_QUEUEING_OFFSET); + filter->len = queueing & E1000_FHFT_QUEUEING_LEN; + filter->priority = (queueing & E1000_FHFT_QUEUEING_PRIO) >> + E1000_FHFT_QUEUEING_PRIO_SHIFT; + filter->queue = (queueing & E1000_FHFT_QUEUEING_QUEUE) >> + E1000_FHFT_QUEUEING_QUEUE_SHIFT; + return 0; + } + return -ENOENT; +} + +static int +eth_igb_flex_filter_handle(struct rte_eth_dev *dev, + enum rte_filter_op filter_op, + void *arg) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_eth_flex_filter *filter; + int ret = 0; + + MAC_TYPE_FILTER_SUP_EXT(hw->mac.type); + + if (filter_op == RTE_ETH_FILTER_NOP) + return ret; + + if (arg == NULL) { + PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u", + filter_op); + return -EINVAL; + } + + filter = (struct rte_eth_flex_filter *)arg; + if (filter->len == 0 || filter->len > E1000_MAX_FLEX_FILTER_LEN + || filter->len % sizeof(uint64_t) != 0) { + PMD_DRV_LOG(ERR, "filter's length is out of range"); + return -EINVAL; + } + if (filter->priority > E1000_MAX_FLEX_FILTER_PRI) { + PMD_DRV_LOG(ERR, "filter's priority is out of range"); + return -EINVAL; + } + + switch (filter_op) { + case RTE_ETH_FILTER_ADD: + ret = eth_igb_add_del_flex_filter(dev, filter, TRUE); + break; + case RTE_ETH_FILTER_DELETE: + ret = eth_igb_add_del_flex_filter(dev, filter, FALSE); + break; + case RTE_ETH_FILTER_GET: + ret = eth_igb_get_flex_filter(dev, filter); + break; + default: + PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op); + ret = -EINVAL; + break; + } + + return ret; +} + +/* translate elements in struct rte_eth_ntuple_filter to struct e1000_5tuple_filter_info*/ +static inline int +ntuple_filter_to_5tuple_82576(struct rte_eth_ntuple_filter *filter, + struct e1000_5tuple_filter_info *filter_info) +{ + if (filter->queue >= 
IGB_MAX_RX_QUEUE_NUM_82576) + return -EINVAL; + if (filter->priority > E1000_2TUPLE_MAX_PRI) + return -EINVAL; /* filter index is out of range. */ + if (filter->tcp_flags > RTE_NTUPLE_TCP_FLAGS_MASK) + return -EINVAL; /* flags is invalid. */ + + switch (filter->dst_ip_mask) { + case UINT32_MAX: + filter_info->dst_ip_mask = 0; + filter_info->dst_ip = filter->dst_ip; + break; + case 0: + filter_info->dst_ip_mask = 1; + break; + default: + PMD_DRV_LOG(ERR, "invalid dst_ip mask."); + return -EINVAL; + } + + switch (filter->src_ip_mask) { + case UINT32_MAX: + filter_info->src_ip_mask = 0; + filter_info->src_ip = filter->src_ip; + break; + case 0: + filter_info->src_ip_mask = 1; + break; + default: + PMD_DRV_LOG(ERR, "invalid src_ip mask."); + return -EINVAL; + } + + switch (filter->dst_port_mask) { + case UINT16_MAX: + filter_info->dst_port_mask = 0; + filter_info->dst_port = filter->dst_port; + break; + case 0: + filter_info->dst_port_mask = 1; + break; + default: + PMD_DRV_LOG(ERR, "invalid dst_port mask."); + return -EINVAL; + } + + switch (filter->src_port_mask) { + case UINT16_MAX: + filter_info->src_port_mask = 0; + filter_info->src_port = filter->src_port; + break; + case 0: + filter_info->src_port_mask = 1; + break; + default: + PMD_DRV_LOG(ERR, "invalid src_port mask."); + return -EINVAL; + } + + switch (filter->proto_mask) { + case UINT8_MAX: + filter_info->proto_mask = 0; + filter_info->proto = filter->proto; + break; + case 0: + filter_info->proto_mask = 1; + break; + default: + PMD_DRV_LOG(ERR, "invalid protocol mask."); + return -EINVAL; + } + + filter_info->priority = (uint8_t)filter->priority; + if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) + filter_info->tcp_flags = filter->tcp_flags; + else + filter_info->tcp_flags = 0; + + return 0; +} + +static inline struct e1000_5tuple_filter * +igb_5tuple_filter_lookup_82576(struct e1000_5tuple_filter_list *filter_list, + struct e1000_5tuple_filter_info *key) +{ + struct e1000_5tuple_filter *it; + + TAILQ_FOREACH(it, filter_list, entries) { + if (memcmp(key, &it->filter_info, + sizeof(struct e1000_5tuple_filter_info)) == 0) { + return it; + } + } + return NULL; +} + +/* inject a igb 5-tuple filter to HW */ +static inline void +igb_inject_5tuple_filter_82576(struct rte_eth_dev *dev, + struct e1000_5tuple_filter *filter) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t ftqf = E1000_FTQF_VF_BP | E1000_FTQF_MASK; + uint32_t spqf, imir, imir_ext = E1000_IMIREXT_SIZE_BP; + uint8_t i; + + i = filter->index; + ftqf |= filter->filter_info.proto & E1000_FTQF_PROTOCOL_MASK; + if (filter->filter_info.src_ip_mask == 0) /* 0b means compare. */ + ftqf &= ~E1000_FTQF_MASK_SOURCE_ADDR_BP; + if (filter->filter_info.dst_ip_mask == 0) + ftqf &= ~E1000_FTQF_MASK_DEST_ADDR_BP; + if (filter->filter_info.src_port_mask == 0) + ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP; + if (filter->filter_info.proto_mask == 0) + ftqf &= ~E1000_FTQF_MASK_PROTO_BP; + ftqf |= (filter->queue << E1000_FTQF_QUEUE_SHIFT) & + E1000_FTQF_QUEUE_MASK; + ftqf |= E1000_FTQF_QUEUE_ENABLE; + E1000_WRITE_REG(hw, E1000_FTQF(i), ftqf); + E1000_WRITE_REG(hw, E1000_DAQF(i), filter->filter_info.dst_ip); + E1000_WRITE_REG(hw, E1000_SAQF(i), filter->filter_info.src_ip); + + spqf = filter->filter_info.src_port & E1000_SPQF_SRCPORT; + E1000_WRITE_REG(hw, E1000_SPQF(i), spqf); + + imir = (uint32_t)(filter->filter_info.dst_port & E1000_IMIR_DSTPORT); + if (filter->filter_info.dst_port_mask == 1) /* 1b means not compare. 
*/ + imir |= E1000_IMIR_PORT_BP; + else + imir &= ~E1000_IMIR_PORT_BP; + imir |= filter->filter_info.priority << E1000_IMIR_PRIORITY_SHIFT; + + /* tcp flags bits setting. */ + if (filter->filter_info.tcp_flags & RTE_NTUPLE_TCP_FLAGS_MASK) { + if (filter->filter_info.tcp_flags & RTE_TCP_URG_FLAG) + imir_ext |= E1000_IMIREXT_CTRL_URG; + if (filter->filter_info.tcp_flags & RTE_TCP_ACK_FLAG) + imir_ext |= E1000_IMIREXT_CTRL_ACK; + if (filter->filter_info.tcp_flags & RTE_TCP_PSH_FLAG) + imir_ext |= E1000_IMIREXT_CTRL_PSH; + if (filter->filter_info.tcp_flags & RTE_TCP_RST_FLAG) + imir_ext |= E1000_IMIREXT_CTRL_RST; + if (filter->filter_info.tcp_flags & RTE_TCP_SYN_FLAG) + imir_ext |= E1000_IMIREXT_CTRL_SYN; + if (filter->filter_info.tcp_flags & RTE_TCP_FIN_FLAG) + imir_ext |= E1000_IMIREXT_CTRL_FIN; + } else { + imir_ext |= E1000_IMIREXT_CTRL_BP; + } + E1000_WRITE_REG(hw, E1000_IMIR(i), imir); + E1000_WRITE_REG(hw, E1000_IMIREXT(i), imir_ext); +} + +/* + * igb_add_5tuple_filter_82576 - add a 5tuple filter + * + * @param + * dev: Pointer to struct rte_eth_dev. + * ntuple_filter: ponter to the filter that will be added. + * + * @return + * - On success, zero. + * - On failure, a negative value. + */ +static int +igb_add_5tuple_filter_82576(struct rte_eth_dev *dev, + struct rte_eth_ntuple_filter *ntuple_filter) +{ + struct e1000_filter_info *filter_info = + E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + struct e1000_5tuple_filter *filter; + uint8_t i; + int ret; + + filter = rte_zmalloc("e1000_5tuple_filter", + sizeof(struct e1000_5tuple_filter), 0); + if (filter == NULL) + return -ENOMEM; + + ret = ntuple_filter_to_5tuple_82576(ntuple_filter, + &filter->filter_info); + if (ret < 0) { + rte_free(filter); + return ret; + } + + if (igb_5tuple_filter_lookup_82576(&filter_info->fivetuple_list, + &filter->filter_info) != NULL) { + PMD_DRV_LOG(ERR, "filter exists."); + rte_free(filter); + return -EEXIST; + } + filter->queue = ntuple_filter->queue; + + /* + * look for an unused 5tuple filter index, + * and insert the filter to list. + */ + for (i = 0; i < E1000_MAX_FTQF_FILTERS; i++) { + if (!(filter_info->fivetuple_mask & (1 << i))) { + filter_info->fivetuple_mask |= 1 << i; + filter->index = i; + TAILQ_INSERT_TAIL(&filter_info->fivetuple_list, + filter, + entries); + break; + } + } + if (i >= E1000_MAX_FTQF_FILTERS) { + PMD_DRV_LOG(ERR, "5tuple filters are full."); + rte_free(filter); + return -ENOSYS; + } + + igb_inject_5tuple_filter_82576(dev, filter); + return 0; +} + +int +igb_delete_5tuple_filter_82576(struct rte_eth_dev *dev, + struct e1000_5tuple_filter *filter) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_filter_info *filter_info = + E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + + filter_info->fivetuple_mask &= ~(1 << filter->index); + TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries); + rte_free(filter); + + E1000_WRITE_REG(hw, E1000_FTQF(filter->index), + E1000_FTQF_VF_BP | E1000_FTQF_MASK); + E1000_WRITE_REG(hw, E1000_DAQF(filter->index), 0); + E1000_WRITE_REG(hw, E1000_SAQF(filter->index), 0); + E1000_WRITE_REG(hw, E1000_SPQF(filter->index), 0); + E1000_WRITE_REG(hw, E1000_IMIR(filter->index), 0); + E1000_WRITE_REG(hw, E1000_IMIREXT(filter->index), 0); + return 0; +} + +/* + * igb_remove_5tuple_filter_82576 - remove a 5tuple filter + * + * @param + * dev: Pointer to struct rte_eth_dev. + * ntuple_filter: ponter to the filter that will be removed. + * + * @return + * - On success, zero. 
+ * - On failure, a negative value. + */ +static int +igb_remove_5tuple_filter_82576(struct rte_eth_dev *dev, + struct rte_eth_ntuple_filter *ntuple_filter) +{ + struct e1000_filter_info *filter_info = + E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + struct e1000_5tuple_filter_info filter_5tuple; + struct e1000_5tuple_filter *filter; + int ret; + + memset(&filter_5tuple, 0, sizeof(struct e1000_5tuple_filter_info)); + ret = ntuple_filter_to_5tuple_82576(ntuple_filter, + &filter_5tuple); + if (ret < 0) + return ret; + + filter = igb_5tuple_filter_lookup_82576(&filter_info->fivetuple_list, + &filter_5tuple); + if (filter == NULL) { + PMD_DRV_LOG(ERR, "filter doesn't exist."); + return -ENOENT; + } + + igb_delete_5tuple_filter_82576(dev, filter); + + return 0; +} + +static int +eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) +{ + uint32_t rctl; + struct e1000_hw *hw; + struct rte_eth_dev_info dev_info; + uint32_t frame_size = mtu + E1000_ETH_OVERHEAD; + int ret; + + hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + +#ifdef RTE_LIBRTE_82571_SUPPORT + /* XXX: not bigger than max_rx_pktlen */ + if (hw->mac.type == e1000_82571) + return -ENOTSUP; +#endif + ret = eth_igb_infos_get(dev, &dev_info); + if (ret != 0) + return ret; + + /* check that mtu is within the allowed range */ + if (mtu < RTE_ETHER_MIN_MTU || + frame_size > dev_info.max_rx_pktlen) + return -EINVAL; + + /* refuse mtu that requires the support of scattered packets when this + * feature has not been enabled before. */ + if (!dev->data->scattered_rx && + frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) + return -EINVAL; + + rctl = E1000_READ_REG(hw, E1000_RCTL); + + /* switch to jumbo mode if needed */ + if (frame_size > RTE_ETHER_MAX_LEN) { + dev->data->dev_conf.rxmode.offloads |= + DEV_RX_OFFLOAD_JUMBO_FRAME; + rctl |= E1000_RCTL_LPE; + } else { + dev->data->dev_conf.rxmode.offloads &= + ~DEV_RX_OFFLOAD_JUMBO_FRAME; + rctl &= ~E1000_RCTL_LPE; + } + E1000_WRITE_REG(hw, E1000_RCTL, rctl); + + /* update max frame size */ + dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size; + + E1000_WRITE_REG(hw, E1000_RLPML, + dev->data->dev_conf.rxmode.max_rx_pkt_len); + + return 0; +} + +/* + * igb_add_del_ntuple_filter - add or delete a ntuple filter + * + * @param + * dev: Pointer to struct rte_eth_dev. + * ntuple_filter: Pointer to struct rte_eth_ntuple_filter + * add: if true, add filter, if false, remove filter + * + * @return + * - On success, zero. + * - On failure, a negative value. 
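+ *
+ * Illustrative 5-tuple example (not a verbatim excerpt). 5-tuple filters are
+ * only accepted on 82576 and 2-tuple filters on 82580/i350/i210/i211; see the
+ * switch below. The mask convention is UINT*_MAX = compare the field,
+ * 0 = ignore it. dst_ip_be and dst_port_be are caller-supplied values whose
+ * byte order is assumed to follow the ethdev ntuple convention:
+ *
+ *   struct rte_eth_ntuple_filter f = {
+ *           .flags         = RTE_5TUPLE_FLAGS,
+ *           .dst_ip        = dst_ip_be,
+ *           .dst_ip_mask   = UINT32_MAX,   // compare destination address
+ *           .src_ip_mask   = 0,            // ignore source address
+ *           .dst_port      = dst_port_be,
+ *           .dst_port_mask = UINT16_MAX,
+ *           .src_port_mask = 0,
+ *           .proto         = 17,           // UDP
+ *           .proto_mask    = UINT8_MAX,
+ *           .priority      = 1,
+ *           .queue         = 3,
+ *   };
+ *   int rc = igb_add_del_ntuple_filter(dev, &f, 1);   // add; pass 0 to delete
+ *
+ * Applications normally reach this helper through the legacy filter-ctrl
+ * path, e.g. rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_NTUPLE,
+ * RTE_ETH_FILTER_ADD, &f).
+ *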
+ */ +int +igb_add_del_ntuple_filter(struct rte_eth_dev *dev, + struct rte_eth_ntuple_filter *ntuple_filter, + bool add) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int ret; + + switch (ntuple_filter->flags) { + case RTE_5TUPLE_FLAGS: + case (RTE_5TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG): + if (hw->mac.type != e1000_82576) + return -ENOTSUP; + if (add) + ret = igb_add_5tuple_filter_82576(dev, + ntuple_filter); + else + ret = igb_remove_5tuple_filter_82576(dev, + ntuple_filter); + break; + case RTE_2TUPLE_FLAGS: + case (RTE_2TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG): + if (hw->mac.type != e1000_82580 && hw->mac.type != e1000_i350 && + hw->mac.type != e1000_i210 && + hw->mac.type != e1000_i211) + return -ENOTSUP; + if (add) + ret = igb_add_2tuple_filter(dev, ntuple_filter); + else + ret = igb_remove_2tuple_filter(dev, ntuple_filter); + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +/* + * igb_get_ntuple_filter - get a ntuple filter + * + * @param + * dev: Pointer to struct rte_eth_dev. + * ntuple_filter: Pointer to struct rte_eth_ntuple_filter + * + * @return + * - On success, zero. + * - On failure, a negative value. + */ +static int +igb_get_ntuple_filter(struct rte_eth_dev *dev, + struct rte_eth_ntuple_filter *ntuple_filter) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_filter_info *filter_info = + E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + struct e1000_5tuple_filter_info filter_5tuple; + struct e1000_2tuple_filter_info filter_2tuple; + struct e1000_5tuple_filter *p_5tuple_filter; + struct e1000_2tuple_filter *p_2tuple_filter; + int ret; + + switch (ntuple_filter->flags) { + case RTE_5TUPLE_FLAGS: + case (RTE_5TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG): + if (hw->mac.type != e1000_82576) + return -ENOTSUP; + memset(&filter_5tuple, + 0, + sizeof(struct e1000_5tuple_filter_info)); + ret = ntuple_filter_to_5tuple_82576(ntuple_filter, + &filter_5tuple); + if (ret < 0) + return ret; + p_5tuple_filter = igb_5tuple_filter_lookup_82576( + &filter_info->fivetuple_list, + &filter_5tuple); + if (p_5tuple_filter == NULL) { + PMD_DRV_LOG(ERR, "filter doesn't exist."); + return -ENOENT; + } + ntuple_filter->queue = p_5tuple_filter->queue; + break; + case RTE_2TUPLE_FLAGS: + case (RTE_2TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG): + if (hw->mac.type != e1000_82580 && hw->mac.type != e1000_i350) + return -ENOTSUP; + memset(&filter_2tuple, + 0, + sizeof(struct e1000_2tuple_filter_info)); + ret = ntuple_filter_to_2tuple(ntuple_filter, &filter_2tuple); + if (ret < 0) + return ret; + p_2tuple_filter = igb_2tuple_filter_lookup( + &filter_info->twotuple_list, + &filter_2tuple); + if (p_2tuple_filter == NULL) { + PMD_DRV_LOG(ERR, "filter doesn't exist."); + return -ENOENT; + } + ntuple_filter->queue = p_2tuple_filter->queue; + break; + default: + ret = -EINVAL; + break; + } + + return 0; +} + +/* + * igb_ntuple_filter_handle - Handle operations for ntuple filter. + * @dev: pointer to rte_eth_dev structure + * @filter_op:operation will be taken. 
+ * @arg: a pointer to specific structure corresponding to the filter_op + */ +static int +igb_ntuple_filter_handle(struct rte_eth_dev *dev, + enum rte_filter_op filter_op, + void *arg) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int ret; + + MAC_TYPE_FILTER_SUP(hw->mac.type); + + if (filter_op == RTE_ETH_FILTER_NOP) + return 0; + + if (arg == NULL) { + PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.", + filter_op); + return -EINVAL; + } + + switch (filter_op) { + case RTE_ETH_FILTER_ADD: + ret = igb_add_del_ntuple_filter(dev, + (struct rte_eth_ntuple_filter *)arg, + TRUE); + break; + case RTE_ETH_FILTER_DELETE: + ret = igb_add_del_ntuple_filter(dev, + (struct rte_eth_ntuple_filter *)arg, + FALSE); + break; + case RTE_ETH_FILTER_GET: + ret = igb_get_ntuple_filter(dev, + (struct rte_eth_ntuple_filter *)arg); + break; + default: + PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op); + ret = -EINVAL; + break; + } + return ret; +} + +static inline int +igb_ethertype_filter_lookup(struct e1000_filter_info *filter_info, + uint16_t ethertype) +{ + int i; + + for (i = 0; i < E1000_MAX_ETQF_FILTERS; i++) { + if (filter_info->ethertype_filters[i].ethertype == ethertype && + (filter_info->ethertype_mask & (1 << i))) + return i; + } + return -1; +} + +static inline int +igb_ethertype_filter_insert(struct e1000_filter_info *filter_info, + uint16_t ethertype, uint32_t etqf) +{ + int i; + + for (i = 0; i < E1000_MAX_ETQF_FILTERS; i++) { + if (!(filter_info->ethertype_mask & (1 << i))) { + filter_info->ethertype_mask |= 1 << i; + filter_info->ethertype_filters[i].ethertype = ethertype; + filter_info->ethertype_filters[i].etqf = etqf; + return i; + } + } + return -1; +} + +int +igb_ethertype_filter_remove(struct e1000_filter_info *filter_info, + uint8_t idx) +{ + if (idx >= E1000_MAX_ETQF_FILTERS) + return -1; + filter_info->ethertype_mask &= ~(1 << idx); + filter_info->ethertype_filters[idx].ethertype = 0; + filter_info->ethertype_filters[idx].etqf = 0; + return idx; +} + + +int +igb_add_del_ethertype_filter(struct rte_eth_dev *dev, + struct rte_eth_ethertype_filter *filter, + bool add) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_filter_info *filter_info = + E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + uint32_t etqf = 0; + int ret; + + if (filter->ether_type == RTE_ETHER_TYPE_IPV4 || + filter->ether_type == RTE_ETHER_TYPE_IPV6) { + PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in" + " ethertype filter.", filter->ether_type); + return -EINVAL; + } + + if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) { + PMD_DRV_LOG(ERR, "mac compare is unsupported."); + return -EINVAL; + } + if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) { + PMD_DRV_LOG(ERR, "drop option is unsupported."); + return -EINVAL; + } + + ret = igb_ethertype_filter_lookup(filter_info, filter->ether_type); + if (ret >= 0 && add) { + PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.", + filter->ether_type); + return -EEXIST; + } + if (ret < 0 && !add) { + PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.", + filter->ether_type); + return -ENOENT; + } + + if (add) { + etqf |= E1000_ETQF_FILTER_ENABLE | E1000_ETQF_QUEUE_ENABLE; + etqf |= (uint32_t)(filter->ether_type & E1000_ETQF_ETHERTYPE); + etqf |= filter->queue << E1000_ETQF_QUEUE_SHIFT; + ret = igb_ethertype_filter_insert(filter_info, + filter->ether_type, etqf); + if (ret < 0) { + PMD_DRV_LOG(ERR, "ethertype filters are full."); + return -ENOSYS; + } + } else { + ret = 
igb_ethertype_filter_remove(filter_info, (uint8_t)ret); + if (ret < 0) + return -ENOSYS; + } + E1000_WRITE_REG(hw, E1000_ETQF(ret), etqf); + E1000_WRITE_FLUSH(hw); + + return 0; +} + +static int +igb_get_ethertype_filter(struct rte_eth_dev *dev, + struct rte_eth_ethertype_filter *filter) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_filter_info *filter_info = + E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + uint32_t etqf; + int ret; + + ret = igb_ethertype_filter_lookup(filter_info, filter->ether_type); + if (ret < 0) { + PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.", + filter->ether_type); + return -ENOENT; + } + + etqf = E1000_READ_REG(hw, E1000_ETQF(ret)); + if (etqf & E1000_ETQF_FILTER_ENABLE) { + filter->ether_type = etqf & E1000_ETQF_ETHERTYPE; + filter->flags = 0; + filter->queue = (etqf & E1000_ETQF_QUEUE) >> + E1000_ETQF_QUEUE_SHIFT; + return 0; + } + + return -ENOENT; +} + +/* + * igb_ethertype_filter_handle - Handle operations for ethertype filter. + * @dev: pointer to rte_eth_dev structure + * @filter_op:operation will be taken. + * @arg: a pointer to specific structure corresponding to the filter_op + */ +static int +igb_ethertype_filter_handle(struct rte_eth_dev *dev, + enum rte_filter_op filter_op, + void *arg) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int ret; + + MAC_TYPE_FILTER_SUP(hw->mac.type); + + if (filter_op == RTE_ETH_FILTER_NOP) + return 0; + + if (arg == NULL) { + PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.", + filter_op); + return -EINVAL; + } + + switch (filter_op) { + case RTE_ETH_FILTER_ADD: + ret = igb_add_del_ethertype_filter(dev, + (struct rte_eth_ethertype_filter *)arg, + TRUE); + break; + case RTE_ETH_FILTER_DELETE: + ret = igb_add_del_ethertype_filter(dev, + (struct rte_eth_ethertype_filter *)arg, + FALSE); + break; + case RTE_ETH_FILTER_GET: + ret = igb_get_ethertype_filter(dev, + (struct rte_eth_ethertype_filter *)arg); + break; + default: + PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op); + ret = -EINVAL; + break; + } + return ret; +} + +static int +eth_igb_filter_ctrl(struct rte_eth_dev *dev, + enum rte_filter_type filter_type, + enum rte_filter_op filter_op, + void *arg) +{ + int ret = 0; + + switch (filter_type) { + case RTE_ETH_FILTER_NTUPLE: + ret = igb_ntuple_filter_handle(dev, filter_op, arg); + break; + case RTE_ETH_FILTER_ETHERTYPE: + ret = igb_ethertype_filter_handle(dev, filter_op, arg); + break; + case RTE_ETH_FILTER_SYN: + ret = eth_igb_syn_filter_handle(dev, filter_op, arg); + break; + case RTE_ETH_FILTER_FLEXIBLE: + ret = eth_igb_flex_filter_handle(dev, filter_op, arg); + break; + case RTE_ETH_FILTER_GENERIC: + if (filter_op != RTE_ETH_FILTER_GET) + return -EINVAL; + *(const void **)arg = &igb_flow_ops; + break; + default: + PMD_DRV_LOG(WARNING, "Filter type (%d) not supported", + filter_type); + break; + } + + return ret; +} + +static int +eth_igb_set_mc_addr_list(struct rte_eth_dev *dev, + struct rte_ether_addr *mc_addr_set, + uint32_t nb_mc_addr) +{ + struct e1000_hw *hw; + + hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + e1000_update_mc_addr_list(hw, (u8 *)mc_addr_set, nb_mc_addr); + return 0; +} + +static uint64_t +igb_read_systime_cyclecounter(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint64_t systime_cycles; + + switch (hw->mac.type) { + case e1000_i210: + case e1000_i211: + /* + * Need to read System Time Residue Register to be 
able + * to read the other two registers. + */ + E1000_READ_REG(hw, E1000_SYSTIMR); + /* SYSTIMEL stores ns and SYSTIMEH stores seconds. */ + systime_cycles = (uint64_t)E1000_READ_REG(hw, E1000_SYSTIML); + systime_cycles += (uint64_t)E1000_READ_REG(hw, E1000_SYSTIMH) + * NSEC_PER_SEC; + break; + case e1000_82580: + case e1000_i350: + case e1000_i354: + /* + * Need to read System Time Residue Register to be able + * to read the other two registers. + */ + E1000_READ_REG(hw, E1000_SYSTIMR); + systime_cycles = (uint64_t)E1000_READ_REG(hw, E1000_SYSTIML); + /* Only the 8 LSB are valid. */ + systime_cycles |= (uint64_t)(E1000_READ_REG(hw, E1000_SYSTIMH) + & 0xff) << 32; + break; + default: + systime_cycles = (uint64_t)E1000_READ_REG(hw, E1000_SYSTIML); + systime_cycles |= (uint64_t)E1000_READ_REG(hw, E1000_SYSTIMH) + << 32; + break; + } + + return systime_cycles; +} + +static uint64_t +igb_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint64_t rx_tstamp_cycles; + + switch (hw->mac.type) { + case e1000_i210: + case e1000_i211: + /* RXSTMPL stores ns and RXSTMPH stores seconds. */ + rx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_RXSTMPL); + rx_tstamp_cycles += (uint64_t)E1000_READ_REG(hw, E1000_RXSTMPH) + * NSEC_PER_SEC; + break; + case e1000_82580: + case e1000_i350: + case e1000_i354: + rx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_RXSTMPL); + /* Only the 8 LSB are valid. */ + rx_tstamp_cycles |= (uint64_t)(E1000_READ_REG(hw, E1000_RXSTMPH) + & 0xff) << 32; + break; + default: + rx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_RXSTMPL); + rx_tstamp_cycles |= (uint64_t)E1000_READ_REG(hw, E1000_RXSTMPH) + << 32; + break; + } + + return rx_tstamp_cycles; +} + +static uint64_t +igb_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint64_t tx_tstamp_cycles; + + switch (hw->mac.type) { + case e1000_i210: + case e1000_i211: + /* RXSTMPL stores ns and RXSTMPH stores seconds. */ + tx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_TXSTMPL); + tx_tstamp_cycles += (uint64_t)E1000_READ_REG(hw, E1000_TXSTMPH) + * NSEC_PER_SEC; + break; + case e1000_82580: + case e1000_i350: + case e1000_i354: + tx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_TXSTMPL); + /* Only the 8 LSB are valid. */ + tx_tstamp_cycles |= (uint64_t)(E1000_READ_REG(hw, E1000_TXSTMPH) + & 0xff) << 32; + break; + default: + tx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_TXSTMPL); + tx_tstamp_cycles |= (uint64_t)E1000_READ_REG(hw, E1000_TXSTMPH) + << 32; + break; + } + + return tx_tstamp_cycles; +} + +static void +igb_start_timecounters(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_adapter *adapter = dev->data->dev_private; + uint32_t incval = 1; + uint32_t shift = 0; + uint64_t mask = E1000_CYCLECOUNTER_MASK; + + switch (hw->mac.type) { + case e1000_82580: + case e1000_i350: + case e1000_i354: + /* 32 LSB bits + 8 MSB bits = 40 bits */ + mask = (1ULL << 40) - 1; + /* fall-through */ + case e1000_i210: + case e1000_i211: + /* + * Start incrementing the register + * used to timestamp PTP packets. 
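+ *
+ * Worked illustration: on 82580/i350/i354 the SYSTIM value consumed by the
+ * timecounter is 40 bits wide (32 LSB plus 8 MSB, see
+ * igb_read_systime_cyclecounter() above), hence
+ *
+ *   mask = (1ULL << 40) - 1;   // == 0xFFFFFFFFFFULL
+ *
+ * On i210/i211 SYSTIML already counts nanoseconds and SYSTIMH seconds, so a
+ * reading is composed as
+ *
+ *   systime = (uint64_t)SYSTIMH * NSEC_PER_SEC + SYSTIML;
+ *
+ * and no extra scaling is needed (incval stays 1, shift stays 0).
+ *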
+ */ + E1000_WRITE_REG(hw, E1000_TIMINCA, incval); + break; + case e1000_82576: + incval = E1000_INCVALUE_82576; + shift = IGB_82576_TSYNC_SHIFT; + E1000_WRITE_REG(hw, E1000_TIMINCA, + E1000_INCPERIOD_82576 | incval); + break; + default: + /* Not supported */ + return; + } + + memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter)); + memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter)); + memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter)); + + adapter->systime_tc.cc_mask = mask; + adapter->systime_tc.cc_shift = shift; + adapter->systime_tc.nsec_mask = (1ULL << shift) - 1; + + adapter->rx_tstamp_tc.cc_mask = mask; + adapter->rx_tstamp_tc.cc_shift = shift; + adapter->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1; + + adapter->tx_tstamp_tc.cc_mask = mask; + adapter->tx_tstamp_tc.cc_shift = shift; + adapter->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1; +} + +static int +igb_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta) +{ + struct e1000_adapter *adapter = dev->data->dev_private; + + adapter->systime_tc.nsec += delta; + adapter->rx_tstamp_tc.nsec += delta; + adapter->tx_tstamp_tc.nsec += delta; + + return 0; +} + +static int +igb_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts) +{ + uint64_t ns; + struct e1000_adapter *adapter = dev->data->dev_private; + + ns = rte_timespec_to_ns(ts); + + /* Set the timecounters to a new value. */ + adapter->systime_tc.nsec = ns; + adapter->rx_tstamp_tc.nsec = ns; + adapter->tx_tstamp_tc.nsec = ns; + + return 0; +} + +static int +igb_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts) +{ + uint64_t ns, systime_cycles; + struct e1000_adapter *adapter = dev->data->dev_private; + + systime_cycles = igb_read_systime_cyclecounter(dev); + ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles); + *ts = rte_ns_to_timespec(ns); + + return 0; +} + +static int +igb_timesync_enable(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t tsync_ctl; + uint32_t tsauxc; + + /* Stop the timesync system time. */ + E1000_WRITE_REG(hw, E1000_TIMINCA, 0x0); + /* Reset the timesync system time value. */ + switch (hw->mac.type) { + case e1000_82580: + case e1000_i350: + case e1000_i354: + case e1000_i210: + case e1000_i211: + E1000_WRITE_REG(hw, E1000_SYSTIMR, 0x0); + /* fall-through */ + case e1000_82576: + E1000_WRITE_REG(hw, E1000_SYSTIML, 0x0); + E1000_WRITE_REG(hw, E1000_SYSTIMH, 0x0); + break; + default: + /* Not supported. */ + return -ENOTSUP; + } + + /* Enable system time for it isn't on by default. */ + tsauxc = E1000_READ_REG(hw, E1000_TSAUXC); + tsauxc &= ~E1000_TSAUXC_DISABLE_SYSTIME; + E1000_WRITE_REG(hw, E1000_TSAUXC, tsauxc); + + igb_start_timecounters(dev); + + /* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */ + E1000_WRITE_REG(hw, E1000_ETQF(E1000_ETQF_FILTER_1588), + (RTE_ETHER_TYPE_1588 | + E1000_ETQF_FILTER_ENABLE | + E1000_ETQF_1588)); + + /* Enable timestamping of received PTP packets. */ + tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCRXCTL); + tsync_ctl |= E1000_TSYNCRXCTL_ENABLED; + E1000_WRITE_REG(hw, E1000_TSYNCRXCTL, tsync_ctl); + + /* Enable Timestamping of transmitted PTP packets. 
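+ *
+ * Application-side sketch (illustrative; uses the public ethdev timesync API
+ * that is backed by the igb_timesync_* callbacks in this file):
+ *
+ *   struct timespec ts;
+ *
+ *   rte_eth_timesync_enable(port_id);              // ends up in this function
+ *   // ... receive/transmit PTP event frames ...
+ *   if (rte_eth_timesync_read_rx_timestamp(port_id, &ts, 0) == 0)
+ *           printf("RX stamp %ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec);
+ *   if (rte_eth_timesync_read_tx_timestamp(port_id, &ts) == 0)
+ *           printf("TX stamp %ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec);
+ *   rte_eth_timesync_disable(port_id);
+ *
+ * The flags argument of the RX read is not used by this driver (see
+ * igb_timesync_read_rx_timestamp() below).
+ *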
*/ + tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCTXCTL); + tsync_ctl |= E1000_TSYNCTXCTL_ENABLED; + E1000_WRITE_REG(hw, E1000_TSYNCTXCTL, tsync_ctl); + + return 0; +} + +static int +igb_timesync_disable(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t tsync_ctl; + + /* Disable timestamping of transmitted PTP packets. */ + tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCTXCTL); + tsync_ctl &= ~E1000_TSYNCTXCTL_ENABLED; + E1000_WRITE_REG(hw, E1000_TSYNCTXCTL, tsync_ctl); + + /* Disable timestamping of received PTP packets. */ + tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCRXCTL); + tsync_ctl &= ~E1000_TSYNCRXCTL_ENABLED; + E1000_WRITE_REG(hw, E1000_TSYNCRXCTL, tsync_ctl); + + /* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */ + E1000_WRITE_REG(hw, E1000_ETQF(E1000_ETQF_FILTER_1588), 0); + + /* Stop incrementating the System Time registers. */ + E1000_WRITE_REG(hw, E1000_TIMINCA, 0); + + return 0; +} + +static int +igb_timesync_read_rx_timestamp(struct rte_eth_dev *dev, + struct timespec *timestamp, + uint32_t flags __rte_unused) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_adapter *adapter = dev->data->dev_private; + uint32_t tsync_rxctl; + uint64_t rx_tstamp_cycles; + uint64_t ns; + + tsync_rxctl = E1000_READ_REG(hw, E1000_TSYNCRXCTL); + if ((tsync_rxctl & E1000_TSYNCRXCTL_VALID) == 0) + return -EINVAL; + + rx_tstamp_cycles = igb_read_rx_tstamp_cyclecounter(dev); + ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles); + *timestamp = rte_ns_to_timespec(ns); + + return 0; +} + +static int +igb_timesync_read_tx_timestamp(struct rte_eth_dev *dev, + struct timespec *timestamp) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_adapter *adapter = dev->data->dev_private; + uint32_t tsync_txctl; + uint64_t tx_tstamp_cycles; + uint64_t ns; + + tsync_txctl = E1000_READ_REG(hw, E1000_TSYNCTXCTL); + if ((tsync_txctl & E1000_TSYNCTXCTL_VALID) == 0) + return -EINVAL; + + tx_tstamp_cycles = igb_read_tx_tstamp_cyclecounter(dev); + ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles); + *timestamp = rte_ns_to_timespec(ns); + + return 0; +} + +static int +eth_igb_get_reg_length(struct rte_eth_dev *dev __rte_unused) +{ + int count = 0; + int g_ind = 0; + const struct reg_info *reg_group; + + while ((reg_group = igb_regs[g_ind++])) + count += igb_reg_group_count(reg_group); + + return count; +} + +static int +igbvf_get_reg_length(struct rte_eth_dev *dev __rte_unused) +{ + int count = 0; + int g_ind = 0; + const struct reg_info *reg_group; + + while ((reg_group = igbvf_regs[g_ind++])) + count += igb_reg_group_count(reg_group); + + return count; +} + +static int +eth_igb_get_regs(struct rte_eth_dev *dev, + struct rte_dev_reg_info *regs) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t *data = regs->data; + int g_ind = 0; + int count = 0; + const struct reg_info *reg_group; + + if (data == NULL) { + regs->length = eth_igb_get_reg_length(dev); + regs->width = sizeof(uint32_t); + return 0; + } + + /* Support only full register dump */ + if ((regs->length == 0) || + (regs->length == (uint32_t)eth_igb_get_reg_length(dev))) { + regs->version = hw->mac.type << 24 | hw->revision_id << 16 | + hw->device_id; + while ((reg_group = igb_regs[g_ind++])) + count += igb_read_regs_group(dev, &data[count], + reg_group); + return 0; + } + + return -ENOTSUP; +} + +static int +igbvf_get_regs(struct 
rte_eth_dev *dev, + struct rte_dev_reg_info *regs) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t *data = regs->data; + int g_ind = 0; + int count = 0; + const struct reg_info *reg_group; + + if (data == NULL) { + regs->length = igbvf_get_reg_length(dev); + regs->width = sizeof(uint32_t); + return 0; + } + + /* Support only full register dump */ + if ((regs->length == 0) || + (regs->length == (uint32_t)igbvf_get_reg_length(dev))) { + regs->version = hw->mac.type << 24 | hw->revision_id << 16 | + hw->device_id; + while ((reg_group = igbvf_regs[g_ind++])) + count += igb_read_regs_group(dev, &data[count], + reg_group); + return 0; + } + + return -ENOTSUP; +} + +static int +eth_igb_get_eeprom_length(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* Return unit is byte count */ + return hw->nvm.word_size * 2; +} + +static int +eth_igb_get_eeprom(struct rte_eth_dev *dev, + struct rte_dev_eeprom_info *in_eeprom) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_nvm_info *nvm = &hw->nvm; + uint16_t *data = in_eeprom->data; + int first, length; + + first = in_eeprom->offset >> 1; + length = in_eeprom->length >> 1; + if ((first >= hw->nvm.word_size) || + ((first + length) >= hw->nvm.word_size)) + return -EINVAL; + + in_eeprom->magic = hw->vendor_id | + ((uint32_t)hw->device_id << 16); + + if ((nvm->ops.read) == NULL) + return -ENOTSUP; + + return nvm->ops.read(hw, first, length, data); +} + +static int +eth_igb_set_eeprom(struct rte_eth_dev *dev, + struct rte_dev_eeprom_info *in_eeprom) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_nvm_info *nvm = &hw->nvm; + uint16_t *data = in_eeprom->data; + int first, length; + + first = in_eeprom->offset >> 1; + length = in_eeprom->length >> 1; + if ((first >= hw->nvm.word_size) || + ((first + length) >= hw->nvm.word_size)) + return -EINVAL; + + in_eeprom->magic = (uint32_t)hw->vendor_id | + ((uint32_t)hw->device_id << 16); + + if ((nvm->ops.write) == NULL) + return -ENOTSUP; + return nvm->ops.write(hw, first, length, data); +} + +static int +eth_igb_get_module_info(struct rte_eth_dev *dev, + struct rte_eth_dev_module_info *modinfo) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + uint32_t status = 0; + uint16_t sff8472_rev, addr_mode; + bool page_swap = false; + + if (hw->phy.media_type == e1000_media_type_copper || + hw->phy.media_type == e1000_media_type_unknown) + return -EOPNOTSUPP; + + /* Check whether we support SFF-8472 or not */ + status = e1000_read_phy_reg_i2c(hw, IGB_SFF_8472_COMP, &sff8472_rev); + if (status) + return -EIO; + + /* addressing mode is not supported */ + status = e1000_read_phy_reg_i2c(hw, IGB_SFF_8472_SWAP, &addr_mode); + if (status) + return -EIO; + + /* addressing mode is not supported */ + if ((addr_mode & 0xFF) & IGB_SFF_ADDRESSING_MODE) { + PMD_DRV_LOG(ERR, + "Address change required to access page 0xA2, " + "but not supported. 
Please report the module " + "type to the driver maintainers.\n"); + page_swap = true; + } + + if ((sff8472_rev & 0xFF) == IGB_SFF_8472_UNSUP || page_swap) { + /* We have an SFP, but it does not support SFF-8472 */ + modinfo->type = RTE_ETH_MODULE_SFF_8079; + modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN; + } else { + /* We have an SFP which supports a revision of SFF-8472 */ + modinfo->type = RTE_ETH_MODULE_SFF_8472; + modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN; + } + + return 0; +} + +static int +eth_igb_get_module_eeprom(struct rte_eth_dev *dev, + struct rte_dev_eeprom_info *info) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + uint32_t status = 0; + uint16_t dataword[RTE_ETH_MODULE_SFF_8472_LEN / 2 + 1]; + u16 first_word, last_word; + int i = 0; + + if (info->length == 0) + return -EINVAL; + + first_word = info->offset >> 1; + last_word = (info->offset + info->length - 1) >> 1; + + /* Read EEPROM block, SFF-8079/SFF-8472, word at a time */ + for (i = 0; i < last_word - first_word + 1; i++) { + status = e1000_read_phy_reg_i2c(hw, (first_word + i) * 2, + &dataword[i]); + if (status) { + /* Error occurred while reading module */ + return -EIO; + } + + dataword[i] = rte_be_to_cpu_16(dataword[i]); + } + + memcpy(info->data, (u8 *)dataword + (info->offset & 1), info->length); + + return 0; +} + +static int +eth_igb_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + uint32_t vec = E1000_MISC_VEC_ID; + + if (rte_intr_allow_others(intr_handle)) + vec = E1000_RX_VEC_START; + + uint32_t mask = 1 << (queue_id + vec); + + E1000_WRITE_REG(hw, E1000_EIMC, mask); + E1000_WRITE_FLUSH(hw); + + return 0; +} + +static int +eth_igb_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + uint32_t vec = E1000_MISC_VEC_ID; + + if (rte_intr_allow_others(intr_handle)) + vec = E1000_RX_VEC_START; + + uint32_t mask = 1 << (queue_id + vec); + uint32_t regval; + + regval = E1000_READ_REG(hw, E1000_EIMS); + E1000_WRITE_REG(hw, E1000_EIMS, regval | mask); + E1000_WRITE_FLUSH(hw); + + rte_intr_ack(intr_handle); + + return 0; +} + +static void +eth_igb_write_ivar(struct e1000_hw *hw, uint8_t msix_vector, + uint8_t index, uint8_t offset) +{ + uint32_t val = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index); + + /* clear bits */ + val &= ~((uint32_t)0xFF << offset); + + /* write vector and valid bit */ + val |= (msix_vector | E1000_IVAR_VALID) << offset; + + E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, val); +} + +static void +eth_igb_assign_msix_vector(struct e1000_hw *hw, int8_t direction, + uint8_t queue, uint8_t msix_vector) +{ + uint32_t tmp = 0; + + if (hw->mac.type == e1000_82575) { + if (direction == 0) + tmp = E1000_EICR_RX_QUEUE0 << queue; + else if (direction == 1) + tmp = E1000_EICR_TX_QUEUE0 << queue; + E1000_WRITE_REG(hw, E1000_MSIXBM(msix_vector), tmp); + } else if (hw->mac.type == e1000_82576) { + if ((direction == 0) || (direction == 1)) + eth_igb_write_ivar(hw, msix_vector, queue & 0x7, + ((queue & 0x8) << 1) + + 8 * direction); + } else if ((hw->mac.type == e1000_82580) || + (hw->mac.type == e1000_i350) || + (hw->mac.type == e1000_i354) || + 
(hw->mac.type == e1000_i210) || + (hw->mac.type == e1000_i211)) { + if ((direction == 0) || (direction == 1)) + eth_igb_write_ivar(hw, msix_vector, + queue >> 1, + ((queue & 0x1) << 4) + + 8 * direction); + } +} + +/* Sets up the hardware to generate MSI-X interrupts properly + * @hw + * board private structure + */ +static void +eth_igb_configure_msix_intr(struct rte_eth_dev *dev) +{ + int queue_id; + uint32_t tmpval, regval, intr_mask; + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t vec = E1000_MISC_VEC_ID; + uint32_t base = E1000_MISC_VEC_ID; + uint32_t misc_shift = 0; + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + + /* won't configure msix register if no mapping is done + * between intr vector and event fd + */ + if (!rte_intr_dp_is_en(intr_handle)) + return; + + if (rte_intr_allow_others(intr_handle)) { + vec = base = E1000_RX_VEC_START; + misc_shift = 1; + } + + /* set interrupt vector for other causes */ + if (hw->mac.type == e1000_82575) { + tmpval = E1000_READ_REG(hw, E1000_CTRL_EXT); + /* enable MSI-X PBA support */ + tmpval |= E1000_CTRL_EXT_PBA_CLR; + + /* Auto-Mask interrupts upon ICR read */ + tmpval |= E1000_CTRL_EXT_EIAME; + tmpval |= E1000_CTRL_EXT_IRCA; + + E1000_WRITE_REG(hw, E1000_CTRL_EXT, tmpval); + + /* enable msix_other interrupt */ + E1000_WRITE_REG_ARRAY(hw, E1000_MSIXBM(0), 0, E1000_EIMS_OTHER); + regval = E1000_READ_REG(hw, E1000_EIAC); + E1000_WRITE_REG(hw, E1000_EIAC, regval | E1000_EIMS_OTHER); + regval = E1000_READ_REG(hw, E1000_EIAM); + E1000_WRITE_REG(hw, E1000_EIMS, regval | E1000_EIMS_OTHER); + } else if ((hw->mac.type == e1000_82576) || + (hw->mac.type == e1000_82580) || + (hw->mac.type == e1000_i350) || + (hw->mac.type == e1000_i354) || + (hw->mac.type == e1000_i210) || + (hw->mac.type == e1000_i211)) { + /* turn on MSI-X capability first */ + E1000_WRITE_REG(hw, E1000_GPIE, E1000_GPIE_MSIX_MODE | + E1000_GPIE_PBA | E1000_GPIE_EIAME | + E1000_GPIE_NSICR); + intr_mask = RTE_LEN2MASK(intr_handle->nb_efd, uint32_t) << + misc_shift; + + if (dev->data->dev_conf.intr_conf.lsc != 0) + intr_mask |= (1 << IGB_MSIX_OTHER_INTR_VEC); + + regval = E1000_READ_REG(hw, E1000_EIAC); + E1000_WRITE_REG(hw, E1000_EIAC, regval | intr_mask); + + /* enable msix_other interrupt */ + regval = E1000_READ_REG(hw, E1000_EIMS); + E1000_WRITE_REG(hw, E1000_EIMS, regval | intr_mask); + tmpval = (IGB_MSIX_OTHER_INTR_VEC | E1000_IVAR_VALID) << 8; + E1000_WRITE_REG(hw, E1000_IVAR_MISC, tmpval); + } + + /* use EIAM to auto-mask when MSI-X interrupt + * is asserted, this saves a register write for every interrupt + */ + intr_mask = RTE_LEN2MASK(intr_handle->nb_efd, uint32_t) << + misc_shift; + + if (dev->data->dev_conf.intr_conf.lsc != 0) + intr_mask |= (1 << IGB_MSIX_OTHER_INTR_VEC); + + regval = E1000_READ_REG(hw, E1000_EIAM); + E1000_WRITE_REG(hw, E1000_EIAM, regval | intr_mask); + + for (queue_id = 0; queue_id < dev->data->nb_rx_queues; queue_id++) { + eth_igb_assign_msix_vector(hw, 0, queue_id, vec); + intr_handle->intr_vec[queue_id] = vec; + if (vec < base + intr_handle->nb_efd - 1) + vec++; + } + + E1000_WRITE_FLUSH(hw); +} + +/* restore n-tuple filter */ +static inline void +igb_ntuple_filter_restore(struct rte_eth_dev *dev) +{ + struct e1000_filter_info *filter_info = + E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + struct e1000_5tuple_filter *p_5tuple; + struct e1000_2tuple_filter *p_2tuple; + + TAILQ_FOREACH(p_5tuple, &filter_info->fivetuple_list, 
entries) { + igb_inject_5tuple_filter_82576(dev, p_5tuple); + } + + TAILQ_FOREACH(p_2tuple, &filter_info->twotuple_list, entries) { + igb_inject_2uple_filter(dev, p_2tuple); + } +} + +/* restore SYN filter */ +static inline void +igb_syn_filter_restore(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_filter_info *filter_info = + E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + uint32_t synqf; + + synqf = filter_info->syn_info; + + if (synqf & E1000_SYN_FILTER_ENABLE) { + E1000_WRITE_REG(hw, E1000_SYNQF(0), synqf); + E1000_WRITE_FLUSH(hw); + } +} + +/* restore ethernet type filter */ +static inline void +igb_ethertype_filter_restore(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_filter_info *filter_info = + E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + int i; + + for (i = 0; i < E1000_MAX_ETQF_FILTERS; i++) { + if (filter_info->ethertype_mask & (1 << i)) { + E1000_WRITE_REG(hw, E1000_ETQF(i), + filter_info->ethertype_filters[i].etqf); + E1000_WRITE_FLUSH(hw); + } + } +} + +/* restore flex byte filter */ +static inline void +igb_flex_filter_restore(struct rte_eth_dev *dev) +{ + struct e1000_filter_info *filter_info = + E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + struct e1000_flex_filter *flex_filter; + + TAILQ_FOREACH(flex_filter, &filter_info->flex_list, entries) { + igb_inject_flex_filter(dev, flex_filter); + } +} + +/* restore rss filter */ +static inline void +igb_rss_filter_restore(struct rte_eth_dev *dev) +{ + struct e1000_filter_info *filter_info = + E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + + if (filter_info->rss_info.conf.queue_num) + igb_config_rss_filter(dev, &filter_info->rss_info, TRUE); +} + +/* restore all types filter */ +static int +igb_filter_restore(struct rte_eth_dev *dev) +{ + igb_ntuple_filter_restore(dev); + igb_ethertype_filter_restore(dev); + igb_syn_filter_restore(dev); + igb_flex_filter_restore(dev); + igb_rss_filter_restore(dev); + + return 0; +} + +RTE_PMD_REGISTER_PCI(net_e1000_igb, rte_igb_pmd); +RTE_PMD_REGISTER_PCI_TABLE(net_e1000_igb, pci_id_igb_map); +RTE_PMD_REGISTER_KMOD_DEP(net_e1000_igb, "* igb_uio | uio_pci_generic | vfio-pci"); +RTE_PMD_REGISTER_PCI(net_e1000_igb_vf, rte_igbvf_pmd); +RTE_PMD_REGISTER_PCI_TABLE(net_e1000_igb_vf, pci_id_igbvf_map); +RTE_PMD_REGISTER_KMOD_DEP(net_e1000_igb_vf, "* igb_uio | vfio-pci"); + +/* see e1000_logs.c */ +RTE_INIT(e1000_init_log) +{ + e1000_igb_init_log(); +} diff --git a/src/spdk/dpdk/drivers/net/e1000/igb_flow.c b/src/spdk/dpdk/drivers/net/e1000/igb_flow.c new file mode 100644 index 000000000..43fef889b --- /dev/null +++ b/src/spdk/dpdk/drivers/net/e1000/igb_flow.c @@ -0,0 +1,1922 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2016 Intel Corporation + */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "e1000_logs.h" +#include "base/e1000_api.h" +#include "e1000_ethdev.h" + +#define NEXT_ITEM_OF_PATTERN(item, pattern, index) \ + do { \ + item = (pattern) + (index); \ + while (item->type == RTE_FLOW_ITEM_TYPE_VOID) { \ + (index)++; \ + item = (pattern) + (index); \ + } \ + } while (0) + +#define NEXT_ITEM_OF_ACTION(act, actions, index) \ + do { \ + act = (actions) + (index); \ + while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {\ + 
(index)++; \ + act = (actions) + (index); \ + } \ + } while (0) + +#define IGB_FLEX_RAW_NUM 12 + +struct igb_flow_mem_list igb_flow_list; +struct igb_ntuple_filter_list igb_filter_ntuple_list; +struct igb_ethertype_filter_list igb_filter_ethertype_list; +struct igb_syn_filter_list igb_filter_syn_list; +struct igb_flex_filter_list igb_filter_flex_list; +struct igb_rss_filter_list igb_filter_rss_list; + +/** + * Please aware there's an asumption for all the parsers. + * rte_flow_item is using big endian, rte_flow_attr and + * rte_flow_action are using CPU order. + * Because the pattern is used to describe the packets, + * normally the packets should use network order. + */ + +/** + * Parse the rule to see if it is a n-tuple rule. + * And get the n-tuple filter info BTW. + * pattern: + * The first not void item can be ETH or IPV4. + * The second not void item must be IPV4 if the first one is ETH. + * The third not void item must be UDP or TCP or SCTP + * The next not void item must be END. + * action: + * The first not void action should be QUEUE. + * The next not void action should be END. + * pattern example: + * ITEM Spec Mask + * ETH NULL NULL + * IPV4 src_addr 192.168.1.20 0xFFFFFFFF + * dst_addr 192.167.3.50 0xFFFFFFFF + * next_proto_id 17 0xFF + * UDP/TCP/ src_port 80 0xFFFF + * SCTP dst_port 80 0xFFFF + * END + * other members in mask and spec should set to 0x00. + * item->last should be NULL. + */ +static int +cons_parse_ntuple_filter(const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_eth_ntuple_filter *filter, + struct rte_flow_error *error) +{ + const struct rte_flow_item *item; + const struct rte_flow_action *act; + const struct rte_flow_item_ipv4 *ipv4_spec; + const struct rte_flow_item_ipv4 *ipv4_mask; + const struct rte_flow_item_tcp *tcp_spec; + const struct rte_flow_item_tcp *tcp_mask; + const struct rte_flow_item_udp *udp_spec; + const struct rte_flow_item_udp *udp_mask; + const struct rte_flow_item_sctp *sctp_spec; + const struct rte_flow_item_sctp *sctp_mask; + uint32_t index; + + if (!pattern) { + rte_flow_error_set(error, + EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM, + NULL, "NULL pattern."); + return -rte_errno; + } + + if (!actions) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_NUM, + NULL, "NULL action."); + return -rte_errno; + } + if (!attr) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR, + NULL, "NULL attribute."); + return -rte_errno; + } + + /* parse pattern */ + index = 0; + + /* the first not void item can be MAC or IPv4 */ + NEXT_ITEM_OF_PATTERN(item, pattern, index); + + if (item->type != RTE_FLOW_ITEM_TYPE_ETH && + item->type != RTE_FLOW_ITEM_TYPE_IPV4) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by ntuple filter"); + return -rte_errno; + } + /* Skip Ethernet */ + if (item->type == RTE_FLOW_ITEM_TYPE_ETH) { + /*Not supported last point for range*/ + if (item->last) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + item, "Not supported last point for range"); + return -rte_errno; + } + /* if the first item is MAC, the content should be NULL */ + if (item->spec || item->mask) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by ntuple filter"); + return -rte_errno; + } + /* check if the next not void item is IPv4 */ + index++; + NEXT_ITEM_OF_PATTERN(item, pattern, index); + if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) { + 
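/*
 * Illustrative sketch of the cursor walk behind NEXT_ITEM_OF_PATTERN /
 * NEXT_ITEM_OF_ACTION: VOID entries are transparent, so the index is
 * advanced until a non-VOID item is found, and the parser then checks that
 * item against what the filter grammar allows next (optional ETH, then
 * IPV4, then an L4 item, then END for the n-tuple case).  The item_type
 * enum is a simplified stand-in for rte_flow_item_type.
 */
#include <stdio.h>

enum item_type { ITEM_VOID, ITEM_ETH, ITEM_IPV4, ITEM_UDP, ITEM_END };

static const enum item_type *
next_non_void(const enum item_type *pattern, unsigned int *index)
{
        while (pattern[*index] == ITEM_VOID)
                (*index)++;
        return &pattern[*index];
}

int
main(void)
{
        const enum item_type pattern[] = {
                ITEM_VOID, ITEM_ETH, ITEM_VOID, ITEM_IPV4, ITEM_UDP, ITEM_END,
        };
        unsigned int index = 0;
        const enum item_type *item;

        item = next_non_void(pattern, &index);
        if (*item == ITEM_ETH) {        /* ETH is optional and must be empty */
                index++;
                item = next_non_void(pattern, &index);
        }
        if (*item != ITEM_IPV4)
                return 1;               /* "Not supported by ntuple filter" */
        index++;
        item = next_non_void(pattern, &index);
        printf("L4 item type = %d\n", (int)*item);      /* ITEM_UDP */
        return 0;
}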
rte_flow_error_set(error, + EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by ntuple filter"); + return -rte_errno; + } + } + + /* get the IPv4 info */ + if (!item->spec || !item->mask) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Invalid ntuple mask"); + return -rte_errno; + } + /* Not supported last point for range */ + if (item->last) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + item, "Not supported last point for range"); + return -rte_errno; + } + + ipv4_mask = item->mask; + /** + * Only support src & dst addresses, protocol, + * others should be masked. + */ + + if (ipv4_mask->hdr.version_ihl || + ipv4_mask->hdr.type_of_service || + ipv4_mask->hdr.total_length || + ipv4_mask->hdr.packet_id || + ipv4_mask->hdr.fragment_offset || + ipv4_mask->hdr.time_to_live || + ipv4_mask->hdr.hdr_checksum) { + rte_flow_error_set(error, + EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by ntuple filter"); + return -rte_errno; + } + + filter->dst_ip_mask = ipv4_mask->hdr.dst_addr; + filter->src_ip_mask = ipv4_mask->hdr.src_addr; + filter->proto_mask = ipv4_mask->hdr.next_proto_id; + + ipv4_spec = item->spec; + filter->dst_ip = ipv4_spec->hdr.dst_addr; + filter->src_ip = ipv4_spec->hdr.src_addr; + filter->proto = ipv4_spec->hdr.next_proto_id; + + /* check if the next not void item is TCP or UDP or SCTP */ + index++; + NEXT_ITEM_OF_PATTERN(item, pattern, index); + if (item->type != RTE_FLOW_ITEM_TYPE_TCP && + item->type != RTE_FLOW_ITEM_TYPE_UDP && + item->type != RTE_FLOW_ITEM_TYPE_SCTP) { + memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by ntuple filter"); + return -rte_errno; + } + + /* Not supported last point for range */ + if (item->last) { + memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + item, "Not supported last point for range"); + return -rte_errno; + } + + /* get the TCP/UDP/SCTP info */ + if (item->type == RTE_FLOW_ITEM_TYPE_TCP) { + if (item->spec && item->mask) { + tcp_mask = item->mask; + + /** + * Only support src & dst ports, tcp flags, + * others should be masked. + */ + if (tcp_mask->hdr.sent_seq || + tcp_mask->hdr.recv_ack || + tcp_mask->hdr.data_off || + tcp_mask->hdr.rx_win || + tcp_mask->hdr.cksum || + tcp_mask->hdr.tcp_urp) { + memset(filter, 0, + sizeof(struct rte_eth_ntuple_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by ntuple filter"); + return -rte_errno; + } + + filter->dst_port_mask = tcp_mask->hdr.dst_port; + filter->src_port_mask = tcp_mask->hdr.src_port; + if (tcp_mask->hdr.tcp_flags == 0xFF) { + filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG; + } else if (!tcp_mask->hdr.tcp_flags) { + filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG; + } else { + memset(filter, 0, + sizeof(struct rte_eth_ntuple_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by ntuple filter"); + return -rte_errno; + } + + tcp_spec = item->spec; + filter->dst_port = tcp_spec->hdr.dst_port; + filter->src_port = tcp_spec->hdr.src_port; + filter->tcp_flags = tcp_spec->hdr.tcp_flags; + } + } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) { + if (item->spec && item->mask) { + udp_mask = item->mask; + + /** + * Only support src & dst ports, + * others should be masked. 
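/*
 * Illustrative sketch of the mask discipline enforced by the n-tuple parse
 * above: only the IPv4 source/destination addresses and protocol, the L4
 * ports and (for TCP) the whole flags byte may be matched, so every other
 * header field in the mask must be zero, and the TCP flags mask must be
 * all-ones or all-zeroes.  The structs are trimmed stand-ins for
 * rte_ipv4_hdr / rte_tcp_hdr, not the real definitions.
 */
#include <stdint.h>
#include <stdbool.h>

struct ipv4_mask {
        uint8_t  version_ihl, type_of_service;
        uint16_t total_length, packet_id, fragment_offset;
        uint8_t  time_to_live, next_proto_id;
        uint16_t hdr_checksum;
        uint32_t src_addr, dst_addr;
};

struct tcp_mask {
        uint16_t src_port, dst_port;
        uint32_t sent_seq, recv_ack;
        uint8_t  data_off, tcp_flags;
        uint16_t rx_win, cksum, tcp_urp;
};

static bool
ntuple_ipv4_mask_ok(const struct ipv4_mask *m)
{
        /* anything outside src/dst/proto must stay unmasked */
        return !(m->version_ihl || m->type_of_service || m->total_length ||
                 m->packet_id || m->fragment_offset || m->time_to_live ||
                 m->hdr_checksum);
}

static bool
ntuple_tcp_mask_ok(const struct tcp_mask *m)
{
        if (m->sent_seq || m->recv_ack || m->data_off ||
            m->rx_win || m->cksum || m->tcp_urp)
                return false;
        /* flags are either matched exactly or ignored entirely */
        return m->tcp_flags == 0xFF || m->tcp_flags == 0;
}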
+ */ + if (udp_mask->hdr.dgram_len || + udp_mask->hdr.dgram_cksum) { + memset(filter, 0, + sizeof(struct rte_eth_ntuple_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by ntuple filter"); + return -rte_errno; + } + + filter->dst_port_mask = udp_mask->hdr.dst_port; + filter->src_port_mask = udp_mask->hdr.src_port; + + udp_spec = item->spec; + filter->dst_port = udp_spec->hdr.dst_port; + filter->src_port = udp_spec->hdr.src_port; + } + } else { + if (item->spec && item->mask) { + sctp_mask = item->mask; + + /** + * Only support src & dst ports, + * others should be masked. + */ + if (sctp_mask->hdr.tag || + sctp_mask->hdr.cksum) { + memset(filter, 0, + sizeof(struct rte_eth_ntuple_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by ntuple filter"); + return -rte_errno; + } + + filter->dst_port_mask = sctp_mask->hdr.dst_port; + filter->src_port_mask = sctp_mask->hdr.src_port; + + sctp_spec = (const struct rte_flow_item_sctp *) + item->spec; + filter->dst_port = sctp_spec->hdr.dst_port; + filter->src_port = sctp_spec->hdr.src_port; + } + } + /* check if the next not void item is END */ + index++; + NEXT_ITEM_OF_PATTERN(item, pattern, index); + if (item->type != RTE_FLOW_ITEM_TYPE_END) { + memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by ntuple filter"); + return -rte_errno; + } + + /* parse action */ + index = 0; + + /** + * n-tuple only supports forwarding, + * check if the first not void action is QUEUE. + */ + NEXT_ITEM_OF_ACTION(act, actions, index); + if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) { + memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + item, "Not supported action."); + return -rte_errno; + } + filter->queue = + ((const struct rte_flow_action_queue *)act->conf)->index; + + /* check if the next not void item is END */ + index++; + NEXT_ITEM_OF_ACTION(act, actions, index); + if (act->type != RTE_FLOW_ACTION_TYPE_END) { + memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, "Not supported action."); + return -rte_errno; + } + + /* parse attr */ + /* must be input direction */ + if (!attr->ingress) { + memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, + attr, "Only support ingress."); + return -rte_errno; + } + + /* not supported */ + if (attr->egress) { + memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, + attr, "Not support egress."); + return -rte_errno; + } + + /* not supported */ + if (attr->transfer) { + memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, + attr, "No support for transfer."); + return -rte_errno; + } + + if (attr->priority > 0xFFFF) { + memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, + attr, "Error priority."); + return -rte_errno; + } + filter->priority = (uint16_t)attr->priority; + + return 0; +} + +/* a specific function for igb because the flags is specific */ +static int +igb_parse_ntuple_filter(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const 
struct rte_flow_action actions[], + struct rte_eth_ntuple_filter *filter, + struct rte_flow_error *error) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int ret; + + MAC_TYPE_FILTER_SUP(hw->mac.type); + + ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error); + + if (ret) + return ret; + + /* Igb doesn't support many priorities. */ + if (filter->priority > E1000_2TUPLE_MAX_PRI) { + memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + NULL, "Priority not supported by ntuple filter"); + return -rte_errno; + } + + if (hw->mac.type == e1000_82576) { + if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576) { + memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + NULL, "queue number not " + "supported by ntuple filter"); + return -rte_errno; + } + filter->flags |= RTE_5TUPLE_FLAGS; + } else { + if (filter->src_ip_mask || filter->dst_ip_mask || + filter->src_port_mask) { + memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + NULL, "only two tuple are " + "supported by this filter"); + return -rte_errno; + } + if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) { + memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + NULL, "queue number not " + "supported by ntuple filter"); + return -rte_errno; + } + filter->flags |= RTE_2TUPLE_FLAGS; + } + + return 0; +} + +/** + * Parse the rule to see if it is a ethertype rule. + * And get the ethertype filter info BTW. + * pattern: + * The first not void item can be ETH. + * The next not void item must be END. + * action: + * The first not void action should be QUEUE. + * The next not void action should be END. + * pattern example: + * ITEM Spec Mask + * ETH type 0x0807 0xFFFF + * END + * other members in mask and spec should set to 0x00. + * item->last should be NULL. + */ +static int +cons_parse_ethertype_filter(const struct rte_flow_attr *attr, + const struct rte_flow_item *pattern, + const struct rte_flow_action *actions, + struct rte_eth_ethertype_filter *filter, + struct rte_flow_error *error) +{ + const struct rte_flow_item *item; + const struct rte_flow_action *act; + const struct rte_flow_item_eth *eth_spec; + const struct rte_flow_item_eth *eth_mask; + const struct rte_flow_action_queue *act_q; + uint32_t index; + + if (!pattern) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM_NUM, + NULL, "NULL pattern."); + return -rte_errno; + } + + if (!actions) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_NUM, + NULL, "NULL action."); + return -rte_errno; + } + + if (!attr) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR, + NULL, "NULL attribute."); + return -rte_errno; + } + + /* Parse pattern */ + index = 0; + + /* The first non-void item should be MAC. */ + NEXT_ITEM_OF_PATTERN(item, pattern, index); + if (item->type != RTE_FLOW_ITEM_TYPE_ETH) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by ethertype filter"); + return -rte_errno; + } + + /*Not supported last point for range*/ + if (item->last) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + item, "Not supported last point for range"); + return -rte_errno; + } + + /* Get the MAC info. 
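/*
 * Illustrative sketch of the Ethernet-mask rule checked just below in
 * cons_parse_ethertype_filter(): the source MAC mask must be all zero, the
 * destination MAC mask must be all zero (ignore the MAC) or all ones
 * (compare it exactly), and the EtherType mask must be 0xFFFF.  Plain
 * 6-byte arrays stand in for rte_ether_addr here.
 */
#include <stdint.h>
#include <stdbool.h>

#define ETH_ALEN 6

static bool
is_all(const uint8_t mac[ETH_ALEN], uint8_t v)
{
        for (int i = 0; i < ETH_ALEN; i++)
                if (mac[i] != v)
                        return false;
        return true;
}

static int
ethertype_mask_check(const uint8_t src_mask[ETH_ALEN],
                     const uint8_t dst_mask[ETH_ALEN],
                     uint16_t type_mask, bool *match_dst_mac)
{
        if (!is_all(src_mask, 0x00))
                return -1;              /* "Invalid ether address mask" */
        if (!is_all(dst_mask, 0x00) && !is_all(dst_mask, 0xFF))
                return -1;
        if (type_mask != 0xFFFF)
                return -1;              /* "Invalid ethertype mask" */
        /* a full destination mask means the filter also compares the MAC */
        *match_dst_mac = is_all(dst_mask, 0xFF);
        return 0;
}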
*/ + if (!item->spec || !item->mask) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by ethertype filter"); + return -rte_errno; + } + + eth_spec = item->spec; + eth_mask = item->mask; + + /* Mask bits of source MAC address must be full of 0. + * Mask bits of destination MAC address must be full + * of 1 or full of 0. + */ + if (!rte_is_zero_ether_addr(ð_mask->src) || + (!rte_is_zero_ether_addr(ð_mask->dst) && + !rte_is_broadcast_ether_addr(ð_mask->dst))) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Invalid ether address mask"); + return -rte_errno; + } + + if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Invalid ethertype mask"); + return -rte_errno; + } + + /* If mask bits of destination MAC address + * are full of 1, set RTE_ETHTYPE_FLAGS_MAC. + */ + if (rte_is_broadcast_ether_addr(ð_mask->dst)) { + filter->mac_addr = eth_spec->dst; + filter->flags |= RTE_ETHTYPE_FLAGS_MAC; + } else { + filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC; + } + filter->ether_type = rte_be_to_cpu_16(eth_spec->type); + + /* Check if the next non-void item is END. */ + index++; + NEXT_ITEM_OF_PATTERN(item, pattern, index); + if (item->type != RTE_FLOW_ITEM_TYPE_END) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by ethertype filter."); + return -rte_errno; + } + + /* Parse action */ + + index = 0; + /* Check if the first non-void action is QUEUE or DROP. */ + NEXT_ITEM_OF_ACTION(act, actions, index); + if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE && + act->type != RTE_FLOW_ACTION_TYPE_DROP) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, "Not supported action."); + return -rte_errno; + } + + if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) { + act_q = (const struct rte_flow_action_queue *)act->conf; + filter->queue = act_q->index; + } else { + filter->flags |= RTE_ETHTYPE_FLAGS_DROP; + } + + /* Check if the next non-void item is END */ + index++; + NEXT_ITEM_OF_ACTION(act, actions, index); + if (act->type != RTE_FLOW_ACTION_TYPE_END) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, "Not supported action."); + return -rte_errno; + } + + /* Parse attr */ + /* Must be input direction */ + if (!attr->ingress) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, + attr, "Only support ingress."); + return -rte_errno; + } + + /* Not supported */ + if (attr->egress) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, + attr, "Not support egress."); + return -rte_errno; + } + + /* Not supported */ + if (attr->transfer) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, + attr, "No support for transfer."); + return -rte_errno; + } + + /* Not supported */ + if (attr->priority) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, + attr, "Not support priority."); + return -rte_errno; + } + + /* Not supported */ + if (attr->group) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_GROUP, + attr, "Not support group."); + return -rte_errno; + } + + return 0; +} + +static int +igb_parse_ethertype_filter(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_eth_ethertype_filter *filter, + struct rte_flow_error *error) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int ret; 
+ + MAC_TYPE_FILTER_SUP(hw->mac.type); + + ret = cons_parse_ethertype_filter(attr, pattern, + actions, filter, error); + + if (ret) + return ret; + + if (hw->mac.type == e1000_82576) { + if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576) { + memset(filter, 0, sizeof( + struct rte_eth_ethertype_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + NULL, "queue number not supported " + "by ethertype filter"); + return -rte_errno; + } + } else { + if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) { + memset(filter, 0, sizeof( + struct rte_eth_ethertype_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + NULL, "queue number not supported " + "by ethertype filter"); + return -rte_errno; + } + } + + if (filter->ether_type == RTE_ETHER_TYPE_IPV4 || + filter->ether_type == RTE_ETHER_TYPE_IPV6) { + memset(filter, 0, sizeof(struct rte_eth_ethertype_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + NULL, "IPv4/IPv6 not supported by ethertype filter"); + return -rte_errno; + } + + if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) { + memset(filter, 0, sizeof(struct rte_eth_ethertype_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + NULL, "mac compare is unsupported"); + return -rte_errno; + } + + if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) { + memset(filter, 0, sizeof(struct rte_eth_ethertype_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + NULL, "drop option is unsupported"); + return -rte_errno; + } + + return 0; +} + +/** + * Parse the rule to see if it is a TCP SYN rule. + * And get the TCP SYN filter info BTW. + * pattern: + * The first not void item must be ETH. + * The second not void item must be IPV4 or IPV6. + * The third not void item must be TCP. + * The next not void item must be END. + * action: + * The first not void action should be QUEUE. + * The next not void action should be END. + * pattern example: + * ITEM Spec Mask + * ETH NULL NULL + * IPV4/IPV6 NULL NULL + * TCP tcp_flags 0x02 0xFF + * END + * other members in mask and spec should set to 0x00. + * item->last should be NULL. 
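/*
 * Illustrative sketch of the igb-specific constraints that
 * igb_parse_ethertype_filter() layers on top of the generic parse above:
 * the queue index must fit the per-MAC-type queue count, EtherTypes the
 * NIC already steers itself (IPv4/IPv6) are rejected, and neither MAC
 * comparison nor the DROP action is available for this filter.  The
 * numeric queue limits are illustrative placeholders, not the driver's
 * IGB_MAX_RX_QUEUE_NUM* values.
 */
#include <stdint.h>
#include <stdbool.h>

enum mac_type { MAC_82576, MAC_OTHER };

static int
igb_ethertype_check(enum mac_type mac, uint16_t queue, uint16_t ether_type,
                    bool match_mac, bool drop)
{
        uint16_t max_queues = (mac == MAC_82576) ? 16 : 8;  /* placeholder limits */

        if (queue >= max_queues)
                return -1;      /* queue number not supported */
        if (ether_type == 0x0800 || ether_type == 0x86DD)
                return -1;      /* IPv4/IPv6 are handled elsewhere, not by ETQF */
        if (match_mac)
                return -1;      /* MAC compare is unsupported */
        if (drop)
                return -1;      /* drop option is unsupported */
        return 0;
}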
+ */ +static int +cons_parse_syn_filter(const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_eth_syn_filter *filter, + struct rte_flow_error *error) +{ + const struct rte_flow_item *item; + const struct rte_flow_action *act; + const struct rte_flow_item_tcp *tcp_spec; + const struct rte_flow_item_tcp *tcp_mask; + const struct rte_flow_action_queue *act_q; + uint32_t index; + + if (!pattern) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM_NUM, + NULL, "NULL pattern."); + return -rte_errno; + } + + if (!actions) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_NUM, + NULL, "NULL action."); + return -rte_errno; + } + + if (!attr) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR, + NULL, "NULL attribute."); + return -rte_errno; + } + + /* parse pattern */ + index = 0; + + /* the first not void item should be MAC or IPv4 or IPv6 or TCP */ + NEXT_ITEM_OF_PATTERN(item, pattern, index); + if (item->type != RTE_FLOW_ITEM_TYPE_ETH && + item->type != RTE_FLOW_ITEM_TYPE_IPV4 && + item->type != RTE_FLOW_ITEM_TYPE_IPV6 && + item->type != RTE_FLOW_ITEM_TYPE_TCP) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by syn filter"); + return -rte_errno; + } + /*Not supported last point for range*/ + if (item->last) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + item, "Not supported last point for range"); + return -rte_errno; + } + + /* Skip Ethernet */ + if (item->type == RTE_FLOW_ITEM_TYPE_ETH) { + /* if the item is MAC, the content should be NULL */ + if (item->spec || item->mask) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Invalid SYN address mask"); + return -rte_errno; + } + + /* check if the next not void item is IPv4 or IPv6 */ + index++; + NEXT_ITEM_OF_PATTERN(item, pattern, index); + if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 && + item->type != RTE_FLOW_ITEM_TYPE_IPV6) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by syn filter"); + return -rte_errno; + } + } + + /* Skip IP */ + if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 || + item->type == RTE_FLOW_ITEM_TYPE_IPV6) { + /* if the item is IP, the content should be NULL */ + if (item->spec || item->mask) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Invalid SYN mask"); + return -rte_errno; + } + + /* check if the next not void item is TCP */ + index++; + NEXT_ITEM_OF_PATTERN(item, pattern, index); + if (item->type != RTE_FLOW_ITEM_TYPE_TCP) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by syn filter"); + return -rte_errno; + } + } + + /* Get the TCP info. Only support SYN. 
*/ + if (!item->spec || !item->mask) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Invalid SYN mask"); + return -rte_errno; + } + /*Not supported last point for range*/ + if (item->last) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + item, "Not supported last point for range"); + return -rte_errno; + } + + tcp_spec = item->spec; + tcp_mask = item->mask; + if (!(tcp_spec->hdr.tcp_flags & RTE_TCP_SYN_FLAG) || + tcp_mask->hdr.src_port || + tcp_mask->hdr.dst_port || + tcp_mask->hdr.sent_seq || + tcp_mask->hdr.recv_ack || + tcp_mask->hdr.data_off || + tcp_mask->hdr.tcp_flags != RTE_TCP_SYN_FLAG || + tcp_mask->hdr.rx_win || + tcp_mask->hdr.cksum || + tcp_mask->hdr.tcp_urp) { + memset(filter, 0, sizeof(struct rte_eth_syn_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by syn filter"); + return -rte_errno; + } + + /* check if the next not void item is END */ + index++; + NEXT_ITEM_OF_PATTERN(item, pattern, index); + if (item->type != RTE_FLOW_ITEM_TYPE_END) { + memset(filter, 0, sizeof(struct rte_eth_syn_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by syn filter"); + return -rte_errno; + } + + /* parse action */ + index = 0; + + /* check if the first not void action is QUEUE. */ + NEXT_ITEM_OF_ACTION(act, actions, index); + if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) { + memset(filter, 0, sizeof(struct rte_eth_syn_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, "Not supported action."); + return -rte_errno; + } + + act_q = (const struct rte_flow_action_queue *)act->conf; + filter->queue = act_q->index; + + /* check if the next not void item is END */ + index++; + NEXT_ITEM_OF_ACTION(act, actions, index); + if (act->type != RTE_FLOW_ACTION_TYPE_END) { + memset(filter, 0, sizeof(struct rte_eth_syn_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, "Not supported action."); + return -rte_errno; + } + + /* parse attr */ + /* must be input direction */ + if (!attr->ingress) { + memset(filter, 0, sizeof(struct rte_eth_syn_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, + attr, "Only support ingress."); + return -rte_errno; + } + + /* not supported */ + if (attr->egress) { + memset(filter, 0, sizeof(struct rte_eth_syn_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, + attr, "Not support egress."); + return -rte_errno; + } + + /* not supported */ + if (attr->transfer) { + memset(filter, 0, sizeof(struct rte_eth_syn_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, + attr, "No support for transfer."); + return -rte_errno; + } + + /* Support 2 priorities, the lowest or highest. 
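/*
 * Illustrative sketch of the TCP match accepted by the SYN parse above:
 * the spec must have the SYN bit set, the flags mask must cover exactly
 * the SYN bit, and every other TCP header field must be left unmasked, so
 * the filter only steers connection-initiating segments.  TCP_SYN below is
 * the standard flag value that RTE_TCP_SYN_FLAG denotes.
 */
#include <stdint.h>
#include <stdbool.h>

#define TCP_SYN 0x02u

struct tcp_match {
        uint16_t src_port, dst_port;
        uint32_t sent_seq, recv_ack;
        uint8_t  data_off, tcp_flags;
        uint16_t rx_win, cksum, tcp_urp;
};

static bool
syn_filter_ok(const struct tcp_match *spec, const struct tcp_match *mask)
{
        if (!(spec->tcp_flags & TCP_SYN))
                return false;           /* the rule must ask for SYN */
        if (mask->tcp_flags != TCP_SYN)
                return false;           /* ...and only for SYN */
        /* ports, sequence numbers, window, checksum etc. must stay wild */
        return !(mask->src_port || mask->dst_port || mask->sent_seq ||
                 mask->recv_ack || mask->data_off || mask->rx_win ||
                 mask->cksum || mask->tcp_urp);
}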
*/ + if (!attr->priority) { + filter->hig_pri = 0; + } else if (attr->priority == (uint32_t)~0U) { + filter->hig_pri = 1; + } else { + memset(filter, 0, sizeof(struct rte_eth_syn_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, + attr, "Not support priority."); + return -rte_errno; + } + + return 0; +} + +static int +igb_parse_syn_filter(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_eth_syn_filter *filter, + struct rte_flow_error *error) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int ret; + + MAC_TYPE_FILTER_SUP(hw->mac.type); + + ret = cons_parse_syn_filter(attr, pattern, + actions, filter, error); + + if (hw->mac.type == e1000_82576) { + if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576) { + memset(filter, 0, sizeof(struct rte_eth_syn_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + NULL, "queue number not " + "supported by syn filter"); + return -rte_errno; + } + } else { + if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) { + memset(filter, 0, sizeof(struct rte_eth_syn_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + NULL, "queue number not " + "supported by syn filter"); + return -rte_errno; + } + } + + if (ret) + return ret; + + return 0; +} + +/** + * Parse the rule to see if it is a flex byte rule. + * And get the flex byte filter info BTW. + * pattern: + * The first not void item must be RAW. + * The second not void item can be RAW or END. + * The third not void item can be RAW or END. + * The last not void item must be END. + * action: + * The first not void action should be QUEUE. + * The next not void action should be END. + * pattern example: + * ITEM Spec Mask + * RAW relative 0 0x1 + * offset 0 0xFFFFFFFF + * pattern {0x08, 0x06} {0xFF, 0xFF} + * RAW relative 1 0x1 + * offset 100 0xFFFFFFFF + * pattern {0x11, 0x22, 0x33} {0xFF, 0xFF, 0xFF} + * END + * other members in mask and spec should set to 0x00. + * item->last should be NULL. 
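/*
 * Illustrative sketch of how a flex-byte rule like the RAW pattern
 * described above gets packed into the hardware layout (the logic the
 * following cons_parse_flex_filter() implements): the literal pattern
 * bytes land in a byte array at their absolute offset, a parallel bitmap
 * gets one bit per byte that must be compared (MSB-first within each mask
 * byte), and the filter length is rounded up to a multiple of 8.  Relative
 * RAW chaining is omitted for brevity, and the buffer size is an
 * illustrative capacity, not RTE_FLEX_FILTER_MAXLEN.
 */
#include <stdint.h>
#include <string.h>
#include <limits.h>

#define FLEX_MAX_LEN 128                /* illustrative capacity */

struct flex_rule {
        uint8_t  bytes[FLEX_MAX_LEN];
        uint8_t  mask[FLEX_MAX_LEN / CHAR_BIT];
        uint32_t len;
};

static int
flex_add_segment(struct flex_rule *rule, uint32_t offset,
                 const uint8_t *pattern, uint32_t length)
{
        if (offset + length > FLEX_MAX_LEN)
                return -1;

        memcpy(&rule->bytes[offset], pattern, length);

        /* one "compare this byte" bit per pattern byte, MSB first */
        for (uint32_t i = 0; i < length; i++) {
                uint32_t byte = offset + i;

                rule->mask[byte / CHAR_BIT] |=
                        (uint8_t)(0x80u >> (byte % CHAR_BIT));
        }

        if (offset + length > rule->len)
                rule->len = offset + length;
        rule->len = (rule->len + 7u) & ~7u;     /* round up to 8, like RTE_ALIGN */
        return 0;
}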
+ */ +static int +cons_parse_flex_filter(const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_eth_flex_filter *filter, + struct rte_flow_error *error) +{ + const struct rte_flow_item *item; + const struct rte_flow_action *act; + const struct rte_flow_item_raw *raw_spec; + const struct rte_flow_item_raw *raw_mask; + const struct rte_flow_action_queue *act_q; + uint32_t index, i, offset, total_offset; + uint32_t max_offset = 0; + int32_t shift, j, raw_index = 0; + int32_t relative[IGB_FLEX_RAW_NUM] = {0}; + int32_t raw_offset[IGB_FLEX_RAW_NUM] = {0}; + + if (!pattern) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM_NUM, + NULL, "NULL pattern."); + return -rte_errno; + } + + if (!actions) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_NUM, + NULL, "NULL action."); + return -rte_errno; + } + + if (!attr) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR, + NULL, "NULL attribute."); + return -rte_errno; + } + + /* parse pattern */ + index = 0; + +item_loop: + + /* the first not void item should be RAW */ + NEXT_ITEM_OF_PATTERN(item, pattern, index); + if (item->type != RTE_FLOW_ITEM_TYPE_RAW) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by flex filter"); + return -rte_errno; + } + /*Not supported last point for range*/ + if (item->last) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + item, "Not supported last point for range"); + return -rte_errno; + } + + raw_spec = item->spec; + raw_mask = item->mask; + + if (!raw_mask->length || + !raw_mask->relative) { + memset(filter, 0, sizeof(struct rte_eth_flex_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by flex filter"); + return -rte_errno; + } + + if (raw_mask->offset) + offset = raw_spec->offset; + else + offset = 0; + + for (j = 0; j < raw_spec->length; j++) { + if (raw_mask->pattern[j] != 0xFF) { + memset(filter, 0, sizeof(struct rte_eth_flex_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by flex filter"); + return -rte_errno; + } + } + + total_offset = 0; + + if (raw_spec->relative) { + for (j = raw_index; j > 0; j--) { + total_offset += raw_offset[j - 1]; + if (!relative[j - 1]) + break; + } + if (total_offset + raw_spec->length + offset > max_offset) + max_offset = total_offset + raw_spec->length + offset; + } else { + if (raw_spec->length + offset > max_offset) + max_offset = raw_spec->length + offset; + } + + if ((raw_spec->length + offset + total_offset) > + RTE_FLEX_FILTER_MAXLEN) { + memset(filter, 0, sizeof(struct rte_eth_flex_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by flex filter"); + return -rte_errno; + } + + if (raw_spec->relative == 0) { + for (j = 0; j < raw_spec->length; j++) + filter->bytes[offset + j] = + raw_spec->pattern[j]; + j = offset / CHAR_BIT; + shift = offset % CHAR_BIT; + } else { + for (j = 0; j < raw_spec->length; j++) + filter->bytes[total_offset + offset + j] = + raw_spec->pattern[j]; + j = (total_offset + offset) / CHAR_BIT; + shift = (total_offset + offset) % CHAR_BIT; + } + + i = 0; + + for ( ; shift < CHAR_BIT; shift++) { + filter->mask[j] |= (0x80 >> shift); + i++; + if (i == raw_spec->length) + break; + if (shift == (CHAR_BIT - 1)) { + j++; + shift = -1; + } + } + + relative[raw_index] = raw_spec->relative; + raw_offset[raw_index] = offset + 
raw_spec->length; + raw_index++; + + /* check if the next not void item is RAW */ + index++; + NEXT_ITEM_OF_PATTERN(item, pattern, index); + if (item->type != RTE_FLOW_ITEM_TYPE_RAW && + item->type != RTE_FLOW_ITEM_TYPE_END) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by flex filter"); + return -rte_errno; + } + + /* go back to parser */ + if (item->type == RTE_FLOW_ITEM_TYPE_RAW) { + /* if the item is RAW, the content should be parse */ + goto item_loop; + } + + filter->len = RTE_ALIGN(max_offset, 8); + + /* parse action */ + index = 0; + + /* check if the first not void action is QUEUE. */ + NEXT_ITEM_OF_ACTION(act, actions, index); + if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) { + memset(filter, 0, sizeof(struct rte_eth_flex_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, "Not supported action."); + return -rte_errno; + } + + act_q = (const struct rte_flow_action_queue *)act->conf; + filter->queue = act_q->index; + + /* check if the next not void item is END */ + index++; + NEXT_ITEM_OF_ACTION(act, actions, index); + if (act->type != RTE_FLOW_ACTION_TYPE_END) { + memset(filter, 0, sizeof(struct rte_eth_flex_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, "Not supported action."); + return -rte_errno; + } + + /* parse attr */ + /* must be input direction */ + if (!attr->ingress) { + memset(filter, 0, sizeof(struct rte_eth_flex_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, + attr, "Only support ingress."); + return -rte_errno; + } + + /* not supported */ + if (attr->egress) { + memset(filter, 0, sizeof(struct rte_eth_flex_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, + attr, "Not support egress."); + return -rte_errno; + } + + /* not supported */ + if (attr->transfer) { + memset(filter, 0, sizeof(struct rte_eth_flex_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, + attr, "No support for transfer."); + return -rte_errno; + } + + if (attr->priority > 0xFFFF) { + memset(filter, 0, sizeof(struct rte_eth_flex_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, + attr, "Error priority."); + return -rte_errno; + } + + filter->priority = (uint16_t)attr->priority; + + return 0; +} + +static int +igb_parse_flex_filter(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_eth_flex_filter *filter, + struct rte_flow_error *error) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int ret; + + MAC_TYPE_FILTER_SUP_EXT(hw->mac.type); + + ret = cons_parse_flex_filter(attr, pattern, + actions, filter, error); + + if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) { + memset(filter, 0, sizeof(struct rte_eth_flex_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + NULL, "queue number not supported by flex filter"); + return -rte_errno; + } + + if (filter->len == 0 || filter->len > E1000_MAX_FLEX_FILTER_LEN || + filter->len % sizeof(uint64_t) != 0) { + PMD_DRV_LOG(ERR, "filter's length is out of range"); + return -EINVAL; + } + + if (filter->priority > E1000_MAX_FLEX_FILTER_PRI) { + PMD_DRV_LOG(ERR, "filter's priority is out of range"); + return -EINVAL; + } + + if (ret) + return ret; + + return 0; +} + +static int +igb_parse_rss_filter(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct 
rte_flow_action actions[], + struct igb_rte_flow_rss_conf *rss_conf, + struct rte_flow_error *error) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + const struct rte_flow_action *act; + const struct rte_flow_action_rss *rss; + uint16_t n, index; + + /** + * rss only supports forwarding, + * check if the first not void action is RSS. + */ + index = 0; + NEXT_ITEM_OF_ACTION(act, actions, index); + if (act->type != RTE_FLOW_ACTION_TYPE_RSS) { + memset(rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, "Not supported action."); + return -rte_errno; + } + + rss = (const struct rte_flow_action_rss *)act->conf; + + if (!rss || !rss->queue_num) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, + "no valid queues"); + return -rte_errno; + } + + for (n = 0; n < rss->queue_num; n++) { + if (rss->queue[n] >= dev->data->nb_rx_queues) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, + "queue id > max number of queues"); + return -rte_errno; + } + } + + if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act, + "non-default RSS hash functions are not supported"); + if (rss->level) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act, + "a nonzero RSS encapsulation level is not supported"); + if (rss->key_len && rss->key_len != RTE_DIM(rss_conf->key)) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act, + "RSS hash key must be exactly 40 bytes"); + if (((hw->mac.type == e1000_82576) && + (rss->queue_num > IGB_MAX_RX_QUEUE_NUM_82576)) || + ((hw->mac.type != e1000_82576) && + (rss->queue_num > IGB_MAX_RX_QUEUE_NUM))) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act, + "too many queues for RSS context"); + if (igb_rss_conf_init(dev, rss_conf, rss)) + return rte_flow_error_set + (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act, + "RSS context initialization failure"); + + /* check if the next not void item is END */ + index++; + NEXT_ITEM_OF_ACTION(act, actions, index); + if (act->type != RTE_FLOW_ACTION_TYPE_END) { + memset(rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, "Not supported action."); + return -rte_errno; + } + + /* parse attr */ + /* must be input direction */ + if (!attr->ingress) { + memset(rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, + attr, "Only support ingress."); + return -rte_errno; + } + + /* not supported */ + if (attr->egress) { + memset(rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, + attr, "Not support egress."); + return -rte_errno; + } + + /* not supported */ + if (attr->transfer) { + memset(rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, + attr, "No support for transfer."); + return -rte_errno; + } + + if (attr->priority > 0xFFFF) { + memset(rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, + attr, "Error priority."); + return -rte_errno; + } + + return 0; +} + +/** + * Create a flow rule. + * Theorically one rule can match more than one filters. + * We will let it use the filter which it hitt first. 
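/*
 * Illustrative sketch of the "first parser that accepts the rule wins"
 * dispatch described above: the create/validate paths try the n-tuple,
 * ethertype, SYN, flex and RSS parsers in a fixed order and stop at the
 * first success, so a rule that could legally match several filter types
 * is bound to the earliest one in that sequence.  The parser names and
 * signatures below are simplified stand-ins for the driver's
 * igb_parse_*_filter() functions.
 */
#include <stdio.h>

struct rule;                            /* opaque stand-in for attr/pattern/actions */

typedef int (*rule_parser)(const struct rule *r);

static int parse_ntuple(const struct rule *r)    { (void)r; return -1; }
static int parse_ethertype(const struct rule *r) { (void)r; return -1; }
static int parse_syn(const struct rule *r)       { (void)r; return 0;  }
static int parse_flex(const struct rule *r)      { (void)r; return 0;  }

int
main(void)
{
        static const struct { const char *name; rule_parser fn; } parsers[] = {
                { "ntuple",    parse_ntuple },
                { "ethertype", parse_ethertype },
                { "syn",       parse_syn },
                { "flex",      parse_flex },    /* never reached for this rule */
        };

        for (unsigned int i = 0; i < sizeof(parsers) / sizeof(parsers[0]); i++) {
                if (parsers[i].fn(NULL) == 0) {
                        printf("rule accepted as a %s filter\n", parsers[i].name);
                        return 0;
                }
        }
        printf("rule not supported\n");
        return 1;
}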
+ * So, the sequence matters. + */ +static struct rte_flow * +igb_flow_create(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + int ret; + struct rte_eth_ntuple_filter ntuple_filter; + struct rte_eth_ethertype_filter ethertype_filter; + struct rte_eth_syn_filter syn_filter; + struct rte_eth_flex_filter flex_filter; + struct igb_rte_flow_rss_conf rss_conf; + struct rte_flow *flow = NULL; + struct igb_ntuple_filter_ele *ntuple_filter_ptr; + struct igb_ethertype_filter_ele *ethertype_filter_ptr; + struct igb_eth_syn_filter_ele *syn_filter_ptr; + struct igb_flex_filter_ele *flex_filter_ptr; + struct igb_rss_conf_ele *rss_filter_ptr; + struct igb_flow_mem *igb_flow_mem_ptr; + + flow = rte_zmalloc("igb_rte_flow", sizeof(struct rte_flow), 0); + if (!flow) { + PMD_DRV_LOG(ERR, "failed to allocate memory"); + return (struct rte_flow *)flow; + } + igb_flow_mem_ptr = rte_zmalloc("igb_flow_mem", + sizeof(struct igb_flow_mem), 0); + if (!igb_flow_mem_ptr) { + PMD_DRV_LOG(ERR, "failed to allocate memory"); + rte_free(flow); + return NULL; + } + igb_flow_mem_ptr->flow = flow; + igb_flow_mem_ptr->dev = dev; + TAILQ_INSERT_TAIL(&igb_flow_list, + igb_flow_mem_ptr, entries); + + memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter)); + ret = igb_parse_ntuple_filter(dev, attr, pattern, + actions, &ntuple_filter, error); + if (!ret) { + ret = igb_add_del_ntuple_filter(dev, &ntuple_filter, TRUE); + if (!ret) { + ntuple_filter_ptr = rte_zmalloc("igb_ntuple_filter", + sizeof(struct igb_ntuple_filter_ele), 0); + if (!ntuple_filter_ptr) { + PMD_DRV_LOG(ERR, "failed to allocate memory"); + goto out; + } + + rte_memcpy(&ntuple_filter_ptr->filter_info, + &ntuple_filter, + sizeof(struct rte_eth_ntuple_filter)); + TAILQ_INSERT_TAIL(&igb_filter_ntuple_list, + ntuple_filter_ptr, entries); + flow->rule = ntuple_filter_ptr; + flow->filter_type = RTE_ETH_FILTER_NTUPLE; + return flow; + } + goto out; + } + + memset(ðertype_filter, 0, sizeof(struct rte_eth_ethertype_filter)); + ret = igb_parse_ethertype_filter(dev, attr, pattern, + actions, ðertype_filter, error); + if (!ret) { + ret = igb_add_del_ethertype_filter(dev, + ðertype_filter, TRUE); + if (!ret) { + ethertype_filter_ptr = rte_zmalloc( + "igb_ethertype_filter", + sizeof(struct igb_ethertype_filter_ele), 0); + if (!ethertype_filter_ptr) { + PMD_DRV_LOG(ERR, "failed to allocate memory"); + goto out; + } + + rte_memcpy(ðertype_filter_ptr->filter_info, + ðertype_filter, + sizeof(struct rte_eth_ethertype_filter)); + TAILQ_INSERT_TAIL(&igb_filter_ethertype_list, + ethertype_filter_ptr, entries); + flow->rule = ethertype_filter_ptr; + flow->filter_type = RTE_ETH_FILTER_ETHERTYPE; + return flow; + } + goto out; + } + + memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter)); + ret = igb_parse_syn_filter(dev, attr, pattern, + actions, &syn_filter, error); + if (!ret) { + ret = eth_igb_syn_filter_set(dev, &syn_filter, TRUE); + if (!ret) { + syn_filter_ptr = rte_zmalloc("igb_syn_filter", + sizeof(struct igb_eth_syn_filter_ele), 0); + if (!syn_filter_ptr) { + PMD_DRV_LOG(ERR, "failed to allocate memory"); + goto out; + } + + rte_memcpy(&syn_filter_ptr->filter_info, + &syn_filter, + sizeof(struct rte_eth_syn_filter)); + TAILQ_INSERT_TAIL(&igb_filter_syn_list, + syn_filter_ptr, + entries); + flow->rule = syn_filter_ptr; + flow->filter_type = RTE_ETH_FILTER_SYN; + return flow; + } + goto out; + } + + memset(&flex_filter, 0, sizeof(struct 
rte_eth_flex_filter)); + ret = igb_parse_flex_filter(dev, attr, pattern, + actions, &flex_filter, error); + if (!ret) { + ret = eth_igb_add_del_flex_filter(dev, &flex_filter, TRUE); + if (!ret) { + flex_filter_ptr = rte_zmalloc("igb_flex_filter", + sizeof(struct igb_flex_filter_ele), 0); + if (!flex_filter_ptr) { + PMD_DRV_LOG(ERR, "failed to allocate memory"); + goto out; + } + + rte_memcpy(&flex_filter_ptr->filter_info, + &flex_filter, + sizeof(struct rte_eth_flex_filter)); + TAILQ_INSERT_TAIL(&igb_filter_flex_list, + flex_filter_ptr, entries); + flow->rule = flex_filter_ptr; + flow->filter_type = RTE_ETH_FILTER_FLEXIBLE; + return flow; + } + } + + memset(&rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf)); + ret = igb_parse_rss_filter(dev, attr, + actions, &rss_conf, error); + if (!ret) { + ret = igb_config_rss_filter(dev, &rss_conf, TRUE); + if (!ret) { + rss_filter_ptr = rte_zmalloc("igb_rss_filter", + sizeof(struct igb_rss_conf_ele), 0); + if (!rss_filter_ptr) { + PMD_DRV_LOG(ERR, "failed to allocate memory"); + goto out; + } + igb_rss_conf_init(dev, &rss_filter_ptr->filter_info, + &rss_conf.conf); + TAILQ_INSERT_TAIL(&igb_filter_rss_list, + rss_filter_ptr, entries); + flow->rule = rss_filter_ptr; + flow->filter_type = RTE_ETH_FILTER_HASH; + return flow; + } + } + +out: + TAILQ_REMOVE(&igb_flow_list, + igb_flow_mem_ptr, entries); + rte_flow_error_set(error, -ret, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Failed to create flow."); + rte_free(igb_flow_mem_ptr); + rte_free(flow); + return NULL; +} + +/** + * Check if the flow rule is supported by igb. + * It only checkes the format. Don't guarantee the rule can be programmed into + * the HW. Because there can be no enough room for the rule. + */ +static int +igb_flow_validate(__rte_unused struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + struct rte_eth_ntuple_filter ntuple_filter; + struct rte_eth_ethertype_filter ethertype_filter; + struct rte_eth_syn_filter syn_filter; + struct rte_eth_flex_filter flex_filter; + struct igb_rte_flow_rss_conf rss_conf; + int ret; + + memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter)); + ret = igb_parse_ntuple_filter(dev, attr, pattern, + actions, &ntuple_filter, error); + if (!ret) + return 0; + + memset(ðertype_filter, 0, sizeof(struct rte_eth_ethertype_filter)); + ret = igb_parse_ethertype_filter(dev, attr, pattern, + actions, ðertype_filter, error); + if (!ret) + return 0; + + memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter)); + ret = igb_parse_syn_filter(dev, attr, pattern, + actions, &syn_filter, error); + if (!ret) + return 0; + + memset(&flex_filter, 0, sizeof(struct rte_eth_flex_filter)); + ret = igb_parse_flex_filter(dev, attr, pattern, + actions, &flex_filter, error); + if (!ret) + return 0; + + memset(&rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf)); + ret = igb_parse_rss_filter(dev, attr, + actions, &rss_conf, error); + + return ret; +} + +/* Destroy a flow rule on igb. 
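/*
 * Illustrative sketch of the list bookkeeping used by the create/destroy
 * paths here: every accepted rule is wrapped in a small element that is
 * linked into a per-filter-type list (so it can be re-injected later) and
 * tracked by a global flow list (so flush can find it again); destroying a
 * flow unlinks and frees that element.  The element layout is a simplified
 * stand-in for the igb_*_filter_ele structures.
 */
#include <stdlib.h>
#include <sys/queue.h>

struct flow_ele {
        int filter_id;                  /* stand-in for the parsed filter info */
        TAILQ_ENTRY(flow_ele) entries;
};

TAILQ_HEAD(flow_list, flow_ele);
static struct flow_list syn_list = TAILQ_HEAD_INITIALIZER(syn_list);

static struct flow_ele *
flow_record(int filter_id)
{
        struct flow_ele *ele = calloc(1, sizeof(*ele));

        if (ele == NULL)
                return NULL;            /* "failed to allocate memory" */
        ele->filter_id = filter_id;
        TAILQ_INSERT_TAIL(&syn_list, ele, entries);
        return ele;
}

static void
flow_unrecord(struct flow_ele *ele)
{
        /* mirror of the destroy path: unlink first, then free the element */
        TAILQ_REMOVE(&syn_list, ele, entries);
        free(ele);
}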
*/ +static int +igb_flow_destroy(struct rte_eth_dev *dev, + struct rte_flow *flow, + struct rte_flow_error *error) +{ + int ret; + struct rte_flow *pmd_flow = flow; + enum rte_filter_type filter_type = pmd_flow->filter_type; + struct igb_ntuple_filter_ele *ntuple_filter_ptr; + struct igb_ethertype_filter_ele *ethertype_filter_ptr; + struct igb_eth_syn_filter_ele *syn_filter_ptr; + struct igb_flex_filter_ele *flex_filter_ptr; + struct igb_flow_mem *igb_flow_mem_ptr; + struct igb_rss_conf_ele *rss_filter_ptr; + + switch (filter_type) { + case RTE_ETH_FILTER_NTUPLE: + ntuple_filter_ptr = (struct igb_ntuple_filter_ele *) + pmd_flow->rule; + ret = igb_add_del_ntuple_filter(dev, + &ntuple_filter_ptr->filter_info, FALSE); + if (!ret) { + TAILQ_REMOVE(&igb_filter_ntuple_list, + ntuple_filter_ptr, entries); + rte_free(ntuple_filter_ptr); + } + break; + case RTE_ETH_FILTER_ETHERTYPE: + ethertype_filter_ptr = (struct igb_ethertype_filter_ele *) + pmd_flow->rule; + ret = igb_add_del_ethertype_filter(dev, + ðertype_filter_ptr->filter_info, FALSE); + if (!ret) { + TAILQ_REMOVE(&igb_filter_ethertype_list, + ethertype_filter_ptr, entries); + rte_free(ethertype_filter_ptr); + } + break; + case RTE_ETH_FILTER_SYN: + syn_filter_ptr = (struct igb_eth_syn_filter_ele *) + pmd_flow->rule; + ret = eth_igb_syn_filter_set(dev, + &syn_filter_ptr->filter_info, FALSE); + if (!ret) { + TAILQ_REMOVE(&igb_filter_syn_list, + syn_filter_ptr, entries); + rte_free(syn_filter_ptr); + } + break; + case RTE_ETH_FILTER_FLEXIBLE: + flex_filter_ptr = (struct igb_flex_filter_ele *) + pmd_flow->rule; + ret = eth_igb_add_del_flex_filter(dev, + &flex_filter_ptr->filter_info, FALSE); + if (!ret) { + TAILQ_REMOVE(&igb_filter_flex_list, + flex_filter_ptr, entries); + rte_free(flex_filter_ptr); + } + break; + case RTE_ETH_FILTER_HASH: + rss_filter_ptr = (struct igb_rss_conf_ele *) + pmd_flow->rule; + ret = igb_config_rss_filter(dev, + &rss_filter_ptr->filter_info, FALSE); + if (!ret) { + TAILQ_REMOVE(&igb_filter_rss_list, + rss_filter_ptr, entries); + rte_free(rss_filter_ptr); + } + break; + default: + PMD_DRV_LOG(WARNING, "Filter type (%d) not supported", + filter_type); + ret = -EINVAL; + break; + } + + if (ret) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_HANDLE, + NULL, "Failed to destroy flow"); + return ret; + } + + TAILQ_FOREACH(igb_flow_mem_ptr, &igb_flow_list, entries) { + if (igb_flow_mem_ptr->flow == pmd_flow) { + TAILQ_REMOVE(&igb_flow_list, + igb_flow_mem_ptr, entries); + rte_free(igb_flow_mem_ptr); + } + } + rte_free(flow); + + return ret; +} + +/* remove all the n-tuple filters */ +static void +igb_clear_all_ntuple_filter(struct rte_eth_dev *dev) +{ + struct e1000_filter_info *filter_info = + E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + struct e1000_5tuple_filter *p_5tuple; + struct e1000_2tuple_filter *p_2tuple; + + while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list))) + igb_delete_5tuple_filter_82576(dev, p_5tuple); + + while ((p_2tuple = TAILQ_FIRST(&filter_info->twotuple_list))) + igb_delete_2tuple_filter(dev, p_2tuple); +} + +/* remove all the ether type filters */ +static void +igb_clear_all_ethertype_filter(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_filter_info *filter_info = + E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + int i; + + for (i = 0; i < E1000_MAX_ETQF_FILTERS; i++) { + if (filter_info->ethertype_mask & (1 << i)) { + (void)igb_ethertype_filter_remove(filter_info, + (uint8_t)i); + 
E1000_WRITE_REG(hw, E1000_ETQF(i), 0); + E1000_WRITE_FLUSH(hw); + } + } +} + +/* remove the SYN filter */ +static void +igb_clear_syn_filter(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_filter_info *filter_info = + E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + + if (filter_info->syn_info & E1000_SYN_FILTER_ENABLE) { + filter_info->syn_info = 0; + E1000_WRITE_REG(hw, E1000_SYNQF(0), 0); + E1000_WRITE_FLUSH(hw); + } +} + +/* remove all the flex filters */ +static void +igb_clear_all_flex_filter(struct rte_eth_dev *dev) +{ + struct e1000_filter_info *filter_info = + E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + struct e1000_flex_filter *flex_filter; + + while ((flex_filter = TAILQ_FIRST(&filter_info->flex_list))) + igb_remove_flex_filter(dev, flex_filter); +} + +/* remove the rss filter */ +static void +igb_clear_rss_filter(struct rte_eth_dev *dev) +{ + struct e1000_filter_info *filter = + E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + + if (filter->rss_info.conf.queue_num) + igb_config_rss_filter(dev, &filter->rss_info, FALSE); +} + +void +igb_filterlist_flush(struct rte_eth_dev *dev) +{ + struct igb_ntuple_filter_ele *ntuple_filter_ptr; + struct igb_ethertype_filter_ele *ethertype_filter_ptr; + struct igb_eth_syn_filter_ele *syn_filter_ptr; + struct igb_flex_filter_ele *flex_filter_ptr; + struct igb_rss_conf_ele *rss_filter_ptr; + struct igb_flow_mem *igb_flow_mem_ptr; + enum rte_filter_type filter_type; + struct rte_flow *pmd_flow; + + TAILQ_FOREACH(igb_flow_mem_ptr, &igb_flow_list, entries) { + if (igb_flow_mem_ptr->dev == dev) { + pmd_flow = igb_flow_mem_ptr->flow; + filter_type = pmd_flow->filter_type; + + switch (filter_type) { + case RTE_ETH_FILTER_NTUPLE: + ntuple_filter_ptr = + (struct igb_ntuple_filter_ele *) + pmd_flow->rule; + TAILQ_REMOVE(&igb_filter_ntuple_list, + ntuple_filter_ptr, entries); + rte_free(ntuple_filter_ptr); + break; + case RTE_ETH_FILTER_ETHERTYPE: + ethertype_filter_ptr = + (struct igb_ethertype_filter_ele *) + pmd_flow->rule; + TAILQ_REMOVE(&igb_filter_ethertype_list, + ethertype_filter_ptr, entries); + rte_free(ethertype_filter_ptr); + break; + case RTE_ETH_FILTER_SYN: + syn_filter_ptr = + (struct igb_eth_syn_filter_ele *) + pmd_flow->rule; + TAILQ_REMOVE(&igb_filter_syn_list, + syn_filter_ptr, entries); + rte_free(syn_filter_ptr); + break; + case RTE_ETH_FILTER_FLEXIBLE: + flex_filter_ptr = + (struct igb_flex_filter_ele *) + pmd_flow->rule; + TAILQ_REMOVE(&igb_filter_flex_list, + flex_filter_ptr, entries); + rte_free(flex_filter_ptr); + break; + case RTE_ETH_FILTER_HASH: + rss_filter_ptr = + (struct igb_rss_conf_ele *) + pmd_flow->rule; + TAILQ_REMOVE(&igb_filter_rss_list, + rss_filter_ptr, entries); + rte_free(rss_filter_ptr); + break; + default: + PMD_DRV_LOG(WARNING, "Filter type" + "(%d) not supported", filter_type); + break; + } + TAILQ_REMOVE(&igb_flow_list, + igb_flow_mem_ptr, + entries); + rte_free(igb_flow_mem_ptr->flow); + rte_free(igb_flow_mem_ptr); + } + } +} + +/* Destroy all flow rules associated with a port on igb. 
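/*
 * Illustrative sketch of the drain idiom used by the clear/flush helpers
 * above: repeatedly take the head element, unlink it (the hardware
 * teardown for that filter would happen at this point) and free it until
 * the list is empty, which avoids walking a list that is being modified.
 * The element type is a simplified stand-in for the driver's filter
 * structures.
 */
#include <stdlib.h>
#include <sys/queue.h>

struct filter_ele {
        TAILQ_ENTRY(filter_ele) entries;
};

TAILQ_HEAD(filter_list, filter_ele);

static void
clear_all_filters(struct filter_list *list)
{
        struct filter_ele *ele;

        while ((ele = TAILQ_FIRST(list)) != NULL) {
                TAILQ_REMOVE(list, ele, entries);   /* hardware teardown goes here */
                free(ele);
        }
}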
*/ +static int +igb_flow_flush(struct rte_eth_dev *dev, + __rte_unused struct rte_flow_error *error) +{ + igb_clear_all_ntuple_filter(dev); + igb_clear_all_ethertype_filter(dev); + igb_clear_syn_filter(dev); + igb_clear_all_flex_filter(dev); + igb_clear_rss_filter(dev); + igb_filterlist_flush(dev); + + return 0; +} + +const struct rte_flow_ops igb_flow_ops = { + .validate = igb_flow_validate, + .create = igb_flow_create, + .destroy = igb_flow_destroy, + .flush = igb_flow_flush, +}; diff --git a/src/spdk/dpdk/drivers/net/e1000/igb_pf.c b/src/spdk/dpdk/drivers/net/e1000/igb_pf.c new file mode 100644 index 000000000..9d74c08ab --- /dev/null +++ b/src/spdk/dpdk/drivers/net/e1000/igb_pf.c @@ -0,0 +1,513 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2016 Intel Corporation + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "base/e1000_defines.h" +#include "base/e1000_regs.h" +#include "base/e1000_hw.h" +#include "e1000_ethdev.h" + +static inline uint16_t +dev_num_vf(struct rte_eth_dev *eth_dev) +{ + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); + + return pci_dev->max_vfs; +} + +static inline +int igb_vf_perm_addr_gen(struct rte_eth_dev *dev, uint16_t vf_num) +{ + unsigned char vf_mac_addr[RTE_ETHER_ADDR_LEN]; + struct e1000_vf_info *vfinfo = + *E1000_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private); + uint16_t vfn; + + for (vfn = 0; vfn < vf_num; vfn++) { + rte_eth_random_addr(vf_mac_addr); + /* keep the random address as default */ + memcpy(vfinfo[vfn].vf_mac_addresses, vf_mac_addr, + RTE_ETHER_ADDR_LEN); + } + + return 0; +} + +static inline int +igb_mb_intr_setup(struct rte_eth_dev *dev) +{ + struct e1000_interrupt *intr = + E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + + intr->mask |= E1000_ICR_VMMB; + + return 0; +} + +void igb_pf_host_init(struct rte_eth_dev *eth_dev) +{ + struct e1000_vf_info **vfinfo = + E1000_DEV_PRIVATE_TO_P_VFDATA(eth_dev->data->dev_private); + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); + uint16_t vf_num; + uint8_t nb_queue; + + RTE_ETH_DEV_SRIOV(eth_dev).active = 0; + if (0 == (vf_num = dev_num_vf(eth_dev))) + return; + + if (hw->mac.type == e1000_i350) + nb_queue = 1; + else if(hw->mac.type == e1000_82576) + /* per datasheet, it should be 2, but 1 seems correct */ + nb_queue = 1; + else + return; + + *vfinfo = rte_zmalloc("vf_info", sizeof(struct e1000_vf_info) * vf_num, 0); + if (*vfinfo == NULL) + rte_panic("Cannot allocate memory for private VF data\n"); + + RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_8_POOLS; + RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool = nb_queue; + RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx = vf_num; + RTE_ETH_DEV_SRIOV(eth_dev).def_pool_q_idx = (uint16_t)(vf_num * nb_queue); + + igb_vf_perm_addr_gen(eth_dev, vf_num); + + /* set mb interrupt mask */ + igb_mb_intr_setup(eth_dev); + + return; +} + +void igb_pf_host_uninit(struct rte_eth_dev *dev) +{ + struct e1000_vf_info **vfinfo; + uint16_t vf_num; + + PMD_INIT_FUNC_TRACE(); + + vfinfo = E1000_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private); + + RTE_ETH_DEV_SRIOV(dev).active = 0; + RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 0; + RTE_ETH_DEV_SRIOV(dev).def_vmdq_idx = 0; + RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx = 0; + + vf_num = dev_num_vf(dev); + if (vf_num == 0) + return; + + rte_free(*vfinfo); + *vfinfo = NULL; +} + +#define E1000_RAH_POOLSEL_SHIFT (18) +int igb_pf_host_configure(struct 
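/*
 * Illustrative sketch of the per-VF default MAC assignment done by
 * igb_vf_perm_addr_gen() above: each VF is given a randomly generated,
 * locally administered unicast address (first octet: multicast/group bit
 * clear, local-admin bit set), which is the shape of address
 * rte_eth_random_addr() produces in the driver.  rand() is used here only
 * to keep the sketch self-contained; it is not the driver's RNG.
 */
#include <stdint.h>
#include <stdlib.h>

#define ETH_ALEN 6

static void
vf_random_mac(uint8_t mac[ETH_ALEN])
{
        for (int i = 0; i < ETH_ALEN; i++)
                mac[i] = (uint8_t)(rand() & 0xFF);
        mac[0] &= (uint8_t)~0x01;       /* not a multicast/group address */
        mac[0] |= 0x02;                 /* locally administered */
}

static void
vf_perm_addr_gen(uint8_t (*vf_macs)[ETH_ALEN], uint16_t vf_num)
{
        /* one default address per VF, kept so it can be restored later */
        for (uint16_t vfn = 0; vfn < vf_num; vfn++)
                vf_random_mac(vf_macs[vfn]);
}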
rte_eth_dev *eth_dev) +{ + uint32_t vtctl; + uint16_t vf_num; + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); + uint32_t vlanctrl; + int i; + uint32_t rah; + + if (0 == (vf_num = dev_num_vf(eth_dev))) + return -1; + + /* enable VMDq and set the default pool for PF */ + vtctl = E1000_READ_REG(hw, E1000_VT_CTL); + vtctl &= ~E1000_VT_CTL_DEFAULT_POOL_MASK; + vtctl |= RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx + << E1000_VT_CTL_DEFAULT_POOL_SHIFT; + vtctl |= E1000_VT_CTL_VM_REPL_EN; + E1000_WRITE_REG(hw, E1000_VT_CTL, vtctl); + + /* Enable pools reserved to PF only */ + E1000_WRITE_REG(hw, E1000_VFRE, (~0U) << vf_num); + E1000_WRITE_REG(hw, E1000_VFTE, (~0U) << vf_num); + + /* PFDMA Tx General Switch Control Enables VMDQ loopback */ + if (hw->mac.type == e1000_i350) + E1000_WRITE_REG(hw, E1000_TXSWC, E1000_DTXSWC_VMDQ_LOOPBACK_EN); + else + E1000_WRITE_REG(hw, E1000_DTXSWC, E1000_DTXSWC_VMDQ_LOOPBACK_EN); + + /* clear VMDq map to perment rar 0 */ + rah = E1000_READ_REG(hw, E1000_RAH(0)); + rah &= ~ (0xFF << E1000_RAH_POOLSEL_SHIFT); + E1000_WRITE_REG(hw, E1000_RAH(0), rah); + + /* clear VMDq map to scan rar 32 */ + rah = E1000_READ_REG(hw, E1000_RAH(hw->mac.rar_entry_count)); + rah &= ~ (0xFF << E1000_RAH_POOLSEL_SHIFT); + E1000_WRITE_REG(hw, E1000_RAH(hw->mac.rar_entry_count), rah); + + /* set VMDq map to default PF pool */ + rah = E1000_READ_REG(hw, E1000_RAH(0)); + rah |= (0x1 << (RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx + + E1000_RAH_POOLSEL_SHIFT)); + E1000_WRITE_REG(hw, E1000_RAH(0), rah); + + /* + * enable vlan filtering and allow all vlan tags through + */ + vlanctrl = E1000_READ_REG(hw, E1000_RCTL); + vlanctrl |= E1000_RCTL_VFE ; /* enable vlan filters */ + E1000_WRITE_REG(hw, E1000_RCTL, vlanctrl); + + /* VFTA - enable all vlan filters */ + for (i = 0; i < IGB_VFTA_SIZE; i++) { + E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, i, 0xFFFFFFFF); + } + + /* Enable/Disable MAC Anti-Spoofing */ + e1000_vmdq_set_anti_spoofing_pf(hw, FALSE, vf_num); + + return 0; +} + +static void +set_rx_mode(struct rte_eth_dev *dev) +{ + struct rte_eth_dev_data *dev_data = dev->data; + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t fctrl, vmolr = E1000_VMOLR_BAM | E1000_VMOLR_AUPE; + uint16_t vfn = dev_num_vf(dev); + + /* Check for Promiscuous and All Multicast modes */ + fctrl = E1000_READ_REG(hw, E1000_RCTL); + + /* set all bits that we expect to always be set */ + fctrl &= ~E1000_RCTL_SBP; /* disable store-bad-packets */ + fctrl |= E1000_RCTL_BAM; + + /* clear the bits we are changing the status of */ + fctrl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE); + + if (dev_data->promiscuous) { + fctrl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); + vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME); + } else { + if (dev_data->all_multicast) { + fctrl |= E1000_RCTL_MPE; + vmolr |= E1000_VMOLR_MPME; + } else { + vmolr |= E1000_VMOLR_ROMPE; + } + } + + if ((hw->mac.type == e1000_82576) || + (hw->mac.type == e1000_i350)) { + vmolr |= E1000_READ_REG(hw, E1000_VMOLR(vfn)) & + ~(E1000_VMOLR_MPME | E1000_VMOLR_ROMPE | + E1000_VMOLR_ROPE); + E1000_WRITE_REG(hw, E1000_VMOLR(vfn), vmolr); + } + + E1000_WRITE_REG(hw, E1000_RCTL, fctrl); +} + +static inline void +igb_vf_reset_event(struct rte_eth_dev *dev, uint16_t vf) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_vf_info *vfinfo = + *(E1000_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private)); + uint32_t vmolr = E1000_READ_REG(hw, E1000_VMOLR(vf)); + + vmolr |= (E1000_VMOLR_ROPE | 
E1000_VMOLR_ROMPE | + E1000_VMOLR_BAM | E1000_VMOLR_AUPE); + E1000_WRITE_REG(hw, E1000_VMOLR(vf), vmolr); + + E1000_WRITE_REG(hw, E1000_VMVIR(vf), 0); + + /* reset multicast table array for vf */ + vfinfo[vf].num_vf_mc_hashes = 0; + + /* reset rx mode */ + set_rx_mode(dev); +} + +static inline void +igb_vf_reset_msg(struct rte_eth_dev *dev, uint16_t vf) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t reg; + + /* enable transmit and receive for vf */ + reg = E1000_READ_REG(hw, E1000_VFTE); + reg |= (reg | (1 << vf)); + E1000_WRITE_REG(hw, E1000_VFTE, reg); + + reg = E1000_READ_REG(hw, E1000_VFRE); + reg |= (reg | (1 << vf)); + E1000_WRITE_REG(hw, E1000_VFRE, reg); + + igb_vf_reset_event(dev, vf); +} + +static int +igb_vf_reset(struct rte_eth_dev *dev, uint16_t vf, uint32_t *msgbuf) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_vf_info *vfinfo = + *(E1000_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private)); + unsigned char *vf_mac = vfinfo[vf].vf_mac_addresses; + int rar_entry = hw->mac.rar_entry_count - (vf + 1); + uint8_t *new_mac = (uint8_t *)(&msgbuf[1]); + uint32_t rah; + + igb_vf_reset_msg(dev, vf); + + hw->mac.ops.rar_set(hw, vf_mac, rar_entry); + rah = E1000_READ_REG(hw, E1000_RAH(rar_entry)); + rah |= (0x1 << (vf + E1000_RAH_POOLSEL_SHIFT)); + E1000_WRITE_REG(hw, E1000_RAH(rar_entry), rah); + + /* reply to reset with ack and vf mac address */ + msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK; + rte_memcpy(new_mac, vf_mac, RTE_ETHER_ADDR_LEN); + e1000_write_mbx(hw, msgbuf, 3, vf); + + return 0; +} + +static int +igb_vf_set_mac_addr(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_vf_info *vfinfo = + *(E1000_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private)); + int rar_entry = hw->mac.rar_entry_count - (vf + 1); + uint8_t *new_mac = (uint8_t *)(&msgbuf[1]); + int rah; + + if (rte_is_unicast_ether_addr((struct rte_ether_addr *)new_mac)) { + if (!rte_is_zero_ether_addr((struct rte_ether_addr *)new_mac)) + rte_memcpy(vfinfo[vf].vf_mac_addresses, new_mac, + sizeof(vfinfo[vf].vf_mac_addresses)); + hw->mac.ops.rar_set(hw, new_mac, rar_entry); + rah = E1000_READ_REG(hw, E1000_RAH(rar_entry)); + rah |= (0x1 << (E1000_RAH_POOLSEL_SHIFT + vf)); + E1000_WRITE_REG(hw, E1000_RAH(rar_entry), rah); + return 0; + } + return -1; +} + +static int +igb_vf_set_multicast(struct rte_eth_dev *dev, __rte_unused uint32_t vf, uint32_t *msgbuf) +{ + int i; + uint32_t vector_bit; + uint32_t vector_reg; + uint32_t mta_reg; + int entries = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> + E1000_VT_MSGINFO_SHIFT; + uint16_t *hash_list = (uint16_t *)&msgbuf[1]; + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_vf_info *vfinfo = + *(E1000_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private)); + + /* only so many hash values supported */ + entries = RTE_MIN(entries, E1000_MAX_VF_MC_ENTRIES); + + /* + * salt away the number of multi cast addresses assigned + * to this VF for later use to restore when the PF multi cast + * list changes + */ + vfinfo->num_vf_mc_hashes = (uint16_t)entries; + + /* + * VFs are limited to using the MTA hash table for their multicast + * addresses + */ + for (i = 0; i < entries; i++) { + vfinfo->vf_mc_hashes[i] = hash_list[i]; + } + + for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) { + vector_reg = (vfinfo->vf_mc_hashes[i] >> 5) & 0x7F; + vector_bit = vfinfo->vf_mc_hashes[i] & 0x1F; + mta_reg = 
E1000_READ_REG_ARRAY(hw, E1000_MTA, vector_reg); + mta_reg |= (1 << vector_bit); + E1000_WRITE_REG_ARRAY(hw, E1000_MTA, vector_reg, mta_reg); + } + + return 0; +} + +static int +igb_vf_set_vlan(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf) +{ + int add, vid; + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_vf_info *vfinfo = + *(E1000_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private)); + uint32_t vid_idx, vid_bit, vfta; + + add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) + >> E1000_VT_MSGINFO_SHIFT; + vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK); + + if (add) + vfinfo[vf].vlan_count++; + else if (vfinfo[vf].vlan_count) + vfinfo[vf].vlan_count--; + + vid_idx = (uint32_t)((vid >> E1000_VFTA_ENTRY_SHIFT) & + E1000_VFTA_ENTRY_MASK); + vid_bit = (uint32_t)(1 << (vid & E1000_VFTA_ENTRY_BIT_SHIFT_MASK)); + vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, vid_idx); + if (add) + vfta |= vid_bit; + else + vfta &= ~vid_bit; + + E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, vid_idx, vfta); + E1000_WRITE_FLUSH(hw); + + return 0; +} + +static int +igb_vf_set_rlpml(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint16_t rlpml = msgbuf[1] & E1000_VMOLR_RLPML_MASK; + uint32_t max_frame = rlpml + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN; + uint32_t vmolr; + + if (max_frame < RTE_ETHER_MIN_LEN || + max_frame > RTE_ETHER_MAX_JUMBO_FRAME_LEN) + return -1; + + vmolr = E1000_READ_REG(hw, E1000_VMOLR(vf)); + + vmolr &= ~E1000_VMOLR_RLPML_MASK; + vmolr |= rlpml; + + /* Enable Long Packet support */ + vmolr |= E1000_VMOLR_LPE; + + E1000_WRITE_REG(hw, E1000_VMOLR(vf), vmolr); + E1000_WRITE_FLUSH(hw); + + return 0; +} + +static int +igb_rcv_msg_from_vf(struct rte_eth_dev *dev, uint16_t vf) +{ + uint16_t mbx_size = E1000_VFMAILBOX_SIZE; + uint32_t msgbuf[E1000_VFMAILBOX_SIZE]; + int32_t retval; + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + retval = e1000_read_mbx(hw, msgbuf, mbx_size, vf); + if (retval) { + PMD_INIT_LOG(ERR, "Error mbx recv msg from VF %d", vf); + return retval; + } + + /* do nothing with the message already processed */ + if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK)) + return retval; + + /* flush the ack before we write any messages back */ + E1000_WRITE_FLUSH(hw); + + /* perform VF reset */ + if (msgbuf[0] == E1000_VF_RESET) { + return igb_vf_reset(dev, vf, msgbuf); + } + + /* check & process VF to PF mailbox message */ + switch ((msgbuf[0] & 0xFFFF)) { + case E1000_VF_SET_MAC_ADDR: + retval = igb_vf_set_mac_addr(dev, vf, msgbuf); + break; + case E1000_VF_SET_MULTICAST: + retval = igb_vf_set_multicast(dev, vf, msgbuf); + break; + case E1000_VF_SET_LPE: + retval = igb_vf_set_rlpml(dev, vf, msgbuf); + break; + case E1000_VF_SET_VLAN: + retval = igb_vf_set_vlan(dev, vf, msgbuf); + break; + default: + PMD_INIT_LOG(DEBUG, "Unhandled Msg %8.8x", + (unsigned) msgbuf[0]); + retval = E1000_ERR_MBX; + break; + } + + /* response the VF according to the message process result */ + if (retval) + msgbuf[0] |= E1000_VT_MSGTYPE_NACK; + else + msgbuf[0] |= E1000_VT_MSGTYPE_ACK; + + msgbuf[0] |= E1000_VT_MSGTYPE_CTS; + + e1000_write_mbx(hw, msgbuf, 1, vf); + + return retval; +} + +static inline void +igb_rcv_ack_from_vf(struct rte_eth_dev *dev, uint16_t vf) +{ + uint32_t msg = E1000_VT_MSGTYPE_NACK; + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + e1000_write_mbx(hw, &msg, 1, vf); +} + +void igb_pf_mbx_process(struct rte_eth_dev 
*eth_dev) +{ + uint16_t vf; + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); + + for (vf = 0; vf < dev_num_vf(eth_dev); vf++) { + /* check & process vf function level reset */ + if (!e1000_check_for_rst(hw, vf)) + igb_vf_reset_event(eth_dev, vf); + + /* check & process vf mailbox messages */ + if (!e1000_check_for_msg(hw, vf)) + igb_rcv_msg_from_vf(eth_dev, vf); + + /* check & process acks from vf */ + if (!e1000_check_for_ack(hw, vf)) + igb_rcv_ack_from_vf(eth_dev, vf); + } +} diff --git a/src/spdk/dpdk/drivers/net/e1000/igb_regs.h b/src/spdk/dpdk/drivers/net/e1000/igb_regs.h new file mode 100644 index 000000000..cacd49c7d --- /dev/null +++ b/src/spdk/dpdk/drivers/net/e1000/igb_regs.h @@ -0,0 +1,194 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2015 Intel Corporation + */ +#ifndef _IGB_REGS_H_ +#define _IGB_REGS_H_ + +#include "e1000_ethdev.h" + +struct reg_info { + uint32_t base_addr; + uint32_t count; + uint32_t stride; + const char *name; +}; + +static const struct reg_info igb_regs_general[] = { + {E1000_CTRL, 1, 1, "E1000_CTRL"}, + {E1000_STATUS, 1, 1, "E1000_STATUS"}, + {E1000_CTRL_EXT, 1, 1, "E1000_CTRL_EXT"}, + {E1000_MDIC, 1, 1, "E1000_MDIC"}, + {E1000_SCTL, 1, 1, "E1000_SCTL"}, + {E1000_CONNSW, 1, 1, "E1000_CONNSW"}, + {E1000_VET, 1, 1, "E1000_VET"}, + {E1000_LEDCTL, 1, 1, "E1000_LEDCTL"}, + {E1000_PBA, 1, 1, "E1000_PBA"}, + {E1000_PBS, 1, 1, "E1000_PBS"}, + {E1000_FRTIMER, 1, 1, "E1000_FRTIMER"}, + {E1000_TCPTIMER, 1, 1, "E1000_TCPTIMER"}, + {0, 0, 0, ""} +}; + +static const struct reg_info igb_regs_nvm[] = { + {E1000_EECD, 1, 1, "E1000_EECD"}, + {0, 0, 0, ""} +}; + +static const struct reg_info igb_regs_interrupt[] = { + {E1000_EICS, 1, 1, "E1000_EICS"}, + {E1000_EIMS, 1, 1, "E1000_EIMS"}, + {E1000_EIMC, 1, 1, "E1000_EIMC"}, + {E1000_EIAC, 1, 1, "E1000_EIAC"}, + {E1000_EIAM, 1, 1, "E1000_EIAM"}, + {E1000_ICS, 1, 1, "E1000_ICS"}, + {E1000_IMS, 1, 1, "E1000_IMS"}, + {E1000_IMC, 1, 1, "E1000_IMC"}, + {E1000_IAC, 1, 1, "E1000_IAC"}, + {E1000_IAM, 1, 1, "E1000_IAM"}, + {E1000_IMIRVP, 1, 1, "E1000_IMIRVP"}, + {E1000_EITR(0), 10, 4, "E1000_EITR"}, + {E1000_IMIR(0), 8, 4, "E1000_IMIR"}, + {E1000_IMIREXT(0), 8, 4, "E1000_IMIREXT"}, + {0, 0, 0, ""} +}; + +static const struct reg_info igb_regs_fctl[] = { + {E1000_FCAL, 1, 1, "E1000_FCAL"}, + {E1000_FCAH, 1, 1, "E1000_FCAH"}, + {E1000_FCTTV, 1, 1, "E1000_FCTTV"}, + {E1000_FCRTL, 1, 1, "E1000_FCRTL"}, + {E1000_FCRTH, 1, 1, "E1000_FCRTH"}, + {E1000_FCRTV, 1, 1, "E1000_FCRTV"}, + {0, 0, 0, ""} +}; + +static const struct reg_info igb_regs_rxdma[] = { + {E1000_RDBAL(0), 4, 0x100, "E1000_RDBAL"}, + {E1000_RDBAH(0), 4, 0x100, "E1000_RDBAH"}, + {E1000_RDLEN(0), 4, 0x100, "E1000_RDLEN"}, + {E1000_RDH(0), 4, 0x100, "E1000_RDH"}, + {E1000_RDT(0), 4, 0x100, "E1000_RDT"}, + {E1000_RXCTL(0), 4, 0x100, "E1000_RXCTL"}, + {E1000_SRRCTL(0), 4, 0x100, "E1000_SRRCTL"}, + {E1000_DCA_RXCTRL(0), 4, 0x100, "E1000_DCA_RXCTRL"}, + {0, 0, 0, ""} +}; + +static const struct reg_info igb_regs_rx[] = { + {E1000_RCTL, 1, 1, "E1000_RCTL"}, + {E1000_RXCSUM, 1, 1, "E1000_RXCSUM"}, + {E1000_RLPML, 1, 1, "E1000_RLPML"}, + {E1000_RFCTL, 1, 1, "E1000_RFCTL"}, + {E1000_MRQC, 1, 1, "E1000_MRQC"}, + {E1000_VT_CTL, 1, 1, "E1000_VT_CTL"}, + {E1000_RAL(0), 16, 8, "E1000_RAL"}, + {E1000_RAH(0), 16, 8, "E1000_RAH"}, + {0, 0, 0, ""} +}; + +static const struct reg_info igb_regs_tx[] = { + {E1000_TCTL, 1, 1, "E1000_TCTL"}, + {E1000_TCTL_EXT, 1, 1, "E1000_TCTL_EXT"}, + {E1000_TIPG, 1, 1, "E1000_TIPG"}, + {E1000_DTXCTL, 1, 1, "E1000_DTXCTL"}, + 
{E1000_TDBAL(0), 4, 0x100, "E1000_TDBAL"}, + {E1000_TDBAH(0), 4, 0x100, "E1000_TDBAH"}, + {E1000_TDLEN(0), 4, 0x100, "E1000_TDLEN"}, + {E1000_TDH(0), 4, 0x100, "E1000_TDH"}, + {E1000_TDT(0), 4, 0x100, "E1000_TDT"}, + {E1000_TXDCTL(0), 4, 0x100, "E1000_TXDCTL"}, + {E1000_TDWBAL(0), 4, 0x100, "E1000_TDWBAL"}, + {E1000_TDWBAH(0), 4, 0x100, "E1000_TDWBAH"}, + {E1000_DCA_TXCTRL(0), 4, 0x100, "E1000_DCA_TXCTRL"}, + {E1000_TDFH, 1, 1, "E1000_TDFH"}, + {E1000_TDFT, 1, 1, "E1000_TDFT"}, + {E1000_TDFHS, 1, 1, "E1000_TDFHS"}, + {E1000_TDFPC, 1, 1, "E1000_TDFPC"}, + {0, 0, 0, ""} +}; + +static const struct reg_info igb_regs_wakeup[] = { + {E1000_WUC, 1, 1, "E1000_WUC"}, + {E1000_WUFC, 1, 1, "E1000_WUFC"}, + {E1000_WUS, 1, 1, "E1000_WUS"}, + {E1000_IPAV, 1, 1, "E1000_IPAV"}, + {E1000_WUPL, 1, 1, "E1000_WUPL"}, + {E1000_IP4AT_REG(0), 4, 8, "E1000_IP4AT_REG"}, + {E1000_IP6AT_REG(0), 4, 4, "E1000_IP6AT_REG"}, + {E1000_WUPM_REG(0), 4, 4, "E1000_WUPM_REG"}, + {E1000_FFMT_REG(0), 4, 8, "E1000_FFMT_REG"}, + {E1000_FFVT_REG(0), 4, 8, "E1000_FFVT_REG"}, + {E1000_FFLT_REG(0), 4, 8, "E1000_FFLT_REG"}, + {0, 0, 0, ""} +}; + +static const struct reg_info igb_regs_mac[] = { + {E1000_PCS_CFG0, 1, 1, "E1000_PCS_CFG0"}, + {E1000_PCS_LCTL, 1, 1, "E1000_PCS_LCTL"}, + {E1000_PCS_LSTAT, 1, 1, "E1000_PCS_LSTAT"}, + {E1000_PCS_ANADV, 1, 1, "E1000_PCS_ANADV"}, + {E1000_PCS_LPAB, 1, 1, "E1000_PCS_LPAB"}, + {E1000_PCS_NPTX, 1, 1, "E1000_PCS_NPTX"}, + {E1000_PCS_LPABNP, 1, 1, "E1000_PCS_LPABNP"}, + {0, 0, 0, ""} +}; + +static const struct reg_info *igb_regs[] = { + igb_regs_general, + igb_regs_nvm, + igb_regs_interrupt, + igb_regs_fctl, + igb_regs_rxdma, + igb_regs_rx, + igb_regs_tx, + igb_regs_wakeup, + igb_regs_mac, + NULL}; + +/* FIXME: reading igb_regs_interrupt results in a side effect which doesn't + * work with VFIO; re-install igb_regs_interrupt once the issue is resolved.
+ */ +static const struct reg_info *igbvf_regs[] = { + igb_regs_general, + igb_regs_rxdma, + igb_regs_tx, + NULL}; + +static inline int +igb_read_regs(struct e1000_hw *hw, const struct reg_info *reg, + uint32_t *reg_buf) +{ + unsigned int i; + + for (i = 0; i < reg->count; i++) { + reg_buf[i] = E1000_READ_REG(hw, + reg->base_addr + i * reg->stride); + } + return reg->count; +}; + +static inline int +igb_reg_group_count(const struct reg_info *regs) +{ + int count = 0; + int i = 0; + + while (regs[i].count) + count += regs[i++].count; + return count; +}; + +static inline int +igb_read_regs_group(struct rte_eth_dev *dev, uint32_t *reg_buf, + const struct reg_info *regs) +{ + int count = 0; + int i = 0; + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + while (regs[i].count) + count += igb_read_regs(hw, ®s[i++], ®_buf[count]); + return count; +}; + +#endif /* _IGB_REGS_H_ */ diff --git a/src/spdk/dpdk/drivers/net/e1000/igb_rxtx.c b/src/spdk/dpdk/drivers/net/e1000/igb_rxtx.c new file mode 100644 index 000000000..684fa4ad8 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/e1000/igb_rxtx.c @@ -0,0 +1,2965 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2016 Intel Corporation + */ + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "e1000_logs.h" +#include "base/e1000_api.h" +#include "e1000_ethdev.h" + +#ifdef RTE_LIBRTE_IEEE1588 +#define IGB_TX_IEEE1588_TMST PKT_TX_IEEE1588_TMST +#else +#define IGB_TX_IEEE1588_TMST 0 +#endif +/* Bit Mask to indicate what bits required for building TX context */ +#define IGB_TX_OFFLOAD_MASK ( \ + PKT_TX_OUTER_IPV6 | \ + PKT_TX_OUTER_IPV4 | \ + PKT_TX_IPV6 | \ + PKT_TX_IPV4 | \ + PKT_TX_VLAN_PKT | \ + PKT_TX_IP_CKSUM | \ + PKT_TX_L4_MASK | \ + PKT_TX_TCP_SEG | \ + IGB_TX_IEEE1588_TMST) + +#define IGB_TX_OFFLOAD_NOTSUP_MASK \ + (PKT_TX_OFFLOAD_MASK ^ IGB_TX_OFFLOAD_MASK) + +/** + * Structure associated with each descriptor of the RX ring of a RX queue. + */ +struct igb_rx_entry { + struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */ +}; + +/** + * Structure associated with each descriptor of the TX ring of a TX queue. + */ +struct igb_tx_entry { + struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */ + uint16_t next_id; /**< Index of next descriptor in ring. */ + uint16_t last_id; /**< Index of last scattered descriptor. */ +}; + +/** + * rx queue flags + */ +enum igb_rxq_flags { + IGB_RXQ_FLAG_LB_BSWAP_VLAN = 0x01, +}; + +/** + * Structure associated with each RX queue. + */ +struct igb_rx_queue { + struct rte_mempool *mb_pool; /**< mbuf pool to populate RX ring. */ + volatile union e1000_adv_rx_desc *rx_ring; /**< RX ring virtual address. */ + uint64_t rx_ring_phys_addr; /**< RX ring DMA address. */ + volatile uint32_t *rdt_reg_addr; /**< RDT register address. */ + volatile uint32_t *rdh_reg_addr; /**< RDH register address. */ + struct igb_rx_entry *sw_ring; /**< address of RX software ring. */ + struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */ + struct rte_mbuf *pkt_last_seg; /**< Last segment of current packet. */ + uint16_t nb_rx_desc; /**< number of RX descriptors. */ + uint16_t rx_tail; /**< current value of RDT register. 
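+ * (index of the next descriptor to be probed by the RX burst functions).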
*/ + uint16_t nb_rx_hold; /**< number of held free RX desc. */ + uint16_t rx_free_thresh; /**< max free RX desc to hold. */ + uint16_t queue_id; /**< RX queue index. */ + uint16_t reg_idx; /**< RX queue register index. */ + uint16_t port_id; /**< Device port identifier. */ + uint8_t pthresh; /**< Prefetch threshold register. */ + uint8_t hthresh; /**< Host threshold register. */ + uint8_t wthresh; /**< Write-back threshold register. */ + uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise. */ + uint8_t drop_en; /**< If not 0, set SRRCTL.Drop_En. */ + uint32_t flags; /**< RX flags. */ + uint64_t offloads; /**< offloads of DEV_RX_OFFLOAD_* */ +}; + +/** + * Hardware context number + */ +enum igb_advctx_num { + IGB_CTX_0 = 0, /**< CTX0 */ + IGB_CTX_1 = 1, /**< CTX1 */ + IGB_CTX_NUM = 2, /**< CTX_NUM */ +}; + +/** Offload features */ +union igb_tx_offload { + uint64_t data; + struct { + uint64_t l3_len:9; /**< L3 (IP) Header Length. */ + uint64_t l2_len:7; /**< L2 (MAC) Header Length. */ + uint64_t vlan_tci:16; /**< VLAN Tag Control Identifier(CPU order). */ + uint64_t l4_len:8; /**< L4 (TCP/UDP) Header Length. */ + uint64_t tso_segsz:16; /**< TCP TSO segment size. */ + + /* uint64_t unused:8; */ + }; +}; + +/* + * Compare mask for igb_tx_offload.data, + * should be in sync with igb_tx_offload layout. + * */ +#define TX_MACIP_LEN_CMP_MASK 0x000000000000FFFFULL /**< L2L3 header mask. */ +#define TX_VLAN_CMP_MASK 0x00000000FFFF0000ULL /**< Vlan mask. */ +#define TX_TCP_LEN_CMP_MASK 0x000000FF00000000ULL /**< TCP header mask. */ +#define TX_TSO_MSS_CMP_MASK 0x00FFFF0000000000ULL /**< TSO segsz mask. */ +/** Mac + IP + TCP + Mss mask. */ +#define TX_TSO_CMP_MASK \ + (TX_MACIP_LEN_CMP_MASK | TX_TCP_LEN_CMP_MASK | TX_TSO_MSS_CMP_MASK) + +/** + * Strucutre to check if new context need be built + */ +struct igb_advctx_info { + uint64_t flags; /**< ol_flags related to context build. */ + /** tx offload: vlan, tso, l2-l3-l4 lengths. */ + union igb_tx_offload tx_offload; + /** compare mask for tx offload. */ + union igb_tx_offload tx_offload_mask; +}; + +/** + * Structure associated with each TX queue. + */ +struct igb_tx_queue { + volatile union e1000_adv_tx_desc *tx_ring; /**< TX ring address */ + uint64_t tx_ring_phys_addr; /**< TX ring DMA address. */ + struct igb_tx_entry *sw_ring; /**< virtual address of SW ring. */ + volatile uint32_t *tdt_reg_addr; /**< Address of TDT register. */ + uint32_t txd_type; /**< Device-specific TXD type */ + uint16_t nb_tx_desc; /**< number of TX descriptors. */ + uint16_t tx_tail; /**< Current value of TDT register. */ + uint16_t tx_head; + /**< Index of first used TX descriptor. */ + uint16_t queue_id; /**< TX queue index. */ + uint16_t reg_idx; /**< TX queue register index. */ + uint16_t port_id; /**< Device port identifier. */ + uint8_t pthresh; /**< Prefetch threshold register. */ + uint8_t hthresh; /**< Host threshold register. */ + uint8_t wthresh; /**< Write-back threshold register. */ + uint32_t ctx_curr; + /**< Current used hardware descriptor. */ + uint32_t ctx_start; + /**< Start context position for transmit queue. 
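+ * (non-zero only on 82575, where each TX queue owns IGB_CTX_NUM
+ * hardware contexts).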
*/ + struct igb_advctx_info ctx_cache[IGB_CTX_NUM]; + /**< Hardware context history.*/ + uint64_t offloads; /**< offloads of DEV_TX_OFFLOAD_* */ +}; + +#if 1 +#define RTE_PMD_USE_PREFETCH +#endif + +#ifdef RTE_PMD_USE_PREFETCH +#define rte_igb_prefetch(p) rte_prefetch0(p) +#else +#define rte_igb_prefetch(p) do {} while(0) +#endif + +#ifdef RTE_PMD_PACKET_PREFETCH +#define rte_packet_prefetch(p) rte_prefetch1(p) +#else +#define rte_packet_prefetch(p) do {} while(0) +#endif + +/* + * Macro for VMDq feature for 1 GbE NIC. + */ +#define E1000_VMOLR_SIZE (8) +#define IGB_TSO_MAX_HDRLEN (512) +#define IGB_TSO_MAX_MSS (9216) + +/********************************************************************* + * + * TX function + * + **********************************************************************/ + +/* + *There're some limitations in hardware for TCP segmentation offload. We + *should check whether the parameters are valid. + */ +static inline uint64_t +check_tso_para(uint64_t ol_req, union igb_tx_offload ol_para) +{ + if (!(ol_req & PKT_TX_TCP_SEG)) + return ol_req; + if ((ol_para.tso_segsz > IGB_TSO_MAX_MSS) || (ol_para.l2_len + + ol_para.l3_len + ol_para.l4_len > IGB_TSO_MAX_HDRLEN)) { + ol_req &= ~PKT_TX_TCP_SEG; + ol_req |= PKT_TX_TCP_CKSUM; + } + return ol_req; +} + +/* + * Advanced context descriptor are almost same between igb/ixgbe + * This is a separate function, looking for optimization opportunity here + * Rework required to go with the pre-defined values. + */ + +static inline void +igbe_set_xmit_ctx(struct igb_tx_queue* txq, + volatile struct e1000_adv_tx_context_desc *ctx_txd, + uint64_t ol_flags, union igb_tx_offload tx_offload) +{ + uint32_t type_tucmd_mlhl; + uint32_t mss_l4len_idx; + uint32_t ctx_idx, ctx_curr; + uint32_t vlan_macip_lens; + union igb_tx_offload tx_offload_mask; + + ctx_curr = txq->ctx_curr; + ctx_idx = ctx_curr + txq->ctx_start; + + tx_offload_mask.data = 0; + type_tucmd_mlhl = 0; + + /* Specify which HW CTX to upload. 
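+ * ctx_idx is encoded into the IDX field of mss_l4len_idx below so that
+ * the data descriptors can refer back to this context.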
*/ + mss_l4len_idx = (ctx_idx << E1000_ADVTXD_IDX_SHIFT); + + if (ol_flags & PKT_TX_VLAN_PKT) + tx_offload_mask.data |= TX_VLAN_CMP_MASK; + + /* check if TCP segmentation required for this packet */ + if (ol_flags & PKT_TX_TCP_SEG) { + /* implies IP cksum in IPv4 */ + if (ol_flags & PKT_TX_IP_CKSUM) + type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV4 | + E1000_ADVTXD_TUCMD_L4T_TCP | + E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT; + else + type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV6 | + E1000_ADVTXD_TUCMD_L4T_TCP | + E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT; + + tx_offload_mask.data |= TX_TSO_CMP_MASK; + mss_l4len_idx |= tx_offload.tso_segsz << E1000_ADVTXD_MSS_SHIFT; + mss_l4len_idx |= tx_offload.l4_len << E1000_ADVTXD_L4LEN_SHIFT; + } else { /* no TSO, check if hardware checksum is needed */ + if (ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK)) + tx_offload_mask.data |= TX_MACIP_LEN_CMP_MASK; + + if (ol_flags & PKT_TX_IP_CKSUM) + type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV4; + + switch (ol_flags & PKT_TX_L4_MASK) { + case PKT_TX_UDP_CKSUM: + type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP | + E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT; + mss_l4len_idx |= sizeof(struct rte_udp_hdr) + << E1000_ADVTXD_L4LEN_SHIFT; + break; + case PKT_TX_TCP_CKSUM: + type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP | + E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT; + mss_l4len_idx |= sizeof(struct rte_tcp_hdr) + << E1000_ADVTXD_L4LEN_SHIFT; + break; + case PKT_TX_SCTP_CKSUM: + type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_SCTP | + E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT; + mss_l4len_idx |= sizeof(struct rte_sctp_hdr) + << E1000_ADVTXD_L4LEN_SHIFT; + break; + default: + type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_RSV | + E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT; + break; + } + } + + txq->ctx_cache[ctx_curr].flags = ol_flags; + txq->ctx_cache[ctx_curr].tx_offload.data = + tx_offload_mask.data & tx_offload.data; + txq->ctx_cache[ctx_curr].tx_offload_mask = tx_offload_mask; + + ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl); + vlan_macip_lens = (uint32_t)tx_offload.data; + ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens); + ctx_txd->mss_l4len_idx = rte_cpu_to_le_32(mss_l4len_idx); + ctx_txd->seqnum_seed = 0; +} + +/* + * Check which hardware context can be used. Use the existing match + * or create a new context descriptor. 
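+ * Returns the index of a cached context that matches the requested
+ * offloads, or IGB_CTX_NUM when a new context descriptor is needed.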
+ */ +static inline uint32_t +what_advctx_update(struct igb_tx_queue *txq, uint64_t flags, + union igb_tx_offload tx_offload) +{ + /* If match with the current context */ + if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) && + (txq->ctx_cache[txq->ctx_curr].tx_offload.data == + (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data & tx_offload.data)))) { + return txq->ctx_curr; + } + + /* If match with the second context */ + txq->ctx_curr ^= 1; + if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) && + (txq->ctx_cache[txq->ctx_curr].tx_offload.data == + (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data & tx_offload.data)))) { + return txq->ctx_curr; + } + + /* Mismatch, use the previous context */ + return IGB_CTX_NUM; +} + +static inline uint32_t +tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags) +{ + static const uint32_t l4_olinfo[2] = {0, E1000_ADVTXD_POPTS_TXSM}; + static const uint32_t l3_olinfo[2] = {0, E1000_ADVTXD_POPTS_IXSM}; + uint32_t tmp; + + tmp = l4_olinfo[(ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM]; + tmp |= l3_olinfo[(ol_flags & PKT_TX_IP_CKSUM) != 0]; + tmp |= l4_olinfo[(ol_flags & PKT_TX_TCP_SEG) != 0]; + return tmp; +} + +static inline uint32_t +tx_desc_vlan_flags_to_cmdtype(uint64_t ol_flags) +{ + uint32_t cmdtype; + static uint32_t vlan_cmd[2] = {0, E1000_ADVTXD_DCMD_VLE}; + static uint32_t tso_cmd[2] = {0, E1000_ADVTXD_DCMD_TSE}; + cmdtype = vlan_cmd[(ol_flags & PKT_TX_VLAN_PKT) != 0]; + cmdtype |= tso_cmd[(ol_flags & PKT_TX_TCP_SEG) != 0]; + return cmdtype; +} + +uint16_t +eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + struct igb_tx_queue *txq; + struct igb_tx_entry *sw_ring; + struct igb_tx_entry *txe, *txn; + volatile union e1000_adv_tx_desc *txr; + volatile union e1000_adv_tx_desc *txd; + struct rte_mbuf *tx_pkt; + struct rte_mbuf *m_seg; + uint64_t buf_dma_addr; + uint32_t olinfo_status; + uint32_t cmd_type_len; + uint32_t pkt_len; + uint16_t slen; + uint64_t ol_flags; + uint16_t tx_end; + uint16_t tx_id; + uint16_t tx_last; + uint16_t nb_tx; + uint64_t tx_ol_req; + uint32_t new_ctx = 0; + uint32_t ctx = 0; + union igb_tx_offload tx_offload = {0}; + + txq = tx_queue; + sw_ring = txq->sw_ring; + txr = txq->tx_ring; + tx_id = txq->tx_tail; + txe = &sw_ring[tx_id]; + + for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) { + tx_pkt = *tx_pkts++; + pkt_len = tx_pkt->pkt_len; + + RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf); + + /* + * The number of descriptors that must be allocated for a + * packet is the number of segments of that packet, plus 1 + * Context Descriptor for the VLAN Tag Identifier, if any. + * Determine the last TX descriptor to allocate in the TX ring + * for the packet, starting from the current position (tx_id) + * in the ring. + */ + tx_last = (uint16_t) (tx_id + tx_pkt->nb_segs - 1); + + ol_flags = tx_pkt->ol_flags; + tx_ol_req = ol_flags & IGB_TX_OFFLOAD_MASK; + + /* If a Context Descriptor need be built . 
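+ * Collect the offload lengths from the mbuf and check whether one of the
+ * cached hardware contexts already matches; if not, a context descriptor
+ * is placed ahead of the data descriptors.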
*/ + if (tx_ol_req) { + tx_offload.l2_len = tx_pkt->l2_len; + tx_offload.l3_len = tx_pkt->l3_len; + tx_offload.l4_len = tx_pkt->l4_len; + tx_offload.vlan_tci = tx_pkt->vlan_tci; + tx_offload.tso_segsz = tx_pkt->tso_segsz; + tx_ol_req = check_tso_para(tx_ol_req, tx_offload); + + ctx = what_advctx_update(txq, tx_ol_req, tx_offload); + /* Only allocate context descriptor if required*/ + new_ctx = (ctx == IGB_CTX_NUM); + ctx = txq->ctx_curr + txq->ctx_start; + tx_last = (uint16_t) (tx_last + new_ctx); + } + if (tx_last >= txq->nb_tx_desc) + tx_last = (uint16_t) (tx_last - txq->nb_tx_desc); + + PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u" + " tx_first=%u tx_last=%u", + (unsigned) txq->port_id, + (unsigned) txq->queue_id, + (unsigned) pkt_len, + (unsigned) tx_id, + (unsigned) tx_last); + + /* + * Check if there are enough free descriptors in the TX ring + * to transmit the next packet. + * This operation is based on the two following rules: + * + * 1- Only check that the last needed TX descriptor can be + * allocated (by construction, if that descriptor is free, + * all intermediate ones are also free). + * + * For this purpose, the index of the last TX descriptor + * used for a packet (the "last descriptor" of a packet) + * is recorded in the TX entries (the last one included) + * that are associated with all TX descriptors allocated + * for that packet. + * + * 2- Avoid to allocate the last free TX descriptor of the + * ring, in order to never set the TDT register with the + * same value stored in parallel by the NIC in the TDH + * register, which makes the TX engine of the NIC enter + * in a deadlock situation. + * + * By extension, avoid to allocate a free descriptor that + * belongs to the last set of free descriptors allocated + * to the same packet previously transmitted. + */ + + /* + * The "last descriptor" of the previously sent packet, if any, + * which used the last descriptor to allocate. + */ + tx_end = sw_ring[tx_last].last_id; + + /* + * The next descriptor following that "last descriptor" in the + * ring. + */ + tx_end = sw_ring[tx_end].next_id; + + /* + * The "last descriptor" associated with that next descriptor. + */ + tx_end = sw_ring[tx_end].last_id; + + /* + * Check that this descriptor is free. + */ + if (! (txr[tx_end].wb.status & E1000_TXD_STAT_DD)) { + if (nb_tx == 0) + return 0; + goto end_of_tx; + } + + /* + * Set common flags of all TX Data Descriptors. 
+ * + * The following bits must be set in all Data Descriptors: + * - E1000_ADVTXD_DTYP_DATA + * - E1000_ADVTXD_DCMD_DEXT + * + * The following bits must be set in the first Data Descriptor + * and are ignored in the other ones: + * - E1000_ADVTXD_DCMD_IFCS + * - E1000_ADVTXD_MAC_1588 + * - E1000_ADVTXD_DCMD_VLE + * + * The following bits must only be set in the last Data + * Descriptor: + * - E1000_TXD_CMD_EOP + * + * The following bits can be set in any Data Descriptor, but + * are only set in the last Data Descriptor: + * - E1000_TXD_CMD_RS + */ + cmd_type_len = txq->txd_type | + E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT; + if (tx_ol_req & PKT_TX_TCP_SEG) + pkt_len -= (tx_pkt->l2_len + tx_pkt->l3_len + tx_pkt->l4_len); + olinfo_status = (pkt_len << E1000_ADVTXD_PAYLEN_SHIFT); +#if defined(RTE_LIBRTE_IEEE1588) + if (ol_flags & PKT_TX_IEEE1588_TMST) + cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP; +#endif + if (tx_ol_req) { + /* Setup TX Advanced context descriptor if required */ + if (new_ctx) { + volatile struct e1000_adv_tx_context_desc * + ctx_txd; + + ctx_txd = (volatile struct + e1000_adv_tx_context_desc *) + &txr[tx_id]; + + txn = &sw_ring[txe->next_id]; + RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf); + + if (txe->mbuf != NULL) { + rte_pktmbuf_free_seg(txe->mbuf); + txe->mbuf = NULL; + } + + igbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req, tx_offload); + + txe->last_id = tx_last; + tx_id = txe->next_id; + txe = txn; + } + + /* Setup the TX Advanced Data Descriptor */ + cmd_type_len |= tx_desc_vlan_flags_to_cmdtype(tx_ol_req); + olinfo_status |= tx_desc_cksum_flags_to_olinfo(tx_ol_req); + olinfo_status |= (ctx << E1000_ADVTXD_IDX_SHIFT); + } + + m_seg = tx_pkt; + do { + txn = &sw_ring[txe->next_id]; + txd = &txr[tx_id]; + + if (txe->mbuf != NULL) + rte_pktmbuf_free_seg(txe->mbuf); + txe->mbuf = m_seg; + + /* + * Set up transmit descriptor. + */ + slen = (uint16_t) m_seg->data_len; + buf_dma_addr = rte_mbuf_data_iova(m_seg); + txd->read.buffer_addr = + rte_cpu_to_le_64(buf_dma_addr); + txd->read.cmd_type_len = + rte_cpu_to_le_32(cmd_type_len | slen); + txd->read.olinfo_status = + rte_cpu_to_le_32(olinfo_status); + txe->last_id = tx_last; + tx_id = txe->next_id; + txe = txn; + m_seg = m_seg->next; + } while (m_seg != NULL); + + /* + * The last packet data descriptor needs End Of Packet (EOP) + * and Report Status (RS). + */ + txd->read.cmd_type_len |= + rte_cpu_to_le_32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS); + } + end_of_tx: + rte_wmb(); + + /* + * Set the Transmit Descriptor Tail (TDT). 
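+ * The rte_wmb() above makes all descriptor updates visible to the
+ * device before the new tail value is written.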
+ */ + E1000_PCI_REG_WRITE_RELAXED(txq->tdt_reg_addr, tx_id); + PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u", + (unsigned) txq->port_id, (unsigned) txq->queue_id, + (unsigned) tx_id, (unsigned) nb_tx); + txq->tx_tail = tx_id; + + return nb_tx; +} + +/********************************************************************* + * + * TX prep functions + * + **********************************************************************/ +uint16_t +eth_igb_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + int i, ret; + struct rte_mbuf *m; + + for (i = 0; i < nb_pkts; i++) { + m = tx_pkts[i]; + + /* Check some limitations for TSO in hardware */ + if (m->ol_flags & PKT_TX_TCP_SEG) + if ((m->tso_segsz > IGB_TSO_MAX_MSS) || + (m->l2_len + m->l3_len + m->l4_len > + IGB_TSO_MAX_HDRLEN)) { + rte_errno = EINVAL; + return i; + } + + if (m->ol_flags & IGB_TX_OFFLOAD_NOTSUP_MASK) { + rte_errno = ENOTSUP; + return i; + } + +#ifdef RTE_LIBRTE_ETHDEV_DEBUG + ret = rte_validate_tx_offload(m); + if (ret != 0) { + rte_errno = -ret; + return i; + } +#endif + ret = rte_net_intel_cksum_prepare(m); + if (ret != 0) { + rte_errno = -ret; + return i; + } + } + + return i; +} + +/********************************************************************* + * + * RX functions + * + **********************************************************************/ +#define IGB_PACKET_TYPE_IPV4 0X01 +#define IGB_PACKET_TYPE_IPV4_TCP 0X11 +#define IGB_PACKET_TYPE_IPV4_UDP 0X21 +#define IGB_PACKET_TYPE_IPV4_SCTP 0X41 +#define IGB_PACKET_TYPE_IPV4_EXT 0X03 +#define IGB_PACKET_TYPE_IPV4_EXT_SCTP 0X43 +#define IGB_PACKET_TYPE_IPV6 0X04 +#define IGB_PACKET_TYPE_IPV6_TCP 0X14 +#define IGB_PACKET_TYPE_IPV6_UDP 0X24 +#define IGB_PACKET_TYPE_IPV6_EXT 0X0C +#define IGB_PACKET_TYPE_IPV6_EXT_TCP 0X1C +#define IGB_PACKET_TYPE_IPV6_EXT_UDP 0X2C +#define IGB_PACKET_TYPE_IPV4_IPV6 0X05 +#define IGB_PACKET_TYPE_IPV4_IPV6_TCP 0X15 +#define IGB_PACKET_TYPE_IPV4_IPV6_UDP 0X25 +#define IGB_PACKET_TYPE_IPV4_IPV6_EXT 0X0D +#define IGB_PACKET_TYPE_IPV4_IPV6_EXT_TCP 0X1D +#define IGB_PACKET_TYPE_IPV4_IPV6_EXT_UDP 0X2D +#define IGB_PACKET_TYPE_MAX 0X80 +#define IGB_PACKET_TYPE_MASK 0X7F +#define IGB_PACKET_TYPE_SHIFT 0X04 +static inline uint32_t +igb_rxd_pkt_info_to_pkt_type(uint16_t pkt_info) +{ + static const uint32_t + ptype_table[IGB_PACKET_TYPE_MAX] __rte_cache_aligned = { + [IGB_PACKET_TYPE_IPV4] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4, + [IGB_PACKET_TYPE_IPV4_EXT] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT, + [IGB_PACKET_TYPE_IPV6] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6, + [IGB_PACKET_TYPE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6, + [IGB_PACKET_TYPE_IPV6_EXT] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6_EXT, + [IGB_PACKET_TYPE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT, + [IGB_PACKET_TYPE_IPV4_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP, + [IGB_PACKET_TYPE_IPV6_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP, + [IGB_PACKET_TYPE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP, + [IGB_PACKET_TYPE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP, + [IGB_PACKET_TYPE_IPV4_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP, + 
[IGB_PACKET_TYPE_IPV4_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP, + [IGB_PACKET_TYPE_IPV6_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP, + [IGB_PACKET_TYPE_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP, + [IGB_PACKET_TYPE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP, + [IGB_PACKET_TYPE_IPV4_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP, + [IGB_PACKET_TYPE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP, + [IGB_PACKET_TYPE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_SCTP, + }; + if (unlikely(pkt_info & E1000_RXDADV_PKTTYPE_ETQF)) + return RTE_PTYPE_UNKNOWN; + + pkt_info = (pkt_info >> IGB_PACKET_TYPE_SHIFT) & IGB_PACKET_TYPE_MASK; + + return ptype_table[pkt_info]; +} + +static inline uint64_t +rx_desc_hlen_type_rss_to_pkt_flags(struct igb_rx_queue *rxq, uint32_t hl_tp_rs) +{ + uint64_t pkt_flags = ((hl_tp_rs & 0x0F) == 0) ? 0 : PKT_RX_RSS_HASH; + +#if defined(RTE_LIBRTE_IEEE1588) + static uint32_t ip_pkt_etqf_map[8] = { + 0, 0, 0, PKT_RX_IEEE1588_PTP, + 0, 0, 0, 0, + }; + + struct rte_eth_dev dev = rte_eth_devices[rxq->port_id]; + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev.data->dev_private); + + /* EtherType is in bits 8:10 in Packet Type, and not in the default 0:2 */ + if (hw->mac.type == e1000_i210) + pkt_flags |= ip_pkt_etqf_map[(hl_tp_rs >> 12) & 0x07]; + else + pkt_flags |= ip_pkt_etqf_map[(hl_tp_rs >> 4) & 0x07]; +#else + RTE_SET_USED(rxq); +#endif + + return pkt_flags; +} + +static inline uint64_t +rx_desc_status_to_pkt_flags(uint32_t rx_status) +{ + uint64_t pkt_flags; + + /* Check if VLAN present */ + pkt_flags = ((rx_status & E1000_RXD_STAT_VP) ? + PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED : 0); + +#if defined(RTE_LIBRTE_IEEE1588) + if (rx_status & E1000_RXD_STAT_TMST) + pkt_flags = pkt_flags | PKT_RX_IEEE1588_TMST; +#endif + return pkt_flags; +} + +static inline uint64_t +rx_desc_error_to_pkt_flags(uint32_t rx_status) +{ + /* + * Bit 30: IPE, IPv4 checksum error + * Bit 29: L4I, L4I integrity error + */ + + static uint64_t error_to_pkt_flags_map[4] = { + PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD, + PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD, + PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD, + PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD + }; + return error_to_pkt_flags_map[(rx_status >> + E1000_RXD_ERR_CKSUM_BIT) & E1000_RXD_ERR_CKSUM_MSK]; +} + +uint16_t +eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + struct igb_rx_queue *rxq; + volatile union e1000_adv_rx_desc *rx_ring; + volatile union e1000_adv_rx_desc *rxdp; + struct igb_rx_entry *sw_ring; + struct igb_rx_entry *rxe; + struct rte_mbuf *rxm; + struct rte_mbuf *nmb; + union e1000_adv_rx_desc rxd; + uint64_t dma_addr; + uint32_t staterr; + uint32_t hlen_type_rss; + uint16_t pkt_len; + uint16_t rx_id; + uint16_t nb_rx; + uint16_t nb_hold; + uint64_t pkt_flags; + + nb_rx = 0; + nb_hold = 0; + rxq = rx_queue; + rx_id = rxq->rx_tail; + rx_ring = rxq->rx_ring; + sw_ring = rxq->sw_ring; + while (nb_rx < nb_pkts) { + /* + * The order of operations here is important as the DD status + * bit must not be read after any other descriptor fields. + * rx_ring and rxdp are pointing to volatile data so the order + * of accesses cannot be reordered by the compiler. 
If they were + * not volatile, they could be reordered which could lead to + * using invalid descriptor fields when read from rxd. + */ + rxdp = &rx_ring[rx_id]; + staterr = rxdp->wb.upper.status_error; + if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD))) + break; + rxd = *rxdp; + + /* + * End of packet. + * + * If the E1000_RXD_STAT_EOP flag is not set, the RX packet is + * likely to be invalid and to be dropped by the various + * validation checks performed by the network stack. + * + * Allocate a new mbuf to replenish the RX ring descriptor. + * If the allocation fails: + * - arrange for that RX descriptor to be the first one + * being parsed the next time the receive function is + * invoked [on the same queue]. + * + * - Stop parsing the RX ring and return immediately. + * + * This policy do not drop the packet received in the RX + * descriptor for which the allocation of a new mbuf failed. + * Thus, it allows that packet to be later retrieved if + * mbuf have been freed in the mean time. + * As a side effect, holding RX descriptors instead of + * systematically giving them back to the NIC may lead to + * RX ring exhaustion situations. + * However, the NIC can gracefully prevent such situations + * to happen by sending specific "back-pressure" flow control + * frames to its peer(s). + */ + PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u " + "staterr=0x%x pkt_len=%u", + (unsigned) rxq->port_id, (unsigned) rxq->queue_id, + (unsigned) rx_id, (unsigned) staterr, + (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length)); + + nmb = rte_mbuf_raw_alloc(rxq->mb_pool); + if (nmb == NULL) { + PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u " + "queue_id=%u", (unsigned) rxq->port_id, + (unsigned) rxq->queue_id); + rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++; + break; + } + + nb_hold++; + rxe = &sw_ring[rx_id]; + rx_id++; + if (rx_id == rxq->nb_rx_desc) + rx_id = 0; + + /* Prefetch next mbuf while processing current one. */ + rte_igb_prefetch(sw_ring[rx_id].mbuf); + + /* + * When next RX descriptor is on a cache-line boundary, + * prefetch the next 4 RX descriptors and the next 8 pointers + * to mbufs. + */ + if ((rx_id & 0x3) == 0) { + rte_igb_prefetch(&rx_ring[rx_id]); + rte_igb_prefetch(&sw_ring[rx_id]); + } + + rxm = rxe->mbuf; + rxe->mbuf = nmb; + dma_addr = + rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb)); + rxdp->read.hdr_addr = 0; + rxdp->read.pkt_addr = dma_addr; + + /* + * Initialize the returned mbuf. + * 1) setup generic mbuf fields: + * - number of segments, + * - next segment, + * - packet length, + * - RX port identifier. + * 2) integrate hardware offload data, if any: + * - RSS flag & hash, + * - IP checksum flag, + * - VLAN TCI, if any, + * - error flags. + */ + pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.wb.upper.length) - + rxq->crc_len); + rxm->data_off = RTE_PKTMBUF_HEADROOM; + rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off); + rxm->nb_segs = 1; + rxm->next = NULL; + rxm->pkt_len = pkt_len; + rxm->data_len = pkt_len; + rxm->port = rxq->port_id; + + rxm->hash.rss = rxd.wb.lower.hi_dword.rss; + hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data); + + /* + * The vlan_tci field is only valid when PKT_RX_VLAN is + * set in the pkt_flags field and must be in CPU byte order. 
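+ * On queues flagged with IGB_RXQ_FLAG_LB_BSWAP_VLAN, loopback packets
+ * (LB status bit set) carry the tag byte-swapped, hence the conditional
+ * rte_be_to_cpu_16() below.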
+ */ + if ((staterr & rte_cpu_to_le_32(E1000_RXDEXT_STATERR_LB)) && + (rxq->flags & IGB_RXQ_FLAG_LB_BSWAP_VLAN)) { + rxm->vlan_tci = rte_be_to_cpu_16(rxd.wb.upper.vlan); + } else { + rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan); + } + pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(rxq, hlen_type_rss); + pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr); + pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr); + rxm->ol_flags = pkt_flags; + rxm->packet_type = igb_rxd_pkt_info_to_pkt_type(rxd.wb.lower. + lo_dword.hs_rss.pkt_info); + + /* + * Store the mbuf address into the next entry of the array + * of returned packets. + */ + rx_pkts[nb_rx++] = rxm; + } + rxq->rx_tail = rx_id; + + /* + * If the number of free RX descriptors is greater than the RX free + * threshold of the queue, advance the Receive Descriptor Tail (RDT) + * register. + * Update the RDT with the value of the last processed RX descriptor + * minus 1, to guarantee that the RDT register is never equal to the + * RDH register, which creates a "full" ring situtation from the + * hardware point of view... + */ + nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold); + if (nb_hold > rxq->rx_free_thresh) { + PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u " + "nb_hold=%u nb_rx=%u", + (unsigned) rxq->port_id, (unsigned) rxq->queue_id, + (unsigned) rx_id, (unsigned) nb_hold, + (unsigned) nb_rx); + rx_id = (uint16_t) ((rx_id == 0) ? + (rxq->nb_rx_desc - 1) : (rx_id - 1)); + E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id); + nb_hold = 0; + } + rxq->nb_rx_hold = nb_hold; + return nb_rx; +} + +uint16_t +eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + struct igb_rx_queue *rxq; + volatile union e1000_adv_rx_desc *rx_ring; + volatile union e1000_adv_rx_desc *rxdp; + struct igb_rx_entry *sw_ring; + struct igb_rx_entry *rxe; + struct rte_mbuf *first_seg; + struct rte_mbuf *last_seg; + struct rte_mbuf *rxm; + struct rte_mbuf *nmb; + union e1000_adv_rx_desc rxd; + uint64_t dma; /* Physical address of mbuf data buffer */ + uint32_t staterr; + uint32_t hlen_type_rss; + uint16_t rx_id; + uint16_t nb_rx; + uint16_t nb_hold; + uint16_t data_len; + uint64_t pkt_flags; + + nb_rx = 0; + nb_hold = 0; + rxq = rx_queue; + rx_id = rxq->rx_tail; + rx_ring = rxq->rx_ring; + sw_ring = rxq->sw_ring; + + /* + * Retrieve RX context of current packet, if any. + */ + first_seg = rxq->pkt_first_seg; + last_seg = rxq->pkt_last_seg; + + while (nb_rx < nb_pkts) { + next_desc: + /* + * The order of operations here is important as the DD status + * bit must not be read after any other descriptor fields. + * rx_ring and rxdp are pointing to volatile data so the order + * of accesses cannot be reordered by the compiler. If they were + * not volatile, they could be reordered which could lead to + * using invalid descriptor fields when read from rxd. + */ + rxdp = &rx_ring[rx_id]; + staterr = rxdp->wb.upper.status_error; + if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD))) + break; + rxd = *rxdp; + + /* + * Descriptor done. + * + * Allocate a new mbuf to replenish the RX ring descriptor. + * If the allocation fails: + * - arrange for that RX descriptor to be the first one + * being parsed the next time the receive function is + * invoked [on the same queue]. + * + * - Stop parsing the RX ring and return immediately. + * + * This policy does not drop the packet received in the RX + * descriptor for which the allocation of a new mbuf failed. 
+ * Thus, it allows that packet to be later retrieved if + * mbuf have been freed in the mean time. + * As a side effect, holding RX descriptors instead of + * systematically giving them back to the NIC may lead to + * RX ring exhaustion situations. + * However, the NIC can gracefully prevent such situations + * to happen by sending specific "back-pressure" flow control + * frames to its peer(s). + */ + PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u " + "staterr=0x%x data_len=%u", + (unsigned) rxq->port_id, (unsigned) rxq->queue_id, + (unsigned) rx_id, (unsigned) staterr, + (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length)); + + nmb = rte_mbuf_raw_alloc(rxq->mb_pool); + if (nmb == NULL) { + PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u " + "queue_id=%u", (unsigned) rxq->port_id, + (unsigned) rxq->queue_id); + rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++; + break; + } + + nb_hold++; + rxe = &sw_ring[rx_id]; + rx_id++; + if (rx_id == rxq->nb_rx_desc) + rx_id = 0; + + /* Prefetch next mbuf while processing current one. */ + rte_igb_prefetch(sw_ring[rx_id].mbuf); + + /* + * When next RX descriptor is on a cache-line boundary, + * prefetch the next 4 RX descriptors and the next 8 pointers + * to mbufs. + */ + if ((rx_id & 0x3) == 0) { + rte_igb_prefetch(&rx_ring[rx_id]); + rte_igb_prefetch(&sw_ring[rx_id]); + } + + /* + * Update RX descriptor with the physical address of the new + * data buffer of the new allocated mbuf. + */ + rxm = rxe->mbuf; + rxe->mbuf = nmb; + dma = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb)); + rxdp->read.pkt_addr = dma; + rxdp->read.hdr_addr = 0; + + /* + * Set data length & data buffer address of mbuf. + */ + data_len = rte_le_to_cpu_16(rxd.wb.upper.length); + rxm->data_len = data_len; + rxm->data_off = RTE_PKTMBUF_HEADROOM; + + /* + * If this is the first buffer of the received packet, + * set the pointer to the first mbuf of the packet and + * initialize its context. + * Otherwise, update the total length and the number of segments + * of the current scattered packet, and update the pointer to + * the last mbuf of the current packet. + */ + if (first_seg == NULL) { + first_seg = rxm; + first_seg->pkt_len = data_len; + first_seg->nb_segs = 1; + } else { + first_seg->pkt_len += data_len; + first_seg->nb_segs++; + last_seg->next = rxm; + } + + /* + * If this is not the last buffer of the received packet, + * update the pointer to the last mbuf of the current scattered + * packet and continue to parse the RX ring. + */ + if (! (staterr & E1000_RXD_STAT_EOP)) { + last_seg = rxm; + goto next_desc; + } + + /* + * This is the last buffer of the received packet. + * If the CRC is not stripped by the hardware: + * - Subtract the CRC length from the total packet length. + * - If the last buffer only contains the whole CRC or a part + * of it, free the mbuf associated to the last buffer. + * If part of the CRC is also contained in the previous + * mbuf, subtract the length of that CRC part from the + * data length of the previous mbuf. 
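+ * (rxq->crc_len is 4 only when the hardware does not strip the CRC and
+ * 0 otherwise, so the block below is skipped in the common case.)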
+ */ + rxm->next = NULL; + if (unlikely(rxq->crc_len > 0)) { + first_seg->pkt_len -= RTE_ETHER_CRC_LEN; + if (data_len <= RTE_ETHER_CRC_LEN) { + rte_pktmbuf_free_seg(rxm); + first_seg->nb_segs--; + last_seg->data_len = (uint16_t) + (last_seg->data_len - + (RTE_ETHER_CRC_LEN - data_len)); + last_seg->next = NULL; + } else + rxm->data_len = (uint16_t) + (data_len - RTE_ETHER_CRC_LEN); + } + + /* + * Initialize the first mbuf of the returned packet: + * - RX port identifier, + * - hardware offload data, if any: + * - RSS flag & hash, + * - IP checksum flag, + * - VLAN TCI, if any, + * - error flags. + */ + first_seg->port = rxq->port_id; + first_seg->hash.rss = rxd.wb.lower.hi_dword.rss; + + /* + * The vlan_tci field is only valid when PKT_RX_VLAN is + * set in the pkt_flags field and must be in CPU byte order. + */ + if ((staterr & rte_cpu_to_le_32(E1000_RXDEXT_STATERR_LB)) && + (rxq->flags & IGB_RXQ_FLAG_LB_BSWAP_VLAN)) { + first_seg->vlan_tci = + rte_be_to_cpu_16(rxd.wb.upper.vlan); + } else { + first_seg->vlan_tci = + rte_le_to_cpu_16(rxd.wb.upper.vlan); + } + hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data); + pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(rxq, hlen_type_rss); + pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr); + pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr); + first_seg->ol_flags = pkt_flags; + first_seg->packet_type = igb_rxd_pkt_info_to_pkt_type(rxd.wb. + lower.lo_dword.hs_rss.pkt_info); + + /* Prefetch data of first segment, if configured to do so. */ + rte_packet_prefetch((char *)first_seg->buf_addr + + first_seg->data_off); + + /* + * Store the mbuf address into the next entry of the array + * of returned packets. + */ + rx_pkts[nb_rx++] = first_seg; + + /* + * Setup receipt context for a new packet. + */ + first_seg = NULL; + } + + /* + * Record index of the next RX descriptor to probe. + */ + rxq->rx_tail = rx_id; + + /* + * Save receive context. + */ + rxq->pkt_first_seg = first_seg; + rxq->pkt_last_seg = last_seg; + + /* + * If the number of free RX descriptors is greater than the RX free + * threshold of the queue, advance the Receive Descriptor Tail (RDT) + * register. + * Update the RDT with the value of the last processed RX descriptor + * minus 1, to guarantee that the RDT register is never equal to the + * RDH register, which creates a "full" ring situtation from the + * hardware point of view... + */ + nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold); + if (nb_hold > rxq->rx_free_thresh) { + PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u " + "nb_hold=%u nb_rx=%u", + (unsigned) rxq->port_id, (unsigned) rxq->queue_id, + (unsigned) rx_id, (unsigned) nb_hold, + (unsigned) nb_rx); + rx_id = (uint16_t) ((rx_id == 0) ? + (rxq->nb_rx_desc - 1) : (rx_id - 1)); + E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id); + nb_hold = 0; + } + rxq->nb_rx_hold = nb_hold; + return nb_rx; +} + +/* + * Maximum number of Ring Descriptors. 
+ * + * Since RDLEN/TDLEN should be multiple of 128bytes, the number of ring + * desscriptors should meet the following condition: + * (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0 + */ + +static void +igb_tx_queue_release_mbufs(struct igb_tx_queue *txq) +{ + unsigned i; + + if (txq->sw_ring != NULL) { + for (i = 0; i < txq->nb_tx_desc; i++) { + if (txq->sw_ring[i].mbuf != NULL) { + rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf); + txq->sw_ring[i].mbuf = NULL; + } + } + } +} + +static void +igb_tx_queue_release(struct igb_tx_queue *txq) +{ + if (txq != NULL) { + igb_tx_queue_release_mbufs(txq); + rte_free(txq->sw_ring); + rte_free(txq); + } +} + +void +eth_igb_tx_queue_release(void *txq) +{ + igb_tx_queue_release(txq); +} + +static int +igb_tx_done_cleanup(struct igb_tx_queue *txq, uint32_t free_cnt) +{ + struct igb_tx_entry *sw_ring; + volatile union e1000_adv_tx_desc *txr; + uint16_t tx_first; /* First segment analyzed. */ + uint16_t tx_id; /* Current segment being processed. */ + uint16_t tx_last; /* Last segment in the current packet. */ + uint16_t tx_next; /* First segment of the next packet. */ + int count; + + if (txq != NULL) { + count = 0; + sw_ring = txq->sw_ring; + txr = txq->tx_ring; + + /* + * tx_tail is the last sent packet on the sw_ring. Goto the end + * of that packet (the last segment in the packet chain) and + * then the next segment will be the start of the oldest segment + * in the sw_ring. This is the first packet that will be + * attempted to be freed. + */ + + /* Get last segment in most recently added packet. */ + tx_first = sw_ring[txq->tx_tail].last_id; + + /* Get the next segment, which is the oldest segment in ring. */ + tx_first = sw_ring[tx_first].next_id; + + /* Set the current index to the first. */ + tx_id = tx_first; + + /* + * Loop through each packet. For each packet, verify that an + * mbuf exists and that the last segment is free. If so, free + * it and move on. + */ + while (1) { + tx_last = sw_ring[tx_id].last_id; + + if (sw_ring[tx_last].mbuf) { + if (txr[tx_last].wb.status & + E1000_TXD_STAT_DD) { + /* + * Increment the number of packets + * freed. + */ + count++; + + /* Get the start of the next packet. */ + tx_next = sw_ring[tx_last].next_id; + + /* + * Loop through all segments in a + * packet. + */ + do { + rte_pktmbuf_free_seg(sw_ring[tx_id].mbuf); + sw_ring[tx_id].mbuf = NULL; + sw_ring[tx_id].last_id = tx_id; + + /* Move to next segemnt. */ + tx_id = sw_ring[tx_id].next_id; + + } while (tx_id != tx_next); + + if (unlikely(count == (int)free_cnt)) + break; + } else + /* + * mbuf still in use, nothing left to + * free. + */ + break; + } else { + /* + * There are multiple reasons to be here: + * 1) All the packets on the ring have been + * freed - tx_id is equal to tx_first + * and some packets have been freed. + * - Done, exit + * 2) Interfaces has not sent a rings worth of + * packets yet, so the segment after tail is + * still empty. Or a previous call to this + * function freed some of the segments but + * not all so there is a hole in the list. + * Hopefully this is a rare case. + * - Walk the list and find the next mbuf. If + * there isn't one, then done. + */ + if (likely((tx_id == tx_first) && (count != 0))) + break; + + /* + * Walk the list and find the next mbuf, if any. + */ + do { + /* Move to next segemnt. */ + tx_id = sw_ring[tx_id].next_id; + + if (sw_ring[tx_id].mbuf) + break; + + } while (tx_id != tx_first); + + /* + * Determine why previous loop bailed. If there + * is not an mbuf, done. 
+ */ + if (sw_ring[tx_id].mbuf == NULL) + break; + } + } + } else + count = -ENODEV; + + return count; +} + +int +eth_igb_tx_done_cleanup(void *txq, uint32_t free_cnt) +{ + return igb_tx_done_cleanup(txq, free_cnt); +} + +static void +igb_reset_tx_queue_stat(struct igb_tx_queue *txq) +{ + txq->tx_head = 0; + txq->tx_tail = 0; + txq->ctx_curr = 0; + memset((void*)&txq->ctx_cache, 0, + IGB_CTX_NUM * sizeof(struct igb_advctx_info)); +} + +static void +igb_reset_tx_queue(struct igb_tx_queue *txq, struct rte_eth_dev *dev) +{ + static const union e1000_adv_tx_desc zeroed_desc = {{0}}; + struct igb_tx_entry *txe = txq->sw_ring; + uint16_t i, prev; + struct e1000_hw *hw; + + hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + /* Zero out HW ring memory */ + for (i = 0; i < txq->nb_tx_desc; i++) { + txq->tx_ring[i] = zeroed_desc; + } + + /* Initialize ring entries */ + prev = (uint16_t)(txq->nb_tx_desc - 1); + for (i = 0; i < txq->nb_tx_desc; i++) { + volatile union e1000_adv_tx_desc *txd = &(txq->tx_ring[i]); + + txd->wb.status = E1000_TXD_STAT_DD; + txe[i].mbuf = NULL; + txe[i].last_id = i; + txe[prev].next_id = i; + prev = i; + } + + txq->txd_type = E1000_ADVTXD_DTYP_DATA; + /* 82575 specific, each tx queue will use 2 hw contexts */ + if (hw->mac.type == e1000_82575) + txq->ctx_start = txq->queue_id * IGB_CTX_NUM; + + igb_reset_tx_queue_stat(txq); +} + +uint64_t +igb_get_tx_port_offloads_capa(struct rte_eth_dev *dev) +{ + uint64_t tx_offload_capa; + + RTE_SET_USED(dev); + tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT | + DEV_TX_OFFLOAD_IPV4_CKSUM | + DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_TCP_CKSUM | + DEV_TX_OFFLOAD_SCTP_CKSUM | + DEV_TX_OFFLOAD_TCP_TSO | + DEV_TX_OFFLOAD_MULTI_SEGS; + + return tx_offload_capa; +} + +uint64_t +igb_get_tx_queue_offloads_capa(struct rte_eth_dev *dev) +{ + uint64_t tx_queue_offload_capa; + + tx_queue_offload_capa = igb_get_tx_port_offloads_capa(dev); + + return tx_queue_offload_capa; +} + +int +eth_igb_tx_queue_setup(struct rte_eth_dev *dev, + uint16_t queue_idx, + uint16_t nb_desc, + unsigned int socket_id, + const struct rte_eth_txconf *tx_conf) +{ + const struct rte_memzone *tz; + struct igb_tx_queue *txq; + struct e1000_hw *hw; + uint32_t size; + uint64_t offloads; + + offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads; + + hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* + * Validate number of transmit descriptors. + * It must not exceed hardware maximum, and must be multiple + * of E1000_ALIGN. + */ + if (nb_desc % IGB_TXD_ALIGN != 0 || + (nb_desc > E1000_MAX_RING_DESC) || + (nb_desc < E1000_MIN_RING_DESC)) { + return -EINVAL; + } + + /* + * The tx_free_thresh and tx_rs_thresh values are not used in the 1G + * driver. 
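+ * A non-zero value is accepted but ignored; it only triggers the
+ * informational log messages below.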
+ */ + if (tx_conf->tx_free_thresh != 0) + PMD_INIT_LOG(INFO, "The tx_free_thresh parameter is not " + "used for the 1G driver."); + if (tx_conf->tx_rs_thresh != 0) + PMD_INIT_LOG(INFO, "The tx_rs_thresh parameter is not " + "used for the 1G driver."); + if (tx_conf->tx_thresh.wthresh == 0 && hw->mac.type != e1000_82576) + PMD_INIT_LOG(INFO, "To improve 1G driver performance, " + "consider setting the TX WTHRESH value to 4, 8, " + "or 16."); + + /* Free memory prior to re-allocation if needed */ + if (dev->data->tx_queues[queue_idx] != NULL) { + igb_tx_queue_release(dev->data->tx_queues[queue_idx]); + dev->data->tx_queues[queue_idx] = NULL; + } + + /* First allocate the tx queue data structure */ + txq = rte_zmalloc("ethdev TX queue", sizeof(struct igb_tx_queue), + RTE_CACHE_LINE_SIZE); + if (txq == NULL) + return -ENOMEM; + + /* + * Allocate TX ring hardware descriptors. A memzone large enough to + * handle the maximum ring size is allocated in order to allow for + * resizing in later calls to the queue setup function. + */ + size = sizeof(union e1000_adv_tx_desc) * E1000_MAX_RING_DESC; + tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx, size, + E1000_ALIGN, socket_id); + if (tz == NULL) { + igb_tx_queue_release(txq); + return -ENOMEM; + } + + txq->nb_tx_desc = nb_desc; + txq->pthresh = tx_conf->tx_thresh.pthresh; + txq->hthresh = tx_conf->tx_thresh.hthresh; + txq->wthresh = tx_conf->tx_thresh.wthresh; + if (txq->wthresh > 0 && hw->mac.type == e1000_82576) + txq->wthresh = 1; + txq->queue_id = queue_idx; + txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ? + queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx); + txq->port_id = dev->data->port_id; + + txq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(txq->reg_idx)); + txq->tx_ring_phys_addr = tz->iova; + + txq->tx_ring = (union e1000_adv_tx_desc *) tz->addr; + /* Allocate software ring */ + txq->sw_ring = rte_zmalloc("txq->sw_ring", + sizeof(struct igb_tx_entry) * nb_desc, + RTE_CACHE_LINE_SIZE); + if (txq->sw_ring == NULL) { + igb_tx_queue_release(txq); + return -ENOMEM; + } + PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64, + txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr); + + igb_reset_tx_queue(txq, dev); + dev->tx_pkt_burst = eth_igb_xmit_pkts; + dev->tx_pkt_prepare = ð_igb_prep_pkts; + dev->data->tx_queues[queue_idx] = txq; + txq->offloads = offloads; + + return 0; +} + +static void +igb_rx_queue_release_mbufs(struct igb_rx_queue *rxq) +{ + unsigned i; + + if (rxq->sw_ring != NULL) { + for (i = 0; i < rxq->nb_rx_desc; i++) { + if (rxq->sw_ring[i].mbuf != NULL) { + rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf); + rxq->sw_ring[i].mbuf = NULL; + } + } + } +} + +static void +igb_rx_queue_release(struct igb_rx_queue *rxq) +{ + if (rxq != NULL) { + igb_rx_queue_release_mbufs(rxq); + rte_free(rxq->sw_ring); + rte_free(rxq); + } +} + +void +eth_igb_rx_queue_release(void *rxq) +{ + igb_rx_queue_release(rxq); +} + +static void +igb_reset_rx_queue(struct igb_rx_queue *rxq) +{ + static const union e1000_adv_rx_desc zeroed_desc = {{0}}; + unsigned i; + + /* Zero out HW ring memory */ + for (i = 0; i < rxq->nb_rx_desc; i++) { + rxq->rx_ring[i] = zeroed_desc; + } + + rxq->rx_tail = 0; + rxq->pkt_first_seg = NULL; + rxq->pkt_last_seg = NULL; +} + +uint64_t +igb_get_rx_port_offloads_capa(struct rte_eth_dev *dev) +{ + uint64_t rx_offload_capa; + + RTE_SET_USED(dev); + rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP | + DEV_RX_OFFLOAD_VLAN_FILTER | + DEV_RX_OFFLOAD_IPV4_CKSUM | + DEV_RX_OFFLOAD_UDP_CKSUM | 
+ DEV_RX_OFFLOAD_TCP_CKSUM | + DEV_RX_OFFLOAD_JUMBO_FRAME | + DEV_RX_OFFLOAD_KEEP_CRC | + DEV_RX_OFFLOAD_SCATTER | + DEV_RX_OFFLOAD_RSS_HASH; + + return rx_offload_capa; +} + +uint64_t +igb_get_rx_queue_offloads_capa(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint64_t rx_queue_offload_capa; + + switch (hw->mac.type) { + case e1000_vfadapt_i350: + /* + * As only one Rx queue can be used, let per queue offloading + * capability be same to per port queue offloading capability + * for better convenience. + */ + rx_queue_offload_capa = igb_get_rx_port_offloads_capa(dev); + break; + default: + rx_queue_offload_capa = 0; + } + return rx_queue_offload_capa; +} + +int +eth_igb_rx_queue_setup(struct rte_eth_dev *dev, + uint16_t queue_idx, + uint16_t nb_desc, + unsigned int socket_id, + const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *mp) +{ + const struct rte_memzone *rz; + struct igb_rx_queue *rxq; + struct e1000_hw *hw; + unsigned int size; + uint64_t offloads; + + offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads; + + hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* + * Validate number of receive descriptors. + * It must not exceed hardware maximum, and must be multiple + * of E1000_ALIGN. + */ + if (nb_desc % IGB_RXD_ALIGN != 0 || + (nb_desc > E1000_MAX_RING_DESC) || + (nb_desc < E1000_MIN_RING_DESC)) { + return -EINVAL; + } + + /* Free memory prior to re-allocation if needed */ + if (dev->data->rx_queues[queue_idx] != NULL) { + igb_rx_queue_release(dev->data->rx_queues[queue_idx]); + dev->data->rx_queues[queue_idx] = NULL; + } + + /* First allocate the RX queue data structure. */ + rxq = rte_zmalloc("ethdev RX queue", sizeof(struct igb_rx_queue), + RTE_CACHE_LINE_SIZE); + if (rxq == NULL) + return -ENOMEM; + rxq->offloads = offloads; + rxq->mb_pool = mp; + rxq->nb_rx_desc = nb_desc; + rxq->pthresh = rx_conf->rx_thresh.pthresh; + rxq->hthresh = rx_conf->rx_thresh.hthresh; + rxq->wthresh = rx_conf->rx_thresh.wthresh; + if (rxq->wthresh > 0 && + (hw->mac.type == e1000_82576 || hw->mac.type == e1000_vfadapt_i350)) + rxq->wthresh = 1; + rxq->drop_en = rx_conf->rx_drop_en; + rxq->rx_free_thresh = rx_conf->rx_free_thresh; + rxq->queue_id = queue_idx; + rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ? + queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx); + rxq->port_id = dev->data->port_id; + if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) + rxq->crc_len = RTE_ETHER_CRC_LEN; + else + rxq->crc_len = 0; + + /* + * Allocate RX ring hardware descriptors. A memzone large enough to + * handle the maximum ring size is allocated in order to allow for + * resizing in later calls to the queue setup function. + */ + size = sizeof(union e1000_adv_rx_desc) * E1000_MAX_RING_DESC; + rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx, size, + E1000_ALIGN, socket_id); + if (rz == NULL) { + igb_rx_queue_release(rxq); + return -ENOMEM; + } + rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(rxq->reg_idx)); + rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(rxq->reg_idx)); + rxq->rx_ring_phys_addr = rz->iova; + rxq->rx_ring = (union e1000_adv_rx_desc *) rz->addr; + + /* Allocate software ring. 
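+ * One igb_rx_entry per descriptor tracks the mbuf currently posted at
+ * that ring slot; the array is sized for nb_desc entries and is freed
+ * together with the queue in igb_rx_queue_release().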
*/ + rxq->sw_ring = rte_zmalloc("rxq->sw_ring", + sizeof(struct igb_rx_entry) * nb_desc, + RTE_CACHE_LINE_SIZE); + if (rxq->sw_ring == NULL) { + igb_rx_queue_release(rxq); + return -ENOMEM; + } + PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64, + rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr); + + dev->data->rx_queues[queue_idx] = rxq; + igb_reset_rx_queue(rxq); + + return 0; +} + +uint32_t +eth_igb_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id) +{ +#define IGB_RXQ_SCAN_INTERVAL 4 + volatile union e1000_adv_rx_desc *rxdp; + struct igb_rx_queue *rxq; + uint32_t desc = 0; + + rxq = dev->data->rx_queues[rx_queue_id]; + rxdp = &(rxq->rx_ring[rxq->rx_tail]); + + while ((desc < rxq->nb_rx_desc) && + (rxdp->wb.upper.status_error & E1000_RXD_STAT_DD)) { + desc += IGB_RXQ_SCAN_INTERVAL; + rxdp += IGB_RXQ_SCAN_INTERVAL; + if (rxq->rx_tail + desc >= rxq->nb_rx_desc) + rxdp = &(rxq->rx_ring[rxq->rx_tail + + desc - rxq->nb_rx_desc]); + } + + return desc; +} + +int +eth_igb_rx_descriptor_done(void *rx_queue, uint16_t offset) +{ + volatile union e1000_adv_rx_desc *rxdp; + struct igb_rx_queue *rxq = rx_queue; + uint32_t desc; + + if (unlikely(offset >= rxq->nb_rx_desc)) + return 0; + desc = rxq->rx_tail + offset; + if (desc >= rxq->nb_rx_desc) + desc -= rxq->nb_rx_desc; + + rxdp = &rxq->rx_ring[desc]; + return !!(rxdp->wb.upper.status_error & E1000_RXD_STAT_DD); +} + +int +eth_igb_rx_descriptor_status(void *rx_queue, uint16_t offset) +{ + struct igb_rx_queue *rxq = rx_queue; + volatile uint32_t *status; + uint32_t desc; + + if (unlikely(offset >= rxq->nb_rx_desc)) + return -EINVAL; + + if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold) + return RTE_ETH_RX_DESC_UNAVAIL; + + desc = rxq->rx_tail + offset; + if (desc >= rxq->nb_rx_desc) + desc -= rxq->nb_rx_desc; + + status = &rxq->rx_ring[desc].wb.upper.status_error; + if (*status & rte_cpu_to_le_32(E1000_RXD_STAT_DD)) + return RTE_ETH_RX_DESC_DONE; + + return RTE_ETH_RX_DESC_AVAIL; +} + +int +eth_igb_tx_descriptor_status(void *tx_queue, uint16_t offset) +{ + struct igb_tx_queue *txq = tx_queue; + volatile uint32_t *status; + uint32_t desc; + + if (unlikely(offset >= txq->nb_tx_desc)) + return -EINVAL; + + desc = txq->tx_tail + offset; + if (desc >= txq->nb_tx_desc) + desc -= txq->nb_tx_desc; + + status = &txq->tx_ring[desc].wb.status; + if (*status & rte_cpu_to_le_32(E1000_TXD_STAT_DD)) + return RTE_ETH_TX_DESC_DONE; + + return RTE_ETH_TX_DESC_FULL; +} + +void +igb_dev_clear_queues(struct rte_eth_dev *dev) +{ + uint16_t i; + struct igb_tx_queue *txq; + struct igb_rx_queue *rxq; + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + txq = dev->data->tx_queues[i]; + if (txq != NULL) { + igb_tx_queue_release_mbufs(txq); + igb_reset_tx_queue(txq, dev); + } + } + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = dev->data->rx_queues[i]; + if (rxq != NULL) { + igb_rx_queue_release_mbufs(rxq); + igb_reset_rx_queue(rxq); + } + } +} + +void +igb_dev_free_queues(struct rte_eth_dev *dev) +{ + uint16_t i; + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + eth_igb_rx_queue_release(dev->data->rx_queues[i]); + dev->data->rx_queues[i] = NULL; + } + dev->data->nb_rx_queues = 0; + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + eth_igb_tx_queue_release(dev->data->tx_queues[i]); + dev->data->tx_queues[i] = NULL; + } + dev->data->nb_tx_queues = 0; +} + +/** + * Receive Side Scaling (RSS). 
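+ *
+ * A worked example of the lookup described below, assuming four RX
+ * queues and the default i % nb_rx_queues redirection table set up by
+ * igb_rss_configure(): a packet whose 32-bit hash is 0x9e3779b9 selects
+ * RETA entry 0x9e3779b9 & 0x7F = 57, and entry 57 holds queue index
+ * 57 % 4 = 1, so the packet is delivered to RX queue 1.
+ *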
+ * See section 7.1.1.7 in the following document: + * "Intel 82576 GbE Controller Datasheet" - Revision 2.45 October 2009 + * + * Principles: + * The source and destination IP addresses of the IP header and the source and + * destination ports of TCP/UDP headers, if any, of received packets are hashed + * against a configurable random key to compute a 32-bit RSS hash result. + * The seven (7) LSBs of the 32-bit hash result are used as an index into a + * 128-entry redirection table (RETA). Each entry of the RETA provides a 3-bit + * RSS output index which is used as the RX queue index where to store the + * received packets. + * The following output is supplied in the RX write-back descriptor: + * - 32-bit result of the Microsoft RSS hash function, + * - 4-bit RSS type field. + */ + +/* + * RSS random key supplied in section 7.1.1.7.3 of the Intel 82576 datasheet. + * Used as the default key. + */ +static uint8_t rss_intel_key[40] = { + 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, + 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0, + 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4, + 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, + 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA, +}; + +static void +igb_rss_disable(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw; + uint32_t mrqc; + + hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + mrqc = E1000_READ_REG(hw, E1000_MRQC); + mrqc &= ~E1000_MRQC_ENABLE_MASK; + E1000_WRITE_REG(hw, E1000_MRQC, mrqc); +} + +static void +igb_hw_rss_hash_set(struct e1000_hw *hw, struct rte_eth_rss_conf *rss_conf) +{ + uint8_t *hash_key; + uint32_t rss_key; + uint32_t mrqc; + uint64_t rss_hf; + uint16_t i; + + hash_key = rss_conf->rss_key; + if (hash_key != NULL) { + /* Fill in RSS hash key */ + for (i = 0; i < 10; i++) { + rss_key = hash_key[(i * 4)]; + rss_key |= hash_key[(i * 4) + 1] << 8; + rss_key |= hash_key[(i * 4) + 2] << 16; + rss_key |= hash_key[(i * 4) + 3] << 24; + E1000_WRITE_REG_ARRAY(hw, E1000_RSSRK(0), i, rss_key); + } + } + + /* Set configured hashing protocols in MRQC register */ + rss_hf = rss_conf->rss_hf; + mrqc = E1000_MRQC_ENABLE_RSS_4Q; /* RSS enabled. */ + if (rss_hf & ETH_RSS_IPV4) + mrqc |= E1000_MRQC_RSS_FIELD_IPV4; + if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) + mrqc |= E1000_MRQC_RSS_FIELD_IPV4_TCP; + if (rss_hf & ETH_RSS_IPV6) + mrqc |= E1000_MRQC_RSS_FIELD_IPV6; + if (rss_hf & ETH_RSS_IPV6_EX) + mrqc |= E1000_MRQC_RSS_FIELD_IPV6_EX; + if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) + mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP; + if (rss_hf & ETH_RSS_IPV6_TCP_EX) + mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP_EX; + if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) + mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP; + if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) + mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP; + if (rss_hf & ETH_RSS_IPV6_UDP_EX) + mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP_EX; + E1000_WRITE_REG(hw, E1000_MRQC, mrqc); +} + +int +eth_igb_rss_hash_update(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf) +{ + struct e1000_hw *hw; + uint32_t mrqc; + uint64_t rss_hf; + + hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* + * Before changing anything, first check that the update RSS operation + * does not attempt to disable RSS, if RSS was enabled at + * initialization time, or does not attempt to enable RSS, if RSS was + * disabled at initialization time. 
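+ * In other words, the RSS on/off state chosen at initialization time is
+ * fixed here; only the hash key and the set of hashed protocols can be
+ * updated at runtime.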
+ */ + rss_hf = rss_conf->rss_hf & IGB_RSS_OFFLOAD_ALL; + mrqc = E1000_READ_REG(hw, E1000_MRQC); + if (!(mrqc & E1000_MRQC_ENABLE_MASK)) { /* RSS disabled */ + if (rss_hf != 0) /* Enable RSS */ + return -(EINVAL); + return 0; /* Nothing to do */ + } + /* RSS enabled */ + if (rss_hf == 0) /* Disable RSS */ + return -(EINVAL); + igb_hw_rss_hash_set(hw, rss_conf); + return 0; +} + +int eth_igb_rss_hash_conf_get(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf) +{ + struct e1000_hw *hw; + uint8_t *hash_key; + uint32_t rss_key; + uint32_t mrqc; + uint64_t rss_hf; + uint16_t i; + + hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + hash_key = rss_conf->rss_key; + if (hash_key != NULL) { + /* Return RSS hash key */ + for (i = 0; i < 10; i++) { + rss_key = E1000_READ_REG_ARRAY(hw, E1000_RSSRK(0), i); + hash_key[(i * 4)] = rss_key & 0x000000FF; + hash_key[(i * 4) + 1] = (rss_key >> 8) & 0x000000FF; + hash_key[(i * 4) + 2] = (rss_key >> 16) & 0x000000FF; + hash_key[(i * 4) + 3] = (rss_key >> 24) & 0x000000FF; + } + } + + /* Get RSS functions configured in MRQC register */ + mrqc = E1000_READ_REG(hw, E1000_MRQC); + if ((mrqc & E1000_MRQC_ENABLE_RSS_4Q) == 0) { /* RSS is disabled */ + rss_conf->rss_hf = 0; + return 0; + } + rss_hf = 0; + if (mrqc & E1000_MRQC_RSS_FIELD_IPV4) + rss_hf |= ETH_RSS_IPV4; + if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_TCP) + rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP; + if (mrqc & E1000_MRQC_RSS_FIELD_IPV6) + rss_hf |= ETH_RSS_IPV6; + if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_EX) + rss_hf |= ETH_RSS_IPV6_EX; + if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP) + rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP; + if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP_EX) + rss_hf |= ETH_RSS_IPV6_TCP_EX; + if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_UDP) + rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP; + if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP) + rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP; + if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP_EX) + rss_hf |= ETH_RSS_IPV6_UDP_EX; + rss_conf->rss_hf = rss_hf; + return 0; +} + +static void +igb_rss_configure(struct rte_eth_dev *dev) +{ + struct rte_eth_rss_conf rss_conf; + struct e1000_hw *hw; + uint32_t shift; + uint16_t i; + + hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* Fill in redirection table. */ + shift = (hw->mac.type == e1000_82575) ? 6 : 0; + for (i = 0; i < 128; i++) { + union e1000_reta { + uint32_t dword; + uint8_t bytes[4]; + } reta; + uint8_t q_idx; + + q_idx = (uint8_t) ((dev->data->nb_rx_queues > 1) ? + i % dev->data->nb_rx_queues : 0); + reta.bytes[i & 3] = (uint8_t) (q_idx << shift); + if ((i & 3) == 3) + E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta.dword); + } + + /* + * Configure the RSS key and the RSS protocols used to compute + * the RSS hash of input packets. + */ + rss_conf = dev->data->dev_conf.rx_adv_conf.rss_conf; + if ((rss_conf.rss_hf & IGB_RSS_OFFLOAD_ALL) == 0) { + igb_rss_disable(dev); + return; + } + if (rss_conf.rss_key == NULL) + rss_conf.rss_key = rss_intel_key; /* Default hash key */ + igb_hw_rss_hash_set(hw, &rss_conf); +} + +/* + * Check if the mac type support VMDq or not. + * Return 1 if it supports, otherwise, return 0. 
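+ * Only 82576, 82580 and i350 are reported as capable; every other MAC
+ * handled by this driver falls through to the error log and returns 0.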
+ */ +static int +igb_is_vmdq_supported(const struct rte_eth_dev *dev) +{ + const struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + switch (hw->mac.type) { + case e1000_82576: + case e1000_82580: + case e1000_i350: + return 1; + case e1000_82540: + case e1000_82541: + case e1000_82542: + case e1000_82543: + case e1000_82544: + case e1000_82545: + case e1000_82546: + case e1000_82547: + case e1000_82571: + case e1000_82572: + case e1000_82573: + case e1000_82574: + case e1000_82583: + case e1000_i210: + case e1000_i211: + default: + PMD_INIT_LOG(ERR, "Cannot support VMDq feature"); + return 0; + } +} + +static int +igb_vmdq_rx_hw_configure(struct rte_eth_dev *dev) +{ + struct rte_eth_vmdq_rx_conf *cfg; + struct e1000_hw *hw; + uint32_t mrqc, vt_ctl, vmolr, rctl; + int i; + + PMD_INIT_FUNC_TRACE(); + + hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf; + + /* Check if mac type can support VMDq, return value of 0 means NOT support */ + if (igb_is_vmdq_supported(dev) == 0) + return -1; + + igb_rss_disable(dev); + + /* RCTL: eanble VLAN filter */ + rctl = E1000_READ_REG(hw, E1000_RCTL); + rctl |= E1000_RCTL_VFE; + E1000_WRITE_REG(hw, E1000_RCTL, rctl); + + /* MRQC: enable vmdq */ + mrqc = E1000_READ_REG(hw, E1000_MRQC); + mrqc |= E1000_MRQC_ENABLE_VMDQ; + E1000_WRITE_REG(hw, E1000_MRQC, mrqc); + + /* VTCTL: pool selection according to VLAN tag */ + vt_ctl = E1000_READ_REG(hw, E1000_VT_CTL); + if (cfg->enable_default_pool) + vt_ctl |= (cfg->default_pool << E1000_VT_CTL_DEFAULT_POOL_SHIFT); + vt_ctl |= E1000_VT_CTL_IGNORE_MAC; + E1000_WRITE_REG(hw, E1000_VT_CTL, vt_ctl); + + for (i = 0; i < E1000_VMOLR_SIZE; i++) { + vmolr = E1000_READ_REG(hw, E1000_VMOLR(i)); + vmolr &= ~(E1000_VMOLR_AUPE | E1000_VMOLR_ROMPE | + E1000_VMOLR_ROPE | E1000_VMOLR_BAM | + E1000_VMOLR_MPME); + + if (cfg->rx_mode & ETH_VMDQ_ACCEPT_UNTAG) + vmolr |= E1000_VMOLR_AUPE; + if (cfg->rx_mode & ETH_VMDQ_ACCEPT_HASH_MC) + vmolr |= E1000_VMOLR_ROMPE; + if (cfg->rx_mode & ETH_VMDQ_ACCEPT_HASH_UC) + vmolr |= E1000_VMOLR_ROPE; + if (cfg->rx_mode & ETH_VMDQ_ACCEPT_BROADCAST) + vmolr |= E1000_VMOLR_BAM; + if (cfg->rx_mode & ETH_VMDQ_ACCEPT_MULTICAST) + vmolr |= E1000_VMOLR_MPME; + + E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr); + } + + /* + * VMOLR: set STRVLAN as 1 if IGMAC in VTCTL is set as 1 + * Both 82576 and 82580 support it + */ + if (hw->mac.type != e1000_i350) { + for (i = 0; i < E1000_VMOLR_SIZE; i++) { + vmolr = E1000_READ_REG(hw, E1000_VMOLR(i)); + vmolr |= E1000_VMOLR_STRVLAN; + E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr); + } + } + + /* VFTA - enable all vlan filters */ + for (i = 0; i < IGB_VFTA_SIZE; i++) + E1000_WRITE_REG(hw, (E1000_VFTA+(i*4)), UINT32_MAX); + + /* VFRE: 8 pools enabling for rx, both 82576 and i350 support it */ + if (hw->mac.type != e1000_82580) + E1000_WRITE_REG(hw, E1000_VFRE, E1000_MBVFICR_VFREQ_MASK); + + /* + * RAH/RAL - allow pools to read specific mac addresses + * In this case, all pools should be able to read from mac addr 0 + */ + E1000_WRITE_REG(hw, E1000_RAH(0), (E1000_RAH_AV | UINT16_MAX)); + E1000_WRITE_REG(hw, E1000_RAL(0), UINT32_MAX); + + /* VLVF: set up filters for vlan tags as configured */ + for (i = 0; i < cfg->nb_pool_maps; i++) { + /* set vlan id in VF register and set the valid bit */ + E1000_WRITE_REG(hw, E1000_VLVF(i), (E1000_VLVF_VLANID_ENABLE | \ + (cfg->pool_map[i].vlan_id & ETH_VLAN_ID_MAX) | \ + ((cfg->pool_map[i].pools << E1000_VLVF_POOLSEL_SHIFT ) & \ + E1000_VLVF_POOLSEL_MASK))); + } + + 
E1000_WRITE_FLUSH(hw); + + return 0; +} + + +/********************************************************************* + * + * Enable receive unit. + * + **********************************************************************/ + +static int +igb_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq) +{ + struct igb_rx_entry *rxe = rxq->sw_ring; + uint64_t dma_addr; + unsigned i; + + /* Initialize software ring entries. */ + for (i = 0; i < rxq->nb_rx_desc; i++) { + volatile union e1000_adv_rx_desc *rxd; + struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool); + + if (mbuf == NULL) { + PMD_INIT_LOG(ERR, "RX mbuf alloc failed " + "queue_id=%hu", rxq->queue_id); + return -ENOMEM; + } + dma_addr = + rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf)); + rxd = &rxq->rx_ring[i]; + rxd->read.hdr_addr = 0; + rxd->read.pkt_addr = dma_addr; + rxe[i].mbuf = mbuf; + } + + return 0; +} + +#define E1000_MRQC_DEF_Q_SHIFT (3) +static int +igb_dev_mq_rx_configure(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t mrqc; + + if (RTE_ETH_DEV_SRIOV(dev).active == ETH_8_POOLS) { + /* + * SRIOV active scheme + * FIXME if support RSS together with VMDq & SRIOV + */ + mrqc = E1000_MRQC_ENABLE_VMDQ; + /* 011b Def_Q ignore, according to VT_CTL.DEF_PL */ + mrqc |= 0x3 << E1000_MRQC_DEF_Q_SHIFT; + E1000_WRITE_REG(hw, E1000_MRQC, mrqc); + } else if(RTE_ETH_DEV_SRIOV(dev).active == 0) { + /* + * SRIOV inactive scheme + */ + switch (dev->data->dev_conf.rxmode.mq_mode) { + case ETH_MQ_RX_RSS: + igb_rss_configure(dev); + break; + case ETH_MQ_RX_VMDQ_ONLY: + /*Configure general VMDQ only RX parameters*/ + igb_vmdq_rx_hw_configure(dev); + break; + case ETH_MQ_RX_NONE: + /* if mq_mode is none, disable rss mode.*/ + default: + igb_rss_disable(dev); + break; + } + } + + return 0; +} + +int +eth_igb_rx_init(struct rte_eth_dev *dev) +{ + struct rte_eth_rxmode *rxmode; + struct e1000_hw *hw; + struct igb_rx_queue *rxq; + uint32_t rctl; + uint32_t rxcsum; + uint32_t srrctl; + uint16_t buf_size; + uint16_t rctl_bsize; + uint16_t i; + int ret; + + hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + srrctl = 0; + + /* + * Make sure receives are disabled while setting + * up the descriptor ring. + */ + rctl = E1000_READ_REG(hw, E1000_RCTL); + E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN); + + rxmode = &dev->data->dev_conf.rxmode; + + /* + * Configure support of jumbo frames, if any. + */ + if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) { + rctl |= E1000_RCTL_LPE; + + /* + * Set maximum packet length by default, and might be updated + * together with enabling/disabling dual VLAN. + */ + E1000_WRITE_REG(hw, E1000_RLPML, + dev->data->dev_conf.rxmode.max_rx_pkt_len + + VLAN_TAG_SIZE); + } else + rctl &= ~E1000_RCTL_LPE; + + /* Configure and enable each RX queue. */ + rctl_bsize = 0; + dev->rx_pkt_burst = eth_igb_recv_pkts; + for (i = 0; i < dev->data->nb_rx_queues; i++) { + uint64_t bus_addr; + uint32_t rxdctl; + + rxq = dev->data->rx_queues[i]; + + rxq->flags = 0; + /* + * i350 and i354 vlan packets have vlan tags byte swapped. 
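+ * This only affects locally looped-back packets (E1000_RXDEXT_STATERR_LB
+ * set in the write-back descriptor): the receive paths byte-swap the
+ * VLAN TCI for those when IGB_RXQ_FLAG_LB_BSWAP_VLAN is set on the queue.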
+ */ + if (hw->mac.type == e1000_i350 || hw->mac.type == e1000_i354) { + rxq->flags |= IGB_RXQ_FLAG_LB_BSWAP_VLAN; + PMD_INIT_LOG(DEBUG, "IGB rx vlan bswap required"); + } else { + PMD_INIT_LOG(DEBUG, "IGB rx vlan bswap not required"); + } + + /* Allocate buffers for descriptor rings and set up queue */ + ret = igb_alloc_rx_queue_mbufs(rxq); + if (ret) + return ret; + + /* + * Reset crc_len in case it was changed after queue setup by a + * call to configure + */ + if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) + rxq->crc_len = RTE_ETHER_CRC_LEN; + else + rxq->crc_len = 0; + + bus_addr = rxq->rx_ring_phys_addr; + E1000_WRITE_REG(hw, E1000_RDLEN(rxq->reg_idx), + rxq->nb_rx_desc * + sizeof(union e1000_adv_rx_desc)); + E1000_WRITE_REG(hw, E1000_RDBAH(rxq->reg_idx), + (uint32_t)(bus_addr >> 32)); + E1000_WRITE_REG(hw, E1000_RDBAL(rxq->reg_idx), (uint32_t)bus_addr); + + srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF; + + /* + * Configure RX buffer size. + */ + buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) - + RTE_PKTMBUF_HEADROOM); + if (buf_size >= 1024) { + /* + * Configure the BSIZEPACKET field of the SRRCTL + * register of the queue. + * Value is in 1 KB resolution, from 1 KB to 127 KB. + * If this field is equal to 0b, then RCTL.BSIZE + * determines the RX packet buffer size. + */ + srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) & + E1000_SRRCTL_BSIZEPKT_MASK); + buf_size = (uint16_t) ((srrctl & + E1000_SRRCTL_BSIZEPKT_MASK) << + E1000_SRRCTL_BSIZEPKT_SHIFT); + + /* It adds dual VLAN length for supporting dual VLAN */ + if ((dev->data->dev_conf.rxmode.max_rx_pkt_len + + 2 * VLAN_TAG_SIZE) > buf_size){ + if (!dev->data->scattered_rx) + PMD_INIT_LOG(DEBUG, + "forcing scatter mode"); + dev->rx_pkt_burst = eth_igb_recv_scattered_pkts; + dev->data->scattered_rx = 1; + } + } else { + /* + * Use BSIZE field of the device RCTL register. + */ + if ((rctl_bsize == 0) || (rctl_bsize > buf_size)) + rctl_bsize = buf_size; + if (!dev->data->scattered_rx) + PMD_INIT_LOG(DEBUG, "forcing scatter mode"); + dev->rx_pkt_burst = eth_igb_recv_scattered_pkts; + dev->data->scattered_rx = 1; + } + + /* Set if packets are dropped when no descriptors available */ + if (rxq->drop_en) + srrctl |= E1000_SRRCTL_DROP_EN; + + E1000_WRITE_REG(hw, E1000_SRRCTL(rxq->reg_idx), srrctl); + + /* Enable this RX queue. */ + rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(rxq->reg_idx)); + rxdctl |= E1000_RXDCTL_QUEUE_ENABLE; + rxdctl &= 0xFFF00000; + rxdctl |= (rxq->pthresh & 0x1F); + rxdctl |= ((rxq->hthresh & 0x1F) << 8); + rxdctl |= ((rxq->wthresh & 0x1F) << 16); + E1000_WRITE_REG(hw, E1000_RXDCTL(rxq->reg_idx), rxdctl); + } + + if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) { + if (!dev->data->scattered_rx) + PMD_INIT_LOG(DEBUG, "forcing scatter mode"); + dev->rx_pkt_burst = eth_igb_recv_scattered_pkts; + dev->data->scattered_rx = 1; + } + + /* + * Setup BSIZE field of RCTL register, if needed. + * Buffer sizes >= 1024 are not [supposed to be] setup in the RCTL + * register, since the code above configures the SRRCTL register of + * the RX queue in such a case. 
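+ * For example, with the common 2048-byte usable mbuf data room the
+ * SRRCTL path above programs BSIZEPACKET = 2048 >> 10 = 2 (2 KB, the
+ * field being in 1 KB units), so this RCTL fallback is never reached;
+ * it only matters for pools whose buffers are smaller than 1 KB.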
+ * All configurable sizes are: + * 16384: rctl |= (E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX); + * 8192: rctl |= (E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX); + * 4096: rctl |= (E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX); + * 2048: rctl |= E1000_RCTL_SZ_2048; + * 1024: rctl |= E1000_RCTL_SZ_1024; + * 512: rctl |= E1000_RCTL_SZ_512; + * 256: rctl |= E1000_RCTL_SZ_256; + */ + if (rctl_bsize > 0) { + if (rctl_bsize >= 512) /* 512 <= buf_size < 1024 - use 512 */ + rctl |= E1000_RCTL_SZ_512; + else /* 256 <= buf_size < 512 - use 256 */ + rctl |= E1000_RCTL_SZ_256; + } + + /* + * Configure RSS if device configured with multiple RX queues. + */ + igb_dev_mq_rx_configure(dev); + + /* Update the rctl since igb_dev_mq_rx_configure may change its value */ + rctl |= E1000_READ_REG(hw, E1000_RCTL); + + /* + * Setup the Checksum Register. + * Receive Full-Packet Checksum Offload is mutually exclusive with RSS. + */ + rxcsum = E1000_READ_REG(hw, E1000_RXCSUM); + rxcsum |= E1000_RXCSUM_PCSD; + + /* Enable both L3/L4 rx checksum offload */ + if (rxmode->offloads & DEV_RX_OFFLOAD_IPV4_CKSUM) + rxcsum |= E1000_RXCSUM_IPOFL; + else + rxcsum &= ~E1000_RXCSUM_IPOFL; + if (rxmode->offloads & + (DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM)) + rxcsum |= E1000_RXCSUM_TUOFL; + else + rxcsum &= ~E1000_RXCSUM_TUOFL; + if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM) + rxcsum |= E1000_RXCSUM_CRCOFL; + else + rxcsum &= ~E1000_RXCSUM_CRCOFL; + + E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum); + + /* Setup the Receive Control Register. */ + if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) { + rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */ + + /* clear STRCRC bit in all queues */ + if (hw->mac.type == e1000_i350 || + hw->mac.type == e1000_i210 || + hw->mac.type == e1000_i211 || + hw->mac.type == e1000_i354) { + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = dev->data->rx_queues[i]; + uint32_t dvmolr = E1000_READ_REG(hw, + E1000_DVMOLR(rxq->reg_idx)); + dvmolr &= ~E1000_DVMOLR_STRCRC; + E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr); + } + } + } else { + rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */ + + /* set STRCRC bit in all queues */ + if (hw->mac.type == e1000_i350 || + hw->mac.type == e1000_i210 || + hw->mac.type == e1000_i211 || + hw->mac.type == e1000_i354) { + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = dev->data->rx_queues[i]; + uint32_t dvmolr = E1000_READ_REG(hw, + E1000_DVMOLR(rxq->reg_idx)); + dvmolr |= E1000_DVMOLR_STRCRC; + E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr); + } + } + } + + rctl &= ~(3 << E1000_RCTL_MO_SHIFT); + rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO | + E1000_RCTL_RDMTS_HALF | + (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT); + + /* Make sure VLAN Filters are off. */ + if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_VMDQ_ONLY) + rctl &= ~E1000_RCTL_VFE; + /* Don't store bad packets. */ + rctl &= ~E1000_RCTL_SBP; + + /* Enable Receives. */ + E1000_WRITE_REG(hw, E1000_RCTL, rctl); + + /* + * Setup the HW Rx Head and Tail Descriptor Pointers. + * This needs to be done after enable. + */ + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = dev->data->rx_queues[i]; + E1000_WRITE_REG(hw, E1000_RDH(rxq->reg_idx), 0); + E1000_WRITE_REG(hw, E1000_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1); + } + + return 0; +} + +/********************************************************************* + * + * Enable transmit unit. 
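+ * (Per queue this programs TDBAL/TDBAH, TDLEN, TDH/TDT and the TXDCTL
+ * thresholds, then turns the whole unit on through TCTL.EN.)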
+ * + **********************************************************************/ +void +eth_igb_tx_init(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw; + struct igb_tx_queue *txq; + uint32_t tctl; + uint32_t txdctl; + uint16_t i; + + hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* Setup the Base and Length of the Tx Descriptor Rings. */ + for (i = 0; i < dev->data->nb_tx_queues; i++) { + uint64_t bus_addr; + txq = dev->data->tx_queues[i]; + bus_addr = txq->tx_ring_phys_addr; + + E1000_WRITE_REG(hw, E1000_TDLEN(txq->reg_idx), + txq->nb_tx_desc * + sizeof(union e1000_adv_tx_desc)); + E1000_WRITE_REG(hw, E1000_TDBAH(txq->reg_idx), + (uint32_t)(bus_addr >> 32)); + E1000_WRITE_REG(hw, E1000_TDBAL(txq->reg_idx), (uint32_t)bus_addr); + + /* Setup the HW Tx Head and Tail descriptor pointers. */ + E1000_WRITE_REG(hw, E1000_TDT(txq->reg_idx), 0); + E1000_WRITE_REG(hw, E1000_TDH(txq->reg_idx), 0); + + /* Setup Transmit threshold registers. */ + txdctl = E1000_READ_REG(hw, E1000_TXDCTL(txq->reg_idx)); + txdctl |= txq->pthresh & 0x1F; + txdctl |= ((txq->hthresh & 0x1F) << 8); + txdctl |= ((txq->wthresh & 0x1F) << 16); + txdctl |= E1000_TXDCTL_QUEUE_ENABLE; + E1000_WRITE_REG(hw, E1000_TXDCTL(txq->reg_idx), txdctl); + } + + /* Program the Transmit Control Register. */ + tctl = E1000_READ_REG(hw, E1000_TCTL); + tctl &= ~E1000_TCTL_CT; + tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN | + (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT)); + + e1000_config_collision_dist(hw); + + /* This write will effectively turn on the transmit unit. */ + E1000_WRITE_REG(hw, E1000_TCTL, tctl); +} + +/********************************************************************* + * + * Enable VF receive unit. + * + **********************************************************************/ +int +eth_igbvf_rx_init(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw; + struct igb_rx_queue *rxq; + uint32_t srrctl; + uint16_t buf_size; + uint16_t rctl_bsize; + uint16_t i; + int ret; + + hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* setup MTU */ + e1000_rlpml_set_vf(hw, + (uint16_t)(dev->data->dev_conf.rxmode.max_rx_pkt_len + + VLAN_TAG_SIZE)); + + /* Configure and enable each RX queue. */ + rctl_bsize = 0; + dev->rx_pkt_burst = eth_igb_recv_pkts; + for (i = 0; i < dev->data->nb_rx_queues; i++) { + uint64_t bus_addr; + uint32_t rxdctl; + + rxq = dev->data->rx_queues[i]; + + rxq->flags = 0; + /* + * i350VF LB vlan packets have vlan tags byte swapped. + */ + if (hw->mac.type == e1000_vfadapt_i350) { + rxq->flags |= IGB_RXQ_FLAG_LB_BSWAP_VLAN; + PMD_INIT_LOG(DEBUG, "IGB rx vlan bswap required"); + } else { + PMD_INIT_LOG(DEBUG, "IGB rx vlan bswap not required"); + } + + /* Allocate buffers for descriptor rings and set up queue */ + ret = igb_alloc_rx_queue_mbufs(rxq); + if (ret) + return ret; + + bus_addr = rxq->rx_ring_phys_addr; + E1000_WRITE_REG(hw, E1000_RDLEN(i), + rxq->nb_rx_desc * + sizeof(union e1000_adv_rx_desc)); + E1000_WRITE_REG(hw, E1000_RDBAH(i), + (uint32_t)(bus_addr >> 32)); + E1000_WRITE_REG(hw, E1000_RDBAL(i), (uint32_t)bus_addr); + + srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF; + + /* + * Configure RX buffer size. + */ + buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) - + RTE_PKTMBUF_HEADROOM); + if (buf_size >= 1024) { + /* + * Configure the BSIZEPACKET field of the SRRCTL + * register of the queue. + * Value is in 1 KB resolution, from 1 KB to 127 KB. + * If this field is equal to 0b, then RCTL.BSIZE + * determines the RX packet buffer size. 
+ */ + srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) & + E1000_SRRCTL_BSIZEPKT_MASK); + buf_size = (uint16_t) ((srrctl & + E1000_SRRCTL_BSIZEPKT_MASK) << + E1000_SRRCTL_BSIZEPKT_SHIFT); + + /* It adds dual VLAN length for supporting dual VLAN */ + if ((dev->data->dev_conf.rxmode.max_rx_pkt_len + + 2 * VLAN_TAG_SIZE) > buf_size){ + if (!dev->data->scattered_rx) + PMD_INIT_LOG(DEBUG, + "forcing scatter mode"); + dev->rx_pkt_burst = eth_igb_recv_scattered_pkts; + dev->data->scattered_rx = 1; + } + } else { + /* + * Use BSIZE field of the device RCTL register. + */ + if ((rctl_bsize == 0) || (rctl_bsize > buf_size)) + rctl_bsize = buf_size; + if (!dev->data->scattered_rx) + PMD_INIT_LOG(DEBUG, "forcing scatter mode"); + dev->rx_pkt_burst = eth_igb_recv_scattered_pkts; + dev->data->scattered_rx = 1; + } + + /* Set if packets are dropped when no descriptors available */ + if (rxq->drop_en) + srrctl |= E1000_SRRCTL_DROP_EN; + + E1000_WRITE_REG(hw, E1000_SRRCTL(i), srrctl); + + /* Enable this RX queue. */ + rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i)); + rxdctl |= E1000_RXDCTL_QUEUE_ENABLE; + rxdctl &= 0xFFF00000; + rxdctl |= (rxq->pthresh & 0x1F); + rxdctl |= ((rxq->hthresh & 0x1F) << 8); + if (hw->mac.type == e1000_vfadapt) { + /* + * Workaround of 82576 VF Erratum + * force set WTHRESH to 1 + * to avoid Write-Back not triggered sometimes + */ + rxdctl |= 0x10000; + PMD_INIT_LOG(DEBUG, "Force set RX WTHRESH to 1 !"); + } + else + rxdctl |= ((rxq->wthresh & 0x1F) << 16); + E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl); + } + + if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) { + if (!dev->data->scattered_rx) + PMD_INIT_LOG(DEBUG, "forcing scatter mode"); + dev->rx_pkt_burst = eth_igb_recv_scattered_pkts; + dev->data->scattered_rx = 1; + } + + /* + * Setup the HW Rx Head and Tail Descriptor Pointers. + * This needs to be done after enable. + */ + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = dev->data->rx_queues[i]; + E1000_WRITE_REG(hw, E1000_RDH(i), 0); + E1000_WRITE_REG(hw, E1000_RDT(i), rxq->nb_rx_desc - 1); + } + + return 0; +} + +/********************************************************************* + * + * Enable VF transmit unit. + * + **********************************************************************/ +void +eth_igbvf_tx_init(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw; + struct igb_tx_queue *txq; + uint32_t txdctl; + uint16_t i; + + hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* Setup the Base and Length of the Tx Descriptor Rings. */ + for (i = 0; i < dev->data->nb_tx_queues; i++) { + uint64_t bus_addr; + + txq = dev->data->tx_queues[i]; + bus_addr = txq->tx_ring_phys_addr; + E1000_WRITE_REG(hw, E1000_TDLEN(i), + txq->nb_tx_desc * + sizeof(union e1000_adv_tx_desc)); + E1000_WRITE_REG(hw, E1000_TDBAH(i), + (uint32_t)(bus_addr >> 32)); + E1000_WRITE_REG(hw, E1000_TDBAL(i), (uint32_t)bus_addr); + + /* Setup the HW Tx Head and Tail descriptor pointers. */ + E1000_WRITE_REG(hw, E1000_TDT(i), 0); + E1000_WRITE_REG(hw, E1000_TDH(i), 0); + + /* Setup Transmit threshold registers. 
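+ * PTHRESH lives in bits 4:0 of TXDCTL, HTHRESH in bits 12:8 and WTHRESH
+ * in bits 20:16, which is why the 82576 VF erratum workaround below
+ * forces WTHRESH to 1 by OR-ing in 0x10000.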
*/ + txdctl = E1000_READ_REG(hw, E1000_TXDCTL(i)); + txdctl |= txq->pthresh & 0x1F; + txdctl |= ((txq->hthresh & 0x1F) << 8); + if (hw->mac.type == e1000_82576) { + /* + * Workaround of 82576 VF Erratum + * force set WTHRESH to 1 + * to avoid Write-Back not triggered sometimes + */ + txdctl |= 0x10000; + PMD_INIT_LOG(DEBUG, "Force set TX WTHRESH to 1 !"); + } + else + txdctl |= ((txq->wthresh & 0x1F) << 16); + txdctl |= E1000_TXDCTL_QUEUE_ENABLE; + E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl); + } + +} + +void +igb_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, + struct rte_eth_rxq_info *qinfo) +{ + struct igb_rx_queue *rxq; + + rxq = dev->data->rx_queues[queue_id]; + + qinfo->mp = rxq->mb_pool; + qinfo->scattered_rx = dev->data->scattered_rx; + qinfo->nb_desc = rxq->nb_rx_desc; + + qinfo->conf.rx_free_thresh = rxq->rx_free_thresh; + qinfo->conf.rx_drop_en = rxq->drop_en; + qinfo->conf.offloads = rxq->offloads; +} + +void +igb_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, + struct rte_eth_txq_info *qinfo) +{ + struct igb_tx_queue *txq; + + txq = dev->data->tx_queues[queue_id]; + + qinfo->nb_desc = txq->nb_tx_desc; + + qinfo->conf.tx_thresh.pthresh = txq->pthresh; + qinfo->conf.tx_thresh.hthresh = txq->hthresh; + qinfo->conf.tx_thresh.wthresh = txq->wthresh; + qinfo->conf.offloads = txq->offloads; +} + +int +igb_rss_conf_init(struct rte_eth_dev *dev, + struct igb_rte_flow_rss_conf *out, + const struct rte_flow_action_rss *in) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (in->key_len > RTE_DIM(out->key) || + ((hw->mac.type == e1000_82576) && + (in->queue_num > IGB_MAX_RX_QUEUE_NUM_82576)) || + ((hw->mac.type != e1000_82576) && + (in->queue_num > IGB_MAX_RX_QUEUE_NUM))) + return -EINVAL; + out->conf = (struct rte_flow_action_rss){ + .func = in->func, + .level = in->level, + .types = in->types, + .key_len = in->key_len, + .queue_num = in->queue_num, + .key = memcpy(out->key, in->key, in->key_len), + .queue = memcpy(out->queue, in->queue, + sizeof(*in->queue) * in->queue_num), + }; + return 0; +} + +int +igb_action_rss_same(const struct rte_flow_action_rss *comp, + const struct rte_flow_action_rss *with) +{ + return (comp->func == with->func && + comp->level == with->level && + comp->types == with->types && + comp->key_len == with->key_len && + comp->queue_num == with->queue_num && + !memcmp(comp->key, with->key, with->key_len) && + !memcmp(comp->queue, with->queue, + sizeof(*with->queue) * with->queue_num)); +} + +int +igb_config_rss_filter(struct rte_eth_dev *dev, + struct igb_rte_flow_rss_conf *conf, bool add) +{ + uint32_t shift; + uint16_t i, j; + struct rte_eth_rss_conf rss_conf = { + .rss_key = conf->conf.key_len ? + (void *)(uintptr_t)conf->conf.key : NULL, + .rss_key_len = conf->conf.key_len, + .rss_hf = conf->conf.types, + }; + struct e1000_filter_info *filter_info = + E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (!add) { + if (igb_action_rss_same(&filter_info->rss_info.conf, + &conf->conf)) { + igb_rss_disable(dev); + memset(&filter_info->rss_info, 0, + sizeof(struct igb_rte_flow_rss_conf)); + return 0; + } + return -EINVAL; + } + + if (filter_info->rss_info.conf.queue_num) + return -EINVAL; + + /* Fill in redirection table. */ + shift = (hw->mac.type == e1000_82575) ? 
6 : 0; + for (i = 0, j = 0; i < 128; i++, j++) { + union e1000_reta { + uint32_t dword; + uint8_t bytes[4]; + } reta; + uint8_t q_idx; + + if (j == conf->conf.queue_num) + j = 0; + q_idx = conf->conf.queue[j]; + reta.bytes[i & 3] = (uint8_t)(q_idx << shift); + if ((i & 3) == 3) + E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta.dword); + } + + /* Configure the RSS key and the RSS protocols used to compute + * the RSS hash of input packets. + */ + if ((rss_conf.rss_hf & IGB_RSS_OFFLOAD_ALL) == 0) { + igb_rss_disable(dev); + return 0; + } + if (rss_conf.rss_key == NULL) + rss_conf.rss_key = rss_intel_key; /* Default hash key */ + igb_hw_rss_hash_set(hw, &rss_conf); + + if (igb_rss_conf_init(dev, &filter_info->rss_info, &conf->conf)) + return -EINVAL; + + return 0; +} diff --git a/src/spdk/dpdk/drivers/net/e1000/meson.build b/src/spdk/dpdk/drivers/net/e1000/meson.build new file mode 100644 index 000000000..cf456995c --- /dev/null +++ b/src/spdk/dpdk/drivers/net/e1000/meson.build @@ -0,0 +1,17 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2017 Intel Corporation + +subdir('base') +objs = [base_objs] + +sources = files( + 'e1000_logs.c', + 'em_ethdev.c', + 'em_rxtx.c', + 'igb_ethdev.c', + 'igb_flow.c', + 'igb_pf.c', + 'igb_rxtx.c' +) + +includes += include_directories('base') diff --git a/src/spdk/dpdk/drivers/net/e1000/rte_pmd_e1000_version.map b/src/spdk/dpdk/drivers/net/e1000/rte_pmd_e1000_version.map new file mode 100644 index 000000000..f9f17e4f6 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/e1000/rte_pmd_e1000_version.map @@ -0,0 +1,3 @@ +DPDK_20.0 { + local: *; +}; diff --git a/src/spdk/dpdk/drivers/net/ena/Makefile b/src/spdk/dpdk/drivers/net/ena/Makefile new file mode 100644 index 000000000..8ccff36db --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ena/Makefile @@ -0,0 +1,30 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright (c) 2015-2019 Amazon.com, Inc. or its affiliates. +# All rights reserved. + +include $(RTE_SDK)/mk/rte.vars.mk + +# +# library name +# +LIB = librte_pmd_ena.a +CFLAGS += $(WERROR_FLAGS) -O2 +INCLUDES :=-I$(SRCDIR) -I$(SRCDIR)/base/ena_defs -I$(SRCDIR)/base + +EXPORT_MAP := rte_pmd_ena_version.map + +VPATH += $(SRCDIR)/base +# +# all source are stored in SRCS-y +# +SRCS-$(CONFIG_RTE_LIBRTE_ENA_PMD) += ena_ethdev.c +SRCS-$(CONFIG_RTE_LIBRTE_ENA_PMD) += ena_com.c +SRCS-$(CONFIG_RTE_LIBRTE_ENA_PMD) += ena_eth_com.c + +CFLAGS += $(INCLUDES) +LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring +LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs +LDLIBS += -lrte_bus_pci +LDLIBS += -lrte_timer + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/src/spdk/dpdk/drivers/net/ena/base/ena_com.c b/src/spdk/dpdk/drivers/net/ena/base/ena_com.c new file mode 100644 index 000000000..6257c535b --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ena/base/ena_com.c @@ -0,0 +1,2935 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates. + * All rights reserved. 
+ */ + +#include "ena_com.h" + +/*****************************************************************************/ +/*****************************************************************************/ + +/* Timeout in micro-sec */ +#define ADMIN_CMD_TIMEOUT_US (3000000) + +#define ENA_ASYNC_QUEUE_DEPTH 16 +#define ENA_ADMIN_QUEUE_DEPTH 32 + +#define ENA_CTRL_MAJOR 0 +#define ENA_CTRL_MINOR 0 +#define ENA_CTRL_SUB_MINOR 1 + +#define MIN_ENA_CTRL_VER \ + (((ENA_CTRL_MAJOR) << \ + (ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT)) | \ + ((ENA_CTRL_MINOR) << \ + (ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT)) | \ + (ENA_CTRL_SUB_MINOR)) + +#define ENA_DMA_ADDR_TO_UINT32_LOW(x) ((u32)((u64)(x))) +#define ENA_DMA_ADDR_TO_UINT32_HIGH(x) ((u32)(((u64)(x)) >> 32)) + +#define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF + +#define ENA_COM_BOUNCE_BUFFER_CNTRL_CNT 4 + +#define ENA_REGS_ADMIN_INTR_MASK 1 + +#define ENA_POLL_MS 5 + +/*****************************************************************************/ +/*****************************************************************************/ +/*****************************************************************************/ + +enum ena_cmd_status { + ENA_CMD_SUBMITTED, + ENA_CMD_COMPLETED, + /* Abort - canceled by the driver */ + ENA_CMD_ABORTED, +}; + +struct ena_comp_ctx { + ena_wait_event_t wait_event; + struct ena_admin_acq_entry *user_cqe; + u32 comp_size; + enum ena_cmd_status status; + /* status from the device */ + u8 comp_status; + u8 cmd_opcode; + bool occupied; +}; + +struct ena_com_stats_ctx { + struct ena_admin_aq_get_stats_cmd get_cmd; + struct ena_admin_acq_get_stats_resp get_resp; +}; + +static int ena_com_mem_addr_set(struct ena_com_dev *ena_dev, + struct ena_common_mem_addr *ena_addr, + dma_addr_t addr) +{ + if ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr) { + ena_trc_err("dma address has more bits that the device supports\n"); + return ENA_COM_INVAL; + } + + ena_addr->mem_addr_low = lower_32_bits(addr); + ena_addr->mem_addr_high = (u16)upper_32_bits(addr); + + return 0; +} + +static int ena_com_admin_init_sq(struct ena_com_admin_queue *queue) +{ + struct ena_com_admin_sq *sq = &queue->sq; + u16 size = ADMIN_SQ_SIZE(queue->q_depth); + + ENA_MEM_ALLOC_COHERENT(queue->q_dmadev, size, sq->entries, sq->dma_addr, + sq->mem_handle); + + if (!sq->entries) { + ena_trc_err("memory allocation failed\n"); + return ENA_COM_NO_MEM; + } + + sq->head = 0; + sq->tail = 0; + sq->phase = 1; + + sq->db_addr = NULL; + + return 0; +} + +static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue) +{ + struct ena_com_admin_cq *cq = &queue->cq; + u16 size = ADMIN_CQ_SIZE(queue->q_depth); + + ENA_MEM_ALLOC_COHERENT(queue->q_dmadev, size, cq->entries, cq->dma_addr, + cq->mem_handle); + + if (!cq->entries) { + ena_trc_err("memory allocation failed\n"); + return ENA_COM_NO_MEM; + } + + cq->head = 0; + cq->phase = 1; + + return 0; +} + +static int ena_com_admin_init_aenq(struct ena_com_dev *dev, + struct ena_aenq_handlers *aenq_handlers) +{ + struct ena_com_aenq *aenq = &dev->aenq; + u32 addr_low, addr_high, aenq_caps; + u16 size; + + dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH; + size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH); + ENA_MEM_ALLOC_COHERENT(dev->dmadev, size, + aenq->entries, + aenq->dma_addr, + aenq->mem_handle); + + if (!aenq->entries) { + ena_trc_err("memory allocation failed\n"); + return ENA_COM_NO_MEM; + } + + aenq->head = aenq->q_depth; + aenq->phase = 1; + + addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(aenq->dma_addr); + addr_high = 
ENA_DMA_ADDR_TO_UINT32_HIGH(aenq->dma_addr); + + ENA_REG_WRITE32(dev->bus, addr_low, dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF); + ENA_REG_WRITE32(dev->bus, addr_high, dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF); + + aenq_caps = 0; + aenq_caps |= dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK; + aenq_caps |= (sizeof(struct ena_admin_aenq_entry) << + ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) & + ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK; + ENA_REG_WRITE32(dev->bus, aenq_caps, dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF); + + if (unlikely(!aenq_handlers)) { + ena_trc_err("aenq handlers pointer is NULL\n"); + return ENA_COM_INVAL; + } + + aenq->aenq_handlers = aenq_handlers; + + return 0; +} + +static void comp_ctxt_release(struct ena_com_admin_queue *queue, + struct ena_comp_ctx *comp_ctx) +{ + comp_ctx->occupied = false; + ATOMIC32_DEC(&queue->outstanding_cmds); +} + +static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *queue, + u16 command_id, bool capture) +{ + if (unlikely(command_id >= queue->q_depth)) { + ena_trc_err("command id is larger than the queue size. cmd_id: %u queue size %d\n", + command_id, queue->q_depth); + return NULL; + } + + if (unlikely(!queue->comp_ctx)) { + ena_trc_err("Completion context is NULL\n"); + return NULL; + } + + if (unlikely(queue->comp_ctx[command_id].occupied && capture)) { + ena_trc_err("Completion context is occupied\n"); + return NULL; + } + + if (capture) { + ATOMIC32_INC(&queue->outstanding_cmds); + queue->comp_ctx[command_id].occupied = true; + } + + return &queue->comp_ctx[command_id]; +} + +static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue, + struct ena_admin_aq_entry *cmd, + size_t cmd_size_in_bytes, + struct ena_admin_acq_entry *comp, + size_t comp_size_in_bytes) +{ + struct ena_comp_ctx *comp_ctx; + u16 tail_masked, cmd_id; + u16 queue_size_mask; + u16 cnt; + + queue_size_mask = admin_queue->q_depth - 1; + + tail_masked = admin_queue->sq.tail & queue_size_mask; + + /* In case of queue FULL */ + cnt = (u16)ATOMIC32_READ(&admin_queue->outstanding_cmds); + if (cnt >= admin_queue->q_depth) { + ena_trc_dbg("admin queue is full.\n"); + admin_queue->stats.out_of_space++; + return ERR_PTR(ENA_COM_NO_SPACE); + } + + cmd_id = admin_queue->curr_cmd_id; + + cmd->aq_common_descriptor.flags |= admin_queue->sq.phase & + ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK; + + cmd->aq_common_descriptor.command_id |= cmd_id & + ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK; + + comp_ctx = get_comp_ctxt(admin_queue, cmd_id, true); + if (unlikely(!comp_ctx)) + return ERR_PTR(ENA_COM_INVAL); + + comp_ctx->status = ENA_CMD_SUBMITTED; + comp_ctx->comp_size = (u32)comp_size_in_bytes; + comp_ctx->user_cqe = comp; + comp_ctx->cmd_opcode = cmd->aq_common_descriptor.opcode; + + ENA_WAIT_EVENT_CLEAR(comp_ctx->wait_event); + + memcpy(&admin_queue->sq.entries[tail_masked], cmd, cmd_size_in_bytes); + + admin_queue->curr_cmd_id = (admin_queue->curr_cmd_id + 1) & + queue_size_mask; + + admin_queue->sq.tail++; + admin_queue->stats.submitted_cmd++; + + if (unlikely((admin_queue->sq.tail & queue_size_mask) == 0)) + admin_queue->sq.phase = !admin_queue->sq.phase; + + ENA_DB_SYNC(&admin_queue->sq.mem_handle); + ENA_REG_WRITE32(admin_queue->bus, admin_queue->sq.tail, + admin_queue->sq.db_addr); + + return comp_ctx; +} + +static int ena_com_init_comp_ctxt(struct ena_com_admin_queue *queue) +{ + size_t size = queue->q_depth * sizeof(struct ena_comp_ctx); + struct ena_comp_ctx *comp_ctx; + u16 i; + + queue->comp_ctx = 
ENA_MEM_ALLOC(queue->q_dmadev, size); + if (unlikely(!queue->comp_ctx)) { + ena_trc_err("memory allocation failed\n"); + return ENA_COM_NO_MEM; + } + + for (i = 0; i < queue->q_depth; i++) { + comp_ctx = get_comp_ctxt(queue, i, false); + if (comp_ctx) + ENA_WAIT_EVENT_INIT(comp_ctx->wait_event); + } + + return 0; +} + +static struct ena_comp_ctx *ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue, + struct ena_admin_aq_entry *cmd, + size_t cmd_size_in_bytes, + struct ena_admin_acq_entry *comp, + size_t comp_size_in_bytes) +{ + unsigned long flags = 0; + struct ena_comp_ctx *comp_ctx; + + ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags); + if (unlikely(!admin_queue->running_state)) { + ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags); + return ERR_PTR(ENA_COM_NO_DEVICE); + } + comp_ctx = __ena_com_submit_admin_cmd(admin_queue, cmd, + cmd_size_in_bytes, + comp, + comp_size_in_bytes); + if (IS_ERR(comp_ctx)) + admin_queue->running_state = false; + ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags); + + return comp_ctx; +} + +static int ena_com_init_io_sq(struct ena_com_dev *ena_dev, + struct ena_com_create_io_ctx *ctx, + struct ena_com_io_sq *io_sq) +{ + size_t size; + int dev_node = 0; + + memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr)); + + io_sq->dma_addr_bits = (u8)ena_dev->dma_addr_bits; + io_sq->desc_entry_size = + (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ? + sizeof(struct ena_eth_io_tx_desc) : + sizeof(struct ena_eth_io_rx_desc); + + size = io_sq->desc_entry_size * io_sq->q_depth; + io_sq->bus = ena_dev->bus; + + if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) { + ENA_MEM_ALLOC_COHERENT_NODE(ena_dev->dmadev, + size, + io_sq->desc_addr.virt_addr, + io_sq->desc_addr.phys_addr, + io_sq->desc_addr.mem_handle, + ctx->numa_node, + dev_node); + if (!io_sq->desc_addr.virt_addr) { + ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev, + size, + io_sq->desc_addr.virt_addr, + io_sq->desc_addr.phys_addr, + io_sq->desc_addr.mem_handle); + } + + if (!io_sq->desc_addr.virt_addr) { + ena_trc_err("memory allocation failed\n"); + return ENA_COM_NO_MEM; + } + } + + if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { + /* Allocate bounce buffers */ + io_sq->bounce_buf_ctrl.buffer_size = + ena_dev->llq_info.desc_list_entry_size; + io_sq->bounce_buf_ctrl.buffers_num = + ENA_COM_BOUNCE_BUFFER_CNTRL_CNT; + io_sq->bounce_buf_ctrl.next_to_use = 0; + + size = io_sq->bounce_buf_ctrl.buffer_size * + io_sq->bounce_buf_ctrl.buffers_num; + + ENA_MEM_ALLOC_NODE(ena_dev->dmadev, + size, + io_sq->bounce_buf_ctrl.base_buffer, + ctx->numa_node, + dev_node); + if (!io_sq->bounce_buf_ctrl.base_buffer) + io_sq->bounce_buf_ctrl.base_buffer = ENA_MEM_ALLOC(ena_dev->dmadev, size); + + if (!io_sq->bounce_buf_ctrl.base_buffer) { + ena_trc_err("bounce buffer memory allocation failed\n"); + return ENA_COM_NO_MEM; + } + + memcpy(&io_sq->llq_info, &ena_dev->llq_info, + sizeof(io_sq->llq_info)); + + /* Initiate the first bounce buffer */ + io_sq->llq_buf_ctrl.curr_bounce_buf = + ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl); + memset(io_sq->llq_buf_ctrl.curr_bounce_buf, + 0x0, io_sq->llq_info.desc_list_entry_size); + io_sq->llq_buf_ctrl.descs_left_in_line = + io_sq->llq_info.descs_num_before_header; + io_sq->disable_meta_caching = + io_sq->llq_info.disable_meta_caching; + + if (io_sq->llq_info.max_entries_in_tx_burst > 0) + io_sq->entries_in_tx_burst_left = + io_sq->llq_info.max_entries_in_tx_burst; + } + + io_sq->tail = 0; + io_sq->next_to_comp = 0; + io_sq->phase = 1; + + return 0; +} 
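+
+/*
+ * Illustrative sketch, not part of the upstream ena_com code: a minimal,
+ * self-contained model of the phase-bit protocol used by the admin
+ * submission and completion queues in this file. The producer toggles
+ * its phase each time the masked tail wraps back to slot 0, and a
+ * consumer only accepts entries whose phase bit matches its own current
+ * phase. DEPTH below is a hypothetical power-of-two depth chosen just
+ * for the example, in the same way ENA_ADMIN_QUEUE_DEPTH is.
+ */
+#if 0 /* standalone example only, never compiled into the driver */
+#include <assert.h>
+#include <stdint.h>
+
+#define DEPTH 4
+
+int main(void)
+{
+        uint8_t entry_phase[DEPTH] = {0};
+        uint8_t prod_phase = 1;         /* queues start with phase = 1 */
+        uint16_t tail = 0;
+        unsigned int i;
+
+        /* Produce six entries: the phase flips after the fourth one. */
+        for (i = 0; i < 6; i++) {
+                entry_phase[tail & (DEPTH - 1)] = prod_phase;
+                tail++;
+                if ((tail & (DEPTH - 1)) == 0)
+                        prod_phase = !prod_phase;
+        }
+
+        /* After the wrap, fresh entries carry the flipped phase (0), so a
+         * consumer can tell the second-lap entries in slots 0-1 apart from
+         * the leftover first-lap entries in slots 2-3 without any extra
+         * producer/consumer index being exchanged.
+         */
+        assert(entry_phase[0] == 0 && entry_phase[1] == 0);
+        assert(entry_phase[2] == 1 && entry_phase[3] == 1);
+        return 0;
+}
+#endif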
+ +static int ena_com_init_io_cq(struct ena_com_dev *ena_dev, + struct ena_com_create_io_ctx *ctx, + struct ena_com_io_cq *io_cq) +{ + size_t size; + int prev_node = 0; + + memset(&io_cq->cdesc_addr, 0x0, sizeof(io_cq->cdesc_addr)); + + /* Use the basic completion descriptor for Rx */ + io_cq->cdesc_entry_size_in_bytes = + (io_cq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ? + sizeof(struct ena_eth_io_tx_cdesc) : + sizeof(struct ena_eth_io_rx_cdesc_base); + + size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth; + io_cq->bus = ena_dev->bus; + + ENA_MEM_ALLOC_COHERENT_NODE(ena_dev->dmadev, + size, + io_cq->cdesc_addr.virt_addr, + io_cq->cdesc_addr.phys_addr, + io_cq->cdesc_addr.mem_handle, + ctx->numa_node, + prev_node); + if (!io_cq->cdesc_addr.virt_addr) { + ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev, + size, + io_cq->cdesc_addr.virt_addr, + io_cq->cdesc_addr.phys_addr, + io_cq->cdesc_addr.mem_handle); + } + + if (!io_cq->cdesc_addr.virt_addr) { + ena_trc_err("memory allocation failed\n"); + return ENA_COM_NO_MEM; + } + + io_cq->phase = 1; + io_cq->head = 0; + + return 0; +} + +static void ena_com_handle_single_admin_completion(struct ena_com_admin_queue *admin_queue, + struct ena_admin_acq_entry *cqe) +{ + struct ena_comp_ctx *comp_ctx; + u16 cmd_id; + + cmd_id = cqe->acq_common_descriptor.command & + ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK; + + comp_ctx = get_comp_ctxt(admin_queue, cmd_id, false); + if (unlikely(!comp_ctx)) { + ena_trc_err("comp_ctx is NULL. Changing the admin queue running state\n"); + admin_queue->running_state = false; + return; + } + + comp_ctx->status = ENA_CMD_COMPLETED; + comp_ctx->comp_status = cqe->acq_common_descriptor.status; + + if (comp_ctx->user_cqe) + memcpy(comp_ctx->user_cqe, (void *)cqe, comp_ctx->comp_size); + + if (!admin_queue->polling) + ENA_WAIT_EVENT_SIGNAL(comp_ctx->wait_event); +} + +static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_queue) +{ + struct ena_admin_acq_entry *cqe = NULL; + u16 comp_num = 0; + u16 head_masked; + u8 phase; + + head_masked = admin_queue->cq.head & (admin_queue->q_depth - 1); + phase = admin_queue->cq.phase; + + cqe = &admin_queue->cq.entries[head_masked]; + + /* Go over all the completions */ + while ((READ_ONCE8(cqe->acq_common_descriptor.flags) & + ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) { + /* Do not read the rest of the completion entry before the + * phase bit was validated + */ + dma_rmb(); + ena_com_handle_single_admin_completion(admin_queue, cqe); + + head_masked++; + comp_num++; + if (unlikely(head_masked == admin_queue->q_depth)) { + head_masked = 0; + phase = !phase; + } + + cqe = &admin_queue->cq.entries[head_masked]; + } + + admin_queue->cq.head += comp_num; + admin_queue->cq.phase = phase; + admin_queue->sq.head += comp_num; + admin_queue->stats.completed_cmd += comp_num; +} + +static int ena_com_comp_status_to_errno(u8 comp_status) +{ + if (unlikely(comp_status != 0)) + ena_trc_err("admin command failed[%u]\n", comp_status); + + switch (comp_status) { + case ENA_ADMIN_SUCCESS: + return ENA_COM_OK; + case ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE: + return ENA_COM_NO_MEM; + case ENA_ADMIN_UNSUPPORTED_OPCODE: + return ENA_COM_UNSUPPORTED; + case ENA_ADMIN_BAD_OPCODE: + case ENA_ADMIN_MALFORMED_REQUEST: + case ENA_ADMIN_ILLEGAL_PARAMETER: + case ENA_ADMIN_UNKNOWN_ERROR: + return ENA_COM_INVAL; + } + + return ENA_COM_INVAL; +} + +static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx, + struct ena_com_admin_queue *admin_queue) +{ + unsigned long 
flags = 0; + ena_time_t timeout; + int ret; + + timeout = ENA_GET_SYSTEM_TIMEOUT(admin_queue->completion_timeout); + + while (1) { + ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags); + ena_com_handle_admin_completion(admin_queue); + ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags); + + if (comp_ctx->status != ENA_CMD_SUBMITTED) + break; + + if (ENA_TIME_EXPIRE(timeout)) { + ena_trc_err("Wait for completion (polling) timeout\n"); + /* ENA didn't have any completion */ + ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags); + admin_queue->stats.no_completion++; + admin_queue->running_state = false; + ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags); + + ret = ENA_COM_TIMER_EXPIRED; + goto err; + } + + ENA_MSLEEP(ENA_POLL_MS); + } + + if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) { + ena_trc_err("Command was aborted\n"); + ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags); + admin_queue->stats.aborted_cmd++; + ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags); + ret = ENA_COM_NO_DEVICE; + goto err; + } + + ENA_WARN(comp_ctx->status != ENA_CMD_COMPLETED, + "Invalid comp status %d\n", comp_ctx->status); + + ret = ena_com_comp_status_to_errno(comp_ctx->comp_status); +err: + comp_ctxt_release(admin_queue, comp_ctx); + return ret; +} + +/** + * Set the LLQ configurations of the firmware + * + * The driver provides only the enabled feature values to the device, + * which in turn, checks if they are supported. + */ +static int ena_com_set_llq(struct ena_com_dev *ena_dev) +{ + struct ena_com_admin_queue *admin_queue; + struct ena_admin_set_feat_cmd cmd; + struct ena_admin_set_feat_resp resp; + struct ena_com_llq_info *llq_info = &ena_dev->llq_info; + int ret; + + memset(&cmd, 0x0, sizeof(cmd)); + admin_queue = &ena_dev->admin_queue; + + cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE; + cmd.feat_common.feature_id = ENA_ADMIN_LLQ; + + cmd.u.llq.header_location_ctrl_enabled = llq_info->header_location_ctrl; + cmd.u.llq.entry_size_ctrl_enabled = llq_info->desc_list_entry_size_ctrl; + cmd.u.llq.desc_num_before_header_enabled = llq_info->descs_num_before_header; + cmd.u.llq.descriptors_stride_ctrl_enabled = llq_info->desc_stride_ctrl; + + if (llq_info->disable_meta_caching) + cmd.u.llq.accel_mode.u.set.enabled_flags |= + BIT(ENA_ADMIN_DISABLE_META_CACHING); + + if (llq_info->max_entries_in_tx_burst) + cmd.u.llq.accel_mode.u.set.enabled_flags |= + BIT(ENA_ADMIN_LIMIT_TX_BURST); + + ret = ena_com_execute_admin_command(admin_queue, + (struct ena_admin_aq_entry *)&cmd, + sizeof(cmd), + (struct ena_admin_acq_entry *)&resp, + sizeof(resp)); + + if (unlikely(ret)) + ena_trc_err("Failed to set LLQ configurations: %d\n", ret); + + return ret; +} + +static int ena_com_config_llq_info(struct ena_com_dev *ena_dev, + struct ena_admin_feature_llq_desc *llq_features, + struct ena_llq_configurations *llq_default_cfg) +{ + struct ena_com_llq_info *llq_info = &ena_dev->llq_info; + u16 supported_feat; + int rc; + + memset(llq_info, 0, sizeof(*llq_info)); + + supported_feat = llq_features->header_location_ctrl_supported; + + if (likely(supported_feat & llq_default_cfg->llq_header_location)) { + llq_info->header_location_ctrl = + llq_default_cfg->llq_header_location; + } else { + ena_trc_err("Invalid header location control, supported: 0x%x\n", + supported_feat); + return -EINVAL; + } + + if (likely(llq_info->header_location_ctrl == ENA_ADMIN_INLINE_HEADER)) { + supported_feat = llq_features->descriptors_stride_ctrl_supported; + if (likely(supported_feat & llq_default_cfg->llq_stride_ctrl)) { + llq_info->desc_stride_ctrl = 
llq_default_cfg->llq_stride_ctrl; + } else { + if (supported_feat & ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY) { + llq_info->desc_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY; + } else if (supported_feat & ENA_ADMIN_SINGLE_DESC_PER_ENTRY) { + llq_info->desc_stride_ctrl = ENA_ADMIN_SINGLE_DESC_PER_ENTRY; + } else { + ena_trc_err("Invalid desc_stride_ctrl, supported: 0x%x\n", + supported_feat); + return -EINVAL; + } + + ena_trc_err("Default llq stride ctrl is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n", + llq_default_cfg->llq_stride_ctrl, + supported_feat, + llq_info->desc_stride_ctrl); + } + } else { + llq_info->desc_stride_ctrl = 0; + } + + supported_feat = llq_features->entry_size_ctrl_supported; + if (likely(supported_feat & llq_default_cfg->llq_ring_entry_size)) { + llq_info->desc_list_entry_size_ctrl = llq_default_cfg->llq_ring_entry_size; + llq_info->desc_list_entry_size = llq_default_cfg->llq_ring_entry_size_value; + } else { + if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_128B) { + llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_128B; + llq_info->desc_list_entry_size = 128; + } else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_192B) { + llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_192B; + llq_info->desc_list_entry_size = 192; + } else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_256B) { + llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_256B; + llq_info->desc_list_entry_size = 256; + } else { + ena_trc_err("Invalid entry_size_ctrl, supported: 0x%x\n", supported_feat); + return -EINVAL; + } + + ena_trc_err("Default llq ring entry size is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n", + llq_default_cfg->llq_ring_entry_size, + supported_feat, + llq_info->desc_list_entry_size); + } + if (unlikely(llq_info->desc_list_entry_size & 0x7)) { + /* The desc list entry size should be whole multiply of 8 + * This requirement comes from __iowrite64_copy() + */ + ena_trc_err("illegal entry size %d\n", + llq_info->desc_list_entry_size); + return -EINVAL; + } + + if (llq_info->desc_stride_ctrl == ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY) + llq_info->descs_per_entry = llq_info->desc_list_entry_size / + sizeof(struct ena_eth_io_tx_desc); + else + llq_info->descs_per_entry = 1; + + supported_feat = llq_features->desc_num_before_header_supported; + if (likely(supported_feat & llq_default_cfg->llq_num_decs_before_header)) { + llq_info->descs_num_before_header = llq_default_cfg->llq_num_decs_before_header; + } else { + if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2) { + llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2; + } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1) { + llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1; + } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4) { + llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4; + } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8) { + llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8; + } else { + ena_trc_err("Invalid descs_num_before_header, supported: 0x%x\n", + supported_feat); + return -EINVAL; + } + + ena_trc_err("Default llq num descs before header is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n", + llq_default_cfg->llq_num_decs_before_header, + supported_feat, + llq_info->descs_num_before_header); + } + /* Check for 
accelerated queue supported */ + llq_info->disable_meta_caching = + llq_features->accel_mode.u.get.supported_flags & + BIT(ENA_ADMIN_DISABLE_META_CACHING); + + if (llq_features->accel_mode.u.get.supported_flags & BIT(ENA_ADMIN_LIMIT_TX_BURST)) + llq_info->max_entries_in_tx_burst = + llq_features->accel_mode.u.get.max_tx_burst_size / + llq_default_cfg->llq_ring_entry_size_value; + + rc = ena_com_set_llq(ena_dev); + if (rc) + ena_trc_err("Cannot set LLQ configuration: %d\n", rc); + + return rc; +} + +static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx, + struct ena_com_admin_queue *admin_queue) +{ + unsigned long flags = 0; + int ret; + + ENA_WAIT_EVENT_WAIT(comp_ctx->wait_event, + admin_queue->completion_timeout); + + /* In case the command wasn't completed find out the root cause. + * There might be 2 kinds of errors + * 1) No completion (timeout reached) + * 2) There is completion but the device didn't get any msi-x interrupt. + */ + if (unlikely(comp_ctx->status == ENA_CMD_SUBMITTED)) { + ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags); + ena_com_handle_admin_completion(admin_queue); + admin_queue->stats.no_completion++; + ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags); + + if (comp_ctx->status == ENA_CMD_COMPLETED) { + ena_trc_err("The ena device sent a completion but the driver didn't receive a MSI-X interrupt (cmd %d), autopolling mode is %s\n", + comp_ctx->cmd_opcode, admin_queue->auto_polling ? "ON" : "OFF"); + /* Check if fallback to polling is enabled */ + if (admin_queue->auto_polling) + admin_queue->polling = true; + } else { + ena_trc_err("The ena device didn't send a completion for the admin cmd %d status %d\n", + comp_ctx->cmd_opcode, comp_ctx->status); + } + /* Check if shifted to polling mode. + * This will happen if there is a completion without an interrupt + * and autopolling mode is enabled. Continuing normal execution in such case + */ + if (!admin_queue->polling) { + admin_queue->running_state = false; + ret = ENA_COM_TIMER_EXPIRED; + goto err; + } + } + + ret = ena_com_comp_status_to_errno(comp_ctx->comp_status); +err: + comp_ctxt_release(admin_queue, comp_ctx); + return ret; +} + +/* This method read the hardware device register through posting writes + * and waiting for response + * On timeout the function will return ENA_MMIO_READ_TIMEOUT + */ +static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset) +{ + struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read; + volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp = + mmio_read->read_resp; + u32 mmio_read_reg, ret, i; + unsigned long flags = 0; + u32 timeout = mmio_read->reg_read_to; + + ENA_MIGHT_SLEEP(); + + if (timeout == 0) + timeout = ENA_REG_READ_TIMEOUT; + + /* If readless is disabled, perform regular read */ + if (!mmio_read->readless_supported) + return ENA_REG_READ32(ena_dev->bus, ena_dev->reg_bar + offset); + + ENA_SPINLOCK_LOCK(mmio_read->lock, flags); + mmio_read->seq_num++; + + read_resp->req_id = mmio_read->seq_num + 0xDEAD; + mmio_read_reg = (offset << ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT) & + ENA_REGS_MMIO_REG_READ_REG_OFF_MASK; + mmio_read_reg |= mmio_read->seq_num & + ENA_REGS_MMIO_REG_READ_REQ_ID_MASK; + + ENA_REG_WRITE32(ena_dev->bus, mmio_read_reg, + ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF); + + for (i = 0; i < timeout; i++) { + if (READ_ONCE16(read_resp->req_id) == mmio_read->seq_num) + break; + + ENA_UDELAY(1); + } + + if (unlikely(i == timeout)) { + ena_trc_err("reading reg failed for timeout. 
expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n", + mmio_read->seq_num, + offset, + read_resp->req_id, + read_resp->reg_off); + ret = ENA_MMIO_READ_TIMEOUT; + goto err; + } + + if (read_resp->reg_off != offset) { + ena_trc_err("Read failure: wrong offset provided\n"); + ret = ENA_MMIO_READ_TIMEOUT; + } else { + ret = read_resp->reg_val; + } +err: + ENA_SPINLOCK_UNLOCK(mmio_read->lock, flags); + + return ret; +} + +/* There are two types to wait for completion. + * Polling mode - wait until the completion is available. + * Async mode - wait on wait queue until the completion is ready + * (or the timeout expired). + * It is expected that the IRQ called ena_com_handle_admin_completion + * to mark the completions. + */ +static int ena_com_wait_and_process_admin_cq(struct ena_comp_ctx *comp_ctx, + struct ena_com_admin_queue *admin_queue) +{ + if (admin_queue->polling) + return ena_com_wait_and_process_admin_cq_polling(comp_ctx, + admin_queue); + + return ena_com_wait_and_process_admin_cq_interrupts(comp_ctx, + admin_queue); +} + +static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev, + struct ena_com_io_sq *io_sq) +{ + struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; + struct ena_admin_aq_destroy_sq_cmd destroy_cmd; + struct ena_admin_acq_destroy_sq_resp_desc destroy_resp; + u8 direction; + int ret; + + memset(&destroy_cmd, 0x0, sizeof(destroy_cmd)); + + if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) + direction = ENA_ADMIN_SQ_DIRECTION_TX; + else + direction = ENA_ADMIN_SQ_DIRECTION_RX; + + destroy_cmd.sq.sq_identity |= (direction << + ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT) & + ENA_ADMIN_SQ_SQ_DIRECTION_MASK; + + destroy_cmd.sq.sq_idx = io_sq->idx; + destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_SQ; + + ret = ena_com_execute_admin_command(admin_queue, + (struct ena_admin_aq_entry *)&destroy_cmd, + sizeof(destroy_cmd), + (struct ena_admin_acq_entry *)&destroy_resp, + sizeof(destroy_resp)); + + if (unlikely(ret && (ret != ENA_COM_NO_DEVICE))) + ena_trc_err("failed to destroy io sq error: %d\n", ret); + + return ret; +} + +static void ena_com_io_queue_free(struct ena_com_dev *ena_dev, + struct ena_com_io_sq *io_sq, + struct ena_com_io_cq *io_cq) +{ + size_t size; + + if (io_cq->cdesc_addr.virt_addr) { + size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth; + + ENA_MEM_FREE_COHERENT(ena_dev->dmadev, + size, + io_cq->cdesc_addr.virt_addr, + io_cq->cdesc_addr.phys_addr, + io_cq->cdesc_addr.mem_handle); + + io_cq->cdesc_addr.virt_addr = NULL; + } + + if (io_sq->desc_addr.virt_addr) { + size = io_sq->desc_entry_size * io_sq->q_depth; + + ENA_MEM_FREE_COHERENT(ena_dev->dmadev, + size, + io_sq->desc_addr.virt_addr, + io_sq->desc_addr.phys_addr, + io_sq->desc_addr.mem_handle); + + io_sq->desc_addr.virt_addr = NULL; + } + + if (io_sq->bounce_buf_ctrl.base_buffer) { + ENA_MEM_FREE(ena_dev->dmadev, + io_sq->bounce_buf_ctrl.base_buffer, + (io_sq->llq_info.desc_list_entry_size * ENA_COM_BOUNCE_BUFFER_CNTRL_CNT)); + io_sq->bounce_buf_ctrl.base_buffer = NULL; + } +} + +static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout, + u16 exp_state) +{ + u32 val, i; + + /* Convert timeout from resolution of 100ms to ENA_POLL_MS */ + timeout = (timeout * 100) / ENA_POLL_MS; + + for (i = 0; i < timeout; i++) { + val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF); + + if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) { + ena_trc_err("Reg read timeout occurred\n"); + return ENA_COM_TIMER_EXPIRED; + } + + if ((val & 
ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK) == + exp_state) + return 0; + + ENA_MSLEEP(ENA_POLL_MS); + } + + return ENA_COM_TIMER_EXPIRED; +} + +static bool ena_com_check_supported_feature_id(struct ena_com_dev *ena_dev, + enum ena_admin_aq_feature_id feature_id) +{ + u32 feature_mask = 1 << feature_id; + + /* Device attributes is always supported */ + if ((feature_id != ENA_ADMIN_DEVICE_ATTRIBUTES) && + !(ena_dev->supported_features & feature_mask)) + return false; + + return true; +} + +static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev, + struct ena_admin_get_feat_resp *get_resp, + enum ena_admin_aq_feature_id feature_id, + dma_addr_t control_buf_dma_addr, + u32 control_buff_size, + u8 feature_ver) +{ + struct ena_com_admin_queue *admin_queue; + struct ena_admin_get_feat_cmd get_cmd; + int ret; + + if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) { + ena_trc_dbg("Feature %d isn't supported\n", feature_id); + return ENA_COM_UNSUPPORTED; + } + + memset(&get_cmd, 0x0, sizeof(get_cmd)); + admin_queue = &ena_dev->admin_queue; + + get_cmd.aq_common_descriptor.opcode = ENA_ADMIN_GET_FEATURE; + + if (control_buff_size) + get_cmd.aq_common_descriptor.flags = + ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK; + else + get_cmd.aq_common_descriptor.flags = 0; + + ret = ena_com_mem_addr_set(ena_dev, + &get_cmd.control_buffer.address, + control_buf_dma_addr); + if (unlikely(ret)) { + ena_trc_err("memory address set failed\n"); + return ret; + } + + get_cmd.control_buffer.length = control_buff_size; + get_cmd.feat_common.feature_version = feature_ver; + get_cmd.feat_common.feature_id = feature_id; + + ret = ena_com_execute_admin_command(admin_queue, + (struct ena_admin_aq_entry *) + &get_cmd, + sizeof(get_cmd), + (struct ena_admin_acq_entry *) + get_resp, + sizeof(*get_resp)); + + if (unlikely(ret)) + ena_trc_err("Failed to submit get_feature command %d error: %d\n", + feature_id, ret); + + return ret; +} + +static int ena_com_get_feature(struct ena_com_dev *ena_dev, + struct ena_admin_get_feat_resp *get_resp, + enum ena_admin_aq_feature_id feature_id, + u8 feature_ver) +{ + return ena_com_get_feature_ex(ena_dev, + get_resp, + feature_id, + 0, + 0, + feature_ver); +} + +static void ena_com_hash_key_fill_default_key(struct ena_com_dev *ena_dev) +{ + struct ena_admin_feature_rss_flow_hash_control *hash_key = + (ena_dev->rss).hash_key; + + ENA_RSS_FILL_KEY(&hash_key->key, sizeof(hash_key->key)); + /* The key is stored in the device in uint32_t array + * as well as the API requires the key to be passed in this + * format. 
Thus the size of our array should be divided by 4 + */ + hash_key->keys_num = sizeof(hash_key->key) / sizeof(uint32_t); +} + +static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev) +{ + struct ena_rss *rss = &ena_dev->rss; + + ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev, + sizeof(*rss->hash_key), + rss->hash_key, + rss->hash_key_dma_addr, + rss->hash_key_mem_handle); + + if (unlikely(!rss->hash_key)) + return ENA_COM_NO_MEM; + + return 0; +} + +static void ena_com_hash_key_destroy(struct ena_com_dev *ena_dev) +{ + struct ena_rss *rss = &ena_dev->rss; + + if (rss->hash_key) + ENA_MEM_FREE_COHERENT(ena_dev->dmadev, + sizeof(*rss->hash_key), + rss->hash_key, + rss->hash_key_dma_addr, + rss->hash_key_mem_handle); + rss->hash_key = NULL; +} + +static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev) +{ + struct ena_rss *rss = &ena_dev->rss; + + ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev, + sizeof(*rss->hash_ctrl), + rss->hash_ctrl, + rss->hash_ctrl_dma_addr, + rss->hash_ctrl_mem_handle); + + if (unlikely(!rss->hash_ctrl)) + return ENA_COM_NO_MEM; + + return 0; +} + +static void ena_com_hash_ctrl_destroy(struct ena_com_dev *ena_dev) +{ + struct ena_rss *rss = &ena_dev->rss; + + if (rss->hash_ctrl) + ENA_MEM_FREE_COHERENT(ena_dev->dmadev, + sizeof(*rss->hash_ctrl), + rss->hash_ctrl, + rss->hash_ctrl_dma_addr, + rss->hash_ctrl_mem_handle); + rss->hash_ctrl = NULL; +} + +static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev, + u16 log_size) +{ + struct ena_rss *rss = &ena_dev->rss; + struct ena_admin_get_feat_resp get_resp; + size_t tbl_size; + int ret; + + ret = ena_com_get_feature(ena_dev, &get_resp, + ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG, 0); + if (unlikely(ret)) + return ret; + + if ((get_resp.u.ind_table.min_size > log_size) || + (get_resp.u.ind_table.max_size < log_size)) { + ena_trc_err("indirect table size doesn't fit. 
requested size: %d while min is:%d and max %d\n", + 1 << log_size, + 1 << get_resp.u.ind_table.min_size, + 1 << get_resp.u.ind_table.max_size); + return ENA_COM_INVAL; + } + + tbl_size = (1ULL << log_size) * + sizeof(struct ena_admin_rss_ind_table_entry); + + ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev, + tbl_size, + rss->rss_ind_tbl, + rss->rss_ind_tbl_dma_addr, + rss->rss_ind_tbl_mem_handle); + if (unlikely(!rss->rss_ind_tbl)) + goto mem_err1; + + tbl_size = (1ULL << log_size) * sizeof(u16); + rss->host_rss_ind_tbl = + ENA_MEM_ALLOC(ena_dev->dmadev, tbl_size); + if (unlikely(!rss->host_rss_ind_tbl)) + goto mem_err2; + + rss->tbl_log_size = log_size; + + return 0; + +mem_err2: + tbl_size = (1ULL << log_size) * + sizeof(struct ena_admin_rss_ind_table_entry); + + ENA_MEM_FREE_COHERENT(ena_dev->dmadev, + tbl_size, + rss->rss_ind_tbl, + rss->rss_ind_tbl_dma_addr, + rss->rss_ind_tbl_mem_handle); + rss->rss_ind_tbl = NULL; +mem_err1: + rss->tbl_log_size = 0; + return ENA_COM_NO_MEM; +} + +static void ena_com_indirect_table_destroy(struct ena_com_dev *ena_dev) +{ + struct ena_rss *rss = &ena_dev->rss; + size_t tbl_size = (1ULL << rss->tbl_log_size) * + sizeof(struct ena_admin_rss_ind_table_entry); + + if (rss->rss_ind_tbl) + ENA_MEM_FREE_COHERENT(ena_dev->dmadev, + tbl_size, + rss->rss_ind_tbl, + rss->rss_ind_tbl_dma_addr, + rss->rss_ind_tbl_mem_handle); + rss->rss_ind_tbl = NULL; + + if (rss->host_rss_ind_tbl) + ENA_MEM_FREE(ena_dev->dmadev, + rss->host_rss_ind_tbl, + ((1ULL << rss->tbl_log_size) * sizeof(u16))); + rss->host_rss_ind_tbl = NULL; +} + +static int ena_com_create_io_sq(struct ena_com_dev *ena_dev, + struct ena_com_io_sq *io_sq, u16 cq_idx) +{ + struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; + struct ena_admin_aq_create_sq_cmd create_cmd; + struct ena_admin_acq_create_sq_resp_desc cmd_completion; + u8 direction; + int ret; + + memset(&create_cmd, 0x0, sizeof(create_cmd)); + + create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_SQ; + + if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) + direction = ENA_ADMIN_SQ_DIRECTION_TX; + else + direction = ENA_ADMIN_SQ_DIRECTION_RX; + + create_cmd.sq_identity |= (direction << + ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT) & + ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK; + + create_cmd.sq_caps_2 |= io_sq->mem_queue_type & + ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK; + + create_cmd.sq_caps_2 |= (ENA_ADMIN_COMPLETION_POLICY_DESC << + ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT) & + ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK; + + create_cmd.sq_caps_3 |= + ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK; + + create_cmd.cq_idx = cq_idx; + create_cmd.sq_depth = io_sq->q_depth; + + if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) { + ret = ena_com_mem_addr_set(ena_dev, + &create_cmd.sq_ba, + io_sq->desc_addr.phys_addr); + if (unlikely(ret)) { + ena_trc_err("memory address set failed\n"); + return ret; + } + } + + ret = ena_com_execute_admin_command(admin_queue, + (struct ena_admin_aq_entry *)&create_cmd, + sizeof(create_cmd), + (struct ena_admin_acq_entry *)&cmd_completion, + sizeof(cmd_completion)); + if (unlikely(ret)) { + ena_trc_err("Failed to create IO SQ. 
error: %d\n", ret); + return ret; + } + + io_sq->idx = cmd_completion.sq_idx; + + io_sq->db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar + + (uintptr_t)cmd_completion.sq_doorbell_offset); + + if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { + io_sq->header_addr = (u8 __iomem *)((uintptr_t)ena_dev->mem_bar + + cmd_completion.llq_headers_offset); + + io_sq->desc_addr.pbuf_dev_addr = + (u8 __iomem *)((uintptr_t)ena_dev->mem_bar + + cmd_completion.llq_descriptors_offset); + } + + ena_trc_dbg("created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth); + + return ret; +} + +static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev) +{ + struct ena_rss *rss = &ena_dev->rss; + struct ena_com_io_sq *io_sq; + u16 qid; + int i; + + for (i = 0; i < 1 << rss->tbl_log_size; i++) { + qid = rss->host_rss_ind_tbl[i]; + if (qid >= ENA_TOTAL_NUM_QUEUES) + return ENA_COM_INVAL; + + io_sq = &ena_dev->io_sq_queues[qid]; + + if (io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX) + return ENA_COM_INVAL; + + rss->rss_ind_tbl[i].cq_idx = io_sq->idx; + } + + return 0; +} + +static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev, + u16 intr_delay_resolution) +{ + u16 prev_intr_delay_resolution = ena_dev->intr_delay_resolution; + + if (unlikely(!intr_delay_resolution)) { + ena_trc_err("Illegal intr_delay_resolution provided. Going to use default 1 usec resolution\n"); + intr_delay_resolution = ENA_DEFAULT_INTR_DELAY_RESOLUTION; + } + + /* update Rx */ + ena_dev->intr_moder_rx_interval = + ena_dev->intr_moder_rx_interval * + prev_intr_delay_resolution / + intr_delay_resolution; + + /* update Tx */ + ena_dev->intr_moder_tx_interval = + ena_dev->intr_moder_tx_interval * + prev_intr_delay_resolution / + intr_delay_resolution; + + ena_dev->intr_delay_resolution = intr_delay_resolution; +} + +/*****************************************************************************/ +/******************************* API ******************************/ +/*****************************************************************************/ + +int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue, + struct ena_admin_aq_entry *cmd, + size_t cmd_size, + struct ena_admin_acq_entry *comp, + size_t comp_size) +{ + struct ena_comp_ctx *comp_ctx; + int ret; + + comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size, + comp, comp_size); + if (IS_ERR(comp_ctx)) { + if (comp_ctx == ERR_PTR(ENA_COM_NO_DEVICE)) + ena_trc_dbg("Failed to submit command [%ld]\n", + PTR_ERR(comp_ctx)); + else + ena_trc_err("Failed to submit command [%ld]\n", + PTR_ERR(comp_ctx)); + + return PTR_ERR(comp_ctx); + } + + ret = ena_com_wait_and_process_admin_cq(comp_ctx, admin_queue); + if (unlikely(ret)) { + if (admin_queue->running_state) + ena_trc_err("Failed to process command. ret = %d\n", + ret); + else + ena_trc_dbg("Failed to process command. 
ret = %d\n", + ret); + } + return ret; +} + +int ena_com_create_io_cq(struct ena_com_dev *ena_dev, + struct ena_com_io_cq *io_cq) +{ + struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; + struct ena_admin_aq_create_cq_cmd create_cmd; + struct ena_admin_acq_create_cq_resp_desc cmd_completion; + int ret; + + memset(&create_cmd, 0x0, sizeof(create_cmd)); + + create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_CQ; + + create_cmd.cq_caps_2 |= (io_cq->cdesc_entry_size_in_bytes / 4) & + ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK; + create_cmd.cq_caps_1 |= + ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK; + + create_cmd.msix_vector = io_cq->msix_vector; + create_cmd.cq_depth = io_cq->q_depth; + + ret = ena_com_mem_addr_set(ena_dev, + &create_cmd.cq_ba, + io_cq->cdesc_addr.phys_addr); + if (unlikely(ret)) { + ena_trc_err("memory address set failed\n"); + return ret; + } + + ret = ena_com_execute_admin_command(admin_queue, + (struct ena_admin_aq_entry *)&create_cmd, + sizeof(create_cmd), + (struct ena_admin_acq_entry *)&cmd_completion, + sizeof(cmd_completion)); + if (unlikely(ret)) { + ena_trc_err("Failed to create IO CQ. error: %d\n", ret); + return ret; + } + + io_cq->idx = cmd_completion.cq_idx; + + io_cq->unmask_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar + + cmd_completion.cq_interrupt_unmask_register_offset); + + if (cmd_completion.cq_head_db_register_offset) + io_cq->cq_head_db_reg = + (u32 __iomem *)((uintptr_t)ena_dev->reg_bar + + cmd_completion.cq_head_db_register_offset); + + if (cmd_completion.numa_node_register_offset) + io_cq->numa_node_cfg_reg = + (u32 __iomem *)((uintptr_t)ena_dev->reg_bar + + cmd_completion.numa_node_register_offset); + + ena_trc_dbg("created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth); + + return ret; +} + +int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid, + struct ena_com_io_sq **io_sq, + struct ena_com_io_cq **io_cq) +{ + if (qid >= ENA_TOTAL_NUM_QUEUES) { + ena_trc_err("Invalid queue number %d but the max is %d\n", + qid, ENA_TOTAL_NUM_QUEUES); + return ENA_COM_INVAL; + } + + *io_sq = &ena_dev->io_sq_queues[qid]; + *io_cq = &ena_dev->io_cq_queues[qid]; + + return 0; +} + +void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev) +{ + struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; + struct ena_comp_ctx *comp_ctx; + u16 i; + + if (!admin_queue->comp_ctx) + return; + + for (i = 0; i < admin_queue->q_depth; i++) { + comp_ctx = get_comp_ctxt(admin_queue, i, false); + if (unlikely(!comp_ctx)) + break; + + comp_ctx->status = ENA_CMD_ABORTED; + + ENA_WAIT_EVENT_SIGNAL(comp_ctx->wait_event); + } +} + +void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev) +{ + struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; + unsigned long flags = 0; + + ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags); + while (ATOMIC32_READ(&admin_queue->outstanding_cmds) != 0) { + ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags); + ENA_MSLEEP(ENA_POLL_MS); + ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags); + } + ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags); +} + +int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev, + struct ena_com_io_cq *io_cq) +{ + struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; + struct ena_admin_aq_destroy_cq_cmd destroy_cmd; + struct ena_admin_acq_destroy_cq_resp_desc destroy_resp; + int ret; + + memset(&destroy_cmd, 0x0, sizeof(destroy_cmd)); + + destroy_cmd.cq_idx = io_cq->idx; + destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_CQ; + 
+ ret = ena_com_execute_admin_command(admin_queue, + (struct ena_admin_aq_entry *)&destroy_cmd, + sizeof(destroy_cmd), + (struct ena_admin_acq_entry *)&destroy_resp, + sizeof(destroy_resp)); + + if (unlikely(ret && (ret != ENA_COM_NO_DEVICE))) + ena_trc_err("Failed to destroy IO CQ. error: %d\n", ret); + + return ret; +} + +bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev) +{ + return ena_dev->admin_queue.running_state; +} + +void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state) +{ + struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; + unsigned long flags = 0; + + ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags); + ena_dev->admin_queue.running_state = state; + ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags); +} + +void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev) +{ + u16 depth = ena_dev->aenq.q_depth; + + ENA_WARN(ena_dev->aenq.head != depth, "Invalid AENQ state\n"); + + /* Init head_db to mark that all entries in the queue + * are initially available + */ + ENA_REG_WRITE32(ena_dev->bus, depth, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF); +} + +int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag) +{ + struct ena_com_admin_queue *admin_queue; + struct ena_admin_set_feat_cmd cmd; + struct ena_admin_set_feat_resp resp; + struct ena_admin_get_feat_resp get_resp; + int ret; + + ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG, 0); + if (ret) { + ena_trc_info("Can't get aenq configuration\n"); + return ret; + } + + if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) { + ena_trc_warn("Trying to set unsupported aenq events. supported flag: 0x%x asked flag: 0x%x\n", + get_resp.u.aenq.supported_groups, + groups_flag); + return ENA_COM_UNSUPPORTED; + } + + memset(&cmd, 0x0, sizeof(cmd)); + admin_queue = &ena_dev->admin_queue; + + cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE; + cmd.aq_common_descriptor.flags = 0; + cmd.feat_common.feature_id = ENA_ADMIN_AENQ_CONFIG; + cmd.u.aenq.enabled_groups = groups_flag; + + ret = ena_com_execute_admin_command(admin_queue, + (struct ena_admin_aq_entry *)&cmd, + sizeof(cmd), + (struct ena_admin_acq_entry *)&resp, + sizeof(resp)); + + if (unlikely(ret)) + ena_trc_err("Failed to config AENQ ret: %d\n", ret); + + return ret; +} + +int ena_com_get_dma_width(struct ena_com_dev *ena_dev) +{ + u32 caps = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF); + int width; + + if (unlikely(caps == ENA_MMIO_READ_TIMEOUT)) { + ena_trc_err("Reg read timeout occurred\n"); + return ENA_COM_TIMER_EXPIRED; + } + + width = (caps & ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK) >> + ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT; + + ena_trc_dbg("ENA dma width: %d\n", width); + + if ((width < 32) || width > ENA_MAX_PHYS_ADDR_SIZE_BITS) { + ena_trc_err("DMA width illegal value: %d\n", width); + return ENA_COM_INVAL; + } + + ena_dev->dma_addr_bits = width; + + return width; +} + +int ena_com_validate_version(struct ena_com_dev *ena_dev) +{ + u32 ver; + u32 ctrl_ver; + u32 ctrl_ver_masked; + + /* Make sure the ENA version and the controller version are at least + * as the driver expects + */ + ver = ena_com_reg_bar_read32(ena_dev, ENA_REGS_VERSION_OFF); + ctrl_ver = ena_com_reg_bar_read32(ena_dev, + ENA_REGS_CONTROLLER_VERSION_OFF); + + if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) || + (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) { + ena_trc_err("Reg read timeout occurred\n"); + return ENA_COM_TIMER_EXPIRED; + } + + ena_trc_info("ena device version: %d.%d\n", + (ver & 
ENA_REGS_VERSION_MAJOR_VERSION_MASK) >> + ENA_REGS_VERSION_MAJOR_VERSION_SHIFT, + ver & ENA_REGS_VERSION_MINOR_VERSION_MASK); + + ena_trc_info("ena controller version: %d.%d.%d implementation version %d\n", + (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) + >> ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT, + (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) + >> ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT, + (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK), + (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK) >> + ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT); + + ctrl_ver_masked = + (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) | + (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) | + (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK); + + /* Validate the ctrl version without the implementation ID */ + if (ctrl_ver_masked < MIN_ENA_CTRL_VER) { + ena_trc_err("ENA ctrl version is lower than the minimal ctrl version the driver supports\n"); + return -1; + } + + return 0; +} + +void ena_com_admin_destroy(struct ena_com_dev *ena_dev) +{ + struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; + struct ena_com_admin_cq *cq = &admin_queue->cq; + struct ena_com_admin_sq *sq = &admin_queue->sq; + struct ena_com_aenq *aenq = &ena_dev->aenq; + u16 size; + + ENA_WAIT_EVENT_DESTROY(admin_queue->comp_ctx->wait_event); + if (admin_queue->comp_ctx) + ENA_MEM_FREE(ena_dev->dmadev, + admin_queue->comp_ctx, + (admin_queue->q_depth * sizeof(struct ena_comp_ctx))); + admin_queue->comp_ctx = NULL; + size = ADMIN_SQ_SIZE(admin_queue->q_depth); + if (sq->entries) + ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, sq->entries, + sq->dma_addr, sq->mem_handle); + sq->entries = NULL; + + size = ADMIN_CQ_SIZE(admin_queue->q_depth); + if (cq->entries) + ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, cq->entries, + cq->dma_addr, cq->mem_handle); + cq->entries = NULL; + + size = ADMIN_AENQ_SIZE(aenq->q_depth); + if (ena_dev->aenq.entries) + ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, aenq->entries, + aenq->dma_addr, aenq->mem_handle); + aenq->entries = NULL; + ENA_SPINLOCK_DESTROY(admin_queue->q_lock); +} + +void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling) +{ + u32 mask_value = 0; + + if (polling) + mask_value = ENA_REGS_ADMIN_INTR_MASK; + + ENA_REG_WRITE32(ena_dev->bus, mask_value, + ena_dev->reg_bar + ENA_REGS_INTR_MASK_OFF); + ena_dev->admin_queue.polling = polling; +} + +bool ena_com_get_admin_polling_mode(struct ena_com_dev * ena_dev) +{ + return ena_dev->admin_queue.polling; +} + +void ena_com_set_admin_auto_polling_mode(struct ena_com_dev *ena_dev, + bool polling) +{ + ena_dev->admin_queue.auto_polling = polling; +} + +int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev) +{ + struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read; + + ENA_SPINLOCK_INIT(mmio_read->lock); + ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev, + sizeof(*mmio_read->read_resp), + mmio_read->read_resp, + mmio_read->read_resp_dma_addr, + mmio_read->read_resp_mem_handle); + if (unlikely(!mmio_read->read_resp)) + goto err; + + ena_com_mmio_reg_read_request_write_dev_addr(ena_dev); + + mmio_read->read_resp->req_id = 0x0; + mmio_read->seq_num = 0x0; + mmio_read->readless_supported = true; + + return 0; + +err: + ENA_SPINLOCK_DESTROY(mmio_read->lock); + return ENA_COM_NO_MEM; +} + +void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported) +{ + struct ena_com_mmio_read *mmio_read = 
&ena_dev->mmio_read; + + mmio_read->readless_supported = readless_supported; +} + +void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev) +{ + struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read; + + ENA_REG_WRITE32(ena_dev->bus, 0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF); + ENA_REG_WRITE32(ena_dev->bus, 0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF); + + ENA_MEM_FREE_COHERENT(ena_dev->dmadev, + sizeof(*mmio_read->read_resp), + mmio_read->read_resp, + mmio_read->read_resp_dma_addr, + mmio_read->read_resp_mem_handle); + + mmio_read->read_resp = NULL; + ENA_SPINLOCK_DESTROY(mmio_read->lock); +} + +void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev) +{ + struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read; + u32 addr_low, addr_high; + + addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(mmio_read->read_resp_dma_addr); + addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(mmio_read->read_resp_dma_addr); + + ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF); + ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF); +} + +int ena_com_admin_init(struct ena_com_dev *ena_dev, + struct ena_aenq_handlers *aenq_handlers) +{ + struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; + u32 aq_caps, acq_caps, dev_sts, addr_low, addr_high; + int ret; + + dev_sts = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF); + + if (unlikely(dev_sts == ENA_MMIO_READ_TIMEOUT)) { + ena_trc_err("Reg read timeout occurred\n"); + return ENA_COM_TIMER_EXPIRED; + } + + if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) { + ena_trc_err("Device isn't ready, abort com init\n"); + return ENA_COM_NO_DEVICE; + } + + admin_queue->q_depth = ENA_ADMIN_QUEUE_DEPTH; + + admin_queue->bus = ena_dev->bus; + admin_queue->q_dmadev = ena_dev->dmadev; + admin_queue->polling = false; + admin_queue->curr_cmd_id = 0; + + ATOMIC32_SET(&admin_queue->outstanding_cmds, 0); + + ENA_SPINLOCK_INIT(admin_queue->q_lock); + + ret = ena_com_init_comp_ctxt(admin_queue); + if (ret) + goto error; + + ret = ena_com_admin_init_sq(admin_queue); + if (ret) + goto error; + + ret = ena_com_admin_init_cq(admin_queue); + if (ret) + goto error; + + admin_queue->sq.db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar + + ENA_REGS_AQ_DB_OFF); + + addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->sq.dma_addr); + addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->sq.dma_addr); + + ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_AQ_BASE_LO_OFF); + ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_AQ_BASE_HI_OFF); + + addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->cq.dma_addr); + addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->cq.dma_addr); + + ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_LO_OFF); + ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_HI_OFF); + + aq_caps = 0; + aq_caps |= admin_queue->q_depth & ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK; + aq_caps |= (sizeof(struct ena_admin_aq_entry) << + ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT) & + ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK; + + acq_caps = 0; + acq_caps |= admin_queue->q_depth & ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK; + acq_caps |= (sizeof(struct ena_admin_acq_entry) << + ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT) & + ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK; + + ENA_REG_WRITE32(ena_dev->bus, aq_caps, ena_dev->reg_bar + ENA_REGS_AQ_CAPS_OFF); + ENA_REG_WRITE32(ena_dev->bus, acq_caps, ena_dev->reg_bar + 
ENA_REGS_ACQ_CAPS_OFF); + ret = ena_com_admin_init_aenq(ena_dev, aenq_handlers); + if (ret) + goto error; + + admin_queue->running_state = true; + + return 0; +error: + ena_com_admin_destroy(ena_dev); + + return ret; +} + +int ena_com_create_io_queue(struct ena_com_dev *ena_dev, + struct ena_com_create_io_ctx *ctx) +{ + struct ena_com_io_sq *io_sq; + struct ena_com_io_cq *io_cq; + int ret; + + if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) { + ena_trc_err("Qid (%d) is bigger than max num of queues (%d)\n", + ctx->qid, ENA_TOTAL_NUM_QUEUES); + return ENA_COM_INVAL; + } + + io_sq = &ena_dev->io_sq_queues[ctx->qid]; + io_cq = &ena_dev->io_cq_queues[ctx->qid]; + + memset(io_sq, 0x0, sizeof(*io_sq)); + memset(io_cq, 0x0, sizeof(*io_cq)); + + /* Init CQ */ + io_cq->q_depth = ctx->queue_size; + io_cq->direction = ctx->direction; + io_cq->qid = ctx->qid; + + io_cq->msix_vector = ctx->msix_vector; + + io_sq->q_depth = ctx->queue_size; + io_sq->direction = ctx->direction; + io_sq->qid = ctx->qid; + + io_sq->mem_queue_type = ctx->mem_queue_type; + + if (ctx->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) + /* header length is limited to 8 bits */ + io_sq->tx_max_header_size = + ENA_MIN32(ena_dev->tx_max_header_size, SZ_256); + + ret = ena_com_init_io_sq(ena_dev, ctx, io_sq); + if (ret) + goto error; + ret = ena_com_init_io_cq(ena_dev, ctx, io_cq); + if (ret) + goto error; + + ret = ena_com_create_io_cq(ena_dev, io_cq); + if (ret) + goto error; + + ret = ena_com_create_io_sq(ena_dev, io_sq, io_cq->idx); + if (ret) + goto destroy_io_cq; + + return 0; + +destroy_io_cq: + ena_com_destroy_io_cq(ena_dev, io_cq); +error: + ena_com_io_queue_free(ena_dev, io_sq, io_cq); + return ret; +} + +void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid) +{ + struct ena_com_io_sq *io_sq; + struct ena_com_io_cq *io_cq; + + if (qid >= ENA_TOTAL_NUM_QUEUES) { + ena_trc_err("Qid (%d) is bigger than max num of queues (%d)\n", + qid, ENA_TOTAL_NUM_QUEUES); + return; + } + + io_sq = &ena_dev->io_sq_queues[qid]; + io_cq = &ena_dev->io_cq_queues[qid]; + + ena_com_destroy_io_sq(ena_dev, io_sq); + ena_com_destroy_io_cq(ena_dev, io_cq); + + ena_com_io_queue_free(ena_dev, io_sq, io_cq); +} + +int ena_com_get_link_params(struct ena_com_dev *ena_dev, + struct ena_admin_get_feat_resp *resp) +{ + return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG, 0); +} + +int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev, + struct ena_com_dev_get_features_ctx *get_feat_ctx) +{ + struct ena_admin_get_feat_resp get_resp; + int rc; + + rc = ena_com_get_feature(ena_dev, &get_resp, + ENA_ADMIN_DEVICE_ATTRIBUTES, 0); + if (rc) + return rc; + + memcpy(&get_feat_ctx->dev_attr, &get_resp.u.dev_attr, + sizeof(get_resp.u.dev_attr)); + ena_dev->supported_features = get_resp.u.dev_attr.supported_features; + + if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) { + rc = ena_com_get_feature(ena_dev, &get_resp, + ENA_ADMIN_MAX_QUEUES_EXT, + ENA_FEATURE_MAX_QUEUE_EXT_VER); + if (rc) + return rc; + + if (get_resp.u.max_queue_ext.version != ENA_FEATURE_MAX_QUEUE_EXT_VER) + return -EINVAL; + + memcpy(&get_feat_ctx->max_queue_ext, &get_resp.u.max_queue_ext, + sizeof(get_resp.u.max_queue_ext)); + ena_dev->tx_max_header_size = + get_resp.u.max_queue_ext.max_queue_ext.max_tx_header_size; + } else { + rc = ena_com_get_feature(ena_dev, &get_resp, + ENA_ADMIN_MAX_QUEUES_NUM, 0); + memcpy(&get_feat_ctx->max_queues, &get_resp.u.max_queue, + sizeof(get_resp.u.max_queue)); + ena_dev->tx_max_header_size = + get_resp.u.max_queue.max_header_size; + + 
if (rc) + return rc; + } + + rc = ena_com_get_feature(ena_dev, &get_resp, + ENA_ADMIN_AENQ_CONFIG, 0); + if (rc) + return rc; + + memcpy(&get_feat_ctx->aenq, &get_resp.u.aenq, + sizeof(get_resp.u.aenq)); + + rc = ena_com_get_feature(ena_dev, &get_resp, + ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0); + if (rc) + return rc; + + memcpy(&get_feat_ctx->offload, &get_resp.u.offload, + sizeof(get_resp.u.offload)); + + /* Driver hints isn't mandatory admin command. So in case the + * command isn't supported set driver hints to 0 + */ + rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_HW_HINTS, 0); + + if (!rc) + memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints, + sizeof(get_resp.u.hw_hints)); + else if (rc == ENA_COM_UNSUPPORTED) + memset(&get_feat_ctx->hw_hints, 0x0, sizeof(get_feat_ctx->hw_hints)); + else + return rc; + + rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_LLQ, 0); + if (!rc) + memcpy(&get_feat_ctx->llq, &get_resp.u.llq, + sizeof(get_resp.u.llq)); + else if (rc == ENA_COM_UNSUPPORTED) + memset(&get_feat_ctx->llq, 0x0, sizeof(get_feat_ctx->llq)); + else + return rc; + + rc = ena_com_get_feature(ena_dev, &get_resp, + ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG, 0); + if (!rc) + memcpy(&get_feat_ctx->ind_table, &get_resp.u.ind_table, + sizeof(get_resp.u.ind_table)); + else if (rc == ENA_COM_UNSUPPORTED) + memset(&get_feat_ctx->ind_table, 0x0, + sizeof(get_feat_ctx->ind_table)); + else + return rc; + + return 0; +} + +void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev) +{ + ena_com_handle_admin_completion(&ena_dev->admin_queue); +} + +/* ena_handle_specific_aenq_event: + * return the handler that is relevant to the specific event group + */ +static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *dev, + u16 group) +{ + struct ena_aenq_handlers *aenq_handlers = dev->aenq.aenq_handlers; + + if ((group < ENA_MAX_HANDLERS) && aenq_handlers->handlers[group]) + return aenq_handlers->handlers[group]; + + return aenq_handlers->unimplemented_handler; +} + +/* ena_aenq_intr_handler: + * handles the aenq incoming events. + * pop events from the queue and apply the specific handler + */ +void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data) +{ + struct ena_admin_aenq_entry *aenq_e; + struct ena_admin_aenq_common_desc *aenq_common; + struct ena_com_aenq *aenq = &dev->aenq; + u64 timestamp; + ena_aenq_handler handler_cb; + u16 masked_head, processed = 0; + u8 phase; + + masked_head = aenq->head & (aenq->q_depth - 1); + phase = aenq->phase; + aenq_e = &aenq->entries[masked_head]; /* Get first entry */ + aenq_common = &aenq_e->aenq_common_desc; + + /* Go over all the events */ + while ((READ_ONCE8(aenq_common->flags) & + ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) { + /* Make sure the phase bit (ownership) is as expected before + * reading the rest of the descriptor. + */ + dma_rmb(); + + timestamp = (u64)aenq_common->timestamp_low | + ((u64)aenq_common->timestamp_high << 32); + ENA_TOUCH(timestamp); /* In case debug is disabled */ + ena_trc_dbg("AENQ! 
Group[%x] Syndrom[%x] timestamp: [%" ENA_PRIu64 "s]\n", + aenq_common->group, + aenq_common->syndrom, + timestamp); + + /* Handle specific event*/ + handler_cb = ena_com_get_specific_aenq_cb(dev, + aenq_common->group); + handler_cb(data, aenq_e); /* call the actual event handler*/ + + /* Get next event entry */ + masked_head++; + processed++; + + if (unlikely(masked_head == aenq->q_depth)) { + masked_head = 0; + phase = !phase; + } + aenq_e = &aenq->entries[masked_head]; + aenq_common = &aenq_e->aenq_common_desc; + } + + aenq->head += processed; + aenq->phase = phase; + + /* Don't update aenq doorbell if there weren't any processed events */ + if (!processed) + return; + + /* write the aenq doorbell after all AENQ descriptors were read */ + mb(); + ENA_REG_WRITE32_RELAXED(dev->bus, (u32)aenq->head, + dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF); +#ifndef MMIOWB_NOT_DEFINED + mmiowb(); +#endif +} + +int ena_com_dev_reset(struct ena_com_dev *ena_dev, + enum ena_regs_reset_reason_types reset_reason) +{ + u32 stat, timeout, cap, reset_val; + int rc; + + stat = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF); + cap = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF); + + if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) || + (cap == ENA_MMIO_READ_TIMEOUT))) { + ena_trc_err("Reg read32 timeout occurred\n"); + return ENA_COM_TIMER_EXPIRED; + } + + if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) { + ena_trc_err("Device isn't ready, can't reset device\n"); + return ENA_COM_INVAL; + } + + timeout = (cap & ENA_REGS_CAPS_RESET_TIMEOUT_MASK) >> + ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT; + if (timeout == 0) { + ena_trc_err("Invalid timeout value\n"); + return ENA_COM_INVAL; + } + + /* start reset */ + reset_val = ENA_REGS_DEV_CTL_DEV_RESET_MASK; + reset_val |= (reset_reason << ENA_REGS_DEV_CTL_RESET_REASON_SHIFT) & + ENA_REGS_DEV_CTL_RESET_REASON_MASK; + ENA_REG_WRITE32(ena_dev->bus, reset_val, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF); + + /* Write again the MMIO read request address */ + ena_com_mmio_reg_read_request_write_dev_addr(ena_dev); + + rc = wait_for_reset_state(ena_dev, timeout, + ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK); + if (rc != 0) { + ena_trc_err("Reset indication didn't turn on\n"); + return rc; + } + + /* reset done */ + ENA_REG_WRITE32(ena_dev->bus, 0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF); + rc = wait_for_reset_state(ena_dev, timeout, 0); + if (rc != 0) { + ena_trc_err("Reset indication didn't turn off\n"); + return rc; + } + + timeout = (cap & ENA_REGS_CAPS_ADMIN_CMD_TO_MASK) >> + ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT; + if (timeout) + /* the resolution of timeout reg is 100ms */ + ena_dev->admin_queue.completion_timeout = timeout * 100000; + else + ena_dev->admin_queue.completion_timeout = ADMIN_CMD_TIMEOUT_US; + + return 0; +} + +static int ena_get_dev_stats(struct ena_com_dev *ena_dev, + struct ena_com_stats_ctx *ctx, + enum ena_admin_get_stats_type type) +{ + struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx->get_cmd; + struct ena_admin_acq_get_stats_resp *get_resp = &ctx->get_resp; + struct ena_com_admin_queue *admin_queue; + int ret; + + admin_queue = &ena_dev->admin_queue; + + get_cmd->aq_common_descriptor.opcode = ENA_ADMIN_GET_STATS; + get_cmd->aq_common_descriptor.flags = 0; + get_cmd->type = type; + + ret = ena_com_execute_admin_command(admin_queue, + (struct ena_admin_aq_entry *)get_cmd, + sizeof(*get_cmd), + (struct ena_admin_acq_entry *)get_resp, + sizeof(*get_resp)); + + if (unlikely(ret)) + ena_trc_err("Failed to get stats. 
error: %d\n", ret); + + return ret; +} + +int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev, + struct ena_admin_basic_stats *stats) +{ + struct ena_com_stats_ctx ctx; + int ret; + + memset(&ctx, 0x0, sizeof(ctx)); + ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_BASIC); + if (likely(ret == 0)) + memcpy(stats, &ctx.get_resp.basic_stats, + sizeof(ctx.get_resp.basic_stats)); + + return ret; +} + +int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu) +{ + struct ena_com_admin_queue *admin_queue; + struct ena_admin_set_feat_cmd cmd; + struct ena_admin_set_feat_resp resp; + int ret; + + if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) { + ena_trc_dbg("Feature %d isn't supported\n", ENA_ADMIN_MTU); + return ENA_COM_UNSUPPORTED; + } + + memset(&cmd, 0x0, sizeof(cmd)); + admin_queue = &ena_dev->admin_queue; + + cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE; + cmd.aq_common_descriptor.flags = 0; + cmd.feat_common.feature_id = ENA_ADMIN_MTU; + cmd.u.mtu.mtu = mtu; + + ret = ena_com_execute_admin_command(admin_queue, + (struct ena_admin_aq_entry *)&cmd, + sizeof(cmd), + (struct ena_admin_acq_entry *)&resp, + sizeof(resp)); + + if (unlikely(ret)) + ena_trc_err("Failed to set mtu %d. error: %d\n", mtu, ret); + + return ret; +} + +int ena_com_get_offload_settings(struct ena_com_dev *ena_dev, + struct ena_admin_feature_offload_desc *offload) +{ + int ret; + struct ena_admin_get_feat_resp resp; + + ret = ena_com_get_feature(ena_dev, &resp, + ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0); + if (unlikely(ret)) { + ena_trc_err("Failed to get offload capabilities %d\n", ret); + return ret; + } + + memcpy(offload, &resp.u.offload, sizeof(resp.u.offload)); + + return 0; +} + +int ena_com_set_hash_function(struct ena_com_dev *ena_dev) +{ + struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; + struct ena_rss *rss = &ena_dev->rss; + struct ena_admin_set_feat_cmd cmd; + struct ena_admin_set_feat_resp resp; + struct ena_admin_get_feat_resp get_resp; + int ret; + + if (!ena_com_check_supported_feature_id(ena_dev, + ENA_ADMIN_RSS_HASH_FUNCTION)) { + ena_trc_dbg("Feature %d isn't supported\n", + ENA_ADMIN_RSS_HASH_FUNCTION); + return ENA_COM_UNSUPPORTED; + } + + /* Validate hash function is supported */ + ret = ena_com_get_feature(ena_dev, &get_resp, + ENA_ADMIN_RSS_HASH_FUNCTION, 0); + if (unlikely(ret)) + return ret; + + if (!(get_resp.u.flow_hash_func.supported_func & BIT(rss->hash_func))) { + ena_trc_err("Func hash %d isn't supported by device, abort\n", + rss->hash_func); + return ENA_COM_UNSUPPORTED; + } + + memset(&cmd, 0x0, sizeof(cmd)); + + cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE; + cmd.aq_common_descriptor.flags = + ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK; + cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_FUNCTION; + cmd.u.flow_hash_func.init_val = rss->hash_init_val; + cmd.u.flow_hash_func.selected_func = 1 << rss->hash_func; + + ret = ena_com_mem_addr_set(ena_dev, + &cmd.control_buffer.address, + rss->hash_key_dma_addr); + if (unlikely(ret)) { + ena_trc_err("memory address set failed\n"); + return ret; + } + + cmd.control_buffer.length = sizeof(*rss->hash_key); + + ret = ena_com_execute_admin_command(admin_queue, + (struct ena_admin_aq_entry *)&cmd, + sizeof(cmd), + (struct ena_admin_acq_entry *)&resp, + sizeof(resp)); + if (unlikely(ret)) { + ena_trc_err("Failed to set hash function %d. 
error: %d\n", + rss->hash_func, ret); + return ENA_COM_INVAL; + } + + return 0; +} + +int ena_com_fill_hash_function(struct ena_com_dev *ena_dev, + enum ena_admin_hash_functions func, + const u8 *key, u16 key_len, u32 init_val) +{ + struct ena_admin_feature_rss_flow_hash_control *hash_key; + struct ena_admin_get_feat_resp get_resp; + enum ena_admin_hash_functions old_func; + struct ena_rss *rss = &ena_dev->rss; + int rc; + + hash_key = rss->hash_key; + + /* Make sure size is a mult of DWs */ + if (unlikely(key_len & 0x3)) + return ENA_COM_INVAL; + + rc = ena_com_get_feature_ex(ena_dev, &get_resp, + ENA_ADMIN_RSS_HASH_FUNCTION, + rss->hash_key_dma_addr, + sizeof(*rss->hash_key), 0); + if (unlikely(rc)) + return rc; + + if (!(BIT(func) & get_resp.u.flow_hash_func.supported_func)) { + ena_trc_err("Flow hash function %d isn't supported\n", func); + return ENA_COM_UNSUPPORTED; + } + + switch (func) { + case ENA_ADMIN_TOEPLITZ: + if (key) { + if (key_len != sizeof(hash_key->key)) { + ena_trc_err("key len (%hu) doesn't equal the supported size (%zu)\n", + key_len, sizeof(hash_key->key)); + return ENA_COM_INVAL; + } + memcpy(hash_key->key, key, key_len); + rss->hash_init_val = init_val; + hash_key->keys_num = key_len / sizeof(u32); + } + break; + case ENA_ADMIN_CRC32: + rss->hash_init_val = init_val; + break; + default: + ena_trc_err("Invalid hash function (%d)\n", func); + return ENA_COM_INVAL; + } + + old_func = rss->hash_func; + rss->hash_func = func; + rc = ena_com_set_hash_function(ena_dev); + + /* Restore the old function */ + if (unlikely(rc)) + rss->hash_func = old_func; + + return rc; +} + +int ena_com_get_hash_function(struct ena_com_dev *ena_dev, + enum ena_admin_hash_functions *func, + u8 *key) +{ + struct ena_rss *rss = &ena_dev->rss; + struct ena_admin_get_feat_resp get_resp; + struct ena_admin_feature_rss_flow_hash_control *hash_key = + rss->hash_key; + int rc; + + rc = ena_com_get_feature_ex(ena_dev, &get_resp, + ENA_ADMIN_RSS_HASH_FUNCTION, + rss->hash_key_dma_addr, + sizeof(*rss->hash_key), 0); + if (unlikely(rc)) + return rc; + + /* ENA_FFS returns 1 in case the lsb is set */ + rss->hash_func = ENA_FFS(get_resp.u.flow_hash_func.selected_func); + if (rss->hash_func) + rss->hash_func--; + + if (func) + *func = rss->hash_func; + + if (key) + memcpy(key, hash_key->key, (size_t)(hash_key->keys_num) << 2); + + return 0; +} + +int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev, + enum ena_admin_flow_hash_proto proto, + u16 *fields) +{ + struct ena_rss *rss = &ena_dev->rss; + struct ena_admin_get_feat_resp get_resp; + int rc; + + rc = ena_com_get_feature_ex(ena_dev, &get_resp, + ENA_ADMIN_RSS_HASH_INPUT, + rss->hash_ctrl_dma_addr, + sizeof(*rss->hash_ctrl), 0); + if (unlikely(rc)) + return rc; + + if (fields) + *fields = rss->hash_ctrl->selected_fields[proto].fields; + + return 0; +} + +int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev) +{ + struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; + struct ena_rss *rss = &ena_dev->rss; + struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl; + struct ena_admin_set_feat_cmd cmd; + struct ena_admin_set_feat_resp resp; + int ret; + + if (!ena_com_check_supported_feature_id(ena_dev, + ENA_ADMIN_RSS_HASH_INPUT)) { + ena_trc_dbg("Feature %d isn't supported\n", + ENA_ADMIN_RSS_HASH_INPUT); + return ENA_COM_UNSUPPORTED; + } + + memset(&cmd, 0x0, sizeof(cmd)); + + cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE; + cmd.aq_common_descriptor.flags = + ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK; + 
cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_INPUT; + cmd.u.flow_hash_input.enabled_input_sort = + ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK | + ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK; + + ret = ena_com_mem_addr_set(ena_dev, + &cmd.control_buffer.address, + rss->hash_ctrl_dma_addr); + if (unlikely(ret)) { + ena_trc_err("memory address set failed\n"); + return ret; + } + cmd.control_buffer.length = sizeof(*hash_ctrl); + + ret = ena_com_execute_admin_command(admin_queue, + (struct ena_admin_aq_entry *)&cmd, + sizeof(cmd), + (struct ena_admin_acq_entry *)&resp, + sizeof(resp)); + if (unlikely(ret)) + ena_trc_err("Failed to set hash input. error: %d\n", ret); + + return ret; +} + +int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev) +{ + struct ena_rss *rss = &ena_dev->rss; + struct ena_admin_feature_rss_hash_control *hash_ctrl = + rss->hash_ctrl; + u16 available_fields = 0; + int rc, i; + + /* Get the supported hash input */ + rc = ena_com_get_hash_ctrl(ena_dev, 0, NULL); + if (unlikely(rc)) + return rc; + + hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP4].fields = + ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA | + ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP; + + hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP4].fields = + ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA | + ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP; + + hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP6].fields = + ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA | + ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP; + + hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP6].fields = + ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA | + ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP; + + hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4].fields = + ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA; + + hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP6].fields = + ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA; + + hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields = + ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA; + + hash_ctrl->selected_fields[ENA_ADMIN_RSS_NOT_IP].fields = + ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA; + + for (i = 0; i < ENA_ADMIN_RSS_PROTO_NUM; i++) { + available_fields = hash_ctrl->selected_fields[i].fields & + hash_ctrl->supported_fields[i].fields; + if (available_fields != hash_ctrl->selected_fields[i].fields) { + ena_trc_err("hash control doesn't support all the desire configuration. proto %x supported %x selected %x\n", + i, hash_ctrl->supported_fields[i].fields, + hash_ctrl->selected_fields[i].fields); + return ENA_COM_UNSUPPORTED; + } + } + + rc = ena_com_set_hash_ctrl(ena_dev); + + /* In case of failure, restore the old hash ctrl */ + if (unlikely(rc)) + ena_com_get_hash_ctrl(ena_dev, 0, NULL); + + return rc; +} + +int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev, + enum ena_admin_flow_hash_proto proto, + u16 hash_fields) +{ + struct ena_rss *rss = &ena_dev->rss; + struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl; + u16 supported_fields; + int rc; + + if (proto >= ENA_ADMIN_RSS_PROTO_NUM) { + ena_trc_err("Invalid proto num (%u)\n", proto); + return ENA_COM_INVAL; + } + + /* Get the ctrl table */ + rc = ena_com_get_hash_ctrl(ena_dev, proto, NULL); + if (unlikely(rc)) + return rc; + + /* Make sure all the fields are supported */ + supported_fields = hash_ctrl->supported_fields[proto].fields; + if ((hash_fields & supported_fields) != hash_fields) { + ena_trc_err("proto %d doesn't support the required fields %x. 
supports only: %x\n", + proto, hash_fields, supported_fields); + } + + hash_ctrl->selected_fields[proto].fields = hash_fields; + + rc = ena_com_set_hash_ctrl(ena_dev); + + /* In case of failure, restore the old hash ctrl */ + if (unlikely(rc)) + ena_com_get_hash_ctrl(ena_dev, 0, NULL); + + return 0; +} + +int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev, + u16 entry_idx, u16 entry_value) +{ + struct ena_rss *rss = &ena_dev->rss; + + if (unlikely(entry_idx >= (1 << rss->tbl_log_size))) + return ENA_COM_INVAL; + + if (unlikely((entry_value > ENA_TOTAL_NUM_QUEUES))) + return ENA_COM_INVAL; + + rss->host_rss_ind_tbl[entry_idx] = entry_value; + + return 0; +} + +int ena_com_indirect_table_set(struct ena_com_dev *ena_dev) +{ + struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; + struct ena_rss *rss = &ena_dev->rss; + struct ena_admin_set_feat_cmd cmd; + struct ena_admin_set_feat_resp resp; + int ret; + + if (!ena_com_check_supported_feature_id(ena_dev, + ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG)) { + ena_trc_dbg("Feature %d isn't supported\n", + ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG); + return ENA_COM_UNSUPPORTED; + } + + ret = ena_com_ind_tbl_convert_to_device(ena_dev); + if (ret) { + ena_trc_err("Failed to convert host indirection table to device table\n"); + return ret; + } + + memset(&cmd, 0x0, sizeof(cmd)); + + cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE; + cmd.aq_common_descriptor.flags = + ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK; + cmd.feat_common.feature_id = ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG; + cmd.u.ind_table.size = rss->tbl_log_size; + cmd.u.ind_table.inline_index = 0xFFFFFFFF; + + ret = ena_com_mem_addr_set(ena_dev, + &cmd.control_buffer.address, + rss->rss_ind_tbl_dma_addr); + if (unlikely(ret)) { + ena_trc_err("memory address set failed\n"); + return ret; + } + + cmd.control_buffer.length = (1ULL << rss->tbl_log_size) * + sizeof(struct ena_admin_rss_ind_table_entry); + + ret = ena_com_execute_admin_command(admin_queue, + (struct ena_admin_aq_entry *)&cmd, + sizeof(cmd), + (struct ena_admin_acq_entry *)&resp, + sizeof(resp)); + + if (unlikely(ret)) + ena_trc_err("Failed to set indirect table. 
error: %d\n", ret); + + return ret; +} + +int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl) +{ + struct ena_rss *rss = &ena_dev->rss; + struct ena_admin_get_feat_resp get_resp; + u32 tbl_size; + int i, rc; + + tbl_size = (1ULL << rss->tbl_log_size) * + sizeof(struct ena_admin_rss_ind_table_entry); + + rc = ena_com_get_feature_ex(ena_dev, &get_resp, + ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG, + rss->rss_ind_tbl_dma_addr, + tbl_size, 0); + if (unlikely(rc)) + return rc; + + if (!ind_tbl) + return 0; + + for (i = 0; i < (1 << rss->tbl_log_size); i++) + ind_tbl[i] = rss->host_rss_ind_tbl[i]; + + return 0; +} + +int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size) +{ + int rc; + + memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss)); + + rc = ena_com_indirect_table_allocate(ena_dev, indr_tbl_log_size); + if (unlikely(rc)) + goto err_indr_tbl; + + rc = ena_com_hash_key_allocate(ena_dev); + if (unlikely(rc)) + goto err_hash_key; + + ena_com_hash_key_fill_default_key(ena_dev); + + rc = ena_com_hash_ctrl_init(ena_dev); + if (unlikely(rc)) + goto err_hash_ctrl; + + return 0; + +err_hash_ctrl: + ena_com_hash_key_destroy(ena_dev); +err_hash_key: + ena_com_indirect_table_destroy(ena_dev); +err_indr_tbl: + + return rc; +} + +void ena_com_rss_destroy(struct ena_com_dev *ena_dev) +{ + ena_com_indirect_table_destroy(ena_dev); + ena_com_hash_key_destroy(ena_dev); + ena_com_hash_ctrl_destroy(ena_dev); + + memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss)); +} + +int ena_com_allocate_host_info(struct ena_com_dev *ena_dev) +{ + struct ena_host_attribute *host_attr = &ena_dev->host_attr; + + ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev, + SZ_4K, + host_attr->host_info, + host_attr->host_info_dma_addr, + host_attr->host_info_dma_handle); + if (unlikely(!host_attr->host_info)) + return ENA_COM_NO_MEM; + + host_attr->host_info->ena_spec_version = ((ENA_COMMON_SPEC_VERSION_MAJOR << + ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) | + (ENA_COMMON_SPEC_VERSION_MINOR)); + + return 0; +} + +int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev, + u32 debug_area_size) +{ + struct ena_host_attribute *host_attr = &ena_dev->host_attr; + + ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev, + debug_area_size, + host_attr->debug_area_virt_addr, + host_attr->debug_area_dma_addr, + host_attr->debug_area_dma_handle); + if (unlikely(!host_attr->debug_area_virt_addr)) { + host_attr->debug_area_size = 0; + return ENA_COM_NO_MEM; + } + + host_attr->debug_area_size = debug_area_size; + + return 0; +} + +void ena_com_delete_host_info(struct ena_com_dev *ena_dev) +{ + struct ena_host_attribute *host_attr = &ena_dev->host_attr; + + if (host_attr->host_info) { + ENA_MEM_FREE_COHERENT(ena_dev->dmadev, + SZ_4K, + host_attr->host_info, + host_attr->host_info_dma_addr, + host_attr->host_info_dma_handle); + host_attr->host_info = NULL; + } +} + +void ena_com_delete_debug_area(struct ena_com_dev *ena_dev) +{ + struct ena_host_attribute *host_attr = &ena_dev->host_attr; + + if (host_attr->debug_area_virt_addr) { + ENA_MEM_FREE_COHERENT(ena_dev->dmadev, + host_attr->debug_area_size, + host_attr->debug_area_virt_addr, + host_attr->debug_area_dma_addr, + host_attr->debug_area_dma_handle); + host_attr->debug_area_virt_addr = NULL; + } +} + +int ena_com_set_host_attributes(struct ena_com_dev *ena_dev) +{ + struct ena_host_attribute *host_attr = &ena_dev->host_attr; + struct ena_com_admin_queue *admin_queue; + struct ena_admin_set_feat_cmd cmd; + struct ena_admin_set_feat_resp resp; + + int ret; + + /* Host attribute config is called 
before ena_com_get_dev_attr_feat + * so ena_com can't check if the feature is supported. + */ + + memset(&cmd, 0x0, sizeof(cmd)); + admin_queue = &ena_dev->admin_queue; + + cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE; + cmd.feat_common.feature_id = ENA_ADMIN_HOST_ATTR_CONFIG; + + ret = ena_com_mem_addr_set(ena_dev, + &cmd.u.host_attr.debug_ba, + host_attr->debug_area_dma_addr); + if (unlikely(ret)) { + ena_trc_err("memory address set failed\n"); + return ret; + } + + ret = ena_com_mem_addr_set(ena_dev, + &cmd.u.host_attr.os_info_ba, + host_attr->host_info_dma_addr); + if (unlikely(ret)) { + ena_trc_err("memory address set failed\n"); + return ret; + } + + cmd.u.host_attr.debug_area_size = host_attr->debug_area_size; + + ret = ena_com_execute_admin_command(admin_queue, + (struct ena_admin_aq_entry *)&cmd, + sizeof(cmd), + (struct ena_admin_acq_entry *)&resp, + sizeof(resp)); + + if (unlikely(ret)) + ena_trc_err("Failed to set host attributes: %d\n", ret); + + return ret; +} + +/* Interrupt moderation */ +bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev) +{ + return ena_com_check_supported_feature_id(ena_dev, + ENA_ADMIN_INTERRUPT_MODERATION); +} + +static int ena_com_update_nonadaptive_moderation_interval(u32 coalesce_usecs, + u32 intr_delay_resolution, + u32 *intr_moder_interval) +{ + if (!intr_delay_resolution) { + ena_trc_err("Illegal interrupt delay granularity value\n"); + return ENA_COM_FAULT; + } + + *intr_moder_interval = coalesce_usecs / intr_delay_resolution; + + return 0; +} + + +int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev, + u32 tx_coalesce_usecs) +{ + return ena_com_update_nonadaptive_moderation_interval(tx_coalesce_usecs, + ena_dev->intr_delay_resolution, + &ena_dev->intr_moder_tx_interval); +} + +int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev, + u32 rx_coalesce_usecs) +{ + return ena_com_update_nonadaptive_moderation_interval(rx_coalesce_usecs, + ena_dev->intr_delay_resolution, + &ena_dev->intr_moder_rx_interval); +} + +int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev) +{ + struct ena_admin_get_feat_resp get_resp; + u16 delay_resolution; + int rc; + + rc = ena_com_get_feature(ena_dev, &get_resp, + ENA_ADMIN_INTERRUPT_MODERATION, 0); + + if (rc) { + if (rc == ENA_COM_UNSUPPORTED) { + ena_trc_dbg("Feature %d isn't supported\n", + ENA_ADMIN_INTERRUPT_MODERATION); + rc = 0; + } else { + ena_trc_err("Failed to get interrupt moderation admin cmd. 
rc: %d\n", + rc); + } + + /* no moderation supported, disable adaptive support */ + ena_com_disable_adaptive_moderation(ena_dev); + return rc; + } + + /* if moderation is supported by device we set adaptive moderation */ + delay_resolution = get_resp.u.intr_moderation.intr_delay_resolution; + ena_com_update_intr_delay_resolution(ena_dev, delay_resolution); + + /* Disable adaptive moderation by default - can be enabled later */ + ena_com_disable_adaptive_moderation(ena_dev); + + return 0; +} + +unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev) +{ + return ena_dev->intr_moder_tx_interval; +} + +unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev) +{ + return ena_dev->intr_moder_rx_interval; +} + +int ena_com_config_dev_mode(struct ena_com_dev *ena_dev, + struct ena_admin_feature_llq_desc *llq_features, + struct ena_llq_configurations *llq_default_cfg) +{ + int rc; + struct ena_com_llq_info *llq_info = &(ena_dev->llq_info);; + + if (!llq_features->max_llq_num) { + ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; + return 0; + } + + rc = ena_com_config_llq_info(ena_dev, llq_features, llq_default_cfg); + if (rc) + return rc; + + ena_dev->tx_max_header_size = llq_info->desc_list_entry_size - + (llq_info->descs_num_before_header * sizeof(struct ena_eth_io_tx_desc)); + + if (ena_dev->tx_max_header_size == 0) { + ena_trc_err("the size of the LLQ entry is smaller than needed\n"); + return -EINVAL; + } + + ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV; + + return 0; +} diff --git a/src/spdk/dpdk/drivers/net/ena/base/ena_com.h b/src/spdk/dpdk/drivers/net/ena/base/ena_com.h new file mode 100644 index 000000000..61074eaf6 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ena/base/ena_com.h @@ -0,0 +1,976 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates. + * All rights reserved. 
+ */ + +#ifndef ENA_COM +#define ENA_COM + +#include "ena_plat.h" + +#define ENA_MAX_NUM_IO_QUEUES 128U +/* We need to queues for each IO (on for Tx and one for Rx) */ +#define ENA_TOTAL_NUM_QUEUES (2 * (ENA_MAX_NUM_IO_QUEUES)) + +#define ENA_MAX_HANDLERS 256 + +#define ENA_MAX_PHYS_ADDR_SIZE_BITS 48 + +/* Unit in usec */ +#define ENA_REG_READ_TIMEOUT 200000 + +#define ADMIN_SQ_SIZE(depth) ((depth) * sizeof(struct ena_admin_aq_entry)) +#define ADMIN_CQ_SIZE(depth) ((depth) * sizeof(struct ena_admin_acq_entry)) +#define ADMIN_AENQ_SIZE(depth) ((depth) * sizeof(struct ena_admin_aenq_entry)) + +/*****************************************************************************/ +/*****************************************************************************/ +/* ENA adaptive interrupt moderation settings */ + +#define ENA_INTR_INITIAL_TX_INTERVAL_USECS ENA_INTR_INITIAL_TX_INTERVAL_USECS_PLAT +#define ENA_INTR_INITIAL_RX_INTERVAL_USECS 0 +#define ENA_DEFAULT_INTR_DELAY_RESOLUTION 1 + +#define ENA_HASH_KEY_SIZE 40 + +#define ENA_HW_HINTS_NO_TIMEOUT 0xFFFF + +#define ENA_FEATURE_MAX_QUEUE_EXT_VER 1 + +struct ena_llq_configurations { + enum ena_admin_llq_header_location llq_header_location; + enum ena_admin_llq_ring_entry_size llq_ring_entry_size; + enum ena_admin_llq_stride_ctrl llq_stride_ctrl; + enum ena_admin_llq_num_descs_before_header llq_num_decs_before_header; + u16 llq_ring_entry_size_value; +}; + +enum queue_direction { + ENA_COM_IO_QUEUE_DIRECTION_TX, + ENA_COM_IO_QUEUE_DIRECTION_RX +}; + +struct ena_com_buf { + dma_addr_t paddr; /**< Buffer physical address */ + u16 len; /**< Buffer length in bytes */ +}; + +struct ena_com_rx_buf_info { + u16 len; + u16 req_id; +}; + +struct ena_com_io_desc_addr { + u8 __iomem *pbuf_dev_addr; /* LLQ address */ + u8 *virt_addr; + dma_addr_t phys_addr; + ena_mem_handle_t mem_handle; +}; + +struct ena_com_tx_meta { + u16 mss; + u16 l3_hdr_len; + u16 l3_hdr_offset; + u16 l4_hdr_len; /* In words */ +}; + +struct ena_com_llq_info { + u16 header_location_ctrl; + u16 desc_stride_ctrl; + u16 desc_list_entry_size_ctrl; + u16 desc_list_entry_size; + u16 descs_num_before_header; + u16 descs_per_entry; + u16 max_entries_in_tx_burst; + bool disable_meta_caching; +}; + +struct ena_com_io_cq { + struct ena_com_io_desc_addr cdesc_addr; + void *bus; + + /* Interrupt unmask register */ + u32 __iomem *unmask_reg; + + /* The completion queue head doorbell register */ + u32 __iomem *cq_head_db_reg; + + /* numa configuration register (for TPH) */ + u32 __iomem *numa_node_cfg_reg; + + /* The value to write to the above register to unmask + * the interrupt of this queue + */ + u32 msix_vector; + + enum queue_direction direction; + + /* holds the number of cdesc of the current packet */ + u16 cur_rx_pkt_cdesc_count; + /* save the firt cdesc idx of the current packet */ + u16 cur_rx_pkt_cdesc_start_idx; + + u16 q_depth; + /* Caller qid */ + u16 qid; + + /* Device queue index */ + u16 idx; + u16 head; + u16 last_head_update; + u8 phase; + u8 cdesc_entry_size_in_bytes; + +} ____cacheline_aligned; + +struct ena_com_io_bounce_buffer_control { + u8 *base_buffer; + u16 next_to_use; + u16 buffer_size; + u16 buffers_num; /* Must be a power of 2 */ +}; + +/* This struct is to keep tracking the current location of the next llq entry */ +struct ena_com_llq_pkt_ctrl { + u8 *curr_bounce_buf; + u16 idx; + u16 descs_left_in_line; +}; + +struct ena_com_io_sq { + struct ena_com_io_desc_addr desc_addr; + void *bus; + + u32 __iomem *db_addr; + u8 __iomem *header_addr; + + enum queue_direction 
direction; + enum ena_admin_placement_policy_type mem_queue_type; + + bool disable_meta_caching; + + u32 msix_vector; + struct ena_com_tx_meta cached_tx_meta; + struct ena_com_llq_info llq_info; + struct ena_com_llq_pkt_ctrl llq_buf_ctrl; + struct ena_com_io_bounce_buffer_control bounce_buf_ctrl; + + u16 q_depth; + u16 qid; + + u16 idx; + u16 tail; + u16 next_to_comp; + u16 llq_last_copy_tail; + u32 tx_max_header_size; + u8 phase; + u8 desc_entry_size; + u8 dma_addr_bits; + u16 entries_in_tx_burst_left; +} ____cacheline_aligned; + +struct ena_com_admin_cq { + struct ena_admin_acq_entry *entries; + ena_mem_handle_t mem_handle; + dma_addr_t dma_addr; + + u16 head; + u8 phase; +}; + +struct ena_com_admin_sq { + struct ena_admin_aq_entry *entries; + ena_mem_handle_t mem_handle; + dma_addr_t dma_addr; + + u32 __iomem *db_addr; + + u16 head; + u16 tail; + u8 phase; + +}; + +struct ena_com_stats_admin { + u32 aborted_cmd; + u32 submitted_cmd; + u32 completed_cmd; + u32 out_of_space; + u32 no_completion; +}; + +struct ena_com_admin_queue { + void *q_dmadev; + void *bus; + ena_spinlock_t q_lock; /* spinlock for the admin queue */ + + struct ena_comp_ctx *comp_ctx; + u32 completion_timeout; + u16 q_depth; + struct ena_com_admin_cq cq; + struct ena_com_admin_sq sq; + + /* Indicate if the admin queue should poll for completion */ + bool polling; + + /* Define if fallback to polling mode should occur */ + bool auto_polling; + + u16 curr_cmd_id; + + /* Indicate that the ena was initialized and can + * process new admin commands + */ + bool running_state; + + /* Count the number of outstanding admin commands */ + ena_atomic32_t outstanding_cmds; + + struct ena_com_stats_admin stats; +}; + +struct ena_aenq_handlers; + +struct ena_com_aenq { + u16 head; + u8 phase; + struct ena_admin_aenq_entry *entries; + dma_addr_t dma_addr; + ena_mem_handle_t mem_handle; + u16 q_depth; + struct ena_aenq_handlers *aenq_handlers; +}; + +struct ena_com_mmio_read { + struct ena_admin_ena_mmio_req_read_less_resp *read_resp; + dma_addr_t read_resp_dma_addr; + ena_mem_handle_t read_resp_mem_handle; + u32 reg_read_to; /* in us */ + u16 seq_num; + bool readless_supported; + /* spin lock to ensure a single outstanding read */ + ena_spinlock_t lock; +}; + +struct ena_rss { + /* Indirect table */ + u16 *host_rss_ind_tbl; + struct ena_admin_rss_ind_table_entry *rss_ind_tbl; + dma_addr_t rss_ind_tbl_dma_addr; + ena_mem_handle_t rss_ind_tbl_mem_handle; + u16 tbl_log_size; + + /* Hash key */ + enum ena_admin_hash_functions hash_func; + struct ena_admin_feature_rss_flow_hash_control *hash_key; + dma_addr_t hash_key_dma_addr; + ena_mem_handle_t hash_key_mem_handle; + u32 hash_init_val; + + /* Flow Control */ + struct ena_admin_feature_rss_hash_control *hash_ctrl; + dma_addr_t hash_ctrl_dma_addr; + ena_mem_handle_t hash_ctrl_mem_handle; + +}; + +struct ena_host_attribute { + /* Debug area */ + u8 *debug_area_virt_addr; + dma_addr_t debug_area_dma_addr; + ena_mem_handle_t debug_area_dma_handle; + u32 debug_area_size; + + /* Host information */ + struct ena_admin_host_info *host_info; + dma_addr_t host_info_dma_addr; + ena_mem_handle_t host_info_dma_handle; +}; + +/* Each ena_dev is a PCI function. 
*/ +struct ena_com_dev { + struct ena_com_admin_queue admin_queue; + struct ena_com_aenq aenq; + struct ena_com_io_cq io_cq_queues[ENA_TOTAL_NUM_QUEUES]; + struct ena_com_io_sq io_sq_queues[ENA_TOTAL_NUM_QUEUES]; + u8 __iomem *reg_bar; + void __iomem *mem_bar; + void *dmadev; + void *bus; + + enum ena_admin_placement_policy_type tx_mem_queue_type; + u32 tx_max_header_size; + u16 stats_func; /* Selected function for extended statistic dump */ + u16 stats_queue; /* Selected queue for extended statistic dump */ + + struct ena_com_mmio_read mmio_read; + + struct ena_rss rss; + u32 supported_features; + u32 dma_addr_bits; + + struct ena_host_attribute host_attr; + bool adaptive_coalescing; + u16 intr_delay_resolution; + + /* interrupt moderation intervals are in usec divided by + * intr_delay_resolution, which is supplied by the device. + */ + u32 intr_moder_tx_interval; + u32 intr_moder_rx_interval; + + struct ena_intr_moder_entry *intr_moder_tbl; + + struct ena_com_llq_info llq_info; +}; + +struct ena_com_dev_get_features_ctx { + struct ena_admin_queue_feature_desc max_queues; + struct ena_admin_queue_ext_feature_desc max_queue_ext; + struct ena_admin_device_attr_feature_desc dev_attr; + struct ena_admin_feature_aenq_desc aenq; + struct ena_admin_feature_offload_desc offload; + struct ena_admin_ena_hw_hints hw_hints; + struct ena_admin_feature_llq_desc llq; + struct ena_admin_feature_rss_ind_table ind_table; +}; + +struct ena_com_create_io_ctx { + enum ena_admin_placement_policy_type mem_queue_type; + enum queue_direction direction; + int numa_node; + u32 msix_vector; + u16 queue_size; + u16 qid; +}; + +typedef void (*ena_aenq_handler)(void *data, + struct ena_admin_aenq_entry *aenq_e); + +/* Holds aenq handlers. Indexed by AENQ event group */ +struct ena_aenq_handlers { + ena_aenq_handler handlers[ENA_MAX_HANDLERS]; + ena_aenq_handler unimplemented_handler; +}; + +/*****************************************************************************/ +/*****************************************************************************/ +#if defined(__cplusplus) +extern "C" { +#endif + +/* ena_com_mmio_reg_read_request_init - Init the mmio reg read mechanism + * @ena_dev: ENA communication layer struct + * + * Initialize the register read mechanism. + * + * @note: This method must be the first stage in the initialization sequence. + * + * @return - 0 on success, negative value on failure. + */ +int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev); + +/* ena_com_set_mmio_read_mode - Enable/disable the indirect mmio reg read mechanism + * @ena_dev: ENA communication layer struct + * @readless_supported: readless mode (enable/disable) + */ +void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, + bool readless_supported); + +/* ena_com_mmio_reg_read_request_write_dev_addr - Write the mmio reg read return + * value physical address. + * @ena_dev: ENA communication layer struct + */ +void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev); + +/* ena_com_mmio_reg_read_request_destroy - Destroy the mmio reg read mechanism + * @ena_dev: ENA communication layer struct + */ +void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev); + +/* ena_com_admin_init - Init the admin and the async queues + * @ena_dev: ENA communication layer struct + * @aenq_handlers: Those handlers to be called upon event. + * + * Initialize the admin submission and completion queues. + * Initialize the asynchronous events notification queues. 
+ * + * @return - 0 on success, negative value on failure. + */ +int ena_com_admin_init(struct ena_com_dev *ena_dev, + struct ena_aenq_handlers *aenq_handlers); + +/* ena_com_admin_destroy - Destroy the admin and the async events queues. + * @ena_dev: ENA communication layer struct + * + * @note: Before calling this method, the caller must validate that the device + * won't send any additional admin completions/aenq. + * To achieve that, a FLR is recommended. + */ +void ena_com_admin_destroy(struct ena_com_dev *ena_dev); + +/* ena_com_dev_reset - Perform device FLR to the device. + * @ena_dev: ENA communication layer struct + * @reset_reason: Specify what is the trigger for the reset in case of an error. + * + * @return - 0 on success, negative value on failure. + */ +int ena_com_dev_reset(struct ena_com_dev *ena_dev, + enum ena_regs_reset_reason_types reset_reason); + +/* ena_com_create_io_queue - Create io queue. + * @ena_dev: ENA communication layer struct + * @ctx - create context structure + * + * Create the submission and the completion queues. + * + * @return - 0 on success, negative value on failure. + */ +int ena_com_create_io_queue(struct ena_com_dev *ena_dev, + struct ena_com_create_io_ctx *ctx); + +/* ena_com_destroy_io_queue - Destroy IO queue with the queue id - qid. + * @ena_dev: ENA communication layer struct + * @qid - the caller virtual queue id. + */ +void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid); + +/* ena_com_get_io_handlers - Return the io queue handlers + * @ena_dev: ENA communication layer struct + * @qid - the caller virtual queue id. + * @io_sq - IO submission queue handler + * @io_cq - IO completion queue handler. + * + * @return - 0 on success, negative value on failure. + */ +int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid, + struct ena_com_io_sq **io_sq, + struct ena_com_io_cq **io_cq); + +/* ena_com_admin_aenq_enable - ENAble asynchronous event notifications + * @ena_dev: ENA communication layer struct + * + * After this method, aenq event can be received via AENQ. + */ +void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev); + +/* ena_com_set_admin_running_state - Set the state of the admin queue + * @ena_dev: ENA communication layer struct + * + * Change the state of the admin queue (enable/disable) + */ +void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state); + +/* ena_com_get_admin_running_state - Get the admin queue state + * @ena_dev: ENA communication layer struct + * + * Retrieve the state of the admin queue (enable/disable) + * + * @return - current polling mode (enable/disable) + */ +bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev); + +/* ena_com_set_admin_polling_mode - Set the admin completion queue polling mode + * @ena_dev: ENA communication layer struct + * @polling: ENAble/Disable polling mode + * + * Set the admin completion mode. + */ +void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling); + +/* ena_com_get_admin_polling_mode - Get the admin completion queue polling mode + * @ena_dev: ENA communication layer struct + * + * Get the admin completion mode. + * If polling mode is on, ena_com_execute_admin_command will perform a + * polling on the admin completion queue for the commands completion, + * otherwise it will wait on wait event. 
+ * + * @return state + */ +bool ena_com_get_admin_polling_mode(struct ena_com_dev *ena_dev); + +/* ena_com_set_admin_auto_polling_mode - Enable autoswitch to polling mode + * @ena_dev: ENA communication layer struct + * @polling: Enable/Disable polling mode + * + * Set the autopolling mode. + * If autopolling is on: + * In case of missing interrupt when data is available switch to polling. + */ +void ena_com_set_admin_auto_polling_mode(struct ena_com_dev *ena_dev, + bool polling); + +/* ena_com_admin_q_comp_intr_handler - admin queue interrupt handler + * @ena_dev: ENA communication layer struct + * + * This method goes over the admin completion queue and wakes up all the pending + * threads that wait on the commands wait event. + * + * @note: Should be called after MSI-X interrupt. + */ +void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev); + +/* ena_com_aenq_intr_handler - AENQ interrupt handler + * @ena_dev: ENA communication layer struct + * + * This method goes over the async event notification queue and calls the proper + * aenq handler. + */ +void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data); + +/* ena_com_abort_admin_commands - Abort all the outstanding admin commands. + * @ena_dev: ENA communication layer struct + * + * This method aborts all the outstanding admin commands. + * The caller should then call ena_com_wait_for_abort_completion to make sure + * all the commands were completed. + */ +void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev); + +/* ena_com_wait_for_abort_completion - Wait for admin commands abort. + * @ena_dev: ENA communication layer struct + * + * This method waits until all the outstanding admin commands are completed. + */ +void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev); + +/* ena_com_validate_version - Validate the device parameters + * @ena_dev: ENA communication layer struct + * + * This method verifies the device parameters are the same as the saved + * parameters in ena_dev. + * This method is useful after device reset, to validate the device mac address + * and the device offloads are the same as before the reset. + * + * @return - 0 on success negative value otherwise. + */ +int ena_com_validate_version(struct ena_com_dev *ena_dev); + +/* ena_com_get_link_params - Retrieve physical link parameters. + * @ena_dev: ENA communication layer struct + * @resp: Link parameters + * + * Retrieve the physical link parameters, + * like speed, auto-negotiation and full duplex support. + * + * @return - 0 on Success negative value otherwise. + */ +int ena_com_get_link_params(struct ena_com_dev *ena_dev, + struct ena_admin_get_feat_resp *resp); + +/* ena_com_get_dma_width - Retrieve physical dma address width the device + * supports. + * @ena_dev: ENA communication layer struct + * + * Retrieve the maximum physical address bits the device can handle. + * + * @return: > 0 on Success and negative value otherwise. + */ +int ena_com_get_dma_width(struct ena_com_dev *ena_dev); + +/* ena_com_set_aenq_config - Set aenq groups configurations + * @ena_dev: ENA communication layer struct + * @groups flag: bit fields flags of enum ena_admin_aenq_group. + * + * Configure which aenq event group the driver would like to receive. + * + * @return: 0 on Success and negative value otherwise. 
+ */ +int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag); + +/* ena_com_get_dev_attr_feat - Get device features + * @ena_dev: ENA communication layer struct + * @get_feat_ctx: returned context that contain the get features. + * + * @return: 0 on Success and negative value otherwise. + */ +int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev, + struct ena_com_dev_get_features_ctx *get_feat_ctx); + +/* ena_com_get_dev_basic_stats - Get device basic statistics + * @ena_dev: ENA communication layer struct + * @stats: stats return value + * + * @return: 0 on Success and negative value otherwise. + */ +int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev, + struct ena_admin_basic_stats *stats); + +/* ena_com_set_dev_mtu - Configure the device mtu. + * @ena_dev: ENA communication layer struct + * @mtu: mtu value + * + * @return: 0 on Success and negative value otherwise. + */ +int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu); + +/* ena_com_get_offload_settings - Retrieve the device offloads capabilities + * @ena_dev: ENA communication layer struct + * @offlad: offload return value + * + * @return: 0 on Success and negative value otherwise. + */ +int ena_com_get_offload_settings(struct ena_com_dev *ena_dev, + struct ena_admin_feature_offload_desc *offload); + +/* ena_com_rss_init - Init RSS + * @ena_dev: ENA communication layer struct + * @log_size: indirection log size + * + * Allocate RSS/RFS resources. + * The caller then can configure rss using ena_com_set_hash_function, + * ena_com_set_hash_ctrl and ena_com_indirect_table_set. + * + * @return: 0 on Success and negative value otherwise. + */ +int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 log_size); + +/* ena_com_rss_destroy - Destroy rss + * @ena_dev: ENA communication layer struct + * + * Free all the RSS/RFS resources. + */ +void ena_com_rss_destroy(struct ena_com_dev *ena_dev); + +/* ena_com_fill_hash_function - Fill RSS hash function + * @ena_dev: ENA communication layer struct + * @func: The hash function (Toeplitz or crc) + * @key: Hash key (for toeplitz hash) + * @key_len: key length (max length 10 DW) + * @init_val: initial value for the hash function + * + * Fill the ena_dev resources with the desire hash function, hash key, key_len + * and key initial value (if needed by the hash function). + * To flush the key into the device the caller should call + * ena_com_set_hash_function. + * + * @return: 0 on Success and negative value otherwise. + */ +int ena_com_fill_hash_function(struct ena_com_dev *ena_dev, + enum ena_admin_hash_functions func, + const u8 *key, u16 key_len, u32 init_val); + +/* ena_com_set_hash_function - Flush the hash function and it dependencies to + * the device. + * @ena_dev: ENA communication layer struct + * + * Flush the hash function and it dependencies (key, key length and + * initial value) if needed. + * + * @note: Prior to this method the caller should call ena_com_fill_hash_function + * + * @return: 0 on Success and negative value otherwise. + */ +int ena_com_set_hash_function(struct ena_com_dev *ena_dev); + +/* ena_com_get_hash_function - Retrieve the hash function and the hash key + * from the device. + * @ena_dev: ENA communication layer struct + * @func: hash function + * @key: hash key + * + * Retrieve the hash function and the hash key from the device. + * + * @note: If the caller called ena_com_fill_hash_function but didn't flash + * it to the device, the new configuration will be lost. + * + * @return: 0 on Success and negative value otherwise. 
+ */ +int ena_com_get_hash_function(struct ena_com_dev *ena_dev, + enum ena_admin_hash_functions *func, + u8 *key); + +/* ena_com_fill_hash_ctrl - Fill RSS hash control + * @ena_dev: ENA communication layer struct. + * @proto: The protocol to configure. + * @hash_fields: bit mask of ena_admin_flow_hash_fields + * + * Fill the ena_dev resources with the desire hash control (the ethernet + * fields that take part of the hash) for a specific protocol. + * To flush the hash control to the device, the caller should call + * ena_com_set_hash_ctrl. + * + * @return: 0 on Success and negative value otherwise. + */ +int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev, + enum ena_admin_flow_hash_proto proto, + u16 hash_fields); + +/* ena_com_set_hash_ctrl - Flush the hash control resources to the device. + * @ena_dev: ENA communication layer struct + * + * Flush the hash control (the ethernet fields that take part of the hash) + * + * @note: Prior to this method the caller should call ena_com_fill_hash_ctrl. + * + * @return: 0 on Success and negative value otherwise. + */ +int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev); + +/* ena_com_get_hash_ctrl - Retrieve the hash control from the device. + * @ena_dev: ENA communication layer struct + * @proto: The protocol to retrieve. + * @fields: bit mask of ena_admin_flow_hash_fields. + * + * Retrieve the hash control from the device. + * + * @note: If the caller called ena_com_fill_hash_ctrl but didn't flash + * it to the device, the new configuration will be lost. + * + * @return: 0 on Success and negative value otherwise. + */ +int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev, + enum ena_admin_flow_hash_proto proto, + u16 *fields); + +/* ena_com_set_default_hash_ctrl - Set the hash control to a default + * configuration. + * @ena_dev: ENA communication layer struct + * + * Fill the ena_dev resources with the default hash control configuration. + * To flush the hash control to the device, the caller should call + * ena_com_set_hash_ctrl. + * + * @return: 0 on Success and negative value otherwise. + */ +int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev); + +/* ena_com_indirect_table_fill_entry - Fill a single entry in the RSS + * indirection table + * @ena_dev: ENA communication layer struct. + * @entry_idx - indirection table entry. + * @entry_value - redirection value + * + * Fill a single entry of the RSS indirection table in the ena_dev resources. + * To flush the indirection table to the device, the called should call + * ena_com_indirect_table_set. + * + * @return: 0 on Success and negative value otherwise. + */ +int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev, + u16 entry_idx, u16 entry_value); + +/* ena_com_indirect_table_set - Flush the indirection table to the device. + * @ena_dev: ENA communication layer struct + * + * Flush the indirection hash control to the device. + * Prior to this method the caller should call ena_com_indirect_table_fill_entry + * + * @return: 0 on Success and negative value otherwise. + */ +int ena_com_indirect_table_set(struct ena_com_dev *ena_dev); + +/* ena_com_indirect_table_get - Retrieve the indirection table from the device. + * @ena_dev: ENA communication layer struct + * @ind_tbl: indirection table + * + * Retrieve the RSS indirection table from the device. + * + * @note: If the caller called ena_com_indirect_table_fill_entry but didn't flush + * it to the device, the new configuration will be lost. + * + * @return: 0 on Success and negative value otherwise. 
+ */ +int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl); + +/* ena_com_allocate_host_info - Allocate host info resources. + * @ena_dev: ENA communication layer struct + * + * @return: 0 on Success and negative value otherwise. + */ +int ena_com_allocate_host_info(struct ena_com_dev *ena_dev); + +/* ena_com_allocate_debug_area - Allocate debug area. + * @ena_dev: ENA communication layer struct + * @debug_area_size - debug area size. + * + * @return: 0 on Success and negative value otherwise. + */ +int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev, + u32 debug_area_size); + +/* ena_com_delete_debug_area - Free the debug area resources. + * @ena_dev: ENA communication layer struct + * + * Free the allocated debug area. + */ +void ena_com_delete_debug_area(struct ena_com_dev *ena_dev); + +/* ena_com_delete_host_info - Free the host info resources. + * @ena_dev: ENA communication layer struct + * + * Free the allocated host info. + */ +void ena_com_delete_host_info(struct ena_com_dev *ena_dev); + +/* ena_com_set_host_attributes - Update the device with the host + * attributes (debug area and host info) base address. + * @ena_dev: ENA communication layer struct + * + * @return: 0 on Success and negative value otherwise. + */ +int ena_com_set_host_attributes(struct ena_com_dev *ena_dev); + +/* ena_com_create_io_cq - Create io completion queue. + * @ena_dev: ENA communication layer struct + * @io_cq - io completion queue handler + + * Create IO completion queue. + * + * @return - 0 on success, negative value on failure. + */ +int ena_com_create_io_cq(struct ena_com_dev *ena_dev, + struct ena_com_io_cq *io_cq); + +/* ena_com_destroy_io_cq - Destroy io completion queue. + * @ena_dev: ENA communication layer struct + * @io_cq - io completion queue handler + + * Destroy IO completion queue. + * + * @return - 0 on success, negative value on failure. + */ +int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev, + struct ena_com_io_cq *io_cq); + +/* ena_com_execute_admin_command - Execute admin command + * @admin_queue: admin queue. + * @cmd: the admin command to execute. + * @cmd_size: the command size. + * @cmd_completion: command completion return value. + * @cmd_comp_size: command completion size. + + * Submit an admin command and then wait until the device returns a + * completion. + * The completion will be copied into cmd_comp. + * + * @return - 0 on success, negative value on failure. + */ +int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue, + struct ena_admin_aq_entry *cmd, + size_t cmd_size, + struct ena_admin_acq_entry *cmd_comp, + size_t cmd_comp_size); + +/* ena_com_init_interrupt_moderation - Init interrupt moderation + * @ena_dev: ENA communication layer struct + * + * @return - 0 on success, negative value on failure. + */ +int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev); + +/* ena_com_interrupt_moderation_supported - Return if interrupt moderation + * capability is supported by the device. + * + * @return - supported or not. + */ +bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev); + +/* ena_com_update_nonadaptive_moderation_interval_tx - Update the + * non-adaptive interval in Tx direction. + * @ena_dev: ENA communication layer struct + * @tx_coalesce_usecs: Interval in usec. + * + * @return - 0 on success, negative value on failure. 
+ */ +int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev, + u32 tx_coalesce_usecs); + +/* ena_com_update_nonadaptive_moderation_interval_rx - Update the + * non-adaptive interval in Rx direction. + * @ena_dev: ENA communication layer struct + * @rx_coalesce_usecs: Interval in usec. + * + * @return - 0 on success, negative value on failure. + */ +int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev, + u32 rx_coalesce_usecs); + +/* ena_com_get_nonadaptive_moderation_interval_tx - Retrieve the + * non-adaptive interval in Tx direction. + * @ena_dev: ENA communication layer struct + * + * @return - interval in usec + */ +unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev); + +/* ena_com_get_nonadaptive_moderation_interval_rx - Retrieve the + * non-adaptive interval in Rx direction. + * @ena_dev: ENA communication layer struct + * + * @return - interval in usec + */ +unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev); + +/* ena_com_config_dev_mode - Configure the placement policy of the device. + * @ena_dev: ENA communication layer struct + * @llq_features: LLQ feature descriptor, retrieve via + * ena_com_get_dev_attr_feat. + * @ena_llq_config: The default driver LLQ parameters configurations + */ +int ena_com_config_dev_mode(struct ena_com_dev *ena_dev, + struct ena_admin_feature_llq_desc *llq_features, + struct ena_llq_configurations *llq_default_config); + +static inline bool ena_com_get_adaptive_moderation_enabled(struct ena_com_dev *ena_dev) +{ + return ena_dev->adaptive_coalescing; +} + +static inline void ena_com_enable_adaptive_moderation(struct ena_com_dev *ena_dev) +{ + ena_dev->adaptive_coalescing = true; +} + +static inline void ena_com_disable_adaptive_moderation(struct ena_com_dev *ena_dev) +{ + ena_dev->adaptive_coalescing = false; +} + +/* ena_com_update_intr_reg - Prepare interrupt register + * @intr_reg: interrupt register to update. + * @rx_delay_interval: Rx interval in usecs + * @tx_delay_interval: Tx interval in usecs + * @unmask: unmask enable/disable + * + * Prepare interrupt update register with the supplied parameters. 
+ */ +static inline void ena_com_update_intr_reg(struct ena_eth_io_intr_reg *intr_reg, + u32 rx_delay_interval, + u32 tx_delay_interval, + bool unmask) +{ + intr_reg->intr_control = 0; + intr_reg->intr_control |= rx_delay_interval & + ENA_ETH_IO_INTR_REG_RX_INTR_DELAY_MASK; + + intr_reg->intr_control |= + (tx_delay_interval << ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_SHIFT) + & ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_MASK; + + if (unmask) + intr_reg->intr_control |= ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK; +} + +static inline u8 *ena_com_get_next_bounce_buffer(struct ena_com_io_bounce_buffer_control *bounce_buf_ctrl) +{ + u16 size, buffers_num; + u8 *buf; + + size = bounce_buf_ctrl->buffer_size; + buffers_num = bounce_buf_ctrl->buffers_num; + + buf = bounce_buf_ctrl->base_buffer + + (bounce_buf_ctrl->next_to_use++ & (buffers_num - 1)) * size; + + prefetchw(bounce_buf_ctrl->base_buffer + + (bounce_buf_ctrl->next_to_use & (buffers_num - 1)) * size); + + return buf; +} + +#if defined(__cplusplus) +} +#endif /* __cplusplus */ +#endif /* !(ENA_COM) */ diff --git a/src/spdk/dpdk/drivers/net/ena/base/ena_defs/ena_admin_defs.h b/src/spdk/dpdk/drivers/net/ena/base/ena_defs/ena_admin_defs.h new file mode 100644 index 000000000..6d266c4cf --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ena/base/ena_defs/ena_admin_defs.h @@ -0,0 +1,1656 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates. + * All rights reserved. + */ + +#ifndef _ENA_ADMIN_H_ +#define _ENA_ADMIN_H_ + +#define ENA_ADMIN_EXTRA_PROPERTIES_STRING_LEN 32 +#define ENA_ADMIN_EXTRA_PROPERTIES_COUNT 32 + +enum ena_admin_aq_opcode { + ENA_ADMIN_CREATE_SQ = 1, + ENA_ADMIN_DESTROY_SQ = 2, + ENA_ADMIN_CREATE_CQ = 3, + ENA_ADMIN_DESTROY_CQ = 4, + ENA_ADMIN_GET_FEATURE = 8, + ENA_ADMIN_SET_FEATURE = 9, + ENA_ADMIN_GET_STATS = 11, +}; + +enum ena_admin_aq_completion_status { + ENA_ADMIN_SUCCESS = 0, + ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE = 1, + ENA_ADMIN_BAD_OPCODE = 2, + ENA_ADMIN_UNSUPPORTED_OPCODE = 3, + ENA_ADMIN_MALFORMED_REQUEST = 4, + /* Additional status is provided in ACQ entry extended_status */ + ENA_ADMIN_ILLEGAL_PARAMETER = 5, + ENA_ADMIN_UNKNOWN_ERROR = 6, + ENA_ADMIN_RESOURCE_BUSY = 7, +}; + +enum ena_admin_aq_feature_id { + ENA_ADMIN_DEVICE_ATTRIBUTES = 1, + ENA_ADMIN_MAX_QUEUES_NUM = 2, + ENA_ADMIN_HW_HINTS = 3, + ENA_ADMIN_LLQ = 4, + ENA_ADMIN_EXTRA_PROPERTIES_STRINGS = 5, + ENA_ADMIN_EXTRA_PROPERTIES_FLAGS = 6, + ENA_ADMIN_MAX_QUEUES_EXT = 7, + ENA_ADMIN_RSS_HASH_FUNCTION = 10, + ENA_ADMIN_STATELESS_OFFLOAD_CONFIG = 11, + ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG = 12, + ENA_ADMIN_MTU = 14, + ENA_ADMIN_RSS_HASH_INPUT = 18, + ENA_ADMIN_INTERRUPT_MODERATION = 20, + ENA_ADMIN_AENQ_CONFIG = 26, + ENA_ADMIN_LINK_CONFIG = 27, + ENA_ADMIN_HOST_ATTR_CONFIG = 28, + ENA_ADMIN_FEATURES_OPCODE_NUM = 32, +}; + +enum ena_admin_placement_policy_type { + /* descriptors and headers are in host memory */ + ENA_ADMIN_PLACEMENT_POLICY_HOST = 1, + /* descriptors and headers are in device memory (a.k.a Low Latency + * Queue) + */ + ENA_ADMIN_PLACEMENT_POLICY_DEV = 3, +}; + +enum ena_admin_link_types { + ENA_ADMIN_LINK_SPEED_1G = 0x1, + ENA_ADMIN_LINK_SPEED_2_HALF_G = 0x2, + ENA_ADMIN_LINK_SPEED_5G = 0x4, + ENA_ADMIN_LINK_SPEED_10G = 0x8, + ENA_ADMIN_LINK_SPEED_25G = 0x10, + ENA_ADMIN_LINK_SPEED_40G = 0x20, + ENA_ADMIN_LINK_SPEED_50G = 0x40, + ENA_ADMIN_LINK_SPEED_100G = 0x80, + ENA_ADMIN_LINK_SPEED_200G = 0x100, + ENA_ADMIN_LINK_SPEED_400G = 0x200, +}; + +enum ena_admin_completion_policy_type { + /* completion queue 
entry for each sq descriptor */ + ENA_ADMIN_COMPLETION_POLICY_DESC = 0, + /* completion queue entry upon request in sq descriptor */ + ENA_ADMIN_COMPLETION_POLICY_DESC_ON_DEMAND = 1, + /* current queue head pointer is updated in OS memory upon sq + * descriptor request + */ + ENA_ADMIN_COMPLETION_POLICY_HEAD_ON_DEMAND = 2, + /* current queue head pointer is updated in OS memory for each sq + * descriptor + */ + ENA_ADMIN_COMPLETION_POLICY_HEAD = 3, +}; + +/* basic stats return ena_admin_basic_stats while extanded stats return a + * buffer (string format) with additional statistics per queue and per + * device id + */ +enum ena_admin_get_stats_type { + ENA_ADMIN_GET_STATS_TYPE_BASIC = 0, + ENA_ADMIN_GET_STATS_TYPE_EXTENDED = 1, +}; + +enum ena_admin_get_stats_scope { + ENA_ADMIN_SPECIFIC_QUEUE = 0, + ENA_ADMIN_ETH_TRAFFIC = 1, +}; + +struct ena_admin_aq_common_desc { + /* 11:0 : command_id + * 15:12 : reserved12 + */ + uint16_t command_id; + + /* as appears in ena_admin_aq_opcode */ + uint8_t opcode; + + /* 0 : phase + * 1 : ctrl_data - control buffer address valid + * 2 : ctrl_data_indirect - control buffer address + * points to list of pages with addresses of control + * buffers + * 7:3 : reserved3 + */ + uint8_t flags; +}; + +/* used in ena_admin_aq_entry. Can point directly to control data, or to a + * page list chunk. Used also at the end of indirect mode page list chunks, + * for chaining. + */ +struct ena_admin_ctrl_buff_info { + uint32_t length; + + struct ena_common_mem_addr address; +}; + +struct ena_admin_sq { + uint16_t sq_idx; + + /* 4:0 : reserved + * 7:5 : sq_direction - 0x1 - Tx; 0x2 - Rx + */ + uint8_t sq_identity; + + uint8_t reserved1; +}; + +struct ena_admin_aq_entry { + struct ena_admin_aq_common_desc aq_common_descriptor; + + union { + uint32_t inline_data_w1[3]; + + struct ena_admin_ctrl_buff_info control_buffer; + } u; + + uint32_t inline_data_w4[12]; +}; + +struct ena_admin_acq_common_desc { + /* command identifier to associate it with the aq descriptor + * 11:0 : command_id + * 15:12 : reserved12 + */ + uint16_t command; + + uint8_t status; + + /* 0 : phase + * 7:1 : reserved1 + */ + uint8_t flags; + + uint16_t extended_status; + + /* indicates to the driver which AQ entry has been consumed by the + * device and could be reused + */ + uint16_t sq_head_indx; +}; + +struct ena_admin_acq_entry { + struct ena_admin_acq_common_desc acq_common_descriptor; + + uint32_t response_specific_data[14]; +}; + +struct ena_admin_aq_create_sq_cmd { + struct ena_admin_aq_common_desc aq_common_descriptor; + + /* 4:0 : reserved0_w1 + * 7:5 : sq_direction - 0x1 - Tx, 0x2 - Rx + */ + uint8_t sq_identity; + + uint8_t reserved8_w1; + + /* 3:0 : placement_policy - Describing where the SQ + * descriptor ring and the SQ packet headers reside: + * 0x1 - descriptors and headers are in OS memory, + * 0x3 - descriptors and headers in device memory + * (a.k.a Low Latency Queue) + * 6:4 : completion_policy - Describing what policy + * to use for generation completion entry (cqe) in + * the CQ associated with this SQ: 0x0 - cqe for each + * sq descriptor, 0x1 - cqe upon request in sq + * descriptor, 0x2 - current queue head pointer is + * updated in OS memory upon sq descriptor request + * 0x3 - current queue head pointer is updated in OS + * memory for each sq descriptor + * 7 : reserved15_w1 + */ + uint8_t sq_caps_2; + + /* 0 : is_physically_contiguous - Described if the + * queue ring memory is allocated in physical + * contiguous pages or split. 
+ * 7:1 : reserved17_w1 + */ + uint8_t sq_caps_3; + + /* associated completion queue id. This CQ must be created prior to + * SQ creation + */ + uint16_t cq_idx; + + /* submission queue depth in entries */ + uint16_t sq_depth; + + /* SQ physical base address in OS memory. This field should not be + * used for Low Latency queues. Has to be page aligned. + */ + struct ena_common_mem_addr sq_ba; + + /* specifies queue head writeback location in OS memory. Valid if + * completion_policy is set to completion_policy_head_on_demand or + * completion_policy_head. Has to be cache aligned + */ + struct ena_common_mem_addr sq_head_writeback; + + uint32_t reserved0_w7; + + uint32_t reserved0_w8; +}; + +enum ena_admin_sq_direction { + ENA_ADMIN_SQ_DIRECTION_TX = 1, + ENA_ADMIN_SQ_DIRECTION_RX = 2, +}; + +struct ena_admin_acq_create_sq_resp_desc { + struct ena_admin_acq_common_desc acq_common_desc; + + uint16_t sq_idx; + + uint16_t reserved; + + /* queue doorbell address as an offset to PCIe MMIO REG BAR */ + uint32_t sq_doorbell_offset; + + /* low latency queue ring base address as an offset to PCIe MMIO + * LLQ_MEM BAR + */ + uint32_t llq_descriptors_offset; + + /* low latency queue headers' memory as an offset to PCIe MMIO + * LLQ_MEM BAR + */ + uint32_t llq_headers_offset; +}; + +struct ena_admin_aq_destroy_sq_cmd { + struct ena_admin_aq_common_desc aq_common_descriptor; + + struct ena_admin_sq sq; +}; + +struct ena_admin_acq_destroy_sq_resp_desc { + struct ena_admin_acq_common_desc acq_common_desc; +}; + +struct ena_admin_aq_create_cq_cmd { + struct ena_admin_aq_common_desc aq_common_descriptor; + + /* 4:0 : reserved5 + * 5 : interrupt_mode_enabled - if set, cq operates + * in interrupt mode, otherwise - polling + * 7:6 : reserved6 + */ + uint8_t cq_caps_1; + + /* 4:0 : cq_entry_size_words - size of CQ entry in + * 32-bit words, valid values: 4, 8. + * 7:5 : reserved7 + */ + uint8_t cq_caps_2; + + /* completion queue depth in # of entries. must be power of 2 */ + uint16_t cq_depth; + + /* msix vector assigned to this cq */ + uint32_t msix_vector; + + /* cq physical base address in OS memory. CQ must be physically + * contiguous + */ + struct ena_common_mem_addr cq_ba; +}; + +struct ena_admin_acq_create_cq_resp_desc { + struct ena_admin_acq_common_desc acq_common_desc; + + uint16_t cq_idx; + + /* actual cq depth in number of entries */ + uint16_t cq_actual_depth; + + uint32_t numa_node_register_offset; + + uint32_t cq_head_db_register_offset; + + uint32_t cq_interrupt_unmask_register_offset; +}; + +struct ena_admin_aq_destroy_cq_cmd { + struct ena_admin_aq_common_desc aq_common_descriptor; + + uint16_t cq_idx; + + uint16_t reserved1; +}; + +struct ena_admin_acq_destroy_cq_resp_desc { + struct ena_admin_acq_common_desc acq_common_desc; +}; + +/* ENA AQ Get Statistics command. Extended statistics are placed in control + * buffer pointed by AQ entry + */ +struct ena_admin_aq_get_stats_cmd { + struct ena_admin_aq_common_desc aq_common_descriptor; + + union { + /* command specific inline data */ + uint32_t inline_data_w1[3]; + + struct ena_admin_ctrl_buff_info control_buffer; + } u; + + /* stats type as defined in enum ena_admin_get_stats_type */ + uint8_t type; + + /* stats scope defined in enum ena_admin_get_stats_scope */ + uint8_t scope; + + uint16_t reserved3; + + /* queue id. used when scope is specific_queue */ + uint16_t queue_idx; + + /* device id, value 0xFFFF means mine. only privileged device can get + * stats of other device + */ + uint16_t device_id; +}; + +/* Basic Statistics Command. 
*/ +struct ena_admin_basic_stats { + uint32_t tx_bytes_low; + + uint32_t tx_bytes_high; + + uint32_t tx_pkts_low; + + uint32_t tx_pkts_high; + + uint32_t rx_bytes_low; + + uint32_t rx_bytes_high; + + uint32_t rx_pkts_low; + + uint32_t rx_pkts_high; + + uint32_t rx_drops_low; + + uint32_t rx_drops_high; + + uint32_t tx_drops_low; + + uint32_t tx_drops_high; +}; + +struct ena_admin_acq_get_stats_resp { + struct ena_admin_acq_common_desc acq_common_desc; + + struct ena_admin_basic_stats basic_stats; +}; + +struct ena_admin_get_set_feature_common_desc { + /* 1:0 : select - 0x1 - current value; 0x3 - default + * value + * 7:3 : reserved3 + */ + uint8_t flags; + + /* as appears in ena_admin_aq_feature_id */ + uint8_t feature_id; + + /* The driver specifies the max feature version it supports and the + * device responds with the currently supported feature version. The + * field is zero based + */ + uint8_t feature_version; + + uint8_t reserved8; +}; + +struct ena_admin_device_attr_feature_desc { + uint32_t impl_id; + + uint32_t device_version; + + /* bitmap of ena_admin_aq_feature_id */ + uint32_t supported_features; + + uint32_t reserved3; + + /* Indicates how many bits are used physical address access. */ + uint32_t phys_addr_width; + + /* Indicates how many bits are used virtual address access. */ + uint32_t virt_addr_width; + + /* unicast MAC address (in Network byte order) */ + uint8_t mac_addr[6]; + + uint8_t reserved7[2]; + + uint32_t max_mtu; +}; + +enum ena_admin_llq_header_location { + /* header is in descriptor list */ + ENA_ADMIN_INLINE_HEADER = 1, + /* header in a separate ring, implies 16B descriptor list entry */ + ENA_ADMIN_HEADER_RING = 2, +}; + +enum ena_admin_llq_ring_entry_size { + ENA_ADMIN_LIST_ENTRY_SIZE_128B = 1, + ENA_ADMIN_LIST_ENTRY_SIZE_192B = 2, + ENA_ADMIN_LIST_ENTRY_SIZE_256B = 4, +}; + +enum ena_admin_llq_num_descs_before_header { + ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_0 = 0, + ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1 = 1, + ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2 = 2, + ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4 = 4, + ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8 = 8, +}; + +/* packet descriptor list entry always starts with one or more descriptors, + * followed by a header. The rest of the descriptors are located in the + * beginning of the subsequent entry. Stride refers to how the rest of the + * descriptors are placed. This field is relevant only for inline header + * mode + */ +enum ena_admin_llq_stride_ctrl { + ENA_ADMIN_SINGLE_DESC_PER_ENTRY = 1, + ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY = 2, +}; + +enum ena_admin_accel_mode_feat { + ENA_ADMIN_DISABLE_META_CACHING = 0, + ENA_ADMIN_LIMIT_TX_BURST = 1, +}; + +struct ena_admin_accel_mode_get { + /* bit field of enum ena_admin_accel_mode_feat */ + uint16_t supported_flags; + + /* maximum burst size between two doorbells. The size is in bytes */ + uint16_t max_tx_burst_size; +}; + +struct ena_admin_accel_mode_set { + /* bit field of enum ena_admin_accel_mode_feat */ + uint16_t enabled_flags; + + uint16_t reserved; +}; + +struct ena_admin_accel_mode_req { + union { + uint32_t raw[2]; + + struct ena_admin_accel_mode_get get; + + struct ena_admin_accel_mode_set set; + } u; +}; + +struct ena_admin_feature_llq_desc { + uint32_t max_llq_num; + + uint32_t max_llq_depth; + + /* specify the header locations the device supports. bitfield of + * enum ena_admin_llq_header_location. + */ + uint16_t header_location_ctrl_supported; + + /* the header location the driver selected to use. 
*/ + uint16_t header_location_ctrl_enabled; + + /* if inline header is specified - this is the size of descriptor + * list entry. If header in a separate ring is specified - this is + * the size of header ring entry. bitfield of enum + * ena_admin_llq_ring_entry_size. specify the entry sizes the device + * supports + */ + uint16_t entry_size_ctrl_supported; + + /* the entry size the driver selected to use. */ + uint16_t entry_size_ctrl_enabled; + + /* valid only if inline header is specified. First entry associated + * with the packet includes descriptors and header. Rest of the + * entries occupied by descriptors. This parameter defines the max + * number of descriptors precedding the header in the first entry. + * The field is bitfield of enum + * ena_admin_llq_num_descs_before_header and specify the values the + * device supports + */ + uint16_t desc_num_before_header_supported; + + /* the desire field the driver selected to use */ + uint16_t desc_num_before_header_enabled; + + /* valid only if inline was chosen. bitfield of enum + * ena_admin_llq_stride_ctrl + */ + uint16_t descriptors_stride_ctrl_supported; + + /* the stride control the driver selected to use */ + uint16_t descriptors_stride_ctrl_enabled; + + /* reserved */ + uint32_t reserved1; + + /* accelerated low latency queues requirement. Driver needs to + * support those requirements in order to use accelerated LLQ + */ + struct ena_admin_accel_mode_req accel_mode; +}; + +struct ena_admin_queue_ext_feature_fields { + uint32_t max_tx_sq_num; + + uint32_t max_tx_cq_num; + + uint32_t max_rx_sq_num; + + uint32_t max_rx_cq_num; + + uint32_t max_tx_sq_depth; + + uint32_t max_tx_cq_depth; + + uint32_t max_rx_sq_depth; + + uint32_t max_rx_cq_depth; + + uint32_t max_tx_header_size; + + /* Maximum Descriptors number, including meta descriptor, allowed for + * a single Tx packet + */ + uint16_t max_per_packet_tx_descs; + + /* Maximum Descriptors number allowed for a single Rx packet */ + uint16_t max_per_packet_rx_descs; +}; + +struct ena_admin_queue_feature_desc { + uint32_t max_sq_num; + + uint32_t max_sq_depth; + + uint32_t max_cq_num; + + uint32_t max_cq_depth; + + uint32_t max_legacy_llq_num; + + uint32_t max_legacy_llq_depth; + + uint32_t max_header_size; + + /* Maximum Descriptors number, including meta descriptor, allowed for + * a single Tx packet + */ + uint16_t max_packet_tx_descs; + + /* Maximum Descriptors number allowed for a single Rx packet */ + uint16_t max_packet_rx_descs; +}; + +struct ena_admin_set_feature_mtu_desc { + /* exclude L2 */ + uint32_t mtu; +}; + +struct ena_admin_get_extra_properties_strings_desc { + uint32_t count; +}; + +struct ena_admin_get_extra_properties_flags_desc { + uint32_t flags; +}; + +struct ena_admin_set_feature_host_attr_desc { + /* host OS info base address in OS memory. host info is 4KB of + * physically contiguous + */ + struct ena_common_mem_addr os_info_ba; + + /* host debug area base address in OS memory. 
debug area must be + * physically contiguous + */ + struct ena_common_mem_addr debug_ba; + + /* debug area size */ + uint32_t debug_area_size; +}; + +struct ena_admin_feature_intr_moder_desc { + /* interrupt delay granularity in usec */ + uint16_t intr_delay_resolution; + + uint16_t reserved; +}; + +struct ena_admin_get_feature_link_desc { + /* Link speed in Mbps */ + uint32_t speed; + + /* bit field of enum ena_admin_link types */ + uint32_t supported; + + /* 0 : autoneg + * 1 : duplex - Full Duplex + * 31:2 : reserved2 + */ + uint32_t flags; +}; + +struct ena_admin_feature_aenq_desc { + /* bitmask for AENQ groups the device can report */ + uint32_t supported_groups; + + /* bitmask for AENQ groups to report */ + uint32_t enabled_groups; +}; + +struct ena_admin_feature_offload_desc { + /* 0 : TX_L3_csum_ipv4 + * 1 : TX_L4_ipv4_csum_part - The checksum field + * should be initialized with pseudo header checksum + * 2 : TX_L4_ipv4_csum_full + * 3 : TX_L4_ipv6_csum_part - The checksum field + * should be initialized with pseudo header checksum + * 4 : TX_L4_ipv6_csum_full + * 5 : tso_ipv4 + * 6 : tso_ipv6 + * 7 : tso_ecn + */ + uint32_t tx; + + /* Receive side supported stateless offload + * 0 : RX_L3_csum_ipv4 - IPv4 checksum + * 1 : RX_L4_ipv4_csum - TCP/UDP/IPv4 checksum + * 2 : RX_L4_ipv6_csum - TCP/UDP/IPv6 checksum + * 3 : RX_hash - Hash calculation + */ + uint32_t rx_supported; + + uint32_t rx_enabled; +}; + +enum ena_admin_hash_functions { + ENA_ADMIN_TOEPLITZ = 1, + ENA_ADMIN_CRC32 = 2, +}; + +struct ena_admin_feature_rss_flow_hash_control { + uint32_t keys_num; + + uint32_t reserved; + + uint32_t key[10]; +}; + +struct ena_admin_feature_rss_flow_hash_function { + /* 7:0 : funcs - bitmask of ena_admin_hash_functions */ + uint32_t supported_func; + + /* 7:0 : selected_func - bitmask of + * ena_admin_hash_functions + */ + uint32_t selected_func; + + /* initial value */ + uint32_t init_val; +}; + +/* RSS flow hash protocols */ +enum ena_admin_flow_hash_proto { + ENA_ADMIN_RSS_TCP4 = 0, + ENA_ADMIN_RSS_UDP4 = 1, + ENA_ADMIN_RSS_TCP6 = 2, + ENA_ADMIN_RSS_UDP6 = 3, + ENA_ADMIN_RSS_IP4 = 4, + ENA_ADMIN_RSS_IP6 = 5, + ENA_ADMIN_RSS_IP4_FRAG = 6, + ENA_ADMIN_RSS_NOT_IP = 7, + /* TCPv6 with extension header */ + ENA_ADMIN_RSS_TCP6_EX = 8, + /* IPv6 with extension header */ + ENA_ADMIN_RSS_IP6_EX = 9, + ENA_ADMIN_RSS_PROTO_NUM = 16, +}; + +/* RSS flow hash fields */ +enum ena_admin_flow_hash_fields { + /* Ethernet Dest Addr */ + ENA_ADMIN_RSS_L2_DA = BIT(0), + /* Ethernet Src Addr */ + ENA_ADMIN_RSS_L2_SA = BIT(1), + /* ipv4/6 Dest Addr */ + ENA_ADMIN_RSS_L3_DA = BIT(2), + /* ipv4/6 Src Addr */ + ENA_ADMIN_RSS_L3_SA = BIT(3), + /* tcp/udp Dest Port */ + ENA_ADMIN_RSS_L4_DP = BIT(4), + /* tcp/udp Src Port */ + ENA_ADMIN_RSS_L4_SP = BIT(5), +}; + +struct ena_admin_proto_input { + /* flow hash fields (bitwise according to ena_admin_flow_hash_fields) */ + uint16_t fields; + + uint16_t reserved2; +}; + +struct ena_admin_feature_rss_hash_control { + struct ena_admin_proto_input supported_fields[ENA_ADMIN_RSS_PROTO_NUM]; + + struct ena_admin_proto_input selected_fields[ENA_ADMIN_RSS_PROTO_NUM]; + + struct ena_admin_proto_input reserved2[ENA_ADMIN_RSS_PROTO_NUM]; + + struct ena_admin_proto_input reserved3[ENA_ADMIN_RSS_PROTO_NUM]; +}; + +struct ena_admin_feature_rss_flow_hash_input { + /* supported hash input sorting + * 1 : L3_sort - support swapping L3 addresses if DA is + * smaller than SA + * 2 : L4_sort - support swapping L4 ports if DP is smaller + * than SP + */ + uint16_t supported_input_sort; + + /* enabled 
hash input sorting + * 1 : enable_L3_sort - enable swapping L3 addresses if + * DA is smaller than SA + * 2 : enable_L4_sort - enable swapping L4 ports if DP + * is smaller than SP + */ + uint16_t enabled_input_sort; +}; + +enum ena_admin_os_type { + ENA_ADMIN_OS_LINUX = 1, + ENA_ADMIN_OS_WIN = 2, + ENA_ADMIN_OS_DPDK = 3, + ENA_ADMIN_OS_FREEBSD = 4, + ENA_ADMIN_OS_IPXE = 5, + ENA_ADMIN_OS_ESXI = 6, + ENA_ADMIN_OS_GROUPS_NUM = 6, +}; + +struct ena_admin_host_info { + /* defined in enum ena_admin_os_type */ + uint32_t os_type; + + /* OS distribution string format */ + uint8_t os_dist_str[128]; + + /* OS distribution numeric format */ + uint32_t os_dist; + + /* kernel version string format */ + uint8_t kernel_ver_str[32]; + + /* Kernel version numeric format */ + uint32_t kernel_ver; + + /* 7:0 : major + * 15:8 : minor + * 23:16 : sub_minor + * 31:24 : module_type + */ + uint32_t driver_version; + + /* features bitmap */ + uint32_t supported_network_features[2]; + + /* ENA spec version of driver */ + uint16_t ena_spec_version; + + /* ENA device's Bus, Device and Function + * 2:0 : function + * 7:3 : device + * 15:8 : bus + */ + uint16_t bdf; + + /* Number of CPUs */ + uint16_t num_cpus; + + uint16_t reserved; + + /* 0 : mutable_rss_table_size + * 1 : rx_offset + * 2 : interrupt_moderation + * 3 : map_rx_buf_bidirectional + * 31:4 : reserved + */ + uint32_t driver_supported_features; +}; + +struct ena_admin_rss_ind_table_entry { + uint16_t cq_idx; + + uint16_t reserved; +}; + +struct ena_admin_feature_rss_ind_table { + /* min supported table size (2^min_size) */ + uint16_t min_size; + + /* max supported table size (2^max_size) */ + uint16_t max_size; + + /* table size (2^size) */ + uint16_t size; + + /* 0 : one_entry_update - The ENA device supports + * setting a single RSS table entry + */ + uint8_t flags; + + uint8_t reserved; + + /* index of the inline entry. 0xFFFFFFFF means invalid */ + uint32_t inline_index; + + /* used for updating a single entry, ignored when setting the entire + * table through the control buffer. + */ + struct ena_admin_rss_ind_table_entry inline_entry; +}; + +/* When a hint value is 0, the driver should use its own predefined value */ +struct ena_admin_ena_hw_hints { + /* value in ms */ + uint16_t mmio_read_timeout; + + /* value in ms */ + uint16_t driver_watchdog_timeout; + + /* Per packet tx completion timeout. 
value in ms */ + uint16_t missing_tx_completion_timeout; + + uint16_t missed_tx_completion_count_threshold_to_reset; + + /* value in ms */ + uint16_t admin_completion_tx_timeout; + + uint16_t netdev_wd_timeout; + + uint16_t max_tx_sgl_size; + + uint16_t max_rx_sgl_size; + + uint16_t reserved[8]; +}; + +struct ena_admin_get_feat_cmd { + struct ena_admin_aq_common_desc aq_common_descriptor; + + struct ena_admin_ctrl_buff_info control_buffer; + + struct ena_admin_get_set_feature_common_desc feat_common; + + uint32_t raw[11]; +}; + +struct ena_admin_queue_ext_feature_desc { + /* version */ + uint8_t version; + + uint8_t reserved1[3]; + + union { + struct ena_admin_queue_ext_feature_fields max_queue_ext; + + uint32_t raw[10]; + } ; +}; + +struct ena_admin_get_feat_resp { + struct ena_admin_acq_common_desc acq_common_desc; + + union { + uint32_t raw[14]; + + struct ena_admin_device_attr_feature_desc dev_attr; + + struct ena_admin_feature_llq_desc llq; + + struct ena_admin_queue_feature_desc max_queue; + + struct ena_admin_queue_ext_feature_desc max_queue_ext; + + struct ena_admin_feature_aenq_desc aenq; + + struct ena_admin_get_feature_link_desc link; + + struct ena_admin_feature_offload_desc offload; + + struct ena_admin_feature_rss_flow_hash_function flow_hash_func; + + struct ena_admin_feature_rss_flow_hash_input flow_hash_input; + + struct ena_admin_feature_rss_ind_table ind_table; + + struct ena_admin_feature_intr_moder_desc intr_moderation; + + struct ena_admin_ena_hw_hints hw_hints; + + struct ena_admin_get_extra_properties_strings_desc extra_properties_strings; + + struct ena_admin_get_extra_properties_flags_desc extra_properties_flags; + } u; +}; + +struct ena_admin_set_feat_cmd { + struct ena_admin_aq_common_desc aq_common_descriptor; + + struct ena_admin_ctrl_buff_info control_buffer; + + struct ena_admin_get_set_feature_common_desc feat_common; + + union { + uint32_t raw[11]; + + /* mtu size */ + struct ena_admin_set_feature_mtu_desc mtu; + + /* host attributes */ + struct ena_admin_set_feature_host_attr_desc host_attr; + + /* AENQ configuration */ + struct ena_admin_feature_aenq_desc aenq; + + /* rss flow hash function */ + struct ena_admin_feature_rss_flow_hash_function flow_hash_func; + + /* rss flow hash input */ + struct ena_admin_feature_rss_flow_hash_input flow_hash_input; + + /* rss indirection table */ + struct ena_admin_feature_rss_ind_table ind_table; + + /* LLQ configuration */ + struct ena_admin_feature_llq_desc llq; + } u; +}; + +struct ena_admin_set_feat_resp { + struct ena_admin_acq_common_desc acq_common_desc; + + union { + uint32_t raw[14]; + } u; +}; + +struct ena_admin_aenq_common_desc { + uint16_t group; + + uint16_t syndrom; + + /* 0 : phase + * 7:1 : reserved - MBZ + */ + uint8_t flags; + + uint8_t reserved1[3]; + + uint32_t timestamp_low; + + uint32_t timestamp_high; +}; + +/* asynchronous event notification groups */ +enum ena_admin_aenq_group { + ENA_ADMIN_LINK_CHANGE = 0, + ENA_ADMIN_FATAL_ERROR = 1, + ENA_ADMIN_WARNING = 2, + ENA_ADMIN_NOTIFICATION = 3, + ENA_ADMIN_KEEP_ALIVE = 4, + ENA_ADMIN_AENQ_GROUPS_NUM = 5, +}; + +enum ena_admin_aenq_notification_syndrom { + ENA_ADMIN_SUSPEND = 0, + ENA_ADMIN_RESUME = 1, + ENA_ADMIN_UPDATE_HINTS = 2, +}; + +struct ena_admin_aenq_entry { + struct ena_admin_aenq_common_desc aenq_common_desc; + + /* command specific inline data */ + uint32_t inline_data_w4[12]; +}; + +struct ena_admin_aenq_link_change_desc { + struct ena_admin_aenq_common_desc aenq_common_desc; + + /* 0 : link_status */ + uint32_t flags; +}; + +struct 
ena_admin_aenq_keep_alive_desc { + struct ena_admin_aenq_common_desc aenq_common_desc; + + uint32_t rx_drops_low; + + uint32_t rx_drops_high; + + uint32_t tx_drops_low; + + uint32_t tx_drops_high; +}; + +struct ena_admin_ena_mmio_req_read_less_resp { + uint16_t req_id; + + uint16_t reg_off; + + /* value is valid when poll is cleared */ + uint32_t reg_val; +}; + +/* aq_common_desc */ +#define ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK GENMASK(11, 0) +#define ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK BIT(0) +#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_SHIFT 1 +#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_MASK BIT(1) +#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_SHIFT 2 +#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK BIT(2) + +/* sq */ +#define ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT 5 +#define ENA_ADMIN_SQ_SQ_DIRECTION_MASK GENMASK(7, 5) + +/* acq_common_desc */ +#define ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK GENMASK(11, 0) +#define ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK BIT(0) + +/* aq_create_sq_cmd */ +#define ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT 5 +#define ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK GENMASK(7, 5) +#define ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK GENMASK(3, 0) +#define ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT 4 +#define ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK GENMASK(6, 4) +#define ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK BIT(0) + +/* aq_create_cq_cmd */ +#define ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_SHIFT 5 +#define ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK BIT(5) +#define ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK GENMASK(4, 0) + +/* get_set_feature_common_desc */ +#define ENA_ADMIN_GET_SET_FEATURE_COMMON_DESC_SELECT_MASK GENMASK(1, 0) + +/* get_feature_link_desc */ +#define ENA_ADMIN_GET_FEATURE_LINK_DESC_AUTONEG_MASK BIT(0) +#define ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_SHIFT 1 +#define ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_MASK BIT(1) + +/* feature_offload_desc */ +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK BIT(0) +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_SHIFT 1 +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK BIT(1) +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_SHIFT 2 +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK BIT(2) +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_SHIFT 3 +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK BIT(3) +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_SHIFT 4 +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK BIT(4) +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_SHIFT 5 +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK BIT(5) +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_SHIFT 6 +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK BIT(6) +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_SHIFT 7 +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK BIT(7) +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK BIT(0) +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_SHIFT 1 +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK BIT(1) +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_SHIFT 2 +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK BIT(2) +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_SHIFT 3 +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_MASK BIT(3) + +/* feature_rss_flow_hash_function */ +#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_FUNCS_MASK 
GENMASK(7, 0) +#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_SELECTED_FUNC_MASK GENMASK(7, 0) + +/* feature_rss_flow_hash_input */ +#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_SHIFT 1 +#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK BIT(1) +#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_SHIFT 2 +#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK BIT(2) +#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_SHIFT 1 +#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_MASK BIT(1) +#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_SHIFT 2 +#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_MASK BIT(2) + +/* host_info */ +#define ENA_ADMIN_HOST_INFO_MAJOR_MASK GENMASK(7, 0) +#define ENA_ADMIN_HOST_INFO_MINOR_SHIFT 8 +#define ENA_ADMIN_HOST_INFO_MINOR_MASK GENMASK(15, 8) +#define ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT 16 +#define ENA_ADMIN_HOST_INFO_SUB_MINOR_MASK GENMASK(23, 16) +#define ENA_ADMIN_HOST_INFO_MODULE_TYPE_SHIFT 24 +#define ENA_ADMIN_HOST_INFO_MODULE_TYPE_MASK GENMASK(31, 24) +#define ENA_ADMIN_HOST_INFO_FUNCTION_MASK GENMASK(2, 0) +#define ENA_ADMIN_HOST_INFO_DEVICE_SHIFT 3 +#define ENA_ADMIN_HOST_INFO_DEVICE_MASK GENMASK(7, 3) +#define ENA_ADMIN_HOST_INFO_BUS_SHIFT 8 +#define ENA_ADMIN_HOST_INFO_BUS_MASK GENMASK(15, 8) +#define ENA_ADMIN_HOST_INFO_MUTABLE_RSS_TABLE_SIZE_MASK BIT(0) +#define ENA_ADMIN_HOST_INFO_RX_OFFSET_SHIFT 1 +#define ENA_ADMIN_HOST_INFO_RX_OFFSET_MASK BIT(1) +#define ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_SHIFT 2 +#define ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_MASK BIT(2) +#define ENA_ADMIN_HOST_INFO_MAP_RX_BUF_BIDIRECTIONAL_SHIFT 3 +#define ENA_ADMIN_HOST_INFO_MAP_RX_BUF_BIDIRECTIONAL_MASK BIT(3) + +/* feature_rss_ind_table */ +#define ENA_ADMIN_FEATURE_RSS_IND_TABLE_ONE_ENTRY_UPDATE_MASK BIT(0) + +/* aenq_common_desc */ +#define ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK BIT(0) + +/* aenq_link_change_desc */ +#define ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK BIT(0) + +#if !defined(DEFS_LINUX_MAINLINE) +static inline uint16_t get_ena_admin_aq_common_desc_command_id(const struct ena_admin_aq_common_desc *p) +{ + return p->command_id & ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK; +} + +static inline void set_ena_admin_aq_common_desc_command_id(struct ena_admin_aq_common_desc *p, uint16_t val) +{ + p->command_id |= val & ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK; +} + +static inline uint8_t get_ena_admin_aq_common_desc_phase(const struct ena_admin_aq_common_desc *p) +{ + return p->flags & ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK; +} + +static inline void set_ena_admin_aq_common_desc_phase(struct ena_admin_aq_common_desc *p, uint8_t val) +{ + p->flags |= val & ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK; +} + +static inline uint8_t get_ena_admin_aq_common_desc_ctrl_data(const struct ena_admin_aq_common_desc *p) +{ + return (p->flags & ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_MASK) >> ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_SHIFT; +} + +static inline void set_ena_admin_aq_common_desc_ctrl_data(struct ena_admin_aq_common_desc *p, uint8_t val) +{ + p->flags |= (val << ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_SHIFT) & ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_MASK; +} + +static inline uint8_t get_ena_admin_aq_common_desc_ctrl_data_indirect(const struct ena_admin_aq_common_desc *p) +{ + return (p->flags & ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK) >> ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_SHIFT; +} + +static inline void set_ena_admin_aq_common_desc_ctrl_data_indirect(struct ena_admin_aq_common_desc *p, uint8_t val) +{ 
+ p->flags |= (val << ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_SHIFT) & ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK; +} + +static inline uint8_t get_ena_admin_sq_sq_direction(const struct ena_admin_sq *p) +{ + return (p->sq_identity & ENA_ADMIN_SQ_SQ_DIRECTION_MASK) >> ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT; +} + +static inline void set_ena_admin_sq_sq_direction(struct ena_admin_sq *p, uint8_t val) +{ + p->sq_identity |= (val << ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT) & ENA_ADMIN_SQ_SQ_DIRECTION_MASK; +} + +static inline uint16_t get_ena_admin_acq_common_desc_command_id(const struct ena_admin_acq_common_desc *p) +{ + return p->command & ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK; +} + +static inline void set_ena_admin_acq_common_desc_command_id(struct ena_admin_acq_common_desc *p, uint16_t val) +{ + p->command |= val & ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK; +} + +static inline uint8_t get_ena_admin_acq_common_desc_phase(const struct ena_admin_acq_common_desc *p) +{ + return p->flags & ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK; +} + +static inline void set_ena_admin_acq_common_desc_phase(struct ena_admin_acq_common_desc *p, uint8_t val) +{ + p->flags |= val & ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK; +} + +static inline uint8_t get_ena_admin_aq_create_sq_cmd_sq_direction(const struct ena_admin_aq_create_sq_cmd *p) +{ + return (p->sq_identity & ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK) >> ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT; +} + +static inline void set_ena_admin_aq_create_sq_cmd_sq_direction(struct ena_admin_aq_create_sq_cmd *p, uint8_t val) +{ + p->sq_identity |= (val << ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT) & ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK; +} + +static inline uint8_t get_ena_admin_aq_create_sq_cmd_placement_policy(const struct ena_admin_aq_create_sq_cmd *p) +{ + return p->sq_caps_2 & ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK; +} + +static inline void set_ena_admin_aq_create_sq_cmd_placement_policy(struct ena_admin_aq_create_sq_cmd *p, uint8_t val) +{ + p->sq_caps_2 |= val & ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK; +} + +static inline uint8_t get_ena_admin_aq_create_sq_cmd_completion_policy(const struct ena_admin_aq_create_sq_cmd *p) +{ + return (p->sq_caps_2 & ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK) >> ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT; +} + +static inline void set_ena_admin_aq_create_sq_cmd_completion_policy(struct ena_admin_aq_create_sq_cmd *p, uint8_t val) +{ + p->sq_caps_2 |= (val << ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT) & ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK; +} + +static inline uint8_t get_ena_admin_aq_create_sq_cmd_is_physically_contiguous(const struct ena_admin_aq_create_sq_cmd *p) +{ + return p->sq_caps_3 & ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK; +} + +static inline void set_ena_admin_aq_create_sq_cmd_is_physically_contiguous(struct ena_admin_aq_create_sq_cmd *p, uint8_t val) +{ + p->sq_caps_3 |= val & ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK; +} + +static inline uint8_t get_ena_admin_aq_create_cq_cmd_interrupt_mode_enabled(const struct ena_admin_aq_create_cq_cmd *p) +{ + return (p->cq_caps_1 & ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK) >> ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_SHIFT; +} + +static inline void set_ena_admin_aq_create_cq_cmd_interrupt_mode_enabled(struct ena_admin_aq_create_cq_cmd *p, uint8_t val) +{ + p->cq_caps_1 |= (val << ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_SHIFT) & 
ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK; +} + +static inline uint8_t get_ena_admin_aq_create_cq_cmd_cq_entry_size_words(const struct ena_admin_aq_create_cq_cmd *p) +{ + return p->cq_caps_2 & ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK; +} + +static inline void set_ena_admin_aq_create_cq_cmd_cq_entry_size_words(struct ena_admin_aq_create_cq_cmd *p, uint8_t val) +{ + p->cq_caps_2 |= val & ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK; +} + +static inline uint8_t get_ena_admin_get_set_feature_common_desc_select(const struct ena_admin_get_set_feature_common_desc *p) +{ + return p->flags & ENA_ADMIN_GET_SET_FEATURE_COMMON_DESC_SELECT_MASK; +} + +static inline void set_ena_admin_get_set_feature_common_desc_select(struct ena_admin_get_set_feature_common_desc *p, uint8_t val) +{ + p->flags |= val & ENA_ADMIN_GET_SET_FEATURE_COMMON_DESC_SELECT_MASK; +} + +static inline uint32_t get_ena_admin_get_feature_link_desc_autoneg(const struct ena_admin_get_feature_link_desc *p) +{ + return p->flags & ENA_ADMIN_GET_FEATURE_LINK_DESC_AUTONEG_MASK; +} + +static inline void set_ena_admin_get_feature_link_desc_autoneg(struct ena_admin_get_feature_link_desc *p, uint32_t val) +{ + p->flags |= val & ENA_ADMIN_GET_FEATURE_LINK_DESC_AUTONEG_MASK; +} + +static inline uint32_t get_ena_admin_get_feature_link_desc_duplex(const struct ena_admin_get_feature_link_desc *p) +{ + return (p->flags & ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_MASK) >> ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_SHIFT; +} + +static inline void set_ena_admin_get_feature_link_desc_duplex(struct ena_admin_get_feature_link_desc *p, uint32_t val) +{ + p->flags |= (val << ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_SHIFT) & ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_MASK; +} + +static inline uint32_t get_ena_admin_feature_offload_desc_TX_L3_csum_ipv4(const struct ena_admin_feature_offload_desc *p) +{ + return p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK; +} + +static inline void set_ena_admin_feature_offload_desc_TX_L3_csum_ipv4(struct ena_admin_feature_offload_desc *p, uint32_t val) +{ + p->tx |= val & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK; +} + +static inline uint32_t get_ena_admin_feature_offload_desc_TX_L4_ipv4_csum_part(const struct ena_admin_feature_offload_desc *p) +{ + return (p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_SHIFT; +} + +static inline void set_ena_admin_feature_offload_desc_TX_L4_ipv4_csum_part(struct ena_admin_feature_offload_desc *p, uint32_t val) +{ + p->tx |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK; +} + +static inline uint32_t get_ena_admin_feature_offload_desc_TX_L4_ipv4_csum_full(const struct ena_admin_feature_offload_desc *p) +{ + return (p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_SHIFT; +} + +static inline void set_ena_admin_feature_offload_desc_TX_L4_ipv4_csum_full(struct ena_admin_feature_offload_desc *p, uint32_t val) +{ + p->tx |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK; +} + +static inline uint32_t get_ena_admin_feature_offload_desc_TX_L4_ipv6_csum_part(const struct ena_admin_feature_offload_desc *p) +{ + return (p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_SHIFT; +} + +static inline 
void set_ena_admin_feature_offload_desc_TX_L4_ipv6_csum_part(struct ena_admin_feature_offload_desc *p, uint32_t val) +{ + p->tx |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK; +} + +static inline uint32_t get_ena_admin_feature_offload_desc_TX_L4_ipv6_csum_full(const struct ena_admin_feature_offload_desc *p) +{ + return (p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_SHIFT; +} + +static inline void set_ena_admin_feature_offload_desc_TX_L4_ipv6_csum_full(struct ena_admin_feature_offload_desc *p, uint32_t val) +{ + p->tx |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK; +} + +static inline uint32_t get_ena_admin_feature_offload_desc_tso_ipv4(const struct ena_admin_feature_offload_desc *p) +{ + return (p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_SHIFT; +} + +static inline void set_ena_admin_feature_offload_desc_tso_ipv4(struct ena_admin_feature_offload_desc *p, uint32_t val) +{ + p->tx |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK; +} + +static inline uint32_t get_ena_admin_feature_offload_desc_tso_ipv6(const struct ena_admin_feature_offload_desc *p) +{ + return (p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_SHIFT; +} + +static inline void set_ena_admin_feature_offload_desc_tso_ipv6(struct ena_admin_feature_offload_desc *p, uint32_t val) +{ + p->tx |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK; +} + +static inline uint32_t get_ena_admin_feature_offload_desc_tso_ecn(const struct ena_admin_feature_offload_desc *p) +{ + return (p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_SHIFT; +} + +static inline void set_ena_admin_feature_offload_desc_tso_ecn(struct ena_admin_feature_offload_desc *p, uint32_t val) +{ + p->tx |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK; +} + +static inline uint32_t get_ena_admin_feature_offload_desc_RX_L3_csum_ipv4(const struct ena_admin_feature_offload_desc *p) +{ + return p->rx_supported & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK; +} + +static inline void set_ena_admin_feature_offload_desc_RX_L3_csum_ipv4(struct ena_admin_feature_offload_desc *p, uint32_t val) +{ + p->rx_supported |= val & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK; +} + +static inline uint32_t get_ena_admin_feature_offload_desc_RX_L4_ipv4_csum(const struct ena_admin_feature_offload_desc *p) +{ + return (p->rx_supported & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_SHIFT; +} + +static inline void set_ena_admin_feature_offload_desc_RX_L4_ipv4_csum(struct ena_admin_feature_offload_desc *p, uint32_t val) +{ + p->rx_supported |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK; +} + +static inline uint32_t get_ena_admin_feature_offload_desc_RX_L4_ipv6_csum(const struct ena_admin_feature_offload_desc *p) +{ + return (p->rx_supported & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_SHIFT; +} + +static inline void 
set_ena_admin_feature_offload_desc_RX_L4_ipv6_csum(struct ena_admin_feature_offload_desc *p, uint32_t val) +{ + p->rx_supported |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK; +} + +static inline uint32_t get_ena_admin_feature_offload_desc_RX_hash(const struct ena_admin_feature_offload_desc *p) +{ + return (p->rx_supported & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_SHIFT; +} + +static inline void set_ena_admin_feature_offload_desc_RX_hash(struct ena_admin_feature_offload_desc *p, uint32_t val) +{ + p->rx_supported |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_MASK; +} + +static inline uint32_t get_ena_admin_feature_rss_flow_hash_function_funcs(const struct ena_admin_feature_rss_flow_hash_function *p) +{ + return p->supported_func & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_FUNCS_MASK; +} + +static inline void set_ena_admin_feature_rss_flow_hash_function_funcs(struct ena_admin_feature_rss_flow_hash_function *p, uint32_t val) +{ + p->supported_func |= val & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_FUNCS_MASK; +} + +static inline uint32_t get_ena_admin_feature_rss_flow_hash_function_selected_func(const struct ena_admin_feature_rss_flow_hash_function *p) +{ + return p->selected_func & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_SELECTED_FUNC_MASK; +} + +static inline void set_ena_admin_feature_rss_flow_hash_function_selected_func(struct ena_admin_feature_rss_flow_hash_function *p, uint32_t val) +{ + p->selected_func |= val & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_SELECTED_FUNC_MASK; +} + +static inline uint16_t get_ena_admin_feature_rss_flow_hash_input_L3_sort(const struct ena_admin_feature_rss_flow_hash_input *p) +{ + return (p->supported_input_sort & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK) >> ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_SHIFT; +} + +static inline void set_ena_admin_feature_rss_flow_hash_input_L3_sort(struct ena_admin_feature_rss_flow_hash_input *p, uint16_t val) +{ + p->supported_input_sort |= (val << ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_SHIFT) & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK; +} + +static inline uint16_t get_ena_admin_feature_rss_flow_hash_input_L4_sort(const struct ena_admin_feature_rss_flow_hash_input *p) +{ + return (p->supported_input_sort & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK) >> ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_SHIFT; +} + +static inline void set_ena_admin_feature_rss_flow_hash_input_L4_sort(struct ena_admin_feature_rss_flow_hash_input *p, uint16_t val) +{ + p->supported_input_sort |= (val << ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_SHIFT) & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK; +} + +static inline uint16_t get_ena_admin_feature_rss_flow_hash_input_enable_L3_sort(const struct ena_admin_feature_rss_flow_hash_input *p) +{ + return (p->enabled_input_sort & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_MASK) >> ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_SHIFT; +} + +static inline void set_ena_admin_feature_rss_flow_hash_input_enable_L3_sort(struct ena_admin_feature_rss_flow_hash_input *p, uint16_t val) +{ + p->enabled_input_sort |= (val << ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_SHIFT) & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_MASK; +} + +static inline uint16_t get_ena_admin_feature_rss_flow_hash_input_enable_L4_sort(const struct ena_admin_feature_rss_flow_hash_input *p) +{ + 
return (p->enabled_input_sort & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_MASK) >> ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_SHIFT; +} + +static inline void set_ena_admin_feature_rss_flow_hash_input_enable_L4_sort(struct ena_admin_feature_rss_flow_hash_input *p, uint16_t val) +{ + p->enabled_input_sort |= (val << ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_SHIFT) & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_MASK; +} + +static inline uint32_t get_ena_admin_host_info_major(const struct ena_admin_host_info *p) +{ + return p->driver_version & ENA_ADMIN_HOST_INFO_MAJOR_MASK; +} + +static inline void set_ena_admin_host_info_major(struct ena_admin_host_info *p, uint32_t val) +{ + p->driver_version |= val & ENA_ADMIN_HOST_INFO_MAJOR_MASK; +} + +static inline uint32_t get_ena_admin_host_info_minor(const struct ena_admin_host_info *p) +{ + return (p->driver_version & ENA_ADMIN_HOST_INFO_MINOR_MASK) >> ENA_ADMIN_HOST_INFO_MINOR_SHIFT; +} + +static inline void set_ena_admin_host_info_minor(struct ena_admin_host_info *p, uint32_t val) +{ + p->driver_version |= (val << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) & ENA_ADMIN_HOST_INFO_MINOR_MASK; +} + +static inline uint32_t get_ena_admin_host_info_sub_minor(const struct ena_admin_host_info *p) +{ + return (p->driver_version & ENA_ADMIN_HOST_INFO_SUB_MINOR_MASK) >> ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT; +} + +static inline void set_ena_admin_host_info_sub_minor(struct ena_admin_host_info *p, uint32_t val) +{ + p->driver_version |= (val << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT) & ENA_ADMIN_HOST_INFO_SUB_MINOR_MASK; +} + +static inline uint32_t get_ena_admin_host_info_module_type(const struct ena_admin_host_info *p) +{ + return (p->driver_version & ENA_ADMIN_HOST_INFO_MODULE_TYPE_MASK) >> ENA_ADMIN_HOST_INFO_MODULE_TYPE_SHIFT; +} + +static inline void set_ena_admin_host_info_module_type(struct ena_admin_host_info *p, uint32_t val) +{ + p->driver_version |= (val << ENA_ADMIN_HOST_INFO_MODULE_TYPE_SHIFT) & ENA_ADMIN_HOST_INFO_MODULE_TYPE_MASK; +} + +static inline uint16_t get_ena_admin_host_info_function(const struct ena_admin_host_info *p) +{ + return p->bdf & ENA_ADMIN_HOST_INFO_FUNCTION_MASK; +} + +static inline void set_ena_admin_host_info_function(struct ena_admin_host_info *p, uint16_t val) +{ + p->bdf |= val & ENA_ADMIN_HOST_INFO_FUNCTION_MASK; +} + +static inline uint16_t get_ena_admin_host_info_device(const struct ena_admin_host_info *p) +{ + return (p->bdf & ENA_ADMIN_HOST_INFO_DEVICE_MASK) >> ENA_ADMIN_HOST_INFO_DEVICE_SHIFT; +} + +static inline void set_ena_admin_host_info_device(struct ena_admin_host_info *p, uint16_t val) +{ + p->bdf |= (val << ENA_ADMIN_HOST_INFO_DEVICE_SHIFT) & ENA_ADMIN_HOST_INFO_DEVICE_MASK; +} + +static inline uint16_t get_ena_admin_host_info_bus(const struct ena_admin_host_info *p) +{ + return (p->bdf & ENA_ADMIN_HOST_INFO_BUS_MASK) >> ENA_ADMIN_HOST_INFO_BUS_SHIFT; +} + +static inline void set_ena_admin_host_info_bus(struct ena_admin_host_info *p, uint16_t val) +{ + p->bdf |= (val << ENA_ADMIN_HOST_INFO_BUS_SHIFT) & ENA_ADMIN_HOST_INFO_BUS_MASK; +} + +static inline uint32_t get_ena_admin_host_info_mutable_rss_table_size(const struct ena_admin_host_info *p) +{ + return p->driver_supported_features & ENA_ADMIN_HOST_INFO_MUTABLE_RSS_TABLE_SIZE_MASK; +} + +static inline void set_ena_admin_host_info_mutable_rss_table_size(struct ena_admin_host_info *p, uint32_t val) +{ + p->driver_supported_features |= val & ENA_ADMIN_HOST_INFO_MUTABLE_RSS_TABLE_SIZE_MASK; +} + +static inline uint32_t 
get_ena_admin_host_info_rx_offset(const struct ena_admin_host_info *p) +{ + return (p->driver_supported_features & ENA_ADMIN_HOST_INFO_RX_OFFSET_MASK) >> ENA_ADMIN_HOST_INFO_RX_OFFSET_SHIFT; +} + +static inline void set_ena_admin_host_info_rx_offset(struct ena_admin_host_info *p, uint32_t val) +{ + p->driver_supported_features |= (val << ENA_ADMIN_HOST_INFO_RX_OFFSET_SHIFT) & ENA_ADMIN_HOST_INFO_RX_OFFSET_MASK; +} + +static inline uint32_t get_ena_admin_host_info_interrupt_moderation(const struct ena_admin_host_info *p) +{ + return (p->driver_supported_features & ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_MASK) >> ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_SHIFT; +} + +static inline void set_ena_admin_host_info_interrupt_moderation(struct ena_admin_host_info *p, uint32_t val) +{ + p->driver_supported_features |= (val << ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_SHIFT) & ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_MASK; +} + +static inline uint32_t get_ena_admin_host_info_map_rx_buf_bidirectional(const struct ena_admin_host_info *p) +{ + return (p->driver_supported_features & ENA_ADMIN_HOST_INFO_MAP_RX_BUF_BIDIRECTIONAL_MASK) >> ENA_ADMIN_HOST_INFO_MAP_RX_BUF_BIDIRECTIONAL_SHIFT; +} + +static inline void set_ena_admin_host_info_map_rx_buf_bidirectional(struct ena_admin_host_info *p, uint32_t val) +{ + p->driver_supported_features |= (val << ENA_ADMIN_HOST_INFO_MAP_RX_BUF_BIDIRECTIONAL_SHIFT) & ENA_ADMIN_HOST_INFO_MAP_RX_BUF_BIDIRECTIONAL_MASK; +} + +static inline uint8_t get_ena_admin_feature_rss_ind_table_one_entry_update(const struct ena_admin_feature_rss_ind_table *p) +{ + return p->flags & ENA_ADMIN_FEATURE_RSS_IND_TABLE_ONE_ENTRY_UPDATE_MASK; +} + +static inline void set_ena_admin_feature_rss_ind_table_one_entry_update(struct ena_admin_feature_rss_ind_table *p, uint8_t val) +{ + p->flags |= val & ENA_ADMIN_FEATURE_RSS_IND_TABLE_ONE_ENTRY_UPDATE_MASK; +} + +static inline uint8_t get_ena_admin_aenq_common_desc_phase(const struct ena_admin_aenq_common_desc *p) +{ + return p->flags & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK; +} + +static inline void set_ena_admin_aenq_common_desc_phase(struct ena_admin_aenq_common_desc *p, uint8_t val) +{ + p->flags |= val & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK; +} + +static inline uint32_t get_ena_admin_aenq_link_change_desc_link_status(const struct ena_admin_aenq_link_change_desc *p) +{ + return p->flags & ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK; +} + +static inline void set_ena_admin_aenq_link_change_desc_link_status(struct ena_admin_aenq_link_change_desc *p, uint32_t val) +{ + p->flags |= val & ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK; +} + +#endif /* !defined(DEFS_LINUX_MAINLINE) */ +#endif /* _ENA_ADMIN_H_ */ diff --git a/src/spdk/dpdk/drivers/net/ena/base/ena_defs/ena_common_defs.h b/src/spdk/dpdk/drivers/net/ena/base/ena_defs/ena_common_defs.h new file mode 100644 index 000000000..d1ee40de3 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ena/base/ena_defs/ena_common_defs.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates. + * All rights reserved. + */ + +#ifndef _ENA_COMMON_H_ +#define _ENA_COMMON_H_ + +#define ENA_COMMON_SPEC_VERSION_MAJOR 2 +#define ENA_COMMON_SPEC_VERSION_MINOR 0 + +/* ENA operates with 48-bit memory addresses. 
ena_mem_addr_t */ +struct ena_common_mem_addr { + uint32_t mem_addr_low; + + uint16_t mem_addr_high; + + /* MBZ */ + uint16_t reserved16; +}; + +#endif /* _ENA_COMMON_H_ */ diff --git a/src/spdk/dpdk/drivers/net/ena/base/ena_defs/ena_eth_io_defs.h b/src/spdk/dpdk/drivers/net/ena/base/ena_defs/ena_eth_io_defs.h new file mode 100644 index 000000000..108bed852 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ena/base/ena_defs/ena_eth_io_defs.h @@ -0,0 +1,943 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2015-2019 Amazon.com, Inc. or its affiliates. + * All rights reserved. + */ + +#ifndef _ENA_ETH_IO_H_ +#define _ENA_ETH_IO_H_ + +enum ena_eth_io_l3_proto_index { + ENA_ETH_IO_L3_PROTO_UNKNOWN = 0, + ENA_ETH_IO_L3_PROTO_IPV4 = 8, + ENA_ETH_IO_L3_PROTO_IPV6 = 11, + ENA_ETH_IO_L3_PROTO_FCOE = 21, + ENA_ETH_IO_L3_PROTO_ROCE = 22, +}; + +enum ena_eth_io_l4_proto_index { + ENA_ETH_IO_L4_PROTO_UNKNOWN = 0, + ENA_ETH_IO_L4_PROTO_TCP = 12, + ENA_ETH_IO_L4_PROTO_UDP = 13, + ENA_ETH_IO_L4_PROTO_ROUTEABLE_ROCE = 23, +}; + +struct ena_eth_io_tx_desc { + /* 15:0 : length - Buffer length in bytes, must + * include any packet trailers that the ENA is supposed + * to update like End-to-End CRC, Authentication GMAC + * etc. This length must not include the + * 'Push_Buffer' length. This length must not include + * the 4 bytes added at the end for the 802.3 Ethernet FCS + * 21:16 : req_id_hi - Request ID[15:10] + * 22 : reserved22 - MBZ + * 23 : meta_desc - MBZ + * 24 : phase + * 25 : reserved1 - MBZ + * 26 : first - Indicates first descriptor in + * transaction + * 27 : last - Indicates last descriptor in + * transaction + * 28 : comp_req - Indicates whether a completion + * should be posted after the packet is transmitted. + * Valid only for first descriptor + * 30:29 : reserved29 - MBZ + * 31 : reserved31 - MBZ + */ + uint32_t len_ctrl; + + /* 3:0 : l3_proto_idx - L3 protocol. This field is + * required when l3_csum_en, l3_csum or tso_en are set. + * 4 : DF - IPv4 DF, must be 0 if the packet is IPv4 and + * the DF flag of the IPv4 header is 0. Otherwise must + * be set to 1 + * 6:5 : reserved5 + * 7 : tso_en - Enable TSO, for TCP only. + * 12:8 : l4_proto_idx - L4 protocol. This field needs + * to be set when l4_csum_en or tso_en are set. + * 13 : l3_csum_en - enable IPv4 header checksum. + * 14 : l4_csum_en - enable TCP/UDP checksum. + * 15 : ethernet_fcs_dis - when set, the controller + * will not append the 802.3 Ethernet Frame Check + * Sequence to the packet + * 16 : reserved16 + * 17 : l4_csum_partial - L4 partial checksum. When + * set to 0, the ENA calculates the L4 checksum, + * where the Destination Address required for the + * TCP/UDP pseudo-header is taken from the actual + * packet L3 header. When set to 1, the ENA doesn't + * calculate the sum of the pseudo-header; instead, + * the checksum field of the L4 header is used. When + * TSO is enabled, the checksum of the pseudo-header + * must not include the TCP length field. L4 partial + * checksum should be used for IPv6 packets that + * contain Routing Headers. + * 20:18 : reserved18 - MBZ + * 21 : reserved21 - MBZ + * 31:22 : req_id_lo - Request ID[9:0] + */ + uint32_t meta_ctrl; + + uint32_t buff_addr_lo; + + /* address high and header size + * 15:0 : addr_hi - Buffer Pointer[47:32] + * 23:16 : reserved16_w2 + * 31:24 : header_length - Header length. For Low + * Latency Queues, this field indicates the number + * of bytes written to the headers' memory. 
For + * normal queues, if the packet is TCP or UDP, and longer + * than max_header_size, then this field should be + * set to the sum of L4 header offset and L4 header + * size (without options), otherwise, this field + * should be set to 0. For both modes, this field + * must not exceed the max_header_size. + * max_header_size value is reported by the Max + * Queues Feature descriptor + */ + uint32_t buff_addr_hi_hdr_sz; +}; + +struct ena_eth_io_tx_meta_desc { + /* 9:0 : req_id_lo - Request ID[9:0] + * 11:10 : reserved10 - MBZ + * 12 : reserved12 - MBZ + * 13 : reserved13 - MBZ + * 14 : ext_valid - if set, offset fields in Word2 + * are valid. Also MSS High in Word 0 and bits [31:24] + * in Word 3 + * 15 : reserved15 + * 19:16 : mss_hi + * 20 : eth_meta_type - 0: Tx Metadata Descriptor, 1: + * Extended Metadata Descriptor + * 21 : meta_store - Store extended metadata in queue + * cache + * 22 : reserved22 - MBZ + * 23 : meta_desc - MBO + * 24 : phase + * 25 : reserved25 - MBZ + * 26 : first - Indicates first descriptor in + * transaction + * 27 : last - Indicates last descriptor in + * transaction + * 28 : comp_req - Indicates whether a completion + * should be posted after the packet is transmitted. + * Valid only for first descriptor + * 30:29 : reserved29 - MBZ + * 31 : reserved31 - MBZ + */ + uint32_t len_ctrl; + + /* 5:0 : req_id_hi + * 31:6 : reserved6 - MBZ + */ + uint32_t word1; + + /* 7:0 : l3_hdr_len + * 15:8 : l3_hdr_off + * 21:16 : l4_hdr_len_in_words - counts the L4 header + * length in words. There is an explicit assumption + * that the L4 header appears right after the L3 header and + * the L4 offset is based on l3_hdr_off+l3_hdr_len + * 31:22 : mss_lo + */ + uint32_t word2; + + uint32_t reserved; +}; + +struct ena_eth_io_tx_cdesc { + /* Request ID[15:0] */ + uint16_t req_id; + + uint8_t status; + + /* flags + * 0 : phase + * 7:1 : reserved1 + */ + uint8_t flags; + + uint16_t sub_qid; + + uint16_t sq_head_idx; +}; + +struct ena_eth_io_rx_desc { + /* In bytes. 0 means 64KB */ + uint16_t length; + + /* MBZ */ + uint8_t reserved2; + + /* 0 : phase + * 1 : reserved1 - MBZ + * 2 : first - Indicates first descriptor in + * transaction + * 3 : last - Indicates last descriptor in transaction + * 4 : comp_req + * 5 : reserved5 - MBO + * 7:6 : reserved6 - MBZ + */ + uint8_t ctrl; + + uint16_t req_id; + + /* MBZ */ + uint16_t reserved6; + + uint32_t buff_addr_lo; + + uint16_t buff_addr_hi; + + /* MBZ */ + uint16_t reserved16_w3; +}; + +/* 4-word format. Note: all Ethernet parsing information is valid only when + * last=1 + */ +struct ena_eth_io_rx_cdesc_base { + /* 4:0 : l3_proto_idx + * 6:5 : src_vlan_cnt + * 7 : reserved7 - MBZ + * 12:8 : l4_proto_idx + * 13 : l3_csum_err - when set, either an L3 + * checksum error was detected, or the controller didn't + * validate the checksum. This bit is valid only when + * l3_proto_idx indicates an IPv4 packet + * 14 : l4_csum_err - when set, either an L4 + * checksum error was detected, or the controller didn't + * validate the checksum. This bit is valid only when + * l4_proto_idx indicates a TCP/UDP packet, and + * ipv4_frag is not set. This bit is valid only when + * l4_csum_checked below is set. 
+ * 15 : ipv4_frag - Indicates IPv4 fragmented packet + * 16 : l4_csum_checked - L4 checksum was verified + * (could be OK or error), when cleared the status of + * checksum is unknown + * 23:17 : reserved17 - MBZ + * 24 : phase + * 25 : l3_csum2 - second checksum engine result + * 26 : first - Indicates first descriptor in + * transaction + * 27 : last - Indicates last descriptor in + * transaction + * 29:28 : reserved28 + * 30 : buffer - 0: Metadata descriptor. 1: Buffer + * Descriptor was used + * 31 : reserved31 + */ + uint32_t status; + + uint16_t length; + + uint16_t req_id; + + /* 32-bit hash result */ + uint32_t hash; + + uint16_t sub_qid; + + uint8_t offset; + + uint8_t reserved; +}; + +/* 8-word format */ +struct ena_eth_io_rx_cdesc_ext { + struct ena_eth_io_rx_cdesc_base base; + + uint32_t buff_addr_lo; + + uint16_t buff_addr_hi; + + uint16_t reserved16; + + uint32_t reserved_w6; + + uint32_t reserved_w7; +}; + +struct ena_eth_io_intr_reg { + /* 14:0 : rx_intr_delay + * 29:15 : tx_intr_delay + * 30 : intr_unmask + * 31 : reserved + */ + uint32_t intr_control; +}; + +struct ena_eth_io_numa_node_cfg_reg { + /* 7:0 : numa + * 30:8 : reserved + * 31 : enabled + */ + uint32_t numa_cfg; +}; + +/* tx_desc */ +#define ENA_ETH_IO_TX_DESC_LENGTH_MASK GENMASK(15, 0) +#define ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT 16 +#define ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK GENMASK(21, 16) +#define ENA_ETH_IO_TX_DESC_META_DESC_SHIFT 23 +#define ENA_ETH_IO_TX_DESC_META_DESC_MASK BIT(23) +#define ENA_ETH_IO_TX_DESC_PHASE_SHIFT 24 +#define ENA_ETH_IO_TX_DESC_PHASE_MASK BIT(24) +#define ENA_ETH_IO_TX_DESC_FIRST_SHIFT 26 +#define ENA_ETH_IO_TX_DESC_FIRST_MASK BIT(26) +#define ENA_ETH_IO_TX_DESC_LAST_SHIFT 27 +#define ENA_ETH_IO_TX_DESC_LAST_MASK BIT(27) +#define ENA_ETH_IO_TX_DESC_COMP_REQ_SHIFT 28 +#define ENA_ETH_IO_TX_DESC_COMP_REQ_MASK BIT(28) +#define ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK GENMASK(3, 0) +#define ENA_ETH_IO_TX_DESC_DF_SHIFT 4 +#define ENA_ETH_IO_TX_DESC_DF_MASK BIT(4) +#define ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT 7 +#define ENA_ETH_IO_TX_DESC_TSO_EN_MASK BIT(7) +#define ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT 8 +#define ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK GENMASK(12, 8) +#define ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT 13 +#define ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK BIT(13) +#define ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT 14 +#define ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK BIT(14) +#define ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_SHIFT 15 +#define ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_MASK BIT(15) +#define ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT 17 +#define ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK BIT(17) +#define ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT 22 +#define ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK GENMASK(31, 22) +#define ENA_ETH_IO_TX_DESC_ADDR_HI_MASK GENMASK(15, 0) +#define ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT 24 +#define ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK GENMASK(31, 24) + +/* tx_meta_desc */ +#define ENA_ETH_IO_TX_META_DESC_REQ_ID_LO_MASK GENMASK(9, 0) +#define ENA_ETH_IO_TX_META_DESC_EXT_VALID_SHIFT 14 +#define ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK BIT(14) +#define ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT 16 +#define ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK GENMASK(19, 16) +#define ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_SHIFT 20 +#define ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK BIT(20) +#define ENA_ETH_IO_TX_META_DESC_META_STORE_SHIFT 21 +#define ENA_ETH_IO_TX_META_DESC_META_STORE_MASK BIT(21) +#define ENA_ETH_IO_TX_META_DESC_META_DESC_SHIFT 23 +#define ENA_ETH_IO_TX_META_DESC_META_DESC_MASK BIT(23) +#define 
ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT 24 +#define ENA_ETH_IO_TX_META_DESC_PHASE_MASK BIT(24) +#define ENA_ETH_IO_TX_META_DESC_FIRST_SHIFT 26 +#define ENA_ETH_IO_TX_META_DESC_FIRST_MASK BIT(26) +#define ENA_ETH_IO_TX_META_DESC_LAST_SHIFT 27 +#define ENA_ETH_IO_TX_META_DESC_LAST_MASK BIT(27) +#define ENA_ETH_IO_TX_META_DESC_COMP_REQ_SHIFT 28 +#define ENA_ETH_IO_TX_META_DESC_COMP_REQ_MASK BIT(28) +#define ENA_ETH_IO_TX_META_DESC_REQ_ID_HI_MASK GENMASK(5, 0) +#define ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK GENMASK(7, 0) +#define ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT 8 +#define ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK GENMASK(15, 8) +#define ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT 16 +#define ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK GENMASK(21, 16) +#define ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT 22 +#define ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK GENMASK(31, 22) + +/* tx_cdesc */ +#define ENA_ETH_IO_TX_CDESC_PHASE_MASK BIT(0) + +/* rx_desc */ +#define ENA_ETH_IO_RX_DESC_PHASE_MASK BIT(0) +#define ENA_ETH_IO_RX_DESC_FIRST_SHIFT 2 +#define ENA_ETH_IO_RX_DESC_FIRST_MASK BIT(2) +#define ENA_ETH_IO_RX_DESC_LAST_SHIFT 3 +#define ENA_ETH_IO_RX_DESC_LAST_MASK BIT(3) +#define ENA_ETH_IO_RX_DESC_COMP_REQ_SHIFT 4 +#define ENA_ETH_IO_RX_DESC_COMP_REQ_MASK BIT(4) + +/* rx_cdesc_base */ +#define ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK GENMASK(4, 0) +#define ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_SHIFT 5 +#define ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_MASK GENMASK(6, 5) +#define ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT 8 +#define ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK GENMASK(12, 8) +#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT 13 +#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK BIT(13) +#define ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT 14 +#define ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK BIT(14) +#define ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT 15 +#define ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK BIT(15) +#define ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_SHIFT 16 +#define ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_MASK BIT(16) +#define ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT 24 +#define ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK BIT(24) +#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_SHIFT 25 +#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_MASK BIT(25) +#define ENA_ETH_IO_RX_CDESC_BASE_FIRST_SHIFT 26 +#define ENA_ETH_IO_RX_CDESC_BASE_FIRST_MASK BIT(26) +#define ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT 27 +#define ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK BIT(27) +#define ENA_ETH_IO_RX_CDESC_BASE_BUFFER_SHIFT 30 +#define ENA_ETH_IO_RX_CDESC_BASE_BUFFER_MASK BIT(30) + +/* intr_reg */ +#define ENA_ETH_IO_INTR_REG_RX_INTR_DELAY_MASK GENMASK(14, 0) +#define ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_SHIFT 15 +#define ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_MASK GENMASK(29, 15) +#define ENA_ETH_IO_INTR_REG_INTR_UNMASK_SHIFT 30 +#define ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK BIT(30) + +/* numa_node_cfg_reg */ +#define ENA_ETH_IO_NUMA_NODE_CFG_REG_NUMA_MASK GENMASK(7, 0) +#define ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_SHIFT 31 +#define ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK BIT(31) + +#if !defined(DEFS_LINUX_MAINLINE) +static inline uint32_t get_ena_eth_io_tx_desc_length(const struct ena_eth_io_tx_desc *p) +{ + return p->len_ctrl & ENA_ETH_IO_TX_DESC_LENGTH_MASK; +} + +static inline void set_ena_eth_io_tx_desc_length(struct ena_eth_io_tx_desc *p, uint32_t val) +{ + p->len_ctrl |= val & ENA_ETH_IO_TX_DESC_LENGTH_MASK; +} + +static inline uint32_t get_ena_eth_io_tx_desc_req_id_hi(const struct ena_eth_io_tx_desc *p) +{ + return (p->len_ctrl 
& ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK) >> ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT; +} + +static inline void set_ena_eth_io_tx_desc_req_id_hi(struct ena_eth_io_tx_desc *p, uint32_t val) +{ + p->len_ctrl |= (val << ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT) & ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK; +} + +static inline uint32_t get_ena_eth_io_tx_desc_meta_desc(const struct ena_eth_io_tx_desc *p) +{ + return (p->len_ctrl & ENA_ETH_IO_TX_DESC_META_DESC_MASK) >> ENA_ETH_IO_TX_DESC_META_DESC_SHIFT; +} + +static inline void set_ena_eth_io_tx_desc_meta_desc(struct ena_eth_io_tx_desc *p, uint32_t val) +{ + p->len_ctrl |= (val << ENA_ETH_IO_TX_DESC_META_DESC_SHIFT) & ENA_ETH_IO_TX_DESC_META_DESC_MASK; +} + +static inline uint32_t get_ena_eth_io_tx_desc_phase(const struct ena_eth_io_tx_desc *p) +{ + return (p->len_ctrl & ENA_ETH_IO_TX_DESC_PHASE_MASK) >> ENA_ETH_IO_TX_DESC_PHASE_SHIFT; +} + +static inline void set_ena_eth_io_tx_desc_phase(struct ena_eth_io_tx_desc *p, uint32_t val) +{ + p->len_ctrl |= (val << ENA_ETH_IO_TX_DESC_PHASE_SHIFT) & ENA_ETH_IO_TX_DESC_PHASE_MASK; +} + +static inline uint32_t get_ena_eth_io_tx_desc_first(const struct ena_eth_io_tx_desc *p) +{ + return (p->len_ctrl & ENA_ETH_IO_TX_DESC_FIRST_MASK) >> ENA_ETH_IO_TX_DESC_FIRST_SHIFT; +} + +static inline void set_ena_eth_io_tx_desc_first(struct ena_eth_io_tx_desc *p, uint32_t val) +{ + p->len_ctrl |= (val << ENA_ETH_IO_TX_DESC_FIRST_SHIFT) & ENA_ETH_IO_TX_DESC_FIRST_MASK; +} + +static inline uint32_t get_ena_eth_io_tx_desc_last(const struct ena_eth_io_tx_desc *p) +{ + return (p->len_ctrl & ENA_ETH_IO_TX_DESC_LAST_MASK) >> ENA_ETH_IO_TX_DESC_LAST_SHIFT; +} + +static inline void set_ena_eth_io_tx_desc_last(struct ena_eth_io_tx_desc *p, uint32_t val) +{ + p->len_ctrl |= (val << ENA_ETH_IO_TX_DESC_LAST_SHIFT) & ENA_ETH_IO_TX_DESC_LAST_MASK; +} + +static inline uint32_t get_ena_eth_io_tx_desc_comp_req(const struct ena_eth_io_tx_desc *p) +{ + return (p->len_ctrl & ENA_ETH_IO_TX_DESC_COMP_REQ_MASK) >> ENA_ETH_IO_TX_DESC_COMP_REQ_SHIFT; +} + +static inline void set_ena_eth_io_tx_desc_comp_req(struct ena_eth_io_tx_desc *p, uint32_t val) +{ + p->len_ctrl |= (val << ENA_ETH_IO_TX_DESC_COMP_REQ_SHIFT) & ENA_ETH_IO_TX_DESC_COMP_REQ_MASK; +} + +static inline uint32_t get_ena_eth_io_tx_desc_l3_proto_idx(const struct ena_eth_io_tx_desc *p) +{ + return p->meta_ctrl & ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK; +} + +static inline void set_ena_eth_io_tx_desc_l3_proto_idx(struct ena_eth_io_tx_desc *p, uint32_t val) +{ + p->meta_ctrl |= val & ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK; +} + +static inline uint32_t get_ena_eth_io_tx_desc_DF(const struct ena_eth_io_tx_desc *p) +{ + return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_DF_MASK) >> ENA_ETH_IO_TX_DESC_DF_SHIFT; +} + +static inline void set_ena_eth_io_tx_desc_DF(struct ena_eth_io_tx_desc *p, uint32_t val) +{ + p->meta_ctrl |= (val << ENA_ETH_IO_TX_DESC_DF_SHIFT) & ENA_ETH_IO_TX_DESC_DF_MASK; +} + +static inline uint32_t get_ena_eth_io_tx_desc_tso_en(const struct ena_eth_io_tx_desc *p) +{ + return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_TSO_EN_MASK) >> ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT; +} + +static inline void set_ena_eth_io_tx_desc_tso_en(struct ena_eth_io_tx_desc *p, uint32_t val) +{ + p->meta_ctrl |= (val << ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT) & ENA_ETH_IO_TX_DESC_TSO_EN_MASK; +} + +static inline uint32_t get_ena_eth_io_tx_desc_l4_proto_idx(const struct ena_eth_io_tx_desc *p) +{ + return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK) >> ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT; +} + +static inline void 
set_ena_eth_io_tx_desc_l4_proto_idx(struct ena_eth_io_tx_desc *p, uint32_t val) +{ + p->meta_ctrl |= (val << ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT) & ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK; +} + +static inline uint32_t get_ena_eth_io_tx_desc_l3_csum_en(const struct ena_eth_io_tx_desc *p) +{ + return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK) >> ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT; +} + +static inline void set_ena_eth_io_tx_desc_l3_csum_en(struct ena_eth_io_tx_desc *p, uint32_t val) +{ + p->meta_ctrl |= (val << ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT) & ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK; +} + +static inline uint32_t get_ena_eth_io_tx_desc_l4_csum_en(const struct ena_eth_io_tx_desc *p) +{ + return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK) >> ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT; +} + +static inline void set_ena_eth_io_tx_desc_l4_csum_en(struct ena_eth_io_tx_desc *p, uint32_t val) +{ + p->meta_ctrl |= (val << ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT) & ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK; +} + +static inline uint32_t get_ena_eth_io_tx_desc_ethernet_fcs_dis(const struct ena_eth_io_tx_desc *p) +{ + return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_MASK) >> ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_SHIFT; +} + +static inline void set_ena_eth_io_tx_desc_ethernet_fcs_dis(struct ena_eth_io_tx_desc *p, uint32_t val) +{ + p->meta_ctrl |= (val << ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_SHIFT) & ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_MASK; +} + +static inline uint32_t get_ena_eth_io_tx_desc_l4_csum_partial(const struct ena_eth_io_tx_desc *p) +{ + return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK) >> ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT; +} + +static inline void set_ena_eth_io_tx_desc_l4_csum_partial(struct ena_eth_io_tx_desc *p, uint32_t val) +{ + p->meta_ctrl |= (val << ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT) & ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK; +} + +static inline uint32_t get_ena_eth_io_tx_desc_req_id_lo(const struct ena_eth_io_tx_desc *p) +{ + return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK) >> ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT; +} + +static inline void set_ena_eth_io_tx_desc_req_id_lo(struct ena_eth_io_tx_desc *p, uint32_t val) +{ + p->meta_ctrl |= (val << ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT) & ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK; +} + +static inline uint32_t get_ena_eth_io_tx_desc_addr_hi(const struct ena_eth_io_tx_desc *p) +{ + return p->buff_addr_hi_hdr_sz & ENA_ETH_IO_TX_DESC_ADDR_HI_MASK; +} + +static inline void set_ena_eth_io_tx_desc_addr_hi(struct ena_eth_io_tx_desc *p, uint32_t val) +{ + p->buff_addr_hi_hdr_sz |= val & ENA_ETH_IO_TX_DESC_ADDR_HI_MASK; +} + +static inline uint32_t get_ena_eth_io_tx_desc_header_length(const struct ena_eth_io_tx_desc *p) +{ + return (p->buff_addr_hi_hdr_sz & ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK) >> ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT; +} + +static inline void set_ena_eth_io_tx_desc_header_length(struct ena_eth_io_tx_desc *p, uint32_t val) +{ + p->buff_addr_hi_hdr_sz |= (val << ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT) & ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK; +} + +static inline uint32_t get_ena_eth_io_tx_meta_desc_req_id_lo(const struct ena_eth_io_tx_meta_desc *p) +{ + return p->len_ctrl & ENA_ETH_IO_TX_META_DESC_REQ_ID_LO_MASK; +} + +static inline void set_ena_eth_io_tx_meta_desc_req_id_lo(struct ena_eth_io_tx_meta_desc *p, uint32_t val) +{ + p->len_ctrl |= val & ENA_ETH_IO_TX_META_DESC_REQ_ID_LO_MASK; +} + +static inline uint32_t get_ena_eth_io_tx_meta_desc_ext_valid(const struct ena_eth_io_tx_meta_desc *p) +{ + return 
(p->len_ctrl & ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK) >> ENA_ETH_IO_TX_META_DESC_EXT_VALID_SHIFT; +} + +static inline void set_ena_eth_io_tx_meta_desc_ext_valid(struct ena_eth_io_tx_meta_desc *p, uint32_t val) +{ + p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_EXT_VALID_SHIFT) & ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK; +} + +static inline uint32_t get_ena_eth_io_tx_meta_desc_mss_hi(const struct ena_eth_io_tx_meta_desc *p) +{ + return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK) >> ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT; +} + +static inline void set_ena_eth_io_tx_meta_desc_mss_hi(struct ena_eth_io_tx_meta_desc *p, uint32_t val) +{ + p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT) & ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK; +} + +static inline uint32_t get_ena_eth_io_tx_meta_desc_eth_meta_type(const struct ena_eth_io_tx_meta_desc *p) +{ + return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK) >> ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_SHIFT; +} + +static inline void set_ena_eth_io_tx_meta_desc_eth_meta_type(struct ena_eth_io_tx_meta_desc *p, uint32_t val) +{ + p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_SHIFT) & ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK; +} + +static inline uint32_t get_ena_eth_io_tx_meta_desc_meta_store(const struct ena_eth_io_tx_meta_desc *p) +{ + return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_META_STORE_MASK) >> ENA_ETH_IO_TX_META_DESC_META_STORE_SHIFT; +} + +static inline void set_ena_eth_io_tx_meta_desc_meta_store(struct ena_eth_io_tx_meta_desc *p, uint32_t val) +{ + p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_META_STORE_SHIFT) & ENA_ETH_IO_TX_META_DESC_META_STORE_MASK; +} + +static inline uint32_t get_ena_eth_io_tx_meta_desc_meta_desc(const struct ena_eth_io_tx_meta_desc *p) +{ + return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_META_DESC_MASK) >> ENA_ETH_IO_TX_META_DESC_META_DESC_SHIFT; +} + +static inline void set_ena_eth_io_tx_meta_desc_meta_desc(struct ena_eth_io_tx_meta_desc *p, uint32_t val) +{ + p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_META_DESC_SHIFT) & ENA_ETH_IO_TX_META_DESC_META_DESC_MASK; +} + +static inline uint32_t get_ena_eth_io_tx_meta_desc_phase(const struct ena_eth_io_tx_meta_desc *p) +{ + return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_PHASE_MASK) >> ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT; +} + +static inline void set_ena_eth_io_tx_meta_desc_phase(struct ena_eth_io_tx_meta_desc *p, uint32_t val) +{ + p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT) & ENA_ETH_IO_TX_META_DESC_PHASE_MASK; +} + +static inline uint32_t get_ena_eth_io_tx_meta_desc_first(const struct ena_eth_io_tx_meta_desc *p) +{ + return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_FIRST_MASK) >> ENA_ETH_IO_TX_META_DESC_FIRST_SHIFT; +} + +static inline void set_ena_eth_io_tx_meta_desc_first(struct ena_eth_io_tx_meta_desc *p, uint32_t val) +{ + p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_FIRST_SHIFT) & ENA_ETH_IO_TX_META_DESC_FIRST_MASK; +} + +static inline uint32_t get_ena_eth_io_tx_meta_desc_last(const struct ena_eth_io_tx_meta_desc *p) +{ + return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_LAST_MASK) >> ENA_ETH_IO_TX_META_DESC_LAST_SHIFT; +} + +static inline void set_ena_eth_io_tx_meta_desc_last(struct ena_eth_io_tx_meta_desc *p, uint32_t val) +{ + p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_LAST_SHIFT) & ENA_ETH_IO_TX_META_DESC_LAST_MASK; +} + +static inline uint32_t get_ena_eth_io_tx_meta_desc_comp_req(const struct ena_eth_io_tx_meta_desc *p) +{ + return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_COMP_REQ_MASK) >> 
ENA_ETH_IO_TX_META_DESC_COMP_REQ_SHIFT; +} + +static inline void set_ena_eth_io_tx_meta_desc_comp_req(struct ena_eth_io_tx_meta_desc *p, uint32_t val) +{ + p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_COMP_REQ_SHIFT) & ENA_ETH_IO_TX_META_DESC_COMP_REQ_MASK; +} + +static inline uint32_t get_ena_eth_io_tx_meta_desc_req_id_hi(const struct ena_eth_io_tx_meta_desc *p) +{ + return p->word1 & ENA_ETH_IO_TX_META_DESC_REQ_ID_HI_MASK; +} + +static inline void set_ena_eth_io_tx_meta_desc_req_id_hi(struct ena_eth_io_tx_meta_desc *p, uint32_t val) +{ + p->word1 |= val & ENA_ETH_IO_TX_META_DESC_REQ_ID_HI_MASK; +} + +static inline uint32_t get_ena_eth_io_tx_meta_desc_l3_hdr_len(const struct ena_eth_io_tx_meta_desc *p) +{ + return p->word2 & ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK; +} + +static inline void set_ena_eth_io_tx_meta_desc_l3_hdr_len(struct ena_eth_io_tx_meta_desc *p, uint32_t val) +{ + p->word2 |= val & ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK; +} + +static inline uint32_t get_ena_eth_io_tx_meta_desc_l3_hdr_off(const struct ena_eth_io_tx_meta_desc *p) +{ + return (p->word2 & ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK) >> ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT; +} + +static inline void set_ena_eth_io_tx_meta_desc_l3_hdr_off(struct ena_eth_io_tx_meta_desc *p, uint32_t val) +{ + p->word2 |= (val << ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT) & ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK; +} + +static inline uint32_t get_ena_eth_io_tx_meta_desc_l4_hdr_len_in_words(const struct ena_eth_io_tx_meta_desc *p) +{ + return (p->word2 & ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK) >> ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT; +} + +static inline void set_ena_eth_io_tx_meta_desc_l4_hdr_len_in_words(struct ena_eth_io_tx_meta_desc *p, uint32_t val) +{ + p->word2 |= (val << ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT) & ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK; +} + +static inline uint32_t get_ena_eth_io_tx_meta_desc_mss_lo(const struct ena_eth_io_tx_meta_desc *p) +{ + return (p->word2 & ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK) >> ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT; +} + +static inline void set_ena_eth_io_tx_meta_desc_mss_lo(struct ena_eth_io_tx_meta_desc *p, uint32_t val) +{ + p->word2 |= (val << ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT) & ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK; +} + +static inline uint8_t get_ena_eth_io_tx_cdesc_phase(const struct ena_eth_io_tx_cdesc *p) +{ + return p->flags & ENA_ETH_IO_TX_CDESC_PHASE_MASK; +} + +static inline void set_ena_eth_io_tx_cdesc_phase(struct ena_eth_io_tx_cdesc *p, uint8_t val) +{ + p->flags |= val & ENA_ETH_IO_TX_CDESC_PHASE_MASK; +} + +static inline uint8_t get_ena_eth_io_rx_desc_phase(const struct ena_eth_io_rx_desc *p) +{ + return p->ctrl & ENA_ETH_IO_RX_DESC_PHASE_MASK; +} + +static inline void set_ena_eth_io_rx_desc_phase(struct ena_eth_io_rx_desc *p, uint8_t val) +{ + p->ctrl |= val & ENA_ETH_IO_RX_DESC_PHASE_MASK; +} + +static inline uint8_t get_ena_eth_io_rx_desc_first(const struct ena_eth_io_rx_desc *p) +{ + return (p->ctrl & ENA_ETH_IO_RX_DESC_FIRST_MASK) >> ENA_ETH_IO_RX_DESC_FIRST_SHIFT; +} + +static inline void set_ena_eth_io_rx_desc_first(struct ena_eth_io_rx_desc *p, uint8_t val) +{ + p->ctrl |= (val << ENA_ETH_IO_RX_DESC_FIRST_SHIFT) & ENA_ETH_IO_RX_DESC_FIRST_MASK; +} + +static inline uint8_t get_ena_eth_io_rx_desc_last(const struct ena_eth_io_rx_desc *p) +{ + return (p->ctrl & ENA_ETH_IO_RX_DESC_LAST_MASK) >> ENA_ETH_IO_RX_DESC_LAST_SHIFT; +} + +static inline void set_ena_eth_io_rx_desc_last(struct ena_eth_io_rx_desc *p, uint8_t val) 
+{ + p->ctrl |= (val << ENA_ETH_IO_RX_DESC_LAST_SHIFT) & ENA_ETH_IO_RX_DESC_LAST_MASK; +} + +static inline uint8_t get_ena_eth_io_rx_desc_comp_req(const struct ena_eth_io_rx_desc *p) +{ + return (p->ctrl & ENA_ETH_IO_RX_DESC_COMP_REQ_MASK) >> ENA_ETH_IO_RX_DESC_COMP_REQ_SHIFT; +} + +static inline void set_ena_eth_io_rx_desc_comp_req(struct ena_eth_io_rx_desc *p, uint8_t val) +{ + p->ctrl |= (val << ENA_ETH_IO_RX_DESC_COMP_REQ_SHIFT) & ENA_ETH_IO_RX_DESC_COMP_REQ_MASK; +} + +static inline uint32_t get_ena_eth_io_rx_cdesc_base_l3_proto_idx(const struct ena_eth_io_rx_cdesc_base *p) +{ + return p->status & ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK; +} + +static inline void set_ena_eth_io_rx_cdesc_base_l3_proto_idx(struct ena_eth_io_rx_cdesc_base *p, uint32_t val) +{ + p->status |= val & ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK; +} + +static inline uint32_t get_ena_eth_io_rx_cdesc_base_src_vlan_cnt(const struct ena_eth_io_rx_cdesc_base *p) +{ + return (p->status & ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_SHIFT; +} + +static inline void set_ena_eth_io_rx_cdesc_base_src_vlan_cnt(struct ena_eth_io_rx_cdesc_base *p, uint32_t val) +{ + p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_MASK; +} + +static inline uint32_t get_ena_eth_io_rx_cdesc_base_l4_proto_idx(const struct ena_eth_io_rx_cdesc_base *p) +{ + return (p->status & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT; +} + +static inline void set_ena_eth_io_rx_cdesc_base_l4_proto_idx(struct ena_eth_io_rx_cdesc_base *p, uint32_t val) +{ + p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK; +} + +static inline uint32_t get_ena_eth_io_rx_cdesc_base_l3_csum_err(const struct ena_eth_io_rx_cdesc_base *p) +{ + return (p->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT; +} + +static inline void set_ena_eth_io_rx_cdesc_base_l3_csum_err(struct ena_eth_io_rx_cdesc_base *p, uint32_t val) +{ + p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK; +} + +static inline uint32_t get_ena_eth_io_rx_cdesc_base_l4_csum_err(const struct ena_eth_io_rx_cdesc_base *p) +{ + return (p->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT; +} + +static inline void set_ena_eth_io_rx_cdesc_base_l4_csum_err(struct ena_eth_io_rx_cdesc_base *p, uint32_t val) +{ + p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK; +} + +static inline uint32_t get_ena_eth_io_rx_cdesc_base_ipv4_frag(const struct ena_eth_io_rx_cdesc_base *p) +{ + return (p->status & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT; +} + +static inline void set_ena_eth_io_rx_cdesc_base_ipv4_frag(struct ena_eth_io_rx_cdesc_base *p, uint32_t val) +{ + p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK; +} + +static inline uint32_t get_ena_eth_io_rx_cdesc_base_l4_csum_checked(const struct ena_eth_io_rx_cdesc_base *p) +{ + return (p->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_SHIFT; +} + +static inline void set_ena_eth_io_rx_cdesc_base_l4_csum_checked(struct ena_eth_io_rx_cdesc_base *p, uint32_t val) +{ + p->status |= (val << 
ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_MASK; +} + +static inline uint32_t get_ena_eth_io_rx_cdesc_base_phase(const struct ena_eth_io_rx_cdesc_base *p) +{ + return (p->status & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT; +} + +static inline void set_ena_eth_io_rx_cdesc_base_phase(struct ena_eth_io_rx_cdesc_base *p, uint32_t val) +{ + p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK; +} + +static inline uint32_t get_ena_eth_io_rx_cdesc_base_l3_csum2(const struct ena_eth_io_rx_cdesc_base *p) +{ + return (p->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_SHIFT; +} + +static inline void set_ena_eth_io_rx_cdesc_base_l3_csum2(struct ena_eth_io_rx_cdesc_base *p, uint32_t val) +{ + p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_MASK; +} + +static inline uint32_t get_ena_eth_io_rx_cdesc_base_first(const struct ena_eth_io_rx_cdesc_base *p) +{ + return (p->status & ENA_ETH_IO_RX_CDESC_BASE_FIRST_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_FIRST_SHIFT; +} + +static inline void set_ena_eth_io_rx_cdesc_base_first(struct ena_eth_io_rx_cdesc_base *p, uint32_t val) +{ + p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_FIRST_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_FIRST_MASK; +} + +static inline uint32_t get_ena_eth_io_rx_cdesc_base_last(const struct ena_eth_io_rx_cdesc_base *p) +{ + return (p->status & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT; +} + +static inline void set_ena_eth_io_rx_cdesc_base_last(struct ena_eth_io_rx_cdesc_base *p, uint32_t val) +{ + p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK; +} + +static inline uint32_t get_ena_eth_io_rx_cdesc_base_buffer(const struct ena_eth_io_rx_cdesc_base *p) +{ + return (p->status & ENA_ETH_IO_RX_CDESC_BASE_BUFFER_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_BUFFER_SHIFT; +} + +static inline void set_ena_eth_io_rx_cdesc_base_buffer(struct ena_eth_io_rx_cdesc_base *p, uint32_t val) +{ + p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_BUFFER_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_BUFFER_MASK; +} + +static inline uint32_t get_ena_eth_io_intr_reg_rx_intr_delay(const struct ena_eth_io_intr_reg *p) +{ + return p->intr_control & ENA_ETH_IO_INTR_REG_RX_INTR_DELAY_MASK; +} + +static inline void set_ena_eth_io_intr_reg_rx_intr_delay(struct ena_eth_io_intr_reg *p, uint32_t val) +{ + p->intr_control |= val & ENA_ETH_IO_INTR_REG_RX_INTR_DELAY_MASK; +} + +static inline uint32_t get_ena_eth_io_intr_reg_tx_intr_delay(const struct ena_eth_io_intr_reg *p) +{ + return (p->intr_control & ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_MASK) >> ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_SHIFT; +} + +static inline void set_ena_eth_io_intr_reg_tx_intr_delay(struct ena_eth_io_intr_reg *p, uint32_t val) +{ + p->intr_control |= (val << ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_SHIFT) & ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_MASK; +} + +static inline uint32_t get_ena_eth_io_intr_reg_intr_unmask(const struct ena_eth_io_intr_reg *p) +{ + return (p->intr_control & ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK) >> ENA_ETH_IO_INTR_REG_INTR_UNMASK_SHIFT; +} + +static inline void set_ena_eth_io_intr_reg_intr_unmask(struct ena_eth_io_intr_reg *p, uint32_t val) +{ + p->intr_control |= (val << ENA_ETH_IO_INTR_REG_INTR_UNMASK_SHIFT) & ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK; +} + +static inline uint32_t get_ena_eth_io_numa_node_cfg_reg_numa(const struct 
ena_eth_io_numa_node_cfg_reg *p) +{ + return p->numa_cfg & ENA_ETH_IO_NUMA_NODE_CFG_REG_NUMA_MASK; +} + +static inline void set_ena_eth_io_numa_node_cfg_reg_numa(struct ena_eth_io_numa_node_cfg_reg *p, uint32_t val) +{ + p->numa_cfg |= val & ENA_ETH_IO_NUMA_NODE_CFG_REG_NUMA_MASK; +} + +static inline uint32_t get_ena_eth_io_numa_node_cfg_reg_enabled(const struct ena_eth_io_numa_node_cfg_reg *p) +{ + return (p->numa_cfg & ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK) >> ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_SHIFT; +} + +static inline void set_ena_eth_io_numa_node_cfg_reg_enabled(struct ena_eth_io_numa_node_cfg_reg *p, uint32_t val) +{ + p->numa_cfg |= (val << ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_SHIFT) & ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK; +} + +#endif /* !defined(DEFS_LINUX_MAINLINE) */ +#endif /* _ENA_ETH_IO_H_ */ diff --git a/src/spdk/dpdk/drivers/net/ena/base/ena_defs/ena_gen_info.h b/src/spdk/dpdk/drivers/net/ena/base/ena_defs/ena_gen_info.h new file mode 100644 index 000000000..f486e9fe6 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ena/base/ena_defs/ena_gen_info.h @@ -0,0 +1,7 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates. + * All rights reserved. + */ + +#define ENA_GEN_DATE "Wed Sep 25 11:32:57 UTC 2019" +#define ENA_GEN_COMMIT "952697a9e0d3" diff --git a/src/spdk/dpdk/drivers/net/ena/base/ena_defs/ena_includes.h b/src/spdk/dpdk/drivers/net/ena/base/ena_defs/ena_includes.h new file mode 100644 index 000000000..20dba04d5 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ena/base/ena_defs/ena_includes.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2015-2019 Amazon.com, Inc. or its affiliates. + * All rights reserved. + */ + +#include "ena_common_defs.h" +#include "ena_regs_defs.h" +#include "ena_admin_defs.h" +#include "ena_eth_io_defs.h" diff --git a/src/spdk/dpdk/drivers/net/ena/base/ena_defs/ena_regs_defs.h b/src/spdk/dpdk/drivers/net/ena/base/ena_defs/ena_regs_defs.h new file mode 100644 index 000000000..2d6bf5486 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ena/base/ena_defs/ena_regs_defs.h @@ -0,0 +1,132 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2015-2019 Amazon.com, Inc. or its affiliates. + * All rights reserved. 
+ */ + +#ifndef _ENA_REGS_H_ +#define _ENA_REGS_H_ + +enum ena_regs_reset_reason_types { + ENA_REGS_RESET_NORMAL = 0, + ENA_REGS_RESET_KEEP_ALIVE_TO = 1, + ENA_REGS_RESET_ADMIN_TO = 2, + ENA_REGS_RESET_MISS_TX_CMPL = 3, + ENA_REGS_RESET_INV_RX_REQ_ID = 4, + ENA_REGS_RESET_INV_TX_REQ_ID = 5, + ENA_REGS_RESET_TOO_MANY_RX_DESCS = 6, + ENA_REGS_RESET_INIT_ERR = 7, + ENA_REGS_RESET_DRIVER_INVALID_STATE = 8, + ENA_REGS_RESET_OS_TRIGGER = 9, + ENA_REGS_RESET_OS_NETDEV_WD = 10, + ENA_REGS_RESET_SHUTDOWN = 11, + ENA_REGS_RESET_USER_TRIGGER = 12, + ENA_REGS_RESET_GENERIC = 13, + ENA_REGS_RESET_MISS_INTERRUPT = 14, + ENA_REGS_RESET_LAST, +}; + +/* ena_registers offsets */ + +/* 0 base */ +#define ENA_REGS_VERSION_OFF 0x0 +#define ENA_REGS_CONTROLLER_VERSION_OFF 0x4 +#define ENA_REGS_CAPS_OFF 0x8 +#define ENA_REGS_CAPS_EXT_OFF 0xc +#define ENA_REGS_AQ_BASE_LO_OFF 0x10 +#define ENA_REGS_AQ_BASE_HI_OFF 0x14 +#define ENA_REGS_AQ_CAPS_OFF 0x18 +#define ENA_REGS_ACQ_BASE_LO_OFF 0x20 +#define ENA_REGS_ACQ_BASE_HI_OFF 0x24 +#define ENA_REGS_ACQ_CAPS_OFF 0x28 +#define ENA_REGS_AQ_DB_OFF 0x2c +#define ENA_REGS_ACQ_TAIL_OFF 0x30 +#define ENA_REGS_AENQ_CAPS_OFF 0x34 +#define ENA_REGS_AENQ_BASE_LO_OFF 0x38 +#define ENA_REGS_AENQ_BASE_HI_OFF 0x3c +#define ENA_REGS_AENQ_HEAD_DB_OFF 0x40 +#define ENA_REGS_AENQ_TAIL_OFF 0x44 +#define ENA_REGS_INTR_MASK_OFF 0x4c +#define ENA_REGS_DEV_CTL_OFF 0x54 +#define ENA_REGS_DEV_STS_OFF 0x58 +#define ENA_REGS_MMIO_REG_READ_OFF 0x5c +#define ENA_REGS_MMIO_RESP_LO_OFF 0x60 +#define ENA_REGS_MMIO_RESP_HI_OFF 0x64 +#define ENA_REGS_RSS_IND_ENTRY_UPDATE_OFF 0x68 + +/* version register */ +#define ENA_REGS_VERSION_MINOR_VERSION_MASK 0xff +#define ENA_REGS_VERSION_MAJOR_VERSION_SHIFT 8 +#define ENA_REGS_VERSION_MAJOR_VERSION_MASK 0xff00 + +/* controller_version register */ +#define ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK 0xff +#define ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT 8 +#define ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK 0xff00 +#define ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT 16 +#define ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK 0xff0000 +#define ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT 24 +#define ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK 0xff000000 + +/* caps register */ +#define ENA_REGS_CAPS_CONTIGUOUS_QUEUE_REQUIRED_MASK 0x1 +#define ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT 1 +#define ENA_REGS_CAPS_RESET_TIMEOUT_MASK 0x3e +#define ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT 8 +#define ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK 0xff00 +#define ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT 16 +#define ENA_REGS_CAPS_ADMIN_CMD_TO_MASK 0xf0000 + +/* aq_caps register */ +#define ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK 0xffff +#define ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT 16 +#define ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK 0xffff0000 + +/* acq_caps register */ +#define ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK 0xffff +#define ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT 16 +#define ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK 0xffff0000 + +/* aenq_caps register */ +#define ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK 0xffff +#define ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT 16 +#define ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK 0xffff0000 + +/* dev_ctl register */ +#define ENA_REGS_DEV_CTL_DEV_RESET_MASK 0x1 +#define ENA_REGS_DEV_CTL_AQ_RESTART_SHIFT 1 +#define ENA_REGS_DEV_CTL_AQ_RESTART_MASK 0x2 +#define ENA_REGS_DEV_CTL_QUIESCENT_SHIFT 2 +#define ENA_REGS_DEV_CTL_QUIESCENT_MASK 0x4 +#define ENA_REGS_DEV_CTL_IO_RESUME_SHIFT 3 +#define ENA_REGS_DEV_CTL_IO_RESUME_MASK 0x8 +#define ENA_REGS_DEV_CTL_RESET_REASON_SHIFT 28 +#define 
ENA_REGS_DEV_CTL_RESET_REASON_MASK 0xf0000000 + +/* dev_sts register */ +#define ENA_REGS_DEV_STS_READY_MASK 0x1 +#define ENA_REGS_DEV_STS_AQ_RESTART_IN_PROGRESS_SHIFT 1 +#define ENA_REGS_DEV_STS_AQ_RESTART_IN_PROGRESS_MASK 0x2 +#define ENA_REGS_DEV_STS_AQ_RESTART_FINISHED_SHIFT 2 +#define ENA_REGS_DEV_STS_AQ_RESTART_FINISHED_MASK 0x4 +#define ENA_REGS_DEV_STS_RESET_IN_PROGRESS_SHIFT 3 +#define ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK 0x8 +#define ENA_REGS_DEV_STS_RESET_FINISHED_SHIFT 4 +#define ENA_REGS_DEV_STS_RESET_FINISHED_MASK 0x10 +#define ENA_REGS_DEV_STS_FATAL_ERROR_SHIFT 5 +#define ENA_REGS_DEV_STS_FATAL_ERROR_MASK 0x20 +#define ENA_REGS_DEV_STS_QUIESCENT_STATE_IN_PROGRESS_SHIFT 6 +#define ENA_REGS_DEV_STS_QUIESCENT_STATE_IN_PROGRESS_MASK 0x40 +#define ENA_REGS_DEV_STS_QUIESCENT_STATE_ACHIEVED_SHIFT 7 +#define ENA_REGS_DEV_STS_QUIESCENT_STATE_ACHIEVED_MASK 0x80 + +/* mmio_reg_read register */ +#define ENA_REGS_MMIO_REG_READ_REQ_ID_MASK 0xffff +#define ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT 16 +#define ENA_REGS_MMIO_REG_READ_REG_OFF_MASK 0xffff0000 + +/* rss_ind_entry_update register */ +#define ENA_REGS_RSS_IND_ENTRY_UPDATE_INDEX_MASK 0xffff +#define ENA_REGS_RSS_IND_ENTRY_UPDATE_CQ_IDX_SHIFT 16 +#define ENA_REGS_RSS_IND_ENTRY_UPDATE_CQ_IDX_MASK 0xffff0000 + +#endif /* _ENA_REGS_H_ */ diff --git a/src/spdk/dpdk/drivers/net/ena/base/ena_eth_com.c b/src/spdk/dpdk/drivers/net/ena/base/ena_eth_com.c new file mode 100644 index 000000000..80d35556c --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ena/base/ena_eth_com.c @@ -0,0 +1,618 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates. + * All rights reserved. + */ + +#include "ena_eth_com.h" + +static struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc( + struct ena_com_io_cq *io_cq) +{ + struct ena_eth_io_rx_cdesc_base *cdesc; + u16 expected_phase, head_masked; + u16 desc_phase; + + head_masked = io_cq->head & (io_cq->q_depth - 1); + expected_phase = io_cq->phase; + + cdesc = (struct ena_eth_io_rx_cdesc_base *)(io_cq->cdesc_addr.virt_addr + + (head_masked * io_cq->cdesc_entry_size_in_bytes)); + + desc_phase = (READ_ONCE32(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >> + ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT; + + if (desc_phase != expected_phase) + return NULL; + + /* Make sure we read the rest of the descriptor after the phase bit + * has been read + */ + dma_rmb(); + + return cdesc; +} + +static void *get_sq_desc_regular_queue(struct ena_com_io_sq *io_sq) +{ + u16 tail_masked; + u32 offset; + + tail_masked = io_sq->tail & (io_sq->q_depth - 1); + + offset = tail_masked * io_sq->desc_entry_size; + + return (void *)((uintptr_t)io_sq->desc_addr.virt_addr + offset); +} + +static int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq, + u8 *bounce_buffer) +{ + struct ena_com_llq_info *llq_info = &io_sq->llq_info; + + u16 dst_tail_mask; + u32 dst_offset; + + dst_tail_mask = io_sq->tail & (io_sq->q_depth - 1); + dst_offset = dst_tail_mask * llq_info->desc_list_entry_size; + + if (is_llq_max_tx_burst_exists(io_sq)) { + if (unlikely(!io_sq->entries_in_tx_burst_left)) { + ena_trc_err("Error: trying to send more packets than tx burst allows\n"); + return ENA_COM_NO_SPACE; + } + + io_sq->entries_in_tx_burst_left--; + ena_trc_dbg("decreasing entries_in_tx_burst_left of queue %d to %d\n", + io_sq->qid, io_sq->entries_in_tx_burst_left); + } + + /* Make sure everything was written into the bounce buffer before + * writing the bounce buffer to the device + */ + wmb(); + + /* 
The line is completed. Copy it to dev */ + ENA_MEMCPY_TO_DEVICE_64(io_sq->desc_addr.pbuf_dev_addr + dst_offset, + bounce_buffer, + llq_info->desc_list_entry_size); + + io_sq->tail++; + + /* Switch phase bit in case of wrap around */ + if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0)) + io_sq->phase ^= 1; + + return ENA_COM_OK; +} + +static int ena_com_write_header_to_bounce(struct ena_com_io_sq *io_sq, + u8 *header_src, + u16 header_len) +{ + struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl; + struct ena_com_llq_info *llq_info = &io_sq->llq_info; + u8 *bounce_buffer = pkt_ctrl->curr_bounce_buf; + u16 header_offset; + + if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)) + return 0; + + header_offset = + llq_info->descs_num_before_header * io_sq->desc_entry_size; + + if (unlikely((header_offset + header_len) > llq_info->desc_list_entry_size)) { + ena_trc_err("trying to write header larger than llq entry can accommodate\n"); + return ENA_COM_FAULT; + } + + if (unlikely(!bounce_buffer)) { + ena_trc_err("bounce buffer is NULL\n"); + return ENA_COM_FAULT; + } + + memcpy(bounce_buffer + header_offset, header_src, header_len); + + return 0; +} + +static void *get_sq_desc_llq(struct ena_com_io_sq *io_sq) +{ + struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl; + u8 *bounce_buffer; + void *sq_desc; + + bounce_buffer = pkt_ctrl->curr_bounce_buf; + + if (unlikely(!bounce_buffer)) { + ena_trc_err("bounce buffer is NULL\n"); + return NULL; + } + + sq_desc = bounce_buffer + pkt_ctrl->idx * io_sq->desc_entry_size; + pkt_ctrl->idx++; + pkt_ctrl->descs_left_in_line--; + + return sq_desc; +} + +static int ena_com_close_bounce_buffer(struct ena_com_io_sq *io_sq) +{ + struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl; + struct ena_com_llq_info *llq_info = &io_sq->llq_info; + int rc; + + if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)) + return ENA_COM_OK; + + /* bounce buffer was used, so write it and get a new one */ + if (pkt_ctrl->idx) { + rc = ena_com_write_bounce_buffer_to_dev(io_sq, + pkt_ctrl->curr_bounce_buf); + if (unlikely(rc)) { + ena_trc_err("failed to write bounce buffer to device\n"); + return rc; + } + + pkt_ctrl->curr_bounce_buf = + ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl); + memset(io_sq->llq_buf_ctrl.curr_bounce_buf, + 0x0, llq_info->desc_list_entry_size); + } + + pkt_ctrl->idx = 0; + pkt_ctrl->descs_left_in_line = llq_info->descs_num_before_header; + return ENA_COM_OK; +} + +static void *get_sq_desc(struct ena_com_io_sq *io_sq) +{ + if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) + return get_sq_desc_llq(io_sq); + + return get_sq_desc_regular_queue(io_sq); +} + +static int ena_com_sq_update_llq_tail(struct ena_com_io_sq *io_sq) +{ + struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl; + struct ena_com_llq_info *llq_info = &io_sq->llq_info; + int rc; + + if (!pkt_ctrl->descs_left_in_line) { + rc = ena_com_write_bounce_buffer_to_dev(io_sq, + pkt_ctrl->curr_bounce_buf); + if (unlikely(rc)) { + ena_trc_err("failed to write bounce buffer to device\n"); + return rc; + } + + pkt_ctrl->curr_bounce_buf = + ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl); + memset(io_sq->llq_buf_ctrl.curr_bounce_buf, + 0x0, llq_info->desc_list_entry_size); + + pkt_ctrl->idx = 0; + if (unlikely(llq_info->desc_stride_ctrl == ENA_ADMIN_SINGLE_DESC_PER_ENTRY)) + pkt_ctrl->descs_left_in_line = 1; + else + pkt_ctrl->descs_left_in_line = + llq_info->desc_list_entry_size / io_sq->desc_entry_size; + 
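+			/* Illustrative arithmetic (sizes assumed for the
+			 * example, not read from this file): after a line is
+			 * flushed, descs_left_in_line is reset to how many
+			 * descriptors fit in one LLQ entry.  With, say, a
+			 * 128-byte desc_list_entry_size and 16-byte TX
+			 * descriptors that is
+			 *
+			 *	descs_left_in_line = 128 / 16 = 8
+			 *
+			 * while ENA_ADMIN_SINGLE_DESC_PER_ENTRY forces exactly
+			 * one descriptor per line regardless of entry size.
+			 */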
} + + return ENA_COM_OK; +} + +static int ena_com_sq_update_tail(struct ena_com_io_sq *io_sq) +{ + if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) + return ena_com_sq_update_llq_tail(io_sq); + + io_sq->tail++; + + /* Switch phase bit in case of wrap around */ + if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0)) + io_sq->phase ^= 1; + + return ENA_COM_OK; +} + +static struct ena_eth_io_rx_cdesc_base * + ena_com_rx_cdesc_idx_to_ptr(struct ena_com_io_cq *io_cq, u16 idx) +{ + idx &= (io_cq->q_depth - 1); + return (struct ena_eth_io_rx_cdesc_base *) + ((uintptr_t)io_cq->cdesc_addr.virt_addr + + idx * io_cq->cdesc_entry_size_in_bytes); +} + +static u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq, + u16 *first_cdesc_idx) +{ + struct ena_eth_io_rx_cdesc_base *cdesc; + u16 count = 0, head_masked; + u32 last = 0; + + do { + cdesc = ena_com_get_next_rx_cdesc(io_cq); + if (!cdesc) + break; + + ena_com_cq_inc_head(io_cq); + count++; + last = (READ_ONCE32(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >> + ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT; + } while (!last); + + if (last) { + *first_cdesc_idx = io_cq->cur_rx_pkt_cdesc_start_idx; + count += io_cq->cur_rx_pkt_cdesc_count; + + head_masked = io_cq->head & (io_cq->q_depth - 1); + + io_cq->cur_rx_pkt_cdesc_count = 0; + io_cq->cur_rx_pkt_cdesc_start_idx = head_masked; + + ena_trc_dbg("ena q_id: %d packets were completed. first desc idx %u descs# %d\n", + io_cq->qid, *first_cdesc_idx, count); + } else { + io_cq->cur_rx_pkt_cdesc_count += count; + count = 0; + } + + return count; +} + +static int ena_com_create_meta(struct ena_com_io_sq *io_sq, + struct ena_com_tx_meta *ena_meta) +{ + struct ena_eth_io_tx_meta_desc *meta_desc = NULL; + + meta_desc = get_sq_desc(io_sq); + memset(meta_desc, 0x0, sizeof(struct ena_eth_io_tx_meta_desc)); + + meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_DESC_MASK; + + meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK; + + /* bits 0-9 of the mss */ + meta_desc->word2 |= (ena_meta->mss << + ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT) & + ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK; + /* bits 10-13 of the mss */ + meta_desc->len_ctrl |= ((ena_meta->mss >> 10) << + ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT) & + ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK; + + /* Extended meta desc */ + meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK; + meta_desc->len_ctrl |= (io_sq->phase << + ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT) & + ENA_ETH_IO_TX_META_DESC_PHASE_MASK; + + meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_FIRST_MASK; + meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_STORE_MASK; + + meta_desc->word2 |= ena_meta->l3_hdr_len & + ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK; + meta_desc->word2 |= (ena_meta->l3_hdr_offset << + ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT) & + ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK; + + meta_desc->word2 |= (ena_meta->l4_hdr_len << + ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT) & + ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK; + + return ena_com_sq_update_tail(io_sq); +} + +static int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq, + struct ena_com_tx_ctx *ena_tx_ctx, + bool *have_meta) +{ + struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta; + + /* When disable meta caching is set, don't bother to save the meta and + * compare it to the stored version, just create the meta + */ + if (io_sq->disable_meta_caching) { + if (unlikely(!ena_tx_ctx->meta_valid)) + return ENA_COM_INVAL; + + *have_meta = true; + return ena_com_create_meta(io_sq, 
ena_meta); + } else if (ena_com_meta_desc_changed(io_sq, ena_tx_ctx)) { + *have_meta = true; + /* Cache the meta desc */ + memcpy(&io_sq->cached_tx_meta, ena_meta, + sizeof(struct ena_com_tx_meta)); + return ena_com_create_meta(io_sq, ena_meta); + } else { + *have_meta = false; + return ENA_COM_OK; + } +} + +static void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx, + struct ena_eth_io_rx_cdesc_base *cdesc) +{ + ena_rx_ctx->l3_proto = cdesc->status & + ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK; + ena_rx_ctx->l4_proto = + (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK) >> + ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT; + ena_rx_ctx->l3_csum_err = + !!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK) >> + ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT); + ena_rx_ctx->l4_csum_err = + !!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK) >> + ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT); + ena_rx_ctx->l4_csum_checked = + !!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_MASK) >> + ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_SHIFT); + ena_rx_ctx->hash = cdesc->hash; + ena_rx_ctx->frag = + (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK) >> + ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT; + + ena_trc_dbg("ena_rx_ctx->l3_proto %d ena_rx_ctx->l4_proto %d\nena_rx_ctx->l3_csum_err %d ena_rx_ctx->l4_csum_err %d\nhash frag %d frag: %d cdesc_status: %x\n", + ena_rx_ctx->l3_proto, + ena_rx_ctx->l4_proto, + ena_rx_ctx->l3_csum_err, + ena_rx_ctx->l4_csum_err, + ena_rx_ctx->hash, + ena_rx_ctx->frag, + cdesc->status); +} + +/*****************************************************************************/ +/***************************** API **********************************/ +/*****************************************************************************/ + +int ena_com_prepare_tx(struct ena_com_io_sq *io_sq, + struct ena_com_tx_ctx *ena_tx_ctx, + int *nb_hw_desc) +{ + struct ena_eth_io_tx_desc *desc = NULL; + struct ena_com_buf *ena_bufs = ena_tx_ctx->ena_bufs; + void *buffer_to_push = ena_tx_ctx->push_header; + u16 header_len = ena_tx_ctx->header_len; + u16 num_bufs = ena_tx_ctx->num_bufs; + u16 start_tail = io_sq->tail; + int i, rc; + bool have_meta; + u64 addr_hi; + + ENA_WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_TX, + "wrong Q type"); + + /* num_bufs +1 for potential meta desc */ + if (unlikely(!ena_com_sq_have_enough_space(io_sq, num_bufs + 1))) { + ena_trc_dbg("Not enough space in the tx queue\n"); + return ENA_COM_NO_MEM; + } + + if (unlikely(header_len > io_sq->tx_max_header_size)) { + ena_trc_err("header size is too large %d max header: %d\n", + header_len, io_sq->tx_max_header_size); + return ENA_COM_INVAL; + } + + if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV + && !buffer_to_push)) { + ena_trc_err("push header wasn't provided on LLQ mode\n"); + return ENA_COM_INVAL; + } + + rc = ena_com_write_header_to_bounce(io_sq, buffer_to_push, header_len); + if (unlikely(rc)) + return rc; + + rc = ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx, &have_meta); + if (unlikely(rc)) { + ena_trc_err("failed to create and store tx meta desc\n"); + return rc; + } + + /* If the caller doesn't want to send packets */ + if (unlikely(!num_bufs && !header_len)) { + rc = ena_com_close_bounce_buffer(io_sq); + if (rc) + ena_trc_err("failed to write buffers to LLQ\n"); + *nb_hw_desc = io_sq->tail - start_tail; + return rc; + } + + desc = get_sq_desc(io_sq); + if (unlikely(!desc)) + return ENA_COM_FAULT; + memset(desc, 0x0, sizeof(struct 
ena_eth_io_tx_desc)); + + /* Set first desc when we don't have meta descriptor */ + if (!have_meta) + desc->len_ctrl |= ENA_ETH_IO_TX_DESC_FIRST_MASK; + + desc->buff_addr_hi_hdr_sz |= (header_len << + ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT) & + ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK; + desc->len_ctrl |= (io_sq->phase << ENA_ETH_IO_TX_DESC_PHASE_SHIFT) & + ENA_ETH_IO_TX_DESC_PHASE_MASK; + + desc->len_ctrl |= ENA_ETH_IO_TX_DESC_COMP_REQ_MASK; + + /* Bits 0-9 */ + desc->meta_ctrl |= (ena_tx_ctx->req_id << + ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT) & + ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK; + + desc->meta_ctrl |= (ena_tx_ctx->df << + ENA_ETH_IO_TX_DESC_DF_SHIFT) & + ENA_ETH_IO_TX_DESC_DF_MASK; + + /* Bits 10-15 */ + desc->len_ctrl |= ((ena_tx_ctx->req_id >> 10) << + ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT) & + ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK; + + if (ena_tx_ctx->meta_valid) { + desc->meta_ctrl |= (ena_tx_ctx->tso_enable << + ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT) & + ENA_ETH_IO_TX_DESC_TSO_EN_MASK; + desc->meta_ctrl |= ena_tx_ctx->l3_proto & + ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK; + desc->meta_ctrl |= (ena_tx_ctx->l4_proto << + ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT) & + ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK; + desc->meta_ctrl |= (ena_tx_ctx->l3_csum_enable << + ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT) & + ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK; + desc->meta_ctrl |= (ena_tx_ctx->l4_csum_enable << + ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT) & + ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK; + desc->meta_ctrl |= (ena_tx_ctx->l4_csum_partial << + ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT) & + ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK; + } + + for (i = 0; i < num_bufs; i++) { + /* The first desc share the same desc as the header */ + if (likely(i != 0)) { + rc = ena_com_sq_update_tail(io_sq); + if (unlikely(rc)) { + ena_trc_err("failed to update sq tail\n"); + return rc; + } + + desc = get_sq_desc(io_sq); + if (unlikely(!desc)) + return ENA_COM_FAULT; + + memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc)); + + desc->len_ctrl |= (io_sq->phase << + ENA_ETH_IO_TX_DESC_PHASE_SHIFT) & + ENA_ETH_IO_TX_DESC_PHASE_MASK; + } + + desc->len_ctrl |= ena_bufs->len & + ENA_ETH_IO_TX_DESC_LENGTH_MASK; + + addr_hi = ((ena_bufs->paddr & + GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32); + + desc->buff_addr_lo = (u32)ena_bufs->paddr; + desc->buff_addr_hi_hdr_sz |= addr_hi & + ENA_ETH_IO_TX_DESC_ADDR_HI_MASK; + ena_bufs++; + } + + /* set the last desc indicator */ + desc->len_ctrl |= ENA_ETH_IO_TX_DESC_LAST_MASK; + + rc = ena_com_sq_update_tail(io_sq); + if (unlikely(rc)) { + ena_trc_err("failed to update sq tail of the last descriptor\n"); + return rc; + } + + rc = ena_com_close_bounce_buffer(io_sq); + if (rc) + ena_trc_err("failed when closing bounce buffer\n"); + + *nb_hw_desc = io_sq->tail - start_tail; + return rc; +} + +int ena_com_rx_pkt(struct ena_com_io_cq *io_cq, + struct ena_com_io_sq *io_sq, + struct ena_com_rx_ctx *ena_rx_ctx) +{ + struct ena_com_rx_buf_info *ena_buf = &ena_rx_ctx->ena_bufs[0]; + struct ena_eth_io_rx_cdesc_base *cdesc = NULL; + u16 cdesc_idx = 0; + u16 nb_hw_desc; + u16 i = 0; + + ENA_WARN(io_cq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX, + "wrong Q type"); + + nb_hw_desc = ena_com_cdesc_rx_pkt_get(io_cq, &cdesc_idx); + if (nb_hw_desc == 0) { + ena_rx_ctx->descs = nb_hw_desc; + return 0; + } + + ena_trc_dbg("fetch rx packet: queue %d completed desc: %d\n", + io_cq->qid, nb_hw_desc); + + if (unlikely(nb_hw_desc > ena_rx_ctx->max_bufs)) { + ena_trc_err("Too many RX cdescs (%d) > MAX(%d)\n", + nb_hw_desc, ena_rx_ctx->max_bufs); + 
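+		/* Illustrative caller sketch, not taken from this file:
+		 * nb_hw_desc is the number of completion descriptors one
+		 * received frame spans (one per RX buffer), so the caller
+		 * sizes ena_bufs[] and max_bufs for the longest chain it
+		 * accepts.  ENA_PKT_MAX_BUFS below is a stand-in name for
+		 * whatever per-packet buffer limit the caller defines:
+		 *
+		 *	struct ena_com_rx_buf_info bufs[ENA_PKT_MAX_BUFS];
+		 *	struct ena_com_rx_ctx rx_ctx = {
+		 *		.ena_bufs = bufs,
+		 *		.max_bufs = ENA_PKT_MAX_BUFS,
+		 *	};
+		 *	rc = ena_com_rx_pkt(io_cq, io_sq, &rx_ctx);
+		 */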
return ENA_COM_NO_SPACE; + } + + cdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, cdesc_idx); + ena_rx_ctx->pkt_offset = cdesc->offset; + + do { + ena_buf->len = cdesc->length; + ena_buf->req_id = cdesc->req_id; + ena_buf++; + } while ((++i < nb_hw_desc) && (cdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, cdesc_idx + i))); + + /* Update SQ head ptr */ + io_sq->next_to_comp += nb_hw_desc; + + ena_trc_dbg("[%s][QID#%d] Updating SQ head to: %d\n", __func__, + io_sq->qid, io_sq->next_to_comp); + + /* Get rx flags from the last pkt */ + ena_com_rx_set_flags(ena_rx_ctx, cdesc); + + ena_rx_ctx->descs = nb_hw_desc; + return 0; +} + +int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq, + struct ena_com_buf *ena_buf, + u16 req_id) +{ + struct ena_eth_io_rx_desc *desc; + + ENA_WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX, + "wrong Q type"); + + if (unlikely(!ena_com_sq_have_enough_space(io_sq, 1))) + return ENA_COM_NO_SPACE; + + desc = get_sq_desc(io_sq); + if (unlikely(!desc)) + return ENA_COM_FAULT; + + memset(desc, 0x0, sizeof(struct ena_eth_io_rx_desc)); + + desc->length = ena_buf->len; + + desc->ctrl = ENA_ETH_IO_RX_DESC_FIRST_MASK | + ENA_ETH_IO_RX_DESC_LAST_MASK | + (io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK) | + ENA_ETH_IO_RX_DESC_COMP_REQ_MASK; + + desc->req_id = req_id; + + desc->buff_addr_lo = (u32)ena_buf->paddr; + desc->buff_addr_hi = + ((ena_buf->paddr & GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32); + + return ena_com_sq_update_tail(io_sq); +} + +bool ena_com_cq_empty(struct ena_com_io_cq *io_cq) +{ + struct ena_eth_io_rx_cdesc_base *cdesc; + + cdesc = ena_com_get_next_rx_cdesc(io_cq); + if (cdesc) + return false; + else + return true; +} diff --git a/src/spdk/dpdk/drivers/net/ena/base/ena_eth_com.h b/src/spdk/dpdk/drivers/net/ena/base/ena_eth_com.h new file mode 100644 index 000000000..e37b642d4 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ena/base/ena_eth_com.h @@ -0,0 +1,257 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2015-2019 Amazon.com, Inc. or its affiliates. + * All rights reserved. 
+ */ + +#ifndef ENA_ETH_COM_H_ +#define ENA_ETH_COM_H_ + +#if defined(__cplusplus) +extern "C" { +#endif +#include "ena_com.h" + +/* head update threshold in units of (queue size / ENA_COMP_HEAD_THRESH) */ +#define ENA_COMP_HEAD_THRESH 4 + +struct ena_com_tx_ctx { + struct ena_com_tx_meta ena_meta; + struct ena_com_buf *ena_bufs; + /* For LLQ, header buffer - pushed to the device mem space */ + void *push_header; + + enum ena_eth_io_l3_proto_index l3_proto; + enum ena_eth_io_l4_proto_index l4_proto; + u16 num_bufs; + u16 req_id; + /* For regular queue, indicate the size of the header + * For LLQ, indicate the size of the pushed buffer + */ + u16 header_len; + + u8 meta_valid; + u8 tso_enable; + u8 l3_csum_enable; + u8 l4_csum_enable; + u8 l4_csum_partial; + u8 df; /* Don't fragment */ +}; + +struct ena_com_rx_ctx { + struct ena_com_rx_buf_info *ena_bufs; + enum ena_eth_io_l3_proto_index l3_proto; + enum ena_eth_io_l4_proto_index l4_proto; + bool l3_csum_err; + bool l4_csum_err; + u8 l4_csum_checked; + /* fragmented packet */ + bool frag; + u32 hash; + u16 descs; + int max_bufs; + u8 pkt_offset; +}; + +int ena_com_prepare_tx(struct ena_com_io_sq *io_sq, + struct ena_com_tx_ctx *ena_tx_ctx, + int *nb_hw_desc); + +int ena_com_rx_pkt(struct ena_com_io_cq *io_cq, + struct ena_com_io_sq *io_sq, + struct ena_com_rx_ctx *ena_rx_ctx); + +int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq, + struct ena_com_buf *ena_buf, + u16 req_id); + +bool ena_com_cq_empty(struct ena_com_io_cq *io_cq); + +static inline void ena_com_unmask_intr(struct ena_com_io_cq *io_cq, + struct ena_eth_io_intr_reg *intr_reg) +{ + ENA_REG_WRITE32(io_cq->bus, intr_reg->intr_control, io_cq->unmask_reg); +} + +static inline int ena_com_free_q_entries(struct ena_com_io_sq *io_sq) +{ + u16 tail, next_to_comp, cnt; + + next_to_comp = io_sq->next_to_comp; + tail = io_sq->tail; + cnt = tail - next_to_comp; + + return io_sq->q_depth - 1 - cnt; +} + +/* Check if the submission queue has enough space to hold required_buffers */ +static inline bool ena_com_sq_have_enough_space(struct ena_com_io_sq *io_sq, + u16 required_buffers) +{ + int temp; + + if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) + return ena_com_free_q_entries(io_sq) >= required_buffers; + + /* This calculation doesn't need to be 100% accurate. So to reduce + * the calculation overhead just Subtract 2 lines from the free descs + * (one for the header line and one to compensate the devision + * down calculation. 
+ */ + temp = required_buffers / io_sq->llq_info.descs_per_entry + 2; + + return ena_com_free_q_entries(io_sq) > temp; +} + +static inline bool ena_com_meta_desc_changed(struct ena_com_io_sq *io_sq, + struct ena_com_tx_ctx *ena_tx_ctx) +{ + if (!ena_tx_ctx->meta_valid) + return false; + + return !!memcmp(&io_sq->cached_tx_meta, + &ena_tx_ctx->ena_meta, + sizeof(struct ena_com_tx_meta)); +} + +static inline bool is_llq_max_tx_burst_exists(struct ena_com_io_sq *io_sq) +{ + return (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) && + io_sq->llq_info.max_entries_in_tx_burst > 0; +} + +static inline bool ena_com_is_doorbell_needed(struct ena_com_io_sq *io_sq, + struct ena_com_tx_ctx *ena_tx_ctx) +{ + struct ena_com_llq_info *llq_info; + int descs_after_first_entry; + int num_entries_needed = 1; + u16 num_descs; + + if (!is_llq_max_tx_burst_exists(io_sq)) + return false; + + llq_info = &io_sq->llq_info; + num_descs = ena_tx_ctx->num_bufs; + + if (unlikely(ena_com_meta_desc_changed(io_sq, ena_tx_ctx))) + ++num_descs; + + if (num_descs > llq_info->descs_num_before_header) { + descs_after_first_entry = num_descs - llq_info->descs_num_before_header; + num_entries_needed += DIV_ROUND_UP(descs_after_first_entry, + llq_info->descs_per_entry); + } + + ena_trc_dbg("queue: %d num_descs: %d num_entries_needed: %d\n", + io_sq->qid, num_descs, num_entries_needed); + + return num_entries_needed > io_sq->entries_in_tx_burst_left; +} + +static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq) +{ + u16 max_entries_in_tx_burst = io_sq->llq_info.max_entries_in_tx_burst; + u16 tail = io_sq->tail; + + ena_trc_dbg("write submission queue doorbell for queue: %d tail: %d\n", + io_sq->qid, tail); + + ENA_REG_WRITE32(io_sq->bus, tail, io_sq->db_addr); + + if (is_llq_max_tx_burst_exists(io_sq)) { + ena_trc_dbg("reset available entries in tx burst for queue %d to %d\n", + io_sq->qid, max_entries_in_tx_burst); + io_sq->entries_in_tx_burst_left = max_entries_in_tx_burst; + } + + return 0; +} + +static inline int ena_com_update_dev_comp_head(struct ena_com_io_cq *io_cq) +{ + u16 unreported_comp, head; + bool need_update; + + if (unlikely(io_cq->cq_head_db_reg)) { + head = io_cq->head; + unreported_comp = head - io_cq->last_head_update; + need_update = unreported_comp > (io_cq->q_depth / ENA_COMP_HEAD_THRESH); + + if (unlikely(need_update)) { + ena_trc_dbg("Write completion queue doorbell for queue %d: head: %d\n", + io_cq->qid, head); + ENA_REG_WRITE32(io_cq->bus, head, io_cq->cq_head_db_reg); + io_cq->last_head_update = head; + } + } + + return 0; +} + +static inline void ena_com_update_numa_node(struct ena_com_io_cq *io_cq, + u8 numa_node) +{ + struct ena_eth_io_numa_node_cfg_reg numa_cfg; + + if (!io_cq->numa_node_cfg_reg) + return; + + numa_cfg.numa_cfg = (numa_node & ENA_ETH_IO_NUMA_NODE_CFG_REG_NUMA_MASK) + | ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK; + + ENA_REG_WRITE32(io_cq->bus, numa_cfg.numa_cfg, io_cq->numa_node_cfg_reg); +} + +static inline void ena_com_comp_ack(struct ena_com_io_sq *io_sq, u16 elem) +{ + io_sq->next_to_comp += elem; +} + +static inline void ena_com_cq_inc_head(struct ena_com_io_cq *io_cq) +{ + io_cq->head++; + + /* Switch phase bit in case of wrap around */ + if (unlikely((io_cq->head & (io_cq->q_depth - 1)) == 0)) + io_cq->phase ^= 1; +} + +static inline int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, + u16 *req_id) +{ + u8 expected_phase, cdesc_phase; + struct ena_eth_io_tx_cdesc *cdesc; + u16 masked_head; + + masked_head = io_cq->head & (io_cq->q_depth - 
1); + expected_phase = io_cq->phase; + + cdesc = (struct ena_eth_io_tx_cdesc *) + ((uintptr_t)io_cq->cdesc_addr.virt_addr + + (masked_head * io_cq->cdesc_entry_size_in_bytes)); + + /* When the current completion descriptor phase isn't the same as the + * expected, it mean that the device still didn't update + * this completion. + */ + cdesc_phase = READ_ONCE16(cdesc->flags) & ENA_ETH_IO_TX_CDESC_PHASE_MASK; + if (cdesc_phase != expected_phase) + return ENA_COM_TRY_AGAIN; + + dma_rmb(); + + *req_id = READ_ONCE16(cdesc->req_id); + if (unlikely(*req_id >= io_cq->q_depth)) { + ena_trc_err("Invalid req id %d\n", cdesc->req_id); + return ENA_COM_INVAL; + } + + ena_com_cq_inc_head(io_cq); + + return 0; +} + +#if defined(__cplusplus) +} +#endif +#endif /* ENA_ETH_COM_H_ */ diff --git a/src/spdk/dpdk/drivers/net/ena/base/ena_plat.h b/src/spdk/dpdk/drivers/net/ena/base/ena_plat.h new file mode 100644 index 000000000..258382308 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ena/base/ena_plat.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2015-2019 Amazon.com, Inc. or its affiliates. + * All rights reserved. + */ + +#ifndef ENA_PLAT_H_ +#define ENA_PLAT_H_ + +#if defined(ENA_IPXE) +#include +#elif defined(__linux__) +#if defined(__KERNEL__) +#include +#else +#include +#endif +#elif defined(__FreeBSD__) +#if defined(_KERNEL) +#include +#else +#include +#endif +#elif defined(_WIN32) +#include +#else +#error "Invalid platform" +#endif + +#endif /* ENA_PLAT_H_ */ diff --git a/src/spdk/dpdk/drivers/net/ena/base/ena_plat_dpdk.h b/src/spdk/dpdk/drivers/net/ena/base/ena_plat_dpdk.h new file mode 100644 index 000000000..595967e6e --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ena/base/ena_plat_dpdk.h @@ -0,0 +1,315 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates. + * All rights reserved. + */ + +#ifndef DPDK_ENA_COM_ENA_PLAT_DPDK_H_ +#define DPDK_ENA_COM_ENA_PLAT_DPDK_H_ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +typedef uint64_t u64; +typedef uint32_t u32; +typedef uint16_t u16; +typedef uint8_t u8; + +typedef uint64_t dma_addr_t; +#ifndef ETIME +#define ETIME ETIMEDOUT +#endif + +#define ena_atomic32_t rte_atomic32_t +#define ena_mem_handle_t const struct rte_memzone * + +#define SZ_256 (256U) +#define SZ_4K (4096U) + +#define ENA_COM_OK 0 +#define ENA_COM_NO_MEM -ENOMEM +#define ENA_COM_INVAL -EINVAL +#define ENA_COM_NO_SPACE -ENOSPC +#define ENA_COM_NO_DEVICE -ENODEV +#define ENA_COM_TIMER_EXPIRED -ETIME +#define ENA_COM_FAULT -EFAULT +#define ENA_COM_TRY_AGAIN -EAGAIN +#define ENA_COM_UNSUPPORTED -EOPNOTSUPP + +#define ____cacheline_aligned __rte_cache_aligned + +#define ENA_ABORT() abort() + +#define ENA_MSLEEP(x) rte_delay_ms(x) +#define ENA_UDELAY(x) rte_delay_us(x) + +#define ENA_TOUCH(x) ((void)(x)) +#define memcpy_toio memcpy +#define wmb rte_wmb +#define rmb rte_rmb +#define mb rte_mb +#define mmiowb rte_io_wmb +#define __iomem + +#define US_PER_S 1000000 +#define ENA_GET_SYSTEM_USECS() \ + (rte_get_timer_cycles() * US_PER_S / rte_get_timer_hz()) + +extern int ena_logtype_com; +#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG +#define ENA_ASSERT(cond, format, arg...) 
\ + do { \ + if (unlikely(!(cond))) { \ + rte_log(RTE_LOGTYPE_ERR, ena_logtype_com, \ + format, ##arg); \ + rte_panic("line %d\tassert \"" #cond "\"" \ + "failed\n", __LINE__); \ + } \ + } while (0) +#else +#define ENA_ASSERT(cond, format, arg...) do {} while (0) +#endif + +#define ENA_MAX32(x, y) RTE_MAX((x), (y)) +#define ENA_MAX16(x, y) RTE_MAX((x), (y)) +#define ENA_MAX8(x, y) RTE_MAX((x), (y)) +#define ENA_MIN32(x, y) RTE_MIN((x), (y)) +#define ENA_MIN16(x, y) RTE_MIN((x), (y)) +#define ENA_MIN8(x, y) RTE_MIN((x), (y)) + +#define BITS_PER_LONG_LONG (__SIZEOF_LONG_LONG__ * 8) +#define U64_C(x) x ## ULL +#define BIT(nr) (1UL << (nr)) +#define BITS_PER_LONG (__SIZEOF_LONG__ * 8) +#define GENMASK(h, l) (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h)))) +#define GENMASK_ULL(h, l) (((~0ULL) - (1ULL << (l)) + 1) & \ + (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h)))) + +#ifdef RTE_LIBRTE_ENA_COM_DEBUG +#define ena_trc_log(level, fmt, arg...) \ + rte_log(RTE_LOG_ ## level, ena_logtype_com, \ + "[ENA_COM: %s]" fmt, __func__, ##arg) + +#define ena_trc_dbg(format, arg...) ena_trc_log(DEBUG, format, ##arg) +#define ena_trc_info(format, arg...) ena_trc_log(INFO, format, ##arg) +#define ena_trc_warn(format, arg...) ena_trc_log(WARNING, format, ##arg) +#define ena_trc_err(format, arg...) ena_trc_log(ERR, format, ##arg) +#else +#define ena_trc_dbg(format, arg...) do { } while (0) +#define ena_trc_info(format, arg...) do { } while (0) +#define ena_trc_warn(format, arg...) do { } while (0) +#define ena_trc_err(format, arg...) do { } while (0) +#endif /* RTE_LIBRTE_ENA_COM_DEBUG */ + +#define ENA_WARN(cond, format, arg...) \ +do { \ + if (unlikely(cond)) { \ + ena_trc_err( \ + "Warn failed on %s:%s:%d:" format, \ + __FILE__, __func__, __LINE__, ##arg); \ + } \ +} while (0) + +/* Spinlock related methods */ +#define ena_spinlock_t rte_spinlock_t +#define ENA_SPINLOCK_INIT(spinlock) rte_spinlock_init(&spinlock) +#define ENA_SPINLOCK_LOCK(spinlock, flags) \ + ({(void)flags; rte_spinlock_lock(&spinlock); }) +#define ENA_SPINLOCK_UNLOCK(spinlock, flags) \ + ({(void)flags; rte_spinlock_unlock(&(spinlock)); }) +#define ENA_SPINLOCK_DESTROY(spinlock) ((void)spinlock) + +#define q_waitqueue_t \ + struct { \ + pthread_cond_t cond; \ + pthread_mutex_t mutex; \ + } + +#define ena_wait_queue_t q_waitqueue_t + +#define ENA_WAIT_EVENT_INIT(waitqueue) \ + do { \ + pthread_mutex_init(&(waitqueue).mutex, NULL); \ + pthread_cond_init(&(waitqueue).cond, NULL); \ + } while (0) + +#define ENA_WAIT_EVENT_WAIT(waitevent, timeout) \ + do { \ + struct timespec wait; \ + struct timeval now; \ + unsigned long timeout_us; \ + gettimeofday(&now, NULL); \ + wait.tv_sec = now.tv_sec + timeout / 1000000UL; \ + timeout_us = timeout % 1000000UL; \ + wait.tv_nsec = (now.tv_usec + timeout_us) * 1000UL; \ + pthread_mutex_lock(&waitevent.mutex); \ + pthread_cond_timedwait(&waitevent.cond, \ + &waitevent.mutex, &wait); \ + pthread_mutex_unlock(&waitevent.mutex); \ + } while (0) +#define ENA_WAIT_EVENT_SIGNAL(waitevent) pthread_cond_signal(&waitevent.cond) +/* pthread condition doesn't need to be rearmed after usage */ +#define ENA_WAIT_EVENT_CLEAR(...) 
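+/* Illustrative usage sketch of the wait-event macros above, not taken from
+ * this file.  The timeout passed to ENA_WAIT_EVENT_WAIT is in microseconds
+ * and is converted to an absolute timespec for pthread_cond_timedwait():
+ *
+ *	ena_wait_event_t ev;
+ *
+ *	ENA_WAIT_EVENT_INIT(ev);
+ *	ENA_WAIT_EVENT_WAIT(ev, 100 * 1000);	// waiter blocks up to 100 ms
+ *	ENA_WAIT_EVENT_SIGNAL(ev);		// completion path wakes it
+ *	ENA_WAIT_EVENT_DESTROY(ev);
+ *
+ * ENA_WAIT_EVENT_CLEAR expands to nothing because a pthread condition
+ * variable is not a latched event and never needs rearming.
+ */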
+#define ENA_WAIT_EVENT_DESTROY(waitqueue) ((void)(waitqueue)) + +#define ena_wait_event_t ena_wait_queue_t +#define ENA_MIGHT_SLEEP() + +#define ena_time_t uint64_t +#define ENA_TIME_EXPIRE(timeout) (timeout < rte_get_timer_cycles()) +#define ENA_GET_SYSTEM_TIMEOUT(timeout_us) \ + (timeout_us * rte_get_timer_hz() / 1000000 + rte_get_timer_cycles()) + +/* + * Each rte_memzone should have unique name. + * To satisfy it, count number of allocations and add it to name. + */ +extern rte_atomic32_t ena_alloc_cnt; + +#define ENA_MEM_ALLOC_COHERENT(dmadev, size, virt, phys, handle) \ + do { \ + const struct rte_memzone *mz = NULL; \ + ENA_TOUCH(dmadev); ENA_TOUCH(handle); \ + if (size > 0) { \ + char z_name[RTE_MEMZONE_NAMESIZE]; \ + snprintf(z_name, sizeof(z_name), \ + "ena_alloc_%d", \ + rte_atomic32_add_return(&ena_alloc_cnt, 1)); \ + mz = rte_memzone_reserve(z_name, size, \ + SOCKET_ID_ANY, \ + RTE_MEMZONE_IOVA_CONTIG); \ + handle = mz; \ + } \ + if (mz == NULL) { \ + virt = NULL; \ + phys = 0; \ + } else { \ + memset(mz->addr, 0, size); \ + virt = mz->addr; \ + phys = mz->iova; \ + } \ + } while (0) +#define ENA_MEM_FREE_COHERENT(dmadev, size, virt, phys, handle) \ + ({ ENA_TOUCH(size); ENA_TOUCH(phys); \ + ENA_TOUCH(dmadev); \ + rte_memzone_free(handle); }) + +#define ENA_MEM_ALLOC_COHERENT_NODE( \ + dmadev, size, virt, phys, mem_handle, node, dev_node) \ + do { \ + const struct rte_memzone *mz = NULL; \ + ENA_TOUCH(dmadev); ENA_TOUCH(dev_node); \ + if (size > 0) { \ + char z_name[RTE_MEMZONE_NAMESIZE]; \ + snprintf(z_name, sizeof(z_name), \ + "ena_alloc_%d", \ + rte_atomic32_add_return(&ena_alloc_cnt, 1)); \ + mz = rte_memzone_reserve(z_name, size, node, \ + RTE_MEMZONE_IOVA_CONTIG); \ + mem_handle = mz; \ + } \ + if (mz == NULL) { \ + virt = NULL; \ + phys = 0; \ + } else { \ + memset(mz->addr, 0, size); \ + virt = mz->addr; \ + phys = mz->iova; \ + } \ + } while (0) + +#define ENA_MEM_ALLOC_NODE(dmadev, size, virt, node, dev_node) \ + do { \ + ENA_TOUCH(dmadev); ENA_TOUCH(dev_node); \ + virt = rte_zmalloc_socket(NULL, size, 0, node); \ + } while (0) + +#define ENA_MEM_ALLOC(dmadev, size) rte_zmalloc(NULL, size, 1) +#define ENA_MEM_FREE(dmadev, ptr, size) \ + ({ ENA_TOUCH(dmadev); ENA_TOUCH(size); rte_free(ptr); }) + +#define ENA_DB_SYNC(mem_handle) ((void)mem_handle) + +#define ENA_REG_WRITE32(bus, value, reg) \ + ({ (void)(bus); rte_write32((value), (reg)); }) +#define ENA_REG_WRITE32_RELAXED(bus, value, reg) \ + ({ (void)(bus); rte_write32_relaxed((value), (reg)); }) +#define ENA_REG_READ32(bus, reg) \ + ({ (void)(bus); rte_read32_relaxed((reg)); }) + +#define ATOMIC32_INC(i32_ptr) rte_atomic32_inc(i32_ptr) +#define ATOMIC32_DEC(i32_ptr) rte_atomic32_dec(i32_ptr) +#define ATOMIC32_SET(i32_ptr, val) rte_atomic32_set(i32_ptr, val) +#define ATOMIC32_READ(i32_ptr) rte_atomic32_read(i32_ptr) + +#define msleep(x) rte_delay_us(x * 1000) +#define udelay(x) rte_delay_us(x) + +#define dma_rmb() rmb() + +#define MAX_ERRNO 4095 +#define IS_ERR(x) (((unsigned long)x) >= (unsigned long)-MAX_ERRNO) +#define ERR_PTR(error) ((void *)(long)error) +#define PTR_ERR(error) ((long)(void *)error) +#define might_sleep() + +#define prefetch(x) rte_prefetch0(x) +#define prefetchw(x) prefetch(x) + +#define lower_32_bits(x) ((uint32_t)(x)) +#define upper_32_bits(x) ((uint32_t)(((x) >> 16) >> 16)) + +#define ENA_TIME_EXPIRE(timeout) (timeout < rte_get_timer_cycles()) +#define ENA_GET_SYSTEM_TIMEOUT(timeout_us) \ + (timeout_us * rte_get_timer_hz() / 1000000 + rte_get_timer_cycles()) +#define 
ENA_WAIT_EVENT_DESTROY(waitqueue) ((void)(waitqueue)) + +#ifndef READ_ONCE +#define READ_ONCE(var) (*((volatile typeof(var) *)(&(var)))) +#endif + +#define READ_ONCE8(var) READ_ONCE(var) +#define READ_ONCE16(var) READ_ONCE(var) +#define READ_ONCE32(var) READ_ONCE(var) + +/* The size must be 8 byte align */ +#define ENA_MEMCPY_TO_DEVICE_64(dst, src, size) \ + do { \ + int count, i; \ + uint64_t *to = (uint64_t *)(dst); \ + const uint64_t *from = (const uint64_t *)(src); \ + count = (size) / 8; \ + for (i = 0; i < count; i++, from++, to++) \ + rte_write64_relaxed(*from, to); \ + } while(0) + +#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d)) + +#define ENA_FFS(x) ffs(x) + +void ena_rss_key_fill(void *key, size_t size); + +#define ENA_RSS_FILL_KEY(key, size) ena_rss_key_fill(key, size) + +#define ENA_INTR_INITIAL_TX_INTERVAL_USECS_PLAT 0 + +#define ENA_PRIu64 PRIu64 + +#include "ena_includes.h" +#endif /* DPDK_ENA_COM_ENA_PLAT_DPDK_H_ */ diff --git a/src/spdk/dpdk/drivers/net/ena/ena_ethdev.c b/src/spdk/dpdk/drivers/net/ena/ena_ethdev.c new file mode 100644 index 000000000..fbddc79f7 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ena/ena_ethdev.c @@ -0,0 +1,2967 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates. + * All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ena_ethdev.h" +#include "ena_logs.h" +#include "ena_platform.h" +#include "ena_com.h" +#include "ena_eth_com.h" + +#include +#include +#include +#include + +#define DRV_MODULE_VER_MAJOR 2 +#define DRV_MODULE_VER_MINOR 1 +#define DRV_MODULE_VER_SUBMINOR 0 + +#define ENA_IO_TXQ_IDX(q) (2 * (q)) +#define ENA_IO_RXQ_IDX(q) (2 * (q) + 1) +/*reverse version of ENA_IO_RXQ_IDX*/ +#define ENA_IO_RXQ_IDX_REV(q) ((q - 1) / 2) + +#define __MERGE_64B_H_L(h, l) (((uint64_t)h << 32) | l) +#define TEST_BIT(val, bit_shift) (val & (1UL << bit_shift)) + +#define GET_L4_HDR_LEN(mbuf) \ + ((rte_pktmbuf_mtod_offset(mbuf, struct rte_tcp_hdr *, \ + mbuf->l3_len + mbuf->l2_len)->data_off) >> 4) + +#define ENA_RX_RSS_TABLE_LOG_SIZE 7 +#define ENA_RX_RSS_TABLE_SIZE (1 << ENA_RX_RSS_TABLE_LOG_SIZE) +#define ENA_HASH_KEY_SIZE 40 +#define ETH_GSTRING_LEN 32 + +#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) + +#define ENA_MIN_RING_DESC 128 + +enum ethtool_stringset { + ETH_SS_TEST = 0, + ETH_SS_STATS, +}; + +struct ena_stats { + char name[ETH_GSTRING_LEN]; + int stat_offset; +}; + +#define ENA_STAT_ENTRY(stat, stat_type) { \ + .name = #stat, \ + .stat_offset = offsetof(struct ena_stats_##stat_type, stat) \ +} + +#define ENA_STAT_RX_ENTRY(stat) \ + ENA_STAT_ENTRY(stat, rx) + +#define ENA_STAT_TX_ENTRY(stat) \ + ENA_STAT_ENTRY(stat, tx) + +#define ENA_STAT_GLOBAL_ENTRY(stat) \ + ENA_STAT_ENTRY(stat, dev) + +/* Device arguments */ +#define ENA_DEVARG_LARGE_LLQ_HDR "large_llq_hdr" + +/* + * Each rte_memzone should have unique name. + * To satisfy it, count number of allocation and add it to name. 
+ */ +rte_atomic32_t ena_alloc_cnt; + +static const struct ena_stats ena_stats_global_strings[] = { + ENA_STAT_GLOBAL_ENTRY(wd_expired), + ENA_STAT_GLOBAL_ENTRY(dev_start), + ENA_STAT_GLOBAL_ENTRY(dev_stop), + ENA_STAT_GLOBAL_ENTRY(tx_drops), +}; + +static const struct ena_stats ena_stats_tx_strings[] = { + ENA_STAT_TX_ENTRY(cnt), + ENA_STAT_TX_ENTRY(bytes), + ENA_STAT_TX_ENTRY(prepare_ctx_err), + ENA_STAT_TX_ENTRY(linearize), + ENA_STAT_TX_ENTRY(linearize_failed), + ENA_STAT_TX_ENTRY(tx_poll), + ENA_STAT_TX_ENTRY(doorbells), + ENA_STAT_TX_ENTRY(bad_req_id), + ENA_STAT_TX_ENTRY(available_desc), +}; + +static const struct ena_stats ena_stats_rx_strings[] = { + ENA_STAT_RX_ENTRY(cnt), + ENA_STAT_RX_ENTRY(bytes), + ENA_STAT_RX_ENTRY(refill_partial), + ENA_STAT_RX_ENTRY(bad_csum), + ENA_STAT_RX_ENTRY(mbuf_alloc_fail), + ENA_STAT_RX_ENTRY(bad_desc_num), + ENA_STAT_RX_ENTRY(bad_req_id), +}; + +#define ENA_STATS_ARRAY_GLOBAL ARRAY_SIZE(ena_stats_global_strings) +#define ENA_STATS_ARRAY_TX ARRAY_SIZE(ena_stats_tx_strings) +#define ENA_STATS_ARRAY_RX ARRAY_SIZE(ena_stats_rx_strings) + +#define QUEUE_OFFLOADS (DEV_TX_OFFLOAD_TCP_CKSUM |\ + DEV_TX_OFFLOAD_UDP_CKSUM |\ + DEV_TX_OFFLOAD_IPV4_CKSUM |\ + DEV_TX_OFFLOAD_TCP_TSO) +#define MBUF_OFFLOADS (PKT_TX_L4_MASK |\ + PKT_TX_IP_CKSUM |\ + PKT_TX_TCP_SEG) + +/** Vendor ID used by Amazon devices */ +#define PCI_VENDOR_ID_AMAZON 0x1D0F +/** Amazon devices */ +#define PCI_DEVICE_ID_ENA_VF 0xEC20 +#define PCI_DEVICE_ID_ENA_LLQ_VF 0xEC21 + +#define ENA_TX_OFFLOAD_MASK (\ + PKT_TX_L4_MASK | \ + PKT_TX_IPV6 | \ + PKT_TX_IPV4 | \ + PKT_TX_IP_CKSUM | \ + PKT_TX_TCP_SEG) + +#define ENA_TX_OFFLOAD_NOTSUP_MASK \ + (PKT_TX_OFFLOAD_MASK ^ ENA_TX_OFFLOAD_MASK) + +int ena_logtype_init; +int ena_logtype_driver; + +#ifdef RTE_LIBRTE_ENA_DEBUG_RX +int ena_logtype_rx; +#endif +#ifdef RTE_LIBRTE_ENA_DEBUG_TX +int ena_logtype_tx; +#endif +#ifdef RTE_LIBRTE_ENA_DEBUG_TX_FREE +int ena_logtype_tx_free; +#endif +#ifdef RTE_LIBRTE_ENA_COM_DEBUG +int ena_logtype_com; +#endif + +static const struct rte_pci_id pci_id_ena_map[] = { + { RTE_PCI_DEVICE(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_VF) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_LLQ_VF) }, + { .device_id = 0 }, +}; + +static struct ena_aenq_handlers aenq_handlers; + +static int ena_device_init(struct ena_com_dev *ena_dev, + struct ena_com_dev_get_features_ctx *get_feat_ctx, + bool *wd_state); +static int ena_dev_configure(struct rte_eth_dev *dev); +static void ena_tx_map_mbuf(struct ena_ring *tx_ring, + struct ena_tx_buffer *tx_info, + struct rte_mbuf *mbuf, + void **push_header, + uint16_t *header_len); +static int ena_xmit_mbuf(struct ena_ring *tx_ring, struct rte_mbuf *mbuf); +static void ena_tx_cleanup(struct ena_ring *tx_ring); +static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); +static uint16_t eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); +static int ena_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, + uint16_t nb_desc, unsigned int socket_id, + const struct rte_eth_txconf *tx_conf); +static int ena_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, + uint16_t nb_desc, unsigned int socket_id, + const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *mp); +static inline void ena_init_rx_mbuf(struct rte_mbuf *mbuf, uint16_t len); +static struct rte_mbuf *ena_rx_mbuf(struct ena_ring *rx_ring, + struct ena_com_rx_buf_info *ena_bufs, + uint32_t descs, + uint16_t *next_to_clean, + uint8_t offset); 
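The offsetof-based tables above pair a printable counter name with the byte offset of that counter inside the matching per-device or per-ring stats structure, so one generic loop can service the global, TX and RX tables alike. A minimal sketch of that read path, assuming the counters are 64-bit; the helper name is illustrative and not part of this driver:

static inline uint64_t
ena_stats_entry_read_sketch(const void *stats_struct,
			    const struct ena_stats *entry)
{
	/*
	 * Fetch the counter located entry->stat_offset bytes into the stats
	 * struct, e.g. ena_stats_entry_read_sketch(&tx_ring->tx_stats,
	 * &ena_stats_tx_strings[j]) for an entry of the TX table.
	 */
	return *(const uint64_t *)((const char *)stats_struct +
				   entry->stat_offset);
}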
+static uint16_t eth_ena_recv_pkts(void *rx_queue, + struct rte_mbuf **rx_pkts, uint16_t nb_pkts); +static int ena_add_single_rx_desc(struct ena_com_io_sq *io_sq, + struct rte_mbuf *mbuf, uint16_t id); +static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count); +static void ena_init_rings(struct ena_adapter *adapter, + bool disable_meta_caching); +static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu); +static int ena_start(struct rte_eth_dev *dev); +static void ena_stop(struct rte_eth_dev *dev); +static void ena_close(struct rte_eth_dev *dev); +static int ena_dev_reset(struct rte_eth_dev *dev); +static int ena_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats); +static void ena_rx_queue_release_all(struct rte_eth_dev *dev); +static void ena_tx_queue_release_all(struct rte_eth_dev *dev); +static void ena_rx_queue_release(void *queue); +static void ena_tx_queue_release(void *queue); +static void ena_rx_queue_release_bufs(struct ena_ring *ring); +static void ena_tx_queue_release_bufs(struct ena_ring *ring); +static int ena_link_update(struct rte_eth_dev *dev, + int wait_to_complete); +static int ena_create_io_queue(struct ena_ring *ring); +static void ena_queue_stop(struct ena_ring *ring); +static void ena_queue_stop_all(struct rte_eth_dev *dev, + enum ena_ring_type ring_type); +static int ena_queue_start(struct ena_ring *ring); +static int ena_queue_start_all(struct rte_eth_dev *dev, + enum ena_ring_type ring_type); +static void ena_stats_restart(struct rte_eth_dev *dev); +static int ena_infos_get(struct rte_eth_dev *dev, + struct rte_eth_dev_info *dev_info); +static int ena_rss_reta_update(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size); +static int ena_rss_reta_query(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size); +static void ena_interrupt_handler_rte(void *cb_arg); +static void ena_timer_wd_callback(struct rte_timer *timer, void *arg); +static void ena_destroy_device(struct rte_eth_dev *eth_dev); +static int eth_ena_dev_init(struct rte_eth_dev *eth_dev); +static int ena_xstats_get_names(struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, + unsigned int n); +static int ena_xstats_get(struct rte_eth_dev *dev, + struct rte_eth_xstat *stats, + unsigned int n); +static int ena_xstats_get_by_id(struct rte_eth_dev *dev, + const uint64_t *ids, + uint64_t *values, + unsigned int n); +static int ena_process_bool_devarg(const char *key, + const char *value, + void *opaque); +static int ena_parse_devargs(struct ena_adapter *adapter, + struct rte_devargs *devargs); + +static const struct eth_dev_ops ena_dev_ops = { + .dev_configure = ena_dev_configure, + .dev_infos_get = ena_infos_get, + .rx_queue_setup = ena_rx_queue_setup, + .tx_queue_setup = ena_tx_queue_setup, + .dev_start = ena_start, + .dev_stop = ena_stop, + .link_update = ena_link_update, + .stats_get = ena_stats_get, + .xstats_get_names = ena_xstats_get_names, + .xstats_get = ena_xstats_get, + .xstats_get_by_id = ena_xstats_get_by_id, + .mtu_set = ena_mtu_set, + .rx_queue_release = ena_rx_queue_release, + .tx_queue_release = ena_tx_queue_release, + .dev_close = ena_close, + .dev_reset = ena_dev_reset, + .reta_update = ena_rss_reta_update, + .reta_query = ena_rss_reta_query, +}; + +void ena_rss_key_fill(void *key, size_t size) +{ + static bool key_generated; + static uint8_t default_key[ENA_HASH_KEY_SIZE]; + size_t i; + + RTE_ASSERT(size <= ENA_HASH_KEY_SIZE); + + if (!key_generated) { + for (i = 0; 
i < ENA_HASH_KEY_SIZE; ++i) + default_key[i] = rte_rand() & 0xff; + key_generated = true; + } + + rte_memcpy(key, default_key, size); +} + +static inline void ena_rx_mbuf_prepare(struct rte_mbuf *mbuf, + struct ena_com_rx_ctx *ena_rx_ctx) +{ + uint64_t ol_flags = 0; + uint32_t packet_type = 0; + + if (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) + packet_type |= RTE_PTYPE_L4_TCP; + else if (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP) + packet_type |= RTE_PTYPE_L4_UDP; + + if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4) + packet_type |= RTE_PTYPE_L3_IPV4; + else if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV6) + packet_type |= RTE_PTYPE_L3_IPV6; + + if (!ena_rx_ctx->l4_csum_checked) + ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN; + else + if (unlikely(ena_rx_ctx->l4_csum_err) && !ena_rx_ctx->frag) + ol_flags |= PKT_RX_L4_CKSUM_BAD; + else + ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN; + + if (unlikely(ena_rx_ctx->l3_csum_err)) + ol_flags |= PKT_RX_IP_CKSUM_BAD; + + mbuf->ol_flags = ol_flags; + mbuf->packet_type = packet_type; +} + +static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf, + struct ena_com_tx_ctx *ena_tx_ctx, + uint64_t queue_offloads, + bool disable_meta_caching) +{ + struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta; + + if ((mbuf->ol_flags & MBUF_OFFLOADS) && + (queue_offloads & QUEUE_OFFLOADS)) { + /* check if TSO is required */ + if ((mbuf->ol_flags & PKT_TX_TCP_SEG) && + (queue_offloads & DEV_TX_OFFLOAD_TCP_TSO)) { + ena_tx_ctx->tso_enable = true; + + ena_meta->l4_hdr_len = GET_L4_HDR_LEN(mbuf); + } + + /* check if L3 checksum is needed */ + if ((mbuf->ol_flags & PKT_TX_IP_CKSUM) && + (queue_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)) + ena_tx_ctx->l3_csum_enable = true; + + if (mbuf->ol_flags & PKT_TX_IPV6) { + ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6; + } else { + ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4; + + /* set don't fragment (DF) flag */ + if (mbuf->packet_type & + (RTE_PTYPE_L4_NONFRAG + | RTE_PTYPE_INNER_L4_NONFRAG)) + ena_tx_ctx->df = true; + } + + /* check if L4 checksum is needed */ + if (((mbuf->ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM) && + (queue_offloads & DEV_TX_OFFLOAD_TCP_CKSUM)) { + ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP; + ena_tx_ctx->l4_csum_enable = true; + } else if (((mbuf->ol_flags & PKT_TX_L4_MASK) == + PKT_TX_UDP_CKSUM) && + (queue_offloads & DEV_TX_OFFLOAD_UDP_CKSUM)) { + ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP; + ena_tx_ctx->l4_csum_enable = true; + } else { + ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UNKNOWN; + ena_tx_ctx->l4_csum_enable = false; + } + + ena_meta->mss = mbuf->tso_segsz; + ena_meta->l3_hdr_len = mbuf->l3_len; + ena_meta->l3_hdr_offset = mbuf->l2_len; + + ena_tx_ctx->meta_valid = true; + } else if (disable_meta_caching) { + memset(ena_meta, 0, sizeof(*ena_meta)); + ena_tx_ctx->meta_valid = true; + } else { + ena_tx_ctx->meta_valid = false; + } +} + +static inline int validate_rx_req_id(struct ena_ring *rx_ring, uint16_t req_id) +{ + if (likely(req_id < rx_ring->ring_size)) + return 0; + + PMD_DRV_LOG(ERR, "Invalid rx req_id: %hu\n", req_id); + + rx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID; + rx_ring->adapter->trigger_reset = true; + ++rx_ring->rx_stats.bad_req_id; + + return -EFAULT; +} + +static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id) +{ + struct ena_tx_buffer *tx_info = NULL; + + if (likely(req_id < tx_ring->ring_size)) { + tx_info = &tx_ring->tx_buffer_info[req_id]; + if (likely(tx_info->mbuf)) + return 0; + } + + if (tx_info) + 
PMD_DRV_LOG(ERR, "tx_info doesn't have valid mbuf\n"); + else + PMD_DRV_LOG(ERR, "Invalid req_id: %hu\n", req_id); + + /* Trigger device reset */ + ++tx_ring->tx_stats.bad_req_id; + tx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_TX_REQ_ID; + tx_ring->adapter->trigger_reset = true; + return -EFAULT; +} + +static void ena_config_host_info(struct ena_com_dev *ena_dev) +{ + struct ena_admin_host_info *host_info; + int rc; + + /* Allocate only the host info */ + rc = ena_com_allocate_host_info(ena_dev); + if (rc) { + PMD_DRV_LOG(ERR, "Cannot allocate host info\n"); + return; + } + + host_info = ena_dev->host_attr.host_info; + + host_info->os_type = ENA_ADMIN_OS_DPDK; + host_info->kernel_ver = RTE_VERSION; + strlcpy((char *)host_info->kernel_ver_str, rte_version(), + sizeof(host_info->kernel_ver_str)); + host_info->os_dist = RTE_VERSION; + strlcpy((char *)host_info->os_dist_str, rte_version(), + sizeof(host_info->os_dist_str)); + host_info->driver_version = + (DRV_MODULE_VER_MAJOR) | + (DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) | + (DRV_MODULE_VER_SUBMINOR << + ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT); + host_info->num_cpus = rte_lcore_count(); + + host_info->driver_supported_features = + ENA_ADMIN_HOST_INFO_RX_OFFSET_MASK; + + rc = ena_com_set_host_attributes(ena_dev); + if (rc) { + if (rc == -ENA_COM_UNSUPPORTED) + PMD_DRV_LOG(WARNING, "Cannot set host attributes\n"); + else + PMD_DRV_LOG(ERR, "Cannot set host attributes\n"); + + goto err; + } + + return; + +err: + ena_com_delete_host_info(ena_dev); +} + +/* This function calculates the number of xstats based on the current config */ +static unsigned int ena_xstats_calc_num(struct rte_eth_dev *dev) +{ + return ENA_STATS_ARRAY_GLOBAL + + (dev->data->nb_tx_queues * ENA_STATS_ARRAY_TX) + + (dev->data->nb_rx_queues * ENA_STATS_ARRAY_RX); +} + +static void ena_config_debug_area(struct ena_adapter *adapter) +{ + u32 debug_area_size; + int rc, ss_count; + + ss_count = ena_xstats_calc_num(adapter->rte_dev); + + /* allocate 32 bytes for each string and 64bit for the value */ + debug_area_size = ss_count * ETH_GSTRING_LEN + sizeof(u64) * ss_count; + + rc = ena_com_allocate_debug_area(&adapter->ena_dev, debug_area_size); + if (rc) { + PMD_DRV_LOG(ERR, "Cannot allocate debug area\n"); + return; + } + + rc = ena_com_set_host_attributes(&adapter->ena_dev); + if (rc) { + if (rc == -ENA_COM_UNSUPPORTED) + PMD_DRV_LOG(WARNING, "Cannot set host attributes\n"); + else + PMD_DRV_LOG(ERR, "Cannot set host attributes\n"); + + goto err; + } + + return; +err: + ena_com_delete_debug_area(&adapter->ena_dev); +} + +static void ena_close(struct rte_eth_dev *dev) +{ + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + struct ena_adapter *adapter = dev->data->dev_private; + + if (adapter->state == ENA_ADAPTER_STATE_RUNNING) + ena_stop(dev); + adapter->state = ENA_ADAPTER_STATE_CLOSED; + + ena_rx_queue_release_all(dev); + ena_tx_queue_release_all(dev); + + rte_free(adapter->drv_stats); + adapter->drv_stats = NULL; + + rte_intr_disable(intr_handle); + rte_intr_callback_unregister(intr_handle, + ena_interrupt_handler_rte, + adapter); + + /* + * MAC is not allocated dynamically. Setting NULL should prevent from + * release of the resource in the rte_eth_dev_release_port(). 
+ */ + dev->data->mac_addrs = NULL; +} + +static int +ena_dev_reset(struct rte_eth_dev *dev) +{ + int rc = 0; + + ena_destroy_device(dev); + rc = eth_ena_dev_init(dev); + if (rc) + PMD_INIT_LOG(CRIT, "Cannot initialize device"); + + return rc; +} + +static int ena_rss_reta_update(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size) +{ + struct ena_adapter *adapter = dev->data->dev_private; + struct ena_com_dev *ena_dev = &adapter->ena_dev; + int rc, i; + u16 entry_value; + int conf_idx; + int idx; + + if ((reta_size == 0) || (reta_conf == NULL)) + return -EINVAL; + + if (reta_size > ENA_RX_RSS_TABLE_SIZE) { + PMD_DRV_LOG(WARNING, + "indirection table %d is bigger than supported (%d)\n", + reta_size, ENA_RX_RSS_TABLE_SIZE); + return -EINVAL; + } + + for (i = 0 ; i < reta_size ; i++) { + /* each reta_conf is for 64 entries. + * to support 128 we use 2 conf of 64 + */ + conf_idx = i / RTE_RETA_GROUP_SIZE; + idx = i % RTE_RETA_GROUP_SIZE; + if (TEST_BIT(reta_conf[conf_idx].mask, idx)) { + entry_value = + ENA_IO_RXQ_IDX(reta_conf[conf_idx].reta[idx]); + + rc = ena_com_indirect_table_fill_entry(ena_dev, + i, + entry_value); + if (unlikely(rc && rc != ENA_COM_UNSUPPORTED)) { + PMD_DRV_LOG(ERR, + "Cannot fill indirect table\n"); + return rc; + } + } + } + + rc = ena_com_indirect_table_set(ena_dev); + if (unlikely(rc && rc != ENA_COM_UNSUPPORTED)) { + PMD_DRV_LOG(ERR, "Cannot flush the indirect table\n"); + return rc; + } + + PMD_DRV_LOG(DEBUG, "%s(): RSS configured %d entries for port %d\n", + __func__, reta_size, adapter->rte_dev->data->port_id); + + return 0; +} + +/* Query redirection table. */ +static int ena_rss_reta_query(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size) +{ + struct ena_adapter *adapter = dev->data->dev_private; + struct ena_com_dev *ena_dev = &adapter->ena_dev; + int rc; + int i; + u32 indirect_table[ENA_RX_RSS_TABLE_SIZE] = {0}; + int reta_conf_idx; + int reta_idx; + + if (reta_size == 0 || reta_conf == NULL || + (reta_size > RTE_RETA_GROUP_SIZE && ((reta_conf + 1) == NULL))) + return -EINVAL; + + rc = ena_com_indirect_table_get(ena_dev, indirect_table); + if (unlikely(rc && rc != ENA_COM_UNSUPPORTED)) { + PMD_DRV_LOG(ERR, "cannot get indirect table\n"); + return -ENOTSUP; + } + + for (i = 0 ; i < reta_size ; i++) { + reta_conf_idx = i / RTE_RETA_GROUP_SIZE; + reta_idx = i % RTE_RETA_GROUP_SIZE; + if (TEST_BIT(reta_conf[reta_conf_idx].mask, reta_idx)) + reta_conf[reta_conf_idx].reta[reta_idx] = + ENA_IO_RXQ_IDX_REV(indirect_table[i]); + } + + return 0; +} + +static int ena_rss_init_default(struct ena_adapter *adapter) +{ + struct ena_com_dev *ena_dev = &adapter->ena_dev; + uint16_t nb_rx_queues = adapter->rte_dev->data->nb_rx_queues; + int rc, i; + u32 val; + + rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE); + if (unlikely(rc)) { + PMD_DRV_LOG(ERR, "Cannot init indirect table\n"); + goto err_rss_init; + } + + for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) { + val = i % nb_rx_queues; + rc = ena_com_indirect_table_fill_entry(ena_dev, i, + ENA_IO_RXQ_IDX(val)); + if (unlikely(rc && (rc != ENA_COM_UNSUPPORTED))) { + PMD_DRV_LOG(ERR, "Cannot fill indirect table\n"); + goto err_fill_indir; + } + } + + rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_CRC32, NULL, + ENA_HASH_KEY_SIZE, 0xFFFFFFFF); + if (unlikely(rc && (rc != ENA_COM_UNSUPPORTED))) { + PMD_DRV_LOG(INFO, "Cannot fill hash function\n"); + goto err_fill_indir; + } + + rc = ena_com_set_default_hash_ctrl(ena_dev); + if 
(unlikely(rc && (rc != ENA_COM_UNSUPPORTED))) { + PMD_DRV_LOG(INFO, "Cannot fill hash control\n"); + goto err_fill_indir; + } + + rc = ena_com_indirect_table_set(ena_dev); + if (unlikely(rc && (rc != ENA_COM_UNSUPPORTED))) { + PMD_DRV_LOG(ERR, "Cannot flush the indirect table\n"); + goto err_fill_indir; + } + PMD_DRV_LOG(DEBUG, "RSS configured for port %d\n", + adapter->rte_dev->data->port_id); + + return 0; + +err_fill_indir: + ena_com_rss_destroy(ena_dev); +err_rss_init: + + return rc; +} + +static void ena_rx_queue_release_all(struct rte_eth_dev *dev) +{ + struct ena_ring **queues = (struct ena_ring **)dev->data->rx_queues; + int nb_queues = dev->data->nb_rx_queues; + int i; + + for (i = 0; i < nb_queues; i++) + ena_rx_queue_release(queues[i]); +} + +static void ena_tx_queue_release_all(struct rte_eth_dev *dev) +{ + struct ena_ring **queues = (struct ena_ring **)dev->data->tx_queues; + int nb_queues = dev->data->nb_tx_queues; + int i; + + for (i = 0; i < nb_queues; i++) + ena_tx_queue_release(queues[i]); +} + +static void ena_rx_queue_release(void *queue) +{ + struct ena_ring *ring = (struct ena_ring *)queue; + + /* Free ring resources */ + if (ring->rx_buffer_info) + rte_free(ring->rx_buffer_info); + ring->rx_buffer_info = NULL; + + if (ring->rx_refill_buffer) + rte_free(ring->rx_refill_buffer); + ring->rx_refill_buffer = NULL; + + if (ring->empty_rx_reqs) + rte_free(ring->empty_rx_reqs); + ring->empty_rx_reqs = NULL; + + ring->configured = 0; + + PMD_DRV_LOG(NOTICE, "RX Queue %d:%d released\n", + ring->port_id, ring->id); +} + +static void ena_tx_queue_release(void *queue) +{ + struct ena_ring *ring = (struct ena_ring *)queue; + + /* Free ring resources */ + if (ring->push_buf_intermediate_buf) + rte_free(ring->push_buf_intermediate_buf); + + if (ring->tx_buffer_info) + rte_free(ring->tx_buffer_info); + + if (ring->empty_tx_reqs) + rte_free(ring->empty_tx_reqs); + + ring->empty_tx_reqs = NULL; + ring->tx_buffer_info = NULL; + ring->push_buf_intermediate_buf = NULL; + + ring->configured = 0; + + PMD_DRV_LOG(NOTICE, "TX Queue %d:%d released\n", + ring->port_id, ring->id); +} + +static void ena_rx_queue_release_bufs(struct ena_ring *ring) +{ + unsigned int i; + + for (i = 0; i < ring->ring_size; ++i) { + struct ena_rx_buffer *rx_info = &ring->rx_buffer_info[i]; + if (rx_info->mbuf) { + rte_mbuf_raw_free(rx_info->mbuf); + rx_info->mbuf = NULL; + } + } +} + +static void ena_tx_queue_release_bufs(struct ena_ring *ring) +{ + unsigned int i; + + for (i = 0; i < ring->ring_size; ++i) { + struct ena_tx_buffer *tx_buf = &ring->tx_buffer_info[i]; + + if (tx_buf->mbuf) + rte_pktmbuf_free(tx_buf->mbuf); + } +} + +static int ena_link_update(struct rte_eth_dev *dev, + __rte_unused int wait_to_complete) +{ + struct rte_eth_link *link = &dev->data->dev_link; + struct ena_adapter *adapter = dev->data->dev_private; + + link->link_status = adapter->link_status ? 
ETH_LINK_UP : ETH_LINK_DOWN; + link->link_speed = ETH_SPEED_NUM_NONE; + link->link_duplex = ETH_LINK_FULL_DUPLEX; + + return 0; +} + +static int ena_queue_start_all(struct rte_eth_dev *dev, + enum ena_ring_type ring_type) +{ + struct ena_adapter *adapter = dev->data->dev_private; + struct ena_ring *queues = NULL; + int nb_queues; + int i = 0; + int rc = 0; + + if (ring_type == ENA_RING_TYPE_RX) { + queues = adapter->rx_ring; + nb_queues = dev->data->nb_rx_queues; + } else { + queues = adapter->tx_ring; + nb_queues = dev->data->nb_tx_queues; + } + for (i = 0; i < nb_queues; i++) { + if (queues[i].configured) { + if (ring_type == ENA_RING_TYPE_RX) { + ena_assert_msg( + dev->data->rx_queues[i] == &queues[i], + "Inconsistent state of rx queues\n"); + } else { + ena_assert_msg( + dev->data->tx_queues[i] == &queues[i], + "Inconsistent state of tx queues\n"); + } + + rc = ena_queue_start(&queues[i]); + + if (rc) { + PMD_INIT_LOG(ERR, + "failed to start queue %d type(%d)", + i, ring_type); + goto err; + } + } + } + + return 0; + +err: + while (i--) + if (queues[i].configured) + ena_queue_stop(&queues[i]); + + return rc; +} + +static uint32_t ena_get_mtu_conf(struct ena_adapter *adapter) +{ + uint32_t max_frame_len = adapter->max_mtu; + + if (adapter->rte_eth_dev_data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_JUMBO_FRAME) + max_frame_len = + adapter->rte_eth_dev_data->dev_conf.rxmode.max_rx_pkt_len; + + return max_frame_len; +} + +static int ena_check_valid_conf(struct ena_adapter *adapter) +{ + uint32_t max_frame_len = ena_get_mtu_conf(adapter); + + if (max_frame_len > adapter->max_mtu || max_frame_len < ENA_MIN_MTU) { + PMD_INIT_LOG(ERR, "Unsupported MTU of %d. " + "max mtu: %d, min mtu: %d", + max_frame_len, adapter->max_mtu, ENA_MIN_MTU); + return ENA_COM_UNSUPPORTED; + } + + return 0; +} + +static int +ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx, + bool use_large_llq_hdr) +{ + struct ena_admin_feature_llq_desc *llq = &ctx->get_feat_ctx->llq; + struct ena_com_dev *ena_dev = ctx->ena_dev; + uint32_t max_tx_queue_size; + uint32_t max_rx_queue_size; + + if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) { + struct ena_admin_queue_ext_feature_fields *max_queue_ext = + &ctx->get_feat_ctx->max_queue_ext.max_queue_ext; + max_rx_queue_size = RTE_MIN(max_queue_ext->max_rx_cq_depth, + max_queue_ext->max_rx_sq_depth); + max_tx_queue_size = max_queue_ext->max_tx_cq_depth; + + if (ena_dev->tx_mem_queue_type == + ENA_ADMIN_PLACEMENT_POLICY_DEV) { + max_tx_queue_size = RTE_MIN(max_tx_queue_size, + llq->max_llq_depth); + } else { + max_tx_queue_size = RTE_MIN(max_tx_queue_size, + max_queue_ext->max_tx_sq_depth); + } + + ctx->max_rx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS, + max_queue_ext->max_per_packet_rx_descs); + ctx->max_tx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS, + max_queue_ext->max_per_packet_tx_descs); + } else { + struct ena_admin_queue_feature_desc *max_queues = + &ctx->get_feat_ctx->max_queues; + max_rx_queue_size = RTE_MIN(max_queues->max_cq_depth, + max_queues->max_sq_depth); + max_tx_queue_size = max_queues->max_cq_depth; + + if (ena_dev->tx_mem_queue_type == + ENA_ADMIN_PLACEMENT_POLICY_DEV) { + max_tx_queue_size = RTE_MIN(max_tx_queue_size, + llq->max_llq_depth); + } else { + max_tx_queue_size = RTE_MIN(max_tx_queue_size, + max_queues->max_sq_depth); + } + + ctx->max_rx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS, + max_queues->max_packet_rx_descs); + ctx->max_tx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS, + max_queues->max_packet_tx_descs); + } + + /* Round down to the nearest power 
of 2 */ + max_rx_queue_size = rte_align32prevpow2(max_rx_queue_size); + max_tx_queue_size = rte_align32prevpow2(max_tx_queue_size); + + if (use_large_llq_hdr) { + if ((llq->entry_size_ctrl_supported & + ENA_ADMIN_LIST_ENTRY_SIZE_256B) && + (ena_dev->tx_mem_queue_type == + ENA_ADMIN_PLACEMENT_POLICY_DEV)) { + max_tx_queue_size /= 2; + PMD_INIT_LOG(INFO, + "Forcing large headers and decreasing maximum TX queue size to %d\n", + max_tx_queue_size); + } else { + PMD_INIT_LOG(ERR, + "Forcing large headers failed: LLQ is disabled or device does not support large headers\n"); + } + } + + if (unlikely(max_rx_queue_size == 0 || max_tx_queue_size == 0)) { + PMD_INIT_LOG(ERR, "Invalid queue size"); + return -EFAULT; + } + + ctx->max_tx_queue_size = max_tx_queue_size; + ctx->max_rx_queue_size = max_rx_queue_size; + + return 0; +} + +static void ena_stats_restart(struct rte_eth_dev *dev) +{ + struct ena_adapter *adapter = dev->data->dev_private; + + rte_atomic64_init(&adapter->drv_stats->ierrors); + rte_atomic64_init(&adapter->drv_stats->oerrors); + rte_atomic64_init(&adapter->drv_stats->rx_nombuf); + adapter->drv_stats->rx_drops = 0; +} + +static int ena_stats_get(struct rte_eth_dev *dev, + struct rte_eth_stats *stats) +{ + struct ena_admin_basic_stats ena_stats; + struct ena_adapter *adapter = dev->data->dev_private; + struct ena_com_dev *ena_dev = &adapter->ena_dev; + int rc; + int i; + int max_rings_stats; + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return -ENOTSUP; + + memset(&ena_stats, 0, sizeof(ena_stats)); + rc = ena_com_get_dev_basic_stats(ena_dev, &ena_stats); + if (unlikely(rc)) { + PMD_DRV_LOG(ERR, "Could not retrieve statistics from ENA\n"); + return rc; + } + + /* Set of basic statistics from ENA */ + stats->ipackets = __MERGE_64B_H_L(ena_stats.rx_pkts_high, + ena_stats.rx_pkts_low); + stats->opackets = __MERGE_64B_H_L(ena_stats.tx_pkts_high, + ena_stats.tx_pkts_low); + stats->ibytes = __MERGE_64B_H_L(ena_stats.rx_bytes_high, + ena_stats.rx_bytes_low); + stats->obytes = __MERGE_64B_H_L(ena_stats.tx_bytes_high, + ena_stats.tx_bytes_low); + + /* Driver related stats */ + stats->imissed = adapter->drv_stats->rx_drops; + stats->ierrors = rte_atomic64_read(&adapter->drv_stats->ierrors); + stats->oerrors = rte_atomic64_read(&adapter->drv_stats->oerrors); + stats->rx_nombuf = rte_atomic64_read(&adapter->drv_stats->rx_nombuf); + + max_rings_stats = RTE_MIN(dev->data->nb_rx_queues, + RTE_ETHDEV_QUEUE_STAT_CNTRS); + for (i = 0; i < max_rings_stats; ++i) { + struct ena_stats_rx *rx_stats = &adapter->rx_ring[i].rx_stats; + + stats->q_ibytes[i] = rx_stats->bytes; + stats->q_ipackets[i] = rx_stats->cnt; + stats->q_errors[i] = rx_stats->bad_desc_num + + rx_stats->bad_req_id; + } + + max_rings_stats = RTE_MIN(dev->data->nb_tx_queues, + RTE_ETHDEV_QUEUE_STAT_CNTRS); + for (i = 0; i < max_rings_stats; ++i) { + struct ena_stats_tx *tx_stats = &adapter->tx_ring[i].tx_stats; + + stats->q_obytes[i] = tx_stats->bytes; + stats->q_opackets[i] = tx_stats->cnt; + } + + return 0; +} + +static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) +{ + struct ena_adapter *adapter; + struct ena_com_dev *ena_dev; + int rc = 0; + + ena_assert_msg(dev->data != NULL, "Uninitialized device\n"); + ena_assert_msg(dev->data->dev_private != NULL, "Uninitialized device\n"); + adapter = dev->data->dev_private; + + ena_dev = &adapter->ena_dev; + ena_assert_msg(ena_dev != NULL, "Uninitialized device\n"); + + if (mtu > ena_get_mtu_conf(adapter) || mtu < ENA_MIN_MTU) { + PMD_DRV_LOG(ERR, + "Invalid MTU setting. 
new_mtu: %d " + "max mtu: %d min mtu: %d\n", + mtu, ena_get_mtu_conf(adapter), ENA_MIN_MTU); + return -EINVAL; + } + + rc = ena_com_set_dev_mtu(ena_dev, mtu); + if (rc) + PMD_DRV_LOG(ERR, "Could not set MTU: %d\n", mtu); + else + PMD_DRV_LOG(NOTICE, "Set MTU: %d\n", mtu); + + return rc; +} + +static int ena_start(struct rte_eth_dev *dev) +{ + struct ena_adapter *adapter = dev->data->dev_private; + uint64_t ticks; + int rc = 0; + + rc = ena_check_valid_conf(adapter); + if (rc) + return rc; + + rc = ena_queue_start_all(dev, ENA_RING_TYPE_RX); + if (rc) + return rc; + + rc = ena_queue_start_all(dev, ENA_RING_TYPE_TX); + if (rc) + goto err_start_tx; + + if (adapter->rte_dev->data->dev_conf.rxmode.mq_mode & + ETH_MQ_RX_RSS_FLAG && adapter->rte_dev->data->nb_rx_queues > 0) { + rc = ena_rss_init_default(adapter); + if (rc) + goto err_rss_init; + } + + ena_stats_restart(dev); + + adapter->timestamp_wd = rte_get_timer_cycles(); + adapter->keep_alive_timeout = ENA_DEVICE_KALIVE_TIMEOUT; + + ticks = rte_get_timer_hz(); + rte_timer_reset(&adapter->timer_wd, ticks, PERIODICAL, rte_lcore_id(), + ena_timer_wd_callback, adapter); + + ++adapter->dev_stats.dev_start; + adapter->state = ENA_ADAPTER_STATE_RUNNING; + + return 0; + +err_rss_init: + ena_queue_stop_all(dev, ENA_RING_TYPE_TX); +err_start_tx: + ena_queue_stop_all(dev, ENA_RING_TYPE_RX); + return rc; +} + +static void ena_stop(struct rte_eth_dev *dev) +{ + struct ena_adapter *adapter = dev->data->dev_private; + struct ena_com_dev *ena_dev = &adapter->ena_dev; + int rc; + + rte_timer_stop_sync(&adapter->timer_wd); + ena_queue_stop_all(dev, ENA_RING_TYPE_TX); + ena_queue_stop_all(dev, ENA_RING_TYPE_RX); + + if (adapter->trigger_reset) { + rc = ena_com_dev_reset(ena_dev, adapter->reset_reason); + if (rc) + PMD_DRV_LOG(ERR, "Device reset failed rc=%d\n", rc); + } + + ++adapter->dev_stats.dev_stop; + adapter->state = ENA_ADAPTER_STATE_STOPPED; +} + +static int ena_create_io_queue(struct ena_ring *ring) +{ + struct ena_adapter *adapter; + struct ena_com_dev *ena_dev; + struct ena_com_create_io_ctx ctx = + /* policy set to _HOST just to satisfy icc compiler */ + { ENA_ADMIN_PLACEMENT_POLICY_HOST, + 0, 0, 0, 0, 0 }; + uint16_t ena_qid; + unsigned int i; + int rc; + + adapter = ring->adapter; + ena_dev = &adapter->ena_dev; + + if (ring->type == ENA_RING_TYPE_TX) { + ena_qid = ENA_IO_TXQ_IDX(ring->id); + ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX; + ctx.mem_queue_type = ena_dev->tx_mem_queue_type; + for (i = 0; i < ring->ring_size; i++) + ring->empty_tx_reqs[i] = i; + } else { + ena_qid = ENA_IO_RXQ_IDX(ring->id); + ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX; + for (i = 0; i < ring->ring_size; i++) + ring->empty_rx_reqs[i] = i; + } + ctx.queue_size = ring->ring_size; + ctx.qid = ena_qid; + ctx.msix_vector = -1; /* interrupts not used */ + ctx.numa_node = ring->numa_socket_id; + + rc = ena_com_create_io_queue(ena_dev, &ctx); + if (rc) { + PMD_DRV_LOG(ERR, + "failed to create io queue #%d (qid:%d) rc: %d\n", + ring->id, ena_qid, rc); + return rc; + } + + rc = ena_com_get_io_handlers(ena_dev, ena_qid, + &ring->ena_com_io_sq, + &ring->ena_com_io_cq); + if (rc) { + PMD_DRV_LOG(ERR, + "Failed to get io queue handlers. 
queue num %d rc: %d\n", + ring->id, rc); + ena_com_destroy_io_queue(ena_dev, ena_qid); + return rc; + } + + if (ring->type == ENA_RING_TYPE_TX) + ena_com_update_numa_node(ring->ena_com_io_cq, ctx.numa_node); + + return 0; +} + +static void ena_queue_stop(struct ena_ring *ring) +{ + struct ena_com_dev *ena_dev = &ring->adapter->ena_dev; + + if (ring->type == ENA_RING_TYPE_RX) { + ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(ring->id)); + ena_rx_queue_release_bufs(ring); + } else { + ena_com_destroy_io_queue(ena_dev, ENA_IO_TXQ_IDX(ring->id)); + ena_tx_queue_release_bufs(ring); + } +} + +static void ena_queue_stop_all(struct rte_eth_dev *dev, + enum ena_ring_type ring_type) +{ + struct ena_adapter *adapter = dev->data->dev_private; + struct ena_ring *queues = NULL; + uint16_t nb_queues, i; + + if (ring_type == ENA_RING_TYPE_RX) { + queues = adapter->rx_ring; + nb_queues = dev->data->nb_rx_queues; + } else { + queues = adapter->tx_ring; + nb_queues = dev->data->nb_tx_queues; + } + + for (i = 0; i < nb_queues; ++i) + if (queues[i].configured) + ena_queue_stop(&queues[i]); +} + +static int ena_queue_start(struct ena_ring *ring) +{ + int rc, bufs_num; + + ena_assert_msg(ring->configured == 1, + "Trying to start unconfigured queue\n"); + + rc = ena_create_io_queue(ring); + if (rc) { + PMD_INIT_LOG(ERR, "Failed to create IO queue!"); + return rc; + } + + ring->next_to_clean = 0; + ring->next_to_use = 0; + + if (ring->type == ENA_RING_TYPE_TX) { + ring->tx_stats.available_desc = + ena_com_free_q_entries(ring->ena_com_io_sq); + return 0; + } + + bufs_num = ring->ring_size - 1; + rc = ena_populate_rx_queue(ring, bufs_num); + if (rc != bufs_num) { + ena_com_destroy_io_queue(&ring->adapter->ena_dev, + ENA_IO_RXQ_IDX(ring->id)); + PMD_INIT_LOG(ERR, "Failed to populate rx ring !"); + return ENA_COM_FAULT; + } + + return 0; +} + +static int ena_tx_queue_setup(struct rte_eth_dev *dev, + uint16_t queue_idx, + uint16_t nb_desc, + unsigned int socket_id, + const struct rte_eth_txconf *tx_conf) +{ + struct ena_ring *txq = NULL; + struct ena_adapter *adapter = dev->data->dev_private; + unsigned int i; + + txq = &adapter->tx_ring[queue_idx]; + + if (txq->configured) { + PMD_DRV_LOG(CRIT, + "API violation. 
Queue %d is already configured\n", + queue_idx); + return ENA_COM_FAULT; + } + + if (!rte_is_power_of_2(nb_desc)) { + PMD_DRV_LOG(ERR, + "Unsupported size of TX queue: %d is not a power of 2.\n", + nb_desc); + return -EINVAL; + } + + if (nb_desc > adapter->max_tx_ring_size) { + PMD_DRV_LOG(ERR, + "Unsupported size of TX queue (max size: %d)\n", + adapter->max_tx_ring_size); + return -EINVAL; + } + + if (nb_desc == RTE_ETH_DEV_FALLBACK_TX_RINGSIZE) + nb_desc = adapter->max_tx_ring_size; + + txq->port_id = dev->data->port_id; + txq->next_to_clean = 0; + txq->next_to_use = 0; + txq->ring_size = nb_desc; + txq->size_mask = nb_desc - 1; + txq->numa_socket_id = socket_id; + + txq->tx_buffer_info = rte_zmalloc("txq->tx_buffer_info", + sizeof(struct ena_tx_buffer) * + txq->ring_size, + RTE_CACHE_LINE_SIZE); + if (!txq->tx_buffer_info) { + PMD_DRV_LOG(ERR, "failed to alloc mem for tx buffer info\n"); + return -ENOMEM; + } + + txq->empty_tx_reqs = rte_zmalloc("txq->empty_tx_reqs", + sizeof(u16) * txq->ring_size, + RTE_CACHE_LINE_SIZE); + if (!txq->empty_tx_reqs) { + PMD_DRV_LOG(ERR, "failed to alloc mem for tx reqs\n"); + rte_free(txq->tx_buffer_info); + return -ENOMEM; + } + + txq->push_buf_intermediate_buf = + rte_zmalloc("txq->push_buf_intermediate_buf", + txq->tx_max_header_size, + RTE_CACHE_LINE_SIZE); + if (!txq->push_buf_intermediate_buf) { + PMD_DRV_LOG(ERR, "failed to alloc push buff for LLQ\n"); + rte_free(txq->tx_buffer_info); + rte_free(txq->empty_tx_reqs); + return -ENOMEM; + } + + for (i = 0; i < txq->ring_size; i++) + txq->empty_tx_reqs[i] = i; + + if (tx_conf != NULL) { + txq->offloads = + tx_conf->offloads | dev->data->dev_conf.txmode.offloads; + } + /* Store pointer to this queue in upper layer */ + txq->configured = 1; + dev->data->tx_queues[queue_idx] = txq; + + return 0; +} + +static int ena_rx_queue_setup(struct rte_eth_dev *dev, + uint16_t queue_idx, + uint16_t nb_desc, + unsigned int socket_id, + __rte_unused const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *mp) +{ + struct ena_adapter *adapter = dev->data->dev_private; + struct ena_ring *rxq = NULL; + size_t buffer_size; + int i; + + rxq = &adapter->rx_ring[queue_idx]; + if (rxq->configured) { + PMD_DRV_LOG(CRIT, + "API violation. 
Queue %d is already configured\n", + queue_idx); + return ENA_COM_FAULT; + } + + if (nb_desc == RTE_ETH_DEV_FALLBACK_RX_RINGSIZE) + nb_desc = adapter->max_rx_ring_size; + + if (!rte_is_power_of_2(nb_desc)) { + PMD_DRV_LOG(ERR, + "Unsupported size of RX queue: %d is not a power of 2.\n", + nb_desc); + return -EINVAL; + } + + if (nb_desc > adapter->max_rx_ring_size) { + PMD_DRV_LOG(ERR, + "Unsupported size of RX queue (max size: %d)\n", + adapter->max_rx_ring_size); + return -EINVAL; + } + + /* ENA isn't supporting buffers smaller than 1400 bytes */ + buffer_size = rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM; + if (buffer_size < ENA_RX_BUF_MIN_SIZE) { + PMD_DRV_LOG(ERR, + "Unsupported size of RX buffer: %zu (min size: %d)\n", + buffer_size, ENA_RX_BUF_MIN_SIZE); + return -EINVAL; + } + + rxq->port_id = dev->data->port_id; + rxq->next_to_clean = 0; + rxq->next_to_use = 0; + rxq->ring_size = nb_desc; + rxq->size_mask = nb_desc - 1; + rxq->numa_socket_id = socket_id; + rxq->mb_pool = mp; + + rxq->rx_buffer_info = rte_zmalloc("rxq->buffer_info", + sizeof(struct ena_rx_buffer) * nb_desc, + RTE_CACHE_LINE_SIZE); + if (!rxq->rx_buffer_info) { + PMD_DRV_LOG(ERR, "failed to alloc mem for rx buffer info\n"); + return -ENOMEM; + } + + rxq->rx_refill_buffer = rte_zmalloc("rxq->rx_refill_buffer", + sizeof(struct rte_mbuf *) * nb_desc, + RTE_CACHE_LINE_SIZE); + + if (!rxq->rx_refill_buffer) { + PMD_DRV_LOG(ERR, "failed to alloc mem for rx refill buffer\n"); + rte_free(rxq->rx_buffer_info); + rxq->rx_buffer_info = NULL; + return -ENOMEM; + } + + rxq->empty_rx_reqs = rte_zmalloc("rxq->empty_rx_reqs", + sizeof(uint16_t) * nb_desc, + RTE_CACHE_LINE_SIZE); + if (!rxq->empty_rx_reqs) { + PMD_DRV_LOG(ERR, "failed to alloc mem for empty rx reqs\n"); + rte_free(rxq->rx_buffer_info); + rxq->rx_buffer_info = NULL; + rte_free(rxq->rx_refill_buffer); + rxq->rx_refill_buffer = NULL; + return -ENOMEM; + } + + for (i = 0; i < nb_desc; i++) + rxq->empty_rx_reqs[i] = i; + + /* Store pointer to this queue in upper layer */ + rxq->configured = 1; + dev->data->rx_queues[queue_idx] = rxq; + + return 0; +} + +static int ena_add_single_rx_desc(struct ena_com_io_sq *io_sq, + struct rte_mbuf *mbuf, uint16_t id) +{ + struct ena_com_buf ebuf; + int rc; + + /* prepare physical address for DMA transaction */ + ebuf.paddr = mbuf->buf_iova + RTE_PKTMBUF_HEADROOM; + ebuf.len = mbuf->buf_len - RTE_PKTMBUF_HEADROOM; + + /* pass resource to device */ + rc = ena_com_add_single_rx_desc(io_sq, &ebuf, id); + if (unlikely(rc != 0)) + PMD_DRV_LOG(WARNING, "failed adding rx desc\n"); + + return rc; +} + +static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count) +{ + unsigned int i; + int rc; + uint16_t next_to_use = rxq->next_to_use; + uint16_t in_use, req_id; + struct rte_mbuf **mbufs = rxq->rx_refill_buffer; + + if (unlikely(!count)) + return 0; + + in_use = rxq->ring_size - 1 - + ena_com_free_q_entries(rxq->ena_com_io_sq); + ena_assert_msg(((in_use + count) < rxq->ring_size), + "bad ring state\n"); + + /* get resources for incoming packets */ + rc = rte_mempool_get_bulk(rxq->mb_pool, (void **)mbufs, count); + if (unlikely(rc < 0)) { + rte_atomic64_inc(&rxq->adapter->drv_stats->rx_nombuf); + ++rxq->rx_stats.mbuf_alloc_fail; + PMD_RX_LOG(DEBUG, "there are no enough free buffers"); + return 0; + } + + for (i = 0; i < count; i++) { + struct rte_mbuf *mbuf = mbufs[i]; + struct ena_rx_buffer *rx_info; + + if (likely((i + 4) < count)) + rte_prefetch0(mbufs[i + 4]); + + req_id = rxq->empty_rx_reqs[next_to_use]; + rc = 
validate_rx_req_id(rxq, req_id); + if (unlikely(rc)) + break; + + rx_info = &rxq->rx_buffer_info[req_id]; + + rc = ena_add_single_rx_desc(rxq->ena_com_io_sq, mbuf, req_id); + if (unlikely(rc != 0)) + break; + + rx_info->mbuf = mbuf; + next_to_use = ENA_IDX_NEXT_MASKED(next_to_use, rxq->size_mask); + } + + if (unlikely(i < count)) { + PMD_DRV_LOG(WARNING, "refilled rx qid %d with only %d " + "buffers (from %d)\n", rxq->id, i, count); + rte_mempool_put_bulk(rxq->mb_pool, (void **)(&mbufs[i]), + count - i); + ++rxq->rx_stats.refill_partial; + } + + /* When we submitted free recources to device... */ + if (likely(i > 0)) { + /* ...let HW know that it can fill buffers with data. */ + ena_com_write_sq_doorbell(rxq->ena_com_io_sq); + + rxq->next_to_use = next_to_use; + } + + return i; +} + +static int ena_device_init(struct ena_com_dev *ena_dev, + struct ena_com_dev_get_features_ctx *get_feat_ctx, + bool *wd_state) +{ + uint32_t aenq_groups; + int rc; + bool readless_supported; + + /* Initialize mmio registers */ + rc = ena_com_mmio_reg_read_request_init(ena_dev); + if (rc) { + PMD_DRV_LOG(ERR, "failed to init mmio read less\n"); + return rc; + } + + /* The PCIe configuration space revision id indicate if mmio reg + * read is disabled. + */ + readless_supported = + !(((struct rte_pci_device *)ena_dev->dmadev)->id.class_id + & ENA_MMIO_DISABLE_REG_READ); + ena_com_set_mmio_read_mode(ena_dev, readless_supported); + + /* reset device */ + rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL); + if (rc) { + PMD_DRV_LOG(ERR, "cannot reset device\n"); + goto err_mmio_read_less; + } + + /* check FW version */ + rc = ena_com_validate_version(ena_dev); + if (rc) { + PMD_DRV_LOG(ERR, "device version is too low\n"); + goto err_mmio_read_less; + } + + ena_dev->dma_addr_bits = ena_com_get_dma_width(ena_dev); + + /* ENA device administration layer init */ + rc = ena_com_admin_init(ena_dev, &aenq_handlers); + if (rc) { + PMD_DRV_LOG(ERR, + "cannot initialize ena admin queue with device\n"); + goto err_mmio_read_less; + } + + /* To enable the msix interrupts the driver needs to know the number + * of queues. So the driver uses polling mode to retrieve this + * information. 
+ */ + ena_com_set_admin_polling_mode(ena_dev, true); + + ena_config_host_info(ena_dev); + + /* Get Device Attributes and features */ + rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx); + if (rc) { + PMD_DRV_LOG(ERR, + "cannot get attribute for ena device rc= %d\n", rc); + goto err_admin_init; + } + + aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) | + BIT(ENA_ADMIN_NOTIFICATION) | + BIT(ENA_ADMIN_KEEP_ALIVE) | + BIT(ENA_ADMIN_FATAL_ERROR) | + BIT(ENA_ADMIN_WARNING); + + aenq_groups &= get_feat_ctx->aenq.supported_groups; + rc = ena_com_set_aenq_config(ena_dev, aenq_groups); + if (rc) { + PMD_DRV_LOG(ERR, "Cannot configure aenq groups rc: %d\n", rc); + goto err_admin_init; + } + + *wd_state = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE)); + + return 0; + +err_admin_init: + ena_com_admin_destroy(ena_dev); + +err_mmio_read_less: + ena_com_mmio_reg_read_request_destroy(ena_dev); + + return rc; +} + +static void ena_interrupt_handler_rte(void *cb_arg) +{ + struct ena_adapter *adapter = cb_arg; + struct ena_com_dev *ena_dev = &adapter->ena_dev; + + ena_com_admin_q_comp_intr_handler(ena_dev); + if (likely(adapter->state != ENA_ADAPTER_STATE_CLOSED)) + ena_com_aenq_intr_handler(ena_dev, adapter); +} + +static void check_for_missing_keep_alive(struct ena_adapter *adapter) +{ + if (!adapter->wd_state) + return; + + if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT) + return; + + if (unlikely((rte_get_timer_cycles() - adapter->timestamp_wd) >= + adapter->keep_alive_timeout)) { + PMD_DRV_LOG(ERR, "Keep alive timeout\n"); + adapter->reset_reason = ENA_REGS_RESET_KEEP_ALIVE_TO; + adapter->trigger_reset = true; + ++adapter->dev_stats.wd_expired; + } +} + +/* Check if admin queue is enabled */ +static void check_for_admin_com_state(struct ena_adapter *adapter) +{ + if (unlikely(!ena_com_get_admin_running_state(&adapter->ena_dev))) { + PMD_DRV_LOG(ERR, "ENA admin queue is not in running state!\n"); + adapter->reset_reason = ENA_REGS_RESET_ADMIN_TO; + adapter->trigger_reset = true; + } +} + +static void ena_timer_wd_callback(__rte_unused struct rte_timer *timer, + void *arg) +{ + struct ena_adapter *adapter = arg; + struct rte_eth_dev *dev = adapter->rte_dev; + + check_for_missing_keep_alive(adapter); + check_for_admin_com_state(adapter); + + if (unlikely(adapter->trigger_reset)) { + PMD_DRV_LOG(ERR, "Trigger reset is on\n"); + _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET, + NULL); + } +} + +static inline void +set_default_llq_configurations(struct ena_llq_configurations *llq_config, + struct ena_admin_feature_llq_desc *llq, + bool use_large_llq_hdr) +{ + llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER; + llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY; + llq_config->llq_num_decs_before_header = + ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2; + + if (use_large_llq_hdr && + (llq->entry_size_ctrl_supported & ENA_ADMIN_LIST_ENTRY_SIZE_256B)) { + llq_config->llq_ring_entry_size = + ENA_ADMIN_LIST_ENTRY_SIZE_256B; + llq_config->llq_ring_entry_size_value = 256; + } else { + llq_config->llq_ring_entry_size = + ENA_ADMIN_LIST_ENTRY_SIZE_128B; + llq_config->llq_ring_entry_size_value = 128; + } +} + +static int +ena_set_queues_placement_policy(struct ena_adapter *adapter, + struct ena_com_dev *ena_dev, + struct ena_admin_feature_llq_desc *llq, + struct ena_llq_configurations *llq_default_configurations) +{ + int rc; + u32 llq_feature_mask; + + llq_feature_mask = 1 << ENA_ADMIN_LLQ; + if (!(ena_dev->supported_features & llq_feature_mask)) { + PMD_DRV_LOG(INFO, + "LLQ is not 
supported. Fallback to host mode policy.\n"); + ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; + return 0; + } + + rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations); + if (unlikely(rc)) { + PMD_INIT_LOG(WARNING, "Failed to config dev mode. " + "Fallback to host mode policy."); + ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; + return 0; + } + + /* Nothing to config, exit */ + if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) + return 0; + + if (!adapter->dev_mem_base) { + PMD_DRV_LOG(ERR, "Unable to access LLQ bar resource. " + "Fallback to host mode policy.\n."); + ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; + return 0; + } + + ena_dev->mem_bar = adapter->dev_mem_base; + + return 0; +} + +static uint32_t ena_calc_max_io_queue_num(struct ena_com_dev *ena_dev, + struct ena_com_dev_get_features_ctx *get_feat_ctx) +{ + uint32_t io_tx_sq_num, io_tx_cq_num, io_rx_num, max_num_io_queues; + + /* Regular queues capabilities */ + if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) { + struct ena_admin_queue_ext_feature_fields *max_queue_ext = + &get_feat_ctx->max_queue_ext.max_queue_ext; + io_rx_num = RTE_MIN(max_queue_ext->max_rx_sq_num, + max_queue_ext->max_rx_cq_num); + io_tx_sq_num = max_queue_ext->max_tx_sq_num; + io_tx_cq_num = max_queue_ext->max_tx_cq_num; + } else { + struct ena_admin_queue_feature_desc *max_queues = + &get_feat_ctx->max_queues; + io_tx_sq_num = max_queues->max_sq_num; + io_tx_cq_num = max_queues->max_cq_num; + io_rx_num = RTE_MIN(io_tx_sq_num, io_tx_cq_num); + } + + /* In case of LLQ use the llq number in the get feature cmd */ + if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) + io_tx_sq_num = get_feat_ctx->llq.max_llq_num; + + max_num_io_queues = RTE_MIN(ENA_MAX_NUM_IO_QUEUES, io_rx_num); + max_num_io_queues = RTE_MIN(max_num_io_queues, io_tx_sq_num); + max_num_io_queues = RTE_MIN(max_num_io_queues, io_tx_cq_num); + + if (unlikely(max_num_io_queues == 0)) { + PMD_DRV_LOG(ERR, "Number of IO queues should not be 0\n"); + return -EFAULT; + } + + return max_num_io_queues; +} + +static int eth_ena_dev_init(struct rte_eth_dev *eth_dev) +{ + struct ena_calc_queue_size_ctx calc_queue_ctx = { 0 }; + struct rte_pci_device *pci_dev; + struct rte_intr_handle *intr_handle; + struct ena_adapter *adapter = eth_dev->data->dev_private; + struct ena_com_dev *ena_dev = &adapter->ena_dev; + struct ena_com_dev_get_features_ctx get_feat_ctx; + struct ena_llq_configurations llq_config; + const char *queue_type_str; + uint32_t max_num_io_queues; + int rc; + static int adapters_found; + bool disable_meta_caching; + bool wd_state = false; + + eth_dev->dev_ops = &ena_dev_ops; + eth_dev->rx_pkt_burst = ð_ena_recv_pkts; + eth_dev->tx_pkt_burst = ð_ena_xmit_pkts; + eth_dev->tx_pkt_prepare = ð_ena_prep_pkts; + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; + + memset(adapter, 0, sizeof(struct ena_adapter)); + ena_dev = &adapter->ena_dev; + + adapter->rte_eth_dev_data = eth_dev->data; + adapter->rte_dev = eth_dev; + + pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); + adapter->pdev = pci_dev; + + PMD_INIT_LOG(INFO, "Initializing %x:%x:%x.%d", + pci_dev->addr.domain, + pci_dev->addr.bus, + pci_dev->addr.devid, + pci_dev->addr.function); + + intr_handle = &pci_dev->intr_handle; + + adapter->regs = pci_dev->mem_resource[ENA_REGS_BAR].addr; + adapter->dev_mem_base = pci_dev->mem_resource[ENA_MEM_BAR].addr; + + if (!adapter->regs) { + PMD_INIT_LOG(CRIT, "Failed to access registers 
BAR(%d)", + ENA_REGS_BAR); + return -ENXIO; + } + + ena_dev->reg_bar = adapter->regs; + ena_dev->dmadev = adapter->pdev; + + adapter->id_number = adapters_found; + + snprintf(adapter->name, ENA_NAME_MAX_LEN, "ena_%d", + adapter->id_number); + + rc = ena_parse_devargs(adapter, pci_dev->device.devargs); + if (rc != 0) { + PMD_INIT_LOG(CRIT, "Failed to parse devargs\n"); + goto err; + } + + /* device specific initialization routine */ + rc = ena_device_init(ena_dev, &get_feat_ctx, &wd_state); + if (rc) { + PMD_INIT_LOG(CRIT, "Failed to init ENA device"); + goto err; + } + adapter->wd_state = wd_state; + + set_default_llq_configurations(&llq_config, &get_feat_ctx.llq, + adapter->use_large_llq_hdr); + rc = ena_set_queues_placement_policy(adapter, ena_dev, + &get_feat_ctx.llq, &llq_config); + if (unlikely(rc)) { + PMD_INIT_LOG(CRIT, "Failed to set placement policy"); + return rc; + } + + if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) + queue_type_str = "Regular"; + else + queue_type_str = "Low latency"; + PMD_DRV_LOG(INFO, "Placement policy: %s\n", queue_type_str); + + calc_queue_ctx.ena_dev = ena_dev; + calc_queue_ctx.get_feat_ctx = &get_feat_ctx; + + max_num_io_queues = ena_calc_max_io_queue_num(ena_dev, &get_feat_ctx); + rc = ena_calc_io_queue_size(&calc_queue_ctx, + adapter->use_large_llq_hdr); + if (unlikely((rc != 0) || (max_num_io_queues == 0))) { + rc = -EFAULT; + goto err_device_destroy; + } + + adapter->max_tx_ring_size = calc_queue_ctx.max_tx_queue_size; + adapter->max_rx_ring_size = calc_queue_ctx.max_rx_queue_size; + adapter->max_tx_sgl_size = calc_queue_ctx.max_tx_sgl_size; + adapter->max_rx_sgl_size = calc_queue_ctx.max_rx_sgl_size; + adapter->max_num_io_queues = max_num_io_queues; + + if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { + disable_meta_caching = + !!(get_feat_ctx.llq.accel_mode.u.get.supported_flags & + BIT(ENA_ADMIN_DISABLE_META_CACHING)); + } else { + disable_meta_caching = false; + } + + /* prepare ring structures */ + ena_init_rings(adapter, disable_meta_caching); + + ena_config_debug_area(adapter); + + /* Set max MTU for this device */ + adapter->max_mtu = get_feat_ctx.dev_attr.max_mtu; + + /* set device support for offloads */ + adapter->offloads.tso4_supported = (get_feat_ctx.offload.tx & + ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK) != 0; + adapter->offloads.tx_csum_supported = (get_feat_ctx.offload.tx & + ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK) != 0; + adapter->offloads.rx_csum_supported = + (get_feat_ctx.offload.rx_supported & + ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK) != 0; + + /* Copy MAC address and point DPDK to it */ + eth_dev->data->mac_addrs = (struct rte_ether_addr *)adapter->mac_addr; + rte_ether_addr_copy((struct rte_ether_addr *) + get_feat_ctx.dev_attr.mac_addr, + (struct rte_ether_addr *)adapter->mac_addr); + + /* + * Pass the information to the rte_eth_dev_close() that it should also + * release the private port resources. 
+ */ + eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE; + + adapter->drv_stats = rte_zmalloc("adapter stats", + sizeof(*adapter->drv_stats), + RTE_CACHE_LINE_SIZE); + if (!adapter->drv_stats) { + PMD_DRV_LOG(ERR, "failed to alloc mem for adapter stats\n"); + rc = -ENOMEM; + goto err_delete_debug_area; + } + + rte_intr_callback_register(intr_handle, + ena_interrupt_handler_rte, + adapter); + rte_intr_enable(intr_handle); + ena_com_set_admin_polling_mode(ena_dev, false); + ena_com_admin_aenq_enable(ena_dev); + + if (adapters_found == 0) + rte_timer_subsystem_init(); + rte_timer_init(&adapter->timer_wd); + + adapters_found++; + adapter->state = ENA_ADAPTER_STATE_INIT; + + return 0; + +err_delete_debug_area: + ena_com_delete_debug_area(ena_dev); + +err_device_destroy: + ena_com_delete_host_info(ena_dev); + ena_com_admin_destroy(ena_dev); + +err: + return rc; +} + +static void ena_destroy_device(struct rte_eth_dev *eth_dev) +{ + struct ena_adapter *adapter = eth_dev->data->dev_private; + struct ena_com_dev *ena_dev = &adapter->ena_dev; + + if (adapter->state == ENA_ADAPTER_STATE_FREE) + return; + + ena_com_set_admin_running_state(ena_dev, false); + + if (adapter->state != ENA_ADAPTER_STATE_CLOSED) + ena_close(eth_dev); + + ena_com_delete_debug_area(ena_dev); + ena_com_delete_host_info(ena_dev); + + ena_com_abort_admin_commands(ena_dev); + ena_com_wait_for_abort_completion(ena_dev); + ena_com_admin_destroy(ena_dev); + ena_com_mmio_reg_read_request_destroy(ena_dev); + + adapter->state = ENA_ADAPTER_STATE_FREE; +} + +static int eth_ena_dev_uninit(struct rte_eth_dev *eth_dev) +{ + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; + + ena_destroy_device(eth_dev); + + eth_dev->dev_ops = NULL; + eth_dev->rx_pkt_burst = NULL; + eth_dev->tx_pkt_burst = NULL; + eth_dev->tx_pkt_prepare = NULL; + + return 0; +} + +static int ena_dev_configure(struct rte_eth_dev *dev) +{ + struct ena_adapter *adapter = dev->data->dev_private; + + adapter->state = ENA_ADAPTER_STATE_CONFIG; + + adapter->tx_selected_offloads = dev->data->dev_conf.txmode.offloads; + adapter->rx_selected_offloads = dev->data->dev_conf.rxmode.offloads; + return 0; +} + +static void ena_init_rings(struct ena_adapter *adapter, + bool disable_meta_caching) +{ + size_t i; + + for (i = 0; i < adapter->max_num_io_queues; i++) { + struct ena_ring *ring = &adapter->tx_ring[i]; + + ring->configured = 0; + ring->type = ENA_RING_TYPE_TX; + ring->adapter = adapter; + ring->id = i; + ring->tx_mem_queue_type = adapter->ena_dev.tx_mem_queue_type; + ring->tx_max_header_size = adapter->ena_dev.tx_max_header_size; + ring->sgl_size = adapter->max_tx_sgl_size; + ring->disable_meta_caching = disable_meta_caching; + } + + for (i = 0; i < adapter->max_num_io_queues; i++) { + struct ena_ring *ring = &adapter->rx_ring[i]; + + ring->configured = 0; + ring->type = ENA_RING_TYPE_RX; + ring->adapter = adapter; + ring->id = i; + ring->sgl_size = adapter->max_rx_sgl_size; + } +} + +static int ena_infos_get(struct rte_eth_dev *dev, + struct rte_eth_dev_info *dev_info) +{ + struct ena_adapter *adapter; + struct ena_com_dev *ena_dev; + uint64_t rx_feat = 0, tx_feat = 0; + + ena_assert_msg(dev->data != NULL, "Uninitialized device\n"); + ena_assert_msg(dev->data->dev_private != NULL, "Uninitialized device\n"); + adapter = dev->data->dev_private; + + ena_dev = &adapter->ena_dev; + ena_assert_msg(ena_dev != NULL, "Uninitialized device\n"); + + dev_info->speed_capa = + ETH_LINK_SPEED_1G | + ETH_LINK_SPEED_2_5G | + ETH_LINK_SPEED_5G | + ETH_LINK_SPEED_10G | + 
ETH_LINK_SPEED_25G | + ETH_LINK_SPEED_40G | + ETH_LINK_SPEED_50G | + ETH_LINK_SPEED_100G; + + /* Set Tx & Rx features available for device */ + if (adapter->offloads.tso4_supported) + tx_feat |= DEV_TX_OFFLOAD_TCP_TSO; + + if (adapter->offloads.tx_csum_supported) + tx_feat |= DEV_TX_OFFLOAD_IPV4_CKSUM | + DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_TCP_CKSUM; + + if (adapter->offloads.rx_csum_supported) + rx_feat |= DEV_RX_OFFLOAD_IPV4_CKSUM | + DEV_RX_OFFLOAD_UDP_CKSUM | + DEV_RX_OFFLOAD_TCP_CKSUM; + + rx_feat |= DEV_RX_OFFLOAD_JUMBO_FRAME; + + /* Inform framework about available features */ + dev_info->rx_offload_capa = rx_feat; + dev_info->rx_queue_offload_capa = rx_feat; + dev_info->tx_offload_capa = tx_feat; + dev_info->tx_queue_offload_capa = tx_feat; + + dev_info->flow_type_rss_offloads = ETH_RSS_IP | ETH_RSS_TCP | + ETH_RSS_UDP; + + dev_info->min_rx_bufsize = ENA_MIN_FRAME_LEN; + dev_info->max_rx_pktlen = adapter->max_mtu; + dev_info->max_mac_addrs = 1; + + dev_info->max_rx_queues = adapter->max_num_io_queues; + dev_info->max_tx_queues = adapter->max_num_io_queues; + dev_info->reta_size = ENA_RX_RSS_TABLE_SIZE; + + adapter->tx_supported_offloads = tx_feat; + adapter->rx_supported_offloads = rx_feat; + + dev_info->rx_desc_lim.nb_max = adapter->max_rx_ring_size; + dev_info->rx_desc_lim.nb_min = ENA_MIN_RING_DESC; + dev_info->rx_desc_lim.nb_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS, + adapter->max_rx_sgl_size); + dev_info->rx_desc_lim.nb_mtu_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS, + adapter->max_rx_sgl_size); + + dev_info->tx_desc_lim.nb_max = adapter->max_tx_ring_size; + dev_info->tx_desc_lim.nb_min = ENA_MIN_RING_DESC; + dev_info->tx_desc_lim.nb_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS, + adapter->max_tx_sgl_size); + dev_info->tx_desc_lim.nb_mtu_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS, + adapter->max_tx_sgl_size); + + return 0; +} + +static inline void ena_init_rx_mbuf(struct rte_mbuf *mbuf, uint16_t len) +{ + mbuf->data_len = len; + mbuf->data_off = RTE_PKTMBUF_HEADROOM; + mbuf->refcnt = 1; + mbuf->next = NULL; +} + +static struct rte_mbuf *ena_rx_mbuf(struct ena_ring *rx_ring, + struct ena_com_rx_buf_info *ena_bufs, + uint32_t descs, + uint16_t *next_to_clean, + uint8_t offset) +{ + struct rte_mbuf *mbuf; + struct rte_mbuf *mbuf_head; + struct ena_rx_buffer *rx_info; + int rc; + uint16_t ntc, len, req_id, buf = 0; + + if (unlikely(descs == 0)) + return NULL; + + ntc = *next_to_clean; + + len = ena_bufs[buf].len; + req_id = ena_bufs[buf].req_id; + if (unlikely(validate_rx_req_id(rx_ring, req_id))) + return NULL; + + rx_info = &rx_ring->rx_buffer_info[req_id]; + + mbuf = rx_info->mbuf; + RTE_ASSERT(mbuf != NULL); + + ena_init_rx_mbuf(mbuf, len); + + /* Fill the mbuf head with the data specific for 1st segment. */ + mbuf_head = mbuf; + mbuf_head->nb_segs = descs; + mbuf_head->port = rx_ring->port_id; + mbuf_head->pkt_len = len; + mbuf_head->data_off += offset; + + rx_info->mbuf = NULL; + rx_ring->empty_rx_reqs[ntc] = req_id; + ntc = ENA_IDX_NEXT_MASKED(ntc, rx_ring->size_mask); + + while (--descs) { + ++buf; + len = ena_bufs[buf].len; + req_id = ena_bufs[buf].req_id; + if (unlikely(validate_rx_req_id(rx_ring, req_id))) { + rte_mbuf_raw_free(mbuf_head); + return NULL; + } + + rx_info = &rx_ring->rx_buffer_info[req_id]; + RTE_ASSERT(rx_info->mbuf != NULL); + + if (unlikely(len == 0)) { + /* + * Some devices can pass descriptor with the length 0. + * To avoid confusion, the PMD is simply putting the + * descriptor back, as it was never used. We'll avoid + * mbuf allocation that way. 
+ */ + rc = ena_add_single_rx_desc(rx_ring->ena_com_io_sq, + rx_info->mbuf, req_id); + if (unlikely(rc != 0)) { + /* Free the mbuf in case of an error. */ + rte_mbuf_raw_free(rx_info->mbuf); + } else { + /* + * If there was no error, just exit the loop as + * 0 length descriptor is always the last one. + */ + break; + } + } else { + /* Create an mbuf chain. */ + mbuf->next = rx_info->mbuf; + mbuf = mbuf->next; + + ena_init_rx_mbuf(mbuf, len); + mbuf_head->pkt_len += len; + } + + /* + * Mark the descriptor as depleted and perform necessary + * cleanup. + * This code will execute in two cases: + * 1. Descriptor len was greater than 0 - normal situation. + * 2. Descriptor len was 0 and we failed to add the descriptor + * to the device. In that situation, we should try to add + * the mbuf again in the populate routine and mark the + * descriptor as used up by the device. + */ + rx_info->mbuf = NULL; + rx_ring->empty_rx_reqs[ntc] = req_id; + ntc = ENA_IDX_NEXT_MASKED(ntc, rx_ring->size_mask); + } + + *next_to_clean = ntc; + + return mbuf_head; +} + +static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + struct ena_ring *rx_ring = (struct ena_ring *)(rx_queue); + unsigned int free_queue_entries; + unsigned int refill_threshold; + uint16_t next_to_clean = rx_ring->next_to_clean; + uint16_t descs_in_use; + struct rte_mbuf *mbuf; + uint16_t completed; + struct ena_com_rx_ctx ena_rx_ctx; + int i, rc = 0; + + /* Check adapter state */ + if (unlikely(rx_ring->adapter->state != ENA_ADAPTER_STATE_RUNNING)) { + PMD_DRV_LOG(ALERT, + "Trying to receive pkts while device is NOT running\n"); + return 0; + } + + descs_in_use = rx_ring->ring_size - + ena_com_free_q_entries(rx_ring->ena_com_io_sq) - 1; + nb_pkts = RTE_MIN(descs_in_use, nb_pkts); + + for (completed = 0; completed < nb_pkts; completed++) { + ena_rx_ctx.max_bufs = rx_ring->sgl_size; + ena_rx_ctx.ena_bufs = rx_ring->ena_bufs; + ena_rx_ctx.descs = 0; + ena_rx_ctx.pkt_offset = 0; + /* receive packet context */ + rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq, + rx_ring->ena_com_io_sq, + &ena_rx_ctx); + if (unlikely(rc)) { + PMD_DRV_LOG(ERR, "ena_com_rx_pkt error %d\n", rc); + rx_ring->adapter->reset_reason = + ENA_REGS_RESET_TOO_MANY_RX_DESCS; + rx_ring->adapter->trigger_reset = true; + ++rx_ring->rx_stats.bad_desc_num; + return 0; + } + + mbuf = ena_rx_mbuf(rx_ring, + ena_rx_ctx.ena_bufs, + ena_rx_ctx.descs, + &next_to_clean, + ena_rx_ctx.pkt_offset); + if (unlikely(mbuf == NULL)) { + for (i = 0; i < ena_rx_ctx.descs; ++i) { + rx_ring->empty_rx_reqs[next_to_clean] = + rx_ring->ena_bufs[i].req_id; + next_to_clean = ENA_IDX_NEXT_MASKED( + next_to_clean, rx_ring->size_mask); + } + break; + } + + /* fill mbuf attributes if any */ + ena_rx_mbuf_prepare(mbuf, &ena_rx_ctx); + + if (unlikely(mbuf->ol_flags & + (PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD))) { + rte_atomic64_inc(&rx_ring->adapter->drv_stats->ierrors); + ++rx_ring->rx_stats.bad_csum; + } + + mbuf->hash.rss = ena_rx_ctx.hash; + + rx_pkts[completed] = mbuf; + rx_ring->rx_stats.bytes += mbuf->pkt_len; + } + + rx_ring->rx_stats.cnt += completed; + rx_ring->next_to_clean = next_to_clean; + + free_queue_entries = ena_com_free_q_entries(rx_ring->ena_com_io_sq); + refill_threshold = + RTE_MIN(rx_ring->ring_size / ENA_REFILL_THRESH_DIVIDER, + (unsigned int)ENA_REFILL_THRESH_PACKET); + + /* Burst refill to save doorbells, memory barriers, const interval */ + if (free_queue_entries > refill_threshold) { + ena_com_update_dev_comp_head(rx_ring->ena_com_io_cq); + 
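The burst-refill logic above only replenishes the Rx queue once at least 1/8 of the ring (capped at 256 entries) has been drained, which keeps the doorbell and memory-barrier cost per refilled descriptor roughly constant. A minimal standalone sketch of that threshold arithmetic, assuming only the ENA_REFILL_THRESH_* values declared later in ena_ethdev.h; the ring sizes are made-up examples:

#include <stdio.h>

/* Illustrative copies of the constants from ena_ethdev.h. */
#define ENA_REFILL_THRESH_DIVIDER 8
#define ENA_REFILL_THRESH_PACKET 256

/* Same arithmetic as the refill threshold / cleanup budget used above. */
static unsigned int example_refill_threshold(unsigned int ring_size)
{
        unsigned int thresh = ring_size / ENA_REFILL_THRESH_DIVIDER;

        return thresh < ENA_REFILL_THRESH_PACKET ?
                thresh : ENA_REFILL_THRESH_PACKET;
}

int main(void)
{
        /* A 1024-entry ring refills after 128 frees; an 8192-entry ring
         * is capped at 256.
         */
        printf("%u %u\n", example_refill_threshold(1024),
               example_refill_threshold(8192));
        return 0;
}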
ena_populate_rx_queue(rx_ring, free_queue_entries); + } + + return completed; +} + +static uint16_t +eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + int32_t ret; + uint32_t i; + struct rte_mbuf *m; + struct ena_ring *tx_ring = (struct ena_ring *)(tx_queue); + struct rte_ipv4_hdr *ip_hdr; + uint64_t ol_flags; + uint16_t frag_field; + + for (i = 0; i != nb_pkts; i++) { + m = tx_pkts[i]; + ol_flags = m->ol_flags; + + if (!(ol_flags & PKT_TX_IPV4)) + continue; + + /* If there was not L2 header length specified, assume it is + * length of the ethernet header. + */ + if (unlikely(m->l2_len == 0)) + m->l2_len = sizeof(struct rte_ether_hdr); + + ip_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *, + m->l2_len); + frag_field = rte_be_to_cpu_16(ip_hdr->fragment_offset); + + if ((frag_field & RTE_IPV4_HDR_DF_FLAG) != 0) { + m->packet_type |= RTE_PTYPE_L4_NONFRAG; + + /* If IPv4 header has DF flag enabled and TSO support is + * disabled, partial chcecksum should not be calculated. + */ + if (!tx_ring->adapter->offloads.tso4_supported) + continue; + } + + if ((ol_flags & ENA_TX_OFFLOAD_NOTSUP_MASK) != 0 || + (ol_flags & PKT_TX_L4_MASK) == + PKT_TX_SCTP_CKSUM) { + rte_errno = ENOTSUP; + return i; + } + +#ifdef RTE_LIBRTE_ETHDEV_DEBUG + ret = rte_validate_tx_offload(m); + if (ret != 0) { + rte_errno = -ret; + return i; + } +#endif + + /* In case we are supposed to TSO and have DF not set (DF=0) + * hardware must be provided with partial checksum, otherwise + * it will take care of necessary calculations. + */ + + ret = rte_net_intel_cksum_flags_prepare(m, + ol_flags & ~PKT_TX_TCP_SEG); + if (ret != 0) { + rte_errno = -ret; + return i; + } + } + + return i; +} + +static void ena_update_hints(struct ena_adapter *adapter, + struct ena_admin_ena_hw_hints *hints) +{ + if (hints->admin_completion_tx_timeout) + adapter->ena_dev.admin_queue.completion_timeout = + hints->admin_completion_tx_timeout * 1000; + + if (hints->mmio_read_timeout) + /* convert to usec */ + adapter->ena_dev.mmio_read.reg_read_to = + hints->mmio_read_timeout * 1000; + + if (hints->driver_watchdog_timeout) { + if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT) + adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT; + else + // Convert msecs to ticks + adapter->keep_alive_timeout = + (hints->driver_watchdog_timeout * + rte_get_timer_hz()) / 1000; + } +} + +static int ena_check_and_linearize_mbuf(struct ena_ring *tx_ring, + struct rte_mbuf *mbuf) +{ + struct ena_com_dev *ena_dev; + int num_segments, header_len, rc; + + ena_dev = &tx_ring->adapter->ena_dev; + num_segments = mbuf->nb_segs; + header_len = mbuf->data_len; + + if (likely(num_segments < tx_ring->sgl_size)) + return 0; + + if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV && + (num_segments == tx_ring->sgl_size) && + (header_len < tx_ring->tx_max_header_size)) + return 0; + + ++tx_ring->tx_stats.linearize; + rc = rte_pktmbuf_linearize(mbuf); + if (unlikely(rc)) { + PMD_DRV_LOG(WARNING, "Mbuf linearize failed\n"); + rte_atomic64_inc(&tx_ring->adapter->drv_stats->ierrors); + ++tx_ring->tx_stats.linearize_failed; + return rc; + } + + return rc; +} + +static void ena_tx_map_mbuf(struct ena_ring *tx_ring, + struct ena_tx_buffer *tx_info, + struct rte_mbuf *mbuf, + void **push_header, + uint16_t *header_len) +{ + struct ena_com_buf *ena_buf; + uint16_t delta, seg_len, push_len; + + delta = 0; + seg_len = mbuf->data_len; + + tx_info->mbuf = mbuf; + ena_buf = tx_info->bufs; + + if (tx_ring->tx_mem_queue_type == 
ENA_ADMIN_PLACEMENT_POLICY_DEV) { + /* + * Tx header might be (and will be in most cases) smaller than + * tx_max_header_size. But it's not an issue to send more data + * to the device, than actually needed if the mbuf size is + * greater than tx_max_header_size. + */ + push_len = RTE_MIN(mbuf->pkt_len, tx_ring->tx_max_header_size); + *header_len = push_len; + + if (likely(push_len <= seg_len)) { + /* If the push header is in the single segment, then + * just point it to the 1st mbuf data. + */ + *push_header = rte_pktmbuf_mtod(mbuf, uint8_t *); + } else { + /* If the push header lays in the several segments, copy + * it to the intermediate buffer. + */ + rte_pktmbuf_read(mbuf, 0, push_len, + tx_ring->push_buf_intermediate_buf); + *push_header = tx_ring->push_buf_intermediate_buf; + delta = push_len - seg_len; + } + } else { + *push_header = NULL; + *header_len = 0; + push_len = 0; + } + + /* Process first segment taking into consideration pushed header */ + if (seg_len > push_len) { + ena_buf->paddr = mbuf->buf_iova + + mbuf->data_off + + push_len; + ena_buf->len = seg_len - push_len; + ena_buf++; + tx_info->num_of_bufs++; + } + + while ((mbuf = mbuf->next) != NULL) { + seg_len = mbuf->data_len; + + /* Skip mbufs if whole data is pushed as a header */ + if (unlikely(delta > seg_len)) { + delta -= seg_len; + continue; + } + + ena_buf->paddr = mbuf->buf_iova + mbuf->data_off + delta; + ena_buf->len = seg_len - delta; + ena_buf++; + tx_info->num_of_bufs++; + + delta = 0; + } +} + +static int ena_xmit_mbuf(struct ena_ring *tx_ring, struct rte_mbuf *mbuf) +{ + struct ena_tx_buffer *tx_info; + struct ena_com_tx_ctx ena_tx_ctx = { { 0 } }; + uint16_t next_to_use; + uint16_t header_len; + uint16_t req_id; + void *push_header; + int nb_hw_desc; + int rc; + + rc = ena_check_and_linearize_mbuf(tx_ring, mbuf); + if (unlikely(rc)) + return rc; + + next_to_use = tx_ring->next_to_use; + + req_id = tx_ring->empty_tx_reqs[next_to_use]; + tx_info = &tx_ring->tx_buffer_info[req_id]; + tx_info->num_of_bufs = 0; + + ena_tx_map_mbuf(tx_ring, tx_info, mbuf, &push_header, &header_len); + + ena_tx_ctx.ena_bufs = tx_info->bufs; + ena_tx_ctx.push_header = push_header; + ena_tx_ctx.num_bufs = tx_info->num_of_bufs; + ena_tx_ctx.req_id = req_id; + ena_tx_ctx.header_len = header_len; + + /* Set Tx offloads flags, if applicable */ + ena_tx_mbuf_prepare(mbuf, &ena_tx_ctx, tx_ring->offloads, + tx_ring->disable_meta_caching); + + if (unlikely(ena_com_is_doorbell_needed(tx_ring->ena_com_io_sq, + &ena_tx_ctx))) { + PMD_DRV_LOG(DEBUG, + "llq tx max burst size of queue %d achieved, writing doorbell to send burst\n", + tx_ring->id); + ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq); + } + + /* prepare the packet's descriptors to dma engine */ + rc = ena_com_prepare_tx(tx_ring->ena_com_io_sq, &ena_tx_ctx, + &nb_hw_desc); + if (unlikely(rc)) { + ++tx_ring->tx_stats.prepare_ctx_err; + return rc; + } + + tx_info->tx_descs = nb_hw_desc; + + tx_ring->tx_stats.cnt++; + tx_ring->tx_stats.bytes += mbuf->pkt_len; + + tx_ring->next_to_use = ENA_IDX_NEXT_MASKED(next_to_use, + tx_ring->size_mask); + + return 0; +} + +static void ena_tx_cleanup(struct ena_ring *tx_ring) +{ + unsigned int cleanup_budget; + unsigned int total_tx_descs = 0; + uint16_t next_to_clean = tx_ring->next_to_clean; + + cleanup_budget = RTE_MIN(tx_ring->ring_size / ENA_REFILL_THRESH_DIVIDER, + (unsigned int)ENA_REFILL_THRESH_PACKET); + + while (likely(total_tx_descs < cleanup_budget)) { + struct rte_mbuf *mbuf; + struct ena_tx_buffer *tx_info; + uint16_t req_id; + + 
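With the device placement policy (LLQ), ena_tx_map_mbuf() above pushes up to tx_max_header_size bytes of the packet inline and only describes the remainder of each segment with ena_com_buf entries; delta records how far the pushed header reaches into the following segment. A standalone sketch of that bookkeeping with plain integers instead of mbufs; the segment sizes and the 96-byte header limit are invented example values:

#include <stdio.h>

int main(void)
{
        unsigned int tx_max_header_size = 96;  /* example LLQ header limit */
        unsigned int seg_len[2] = { 64, 136 }; /* two segments, 200B packet */
        unsigned int pkt_len = seg_len[0] + seg_len[1];

        /* Header pushed inline to the device, possibly spanning segments. */
        unsigned int push_len = pkt_len < tx_max_header_size ?
                pkt_len : tx_max_header_size;
        /* Part of segment 1 already consumed by the pushed header. */
        unsigned int delta = push_len > seg_len[0] ?
                push_len - seg_len[0] : 0;

        /* First segment contributes a buffer only beyond the pushed bytes. */
        if (seg_len[0] > push_len)
                printf("buf: off=%u len=%u\n", push_len,
                       seg_len[0] - push_len);

        /* Second segment: skip the delta bytes that were already pushed. */
        printf("buf: off=%u len=%u\n", delta, seg_len[1] - delta);

        /* Prints "buf: off=32 len=104": 96 pushed + 104 described = 200. */
        return 0;
}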
if (ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq, &req_id) != 0) + break; + + if (unlikely(validate_tx_req_id(tx_ring, req_id) != 0)) + break; + + /* Get Tx info & store how many descs were processed */ + tx_info = &tx_ring->tx_buffer_info[req_id]; + + mbuf = tx_info->mbuf; + rte_pktmbuf_free(mbuf); + + tx_info->mbuf = NULL; + tx_ring->empty_tx_reqs[next_to_clean] = req_id; + + total_tx_descs += tx_info->tx_descs; + + /* Put back descriptor to the ring for reuse */ + next_to_clean = ENA_IDX_NEXT_MASKED(next_to_clean, + tx_ring->size_mask); + } + + if (likely(total_tx_descs > 0)) { + /* acknowledge completion of sent packets */ + tx_ring->next_to_clean = next_to_clean; + ena_com_comp_ack(tx_ring->ena_com_io_sq, total_tx_descs); + ena_com_update_dev_comp_head(tx_ring->ena_com_io_cq); + } +} + +static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + struct ena_ring *tx_ring = (struct ena_ring *)(tx_queue); + uint16_t sent_idx = 0; + + /* Check adapter state */ + if (unlikely(tx_ring->adapter->state != ENA_ADAPTER_STATE_RUNNING)) { + PMD_DRV_LOG(ALERT, + "Trying to xmit pkts while device is NOT running\n"); + return 0; + } + + nb_pkts = RTE_MIN(ena_com_free_q_entries(tx_ring->ena_com_io_sq), + nb_pkts); + + for (sent_idx = 0; sent_idx < nb_pkts; sent_idx++) { + if (ena_xmit_mbuf(tx_ring, tx_pkts[sent_idx])) + break; + + rte_prefetch0(tx_pkts[ENA_IDX_ADD_MASKED(sent_idx, 4, + tx_ring->size_mask)]); + } + + tx_ring->tx_stats.available_desc = + ena_com_free_q_entries(tx_ring->ena_com_io_sq); + + /* If there are ready packets to be xmitted... */ + if (sent_idx > 0) { + /* ...let HW do its best :-) */ + ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq); + tx_ring->tx_stats.doorbells++; + } + + ena_tx_cleanup(tx_ring); + + tx_ring->tx_stats.available_desc = + ena_com_free_q_entries(tx_ring->ena_com_io_sq); + tx_ring->tx_stats.tx_poll++; + + return sent_idx; +} + +/** + * DPDK callback to retrieve names of extended device statistics + * + * @param dev + * Pointer to Ethernet device structure. + * @param[out] xstats_names + * Buffer to insert names into. + * @param n + * Number of names. + * + * @return + * Number of xstats names. + */ +static int ena_xstats_get_names(struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, + unsigned int n) +{ + unsigned int xstats_count = ena_xstats_calc_num(dev); + unsigned int stat, i, count = 0; + + if (n < xstats_count || !xstats_names) + return xstats_count; + + for (stat = 0; stat < ENA_STATS_ARRAY_GLOBAL; stat++, count++) + strcpy(xstats_names[count].name, + ena_stats_global_strings[stat].name); + + for (stat = 0; stat < ENA_STATS_ARRAY_RX; stat++) + for (i = 0; i < dev->data->nb_rx_queues; i++, count++) + snprintf(xstats_names[count].name, + sizeof(xstats_names[count].name), + "rx_q%d_%s", i, + ena_stats_rx_strings[stat].name); + + for (stat = 0; stat < ENA_STATS_ARRAY_TX; stat++) + for (i = 0; i < dev->data->nb_tx_queues; i++, count++) + snprintf(xstats_names[count].name, + sizeof(xstats_names[count].name), + "tx_q%d_%s", i, + ena_stats_tx_strings[stat].name); + + return xstats_count; +} + +/** + * DPDK callback to get extended device statistics. + * + * @param dev + * Pointer to Ethernet device structure. + * @param[out] stats + * Stats table output buffer. + * @param n + * The size of the stats table. + * + * @return + * Number of xstats on success, negative on failure. 
+ */ +static int ena_xstats_get(struct rte_eth_dev *dev, + struct rte_eth_xstat *xstats, + unsigned int n) +{ + struct ena_adapter *adapter = dev->data->dev_private; + unsigned int xstats_count = ena_xstats_calc_num(dev); + unsigned int stat, i, count = 0; + int stat_offset; + void *stats_begin; + + if (n < xstats_count) + return xstats_count; + + if (!xstats) + return 0; + + for (stat = 0; stat < ENA_STATS_ARRAY_GLOBAL; stat++, count++) { + stat_offset = ena_stats_global_strings[stat].stat_offset; + stats_begin = &adapter->dev_stats; + + xstats[count].id = count; + xstats[count].value = *((uint64_t *) + ((char *)stats_begin + stat_offset)); + } + + for (stat = 0; stat < ENA_STATS_ARRAY_RX; stat++) { + for (i = 0; i < dev->data->nb_rx_queues; i++, count++) { + stat_offset = ena_stats_rx_strings[stat].stat_offset; + stats_begin = &adapter->rx_ring[i].rx_stats; + + xstats[count].id = count; + xstats[count].value = *((uint64_t *) + ((char *)stats_begin + stat_offset)); + } + } + + for (stat = 0; stat < ENA_STATS_ARRAY_TX; stat++) { + for (i = 0; i < dev->data->nb_tx_queues; i++, count++) { + stat_offset = ena_stats_tx_strings[stat].stat_offset; + stats_begin = &adapter->tx_ring[i].tx_stats; + + xstats[count].id = count; + xstats[count].value = *((uint64_t *) + ((char *)stats_begin + stat_offset)); + } + } + + return count; +} + +static int ena_xstats_get_by_id(struct rte_eth_dev *dev, + const uint64_t *ids, + uint64_t *values, + unsigned int n) +{ + struct ena_adapter *adapter = dev->data->dev_private; + uint64_t id; + uint64_t rx_entries, tx_entries; + unsigned int i; + int qid; + int valid = 0; + for (i = 0; i < n; ++i) { + id = ids[i]; + /* Check if id belongs to global statistics */ + if (id < ENA_STATS_ARRAY_GLOBAL) { + values[i] = *((uint64_t *)&adapter->dev_stats + id); + ++valid; + continue; + } + + /* Check if id belongs to rx queue statistics */ + id -= ENA_STATS_ARRAY_GLOBAL; + rx_entries = ENA_STATS_ARRAY_RX * dev->data->nb_rx_queues; + if (id < rx_entries) { + qid = id % dev->data->nb_rx_queues; + id /= dev->data->nb_rx_queues; + values[i] = *((uint64_t *) + &adapter->rx_ring[qid].rx_stats + id); + ++valid; + continue; + } + /* Check if id belongs to tx queue statistics */ + id -= rx_entries; + tx_entries = ENA_STATS_ARRAY_TX * dev->data->nb_tx_queues; + if (id < tx_entries) { + qid = id % dev->data->nb_tx_queues; + id /= dev->data->nb_tx_queues; + values[i] = *((uint64_t *) + &adapter->tx_ring[qid].tx_stats + id); + ++valid; + continue; + } + } + + return valid; +} + +static int ena_process_bool_devarg(const char *key, + const char *value, + void *opaque) +{ + struct ena_adapter *adapter = opaque; + bool bool_value; + + /* Parse the value. */ + if (strcmp(value, "1") == 0) { + bool_value = true; + } else if (strcmp(value, "0") == 0) { + bool_value = false; + } else { + PMD_INIT_LOG(ERR, + "Invalid value: '%s' for key '%s'. Accepted: '0' or '1'\n", + value, key); + return -EINVAL; + } + + /* Now, assign it to the proper adapter field. 
*/ + if (strcmp(key, ENA_DEVARG_LARGE_LLQ_HDR) == 0) + adapter->use_large_llq_hdr = bool_value; + + return 0; +} + +static int ena_parse_devargs(struct ena_adapter *adapter, + struct rte_devargs *devargs) +{ + static const char * const allowed_args[] = { + ENA_DEVARG_LARGE_LLQ_HDR, + NULL, + }; + struct rte_kvargs *kvlist; + int rc; + + if (devargs == NULL) + return 0; + + kvlist = rte_kvargs_parse(devargs->args, allowed_args); + if (kvlist == NULL) { + PMD_INIT_LOG(ERR, "Invalid device arguments: %s\n", + devargs->args); + return -EINVAL; + } + + rc = rte_kvargs_process(kvlist, ENA_DEVARG_LARGE_LLQ_HDR, + ena_process_bool_devarg, adapter); + + rte_kvargs_free(kvlist); + + return rc; +} + +/********************************************************************* + * PMD configuration + *********************************************************************/ +static int eth_ena_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, + struct rte_pci_device *pci_dev) +{ + return rte_eth_dev_pci_generic_probe(pci_dev, + sizeof(struct ena_adapter), eth_ena_dev_init); +} + +static int eth_ena_pci_remove(struct rte_pci_device *pci_dev) +{ + return rte_eth_dev_pci_generic_remove(pci_dev, eth_ena_dev_uninit); +} + +static struct rte_pci_driver rte_ena_pmd = { + .id_table = pci_id_ena_map, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC | + RTE_PCI_DRV_WC_ACTIVATE, + .probe = eth_ena_pci_probe, + .remove = eth_ena_pci_remove, +}; + +RTE_PMD_REGISTER_PCI(net_ena, rte_ena_pmd); +RTE_PMD_REGISTER_PCI_TABLE(net_ena, pci_id_ena_map); +RTE_PMD_REGISTER_KMOD_DEP(net_ena, "* igb_uio | uio_pci_generic | vfio-pci"); +RTE_PMD_REGISTER_PARAM_STRING(net_ena, ENA_DEVARG_LARGE_LLQ_HDR "=<0|1>"); + +RTE_INIT(ena_init_log) +{ + ena_logtype_init = rte_log_register("pmd.net.ena.init"); + if (ena_logtype_init >= 0) + rte_log_set_level(ena_logtype_init, RTE_LOG_NOTICE); + ena_logtype_driver = rte_log_register("pmd.net.ena.driver"); + if (ena_logtype_driver >= 0) + rte_log_set_level(ena_logtype_driver, RTE_LOG_NOTICE); + +#ifdef RTE_LIBRTE_ENA_DEBUG_RX + ena_logtype_rx = rte_log_register("pmd.net.ena.rx"); + if (ena_logtype_rx >= 0) + rte_log_set_level(ena_logtype_rx, RTE_LOG_NOTICE); +#endif + +#ifdef RTE_LIBRTE_ENA_DEBUG_TX + ena_logtype_tx = rte_log_register("pmd.net.ena.tx"); + if (ena_logtype_tx >= 0) + rte_log_set_level(ena_logtype_tx, RTE_LOG_NOTICE); +#endif + +#ifdef RTE_LIBRTE_ENA_DEBUG_TX_FREE + ena_logtype_tx_free = rte_log_register("pmd.net.ena.tx_free"); + if (ena_logtype_tx_free >= 0) + rte_log_set_level(ena_logtype_tx_free, RTE_LOG_NOTICE); +#endif + +#ifdef RTE_LIBRTE_ENA_COM_DEBUG + ena_logtype_com = rte_log_register("pmd.net.ena.com"); + if (ena_logtype_com >= 0) + rte_log_set_level(ena_logtype_com, RTE_LOG_NOTICE); +#endif +} + +/****************************************************************************** + ******************************** AENQ Handlers ******************************* + *****************************************************************************/ +static void ena_update_on_link_change(void *adapter_data, + struct ena_admin_aenq_entry *aenq_e) +{ + struct rte_eth_dev *eth_dev; + struct ena_adapter *adapter; + struct ena_admin_aenq_link_change_desc *aenq_link_desc; + uint32_t status; + + adapter = adapter_data; + aenq_link_desc = (struct ena_admin_aenq_link_change_desc *)aenq_e; + eth_dev = adapter->rte_dev; + + status = get_ena_admin_aenq_link_change_desc_link_status(aenq_link_desc); + adapter->link_status = status; + + ena_link_update(eth_dev, 0); + 
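The large_llq_hdr handling above is the standard rte_kvargs pattern: the devargs string a user appends to the device address on the EAL command line (in this DPDK generation, something like -w 0000:00:06.0,large_llq_hdr=1) is split into key/value pairs and dispatched to a handler per key. A minimal sketch of the same pattern with a hypothetical key name, not an ENA API:

#include <errno.h>
#include <stdbool.h>
#include <string.h>

#include <rte_kvargs.h>

#define EXAMPLE_DEVARG_KEY "my_flag" /* hypothetical key, for illustration */

static int
example_handler(const char *key, const char *value, void *opaque)
{
        bool *flag = opaque;

        if (strcmp(key, EXAMPLE_DEVARG_KEY) == 0)
                *flag = (strcmp(value, "1") == 0);

        return 0;
}

/* Parse a devargs string such as "my_flag=1" into *flag. */
static int
example_parse_devargs(const char *args, bool *flag)
{
        static const char * const allowed[] = { EXAMPLE_DEVARG_KEY, NULL };
        struct rte_kvargs *kvlist;
        int rc;

        kvlist = rte_kvargs_parse(args, allowed);
        if (kvlist == NULL)
                return -EINVAL;

        rc = rte_kvargs_process(kvlist, EXAMPLE_DEVARG_KEY,
                                example_handler, flag);
        rte_kvargs_free(kvlist);

        return rc;
}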
_rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL); +} + +static void ena_notification(void *data, + struct ena_admin_aenq_entry *aenq_e) +{ + struct ena_adapter *adapter = data; + struct ena_admin_ena_hw_hints *hints; + + if (aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION) + PMD_DRV_LOG(WARNING, "Invalid group(%x) expected %x\n", + aenq_e->aenq_common_desc.group, + ENA_ADMIN_NOTIFICATION); + + switch (aenq_e->aenq_common_desc.syndrom) { + case ENA_ADMIN_UPDATE_HINTS: + hints = (struct ena_admin_ena_hw_hints *) + (&aenq_e->inline_data_w4); + ena_update_hints(adapter, hints); + break; + default: + PMD_DRV_LOG(ERR, "Invalid aenq notification link state %d\n", + aenq_e->aenq_common_desc.syndrom); + } +} + +static void ena_keep_alive(void *adapter_data, + __rte_unused struct ena_admin_aenq_entry *aenq_e) +{ + struct ena_adapter *adapter = adapter_data; + struct ena_admin_aenq_keep_alive_desc *desc; + uint64_t rx_drops; + uint64_t tx_drops; + + adapter->timestamp_wd = rte_get_timer_cycles(); + + desc = (struct ena_admin_aenq_keep_alive_desc *)aenq_e; + rx_drops = ((uint64_t)desc->rx_drops_high << 32) | desc->rx_drops_low; + tx_drops = ((uint64_t)desc->tx_drops_high << 32) | desc->tx_drops_low; + + adapter->drv_stats->rx_drops = rx_drops; + adapter->dev_stats.tx_drops = tx_drops; +} + +/** + * This handler will called for unknown event group or unimplemented handlers + **/ +static void unimplemented_aenq_handler(__rte_unused void *data, + __rte_unused struct ena_admin_aenq_entry *aenq_e) +{ + PMD_DRV_LOG(ERR, "Unknown event was received or event with " + "unimplemented handler\n"); +} + +static struct ena_aenq_handlers aenq_handlers = { + .handlers = { + [ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change, + [ENA_ADMIN_NOTIFICATION] = ena_notification, + [ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive + }, + .unimplemented_handler = unimplemented_aenq_handler +}; diff --git a/src/spdk/dpdk/drivers/net/ena/ena_ethdev.h b/src/spdk/dpdk/drivers/net/ena/ena_ethdev.h new file mode 100644 index 000000000..6e24a4e58 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ena/ena_ethdev.h @@ -0,0 +1,235 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates. + * All rights reserved. + */ + +#ifndef _ENA_ETHDEV_H_ +#define _ENA_ETHDEV_H_ + +#include +#include +#include +#include + +#include "ena_com.h" + +#define ENA_REGS_BAR 0 +#define ENA_MEM_BAR 2 + +#define ENA_MAX_NUM_QUEUES 128 +#define ENA_MIN_FRAME_LEN 64 +#define ENA_NAME_MAX_LEN 20 +#define ENA_PKT_MAX_BUFS 17 +#define ENA_RX_BUF_MIN_SIZE 1400 +#define ENA_DEFAULT_RING_SIZE 1024 + +#define ENA_MIN_MTU 128 + +#define ENA_MMIO_DISABLE_REG_READ BIT(0) + +#define ENA_WD_TIMEOUT_SEC 3 +#define ENA_DEVICE_KALIVE_TIMEOUT (ENA_WD_TIMEOUT_SEC * rte_get_timer_hz()) + +/* While processing submitted and completed descriptors (rx and tx path + * respectively) in a loop it is desired to: + * - perform batch submissions while populating sumbissmion queue + * - avoid blocking transmission of other packets during cleanup phase + * Hence the utilization ratio of 1/8 of a queue size or max value if the size + * of the ring is very big - like 8k Rx rings. 
+ */ +#define ENA_REFILL_THRESH_DIVIDER 8 +#define ENA_REFILL_THRESH_PACKET 256 + +#define ENA_IDX_NEXT_MASKED(idx, mask) (((idx) + 1) & (mask)) +#define ENA_IDX_ADD_MASKED(idx, n, mask) (((idx) + (n)) & (mask)) + +struct ena_adapter; + +enum ena_ring_type { + ENA_RING_TYPE_RX = 1, + ENA_RING_TYPE_TX = 2, +}; + +struct ena_tx_buffer { + struct rte_mbuf *mbuf; + unsigned int tx_descs; + unsigned int num_of_bufs; + struct ena_com_buf bufs[ENA_PKT_MAX_BUFS]; +}; + +/* Rx buffer holds only pointer to the mbuf - may be expanded in the future */ +struct ena_rx_buffer { + struct rte_mbuf *mbuf; + struct ena_com_buf ena_buf; +}; + +struct ena_calc_queue_size_ctx { + struct ena_com_dev_get_features_ctx *get_feat_ctx; + struct ena_com_dev *ena_dev; + u32 max_rx_queue_size; + u32 max_tx_queue_size; + u16 max_tx_sgl_size; + u16 max_rx_sgl_size; +}; + +struct ena_stats_tx { + u64 cnt; + u64 bytes; + u64 prepare_ctx_err; + u64 linearize; + u64 linearize_failed; + u64 tx_poll; + u64 doorbells; + u64 bad_req_id; + u64 available_desc; +}; + +struct ena_stats_rx { + u64 cnt; + u64 bytes; + u64 refill_partial; + u64 bad_csum; + u64 mbuf_alloc_fail; + u64 bad_desc_num; + u64 bad_req_id; +}; + +struct ena_ring { + u16 next_to_use; + u16 next_to_clean; + + enum ena_ring_type type; + enum ena_admin_placement_policy_type tx_mem_queue_type; + /* Holds the empty requests for TX/RX OOO completions */ + union { + uint16_t *empty_tx_reqs; + uint16_t *empty_rx_reqs; + }; + + union { + struct ena_tx_buffer *tx_buffer_info; /* contex of tx packet */ + struct ena_rx_buffer *rx_buffer_info; /* contex of rx packet */ + }; + struct rte_mbuf **rx_refill_buffer; + unsigned int ring_size; /* number of tx/rx_buffer_info's entries */ + unsigned int size_mask; + + struct ena_com_io_cq *ena_com_io_cq; + struct ena_com_io_sq *ena_com_io_sq; + + struct ena_com_rx_buf_info ena_bufs[ENA_PKT_MAX_BUFS] + __rte_cache_aligned; + + struct rte_mempool *mb_pool; + unsigned int port_id; + unsigned int id; + /* Max length PMD can push to device for LLQ */ + uint8_t tx_max_header_size; + int configured; + + uint8_t *push_buf_intermediate_buf; + + struct ena_adapter *adapter; + uint64_t offloads; + u16 sgl_size; + + bool disable_meta_caching; + + union { + struct ena_stats_rx rx_stats; + struct ena_stats_tx tx_stats; + }; + + unsigned int numa_socket_id; +} __rte_cache_aligned; + +enum ena_adapter_state { + ENA_ADAPTER_STATE_FREE = 0, + ENA_ADAPTER_STATE_INIT = 1, + ENA_ADAPTER_STATE_RUNNING = 2, + ENA_ADAPTER_STATE_STOPPED = 3, + ENA_ADAPTER_STATE_CONFIG = 4, + ENA_ADAPTER_STATE_CLOSED = 5, +}; + +struct ena_driver_stats { + rte_atomic64_t ierrors; + rte_atomic64_t oerrors; + rte_atomic64_t rx_nombuf; + u64 rx_drops; +}; + +struct ena_stats_dev { + u64 wd_expired; + u64 dev_start; + u64 dev_stop; + /* + * Tx drops cannot be reported as the driver statistic, because DPDK + * rte_eth_stats structure isn't providing appropriate field for that. + * As a workaround it is being published as an extended statistic. 
+ */ + u64 tx_drops; +}; + +struct ena_offloads { + bool tso4_supported; + bool tx_csum_supported; + bool rx_csum_supported; +}; + +/* board specific private data structure */ +struct ena_adapter { + /* OS defined structs */ + struct rte_pci_device *pdev; + struct rte_eth_dev_data *rte_eth_dev_data; + struct rte_eth_dev *rte_dev; + + struct ena_com_dev ena_dev __rte_cache_aligned; + + /* TX */ + struct ena_ring tx_ring[ENA_MAX_NUM_QUEUES] __rte_cache_aligned; + u32 max_tx_ring_size; + u16 max_tx_sgl_size; + + /* RX */ + struct ena_ring rx_ring[ENA_MAX_NUM_QUEUES] __rte_cache_aligned; + u32 max_rx_ring_size; + u16 max_rx_sgl_size; + + u32 max_num_io_queues; + u16 max_mtu; + struct ena_offloads offloads; + + int id_number; + char name[ENA_NAME_MAX_LEN]; + u8 mac_addr[RTE_ETHER_ADDR_LEN]; + + void *regs; + void *dev_mem_base; + + struct ena_driver_stats *drv_stats; + enum ena_adapter_state state; + + uint64_t tx_supported_offloads; + uint64_t tx_selected_offloads; + uint64_t rx_supported_offloads; + uint64_t rx_selected_offloads; + + bool link_status; + + enum ena_regs_reset_reason_types reset_reason; + + struct rte_timer timer_wd; + uint64_t timestamp_wd; + uint64_t keep_alive_timeout; + + struct ena_stats_dev dev_stats; + + bool trigger_reset; + + bool wd_state; + + bool use_large_llq_hdr; +}; + +#endif /* _ENA_ETHDEV_H_ */ diff --git a/src/spdk/dpdk/drivers/net/ena/ena_logs.h b/src/spdk/dpdk/drivers/net/ena/ena_logs.h new file mode 100644 index 000000000..9053c9183 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ena/ena_logs.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2015-2019 Amazon.com, Inc. or its affiliates. + * All rights reserved. + */ + +#ifndef _ENA_LOGS_H_ +#define _ENA_LOGS_H_ + +extern int ena_logtype_init; +#define PMD_INIT_LOG(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, ena_logtype_init, \ + "%s(): " fmt "\n", __func__, ## args) + +#ifdef RTE_LIBRTE_ENA_DEBUG_RX +extern int ena_logtype_rx; +#define PMD_RX_LOG(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, ena_logtype_rx, \ + "%s(): " fmt "\n", __func__, ## args) +#else +#define PMD_RX_LOG(level, fmt, args...) do { } while (0) +#endif + +#ifdef RTE_LIBRTE_ENA_DEBUG_TX +extern int ena_logtype_tx; +#define PMD_TX_LOG(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, ena_logtype_tx, \ + "%s(): " fmt "\n", __func__, ## args) +#else +#define PMD_TX_LOG(level, fmt, args...) do { } while (0) +#endif + +#ifdef RTE_LIBRTE_ENA_DEBUG_TX_FREE +extern int ena_logtype_tx_free; +#define PMD_TX_FREE_LOG(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, ena_logtype_tx_free, \ + "%s(): " fmt "\n", __func__, ## args) +#else +#define PMD_TX_FREE_LOG(level, fmt, args...) do { } while (0) +#endif + +extern int ena_logtype_driver; +#define PMD_DRV_LOG(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, ena_logtype_driver, \ + "%s(): " fmt "\n", __func__, ## args) + +#endif /* _ENA_LOGS_H_ */ diff --git a/src/spdk/dpdk/drivers/net/ena/ena_platform.h b/src/spdk/dpdk/drivers/net/ena/ena_platform.h new file mode 100644 index 000000000..d3e40e0e9 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ena/ena_platform.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2015-2019 Amazon.com, Inc. or its affiliates. + * All rights reserved. 
+ */ + +#ifndef __ENA_PLATFORM_H__ +#define __ENA_PLATFORM_H__ + +#define swap16_to_le(x) (x) + +#define swap32_to_le(x) (x) + +#define swap64_to_le(x) (x) + +#define swap16_from_le(x) (x) + +#define swap32_from_le(x) (x) + +#define swap64_from_le(x) (x) + +#define ena_assert_msg(cond, msg) \ + do { \ + if (unlikely(!(cond))) { \ + rte_log(RTE_LOG_ERR, ena_logtype_driver, \ + "Assert failed on %s:%s:%d: ", \ + __FILE__, __func__, __LINE__); \ + rte_panic(msg); \ + } \ + } while (0) + +#endif /* __ENA_PLATFORM_H__ */ diff --git a/src/spdk/dpdk/drivers/net/ena/meson.build b/src/spdk/dpdk/drivers/net/ena/meson.build new file mode 100644 index 000000000..189903b90 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ena/meson.build @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2018 Intel Corporation + +sources = files('ena_ethdev.c', + 'base/ena_com.c', + 'base/ena_eth_com.c') + +deps += ['timer'] + +includes += include_directories('base', 'base/ena_defs') diff --git a/src/spdk/dpdk/drivers/net/ena/rte_pmd_ena_version.map b/src/spdk/dpdk/drivers/net/ena/rte_pmd_ena_version.map new file mode 100644 index 000000000..f9f17e4f6 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ena/rte_pmd_ena_version.map @@ -0,0 +1,3 @@ +DPDK_20.0 { + local: *; +}; diff --git a/src/spdk/dpdk/drivers/net/enetc/Makefile b/src/spdk/dpdk/drivers/net/enetc/Makefile new file mode 100644 index 000000000..7276026e3 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/enetc/Makefile @@ -0,0 +1,23 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright 2018 NXP + +include $(RTE_SDK)/mk/rte.vars.mk + +# +# library name +# +LIB = librte_pmd_enetc.a + +CFLAGS += -O3 +CFLAGS += $(WERROR_FLAGS) +CFLAGS += -I$(RTE_SDK)/drivers/common/dpaax +EXPORT_MAP := rte_pmd_enetc_version.map +SRCS-$(CONFIG_RTE_LIBRTE_ENETC_PMD) += enetc_ethdev.c +SRCS-$(CONFIG_RTE_LIBRTE_ENETC_PMD) += enetc_rxtx.c + +LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool +LDLIBS += -lrte_ethdev -lrte_net +LDLIBS += -lrte_bus_pci +LDLIBS += -lrte_common_dpaax + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/src/spdk/dpdk/drivers/net/enetc/base/enetc_hw.h b/src/spdk/dpdk/drivers/net/enetc/base/enetc_hw.h new file mode 100644 index 000000000..66fad58e5 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/enetc/base/enetc_hw.h @@ -0,0 +1,277 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2018-2020 NXP + */ + +#ifndef _ENETC_HW_H_ +#define _ENETC_HW_H_ +#include + +#define BIT(x) ((uint64_t)1 << ((x))) + +/* ENETC device IDs */ +#define ENETC_DEV_ID_VF 0xef00 +#define ENETC_DEV_ID 0xe100 + +/* BD RING ALIGNMENT */ +#define ENETC_BD_RING_ALIGN 128 + +/* ENETC register block BAR */ +#define ENETC_BAR_REGS 0x0 + +/* SI regs, offset: 0h */ +#define ENETC_SIMR 0x0 +#define ENETC_SIMR_EN BIT(31) + +#define ENETC_SICAR0 0x40 +#define ENETC_SICAR0_COHERENT 0x2B2B6727 +#define ENETC_SIPMAR0 0x80 +#define ENETC_SIPMAR1 0x84 + +#define ENETC_SICAPR0 0x900 +#define ENETC_SICAPR1 0x904 + +#define ENETC_SIMSITRV(n) (0xB00 + (n) * 0x4) +#define ENETC_SIMSIRRV(n) (0xB80 + (n) * 0x4) + +#define ENETC_SICCAPR 0x1200 + +/* enum for BD type */ +enum enetc_bdr_type {TX, RX}; + +#define ENETC_BDR(type, n, off) (0x8000 + (type) * 0x100 + (n) * 0x200 \ + + (off)) +/* RX BDR reg offsets */ +#define ENETC_RBMR 0x0 /* RX BDR mode register*/ +#define ENETC_RBMR_EN BIT(31) + +#define ENETC_RBSR 0x4 /* Rx BDR status register*/ +#define ENETC_RBBSR 0x8 /* Rx BDR buffer size register*/ +#define ENETC_RBCIR 0xc /* Rx BDR consumer index register*/ +#define ENETC_RBBAR0 0x10 /* Rx BDR base 
address register 0 */ +#define ENETC_RBBAR1 0x14 /* Rx BDR base address register 1*/ +#define ENETC_RBPIR 0x18 /* Rx BDR producer index register*/ +#define ENETC_RBLENR 0x20 /* Rx BDR length register*/ +#define ENETC_RBIER 0xa0 /* Rx BDR interrupt enable register*/ +#define ENETC_RBIER_RXTIE BIT(0) +#define ENETC_RBIDR 0xa4 /* Rx BDR interrupt detect register*/ +#define ENETC_RBICIR0 0xa8 /* Rx BDR inetrrupt coalescing register 0*/ +#define ENETC_RBICIR0_ICEN BIT(31) + + +#define ENETC_TBMR 0x0 /* Tx BDR mode register (TBMR) 32 RW */ +#define ENETC_TBSR 0x4 /* x BDR status register (TBSR) 32 RO */ +#define ENETC_TBBAR0 0x10 /* Tx BDR base address register 0 (TBBAR0) 32 RW */ +#define ENETC_TBBAR1 0x14 /* Tx BDR base address register 1 (TBBAR1) 32 RW */ +#define ENETC_TBCIR 0x18 /* Tx BDR consumer index register (TBCIR) 32 RW */ +#define ENETC_TBCISR 0x1C /* Tx BDR consumer index shadow register 32 RW */ +#define ENETC_TBIER 0xA0 /* Tx BDR interrupt enable register 32 RW */ +#define ENETC_TBIDR 0xA4 /* Tx BDR interrupt detect register 32 RO */ +#define ENETC_TBICR0 0xA8 /* Tx BDR interrupt coalescing register 0 32 RW */ +#define ENETC_TBICR1 0xAC /* Tx BDR interrupt coalescing register 1 32 RW */ +#define ENETC_TBLENR 0x20 + +#define ENETC_TBCISR_IDX_MASK 0xffff +#define ENETC_TBIER_TXFIE BIT(1) + +#define ENETC_RTBLENR_LEN(n) ((n) & ~0x7) +#define ENETC_TBMR_EN BIT(31) + +/* Port regs, offset: 1_0000h */ +#define ENETC_PORT_BASE 0x10000 +#define ENETC_PMR 0x00000 +#define ENETC_PMR_EN (BIT(16) | BIT(17) | BIT(18)) +#define ENETC_PSR 0x00004 /* RO */ +#define ENETC_PSIPMR 0x00018 +#define ENETC_PSIPMR_SET_UP(n) (0x1 << (n)) /* n = SI index */ +#define ENETC_PSIPMR_SET_MP(n) (0x1 << ((n) + 16)) +#define ENETC_PSIPMAR0(n) (0x00100 + (n) * 0x20) +#define ENETC_PSIPMAR1(n) (0x00104 + (n) * 0x20) +#define ENETC_PCAPR0 0x00900 +#define ENETC_PCAPR1 0x00904 +#define ENETC_PM0_RX_FIFO 0x801C +#define ENETC_PM0_IF_MODE 0x8300 +#define ENETC_PM1_IF_MODE 0x9300 +#define ENETC_PMO_IFM_RG BIT(2) +#define ENETC_PM0_IFM_RLP (BIT(5) | BIT(11)) +#define ENETC_PM0_IFM_RGAUTO (BIT(15) | ENETC_PMO_IFM_RG | BIT(1)) +#define ENETC_PM0_IFM_XGMII BIT(12) + +#define ENETC_PV0CFGR(n) (0x00920 + (n) * 0x10) +#define ENETC_PVCFGR_SET_TXBDR(val) ((val) & 0xff) +#define ENETC_PVCFGR_SET_RXBDR(val) (((val) & 0xff) << 16) + +#define ENETC_PM0_CMD_CFG 0x08008 +#define ENETC_PM0_TX_EN BIT(0) +#define ENETC_PM0_RX_EN BIT(1) +#define ENETC_PM0_CRC BIT(6) + +#define ENETC_PAR_PORT_CFG 0x03050 +#define L3_CKSUM BIT(0) +#define L4_CKSUM BIT(1) + +#define ENETC_PM0_MAXFRM 0x08014 +#define ENETC_SET_TX_MTU(val) ((val) << 16) +#define ENETC_SET_MAXFRM(val) ((val) & 0xffff) +#define ENETC_PTXMBAR 0x0608 +/* n = TC index [0..7] */ +#define ENETC_PTCMSDUR(n) (0x2020 + (n) * 4) + +#define ENETC_PM0_STATUS 0x08304 +#define ENETC_LINK_MODE 0x0000000000080000ULL +#define ENETC_LINK_STATUS 0x0000000000010000ULL +#define ENETC_LINK_SPEED_MASK 0x0000000000060000ULL +#define ENETC_LINK_SPEED_10M 0x0ULL +#define ENETC_LINK_SPEED_100M 0x0000000000020000ULL +#define ENETC_LINK_SPEED_1G 0x0000000000040000ULL + +/* Global regs, offset: 2_0000h */ +#define ENETC_GLOBAL_BASE 0x20000 +#define ENETC_G_EIPBRR0 0x00bf8 +#define ENETC_G_EIPBRR1 0x00bfc + +/* MAC Counters */ +/* Config register to reset counters*/ +#define ENETC_PM0_STAT_CONFIG 0x080E0 +/* Receive frames counter without error */ +#define ENETC_PM0_RFRM 0x08120 +/* Receive packets counter, good + bad */ +#define ENETC_PM0_RPKT 0x08160 +/* Received octets, good + bad */ +#define 
ENETC_PM0_REOCT 0x08120 +/* Transmit octets, good + bad */ +#define ENETC_PM0_TEOCT 0x08200 +/* Transmit frames counter without error */ +#define ENETC_PM0_TFRM 0x08220 +/* Transmit packets counter, good + bad */ +#define ENETC_PM0_TPKT 0x08260 +/* Dropped not Truncated packets counter */ +#define ENETC_PM0_RDRNTP 0x081C8 +/* Dropped + trucated packets counter */ +#define ENETC_PM0_RDRP 0x08158 +/* Receive packets error counter */ +#define ENETC_PM0_RERR 0x08138 +/* Transmit packets error counter */ +#define ENETC_PM0_TERR 0x08238 + +/* Stats Reset Bit*/ +#define ENETC_CLEAR_STATS BIT(2) + +#define ENETC_G_EPFBLPR(n) (0xd00 + 4 * (n)) +#define ENETC_G_EPFBLPR1_XGMII 0x80000000 + +/* general register accessors */ +#define enetc_rd_reg(reg) rte_read32((void *)(reg)) +#define enetc_wr_reg(reg, val) rte_write32((val), (void *)(reg)) +#define enetc_rd(hw, off) enetc_rd_reg((size_t)(hw)->reg + (off)) +#define enetc_wr(hw, off, val) enetc_wr_reg((size_t)(hw)->reg + (off), val) +/* port register accessors - PF only */ +#define enetc_port_rd(hw, off) enetc_rd_reg((size_t)(hw)->port + (off)) +#define enetc_port_wr(hw, off, val) \ + enetc_wr_reg((size_t)(hw)->port + (off), val) +/* global register accessors - PF only */ +#define enetc_global_rd(hw, off) \ + enetc_rd_reg((size_t)(hw)->global + (off)) +#define enetc_global_wr(hw, off, val) \ + enetc_wr_reg((size_t)(hw)->global + (off), val) +/* BDR register accessors, see ENETC_BDR() */ +#define enetc_bdr_rd(hw, t, n, off) \ + enetc_rd(hw, ENETC_BDR(t, n, off)) +#define enetc_bdr_wr(hw, t, n, off, val) \ + enetc_wr(hw, ENETC_BDR(t, n, off), val) + +#define enetc_txbdr_rd(hw, n, off) enetc_bdr_rd(hw, TX, n, off) +#define enetc_rxbdr_rd(hw, n, off) enetc_bdr_rd(hw, RX, n, off) +#define enetc_txbdr_wr(hw, n, off, val) \ + enetc_bdr_wr(hw, TX, n, off, val) +#define enetc_rxbdr_wr(hw, n, off, val) \ + enetc_bdr_wr(hw, RX, n, off, val) + +#define ENETC_TX_ADDR(txq, addr) ((void *)((txq)->enetc_txbdr + (addr))) + +#define ENETC_TXBD_FLAGS_IE BIT(13) +#define ENETC_TXBD_FLAGS_F BIT(15) + +/* ENETC Parsed values (Little Endian) */ +#define ENETC_PARSE_ERROR 0x8000 +#define ENETC_PKT_TYPE_ETHER 0x0060 +#define ENETC_PKT_TYPE_IPV4 0x0000 +#define ENETC_PKT_TYPE_IPV6 0x0020 +#define ENETC_PKT_TYPE_IPV4_TCP \ + (0x0010 | ENETC_PKT_TYPE_IPV4) +#define ENETC_PKT_TYPE_IPV6_TCP \ + (0x0010 | ENETC_PKT_TYPE_IPV6) +#define ENETC_PKT_TYPE_IPV4_UDP \ + (0x0011 | ENETC_PKT_TYPE_IPV4) +#define ENETC_PKT_TYPE_IPV6_UDP \ + (0x0011 | ENETC_PKT_TYPE_IPV6) +#define ENETC_PKT_TYPE_IPV4_SCTP \ + (0x0013 | ENETC_PKT_TYPE_IPV4) +#define ENETC_PKT_TYPE_IPV6_SCTP \ + (0x0013 | ENETC_PKT_TYPE_IPV6) +#define ENETC_PKT_TYPE_IPV4_ICMP \ + (0x0003 | ENETC_PKT_TYPE_IPV4) +#define ENETC_PKT_TYPE_IPV6_ICMP \ + (0x0003 | ENETC_PKT_TYPE_IPV6) + +/* PCI device info */ +struct enetc_hw { + void *reg; /* SI registers, used by all PCI functions */ + void *port; /* Port registers, PF only */ + void *global; /* IP global registers, PF only */ +}; + +struct enetc_eth_mac_info { + uint8_t addr[RTE_ETHER_ADDR_LEN]; + uint8_t perm_addr[RTE_ETHER_ADDR_LEN]; + uint8_t get_link_status; +}; + +struct enetc_eth_hw { + struct rte_eth_dev *ndev; + struct enetc_hw hw; + uint16_t device_id; + uint16_t vendor_id; + uint8_t revision_id; + struct enetc_eth_mac_info mac; +}; + +/* Transmit Descriptor */ +struct enetc_tx_desc { + uint64_t addr; + uint16_t frm_len; + uint16_t buf_len; + uint32_t flags_errors; +}; + +/* TX Buffer Descriptors (BD) */ +struct enetc_tx_bd { + uint64_t addr; + uint16_t buf_len; + uint16_t 
frm_len; + uint16_t err_csum; + uint16_t flags; +}; + +/* RX buffer descriptor */ +union enetc_rx_bd { + struct { + uint64_t addr; + uint8_t reserved[8]; + } w; + struct { + uint16_t inet_csum; + uint16_t parse_summary; + uint32_t rss_hash; + uint16_t buf_len; + uint16_t vlan_opt; + union { + struct { + uint16_t flags; + uint16_t error; + }; + uint32_t lstatus; + }; + } r; +}; + +#endif diff --git a/src/spdk/dpdk/drivers/net/enetc/enetc.h b/src/spdk/dpdk/drivers/net/enetc/enetc.h new file mode 100644 index 000000000..14ef3bc18 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/enetc/enetc.h @@ -0,0 +1,114 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2018-2019 NXP + */ + +#ifndef _ENETC_H_ +#define _ENETC_H_ + +#include + +#include "base/enetc_hw.h" + +#define PCI_VENDOR_ID_FREESCALE 0x1957 + +/* Max TX rings per ENETC. */ +#define MAX_TX_RINGS 2 + +/* Max RX rings per ENTEC. */ +#define MAX_RX_RINGS 1 + +/* Max BD counts per Ring. */ +#define MAX_BD_COUNT 64000 +/* Min BD counts per Ring. */ +#define MIN_BD_COUNT 32 +/* BD ALIGN */ +#define BD_ALIGN 8 + +/* minimum frame size supported */ +#define ENETC_MAC_MINFRM_SIZE 68 +/* maximum frame size supported */ +#define ENETC_MAC_MAXFRM_SIZE 9600 + +/* + * upper_32_bits - return bits 32-63 of a number + * @n: the number we're accessing + * + * A basic shift-right of a 64- or 32-bit quantity. Use this to suppress + * the "right shift count >= width of type" warning when that quantity is + * 32-bits. + */ +#define upper_32_bits(n) ((uint32_t)(((n) >> 16) >> 16)) + +/* + * lower_32_bits - return bits 0-31 of a number + * @n: the number we're accessing + */ +#define lower_32_bits(n) ((uint32_t)(n)) + +#define ENETC_TXBD(BDR, i) (&(((struct enetc_tx_bd *)((BDR).bd_base))[i])) +#define ENETC_RXBD(BDR, i) (&(((union enetc_rx_bd *)((BDR).bd_base))[i])) + +struct enetc_swbd { + struct rte_mbuf *buffer_addr; +}; + +struct enetc_bdr { + void *bd_base; /* points to Rx or Tx BD ring */ + struct enetc_swbd *q_swbd; + union { + void *tcir; + void *rcir; + }; + int bd_count; /* # of BDs */ + int next_to_use; + int next_to_clean; + uint16_t index; + uint8_t crc_len; /* 0 if CRC stripped, 4 otherwise */ + union { + void *tcisr; /* Tx */ + int next_to_alloc; /* Rx */ + }; + struct rte_mempool *mb_pool; /* mbuf pool to populate RX ring. */ + struct rte_eth_dev *ndev; +}; + +/* + * Structure to store private data for each driver instance (for each port). 
+ */ +struct enetc_eth_adapter { + struct rte_eth_dev *ndev; + struct enetc_eth_hw hw; +}; + +#define ENETC_DEV_PRIVATE(adapter) \ + ((struct enetc_eth_adapter *)adapter) + +#define ENETC_DEV_PRIVATE_TO_HW(adapter) \ + (&((struct enetc_eth_adapter *)adapter)->hw) + +#define ENETC_DEV_PRIVATE_TO_STATS(adapter) \ + (&((struct enetc_eth_adapter *)adapter)->stats) + +#define ENETC_DEV_PRIVATE_TO_INTR(adapter) \ + (&((struct enetc_eth_adapter *)adapter)->intr) + +/* + * RX/TX ENETC function prototypes + */ +uint16_t enetc_xmit_pkts(void *txq, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); +uint16_t enetc_recv_pkts(void *rxq, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); + + +int enetc_refill_rx_ring(struct enetc_bdr *rx_ring, const int buff_cnt); + +static inline int +enetc_bd_unused(struct enetc_bdr *bdr) +{ + if (bdr->next_to_clean > bdr->next_to_use) + return bdr->next_to_clean - bdr->next_to_use - 1; + + return bdr->bd_count + bdr->next_to_clean - bdr->next_to_use - 1; +} +#endif /* _ENETC_H_ */ diff --git a/src/spdk/dpdk/drivers/net/enetc/enetc_ethdev.c b/src/spdk/dpdk/drivers/net/enetc/enetc_ethdev.c new file mode 100644 index 000000000..1716e11dd --- /dev/null +++ b/src/spdk/dpdk/drivers/net/enetc/enetc_ethdev.c @@ -0,0 +1,960 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2018-2020 NXP + */ + +#include +#include +#include +#include + +#include "enetc_logs.h" +#include "enetc.h" + +int enetc_logtype_pmd; + +static int +enetc_dev_start(struct rte_eth_dev *dev) +{ + struct enetc_eth_hw *hw = + ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct enetc_hw *enetc_hw = &hw->hw; + uint32_t val; + + PMD_INIT_FUNC_TRACE(); + val = enetc_port_rd(enetc_hw, ENETC_PM0_CMD_CFG); + enetc_port_wr(enetc_hw, ENETC_PM0_CMD_CFG, + val | ENETC_PM0_TX_EN | ENETC_PM0_RX_EN); + + /* Enable port */ + val = enetc_port_rd(enetc_hw, ENETC_PMR); + enetc_port_wr(enetc_hw, ENETC_PMR, val | ENETC_PMR_EN); + + /* set auto-speed for RGMII */ + if (enetc_port_rd(enetc_hw, ENETC_PM0_IF_MODE) & ENETC_PMO_IFM_RG) { + enetc_port_wr(enetc_hw, ENETC_PM0_IF_MODE, + ENETC_PM0_IFM_RGAUTO); + enetc_port_wr(enetc_hw, ENETC_PM1_IF_MODE, + ENETC_PM0_IFM_RGAUTO); + } + if (enetc_global_rd(enetc_hw, + ENETC_G_EPFBLPR(1)) == ENETC_G_EPFBLPR1_XGMII) { + enetc_port_wr(enetc_hw, ENETC_PM0_IF_MODE, + ENETC_PM0_IFM_XGMII); + enetc_port_wr(enetc_hw, ENETC_PM1_IF_MODE, + ENETC_PM0_IFM_XGMII); + } + + return 0; +} + +static void +enetc_dev_stop(struct rte_eth_dev *dev) +{ + struct enetc_eth_hw *hw = + ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct enetc_hw *enetc_hw = &hw->hw; + uint32_t val; + + PMD_INIT_FUNC_TRACE(); + /* Disable port */ + val = enetc_port_rd(enetc_hw, ENETC_PMR); + enetc_port_wr(enetc_hw, ENETC_PMR, val & (~ENETC_PMR_EN)); + + val = enetc_port_rd(enetc_hw, ENETC_PM0_CMD_CFG); + enetc_port_wr(enetc_hw, ENETC_PM0_CMD_CFG, + val & (~(ENETC_PM0_TX_EN | ENETC_PM0_RX_EN))); +} + +static const uint32_t * +enetc_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused) +{ + static const uint32_t ptypes[] = { + RTE_PTYPE_L2_ETHER, + RTE_PTYPE_L3_IPV4, + RTE_PTYPE_L3_IPV6, + RTE_PTYPE_L4_TCP, + RTE_PTYPE_L4_UDP, + RTE_PTYPE_L4_SCTP, + RTE_PTYPE_L4_ICMP, + RTE_PTYPE_UNKNOWN + }; + + return ptypes; +} + +/* return 0 means link status changed, -1 means not changed */ +static int +enetc_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused) +{ + struct enetc_eth_hw *hw = + ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct enetc_hw *enetc_hw = &hw->hw; + struct rte_eth_link link; + 
uint32_t status; + + PMD_INIT_FUNC_TRACE(); + + memset(&link, 0, sizeof(link)); + + status = enetc_port_rd(enetc_hw, ENETC_PM0_STATUS); + + if (status & ENETC_LINK_MODE) + link.link_duplex = ETH_LINK_FULL_DUPLEX; + else + link.link_duplex = ETH_LINK_HALF_DUPLEX; + + if (status & ENETC_LINK_STATUS) + link.link_status = ETH_LINK_UP; + else + link.link_status = ETH_LINK_DOWN; + + switch (status & ENETC_LINK_SPEED_MASK) { + case ENETC_LINK_SPEED_1G: + link.link_speed = ETH_SPEED_NUM_1G; + break; + + case ENETC_LINK_SPEED_100M: + link.link_speed = ETH_SPEED_NUM_100M; + break; + + default: + case ENETC_LINK_SPEED_10M: + link.link_speed = ETH_SPEED_NUM_10M; + } + + return rte_eth_linkstatus_set(dev, &link); +} + +static void +print_ethaddr(const char *name, const struct rte_ether_addr *eth_addr) +{ + char buf[RTE_ETHER_ADDR_FMT_SIZE]; + + rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr); + ENETC_PMD_NOTICE("%s%s\n", name, buf); +} + +static int +enetc_hardware_init(struct enetc_eth_hw *hw) +{ + struct enetc_hw *enetc_hw = &hw->hw; + uint32_t *mac = (uint32_t *)hw->mac.addr; + uint32_t high_mac = 0; + uint16_t low_mac = 0; + + PMD_INIT_FUNC_TRACE(); + /* Calculating and storing the base HW addresses */ + hw->hw.port = (void *)((size_t)hw->hw.reg + ENETC_PORT_BASE); + hw->hw.global = (void *)((size_t)hw->hw.reg + ENETC_GLOBAL_BASE); + + /* WA for Rx lock-up HW erratum */ + enetc_port_wr(enetc_hw, ENETC_PM0_RX_FIFO, 1); + + /* set ENETC transaction flags to coherent, don't allocate. + * BD writes merge with surrounding cache line data, frame data writes + * overwrite cache line. + */ + enetc_wr(enetc_hw, ENETC_SICAR0, ENETC_SICAR0_COHERENT); + + /* Enabling Station Interface */ + enetc_wr(enetc_hw, ENETC_SIMR, ENETC_SIMR_EN); + + *mac = (uint32_t)enetc_port_rd(enetc_hw, ENETC_PSIPMAR0(0)); + high_mac = (uint32_t)*mac; + mac++; + *mac = (uint16_t)enetc_port_rd(enetc_hw, ENETC_PSIPMAR1(0)); + low_mac = (uint16_t)*mac; + + if ((high_mac | low_mac) == 0) { + char *first_byte; + + ENETC_PMD_NOTICE("MAC is not available for this SI, " + "set random MAC\n"); + mac = (uint32_t *)hw->mac.addr; + *mac = (uint32_t)rte_rand(); + first_byte = (char *)mac; + *first_byte &= 0xfe; /* clear multicast bit */ + *first_byte |= 0x02; /* set local assignment bit (IEEE802) */ + + enetc_port_wr(enetc_hw, ENETC_PSIPMAR0(0), *mac); + mac++; + *mac = (uint16_t)rte_rand(); + enetc_port_wr(enetc_hw, ENETC_PSIPMAR1(0), *mac); + print_ethaddr("New address: ", + (const struct rte_ether_addr *)hw->mac.addr); + } + + return 0; +} + +static int +enetc_dev_infos_get(struct rte_eth_dev *dev __rte_unused, + struct rte_eth_dev_info *dev_info) +{ + PMD_INIT_FUNC_TRACE(); + dev_info->rx_desc_lim = (struct rte_eth_desc_lim) { + .nb_max = MAX_BD_COUNT, + .nb_min = MIN_BD_COUNT, + .nb_align = BD_ALIGN, + }; + dev_info->tx_desc_lim = (struct rte_eth_desc_lim) { + .nb_max = MAX_BD_COUNT, + .nb_min = MIN_BD_COUNT, + .nb_align = BD_ALIGN, + }; + dev_info->max_rx_queues = MAX_RX_RINGS; + dev_info->max_tx_queues = MAX_TX_RINGS; + dev_info->max_rx_pktlen = ENETC_MAC_MAXFRM_SIZE; + dev_info->rx_offload_capa = + (DEV_RX_OFFLOAD_IPV4_CKSUM | + DEV_RX_OFFLOAD_UDP_CKSUM | + DEV_RX_OFFLOAD_TCP_CKSUM | + DEV_RX_OFFLOAD_KEEP_CRC | + DEV_RX_OFFLOAD_JUMBO_FRAME); + + return 0; +} + +static int +enetc_alloc_txbdr(struct enetc_bdr *txr, uint16_t nb_desc) +{ + int size; + + size = nb_desc * sizeof(struct enetc_swbd); + txr->q_swbd = rte_malloc(NULL, size, ENETC_BD_RING_ALIGN); + if (txr->q_swbd == NULL) + return -ENOMEM; + + size = nb_desc * 
sizeof(struct enetc_tx_bd); + txr->bd_base = rte_malloc(NULL, size, ENETC_BD_RING_ALIGN); + if (txr->bd_base == NULL) { + rte_free(txr->q_swbd); + txr->q_swbd = NULL; + return -ENOMEM; + } + + txr->bd_count = nb_desc; + txr->next_to_clean = 0; + txr->next_to_use = 0; + + return 0; +} + +static void +enetc_free_bdr(struct enetc_bdr *rxr) +{ + rte_free(rxr->q_swbd); + rte_free(rxr->bd_base); + rxr->q_swbd = NULL; + rxr->bd_base = NULL; +} + +static void +enetc_setup_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring) +{ + int idx = tx_ring->index; + phys_addr_t bd_address; + + bd_address = (phys_addr_t) + rte_mem_virt2iova((const void *)tx_ring->bd_base); + enetc_txbdr_wr(hw, idx, ENETC_TBBAR0, + lower_32_bits((uint64_t)bd_address)); + enetc_txbdr_wr(hw, idx, ENETC_TBBAR1, + upper_32_bits((uint64_t)bd_address)); + enetc_txbdr_wr(hw, idx, ENETC_TBLENR, + ENETC_RTBLENR_LEN(tx_ring->bd_count)); + + enetc_txbdr_wr(hw, idx, ENETC_TBCIR, 0); + enetc_txbdr_wr(hw, idx, ENETC_TBCISR, 0); + tx_ring->tcir = (void *)((size_t)hw->reg + + ENETC_BDR(TX, idx, ENETC_TBCIR)); + tx_ring->tcisr = (void *)((size_t)hw->reg + + ENETC_BDR(TX, idx, ENETC_TBCISR)); +} + +static int +enetc_tx_queue_setup(struct rte_eth_dev *dev, + uint16_t queue_idx, + uint16_t nb_desc, + unsigned int socket_id __rte_unused, + const struct rte_eth_txconf *tx_conf) +{ + int err = 0; + struct enetc_bdr *tx_ring; + struct rte_eth_dev_data *data = dev->data; + struct enetc_eth_adapter *priv = + ENETC_DEV_PRIVATE(data->dev_private); + + PMD_INIT_FUNC_TRACE(); + if (nb_desc > MAX_BD_COUNT) + return -1; + + tx_ring = rte_zmalloc(NULL, sizeof(struct enetc_bdr), 0); + if (tx_ring == NULL) { + ENETC_PMD_ERR("Failed to allocate TX ring memory"); + err = -ENOMEM; + return -1; + } + + err = enetc_alloc_txbdr(tx_ring, nb_desc); + if (err) + goto fail; + + tx_ring->index = queue_idx; + tx_ring->ndev = dev; + enetc_setup_txbdr(&priv->hw.hw, tx_ring); + data->tx_queues[queue_idx] = tx_ring; + + if (!tx_conf->tx_deferred_start) { + /* enable ring */ + enetc_txbdr_wr(&priv->hw.hw, tx_ring->index, + ENETC_TBMR, ENETC_TBMR_EN); + dev->data->tx_queue_state[tx_ring->index] = + RTE_ETH_QUEUE_STATE_STARTED; + } else { + dev->data->tx_queue_state[tx_ring->index] = + RTE_ETH_QUEUE_STATE_STOPPED; + } + + return 0; +fail: + rte_free(tx_ring); + + return err; +} + +static void +enetc_tx_queue_release(void *txq) +{ + if (txq == NULL) + return; + + struct enetc_bdr *tx_ring = (struct enetc_bdr *)txq; + struct enetc_eth_hw *eth_hw = + ENETC_DEV_PRIVATE_TO_HW(tx_ring->ndev->data->dev_private); + struct enetc_hw *hw; + struct enetc_swbd *tx_swbd; + int i; + uint32_t val; + + /* Disable the ring */ + hw = ð_hw->hw; + val = enetc_txbdr_rd(hw, tx_ring->index, ENETC_TBMR); + val &= (~ENETC_TBMR_EN); + enetc_txbdr_wr(hw, tx_ring->index, ENETC_TBMR, val); + + /* clean the ring*/ + i = tx_ring->next_to_clean; + tx_swbd = &tx_ring->q_swbd[i]; + while (tx_swbd->buffer_addr != NULL) { + rte_pktmbuf_free(tx_swbd->buffer_addr); + tx_swbd->buffer_addr = NULL; + tx_swbd++; + i++; + if (unlikely(i == tx_ring->bd_count)) { + i = 0; + tx_swbd = &tx_ring->q_swbd[i]; + } + } + + enetc_free_bdr(tx_ring); + rte_free(tx_ring); +} + +static int +enetc_alloc_rxbdr(struct enetc_bdr *rxr, + uint16_t nb_rx_desc) +{ + int size; + + size = nb_rx_desc * sizeof(struct enetc_swbd); + rxr->q_swbd = rte_malloc(NULL, size, ENETC_BD_RING_ALIGN); + if (rxr->q_swbd == NULL) + return -ENOMEM; + + size = nb_rx_desc * sizeof(union enetc_rx_bd); + rxr->bd_base = rte_malloc(NULL, size, ENETC_BD_RING_ALIGN); + 
if (rxr->bd_base == NULL) { + rte_free(rxr->q_swbd); + rxr->q_swbd = NULL; + return -ENOMEM; + } + + rxr->bd_count = nb_rx_desc; + rxr->next_to_clean = 0; + rxr->next_to_use = 0; + rxr->next_to_alloc = 0; + + return 0; +} + +static void +enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring, + struct rte_mempool *mb_pool) +{ + int idx = rx_ring->index; + uint16_t buf_size; + phys_addr_t bd_address; + + bd_address = (phys_addr_t) + rte_mem_virt2iova((const void *)rx_ring->bd_base); + enetc_rxbdr_wr(hw, idx, ENETC_RBBAR0, + lower_32_bits((uint64_t)bd_address)); + enetc_rxbdr_wr(hw, idx, ENETC_RBBAR1, + upper_32_bits((uint64_t)bd_address)); + enetc_rxbdr_wr(hw, idx, ENETC_RBLENR, + ENETC_RTBLENR_LEN(rx_ring->bd_count)); + + rx_ring->mb_pool = mb_pool; + rx_ring->rcir = (void *)((size_t)hw->reg + + ENETC_BDR(RX, idx, ENETC_RBCIR)); + enetc_refill_rx_ring(rx_ring, (enetc_bd_unused(rx_ring))); + buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rx_ring->mb_pool) - + RTE_PKTMBUF_HEADROOM); + enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, buf_size); + enetc_rxbdr_wr(hw, idx, ENETC_RBPIR, 0); +} + +static int +enetc_rx_queue_setup(struct rte_eth_dev *dev, + uint16_t rx_queue_id, + uint16_t nb_rx_desc, + unsigned int socket_id __rte_unused, + const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *mb_pool) +{ + int err = 0; + struct enetc_bdr *rx_ring; + struct rte_eth_dev_data *data = dev->data; + struct enetc_eth_adapter *adapter = + ENETC_DEV_PRIVATE(data->dev_private); + uint64_t rx_offloads = data->dev_conf.rxmode.offloads; + + PMD_INIT_FUNC_TRACE(); + if (nb_rx_desc > MAX_BD_COUNT) + return -1; + + rx_ring = rte_zmalloc(NULL, sizeof(struct enetc_bdr), 0); + if (rx_ring == NULL) { + ENETC_PMD_ERR("Failed to allocate RX ring memory"); + err = -ENOMEM; + return err; + } + + err = enetc_alloc_rxbdr(rx_ring, nb_rx_desc); + if (err) + goto fail; + + rx_ring->index = rx_queue_id; + rx_ring->ndev = dev; + enetc_setup_rxbdr(&adapter->hw.hw, rx_ring, mb_pool); + data->rx_queues[rx_queue_id] = rx_ring; + + if (!rx_conf->rx_deferred_start) { + /* enable ring */ + enetc_rxbdr_wr(&adapter->hw.hw, rx_ring->index, ENETC_RBMR, + ENETC_RBMR_EN); + dev->data->rx_queue_state[rx_ring->index] = + RTE_ETH_QUEUE_STATE_STARTED; + } else { + dev->data->rx_queue_state[rx_ring->index] = + RTE_ETH_QUEUE_STATE_STOPPED; + } + + rx_ring->crc_len = (uint8_t)((rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC) ? 
+ RTE_ETHER_CRC_LEN : 0); + + return 0; +fail: + rte_free(rx_ring); + + return err; +} + +static void +enetc_rx_queue_release(void *rxq) +{ + if (rxq == NULL) + return; + + struct enetc_bdr *rx_ring = (struct enetc_bdr *)rxq; + struct enetc_eth_hw *eth_hw = + ENETC_DEV_PRIVATE_TO_HW(rx_ring->ndev->data->dev_private); + struct enetc_swbd *q_swbd; + struct enetc_hw *hw; + uint32_t val; + int i; + + /* Disable the ring */ + hw = ð_hw->hw; + val = enetc_rxbdr_rd(hw, rx_ring->index, ENETC_RBMR); + val &= (~ENETC_RBMR_EN); + enetc_rxbdr_wr(hw, rx_ring->index, ENETC_RBMR, val); + + /* Clean the ring */ + i = rx_ring->next_to_clean; + q_swbd = &rx_ring->q_swbd[i]; + while (i != rx_ring->next_to_use) { + rte_pktmbuf_free(q_swbd->buffer_addr); + q_swbd->buffer_addr = NULL; + q_swbd++; + i++; + if (unlikely(i == rx_ring->bd_count)) { + i = 0; + q_swbd = &rx_ring->q_swbd[i]; + } + } + + enetc_free_bdr(rx_ring); + rte_free(rx_ring); +} + +static +int enetc_stats_get(struct rte_eth_dev *dev, + struct rte_eth_stats *stats) +{ + struct enetc_eth_hw *hw = + ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct enetc_hw *enetc_hw = &hw->hw; + + /* Total received packets, bad + good, if we want to get counters of + * only good received packets then use ENETC_PM0_RFRM, + * ENETC_PM0_TFRM registers. + */ + stats->ipackets = enetc_port_rd(enetc_hw, ENETC_PM0_RPKT); + stats->opackets = enetc_port_rd(enetc_hw, ENETC_PM0_TPKT); + stats->ibytes = enetc_port_rd(enetc_hw, ENETC_PM0_REOCT); + stats->obytes = enetc_port_rd(enetc_hw, ENETC_PM0_TEOCT); + /* Dropped + Truncated packets, use ENETC_PM0_RDRNTP for without + * truncated packets + */ + stats->imissed = enetc_port_rd(enetc_hw, ENETC_PM0_RDRP); + stats->ierrors = enetc_port_rd(enetc_hw, ENETC_PM0_RERR); + stats->oerrors = enetc_port_rd(enetc_hw, ENETC_PM0_TERR); + + return 0; +} + +static int +enetc_stats_reset(struct rte_eth_dev *dev) +{ + struct enetc_eth_hw *hw = + ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct enetc_hw *enetc_hw = &hw->hw; + + enetc_port_wr(enetc_hw, ENETC_PM0_STAT_CONFIG, ENETC_CLEAR_STATS); + + return 0; +} + +static void +enetc_dev_close(struct rte_eth_dev *dev) +{ + uint16_t i; + + PMD_INIT_FUNC_TRACE(); + enetc_dev_stop(dev); + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + enetc_rx_queue_release(dev->data->rx_queues[i]); + dev->data->rx_queues[i] = NULL; + } + dev->data->nb_rx_queues = 0; + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + enetc_tx_queue_release(dev->data->tx_queues[i]); + dev->data->tx_queues[i] = NULL; + } + dev->data->nb_tx_queues = 0; +} + +static int +enetc_promiscuous_enable(struct rte_eth_dev *dev) +{ + struct enetc_eth_hw *hw = + ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct enetc_hw *enetc_hw = &hw->hw; + uint32_t psipmr = 0; + + psipmr = enetc_port_rd(enetc_hw, ENETC_PSIPMR); + + /* Setting to enable promiscuous mode*/ + psipmr |= ENETC_PSIPMR_SET_UP(0) | ENETC_PSIPMR_SET_MP(0); + + enetc_port_wr(enetc_hw, ENETC_PSIPMR, psipmr); + + return 0; +} + +static int +enetc_promiscuous_disable(struct rte_eth_dev *dev) +{ + struct enetc_eth_hw *hw = + ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct enetc_hw *enetc_hw = &hw->hw; + uint32_t psipmr = 0; + + /* Setting to disable promiscuous mode for SI0*/ + psipmr = enetc_port_rd(enetc_hw, ENETC_PSIPMR); + psipmr &= (~ENETC_PSIPMR_SET_UP(0)); + + if (dev->data->all_multicast == 0) + psipmr &= (~ENETC_PSIPMR_SET_MP(0)); + + enetc_port_wr(enetc_hw, ENETC_PSIPMR, psipmr); + + return 0; +} + +static int 
+enetc_allmulticast_enable(struct rte_eth_dev *dev)
+{
+	struct enetc_eth_hw *hw =
+		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct enetc_hw *enetc_hw = &hw->hw;
+	uint32_t psipmr = 0;
+
+	psipmr = enetc_port_rd(enetc_hw, ENETC_PSIPMR);
+
+	/* Setting to enable allmulticast mode for SI0*/
+	psipmr |= ENETC_PSIPMR_SET_MP(0);
+
+	enetc_port_wr(enetc_hw, ENETC_PSIPMR, psipmr);
+
+	return 0;
+}
+
+static int
+enetc_allmulticast_disable(struct rte_eth_dev *dev)
+{
+	struct enetc_eth_hw *hw =
+		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct enetc_hw *enetc_hw = &hw->hw;
+	uint32_t psipmr = 0;
+
+	if (dev->data->promiscuous == 1)
+		return 0; /* must remain in all_multicast mode */
+
+	/* Setting to disable all multicast mode for SI0*/
+	psipmr = enetc_port_rd(enetc_hw, ENETC_PSIPMR) &
+		 ~(ENETC_PSIPMR_SET_MP(0));
+
+	enetc_port_wr(enetc_hw, ENETC_PSIPMR, psipmr);
+
+	return 0;
+}
+
+static int
+enetc_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+{
+	struct enetc_eth_hw *hw =
+		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct enetc_hw *enetc_hw = &hw->hw;
+	uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
+
+	/* check that mtu is within the allowed range */
+	if (mtu < ENETC_MAC_MINFRM_SIZE || frame_size > ENETC_MAC_MAXFRM_SIZE)
+		return -EINVAL;
+
+	/*
+	 * Refuse mtu that requires the support of scattered packets
+	 * when this feature has not been enabled before.
+	 */
+	if (dev->data->min_rx_buf_size &&
+	    !dev->data->scattered_rx && frame_size >
+	    dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) {
+		ENETC_PMD_ERR("SG not enabled, will not fit in one buffer");
+		return -EINVAL;
+	}
+
+	if (frame_size > RTE_ETHER_MAX_LEN)
+		dev->data->dev_conf.rxmode.offloads |=
+						DEV_RX_OFFLOAD_JUMBO_FRAME;
+	else
+		dev->data->dev_conf.rxmode.offloads &=
+						~DEV_RX_OFFLOAD_JUMBO_FRAME;
+
+	enetc_port_wr(enetc_hw, ENETC_PTCMSDUR(0), ENETC_MAC_MAXFRM_SIZE);
+	enetc_port_wr(enetc_hw, ENETC_PTXMBAR, 2 * ENETC_MAC_MAXFRM_SIZE);
+
+	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
+
+	/* setting the MTU */
+	enetc_port_wr(enetc_hw, ENETC_PM0_MAXFRM, ENETC_SET_MAXFRM(frame_size) |
+		      ENETC_SET_TX_MTU(ENETC_MAC_MAXFRM_SIZE));
+
+	return 0;
+}
+
+static int
+enetc_dev_configure(struct rte_eth_dev *dev)
+{
+	struct enetc_eth_hw *hw =
+		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct enetc_hw *enetc_hw = &hw->hw;
+	struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
+	uint64_t rx_offloads = eth_conf->rxmode.offloads;
+	uint32_t checksum = L3_CKSUM | L4_CKSUM;
+
+	PMD_INIT_FUNC_TRACE();
+
+	if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+		uint32_t max_len;
+
+		max_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
+
+		enetc_port_wr(enetc_hw, ENETC_PM0_MAXFRM,
+			      ENETC_SET_MAXFRM(max_len));
+		enetc_port_wr(enetc_hw, ENETC_PTCMSDUR(0),
+			      ENETC_MAC_MAXFRM_SIZE);
+		enetc_port_wr(enetc_hw, ENETC_PTXMBAR,
+			      2 * ENETC_MAC_MAXFRM_SIZE);
+		dev->data->mtu = RTE_ETHER_MAX_LEN - RTE_ETHER_HDR_LEN -
+			RTE_ETHER_CRC_LEN;
+	}
+
+	if (rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
+		int config;
+
+		config = enetc_port_rd(enetc_hw, ENETC_PM0_CMD_CFG);
+		config |= ENETC_PM0_CRC;
+		enetc_port_wr(enetc_hw, ENETC_PM0_CMD_CFG, config);
+	}
+
+	if (rx_offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
+		checksum &= ~L3_CKSUM;
+
+	if (rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM | DEV_RX_OFFLOAD_TCP_CKSUM))
+		checksum &= ~L4_CKSUM;
+
+	enetc_port_wr(enetc_hw, ENETC_PAR_PORT_CFG, checksum);
+
+	return 0;
+}
+
+static int
+enetc_rx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
+{
+	struct
enetc_eth_adapter *priv = + ENETC_DEV_PRIVATE(dev->data->dev_private); + struct enetc_bdr *rx_ring; + uint32_t rx_data; + + rx_ring = dev->data->rx_queues[qidx]; + if (dev->data->rx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STOPPED) { + rx_data = enetc_rxbdr_rd(&priv->hw.hw, rx_ring->index, + ENETC_RBMR); + rx_data = rx_data | ENETC_RBMR_EN; + enetc_rxbdr_wr(&priv->hw.hw, rx_ring->index, ENETC_RBMR, + rx_data); + dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED; + } + + return 0; +} + +static int +enetc_rx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx) +{ + struct enetc_eth_adapter *priv = + ENETC_DEV_PRIVATE(dev->data->dev_private); + struct enetc_bdr *rx_ring; + uint32_t rx_data; + + rx_ring = dev->data->rx_queues[qidx]; + if (dev->data->rx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STARTED) { + rx_data = enetc_rxbdr_rd(&priv->hw.hw, rx_ring->index, + ENETC_RBMR); + rx_data = rx_data & (~ENETC_RBMR_EN); + enetc_rxbdr_wr(&priv->hw.hw, rx_ring->index, ENETC_RBMR, + rx_data); + dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED; + } + + return 0; +} + +static int +enetc_tx_queue_start(struct rte_eth_dev *dev, uint16_t qidx) +{ + struct enetc_eth_adapter *priv = + ENETC_DEV_PRIVATE(dev->data->dev_private); + struct enetc_bdr *tx_ring; + uint32_t tx_data; + + tx_ring = dev->data->tx_queues[qidx]; + if (dev->data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STOPPED) { + tx_data = enetc_txbdr_rd(&priv->hw.hw, tx_ring->index, + ENETC_TBMR); + tx_data = tx_data | ENETC_TBMR_EN; + enetc_txbdr_wr(&priv->hw.hw, tx_ring->index, ENETC_TBMR, + tx_data); + dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED; + } + + return 0; +} + +static int +enetc_tx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx) +{ + struct enetc_eth_adapter *priv = + ENETC_DEV_PRIVATE(dev->data->dev_private); + struct enetc_bdr *tx_ring; + uint32_t tx_data; + + tx_ring = dev->data->tx_queues[qidx]; + if (dev->data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STARTED) { + tx_data = enetc_txbdr_rd(&priv->hw.hw, tx_ring->index, + ENETC_TBMR); + tx_data = tx_data & (~ENETC_TBMR_EN); + enetc_txbdr_wr(&priv->hw.hw, tx_ring->index, ENETC_TBMR, + tx_data); + dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED; + } + + return 0; +} + +/* + * The set of PCI devices this driver supports + */ +static const struct rte_pci_id pci_id_enetc_map[] = { + { RTE_PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_DEV_ID) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_DEV_ID_VF) }, + { .vendor_id = 0, /* sentinel */ }, +}; + +/* Features supported by this driver */ +static const struct eth_dev_ops enetc_ops = { + .dev_configure = enetc_dev_configure, + .dev_start = enetc_dev_start, + .dev_stop = enetc_dev_stop, + .dev_close = enetc_dev_close, + .link_update = enetc_link_update, + .stats_get = enetc_stats_get, + .stats_reset = enetc_stats_reset, + .promiscuous_enable = enetc_promiscuous_enable, + .promiscuous_disable = enetc_promiscuous_disable, + .allmulticast_enable = enetc_allmulticast_enable, + .allmulticast_disable = enetc_allmulticast_disable, + .dev_infos_get = enetc_dev_infos_get, + .mtu_set = enetc_mtu_set, + .rx_queue_setup = enetc_rx_queue_setup, + .rx_queue_start = enetc_rx_queue_start, + .rx_queue_stop = enetc_rx_queue_stop, + .rx_queue_release = enetc_rx_queue_release, + .tx_queue_setup = enetc_tx_queue_setup, + .tx_queue_start = enetc_tx_queue_start, + .tx_queue_stop = enetc_tx_queue_stop, + .tx_queue_release = enetc_tx_queue_release, + .dev_supported_ptypes_get = enetc_supported_ptypes_get, +}; 
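+
+/*
+ * Editorial note, not part of the upstream driver: the ops table above is
+ * what librte_ethdev dispatches into for this PMD.  As a rough, illustrative
+ * sketch only (port_id, pool, mbufs and the descriptor count 512 are
+ * placeholders, not driver API), an application would exercise these
+ * callbacks roughly as follows:
+ *
+ *	struct rte_eth_conf conf = {0};
+ *
+ *	rte_eth_dev_configure(port_id, 1, 1, &conf);    // enetc_dev_configure
+ *	rte_eth_rx_queue_setup(port_id, 0, 512, 0,
+ *			       NULL, pool);             // enetc_rx_queue_setup
+ *	rte_eth_tx_queue_setup(port_id, 0, 512, 0,
+ *			       NULL);                   // enetc_tx_queue_setup
+ *	rte_eth_dev_start(port_id);                     // enetc_dev_start
+ *	rte_eth_rx_burst(port_id, 0, mbufs, 32);        // enetc_recv_pkts
+ */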
+
+/**
+ * Initialisation of the enetc device
+ *
+ * @param eth_dev
+ *   - Pointer to the structure rte_eth_dev
+ *
+ * @return
+ *   - On success, zero.
+ *   - On failure, negative value.
+ */
+static int
+enetc_dev_init(struct rte_eth_dev *eth_dev)
+{
+	int error = 0;
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+	struct enetc_eth_hw *hw =
+		ENETC_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+
+	PMD_INIT_FUNC_TRACE();
+	eth_dev->dev_ops = &enetc_ops;
+	eth_dev->rx_pkt_burst = &enetc_recv_pkts;
+	eth_dev->tx_pkt_burst = &enetc_xmit_pkts;
+
+	/* Retrieving and storing the HW base address of device */
+	hw->hw.reg = (void *)pci_dev->mem_resource[0].addr;
+	hw->device_id = pci_dev->id.device_id;
+
+	error = enetc_hardware_init(hw);
+	if (error != 0) {
+		ENETC_PMD_ERR("Hardware initialization failed");
+		return -1;
+	}
+
+	/* Allocate memory for storing MAC addresses */
+	eth_dev->data->mac_addrs = rte_zmalloc("enetc_eth",
+					       RTE_ETHER_ADDR_LEN, 0);
+	if (!eth_dev->data->mac_addrs) {
+		ENETC_PMD_ERR("Failed to allocate %d bytes needed to "
+			      "store MAC addresses",
+			      RTE_ETHER_ADDR_LEN * 1);
+		error = -ENOMEM;
+		return -1;
+	}
+
+	/* Copy the permanent MAC address */
+	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
+			    &eth_dev->data->mac_addrs[0]);
+
+	/* Set MTU */
+	enetc_port_wr(&hw->hw, ENETC_PM0_MAXFRM,
+		      ENETC_SET_MAXFRM(RTE_ETHER_MAX_LEN));
+	eth_dev->data->mtu = RTE_ETHER_MAX_LEN - RTE_ETHER_HDR_LEN -
+		RTE_ETHER_CRC_LEN;
+
+	if (rte_eal_iova_mode() == RTE_IOVA_PA)
+		dpaax_iova_table_populate();
+
+	ENETC_PMD_DEBUG("port_id %d vendorID=0x%x deviceID=0x%x",
+			eth_dev->data->port_id, pci_dev->id.vendor_id,
+			pci_dev->id.device_id);
+	return 0;
+}
+
+static int
+enetc_dev_uninit(struct rte_eth_dev *eth_dev __rte_unused)
+{
+	PMD_INIT_FUNC_TRACE();
+
+	if (rte_eal_iova_mode() == RTE_IOVA_PA)
+		dpaax_iova_table_depopulate();
+
+	return 0;
+}
+
+static int
+enetc_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+		struct rte_pci_device *pci_dev)
+{
+	return rte_eth_dev_pci_generic_probe(pci_dev,
+					     sizeof(struct enetc_eth_adapter),
+					     enetc_dev_init);
+}
+
+static int
+enetc_pci_remove(struct rte_pci_device *pci_dev)
+{
+	return rte_eth_dev_pci_generic_remove(pci_dev, enetc_dev_uninit);
+}
+
+static struct rte_pci_driver rte_enetc_pmd = {
+	.id_table = pci_id_enetc_map,
+	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+	.probe = enetc_pci_probe,
+	.remove = enetc_pci_remove,
+};
+
+RTE_PMD_REGISTER_PCI(net_enetc, rte_enetc_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(net_enetc, pci_id_enetc_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_enetc, "* vfio-pci");
+
+RTE_INIT(enetc_pmd_init_log)
+{
+	enetc_logtype_pmd = rte_log_register("pmd.net.enetc");
+	if (enetc_logtype_pmd >= 0)
+		rte_log_set_level(enetc_logtype_pmd, RTE_LOG_NOTICE);
+}
diff --git a/src/spdk/dpdk/drivers/net/enetc/enetc_logs.h b/src/spdk/dpdk/drivers/net/enetc/enetc_logs.h
new file mode 100644
index 000000000..0976d42de
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/enetc/enetc_logs.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 NXP
+ */
+
+#ifndef _ENETC_LOGS_H_
+#define _ENETC_LOGS_H_
+
+extern int enetc_logtype_pmd;
+
+#define ENETC_PMD_LOG(level, fmt, args...) \
+	rte_log(RTE_LOG_ ## level, enetc_logtype_pmd, "enetc_net: " \
+		fmt "\n", ##args)
+
+#define ENETC_PMD_DEBUG(fmt, args...) \
+	rte_log(RTE_LOG_DEBUG, enetc_logtype_pmd, "enetc_net: %s(): "\
+		fmt "\n", __func__, ##args)
+
+#define PMD_INIT_FUNC_TRACE() ENETC_PMD_DEBUG(">>")
+
+#define ENETC_PMD_CRIT(fmt, args...)
\ + ENETC_PMD_LOG(CRIT, fmt, ## args) +#define ENETC_PMD_INFO(fmt, args...) \ + ENETC_PMD_LOG(INFO, fmt, ## args) +#define ENETC_PMD_NOTICE(fmt, args...) \ + ENETC_PMD_LOG(NOTICE, fmt, ## args) +#define ENETC_PMD_ERR(fmt, args...) \ + ENETC_PMD_LOG(ERR, fmt, ## args) +#define ENETC_PMD_WARN(fmt, args...) \ + ENETC_PMD_LOG(WARNING, fmt, ## args) + +/* DP Logs, toggled out at compile time if level lower than current level */ +#define ENETC_PMD_DP_LOG(level, fmt, args...) \ + RTE_LOG_DP(level, PMD, fmt, ## args) + +#define ENETC_PMD_DP_DEBUG(fmt, args...) \ + ENETC_PMD_DP_LOG(DEBUG, fmt, ## args) +#define ENETC_PMD_DP_INFO(fmt, args...) \ + ENETC_PMD_DP_LOG(INFO, fmt, ## args) +#define ENETC_PMD_DP_WARN(fmt, args...) \ + ENETC_PMD_DP_LOG(WARNING, fmt, ## args) + +#endif /* _ENETC_LOGS_H_*/ diff --git a/src/spdk/dpdk/drivers/net/enetc/enetc_rxtx.c b/src/spdk/dpdk/drivers/net/enetc/enetc_rxtx.c new file mode 100644 index 000000000..412322523 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/enetc/enetc_rxtx.c @@ -0,0 +1,401 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2018-2020 NXP + */ + +#include +#include +#include + +#include "rte_ethdev.h" +#include "rte_malloc.h" +#include "rte_memzone.h" + +#include "base/enetc_hw.h" +#include "enetc.h" +#include "enetc_logs.h" + +#define ENETC_CACHE_LINE_RXBDS (RTE_CACHE_LINE_SIZE / \ + sizeof(union enetc_rx_bd)) +#define ENETC_RXBD_BUNDLE 16 /* Number of buffers to allocate at once */ + +static int +enetc_clean_tx_ring(struct enetc_bdr *tx_ring) +{ + int tx_frm_cnt = 0; + struct enetc_swbd *tx_swbd, *tx_swbd_base; + int i, hwci, bd_count; + struct rte_mbuf *m[ENETC_RXBD_BUNDLE]; + + /* we don't need barriers here, we just want a relatively current value + * from HW. + */ + hwci = (int)(rte_read32_relaxed(tx_ring->tcisr) & + ENETC_TBCISR_IDX_MASK); + + tx_swbd_base = tx_ring->q_swbd; + bd_count = tx_ring->bd_count; + i = tx_ring->next_to_clean; + tx_swbd = &tx_swbd_base[i]; + + /* we're only reading the CI index once here, which means HW may update + * it while we're doing clean-up. We could read the register in a loop + * but for now I assume it's OK to leave a few Tx frames for next call. + * The issue with reading the register in a loop is that we're stalling + * here trying to catch up with HW which keeps sending traffic as long + * as it has traffic to send, so in effect we could be waiting here for + * the Tx ring to be drained by HW, instead of us doing Rx in that + * meantime. + */ + while (i != hwci) { + /* It seems calling rte_pktmbuf_free is wasting a lot of cycles, + * make a list and call _free when it's done. 
+ */ + if (tx_frm_cnt == ENETC_RXBD_BUNDLE) { + rte_pktmbuf_free_bulk(m, tx_frm_cnt); + tx_frm_cnt = 0; + } + + m[tx_frm_cnt] = tx_swbd->buffer_addr; + tx_swbd->buffer_addr = NULL; + + i++; + tx_swbd++; + if (unlikely(i == bd_count)) { + i = 0; + tx_swbd = tx_swbd_base; + } + + tx_frm_cnt++; + } + + if (tx_frm_cnt) + rte_pktmbuf_free_bulk(m, tx_frm_cnt); + + tx_ring->next_to_clean = i; + + return 0; +} + +uint16_t +enetc_xmit_pkts(void *tx_queue, + struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + struct enetc_swbd *tx_swbd; + int i, start, bds_to_use; + struct enetc_tx_bd *txbd; + struct enetc_bdr *tx_ring = (struct enetc_bdr *)tx_queue; + + i = tx_ring->next_to_use; + + bds_to_use = enetc_bd_unused(tx_ring); + if (bds_to_use < nb_pkts) + nb_pkts = bds_to_use; + + start = 0; + while (nb_pkts--) { + tx_ring->q_swbd[i].buffer_addr = tx_pkts[start]; + txbd = ENETC_TXBD(*tx_ring, i); + tx_swbd = &tx_ring->q_swbd[i]; + txbd->frm_len = tx_pkts[start]->pkt_len; + txbd->buf_len = txbd->frm_len; + txbd->flags = rte_cpu_to_le_16(ENETC_TXBD_FLAGS_F); + txbd->addr = (uint64_t)(uintptr_t) + rte_cpu_to_le_64((size_t)tx_swbd->buffer_addr->buf_iova + + tx_swbd->buffer_addr->data_off); + i++; + start++; + if (unlikely(i == tx_ring->bd_count)) + i = 0; + } + + /* we're only cleaning up the Tx ring here, on the assumption that + * software is slower than hardware and hardware completed sending + * older frames out by now. + * We're also cleaning up the ring before kicking off Tx for the new + * batch to minimize chances of contention on the Tx ring + */ + enetc_clean_tx_ring(tx_ring); + + tx_ring->next_to_use = i; + enetc_wr_reg(tx_ring->tcir, i); + return start; +} + +int +enetc_refill_rx_ring(struct enetc_bdr *rx_ring, const int buff_cnt) +{ + struct enetc_swbd *rx_swbd; + union enetc_rx_bd *rxbd; + int i, j, k = ENETC_RXBD_BUNDLE; + struct rte_mbuf *m[ENETC_RXBD_BUNDLE]; + struct rte_mempool *mb_pool; + + i = rx_ring->next_to_use; + mb_pool = rx_ring->mb_pool; + rx_swbd = &rx_ring->q_swbd[i]; + rxbd = ENETC_RXBD(*rx_ring, i); + for (j = 0; j < buff_cnt; j++) { + /* bulk alloc for the next up to 8 BDs */ + if (k == ENETC_RXBD_BUNDLE) { + k = 0; + int m_cnt = RTE_MIN(buff_cnt - j, ENETC_RXBD_BUNDLE); + + if (rte_pktmbuf_alloc_bulk(mb_pool, m, m_cnt)) + return -1; + } + + rx_swbd->buffer_addr = m[k]; + rxbd->w.addr = (uint64_t)(uintptr_t) + rx_swbd->buffer_addr->buf_iova + + rx_swbd->buffer_addr->data_off; + /* clear 'R" as well */ + rxbd->r.lstatus = 0; + rx_swbd++; + rxbd++; + i++; + k++; + if (unlikely(i == rx_ring->bd_count)) { + i = 0; + rxbd = ENETC_RXBD(*rx_ring, 0); + rx_swbd = &rx_ring->q_swbd[i]; + } + } + + if (likely(j)) { + rx_ring->next_to_alloc = i; + rx_ring->next_to_use = i; + enetc_wr_reg(rx_ring->rcir, i); + } + + return j; +} + +static inline void enetc_slow_parsing(struct rte_mbuf *m, + uint64_t parse_results) +{ + m->ol_flags &= ~(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD); + + switch (parse_results) { + case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV4: + m->packet_type = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4; + m->ol_flags |= PKT_RX_IP_CKSUM_BAD; + return; + case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV6: + m->packet_type = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6; + m->ol_flags |= PKT_RX_IP_CKSUM_BAD; + return; + case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV4_TCP: + m->packet_type = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | + RTE_PTYPE_L4_TCP; + m->ol_flags |= PKT_RX_IP_CKSUM_GOOD | + PKT_RX_L4_CKSUM_BAD; + return; + case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV6_TCP: + m->packet_type 
= RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6 | + RTE_PTYPE_L4_TCP; + m->ol_flags |= PKT_RX_IP_CKSUM_GOOD | + PKT_RX_L4_CKSUM_BAD; + return; + case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV4_UDP: + m->packet_type = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | + RTE_PTYPE_L4_UDP; + m->ol_flags |= PKT_RX_IP_CKSUM_GOOD | + PKT_RX_L4_CKSUM_BAD; + return; + case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV6_UDP: + m->packet_type = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6 | + RTE_PTYPE_L4_UDP; + m->ol_flags |= PKT_RX_IP_CKSUM_GOOD | + PKT_RX_L4_CKSUM_BAD; + return; + case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV4_SCTP: + m->packet_type = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | + RTE_PTYPE_L4_SCTP; + m->ol_flags |= PKT_RX_IP_CKSUM_GOOD | + PKT_RX_L4_CKSUM_BAD; + return; + case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV6_SCTP: + m->packet_type = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6 | + RTE_PTYPE_L4_SCTP; + m->ol_flags |= PKT_RX_IP_CKSUM_GOOD | + PKT_RX_L4_CKSUM_BAD; + return; + case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV4_ICMP: + m->packet_type = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | + RTE_PTYPE_L4_ICMP; + m->ol_flags |= PKT_RX_IP_CKSUM_GOOD | + PKT_RX_L4_CKSUM_BAD; + return; + case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV6_ICMP: + m->packet_type = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6 | + RTE_PTYPE_L4_ICMP; + m->ol_flags |= PKT_RX_IP_CKSUM_GOOD | + PKT_RX_L4_CKSUM_BAD; + return; + /* More switch cases can be added */ + default: + m->packet_type = RTE_PTYPE_UNKNOWN; + m->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN | + PKT_RX_L4_CKSUM_UNKNOWN; + } +} + + +static inline void __rte_hot +enetc_dev_rx_parse(struct rte_mbuf *m, uint16_t parse_results) +{ + ENETC_PMD_DP_DEBUG("parse summary = 0x%x ", parse_results); + m->ol_flags |= PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD; + + switch (parse_results) { + case ENETC_PKT_TYPE_ETHER: + m->packet_type = RTE_PTYPE_L2_ETHER; + return; + case ENETC_PKT_TYPE_IPV4: + m->packet_type = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4; + return; + case ENETC_PKT_TYPE_IPV6: + m->packet_type = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6; + return; + case ENETC_PKT_TYPE_IPV4_TCP: + m->packet_type = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | + RTE_PTYPE_L4_TCP; + return; + case ENETC_PKT_TYPE_IPV6_TCP: + m->packet_type = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6 | + RTE_PTYPE_L4_TCP; + return; + case ENETC_PKT_TYPE_IPV4_UDP: + m->packet_type = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | + RTE_PTYPE_L4_UDP; + return; + case ENETC_PKT_TYPE_IPV6_UDP: + m->packet_type = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6 | + RTE_PTYPE_L4_UDP; + return; + case ENETC_PKT_TYPE_IPV4_SCTP: + m->packet_type = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | + RTE_PTYPE_L4_SCTP; + return; + case ENETC_PKT_TYPE_IPV6_SCTP: + m->packet_type = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6 | + RTE_PTYPE_L4_SCTP; + return; + case ENETC_PKT_TYPE_IPV4_ICMP: + m->packet_type = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | + RTE_PTYPE_L4_ICMP; + return; + case ENETC_PKT_TYPE_IPV6_ICMP: + m->packet_type = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6 | + RTE_PTYPE_L4_ICMP; + return; + /* More switch cases can be added */ + default: + enetc_slow_parsing(m, parse_results); + } + +} + +static int +enetc_clean_rx_ring(struct enetc_bdr *rx_ring, + struct rte_mbuf **rx_pkts, + int work_limit) +{ + int rx_frm_cnt = 0; + int cleaned_cnt, i, bd_count; + struct enetc_swbd *rx_swbd; + union enetc_rx_bd *rxbd; + + /* next descriptor to process */ + i = rx_ring->next_to_clean; + /* next descriptor to process */ + rxbd = ENETC_RXBD(*rx_ring, i); + 
rte_prefetch0(rxbd); + bd_count = rx_ring->bd_count; + /* LS1028A does not have platform cache so any software access following + * a hardware write will go directly to DDR. Latency of such a read is + * in excess of 100 core cycles, so try to prefetch more in advance to + * mitigate this. + * How much is worth prefetching really depends on traffic conditions. + * With congested Rx this could go up to 4 cache lines or so. But if + * software keeps up with hardware and follows behind Rx PI by a cache + * line or less then it's harmful in terms of performance to cache more. + * We would only prefetch BDs that have yet to be written by ENETC, + * which will have to be evicted again anyway. + */ + rte_prefetch0(ENETC_RXBD(*rx_ring, + (i + ENETC_CACHE_LINE_RXBDS) % bd_count)); + rte_prefetch0(ENETC_RXBD(*rx_ring, + (i + ENETC_CACHE_LINE_RXBDS * 2) % bd_count)); + + cleaned_cnt = enetc_bd_unused(rx_ring); + rx_swbd = &rx_ring->q_swbd[i]; + while (likely(rx_frm_cnt < work_limit)) { + uint32_t bd_status; + + bd_status = rte_le_to_cpu_32(rxbd->r.lstatus); + if (!bd_status) + break; + + rx_swbd->buffer_addr->pkt_len = rxbd->r.buf_len - + rx_ring->crc_len; + rx_swbd->buffer_addr->data_len = rxbd->r.buf_len - + rx_ring->crc_len; + rx_swbd->buffer_addr->hash.rss = rxbd->r.rss_hash; + rx_swbd->buffer_addr->ol_flags = 0; + enetc_dev_rx_parse(rx_swbd->buffer_addr, + rxbd->r.parse_summary); + rx_pkts[rx_frm_cnt] = rx_swbd->buffer_addr; + cleaned_cnt++; + rx_swbd++; + i++; + if (unlikely(i == rx_ring->bd_count)) { + i = 0; + rx_swbd = &rx_ring->q_swbd[i]; + } + rxbd = ENETC_RXBD(*rx_ring, i); + rte_prefetch0(ENETC_RXBD(*rx_ring, + (i + ENETC_CACHE_LINE_RXBDS) % + bd_count)); + rte_prefetch0(ENETC_RXBD(*rx_ring, + (i + ENETC_CACHE_LINE_RXBDS * 2) % + bd_count)); + + rx_frm_cnt++; + } + + rx_ring->next_to_clean = i; + enetc_refill_rx_ring(rx_ring, cleaned_cnt); + + return rx_frm_cnt; +} + +uint16_t +enetc_recv_pkts(void *rxq, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + struct enetc_bdr *rx_ring = (struct enetc_bdr *)rxq; + + return enetc_clean_rx_ring(rx_ring, rx_pkts, nb_pkts); +} diff --git a/src/spdk/dpdk/drivers/net/enetc/meson.build b/src/spdk/dpdk/drivers/net/enetc/meson.build new file mode 100644 index 000000000..bea54bea8 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/enetc/meson.build @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright 2018 NXP + +if not is_linux + build = false + reason = 'only supported on linux' +endif + +deps += ['common_dpaax'] +sources = files('enetc_ethdev.c', + 'enetc_rxtx.c') + +includes += include_directories('base') diff --git a/src/spdk/dpdk/drivers/net/enetc/rte_pmd_enetc_version.map b/src/spdk/dpdk/drivers/net/enetc/rte_pmd_enetc_version.map new file mode 100644 index 000000000..f9f17e4f6 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/enetc/rte_pmd_enetc_version.map @@ -0,0 +1,3 @@ +DPDK_20.0 { + local: *; +}; diff --git a/src/spdk/dpdk/drivers/net/enic/Makefile b/src/spdk/dpdk/drivers/net/enic/Makefile new file mode 100644 index 000000000..d098a474a --- /dev/null +++ b/src/spdk/dpdk/drivers/net/enic/Makefile @@ -0,0 +1,68 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright 2008-2017 Cisco Systems, Inc. All rights reserved. +# Copyright 2007 Nuova Systems, Inc. All rights reserved. 
+ +include $(RTE_SDK)/mk/rte.vars.mk + +# +# library name +# +LIB = librte_pmd_enic.a + +EXPORT_MAP := rte_pmd_enic_version.map + +CFLAGS += -I$(SRCDIR)/base/ +CFLAGS += -I$(SRCDIR) +CFLAGS += -O3 +CFLAGS += $(WERROR_FLAGS) -Wno-strict-aliasing +LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring +LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs -lrte_hash +LDLIBS += -lrte_bus_pci + +VPATH += $(SRCDIR)/src + +# +# all source are stored in SRCS-y +# +SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_ethdev.c +SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_main.c +SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_rxtx.c +SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_clsf.c +SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_res.c +SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_flow.c +SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_fm_flow.c +SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += base/vnic_cq.c +SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += base/vnic_wq.c +SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += base/vnic_dev.c +SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += base/vnic_intr.c +SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += base/vnic_rq.c + +# The current implementation assumes 64-bit pointers +CC_AVX2_SUPPORT=0 +ifeq ($(CONFIG_RTE_ARCH_X86_64),y) +# Figure out if the compiler supports avx2. The extra check using +# -march=core-avx2 is necessary to support users who build for the +# 'default' machine (corei7 which has no avx2) and run the binary on +# newer CPUs that have avx2. +# This part is verbatim from i40e makefile. +ifeq ($(findstring RTE_MACHINE_CPUFLAG_AVX2,$(CFLAGS)),RTE_MACHINE_CPUFLAG_AVX2) + CC_AVX2_SUPPORT=1 +else + CC_AVX2_SUPPORT=\ + $(shell $(CC) -march=core-avx2 -dM -E - &1 | \ + grep -q AVX2 && echo 1) + ifeq ($(CC_AVX2_SUPPORT), 1) + ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y) + CFLAGS_enic_rxtx_vec_avx2.o += -march=core-avx2 + else + CFLAGS_enic_rxtx_vec_avx2.o += -mavx2 + endif + endif +endif +endif + +ifeq ($(CC_AVX2_SUPPORT), 1) + SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_rxtx_vec_avx2.c +endif + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/src/spdk/dpdk/drivers/net/enic/base/cq_desc.h b/src/spdk/dpdk/drivers/net/enic/base/cq_desc.h new file mode 100644 index 000000000..7151353cb --- /dev/null +++ b/src/spdk/dpdk/drivers/net/enic/base/cq_desc.h @@ -0,0 +1,99 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + */ + +#ifndef _CQ_DESC_H_ +#define _CQ_DESC_H_ +#include + +/* + * Completion queue descriptor types + */ +enum cq_desc_types { + CQ_DESC_TYPE_WQ_ENET = 0, + CQ_DESC_TYPE_DESC_COPY = 1, + CQ_DESC_TYPE_WQ_EXCH = 2, + CQ_DESC_TYPE_RQ_ENET = 3, + CQ_DESC_TYPE_RQ_FCP = 4, + CQ_DESC_TYPE_IOMMU_MISS = 5, + CQ_DESC_TYPE_SGL = 6, + CQ_DESC_TYPE_CLASSIFIER = 7, + CQ_DESC_TYPE_TEST = 127, +}; + +/* Completion queue descriptor: 16B + * + * All completion queues have this basic layout. The + * type_specific area is unique for each completion + * queue type. 
+ */ +struct cq_desc { + uint16_t completed_index; + uint16_t q_number; + uint8_t type_specific[11]; + uint8_t type_color; +}; + +#define CQ_DESC_TYPE_BITS 4 +#define CQ_DESC_TYPE_MASK ((1 << CQ_DESC_TYPE_BITS) - 1) +#define CQ_DESC_COLOR_MASK 1 +#define CQ_DESC_COLOR_SHIFT 7 +#define CQ_DESC_COLOR_MASK_NOSHIFT 0x80 +#define CQ_DESC_Q_NUM_BITS 10 +#define CQ_DESC_Q_NUM_MASK ((1 << CQ_DESC_Q_NUM_BITS) - 1) +#define CQ_DESC_COMP_NDX_BITS 12 +#define CQ_DESC_COMP_NDX_MASK ((1 << CQ_DESC_COMP_NDX_BITS) - 1) + +static inline void cq_color_enc(struct cq_desc *desc, const uint8_t color) +{ + if (color) + desc->type_color |= (1 << CQ_DESC_COLOR_SHIFT); + else + desc->type_color &= ~(1 << CQ_DESC_COLOR_SHIFT); +} + +static inline void cq_desc_enc(struct cq_desc *desc, + const uint8_t type, const uint8_t color, const uint16_t q_number, + const uint16_t completed_index) +{ + desc->type_color = (type & CQ_DESC_TYPE_MASK) | + ((color & CQ_DESC_COLOR_MASK) << CQ_DESC_COLOR_SHIFT); + desc->q_number = rte_cpu_to_le_16(q_number & CQ_DESC_Q_NUM_MASK); + desc->completed_index = rte_cpu_to_le_16(completed_index & + CQ_DESC_COMP_NDX_MASK); +} + +static inline void cq_desc_dec(const struct cq_desc *desc_arg, + uint8_t *type, uint8_t *color, uint16_t *q_number, + uint16_t *completed_index) +{ + const struct cq_desc *desc = desc_arg; + const uint8_t type_color = desc->type_color; + + *color = (type_color >> CQ_DESC_COLOR_SHIFT) & CQ_DESC_COLOR_MASK; + + /* + * Make sure color bit is read from desc *before* other fields + * are read from desc. Hardware guarantees color bit is last + * bit (byte) written. Adding the rmb() prevents the compiler + * and/or CPU from reordering the reads which would potentially + * result in reading stale values. + */ + + rte_rmb(); + + *type = type_color & CQ_DESC_TYPE_MASK; + *q_number = rte_le_to_cpu_16(desc->q_number) & CQ_DESC_Q_NUM_MASK; + *completed_index = rte_le_to_cpu_16(desc->completed_index) & + CQ_DESC_COMP_NDX_MASK; +} + +static inline void cq_color_dec(const struct cq_desc *desc_arg, uint8_t *color) +{ + volatile const struct cq_desc *desc = desc_arg; + + *color = (desc->type_color >> CQ_DESC_COLOR_SHIFT) & CQ_DESC_COLOR_MASK; +} + +#endif /* _CQ_DESC_H_ */ diff --git a/src/spdk/dpdk/drivers/net/enic/base/cq_enet_desc.h b/src/spdk/dpdk/drivers/net/enic/base/cq_enet_desc.h new file mode 100644 index 000000000..602ac22b6 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/enic/base/cq_enet_desc.h @@ -0,0 +1,252 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. 
+ */ + +#ifndef _CQ_ENET_DESC_H_ +#define _CQ_ENET_DESC_H_ + +#include +#include "cq_desc.h" + +/* Ethernet completion queue descriptor: 16B */ +struct cq_enet_wq_desc { + uint16_t completed_index; + uint16_t q_number; + uint8_t reserved[11]; + uint8_t type_color; +}; + +static inline void cq_enet_wq_desc_enc(struct cq_enet_wq_desc *desc, + uint8_t type, uint8_t color, uint16_t q_number, + uint16_t completed_index) +{ + cq_desc_enc((struct cq_desc *)desc, type, + color, q_number, completed_index); +} + +static inline void cq_enet_wq_desc_dec(struct cq_enet_wq_desc *desc, + uint8_t *type, uint8_t *color, uint16_t *q_number, + uint16_t *completed_index) +{ + cq_desc_dec((struct cq_desc *)desc, type, + color, q_number, completed_index); +} + +/* Completion queue descriptor: Ethernet receive queue, 16B */ +struct cq_enet_rq_desc { + uint16_t completed_index_flags; + uint16_t q_number_rss_type_flags; + uint32_t rss_hash; + uint16_t bytes_written_flags; + uint16_t vlan; + uint16_t checksum_fcoe; + uint8_t flags; + uint8_t type_color; +}; + +/* Completion queue descriptor: Ethernet receive queue, 16B */ +struct cq_enet_rq_clsf_desc { + uint16_t completed_index_flags; + uint16_t q_number_rss_type_flags; + uint16_t filter_id; + uint16_t lif; + uint16_t bytes_written_flags; + uint16_t vlan; + uint16_t checksum_fcoe; + uint8_t flags; + uint8_t type_color; +}; + +#define CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT (0x1 << 12) +#define CQ_ENET_RQ_DESC_FLAGS_FCOE (0x1 << 13) +#define CQ_ENET_RQ_DESC_FLAGS_EOP (0x1 << 14) +#define CQ_ENET_RQ_DESC_FLAGS_SOP (0x1 << 15) + +#define CQ_ENET_RQ_DESC_RSS_TYPE_BITS 4 +#define CQ_ENET_RQ_DESC_RSS_TYPE_MASK \ + ((1 << CQ_ENET_RQ_DESC_RSS_TYPE_BITS) - 1) +#define CQ_ENET_RQ_DESC_RSS_TYPE_NONE 0 +#define CQ_ENET_RQ_DESC_RSS_TYPE_IPv4 1 +#define CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv4 2 +#define CQ_ENET_RQ_DESC_RSS_TYPE_IPv6 3 +#define CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6 4 +#define CQ_ENET_RQ_DESC_RSS_TYPE_IPv6_EX 5 +#define CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6_EX 6 + +#define CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC (0x1 << 14) + +#define CQ_ENET_RQ_DESC_BYTES_WRITTEN_BITS 14 +#define CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK \ + ((1 << CQ_ENET_RQ_DESC_BYTES_WRITTEN_BITS) - 1) +#define CQ_ENET_RQ_DESC_FLAGS_TRUNCATED (0x1 << 14) +#define CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED (0x1 << 15) + +#define CQ_ENET_RQ_DESC_VLAN_TCI_VLAN_BITS 12 +#define CQ_ENET_RQ_DESC_VLAN_TCI_VLAN_MASK \ + ((1 << CQ_ENET_RQ_DESC_VLAN_TCI_VLAN_BITS) - 1) +#define CQ_ENET_RQ_DESC_VLAN_TCI_CFI_MASK (0x1 << 12) +#define CQ_ENET_RQ_DESC_VLAN_TCI_USER_PRIO_BITS 3 +#define CQ_ENET_RQ_DESC_VLAN_TCI_USER_PRIO_MASK \ + ((1 << CQ_ENET_RQ_DESC_VLAN_TCI_USER_PRIO_BITS) - 1) +#define CQ_ENET_RQ_DESC_VLAN_TCI_USER_PRIO_SHIFT 13 + +#define CQ_ENET_RQ_DESC_FCOE_SOF_BITS 8 +#define CQ_ENET_RQ_DESC_FCOE_SOF_MASK \ + ((1 << CQ_ENET_RQ_DESC_FCOE_SOF_BITS) - 1) +#define CQ_ENET_RQ_DESC_FCOE_EOF_BITS 8 +#define CQ_ENET_RQ_DESC_FCOE_EOF_MASK \ + ((1 << CQ_ENET_RQ_DESC_FCOE_EOF_BITS) - 1) +#define CQ_ENET_RQ_DESC_FCOE_EOF_SHIFT 8 + +#define CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK (0x1 << 0) +#define CQ_ENET_RQ_DESC_FCOE_FC_CRC_OK (0x1 << 0) +#define CQ_ENET_RQ_DESC_FLAGS_UDP (0x1 << 1) +#define CQ_ENET_RQ_DESC_FCOE_ENC_ERROR (0x1 << 1) +#define CQ_ENET_RQ_DESC_FLAGS_TCP (0x1 << 2) +#define CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK (0x1 << 3) +#define CQ_ENET_RQ_DESC_FLAGS_IPV6 (0x1 << 4) +#define CQ_ENET_RQ_DESC_FLAGS_IPV4 (0x1 << 5) +#define CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT (0x1 << 6) +#define CQ_ENET_RQ_DESC_FLAGS_FCS_OK (0x1 << 7) + +static inline 
void cq_enet_rq_desc_enc(struct cq_enet_rq_desc *desc, + uint8_t type, uint8_t color, uint16_t q_number, + uint16_t completed_index, uint8_t ingress_port, uint8_t fcoe, + uint8_t eop, uint8_t sop, uint8_t rss_type, uint8_t csum_not_calc, + uint32_t rss_hash, uint16_t bytes_written, uint8_t packet_error, + uint8_t vlan_stripped, uint16_t vlan, uint16_t checksum, + uint8_t fcoe_sof, uint8_t fcoe_fc_crc_ok, uint8_t fcoe_enc_error, + uint8_t fcoe_eof, uint8_t tcp_udp_csum_ok, uint8_t udp, uint8_t tcp, + uint8_t ipv4_csum_ok, uint8_t ipv6, uint8_t ipv4, uint8_t ipv4_fragment, + uint8_t fcs_ok) +{ + cq_desc_enc((struct cq_desc *)desc, type, + color, q_number, completed_index); + + desc->completed_index_flags |= rte_cpu_to_le_16 + ((ingress_port ? CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT : 0) | + (fcoe ? CQ_ENET_RQ_DESC_FLAGS_FCOE : 0) | + (eop ? CQ_ENET_RQ_DESC_FLAGS_EOP : 0) | + (sop ? CQ_ENET_RQ_DESC_FLAGS_SOP : 0)); + + desc->q_number_rss_type_flags |= rte_cpu_to_le_16 + (((rss_type & CQ_ENET_RQ_DESC_RSS_TYPE_MASK) << + CQ_DESC_Q_NUM_BITS) | + (csum_not_calc ? CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC : 0)); + + desc->rss_hash = rte_cpu_to_le_32(rss_hash); + + desc->bytes_written_flags = rte_cpu_to_le_16 + ((bytes_written & CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK) | + (packet_error ? CQ_ENET_RQ_DESC_FLAGS_TRUNCATED : 0) | + (vlan_stripped ? CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED : 0)); + + desc->vlan = rte_cpu_to_le_16(vlan); + + if (fcoe) { + desc->checksum_fcoe = rte_cpu_to_le_16 + ((fcoe_sof & CQ_ENET_RQ_DESC_FCOE_SOF_MASK) | + ((fcoe_eof & CQ_ENET_RQ_DESC_FCOE_EOF_MASK) << + CQ_ENET_RQ_DESC_FCOE_EOF_SHIFT)); + } else { + desc->checksum_fcoe = rte_cpu_to_le_16(checksum); + } + + desc->flags = + (tcp_udp_csum_ok ? CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK : 0) | + (udp ? CQ_ENET_RQ_DESC_FLAGS_UDP : 0) | + (tcp ? CQ_ENET_RQ_DESC_FLAGS_TCP : 0) | + (ipv4_csum_ok ? CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK : 0) | + (ipv6 ? CQ_ENET_RQ_DESC_FLAGS_IPV6 : 0) | + (ipv4 ? CQ_ENET_RQ_DESC_FLAGS_IPV4 : 0) | + (ipv4_fragment ? CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT : 0) | + (fcs_ok ? CQ_ENET_RQ_DESC_FLAGS_FCS_OK : 0) | + (fcoe_fc_crc_ok ? CQ_ENET_RQ_DESC_FCOE_FC_CRC_OK : 0) | + (fcoe_enc_error ? CQ_ENET_RQ_DESC_FCOE_ENC_ERROR : 0); +} + +static inline void cq_enet_rq_desc_dec(struct cq_enet_rq_desc *desc, + uint8_t *type, uint8_t *color, uint16_t *q_number, + uint16_t *completed_index, uint8_t *ingress_port, uint8_t *fcoe, + uint8_t *eop, uint8_t *sop, uint8_t *rss_type, uint8_t *csum_not_calc, + uint32_t *rss_hash, uint16_t *bytes_written, uint8_t *packet_error, + uint8_t *vlan_stripped, uint16_t *vlan_tci, uint16_t *checksum, + uint8_t *fcoe_sof, uint8_t *fcoe_fc_crc_ok, uint8_t *fcoe_enc_error, + uint8_t *fcoe_eof, uint8_t *tcp_udp_csum_ok, uint8_t *udp, uint8_t *tcp, + uint8_t *ipv4_csum_ok, uint8_t *ipv6, uint8_t *ipv4, + uint8_t *ipv4_fragment, uint8_t *fcs_ok) +{ + uint16_t completed_index_flags; + uint16_t q_number_rss_type_flags; + uint16_t bytes_written_flags; + + cq_desc_dec((struct cq_desc *)desc, type, + color, q_number, completed_index); + + completed_index_flags = rte_le_to_cpu_16(desc->completed_index_flags); + q_number_rss_type_flags = + rte_le_to_cpu_16(desc->q_number_rss_type_flags); + bytes_written_flags = rte_le_to_cpu_16(desc->bytes_written_flags); + + *ingress_port = (completed_index_flags & + CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT) ? 1 : 0; + *fcoe = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_FCOE) ? + 1 : 0; + *eop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_EOP) ? 
+ 1 : 0; + *sop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_SOP) ? + 1 : 0; + + *rss_type = (uint8_t)((q_number_rss_type_flags >> CQ_DESC_Q_NUM_BITS) & + CQ_ENET_RQ_DESC_RSS_TYPE_MASK); + *csum_not_calc = (q_number_rss_type_flags & + CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ? 1 : 0; + + *rss_hash = rte_le_to_cpu_32(desc->rss_hash); + + *bytes_written = bytes_written_flags & + CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK; + *packet_error = (bytes_written_flags & + CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) ? 1 : 0; + *vlan_stripped = (bytes_written_flags & + CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) ? 1 : 0; + + /* + * Tag Control Information(16) = user_priority(3) + cfi(1) + vlan(12) + */ + *vlan_tci = rte_le_to_cpu_16(desc->vlan); + + if (*fcoe) { + *fcoe_sof = (uint8_t)(rte_le_to_cpu_16(desc->checksum_fcoe) & + CQ_ENET_RQ_DESC_FCOE_SOF_MASK); + *fcoe_fc_crc_ok = (desc->flags & + CQ_ENET_RQ_DESC_FCOE_FC_CRC_OK) ? 1 : 0; + *fcoe_enc_error = (desc->flags & + CQ_ENET_RQ_DESC_FCOE_ENC_ERROR) ? 1 : 0; + *fcoe_eof = (uint8_t)((rte_le_to_cpu_16(desc->checksum_fcoe) >> + CQ_ENET_RQ_DESC_FCOE_EOF_SHIFT) & + CQ_ENET_RQ_DESC_FCOE_EOF_MASK); + *checksum = 0; + } else { + *fcoe_sof = 0; + *fcoe_fc_crc_ok = 0; + *fcoe_enc_error = 0; + *fcoe_eof = 0; + *checksum = rte_le_to_cpu_16(desc->checksum_fcoe); + } + + *tcp_udp_csum_ok = + (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ? 1 : 0; + *udp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_UDP) ? 1 : 0; + *tcp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP) ? 1 : 0; + *ipv4_csum_ok = + (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ? 1 : 0; + *ipv6 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV6) ? 1 : 0; + *ipv4 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4) ? 1 : 0; + *ipv4_fragment = + (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT) ? 1 : 0; + *fcs_ok = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_FCS_OK) ? 1 : 0; +} + +#endif /* _CQ_ENET_DESC_H_ */ diff --git a/src/spdk/dpdk/drivers/net/enic/base/rq_enet_desc.h b/src/spdk/dpdk/drivers/net/enic/base/rq_enet_desc.h new file mode 100644 index 000000000..c79c0287b --- /dev/null +++ b/src/spdk/dpdk/drivers/net/enic/base/rq_enet_desc.h @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. 
+ */ + +#ifndef _RQ_ENET_DESC_H_ +#define _RQ_ENET_DESC_H_ + +#include + +/* Ethernet receive queue descriptor: 16B */ +struct rq_enet_desc { + uint64_t address; + uint16_t length_type; + uint8_t reserved[6]; +}; + +enum rq_enet_type_types { + RQ_ENET_TYPE_ONLY_SOP = 0, + RQ_ENET_TYPE_NOT_SOP = 1, + RQ_ENET_TYPE_RESV2 = 2, + RQ_ENET_TYPE_RESV3 = 3, +}; + +#define RQ_ENET_ADDR_BITS 64 +#define RQ_ENET_LEN_BITS 14 +#define RQ_ENET_LEN_MASK ((1 << RQ_ENET_LEN_BITS) - 1) +#define RQ_ENET_TYPE_BITS 2 +#define RQ_ENET_TYPE_MASK ((1 << RQ_ENET_TYPE_BITS) - 1) + +static inline void rq_enet_desc_enc(volatile struct rq_enet_desc *desc, + uint64_t address, uint8_t type, uint16_t length) +{ + desc->address = rte_cpu_to_le_64(address); + desc->length_type = rte_cpu_to_le_16((length & RQ_ENET_LEN_MASK) | + ((type & RQ_ENET_TYPE_MASK) << RQ_ENET_LEN_BITS)); +} + +static inline void rq_enet_desc_dec(struct rq_enet_desc *desc, + uint64_t *address, uint8_t *type, uint16_t *length) +{ + *address = rte_le_to_cpu_64(desc->address); + *length = rte_le_to_cpu_16(desc->length_type) & RQ_ENET_LEN_MASK; + *type = (uint8_t)((rte_le_to_cpu_16(desc->length_type) >> + RQ_ENET_LEN_BITS) & RQ_ENET_TYPE_MASK); +} + +#endif /* _RQ_ENET_DESC_H_ */ diff --git a/src/spdk/dpdk/drivers/net/enic/base/vnic_cq.c b/src/spdk/dpdk/drivers/net/enic/base/vnic_cq.c new file mode 100644 index 000000000..51d6fd387 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/enic/base/vnic_cq.c @@ -0,0 +1,78 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + */ + +#include "vnic_dev.h" +#include "vnic_cq.h" +#include + +void vnic_cq_free(struct vnic_cq *cq) +{ + vnic_dev_free_desc_ring(cq->vdev, &cq->ring); + + cq->ctrl = NULL; +} + +int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index, + unsigned int socket_id, + unsigned int desc_count, unsigned int desc_size) +{ + int err; + char res_name[RTE_MEMZONE_NAMESIZE]; + static int instance; + + cq->index = index; + cq->vdev = vdev; + + cq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_CQ, index); + if (!cq->ctrl) { + pr_err("Failed to hook CQ[%u] resource\n", index); + return -EINVAL; + } + + snprintf(res_name, sizeof(res_name), "%d-cq-%u", instance++, index); + err = vnic_dev_alloc_desc_ring(vdev, &cq->ring, desc_count, desc_size, + socket_id, res_name); + if (err) + return err; + + return 0; +} + +void vnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable, + unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail, + unsigned int cq_tail_color, unsigned int interrupt_enable, + unsigned int cq_entry_enable, unsigned int cq_message_enable, + unsigned int interrupt_offset, uint64_t cq_message_addr) +{ + uint64_t paddr; + + paddr = (uint64_t)cq->ring.base_addr | VNIC_PADDR_TARGET; + writeq(paddr, &cq->ctrl->ring_base); + iowrite32(cq->ring.desc_count, &cq->ctrl->ring_size); + iowrite32(flow_control_enable, &cq->ctrl->flow_control_enable); + iowrite32(color_enable, &cq->ctrl->color_enable); + iowrite32(cq_head, &cq->ctrl->cq_head); + iowrite32(cq_tail, &cq->ctrl->cq_tail); + iowrite32(cq_tail_color, &cq->ctrl->cq_tail_color); + iowrite32(interrupt_enable, &cq->ctrl->interrupt_enable); + iowrite32(cq_entry_enable, &cq->ctrl->cq_entry_enable); + iowrite32(cq_message_enable, &cq->ctrl->cq_message_enable); + iowrite32(interrupt_offset, &cq->ctrl->interrupt_offset); + writeq(cq_message_addr, &cq->ctrl->cq_message_addr); + + cq->interrupt_offset = 
interrupt_offset; +} + +void vnic_cq_clean(struct vnic_cq *cq) +{ + cq->to_clean = 0; + cq->last_color = 0; + + iowrite32(0, &cq->ctrl->cq_head); + iowrite32(0, &cq->ctrl->cq_tail); + iowrite32(1, &cq->ctrl->cq_tail_color); + + vnic_dev_clear_desc_ring(&cq->ring); +} diff --git a/src/spdk/dpdk/drivers/net/enic/base/vnic_cq.h b/src/spdk/dpdk/drivers/net/enic/base/vnic_cq.h new file mode 100644 index 000000000..2e48759c4 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/enic/base/vnic_cq.h @@ -0,0 +1,77 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + */ + +#ifndef _VNIC_CQ_H_ +#define _VNIC_CQ_H_ + +#include + +#include "cq_desc.h" +#include "vnic_dev.h" + +/* Completion queue control */ +struct vnic_cq_ctrl { + uint64_t ring_base; /* 0x00 */ + uint32_t ring_size; /* 0x08 */ + uint32_t pad0; + uint32_t flow_control_enable; /* 0x10 */ + uint32_t pad1; + uint32_t color_enable; /* 0x18 */ + uint32_t pad2; + uint32_t cq_head; /* 0x20 */ + uint32_t pad3; + uint32_t cq_tail; /* 0x28 */ + uint32_t pad4; + uint32_t cq_tail_color; /* 0x30 */ + uint32_t pad5; + uint32_t interrupt_enable; /* 0x38 */ + uint32_t pad6; + uint32_t cq_entry_enable; /* 0x40 */ + uint32_t pad7; + uint32_t cq_message_enable; /* 0x48 */ + uint32_t pad8; + uint32_t interrupt_offset; /* 0x50 */ + uint32_t pad9; + uint64_t cq_message_addr; /* 0x58 */ + uint32_t pad10; +}; + +#ifdef ENIC_AIC +struct vnic_rx_bytes_counter { + unsigned int small_pkt_bytes_cnt; + unsigned int large_pkt_bytes_cnt; +}; +#endif + +struct vnic_cq { + unsigned int index; + struct vnic_dev *vdev; + struct vnic_cq_ctrl __iomem *ctrl; /* memory-mapped */ + struct vnic_dev_ring ring; + unsigned int to_clean; + unsigned int last_color; + unsigned int interrupt_offset; +#ifdef ENIC_AIC + struct vnic_rx_bytes_counter pkt_size_counter; + unsigned int cur_rx_coal_timeval; + unsigned int tobe_rx_coal_timeval; + ktime_t prev_ts; +#endif +}; + +void vnic_cq_free(struct vnic_cq *cq); +int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index, + unsigned int socket_id, + unsigned int desc_count, unsigned int desc_size); +void vnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable, + unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail, + unsigned int cq_tail_color, unsigned int interrupt_enable, + unsigned int cq_entry_enable, unsigned int message_enable, + unsigned int interrupt_offset, uint64_t message_addr); +void vnic_cq_clean(struct vnic_cq *cq); +int vnic_cq_mem_size(struct vnic_cq *cq, unsigned int desc_count, + unsigned int desc_size); + +#endif /* _VNIC_CQ_H_ */ diff --git a/src/spdk/dpdk/drivers/net/enic/base/vnic_dev.c b/src/spdk/dpdk/drivers/net/enic/base/vnic_dev.c new file mode 100644 index 000000000..ac03817f4 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/enic/base/vnic_dev.c @@ -0,0 +1,1216 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. 
+ */ + +#include +#include +#include +#include + +#include "vnic_dev.h" +#include "vnic_resource.h" +#include "vnic_devcmd.h" +#include "vnic_nic.h" +#include "vnic_stats.h" +#include "vnic_flowman.h" + + +enum vnic_proxy_type { + PROXY_NONE, + PROXY_BY_BDF, + PROXY_BY_INDEX, +}; + +struct vnic_res { + void __iomem *vaddr; + dma_addr_t bus_addr; + unsigned int count; +}; + +struct vnic_intr_coal_timer_info { + uint32_t mul; + uint32_t div; + uint32_t max_usec; +}; + +struct vnic_dev { + void *priv; + struct rte_pci_device *pdev; + struct vnic_res res[RES_TYPE_MAX]; + enum vnic_dev_intr_mode intr_mode; + struct vnic_devcmd __iomem *devcmd; + struct vnic_devcmd_notify *notify; + struct vnic_devcmd_notify notify_copy; + dma_addr_t notify_pa; + uint32_t notify_sz; + dma_addr_t linkstatus_pa; + struct vnic_stats *stats; + dma_addr_t stats_pa; + struct vnic_devcmd_fw_info *fw_info; + dma_addr_t fw_info_pa; + struct fm_info *flowman_info; + dma_addr_t flowman_info_pa; + enum vnic_proxy_type proxy; + uint32_t proxy_index; + uint64_t args[VNIC_DEVCMD_NARGS]; + int in_reset; + struct vnic_intr_coal_timer_info intr_coal_timer_info; + void *(*alloc_consistent)(void *priv, size_t size, + dma_addr_t *dma_handle, uint8_t *name); + void (*free_consistent)(void *priv, + size_t size, void *vaddr, + dma_addr_t dma_handle); +}; + +#define VNIC_MAX_RES_HDR_SIZE \ + (sizeof(struct vnic_resource_header) + \ + sizeof(struct vnic_resource) * RES_TYPE_MAX) +#define VNIC_RES_STRIDE 128 + +void *vnic_dev_priv(struct vnic_dev *vdev) +{ + return vdev->priv; +} + +void vnic_register_cbacks(struct vnic_dev *vdev, + void *(*alloc_consistent)(void *priv, size_t size, + dma_addr_t *dma_handle, uint8_t *name), + void (*free_consistent)(void *priv, + size_t size, void *vaddr, + dma_addr_t dma_handle)) +{ + vdev->alloc_consistent = alloc_consistent; + vdev->free_consistent = free_consistent; +} + +static int vnic_dev_discover_res(struct vnic_dev *vdev, + struct vnic_dev_bar *bar, unsigned int num_bars) +{ + struct vnic_resource_header __iomem *rh; + struct mgmt_barmap_hdr __iomem *mrh; + struct vnic_resource __iomem *r; + uint8_t type; + + if (num_bars == 0) + return -EINVAL; + + if (bar->len < VNIC_MAX_RES_HDR_SIZE) { + pr_err("vNIC BAR0 res hdr length error\n"); + return -EINVAL; + } + + rh = bar->vaddr; + mrh = bar->vaddr; + if (!rh) { + pr_err("vNIC BAR0 res hdr not mem-mapped\n"); + return -EINVAL; + } + + /* Check for mgmt vnic in addition to normal vnic */ + if ((ioread32(&rh->magic) != VNIC_RES_MAGIC) || + (ioread32(&rh->version) != VNIC_RES_VERSION)) { + if ((ioread32(&mrh->magic) != MGMTVNIC_MAGIC) || + (ioread32(&mrh->version) != MGMTVNIC_VERSION)) { + pr_err("vNIC BAR0 res magic/version error " \ + "exp (%lx/%lx) or (%lx/%lx), curr (%x/%x)\n", + VNIC_RES_MAGIC, VNIC_RES_VERSION, + MGMTVNIC_MAGIC, MGMTVNIC_VERSION, + ioread32(&rh->magic), ioread32(&rh->version)); + return -EINVAL; + } + } + + if (ioread32(&mrh->magic) == MGMTVNIC_MAGIC) + r = (struct vnic_resource __iomem *)(mrh + 1); + else + r = (struct vnic_resource __iomem *)(rh + 1); + + + while ((type = ioread8(&r->type)) != RES_TYPE_EOL) { + uint8_t bar_num = ioread8(&r->bar); + uint32_t bar_offset = ioread32(&r->bar_offset); + uint32_t count = ioread32(&r->count); + uint32_t len; + + r++; + + if (bar_num >= num_bars) + continue; + + if (!bar[bar_num].len || !bar[bar_num].vaddr) + continue; + + switch (type) { + case RES_TYPE_WQ: + case RES_TYPE_RQ: + case RES_TYPE_CQ: + case RES_TYPE_INTR_CTRL: + /* each count is stride bytes long */ + len = count * 
VNIC_RES_STRIDE; + if (len + bar_offset > bar[bar_num].len) { + pr_err("vNIC BAR0 resource %d " \ + "out-of-bounds, offset 0x%x + " \ + "size 0x%x > bar len 0x%lx\n", + type, bar_offset, + len, + bar[bar_num].len); + return -EINVAL; + } + break; + case RES_TYPE_INTR_PBA_LEGACY: + case RES_TYPE_DEVCMD: + len = count; + break; + default: + continue; + } + + vdev->res[type].count = count; + vdev->res[type].vaddr = (char __iomem *)bar[bar_num].vaddr + + bar_offset; + vdev->res[type].bus_addr = bar[bar_num].bus_addr + bar_offset; + } + + return 0; +} + +unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev, + enum vnic_res_type type) +{ + return vdev->res[type].count; +} + +void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type, + unsigned int index) +{ + if (!vdev->res[type].vaddr) + return NULL; + + switch (type) { + case RES_TYPE_WQ: + case RES_TYPE_RQ: + case RES_TYPE_CQ: + case RES_TYPE_INTR_CTRL: + return (char __iomem *)vdev->res[type].vaddr + + index * VNIC_RES_STRIDE; + default: + return (char __iomem *)vdev->res[type].vaddr; + } +} + +unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring, + unsigned int desc_count, unsigned int desc_size) +{ + /* The base address of the desc rings must be 512 byte aligned. + * Descriptor count is aligned to groups of 32 descriptors. A + * count of 0 means the maximum 4096 descriptors. Descriptor + * size is aligned to 16 bytes. + */ + + unsigned int count_align = 32; + unsigned int desc_align = 16; + + ring->base_align = 512; + + if (desc_count == 0) + desc_count = 4096; + + ring->desc_count = VNIC_ALIGN(desc_count, count_align); + + ring->desc_size = VNIC_ALIGN(desc_size, desc_align); + + ring->size = ring->desc_count * ring->desc_size; + ring->size_unaligned = ring->size + ring->base_align; + + return ring->size_unaligned; +} + +void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring) +{ + memset(ring->descs, 0, ring->size); +} + +int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, + struct vnic_dev_ring *ring, + unsigned int desc_count, unsigned int desc_size, + __rte_unused unsigned int socket_id, + char *z_name) +{ + void *alloc_addr; + dma_addr_t alloc_pa = 0; + + vnic_dev_desc_ring_size(ring, desc_count, desc_size); + alloc_addr = vdev->alloc_consistent(vdev->priv, + ring->size_unaligned, + &alloc_pa, (uint8_t *)z_name); + if (!alloc_addr) { + pr_err("Failed to allocate ring (size=%d), aborting\n", + (int)ring->size); + return -ENOMEM; + } + ring->descs_unaligned = alloc_addr; + if (!alloc_pa) { + pr_err("Failed to map allocated ring (size=%d), aborting\n", + (int)ring->size); + vdev->free_consistent(vdev->priv, + ring->size_unaligned, + alloc_addr, + alloc_pa); + return -ENOMEM; + } + ring->base_addr_unaligned = alloc_pa; + + ring->base_addr = VNIC_ALIGN(ring->base_addr_unaligned, + ring->base_align); + ring->descs = (uint8_t *)ring->descs_unaligned + + (ring->base_addr - ring->base_addr_unaligned); + + vnic_dev_clear_desc_ring(ring); + + ring->desc_avail = ring->desc_count - 1; + + return 0; +} + +void vnic_dev_free_desc_ring(__rte_unused struct vnic_dev *vdev, + struct vnic_dev_ring *ring) +{ + if (ring->descs) { + vdev->free_consistent(vdev->priv, + ring->size_unaligned, + ring->descs_unaligned, + ring->base_addr_unaligned); + ring->descs = NULL; + } +} + +static int _vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, + int wait) +{ + struct vnic_devcmd __iomem *devcmd = vdev->devcmd; + unsigned int i; + int delay; + uint32_t status; + int err; + + status = ioread32(&devcmd->status); + if 
(status == 0xFFFFFFFF) { + /* PCI-e target device is gone */ + return -ENODEV; + } + if (status & STAT_BUSY) { + + pr_err("Busy devcmd %d\n", _CMD_N(cmd)); + return -EBUSY; + } + + if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) { + for (i = 0; i < VNIC_DEVCMD_NARGS; i++) + writeq(vdev->args[i], &devcmd->args[i]); + rte_wmb(); /* complete all writes initiated till now */ + } + + iowrite32(cmd, &devcmd->cmd); + + if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT)) + return 0; + + for (delay = 0; delay < wait; delay++) { + + usleep(100); + + status = ioread32(&devcmd->status); + if (status == 0xFFFFFFFF) { + /* PCI-e target device is gone */ + return -ENODEV; + } + + if (!(status & STAT_BUSY)) { + if (status & STAT_ERROR) { + err = -(int)readq(&devcmd->args[0]); + if (cmd != CMD_CAPABILITY && + cmd != CMD_OVERLAY_OFFLOAD_CTRL && + cmd != CMD_GET_SUPP_FEATURE_VER) + pr_err("Devcmd %d failed " \ + "with error code %d\n", + _CMD_N(cmd), err); + return err; + } + + if (_CMD_DIR(cmd) & _CMD_DIR_READ) { + rte_rmb();/* finish all reads */ + for (i = 0; i < VNIC_DEVCMD_NARGS; i++) + vdev->args[i] = readq(&devcmd->args[i]); + } + + return 0; + } + } + + pr_err("Timedout devcmd %d\n", _CMD_N(cmd)); + return -ETIMEDOUT; +} + +static int vnic_dev_cmd_proxy(struct vnic_dev *vdev, + enum vnic_devcmd_cmd proxy_cmd, enum vnic_devcmd_cmd cmd, + uint64_t *args, int nargs, int wait) +{ + uint32_t status; + int err; + + /* + * Proxy command consumes 2 arguments. One for proxy index, + * the other is for command to be proxied + */ + if (nargs > VNIC_DEVCMD_NARGS - 2) { + pr_err("number of args %d exceeds the maximum\n", nargs); + return -EINVAL; + } + memset(vdev->args, 0, sizeof(vdev->args)); + + vdev->args[0] = vdev->proxy_index; + vdev->args[1] = cmd; + memcpy(&vdev->args[2], args, nargs * sizeof(args[0])); + + err = _vnic_dev_cmd(vdev, proxy_cmd, wait); + if (err) + return err; + + status = (uint32_t)vdev->args[0]; + if (status & STAT_ERROR) { + err = (int)vdev->args[1]; + if (err != ERR_ECMDUNKNOWN || + cmd != CMD_CAPABILITY) + pr_err("Error %d proxy devcmd %d\n", err, _CMD_N(cmd)); + return err; + } + + memcpy(args, &vdev->args[1], nargs * sizeof(args[0])); + + return 0; +} + +static int vnic_dev_cmd_no_proxy(struct vnic_dev *vdev, + enum vnic_devcmd_cmd cmd, uint64_t *args, int nargs, int wait) +{ + int err; + + if (nargs > VNIC_DEVCMD_NARGS) { + pr_err("number of args %d exceeds the maximum\n", nargs); + return -EINVAL; + } + memset(vdev->args, 0, sizeof(vdev->args)); + memcpy(vdev->args, args, nargs * sizeof(args[0])); + + err = _vnic_dev_cmd(vdev, cmd, wait); + + memcpy(args, vdev->args, nargs * sizeof(args[0])); + + return err; +} + +int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, + uint64_t *a0, uint64_t *a1, int wait) +{ + uint64_t args[2]; + int err; + + args[0] = *a0; + args[1] = *a1; + memset(vdev->args, 0, sizeof(vdev->args)); + + switch (vdev->proxy) { + case PROXY_BY_INDEX: + err = vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_INDEX, cmd, + args, ARRAY_SIZE(args), wait); + break; + case PROXY_BY_BDF: + err = vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_BDF, cmd, + args, ARRAY_SIZE(args), wait); + break; + case PROXY_NONE: + default: + err = vnic_dev_cmd_no_proxy(vdev, cmd, args, 2, wait); + break; + } + + if (err == 0) { + *a0 = args[0]; + *a1 = args[1]; + } + + return err; +} + +int vnic_dev_cmd_args(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, + uint64_t *args, int nargs, int wait) +{ + switch (vdev->proxy) { + case PROXY_BY_INDEX: + return vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_INDEX, cmd, + args, nargs, 
wait); + case PROXY_BY_BDF: + return vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_BDF, cmd, + args, nargs, wait); + case PROXY_NONE: + default: + return vnic_dev_cmd_no_proxy(vdev, cmd, args, nargs, wait); + } +} + +int vnic_dev_fw_info(struct vnic_dev *vdev, + struct vnic_devcmd_fw_info **fw_info) +{ + char name[RTE_MEMZONE_NAMESIZE]; + uint64_t a0, a1 = 0; + int wait = 1000; + int err = 0; + static uint32_t instance; + + if (!vdev->fw_info) { + snprintf((char *)name, sizeof(name), "vnic_fw_info-%u", + instance++); + vdev->fw_info = vdev->alloc_consistent(vdev->priv, + sizeof(struct vnic_devcmd_fw_info), + &vdev->fw_info_pa, (uint8_t *)name); + if (!vdev->fw_info) + return -ENOMEM; + a0 = vdev->fw_info_pa; + a1 = sizeof(struct vnic_devcmd_fw_info); + err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO, + &a0, &a1, wait); + } + *fw_info = vdev->fw_info; + return err; +} + +static int vnic_dev_advanced_filters_cap(struct vnic_dev *vdev, uint64_t *args, + int nargs) +{ + memset(args, 0, nargs * sizeof(*args)); + args[0] = CMD_ADD_ADV_FILTER; + args[1] = FILTER_CAP_MODE_V1_FLAG; + return vnic_dev_cmd_args(vdev, CMD_CAPABILITY, args, nargs, 1000); +} + +int vnic_dev_capable_adv_filters(struct vnic_dev *vdev) +{ + uint64_t a0 = CMD_ADD_ADV_FILTER, a1 = 0; + int wait = 1000; + int err; + + err = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait); + if (err) + return 0; + return (a1 >= (uint32_t)FILTER_DPDK_1); +} + +int vnic_dev_flowman_cmd(struct vnic_dev *vdev, uint64_t *args, int nargs) +{ + int wait = 1000; + + return vnic_dev_cmd_args(vdev, CMD_FLOW_MANAGER_OP, args, nargs, wait); +} + +static int vnic_dev_flowman_enable(struct vnic_dev *vdev, uint32_t *mode, + uint8_t *filter_actions) +{ + char name[RTE_MEMZONE_NAMESIZE]; + uint64_t args[3]; + uint64_t ops; + static uint32_t instance; + + /* flowman devcmd available? */ + if (!vnic_dev_capable(vdev, CMD_FLOW_MANAGER_OP)) + return 0; + /* Have the version we are using? */ + args[0] = FM_API_VERSION_QUERY; + if (vnic_dev_flowman_cmd(vdev, args, 1)) + return 0; + if ((args[0] & (1ULL << FM_VERSION)) == 0) + return 0; + /* Select the version */ + args[0] = FM_API_VERSION_SELECT; + args[1] = FM_VERSION; + if (vnic_dev_flowman_cmd(vdev, args, 2)) + return 0; + /* Can we get fm_info? */ + if (!vdev->flowman_info) { + snprintf((char *)name, sizeof(name), "vnic_fm_info-%u", + instance++); + vdev->flowman_info = vdev->alloc_consistent(vdev->priv, + sizeof(struct fm_info), + &vdev->flowman_info_pa, (uint8_t *)name); + if (!vdev->flowman_info) + return 0; + } + args[0] = FM_INFO_QUERY; + args[1] = vdev->flowman_info_pa; + args[2] = sizeof(struct fm_info); + if (vnic_dev_flowman_cmd(vdev, args, 3)) + return 0; + /* Have required operations? */ + ops = (1ULL << FMOP_END) | + (1ULL << FMOP_DROP) | + (1ULL << FMOP_RQ_STEER) | + (1ULL << FMOP_EXACT_MATCH) | + (1ULL << FMOP_MARK) | + (1ULL << FMOP_TAG) | + (1ULL << FMOP_EG_HAIRPIN) | + (1ULL << FMOP_ENCAP) | + (1ULL << FMOP_DECAP_NOSTRIP); + if ((vdev->flowman_info->fm_op_mask & ops) != ops) + return 0; + /* Good to use flowman now */ + *mode = FILTER_FLOWMAN; + *filter_actions = FILTER_ACTION_RQ_STEERING_FLAG | + FILTER_ACTION_FILTER_ID_FLAG | + FILTER_ACTION_COUNTER_FLAG | + FILTER_ACTION_DROP_FLAG; + return 1; +} + +/* Determine the "best" filtering mode VIC is capaible of. Returns one of 4 + * value or 0 on error: + * FILTER_FLOWMAN- flowman api capable + * FILTER_DPDK_1- advanced filters availabile + * FILTER_USNIC_IP_FLAG - advanced filters but with the restriction that + * the IP layer must explicitly specified. I.e. 
cannot have a UDP + * filter that matches both IPv4 and IPv6. + * FILTER_IPV4_5TUPLE - fallback if either of the 2 above aren't available. + * all other filter types are not available. + * Retrun true in filter_tags if supported + */ +int vnic_dev_capable_filter_mode(struct vnic_dev *vdev, uint32_t *mode, + uint8_t *filter_actions) +{ + uint64_t args[4]; + int err; + uint32_t max_level = 0; + + /* If flowman is available, use it as it is the most capable API */ + if (vnic_dev_flowman_enable(vdev, mode, filter_actions)) + return 0; + + err = vnic_dev_advanced_filters_cap(vdev, args, 4); + + /* determine supported filter actions */ + *filter_actions = FILTER_ACTION_RQ_STEERING_FLAG; /* always available */ + if (args[2] == FILTER_CAP_MODE_V1) + *filter_actions = args[3]; + + if (err || ((args[0] == 1) && (args[1] == 0))) { + /* Adv filter Command not supported or adv filters available but + * not enabled. Try the normal filter capability command. + */ + args[0] = CMD_ADD_FILTER; + args[1] = 0; + err = vnic_dev_cmd_args(vdev, CMD_CAPABILITY, args, 2, 1000); + if (err) + return err; + max_level = args[1]; + goto parse_max_level; + } else if (args[2] == FILTER_CAP_MODE_V1) { + /* parse filter capability mask in args[1] */ + if (args[1] & FILTER_DPDK_1_FLAG) + *mode = FILTER_DPDK_1; + else if (args[1] & FILTER_USNIC_IP_FLAG) + *mode = FILTER_USNIC_IP; + else if (args[1] & FILTER_IPV4_5TUPLE_FLAG) + *mode = FILTER_IPV4_5TUPLE; + return 0; + } + max_level = args[1]; +parse_max_level: + if (max_level >= (uint32_t)FILTER_USNIC_IP) + *mode = FILTER_USNIC_IP; + else + *mode = FILTER_IPV4_5TUPLE; + return 0; +} + +void vnic_dev_capable_udp_rss_weak(struct vnic_dev *vdev, bool *cfg_chk, + bool *weak) +{ + uint64_t a0 = CMD_NIC_CFG, a1 = 0; + int wait = 1000; + int err; + + *cfg_chk = false; + *weak = false; + err = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait); + if (err == 0 && a0 != 0 && a1 != 0) { + *cfg_chk = true; + *weak = !!((a1 >> 32) & CMD_NIC_CFG_CAPF_UDP_WEAK); + } +} + +int vnic_dev_capable(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd) +{ + uint64_t a0 = (uint32_t)cmd, a1 = 0; + int wait = 1000; + int err; + + err = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait); + + return !(err || a0); +} + +int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, size_t size, + void *value) +{ + uint64_t a0, a1; + int wait = 1000; + int err; + + a0 = offset; + a1 = size; + + err = vnic_dev_cmd(vdev, CMD_DEV_SPEC, &a0, &a1, wait); + + switch (size) { + case 1: + *(uint8_t *)value = (uint8_t)a0; + break; + case 2: + *(uint16_t *)value = (uint16_t)a0; + break; + case 4: + *(uint32_t *)value = (uint32_t)a0; + break; + case 8: + *(uint64_t *)value = a0; + break; + default: + BUG(); + break; + } + + return err; +} + +int vnic_dev_stats_clear(struct vnic_dev *vdev) +{ + uint64_t a0 = 0, a1 = 0; + int wait = 1000; + + return vnic_dev_cmd(vdev, CMD_STATS_CLEAR, &a0, &a1, wait); +} + +int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats) +{ + uint64_t a0, a1; + int wait = 1000; + + if (!vdev->stats) + return -ENOMEM; + + *stats = vdev->stats; + a0 = vdev->stats_pa; + a1 = sizeof(struct vnic_stats); + + return vnic_dev_cmd(vdev, CMD_STATS_DUMP, &a0, &a1, wait); +} + +int vnic_dev_close(struct vnic_dev *vdev) +{ + uint64_t a0 = 0, a1 = 0; + int wait = 1000; + + return vnic_dev_cmd(vdev, CMD_CLOSE, &a0, &a1, wait); +} + +int vnic_dev_enable_wait(struct vnic_dev *vdev) +{ + uint64_t a0 = 0, a1 = 0; + int wait = 1000; + + if (vnic_dev_capable(vdev, CMD_ENABLE_WAIT)) + return 
vnic_dev_cmd(vdev, CMD_ENABLE_WAIT, &a0, &a1, wait); + else + return vnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait); +} + +int vnic_dev_disable(struct vnic_dev *vdev) +{ + uint64_t a0 = 0, a1 = 0; + int wait = 1000; + + return vnic_dev_cmd(vdev, CMD_DISABLE, &a0, &a1, wait); +} + +int vnic_dev_open(struct vnic_dev *vdev, int arg) +{ + uint64_t a0 = (uint32_t)arg, a1 = 0; + int wait = 1000; + + return vnic_dev_cmd(vdev, CMD_OPEN, &a0, &a1, wait); +} + +int vnic_dev_open_done(struct vnic_dev *vdev, int *done) +{ + uint64_t a0 = 0, a1 = 0; + int wait = 1000; + int err; + + *done = 0; + + err = vnic_dev_cmd(vdev, CMD_OPEN_STATUS, &a0, &a1, wait); + if (err) + return err; + + *done = (a0 == 0); + + return 0; +} + +int vnic_dev_get_mac_addr(struct vnic_dev *vdev, uint8_t *mac_addr) +{ + uint64_t a0 = 0, a1 = 0; + int wait = 1000; + int err, i; + + for (i = 0; i < RTE_ETHER_ADDR_LEN; i++) + mac_addr[i] = 0; + + err = vnic_dev_cmd(vdev, CMD_GET_MAC_ADDR, &a0, &a1, wait); + if (err) + return err; + + for (i = 0; i < RTE_ETHER_ADDR_LEN; i++) + mac_addr[i] = ((uint8_t *)&a0)[i]; + + return 0; +} + +int vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast, + int broadcast, int promisc, int allmulti) +{ + uint64_t a0, a1 = 0; + int wait = 1000; + int err; + + a0 = (directed ? CMD_PFILTER_DIRECTED : 0) | + (multicast ? CMD_PFILTER_MULTICAST : 0) | + (broadcast ? CMD_PFILTER_BROADCAST : 0) | + (promisc ? CMD_PFILTER_PROMISCUOUS : 0) | + (allmulti ? CMD_PFILTER_ALL_MULTICAST : 0); + + err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER, &a0, &a1, wait); + if (err) + pr_err("Can't set packet filter\n"); + + return err; +} + +int vnic_dev_add_addr(struct vnic_dev *vdev, uint8_t *addr) +{ + uint64_t a0 = 0, a1 = 0; + int wait = 1000; + int err; + int i; + + for (i = 0; i < RTE_ETHER_ADDR_LEN; i++) + ((uint8_t *)&a0)[i] = addr[i]; + + err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait); + if (err) + pr_err("Can't add addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n", + addr[0], addr[1], addr[2], addr[3], addr[4], addr[5], + err); + + return err; +} + +int vnic_dev_del_addr(struct vnic_dev *vdev, uint8_t *addr) +{ + uint64_t a0 = 0, a1 = 0; + int wait = 1000; + int err; + int i; + + for (i = 0; i < RTE_ETHER_ADDR_LEN; i++) + ((uint8_t *)&a0)[i] = addr[i]; + + err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait); + if (err) + pr_err("Can't del addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n", + addr[0], addr[1], addr[2], addr[3], addr[4], addr[5], + err); + + return err; +} + +int vnic_dev_set_ig_vlan_rewrite_mode(struct vnic_dev *vdev, + uint8_t ig_vlan_rewrite_mode) +{ + uint64_t a0 = ig_vlan_rewrite_mode, a1 = 0; + int wait = 1000; + + if (vnic_dev_capable(vdev, CMD_IG_VLAN_REWRITE_MODE)) + return vnic_dev_cmd(vdev, CMD_IG_VLAN_REWRITE_MODE, + &a0, &a1, wait); + else + return 0; +} + +void vnic_dev_set_reset_flag(struct vnic_dev *vdev, int state) +{ + vdev->in_reset = state; +} + +static inline int vnic_dev_in_reset(struct vnic_dev *vdev) +{ + return vdev->in_reset; +} + +int vnic_dev_notify_setcmd(struct vnic_dev *vdev, + void *notify_addr, dma_addr_t notify_pa, uint16_t intr) +{ + uint64_t a0, a1; + int wait = 1000; + int r; + + memset(notify_addr, 0, sizeof(struct vnic_devcmd_notify)); + if (!vnic_dev_in_reset(vdev)) { + vdev->notify = notify_addr; + vdev->notify_pa = notify_pa; + } + + a0 = (uint64_t)notify_pa; + a1 = ((uint64_t)intr << 32) & 0x0000ffff00000000ULL; + a1 += sizeof(struct vnic_devcmd_notify); + + r = vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait); + if (!vnic_dev_in_reset(vdev)) + 
vdev->notify_sz = (r == 0) ? (uint32_t)a1 : 0; + + return r; +} + +int vnic_dev_notify_set(struct vnic_dev *vdev, uint16_t intr) +{ + void *notify_addr = NULL; + dma_addr_t notify_pa = 0; + char name[RTE_MEMZONE_NAMESIZE]; + static uint32_t instance; + + if (vdev->notify || vdev->notify_pa) { + return vnic_dev_notify_setcmd(vdev, vdev->notify, + vdev->notify_pa, intr); + } + if (!vnic_dev_in_reset(vdev)) { + snprintf((char *)name, sizeof(name), + "vnic_notify-%u", instance++); + notify_addr = vdev->alloc_consistent(vdev->priv, + sizeof(struct vnic_devcmd_notify), + ¬ify_pa, (uint8_t *)name); + if (!notify_addr) + return -ENOMEM; + } + + return vnic_dev_notify_setcmd(vdev, notify_addr, notify_pa, intr); +} + +int vnic_dev_notify_unsetcmd(struct vnic_dev *vdev) +{ + uint64_t a0, a1; + int wait = 1000; + int err; + + a0 = 0; /* paddr = 0 to unset notify buffer */ + a1 = 0x0000ffff00000000ULL; /* intr num = -1 to unreg for intr */ + a1 += sizeof(struct vnic_devcmd_notify); + + err = vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait); + if (!vnic_dev_in_reset(vdev)) { + vdev->notify = NULL; + vdev->notify_pa = 0; + vdev->notify_sz = 0; + } + + return err; +} + +int vnic_dev_notify_unset(struct vnic_dev *vdev) +{ + if (vdev->notify && !vnic_dev_in_reset(vdev)) { + vdev->free_consistent(vdev->priv, + sizeof(struct vnic_devcmd_notify), + vdev->notify, + vdev->notify_pa); + } + + return vnic_dev_notify_unsetcmd(vdev); +} + +static int vnic_dev_notify_ready(struct vnic_dev *vdev) +{ + uint32_t *words; + unsigned int nwords = vdev->notify_sz / 4; + unsigned int i; + uint32_t csum; + + if (!vdev->notify || !vdev->notify_sz) + return 0; + + do { + csum = 0; + rte_memcpy(&vdev->notify_copy, vdev->notify, vdev->notify_sz); + words = (uint32_t *)&vdev->notify_copy; + for (i = 1; i < nwords; i++) + csum += words[i]; + } while (csum != words[0]); + + return 1; +} + +int vnic_dev_init(struct vnic_dev *vdev, int arg) +{ + uint64_t a0 = (uint32_t)arg, a1 = 0; + int wait = 1000; + int r = 0; + + if (vnic_dev_capable(vdev, CMD_INIT)) + r = vnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait); + else { + vnic_dev_cmd(vdev, CMD_INIT_v1, &a0, &a1, wait); + if (a0 & CMD_INITF_DEFAULT_MAC) { + /* Emulate these for old CMD_INIT_v1 which + * didn't pass a0 so no CMD_INITF_*. 
+ */ + vnic_dev_cmd(vdev, CMD_GET_MAC_ADDR, &a0, &a1, wait); + vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait); + } + } + return r; +} + +void vnic_dev_intr_coal_timer_info_default(struct vnic_dev *vdev) +{ + /* Default: hardware intr coal timer is in units of 1.5 usecs */ + vdev->intr_coal_timer_info.mul = 2; + vdev->intr_coal_timer_info.div = 3; + vdev->intr_coal_timer_info.max_usec = + vnic_dev_intr_coal_timer_hw_to_usec(vdev, 0xffff); +} + +int vnic_dev_link_status(struct vnic_dev *vdev) +{ + if (!vnic_dev_notify_ready(vdev)) + return 0; + + return vdev->notify_copy.link_state; +} + +uint32_t vnic_dev_port_speed(struct vnic_dev *vdev) +{ + if (!vnic_dev_notify_ready(vdev)) + return 0; + + return vdev->notify_copy.port_speed; +} + +uint32_t vnic_dev_intr_coal_timer_usec_to_hw(struct vnic_dev *vdev, + uint32_t usec) +{ + return (usec * vdev->intr_coal_timer_info.mul) / + vdev->intr_coal_timer_info.div; +} + +uint32_t vnic_dev_intr_coal_timer_hw_to_usec(struct vnic_dev *vdev, + uint32_t hw_cycles) +{ + return (hw_cycles * vdev->intr_coal_timer_info.div) / + vdev->intr_coal_timer_info.mul; +} + +uint32_t vnic_dev_get_intr_coal_timer_max(struct vnic_dev *vdev) +{ + return vdev->intr_coal_timer_info.max_usec; +} + +int vnic_dev_alloc_stats_mem(struct vnic_dev *vdev) +{ + char name[RTE_MEMZONE_NAMESIZE]; + static uint32_t instance; + + snprintf((char *)name, sizeof(name), "vnic_stats-%u", instance++); + vdev->stats = vdev->alloc_consistent(vdev->priv, + sizeof(struct vnic_stats), + &vdev->stats_pa, (uint8_t *)name); + return vdev->stats == NULL ? -ENOMEM : 0; +} + +void vnic_dev_unregister(struct vnic_dev *vdev) +{ + if (vdev) { + if (vdev->notify) + vdev->free_consistent(vdev->priv, + sizeof(struct vnic_devcmd_notify), + vdev->notify, + vdev->notify_pa); + if (vdev->stats) + vdev->free_consistent(vdev->priv, + sizeof(struct vnic_stats), + vdev->stats, vdev->stats_pa); + if (vdev->flowman_info) + vdev->free_consistent(vdev->priv, + sizeof(struct fm_info), + vdev->flowman_info, vdev->flowman_info_pa); + if (vdev->fw_info) + vdev->free_consistent(vdev->priv, + sizeof(struct vnic_devcmd_fw_info), + vdev->fw_info, vdev->fw_info_pa); + rte_free(vdev); + } +} + +struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev, + void *priv, struct rte_pci_device *pdev, struct vnic_dev_bar *bar, + unsigned int num_bars) +{ + if (!vdev) { + char name[RTE_MEMZONE_NAMESIZE]; + snprintf((char *)name, sizeof(name), "%s-vnic", + pdev->device.name); + vdev = (struct vnic_dev *)rte_zmalloc_socket(name, + sizeof(struct vnic_dev), + RTE_CACHE_LINE_SIZE, + pdev->device.numa_node); + if (!vdev) + return NULL; + } + + vdev->priv = priv; + vdev->pdev = pdev; + + if (vnic_dev_discover_res(vdev, bar, num_bars)) + goto err_out; + + vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0); + if (!vdev->devcmd) + goto err_out; + + return vdev; + +err_out: + vnic_dev_unregister(vdev); + return NULL; +} + +/* + * vnic_dev_classifier: Add/Delete classifier entries + * @vdev: vdev of the device + * @cmd: CLSF_ADD for Add filter + * CLSF_DEL for Delete filter + * @entry: In case of ADD filter, the caller passes the RQ number in this + * variable. + * This function stores the filter_id returned by the + * firmware in the same variable before return; + * + * In case of DEL filter, the caller passes the RQ number. Return + * value is irrelevant. 
+ * @data: filter data + * @action: action data + */ +int vnic_dev_classifier(struct vnic_dev *vdev, uint8_t cmd, uint16_t *entry, + struct filter_v2 *data, struct filter_action_v2 *action_v2) +{ + uint64_t a0 = 0, a1 = 0; + int wait = 1000; + dma_addr_t tlv_pa; + int ret = -EINVAL; + struct filter_tlv *tlv, *tlv_va; + uint64_t tlv_size; + uint32_t filter_size, action_size; + static unsigned int unique_id; + char z_name[RTE_MEMZONE_NAMESIZE]; + enum vnic_devcmd_cmd dev_cmd; + + if (cmd == CLSF_ADD) { + dev_cmd = (data->type >= FILTER_DPDK_1) ? + CMD_ADD_ADV_FILTER : CMD_ADD_FILTER; + + filter_size = vnic_filter_size(data); + action_size = vnic_action_size(action_v2); + + tlv_size = filter_size + action_size + + 2*sizeof(struct filter_tlv); + snprintf((char *)z_name, sizeof(z_name), + "vnic_clsf_%u", unique_id++); + tlv_va = vdev->alloc_consistent(vdev->priv, + tlv_size, &tlv_pa, (uint8_t *)z_name); + if (!tlv_va) + return -ENOMEM; + tlv = tlv_va; + a0 = tlv_pa; + a1 = tlv_size; + memset(tlv, 0, tlv_size); + tlv->type = CLSF_TLV_FILTER; + tlv->length = filter_size; + memcpy(&tlv->val, (void *)data, filter_size); + + tlv = (struct filter_tlv *)((char *)tlv + + sizeof(struct filter_tlv) + + filter_size); + + tlv->type = CLSF_TLV_ACTION; + tlv->length = action_size; + memcpy(&tlv->val, (void *)action_v2, action_size); + ret = vnic_dev_cmd(vdev, dev_cmd, &a0, &a1, wait); + *entry = (uint16_t)a0; + vdev->free_consistent(vdev->priv, tlv_size, tlv_va, tlv_pa); + } else if (cmd == CLSF_DEL) { + a0 = *entry; + ret = vnic_dev_cmd(vdev, CMD_DEL_FILTER, &a0, &a1, wait); + } + + return ret; +} + +int vnic_dev_overlay_offload_ctrl(struct vnic_dev *vdev, uint8_t overlay, + uint8_t config) +{ + uint64_t a0 = overlay; + uint64_t a1 = config; + int wait = 1000; + + return vnic_dev_cmd(vdev, CMD_OVERLAY_OFFLOAD_CTRL, &a0, &a1, wait); +} + +int vnic_dev_overlay_offload_cfg(struct vnic_dev *vdev, uint8_t overlay, + uint16_t vxlan_udp_port_number) +{ + uint64_t a1 = vxlan_udp_port_number; + uint64_t a0 = overlay; + int wait = 1000; + + return vnic_dev_cmd(vdev, CMD_OVERLAY_OFFLOAD_CFG, &a0, &a1, wait); +} + +int vnic_dev_capable_vxlan(struct vnic_dev *vdev) +{ + uint64_t a0 = VIC_FEATURE_VXLAN; + uint64_t a1 = 0; + int wait = 1000; + int ret; + + ret = vnic_dev_cmd(vdev, CMD_GET_SUPP_FEATURE_VER, &a0, &a1, wait); + /* 1 if the NIC can do VXLAN for both IPv4 and IPv6 with multiple WQs */ + return ret == 0 && + (a1 & (FEATURE_VXLAN_IPV6 | FEATURE_VXLAN_MULTI_WQ)) == + (FEATURE_VXLAN_IPV6 | FEATURE_VXLAN_MULTI_WQ); +} + +int vnic_dev_capable_geneve(struct vnic_dev *vdev) +{ + uint64_t a0 = VIC_FEATURE_GENEVE; + uint64_t a1 = 0; + int wait = 1000; + int ret; + + ret = vnic_dev_cmd(vdev, CMD_GET_SUPP_FEATURE_VER, &a0, &a1, wait); + return ret == 0 && (a1 & FEATURE_GENEVE_OPTIONS); +} diff --git a/src/spdk/dpdk/drivers/net/enic/base/vnic_dev.h b/src/spdk/dpdk/drivers/net/enic/base/vnic_dev.h new file mode 100644 index 000000000..02e19c0b8 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/enic/base/vnic_dev.h @@ -0,0 +1,195 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. 
+ */ + +#ifndef _VNIC_DEV_H_ +#define _VNIC_DEV_H_ + +#include + +#include +#include + +#include "enic_compat.h" +#include "vnic_resource.h" +#include "vnic_devcmd.h" + +#ifndef VNIC_PADDR_TARGET +#define VNIC_PADDR_TARGET 0x0000000000000000ULL +#endif + +#ifndef readq +static inline uint64_t readq(void __iomem *reg) +{ + return ((uint64_t)readl((char *)reg + 0x4UL) << 32) | + (uint64_t)readl(reg); +} + +static inline void writeq(uint64_t val, void __iomem *reg) +{ + writel(val & 0xffffffff, reg); + writel((uint32_t)(val >> 32), (char *)reg + 0x4UL); +} +#endif + +#undef pr_fmt +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +enum vnic_dev_intr_mode { + VNIC_DEV_INTR_MODE_UNKNOWN, + VNIC_DEV_INTR_MODE_INTX, + VNIC_DEV_INTR_MODE_MSI, + VNIC_DEV_INTR_MODE_MSIX, +}; + +struct vnic_dev_bar { + void __iomem *vaddr; + dma_addr_t bus_addr; + unsigned long len; +}; + +struct vnic_dev_ring { + void *descs; + size_t size; + dma_addr_t base_addr; + size_t base_align; + void *descs_unaligned; + size_t size_unaligned; + dma_addr_t base_addr_unaligned; + unsigned int desc_size; + unsigned int desc_count; + unsigned int desc_avail; +}; + +struct vnic_dev_iomap_info { + dma_addr_t bus_addr; + unsigned long len; + void __iomem *vaddr; +}; + +struct vnic_dev; +struct vnic_stats; + +void *vnic_dev_priv(struct vnic_dev *vdev); +unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev, + enum vnic_res_type type); +void vnic_register_cbacks(struct vnic_dev *vdev, + void *(*alloc_consistent)(void *priv, size_t size, + dma_addr_t *dma_handle, uint8_t *name), + void (*free_consistent)(void *priv, + size_t size, void *vaddr, + dma_addr_t dma_handle)); +void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type, + unsigned int index); +dma_addr_t vnic_dev_get_res_bus_addr(struct vnic_dev *vdev, + enum vnic_res_type type, unsigned int index); +uint8_t vnic_dev_get_res_bar(struct vnic_dev *vdev, + enum vnic_res_type type); +uint32_t vnic_dev_get_res_offset(struct vnic_dev *vdev, + enum vnic_res_type type, unsigned int index); +unsigned long vnic_dev_get_res_type_len(struct vnic_dev *vdev, + enum vnic_res_type type); +unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring, + unsigned int desc_count, unsigned int desc_size); +void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring); +int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring, + unsigned int desc_count, unsigned int desc_size, unsigned int socket_id, + char *z_name); +void vnic_dev_free_desc_ring(struct vnic_dev *vdev, + struct vnic_dev_ring *ring); +int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, + uint64_t *a0, uint64_t *a1, int wait); +int vnic_dev_cmd_args(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, + uint64_t *args, int nargs, int wait); +void vnic_dev_cmd_proxy_by_index_start(struct vnic_dev *vdev, uint16_t index); +void vnic_dev_cmd_proxy_by_bdf_start(struct vnic_dev *vdev, uint16_t bdf); +void vnic_dev_cmd_proxy_end(struct vnic_dev *vdev); +int vnic_dev_fw_info(struct vnic_dev *vdev, + struct vnic_devcmd_fw_info **fw_info); +int vnic_dev_capable_adv_filters(struct vnic_dev *vdev); +int vnic_dev_capable(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd); +int vnic_dev_capable_filter_mode(struct vnic_dev *vdev, uint32_t *mode, + uint8_t *filter_actions); +void vnic_dev_capable_udp_rss_weak(struct vnic_dev *vdev, bool *cfg_chk, + bool *weak); +int vnic_dev_asic_info(struct vnic_dev *vdev, uint16_t *asic_type, + uint16_t *asic_rev); +int vnic_dev_spec(struct vnic_dev *vdev, 
unsigned int offset, size_t size, + void *value); +int vnic_dev_stats_clear(struct vnic_dev *vdev); +int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats); +int vnic_dev_hang_notify(struct vnic_dev *vdev); +int vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast, + int broadcast, int promisc, int allmulti); +int vnic_dev_packet_filter_all(struct vnic_dev *vdev, int directed, + int multicast, int broadcast, int promisc, int allmulti); +int vnic_dev_add_addr(struct vnic_dev *vdev, uint8_t *addr); +int vnic_dev_del_addr(struct vnic_dev *vdev, uint8_t *addr); +int vnic_dev_get_mac_addr(struct vnic_dev *vdev, uint8_t *mac_addr); +int vnic_dev_raise_intr(struct vnic_dev *vdev, uint16_t intr); +int vnic_dev_notify_set(struct vnic_dev *vdev, uint16_t intr); +void vnic_dev_set_reset_flag(struct vnic_dev *vdev, int state); +int vnic_dev_notify_unset(struct vnic_dev *vdev); +int vnic_dev_notify_setcmd(struct vnic_dev *vdev, + void *notify_addr, dma_addr_t notify_pa, uint16_t intr); +int vnic_dev_notify_unsetcmd(struct vnic_dev *vdev); +int vnic_dev_link_status(struct vnic_dev *vdev); +uint32_t vnic_dev_port_speed(struct vnic_dev *vdev); +uint32_t vnic_dev_msg_lvl(struct vnic_dev *vdev); +uint32_t vnic_dev_mtu(struct vnic_dev *vdev); +uint32_t vnic_dev_link_down_cnt(struct vnic_dev *vdev); +uint32_t vnic_dev_notify_status(struct vnic_dev *vdev); +uint32_t vnic_dev_uif(struct vnic_dev *vdev); +int vnic_dev_close(struct vnic_dev *vdev); +int vnic_dev_enable(struct vnic_dev *vdev); +int vnic_dev_enable_wait(struct vnic_dev *vdev); +int vnic_dev_disable(struct vnic_dev *vdev); +int vnic_dev_open(struct vnic_dev *vdev, int arg); +int vnic_dev_open_done(struct vnic_dev *vdev, int *done); +int vnic_dev_init(struct vnic_dev *vdev, int arg); +int vnic_dev_init_done(struct vnic_dev *vdev, int *done, int *err); +int vnic_dev_init_prov(struct vnic_dev *vdev, uint8_t *buf, uint32_t len); +int vnic_dev_deinit(struct vnic_dev *vdev); +void vnic_dev_intr_coal_timer_info_default(struct vnic_dev *vdev); +int vnic_dev_intr_coal_timer_info(struct vnic_dev *vdev); +int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg); +int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done); +int vnic_dev_hang_reset(struct vnic_dev *vdev, int arg); +int vnic_dev_hang_reset_done(struct vnic_dev *vdev, int *done); +void vnic_dev_set_intr_mode(struct vnic_dev *vdev, + enum vnic_dev_intr_mode intr_mode); +enum vnic_dev_intr_mode vnic_dev_get_intr_mode(struct vnic_dev *vdev); +uint32_t vnic_dev_intr_coal_timer_usec_to_hw(struct vnic_dev *vdev, + uint32_t usec); +uint32_t vnic_dev_intr_coal_timer_hw_to_usec(struct vnic_dev *vdev, + uint32_t hw_cycles); +uint32_t vnic_dev_get_intr_coal_timer_max(struct vnic_dev *vdev); +void vnic_dev_unregister(struct vnic_dev *vdev); +int vnic_dev_set_ig_vlan_rewrite_mode(struct vnic_dev *vdev, + uint8_t ig_vlan_rewrite_mode); +struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev, + void *priv, struct rte_pci_device *pdev, struct vnic_dev_bar *bar, + unsigned int num_bars); +struct rte_pci_device *vnic_dev_get_pdev(struct vnic_dev *vdev); +int vnic_dev_alloc_stats_mem(struct vnic_dev *vdev); +int vnic_dev_cmd_init(struct vnic_dev *vdev, int fallback); +int vnic_dev_get_size(void); +int vnic_dev_int13(struct vnic_dev *vdev, uint64_t arg, uint32_t op); +int vnic_dev_perbi(struct vnic_dev *vdev, uint64_t arg, uint32_t op); +uint32_t vnic_dev_perbi_rebuild_cnt(struct vnic_dev *vdev); +int vnic_dev_init_prov2(struct vnic_dev *vdev, uint8_t *buf, uint32_t len); 
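+
+/*
+ * Editor's note, not part of the original header: every wrapper declared in
+ * this file follows the same devcmd convention -- up to two 64-bit arguments
+ * are loaded into a0/a1 and passed to vnic_dev_cmd(), which returns 0 or a
+ * negative errno. The sketch below is a hypothetical illustration of the
+ * usual bring-up order (open, poll open status, init, enable); the function
+ * name, the flag values and the fixed retry count are assumptions made for
+ * this example only, not enic driver code.
+ */
+static inline int vnic_dev_example_bringup(struct vnic_dev *vdev)
+{
+	int err, done = 0, retries;
+
+	/* CMD_OPEN starts the open sequence; CMD_OPEN_STATUS reports progress. */
+	err = vnic_dev_open(vdev, 0);
+	if (err)
+		return err;
+	for (retries = 0; retries < 1000; retries++) {
+		err = vnic_dev_open_done(vdev, &done);
+		if (err)
+			return err;
+		if (done)
+			break;
+	}
+	if (!done)
+		return -1;	/* open did not complete in time */
+
+	/* CMD_INIT (a0 = CMD_INITF_* flags), then enable the virtual link. */
+	err = vnic_dev_init(vdev, 0);
+	if (err)
+		return err;
+	return vnic_dev_enable_wait(vdev);
+}
+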
+int vnic_dev_enable2(struct vnic_dev *vdev, int active); +int vnic_dev_enable2_done(struct vnic_dev *vdev, int *status); +int vnic_dev_deinit_done(struct vnic_dev *vdev, int *status); +int vnic_dev_set_mac_addr(struct vnic_dev *vdev, uint8_t *mac_addr); +int vnic_dev_classifier(struct vnic_dev *vdev, uint8_t cmd, uint16_t *entry, + struct filter_v2 *data, struct filter_action_v2 *action_v2); +int vnic_dev_flowman_cmd(struct vnic_dev *vdev, uint64_t *args, int nargs); +int vnic_dev_overlay_offload_ctrl(struct vnic_dev *vdev, + uint8_t overlay, uint8_t config); +int vnic_dev_overlay_offload_cfg(struct vnic_dev *vdev, uint8_t overlay, + uint16_t vxlan_udp_port_number); +int vnic_dev_capable_vxlan(struct vnic_dev *vdev); +int vnic_dev_capable_geneve(struct vnic_dev *vdev); +#endif /* _VNIC_DEV_H_ */ diff --git a/src/spdk/dpdk/drivers/net/enic/base/vnic_devcmd.h b/src/spdk/dpdk/drivers/net/enic/base/vnic_devcmd.h new file mode 100644 index 000000000..a2f577f1e --- /dev/null +++ b/src/spdk/dpdk/drivers/net/enic/base/vnic_devcmd.h @@ -0,0 +1,1166 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + */ + +#ifndef _VNIC_DEVCMD_H_ +#define _VNIC_DEVCMD_H_ + +#define _CMD_NBITS 14 +#define _CMD_VTYPEBITS 10 +#define _CMD_FLAGSBITS 6 +#define _CMD_DIRBITS 2 + +#define _CMD_NMASK ((1 << _CMD_NBITS)-1) +#define _CMD_VTYPEMASK ((1 << _CMD_VTYPEBITS)-1) +#define _CMD_FLAGSMASK ((1 << _CMD_FLAGSBITS)-1) +#define _CMD_DIRMASK ((1 << _CMD_DIRBITS)-1) + +#define _CMD_NSHIFT 0 +#define _CMD_VTYPESHIFT (_CMD_NSHIFT+_CMD_NBITS) +#define _CMD_FLAGSSHIFT (_CMD_VTYPESHIFT+_CMD_VTYPEBITS) +#define _CMD_DIRSHIFT (_CMD_FLAGSSHIFT+_CMD_FLAGSBITS) + +/* + * Direction bits (from host perspective). + */ +#define _CMD_DIR_NONE 0U +#define _CMD_DIR_WRITE 1U +#define _CMD_DIR_READ 2U +#define _CMD_DIR_RW (_CMD_DIR_WRITE | _CMD_DIR_READ) + +/* + * Flag bits. + */ +#define _CMD_FLAGS_NONE 0U +#define _CMD_FLAGS_NOWAIT 1U + +/* + * vNIC type bits. + */ +#define _CMD_VTYPE_NONE 0U +#define _CMD_VTYPE_ENET 1U +#define _CMD_VTYPE_FC 2U +#define _CMD_VTYPE_SCSI 4U +#define _CMD_VTYPE_ALL (_CMD_VTYPE_ENET | _CMD_VTYPE_FC | _CMD_VTYPE_SCSI) + +/* + * Used to create cmds.. + */ +#define _CMDCF(dir, flags, vtype, nr) \ + (((dir) << _CMD_DIRSHIFT) | \ + ((flags) << _CMD_FLAGSSHIFT) | \ + ((vtype) << _CMD_VTYPESHIFT) | \ + ((nr) << _CMD_NSHIFT)) +#define _CMDC(dir, vtype, nr) _CMDCF(dir, 0, vtype, nr) +#define _CMDCNW(dir, vtype, nr) _CMDCF(dir, _CMD_FLAGS_NOWAIT, vtype, nr) + +/* + * Used to decode cmds.. + */ +#define _CMD_DIR(cmd) (((cmd) >> _CMD_DIRSHIFT) & _CMD_DIRMASK) +#define _CMD_FLAGS(cmd) (((cmd) >> _CMD_FLAGSSHIFT) & _CMD_FLAGSMASK) +#define _CMD_VTYPE(cmd) (((cmd) >> _CMD_VTYPESHIFT) & _CMD_VTYPEMASK) +#define _CMD_N(cmd) (((cmd) >> _CMD_NSHIFT) & _CMD_NMASK) + +#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) + +enum vnic_devcmd_cmd { + CMD_NONE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_NONE, 0), + + /* + * mcpu fw info in mem: + * in: + * (uint64_t)a0=paddr to struct vnic_devcmd_fw_info + * action: + * Fills in struct vnic_devcmd_fw_info (128 bytes) + * note: + * An old definition of CMD_MCPU_FW_INFO + */ + CMD_MCPU_FW_INFO_OLD = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 1), + + /* + * mcpu fw info in mem: + * in: + * (uint64_t)a0=paddr to struct vnic_devcmd_fw_info + * (uint16_t)a1=size of the structure + * out: + * (uint16_t)a1=0 for in:a1 = 0, + * data size actually written for other values. 
+ * action: + * Fills in first 128 bytes of vnic_devcmd_fw_info for in:a1 = 0, + * first in:a1 bytes for 0 < in:a1 <= 132, + * 132 bytes for other values of in:a1. + * note: + * CMD_MCPU_FW_INFO and CMD_MCPU_FW_INFO_OLD have the same enum 1 + * for source compatibility. + */ + CMD_MCPU_FW_INFO = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 1), + + /* dev-specific block member: + * in: (uint16_t)a0=offset,(uint8_t)a1=size + * out: a0=value + */ + CMD_DEV_SPEC = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 2), + + /* stats clear */ + CMD_STATS_CLEAR = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 3), + + /* stats dump in mem: (uint64_t)a0=paddr to stats area, + * (uint16_t)a1=sizeof stats area + */ + CMD_STATS_DUMP = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 4), + + /* set Rx packet filter: (uint32_t)a0=filters (see CMD_PFILTER_*) */ + CMD_PACKET_FILTER = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 7), + + /* set Rx packet filter for all: (uint32_t)a0=filters + * (see CMD_PFILTER_*) + */ + CMD_PACKET_FILTER_ALL = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 7), + + /* hang detection notification */ + CMD_HANG_NOTIFY = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 8), + + /* MAC address in (u48)a0 */ + CMD_MAC_ADDR = _CMDC(_CMD_DIR_READ, + _CMD_VTYPE_ENET | _CMD_VTYPE_FC, 9), +#define CMD_GET_MAC_ADDR CMD_MAC_ADDR /* some uses are aliased */ + + /* add addr from (u48)a0 */ + CMD_ADDR_ADD = _CMDCNW(_CMD_DIR_WRITE, + _CMD_VTYPE_ENET | _CMD_VTYPE_FC, 12), + + /* del addr from (u48)a0 */ + CMD_ADDR_DEL = _CMDCNW(_CMD_DIR_WRITE, + _CMD_VTYPE_ENET | _CMD_VTYPE_FC, 13), + + /* add VLAN id in (uint16_t)a0 */ + CMD_VLAN_ADD = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 14), + + /* del VLAN id in (uint16_t)a0 */ + CMD_VLAN_DEL = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 15), + + /* + * nic_cfg in (uint32_t)a0 + * + * Capability query: + * out: (uint64_t) a0= 1 if a1 is valid + * (uint64_t) a1= (NIC_CFG bits supported) | (flags << 32) + * (flags are CMD_NIC_CFG_CAPF_xxx) + */ + CMD_NIC_CFG = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 16), + + /* + * nic_cfg_chk (same as nic_cfg, but may return error) + * in (uint32_t)a0 + * + * Capability query: + * out: (uint64_t) a0= 1 if a1 is valid + * (uint64_t) a1= (NIC_CFG bits supported) | (flags << 32) + * (flags are CMD_NIC_CFG_CAPF_xxx) + */ + CMD_NIC_CFG_CHK = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 16), + + /* union vnic_rss_key in mem: (uint64_t)a0=paddr, (uint16_t)a1=len */ + CMD_RSS_KEY = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 17), + + /* union vnic_rss_cpu in mem: (uint64_t)a0=paddr, (uint16_t)a1=len */ + CMD_RSS_CPU = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 18), + + /* initiate softreset */ + CMD_SOFT_RESET = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 19), + + /* softreset status: + * out: a0=0 reset complete, a0=1 reset in progress */ + CMD_SOFT_RESET_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 20), + + /* set struct vnic_devcmd_notify buffer in mem: + * in: + * (uint64_t)a0=paddr to notify (set paddr=0 to unset) + * (uint32_t)a1 & 0x00000000ffffffff=sizeof(struct vnic_devcmd_notify) + * (uint16_t)a1 & 0x0000ffff00000000=intr num (-1 for no intr) + * out: + * (uint32_t)a1 = effective size + */ + CMD_NOTIFY = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 21), + + /* UNDI API: (uint64_t)a0=paddr to s_PXENV_UNDI_ struct, + * (uint8_t)a1=PXENV_UNDI_xxx + */ + CMD_UNDI = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 22), + + /* initiate open sequence (uint32_t)a0=flags (see CMD_OPENF_*) */ + CMD_OPEN = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 23), + + /* open status: + * out: a0=0 open complete, a0=1 open in progress */ + 
CMD_OPEN_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 24), + + /* close vnic */ + CMD_CLOSE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 25), + + /* initialize virtual link: (uint32_t)a0=flags (see CMD_INITF_*) */ +/***** Replaced by CMD_INIT *****/ + CMD_INIT_v1 = _CMDCNW(_CMD_DIR_READ, _CMD_VTYPE_ALL, 26), + + /* variant of CMD_INIT, with provisioning info + * (uint64_t)a0=paddr of vnic_devcmd_provinfo + * (uint32_t)a1=sizeof provision info + */ + CMD_INIT_PROV_INFO = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 27), + + /* enable virtual link */ + CMD_ENABLE = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 28), + + /* enable virtual link, waiting variant. */ + CMD_ENABLE_WAIT = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 28), + + /* disable virtual link */ + CMD_DISABLE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 29), + + /* stats dump sum of all vnic stats on same uplink in mem: + * (uint64_t)a0=paddr + * (uint16_t)a1=sizeof stats area + */ + CMD_STATS_DUMP_ALL = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 30), + + /* init status: + * out: a0=0 init complete, a0=1 init in progress + * if a0=0, a1=errno + */ + CMD_INIT_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 31), + + /* INT13 API: (uint64_t)a0=paddr to vnic_int13_params struct + * (uint32_t)a1=INT13_CMD_xxx + */ + CMD_INT13 = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_FC, 32), + + /* logical uplink enable/disable: (uint64_t)a0: 0/1=disable/enable */ + CMD_LOGICAL_UPLINK = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 33), + + /* undo initialize of virtual link */ + CMD_DEINIT = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 34), + + /* initialize virtual link: (uint32_t)a0=flags (see CMD_INITF_*) */ + CMD_INIT = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 35), + + /* check fw capability of a cmd: + * in: (uint32_t)a0=cmd + * out: (uint32_t)a0=errno, 0:valid cmd, a1=supported VNIC_STF_* bits + */ + CMD_CAPABILITY = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 36), + + /* persistent binding info + * in: (uint64_t)a0=paddr of arg + * (uint32_t)a1=CMD_PERBI_XXX + */ + CMD_PERBI = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_FC, 37), + + /* Interrupt Assert Register functionality + * in: (uint16_t)a0=interrupt number to assert + */ + CMD_IAR = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 38), + + /* initiate hangreset, like softreset after hang detected */ + CMD_HANG_RESET = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 39), + + /* hangreset status: + * out: a0=0 reset complete, a0=1 reset in progress + */ + CMD_HANG_RESET_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 40), + + /* + * Set hw ingress packet vlan rewrite mode: + * in: (uint32_t)a0=new vlan rewrite mode + * out: (uint32_t)a0=old vlan rewrite mode + */ + CMD_IG_VLAN_REWRITE_MODE = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ENET, 41), + + /* + * in: (uint16_t)a0=bdf of target vnic + * (uint32_t)a1=cmd to proxy + * a2-a15=args to cmd in a1 + * out: (uint32_t)a0=status of proxied cmd + * a1-a15=out args of proxied cmd + */ + CMD_PROXY_BY_BDF = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 42), + + /* + * As for BY_BDF except a0 is index of hvnlink subordinate vnic + * or SR-IOV virtual vnic + */ + CMD_PROXY_BY_INDEX = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 43), + + /* + * For HPP toggle: + * adapter-info-get + * in: (uint64_t)a0=phsical address of buffer passed in from caller. + * (uint16_t)a1=size of buffer specified in a0. + * out: (uint64_t)a0=phsical address of buffer passed in from caller. + * (uint16_t)a1=actual bytes from VIF-CONFIG-INFO TLV, or + * 0 if no VIF-CONFIG-INFO TLV was ever received. 
+ */ + CMD_CONFIG_INFO_GET = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 44), + + /* + * INT13 API: (uint64_t)a0=paddr to vnic_int13_params struct + * (uint32_t)a1=INT13_CMD_xxx + */ + CMD_INT13_ALL = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 45), + + /* + * Set default vlan: + * in: (uint16_t)a0=new default vlan + * (uint16_t)a1=zero for overriding vlan with param a0, + * non-zero for resetting vlan to the default + * out: (uint16_t)a0=old default vlan + */ + CMD_SET_DEFAULT_VLAN = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 46), + + /* init_prov_info2: + * Variant of CMD_INIT_PROV_INFO, where it will not try to enable + * the vnic until CMD_ENABLE2 is issued. + * (uint64_t)a0=paddr of vnic_devcmd_provinfo + * (uint32_t)a1=sizeof provision info + */ + CMD_INIT_PROV_INFO2 = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 47), + + /* enable2: + * (uint32_t)a0=0 ==> standby + * =CMD_ENABLE2_ACTIVE ==> active + */ + CMD_ENABLE2 = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 48), + + /* + * cmd_status: + * Returns the status of the specified command + * Input: + * a0 = command for which status is being queried. + * Possible values are: + * CMD_SOFT_RESET + * CMD_HANG_RESET + * CMD_OPEN + * CMD_INIT + * CMD_INIT_PROV_INFO + * CMD_DEINIT + * CMD_INIT_PROV_INFO2 + * CMD_ENABLE2 + * Output: + * if status == STAT_ERROR + * a0 = ERR_ENOTSUPPORTED - status for command in a0 is + * not supported + * if status == STAT_NONE + * a0 = status of the devcmd specified in a0 as follows. + * ERR_SUCCESS - command in a0 completed successfully + * ERR_EINPROGRESS - command in a0 is still in progress + */ + CMD_STATUS = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 49), + + /* + * Returns interrupt coalescing timer conversion factors. + * After calling this devcmd, ENIC driver can convert + * interrupt coalescing timer in usec into CPU cycles as follows: + * + * intr_timer_cycles = intr_timer_usec * multiplier / divisor + * + * Interrupt coalescing timer in usecs can be be converted/obtained + * from CPU cycles as follows: + * + * intr_timer_usec = intr_timer_cycles * divisor / multiplier + * + * in: none + * out: (uint32_t)a0 = multiplier + * (uint32_t)a1 = divisor + * (uint32_t)a2 = maximum timer value in usec + */ + CMD_INTR_COAL_CONVERT = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 50), + + /* + * ISCSI DUMP API: + * in: (uint64_t)a0=paddr of the param or param itself + * (uint32_t)a1=ISCSI_CMD_xxx + */ + CMD_ISCSI_DUMP_REQ = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 51), + + /* + * ISCSI DUMP STATUS API: + * in: (uint32_t)a0=cmd tag + * in: (uint32_t)a1=ISCSI_CMD_xxx + * out: (uint32_t)a0=cmd status + */ + CMD_ISCSI_DUMP_STATUS = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 52), + + /* + * Subvnic migration from MQ <--> VF. + * Enable the LIF migration from MQ to VF and vice versa. MQ and VF + * indexes are statically bound at the time of initialization. + * Based on the direction of migration, the resources of either MQ or + * the VF shall be attached to the LIF. 
+ * in: (uint32_t)a0=Direction of Migration + * 0=> Migrate to VF + * 1=> Migrate to MQ + * (uint32_t)a1=VF index (MQ index) + */ + CMD_MIGRATE_SUBVNIC = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 53), + + /* + * Register / Deregister the notification block for MQ subvnics + * in: + * (uint64_t)a0=paddr to notify (set paddr=0 to unset) + * (uint32_t)a1 & 0x00000000ffffffff=sizeof(struct vnic_devcmd_notify) + * (uint16_t)a1 & 0x0000ffff00000000=intr num (-1 for no intr) + * out: + * (uint32_t)a1 = effective size + */ + CMD_SUBVNIC_NOTIFY = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 54), + + /* + * Set the predefined mac address as default + * in: + * (u48)a0=mac addr + */ + CMD_SET_MAC_ADDR = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 55), + + /* Update the provisioning info of the given VIF + * (uint64_t)a0=paddr of vnic_devcmd_provinfo + * (uint32_t)a1=sizeof provision info + */ + CMD_PROV_INFO_UPDATE = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 56), + + /* + * Initialization for the devcmd2 interface. + * in: (uint64_t) a0=host result buffer physical address + * in: (uint16_t) a1=number of entries in result buffer + */ + CMD_INITIALIZE_DEVCMD2 = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 57), + + /* + * Add a filter. + * in: (uint64_t) a0= filter address + * (uint32_t) a1= size of filter + * out: (uint32_t) a0=filter identifier + * + * Capability query: + * out: (uint64_t) a0= 1 if capability query supported + * (uint64_t) a1= MAX filter type supported + */ + CMD_ADD_FILTER = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ENET, 58), + + /* + * Delete a filter. + * in: (uint32_t) a0=filter identifier + */ + CMD_DEL_FILTER = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 59), + + /* + * Enable a Queue Pair in User space NIC + * in: (uint32_t) a0=Queue Pair number + * (uint32_t) a1= command + */ + CMD_QP_ENABLE = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 60), + + /* + * Disable a Queue Pair in User space NIC + * in: (uint32_t) a0=Queue Pair number + * (uint32_t) a1= command + */ + CMD_QP_DISABLE = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 61), + + /* + * Stats dump Queue Pair in User space NIC + * in: (uint32_t) a0=Queue Pair number + * (uint64_t) a1=host buffer addr for status dump + * (uint32_t) a2=length of the buffer + */ + CMD_QP_STATS_DUMP = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 62), + + /* + * Clear stats for Queue Pair in User space NIC + * in: (uint32_t) a0=Queue Pair number + */ + CMD_QP_STATS_CLEAR = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 63), + + /* + * UEFI BOOT API: (uint64_t)a0= UEFI FLS_CMD_xxx + * (ui64)a1= paddr for the info buffer + */ + CMD_FC_REQ = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_FC, 64), + + /* + * Return the iSCSI config details required by the EFI Option ROM + * in: (uint32_t) a0=0 Get Boot Info for PXE eNIC as per + * pxe_boot_config_t + * a0=1 Get Boot info for iSCSI enic as per + * iscsi_boot_efi_cfg_t + * in: (uint64_t) a1=Host address where iSCSI config info is returned + */ + CMD_VNIC_BOOT_CONFIG_INFO = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 65), + + /* + * Create a Queue Pair (RoCE) + * in: (uint32_t) a0 = Queue Pair number + * (uint32_t) a1 = Remote QP + * (uint32_t) a2 = RDMA-RQ + * (uint16_t) a3 = RQ Res Group + * (uint16_t) a4 = SQ Res Group + * (uint32_t) a5 = Protection Domain + * (uint64_t) a6 = Remote MAC + * (uint32_t) a7 = start PSN + * (uint16_t) a8 = MSS + * (uint32_t) a9 = protocol version + */ + CMD_RDMA_QP_CREATE = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 66), + + /* + * Delete a Queue Pair (RoCE) + * in: (uint32_t) a0 = Queue Pair number + */ + CMD_RDMA_QP_DELETE = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 
67), + + /* + * Retrieve a Queue Pair's status information (RoCE) + * in: (uint32_t) a0 = Queue Pair number + * (uint64_t) a1 = host buffer addr for QP status struct + * (uint32_t) a2 = length of the buffer + */ + CMD_RDMA_QP_STATUS = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ENET, 68), + + /* + * Use this devcmd for agreeing on the highest common version supported + * by both driver and fw for by features who need such a facility. + * in: (uint64_t) a0 = feature (driver requests for the supported + * versions on this feature) + * out: (uint64_t) a0 = bitmap of all supported versions for that + * feature + */ + CMD_GET_SUPP_FEATURE_VER = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ENET, 69), + + /* + * Initialize the RDMA notification work queue + * in: (uint64_t) a0 = host buffer address + * in: (uint16_t) a1 = number of entries in buffer + * in: (uint16_t) a2 = resource group number + * in: (uint16_t) a3 = CQ number to post completion + */ + CMD_RDMA_INIT_INFO_BUF = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 70), + + /* + * De-init the RDMA notification work queue + * in: (uint64_t) a0=resource group number + */ + CMD_RDMA_DEINIT_INFO_BUF = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 71), + + /* + * Control (Enable/Disable) overlay offloads on the given vnic + * in: (uint8_t) a0 = OVERLAY_FEATURE_NVGRE : NVGRE + * a0 = OVERLAY_FEATURE_VXLAN : VxLAN + * a0 = OVERLAY_FEATURE_GENEVE : Geneve + * in: (uint8_t) a1 = OVERLAY_OFFLOAD_ENABLE : Enable or + * a1 = OVERLAY_OFFLOAD_DISABLE : Disable or + * a1 = OVERLAY_OFFLOAD_ENABLE_V2 : Enable with version 2 + */ + CMD_OVERLAY_OFFLOAD_CTRL = + _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 72), + + /* + * Configuration of overlay offloads feature on a given vNIC + * in: (uint8_t) a0 = OVERLAY_CFG_VXLAN_PORT_UPDATE : VxLAN + * OVERLAY_CFG_GENEVE_PORT_UPDATE : Geneve + * in: (uint16_t) a1 = unsigned short int port information + */ + CMD_OVERLAY_OFFLOAD_CFG = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 73), + + /* + * Return the configured name for the device + * in: (uint64_t) a0=Host address where the name is copied + * (uint32_t) a1=Size of the buffer + */ + CMD_GET_CONFIG_NAME = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 74), + + /* + * Enable group interrupt for the VF + * in: (uint32_t) a0 = GRPINTR_ENABLE : enable + * a0 = GRPINTR_DISABLE : disable + * a0 = GRPINTR_UPD_VECT: update group vector addr + * in: (uint32_t) a1 = interrupt group count + * in: (uint64_t) a2 = Start of host buffer address for DMAing group + * vector bitmap + * in: (uint64_t) a3 = Stride between group vectors + */ + CMD_CONFIG_GRPINTR = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 75), + + /* + * Set cq arrary base and size in a list of consective wqs and + * rqs for a device + * in: (uint16_t) a0 = the wq relative index in the device. + * -1 indicates skipping wq configuration + * in: (uint16_t) a1 = the wcq relative index in the device + * in: (uint16_t) a2 = the rq relative index in the device + * -1 indicates skipping rq configuration + * in: (uint16_t) a3 = the rcq relative index in the device + */ + CMD_CONFIG_CQ_ARRAY = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 76), + + /* + * Add an advanced filter. 
+ * in: (uint64_t) a0= filter address + * (uint32_t) a1= size of filter + * out: (uint32_t) a0=filter identifier + * + * Capability query: + * in: (uint64_t) a1= supported filter capability exchange modes + * out: (uint64_t) a0= 1 if capability query supported + * if (uint64_t) a1 = 0: a1 = MAX filter type supported + * if (uint64_t) a1 & FILTER_CAP_MODE_V1_FLAG: + * a1 = bitmask of supported filters + * a2 = FILTER_CAP_MODE_V1 + * a3 = bitmask of supported actions + */ + CMD_ADD_ADV_FILTER = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ENET, 77), + + /* + * Perform a Flow Manager Operation (see flowman_api.h) + * in: (uint32_t) a0 = sub-command + * (uint64_t) a1..15 = (sub-command specific) + * + * All arguments that have not been assigned a meaning should be + * initialized to 0 to allow for better driver forward compatibility. + */ + CMD_FLOW_MANAGER_OP = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ENET, 88), +}; + +/* Modes for exchanging advanced filter capabilities. The modes supported by + * the driver are passed in the CMD_ADD_ADV_FILTER capability command and the + * mode selected is returned. + * V0: the maximum filter type supported is returned + * V1: bitmasks of supported filters and actions are returned + */ +enum filter_cap_mode { + FILTER_CAP_MODE_V0 = 0, /* Must always be 0 for legacy drivers */ + FILTER_CAP_MODE_V1 = 1, +}; +#define FILTER_CAP_MODE_V1_FLAG (1 << FILTER_CAP_MODE_V1) + +/* CMD_ENABLE2 flags */ +#define CMD_ENABLE2_STANDBY 0x0 +#define CMD_ENABLE2_ACTIVE 0x1 + +/* flags for CMD_OPEN */ +#define CMD_OPENF_OPROM 0x1 /* open coming from option rom */ +#define CMD_OPENF_IG_DESCCACHE 0x2 /* Do not flush IG DESC cache */ + +/* flags for CMD_INIT */ +#define CMD_INITF_DEFAULT_MAC 0x1 /* init with default mac addr */ + +/* flags for CMD_NIC_CFG */ +#define CMD_NIC_CFG_CAPF_UDP_WEAK (1ULL << 0) /* Bodega-style UDP RSS */ + +/* flags for CMD_PACKET_FILTER */ +#define CMD_PFILTER_DIRECTED 0x01 +#define CMD_PFILTER_MULTICAST 0x02 +#define CMD_PFILTER_BROADCAST 0x04 +#define CMD_PFILTER_PROMISCUOUS 0x08 +#define CMD_PFILTER_ALL_MULTICAST 0x10 + +/* Commands for CMD_QP_ENABLE/CM_QP_DISABLE */ +#define CMD_QP_RQWQ 0x0 + +/* rewrite modes for CMD_IG_VLAN_REWRITE_MODE */ +#define IG_VLAN_REWRITE_MODE_DEFAULT_TRUNK 0 +#define IG_VLAN_REWRITE_MODE_UNTAG_DEFAULT_VLAN 1 +#define IG_VLAN_REWRITE_MODE_PRIORITY_TAG_DEFAULT_VLAN 2 +#define IG_VLAN_REWRITE_MODE_PASS_THRU 3 + +enum vnic_devcmd_status { + STAT_NONE = 0, + STAT_BUSY = 1 << 0, /* cmd in progress */ + STAT_ERROR = 1 << 1, /* last cmd caused error (code in a0) */ + STAT_FAILOVER = 1 << 2, /* always set on vnics in pci standby state + * if seen a failover to the standby happened + */ +}; + +enum vnic_devcmd_error { + ERR_SUCCESS = 0, + ERR_EINVAL = 1, + ERR_EFAULT = 2, + ERR_EPERM = 3, + ERR_EBUSY = 4, + ERR_ECMDUNKNOWN = 5, + ERR_EBADSTATE = 6, + ERR_ENOMEM = 7, + ERR_ETIMEDOUT = 8, + ERR_ELINKDOWN = 9, + ERR_EMAXRES = 10, + ERR_ENOTSUPPORTED = 11, + ERR_EINPROGRESS = 12, + ERR_MAX +}; + +/* + * note: hw_version and asic_rev refer to the same thing, + * but have different formats. hw_version is + * a 32-byte string (e.g. "A2") and asic_rev is + * a 16-bit integer (e.g. 0xA2). 
+ */ +struct vnic_devcmd_fw_info { + char fw_version[32]; + char fw_build[32]; + char hw_version[32]; + char hw_serial_number[32]; + uint16_t asic_type; + uint16_t asic_rev; +}; + +enum fwinfo_asic_type { + FWINFO_ASIC_TYPE_UNKNOWN, + FWINFO_ASIC_TYPE_PALO, + FWINFO_ASIC_TYPE_SERENO, + FWINFO_ASIC_TYPE_CRUZ, +}; + +struct vnic_devcmd_notify { + uint32_t csum; /* checksum over following words */ + + uint32_t link_state; /* link up == 1 */ + uint32_t port_speed; /* effective port speed (rate limit) */ + uint32_t mtu; /* MTU */ + uint32_t msglvl; /* requested driver msg lvl */ + uint32_t uif; /* uplink interface */ + uint32_t status; /* status bits (see VNIC_STF_*) */ + uint32_t error; /* error code (see ERR_*) for 1st ERR */ + uint32_t link_down_cnt; /* running count of link down + * transitions + */ + uint32_t perbi_rebuild_cnt; /* running count of perbi rebuilds */ +}; +#define VNIC_STF_FATAL_ERR 0x0001 /* fatal fw error */ +#define VNIC_STF_STD_PAUSE 0x0002 /* standard link-level pause on */ +#define VNIC_STF_PFC_PAUSE 0x0004 /* priority flow control pause on */ +/* all supported status flags */ +#define VNIC_STF_ALL (VNIC_STF_FATAL_ERR |\ + VNIC_STF_STD_PAUSE |\ + VNIC_STF_PFC_PAUSE |\ + 0) + +struct vnic_devcmd_provinfo { + uint8_t oui[3]; + uint8_t type; + uint8_t data[0]; +}; + +/* + * These are used in flags field of different filters to denote + * valid fields used. + */ +#define FILTER_FIELD_VALID(fld) (1 << (fld - 1)) + +#define FILTER_FIELD_USNIC_VLAN FILTER_FIELD_VALID(1) +#define FILTER_FIELD_USNIC_ETHTYPE FILTER_FIELD_VALID(2) +#define FILTER_FIELD_USNIC_PROTO FILTER_FIELD_VALID(3) +#define FILTER_FIELD_USNIC_ID FILTER_FIELD_VALID(4) + +#define FILTER_FIELDS_USNIC (FILTER_FIELD_USNIC_VLAN | \ + FILTER_FIELD_USNIC_ETHTYPE | \ + FILTER_FIELD_USNIC_PROTO | \ + FILTER_FIELD_USNIC_ID) + +struct filter_usnic_id { + uint32_t flags; + uint16_t vlan; + uint16_t ethtype; + uint8_t proto_version; + uint32_t usnic_id; +} __rte_packed; + +#define FILTER_FIELD_5TUP_PROTO FILTER_FIELD_VALID(1) +#define FILTER_FIELD_5TUP_SRC_AD FILTER_FIELD_VALID(2) +#define FILTER_FIELD_5TUP_DST_AD FILTER_FIELD_VALID(3) +#define FILTER_FIELD_5TUP_SRC_PT FILTER_FIELD_VALID(4) +#define FILTER_FIELD_5TUP_DST_PT FILTER_FIELD_VALID(5) + +#define FILTER_FIELDS_IPV4_5TUPLE (FILTER_FIELD_5TUP_PROTO | \ + FILTER_FIELD_5TUP_SRC_AD | \ + FILTER_FIELD_5TUP_DST_AD | \ + FILTER_FIELD_5TUP_SRC_PT | \ + FILTER_FIELD_5TUP_DST_PT) + +/* Enums for the protocol field. 
*/ +enum protocol_e { + PROTO_UDP = 0, + PROTO_TCP = 1, + PROTO_IPV4 = 2, + PROTO_IPV6 = 3 +}; + +struct filter_ipv4_5tuple { + uint32_t flags; + uint32_t protocol; + uint32_t src_addr; + uint32_t dst_addr; + uint16_t src_port; + uint16_t dst_port; +} __rte_packed; + +#define FILTER_FIELD_VMQ_VLAN FILTER_FIELD_VALID(1) +#define FILTER_FIELD_VMQ_MAC FILTER_FIELD_VALID(2) + +#define FILTER_FIELDS_MAC_VLAN (FILTER_FIELD_VMQ_VLAN | \ + FILTER_FIELD_VMQ_MAC) + +#define FILTER_FIELDS_NVGRE FILTER_FIELD_VMQ_MAC + +struct filter_mac_vlan { + uint32_t flags; + uint16_t vlan; + uint8_t mac_addr[6]; +} __rte_packed; + +#define FILTER_FIELD_VLAN_IP_3TUP_VLAN FILTER_FIELD_VALID(1) +#define FILTER_FIELD_VLAN_IP_3TUP_L3_PROTO FILTER_FIELD_VALID(2) +#define FILTER_FIELD_VLAN_IP_3TUP_DST_AD FILTER_FIELD_VALID(3) +#define FILTER_FIELD_VLAN_IP_3TUP_L4_PROTO FILTER_FIELD_VALID(4) +#define FILTER_FIELD_VLAN_IP_3TUP_DST_PT FILTER_FIELD_VALID(5) + +#define FILTER_FIELDS_VLAN_IP_3TUP (FILTER_FIELD_VLAN_IP_3TUP_VLAN | \ + FILTER_FIELD_VLAN_IP_3TUP_L3_PROTO | \ + FILTER_FIELD_VLAN_IP_3TUP_DST_AD | \ + FILTER_FIELD_VLAN_IP_3TUP_L4_PROTO | \ + FILTER_FIELD_VLAN_IP_3TUP_DST_PT) + +struct filter_vlan_ip_3tuple { + uint32_t flags; + uint16_t vlan; + uint16_t l3_protocol; + union { + uint32_t dst_addr_v4; + uint8_t dst_addr_v6[16]; + } u; + uint32_t l4_protocol; + uint16_t dst_port; +} __rte_packed; + +#define FILTER_GENERIC_1_BYTES 64 + +enum filter_generic_1_layer { + FILTER_GENERIC_1_L2, + FILTER_GENERIC_1_L3, + FILTER_GENERIC_1_L4, + FILTER_GENERIC_1_L5, + FILTER_GENERIC_1_NUM_LAYERS +}; + +#define FILTER_GENERIC_1_IPV4 (1 << 0) +#define FILTER_GENERIC_1_IPV6 (1 << 1) +#define FILTER_GENERIC_1_UDP (1 << 2) +#define FILTER_GENERIC_1_TCP (1 << 3) +#define FILTER_GENERIC_1_TCP_OR_UDP (1 << 4) +#define FILTER_GENERIC_1_IP4SUM_OK (1 << 5) +#define FILTER_GENERIC_1_L4SUM_OK (1 << 6) +#define FILTER_GENERIC_1_IPFRAG (1 << 7) + +#define FILTER_GENERIC_1_KEY_LEN 64 + +/* + * Version 1 of generic filter specification + * position is only 16 bits, reserving positions > 64k to be used by firmware + */ +struct filter_generic_1 { + uint16_t position; /* lower position comes first */ + uint32_t mask_flags; + uint32_t val_flags; + uint16_t mask_vlan; + uint16_t val_vlan; + struct { + uint8_t mask[FILTER_GENERIC_1_KEY_LEN]; /* 0 bit means + * " don't care" + */ + uint8_t val[FILTER_GENERIC_1_KEY_LEN]; + } __rte_packed layer[FILTER_GENERIC_1_NUM_LAYERS]; +} __rte_packed; + +/* Specifies the filter_action type. */ +enum { + FILTER_ACTION_RQ_STEERING = 0, + FILTER_ACTION_V2 = 1, + FILTER_ACTION_MAX +}; + +struct filter_action { + uint32_t type; + union { + uint32_t rq_idx; + } u; +} __rte_packed; + +#define FILTER_ACTION_RQ_STEERING_FLAG (1 << 0) +#define FILTER_ACTION_FILTER_ID_FLAG (1 << 1) +#define FILTER_ACTION_DROP_FLAG (1 << 2) +#define FILTER_ACTION_COUNTER_FLAG (1 << 3) +#define FILTER_ACTION_V2_ALL (FILTER_ACTION_RQ_STEERING_FLAG \ + | FILTER_ACTION_DROP_FLAG \ + | FILTER_ACTION_FILTER_ID_FLAG) + +/* Version 2 of filter action must be a strict extension of struct + * filter_action where the first fields exactly match in size and meaning. + */ +struct filter_action_v2 { + uint32_t type; + uint32_t rq_idx; + uint32_t flags; /* use FILTER_ACTION_XXX_FLAG defines */ + uint16_t filter_id; + uint8_t reserved[32]; /* for future expansion */ +} __rte_packed; + +/* Specifies the filter type. 
*/ +enum filter_type { + FILTER_USNIC_ID = 0, + FILTER_IPV4_5TUPLE = 1, + FILTER_MAC_VLAN = 2, + FILTER_VLAN_IP_3TUPLE = 3, + FILTER_NVGRE_VMQ = 4, + FILTER_USNIC_IP = 5, + FILTER_DPDK_1 = 6, + FILTER_FLOWMAN = 7, + FILTER_MAX +}; + +#define FILTER_USNIC_ID_FLAG (1 << FILTER_USNIC_ID) +#define FILTER_IPV4_5TUPLE_FLAG (1 << FILTER_IPV4_5TUPLE) +#define FILTER_MAC_VLAN_FLAG (1 << FILTER_MAC_VLAN) +#define FILTER_VLAN_IP_3TUPLE_FLAG (1 << FILTER_VLAN_IP_3TUPLE) +#define FILTER_NVGRE_VMQ_FLAG (1 << FILTER_NVGRE_VMQ) +#define FILTER_USNIC_IP_FLAG (1 << FILTER_USNIC_IP) +#define FILTER_DPDK_1_FLAG (1 << FILTER_DPDK_1) +#define FILTER_V1_ALL (FILTER_USNIC_ID_FLAG | \ + FILTER_IPV4_5TUPLE_FLAG | \ + FILTER_MAC_VLAN_FLAG | \ + FILTER_VLAN_IP_3TUPLE_FLAG | \ + FILTER_NVGRE_VMQ_FLAG | \ + FILTER_USNIC_IP_FLAG | \ + FILTER_DPDK_1_FLAG) + +struct filter { + uint32_t type; + union { + struct filter_usnic_id usnic; + struct filter_ipv4_5tuple ipv4; + struct filter_mac_vlan mac_vlan; + struct filter_vlan_ip_3tuple vlan_3tuple; + } u; +} __rte_packed; + +/* + * This is a strict superset of "struct filter" and exists only + * because many drivers use "sizeof (struct filter)" in deciding TLV size. + * This new, larger struct filter would cause any code that uses that method + * to not work with older firmware, so we add filter_v2 to hold the + * new filter types. Drivers should use vnic_filter_size() to determine + * the TLV size instead of sizeof (struct fiter_v2) to guard against future + * growth. + */ +struct filter_v2 { + uint32_t type; + union { + struct filter_usnic_id usnic; + struct filter_ipv4_5tuple ipv4; + struct filter_mac_vlan mac_vlan; + struct filter_vlan_ip_3tuple vlan_3tuple; + struct filter_generic_1 generic_1; + } u; +} __rte_packed; + +enum { + CLSF_TLV_FILTER = 0, + CLSF_TLV_ACTION = 1, +}; + +struct filter_tlv { + uint32_t type; + uint32_t length; + uint32_t val[0]; +}; + +/* Data for CMD_ADD_FILTER is 2 TLV and filter + action structs */ +#define FILTER_MAX_BUF_SIZE 100 +#define FILTER_V2_MAX_BUF_SIZE (sizeof(struct filter_v2) + \ + sizeof(struct filter_action_v2) + \ + (2 * sizeof(struct filter_tlv))) + +/* + * Compute actual structure size given filter type. To be "future-proof," + * drivers should use this instead of "sizeof (struct filter_v2)" when + * computing length for TLV. + */ +static inline uint32_t +vnic_filter_size(struct filter_v2 *fp) +{ + uint32_t size; + + switch (fp->type) { + case FILTER_USNIC_ID: + size = sizeof(fp->u.usnic); + break; + case FILTER_IPV4_5TUPLE: + size = sizeof(fp->u.ipv4); + break; + case FILTER_MAC_VLAN: + case FILTER_NVGRE_VMQ: + size = sizeof(fp->u.mac_vlan); + break; + case FILTER_VLAN_IP_3TUPLE: + size = sizeof(fp->u.vlan_3tuple); + break; + case FILTER_USNIC_IP: + case FILTER_DPDK_1: + size = sizeof(fp->u.generic_1); + break; + default: + size = sizeof(fp->u); + break; + } + size += sizeof(fp->type); + return size; +} + + +enum { + CLSF_ADD = 0, + CLSF_DEL = 1, +}; + +/* + * Get the action structure size given action type. To be "future-proof," + * drivers should use this instead of "sizeof (struct filter_action_v2)" + * when computing length for TLV. 
+ */ +static inline uint32_t +vnic_action_size(struct filter_action_v2 *fap) +{ + uint32_t size; + + switch (fap->type) { + case FILTER_ACTION_RQ_STEERING: + size = sizeof(struct filter_action); + break; + case FILTER_ACTION_V2: + size = sizeof(struct filter_action_v2); + break; + default: + size = sizeof(struct filter_action); + break; + } + return size; +} + +/* + * Writing cmd register causes STAT_BUSY to get set in status register. + * When cmd completes, STAT_BUSY will be cleared. + * + * If cmd completed successfully STAT_ERROR will be clear + * and args registers contain cmd-specific results. + * + * If cmd error, STAT_ERROR will be set and args[0] contains error code. + * + * status register is read-only. While STAT_BUSY is set, + * all other register contents are read-only. + */ + +/* Make sizeof(vnic_devcmd) a power-of-2 for I/O BAR. */ +#define VNIC_DEVCMD_NARGS 15 +struct vnic_devcmd { + uint32_t status; /* RO */ + uint32_t cmd; /* RW */ + uint64_t args[VNIC_DEVCMD_NARGS]; /* RW cmd args (little-endian)*/ +}; + +/* + * Version 2 of the interface. + * + * Some things are carried over, notably the vnic_devcmd_cmd enum. + */ + +/* + * Flags for vnic_devcmd2.flags + */ + +#define DEVCMD2_FNORESULT 0x1 /* Don't copy result to host */ + +#define VNIC_DEVCMD2_NARGS VNIC_DEVCMD_NARGS +struct vnic_devcmd2 { + uint16_t pad; + uint16_t flags; + uint32_t cmd; /* same command #defines as original */ + uint64_t args[VNIC_DEVCMD2_NARGS]; +}; + +#define VNIC_DEVCMD2_NRESULTS VNIC_DEVCMD_NARGS +struct devcmd2_result { + uint64_t results[VNIC_DEVCMD2_NRESULTS]; + uint32_t pad; + uint16_t completed_index; /* into copy WQ */ + uint8_t error; /* same error codes as original */ + uint8_t color; /* 0 or 1 as with completion queues */ +}; + +#define DEVCMD2_RING_SIZE 32 +#define DEVCMD2_DESC_SIZE 128 + +#define DEVCMD2_RESULTS_SIZE_MAX ((1 << 16) - 1) + +/* Overlay related definitions */ + +/* + * This enum lists the flag associated with each of the overlay features + */ +typedef enum { + OVERLAY_FEATURE_NVGRE = 1, + OVERLAY_FEATURE_VXLAN, + OVERLAY_FEATURE_GENEVE, + OVERLAY_FEATURE_MAX, +} overlay_feature_t; + +#define OVERLAY_OFFLOAD_ENABLE 0 +#define OVERLAY_OFFLOAD_DISABLE 1 +#define OVERLAY_OFFLOAD_ENABLE_V2 2 + +#define OVERLAY_CFG_VXLAN_PORT_UPDATE 0 +#define OVERLAY_CFG_GENEVE_PORT_UPDATE 1 + +/* + * Use this enum to get the supported versions for each of these features + * If you need to use the devcmd_get_supported_feature_version(), add + * the new feature into this enum and install function handler in devcmd.c + */ +typedef enum { + VIC_FEATURE_VXLAN, + VIC_FEATURE_RDMA, + VIC_FEATURE_GENEVE, + VIC_FEATURE_MAX, +} vic_feature_t; + +/* + * These flags are used in args[1] of devcmd CMD_GET_SUPP_FEATURE_VER + * to indicate the host driver about the VxLAN and Multi WQ features + * supported + */ +#define FEATURE_VXLAN_IPV6_INNER (1 << 0) +#define FEATURE_VXLAN_IPV6_OUTER (1 << 1) +#define FEATURE_VXLAN_MULTI_WQ (1 << 2) + +#define FEATURE_VXLAN_IPV6 (FEATURE_VXLAN_IPV6_INNER | \ + FEATURE_VXLAN_IPV6_OUTER) +/* Support Geneve option bytes */ +#define FEATURE_GENEVE_OPTIONS (1 << 0) + +/* + * CMD_CONFIG_GRPINTR subcommands + */ +typedef enum { + GRPINTR_ENABLE = 1, + GRPINTR_DISABLE, + GRPINTR_UPD_VECT, +} grpintr_subcmd_t; + +#endif /* _VNIC_DEVCMD_H_ */ diff --git a/src/spdk/dpdk/drivers/net/enic/base/vnic_enet.h b/src/spdk/dpdk/drivers/net/enic/base/vnic_enet.h new file mode 100644 index 000000000..7687951c9 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/enic/base/vnic_enet.h @@ -0,0 +1,66 @@ 
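+
+/*
+ * Editor's note on the vnic_devcmd.h definitions above (inserted here, ahead
+ * of the vnic_enet.h listing that follows; not part of the original sources):
+ * vnic_dev_classifier() packs one filter TLV followed by one action TLV into
+ * a single DMA buffer for CMD_ADD_FILTER / CMD_ADD_ADV_FILTER. The sketch
+ * below only shows how that buffer is sized using the helpers defined above;
+ * the function name is an assumption made for the example.
+ */
+static inline uint64_t
+vnic_example_clsf_tlv_size(struct filter_v2 *fltr, struct filter_action_v2 *act)
+{
+	/* Layout: [filter_tlv][filter payload][filter_tlv][action payload] */
+	return (uint64_t)vnic_filter_size(fltr) +
+	       (uint64_t)vnic_action_size(act) +
+	       2 * sizeof(struct filter_tlv);
+}
+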
+/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + */ + +#ifndef _VNIC_ENIC_H_ +#define _VNIC_ENIC_H_ + +/* Hardware intr coalesce timer is in units of 1.5us */ +#define INTR_COALESCE_USEC_TO_HW(usec) ((usec) * 2 / 3) +#define INTR_COALESCE_HW_TO_USEC(usec) ((usec) * 3 / 2) + +/* Device-specific region: enet configuration */ +struct vnic_enet_config { + uint32_t flags; + uint32_t wq_desc_count; + uint32_t rq_desc_count; + uint16_t mtu; + uint16_t intr_timer_deprecated; + uint8_t intr_timer_type; + uint8_t intr_mode; + char devname[16]; + uint32_t intr_timer_usec; + uint16_t loop_tag; + uint16_t vf_rq_count; + uint16_t num_arfs; + uint64_t mem_paddr; + uint16_t rdma_qp_id; + uint16_t rdma_qp_count; + uint16_t rdma_resgrp; + uint32_t rdma_mr_id; + uint32_t rdma_mr_count; + uint32_t max_pkt_size; +}; + +#define VENETF_TSO 0x1 /* TSO enabled */ +#define VENETF_LRO 0x2 /* LRO enabled */ +#define VENETF_RXCSUM 0x4 /* RX csum enabled */ +#define VENETF_TXCSUM 0x8 /* TX csum enabled */ +#define VENETF_RSS 0x10 /* RSS enabled */ +#define VENETF_RSSHASH_IPV4 0x20 /* Hash on IPv4 fields */ +#define VENETF_RSSHASH_TCPIPV4 0x40 /* Hash on TCP + IPv4 fields */ +#define VENETF_RSSHASH_IPV6 0x80 /* Hash on IPv6 fields */ +#define VENETF_RSSHASH_TCPIPV6 0x100 /* Hash on TCP + IPv6 fields */ +#define VENETF_RSSHASH_IPV6_EX 0x200 /* Hash on IPv6 extended fields */ +#define VENETF_RSSHASH_TCPIPV6_EX 0x400 /* Hash on TCP + IPv6 ext. fields */ +#define VENETF_LOOP 0x800 /* Loopback enabled */ +#define VENETF_FAILOVER 0x1000 /* Fabric failover enabled */ +#define VENETF_USPACE_NIC 0x2000 /* vHPC enabled */ +#define VENETF_VMQ 0x4000 /* VMQ enabled */ +#define VENETF_ARFS 0x8000 /* ARFS enabled */ +#define VENETF_VXLAN 0x10000 /* VxLAN offload */ +#define VENETF_NVGRE 0x20000 /* NVGRE offload */ +#define VENETF_GRPINTR 0x40000 /* group interrupt */ +#define VENETF_NICSWITCH 0x80000 /* NICSWITCH enabled */ +#define VENETF_RSSHASH_UDPIPV4 0x100000 /* Hash on UDP + IPv4 fields */ +#define VENETF_RSSHASH_UDPIPV6 0x200000 /* Hash on UDP + IPv6 fields */ + +#define VENET_INTR_TYPE_MIN 0 /* Timer specs min interrupt spacing */ +#define VENET_INTR_TYPE_IDLE 1 /* Timer specs idle time before irq */ + +#define VENET_INTR_MODE_ANY 0 /* Try MSI-X, then MSI, then INTx */ +#define VENET_INTR_MODE_MSI 1 /* Try MSI then INTx */ +#define VENET_INTR_MODE_INTX 2 /* Try INTx only */ + +#endif /* _VNIC_ENIC_H_ */ diff --git a/src/spdk/dpdk/drivers/net/enic/base/vnic_flowman.h b/src/spdk/dpdk/drivers/net/enic/base/vnic_flowman.h new file mode 100644 index 000000000..81e2cff1b --- /dev/null +++ b/src/spdk/dpdk/drivers/net/enic/base/vnic_flowman.h @@ -0,0 +1,386 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2018-2019 Cisco Systems, Inc. All rights reserved. + */ +#ifndef _VNIC_FLOWMAN_H_ +#define _VNIC_FLOWMAN_H_ + +/* This file contains Flow Manager (FM) API of the firmware */ + +/* Flow manager sub-ops */ +enum { + FM_EXACT_TABLE_ALLOC, + FM_TCAM_TABLE_ALLOC, + FM_MATCH_TABLE_FREE, + FM_COUNTER_BRK, + FM_COUNTER_QUERY, + FM_COUNTER_CLEAR_ALL, + FM_COUNTER_DMA, + FM_ACTION_ALLOC, + FM_ACTION_FREE, + FM_EXACT_ENTRY_INSTALL, + FM_TCAM_ENTRY_INSTALL, + FM_MATCH_ENTRY_REMOVE, + FM_VNIC_FIND, + FM_API_VERSION_QUERY, + FM_API_VERSION_SELECT, + FM_INFO_QUERY +}; + +/* + * FKM (flow key metadata) flags used to match packet metadata + * (e.g. 
packet is tcp) + */ +#define FKM_BITS \ + FBIT(FKM_QTAG) \ + FBIT(FKM_CMD) \ + FBIT(FKM_IPV4) \ + FBIT(FKM_IPV6) \ + FBIT(FKM_ROCE) \ + FBIT(FKM_UDP) \ + FBIT(FKM_TCP) \ + FBIT(FKM_TCPORUDP) \ + FBIT(FKM_IPFRAG) \ + FBIT(FKM_NVGRE) \ + FBIT(FKM_VXLAN) \ + FBIT(FKM_GENEVE) \ + FBIT(FKM_NSH) \ + FBIT(FKM_ROCEV2) \ + FBIT(FKM_VLAN_PRES) \ + FBIT(FKM_IPOK) \ + FBIT(FKM_L4OK) \ + FBIT(FKM_ROCEOK) \ + FBIT(FKM_FCSOK) \ + FBIT(FKM_EG_SPAN) \ + FBIT(FKM_IG_SPAN) \ + FBIT(FKM_EG_HAIRPINNED) + +/* + * FKH (flow key header) flags. + * This selects which headers are valid in the struct. + * This is distinct from metadata in that metadata is requesting actual + * selection criteria. If, for example, a TCAM match with metadata "FKM_UDP" + * is feeding into an exact match table, there may be no need for the + * exact match table to also specify FKM_UDP, so FKH_UDP is used to + * specify that the UDP header fields should be used in the match. + */ +#define FKH_BITS \ + FBIT(FKH_ETHER) \ + FBIT(FKH_QTAG) \ + FBIT(FKH_L2RAW) \ + FBIT(FKH_IPV4) \ + FBIT(FKH_IPV6) \ + FBIT(FKH_L3RAW) \ + FBIT(FKH_UDP) \ + FBIT(FKH_TCP) \ + FBIT(FKH_ICMP) \ + FBIT(FKH_VXLAN) \ + FBIT(FKH_L4RAW) + +#define FBIT(X) X##_BIT, +enum { + FKM_BITS + FKM_BIT_COUNT +}; + +enum { + FKH_BITS + FKH_BIT_COUNT +}; +#undef FBIT +#define FBIT(X) X = (1 << X##_BIT), +enum { + FKM_BITS +}; +enum { + FKH_BITS +}; +#undef FBIT + +#define FM_ETH_ALEN 6 +#define FM_LAYER_SIZE 64 + +/* Header match pattern */ +struct fm_header_set { + uint32_t fk_metadata; /* FKM flags */ + uint32_t fk_header_select; /* FKH flags */ + uint16_t fk_vlan; + /* L2: Ethernet Header (valid if FKH_ETHER) */ + union { + struct { + uint8_t fk_dstmac[FM_ETH_ALEN]; + uint8_t fk_srcmac[FM_ETH_ALEN]; + uint16_t fk_ethtype; + } __rte_packed eth; + uint8_t rawdata[FM_LAYER_SIZE]; + } __rte_packed l2; + /* L3: IPv4 or IPv6 (valid if FKH_IPV4,6) */ + union { + /* Valid if FKH_IPV4 */ + struct { + uint8_t fk_ihl_vers; + uint8_t fk_tos; + uint16_t fk_tot_len; + uint16_t fk_id; + uint16_t fk_frag_off; + uint8_t fk_ttl; + uint8_t fk_proto; + uint16_t fk_check; + uint32_t fk_saddr; + uint32_t fk_daddr; + } __rte_packed ip4; + /* Valid if FKH_IPV6 */ + struct { + union { + struct { + uint32_t fk_un1_flow; + uint16_t fk_un1_plen; + uint8_t fk_un1_nxt; + uint8_t fk_un1_hlim; + } unl; + uint8_t fk_un2_vfc; + } ctl; + uint8_t fk_srcip[16]; + uint8_t fk_dstip[16]; + } __rte_packed ip6; + uint8_t rawdata[FM_LAYER_SIZE]; + } __rte_packed l3; + /* L4: UDP, TCP, or ICMP (valid if FKH_UDP,TCP,ICMP) */ + union { + struct { + uint16_t fk_source; + uint16_t fk_dest; + uint16_t fk_len; + uint16_t fk_check; + } __rte_packed udp; + struct { + uint16_t fk_source; + uint16_t fk_dest; + uint32_t fk_seq; + uint32_t fk_ack_seq; + uint16_t fk_flags; + uint16_t fk_window; + uint16_t fk_check; + uint16_t fk_urg_ptr; + } __rte_packed tcp; + struct { + uint8_t fk_code; + uint8_t fk_type; + } __rte_packed icmp; + uint8_t rawdata[FM_LAYER_SIZE]; + } __rte_packed l4; + /* VXLAN (valid if FKH_VXLAN) */ + struct { + uint8_t fkvx_flags; + uint8_t fkvx_res0[3]; + uint8_t fkvx_vni[3]; + uint8_t fkvx_res1; + } __rte_packed vxlan; + /* Payload or unknown inner-most protocol */ + uint8_t fk_l5_data[64]; +} __rte_packed; + +/* + * FK (flow key) template. + * fk_hdrset specifies a set of headers per layer of encapsulation. 
+ * Currently FM supports two header sets: outer (0) and inner(1) + */ +#define FM_HDRSET_MAX 2 + +struct fm_key_template { + struct fm_header_set fk_hdrset[FM_HDRSET_MAX]; + uint32_t fk_flags; + uint16_t fk_packet_tag; + uint16_t fk_packet_size; + uint16_t fk_port_id; + uint32_t fk_wq_id; /* WQ index */ + uint64_t fk_wq_vnic; /* VNIC handle for WQ index */ +} __rte_packed; + +/* Action operation types */ +enum { + FMOP_NOP = 0, + /* End the action chain. */ + FMOP_END, + /* Drop packet and end the action chain. */ + FMOP_DROP, + /* Steer packet to an RQ. */ + FMOP_RQ_STEER, + /* + * Jump to an exact match table. + * arg1: exact match table handle + */ + FMOP_EXACT_MATCH, + /* Apply CQ-visible mark on packet. Mark is written to RSS HASH. */ + FMOP_MARK, + /* + * Apply CQ-visible mark on packet. Mark is written to a field in + * extended CQ. RSS HASH is preserved. + */ + FMOP_EXT_MARK, + /* + * Apply internal tag which can be matched in subsequent + * stages or hairpin. + */ + FMOP_TAG, + /* Hairpin packet from EG -> IG */ + FMOP_EG_HAIRPIN, + /* Hairpin packet from IG -> EG */ + FMOP_IG_HAIRPIN, + /* Encap with VXLAN and inner VLAN from metadata. */ + FMOP_ENCAP_IVLAN, + /* Encap, no inner VLAN. */ + FMOP_ENCAP_NOIVLAN, + /* Encap, add inner VLAN if present. */ + FMOP_ENCAP, + /* Set outer VLAN. */ + FMOP_SET_OVLAN, + /* Decap when vlan_strip is off */ + FMOP_DECAP_NOSTRIP, + /* Decap and strip VLAN */ + FMOP_DECAP_STRIP, + /* Remove outer VLAN */ + FMOP_POP_VLAN, + /* Set Egress port */ + FMOP_SET_EGPORT, + /* Steer to an RQ without entering EMIT state */ + FMOP_RQ_STEER_ONLY, + /* Set VLAN when replicating encapped packets */ + FMOP_SET_ENCAP_VLAN, + /* Enter EMIT state */ + FMOP_EMIT, + /* Enter MODIFY state */ + FMOP_MODIFY, + FMOP_OP_MAX, +}; + +/* + * Action operation. + * Complex actions are achieved by a series of "transform operations" + * We can have complex transform operations like "decap" or "vxlan + * encap" and also simple ops like insert this data, add PACKET_LEN to + * this address, etc. + */ +struct fm_action_op { + uint32_t fa_op; /* FMOP flags */ + + union { + struct { + uint8_t len1_offset; + uint8_t len1_delta; + uint8_t len2_offset; + uint8_t len2_delta; + uint16_t outer_vlan; + uint8_t template_offset; + uint8_t template_len; + } __rte_packed encap; + struct { + uint16_t rq_index; + uint16_t rq_count; + uint64_t vnic_handle; + } __rte_packed rq_steer; + struct { + uint16_t vlan; + } __rte_packed ovlan; + struct { + uint16_t vlan; + } __rte_packed set_encap_vlan; + struct { + uint16_t mark; + } __rte_packed mark; + struct { + uint32_t ext_mark; + } __rte_packed ext_mark; + struct { + uint8_t tag; + } __rte_packed tag; + struct { + uint64_t handle; + } __rte_packed exact; + struct { + uint32_t egport; + } __rte_packed set_egport; + } __rte_packed; +} __rte_packed; + +#define FM_ACTION_OP_MAX 64 +#define FM_ACTION_DATA_MAX 96 + +/* + * Action is a series of action operations applied to matched + * packet. FMA (flowman action). + */ +struct fm_action { + struct fm_action_op fma_action_ops[FM_ACTION_OP_MAX]; + uint8_t fma_data[FM_ACTION_DATA_MAX]; +} __rte_packed; + +/* Match entry flags. FMEF (flow match entry flag) */ +#define FMEF_COUNTER 0x0001 /* counter index is valid */ + +/* FEM (flow exact match) entry */ +struct fm_exact_match_entry { + struct fm_key_template fem_data; /* Match data. 
Mask is per table */ + uint32_t fem_flags; /* FMEF_xxx */ + uint64_t fem_action; /* Action handle */ + uint32_t fem_counter; /* Counter index */ +} __rte_packed; + +/* FTM (flow TCAM match) entry */ +struct fm_tcam_match_entry { + struct fm_key_template ftm_mask; /* Key mask */ + struct fm_key_template ftm_data; /* Match data */ + uint32_t ftm_flags; /* FMEF_xxx */ + uint32_t ftm_position; /* Entry position */ + uint64_t ftm_action; /* Action handle */ + uint32_t ftm_counter; /* Counter index */ +} __rte_packed; + +/* Match directions */ +enum { + FM_INGRESS, + FM_EGRESS, + FM_DIR_CNT +}; + +/* Last stage ID, independent of the number of stages in hardware */ +#define FM_STAGE_LAST 0xff + +/* Hash based exact match table. FET (flow exact match table) */ +struct fm_exact_match_table { + uint8_t fet_direction; /* FM_INGRESS or EGRESS*/ + uint8_t fet_stage; + uint8_t pad[2]; + uint32_t fet_max_entries; + uint64_t fet_dflt_action; + struct fm_key_template fet_key; +} __rte_packed; + +/* TCAM based match table. FTT (flow TCAM match table) */ +struct fm_tcam_match_table { + uint8_t ftt_direction; + uint8_t ftt_stage; + uint8_t pad[2]; + uint32_t ftt_max_entries; +} __rte_packed; + +struct fm_counter_counts { + uint64_t fcc_packets; + uint64_t fcc_bytes; +} __rte_packed; + +/* + * Return structure for FM_INFO_QUERY devcmd + */ +#define FM_VERSION 1 /* This header file is for version 1 */ + +struct fm_info { + uint64_t fm_op_mask; /* Bitmask of action supported ops */ + uint64_t fm_current_ts; /* Current VIC timestamp */ + uint64_t fm_clock_freq; /* Timestamp clock frequency */ + uint16_t fm_max_ops; /* Max ops in an action */ + uint8_t fm_stages; /* Number of match-action stages */ + uint8_t pad[5]; + uint32_t fm_counter_count; /* Number of allocated counters */ +} __rte_packed; + +#endif /* _VNIC_FLOWMAN_H_ */ diff --git a/src/spdk/dpdk/drivers/net/enic/base/vnic_intr.c b/src/spdk/dpdk/drivers/net/enic/base/vnic_intr.c new file mode 100644 index 000000000..e3ef70954 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/enic/base/vnic_intr.c @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. 
+ */ + +#include "vnic_dev.h" +#include "vnic_intr.h" + +void vnic_intr_free(struct vnic_intr *intr) +{ + intr->ctrl = NULL; +} + +int vnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr, + unsigned int index) +{ + intr->index = index; + intr->vdev = vdev; + + intr->ctrl = vnic_dev_get_res(vdev, RES_TYPE_INTR_CTRL, index); + if (!intr->ctrl) { + pr_err("Failed to hook INTR[%d].ctrl resource\n", index); + return -EINVAL; + } + + return 0; +} + +void vnic_intr_init(struct vnic_intr *intr, uint32_t coalescing_timer, + unsigned int coalescing_type, unsigned int mask_on_assertion) +{ + vnic_intr_coalescing_timer_set(intr, coalescing_timer); + iowrite32(coalescing_type, &intr->ctrl->coalescing_type); + iowrite32(mask_on_assertion, &intr->ctrl->mask_on_assertion); + iowrite32(0, &intr->ctrl->int_credits); +} + +void vnic_intr_coalescing_timer_set(struct vnic_intr *intr, + uint32_t coalescing_timer) +{ + iowrite32(vnic_dev_intr_coal_timer_usec_to_hw(intr->vdev, + coalescing_timer), &intr->ctrl->coalescing_timer); +} + +void vnic_intr_clean(struct vnic_intr *intr) +{ + iowrite32(0, &intr->ctrl->int_credits); +} diff --git a/src/spdk/dpdk/drivers/net/enic/base/vnic_intr.h b/src/spdk/dpdk/drivers/net/enic/base/vnic_intr.h new file mode 100644 index 000000000..6282ae520 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/enic/base/vnic_intr.h @@ -0,0 +1,96 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + */ + +#ifndef _VNIC_INTR_H_ +#define _VNIC_INTR_H_ + + +#include "vnic_dev.h" + +#define VNIC_INTR_TIMER_TYPE_ABS 0 +#define VNIC_INTR_TIMER_TYPE_QUIET 1 + +/* Interrupt control */ +struct vnic_intr_ctrl { + uint32_t coalescing_timer; /* 0x00 */ + uint32_t pad0; + uint32_t coalescing_value; /* 0x08 */ + uint32_t pad1; + uint32_t coalescing_type; /* 0x10 */ + uint32_t pad2; + uint32_t mask_on_assertion; /* 0x18 */ + uint32_t pad3; + uint32_t mask; /* 0x20 */ + uint32_t pad4; + uint32_t int_credits; /* 0x28 */ + uint32_t pad5; + uint32_t int_credit_return; /* 0x30 */ + uint32_t pad6; +}; + +struct vnic_intr { + unsigned int index; + struct vnic_dev *vdev; + struct vnic_intr_ctrl __iomem *ctrl; /* memory-mapped */ +}; + +static inline void vnic_intr_unmask(struct vnic_intr *intr) +{ + iowrite32(0, &intr->ctrl->mask); +} + +static inline void vnic_intr_mask(struct vnic_intr *intr) +{ + iowrite32(1, &intr->ctrl->mask); +} + +static inline int vnic_intr_masked(struct vnic_intr *intr) +{ + return ioread32(&intr->ctrl->mask); +} + +static inline void vnic_intr_return_credits(struct vnic_intr *intr, + unsigned int credits, int unmask, int reset_timer) +{ +#define VNIC_INTR_UNMASK_SHIFT 16 +#define VNIC_INTR_RESET_TIMER_SHIFT 17 + + uint32_t int_credit_return = (credits & 0xffff) | + (unmask ? (1 << VNIC_INTR_UNMASK_SHIFT) : 0) | + (reset_timer ? 
(1 << VNIC_INTR_RESET_TIMER_SHIFT) : 0); + + iowrite32(int_credit_return, &intr->ctrl->int_credit_return); +} + +static inline unsigned int vnic_intr_credits(struct vnic_intr *intr) +{ + return ioread32(&intr->ctrl->int_credits); +} + +static inline void vnic_intr_return_all_credits(struct vnic_intr *intr) +{ + unsigned int credits = vnic_intr_credits(intr); + int unmask = 1; + int reset_timer = 1; + + vnic_intr_return_credits(intr, credits, unmask, reset_timer); +} + +static inline uint32_t vnic_intr_legacy_pba(uint32_t __iomem *legacy_pba) +{ + /* read PBA without clearing */ + return ioread32(legacy_pba); +} + +void vnic_intr_free(struct vnic_intr *intr); +int vnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr, + unsigned int index); +void vnic_intr_init(struct vnic_intr *intr, uint32_t coalescing_timer, + unsigned int coalescing_type, unsigned int mask_on_assertion); +void vnic_intr_coalescing_timer_set(struct vnic_intr *intr, + uint32_t coalescing_timer); +void vnic_intr_clean(struct vnic_intr *intr); + +#endif /* _VNIC_INTR_H_ */ diff --git a/src/spdk/dpdk/drivers/net/enic/base/vnic_nic.h b/src/spdk/dpdk/drivers/net/enic/base/vnic_nic.h new file mode 100644 index 000000000..6e4a83d24 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/enic/base/vnic_nic.h @@ -0,0 +1,60 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + */ + +#ifndef _VNIC_NIC_H_ +#define _VNIC_NIC_H_ + +#define NIC_CFG_RSS_DEFAULT_CPU_MASK_FIELD 0xffUL +#define NIC_CFG_RSS_DEFAULT_CPU_SHIFT 0 +#define NIC_CFG_RSS_HASH_TYPE (0xffUL << 8) +#define NIC_CFG_RSS_HASH_TYPE_MASK_FIELD 0xffUL +#define NIC_CFG_RSS_HASH_TYPE_SHIFT 8 +#define NIC_CFG_RSS_HASH_BITS (7UL << 16) +#define NIC_CFG_RSS_HASH_BITS_MASK_FIELD 7UL +#define NIC_CFG_RSS_HASH_BITS_SHIFT 16 +#define NIC_CFG_RSS_BASE_CPU (7UL << 19) +#define NIC_CFG_RSS_BASE_CPU_MASK_FIELD 7UL +#define NIC_CFG_RSS_BASE_CPU_SHIFT 19 +#define NIC_CFG_RSS_ENABLE (1UL << 22) +#define NIC_CFG_RSS_ENABLE_MASK_FIELD 1UL +#define NIC_CFG_RSS_ENABLE_SHIFT 22 +#define NIC_CFG_TSO_IPID_SPLIT_EN (1UL << 23) +#define NIC_CFG_TSO_IPID_SPLIT_EN_MASK_FIELD 1UL +#define NIC_CFG_TSO_IPID_SPLIT_EN_SHIFT 23 +#define NIC_CFG_IG_VLAN_STRIP_EN (1UL << 24) +#define NIC_CFG_IG_VLAN_STRIP_EN_MASK_FIELD 1UL +#define NIC_CFG_IG_VLAN_STRIP_EN_SHIFT 24 + +#define NIC_CFG_RSS_HASH_TYPE_UDP_IPV4 (1 << 0) +#define NIC_CFG_RSS_HASH_TYPE_IPV4 (1 << 1) +#define NIC_CFG_RSS_HASH_TYPE_TCP_IPV4 (1 << 2) +#define NIC_CFG_RSS_HASH_TYPE_IPV6 (1 << 3) +#define NIC_CFG_RSS_HASH_TYPE_TCP_IPV6 (1 << 4) +#define NIC_CFG_RSS_HASH_TYPE_RSVD1 (1 << 5) +#define NIC_CFG_RSS_HASH_TYPE_RSVD2 (1 << 6) +#define NIC_CFG_RSS_HASH_TYPE_UDP_IPV6 (1 << 7) + +static inline void vnic_set_nic_cfg(uint32_t *nic_cfg, + uint8_t rss_default_cpu, uint8_t rss_hash_type, + uint8_t rss_hash_bits, uint8_t rss_base_cpu, + uint8_t rss_enable, uint8_t tso_ipid_split_en, + uint8_t ig_vlan_strip_en) +{ + *nic_cfg = (rss_default_cpu & NIC_CFG_RSS_DEFAULT_CPU_MASK_FIELD) | + ((rss_hash_type & NIC_CFG_RSS_HASH_TYPE_MASK_FIELD) + << NIC_CFG_RSS_HASH_TYPE_SHIFT) | + ((rss_hash_bits & NIC_CFG_RSS_HASH_BITS_MASK_FIELD) + << NIC_CFG_RSS_HASH_BITS_SHIFT) | + ((rss_base_cpu & NIC_CFG_RSS_BASE_CPU_MASK_FIELD) + << NIC_CFG_RSS_BASE_CPU_SHIFT) | + ((rss_enable & NIC_CFG_RSS_ENABLE_MASK_FIELD) + << NIC_CFG_RSS_ENABLE_SHIFT) | + ((tso_ipid_split_en & NIC_CFG_TSO_IPID_SPLIT_EN_MASK_FIELD) + << NIC_CFG_TSO_IPID_SPLIT_EN_SHIFT) | + 
((ig_vlan_strip_en & NIC_CFG_IG_VLAN_STRIP_EN_MASK_FIELD) + << NIC_CFG_IG_VLAN_STRIP_EN_SHIFT); +} + +#endif /* _VNIC_NIC_H_ */ diff --git a/src/spdk/dpdk/drivers/net/enic/base/vnic_resource.h b/src/spdk/dpdk/drivers/net/enic/base/vnic_resource.h new file mode 100644 index 000000000..870a4de6e --- /dev/null +++ b/src/spdk/dpdk/drivers/net/enic/base/vnic_resource.h @@ -0,0 +1,67 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + */ + +#ifndef _VNIC_RESOURCE_H_ +#define _VNIC_RESOURCE_H_ + +#define VNIC_RES_MAGIC 0x766E6963L /* 'vnic' */ +#define VNIC_RES_VERSION 0x00000000L +#define MGMTVNIC_MAGIC 0x544d474dL /* 'MGMT' */ +#define MGMTVNIC_VERSION 0x00000000L + +/* The MAC address assigned to the CFG vNIC is fixed. */ +#define MGMTVNIC_MAC { 0x02, 0x00, 0x54, 0x4d, 0x47, 0x4d } + +/* vNIC resource types */ +enum vnic_res_type { + RES_TYPE_EOL, /* End-of-list */ + RES_TYPE_WQ, /* Work queues */ + RES_TYPE_RQ, /* Receive queues */ + RES_TYPE_CQ, /* Completion queues */ + RES_TYPE_MEM, /* Window to dev memory */ + RES_TYPE_NIC_CFG, /* Enet NIC config registers */ + RES_TYPE_RSS_KEY, /* Enet RSS secret key */ + RES_TYPE_RSS_CPU, /* Enet RSS indirection table */ + RES_TYPE_TX_STATS, /* Netblock Tx statistic regs */ + RES_TYPE_RX_STATS, /* Netblock Rx statistic regs */ + RES_TYPE_INTR_CTRL, /* Interrupt ctrl table */ + RES_TYPE_INTR_TABLE, /* MSI/MSI-X Interrupt table */ + RES_TYPE_INTR_PBA, /* MSI/MSI-X PBA table */ + RES_TYPE_INTR_PBA_LEGACY, /* Legacy intr status */ + RES_TYPE_DEBUG, /* Debug-only info */ + RES_TYPE_DEV, /* Device-specific region */ + RES_TYPE_DEVCMD, /* Device command region */ + RES_TYPE_PASS_THRU_PAGE, /* Pass-thru page */ + RES_TYPE_SUBVNIC, /* subvnic resource type */ + RES_TYPE_MQ_WQ, /* MQ Work queues */ + RES_TYPE_MQ_RQ, /* MQ Receive queues */ + RES_TYPE_MQ_CQ, /* MQ Completion queues */ + RES_TYPE_DEPRECATED1, /* Old version of devcmd 2 */ + RES_TYPE_DEVCMD2, /* Device control region */ + RES_TYPE_MAX, /* Count of resource types */ +}; + +struct vnic_resource_header { + uint32_t magic; + uint32_t version; +}; + +struct mgmt_barmap_hdr { + uint32_t magic; /* magic number */ + uint32_t version; /* header format version */ + uint16_t lif; /* loopback lif for mgmt frames */ + uint16_t pci_slot; /* installed pci slot */ + char serial[16]; /* card serial number */ +}; + +struct vnic_resource { + uint8_t type; + uint8_t bar; + uint8_t pad[2]; + uint32_t bar_offset; + uint32_t count; +}; + +#endif /* _VNIC_RESOURCE_H_ */ diff --git a/src/spdk/dpdk/drivers/net/enic/base/vnic_rq.c b/src/spdk/dpdk/drivers/net/enic/base/vnic_rq.c new file mode 100644 index 000000000..1af96a941 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/enic/base/vnic_rq.c @@ -0,0 +1,148 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. 
+ */ + +#include +#include "vnic_dev.h" +#include "vnic_rq.h" + +void vnic_rq_free(struct vnic_rq *rq) +{ + struct vnic_dev *vdev; + + vdev = rq->vdev; + + vnic_dev_free_desc_ring(vdev, &rq->ring); + + rq->ctrl = NULL; +} + +int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index, + unsigned int desc_count, unsigned int desc_size) +{ + int rc; + char res_name[RTE_MEMZONE_NAMESIZE]; + static int instance; + + rq->index = index; + rq->vdev = vdev; + + rq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_RQ, index); + if (!rq->ctrl) { + pr_err("Failed to hook RQ[%u] resource\n", index); + return -EINVAL; + } + + vnic_rq_disable(rq); + + snprintf(res_name, sizeof(res_name), "%d-rq-%u", instance++, index); + rc = vnic_dev_alloc_desc_ring(vdev, &rq->ring, desc_count, desc_size, + rq->socket_id, res_name); + return rc; +} + +void vnic_rq_init_start(struct vnic_rq *rq, unsigned int cq_index, + unsigned int fetch_index, unsigned int posted_index, + unsigned int error_interrupt_enable, + unsigned int error_interrupt_offset) +{ + uint64_t paddr; + unsigned int count = rq->ring.desc_count; + + paddr = (uint64_t)rq->ring.base_addr | VNIC_PADDR_TARGET; + writeq(paddr, &rq->ctrl->ring_base); + iowrite32(count, &rq->ctrl->ring_size); + iowrite32(cq_index, &rq->ctrl->cq_index); + iowrite32(error_interrupt_enable, &rq->ctrl->error_interrupt_enable); + iowrite32(error_interrupt_offset, &rq->ctrl->error_interrupt_offset); + iowrite32(0, &rq->ctrl->error_status); + iowrite32(fetch_index, &rq->ctrl->fetch_index); + iowrite32(posted_index, &rq->ctrl->posted_index); + if (rq->data_queue_enable) + iowrite32(((1 << 10) | rq->data_queue_idx), + &rq->ctrl->data_ring); + else + iowrite32(0, &rq->ctrl->data_ring); +} + +void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index, + unsigned int error_interrupt_enable, + unsigned int error_interrupt_offset) +{ + uint32_t fetch_index = 0; + + /* Use current fetch_index as the ring starting point */ + fetch_index = ioread32(&rq->ctrl->fetch_index); + + if (fetch_index == 0xFFFFFFFF) { /* check for hardware gone */ + /* Hardware surprise removal: reset fetch_index */ + fetch_index = 0; + } + + vnic_rq_init_start(rq, cq_index, + fetch_index, fetch_index, + error_interrupt_enable, + error_interrupt_offset); + rq->rxst_idx = 0; + rq->tot_pkts = 0; + rq->pkt_first_seg = NULL; + rq->pkt_last_seg = NULL; +} + +unsigned int vnic_rq_error_status(struct vnic_rq *rq) +{ + return ioread32(&rq->ctrl->error_status); +} + +void vnic_rq_enable(struct vnic_rq *rq) +{ + iowrite32(1, &rq->ctrl->enable); +} + +int vnic_rq_disable(struct vnic_rq *rq) +{ + unsigned int wait; + + iowrite32(0, &rq->ctrl->enable); + + /* Wait for HW to ACK disable request */ + for (wait = 0; wait < 1000; wait++) { + if (!(ioread32(&rq->ctrl->running))) + return 0; + usleep(10); + } + + pr_err("Failed to disable RQ[%d]\n", rq->index); + + return -ETIMEDOUT; +} + +void vnic_rq_clean(struct vnic_rq *rq, + void (*buf_clean)(struct rte_mbuf **buf)) +{ + struct rte_mbuf **buf; + uint32_t fetch_index, i; + unsigned int count = rq->ring.desc_count; + + buf = &rq->mbuf_ring[0]; + + for (i = 0; i < count; i++) { + (*buf_clean)(buf); + buf++; + } + rq->ring.desc_avail = count - 1; + rq->rx_nb_hold = 0; + + /* Use current fetch_index as the ring starting point */ + fetch_index = ioread32(&rq->ctrl->fetch_index); + + if (fetch_index == 0xFFFFFFFF) { /* check for hardware gone */ + /* Hardware surprise removal: reset fetch_index */ + fetch_index = 0; + } + + iowrite32(fetch_index, &rq->ctrl->posted_index); + + 
vnic_dev_clear_desc_ring(&rq->ring); +} diff --git a/src/spdk/dpdk/drivers/net/enic/base/vnic_rq.h b/src/spdk/dpdk/drivers/net/enic/base/vnic_rq.h new file mode 100644 index 000000000..cfe65015d --- /dev/null +++ b/src/spdk/dpdk/drivers/net/enic/base/vnic_rq.h @@ -0,0 +1,143 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + */ + +#ifndef _VNIC_RQ_H_ +#define _VNIC_RQ_H_ + +#include + +#include "vnic_dev.h" +#include "vnic_cq.h" + +/* Receive queue control */ +struct vnic_rq_ctrl { + uint64_t ring_base; /* 0x00 */ + uint32_t ring_size; /* 0x08 */ + uint32_t pad0; + uint32_t posted_index; /* 0x10 */ + uint32_t pad1; + uint32_t cq_index; /* 0x18 */ + uint32_t pad2; + uint32_t enable; /* 0x20 */ + uint32_t pad3; + uint32_t running; /* 0x28 */ + uint32_t pad4; + uint32_t fetch_index; /* 0x30 */ + uint32_t pad5; + uint32_t error_interrupt_enable; /* 0x38 */ + uint32_t pad6; + uint32_t error_interrupt_offset; /* 0x40 */ + uint32_t pad7; + uint32_t error_status; /* 0x48 */ + uint32_t pad8; + uint32_t tcp_sn; /* 0x50 */ + uint32_t pad9; + uint32_t unused; /* 0x58 */ + uint32_t pad10; + uint32_t dca_select; /* 0x60 */ + uint32_t pad11; + uint32_t dca_value; /* 0x68 */ + uint32_t pad12; + uint32_t data_ring; /* 0x70 */ + uint32_t pad13; + uint32_t header_split; /* 0x78 */ + uint32_t pad14; +}; + +struct vnic_rq { + unsigned int index; + unsigned int posted_index; + struct vnic_dev *vdev; + struct vnic_rq_ctrl __iomem *ctrl; /* memory-mapped */ + struct vnic_dev_ring ring; + struct rte_mbuf **free_mbufs; /* reserve of free mbufs */ + int num_free_mbufs; + struct rte_mbuf **mbuf_ring; /* array of allocated mbufs */ + unsigned int mbuf_next_idx; /* next mb to consume */ + void *os_buf_head; + unsigned int pkts_outstanding; + uint16_t rx_nb_hold; + uint16_t rx_free_thresh; + unsigned int socket_id; + struct rte_mempool *mp; + uint16_t rxst_idx; + uint32_t tot_pkts; + uint16_t data_queue_idx; + uint8_t data_queue_enable; + uint8_t is_sop; + uint8_t in_use; + struct rte_mbuf *pkt_first_seg; + struct rte_mbuf *pkt_last_seg; + unsigned int max_mbufs_per_pkt; + uint16_t tot_nb_desc; + bool need_initial_post; +}; + +static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq) +{ + /* how many does SW own? */ + return rq->ring.desc_avail; +} + +static inline unsigned int vnic_rq_desc_used(struct vnic_rq *rq) +{ + /* how many does HW own? 
*/ + return rq->ring.desc_count - rq->ring.desc_avail - 1; +} + + + +enum desc_return_options { + VNIC_RQ_RETURN_DESC, + VNIC_RQ_DEFER_RETURN_DESC, +}; + +static inline int vnic_rq_fill(struct vnic_rq *rq, + int (*buf_fill)(struct vnic_rq *rq)) +{ + int err; + + while (vnic_rq_desc_avail(rq) > 0) { + + err = (*buf_fill)(rq); + if (err) + return err; + } + + return 0; +} + +static inline int vnic_rq_fill_count(struct vnic_rq *rq, + int (*buf_fill)(struct vnic_rq *rq), unsigned int count) +{ + int err; + + while ((vnic_rq_desc_avail(rq) > 0) && (count--)) { + + err = (*buf_fill)(rq); + if (err) + return err; + } + + return 0; +} + +void vnic_rq_free(struct vnic_rq *rq); +int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index, + unsigned int desc_count, unsigned int desc_size); +void vnic_rq_init_start(struct vnic_rq *rq, unsigned int cq_index, + unsigned int fetch_index, unsigned int posted_index, + unsigned int error_interrupt_enable, + unsigned int error_interrupt_offset); +void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index, + unsigned int error_interrupt_enable, + unsigned int error_interrupt_offset); +void vnic_rq_error_out(struct vnic_rq *rq, unsigned int error); +unsigned int vnic_rq_error_status(struct vnic_rq *rq); +void vnic_rq_enable(struct vnic_rq *rq); +int vnic_rq_disable(struct vnic_rq *rq); +void vnic_rq_clean(struct vnic_rq *rq, + void (*buf_clean)(struct rte_mbuf **buf)); +#endif /* _VNIC_RQ_H_ */ diff --git a/src/spdk/dpdk/drivers/net/enic/base/vnic_rss.h b/src/spdk/dpdk/drivers/net/enic/base/vnic_rss.h new file mode 100644 index 000000000..2b3c571f8 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/enic/base/vnic_rss.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + */ + +#ifndef _VNIC_RSS_H_ +#define _VNIC_RSS_H_ + +/* RSS key array */ +union vnic_rss_key { + struct { + uint8_t b[10]; + uint8_t b_pad[6]; + } key[4]; + uint64_t raw[8]; +}; + +/* RSS cpu array */ +union vnic_rss_cpu { + struct { + uint8_t b[4]; + uint8_t b_pad[4]; + } cpu[32]; + uint64_t raw[32]; +}; + +#endif /* _VNIC_RSS_H_ */ diff --git a/src/spdk/dpdk/drivers/net/enic/base/vnic_stats.h b/src/spdk/dpdk/drivers/net/enic/base/vnic_stats.h new file mode 100644 index 000000000..a2fb40f07 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/enic/base/vnic_stats.h @@ -0,0 +1,56 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. 
+ */ + +#ifndef _VNIC_STATS_H_ +#define _VNIC_STATS_H_ + +/* Tx statistics */ +struct vnic_tx_stats { + uint64_t tx_frames_ok; + uint64_t tx_unicast_frames_ok; + uint64_t tx_multicast_frames_ok; + uint64_t tx_broadcast_frames_ok; + uint64_t tx_bytes_ok; + uint64_t tx_unicast_bytes_ok; + uint64_t tx_multicast_bytes_ok; + uint64_t tx_broadcast_bytes_ok; + uint64_t tx_drops; + uint64_t tx_errors; + uint64_t tx_tso; + uint64_t rsvd[16]; +}; + +/* Rx statistics */ +struct vnic_rx_stats { + uint64_t rx_frames_ok; + uint64_t rx_frames_total; + uint64_t rx_unicast_frames_ok; + uint64_t rx_multicast_frames_ok; + uint64_t rx_broadcast_frames_ok; + uint64_t rx_bytes_ok; + uint64_t rx_unicast_bytes_ok; + uint64_t rx_multicast_bytes_ok; + uint64_t rx_broadcast_bytes_ok; + uint64_t rx_drop; + uint64_t rx_no_bufs; + uint64_t rx_errors; + uint64_t rx_rss; + uint64_t rx_crc_errors; + uint64_t rx_frames_64; + uint64_t rx_frames_127; + uint64_t rx_frames_255; + uint64_t rx_frames_511; + uint64_t rx_frames_1023; + uint64_t rx_frames_1518; + uint64_t rx_frames_to_max; + uint64_t rsvd[16]; +}; + +struct vnic_stats { + struct vnic_tx_stats tx; + struct vnic_rx_stats rx; +}; + +#endif /* _VNIC_STATS_H_ */ diff --git a/src/spdk/dpdk/drivers/net/enic/base/vnic_wq.c b/src/spdk/dpdk/drivers/net/enic/base/vnic_wq.c new file mode 100644 index 000000000..fc6dde5ae --- /dev/null +++ b/src/spdk/dpdk/drivers/net/enic/base/vnic_wq.c @@ -0,0 +1,175 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + */ + +#include "vnic_dev.h" +#include "vnic_wq.h" + +static inline +int vnic_wq_get_ctrl(struct vnic_dev *vdev, struct vnic_wq *wq, + unsigned int index, enum vnic_res_type res_type) +{ + wq->ctrl = vnic_dev_get_res(vdev, res_type, index); + if (!wq->ctrl) + return -EINVAL; + return 0; +} + +static inline +int vnic_wq_alloc_ring(struct vnic_dev *vdev, struct vnic_wq *wq, + unsigned int desc_count, unsigned int desc_size) +{ + char res_name[RTE_MEMZONE_NAMESIZE]; + static int instance; + + snprintf(res_name, sizeof(res_name), "%d-wq-%u", instance++, wq->index); + return vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size, + wq->socket_id, res_name); +} + +static int vnic_wq_alloc_bufs(struct vnic_wq *wq) +{ + unsigned int count = wq->ring.desc_count; + /* Allocate the mbuf ring */ + wq->bufs = (struct rte_mbuf **)rte_zmalloc_socket("wq->bufs", + sizeof(struct rte_mbuf *) * count, + RTE_CACHE_LINE_SIZE, wq->socket_id); + wq->head_idx = 0; + wq->tail_idx = 0; + if (wq->bufs == NULL) + return -ENOMEM; + return 0; +} + +void vnic_wq_free(struct vnic_wq *wq) +{ + struct vnic_dev *vdev; + + vdev = wq->vdev; + + vnic_dev_free_desc_ring(vdev, &wq->ring); + + rte_free(wq->bufs); + wq->ctrl = NULL; +} + + +int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index, + unsigned int desc_count, unsigned int desc_size) +{ + int err; + + wq->index = index; + wq->vdev = vdev; + + err = vnic_wq_get_ctrl(vdev, wq, index, RES_TYPE_WQ); + if (err) { + pr_err("Failed to hook WQ[%d] resource, err %d\n", index, err); + return err; + } + + vnic_wq_disable(wq); + + err = vnic_wq_alloc_ring(vdev, wq, desc_count, desc_size); + if (err) + return err; + + err = vnic_wq_alloc_bufs(wq); + if (err) { + vnic_wq_free(wq); + return err; + } + + return 0; +} + +void vnic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index, + unsigned int fetch_index, unsigned int posted_index, + unsigned int 
error_interrupt_enable, + unsigned int error_interrupt_offset) +{ + uint64_t paddr; + unsigned int count = wq->ring.desc_count; + + paddr = (uint64_t)wq->ring.base_addr | VNIC_PADDR_TARGET; + writeq(paddr, &wq->ctrl->ring_base); + iowrite32(count, &wq->ctrl->ring_size); + iowrite32(fetch_index, &wq->ctrl->fetch_index); + iowrite32(posted_index, &wq->ctrl->posted_index); + iowrite32(cq_index, &wq->ctrl->cq_index); + iowrite32(error_interrupt_enable, &wq->ctrl->error_interrupt_enable); + iowrite32(error_interrupt_offset, &wq->ctrl->error_interrupt_offset); + iowrite32(0, &wq->ctrl->error_status); + + wq->head_idx = fetch_index; + wq->tail_idx = wq->head_idx; +} + +void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index, + unsigned int error_interrupt_enable, + unsigned int error_interrupt_offset) +{ + vnic_wq_init_start(wq, cq_index, 0, 0, + error_interrupt_enable, + error_interrupt_offset); + wq->cq_pend = 0; + wq->last_completed_index = 0; +} + +unsigned int vnic_wq_error_status(struct vnic_wq *wq) +{ + return ioread32(&wq->ctrl->error_status); +} + +void vnic_wq_enable(struct vnic_wq *wq) +{ + iowrite32(1, &wq->ctrl->enable); +} + +int vnic_wq_disable(struct vnic_wq *wq) +{ + unsigned int wait; + + iowrite32(0, &wq->ctrl->enable); + + /* Wait for HW to ACK disable request */ + for (wait = 0; wait < 1000; wait++) { + if (!(ioread32(&wq->ctrl->running))) + return 0; + usleep(10); + } + + pr_err("Failed to disable WQ[%d]\n", wq->index); + + return -ETIMEDOUT; +} + +void vnic_wq_clean(struct vnic_wq *wq, + void (*buf_clean)(struct rte_mbuf **buf)) +{ + struct rte_mbuf **buf; + unsigned int to_clean = wq->tail_idx; + + buf = &wq->bufs[to_clean]; + + while (vnic_wq_desc_used(wq) > 0) { + + (*buf_clean)(buf); + to_clean = buf_idx_incr(wq->ring.desc_count, to_clean); + + buf = &wq->bufs[to_clean]; + wq->ring.desc_avail++; + } + + wq->head_idx = 0; + wq->tail_idx = 0; + wq->last_completed_index = 0; + *((uint32_t *)wq->cqmsg_rz->addr) = 0; + + iowrite32(0, &wq->ctrl->fetch_index); + iowrite32(0, &wq->ctrl->posted_index); + iowrite32(0, &wq->ctrl->error_status); + + vnic_dev_clear_desc_ring(&wq->ring); +} diff --git a/src/spdk/dpdk/drivers/net/enic/base/vnic_wq.h b/src/spdk/dpdk/drivers/net/enic/base/vnic_wq.h new file mode 100644 index 000000000..789a50a64 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/enic/base/vnic_wq.h @@ -0,0 +1,165 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. 
+ */ + +#ifndef _VNIC_WQ_H_ +#define _VNIC_WQ_H_ + + +#include "vnic_dev.h" +#include "vnic_cq.h" +#include + +/* Work queue control */ +struct vnic_wq_ctrl { + uint64_t ring_base; /* 0x00 */ + uint32_t ring_size; /* 0x08 */ + uint32_t pad0; + uint32_t posted_index; /* 0x10 */ + uint32_t pad1; + uint32_t cq_index; /* 0x18 */ + uint32_t pad2; + uint32_t enable; /* 0x20 */ + uint32_t pad3; + uint32_t running; /* 0x28 */ + uint32_t pad4; + uint32_t fetch_index; /* 0x30 */ + uint32_t pad5; + uint32_t dca_value; /* 0x38 */ + uint32_t pad6; + uint32_t error_interrupt_enable; /* 0x40 */ + uint32_t pad7; + uint32_t error_interrupt_offset; /* 0x48 */ + uint32_t pad8; + uint32_t error_status; /* 0x50 */ + uint32_t pad9; +}; + +struct vnic_wq { + unsigned int index; + uint64_t tx_offload_notsup_mask; + struct vnic_dev *vdev; + struct vnic_wq_ctrl __iomem *ctrl; /* memory-mapped */ + struct vnic_dev_ring ring; + struct rte_mbuf **bufs; + unsigned int head_idx; + unsigned int cq_pend; + unsigned int tail_idx; + unsigned int socket_id; + const struct rte_memzone *cqmsg_rz; + uint16_t last_completed_index; + uint64_t offloads; +}; + +static inline unsigned int vnic_wq_desc_avail(struct vnic_wq *wq) +{ + /* how many does SW own? */ + return wq->ring.desc_avail; +} + +static inline unsigned int vnic_wq_desc_used(struct vnic_wq *wq) +{ + /* how many does HW own? */ + return wq->ring.desc_count - wq->ring.desc_avail - 1; +} + +#define PI_LOG2_CACHE_LINE_SIZE 5 +#define PI_INDEX_BITS 12 +#define PI_INDEX_MASK ((1U << PI_INDEX_BITS) - 1) +#define PI_PREFETCH_LEN_MASK ((1U << PI_LOG2_CACHE_LINE_SIZE) - 1) +#define PI_PREFETCH_LEN_OFF 16 +#define PI_PREFETCH_ADDR_BITS 43 +#define PI_PREFETCH_ADDR_MASK ((1ULL << PI_PREFETCH_ADDR_BITS) - 1) +#define PI_PREFETCH_ADDR_OFF 21 + +/** How many cache lines are touched by buffer (addr, len). */ +static inline unsigned int num_cache_lines_touched(dma_addr_t addr, + unsigned int len) +{ + const unsigned long mask = PI_PREFETCH_LEN_MASK; + const unsigned long laddr = (unsigned long)addr; + unsigned long lines, equiv_len; + /* A. If addr is aligned, our solution is just to round up len to the + next boundary. + + e.g. addr = 0, len = 48 + +--------------------+ + |XXXXXXXXXXXXXXXXXXXX| 32-byte cacheline a + +--------------------+ + |XXXXXXXXXX | cacheline b + +--------------------+ + + B. If addr is not aligned, however, we may use an extra + cacheline. e.g. addr = 12, len = 22 + + +--------------------+ + | XXXXXXXXXXXXX| + +--------------------+ + |XX | + +--------------------+ + + Our solution is to make the problem equivalent to case A + above by adding the empty space in the first cacheline to the length: + unsigned long len; + + +--------------------+ + |eeeeeeeXXXXXXXXXXXXX| "e" is empty space, which we add to len + +--------------------+ + |XX | + +--------------------+ + + */ + equiv_len = len + (laddr & mask); + + /* Now we can just round up this len to the next 32-byte boundary. */ + lines = (equiv_len + mask) & (~mask); + + /* Scale bytes -> cachelines. */ + return lines >> PI_LOG2_CACHE_LINE_SIZE; +} + +static inline uint64_t vnic_cached_posted_index(dma_addr_t addr, + unsigned int len, + unsigned int index) +{ + unsigned int num_cache_lines = num_cache_lines_touched(addr, len); + /* Wish we could avoid a branch here. We could have separate + * vnic_wq_post() and vinc_wq_post_inline(), the latter + * only supporting < 1k (2^5 * 2^5) sends, I suppose. This would + * eliminate the if (eop) branch as well. 
+ */ + if (num_cache_lines > PI_PREFETCH_LEN_MASK) + num_cache_lines = 0; + return (index & PI_INDEX_MASK) | + ((num_cache_lines & PI_PREFETCH_LEN_MASK) << PI_PREFETCH_LEN_OFF) | + (((addr >> PI_LOG2_CACHE_LINE_SIZE) & + PI_PREFETCH_ADDR_MASK) << PI_PREFETCH_ADDR_OFF); +} + +static inline uint32_t +buf_idx_incr(uint32_t n_descriptors, uint32_t idx) +{ + idx++; + if (unlikely(idx == n_descriptors)) + idx = 0; + return idx; +} + +void vnic_wq_free(struct vnic_wq *wq); +int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index, + unsigned int desc_count, unsigned int desc_size); +void vnic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index, + unsigned int fetch_index, unsigned int posted_index, + unsigned int error_interrupt_enable, + unsigned int error_interrupt_offset); +void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index, + unsigned int error_interrupt_enable, + unsigned int error_interrupt_offset); +void vnic_wq_error_out(struct vnic_wq *wq, unsigned int error); +unsigned int vnic_wq_error_status(struct vnic_wq *wq); +void vnic_wq_enable(struct vnic_wq *wq); +int vnic_wq_disable(struct vnic_wq *wq); +void vnic_wq_clean(struct vnic_wq *wq, + void (*buf_clean)(struct rte_mbuf **buf)); +#endif /* _VNIC_WQ_H_ */ diff --git a/src/spdk/dpdk/drivers/net/enic/base/wq_enet_desc.h b/src/spdk/dpdk/drivers/net/enic/base/wq_enet_desc.h new file mode 100644 index 000000000..e1ad18798 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/enic/base/wq_enet_desc.h @@ -0,0 +1,89 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + */ + +#ifndef _WQ_ENET_DESC_H_ +#define _WQ_ENET_DESC_H_ + +#include + +/* Ethernet work queue descriptor: 16B */ +struct wq_enet_desc { + uint64_t address; + uint16_t length; + uint16_t mss_loopback; + uint16_t header_length_flags; + uint16_t vlan_tag; +}; + +#define WQ_ENET_ADDR_BITS 64 +#define WQ_ENET_LEN_BITS 14 +#define WQ_ENET_LEN_MASK ((1 << WQ_ENET_LEN_BITS) - 1) +#define WQ_ENET_MSS_BITS 14 +#define WQ_ENET_MSS_MASK ((1 << WQ_ENET_MSS_BITS) - 1) +#define WQ_ENET_MSS_SHIFT 2 +#define WQ_ENET_LOOPBACK_SHIFT 1 +#define WQ_ENET_HDRLEN_BITS 10 +#define WQ_ENET_HDRLEN_MASK ((1 << WQ_ENET_HDRLEN_BITS) - 1) +#define WQ_ENET_FLAGS_OM_BITS 2 +#define WQ_ENET_FLAGS_OM_MASK ((1 << WQ_ENET_FLAGS_OM_BITS) - 1) +#define WQ_ENET_FLAGS_EOP_SHIFT 12 +#define WQ_ENET_FLAGS_CQ_ENTRY_SHIFT 13 +#define WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT 14 +#define WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT 15 + +#define WQ_ENET_OFFLOAD_MODE_CSUM 0 +#define WQ_ENET_OFFLOAD_MODE_RESERVED 1 +#define WQ_ENET_OFFLOAD_MODE_CSUM_L4 2 +#define WQ_ENET_OFFLOAD_MODE_TSO 3 + +static inline void wq_enet_desc_enc(struct wq_enet_desc *desc, + uint64_t address, uint16_t length, uint16_t mss, uint16_t header_length, + uint8_t offload_mode, uint8_t eop, uint8_t cq_entry, uint8_t fcoe_encap, + uint8_t vlan_tag_insert, uint16_t vlan_tag, uint8_t loopback) +{ + desc->address = rte_cpu_to_le_64(address); + desc->length = rte_cpu_to_le_16(length & WQ_ENET_LEN_MASK); + desc->mss_loopback = rte_cpu_to_le_16((mss & WQ_ENET_MSS_MASK) << + WQ_ENET_MSS_SHIFT | (loopback & 1) << WQ_ENET_LOOPBACK_SHIFT); + desc->header_length_flags = rte_cpu_to_le_16 + ((header_length & WQ_ENET_HDRLEN_MASK) | + (offload_mode & WQ_ENET_FLAGS_OM_MASK) << WQ_ENET_HDRLEN_BITS | + (eop & 1) << WQ_ENET_FLAGS_EOP_SHIFT | + (cq_entry & 1) << WQ_ENET_FLAGS_CQ_ENTRY_SHIFT | + (fcoe_encap & 1) << WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT | + 
(vlan_tag_insert & 1) << WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT); + desc->vlan_tag = rte_cpu_to_le_16(vlan_tag); +} + +static inline void wq_enet_desc_dec(struct wq_enet_desc *desc, + uint64_t *address, uint16_t *length, uint16_t *mss, + uint16_t *header_length, uint8_t *offload_mode, uint8_t *eop, + uint8_t *cq_entry, uint8_t *fcoe_encap, uint8_t *vlan_tag_insert, + uint16_t *vlan_tag, uint8_t *loopback) +{ + *address = rte_le_to_cpu_64(desc->address); + *length = rte_le_to_cpu_16(desc->length) & WQ_ENET_LEN_MASK; + *mss = (rte_le_to_cpu_16(desc->mss_loopback) >> WQ_ENET_MSS_SHIFT) & + WQ_ENET_MSS_MASK; + *loopback = (uint8_t)((rte_le_to_cpu_16(desc->mss_loopback) >> + WQ_ENET_LOOPBACK_SHIFT) & 1); + *header_length = rte_le_to_cpu_16(desc->header_length_flags) & + WQ_ENET_HDRLEN_MASK; + *offload_mode = + (uint8_t)((rte_le_to_cpu_16(desc->header_length_flags) >> + WQ_ENET_HDRLEN_BITS) & WQ_ENET_FLAGS_OM_MASK); + *eop = (uint8_t)((rte_le_to_cpu_16(desc->header_length_flags) >> + WQ_ENET_FLAGS_EOP_SHIFT) & 1); + *cq_entry = (uint8_t)((rte_le_to_cpu_16(desc->header_length_flags) >> + WQ_ENET_FLAGS_CQ_ENTRY_SHIFT) & 1); + *fcoe_encap = (uint8_t)((rte_le_to_cpu_16(desc->header_length_flags) >> + WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT) & 1); + *vlan_tag_insert = + (uint8_t)((rte_le_to_cpu_16(desc->header_length_flags) >> + WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT) & 1); + *vlan_tag = rte_le_to_cpu_16(desc->vlan_tag); +} + +#endif /* _WQ_ENET_DESC_H_ */ diff --git a/src/spdk/dpdk/drivers/net/enic/enic.h b/src/spdk/dpdk/drivers/net/enic/enic.h new file mode 100644 index 000000000..a95e51eea --- /dev/null +++ b/src/spdk/dpdk/drivers/net/enic/enic.h @@ -0,0 +1,368 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + */ + +#ifndef _ENIC_H_ +#define _ENIC_H_ + +#include +#include +#include "vnic_enet.h" +#include "vnic_dev.h" +#include "vnic_flowman.h" +#include "vnic_wq.h" +#include "vnic_rq.h" +#include "vnic_cq.h" +#include "vnic_intr.h" +#include "vnic_stats.h" +#include "vnic_nic.h" +#include "vnic_rss.h" +#include "enic_res.h" +#include "cq_enet_desc.h" +#include +#include +#include + +#define DRV_NAME "enic_pmd" +#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Poll-mode Driver" +#define DRV_COPYRIGHT "Copyright 2008-2015 Cisco Systems, Inc" + +#define VLAN_ETH_HLEN 18 + +#define ENICPMD_SETTING(enic, f) ((enic->config.flags & VENETF_##f) ? 1 : 0) + +#define ENICPMD_BDF_LENGTH 13 /* 0000:00:00.0'\0' */ +#define ENIC_CALC_IP_CKSUM 1 +#define ENIC_CALC_TCP_UDP_CKSUM 2 +#define ENIC_MAX_MTU 9000 +#define ENIC_PAGE_SIZE 4096 +#define PAGE_ROUND_UP(x) \ + ((((unsigned long)(x)) + ENIC_PAGE_SIZE-1) & (~(ENIC_PAGE_SIZE-1))) + +#define ENICPMD_VFIO_PATH "/dev/vfio/vfio" +/*#define ENIC_DESC_COUNT_MAKE_ODD (x) do{if ((~(x)) & 1) { (x)--; } }while(0)*/ + +#define PCI_DEVICE_ID_CISCO_VIC_ENET 0x0043 /* ethernet vnic */ +#define PCI_DEVICE_ID_CISCO_VIC_ENET_VF 0x0071 /* enet SRIOV VF */ +/* enet SRIOV Standalone vNic VF */ +#define PCI_DEVICE_ID_CISCO_VIC_ENET_SN 0x02B7 + +/* Special Filter id for non-specific packet flagging. Don't change value */ +#define ENIC_MAGIC_FILTER_ID 0xffff + +#define ENICPMD_FDIR_MAX 64 + +/* + * Interrupt 0: LSC and errors + * Interrupt 1: rx queue 0 + * Interrupt 2: rx queue 1 + * ... 
+ */ +#define ENICPMD_LSC_INTR_OFFSET 0 +#define ENICPMD_RXQ_INTR_OFFSET 1 + +struct enic_fdir_node { + struct rte_eth_fdir_filter filter; + uint16_t fltr_id; + uint16_t rq_index; +}; + +struct enic_fdir { + struct rte_eth_fdir_stats stats; + struct rte_hash *hash; + struct enic_fdir_node *nodes[ENICPMD_FDIR_MAX]; + uint32_t modes; + uint32_t types_mask; + void (*copy_fltr_fn)(struct filter_v2 *filt, + const struct rte_eth_fdir_input *input, + const struct rte_eth_fdir_masks *masks); +}; + +struct enic_soft_stats { + rte_atomic64_t rx_nombuf; + rte_atomic64_t rx_packet_errors; + rte_atomic64_t tx_oversized; +}; + +struct enic_memzone_entry { + const struct rte_memzone *rz; + LIST_ENTRY(enic_memzone_entry) entries; +}; + +/* Defined in enic_fm_flow.c */ +struct enic_flowman; +struct enic_fm_flow; + +struct rte_flow { + LIST_ENTRY(rte_flow) next; + /* Data for filter API based flow (enic_flow.c) */ + uint16_t enic_filter_id; + struct filter_v2 enic_filter; + /* Data for flow manager based flow (enic_fm_flow.c) */ + struct enic_fm_flow *fm; +}; + +/* Per-instance private data structure */ +struct enic { + struct enic *next; + struct rte_pci_device *pdev; + struct vnic_enet_config config; + struct vnic_dev_bar bar0; + struct vnic_dev *vdev; + + /* + * mbuf_initializer contains 64 bits of mbuf rearm_data, used by + * the avx2 handler at this time. + */ + uint64_t mbuf_initializer; + unsigned int port_id; + bool overlay_offload; + struct rte_eth_dev *rte_dev; + struct rte_eth_dev_data *dev_data; + struct enic_fdir fdir; + char bdf_name[ENICPMD_BDF_LENGTH]; + int dev_fd; + int iommu_group_fd; + int iommu_groupid; + int eventfd; + uint8_t mac_addr[RTE_ETHER_ADDR_LEN]; + pthread_t err_intr_thread; + int promisc; + int allmulti; + uint8_t ig_vlan_strip_en; + int link_status; + uint8_t hw_ip_checksum; + uint16_t max_mtu; + uint8_t adv_filters; + uint32_t flow_filter_mode; + uint8_t filter_actions; /* HW supported actions */ + bool vxlan; + bool disable_overlay; /* devargs disable_overlay=1 */ + uint8_t enable_avx2_rx; /* devargs enable-avx2-rx=1 */ + uint8_t geneve_opt_avail; /* Geneve with options offload available */ + uint8_t geneve_opt_enabled; /* Geneve with options offload enabled */ + uint8_t geneve_opt_request; /* devargs geneve-opt=1 */ + bool nic_cfg_chk; /* NIC_CFG_CHK available */ + bool udp_rss_weak; /* Bodega style UDP RSS */ + uint8_t ig_vlan_rewrite_mode; /* devargs ig-vlan-rewrite */ + uint16_t vxlan_port; /* current vxlan port pushed to NIC */ + int use_simple_tx_handler; + + unsigned int flags; + unsigned int priv_flags; + + /* work queue (len = conf_wq_count) */ + struct vnic_wq *wq; + unsigned int wq_count; /* equals eth_dev nb_tx_queues */ + + /* receive queue (len = conf_rq_count) */ + struct vnic_rq *rq; + unsigned int rq_count; /* equals eth_dev nb_rx_queues */ + + /* completion queue (len = conf_cq_count) */ + struct vnic_cq *cq; + unsigned int cq_count; /* equals rq_count + wq_count */ + + /* interrupt vectors (len = conf_intr_count) */ + struct vnic_intr *intr; + unsigned int intr_count; /* equals enabled interrupts (lsc + rxqs) */ + + /* software counters */ + struct enic_soft_stats soft_stats; + + /* configured resources on vic */ + unsigned int conf_rq_count; + unsigned int conf_wq_count; + unsigned int conf_cq_count; + unsigned int conf_intr_count; + + /* linked list storing memory allocations */ + LIST_HEAD(enic_memzone_list, enic_memzone_entry) memzone_list; + rte_spinlock_t memzone_list_lock; + rte_spinlock_t mtu_lock; + + LIST_HEAD(enic_flows, rte_flow) flows; + + 
/* RSS */ + uint16_t reta_size; + uint8_t hash_key_size; + uint64_t flow_type_rss_offloads; /* 0 indicates RSS not supported */ + /* + * Keep a copy of current RSS config for queries, as we cannot retrieve + * it from the NIC. + */ + uint8_t rss_hash_type; /* NIC_CFG_RSS_HASH_TYPE flags */ + uint8_t rss_enable; + uint64_t rss_hf; /* ETH_RSS flags */ + union vnic_rss_key rss_key; + union vnic_rss_cpu rss_cpu; + + uint64_t rx_offload_capa; /* DEV_RX_OFFLOAD flags */ + uint64_t tx_offload_capa; /* DEV_TX_OFFLOAD flags */ + uint64_t tx_queue_offload_capa; /* DEV_TX_OFFLOAD flags */ + uint64_t tx_offload_mask; /* PKT_TX flags accepted */ + + /* Multicast MAC addresses added to the NIC */ + uint32_t mc_count; + struct rte_ether_addr mc_addrs[ENIC_MULTICAST_PERFECT_FILTERS]; + + /* Flow manager API */ + struct enic_flowman *fm; +}; + +/* Compute ethdev's max packet size from MTU */ +static inline uint32_t enic_mtu_to_max_rx_pktlen(uint32_t mtu) +{ + /* ethdev max size includes eth whereas NIC MTU does not */ + return mtu + RTE_ETHER_HDR_LEN; +} + +/* Get the CQ index from a Start of Packet(SOP) RQ index */ +static inline unsigned int enic_sop_rq_idx_to_cq_idx(unsigned int sop_idx) +{ + return sop_idx; +} + +/* Get the RTE RQ index from a Start of Packet(SOP) RQ index */ +static inline unsigned int enic_sop_rq_idx_to_rte_idx(unsigned int sop_idx) +{ + return sop_idx; +} + +/* Get the Start of Packet(SOP) RQ index from a RTE RQ index */ +static inline unsigned int enic_rte_rq_idx_to_sop_idx(unsigned int rte_idx) +{ + return rte_idx; +} + +/* Get the Data RQ index from a RTE RQ index */ +static inline unsigned int enic_rte_rq_idx_to_data_idx(unsigned int rte_idx, + struct enic *enic) +{ + return enic->rq_count + rte_idx; +} + +static inline unsigned int enic_vnic_rq_count(struct enic *enic) +{ + return enic->rq_count * 2; +} + +static inline unsigned int enic_cq_rq(__rte_unused struct enic *enic, unsigned int rq) +{ + /* Scatter rx uses two receive queues together with one + * completion queue, so the completion queue number is no + * longer the same as the rq number. + */ + return rq; +} + +static inline unsigned int enic_cq_wq(struct enic *enic, unsigned int wq) +{ + return enic->rq_count + wq; +} + +static inline struct enic *pmd_priv(struct rte_eth_dev *eth_dev) +{ + return eth_dev->data->dev_private; +} + +static inline uint32_t +enic_ring_add(uint32_t n_descriptors, uint32_t i0, uint32_t i1) +{ + uint32_t d = i0 + i1; + d -= (d >= n_descriptors) ? n_descriptors : 0; + return d; +} + +static inline uint32_t +enic_ring_sub(uint32_t n_descriptors, uint32_t i0, uint32_t i1) +{ + int32_t d = i1 - i0; + return (uint32_t)((d < 0) ? 
((int32_t)n_descriptors + d) : d); +} + +static inline uint32_t +enic_ring_incr(uint32_t n_descriptors, uint32_t idx) +{ + idx++; + if (unlikely(idx == n_descriptors)) + idx = 0; + return idx; +} + +int dev_is_enic(struct rte_eth_dev *dev); +void enic_fdir_stats_get(struct enic *enic, + struct rte_eth_fdir_stats *stats); +int enic_fdir_add_fltr(struct enic *enic, + struct rte_eth_fdir_filter *params); +int enic_fdir_del_fltr(struct enic *enic, + struct rte_eth_fdir_filter *params); +void enic_free_wq(void *txq); +int enic_alloc_intr_resources(struct enic *enic); +int enic_setup_finish(struct enic *enic); +int enic_alloc_wq(struct enic *enic, uint16_t queue_idx, + unsigned int socket_id, uint16_t nb_desc); +void enic_start_wq(struct enic *enic, uint16_t queue_idx); +int enic_stop_wq(struct enic *enic, uint16_t queue_idx); +void enic_start_rq(struct enic *enic, uint16_t queue_idx); +int enic_stop_rq(struct enic *enic, uint16_t queue_idx); +void enic_free_rq(void *rxq); +int enic_alloc_rq(struct enic *enic, uint16_t queue_idx, + unsigned int socket_id, struct rte_mempool *mp, + uint16_t nb_desc, uint16_t free_thresh); +int enic_set_vnic_res(struct enic *enic); +int enic_init_rss_nic_cfg(struct enic *enic); +int enic_set_rss_conf(struct enic *enic, + struct rte_eth_rss_conf *rss_conf); +int enic_set_rss_reta(struct enic *enic, union vnic_rss_cpu *rss_cpu); +int enic_set_vlan_strip(struct enic *enic); +int enic_enable(struct enic *enic); +int enic_disable(struct enic *enic); +void enic_remove(struct enic *enic); +int enic_get_link_status(struct enic *enic); +int enic_dev_stats_get(struct enic *enic, + struct rte_eth_stats *r_stats); +int enic_dev_stats_clear(struct enic *enic); +int enic_add_packet_filter(struct enic *enic); +int enic_set_mac_address(struct enic *enic, uint8_t *mac_addr); +int enic_del_mac_address(struct enic *enic, int mac_index); +unsigned int enic_cleanup_wq(struct enic *enic, struct vnic_wq *wq); +void enic_send_pkt(struct enic *enic, struct vnic_wq *wq, + struct rte_mbuf *tx_pkt, unsigned short len, + uint8_t sop, uint8_t eop, uint8_t cq_entry, + uint16_t ol_flags, uint16_t vlan_tag); + +void enic_post_wq_index(struct vnic_wq *wq); +int enic_probe(struct enic *enic); +int enic_clsf_init(struct enic *enic); +void enic_clsf_destroy(struct enic *enic); +int enic_fm_init(struct enic *enic); +void enic_fm_destroy(struct enic *enic); +void *enic_alloc_consistent(void *priv, size_t size, dma_addr_t *dma_handle, + uint8_t *name); +void enic_free_consistent(void *priv, size_t size, void *vaddr, + dma_addr_t dma_handle); +uint16_t enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); +uint16_t enic_noscatter_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); +uint16_t enic_dummy_recv_pkts(void *rx_queue, + struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); +uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); +uint16_t enic_simple_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); +uint16_t enic_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); +int enic_set_mtu(struct enic *enic, uint16_t new_mtu); +int enic_link_update(struct rte_eth_dev *eth_dev); +bool enic_use_vector_rx_handler(struct rte_eth_dev *eth_dev); +void enic_pick_rx_handler(struct rte_eth_dev *eth_dev); +void enic_pick_tx_handler(struct rte_eth_dev *eth_dev); +void enic_fdir_info(struct enic *enic); +void enic_fdir_info_get(struct enic *enic, struct rte_eth_fdir_info *stats); +extern const 
struct rte_flow_ops enic_flow_ops; +extern const struct rte_flow_ops enic_fm_flow_ops; +#endif /* _ENIC_H_ */ diff --git a/src/spdk/dpdk/drivers/net/enic/enic_clsf.c b/src/spdk/dpdk/drivers/net/enic/enic_clsf.c new file mode 100644 index 000000000..e206123ba --- /dev/null +++ b/src/spdk/dpdk/drivers/net/enic/enic_clsf.c @@ -0,0 +1,502 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "enic_compat.h" +#include "enic.h" +#include "wq_enet_desc.h" +#include "rq_enet_desc.h" +#include "cq_enet_desc.h" +#include "vnic_enet.h" +#include "vnic_dev.h" +#include "vnic_wq.h" +#include "vnic_rq.h" +#include "vnic_cq.h" +#include "vnic_intr.h" +#include "vnic_nic.h" + +#ifdef RTE_ARCH_X86 +#include +#define DEFAULT_HASH_FUNC rte_hash_crc +#else +#include +#define DEFAULT_HASH_FUNC rte_jhash +#endif + +#define ENICPMD_CLSF_HASH_ENTRIES ENICPMD_FDIR_MAX + +static void copy_fltr_v1(struct filter_v2 *fltr, + const struct rte_eth_fdir_input *input, + const struct rte_eth_fdir_masks *masks); +static void copy_fltr_v2(struct filter_v2 *fltr, + const struct rte_eth_fdir_input *input, + const struct rte_eth_fdir_masks *masks); + +void enic_fdir_stats_get(struct enic *enic, struct rte_eth_fdir_stats *stats) +{ + *stats = enic->fdir.stats; +} + +void enic_fdir_info_get(struct enic *enic, struct rte_eth_fdir_info *info) +{ + info->mode = (enum rte_fdir_mode)enic->fdir.modes; + info->flow_types_mask[0] = enic->fdir.types_mask; +} + +void enic_fdir_info(struct enic *enic) +{ + enic->fdir.modes = (uint32_t)RTE_FDIR_MODE_PERFECT; + enic->fdir.types_mask = 1 << RTE_ETH_FLOW_NONFRAG_IPV4_UDP | + 1 << RTE_ETH_FLOW_NONFRAG_IPV4_TCP; + if (enic->adv_filters) { + enic->fdir.types_mask |= 1 << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER | + 1 << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP | + 1 << RTE_ETH_FLOW_NONFRAG_IPV6_UDP | + 1 << RTE_ETH_FLOW_NONFRAG_IPV6_TCP | + 1 << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP | + 1 << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER; + enic->fdir.copy_fltr_fn = copy_fltr_v2; + } else { + enic->fdir.copy_fltr_fn = copy_fltr_v1; + } +} + +static void +enic_set_layer(struct filter_generic_1 *gp, unsigned int flag, + enum filter_generic_1_layer layer, void *mask, void *val, + unsigned int len) +{ + gp->mask_flags |= flag; + gp->val_flags |= gp->mask_flags; + memcpy(gp->layer[layer].mask, mask, len); + memcpy(gp->layer[layer].val, val, len); +} + +/* Copy Flow Director filter to a VIC ipv4 filter (for Cisco VICs + * without advanced filter support. + */ +static void +copy_fltr_v1(struct filter_v2 *fltr, const struct rte_eth_fdir_input *input, + __rte_unused const struct rte_eth_fdir_masks *masks) +{ + fltr->type = FILTER_IPV4_5TUPLE; + fltr->u.ipv4.src_addr = rte_be_to_cpu_32( + input->flow.ip4_flow.src_ip); + fltr->u.ipv4.dst_addr = rte_be_to_cpu_32( + input->flow.ip4_flow.dst_ip); + fltr->u.ipv4.src_port = rte_be_to_cpu_16( + input->flow.udp4_flow.src_port); + fltr->u.ipv4.dst_port = rte_be_to_cpu_16( + input->flow.udp4_flow.dst_port); + + if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_TCP) + fltr->u.ipv4.protocol = PROTO_TCP; + else + fltr->u.ipv4.protocol = PROTO_UDP; + + fltr->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE; +} + +/* Copy Flow Director filter to a VIC generic filter (requires advanced + * filter support. 
+ */ +static void +copy_fltr_v2(struct filter_v2 *fltr, const struct rte_eth_fdir_input *input, + const struct rte_eth_fdir_masks *masks) +{ + struct filter_generic_1 *gp = &fltr->u.generic_1; + + fltr->type = FILTER_DPDK_1; + memset(gp, 0, sizeof(*gp)); + + if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP) { + struct rte_udp_hdr udp_mask, udp_val; + memset(&udp_mask, 0, sizeof(udp_mask)); + memset(&udp_val, 0, sizeof(udp_val)); + + if (input->flow.udp4_flow.src_port) { + udp_mask.src_port = masks->src_port_mask; + udp_val.src_port = input->flow.udp4_flow.src_port; + } + if (input->flow.udp4_flow.dst_port) { + udp_mask.dst_port = masks->dst_port_mask; + udp_val.dst_port = input->flow.udp4_flow.dst_port; + } + + enic_set_layer(gp, FILTER_GENERIC_1_UDP, FILTER_GENERIC_1_L4, + &udp_mask, &udp_val, sizeof(struct rte_udp_hdr)); + } else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_TCP) { + struct rte_tcp_hdr tcp_mask, tcp_val; + memset(&tcp_mask, 0, sizeof(tcp_mask)); + memset(&tcp_val, 0, sizeof(tcp_val)); + + if (input->flow.tcp4_flow.src_port) { + tcp_mask.src_port = masks->src_port_mask; + tcp_val.src_port = input->flow.tcp4_flow.src_port; + } + if (input->flow.tcp4_flow.dst_port) { + tcp_mask.dst_port = masks->dst_port_mask; + tcp_val.dst_port = input->flow.tcp4_flow.dst_port; + } + + enic_set_layer(gp, FILTER_GENERIC_1_TCP, FILTER_GENERIC_1_L4, + &tcp_mask, &tcp_val, sizeof(struct rte_tcp_hdr)); + } else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) { + struct rte_sctp_hdr sctp_mask, sctp_val; + memset(&sctp_mask, 0, sizeof(sctp_mask)); + memset(&sctp_val, 0, sizeof(sctp_val)); + + if (input->flow.sctp4_flow.src_port) { + sctp_mask.src_port = masks->src_port_mask; + sctp_val.src_port = input->flow.sctp4_flow.src_port; + } + if (input->flow.sctp4_flow.dst_port) { + sctp_mask.dst_port = masks->dst_port_mask; + sctp_val.dst_port = input->flow.sctp4_flow.dst_port; + } + if (input->flow.sctp4_flow.verify_tag) { + sctp_mask.tag = 0xffffffff; + sctp_val.tag = input->flow.sctp4_flow.verify_tag; + } + + /* + * Unlike UDP/TCP (FILTER_GENERIC_1_{UDP,TCP}), the firmware + * has no "packet is SCTP" flag. Use flag=0 (generic L4) and + * manually set proto_id=sctp below. 
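+ * (The IPv4/IPv6 blocks further down force next_proto_id/proto to
+ * IPPROTO_SCTP when the user did not give an explicit protocol.)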
+ */ + enic_set_layer(gp, 0, FILTER_GENERIC_1_L4, &sctp_mask, + &sctp_val, sizeof(struct rte_sctp_hdr)); + } + + if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP || + input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_TCP || + input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_SCTP || + input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) { + struct rte_ipv4_hdr ip4_mask, ip4_val; + memset(&ip4_mask, 0, sizeof(struct rte_ipv4_hdr)); + memset(&ip4_val, 0, sizeof(struct rte_ipv4_hdr)); + + if (input->flow.ip4_flow.tos) { + ip4_mask.type_of_service = masks->ipv4_mask.tos; + ip4_val.type_of_service = input->flow.ip4_flow.tos; + } + if (input->flow.ip4_flow.ttl) { + ip4_mask.time_to_live = masks->ipv4_mask.ttl; + ip4_val.time_to_live = input->flow.ip4_flow.ttl; + } + if (input->flow.ip4_flow.proto) { + ip4_mask.next_proto_id = masks->ipv4_mask.proto; + ip4_val.next_proto_id = input->flow.ip4_flow.proto; + } else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) { + /* Explicitly match the SCTP protocol number */ + ip4_mask.next_proto_id = 0xff; + ip4_val.next_proto_id = IPPROTO_SCTP; + } + if (input->flow.ip4_flow.src_ip) { + ip4_mask.src_addr = masks->ipv4_mask.src_ip; + ip4_val.src_addr = input->flow.ip4_flow.src_ip; + } + if (input->flow.ip4_flow.dst_ip) { + ip4_mask.dst_addr = masks->ipv4_mask.dst_ip; + ip4_val.dst_addr = input->flow.ip4_flow.dst_ip; + } + + enic_set_layer(gp, FILTER_GENERIC_1_IPV4, FILTER_GENERIC_1_L3, + &ip4_mask, &ip4_val, sizeof(struct rte_ipv4_hdr)); + } + + if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP) { + struct rte_udp_hdr udp_mask, udp_val; + memset(&udp_mask, 0, sizeof(udp_mask)); + memset(&udp_val, 0, sizeof(udp_val)); + + if (input->flow.udp6_flow.src_port) { + udp_mask.src_port = masks->src_port_mask; + udp_val.src_port = input->flow.udp6_flow.src_port; + } + if (input->flow.udp6_flow.dst_port) { + udp_mask.dst_port = masks->dst_port_mask; + udp_val.dst_port = input->flow.udp6_flow.dst_port; + } + enic_set_layer(gp, FILTER_GENERIC_1_UDP, FILTER_GENERIC_1_L4, + &udp_mask, &udp_val, sizeof(struct rte_udp_hdr)); + } else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_TCP) { + struct rte_tcp_hdr tcp_mask, tcp_val; + memset(&tcp_mask, 0, sizeof(tcp_mask)); + memset(&tcp_val, 0, sizeof(tcp_val)); + + if (input->flow.tcp6_flow.src_port) { + tcp_mask.src_port = masks->src_port_mask; + tcp_val.src_port = input->flow.tcp6_flow.src_port; + } + if (input->flow.tcp6_flow.dst_port) { + tcp_mask.dst_port = masks->dst_port_mask; + tcp_val.dst_port = input->flow.tcp6_flow.dst_port; + } + enic_set_layer(gp, FILTER_GENERIC_1_TCP, FILTER_GENERIC_1_L4, + &tcp_mask, &tcp_val, sizeof(struct rte_tcp_hdr)); + } else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) { + struct rte_sctp_hdr sctp_mask, sctp_val; + memset(&sctp_mask, 0, sizeof(sctp_mask)); + memset(&sctp_val, 0, sizeof(sctp_val)); + + if (input->flow.sctp6_flow.src_port) { + sctp_mask.src_port = masks->src_port_mask; + sctp_val.src_port = input->flow.sctp6_flow.src_port; + } + if (input->flow.sctp6_flow.dst_port) { + sctp_mask.dst_port = masks->dst_port_mask; + sctp_val.dst_port = input->flow.sctp6_flow.dst_port; + } + if (input->flow.sctp6_flow.verify_tag) { + sctp_mask.tag = 0xffffffff; + sctp_val.tag = input->flow.sctp6_flow.verify_tag; + } + + enic_set_layer(gp, 0, FILTER_GENERIC_1_L4, &sctp_mask, + &sctp_val, sizeof(struct rte_sctp_hdr)); + } + + if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP || + input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_TCP || + input->flow_type == 
RTE_ETH_FLOW_NONFRAG_IPV6_SCTP || + input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) { + struct rte_ipv6_hdr ipv6_mask, ipv6_val; + memset(&ipv6_mask, 0, sizeof(struct rte_ipv6_hdr)); + memset(&ipv6_val, 0, sizeof(struct rte_ipv6_hdr)); + + if (input->flow.ipv6_flow.proto) { + ipv6_mask.proto = masks->ipv6_mask.proto; + ipv6_val.proto = input->flow.ipv6_flow.proto; + } else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) { + /* See comments for IPv4 SCTP above. */ + ipv6_mask.proto = 0xff; + ipv6_val.proto = IPPROTO_SCTP; + } + memcpy(ipv6_mask.src_addr, masks->ipv6_mask.src_ip, + sizeof(ipv6_mask.src_addr)); + memcpy(ipv6_val.src_addr, input->flow.ipv6_flow.src_ip, + sizeof(ipv6_val.src_addr)); + memcpy(ipv6_mask.dst_addr, masks->ipv6_mask.dst_ip, + sizeof(ipv6_mask.dst_addr)); + memcpy(ipv6_val.dst_addr, input->flow.ipv6_flow.dst_ip, + sizeof(ipv6_val.dst_addr)); + if (input->flow.ipv6_flow.tc) { + ipv6_mask.vtc_flow = masks->ipv6_mask.tc << 12; + ipv6_val.vtc_flow = input->flow.ipv6_flow.tc << 12; + } + if (input->flow.ipv6_flow.hop_limits) { + ipv6_mask.hop_limits = masks->ipv6_mask.hop_limits; + ipv6_val.hop_limits = input->flow.ipv6_flow.hop_limits; + } + + enic_set_layer(gp, FILTER_GENERIC_1_IPV6, FILTER_GENERIC_1_L3, + &ipv6_mask, &ipv6_val, sizeof(struct rte_ipv6_hdr)); + } +} + +int enic_fdir_del_fltr(struct enic *enic, struct rte_eth_fdir_filter *params) +{ + int32_t pos; + struct enic_fdir_node *key; + /* See if the key is in the table */ + pos = rte_hash_del_key(enic->fdir.hash, params); + switch (pos) { + case -EINVAL: + case -ENOENT: + enic->fdir.stats.f_remove++; + return -EINVAL; + default: + /* The entry is present in the table */ + key = enic->fdir.nodes[pos]; + + /* Delete the filter */ + vnic_dev_classifier(enic->vdev, CLSF_DEL, + &key->fltr_id, NULL, NULL); + rte_free(key); + enic->fdir.nodes[pos] = NULL; + enic->fdir.stats.free++; + enic->fdir.stats.remove++; + break; + } + return 0; +} + +int enic_fdir_add_fltr(struct enic *enic, struct rte_eth_fdir_filter *params) +{ + struct enic_fdir_node *key; + struct filter_v2 fltr; + int32_t pos; + uint8_t do_free = 0; + uint16_t old_fltr_id = 0; + uint32_t flowtype_supported; + uint16_t flex_bytes; + uint16_t queue; + struct filter_action_v2 action; + + memset(&fltr, 0, sizeof(fltr)); + memset(&action, 0, sizeof(action)); + flowtype_supported = enic->fdir.types_mask + & (1 << params->input.flow_type); + + flex_bytes = ((params->input.flow_ext.flexbytes[1] << 8 & 0xFF00) | + (params->input.flow_ext.flexbytes[0] & 0xFF)); + + if (!enic->fdir.hash || + (params->input.flow_ext.vlan_tci & 0xFFF) || + !flowtype_supported || flex_bytes || + params->action.behavior /* drop */) { + enic->fdir.stats.f_add++; + return -ENOTSUP; + } + + /* Get the enicpmd RQ from the DPDK Rx queue */ + queue = enic_rte_rq_idx_to_sop_idx(params->action.rx_queue); + + if (!enic->rq[queue].in_use) + return -EINVAL; + + /* See if the key is already there in the table */ + pos = rte_hash_del_key(enic->fdir.hash, params); + switch (pos) { + case -EINVAL: + enic->fdir.stats.f_add++; + return -EINVAL; + case -ENOENT: + /* Add a new classifier entry */ + if (!enic->fdir.stats.free) { + enic->fdir.stats.f_add++; + return -ENOSPC; + } + key = rte_zmalloc("enic_fdir_node", + sizeof(struct enic_fdir_node), 0); + if (!key) { + enic->fdir.stats.f_add++; + return -ENOMEM; + } + break; + default: + /* The entry is already present in the table. 
+ * Check if there is a change in queue + */ + key = enic->fdir.nodes[pos]; + enic->fdir.nodes[pos] = NULL; + if (unlikely(key->rq_index == queue)) { + /* Nothing to be done */ + enic->fdir.stats.f_add++; + pos = rte_hash_add_key(enic->fdir.hash, params); + if (pos < 0) { + dev_err(enic, "Add hash key failed\n"); + return pos; + } + enic->fdir.nodes[pos] = key; + dev_warning(enic, + "FDIR rule is already present\n"); + return 0; + } + + if (likely(enic->fdir.stats.free)) { + /* Add the filter and then delete the old one. + * This is to avoid packets from going into the + * default queue during the window between + * delete and add + */ + do_free = 1; + old_fltr_id = key->fltr_id; + } else { + /* No free slots in the classifier. + * Delete the filter and add the modified one later + */ + vnic_dev_classifier(enic->vdev, CLSF_DEL, + &key->fltr_id, NULL, NULL); + enic->fdir.stats.free++; + } + + break; + } + + key->filter = *params; + key->rq_index = queue; + + enic->fdir.copy_fltr_fn(&fltr, ¶ms->input, + &enic->rte_dev->data->dev_conf.fdir_conf.mask); + action.type = FILTER_ACTION_RQ_STEERING; + action.rq_idx = queue; + + if (!vnic_dev_classifier(enic->vdev, CLSF_ADD, &queue, &fltr, + &action)) { + key->fltr_id = queue; + } else { + dev_err(enic, "Add classifier entry failed\n"); + enic->fdir.stats.f_add++; + rte_free(key); + return -1; + } + + if (do_free) + vnic_dev_classifier(enic->vdev, CLSF_DEL, &old_fltr_id, NULL, + NULL); + else{ + enic->fdir.stats.free--; + enic->fdir.stats.add++; + } + + pos = rte_hash_add_key(enic->fdir.hash, params); + if (pos < 0) { + enic->fdir.stats.f_add++; + dev_err(enic, "Add hash key failed\n"); + return pos; + } + + enic->fdir.nodes[pos] = key; + return 0; +} + +void enic_clsf_destroy(struct enic *enic) +{ + uint32_t index; + struct enic_fdir_node *key; + /* delete classifier entries */ + for (index = 0; index < ENICPMD_FDIR_MAX; index++) { + key = enic->fdir.nodes[index]; + if (key) { + vnic_dev_classifier(enic->vdev, CLSF_DEL, + &key->fltr_id, NULL, NULL); + rte_free(key); + enic->fdir.nodes[index] = NULL; + } + } + + if (enic->fdir.hash) { + rte_hash_free(enic->fdir.hash); + enic->fdir.hash = NULL; + } +} + +int enic_clsf_init(struct enic *enic) +{ + char clsf_name[RTE_HASH_NAMESIZE]; + struct rte_hash_parameters hash_params = { + .name = clsf_name, + .entries = ENICPMD_CLSF_HASH_ENTRIES, + .key_len = sizeof(struct rte_eth_fdir_filter), + .hash_func = DEFAULT_HASH_FUNC, + .hash_func_init_val = 0, + .socket_id = SOCKET_ID_ANY, + }; + snprintf(clsf_name, RTE_HASH_NAMESIZE, "enic_clsf_%s", enic->bdf_name); + enic->fdir.hash = rte_hash_create(&hash_params); + memset(&enic->fdir.stats, 0, sizeof(enic->fdir.stats)); + enic->fdir.stats.free = ENICPMD_FDIR_MAX; + return NULL == enic->fdir.hash; +} diff --git a/src/spdk/dpdk/drivers/net/enic/enic_compat.h b/src/spdk/dpdk/drivers/net/enic/enic_compat.h new file mode 100644 index 000000000..774127303 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/enic/enic_compat.h @@ -0,0 +1,76 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + */ + +#ifndef _ENIC_COMPAT_H_ +#define _ENIC_COMPAT_H_ + +#include +#include + +#include +#include +#include +#include + +#define ETH_ALEN 6 + +#define __iomem + +#define pr_err(y, args...) dev_err(0, y, ##args) +#define pr_warn(y, args...) 
dev_warning(0, y, ##args) +#define BUG() pr_err("BUG at %s:%d", __func__, __LINE__) + +#define VNIC_ALIGN(x, a) __ALIGN_MASK(x, (typeof(x))(a)-1) +#define __ALIGN_MASK(x, mask) (((x)+(mask))&~(mask)) + +extern int enic_pmd_logtype; + +#define dev_printk(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, enic_pmd_logtype, \ + "PMD: rte_enic_pmd: " fmt, ##args) + +#define dev_err(x, args...) dev_printk(ERR, args) +#define dev_info(x, args...) dev_printk(INFO, args) +#define dev_warning(x, args...) dev_printk(WARNING, args) +#define dev_debug(x, args...) dev_printk(DEBUG, args) + +#define ENICPMD_LOG(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, enic_pmd_logtype, \ + "%s " fmt "\n", __func__, ##args) +#define ENICPMD_FUNC_TRACE() ENICPMD_LOG(DEBUG, ">>") + +typedef unsigned long long dma_addr_t; + +static inline uint32_t ioread32(volatile void *addr) +{ + return rte_read32(addr); +} + +static inline uint8_t ioread8(volatile void *addr) +{ + return rte_read8(addr); +} + +static inline void iowrite32(uint32_t val, volatile void *addr) +{ + rte_write32(val, addr); +} + +static inline void iowrite32_relaxed(uint32_t val, volatile void *addr) +{ + rte_write32_relaxed(val, addr); +} + +static inline unsigned int readl(volatile void __iomem *addr) +{ + return rte_read32(addr); +} + +static inline void writel(unsigned int val, volatile void __iomem *addr) +{ + rte_write32(val, addr); +} + +#endif /* _ENIC_COMPAT_H_ */ diff --git a/src/spdk/dpdk/drivers/net/enic/enic_ethdev.c b/src/spdk/dpdk/drivers/net/enic/enic_ethdev.c new file mode 100644 index 000000000..32d5397f8 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/enic/enic_ethdev.c @@ -0,0 +1,1316 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. 
+ */ + +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "vnic_intr.h" +#include "vnic_cq.h" +#include "vnic_wq.h" +#include "vnic_rq.h" +#include "vnic_enet.h" +#include "enic.h" + +int enic_pmd_logtype; + +/* + * The set of PCI devices this driver supports + */ +#define CISCO_PCI_VENDOR_ID 0x1137 +static const struct rte_pci_id pci_id_enic_map[] = { + {RTE_PCI_DEVICE(CISCO_PCI_VENDOR_ID, PCI_DEVICE_ID_CISCO_VIC_ENET)}, + {RTE_PCI_DEVICE(CISCO_PCI_VENDOR_ID, PCI_DEVICE_ID_CISCO_VIC_ENET_VF)}, + {RTE_PCI_DEVICE(CISCO_PCI_VENDOR_ID, PCI_DEVICE_ID_CISCO_VIC_ENET_SN)}, + {.vendor_id = 0, /* sentinel */}, +}; + +/* Supported link speeds of production VIC models */ +static const struct vic_speed_capa { + uint16_t sub_devid; + uint32_t capa; +} vic_speed_capa_map[] = { + { 0x0043, ETH_LINK_SPEED_10G }, /* VIC */ + { 0x0047, ETH_LINK_SPEED_10G }, /* P81E PCIe */ + { 0x0048, ETH_LINK_SPEED_10G }, /* M81KR Mezz */ + { 0x004f, ETH_LINK_SPEED_10G }, /* 1280 Mezz */ + { 0x0084, ETH_LINK_SPEED_10G }, /* 1240 MLOM */ + { 0x0085, ETH_LINK_SPEED_10G }, /* 1225 PCIe */ + { 0x00cd, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_40G }, /* 1285 PCIe */ + { 0x00ce, ETH_LINK_SPEED_10G }, /* 1225T PCIe */ + { 0x012a, ETH_LINK_SPEED_40G }, /* M4308 */ + { 0x012c, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_40G }, /* 1340 MLOM */ + { 0x012e, ETH_LINK_SPEED_10G }, /* 1227 PCIe */ + { 0x0137, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_40G }, /* 1380 Mezz */ + { 0x014d, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_40G }, /* 1385 PCIe */ + { 0x015d, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_40G }, /* 1387 MLOM */ + { 0x0215, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G | + ETH_LINK_SPEED_40G }, /* 1440 Mezz */ + { 0x0216, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G | + ETH_LINK_SPEED_40G }, /* 1480 MLOM */ + { 0x0217, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G }, /* 1455 PCIe */ + { 0x0218, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G }, /* 1457 MLOM */ + { 0x0219, ETH_LINK_SPEED_40G }, /* 1485 PCIe */ + { 0x021a, ETH_LINK_SPEED_40G }, /* 1487 MLOM */ + { 0x024a, ETH_LINK_SPEED_40G | ETH_LINK_SPEED_100G }, /* 1495 PCIe */ + { 0x024b, ETH_LINK_SPEED_40G | ETH_LINK_SPEED_100G }, /* 1497 MLOM */ + { 0, 0 }, /* End marker */ +}; + +#define ENIC_DEVARG_DISABLE_OVERLAY "disable-overlay" +#define ENIC_DEVARG_ENABLE_AVX2_RX "enable-avx2-rx" +#define ENIC_DEVARG_GENEVE_OPT "geneve-opt" +#define ENIC_DEVARG_IG_VLAN_REWRITE "ig-vlan-rewrite" + +RTE_INIT(enicpmd_init_log) +{ + enic_pmd_logtype = rte_log_register("pmd.net.enic"); + if (enic_pmd_logtype >= 0) + rte_log_set_level(enic_pmd_logtype, RTE_LOG_INFO); +} + +static int +enicpmd_fdir_ctrl_func(struct rte_eth_dev *eth_dev, + enum rte_filter_op filter_op, void *arg) +{ + struct enic *enic = pmd_priv(eth_dev); + int ret = 0; + + ENICPMD_FUNC_TRACE(); + if (filter_op == RTE_ETH_FILTER_NOP) + return 0; + + if (arg == NULL && filter_op != RTE_ETH_FILTER_FLUSH) + return -EINVAL; + + switch (filter_op) { + case RTE_ETH_FILTER_ADD: + case RTE_ETH_FILTER_UPDATE: + ret = enic_fdir_add_fltr(enic, + (struct rte_eth_fdir_filter *)arg); + break; + + case RTE_ETH_FILTER_DELETE: + ret = enic_fdir_del_fltr(enic, + (struct rte_eth_fdir_filter *)arg); + break; + + case RTE_ETH_FILTER_STATS: + enic_fdir_stats_get(enic, (struct rte_eth_fdir_stats *)arg); + break; + + case RTE_ETH_FILTER_FLUSH: + dev_warning(enic, "unsupported operation %u", filter_op); + ret = -ENOTSUP; + break; + case RTE_ETH_FILTER_INFO: + enic_fdir_info_get(enic, (struct rte_eth_fdir_info *)arg); + break; + default: + 
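+ /* All other legacy filter ops fall through to here. The rte_flow API
+ * (RTE_ETH_FILTER_GENERIC, handled in enicpmd_dev_filter_ctrl) is the
+ * preferred interface for new match/action rules.
+ */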
dev_err(enic, "unknown operation %u", filter_op); + ret = -EINVAL; + break; + } + return ret; +} + +static int +enicpmd_dev_filter_ctrl(struct rte_eth_dev *dev, + enum rte_filter_type filter_type, + enum rte_filter_op filter_op, + void *arg) +{ + struct enic *enic = pmd_priv(dev); + int ret = 0; + + ENICPMD_FUNC_TRACE(); + + /* + * Currently, when Geneve with options offload is enabled, host + * cannot insert match-action rules. + */ + if (enic->geneve_opt_enabled) + return -ENOTSUP; + switch (filter_type) { + case RTE_ETH_FILTER_GENERIC: + if (filter_op != RTE_ETH_FILTER_GET) + return -EINVAL; + if (enic->flow_filter_mode == FILTER_FLOWMAN) + *(const void **)arg = &enic_fm_flow_ops; + else + *(const void **)arg = &enic_flow_ops; + break; + case RTE_ETH_FILTER_FDIR: + ret = enicpmd_fdir_ctrl_func(dev, filter_op, arg); + break; + default: + dev_warning(enic, "Filter type (%d) not supported", + filter_type); + ret = -EINVAL; + break; + } + + return ret; +} + +static void enicpmd_dev_tx_queue_release(void *txq) +{ + ENICPMD_FUNC_TRACE(); + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return; + + enic_free_wq(txq); +} + +static int enicpmd_dev_setup_intr(struct enic *enic) +{ + int ret; + unsigned int index; + + ENICPMD_FUNC_TRACE(); + + /* Are we done with the init of all the queues? */ + for (index = 0; index < enic->cq_count; index++) { + if (!enic->cq[index].ctrl) + break; + } + if (enic->cq_count != index) + return 0; + for (index = 0; index < enic->wq_count; index++) { + if (!enic->wq[index].ctrl) + break; + } + if (enic->wq_count != index) + return 0; + /* check start of packet (SOP) RQs only in case scatter is disabled. */ + for (index = 0; index < enic->rq_count; index++) { + if (!enic->rq[enic_rte_rq_idx_to_sop_idx(index)].ctrl) + break; + } + if (enic->rq_count != index) + return 0; + + ret = enic_alloc_intr_resources(enic); + if (ret) { + dev_err(enic, "alloc intr failed\n"); + return ret; + } + enic_init_vnic_resources(enic); + + ret = enic_setup_finish(enic); + if (ret) + dev_err(enic, "setup could not be finished\n"); + + return ret; +} + +static int enicpmd_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, + uint16_t queue_idx, + uint16_t nb_desc, + unsigned int socket_id, + const struct rte_eth_txconf *tx_conf) +{ + int ret; + struct enic *enic = pmd_priv(eth_dev); + struct vnic_wq *wq; + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return -E_RTE_SECONDARY; + + ENICPMD_FUNC_TRACE(); + RTE_ASSERT(queue_idx < enic->conf_wq_count); + wq = &enic->wq[queue_idx]; + wq->offloads = tx_conf->offloads | + eth_dev->data->dev_conf.txmode.offloads; + eth_dev->data->tx_queues[queue_idx] = (void *)wq; + + ret = enic_alloc_wq(enic, queue_idx, socket_id, nb_desc); + if (ret) { + dev_err(enic, "error in allocating wq\n"); + return ret; + } + + return enicpmd_dev_setup_intr(enic); +} + +static int enicpmd_dev_tx_queue_start(struct rte_eth_dev *eth_dev, + uint16_t queue_idx) +{ + struct enic *enic = pmd_priv(eth_dev); + + ENICPMD_FUNC_TRACE(); + + enic_start_wq(enic, queue_idx); + + return 0; +} + +static int enicpmd_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, + uint16_t queue_idx) +{ + int ret; + struct enic *enic = pmd_priv(eth_dev); + + ENICPMD_FUNC_TRACE(); + + ret = enic_stop_wq(enic, queue_idx); + if (ret) + dev_err(enic, "error in stopping wq %d\n", queue_idx); + + return ret; +} + +static int enicpmd_dev_rx_queue_start(struct rte_eth_dev *eth_dev, + uint16_t queue_idx) +{ + struct enic *enic = pmd_priv(eth_dev); + + ENICPMD_FUNC_TRACE(); + + enic_start_rq(enic, queue_idx); 
+ + return 0; +} + +static int enicpmd_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, + uint16_t queue_idx) +{ + int ret; + struct enic *enic = pmd_priv(eth_dev); + + ENICPMD_FUNC_TRACE(); + + ret = enic_stop_rq(enic, queue_idx); + if (ret) + dev_err(enic, "error in stopping rq %d\n", queue_idx); + + return ret; +} + +static void enicpmd_dev_rx_queue_release(void *rxq) +{ + ENICPMD_FUNC_TRACE(); + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return; + + enic_free_rq(rxq); +} + +static uint32_t enicpmd_dev_rx_queue_count(struct rte_eth_dev *dev, + uint16_t rx_queue_id) +{ + struct enic *enic = pmd_priv(dev); + uint32_t queue_count = 0; + struct vnic_cq *cq; + uint32_t cq_tail; + uint16_t cq_idx; + int rq_num; + + rq_num = enic_rte_rq_idx_to_sop_idx(rx_queue_id); + cq = &enic->cq[enic_cq_rq(enic, rq_num)]; + cq_idx = cq->to_clean; + + cq_tail = ioread32(&cq->ctrl->cq_tail); + + if (cq_tail < cq_idx) + cq_tail += cq->ring.desc_count; + + queue_count = cq_tail - cq_idx; + + return queue_count; +} + +static int enicpmd_dev_rx_queue_setup(struct rte_eth_dev *eth_dev, + uint16_t queue_idx, + uint16_t nb_desc, + unsigned int socket_id, + const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *mp) +{ + int ret; + struct enic *enic = pmd_priv(eth_dev); + + ENICPMD_FUNC_TRACE(); + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return -E_RTE_SECONDARY; + RTE_ASSERT(enic_rte_rq_idx_to_sop_idx(queue_idx) < enic->conf_rq_count); + eth_dev->data->rx_queues[queue_idx] = + (void *)&enic->rq[enic_rte_rq_idx_to_sop_idx(queue_idx)]; + + ret = enic_alloc_rq(enic, queue_idx, socket_id, mp, nb_desc, + rx_conf->rx_free_thresh); + if (ret) { + dev_err(enic, "error in allocating rq\n"); + return ret; + } + + return enicpmd_dev_setup_intr(enic); +} + +static int enicpmd_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask) +{ + struct enic *enic = pmd_priv(eth_dev); + uint64_t offloads; + + ENICPMD_FUNC_TRACE(); + + offloads = eth_dev->data->dev_conf.rxmode.offloads; + if (mask & ETH_VLAN_STRIP_MASK) { + if (offloads & DEV_RX_OFFLOAD_VLAN_STRIP) + enic->ig_vlan_strip_en = 1; + else + enic->ig_vlan_strip_en = 0; + } + + if ((mask & ETH_VLAN_FILTER_MASK) && + (offloads & DEV_RX_OFFLOAD_VLAN_FILTER)) { + dev_warning(enic, + "Configuration of VLAN filter is not supported\n"); + } + + if ((mask & ETH_VLAN_EXTEND_MASK) && + (offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)) { + dev_warning(enic, + "Configuration of extended VLAN is not supported\n"); + } + + return enic_set_vlan_strip(enic); +} + +static int enicpmd_dev_configure(struct rte_eth_dev *eth_dev) +{ + int ret; + int mask; + struct enic *enic = pmd_priv(eth_dev); + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return -E_RTE_SECONDARY; + + ENICPMD_FUNC_TRACE(); + ret = enic_set_vnic_res(enic); + if (ret) { + dev_err(enic, "Set vNIC resource num failed, aborting\n"); + return ret; + } + + if (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) + eth_dev->data->dev_conf.rxmode.offloads |= + DEV_RX_OFFLOAD_RSS_HASH; + + enic->mc_count = 0; + enic->hw_ip_checksum = !!(eth_dev->data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_CHECKSUM); + /* All vlan offload masks to apply the current settings */ + mask = ETH_VLAN_STRIP_MASK | + ETH_VLAN_FILTER_MASK | + ETH_VLAN_EXTEND_MASK; + ret = enicpmd_vlan_offload_set(eth_dev, mask); + if (ret) { + dev_err(enic, "Failed to configure VLAN offloads\n"); + return ret; + } + /* + * Initialize RSS with the default reta and key. 
If the user key is + * given (rx_adv_conf.rss_conf.rss_key), will use that instead of the + * default key. + */ + return enic_init_rss_nic_cfg(enic); +} + +/* Start the device. + * It returns 0 on success. + */ +static int enicpmd_dev_start(struct rte_eth_dev *eth_dev) +{ + struct enic *enic = pmd_priv(eth_dev); + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return -E_RTE_SECONDARY; + + ENICPMD_FUNC_TRACE(); + return enic_enable(enic); +} + +/* + * Stop device: disable rx and tx functions to allow for reconfiguring. + */ +static void enicpmd_dev_stop(struct rte_eth_dev *eth_dev) +{ + struct rte_eth_link link; + struct enic *enic = pmd_priv(eth_dev); + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return; + + ENICPMD_FUNC_TRACE(); + enic_disable(enic); + + memset(&link, 0, sizeof(link)); + rte_eth_linkstatus_set(eth_dev, &link); +} + +/* + * Stop device. + */ +static void enicpmd_dev_close(struct rte_eth_dev *eth_dev) +{ + struct enic *enic = pmd_priv(eth_dev); + + ENICPMD_FUNC_TRACE(); + enic_remove(enic); +} + +static int enicpmd_dev_link_update(struct rte_eth_dev *eth_dev, + __rte_unused int wait_to_complete) +{ + ENICPMD_FUNC_TRACE(); + return enic_link_update(eth_dev); +} + +static int enicpmd_dev_stats_get(struct rte_eth_dev *eth_dev, + struct rte_eth_stats *stats) +{ + struct enic *enic = pmd_priv(eth_dev); + + ENICPMD_FUNC_TRACE(); + return enic_dev_stats_get(enic, stats); +} + +static int enicpmd_dev_stats_reset(struct rte_eth_dev *eth_dev) +{ + struct enic *enic = pmd_priv(eth_dev); + + ENICPMD_FUNC_TRACE(); + return enic_dev_stats_clear(enic); +} + +static uint32_t speed_capa_from_pci_id(struct rte_eth_dev *eth_dev) +{ + const struct vic_speed_capa *m; + struct rte_pci_device *pdev; + uint16_t id; + + pdev = RTE_ETH_DEV_TO_PCI(eth_dev); + id = pdev->id.subsystem_device_id; + for (m = vic_speed_capa_map; m->sub_devid != 0; m++) { + if (m->sub_devid == id) + return m->capa; + } + /* 1300 and later models are at least 40G */ + if (id >= 0x0100) + return ETH_LINK_SPEED_40G; + /* VFs have subsystem id 0, check device id */ + if (id == 0) { + /* Newer VF implies at least 40G model */ + if (pdev->id.device_id == PCI_DEVICE_ID_CISCO_VIC_ENET_SN) + return ETH_LINK_SPEED_40G; + } + return ETH_LINK_SPEED_10G; +} + +static int enicpmd_dev_info_get(struct rte_eth_dev *eth_dev, + struct rte_eth_dev_info *device_info) +{ + struct enic *enic = pmd_priv(eth_dev); + + ENICPMD_FUNC_TRACE(); + /* Scattered Rx uses two receive queues per rx queue exposed to dpdk */ + device_info->max_rx_queues = enic->conf_rq_count / 2; + device_info->max_tx_queues = enic->conf_wq_count; + device_info->min_rx_bufsize = ENIC_MIN_MTU; + /* "Max" mtu is not a typo. HW receives packet sizes up to the + * max mtu regardless of the current mtu (vNIC's mtu). vNIC mtu is + * a hint to the driver to size receive buffers accordingly so that + * larger-than-vnic-mtu packets get truncated.. For DPDK, we let + * the user decide the buffer size via rxmode.max_rx_pkt_len, basically + * ignoring vNIC mtu. 
+ */ + device_info->max_rx_pktlen = enic_mtu_to_max_rx_pktlen(enic->max_mtu); + device_info->max_mac_addrs = ENIC_UNICAST_PERFECT_FILTERS; + device_info->min_mtu = ENIC_MIN_MTU; + device_info->max_mtu = enic->max_mtu; + device_info->rx_offload_capa = enic->rx_offload_capa; + device_info->tx_offload_capa = enic->tx_offload_capa; + device_info->tx_queue_offload_capa = enic->tx_queue_offload_capa; + device_info->default_rxconf = (struct rte_eth_rxconf) { + .rx_free_thresh = ENIC_DEFAULT_RX_FREE_THRESH + }; + device_info->reta_size = enic->reta_size; + device_info->hash_key_size = enic->hash_key_size; + device_info->flow_type_rss_offloads = enic->flow_type_rss_offloads; + device_info->rx_desc_lim = (struct rte_eth_desc_lim) { + .nb_max = enic->config.rq_desc_count, + .nb_min = ENIC_MIN_RQ_DESCS, + .nb_align = ENIC_ALIGN_DESCS, + }; + device_info->tx_desc_lim = (struct rte_eth_desc_lim) { + .nb_max = enic->config.wq_desc_count, + .nb_min = ENIC_MIN_WQ_DESCS, + .nb_align = ENIC_ALIGN_DESCS, + .nb_seg_max = ENIC_TX_XMIT_MAX, + .nb_mtu_seg_max = ENIC_NON_TSO_MAX_DESC, + }; + device_info->default_rxportconf = (struct rte_eth_dev_portconf) { + .burst_size = ENIC_DEFAULT_RX_BURST, + .ring_size = RTE_MIN(device_info->rx_desc_lim.nb_max, + ENIC_DEFAULT_RX_RING_SIZE), + .nb_queues = ENIC_DEFAULT_RX_RINGS, + }; + device_info->default_txportconf = (struct rte_eth_dev_portconf) { + .burst_size = ENIC_DEFAULT_TX_BURST, + .ring_size = RTE_MIN(device_info->tx_desc_lim.nb_max, + ENIC_DEFAULT_TX_RING_SIZE), + .nb_queues = ENIC_DEFAULT_TX_RINGS, + }; + device_info->speed_capa = speed_capa_from_pci_id(eth_dev); + + return 0; +} + +static const uint32_t *enicpmd_dev_supported_ptypes_get(struct rte_eth_dev *dev) +{ + static const uint32_t ptypes[] = { + RTE_PTYPE_L2_ETHER, + RTE_PTYPE_L2_ETHER_VLAN, + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN, + RTE_PTYPE_L3_IPV6_EXT_UNKNOWN, + RTE_PTYPE_L4_TCP, + RTE_PTYPE_L4_UDP, + RTE_PTYPE_L4_FRAG, + RTE_PTYPE_L4_NONFRAG, + RTE_PTYPE_UNKNOWN + }; + static const uint32_t ptypes_overlay[] = { + RTE_PTYPE_L2_ETHER, + RTE_PTYPE_L2_ETHER_VLAN, + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN, + RTE_PTYPE_L3_IPV6_EXT_UNKNOWN, + RTE_PTYPE_L4_TCP, + RTE_PTYPE_L4_UDP, + RTE_PTYPE_L4_FRAG, + RTE_PTYPE_L4_NONFRAG, + RTE_PTYPE_TUNNEL_GRENAT, + RTE_PTYPE_INNER_L2_ETHER, + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN, + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN, + RTE_PTYPE_INNER_L4_TCP, + RTE_PTYPE_INNER_L4_UDP, + RTE_PTYPE_INNER_L4_FRAG, + RTE_PTYPE_INNER_L4_NONFRAG, + RTE_PTYPE_UNKNOWN + }; + + if (dev->rx_pkt_burst != enic_dummy_recv_pkts && + dev->rx_pkt_burst != NULL) { + struct enic *enic = pmd_priv(dev); + if (enic->overlay_offload) + return ptypes_overlay; + else + return ptypes; + } + return NULL; +} + +static int enicpmd_dev_promiscuous_enable(struct rte_eth_dev *eth_dev) +{ + struct enic *enic = pmd_priv(eth_dev); + int ret; + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return -E_RTE_SECONDARY; + + ENICPMD_FUNC_TRACE(); + + enic->promisc = 1; + ret = enic_add_packet_filter(enic); + if (ret != 0) + enic->promisc = 0; + + return ret; +} + +static int enicpmd_dev_promiscuous_disable(struct rte_eth_dev *eth_dev) +{ + struct enic *enic = pmd_priv(eth_dev); + int ret; + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return -E_RTE_SECONDARY; + + ENICPMD_FUNC_TRACE(); + enic->promisc = 0; + ret = enic_add_packet_filter(enic); + if (ret != 0) + enic->promisc = 1; + + return ret; +} + +static int enicpmd_dev_allmulticast_enable(struct rte_eth_dev *eth_dev) +{ + struct enic *enic = pmd_priv(eth_dev); + int ret; + + if 
(rte_eal_process_type() != RTE_PROC_PRIMARY) + return -E_RTE_SECONDARY; + + ENICPMD_FUNC_TRACE(); + enic->allmulti = 1; + ret = enic_add_packet_filter(enic); + if (ret != 0) + enic->allmulti = 0; + + return ret; +} + +static int enicpmd_dev_allmulticast_disable(struct rte_eth_dev *eth_dev) +{ + struct enic *enic = pmd_priv(eth_dev); + int ret; + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return -E_RTE_SECONDARY; + + ENICPMD_FUNC_TRACE(); + enic->allmulti = 0; + ret = enic_add_packet_filter(enic); + if (ret != 0) + enic->allmulti = 1; + + return ret; +} + +static int enicpmd_add_mac_addr(struct rte_eth_dev *eth_dev, + struct rte_ether_addr *mac_addr, + __rte_unused uint32_t index, __rte_unused uint32_t pool) +{ + struct enic *enic = pmd_priv(eth_dev); + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return -E_RTE_SECONDARY; + + ENICPMD_FUNC_TRACE(); + return enic_set_mac_address(enic, mac_addr->addr_bytes); +} + +static void enicpmd_remove_mac_addr(struct rte_eth_dev *eth_dev, uint32_t index) +{ + struct enic *enic = pmd_priv(eth_dev); + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return; + + ENICPMD_FUNC_TRACE(); + if (enic_del_mac_address(enic, index)) + dev_err(enic, "del mac addr failed\n"); +} + +static int enicpmd_set_mac_addr(struct rte_eth_dev *eth_dev, + struct rte_ether_addr *addr) +{ + struct enic *enic = pmd_priv(eth_dev); + int ret; + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return -E_RTE_SECONDARY; + + ENICPMD_FUNC_TRACE(); + ret = enic_del_mac_address(enic, 0); + if (ret) + return ret; + return enic_set_mac_address(enic, addr->addr_bytes); +} + +static void debug_log_add_del_addr(struct rte_ether_addr *addr, bool add) +{ + char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; + + rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, addr); + ENICPMD_LOG(DEBUG, " %s address %s\n", + add ? "add" : "remove", mac_str); +} + +static int enicpmd_set_mc_addr_list(struct rte_eth_dev *eth_dev, + struct rte_ether_addr *mc_addr_set, + uint32_t nb_mc_addr) +{ + struct enic *enic = pmd_priv(eth_dev); + char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; + struct rte_ether_addr *addr; + uint32_t i, j; + int ret; + + ENICPMD_FUNC_TRACE(); + + /* Validate the given addresses first */ + for (i = 0; i < nb_mc_addr && mc_addr_set != NULL; i++) { + addr = &mc_addr_set[i]; + if (!rte_is_multicast_ether_addr(addr) || + rte_is_broadcast_ether_addr(addr)) { + rte_ether_format_addr(mac_str, + RTE_ETHER_ADDR_FMT_SIZE, addr); + ENICPMD_LOG(ERR, " invalid multicast address %s\n", + mac_str); + return -EINVAL; + } + } + + /* Flush all if requested */ + if (nb_mc_addr == 0 || mc_addr_set == NULL) { + ENICPMD_LOG(DEBUG, " flush multicast addresses\n"); + for (i = 0; i < enic->mc_count; i++) { + addr = &enic->mc_addrs[i]; + debug_log_add_del_addr(addr, false); + ret = vnic_dev_del_addr(enic->vdev, addr->addr_bytes); + if (ret) + return ret; + } + enic->mc_count = 0; + return 0; + } + + if (nb_mc_addr > ENIC_MULTICAST_PERFECT_FILTERS) { + ENICPMD_LOG(ERR, " too many multicast addresses: max=%d\n", + ENIC_MULTICAST_PERFECT_FILTERS); + return -ENOSPC; + } + /* + * devcmd is slow, so apply the difference instead of flushing and + * adding everything. + * 1. 
Delete addresses on the NIC but not on the host + */ + for (i = 0; i < enic->mc_count; i++) { + addr = &enic->mc_addrs[i]; + for (j = 0; j < nb_mc_addr; j++) { + if (rte_is_same_ether_addr(addr, &mc_addr_set[j])) + break; + } + if (j < nb_mc_addr) + continue; + debug_log_add_del_addr(addr, false); + ret = vnic_dev_del_addr(enic->vdev, addr->addr_bytes); + if (ret) + return ret; + } + /* 2. Add addresses on the host but not on the NIC */ + for (i = 0; i < nb_mc_addr; i++) { + addr = &mc_addr_set[i]; + for (j = 0; j < enic->mc_count; j++) { + if (rte_is_same_ether_addr(addr, &enic->mc_addrs[j])) + break; + } + if (j < enic->mc_count) + continue; + debug_log_add_del_addr(addr, true); + ret = vnic_dev_add_addr(enic->vdev, addr->addr_bytes); + if (ret) + return ret; + } + /* Keep a copy so we can flush/apply later on.. */ + memcpy(enic->mc_addrs, mc_addr_set, + nb_mc_addr * sizeof(struct rte_ether_addr)); + enic->mc_count = nb_mc_addr; + return 0; +} + +static int enicpmd_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu) +{ + struct enic *enic = pmd_priv(eth_dev); + + ENICPMD_FUNC_TRACE(); + return enic_set_mtu(enic, mtu); +} + +static int enicpmd_dev_rss_reta_query(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 + *reta_conf, + uint16_t reta_size) +{ + struct enic *enic = pmd_priv(dev); + uint16_t i, idx, shift; + + ENICPMD_FUNC_TRACE(); + if (reta_size != ENIC_RSS_RETA_SIZE) { + dev_err(enic, "reta_query: wrong reta_size. given=%u expected=%u\n", + reta_size, ENIC_RSS_RETA_SIZE); + return -EINVAL; + } + + for (i = 0; i < reta_size; i++) { + idx = i / RTE_RETA_GROUP_SIZE; + shift = i % RTE_RETA_GROUP_SIZE; + if (reta_conf[idx].mask & (1ULL << shift)) + reta_conf[idx].reta[shift] = enic_sop_rq_idx_to_rte_idx( + enic->rss_cpu.cpu[i / 4].b[i % 4]); + } + + return 0; +} + +static int enicpmd_dev_rss_reta_update(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 + *reta_conf, + uint16_t reta_size) +{ + struct enic *enic = pmd_priv(dev); + union vnic_rss_cpu rss_cpu; + uint16_t i, idx, shift; + + ENICPMD_FUNC_TRACE(); + if (reta_size != ENIC_RSS_RETA_SIZE) { + dev_err(enic, "reta_update: wrong reta_size. given=%u" + " expected=%u\n", + reta_size, ENIC_RSS_RETA_SIZE); + return -EINVAL; + } + /* + * Start with the current reta and modify it per reta_conf, as we + * need to push the entire reta even if we only modify one entry. + */ + rss_cpu = enic->rss_cpu; + for (i = 0; i < reta_size; i++) { + idx = i / RTE_RETA_GROUP_SIZE; + shift = i % RTE_RETA_GROUP_SIZE; + if (reta_conf[idx].mask & (1ULL << shift)) + rss_cpu.cpu[i / 4].b[i % 4] = + enic_rte_rq_idx_to_sop_idx( + reta_conf[idx].reta[shift]); + } + return enic_set_rss_reta(enic, &rss_cpu); +} + +static int enicpmd_dev_rss_hash_update(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf) +{ + struct enic *enic = pmd_priv(dev); + + ENICPMD_FUNC_TRACE(); + return enic_set_rss_conf(enic, rss_conf); +} + +static int enicpmd_dev_rss_hash_conf_get(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf) +{ + struct enic *enic = pmd_priv(dev); + + ENICPMD_FUNC_TRACE(); + if (rss_conf == NULL) + return -EINVAL; + if (rss_conf->rss_key != NULL && + rss_conf->rss_key_len < ENIC_RSS_HASH_KEY_SIZE) { + dev_err(enic, "rss_hash_conf_get: wrong rss_key_len. 
given=%u" + " expected=%u+\n", + rss_conf->rss_key_len, ENIC_RSS_HASH_KEY_SIZE); + return -EINVAL; + } + rss_conf->rss_hf = enic->rss_hf; + if (rss_conf->rss_key != NULL) { + int i; + for (i = 0; i < ENIC_RSS_HASH_KEY_SIZE; i++) { + rss_conf->rss_key[i] = + enic->rss_key.key[i / 10].b[i % 10]; + } + rss_conf->rss_key_len = ENIC_RSS_HASH_KEY_SIZE; + } + return 0; +} + +static void enicpmd_dev_rxq_info_get(struct rte_eth_dev *dev, + uint16_t rx_queue_id, + struct rte_eth_rxq_info *qinfo) +{ + struct enic *enic = pmd_priv(dev); + struct vnic_rq *rq_sop; + struct vnic_rq *rq_data; + struct rte_eth_rxconf *conf; + uint16_t sop_queue_idx; + uint16_t data_queue_idx; + + ENICPMD_FUNC_TRACE(); + sop_queue_idx = enic_rte_rq_idx_to_sop_idx(rx_queue_id); + data_queue_idx = enic_rte_rq_idx_to_data_idx(rx_queue_id, enic); + rq_sop = &enic->rq[sop_queue_idx]; + rq_data = &enic->rq[data_queue_idx]; /* valid if data_queue_enable */ + qinfo->mp = rq_sop->mp; + qinfo->scattered_rx = rq_sop->data_queue_enable; + qinfo->nb_desc = rq_sop->ring.desc_count; + if (qinfo->scattered_rx) + qinfo->nb_desc += rq_data->ring.desc_count; + conf = &qinfo->conf; + memset(conf, 0, sizeof(*conf)); + conf->rx_free_thresh = rq_sop->rx_free_thresh; + conf->rx_drop_en = 1; + /* + * Except VLAN stripping (port setting), all the checksum offloads + * are always enabled. + */ + conf->offloads = enic->rx_offload_capa; + if (!enic->ig_vlan_strip_en) + conf->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP; + /* rx_thresh and other fields are not applicable for enic */ +} + +static void enicpmd_dev_txq_info_get(struct rte_eth_dev *dev, + uint16_t tx_queue_id, + struct rte_eth_txq_info *qinfo) +{ + struct enic *enic = pmd_priv(dev); + struct vnic_wq *wq = &enic->wq[tx_queue_id]; + + ENICPMD_FUNC_TRACE(); + qinfo->nb_desc = wq->ring.desc_count; + memset(&qinfo->conf, 0, sizeof(qinfo->conf)); + qinfo->conf.offloads = wq->offloads; + /* tx_thresh, and all the other fields are not applicable for enic */ +} + +static int enicpmd_dev_rx_queue_intr_enable(struct rte_eth_dev *eth_dev, + uint16_t rx_queue_id) +{ + struct enic *enic = pmd_priv(eth_dev); + + ENICPMD_FUNC_TRACE(); + vnic_intr_unmask(&enic->intr[rx_queue_id + ENICPMD_RXQ_INTR_OFFSET]); + return 0; +} + +static int enicpmd_dev_rx_queue_intr_disable(struct rte_eth_dev *eth_dev, + uint16_t rx_queue_id) +{ + struct enic *enic = pmd_priv(eth_dev); + + ENICPMD_FUNC_TRACE(); + vnic_intr_mask(&enic->intr[rx_queue_id + ENICPMD_RXQ_INTR_OFFSET]); + return 0; +} + +static int udp_tunnel_common_check(struct enic *enic, + struct rte_eth_udp_tunnel *tnl) +{ + if (tnl->prot_type != RTE_TUNNEL_TYPE_VXLAN) + return -ENOTSUP; + if (!enic->overlay_offload) { + ENICPMD_LOG(DEBUG, " vxlan (overlay offload) is not " + "supported\n"); + return -ENOTSUP; + } + return 0; +} + +static int update_vxlan_port(struct enic *enic, uint16_t port) +{ + if (vnic_dev_overlay_offload_cfg(enic->vdev, + OVERLAY_CFG_VXLAN_PORT_UPDATE, + port)) { + ENICPMD_LOG(DEBUG, " failed to update vxlan port\n"); + return -EINVAL; + } + ENICPMD_LOG(DEBUG, " updated vxlan port to %u\n", port); + enic->vxlan_port = port; + return 0; +} + +static int enicpmd_dev_udp_tunnel_port_add(struct rte_eth_dev *eth_dev, + struct rte_eth_udp_tunnel *tnl) +{ + struct enic *enic = pmd_priv(eth_dev); + int ret; + + ENICPMD_FUNC_TRACE(); + ret = udp_tunnel_common_check(enic, tnl); + if (ret) + return ret; + /* + * The NIC has 1 configurable VXLAN port number. "Adding" a new port + * number replaces it. 
+ */ + if (tnl->udp_port == enic->vxlan_port || tnl->udp_port == 0) { + ENICPMD_LOG(DEBUG, " %u is already configured or invalid\n", + tnl->udp_port); + return -EINVAL; + } + return update_vxlan_port(enic, tnl->udp_port); +} + +static int enicpmd_dev_udp_tunnel_port_del(struct rte_eth_dev *eth_dev, + struct rte_eth_udp_tunnel *tnl) +{ + struct enic *enic = pmd_priv(eth_dev); + int ret; + + ENICPMD_FUNC_TRACE(); + ret = udp_tunnel_common_check(enic, tnl); + if (ret) + return ret; + /* + * Clear the previously set port number and restore the + * hardware default port number. Some drivers disable VXLAN + * offloads when there are no configured port numbers. But + * enic does not do that as VXLAN is part of overlay offload, + * which is tied to inner RSS and TSO. + */ + if (tnl->udp_port != enic->vxlan_port) { + ENICPMD_LOG(DEBUG, " %u is not a configured vxlan port\n", + tnl->udp_port); + return -EINVAL; + } + return update_vxlan_port(enic, RTE_VXLAN_DEFAULT_PORT); +} + +static int enicpmd_dev_fw_version_get(struct rte_eth_dev *eth_dev, + char *fw_version, size_t fw_size) +{ + struct vnic_devcmd_fw_info *info; + struct enic *enic; + int ret; + + ENICPMD_FUNC_TRACE(); + if (fw_version == NULL || fw_size <= 0) + return -EINVAL; + enic = pmd_priv(eth_dev); + ret = vnic_dev_fw_info(enic->vdev, &info); + if (ret) + return ret; + snprintf(fw_version, fw_size, "%s %s", + info->fw_version, info->fw_build); + fw_version[fw_size - 1] = '\0'; + return 0; +} + +static const struct eth_dev_ops enicpmd_eth_dev_ops = { + .dev_configure = enicpmd_dev_configure, + .dev_start = enicpmd_dev_start, + .dev_stop = enicpmd_dev_stop, + .dev_set_link_up = NULL, + .dev_set_link_down = NULL, + .dev_close = enicpmd_dev_close, + .promiscuous_enable = enicpmd_dev_promiscuous_enable, + .promiscuous_disable = enicpmd_dev_promiscuous_disable, + .allmulticast_enable = enicpmd_dev_allmulticast_enable, + .allmulticast_disable = enicpmd_dev_allmulticast_disable, + .link_update = enicpmd_dev_link_update, + .stats_get = enicpmd_dev_stats_get, + .stats_reset = enicpmd_dev_stats_reset, + .queue_stats_mapping_set = NULL, + .dev_infos_get = enicpmd_dev_info_get, + .dev_supported_ptypes_get = enicpmd_dev_supported_ptypes_get, + .mtu_set = enicpmd_mtu_set, + .vlan_filter_set = NULL, + .vlan_tpid_set = NULL, + .vlan_offload_set = enicpmd_vlan_offload_set, + .vlan_strip_queue_set = NULL, + .rx_queue_start = enicpmd_dev_rx_queue_start, + .rx_queue_stop = enicpmd_dev_rx_queue_stop, + .tx_queue_start = enicpmd_dev_tx_queue_start, + .tx_queue_stop = enicpmd_dev_tx_queue_stop, + .rx_queue_setup = enicpmd_dev_rx_queue_setup, + .rx_queue_release = enicpmd_dev_rx_queue_release, + .rx_queue_count = enicpmd_dev_rx_queue_count, + .rx_descriptor_done = NULL, + .tx_queue_setup = enicpmd_dev_tx_queue_setup, + .tx_queue_release = enicpmd_dev_tx_queue_release, + .rx_queue_intr_enable = enicpmd_dev_rx_queue_intr_enable, + .rx_queue_intr_disable = enicpmd_dev_rx_queue_intr_disable, + .rxq_info_get = enicpmd_dev_rxq_info_get, + .txq_info_get = enicpmd_dev_txq_info_get, + .dev_led_on = NULL, + .dev_led_off = NULL, + .flow_ctrl_get = NULL, + .flow_ctrl_set = NULL, + .priority_flow_ctrl_set = NULL, + .mac_addr_add = enicpmd_add_mac_addr, + .mac_addr_remove = enicpmd_remove_mac_addr, + .mac_addr_set = enicpmd_set_mac_addr, + .set_mc_addr_list = enicpmd_set_mc_addr_list, + .filter_ctrl = enicpmd_dev_filter_ctrl, + .reta_query = enicpmd_dev_rss_reta_query, + .reta_update = enicpmd_dev_rss_reta_update, + .rss_hash_conf_get = enicpmd_dev_rss_hash_conf_get, + 
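+ /* The udp_tunnel ops below manage the vNIC's single configurable VXLAN
+ * port; see update_vxlan_port().
+ */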
.rss_hash_update = enicpmd_dev_rss_hash_update, + .udp_tunnel_port_add = enicpmd_dev_udp_tunnel_port_add, + .udp_tunnel_port_del = enicpmd_dev_udp_tunnel_port_del, + .fw_version_get = enicpmd_dev_fw_version_get, +}; + +static int enic_parse_zero_one(const char *key, + const char *value, + void *opaque) +{ + struct enic *enic; + bool b; + + enic = (struct enic *)opaque; + if (strcmp(value, "0") == 0) { + b = false; + } else if (strcmp(value, "1") == 0) { + b = true; + } else { + dev_err(enic, "Invalid value for %s" + ": expected=0|1 given=%s\n", key, value); + return -EINVAL; + } + if (strcmp(key, ENIC_DEVARG_DISABLE_OVERLAY) == 0) + enic->disable_overlay = b; + if (strcmp(key, ENIC_DEVARG_ENABLE_AVX2_RX) == 0) + enic->enable_avx2_rx = b; + if (strcmp(key, ENIC_DEVARG_GENEVE_OPT) == 0) + enic->geneve_opt_request = b; + return 0; +} + +static int enic_parse_ig_vlan_rewrite(__rte_unused const char *key, + const char *value, + void *opaque) +{ + struct enic *enic; + + enic = (struct enic *)opaque; + if (strcmp(value, "trunk") == 0) { + /* Trunk mode: always tag */ + enic->ig_vlan_rewrite_mode = IG_VLAN_REWRITE_MODE_DEFAULT_TRUNK; + } else if (strcmp(value, "untag") == 0) { + /* Untag default VLAN mode: untag if VLAN = default VLAN */ + enic->ig_vlan_rewrite_mode = + IG_VLAN_REWRITE_MODE_UNTAG_DEFAULT_VLAN; + } else if (strcmp(value, "priority") == 0) { + /* + * Priority-tag default VLAN mode: priority tag (VLAN header + * with ID=0) if VLAN = default + */ + enic->ig_vlan_rewrite_mode = + IG_VLAN_REWRITE_MODE_PRIORITY_TAG_DEFAULT_VLAN; + } else if (strcmp(value, "pass") == 0) { + /* Pass through mode: do not touch tags */ + enic->ig_vlan_rewrite_mode = IG_VLAN_REWRITE_MODE_PASS_THRU; + } else { + dev_err(enic, "Invalid value for " ENIC_DEVARG_IG_VLAN_REWRITE + ": expected=trunk|untag|priority|pass given=%s\n", + value); + return -EINVAL; + } + return 0; +} + +static int enic_check_devargs(struct rte_eth_dev *dev) +{ + static const char *const valid_keys[] = { + ENIC_DEVARG_DISABLE_OVERLAY, + ENIC_DEVARG_ENABLE_AVX2_RX, + ENIC_DEVARG_GENEVE_OPT, + ENIC_DEVARG_IG_VLAN_REWRITE, + NULL}; + struct enic *enic = pmd_priv(dev); + struct rte_kvargs *kvlist; + + ENICPMD_FUNC_TRACE(); + + enic->disable_overlay = false; + enic->enable_avx2_rx = false; + enic->geneve_opt_request = false; + enic->ig_vlan_rewrite_mode = IG_VLAN_REWRITE_MODE_PASS_THRU; + if (!dev->device->devargs) + return 0; + kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys); + if (!kvlist) + return -EINVAL; + if (rte_kvargs_process(kvlist, ENIC_DEVARG_DISABLE_OVERLAY, + enic_parse_zero_one, enic) < 0 || + rte_kvargs_process(kvlist, ENIC_DEVARG_ENABLE_AVX2_RX, + enic_parse_zero_one, enic) < 0 || + rte_kvargs_process(kvlist, ENIC_DEVARG_GENEVE_OPT, + enic_parse_zero_one, enic) < 0 || + rte_kvargs_process(kvlist, ENIC_DEVARG_IG_VLAN_REWRITE, + enic_parse_ig_vlan_rewrite, enic) < 0) { + rte_kvargs_free(kvlist); + return -EINVAL; + } + rte_kvargs_free(kvlist); + return 0; +} + +/* Initialize the driver + * It returns 0 on success. 
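+ * Secondary processes only select the Rx/Tx burst handlers; the adapter
+ * and all other shared state are set up by the primary process.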
+ */ +static int eth_enicpmd_dev_init(struct rte_eth_dev *eth_dev) +{ + struct rte_pci_device *pdev; + struct rte_pci_addr *addr; + struct enic *enic = pmd_priv(eth_dev); + int err; + + ENICPMD_FUNC_TRACE(); + + eth_dev->dev_ops = &enicpmd_eth_dev_ops; + eth_dev->rx_pkt_burst = &enic_recv_pkts; + eth_dev->tx_pkt_burst = &enic_xmit_pkts; + eth_dev->tx_pkt_prepare = &enic_prep_pkts; + if (rte_eal_process_type() != RTE_PROC_PRIMARY) { + enic_pick_tx_handler(eth_dev); + enic_pick_rx_handler(eth_dev); + return 0; + } + /* Only the primary sets up adapter and other data in shared memory */ + enic->port_id = eth_dev->data->port_id; + enic->rte_dev = eth_dev; + enic->dev_data = eth_dev->data; + /* Let rte_eth_dev_close() release the port resources */ + eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE; + + pdev = RTE_ETH_DEV_TO_PCI(eth_dev); + rte_eth_copy_pci_info(eth_dev, pdev); + enic->pdev = pdev; + addr = &pdev->addr; + + snprintf(enic->bdf_name, ENICPMD_BDF_LENGTH, "%04x:%02x:%02x.%x", + addr->domain, addr->bus, addr->devid, addr->function); + + err = enic_check_devargs(eth_dev); + if (err) + return err; + return enic_probe(enic); +} + +static int eth_enic_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, + struct rte_pci_device *pci_dev) +{ + return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct enic), + eth_enicpmd_dev_init); +} + +static int eth_enic_pci_remove(struct rte_pci_device *pci_dev) +{ + return rte_eth_dev_pci_generic_remove(pci_dev, NULL); +} + +static struct rte_pci_driver rte_enic_pmd = { + .id_table = pci_id_enic_map, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, + .probe = eth_enic_pci_probe, + .remove = eth_enic_pci_remove, +}; + +int dev_is_enic(struct rte_eth_dev *dev) +{ + return dev->device->driver == &rte_enic_pmd.driver; +} + +RTE_PMD_REGISTER_PCI(net_enic, rte_enic_pmd); +RTE_PMD_REGISTER_PCI_TABLE(net_enic, pci_id_enic_map); +RTE_PMD_REGISTER_KMOD_DEP(net_enic, "* igb_uio | uio_pci_generic | vfio-pci"); +RTE_PMD_REGISTER_PARAM_STRING(net_enic, + ENIC_DEVARG_DISABLE_OVERLAY "=0|1 " + ENIC_DEVARG_ENABLE_AVX2_RX "=0|1 " + ENIC_DEVARG_GENEVE_OPT "=0|1 " + ENIC_DEVARG_IG_VLAN_REWRITE "=trunk|untag|priority|pass"); diff --git a/src/spdk/dpdk/drivers/net/enic/enic_flow.c b/src/spdk/dpdk/drivers/net/enic/enic_flow.c new file mode 100644 index 000000000..cebca7d55 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/enic/enic_flow.c @@ -0,0 +1,1795 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "enic_compat.h" +#include "enic.h" +#include "vnic_dev.h" +#include "vnic_nic.h" + +/* + * Common arguments passed to copy_item functions. Use this structure + * so we can easily add new arguments. + * item: Item specification. + * filter: Partially filled in NIC filter structure. + * inner_ofst: If zero, this is an outer header. If non-zero, this is + * the offset into L5 where the header begins. + * l2_proto_off: offset to EtherType eth or vlan header. + * l3_proto_off: offset to next protocol field in IPv4 or 6 header. + */ +struct copy_item_args { + const struct rte_flow_item *item; + struct filter_v2 *filter; + uint8_t *inner_ofst; + uint8_t l2_proto_off; + uint8_t l3_proto_off; + struct enic *enic; +}; + +/* functions for copying items into enic filters */ +typedef int (enic_copy_item_fn)(struct copy_item_args *arg); + +/** Info about how to copy items into enic filters. 
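+ * The enic_items_v1/v2/v3 tables below are indexed by rte_flow item type;
+ * each NIC/firmware filtering level has its own table.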
*/ +struct enic_items { + /** Function for copying and validating an item. */ + enic_copy_item_fn *copy_item; + /** List of valid previous items. */ + const enum rte_flow_item_type * const prev_items; + /** True if it's OK for this item to be the first item. For some NIC + * versions, it's invalid to start the stack above layer 3. + */ + const uint8_t valid_start_item; + /* Inner packet version of copy_item. */ + enic_copy_item_fn *inner_copy_item; +}; + +/** Filtering capabilities for various NIC and firmware versions. */ +struct enic_filter_cap { + /** list of valid items and their handlers and attributes. */ + const struct enic_items *item_info; + /* Max type in the above list, used to detect unsupported types */ + enum rte_flow_item_type max_item_type; +}; + +/* functions for copying flow actions into enic actions */ +typedef int (copy_action_fn)(struct enic *enic, + const struct rte_flow_action actions[], + struct filter_action_v2 *enic_action); + +/** Action capabilities for various NICs. */ +struct enic_action_cap { + /** list of valid actions */ + const enum rte_flow_action_type *actions; + /** copy function for a particular NIC */ + copy_action_fn *copy_fn; +}; + +/* Forward declarations */ +static enic_copy_item_fn enic_copy_item_ipv4_v1; +static enic_copy_item_fn enic_copy_item_udp_v1; +static enic_copy_item_fn enic_copy_item_tcp_v1; +static enic_copy_item_fn enic_copy_item_raw_v2; +static enic_copy_item_fn enic_copy_item_eth_v2; +static enic_copy_item_fn enic_copy_item_vlan_v2; +static enic_copy_item_fn enic_copy_item_ipv4_v2; +static enic_copy_item_fn enic_copy_item_ipv6_v2; +static enic_copy_item_fn enic_copy_item_udp_v2; +static enic_copy_item_fn enic_copy_item_tcp_v2; +static enic_copy_item_fn enic_copy_item_sctp_v2; +static enic_copy_item_fn enic_copy_item_vxlan_v2; +static enic_copy_item_fn enic_copy_item_inner_eth_v2; +static enic_copy_item_fn enic_copy_item_inner_vlan_v2; +static enic_copy_item_fn enic_copy_item_inner_ipv4_v2; +static enic_copy_item_fn enic_copy_item_inner_ipv6_v2; +static enic_copy_item_fn enic_copy_item_inner_udp_v2; +static enic_copy_item_fn enic_copy_item_inner_tcp_v2; +static copy_action_fn enic_copy_action_v1; +static copy_action_fn enic_copy_action_v2; + +/** + * Legacy NICs or NICs with outdated firmware. Only 5-tuple perfect match + * is supported. + */ +static const struct enic_items enic_items_v1[] = { + [RTE_FLOW_ITEM_TYPE_IPV4] = { + .copy_item = enic_copy_item_ipv4_v1, + .valid_start_item = 1, + .prev_items = (const enum rte_flow_item_type[]) { + RTE_FLOW_ITEM_TYPE_END, + }, + .inner_copy_item = NULL, + }, + [RTE_FLOW_ITEM_TYPE_UDP] = { + .copy_item = enic_copy_item_udp_v1, + .valid_start_item = 0, + .prev_items = (const enum rte_flow_item_type[]) { + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_END, + }, + .inner_copy_item = NULL, + }, + [RTE_FLOW_ITEM_TYPE_TCP] = { + .copy_item = enic_copy_item_tcp_v1, + .valid_start_item = 0, + .prev_items = (const enum rte_flow_item_type[]) { + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_END, + }, + .inner_copy_item = NULL, + }, +}; + +/** + * NICs have Advanced Filters capability but they are disabled. This means + * that layer 3 must be specified. 
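+ * (Hence valid_start_item is 0 for the UDP/TCP entries in this table,
+ * unlike enic_items_v3 where advanced filters are enabled and a pattern
+ * may begin at L4.)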
+ */ +static const struct enic_items enic_items_v2[] = { + [RTE_FLOW_ITEM_TYPE_RAW] = { + .copy_item = enic_copy_item_raw_v2, + .valid_start_item = 0, + .prev_items = (const enum rte_flow_item_type[]) { + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_END, + }, + .inner_copy_item = NULL, + }, + [RTE_FLOW_ITEM_TYPE_ETH] = { + .copy_item = enic_copy_item_eth_v2, + .valid_start_item = 1, + .prev_items = (const enum rte_flow_item_type[]) { + RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_END, + }, + .inner_copy_item = enic_copy_item_inner_eth_v2, + }, + [RTE_FLOW_ITEM_TYPE_VLAN] = { + .copy_item = enic_copy_item_vlan_v2, + .valid_start_item = 1, + .prev_items = (const enum rte_flow_item_type[]) { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_END, + }, + .inner_copy_item = enic_copy_item_inner_vlan_v2, + }, + [RTE_FLOW_ITEM_TYPE_IPV4] = { + .copy_item = enic_copy_item_ipv4_v2, + .valid_start_item = 1, + .prev_items = (const enum rte_flow_item_type[]) { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_END, + }, + .inner_copy_item = enic_copy_item_inner_ipv4_v2, + }, + [RTE_FLOW_ITEM_TYPE_IPV6] = { + .copy_item = enic_copy_item_ipv6_v2, + .valid_start_item = 1, + .prev_items = (const enum rte_flow_item_type[]) { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_END, + }, + .inner_copy_item = enic_copy_item_inner_ipv6_v2, + }, + [RTE_FLOW_ITEM_TYPE_UDP] = { + .copy_item = enic_copy_item_udp_v2, + .valid_start_item = 0, + .prev_items = (const enum rte_flow_item_type[]) { + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_END, + }, + .inner_copy_item = enic_copy_item_inner_udp_v2, + }, + [RTE_FLOW_ITEM_TYPE_TCP] = { + .copy_item = enic_copy_item_tcp_v2, + .valid_start_item = 0, + .prev_items = (const enum rte_flow_item_type[]) { + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_END, + }, + .inner_copy_item = enic_copy_item_inner_tcp_v2, + }, + [RTE_FLOW_ITEM_TYPE_SCTP] = { + .copy_item = enic_copy_item_sctp_v2, + .valid_start_item = 0, + .prev_items = (const enum rte_flow_item_type[]) { + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_END, + }, + .inner_copy_item = NULL, + }, + [RTE_FLOW_ITEM_TYPE_VXLAN] = { + .copy_item = enic_copy_item_vxlan_v2, + .valid_start_item = 0, + .prev_items = (const enum rte_flow_item_type[]) { + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_END, + }, + .inner_copy_item = NULL, + }, +}; + +/** NICs with Advanced filters enabled */ +static const struct enic_items enic_items_v3[] = { + [RTE_FLOW_ITEM_TYPE_RAW] = { + .copy_item = enic_copy_item_raw_v2, + .valid_start_item = 0, + .prev_items = (const enum rte_flow_item_type[]) { + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_END, + }, + .inner_copy_item = NULL, + }, + [RTE_FLOW_ITEM_TYPE_ETH] = { + .copy_item = enic_copy_item_eth_v2, + .valid_start_item = 1, + .prev_items = (const enum rte_flow_item_type[]) { + RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_END, + }, + .inner_copy_item = enic_copy_item_inner_eth_v2, + }, + [RTE_FLOW_ITEM_TYPE_VLAN] = { + .copy_item = enic_copy_item_vlan_v2, + .valid_start_item = 1, + .prev_items = (const enum rte_flow_item_type[]) { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_END, + }, + .inner_copy_item = enic_copy_item_inner_vlan_v2, + }, + [RTE_FLOW_ITEM_TYPE_IPV4] = { + .copy_item = enic_copy_item_ipv4_v2, + .valid_start_item = 1, + .prev_items = (const enum rte_flow_item_type[]) { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_END, + }, 
+ .inner_copy_item = enic_copy_item_inner_ipv4_v2, + }, + [RTE_FLOW_ITEM_TYPE_IPV6] = { + .copy_item = enic_copy_item_ipv6_v2, + .valid_start_item = 1, + .prev_items = (const enum rte_flow_item_type[]) { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_END, + }, + .inner_copy_item = enic_copy_item_inner_ipv6_v2, + }, + [RTE_FLOW_ITEM_TYPE_UDP] = { + .copy_item = enic_copy_item_udp_v2, + .valid_start_item = 1, + .prev_items = (const enum rte_flow_item_type[]) { + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_END, + }, + .inner_copy_item = enic_copy_item_inner_udp_v2, + }, + [RTE_FLOW_ITEM_TYPE_TCP] = { + .copy_item = enic_copy_item_tcp_v2, + .valid_start_item = 1, + .prev_items = (const enum rte_flow_item_type[]) { + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_END, + }, + .inner_copy_item = enic_copy_item_inner_tcp_v2, + }, + [RTE_FLOW_ITEM_TYPE_SCTP] = { + .copy_item = enic_copy_item_sctp_v2, + .valid_start_item = 0, + .prev_items = (const enum rte_flow_item_type[]) { + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_END, + }, + .inner_copy_item = NULL, + }, + [RTE_FLOW_ITEM_TYPE_VXLAN] = { + .copy_item = enic_copy_item_vxlan_v2, + .valid_start_item = 1, + .prev_items = (const enum rte_flow_item_type[]) { + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_END, + }, + .inner_copy_item = NULL, + }, +}; + +/** Filtering capabilities indexed this NICs supported filter type. */ +static const struct enic_filter_cap enic_filter_cap[] = { + [FILTER_IPV4_5TUPLE] = { + .item_info = enic_items_v1, + .max_item_type = RTE_FLOW_ITEM_TYPE_TCP, + }, + [FILTER_USNIC_IP] = { + .item_info = enic_items_v2, + .max_item_type = RTE_FLOW_ITEM_TYPE_VXLAN, + }, + [FILTER_DPDK_1] = { + .item_info = enic_items_v3, + .max_item_type = RTE_FLOW_ITEM_TYPE_VXLAN, + }, +}; + +/** Supported actions for older NICs */ +static const enum rte_flow_action_type enic_supported_actions_v1[] = { + RTE_FLOW_ACTION_TYPE_QUEUE, + RTE_FLOW_ACTION_TYPE_END, +}; + +/** Supported actions for newer NICs */ +static const enum rte_flow_action_type enic_supported_actions_v2_id[] = { + RTE_FLOW_ACTION_TYPE_QUEUE, + RTE_FLOW_ACTION_TYPE_MARK, + RTE_FLOW_ACTION_TYPE_FLAG, + RTE_FLOW_ACTION_TYPE_RSS, + RTE_FLOW_ACTION_TYPE_PASSTHRU, + RTE_FLOW_ACTION_TYPE_END, +}; + +static const enum rte_flow_action_type enic_supported_actions_v2_drop[] = { + RTE_FLOW_ACTION_TYPE_QUEUE, + RTE_FLOW_ACTION_TYPE_MARK, + RTE_FLOW_ACTION_TYPE_FLAG, + RTE_FLOW_ACTION_TYPE_DROP, + RTE_FLOW_ACTION_TYPE_RSS, + RTE_FLOW_ACTION_TYPE_PASSTHRU, + RTE_FLOW_ACTION_TYPE_END, +}; + +/** Action capabilities indexed by NIC version information */ +static const struct enic_action_cap enic_action_cap[] = { + [FILTER_ACTION_RQ_STEERING_FLAG] = { + .actions = enic_supported_actions_v1, + .copy_fn = enic_copy_action_v1, + }, + [FILTER_ACTION_FILTER_ID_FLAG] = { + .actions = enic_supported_actions_v2_id, + .copy_fn = enic_copy_action_v2, + }, + [FILTER_ACTION_DROP_FLAG] = { + .actions = enic_supported_actions_v2_drop, + .copy_fn = enic_copy_action_v2, + }, +}; + +static int +mask_exact_match(const uint8_t *supported, const uint8_t *supplied, + unsigned int size) +{ + unsigned int i; + for (i = 0; i < size; i++) { + if (supported[i] != supplied[i]) + return 0; + } + return 1; +} + +static int +enic_copy_item_ipv4_v1(struct copy_item_args *arg) +{ + const struct rte_flow_item *item = arg->item; + struct filter_v2 *enic_filter = arg->filter; + const struct rte_flow_item_ipv4 *spec = 
item->spec; + const struct rte_flow_item_ipv4 *mask = item->mask; + struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4; + struct rte_ipv4_hdr supported_mask = { + .src_addr = 0xffffffff, + .dst_addr = 0xffffffff, + }; + + ENICPMD_FUNC_TRACE(); + + if (!mask) + mask = &rte_flow_item_ipv4_mask; + + /* This is an exact match filter, both fields must be set */ + if (!spec || !spec->hdr.src_addr || !spec->hdr.dst_addr) { + ENICPMD_LOG(ERR, "IPv4 exact match src/dst addr"); + return ENOTSUP; + } + + /* check that the suppied mask exactly matches capabilty */ + if (!mask_exact_match((const uint8_t *)&supported_mask, + (const uint8_t *)item->mask, sizeof(*mask))) { + ENICPMD_LOG(ERR, "IPv4 exact match mask"); + return ENOTSUP; + } + + enic_filter->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE; + enic_5tup->src_addr = spec->hdr.src_addr; + enic_5tup->dst_addr = spec->hdr.dst_addr; + + return 0; +} + +static int +enic_copy_item_udp_v1(struct copy_item_args *arg) +{ + const struct rte_flow_item *item = arg->item; + struct filter_v2 *enic_filter = arg->filter; + const struct rte_flow_item_udp *spec = item->spec; + const struct rte_flow_item_udp *mask = item->mask; + struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4; + struct rte_udp_hdr supported_mask = { + .src_port = 0xffff, + .dst_port = 0xffff, + }; + + ENICPMD_FUNC_TRACE(); + + if (!mask) + mask = &rte_flow_item_udp_mask; + + /* This is an exact match filter, both ports must be set */ + if (!spec || !spec->hdr.src_port || !spec->hdr.dst_port) { + ENICPMD_LOG(ERR, "UDP exact match src/dst addr"); + return ENOTSUP; + } + + /* check that the suppied mask exactly matches capabilty */ + if (!mask_exact_match((const uint8_t *)&supported_mask, + (const uint8_t *)item->mask, sizeof(*mask))) { + ENICPMD_LOG(ERR, "UDP exact match mask"); + return ENOTSUP; + } + + enic_filter->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE; + enic_5tup->src_port = spec->hdr.src_port; + enic_5tup->dst_port = spec->hdr.dst_port; + enic_5tup->protocol = PROTO_UDP; + + return 0; +} + +static int +enic_copy_item_tcp_v1(struct copy_item_args *arg) +{ + const struct rte_flow_item *item = arg->item; + struct filter_v2 *enic_filter = arg->filter; + const struct rte_flow_item_tcp *spec = item->spec; + const struct rte_flow_item_tcp *mask = item->mask; + struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4; + struct rte_tcp_hdr supported_mask = { + .src_port = 0xffff, + .dst_port = 0xffff, + }; + + ENICPMD_FUNC_TRACE(); + + if (!mask) + mask = &rte_flow_item_tcp_mask; + + /* This is an exact match filter, both ports must be set */ + if (!spec || !spec->hdr.src_port || !spec->hdr.dst_port) { + ENICPMD_LOG(ERR, "TCPIPv4 exact match src/dst addr"); + return ENOTSUP; + } + + /* check that the suppied mask exactly matches capabilty */ + if (!mask_exact_match((const uint8_t *)&supported_mask, + (const uint8_t *)item->mask, sizeof(*mask))) { + ENICPMD_LOG(ERR, "TCP exact match mask"); + return ENOTSUP; + } + + enic_filter->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE; + enic_5tup->src_port = spec->hdr.src_port; + enic_5tup->dst_port = spec->hdr.dst_port; + enic_5tup->protocol = PROTO_TCP; + + return 0; +} + +/* + * The common 'copy' function for all inner packet patterns. Patterns are + * first appended to the L5 pattern buffer. Then, since the NIC filter + * API has no special support for inner packet matching at the moment, + * we set EtherType and IP proto as necessary. 
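+ *
+ * Rough example (for illustration only): with an outer vxlan item
+ * followed by inner eth / ipv4 / udp, the inner ether header is
+ * appended just past the vxlan header in L5, ipv4 after that, and
+ * udp after that. Copying ipv4 also patches ether_type (0x0800) into
+ * the already-copied ether header, and copying udp patches
+ * next_proto_id (17) into ipv4.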
+ */ +static int +copy_inner_common(struct filter_generic_1 *gp, uint8_t *inner_ofst, + const void *val, const void *mask, uint8_t val_size, + uint8_t proto_off, uint16_t proto_val, uint8_t proto_size) +{ + uint8_t *l5_mask, *l5_val; + uint8_t start_off; + + /* No space left in the L5 pattern buffer. */ + start_off = *inner_ofst; + if ((start_off + val_size) > FILTER_GENERIC_1_KEY_LEN) + return ENOTSUP; + l5_mask = gp->layer[FILTER_GENERIC_1_L5].mask; + l5_val = gp->layer[FILTER_GENERIC_1_L5].val; + /* Copy the pattern into the L5 buffer. */ + if (val) { + memcpy(l5_mask + start_off, mask, val_size); + memcpy(l5_val + start_off, val, val_size); + } + /* Set the protocol field in the previous header. */ + if (proto_off) { + void *m, *v; + + m = l5_mask + proto_off; + v = l5_val + proto_off; + if (proto_size == 1) { + *(uint8_t *)m = 0xff; + *(uint8_t *)v = (uint8_t)proto_val; + } else if (proto_size == 2) { + *(uint16_t *)m = 0xffff; + *(uint16_t *)v = proto_val; + } + } + /* All inner headers land in L5 buffer even if their spec is null. */ + *inner_ofst += val_size; + return 0; +} + +static int +enic_copy_item_inner_eth_v2(struct copy_item_args *arg) +{ + const void *mask = arg->item->mask; + uint8_t *off = arg->inner_ofst; + + ENICPMD_FUNC_TRACE(); + if (!mask) + mask = &rte_flow_item_eth_mask; + arg->l2_proto_off = *off + offsetof(struct rte_ether_hdr, ether_type); + return copy_inner_common(&arg->filter->u.generic_1, off, + arg->item->spec, mask, sizeof(struct rte_ether_hdr), + 0 /* no previous protocol */, 0, 0); +} + +static int +enic_copy_item_inner_vlan_v2(struct copy_item_args *arg) +{ + const void *mask = arg->item->mask; + uint8_t *off = arg->inner_ofst; + uint8_t eth_type_off; + + ENICPMD_FUNC_TRACE(); + if (!mask) + mask = &rte_flow_item_vlan_mask; + /* Append vlan header to L5 and set ether type = TPID */ + eth_type_off = arg->l2_proto_off; + arg->l2_proto_off = *off + offsetof(struct rte_vlan_hdr, eth_proto); + return copy_inner_common(&arg->filter->u.generic_1, off, + arg->item->spec, mask, sizeof(struct rte_vlan_hdr), + eth_type_off, rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN), 2); +} + +static int +enic_copy_item_inner_ipv4_v2(struct copy_item_args *arg) +{ + const void *mask = arg->item->mask; + uint8_t *off = arg->inner_ofst; + + ENICPMD_FUNC_TRACE(); + if (!mask) + mask = &rte_flow_item_ipv4_mask; + /* Append ipv4 header to L5 and set ether type = ipv4 */ + arg->l3_proto_off = *off + offsetof(struct rte_ipv4_hdr, next_proto_id); + return copy_inner_common(&arg->filter->u.generic_1, off, + arg->item->spec, mask, sizeof(struct rte_ipv4_hdr), + arg->l2_proto_off, rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4), 2); +} + +static int +enic_copy_item_inner_ipv6_v2(struct copy_item_args *arg) +{ + const void *mask = arg->item->mask; + uint8_t *off = arg->inner_ofst; + + ENICPMD_FUNC_TRACE(); + if (!mask) + mask = &rte_flow_item_ipv6_mask; + /* Append ipv6 header to L5 and set ether type = ipv6 */ + arg->l3_proto_off = *off + offsetof(struct rte_ipv6_hdr, proto); + return copy_inner_common(&arg->filter->u.generic_1, off, + arg->item->spec, mask, sizeof(struct rte_ipv6_hdr), + arg->l2_proto_off, rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6), 2); +} + +static int +enic_copy_item_inner_udp_v2(struct copy_item_args *arg) +{ + const void *mask = arg->item->mask; + uint8_t *off = arg->inner_ofst; + + ENICPMD_FUNC_TRACE(); + if (!mask) + mask = &rte_flow_item_udp_mask; + /* Append udp header to L5 and set ip proto = udp */ + return copy_inner_common(&arg->filter->u.generic_1, off, + arg->item->spec, mask, 
sizeof(struct rte_udp_hdr), + arg->l3_proto_off, IPPROTO_UDP, 1); +} + +static int +enic_copy_item_inner_tcp_v2(struct copy_item_args *arg) +{ + const void *mask = arg->item->mask; + uint8_t *off = arg->inner_ofst; + + ENICPMD_FUNC_TRACE(); + if (!mask) + mask = &rte_flow_item_tcp_mask; + /* Append tcp header to L5 and set ip proto = tcp */ + return copy_inner_common(&arg->filter->u.generic_1, off, + arg->item->spec, mask, sizeof(struct rte_tcp_hdr), + arg->l3_proto_off, IPPROTO_TCP, 1); +} + +static int +enic_copy_item_eth_v2(struct copy_item_args *arg) +{ + const struct rte_flow_item *item = arg->item; + struct filter_v2 *enic_filter = arg->filter; + struct rte_ether_hdr enic_spec; + struct rte_ether_hdr enic_mask; + const struct rte_flow_item_eth *spec = item->spec; + const struct rte_flow_item_eth *mask = item->mask; + struct filter_generic_1 *gp = &enic_filter->u.generic_1; + + ENICPMD_FUNC_TRACE(); + + /* Match all if no spec */ + if (!spec) + return 0; + + if (!mask) + mask = &rte_flow_item_eth_mask; + + memcpy(enic_spec.d_addr.addr_bytes, spec->dst.addr_bytes, + RTE_ETHER_ADDR_LEN); + memcpy(enic_spec.s_addr.addr_bytes, spec->src.addr_bytes, + RTE_ETHER_ADDR_LEN); + + memcpy(enic_mask.d_addr.addr_bytes, mask->dst.addr_bytes, + RTE_ETHER_ADDR_LEN); + memcpy(enic_mask.s_addr.addr_bytes, mask->src.addr_bytes, + RTE_ETHER_ADDR_LEN); + enic_spec.ether_type = spec->type; + enic_mask.ether_type = mask->type; + + /* outer header */ + memcpy(gp->layer[FILTER_GENERIC_1_L2].mask, &enic_mask, + sizeof(struct rte_ether_hdr)); + memcpy(gp->layer[FILTER_GENERIC_1_L2].val, &enic_spec, + sizeof(struct rte_ether_hdr)); + return 0; +} + +static int +enic_copy_item_vlan_v2(struct copy_item_args *arg) +{ + const struct rte_flow_item *item = arg->item; + struct filter_v2 *enic_filter = arg->filter; + const struct rte_flow_item_vlan *spec = item->spec; + const struct rte_flow_item_vlan *mask = item->mask; + struct filter_generic_1 *gp = &enic_filter->u.generic_1; + struct rte_ether_hdr *eth_mask; + struct rte_ether_hdr *eth_val; + + ENICPMD_FUNC_TRACE(); + + /* Match all if no spec */ + if (!spec) + return 0; + + if (!mask) + mask = &rte_flow_item_vlan_mask; + + eth_mask = (void *)gp->layer[FILTER_GENERIC_1_L2].mask; + eth_val = (void *)gp->layer[FILTER_GENERIC_1_L2].val; + /* Outer TPID cannot be matched */ + if (eth_mask->ether_type) + return ENOTSUP; + /* + * For recent models: + * When packet matching, the VIC always compares vlan-stripped + * L2, regardless of vlan stripping settings. So, the inner type + * from vlan becomes the ether type of the eth header. + * + * Older models w/o hardware vxlan parser have a different + * behavior when vlan stripping is disabled. In this case, + * vlan tag remains in the L2 buffer. + */ + if (!arg->enic->vxlan && !arg->enic->ig_vlan_strip_en) { + struct rte_vlan_hdr *vlan; + + vlan = (struct rte_vlan_hdr *)(eth_mask + 1); + vlan->eth_proto = mask->inner_type; + vlan = (struct rte_vlan_hdr *)(eth_val + 1); + vlan->eth_proto = spec->inner_type; + } else { + eth_mask->ether_type = mask->inner_type; + eth_val->ether_type = spec->inner_type; + } + /* For TCI, use the vlan mask/val fields (little endian). 
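+	 * For example, a spec TCI of 0x0064 (VLAN ID 100, PCP 0) becomes
+	 * val_vlan = 100 after the big-endian to CPU conversion below.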
*/ + gp->mask_vlan = rte_be_to_cpu_16(mask->tci); + gp->val_vlan = rte_be_to_cpu_16(spec->tci); + return 0; +} + +static int +enic_copy_item_ipv4_v2(struct copy_item_args *arg) +{ + const struct rte_flow_item *item = arg->item; + struct filter_v2 *enic_filter = arg->filter; + const struct rte_flow_item_ipv4 *spec = item->spec; + const struct rte_flow_item_ipv4 *mask = item->mask; + struct filter_generic_1 *gp = &enic_filter->u.generic_1; + + ENICPMD_FUNC_TRACE(); + + /* Match IPv4 */ + gp->mask_flags |= FILTER_GENERIC_1_IPV4; + gp->val_flags |= FILTER_GENERIC_1_IPV4; + + /* Match all if no spec */ + if (!spec) + return 0; + + if (!mask) + mask = &rte_flow_item_ipv4_mask; + + memcpy(gp->layer[FILTER_GENERIC_1_L3].mask, &mask->hdr, + sizeof(struct rte_ipv4_hdr)); + memcpy(gp->layer[FILTER_GENERIC_1_L3].val, &spec->hdr, + sizeof(struct rte_ipv4_hdr)); + return 0; +} + +static int +enic_copy_item_ipv6_v2(struct copy_item_args *arg) +{ + const struct rte_flow_item *item = arg->item; + struct filter_v2 *enic_filter = arg->filter; + const struct rte_flow_item_ipv6 *spec = item->spec; + const struct rte_flow_item_ipv6 *mask = item->mask; + struct filter_generic_1 *gp = &enic_filter->u.generic_1; + + ENICPMD_FUNC_TRACE(); + + /* Match IPv6 */ + gp->mask_flags |= FILTER_GENERIC_1_IPV6; + gp->val_flags |= FILTER_GENERIC_1_IPV6; + + /* Match all if no spec */ + if (!spec) + return 0; + + if (!mask) + mask = &rte_flow_item_ipv6_mask; + + memcpy(gp->layer[FILTER_GENERIC_1_L3].mask, &mask->hdr, + sizeof(struct rte_ipv6_hdr)); + memcpy(gp->layer[FILTER_GENERIC_1_L3].val, &spec->hdr, + sizeof(struct rte_ipv6_hdr)); + return 0; +} + +static int +enic_copy_item_udp_v2(struct copy_item_args *arg) +{ + const struct rte_flow_item *item = arg->item; + struct filter_v2 *enic_filter = arg->filter; + const struct rte_flow_item_udp *spec = item->spec; + const struct rte_flow_item_udp *mask = item->mask; + struct filter_generic_1 *gp = &enic_filter->u.generic_1; + + ENICPMD_FUNC_TRACE(); + + /* Match UDP */ + gp->mask_flags |= FILTER_GENERIC_1_UDP; + gp->val_flags |= FILTER_GENERIC_1_UDP; + + /* Match all if no spec */ + if (!spec) + return 0; + + if (!mask) + mask = &rte_flow_item_udp_mask; + + memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr, + sizeof(struct rte_udp_hdr)); + memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr, + sizeof(struct rte_udp_hdr)); + return 0; +} + +static int +enic_copy_item_tcp_v2(struct copy_item_args *arg) +{ + const struct rte_flow_item *item = arg->item; + struct filter_v2 *enic_filter = arg->filter; + const struct rte_flow_item_tcp *spec = item->spec; + const struct rte_flow_item_tcp *mask = item->mask; + struct filter_generic_1 *gp = &enic_filter->u.generic_1; + + ENICPMD_FUNC_TRACE(); + + /* Match TCP */ + gp->mask_flags |= FILTER_GENERIC_1_TCP; + gp->val_flags |= FILTER_GENERIC_1_TCP; + + /* Match all if no spec */ + if (!spec) + return 0; + + if (!mask) + return ENOTSUP; + + memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr, + sizeof(struct rte_tcp_hdr)); + memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr, + sizeof(struct rte_tcp_hdr)); + return 0; +} + +static int +enic_copy_item_sctp_v2(struct copy_item_args *arg) +{ + const struct rte_flow_item *item = arg->item; + struct filter_v2 *enic_filter = arg->filter; + const struct rte_flow_item_sctp *spec = item->spec; + const struct rte_flow_item_sctp *mask = item->mask; + struct filter_generic_1 *gp = &enic_filter->u.generic_1; + uint8_t *ip_proto_mask = NULL; + uint8_t *ip_proto = NULL; + + ENICPMD_FUNC_TRACE(); 
+ + /* + * The NIC filter API has no flags for "match sctp", so explicitly set + * the protocol number in the IP pattern. + */ + if (gp->val_flags & FILTER_GENERIC_1_IPV4) { + struct rte_ipv4_hdr *ip; + ip = (struct rte_ipv4_hdr *)gp->layer[FILTER_GENERIC_1_L3].mask; + ip_proto_mask = &ip->next_proto_id; + ip = (struct rte_ipv4_hdr *)gp->layer[FILTER_GENERIC_1_L3].val; + ip_proto = &ip->next_proto_id; + } else if (gp->val_flags & FILTER_GENERIC_1_IPV6) { + struct rte_ipv6_hdr *ip; + ip = (struct rte_ipv6_hdr *)gp->layer[FILTER_GENERIC_1_L3].mask; + ip_proto_mask = &ip->proto; + ip = (struct rte_ipv6_hdr *)gp->layer[FILTER_GENERIC_1_L3].val; + ip_proto = &ip->proto; + } else { + /* Need IPv4/IPv6 pattern first */ + return EINVAL; + } + *ip_proto = IPPROTO_SCTP; + *ip_proto_mask = 0xff; + + /* Match all if no spec */ + if (!spec) + return 0; + + if (!mask) + mask = &rte_flow_item_sctp_mask; + + memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr, + sizeof(struct rte_sctp_hdr)); + memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr, + sizeof(struct rte_sctp_hdr)); + return 0; +} + +static int +enic_copy_item_vxlan_v2(struct copy_item_args *arg) +{ + const struct rte_flow_item *item = arg->item; + struct filter_v2 *enic_filter = arg->filter; + uint8_t *inner_ofst = arg->inner_ofst; + const struct rte_flow_item_vxlan *spec = item->spec; + const struct rte_flow_item_vxlan *mask = item->mask; + struct filter_generic_1 *gp = &enic_filter->u.generic_1; + struct rte_udp_hdr *udp; + + ENICPMD_FUNC_TRACE(); + + /* + * The NIC filter API has no flags for "match vxlan". Set UDP port to + * avoid false positives. + */ + gp->mask_flags |= FILTER_GENERIC_1_UDP; + gp->val_flags |= FILTER_GENERIC_1_UDP; + udp = (struct rte_udp_hdr *)gp->layer[FILTER_GENERIC_1_L4].mask; + udp->dst_port = 0xffff; + udp = (struct rte_udp_hdr *)gp->layer[FILTER_GENERIC_1_L4].val; + udp->dst_port = RTE_BE16(4789); + /* Match all if no spec */ + if (!spec) + return 0; + + if (!mask) + mask = &rte_flow_item_vxlan_mask; + + memcpy(gp->layer[FILTER_GENERIC_1_L5].mask, mask, + sizeof(struct rte_vxlan_hdr)); + memcpy(gp->layer[FILTER_GENERIC_1_L5].val, spec, + sizeof(struct rte_vxlan_hdr)); + + *inner_ofst = sizeof(struct rte_vxlan_hdr); + return 0; +} + +/* + * Copy raw item into version 2 NIC filter. Currently, raw pattern match is + * very limited. It is intended for matching UDP tunnel header (e.g. vxlan + * or geneve). + */ +static int +enic_copy_item_raw_v2(struct copy_item_args *arg) +{ + const struct rte_flow_item *item = arg->item; + struct filter_v2 *enic_filter = arg->filter; + uint8_t *inner_ofst = arg->inner_ofst; + const struct rte_flow_item_raw *spec = item->spec; + const struct rte_flow_item_raw *mask = item->mask; + struct filter_generic_1 *gp = &enic_filter->u.generic_1; + + ENICPMD_FUNC_TRACE(); + + /* Cannot be used for inner packet */ + if (*inner_ofst) + return EINVAL; + /* Need both spec and mask */ + if (!spec || !mask) + return EINVAL; + /* Only supports relative with offset 0 */ + if (!spec->relative || spec->offset != 0 || spec->search || spec->limit) + return EINVAL; + /* Need non-null pattern that fits within the NIC's filter pattern */ + if (spec->length == 0 || + spec->length + sizeof(struct rte_udp_hdr) > FILTER_GENERIC_1_KEY_LEN || + !spec->pattern || !mask->pattern) + return EINVAL; + /* + * Mask fields, including length, are often set to zero. Assume that + * means "same as spec" to avoid breaking existing apps. If length + * is not zero, then it should be >= spec length. 
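+	 *
+	 * For example, matching the 8-byte GENEVE base header with a raw
+	 * item would use relative = 1, offset = 0, length = 8, and
+	 * (typically) a zero mask length, meaning "same as spec".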
+ * + * No more pattern follows this, so append to the L4 layer instead of + * L5 to work with both recent and older VICs. + */ + if (mask->length != 0 && mask->length < spec->length) + return EINVAL; + memcpy(gp->layer[FILTER_GENERIC_1_L4].mask + sizeof(struct rte_udp_hdr), + mask->pattern, spec->length); + memcpy(gp->layer[FILTER_GENERIC_1_L4].val + sizeof(struct rte_udp_hdr), + spec->pattern, spec->length); + + return 0; +} + +/** + * Return 1 if current item is valid on top of the previous one. + * + * @param prev_item[in] + * The item before this one in the pattern or RTE_FLOW_ITEM_TYPE_END if this + * is the first item. + * @param item_info[in] + * Info about this item, like valid previous items. + * @param is_first[in] + * True if this the first item in the pattern. + */ +static int +item_stacking_valid(enum rte_flow_item_type prev_item, + const struct enic_items *item_info, uint8_t is_first_item) +{ + enum rte_flow_item_type const *allowed_items = item_info->prev_items; + + ENICPMD_FUNC_TRACE(); + + for (; *allowed_items != RTE_FLOW_ITEM_TYPE_END; allowed_items++) { + if (prev_item == *allowed_items) + return 1; + } + + /* This is the first item in the stack. Check if that's cool */ + if (is_first_item && item_info->valid_start_item) + return 1; + + return 0; +} + +/* + * Fix up the L5 layer.. HW vxlan parsing removes vxlan header from L5. + * Instead it is in L4 following the UDP header. Append the vxlan + * pattern to L4 (udp) and shift any inner packet pattern in L5. + */ +static void +fixup_l5_layer(struct enic *enic, struct filter_generic_1 *gp, + uint8_t inner_ofst) +{ + uint8_t layer[FILTER_GENERIC_1_KEY_LEN]; + uint8_t inner; + uint8_t vxlan; + + if (!(inner_ofst > 0 && enic->vxlan)) + return; + ENICPMD_FUNC_TRACE(); + vxlan = sizeof(struct rte_vxlan_hdr); + memcpy(gp->layer[FILTER_GENERIC_1_L4].mask + sizeof(struct rte_udp_hdr), + gp->layer[FILTER_GENERIC_1_L5].mask, vxlan); + memcpy(gp->layer[FILTER_GENERIC_1_L4].val + sizeof(struct rte_udp_hdr), + gp->layer[FILTER_GENERIC_1_L5].val, vxlan); + inner = inner_ofst - vxlan; + memset(layer, 0, sizeof(layer)); + memcpy(layer, gp->layer[FILTER_GENERIC_1_L5].mask + vxlan, inner); + memcpy(gp->layer[FILTER_GENERIC_1_L5].mask, layer, sizeof(layer)); + memset(layer, 0, sizeof(layer)); + memcpy(layer, gp->layer[FILTER_GENERIC_1_L5].val + vxlan, inner); + memcpy(gp->layer[FILTER_GENERIC_1_L5].val, layer, sizeof(layer)); +} + +/** + * Build the intenal enic filter structure from the provided pattern. The + * pattern is validated as the items are copied. + * + * @param pattern[in] + * @param items_info[in] + * Info about this NICs item support, like valid previous items. + * @param enic_filter[out] + * NIC specfilc filters derived from the pattern. + * @param error[out] + */ +static int +enic_copy_filter(const struct rte_flow_item pattern[], + const struct enic_filter_cap *cap, + struct enic *enic, + struct filter_v2 *enic_filter, + struct rte_flow_error *error) +{ + int ret; + const struct rte_flow_item *item = pattern; + uint8_t inner_ofst = 0; /* If encapsulated, ofst into L5 */ + enum rte_flow_item_type prev_item; + const struct enic_items *item_info; + struct copy_item_args args; + enic_copy_item_fn *copy_fn; + uint8_t is_first_item = 1; + + ENICPMD_FUNC_TRACE(); + + prev_item = 0; + + args.filter = enic_filter; + args.inner_ofst = &inner_ofst; + args.enic = enic; + for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { + /* Get info about how to validate and copy the item. 
If NULL + * is returned the nic does not support the item. + */ + if (item->type == RTE_FLOW_ITEM_TYPE_VOID) + continue; + + item_info = &cap->item_info[item->type]; + if (item->type > cap->max_item_type || + item_info->copy_item == NULL || + (inner_ofst > 0 && item_info->inner_copy_item == NULL)) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + NULL, "Unsupported item."); + return -rte_errno; + } + + /* check to see if item stacking is valid */ + if (!item_stacking_valid(prev_item, item_info, is_first_item)) + goto stacking_error; + + args.item = item; + copy_fn = inner_ofst > 0 ? item_info->inner_copy_item : + item_info->copy_item; + ret = copy_fn(&args); + if (ret) + goto item_not_supported; + prev_item = item->type; + is_first_item = 0; + } + fixup_l5_layer(enic, &enic_filter->u.generic_1, inner_ofst); + + return 0; + +item_not_supported: + rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_ITEM, + NULL, "enic type error"); + return -rte_errno; + +stacking_error: + rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, + item, "stacking error"); + return -rte_errno; +} + +/** + * Build the intenal version 1 NIC action structure from the provided pattern. + * The pattern is validated as the items are copied. + * + * @param actions[in] + * @param enic_action[out] + * NIC specfilc actions derived from the actions. + * @param error[out] + */ +static int +enic_copy_action_v1(__rte_unused struct enic *enic, + const struct rte_flow_action actions[], + struct filter_action_v2 *enic_action) +{ + enum { FATE = 1, }; + uint32_t overlap = 0; + + ENICPMD_FUNC_TRACE(); + + for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { + if (actions->type == RTE_FLOW_ACTION_TYPE_VOID) + continue; + + switch (actions->type) { + case RTE_FLOW_ACTION_TYPE_QUEUE: { + const struct rte_flow_action_queue *queue = + (const struct rte_flow_action_queue *) + actions->conf; + + if (overlap & FATE) + return ENOTSUP; + overlap |= FATE; + enic_action->rq_idx = + enic_rte_rq_idx_to_sop_idx(queue->index); + break; + } + default: + RTE_ASSERT(0); + break; + } + } + if (!(overlap & FATE)) + return ENOTSUP; + enic_action->type = FILTER_ACTION_RQ_STEERING; + return 0; +} + +/** + * Build the intenal version 2 NIC action structure from the provided pattern. + * The pattern is validated as the items are copied. + * + * @param actions[in] + * @param enic_action[out] + * NIC specfilc actions derived from the actions. + * @param error[out] + */ +static int +enic_copy_action_v2(struct enic *enic, + const struct rte_flow_action actions[], + struct filter_action_v2 *enic_action) +{ + enum { FATE = 1, MARK = 2, }; + uint32_t overlap = 0; + bool passthru = false; + + ENICPMD_FUNC_TRACE(); + + for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { + switch (actions->type) { + case RTE_FLOW_ACTION_TYPE_QUEUE: { + const struct rte_flow_action_queue *queue = + (const struct rte_flow_action_queue *) + actions->conf; + + if (overlap & FATE) + return ENOTSUP; + overlap |= FATE; + enic_action->rq_idx = + enic_rte_rq_idx_to_sop_idx(queue->index); + enic_action->flags |= FILTER_ACTION_RQ_STEERING_FLAG; + break; + } + case RTE_FLOW_ACTION_TYPE_MARK: { + const struct rte_flow_action_mark *mark = + (const struct rte_flow_action_mark *) + actions->conf; + + if (overlap & MARK) + return ENOTSUP; + overlap |= MARK; + /* + * Map mark ID (32-bit) to filter ID (16-bit): + * - Reject values > 16 bits + * - Filter ID 0 is reserved for filters that steer + * but not mark. So add 1 to the mark ID to avoid + * using 0. 
+ * - Filter ID (ENIC_MAGIC_FILTER_ID = 0xffff) is + * reserved for the "flag" action below. + */ + if (mark->id >= ENIC_MAGIC_FILTER_ID - 1) + return EINVAL; + enic_action->filter_id = mark->id + 1; + enic_action->flags |= FILTER_ACTION_FILTER_ID_FLAG; + break; + } + case RTE_FLOW_ACTION_TYPE_FLAG: { + if (overlap & MARK) + return ENOTSUP; + overlap |= MARK; + /* ENIC_MAGIC_FILTER_ID is reserved for flagging */ + enic_action->filter_id = ENIC_MAGIC_FILTER_ID; + enic_action->flags |= FILTER_ACTION_FILTER_ID_FLAG; + break; + } + case RTE_FLOW_ACTION_TYPE_DROP: { + if (overlap & FATE) + return ENOTSUP; + overlap |= FATE; + enic_action->flags |= FILTER_ACTION_DROP_FLAG; + break; + } + case RTE_FLOW_ACTION_TYPE_RSS: { + const struct rte_flow_action_rss *rss = + (const struct rte_flow_action_rss *) + actions->conf; + bool allow; + uint16_t i; + + /* + * Hardware does not support general RSS actions, but + * we can still support the dummy one that is used to + * "receive normally". + */ + allow = rss->func == RTE_ETH_HASH_FUNCTION_DEFAULT && + rss->level == 0 && + (rss->types == 0 || + rss->types == enic->rss_hf) && + rss->queue_num == enic->rq_count && + rss->key_len == 0; + /* Identity queue map is ok */ + for (i = 0; i < rss->queue_num; i++) + allow = allow && (i == rss->queue[i]); + if (!allow) + return ENOTSUP; + if (overlap & FATE) + return ENOTSUP; + /* Need MARK or FLAG */ + if (!(overlap & MARK)) + return ENOTSUP; + overlap |= FATE; + break; + } + case RTE_FLOW_ACTION_TYPE_PASSTHRU: { + /* + * Like RSS above, PASSTHRU + MARK may be used to + * "mark and then receive normally". MARK usually comes + * after PASSTHRU, so remember we have seen passthru + * and check for mark later. + */ + if (overlap & FATE) + return ENOTSUP; + overlap |= FATE; + passthru = true; + break; + } + case RTE_FLOW_ACTION_TYPE_VOID: + continue; + default: + RTE_ASSERT(0); + break; + } + } + /* Only PASSTHRU + MARK is allowed */ + if (passthru && !(overlap & MARK)) + return ENOTSUP; + if (!(overlap & FATE)) + return ENOTSUP; + enic_action->type = FILTER_ACTION_V2; + return 0; +} + +/** Check if the action is supported */ +static int +enic_match_action(const struct rte_flow_action *action, + const enum rte_flow_action_type *supported_actions) +{ + for (; *supported_actions != RTE_FLOW_ACTION_TYPE_END; + supported_actions++) { + if (action->type == *supported_actions) + return 1; + } + return 0; +} + +/** Get the NIC filter capabilties structure */ +static const struct enic_filter_cap * +enic_get_filter_cap(struct enic *enic) +{ + if (enic->flow_filter_mode) + return &enic_filter_cap[enic->flow_filter_mode]; + + return NULL; +} + +/** Get the actions for this NIC version. */ +static const struct enic_action_cap * +enic_get_action_cap(struct enic *enic) +{ + const struct enic_action_cap *ea; + uint8_t actions; + + actions = enic->filter_actions; + if (actions & FILTER_ACTION_DROP_FLAG) + ea = &enic_action_cap[FILTER_ACTION_DROP_FLAG]; + else if (actions & FILTER_ACTION_FILTER_ID_FLAG) + ea = &enic_action_cap[FILTER_ACTION_FILTER_ID_FLAG]; + else + ea = &enic_action_cap[FILTER_ACTION_RQ_STEERING_FLAG]; + return ea; +} + +/* Debug function to dump internal NIC action structure. 
*/ +static void +enic_dump_actions(const struct filter_action_v2 *ea) +{ + if (ea->type == FILTER_ACTION_RQ_STEERING) { + ENICPMD_LOG(INFO, "Action(V1), queue: %u\n", ea->rq_idx); + } else if (ea->type == FILTER_ACTION_V2) { + ENICPMD_LOG(INFO, "Actions(V2)\n"); + if (ea->flags & FILTER_ACTION_RQ_STEERING_FLAG) + ENICPMD_LOG(INFO, "\tqueue: %u\n", + enic_sop_rq_idx_to_rte_idx(ea->rq_idx)); + if (ea->flags & FILTER_ACTION_FILTER_ID_FLAG) + ENICPMD_LOG(INFO, "\tfilter_id: %u\n", ea->filter_id); + } +} + +/* Debug function to dump internal NIC filter structure. */ +static void +enic_dump_filter(const struct filter_v2 *filt) +{ + const struct filter_generic_1 *gp; + int i, j, mbyte; + char buf[128], *bp; + char ip4[16], ip6[16], udp[16], tcp[16], tcpudp[16], ip4csum[16]; + char l4csum[16], ipfrag[16]; + + switch (filt->type) { + case FILTER_IPV4_5TUPLE: + ENICPMD_LOG(INFO, "FILTER_IPV4_5TUPLE\n"); + break; + case FILTER_USNIC_IP: + case FILTER_DPDK_1: + /* FIXME: this should be a loop */ + gp = &filt->u.generic_1; + ENICPMD_LOG(INFO, "Filter: vlan: 0x%04x, mask: 0x%04x\n", + gp->val_vlan, gp->mask_vlan); + + if (gp->mask_flags & FILTER_GENERIC_1_IPV4) + sprintf(ip4, "%s ", + (gp->val_flags & FILTER_GENERIC_1_IPV4) + ? "ip4(y)" : "ip4(n)"); + else + sprintf(ip4, "%s ", "ip4(x)"); + + if (gp->mask_flags & FILTER_GENERIC_1_IPV6) + sprintf(ip6, "%s ", + (gp->val_flags & FILTER_GENERIC_1_IPV4) + ? "ip6(y)" : "ip6(n)"); + else + sprintf(ip6, "%s ", "ip6(x)"); + + if (gp->mask_flags & FILTER_GENERIC_1_UDP) + sprintf(udp, "%s ", + (gp->val_flags & FILTER_GENERIC_1_UDP) + ? "udp(y)" : "udp(n)"); + else + sprintf(udp, "%s ", "udp(x)"); + + if (gp->mask_flags & FILTER_GENERIC_1_TCP) + sprintf(tcp, "%s ", + (gp->val_flags & FILTER_GENERIC_1_TCP) + ? "tcp(y)" : "tcp(n)"); + else + sprintf(tcp, "%s ", "tcp(x)"); + + if (gp->mask_flags & FILTER_GENERIC_1_TCP_OR_UDP) + sprintf(tcpudp, "%s ", + (gp->val_flags & FILTER_GENERIC_1_TCP_OR_UDP) + ? "tcpudp(y)" : "tcpudp(n)"); + else + sprintf(tcpudp, "%s ", "tcpudp(x)"); + + if (gp->mask_flags & FILTER_GENERIC_1_IP4SUM_OK) + sprintf(ip4csum, "%s ", + (gp->val_flags & FILTER_GENERIC_1_IP4SUM_OK) + ? "ip4csum(y)" : "ip4csum(n)"); + else + sprintf(ip4csum, "%s ", "ip4csum(x)"); + + if (gp->mask_flags & FILTER_GENERIC_1_L4SUM_OK) + sprintf(l4csum, "%s ", + (gp->val_flags & FILTER_GENERIC_1_L4SUM_OK) + ? "l4csum(y)" : "l4csum(n)"); + else + sprintf(l4csum, "%s ", "l4csum(x)"); + + if (gp->mask_flags & FILTER_GENERIC_1_IPFRAG) + sprintf(ipfrag, "%s ", + (gp->val_flags & FILTER_GENERIC_1_IPFRAG) + ? "ipfrag(y)" : "ipfrag(n)"); + else + sprintf(ipfrag, "%s ", "ipfrag(x)"); + ENICPMD_LOG(INFO, "\tFlags: %s%s%s%s%s%s%s%s\n", ip4, ip6, udp, + tcp, tcpudp, ip4csum, l4csum, ipfrag); + + for (i = 0; i < FILTER_GENERIC_1_NUM_LAYERS; i++) { + mbyte = FILTER_GENERIC_1_KEY_LEN - 1; + while (mbyte && !gp->layer[i].mask[mbyte]) + mbyte--; + if (mbyte == 0) + continue; + + bp = buf; + for (j = 0; j <= mbyte; j++) { + sprintf(bp, "%02x", + gp->layer[i].mask[j]); + bp += 2; + } + *bp = '\0'; + ENICPMD_LOG(INFO, "\tL%u mask: %s\n", i + 2, buf); + bp = buf; + for (j = 0; j <= mbyte; j++) { + sprintf(bp, "%02x", + gp->layer[i].val[j]); + bp += 2; + } + *bp = '\0'; + ENICPMD_LOG(INFO, "\tL%u val: %s\n", i + 2, buf); + } + break; + default: + ENICPMD_LOG(INFO, "FILTER UNKNOWN\n"); + break; + } +} + +/* Debug function to dump internal NIC flow structures. 
*/ +static void +enic_dump_flow(const struct filter_action_v2 *ea, const struct filter_v2 *filt) +{ + enic_dump_filter(filt); + enic_dump_actions(ea); +} + + +/** + * Internal flow parse/validate function. + * + * @param dev[in] + * This device pointer. + * @param pattern[in] + * @param actions[in] + * @param error[out] + * @param enic_filter[out] + * Internal NIC filter structure pointer. + * @param enic_action[out] + * Internal NIC action structure pointer. + */ +static int +enic_flow_parse(struct rte_eth_dev *dev, + const struct rte_flow_attr *attrs, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error, + struct filter_v2 *enic_filter, + struct filter_action_v2 *enic_action) +{ + unsigned int ret = 0; + struct enic *enic = pmd_priv(dev); + const struct enic_filter_cap *enic_filter_cap; + const struct enic_action_cap *enic_action_cap; + const struct rte_flow_action *action; + + ENICPMD_FUNC_TRACE(); + + memset(enic_filter, 0, sizeof(*enic_filter)); + memset(enic_action, 0, sizeof(*enic_action)); + + if (!pattern) { + rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM, + NULL, "No pattern specified"); + return -rte_errno; + } + + if (!actions) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_NUM, + NULL, "No action specified"); + return -rte_errno; + } + + if (attrs) { + if (attrs->group) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_GROUP, + NULL, + "priority groups are not supported"); + return -rte_errno; + } else if (attrs->priority) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, + NULL, + "priorities are not supported"); + return -rte_errno; + } else if (attrs->egress) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, + NULL, + "egress is not supported"); + return -rte_errno; + } else if (attrs->transfer) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, + NULL, + "transfer is not supported"); + return -rte_errno; + } else if (!attrs->ingress) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, + NULL, + "only ingress is supported"); + return -rte_errno; + } + + } else { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR, + NULL, "No attribute specified"); + return -rte_errno; + } + + /* Verify Actions. */ + enic_action_cap = enic_get_action_cap(enic); + for (action = &actions[0]; action->type != RTE_FLOW_ACTION_TYPE_END; + action++) { + if (action->type == RTE_FLOW_ACTION_TYPE_VOID) + continue; + else if (!enic_match_action(action, enic_action_cap->actions)) + break; + } + if (action->type != RTE_FLOW_ACTION_TYPE_END) { + rte_flow_error_set(error, EPERM, RTE_FLOW_ERROR_TYPE_ACTION, + action, "Invalid action."); + return -rte_errno; + } + ret = enic_action_cap->copy_fn(enic, actions, enic_action); + if (ret) { + rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE, + NULL, "Unsupported action."); + return -rte_errno; + } + + /* Verify Flow items. If copying the filter from flow format to enic + * format fails, the flow is not supported + */ + enic_filter_cap = enic_get_filter_cap(enic); + if (enic_filter_cap == NULL) { + rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE, + NULL, "Flow API not available"); + return -rte_errno; + } + enic_filter->type = enic->flow_filter_mode; + ret = enic_copy_filter(pattern, enic_filter_cap, enic, + enic_filter, error); + return ret; +} + +/** + * Push filter/action to the NIC. 
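+ * On success, the filter ID returned by the classifier is stored in the
+ * rte_flow handle so the flow can be deleted later.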
+ * + * @param enic[in] + * Device structure pointer. + * @param enic_filter[in] + * Internal NIC filter structure pointer. + * @param enic_action[in] + * Internal NIC action structure pointer. + * @param error[out] + */ +static struct rte_flow * +enic_flow_add_filter(struct enic *enic, struct filter_v2 *enic_filter, + struct filter_action_v2 *enic_action, + struct rte_flow_error *error) +{ + struct rte_flow *flow; + int err; + uint16_t entry; + + ENICPMD_FUNC_TRACE(); + + flow = rte_calloc(__func__, 1, sizeof(*flow), 0); + if (!flow) { + rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE, + NULL, "cannot allocate flow memory"); + return NULL; + } + + /* entry[in] is the queue id, entry[out] is the filter Id for delete */ + entry = enic_action->rq_idx; + err = vnic_dev_classifier(enic->vdev, CLSF_ADD, &entry, enic_filter, + enic_action); + if (err) { + rte_flow_error_set(error, -err, RTE_FLOW_ERROR_TYPE_HANDLE, + NULL, "vnic_dev_classifier error"); + rte_free(flow); + return NULL; + } + + flow->enic_filter_id = entry; + flow->enic_filter = *enic_filter; + return flow; +} + +/** + * Remove filter/action from the NIC. + * + * @param enic[in] + * Device structure pointer. + * @param filter_id[in] + * Id of NIC filter. + * @param enic_action[in] + * Internal NIC action structure pointer. + * @param error[out] + */ +static int +enic_flow_del_filter(struct enic *enic, struct rte_flow *flow, + struct rte_flow_error *error) +{ + uint16_t filter_id; + int err; + + ENICPMD_FUNC_TRACE(); + + filter_id = flow->enic_filter_id; + err = vnic_dev_classifier(enic->vdev, CLSF_DEL, &filter_id, NULL, NULL); + if (err) { + rte_flow_error_set(error, -err, RTE_FLOW_ERROR_TYPE_HANDLE, + NULL, "vnic_dev_classifier failed"); + return -err; + } + return 0; +} + +/* + * The following functions are callbacks for Generic flow API. + */ + +/** + * Validate a flow supported by the NIC. + * + * @see rte_flow_validate() + * @see rte_flow_ops + */ +static int +enic_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attrs, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + struct filter_v2 enic_filter; + struct filter_action_v2 enic_action; + int ret; + + ENICPMD_FUNC_TRACE(); + + ret = enic_flow_parse(dev, attrs, pattern, actions, error, + &enic_filter, &enic_action); + if (!ret) + enic_dump_flow(&enic_action, &enic_filter); + return ret; +} + +/** + * Create a flow supported by the NIC. + * + * @see rte_flow_create() + * @see rte_flow_ops + */ +static struct rte_flow * +enic_flow_create(struct rte_eth_dev *dev, + const struct rte_flow_attr *attrs, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + int ret; + struct filter_v2 enic_filter; + struct filter_action_v2 enic_action; + struct rte_flow *flow; + struct enic *enic = pmd_priv(dev); + + ENICPMD_FUNC_TRACE(); + + ret = enic_flow_parse(dev, attrs, pattern, actions, error, &enic_filter, + &enic_action); + if (ret < 0) + return NULL; + + flow = enic_flow_add_filter(enic, &enic_filter, &enic_action, + error); + if (flow) + LIST_INSERT_HEAD(&enic->flows, flow, next); + + return flow; +} + +/** + * Destroy a flow supported by the NIC. 
+ * + * @see rte_flow_destroy() + * @see rte_flow_ops + */ +static int +enic_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow, + __rte_unused struct rte_flow_error *error) +{ + struct enic *enic = pmd_priv(dev); + + ENICPMD_FUNC_TRACE(); + + enic_flow_del_filter(enic, flow, error); + LIST_REMOVE(flow, next); + rte_free(flow); + return 0; +} + +/** + * Flush all flows on the device. + * + * @see rte_flow_flush() + * @see rte_flow_ops + */ +static int +enic_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error) +{ + struct rte_flow *flow; + struct enic *enic = pmd_priv(dev); + + ENICPMD_FUNC_TRACE(); + + + while (!LIST_EMPTY(&enic->flows)) { + flow = LIST_FIRST(&enic->flows); + enic_flow_del_filter(enic, flow, error); + LIST_REMOVE(flow, next); + rte_free(flow); + } + return 0; +} + +/** + * Flow callback registration. + * + * @see rte_flow_ops + */ +const struct rte_flow_ops enic_flow_ops = { + .validate = enic_flow_validate, + .create = enic_flow_create, + .destroy = enic_flow_destroy, + .flush = enic_flow_flush, +}; diff --git a/src/spdk/dpdk/drivers/net/enic/enic_fm_flow.c b/src/spdk/dpdk/drivers/net/enic/enic_fm_flow.c new file mode 100644 index 000000000..6ee022437 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/enic/enic_fm_flow.c @@ -0,0 +1,2463 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2008-2019 Cisco Systems, Inc. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "enic_compat.h" +#include "enic.h" +#include "vnic_dev.h" +#include "vnic_nic.h" + +#define IP_DEFTTL 64 /* from RFC 1340. */ +#define IP6_VTC_FLOW 0x60000000 + +/* Highest Item type supported by Flowman */ +#define FM_MAX_ITEM_TYPE RTE_FLOW_ITEM_TYPE_VXLAN + +/* Up to 1024 TCAM entries */ +#define FM_MAX_TCAM_TABLE_SIZE 1024 + +/* Up to 4096 entries per exact match table */ +#define FM_MAX_EXACT_TABLE_SIZE 4096 + +/* Number of counters to increase on for each increment */ +#define FM_COUNTERS_EXPAND 100 + +#define FM_INVALID_HANDLE 0 + +/* + * Flow exact match tables (FET) in the VIC and rte_flow groups. + * Use a simple scheme to map groups to tables. + * Group 0 uses the single TCAM tables, one for each direction. + * Group 1, 2, ... uses its own exact match table. + * + * The TCAM tables are allocated upfront during init. + * + * Exact match tables are allocated on demand. 3 paths that lead allocations. + * + * 1. Add a flow that jumps from group 0 to group N. + * + * If N does not exist, we allocate an exact match table for it, using + * a dummy key. A key is required for the table. + * + * 2. Add a flow that uses group N. + * + * If N does not exist, we allocate an exact match table for it, using + * the flow's key. Subsequent flows to the same group all should have + * the same key. + * + * Without a jump flow to N, N is not reachable in hardware. No packets + * reach N and match. + * + * 3. Add a flow to an empty group N. + * + * N has been created via (1) and the dummy key. We free that table, allocate + * a new table using the new flow's key. Also re-do the existing jump flow to + * point to the new table. 
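+ *
+ * Illustrative example: creating "group 0: pattern A -> jump group 2"
+ * before any group 2 flow exists allocates the group 2 table with the
+ * dummy key (case 1). The first flow later added to group 2 replaces
+ * that table with one keyed on the new flow's match fields and
+ * re-points the jump flow at it (case 3).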
+ */ +#define FM_TCAM_RTE_GROUP 0 + +struct enic_fm_fet { + TAILQ_ENTRY(enic_fm_fet) list; + uint32_t group; /* rte_flow group ID */ + uint64_t handle; /* Exact match table handle from flowman */ + uint8_t ingress; + uint8_t default_key; + int ref; /* Reference count via get/put */ + struct fm_key_template key; /* Key associated with the table */ +}; + +struct enic_fm_counter { + SLIST_ENTRY(enic_fm_counter) next; + uint32_t handle; +}; + +/* rte_flow.fm */ +struct enic_fm_flow { + bool counter_valid; + uint64_t entry_handle; + uint64_t action_handle; + struct enic_fm_counter *counter; + struct enic_fm_fet *fet; +}; + +struct enic_fm_jump_flow { + TAILQ_ENTRY(enic_fm_jump_flow) list; + struct rte_flow *flow; + uint32_t group; + struct fm_tcam_match_entry match; + struct fm_action action; +}; + +/* + * Flowman uses host memory for commands. This structure is allocated + * in DMA-able memory. + */ +union enic_flowman_cmd_mem { + struct fm_tcam_match_table fm_tcam_match_table; + struct fm_exact_match_table fm_exact_match_table; + struct fm_tcam_match_entry fm_tcam_match_entry; + struct fm_exact_match_entry fm_exact_match_entry; + struct fm_action fm_action; +}; + +struct enic_flowman { + struct enic *enic; + /* Command buffer */ + struct { + union enic_flowman_cmd_mem *va; + dma_addr_t pa; + } cmd; + /* TCAM tables allocated upfront, used for group 0 */ + uint64_t ig_tcam_hndl; + uint64_t eg_tcam_hndl; + /* Counters */ + SLIST_HEAD(enic_free_counters, enic_fm_counter) counters; + void *counter_stack; + uint32_t counters_alloced; + /* Exact match tables for groups != 0, dynamically allocated */ + TAILQ_HEAD(fet_list, enic_fm_fet) fet_list; + /* + * Default exact match tables used for jump actions to + * non-existent groups. + */ + struct enic_fm_fet *default_eg_fet; + struct enic_fm_fet *default_ig_fet; + /* Flows that jump to the default table above */ + TAILQ_HEAD(jump_flow_list, enic_fm_jump_flow) jump_list; + /* + * Scratch data used during each invocation of flow_create + * and flow_validate. + */ + struct enic_fm_fet *fet; + struct fm_tcam_match_entry tcam_entry; + struct fm_action action; + struct fm_action action_tmp; /* enic_fm_reorder_action_op */ + int action_op_count; +}; + +static int enic_fm_tbl_free(struct enic_flowman *fm, uint64_t handle); + +/* + * Common arguments passed to copy_item functions. Use this structure + * so we can easily add new arguments. + * item: Item specification. + * fm_tcam_entry: Flowman TCAM match entry. + * header_level: 0 for outer header, 1 for inner header. + */ +struct copy_item_args { + const struct rte_flow_item *item; + struct fm_tcam_match_entry *fm_tcam_entry; + uint8_t header_level; +}; + +/* functions for copying items into flowman match */ +typedef int (enic_copy_item_fn)(struct copy_item_args *arg); + +/* Info about how to copy items into flowman match */ +struct enic_fm_items { + /* Function for copying and validating an item. */ + enic_copy_item_fn * const copy_item; + /* List of valid previous items. */ + const enum rte_flow_item_type * const prev_items; + /* + * True if it's OK for this item to be the first item. For some NIC + * versions, it's invalid to start the stack above layer 3. 
+ */ + const uint8_t valid_start_item; +}; + +static enic_copy_item_fn enic_fm_copy_item_eth; +static enic_copy_item_fn enic_fm_copy_item_ipv4; +static enic_copy_item_fn enic_fm_copy_item_ipv6; +static enic_copy_item_fn enic_fm_copy_item_raw; +static enic_copy_item_fn enic_fm_copy_item_sctp; +static enic_copy_item_fn enic_fm_copy_item_tcp; +static enic_copy_item_fn enic_fm_copy_item_udp; +static enic_copy_item_fn enic_fm_copy_item_vlan; +static enic_copy_item_fn enic_fm_copy_item_vxlan; + +/* Ingress actions */ +static const enum rte_flow_action_type enic_fm_supported_ig_actions[] = { + RTE_FLOW_ACTION_TYPE_COUNT, + RTE_FLOW_ACTION_TYPE_DROP, + RTE_FLOW_ACTION_TYPE_FLAG, + RTE_FLOW_ACTION_TYPE_JUMP, + RTE_FLOW_ACTION_TYPE_MARK, + RTE_FLOW_ACTION_TYPE_PORT_ID, + RTE_FLOW_ACTION_TYPE_PASSTHRU, + RTE_FLOW_ACTION_TYPE_QUEUE, + RTE_FLOW_ACTION_TYPE_RSS, + RTE_FLOW_ACTION_TYPE_VOID, + RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP, + RTE_FLOW_ACTION_TYPE_VXLAN_DECAP, + RTE_FLOW_ACTION_TYPE_END, /* END must be the last entry */ +}; + +/* Egress actions */ +static const enum rte_flow_action_type enic_fm_supported_eg_actions[] = { + RTE_FLOW_ACTION_TYPE_COUNT, + RTE_FLOW_ACTION_TYPE_DROP, + RTE_FLOW_ACTION_TYPE_JUMP, + RTE_FLOW_ACTION_TYPE_PASSTHRU, + RTE_FLOW_ACTION_TYPE_VOID, + RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP, + RTE_FLOW_ACTION_TYPE_END, +}; + +static const struct enic_fm_items enic_fm_items[] = { + [RTE_FLOW_ITEM_TYPE_RAW] = { + .copy_item = enic_fm_copy_item_raw, + .valid_start_item = 0, + .prev_items = (const enum rte_flow_item_type[]) { + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_END, + }, + }, + [RTE_FLOW_ITEM_TYPE_ETH] = { + .copy_item = enic_fm_copy_item_eth, + .valid_start_item = 1, + .prev_items = (const enum rte_flow_item_type[]) { + RTE_FLOW_ITEM_TYPE_END, + }, + }, + [RTE_FLOW_ITEM_TYPE_VLAN] = { + .copy_item = enic_fm_copy_item_vlan, + .valid_start_item = 1, + .prev_items = (const enum rte_flow_item_type[]) { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_END, + }, + }, + [RTE_FLOW_ITEM_TYPE_IPV4] = { + .copy_item = enic_fm_copy_item_ipv4, + .valid_start_item = 1, + .prev_items = (const enum rte_flow_item_type[]) { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_END, + }, + }, + [RTE_FLOW_ITEM_TYPE_IPV6] = { + .copy_item = enic_fm_copy_item_ipv6, + .valid_start_item = 1, + .prev_items = (const enum rte_flow_item_type[]) { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_END, + }, + }, + [RTE_FLOW_ITEM_TYPE_UDP] = { + .copy_item = enic_fm_copy_item_udp, + .valid_start_item = 1, + .prev_items = (const enum rte_flow_item_type[]) { + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_END, + }, + }, + [RTE_FLOW_ITEM_TYPE_TCP] = { + .copy_item = enic_fm_copy_item_tcp, + .valid_start_item = 1, + .prev_items = (const enum rte_flow_item_type[]) { + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_END, + }, + }, + [RTE_FLOW_ITEM_TYPE_SCTP] = { + .copy_item = enic_fm_copy_item_sctp, + .valid_start_item = 0, + .prev_items = (const enum rte_flow_item_type[]) { + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_END, + }, + }, + [RTE_FLOW_ITEM_TYPE_VXLAN] = { + .copy_item = enic_fm_copy_item_vxlan, + .valid_start_item = 1, + .prev_items = (const enum rte_flow_item_type[]) { + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_END, + }, + }, +}; + +static int +enic_fm_copy_item_eth(struct copy_item_args *arg) +{ + const struct rte_flow_item *item = arg->item; + const struct rte_flow_item_eth 
*spec = item->spec; + const struct rte_flow_item_eth *mask = item->mask; + const uint8_t lvl = arg->header_level; + struct fm_tcam_match_entry *entry = arg->fm_tcam_entry; + struct fm_header_set *fm_data, *fm_mask; + + ENICPMD_FUNC_TRACE(); + /* Match all if no spec */ + if (!spec) + return 0; + if (!mask) + mask = &rte_flow_item_eth_mask; + fm_data = &entry->ftm_data.fk_hdrset[lvl]; + fm_mask = &entry->ftm_mask.fk_hdrset[lvl]; + fm_data->fk_header_select |= FKH_ETHER; + fm_mask->fk_header_select |= FKH_ETHER; + memcpy(&fm_data->l2.eth, spec, sizeof(*spec)); + memcpy(&fm_mask->l2.eth, mask, sizeof(*mask)); + return 0; +} + +static int +enic_fm_copy_item_vlan(struct copy_item_args *arg) +{ + const struct rte_flow_item *item = arg->item; + const struct rte_flow_item_vlan *spec = item->spec; + const struct rte_flow_item_vlan *mask = item->mask; + const uint8_t lvl = arg->header_level; + struct fm_tcam_match_entry *entry = arg->fm_tcam_entry; + struct fm_header_set *fm_data, *fm_mask; + struct rte_ether_hdr *eth_mask; + struct rte_ether_hdr *eth_val; + uint32_t meta; + + ENICPMD_FUNC_TRACE(); + fm_data = &entry->ftm_data.fk_hdrset[lvl]; + fm_mask = &entry->ftm_mask.fk_hdrset[lvl]; + /* Outer and inner packet vlans need different flags */ + meta = FKM_VLAN_PRES; + if (lvl > 0) + meta = FKM_QTAG; + fm_data->fk_metadata |= meta; + fm_mask->fk_metadata |= meta; + + /* Match all if no spec */ + if (!spec) + return 0; + if (!mask) + mask = &rte_flow_item_vlan_mask; + + eth_mask = (void *)&fm_mask->l2.eth; + eth_val = (void *)&fm_data->l2.eth; + + /* Outer TPID cannot be matched */ + if (eth_mask->ether_type) + return -ENOTSUP; + + /* + * When packet matching, the VIC always compares vlan-stripped + * L2, regardless of vlan stripping settings. So, the inner type + * from vlan becomes the ether type of the eth header. 
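+	 * For example, a vlan item with inner_type 0x0800 (IPv4) is matched
+	 * against the ether_type field of the stripped L2 header.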
+ */ + eth_mask->ether_type = mask->inner_type; + eth_val->ether_type = spec->inner_type; + fm_data->fk_header_select |= FKH_ETHER | FKH_QTAG; + fm_mask->fk_header_select |= FKH_ETHER | FKH_QTAG; + fm_data->fk_vlan = rte_be_to_cpu_16(spec->tci); + fm_mask->fk_vlan = rte_be_to_cpu_16(mask->tci); + return 0; +} + +static int +enic_fm_copy_item_ipv4(struct copy_item_args *arg) +{ + const struct rte_flow_item *item = arg->item; + const struct rte_flow_item_ipv4 *spec = item->spec; + const struct rte_flow_item_ipv4 *mask = item->mask; + const uint8_t lvl = arg->header_level; + struct fm_tcam_match_entry *entry = arg->fm_tcam_entry; + struct fm_header_set *fm_data, *fm_mask; + + ENICPMD_FUNC_TRACE(); + fm_data = &entry->ftm_data.fk_hdrset[lvl]; + fm_mask = &entry->ftm_mask.fk_hdrset[lvl]; + fm_data->fk_metadata |= FKM_IPV4; + fm_mask->fk_metadata |= FKM_IPV4; + + if (!spec) + return 0; + if (!mask) + mask = &rte_flow_item_ipv4_mask; + + fm_data->fk_header_select |= FKH_IPV4; + fm_mask->fk_header_select |= FKH_IPV4; + memcpy(&fm_data->l3.ip4, spec, sizeof(*spec)); + memcpy(&fm_mask->l3.ip4, mask, sizeof(*mask)); + return 0; +} + +static int +enic_fm_copy_item_ipv6(struct copy_item_args *arg) +{ + const struct rte_flow_item *item = arg->item; + const struct rte_flow_item_ipv6 *spec = item->spec; + const struct rte_flow_item_ipv6 *mask = item->mask; + const uint8_t lvl = arg->header_level; + struct fm_tcam_match_entry *entry = arg->fm_tcam_entry; + struct fm_header_set *fm_data, *fm_mask; + + ENICPMD_FUNC_TRACE(); + fm_data = &entry->ftm_data.fk_hdrset[lvl]; + fm_mask = &entry->ftm_mask.fk_hdrset[lvl]; + fm_data->fk_metadata |= FKM_IPV6; + fm_mask->fk_metadata |= FKM_IPV6; + + if (!spec) + return 0; + if (!mask) + mask = &rte_flow_item_ipv6_mask; + + fm_data->fk_header_select |= FKH_IPV6; + fm_mask->fk_header_select |= FKH_IPV6; + memcpy(&fm_data->l3.ip6, spec, sizeof(*spec)); + memcpy(&fm_mask->l3.ip6, mask, sizeof(*mask)); + return 0; +} + +static int +enic_fm_copy_item_udp(struct copy_item_args *arg) +{ + const struct rte_flow_item *item = arg->item; + const struct rte_flow_item_udp *spec = item->spec; + const struct rte_flow_item_udp *mask = item->mask; + const uint8_t lvl = arg->header_level; + struct fm_tcam_match_entry *entry = arg->fm_tcam_entry; + struct fm_header_set *fm_data, *fm_mask; + + ENICPMD_FUNC_TRACE(); + fm_data = &entry->ftm_data.fk_hdrset[lvl]; + fm_mask = &entry->ftm_mask.fk_hdrset[lvl]; + fm_data->fk_metadata |= FKM_UDP; + fm_mask->fk_metadata |= FKM_UDP; + + if (!spec) + return 0; + if (!mask) + mask = &rte_flow_item_udp_mask; + + fm_data->fk_header_select |= FKH_UDP; + fm_mask->fk_header_select |= FKH_UDP; + memcpy(&fm_data->l4.udp, spec, sizeof(*spec)); + memcpy(&fm_mask->l4.udp, mask, sizeof(*mask)); + return 0; +} + +static int +enic_fm_copy_item_tcp(struct copy_item_args *arg) +{ + const struct rte_flow_item *item = arg->item; + const struct rte_flow_item_tcp *spec = item->spec; + const struct rte_flow_item_tcp *mask = item->mask; + const uint8_t lvl = arg->header_level; + struct fm_tcam_match_entry *entry = arg->fm_tcam_entry; + struct fm_header_set *fm_data, *fm_mask; + + ENICPMD_FUNC_TRACE(); + fm_data = &entry->ftm_data.fk_hdrset[lvl]; + fm_mask = &entry->ftm_mask.fk_hdrset[lvl]; + fm_data->fk_metadata |= FKM_TCP; + fm_mask->fk_metadata |= FKM_TCP; + + if (!spec) + return 0; + if (!mask) + mask = &rte_flow_item_tcp_mask; + + fm_data->fk_header_select |= FKH_TCP; + fm_mask->fk_header_select |= FKH_TCP; + memcpy(&fm_data->l4.tcp, spec, sizeof(*spec)); + 
memcpy(&fm_mask->l4.tcp, mask, sizeof(*mask)); + return 0; +} + +static int +enic_fm_copy_item_sctp(struct copy_item_args *arg) +{ + const struct rte_flow_item *item = arg->item; + const struct rte_flow_item_sctp *spec = item->spec; + const struct rte_flow_item_sctp *mask = item->mask; + const uint8_t lvl = arg->header_level; + struct fm_tcam_match_entry *entry = arg->fm_tcam_entry; + struct fm_header_set *fm_data, *fm_mask; + uint8_t *ip_proto_mask = NULL; + uint8_t *ip_proto = NULL; + uint32_t l3_fkh; + + ENICPMD_FUNC_TRACE(); + fm_data = &entry->ftm_data.fk_hdrset[lvl]; + fm_mask = &entry->ftm_mask.fk_hdrset[lvl]; + /* + * The NIC filter API has no flags for "match sctp", so explicitly + * set the protocol number in the IP pattern. + */ + if (fm_data->fk_metadata & FKM_IPV4) { + struct rte_ipv4_hdr *ip; + ip = (struct rte_ipv4_hdr *)&fm_mask->l3.ip4; + ip_proto_mask = &ip->next_proto_id; + ip = (struct rte_ipv4_hdr *)&fm_data->l3.ip4; + ip_proto = &ip->next_proto_id; + l3_fkh = FKH_IPV4; + } else if (fm_data->fk_metadata & FKM_IPV6) { + struct rte_ipv6_hdr *ip; + ip = (struct rte_ipv6_hdr *)&fm_mask->l3.ip6; + ip_proto_mask = &ip->proto; + ip = (struct rte_ipv6_hdr *)&fm_data->l3.ip6; + ip_proto = &ip->proto; + l3_fkh = FKH_IPV6; + } else { + /* Need IPv4/IPv6 pattern first */ + return -EINVAL; + } + *ip_proto = IPPROTO_SCTP; + *ip_proto_mask = 0xff; + fm_data->fk_header_select |= l3_fkh; + fm_mask->fk_header_select |= l3_fkh; + + if (!spec) + return 0; + if (!mask) + mask = &rte_flow_item_sctp_mask; + + fm_data->fk_header_select |= FKH_L4RAW; + fm_mask->fk_header_select |= FKH_L4RAW; + memcpy(fm_data->l4.rawdata, spec, sizeof(*spec)); + memcpy(fm_mask->l4.rawdata, mask, sizeof(*mask)); + return 0; +} + +static int +enic_fm_copy_item_vxlan(struct copy_item_args *arg) +{ + const struct rte_flow_item *item = arg->item; + const struct rte_flow_item_vxlan *spec = item->spec; + const struct rte_flow_item_vxlan *mask = item->mask; + struct fm_tcam_match_entry *entry = arg->fm_tcam_entry; + struct fm_header_set *fm_data, *fm_mask; + + ENICPMD_FUNC_TRACE(); + /* Only 2 header levels (outer and inner) allowed */ + if (arg->header_level > 0) + return -EINVAL; + + fm_data = &entry->ftm_data.fk_hdrset[0]; + fm_mask = &entry->ftm_mask.fk_hdrset[0]; + fm_data->fk_metadata |= FKM_VXLAN; + fm_mask->fk_metadata |= FKM_VXLAN; + /* items from here on out are inner header items */ + arg->header_level = 1; + + /* Match all if no spec */ + if (!spec) + return 0; + if (!mask) + mask = &rte_flow_item_vxlan_mask; + + fm_data->fk_header_select |= FKH_VXLAN; + fm_mask->fk_header_select |= FKH_VXLAN; + memcpy(&fm_data->vxlan, spec, sizeof(*spec)); + memcpy(&fm_mask->vxlan, mask, sizeof(*mask)); + return 0; +} + +/* + * Currently, raw pattern match is very limited. It is intended for matching + * UDP tunnel header (e.g. vxlan or geneve). 
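+ * A typical pattern is therefore something like
+ * eth / ipv4 / udp / raw (tunnel header bytes), with the raw item
+ * immediately following the udp item.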
+ */ +static int +enic_fm_copy_item_raw(struct copy_item_args *arg) +{ + const struct rte_flow_item *item = arg->item; + const struct rte_flow_item_raw *spec = item->spec; + const struct rte_flow_item_raw *mask = item->mask; + const uint8_t lvl = arg->header_level; + struct fm_tcam_match_entry *entry = arg->fm_tcam_entry; + struct fm_header_set *fm_data, *fm_mask; + + ENICPMD_FUNC_TRACE(); + /* Cannot be used for inner packet */ + if (lvl > 0) + return -EINVAL; + /* Need both spec and mask */ + if (!spec || !mask) + return -EINVAL; + /* Only supports relative with offset 0 */ + if (!spec->relative || spec->offset != 0 || spec->search || + spec->limit) + return -EINVAL; + /* Need non-null pattern that fits within the NIC's filter pattern */ + if (spec->length == 0 || + spec->length + sizeof(struct rte_udp_hdr) > FM_LAYER_SIZE || + !spec->pattern || !mask->pattern) + return -EINVAL; + /* + * Mask fields, including length, are often set to zero. Assume that + * means "same as spec" to avoid breaking existing apps. If length + * is not zero, then it should be >= spec length. + * + * No more pattern follows this, so append to the L4 layer instead of + * L5 to work with both recent and older VICs. + */ + if (mask->length != 0 && mask->length < spec->length) + return -EINVAL; + + fm_data = &entry->ftm_data.fk_hdrset[lvl]; + fm_mask = &entry->ftm_mask.fk_hdrset[lvl]; + fm_data->fk_header_select |= FKH_L4RAW; + fm_mask->fk_header_select |= FKH_L4RAW; + fm_data->fk_header_select &= ~FKH_UDP; + fm_mask->fk_header_select &= ~FKH_UDP; + memcpy(fm_data->l4.rawdata + sizeof(struct rte_udp_hdr), + spec->pattern, spec->length); + memcpy(fm_mask->l4.rawdata + sizeof(struct rte_udp_hdr), + mask->pattern, spec->length); + return 0; +} + +static int +enic_fet_alloc(struct enic_flowman *fm, uint8_t ingress, + struct fm_key_template *key, int entries, + struct enic_fm_fet **fet_out) +{ + struct fm_exact_match_table *cmd; + struct fm_header_set *hdr; + struct enic_fm_fet *fet; + uint64_t args[3]; + int ret; + + ENICPMD_FUNC_TRACE(); + fet = calloc(1, sizeof(struct enic_fm_fet)); + if (fet == NULL) + return -ENOMEM; + cmd = &fm->cmd.va->fm_exact_match_table; + memset(cmd, 0, sizeof(*cmd)); + cmd->fet_direction = ingress ? FM_INGRESS : FM_EGRESS; + cmd->fet_stage = FM_STAGE_LAST; + cmd->fet_max_entries = entries ? entries : FM_MAX_EXACT_TABLE_SIZE; + if (key == NULL) { + hdr = &cmd->fet_key.fk_hdrset[0]; + memset(hdr, 0, sizeof(*hdr)); + hdr->fk_header_select = FKH_IPV4 | FKH_UDP; + hdr->l3.ip4.fk_saddr = 0xFFFFFFFF; + hdr->l3.ip4.fk_daddr = 0xFFFFFFFF; + hdr->l4.udp.fk_source = 0xFFFF; + hdr->l4.udp.fk_dest = 0xFFFF; + fet->default_key = 1; + } else { + memcpy(&cmd->fet_key, key, sizeof(*key)); + memcpy(&fet->key, key, sizeof(*key)); + fet->default_key = 0; + } + cmd->fet_key.fk_packet_tag = 1; + + args[0] = FM_EXACT_TABLE_ALLOC; + args[1] = fm->cmd.pa; + ret = vnic_dev_flowman_cmd(fm->enic->vdev, args, 2); + if (ret) { + ENICPMD_LOG(ERR, "cannot alloc exact match table: rc=%d", ret); + free(fet); + return ret; + } + fet->handle = args[0]; + fet->ingress = ingress; + ENICPMD_LOG(DEBUG, "allocated exact match table: handle=0x%" PRIx64, + fet->handle); + *fet_out = fet; + return 0; +} + +static void +enic_fet_free(struct enic_flowman *fm, struct enic_fm_fet *fet) +{ + ENICPMD_FUNC_TRACE(); + enic_fm_tbl_free(fm, fet->handle); + if (!fet->default_key) + TAILQ_REMOVE(&fm->fet_list, fet, list); + free(fet); +} + +/* + * Get the exact match table for the given combination of + * . Allocate one on the fly as necessary. 
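+ * The lookup key is the (group, direction) pair. Groups other than the
+ * TCAM group are typically reached via the JUMP action; a hypothetical
+ * application-side action list that leads here could be:
+ *
+ *   struct rte_flow_action_jump jump = { .group = 1 };
+ *   struct rte_flow_action actions[] = {
+ *           { .type = RTE_FLOW_ACTION_TYPE_JUMP, .conf = &jump },
+ *           { .type = RTE_FLOW_ACTION_TYPE_END },
+ *   };
+ *
+ * Rules in FM_TCAM_RTE_GROUP go to the TCAM; rules in any other group
+ * land in the exact match table looked up (or created) here.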
+ */ +static int +enic_fet_get(struct enic_flowman *fm, + uint32_t group, + uint8_t ingress, + struct fm_key_template *key, + struct enic_fm_fet **fet_out, + struct rte_flow_error *error) +{ + struct enic_fm_fet *fet; + + ENICPMD_FUNC_TRACE(); + /* See if we already have this table open */ + TAILQ_FOREACH(fet, &fm->fet_list, list) { + if (fet->group == group && fet->ingress == ingress) + break; + } + if (fet == NULL) { + /* Jumping to a non-existing group? Use the default table */ + if (key == NULL) { + fet = ingress ? fm->default_ig_fet : fm->default_eg_fet; + } else if (enic_fet_alloc(fm, ingress, key, 0, &fet)) { + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, "enic: cannot get exact match table"); + } + fet->group = group; + /* Default table is never on the open table list */ + if (!fet->default_key) + TAILQ_INSERT_HEAD(&fm->fet_list, fet, list); + } + fet->ref++; + *fet_out = fet; + ENICPMD_LOG(DEBUG, "fet_get: %s %s group=%u ref=%u", + fet->default_key ? "default" : "", + fet->ingress ? "ingress" : "egress", + fet->group, fet->ref); + return 0; +} + +static void +enic_fet_put(struct enic_flowman *fm, struct enic_fm_fet *fet) +{ + ENICPMD_FUNC_TRACE(); + RTE_ASSERT(fet->ref > 0); + fet->ref--; + ENICPMD_LOG(DEBUG, "fet_put: %s %s group=%u ref=%u", + fet->default_key ? "default" : "", + fet->ingress ? "ingress" : "egress", + fet->group, fet->ref); + if (fet->ref == 0) + enic_fet_free(fm, fet); +} + +/* Return 1 if current item is valid on top of the previous one. */ +static int +fm_item_stacking_valid(enum rte_flow_item_type prev_item, + const struct enic_fm_items *item_info, + uint8_t is_first_item) +{ + enum rte_flow_item_type const *allowed_items = item_info->prev_items; + + ENICPMD_FUNC_TRACE(); + for (; *allowed_items != RTE_FLOW_ITEM_TYPE_END; allowed_items++) { + if (prev_item == *allowed_items) + return 1; + } + + /* This is the first item in the stack. Check if that's cool */ + if (is_first_item && item_info->valid_start_item) + return 1; + return 0; +} + +/* + * Build the flow manager match entry structure from the provided pattern. + * The pattern is validated as the items are copied. + */ +static int +enic_fm_copy_entry(struct enic_flowman *fm, + const struct rte_flow_item pattern[], + struct rte_flow_error *error) +{ + const struct enic_fm_items *item_info; + enum rte_flow_item_type prev_item; + const struct rte_flow_item *item; + struct copy_item_args args; + uint8_t prev_header_level; + uint8_t is_first_item; + int ret; + + ENICPMD_FUNC_TRACE(); + item = pattern; + is_first_item = 1; + prev_item = RTE_FLOW_ITEM_TYPE_END; + + args.fm_tcam_entry = &fm->tcam_entry; + args.header_level = 0; + prev_header_level = 0; + for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { + /* + * Get info about how to validate and copy the item. If NULL + * is returned the nic does not support the item. + */ + if (item->type == RTE_FLOW_ITEM_TYPE_VOID) + continue; + + item_info = &enic_fm_items[item->type]; + + if (item->type > FM_MAX_ITEM_TYPE || + item_info->copy_item == NULL) { + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + NULL, "enic: unsupported item"); + } + + /* check to see if item stacking is valid */ + if (!fm_item_stacking_valid(prev_item, item_info, + is_first_item)) + goto stacking_error; + + args.item = item; + ret = item_info->copy_item(&args); + if (ret) + goto item_not_supported; + /* Going from outer to inner? 
Treat it as a new packet start */ + if (prev_header_level != args.header_level) { + prev_item = RTE_FLOW_ITEM_TYPE_END; + is_first_item = 1; + } else { + prev_item = item->type; + is_first_item = 0; + } + prev_header_level = args.header_level; + } + return 0; + +item_not_supported: + return rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_ITEM, + NULL, "enic: unsupported item type"); + +stacking_error: + return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, + item, "enic: unsupported item stack"); +} + +static void +flow_item_skip_void(const struct rte_flow_item **item) +{ + for ( ; ; (*item)++) + if ((*item)->type != RTE_FLOW_ITEM_TYPE_VOID) + return; +} + +static void +append_template(void **template, uint8_t *off, const void *data, int len) +{ + memcpy(*template, data, len); + *template = (char *)*template + len; + *off = *off + len; +} + +static int +enic_fm_append_action_op(struct enic_flowman *fm, + struct fm_action_op *fm_op, + struct rte_flow_error *error) +{ + int count; + + count = fm->action_op_count; + ENICPMD_LOG(DEBUG, "append action op: idx=%d op=%u", + count, fm_op->fa_op); + if (count == FM_ACTION_OP_MAX) { + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "too many action operations"); + } + fm->action.fma_action_ops[count] = *fm_op; + fm->action_op_count = count + 1; + return 0; +} + +/* NIC requires that 1st steer appear before decap. + * Correct example: steer, decap, steer, steer, ... + */ +static void +enic_fm_reorder_action_op(struct enic_flowman *fm) +{ + struct fm_action_op *op, *steer, *decap; + struct fm_action_op tmp_op; + + ENICPMD_FUNC_TRACE(); + /* Find 1st steer and decap */ + op = fm->action.fma_action_ops; + steer = NULL; + decap = NULL; + while (op->fa_op != FMOP_END) { + if (!decap && op->fa_op == FMOP_DECAP_NOSTRIP) + decap = op; + else if (!steer && op->fa_op == FMOP_RQ_STEER) + steer = op; + op++; + } + /* If decap is before steer, swap */ + if (steer && decap && decap < steer) { + op = fm->action.fma_action_ops; + ENICPMD_LOG(DEBUG, "swap decap %ld <-> steer %ld", + (long)(decap - op), (long)(steer - op)); + tmp_op = *decap; + *decap = *steer; + *steer = tmp_op; + } +} + +/* VXLAN decap is done via flowman compound action */ +static int +enic_fm_copy_vxlan_decap(struct enic_flowman *fm, + struct fm_tcam_match_entry *fmt, + const struct rte_flow_action *action, + struct rte_flow_error *error) +{ + struct fm_header_set *fm_data; + struct fm_action_op fm_op; + + ENICPMD_FUNC_TRACE(); + fm_data = &fmt->ftm_data.fk_hdrset[0]; + if (!(fm_data->fk_metadata & FKM_VXLAN)) { + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, action, + "vxlan-decap: vxlan must be in pattern"); + } + + memset(&fm_op, 0, sizeof(fm_op)); + fm_op.fa_op = FMOP_DECAP_NOSTRIP; + return enic_fm_append_action_op(fm, &fm_op, error); +} + +/* VXLAN encap is done via flowman compound action */ +static int +enic_fm_copy_vxlan_encap(struct enic_flowman *fm, + const struct rte_flow_item *item, + struct rte_flow_error *error) +{ + struct fm_action_op fm_op; + struct rte_ether_hdr *eth; + uint16_t *ethertype; + void *template; + uint8_t off; + + ENICPMD_FUNC_TRACE(); + memset(&fm_op, 0, sizeof(fm_op)); + fm_op.fa_op = FMOP_ENCAP; + template = fm->action.fma_data; + off = 0; + /* + * Copy flow items to the flowman template starting L2. + * L2 must be ethernet. 
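+ * For reference, a minimal encap definition accepted by this code is an
+ * item array of the following shape (application-side sketch; the spec
+ * pointers are hypothetical):
+ *
+ *   struct rte_flow_item defn[] = {
+ *           { .type = RTE_FLOW_ITEM_TYPE_ETH, .spec = &eth_spec },
+ *           { .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ipv4_spec },
+ *           { .type = RTE_FLOW_ITEM_TYPE_UDP, .spec = &udp_spec },
+ *           { .type = RTE_FLOW_ITEM_TYPE_VXLAN, .spec = &vxlan_spec },
+ *           { .type = RTE_FLOW_ITEM_TYPE_END },
+ *   };
+ *
+ * An optional VLAN item may sit between ETH and IPV4; its TCI goes into
+ * the encap op rather than the template.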
+ */ + flow_item_skip_void(&item); + if (item->type != RTE_FLOW_ITEM_TYPE_ETH) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "vxlan-encap: first item should be ethernet"); + eth = (struct rte_ether_hdr *)template; + ethertype = ð->ether_type; + append_template(&template, &off, item->spec, + sizeof(struct rte_flow_item_eth)); + item++; + flow_item_skip_void(&item); + /* Optional VLAN */ + if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) { + const struct rte_flow_item_vlan *spec; + + ENICPMD_LOG(DEBUG, "vxlan-encap: vlan"); + spec = item->spec; + fm_op.encap.outer_vlan = rte_be_to_cpu_16(spec->tci); + item++; + flow_item_skip_void(&item); + } + /* L3 must be IPv4, IPv6 */ + switch (item->type) { + case RTE_FLOW_ITEM_TYPE_IPV4: + { + struct rte_ipv4_hdr *ip4; + + ENICPMD_LOG(DEBUG, "vxlan-encap: ipv4"); + *ethertype = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4); + ip4 = (struct rte_ipv4_hdr *)template; + /* + * Offset of IPv4 length field and its initial value + * (IP + UDP + VXLAN) are specified in the action. The NIC + * will add inner packet length. + */ + fm_op.encap.len1_offset = off + + offsetof(struct rte_ipv4_hdr, total_length); + fm_op.encap.len1_delta = sizeof(struct rte_ipv4_hdr) + + sizeof(struct rte_udp_hdr) + + sizeof(struct rte_vxlan_hdr); + append_template(&template, &off, item->spec, + sizeof(struct rte_ipv4_hdr)); + ip4->version_ihl = RTE_IPV4_VHL_DEF; + if (ip4->time_to_live == 0) + ip4->time_to_live = IP_DEFTTL; + ip4->next_proto_id = IPPROTO_UDP; + break; + } + case RTE_FLOW_ITEM_TYPE_IPV6: + { + struct rte_ipv6_hdr *ip6; + + ENICPMD_LOG(DEBUG, "vxlan-encap: ipv6"); + *ethertype = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6); + ip6 = (struct rte_ipv6_hdr *)template; + fm_op.encap.len1_offset = off + + offsetof(struct rte_ipv6_hdr, payload_len); + fm_op.encap.len1_delta = sizeof(struct rte_udp_hdr) + + sizeof(struct rte_vxlan_hdr); + append_template(&template, &off, item->spec, + sizeof(struct rte_ipv6_hdr)); + ip6->vtc_flow |= rte_cpu_to_be_32(IP6_VTC_FLOW); + if (ip6->hop_limits == 0) + ip6->hop_limits = IP_DEFTTL; + ip6->proto = IPPROTO_UDP; + break; + } + default: + return rte_flow_error_set(error, + EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, + "vxlan-encap: L3 must be IPv4/IPv6"); + } + item++; + flow_item_skip_void(&item); + + /* L4 is UDP */ + if (item->type != RTE_FLOW_ITEM_TYPE_UDP) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "vxlan-encap: UDP must follow IPv4/IPv6"); + /* UDP length = UDP + VXLAN. NIC will add inner packet length. */ + fm_op.encap.len2_offset = + off + offsetof(struct rte_udp_hdr, dgram_len); + fm_op.encap.len2_delta = + sizeof(struct rte_udp_hdr) + sizeof(struct rte_vxlan_hdr); + append_template(&template, &off, item->spec, + sizeof(struct rte_udp_hdr)); + item++; + flow_item_skip_void(&item); + + /* Finally VXLAN */ + if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) + return rte_flow_error_set(error, + EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, + "vxlan-encap: VXLAN must follow UDP"); + append_template(&template, &off, item->spec, + sizeof(struct rte_flow_item_vxlan)); + + /* + * Fill in the rest of the action structure. + * Indicate that we want to encap with vxlan at packet start. 
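+ * As a worked example of the length math set up earlier in this
+ * function: with an IPv4 outer header, len1_delta is 20 (IPv4) + 8 (UDP)
+ * + 8 (VXLAN) = 36 bytes, which the NIC adds to the inner packet length
+ * to form the outer total_length, and len2_delta is 8 + 8 = 16 bytes for
+ * the UDP dgram_len. For IPv6 only the UDP + VXLAN bytes (16) are added
+ * to payload_len, since the IPv6 payload length excludes the fixed
+ * header.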
+ */ + fm_op.encap.template_offset = 0; + fm_op.encap.template_len = off; + return enic_fm_append_action_op(fm, &fm_op, error); +} + +static int +enic_fm_find_vnic(struct enic *enic, const struct rte_pci_addr *addr, + uint64_t *handle) +{ + uint32_t bdf; + uint64_t args[2]; + int rc; + + ENICPMD_FUNC_TRACE(); + ENICPMD_LOG(DEBUG, "bdf=%x:%x:%x", addr->bus, addr->devid, + addr->function); + bdf = addr->bus << 8 | addr->devid << 3 | addr->function; + args[0] = FM_VNIC_FIND; + args[1] = bdf; + rc = vnic_dev_flowman_cmd(enic->vdev, args, 2); + if (rc != 0) { + ENICPMD_LOG(ERR, "allocating counters rc=%d", rc); + return rc; + } + *handle = args[0]; + ENICPMD_LOG(DEBUG, "found vnic: handle=0x%" PRIx64, *handle); + return 0; +} + +/* Translate flow actions to flowman TCAM entry actions */ +static int +enic_fm_copy_action(struct enic_flowman *fm, + const struct rte_flow_action actions[], + uint8_t ingress, + struct rte_flow_error *error) +{ + enum { + FATE = 1 << 0, + DECAP = 1 << 1, + PASSTHRU = 1 << 2, + COUNT = 1 << 3, + ENCAP = 1 << 4, + }; + struct fm_tcam_match_entry *fmt; + struct fm_action_op fm_op; + struct enic *enic; + uint32_t overlap; + uint64_t vnic_h; + bool first_rq; + int ret; + + ENICPMD_FUNC_TRACE(); + fmt = &fm->tcam_entry; + first_rq = true; + enic = fm->enic; + overlap = 0; + vnic_h = 0; /* 0 = current vNIC */ + for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { + switch (actions->type) { + case RTE_FLOW_ACTION_TYPE_VOID: + continue; + case RTE_FLOW_ACTION_TYPE_PASSTHRU: { + if (overlap & PASSTHRU) + goto unsupported; + overlap |= PASSTHRU; + break; + } + case RTE_FLOW_ACTION_TYPE_JUMP: { + const struct rte_flow_action_jump *jump = + actions->conf; + struct enic_fm_fet *fet; + + if (overlap & FATE) + goto unsupported; + ret = enic_fet_get(fm, jump->group, ingress, NULL, + &fet, error); + if (ret) + return ret; + overlap |= FATE; + memset(&fm_op, 0, sizeof(fm_op)); + fm_op.fa_op = FMOP_EXACT_MATCH; + fm_op.exact.handle = fet->handle; + fm->fet = fet; + ret = enic_fm_append_action_op(fm, &fm_op, error); + if (ret) + return ret; + break; + } + case RTE_FLOW_ACTION_TYPE_MARK: { + const struct rte_flow_action_mark *mark = + actions->conf; + + if (mark->id >= ENIC_MAGIC_FILTER_ID - 1) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, "invalid mark id"); + memset(&fm_op, 0, sizeof(fm_op)); + fm_op.fa_op = FMOP_MARK; + fm_op.mark.mark = mark->id + 1; + ret = enic_fm_append_action_op(fm, &fm_op, error); + if (ret) + return ret; + break; + } + case RTE_FLOW_ACTION_TYPE_FLAG: { + /* ENIC_MAGIC_FILTER_ID is reserved for flagging */ + memset(&fm_op, 0, sizeof(fm_op)); + fm_op.fa_op = FMOP_MARK; + fm_op.mark.mark = ENIC_MAGIC_FILTER_ID; + ret = enic_fm_append_action_op(fm, &fm_op, error); + if (ret) + return ret; + break; + } + case RTE_FLOW_ACTION_TYPE_QUEUE: { + const struct rte_flow_action_queue *queue = + actions->conf; + + /* + * If fate other than QUEUE or RSS, fail. Multiple + * rss and queue actions are ok. 
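+ * For illustration, the conf for this action is just a queue index
+ * (value hypothetical):
+ *
+ *   struct rte_flow_action_queue queue = { .index = 2 };
+ *
+ * The rte_flow queue index is translated to the VIC start-of-packet RQ
+ * index by enic_rte_rq_idx_to_sop_idx() below.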
+ */ + if ((overlap & FATE) && first_rq) + goto unsupported; + first_rq = false; + overlap |= FATE; + memset(&fm_op, 0, sizeof(fm_op)); + fm_op.fa_op = FMOP_RQ_STEER; + fm_op.rq_steer.rq_index = + enic_rte_rq_idx_to_sop_idx(queue->index); + fm_op.rq_steer.rq_count = 1; + fm_op.rq_steer.vnic_handle = vnic_h; + ret = enic_fm_append_action_op(fm, &fm_op, error); + if (ret) + return ret; + ENICPMD_LOG(DEBUG, "create QUEUE action rq: %u", + fm_op.rq_steer.rq_index); + break; + } + case RTE_FLOW_ACTION_TYPE_DROP: { + if (overlap & FATE) + goto unsupported; + overlap |= FATE; + memset(&fm_op, 0, sizeof(fm_op)); + fm_op.fa_op = FMOP_DROP; + ret = enic_fm_append_action_op(fm, &fm_op, error); + if (ret) + return ret; + ENICPMD_LOG(DEBUG, "create DROP action"); + break; + } + case RTE_FLOW_ACTION_TYPE_COUNT: { + if (overlap & COUNT) + goto unsupported; + overlap |= COUNT; + /* Count is associated with entry not action on VIC. */ + fmt->ftm_flags |= FMEF_COUNTER; + break; + } + case RTE_FLOW_ACTION_TYPE_RSS: { + const struct rte_flow_action_rss *rss = actions->conf; + bool allow; + uint16_t i; + + /* + * If fate other than QUEUE or RSS, fail. Multiple + * rss and queue actions are ok. + */ + if ((overlap & FATE) && first_rq) + goto unsupported; + first_rq = false; + overlap |= FATE; + + /* + * Hardware only supports RSS actions on outer level + * with default type and function. Queues must be + * sequential. + */ + allow = rss->func == RTE_ETH_HASH_FUNCTION_DEFAULT && + rss->level == 0 && (rss->types == 0 || + rss->types == enic->rss_hf) && + rss->queue_num <= enic->rq_count && + rss->queue[rss->queue_num - 1] < enic->rq_count; + + + /* Identity queue map needs to be sequential */ + for (i = 1; i < rss->queue_num; i++) + allow = allow && (rss->queue[i] == + rss->queue[i - 1] + 1); + if (!allow) + goto unsupported; + + memset(&fm_op, 0, sizeof(fm_op)); + fm_op.fa_op = FMOP_RQ_STEER; + fm_op.rq_steer.rq_index = + enic_rte_rq_idx_to_sop_idx(rss->queue[0]); + fm_op.rq_steer.rq_count = rss->queue_num; + fm_op.rq_steer.vnic_handle = vnic_h; + ret = enic_fm_append_action_op(fm, &fm_op, error); + if (ret) + return ret; + ENICPMD_LOG(DEBUG, "create QUEUE action rq: %u", + fm_op.rq_steer.rq_index); + break; + } + case RTE_FLOW_ACTION_TYPE_PORT_ID: { + const struct rte_flow_action_port_id *port; + struct rte_pci_device *pdev; + struct rte_eth_dev *dev; + + port = actions->conf; + if (port->original) { + vnic_h = 0; /* This port */ + break; + } + ENICPMD_LOG(DEBUG, "port id %u", port->id); + if (!rte_eth_dev_is_valid_port(port->id)) { + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, "invalid port_id"); + } + dev = &rte_eth_devices[port->id]; + if (!dev_is_enic(dev)) { + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, "port_id is not enic"); + } + pdev = RTE_ETH_DEV_TO_PCI(dev); + if (enic_fm_find_vnic(enic, &pdev->addr, &vnic_h)) { + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, "port_id is not vnic"); + } + break; + } + case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP: { + if (overlap & DECAP) + goto unsupported; + overlap |= DECAP; + + ret = enic_fm_copy_vxlan_decap(fm, fmt, actions, + error); + if (ret != 0) + return ret; + break; + } + case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP: { + const struct rte_flow_action_vxlan_encap *encap; + + encap = actions->conf; + if (overlap & ENCAP) + goto unsupported; + overlap |= ENCAP; + ret = enic_fm_copy_vxlan_encap(fm, encap->definition, + error); + if (ret != 0) + return ret; + break; + } + 
default: + goto unsupported; + } + } + + if (!(overlap & (FATE | PASSTHRU | COUNT))) + goto unsupported; + memset(&fm_op, 0, sizeof(fm_op)); + fm_op.fa_op = FMOP_END; + ret = enic_fm_append_action_op(fm, &fm_op, error); + if (ret) + return ret; + enic_fm_reorder_action_op(fm); + return 0; + +unsupported: + return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, + NULL, "enic: unsupported action"); +} + +/** Check if the action is supported */ +static int +enic_fm_match_action(const struct rte_flow_action *action, + const enum rte_flow_action_type *supported_actions) +{ + for (; *supported_actions != RTE_FLOW_ACTION_TYPE_END; + supported_actions++) { + if (action->type == *supported_actions) + return 1; + } + return 0; +} + +/* Debug function to dump internal NIC action structure. */ +static void +enic_fm_dump_tcam_actions(const struct fm_action *fm_action) +{ + /* Manually keep in sync with FMOP commands */ + const char *fmop_str[FMOP_OP_MAX] = { + [FMOP_END] = "end", + [FMOP_DROP] = "drop", + [FMOP_RQ_STEER] = "steer", + [FMOP_EXACT_MATCH] = "exmatch", + [FMOP_MARK] = "mark", + [FMOP_EXT_MARK] = "ext_mark", + [FMOP_TAG] = "tag", + [FMOP_EG_HAIRPIN] = "eg_hairpin", + [FMOP_IG_HAIRPIN] = "ig_hairpin", + [FMOP_ENCAP_IVLAN] = "encap_ivlan", + [FMOP_ENCAP_NOIVLAN] = "encap_noivlan", + [FMOP_ENCAP] = "encap", + [FMOP_SET_OVLAN] = "set_ovlan", + [FMOP_DECAP_NOSTRIP] = "decap_nostrip", + }; + const struct fm_action_op *op = &fm_action->fma_action_ops[0]; + char buf[128], *bp = buf; + const char *op_str; + int i, n, buf_len; + + buf[0] = '\0'; + buf_len = sizeof(buf); + for (i = 0; i < FM_ACTION_OP_MAX; i++) { + if (op->fa_op == FMOP_END) + break; + if (op->fa_op >= FMOP_OP_MAX) + op_str = "unknown"; + else + op_str = fmop_str[op->fa_op]; + n = snprintf(bp, buf_len, "%s,", op_str); + if (n > 0 && n < buf_len) { + bp += n; + buf_len -= n; + } + op++; + } + /* Remove trailing comma */ + if (buf[0]) + *(bp - 1) = '\0'; + ENICPMD_LOG(DEBUG, " Acions: %s", buf); +} + +static int +bits_to_str(uint32_t bits, const char *strings[], int max, + char *buf, int buf_len) +{ + int i, n = 0, len = 0; + + for (i = 0; i < max; i++) { + if (bits & (1 << i)) { + n = snprintf(buf, buf_len, "%s,", strings[i]); + if (n > 0 && n < buf_len) { + buf += n; + buf_len -= n; + len += n; + } + } + } + /* Remove trailing comma */ + if (len) { + *(buf - 1) = '\0'; + len--; + } + return len; +} + +/* Debug function to dump internal NIC filter structure. 
*/ +static void +__enic_fm_dump_tcam_match(const struct fm_header_set *fk_hdrset, char *buf, + int buf_len) +{ + /* Manually keep in sync with FKM_BITS */ + const char *fm_fkm_str[FKM_BIT_COUNT] = { + [FKM_QTAG_BIT] = "qtag", + [FKM_CMD_BIT] = "cmd", + [FKM_IPV4_BIT] = "ip4", + [FKM_IPV6_BIT] = "ip6", + [FKM_ROCE_BIT] = "roce", + [FKM_UDP_BIT] = "udp", + [FKM_TCP_BIT] = "tcp", + [FKM_TCPORUDP_BIT] = "tcpportudp", + [FKM_IPFRAG_BIT] = "ipfrag", + [FKM_NVGRE_BIT] = "nvgre", + [FKM_VXLAN_BIT] = "vxlan", + [FKM_GENEVE_BIT] = "geneve", + [FKM_NSH_BIT] = "nsh", + [FKM_ROCEV2_BIT] = "rocev2", + [FKM_VLAN_PRES_BIT] = "vlan_pres", + [FKM_IPOK_BIT] = "ipok", + [FKM_L4OK_BIT] = "l4ok", + [FKM_ROCEOK_BIT] = "roceok", + [FKM_FCSOK_BIT] = "fcsok", + [FKM_EG_SPAN_BIT] = "eg_span", + [FKM_IG_SPAN_BIT] = "ig_span", + [FKM_EG_HAIRPINNED_BIT] = "eg_hairpinned", + }; + /* Manually keep in sync with FKH_BITS */ + const char *fm_fkh_str[FKH_BIT_COUNT] = { + [FKH_ETHER_BIT] = "eth", + [FKH_QTAG_BIT] = "qtag", + [FKH_L2RAW_BIT] = "l2raw", + [FKH_IPV4_BIT] = "ip4", + [FKH_IPV6_BIT] = "ip6", + [FKH_L3RAW_BIT] = "l3raw", + [FKH_UDP_BIT] = "udp", + [FKH_TCP_BIT] = "tcp", + [FKH_ICMP_BIT] = "icmp", + [FKH_VXLAN_BIT] = "vxlan", + [FKH_L4RAW_BIT] = "l4raw", + }; + uint32_t fkh_bits = fk_hdrset->fk_header_select; + uint32_t fkm_bits = fk_hdrset->fk_metadata; + int n; + + if (!fkm_bits && !fkh_bits) + return; + n = snprintf(buf, buf_len, "metadata("); + if (n > 0 && n < buf_len) { + buf += n; + buf_len -= n; + } + n = bits_to_str(fkm_bits, fm_fkm_str, FKM_BIT_COUNT, buf, buf_len); + if (n > 0 && n < buf_len) { + buf += n; + buf_len -= n; + } + n = snprintf(buf, buf_len, ") valid hdr fields("); + if (n > 0 && n < buf_len) { + buf += n; + buf_len -= n; + } + n = bits_to_str(fkh_bits, fm_fkh_str, FKH_BIT_COUNT, buf, buf_len); + if (n > 0 && n < buf_len) { + buf += n; + buf_len -= n; + } + snprintf(buf, buf_len, ")"); +} + +static void +enic_fm_dump_tcam_match(const struct fm_tcam_match_entry *match, + uint8_t ingress) +{ + char buf[256]; + + memset(buf, 0, sizeof(buf)); + __enic_fm_dump_tcam_match(&match->ftm_mask.fk_hdrset[0], + buf, sizeof(buf)); + ENICPMD_LOG(DEBUG, " TCAM %s Outer: %s %scounter", + (ingress) ? "IG" : "EG", buf, + (match->ftm_flags & FMEF_COUNTER) ? "" : "no "); + memset(buf, 0, sizeof(buf)); + __enic_fm_dump_tcam_match(&match->ftm_mask.fk_hdrset[1], + buf, sizeof(buf)); + if (buf[0]) + ENICPMD_LOG(DEBUG, " Inner: %s", buf); +} + +/* Debug function to dump internal NIC flow structures. 
*/ +static void +enic_fm_dump_tcam_entry(const struct fm_tcam_match_entry *fm_match, + const struct fm_action *fm_action, + uint8_t ingress) +{ + if (!rte_log_can_log(enic_pmd_logtype, RTE_LOG_DEBUG)) + return; + enic_fm_dump_tcam_match(fm_match, ingress); + enic_fm_dump_tcam_actions(fm_action); +} + +static int +enic_fm_flow_parse(struct enic_flowman *fm, + const struct rte_flow_attr *attrs, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + const struct rte_flow_action *action; + unsigned int ret; + static const enum rte_flow_action_type *sa; + + ENICPMD_FUNC_TRACE(); + ret = 0; + if (!pattern) { + rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM, + NULL, "no pattern specified"); + return -rte_errno; + } + + if (!actions) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_NUM, + NULL, "no action specified"); + return -rte_errno; + } + + if (attrs) { + if (attrs->priority) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, + NULL, + "priorities are not supported"); + return -rte_errno; + } else if (attrs->transfer) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, + NULL, + "transfer is not supported"); + return -rte_errno; + } else if (attrs->ingress && attrs->egress) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, + NULL, + "bidirectional rules not supported"); + return -rte_errno; + } + + } else { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR, + NULL, "no attribute specified"); + return -rte_errno; + } + + /* Verify Actions. */ + sa = (attrs->ingress) ? enic_fm_supported_ig_actions : + enic_fm_supported_eg_actions; + for (action = &actions[0]; action->type != RTE_FLOW_ACTION_TYPE_END; + action++) { + if (action->type == RTE_FLOW_ACTION_TYPE_VOID) + continue; + else if (!enic_fm_match_action(action, sa)) + break; + } + if (action->type != RTE_FLOW_ACTION_TYPE_END) { + rte_flow_error_set(error, EPERM, RTE_FLOW_ERROR_TYPE_ACTION, + action, "invalid action"); + return -rte_errno; + } + ret = enic_fm_copy_entry(fm, pattern, error); + if (ret) + return ret; + ret = enic_fm_copy_action(fm, actions, attrs->ingress, error); + return ret; +} + +static void +enic_fm_counter_free(struct enic_flowman *fm, struct enic_fm_flow *fm_flow) +{ + if (!fm_flow->counter_valid) + return; + SLIST_INSERT_HEAD(&fm->counters, fm_flow->counter, next); + fm_flow->counter_valid = false; +} + +static int +enic_fm_more_counters(struct enic_flowman *fm) +{ + struct enic_fm_counter *new_stack; + struct enic_fm_counter *ctrs; + struct enic *enic; + int i, rc; + uint64_t args[2]; + + ENICPMD_FUNC_TRACE(); + enic = fm->enic; + new_stack = rte_realloc(fm->counter_stack, (fm->counters_alloced + + FM_COUNTERS_EXPAND) * + sizeof(struct enic_fm_counter), 0); + if (new_stack == NULL) { + ENICPMD_LOG(ERR, "cannot alloc counter memory"); + return -ENOMEM; + } + fm->counter_stack = new_stack; + + args[0] = FM_COUNTER_BRK; + args[1] = fm->counters_alloced + FM_COUNTERS_EXPAND; + rc = vnic_dev_flowman_cmd(enic->vdev, args, 2); + if (rc != 0) { + ENICPMD_LOG(ERR, "cannot alloc counters rc=%d", rc); + return rc; + } + ctrs = (struct enic_fm_counter *)fm->counter_stack + + fm->counters_alloced; + for (i = 0; i < FM_COUNTERS_EXPAND; i++, ctrs++) { + ctrs->handle = fm->counters_alloced + i; + SLIST_INSERT_HEAD(&fm->counters, ctrs, next); + } + fm->counters_alloced += FM_COUNTERS_EXPAND; + ENICPMD_LOG(DEBUG, "%u counters allocated, total: %u", + 
FM_COUNTERS_EXPAND, fm->counters_alloced); + return 0; +} + +static int +enic_fm_counter_zero(struct enic_flowman *fm, struct enic_fm_counter *c) +{ + struct enic *enic; + uint64_t args[3]; + int ret; + + ENICPMD_FUNC_TRACE(); + enic = fm->enic; + args[0] = FM_COUNTER_QUERY; + args[1] = c->handle; + args[2] = 1; /* clear */ + ret = vnic_dev_flowman_cmd(enic->vdev, args, 3); + if (ret) { + ENICPMD_LOG(ERR, "counter init: rc=%d handle=0x%x", + ret, c->handle); + return ret; + } + return 0; +} + +static int +enic_fm_counter_alloc(struct enic_flowman *fm, struct rte_flow_error *error, + struct enic_fm_counter **ctr) +{ + struct enic_fm_counter *c; + int ret; + + ENICPMD_FUNC_TRACE(); + *ctr = NULL; + if (SLIST_EMPTY(&fm->counters)) { + ret = enic_fm_more_counters(fm); + if (ret) + return rte_flow_error_set(error, -ret, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, "enic: out of counters"); + } + c = SLIST_FIRST(&fm->counters); + SLIST_REMOVE_HEAD(&fm->counters, next); + *ctr = c; + return 0; +} + +static int +enic_fm_action_free(struct enic_flowman *fm, uint64_t handle) +{ + uint64_t args[2]; + int rc; + + ENICPMD_FUNC_TRACE(); + args[0] = FM_ACTION_FREE; + args[1] = handle; + rc = vnic_dev_flowman_cmd(fm->enic->vdev, args, 2); + if (rc) + ENICPMD_LOG(ERR, "cannot free action: rc=%d handle=0x%" PRIx64, + rc, handle); + return rc; +} + +static int +enic_fm_entry_free(struct enic_flowman *fm, uint64_t handle) +{ + uint64_t args[2]; + int rc; + + ENICPMD_FUNC_TRACE(); + args[0] = FM_MATCH_ENTRY_REMOVE; + args[1] = handle; + rc = vnic_dev_flowman_cmd(fm->enic->vdev, args, 2); + if (rc) + ENICPMD_LOG(ERR, "cannot free match entry: rc=%d" + " handle=0x%" PRIx64, rc, handle); + return rc; +} + +static struct enic_fm_jump_flow * +find_jump_flow(struct enic_flowman *fm, uint32_t group) +{ + struct enic_fm_jump_flow *j; + + ENICPMD_FUNC_TRACE(); + TAILQ_FOREACH(j, &fm->jump_list, list) { + if (j->group == group) + return j; + } + return NULL; +} + +static void +remove_jump_flow(struct enic_flowman *fm, struct rte_flow *flow) +{ + struct enic_fm_jump_flow *j; + + ENICPMD_FUNC_TRACE(); + TAILQ_FOREACH(j, &fm->jump_list, list) { + if (j->flow == flow) { + TAILQ_REMOVE(&fm->jump_list, j, list); + free(j); + return; + } + } +} + +static int +save_jump_flow(struct enic_flowman *fm, + struct rte_flow *flow, + uint32_t group, + struct fm_tcam_match_entry *match, + struct fm_action *action) +{ + struct enic_fm_jump_flow *j; + + ENICPMD_FUNC_TRACE(); + j = calloc(1, sizeof(struct enic_fm_jump_flow)); + if (j == NULL) + return -ENOMEM; + j->flow = flow; + j->group = group; + j->match = *match; + j->action = *action; + TAILQ_INSERT_HEAD(&fm->jump_list, j, list); + ENICPMD_LOG(DEBUG, "saved jump flow: flow=%p group=%u", flow, group); + return 0; +} + +static void +__enic_fm_flow_free(struct enic_flowman *fm, struct enic_fm_flow *fm_flow) +{ + if (fm_flow->entry_handle != FM_INVALID_HANDLE) { + enic_fm_entry_free(fm, fm_flow->entry_handle); + fm_flow->entry_handle = FM_INVALID_HANDLE; + } + if (fm_flow->action_handle != FM_INVALID_HANDLE) { + enic_fm_action_free(fm, fm_flow->action_handle); + fm_flow->action_handle = FM_INVALID_HANDLE; + } + enic_fm_counter_free(fm, fm_flow); + if (fm_flow->fet) { + enic_fet_put(fm, fm_flow->fet); + fm_flow->fet = NULL; + } +} + +static void +enic_fm_flow_free(struct enic_flowman *fm, struct rte_flow *flow) +{ + if (flow->fm->fet && flow->fm->fet->default_key) + remove_jump_flow(fm, flow); + __enic_fm_flow_free(fm, flow->fm); + free(flow->fm); + free(flow); +} + +static int 
+enic_fm_add_tcam_entry(struct enic_flowman *fm, + struct fm_tcam_match_entry *match_in, + uint64_t *entry_handle, + uint8_t ingress, + struct rte_flow_error *error) +{ + struct fm_tcam_match_entry *ftm; + uint64_t args[3]; + int ret; + + ENICPMD_FUNC_TRACE(); + /* Copy entry to the command buffer */ + ftm = &fm->cmd.va->fm_tcam_match_entry; + memcpy(ftm, match_in, sizeof(*ftm)); + /* Add TCAM entry */ + args[0] = FM_TCAM_ENTRY_INSTALL; + args[1] = ingress ? fm->ig_tcam_hndl : fm->eg_tcam_hndl; + args[2] = fm->cmd.pa; + ret = vnic_dev_flowman_cmd(fm->enic->vdev, args, 3); + if (ret != 0) { + ENICPMD_LOG(ERR, "cannot add %s TCAM entry: rc=%d", + ingress ? "ingress" : "egress", ret); + rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, "enic: devcmd(tcam-entry-install)"); + return ret; + } + ENICPMD_LOG(DEBUG, "installed %s TCAM entry: handle=0x%" PRIx64, + ingress ? "ingress" : "egress", (uint64_t)args[0]); + *entry_handle = args[0]; + return 0; +} + +static int +enic_fm_add_exact_entry(struct enic_flowman *fm, + struct fm_tcam_match_entry *match_in, + uint64_t *entry_handle, + struct enic_fm_fet *fet, + struct rte_flow_error *error) +{ + struct fm_exact_match_entry *fem; + uint64_t args[3]; + int ret; + + ENICPMD_FUNC_TRACE(); + /* The new entry must have the table's key */ + if (memcmp(fet->key.fk_hdrset, match_in->ftm_mask.fk_hdrset, + sizeof(struct fm_header_set) * FM_HDRSET_MAX)) { + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, NULL, + "enic: key does not match group's key"); + } + + /* Copy entry to the command buffer */ + fem = &fm->cmd.va->fm_exact_match_entry; + /* + * Translate TCAM entry to exact entry. As is only need to drop + * position and mask. The mask is part of the exact match table. + * Position (aka priority) is not supported in the exact match table. + */ + fem->fem_data = match_in->ftm_data; + fem->fem_flags = match_in->ftm_flags; + fem->fem_action = match_in->ftm_action; + fem->fem_counter = match_in->ftm_counter; + + /* Add exact entry */ + args[0] = FM_EXACT_ENTRY_INSTALL; + args[1] = fet->handle; + args[2] = fm->cmd.pa; + ret = vnic_dev_flowman_cmd(fm->enic->vdev, args, 3); + if (ret != 0) { + ENICPMD_LOG(ERR, "cannot add %s exact entry: group=%u", + fet->ingress ? "ingress" : "egress", fet->group); + rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, "enic: devcmd(exact-entry-install)"); + return ret; + } + ENICPMD_LOG(DEBUG, "installed %s exact entry: group=%u" + " handle=0x%" PRIx64, + fet->ingress ? "ingress" : "egress", fet->group, + (uint64_t)args[0]); + *entry_handle = args[0]; + return 0; +} + +/* Push match-action to the NIC. */ +static int +__enic_fm_flow_add_entry(struct enic_flowman *fm, + struct enic_fm_flow *fm_flow, + struct fm_tcam_match_entry *match_in, + struct fm_action *action_in, + uint32_t group, + uint8_t ingress, + struct rte_flow_error *error) +{ + struct enic_fm_counter *ctr; + struct fm_action *fma; + uint64_t action_h; + uint64_t entry_h; + uint64_t args[3]; + int ret; + + ENICPMD_FUNC_TRACE(); + /* Allocate action. 
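+ * The flowman devcmds used below share one calling convention: the
+ * request structure is staged in the DMA-able scratch buffer and the
+ * opcode plus the buffer's bus address go into the args array, e.g.
+ *
+ *   args[0] = FM_ACTION_ALLOC;
+ *   args[1] = fm->cmd.pa;
+ *   ret = vnic_dev_flowman_cmd(fm->enic->vdev, args, 2);
+ *
+ * On success the firmware returns the new object's handle in args[0].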
*/ + fma = &fm->cmd.va->fm_action; + memcpy(fma, action_in, sizeof(*fma)); + args[0] = FM_ACTION_ALLOC; + args[1] = fm->cmd.pa; + ret = vnic_dev_flowman_cmd(fm->enic->vdev, args, 2); + if (ret != 0) { + ENICPMD_LOG(ERR, "allocating TCAM table action rc=%d", ret); + rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, "enic: devcmd(action-alloc)"); + return ret; + } + action_h = args[0]; + fm_flow->action_handle = action_h; + match_in->ftm_action = action_h; + ENICPMD_LOG(DEBUG, "action allocated: handle=0x%" PRIx64, action_h); + + /* Allocate counter if requested. */ + if (match_in->ftm_flags & FMEF_COUNTER) { + ret = enic_fm_counter_alloc(fm, error, &ctr); + if (ret) /* error has been filled in */ + return ret; + fm_flow->counter_valid = true; + fm_flow->counter = ctr; + match_in->ftm_counter = ctr->handle; + } + + /* + * Get the group's table (either TCAM or exact match table) and + * add entry to it. If we use the exact match table, the handler + * will translate the TCAM entry (match_in) to the appropriate + * exact match entry and use that instead. + */ + entry_h = FM_INVALID_HANDLE; + if (group == FM_TCAM_RTE_GROUP) { + ret = enic_fm_add_tcam_entry(fm, match_in, &entry_h, ingress, + error); + if (ret) + return ret; + /* Jump action might have a ref to fet */ + fm_flow->fet = fm->fet; + fm->fet = NULL; + } else { + struct enic_fm_fet *fet = NULL; + + ret = enic_fet_get(fm, group, ingress, + &match_in->ftm_mask, &fet, error); + if (ret) + return ret; + fm_flow->fet = fet; + ret = enic_fm_add_exact_entry(fm, match_in, &entry_h, fet, + error); + if (ret) + return ret; + } + /* Clear counter after adding entry, as it requires in-use counter */ + if (fm_flow->counter_valid) { + ret = enic_fm_counter_zero(fm, fm_flow->counter); + if (ret) + return ret; + } + fm_flow->entry_handle = entry_h; + return 0; +} + +/* Push match-action to the NIC. */ +static struct rte_flow * +enic_fm_flow_add_entry(struct enic_flowman *fm, + struct fm_tcam_match_entry *match_in, + struct fm_action *action_in, + const struct rte_flow_attr *attrs, + struct rte_flow_error *error) +{ + struct enic_fm_flow *fm_flow; + struct rte_flow *flow; + + ENICPMD_FUNC_TRACE(); + enic_fm_dump_tcam_entry(match_in, action_in, attrs->ingress); + flow = calloc(1, sizeof(*flow)); + fm_flow = calloc(1, sizeof(*fm_flow)); + if (flow == NULL || fm_flow == NULL) { + rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE, + NULL, "enic: cannot allocate rte_flow"); + free(flow); + free(fm_flow); + return NULL; + } + flow->fm = fm_flow; + fm_flow->action_handle = FM_INVALID_HANDLE; + fm_flow->entry_handle = FM_INVALID_HANDLE; + if (__enic_fm_flow_add_entry(fm, fm_flow, match_in, action_in, + attrs->group, attrs->ingress, error)) { + enic_fm_flow_free(fm, flow); + return NULL; + } + return flow; +} + +static void +convert_jump_flows(struct enic_flowman *fm, struct enic_fm_fet *fet, + struct rte_flow_error *error) +{ + struct enic_fm_flow *fm_flow; + struct enic_fm_jump_flow *j; + struct fm_action *fma; + uint32_t group; + + ENICPMD_FUNC_TRACE(); + /* + * Find the saved flows that should jump to the new table (fet). + * Then delete the old TCAM entry that jumps to the default table, + * and add a new one that jumps to the new table. 
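+ * For example (hypothetical sequence): a rule created with
+ * "actions jump group 2" while group 2 is still empty is parked on the
+ * jump list and installed against the default table. Once the first rule
+ * in group 2 allocates a real exact match table, the parked flow is
+ * re-installed here with its exact-match op pointing at the new table
+ * handle.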
+ */ + group = fet->group; + j = find_jump_flow(fm, group); + while (j) { + ENICPMD_LOG(DEBUG, "convert jump flow: flow=%p group=%u", + j->flow, group); + /* Delete old entry */ + fm_flow = j->flow->fm; + __enic_fm_flow_free(fm, fm_flow); + + /* Add new entry */ + fma = &j->action; + fma->fma_action_ops[0].exact.handle = fet->handle; + if (__enic_fm_flow_add_entry(fm, fm_flow, &j->match, fma, + FM_TCAM_RTE_GROUP, fet->ingress, error)) { + /* Cannot roll back changes at the moment */ + ENICPMD_LOG(ERR, "cannot convert jump flow: flow=%p", + j->flow); + } else { + fm_flow->fet = fet; + fet->ref++; + ENICPMD_LOG(DEBUG, "convert ok: group=%u ref=%u", + fet->group, fet->ref); + } + + TAILQ_REMOVE(&fm->jump_list, j, list); + free(j); + j = find_jump_flow(fm, group); + } +} + +static void +enic_fm_open_scratch(struct enic_flowman *fm) +{ + fm->action_op_count = 0; + fm->fet = NULL; + memset(&fm->tcam_entry, 0, sizeof(fm->tcam_entry)); + memset(&fm->action, 0, sizeof(fm->action)); +} + +static void +enic_fm_close_scratch(struct enic_flowman *fm) +{ + if (fm->fet) { + enic_fet_put(fm, fm->fet); + fm->fet = NULL; + } + fm->action_op_count = 0; +} + +static int +enic_fm_flow_validate(struct rte_eth_dev *dev, + const struct rte_flow_attr *attrs, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + struct fm_tcam_match_entry *fm_tcam_entry; + struct fm_action *fm_action; + struct enic_flowman *fm; + int ret; + + ENICPMD_FUNC_TRACE(); + fm = pmd_priv(dev)->fm; + if (fm == NULL) + return -ENOTSUP; + enic_fm_open_scratch(fm); + ret = enic_fm_flow_parse(fm, attrs, pattern, actions, error); + if (!ret) { + fm_tcam_entry = &fm->tcam_entry; + fm_action = &fm->action; + enic_fm_dump_tcam_entry(fm_tcam_entry, fm_action, + attrs->ingress); + } + enic_fm_close_scratch(fm); + return ret; +} + +static int +enic_fm_flow_query_count(struct rte_eth_dev *dev, + struct rte_flow *flow, void *data, + struct rte_flow_error *error) +{ + struct rte_flow_query_count *query; + struct enic_fm_flow *fm_flow; + struct enic *enic; + uint64_t args[3]; + int rc; + + ENICPMD_FUNC_TRACE(); + enic = pmd_priv(dev); + query = data; + fm_flow = flow->fm; + if (!fm_flow->counter_valid) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "enic: flow does not have counter"); + + args[0] = FM_COUNTER_QUERY; + args[1] = fm_flow->counter->handle; + args[2] = query->reset; + rc = vnic_dev_flowman_cmd(enic->vdev, args, 3); + if (rc) { + ENICPMD_LOG(ERR, "cannot query counter: rc=%d handle=0x%x", + rc, fm_flow->counter->handle); + return rc; + } + query->hits_set = 1; + query->hits = args[0]; + query->bytes_set = 1; + query->bytes = args[1]; + return 0; +} + +static int +enic_fm_flow_query(struct rte_eth_dev *dev, + struct rte_flow *flow, + const struct rte_flow_action *actions, + void *data, + struct rte_flow_error *error) +{ + int ret = 0; + + ENICPMD_FUNC_TRACE(); + for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { + switch (actions->type) { + case RTE_FLOW_ACTION_TYPE_VOID: + break; + case RTE_FLOW_ACTION_TYPE_COUNT: + ret = enic_fm_flow_query_count(dev, flow, data, error); + break; + default: + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + actions, + "action not supported"); + } + if (ret < 0) + return ret; + } + return 0; +} + +static struct rte_flow * +enic_fm_flow_create(struct rte_eth_dev *dev, + const struct rte_flow_attr *attrs, + const struct rte_flow_item pattern[], + const struct 
rte_flow_action actions[], + struct rte_flow_error *error) +{ + struct fm_tcam_match_entry *fm_tcam_entry; + struct fm_action *fm_action; + struct enic_flowman *fm; + struct enic_fm_fet *fet; + struct rte_flow *flow; + struct enic *enic; + int ret; + + ENICPMD_FUNC_TRACE(); + enic = pmd_priv(dev); + fm = enic->fm; + if (fm == NULL) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "flowman is not initialized"); + return NULL; + } + enic_fm_open_scratch(fm); + flow = NULL; + ret = enic_fm_flow_parse(fm, attrs, pattern, actions, error); + if (ret < 0) + goto error_with_scratch; + fm_tcam_entry = &fm->tcam_entry; + fm_action = &fm->action; + flow = enic_fm_flow_add_entry(fm, fm_tcam_entry, fm_action, + attrs, error); + if (flow) { + LIST_INSERT_HEAD(&enic->flows, flow, next); + fet = flow->fm->fet; + if (fet && fet->default_key) { + /* + * Jump to non-existent group? Save the relevant info + * so we can convert this flow when that group + * materializes. + */ + save_jump_flow(fm, flow, fet->group, + fm_tcam_entry, fm_action); + } else if (fet && fet->ref == 1) { + /* + * A new table is created. Convert the saved flows + * that should jump to this group. + */ + convert_jump_flows(fm, fet, error); + } + } + +error_with_scratch: + enic_fm_close_scratch(fm); + return flow; +} + +static int +enic_fm_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow, + __rte_unused struct rte_flow_error *error) +{ + struct enic *enic = pmd_priv(dev); + + ENICPMD_FUNC_TRACE(); + if (enic->fm == NULL) + return 0; + LIST_REMOVE(flow, next); + enic_fm_flow_free(enic->fm, flow); + return 0; +} + +static int +enic_fm_flow_flush(struct rte_eth_dev *dev, + __rte_unused struct rte_flow_error *error) +{ + struct enic_fm_flow *fm_flow; + struct enic_flowman *fm; + struct rte_flow *flow; + struct enic *enic = pmd_priv(dev); + + ENICPMD_FUNC_TRACE(); + if (enic->fm == NULL) + return 0; + fm = enic->fm; + while (!LIST_EMPTY(&enic->flows)) { + flow = LIST_FIRST(&enic->flows); + fm_flow = flow->fm; + LIST_REMOVE(flow, next); + /* + * If tables are null, then vNIC is closing, and the firmware + * has already cleaned up flowman state. So do not try to free + * resources, as it only causes errors. + */ + if (fm->ig_tcam_hndl == FM_INVALID_HANDLE) { + fm_flow->entry_handle = FM_INVALID_HANDLE; + fm_flow->action_handle = FM_INVALID_HANDLE; + fm_flow->fet = NULL; + } + enic_fm_flow_free(fm, flow); + } + return 0; +} + +static int +enic_fm_tbl_free(struct enic_flowman *fm, uint64_t handle) +{ + uint64_t args[2]; + int rc; + + args[0] = FM_MATCH_TABLE_FREE; + args[1] = handle; + rc = vnic_dev_flowman_cmd(fm->enic->vdev, args, 2); + if (rc) + ENICPMD_LOG(ERR, "cannot free table: rc=%d handle=0x%" PRIx64, + rc, handle); + return rc; +} + +static int +enic_fm_tcam_tbl_alloc(struct enic_flowman *fm, uint32_t direction, + uint32_t max_entries, uint64_t *handle) +{ + struct fm_tcam_match_table *tcam_tbl; + struct enic *enic; + uint64_t args[2]; + int rc; + + ENICPMD_FUNC_TRACE(); + enic = fm->enic; + tcam_tbl = &fm->cmd.va->fm_tcam_match_table; + tcam_tbl->ftt_direction = direction; + tcam_tbl->ftt_stage = FM_STAGE_LAST; + tcam_tbl->ftt_max_entries = max_entries; + args[0] = FM_TCAM_TABLE_ALLOC; + args[1] = fm->cmd.pa; + rc = vnic_dev_flowman_cmd(enic->vdev, args, 2); + if (rc) { + ENICPMD_LOG(ERR, "cannot alloc %s TCAM table: rc=%d", + (direction == FM_INGRESS) ? 
"IG" : "EG", rc); + return rc; + } + *handle = args[0]; + ENICPMD_LOG(DEBUG, "%s TCAM table allocated, handle=0x%" PRIx64, + (direction == FM_INGRESS) ? "IG" : "EG", *handle); + return 0; +} + +static int +enic_fm_init_counters(struct enic_flowman *fm) +{ + ENICPMD_FUNC_TRACE(); + SLIST_INIT(&fm->counters); + return enic_fm_more_counters(fm); +} + +static void +enic_fm_free_all_counters(struct enic_flowman *fm) +{ + struct enic *enic; + uint64_t args[2]; + int rc; + + enic = fm->enic; + args[0] = FM_COUNTER_BRK; + args[1] = 0; + rc = vnic_dev_flowman_cmd(enic->vdev, args, 2); + if (rc != 0) + ENICPMD_LOG(ERR, "cannot free counters: rc=%d", rc); + rte_free(fm->counter_stack); +} + +static int +enic_fm_alloc_tcam_tables(struct enic_flowman *fm) +{ + int rc; + + ENICPMD_FUNC_TRACE(); + rc = enic_fm_tcam_tbl_alloc(fm, FM_INGRESS, FM_MAX_TCAM_TABLE_SIZE, + &fm->ig_tcam_hndl); + if (rc) + return rc; + rc = enic_fm_tcam_tbl_alloc(fm, FM_EGRESS, FM_MAX_TCAM_TABLE_SIZE, + &fm->eg_tcam_hndl); + return rc; +} + +static void +enic_fm_free_tcam_tables(struct enic_flowman *fm) +{ + ENICPMD_FUNC_TRACE(); + if (fm->ig_tcam_hndl) { + ENICPMD_LOG(DEBUG, "free IG TCAM table handle=0x%" PRIx64, + fm->ig_tcam_hndl); + enic_fm_tbl_free(fm, fm->ig_tcam_hndl); + fm->ig_tcam_hndl = FM_INVALID_HANDLE; + } + if (fm->eg_tcam_hndl) { + ENICPMD_LOG(DEBUG, "free EG TCAM table handle=0x%" PRIx64, + fm->eg_tcam_hndl); + enic_fm_tbl_free(fm, fm->eg_tcam_hndl); + fm->eg_tcam_hndl = FM_INVALID_HANDLE; + } +} + +int +enic_fm_init(struct enic *enic) +{ + struct enic_flowman *fm; + uint8_t name[RTE_MEMZONE_NAMESIZE]; + int rc; + + if (enic->flow_filter_mode != FILTER_FLOWMAN) + return 0; + ENICPMD_FUNC_TRACE(); + fm = calloc(1, sizeof(*fm)); + if (fm == NULL) { + ENICPMD_LOG(ERR, "cannot alloc flowman struct"); + return -ENOMEM; + } + fm->enic = enic; + TAILQ_INIT(&fm->fet_list); + TAILQ_INIT(&fm->jump_list); + /* Allocate host memory for flowman commands */ + snprintf((char *)name, sizeof(name), "fm-cmd-%s", enic->bdf_name); + fm->cmd.va = enic_alloc_consistent(enic, + sizeof(union enic_flowman_cmd_mem), &fm->cmd.pa, name); + if (!fm->cmd.va) { + ENICPMD_LOG(ERR, "cannot allocate flowman command memory"); + rc = -ENOMEM; + goto error_fm; + } + /* Allocate TCAM tables upfront as they are the main tables */ + rc = enic_fm_alloc_tcam_tables(fm); + if (rc) { + ENICPMD_LOG(ERR, "cannot alloc TCAM tables"); + goto error_cmd; + } + /* Then a number of counters */ + rc = enic_fm_init_counters(fm); + if (rc) { + ENICPMD_LOG(ERR, "cannot alloc counters"); + goto error_tables; + } + /* + * One default exact match table for each direction. We hold onto + * it until close. 
+ */ + rc = enic_fet_alloc(fm, 1, NULL, 128, &fm->default_ig_fet); + if (rc) { + ENICPMD_LOG(ERR, "cannot alloc default IG exact match table"); + goto error_counters; + } + fm->default_ig_fet->ref = 1; + rc = enic_fet_alloc(fm, 0, NULL, 128, &fm->default_eg_fet); + if (rc) { + ENICPMD_LOG(ERR, "cannot alloc default EG exact match table"); + goto error_ig_fet; + } + fm->default_eg_fet->ref = 1; + enic->fm = fm; + return 0; + +error_ig_fet: + enic_fet_free(fm, fm->default_ig_fet); +error_counters: + enic_fm_free_all_counters(fm); +error_tables: + enic_fm_free_tcam_tables(fm); +error_cmd: + enic_free_consistent(enic, sizeof(union enic_flowman_cmd_mem), + fm->cmd.va, fm->cmd.pa); +error_fm: + free(fm); + return rc; +} + +void +enic_fm_destroy(struct enic *enic) +{ + struct enic_flowman *fm; + struct enic_fm_fet *fet; + + if (enic->fm == NULL) + return; + ENICPMD_FUNC_TRACE(); + fm = enic->fm; + enic_fet_free(fm, fm->default_eg_fet); + enic_fet_free(fm, fm->default_ig_fet); + /* Free all exact match tables still open */ + while (!TAILQ_EMPTY(&fm->fet_list)) { + fet = TAILQ_FIRST(&fm->fet_list); + enic_fet_free(fm, fet); + } + enic_fm_free_tcam_tables(fm); + enic_fm_free_all_counters(fm); + enic_free_consistent(enic, sizeof(union enic_flowman_cmd_mem), + fm->cmd.va, fm->cmd.pa); + fm->cmd.va = NULL; + free(fm); + enic->fm = NULL; +} + +const struct rte_flow_ops enic_fm_flow_ops = { + .validate = enic_fm_flow_validate, + .create = enic_fm_flow_create, + .destroy = enic_fm_flow_destroy, + .flush = enic_fm_flow_flush, + .query = enic_fm_flow_query, +}; diff --git a/src/spdk/dpdk/drivers/net/enic/enic_main.c b/src/spdk/dpdk/drivers/net/enic/enic_main.c new file mode 100644 index 000000000..7942b0df6 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/enic/enic_main.c @@ -0,0 +1,1882 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. 
+ */ + +#include <stdio.h> + +#include <sys/stat.h> +#include <sys/mman.h> +#include <fcntl.h> + +#include <rte_pci.h> +#include <rte_bus_pci.h> +#include <rte_memzone.h> +#include <rte_malloc.h> +#include <rte_mbuf.h> +#include <rte_string_fns.h> +#include <rte_ethdev_driver.h> + +#include "enic_compat.h" +#include "enic.h" +#include "wq_enet_desc.h" +#include "rq_enet_desc.h" +#include "cq_enet_desc.h" +#include "vnic_enet.h" +#include "vnic_dev.h" +#include "vnic_wq.h" +#include "vnic_rq.h" +#include "vnic_cq.h" +#include "vnic_intr.h" +#include "vnic_nic.h" + +static inline int enic_is_sriov_vf(struct enic *enic) +{ + return enic->pdev->id.device_id == PCI_DEVICE_ID_CISCO_VIC_ENET_VF; +} + +static int is_zero_addr(uint8_t *addr) +{ + return !(addr[0] | addr[1] | addr[2] | addr[3] | addr[4] | addr[5]); +} + +static int is_mcast_addr(uint8_t *addr) +{ + return addr[0] & 1; +} + +static int is_eth_addr_valid(uint8_t *addr) +{ + return !is_mcast_addr(addr) && !is_zero_addr(addr); +} + +static void +enic_rxmbuf_queue_release(__rte_unused struct enic *enic, struct vnic_rq *rq) +{ + uint16_t i; + + if (!rq || !rq->mbuf_ring) { + dev_debug(enic, "Pointer to rq or mbuf_ring is NULL"); + return; + } + + for (i = 0; i < rq->ring.desc_count; i++) { + if (rq->mbuf_ring[i]) { + rte_pktmbuf_free_seg(rq->mbuf_ring[i]); + rq->mbuf_ring[i] = NULL; + } + } +} + +static void enic_free_wq_buf(struct rte_mbuf **buf) +{ + struct rte_mbuf *mbuf = *buf; + + rte_pktmbuf_free_seg(mbuf); + *buf = NULL; +} + +static void enic_log_q_error(struct enic *enic) +{ + unsigned int i; + uint32_t error_status; + + for (i = 0; i < enic->wq_count; i++) { + error_status = vnic_wq_error_status(&enic->wq[i]); + if (error_status) + dev_err(enic, "WQ[%d] error_status %d\n", i, + error_status); + } + + for (i = 0; i < enic_vnic_rq_count(enic); i++) { + if (!enic->rq[i].in_use) + continue; + error_status = vnic_rq_error_status(&enic->rq[i]); + if (error_status) + dev_err(enic, "RQ[%d] error_status %d\n", i, + error_status); + } +} + +static void enic_clear_soft_stats(struct enic *enic) +{ + struct enic_soft_stats *soft_stats = &enic->soft_stats; + rte_atomic64_clear(&soft_stats->rx_nombuf); + rte_atomic64_clear(&soft_stats->rx_packet_errors); + rte_atomic64_clear(&soft_stats->tx_oversized); +} + +static void enic_init_soft_stats(struct enic *enic) +{ + struct enic_soft_stats *soft_stats = &enic->soft_stats; + rte_atomic64_init(&soft_stats->rx_nombuf); + rte_atomic64_init(&soft_stats->rx_packet_errors); + rte_atomic64_init(&soft_stats->tx_oversized); + enic_clear_soft_stats(enic); +} + +int enic_dev_stats_clear(struct enic *enic) +{ + int ret; + + ret = vnic_dev_stats_clear(enic->vdev); + if (ret != 0) { + dev_err(enic, "Error in clearing stats\n"); + return ret; + } + enic_clear_soft_stats(enic); + + return 0; +} + +int enic_dev_stats_get(struct enic *enic, struct rte_eth_stats *r_stats) +{ + struct vnic_stats *stats; + struct enic_soft_stats *soft_stats = &enic->soft_stats; + int64_t rx_truncated; + uint64_t rx_packet_errors; + int ret = vnic_dev_stats_dump(enic->vdev, &stats); + + if (ret) { + dev_err(enic, "Error in getting stats\n"); + return ret; + } + + /* The number of truncated packets can only be calculated by + * subtracting a hardware counter from error packets received by + * the driver. Note: this causes transient inaccuracies in the + * ipackets count. Also, the length of truncated packets is + * counted in ibytes even though truncated packets are dropped, + * which can make ibytes slightly higher than it should be. 
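+ * A small worked example with illustrative numbers: if the driver's soft
+ * rx_packet_errors count is 10 and the hardware reports rx_errors = 7,
+ * then rx_truncated = 3, so 3 is subtracted from rx_frames_ok for
+ * ipackets and added to rx_no_bufs for imissed.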
+ */ + rx_packet_errors = rte_atomic64_read(&soft_stats->rx_packet_errors); + rx_truncated = rx_packet_errors - stats->rx.rx_errors; + + r_stats->ipackets = stats->rx.rx_frames_ok - rx_truncated; + r_stats->opackets = stats->tx.tx_frames_ok; + + r_stats->ibytes = stats->rx.rx_bytes_ok; + r_stats->obytes = stats->tx.tx_bytes_ok; + + r_stats->ierrors = stats->rx.rx_errors + stats->rx.rx_drop; + r_stats->oerrors = stats->tx.tx_errors + + rte_atomic64_read(&soft_stats->tx_oversized); + + r_stats->imissed = stats->rx.rx_no_bufs + rx_truncated; + + r_stats->rx_nombuf = rte_atomic64_read(&soft_stats->rx_nombuf); + return 0; +} + +int enic_del_mac_address(struct enic *enic, int mac_index) +{ + struct rte_eth_dev *eth_dev = enic->rte_dev; + uint8_t *mac_addr = eth_dev->data->mac_addrs[mac_index].addr_bytes; + + return vnic_dev_del_addr(enic->vdev, mac_addr); +} + +int enic_set_mac_address(struct enic *enic, uint8_t *mac_addr) +{ + int err; + + if (!is_eth_addr_valid(mac_addr)) { + dev_err(enic, "invalid mac address\n"); + return -EINVAL; + } + + err = vnic_dev_add_addr(enic->vdev, mac_addr); + if (err) + dev_err(enic, "add mac addr failed\n"); + return err; +} + +static void +enic_free_rq_buf(struct rte_mbuf **mbuf) +{ + if (*mbuf == NULL) + return; + + rte_pktmbuf_free(*mbuf); + *mbuf = NULL; +} + +void enic_init_vnic_resources(struct enic *enic) +{ + unsigned int error_interrupt_enable = 1; + unsigned int error_interrupt_offset = 0; + unsigned int rxq_interrupt_enable = 0; + unsigned int rxq_interrupt_offset = ENICPMD_RXQ_INTR_OFFSET; + unsigned int index = 0; + unsigned int cq_idx; + struct vnic_rq *data_rq; + + if (enic->rte_dev->data->dev_conf.intr_conf.rxq) + rxq_interrupt_enable = 1; + + for (index = 0; index < enic->rq_count; index++) { + cq_idx = enic_cq_rq(enic, enic_rte_rq_idx_to_sop_idx(index)); + + vnic_rq_init(&enic->rq[enic_rte_rq_idx_to_sop_idx(index)], + cq_idx, + error_interrupt_enable, + error_interrupt_offset); + + data_rq = &enic->rq[enic_rte_rq_idx_to_data_idx(index, enic)]; + if (data_rq->in_use) + vnic_rq_init(data_rq, + cq_idx, + error_interrupt_enable, + error_interrupt_offset); + vnic_cq_init(&enic->cq[cq_idx], + 0 /* flow_control_enable */, + 1 /* color_enable */, + 0 /* cq_head */, + 0 /* cq_tail */, + 1 /* cq_tail_color */, + rxq_interrupt_enable, + 1 /* cq_entry_enable */, + 0 /* cq_message_enable */, + rxq_interrupt_offset, + 0 /* cq_message_addr */); + if (rxq_interrupt_enable) + rxq_interrupt_offset++; + } + + for (index = 0; index < enic->wq_count; index++) { + vnic_wq_init(&enic->wq[index], + enic_cq_wq(enic, index), + error_interrupt_enable, + error_interrupt_offset); + /* Compute unsupported ol flags for enic_prep_pkts() */ + enic->wq[index].tx_offload_notsup_mask = + PKT_TX_OFFLOAD_MASK ^ enic->tx_offload_mask; + + cq_idx = enic_cq_wq(enic, index); + vnic_cq_init(&enic->cq[cq_idx], + 0 /* flow_control_enable */, + 1 /* color_enable */, + 0 /* cq_head */, + 0 /* cq_tail */, + 1 /* cq_tail_color */, + 0 /* interrupt_enable */, + 0 /* cq_entry_enable */, + 1 /* cq_message_enable */, + 0 /* interrupt offset */, + (uint64_t)enic->wq[index].cqmsg_rz->iova); + } + + for (index = 0; index < enic->intr_count; index++) { + vnic_intr_init(&enic->intr[index], + enic->config.intr_timer_usec, + enic->config.intr_timer_type, + /*mask_on_assertion*/1); + } +} + + +static int +enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq) +{ + struct rte_mbuf *mb; + struct rq_enet_desc *rqd = rq->ring.descs; + unsigned i; + dma_addr_t dma_addr; + uint32_t max_rx_pkt_len; + 
uint16_t rq_buf_len; + + if (!rq->in_use) + return 0; + + dev_debug(enic, "queue %u, allocating %u rx queue mbufs\n", rq->index, + rq->ring.desc_count); + + /* + * If *not* using scatter and the mbuf size is greater than the + * requested max packet size (max_rx_pkt_len), then reduce the + * posted buffer size to max_rx_pkt_len. HW still receives packets + * larger than max_rx_pkt_len, but they will be truncated, which we + * drop in the rx handler. Not ideal, but better than returning + * large packets when the user is not expecting them. + */ + max_rx_pkt_len = enic->rte_dev->data->dev_conf.rxmode.max_rx_pkt_len; + rq_buf_len = rte_pktmbuf_data_room_size(rq->mp) - RTE_PKTMBUF_HEADROOM; + if (max_rx_pkt_len < rq_buf_len && !rq->data_queue_enable) + rq_buf_len = max_rx_pkt_len; + for (i = 0; i < rq->ring.desc_count; i++, rqd++) { + mb = rte_mbuf_raw_alloc(rq->mp); + if (mb == NULL) { + dev_err(enic, "RX mbuf alloc failed queue_id=%u\n", + (unsigned)rq->index); + return -ENOMEM; + } + + mb->data_off = RTE_PKTMBUF_HEADROOM; + dma_addr = (dma_addr_t)(mb->buf_iova + + RTE_PKTMBUF_HEADROOM); + rq_enet_desc_enc(rqd, dma_addr, + (rq->is_sop ? RQ_ENET_TYPE_ONLY_SOP + : RQ_ENET_TYPE_NOT_SOP), + rq_buf_len); + rq->mbuf_ring[i] = mb; + } + /* + * Do not post the buffers to the NIC until we enable the RQ via + * enic_start_rq(). + */ + rq->need_initial_post = true; + /* Initialize fetch index while RQ is disabled */ + iowrite32(0, &rq->ctrl->fetch_index); + return 0; +} + +/* + * Post the Rx buffers for the first time. enic_alloc_rx_queue_mbufs() has + * allocated the buffers and filled the RQ descriptor ring. Just need to push + * the post index to the NIC. + */ +static void +enic_initial_post_rx(struct enic *enic, struct vnic_rq *rq) +{ + if (!rq->in_use || !rq->need_initial_post) + return; + + /* make sure all prior writes are complete before doing the PIO write */ + rte_rmb(); + + /* Post all but the last buffer to VIC. 
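+ * Holding one descriptor back is presumably the usual ring convention
+ * that keeps a completely full ring distinguishable from an empty one.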
*/ + rq->posted_index = rq->ring.desc_count - 1; + + rq->rx_nb_hold = 0; + + dev_debug(enic, "port=%u, qidx=%u, Write %u posted idx, %u sw held\n", + enic->port_id, rq->index, rq->posted_index, rq->rx_nb_hold); + iowrite32(rq->posted_index, &rq->ctrl->posted_index); + rte_rmb(); + rq->need_initial_post = false; +} + +void * +enic_alloc_consistent(void *priv, size_t size, + dma_addr_t *dma_handle, uint8_t *name) +{ + void *vaddr; + const struct rte_memzone *rz; + *dma_handle = 0; + struct enic *enic = (struct enic *)priv; + struct enic_memzone_entry *mze; + + rz = rte_memzone_reserve_aligned((const char *)name, size, + SOCKET_ID_ANY, RTE_MEMZONE_IOVA_CONTIG, ENIC_PAGE_SIZE); + if (!rz) { + pr_err("%s : Failed to allocate memory requested for %s\n", + __func__, name); + return NULL; + } + + vaddr = rz->addr; + *dma_handle = (dma_addr_t)rz->iova; + + mze = rte_malloc("enic memzone entry", + sizeof(struct enic_memzone_entry), 0); + + if (!mze) { + pr_err("%s : Failed to allocate memory for memzone list\n", + __func__); + rte_memzone_free(rz); + return NULL; + } + + mze->rz = rz; + + rte_spinlock_lock(&enic->memzone_list_lock); + LIST_INSERT_HEAD(&enic->memzone_list, mze, entries); + rte_spinlock_unlock(&enic->memzone_list_lock); + + return vaddr; +} + +void +enic_free_consistent(void *priv, + __rte_unused size_t size, + void *vaddr, + dma_addr_t dma_handle) +{ + struct enic_memzone_entry *mze; + struct enic *enic = (struct enic *)priv; + + rte_spinlock_lock(&enic->memzone_list_lock); + LIST_FOREACH(mze, &enic->memzone_list, entries) { + if (mze->rz->addr == vaddr && + mze->rz->iova == dma_handle) + break; + } + if (mze == NULL) { + rte_spinlock_unlock(&enic->memzone_list_lock); + dev_warning(enic, + "Tried to free memory, but couldn't find it in the memzone list\n"); + return; + } + LIST_REMOVE(mze, entries); + rte_spinlock_unlock(&enic->memzone_list_lock); + rte_memzone_free(mze->rz); + rte_free(mze); +} + +int enic_link_update(struct rte_eth_dev *eth_dev) +{ + struct enic *enic = pmd_priv(eth_dev); + struct rte_eth_link link; + + memset(&link, 0, sizeof(link)); + link.link_status = enic_get_link_status(enic); + link.link_duplex = ETH_LINK_FULL_DUPLEX; + link.link_speed = vnic_dev_port_speed(enic->vdev); + + return rte_eth_linkstatus_set(eth_dev, &link); +} + +static void +enic_intr_handler(void *arg) +{ + struct rte_eth_dev *dev = (struct rte_eth_dev *)arg; + struct enic *enic = pmd_priv(dev); + + vnic_intr_return_all_credits(&enic->intr[ENICPMD_LSC_INTR_OFFSET]); + + enic_link_update(dev); + _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL); + enic_log_q_error(enic); + /* Re-enable irq in case of INTx */ + rte_intr_ack(&enic->pdev->intr_handle); +} + +static int enic_rxq_intr_init(struct enic *enic) +{ + struct rte_intr_handle *intr_handle; + uint32_t rxq_intr_count, i; + int err; + + intr_handle = enic->rte_dev->intr_handle; + if (!enic->rte_dev->data->dev_conf.intr_conf.rxq) + return 0; + /* + * Rx queue interrupts only work when we have MSI-X interrupts, + * one per queue. Sharing one interrupt is technically + * possible with VIC, but it is not worth the complications it brings. 
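+ * Below, Rx queue i is mapped to MSI-X vector i + ENICPMD_RXQ_INTR_OFFSET;
+ * assuming that offset is 1, vector 0 remains dedicated to link and
+ * error events.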
+ */ + if (!rte_intr_cap_multiple(intr_handle)) { + dev_err(enic, "Rx queue interrupts require MSI-X interrupts" + " (vfio-pci driver)\n"); + return -ENOTSUP; + } + rxq_intr_count = enic->intr_count - ENICPMD_RXQ_INTR_OFFSET; + err = rte_intr_efd_enable(intr_handle, rxq_intr_count); + if (err) { + dev_err(enic, "Failed to enable event fds for Rx queue" + " interrupts\n"); + return err; + } + intr_handle->intr_vec = rte_zmalloc("enic_intr_vec", + rxq_intr_count * sizeof(int), 0); + if (intr_handle->intr_vec == NULL) { + dev_err(enic, "Failed to allocate intr_vec\n"); + return -ENOMEM; + } + for (i = 0; i < rxq_intr_count; i++) + intr_handle->intr_vec[i] = i + ENICPMD_RXQ_INTR_OFFSET; + return 0; +} + +static void enic_rxq_intr_deinit(struct enic *enic) +{ + struct rte_intr_handle *intr_handle; + + intr_handle = enic->rte_dev->intr_handle; + rte_intr_efd_disable(intr_handle); + if (intr_handle->intr_vec != NULL) { + rte_free(intr_handle->intr_vec); + intr_handle->intr_vec = NULL; + } +} + +static void enic_prep_wq_for_simple_tx(struct enic *enic, uint16_t queue_idx) +{ + struct wq_enet_desc *desc; + struct vnic_wq *wq; + unsigned int i; + + /* + * Fill WQ descriptor fields that never change. Every descriptor is + * one packet, so set EOP. Also set CQ_ENTRY every ENIC_WQ_CQ_THRESH + * descriptors (i.e. request one completion update every 32 packets). + */ + wq = &enic->wq[queue_idx]; + desc = (struct wq_enet_desc *)wq->ring.descs; + for (i = 0; i < wq->ring.desc_count; i++, desc++) { + desc->header_length_flags = 1 << WQ_ENET_FLAGS_EOP_SHIFT; + if (i % ENIC_WQ_CQ_THRESH == ENIC_WQ_CQ_THRESH - 1) + desc->header_length_flags |= + (1 << WQ_ENET_FLAGS_CQ_ENTRY_SHIFT); + } +} + +/* + * The 'strong' version is in enic_rxtx_vec_avx2.c. This weak version is used + * used when that file is not compiled. + */ +__rte_weak bool +enic_use_vector_rx_handler(__rte_unused struct rte_eth_dev *eth_dev) +{ + return false; +} + +void enic_pick_rx_handler(struct rte_eth_dev *eth_dev) +{ + struct enic *enic = pmd_priv(eth_dev); + + /* + * Preference order: + * 1. The vectorized handler if possible and requested. + * 2. The non-scatter, simplified handler if scatter Rx is not used. + * 3. The default handler as a fallback. + */ + if (enic_use_vector_rx_handler(eth_dev)) + return; + if (enic->rq_count > 0 && enic->rq[0].data_queue_enable == 0) { + ENICPMD_LOG(DEBUG, " use the non-scatter Rx handler"); + eth_dev->rx_pkt_burst = &enic_noscatter_recv_pkts; + } else { + ENICPMD_LOG(DEBUG, " use the normal Rx handler"); + eth_dev->rx_pkt_burst = &enic_recv_pkts; + } +} + +/* Secondary process uses this to set the Tx handler */ +void enic_pick_tx_handler(struct rte_eth_dev *eth_dev) +{ + struct enic *enic = pmd_priv(eth_dev); + + if (enic->use_simple_tx_handler) { + ENICPMD_LOG(DEBUG, " use the simple tx handler"); + eth_dev->tx_pkt_burst = &enic_simple_xmit_pkts; + } else { + ENICPMD_LOG(DEBUG, " use the default tx handler"); + eth_dev->tx_pkt_burst = &enic_xmit_pkts; + } +} + +int enic_enable(struct enic *enic) +{ + unsigned int index; + int err; + struct rte_eth_dev *eth_dev = enic->rte_dev; + uint64_t simple_tx_offloads; + uintptr_t p; + + if (enic->enable_avx2_rx) { + struct rte_mbuf mb_def = { .buf_addr = 0 }; + + /* + * mbuf_initializer contains const-after-init fields of + * receive mbufs (i.e. 64 bits of fields from rearm_data). + * It is currently used by the vectorized handler. 
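+ * Snapshotting rearm_data as one 64-bit value lets that handler restore
+ * data_off, refcnt, nb_segs and port with a single store per mbuf.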
+ */ + mb_def.nb_segs = 1; + mb_def.data_off = RTE_PKTMBUF_HEADROOM; + mb_def.port = enic->port_id; + rte_mbuf_refcnt_set(&mb_def, 1); + rte_compiler_barrier(); + p = (uintptr_t)&mb_def.rearm_data; + enic->mbuf_initializer = *(uint64_t *)p; + } + + eth_dev->data->dev_link.link_speed = vnic_dev_port_speed(enic->vdev); + eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX; + + /* vnic notification of link status has already been turned on in + * enic_dev_init() which is called during probe time. Here we are + * just turning on interrupt vector 0 if needed. + */ + if (eth_dev->data->dev_conf.intr_conf.lsc) + vnic_dev_notify_set(enic->vdev, 0); + + err = enic_rxq_intr_init(enic); + if (err) + return err; + if (enic_clsf_init(enic)) + dev_warning(enic, "Init of hash table for clsf failed."\ + "Flow director feature will not work\n"); + + if (enic_fm_init(enic)) + dev_warning(enic, "Init of flowman failed.\n"); + + for (index = 0; index < enic->rq_count; index++) { + err = enic_alloc_rx_queue_mbufs(enic, + &enic->rq[enic_rte_rq_idx_to_sop_idx(index)]); + if (err) { + dev_err(enic, "Failed to alloc sop RX queue mbufs\n"); + return err; + } + err = enic_alloc_rx_queue_mbufs(enic, + &enic->rq[enic_rte_rq_idx_to_data_idx(index, enic)]); + if (err) { + /* release the allocated mbufs for the sop rq*/ + enic_rxmbuf_queue_release(enic, + &enic->rq[enic_rte_rq_idx_to_sop_idx(index)]); + + dev_err(enic, "Failed to alloc data RX queue mbufs\n"); + return err; + } + } + + /* + * Use the simple TX handler if possible. Only checksum offloads + * and vlan insertion are supported. + */ + simple_tx_offloads = enic->tx_offload_capa & + (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | + DEV_TX_OFFLOAD_VLAN_INSERT | + DEV_TX_OFFLOAD_IPV4_CKSUM | + DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_TCP_CKSUM); + if ((eth_dev->data->dev_conf.txmode.offloads & + ~simple_tx_offloads) == 0) { + ENICPMD_LOG(DEBUG, " use the simple tx handler"); + eth_dev->tx_pkt_burst = &enic_simple_xmit_pkts; + for (index = 0; index < enic->wq_count; index++) + enic_prep_wq_for_simple_tx(enic, index); + enic->use_simple_tx_handler = 1; + } else { + ENICPMD_LOG(DEBUG, " use the default tx handler"); + eth_dev->tx_pkt_burst = &enic_xmit_pkts; + } + + enic_pick_rx_handler(eth_dev); + + for (index = 0; index < enic->wq_count; index++) + enic_start_wq(enic, index); + for (index = 0; index < enic->rq_count; index++) + enic_start_rq(enic, index); + + vnic_dev_add_addr(enic->vdev, enic->mac_addr); + + vnic_dev_enable_wait(enic->vdev); + + /* Register and enable error interrupt */ + rte_intr_callback_register(&(enic->pdev->intr_handle), + enic_intr_handler, (void *)enic->rte_dev); + + rte_intr_enable(&(enic->pdev->intr_handle)); + /* Unmask LSC interrupt */ + vnic_intr_unmask(&enic->intr[ENICPMD_LSC_INTR_OFFSET]); + + return 0; +} + +int enic_alloc_intr_resources(struct enic *enic) +{ + int err; + unsigned int i; + + dev_info(enic, "vNIC resources used: "\ + "wq %d rq %d cq %d intr %d\n", + enic->wq_count, enic_vnic_rq_count(enic), + enic->cq_count, enic->intr_count); + + for (i = 0; i < enic->intr_count; i++) { + err = vnic_intr_alloc(enic->vdev, &enic->intr[i], i); + if (err) { + enic_free_vnic_resources(enic); + return err; + } + } + return 0; +} + +void enic_free_rq(void *rxq) +{ + struct vnic_rq *rq_sop, *rq_data; + struct enic *enic; + + if (rxq == NULL) + return; + + rq_sop = (struct vnic_rq *)rxq; + enic = vnic_dev_priv(rq_sop->vdev); + rq_data = &enic->rq[rq_sop->data_queue_idx]; + + if (rq_sop->free_mbufs) { + struct rte_mbuf **mb; + int i; + + mb = 
rq_sop->free_mbufs; + for (i = ENIC_RX_BURST_MAX - rq_sop->num_free_mbufs; + i < ENIC_RX_BURST_MAX; i++) + rte_pktmbuf_free(mb[i]); + rte_free(rq_sop->free_mbufs); + rq_sop->free_mbufs = NULL; + rq_sop->num_free_mbufs = 0; + } + + enic_rxmbuf_queue_release(enic, rq_sop); + if (rq_data->in_use) + enic_rxmbuf_queue_release(enic, rq_data); + + rte_free(rq_sop->mbuf_ring); + if (rq_data->in_use) + rte_free(rq_data->mbuf_ring); + + rq_sop->mbuf_ring = NULL; + rq_data->mbuf_ring = NULL; + + vnic_rq_free(rq_sop); + if (rq_data->in_use) + vnic_rq_free(rq_data); + + vnic_cq_free(&enic->cq[enic_sop_rq_idx_to_cq_idx(rq_sop->index)]); + + rq_sop->in_use = 0; + rq_data->in_use = 0; +} + +void enic_start_wq(struct enic *enic, uint16_t queue_idx) +{ + struct rte_eth_dev_data *data = enic->dev_data; + vnic_wq_enable(&enic->wq[queue_idx]); + data->tx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STARTED; +} + +int enic_stop_wq(struct enic *enic, uint16_t queue_idx) +{ + struct rte_eth_dev_data *data = enic->dev_data; + int ret; + + ret = vnic_wq_disable(&enic->wq[queue_idx]); + if (ret) + return ret; + + data->tx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STOPPED; + return 0; +} + +void enic_start_rq(struct enic *enic, uint16_t queue_idx) +{ + struct rte_eth_dev_data *data = enic->dev_data; + struct vnic_rq *rq_sop; + struct vnic_rq *rq_data; + rq_sop = &enic->rq[enic_rte_rq_idx_to_sop_idx(queue_idx)]; + rq_data = &enic->rq[rq_sop->data_queue_idx]; + + if (rq_data->in_use) { + vnic_rq_enable(rq_data); + enic_initial_post_rx(enic, rq_data); + } + rte_mb(); + vnic_rq_enable(rq_sop); + enic_initial_post_rx(enic, rq_sop); + data->rx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STARTED; +} + +int enic_stop_rq(struct enic *enic, uint16_t queue_idx) +{ + struct rte_eth_dev_data *data = enic->dev_data; + int ret1 = 0, ret2 = 0; + struct vnic_rq *rq_sop; + struct vnic_rq *rq_data; + rq_sop = &enic->rq[enic_rte_rq_idx_to_sop_idx(queue_idx)]; + rq_data = &enic->rq[rq_sop->data_queue_idx]; + + ret2 = vnic_rq_disable(rq_sop); + rte_mb(); + if (rq_data->in_use) + ret1 = vnic_rq_disable(rq_data); + + if (ret2) + return ret2; + else if (ret1) + return ret1; + + data->rx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STOPPED; + return 0; +} + +int enic_alloc_rq(struct enic *enic, uint16_t queue_idx, + unsigned int socket_id, struct rte_mempool *mp, + uint16_t nb_desc, uint16_t free_thresh) +{ + int rc; + uint16_t sop_queue_idx = enic_rte_rq_idx_to_sop_idx(queue_idx); + uint16_t data_queue_idx = enic_rte_rq_idx_to_data_idx(queue_idx, enic); + struct vnic_rq *rq_sop = &enic->rq[sop_queue_idx]; + struct vnic_rq *rq_data = &enic->rq[data_queue_idx]; + unsigned int mbuf_size, mbufs_per_pkt; + unsigned int nb_sop_desc, nb_data_desc; + uint16_t min_sop, max_sop, min_data, max_data; + uint32_t max_rx_pkt_len; + + rq_sop->is_sop = 1; + rq_sop->data_queue_idx = data_queue_idx; + rq_data->is_sop = 0; + rq_data->data_queue_idx = 0; + rq_sop->socket_id = socket_id; + rq_sop->mp = mp; + rq_data->socket_id = socket_id; + rq_data->mp = mp; + rq_sop->in_use = 1; + rq_sop->rx_free_thresh = free_thresh; + rq_data->rx_free_thresh = free_thresh; + dev_debug(enic, "Set queue_id:%u free thresh:%u\n", queue_idx, + free_thresh); + + mbuf_size = (uint16_t)(rte_pktmbuf_data_room_size(mp) - + RTE_PKTMBUF_HEADROOM); + /* max_rx_pkt_len includes the ethernet header and CRC. 
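+ * As an illustration (hypothetical sizes): with max_rx_pkt_len = 9000 and
+ * a 2048-byte mbuf data area, the scatter branch below needs
+ * ceil(9000 / 2048) = 5 mbufs per packet.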
*/ + max_rx_pkt_len = enic->rte_dev->data->dev_conf.rxmode.max_rx_pkt_len; + + if (enic->rte_dev->data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_SCATTER) { + dev_info(enic, "Rq %u Scatter rx mode enabled\n", queue_idx); + /* ceil((max pkt len)/mbuf_size) */ + mbufs_per_pkt = (max_rx_pkt_len + mbuf_size - 1) / mbuf_size; + } else { + dev_info(enic, "Scatter rx mode disabled\n"); + mbufs_per_pkt = 1; + if (max_rx_pkt_len > mbuf_size) { + dev_warning(enic, "The maximum Rx packet size (%u) is" + " larger than the mbuf size (%u), and" + " scatter is disabled. Larger packets will" + " be truncated.\n", + max_rx_pkt_len, mbuf_size); + } + } + + if (mbufs_per_pkt > 1) { + dev_info(enic, "Rq %u Scatter rx mode in use\n", queue_idx); + rq_sop->data_queue_enable = 1; + rq_data->in_use = 1; + /* + * HW does not directly support rxmode.max_rx_pkt_len. HW always + * receives packet sizes up to the "max" MTU. + * If not using scatter, we can achieve the effect of dropping + * larger packets by reducing the size of posted buffers. + * See enic_alloc_rx_queue_mbufs(). + */ + if (max_rx_pkt_len < + enic_mtu_to_max_rx_pktlen(enic->max_mtu)) { + dev_warning(enic, "rxmode.max_rx_pkt_len is ignored" + " when scatter rx mode is in use.\n"); + } + } else { + dev_info(enic, "Rq %u Scatter rx mode not being used\n", + queue_idx); + rq_sop->data_queue_enable = 0; + rq_data->in_use = 0; + } + + /* number of descriptors have to be a multiple of 32 */ + nb_sop_desc = (nb_desc / mbufs_per_pkt) & ENIC_ALIGN_DESCS_MASK; + nb_data_desc = (nb_desc - nb_sop_desc) & ENIC_ALIGN_DESCS_MASK; + + rq_sop->max_mbufs_per_pkt = mbufs_per_pkt; + rq_data->max_mbufs_per_pkt = mbufs_per_pkt; + + if (mbufs_per_pkt > 1) { + min_sop = ENIC_RX_BURST_MAX; + max_sop = ((enic->config.rq_desc_count / + (mbufs_per_pkt - 1)) & ENIC_ALIGN_DESCS_MASK); + min_data = min_sop * (mbufs_per_pkt - 1); + max_data = enic->config.rq_desc_count; + } else { + min_sop = ENIC_RX_BURST_MAX; + max_sop = enic->config.rq_desc_count; + min_data = 0; + max_data = 0; + } + + if (nb_desc < (min_sop + min_data)) { + dev_warning(enic, + "Number of rx descs too low, adjusting to minimum\n"); + nb_sop_desc = min_sop; + nb_data_desc = min_data; + } else if (nb_desc > (max_sop + max_data)) { + dev_warning(enic, + "Number of rx_descs too high, adjusting to maximum\n"); + nb_sop_desc = max_sop; + nb_data_desc = max_data; + } + if (mbufs_per_pkt > 1) { + dev_info(enic, "For max packet size %u and mbuf size %u valid" + " rx descriptor range is %u to %u\n", + max_rx_pkt_len, mbuf_size, min_sop + min_data, + max_sop + max_data); + } + dev_info(enic, "Using %d rx descriptors (sop %d, data %d)\n", + nb_sop_desc + nb_data_desc, nb_sop_desc, nb_data_desc); + + /* Allocate sop queue resources */ + rc = vnic_rq_alloc(enic->vdev, rq_sop, sop_queue_idx, + nb_sop_desc, sizeof(struct rq_enet_desc)); + if (rc) { + dev_err(enic, "error in allocation of sop rq\n"); + goto err_exit; + } + nb_sop_desc = rq_sop->ring.desc_count; + + if (rq_data->in_use) { + /* Allocate data queue resources */ + rc = vnic_rq_alloc(enic->vdev, rq_data, data_queue_idx, + nb_data_desc, + sizeof(struct rq_enet_desc)); + if (rc) { + dev_err(enic, "error in allocation of data rq\n"); + goto err_free_rq_sop; + } + nb_data_desc = rq_data->ring.desc_count; + } + rc = vnic_cq_alloc(enic->vdev, &enic->cq[queue_idx], queue_idx, + socket_id, nb_sop_desc + nb_data_desc, + sizeof(struct cq_enet_rq_desc)); + if (rc) { + dev_err(enic, "error in allocation of cq for rq\n"); + goto err_free_rq_data; + } + + /* Allocate the mbuf 
rings */ + rq_sop->mbuf_ring = (struct rte_mbuf **) + rte_zmalloc_socket("rq->mbuf_ring", + sizeof(struct rte_mbuf *) * nb_sop_desc, + RTE_CACHE_LINE_SIZE, rq_sop->socket_id); + if (rq_sop->mbuf_ring == NULL) + goto err_free_cq; + + if (rq_data->in_use) { + rq_data->mbuf_ring = (struct rte_mbuf **) + rte_zmalloc_socket("rq->mbuf_ring", + sizeof(struct rte_mbuf *) * nb_data_desc, + RTE_CACHE_LINE_SIZE, rq_sop->socket_id); + if (rq_data->mbuf_ring == NULL) + goto err_free_sop_mbuf; + } + + rq_sop->free_mbufs = (struct rte_mbuf **) + rte_zmalloc_socket("rq->free_mbufs", + sizeof(struct rte_mbuf *) * + ENIC_RX_BURST_MAX, + RTE_CACHE_LINE_SIZE, rq_sop->socket_id); + if (rq_sop->free_mbufs == NULL) + goto err_free_data_mbuf; + rq_sop->num_free_mbufs = 0; + + rq_sop->tot_nb_desc = nb_desc; /* squirl away for MTU update function */ + + return 0; + +err_free_data_mbuf: + rte_free(rq_data->mbuf_ring); +err_free_sop_mbuf: + rte_free(rq_sop->mbuf_ring); +err_free_cq: + /* cleanup on error */ + vnic_cq_free(&enic->cq[queue_idx]); +err_free_rq_data: + if (rq_data->in_use) + vnic_rq_free(rq_data); +err_free_rq_sop: + vnic_rq_free(rq_sop); +err_exit: + return -ENOMEM; +} + +void enic_free_wq(void *txq) +{ + struct vnic_wq *wq; + struct enic *enic; + + if (txq == NULL) + return; + + wq = (struct vnic_wq *)txq; + enic = vnic_dev_priv(wq->vdev); + rte_memzone_free(wq->cqmsg_rz); + vnic_wq_free(wq); + vnic_cq_free(&enic->cq[enic->rq_count + wq->index]); +} + +int enic_alloc_wq(struct enic *enic, uint16_t queue_idx, + unsigned int socket_id, uint16_t nb_desc) +{ + int err; + struct vnic_wq *wq = &enic->wq[queue_idx]; + unsigned int cq_index = enic_cq_wq(enic, queue_idx); + char name[RTE_MEMZONE_NAMESIZE]; + static int instance; + + wq->socket_id = socket_id; + /* + * rte_eth_tx_queue_setup() checks min, max, and alignment. So just + * print an info message for diagnostics. 
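+ * The WQ's completion queue goes at cq_index = enic_cq_wq(enic, queue_idx),
+ * which places it after all RQ completion queues (enic_free_wq() above
+ * frees cq[rq_count + wq->index] accordingly).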
+ */ + dev_info(enic, "TX Queues - effective number of descs:%d\n", nb_desc); + + /* Allocate queue resources */ + err = vnic_wq_alloc(enic->vdev, &enic->wq[queue_idx], queue_idx, + nb_desc, + sizeof(struct wq_enet_desc)); + if (err) { + dev_err(enic, "error in allocation of wq\n"); + return err; + } + + err = vnic_cq_alloc(enic->vdev, &enic->cq[cq_index], cq_index, + socket_id, nb_desc, + sizeof(struct cq_enet_wq_desc)); + if (err) { + vnic_wq_free(wq); + dev_err(enic, "error in allocation of cq for wq\n"); + } + + /* setup up CQ message */ + snprintf((char *)name, sizeof(name), + "vnic_cqmsg-%s-%d-%d", enic->bdf_name, queue_idx, + instance++); + + wq->cqmsg_rz = rte_memzone_reserve_aligned((const char *)name, + sizeof(uint32_t), SOCKET_ID_ANY, + RTE_MEMZONE_IOVA_CONTIG, ENIC_PAGE_SIZE); + if (!wq->cqmsg_rz) + return -ENOMEM; + + return err; +} + +int enic_disable(struct enic *enic) +{ + unsigned int i; + int err; + + for (i = 0; i < enic->intr_count; i++) { + vnic_intr_mask(&enic->intr[i]); + (void)vnic_intr_masked(&enic->intr[i]); /* flush write */ + } + enic_rxq_intr_deinit(enic); + rte_intr_disable(&enic->pdev->intr_handle); + rte_intr_callback_unregister(&enic->pdev->intr_handle, + enic_intr_handler, + (void *)enic->rte_dev); + + vnic_dev_disable(enic->vdev); + + enic_clsf_destroy(enic); + enic_fm_destroy(enic); + + if (!enic_is_sriov_vf(enic)) + vnic_dev_del_addr(enic->vdev, enic->mac_addr); + + for (i = 0; i < enic->wq_count; i++) { + err = vnic_wq_disable(&enic->wq[i]); + if (err) + return err; + } + for (i = 0; i < enic_vnic_rq_count(enic); i++) { + if (enic->rq[i].in_use) { + err = vnic_rq_disable(&enic->rq[i]); + if (err) + return err; + } + } + + /* If we were using interrupts, set the interrupt vector to -1 + * to disable interrupts. We are not disabling link notifcations, + * though, as we want the polling of link status to continue working. 
+ */ + if (enic->rte_dev->data->dev_conf.intr_conf.lsc) + vnic_dev_notify_set(enic->vdev, -1); + + vnic_dev_set_reset_flag(enic->vdev, 1); + + for (i = 0; i < enic->wq_count; i++) + vnic_wq_clean(&enic->wq[i], enic_free_wq_buf); + + for (i = 0; i < enic_vnic_rq_count(enic); i++) + if (enic->rq[i].in_use) + vnic_rq_clean(&enic->rq[i], enic_free_rq_buf); + for (i = 0; i < enic->cq_count; i++) + vnic_cq_clean(&enic->cq[i]); + for (i = 0; i < enic->intr_count; i++) + vnic_intr_clean(&enic->intr[i]); + + return 0; +} + +static int enic_dev_wait(struct vnic_dev *vdev, + int (*start)(struct vnic_dev *, int), + int (*finished)(struct vnic_dev *, int *), + int arg) +{ + int done; + int err; + int i; + + err = start(vdev, arg); + if (err) + return err; + + /* Wait for func to complete...2 seconds max */ + for (i = 0; i < 2000; i++) { + err = finished(vdev, &done); + if (err) + return err; + if (done) + return 0; + usleep(1000); + } + return -ETIMEDOUT; +} + +static int enic_dev_open(struct enic *enic) +{ + int err; + int flags = CMD_OPENF_IG_DESCCACHE; + + err = enic_dev_wait(enic->vdev, vnic_dev_open, + vnic_dev_open_done, flags); + if (err) + dev_err(enic_get_dev(enic), + "vNIC device open failed, err %d\n", err); + + return err; +} + +static int enic_set_rsskey(struct enic *enic, uint8_t *user_key) +{ + dma_addr_t rss_key_buf_pa; + union vnic_rss_key *rss_key_buf_va = NULL; + int err, i; + uint8_t name[RTE_MEMZONE_NAMESIZE]; + + RTE_ASSERT(user_key != NULL); + snprintf((char *)name, sizeof(name), "rss_key-%s", enic->bdf_name); + rss_key_buf_va = enic_alloc_consistent(enic, sizeof(union vnic_rss_key), + &rss_key_buf_pa, name); + if (!rss_key_buf_va) + return -ENOMEM; + + for (i = 0; i < ENIC_RSS_HASH_KEY_SIZE; i++) + rss_key_buf_va->key[i / 10].b[i % 10] = user_key[i]; + + err = enic_set_rss_key(enic, + rss_key_buf_pa, + sizeof(union vnic_rss_key)); + + /* Save for later queries */ + if (!err) { + rte_memcpy(&enic->rss_key, rss_key_buf_va, + sizeof(union vnic_rss_key)); + } + enic_free_consistent(enic, sizeof(union vnic_rss_key), + rss_key_buf_va, rss_key_buf_pa); + + return err; +} + +int enic_set_rss_reta(struct enic *enic, union vnic_rss_cpu *rss_cpu) +{ + dma_addr_t rss_cpu_buf_pa; + union vnic_rss_cpu *rss_cpu_buf_va = NULL; + int err; + uint8_t name[RTE_MEMZONE_NAMESIZE]; + + snprintf((char *)name, sizeof(name), "rss_cpu-%s", enic->bdf_name); + rss_cpu_buf_va = enic_alloc_consistent(enic, sizeof(union vnic_rss_cpu), + &rss_cpu_buf_pa, name); + if (!rss_cpu_buf_va) + return -ENOMEM; + + rte_memcpy(rss_cpu_buf_va, rss_cpu, sizeof(union vnic_rss_cpu)); + + err = enic_set_rss_cpu(enic, + rss_cpu_buf_pa, + sizeof(union vnic_rss_cpu)); + + enic_free_consistent(enic, sizeof(union vnic_rss_cpu), + rss_cpu_buf_va, rss_cpu_buf_pa); + + /* Save for later queries */ + if (!err) + rte_memcpy(&enic->rss_cpu, rss_cpu, sizeof(union vnic_rss_cpu)); + return err; +} + +static int enic_set_niccfg(struct enic *enic, uint8_t rss_default_cpu, + uint8_t rss_hash_type, uint8_t rss_hash_bits, uint8_t rss_base_cpu, + uint8_t rss_enable) +{ + const uint8_t tso_ipid_split_en = 0; + int err; + + err = enic_set_nic_cfg(enic, + rss_default_cpu, rss_hash_type, + rss_hash_bits, rss_base_cpu, + rss_enable, tso_ipid_split_en, + enic->ig_vlan_strip_en); + + return err; +} + +/* Initialize RSS with defaults, called from dev_configure */ +int enic_init_rss_nic_cfg(struct enic *enic) +{ + static uint8_t default_rss_key[] = { + 85, 67, 83, 97, 119, 101, 115, 111, 109, 101, + 80, 65, 76, 79, 117, 110, 105, 113, 117, 101, + 76, 73, 
78, 85, 88, 114, 111, 99, 107, 115, + 69, 78, 73, 67, 105, 115, 99, 111, 111, 108, + }; + struct rte_eth_rss_conf rss_conf; + union vnic_rss_cpu rss_cpu; + int ret, i; + + rss_conf = enic->rte_dev->data->dev_conf.rx_adv_conf.rss_conf; + /* + * If setting key for the first time, and the user gives us none, then + * push the default key to NIC. + */ + if (rss_conf.rss_key == NULL) { + rss_conf.rss_key = default_rss_key; + rss_conf.rss_key_len = ENIC_RSS_HASH_KEY_SIZE; + } + ret = enic_set_rss_conf(enic, &rss_conf); + if (ret) { + dev_err(enic, "Failed to configure RSS\n"); + return ret; + } + if (enic->rss_enable) { + /* If enabling RSS, use the default reta */ + for (i = 0; i < ENIC_RSS_RETA_SIZE; i++) { + rss_cpu.cpu[i / 4].b[i % 4] = + enic_rte_rq_idx_to_sop_idx(i % enic->rq_count); + } + ret = enic_set_rss_reta(enic, &rss_cpu); + if (ret) + dev_err(enic, "Failed to set RSS indirection table\n"); + } + return ret; +} + +int enic_setup_finish(struct enic *enic) +{ + enic_init_soft_stats(enic); + + /* Default conf */ + vnic_dev_packet_filter(enic->vdev, + 1 /* directed */, + 1 /* multicast */, + 1 /* broadcast */, + 0 /* promisc */, + 1 /* allmulti */); + + enic->promisc = 0; + enic->allmulti = 1; + + return 0; +} + +static int enic_rss_conf_valid(struct enic *enic, + struct rte_eth_rss_conf *rss_conf) +{ + /* RSS is disabled per VIC settings. Ignore rss_conf. */ + if (enic->flow_type_rss_offloads == 0) + return 0; + if (rss_conf->rss_key != NULL && + rss_conf->rss_key_len != ENIC_RSS_HASH_KEY_SIZE) { + dev_err(enic, "Given rss_key is %d bytes, it must be %d\n", + rss_conf->rss_key_len, ENIC_RSS_HASH_KEY_SIZE); + return -EINVAL; + } + if (rss_conf->rss_hf != 0 && + (rss_conf->rss_hf & enic->flow_type_rss_offloads) == 0) { + dev_err(enic, "Given rss_hf contains none of the supported" + " types\n"); + return -EINVAL; + } + return 0; +} + +/* Set hash type and key according to rss_conf */ +int enic_set_rss_conf(struct enic *enic, struct rte_eth_rss_conf *rss_conf) +{ + struct rte_eth_dev *eth_dev; + uint64_t rss_hf; + uint8_t rss_hash_type; + uint8_t rss_enable; + int ret; + + RTE_ASSERT(rss_conf != NULL); + ret = enic_rss_conf_valid(enic, rss_conf); + if (ret) { + dev_err(enic, "RSS configuration (rss_conf) is invalid\n"); + return ret; + } + + eth_dev = enic->rte_dev; + rss_hash_type = 0; + rss_hf = rss_conf->rss_hf & enic->flow_type_rss_offloads; + if (enic->rq_count > 1 && + (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) && + rss_hf != 0) { + rss_enable = 1; + if (rss_hf & (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | + ETH_RSS_NONFRAG_IPV4_OTHER)) + rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_IPV4; + if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) + rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV4; + if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) { + rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_UDP_IPV4; + if (enic->udp_rss_weak) { + /* + * 'TCP' is not a typo. The "weak" version of + * UDP RSS requires both the TCP and UDP bits + * be set. It does enable TCP RSS as well. 
+ */ + rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV4; + } + } + if (rss_hf & (ETH_RSS_IPV6 | ETH_RSS_IPV6_EX | + ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_OTHER)) + rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_IPV6; + if (rss_hf & (ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_IPV6_TCP_EX)) + rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV6; + if (rss_hf & (ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_IPV6_UDP_EX)) { + rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_UDP_IPV6; + if (enic->udp_rss_weak) + rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV6; + } + } else { + rss_enable = 0; + rss_hf = 0; + } + + /* Set the hash key if provided */ + if (rss_enable && rss_conf->rss_key) { + ret = enic_set_rsskey(enic, rss_conf->rss_key); + if (ret) { + dev_err(enic, "Failed to set RSS key\n"); + return ret; + } + } + + ret = enic_set_niccfg(enic, ENIC_RSS_DEFAULT_CPU, rss_hash_type, + ENIC_RSS_HASH_BITS, ENIC_RSS_BASE_CPU, + rss_enable); + if (!ret) { + enic->rss_hf = rss_hf; + enic->rss_hash_type = rss_hash_type; + enic->rss_enable = rss_enable; + } else { + dev_err(enic, "Failed to update RSS configurations." + " hash=0x%x\n", rss_hash_type); + } + return ret; +} + +int enic_set_vlan_strip(struct enic *enic) +{ + /* + * Unfortunately, VLAN strip on/off and RSS on/off are configured + * together. So, re-do niccfg, preserving the current RSS settings. + */ + return enic_set_niccfg(enic, ENIC_RSS_DEFAULT_CPU, enic->rss_hash_type, + ENIC_RSS_HASH_BITS, ENIC_RSS_BASE_CPU, + enic->rss_enable); +} + +int enic_add_packet_filter(struct enic *enic) +{ + /* Args -> directed, multicast, broadcast, promisc, allmulti */ + return vnic_dev_packet_filter(enic->vdev, 1, 1, 1, + enic->promisc, enic->allmulti); +} + +int enic_get_link_status(struct enic *enic) +{ + return vnic_dev_link_status(enic->vdev); +} + +static void enic_dev_deinit(struct enic *enic) +{ + /* stop link status checking */ + vnic_dev_notify_unset(enic->vdev); + + /* mac_addrs is freed by rte_eth_dev_release_port() */ + rte_free(enic->cq); + rte_free(enic->intr); + rte_free(enic->rq); + rte_free(enic->wq); +} + + +int enic_set_vnic_res(struct enic *enic) +{ + struct rte_eth_dev *eth_dev = enic->rte_dev; + int rc = 0; + unsigned int required_rq, required_wq, required_cq, required_intr; + + /* Always use two vNIC RQs per eth_dev RQ, regardless of Rx scatter. */ + required_rq = eth_dev->data->nb_rx_queues * 2; + required_wq = eth_dev->data->nb_tx_queues; + required_cq = eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues; + required_intr = 1; /* 1 for LSC even if intr_conf.lsc is 0 */ + if (eth_dev->data->dev_conf.intr_conf.rxq) { + required_intr += eth_dev->data->nb_rx_queues; + } + + if (enic->conf_rq_count < required_rq) { + dev_err(dev, "Not enough Receive queues. Requested:%u which uses %d RQs on VIC, Configured:%u\n", + eth_dev->data->nb_rx_queues, + required_rq, enic->conf_rq_count); + rc = -EINVAL; + } + if (enic->conf_wq_count < required_wq) { + dev_err(dev, "Not enough Transmit queues. Requested:%u, Configured:%u\n", + eth_dev->data->nb_tx_queues, enic->conf_wq_count); + rc = -EINVAL; + } + + if (enic->conf_cq_count < required_cq) { + dev_err(dev, "Not enough Completion queues. Required:%u, Configured:%u\n", + required_cq, enic->conf_cq_count); + rc = -EINVAL; + } + if (enic->conf_intr_count < required_intr) { + dev_err(dev, "Not enough Interrupts to support Rx queue" + " interrupts. 
Required:%u, Configured:%u\n", + required_intr, enic->conf_intr_count); + rc = -EINVAL; + } + + if (rc == 0) { + enic->rq_count = eth_dev->data->nb_rx_queues; + enic->wq_count = eth_dev->data->nb_tx_queues; + enic->cq_count = enic->rq_count + enic->wq_count; + enic->intr_count = required_intr; + } + + return rc; +} + +/* Initialize the completion queue for an RQ */ +static int +enic_reinit_rq(struct enic *enic, unsigned int rq_idx) +{ + struct vnic_rq *sop_rq, *data_rq; + unsigned int cq_idx; + int rc = 0; + + sop_rq = &enic->rq[enic_rte_rq_idx_to_sop_idx(rq_idx)]; + data_rq = &enic->rq[enic_rte_rq_idx_to_data_idx(rq_idx, enic)]; + cq_idx = rq_idx; + + vnic_cq_clean(&enic->cq[cq_idx]); + vnic_cq_init(&enic->cq[cq_idx], + 0 /* flow_control_enable */, + 1 /* color_enable */, + 0 /* cq_head */, + 0 /* cq_tail */, + 1 /* cq_tail_color */, + 0 /* interrupt_enable */, + 1 /* cq_entry_enable */, + 0 /* cq_message_enable */, + 0 /* interrupt offset */, + 0 /* cq_message_addr */); + + + vnic_rq_init_start(sop_rq, enic_cq_rq(enic, + enic_rte_rq_idx_to_sop_idx(rq_idx)), 0, + sop_rq->ring.desc_count - 1, 1, 0); + if (data_rq->in_use) { + vnic_rq_init_start(data_rq, + enic_cq_rq(enic, + enic_rte_rq_idx_to_data_idx(rq_idx, enic)), + 0, data_rq->ring.desc_count - 1, 1, 0); + } + + rc = enic_alloc_rx_queue_mbufs(enic, sop_rq); + if (rc) + return rc; + + if (data_rq->in_use) { + rc = enic_alloc_rx_queue_mbufs(enic, data_rq); + if (rc) { + enic_rxmbuf_queue_release(enic, sop_rq); + return rc; + } + } + + return 0; +} + +/* The Cisco NIC can send and receive packets up to a max packet size + * determined by the NIC type and firmware. There is also an MTU + * configured into the NIC via the CIMC/UCSM management interface + * which can be overridden by this function (up to the max packet size). + * Depending on the network setup, doing so may cause packet drops + * and unexpected behavior. + */ +int enic_set_mtu(struct enic *enic, uint16_t new_mtu) +{ + unsigned int rq_idx; + struct vnic_rq *rq; + int rc = 0; + uint16_t old_mtu; /* previous setting */ + uint16_t config_mtu; /* Value configured into NIC via CIMC/UCSM */ + struct rte_eth_dev *eth_dev = enic->rte_dev; + + old_mtu = eth_dev->data->mtu; + config_mtu = enic->config.mtu; + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return -E_RTE_SECONDARY; + + if (new_mtu > enic->max_mtu) { + dev_err(enic, + "MTU not updated: requested (%u) greater than max (%u)\n", + new_mtu, enic->max_mtu); + return -EINVAL; + } + if (new_mtu < ENIC_MIN_MTU) { + dev_info(enic, + "MTU not updated: requested (%u) less than min (%u)\n", + new_mtu, ENIC_MIN_MTU); + return -EINVAL; + } + if (new_mtu > config_mtu) + dev_warning(enic, + "MTU (%u) is greater than value configured in NIC (%u)\n", + new_mtu, config_mtu); + + /* Update the MTU and maximum packet length */ + eth_dev->data->mtu = new_mtu; + eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = + enic_mtu_to_max_rx_pktlen(new_mtu); + + /* + * If the device has not started (enic_enable), nothing to do. + * Later, enic_enable() will set up RQs reflecting the new maximum + * packet length. + */ + if (!eth_dev->data->dev_started) + goto set_mtu_done; + + /* + * The device has started, re-do RQs on the fly. In the process, we + * pick up the new maximum packet length. + * + * Some applications rely on the ability to change MTU without stopping + * the device. So keep this behavior for now. 
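+ * The sequence below: stop the RQs, swap in enic_dummy_recv_pkts, give
+ * in-flight polls time to drain, reallocate the RQs and their buffers at
+ * the new size, then restore the real Rx handler and restart the RQs.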
+ */ + rte_spinlock_lock(&enic->mtu_lock); + + /* Stop traffic on all RQs */ + for (rq_idx = 0; rq_idx < enic->rq_count * 2; rq_idx++) { + rq = &enic->rq[rq_idx]; + if (rq->is_sop && rq->in_use) { + rc = enic_stop_rq(enic, + enic_sop_rq_idx_to_rte_idx(rq_idx)); + if (rc) { + dev_err(enic, "Failed to stop Rq %u\n", rq_idx); + goto set_mtu_done; + } + } + } + + /* replace Rx function with a no-op to avoid getting stale pkts */ + eth_dev->rx_pkt_burst = enic_dummy_recv_pkts; + rte_mb(); + + /* Allow time for threads to exit the real Rx function. */ + usleep(100000); + + /* now it is safe to reconfigure the RQs */ + + + /* free and reallocate RQs with the new MTU */ + for (rq_idx = 0; rq_idx < enic->rq_count; rq_idx++) { + rq = &enic->rq[enic_rte_rq_idx_to_sop_idx(rq_idx)]; + if (!rq->in_use) + continue; + + enic_free_rq(rq); + rc = enic_alloc_rq(enic, rq_idx, rq->socket_id, rq->mp, + rq->tot_nb_desc, rq->rx_free_thresh); + if (rc) { + dev_err(enic, + "Fatal MTU alloc error- No traffic will pass\n"); + goto set_mtu_done; + } + + rc = enic_reinit_rq(enic, rq_idx); + if (rc) { + dev_err(enic, + "Fatal MTU RQ reinit- No traffic will pass\n"); + goto set_mtu_done; + } + } + + /* put back the real receive function */ + rte_mb(); + enic_pick_rx_handler(eth_dev); + rte_mb(); + + /* restart Rx traffic */ + for (rq_idx = 0; rq_idx < enic->rq_count; rq_idx++) { + rq = &enic->rq[enic_rte_rq_idx_to_sop_idx(rq_idx)]; + if (rq->is_sop && rq->in_use) + enic_start_rq(enic, rq_idx); + } + +set_mtu_done: + dev_info(enic, "MTU changed from %u to %u\n", old_mtu, new_mtu); + rte_spinlock_unlock(&enic->mtu_lock); + return rc; +} + +static int enic_dev_init(struct enic *enic) +{ + int err; + struct rte_eth_dev *eth_dev = enic->rte_dev; + + vnic_dev_intr_coal_timer_info_default(enic->vdev); + + /* Get vNIC configuration + */ + err = enic_get_vnic_config(enic); + if (err) { + dev_err(dev, "Get vNIC configuration failed, aborting\n"); + return err; + } + + /* Get available resource counts */ + enic_get_res_counts(enic); + if (enic->conf_rq_count == 1) { + dev_err(enic, "Running with only 1 RQ configured in the vNIC is not supported.\n"); + dev_err(enic, "Please configure 2 RQs in the vNIC for each Rx queue used by DPDK.\n"); + dev_err(enic, "See the ENIC PMD guide for more information.\n"); + return -EINVAL; + } + /* Queue counts may be zeros. rte_zmalloc returns NULL in that case. 
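+ * Hence the checks below treat a NULL pointer as a failure only when the
+ * corresponding count is non-zero.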
*/ + enic->cq = rte_zmalloc("enic_vnic_cq", sizeof(struct vnic_cq) * + enic->conf_cq_count, 8); + enic->intr = rte_zmalloc("enic_vnic_intr", sizeof(struct vnic_intr) * + enic->conf_intr_count, 8); + enic->rq = rte_zmalloc("enic_vnic_rq", sizeof(struct vnic_rq) * + enic->conf_rq_count, 8); + enic->wq = rte_zmalloc("enic_vnic_wq", sizeof(struct vnic_wq) * + enic->conf_wq_count, 8); + if (enic->conf_cq_count > 0 && enic->cq == NULL) { + dev_err(enic, "failed to allocate vnic_cq, aborting.\n"); + return -1; + } + if (enic->conf_intr_count > 0 && enic->intr == NULL) { + dev_err(enic, "failed to allocate vnic_intr, aborting.\n"); + return -1; + } + if (enic->conf_rq_count > 0 && enic->rq == NULL) { + dev_err(enic, "failed to allocate vnic_rq, aborting.\n"); + return -1; + } + if (enic->conf_wq_count > 0 && enic->wq == NULL) { + dev_err(enic, "failed to allocate vnic_wq, aborting.\n"); + return -1; + } + + /* Get the supported filters */ + enic_fdir_info(enic); + + eth_dev->data->mac_addrs = rte_zmalloc("enic_mac_addr", + sizeof(struct rte_ether_addr) * + ENIC_UNICAST_PERFECT_FILTERS, 0); + if (!eth_dev->data->mac_addrs) { + dev_err(enic, "mac addr storage alloc failed, aborting.\n"); + return -1; + } + rte_ether_addr_copy((struct rte_ether_addr *)enic->mac_addr, + eth_dev->data->mac_addrs); + + vnic_dev_set_reset_flag(enic->vdev, 0); + + LIST_INIT(&enic->flows); + + /* set up link status checking */ + vnic_dev_notify_set(enic->vdev, -1); /* No Intr for notify */ + + /* + * When Geneve with options offload is available, always disable it + * first as it can interfere with user flow rules. + */ + if (enic->geneve_opt_avail) { + /* + * Disabling fails if the feature is provisioned but + * not enabled. So ignore result and do not log error. + */ + vnic_dev_overlay_offload_ctrl(enic->vdev, + OVERLAY_FEATURE_GENEVE, + OVERLAY_OFFLOAD_DISABLE); + } + enic->overlay_offload = false; + if (enic->disable_overlay && enic->vxlan) { + /* + * Explicitly disable overlay offload as the setting is + * sticky, and resetting vNIC does not disable it. + */ + if (vnic_dev_overlay_offload_ctrl(enic->vdev, + OVERLAY_FEATURE_VXLAN, + OVERLAY_OFFLOAD_DISABLE)) { + dev_err(enic, "failed to disable overlay offload\n"); + } else { + dev_info(enic, "Overlay offload is disabled\n"); + } + } + if (!enic->disable_overlay && enic->vxlan && + /* 'VXLAN feature' enables VXLAN, NVGRE, and GENEVE. */ + vnic_dev_overlay_offload_ctrl(enic->vdev, + OVERLAY_FEATURE_VXLAN, + OVERLAY_OFFLOAD_ENABLE) == 0) { + enic->tx_offload_capa |= + DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | + DEV_TX_OFFLOAD_GENEVE_TNL_TSO | + DEV_TX_OFFLOAD_VXLAN_TNL_TSO; + enic->tx_offload_mask |= + PKT_TX_OUTER_IPV6 | + PKT_TX_OUTER_IPV4 | + PKT_TX_OUTER_IP_CKSUM | + PKT_TX_TUNNEL_MASK; + enic->overlay_offload = true; + dev_info(enic, "Overlay offload is enabled\n"); + } + /* Geneve with options offload requires overlay offload */ + if (enic->overlay_offload && enic->geneve_opt_avail && + enic->geneve_opt_request) { + if (vnic_dev_overlay_offload_ctrl(enic->vdev, + OVERLAY_FEATURE_GENEVE, + OVERLAY_OFFLOAD_ENABLE)) { + dev_err(enic, "failed to enable geneve+option\n"); + } else { + enic->geneve_opt_enabled = 1; + dev_info(enic, "Geneve with options is enabled\n"); + } + } + /* + * Reset the vxlan port if HW vxlan parsing is available. It + * is always enabled regardless of overlay offload + * enable/disable. 
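+ * RTE_VXLAN_DEFAULT_PORT is expected to be the IANA-assigned UDP port 4789.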
+ */ + if (enic->vxlan) { + enic->vxlan_port = RTE_VXLAN_DEFAULT_PORT; + /* + * Reset the vxlan port to the default, as the NIC firmware + * does not reset it automatically and keeps the old setting. + */ + if (vnic_dev_overlay_offload_cfg(enic->vdev, + OVERLAY_CFG_VXLAN_PORT_UPDATE, + RTE_VXLAN_DEFAULT_PORT)) { + dev_err(enic, "failed to update vxlan port\n"); + return -EINVAL; + } + } + + return 0; + +} + +int enic_probe(struct enic *enic) +{ + struct rte_pci_device *pdev = enic->pdev; + int err = -1; + + dev_debug(enic, "Initializing ENIC PMD\n"); + + /* if this is a secondary process the hardware is already initialized */ + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; + + enic->bar0.vaddr = (void *)pdev->mem_resource[0].addr; + enic->bar0.len = pdev->mem_resource[0].len; + + /* Register vNIC device */ + enic->vdev = vnic_dev_register(NULL, enic, enic->pdev, &enic->bar0, 1); + if (!enic->vdev) { + dev_err(enic, "vNIC registration failed, aborting\n"); + goto err_out; + } + + LIST_INIT(&enic->memzone_list); + rte_spinlock_init(&enic->memzone_list_lock); + + vnic_register_cbacks(enic->vdev, + enic_alloc_consistent, + enic_free_consistent); + + /* + * Allocate the consistent memory for stats upfront so both primary and + * secondary processes can dump stats. + */ + err = vnic_dev_alloc_stats_mem(enic->vdev); + if (err) { + dev_err(enic, "Failed to allocate cmd memory, aborting\n"); + goto err_out_unregister; + } + /* Issue device open to get device in known state */ + err = enic_dev_open(enic); + if (err) { + dev_err(enic, "vNIC dev open failed, aborting\n"); + goto err_out_unregister; + } + + /* Set ingress vlan rewrite mode before vnic initialization */ + dev_debug(enic, "Set ig_vlan_rewrite_mode=%u\n", + enic->ig_vlan_rewrite_mode); + err = vnic_dev_set_ig_vlan_rewrite_mode(enic->vdev, + enic->ig_vlan_rewrite_mode); + if (err) { + dev_err(enic, + "Failed to set ingress vlan rewrite mode, aborting.\n"); + goto err_out_dev_close; + } + + /* Issue device init to initialize the vnic-to-switch link. + * We'll start with carrier off and wait for link UP + * notification later to turn on carrier. We don't need + * to wait here for the vnic-to-switch link initialization + * to complete; link UP notification is the indication that + * the process is complete. + */ + + err = vnic_dev_init(enic->vdev, 0); + if (err) { + dev_err(enic, "vNIC dev init failed, aborting\n"); + goto err_out_dev_close; + } + + err = enic_dev_init(enic); + if (err) { + dev_err(enic, "Device initialization failed, aborting\n"); + goto err_out_dev_close; + } + + return 0; + +err_out_dev_close: + vnic_dev_close(enic->vdev); +err_out_unregister: + vnic_dev_unregister(enic->vdev); +err_out: + return err; +} + +void enic_remove(struct enic *enic) +{ + enic_dev_deinit(enic); + vnic_dev_close(enic->vdev); + vnic_dev_unregister(enic->vdev); +} diff --git a/src/spdk/dpdk/drivers/net/enic/enic_res.c b/src/spdk/dpdk/drivers/net/enic/enic_res.c new file mode 100644 index 000000000..20888eb25 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/enic/enic_res.c @@ -0,0 +1,280 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. 
+ */ + +#include "enic_compat.h" +#include "rte_ethdev_driver.h" +#include "wq_enet_desc.h" +#include "rq_enet_desc.h" +#include "cq_enet_desc.h" +#include "vnic_resource.h" +#include "vnic_enet.h" +#include "vnic_dev.h" +#include "vnic_wq.h" +#include "vnic_rq.h" +#include "vnic_cq.h" +#include "vnic_intr.h" +#include "vnic_stats.h" +#include "vnic_nic.h" +#include "vnic_rss.h" +#include "enic_res.h" +#include "enic.h" + +int enic_get_vnic_config(struct enic *enic) +{ + struct vnic_enet_config *c = &enic->config; + int err; + + err = vnic_dev_get_mac_addr(enic->vdev, enic->mac_addr); + if (err) { + dev_err(enic_get_dev(enic), + "Error getting MAC addr, %d\n", err); + return err; + } + + +#define GET_CONFIG(m) \ + do { \ + err = vnic_dev_spec(enic->vdev, \ + offsetof(struct vnic_enet_config, m), \ + sizeof(c->m), &c->m); \ + if (err) { \ + dev_err(enic_get_dev(enic), \ + "Error getting %s, %d\n", #m, err); \ + return err; \ + } \ + } while (0) + + GET_CONFIG(flags); + GET_CONFIG(wq_desc_count); + GET_CONFIG(rq_desc_count); + GET_CONFIG(mtu); + GET_CONFIG(intr_timer_type); + GET_CONFIG(intr_mode); + GET_CONFIG(intr_timer_usec); + GET_CONFIG(loop_tag); + GET_CONFIG(num_arfs); + GET_CONFIG(max_pkt_size); + + /* max packet size is only defined in newer VIC firmware + * and will be 0 for legacy firmware and VICs + */ + if (c->max_pkt_size > ENIC_DEFAULT_RX_MAX_PKT_SIZE) + enic->max_mtu = c->max_pkt_size - RTE_ETHER_HDR_LEN; + else + enic->max_mtu = ENIC_DEFAULT_RX_MAX_PKT_SIZE - + RTE_ETHER_HDR_LEN; + if (c->mtu == 0) + c->mtu = 1500; + + enic->rte_dev->data->mtu = RTE_MIN(enic->max_mtu, + RTE_MAX((uint16_t)ENIC_MIN_MTU, c->mtu)); + + enic->adv_filters = vnic_dev_capable_adv_filters(enic->vdev); + dev_info(enic, "Advanced Filters %savailable\n", ((enic->adv_filters) + ? "" : "not ")); + + err = vnic_dev_capable_filter_mode(enic->vdev, &enic->flow_filter_mode, + &enic->filter_actions); + if (err) { + dev_err(enic_get_dev(enic), + "Error getting filter modes, %d\n", err); + return err; + } + vnic_dev_capable_udp_rss_weak(enic->vdev, &enic->nic_cfg_chk, + &enic->udp_rss_weak); + + dev_info(enic, "Flow api filter mode: %s Actions: %s%s%s%s\n", + ((enic->flow_filter_mode == FILTER_FLOWMAN) ? "FLOWMAN" : + ((enic->flow_filter_mode == FILTER_DPDK_1) ? "DPDK" : + ((enic->flow_filter_mode == FILTER_USNIC_IP) ? "USNIC" : + ((enic->flow_filter_mode == FILTER_IPV4_5TUPLE) ? "5TUPLE" : + "NONE")))), + ((enic->filter_actions & FILTER_ACTION_RQ_STEERING_FLAG) ? + "steer " : ""), + ((enic->filter_actions & FILTER_ACTION_FILTER_ID_FLAG) ? + "tag " : ""), + ((enic->filter_actions & FILTER_ACTION_DROP_FLAG) ? + "drop " : ""), + ((enic->filter_actions & FILTER_ACTION_COUNTER_FLAG) ? 
+ "count " : "")); + + c->wq_desc_count = RTE_MIN((uint32_t)ENIC_MAX_WQ_DESCS, + RTE_MAX((uint32_t)ENIC_MIN_WQ_DESCS, c->wq_desc_count)); + c->wq_desc_count &= 0xffffffe0; /* must be aligned to groups of 32 */ + + c->rq_desc_count = RTE_MIN((uint32_t)ENIC_MAX_RQ_DESCS, + RTE_MAX((uint32_t)ENIC_MIN_RQ_DESCS, c->rq_desc_count)); + c->rq_desc_count &= 0xffffffe0; /* must be aligned to groups of 32 */ + + c->intr_timer_usec = RTE_MIN(c->intr_timer_usec, + vnic_dev_get_intr_coal_timer_max(enic->vdev)); + + dev_info(enic_get_dev(enic), + "vNIC MAC addr %02x:%02x:%02x:%02x:%02x:%02x " + "wq/rq %d/%d mtu %d, max mtu:%d\n", + enic->mac_addr[0], enic->mac_addr[1], enic->mac_addr[2], + enic->mac_addr[3], enic->mac_addr[4], enic->mac_addr[5], + c->wq_desc_count, c->rq_desc_count, + enic->rte_dev->data->mtu, enic->max_mtu); + dev_info(enic_get_dev(enic), "vNIC csum tx/rx %s/%s " + "rss %s intr mode %s type %s timer %d usec " + "loopback tag 0x%04x\n", + ENIC_SETTING(enic, TXCSUM) ? "yes" : "no", + ENIC_SETTING(enic, RXCSUM) ? "yes" : "no", + ENIC_SETTING(enic, RSS) ? + (ENIC_SETTING(enic, RSSHASH_UDPIPV4) ? "+UDP" : + ((enic->udp_rss_weak ? "+udp" : + "yes"))) : "no", + c->intr_mode == VENET_INTR_MODE_INTX ? "INTx" : + c->intr_mode == VENET_INTR_MODE_MSI ? "MSI" : + c->intr_mode == VENET_INTR_MODE_ANY ? "any" : + "unknown", + c->intr_timer_type == VENET_INTR_TYPE_MIN ? "min" : + c->intr_timer_type == VENET_INTR_TYPE_IDLE ? "idle" : + "unknown", + c->intr_timer_usec, + c->loop_tag); + + /* RSS settings from vNIC */ + enic->reta_size = ENIC_RSS_RETA_SIZE; + enic->hash_key_size = ENIC_RSS_HASH_KEY_SIZE; + enic->flow_type_rss_offloads = 0; + if (ENIC_SETTING(enic, RSSHASH_IPV4)) + /* + * IPV4 hash type handles both non-frag and frag packet types. + * TCP/UDP is controlled via a separate flag below. + */ + enic->flow_type_rss_offloads |= ETH_RSS_IPV4 | + ETH_RSS_FRAG_IPV4 | ETH_RSS_NONFRAG_IPV4_OTHER; + if (ENIC_SETTING(enic, RSSHASH_TCPIPV4)) + enic->flow_type_rss_offloads |= ETH_RSS_NONFRAG_IPV4_TCP; + if (ENIC_SETTING(enic, RSSHASH_IPV6)) + /* + * The VIC adapter can perform RSS on IPv6 packets with and + * without extension headers. An IPv6 "fragment" is an IPv6 + * packet with the fragment extension header. + */ + enic->flow_type_rss_offloads |= ETH_RSS_IPV6 | + ETH_RSS_IPV6_EX | ETH_RSS_FRAG_IPV6 | + ETH_RSS_NONFRAG_IPV6_OTHER; + if (ENIC_SETTING(enic, RSSHASH_TCPIPV6)) + enic->flow_type_rss_offloads |= ETH_RSS_NONFRAG_IPV6_TCP | + ETH_RSS_IPV6_TCP_EX; + if (enic->udp_rss_weak) + enic->flow_type_rss_offloads |= + ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV6_UDP | + ETH_RSS_IPV6_UDP_EX; + if (ENIC_SETTING(enic, RSSHASH_UDPIPV4)) + enic->flow_type_rss_offloads |= ETH_RSS_NONFRAG_IPV4_UDP; + if (ENIC_SETTING(enic, RSSHASH_UDPIPV6)) + enic->flow_type_rss_offloads |= ETH_RSS_NONFRAG_IPV6_UDP | + ETH_RSS_IPV6_UDP_EX; + + /* Zero offloads if RSS is not enabled */ + if (!ENIC_SETTING(enic, RSS)) + enic->flow_type_rss_offloads = 0; + + enic->vxlan = ENIC_SETTING(enic, VXLAN) && + vnic_dev_capable_vxlan(enic->vdev); + if (vnic_dev_capable_geneve(enic->vdev)) { + dev_info(NULL, "Geneve with options offload available\n"); + enic->geneve_opt_avail = 1; + } + /* + * Default hardware capabilities. enic_dev_init() may add additional + * flags if it enables overlay offloads. 
+ */ + enic->tx_queue_offload_capa = 0; + enic->tx_offload_capa = + enic->tx_queue_offload_capa | + DEV_TX_OFFLOAD_MULTI_SEGS | + DEV_TX_OFFLOAD_VLAN_INSERT | + DEV_TX_OFFLOAD_IPV4_CKSUM | + DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_TCP_CKSUM | + DEV_TX_OFFLOAD_TCP_TSO; + enic->rx_offload_capa = + DEV_RX_OFFLOAD_SCATTER | + DEV_RX_OFFLOAD_JUMBO_FRAME | + DEV_RX_OFFLOAD_VLAN_STRIP | + DEV_RX_OFFLOAD_IPV4_CKSUM | + DEV_RX_OFFLOAD_UDP_CKSUM | + DEV_RX_OFFLOAD_TCP_CKSUM | + DEV_RX_OFFLOAD_RSS_HASH; + enic->tx_offload_mask = + PKT_TX_IPV6 | + PKT_TX_IPV4 | + PKT_TX_VLAN | + PKT_TX_IP_CKSUM | + PKT_TX_L4_MASK | + PKT_TX_TCP_SEG; + + return 0; +} + +int enic_set_nic_cfg(struct enic *enic, uint8_t rss_default_cpu, + uint8_t rss_hash_type, uint8_t rss_hash_bits, + uint8_t rss_base_cpu, uint8_t rss_enable, + uint8_t tso_ipid_split_en, uint8_t ig_vlan_strip_en) +{ + enum vnic_devcmd_cmd cmd; + uint64_t a0, a1; + uint32_t nic_cfg; + int wait = 1000; + + vnic_set_nic_cfg(&nic_cfg, rss_default_cpu, + rss_hash_type, rss_hash_bits, rss_base_cpu, + rss_enable, tso_ipid_split_en, ig_vlan_strip_en); + + a0 = nic_cfg; + a1 = 0; + cmd = enic->nic_cfg_chk ? CMD_NIC_CFG_CHK : CMD_NIC_CFG; + return vnic_dev_cmd(enic->vdev, cmd, &a0, &a1, wait); +} + +int enic_set_rss_key(struct enic *enic, dma_addr_t key_pa, uint64_t len) +{ + uint64_t a0 = (uint64_t)key_pa, a1 = len; + int wait = 1000; + + return vnic_dev_cmd(enic->vdev, CMD_RSS_KEY, &a0, &a1, wait); +} + +int enic_set_rss_cpu(struct enic *enic, dma_addr_t cpu_pa, uint64_t len) +{ + uint64_t a0 = (uint64_t)cpu_pa, a1 = len; + int wait = 1000; + + return vnic_dev_cmd(enic->vdev, CMD_RSS_CPU, &a0, &a1, wait); +} + +void enic_free_vnic_resources(struct enic *enic) +{ + unsigned int i; + + for (i = 0; i < enic->wq_count; i++) + vnic_wq_free(&enic->wq[i]); + for (i = 0; i < enic_vnic_rq_count(enic); i++) + if (enic->rq[i].in_use) + vnic_rq_free(&enic->rq[i]); + for (i = 0; i < enic->cq_count; i++) + vnic_cq_free(&enic->cq[i]); + for (i = 0; i < enic->intr_count; i++) + vnic_intr_free(&enic->intr[i]); +} + +void enic_get_res_counts(struct enic *enic) +{ + enic->conf_wq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_WQ); + enic->conf_rq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_RQ); + enic->conf_cq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_CQ); + enic->conf_intr_count = vnic_dev_get_res_count(enic->vdev, + RES_TYPE_INTR_CTRL); + + dev_info(enic_get_dev(enic), + "vNIC resources avail: wq %d rq %d cq %d intr %d\n", + enic->conf_wq_count, enic->conf_rq_count, + enic->conf_cq_count, enic->conf_intr_count); +} diff --git a/src/spdk/dpdk/drivers/net/enic/enic_res.h b/src/spdk/dpdk/drivers/net/enic/enic_res.h new file mode 100644 index 000000000..34f15d5a4 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/enic/enic_res.h @@ -0,0 +1,72 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. 
+ */ + +#ifndef _ENIC_RES_H_ +#define _ENIC_RES_H_ + +#include "wq_enet_desc.h" +#include "rq_enet_desc.h" +#include "vnic_wq.h" +#include "vnic_rq.h" + +#define ENIC_MIN_WQ_DESCS 64 +#define ENIC_MAX_WQ_DESCS 4096 +#define ENIC_MIN_RQ_DESCS 64 +#define ENIC_MAX_RQ_DESCS 4096 + +/* A descriptor ring has a multiple of 32 descriptors */ +#define ENIC_ALIGN_DESCS 32 +#define ENIC_ALIGN_DESCS_MASK ~(ENIC_ALIGN_DESCS - 1) + +/* Request a completion index every 32 buffers (roughly packets) */ +#define ENIC_WQ_CQ_THRESH 32 + +#define ENIC_MIN_MTU 68 + +/* Does not include (possible) inserted VLAN tag and FCS */ +#define ENIC_DEFAULT_RX_MAX_PKT_SIZE 9022 + +/* Does not include (possible) inserted VLAN tag and FCS */ +#define ENIC_TX_MAX_PKT_SIZE 9208 + +#define ENIC_MULTICAST_PERFECT_FILTERS 32 +#define ENIC_UNICAST_PERFECT_FILTERS 32 + +#define ENIC_NON_TSO_MAX_DESC 16 +#define ENIC_DEFAULT_RX_FREE_THRESH 32 +#define ENIC_TX_XMIT_MAX 64 +#define ENIC_RX_BURST_MAX 64 + +/* Defaults for dev_info.default_{rx,tx}portconf */ +#define ENIC_DEFAULT_RX_BURST 32 +#define ENIC_DEFAULT_RX_RINGS 1 +#define ENIC_DEFAULT_RX_RING_SIZE 512 +#define ENIC_DEFAULT_TX_BURST 32 +#define ENIC_DEFAULT_TX_RINGS 1 +#define ENIC_DEFAULT_TX_RING_SIZE 512 + +#define ENIC_RSS_DEFAULT_CPU 0 +#define ENIC_RSS_BASE_CPU 0 +#define ENIC_RSS_HASH_BITS 7 +#define ENIC_RSS_RETA_SIZE (1 << ENIC_RSS_HASH_BITS) +#define ENIC_RSS_HASH_KEY_SIZE 40 + +#define ENIC_SETTING(enic, f) ((enic->config.flags & VENETF_##f) ? 1 : 0) + +struct enic; + +int enic_get_vnic_config(struct enic *); +int enic_set_nic_cfg(struct enic *enic, uint8_t rss_default_cpu, + uint8_t rss_hash_type, uint8_t rss_hash_bits, + uint8_t rss_base_cpu, uint8_t rss_enable, + uint8_t tso_ipid_split_en, uint8_t ig_vlan_strip_en); +int enic_set_rss_key(struct enic *enic, dma_addr_t key_pa, uint64_t len); +int enic_set_rss_cpu(struct enic *enic, dma_addr_t cpu_pa, uint64_t len); +void enic_get_res_counts(struct enic *enic); +void enic_init_vnic_resources(struct enic *enic); +int enic_alloc_vnic_resources(struct enic *); +void enic_free_vnic_resources(struct enic *); + +#endif /* _ENIC_RES_H_ */ diff --git a/src/spdk/dpdk/drivers/net/enic/enic_rxtx.c b/src/spdk/dpdk/drivers/net/enic/enic_rxtx.c new file mode 100644 index 000000000..6a8718c08 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/enic/enic_rxtx.c @@ -0,0 +1,688 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + */ + +#include +#include +#include +#include + +#include "enic_compat.h" +#include "rq_enet_desc.h" +#include "enic.h" +#include "enic_rxtx_common.h" +#include +#include +#include + +#define RTE_PMD_USE_PREFETCH + +#ifdef RTE_PMD_USE_PREFETCH +/*Prefetch a cache line into all cache levels. */ +#define rte_enic_prefetch(p) rte_prefetch0(p) +#else +#define rte_enic_prefetch(p) do {} while (0) +#endif + +#ifdef RTE_PMD_PACKET_PREFETCH +#define rte_packet_prefetch(p) rte_prefetch1(p) +#else +#define rte_packet_prefetch(p) do {} while (0) +#endif + +/* dummy receive function to replace actual function in + * order to do safe reconfiguration operations. 
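+ * (enic_set_mtu() in enic_main.c installs it while the RQs are rebuilt so
+ * that no stale packets are handed to the application.)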
+ */ +uint16_t +enic_dummy_recv_pkts(__rte_unused void *rx_queue, + __rte_unused struct rte_mbuf **rx_pkts, + __rte_unused uint16_t nb_pkts) +{ + return 0; +} + +uint16_t +enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + struct vnic_rq *sop_rq = rx_queue; + struct vnic_rq *data_rq; + struct vnic_rq *rq; + struct enic *enic = vnic_dev_priv(sop_rq->vdev); + uint16_t cq_idx; + uint16_t rq_idx, max_rx; + uint16_t rq_num; + struct rte_mbuf *nmb, *rxmb; + uint16_t nb_rx = 0; + struct vnic_cq *cq; + volatile struct cq_desc *cqd_ptr; + uint8_t color; + uint8_t tnl; + uint16_t seg_length; + struct rte_mbuf *first_seg = sop_rq->pkt_first_seg; + struct rte_mbuf *last_seg = sop_rq->pkt_last_seg; + + cq = &enic->cq[enic_cq_rq(enic, sop_rq->index)]; + cq_idx = cq->to_clean; /* index of cqd, rqd, mbuf_table */ + cqd_ptr = (struct cq_desc *)(cq->ring.descs) + cq_idx; + color = cq->last_color; + + data_rq = &enic->rq[sop_rq->data_queue_idx]; + + /* Receive until the end of the ring, at most. */ + max_rx = RTE_MIN(nb_pkts, cq->ring.desc_count - cq_idx); + + while (max_rx) { + volatile struct rq_enet_desc *rqd_ptr; + struct cq_desc cqd; + uint8_t packet_error; + uint16_t ciflags; + + max_rx--; + + /* Check for pkts available */ + if ((cqd_ptr->type_color & CQ_DESC_COLOR_MASK_NOSHIFT) == color) + break; + + /* Get the cq descriptor and extract rq info from it */ + cqd = *cqd_ptr; + rq_num = cqd.q_number & CQ_DESC_Q_NUM_MASK; + rq_idx = cqd.completed_index & CQ_DESC_COMP_NDX_MASK; + + rq = &enic->rq[rq_num]; + rqd_ptr = ((struct rq_enet_desc *)rq->ring.descs) + rq_idx; + + /* allocate a new mbuf */ + nmb = rte_mbuf_raw_alloc(rq->mp); + if (nmb == NULL) { + rte_atomic64_inc(&enic->soft_stats.rx_nombuf); + break; + } + + /* A packet error means descriptor and data are untrusted */ + packet_error = enic_cq_rx_check_err(&cqd); + + /* Get the mbuf to return and replace with one just allocated */ + rxmb = rq->mbuf_ring[rq_idx]; + rq->mbuf_ring[rq_idx] = nmb; + cq_idx++; + + /* Prefetch next mbuf & desc while processing current one */ + cqd_ptr = (struct cq_desc *)(cq->ring.descs) + cq_idx; + rte_enic_prefetch(cqd_ptr); + + ciflags = enic_cq_rx_desc_ciflags( + (struct cq_enet_rq_desc *)&cqd); + + /* Push descriptor for newly allocated mbuf */ + nmb->data_off = RTE_PKTMBUF_HEADROOM; + /* + * Only the address needs to be refilled. length_type of the + * descriptor it set during initialization + * (enic_alloc_rx_queue_mbufs) and does not change. + */ + rqd_ptr->address = rte_cpu_to_le_64(nmb->buf_iova + + RTE_PKTMBUF_HEADROOM); + + /* Fill in the rest of the mbuf */ + seg_length = enic_cq_rx_desc_n_bytes(&cqd); + + if (rq->is_sop) { + first_seg = rxmb; + first_seg->pkt_len = seg_length; + } else { + first_seg->pkt_len = (uint16_t)(first_seg->pkt_len + + seg_length); + first_seg->nb_segs++; + last_seg->next = rxmb; + } + + rxmb->port = enic->port_id; + rxmb->data_len = seg_length; + + rq->rx_nb_hold++; + + if (!(enic_cq_rx_desc_eop(ciflags))) { + last_seg = rxmb; + continue; + } + + /* + * When overlay offload is enabled, CQ.fcoe indicates the + * packet is tunnelled. 
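+ * The ptype lookup table in enic_rxtx_common.h describes only the inner
+ * packet for tunnelled traffic, so the outer L3/L4 bits are cleared just
+ * below. An application consuming these mbufs could then, for example,
+ * read the inner L4 type as:
+ *
+ *      if (first_seg->packet_type & RTE_PTYPE_TUNNEL_MASK)
+ *              inner_l4 = first_seg->packet_type &
+ *                         RTE_PTYPE_INNER_L4_MASK;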
+ */ + tnl = enic->overlay_offload && + (ciflags & CQ_ENET_RQ_DESC_FLAGS_FCOE) != 0; + /* cq rx flags are only valid if eop bit is set */ + first_seg->packet_type = + enic_cq_rx_flags_to_pkt_type(&cqd, tnl); + enic_cq_rx_to_pkt_flags(&cqd, first_seg); + + /* Wipe the outer types set by enic_cq_rx_flags_to_pkt_type() */ + if (tnl) { + first_seg->packet_type &= ~(RTE_PTYPE_L3_MASK | + RTE_PTYPE_L4_MASK); + } + if (unlikely(packet_error)) { + rte_pktmbuf_free(first_seg); + rte_atomic64_inc(&enic->soft_stats.rx_packet_errors); + continue; + } + + + /* prefetch mbuf data for caller */ + rte_packet_prefetch(RTE_PTR_ADD(first_seg->buf_addr, + RTE_PKTMBUF_HEADROOM)); + + /* store the mbuf address into the next entry of the array */ + rx_pkts[nb_rx++] = first_seg; + } + if (unlikely(cq_idx == cq->ring.desc_count)) { + cq_idx = 0; + cq->last_color ^= CQ_DESC_COLOR_MASK_NOSHIFT; + } + + sop_rq->pkt_first_seg = first_seg; + sop_rq->pkt_last_seg = last_seg; + + cq->to_clean = cq_idx; + + if ((sop_rq->rx_nb_hold + data_rq->rx_nb_hold) > + sop_rq->rx_free_thresh) { + if (data_rq->in_use) { + data_rq->posted_index = + enic_ring_add(data_rq->ring.desc_count, + data_rq->posted_index, + data_rq->rx_nb_hold); + data_rq->rx_nb_hold = 0; + } + sop_rq->posted_index = enic_ring_add(sop_rq->ring.desc_count, + sop_rq->posted_index, + sop_rq->rx_nb_hold); + sop_rq->rx_nb_hold = 0; + + rte_mb(); + if (data_rq->in_use) + iowrite32_relaxed(data_rq->posted_index, + &data_rq->ctrl->posted_index); + rte_compiler_barrier(); + iowrite32_relaxed(sop_rq->posted_index, + &sop_rq->ctrl->posted_index); + } + + + return nb_rx; +} + +uint16_t +enic_noscatter_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + struct rte_mbuf *mb, **rx, **rxmb; + uint16_t cq_idx, nb_rx, max_rx; + struct cq_enet_rq_desc *cqd; + struct rq_enet_desc *rqd; + unsigned int port_id; + struct vnic_cq *cq; + struct vnic_rq *rq; + struct enic *enic; + uint8_t color; + bool overlay; + bool tnl; + + rq = rx_queue; + enic = vnic_dev_priv(rq->vdev); + cq = &enic->cq[enic_cq_rq(enic, rq->index)]; + cq_idx = cq->to_clean; + + /* + * Fill up the reserve of free mbufs. Below, we restock the receive + * ring with these mbufs to avoid allocation failures. + */ + if (rq->num_free_mbufs == 0) { + if (rte_mempool_get_bulk(rq->mp, (void **)rq->free_mbufs, + ENIC_RX_BURST_MAX)) + return 0; + rq->num_free_mbufs = ENIC_RX_BURST_MAX; + } + + /* Receive until the end of the ring, at most. 
*/ + max_rx = RTE_MIN(nb_pkts, rq->num_free_mbufs); + max_rx = RTE_MIN(max_rx, cq->ring.desc_count - cq_idx); + + cqd = (struct cq_enet_rq_desc *)(cq->ring.descs) + cq_idx; + color = cq->last_color; + rxmb = rq->mbuf_ring + cq_idx; + port_id = enic->port_id; + overlay = enic->overlay_offload; + + rx = rx_pkts; + while (max_rx) { + max_rx--; + if ((cqd->type_color & CQ_DESC_COLOR_MASK_NOSHIFT) == color) + break; + if (unlikely(cqd->bytes_written_flags & + CQ_ENET_RQ_DESC_FLAGS_TRUNCATED)) { + rte_pktmbuf_free(*rxmb++); + rte_atomic64_inc(&enic->soft_stats.rx_packet_errors); + cqd++; + continue; + } + + mb = *rxmb++; + /* prefetch mbuf data for caller */ + rte_packet_prefetch(RTE_PTR_ADD(mb->buf_addr, + RTE_PKTMBUF_HEADROOM)); + mb->data_len = cqd->bytes_written_flags & + CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK; + mb->pkt_len = mb->data_len; + mb->port = port_id; + tnl = overlay && (cqd->completed_index_flags & + CQ_ENET_RQ_DESC_FLAGS_FCOE) != 0; + mb->packet_type = + enic_cq_rx_flags_to_pkt_type((struct cq_desc *)cqd, + tnl); + enic_cq_rx_to_pkt_flags((struct cq_desc *)cqd, mb); + /* Wipe the outer types set by enic_cq_rx_flags_to_pkt_type() */ + if (tnl) { + mb->packet_type &= ~(RTE_PTYPE_L3_MASK | + RTE_PTYPE_L4_MASK); + } + cqd++; + *rx++ = mb; + } + /* Number of descriptors visited */ + nb_rx = cqd - (struct cq_enet_rq_desc *)(cq->ring.descs) - cq_idx; + if (nb_rx == 0) + return 0; + rqd = ((struct rq_enet_desc *)rq->ring.descs) + cq_idx; + rxmb = rq->mbuf_ring + cq_idx; + cq_idx += nb_rx; + rq->rx_nb_hold += nb_rx; + if (unlikely(cq_idx == cq->ring.desc_count)) { + cq_idx = 0; + cq->last_color ^= CQ_DESC_COLOR_MASK_NOSHIFT; + } + cq->to_clean = cq_idx; + + memcpy(rxmb, rq->free_mbufs + ENIC_RX_BURST_MAX - rq->num_free_mbufs, + sizeof(struct rte_mbuf *) * nb_rx); + rq->num_free_mbufs -= nb_rx; + while (nb_rx) { + nb_rx--; + mb = *rxmb++; + mb->data_off = RTE_PKTMBUF_HEADROOM; + rqd->address = mb->buf_iova + RTE_PKTMBUF_HEADROOM; + rqd++; + } + if (rq->rx_nb_hold > rq->rx_free_thresh) { + rq->posted_index = enic_ring_add(rq->ring.desc_count, + rq->posted_index, + rq->rx_nb_hold); + rq->rx_nb_hold = 0; + rte_wmb(); + iowrite32_relaxed(rq->posted_index, + &rq->ctrl->posted_index); + } + + return rx - rx_pkts; +} + +static inline void enic_free_wq_bufs(struct vnic_wq *wq, + uint16_t completed_index) +{ + struct rte_mbuf *buf; + struct rte_mbuf *m, *free[ENIC_MAX_WQ_DESCS]; + unsigned int nb_to_free, nb_free = 0, i; + struct rte_mempool *pool; + unsigned int tail_idx; + unsigned int desc_count = wq->ring.desc_count; + + nb_to_free = enic_ring_sub(desc_count, wq->tail_idx, completed_index) + + 1; + tail_idx = wq->tail_idx; + pool = wq->bufs[tail_idx]->pool; + for (i = 0; i < nb_to_free; i++) { + buf = wq->bufs[tail_idx]; + m = rte_pktmbuf_prefree_seg(buf); + if (unlikely(m == NULL)) { + tail_idx = enic_ring_incr(desc_count, tail_idx); + continue; + } + + if (likely(m->pool == pool)) { + RTE_ASSERT(nb_free < ENIC_MAX_WQ_DESCS); + free[nb_free++] = m; + } else { + rte_mempool_put_bulk(pool, (void *)free, nb_free); + free[0] = m; + nb_free = 1; + pool = m->pool; + } + tail_idx = enic_ring_incr(desc_count, tail_idx); + } + + if (nb_free > 0) + rte_mempool_put_bulk(pool, (void **)free, nb_free); + + wq->tail_idx = tail_idx; + wq->ring.desc_avail += nb_to_free; +} + +unsigned int enic_cleanup_wq(__rte_unused struct enic *enic, struct vnic_wq *wq) +{ + uint16_t completed_index; + + completed_index = *((uint32_t *)wq->cqmsg_rz->addr) & 0xffff; + + if (wq->last_completed_index != completed_index) { + 
enic_free_wq_bufs(wq, completed_index); + wq->last_completed_index = completed_index; + } + return 0; +} + +uint16_t enic_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + struct vnic_wq *wq = (struct vnic_wq *)tx_queue; + int32_t ret; + uint16_t i; + uint64_t ol_flags; + struct rte_mbuf *m; + + for (i = 0; i != nb_pkts; i++) { + m = tx_pkts[i]; + ol_flags = m->ol_flags; + if (!(ol_flags & PKT_TX_TCP_SEG)) { + if (unlikely(m->pkt_len > ENIC_TX_MAX_PKT_SIZE)) { + rte_errno = EINVAL; + return i; + } + } else { + uint16_t header_len; + + header_len = m->l2_len + m->l3_len + m->l4_len; + if (m->tso_segsz + header_len > ENIC_TX_MAX_PKT_SIZE) { + rte_errno = EINVAL; + return i; + } + } + + if (ol_flags & wq->tx_offload_notsup_mask) { + rte_errno = ENOTSUP; + return i; + } +#ifdef RTE_LIBRTE_ETHDEV_DEBUG + ret = rte_validate_tx_offload(m); + if (ret != 0) { + rte_errno = -ret; + return i; + } +#endif + ret = rte_net_intel_cksum_prepare(m); + if (ret != 0) { + rte_errno = -ret; + return i; + } + } + + return i; +} + +uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + uint16_t index; + unsigned int pkt_len, data_len; + unsigned int nb_segs; + struct rte_mbuf *tx_pkt; + struct vnic_wq *wq = (struct vnic_wq *)tx_queue; + struct enic *enic = vnic_dev_priv(wq->vdev); + unsigned short vlan_id; + uint64_t ol_flags; + uint64_t ol_flags_mask; + unsigned int wq_desc_avail; + int head_idx; + unsigned int desc_count; + struct wq_enet_desc *descs, *desc_p, desc_tmp; + uint16_t mss; + uint8_t vlan_tag_insert; + uint8_t eop, cq; + uint64_t bus_addr; + uint8_t offload_mode; + uint16_t header_len; + uint64_t tso; + rte_atomic64_t *tx_oversized; + + enic_cleanup_wq(enic, wq); + wq_desc_avail = vnic_wq_desc_avail(wq); + head_idx = wq->head_idx; + desc_count = wq->ring.desc_count; + ol_flags_mask = PKT_TX_VLAN | PKT_TX_IP_CKSUM | PKT_TX_L4_MASK; + tx_oversized = &enic->soft_stats.tx_oversized; + + nb_pkts = RTE_MIN(nb_pkts, ENIC_TX_XMIT_MAX); + + for (index = 0; index < nb_pkts; index++) { + tx_pkt = *tx_pkts++; + pkt_len = tx_pkt->pkt_len; + data_len = tx_pkt->data_len; + ol_flags = tx_pkt->ol_flags; + nb_segs = tx_pkt->nb_segs; + tso = ol_flags & PKT_TX_TCP_SEG; + + /* drop packet if it's too big to send */ + if (unlikely(!tso && pkt_len > ENIC_TX_MAX_PKT_SIZE)) { + rte_pktmbuf_free(tx_pkt); + rte_atomic64_inc(tx_oversized); + continue; + } + + if (nb_segs > wq_desc_avail) { + if (index > 0) + goto post; + goto done; + } + + mss = 0; + vlan_id = tx_pkt->vlan_tci; + vlan_tag_insert = !!(ol_flags & PKT_TX_VLAN); + bus_addr = (dma_addr_t) + (tx_pkt->buf_iova + tx_pkt->data_off); + + descs = (struct wq_enet_desc *)wq->ring.descs; + desc_p = descs + head_idx; + + eop = (data_len == pkt_len); + offload_mode = WQ_ENET_OFFLOAD_MODE_CSUM; + header_len = 0; + + if (tso) { + header_len = tx_pkt->l2_len + tx_pkt->l3_len + + tx_pkt->l4_len; + + /* Drop if non-TCP packet or TSO seg size is too big */ + if (unlikely(header_len == 0 || ((tx_pkt->tso_segsz + + header_len) > ENIC_TX_MAX_PKT_SIZE))) { + rte_pktmbuf_free(tx_pkt); + rte_atomic64_inc(tx_oversized); + continue; + } + + offload_mode = WQ_ENET_OFFLOAD_MODE_TSO; + mss = tx_pkt->tso_segsz; + /* For tunnel, need the size of outer+inner headers */ + if (ol_flags & PKT_TX_TUNNEL_MASK) { + header_len += tx_pkt->outer_l2_len + + tx_pkt->outer_l3_len; + } + } + + if ((ol_flags & ol_flags_mask) && (header_len == 0)) { + if (ol_flags & PKT_TX_IP_CKSUM) + mss |= ENIC_CALC_IP_CKSUM; + + /* Nic uses just 1 bit for UDP 
and TCP */ + switch (ol_flags & PKT_TX_L4_MASK) { + case PKT_TX_TCP_CKSUM: + case PKT_TX_UDP_CKSUM: + mss |= ENIC_CALC_TCP_UDP_CKSUM; + break; + } + } + wq->cq_pend++; + cq = 0; + if (eop && wq->cq_pend >= ENIC_WQ_CQ_THRESH) { + cq = 1; + wq->cq_pend = 0; + } + wq_enet_desc_enc(&desc_tmp, bus_addr, data_len, mss, header_len, + offload_mode, eop, cq, 0, vlan_tag_insert, + vlan_id, 0); + + *desc_p = desc_tmp; + wq->bufs[head_idx] = tx_pkt; + head_idx = enic_ring_incr(desc_count, head_idx); + wq_desc_avail--; + + if (!eop) { + for (tx_pkt = tx_pkt->next; tx_pkt; tx_pkt = + tx_pkt->next) { + data_len = tx_pkt->data_len; + + wq->cq_pend++; + cq = 0; + if (tx_pkt->next == NULL) { + eop = 1; + if (wq->cq_pend >= ENIC_WQ_CQ_THRESH) { + cq = 1; + wq->cq_pend = 0; + } + } + desc_p = descs + head_idx; + bus_addr = (dma_addr_t)(tx_pkt->buf_iova + + tx_pkt->data_off); + wq_enet_desc_enc((struct wq_enet_desc *) + &desc_tmp, bus_addr, data_len, + mss, 0, offload_mode, eop, cq, + 0, vlan_tag_insert, vlan_id, + 0); + + *desc_p = desc_tmp; + wq->bufs[head_idx] = tx_pkt; + head_idx = enic_ring_incr(desc_count, head_idx); + wq_desc_avail--; + } + } + } + post: + rte_wmb(); + iowrite32_relaxed(head_idx, &wq->ctrl->posted_index); + done: + wq->ring.desc_avail = wq_desc_avail; + wq->head_idx = head_idx; + + return index; +} + +static void enqueue_simple_pkts(struct rte_mbuf **pkts, + struct wq_enet_desc *desc, + uint16_t n, + struct enic *enic) +{ + struct rte_mbuf *p; + uint16_t mss; + + while (n) { + n--; + p = *pkts++; + desc->address = p->buf_iova + p->data_off; + desc->length = p->pkt_len; + /* VLAN insert */ + desc->vlan_tag = p->vlan_tci; + desc->header_length_flags &= + ((1 << WQ_ENET_FLAGS_EOP_SHIFT) | + (1 << WQ_ENET_FLAGS_CQ_ENTRY_SHIFT)); + if (p->ol_flags & PKT_TX_VLAN) { + desc->header_length_flags |= + 1 << WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT; + } + /* + * Checksum offload. We use WQ_ENET_OFFLOAD_MODE_CSUM, which + * is 0, so no need to set offload_mode. + */ + mss = 0; + if (p->ol_flags & PKT_TX_IP_CKSUM) + mss |= ENIC_CALC_IP_CKSUM << WQ_ENET_MSS_SHIFT; + if (p->ol_flags & PKT_TX_L4_MASK) + mss |= ENIC_CALC_TCP_UDP_CKSUM << WQ_ENET_MSS_SHIFT; + desc->mss_loopback = mss; + + /* + * The app should not send oversized + * packets. tx_pkt_prepare includes a check as + * well. But some apps ignore the device max size and + * tx_pkt_prepare. Oversized packets cause WQ errrors + * and the NIC ends up disabling the whole WQ. So + * truncate packets.. 
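+ *
+ * The cooperative path an application is expected to take is roughly:
+ *
+ *      nb = rte_eth_tx_prepare(port_id, queue_id, pkts, nb);
+ *      sent = rte_eth_tx_burst(port_id, queue_id, pkts, nb);
+ *
+ * where the prepare step goes through the tx_pkt_prepare hook
+ * (enic_prep_pkts() above) and rejects oversized or unsupported packets
+ * with rte_errno set, before they ever reach this function.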
+ */ + if (unlikely(p->pkt_len > ENIC_TX_MAX_PKT_SIZE)) { + desc->length = ENIC_TX_MAX_PKT_SIZE; + rte_atomic64_inc(&enic->soft_stats.tx_oversized); + } + desc++; + } +} + +uint16_t enic_simple_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + unsigned int head_idx, desc_count; + struct wq_enet_desc *desc; + struct vnic_wq *wq; + struct enic *enic; + uint16_t rem, n; + + wq = (struct vnic_wq *)tx_queue; + enic = vnic_dev_priv(wq->vdev); + enic_cleanup_wq(enic, wq); + /* Will enqueue this many packets in this call */ + nb_pkts = RTE_MIN(nb_pkts, wq->ring.desc_avail); + if (nb_pkts == 0) + return 0; + + head_idx = wq->head_idx; + desc_count = wq->ring.desc_count; + + /* Descriptors until the end of the ring */ + n = desc_count - head_idx; + n = RTE_MIN(nb_pkts, n); + + /* Save mbuf pointers to free later */ + memcpy(wq->bufs + head_idx, tx_pkts, sizeof(struct rte_mbuf *) * n); + + /* Enqueue until the ring end */ + rem = nb_pkts - n; + desc = ((struct wq_enet_desc *)wq->ring.descs) + head_idx; + enqueue_simple_pkts(tx_pkts, desc, n, enic); + + /* Wrap to the start of the ring */ + if (rem) { + tx_pkts += n; + memcpy(wq->bufs, tx_pkts, sizeof(struct rte_mbuf *) * rem); + desc = (struct wq_enet_desc *)wq->ring.descs; + enqueue_simple_pkts(tx_pkts, desc, rem, enic); + } + rte_wmb(); + + /* Update head_idx and desc_avail */ + wq->ring.desc_avail -= nb_pkts; + head_idx += nb_pkts; + if (head_idx >= desc_count) + head_idx -= desc_count; + wq->head_idx = head_idx; + iowrite32_relaxed(head_idx, &wq->ctrl->posted_index); + return nb_pkts; +} diff --git a/src/spdk/dpdk/drivers/net/enic/enic_rxtx_common.h b/src/spdk/dpdk/drivers/net/enic/enic_rxtx_common.h new file mode 100644 index 000000000..d8668d189 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/enic/enic_rxtx_common.h @@ -0,0 +1,275 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2008-2018 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. 
+ */ + +#ifndef _ENIC_RXTX_COMMON_H_ +#define _ENIC_RXTX_COMMON_H_ + +#include + +static inline uint16_t +enic_cq_rx_desc_ciflags(struct cq_enet_rq_desc *crd) +{ + return rte_le_to_cpu_16(crd->completed_index_flags) & + ~CQ_DESC_COMP_NDX_MASK; +} + +static inline uint16_t +enic_cq_rx_desc_bwflags(struct cq_enet_rq_desc *crd) +{ + return rte_le_to_cpu_16(crd->bytes_written_flags) & + ~CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK; +} + +static inline uint8_t +enic_cq_rx_desc_packet_error(uint16_t bwflags) +{ + return (bwflags & CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) == + CQ_ENET_RQ_DESC_FLAGS_TRUNCATED; +} + +static inline uint8_t +enic_cq_rx_desc_eop(uint16_t ciflags) +{ + return (ciflags & CQ_ENET_RQ_DESC_FLAGS_EOP) + == CQ_ENET_RQ_DESC_FLAGS_EOP; +} + +static inline uint8_t +enic_cq_rx_desc_csum_not_calc(struct cq_enet_rq_desc *cqrd) +{ + return (rte_le_to_cpu_16(cqrd->q_number_rss_type_flags) & + CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) == + CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC; +} + +static inline uint8_t +enic_cq_rx_desc_ipv4_csum_ok(struct cq_enet_rq_desc *cqrd) +{ + return (cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) == + CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK; +} + +static inline uint8_t +enic_cq_rx_desc_tcp_udp_csum_ok(struct cq_enet_rq_desc *cqrd) +{ + return (cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) == + CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK; +} + +static inline uint8_t +enic_cq_rx_desc_rss_type(struct cq_enet_rq_desc *cqrd) +{ + return (uint8_t)((rte_le_to_cpu_16(cqrd->q_number_rss_type_flags) >> + CQ_DESC_Q_NUM_BITS) & CQ_ENET_RQ_DESC_RSS_TYPE_MASK); +} + +static inline uint32_t +enic_cq_rx_desc_rss_hash(struct cq_enet_rq_desc *cqrd) +{ + return rte_le_to_cpu_32(cqrd->rss_hash); +} + +static inline uint16_t +enic_cq_rx_desc_vlan(struct cq_enet_rq_desc *cqrd) +{ + return rte_le_to_cpu_16(cqrd->vlan); +} + +static inline uint16_t +enic_cq_rx_desc_n_bytes(struct cq_desc *cqd) +{ + struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd; + return rte_le_to_cpu_16(cqrd->bytes_written_flags) & + CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK; +} + + +static inline uint8_t +enic_cq_rx_check_err(struct cq_desc *cqd) +{ + struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd; + uint16_t bwflags; + + bwflags = enic_cq_rx_desc_bwflags(cqrd); + if (unlikely(enic_cq_rx_desc_packet_error(bwflags))) + return 1; + return 0; +} + +/* Lookup table to translate RX CQ flags to mbuf flags. */ +static uint32_t +enic_cq_rx_flags_to_pkt_type(struct cq_desc *cqd, uint8_t tnl) +{ + struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd; + uint8_t cqrd_flags = cqrd->flags; + /* + * Odd-numbered entries are for tunnel packets. All packet type info + * applies to the inner packet, and there is no info on the outer + * packet. The outer flags in these entries exist only to avoid + * changing enic_cq_rx_to_pkt_flags(). They are cleared from mbuf + * afterwards. + * + * Also, as there is no tunnel type info (VXLAN, NVGRE, or GENEVE), set + * RTE_PTYPE_TUNNEL_GRENAT.. 
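+ *
+ * The table index is just the descriptor's fragment/IPv4/IPv6/TCP/UDP
+ * flag bits with the tunnel indication added in, i.e. roughly:
+ *
+ *      idx = flags & (FRAGMENT | IPV4 | IPV6 | TCP | UDP);
+ *      mbuf->packet_type = cq_type_table[idx + tnl];
+ *
+ * For example, a plain IPv4/TCP completion selects entry 0x24, while the
+ * same flags on a tunnelled packet (tnl = 1) select entry 0x25; the
+ * callers then clear the outer L3/L4 bits for the tunnelled case.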
+ */ + static const uint32_t cq_type_table[128] __rte_cache_aligned = { + [0x00] = RTE_PTYPE_UNKNOWN, + [0x01] = RTE_PTYPE_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER, + [0x20] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG, + [0x21] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [0x22] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP, + [0x23] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + [0x24] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP, + [0x25] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [0x60] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG, + [0x61] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [0x62] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG, + [0x63] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [0x64] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG, + [0x65] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [0x10] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG, + [0x11] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [0x12] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP, + [0x13] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + [0x14] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_TCP, + [0x15] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_TCP | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [0x50] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG, + [0x51] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [0x52] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG, + [0x53] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [0x54] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG, + [0x55] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + /* All others reserved */ + }; + cqrd_flags &= CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT + | CQ_ENET_RQ_DESC_FLAGS_IPV4 | CQ_ENET_RQ_DESC_FLAGS_IPV6 + | CQ_ENET_RQ_DESC_FLAGS_TCP | CQ_ENET_RQ_DESC_FLAGS_UDP; + return cq_type_table[cqrd_flags + tnl]; +} + +static void +enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf) +{ + struct cq_enet_rq_desc *cqrd = 
(struct cq_enet_rq_desc *)cqd; + uint16_t bwflags, pkt_flags = 0, vlan_tci; + bwflags = enic_cq_rx_desc_bwflags(cqrd); + vlan_tci = enic_cq_rx_desc_vlan(cqrd); + + /* VLAN STRIPPED flag. The L2 packet type updated here also */ + if (bwflags & CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) { + pkt_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED; + mbuf->packet_type |= RTE_PTYPE_L2_ETHER; + } else { + if (vlan_tci != 0) { + pkt_flags |= PKT_RX_VLAN; + mbuf->packet_type |= RTE_PTYPE_L2_ETHER_VLAN; + } else { + mbuf->packet_type |= RTE_PTYPE_L2_ETHER; + } + } + mbuf->vlan_tci = vlan_tci; + + if ((cqd->type_color & CQ_DESC_TYPE_MASK) == CQ_DESC_TYPE_CLASSIFIER) { + struct cq_enet_rq_clsf_desc *clsf_cqd; + uint16_t filter_id; + clsf_cqd = (struct cq_enet_rq_clsf_desc *)cqd; + filter_id = clsf_cqd->filter_id; + if (filter_id) { + pkt_flags |= PKT_RX_FDIR; + if (filter_id != ENIC_MAGIC_FILTER_ID) { + /* filter_id = mark id + 1, so subtract 1 */ + mbuf->hash.fdir.hi = filter_id - 1; + pkt_flags |= PKT_RX_FDIR_ID; + } + } + } else if (enic_cq_rx_desc_rss_type(cqrd)) { + /* RSS flag */ + pkt_flags |= PKT_RX_RSS_HASH; + mbuf->hash.rss = enic_cq_rx_desc_rss_hash(cqrd); + } + + /* checksum flags */ + if (mbuf->packet_type & (RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L3_IPV6)) { + if (!enic_cq_rx_desc_csum_not_calc(cqrd)) { + uint32_t l4_flags; + l4_flags = mbuf->packet_type & RTE_PTYPE_L4_MASK; + + /* + * When overlay offload is enabled, the NIC may + * set ipv4_csum_ok=1 if the inner packet is IPv6.. + * So, explicitly check for IPv4 before checking + * ipv4_csum_ok. + */ + if (mbuf->packet_type & RTE_PTYPE_L3_IPV4) { + if (enic_cq_rx_desc_ipv4_csum_ok(cqrd)) + pkt_flags |= PKT_RX_IP_CKSUM_GOOD; + else + pkt_flags |= PKT_RX_IP_CKSUM_BAD; + } + + if (l4_flags == RTE_PTYPE_L4_UDP || + l4_flags == RTE_PTYPE_L4_TCP) { + if (enic_cq_rx_desc_tcp_udp_csum_ok(cqrd)) + pkt_flags |= PKT_RX_L4_CKSUM_GOOD; + else + pkt_flags |= PKT_RX_L4_CKSUM_BAD; + } + } + } + + mbuf->ol_flags = pkt_flags; +} + +#endif /* _ENIC_RXTX_COMMON_H_ */ diff --git a/src/spdk/dpdk/drivers/net/enic/enic_rxtx_vec_avx2.c b/src/spdk/dpdk/drivers/net/enic/enic_rxtx_vec_avx2.c new file mode 100644 index 000000000..36d4d0dea --- /dev/null +++ b/src/spdk/dpdk/drivers/net/enic/enic_rxtx_vec_avx2.c @@ -0,0 +1,830 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2008-2018 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. 
+ */ + +#include +#include + +#include "enic_compat.h" +#include "rq_enet_desc.h" +#include "enic.h" +#include "enic_rxtx_common.h" + +#include + +static struct rte_mbuf * +rx_one(struct cq_enet_rq_desc *cqd, struct rte_mbuf *mb, struct enic *enic) +{ + bool tnl; + + *(uint64_t *)&mb->rearm_data = enic->mbuf_initializer; + mb->data_len = cqd->bytes_written_flags & + CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK; + mb->pkt_len = mb->data_len; + tnl = enic->overlay_offload && (cqd->completed_index_flags & + CQ_ENET_RQ_DESC_FLAGS_FCOE) != 0; + mb->packet_type = + enic_cq_rx_flags_to_pkt_type((struct cq_desc *)cqd, tnl); + enic_cq_rx_to_pkt_flags((struct cq_desc *)cqd, mb); + /* Wipe the outer types set by enic_cq_rx_flags_to_pkt_type() */ + if (tnl) { + mb->packet_type &= ~(RTE_PTYPE_L3_MASK | + RTE_PTYPE_L4_MASK); + } + return mb; +} + +static uint16_t +enic_noscatter_vec_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + struct rte_mbuf **rx, **rxmb; + uint16_t cq_idx, nb_rx, max_rx; + struct cq_enet_rq_desc *cqd; + struct rq_enet_desc *rqd; + struct vnic_cq *cq; + struct vnic_rq *rq; + struct enic *enic; + uint8_t color; + + rq = rx_queue; + enic = vnic_dev_priv(rq->vdev); + cq = &enic->cq[enic_cq_rq(enic, rq->index)]; + cq_idx = cq->to_clean; + + /* + * Fill up the reserve of free mbufs. Below, we restock the receive + * ring with these mbufs to avoid allocation failures. + */ + if (rq->num_free_mbufs == 0) { + if (rte_mempool_get_bulk(rq->mp, (void **)rq->free_mbufs, + ENIC_RX_BURST_MAX)) + return 0; + rq->num_free_mbufs = ENIC_RX_BURST_MAX; + } + /* Receive until the end of the ring, at most. */ + max_rx = RTE_MIN(nb_pkts, rq->num_free_mbufs); + max_rx = RTE_MIN(max_rx, cq->ring.desc_count - cq_idx); + + rxmb = rq->mbuf_ring + cq_idx; + color = cq->last_color; + cqd = (struct cq_enet_rq_desc *)(cq->ring.descs) + cq_idx; + rx = rx_pkts; + if (max_rx == 0 || + (cqd->type_color & CQ_DESC_COLOR_MASK_NOSHIFT) == color) + return 0; + + /* Step 1: Process one packet to do aligned 256-bit load below */ + if (cq_idx & 0x1) { + if (unlikely(cqd->bytes_written_flags & + CQ_ENET_RQ_DESC_FLAGS_TRUNCATED)) { + rte_pktmbuf_free(*rxmb++); + rte_atomic64_inc(&enic->soft_stats.rx_packet_errors); + } else { + *rx++ = rx_one(cqd, *rxmb++, enic); + } + cqd++; + max_rx--; + } + + const __m256i mask = + _mm256_set_epi8(/* Second descriptor */ + 0xff, /* type_color */ + (CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT | + CQ_ENET_RQ_DESC_FLAGS_IPV4 | + CQ_ENET_RQ_DESC_FLAGS_IPV6 | + CQ_ENET_RQ_DESC_FLAGS_TCP | + CQ_ENET_RQ_DESC_FLAGS_UDP), /* flags */ + 0, 0, /* checksum_fcoe */ + 0xff, 0xff, /* vlan */ + 0x3f, 0xff, /* bytes_written_flags */ + 0xff, 0xff, 0xff, 0xff, /* rss_hash */ + 0xff, 0xff, /* q_number_rss_type_flags */ + 0, 0, /* completed_index_flags */ + /* First descriptor */ + 0xff, /* type_color */ + (CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT | + CQ_ENET_RQ_DESC_FLAGS_IPV4 | + CQ_ENET_RQ_DESC_FLAGS_IPV6 | + CQ_ENET_RQ_DESC_FLAGS_TCP | + CQ_ENET_RQ_DESC_FLAGS_UDP), /* flags */ + 0, 0, /* checksum_fcoe */ + 0xff, 0xff, /* vlan */ + 0x3f, 0xff, /* bytes_written_flags */ + 0xff, 0xff, 0xff, 0xff, /* rss_hash */ + 0xff, 0xff, /* q_number_rss_type_flags */ + 0, 0 /* completed_index_flags */ + ); + const __m256i shuffle_mask = + _mm256_set_epi8(/* Second descriptor */ + 7, 6, 5, 4, /* rss = rss_hash */ + 11, 10, /* vlan_tci = vlan */ + 9, 8, /* data_len = bytes_written */ + 0x80, 0x80, 9, 8, /* pkt_len = bytes_written */ + 0x80, 0x80, 0x80, 0x80, /* packet_type = 0 */ + /* First descriptor */ + 7, 6, 5, 4, 
/* rss = rss_hash */ + 11, 10, /* vlan_tci = vlan */ + 9, 8, /* data_len = bytes_written */ + 0x80, 0x80, 9, 8, /* pkt_len = bytes_written */ + 0x80, 0x80, 0x80, 0x80 /* packet_type = 0 */ + ); + /* Used to collect 8 flags from 8 desc into one register */ + const __m256i flags_shuffle_mask = + _mm256_set_epi8(/* Second descriptor */ + 1, 3, 9, 14, + 1, 3, 9, 14, + 1, 3, 9, 14, + 1, 3, 9, 14, + /* First descriptor */ + 1, 3, 9, 14, + 1, 3, 9, 14, + 1, 3, 9, 14, + /* + * Byte 3: upper byte of completed_index_flags + * bit 5 = fcoe (tunnel) + * Byte 2: upper byte of q_number_rss_type_flags + * bits 2,3,4,5 = rss type + * bit 6 = csum_not_calc + * Byte 1: upper byte of bytes_written_flags + * bit 6 = truncated + * bit 7 = vlan stripped + * Byte 0: flags + */ + 1, 3, 9, 14 + ); + /* Used to collect 8 VLAN IDs from 8 desc into one register */ + const __m256i vlan_shuffle_mask = + _mm256_set_epi8(/* Second descriptor */ + 0x80, 0x80, 11, 10, + 0x80, 0x80, 11, 10, + 0x80, 0x80, 11, 10, + 0x80, 0x80, 11, 10, + /* First descriptor */ + 0x80, 0x80, 11, 10, + 0x80, 0x80, 11, 10, + 0x80, 0x80, 11, 10, + 0x80, 0x80, 11, 10); + /* PKT_RX_RSS_HASH is 1<<1 so fits in 8-bit integer */ + const __m256i rss_shuffle = + _mm256_set_epi8(/* second 128 bits */ + PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, + PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, + PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, + PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, + PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, + 0, /* rss_types = 0 */ + /* first 128 bits */ + PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, + PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, + PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, + PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, + PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, + 0 /* rss_types = 0 */); + /* + * VLAN offload flags. + * shuffle index: + * vlan_stripped => bit 0 + * vlan_id == 0 => bit 1 + */ + const __m256i vlan_shuffle = + _mm256_set_epi32(0, 0, 0, 0, + PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, 0, + PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, PKT_RX_VLAN); + /* Use the same shuffle index as vlan_shuffle */ + const __m256i vlan_ptype_shuffle = + _mm256_set_epi32(0, 0, 0, 0, + RTE_PTYPE_L2_ETHER, + RTE_PTYPE_L2_ETHER, + RTE_PTYPE_L2_ETHER, + RTE_PTYPE_L2_ETHER_VLAN); + /* + * CKSUM flags. Shift right so they fit int 8-bit integers. 
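+ *
+ * A worked example: an IPv4/TCP packet whose checksums the NIC computed
+ * and found good (csum_not_calc clear) builds index 0b1111, whose entry
+ * below is (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> 1; the
+ * _mm256_slli_epi32(csum_flags, 1) further down restores the real flag
+ * values.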
+ * shuffle index: + * ipv4_csum_ok => bit 3 + * ip4 => bit 2 + * tcp_or_udp => bit 1 + * tcp_udp_csum_ok => bit 0 + */ + const __m256i csum_shuffle = + _mm256_set_epi8(/* second 128 bits */ + /* 1111 ip4+ip4_ok+l4+l4_ok */ + ((PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> 1), + /* 1110 ip4_ok+ip4+l4+!l4_ok */ + ((PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD) >> 1), + (PKT_RX_IP_CKSUM_GOOD >> 1), /* 1101 ip4+ip4_ok */ + (PKT_RX_IP_CKSUM_GOOD >> 1), /* 1100 ip4_ok+ip4 */ + (PKT_RX_L4_CKSUM_GOOD >> 1), /* 1011 l4+l4_ok */ + (PKT_RX_L4_CKSUM_BAD >> 1), /* 1010 l4+!l4_ok */ + 0, /* 1001 */ + 0, /* 1000 */ + /* 0111 !ip4_ok+ip4+l4+l4_ok */ + ((PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD) >> 1), + /* 0110 !ip4_ok+ip4+l4+!l4_ok */ + ((PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD) >> 1), + (PKT_RX_IP_CKSUM_BAD >> 1), /* 0101 !ip4_ok+ip4 */ + (PKT_RX_IP_CKSUM_BAD >> 1), /* 0100 !ip4_ok+ip4 */ + (PKT_RX_L4_CKSUM_GOOD >> 1), /* 0011 l4+l4_ok */ + (PKT_RX_L4_CKSUM_BAD >> 1), /* 0010 l4+!l4_ok */ + 0, /* 0001 */ + 0, /* 0000 */ + /* first 128 bits */ + ((PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> 1), + ((PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD) >> 1), + (PKT_RX_IP_CKSUM_GOOD >> 1), + (PKT_RX_IP_CKSUM_GOOD >> 1), + (PKT_RX_L4_CKSUM_GOOD >> 1), + (PKT_RX_L4_CKSUM_BAD >> 1), + 0, 0, + ((PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD) >> 1), + ((PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD) >> 1), + (PKT_RX_IP_CKSUM_BAD >> 1), + (PKT_RX_IP_CKSUM_BAD >> 1), + (PKT_RX_L4_CKSUM_GOOD >> 1), + (PKT_RX_L4_CKSUM_BAD >> 1), + 0, 0); + /* + * Non-fragment PTYPEs. + * Shuffle 4-bit index: + * ip6 => bit 0 + * ip4 => bit 1 + * udp => bit 2 + * tcp => bit 3 + * bit + * 3 2 1 0 + * ------- + * 0 0 0 0 unknown + * 0 0 0 1 ip6 | nonfrag + * 0 0 1 0 ip4 | nonfrag + * 0 0 1 1 unknown + * 0 1 0 0 unknown + * 0 1 0 1 ip6 | udp + * 0 1 1 0 ip4 | udp + * 0 1 1 1 unknown + * 1 0 0 0 unknown + * 1 0 0 1 ip6 | tcp + * 1 0 1 0 ip4 | tcp + * 1 0 1 1 unknown + * 1 1 0 0 unknown + * 1 1 0 1 unknown + * 1 1 1 0 unknown + * 1 1 1 1 unknown + * + * PTYPEs do not fit in 8 bits, so shift right 4.. + */ + const __m256i nonfrag_ptype_shuffle = + _mm256_set_epi8(/* second 128 bits */ + RTE_PTYPE_UNKNOWN, + RTE_PTYPE_UNKNOWN, RTE_PTYPE_UNKNOWN, + RTE_PTYPE_UNKNOWN, RTE_PTYPE_UNKNOWN, + (RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP) >> 4, + (RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_TCP) >> 4, + RTE_PTYPE_UNKNOWN, RTE_PTYPE_UNKNOWN, + (RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP) >> 4, + (RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP) >> 4, + RTE_PTYPE_UNKNOWN, RTE_PTYPE_UNKNOWN, + (RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_NONFRAG) >> 4, + (RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_NONFRAG) >> 4, + RTE_PTYPE_UNKNOWN, + /* first 128 bits */ + RTE_PTYPE_UNKNOWN, + RTE_PTYPE_UNKNOWN, RTE_PTYPE_UNKNOWN, + RTE_PTYPE_UNKNOWN, RTE_PTYPE_UNKNOWN, + (RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP) >> 4, + (RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_TCP) >> 4, + RTE_PTYPE_UNKNOWN, RTE_PTYPE_UNKNOWN, + (RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP) >> 4, + (RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP) >> 4, + RTE_PTYPE_UNKNOWN, RTE_PTYPE_UNKNOWN, + (RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_NONFRAG) >> 4, + (RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_NONFRAG) >> 4, + RTE_PTYPE_UNKNOWN); + /* Fragment PTYPEs. Use the same shuffle index as above. 
*/ + const __m256i frag_ptype_shuffle = + _mm256_set_epi8(/* second 128 bits */ + RTE_PTYPE_UNKNOWN, + RTE_PTYPE_UNKNOWN, RTE_PTYPE_UNKNOWN, + RTE_PTYPE_UNKNOWN, RTE_PTYPE_UNKNOWN, + (RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_FRAG) >> 4, + (RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_FRAG) >> 4, + RTE_PTYPE_UNKNOWN, RTE_PTYPE_UNKNOWN, + (RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_FRAG) >> 4, + (RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_FRAG) >> 4, + RTE_PTYPE_UNKNOWN, RTE_PTYPE_UNKNOWN, + (RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_FRAG) >> 4, + (RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_FRAG) >> 4, + RTE_PTYPE_UNKNOWN, + /* first 128 bits */ + RTE_PTYPE_UNKNOWN, + RTE_PTYPE_UNKNOWN, RTE_PTYPE_UNKNOWN, + RTE_PTYPE_UNKNOWN, RTE_PTYPE_UNKNOWN, + (RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_FRAG) >> 4, + (RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_FRAG) >> 4, + RTE_PTYPE_UNKNOWN, RTE_PTYPE_UNKNOWN, + (RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_FRAG) >> 4, + (RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_FRAG) >> 4, + RTE_PTYPE_UNKNOWN, RTE_PTYPE_UNKNOWN, + (RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_FRAG) >> 4, + (RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_FRAG) >> 4, + RTE_PTYPE_UNKNOWN); + /* + * Tunnel PTYPEs. Use the same shuffle index as above. + * L4 types are not part of this table. They come from non-tunnel + * types above. + */ + const __m256i tnl_l3_ptype_shuffle = + _mm256_set_epi8(/* second 128 bits */ + RTE_PTYPE_UNKNOWN, + RTE_PTYPE_UNKNOWN, RTE_PTYPE_UNKNOWN, + RTE_PTYPE_UNKNOWN, RTE_PTYPE_UNKNOWN, + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN >> 16, + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN >> 16, + RTE_PTYPE_UNKNOWN, RTE_PTYPE_UNKNOWN, + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN >> 16, + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN >> 16, + RTE_PTYPE_UNKNOWN, RTE_PTYPE_UNKNOWN, + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN >> 16, + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN >> 16, + RTE_PTYPE_UNKNOWN, + /* first 128 bits */ + RTE_PTYPE_UNKNOWN, + RTE_PTYPE_UNKNOWN, RTE_PTYPE_UNKNOWN, + RTE_PTYPE_UNKNOWN, RTE_PTYPE_UNKNOWN, + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN >> 16, + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN >> 16, + RTE_PTYPE_UNKNOWN, RTE_PTYPE_UNKNOWN, + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN >> 16, + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN >> 16, + RTE_PTYPE_UNKNOWN, RTE_PTYPE_UNKNOWN, + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN >> 16, + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN >> 16, + RTE_PTYPE_UNKNOWN); + + const __m256i mbuf_init = _mm256_set_epi64x(0, enic->mbuf_initializer, + 0, enic->mbuf_initializer); + + /* + * --- cq desc fields --- offset + * completed_index_flags - 0 use: fcoe + * q_number_rss_type_flags - 2 use: rss types, csum_not_calc + * rss_hash - 4 ==> mbuf.hash.rss + * bytes_written_flags - 8 ==> mbuf.pkt_len,data_len + * use: truncated, vlan_stripped + * vlan - 10 ==> mbuf.vlan_tci + * checksum_fcoe - 12 (unused) + * flags - 14 use: all bits + * type_color - 15 (unused) + * + * --- mbuf fields --- offset + * rearm_data ---- 16 + * data_off - 0 (mbuf_init) -+ + * refcnt - 2 (mbuf_init) | + * nb_segs - 4 (mbuf_init) | 16B 128b + * port - 6 (mbuf_init) | + * ol_flag - 8 (from cqd) -+ + * rx_descriptor_fields1 ---- 32 + * packet_type - 0 (from cqd) -+ + * pkt_len - 4 (from cqd) | + * data_len - 8 (from cqd) | 16B 128b + * vlan_tci - 10 (from cqd) | + * rss - 12 (from cqd) -+ + */ + + __m256i overlay_enabled = + _mm256_set1_epi32((uint32_t)enic->overlay_offload); + + /* Step 2: Process 8 packets per loop using SIMD */ + while (max_rx > 7 && (((cqd + 7)->type_color & + 
CQ_DESC_COLOR_MASK_NOSHIFT) != color)) { + /* Load 8 16B CQ descriptors */ + __m256i cqd01 = _mm256_load_si256((void *)cqd); + __m256i cqd23 = _mm256_load_si256((void *)(cqd + 2)); + __m256i cqd45 = _mm256_load_si256((void *)(cqd + 4)); + __m256i cqd67 = _mm256_load_si256((void *)(cqd + 6)); + /* Copy 8 mbuf pointers to rx_pkts */ + _mm256_storeu_si256((void *)rx, + _mm256_loadu_si256((void *)rxmb)); + _mm256_storeu_si256((void *)(rx + 4), + _mm256_loadu_si256((void *)(rxmb + 4))); + + /* + * Collect 8 flags (each 32 bits) into one register. + * 4 shuffles, 3 blends, 1 permute for 8 desc: 1 inst/desc + */ + __m256i flags01 = + _mm256_shuffle_epi8(cqd01, flags_shuffle_mask); + /* + * Shuffle above produces 8 x 32-bit flags for 8 descriptors + * in this order: 0, 0, 0, 0, 1, 1, 1, 1 + * The duplicates in each 128-bit lane simplifies blending + * below. + */ + __m256i flags23 = + _mm256_shuffle_epi8(cqd23, flags_shuffle_mask); + __m256i flags45 = + _mm256_shuffle_epi8(cqd45, flags_shuffle_mask); + __m256i flags67 = + _mm256_shuffle_epi8(cqd67, flags_shuffle_mask); + /* 1st blend produces flags for desc: 0, 2, 0, 0, 1, 3, 1, 1 */ + __m256i flags0_3 = _mm256_blend_epi32(flags01, flags23, 0x22); + /* 2nd blend produces flags for desc: 4, 4, 4, 6, 5, 5, 5, 7 */ + __m256i flags4_7 = _mm256_blend_epi32(flags45, flags67, 0x88); + /* 3rd blend produces flags for desc: 0, 2, 4, 6, 1, 3, 5, 7 */ + __m256i flags0_7 = _mm256_blend_epi32(flags0_3, flags4_7, 0xcc); + /* + * Swap to reorder flags in this order: 1, 3, 5, 7, 0, 2, 4, 6 + * This order simplifies blend operations way below that + * produce 'rearm' data for each mbuf. + */ + flags0_7 = _mm256_permute4x64_epi64(flags0_7, + (1 << 6) + (0 << 4) + (3 << 2) + 2); + + /* + * Check truncated bits and bail out early on. + * 6 avx inst, 1 or, 1 if-then-else for 8 desc: 1 inst/desc + */ + __m256i trunc = + _mm256_srli_epi32(_mm256_slli_epi32(flags0_7, 17), 31); + trunc = _mm256_add_epi64(trunc, _mm256_permute4x64_epi64(trunc, + (1 << 6) + (0 << 4) + (3 << 2) + 2)); + /* 0:63 contains 1+3+0+2 and 64:127 contains 5+7+4+6 */ + if (_mm256_extract_epi64(trunc, 0) || + _mm256_extract_epi64(trunc, 1)) + break; + + /* + * Compute PKT_RX_RSS_HASH. + * Use 2 shifts and 1 shuffle for 8 desc: 0.375 inst/desc + * RSS types in byte 0, 4, 8, 12, 16, 20, 24, 28 + * Everything else is zero. + */ + __m256i rss_types = + _mm256_srli_epi32(_mm256_slli_epi32(flags0_7, 10), 28); + /* + * RSS flags (PKT_RX_RSS_HASH) are in + * byte 0, 4, 8, 12, 16, 20, 24, 28 + * Everything else is zero. + */ + __m256i rss_flags = _mm256_shuffle_epi8(rss_shuffle, rss_types); + + /* + * Compute CKSUM flags. First build the index and then + * use it to shuffle csum_shuffle. 
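+ * A scalar sketch of the index built here (bit values match the masks
+ * below):
+ *
+ *      idx = 0;
+ *      if (!csum_not_calc) {
+ *              if ((tcp || udp) && !fragment)
+ *                      idx |= 0x2;
+ *              if (ipv4)
+ *                      idx |= 0x4;
+ *              idx |= flags & 0x9;     (ipv4_csum_ok and l4_csum_ok bits)
+ *      }
+ *
+ * When csum_not_calc is set, the index collapses to zero and no checksum
+ * flags are reported for packets the NIC did not validate.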
+ * 20 instructions including const loads: 2.5 inst/desc + */ + /* + * csum_not_calc (bit 22) + * csum_not_calc (0) => 0xffffffff + * csum_not_calc (1) => 0x0 + */ + const __m256i zero4 = _mm256_setzero_si256(); + const __m256i mask22 = _mm256_set1_epi32(0x400000); + __m256i csum_not_calc = _mm256_cmpeq_epi32(zero4, + _mm256_and_si256(flags0_7, mask22)); + /* + * (tcp|udp) && !fragment => bit 1 + * tcp = bit 2, udp = bit 1, frag = bit 6 + */ + const __m256i mask1 = _mm256_set1_epi32(0x2); + __m256i tcp_udp = + _mm256_andnot_si256(_mm256_srli_epi32(flags0_7, 5), + _mm256_or_si256(flags0_7, + _mm256_srli_epi32(flags0_7, 1))); + tcp_udp = _mm256_and_si256(tcp_udp, mask1); + /* ipv4 (bit 5) => bit 2 */ + const __m256i mask2 = _mm256_set1_epi32(0x4); + __m256i ipv4 = _mm256_and_si256(mask2, + _mm256_srli_epi32(flags0_7, 3)); + /* + * ipv4_csum_ok (bit 3) => bit 3 + * tcp_udp_csum_ok (bit 0) => bit 0 + * 0x9 + */ + const __m256i mask0_3 = _mm256_set1_epi32(0x9); + __m256i csum_idx = _mm256_and_si256(flags0_7, mask0_3); + csum_idx = _mm256_and_si256(csum_not_calc, + _mm256_or_si256(_mm256_or_si256(csum_idx, ipv4), + tcp_udp)); + __m256i csum_flags = + _mm256_shuffle_epi8(csum_shuffle, csum_idx); + /* Shift left to restore CKSUM flags. See csum_shuffle. */ + csum_flags = _mm256_slli_epi32(csum_flags, 1); + /* Combine csum flags and offload flags: 0.125 inst/desc */ + rss_flags = _mm256_or_si256(rss_flags, csum_flags); + + /* + * Collect 8 VLAN IDs and compute vlan_id != 0 on each. + * 4 shuffles, 3 blends, 1 permute, 1 cmp, 1 sub for 8 desc: + * 1.25 inst/desc + */ + __m256i vlan01 = _mm256_shuffle_epi8(cqd01, vlan_shuffle_mask); + __m256i vlan23 = _mm256_shuffle_epi8(cqd23, vlan_shuffle_mask); + __m256i vlan45 = _mm256_shuffle_epi8(cqd45, vlan_shuffle_mask); + __m256i vlan67 = _mm256_shuffle_epi8(cqd67, vlan_shuffle_mask); + __m256i vlan0_3 = _mm256_blend_epi32(vlan01, vlan23, 0x22); + __m256i vlan4_7 = _mm256_blend_epi32(vlan45, vlan67, 0x88); + /* desc: 0, 2, 4, 6, 1, 3, 5, 7 */ + __m256i vlan0_7 = _mm256_blend_epi32(vlan0_3, vlan4_7, 0xcc); + /* desc: 1, 3, 5, 7, 0, 2, 4, 6 */ + vlan0_7 = _mm256_permute4x64_epi64(vlan0_7, + (1 << 6) + (0 << 4) + (3 << 2) + 2); + /* + * Compare 0 == vlan_id produces 0xffffffff (-1) if + * vlan 0 and 0 if vlan non-0. Then subtracting the + * result from 0 produces 0 - (-1) = 1 for vlan 0, and + * 0 - 0 = 0 for vlan non-0. + */ + vlan0_7 = _mm256_cmpeq_epi32(zero4, vlan0_7); + /* vlan_id != 0 => 0, vlan_id == 0 => 1 */ + vlan0_7 = _mm256_sub_epi32(zero4, vlan0_7); + + /* + * Compute PKT_RX_VLAN and PKT_RX_VLAN_STRIPPED. + * Use 3 shifts, 1 or, 1 shuffle for 8 desc: 0.625 inst/desc + * VLAN offload flags in byte 0, 4, 8, 12, 16, 20, 24, 28 + * Everything else is zero. + */ + __m256i vlan_idx = + _mm256_or_si256(/* vlan_stripped => bit 0 */ + _mm256_srli_epi32(_mm256_slli_epi32(flags0_7, + 16), 31), + /* (vlan_id == 0) => bit 1 */ + _mm256_slli_epi32(vlan0_7, 1)); + /* + * The index captures 4 cases. + * stripped, id = 0 ==> 11b = 3 + * stripped, id != 0 ==> 01b = 1 + * not strip, id == 0 ==> 10b = 2 + * not strip, id != 0 ==> 00b = 0 + */ + __m256i vlan_flags = _mm256_permutevar8x32_epi32(vlan_shuffle, + vlan_idx); + /* Combine vlan and offload flags: 0.125 inst/desc */ + rss_flags = _mm256_or_si256(rss_flags, vlan_flags); + + /* + * Compute non-tunnel PTYPEs. 
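+ * The 4-bit index matches the one documented above nonfrag_ptype_shuffle:
+ * ip6 in bit 0, ip4 in bit 1, udp in bit 2, tcp in bit 3. A scalar sketch
+ * (frag_table / nonfrag_table standing in for the two shuffle constants):
+ *
+ *      idx = (ip6 << 0) | (ip4 << 1) | (udp << 2) | (tcp << 3);
+ *      ptype = (frag ? frag_table[idx] : nonfrag_table[idx]) << 4;
+ *
+ * The shift by 4 undoes the ">> 4" applied when the tables were built so
+ * the entries fit in bytes.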
+ * 17 inst / 8 desc = 2.125 inst/desc + */ + /* ETHER and ETHER_VLAN */ + __m256i vlan_ptype = + _mm256_permutevar8x32_epi32(vlan_ptype_shuffle, + vlan_idx); + /* Build the ptype index from flags */ + tcp_udp = _mm256_slli_epi32(flags0_7, 29); + tcp_udp = _mm256_slli_epi32(_mm256_srli_epi32(tcp_udp, 30), 2); + __m256i ip4_ip6 = + _mm256_srli_epi32(_mm256_slli_epi32(flags0_7, 26), 30); + __m256i ptype_idx = _mm256_or_si256(tcp_udp, ip4_ip6); + __m256i frag_bit = + _mm256_srli_epi32(_mm256_slli_epi32(flags0_7, 25), 31); + __m256i nonfrag_ptype = + _mm256_shuffle_epi8(nonfrag_ptype_shuffle, ptype_idx); + __m256i frag_ptype = + _mm256_shuffle_epi8(frag_ptype_shuffle, ptype_idx); + /* + * Zero out the unwanted types and combine the remaining bits. + * The effect is same as selecting non-frag or frag types + * depending on the frag bit. + */ + nonfrag_ptype = _mm256_and_si256(nonfrag_ptype, + _mm256_cmpeq_epi32(zero4, frag_bit)); + frag_ptype = _mm256_and_si256(frag_ptype, + _mm256_cmpgt_epi32(frag_bit, zero4)); + __m256i ptype = _mm256_or_si256(nonfrag_ptype, frag_ptype); + ptype = _mm256_slli_epi32(ptype, 4); + /* + * Compute tunnel PTYPEs. + * 15 inst / 8 desc = 1.875 inst/desc + */ + __m256i tnl_l3_ptype = + _mm256_shuffle_epi8(tnl_l3_ptype_shuffle, ptype_idx); + tnl_l3_ptype = _mm256_slli_epi32(tnl_l3_ptype, 16); + /* + * Shift non-tunnel L4 types to make them tunnel types. + * RTE_PTYPE_L4_TCP << 16 == RTE_PTYPE_INNER_L4_TCP + */ + __m256i tnl_l4_ptype = + _mm256_slli_epi32(_mm256_and_si256(ptype, + _mm256_set1_epi32(RTE_PTYPE_L4_MASK)), 16); + __m256i tnl_ptype = + _mm256_or_si256(tnl_l3_ptype, tnl_l4_ptype); + tnl_ptype = _mm256_or_si256(tnl_ptype, + _mm256_set1_epi32(RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER)); + /* + * Select non-tunnel or tunnel types by zeroing out the + * unwanted ones. + */ + __m256i tnl_flags = _mm256_and_si256(overlay_enabled, + _mm256_srli_epi32(_mm256_slli_epi32(flags0_7, 2), 31)); + tnl_ptype = _mm256_and_si256(tnl_ptype, + _mm256_sub_epi32(zero4, tnl_flags)); + ptype = _mm256_and_si256(ptype, + _mm256_cmpeq_epi32(zero4, tnl_flags)); + /* + * Combine types and swap to have ptypes in the same order + * as desc. + * desc: 0 2 4 6 1 3 5 7 + * 3 inst / 8 desc = 0.375 inst/desc + */ + ptype = _mm256_or_si256(ptype, tnl_ptype); + ptype = _mm256_or_si256(ptype, vlan_ptype); + ptype = _mm256_permute4x64_epi64(ptype, + (1 << 6) + (0 << 4) + (3 << 2) + 2); + + /* + * Mask packet length. + * Use 4 ands: 0.5 instructions/desc + */ + cqd01 = _mm256_and_si256(cqd01, mask); + cqd23 = _mm256_and_si256(cqd23, mask); + cqd45 = _mm256_and_si256(cqd45, mask); + cqd67 = _mm256_and_si256(cqd67, mask); + /* + * Shuffle. Two 16B sets of the mbuf fields. + * packet_type, pkt_len, data_len, vlan_tci, rss + */ + __m256i rearm01 = _mm256_shuffle_epi8(cqd01, shuffle_mask); + __m256i rearm23 = _mm256_shuffle_epi8(cqd23, shuffle_mask); + __m256i rearm45 = _mm256_shuffle_epi8(cqd45, shuffle_mask); + __m256i rearm67 = _mm256_shuffle_epi8(cqd67, shuffle_mask); + + /* + * Blend in ptypes + * 4 blends and 3 shuffles for 8 desc: 0.875 inst/desc + */ + rearm01 = _mm256_blend_epi32(rearm01, ptype, 0x11); + rearm23 = _mm256_blend_epi32(rearm23, + _mm256_shuffle_epi32(ptype, 1), 0x11); + rearm45 = _mm256_blend_epi32(rearm45, + _mm256_shuffle_epi32(ptype, 2), 0x11); + rearm67 = _mm256_blend_epi32(rearm67, + _mm256_shuffle_epi32(ptype, 3), 0x11); + + /* + * Move rss_flags into ol_flags in mbuf_init. 
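+ * enic->mbuf_initializer holds the constant half of rearm_data
+ * (data_off, refcnt, nb_segs and port, precomputed once at setup), so
+ * rearming an mbuf becomes one wide store instead of several field
+ * writes. The scalar path does the same thing in rx_one():
+ *
+ *      *(uint64_t *)&mb->rearm_data = enic->mbuf_initializer;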
+ * Use 1 shift and 1 blend for each desc: 2 inst/desc + */ + __m256i mbuf_init4_5 = _mm256_blend_epi32(mbuf_init, + rss_flags, 0x44); + __m256i mbuf_init2_3 = _mm256_blend_epi32(mbuf_init, + _mm256_slli_si256(rss_flags, 4), 0x44); + __m256i mbuf_init0_1 = _mm256_blend_epi32(mbuf_init, + _mm256_slli_si256(rss_flags, 8), 0x44); + __m256i mbuf_init6_7 = _mm256_blend_epi32(mbuf_init, + _mm256_srli_si256(rss_flags, 4), 0x44); + + /* + * Build rearm, one per desc. + * 8 blends and 4 permutes: 1.5 inst/desc + */ + __m256i rearm0 = _mm256_blend_epi32(rearm01, + mbuf_init0_1, 0xf0); + __m256i rearm1 = _mm256_blend_epi32(mbuf_init0_1, + rearm01, 0xf0); + __m256i rearm2 = _mm256_blend_epi32(rearm23, + mbuf_init2_3, 0xf0); + __m256i rearm3 = _mm256_blend_epi32(mbuf_init2_3, + rearm23, 0xf0); + /* Swap upper and lower 64 bits */ + rearm0 = _mm256_permute4x64_epi64(rearm0, + (1 << 6) + (0 << 4) + (3 << 2) + 2); + rearm2 = _mm256_permute4x64_epi64(rearm2, + (1 << 6) + (0 << 4) + (3 << 2) + 2); + /* Second set of 4 descriptors */ + __m256i rearm4 = _mm256_blend_epi32(rearm45, + mbuf_init4_5, 0xf0); + __m256i rearm5 = _mm256_blend_epi32(mbuf_init4_5, + rearm45, 0xf0); + __m256i rearm6 = _mm256_blend_epi32(rearm67, + mbuf_init6_7, 0xf0); + __m256i rearm7 = _mm256_blend_epi32(mbuf_init6_7, + rearm67, 0xf0); + rearm4 = _mm256_permute4x64_epi64(rearm4, + (1 << 6) + (0 << 4) + (3 << 2) + 2); + rearm6 = _mm256_permute4x64_epi64(rearm6, + (1 << 6) + (0 << 4) + (3 << 2) + 2); + + /* + * Write out 32B of mbuf fields. + * data_off - off 0 (mbuf_init) + * refcnt - 2 (mbuf_init) + * nb_segs - 4 (mbuf_init) + * port - 6 (mbuf_init) + * ol_flag - 8 (from cqd) + * packet_type - 16 (from cqd) + * pkt_len - 20 (from cqd) + * data_len - 24 (from cqd) + * vlan_tci - 26 (from cqd) + * rss - 28 (from cqd) + */ + _mm256_storeu_si256((__m256i *)&rxmb[0]->rearm_data, rearm0); + _mm256_storeu_si256((__m256i *)&rxmb[1]->rearm_data, rearm1); + _mm256_storeu_si256((__m256i *)&rxmb[2]->rearm_data, rearm2); + _mm256_storeu_si256((__m256i *)&rxmb[3]->rearm_data, rearm3); + _mm256_storeu_si256((__m256i *)&rxmb[4]->rearm_data, rearm4); + _mm256_storeu_si256((__m256i *)&rxmb[5]->rearm_data, rearm5); + _mm256_storeu_si256((__m256i *)&rxmb[6]->rearm_data, rearm6); + _mm256_storeu_si256((__m256i *)&rxmb[7]->rearm_data, rearm7); + + max_rx -= 8; + cqd += 8; + rx += 8; + rxmb += 8; + } + + /* + * Step 3: Slow path to handle a small (<8) number of packets and + * occasional truncated packets. 
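+ * Like the vector loop, this stops at the first entry whose color bit
+ * still equals cq->last_color: the NIC alternates the color it writes on
+ * each pass over the ring and the driver flips last_color whenever it
+ * wraps, so a matching color marks a stale entry from the previous pass
+ * rather than a new completion. The scalar form of the test is simply:
+ *
+ *      if ((cqd->type_color & CQ_DESC_COLOR_MASK_NOSHIFT) == color)
+ *              break;    (no more completions to reap)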
+ */ + while (max_rx && ((cqd->type_color & + CQ_DESC_COLOR_MASK_NOSHIFT) != color)) { + if (unlikely(cqd->bytes_written_flags & + CQ_ENET_RQ_DESC_FLAGS_TRUNCATED)) { + rte_pktmbuf_free(*rxmb++); + rte_atomic64_inc(&enic->soft_stats.rx_packet_errors); + } else { + *rx++ = rx_one(cqd, *rxmb++, enic); + } + cqd++; + max_rx--; + } + + /* Number of descriptors visited */ + nb_rx = cqd - (struct cq_enet_rq_desc *)(cq->ring.descs) - cq_idx; + if (nb_rx == 0) + return 0; + rqd = ((struct rq_enet_desc *)rq->ring.descs) + cq_idx; + rxmb = rq->mbuf_ring + cq_idx; + cq_idx += nb_rx; + rq->rx_nb_hold += nb_rx; + if (unlikely(cq_idx == cq->ring.desc_count)) { + cq_idx = 0; + cq->last_color ^= CQ_DESC_COLOR_MASK_NOSHIFT; + } + cq->to_clean = cq_idx; + + /* Step 4: Restock RQ with new mbufs */ + memcpy(rxmb, rq->free_mbufs + ENIC_RX_BURST_MAX - rq->num_free_mbufs, + sizeof(struct rte_mbuf *) * nb_rx); + rq->num_free_mbufs -= nb_rx; + while (nb_rx) { + rqd->address = (*rxmb)->buf_iova + RTE_PKTMBUF_HEADROOM; + nb_rx--; + rqd++; + rxmb++; + } + if (rq->rx_nb_hold > rq->rx_free_thresh) { + rq->posted_index = enic_ring_add(rq->ring.desc_count, + rq->posted_index, + rq->rx_nb_hold); + rq->rx_nb_hold = 0; + rte_wmb(); + iowrite32_relaxed(rq->posted_index, + &rq->ctrl->posted_index); + } + + return rx - rx_pkts; +} + +bool +enic_use_vector_rx_handler(struct rte_eth_dev *eth_dev) +{ + struct enic *enic = pmd_priv(eth_dev); + struct rte_fdir_conf *fconf; + + /* User needs to request for the avx2 handler */ + if (!enic->enable_avx2_rx) + return false; + /* Do not support scatter Rx */ + if (!(enic->rq_count > 0 && enic->rq[0].data_queue_enable == 0)) + return false; + /* Do not support fdir/flow */ + fconf = ð_dev->data->dev_conf.fdir_conf; + if (fconf->mode != RTE_FDIR_MODE_NONE) + return false; + if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2)) { + ENICPMD_LOG(DEBUG, " use the non-scatter avx2 Rx handler"); + eth_dev->rx_pkt_burst = &enic_noscatter_vec_recv_pkts; + return true; + } + return false; +} diff --git a/src/spdk/dpdk/drivers/net/enic/meson.build b/src/spdk/dpdk/drivers/net/enic/meson.build new file mode 100644 index 000000000..1bd7cc7e1 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/enic/meson.build @@ -0,0 +1,35 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2018 Cisco Systems, Inc. + +sources = files( + 'base/vnic_cq.c', + 'base/vnic_dev.c', + 'base/vnic_intr.c', + 'base/vnic_rq.c', + 'base/vnic_wq.c', + 'enic_clsf.c', + 'enic_ethdev.c', + 'enic_flow.c', + 'enic_fm_flow.c', + 'enic_main.c', + 'enic_res.c', + 'enic_rxtx.c', + ) +deps += ['hash'] +includes += include_directories('base') + +# The current implementation assumes 64-bit pointers +if dpdk_conf.has('RTE_MACHINE_CPUFLAG_AVX2') and dpdk_conf.get('RTE_ARCH_64') + sources += files('enic_rxtx_vec_avx2.c') +# Build the avx2 handler if the compiler supports it, even though 'machine' +# does not. This is to support users who build for the min supported machine +# and need to run the binary on newer CPUs too. 
+# This part is from i40e meson.build +elif cc.has_argument('-mavx2') and dpdk_conf.get('RTE_ARCH_64') + enic_avx2_lib = static_library('enic_avx2_lib', + 'enic_rxtx_vec_avx2.c', + dependencies: [static_rte_ethdev, static_rte_bus_pci], + include_directories: includes, + c_args: [cflags, '-mavx2']) + objs += enic_avx2_lib.extract_objects('enic_rxtx_vec_avx2.c') +endif diff --git a/src/spdk/dpdk/drivers/net/enic/rte_pmd_enic_version.map b/src/spdk/dpdk/drivers/net/enic/rte_pmd_enic_version.map new file mode 100644 index 000000000..f9f17e4f6 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/enic/rte_pmd_enic_version.map @@ -0,0 +1,3 @@ +DPDK_20.0 { + local: *; +}; diff --git a/src/spdk/dpdk/drivers/net/failsafe/Makefile b/src/spdk/dpdk/drivers/net/failsafe/Makefile new file mode 100644 index 000000000..464fd0515 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/failsafe/Makefile @@ -0,0 +1,43 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright 2017 6WIND S.A. +# Copyright 2017 Mellanox Technologies, Ltd + +include $(RTE_SDK)/mk/rte.vars.mk + +# Library name +LIB = librte_pmd_failsafe.a + +EXPORT_MAP := rte_pmd_failsafe_version.map + +# Sources are stored in SRCS-y +SRCS-$(CONFIG_RTE_LIBRTE_PMD_FAILSAFE) += failsafe.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_FAILSAFE) += failsafe_args.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_FAILSAFE) += failsafe_eal.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_FAILSAFE) += failsafe_ops.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_FAILSAFE) += failsafe_rxtx.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_FAILSAFE) += failsafe_ether.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_FAILSAFE) += failsafe_flow.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_FAILSAFE) += failsafe_intr.c +ifeq ($(CONFIG_RTE_EXEC_ENV_LINUX),y) +CFLAGS += -DLINUX +else +CFLAGS += -DBSD +endif + +# No exported include files + +# Basic CFLAGS: +CFLAGS += -std=gnu99 -Wextra +CFLAGS += -O3 +CFLAGS += -I. +CFLAGS += -D_DEFAULT_SOURCE +CFLAGS += -D_XOPEN_SOURCE=700 +CFLAGS += $(WERROR_FLAGS) +CFLAGS += -Wno-strict-prototypes +CFLAGS += -pedantic +LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring +LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs +LDLIBS += -lrte_bus_vdev +LDLIBS += -lpthread + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/src/spdk/dpdk/drivers/net/failsafe/failsafe.c b/src/spdk/dpdk/drivers/net/failsafe/failsafe.c new file mode 100644 index 000000000..72362f35d --- /dev/null +++ b/src/spdk/dpdk/drivers/net/failsafe/failsafe.c @@ -0,0 +1,419 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2017 6WIND S.A. 
+ * Copyright 2017 Mellanox Technologies, Ltd + */ + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "failsafe_private.h" + +int failsafe_logtype; + +const char pmd_failsafe_driver_name[] = FAILSAFE_DRIVER_NAME; +static const struct rte_eth_link eth_link = { + .link_speed = ETH_SPEED_NUM_10G, + .link_duplex = ETH_LINK_FULL_DUPLEX, + .link_status = ETH_LINK_UP, + .link_autoneg = ETH_LINK_AUTONEG, +}; + +static int +fs_sub_device_alloc(struct rte_eth_dev *dev, + const char *params) +{ + uint8_t nb_subs; + int ret; + int i; + struct sub_device *sdev; + uint8_t sdev_iterator; + + ret = failsafe_args_count_subdevice(dev, params); + if (ret) + return ret; + if (PRIV(dev)->subs_tail > FAILSAFE_MAX_ETHPORTS) { + ERROR("Cannot allocate more than %d ports", + FAILSAFE_MAX_ETHPORTS); + return -ENOSPC; + } + nb_subs = PRIV(dev)->subs_tail; + PRIV(dev)->subs = rte_zmalloc(NULL, + sizeof(struct sub_device) * nb_subs, + RTE_CACHE_LINE_SIZE); + if (PRIV(dev)->subs == NULL) { + ERROR("Could not allocate sub_devices"); + return -ENOMEM; + } + /* Initiate static sub devices linked list. */ + for (i = 1; i < nb_subs; i++) + PRIV(dev)->subs[i - 1].next = PRIV(dev)->subs + i; + PRIV(dev)->subs[i - 1].next = PRIV(dev)->subs; + + FOREACH_SUBDEV(sdev, sdev_iterator, dev) { + sdev->sdev_port_id = RTE_MAX_ETHPORTS; + } + return 0; +} + +static void +fs_sub_device_free(struct rte_eth_dev *dev) +{ + rte_free(PRIV(dev)->subs); +} + +static void fs_hotplug_alarm(void *arg); + +int +failsafe_hotplug_alarm_install(struct rte_eth_dev *dev) +{ + int ret; + + if (dev == NULL) + return -EINVAL; + if (PRIV(dev)->pending_alarm) + return 0; + ret = rte_eal_alarm_set(failsafe_hotplug_poll * 1000, + fs_hotplug_alarm, + dev); + if (ret) { + ERROR("Could not set up plug-in event detection"); + return ret; + } + PRIV(dev)->pending_alarm = 1; + return 0; +} + +int +failsafe_hotplug_alarm_cancel(struct rte_eth_dev *dev) +{ + int ret = 0; + + rte_errno = 0; + rte_eal_alarm_cancel(fs_hotplug_alarm, dev); + if (rte_errno) { + ERROR("rte_eal_alarm_cancel failed (errno: %s)", + strerror(rte_errno)); + ret = -rte_errno; + } else { + PRIV(dev)->pending_alarm = 0; + } + return ret; +} + +static void +fs_hotplug_alarm(void *arg) +{ + struct rte_eth_dev *dev = arg; + struct sub_device *sdev; + int ret; + uint8_t i; + + if (!PRIV(dev)->pending_alarm) + return; + PRIV(dev)->pending_alarm = 0; + FOREACH_SUBDEV(sdev, i, dev) + if (sdev->state != PRIV(dev)->state) + break; + /* if we have non-probed device */ + if (i != PRIV(dev)->subs_tail) { + if (fs_lock(dev, 1) != 0) + goto reinstall; + ret = failsafe_eth_dev_state_sync(dev); + fs_unlock(dev, 1); + if (ret) + ERROR("Unable to synchronize sub_device state"); + } + failsafe_dev_remove(dev); +reinstall: + ret = failsafe_hotplug_alarm_install(dev); + if (ret) + ERROR("Unable to set up next alarm"); +} + +static int +fs_mutex_init(struct fs_priv *priv) +{ + int ret; + pthread_mutexattr_t attr; + + ret = pthread_mutexattr_init(&attr); + if (ret) { + ERROR("Cannot initiate mutex attributes - %s", strerror(ret)); + return ret; + } + /* Allow mutex relocks for the thread holding the mutex. 
*/ + ret = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE); + if (ret) { + ERROR("Cannot set mutex type - %s", strerror(ret)); + return ret; + } + ret = pthread_mutex_init(&priv->hotplug_mutex, &attr); + if (ret) { + ERROR("Cannot initiate mutex - %s", strerror(ret)); + return ret; + } + return 0; +} + +static int +fs_eth_dev_create(struct rte_vdev_device *vdev) +{ + struct rte_eth_dev *dev; + struct rte_ether_addr *mac; + struct fs_priv *priv; + struct sub_device *sdev; + const char *params; + unsigned int socket_id; + uint8_t i; + int ret; + + dev = NULL; + priv = NULL; + socket_id = rte_socket_id(); + INFO("Creating fail-safe device on NUMA socket %u", socket_id); + params = rte_vdev_device_args(vdev); + if (params == NULL) { + ERROR("This PMD requires sub-devices, none provided"); + return -1; + } + dev = rte_eth_vdev_allocate(vdev, sizeof(*priv)); + if (dev == NULL) { + ERROR("Unable to allocate rte_eth_dev"); + return -1; + } + priv = PRIV(dev); + priv->data = dev->data; + priv->rxp = FS_RX_PROXY_INIT; + dev->dev_ops = &failsafe_ops; + dev->data->mac_addrs = &PRIV(dev)->mac_addrs[0]; + dev->data->dev_link = eth_link; + PRIV(dev)->nb_mac_addr = 1; + TAILQ_INIT(&PRIV(dev)->flow_list); + dev->rx_pkt_burst = (eth_rx_burst_t)&failsafe_rx_burst; + dev->tx_pkt_burst = (eth_tx_burst_t)&failsafe_tx_burst; + ret = fs_sub_device_alloc(dev, params); + if (ret) { + ERROR("Could not allocate sub_devices"); + goto free_dev; + } + ret = failsafe_args_parse(dev, params); + if (ret) + goto free_subs; + ret = rte_eth_dev_owner_new(&priv->my_owner.id); + if (ret) { + ERROR("Failed to get unique owner identifier"); + goto free_args; + } + snprintf(priv->my_owner.name, sizeof(priv->my_owner.name), + FAILSAFE_OWNER_NAME); + DEBUG("Failsafe port %u owner info: %s_%016"PRIX64, dev->data->port_id, + priv->my_owner.name, priv->my_owner.id); + ret = rte_eth_dev_callback_register(RTE_ETH_ALL, RTE_ETH_EVENT_NEW, + failsafe_eth_new_event_callback, + dev); + if (ret) { + ERROR("Failed to register NEW callback"); + goto free_args; + } + ret = failsafe_eal_init(dev); + if (ret) + goto unregister_new_callback; + ret = fs_mutex_init(priv); + if (ret) + goto unregister_new_callback; + ret = failsafe_hotplug_alarm_install(dev); + if (ret) { + ERROR("Could not set up plug-in event detection"); + goto unregister_new_callback; + } + mac = &dev->data->mac_addrs[0]; + if (failsafe_mac_from_arg) { + /* + * If MAC address was provided as a parameter, + * apply to all probed slaves. + */ + FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) { + ret = rte_eth_dev_default_mac_addr_set(PORT_ID(sdev), + mac); + if (ret) { + ERROR("Failed to set default MAC address"); + goto cancel_alarm; + } + } + } else { + /* + * Use the ether_addr from first probed + * device, either preferred or fallback. + */ + FOREACH_SUBDEV(sdev, i, dev) + if (sdev->state >= DEV_PROBED) { + rte_ether_addr_copy( + &ETH(sdev)->data->mac_addrs[0], mac); + break; + } + /* + * If no device has been probed and no ether_addr + * has been provided on the command line, use a random + * valid one. + * It will be applied during future slave state syncs to + * probed slaves.
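For reference, the random fallback address used here is expected to be a locally administered unicast MAC; the standalone sketch below (plain C, hypothetical helper name) shows the two first-octet bit manipulations such a generator typically performs:

#include <stdint.h>
#include <stdlib.h>

// Standalone sketch: fill 'addr' with a random, locally administered,
// unicast MAC address (6 bytes).
static void demo_random_mac(uint8_t addr[6])
{
	for (int i = 0; i < 6; i++)
		addr[i] = (uint8_t)(rand() & 0xff);
	addr[0] &= (uint8_t)~0x01;	// clear the group (multicast) bit
	addr[0] |= 0x02;		// set the locally administered bit
}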
+ */ + if (i == priv->subs_tail) + rte_eth_random_addr(&mac->addr_bytes[0]); + } + INFO("MAC address is %02x:%02x:%02x:%02x:%02x:%02x", + mac->addr_bytes[0], mac->addr_bytes[1], + mac->addr_bytes[2], mac->addr_bytes[3], + mac->addr_bytes[4], mac->addr_bytes[5]); + dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC; + PRIV(dev)->intr_handle = (struct rte_intr_handle){ + .fd = -1, + .type = RTE_INTR_HANDLE_EXT, + }; + rte_eth_dev_probing_finish(dev); + return 0; +cancel_alarm: + failsafe_hotplug_alarm_cancel(dev); +unregister_new_callback: + rte_eth_dev_callback_unregister(RTE_ETH_ALL, RTE_ETH_EVENT_NEW, + failsafe_eth_new_event_callback, dev); +free_args: + failsafe_args_free(dev); +free_subs: + fs_sub_device_free(dev); +free_dev: + /* mac_addrs must not be freed alone because part of dev_private */ + dev->data->mac_addrs = NULL; + rte_eth_dev_release_port(dev); + return -1; +} + +static int +fs_rte_eth_free(const char *name) +{ + struct rte_eth_dev *dev; + int ret; + + dev = rte_eth_dev_allocated(name); + if (dev == NULL) + return -ENODEV; + rte_eth_dev_callback_unregister(RTE_ETH_ALL, RTE_ETH_EVENT_NEW, + failsafe_eth_new_event_callback, dev); + ret = failsafe_eal_uninit(dev); + if (ret) + ERROR("Error while uninitializing sub-EAL"); + failsafe_args_free(dev); + fs_sub_device_free(dev); + ret = pthread_mutex_destroy(&PRIV(dev)->hotplug_mutex); + if (ret) + ERROR("Error while destroying hotplug mutex"); + rte_free(PRIV(dev)->mcast_addrs); + /* mac_addrs must not be freed alone because part of dev_private */ + dev->data->mac_addrs = NULL; + rte_eth_dev_release_port(dev); + return ret; +} + +static bool +devargs_already_listed(struct rte_devargs *devargs) +{ + struct rte_devargs *list_da; + + RTE_EAL_DEVARGS_FOREACH(devargs->bus->name, list_da) { + if (strcmp(list_da->name, devargs->name) == 0) + /* devargs already in the list */ + return true; + } + return false; +} + +static int +rte_pmd_failsafe_probe(struct rte_vdev_device *vdev) +{ + const char *name; + struct rte_eth_dev *eth_dev; + struct sub_device *sdev; + struct rte_devargs devargs; + uint8_t i; + int ret; + + name = rte_vdev_device_name(vdev); + INFO("Initializing " FAILSAFE_DRIVER_NAME " for %s", + name); + + if (rte_eal_process_type() == RTE_PROC_SECONDARY && + strlen(rte_vdev_device_args(vdev)) == 0) { + eth_dev = rte_eth_dev_attach_secondary(name); + if (!eth_dev) { + ERROR("Failed to probe %s", name); + return -1; + } + eth_dev->dev_ops = &failsafe_ops; + eth_dev->device = &vdev->device; + eth_dev->rx_pkt_burst = (eth_rx_burst_t)&failsafe_rx_burst; + eth_dev->tx_pkt_burst = (eth_tx_burst_t)&failsafe_tx_burst; + /* + * Failsafe will attempt to probe all of its sub-devices. + * Any failure in sub-devices is not a fatal error. + * A sub-device can be plugged later. + */ + FOREACH_SUBDEV(sdev, i, eth_dev) { + /* skip empty devargs */ + if (sdev->devargs.name[0] == '\0') + continue; + + /* rebuild devargs to be able to get the bus name. 
*/ + ret = rte_devargs_parse(&devargs, + sdev->devargs.name); + if (ret != 0) { + ERROR("Failed to parse devargs %s", + devargs.name); + continue; + } + if (!devargs_already_listed(&devargs)) { + ret = rte_dev_probe(devargs.name); + if (ret < 0) { + ERROR("Failed to probe devargs %s", + devargs.name); + continue; + } + } + } + rte_eth_dev_probing_finish(eth_dev); + return 0; + } + + return fs_eth_dev_create(vdev); +} + +static int +rte_pmd_failsafe_remove(struct rte_vdev_device *vdev) +{ + const char *name; + + name = rte_vdev_device_name(vdev); + INFO("Uninitializing " FAILSAFE_DRIVER_NAME " for %s", name); + return fs_rte_eth_free(name); +} + +static struct rte_vdev_driver failsafe_drv = { + .probe = rte_pmd_failsafe_probe, + .remove = rte_pmd_failsafe_remove, +}; + +RTE_PMD_REGISTER_VDEV(net_failsafe, failsafe_drv); +RTE_PMD_REGISTER_PARAM_STRING(net_failsafe, PMD_FAILSAFE_PARAM_STRING); + +RTE_INIT(failsafe_init_log) +{ + failsafe_logtype = rte_log_register("pmd.net.failsafe"); + if (failsafe_logtype >= 0) + rte_log_set_level(failsafe_logtype, RTE_LOG_NOTICE); +} diff --git a/src/spdk/dpdk/drivers/net/failsafe/failsafe_args.c b/src/spdk/dpdk/drivers/net/failsafe/failsafe_args.c new file mode 100644 index 000000000..707490b94 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/failsafe/failsafe_args.c @@ -0,0 +1,517 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2017 6WIND S.A. + * Copyright 2017 Mellanox Technologies, Ltd + */ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "failsafe_private.h" + +/* Callback used when a new device is found in devargs */ +typedef int (parse_cb)(struct rte_eth_dev *dev, const char *params, + uint8_t head); + +uint64_t failsafe_hotplug_poll = FAILSAFE_HOTPLUG_DEFAULT_TIMEOUT_MS; +int failsafe_mac_from_arg; + +static const char * const pmd_failsafe_init_parameters[] = { + PMD_FAILSAFE_HOTPLUG_POLL_KVARG, + PMD_FAILSAFE_MAC_KVARG, + NULL, +}; + +/* + * input: text. 
+ * output: 0: if text[0] != '(', + * 0: if there are no corresponding ')' + * n: distance to corresponding ')' otherwise + */ +static size_t +closing_paren(const char *text) +{ + int nb_open = 0; + size_t i = 0; + + while (text[i] != '\0') { + if (text[i] == '(') + nb_open++; + if (text[i] == ')') + nb_open--; + if (nb_open == 0) + return i; + i++; + } + return 0; +} + +static int +fs_parse_device(struct sub_device *sdev, char *args) +{ + struct rte_devargs *d; + int ret; + + d = &sdev->devargs; + DEBUG("%s", args); + ret = rte_devargs_parse(d, args); + if (ret) { + DEBUG("devargs parsing failed with code %d", ret); + return ret; + } + sdev->bus = d->bus; + sdev->state = DEV_PARSED; + return 0; +} + +static void +fs_sanitize_cmdline(char *args) +{ + char *nl; + + nl = strrchr(args, '\n'); + if (nl) + nl[0] = '\0'; +} + +static int +fs_execute_cmd(struct sub_device *sdev, char *cmdline) +{ + FILE *fp; + /* store possible newline as well */ + char output[DEVARGS_MAXLEN + 1]; + size_t len; + int ret; + + RTE_ASSERT(cmdline != NULL || sdev->cmdline != NULL); + if (sdev->cmdline == NULL) { + size_t i; + + len = strlen(cmdline) + 1; + sdev->cmdline = calloc(1, len); + if (sdev->cmdline == NULL) { + ERROR("Command line allocation failed"); + return -ENOMEM; + } + strlcpy(sdev->cmdline, cmdline, len); + /* Replace all commas in the command line by spaces */ + for (i = 0; i < len; i++) + if (sdev->cmdline[i] == ',') + sdev->cmdline[i] = ' '; + } + DEBUG("'%s'", sdev->cmdline); + fp = popen(sdev->cmdline, "r"); + if (fp == NULL) { + ret = -errno; + ERROR("popen: %s", strerror(errno)); + return ret; + } + /* We only read one line */ + if (fgets(output, sizeof(output) - 1, fp) == NULL) { + DEBUG("Could not read command output"); + ret = -ENODEV; + goto ret_pclose; + } + fs_sanitize_cmdline(output); + if (output[0] == '\0') { + ret = -ENODEV; + goto ret_pclose; + } + ret = fs_parse_device(sdev, output); + if (ret) + ERROR("Parsing device '%s' failed", output); +ret_pclose: + if (pclose(fp) == -1) + ERROR("pclose: %s", strerror(errno)); + return ret; +} + +static int +fs_read_fd(struct sub_device *sdev, char *fd_str) +{ + FILE *fp = NULL; + int fd = -1; + /* store possible newline as well */ + char output[DEVARGS_MAXLEN + 1]; + int err = -ENODEV; + int oflags; + int lcount; + + RTE_ASSERT(fd_str != NULL || sdev->fd_str != NULL); + if (sdev->fd_str == NULL) { + sdev->fd_str = strdup(fd_str); + if (sdev->fd_str == NULL) { + ERROR("Command line allocation failed"); + return -ENOMEM; + } + } + errno = 0; + fd = strtol(fd_str, &fd_str, 0); + if (errno || *fd_str || fd < 0) { + ERROR("Parsing FD number failed"); + goto error; + } + /* Fiddle with copy of file descriptor */ + fd = dup(fd); + if (fd == -1) + goto error; + oflags = fcntl(fd, F_GETFL); + if (oflags == -1) + goto error; + if (fcntl(fd, F_SETFL, oflags | O_NONBLOCK) == -1) + goto error; + fp = fdopen(fd, "r"); + if (fp == NULL) + goto error; + fd = -1; + /* Only take the last line into account */ + lcount = 0; + while (fgets(output, sizeof(output), fp)) + ++lcount; + if (lcount == 0) + goto error; + else if (ferror(fp) && errno != EAGAIN) + goto error; + /* Line must end with a newline character */ + fs_sanitize_cmdline(output); + if (output[0] == '\0') + goto error; + err = fs_parse_device(sdev, output); + if (err) + ERROR("Parsing device '%s' failed", output); +error: + if (fp) + fclose(fp); + if (fd != -1) + close(fd); + return err; +} + +static int +fs_parse_device_param(struct rte_eth_dev *dev, const char *param, + uint8_t head) +{ + struct 
fs_priv *priv; + struct sub_device *sdev; + char *args = NULL; + size_t a, b; + int ret; + + priv = PRIV(dev); + a = 0; + b = 0; + ret = 0; + while (param[b] != '(' && + param[b] != '\0') + b++; + a = b; + b += closing_paren(&param[b]); + if (a == b) { + ERROR("Dangling parenthesis"); + return -EINVAL; + } + a += 1; + args = strndup(&param[a], b - a); + if (args == NULL) { + ERROR("Not enough memory for parameter parsing"); + return -ENOMEM; + } + sdev = &priv->subs[head]; + if (strncmp(param, "dev", 3) == 0) { + ret = fs_parse_device(sdev, args); + if (ret) + goto free_args; + } else if (strncmp(param, "exec", 4) == 0) { + ret = fs_execute_cmd(sdev, args); + if (ret == -ENODEV) { + DEBUG("Reading device info from command line failed"); + ret = 0; + } + if (ret) + goto free_args; + } else if (strncmp(param, "fd(", 3) == 0) { + ret = fs_read_fd(sdev, args); + if (ret == -ENODEV) { + DEBUG("Reading device info from FD failed"); + ret = 0; + } + if (ret) + goto free_args; + } else { + ERROR("Unrecognized device type: %.*s", (int)b, param); + return -EINVAL; + } +free_args: + free(args); + return ret; +} + +static int +fs_parse_sub_devices(parse_cb *cb, + struct rte_eth_dev *dev, const char *params) +{ + size_t a, b; + uint8_t head; + int ret; + + a = 0; + head = 0; + ret = 0; + while (params[a] != '\0') { + b = a; + while (params[b] != '(' && + params[b] != ',' && + params[b] != '\0') + b++; + if (b == a) { + ERROR("Invalid parameter"); + return -EINVAL; + } + if (params[b] == ',') { + a = b + 1; + continue; + } + if (params[b] == '(') { + size_t start = b; + + b += closing_paren(&params[b]); + if (b == start) { + ERROR("Dangling parenthesis"); + return -EINVAL; + } + ret = (*cb)(dev, &params[a], head); + if (ret) + return ret; + head += 1; + b += 1; + if (params[b] == '\0') + return 0; + } + a = b + 1; + } + return 0; +} + +static int +fs_remove_sub_devices_definition(char params[DEVARGS_MAXLEN]) +{ + char buffer[DEVARGS_MAXLEN] = {0}; + size_t a, b; + int i; + + a = 0; + i = 0; + while (params[a] != '\0') { + b = a; + while (params[b] != '(' && + params[b] != ',' && + params[b] != '\0') + b++; + if (b == a) { + ERROR("Invalid parameter"); + return -EINVAL; + } + if (params[b] == ',' || params[b] == '\0') { + size_t len = b - a; + + if (i > 0) + len += 1; + snprintf(&buffer[i], len + 1, "%s%s", + i ?
"," : "", ¶ms[a]); + i += len; + } else if (params[b] == '(') { + size_t start = b; + + b += closing_paren(¶ms[b]); + if (b == start) + return -EINVAL; + b += 1; + if (params[b] == '\0') + goto out; + } + a = b + 1; + } +out: + strlcpy(params, buffer, DEVARGS_MAXLEN); + return 0; +} + +static int +fs_get_u64_arg(const char *key __rte_unused, + const char *value, void *out) +{ + uint64_t *u64 = out; + char *endptr = NULL; + + if ((value == NULL) || (out == NULL)) + return -EINVAL; + errno = 0; + *u64 = strtoull(value, &endptr, 0); + if (errno != 0) + return -errno; + if (endptr == value) + return -1; + return 0; +} + +static int +fs_get_mac_addr_arg(const char *key __rte_unused, + const char *value, void *out) +{ + struct rte_ether_addr *ea = out; + + if ((value == NULL) || (out == NULL)) + return -EINVAL; + + return rte_ether_unformat_addr(value, ea); +} + +int +failsafe_args_parse(struct rte_eth_dev *dev, const char *params) +{ + struct fs_priv *priv; + char mut_params[DEVARGS_MAXLEN] = ""; + struct rte_kvargs *kvlist = NULL; + unsigned int arg_count; + size_t n; + int ret; + + priv = PRIV(dev); + ret = 0; + priv->subs_tx = FAILSAFE_MAX_ETHPORTS; + /* default parameters */ + n = strlcpy(mut_params, params, sizeof(mut_params)); + if (n >= sizeof(mut_params)) { + ERROR("Parameter string too long (>=%zu)", + sizeof(mut_params)); + return -ENOMEM; + } + ret = fs_parse_sub_devices(fs_parse_device_param, + dev, params); + if (ret < 0) + return ret; + ret = fs_remove_sub_devices_definition(mut_params); + if (ret < 0) + return ret; + if (strnlen(mut_params, sizeof(mut_params)) > 0) { + kvlist = rte_kvargs_parse(mut_params, + pmd_failsafe_init_parameters); + if (kvlist == NULL) { + ERROR("Error parsing parameters, usage:\n" + PMD_FAILSAFE_PARAM_STRING); + return -1; + } + /* PLUG_IN event poll timer */ + arg_count = rte_kvargs_count(kvlist, + PMD_FAILSAFE_HOTPLUG_POLL_KVARG); + if (arg_count == 1) { + ret = rte_kvargs_process(kvlist, + PMD_FAILSAFE_HOTPLUG_POLL_KVARG, + &fs_get_u64_arg, &failsafe_hotplug_poll); + if (ret < 0) + goto free_kvlist; + } + /* MAC addr */ + arg_count = rte_kvargs_count(kvlist, + PMD_FAILSAFE_MAC_KVARG); + if (arg_count > 0) { + ret = rte_kvargs_process(kvlist, + PMD_FAILSAFE_MAC_KVARG, + &fs_get_mac_addr_arg, + &dev->data->mac_addrs[0]); + if (ret < 0) + goto free_kvlist; + + failsafe_mac_from_arg = 1; + } + } + PRIV(dev)->state = DEV_PARSED; +free_kvlist: + rte_kvargs_free(kvlist); + return ret; +} + +void +failsafe_args_free(struct rte_eth_dev *dev) +{ + struct sub_device *sdev; + uint8_t i; + + FOREACH_SUBDEV(sdev, i, dev) { + free(sdev->cmdline); + sdev->cmdline = NULL; + free(sdev->fd_str); + sdev->fd_str = NULL; + free(sdev->devargs.args); + sdev->devargs.args = NULL; + } +} + +static int +fs_count_device(struct rte_eth_dev *dev, const char *param, + uint8_t head __rte_unused) +{ + size_t b = 0; + + while (param[b] != '(' && + param[b] != '\0') + b++; + if (strncmp(param, "dev", b) != 0 && + strncmp(param, "exec", b) != 0 && + strncmp(param, "fd(", b) != 0) { + ERROR("Unrecognized device type: %.*s", (int)b, param); + return -EINVAL; + } + PRIV(dev)->subs_tail += 1; + return 0; +} + +int +failsafe_args_count_subdevice(struct rte_eth_dev *dev, + const char *params) +{ + return fs_parse_sub_devices(fs_count_device, + dev, params); +} + +static int +fs_parse_sub_device(struct sub_device *sdev) +{ + struct rte_devargs *da; + char devstr[DEVARGS_MAXLEN] = ""; + + da = &sdev->devargs; + snprintf(devstr, sizeof(devstr), "%s,%s", da->name, da->args); + return 
fs_parse_device(sdev, devstr); +} + +int +failsafe_args_parse_subs(struct rte_eth_dev *dev) +{ + struct sub_device *sdev; + uint8_t i; + int ret = 0; + + FOREACH_SUBDEV(sdev, i, dev) { + if (sdev->state >= DEV_PARSED) + continue; + if (sdev->cmdline) + ret = fs_execute_cmd(sdev, sdev->cmdline); + else if (sdev->fd_str) + ret = fs_read_fd(sdev, sdev->fd_str); + else + ret = fs_parse_sub_device(sdev); + if (ret == 0) + sdev->state = DEV_PARSED; + } + return 0; +} diff --git a/src/spdk/dpdk/drivers/net/failsafe/failsafe_eal.c b/src/spdk/dpdk/drivers/net/failsafe/failsafe_eal.c new file mode 100644 index 000000000..b9fc50867 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/failsafe/failsafe_eal.c @@ -0,0 +1,168 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2017 6WIND S.A. + * Copyright 2017 Mellanox Technologies, Ltd + */ + +#include +#include + +#include "failsafe_private.h" + +static int +fs_ethdev_portid_get(const char *name, uint16_t *port_id) +{ + uint16_t pid; + size_t len; + + if (name == NULL) { + DEBUG("Null pointer is specified\n"); + return -EINVAL; + } + len = strlen(name); + for (pid = 0; pid < RTE_MAX_ETHPORTS; pid++) { + if (rte_eth_dev_is_valid_port(pid) && + !strncmp(name, rte_eth_devices[pid].device->name, len)) { + *port_id = pid; + return 0; + } + } + return -ENODEV; +} + +static int +fs_bus_init(struct rte_eth_dev *dev) +{ + struct sub_device *sdev; + struct rte_devargs *da; + uint8_t i; + uint16_t pid; + int ret; + + FOREACH_SUBDEV(sdev, i, dev) { + if (sdev->state != DEV_PARSED) + continue; + da = &sdev->devargs; + if (fs_ethdev_portid_get(da->name, &pid) != 0) { + struct rte_eth_dev_owner pid_owner; + + ret = rte_eal_hotplug_add(da->bus->name, + da->name, + da->args); + if (ret < 0) { + ERROR("sub_device %d probe failed %s%s%s", i, + rte_errno ? "(" : "", + rte_errno ? strerror(rte_errno) : "", + rte_errno ? ")" : ""); + continue; + } + if (fs_ethdev_portid_get(da->name, &pid) != 0) { + ERROR("sub_device %d init went wrong", i); + return -ENODEV; + } + /* + * The NEW callback tried to take ownership, check + * whether it succeed or didn't. + */ + rte_eth_dev_owner_get(pid, &pid_owner); + if (pid_owner.id != PRIV(dev)->my_owner.id) { + INFO("sub_device %d owner(%s_%016"PRIX64") is not my," + " owner(%s_%016"PRIX64"), will try again later", + i, pid_owner.name, pid_owner.id, + PRIV(dev)->my_owner.name, + PRIV(dev)->my_owner.id); + continue; + } + } else { + /* The sub-device port was found. */ + char devstr[DEVARGS_MAXLEN] = ""; + struct rte_devargs *probed_da = + rte_eth_devices[pid].device->devargs; + + /* Take control of probed device. */ + free(da->args); + memset(da, 0, sizeof(*da)); + if (probed_da != NULL) + snprintf(devstr, sizeof(devstr), "%s,%s", + probed_da->name, probed_da->args); + else + strlcpy(devstr, + rte_eth_devices[pid].device->name, + sizeof(devstr)); + ret = rte_devargs_parse(da, devstr); + if (ret) { + ERROR("Probed devargs parsing failed with code" + " %d", ret); + return ret; + } + INFO("Taking control of a probed sub device" + " %d named %s", i, da->name); + ret = rte_eth_dev_owner_set(pid, &PRIV(dev)->my_owner); + if (ret < 0) { + INFO("sub_device %d owner set failed (%s), " + "will try again later", i, strerror(-ret)); + continue; + } else if (strncmp(rte_eth_devices[pid].device->name, + da->name, strlen(da->name)) != 0) { + /* + * The device probably was removed and its port + * id was reallocated before ownership set. 
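The name re-check around this point guards the window in which a port id can be released and reused between the lookup and the ownership claim; here is a generic claim-then-verify sketch of the same pattern (standalone, hypothetical names, not the ethdev owner API):

#include <stdbool.h>
#include <string.h>

// Claim a slot, then verify it still refers to the device we looked up;
// back out if the slot was recycled in between.
struct demo_slot { char name[32]; bool owned; };

static bool demo_claim_slot(struct demo_slot *slot, const char *expected)
{
	slot->owned = true;			// claim first
	if (strncmp(slot->name, expected, strlen(expected)) != 0) {
		slot->owned = false;		// recycled underneath us: release
		return false;
	}
	return true;
}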
+ */ + rte_eth_dev_owner_unset(pid, + PRIV(dev)->my_owner.id); + INFO("sub_device %d was removed before taking" + " ownership, will try again later", i); + continue; + } + } + sdev->sdev_port_id = pid; + SUB_ID(sdev) = i; + sdev->fs_port_id = dev->data->port_id; + sdev->dev = ETH(sdev)->device; + sdev->state = DEV_PROBED; + } + return 0; +} + +int +failsafe_eal_init(struct rte_eth_dev *dev) +{ + int ret; + + ret = fs_bus_init(dev); + if (ret) + return ret; + if (PRIV(dev)->state < DEV_PROBED) + PRIV(dev)->state = DEV_PROBED; + fs_switch_dev(dev, NULL); + return 0; +} + +static int +fs_bus_uninit(struct rte_eth_dev *dev) +{ + struct sub_device *sdev = NULL; + uint8_t i; + int sdev_ret; + int ret = 0; + + FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) { + sdev_ret = rte_dev_remove(sdev->dev); + if (sdev_ret < 0) { + ERROR("Failed to remove requested device %s (err: %d)", + sdev->dev->name, sdev_ret); + continue; + } + sdev->state = DEV_PROBED - 1; + } + return ret; +} + +int +failsafe_eal_uninit(struct rte_eth_dev *dev) +{ + int ret; + + ret = fs_bus_uninit(dev); + PRIV(dev)->state = DEV_PROBED - 1; + return ret; +} diff --git a/src/spdk/dpdk/drivers/net/failsafe/failsafe_ether.c b/src/spdk/dpdk/drivers/net/failsafe/failsafe_ether.c new file mode 100644 index 000000000..2b748bd8b --- /dev/null +++ b/src/spdk/dpdk/drivers/net/failsafe/failsafe_ether.c @@ -0,0 +1,638 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2017 6WIND S.A. + * Copyright 2017 Mellanox Technologies, Ltd + */ + +#include + +#include +#include +#include + +#include "failsafe_private.h" + +/** Print a message out of a flow error. */ +static int +fs_flow_complain(struct rte_flow_error *error) +{ + static const char *const errstrlist[] = { + [RTE_FLOW_ERROR_TYPE_NONE] = "no error", + [RTE_FLOW_ERROR_TYPE_UNSPECIFIED] = "cause unspecified", + [RTE_FLOW_ERROR_TYPE_HANDLE] = "flow rule (handle)", + [RTE_FLOW_ERROR_TYPE_ATTR_GROUP] = "group field", + [RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY] = "priority field", + [RTE_FLOW_ERROR_TYPE_ATTR_INGRESS] = "ingress field", + [RTE_FLOW_ERROR_TYPE_ATTR_EGRESS] = "egress field", + [RTE_FLOW_ERROR_TYPE_ATTR] = "attributes structure", + [RTE_FLOW_ERROR_TYPE_ITEM_NUM] = "pattern length", + [RTE_FLOW_ERROR_TYPE_ITEM] = "specific pattern item", + [RTE_FLOW_ERROR_TYPE_ACTION_NUM] = "number of actions", + [RTE_FLOW_ERROR_TYPE_ACTION] = "specific action", + }; + const char *errstr; + char buf[32]; + int err = rte_errno; + + if ((unsigned int)error->type >= RTE_DIM(errstrlist) || + !errstrlist[error->type]) + errstr = "unknown type"; + else + errstr = errstrlist[error->type]; + ERROR("Caught error type %d (%s): %s%s\n", + error->type, errstr, + error->cause ? (snprintf(buf, sizeof(buf), "cause: %p, ", + error->cause), buf) : "", + error->message ? 
error->message : "(no stated reason)"); + return -err; +} + +static int +eth_dev_flow_isolate_set(struct rte_eth_dev *dev, + struct sub_device *sdev) +{ + struct rte_flow_error ferror; + int ret; + + if (!PRIV(dev)->flow_isolated) { + DEBUG("Flow isolation already disabled"); + } else { + DEBUG("Enabling flow isolation"); + ret = rte_flow_isolate(PORT_ID(sdev), + PRIV(dev)->flow_isolated, + &ferror); + if (ret) { + fs_flow_complain(&ferror); + return ret; + } + } + return 0; +} + +static int +fs_eth_dev_conf_apply(struct rte_eth_dev *dev, + struct sub_device *sdev) +{ + struct rte_eth_dev *edev; + struct rte_vlan_filter_conf *vfc1; + struct rte_vlan_filter_conf *vfc2; + struct rte_flow *flow; + struct rte_flow_error ferror; + uint32_t i; + int ret; + + edev = ETH(sdev); + /* RX queue setup */ + for (i = 0; i < dev->data->nb_rx_queues; i++) { + struct rxq *rxq; + + rxq = dev->data->rx_queues[i]; + ret = rte_eth_rx_queue_setup(PORT_ID(sdev), i, + rxq->info.nb_desc, rxq->socket_id, + &rxq->info.conf, rxq->info.mp); + if (ret) { + ERROR("rx_queue_setup failed"); + return ret; + } + } + /* TX queue setup */ + for (i = 0; i < dev->data->nb_tx_queues; i++) { + struct txq *txq; + + txq = dev->data->tx_queues[i]; + ret = rte_eth_tx_queue_setup(PORT_ID(sdev), i, + txq->info.nb_desc, txq->socket_id, + &txq->info.conf); + if (ret) { + ERROR("tx_queue_setup failed"); + return ret; + } + } + /* dev_link.link_status */ + if (dev->data->dev_link.link_status != + edev->data->dev_link.link_status) { + DEBUG("Configuring link_status"); + if (dev->data->dev_link.link_status) + ret = rte_eth_dev_set_link_up(PORT_ID(sdev)); + else + ret = rte_eth_dev_set_link_down(PORT_ID(sdev)); + if (ret) { + ERROR("Failed to apply link_status"); + return ret; + } + } else { + DEBUG("link_status already set"); + } + /* promiscuous */ + if (dev->data->promiscuous != edev->data->promiscuous) { + DEBUG("Configuring promiscuous"); + if (dev->data->promiscuous) + ret = rte_eth_promiscuous_enable(PORT_ID(sdev)); + else + ret = rte_eth_promiscuous_disable(PORT_ID(sdev)); + if (ret != 0) { + ERROR("Failed to apply promiscuous mode"); + return ret; + } + } else { + DEBUG("promiscuous already set"); + } + /* all_multicast */ + if (dev->data->all_multicast != edev->data->all_multicast) { + DEBUG("Configuring all_multicast"); + if (dev->data->all_multicast) + ret = rte_eth_allmulticast_enable(PORT_ID(sdev)); + else + ret = rte_eth_allmulticast_disable(PORT_ID(sdev)); + if (ret != 0) { + ERROR("Failed to apply allmulticast mode"); + return ret; + } + } else { + DEBUG("all_multicast already set"); + } + /* MTU */ + if (dev->data->mtu != edev->data->mtu) { + DEBUG("Configuring MTU"); + ret = rte_eth_dev_set_mtu(PORT_ID(sdev), dev->data->mtu); + if (ret) { + ERROR("Failed to apply MTU"); + return ret; + } + } else { + DEBUG("MTU already set"); + } + /* default MAC */ + DEBUG("Configuring default MAC address"); + ret = rte_eth_dev_default_mac_addr_set(PORT_ID(sdev), + &dev->data->mac_addrs[0]); + if (ret) { + ERROR("Setting default MAC address failed"); + return ret; + } + /* additional MAC */ + if (PRIV(dev)->nb_mac_addr > 1) + DEBUG("Configure additional MAC address%s", + (PRIV(dev)->nb_mac_addr > 2 ? 
"es" : "")); + for (i = 1; i < PRIV(dev)->nb_mac_addr; i++) { + struct rte_ether_addr *ea; + + ea = &dev->data->mac_addrs[i]; + ret = rte_eth_dev_mac_addr_add(PORT_ID(sdev), ea, + PRIV(dev)->mac_addr_pool[i]); + if (ret) { + char ea_fmt[RTE_ETHER_ADDR_FMT_SIZE]; + + rte_ether_format_addr(ea_fmt, + RTE_ETHER_ADDR_FMT_SIZE, ea); + ERROR("Adding MAC address %s failed", ea_fmt); + return ret; + } + } + /* + * Propagate multicast MAC addresses to sub-devices, + * if non zero number of addresses is set. + * The condition is required to avoid breakage of failsafe + * for sub-devices which do not support the operation + * if the feature is really not used. + */ + if (PRIV(dev)->nb_mcast_addr > 0) { + DEBUG("Configuring multicast MAC addresses"); + ret = rte_eth_dev_set_mc_addr_list(PORT_ID(sdev), + PRIV(dev)->mcast_addrs, + PRIV(dev)->nb_mcast_addr); + if (ret) { + ERROR("Failed to apply multicast MAC addresses"); + return ret; + } + } + /* VLAN filter */ + vfc1 = &dev->data->vlan_filter_conf; + vfc2 = &edev->data->vlan_filter_conf; + if (memcmp(vfc1, vfc2, sizeof(struct rte_vlan_filter_conf))) { + uint64_t vbit; + uint64_t ids; + size_t i; + uint16_t vlan_id; + + DEBUG("Configuring VLAN filter"); + for (i = 0; i < RTE_DIM(vfc1->ids); i++) { + if (vfc1->ids[i] == 0) + continue; + ids = vfc1->ids[i]; + while (ids) { + vlan_id = 64 * i; + /* count trailing zeroes */ + vbit = ~ids & (ids - 1); + /* clear least significant bit set */ + ids ^= (ids ^ (ids - 1)) ^ vbit; + for (; vbit; vlan_id++) + vbit >>= 1; + ret = rte_eth_dev_vlan_filter( + PORT_ID(sdev), vlan_id, 1); + if (ret) { + ERROR("Failed to apply VLAN filter %hu", + vlan_id); + return ret; + } + } + } + } else { + DEBUG("VLAN filter already set"); + } + /* rte_flow */ + if (TAILQ_EMPTY(&PRIV(dev)->flow_list)) { + DEBUG("rte_flow already set"); + } else { + DEBUG("Resetting rte_flow configuration"); + ret = rte_flow_flush(PORT_ID(sdev), &ferror); + if (ret) { + fs_flow_complain(&ferror); + return ret; + } + i = 0; + rte_errno = 0; + DEBUG("Configuring rte_flow"); + TAILQ_FOREACH(flow, &PRIV(dev)->flow_list, next) { + DEBUG("Creating flow #%" PRIu32, i++); + flow->flows[SUB_ID(sdev)] = + rte_flow_create(PORT_ID(sdev), + flow->rule.attr, + flow->rule.pattern, + flow->rule.actions, + &ferror); + ret = rte_errno; + if (ret) + break; + } + if (ret) { + fs_flow_complain(&ferror); + return ret; + } + } + return 0; +} + +static void +fs_dev_remove(struct sub_device *sdev) +{ + int ret; + + if (sdev == NULL) + return; + switch (sdev->state) { + case DEV_STARTED: + failsafe_rx_intr_uninstall_subdevice(sdev); + rte_eth_dev_stop(PORT_ID(sdev)); + sdev->state = DEV_ACTIVE; + /* fallthrough */ + case DEV_ACTIVE: + failsafe_eth_dev_unregister_callbacks(sdev); + rte_eth_dev_close(PORT_ID(sdev)); + sdev->state = DEV_PROBED; + /* fallthrough */ + case DEV_PROBED: + ret = rte_dev_remove(sdev->dev); + if (ret < 0) { + ERROR("Bus detach failed for sub_device %u", + SUB_ID(sdev)); + } else { + rte_eth_dev_release_port(ETH(sdev)); + } + sdev->state = DEV_PARSED; + /* fallthrough */ + case DEV_PARSED: + case DEV_UNDEFINED: + sdev->state = DEV_UNDEFINED; + sdev->sdev_port_id = RTE_MAX_ETHPORTS; + /* the end */ + break; + } + sdev->remove = 0; + failsafe_hotplug_alarm_install(fs_dev(sdev)); +} + +static void +fs_dev_stats_save(struct sub_device *sdev) +{ + struct rte_eth_stats stats; + int err; + + /* Attempt to read current stats. 
*/ + err = rte_eth_stats_get(PORT_ID(sdev), &stats); + if (err) { + uint64_t timestamp = sdev->stats_snapshot.timestamp; + + WARN("Could not access latest statistics from sub-device %d.\n", + SUB_ID(sdev)); + if (timestamp != 0) + WARN("Using latest snapshot taken before %"PRIu64" seconds.\n", + (rte_rdtsc() - timestamp) / rte_get_tsc_hz()); + } + failsafe_stats_increment + (&PRIV(fs_dev(sdev))->stats_accumulator, + err ? &sdev->stats_snapshot.stats : &stats); + memset(&sdev->stats_snapshot, 0, sizeof(sdev->stats_snapshot)); +} + +static inline int +fs_rxtx_clean(struct sub_device *sdev) +{ + uint16_t i; + + for (i = 0; i < ETH(sdev)->data->nb_rx_queues; i++) + if (FS_ATOMIC_RX(sdev, i)) + return 0; + for (i = 0; i < ETH(sdev)->data->nb_tx_queues; i++) + if (FS_ATOMIC_TX(sdev, i)) + return 0; + return 1; +} + +void +failsafe_eth_dev_unregister_callbacks(struct sub_device *sdev) +{ + int ret; + + if (sdev == NULL) + return; + if (sdev->rmv_callback) { + ret = rte_eth_dev_callback_unregister(PORT_ID(sdev), + RTE_ETH_EVENT_INTR_RMV, + failsafe_eth_rmv_event_callback, + sdev); + if (ret) + WARN("Failed to unregister RMV callback for sub_device" + " %d", SUB_ID(sdev)); + sdev->rmv_callback = 0; + } + if (sdev->lsc_callback) { + ret = rte_eth_dev_callback_unregister(PORT_ID(sdev), + RTE_ETH_EVENT_INTR_LSC, + failsafe_eth_lsc_event_callback, + sdev); + if (ret) + WARN("Failed to unregister LSC callback for sub_device" + " %d", SUB_ID(sdev)); + sdev->lsc_callback = 0; + } +} + +void +failsafe_dev_remove(struct rte_eth_dev *dev) +{ + struct sub_device *sdev; + uint8_t i; + + FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) + if (sdev->remove && fs_rxtx_clean(sdev)) { + if (fs_lock(dev, 1) != 0) + return; + fs_dev_stats_save(sdev); + fs_dev_remove(sdev); + fs_unlock(dev, 1); + } +} + +static int +failsafe_eth_dev_rx_queues_sync(struct rte_eth_dev *dev) +{ + struct rxq *rxq; + int ret; + uint16_t i; + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = dev->data->rx_queues[i]; + + if (rxq->info.conf.rx_deferred_start && + dev->data->rx_queue_state[i] == + RTE_ETH_QUEUE_STATE_STARTED) { + /* + * The subdevice Rx queue does not launch on device + * start if deferred start flag is set. It needs to be + * started manually in case an appropriate failsafe Rx + * queue has been started earlier. + */ + ret = dev->dev_ops->rx_queue_start(dev, i); + if (ret) { + ERROR("Could not synchronize Rx queue %d", i); + return ret; + } + } else if (dev->data->rx_queue_state[i] == + RTE_ETH_QUEUE_STATE_STOPPED) { + /* + * The subdevice Rx queue needs to be stopped manually + * in case an appropriate failsafe Rx queue has been + * stopped earlier. + */ + ret = dev->dev_ops->rx_queue_stop(dev, i); + if (ret) { + ERROR("Could not synchronize Rx queue %d", i); + return ret; + } + } + } + return 0; +} + +static int +failsafe_eth_dev_tx_queues_sync(struct rte_eth_dev *dev) +{ + struct txq *txq; + int ret; + uint16_t i; + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + txq = dev->data->tx_queues[i]; + + if (txq->info.conf.tx_deferred_start && + dev->data->tx_queue_state[i] == + RTE_ETH_QUEUE_STATE_STARTED) { + /* + * The subdevice Tx queue does not launch on device + * start if deferred start flag is set. It needs to be + * started manually in case an appropriate failsafe Tx + * queue has been started earlier. 
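This Rx/Tx queue synchronisation boils down to comparing the failsafe queue state with the sub-device deferred-start flag; a standalone sketch of that decision (hypothetical names):

#include <stdbool.h>

enum demo_action { DEMO_NOP, DEMO_START, DEMO_STOP };

// Mirror of the sync rule: a started failsafe queue whose sub-device queue is
// deferred-start must be started by hand; a stopped failsafe queue must be stopped.
static enum demo_action demo_sync_action(bool fs_queue_started, bool deferred_start)
{
	if (fs_queue_started && deferred_start)
		return DEMO_START;
	if (!fs_queue_started)
		return DEMO_STOP;
	return DEMO_NOP;
}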
+ */ + ret = dev->dev_ops->tx_queue_start(dev, i); + if (ret) { + ERROR("Could not synchronize Tx queue %d", i); + return ret; + } + } else if (dev->data->tx_queue_state[i] == + RTE_ETH_QUEUE_STATE_STOPPED) { + /* + * The subdevice Tx queue needs to be stopped manually + * in case an appropriate failsafe Tx queue has been + * stopped earlier. + */ + ret = dev->dev_ops->tx_queue_stop(dev, i); + if (ret) { + ERROR("Could not synchronize Tx queue %d", i); + return ret; + } + } + } + return 0; +} + +int +failsafe_eth_dev_state_sync(struct rte_eth_dev *dev) +{ + struct sub_device *sdev; + uint32_t inactive; + int ret; + uint8_t i; + + if (PRIV(dev)->state < DEV_PARSED) + return 0; + + ret = failsafe_args_parse_subs(dev); + if (ret) + goto err_remove; + + if (PRIV(dev)->state < DEV_PROBED) + return 0; + ret = failsafe_eal_init(dev); + if (ret) + goto err_remove; + if (PRIV(dev)->state < DEV_ACTIVE) + return 0; + inactive = 0; + FOREACH_SUBDEV(sdev, i, dev) { + if (sdev->state == DEV_PROBED) { + inactive |= UINT32_C(1) << i; + ret = eth_dev_flow_isolate_set(dev, sdev); + if (ret) { + ERROR("Could not apply configuration to sub_device %d", + i); + goto err_remove; + } + } + } + ret = dev->dev_ops->dev_configure(dev); + if (ret) + goto err_remove; + FOREACH_SUBDEV(sdev, i, dev) { + if (inactive & (UINT32_C(1) << i)) { + ret = fs_eth_dev_conf_apply(dev, sdev); + if (ret) { + ERROR("Could not apply configuration to sub_device %d", + i); + goto err_remove; + } + } + } + /* + * If new devices have been configured, check if + * the link state has changed. + */ + if (inactive) + dev->dev_ops->link_update(dev, 1); + if (PRIV(dev)->state < DEV_STARTED) + return 0; + ret = dev->dev_ops->dev_start(dev); + if (ret) + goto err_remove; + ret = failsafe_eth_dev_rx_queues_sync(dev); + if (ret) + goto err_remove; + ret = failsafe_eth_dev_tx_queues_sync(dev); + if (ret) + goto err_remove; + return 0; +err_remove: + FOREACH_SUBDEV(sdev, i, dev) + if (sdev->state != PRIV(dev)->state) + sdev->remove = 1; + return ret; +} + +void +failsafe_stats_increment(struct rte_eth_stats *to, struct rte_eth_stats *from) +{ + uint32_t i; + + RTE_ASSERT(to != NULL && from != NULL); + to->ipackets += from->ipackets; + to->opackets += from->opackets; + to->ibytes += from->ibytes; + to->obytes += from->obytes; + to->imissed += from->imissed; + to->ierrors += from->ierrors; + to->oerrors += from->oerrors; + to->rx_nombuf += from->rx_nombuf; + for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { + to->q_ipackets[i] += from->q_ipackets[i]; + to->q_opackets[i] += from->q_opackets[i]; + to->q_ibytes[i] += from->q_ibytes[i]; + to->q_obytes[i] += from->q_obytes[i]; + to->q_errors[i] += from->q_errors[i]; + } +} + +int +failsafe_eth_rmv_event_callback(uint16_t port_id __rte_unused, + enum rte_eth_event_type event __rte_unused, + void *cb_arg, void *out __rte_unused) +{ + struct sub_device *sdev = cb_arg; + + fs_lock(fs_dev(sdev), 0); + /* Switch as soon as possible tx_dev. */ + fs_switch_dev(fs_dev(sdev), sdev); + /* Use safe bursts in any case. */ + failsafe_set_burst_fn(fs_dev(sdev), 1); + /* + * Async removal, the sub-PMD will try to unregister + * the callback at the source of the current thread context. 
+ */ + sdev->remove = 1; + fs_unlock(fs_dev(sdev), 0); + return 0; +} + +int +failsafe_eth_lsc_event_callback(uint16_t port_id __rte_unused, + enum rte_eth_event_type event __rte_unused, + void *cb_arg, void *out __rte_unused) +{ + struct rte_eth_dev *dev = cb_arg; + int ret; + + ret = dev->dev_ops->link_update(dev, 0); + /* We must pass on the LSC event */ + if (ret) + return _rte_eth_dev_callback_process(dev, + RTE_ETH_EVENT_INTR_LSC, + NULL); + else + return 0; +} + +/* Take sub-device ownership before it becomes exposed to the application. */ +int +failsafe_eth_new_event_callback(uint16_t port_id, + enum rte_eth_event_type event __rte_unused, + void *cb_arg, void *out __rte_unused) +{ + struct rte_eth_dev *fs_dev = cb_arg; + struct sub_device *sdev; + struct rte_eth_dev *dev = &rte_eth_devices[port_id]; + uint8_t i; + + FOREACH_SUBDEV_STATE(sdev, i, fs_dev, DEV_PARSED) { + if (sdev->state >= DEV_PROBED) + continue; + if (dev->device == NULL) { + WARN("Trying to probe malformed device %s.\n", + sdev->devargs.name); + continue; + } + if (strcmp(sdev->devargs.name, dev->device->name) != 0) + continue; + rte_eth_dev_owner_set(port_id, &PRIV(fs_dev)->my_owner); + /* The actual owner will be checked after the port probing. */ + break; + } + return 0; +} diff --git a/src/spdk/dpdk/drivers/net/failsafe/failsafe_flow.c b/src/spdk/dpdk/drivers/net/failsafe/failsafe_flow.c new file mode 100644 index 000000000..5e2b5f7c6 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/failsafe/failsafe_flow.c @@ -0,0 +1,255 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2017 6WIND S.A. + * Copyright 2017 Mellanox Technologies, Ltd + */ + +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "failsafe_private.h" + +static struct rte_flow * +fs_flow_allocate(const struct rte_flow_attr *attr, + const struct rte_flow_item *items, + const struct rte_flow_action *actions) +{ + struct rte_flow *flow; + const struct rte_flow_conv_rule rule = { + .attr_ro = attr, + .pattern_ro = items, + .actions_ro = actions, + }; + struct rte_flow_error error; + int ret; + + ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, &rule, &error); + if (ret < 0) { + ERROR("Unable to process flow rule (%s): %s", + error.message ? error.message : "unspecified", + strerror(rte_errno)); + return NULL; + } + flow = rte_zmalloc(NULL, offsetof(struct rte_flow, rule) + ret, + RTE_CACHE_LINE_SIZE); + if (flow == NULL) { + ERROR("Could not allocate new flow"); + return NULL; + } + ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, &flow->rule, ret, &rule, + &error); + if (ret < 0) { + ERROR("Failed to copy flow rule (%s): %s", + error.message ? 
error.message : "unspecified", + strerror(rte_errno)); + rte_free(flow); + return NULL; + } + return flow; +} + +static void +fs_flow_release(struct rte_flow **flow) +{ + rte_free(*flow); + *flow = NULL; +} + +static int +fs_flow_validate(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item patterns[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + struct sub_device *sdev; + uint8_t i; + int ret; + + fs_lock(dev, 0); + FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { + DEBUG("Calling rte_flow_validate on sub_device %d", i); + ret = rte_flow_validate(PORT_ID(sdev), + attr, patterns, actions, error); + if ((ret = fs_err(sdev, ret))) { + ERROR("Operation rte_flow_validate failed for sub_device %d" + " with error %d", i, ret); + fs_unlock(dev, 0); + return ret; + } + } + fs_unlock(dev, 0); + return 0; +} + +static struct rte_flow * +fs_flow_create(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item patterns[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + struct sub_device *sdev; + struct rte_flow *flow; + uint8_t i; + + fs_lock(dev, 0); + flow = fs_flow_allocate(attr, patterns, actions); + FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { + flow->flows[i] = rte_flow_create(PORT_ID(sdev), + attr, patterns, actions, error); + if (flow->flows[i] == NULL && fs_err(sdev, -rte_errno)) { + ERROR("Failed to create flow on sub_device %d", + i); + goto err; + } + } + TAILQ_INSERT_TAIL(&PRIV(dev)->flow_list, flow, next); + fs_unlock(dev, 0); + return flow; +err: + FOREACH_SUBDEV(sdev, i, dev) { + if (flow->flows[i] != NULL) + rte_flow_destroy(PORT_ID(sdev), + flow->flows[i], error); + } + fs_flow_release(&flow); + fs_unlock(dev, 0); + return NULL; +} + +static int +fs_flow_destroy(struct rte_eth_dev *dev, + struct rte_flow *flow, + struct rte_flow_error *error) +{ + struct sub_device *sdev; + uint8_t i; + int ret; + + if (flow == NULL) { + ERROR("Invalid flow"); + return -EINVAL; + } + ret = 0; + fs_lock(dev, 0); + FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { + int local_ret; + + if (flow->flows[i] == NULL) + continue; + local_ret = rte_flow_destroy(PORT_ID(sdev), + flow->flows[i], error); + if ((local_ret = fs_err(sdev, local_ret))) { + ERROR("Failed to destroy flow on sub_device %d: %d", + i, local_ret); + if (ret == 0) + ret = local_ret; + } + } + TAILQ_REMOVE(&PRIV(dev)->flow_list, flow, next); + fs_flow_release(&flow); + fs_unlock(dev, 0); + return ret; +} + +static int +fs_flow_flush(struct rte_eth_dev *dev, + struct rte_flow_error *error) +{ + struct sub_device *sdev; + struct rte_flow *flow; + void *tmp; + uint8_t i; + int ret; + + fs_lock(dev, 0); + FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { + DEBUG("Calling rte_flow_flush on sub_device %d", i); + ret = rte_flow_flush(PORT_ID(sdev), error); + if ((ret = fs_err(sdev, ret))) { + ERROR("Operation rte_flow_flush failed for sub_device %d" + " with error %d", i, ret); + fs_unlock(dev, 0); + return ret; + } + } + TAILQ_FOREACH_SAFE(flow, &PRIV(dev)->flow_list, next, tmp) { + TAILQ_REMOVE(&PRIV(dev)->flow_list, flow, next); + fs_flow_release(&flow); + } + fs_unlock(dev, 0); + return 0; +} + +static int +fs_flow_query(struct rte_eth_dev *dev, + struct rte_flow *flow, + const struct rte_flow_action *action, + void *arg, + struct rte_flow_error *error) +{ + struct sub_device *sdev; + + fs_lock(dev, 0); + sdev = TX_SUBDEV(dev); + if (sdev != NULL) { + int ret = rte_flow_query(PORT_ID(sdev), + 
flow->flows[SUB_ID(sdev)], + action, arg, error); + + if ((ret = fs_err(sdev, ret))) { + fs_unlock(dev, 0); + return ret; + } + } + fs_unlock(dev, 0); + WARN("No active sub_device to query about its flow"); + return -1; +} + +static int +fs_flow_isolate(struct rte_eth_dev *dev, + int set, + struct rte_flow_error *error) +{ + struct sub_device *sdev; + uint8_t i; + int ret; + + fs_lock(dev, 0); + FOREACH_SUBDEV(sdev, i, dev) { + if (sdev->state < DEV_PROBED) + continue; + DEBUG("Calling rte_flow_isolate on sub_device %d", i); + if (PRIV(dev)->flow_isolated != sdev->flow_isolated) + WARN("flow isolation mode of sub_device %d in incoherent state.", + i); + ret = rte_flow_isolate(PORT_ID(sdev), set, error); + if ((ret = fs_err(sdev, ret))) { + ERROR("Operation rte_flow_isolate failed for sub_device %d" + " with error %d", i, ret); + fs_unlock(dev, 0); + return ret; + } + sdev->flow_isolated = set; + } + PRIV(dev)->flow_isolated = set; + fs_unlock(dev, 0); + return 0; +} + +const struct rte_flow_ops fs_flow_ops = { + .validate = fs_flow_validate, + .create = fs_flow_create, + .destroy = fs_flow_destroy, + .flush = fs_flow_flush, + .query = fs_flow_query, + .isolate = fs_flow_isolate, +}; diff --git a/src/spdk/dpdk/drivers/net/failsafe/failsafe_intr.c b/src/spdk/dpdk/drivers/net/failsafe/failsafe_intr.c new file mode 100644 index 000000000..602c04033 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/failsafe/failsafe_intr.c @@ -0,0 +1,535 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2018 Mellanox Technologies, Ltd + */ + +/** + * @file + * Interrupts handling for failsafe driver. + */ + +#if defined(LINUX) +#include +#endif +#include + +#include +#include +#include +#include +#include +#include + +#include "failsafe_private.h" + +#define NUM_RX_PROXIES (FAILSAFE_MAX_ETHPORTS * RTE_MAX_RXTX_INTR_VEC_ID) + + +/** + * Open an epoll file descriptor. + * + * @param flags + * Flags for defining epoll behavior. + * @return + * 0 on success, negative errno value otherwise. + */ +static int +fs_epoll_create1(int flags) +{ +#if defined(LINUX) + return epoll_create1(flags); +#elif defined(BSD) + RTE_SET_USED(flags); + return -ENOTSUP; +#endif +} + +/** + * Install failsafe Rx event proxy service. + * The Rx event proxy is the service that listens to Rx events from the + * subdevices and triggers failsafe Rx events accordingly. + * + * @param priv + * Pointer to failsafe private structure. + * @return + * 0 on success, negative errno value otherwise. + */ +static int +fs_rx_event_proxy_routine(void *data) +{ + struct fs_priv *priv; + struct rxq *rxq; + struct rte_epoll_event *events; + uint64_t u64; + int i, n; + int rc = 0; + + u64 = 1; + priv = data; + events = priv->rxp.evec; + n = rte_epoll_wait(priv->rxp.efd, events, NUM_RX_PROXIES, -1); + for (i = 0; i < n; i++) { + rxq = events[i].epdata.data; + if (rxq->enable_events && rxq->event_fd != -1) { + if (write(rxq->event_fd, &u64, sizeof(u64)) != + sizeof(u64)) { + ERROR("Failed to proxy Rx event to socket %d", + rxq->event_fd); + rc = -EIO; + } + } + } + return rc; +} + +/** + * Uninstall failsafe Rx event proxy service. + * + * @param priv + * Pointer to failsafe private structure. + */ +static void +fs_rx_event_proxy_service_uninstall(struct fs_priv *priv) +{ + /* Unregister the event service. 
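The uninstall switch just below unwinds only the steps that were actually completed, by switching on the furthest service state reached and falling through; a generic standalone sketch of that pattern (hypothetical enum and names):

enum demo_state { DEMO_NONE, DEMO_REGISTERED, DEMO_READY, DEMO_RUNNING };

static void demo_teardown(enum demo_state reached)
{
	switch (reached) {
	case DEMO_RUNNING:
		// unmap the service from its core, then fall through
	case DEMO_READY:
		// clear the run states, then fall through
	case DEMO_REGISTERED:
		// unregister the component
		break;
	default:
		break;
	}
}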
*/ + switch (priv->rxp.sstate) { + case SS_RUNNING: + rte_service_map_lcore_set(priv->rxp.sid, priv->rxp.scid, 0); + /* fall through */ + case SS_READY: + rte_service_runstate_set(priv->rxp.sid, 0); + rte_service_set_stats_enable(priv->rxp.sid, 0); + rte_service_component_runstate_set(priv->rxp.sid, 0); + /* fall through */ + case SS_REGISTERED: + rte_service_component_unregister(priv->rxp.sid); + /* fall through */ + default: + break; + } +} + +/** + * Install the failsafe Rx event proxy service. + * + * @param priv + * Pointer to failsafe private structure. + * @return + * 0 on success, negative errno value otherwise. + */ +static int +fs_rx_event_proxy_service_install(struct fs_priv *priv) +{ + struct rte_service_spec service; + int32_t num_service_cores; + int ret = 0; + + num_service_cores = rte_service_lcore_count(); + if (num_service_cores <= 0) { + ERROR("Failed to install Rx interrupts, " + "no service core found"); + return -ENOTSUP; + } + /* prepare service info */ + memset(&service, 0, sizeof(struct rte_service_spec)); + snprintf(service.name, sizeof(service.name), "%s_Rx_service", + priv->data->name); + service.socket_id = priv->data->numa_node; + service.callback = fs_rx_event_proxy_routine; + service.callback_userdata = priv; + + if (priv->rxp.sstate == SS_NO_SERVICE) { + uint32_t service_core_list[num_service_cores]; + + /* get a service core to work with */ + ret = rte_service_lcore_list(service_core_list, + num_service_cores); + if (ret <= 0) { + ERROR("Failed to install Rx interrupts, " + "service core list empty or corrupted"); + return -ENOTSUP; + } + priv->rxp.scid = service_core_list[0]; + ret = rte_service_lcore_add(priv->rxp.scid); + if (ret && ret != -EALREADY) { + ERROR("Failed adding service core"); + return ret; + } + /* service core may be in "stopped" state, start it */ + ret = rte_service_lcore_start(priv->rxp.scid); + if (ret && (ret != -EALREADY)) { + ERROR("Failed to install Rx interrupts, " + "service core not started"); + return ret; + } + /* register our service */ + int32_t ret = rte_service_component_register(&service, + &priv->rxp.sid); + if (ret) { + ERROR("service register() failed"); + return -ENOEXEC; + } + priv->rxp.sstate = SS_REGISTERED; + /* run the service */ + ret = rte_service_component_runstate_set(priv->rxp.sid, 1); + if (ret < 0) { + ERROR("Failed Setting component runstate\n"); + return ret; + } + ret = rte_service_set_stats_enable(priv->rxp.sid, 1); + if (ret < 0) { + ERROR("Failed enabling stats\n"); + return ret; + } + ret = rte_service_runstate_set(priv->rxp.sid, 1); + if (ret < 0) { + ERROR("Failed to run service\n"); + return ret; + } + priv->rxp.sstate = SS_READY; + /* map the service with the service core */ + ret = rte_service_map_lcore_set(priv->rxp.sid, + priv->rxp.scid, 1); + if (ret) { + ERROR("Failed to install Rx interrupts, " + "could not map service core"); + return ret; + } + priv->rxp.sstate = SS_RUNNING; + } + return 0; +} + +/** + * Install failsafe Rx event proxy subsystem. + * This is the way the failsafe PMD generates Rx events on behalf of its + * subdevices. + * + * @param priv + * Pointer to failsafe private structure. + * @return + * 0 on success, negative errno value otherwise and rte_errno is set. + */ +static int +fs_rx_event_proxy_install(struct fs_priv *priv) +{ + int rc = 0; + + /* + * Create the epoll fd and event vector for the proxy service to + * wait on for Rx events generated by the subdevices. 
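The proxy routine above signals a failsafe Rx queue by writing an 8-byte counter to its event_fd, which is what the epoll created below ultimately waits on; a minimal Linux-only sketch of that signalling step (hypothetical name, assumes an eventfd-backed descriptor):

#include <stdint.h>
#include <unistd.h>
#include <sys/eventfd.h>

// Standalone sketch: signal an eventfd the way an Rx event proxy would.
static int demo_signal_eventfd(int efd)
{
	uint64_t u64 = 1;	// eventfd counters are always 8 bytes

	if (write(efd, &u64, sizeof(u64)) != sizeof(u64))
		return -1;	// a caller may map this to -EIO
	return 0;
}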
+ */ + priv->rxp.efd = fs_epoll_create1(0); + if (priv->rxp.efd < 0) { + rte_errno = errno; + ERROR("Failed to create epoll," + " Rx interrupts will not be supported"); + return -rte_errno; + } + priv->rxp.evec = calloc(NUM_RX_PROXIES, sizeof(*priv->rxp.evec)); + if (priv->rxp.evec == NULL) { + ERROR("Failed to allocate memory for event vectors," + " Rx interrupts will not be supported"); + rc = -ENOMEM; + goto error; + } + rc = fs_rx_event_proxy_service_install(priv); + if (rc < 0) + goto error; + return 0; +error: + if (priv->rxp.efd >= 0) { + close(priv->rxp.efd); + priv->rxp.efd = -1; + } + if (priv->rxp.evec != NULL) { + free(priv->rxp.evec); + priv->rxp.evec = NULL; + } + rte_errno = -rc; + return rc; +} + +/** + * RX Interrupt control per subdevice. + * + * @param sdev + * Pointer to sub-device structure. + * @param op + * The operation to be performed for the vector. + * Operation type of {RTE_INTR_EVENT_ADD, RTE_INTR_EVENT_DEL}. + * @return + * - On success, zero. + * - On failure, a negative value. + */ +static int +failsafe_eth_rx_intr_ctl_subdevice(struct sub_device *sdev, int op) +{ + struct rte_eth_dev *dev; + struct rte_eth_dev *fsdev; + int epfd; + uint16_t pid; + uint16_t qid; + struct rxq *fsrxq; + int rc; + int ret = 0; + + fsdev = fs_dev(sdev); + if (sdev == NULL || (ETH(sdev) == NULL) || + fsdev == NULL || (PRIV(fsdev) == NULL)) { + ERROR("Called with invalid arguments"); + return -EINVAL; + } + dev = ETH(sdev); + epfd = PRIV(fsdev)->rxp.efd; + pid = PORT_ID(sdev); + + if (epfd <= 0) { + if (op == RTE_INTR_EVENT_ADD) { + ERROR("Proxy events are not initialized"); + return -EBADF; + } else { + return 0; + } + } + if (dev->data->nb_rx_queues > fsdev->data->nb_rx_queues) { + ERROR("subdevice has too many queues," + " Interrupts will not be enabled"); + return -E2BIG; + } + for (qid = 0; qid < dev->data->nb_rx_queues; qid++) { + fsrxq = fsdev->data->rx_queues[qid]; + rc = rte_eth_dev_rx_intr_ctl_q(pid, qid, epfd, + op, (void *)fsrxq); + if (rc) { + ERROR("rte_eth_dev_rx_intr_ctl_q failed for " + "port %d queue %d, epfd %d, error %d", + pid, qid, epfd, rc); + ret = rc; + } + } + return ret; +} + +/** + * Install Rx interrupts subsystem for a subdevice. + * This is a support for dynamically adding subdevices. + * + * @param sdev + * Pointer to subdevice structure. + * + * @return + * 0 on success, negative errno value otherwise and rte_errno is set. + */ +int failsafe_rx_intr_install_subdevice(struct sub_device *sdev) +{ + int rc; + int qid; + struct rte_eth_dev *fsdev; + struct rxq **rxq; + const struct rte_intr_conf *const intr_conf = + &ETH(sdev)->data->dev_conf.intr_conf; + + fsdev = fs_dev(sdev); + rxq = (struct rxq **)fsdev->data->rx_queues; + if (intr_conf->rxq == 0) + return 0; + rc = failsafe_eth_rx_intr_ctl_subdevice(sdev, RTE_INTR_EVENT_ADD); + if (rc) + return rc; + /* enable interrupts on already-enabled queues */ + for (qid = 0; qid < ETH(sdev)->data->nb_rx_queues; qid++) { + if (rxq[qid]->enable_events) { + int ret = rte_eth_dev_rx_intr_enable(PORT_ID(sdev), + qid); + if (ret && (ret != -ENOTSUP)) { + ERROR("Failed to enable interrupts on " + "port %d queue %d", PORT_ID(sdev), qid); + rc = ret; + } + } + } + return rc; +} + +/** + * Uninstall Rx interrupts subsystem for a subdevice. + * This is a support for dynamically removing subdevices. + * + * @param sdev + * Pointer to subdevice structure. + * + * @return + * 0 on success, negative errno value otherwise and rte_errno is set.
+ */ +void failsafe_rx_intr_uninstall_subdevice(struct sub_device *sdev) +{ + int qid; + struct rte_eth_dev *fsdev; + struct rxq *fsrxq; + + fsdev = fs_dev(sdev); + for (qid = 0; qid < ETH(sdev)->data->nb_rx_queues; qid++) { + if (qid < fsdev->data->nb_rx_queues) { + fsrxq = fsdev->data->rx_queues[qid]; + if (fsrxq != NULL && fsrxq->enable_events) + rte_eth_dev_rx_intr_disable(PORT_ID(sdev), + qid); + } + } + failsafe_eth_rx_intr_ctl_subdevice(sdev, RTE_INTR_EVENT_DEL); +} + +/** + * Uninstall failsafe Rx event proxy. + * + * @param priv + * Pointer to failsafe private structure. + */ +static void +fs_rx_event_proxy_uninstall(struct fs_priv *priv) +{ + fs_rx_event_proxy_service_uninstall(priv); + if (priv->rxp.evec != NULL) { + free(priv->rxp.evec); + priv->rxp.evec = NULL; + } + if (priv->rxp.efd >= 0) { + close(priv->rxp.efd); + priv->rxp.efd = -1; + } +} + +/** + * Uninstall failsafe interrupt vector. + * + * @param priv + * Pointer to failsafe private structure. + */ +static void +fs_rx_intr_vec_uninstall(struct fs_priv *priv) +{ + struct rte_intr_handle *intr_handle; + + intr_handle = &priv->intr_handle; + if (intr_handle->intr_vec != NULL) { + free(intr_handle->intr_vec); + intr_handle->intr_vec = NULL; + } + intr_handle->nb_efd = 0; +} + +/** + * Installs failsafe interrupt vector to be registered with EAL later on. + * + * @param priv + * Pointer to failsafe private structure. + * + * @return + * 0 on success, negative errno value otherwise and rte_errno is set. + */ +static int +fs_rx_intr_vec_install(struct fs_priv *priv) +{ + unsigned int i; + unsigned int rxqs_n; + unsigned int n; + unsigned int count; + struct rte_intr_handle *intr_handle; + + rxqs_n = priv->data->nb_rx_queues; + n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID); + count = 0; + intr_handle = &priv->intr_handle; + RTE_ASSERT(intr_handle->intr_vec == NULL); + /* Allocate the interrupt vector of the failsafe Rx proxy interrupts */ + intr_handle->intr_vec = malloc(n * sizeof(intr_handle->intr_vec[0])); + if (intr_handle->intr_vec == NULL) { + fs_rx_intr_vec_uninstall(priv); + rte_errno = ENOMEM; + ERROR("Failed to allocate memory for interrupt vector," + " Rx interrupts will not be supported"); + return -rte_errno; + } + for (i = 0; i < n; i++) { + struct rxq *rxq = priv->data->rx_queues[i]; + + /* Skip queues that cannot request interrupts. */ + if (rxq == NULL || rxq->event_fd < 0) { + /* Use invalid intr_vec[] index to disable entry. */ + intr_handle->intr_vec[i] = + RTE_INTR_VEC_RXTX_OFFSET + + RTE_MAX_RXTX_INTR_VEC_ID; + continue; + } + if (count >= RTE_MAX_RXTX_INTR_VEC_ID) { + rte_errno = E2BIG; + ERROR("Too many Rx queues for interrupt vector size" + " (%d), Rx interrupts cannot be enabled", + RTE_MAX_RXTX_INTR_VEC_ID); + fs_rx_intr_vec_uninstall(priv); + return -rte_errno; + } + intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + count; + intr_handle->efds[count] = rxq->event_fd; + count++; + } + if (count == 0) { + fs_rx_intr_vec_uninstall(priv); + } else { + intr_handle->nb_efd = count; + intr_handle->efd_counter_size = sizeof(uint64_t); + } + return 0; +} + + +/** + * Uninstall failsafe Rx interrupts subsystem. + * + * @param priv + * Pointer to private structure. + * + * @return + * 0 on success, negative errno value otherwise and rte_errno is set. 
+ */ +void +failsafe_rx_intr_uninstall(struct rte_eth_dev *dev) +{ + struct fs_priv *priv; + struct rte_intr_handle *intr_handle; + + priv = PRIV(dev); + intr_handle = &priv->intr_handle; + rte_intr_free_epoll_fd(intr_handle); + fs_rx_event_proxy_uninstall(priv); + fs_rx_intr_vec_uninstall(priv); + dev->intr_handle = NULL; +} + +/** + * Install failsafe Rx interrupts subsystem. + * + * @param priv + * Pointer to private structure. + * + * @return + * 0 on success, negative errno value otherwise and rte_errno is set. + */ +int +failsafe_rx_intr_install(struct rte_eth_dev *dev) +{ + struct fs_priv *priv = PRIV(dev); + const struct rte_intr_conf *const intr_conf = + &priv->data->dev_conf.intr_conf; + + if (intr_conf->rxq == 0 || dev->intr_handle != NULL) + return 0; + if (fs_rx_intr_vec_install(priv) < 0) + return -rte_errno; + if (fs_rx_event_proxy_install(priv) < 0) { + fs_rx_intr_vec_uninstall(priv); + return -rte_errno; + } + dev->intr_handle = &priv->intr_handle; + return 0; +} diff --git a/src/spdk/dpdk/drivers/net/failsafe/failsafe_ops.c b/src/spdk/dpdk/drivers/net/failsafe/failsafe_ops.c new file mode 100644 index 000000000..e1d08e46c --- /dev/null +++ b/src/spdk/dpdk/drivers/net/failsafe/failsafe_ops.c @@ -0,0 +1,1511 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2017 6WIND S.A. + * Copyright 2017 Mellanox Technologies, Ltd + */ + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "failsafe_private.h" + +static int +fs_dev_configure(struct rte_eth_dev *dev) +{ + struct sub_device *sdev; + uint8_t i; + int ret; + + fs_lock(dev, 0); + FOREACH_SUBDEV(sdev, i, dev) { + int rmv_interrupt = 0; + int lsc_interrupt = 0; + int lsc_enabled; + + if (sdev->state != DEV_PROBED && + !(PRIV(dev)->alarm_lock == 0 && sdev->state == DEV_ACTIVE)) + continue; + + rmv_interrupt = ETH(sdev)->data->dev_flags & + RTE_ETH_DEV_INTR_RMV; + if (rmv_interrupt) { + DEBUG("Enabling RMV interrupts for sub_device %d", i); + dev->data->dev_conf.intr_conf.rmv = 1; + } else { + DEBUG("sub_device %d does not support RMV event", i); + } + lsc_enabled = dev->data->dev_conf.intr_conf.lsc; + lsc_interrupt = lsc_enabled && + (ETH(sdev)->data->dev_flags & + RTE_ETH_DEV_INTR_LSC); + if (lsc_interrupt) { + DEBUG("Enabling LSC interrupts for sub_device %d", i); + dev->data->dev_conf.intr_conf.lsc = 1; + } else if (lsc_enabled && !lsc_interrupt) { + DEBUG("Disabling LSC interrupts for sub_device %d", i); + dev->data->dev_conf.intr_conf.lsc = 0; + } + DEBUG("Configuring sub-device %d", i); + ret = rte_eth_dev_configure(PORT_ID(sdev), + dev->data->nb_rx_queues, + dev->data->nb_tx_queues, + &dev->data->dev_conf); + if (ret) { + if (!fs_err(sdev, ret)) + continue; + ERROR("Could not configure sub_device %d", i); + fs_unlock(dev, 0); + return ret; + } + if (rmv_interrupt && sdev->rmv_callback == 0) { + ret = rte_eth_dev_callback_register(PORT_ID(sdev), + RTE_ETH_EVENT_INTR_RMV, + failsafe_eth_rmv_event_callback, + sdev); + if (ret) + WARN("Failed to register RMV callback for sub_device %d", + SUB_ID(sdev)); + else + sdev->rmv_callback = 1; + } + dev->data->dev_conf.intr_conf.rmv = 0; + if (lsc_interrupt && sdev->lsc_callback == 0) { + ret = rte_eth_dev_callback_register(PORT_ID(sdev), + RTE_ETH_EVENT_INTR_LSC, + failsafe_eth_lsc_event_callback, + dev); + if (ret) + WARN("Failed to register LSC callback for sub_device %d", + SUB_ID(sdev)); + else + sdev->lsc_callback = 1; + } + dev->data->dev_conf.intr_conf.lsc = lsc_enabled; + sdev->state = 
DEV_ACTIVE; + } + if (PRIV(dev)->state < DEV_ACTIVE) + PRIV(dev)->state = DEV_ACTIVE; + fs_unlock(dev, 0); + return 0; +} + +static void +fs_set_queues_state_start(struct rte_eth_dev *dev) +{ + struct rxq *rxq; + struct txq *txq; + uint16_t i; + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = dev->data->rx_queues[i]; + if (rxq != NULL && !rxq->info.conf.rx_deferred_start) + dev->data->rx_queue_state[i] = + RTE_ETH_QUEUE_STATE_STARTED; + } + for (i = 0; i < dev->data->nb_tx_queues; i++) { + txq = dev->data->tx_queues[i]; + if (txq != NULL && !txq->info.conf.tx_deferred_start) + dev->data->tx_queue_state[i] = + RTE_ETH_QUEUE_STATE_STARTED; + } +} + +static int +fs_dev_start(struct rte_eth_dev *dev) +{ + struct sub_device *sdev; + uint8_t i; + int ret; + + fs_lock(dev, 0); + ret = failsafe_rx_intr_install(dev); + if (ret) { + fs_unlock(dev, 0); + return ret; + } + FOREACH_SUBDEV(sdev, i, dev) { + if (sdev->state != DEV_ACTIVE) + continue; + DEBUG("Starting sub_device %d", i); + ret = rte_eth_dev_start(PORT_ID(sdev)); + if (ret) { + if (!fs_err(sdev, ret)) + continue; + fs_unlock(dev, 0); + return ret; + } + ret = failsafe_rx_intr_install_subdevice(sdev); + if (ret) { + if (!fs_err(sdev, ret)) + continue; + rte_eth_dev_stop(PORT_ID(sdev)); + fs_unlock(dev, 0); + return ret; + } + sdev->state = DEV_STARTED; + } + if (PRIV(dev)->state < DEV_STARTED) { + PRIV(dev)->state = DEV_STARTED; + fs_set_queues_state_start(dev); + } + fs_switch_dev(dev, NULL); + fs_unlock(dev, 0); + return 0; +} + +static void +fs_set_queues_state_stop(struct rte_eth_dev *dev) +{ + uint16_t i; + + for (i = 0; i < dev->data->nb_rx_queues; i++) + if (dev->data->rx_queues[i] != NULL) + dev->data->rx_queue_state[i] = + RTE_ETH_QUEUE_STATE_STOPPED; + for (i = 0; i < dev->data->nb_tx_queues; i++) + if (dev->data->tx_queues[i] != NULL) + dev->data->tx_queue_state[i] = + RTE_ETH_QUEUE_STATE_STOPPED; +} + +static void +fs_dev_stop(struct rte_eth_dev *dev) +{ + struct sub_device *sdev; + uint8_t i; + + fs_lock(dev, 0); + PRIV(dev)->state = DEV_STARTED - 1; + FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_STARTED) { + rte_eth_dev_stop(PORT_ID(sdev)); + failsafe_rx_intr_uninstall_subdevice(sdev); + sdev->state = DEV_STARTED - 1; + } + failsafe_rx_intr_uninstall(dev); + fs_set_queues_state_stop(dev); + fs_unlock(dev, 0); +} + +static int +fs_dev_set_link_up(struct rte_eth_dev *dev) +{ + struct sub_device *sdev; + uint8_t i; + int ret; + + fs_lock(dev, 0); + FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { + DEBUG("Calling rte_eth_dev_set_link_up on sub_device %d", i); + ret = rte_eth_dev_set_link_up(PORT_ID(sdev)); + if ((ret = fs_err(sdev, ret))) { + ERROR("Operation rte_eth_dev_set_link_up failed for sub_device %d" + " with error %d", i, ret); + fs_unlock(dev, 0); + return ret; + } + } + fs_unlock(dev, 0); + return 0; +} + +static int +fs_dev_set_link_down(struct rte_eth_dev *dev) +{ + struct sub_device *sdev; + uint8_t i; + int ret; + + fs_lock(dev, 0); + FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { + DEBUG("Calling rte_eth_dev_set_link_down on sub_device %d", i); + ret = rte_eth_dev_set_link_down(PORT_ID(sdev)); + if ((ret = fs_err(sdev, ret))) { + ERROR("Operation rte_eth_dev_set_link_down failed for sub_device %d" + " with error %d", i, ret); + fs_unlock(dev, 0); + return ret; + } + } + fs_unlock(dev, 0); + return 0; +} + +static void fs_dev_free_queues(struct rte_eth_dev *dev); +static void +fs_dev_close(struct rte_eth_dev *dev) +{ + struct sub_device *sdev; + uint8_t i; + + fs_lock(dev, 0); + 
failsafe_hotplug_alarm_cancel(dev); + if (PRIV(dev)->state == DEV_STARTED) + dev->dev_ops->dev_stop(dev); + PRIV(dev)->state = DEV_ACTIVE - 1; + FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { + DEBUG("Closing sub_device %d", i); + failsafe_eth_dev_unregister_callbacks(sdev); + rte_eth_dev_close(PORT_ID(sdev)); + sdev->state = DEV_ACTIVE - 1; + } + fs_dev_free_queues(dev); + fs_unlock(dev, 0); +} + +static int +fs_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id) +{ + struct sub_device *sdev; + uint8_t i; + int ret; + int err = 0; + bool failure = true; + + fs_lock(dev, 0); + FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { + uint16_t port_id = ETH(sdev)->data->port_id; + + ret = rte_eth_dev_rx_queue_stop(port_id, rx_queue_id); + ret = fs_err(sdev, ret); + if (ret) { + ERROR("Rx queue stop failed for subdevice %d", i); + err = ret; + } else { + failure = false; + } + } + dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; + fs_unlock(dev, 0); + /* Return 0 in case of at least one successful queue stop */ + return (failure) ? err : 0; +} + +static int +fs_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) +{ + struct sub_device *sdev; + uint8_t i; + int ret; + + fs_lock(dev, 0); + FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { + uint16_t port_id = ETH(sdev)->data->port_id; + + ret = rte_eth_dev_rx_queue_start(port_id, rx_queue_id); + ret = fs_err(sdev, ret); + if (ret) { + ERROR("Rx queue start failed for subdevice %d", i); + fs_rx_queue_stop(dev, rx_queue_id); + fs_unlock(dev, 0); + return ret; + } + } + dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; + fs_unlock(dev, 0); + return 0; +} + +static int +fs_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id) +{ + struct sub_device *sdev; + uint8_t i; + int ret; + int err = 0; + bool failure = true; + + fs_lock(dev, 0); + FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { + uint16_t port_id = ETH(sdev)->data->port_id; + + ret = rte_eth_dev_tx_queue_stop(port_id, tx_queue_id); + ret = fs_err(sdev, ret); + if (ret) { + ERROR("Tx queue stop failed for subdevice %d", i); + err = ret; + } else { + failure = false; + } + } + dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; + fs_unlock(dev, 0); + /* Return 0 in case of at least one successful queue stop */ + return (failure) ? 
err : 0; +} + +static int +fs_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id) +{ + struct sub_device *sdev; + uint8_t i; + int ret; + + fs_lock(dev, 0); + FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { + uint16_t port_id = ETH(sdev)->data->port_id; + + ret = rte_eth_dev_tx_queue_start(port_id, tx_queue_id); + ret = fs_err(sdev, ret); + if (ret) { + ERROR("Tx queue start failed for subdevice %d", i); + fs_tx_queue_stop(dev, tx_queue_id); + fs_unlock(dev, 0); + return ret; + } + } + dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; + fs_unlock(dev, 0); + return 0; +} + +static void +fs_rx_queue_release(void *queue) +{ + struct rte_eth_dev *dev; + struct sub_device *sdev; + uint8_t i; + struct rxq *rxq; + + if (queue == NULL) + return; + rxq = queue; + dev = &rte_eth_devices[rxq->priv->data->port_id]; + fs_lock(dev, 0); + if (rxq->event_fd >= 0) + close(rxq->event_fd); + FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { + if (ETH(sdev)->data->rx_queues != NULL && + ETH(sdev)->data->rx_queues[rxq->qid] != NULL) { + SUBOPS(sdev, rx_queue_release) + (ETH(sdev)->data->rx_queues[rxq->qid]); + } + } + dev->data->rx_queues[rxq->qid] = NULL; + rte_free(rxq); + fs_unlock(dev, 0); +} + +static int +fs_rx_queue_setup(struct rte_eth_dev *dev, + uint16_t rx_queue_id, + uint16_t nb_rx_desc, + unsigned int socket_id, + const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *mb_pool) +{ + /* + * FIXME: Add a proper interface in rte_eal_interrupts for + * allocating eventfd as an interrupt vector. + * For the time being, fake as if we are using MSIX interrupts, + * this will cause rte_intr_efd_enable to allocate an eventfd for us. + */ + struct rte_intr_handle intr_handle = { + .type = RTE_INTR_HANDLE_VFIO_MSIX, + .efds = { -1, }, + }; + struct sub_device *sdev; + struct rxq *rxq; + uint8_t i; + int ret; + + fs_lock(dev, 0); + if (rx_conf->rx_deferred_start) { + FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) { + if (SUBOPS(sdev, rx_queue_start) == NULL) { + ERROR("Rx queue deferred start is not " + "supported for subdevice %d", i); + fs_unlock(dev, 0); + return -EINVAL; + } + } + } + rxq = dev->data->rx_queues[rx_queue_id]; + if (rxq != NULL) { + fs_rx_queue_release(rxq); + dev->data->rx_queues[rx_queue_id] = NULL; + } + rxq = rte_zmalloc(NULL, + sizeof(*rxq) + + sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail, + RTE_CACHE_LINE_SIZE); + if (rxq == NULL) { + fs_unlock(dev, 0); + return -ENOMEM; + } + FOREACH_SUBDEV(sdev, i, dev) + rte_atomic64_init(&rxq->refcnt[i]); + rxq->qid = rx_queue_id; + rxq->socket_id = socket_id; + rxq->info.mp = mb_pool; + rxq->info.conf = *rx_conf; + rxq->info.nb_desc = nb_rx_desc; + rxq->priv = PRIV(dev); + rxq->sdev = PRIV(dev)->subs; + ret = rte_intr_efd_enable(&intr_handle, 1); + if (ret < 0) { + fs_unlock(dev, 0); + return ret; + } + rxq->event_fd = intr_handle.efds[0]; + dev->data->rx_queues[rx_queue_id] = rxq; + FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { + ret = rte_eth_rx_queue_setup(PORT_ID(sdev), + rx_queue_id, + nb_rx_desc, socket_id, + rx_conf, mb_pool); + if ((ret = fs_err(sdev, ret))) { + ERROR("RX queue setup failed for sub_device %d", i); + goto free_rxq; + } + } + fs_unlock(dev, 0); + return 0; +free_rxq: + fs_rx_queue_release(rxq); + fs_unlock(dev, 0); + return ret; +} + +static int +fs_rx_intr_enable(struct rte_eth_dev *dev, uint16_t idx) +{ + struct rxq *rxq; + struct sub_device *sdev; + uint8_t i; + int ret; + int rc = 0; + + fs_lock(dev, 0); + if (idx >= dev->data->nb_rx_queues) { + rc = -EINVAL; + goto unlock; + } + 
rxq = dev->data->rx_queues[idx]; + if (rxq == NULL || rxq->event_fd <= 0) { + rc = -EINVAL; + goto unlock; + } + /* Fail if proxy service is nor running. */ + if (PRIV(dev)->rxp.sstate != SS_RUNNING) { + ERROR("failsafe interrupt services are not running"); + rc = -EAGAIN; + goto unlock; + } + rxq->enable_events = 1; + FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { + ret = rte_eth_dev_rx_intr_enable(PORT_ID(sdev), idx); + ret = fs_err(sdev, ret); + if (ret) + rc = ret; + } +unlock: + fs_unlock(dev, 0); + if (rc) + rte_errno = -rc; + return rc; +} + +static int +fs_rx_intr_disable(struct rte_eth_dev *dev, uint16_t idx) +{ + struct rxq *rxq; + struct sub_device *sdev; + uint64_t u64; + uint8_t i; + int rc = 0; + int ret; + + fs_lock(dev, 0); + if (idx >= dev->data->nb_rx_queues) { + rc = -EINVAL; + goto unlock; + } + rxq = dev->data->rx_queues[idx]; + if (rxq == NULL || rxq->event_fd <= 0) { + rc = -EINVAL; + goto unlock; + } + rxq->enable_events = 0; + FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { + ret = rte_eth_dev_rx_intr_disable(PORT_ID(sdev), idx); + ret = fs_err(sdev, ret); + if (ret) + rc = ret; + } + /* Clear pending events */ + while (read(rxq->event_fd, &u64, sizeof(uint64_t)) > 0) + ; +unlock: + fs_unlock(dev, 0); + if (rc) + rte_errno = -rc; + return rc; +} + +static void +fs_tx_queue_release(void *queue) +{ + struct rte_eth_dev *dev; + struct sub_device *sdev; + uint8_t i; + struct txq *txq; + + if (queue == NULL) + return; + txq = queue; + dev = &rte_eth_devices[txq->priv->data->port_id]; + fs_lock(dev, 0); + FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { + if (ETH(sdev)->data->tx_queues != NULL && + ETH(sdev)->data->tx_queues[txq->qid] != NULL) { + SUBOPS(sdev, tx_queue_release) + (ETH(sdev)->data->tx_queues[txq->qid]); + } + } + dev->data->tx_queues[txq->qid] = NULL; + rte_free(txq); + fs_unlock(dev, 0); +} + +static int +fs_tx_queue_setup(struct rte_eth_dev *dev, + uint16_t tx_queue_id, + uint16_t nb_tx_desc, + unsigned int socket_id, + const struct rte_eth_txconf *tx_conf) +{ + struct sub_device *sdev; + struct txq *txq; + uint8_t i; + int ret; + + fs_lock(dev, 0); + if (tx_conf->tx_deferred_start) { + FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) { + if (SUBOPS(sdev, tx_queue_start) == NULL) { + ERROR("Tx queue deferred start is not " + "supported for subdevice %d", i); + fs_unlock(dev, 0); + return -EINVAL; + } + } + } + txq = dev->data->tx_queues[tx_queue_id]; + if (txq != NULL) { + fs_tx_queue_release(txq); + dev->data->tx_queues[tx_queue_id] = NULL; + } + txq = rte_zmalloc("ethdev TX queue", + sizeof(*txq) + + sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail, + RTE_CACHE_LINE_SIZE); + if (txq == NULL) { + fs_unlock(dev, 0); + return -ENOMEM; + } + FOREACH_SUBDEV(sdev, i, dev) + rte_atomic64_init(&txq->refcnt[i]); + txq->qid = tx_queue_id; + txq->socket_id = socket_id; + txq->info.conf = *tx_conf; + txq->info.nb_desc = nb_tx_desc; + txq->priv = PRIV(dev); + dev->data->tx_queues[tx_queue_id] = txq; + FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { + ret = rte_eth_tx_queue_setup(PORT_ID(sdev), + tx_queue_id, + nb_tx_desc, socket_id, + tx_conf); + if ((ret = fs_err(sdev, ret))) { + ERROR("TX queue setup failed for sub_device %d", i); + goto free_txq; + } + } + fs_unlock(dev, 0); + return 0; +free_txq: + fs_tx_queue_release(txq); + fs_unlock(dev, 0); + return ret; +} + +static void +fs_dev_free_queues(struct rte_eth_dev *dev) +{ + uint16_t i; + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + fs_rx_queue_release(dev->data->rx_queues[i]); + dev->data->rx_queues[i] 
= NULL; + } + dev->data->nb_rx_queues = 0; + for (i = 0; i < dev->data->nb_tx_queues; i++) { + fs_tx_queue_release(dev->data->tx_queues[i]); + dev->data->tx_queues[i] = NULL; + } + dev->data->nb_tx_queues = 0; +} + +static int +fs_promiscuous_enable(struct rte_eth_dev *dev) +{ + struct sub_device *sdev; + uint8_t i; + int ret = 0; + + fs_lock(dev, 0); + FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { + ret = rte_eth_promiscuous_enable(PORT_ID(sdev)); + ret = fs_err(sdev, ret); + if (ret != 0) { + ERROR("Promiscuous mode enable failed for subdevice %d", + PORT_ID(sdev)); + break; + } + } + if (ret != 0) { + /* Rollback in the case of failure */ + FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { + ret = rte_eth_promiscuous_disable(PORT_ID(sdev)); + ret = fs_err(sdev, ret); + if (ret != 0) + ERROR("Promiscuous mode disable during rollback failed for subdevice %d", + PORT_ID(sdev)); + } + } + fs_unlock(dev, 0); + + return ret; +} + +static int +fs_promiscuous_disable(struct rte_eth_dev *dev) +{ + struct sub_device *sdev; + uint8_t i; + int ret = 0; + + fs_lock(dev, 0); + FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { + ret = rte_eth_promiscuous_disable(PORT_ID(sdev)); + ret = fs_err(sdev, ret); + if (ret != 0) { + ERROR("Promiscuous mode disable failed for subdevice %d", + PORT_ID(sdev)); + break; + } + } + if (ret != 0) { + /* Rollback in the case of failure */ + FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { + ret = rte_eth_promiscuous_enable(PORT_ID(sdev)); + ret = fs_err(sdev, ret); + if (ret != 0) + ERROR("Promiscuous mode enable during rollback failed for subdevice %d", + PORT_ID(sdev)); + } + } + fs_unlock(dev, 0); + + return ret; +} + +static int +fs_allmulticast_enable(struct rte_eth_dev *dev) +{ + struct sub_device *sdev; + uint8_t i; + int ret = 0; + + fs_lock(dev, 0); + FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { + ret = rte_eth_allmulticast_enable(PORT_ID(sdev)); + ret = fs_err(sdev, ret); + if (ret != 0) { + ERROR("All-multicast mode enable failed for subdevice %d", + PORT_ID(sdev)); + break; + } + } + if (ret != 0) { + /* Rollback in the case of failure */ + FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { + ret = rte_eth_allmulticast_disable(PORT_ID(sdev)); + ret = fs_err(sdev, ret); + if (ret != 0) + ERROR("All-multicast mode disable during rollback failed for subdevice %d", + PORT_ID(sdev)); + } + } + fs_unlock(dev, 0); + + return ret; +} + +static int +fs_allmulticast_disable(struct rte_eth_dev *dev) +{ + struct sub_device *sdev; + uint8_t i; + int ret = 0; + + fs_lock(dev, 0); + FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { + ret = rte_eth_allmulticast_disable(PORT_ID(sdev)); + ret = fs_err(sdev, ret); + if (ret != 0) { + ERROR("All-multicast mode disable failed for subdevice %d", + PORT_ID(sdev)); + break; + } + } + if (ret != 0) { + /* Rollback in the case of failure */ + FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { + ret = rte_eth_allmulticast_enable(PORT_ID(sdev)); + ret = fs_err(sdev, ret); + if (ret != 0) + ERROR("All-multicast mode enable during rollback failed for subdevice %d", + PORT_ID(sdev)); + } + } + fs_unlock(dev, 0); + + return ret; +} + +static int +fs_link_update(struct rte_eth_dev *dev, + int wait_to_complete) +{ + struct sub_device *sdev; + uint8_t i; + int ret; + + fs_lock(dev, 0); + FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { + DEBUG("Calling link_update on sub_device %d", i); + ret = (SUBOPS(sdev, link_update))(ETH(sdev), wait_to_complete); + if (ret && ret != -1 && sdev->remove == 0 && + rte_eth_dev_is_removed(PORT_ID(sdev)) == 
0) { + ERROR("Link update failed for sub_device %d with error %d", + i, ret); + fs_unlock(dev, 0); + return ret; + } + } + if (TX_SUBDEV(dev)) { + struct rte_eth_link *l1; + struct rte_eth_link *l2; + + l1 = &dev->data->dev_link; + l2 = Ð(TX_SUBDEV(dev))->data->dev_link; + if (memcmp(l1, l2, sizeof(*l1))) { + *l1 = *l2; + fs_unlock(dev, 0); + return 0; + } + } + fs_unlock(dev, 0); + return -1; +} + +static int +fs_stats_get(struct rte_eth_dev *dev, + struct rte_eth_stats *stats) +{ + struct rte_eth_stats backup; + struct sub_device *sdev; + uint8_t i; + int ret; + + fs_lock(dev, 0); + rte_memcpy(stats, &PRIV(dev)->stats_accumulator, sizeof(*stats)); + FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { + struct rte_eth_stats *snapshot = &sdev->stats_snapshot.stats; + uint64_t *timestamp = &sdev->stats_snapshot.timestamp; + + rte_memcpy(&backup, snapshot, sizeof(backup)); + ret = rte_eth_stats_get(PORT_ID(sdev), snapshot); + if (ret) { + if (!fs_err(sdev, ret)) { + rte_memcpy(snapshot, &backup, sizeof(backup)); + goto inc; + } + ERROR("Operation rte_eth_stats_get failed for sub_device %d with error %d", + i, ret); + *timestamp = 0; + fs_unlock(dev, 0); + return ret; + } + *timestamp = rte_rdtsc(); +inc: + failsafe_stats_increment(stats, snapshot); + } + fs_unlock(dev, 0); + return 0; +} + +static int +fs_stats_reset(struct rte_eth_dev *dev) +{ + struct sub_device *sdev; + uint8_t i; + int ret; + + fs_lock(dev, 0); + FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { + ret = rte_eth_stats_reset(PORT_ID(sdev)); + if (ret) { + if (!fs_err(sdev, ret)) + continue; + + ERROR("Operation rte_eth_stats_reset failed for sub_device %d with error %d", + i, ret); + fs_unlock(dev, 0); + return ret; + } + memset(&sdev->stats_snapshot, 0, sizeof(struct rte_eth_stats)); + } + memset(&PRIV(dev)->stats_accumulator, 0, sizeof(struct rte_eth_stats)); + fs_unlock(dev, 0); + + return 0; +} + +static int +__fs_xstats_count(struct rte_eth_dev *dev) +{ + struct sub_device *sdev; + int count = 0; + uint8_t i; + int ret; + + FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { + ret = rte_eth_xstats_get_names(PORT_ID(sdev), NULL, 0); + if (ret < 0) + return ret; + count += ret; + } + + return count; +} + +static int +__fs_xstats_get_names(struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, + unsigned int limit) +{ + struct sub_device *sdev; + unsigned int count = 0; + uint8_t i; + + /* Caller only cares about count */ + if (!xstats_names) + return __fs_xstats_count(dev); + + FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { + struct rte_eth_xstat_name *sub_names = xstats_names + count; + int j, r; + + if (count >= limit) + break; + + r = rte_eth_xstats_get_names(PORT_ID(sdev), + sub_names, limit - count); + if (r < 0) + return r; + + /* add subN_ prefix to names */ + for (j = 0; j < r; j++) { + char *xname = sub_names[j].name; + char tmp[RTE_ETH_XSTATS_NAME_SIZE]; + + if ((xname[0] == 't' || xname[0] == 'r') && + xname[1] == 'x' && xname[2] == '_') + snprintf(tmp, sizeof(tmp), "%.3ssub%u_%s", + xname, i, xname + 3); + else + snprintf(tmp, sizeof(tmp), "sub%u_%s", + i, xname); + + strlcpy(xname, tmp, RTE_ETH_XSTATS_NAME_SIZE); + } + count += r; + } + return count; +} + +static int +fs_xstats_get_names(struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, + unsigned int limit) +{ + int ret; + + fs_lock(dev, 0); + ret = __fs_xstats_get_names(dev, xstats_names, limit); + fs_unlock(dev, 0); + return ret; +} + +static int +__fs_xstats_get(struct rte_eth_dev *dev, + struct rte_eth_xstat *xstats, + 
unsigned int n) +{ + unsigned int count = 0; + struct sub_device *sdev; + uint8_t i; + int j, ret; + + ret = __fs_xstats_count(dev); + /* + * if error + * or caller did not give enough space + * or just querying + */ + if (ret < 0 || ret > (int)n || xstats == NULL) + return ret; + + FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { + ret = rte_eth_xstats_get(PORT_ID(sdev), xstats, n); + if (ret < 0) + return ret; + + if (ret > (int)n) + return n + count; + + /* add offset to id's from sub-device */ + for (j = 0; j < ret; j++) + xstats[j].id += count; + + xstats += ret; + n -= ret; + count += ret; + } + + return count; +} + +static int +fs_xstats_get(struct rte_eth_dev *dev, + struct rte_eth_xstat *xstats, + unsigned int n) +{ + int ret; + + fs_lock(dev, 0); + ret = __fs_xstats_get(dev, xstats, n); + fs_unlock(dev, 0); + + return ret; +} + + +static int +fs_xstats_reset(struct rte_eth_dev *dev) +{ + struct sub_device *sdev; + uint8_t i; + int r = 0; + + fs_lock(dev, 0); + FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { + r = rte_eth_xstats_reset(PORT_ID(sdev)); + if (r < 0) + break; + } + fs_unlock(dev, 0); + + return r; +} + +static void +fs_dev_merge_desc_lim(struct rte_eth_desc_lim *to, + const struct rte_eth_desc_lim *from) +{ + to->nb_max = RTE_MIN(to->nb_max, from->nb_max); + to->nb_min = RTE_MAX(to->nb_min, from->nb_min); + to->nb_align = RTE_MAX(to->nb_align, from->nb_align); + + to->nb_seg_max = RTE_MIN(to->nb_seg_max, from->nb_seg_max); + to->nb_mtu_seg_max = RTE_MIN(to->nb_mtu_seg_max, from->nb_mtu_seg_max); +} + +/* + * Merge the information from sub-devices. + * + * The reported values must be the common subset of all sub devices + */ +static void +fs_dev_merge_info(struct rte_eth_dev_info *info, + const struct rte_eth_dev_info *sinfo) +{ + info->max_rx_pktlen = RTE_MIN(info->max_rx_pktlen, sinfo->max_rx_pktlen); + info->max_rx_queues = RTE_MIN(info->max_rx_queues, sinfo->max_rx_queues); + info->max_tx_queues = RTE_MIN(info->max_tx_queues, sinfo->max_tx_queues); + info->max_mac_addrs = RTE_MIN(info->max_mac_addrs, sinfo->max_mac_addrs); + info->max_hash_mac_addrs = RTE_MIN(info->max_hash_mac_addrs, + sinfo->max_hash_mac_addrs); + info->max_vmdq_pools = RTE_MIN(info->max_vmdq_pools, sinfo->max_vmdq_pools); + info->max_vfs = RTE_MIN(info->max_vfs, sinfo->max_vfs); + + fs_dev_merge_desc_lim(&info->rx_desc_lim, &sinfo->rx_desc_lim); + fs_dev_merge_desc_lim(&info->tx_desc_lim, &sinfo->tx_desc_lim); + + info->rx_offload_capa &= sinfo->rx_offload_capa; + info->tx_offload_capa &= sinfo->tx_offload_capa; + info->rx_queue_offload_capa &= sinfo->rx_queue_offload_capa; + info->tx_queue_offload_capa &= sinfo->tx_queue_offload_capa; + info->flow_type_rss_offloads &= sinfo->flow_type_rss_offloads; + info->hash_key_size = RTE_MIN(info->hash_key_size, + sinfo->hash_key_size); +} + +/** + * Fail-safe dev_infos_get rules: + * + * No sub_device: + * Numerables: + * Use the maximum possible values for any field, so as not + * to impede any further configuration effort. + * Capabilities: + * Limits capabilities to those that are understood by the + * fail-safe PMD. This understanding stems from the fail-safe + * being capable of verifying that the related capability is + * expressed within the device configuration (struct rte_eth_conf). 
+ * + * At least one probed sub_device: + * Numerables: + * Uses values from the active probed sub_device + * The rationale here is that if any sub_device is less capable + * (for example concerning the number of queues) than the active + * sub_device, then its subsequent configuration will fail. + * It is impossible to foresee this failure when the failing sub_device + * is supposed to be plugged-in later on, so the configuration process + * is the single point of failure and error reporting. + * Capabilities: + * Uses a logical AND of RX capabilities among + * all sub_devices and the default capabilities. + * Uses a logical AND of TX capabilities among + * the active probed sub_device and the default capabilities. + * Uses a logical AND of device capabilities among + * all sub_devices and the default capabilities. + * + */ +static int +fs_dev_infos_get(struct rte_eth_dev *dev, + struct rte_eth_dev_info *infos) +{ + struct sub_device *sdev; + uint8_t i; + int ret; + + /* Use maximum upper bounds by default */ + infos->max_rx_pktlen = UINT32_MAX; + infos->max_rx_queues = RTE_MAX_QUEUES_PER_PORT; + infos->max_tx_queues = RTE_MAX_QUEUES_PER_PORT; + infos->max_mac_addrs = FAILSAFE_MAX_ETHADDR; + infos->max_hash_mac_addrs = UINT32_MAX; + infos->max_vfs = UINT16_MAX; + infos->max_vmdq_pools = UINT16_MAX; + infos->hash_key_size = UINT8_MAX; + + /* + * Set of capabilities that can be verified upon + * configuring a sub-device. + */ + infos->rx_offload_capa = + DEV_RX_OFFLOAD_VLAN_STRIP | + DEV_RX_OFFLOAD_IPV4_CKSUM | + DEV_RX_OFFLOAD_UDP_CKSUM | + DEV_RX_OFFLOAD_TCP_CKSUM | + DEV_RX_OFFLOAD_TCP_LRO | + DEV_RX_OFFLOAD_QINQ_STRIP | + DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | + DEV_RX_OFFLOAD_MACSEC_STRIP | + DEV_RX_OFFLOAD_HEADER_SPLIT | + DEV_RX_OFFLOAD_VLAN_FILTER | + DEV_RX_OFFLOAD_VLAN_EXTEND | + DEV_RX_OFFLOAD_JUMBO_FRAME | + DEV_RX_OFFLOAD_SCATTER | + DEV_RX_OFFLOAD_TIMESTAMP | + DEV_RX_OFFLOAD_SECURITY; + + infos->rx_queue_offload_capa = + DEV_RX_OFFLOAD_VLAN_STRIP | + DEV_RX_OFFLOAD_IPV4_CKSUM | + DEV_RX_OFFLOAD_UDP_CKSUM | + DEV_RX_OFFLOAD_TCP_CKSUM | + DEV_RX_OFFLOAD_TCP_LRO | + DEV_RX_OFFLOAD_QINQ_STRIP | + DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | + DEV_RX_OFFLOAD_MACSEC_STRIP | + DEV_RX_OFFLOAD_HEADER_SPLIT | + DEV_RX_OFFLOAD_VLAN_FILTER | + DEV_RX_OFFLOAD_VLAN_EXTEND | + DEV_RX_OFFLOAD_JUMBO_FRAME | + DEV_RX_OFFLOAD_SCATTER | + DEV_RX_OFFLOAD_TIMESTAMP | + DEV_RX_OFFLOAD_SECURITY; + + infos->tx_offload_capa = + DEV_TX_OFFLOAD_MULTI_SEGS | + DEV_TX_OFFLOAD_MBUF_FAST_FREE | + DEV_TX_OFFLOAD_IPV4_CKSUM | + DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_TCP_CKSUM | + DEV_TX_OFFLOAD_TCP_TSO; + + infos->flow_type_rss_offloads = + ETH_RSS_IP | + ETH_RSS_UDP | + ETH_RSS_TCP; + infos->dev_capa = + RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP | + RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP; + + FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) { + struct rte_eth_dev_info sub_info; + + ret = rte_eth_dev_info_get(PORT_ID(sdev), &sub_info); + ret = fs_err(sdev, ret); + if (ret != 0) + return ret; + + fs_dev_merge_info(infos, &sub_info); + } + + return 0; +} + +static const uint32_t * +fs_dev_supported_ptypes_get(struct rte_eth_dev *dev) +{ + struct sub_device *sdev; + struct rte_eth_dev *edev; + const uint32_t *ret; + + fs_lock(dev, 0); + sdev = TX_SUBDEV(dev); + if (sdev == NULL) { + ret = NULL; + goto unlock; + } + edev = ETH(sdev); + /* ENOTSUP: counts as no supported ptypes */ + if (SUBOPS(sdev, dev_supported_ptypes_get) == NULL) { + ret = NULL; + goto unlock; + } + /* + * The API does not permit to do a clean AND of all 
ptypes, + * It is also incomplete by design and we do not really care + * to have a best possible value in this context. + * We just return the ptypes of the device of highest + * priority, usually the PREFERRED device. + */ + ret = SUBOPS(sdev, dev_supported_ptypes_get)(edev); +unlock: + fs_unlock(dev, 0); + return ret; +} + +static int +fs_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) +{ + struct sub_device *sdev; + uint8_t i; + int ret; + + fs_lock(dev, 0); + FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { + DEBUG("Calling rte_eth_dev_set_mtu on sub_device %d", i); + ret = rte_eth_dev_set_mtu(PORT_ID(sdev), mtu); + if ((ret = fs_err(sdev, ret))) { + ERROR("Operation rte_eth_dev_set_mtu failed for sub_device %d with error %d", + i, ret); + fs_unlock(dev, 0); + return ret; + } + } + fs_unlock(dev, 0); + return 0; +} + +static int +fs_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) +{ + struct sub_device *sdev; + uint8_t i; + int ret; + + fs_lock(dev, 0); + FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { + DEBUG("Calling rte_eth_dev_vlan_filter on sub_device %d", i); + ret = rte_eth_dev_vlan_filter(PORT_ID(sdev), vlan_id, on); + if ((ret = fs_err(sdev, ret))) { + ERROR("Operation rte_eth_dev_vlan_filter failed for sub_device %d" + " with error %d", i, ret); + fs_unlock(dev, 0); + return ret; + } + } + fs_unlock(dev, 0); + return 0; +} + +static int +fs_flow_ctrl_get(struct rte_eth_dev *dev, + struct rte_eth_fc_conf *fc_conf) +{ + struct sub_device *sdev; + int ret; + + fs_lock(dev, 0); + sdev = TX_SUBDEV(dev); + if (sdev == NULL) { + ret = 0; + goto unlock; + } + if (SUBOPS(sdev, flow_ctrl_get) == NULL) { + ret = -ENOTSUP; + goto unlock; + } + ret = SUBOPS(sdev, flow_ctrl_get)(ETH(sdev), fc_conf); +unlock: + fs_unlock(dev, 0); + return ret; +} + +static int +fs_flow_ctrl_set(struct rte_eth_dev *dev, + struct rte_eth_fc_conf *fc_conf) +{ + struct sub_device *sdev; + uint8_t i; + int ret; + + fs_lock(dev, 0); + FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { + DEBUG("Calling rte_eth_dev_flow_ctrl_set on sub_device %d", i); + ret = rte_eth_dev_flow_ctrl_set(PORT_ID(sdev), fc_conf); + if ((ret = fs_err(sdev, ret))) { + ERROR("Operation rte_eth_dev_flow_ctrl_set failed for sub_device %d" + " with error %d", i, ret); + fs_unlock(dev, 0); + return ret; + } + } + fs_unlock(dev, 0); + return 0; +} + +static void +fs_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index) +{ + struct sub_device *sdev; + uint8_t i; + + fs_lock(dev, 0); + /* No check: already done within the rte_eth_dev_mac_addr_remove + * call for the fail-safe device. 
+ */ + FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) + rte_eth_dev_mac_addr_remove(PORT_ID(sdev), + &dev->data->mac_addrs[index]); + PRIV(dev)->mac_addr_pool[index] = 0; + fs_unlock(dev, 0); +} + +static int +fs_mac_addr_add(struct rte_eth_dev *dev, + struct rte_ether_addr *mac_addr, + uint32_t index, + uint32_t vmdq) +{ + struct sub_device *sdev; + int ret; + uint8_t i; + + RTE_ASSERT(index < FAILSAFE_MAX_ETHADDR); + fs_lock(dev, 0); + FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { + ret = rte_eth_dev_mac_addr_add(PORT_ID(sdev), mac_addr, vmdq); + if ((ret = fs_err(sdev, ret))) { + ERROR("Operation rte_eth_dev_mac_addr_add failed for sub_device %" + PRIu8 " with error %d", i, ret); + fs_unlock(dev, 0); + return ret; + } + } + if (index >= PRIV(dev)->nb_mac_addr) { + DEBUG("Growing mac_addrs array"); + PRIV(dev)->nb_mac_addr = index; + } + PRIV(dev)->mac_addr_pool[index] = vmdq; + fs_unlock(dev, 0); + return 0; +} + +static int +fs_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr) +{ + struct sub_device *sdev; + uint8_t i; + int ret; + + fs_lock(dev, 0); + FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { + ret = rte_eth_dev_default_mac_addr_set(PORT_ID(sdev), mac_addr); + ret = fs_err(sdev, ret); + if (ret) { + ERROR("Operation rte_eth_dev_mac_addr_set failed for sub_device %d with error %d", + i, ret); + fs_unlock(dev, 0); + return ret; + } + } + fs_unlock(dev, 0); + + return 0; +} + +static int +fs_set_mc_addr_list(struct rte_eth_dev *dev, + struct rte_ether_addr *mc_addr_set, uint32_t nb_mc_addr) +{ + struct sub_device *sdev; + uint8_t i; + int ret; + void *mcast_addrs; + + fs_lock(dev, 0); + + FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { + ret = rte_eth_dev_set_mc_addr_list(PORT_ID(sdev), + mc_addr_set, nb_mc_addr); + if (ret != 0) { + ERROR("Operation rte_eth_dev_set_mc_addr_list failed for sub_device %d with error %d", + i, ret); + goto rollback; + } + } + + mcast_addrs = rte_realloc(PRIV(dev)->mcast_addrs, + nb_mc_addr * sizeof(PRIV(dev)->mcast_addrs[0]), 0); + if (mcast_addrs == NULL && nb_mc_addr > 0) { + ret = -ENOMEM; + goto rollback; + } + rte_memcpy(mcast_addrs, mc_addr_set, + nb_mc_addr * sizeof(PRIV(dev)->mcast_addrs[0])); + PRIV(dev)->nb_mcast_addr = nb_mc_addr; + PRIV(dev)->mcast_addrs = mcast_addrs; + + fs_unlock(dev, 0); + return 0; + +rollback: + FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { + int rc = rte_eth_dev_set_mc_addr_list(PORT_ID(sdev), + PRIV(dev)->mcast_addrs, PRIV(dev)->nb_mcast_addr); + if (rc != 0) { + ERROR("Multicast MAC address list rollback for sub_device %d failed with error %d", + i, rc); + } + } + + fs_unlock(dev, 0); + return ret; +} + +static int +fs_rss_hash_update(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf) +{ + struct sub_device *sdev; + uint8_t i; + int ret; + + fs_lock(dev, 0); + FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { + ret = rte_eth_dev_rss_hash_update(PORT_ID(sdev), rss_conf); + ret = fs_err(sdev, ret); + if (ret) { + ERROR("Operation rte_eth_dev_rss_hash_update" + " failed for sub_device %d with error %d", + i, ret); + fs_unlock(dev, 0); + return ret; + } + } + fs_unlock(dev, 0); + + return 0; +} + +static int +fs_filter_ctrl(struct rte_eth_dev *dev __rte_unused, + enum rte_filter_type type, + enum rte_filter_op op, + void *arg) +{ + if (type == RTE_ETH_FILTER_GENERIC && + op == RTE_ETH_FILTER_GET) { + *(const void **)arg = &fs_flow_ops; + return 0; + } + return -ENOTSUP; +} + +const struct eth_dev_ops failsafe_ops = { + .dev_configure = fs_dev_configure, + .dev_start = 
fs_dev_start, + .dev_stop = fs_dev_stop, + .dev_set_link_down = fs_dev_set_link_down, + .dev_set_link_up = fs_dev_set_link_up, + .dev_close = fs_dev_close, + .promiscuous_enable = fs_promiscuous_enable, + .promiscuous_disable = fs_promiscuous_disable, + .allmulticast_enable = fs_allmulticast_enable, + .allmulticast_disable = fs_allmulticast_disable, + .link_update = fs_link_update, + .stats_get = fs_stats_get, + .stats_reset = fs_stats_reset, + .xstats_get = fs_xstats_get, + .xstats_get_names = fs_xstats_get_names, + .xstats_reset = fs_xstats_reset, + .dev_infos_get = fs_dev_infos_get, + .dev_supported_ptypes_get = fs_dev_supported_ptypes_get, + .mtu_set = fs_mtu_set, + .vlan_filter_set = fs_vlan_filter_set, + .rx_queue_start = fs_rx_queue_start, + .rx_queue_stop = fs_rx_queue_stop, + .tx_queue_start = fs_tx_queue_start, + .tx_queue_stop = fs_tx_queue_stop, + .rx_queue_setup = fs_rx_queue_setup, + .tx_queue_setup = fs_tx_queue_setup, + .rx_queue_release = fs_rx_queue_release, + .tx_queue_release = fs_tx_queue_release, + .rx_queue_intr_enable = fs_rx_intr_enable, + .rx_queue_intr_disable = fs_rx_intr_disable, + .flow_ctrl_get = fs_flow_ctrl_get, + .flow_ctrl_set = fs_flow_ctrl_set, + .mac_addr_remove = fs_mac_addr_remove, + .mac_addr_add = fs_mac_addr_add, + .mac_addr_set = fs_mac_addr_set, + .set_mc_addr_list = fs_set_mc_addr_list, + .rss_hash_update = fs_rss_hash_update, + .filter_ctrl = fs_filter_ctrl, +}; diff --git a/src/spdk/dpdk/drivers/net/failsafe/failsafe_private.h b/src/spdk/dpdk/drivers/net/failsafe/failsafe_private.h new file mode 100644 index 000000000..651578a12 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/failsafe/failsafe_private.h @@ -0,0 +1,504 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2017 6WIND S.A. + * Copyright 2017 Mellanox Technologies, Ltd + */ + +#ifndef _ETH_FAILSAFE_PRIVATE_H_ +#define _ETH_FAILSAFE_PRIVATE_H_ + +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#define FAILSAFE_DRIVER_NAME "Fail-safe PMD" +#define FAILSAFE_OWNER_NAME "Fail-safe" + +#define PMD_FAILSAFE_MAC_KVARG "mac" +#define PMD_FAILSAFE_HOTPLUG_POLL_KVARG "hotplug_poll" +#define PMD_FAILSAFE_PARAM_STRING \ + "dev()," \ + "exec()," \ + "fd()," \ + "mac=mac_addr," \ + "hotplug_poll=u64" \ + "" + +#define FAILSAFE_HOTPLUG_DEFAULT_TIMEOUT_MS 2000 + +#define FAILSAFE_MAX_ETHPORTS 2 +#define FAILSAFE_MAX_ETHADDR 128 + +#define DEVARGS_MAXLEN 4096 + +enum rxp_service_state { + SS_NO_SERVICE = 0, + SS_REGISTERED, + SS_READY, + SS_RUNNING, +}; + +/* TYPES */ + +struct rx_proxy { + /* epoll file descriptor */ + int efd; + /* event vector to be used by epoll */ + struct rte_epoll_event *evec; + /* rte service id */ + uint32_t sid; + /* service core id */ + uint32_t scid; + enum rxp_service_state sstate; +}; + +#define FS_RX_PROXY_INIT (struct rx_proxy){ \ + .efd = -1, \ + .evec = NULL, \ + .sid = 0, \ + .scid = 0, \ + .sstate = SS_NO_SERVICE, \ +} + +struct rxq { + struct fs_priv *priv; + uint16_t qid; + /* next sub_device to poll */ + struct sub_device *sdev; + unsigned int socket_id; + int event_fd; + unsigned int enable_events:1; + struct rte_eth_rxq_info info; + rte_atomic64_t refcnt[]; +}; + +struct txq { + struct fs_priv *priv; + uint16_t qid; + unsigned int socket_id; + struct rte_eth_txq_info info; + rte_atomic64_t refcnt[]; +}; + +struct rte_flow { + TAILQ_ENTRY(rte_flow) next; + /* sub_flows */ + struct rte_flow *flows[FAILSAFE_MAX_ETHPORTS]; + /* flow description for synchronization */ + struct rte_flow_conv_rule rule; + uint8_t 
rule_data[]; +}; + +enum dev_state { + DEV_UNDEFINED, + DEV_PARSED, + DEV_PROBED, + DEV_ACTIVE, + DEV_STARTED, +}; + +struct fs_stats { + struct rte_eth_stats stats; + uint64_t timestamp; +}; + +/* + * Allocated in shared memory. + */ +struct sub_device { + /* Exhaustive DPDK device description */ + struct sub_device *next; + struct rte_devargs devargs; + struct rte_bus *bus; /* for primary process only. */ + struct rte_device *dev; /* for primary process only. */ + uint8_t sid; + /* Device state machine */ + enum dev_state state; + /* Last stats snapshot passed to user */ + struct fs_stats stats_snapshot; + /* Some device are defined as a command line */ + char *cmdline; + /* Others are retrieved through a file descriptor */ + char *fd_str; + /* fail-safe device backreference */ + uint16_t fs_port_id; /* shared between processes */ + /* sub device port id*/ + uint16_t sdev_port_id; /* shared between processes */ + /* flag calling for recollection */ + volatile unsigned int remove:1; + /* flow isolation state */ + int flow_isolated:1; + /* RMV callback registration state */ + unsigned int rmv_callback:1; + /* LSC callback registration state */ + unsigned int lsc_callback:1; +}; + +/* + * This is referenced by eth_dev->data->dev_private + * This is shared between processes. + */ +struct fs_priv { + struct rte_eth_dev_data *data; /* backreference to shared data. */ + /* + * Set of sub_devices. + * subs[0] is the preferred device + * any other is just another slave + */ + struct sub_device *subs; /* shared between processes */ + uint8_t subs_head; /* if head == tail, no subs */ + uint8_t subs_tail; /* first invalid */ + uint8_t subs_tx; /* current emitting device */ + uint8_t current_probed; + /* flow mapping */ + TAILQ_HEAD(sub_flows, rte_flow) flow_list; + /* current number of mac_addr slots allocated. */ + uint32_t nb_mac_addr; + struct rte_ether_addr mac_addrs[FAILSAFE_MAX_ETHADDR]; + uint32_t mac_addr_pool[FAILSAFE_MAX_ETHADDR]; + uint32_t nb_mcast_addr; + struct rte_ether_addr *mcast_addrs; + /* current capabilities */ + struct rte_eth_dev_owner my_owner; /* Unique owner. */ + struct rte_intr_handle intr_handle; /* Port interrupt handle. */ + /* + * Fail-safe state machine. + * This level will be tracking state of the EAL and eth + * layer at large as defined by the user application. + * It will then steer the sub_devices toward the same + * synchronized state. + */ + enum dev_state state; + struct rte_eth_stats stats_accumulator; + /* + * Rx interrupts/events proxy. + * The PMD issues Rx events to the EAL on behalf of its subdevices, + * it does that by registering an event-fd for each of its queues with + * the EAL. A PMD service thread listens to all the Rx events from the + * subdevices, when an Rx event is issued by a subdevice it will be + * caught by this service with will trigger an Rx event in the + * appropriate failsafe Rx queue. + */ + struct rx_proxy rxp; + pthread_mutex_t hotplug_mutex; + /* Hot-plug mutex is locked by the alarm mechanism. 
*/ + volatile unsigned int alarm_lock:1; + unsigned int pending_alarm:1; /* An alarm is pending */ + /* flow isolation state */ + int flow_isolated:1; +}; + +/* FAILSAFE_INTR */ + +int failsafe_rx_intr_install(struct rte_eth_dev *dev); +void failsafe_rx_intr_uninstall(struct rte_eth_dev *dev); +int failsafe_rx_intr_install_subdevice(struct sub_device *sdev); +void failsafe_rx_intr_uninstall_subdevice(struct sub_device *sdev); + +/* MISC */ + +int failsafe_hotplug_alarm_install(struct rte_eth_dev *dev); +int failsafe_hotplug_alarm_cancel(struct rte_eth_dev *dev); + +/* RX / TX */ + +void failsafe_set_burst_fn(struct rte_eth_dev *dev, int force_safe); + +uint16_t failsafe_rx_burst(void *rxq, + struct rte_mbuf **rx_pkts, uint16_t nb_pkts); +uint16_t failsafe_tx_burst(void *txq, + struct rte_mbuf **tx_pkts, uint16_t nb_pkts); + +uint16_t failsafe_rx_burst_fast(void *rxq, + struct rte_mbuf **rx_pkts, uint16_t nb_pkts); +uint16_t failsafe_tx_burst_fast(void *txq, + struct rte_mbuf **tx_pkts, uint16_t nb_pkts); + +/* ARGS */ + +int failsafe_args_parse(struct rte_eth_dev *dev, const char *params); +void failsafe_args_free(struct rte_eth_dev *dev); +int failsafe_args_count_subdevice(struct rte_eth_dev *dev, const char *params); +int failsafe_args_parse_subs(struct rte_eth_dev *dev); + +/* EAL */ + +int failsafe_eal_init(struct rte_eth_dev *dev); +int failsafe_eal_uninit(struct rte_eth_dev *dev); + +/* ETH_DEV */ + +int failsafe_eth_dev_state_sync(struct rte_eth_dev *dev); +void failsafe_eth_dev_unregister_callbacks(struct sub_device *sdev); +void failsafe_dev_remove(struct rte_eth_dev *dev); +void failsafe_stats_increment(struct rte_eth_stats *to, + struct rte_eth_stats *from); +int failsafe_eth_rmv_event_callback(uint16_t port_id, + enum rte_eth_event_type type, + void *arg, void *out); +int failsafe_eth_lsc_event_callback(uint16_t port_id, + enum rte_eth_event_type event, + void *cb_arg, void *out); +int failsafe_eth_new_event_callback(uint16_t port_id, + enum rte_eth_event_type event, + void *cb_arg, void *out); + +/* GLOBALS */ + +extern const char pmd_failsafe_driver_name[]; +extern const struct eth_dev_ops failsafe_ops; +extern const struct rte_flow_ops fs_flow_ops; +extern uint64_t failsafe_hotplug_poll; +extern int failsafe_mac_from_arg; + +/* HELPERS */ + +/* dev: (struct rte_eth_dev *) fail-safe device */ +#define PRIV(dev) \ + ((struct fs_priv *)(dev)->data->dev_private) + +/* sdev: (struct sub_device *) */ +#define ETH(sdev) \ + ((sdev)->sdev_port_id == RTE_MAX_ETHPORTS ? 
\ + NULL : &rte_eth_devices[(sdev)->sdev_port_id]) + +/* sdev: (struct sub_device *) */ +#define PORT_ID(sdev) \ + ((sdev)->sdev_port_id) + +/* sdev: (struct sub_device *) */ +#define SUB_ID(sdev) \ + ((sdev)->sid) + +/** + * Stateful iterator construct over fail-safe sub-devices: + * s: (struct sub_device *), iterator + * i: (uint8_t), increment + * dev: (struct rte_eth_dev *), fail-safe ethdev + * state: (enum dev_state), minimum acceptable device state + */ +#define FOREACH_SUBDEV_STATE(s, i, dev, state) \ + for (s = fs_find_next((dev), 0, state, &i); \ + s != NULL; \ + s = fs_find_next((dev), i + 1, state, &i)) + +/** + * Iterator construct over fail-safe sub-devices: + * s: (struct sub_device *), iterator + * i: (uint8_t), increment + * dev: (struct rte_eth_dev *), fail-safe ethdev + */ +#define FOREACH_SUBDEV(s, i, dev) \ + FOREACH_SUBDEV_STATE(s, i, dev, DEV_UNDEFINED) + +/* dev: (struct rte_eth_dev *) fail-safe device */ +#define PREFERRED_SUBDEV(dev) \ + (&PRIV(dev)->subs[0]) + +/* dev: (struct rte_eth_dev *) fail-safe device */ +#define TX_SUBDEV(dev) \ + (PRIV(dev)->subs_tx >= PRIV(dev)->subs_tail ? NULL \ + : (PRIV(dev)->subs[PRIV(dev)->subs_tx].state < DEV_PROBED ? NULL \ + : &PRIV(dev)->subs[PRIV(dev)->subs_tx])) + +/** + * s: (struct sub_device *) + * ops: (struct eth_dev_ops) member + */ +#define SUBOPS(s, ops) \ + (ETH(s)->dev_ops->ops) + +/** + * Atomic guard + */ + +/** + * a: (rte_atomic64_t) + */ +#define FS_ATOMIC_P(a) \ + rte_atomic64_set(&(a), 1) + +/** + * a: (rte_atomic64_t) + */ +#define FS_ATOMIC_V(a) \ + rte_atomic64_set(&(a), 0) + +/** + * s: (struct sub_device *) + * i: uint16_t qid + */ +#define FS_ATOMIC_RX(s, i) \ + rte_atomic64_read( \ + &((struct rxq *) \ + (fs_dev(s)->data->rx_queues[i]))->refcnt[(s)->sid]) +/** + * s: (struct sub_device *) + * i: uint16_t qid + */ +#define FS_ATOMIC_TX(s, i) \ + rte_atomic64_read( \ + &((struct txq *) \ + (fs_dev(s)->data->tx_queues[i]))->refcnt[(s)->sid]) + +#ifdef RTE_EXEC_ENV_FREEBSD +#define FS_THREADID_TYPE void* +#define FS_THREADID_FMT "p" +#else +#define FS_THREADID_TYPE unsigned long +#define FS_THREADID_FMT "lu" +#endif + +extern int failsafe_logtype; + +#define LOG__(l, m, ...) \ + rte_log(RTE_LOG_ ## l, failsafe_logtype, \ + "net_failsafe: " m "%c", __VA_ARGS__) + +#define LOG_(level, ...) LOG__(level, __VA_ARGS__, '\n') +#define DEBUG(...) LOG_(DEBUG, __VA_ARGS__) +#define INFO(...) LOG_(INFO, __VA_ARGS__) +#define WARN(...) LOG_(WARNING, __VA_ARGS__) +#define ERROR(...) LOG_(ERR, __VA_ARGS__) + +/* inlined functions */ + +static inline struct sub_device * +fs_find_next(struct rte_eth_dev *dev, + uint8_t sid, + enum dev_state min_state, + uint8_t *sid_out) +{ + struct sub_device *subs; + uint8_t tail; + + subs = PRIV(dev)->subs; + tail = PRIV(dev)->subs_tail; + while (sid < tail) { + if (subs[sid].state >= min_state) + break; + sid++; + } + *sid_out = sid; + if (sid >= tail) + return NULL; + return &subs[sid]; +} + +static inline struct rte_eth_dev * +fs_dev(struct sub_device *sdev) { + return &rte_eth_devices[sdev->fs_port_id]; +} + +/* + * Lock hot-plug mutex. + * is_alarm means that the caller is, for sure, the hot-plug alarm mechanism. 
+ */ +static inline int +fs_lock(struct rte_eth_dev *dev, unsigned int is_alarm) +{ + int ret; + + if (is_alarm) { + ret = pthread_mutex_trylock(&PRIV(dev)->hotplug_mutex); + if (ret) { + DEBUG("Hot-plug mutex lock trying failed(%s), will try" + " again later...", strerror(ret)); + return ret; + } + PRIV(dev)->alarm_lock = 1; + } else { + ret = pthread_mutex_lock(&PRIV(dev)->hotplug_mutex); + if (ret) { + ERROR("Cannot lock mutex(%s)", strerror(ret)); + return ret; + } + } + return ret; +} + +/* + * Unlock hot-plug mutex. + * is_alarm means that the caller is, for sure, the hot-plug alarm mechanism. + */ +static inline void +fs_unlock(struct rte_eth_dev *dev, unsigned int is_alarm) +{ + int ret; + + if (is_alarm) { + RTE_ASSERT(PRIV(dev)->alarm_lock == 1); + PRIV(dev)->alarm_lock = 0; + } + ret = pthread_mutex_unlock(&PRIV(dev)->hotplug_mutex); + if (ret) + ERROR("Cannot unlock hot-plug mutex(%s)", strerror(ret)); +} + +/* + * Switch emitting device. + * If banned is set, banned must not be considered for + * the role of emitting device. + */ +static inline void +fs_switch_dev(struct rte_eth_dev *dev, + struct sub_device *banned) +{ + struct sub_device *txd; + enum dev_state req_state; + + req_state = PRIV(dev)->state; + txd = TX_SUBDEV(dev); + if (PREFERRED_SUBDEV(dev)->state >= req_state && + PREFERRED_SUBDEV(dev) != banned) { + if (txd != PREFERRED_SUBDEV(dev) && + (txd == NULL || + (req_state == DEV_STARTED) || + (txd && txd->state < DEV_STARTED))) { + DEBUG("Switching tx_dev to preferred sub_device"); + PRIV(dev)->subs_tx = 0; + } + } else if ((txd && txd->state < req_state) || + txd == NULL || + txd == banned) { + struct sub_device *sdev = NULL; + uint8_t i; + + /* Using acceptable device */ + FOREACH_SUBDEV_STATE(sdev, i, dev, req_state) { + if (sdev == banned) + continue; + DEBUG("Switching tx_dev to sub_device %d", + i); + PRIV(dev)->subs_tx = i; + break; + } + if (i >= PRIV(dev)->subs_tail || sdev == NULL) { + DEBUG("No device ready, deactivating tx_dev"); + PRIV(dev)->subs_tx = PRIV(dev)->subs_tail; + } + } else { + return; + } + failsafe_set_burst_fn(dev, 0); + rte_wmb(); +} + +/* + * Adjust error value and rte_errno to the fail-safe actual error value. + */ +static inline int +fs_err(struct sub_device *sdev, int err) +{ + /* A device removal shouldn't be reported as an error. */ + if (sdev->remove == 1 || err == -EIO) + return rte_errno = 0; + return err; +} +#endif /* _ETH_FAILSAFE_PRIVATE_H_ */ diff --git a/src/spdk/dpdk/drivers/net/failsafe/failsafe_rxtx.c b/src/spdk/dpdk/drivers/net/failsafe/failsafe_rxtx.c new file mode 100644 index 000000000..fee08fa23 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/failsafe/failsafe_rxtx.c @@ -0,0 +1,178 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2017 6WIND S.A. 
+ * Copyright 2017 Mellanox Technologies, Ltd + */ + +#include +#include +#include +#include + +#include "failsafe_private.h" + +static inline int +fs_rx_unsafe(struct sub_device *sdev) +{ + return (ETH(sdev) == NULL) || + (ETH(sdev)->rx_pkt_burst == NULL) || + (sdev->state != DEV_STARTED) || + (sdev->remove != 0); +} + +static inline int +fs_tx_unsafe(struct sub_device *sdev) +{ + return (sdev == NULL) || + (ETH(sdev) == NULL) || + (ETH(sdev)->tx_pkt_burst == NULL) || + (sdev->state != DEV_STARTED); +} + +void +failsafe_set_burst_fn(struct rte_eth_dev *dev, int force_safe) +{ + struct sub_device *sdev; + uint8_t i; + int need_safe; + int safe_set; + + need_safe = force_safe; + FOREACH_SUBDEV(sdev, i, dev) + need_safe |= fs_rx_unsafe(sdev); + safe_set = (dev->rx_pkt_burst == &failsafe_rx_burst); + if (need_safe && !safe_set) { + DEBUG("Using safe RX bursts%s", + (force_safe ? " (forced)" : "")); + dev->rx_pkt_burst = &failsafe_rx_burst; + } else if (!need_safe && safe_set) { + DEBUG("Using fast RX bursts"); + dev->rx_pkt_burst = &failsafe_rx_burst_fast; + } + need_safe = force_safe || fs_tx_unsafe(TX_SUBDEV(dev)); + safe_set = (dev->tx_pkt_burst == &failsafe_tx_burst); + if (need_safe && !safe_set) { + DEBUG("Using safe TX bursts%s", + (force_safe ? " (forced)" : "")); + dev->tx_pkt_burst = &failsafe_tx_burst; + } else if (!need_safe && safe_set) { + DEBUG("Using fast TX bursts"); + dev->tx_pkt_burst = &failsafe_tx_burst_fast; + } + rte_wmb(); +} + +/* + * Override source port in Rx packets. + * + * Make Rx packets originate from this PMD instance instead of one of its + * sub-devices. This is mandatory to avoid breaking applications. + */ +static void +failsafe_rx_set_port(struct rte_mbuf **rx_pkts, uint16_t nb_pkts, uint16_t port) +{ + unsigned int i; + + for (i = 0; i != nb_pkts; ++i) + rx_pkts[i]->port = port; +} + +uint16_t +failsafe_rx_burst(void *queue, + struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + struct sub_device *sdev; + struct rxq *rxq; + void *sub_rxq; + uint16_t nb_rx; + + rxq = queue; + sdev = rxq->sdev; + do { + if (fs_rx_unsafe(sdev)) { + nb_rx = 0; + sdev = sdev->next; + continue; + } + sub_rxq = ETH(sdev)->data->rx_queues[rxq->qid]; + FS_ATOMIC_P(rxq->refcnt[sdev->sid]); + nb_rx = ETH(sdev)-> + rx_pkt_burst(sub_rxq, rx_pkts, nb_pkts); + FS_ATOMIC_V(rxq->refcnt[sdev->sid]); + sdev = sdev->next; + } while (nb_rx == 0 && sdev != rxq->sdev); + rxq->sdev = sdev; + if (nb_rx) + failsafe_rx_set_port(rx_pkts, nb_rx, + rxq->priv->data->port_id); + return nb_rx; +} + +uint16_t +failsafe_rx_burst_fast(void *queue, + struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + struct sub_device *sdev; + struct rxq *rxq; + void *sub_rxq; + uint16_t nb_rx; + + rxq = queue; + sdev = rxq->sdev; + do { + RTE_ASSERT(!fs_rx_unsafe(sdev)); + sub_rxq = ETH(sdev)->data->rx_queues[rxq->qid]; + FS_ATOMIC_P(rxq->refcnt[sdev->sid]); + nb_rx = ETH(sdev)-> + rx_pkt_burst(sub_rxq, rx_pkts, nb_pkts); + FS_ATOMIC_V(rxq->refcnt[sdev->sid]); + sdev = sdev->next; + } while (nb_rx == 0 && sdev != rxq->sdev); + rxq->sdev = sdev; + if (nb_rx) + failsafe_rx_set_port(rx_pkts, nb_rx, + rxq->priv->data->port_id); + return nb_rx; +} + +uint16_t +failsafe_tx_burst(void *queue, + struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + struct sub_device *sdev; + struct txq *txq; + void *sub_txq; + uint16_t nb_tx; + + txq = queue; + sdev = TX_SUBDEV(&rte_eth_devices[txq->priv->data->port_id]); + if (unlikely(fs_tx_unsafe(sdev))) + return 0; + sub_txq = ETH(sdev)->data->tx_queues[txq->qid]; + 
FS_ATOMIC_P(txq->refcnt[sdev->sid]); + nb_tx = ETH(sdev)->tx_pkt_burst(sub_txq, tx_pkts, nb_pkts); + FS_ATOMIC_V(txq->refcnt[sdev->sid]); + return nb_tx; +} + +uint16_t +failsafe_tx_burst_fast(void *queue, + struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + struct sub_device *sdev; + struct txq *txq; + void *sub_txq; + uint16_t nb_tx; + + txq = queue; + sdev = TX_SUBDEV(&rte_eth_devices[txq->priv->data->port_id]); + RTE_ASSERT(!fs_tx_unsafe(sdev)); + sub_txq = ETH(sdev)->data->tx_queues[txq->qid]; + FS_ATOMIC_P(txq->refcnt[sdev->sid]); + nb_tx = ETH(sdev)->tx_pkt_burst(sub_txq, tx_pkts, nb_pkts); + FS_ATOMIC_V(txq->refcnt[sdev->sid]); + return nb_tx; +} diff --git a/src/spdk/dpdk/drivers/net/failsafe/meson.build b/src/spdk/dpdk/drivers/net/failsafe/meson.build new file mode 100644 index 000000000..56010e212 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/failsafe/meson.build @@ -0,0 +1,21 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2018 Intel Corporation + +cflags += '-std=gnu99' +cflags += '-D_DEFAULT_SOURCE' +cflags += '-D_XOPEN_SOURCE=700' +cflags += '-pedantic' +if is_linux + cflags += '-DLINUX' +else + cflags += '-DBSD' +endif + +sources = files('failsafe_args.c', + 'failsafe.c', + 'failsafe_eal.c', + 'failsafe_ether.c', + 'failsafe_flow.c', + 'failsafe_intr.c', + 'failsafe_ops.c', + 'failsafe_rxtx.c') diff --git a/src/spdk/dpdk/drivers/net/failsafe/rte_pmd_failsafe_version.map b/src/spdk/dpdk/drivers/net/failsafe/rte_pmd_failsafe_version.map new file mode 100644 index 000000000..f9f17e4f6 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/failsafe/rte_pmd_failsafe_version.map @@ -0,0 +1,3 @@ +DPDK_20.0 { + local: *; +}; diff --git a/src/spdk/dpdk/drivers/net/fm10k/Makefile b/src/spdk/dpdk/drivers/net/fm10k/Makefile new file mode 100644 index 000000000..d48638992 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/fm10k/Makefile @@ -0,0 +1,77 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2013-2015 Intel Corporation + +include $(RTE_SDK)/mk/rte.vars.mk + +# +# library name +# +LIB = librte_pmd_fm10k.a + +CFLAGS += -O3 +CFLAGS += $(WERROR_FLAGS) + +EXPORT_MAP := rte_pmd_fm10k_version.map + +ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y) +# +# CFLAGS for icc +# +CFLAGS_BASE_DRIVER = -diag-disable 174 -diag-disable 593 -diag-disable 869 +CFLAGS_BASE_DRIVER += -diag-disable 981 -diag-disable 2259 + +else ifeq ($(CONFIG_RTE_TOOLCHAIN_CLANG),y) + +# +## CFLAGS for clang +# +CFLAGS_BASE_DRIVER = -Wno-unused-parameter -Wno-unused-value +CFLAGS_BASE_DRIVER += -Wno-strict-aliasing -Wno-format-extra-args +CFLAGS_BASE_DRIVER += -Wno-unused-variable + +else +# +# CFLAGS for gcc +# +CFLAGS_BASE_DRIVER = -Wno-unused-parameter -Wno-unused-value +CFLAGS_BASE_DRIVER += -Wno-strict-aliasing -Wno-format-extra-args +CFLAGS_BASE_DRIVER += -Wno-unused-variable + +ifeq ($(shell test $(GCC_VERSION) -ge 44 && echo 1), 1) +CFLAGS += -Wno-deprecated +CFLAGS_BASE_DRIVER += -Wno-unused-but-set-variable +ifeq ($(shell test $(GCC_VERSION) -ge 70 && echo 1), 1) +CFLAGS_BASE_DRIVER += -Wno-implicit-fallthrough +endif +endif +endif +LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring +LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs -lrte_hash +LDLIBS += -lrte_bus_pci + +# +# Add extra flags for base driver source files to disable warnings in them +# +BASE_DRIVER_OBJS=$(sort $(patsubst %.c,%.o,$(notdir $(wildcard $(SRCDIR)/base/*.c)))) +$(foreach obj, $(BASE_DRIVER_OBJS), $(eval CFLAGS_$(obj)+=$(CFLAGS_BASE_DRIVER))) + +VPATH += $(SRCDIR)/base + +# +# all source are stored in SRCS-y +# base driver is based on 
the package of cid-fm10k.2017.01.24.tar.gz +# +SRCS-$(CONFIG_RTE_LIBRTE_FM10K_PMD) += fm10k_ethdev.c +SRCS-$(CONFIG_RTE_LIBRTE_FM10K_PMD) += fm10k_rxtx.c + +SRCS-$(CONFIG_RTE_LIBRTE_FM10K_PMD) += fm10k_pf.c +SRCS-$(CONFIG_RTE_LIBRTE_FM10K_PMD) += fm10k_tlv.c +SRCS-$(CONFIG_RTE_LIBRTE_FM10K_PMD) += fm10k_common.c +SRCS-$(CONFIG_RTE_LIBRTE_FM10K_PMD) += fm10k_mbx.c +SRCS-$(CONFIG_RTE_LIBRTE_FM10K_PMD) += fm10k_vf.c +SRCS-$(CONFIG_RTE_LIBRTE_FM10K_PMD) += fm10k_api.c +ifeq ($(CONFIG_RTE_ARCH_X86), y) +SRCS-$(CONFIG_RTE_LIBRTE_FM10K_INC_VECTOR) += fm10k_rxtx_vec.c +endif + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_api.c b/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_api.c new file mode 100644 index 000000000..dfb50a10d --- /dev/null +++ b/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_api.c @@ -0,0 +1,346 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2013 - 2015 Intel Corporation + */ + +#include "fm10k_api.h" +#include "fm10k_common.h" + +/** + * fm10k_set_mac_type - Sets MAC type + * @hw: pointer to the HW structure + * + * This function sets the mac type of the adapter based on the + * vendor ID and device ID stored in the hw structure. + **/ +s32 fm10k_set_mac_type(struct fm10k_hw *hw) +{ + s32 ret_val = FM10K_SUCCESS; + + DEBUGFUNC("fm10k_set_mac_type"); + + if (hw->vendor_id != FM10K_INTEL_VENDOR_ID) { + ERROR_REPORT2(FM10K_ERROR_UNSUPPORTED, + "Unsupported vendor id: %x\n", hw->vendor_id); + return FM10K_ERR_DEVICE_NOT_SUPPORTED; + } + + switch (hw->device_id) { + case FM10K_DEV_ID_PF: +#ifdef BOULDER_RAPIDS_HW + case FM10K_DEV_ID_SDI_FM10420_QDA2: +#endif /* BOULDER_RAPIDS_HW */ +#ifdef ATWOOD_CHANNEL_HW + case FM10K_DEV_ID_SDI_FM10420_DA2: +#endif /* ATWOOD_CHANNEL_HW */ + hw->mac.type = fm10k_mac_pf; + break; + case FM10K_DEV_ID_VF: + hw->mac.type = fm10k_mac_vf; + break; + default: + ret_val = FM10K_ERR_DEVICE_NOT_SUPPORTED; + ERROR_REPORT2(FM10K_ERROR_UNSUPPORTED, + "Unsupported device id: %x\n", + hw->device_id); + break; + } + + DEBUGOUT2("fm10k_set_mac_type found mac: %d, returns: %d\n", + hw->mac.type, ret_val); + + return ret_val; +} + +/** + * fm10k_init_shared_code - Initialize the shared code + * @hw: pointer to hardware structure + * + * This will assign function pointers and assign the MAC type and PHY code. + * Does not touch the hardware. This function must be called prior to any + * other function in the shared code. The fm10k_hw structure should be + * memset to 0 prior to calling this function. The following fields in + * hw structure should be filled in prior to calling this function: + * hw_addr, back, device_id, vendor_id, subsystem_device_id, + * subsystem_vendor_id, and revision_id + **/ +s32 fm10k_init_shared_code(struct fm10k_hw *hw) +{ + s32 status; + + DEBUGFUNC("fm10k_init_shared_code"); + + /* Set the mac type */ + fm10k_set_mac_type(hw); + + switch (hw->mac.type) { + case fm10k_mac_pf: + status = fm10k_init_ops_pf(hw); + break; + case fm10k_mac_vf: + status = fm10k_init_ops_vf(hw); + break; + default: + status = FM10K_ERR_DEVICE_NOT_SUPPORTED; + break; + } + + return status; +} + +#define fm10k_call_func(hw, func, params, error) \ + ((func) ? (func params) : (error)) + +/** + * fm10k_reset_hw - Reset the hardware to known good state + * @hw: pointer to hardware structure + * + * This function should return the hardware to a state similar to the + * one it is in after being powered on. 
+ **/ +s32 fm10k_reset_hw(struct fm10k_hw *hw) +{ + return fm10k_call_func(hw, hw->mac.ops.reset_hw, (hw), + FM10K_NOT_IMPLEMENTED); +} + +/** + * fm10k_init_hw - Initialize the hardware + * @hw: pointer to hardware structure + * + * Initialize the hardware by resetting and then starting the hardware + **/ +s32 fm10k_init_hw(struct fm10k_hw *hw) +{ + return fm10k_call_func(hw, hw->mac.ops.init_hw, (hw), + FM10K_NOT_IMPLEMENTED); +} + +/** + * fm10k_stop_hw - Prepares hardware to shutdown Rx/Tx + * @hw: pointer to hardware structure + * + * Disables Rx/Tx queues and disables the DMA engine. + **/ +s32 fm10k_stop_hw(struct fm10k_hw *hw) +{ + return fm10k_call_func(hw, hw->mac.ops.stop_hw, (hw), + FM10K_NOT_IMPLEMENTED); +} + +/** + * fm10k_start_hw - Prepares hardware for Rx/Tx + * @hw: pointer to hardware structure + * + * This function sets the flags indicating that the hardware is ready to + * begin operation. + **/ +s32 fm10k_start_hw(struct fm10k_hw *hw) +{ + return fm10k_call_func(hw, hw->mac.ops.start_hw, (hw), + FM10K_NOT_IMPLEMENTED); +} + +/** + * fm10k_get_bus_info - Set PCI bus info + * @hw: pointer to hardware structure + * + * Sets the PCI bus info (speed, width, type) within the fm10k_hw structure + **/ +s32 fm10k_get_bus_info(struct fm10k_hw *hw) +{ + return fm10k_call_func(hw, hw->mac.ops.get_bus_info, (hw), + FM10K_NOT_IMPLEMENTED); +} + +#ifndef NO_IS_SLOT_APPROPRIATE_CHECK +/** + * fm10k_is_slot_appropriate - Indicate appropriate slot for this SKU + * @hw: pointer to hardware structure + * + * Looks at the PCIe bus info to confirm whether or not this slot can support + * the necessary bandwidth for this device. + **/ +bool fm10k_is_slot_appropriate(struct fm10k_hw *hw) +{ + if (hw->mac.ops.is_slot_appropriate) + return hw->mac.ops.is_slot_appropriate(hw); + return true; +} + +#endif +/** + * fm10k_update_vlan - Clear VLAN ID to VLAN filter table + * @hw: pointer to hardware structure + * @vid: VLAN ID to add to table + * @idx: Index indicating VF ID or PF ID in table + * @set: Indicates if this is a set or clear operation + * + * This function adds or removes the corresponding VLAN ID from the VLAN + * filter table for the corresponding function. + **/ +s32 fm10k_update_vlan(struct fm10k_hw *hw, u32 vid, u8 idx, bool set) +{ + return fm10k_call_func(hw, hw->mac.ops.update_vlan, (hw, vid, idx, set), + FM10K_NOT_IMPLEMENTED); +} + +/** + * fm10k_read_mac_addr - Reads MAC address + * @hw: pointer to hardware structure + * + * Reads the MAC address out of the interface and stores it in the HW + * structures. + **/ +s32 fm10k_read_mac_addr(struct fm10k_hw *hw) +{ + return fm10k_call_func(hw, hw->mac.ops.read_mac_addr, (hw), + FM10K_NOT_IMPLEMENTED); +} + +/** + * fm10k_update_hw_stats - Update hw statistics + * @hw: pointer to hardware structure + * + * This function updates statistics that are related to hardware. + * */ +void fm10k_update_hw_stats(struct fm10k_hw *hw, struct fm10k_hw_stats *stats) +{ + switch (hw->mac.type) { + case fm10k_mac_pf: + return fm10k_update_hw_stats_pf(hw, stats); + case fm10k_mac_vf: + return fm10k_update_hw_stats_vf(hw, stats); + default: + break; + } +} + +/** + * fm10k_rebind_hw_stats - Reset base for hw statistics + * @hw: pointer to hardware structure + * + * This function resets the base for statistics that are related to hardware. 
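All of the wrappers above funnel through the fm10k_call_func macro, which just tests the ops pointer and substitutes an error code when the MAC-type init left it unset. A minimal, self-contained sketch of that dispatch-with-fallback idiom follows; the struct, names and error value are illustrative stand-ins, not the driver's.

#include <stdio.h>

#define EX_NOT_IMPLEMENTED (-1)   /* stand-in for FM10K_NOT_IMPLEMENTED */

/* call through an optional ops pointer, or fall back to an error code */
#define ex_call_func(func, params, error) \
	((func) ? (func params) : (error))

struct ex_ops {
	int (*reset)(int unit);   /* may be left NULL by an init routine */
};

static int ex_reset_impl(int unit)
{
	printf("resetting unit %d\n", unit);
	return 0;
}

int main(void)
{
	struct ex_ops pf = { .reset = ex_reset_impl };
	struct ex_ops vf = { .reset = NULL };

	/* dispatches to the implementation */
	printf("pf: %d\n", ex_call_func(pf.reset, (3), EX_NOT_IMPLEMENTED));
	/* falls back to the error code because the pointer is unset */
	printf("vf: %d\n", ex_call_func(vf.reset, (3), EX_NOT_IMPLEMENTED));
	return 0;
}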
+ * */ +void fm10k_rebind_hw_stats(struct fm10k_hw *hw, struct fm10k_hw_stats *stats) +{ + switch (hw->mac.type) { + case fm10k_mac_pf: + return fm10k_rebind_hw_stats_pf(hw, stats); + case fm10k_mac_vf: + return fm10k_rebind_hw_stats_vf(hw, stats); + default: + break; + } +} + +/** + * fm10k_configure_dglort_map - Configures GLORT entry and queues + * @hw: pointer to hardware structure + * @dglort: pointer to dglort configuration structure + * + * Reads the configuration structure contained in dglort_cfg and uses + * that information to then populate a DGLORTMAP/DEC entry and the queues + * to which it has been assigned. + **/ +s32 fm10k_configure_dglort_map(struct fm10k_hw *hw, + struct fm10k_dglort_cfg *dglort) +{ + return fm10k_call_func(hw, hw->mac.ops.configure_dglort_map, + (hw, dglort), FM10K_NOT_IMPLEMENTED); +} + +/** + * fm10k_set_dma_mask - Configures PhyAddrSpace to limit DMA to system + * @hw: pointer to hardware structure + * @dma_mask: 64 bit DMA mask required for platform + * + * This function configures the endpoint to limit the access to memory + * beyond what is physically in the system. + **/ +void fm10k_set_dma_mask(struct fm10k_hw *hw, u64 dma_mask) +{ + if (hw->mac.ops.set_dma_mask) + hw->mac.ops.set_dma_mask(hw, dma_mask); +} + +/** + * fm10k_get_fault - Record a fault in one of the interface units + * @hw: pointer to hardware structure + * @type: pointer to fault type register offset + * @fault: pointer to memory location to record the fault + * + * Record the fault register contents to the fault data structure and + * clear the entry from the register. + * + * Returns ERR_PARAM if invalid register is specified or no error is present. + **/ +s32 fm10k_get_fault(struct fm10k_hw *hw, int type, struct fm10k_fault *fault) +{ + return fm10k_call_func(hw, hw->mac.ops.get_fault, (hw, type, fault), + FM10K_NOT_IMPLEMENTED); +} + +/** + * fm10k_update_uc_addr - Update device unicast address + * @hw: pointer to the HW structure + * @lport: logical port ID to update - unused + * @mac: MAC address to add/remove from table + * @vid: VLAN ID to add/remove from table + * @add: Indicates if this is an add or remove operation + * @flags: flags field to indicate add and secure - unused + * + * This function is used to add or remove unicast MAC addresses + **/ +s32 fm10k_update_uc_addr(struct fm10k_hw *hw, u16 lport, + const u8 *mac, u16 vid, bool add, u8 flags) +{ + return fm10k_call_func(hw, hw->mac.ops.update_uc_addr, + (hw, lport, mac, vid, add, flags), + FM10K_NOT_IMPLEMENTED); +} + +/** + * fm10k_update_mc_addr - Update device multicast address + * @hw: pointer to the HW structure + * @lport: logical port ID to update - unused + * @mac: MAC address to add/remove from table + * @vid: VLAN ID to add/remove from table + * @add: Indicates if this is an add or remove operation + * + * This function is used to add or remove multicast MAC addresses + **/ +s32 fm10k_update_mc_addr(struct fm10k_hw *hw, u16 lport, + const u8 *mac, u16 vid, bool add) +{ + return fm10k_call_func(hw, hw->mac.ops.update_mc_addr, + (hw, lport, mac, vid, add), + FM10K_NOT_IMPLEMENTED); +} + +/** + * fm10k_adjust_systime - Adjust systime frequency + * @hw: pointer to hardware structure + * @ppb: adjustment rate in parts per billion + * + * This function is meant to update the frequency of the clock represented + * by the SYSTIME register. 
+ **/ +s32 fm10k_adjust_systime(struct fm10k_hw *hw, s32 ppb) +{ + return fm10k_call_func(hw, hw->mac.ops.adjust_systime, + (hw, ppb), FM10K_NOT_IMPLEMENTED); +} + +/** + * fm10k_notify_offset - Notify switch of change in PTP offset + * @hw: pointer to hardware structure + * @offset: 64bit unsigned offset from hardware SYSTIME value + * + * This function is meant to notify switch of change in the PTP offset for + * the hardware SYSTIME registers. + **/ +s32 fm10k_notify_offset(struct fm10k_hw *hw, u64 offset) +{ + return fm10k_call_func(hw, hw->mac.ops.notify_offset, + (hw, offset), FM10K_NOT_IMPLEMENTED); +} diff --git a/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_api.h b/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_api.h new file mode 100644 index 000000000..d9593bba0 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_api.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2013 - 2015 Intel Corporation + */ + +#ifndef _FM10K_API_H_ +#define _FM10K_API_H_ + +#include "fm10k_pf.h" +#include "fm10k_vf.h" + +s32 fm10k_set_mac_type(struct fm10k_hw *hw); +s32 fm10k_reset_hw(struct fm10k_hw *hw); +s32 fm10k_init_hw(struct fm10k_hw *hw); +s32 fm10k_stop_hw(struct fm10k_hw *hw); +s32 fm10k_start_hw(struct fm10k_hw *hw); +s32 fm10k_init_shared_code(struct fm10k_hw *hw); +s32 fm10k_get_bus_info(struct fm10k_hw *hw); +#ifndef NO_IS_SLOT_APPROPRIATE_CHECK +bool fm10k_is_slot_appropriate(struct fm10k_hw *hw); +#endif +s32 fm10k_update_vlan(struct fm10k_hw *hw, u32 vid, u8 idx, bool set); +s32 fm10k_read_mac_addr(struct fm10k_hw *hw); +void fm10k_update_hw_stats(struct fm10k_hw *hw, struct fm10k_hw_stats *stats); +void fm10k_rebind_hw_stats(struct fm10k_hw *hw, struct fm10k_hw_stats *stats); +s32 fm10k_configure_dglort_map(struct fm10k_hw *hw, + struct fm10k_dglort_cfg *dglort); +void fm10k_set_dma_mask(struct fm10k_hw *hw, u64 dma_mask); +s32 fm10k_get_fault(struct fm10k_hw *hw, int type, struct fm10k_fault *fault); +s32 fm10k_update_uc_addr(struct fm10k_hw *hw, u16 lport, + const u8 *mac, u16 vid, bool add, u8 flags); +s32 fm10k_update_mc_addr(struct fm10k_hw *hw, u16 lport, + const u8 *mac, u16 vid, bool add); +s32 fm10k_adjust_systime(struct fm10k_hw *hw, s32 ppb); +s32 fm10k_notify_offset(struct fm10k_hw *hw, u64 offset); +#endif /* _FM10K_API_H_ */ diff --git a/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_common.c b/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_common.c new file mode 100644 index 000000000..b78d4b575 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_common.c @@ -0,0 +1,550 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2013 - 2015 Intel Corporation + */ + +#include "fm10k_common.h" + +/** + * fm10k_get_bus_info_generic - Generic set PCI bus info + * @hw: pointer to hardware structure + * + * Gets the PCI bus info (speed, width, type) then calls helper function to + * store this data within the fm10k_hw structure. 
+ **/ +STATIC s32 fm10k_get_bus_info_generic(struct fm10k_hw *hw) +{ + u16 link_cap, link_status, device_cap, device_control; + + DEBUGFUNC("fm10k_get_bus_info_generic"); + + /* Get the maximum link width and speed from PCIe config space */ + link_cap = FM10K_READ_PCI_WORD(hw, FM10K_PCIE_LINK_CAP); + + switch (link_cap & FM10K_PCIE_LINK_WIDTH) { + case FM10K_PCIE_LINK_WIDTH_1: + hw->bus_caps.width = fm10k_bus_width_pcie_x1; + break; + case FM10K_PCIE_LINK_WIDTH_2: + hw->bus_caps.width = fm10k_bus_width_pcie_x2; + break; + case FM10K_PCIE_LINK_WIDTH_4: + hw->bus_caps.width = fm10k_bus_width_pcie_x4; + break; + case FM10K_PCIE_LINK_WIDTH_8: + hw->bus_caps.width = fm10k_bus_width_pcie_x8; + break; + default: + hw->bus_caps.width = fm10k_bus_width_unknown; + break; + } + + switch (link_cap & FM10K_PCIE_LINK_SPEED) { + case FM10K_PCIE_LINK_SPEED_2500: + hw->bus_caps.speed = fm10k_bus_speed_2500; + break; + case FM10K_PCIE_LINK_SPEED_5000: + hw->bus_caps.speed = fm10k_bus_speed_5000; + break; + case FM10K_PCIE_LINK_SPEED_8000: + hw->bus_caps.speed = fm10k_bus_speed_8000; + break; + default: + hw->bus_caps.speed = fm10k_bus_speed_unknown; + break; + } + + /* Get the PCIe maximum payload size for the PCIe function */ + device_cap = FM10K_READ_PCI_WORD(hw, FM10K_PCIE_DEV_CAP); + + switch (device_cap & FM10K_PCIE_DEV_CAP_PAYLOAD) { + case FM10K_PCIE_DEV_CAP_PAYLOAD_128: + hw->bus_caps.payload = fm10k_bus_payload_128; + break; + case FM10K_PCIE_DEV_CAP_PAYLOAD_256: + hw->bus_caps.payload = fm10k_bus_payload_256; + break; + case FM10K_PCIE_DEV_CAP_PAYLOAD_512: + hw->bus_caps.payload = fm10k_bus_payload_512; + break; + default: + hw->bus_caps.payload = fm10k_bus_payload_unknown; + break; + } + + /* Get the negotiated link width and speed from PCIe config space */ + link_status = FM10K_READ_PCI_WORD(hw, FM10K_PCIE_LINK_STATUS); + + switch (link_status & FM10K_PCIE_LINK_WIDTH) { + case FM10K_PCIE_LINK_WIDTH_1: + hw->bus.width = fm10k_bus_width_pcie_x1; + break; + case FM10K_PCIE_LINK_WIDTH_2: + hw->bus.width = fm10k_bus_width_pcie_x2; + break; + case FM10K_PCIE_LINK_WIDTH_4: + hw->bus.width = fm10k_bus_width_pcie_x4; + break; + case FM10K_PCIE_LINK_WIDTH_8: + hw->bus.width = fm10k_bus_width_pcie_x8; + break; + default: + hw->bus.width = fm10k_bus_width_unknown; + break; + } + + switch (link_status & FM10K_PCIE_LINK_SPEED) { + case FM10K_PCIE_LINK_SPEED_2500: + hw->bus.speed = fm10k_bus_speed_2500; + break; + case FM10K_PCIE_LINK_SPEED_5000: + hw->bus.speed = fm10k_bus_speed_5000; + break; + case FM10K_PCIE_LINK_SPEED_8000: + hw->bus.speed = fm10k_bus_speed_8000; + break; + default: + hw->bus.speed = fm10k_bus_speed_unknown; + break; + } + + /* Get the negotiated PCIe maximum payload size for the PCIe function */ + device_control = FM10K_READ_PCI_WORD(hw, FM10K_PCIE_DEV_CTRL); + + switch (device_control & FM10K_PCIE_DEV_CTRL_PAYLOAD) { + case FM10K_PCIE_DEV_CTRL_PAYLOAD_128: + hw->bus.payload = fm10k_bus_payload_128; + break; + case FM10K_PCIE_DEV_CTRL_PAYLOAD_256: + hw->bus.payload = fm10k_bus_payload_256; + break; + case FM10K_PCIE_DEV_CTRL_PAYLOAD_512: + hw->bus.payload = fm10k_bus_payload_512; + break; + default: + hw->bus.payload = fm10k_bus_payload_unknown; + break; + } + + return FM10K_SUCCESS; +} + +u16 fm10k_get_pcie_msix_count_generic(struct fm10k_hw *hw) +{ + u16 msix_count; + + DEBUGFUNC("fm10k_get_pcie_msix_count_generic"); + + /* read in value from MSI-X capability register */ + msix_count = FM10K_READ_PCI_WORD(hw, FM10K_PCI_MSIX_MSG_CTRL); + msix_count &= 
FM10K_PCI_MSIX_MSG_CTRL_TBL_SZ_MASK; + + /* MSI-X count is zero-based in HW */ + msix_count++; + + if (msix_count > FM10K_MAX_MSIX_VECTORS) + msix_count = FM10K_MAX_MSIX_VECTORS; + + return msix_count; +} + +/** + * fm10k_init_ops_generic - Inits function ptrs + * @hw: pointer to the hardware structure + * + * Initialize the function pointers. + **/ +s32 fm10k_init_ops_generic(struct fm10k_hw *hw) +{ + struct fm10k_mac_info *mac = &hw->mac; + + DEBUGFUNC("fm10k_init_ops_generic"); + + /* MAC */ + mac->ops.get_bus_info = &fm10k_get_bus_info_generic; + + /* initialize GLORT state to avoid any false hits */ + mac->dglort_map = FM10K_DGLORTMAP_NONE; + + return FM10K_SUCCESS; +} + +/** + * fm10k_start_hw_generic - Prepare hardware for Tx/Rx + * @hw: pointer to hardware structure + * + * This function sets the Tx ready flag to indicate that the Tx path has + * been initialized. + **/ +s32 fm10k_start_hw_generic(struct fm10k_hw *hw) +{ + DEBUGFUNC("fm10k_start_hw_generic"); + + /* set flag indicating we are beginning Tx */ + hw->mac.tx_ready = true; + + return FM10K_SUCCESS; +} + +/** + * fm10k_disable_queues_generic - Stop Tx/Rx queues + * @hw: pointer to hardware structure + * @q_cnt: number of queues to be disabled + * + **/ +s32 fm10k_disable_queues_generic(struct fm10k_hw *hw, u16 q_cnt) +{ + u32 reg; + u16 i, time; + + DEBUGFUNC("fm10k_disable_queues_generic"); + + /* clear tx_ready to prevent any false hits for reset */ + hw->mac.tx_ready = false; + + if (FM10K_REMOVED(hw->hw_addr)) + return FM10K_SUCCESS; + + /* clear the enable bit for all rings */ + for (i = 0; i < q_cnt; i++) { + reg = FM10K_READ_REG(hw, FM10K_TXDCTL(i)); + FM10K_WRITE_REG(hw, FM10K_TXDCTL(i), + reg & ~FM10K_TXDCTL_ENABLE); + reg = FM10K_READ_REG(hw, FM10K_RXQCTL(i)); + FM10K_WRITE_REG(hw, FM10K_RXQCTL(i), + reg & ~FM10K_RXQCTL_ENABLE); + } + + FM10K_WRITE_FLUSH(hw); + usec_delay(1); + + /* loop through all queues to verify that they are all disabled */ + for (i = 0, time = FM10K_QUEUE_DISABLE_TIMEOUT; time;) { + /* if we are at end of rings all rings are disabled */ + if (i == q_cnt) + return FM10K_SUCCESS; + + /* if queue enables cleared, then move to next ring pair */ + reg = FM10K_READ_REG(hw, FM10K_TXDCTL(i)); + if (!~reg || !(reg & FM10K_TXDCTL_ENABLE)) { + reg = FM10K_READ_REG(hw, FM10K_RXQCTL(i)); + if (!~reg || !(reg & FM10K_RXQCTL_ENABLE)) { + i++; + continue; + } + } + + /* decrement time and wait 1 usec */ + time--; + if (time) + usec_delay(1); + } + + return FM10K_ERR_REQUESTS_PENDING; +} + +/** + * fm10k_stop_hw_generic - Stop Tx/Rx units + * @hw: pointer to hardware structure + * + **/ +s32 fm10k_stop_hw_generic(struct fm10k_hw *hw) +{ + DEBUGFUNC("fm10k_stop_hw_generic"); + + return fm10k_disable_queues_generic(hw, hw->mac.max_queues); +} + +/** + * fm10k_read_hw_stats_32b - Reads value of 32-bit registers + * @hw: pointer to the hardware structure + * @addr: address of register containing a 32-bit value + * + * Function reads the content of the register and returns the delta + * between the base and the current value. 
+ * **/ +u32 fm10k_read_hw_stats_32b(struct fm10k_hw *hw, u32 addr, + struct fm10k_hw_stat *stat) +{ + u32 delta = FM10K_READ_REG(hw, addr) - stat->base_l; + + DEBUGFUNC("fm10k_read_hw_stats_32b"); + + if (FM10K_REMOVED(hw->hw_addr)) + stat->base_h = 0; + + return delta; +} + +/** + * fm10k_read_hw_stats_48b - Reads value of 48-bit registers + * @hw: pointer to the hardware structure + * @addr: address of register containing the lower 32-bit value + * + * Function reads the content of 2 registers, combined to represent a 48-bit + * statistical value. Extra processing is required to handle overflowing. + * Finally, a delta value is returned representing the difference between the + * values stored in registers and values stored in the statistic counters. + * **/ +STATIC u64 fm10k_read_hw_stats_48b(struct fm10k_hw *hw, u32 addr, + struct fm10k_hw_stat *stat) +{ + u32 count_l; + u32 count_h; + u32 count_tmp; + u64 delta; + + DEBUGFUNC("fm10k_read_hw_stats_48b"); + + count_h = FM10K_READ_REG(hw, addr + 1); + + /* Check for overflow */ + do { + count_tmp = count_h; + count_l = FM10K_READ_REG(hw, addr); + count_h = FM10K_READ_REG(hw, addr + 1); + } while (count_h != count_tmp); + + delta = ((u64)(count_h - stat->base_h) << 32) + count_l; + delta -= stat->base_l; + + return delta & FM10K_48_BIT_MASK; +} + +/** + * fm10k_update_hw_base_48b - Updates 48-bit statistic base value + * @stat: pointer to the hardware statistic structure + * @delta: value to be updated into the hardware statistic structure + * + * Function receives a value and determines if an update is required based on + * a delta calculation. Only the base value will be updated. + **/ +STATIC void fm10k_update_hw_base_48b(struct fm10k_hw_stat *stat, u64 delta) +{ + DEBUGFUNC("fm10k_update_hw_base_48b"); + + if (!delta) + return; + + /* update lower 32 bits */ + delta += stat->base_l; + stat->base_l = (u32)delta; + + /* update upper 32 bits */ + stat->base_h += (u32)(delta >> 32); +} + +/** + * fm10k_update_hw_stats_tx_q - Updates TX queue statistics counters + * @hw: pointer to the hardware structure + * @q: pointer to the ring of hardware statistics queue + * @idx: index pointing to the start of the ring iteration + * + * Function updates the TX queue statistics counters that are related to the + * hardware. 
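The 48-bit statistics read above guards against the low DWORD rolling over mid-read by sampling high, low, high and retrying until the two high samples agree, then computing a delta against a recorded base (simplified below to a single 64-bit snapshot). A compact sketch of that read-until-stable pattern against a simulated register pair; the register model and names are invented for illustration.

#include <stdint.h>
#include <stdio.h>

/* simulated 48-bit hardware counter split across two 32-bit registers */
static uint64_t hw_counter = 0x0000FFFFFFFFULL;

static uint32_t read_low(void)  { return (uint32_t)hw_counter; }
static uint32_t read_high(void) { return (uint32_t)(hw_counter >> 32) & 0xFFFF; }

/* read the split counter without tearing: high, low, high until stable */
static uint64_t read_48b(void)
{
	uint32_t hi, lo, tmp;

	hi = read_high();
	do {
		tmp = hi;
		lo  = read_low();
		hi  = read_high();
	} while (hi != tmp);

	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	uint64_t base = read_48b();        /* snapshot at "rebind" time */

	hw_counter += 1000;                /* pretend traffic moved the counter */

	/* keep the delta within 48 bits, as the hardware counter would */
	uint64_t delta = (read_48b() - base) & 0xFFFFFFFFFFFFULL;
	printf("delta since base: %llu\n", (unsigned long long)delta);
	return 0;
}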
+ **/ +STATIC void fm10k_update_hw_stats_tx_q(struct fm10k_hw *hw, + struct fm10k_hw_stats_q *q, + u32 idx) +{ + u32 id_tx, id_tx_prev, tx_packets; + u64 tx_bytes = 0; + + DEBUGFUNC("fm10k_update_hw_stats_tx_q"); + + /* Retrieve TX Owner Data */ + id_tx = FM10K_READ_REG(hw, FM10K_TXQCTL(idx)); + + /* Process TX Ring */ + do { + tx_packets = fm10k_read_hw_stats_32b(hw, FM10K_QPTC(idx), + &q->tx_packets); + + if (tx_packets) + tx_bytes = fm10k_read_hw_stats_48b(hw, + FM10K_QBTC_L(idx), + &q->tx_bytes); + + /* Re-Check Owner Data */ + id_tx_prev = id_tx; + id_tx = FM10K_READ_REG(hw, FM10K_TXQCTL(idx)); + } while ((id_tx ^ id_tx_prev) & FM10K_TXQCTL_ID_MASK); + + /* drop non-ID bits and set VALID ID bit */ + id_tx &= FM10K_TXQCTL_ID_MASK; + id_tx |= FM10K_STAT_VALID; + + /* update packet counts */ + if (q->tx_stats_idx == id_tx) { + q->tx_packets.count += tx_packets; + q->tx_bytes.count += tx_bytes; + } + + /* update bases and record ID */ + fm10k_update_hw_base_32b(&q->tx_packets, tx_packets); + fm10k_update_hw_base_48b(&q->tx_bytes, tx_bytes); + + q->tx_stats_idx = id_tx; +} + +/** + * fm10k_update_hw_stats_rx_q - Updates RX queue statistics counters + * @hw: pointer to the hardware structure + * @q: pointer to the ring of hardware statistics queue + * @idx: index pointing to the start of the ring iteration + * + * Function updates the RX queue statistics counters that are related to the + * hardware. + **/ +STATIC void fm10k_update_hw_stats_rx_q(struct fm10k_hw *hw, + struct fm10k_hw_stats_q *q, + u32 idx) +{ + u32 id_rx, id_rx_prev, rx_packets, rx_drops; + u64 rx_bytes = 0; + + DEBUGFUNC("fm10k_update_hw_stats_rx_q"); + + /* Retrieve RX Owner Data */ + id_rx = FM10K_READ_REG(hw, FM10K_RXQCTL(idx)); + + /* Process RX Ring */ + do { + rx_drops = fm10k_read_hw_stats_32b(hw, FM10K_QPRDC(idx), + &q->rx_drops); + + rx_packets = fm10k_read_hw_stats_32b(hw, FM10K_QPRC(idx), + &q->rx_packets); + + if (rx_packets) + rx_bytes = fm10k_read_hw_stats_48b(hw, + FM10K_QBRC_L(idx), + &q->rx_bytes); + + /* Re-Check Owner Data */ + id_rx_prev = id_rx; + id_rx = FM10K_READ_REG(hw, FM10K_RXQCTL(idx)); + } while ((id_rx ^ id_rx_prev) & FM10K_RXQCTL_ID_MASK); + + /* drop non-ID bits and set VALID ID bit */ + id_rx &= FM10K_RXQCTL_ID_MASK; + id_rx |= FM10K_STAT_VALID; + + /* update packet counts */ + if (q->rx_stats_idx == id_rx) { + q->rx_drops.count += rx_drops; + q->rx_packets.count += rx_packets; + q->rx_bytes.count += rx_bytes; + } + + /* update bases and record ID */ + fm10k_update_hw_base_32b(&q->rx_drops, rx_drops); + fm10k_update_hw_base_32b(&q->rx_packets, rx_packets); + fm10k_update_hw_base_48b(&q->rx_bytes, rx_bytes); + + q->rx_stats_idx = id_rx; +} + +/** + * fm10k_update_hw_stats_q - Updates queue statistics counters + * @hw: pointer to the hardware structure + * @q: pointer to the ring of hardware statistics queue + * @idx: index pointing to the start of the ring iteration + * @count: number of queues to iterate over + * + * Function updates the queue statistics counters that are related to the + * hardware. 
+ **/ +void fm10k_update_hw_stats_q(struct fm10k_hw *hw, struct fm10k_hw_stats_q *q, + u32 idx, u32 count) +{ + u32 i; + + DEBUGFUNC("fm10k_update_hw_stats_q"); + + for (i = 0; i < count; i++, idx++, q++) { + fm10k_update_hw_stats_tx_q(hw, q, idx); + fm10k_update_hw_stats_rx_q(hw, q, idx); + } +} + +/** + * fm10k_unbind_hw_stats_q - Unbind the queue counters from their queues + * @hw: pointer to the hardware structure + * @q: pointer to the ring of hardware statistics queue + * @idx: index pointing to the start of the ring iteration + * @count: number of queues to iterate over + * + * Function invalidates the index values for the queues so any updates that + * may have happened are ignored and the base for the queue stats is reset. + **/ +void fm10k_unbind_hw_stats_q(struct fm10k_hw_stats_q *q, u32 idx, u32 count) +{ + u32 i; + + for (i = 0; i < count; i++, idx++, q++) { + q->rx_stats_idx = 0; + q->tx_stats_idx = 0; + } +} + +/** + * fm10k_get_host_state_generic - Returns the state of the host + * @hw: pointer to hardware structure + * @host_ready: pointer to boolean value that will record host state + * + * This function will check the health of the mailbox and Tx queue 0 + * in order to determine if we should report that the link is up or not. + **/ +s32 fm10k_get_host_state_generic(struct fm10k_hw *hw, bool *host_ready) +{ + struct fm10k_mbx_info *mbx = &hw->mbx; + struct fm10k_mac_info *mac = &hw->mac; + s32 ret_val = FM10K_SUCCESS; + u32 txdctl = FM10K_READ_REG(hw, FM10K_TXDCTL(0)); + + DEBUGFUNC("fm10k_get_host_state_generic"); + + /* process upstream mailbox in case interrupts were disabled */ + mbx->ops.process(hw, mbx); + + /* If Tx is no longer enabled link should come down */ + if (!(~txdctl) || !(txdctl & FM10K_TXDCTL_ENABLE)) + mac->get_host_state = true; + + /* exit if not checking for link, or link cannot be changed */ + if (!mac->get_host_state || !(~txdctl)) + goto out; + + /* if we somehow dropped the Tx enable we should reset */ + if (mac->tx_ready && !(txdctl & FM10K_TXDCTL_ENABLE)) { + ret_val = FM10K_ERR_RESET_REQUESTED; + goto out; + } + + /* if Mailbox timed out we should request reset */ + if (!mbx->timeout) { + ret_val = FM10K_ERR_RESET_REQUESTED; + goto out; + } + + /* verify Mailbox is still valid */ + if (!mbx->ops.tx_ready(mbx, FM10K_VFMBX_MSG_MTU)) + goto out; + + /* interface cannot receive traffic without logical ports */ + if (mac->dglort_map == FM10K_DGLORTMAP_NONE) { + if (mac->ops.request_lport_map) + ret_val = mac->ops.request_lport_map(hw); + + goto out; + } + + /* if we passed all the tests above then the switch is ready and we no + * longer need to check for link + */ + mac->get_host_state = false; + +out: + *host_ready = !mac->get_host_state; + return ret_val; +} diff --git a/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_common.h b/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_common.h new file mode 100644 index 000000000..91304b072 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_common.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2013 - 2015 Intel Corporation + */ + +#ifndef _FM10K_COMMON_H_ +#define _FM10K_COMMON_H_ + +#include "fm10k_type.h" + +u16 fm10k_get_pcie_msix_count_generic(struct fm10k_hw *hw); +s32 fm10k_init_ops_generic(struct fm10k_hw *hw); +s32 fm10k_disable_queues_generic(struct fm10k_hw *hw, u16 q_cnt); +s32 fm10k_start_hw_generic(struct fm10k_hw *hw); +s32 fm10k_stop_hw_generic(struct fm10k_hw *hw); +u32 fm10k_read_hw_stats_32b(struct fm10k_hw *hw, u32 addr, + struct fm10k_hw_stat *stat); 
+#define fm10k_update_hw_base_32b(stat, delta) ((stat)->base_l += (delta)) +void fm10k_update_hw_stats_q(struct fm10k_hw *hw, struct fm10k_hw_stats_q *q, + u32 idx, u32 count); +#define fm10k_unbind_hw_stats_32b(s) ((s)->base_h = 0) +void fm10k_unbind_hw_stats_q(struct fm10k_hw_stats_q *q, u32 idx, u32 count); +s32 fm10k_get_host_state_generic(struct fm10k_hw *hw, bool *host_ready); +#endif /* _FM10K_COMMON_H_ */ diff --git a/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_mbx.c b/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_mbx.c new file mode 100644 index 000000000..2bb0d82ef --- /dev/null +++ b/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_mbx.c @@ -0,0 +1,2225 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2013 - 2015 Intel Corporation + */ + +#include "fm10k_common.h" + +/** + * fm10k_fifo_init - Initialize a message FIFO + * @fifo: pointer to FIFO + * @buffer: pointer to memory to be used to store FIFO + * @size: maximum message size to store in FIFO, must be 2^n - 1 + **/ +STATIC void fm10k_fifo_init(struct fm10k_mbx_fifo *fifo, u32 *buffer, u16 size) +{ + fifo->buffer = buffer; + fifo->size = size; + fifo->head = 0; + fifo->tail = 0; +} + +/** + * fm10k_fifo_used - Retrieve used space in FIFO + * @fifo: pointer to FIFO + * + * This function returns the number of DWORDs used in the FIFO + **/ +STATIC u16 fm10k_fifo_used(struct fm10k_mbx_fifo *fifo) +{ + return fifo->tail - fifo->head; +} + +/** + * fm10k_fifo_unused - Retrieve unused space in FIFO + * @fifo: pointer to FIFO + * + * This function returns the number of unused DWORDs in the FIFO + **/ +STATIC u16 fm10k_fifo_unused(struct fm10k_mbx_fifo *fifo) +{ + return fifo->size + fifo->head - fifo->tail; +} + +/** + * fm10k_fifo_empty - Test to verify if FIFO is empty + * @fifo: pointer to FIFO + * + * This function returns true if the FIFO is empty, else false + **/ +STATIC bool fm10k_fifo_empty(struct fm10k_mbx_fifo *fifo) +{ + return fifo->head == fifo->tail; +} + +/** + * fm10k_fifo_head_offset - returns indices of head with given offset + * @fifo: pointer to FIFO + * @offset: offset to add to head + * + * This function returns the indices into the FIFO based on head + offset + **/ +STATIC u16 fm10k_fifo_head_offset(struct fm10k_mbx_fifo *fifo, u16 offset) +{ + return (fifo->head + offset) & (fifo->size - 1); +} + +/** + * fm10k_fifo_tail_offset - returns indices of tail with given offset + * @fifo: pointer to FIFO + * @offset: offset to add to tail + * + * This function returns the indices into the FIFO based on tail + offset + **/ +STATIC u16 fm10k_fifo_tail_offset(struct fm10k_mbx_fifo *fifo, u16 offset) +{ + return (fifo->tail + offset) & (fifo->size - 1); +} + +/** + * fm10k_fifo_head_len - Retrieve length of first message in FIFO + * @fifo: pointer to FIFO + * + * This function returns the size of the first message in the FIFO + **/ +STATIC u16 fm10k_fifo_head_len(struct fm10k_mbx_fifo *fifo) +{ + u32 *head = fifo->buffer + fm10k_fifo_head_offset(fifo, 0); + + /* verify there is at least 1 DWORD in the fifo so *head is valid */ + if (fm10k_fifo_empty(fifo)) + return 0; + + /* retieve the message length */ + return FM10K_TLV_DWORD_LEN(*head); +} + +/** + * fm10k_fifo_head_drop - Drop the first message in FIFO + * @fifo: pointer to FIFO + * + * This function returns the size of the message dropped from the FIFO + **/ +STATIC u16 fm10k_fifo_head_drop(struct fm10k_mbx_fifo *fifo) +{ + u16 len = fm10k_fifo_head_len(fifo); + + /* update head so it is at the start of next frame */ + fifo->head += len; + + return len; +} + 
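The FIFO helpers above never mask head and tail as they advance; only the offset lookups wrap, using size - 1 as a mask, which is why the size must be a power of two and why used and unused space fall out of plain subtraction. A small standalone sketch of that indexing scheme; the buffer size and contents are arbitrary.

#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 8              /* must be a power of two */

struct ring {
	uint32_t buf[RING_SIZE];
	uint16_t head;           /* grows without bound ... */
	uint16_t tail;           /* ... only offsets are masked */
};

static uint16_t ring_used(const struct ring *r)   { return r->tail - r->head; }
static uint16_t ring_unused(const struct ring *r) { return RING_SIZE + r->head - r->tail; }
static uint16_t ring_off(uint16_t idx)            { return idx & (RING_SIZE - 1); }

int main(void)
{
	struct ring r = { .head = 0, .tail = 0 };

	for (uint32_t v = 0; v < 10; v++) {          /* wraps past the end */
		if (!ring_unused(&r))
			r.head++;                    /* drop the oldest entry */
		r.buf[ring_off(r.tail++)] = v;
	}

	printf("used=%u unused=%u newest=%u oldest=%u\n",
	       ring_used(&r), ring_unused(&r),
	       r.buf[ring_off(r.tail - 1)], r.buf[ring_off(r.head)]);
	return 0;
}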
+/** + * fm10k_fifo_drop_all - Drop all messages in FIFO + * @fifo: pointer to FIFO + * + * This function resets the head pointer to drop all messages in the FIFO and + * ensure the FIFO is empty. + **/ +STATIC void fm10k_fifo_drop_all(struct fm10k_mbx_fifo *fifo) +{ + fifo->head = fifo->tail; +} + +/** + * fm10k_mbx_index_len - Convert a head/tail index into a length value + * @mbx: pointer to mailbox + * @head: head index + * @tail: head index + * + * This function takes the head and tail index and determines the length + * of the data indicated by this pair. + **/ +STATIC u16 fm10k_mbx_index_len(struct fm10k_mbx_info *mbx, u16 head, u16 tail) +{ + u16 len = tail - head; + + /* we wrapped so subtract 2, one for index 0, one for all 1s index */ + if (len > tail) + len -= 2; + + return len & ((mbx->mbmem_len << 1) - 1); +} + +/** + * fm10k_mbx_tail_add - Determine new tail value with added offset + * @mbx: pointer to mailbox + * @offset: length to add to tail offset + * + * This function takes the local tail index and recomputes it for + * a given length added as an offset. + **/ +STATIC u16 fm10k_mbx_tail_add(struct fm10k_mbx_info *mbx, u16 offset) +{ + u16 tail = (mbx->tail + offset + 1) & ((mbx->mbmem_len << 1) - 1); + + /* add/sub 1 because we cannot have offset 0 or all 1s */ + return (tail > mbx->tail) ? --tail : ++tail; +} + +/** + * fm10k_mbx_tail_sub - Determine new tail value with subtracted offset + * @mbx: pointer to mailbox + * @offset: length to add to tail offset + * + * This function takes the local tail index and recomputes it for + * a given length added as an offset. + **/ +STATIC u16 fm10k_mbx_tail_sub(struct fm10k_mbx_info *mbx, u16 offset) +{ + u16 tail = (mbx->tail - offset - 1) & ((mbx->mbmem_len << 1) - 1); + + /* sub/add 1 because we cannot have offset 0 or all 1s */ + return (tail < mbx->tail) ? ++tail : --tail; +} + +/** + * fm10k_mbx_head_add - Determine new head value with added offset + * @mbx: pointer to mailbox + * @offset: length to add to head offset + * + * This function takes the local head index and recomputes it for + * a given length added as an offset. + **/ +STATIC u16 fm10k_mbx_head_add(struct fm10k_mbx_info *mbx, u16 offset) +{ + u16 head = (mbx->head + offset + 1) & ((mbx->mbmem_len << 1) - 1); + + /* add/sub 1 because we cannot have offset 0 or all 1s */ + return (head > mbx->head) ? --head : ++head; +} + +/** + * fm10k_mbx_head_sub - Determine new head value with subtracted offset + * @mbx: pointer to mailbox + * @offset: length to add to head offset + * + * This function takes the local head index and recomputes it for + * a given length added as an offset. + **/ +STATIC u16 fm10k_mbx_head_sub(struct fm10k_mbx_info *mbx, u16 offset) +{ + u16 head = (mbx->head - offset - 1) & ((mbx->mbmem_len << 1) - 1); + + /* sub/add 1 because we cannot have offset 0 or all 1s */ + return (head < mbx->head) ? ++head : --head; +} + +/** + * fm10k_mbx_pushed_tail_len - Retrieve the length of message being pushed + * @mbx: pointer to mailbox + * + * This function will return the length of the message currently being + * pushed onto the tail of the Rx queue. 
+ **/ +STATIC u16 fm10k_mbx_pushed_tail_len(struct fm10k_mbx_info *mbx) +{ + u32 *tail = mbx->rx.buffer + fm10k_fifo_tail_offset(&mbx->rx, 0); + + /* pushed tail is only valid if pushed is set */ + if (!mbx->pushed) + return 0; + + return FM10K_TLV_DWORD_LEN(*tail); +} + +/** + * fm10k_fifo_write_copy - pulls data off of msg and places it in FIFO + * @fifo: pointer to FIFO + * @msg: message array to populate + * @tail_offset: additional offset to add to tail pointer + * @len: length of FIFO to copy into message header + * + * This function will take a message and copy it into a section of the + * FIFO. In order to get something into a location other than just + * the tail you can use tail_offset to adjust the pointer. + **/ +STATIC void fm10k_fifo_write_copy(struct fm10k_mbx_fifo *fifo, + const u32 *msg, u16 tail_offset, u16 len) +{ + u16 end = fm10k_fifo_tail_offset(fifo, tail_offset); + u32 *tail = fifo->buffer + end; + + /* track when we should cross the end of the FIFO */ + end = fifo->size - end; + + /* copy end of message before start of message */ + if (end < len) + memcpy(fifo->buffer, msg + end, (len - end) << 2); + else + end = len; + + /* Copy remaining message into Tx FIFO */ + memcpy(tail, msg, end << 2); +} + +/** + * fm10k_fifo_enqueue - Enqueues the message to the tail of the FIFO + * @fifo: pointer to FIFO + * @msg: message array to read + * + * This function enqueues a message up to the size specified by the length + * contained in the first DWORD of the message and will place at the tail + * of the FIFO. It will return 0 on success, or a negative value on error. + **/ +STATIC s32 fm10k_fifo_enqueue(struct fm10k_mbx_fifo *fifo, const u32 *msg) +{ + u16 len = FM10K_TLV_DWORD_LEN(*msg); + + DEBUGFUNC("fm10k_fifo_enqueue"); + + /* verify parameters */ + if (len > fifo->size) + return FM10K_MBX_ERR_SIZE; + + /* verify there is room for the message */ + if (len > fm10k_fifo_unused(fifo)) + return FM10K_MBX_ERR_NO_SPACE; + + /* Copy message into FIFO */ + fm10k_fifo_write_copy(fifo, msg, 0, len); + + /* memory barrier to guarantee FIFO is written before tail update */ + FM10K_WMB(); + + /* Update Tx FIFO tail */ + fifo->tail += len; + + return FM10K_SUCCESS; +} + +/** + * fm10k_mbx_validate_msg_size - Validate incoming message based on size + * @mbx: pointer to mailbox + * @len: length of data pushed onto buffer + * + * This function analyzes the frame and will return a non-zero value when + * the start of a message larger than the mailbox is detected. + **/ +STATIC u16 fm10k_mbx_validate_msg_size(struct fm10k_mbx_info *mbx, u16 len) +{ + struct fm10k_mbx_fifo *fifo = &mbx->rx; + u16 total_len = 0, msg_len; + u32 *msg; + + DEBUGFUNC("fm10k_mbx_validate_msg_size"); + + /* length should include previous amounts pushed */ + len += mbx->pushed; + + /* offset in message is based off of current message size */ + do { + msg = fifo->buffer + fm10k_fifo_tail_offset(fifo, total_len); + msg_len = FM10K_TLV_DWORD_LEN(*msg); + total_len += msg_len; + } while (total_len < len); + + /* message extends out of pushed section, but fits in FIFO */ + if ((len < total_len) && (msg_len <= mbx->max_size)) + return 0; + + /* return length of invalid section */ + return (len < total_len) ? len : (len - total_len); +} + +/** + * fm10k_mbx_write_copy - pulls data off of Tx FIFO and places it in mbmem + * @hw: pointer to hardware structure + * @mbx: pointer to mailbox + * + * This function will take a section of the Tx FIFO and copy it into the + * mailbox memory. 
The offset in mbmem is based on the lower bits of the + * tail and len determines the length to copy. + **/ +STATIC void fm10k_mbx_write_copy(struct fm10k_hw *hw, + struct fm10k_mbx_info *mbx) +{ + struct fm10k_mbx_fifo *fifo = &mbx->tx; + u32 mbmem = mbx->mbmem_reg; + u32 *head = fifo->buffer; + u16 end, len, tail, mask; + + DEBUGFUNC("fm10k_mbx_write_copy"); + + if (!mbx->tail_len) + return; + + /* determine data length and mbmem tail index */ + mask = mbx->mbmem_len - 1; + len = mbx->tail_len; + tail = fm10k_mbx_tail_sub(mbx, len); + if (tail > mask) + tail++; + + /* determine offset in the ring */ + end = fm10k_fifo_head_offset(fifo, mbx->pulled); + head += end; + + /* memory barrier to guarantee data is ready to be read */ + FM10K_RMB(); + + /* Copy message from Tx FIFO */ + for (end = fifo->size - end; len; head = fifo->buffer) { + do { + /* adjust tail to match offset for FIFO */ + tail &= mask; + if (!tail) + tail++; + + mbx->tx_mbmem_pulled++; + + /* write message to hardware FIFO */ + FM10K_WRITE_MBX(hw, mbmem + tail++, *(head++)); + } while (--len && --end); + } +} + +/** + * fm10k_mbx_pull_head - Pulls data off of head of Tx FIFO + * @hw: pointer to hardware structure + * @mbx: pointer to mailbox + * @head: acknowledgement number last received + * + * This function will push the tail index forward based on the remote + * head index. It will then pull up to mbmem_len DWORDs off of the + * head of the FIFO and will place it in the MBMEM registers + * associated with the mailbox. + **/ +STATIC void fm10k_mbx_pull_head(struct fm10k_hw *hw, + struct fm10k_mbx_info *mbx, u16 head) +{ + u16 mbmem_len, len, ack = fm10k_mbx_index_len(mbx, head, mbx->tail); + struct fm10k_mbx_fifo *fifo = &mbx->tx; + + /* update number of bytes pulled and update bytes in transit */ + mbx->pulled += mbx->tail_len - ack; + + /* determine length of data to pull, reserve space for mbmem header */ + mbmem_len = mbx->mbmem_len - 1; + len = fm10k_fifo_used(fifo) - mbx->pulled; + if (len > mbmem_len) + len = mbmem_len; + + /* update tail and record number of bytes in transit */ + mbx->tail = fm10k_mbx_tail_add(mbx, len - ack); + mbx->tail_len = len; + + /* drop pulled messages from the FIFO */ + for (len = fm10k_fifo_head_len(fifo); + len && (mbx->pulled >= len); + len = fm10k_fifo_head_len(fifo)) { + mbx->pulled -= fm10k_fifo_head_drop(fifo); + mbx->tx_messages++; + mbx->tx_dwords += len; + } + + /* Copy message out from the Tx FIFO */ + fm10k_mbx_write_copy(hw, mbx); +} + +/** + * fm10k_mbx_read_copy - pulls data off of mbmem and places it in Rx FIFO + * @hw: pointer to hardware structure + * @mbx: pointer to mailbox + * + * This function will take a section of the mailbox memory and copy it + * into the Rx FIFO. The offset is based on the lower bits of the + * head and len determines the length to copy. 
+ **/ +STATIC void fm10k_mbx_read_copy(struct fm10k_hw *hw, + struct fm10k_mbx_info *mbx) +{ + struct fm10k_mbx_fifo *fifo = &mbx->rx; + u32 mbmem = mbx->mbmem_reg ^ mbx->mbmem_len; + u32 *tail = fifo->buffer; + u16 end, len, head; + + DEBUGFUNC("fm10k_mbx_read_copy"); + + /* determine data length and mbmem head index */ + len = mbx->head_len; + head = fm10k_mbx_head_sub(mbx, len); + if (head >= mbx->mbmem_len) + head++; + + /* determine offset in the ring */ + end = fm10k_fifo_tail_offset(fifo, mbx->pushed); + tail += end; + + /* Copy message into Rx FIFO */ + for (end = fifo->size - end; len; tail = fifo->buffer) { + do { + /* adjust head to match offset for FIFO */ + head &= mbx->mbmem_len - 1; + if (!head) + head++; + + mbx->rx_mbmem_pushed++; + + /* read message from hardware FIFO */ + *(tail++) = FM10K_READ_MBX(hw, mbmem + head++); + } while (--len && --end); + } + + /* memory barrier to guarantee FIFO is written before tail update */ + FM10K_WMB(); +} + +/** + * fm10k_mbx_push_tail - Pushes up to 15 DWORDs on to tail of FIFO + * @hw: pointer to hardware structure + * @mbx: pointer to mailbox + * @tail: tail index of message + * + * This function will first validate the tail index and size for the + * incoming message. It then updates the acknowledgment number and + * copies the data into the FIFO. It will return the number of messages + * dequeued on success and a negative value on error. + **/ +STATIC s32 fm10k_mbx_push_tail(struct fm10k_hw *hw, + struct fm10k_mbx_info *mbx, + u16 tail) +{ + struct fm10k_mbx_fifo *fifo = &mbx->rx; + u16 len, seq = fm10k_mbx_index_len(mbx, mbx->head, tail); + + DEBUGFUNC("fm10k_mbx_push_tail"); + + /* determine length of data to push */ + len = fm10k_fifo_unused(fifo) - mbx->pushed; + if (len > seq) + len = seq; + + /* update head and record bytes received */ + mbx->head = fm10k_mbx_head_add(mbx, len); + mbx->head_len = len; + + /* nothing to do if there is no data */ + if (!len) + return FM10K_SUCCESS; + + /* Copy msg into Rx FIFO */ + fm10k_mbx_read_copy(hw, mbx); + + /* determine if there are any invalid lengths in message */ + if (fm10k_mbx_validate_msg_size(mbx, len)) + return FM10K_MBX_ERR_SIZE; + + /* Update pushed */ + mbx->pushed += len; + + /* flush any completed messages */ + for (len = fm10k_mbx_pushed_tail_len(mbx); + len && (mbx->pushed >= len); + len = fm10k_mbx_pushed_tail_len(mbx)) { + fifo->tail += len; + mbx->pushed -= len; + mbx->rx_messages++; + mbx->rx_dwords += len; + } + + return FM10K_SUCCESS; +} + +/* pre-generated data for generating the CRC based on the poly 0xAC9A. 
*/ +static const u16 fm10k_crc_16b_table[256] = { + 0x0000, 0x7956, 0xF2AC, 0x8BFA, 0xBC6D, 0xC53B, 0x4EC1, 0x3797, + 0x21EF, 0x58B9, 0xD343, 0xAA15, 0x9D82, 0xE4D4, 0x6F2E, 0x1678, + 0x43DE, 0x3A88, 0xB172, 0xC824, 0xFFB3, 0x86E5, 0x0D1F, 0x7449, + 0x6231, 0x1B67, 0x909D, 0xE9CB, 0xDE5C, 0xA70A, 0x2CF0, 0x55A6, + 0x87BC, 0xFEEA, 0x7510, 0x0C46, 0x3BD1, 0x4287, 0xC97D, 0xB02B, + 0xA653, 0xDF05, 0x54FF, 0x2DA9, 0x1A3E, 0x6368, 0xE892, 0x91C4, + 0xC462, 0xBD34, 0x36CE, 0x4F98, 0x780F, 0x0159, 0x8AA3, 0xF3F5, + 0xE58D, 0x9CDB, 0x1721, 0x6E77, 0x59E0, 0x20B6, 0xAB4C, 0xD21A, + 0x564D, 0x2F1B, 0xA4E1, 0xDDB7, 0xEA20, 0x9376, 0x188C, 0x61DA, + 0x77A2, 0x0EF4, 0x850E, 0xFC58, 0xCBCF, 0xB299, 0x3963, 0x4035, + 0x1593, 0x6CC5, 0xE73F, 0x9E69, 0xA9FE, 0xD0A8, 0x5B52, 0x2204, + 0x347C, 0x4D2A, 0xC6D0, 0xBF86, 0x8811, 0xF147, 0x7ABD, 0x03EB, + 0xD1F1, 0xA8A7, 0x235D, 0x5A0B, 0x6D9C, 0x14CA, 0x9F30, 0xE666, + 0xF01E, 0x8948, 0x02B2, 0x7BE4, 0x4C73, 0x3525, 0xBEDF, 0xC789, + 0x922F, 0xEB79, 0x6083, 0x19D5, 0x2E42, 0x5714, 0xDCEE, 0xA5B8, + 0xB3C0, 0xCA96, 0x416C, 0x383A, 0x0FAD, 0x76FB, 0xFD01, 0x8457, + 0xAC9A, 0xD5CC, 0x5E36, 0x2760, 0x10F7, 0x69A1, 0xE25B, 0x9B0D, + 0x8D75, 0xF423, 0x7FD9, 0x068F, 0x3118, 0x484E, 0xC3B4, 0xBAE2, + 0xEF44, 0x9612, 0x1DE8, 0x64BE, 0x5329, 0x2A7F, 0xA185, 0xD8D3, + 0xCEAB, 0xB7FD, 0x3C07, 0x4551, 0x72C6, 0x0B90, 0x806A, 0xF93C, + 0x2B26, 0x5270, 0xD98A, 0xA0DC, 0x974B, 0xEE1D, 0x65E7, 0x1CB1, + 0x0AC9, 0x739F, 0xF865, 0x8133, 0xB6A4, 0xCFF2, 0x4408, 0x3D5E, + 0x68F8, 0x11AE, 0x9A54, 0xE302, 0xD495, 0xADC3, 0x2639, 0x5F6F, + 0x4917, 0x3041, 0xBBBB, 0xC2ED, 0xF57A, 0x8C2C, 0x07D6, 0x7E80, + 0xFAD7, 0x8381, 0x087B, 0x712D, 0x46BA, 0x3FEC, 0xB416, 0xCD40, + 0xDB38, 0xA26E, 0x2994, 0x50C2, 0x6755, 0x1E03, 0x95F9, 0xECAF, + 0xB909, 0xC05F, 0x4BA5, 0x32F3, 0x0564, 0x7C32, 0xF7C8, 0x8E9E, + 0x98E6, 0xE1B0, 0x6A4A, 0x131C, 0x248B, 0x5DDD, 0xD627, 0xAF71, + 0x7D6B, 0x043D, 0x8FC7, 0xF691, 0xC106, 0xB850, 0x33AA, 0x4AFC, + 0x5C84, 0x25D2, 0xAE28, 0xD77E, 0xE0E9, 0x99BF, 0x1245, 0x6B13, + 0x3EB5, 0x47E3, 0xCC19, 0xB54F, 0x82D8, 0xFB8E, 0x7074, 0x0922, + 0x1F5A, 0x660C, 0xEDF6, 0x94A0, 0xA337, 0xDA61, 0x519B, 0x28CD }; + +/** + * fm10k_crc_16b - Generate a 16 bit CRC for a region of 16 bit data + * @data: pointer to data to process + * @seed: seed value for CRC + * @len: length measured in 16 bits words + * + * This function will generate a CRC based on the polynomial 0xAC9A and + * whatever value is stored in the seed variable. Note that this + * value inverts the local seed and the result in order to capture all + * leading and trailing zeros. 
+ */ +STATIC u16 fm10k_crc_16b(const u32 *data, u16 seed, u16 len) +{ + u32 result = seed; + + while (len--) { + result ^= *(data++); + result = (result >> 8) ^ fm10k_crc_16b_table[result & 0xFF]; + result = (result >> 8) ^ fm10k_crc_16b_table[result & 0xFF]; + + if (!(len--)) + break; + + result = (result >> 8) ^ fm10k_crc_16b_table[result & 0xFF]; + result = (result >> 8) ^ fm10k_crc_16b_table[result & 0xFF]; + } + + return (u16)result; +} + +/** + * fm10k_fifo_crc - generate a CRC based off of FIFO data + * @fifo: pointer to FIFO + * @offset: offset point for start of FIFO + * @len: number of DWORDS words to process + * @seed: seed value for CRC + * + * This function generates a CRC for some region of the FIFO + **/ +STATIC u16 fm10k_fifo_crc(struct fm10k_mbx_fifo *fifo, u16 offset, + u16 len, u16 seed) +{ + u32 *data = fifo->buffer + offset; + + /* track when we should cross the end of the FIFO */ + offset = fifo->size - offset; + + /* if we are in 2 blocks process the end of the FIFO first */ + if (offset < len) { + seed = fm10k_crc_16b(data, seed, offset * 2); + data = fifo->buffer; + len -= offset; + } + + /* process any remaining bits */ + return fm10k_crc_16b(data, seed, len * 2); +} + +/** + * fm10k_mbx_update_local_crc - Update the local CRC for outgoing data + * @mbx: pointer to mailbox + * @head: head index provided by remote mailbox + * + * This function will generate the CRC for all data from the end of the + * last head update to the current one. It uses the result of the + * previous CRC as the seed for this update. The result is stored in + * mbx->local. + **/ +STATIC void fm10k_mbx_update_local_crc(struct fm10k_mbx_info *mbx, u16 head) +{ + u16 len = mbx->tail_len - fm10k_mbx_index_len(mbx, head, mbx->tail); + + /* determine the offset for the start of the region to be pulled */ + head = fm10k_fifo_head_offset(&mbx->tx, mbx->pulled); + + /* update local CRC to include all of the pulled data */ + mbx->local = fm10k_fifo_crc(&mbx->tx, head, len, mbx->local); +} + +/** + * fm10k_mbx_verify_remote_crc - Verify the CRC is correct for current data + * @mbx: pointer to mailbox + * + * This function will take all data that has been provided from the remote + * end and generate a CRC for it. This is stored in mbx->remote. The + * CRC for the header is then computed and if the result is non-zero this + * is an error and we signal an error dropping all data and resetting the + * connection. + */ +STATIC s32 fm10k_mbx_verify_remote_crc(struct fm10k_mbx_info *mbx) +{ + struct fm10k_mbx_fifo *fifo = &mbx->rx; + u16 len = mbx->head_len; + u16 offset = fm10k_fifo_tail_offset(fifo, mbx->pushed) - len; + u16 crc; + + /* update the remote CRC if new data has been received */ + if (len) + mbx->remote = fm10k_fifo_crc(fifo, offset, len, mbx->remote); + + /* process the full header as we have to validate the CRC */ + crc = fm10k_crc_16b(&mbx->mbx_hdr, mbx->remote, 1); + + /* notify other end if we have a problem */ + return crc ? FM10K_MBX_ERR_CRC : FM10K_SUCCESS; +} + +/** + * fm10k_mbx_rx_ready - Indicates that a message is ready in the Rx FIFO + * @mbx: pointer to mailbox + * + * This function returns true if there is a message in the Rx FIFO to dequeue. 
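fm10k_fifo_crc above copes with regions that straddle the end of the ring by checksumming the tail segment first and feeding the running value back in as the seed for the segment at the start of the buffer. The sketch below shows that chaining with a trivial stand-in checksum; the driver itself uses the 16-bit CRC table above, but the split-at-the-wrap logic is the same.

#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 8   /* power of two, as in the mailbox FIFOs */

/* stand-in for the table-driven CRC: any seedable running checksum works */
static uint16_t sum16(const uint32_t *data, uint16_t seed, uint16_t len)
{
	while (len--)
		seed = (uint16_t)(seed + *data++);
	return seed;
}

/* checksum 'len' DWORDs starting at 'offset', handling the wrap in two runs */
static uint16_t ring_sum(const uint32_t *buf, uint16_t offset,
			 uint16_t len, uint16_t seed)
{
	uint16_t to_end = RING_SIZE - offset;

	if (to_end < len) {
		seed = sum16(buf + offset, seed, to_end); /* tail segment */
		return sum16(buf, seed, len - to_end);    /* wrapped remainder */
	}
	return sum16(buf + offset, seed, len);
}

int main(void)
{
	uint32_t buf[RING_SIZE] = { 1, 2, 3, 4, 5, 6, 7, 8 };

	/* a region of 5 DWORDs starting at offset 6 wraps: 7, 8, 1, 2, 3 */
	printf("checksum=%u\n", ring_sum(buf, 6, 5, 0));
	return 0;
}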
+ **/ +STATIC bool fm10k_mbx_rx_ready(struct fm10k_mbx_info *mbx) +{ + u16 msg_size = fm10k_fifo_head_len(&mbx->rx); + + return msg_size && (fm10k_fifo_used(&mbx->rx) >= msg_size); +} + +/** + * fm10k_mbx_tx_ready - Indicates that the mailbox is in state ready for Tx + * @mbx: pointer to mailbox + * @len: verify free space is >= this value + * + * This function returns true if the mailbox is in a state ready to transmit. + **/ +STATIC bool fm10k_mbx_tx_ready(struct fm10k_mbx_info *mbx, u16 len) +{ + u16 fifo_unused = fm10k_fifo_unused(&mbx->tx); + + return (mbx->state == FM10K_STATE_OPEN) && (fifo_unused >= len); +} + +/** + * fm10k_mbx_tx_complete - Indicates that the Tx FIFO has been emptied + * @mbx: pointer to mailbox + * + * This function returns true if the Tx FIFO is empty. + **/ +STATIC bool fm10k_mbx_tx_complete(struct fm10k_mbx_info *mbx) +{ + return fm10k_fifo_empty(&mbx->tx); +} + +/** + * fm10k_mbx_deqeueue_rx - Dequeues the message from the head in the Rx FIFO + * @hw: pointer to hardware structure + * @mbx: pointer to mailbox + * + * This function dequeues messages and hands them off to the TLV parser. + * It will return the number of messages processed when called. + **/ +STATIC u16 fm10k_mbx_dequeue_rx(struct fm10k_hw *hw, + struct fm10k_mbx_info *mbx) +{ + struct fm10k_mbx_fifo *fifo = &mbx->rx; + s32 err; + u16 cnt; + + /* parse Rx messages out of the Rx FIFO to empty it */ + for (cnt = 0; !fm10k_fifo_empty(fifo); cnt++) { + err = fm10k_tlv_msg_parse(hw, fifo->buffer + fifo->head, + mbx, mbx->msg_data); + if (err < 0) + mbx->rx_parse_err++; + + fm10k_fifo_head_drop(fifo); + } + + /* shift remaining bytes back to start of FIFO */ + memmove(fifo->buffer, fifo->buffer + fifo->tail, mbx->pushed << 2); + + /* shift head and tail based on the memory we moved */ + fifo->tail -= fifo->head; + fifo->head = 0; + + return cnt; +} + +/** + * fm10k_mbx_enqueue_tx - Enqueues the message to the tail of the Tx FIFO + * @hw: pointer to hardware structure + * @mbx: pointer to mailbox + * @msg: message array to read + * + * This function enqueues a message up to the size specified by the length + * contained in the first DWORD of the message and will place at the tail + * of the FIFO. It will return 0 on success, or a negative value on error. 
+ **/ +STATIC s32 fm10k_mbx_enqueue_tx(struct fm10k_hw *hw, + struct fm10k_mbx_info *mbx, const u32 *msg) +{ + u32 countdown = mbx->timeout; + s32 err; + + switch (mbx->state) { + case FM10K_STATE_CLOSED: + case FM10K_STATE_DISCONNECT: + return FM10K_MBX_ERR_NO_MBX; + default: + break; + } + + /* enqueue the message on the Tx FIFO */ + err = fm10k_fifo_enqueue(&mbx->tx, msg); + + /* if it failed give the FIFO a chance to drain */ + while (err && countdown) { + countdown--; + usec_delay(mbx->usec_delay); + mbx->ops.process(hw, mbx); + err = fm10k_fifo_enqueue(&mbx->tx, msg); + } + + /* if we failed treat the error */ + if (err) { + mbx->timeout = 0; + mbx->tx_busy++; + } + + /* begin processing message, ignore errors as this is just meant + * to start the mailbox flow so we are not concerned if there + * is a bad error, or the mailbox is already busy with a request + */ + if (!mbx->tail_len) + mbx->ops.process(hw, mbx); + + return FM10K_SUCCESS; +} + +/** + * fm10k_mbx_read - Copies the mbmem to local message buffer + * @hw: pointer to hardware structure + * @mbx: pointer to mailbox + * + * This function copies the message from the mbmem to the message array + **/ +STATIC s32 fm10k_mbx_read(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx) +{ + DEBUGFUNC("fm10k_mbx_read"); + + /* only allow one reader in here at a time */ + if (mbx->mbx_hdr) + return FM10K_MBX_ERR_BUSY; + + /* read to capture initial interrupt bits */ + if (FM10K_READ_MBX(hw, mbx->mbx_reg) & FM10K_MBX_REQ_INTERRUPT) + mbx->mbx_lock = FM10K_MBX_ACK; + + /* write back interrupt bits to clear */ + FM10K_WRITE_MBX(hw, mbx->mbx_reg, + FM10K_MBX_REQ_INTERRUPT | FM10K_MBX_ACK_INTERRUPT); + + /* read remote header */ + mbx->mbx_hdr = FM10K_READ_MBX(hw, mbx->mbmem_reg ^ mbx->mbmem_len); + + return FM10K_SUCCESS; +} + +/** + * fm10k_mbx_write - Copies the local message buffer to mbmem + * @hw: pointer to hardware structure + * @mbx: pointer to mailbox + * + * This function copies the message from the message array to mbmem + **/ +STATIC void fm10k_mbx_write(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx) +{ + u32 mbmem = mbx->mbmem_reg; + + DEBUGFUNC("fm10k_mbx_write"); + + /* write new msg header to notify recipient of change */ + FM10K_WRITE_MBX(hw, mbmem, mbx->mbx_hdr); + + /* write mailbox to send interrupt */ + if (mbx->mbx_lock) + FM10K_WRITE_MBX(hw, mbx->mbx_reg, mbx->mbx_lock); + + /* we no longer are using the header so free it */ + mbx->mbx_hdr = 0; + mbx->mbx_lock = 0; +} + +/** + * fm10k_mbx_create_connect_hdr - Generate a connect mailbox header + * @mbx: pointer to mailbox + * + * This function returns a connection mailbox header + **/ +STATIC void fm10k_mbx_create_connect_hdr(struct fm10k_mbx_info *mbx) +{ + mbx->mbx_lock |= FM10K_MBX_REQ; + + mbx->mbx_hdr = FM10K_MSG_HDR_FIELD_SET(FM10K_MSG_CONNECT, TYPE) | + FM10K_MSG_HDR_FIELD_SET(mbx->head, HEAD) | + FM10K_MSG_HDR_FIELD_SET(mbx->rx.size - 1, CONNECT_SIZE); +} + +/** + * fm10k_mbx_create_data_hdr - Generate a data mailbox header + * @mbx: pointer to mailbox + * + * This function returns a data mailbox header + **/ +STATIC void fm10k_mbx_create_data_hdr(struct fm10k_mbx_info *mbx) +{ + u32 hdr = FM10K_MSG_HDR_FIELD_SET(FM10K_MSG_DATA, TYPE) | + FM10K_MSG_HDR_FIELD_SET(mbx->tail, TAIL) | + FM10K_MSG_HDR_FIELD_SET(mbx->head, HEAD); + struct fm10k_mbx_fifo *fifo = &mbx->tx; + u16 crc; + + if (mbx->tail_len) + mbx->mbx_lock |= FM10K_MBX_REQ; + + /* generate CRC for data in flight and header */ + crc = fm10k_fifo_crc(fifo, fm10k_fifo_head_offset(fifo, mbx->pulled), + 
mbx->tail_len, mbx->local); + crc = fm10k_crc_16b(&hdr, crc, 1); + + /* load header to memory to be written */ + mbx->mbx_hdr = hdr | FM10K_MSG_HDR_FIELD_SET(crc, CRC); +} + +/** + * fm10k_mbx_create_disconnect_hdr - Generate a disconnect mailbox header + * @mbx: pointer to mailbox + * + * This function returns a disconnect mailbox header + **/ +STATIC void fm10k_mbx_create_disconnect_hdr(struct fm10k_mbx_info *mbx) +{ + u32 hdr = FM10K_MSG_HDR_FIELD_SET(FM10K_MSG_DISCONNECT, TYPE) | + FM10K_MSG_HDR_FIELD_SET(mbx->tail, TAIL) | + FM10K_MSG_HDR_FIELD_SET(mbx->head, HEAD); + u16 crc = fm10k_crc_16b(&hdr, mbx->local, 1); + + mbx->mbx_lock |= FM10K_MBX_ACK; + + /* load header to memory to be written */ + mbx->mbx_hdr = hdr | FM10K_MSG_HDR_FIELD_SET(crc, CRC); +} + +/** + * fm10k_mbx_create_fake_disconnect_hdr - Generate a false disconnect mbox hdr + * @mbx: pointer to mailbox + * + * This function creates a fake disconnect header for loading into remote + * mailbox header. The primary purpose is to prevent errors on immediate + * start up after mbx->connect. + **/ +STATIC void fm10k_mbx_create_fake_disconnect_hdr(struct fm10k_mbx_info *mbx) +{ + u32 hdr = FM10K_MSG_HDR_FIELD_SET(FM10K_MSG_DISCONNECT, TYPE) | + FM10K_MSG_HDR_FIELD_SET(mbx->head, TAIL) | + FM10K_MSG_HDR_FIELD_SET(mbx->tail, HEAD); + u16 crc = fm10k_crc_16b(&hdr, mbx->local, 1); + + mbx->mbx_lock |= FM10K_MBX_ACK; + + /* load header to memory to be written */ + mbx->mbx_hdr = hdr | FM10K_MSG_HDR_FIELD_SET(crc, CRC); +} + +/** + * fm10k_mbx_create_error_msg - Generate an error message + * @mbx: pointer to mailbox + * @err: local error encountered + * + * This function will interpret the error provided by err, and based on + * that it may shift the message by 1 DWORD and then place an error header + * at the start of the message. + **/ +STATIC void fm10k_mbx_create_error_msg(struct fm10k_mbx_info *mbx, s32 err) +{ + /* only generate an error message for these types */ + switch (err) { + case FM10K_MBX_ERR_TAIL: + case FM10K_MBX_ERR_HEAD: + case FM10K_MBX_ERR_TYPE: + case FM10K_MBX_ERR_SIZE: + case FM10K_MBX_ERR_RSVD0: + case FM10K_MBX_ERR_CRC: + break; + default: + return; + } + + mbx->mbx_lock |= FM10K_MBX_REQ; + + mbx->mbx_hdr = FM10K_MSG_HDR_FIELD_SET(FM10K_MSG_ERROR, TYPE) | + FM10K_MSG_HDR_FIELD_SET(err, ERR_NO) | + FM10K_MSG_HDR_FIELD_SET(mbx->head, HEAD); +} + +/** + * fm10k_mbx_validate_msg_hdr - Validate common fields in the message header + * @mbx: pointer to mailbox + * + * This function will parse up the fields in the mailbox header and return + * an error if the header contains any of a number of invalid configurations + * including unrecognized type, invalid route, or a malformed message. 
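Every mailbox header above is built and parsed with FIELD_SET/FIELD_GET style shift-and-mask macros, and the validation routine that follows pulls the same fields back out. A self-contained sketch of that packing idiom; the field names, widths and positions here are invented for the example and are not the real FM10K header layout.

#include <stdint.h>
#include <stdio.h>

/* hypothetical layout: TYPE in bits 0-3, HEAD in bits 4-11, TAIL in 12-19 */
#define EX_TYPE_SHIFT 0
#define EX_TYPE_SIZE  4
#define EX_HEAD_SHIFT 4
#define EX_HEAD_SIZE  8
#define EX_TAIL_SHIFT 12
#define EX_TAIL_SIZE  8

#define EX_MASK(field) \
	((1u << EX_##field##_SIZE) - 1)
#define EX_FIELD_SET(val, field) \
	(((uint32_t)(val) & EX_MASK(field)) << EX_##field##_SHIFT)
#define EX_FIELD_GET(hdr, field) \
	(((hdr) >> EX_##field##_SHIFT) & EX_MASK(field))

int main(void)
{
	uint32_t hdr = EX_FIELD_SET(2, TYPE) |
		       EX_FIELD_SET(17, HEAD) |
		       EX_FIELD_SET(200, TAIL);

	printf("type=%u head=%u tail=%u\n",
	       (unsigned)EX_FIELD_GET(hdr, TYPE),
	       (unsigned)EX_FIELD_GET(hdr, HEAD),
	       (unsigned)EX_FIELD_GET(hdr, TAIL));
	return 0;
}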
+ **/ +STATIC s32 fm10k_mbx_validate_msg_hdr(struct fm10k_mbx_info *mbx) +{ + u16 type, rsvd0, head, tail, size; + const u32 *hdr = &mbx->mbx_hdr; + + DEBUGFUNC("fm10k_mbx_validate_msg_hdr"); + + type = FM10K_MSG_HDR_FIELD_GET(*hdr, TYPE); + rsvd0 = FM10K_MSG_HDR_FIELD_GET(*hdr, RSVD0); + tail = FM10K_MSG_HDR_FIELD_GET(*hdr, TAIL); + head = FM10K_MSG_HDR_FIELD_GET(*hdr, HEAD); + size = FM10K_MSG_HDR_FIELD_GET(*hdr, CONNECT_SIZE); + + if (rsvd0) + return FM10K_MBX_ERR_RSVD0; + + switch (type) { + case FM10K_MSG_DISCONNECT: + /* validate that all data has been received */ + if (tail != mbx->head) + return FM10K_MBX_ERR_TAIL; + + /* fall through */ + case FM10K_MSG_DATA: + /* validate that head is moving correctly */ + if (!head || (head == FM10K_MSG_HDR_MASK(HEAD))) + return FM10K_MBX_ERR_HEAD; + if (fm10k_mbx_index_len(mbx, head, mbx->tail) > mbx->tail_len) + return FM10K_MBX_ERR_HEAD; + + /* validate that tail is moving correctly */ + if (!tail || (tail == FM10K_MSG_HDR_MASK(TAIL))) + return FM10K_MBX_ERR_TAIL; + if (fm10k_mbx_index_len(mbx, mbx->head, tail) < mbx->mbmem_len) + break; + + return FM10K_MBX_ERR_TAIL; + case FM10K_MSG_CONNECT: + /* validate size is in range and is power of 2 mask */ + if ((size < FM10K_VFMBX_MSG_MTU) || (size & (size + 1))) + return FM10K_MBX_ERR_SIZE; + + /* fall through */ + case FM10K_MSG_ERROR: + if (!head || (head == FM10K_MSG_HDR_MASK(HEAD))) + return FM10K_MBX_ERR_HEAD; + /* neither create nor error include a tail offset */ + if (tail) + return FM10K_MBX_ERR_TAIL; + + break; + default: + return FM10K_MBX_ERR_TYPE; + } + + return FM10K_SUCCESS; +} + +/** + * fm10k_mbx_create_reply - Generate reply based on state and remote head + * @hw: pointer to hardware structure + * @mbx: pointer to mailbox + * @head: acknowledgement number + * + * This function will generate an outgoing message based on the current + * mailbox state and the remote FIFO head. It will return the length + * of the outgoing message excluding header on success, and a negative value + * on error. + **/ +STATIC s32 fm10k_mbx_create_reply(struct fm10k_hw *hw, + struct fm10k_mbx_info *mbx, u16 head) +{ + switch (mbx->state) { + case FM10K_STATE_OPEN: + case FM10K_STATE_DISCONNECT: + /* update our checksum for the outgoing data */ + fm10k_mbx_update_local_crc(mbx, head); + + /* as long as other end recognizes us keep sending data */ + fm10k_mbx_pull_head(hw, mbx, head); + + /* generate new header based on data */ + if (mbx->tail_len || (mbx->state == FM10K_STATE_OPEN)) + fm10k_mbx_create_data_hdr(mbx); + else + fm10k_mbx_create_disconnect_hdr(mbx); + break; + case FM10K_STATE_CONNECT: + /* send disconnect even if we aren't connected */ + fm10k_mbx_create_connect_hdr(mbx); + break; + case FM10K_STATE_CLOSED: + /* generate new header based on data */ + fm10k_mbx_create_disconnect_hdr(mbx); + default: + break; + } + + return FM10K_SUCCESS; +} + +/** + * fm10k_mbx_reset_work- Reset internal pointers for any pending work + * @mbx: pointer to mailbox + * + * This function will reset all internal pointers so any work in progress + * is dropped. This call should occur every time we transition from the + * open state to the connect state. 
+ **/ +STATIC void fm10k_mbx_reset_work(struct fm10k_mbx_info *mbx) +{ + u16 len, head, ack; + + /* reset our outgoing max size back to Rx limits */ + mbx->max_size = mbx->rx.size - 1; + + /* update mbx->pulled to account for tail_len and ack */ + head = FM10K_MSG_HDR_FIELD_GET(mbx->mbx_hdr, HEAD); + ack = fm10k_mbx_index_len(mbx, head, mbx->tail); + mbx->pulled += mbx->tail_len - ack; + + /* now drop any messages which have started or finished transmitting */ + while (fm10k_fifo_head_len(&mbx->tx) && mbx->pulled) { + len = fm10k_fifo_head_drop(&mbx->tx); + mbx->tx_dropped++; + if (mbx->pulled >= len) + mbx->pulled -= len; + else + mbx->pulled = 0; + } + + /* just do a quick resync to start of message */ + mbx->pushed = 0; + mbx->pulled = 0; + mbx->tail_len = 0; + mbx->head_len = 0; + mbx->rx.tail = 0; + mbx->rx.head = 0; +} + +/** + * fm10k_mbx_update_max_size - Update the max_size and drop any large messages + * @mbx: pointer to mailbox + * @size: new value for max_size + * + * This function updates the max_size value and drops any outgoing messages + * at the head of the Tx FIFO if they are larger than max_size. It does not + * drop all oversized messages, as parsing and removing them from the middle + * of the FIFO is too difficult. Instead, rely on the checking to ensure that + * messages larger than max_size aren't pushed into the memory buffer. + **/ +STATIC void fm10k_mbx_update_max_size(struct fm10k_mbx_info *mbx, u16 size) +{ + u16 len; + + DEBUGFUNC("fm10k_mbx_update_max_size"); + + mbx->max_size = size; + + /* flush any oversized messages from the queue */ + for (len = fm10k_fifo_head_len(&mbx->tx); + len > size; + len = fm10k_fifo_head_len(&mbx->tx)) { + fm10k_fifo_head_drop(&mbx->tx); + mbx->tx_dropped++; + } +} + +/** + * fm10k_mbx_connect_reset - Reset following request for reset + * @mbx: pointer to mailbox + * + * This function resets the mailbox to either a disconnected state + * or a connect state depending on the current mailbox state + **/ +STATIC void fm10k_mbx_connect_reset(struct fm10k_mbx_info *mbx) +{ + /* just do a quick resync to start of frame */ + fm10k_mbx_reset_work(mbx); + + /* reset CRC seeds */ + mbx->local = FM10K_MBX_CRC_SEED; + mbx->remote = FM10K_MBX_CRC_SEED; + + /* we cannot exit connect until the size is good */ + if (mbx->state == FM10K_STATE_OPEN) + mbx->state = FM10K_STATE_CONNECT; + else + mbx->state = FM10K_STATE_CLOSED; +} + +/** + * fm10k_mbx_process_connect - Process connect header + * @hw: pointer to hardware structure + * @mbx: pointer to mailbox + * + * This function will read an incoming connect header and reply with the + * appropriate message. It will return a value indicating the number of + * data DWORDs on success, or will return a negative value on failure.
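Editor's note: fm10k_mbx_update_max_size() only drains oversized messages from the front of the Tx FIFO. A toy sketch of that drop-from-the-head pattern (plain array instead of the real fm10k_mbx_fifo; values are invented):

    /* Illustrative sketch, not part of the driver: drop messages from the
     * head of a queue while they exceed a newly lowered size limit.
     */
    #include <stdio.h>

    int main(void)
    {
        unsigned int msg_len[] = { 12, 9, 3, 2 };   /* lengths in DWORDs */
        unsigned int head = 0, count = 4, dropped = 0;
        unsigned int max_size = 4;                  /* new, smaller limit */

        /* flush any oversized messages from the front of the queue */
        while (count && msg_len[head] > max_size) {
            head++;
            count--;
            dropped++;
        }

        printf("dropped %u oversized message(s), %u left\n", dropped, count);
        return 0;
    }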
+ **/ +STATIC s32 fm10k_mbx_process_connect(struct fm10k_hw *hw, + struct fm10k_mbx_info *mbx) +{ + const enum fm10k_mbx_state state = mbx->state; + const u32 *hdr = &mbx->mbx_hdr; + u16 size, head; + + /* we will need to pull all of the fields for verification */ + size = FM10K_MSG_HDR_FIELD_GET(*hdr, CONNECT_SIZE); + head = FM10K_MSG_HDR_FIELD_GET(*hdr, HEAD); + + switch (state) { + case FM10K_STATE_DISCONNECT: + case FM10K_STATE_OPEN: + /* reset any in-progress work */ + fm10k_mbx_connect_reset(mbx); + break; + case FM10K_STATE_CONNECT: + /* we cannot exit connect until the size is good */ + if (size > mbx->rx.size) { + mbx->max_size = mbx->rx.size - 1; + } else { + /* record the remote system requesting connection */ + mbx->state = FM10K_STATE_OPEN; + + fm10k_mbx_update_max_size(mbx, size); + } + break; + default: + break; + } + + /* align our tail index to remote head index */ + mbx->tail = head; + + return fm10k_mbx_create_reply(hw, mbx, head); +} + +/** + * fm10k_mbx_process_data - Process data header + * @hw: pointer to hardware structure + * @mbx: pointer to mailbox + * + * This function will read an incoming data header and reply with the + * appropriate message. It will return a value indicating the number of + * data DWORDs on success, or will return a negative value on failure. + **/ +STATIC s32 fm10k_mbx_process_data(struct fm10k_hw *hw, + struct fm10k_mbx_info *mbx) +{ + const u32 *hdr = &mbx->mbx_hdr; + u16 head, tail; + s32 err; + + DEBUGFUNC("fm10k_mbx_process_data"); + + /* we will need to pull all of the fields for verification */ + head = FM10K_MSG_HDR_FIELD_GET(*hdr, HEAD); + tail = FM10K_MSG_HDR_FIELD_GET(*hdr, TAIL); + + /* if we are in connect just update our data and go */ + if (mbx->state == FM10K_STATE_CONNECT) { + mbx->tail = head; + mbx->state = FM10K_STATE_OPEN; + } + + /* abort on message size errors */ + err = fm10k_mbx_push_tail(hw, mbx, tail); + if (err < 0) + return err; + + /* verify the checksum on the incoming data */ + err = fm10k_mbx_verify_remote_crc(mbx); + if (err) + return err; + + /* process messages if we have received any */ + fm10k_mbx_dequeue_rx(hw, mbx); + + return fm10k_mbx_create_reply(hw, mbx, head); +} + +/** + * fm10k_mbx_process_disconnect - Process disconnect header + * @hw: pointer to hardware structure + * @mbx: pointer to mailbox + * + * This function will read an incoming disconnect header and reply with the + * appropriate message. It will return a value indicating the number of + * data DWORDs on success, or will return a negative value on failure. 
+ **/ +STATIC s32 fm10k_mbx_process_disconnect(struct fm10k_hw *hw, + struct fm10k_mbx_info *mbx) +{ + const enum fm10k_mbx_state state = mbx->state; + const u32 *hdr = &mbx->mbx_hdr; + u16 head; + s32 err; + + /* we will need to pull the header field for verification */ + head = FM10K_MSG_HDR_FIELD_GET(*hdr, HEAD); + + /* We should not be receiving disconnect if Rx is incomplete */ + if (mbx->pushed) + return FM10K_MBX_ERR_TAIL; + + /* we have already verified mbx->head == tail so we know this is 0 */ + mbx->head_len = 0; + + /* verify the checksum on the incoming header is correct */ + err = fm10k_mbx_verify_remote_crc(mbx); + if (err) + return err; + + switch (state) { + case FM10K_STATE_DISCONNECT: + case FM10K_STATE_OPEN: + /* state doesn't change if we still have work to do */ + if (!fm10k_mbx_tx_complete(mbx)) + break; + + /* verify the head indicates we completed all transmits */ + if (head != mbx->tail) + return FM10K_MBX_ERR_HEAD; + + /* reset any in-progress work */ + fm10k_mbx_connect_reset(mbx); + break; + default: + break; + } + + return fm10k_mbx_create_reply(hw, mbx, head); +} + +/** + * fm10k_mbx_process_error - Process error header + * @hw: pointer to hardware structure + * @mbx: pointer to mailbox + * + * This function will read an incoming error header and reply with the + * appropriate message. It will return a value indicating the number of + * data DWORDs on success, or will return a negative value on failure. + **/ +STATIC s32 fm10k_mbx_process_error(struct fm10k_hw *hw, + struct fm10k_mbx_info *mbx) +{ + const u32 *hdr = &mbx->mbx_hdr; + u16 head; + + /* we will need to pull all of the fields for verification */ + head = FM10K_MSG_HDR_FIELD_GET(*hdr, HEAD); + + switch (mbx->state) { + case FM10K_STATE_OPEN: + case FM10K_STATE_DISCONNECT: + /* flush any uncompleted work */ + fm10k_mbx_reset_work(mbx); + + /* reset CRC seeds */ + mbx->local = FM10K_MBX_CRC_SEED; + mbx->remote = FM10K_MBX_CRC_SEED; + + /* reset tail index and size to prepare for reconnect */ + mbx->tail = head; + + /* if open then reset max_size and go back to connect */ + if (mbx->state == FM10K_STATE_OPEN) { + mbx->state = FM10K_STATE_CONNECT; + break; + } + + /* send a connect message to get data flowing again */ + fm10k_mbx_create_connect_hdr(mbx); + return FM10K_SUCCESS; + default: + break; + } + + return fm10k_mbx_create_reply(hw, mbx, mbx->tail); +} + +/** + * fm10k_mbx_process - Process mailbox interrupt + * @hw: pointer to hardware structure + * @mbx: pointer to mailbox + * + * This function will process incoming mailbox events and generate mailbox + * replies. It will return a value indicating the number of DWORDs + * transmitted excluding header on success or a negative value on error. 
+ **/ +STATIC s32 fm10k_mbx_process(struct fm10k_hw *hw, + struct fm10k_mbx_info *mbx) +{ + s32 err; + + DEBUGFUNC("fm10k_mbx_process"); + + /* we do not read mailbox if closed */ + if (mbx->state == FM10K_STATE_CLOSED) + return FM10K_SUCCESS; + + /* copy data from mailbox */ + err = fm10k_mbx_read(hw, mbx); + if (err) + return err; + + /* validate type, source, and destination */ + err = fm10k_mbx_validate_msg_hdr(mbx); + if (err < 0) + goto msg_err; + + switch (FM10K_MSG_HDR_FIELD_GET(mbx->mbx_hdr, TYPE)) { + case FM10K_MSG_CONNECT: + err = fm10k_mbx_process_connect(hw, mbx); + break; + case FM10K_MSG_DATA: + err = fm10k_mbx_process_data(hw, mbx); + break; + case FM10K_MSG_DISCONNECT: + err = fm10k_mbx_process_disconnect(hw, mbx); + break; + case FM10K_MSG_ERROR: + err = fm10k_mbx_process_error(hw, mbx); + break; + default: + err = FM10K_MBX_ERR_TYPE; + break; + } + +msg_err: + /* notify partner of errors on our end */ + if (err < 0) + fm10k_mbx_create_error_msg(mbx, err); + + /* copy data from mailbox */ + fm10k_mbx_write(hw, mbx); + + return err; +} + +/** + * fm10k_mbx_disconnect - Shutdown mailbox connection + * @hw: pointer to hardware structure + * @mbx: pointer to mailbox + * + * This function will shut down the mailbox. It places the mailbox first + * in the disconnect state, it then allows up to a predefined timeout for + * the mailbox to transition to close on its own. If this does not occur + * then the mailbox will be forced into the closed state. + * + * Any mailbox transactions not completed before calling this function + * are not guaranteed to complete and may be dropped. + **/ +STATIC void fm10k_mbx_disconnect(struct fm10k_hw *hw, + struct fm10k_mbx_info *mbx) +{ + int timeout = mbx->timeout ? FM10K_MBX_DISCONNECT_TIMEOUT : 0; + + DEBUGFUNC("fm10k_mbx_disconnect"); + + /* Place mbx in ready to disconnect state */ + mbx->state = FM10K_STATE_DISCONNECT; + + /* trigger interrupt to start shutdown process */ + FM10K_WRITE_MBX(hw, mbx->mbx_reg, FM10K_MBX_REQ | + FM10K_MBX_INTERRUPT_DISABLE); + do { + usec_delay(FM10K_MBX_POLL_DELAY); + mbx->ops.process(hw, mbx); + timeout -= FM10K_MBX_POLL_DELAY; + } while ((timeout > 0) && (mbx->state != FM10K_STATE_CLOSED)); + + /* in case we didn't close, just force the mailbox into shutdown and + * drop all left over messages in the FIFO. + */ + fm10k_mbx_connect_reset(mbx); + fm10k_fifo_drop_all(&mbx->tx); + + FM10K_WRITE_MBX(hw, mbx->mbmem_reg, 0); +} + +/** + * fm10k_mbx_connect - Start mailbox connection + * @hw: pointer to hardware structure + * @mbx: pointer to mailbox + * + * This function will initiate a mailbox connection. It will populate the + * mailbox with a broadcast connect message and then initialize the lock. + * This is safe since the connect message is a single DWORD so the mailbox + * transaction is guaranteed to be atomic. + * + * This function will return an error if the mailbox has not been initiated + * or is currently in use. 
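Editor's note: fm10k_mbx_disconnect() uses a bounded polling loop: trigger the shutdown, then poll at a fixed delay until the state machine closes or the timeout budget runs out, and force the close if it never happens. A minimal sketch of that pattern, assuming usleep() as a stand-in for usec_delay() and a stub done() in place of checking mbx->state; the delay/timeout constants mirror FM10K_MBX_POLL_DELAY and FM10K_MBX_DISCONNECT_TIMEOUT defined later in fm10k_mbx.h:

    /* Illustrative sketch, not part of the driver: poll with a fixed delay
     * until a condition is met or the timeout budget is exhausted.
     */
    #include <stdio.h>
    #include <unistd.h>

    #define POLL_DELAY_US 19
    #define TIMEOUT_US    500

    static int polls;

    static int done(void)
    {
        return ++polls >= 5;    /* stand-in for "mailbox reached CLOSED" */
    }

    int main(void)
    {
        int timeout = TIMEOUT_US;

        do {
            usleep(POLL_DELAY_US);
            timeout -= POLL_DELAY_US;
        } while (timeout > 0 && !done());

        printf("%s after %d polls\n",
               timeout > 0 ? "closed" : "timed out, forcing shutdown", polls);
        return 0;
    }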
+ **/ +STATIC s32 fm10k_mbx_connect(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx) +{ + DEBUGFUNC("fm10k_mbx_connect"); + + /* we cannot connect an uninitialized mailbox */ + if (!mbx->rx.buffer) + return FM10K_MBX_ERR_NO_SPACE; + + /* we cannot connect an already connected mailbox */ + if (mbx->state != FM10K_STATE_CLOSED) + return FM10K_MBX_ERR_BUSY; + + /* mailbox timeout can now become active */ + mbx->timeout = FM10K_MBX_INIT_TIMEOUT; + + /* Place mbx in ready to connect state */ + mbx->state = FM10K_STATE_CONNECT; + + fm10k_mbx_reset_work(mbx); + + /* initialize header of remote mailbox */ + fm10k_mbx_create_fake_disconnect_hdr(mbx); + FM10K_WRITE_MBX(hw, mbx->mbmem_reg ^ mbx->mbmem_len, mbx->mbx_hdr); + + /* enable interrupt and notify other party of new message */ + mbx->mbx_lock = FM10K_MBX_REQ_INTERRUPT | FM10K_MBX_ACK_INTERRUPT | + FM10K_MBX_INTERRUPT_ENABLE; + + /* generate and load connect header into mailbox */ + fm10k_mbx_create_connect_hdr(mbx); + fm10k_mbx_write(hw, mbx); + + return FM10K_SUCCESS; +} + +/** + * fm10k_mbx_validate_handlers - Validate layout of message parsing data + * @msg_data: handlers for mailbox events + * + * This function validates the layout of the message parsing data. This + * should be mostly static, but it is important to catch any errors that + * are made when constructing the parsers. + **/ +STATIC s32 fm10k_mbx_validate_handlers(const struct fm10k_msg_data *msg_data) +{ + const struct fm10k_tlv_attr *attr; + unsigned int id; + + DEBUGFUNC("fm10k_mbx_validate_handlers"); + + /* Allow NULL mailboxes that transmit but don't receive */ + if (!msg_data) + return FM10K_SUCCESS; + + while (msg_data->id != FM10K_TLV_ERROR) { + /* all messages should have a function handler */ + if (!msg_data->func) + return FM10K_ERR_PARAM; + + /* parser is optional */ + attr = msg_data->attr; + if (attr) { + while (attr->id != FM10K_TLV_ERROR) { + id = attr->id; + attr++; + /* ID should always be increasing */ + if (id >= attr->id) + return FM10K_ERR_PARAM; + /* ID should fit in results array */ + if (id >= FM10K_TLV_RESULTS_MAX) + return FM10K_ERR_PARAM; + } + + /* verify terminator is in the list */ + if (attr->id != FM10K_TLV_ERROR) + return FM10K_ERR_PARAM; + } + + id = msg_data->id; + msg_data++; + /* ID should always be increasing */ + if (id >= msg_data->id) + return FM10K_ERR_PARAM; + } + + /* verify terminator is in the list */ + if ((msg_data->id != FM10K_TLV_ERROR) || !msg_data->func) + return FM10K_ERR_PARAM; + + return FM10K_SUCCESS; +} + +/** + * fm10k_mbx_register_handlers - Register a set of handler ops for mailbox + * @mbx: pointer to mailbox + * @msg_data: handlers for mailbox events + * + * This function associates a set of message handling ops with a mailbox. + **/ +STATIC s32 fm10k_mbx_register_handlers(struct fm10k_mbx_info *mbx, + const struct fm10k_msg_data *msg_data) +{ + DEBUGFUNC("fm10k_mbx_register_handlers"); + + /* validate layout of handlers before assigning them */ + if (fm10k_mbx_validate_handlers(msg_data)) + return FM10K_ERR_PARAM; + + /* initialize the message handlers */ + mbx->msg_data = msg_data; + + return FM10K_SUCCESS; +} + +/** + * fm10k_pfvf_mbx_init - Initialize mailbox memory for PF/VF mailbox + * @hw: pointer to hardware structure + * @mbx: pointer to mailbox + * @msg_data: handlers for mailbox events + * @id: ID reference for PF as it supports up to 64 PF/VF mailboxes + * + * This function initializes the mailbox for use. 
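Editor's note: fm10k_mbx_validate_handlers() enforces two layout rules on a handler table: message IDs must be strictly increasing, and the table must end with a terminator entry that still carries a handler function. A toy sketch of those rules, using an invented struct and terminator value rather than fm10k_msg_data and FM10K_TLV_ERROR:

    /* Illustrative sketch, not part of the driver: validate a handler table
     * whose IDs must increase and whose terminator must have a function.
     */
    #include <stdio.h>

    #define TERMINATOR_ID 0xFFFF    /* stand-in for FM10K_TLV_ERROR */

    struct toy_handler {
        unsigned int id;
        int (*func)(void);
    };

    static int handle(void) { return 0; }

    static int validate(const struct toy_handler *tbl)
    {
        for (; tbl->id != TERMINATOR_ID; tbl++) {
            if (!tbl->func)
                return -1;              /* missing handler */
            if (tbl->id >= tbl[1].id)
                return -1;              /* IDs not strictly increasing */
        }
        return tbl->func ? 0 : -1;      /* terminator needs a func too */
    }

    int main(void)
    {
        const struct toy_handler good[] = {
            { 1, handle }, { 4, handle }, { TERMINATOR_ID, handle },
        };
        printf("table %s\n", validate(good) ? "rejected" : "accepted");
        return 0;
    }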
It will split the + * buffer provided and use that to populate both the Tx and Rx FIFO by + * evenly splitting it. In order to allow for easy masking of head/tail + * the value reported in size must be a power of 2 and is reported in + * DWORDs, not bytes. Any invalid values will cause the mailbox to return + * error. + **/ +s32 fm10k_pfvf_mbx_init(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx, + const struct fm10k_msg_data *msg_data, u8 id) +{ + DEBUGFUNC("fm10k_pfvf_mbx_init"); + + /* initialize registers */ + switch (hw->mac.type) { + case fm10k_mac_vf: + mbx->mbx_reg = FM10K_VFMBX; + mbx->mbmem_reg = FM10K_VFMBMEM(FM10K_VFMBMEM_VF_XOR); + break; + case fm10k_mac_pf: + /* there are only 64 VF <-> PF mailboxes */ + if (id < 64) { + mbx->mbx_reg = FM10K_MBX(id); + mbx->mbmem_reg = FM10K_MBMEM_VF(id, 0); + break; + } + /* fall through */ + default: + return FM10K_MBX_ERR_NO_MBX; + } + + /* start out in closed state */ + mbx->state = FM10K_STATE_CLOSED; + + /* validate layout of handlers before assigning them */ + if (fm10k_mbx_validate_handlers(msg_data)) + return FM10K_ERR_PARAM; + + /* initialize the message handlers */ + mbx->msg_data = msg_data; + + /* start mailbox as timed out and let the reset_hw call + * set the timeout value to begin communications + */ + mbx->timeout = 0; + mbx->usec_delay = FM10K_MBX_INIT_DELAY; + + /* initialize tail and head */ + mbx->tail = 1; + mbx->head = 1; + + /* initialize CRC seeds */ + mbx->local = FM10K_MBX_CRC_SEED; + mbx->remote = FM10K_MBX_CRC_SEED; + + /* Split buffer for use by Tx/Rx FIFOs */ + mbx->max_size = FM10K_MBX_MSG_MAX_SIZE; + mbx->mbmem_len = FM10K_VFMBMEM_VF_XOR; + + /* initialize the FIFOs, sizes are in 4 byte increments */ + fm10k_fifo_init(&mbx->tx, mbx->buffer, FM10K_MBX_TX_BUFFER_SIZE); + fm10k_fifo_init(&mbx->rx, &mbx->buffer[FM10K_MBX_TX_BUFFER_SIZE], + FM10K_MBX_RX_BUFFER_SIZE); + + /* initialize function pointers */ + mbx->ops.connect = fm10k_mbx_connect; + mbx->ops.disconnect = fm10k_mbx_disconnect; + mbx->ops.rx_ready = fm10k_mbx_rx_ready; + mbx->ops.tx_ready = fm10k_mbx_tx_ready; + mbx->ops.tx_complete = fm10k_mbx_tx_complete; + mbx->ops.enqueue_tx = fm10k_mbx_enqueue_tx; + mbx->ops.process = fm10k_mbx_process; + mbx->ops.register_handlers = fm10k_mbx_register_handlers; + + return FM10K_SUCCESS; +} + +/** + * fm10k_sm_mbx_create_data_hdr - Generate a mailbox header for local FIFO + * @mbx: pointer to mailbox + * + * This function returns a data mailbox header + **/ +STATIC void fm10k_sm_mbx_create_data_hdr(struct fm10k_mbx_info *mbx) +{ + if (mbx->tail_len) + mbx->mbx_lock |= FM10K_MBX_REQ; + + mbx->mbx_hdr = FM10K_MSG_HDR_FIELD_SET(mbx->tail, SM_TAIL) | + FM10K_MSG_HDR_FIELD_SET(mbx->remote, SM_VER) | + FM10K_MSG_HDR_FIELD_SET(mbx->head, SM_HEAD); +} + +/** + * fm10k_sm_mbx_create_connect_hdr - Generate a mailbox header for local FIFO + * @mbx: pointer to mailbox + * @err: error flags to report if any + * + * This function returns a connection mailbox header + **/ +STATIC void fm10k_sm_mbx_create_connect_hdr(struct fm10k_mbx_info *mbx, u8 err) +{ + if (mbx->local) + mbx->mbx_lock |= FM10K_MBX_REQ; + + mbx->mbx_hdr = FM10K_MSG_HDR_FIELD_SET(mbx->tail, SM_TAIL) | + FM10K_MSG_HDR_FIELD_SET(mbx->remote, SM_VER) | + FM10K_MSG_HDR_FIELD_SET(mbx->head, SM_HEAD) | + FM10K_MSG_HDR_FIELD_SET(err, SM_ERR); +} + +/** + * fm10k_sm_mbx_connect_reset - Reset following request for reset + * @mbx: pointer to mailbox + * + * This function resets the mailbox to a just connected state + **/ +STATIC void fm10k_sm_mbx_connect_reset(struct
fm10k_mbx_info *mbx) +{ + /* flush any uncompleted work */ + fm10k_mbx_reset_work(mbx); + + /* set local version to max and remote version to 0 */ + mbx->local = FM10K_SM_MBX_VERSION; + mbx->remote = 0; + + /* initialize tail and head */ + mbx->tail = 1; + mbx->head = 1; + + /* reset state back to connect */ + mbx->state = FM10K_STATE_CONNECT; +} + +/** + * fm10k_sm_mbx_connect - Start switch manager mailbox connection + * @hw: pointer to hardware structure + * @mbx: pointer to mailbox + * + * This function will initiate a mailbox connection with the switch + * manager. To do this it will first disconnect the mailbox, and then + * reconnect it in order to complete a reset of the mailbox. + * + * This function will return an error if the mailbox has not been initiated + * or is currently in use. + **/ +STATIC s32 fm10k_sm_mbx_connect(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx) +{ + DEBUGFUNC("fm10k_sm_mbx_connect"); + + /* we cannot connect an uninitialized mailbox */ + if (!mbx->rx.buffer) + return FM10K_MBX_ERR_NO_SPACE; + + /* we cannot connect an already connected mailbox */ + if (mbx->state != FM10K_STATE_CLOSED) + return FM10K_MBX_ERR_BUSY; + + /* mailbox timeout can now become active */ + mbx->timeout = FM10K_MBX_INIT_TIMEOUT; + + /* Place mbx in ready to connect state */ + mbx->state = FM10K_STATE_CONNECT; + mbx->max_size = FM10K_MBX_MSG_MAX_SIZE; + + /* reset interface back to connect */ + fm10k_sm_mbx_connect_reset(mbx); + + /* enable interrupt and notify other party of new message */ + mbx->mbx_lock = FM10K_MBX_REQ_INTERRUPT | FM10K_MBX_ACK_INTERRUPT | + FM10K_MBX_INTERRUPT_ENABLE; + + /* generate and load connect header into mailbox */ + fm10k_sm_mbx_create_connect_hdr(mbx, 0); + fm10k_mbx_write(hw, mbx); + + return FM10K_SUCCESS; +} + +/** + * fm10k_sm_mbx_disconnect - Shutdown mailbox connection + * @hw: pointer to hardware structure + * @mbx: pointer to mailbox + * + * This function will shut down the mailbox. It places the mailbox first + * in the disconnect state, it then allows up to a predefined timeout for + * the mailbox to transition to close on its own. If this does not occur + * then the mailbox will be forced into the closed state. + * + * Any mailbox transactions not completed before calling this function + * are not guaranteed to complete and may be dropped. + **/ +STATIC void fm10k_sm_mbx_disconnect(struct fm10k_hw *hw, + struct fm10k_mbx_info *mbx) +{ + int timeout = mbx->timeout ? FM10K_MBX_DISCONNECT_TIMEOUT : 0; + + DEBUGFUNC("fm10k_sm_mbx_disconnect"); + + /* Place mbx in ready to disconnect state */ + mbx->state = FM10K_STATE_DISCONNECT; + + /* trigger interrupt to start shutdown process */ + FM10K_WRITE_REG(hw, mbx->mbx_reg, FM10K_MBX_REQ | + FM10K_MBX_INTERRUPT_DISABLE); + do { + usec_delay(FM10K_MBX_POLL_DELAY); + mbx->ops.process(hw, mbx); + timeout -= FM10K_MBX_POLL_DELAY; + } while ((timeout > 0) && (mbx->state != FM10K_STATE_CLOSED)); + + /* in case we didn't close just force the mailbox into shutdown */ + mbx->state = FM10K_STATE_CLOSED; + mbx->remote = 0; + fm10k_mbx_reset_work(mbx); + fm10k_fifo_drop_all(&mbx->tx); + + FM10K_WRITE_REG(hw, mbx->mbmem_reg, 0); +} + +/** + * fm10k_sm_mbx_validate_fifo_hdr - Validate fields in the remote FIFO header + * @mbx: pointer to mailbox + * + * This function will parse up the fields in the mailbox header and return + * an error if the header contains any of a number of invalid configurations + * including unrecognized offsets or version numbers. 
+ **/ +STATIC s32 fm10k_sm_mbx_validate_fifo_hdr(struct fm10k_mbx_info *mbx) +{ + const u32 *hdr = &mbx->mbx_hdr; + u16 tail, head, ver; + + DEBUGFUNC("fm10k_sm_mbx_validate_fifo_hdr"); + + tail = FM10K_MSG_HDR_FIELD_GET(*hdr, SM_TAIL); + ver = FM10K_MSG_HDR_FIELD_GET(*hdr, SM_VER); + head = FM10K_MSG_HDR_FIELD_GET(*hdr, SM_HEAD); + + switch (ver) { + case 0: + break; + case FM10K_SM_MBX_VERSION: + if (!head || head > FM10K_SM_MBX_FIFO_LEN) + return FM10K_MBX_ERR_HEAD; + if (!tail || tail > FM10K_SM_MBX_FIFO_LEN) + return FM10K_MBX_ERR_TAIL; + if (mbx->tail < head) + head += mbx->mbmem_len - 1; + if (tail < mbx->head) + tail += mbx->mbmem_len - 1; + if (fm10k_mbx_index_len(mbx, head, mbx->tail) > mbx->tail_len) + return FM10K_MBX_ERR_HEAD; + if (fm10k_mbx_index_len(mbx, mbx->head, tail) < mbx->mbmem_len) + break; + return FM10K_MBX_ERR_TAIL; + default: + return FM10K_MBX_ERR_SRC; + } + + return FM10K_SUCCESS; +} + +/** + * fm10k_sm_mbx_process_error - Process header with error flag set + * @mbx: pointer to mailbox + * + * This function is meant to respond to a request where the error flag + * is set. As a result we will terminate a connection if one is present + * and fall back into the reset state with a connection header of version + * 0 (RESET). + **/ +STATIC void fm10k_sm_mbx_process_error(struct fm10k_mbx_info *mbx) +{ + const enum fm10k_mbx_state state = mbx->state; + + switch (state) { + case FM10K_STATE_DISCONNECT: + /* if there is an error just disconnect */ + mbx->remote = 0; + break; + case FM10K_STATE_OPEN: + /* flush any uncompleted work */ + fm10k_sm_mbx_connect_reset(mbx); + break; + case FM10K_STATE_CONNECT: + /* try connecting at lower version */ + if (mbx->remote) { + while (mbx->local > 1) + mbx->local--; + mbx->remote = 0; + } + break; + default: + break; + } + + fm10k_sm_mbx_create_connect_hdr(mbx, 0); +} + +/** + * fm10k_sm_mbx_create_error_msg - Process an error in FIFO header + * @mbx: pointer to mailbox + * @err: local error encountered + * + * This function will interpret the error provided by err, and based on + * that it may set the error bit in the local message header + **/ +STATIC void fm10k_sm_mbx_create_error_msg(struct fm10k_mbx_info *mbx, s32 err) +{ + /* only generate an error message for these types */ + switch (err) { + case FM10K_MBX_ERR_TAIL: + case FM10K_MBX_ERR_HEAD: + case FM10K_MBX_ERR_SRC: + case FM10K_MBX_ERR_SIZE: + case FM10K_MBX_ERR_RSVD0: + break; + default: + return; + } + + /* process it as though we received an error, and send error reply */ + fm10k_sm_mbx_process_error(mbx); + fm10k_sm_mbx_create_connect_hdr(mbx, 1); +} + +/** + * fm10k_sm_mbx_receive - Take message from Rx mailbox FIFO and put it in Rx + * @hw: pointer to hardware structure + * @mbx: pointer to mailbox + * @tail: tail index of message + * + * This function will dequeue one message from the Rx switch manager mailbox + * FIFO and place it in the Rx mailbox FIFO for processing by software.
+ **/ +STATIC s32 fm10k_sm_mbx_receive(struct fm10k_hw *hw, + struct fm10k_mbx_info *mbx, + u16 tail) +{ + /* reduce length by 1 to convert to a mask */ + u16 mbmem_len = mbx->mbmem_len - 1; + s32 err; + + DEBUGFUNC("fm10k_sm_mbx_receive"); + + /* push tail in front of head */ + if (tail < mbx->head) + tail += mbmem_len; + + /* copy data to the Rx FIFO */ + err = fm10k_mbx_push_tail(hw, mbx, tail); + if (err < 0) + return err; + + /* process messages if we have received any */ + fm10k_mbx_dequeue_rx(hw, mbx); + + /* guarantee head aligns with the end of the last message */ + mbx->head = fm10k_mbx_head_sub(mbx, mbx->pushed); + mbx->pushed = 0; + + /* clear any extra bits left over since index adds 1 extra bit */ + if (mbx->head > mbmem_len) + mbx->head -= mbmem_len; + + return err; +} + +/** + * fm10k_sm_mbx_transmit - Take message from Tx and put it in Tx mailbox FIFO + * @hw: pointer to hardware structure + * @mbx: pointer to mailbox + * @head: head index of message + * + * This function will dequeue one message from the Tx mailbox FIFO and place + * it in the Tx switch manager mailbox FIFO for processing by hardware. + **/ +STATIC void fm10k_sm_mbx_transmit(struct fm10k_hw *hw, + struct fm10k_mbx_info *mbx, u16 head) +{ + struct fm10k_mbx_fifo *fifo = &mbx->tx; + /* reduce length by 1 to convert to a mask */ + u16 mbmem_len = mbx->mbmem_len - 1; + u16 tail_len, len = 0; + u32 *msg; + + DEBUGFUNC("fm10k_sm_mbx_transmit"); + + /* push head behind tail */ + if (mbx->tail < head) + head += mbmem_len; + + fm10k_mbx_pull_head(hw, mbx, head); + + /* determine msg aligned offset for end of buffer */ + do { + msg = fifo->buffer + fm10k_fifo_head_offset(fifo, len); + tail_len = len; + len += FM10K_TLV_DWORD_LEN(*msg); + } while ((len <= mbx->tail_len) && (len < mbmem_len)); + + /* guarantee we stop on a message boundary */ + if (mbx->tail_len > tail_len) { + mbx->tail = fm10k_mbx_tail_sub(mbx, mbx->tail_len - tail_len); + mbx->tail_len = tail_len; + } + + /* clear any extra bits left over since index adds 1 extra bit */ + if (mbx->tail > mbmem_len) + mbx->tail -= mbmem_len; +} + +/** + * fm10k_sm_mbx_create_reply - Generate reply based on state and remote head + * @hw: pointer to hardware structure + * @mbx: pointer to mailbox + * @head: acknowledgement number + * + * This function will generate an outgoing message based on the current + * mailbox state and the remote FIFO head. It will return the length + * of the outgoing message excluding header on success, and a negative value + * on error. + **/ +STATIC void fm10k_sm_mbx_create_reply(struct fm10k_hw *hw, + struct fm10k_mbx_info *mbx, u16 head) +{ + switch (mbx->state) { + case FM10K_STATE_OPEN: + case FM10K_STATE_DISCONNECT: + /* flush out Tx data */ + fm10k_sm_mbx_transmit(hw, mbx, head); + + /* generate new header based on data */ + if (mbx->tail_len || (mbx->state == FM10K_STATE_OPEN)) { + fm10k_sm_mbx_create_data_hdr(mbx); + } else { + mbx->remote = 0; + fm10k_sm_mbx_create_connect_hdr(mbx, 0); + } + break; + case FM10K_STATE_CONNECT: + case FM10K_STATE_CLOSED: + fm10k_sm_mbx_create_connect_hdr(mbx, 0); + break; + default: + break; + } +} + +/** + * fm10k_sm_mbx_process_reset - Process header with version == 0 (RESET) + * @hw: pointer to hardware structure + * @mbx: pointer to mailbox + * + * This function is meant to respond to a request where the version data + * is set to 0. As such we will either terminate the connection or go + * into the connect state in order to re-establish the connection. 
This + * function can also be used to respond to an error as the connection + * resetting would also be a means of dealing with errors. + **/ +STATIC s32 fm10k_sm_mbx_process_reset(struct fm10k_hw *hw, + struct fm10k_mbx_info *mbx) +{ + s32 err = FM10K_SUCCESS; + const enum fm10k_mbx_state state = mbx->state; + + switch (state) { + case FM10K_STATE_DISCONNECT: + /* drop remote connections and disconnect */ + mbx->state = FM10K_STATE_CLOSED; + mbx->remote = 0; + mbx->local = 0; + break; + case FM10K_STATE_OPEN: + /* flush any incomplete work */ + fm10k_sm_mbx_connect_reset(mbx); + err = FM10K_ERR_RESET_REQUESTED; + break; + case FM10K_STATE_CONNECT: + /* Update remote value to match local value */ + mbx->remote = mbx->local; + default: + break; + } + + fm10k_sm_mbx_create_reply(hw, mbx, mbx->tail); + + return err; +} + +/** + * fm10k_sm_mbx_process_version_1 - Process header with version == 1 + * @hw: pointer to hardware structure + * @mbx: pointer to mailbox + * + * This function is meant to process messages received when the remote + * mailbox is active. + **/ +STATIC s32 fm10k_sm_mbx_process_version_1(struct fm10k_hw *hw, + struct fm10k_mbx_info *mbx) +{ + const u32 *hdr = &mbx->mbx_hdr; + u16 head, tail; + s32 len; + + /* pull all fields needed for verification */ + tail = FM10K_MSG_HDR_FIELD_GET(*hdr, SM_TAIL); + head = FM10K_MSG_HDR_FIELD_GET(*hdr, SM_HEAD); + + /* if we are in connect and wanting version 1 then start up and go */ + if (mbx->state == FM10K_STATE_CONNECT) { + if (!mbx->remote) + goto send_reply; + if (mbx->remote != 1) + return FM10K_MBX_ERR_SRC; + + mbx->state = FM10K_STATE_OPEN; + } + + do { + /* abort on message size errors */ + len = fm10k_sm_mbx_receive(hw, mbx, tail); + if (len < 0) + return len; + + /* continue until we have flushed the Rx FIFO */ + } while (len); + +send_reply: + fm10k_sm_mbx_create_reply(hw, mbx, head); + + return FM10K_SUCCESS; +} + +/** + * fm10k_sm_mbx_process - Process switch manager mailbox interrupt + * @hw: pointer to hardware structure + * @mbx: pointer to mailbox + * + * This function will process incoming mailbox events and generate mailbox + * replies. It will return a value indicating the number of DWORDs + * transmitted excluding header on success or a negative value on error. + **/ +STATIC s32 fm10k_sm_mbx_process(struct fm10k_hw *hw, + struct fm10k_mbx_info *mbx) +{ + s32 err; + + DEBUGFUNC("fm10k_sm_mbx_process"); + + /* we do not read mailbox if closed */ + if (mbx->state == FM10K_STATE_CLOSED) + return FM10K_SUCCESS; + + /* retrieve data from switch manager */ + err = fm10k_mbx_read(hw, mbx); + if (err) + return err; + + err = fm10k_sm_mbx_validate_fifo_hdr(mbx); + if (err < 0) + goto fifo_err; + + if (FM10K_MSG_HDR_FIELD_GET(mbx->mbx_hdr, SM_ERR)) { + fm10k_sm_mbx_process_error(mbx); + goto fifo_err; + } + + switch (FM10K_MSG_HDR_FIELD_GET(mbx->mbx_hdr, SM_VER)) { + case 0: + err = fm10k_sm_mbx_process_reset(hw, mbx); + break; + case FM10K_SM_MBX_VERSION: + err = fm10k_sm_mbx_process_version_1(hw, mbx); + break; + } + +fifo_err: + if (err < 0) + fm10k_sm_mbx_create_error_msg(mbx, err); + + /* report data to switch manager */ + fm10k_mbx_write(hw, mbx); + + return err; +} + +/** + * fm10k_sm_mbx_init - Initialize mailbox memory for PF/SM mailbox + * @hw: pointer to hardware structure + * @mbx: pointer to mailbox + * @msg_data: handlers for mailbox events + * + * This function initializes the PF/SM mailbox for use. 
It will split the + * buffer provided and use that to populate both the Tx and Rx FIFO by + * evenly splitting it. In order to allow for easy masking of head/tail + * the value reported in size must be a power of 2 and is reported in + * DWORDs, not bytes. Any invalid values will cause the mailbox to return + * error. + **/ +s32 fm10k_sm_mbx_init(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx, + const struct fm10k_msg_data *msg_data) +{ + DEBUGFUNC("fm10k_sm_mbx_init"); + UNREFERENCED_1PARAMETER(hw); + + mbx->mbx_reg = FM10K_GMBX; + mbx->mbmem_reg = FM10K_MBMEM_PF(0); + + /* start out in closed state */ + mbx->state = FM10K_STATE_CLOSED; + + /* validate layout of handlers before assigning them */ + if (fm10k_mbx_validate_handlers(msg_data)) + return FM10K_ERR_PARAM; + + /* initialize the message handlers */ + mbx->msg_data = msg_data; + + /* start mailbox as timed out and let the reset_hw call + * set the timeout value to begin communications + */ + mbx->timeout = 0; + mbx->usec_delay = FM10K_MBX_INIT_DELAY; + + /* Split buffer for use by Tx/Rx FIFOs */ + mbx->max_size = FM10K_MBX_MSG_MAX_SIZE; + mbx->mbmem_len = FM10K_MBMEM_PF_XOR; + + /* initialize the FIFOs, sizes are in 4 byte increments */ + fm10k_fifo_init(&mbx->tx, mbx->buffer, FM10K_MBX_TX_BUFFER_SIZE); + fm10k_fifo_init(&mbx->rx, &mbx->buffer[FM10K_MBX_TX_BUFFER_SIZE], + FM10K_MBX_RX_BUFFER_SIZE); + + /* initialize function pointers */ + mbx->ops.connect = fm10k_sm_mbx_connect; + mbx->ops.disconnect = fm10k_sm_mbx_disconnect; + mbx->ops.rx_ready = fm10k_mbx_rx_ready; + mbx->ops.tx_ready = fm10k_mbx_tx_ready; + mbx->ops.tx_complete = fm10k_mbx_tx_complete; + mbx->ops.enqueue_tx = fm10k_mbx_enqueue_tx; + mbx->ops.process = fm10k_sm_mbx_process; + mbx->ops.register_handlers = fm10k_mbx_register_handlers; + + return FM10K_SUCCESS; +} diff --git a/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_mbx.h b/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_mbx.h new file mode 100644 index 000000000..a4a8e5adb --- /dev/null +++ b/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_mbx.h @@ -0,0 +1,297 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2013 - 2015 Intel Corporation + */ + +#ifndef _FM10K_MBX_H_ +#define _FM10K_MBX_H_ + +/* forward declaration */ +struct fm10k_mbx_info; + +#include "fm10k_type.h" +#include "fm10k_tlv.h" + +/* PF Mailbox Registers */ +#define FM10K_MBMEM(_n) ((_n) + 0x18000) +#define FM10K_MBMEM_VF(_n, _m) (((_n) * 0x10) + (_m) + 0x18000) +#define FM10K_MBMEM_SM(_n) ((_n) + 0x18400) +#define FM10K_MBMEM_PF(_n) ((_n) + 0x18600) +/* XOR provides means of switching from Tx to Rx FIFO */ +#define FM10K_MBMEM_PF_XOR (FM10K_MBMEM_SM(0) ^ FM10K_MBMEM_PF(0)) +#define FM10K_MBX(_n) ((_n) + 0x18800) +#define FM10K_MBX_REQ 0x00000002 +#define FM10K_MBX_ACK 0x00000004 +#define FM10K_MBX_REQ_INTERRUPT 0x00000008 +#define FM10K_MBX_ACK_INTERRUPT 0x00000010 +#define FM10K_MBX_INTERRUPT_ENABLE 0x00000020 +#define FM10K_MBX_INTERRUPT_DISABLE 0x00000040 +#define FM10K_MBX_GLOBAL_REQ_INTERRUPT 0x00000200 +#define FM10K_MBX_GLOBAL_ACK_INTERRUPT 0x00000400 +#define FM10K_MBICR(_n) ((_n) + 0x18840) +#define FM10K_GMBX 0x18842 + +/* VF Mailbox Registers */ +#define FM10K_VFMBX 0x00010 +#define FM10K_VFMBMEM(_n) ((_n) + 0x00020) +#define FM10K_VFMBMEM_LEN 16 +#define FM10K_VFMBMEM_VF_XOR (FM10K_VFMBMEM_LEN / 2) + +/* Delays/timeouts */ +#define FM10K_MBX_DISCONNECT_TIMEOUT 500 +#define FM10K_MBX_POLL_DELAY 19 +#define FM10K_MBX_INT_DELAY 20 + +#define FM10K_WRITE_MBX(hw, reg, value) FM10K_WRITE_REG(hw, reg, value) + +/* PF/VF Mailbox state 
machine + * + * +----------+ connect() +----------+ + * | CLOSED | --------------> | CONNECT | + * +----------+ +----------+ + * ^ ^ | + * | rcv: rcv: | | rcv: + * | Connect Disconnect | | Connect + * | Disconnect Error | | Data + * | | | + * | | V + * +----------+ disconnect() +----------+ + * |DISCONNECT| <-------------- | OPEN | + * +----------+ +----------+ + * + * The diagram above describes the PF/VF mailbox state machine. There + * are four main states to this machine. + * Closed: This state represents a mailbox that is in a standby state + * with interrupts disabled. In this state the mailbox should not + * read the mailbox or write any data. The only means of exiting + * this state is for the system to make the connect() call for the + * mailbox, it will then transition to the connect state. + * Connect: In this state the mailbox is seeking a connection. It will + * post a connect message with no specified destination and will + * wait for a reply from the other side of the mailbox. This state + * is exited when either a connect with the local mailbox as the + * destination is received or when a data message is received with + * a valid sequence number. + * Open: In this state the mailbox is able to transfer data between the local + * entity and the remote. It will fall back to connect in the event of + * receiving either an error message, or a disconnect message. It will + * transition to disconnect on a call to disconnect(); + * Disconnect: In this state the mailbox is attempting to gracefully terminate + * the connection. It will do so at the first point where it knows + * that the remote endpoint is either done sending, or when the + * remote endpoint has fallen back into connect. + */ +enum fm10k_mbx_state { + FM10K_STATE_CLOSED, + FM10K_STATE_CONNECT, + FM10K_STATE_OPEN, + FM10K_STATE_DISCONNECT, +}; + +/* PF/VF Mailbox header format + * 3 2 1 0 + * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Size/Err_no/CRC | Rsvd0 | Head | Tail | Type | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * + * The layout above describes the format for the header used in the PF/VF + * mailbox. The header is broken out into the following fields: + * Type: There are 4 supported message types + * 0x8: Data header - used to transport message data + * 0xC: Connect header - used to establish connection + * 0xD: Disconnect header - used to tear down a connection + * 0xE: Error header - used to address message exceptions + * Tail: Tail index for local FIFO + * Tail index actually consists of two parts. The MSB of + * the head is a loop tracker, it is 0 on an even numbered + * loop through the FIFO, and 1 on the odd numbered loops. + * To get the actual mailbox offset based on the tail it + * is necessary to add bit 3 to bit 0 and clear bit 3. This + * gives us a valid range of 0x1 - 0xE. + * Head: Head index for remote FIFO + * Head index follows the same format as the tail index. + * Rsvd0: Reserved 0 portion of the mailbox header + * CRC: Running CRC for all data since connect plus current message header + * Size: Maximum message size - Applies only to connect headers + * The maximum message size is provided during connect to avoid + * jamming the mailbox with messages that do not fit. + * Err_no: Error number - Applies only to error headers + * The error number provides an indication of the type of error + * experienced. 
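Editor's note: the state machine described above can be condensed into a single transition helper. The sketch below is an interpretation of the diagram only (the event names are invented, and the driver's real transitions are driven by the header-processing functions earlier in this patch, not by a standalone step function):

    /* Illustrative sketch, not part of the driver: the four-state PF/VF
     * mailbox state machine from the diagram, with made-up event names.
     */
    #include <stdio.h>

    enum state { CLOSED, CONNECT, OPEN, DISCONNECT };
    enum event { EV_CONNECT_CALL, EV_RCV_DATA, EV_RCV_ERROR,
                 EV_DISCONNECT_CALL, EV_REMOTE_DONE };

    static enum state step(enum state s, enum event e)
    {
        switch (s) {
        case CLOSED:
            return e == EV_CONNECT_CALL ? CONNECT : CLOSED;
        case CONNECT:
            return e == EV_RCV_DATA ? OPEN : CONNECT;
        case OPEN:
            if (e == EV_RCV_ERROR)
                return CONNECT;         /* fall back and reconnect */
            return e == EV_DISCONNECT_CALL ? DISCONNECT : OPEN;
        case DISCONNECT:
            return e == EV_REMOTE_DONE ? CLOSED : DISCONNECT;
        }
        return s;
    }

    int main(void)
    {
        enum state s = CLOSED;
        s = step(s, EV_CONNECT_CALL);    /* CLOSED -> CONNECT */
        s = step(s, EV_RCV_DATA);        /* CONNECT -> OPEN */
        s = step(s, EV_DISCONNECT_CALL); /* OPEN -> DISCONNECT */
        s = step(s, EV_REMOTE_DONE);     /* DISCONNECT -> CLOSED */
        printf("final state: %d (0 == CLOSED)\n", s);
        return 0;
    }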
+ */ + +/* macros for retrieving and setting header values */ +#define FM10K_MSG_HDR_MASK(name) \ + ((0x1u << FM10K_MSG_##name##_SIZE) - 1) +#define FM10K_MSG_HDR_FIELD_SET(value, name) \ + (((u32)(value) & FM10K_MSG_HDR_MASK(name)) << FM10K_MSG_##name##_SHIFT) +#define FM10K_MSG_HDR_FIELD_GET(value, name) \ + ((u16)((value) >> FM10K_MSG_##name##_SHIFT) & FM10K_MSG_HDR_MASK(name)) + +/* offsets shared between all headers */ +#define FM10K_MSG_TYPE_SHIFT 0 +#define FM10K_MSG_TYPE_SIZE 4 +#define FM10K_MSG_TAIL_SHIFT 4 +#define FM10K_MSG_TAIL_SIZE 4 +#define FM10K_MSG_HEAD_SHIFT 8 +#define FM10K_MSG_HEAD_SIZE 4 +#define FM10K_MSG_RSVD0_SHIFT 12 +#define FM10K_MSG_RSVD0_SIZE 4 + +/* offsets for data/disconnect headers */ +#define FM10K_MSG_CRC_SHIFT 16 +#define FM10K_MSG_CRC_SIZE 16 + +/* offsets for connect headers */ +#define FM10K_MSG_CONNECT_SIZE_SHIFT 16 +#define FM10K_MSG_CONNECT_SIZE_SIZE 16 + +/* offsets for error headers */ +#define FM10K_MSG_ERR_NO_SHIFT 16 +#define FM10K_MSG_ERR_NO_SIZE 16 + +enum fm10k_msg_type { + FM10K_MSG_DATA = 0x8, + FM10K_MSG_CONNECT = 0xC, + FM10K_MSG_DISCONNECT = 0xD, + FM10K_MSG_ERROR = 0xE, +}; + +/* HNI/SM Mailbox FIFO format + * 3 2 1 0 + * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 + * +-------+-----------------------+-------+-----------------------+ + * | Error | Remote Head |Version| Local Tail | + * +-------+-----------------------+-------+-----------------------+ + * | | + * . Local FIFO Data . + * . . + * +-------+-----------------------+-------+-----------------------+ + * + * The layout above describes the format for the FIFOs used by the host + * network interface and the switch manager to communicate messages back + * and forth. Both the HNI and the switch maintain one such FIFO. The + * layout in memory has the switch manager FIFO followed immediately by + * the HNI FIFO. For this reason I am using just the pointer to the + * HNI FIFO in the mailbox ops as the offset between the two is fixed. + * + * The header for the FIFO is broken out into the following fields: + * Local Tail: Offset into FIFO region for next DWORD to write. + * Version: Version info for mailbox, only values of 0/1 are supported. + * Remote Head: Offset into remote FIFO to indicate how much we have read. + * Error: Error indication, values TBD. + */ + +/* version number for switch manager mailboxes */ +#define FM10K_SM_MBX_VERSION 1 +#define FM10K_SM_MBX_FIFO_LEN (FM10K_MBMEM_PF_XOR - 1) + +/* offsets shared between all SM FIFO headers */ +#define FM10K_MSG_SM_TAIL_SHIFT 0 +#define FM10K_MSG_SM_TAIL_SIZE 12 +#define FM10K_MSG_SM_VER_SHIFT 12 +#define FM10K_MSG_SM_VER_SIZE 4 +#define FM10K_MSG_SM_HEAD_SHIFT 16 +#define FM10K_MSG_SM_HEAD_SIZE 12 +#define FM10K_MSG_SM_ERR_SHIFT 28 +#define FM10K_MSG_SM_ERR_SIZE 4 + +/* All error messages returned by mailbox functions + * The value -511 is 0xFE01 in hex. The idea is to order the errors + * from 0xFE01 - 0xFEFF so error codes are easily visible in the mailbox + * messages. This also helps to avoid error number collisions as Linux + * doesn't appear to use error numbers 256 - 511. 
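Editor's note: the error-numbering scheme described above is easy to see with a one-line check. The sketch below (a local MBX_ERR macro with the same arithmetic, not the driver's header) shows that the negative s32 error codes land in the 0xFE01-0xFEFF range when viewed as 16-bit values:

    /* Illustrative sketch, not part of the driver: mailbox error codes are
     * negative, but their low 16 bits fall in 0xFE01-0xFEFF.
     */
    #include <stdint.h>
    #include <stdio.h>

    #define MBX_ERR(n) ((n) - 512)          /* same arithmetic as FM10K_MBX_ERR */

    int main(void)
    {
        int32_t err = MBX_ERR(0x01);        /* -511, like FM10K_MBX_ERR_NO_MBX */

        printf("err=%d low16=0x%04x\n", err, (uint16_t)err);
        return 0;
    }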
+ */ +#define FM10K_MBX_ERR(_n) ((_n) - 512) +#define FM10K_MBX_ERR_NO_MBX FM10K_MBX_ERR(0x01) +#define FM10K_MBX_ERR_NO_SPACE FM10K_MBX_ERR(0x03) +#define FM10K_MBX_ERR_TAIL FM10K_MBX_ERR(0x05) +#define FM10K_MBX_ERR_HEAD FM10K_MBX_ERR(0x06) +#define FM10K_MBX_ERR_SRC FM10K_MBX_ERR(0x08) +#define FM10K_MBX_ERR_TYPE FM10K_MBX_ERR(0x09) +#define FM10K_MBX_ERR_SIZE FM10K_MBX_ERR(0x0B) +#define FM10K_MBX_ERR_BUSY FM10K_MBX_ERR(0x0C) +#define FM10K_MBX_ERR_RSVD0 FM10K_MBX_ERR(0x0E) +#define FM10K_MBX_ERR_CRC FM10K_MBX_ERR(0x0F) + +#define FM10K_MBX_CRC_SEED 0xFFFF + +struct fm10k_mbx_ops { + s32 (*connect)(struct fm10k_hw *, struct fm10k_mbx_info *); + void (*disconnect)(struct fm10k_hw *, struct fm10k_mbx_info *); + bool (*rx_ready)(struct fm10k_mbx_info *); + bool (*tx_ready)(struct fm10k_mbx_info *, u16); + bool (*tx_complete)(struct fm10k_mbx_info *); + s32 (*enqueue_tx)(struct fm10k_hw *, struct fm10k_mbx_info *, + const u32 *); + s32 (*process)(struct fm10k_hw *, struct fm10k_mbx_info *); + s32 (*register_handlers)(struct fm10k_mbx_info *, + const struct fm10k_msg_data *); +}; + +struct fm10k_mbx_fifo { + u32 *buffer; + u16 head; + u16 tail; + u16 size; +}; + +/* size of buffer to be stored in mailbox for FIFOs */ +#define FM10K_MBX_TX_BUFFER_SIZE 512 +#define FM10K_MBX_RX_BUFFER_SIZE 128 +#define FM10K_MBX_BUFFER_SIZE \ + (FM10K_MBX_TX_BUFFER_SIZE + FM10K_MBX_RX_BUFFER_SIZE) + +/* minimum and maximum message size in dwords */ +#define FM10K_MBX_MSG_MAX_SIZE \ + ((FM10K_MBX_TX_BUFFER_SIZE - 1) & (FM10K_MBX_RX_BUFFER_SIZE - 1)) +#define FM10K_VFMBX_MSG_MTU ((FM10K_VFMBMEM_LEN / 2) - 1) + +#define FM10K_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */ +#define FM10K_MBX_INIT_DELAY 500 /* microseconds between retries */ + +struct fm10k_mbx_info { + /* function pointers for mailbox operations */ + struct fm10k_mbx_ops ops; + const struct fm10k_msg_data *msg_data; + + /* message FIFOs */ + struct fm10k_mbx_fifo rx; + struct fm10k_mbx_fifo tx; + + /* delay for handling timeouts */ + u32 timeout; + u32 usec_delay; + + /* mailbox state info */ + u32 mbx_reg, mbmem_reg, mbx_lock, mbx_hdr; + u16 max_size, mbmem_len; + u16 tail, tail_len, pulled; + u16 head, head_len, pushed; + u16 local, remote; + enum fm10k_mbx_state state; + + /* result of last mailbox test */ + s32 test_result; + + /* statistics */ + u64 tx_busy; + u64 tx_dropped; + u64 tx_messages; + u64 tx_dwords; + u64 tx_mbmem_pulled; + u64 rx_messages; + u64 rx_dwords; + u64 rx_mbmem_pushed; + u64 rx_parse_err; + + /* Buffer to store messages */ + u32 buffer[FM10K_MBX_BUFFER_SIZE]; +}; + +s32 fm10k_pfvf_mbx_init(struct fm10k_hw *, struct fm10k_mbx_info *, + const struct fm10k_msg_data *, u8); +s32 fm10k_sm_mbx_init(struct fm10k_hw *, struct fm10k_mbx_info *, + const struct fm10k_msg_data *); + +#endif /* _FM10K_MBX_H_ */ diff --git a/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_osdep.h b/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_osdep.h new file mode 100644 index 000000000..019fba5e2 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_osdep.h @@ -0,0 +1,139 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2013 - 2015 Intel Corporation + */ + +#ifndef _FM10K_OSDEP_H_ +#define _FM10K_OSDEP_H_ + +#include +#include +#include +#include +#include +#include +#include + +#include "../fm10k_logs.h" + +/* TODO: this does not look like it should be used... 
*/ +#define ERROR_REPORT2(v1, v2, v3) do { } while (0) + +#ifndef BOULDER_RAPIDS_HW +#define BOULDER_RAPIDS_HW +#endif + +#define STATIC static +#define DEBUGFUNC(F) DEBUGOUT(F "\n"); +#define DEBUGOUT(S, args...) PMD_DRV_LOG_RAW(DEBUG, S, ##args) +#define DEBUGOUT1(S, args...) DEBUGOUT(S, ##args) +#define DEBUGOUT2(S, args...) DEBUGOUT(S, ##args) +#define DEBUGOUT3(S, args...) DEBUGOUT(S, ##args) +#define DEBUGOUT6(S, args...) DEBUGOUT(S, ##args) +#define DEBUGOUT7(S, args...) DEBUGOUT(S, ##args) + +#define FALSE 0 +#define TRUE 1 + +typedef uint8_t u8; +typedef int8_t s8; +typedef uint16_t u16; +typedef int16_t s16; +typedef uint32_t u32; +typedef int32_t s32; +typedef int64_t s64; +typedef uint64_t u64; + +#ifndef __le16 +#define __le16 u16 +#define __le32 u32 +#define __le64 u64 +#endif +#ifndef __be16 +#define __be16 u16 +#define __be32 u32 +#define __be64 u64 +#endif + +/* offsets are WORD offsets, not BYTE offsets */ +#define FM10K_WRITE_REG(hw, reg, val) \ + rte_write32((val), ((hw)->hw_addr + (reg))) + +#define FM10K_READ_REG(hw, reg) rte_read32(((hw)->hw_addr + (reg))) + +#define FM10K_WRITE_FLUSH(a) FM10K_READ_REG(a, FM10K_CTRL) + +#define FM10K_PCI_REG(reg) rte_read32(reg) + +#define FM10K_PCI_REG_WRITE(reg, value) rte_write32((value), (reg)) + +/* not implemented */ +#define FM10K_READ_PCI_WORD(hw, reg) 0 + +#define FM10K_WRITE_MBX(hw, reg, value) FM10K_WRITE_REG(hw, reg, value) +#define FM10K_READ_MBX(hw, reg) FM10K_READ_REG(hw, reg) + +#define FM10K_LE16_TO_CPU rte_le_to_cpu_16 +#define FM10K_LE32_TO_CPU rte_le_to_cpu_32 +#define FM10K_CPU_TO_LE32 rte_cpu_to_le_32 +#define FM10K_CPU_TO_LE16 rte_cpu_to_le_16 +#define le16_to_cpu rte_le_to_cpu_16 + +#define FM10K_RMB rte_rmb +#define FM10K_WMB rte_wmb + +#define usec_delay rte_delay_us + +#define FM10K_REMOVED(hw_addr) (!(hw_addr)) + +#ifndef FM10K_IS_ZERO_ETHER_ADDR +/* make certain address is not 0 */ +#define FM10K_IS_ZERO_ETHER_ADDR(addr) \ +(!((addr)[0] | (addr)[1] | (addr)[2] | (addr)[3] | (addr)[4] | (addr)[5])) +#endif + +#ifndef FM10K_IS_MULTICAST_ETHER_ADDR +#define FM10K_IS_MULTICAST_ETHER_ADDR(addr) ((addr)[0] & 0x1) +#endif + +#ifndef FM10K_IS_VALID_ETHER_ADDR +/* make certain address is not multicast or 0 */ +#define FM10K_IS_VALID_ETHER_ADDR(addr) \ +(!FM10K_IS_MULTICAST_ETHER_ADDR(addr) && !FM10K_IS_ZERO_ETHER_ADDR(addr)) +#endif + +#ifndef do_div +#define do_div(n, base) ({\ + (n) = (n) / (base);\ +}) +#endif /* do_div */ + +/* DPDK can't access IOMEM directly */ +#ifndef FM10K_WRITE_SW_REG +#define FM10K_WRITE_SW_REG(v1, v2, v3) do { } while (0) +#endif + +#ifndef fm10k_read_reg +#define fm10k_read_reg FM10K_READ_REG +#endif + +#define FM10K_INTEL_VENDOR_ID 0x8086 +#define FM10K_DMA_CTRL_MINMSS_SHIFT 9 +#define FM10K_EICR_PCA_FAULT 0x00000001 +#define FM10K_EICR_THI_FAULT 0x00000004 +#define FM10K_EICR_FUM_FAULT 0x00000020 +#define FM10K_EICR_SRAMERROR 0x00000400 +#define FM10K_SRAM_IP 0x13003 +#define FM10K_RXINT_TIMER_SHIFT 8 +#define FM10K_TXINT_TIMER_SHIFT 8 +#define FM10K_RXD_PKTTYPE_MASK 0x03F0 +#define FM10K_RXD_PKTTYPE_SHIFT 4 + +#define FM10K_RXD_STATUS_IPCS 0x0008 /* Indicates IPv4 csum */ +#define FM10K_RXD_STATUS_HBO 0x0400 /* header buffer overrun */ + +#define FM10K_TSO_MINMSS \ + (FM10K_DMA_CTRL_MINMSS_64 >> FM10K_DMA_CTRL_MINMSS_SHIFT) +#define FM10K_TSO_MIN_HEADERLEN 54 +#define FM10K_TSO_MAX_HEADERLEN 192 + +#endif /* _FM10K_OSDEP_H_ */ diff --git a/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_pf.c b/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_pf.c new file mode 100644 index 
000000000..439dd224d --- /dev/null +++ b/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_pf.c @@ -0,0 +1,2099 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2013 - 2015 Intel Corporation + */ + +#include "fm10k_pf.h" +#include "fm10k_vf.h" + +/** + * fm10k_reset_hw_pf - PF hardware reset + * @hw: pointer to hardware structure + * + * This function should return the hardware to a state similar to the + * one it is in after being powered on. + **/ +STATIC s32 fm10k_reset_hw_pf(struct fm10k_hw *hw) +{ + s32 err; + u32 reg; + u16 i; + + DEBUGFUNC("fm10k_reset_hw_pf"); + + /* Disable interrupts */ + FM10K_WRITE_REG(hw, FM10K_EIMR, FM10K_EIMR_DISABLE(ALL)); + + /* Lock ITR2 reg 0 into itself and disable interrupt moderation */ + FM10K_WRITE_REG(hw, FM10K_ITR2(0), 0); + FM10K_WRITE_REG(hw, FM10K_INT_CTRL, 0); + + /* We assume here Tx and Rx queue 0 are owned by the PF */ + + /* Shut off VF access to their queues forcing them to queue 0 */ + for (i = 0; i < FM10K_TQMAP_TABLE_SIZE; i++) { + FM10K_WRITE_REG(hw, FM10K_TQMAP(i), 0); + FM10K_WRITE_REG(hw, FM10K_RQMAP(i), 0); + } + + /* shut down all rings */ + err = fm10k_disable_queues_generic(hw, FM10K_MAX_QUEUES); + if (err == FM10K_ERR_REQUESTS_PENDING) { + hw->mac.reset_while_pending++; + goto force_reset; + } else if (err) { + return err; + } + + /* Verify that DMA is no longer active */ + reg = FM10K_READ_REG(hw, FM10K_DMA_CTRL); + if (reg & (FM10K_DMA_CTRL_TX_ACTIVE | FM10K_DMA_CTRL_RX_ACTIVE)) + return FM10K_ERR_DMA_PENDING; + +force_reset: + /* Initiate data path reset */ + reg = FM10K_DMA_CTRL_DATAPATH_RESET; + FM10K_WRITE_REG(hw, FM10K_DMA_CTRL, reg); + + /* Flush write and allow 100us for reset to complete */ + FM10K_WRITE_FLUSH(hw); + usec_delay(FM10K_RESET_TIMEOUT); + + /* Verify we made it out of reset */ + reg = FM10K_READ_REG(hw, FM10K_IP); + if (!(reg & FM10K_IP_NOTINRESET)) + return FM10K_ERR_RESET_FAILED; + + return FM10K_SUCCESS; +} + +/** + * fm10k_is_ari_hierarchy_pf - Indicate ARI hierarchy support + * @hw: pointer to hardware structure + * + * Looks at the ARI hierarchy bit to determine whether ARI is supported or not.
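Editor's note: fm10k_reset_hw_pf() follows a common reset idiom: set the reset bit, flush the posted write with a read, wait, then check a status bit to confirm the device left reset. A minimal sketch of that idiom, with an ordinary array standing in for the memory-mapped register file and invented register/bit names:

    /* Illustrative sketch, not part of the driver: write reset bit, flush,
     * then verify a not-in-reset status bit.
     */
    #include <stdint.h>
    #include <stdio.h>

    #define REG_DMA_CTRL   0
    #define REG_IP         1
    #define DATAPATH_RESET 0x1u
    #define NOTINRESET     0x2u

    static uint32_t regs[2] = { 0, NOTINRESET };  /* toy device, always out of reset */

    int main(void)
    {
        regs[REG_DMA_CTRL] = DATAPATH_RESET;  /* initiate data path reset */
        (void)regs[REG_DMA_CTRL];             /* read back to flush the write */
        /* a real driver would usec_delay() here before checking status */

        if (!(regs[REG_IP] & NOTINRESET)) {
            printf("reset failed\n");
            return 1;
        }
        printf("device out of reset\n");
        return 0;
    }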
+ **/ +STATIC bool fm10k_is_ari_hierarchy_pf(struct fm10k_hw *hw) +{ + u16 sriov_ctrl = FM10K_READ_PCI_WORD(hw, FM10K_PCIE_SRIOV_CTRL); + + DEBUGFUNC("fm10k_is_ari_hierarchy_pf"); + + return !!(sriov_ctrl & FM10K_PCIE_SRIOV_CTRL_VFARI); +} + +/** + * fm10k_init_hw_pf - PF hardware initialization + * @hw: pointer to hardware structure + * + **/ +STATIC s32 fm10k_init_hw_pf(struct fm10k_hw *hw) +{ + u32 dma_ctrl, txqctl; + u16 i; + + DEBUGFUNC("fm10k_init_hw_pf"); + + /* Establish default VSI as valid */ + FM10K_WRITE_REG(hw, FM10K_DGLORTDEC(fm10k_dglort_default), 0); + FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(fm10k_dglort_default), + FM10K_DGLORTMAP_ANY); + + /* Invalidate all other GLORT entries */ + for (i = 1; i < FM10K_DGLORT_COUNT; i++) + FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(i), FM10K_DGLORTMAP_NONE); + + /* reset ITR2(0) to point to itself */ + FM10K_WRITE_REG(hw, FM10K_ITR2(0), 0); + + /* reset VF ITR2(0) to point to 0 avoid PF registers */ + FM10K_WRITE_REG(hw, FM10K_ITR2(FM10K_ITR_REG_COUNT_PF), 0); + + /* loop through all PF ITR2 registers pointing them to the previous */ + for (i = 1; i < FM10K_ITR_REG_COUNT_PF; i++) + FM10K_WRITE_REG(hw, FM10K_ITR2(i), i - 1); + + /* Enable interrupt moderator if not already enabled */ + FM10K_WRITE_REG(hw, FM10K_INT_CTRL, FM10K_INT_CTRL_ENABLEMODERATOR); + + /* compute the default txqctl configuration */ + txqctl = FM10K_TXQCTL_PF | FM10K_TXQCTL_UNLIMITED_BW | + (hw->mac.default_vid << FM10K_TXQCTL_VID_SHIFT); + + for (i = 0; i < FM10K_MAX_QUEUES; i++) { + /* configure rings for 256 Queue / 32 Descriptor cache mode */ + FM10K_WRITE_REG(hw, FM10K_TQDLOC(i), + (i * FM10K_TQDLOC_BASE_32_DESC) | + FM10K_TQDLOC_SIZE_32_DESC); + FM10K_WRITE_REG(hw, FM10K_TXQCTL(i), txqctl); + + /* configure rings to provide TPH processing hints */ + FM10K_WRITE_REG(hw, FM10K_TPH_TXCTRL(i), + FM10K_TPH_TXCTRL_DESC_TPHEN | + FM10K_TPH_TXCTRL_DESC_RROEN | + FM10K_TPH_TXCTRL_DESC_WROEN | + FM10K_TPH_TXCTRL_DATA_RROEN); + FM10K_WRITE_REG(hw, FM10K_TPH_RXCTRL(i), + FM10K_TPH_RXCTRL_DESC_TPHEN | + FM10K_TPH_RXCTRL_DESC_RROEN | + FM10K_TPH_RXCTRL_DATA_WROEN | + FM10K_TPH_RXCTRL_HDR_WROEN); + } + + /* set max hold interval to align with 1.024 usec in all modes and + * store ITR scale + */ + switch (hw->bus.speed) { + case fm10k_bus_speed_2500: + dma_ctrl = FM10K_DMA_CTRL_MAX_HOLD_1US_GEN1; + hw->mac.itr_scale = FM10K_TDLEN_ITR_SCALE_GEN1; + break; + case fm10k_bus_speed_5000: + dma_ctrl = FM10K_DMA_CTRL_MAX_HOLD_1US_GEN2; + hw->mac.itr_scale = FM10K_TDLEN_ITR_SCALE_GEN2; + break; + case fm10k_bus_speed_8000: + dma_ctrl = FM10K_DMA_CTRL_MAX_HOLD_1US_GEN3; + hw->mac.itr_scale = FM10K_TDLEN_ITR_SCALE_GEN3; + break; + default: + dma_ctrl = 0; + /* just in case, assume Gen3 ITR scale */ + hw->mac.itr_scale = FM10K_TDLEN_ITR_SCALE_GEN3; + break; + } + + /* Configure TSO flags */ + FM10K_WRITE_REG(hw, FM10K_DTXTCPFLGL, FM10K_TSO_FLAGS_LOW); + FM10K_WRITE_REG(hw, FM10K_DTXTCPFLGH, FM10K_TSO_FLAGS_HI); + + /* Enable DMA engine + * Set Rx Descriptor size to 32 + * Set Minimum MSS to 64 + * Set Maximum number of Rx queues to 256 / 32 Descriptor + */ + dma_ctrl |= FM10K_DMA_CTRL_TX_ENABLE | FM10K_DMA_CTRL_RX_ENABLE | + FM10K_DMA_CTRL_RX_DESC_SIZE | FM10K_DMA_CTRL_MINMSS_64 | + FM10K_DMA_CTRL_32_DESC; + + FM10K_WRITE_REG(hw, FM10K_DMA_CTRL, dma_ctrl); + + /* record maximum queue count, we limit ourselves to 128 */ + hw->mac.max_queues = FM10K_MAX_QUEUES_PF; + + /* We support either 64 VFs or 7 VFs depending on if we have ARI */ + hw->iov.total_vfs = fm10k_is_ari_hierarchy_pf(hw) ? 
64 : 7; + + return FM10K_SUCCESS; +} + +#ifndef NO_IS_SLOT_APPROPRIATE_CHECK +/** + * fm10k_is_slot_appropriate_pf - Indicate appropriate slot for this SKU + * @hw: pointer to hardware structure + * + * Looks at the PCIe bus info to confirm whether or not this slot can support + * the necessary bandwidth for this device. + **/ +STATIC bool fm10k_is_slot_appropriate_pf(struct fm10k_hw *hw) +{ + DEBUGFUNC("fm10k_is_slot_appropriate_pf"); + + return (hw->bus.speed == hw->bus_caps.speed) && + (hw->bus.width == hw->bus_caps.width); +} + +#endif +/** + * fm10k_update_vlan_pf - Update status of VLAN ID in VLAN filter table + * @hw: pointer to hardware structure + * @vid: VLAN ID to add to table + * @vsi: Index indicating VF ID or PF ID in table + * @set: Indicates if this is a set or clear operation + * + * This function adds or removes the corresponding VLAN ID from the VLAN + * filter table for the corresponding function. In addition to the + * standard set/clear that supports one bit a multi-bit write is + * supported to set 64 bits at a time. + **/ +STATIC s32 fm10k_update_vlan_pf(struct fm10k_hw *hw, u32 vid, u8 vsi, bool set) +{ + u32 vlan_table, reg, mask, bit, len; + + /* verify the VSI index is valid */ + if (vsi > FM10K_VLAN_TABLE_VSI_MAX) + return FM10K_ERR_PARAM; + + /* VLAN multi-bit write: + * The multi-bit write has several parts to it. + * 24 16 8 0 + * 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | RSVD0 | Length |C|RSVD0| VLAN ID | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * + * VLAN ID: Vlan Starting value + * RSVD0: Reserved section, must be 0 + * C: Flag field, 0 is set, 1 is clear (Used in VF VLAN message) + * Length: Number of times to repeat the bit being set + */ + len = vid >> 16; + vid = (vid << 17) >> 17; + + /* verify the reserved 0 fields are 0 */ + if (len >= FM10K_VLAN_TABLE_VID_MAX || vid >= FM10K_VLAN_TABLE_VID_MAX) + return FM10K_ERR_PARAM; + + /* Loop through the table updating all required VLANs */ + for (reg = FM10K_VLAN_TABLE(vsi, vid / 32), bit = vid % 32; + len < FM10K_VLAN_TABLE_VID_MAX; + len -= 32 - bit, reg++, bit = 0) { + /* record the initial state of the register */ + vlan_table = FM10K_READ_REG(hw, reg); + + /* truncate mask if we are at the start or end of the run */ + mask = (~(u32)0 >> ((len < 31) ? 31 - len : 0)) << bit; + + /* make necessary modifications to the register */ + mask &= set ? ~vlan_table : vlan_table; + if (mask) + FM10K_WRITE_REG(hw, reg, vlan_table ^ mask); + } + + return FM10K_SUCCESS; +} + +/** + * fm10k_read_mac_addr_pf - Read device MAC address + * @hw: pointer to the HW structure + * + * Reads the device MAC address from the SM_AREA and stores the value. 
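Editor's note: fm10k_update_vlan_pf() takes its VLAN argument as a packed 32-bit value, splitting it into a starting VLAN ID and a repeat length with the shifts shown above. A short sketch of that decoding (example value only; the C flag bit is simply dropped here, as in the PF path):

    /* Illustrative sketch, not part of the driver: decode the packed
     * (length | vid) argument used by the VLAN multi-bit write.
     */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t packed = (64u << 16) | 100u;  /* repeat 64 bits starting at VLAN 100 */
        uint32_t len = packed >> 16;           /* Length field, bits 16 and up */
        uint32_t vid = (packed << 17) >> 17;   /* keep bits 0-14, drop C + Length */

        printf("vid=%u len=%u\n", vid, len);
        return 0;
    }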
+ **/ +STATIC s32 fm10k_read_mac_addr_pf(struct fm10k_hw *hw) +{ + u8 perm_addr[ETH_ALEN]; + u32 serial_num; + + DEBUGFUNC("fm10k_read_mac_addr_pf"); + + serial_num = FM10K_READ_REG(hw, FM10K_SM_AREA(1)); + + /* last byte should be all 1's */ + if ((~serial_num) << 24) + return FM10K_ERR_INVALID_MAC_ADDR; + + perm_addr[0] = (u8)(serial_num >> 24); + perm_addr[1] = (u8)(serial_num >> 16); + perm_addr[2] = (u8)(serial_num >> 8); + + serial_num = FM10K_READ_REG(hw, FM10K_SM_AREA(0)); + + /* first byte should be all 1's */ + if ((~serial_num) >> 24) + return FM10K_ERR_INVALID_MAC_ADDR; + + perm_addr[3] = (u8)(serial_num >> 16); + perm_addr[4] = (u8)(serial_num >> 8); + perm_addr[5] = (u8)(serial_num); + + memcpy(hw->mac.perm_addr, perm_addr, ETH_ALEN); + memcpy(hw->mac.addr, perm_addr, ETH_ALEN); + + return FM10K_SUCCESS; +} + +/** + * fm10k_glort_valid_pf - Validate that the provided glort is valid + * @hw: pointer to the HW structure + * @glort: base glort to be validated + * + * This function will return an error if the provided glort is invalid + **/ +bool fm10k_glort_valid_pf(struct fm10k_hw *hw, u16 glort) +{ + glort &= hw->mac.dglort_map >> FM10K_DGLORTMAP_MASK_SHIFT; + + return glort == (hw->mac.dglort_map & FM10K_DGLORTMAP_NONE); +} + +/** + * fm10k_update_xc_addr_pf - Update device addresses + * @hw: pointer to the HW structure + * @glort: base resource tag for this request + * @mac: MAC address to add/remove from table + * @vid: VLAN ID to add/remove from table + * @add: Indicates if this is an add or remove operation + * @flags: flags field to indicate add and secure + * + * This function generates a message to the Switch API requesting + * that the given logical port add/remove the given L2 MAC/VLAN address. + **/ +STATIC s32 fm10k_update_xc_addr_pf(struct fm10k_hw *hw, u16 glort, + const u8 *mac, u16 vid, bool add, u8 flags) +{ + struct fm10k_mbx_info *mbx = &hw->mbx; + struct fm10k_mac_update mac_update; + u32 msg[5]; + + DEBUGFUNC("fm10k_update_xc_addr_pf"); + + /* clear set bit from VLAN ID */ + vid &= ~FM10K_VLAN_CLEAR; + + /* if glort or VLAN are not valid return error */ + if (!fm10k_glort_valid_pf(hw, glort) || vid >= FM10K_VLAN_TABLE_VID_MAX) + return FM10K_ERR_PARAM; + + /* record fields */ + mac_update.mac_lower = FM10K_CPU_TO_LE32(((u32)mac[2] << 24) | + ((u32)mac[3] << 16) | + ((u32)mac[4] << 8) | + ((u32)mac[5])); + mac_update.mac_upper = FM10K_CPU_TO_LE16(((u16)mac[0] << 8) | + ((u16)mac[1])); + mac_update.vlan = FM10K_CPU_TO_LE16(vid); + mac_update.glort = FM10K_CPU_TO_LE16(glort); + mac_update.action = add ? 0 : 1; + mac_update.flags = flags; + + /* populate mac_update fields */ + fm10k_tlv_msg_init(msg, FM10K_PF_MSG_ID_UPDATE_MAC_FWD_RULE); + fm10k_tlv_attr_put_le_struct(msg, FM10K_PF_ATTR_ID_MAC_UPDATE, + &mac_update, sizeof(mac_update)); + + /* load onto outgoing mailbox */ + return mbx->ops.enqueue_tx(hw, mbx, msg); +} + +/** + * fm10k_update_uc_addr_pf - Update device unicast addresses + * @hw: pointer to the HW structure + * @glort: base resource tag for this request + * @mac: MAC address to add/remove from table + * @vid: VLAN ID to add/remove from table + * @add: Indicates if this is an add or remove operation + * @flags: flags field to indicate add and secure + * + * This function is used to add or remove unicast addresses for + * the PF. 
+ **/ +STATIC s32 fm10k_update_uc_addr_pf(struct fm10k_hw *hw, u16 glort, + const u8 *mac, u16 vid, bool add, u8 flags) +{ + DEBUGFUNC("fm10k_update_uc_addr_pf"); + + /* verify MAC address is valid */ + if (!IS_VALID_ETHER_ADDR(mac)) + return FM10K_ERR_PARAM; + + return fm10k_update_xc_addr_pf(hw, glort, mac, vid, add, flags); +} + +/** + * fm10k_update_mc_addr_pf - Update device multicast addresses + * @hw: pointer to the HW structure + * @glort: base resource tag for this request + * @mac: MAC address to add/remove from table + * @vid: VLAN ID to add/remove from table + * @add: Indicates if this is an add or remove operation + * + * This function is used to add or remove multicast MAC addresses for + * the PF. + **/ +STATIC s32 fm10k_update_mc_addr_pf(struct fm10k_hw *hw, u16 glort, + const u8 *mac, u16 vid, bool add) +{ + DEBUGFUNC("fm10k_update_mc_addr_pf"); + + /* verify multicast address is valid */ + if (!IS_MULTICAST_ETHER_ADDR(mac)) + return FM10K_ERR_PARAM; + + return fm10k_update_xc_addr_pf(hw, glort, mac, vid, add, 0); +} + +/** + * fm10k_update_xcast_mode_pf - Request update of multicast mode + * @hw: pointer to hardware structure + * @glort: base resource tag for this request + * @mode: integer value indicating mode being requested + * + * This function will attempt to request a higher mode for the port + * so that it can enable either multicast, multicast promiscuous, or + * promiscuous mode of operation. + **/ +STATIC s32 fm10k_update_xcast_mode_pf(struct fm10k_hw *hw, u16 glort, u8 mode) +{ + struct fm10k_mbx_info *mbx = &hw->mbx; + u32 msg[3], xcast_mode; + + DEBUGFUNC("fm10k_update_xcast_mode_pf"); + + if (mode > FM10K_XCAST_MODE_NONE) + return FM10K_ERR_PARAM; + + /* if glort is not valid return error */ + if (!fm10k_glort_valid_pf(hw, glort)) + return FM10K_ERR_PARAM; + + /* write xcast mode as a single u32 value, + * lower 16 bits: glort + * upper 16 bits: mode + */ + xcast_mode = ((u32)mode << 16) | glort; + + /* generate message requesting to change xcast mode */ + fm10k_tlv_msg_init(msg, FM10K_PF_MSG_ID_XCAST_MODES); + fm10k_tlv_attr_put_u32(msg, FM10K_PF_ATTR_ID_XCAST_MODE, xcast_mode); + + /* load onto outgoing mailbox */ + return mbx->ops.enqueue_tx(hw, mbx, msg); +} + +/** + * fm10k_update_int_moderator_pf - Update interrupt moderator linked list + * @hw: pointer to hardware structure + * + * This function walks through the MSI-X vector table to determine the + * number of active interrupts and based on that information updates the + * interrupt moderator linked list. 
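+ *
+ * Sketch of the update, matching the loop below: PF vectors are scanned
+ * from FM10K_ITR_REG_COUNT_PF - 1 downward, the first unmasked vector
+ * found becomes the tail of the list, and VFITR2[0] (plus ITR2[0] when
+ * no VFs are allocated) is pointed at it.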
+ **/ +STATIC void fm10k_update_int_moderator_pf(struct fm10k_hw *hw) +{ + u32 i; + + /* Disable interrupt moderator */ + FM10K_WRITE_REG(hw, FM10K_INT_CTRL, 0); + + /* loop through PF from last to first looking enabled vectors */ + for (i = FM10K_ITR_REG_COUNT_PF - 1; i; i--) { + if (!FM10K_READ_REG(hw, FM10K_MSIX_VECTOR_MASK(i))) + break; + } + + /* always reset VFITR2[0] to point to last enabled PF vector */ + FM10K_WRITE_REG(hw, FM10K_ITR2(FM10K_ITR_REG_COUNT_PF), i); + + /* reset ITR2[0] to point to last enabled PF vector */ + if (!hw->iov.num_vfs) + FM10K_WRITE_REG(hw, FM10K_ITR2(0), i); + + /* Enable interrupt moderator */ + FM10K_WRITE_REG(hw, FM10K_INT_CTRL, FM10K_INT_CTRL_ENABLEMODERATOR); +} + +/** + * fm10k_update_lport_state_pf - Notify the switch of a change in port state + * @hw: pointer to the HW structure + * @glort: base resource tag for this request + * @count: number of logical ports being updated + * @enable: boolean value indicating enable or disable + * + * This function is used to add/remove a logical port from the switch. + **/ +STATIC s32 fm10k_update_lport_state_pf(struct fm10k_hw *hw, u16 glort, + u16 count, bool enable) +{ + struct fm10k_mbx_info *mbx = &hw->mbx; + u32 msg[3], lport_msg; + + DEBUGFUNC("fm10k_lport_state_pf"); + + /* do nothing if we are being asked to create or destroy 0 ports */ + if (!count) + return FM10K_SUCCESS; + + /* if glort is not valid return error */ + if (!fm10k_glort_valid_pf(hw, glort)) + return FM10K_ERR_PARAM; + + /* reset multicast mode if deleting lport */ + if (!enable) + fm10k_update_xcast_mode_pf(hw, glort, FM10K_XCAST_MODE_NONE); + + /* construct the lport message from the 2 pieces of data we have */ + lport_msg = ((u32)count << 16) | glort; + + /* generate lport create/delete message */ + fm10k_tlv_msg_init(msg, enable ? FM10K_PF_MSG_ID_LPORT_CREATE : + FM10K_PF_MSG_ID_LPORT_DELETE); + fm10k_tlv_attr_put_u32(msg, FM10K_PF_ATTR_ID_PORT, lport_msg); + + /* load onto outgoing mailbox */ + return mbx->ops.enqueue_tx(hw, mbx, msg); +} + +/** + * fm10k_configure_dglort_map_pf - Configures GLORT entry and queues + * @hw: pointer to hardware structure + * @dglort: pointer to dglort configuration structure + * + * Reads the configuration structure contained in dglort_cfg and uses + * that information to then populate a DGLORTMAP/DEC entry and the queues + * to which it has been assigned. 
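+ *
+ * Hypothetical example, with values chosen only for illustration: a
+ * dglort_cfg of rss_l = 2, pc_l = 0, vsi_l = 0 and queue_l = 0 makes the
+ * SGLORT loop below program BIT(2) = 4 queues per glort, which the PC
+ * loop then spreads across BIT(0) = 1 traffic class.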
+ **/ +STATIC s32 fm10k_configure_dglort_map_pf(struct fm10k_hw *hw, + struct fm10k_dglort_cfg *dglort) +{ + u16 glort, queue_count, vsi_count, pc_count; + u16 vsi, queue, pc, q_idx; + u32 txqctl, dglortdec, dglortmap; + + /* verify the dglort pointer */ + if (!dglort) + return FM10K_ERR_PARAM; + + /* verify the dglort values */ + if ((dglort->idx > 7) || (dglort->rss_l > 7) || (dglort->pc_l > 3) || + (dglort->vsi_l > 6) || (dglort->vsi_b > 64) || + (dglort->queue_l > 8) || (dglort->queue_b >= 256)) + return FM10K_ERR_PARAM; + + /* determine count of VSIs and queues */ + queue_count = BIT(dglort->rss_l + dglort->pc_l); + vsi_count = BIT(dglort->vsi_l + dglort->queue_l); + glort = dglort->glort; + q_idx = dglort->queue_b; + + /* configure SGLORT for queues */ + for (vsi = 0; vsi < vsi_count; vsi++, glort++) { + for (queue = 0; queue < queue_count; queue++, q_idx++) { + if (q_idx >= FM10K_MAX_QUEUES) + break; + + FM10K_WRITE_REG(hw, FM10K_TX_SGLORT(q_idx), glort); + FM10K_WRITE_REG(hw, FM10K_RX_SGLORT(q_idx), glort); + } + } + + /* determine count of PCs and queues */ + queue_count = BIT(dglort->queue_l + dglort->rss_l + dglort->vsi_l); + pc_count = BIT(dglort->pc_l); + + /* configure PC for Tx queues */ + for (pc = 0; pc < pc_count; pc++) { + q_idx = pc + dglort->queue_b; + for (queue = 0; queue < queue_count; queue++) { + if (q_idx >= FM10K_MAX_QUEUES) + break; + + txqctl = FM10K_READ_REG(hw, FM10K_TXQCTL(q_idx)); + txqctl &= ~FM10K_TXQCTL_PC_MASK; + txqctl |= pc << FM10K_TXQCTL_PC_SHIFT; + FM10K_WRITE_REG(hw, FM10K_TXQCTL(q_idx), txqctl); + + q_idx += pc_count; + } + } + + /* configure DGLORTDEC */ + dglortdec = ((u32)(dglort->rss_l) << FM10K_DGLORTDEC_RSSLENGTH_SHIFT) | + ((u32)(dglort->queue_b) << FM10K_DGLORTDEC_QBASE_SHIFT) | + ((u32)(dglort->pc_l) << FM10K_DGLORTDEC_PCLENGTH_SHIFT) | + ((u32)(dglort->vsi_b) << FM10K_DGLORTDEC_VSIBASE_SHIFT) | + ((u32)(dglort->vsi_l) << FM10K_DGLORTDEC_VSILENGTH_SHIFT) | + ((u32)(dglort->queue_l)); + if (dglort->inner_rss) + dglortdec |= FM10K_DGLORTDEC_INNERRSS_ENABLE; + + /* configure DGLORTMAP */ + dglortmap = (dglort->idx == fm10k_dglort_default) ? + FM10K_DGLORTMAP_ANY : FM10K_DGLORTMAP_ZERO; + dglortmap <<= dglort->vsi_l + dglort->queue_l + dglort->shared_l; + dglortmap |= dglort->glort; + + /* write values to hardware */ + FM10K_WRITE_REG(hw, FM10K_DGLORTDEC(dglort->idx), dglortdec); + FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(dglort->idx), dglortmap); + + return FM10K_SUCCESS; +} + +u16 fm10k_queues_per_pool(struct fm10k_hw *hw) +{ + u16 num_pools = hw->iov.num_pools; + + return (num_pools > 32) ? 2 : (num_pools > 16) ? 4 : (num_pools > 8) ? + 8 : FM10K_MAX_QUEUES_POOL; +} + +u16 fm10k_vf_queue_index(struct fm10k_hw *hw, u16 vf_idx) +{ + u16 num_vfs = hw->iov.num_vfs; + u16 vf_q_idx = FM10K_MAX_QUEUES; + + vf_q_idx -= fm10k_queues_per_pool(hw) * (num_vfs - vf_idx); + + return vf_q_idx; +} + +STATIC u16 fm10k_vectors_per_pool(struct fm10k_hw *hw) +{ + u16 num_pools = hw->iov.num_pools; + + return (num_pools > 32) ? 8 : (num_pools > 16) ? 
16 : + FM10K_MAX_VECTORS_POOL; +} + +STATIC u16 fm10k_vf_vector_index(struct fm10k_hw *hw, u16 vf_idx) +{ + u16 vf_v_idx = FM10K_MAX_VECTORS_PF; + + vf_v_idx += fm10k_vectors_per_pool(hw) * vf_idx; + + return vf_v_idx; +} + +/** + * fm10k_iov_assign_resources_pf - Assign pool resources for virtualization + * @hw: pointer to the HW structure + * @num_vfs: number of VFs to be allocated + * @num_pools: number of virtualization pools to be allocated + * + * Allocates queues and traffic classes to virtualization entities to prepare + * the PF for SR-IOV and VMDq + **/ +STATIC s32 fm10k_iov_assign_resources_pf(struct fm10k_hw *hw, u16 num_vfs, + u16 num_pools) +{ + u16 qmap_stride, qpp, vpp, vf_q_idx, vf_q_idx0, qmap_idx; + u32 vid = hw->mac.default_vid << FM10K_TXQCTL_VID_SHIFT; + int i, j; + + /* hardware only supports up to 64 pools */ + if (num_pools > 64) + return FM10K_ERR_PARAM; + + /* the number of VFs cannot exceed the number of pools */ + if ((num_vfs > num_pools) || (num_vfs > hw->iov.total_vfs)) + return FM10K_ERR_PARAM; + + /* record number of virtualization entities */ + hw->iov.num_vfs = num_vfs; + hw->iov.num_pools = num_pools; + + /* determine qmap offsets and counts */ + qmap_stride = (num_vfs > 8) ? 32 : 256; + qpp = fm10k_queues_per_pool(hw); + vpp = fm10k_vectors_per_pool(hw); + + /* calculate starting index for queues */ + vf_q_idx = fm10k_vf_queue_index(hw, 0); + qmap_idx = 0; + + /* establish TCs with -1 credits and no quanta to prevent transmit */ + for (i = 0; i < num_vfs; i++) { + FM10K_WRITE_REG(hw, FM10K_TC_MAXCREDIT(i), 0); + FM10K_WRITE_REG(hw, FM10K_TC_RATE(i), 0); + FM10K_WRITE_REG(hw, FM10K_TC_CREDIT(i), + FM10K_TC_CREDIT_CREDIT_MASK); + } + + /* zero out all mbmem registers */ + for (i = FM10K_VFMBMEM_LEN * num_vfs; i--;) + FM10K_WRITE_REG(hw, FM10K_MBMEM(i), 0); + + /* clear event notification of VF FLR */ + FM10K_WRITE_REG(hw, FM10K_PFVFLREC(0), ~0); + FM10K_WRITE_REG(hw, FM10K_PFVFLREC(1), ~0); + + /* loop through unallocated rings assigning them back to PF */ + for (i = FM10K_MAX_QUEUES_PF; i < vf_q_idx; i++) { + FM10K_WRITE_REG(hw, FM10K_TXDCTL(i), 0); + FM10K_WRITE_REG(hw, FM10K_TXQCTL(i), FM10K_TXQCTL_PF | + FM10K_TXQCTL_UNLIMITED_BW | vid); + FM10K_WRITE_REG(hw, FM10K_RXQCTL(i), FM10K_RXQCTL_PF); + } + + /* PF should have already updated VFITR2[0] */ + + /* update all ITR registers to flow to VFITR2[0] */ + for (i = FM10K_ITR_REG_COUNT_PF + 1; i < FM10K_ITR_REG_COUNT; i++) { + if (!(i & (vpp - 1))) + FM10K_WRITE_REG(hw, FM10K_ITR2(i), i - vpp); + else + FM10K_WRITE_REG(hw, FM10K_ITR2(i), i - 1); + } + + /* update PF ITR2[0] to reference the last vector */ + FM10K_WRITE_REG(hw, FM10K_ITR2(0), + fm10k_vf_vector_index(hw, num_vfs - 1)); + + /* loop through rings populating rings and TCs */ + for (i = 0; i < num_vfs; i++) { + /* record index for VF queue 0 for use in end of loop */ + vf_q_idx0 = vf_q_idx; + + for (j = 0; j < qpp; j++, qmap_idx++, vf_q_idx++) { + /* assign VF and locked TC to queues */ + FM10K_WRITE_REG(hw, FM10K_TXDCTL(vf_q_idx), 0); + FM10K_WRITE_REG(hw, FM10K_TXQCTL(vf_q_idx), + (i << FM10K_TXQCTL_TC_SHIFT) | i | + FM10K_TXQCTL_VF | vid); + FM10K_WRITE_REG(hw, FM10K_RXDCTL(vf_q_idx), + FM10K_RXDCTL_WRITE_BACK_MIN_DELAY | + FM10K_RXDCTL_DROP_ON_EMPTY); + FM10K_WRITE_REG(hw, FM10K_RXQCTL(vf_q_idx), + (i << FM10K_RXQCTL_VF_SHIFT) | + FM10K_RXQCTL_VF); + + /* map queue pair to VF */ + FM10K_WRITE_REG(hw, FM10K_TQMAP(qmap_idx), vf_q_idx); + FM10K_WRITE_REG(hw, FM10K_RQMAP(qmap_idx), vf_q_idx); + } + + /* repeat the first ring for all of the 
remaining VF rings */
+ for (; j < qmap_stride; j++, qmap_idx++) {
+ FM10K_WRITE_REG(hw, FM10K_TQMAP(qmap_idx), vf_q_idx0);
+ FM10K_WRITE_REG(hw, FM10K_RQMAP(qmap_idx), vf_q_idx0);
+ }
+ }
+
+ /* loop through remaining indexes assigning all to queue 0 */
+ while (qmap_idx < FM10K_TQMAP_TABLE_SIZE) {
+ FM10K_WRITE_REG(hw, FM10K_TQMAP(qmap_idx), 0);
+ FM10K_WRITE_REG(hw, FM10K_RQMAP(qmap_idx), 0);
+ qmap_idx++;
+ }
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_iov_configure_tc_pf - Configure the shaping group for VF
+ * @hw: pointer to the HW structure
+ * @vf_idx: index of VF receiving GLORT
+ * @rate: Rate indicated in Mb/s
+ *
+ * Configures the TC for a given VF to allow only up to a given number
+ * of Mb/s of outgoing Tx throughput.
+ **/
+STATIC s32 fm10k_iov_configure_tc_pf(struct fm10k_hw *hw, u16 vf_idx, int rate)
+{
+ /* configure defaults */
+ u32 interval = FM10K_TC_RATE_INTERVAL_4US_GEN3;
+ u32 tc_rate = FM10K_TC_RATE_QUANTA_MASK;
+
+ /* verify vf is in range */
+ if (vf_idx >= hw->iov.num_vfs)
+ return FM10K_ERR_PARAM;
+
+ /* set interval to align with 4.096 usec in all modes */
+ switch (hw->bus.speed) {
+ case fm10k_bus_speed_2500:
+ interval = FM10K_TC_RATE_INTERVAL_4US_GEN1;
+ break;
+ case fm10k_bus_speed_5000:
+ interval = FM10K_TC_RATE_INTERVAL_4US_GEN2;
+ break;
+ default:
+ break;
+ }
+
+ if (rate) {
+ if (rate > FM10K_VF_TC_MAX || rate < FM10K_VF_TC_MIN)
+ return FM10K_ERR_PARAM;
+
+ /* The quanta is measured in Bytes per 4.096 or 8.192 usec
+ * The rate is provided in Mbits per second
+ * To translate from rate to quanta we need to multiply the
+ * rate by 8.192 usec and divide by 8 bits/byte. To avoid
+ * dealing with floating point we can round the values up
+ * to the nearest whole number ratio which gives us 128 / 125.
+ */
+ tc_rate = (rate * 128) / 125;
+
+ /* try to keep the rate limiting accurate by increasing
+ * the number of credits and interval for rates less than 4Gb/s
+ */
+ if (rate < 4000)
+ interval <<= 1;
+ else
+ tc_rate >>= 1;
+ }
+
+ /* update rate limiter with new values */
+ FM10K_WRITE_REG(hw, FM10K_TC_RATE(vf_idx), tc_rate | interval);
+ FM10K_WRITE_REG(hw, FM10K_TC_MAXCREDIT(vf_idx), FM10K_TC_MAXCREDIT_64K);
+ FM10K_WRITE_REG(hw, FM10K_TC_CREDIT(vf_idx), FM10K_TC_MAXCREDIT_64K);
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_iov_assign_int_moderator_pf - Add VF interrupts to moderator list
+ * @hw: pointer to the HW structure
+ * @vf_idx: index of VF receiving GLORT
+ *
+ * Update the interrupt moderator linked list to include any MSI-X
+ * interrupts which the VF has enabled in the MSI-X vector table.
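+ *
+ * Sketch, per the code below: the VF's vector pool is scanned from its
+ * top entry downward for the first unmasked vector, and that vector is
+ * linked in by writing it to ITR2[0] for the last VF, or to what appears
+ * to be the head of the next pool (ITR2[vf_v_limit]) otherwise.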
+ **/ +STATIC s32 fm10k_iov_assign_int_moderator_pf(struct fm10k_hw *hw, u16 vf_idx) +{ + u16 vf_v_idx, vf_v_limit, i; + + /* verify vf is in range */ + if (vf_idx >= hw->iov.num_vfs) + return FM10K_ERR_PARAM; + + /* determine vector offset and count */ + vf_v_idx = fm10k_vf_vector_index(hw, vf_idx); + vf_v_limit = vf_v_idx + fm10k_vectors_per_pool(hw); + + /* search for first vector that is not masked */ + for (i = vf_v_limit - 1; i > vf_v_idx; i--) { + if (!FM10K_READ_REG(hw, FM10K_MSIX_VECTOR_MASK(i))) + break; + } + + /* reset linked list so it now includes our active vectors */ + if (vf_idx == (hw->iov.num_vfs - 1)) + FM10K_WRITE_REG(hw, FM10K_ITR2(0), i); + else + FM10K_WRITE_REG(hw, FM10K_ITR2(vf_v_limit), i); + + return FM10K_SUCCESS; +} + +/** + * fm10k_iov_assign_default_mac_vlan_pf - Assign a MAC and VLAN to VF + * @hw: pointer to the HW structure + * @vf_info: pointer to VF information structure + * + * Assign a MAC address and default VLAN to a VF and notify it of the update + **/ +STATIC s32 fm10k_iov_assign_default_mac_vlan_pf(struct fm10k_hw *hw, + struct fm10k_vf_info *vf_info) +{ + u16 qmap_stride, queues_per_pool, vf_q_idx, timeout, qmap_idx, i; + u32 msg[4], txdctl, txqctl, tdbal = 0, tdbah = 0; + s32 err = FM10K_SUCCESS; + u16 vf_idx, vf_vid; + + /* verify vf is in range */ + if (!vf_info || vf_info->vf_idx >= hw->iov.num_vfs) + return FM10K_ERR_PARAM; + + /* determine qmap offsets and counts */ + qmap_stride = (hw->iov.num_vfs > 8) ? 32 : 256; + queues_per_pool = fm10k_queues_per_pool(hw); + + /* calculate starting index for queues */ + vf_idx = vf_info->vf_idx; + vf_q_idx = fm10k_vf_queue_index(hw, vf_idx); + qmap_idx = qmap_stride * vf_idx; + + /* Determine correct default VLAN ID. The FM10K_VLAN_OVERRIDE bit is + * used here to indicate to the VF that it will not have privilege to + * write VLAN_TABLE. All policy is enforced on the PF but this allows + * the VF to correctly report errors to userspace rqeuests. + */ + if (vf_info->pf_vid) + vf_vid = vf_info->pf_vid | FM10K_VLAN_OVERRIDE; + else + vf_vid = vf_info->sw_vid; + + /* generate MAC_ADDR request */ + fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_MAC_VLAN); + fm10k_tlv_attr_put_mac_vlan(msg, FM10K_MAC_VLAN_MSG_DEFAULT_MAC, + vf_info->mac, vf_vid); + + /* Configure Queue control register with new VLAN ID. The TXQCTL + * register is RO from the VF, so the PF must do this even in the + * case of notifying the VF of a new VID via the mailbox. + */ + txqctl = ((u32)vf_vid << FM10K_TXQCTL_VID_SHIFT) & + FM10K_TXQCTL_VID_MASK; + txqctl |= (vf_idx << FM10K_TXQCTL_TC_SHIFT) | + FM10K_TXQCTL_VF | vf_idx; + + for (i = 0; i < queues_per_pool; i++) + FM10K_WRITE_REG(hw, FM10K_TXQCTL(vf_q_idx + i), txqctl); + + /* try loading a message onto outgoing mailbox first */ + if (vf_info->mbx.ops.enqueue_tx) { + err = vf_info->mbx.ops.enqueue_tx(hw, &vf_info->mbx, msg); + if (err != FM10K_MBX_ERR_NO_MBX) + return err; + err = FM10K_SUCCESS; + } + + /* If we aren't connected to a mailbox, this is most likely because + * the VF driver is not running. It should thus be safe to re-map + * queues and use the registers to pass the MAC address so that the VF + * driver gets correct information during its initialization. 
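+ *
+ * For illustration, mirroring the packing done below: a VF MAC of
+ * AA:BB:CC:DD:EE:FF would be written as TDBAL = 0xDDEEFF00 and
+ * TDBAH = 0xFFAABBCC. The 0xFF in the top byte of TDBAH appears to act
+ * as a presence marker (an assumption; the VF-side check is not shown
+ * in this file).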
+ */ + + /* MAP Tx queue back to 0 temporarily, and disable it */ + FM10K_WRITE_REG(hw, FM10K_TQMAP(qmap_idx), 0); + FM10K_WRITE_REG(hw, FM10K_TXDCTL(vf_q_idx), 0); + + /* verify ring has disabled before modifying base address registers */ + txdctl = FM10K_READ_REG(hw, FM10K_TXDCTL(vf_q_idx)); + for (timeout = 0; txdctl & FM10K_TXDCTL_ENABLE; timeout++) { + /* limit ourselves to a 1ms timeout */ + if (timeout == 10) { + err = FM10K_ERR_DMA_PENDING; + goto err_out; + } + + usec_delay(100); + txdctl = FM10K_READ_REG(hw, FM10K_TXDCTL(vf_q_idx)); + } + + /* Update base address registers to contain MAC address */ + if (IS_VALID_ETHER_ADDR(vf_info->mac)) { + tdbal = (((u32)vf_info->mac[3]) << 24) | + (((u32)vf_info->mac[4]) << 16) | + (((u32)vf_info->mac[5]) << 8); + + tdbah = (((u32)0xFF) << 24) | + (((u32)vf_info->mac[0]) << 16) | + (((u32)vf_info->mac[1]) << 8) | + ((u32)vf_info->mac[2]); + } + + /* Record the base address into queue 0 */ + FM10K_WRITE_REG(hw, FM10K_TDBAL(vf_q_idx), tdbal); + FM10K_WRITE_REG(hw, FM10K_TDBAH(vf_q_idx), tdbah); + + /* Provide the VF the ITR scale, using software-defined fields in TDLEN + * to pass the information during VF initialization. See definition of + * FM10K_TDLEN_ITR_SCALE_SHIFT for more details. + */ + FM10K_WRITE_REG(hw, FM10K_TDLEN(vf_q_idx), hw->mac.itr_scale << + FM10K_TDLEN_ITR_SCALE_SHIFT); + +err_out: + /* restore the queue back to VF ownership */ + FM10K_WRITE_REG(hw, FM10K_TQMAP(qmap_idx), vf_q_idx); + return err; +} + +/** + * fm10k_iov_reset_resources_pf - Reassign queues and interrupts to a VF + * @hw: pointer to the HW structure + * @vf_info: pointer to VF information structure + * + * Reassign the interrupts and queues to a VF following an FLR + **/ +STATIC s32 fm10k_iov_reset_resources_pf(struct fm10k_hw *hw, + struct fm10k_vf_info *vf_info) +{ + u16 qmap_stride, queues_per_pool, vf_q_idx, qmap_idx; + u32 tdbal = 0, tdbah = 0, txqctl, rxqctl; + u16 vf_v_idx, vf_v_limit, vf_vid; + u8 vf_idx = vf_info->vf_idx; + int i; + + /* verify vf is in range */ + if (vf_idx >= hw->iov.num_vfs) + return FM10K_ERR_PARAM; + + /* clear event notification of VF FLR */ + FM10K_WRITE_REG(hw, FM10K_PFVFLREC(vf_idx / 32), BIT(vf_idx % 32)); + + /* force timeout and then disconnect the mailbox */ + vf_info->mbx.timeout = 0; + if (vf_info->mbx.ops.disconnect) + vf_info->mbx.ops.disconnect(hw, &vf_info->mbx); + + /* determine vector offset and count */ + vf_v_idx = fm10k_vf_vector_index(hw, vf_idx); + vf_v_limit = vf_v_idx + fm10k_vectors_per_pool(hw); + + /* determine qmap offsets and counts */ + qmap_stride = (hw->iov.num_vfs > 8) ? 
32 : 256; + queues_per_pool = fm10k_queues_per_pool(hw); + qmap_idx = qmap_stride * vf_idx; + + /* make all the queues inaccessible to the VF */ + for (i = qmap_idx; i < (qmap_idx + qmap_stride); i++) { + FM10K_WRITE_REG(hw, FM10K_TQMAP(i), 0); + FM10K_WRITE_REG(hw, FM10K_RQMAP(i), 0); + } + + /* calculate starting index for queues */ + vf_q_idx = fm10k_vf_queue_index(hw, vf_idx); + + /* determine correct default VLAN ID */ + if (vf_info->pf_vid) + vf_vid = vf_info->pf_vid; + else + vf_vid = vf_info->sw_vid; + + /* configure Queue control register */ + txqctl = ((u32)vf_vid << FM10K_TXQCTL_VID_SHIFT) | + (vf_idx << FM10K_TXQCTL_TC_SHIFT) | + FM10K_TXQCTL_VF | vf_idx; + rxqctl = (vf_idx << FM10K_RXQCTL_VF_SHIFT) | FM10K_RXQCTL_VF; + + /* stop further DMA and reset queue ownership back to VF */ + for (i = vf_q_idx; i < (queues_per_pool + vf_q_idx); i++) { + FM10K_WRITE_REG(hw, FM10K_TXDCTL(i), 0); + FM10K_WRITE_REG(hw, FM10K_TXQCTL(i), txqctl); + FM10K_WRITE_REG(hw, FM10K_RXDCTL(i), + FM10K_RXDCTL_WRITE_BACK_MIN_DELAY | + FM10K_RXDCTL_DROP_ON_EMPTY); + FM10K_WRITE_REG(hw, FM10K_RXQCTL(i), rxqctl); + } + + /* reset TC with -1 credits and no quanta to prevent transmit */ + FM10K_WRITE_REG(hw, FM10K_TC_MAXCREDIT(vf_idx), 0); + FM10K_WRITE_REG(hw, FM10K_TC_RATE(vf_idx), 0); + FM10K_WRITE_REG(hw, FM10K_TC_CREDIT(vf_idx), + FM10K_TC_CREDIT_CREDIT_MASK); + + /* update our first entry in the table based on previous VF */ + if (!vf_idx) + hw->mac.ops.update_int_moderator(hw); + else + hw->iov.ops.assign_int_moderator(hw, vf_idx - 1); + + /* reset linked list so it now includes our active vectors */ + if (vf_idx == (hw->iov.num_vfs - 1)) + FM10K_WRITE_REG(hw, FM10K_ITR2(0), vf_v_idx); + else + FM10K_WRITE_REG(hw, FM10K_ITR2(vf_v_limit), vf_v_idx); + + /* link remaining vectors so that next points to previous */ + for (vf_v_idx++; vf_v_idx < vf_v_limit; vf_v_idx++) + FM10K_WRITE_REG(hw, FM10K_ITR2(vf_v_idx), vf_v_idx - 1); + + /* zero out MBMEM, VLAN_TABLE, RETA, RSSRK, and MRQC registers */ + for (i = FM10K_VFMBMEM_LEN; i--;) + FM10K_WRITE_REG(hw, FM10K_MBMEM_VF(vf_idx, i), 0); + for (i = FM10K_VLAN_TABLE_SIZE; i--;) + FM10K_WRITE_REG(hw, FM10K_VLAN_TABLE(vf_info->vsi, i), 0); + for (i = FM10K_RETA_SIZE; i--;) + FM10K_WRITE_REG(hw, FM10K_RETA(vf_info->vsi, i), 0); + for (i = FM10K_RSSRK_SIZE; i--;) + FM10K_WRITE_REG(hw, FM10K_RSSRK(vf_info->vsi, i), 0); + FM10K_WRITE_REG(hw, FM10K_MRQC(vf_info->vsi), 0); + + /* Update base address registers to contain MAC address */ + if (IS_VALID_ETHER_ADDR(vf_info->mac)) { + tdbal = (((u32)vf_info->mac[3]) << 24) | + (((u32)vf_info->mac[4]) << 16) | + (((u32)vf_info->mac[5]) << 8); + tdbah = (((u32)0xFF) << 24) | + (((u32)vf_info->mac[0]) << 16) | + (((u32)vf_info->mac[1]) << 8) | + ((u32)vf_info->mac[2]); + } + + /* map queue pairs back to VF from last to first */ + for (i = queues_per_pool; i--;) { + FM10K_WRITE_REG(hw, FM10K_TDBAL(vf_q_idx + i), tdbal); + FM10K_WRITE_REG(hw, FM10K_TDBAH(vf_q_idx + i), tdbah); + /* See definition of FM10K_TDLEN_ITR_SCALE_SHIFT for an + * explanation of how TDLEN is used. 
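+ * In short, and presumably for the same reason as the MAC hand-off
+ * above: the ITR scale is stashed in software-defined TDLEN bits so the
+ * VF can read it during its own initialization, before its mailbox is
+ * necessarily connected.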
+ */ + FM10K_WRITE_REG(hw, FM10K_TDLEN(vf_q_idx + i), + hw->mac.itr_scale << + FM10K_TDLEN_ITR_SCALE_SHIFT); + FM10K_WRITE_REG(hw, FM10K_TQMAP(qmap_idx + i), vf_q_idx + i); + FM10K_WRITE_REG(hw, FM10K_RQMAP(qmap_idx + i), vf_q_idx + i); + } + + /* repeat the first ring for all the remaining VF rings */ + for (i = queues_per_pool; i < qmap_stride; i++) { + FM10K_WRITE_REG(hw, FM10K_TQMAP(qmap_idx + i), vf_q_idx); + FM10K_WRITE_REG(hw, FM10K_RQMAP(qmap_idx + i), vf_q_idx); + } + + return FM10K_SUCCESS; +} + +/** + * fm10k_iov_set_lport_pf - Assign and enable a logical port for a given VF + * @hw: pointer to hardware structure + * @vf_info: pointer to VF information structure + * @lport_idx: Logical port offset from the hardware glort + * @flags: Set of capability flags to extend port beyond basic functionality + * + * This function allows enabling a VF port by assigning it a GLORT and + * setting the flags so that it can enable an Rx mode. + **/ +STATIC s32 fm10k_iov_set_lport_pf(struct fm10k_hw *hw, + struct fm10k_vf_info *vf_info, + u16 lport_idx, u8 flags) +{ + u16 glort = (hw->mac.dglort_map + lport_idx) & FM10K_DGLORTMAP_NONE; + + DEBUGFUNC("fm10k_iov_set_lport_state_pf"); + + /* if glort is not valid return error */ + if (!fm10k_glort_valid_pf(hw, glort)) + return FM10K_ERR_PARAM; + + vf_info->vf_flags = flags | FM10K_VF_FLAG_NONE_CAPABLE; + vf_info->glort = glort; + + return FM10K_SUCCESS; +} + +/** + * fm10k_iov_reset_lport_pf - Disable a logical port for a given VF + * @hw: pointer to hardware structure + * @vf_info: pointer to VF information structure + * + * This function disables a VF port by stripping it of a GLORT and + * setting the flags so that it cannot enable any Rx mode. + **/ +STATIC void fm10k_iov_reset_lport_pf(struct fm10k_hw *hw, + struct fm10k_vf_info *vf_info) +{ + u32 msg[1]; + + DEBUGFUNC("fm10k_iov_reset_lport_state_pf"); + + /* need to disable the port if it is already enabled */ + if (FM10K_VF_FLAG_ENABLED(vf_info)) { + /* notify switch that this port has been disabled */ + fm10k_update_lport_state_pf(hw, vf_info->glort, 1, false); + + /* generate port state response to notify VF it is not ready */ + fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_LPORT_STATE); + vf_info->mbx.ops.enqueue_tx(hw, &vf_info->mbx, msg); + } + + /* clear flags and glort if it exists */ + vf_info->vf_flags = 0; + vf_info->glort = 0; +} + +/** + * fm10k_iov_update_stats_pf - Updates hardware related statistics for VFs + * @hw: pointer to hardware structure + * @q: stats for all queues of a VF + * @vf_idx: index of VF + * + * This function collects queue stats for VFs. + **/ +STATIC void fm10k_iov_update_stats_pf(struct fm10k_hw *hw, + struct fm10k_hw_stats_q *q, + u16 vf_idx) +{ + u32 idx, qpp; + + /* get stats for all of the queues */ + qpp = fm10k_queues_per_pool(hw); + idx = fm10k_vf_queue_index(hw, vf_idx); + fm10k_update_hw_stats_q(hw, q, idx, qpp); +} + +/** + * fm10k_iov_msg_msix_pf - Message handler for MSI-X request from VF + * @hw: Pointer to hardware structure + * @results: Pointer array to message, results[0] is pointer to message + * @mbx: Pointer to mailbox information structure + * + * This function is a default handler for MSI-X requests from the VF. The + * assumption is that in this case it is acceptable to just directly + * hand off the message from the VF to the underlying shared code. 
+ **/ +s32 fm10k_iov_msg_msix_pf(struct fm10k_hw *hw, u32 **results, + struct fm10k_mbx_info *mbx) +{ + struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx; + u8 vf_idx = vf_info->vf_idx; + + UNREFERENCED_1PARAMETER(results); + DEBUGFUNC("fm10k_iov_msg_msix_pf"); + + return hw->iov.ops.assign_int_moderator(hw, vf_idx); +} + +/** + * fm10k_iov_select_vid - Select correct default VLAN ID + * @hw: Pointer to hardware structure + * @vid: VLAN ID to correct + * + * Will report an error if the VLAN ID is out of range. For VID = 0, it will + * return either the pf_vid or sw_vid depending on which one is set. + */ +STATIC s32 fm10k_iov_select_vid(struct fm10k_vf_info *vf_info, u16 vid) +{ + if (!vid) + return vf_info->pf_vid ? vf_info->pf_vid : vf_info->sw_vid; + else if (vf_info->pf_vid && vid != vf_info->pf_vid) + return FM10K_ERR_PARAM; + else + return vid; +} + +/** + * fm10k_iov_msg_mac_vlan_pf - Message handler for MAC/VLAN request from VF + * @hw: Pointer to hardware structure + * @results: Pointer array to message, results[0] is pointer to message + * @mbx: Pointer to mailbox information structure + * + * This function is a default handler for MAC/VLAN requests from the VF. + * The assumption is that in this case it is acceptable to just directly + * hand off the message from the VF to the underlying shared code. + **/ +s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results, + struct fm10k_mbx_info *mbx) +{ + struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx; + u8 mac[ETH_ALEN]; + u32 *result; + int err = FM10K_SUCCESS; + bool set; + u16 vlan; + u32 vid; + + DEBUGFUNC("fm10k_iov_msg_mac_vlan_pf"); + + /* we shouldn't be updating rules on a disabled interface */ + if (!FM10K_VF_FLAG_ENABLED(vf_info)) + err = FM10K_ERR_PARAM; + + if (!err && !!results[FM10K_MAC_VLAN_MSG_VLAN]) { + result = results[FM10K_MAC_VLAN_MSG_VLAN]; + + /* record VLAN id requested */ + err = fm10k_tlv_attr_get_u32(result, &vid); + if (err) + return err; + + set = !(vid & FM10K_VLAN_CLEAR); + vid &= ~FM10K_VLAN_CLEAR; + + /* if the length field has been set, this is a multi-bit + * update request. For multi-bit requests, simply disallow + * them when the pf_vid has been set. In this case, the PF + * should have already cleared the VLAN_TABLE, and if we + * allowed them, it could allow a rogue VF to receive traffic + * on a VLAN it was not assigned. In the single-bit case, we + * need to modify requests for VLAN 0 to use the default PF or + * SW vid when assigned. 
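+ *
+ * Encoding reminder (the same layout documented for
+ * fm10k_update_vlan_pf earlier in this file): the low 16 bits carry the
+ * starting VLAN ID and clear flag, while a non-zero length in the upper
+ * bits marks a multi-bit request; e.g. 0x00400064 would ask for a run of
+ * updates starting at VLAN 0x64 with a repeat count of 0x40.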
+ */
+
+ if (vid >> 16) {
+ /* prevent multi-bit requests when PF has
+ * administratively set the VLAN for this VF
+ */
+ if (vf_info->pf_vid)
+ return FM10K_ERR_PARAM;
+ } else {
+ err = fm10k_iov_select_vid(vf_info, (u16)vid);
+ if (err < 0)
+ return err;
+
+ vid = err;
+ }
+
+ /* update VSI info for VF in regards to VLAN table */
+ err = hw->mac.ops.update_vlan(hw, vid, vf_info->vsi, set);
+ }
+
+ if (!err && !!results[FM10K_MAC_VLAN_MSG_MAC]) {
+ result = results[FM10K_MAC_VLAN_MSG_MAC];
+
+ /* record unicast MAC address requested */
+ err = fm10k_tlv_attr_get_mac_vlan(result, mac, &vlan);
+ if (err)
+ return err;
+
+ /* block attempts to set MAC for a locked device */
+ if (IS_VALID_ETHER_ADDR(vf_info->mac) &&
+ memcmp(mac, vf_info->mac, ETH_ALEN))
+ return FM10K_ERR_PARAM;
+
+ set = !(vlan & FM10K_VLAN_CLEAR);
+ vlan &= ~FM10K_VLAN_CLEAR;
+
+ err = fm10k_iov_select_vid(vf_info, vlan);
+ if (err < 0)
+ return err;
+
+ vlan = (u16)err;
+
+ /* notify switch of request for new unicast address */
+ err = hw->mac.ops.update_uc_addr(hw, vf_info->glort,
+ mac, vlan, set, 0);
+ }
+
+ if (!err && !!results[FM10K_MAC_VLAN_MSG_MULTICAST]) {
+ result = results[FM10K_MAC_VLAN_MSG_MULTICAST];
+
+ /* record multicast MAC address requested */
+ err = fm10k_tlv_attr_get_mac_vlan(result, mac, &vlan);
+ if (err)
+ return err;
+
+ /* verify that the VF is allowed to request multicast */
+ if (!(vf_info->vf_flags & FM10K_VF_FLAG_MULTI_ENABLED))
+ return FM10K_ERR_PARAM;
+
+ set = !(vlan & FM10K_VLAN_CLEAR);
+ vlan &= ~FM10K_VLAN_CLEAR;
+
+ err = fm10k_iov_select_vid(vf_info, vlan);
+ if (err < 0)
+ return err;
+
+ vlan = (u16)err;
+
+ /* notify switch of request for new multicast address */
+ err = hw->mac.ops.update_mc_addr(hw, vf_info->glort,
+ mac, vlan, set);
+ }
+
+ return err;
+}
+
+/**
+ * fm10k_iov_supported_xcast_mode_pf - Determine best match for xcast mode
+ * @vf_info: VF info structure containing capability flags
+ * @mode: Requested xcast mode
+ *
+ * This function outputs the mode that most closely matches the requested
+ * mode. If no modes match it will request we disable the port
+ **/
+STATIC u8 fm10k_iov_supported_xcast_mode_pf(struct fm10k_vf_info *vf_info,
+ u8 mode)
+{
+ u8 vf_flags = vf_info->vf_flags;
+
+ /* match up mode to capabilities as best as possible */
+ switch (mode) {
+ case FM10K_XCAST_MODE_PROMISC:
+ if (vf_flags & FM10K_VF_FLAG_PROMISC_CAPABLE)
+ return FM10K_XCAST_MODE_PROMISC;
+ /* fallthrough */
+ case FM10K_XCAST_MODE_ALLMULTI:
+ if (vf_flags & FM10K_VF_FLAG_ALLMULTI_CAPABLE)
+ return FM10K_XCAST_MODE_ALLMULTI;
+ /* fallthrough */
+ case FM10K_XCAST_MODE_MULTI:
+ if (vf_flags & FM10K_VF_FLAG_MULTI_CAPABLE)
+ return FM10K_XCAST_MODE_MULTI;
+ /* fallthrough */
+ case FM10K_XCAST_MODE_NONE:
+ if (vf_flags & FM10K_VF_FLAG_NONE_CAPABLE)
+ return FM10K_XCAST_MODE_NONE;
+ /* fallthrough */
+ default:
+ break;
+ }
+
+ /* disable interface as it should not be able to request any */
+ return FM10K_XCAST_MODE_DISABLE;
+}
+
+/**
+ * fm10k_iov_msg_lport_state_pf - Message handler for port state requests
+ * @hw: Pointer to hardware structure
+ * @results: Pointer array to message, results[0] is pointer to message
+ * @mbx: Pointer to mailbox information structure
+ *
+ * This function is a default handler for port state requests. The port
+ * state requests for now are basic and consist of enabling or disabling
+ * the port.
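+ *
+ * Mode handling note, based on the handler below: a requested xcast mode
+ * is first demoted by fm10k_iov_supported_xcast_mode_pf to the closest
+ * mode the VF's capability flags allow, falling back through PROMISC,
+ * ALLMULTI, MULTI and NONE before the port is disabled entirely.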
+ **/ +s32 fm10k_iov_msg_lport_state_pf(struct fm10k_hw *hw, u32 **results, + struct fm10k_mbx_info *mbx) +{ + struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx; + u32 *result; + s32 err = FM10K_SUCCESS; + u32 msg[2]; + u8 mode = 0; + + DEBUGFUNC("fm10k_iov_msg_lport_state_pf"); + + /* verify VF is allowed to enable even minimal mode */ + if (!(vf_info->vf_flags & FM10K_VF_FLAG_NONE_CAPABLE)) + return FM10K_ERR_PARAM; + + if (!!results[FM10K_LPORT_STATE_MSG_XCAST_MODE]) { + result = results[FM10K_LPORT_STATE_MSG_XCAST_MODE]; + + /* XCAST mode update requested */ + err = fm10k_tlv_attr_get_u8(result, &mode); + if (err) + return FM10K_ERR_PARAM; + + /* prep for possible demotion depending on capabilities */ + mode = fm10k_iov_supported_xcast_mode_pf(vf_info, mode); + + /* if mode is not currently enabled, enable it */ + if (!(FM10K_VF_FLAG_ENABLED(vf_info) & BIT(mode))) + fm10k_update_xcast_mode_pf(hw, vf_info->glort, mode); + + /* swap mode back to a bit flag */ + mode = FM10K_VF_FLAG_SET_MODE(mode); + } else if (!results[FM10K_LPORT_STATE_MSG_DISABLE]) { + /* need to disable the port if it is already enabled */ + if (FM10K_VF_FLAG_ENABLED(vf_info)) + err = fm10k_update_lport_state_pf(hw, vf_info->glort, + 1, false); + + /* we need to clear VF_FLAG_ENABLED flags in order to ensure + * that we actually re-enable the LPORT state below. Note that + * this has no impact if the VF is already disabled, as the + * flags are already cleared. + */ + if (!err) + vf_info->vf_flags = FM10K_VF_FLAG_CAPABLE(vf_info); + + /* when enabling the port we should reset the rate limiters */ + hw->iov.ops.configure_tc(hw, vf_info->vf_idx, vf_info->rate); + + /* set mode for minimal functionality */ + mode = FM10K_VF_FLAG_SET_MODE_NONE; + + /* generate port state response to notify VF it is ready */ + fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_LPORT_STATE); + fm10k_tlv_attr_put_bool(msg, FM10K_LPORT_STATE_MSG_READY); + mbx->ops.enqueue_tx(hw, mbx, msg); + } + + /* if enable state toggled note the update */ + if (!err && (!FM10K_VF_FLAG_ENABLED(vf_info) != !mode)) + err = fm10k_update_lport_state_pf(hw, vf_info->glort, 1, + !!mode); + + /* if state change succeeded, then update our stored state */ + mode |= FM10K_VF_FLAG_CAPABLE(vf_info); + if (!err) + vf_info->vf_flags = mode; + + return err; +} + +#ifndef NO_DEFAULT_SRIOV_MSG_HANDLERS +const struct fm10k_msg_data fm10k_iov_msg_data_pf[] = { + FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test), + FM10K_VF_MSG_MSIX_HANDLER(fm10k_iov_msg_msix_pf), + FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_iov_msg_mac_vlan_pf), + FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_iov_msg_lport_state_pf), + FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error), +}; + +#endif +/** + * fm10k_update_stats_hw_pf - Updates hardware related statistics of PF + * @hw: pointer to hardware structure + * @stats: pointer to the stats structure to update + * + * This function collects and aggregates global and per queue hardware + * statistics. 
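+ *
+ * Reading note, describing the loop below: TXQCTL(0) is sampled before
+ * and after the block of statistics reads; if its ID bits changed in
+ * between, a reset is assumed to have occurred and the reads are retried
+ * so the counters remain consistent.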
+ **/ +void fm10k_update_hw_stats_pf(struct fm10k_hw *hw, + struct fm10k_hw_stats *stats) +{ + u32 timeout, ur, ca, um, xec, vlan_drop, loopback_drop, nodesc_drop; + u32 id, id_prev; + + DEBUGFUNC("fm10k_update_hw_stats_pf"); + + /* Use Tx queue 0 as a canary to detect a reset */ + id = FM10K_READ_REG(hw, FM10K_TXQCTL(0)); + + /* Read Global Statistics */ + do { + timeout = fm10k_read_hw_stats_32b(hw, FM10K_STATS_TIMEOUT, + &stats->timeout); + ur = fm10k_read_hw_stats_32b(hw, FM10K_STATS_UR, &stats->ur); + ca = fm10k_read_hw_stats_32b(hw, FM10K_STATS_CA, &stats->ca); + um = fm10k_read_hw_stats_32b(hw, FM10K_STATS_UM, &stats->um); + xec = fm10k_read_hw_stats_32b(hw, FM10K_STATS_XEC, &stats->xec); + vlan_drop = fm10k_read_hw_stats_32b(hw, FM10K_STATS_VLAN_DROP, + &stats->vlan_drop); + loopback_drop = + fm10k_read_hw_stats_32b(hw, + FM10K_STATS_LOOPBACK_DROP, + &stats->loopback_drop); + nodesc_drop = fm10k_read_hw_stats_32b(hw, + FM10K_STATS_NODESC_DROP, + &stats->nodesc_drop); + + /* if value has not changed then we have consistent data */ + id_prev = id; + id = FM10K_READ_REG(hw, FM10K_TXQCTL(0)); + } while ((id ^ id_prev) & FM10K_TXQCTL_ID_MASK); + + /* drop non-ID bits and set VALID ID bit */ + id &= FM10K_TXQCTL_ID_MASK; + id |= FM10K_STAT_VALID; + + /* Update Global Statistics */ + if (stats->stats_idx == id) { + stats->timeout.count += timeout; + stats->ur.count += ur; + stats->ca.count += ca; + stats->um.count += um; + stats->xec.count += xec; + stats->vlan_drop.count += vlan_drop; + stats->loopback_drop.count += loopback_drop; + stats->nodesc_drop.count += nodesc_drop; + } + + /* Update bases and record current PF id */ + fm10k_update_hw_base_32b(&stats->timeout, timeout); + fm10k_update_hw_base_32b(&stats->ur, ur); + fm10k_update_hw_base_32b(&stats->ca, ca); + fm10k_update_hw_base_32b(&stats->um, um); + fm10k_update_hw_base_32b(&stats->xec, xec); + fm10k_update_hw_base_32b(&stats->vlan_drop, vlan_drop); + fm10k_update_hw_base_32b(&stats->loopback_drop, loopback_drop); + fm10k_update_hw_base_32b(&stats->nodesc_drop, nodesc_drop); + stats->stats_idx = id; + + /* Update Queue Statistics */ + fm10k_update_hw_stats_q(hw, stats->q, 0, hw->mac.max_queues); +} + +/** + * fm10k_rebind_hw_stats_pf - Resets base for hardware statistics of PF + * @hw: pointer to hardware structure + * @stats: pointer to the stats structure to update + * + * This function resets the base for global and per queue hardware + * statistics. + **/ +void fm10k_rebind_hw_stats_pf(struct fm10k_hw *hw, + struct fm10k_hw_stats *stats) +{ + DEBUGFUNC("fm10k_rebind_hw_stats_pf"); + + /* Unbind Global Statistics */ + fm10k_unbind_hw_stats_32b(&stats->timeout); + fm10k_unbind_hw_stats_32b(&stats->ur); + fm10k_unbind_hw_stats_32b(&stats->ca); + fm10k_unbind_hw_stats_32b(&stats->um); + fm10k_unbind_hw_stats_32b(&stats->xec); + fm10k_unbind_hw_stats_32b(&stats->vlan_drop); + fm10k_unbind_hw_stats_32b(&stats->loopback_drop); + fm10k_unbind_hw_stats_32b(&stats->nodesc_drop); + + /* Unbind Queue Statistics */ + fm10k_unbind_hw_stats_q(stats->q, 0, hw->mac.max_queues); + + /* Reinitialize bases for all stats */ + fm10k_update_hw_stats_pf(hw, stats); +} + +/** + * fm10k_set_dma_mask_pf - Configures PhyAddrSpace to limit DMA to system + * @hw: pointer to hardware structure + * @dma_mask: 64 bit DMA mask required for platform + * + * This function sets the PHYADDR.PhyAddrSpace bits for the endpoint in order + * to limit the access to memory beyond what is physically in the system. 
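+ *
+ * Quick example, with a value chosen only for illustration: a 48-bit DMA
+ * mask of 0x0000FFFFFFFFFFFF results in 0x0000FFFF being written to
+ * PHYADDR, since only the upper 32 bits of the mask are programmed below.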
+ **/ +STATIC void fm10k_set_dma_mask_pf(struct fm10k_hw *hw, u64 dma_mask) +{ + /* we need to write the upper 32 bits of DMA mask to PhyAddrSpace */ + u32 phyaddr = (u32)(dma_mask >> 32); + + DEBUGFUNC("fm10k_set_dma_mask_pf"); + + FM10K_WRITE_REG(hw, FM10K_PHYADDR, phyaddr); +} + +/** + * fm10k_get_fault_pf - Record a fault in one of the interface units + * @hw: pointer to hardware structure + * @type: pointer to fault type register offset + * @fault: pointer to memory location to record the fault + * + * Record the fault register contents to the fault data structure and + * clear the entry from the register. + * + * Returns ERR_PARAM if invalid register is specified or no error is present. + **/ +STATIC s32 fm10k_get_fault_pf(struct fm10k_hw *hw, int type, + struct fm10k_fault *fault) +{ + u32 func; + + DEBUGFUNC("fm10k_get_fault_pf"); + + /* verify the fault register is in range and is aligned */ + switch (type) { + case FM10K_PCA_FAULT: + case FM10K_THI_FAULT: + case FM10K_FUM_FAULT: + break; + default: + return FM10K_ERR_PARAM; + } + + /* only service faults that are valid */ + func = FM10K_READ_REG(hw, type + FM10K_FAULT_FUNC); + if (!(func & FM10K_FAULT_FUNC_VALID)) + return FM10K_ERR_PARAM; + + /* read remaining fields */ + fault->address = FM10K_READ_REG(hw, type + FM10K_FAULT_ADDR_HI); + fault->address <<= 32; + fault->address = FM10K_READ_REG(hw, type + FM10K_FAULT_ADDR_LO); + fault->specinfo = FM10K_READ_REG(hw, type + FM10K_FAULT_SPECINFO); + + /* clear valid bit to allow for next error */ + FM10K_WRITE_REG(hw, type + FM10K_FAULT_FUNC, FM10K_FAULT_FUNC_VALID); + + /* Record which function triggered the error */ + if (func & FM10K_FAULT_FUNC_PF) + fault->func = 0; + else + fault->func = 1 + ((func & FM10K_FAULT_FUNC_VF_MASK) >> + FM10K_FAULT_FUNC_VF_SHIFT); + + /* record fault type */ + fault->type = func & FM10K_FAULT_FUNC_TYPE_MASK; + + return FM10K_SUCCESS; +} + +/** + * fm10k_request_lport_map_pf - Request LPORT map from the switch API + * @hw: pointer to hardware structure + * + **/ +STATIC s32 fm10k_request_lport_map_pf(struct fm10k_hw *hw) +{ + struct fm10k_mbx_info *mbx = &hw->mbx; + u32 msg[1]; + + DEBUGFUNC("fm10k_request_lport_pf"); + + /* issue request asking for LPORT map */ + fm10k_tlv_msg_init(msg, FM10K_PF_MSG_ID_LPORT_MAP); + + /* load onto outgoing mailbox */ + return mbx->ops.enqueue_tx(hw, mbx, msg); +} + +/** + * fm10k_get_host_state_pf - Returns the state of the switch and mailbox + * @hw: pointer to hardware structure + * @switch_ready: pointer to boolean value that will record switch state + * + * This function will check the DMA_CTRL2 register and mailbox in order + * to determine if the switch is ready for the PF to begin requesting + * addresses and mapping traffic to the local interface. 
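+ *
+ * Behavioural note, per the code below: when DMA_CTRL2 does not report
+ * SWITCH_READY the function returns success immediately without calling
+ * the generic helper, leaving *switch_ready untouched.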
+ **/
+STATIC s32 fm10k_get_host_state_pf(struct fm10k_hw *hw, bool *switch_ready)
+{
+ u32 dma_ctrl2;
+
+ DEBUGFUNC("fm10k_get_host_state_pf");
+
+ /* verify the switch is ready for interaction */
+ dma_ctrl2 = FM10K_READ_REG(hw, FM10K_DMA_CTRL2);
+ if (!(dma_ctrl2 & FM10K_DMA_CTRL2_SWITCH_READY))
+ return FM10K_SUCCESS;
+
+ /* retrieve generic host state info */
+ return fm10k_get_host_state_generic(hw, switch_ready);
+}
+
+/* This structure defines the attributes to be parsed below */
+const struct fm10k_tlv_attr fm10k_lport_map_msg_attr[] = {
+ FM10K_TLV_ATTR_LE_STRUCT(FM10K_PF_ATTR_ID_ERR,
+ sizeof(struct fm10k_swapi_error)),
+ FM10K_TLV_ATTR_U32(FM10K_PF_ATTR_ID_LPORT_MAP),
+ FM10K_TLV_ATTR_LAST
+};
+
+/**
+ * fm10k_msg_lport_map_pf - Message handler for lport_map message from SM
+ * @hw: Pointer to hardware structure
+ * @results: pointer array containing parsed data
+ * @mbx: Pointer to mailbox information structure
+ *
+ * This handler configures the lport mapping based on the reply from the
+ * switch API.
+ **/
+s32 fm10k_msg_lport_map_pf(struct fm10k_hw *hw, u32 **results,
+ struct fm10k_mbx_info *mbx)
+{
+ u16 glort, mask;
+ u32 dglort_map;
+ s32 err;
+
+ UNREFERENCED_1PARAMETER(mbx);
+ DEBUGFUNC("fm10k_msg_lport_map_pf");
+
+ err = fm10k_tlv_attr_get_u32(results[FM10K_PF_ATTR_ID_LPORT_MAP],
+ &dglort_map);
+ if (err)
+ return err;
+
+ /* extract values out of the header */
+ glort = FM10K_MSG_HDR_FIELD_GET(dglort_map, LPORT_MAP_GLORT);
+ mask = FM10K_MSG_HDR_FIELD_GET(dglort_map, LPORT_MAP_MASK);
+
+ /* verify mask is set and none of the masked bits in glort are set */
+ if (!mask || (glort & ~mask))
+ return FM10K_ERR_PARAM;
+
+ /* verify the mask is contiguous, and that it is 1's followed by 0's */
+ if (((~(mask - 1) & mask) + mask) & FM10K_DGLORTMAP_NONE)
+ return FM10K_ERR_PARAM;
+
+ /* record the glort, mask, and port count */
+ hw->mac.dglort_map = dglort_map;
+
+ return FM10K_SUCCESS;
+}
+
+const struct fm10k_tlv_attr fm10k_update_pvid_msg_attr[] = {
+ FM10K_TLV_ATTR_U32(FM10K_PF_ATTR_ID_UPDATE_PVID),
+ FM10K_TLV_ATTR_LAST
+};
+
+/**
+ * fm10k_msg_update_pvid_pf - Message handler for port VLAN message from SM
+ * @hw: Pointer to hardware structure
+ * @results: pointer array containing parsed data
+ * @mbx: Pointer to mailbox information structure
+ *
+ * This handler configures the default VLAN for the PF
+ **/
+static s32 fm10k_msg_update_pvid_pf(struct fm10k_hw *hw, u32 **results,
+ struct fm10k_mbx_info *mbx)
+{
+ u16 glort, pvid;
+ u32 pvid_update;
+ s32 err;
+
+ UNREFERENCED_1PARAMETER(mbx);
+ DEBUGFUNC("fm10k_msg_update_pvid_pf");
+
+ err = fm10k_tlv_attr_get_u32(results[FM10K_PF_ATTR_ID_UPDATE_PVID],
+ &pvid_update);
+ if (err)
+ return err;
+
+ /* extract values from the pvid update */
+ glort = FM10K_MSG_HDR_FIELD_GET(pvid_update, UPDATE_PVID_GLORT);
+ pvid = FM10K_MSG_HDR_FIELD_GET(pvid_update, UPDATE_PVID_PVID);
+
+ /* if glort is not valid return error */
+ if (!fm10k_glort_valid_pf(hw, glort))
+ return FM10K_ERR_PARAM;
+
+ /* verify VLAN ID is valid */
+ if (pvid >= FM10K_VLAN_TABLE_VID_MAX)
+ return FM10K_ERR_PARAM;
+
+ /* record the port VLAN ID value */
+ hw->mac.default_vid = pvid;
+
+ return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_record_global_table_data - Move global table data to swapi table info
+ * @from: pointer to source table data structure
+ * @to: pointer to destination table info structure
+ *
+ * This function will copy table_data to the table_info contained in
+ * the hw struct.
+ **/ +static void fm10k_record_global_table_data(struct fm10k_global_table_data *from, + struct fm10k_swapi_table_info *to) +{ + /* convert from le32 struct to CPU byte ordered values */ + to->used = FM10K_LE32_TO_CPU(from->used); + to->avail = FM10K_LE32_TO_CPU(from->avail); +} + +const struct fm10k_tlv_attr fm10k_err_msg_attr[] = { + FM10K_TLV_ATTR_LE_STRUCT(FM10K_PF_ATTR_ID_ERR, + sizeof(struct fm10k_swapi_error)), + FM10K_TLV_ATTR_LAST +}; + +/** + * fm10k_msg_err_pf - Message handler for error reply + * @hw: Pointer to hardware structure + * @results: pointer array containing parsed data + * @mbx: Pointer to mailbox information structure + * + * This handler will capture the data for any error replies to previous + * messages that the PF has sent. + **/ +s32 fm10k_msg_err_pf(struct fm10k_hw *hw, u32 **results, + struct fm10k_mbx_info *mbx) +{ + struct fm10k_swapi_error err_msg; + s32 err; + + UNREFERENCED_1PARAMETER(mbx); + DEBUGFUNC("fm10k_msg_err_pf"); + + /* extract structure from message */ + err = fm10k_tlv_attr_get_le_struct(results[FM10K_PF_ATTR_ID_ERR], + &err_msg, sizeof(err_msg)); + if (err) + return err; + + /* record table status */ + fm10k_record_global_table_data(&err_msg.mac, &hw->swapi.mac); + fm10k_record_global_table_data(&err_msg.nexthop, &hw->swapi.nexthop); + fm10k_record_global_table_data(&err_msg.ffu, &hw->swapi.ffu); + + /* record SW API status value */ + hw->swapi.status = FM10K_LE32_TO_CPU(err_msg.status); + + return FM10K_SUCCESS; +} + +/* currently there is no shared 1588 timestamp handler */ + +const struct fm10k_tlv_attr fm10k_1588_timestamp_msg_attr[] = { + FM10K_TLV_ATTR_LE_STRUCT(FM10K_PF_ATTR_ID_1588_TIMESTAMP, + sizeof(struct fm10k_swapi_1588_timestamp)), + FM10K_TLV_ATTR_LAST +}; + +const struct fm10k_tlv_attr fm10k_1588_clock_owner_attr[] = { + FM10K_TLV_ATTR_LE_STRUCT(FM10K_PF_ATTR_ID_1588_CLOCK_OWNER, + sizeof(struct fm10k_swapi_1588_clock_owner)), + FM10K_TLV_ATTR_LAST +}; + +const struct fm10k_tlv_attr fm10k_master_clk_offset_attr[] = { + FM10K_TLV_ATTR_U64(FM10K_PF_ATTR_ID_MASTER_CLK_OFFSET), + FM10K_TLV_ATTR_LAST +}; + +/** + * fm10k_iov_notify_offset_pf - Notify VF of change in PTP offset + * @hw: pointer to hardware structure + * @vf_info: pointer to the vf info structure + * @offset: 64bit unsigned offset from hardware SYSTIME + * + * This function sends a message to a given VF to notify it of PTP offset + * changes. + **/ +STATIC void fm10k_iov_notify_offset_pf(struct fm10k_hw *hw, + struct fm10k_vf_info *vf_info, + u64 offset) +{ + u32 msg[4]; + + fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_1588); + fm10k_tlv_attr_put_u64(msg, FM10K_1588_MSG_CLK_OFFSET, offset); + + if (vf_info->mbx.ops.enqueue_tx) + vf_info->mbx.ops.enqueue_tx(hw, &vf_info->mbx, msg); +} + +/** + * fm10k_msg_1588_clock_owner_pf - Message handler for clock ownership from SM + * @hw: pointer to hardware structure + * @results: pointer to array containing parsed data, + * @mbx: Pointer to mailbox information structure + * + * This handler configures the FM10K_HW_FLAG_CLOCK_OWNER field for the PF + */ +s32 fm10k_msg_1588_clock_owner_pf(struct fm10k_hw *hw, u32 **results, + struct fm10k_mbx_info *mbx) +{ + struct fm10k_swapi_1588_clock_owner msg; + u16 glort; + s32 err; + + UNREFERENCED_1PARAMETER(mbx); + DEBUGFUNC("fm10k_msg_1588_clock_owner"); + + err = fm10k_tlv_attr_get_le_struct( + results[FM10K_PF_ATTR_ID_1588_CLOCK_OWNER], + &msg, sizeof(msg)); + if (err) + return err; + + /* We own the clock iff the glort matches us and the enabled field is + * true. 
Otherwise, the clock must belong to some other port. + */ + glort = le16_to_cpu(msg.glort); + if (fm10k_glort_valid_pf(hw, glort) && msg.enabled) + hw->flags |= FM10K_HW_FLAG_CLOCK_OWNER; + else + hw->flags &= ~FM10K_HW_FLAG_CLOCK_OWNER; + + return FM10K_SUCCESS; +} + +/** + * fm10k_adjust_systime_pf - Adjust systime frequency + * @hw: pointer to hardware structure + * @ppb: adjustment rate in parts per billion + * + * This function will adjust the SYSTIME_CFG register contained in BAR 4 + * if this function is supported for BAR 4 access. The adjustment amount + * is based on the parts per billion value provided and adjusted to a + * value based on parts per 2^48 clock cycles. + * + * If adjustment is not supported or the requested value is too large + * we will return an error. + **/ +STATIC s32 fm10k_adjust_systime_pf(struct fm10k_hw *hw, s32 ppb) +{ + u64 systime_adjust; + + DEBUGFUNC("fm10k_adjust_systime_pf"); + + /* ensure that we control the clock */ + if (!(hw->flags & FM10K_HW_FLAG_CLOCK_OWNER)) + return FM10K_ERR_DEVICE_NOT_SUPPORTED; + + /* if sw_addr is not set we don't have switch register access */ + if (!hw->sw_addr) + return ppb ? FM10K_ERR_PARAM : FM10K_SUCCESS; + + /* we must convert the value from parts per billion to parts per + * 2^48 cycles. In addition I have opted to only use the 30 most + * significant bits of the adjustment value as the 8 least + * significant bits are located in another register and represent + * a value significantly less than a part per billion, the result + * of dropping the 8 least significant bits is that the adjustment + * value is effectively multiplied by 2^8 when we write it. + * + * As a result of all this the math for this breaks down as follows: + * ppb / 10^9 == adjust * 2^8 / 2^48 + * If we solve this for adjust, and simplify it comes out as: + * ppb * 2^31 / 5^9 == adjust + */ + systime_adjust = (ppb < 0) ? -ppb : ppb; + systime_adjust <<= 31; + do_div(systime_adjust, 1953125); + + /* verify the requested adjustment value is in range */ + if (systime_adjust > FM10K_SW_SYSTIME_ADJUST_MASK) + return FM10K_ERR_PARAM; + + if (ppb > 0) + systime_adjust |= FM10K_SW_SYSTIME_ADJUST_DIR_POSITIVE; + + FM10K_WRITE_SW_REG(hw, FM10K_SW_SYSTIME_ADJUST, (u32)systime_adjust); + + return FM10K_SUCCESS; +} + +/** + * fm10k_notify_offset_pf - Notify switch of change in PTP offset + * @hw: pointer to hardware structure + * @offset: 64bit unsigned offset of SYSTIME + * + * This function sends a message to the switch to indicate a change in the + * offset of the hardware SYSTIME registers. The switch manager is + * responsible for transmitting this message to other hosts. + */ +STATIC s32 fm10k_notify_offset_pf(struct fm10k_hw *hw, u64 offset) +{ + struct fm10k_mbx_info *mbx = &hw->mbx; + u32 msg[4]; + + DEBUGFUNC("fm10k_notify_offset_pf"); + + /* ensure that we control the clock */ + if (!(hw->flags & FM10K_HW_FLAG_CLOCK_OWNER)) + return FM10K_ERR_DEVICE_NOT_SUPPORTED; + + fm10k_tlv_msg_init(msg, FM10K_PF_MSG_ID_MASTER_CLK_OFFSET); + fm10k_tlv_attr_put_u64(msg, FM10K_PF_ATTR_ID_MASTER_CLK_OFFSET, offset); + + /* load onto outgoing mailbox */ + return mbx->ops.enqueue_tx(hw, mbx, msg); +} + +/** + * fm10k_read_systime_pf - Reads value of systime registers + * @hw: pointer to the hardware structure + * + * Function reads the content of 2 registers, combined to represent a 64 bit + * value measured in nanosecods. 
In order to guarantee the value is accurate + * we check the 32 most significant bits both before and after reading the + * 32 least significant bits to verify they didn't change as we were reading + * the registers. + **/ +static u64 fm10k_read_systime_pf(struct fm10k_hw *hw) +{ + u32 systime_l, systime_h, systime_tmp; + + systime_h = fm10k_read_reg(hw, FM10K_SYSTIME + 1); + + do { + systime_tmp = systime_h; + systime_l = fm10k_read_reg(hw, FM10K_SYSTIME); + systime_h = fm10k_read_reg(hw, FM10K_SYSTIME + 1); + } while (systime_tmp != systime_h); + + return ((u64)systime_h << 32) | systime_l; +} + +static const struct fm10k_msg_data fm10k_msg_data_pf[] = { + FM10K_PF_MSG_ERR_HANDLER(XCAST_MODES, fm10k_msg_err_pf), + FM10K_PF_MSG_ERR_HANDLER(UPDATE_MAC_FWD_RULE, fm10k_msg_err_pf), + FM10K_PF_MSG_LPORT_MAP_HANDLER(fm10k_msg_lport_map_pf), + FM10K_PF_MSG_ERR_HANDLER(LPORT_CREATE, fm10k_msg_err_pf), + FM10K_PF_MSG_ERR_HANDLER(LPORT_DELETE, fm10k_msg_err_pf), + FM10K_PF_MSG_UPDATE_PVID_HANDLER(fm10k_msg_update_pvid_pf), + FM10K_PF_MSG_1588_CLOCK_OWNER_HANDLER(fm10k_msg_1588_clock_owner_pf), + FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error), +}; + +/** + * fm10k_init_ops_pf - Inits func ptrs and MAC type + * @hw: pointer to hardware structure + * + * Initialize the function pointers and assign the MAC type for PF. + * Does not touch the hardware. + **/ +s32 fm10k_init_ops_pf(struct fm10k_hw *hw) +{ + struct fm10k_mac_info *mac = &hw->mac; + struct fm10k_iov_info *iov = &hw->iov; + + DEBUGFUNC("fm10k_init_ops_pf"); + + fm10k_init_ops_generic(hw); + + mac->ops.reset_hw = &fm10k_reset_hw_pf; + mac->ops.init_hw = &fm10k_init_hw_pf; + mac->ops.start_hw = &fm10k_start_hw_generic; + mac->ops.stop_hw = &fm10k_stop_hw_generic; +#ifndef NO_IS_SLOT_APPROPRIATE_CHECK + mac->ops.is_slot_appropriate = &fm10k_is_slot_appropriate_pf; +#endif + mac->ops.update_vlan = &fm10k_update_vlan_pf; + mac->ops.read_mac_addr = &fm10k_read_mac_addr_pf; + mac->ops.update_uc_addr = &fm10k_update_uc_addr_pf; + mac->ops.update_mc_addr = &fm10k_update_mc_addr_pf; + mac->ops.update_xcast_mode = &fm10k_update_xcast_mode_pf; + mac->ops.update_int_moderator = &fm10k_update_int_moderator_pf; + mac->ops.update_lport_state = &fm10k_update_lport_state_pf; + mac->ops.update_hw_stats = &fm10k_update_hw_stats_pf; + mac->ops.rebind_hw_stats = &fm10k_rebind_hw_stats_pf; + mac->ops.configure_dglort_map = &fm10k_configure_dglort_map_pf; + mac->ops.set_dma_mask = &fm10k_set_dma_mask_pf; + mac->ops.get_fault = &fm10k_get_fault_pf; + mac->ops.get_host_state = &fm10k_get_host_state_pf; + mac->ops.request_lport_map = &fm10k_request_lport_map_pf; + mac->ops.adjust_systime = &fm10k_adjust_systime_pf; + mac->ops.notify_offset = &fm10k_notify_offset_pf; + mac->ops.read_systime = &fm10k_read_systime_pf; + + mac->max_msix_vectors = fm10k_get_pcie_msix_count_generic(hw); + + iov->ops.assign_resources = &fm10k_iov_assign_resources_pf; + iov->ops.configure_tc = &fm10k_iov_configure_tc_pf; + iov->ops.assign_int_moderator = &fm10k_iov_assign_int_moderator_pf; + iov->ops.assign_default_mac_vlan = fm10k_iov_assign_default_mac_vlan_pf; + iov->ops.reset_resources = &fm10k_iov_reset_resources_pf; + iov->ops.set_lport = &fm10k_iov_set_lport_pf; + iov->ops.reset_lport = &fm10k_iov_reset_lport_pf; + iov->ops.update_stats = &fm10k_iov_update_stats_pf; + iov->ops.notify_offset = &fm10k_iov_notify_offset_pf; + + return fm10k_sm_mbx_init(hw, &hw->mbx, fm10k_msg_data_pf); +} diff --git a/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_pf.h 
b/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_pf.h new file mode 100644 index 000000000..1c2e9994d --- /dev/null +++ b/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_pf.h @@ -0,0 +1,164 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2013 - 2015 Intel Corporation + */ + +#ifndef _FM10K_PF_H_ +#define _FM10K_PF_H_ + +#include "fm10k_type.h" +#include "fm10k_common.h" + +bool fm10k_glort_valid_pf(struct fm10k_hw *hw, u16 glort); +u16 fm10k_queues_per_pool(struct fm10k_hw *hw); +u16 fm10k_vf_queue_index(struct fm10k_hw *hw, u16 vf_idx); + +enum fm10k_pf_tlv_msg_id_v1 { + FM10K_PF_MSG_ID_TEST = 0x000, /* msg ID reserved */ + FM10K_PF_MSG_ID_XCAST_MODES = 0x001, + FM10K_PF_MSG_ID_UPDATE_MAC_FWD_RULE = 0x002, + FM10K_PF_MSG_ID_LPORT_MAP = 0x100, + FM10K_PF_MSG_ID_LPORT_CREATE = 0x200, + FM10K_PF_MSG_ID_LPORT_DELETE = 0x201, + FM10K_PF_MSG_ID_CONFIG = 0x300, + FM10K_PF_MSG_ID_UPDATE_PVID = 0x400, + FM10K_PF_MSG_ID_CREATE_FLOW_TABLE = 0x501, + FM10K_PF_MSG_ID_DELETE_FLOW_TABLE = 0x502, + FM10K_PF_MSG_ID_UPDATE_FLOW = 0x503, + FM10K_PF_MSG_ID_DELETE_FLOW = 0x504, + FM10K_PF_MSG_ID_SET_FLOW_STATE = 0x505, + FM10K_PF_MSG_ID_GET_1588_INFO = 0x506, + FM10K_PF_MSG_ID_1588_TIMESTAMP = 0x701, + FM10K_PF_MSG_ID_1588_CLOCK_OWNER = 0x702, + FM10K_PF_MSG_ID_MASTER_CLK_OFFSET = 0x703, +}; + +enum fm10k_pf_tlv_attr_id_v1 { + FM10K_PF_ATTR_ID_ERR = 0x00, + FM10K_PF_ATTR_ID_LPORT_MAP = 0x01, + FM10K_PF_ATTR_ID_XCAST_MODE = 0x02, + FM10K_PF_ATTR_ID_MAC_UPDATE = 0x03, + FM10K_PF_ATTR_ID_VLAN_UPDATE = 0x04, + FM10K_PF_ATTR_ID_CONFIG = 0x05, + FM10K_PF_ATTR_ID_CREATE_FLOW_TABLE = 0x06, + FM10K_PF_ATTR_ID_DELETE_FLOW_TABLE = 0x07, + FM10K_PF_ATTR_ID_UPDATE_FLOW = 0x08, + FM10K_PF_ATTR_ID_FLOW_STATE = 0x09, + FM10K_PF_ATTR_ID_FLOW_HANDLE = 0x0A, + FM10K_PF_ATTR_ID_DELETE_FLOW = 0x0B, + FM10K_PF_ATTR_ID_PORT = 0x0C, + FM10K_PF_ATTR_ID_UPDATE_PVID = 0x0D, + FM10K_PF_ATTR_ID_1588_TIMESTAMP = 0x10, + FM10K_PF_ATTR_ID_1588_CLOCK_OWNER = 0x12, + FM10K_PF_ATTR_ID_MASTER_CLK_OFFSET = 0x14, +}; + +#define FM10K_MSG_LPORT_MAP_GLORT_SHIFT 0 +#define FM10K_MSG_LPORT_MAP_GLORT_SIZE 16 +#define FM10K_MSG_LPORT_MAP_MASK_SHIFT 16 +#define FM10K_MSG_LPORT_MAP_MASK_SIZE 16 + +#define FM10K_MSG_UPDATE_PVID_GLORT_SHIFT 0 +#define FM10K_MSG_UPDATE_PVID_GLORT_SIZE 16 +#define FM10K_MSG_UPDATE_PVID_PVID_SHIFT 16 +#define FM10K_MSG_UPDATE_PVID_PVID_SIZE 16 + +#define FM10K_MSG_ERR_PEP_NOT_SCHEDULED 280 + +/* The following data structures are overlayed directly onto TLV mailbox + * messages, and must not break 4 byte alignment. Ensure the structures line + * up correctly as per their TLV definition. 
+ */ +#ifdef C99 +#pragma pack(push, 4) +#else +#pragma pack(4) +#endif /* C99 */ + +struct fm10k_mac_update { + __le32 mac_lower; + __le16 mac_upper; + __le16 vlan; + __le16 glort; + u8 flags; + u8 action; +}; + +struct fm10k_global_table_data { + __le32 used; + __le32 avail; +}; + +struct fm10k_swapi_error { + __le32 status; + struct fm10k_global_table_data mac; + struct fm10k_global_table_data nexthop; + struct fm10k_global_table_data ffu; +}; + +struct fm10k_swapi_1588_timestamp { + __le64 egress; + __le64 ingress; + __le16 dglort; + __le16 sglort; +}; + +struct fm10k_swapi_1588_clock_owner { + __le16 glort; + __le16 enabled; +}; + +#ifdef C99 +#pragma pack(pop) +#else +#pragma pack() +#endif /* C99 */ + +s32 fm10k_msg_lport_map_pf(struct fm10k_hw *, u32 **, struct fm10k_mbx_info *); +extern const struct fm10k_tlv_attr fm10k_lport_map_msg_attr[]; +#define FM10K_PF_MSG_LPORT_MAP_HANDLER(func) \ + FM10K_MSG_HANDLER(FM10K_PF_MSG_ID_LPORT_MAP, \ + fm10k_lport_map_msg_attr, func) +extern const struct fm10k_tlv_attr fm10k_update_pvid_msg_attr[]; +#define FM10K_PF_MSG_UPDATE_PVID_HANDLER(func) \ + FM10K_MSG_HANDLER(FM10K_PF_MSG_ID_UPDATE_PVID, \ + fm10k_update_pvid_msg_attr, func) + +s32 fm10k_msg_err_pf(struct fm10k_hw *, u32 **, struct fm10k_mbx_info *); +extern const struct fm10k_tlv_attr fm10k_err_msg_attr[]; +#define FM10K_PF_MSG_ERR_HANDLER(msg, func) \ + FM10K_MSG_HANDLER(FM10K_PF_MSG_ID_##msg, fm10k_err_msg_attr, func) + +extern const struct fm10k_tlv_attr fm10k_1588_timestamp_msg_attr[]; +#define FM10K_PF_MSG_1588_TIMESTAMP_HANDLER(func) \ + FM10K_MSG_HANDLER(FM10K_PF_MSG_ID_1588_TIMESTAMP, \ + fm10k_1588_timestamp_msg_attr, func) + +s32 fm10k_msg_1588_clock_owner_pf(struct fm10k_hw *, u32 **, + struct fm10k_mbx_info *); +extern const struct fm10k_tlv_attr fm10k_1588_clock_owner_attr[]; +#define FM10K_PF_MSG_1588_CLOCK_OWNER_HANDLER(func) \ + FM10K_MSG_HANDLER(FM10K_PF_MSG_ID_1588_CLOCK_OWNER, \ + fm10k_1588_clock_owner_attr, func) + +extern const struct fm10k_tlv_attr fm10k_master_clk_offset_attr[]; +#define FM10K_PF_MSG_MASTER_CLK_OFFSET_HANDLER(func) \ + FM10K_MSG_HANDLER(FM10K_PF_MSG_ID_MASTER_CLK_OFFSET, \ + fm10k_master_clk_offset_attr, func) + +s32 fm10k_iov_msg_msix_pf(struct fm10k_hw *, u32 **, struct fm10k_mbx_info *); +s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *, u32 **, + struct fm10k_mbx_info *); +s32 fm10k_iov_msg_lport_state_pf(struct fm10k_hw *, u32 **, + struct fm10k_mbx_info *); +#ifndef NO_DEFAULT_SRIOV_MSG_HANDLERS +extern const struct fm10k_msg_data fm10k_iov_msg_data_pf[]; +#endif + +s32 fm10k_init_ops_pf(struct fm10k_hw *hw); + +void fm10k_update_hw_stats_pf(struct fm10k_hw *hw, + struct fm10k_hw_stats *stats); + +void fm10k_rebind_hw_stats_pf(struct fm10k_hw *hw, + struct fm10k_hw_stats *stats); +#endif /* _FM10K_PF_H */ diff --git a/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_tlv.c b/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_tlv.c new file mode 100644 index 000000000..adffc1bce --- /dev/null +++ b/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_tlv.c @@ -0,0 +1,887 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2013 - 2015 Intel Corporation + */ + +#include "fm10k_tlv.h" + +/** + * fm10k_tlv_msg_init - Initialize message block for TLV data storage + * @msg: Pointer to message block + * @msg_id: Message ID indicating message type + * + * This function return success if provided with a valid message pointer + **/ +s32 fm10k_tlv_msg_init(u32 *msg, u16 msg_id) +{ + DEBUGFUNC("fm10k_tlv_msg_init"); + + /* verify pointer is not NULL */ + if (!msg) + 
return FM10K_ERR_PARAM; + + *msg = (FM10K_TLV_FLAGS_MSG << FM10K_TLV_FLAGS_SHIFT) | msg_id; + + return FM10K_SUCCESS; +} + +/** + * fm10k_tlv_attr_put_null_string - Place null terminated string on message + * @msg: Pointer to message block + * @attr_id: Attribute ID + * @string: Pointer to string to be stored in attribute + * + * This function will reorder a string to be CPU endian and store it in + * the attribute buffer. It will return success if provided with valid + * pointers. + **/ +static s32 fm10k_tlv_attr_put_null_string(u32 *msg, u16 attr_id, + const unsigned char *string) +{ + u32 attr_data = 0, len = 0; + u32 *attr; + + DEBUGFUNC("fm10k_tlv_attr_put_null_string"); + + /* verify pointers are not NULL */ + if (!string || !msg) + return FM10K_ERR_PARAM; + + attr = &msg[FM10K_TLV_DWORD_LEN(*msg)]; + + /* copy string into local variable and then write to msg */ + do { + /* write data to message */ + if (len && !(len % 4)) { + attr[len / 4] = attr_data; + attr_data = 0; + } + + /* record character to offset location */ + attr_data |= (u32)(*string) << (8 * (len % 4)); + len++; + + /* test for NULL and then increment */ + } while (*(string++)); + + /* write last piece of data to message */ + attr[(len + 3) / 4] = attr_data; + + /* record attribute header, update message length */ + len <<= FM10K_TLV_LEN_SHIFT; + attr[0] = len | attr_id; + + /* add header length to length */ + len += FM10K_TLV_HDR_LEN << FM10K_TLV_LEN_SHIFT; + *msg += FM10K_TLV_LEN_ALIGN(len); + + return FM10K_SUCCESS; +} + +/** + * fm10k_tlv_attr_get_null_string - Get null terminated string from attribute + * @attr: Pointer to attribute + * @string: Pointer to location of destination string + * + * This function pulls the string back out of the attribute and will place + * it in the array pointed to by string. It will return success if provided + * with valid pointers. + **/ +static s32 fm10k_tlv_attr_get_null_string(u32 *attr, unsigned char *string) +{ + u32 len; + + DEBUGFUNC("fm10k_tlv_attr_get_null_string"); + + /* verify pointers are not NULL */ + if (!string || !attr) + return FM10K_ERR_PARAM; + + len = *attr >> FM10K_TLV_LEN_SHIFT; + attr++; + + while (len--) + string[len] = (u8)(attr[len / 4] >> (8 * (len % 4))); + + return FM10K_SUCCESS; +} + +/** + * fm10k_tlv_attr_put_mac_vlan - Store MAC/VLAN attribute in message + * @msg: Pointer to message block + * @attr_id: Attribute ID + * @mac_addr: MAC address to be stored + * + * This function will reorder a MAC address to be CPU endian and store it + * in the attribute buffer. It will return success if provided with + * valid pointers. 
+ **/ +s32 fm10k_tlv_attr_put_mac_vlan(u32 *msg, u16 attr_id, + const u8 *mac_addr, u16 vlan) +{ + u32 len = ETH_ALEN << FM10K_TLV_LEN_SHIFT; + u32 *attr; + + DEBUGFUNC("fm10k_tlv_attr_put_mac_vlan"); + + /* verify pointers are not NULL */ + if (!msg || !mac_addr) + return FM10K_ERR_PARAM; + + attr = &msg[FM10K_TLV_DWORD_LEN(*msg)]; + + /* record attribute header, update message length */ + attr[0] = len | attr_id; + + /* copy value into local variable and then write to msg */ + attr[1] = FM10K_LE32_TO_CPU(*(const __le32 *)&mac_addr[0]); + attr[2] = FM10K_LE16_TO_CPU(*(const __le16 *)&mac_addr[4]); + attr[2] |= (u32)vlan << 16; + + /* add header length to length */ + len += FM10K_TLV_HDR_LEN << FM10K_TLV_LEN_SHIFT; + *msg += FM10K_TLV_LEN_ALIGN(len); + + return FM10K_SUCCESS; +} + +/** + * fm10k_tlv_attr_get_mac_vlan - Get MAC/VLAN stored in attribute + * @attr: Pointer to attribute + * @attr_id: Attribute ID + * @mac_addr: location of buffer to store MAC address + * + * This function pulls the MAC address back out of the attribute and will + * place it in the array pointed to by mac_addr. It will return success + * if provided with valid pointers. + **/ +s32 fm10k_tlv_attr_get_mac_vlan(u32 *attr, u8 *mac_addr, u16 *vlan) +{ + DEBUGFUNC("fm10k_tlv_attr_get_mac_vlan"); + + /* verify pointers are not NULL */ + if (!mac_addr || !attr) + return FM10K_ERR_PARAM; + + *(__le32 *)&mac_addr[0] = FM10K_CPU_TO_LE32(attr[1]); + *(__le16 *)&mac_addr[4] = FM10K_CPU_TO_LE16((u16)(attr[2])); + *vlan = (u16)(attr[2] >> 16); + + return FM10K_SUCCESS; +} + +/** + * fm10k_tlv_attr_put_bool - Add header indicating value "true" + * @msg: Pointer to message block + * @attr_id: Attribute ID + * + * This function will simply add an attribute header, the fact + * that the header is here means the attribute value is true, else + * it is false. The function will return success if provided with a + * valid pointer. + **/ +s32 fm10k_tlv_attr_put_bool(u32 *msg, u16 attr_id) +{ + DEBUGFUNC("fm10k_tlv_attr_put_bool"); + + /* verify pointers are not NULL */ + if (!msg) + return FM10K_ERR_PARAM; + + /* record attribute header */ + msg[FM10K_TLV_DWORD_LEN(*msg)] = attr_id; + + /* add header length to length */ + *msg += FM10K_TLV_HDR_LEN << FM10K_TLV_LEN_SHIFT; + + return FM10K_SUCCESS; +} + +/** + * fm10k_tlv_attr_put_value - Store integer value attribute in message + * @msg: Pointer to message block + * @attr_id: Attribute ID + * @value: Value to be written + * @len: Size of value + * + * This function will place an integer value of up to 8 bytes in size + * in a message attribute. The function will return success provided + * that msg is a valid pointer, and len is 1, 2, 4, or 8. 
+ **/ +s32 fm10k_tlv_attr_put_value(u32 *msg, u16 attr_id, s64 value, u32 len) +{ + u32 *attr; + + DEBUGFUNC("fm10k_tlv_attr_put_value"); + + /* verify non-null msg and len is 1, 2, 4, or 8 */ + if (!msg || !len || len > 8 || (len & (len - 1))) + return FM10K_ERR_PARAM; + + attr = &msg[FM10K_TLV_DWORD_LEN(*msg)]; + + if (len < 4) { + attr[1] = (u32)value & (BIT(8 * len) - 1); + } else { + attr[1] = (u32)value; + if (len > 4) + attr[2] = (u32)(value >> 32); + } + + /* record attribute header, update message length */ + len <<= FM10K_TLV_LEN_SHIFT; + attr[0] = len | attr_id; + + /* add header length to length */ + len += FM10K_TLV_HDR_LEN << FM10K_TLV_LEN_SHIFT; + *msg += FM10K_TLV_LEN_ALIGN(len); + + return FM10K_SUCCESS; +} + +/** + * fm10k_tlv_attr_get_value - Get integer value stored in attribute + * @attr: Pointer to attribute + * @value: Pointer to destination buffer + * @len: Size of value + * + * This function will place an integer value of up to 8 bytes in size + * in the offset pointed to by value. The function will return success + * provided that pointers are valid and the len value matches the + * attribute length. + **/ +s32 fm10k_tlv_attr_get_value(u32 *attr, void *value, u32 len) +{ + DEBUGFUNC("fm10k_tlv_attr_get_value"); + + /* verify pointers are not NULL */ + if (!attr || !value) + return FM10K_ERR_PARAM; + + if ((*attr >> FM10K_TLV_LEN_SHIFT) != len) + return FM10K_ERR_PARAM; + + if (len == 8) + *(u64 *)value = ((u64)attr[2] << 32) | attr[1]; + else if (len == 4) + *(u32 *)value = attr[1]; + else if (len == 2) + *(u16 *)value = (u16)attr[1]; + else + *(u8 *)value = (u8)attr[1]; + + return FM10K_SUCCESS; +} + +/** + * fm10k_tlv_attr_put_le_struct - Store little endian structure in message + * @msg: Pointer to message block + * @attr_id: Attribute ID + * @le_struct: Pointer to structure to be written + * @len: Size of le_struct + * + * This function will place a little endian structure value in a message + * attribute. The function will return success provided that all pointers + * are valid and length is a non-zero multiple of 4. + **/ +s32 fm10k_tlv_attr_put_le_struct(u32 *msg, u16 attr_id, + const void *le_struct, u32 len) +{ + const __le32 *le32_ptr = (const __le32 *)le_struct; + u32 *attr; + u32 i; + + DEBUGFUNC("fm10k_tlv_attr_put_le_struct"); + + /* verify non-null msg and len is in 32 bit words */ + if (!msg || !len || (len % 4)) + return FM10K_ERR_PARAM; + + attr = &msg[FM10K_TLV_DWORD_LEN(*msg)]; + + /* copy le32 structure into host byte order at 32b boundaries */ + for (i = 0; i < (len / 4); i++) + attr[i + 1] = FM10K_LE32_TO_CPU(le32_ptr[i]); + + /* record attribute header, update message length */ + len <<= FM10K_TLV_LEN_SHIFT; + attr[0] = len | attr_id; + + /* add header length to length */ + len += FM10K_TLV_HDR_LEN << FM10K_TLV_LEN_SHIFT; + *msg += FM10K_TLV_LEN_ALIGN(len); + + return FM10K_SUCCESS; +} + +/** + * fm10k_tlv_attr_get_le_struct - Get little endian struct form attribute + * @attr: Pointer to attribute + * @le_struct: Pointer to structure to be written + * @len: Size of structure + * + * This function will place a little endian structure in the buffer + * pointed to by le_struct. The function will return success + * provided that pointers are valid and the len value matches the + * attribute length. 
+ **/ +s32 fm10k_tlv_attr_get_le_struct(u32 *attr, void *le_struct, u32 len) +{ + __le32 *le32_ptr = (__le32 *)le_struct; + u32 i; + + DEBUGFUNC("fm10k_tlv_attr_get_le_struct"); + + /* verify pointers are not NULL */ + if (!le_struct || !attr) + return FM10K_ERR_PARAM; + + if ((*attr >> FM10K_TLV_LEN_SHIFT) != len) + return FM10K_ERR_PARAM; + + attr++; + + for (i = 0; len; i++, len -= 4) + le32_ptr[i] = FM10K_CPU_TO_LE32(attr[i]); + + return FM10K_SUCCESS; +} + +/** + * fm10k_tlv_attr_nest_start - Start a set of nested attributes + * @msg: Pointer to message block + * @attr_id: Attribute ID + * + * This function will mark off a new nested region for encapsulating + * a given set of attributes. The idea is if you wish to place a secondary + * structure within the message this mechanism allows for that. The + * function will return NULL on failure, and a pointer to the start + * of the nested attributes on success. + **/ +static u32 *fm10k_tlv_attr_nest_start(u32 *msg, u16 attr_id) +{ + u32 *attr; + + DEBUGFUNC("fm10k_tlv_attr_nest_start"); + + /* verify pointer is not NULL */ + if (!msg) + return NULL; + + attr = &msg[FM10K_TLV_DWORD_LEN(*msg)]; + + attr[0] = attr_id; + + /* return pointer to nest header */ + return attr; +} + +/** + * fm10k_tlv_attr_nest_stop - Stop a set of nested attributes + * @msg: Pointer to message block + * + * This function closes off an existing set of nested attributes. The + * message pointer should be pointing to the parent of the nest. So in + * the case of a nest within the nest this would be the outer nest pointer. + * This function will return success provided all pointers are valid. + **/ +static s32 fm10k_tlv_attr_nest_stop(u32 *msg) +{ + u32 *attr; + u32 len; + + DEBUGFUNC("fm10k_tlv_attr_nest_stop"); + + /* verify pointer is not NULL */ + if (!msg) + return FM10K_ERR_PARAM; + + /* locate the nested header and retrieve its length */ + attr = &msg[FM10K_TLV_DWORD_LEN(*msg)]; + len = (attr[0] >> FM10K_TLV_LEN_SHIFT) << FM10K_TLV_LEN_SHIFT; + + /* only include nest if data was added to it */ + if (len) { + len += FM10K_TLV_HDR_LEN << FM10K_TLV_LEN_SHIFT; + *msg += len; + } + + return FM10K_SUCCESS; +} + +/** + * fm10k_tlv_attr_validate - Validate attribute metadata + * @attr: Pointer to attribute + * @tlv_attr: Type and length info for attribute + * + * This function does some basic validation of the input TLV. It + * verifies the length, and in the case of null terminated strings + * it verifies that the last byte is null. The function will + * return FM10K_ERR_PARAM if any attribute is malformed, otherwise + * it returns 0. 
+ **/ +STATIC s32 fm10k_tlv_attr_validate(u32 *attr, + const struct fm10k_tlv_attr *tlv_attr) +{ + u32 attr_id = *attr & FM10K_TLV_ID_MASK; + u16 len = *attr >> FM10K_TLV_LEN_SHIFT; + + DEBUGFUNC("fm10k_tlv_attr_validate"); + + /* verify this is an attribute and not a message */ + if (*attr & (FM10K_TLV_FLAGS_MSG << FM10K_TLV_FLAGS_SHIFT)) + return FM10K_ERR_PARAM; + + /* search through the list of attributes to find a matching ID */ + while (tlv_attr->id < attr_id) + tlv_attr++; + + /* if didn't find a match then we should exit */ + if (tlv_attr->id != attr_id) + return FM10K_NOT_IMPLEMENTED; + + /* move to start of attribute data */ + attr++; + + switch (tlv_attr->type) { + case FM10K_TLV_NULL_STRING: + if (!len || + (attr[(len - 1) / 4] & (0xFF << (8 * ((len - 1) % 4))))) + return FM10K_ERR_PARAM; + if (len > tlv_attr->len) + return FM10K_ERR_PARAM; + break; + case FM10K_TLV_MAC_ADDR: + if (len != ETH_ALEN) + return FM10K_ERR_PARAM; + break; + case FM10K_TLV_BOOL: + if (len) + return FM10K_ERR_PARAM; + break; + case FM10K_TLV_UNSIGNED: + case FM10K_TLV_SIGNED: + if (len != tlv_attr->len) + return FM10K_ERR_PARAM; + break; + case FM10K_TLV_LE_STRUCT: + /* struct must be 4 byte aligned */ + if ((len % 4) || len != tlv_attr->len) + return FM10K_ERR_PARAM; + break; + case FM10K_TLV_NESTED: + /* nested attributes must be 4 byte aligned */ + if (len % 4) + return FM10K_ERR_PARAM; + break; + default: + /* attribute id is mapped to bad value */ + return FM10K_ERR_PARAM; + } + + return FM10K_SUCCESS; +} + +/** + * fm10k_tlv_attr_parse - Parses stream of attribute data + * @attr: Pointer to attribute list + * @results: Pointer array to store pointers to attributes + * @tlv_attr: Type and length info for attributes + * + * This function validates a stream of attributes and parses them + * up into an array of pointers stored in results. The function will + * return FM10K_ERR_PARAM on any input or message error, + * FM10K_NOT_IMPLEMENTED for any attribute that is outside of the array + * and 0 on success. Any attributes not found in tlv_attr will be silently + * ignored. 
+ **/ +static s32 fm10k_tlv_attr_parse(u32 *attr, u32 **results, + const struct fm10k_tlv_attr *tlv_attr) +{ + u32 i, attr_id, offset = 0; + s32 err = 0; + u16 len; + + DEBUGFUNC("fm10k_tlv_attr_parse"); + + /* verify pointers are not NULL */ + if (!attr || !results) + return FM10K_ERR_PARAM; + + /* initialize results to NULL */ + for (i = 0; i < FM10K_TLV_RESULTS_MAX; i++) + results[i] = NULL; + + /* pull length from the message header */ + len = *attr >> FM10K_TLV_LEN_SHIFT; + + /* no attributes to parse if there is no length */ + if (!len) + return FM10K_SUCCESS; + + /* no attributes to parse, just raw data, message becomes attribute */ + if (!tlv_attr) { + results[0] = attr; + return FM10K_SUCCESS; + } + + /* move to start of attribute data */ + attr++; + + /* run through list parsing all attributes */ + while (offset < len) { + attr_id = *attr & FM10K_TLV_ID_MASK; + + if (attr_id >= FM10K_TLV_RESULTS_MAX) + return FM10K_NOT_IMPLEMENTED; + + err = fm10k_tlv_attr_validate(attr, tlv_attr); + if (err == FM10K_NOT_IMPLEMENTED) + ; /* silently ignore non-implemented attributes */ + else if (err) + return err; + else + results[attr_id] = attr; + + /* update offset */ + offset += FM10K_TLV_DWORD_LEN(*attr) * 4; + + /* move to next attribute */ + attr = &attr[FM10K_TLV_DWORD_LEN(*attr)]; + } + + /* we should find ourselves at the end of the list */ + if (offset != len) + return FM10K_ERR_PARAM; + + return FM10K_SUCCESS; +} + +/** + * fm10k_tlv_msg_parse - Parses message header and calls function handler + * @hw: Pointer to hardware structure + * @msg: Pointer to message + * @mbx: Pointer to mailbox information structure + * @func: Function array containing list of message handling functions + * + * This function should be the first function called upon receiving a + * message. The handler will identify the message type and call the correct + * handler for the given message. It will return the value from the function + * call on a recognized message type, otherwise it will return + * FM10K_NOT_IMPLEMENTED on an unrecognized type. + **/ +s32 fm10k_tlv_msg_parse(struct fm10k_hw *hw, u32 *msg, + struct fm10k_mbx_info *mbx, + const struct fm10k_msg_data *data) +{ + u32 *results[FM10K_TLV_RESULTS_MAX]; + u32 msg_id; + s32 err; + + DEBUGFUNC("fm10k_tlv_msg_parse"); + + /* verify pointer is not NULL */ + if (!msg || !data) + return FM10K_ERR_PARAM; + + /* verify this is a message and not an attribute */ + if (!(*msg & (FM10K_TLV_FLAGS_MSG << FM10K_TLV_FLAGS_SHIFT))) + return FM10K_ERR_PARAM; + + /* grab message ID */ + msg_id = *msg & FM10K_TLV_ID_MASK; + + while (data->id < msg_id) + data++; + + /* if we didn't find it then pass it up as an error */ + if (data->id != msg_id) { + while (data->id != FM10K_TLV_ERROR) + data++; + } + + /* parse the attributes into the results list */ + err = fm10k_tlv_attr_parse(msg, results, data->attr); + if (err < 0) + return err; + + return data->func(hw, results, mbx); +} + +/** + * fm10k_tlv_msg_error - Default handler for unrecognized TLV message IDs + * @hw: Pointer to hardware structure + * @results: Pointer array to message, results[0] is pointer to message + * @mbx: Unused mailbox pointer + * + * This function is a default handler for unrecognized messages. At a + * a minimum it just indicates that the message requested was + * unimplemented. 
+ **/ +s32 fm10k_tlv_msg_error(struct fm10k_hw *hw, u32 **results, + struct fm10k_mbx_info *mbx) +{ + UNREFERENCED_3PARAMETER(hw, results, mbx); + DEBUGOUT1("Unknown message ID %u\n", **results & FM10K_TLV_ID_MASK); + return FM10K_NOT_IMPLEMENTED; +} + +STATIC const unsigned char test_str[] = "fm10k"; +STATIC const unsigned char test_mac[ETH_ALEN] = { 0x12, 0x34, 0x56, + 0x78, 0x9a, 0xbc }; +STATIC const u16 test_vlan = 0x0FED; +STATIC const u64 test_u64 = 0xfedcba9876543210ull; +STATIC const u32 test_u32 = 0x87654321; +STATIC const u16 test_u16 = 0x8765; +STATIC const u8 test_u8 = 0x87; +STATIC const s64 test_s64 = -0x123456789abcdef0ll; +STATIC const s32 test_s32 = -0x1235678; +STATIC const s16 test_s16 = -0x1234; +STATIC const s8 test_s8 = -0x12; +STATIC const __le32 test_le[2] = { FM10K_CPU_TO_LE32(0x12345678), + FM10K_CPU_TO_LE32(0x9abcdef0)}; + +/* The message below is meant to be used as a test message to demonstrate + * how to use the TLV interface and to test the types. Normally this code + * be compiled out by stripping the code wrapped in FM10K_TLV_TEST_MSG + */ +const struct fm10k_tlv_attr fm10k_tlv_msg_test_attr[] = { + FM10K_TLV_ATTR_NULL_STRING(FM10K_TEST_MSG_STRING, 80), + FM10K_TLV_ATTR_MAC_ADDR(FM10K_TEST_MSG_MAC_ADDR), + FM10K_TLV_ATTR_U8(FM10K_TEST_MSG_U8), + FM10K_TLV_ATTR_U16(FM10K_TEST_MSG_U16), + FM10K_TLV_ATTR_U32(FM10K_TEST_MSG_U32), + FM10K_TLV_ATTR_U64(FM10K_TEST_MSG_U64), + FM10K_TLV_ATTR_S8(FM10K_TEST_MSG_S8), + FM10K_TLV_ATTR_S16(FM10K_TEST_MSG_S16), + FM10K_TLV_ATTR_S32(FM10K_TEST_MSG_S32), + FM10K_TLV_ATTR_S64(FM10K_TEST_MSG_S64), + FM10K_TLV_ATTR_LE_STRUCT(FM10K_TEST_MSG_LE_STRUCT, 8), + FM10K_TLV_ATTR_NESTED(FM10K_TEST_MSG_NESTED), + FM10K_TLV_ATTR_S32(FM10K_TEST_MSG_RESULT), + FM10K_TLV_ATTR_LAST +}; + +/** + * fm10k_tlv_msg_test_generate_data - Stuff message with data + * @msg: Pointer to message + * @attr_flags: List of flags indicating what attributes to add + * + * This function is meant to load a message buffer with attribute data + **/ +STATIC void fm10k_tlv_msg_test_generate_data(u32 *msg, u32 attr_flags) +{ + DEBUGFUNC("fm10k_tlv_msg_test_generate_data"); + + if (attr_flags & BIT(FM10K_TEST_MSG_STRING)) + fm10k_tlv_attr_put_null_string(msg, FM10K_TEST_MSG_STRING, + test_str); + if (attr_flags & BIT(FM10K_TEST_MSG_MAC_ADDR)) + fm10k_tlv_attr_put_mac_vlan(msg, FM10K_TEST_MSG_MAC_ADDR, + test_mac, test_vlan); + if (attr_flags & BIT(FM10K_TEST_MSG_U8)) + fm10k_tlv_attr_put_u8(msg, FM10K_TEST_MSG_U8, test_u8); + if (attr_flags & BIT(FM10K_TEST_MSG_U16)) + fm10k_tlv_attr_put_u16(msg, FM10K_TEST_MSG_U16, test_u16); + if (attr_flags & BIT(FM10K_TEST_MSG_U32)) + fm10k_tlv_attr_put_u32(msg, FM10K_TEST_MSG_U32, test_u32); + if (attr_flags & BIT(FM10K_TEST_MSG_U64)) + fm10k_tlv_attr_put_u64(msg, FM10K_TEST_MSG_U64, test_u64); + if (attr_flags & BIT(FM10K_TEST_MSG_S8)) + fm10k_tlv_attr_put_s8(msg, FM10K_TEST_MSG_S8, test_s8); + if (attr_flags & BIT(FM10K_TEST_MSG_S16)) + fm10k_tlv_attr_put_s16(msg, FM10K_TEST_MSG_S16, test_s16); + if (attr_flags & BIT(FM10K_TEST_MSG_S32)) + fm10k_tlv_attr_put_s32(msg, FM10K_TEST_MSG_S32, test_s32); + if (attr_flags & BIT(FM10K_TEST_MSG_S64)) + fm10k_tlv_attr_put_s64(msg, FM10K_TEST_MSG_S64, test_s64); + if (attr_flags & BIT(FM10K_TEST_MSG_LE_STRUCT)) + fm10k_tlv_attr_put_le_struct(msg, FM10K_TEST_MSG_LE_STRUCT, + test_le, 8); +} + +/** + * fm10k_tlv_msg_test_create - Create a test message testing all attributes + * @msg: Pointer to message + * @attr_flags: List of flags indicating what attributes to add + * + * This function 
is meant to load a message buffer with all attribute types + * including a nested attribute. + **/ +void fm10k_tlv_msg_test_create(u32 *msg, u32 attr_flags) +{ + u32 *nest = NULL; + + DEBUGFUNC("fm10k_tlv_msg_test_create"); + + fm10k_tlv_msg_init(msg, FM10K_TLV_MSG_ID_TEST); + + fm10k_tlv_msg_test_generate_data(msg, attr_flags); + + /* check for nested attributes */ + attr_flags >>= FM10K_TEST_MSG_NESTED; + + if (attr_flags) { + nest = fm10k_tlv_attr_nest_start(msg, FM10K_TEST_MSG_NESTED); + + fm10k_tlv_msg_test_generate_data(nest, attr_flags); + + fm10k_tlv_attr_nest_stop(msg); + } +} + +/** + * fm10k_tlv_msg_test - Validate all results on test message receive + * @hw: Pointer to hardware structure + * @results: Pointer array to attributes in the message + * @mbx: Pointer to mailbox information structure + * + * This function does a check to verify all attributes match what the test + * message placed in the message buffer. It is the default handler + * for TLV test messages. + **/ +s32 fm10k_tlv_msg_test(struct fm10k_hw *hw, u32 **results, + struct fm10k_mbx_info *mbx) +{ + u32 *nest_results[FM10K_TLV_RESULTS_MAX]; + unsigned char result_str[80]; + unsigned char result_mac[ETH_ALEN]; + s32 err = FM10K_SUCCESS; + __le32 result_le[2]; + u16 result_vlan; + u64 result_u64; + u32 result_u32; + u16 result_u16; + u8 result_u8; + s64 result_s64; + s32 result_s32; + s16 result_s16; + s8 result_s8; + u32 reply[3]; + + DEBUGFUNC("fm10k_tlv_msg_test"); + + /* retrieve results of a previous test */ + if (!!results[FM10K_TEST_MSG_RESULT]) + return fm10k_tlv_attr_get_s32(results[FM10K_TEST_MSG_RESULT], + &mbx->test_result); + +parse_nested: + if (!!results[FM10K_TEST_MSG_STRING]) { + err = fm10k_tlv_attr_get_null_string( + results[FM10K_TEST_MSG_STRING], + result_str); + if (!err && memcmp(test_str, result_str, sizeof(test_str))) + err = FM10K_ERR_INVALID_VALUE; + if (err) + goto report_result; + } + if (!!results[FM10K_TEST_MSG_MAC_ADDR]) { + err = fm10k_tlv_attr_get_mac_vlan( + results[FM10K_TEST_MSG_MAC_ADDR], + result_mac, &result_vlan); + if (!err && memcmp(test_mac, result_mac, ETH_ALEN)) + err = FM10K_ERR_INVALID_VALUE; + if (!err && test_vlan != result_vlan) + err = FM10K_ERR_INVALID_VALUE; + if (err) + goto report_result; + } + if (!!results[FM10K_TEST_MSG_U8]) { + err = fm10k_tlv_attr_get_u8(results[FM10K_TEST_MSG_U8], + &result_u8); + if (!err && test_u8 != result_u8) + err = FM10K_ERR_INVALID_VALUE; + if (err) + goto report_result; + } + if (!!results[FM10K_TEST_MSG_U16]) { + err = fm10k_tlv_attr_get_u16(results[FM10K_TEST_MSG_U16], + &result_u16); + if (!err && test_u16 != result_u16) + err = FM10K_ERR_INVALID_VALUE; + if (err) + goto report_result; + } + if (!!results[FM10K_TEST_MSG_U32]) { + err = fm10k_tlv_attr_get_u32(results[FM10K_TEST_MSG_U32], + &result_u32); + if (!err && test_u32 != result_u32) + err = FM10K_ERR_INVALID_VALUE; + if (err) + goto report_result; + } + if (!!results[FM10K_TEST_MSG_U64]) { + err = fm10k_tlv_attr_get_u64(results[FM10K_TEST_MSG_U64], + &result_u64); + if (!err && test_u64 != result_u64) + err = FM10K_ERR_INVALID_VALUE; + if (err) + goto report_result; + } + if (!!results[FM10K_TEST_MSG_S8]) { + err = fm10k_tlv_attr_get_s8(results[FM10K_TEST_MSG_S8], + &result_s8); + if (!err && test_s8 != result_s8) + err = FM10K_ERR_INVALID_VALUE; + if (err) + goto report_result; + } + if (!!results[FM10K_TEST_MSG_S16]) { + err = fm10k_tlv_attr_get_s16(results[FM10K_TEST_MSG_S16], + &result_s16); + if (!err && test_s16 != result_s16) + err = FM10K_ERR_INVALID_VALUE; + 
if (err) + goto report_result; + } + if (!!results[FM10K_TEST_MSG_S32]) { + err = fm10k_tlv_attr_get_s32(results[FM10K_TEST_MSG_S32], + &result_s32); + if (!err && test_s32 != result_s32) + err = FM10K_ERR_INVALID_VALUE; + if (err) + goto report_result; + } + if (!!results[FM10K_TEST_MSG_S64]) { + err = fm10k_tlv_attr_get_s64(results[FM10K_TEST_MSG_S64], + &result_s64); + if (!err && test_s64 != result_s64) + err = FM10K_ERR_INVALID_VALUE; + if (err) + goto report_result; + } + if (!!results[FM10K_TEST_MSG_LE_STRUCT]) { + err = fm10k_tlv_attr_get_le_struct( + results[FM10K_TEST_MSG_LE_STRUCT], + result_le, + sizeof(result_le)); + if (!err && memcmp(test_le, result_le, sizeof(test_le))) + err = FM10K_ERR_INVALID_VALUE; + if (err) + goto report_result; + } + + if (!!results[FM10K_TEST_MSG_NESTED]) { + /* clear any pointers */ + memset(nest_results, 0, sizeof(nest_results)); + + /* parse the nested attributes into the nest results list */ + err = fm10k_tlv_attr_parse(results[FM10K_TEST_MSG_NESTED], + nest_results, + fm10k_tlv_msg_test_attr); + if (err) + goto report_result; + + /* loop back through to the start */ + results = nest_results; + goto parse_nested; + } + +report_result: + /* generate reply with test result */ + fm10k_tlv_msg_init(reply, FM10K_TLV_MSG_ID_TEST); + fm10k_tlv_attr_put_s32(reply, FM10K_TEST_MSG_RESULT, err); + + /* load onto outgoing mailbox */ + return mbx->ops.enqueue_tx(hw, mbx, reply); +} diff --git a/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_tlv.h b/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_tlv.h new file mode 100644 index 000000000..af2e4c76a --- /dev/null +++ b/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_tlv.h @@ -0,0 +1,165 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2013 - 2015 Intel Corporation + */ + +#ifndef _FM10K_TLV_H_ +#define _FM10K_TLV_H_ + +/* forward declaration */ +struct fm10k_msg_data; + +#include "fm10k_type.h" + +/* Message / Argument header format + * 3 2 1 0 + * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Length | Flags | Type / ID | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * + * The message header format described here is used for messages that are + * passed between the PF and the VF. To allow for messages larger then + * mailbox size we will provide a message with the above header and it + * will be segmented and transported to the mailbox to the other side where + * it is reassembled. 
It contains the following fields: + * Length: Length of the message in bytes excluding the message header + * Flags: TBD + * Type/ID: These will be the message/argument types we pass + */ +/* message data header */ +#define FM10K_TLV_ID_SHIFT 0 +#define FM10K_TLV_ID_SIZE 16 +#define FM10K_TLV_ID_MASK ((1u << FM10K_TLV_ID_SIZE) - 1) +#define FM10K_TLV_FLAGS_SHIFT 16 +#define FM10K_TLV_FLAGS_MSG 0x1 +#define FM10K_TLV_FLAGS_SIZE 4 +#define FM10K_TLV_LEN_SHIFT 20 +#define FM10K_TLV_LEN_SIZE 12 + +#define FM10K_TLV_HDR_LEN 4ul +#define FM10K_TLV_LEN_ALIGN_MASK \ + ((FM10K_TLV_HDR_LEN - 1) << FM10K_TLV_LEN_SHIFT) +#define FM10K_TLV_LEN_ALIGN(tlv) \ + (((tlv) + FM10K_TLV_LEN_ALIGN_MASK) & ~FM10K_TLV_LEN_ALIGN_MASK) +#define FM10K_TLV_DWORD_LEN(tlv) \ + ((u16)((FM10K_TLV_LEN_ALIGN(tlv)) >> (FM10K_TLV_LEN_SHIFT + 2)) + 1) + +#define FM10K_TLV_RESULTS_MAX 32 + +enum fm10k_tlv_type { + FM10K_TLV_NULL_STRING, + FM10K_TLV_MAC_ADDR, + FM10K_TLV_BOOL, + FM10K_TLV_UNSIGNED, + FM10K_TLV_SIGNED, + FM10K_TLV_LE_STRUCT, + FM10K_TLV_NESTED, + FM10K_TLV_MAX_TYPE +}; + +#define FM10K_TLV_ERROR (~0u) + +struct fm10k_tlv_attr { + unsigned int id; + enum fm10k_tlv_type type; + u16 len; +}; + +#define FM10K_TLV_ATTR_NULL_STRING(id, len) { id, FM10K_TLV_NULL_STRING, len } +#define FM10K_TLV_ATTR_MAC_ADDR(id) { id, FM10K_TLV_MAC_ADDR, 6 } +#define FM10K_TLV_ATTR_BOOL(id) { id, FM10K_TLV_BOOL, 0 } +#define FM10K_TLV_ATTR_U8(id) { id, FM10K_TLV_UNSIGNED, 1 } +#define FM10K_TLV_ATTR_U16(id) { id, FM10K_TLV_UNSIGNED, 2 } +#define FM10K_TLV_ATTR_U32(id) { id, FM10K_TLV_UNSIGNED, 4 } +#define FM10K_TLV_ATTR_U64(id) { id, FM10K_TLV_UNSIGNED, 8 } +#define FM10K_TLV_ATTR_S8(id) { id, FM10K_TLV_SIGNED, 1 } +#define FM10K_TLV_ATTR_S16(id) { id, FM10K_TLV_SIGNED, 2 } +#define FM10K_TLV_ATTR_S32(id) { id, FM10K_TLV_SIGNED, 4 } +#define FM10K_TLV_ATTR_S64(id) { id, FM10K_TLV_SIGNED, 8 } +#define FM10K_TLV_ATTR_LE_STRUCT(id, len) { id, FM10K_TLV_LE_STRUCT, len } +#define FM10K_TLV_ATTR_NESTED(id) { id, FM10K_TLV_NESTED } +#define FM10K_TLV_ATTR_LAST { FM10K_TLV_ERROR } + +struct fm10k_msg_data { + unsigned int id; + const struct fm10k_tlv_attr *attr; + s32 (*func)(struct fm10k_hw *, u32 **, + struct fm10k_mbx_info *); +}; + +#define FM10K_MSG_HANDLER(id, attr, func) { id, attr, func } + +s32 fm10k_tlv_msg_init(u32 *, u16); +s32 fm10k_tlv_attr_put_mac_vlan(u32 *, u16, const u8 *, u16); +s32 fm10k_tlv_attr_get_mac_vlan(u32 *, u8 *, u16 *); +s32 fm10k_tlv_attr_put_bool(u32 *, u16); +s32 fm10k_tlv_attr_put_value(u32 *, u16, s64, u32); +#define fm10k_tlv_attr_put_u8(msg, attr_id, val) \ + fm10k_tlv_attr_put_value(msg, attr_id, val, 1) +#define fm10k_tlv_attr_put_u16(msg, attr_id, val) \ + fm10k_tlv_attr_put_value(msg, attr_id, val, 2) +#define fm10k_tlv_attr_put_u32(msg, attr_id, val) \ + fm10k_tlv_attr_put_value(msg, attr_id, val, 4) +#define fm10k_tlv_attr_put_u64(msg, attr_id, val) \ + fm10k_tlv_attr_put_value(msg, attr_id, val, 8) +#define fm10k_tlv_attr_put_s8(msg, attr_id, val) \ + fm10k_tlv_attr_put_value(msg, attr_id, val, 1) +#define fm10k_tlv_attr_put_s16(msg, attr_id, val) \ + fm10k_tlv_attr_put_value(msg, attr_id, val, 2) +#define fm10k_tlv_attr_put_s32(msg, attr_id, val) \ + fm10k_tlv_attr_put_value(msg, attr_id, val, 4) +#define fm10k_tlv_attr_put_s64(msg, attr_id, val) \ + fm10k_tlv_attr_put_value(msg, attr_id, val, 8) +s32 fm10k_tlv_attr_get_value(u32 *, void *, u32); +#define fm10k_tlv_attr_get_u8(attr, ptr) \ + fm10k_tlv_attr_get_value(attr, ptr, sizeof(u8)) +#define fm10k_tlv_attr_get_u16(attr, ptr) \ + 
fm10k_tlv_attr_get_value(attr, ptr, sizeof(u16)) +#define fm10k_tlv_attr_get_u32(attr, ptr) \ + fm10k_tlv_attr_get_value(attr, ptr, sizeof(u32)) +#define fm10k_tlv_attr_get_u64(attr, ptr) \ + fm10k_tlv_attr_get_value(attr, ptr, sizeof(u64)) +#define fm10k_tlv_attr_get_s8(attr, ptr) \ + fm10k_tlv_attr_get_value(attr, ptr, sizeof(s8)) +#define fm10k_tlv_attr_get_s16(attr, ptr) \ + fm10k_tlv_attr_get_value(attr, ptr, sizeof(s16)) +#define fm10k_tlv_attr_get_s32(attr, ptr) \ + fm10k_tlv_attr_get_value(attr, ptr, sizeof(s32)) +#define fm10k_tlv_attr_get_s64(attr, ptr) \ + fm10k_tlv_attr_get_value(attr, ptr, sizeof(s64)) +s32 fm10k_tlv_attr_put_le_struct(u32 *, u16, const void *, u32); +s32 fm10k_tlv_attr_get_le_struct(u32 *, void *, u32); +s32 fm10k_tlv_msg_parse(struct fm10k_hw *, u32 *, struct fm10k_mbx_info *, + const struct fm10k_msg_data *); +s32 fm10k_tlv_msg_error(struct fm10k_hw *hw, u32 **results, + struct fm10k_mbx_info *); + +#define FM10K_TLV_MSG_ID_TEST 0 + +enum fm10k_tlv_test_attr_id { + FM10K_TEST_MSG_UNSET, + FM10K_TEST_MSG_STRING, + FM10K_TEST_MSG_MAC_ADDR, + FM10K_TEST_MSG_U8, + FM10K_TEST_MSG_U16, + FM10K_TEST_MSG_U32, + FM10K_TEST_MSG_U64, + FM10K_TEST_MSG_S8, + FM10K_TEST_MSG_S16, + FM10K_TEST_MSG_S32, + FM10K_TEST_MSG_S64, + FM10K_TEST_MSG_LE_STRUCT, + FM10K_TEST_MSG_NESTED, + FM10K_TEST_MSG_RESULT, + FM10K_TEST_MSG_MAX +}; + +extern const struct fm10k_tlv_attr fm10k_tlv_msg_test_attr[]; +void fm10k_tlv_msg_test_create(u32 *, u32); +s32 fm10k_tlv_msg_test(struct fm10k_hw *, u32 **, struct fm10k_mbx_info *); + +#define FM10K_TLV_MSG_TEST_HANDLER(func) \ + FM10K_MSG_HANDLER(FM10K_TLV_MSG_ID_TEST, fm10k_tlv_msg_test_attr, func) +#define FM10K_TLV_MSG_ERROR_HANDLER(func) \ + FM10K_MSG_HANDLER(FM10K_TLV_ERROR, NULL, func) +#endif /* _FM10K_MSG_H_ */ diff --git a/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_type.h b/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_type.h new file mode 100644 index 000000000..84781ba9b --- /dev/null +++ b/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_type.h @@ -0,0 +1,854 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2013 - 2015 Intel Corporation + */ + +#ifndef _FM10K_TYPE_H_ +#define _FM10K_TYPE_H_ + +/* forward declaration */ +struct fm10k_hw; + +#include "fm10k_osdep.h" +#include "fm10k_mbx.h" + +#define FM10K_INTEL_VENDOR_ID 0x8086 +#define FM10K_DEV_ID_PF 0x15A4 +#define FM10K_DEV_ID_VF 0x15A5 +#ifdef BOULDER_RAPIDS_HW +#define FM10K_DEV_ID_SDI_FM10420_QDA2 0x15D0 +#endif /* BOULDER_RAPIDS_HW */ +#ifdef ATWOOD_CHANNEL_HW +#define FM10K_DEV_ID_SDI_FM10420_DA2 0x15D5 +#endif /* ATWOOD_CHANNEL_HW */ + +#ifndef LINUX_MACROS +#ifndef BIT +#define BIT(a) (1UL << (a)) +#endif +#endif /* LINUX_MACROS */ + +#define FM10K_MAX_QUEUES 256 +#define FM10K_MAX_QUEUES_PF 128 +#define FM10K_MAX_QUEUES_POOL 16 + +#define FM10K_48_BIT_MASK 0x0000FFFFFFFFFFFFull +#define FM10K_STAT_VALID 0x80000000 + +/* PCI Bus Info */ +#define FM10K_PCIE_LINK_CAP 0x7C +#define FM10K_PCIE_LINK_STATUS 0x82 +#define FM10K_PCIE_LINK_WIDTH 0x3F0 +#define FM10K_PCIE_LINK_WIDTH_1 0x10 +#define FM10K_PCIE_LINK_WIDTH_2 0x20 +#define FM10K_PCIE_LINK_WIDTH_4 0x40 +#define FM10K_PCIE_LINK_WIDTH_8 0x80 +#define FM10K_PCIE_LINK_SPEED 0xF +#define FM10K_PCIE_LINK_SPEED_2500 0x1 +#define FM10K_PCIE_LINK_SPEED_5000 0x2 +#define FM10K_PCIE_LINK_SPEED_8000 0x3 + +/* PCIe payload size */ +#define FM10K_PCIE_DEV_CAP 0x74 +#define FM10K_PCIE_DEV_CAP_PAYLOAD 0x07 +#define FM10K_PCIE_DEV_CAP_PAYLOAD_128 0x00 +#define FM10K_PCIE_DEV_CAP_PAYLOAD_256 0x01 +#define FM10K_PCIE_DEV_CAP_PAYLOAD_512 
0x02 +#define FM10K_PCIE_DEV_CTRL 0x78 +#define FM10K_PCIE_DEV_CTRL_PAYLOAD 0xE0 +#define FM10K_PCIE_DEV_CTRL_PAYLOAD_128 0x00 +#define FM10K_PCIE_DEV_CTRL_PAYLOAD_256 0x20 +#define FM10K_PCIE_DEV_CTRL_PAYLOAD_512 0x40 + +/* PCIe MSI-X Capability info */ +#define FM10K_PCI_MSIX_MSG_CTRL 0xB2 +#define FM10K_PCI_MSIX_MSG_CTRL_TBL_SZ_MASK 0x7FF +#define FM10K_MAX_MSIX_VECTORS 256 +#define FM10K_MAX_VECTORS_PF 256 +#define FM10K_MAX_VECTORS_POOL 32 + +/* PCIe SR-IOV Info */ +#define FM10K_PCIE_SRIOV_CTRL 0x190 +#define FM10K_PCIE_SRIOV_CTRL_VFARI 0x10 + +#define FM10K_SUCCESS 0 +#define FM10K_ERR_DEVICE_NOT_SUPPORTED -1 +#define FM10K_ERR_PARAM -2 +#define FM10K_ERR_NO_RESOURCES -3 +#define FM10K_ERR_REQUESTS_PENDING -4 +#define FM10K_ERR_RESET_REQUESTED -5 +#define FM10K_ERR_DMA_PENDING -6 +#define FM10K_ERR_RESET_FAILED -7 +#define FM10K_ERR_INVALID_MAC_ADDR -8 +#define FM10K_ERR_INVALID_VALUE -9 +#define FM10K_NOT_IMPLEMENTED 0x7FFFFFFF + +#define UNREFERENCED_XPARAMETER +#define UNREFERENCED_1PARAMETER(_p) (_p) +#define UNREFERENCED_2PARAMETER(_p, _q) do { (_p); (_q); } while (0) +#define UNREFERENCED_3PARAMETER(_p, _q, _r) do { (_p); (_q); (_r); } while (0) + +/* Start of PF registers */ +#define FM10K_CTRL 0x0000 +#define FM10K_CTRL_BAR4_ALLOWED 0x00000004 + +#define FM10K_CTRL_EXT 0x0001 +#define FM10K_GCR 0x0003 +#define FM10K_GCR_EXT 0x0005 + +/* Interrupt control registers */ +#define FM10K_EICR 0x0006 +#define FM10K_EICR_PCA_FAULT 0x00000001 +#define FM10K_EICR_THI_FAULT 0x00000004 +#define FM10K_EICR_FUM_FAULT 0x00000020 +#define FM10K_EICR_FAULT_MASK 0x0000003F +#define FM10K_EICR_MAILBOX 0x00000040 +#define FM10K_EICR_SWITCHREADY 0x00000080 +#define FM10K_EICR_SWITCHNOTREADY 0x00000100 +#define FM10K_EICR_SWITCHINTERRUPT 0x00000200 +#define FM10K_EICR_SRAMERROR 0x00000400 +#define FM10K_EICR_VFLR 0x00000800 +#define FM10K_EICR_MAXHOLDTIME 0x00001000 +#define FM10K_EIMR 0x0007 +#define FM10K_EIMR_PCA_FAULT 0x00000001 +#define FM10K_EIMR_THI_FAULT 0x00000010 +#define FM10K_EIMR_FUM_FAULT 0x00000400 +#define FM10K_EIMR_MAILBOX 0x00001000 +#define FM10K_EIMR_SWITCHREADY 0x00004000 +#define FM10K_EIMR_SWITCHNOTREADY 0x00010000 +#define FM10K_EIMR_SWITCHINTERRUPT 0x00040000 +#define FM10K_EIMR_SRAMERROR 0x00100000 +#define FM10K_EIMR_VFLR 0x00400000 +#define FM10K_EIMR_MAXHOLDTIME 0x01000000 +#define FM10K_EIMR_ALL 0x55555555 +#define FM10K_EIMR_DISABLE(NAME) ((FM10K_EIMR_ ## NAME) << 0) +#define FM10K_EIMR_ENABLE(NAME) ((FM10K_EIMR_ ## NAME) << 1) +#define FM10K_FAULT_ADDR_LO 0x0 +#define FM10K_FAULT_ADDR_HI 0x1 +#define FM10K_FAULT_SPECINFO 0x2 +#define FM10K_FAULT_FUNC 0x3 +#define FM10K_FAULT_SIZE 0x4 +#define FM10K_FAULT_FUNC_VALID 0x00008000 +#define FM10K_FAULT_FUNC_PF 0x00004000 +#define FM10K_FAULT_FUNC_VF_MASK 0x00003F00 +#define FM10K_FAULT_FUNC_VF_SHIFT 8 +#define FM10K_FAULT_FUNC_TYPE_MASK 0x000000FF + +#define FM10K_PCA_FAULT 0x0008 +#define FM10K_THI_FAULT 0x0010 +#define FM10K_FUM_FAULT 0x001C + +/* Rx queue timeout indicator */ +#define FM10K_MAXHOLDQ(_n) ((_n) + 0x0020) + +/* Switch Manager info */ +#define FM10K_SM_AREA(_n) ((_n) + 0x0028) + +/* GLORT mapping registers */ +#define FM10K_DGLORTMAP(_n) ((_n) + 0x0030) +#define FM10K_DGLORT_COUNT 8 +#define FM10K_DGLORTMAP_MASK_SHIFT 16 +#define FM10K_DGLORTMAP_ANY 0x00000000 +#define FM10K_DGLORTMAP_NONE 0x0000FFFF +#define FM10K_DGLORTMAP_ZERO 0xFFFF0000 +#define FM10K_DGLORTDEC(_n) ((_n) + 0x0038) +#define FM10K_DGLORTDEC_VSILENGTH_SHIFT 4 +#define FM10K_DGLORTDEC_VSIBASE_SHIFT 7 +#define 
FM10K_DGLORTDEC_PCLENGTH_SHIFT 14 +#define FM10K_DGLORTDEC_QBASE_SHIFT 16 +#define FM10K_DGLORTDEC_RSSLENGTH_SHIFT 24 +#define FM10K_DGLORTDEC_INNERRSS_ENABLE 0x08000000 +#define FM10K_TUNNEL_CFG 0x0040 +#define FM10K_TUNNEL_CFG_NVGRE_SHIFT 16 +#define FM10K_TUNNEL_CFG_GENEVE 0x0041 +#define FM10K_SWPRI_MAP(_n) ((_n) + 0x0050) +#define FM10K_SWPRI_MAX 16 +#define FM10K_RSSRK(_n, _m) (((_n) * 0x10) + (_m) + 0x0800) +#define FM10K_RSSRK_SIZE 10 +#define FM10K_RSSRK_ENTRIES_PER_REG 4 +#define FM10K_RETA(_n, _m) (((_n) * 0x20) + (_m) + 0x1000) +#define FM10K_RETA_SIZE 32 +#define FM10K_RETA_ENTRIES_PER_REG 4 +#define FM10K_MAX_RSS_INDICES 128 + +/* Rate limiting registers */ +#define FM10K_TC_CREDIT(_n) ((_n) + 0x2000) +#define FM10K_TC_CREDIT_CREDIT_MASK 0x001FFFFF +#define FM10K_TC_MAXCREDIT(_n) ((_n) + 0x2040) +#define FM10K_TC_MAXCREDIT_64K 0x00010000 +#define FM10K_TC_RATE(_n) ((_n) + 0x2080) +#define FM10K_TC_RATE_QUANTA_MASK 0x0000FFFF +#define FM10K_TC_RATE_INTERVAL_4US_GEN1 0x00020000 +#define FM10K_TC_RATE_INTERVAL_4US_GEN2 0x00040000 +#define FM10K_TC_RATE_INTERVAL_4US_GEN3 0x00080000 + +/* DMA control registers */ +#define FM10K_DMA_CTRL 0x20C3 +#define FM10K_DMA_CTRL_TX_ENABLE 0x00000001 +#define FM10K_DMA_CTRL_TX_ACTIVE 0x00000008 +#define FM10K_DMA_CTRL_RX_ENABLE 0x00000010 +#define FM10K_DMA_CTRL_RX_ACTIVE 0x00000080 +#define FM10K_DMA_CTRL_RX_DESC_SIZE 0x00000100 +#define FM10K_DMA_CTRL_MINMSS_SHIFT 9 +#define FM10K_DMA_CTRL_MINMSS_64 0x00008000 +#define FM10K_DMA_CTRL_MAX_HOLD_1US_GEN3 0x04800000 +#define FM10K_DMA_CTRL_MAX_HOLD_1US_GEN2 0x04000000 +#define FM10K_DMA_CTRL_MAX_HOLD_1US_GEN1 0x03800000 +#define FM10K_DMA_CTRL_DATAPATH_RESET 0x20000000 +#define FM10K_DMA_CTRL_32_DESC 0x00000000 + +#define FM10K_DMA_CTRL2 0x20C4 +#define FM10K_DMA_CTRL2_SWITCH_READY 0x00002000 + +/* TSO flags configuration + * First packet contains all flags except for fin and psh + * Middle packet contains only urg and ack + * Last packet contains urg, ack, fin, and psh + */ +#define FM10K_TSO_FLAGS_LOW 0x00300FF6 +#define FM10K_TSO_FLAGS_HI 0x00000039 +#define FM10K_DTXTCPFLGL 0x20C5 +#define FM10K_DTXTCPFLGH 0x20C6 + +#define FM10K_TPH_CTRL 0x20C7 +#define FM10K_MRQC(_n) ((_n) + 0x2100) +#define FM10K_MRQC_TCP_IPV4 0x00000001 +#define FM10K_MRQC_IPV4 0x00000002 +#define FM10K_MRQC_IPV6 0x00000010 +#define FM10K_MRQC_TCP_IPV6 0x00000020 +#define FM10K_MRQC_UDP_IPV4 0x00000040 +#define FM10K_MRQC_UDP_IPV6 0x00000080 + +#define FM10K_TQMAP(_n) ((_n) + 0x2800) +#define FM10K_TQMAP_TABLE_SIZE 2048 +#define FM10K_RQMAP(_n) ((_n) + 0x3000) + +/* Hardware Statistics */ +#define FM10K_STATS_TIMEOUT 0x3800 +#define FM10K_STATS_UR 0x3801 +#define FM10K_STATS_CA 0x3802 +#define FM10K_STATS_UM 0x3803 +#define FM10K_STATS_XEC 0x3804 +#define FM10K_STATS_VLAN_DROP 0x3805 +#define FM10K_STATS_LOOPBACK_DROP 0x3806 +#define FM10K_STATS_NODESC_DROP 0x3807 + +/* Timesync registers */ +#define FM10K_SYSTIME 0x3814 +#define FM10K_SYSTIME_CFG 0x3818 +#define FM10K_SYSTIME_CFG_STEP_MASK 0x0000000F + +/* PCIe state registers */ +#define FM10K_PHYADDR 0x381C + +/* Rx ring registers */ +#define FM10K_RDBAL(_n) ((0x40 * (_n)) + 0x4000) +#define FM10K_RDBAH(_n) ((0x40 * (_n)) + 0x4001) +#define FM10K_RDLEN(_n) ((0x40 * (_n)) + 0x4002) +#define FM10K_TPH_RXCTRL(_n) ((0x40 * (_n)) + 0x4003) +#define FM10K_TPH_RXCTRL_DESC_TPHEN 0x00000020 +#define FM10K_TPH_RXCTRL_DESC_RROEN 0x00000200 +#define FM10K_TPH_RXCTRL_DATA_WROEN 0x00002000 +#define FM10K_TPH_RXCTRL_HDR_WROEN 0x00008000 +#define FM10K_RDH(_n) ((0x40 * (_n)) + 
0x4004) +#define FM10K_RDT(_n) ((0x40 * (_n)) + 0x4005) +#define FM10K_RXQCTL(_n) ((0x40 * (_n)) + 0x4006) +#define FM10K_RXQCTL_ENABLE 0x00000001 +#define FM10K_RXQCTL_PF 0x000000FC +#define FM10K_RXQCTL_VF_SHIFT 2 +#define FM10K_RXQCTL_VF 0x00000100 +#define FM10K_RXQCTL_ID_MASK (FM10K_RXQCTL_PF | FM10K_RXQCTL_VF) +#define FM10K_RXDCTL(_n) ((0x40 * (_n)) + 0x4007) +#define FM10K_RXDCTL_WRITE_BACK_MIN_DELAY 0x00000001 +#define FM10K_RXDCTL_DROP_ON_EMPTY 0x00000200 +#define FM10K_RXINT(_n) ((0x40 * (_n)) + 0x4008) +#define FM10K_RXINT_TIMER_SHIFT 8 +#define FM10K_SRRCTL(_n) ((0x40 * (_n)) + 0x4009) +#define FM10K_SRRCTL_BSIZEPKT_SHIFT 8 /* shift _right_ */ +#define FM10K_SRRCTL_LOOPBACK_SUPPRESS 0x40000000 +#define FM10K_SRRCTL_BUFFER_CHAINING_EN 0x80000000 + +/* Rx Statistics */ +#define FM10K_QPRC(_n) ((0x40 * (_n)) + 0x400A) +#define FM10K_QPRDC(_n) ((0x40 * (_n)) + 0x400B) +#define FM10K_QBRC_L(_n) ((0x40 * (_n)) + 0x400C) +#define FM10K_QBRC_H(_n) ((0x40 * (_n)) + 0x400D) + +/* Rx GLORT register */ +#define FM10K_RX_SGLORT(_n) ((0x40 * (_n)) + 0x400E) + +/* Tx ring registers */ +#define FM10K_TDBAL(_n) ((0x40 * (_n)) + 0x8000) +#define FM10K_TDBAH(_n) ((0x40 * (_n)) + 0x8001) +#define FM10K_TDLEN(_n) ((0x40 * (_n)) + 0x8002) +/* When fist initialized, VFs need to know the Interrupt Throttle Rate (ITR) + * scale which is based on the PCIe speed but the speed information in the PCI + * configuration space may not be accurate. The PF already knows the ITR scale + * but there is no defined method to pass that information from the PF to the + * VF. This is accomplished during VF initialization by temporarily co-opting + * the yet-to-be-used TDLEN register to have the PF store the ITR shift for + * the VF to retrieve before the VF needs to use the TDLEN register for its + * intended purpose, i.e. before the Tx resources are allocated. 
+ */ +#define FM10K_TDLEN_ITR_SCALE_SHIFT 9 +#define FM10K_TDLEN_ITR_SCALE_MASK 0x00000E00 +#define FM10K_TDLEN_ITR_SCALE_GEN1 2 +#define FM10K_TDLEN_ITR_SCALE_GEN2 1 +#define FM10K_TDLEN_ITR_SCALE_GEN3 0 +#define FM10K_TPH_TXCTRL(_n) ((0x40 * (_n)) + 0x8003) +#define FM10K_TPH_TXCTRL_DESC_TPHEN 0x00000020 +#define FM10K_TPH_TXCTRL_DESC_RROEN 0x00000200 +#define FM10K_TPH_TXCTRL_DESC_WROEN 0x00000800 +#define FM10K_TPH_TXCTRL_DATA_RROEN 0x00002000 +#define FM10K_TDH(_n) ((0x40 * (_n)) + 0x8004) +#define FM10K_TDT(_n) ((0x40 * (_n)) + 0x8005) +#define FM10K_TXDCTL(_n) ((0x40 * (_n)) + 0x8006) +#define FM10K_TXDCTL_ENABLE 0x00004000 +#define FM10K_TXDCTL_MAX_TIME_SHIFT 16 +#define FM10K_TXQCTL(_n) ((0x40 * (_n)) + 0x8007) +#define FM10K_TXQCTL_PF 0x0000003F +#define FM10K_TXQCTL_VF 0x00000040 +#define FM10K_TXQCTL_ID_MASK (FM10K_TXQCTL_PF | FM10K_TXQCTL_VF) +#define FM10K_TXQCTL_PC_SHIFT 7 +#define FM10K_TXQCTL_PC_MASK 0x00000380 +#define FM10K_TXQCTL_TC_SHIFT 10 +#define FM10K_TXQCTL_VID_SHIFT 16 +#define FM10K_TXQCTL_VID_MASK 0x0FFF0000 +#define FM10K_TXQCTL_UNLIMITED_BW 0x10000000 +#define FM10K_TXINT(_n) ((0x40 * (_n)) + 0x8008) +#define FM10K_TXINT_TIMER_SHIFT 8 + +/* Tx Statistics */ +#define FM10K_QPTC(_n) ((0x40 * (_n)) + 0x8009) +#define FM10K_QBTC_L(_n) ((0x40 * (_n)) + 0x800A) +#define FM10K_QBTC_H(_n) ((0x40 * (_n)) + 0x800B) + +/* Tx Push registers */ +#define FM10K_TQDLOC(_n) ((0x40 * (_n)) + 0x800C) +#define FM10K_TQDLOC_BASE_32_DESC 0x08 +#define FM10K_TQDLOC_SIZE_32_DESC 0x00050000 + +/* Tx GLORT registers */ +#define FM10K_TX_SGLORT(_n) ((0x40 * (_n)) + 0x800D) +#define FM10K_PFVTCTL(_n) ((0x40 * (_n)) + 0x800E) +#define FM10K_PFVTCTL_FTAG_DESC_ENABLE 0x00000001 + +/* Interrupt moderation and control registers */ +#define FM10K_INT_MAP(_n) ((_n) + 0x10080) +#define FM10K_INT_MAP_TIMER0 0x00000000 +#define FM10K_INT_MAP_TIMER1 0x00000100 +#define FM10K_INT_MAP_IMMEDIATE 0x00000200 +#define FM10K_INT_MAP_DISABLE 0x00000300 +#define FM10K_MSIX_VECTOR_MASK(_n) ((0x4 * (_n)) + 0x11003) +#define FM10K_INT_CTRL 0x12000 +#define FM10K_INT_CTRL_ENABLEMODERATOR 0x00000400 +#define FM10K_ITR(_n) ((_n) + 0x12400) +#define FM10K_ITR_INTERVAL1_SHIFT 12 +#define FM10K_ITR_PENDING2 0x10000000 +#define FM10K_ITR_AUTOMASK 0x20000000 +#define FM10K_ITR_MASK_SET 0x40000000 +#define FM10K_ITR_MASK_CLEAR 0x80000000 +#define FM10K_ITR2(_n) ((0x2 * (_n)) + 0x12800) +#define FM10K_ITR_REG_COUNT 768 +#define FM10K_ITR_REG_COUNT_PF 256 + +/* Switch manager interrupt registers */ +#define FM10K_IP 0x13000 +#define FM10K_IP_NOTINRESET 0x00000100 +#define FM10K_SRAM_IP 0x13003 + +/* VLAN registers */ +#define FM10K_VLAN_TABLE(_n, _m) ((0x80 * (_n)) + (_m) + 0x14000) +#define FM10K_VLAN_TABLE_SIZE 128 + +/* VLAN specific message offsets */ +#define FM10K_VLAN_TABLE_VID_MAX 4096 +#define FM10K_VLAN_TABLE_VSI_MAX 64 +#define FM10K_VLAN_LENGTH_SHIFT 16 +#define FM10K_VLAN_CLEAR BIT(15) +#define FM10K_VLAN_OVERRIDE FM10K_VLAN_CLEAR +#define FM10K_VLAN_ALL \ + ((FM10K_VLAN_TABLE_VID_MAX - 1) << FM10K_VLAN_LENGTH_SHIFT) + +/* VF FLR event notification registers */ +#define FM10K_PFVFLRE(_n) ((0x1 * (_n)) + 0x18844) +#define FM10K_PFVFLREC(_n) ((0x1 * (_n)) + 0x18846) + +/* Defines for size of uncacheable and write-combining memories */ +#define FM10K_UC_ADDR_START 0x000000 /* start of standard regs */ +#define FM10K_WC_ADDR_START 0x100000 /* start of Tx Desc Cache */ +#define FM10K_DBI_ADDR_START 0x200000 /* start of debug registers */ +#define FM10K_UC_ADDR_SIZE (FM10K_WC_ADDR_START - FM10K_UC_ADDR_START) 
+#define FM10K_WC_ADDR_SIZE (FM10K_DBI_ADDR_START - FM10K_WC_ADDR_START) + +/* Define timeouts for resets and disables */ +#define FM10K_QUEUE_DISABLE_TIMEOUT 100 +#define FM10K_RESET_TIMEOUT 150 + +/* Maximum supported combined inner and outer header length for encapsulation */ +#define FM10K_TUNNEL_HEADER_LENGTH 184 + +/* VF registers */ +#define FM10K_VFCTRL 0x00000 +#define FM10K_VFCTRL_RST 0x00000008 +#define FM10K_VFINT_MAP 0x00030 +#define FM10K_VFSYSTIME 0x00040 +#define FM10K_VFITR(_n) ((_n) + 0x00060) + +/* Registers contained in BAR 4 for Switch management */ +#define FM10K_SW_SYSTIME_ADJUST 0x0224D +#define FM10K_SW_SYSTIME_ADJUST_MASK 0x3FFFFFFF +#define FM10K_SW_SYSTIME_ADJUST_DIR_POSITIVE 0x80000000 +#define FM10K_SW_SYSTIME_PULSE(_n) ((_n) + 0x02252) + +#ifndef ETH_ALEN +#define ETH_ALEN 6 +#endif /* ETH_ALEN */ + +#ifndef IS_ZERO_ETHER_ADDR +/* make certain address is not 0 */ +#define IS_ZERO_ETHER_ADDR(addr) \ +(!((addr)[0] | (addr)[1] | (addr)[2] | (addr)[3] | (addr)[4] | (addr)[5])) +#endif + +#ifndef IS_MULTICAST_ETHER_ADDR +#define IS_MULTICAST_ETHER_ADDR(addr) ((addr)[0] & 0x1) +#endif + +#ifndef IS_VALID_ETHER_ADDR +/* make certain address is not multicast or 0 */ +#define IS_VALID_ETHER_ADDR(addr) \ +(!IS_MULTICAST_ETHER_ADDR(addr) && !IS_ZERO_ETHER_ADDR(addr)) +#endif + +enum fm10k_int_source { + fm10k_int_mailbox = 0, + fm10k_int_pcie_fault = 1, + fm10k_int_switch_up_down = 2, + fm10k_int_switch_event = 3, + fm10k_int_sram = 4, + fm10k_int_vflr = 5, + fm10k_int_max_hold_time = 6, + fm10k_int_sources_max_pf +}; + +/* PCIe bus speeds */ +enum fm10k_bus_speed { + fm10k_bus_speed_unknown = 0, + fm10k_bus_speed_2500 = 2500, + fm10k_bus_speed_5000 = 5000, + fm10k_bus_speed_8000 = 8000, + fm10k_bus_speed_reserved +}; + +/* PCIe bus widths */ +enum fm10k_bus_width { + fm10k_bus_width_unknown = 0, + fm10k_bus_width_pcie_x1 = 1, + fm10k_bus_width_pcie_x2 = 2, + fm10k_bus_width_pcie_x4 = 4, + fm10k_bus_width_pcie_x8 = 8, + fm10k_bus_width_reserved +}; + +/* PCIe payload sizes */ +enum fm10k_bus_payload { + fm10k_bus_payload_unknown = 0, + fm10k_bus_payload_128 = 1, + fm10k_bus_payload_256 = 2, + fm10k_bus_payload_512 = 3, + fm10k_bus_payload_reserved +}; + +/* Bus parameters */ +struct fm10k_bus_info { + enum fm10k_bus_speed speed; + enum fm10k_bus_width width; + enum fm10k_bus_payload payload; +}; + +/* Statistics related declarations */ +struct fm10k_hw_stat { + u64 count; + u32 base_l; + u32 base_h; +}; + +struct fm10k_hw_stats_q { + struct fm10k_hw_stat tx_bytes; + struct fm10k_hw_stat tx_packets; +#define tx_stats_idx tx_packets.base_h + struct fm10k_hw_stat rx_bytes; + struct fm10k_hw_stat rx_packets; +#define rx_stats_idx rx_packets.base_h + struct fm10k_hw_stat rx_drops; +}; + +struct fm10k_hw_stats { + struct fm10k_hw_stat timeout; +#define stats_idx timeout.base_h + struct fm10k_hw_stat ur; + struct fm10k_hw_stat ca; + struct fm10k_hw_stat um; + struct fm10k_hw_stat xec; + struct fm10k_hw_stat vlan_drop; + struct fm10k_hw_stat loopback_drop; + struct fm10k_hw_stat nodesc_drop; + struct fm10k_hw_stats_q q[FM10K_MAX_QUEUES_PF]; +}; + +/* Establish DGLORT feature priority */ +enum fm10k_dglortdec_idx { + fm10k_dglort_default = 0, + fm10k_dglort_vf_rsvd0 = 1, + fm10k_dglort_vf_rss = 2, + fm10k_dglort_pf_rsvd0 = 3, + fm10k_dglort_pf_queue = 4, + fm10k_dglort_pf_vsi = 5, + fm10k_dglort_pf_rsvd1 = 6, + fm10k_dglort_pf_rss = 7 +}; + +struct fm10k_dglort_cfg { + u16 glort; /* GLORT base */ + u16 queue_b; /* Base value for queue */ + u8 vsi_b; /* Base value for VSI */ + u8 
idx; /* index of DGLORTDEC entry */ + u8 rss_l; /* RSS indices */ + u8 pc_l; /* Priority Class indices */ + u8 vsi_l; /* Number of bits from GLORT used to determine VSI */ + u8 queue_l; /* Number of bits from GLORT used to determine queue */ + u8 shared_l; /* Ignored bits from GLORT resulting in shared VSI */ + u8 inner_rss; /* Boolean value if inner header is used for RSS */ +}; + +enum fm10k_pca_fault { + PCA_NO_FAULT, + PCA_UNMAPPED_ADDR, + PCA_BAD_QACCESS_PF, + PCA_BAD_QACCESS_VF, + PCA_MALICIOUS_REQ, + PCA_POISONED_TLP, + PCA_TLP_ABORT, + __PCA_MAX +}; + +enum fm10k_thi_fault { + THI_NO_FAULT, + THI_MAL_DIS_Q_FAULT, + __THI_MAX +}; + +enum fm10k_fum_fault { + FUM_NO_FAULT, + FUM_UNMAPPED_ADDR, + FUM_POISONED_TLP, + FUM_BAD_VF_QACCESS, + FUM_ADD_DECODE_ERR, + FUM_RO_ERROR, + FUM_QPRC_CRC_ERROR, + FUM_CSR_TIMEOUT, + FUM_INVALID_TYPE, + FUM_INVALID_LENGTH, + FUM_INVALID_BE, + FUM_INVALID_ALIGN, + __FUM_MAX +}; + +struct fm10k_fault { + u64 address; /* Address at the time fault was detected */ + u32 specinfo; /* Extra info on this fault (fault dependent) */ + u8 type; /* Fault value dependent on subunit */ + u8 func; /* Function number of the fault */ +}; + +struct fm10k_mac_ops { + /* basic bring-up and tear-down */ + s32 (*reset_hw)(struct fm10k_hw *); + s32 (*init_hw)(struct fm10k_hw *); + s32 (*start_hw)(struct fm10k_hw *); + s32 (*stop_hw)(struct fm10k_hw *); + s32 (*get_bus_info)(struct fm10k_hw *); + s32 (*get_host_state)(struct fm10k_hw *, bool *); + s32 (*request_lport_map)(struct fm10k_hw *); +#ifndef NO_IS_SLOT_APPROPRIATE_CHECK + bool (*is_slot_appropriate)(struct fm10k_hw *); +#endif + s32 (*update_vlan)(struct fm10k_hw *, u32, u8, bool); + s32 (*read_mac_addr)(struct fm10k_hw *); + s32 (*update_uc_addr)(struct fm10k_hw *, u16, const u8 *, + u16, bool, u8); + s32 (*update_mc_addr)(struct fm10k_hw *, u16, const u8 *, u16, bool); + s32 (*update_xcast_mode)(struct fm10k_hw *, u16, u8); + void (*update_int_moderator)(struct fm10k_hw *); + s32 (*update_lport_state)(struct fm10k_hw *, u16, u16, bool); + void (*update_hw_stats)(struct fm10k_hw *, struct fm10k_hw_stats *); + void (*rebind_hw_stats)(struct fm10k_hw *, struct fm10k_hw_stats *); + s32 (*configure_dglort_map)(struct fm10k_hw *, + struct fm10k_dglort_cfg *); + void (*set_dma_mask)(struct fm10k_hw *, u64); + s32 (*get_fault)(struct fm10k_hw *, int, struct fm10k_fault *); + s32 (*adjust_systime)(struct fm10k_hw *, s32 ppb); + s32 (*notify_offset)(struct fm10k_hw *, u64 offset); + u64 (*read_systime)(struct fm10k_hw *); +}; + +enum fm10k_mac_type { + fm10k_mac_unknown = 0, + fm10k_mac_pf, + fm10k_mac_vf, + fm10k_num_macs +}; + +struct fm10k_mac_info { + struct fm10k_mac_ops ops; + enum fm10k_mac_type type; + u8 addr[ETH_ALEN]; + u8 perm_addr[ETH_ALEN]; + u16 default_vid; + u16 max_msix_vectors; + u16 max_queues; + bool vlan_override; + bool get_host_state; + bool tx_ready; + u32 dglort_map; + u8 itr_scale; + u64 reset_while_pending; +}; + +struct fm10k_swapi_table_info { + u32 used; + u32 avail; +}; + +struct fm10k_swapi_info { + u32 status; + struct fm10k_swapi_table_info mac; + struct fm10k_swapi_table_info nexthop; + struct fm10k_swapi_table_info ffu; +}; + +enum fm10k_xcast_modes { + FM10K_XCAST_MODE_ALLMULTI = 0, + FM10K_XCAST_MODE_MULTI = 1, + FM10K_XCAST_MODE_PROMISC = 2, + FM10K_XCAST_MODE_NONE = 3, + FM10K_XCAST_MODE_DISABLE = 4 +}; + +enum fm10k_timestamp_modes { + FM10K_TIMESTAMP_MODE_NONE = 0, + FM10K_TIMESTAMP_MODE_PEP_TO_PEP = 1, + FM10K_TIMESTAMP_MODE_PEP_TO_ANY = 2, +}; + +#define FM10K_VF_TC_MAX 100000 /* 
100,000 Mb/s aka 100Gb/s */ +#define FM10K_VF_TC_MIN 1 /* 1 Mb/s is the slowest rate */ + +struct fm10k_vf_info { + /* mbx must be first field in struct unless all default IOV message + * handlers are redone as the assumption is that vf_info starts + * at the same offset as the mailbox + */ + struct fm10k_mbx_info mbx; /* PF side of VF mailbox */ + int rate; /* Tx BW cap as defined by OS */ + u16 glort; /* resource tag for this VF */ + u16 sw_vid; /* Switch API assigned VLAN */ + u16 pf_vid; /* PF assigned Default VLAN */ + u8 mac[ETH_ALEN]; /* PF Default MAC address */ + u8 vsi; /* VSI identifier */ + u8 vf_idx; /* which VF this is */ + u8 vf_flags; /* flags indicating what modes + * are supported for the port + */ +#ifndef NO_FM10K_VF_TRUSTED_MODE + bool trusted; /* VF trust mode */ +#endif +}; + +#define FM10K_VF_FLAG_ALLMULTI_CAPABLE (u8)(BIT(FM10K_XCAST_MODE_ALLMULTI)) +#define FM10K_VF_FLAG_MULTI_CAPABLE (u8)(BIT(FM10K_XCAST_MODE_MULTI)) +#define FM10K_VF_FLAG_PROMISC_CAPABLE (u8)(BIT(FM10K_XCAST_MODE_PROMISC)) +#define FM10K_VF_FLAG_NONE_CAPABLE (u8)(BIT(FM10K_XCAST_MODE_NONE)) +#define FM10K_VF_FLAG_CAPABLE(vf_info) ((vf_info)->vf_flags & (u8)0xF) +#define FM10K_VF_FLAG_ENABLED(vf_info) ((vf_info)->vf_flags >> 4) +#define FM10K_VF_FLAG_SET_MODE(mode) ((u8)0x10 << (mode)) +#define FM10K_VF_FLAG_SET_MODE_NONE \ + FM10K_VF_FLAG_SET_MODE(FM10K_XCAST_MODE_NONE) +#define FM10K_VF_FLAG_MULTI_ENABLED \ + (FM10K_VF_FLAG_SET_MODE(FM10K_XCAST_MODE_ALLMULTI) | \ + FM10K_VF_FLAG_SET_MODE(FM10K_XCAST_MODE_MULTI) | \ + FM10K_VF_FLAG_SET_MODE(FM10K_XCAST_MODE_PROMISC)) + +struct fm10k_iov_ops { + /* IOV related bring-up and tear-down */ + s32 (*assign_resources)(struct fm10k_hw *, u16, u16); + s32 (*configure_tc)(struct fm10k_hw *, u16, int); + s32 (*assign_int_moderator)(struct fm10k_hw *, u16); + s32 (*assign_default_mac_vlan)(struct fm10k_hw *, + struct fm10k_vf_info *); + s32 (*reset_resources)(struct fm10k_hw *, + struct fm10k_vf_info *); + s32 (*set_lport)(struct fm10k_hw *, struct fm10k_vf_info *, u16, u8); + void (*reset_lport)(struct fm10k_hw *, struct fm10k_vf_info *); + void (*update_stats)(struct fm10k_hw *, struct fm10k_hw_stats_q *, u16); + void (*notify_offset)(struct fm10k_hw *, struct fm10k_vf_info*, u64); +}; + +struct fm10k_iov_info { + struct fm10k_iov_ops ops; + u16 total_vfs; + u16 num_vfs; + u16 num_pools; +}; + +struct fm10k_hw { + u32 *hw_addr; + u32 *sw_addr; + void *back; + struct fm10k_mac_info mac; + struct fm10k_bus_info bus; + struct fm10k_bus_info bus_caps; + struct fm10k_iov_info iov; + struct fm10k_mbx_info mbx; + struct fm10k_swapi_info swapi; + u16 device_id; + u16 vendor_id; + u16 subsystem_device_id; + u16 subsystem_vendor_id; + u8 revision_id; + u32 flags; +#define FM10K_HW_FLAG_CLOCK_OWNER BIT(0) +}; + +/* Number of Transmit and Receive Descriptors must be a multiple of 8 */ +#define FM10K_REQ_TX_DESCRIPTOR_MULTIPLE 8 +#define FM10K_REQ_RX_DESCRIPTOR_MULTIPLE 8 + +/* Transmit Descriptor */ +struct fm10k_tx_desc { + __le64 buffer_addr; /* Address of the descriptor's data buffer */ + __le16 buflen; /* Length of data to be DMAed */ + __le16 vlan; /* VLAN_ID and VPRI to be inserted in FTAG */ + __le16 mss; /* MSS for segmentation offload */ + u8 hdrlen; /* Header size for segmentation offload */ + u8 flags; /* Status and offload request flags */ +}; + +/* Transmit Descriptor Cache Structure */ +struct fm10k_tx_desc_cache { + struct fm10k_tx_desc tx_desc[256]; +}; + +#define FM10K_TXD_FLAG_INT 0x01 +#define FM10K_TXD_FLAG_TIME 0x02 +#define FM10K_TXD_FLAG_CSUM 
0x04 +#define FM10K_TXD_FLAG_FTAG 0x10 +#define FM10K_TXD_FLAG_RS 0x20 +#define FM10K_TXD_FLAG_LAST 0x40 +#define FM10K_TXD_FLAG_DONE 0x80 + + +/* These macros are meant to enable optimal placement of the RS and INT + * bits. It will point us to the last descriptor in the cache for either the + * start of the packet, or the end of the packet. If the index is actually + * at the start of the FIFO it will point to the offset for the last index + * in the FIFO to prevent an unnecessary write. + */ +#define FM10K_TXD_WB_FIFO_SIZE 4 + +/* Receive Descriptor - 32B */ +union fm10k_rx_desc { + struct { + __le64 pkt_addr; /* Packet buffer address */ + __le64 hdr_addr; /* Header buffer address */ + __le64 reserved; /* Empty space, RSS hash */ + __le64 timestamp; + } q; /* Read, Writeback, 64b quad-words */ + struct { + __le32 data; /* RSS and header data */ + __le32 rss; /* RSS Hash */ + __le32 staterr; + __le32 vlan_len; + __le32 glort; /* sglort/dglort */ + } d; /* Writeback, 32b double-words */ + struct { + __le16 pkt_info; /* RSS, Pkt type */ + __le16 hdr_info; /* Splithdr, hdrlen, xC */ + __le16 rss_lower; + __le16 rss_upper; + __le16 status; /* status/error */ + __le16 csum_err; /* checksum or extended error value */ + __le16 length; /* Packet length */ + __le16 vlan; /* VLAN tag */ + __le16 dglort; + __le16 sglort; + } w; /* Writeback, 16b words */ +}; + +#define FM10K_RXD_RSSTYPE_MASK 0x000F +enum fm10k_rdesc_rss_type { + FM10K_RSSTYPE_NONE = 0x0, + FM10K_RSSTYPE_IPV4_TCP = 0x1, + FM10K_RSSTYPE_IPV4 = 0x2, + FM10K_RSSTYPE_IPV6_TCP = 0x3, + /* Reserved 0x4 */ + FM10K_RSSTYPE_IPV6 = 0x5, + /* Reserved 0x6 */ + FM10K_RSSTYPE_IPV4_UDP = 0x7, + FM10K_RSSTYPE_IPV6_UDP = 0x8 + /* Reserved 0x9 - 0xF */ +}; + +#define FM10K_RXD_PKTTYPE_MASK 0x03F0 +#define FM10K_RXD_PKTTYPE_SHIFT 4 +enum fm10k_rdesc_pkt_type { + /* L3 type */ + FM10K_PKTTYPE_OTHER = 0x00, + FM10K_PKTTYPE_IPV4 = 0x01, + FM10K_PKTTYPE_IPV4_EX = 0x02, + FM10K_PKTTYPE_IPV6 = 0x03, + FM10K_PKTTYPE_IPV6_EX = 0x04, + + /* L4 type */ + FM10K_PKTTYPE_TCP = 0x08, + FM10K_PKTTYPE_UDP = 0x10, + FM10K_PKTTYPE_GRE = 0x18, + FM10K_PKTTYPE_VXLAN = 0x20, + FM10K_PKTTYPE_NVGRE = 0x28, + FM10K_PKTTYPE_GENEVE = 0x30 +}; + +#define FM10K_RXD_HDR_INFO_XC_MASK 0x0006 +enum fm10k_rxdesc_xc { + FM10K_XC_UNICAST = 0x0, + FM10K_XC_MULTICAST = 0x4, + FM10K_XC_BROADCAST = 0x6 +}; + + +#define FM10K_RXD_STATUS_DD 0x0001 /* Descriptor done */ +#define FM10K_RXD_STATUS_EOP 0x0002 /* End of packet */ +#define FM10K_RXD_STATUS_IPCS 0x0008 /* Indicates IPv4 csum */ +#define FM10K_RXD_STATUS_L4CS 0x0010 /* Indicates an L4 csum */ +#define FM10K_RXD_STATUS_L4CS2 0x0040 /* Inner header L4 csum */ +#define FM10K_RXD_STATUS_L4E2 0x0800 /* Inner header L4 csum err */ +#define FM10K_RXD_STATUS_IPE2 0x1000 /* Inner header IPv4 csum err */ +#define FM10K_RXD_STATUS_RXE 0x2000 /* Generic Rx error */ +#define FM10K_RXD_STATUS_L4E 0x4000 /* L4 csum error */ +#define FM10K_RXD_STATUS_IPE 0x8000 /* IPv4 csum error */ + +#define FM10K_RXD_ERR_SWITCH_ERROR 0x0001 /* Switch found bad packet */ +#define FM10K_RXD_ERR_NO_DESCRIPTOR 0x0002 /* No descriptor available */ +#define FM10K_RXD_ERR_PP_ERROR 0x0004 /* RAM error during processing */ +#define FM10K_RXD_ERR_SWITCH_READY 0x0008 /* Link transition mid-packet */ +#define FM10K_RXD_ERR_TOO_BIG 0x0010 /* Pkt too big for single buf */ + + +struct fm10k_ftag { + __be16 swpri_type_user; + __be16 vlan; + __be16 sglort; + __be16 dglort; +}; + +#endif /* _FM10K_TYPE_H */ diff --git a/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_vf.c 
b/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_vf.c new file mode 100644 index 000000000..6809c3cfd --- /dev/null +++ b/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_vf.c @@ -0,0 +1,646 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2013 - 2015 Intel Corporation + */ + +#include "fm10k_vf.h" + +/** + * fm10k_stop_hw_vf - Stop Tx/Rx units + * @hw: pointer to hardware structure + * + **/ +STATIC s32 fm10k_stop_hw_vf(struct fm10k_hw *hw) +{ + u8 *perm_addr = hw->mac.perm_addr; + u32 bal = 0, bah = 0, tdlen; + s32 err; + u16 i; + + DEBUGFUNC("fm10k_stop_hw_vf"); + + /* we need to disable the queues before taking further steps */ + err = fm10k_stop_hw_generic(hw); + if (err && err != FM10K_ERR_REQUESTS_PENDING) + return err; + + /* If permanent address is set then we need to restore it */ + if (IS_VALID_ETHER_ADDR(perm_addr)) { + bal = (((u32)perm_addr[3]) << 24) | + (((u32)perm_addr[4]) << 16) | + (((u32)perm_addr[5]) << 8); + bah = (((u32)0xFF) << 24) | + (((u32)perm_addr[0]) << 16) | + (((u32)perm_addr[1]) << 8) | + ((u32)perm_addr[2]); + } + + /* restore default itr_scale for next VF initialization */ + tdlen = hw->mac.itr_scale << FM10K_TDLEN_ITR_SCALE_SHIFT; + + /* The queues have already been disabled so we just need to + * update their base address registers + */ + for (i = 0; i < hw->mac.max_queues; i++) { + FM10K_WRITE_REG(hw, FM10K_TDBAL(i), bal); + FM10K_WRITE_REG(hw, FM10K_TDBAH(i), bah); + FM10K_WRITE_REG(hw, FM10K_RDBAL(i), bal); + FM10K_WRITE_REG(hw, FM10K_RDBAH(i), bah); + /* Restore ITR scale in software-defined mechanism in TDLEN + * for next VF initialization. See definition of + * FM10K_TDLEN_ITR_SCALE_SHIFT for more details on the use of + * TDLEN here. + */ + FM10K_WRITE_REG(hw, FM10K_TDLEN(i), tdlen); + } + + return err; +} + +/** + * fm10k_reset_hw_vf - VF hardware reset + * @hw: pointer to hardware structure + * + * This function should return the hardware to a state similar to the + * one it is in after just being initialized. 
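+ *
+ * A minimal usage sketch, assuming the ops table has been populated by
+ * fm10k_init_ops_vf() further below (illustrative only, not a fixed
+ * calling convention):
+ *
+ *	s32 err = hw->mac.ops.reset_hw(hw);
+ *	if (err == FM10K_SUCCESS)
+ *		err = hw->mac.ops.init_hw(hw);
+ *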
+ **/ +STATIC s32 fm10k_reset_hw_vf(struct fm10k_hw *hw) +{ + s32 err; + + DEBUGFUNC("fm10k_reset_hw_vf"); + + /* shut down queues we own and reset DMA configuration */ + err = fm10k_stop_hw_vf(hw); + if (err == FM10K_ERR_REQUESTS_PENDING) + hw->mac.reset_while_pending++; + else if (err) + return err; + + /* Inititate VF reset */ + FM10K_WRITE_REG(hw, FM10K_VFCTRL, FM10K_VFCTRL_RST); + + /* Flush write and allow 100us for reset to complete */ + FM10K_WRITE_FLUSH(hw); + usec_delay(FM10K_RESET_TIMEOUT); + + /* Clear reset bit and verify it was cleared */ + FM10K_WRITE_REG(hw, FM10K_VFCTRL, 0); + if (FM10K_READ_REG(hw, FM10K_VFCTRL) & FM10K_VFCTRL_RST) + return FM10K_ERR_RESET_FAILED; + + return FM10K_SUCCESS; +} + +/** + * fm10k_init_hw_vf - VF hardware initialization + * @hw: pointer to hardware structure + * + **/ +STATIC s32 fm10k_init_hw_vf(struct fm10k_hw *hw) +{ + u32 tqdloc, tqdloc0 = ~FM10K_READ_REG(hw, FM10K_TQDLOC(0)); + s32 err; + u16 i; + + DEBUGFUNC("fm10k_init_hw_vf"); + + /* verify we have at least 1 queue */ + if (!~FM10K_READ_REG(hw, FM10K_TXQCTL(0)) || + !~FM10K_READ_REG(hw, FM10K_RXQCTL(0))) { + err = FM10K_ERR_NO_RESOURCES; + goto reset_max_queues; + } + + /* determine how many queues we have */ + for (i = 1; tqdloc0 && (i < FM10K_MAX_QUEUES_POOL); i++) { + /* verify the Descriptor cache offsets are increasing */ + tqdloc = ~FM10K_READ_REG(hw, FM10K_TQDLOC(i)); + if (!tqdloc || (tqdloc == tqdloc0)) + break; + + /* check to verify the PF doesn't own any of our queues */ + if (!~FM10K_READ_REG(hw, FM10K_TXQCTL(i)) || + !~FM10K_READ_REG(hw, FM10K_RXQCTL(i))) + break; + } + + /* shut down queues we own and reset DMA configuration */ + err = fm10k_disable_queues_generic(hw, i); + if (err) + goto reset_max_queues; + + /* record maximum queue count */ + hw->mac.max_queues = i; + + /* fetch default VLAN and ITR scale */ + hw->mac.default_vid = (FM10K_READ_REG(hw, FM10K_TXQCTL(0)) & + FM10K_TXQCTL_VID_MASK) >> FM10K_TXQCTL_VID_SHIFT; + /* Read the ITR scale from TDLEN. See the definition of + * FM10K_TDLEN_ITR_SCALE_SHIFT for more information about how TDLEN is + * used here. + */ + hw->mac.itr_scale = (FM10K_READ_REG(hw, FM10K_TDLEN(0)) & + FM10K_TDLEN_ITR_SCALE_MASK) >> + FM10K_TDLEN_ITR_SCALE_SHIFT; + + return FM10K_SUCCESS; + +reset_max_queues: + hw->mac.max_queues = 0; + + return err; +} + +#ifndef NO_IS_SLOT_APPROPRIATE_CHECK +/** + * fm10k_is_slot_appropriate_vf - Indicate appropriate slot for this SKU + * @hw: pointer to hardware structure + * + * Looks at the PCIe bus info to confirm whether or not this slot can support + * the necessary bandwidth for this device. Since the VF has no control over + * the "slot" it is in, always indicate that the slot is appropriate. 
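+ * The PF counterpart is expected to compare hw->bus against hw->bus_caps
+ * (see struct fm10k_bus_info); the VF variant simply reports TRUE.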
+ **/ +STATIC bool fm10k_is_slot_appropriate_vf(struct fm10k_hw *hw) +{ + UNREFERENCED_1PARAMETER(hw); + DEBUGFUNC("fm10k_is_slot_appropriate_vf"); + + return TRUE; +} + +#endif +/* This structure defines the attibutes to be parsed below */ +const struct fm10k_tlv_attr fm10k_mac_vlan_msg_attr[] = { + FM10K_TLV_ATTR_U32(FM10K_MAC_VLAN_MSG_VLAN), + FM10K_TLV_ATTR_BOOL(FM10K_MAC_VLAN_MSG_SET), + FM10K_TLV_ATTR_MAC_ADDR(FM10K_MAC_VLAN_MSG_MAC), + FM10K_TLV_ATTR_MAC_ADDR(FM10K_MAC_VLAN_MSG_DEFAULT_MAC), + FM10K_TLV_ATTR_MAC_ADDR(FM10K_MAC_VLAN_MSG_MULTICAST), + FM10K_TLV_ATTR_LAST +}; + +/** + * fm10k_update_vlan_vf - Update status of VLAN ID in VLAN filter table + * @hw: pointer to hardware structure + * @vid: VLAN ID to add to table + * @vsi: Reserved, should always be 0 + * @set: Indicates if this is a set or clear operation + * + * This function adds or removes the corresponding VLAN ID from the VLAN + * filter table for this VF. + **/ +STATIC s32 fm10k_update_vlan_vf(struct fm10k_hw *hw, u32 vid, u8 vsi, bool set) +{ + struct fm10k_mbx_info *mbx = &hw->mbx; + u32 msg[4]; + + /* verify the index is not set */ + if (vsi) + return FM10K_ERR_PARAM; + + /* clever trick to verify reserved bits in both vid and length */ + if ((vid << 16 | vid) >> 28) + return FM10K_ERR_PARAM; + + /* encode set bit into the VLAN ID */ + if (!set) + vid |= FM10K_VLAN_CLEAR; + + /* generate VLAN request */ + fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_MAC_VLAN); + fm10k_tlv_attr_put_u32(msg, FM10K_MAC_VLAN_MSG_VLAN, vid); + + /* load onto outgoing mailbox */ + return mbx->ops.enqueue_tx(hw, mbx, msg); +} + +/** + * fm10k_msg_mac_vlan_vf - Read device MAC address from mailbox message + * @hw: pointer to the HW structure + * @results: Attributes for message + * @mbx: unused mailbox data + * + * This function should determine the MAC address for the VF + **/ +s32 fm10k_msg_mac_vlan_vf(struct fm10k_hw *hw, u32 **results, + struct fm10k_mbx_info *mbx) +{ + u8 perm_addr[ETH_ALEN]; + u16 vid; + s32 err; + + UNREFERENCED_1PARAMETER(mbx); + DEBUGFUNC("fm10k_msg_mac_vlan_vf"); + + /* record MAC address requested */ + err = fm10k_tlv_attr_get_mac_vlan( + results[FM10K_MAC_VLAN_MSG_DEFAULT_MAC], + perm_addr, &vid); + if (err) + return err; + + memcpy(hw->mac.perm_addr, perm_addr, ETH_ALEN); + hw->mac.default_vid = vid & (FM10K_VLAN_TABLE_VID_MAX - 1); + hw->mac.vlan_override = !!(vid & FM10K_VLAN_OVERRIDE); + + return FM10K_SUCCESS; +} + +/** + * fm10k_read_mac_addr_vf - Read device MAC address + * @hw: pointer to the HW structure + * + * This function should determine the MAC address for the VF + **/ +STATIC s32 fm10k_read_mac_addr_vf(struct fm10k_hw *hw) +{ + u8 perm_addr[ETH_ALEN]; + u32 base_addr; + + DEBUGFUNC("fm10k_read_mac_addr_vf"); + + base_addr = FM10K_READ_REG(hw, FM10K_TDBAL(0)); + + /* last byte should be 0 */ + if (base_addr << 24) + return FM10K_ERR_INVALID_MAC_ADDR; + + perm_addr[3] = (u8)(base_addr >> 24); + perm_addr[4] = (u8)(base_addr >> 16); + perm_addr[5] = (u8)(base_addr >> 8); + + base_addr = FM10K_READ_REG(hw, FM10K_TDBAH(0)); + + /* first byte should be all 1's */ + if ((~base_addr) >> 24) + return FM10K_ERR_INVALID_MAC_ADDR; + + perm_addr[0] = (u8)(base_addr >> 16); + perm_addr[1] = (u8)(base_addr >> 8); + perm_addr[2] = (u8)(base_addr); + + memcpy(hw->mac.perm_addr, perm_addr, ETH_ALEN); + memcpy(hw->mac.addr, perm_addr, ETH_ALEN); + + return FM10K_SUCCESS; +} + +/** + * fm10k_update_uc_addr_vf - Update device unicast addresses + * @hw: pointer to the HW structure + * @glort: unused + * @mac: MAC 
address to add/remove from table + * @vid: VLAN ID to add/remove from table + * @add: Indicates if this is an add or remove operation + * @flags: flags field to indicate add and secure - unused + * + * This function is used to add or remove unicast MAC addresses for + * the VF. + **/ +STATIC s32 fm10k_update_uc_addr_vf(struct fm10k_hw *hw, u16 glort, + const u8 *mac, u16 vid, bool add, u8 flags) +{ + struct fm10k_mbx_info *mbx = &hw->mbx; + u32 msg[7]; + + DEBUGFUNC("fm10k_update_uc_addr_vf"); + + UNREFERENCED_2PARAMETER(glort, flags); + + /* verify VLAN ID is valid */ + if (vid >= FM10K_VLAN_TABLE_VID_MAX) + return FM10K_ERR_PARAM; + + /* verify MAC address is valid */ + if (!IS_VALID_ETHER_ADDR(mac)) + return FM10K_ERR_PARAM; + + /* verify we are not locked down on the MAC address */ + if (IS_VALID_ETHER_ADDR(hw->mac.perm_addr) && + memcmp(hw->mac.perm_addr, mac, ETH_ALEN)) + return FM10K_ERR_PARAM; + + /* add bit to notify us if this is a set or clear operation */ + if (!add) + vid |= FM10K_VLAN_CLEAR; + + /* generate VLAN request */ + fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_MAC_VLAN); + fm10k_tlv_attr_put_mac_vlan(msg, FM10K_MAC_VLAN_MSG_MAC, mac, vid); + + /* load onto outgoing mailbox */ + return mbx->ops.enqueue_tx(hw, mbx, msg); +} + +/** + * fm10k_update_mc_addr_vf - Update device multicast addresses + * @hw: pointer to the HW structure + * @glort: unused + * @mac: MAC address to add/remove from table + * @vid: VLAN ID to add/remove from table + * @add: Indicates if this is an add or remove operation + * + * This function is used to add or remove multicast MAC addresses for + * the VF. + **/ +STATIC s32 fm10k_update_mc_addr_vf(struct fm10k_hw *hw, u16 glort, + const u8 *mac, u16 vid, bool add) +{ + struct fm10k_mbx_info *mbx = &hw->mbx; + u32 msg[7]; + + DEBUGFUNC("fm10k_update_uc_addr_vf"); + + UNREFERENCED_1PARAMETER(glort); + + /* verify VLAN ID is valid */ + if (vid >= FM10K_VLAN_TABLE_VID_MAX) + return FM10K_ERR_PARAM; + + /* verify multicast address is valid */ + if (!IS_MULTICAST_ETHER_ADDR(mac)) + return FM10K_ERR_PARAM; + + /* add bit to notify us if this is a set or clear operation */ + if (!add) + vid |= FM10K_VLAN_CLEAR; + + /* generate VLAN request */ + fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_MAC_VLAN); + fm10k_tlv_attr_put_mac_vlan(msg, FM10K_MAC_VLAN_MSG_MULTICAST, + mac, vid); + + /* load onto outgoing mailbox */ + return mbx->ops.enqueue_tx(hw, mbx, msg); +} + +/** + * fm10k_update_int_moderator_vf - Request update of interrupt moderator list + * @hw: pointer to hardware structure + * + * This function will issue a request to the PF to rescan our MSI-X table + * and to update the interrupt moderator linked list. 
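+ * The request carries no TLV attributes; the FM10K_VF_MSG_ID_MSIX header
+ * written by fm10k_tlv_msg_init() below is sufficient to identify the
+ * request to the PF.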
+ **/ +STATIC void fm10k_update_int_moderator_vf(struct fm10k_hw *hw) +{ + struct fm10k_mbx_info *mbx = &hw->mbx; + u32 msg[1]; + + /* generate MSI-X request */ + fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_MSIX); + + /* load onto outgoing mailbox */ + mbx->ops.enqueue_tx(hw, mbx, msg); +} + +/* This structure defines the attibutes to be parsed below */ +const struct fm10k_tlv_attr fm10k_lport_state_msg_attr[] = { + FM10K_TLV_ATTR_BOOL(FM10K_LPORT_STATE_MSG_DISABLE), + FM10K_TLV_ATTR_U8(FM10K_LPORT_STATE_MSG_XCAST_MODE), + FM10K_TLV_ATTR_BOOL(FM10K_LPORT_STATE_MSG_READY), + FM10K_TLV_ATTR_LAST +}; + +/** + * fm10k_msg_lport_state_vf - Message handler for lport_state message from PF + * @hw: Pointer to hardware structure + * @results: pointer array containing parsed data + * @mbx: Pointer to mailbox information structure + * + * This handler is meant to capture the indication from the PF that we + * are ready to bring up the interface. + **/ +s32 fm10k_msg_lport_state_vf(struct fm10k_hw *hw, u32 **results, + struct fm10k_mbx_info *mbx) +{ + UNREFERENCED_1PARAMETER(mbx); + DEBUGFUNC("fm10k_msg_lport_state_vf"); + + hw->mac.dglort_map = !results[FM10K_LPORT_STATE_MSG_READY] ? + FM10K_DGLORTMAP_NONE : FM10K_DGLORTMAP_ZERO; + + return FM10K_SUCCESS; +} + +/** + * fm10k_update_lport_state_vf - Update device state in lower device + * @hw: pointer to the HW structure + * @glort: unused + * @count: number of logical ports to enable - unused (always 1) + * @enable: boolean value indicating if this is an enable or disable request + * + * Notify the lower device of a state change. If the lower device is + * enabled we can add filters, if it is disabled all filters for this + * logical port are flushed. + **/ +STATIC s32 fm10k_update_lport_state_vf(struct fm10k_hw *hw, u16 glort, + u16 count, bool enable) +{ + struct fm10k_mbx_info *mbx = &hw->mbx; + u32 msg[2]; + + UNREFERENCED_2PARAMETER(glort, count); + DEBUGFUNC("fm10k_update_lport_state_vf"); + + /* reset glort mask 0 as we have to wait to be enabled */ + hw->mac.dglort_map = FM10K_DGLORTMAP_NONE; + + /* generate port state request */ + fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_LPORT_STATE); + if (!enable) + fm10k_tlv_attr_put_bool(msg, FM10K_LPORT_STATE_MSG_DISABLE); + + /* load onto outgoing mailbox */ + return mbx->ops.enqueue_tx(hw, mbx, msg); +} + +/** + * fm10k_update_xcast_mode_vf - Request update of multicast mode + * @hw: pointer to hardware structure + * @glort: unused + * @mode: integer value indicating mode being requested + * + * This function will attempt to request a higher mode for the port + * so that it can enable either multicast, multicast promiscuous, or + * promiscuous mode of operation. 
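+ *
+ * A minimal usage sketch through the ops table (the glort argument is
+ * ignored by the VF implementation):
+ *
+ *	hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
+ *				      FM10K_XCAST_MODE_PROMISC);
+ *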
+ **/ +STATIC s32 fm10k_update_xcast_mode_vf(struct fm10k_hw *hw, u16 glort, u8 mode) +{ + struct fm10k_mbx_info *mbx = &hw->mbx; + u32 msg[3]; + + UNREFERENCED_1PARAMETER(glort); + DEBUGFUNC("fm10k_update_xcast_mode_vf"); + + if (mode > FM10K_XCAST_MODE_NONE) + return FM10K_ERR_PARAM; + + /* generate message requesting to change xcast mode */ + fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_LPORT_STATE); + fm10k_tlv_attr_put_u8(msg, FM10K_LPORT_STATE_MSG_XCAST_MODE, mode); + + /* load onto outgoing mailbox */ + return mbx->ops.enqueue_tx(hw, mbx, msg); +} + +const struct fm10k_tlv_attr fm10k_1588_msg_attr[] = { + FM10K_TLV_ATTR_U64(FM10K_1588_MSG_CLK_OFFSET), + FM10K_TLV_ATTR_LAST +}; + +/* currently there is no shared 1588 message handler */ + +/** + * fm10k_update_hw_stats_vf - Updates hardware related statistics of VF + * @hw: pointer to hardware structure + * @stats: pointer to statistics structure + * + * This function collects and aggregates per queue hardware statistics. + **/ +void fm10k_update_hw_stats_vf(struct fm10k_hw *hw, + struct fm10k_hw_stats *stats) +{ + DEBUGFUNC("fm10k_update_hw_stats_vf"); + + fm10k_update_hw_stats_q(hw, stats->q, 0, hw->mac.max_queues); +} + +/** + * fm10k_rebind_hw_stats_vf - Resets base for hardware statistics of VF + * @hw: pointer to hardware structure + * @stats: pointer to the stats structure to update + * + * This function resets the base for queue hardware statistics. + **/ +void fm10k_rebind_hw_stats_vf(struct fm10k_hw *hw, + struct fm10k_hw_stats *stats) +{ + DEBUGFUNC("fm10k_rebind_hw_stats_vf"); + + /* Unbind Queue Statistics */ + fm10k_unbind_hw_stats_q(stats->q, 0, hw->mac.max_queues); + + /* Reinitialize bases for all stats */ + fm10k_update_hw_stats_vf(hw, stats); +} + +/** + * fm10k_configure_dglort_map_vf - Configures GLORT entry and queues + * @hw: pointer to hardware structure + * @dglort: pointer to dglort configuration structure + * + * Reads the configuration structure contained in dglort_cfg and uses + * that information to then populate a DGLORTMAP/DEC entry and the queues + * to which it has been assigned. + **/ +STATIC s32 fm10k_configure_dglort_map_vf(struct fm10k_hw *hw, + struct fm10k_dglort_cfg *dglort) +{ + UNREFERENCED_1PARAMETER(hw); + DEBUGFUNC("fm10k_configure_dglort_map_vf"); + + /* verify the dglort pointer */ + if (!dglort) + return FM10K_ERR_PARAM; + + /* stub for now until we determine correct message for this */ + + return FM10K_SUCCESS; +} + +/** + * fm10k_adjust_systime_vf - Adjust systime frequency + * @hw: pointer to hardware structure + * @ppb: adjustment rate in parts per billion + * + * This function takes an adjustment rate in parts per billion and will + * verify that this value is 0 as the VF cannot support adjusting the + * systime clock. + * + * If the ppb value is non-zero the return is ERR_PARAM else success + **/ +STATIC s32 fm10k_adjust_systime_vf(struct fm10k_hw *hw, s32 ppb) +{ + UNREFERENCED_1PARAMETER(hw); + DEBUGFUNC("fm10k_adjust_systime_vf"); + + /* The VF cannot adjust the clock frequency, however it should + * already have a syntonic clock with whichever host interface is + * running as the master for the host interface clock domain so + * there should be not frequency adjustment necessary. + */ + return ppb ? FM10K_ERR_PARAM : FM10K_SUCCESS; +} + +/** + * fm10k_read_systime_vf - Reads value of systime registers + * @hw: pointer to the hardware structure + * + * Function reads the content of 2 registers, combined to represent a 64 bit + * value measured in nanoseconds. 
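+ * The register at FM10K_VFSYSTIME holds the lower 32 bits and the register
+ * at FM10K_VFSYSTIME + 1 holds the upper 32 bits.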
In order to guarantee the value is accurate + * we check the 32 most significant bits both before and after reading the + * 32 least significant bits to verify they didn't change as we were reading + * the registers. + **/ +static u64 fm10k_read_systime_vf(struct fm10k_hw *hw) +{ + u32 systime_l, systime_h, systime_tmp; + + systime_h = fm10k_read_reg(hw, FM10K_VFSYSTIME + 1); + + do { + systime_tmp = systime_h; + systime_l = fm10k_read_reg(hw, FM10K_VFSYSTIME); + systime_h = fm10k_read_reg(hw, FM10K_VFSYSTIME + 1); + } while (systime_tmp != systime_h); + + return ((u64)systime_h << 32) | systime_l; +} + +static const struct fm10k_msg_data fm10k_msg_data_vf[] = { + FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test), + FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_msg_mac_vlan_vf), + FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_msg_lport_state_vf), + FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error), +}; + +/** + * fm10k_init_ops_vf - Inits func ptrs and MAC type + * @hw: pointer to hardware structure + * + * Initialize the function pointers and assign the MAC type for VF. + * Does not touch the hardware. + **/ +s32 fm10k_init_ops_vf(struct fm10k_hw *hw) +{ + struct fm10k_mac_info *mac = &hw->mac; + + DEBUGFUNC("fm10k_init_ops_vf"); + + fm10k_init_ops_generic(hw); + + mac->ops.reset_hw = &fm10k_reset_hw_vf; + mac->ops.init_hw = &fm10k_init_hw_vf; + mac->ops.start_hw = &fm10k_start_hw_generic; + mac->ops.stop_hw = &fm10k_stop_hw_vf; +#ifndef NO_IS_SLOT_APPROPRIATE_CHECK + mac->ops.is_slot_appropriate = &fm10k_is_slot_appropriate_vf; +#endif + mac->ops.update_vlan = &fm10k_update_vlan_vf; + mac->ops.read_mac_addr = &fm10k_read_mac_addr_vf; + mac->ops.update_uc_addr = &fm10k_update_uc_addr_vf; + mac->ops.update_mc_addr = &fm10k_update_mc_addr_vf; + mac->ops.update_xcast_mode = &fm10k_update_xcast_mode_vf; + mac->ops.update_int_moderator = &fm10k_update_int_moderator_vf; + mac->ops.update_lport_state = &fm10k_update_lport_state_vf; + mac->ops.update_hw_stats = &fm10k_update_hw_stats_vf; + mac->ops.rebind_hw_stats = &fm10k_rebind_hw_stats_vf; + mac->ops.configure_dglort_map = &fm10k_configure_dglort_map_vf; + mac->ops.get_host_state = &fm10k_get_host_state_generic; + mac->ops.adjust_systime = &fm10k_adjust_systime_vf; + mac->ops.read_systime = &fm10k_read_systime_vf; + + mac->max_msix_vectors = fm10k_get_pcie_msix_count_generic(hw); + + return fm10k_pfvf_mbx_init(hw, &hw->mbx, fm10k_msg_data_vf, 0); +} diff --git a/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_vf.h b/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_vf.h new file mode 100644 index 000000000..c90880df1 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/fm10k/base/fm10k_vf.h @@ -0,0 +1,68 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2013 - 2015 Intel Corporation + */ + +#ifndef _FM10K_VF_H_ +#define _FM10K_VF_H_ + +#include "fm10k_type.h" +#include "fm10k_common.h" + +enum fm10k_vf_tlv_msg_id { + FM10K_VF_MSG_ID_TEST = 0, /* msg ID reserved for testing */ + FM10K_VF_MSG_ID_MSIX, + FM10K_VF_MSG_ID_MAC_VLAN, + FM10K_VF_MSG_ID_LPORT_STATE, + FM10K_VF_MSG_ID_1588, + FM10K_VF_MSG_ID_MAX, +}; + +enum fm10k_tlv_mac_vlan_attr_id { + FM10K_MAC_VLAN_MSG_VLAN, + FM10K_MAC_VLAN_MSG_SET, + FM10K_MAC_VLAN_MSG_MAC, + FM10K_MAC_VLAN_MSG_DEFAULT_MAC, + FM10K_MAC_VLAN_MSG_MULTICAST, + FM10K_MAC_VLAN_MSG_ID_MAX +}; + +enum fm10k_tlv_lport_state_attr_id { + FM10K_LPORT_STATE_MSG_DISABLE, + FM10K_LPORT_STATE_MSG_XCAST_MODE, + FM10K_LPORT_STATE_MSG_READY, + FM10K_LPORT_STATE_MSG_MAX +}; + +enum fm10k_tlv_1588_attr_id { + FM10K_1588_MSG_TIMESTAMP = 0, /* deprecated */ + 
FM10K_1588_MSG_CLK_OFFSET, + FM10K_1588_MSG_MAX +}; + +#define FM10K_VF_MSG_MSIX_HANDLER(func) \ + FM10K_MSG_HANDLER(FM10K_VF_MSG_ID_MSIX, NULL, func) + +s32 fm10k_msg_mac_vlan_vf(struct fm10k_hw *, u32 **, struct fm10k_mbx_info *); +extern const struct fm10k_tlv_attr fm10k_mac_vlan_msg_attr[]; +#define FM10K_VF_MSG_MAC_VLAN_HANDLER(func) \ + FM10K_MSG_HANDLER(FM10K_VF_MSG_ID_MAC_VLAN, \ + fm10k_mac_vlan_msg_attr, func) + +s32 fm10k_msg_lport_state_vf(struct fm10k_hw *, u32 **, + struct fm10k_mbx_info *); +extern const struct fm10k_tlv_attr fm10k_lport_state_msg_attr[]; +#define FM10K_VF_MSG_LPORT_STATE_HANDLER(func) \ + FM10K_MSG_HANDLER(FM10K_VF_MSG_ID_LPORT_STATE, \ + fm10k_lport_state_msg_attr, func) + +extern const struct fm10k_tlv_attr fm10k_1588_msg_attr[]; +#define FM10K_VF_MSG_1588_HANDLER(func) \ + FM10K_MSG_HANDLER(FM10K_VF_MSG_ID_1588, fm10k_1588_msg_attr, func) + +s32 fm10k_init_ops_vf(struct fm10k_hw *hw); + +void fm10k_update_hw_stats_vf(struct fm10k_hw *hw, + struct fm10k_hw_stats *stats); +void fm10k_rebind_hw_stats_vf(struct fm10k_hw *hw, + struct fm10k_hw_stats *stats); +#endif /* _FM10K_VF_H */ diff --git a/src/spdk/dpdk/drivers/net/fm10k/base/meson.build b/src/spdk/dpdk/drivers/net/fm10k/base/meson.build new file mode 100644 index 000000000..6ac11b201 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/fm10k/base/meson.build @@ -0,0 +1,28 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2017 Intel Corporation + +sources = [ + 'fm10k_api.c', + 'fm10k_common.c', + 'fm10k_mbx.c', + 'fm10k_pf.c', + 'fm10k_tlv.c', + 'fm10k_vf.c' +] + +error_cflags = ['-Wno-unused-parameter', '-Wno-unused-value', + '-Wno-strict-aliasing', '-Wno-format-extra-args', + '-Wno-unused-variable', + '-Wno-implicit-fallthrough' +] +c_args = cflags +foreach flag: error_cflags + if cc.has_argument(flag) + c_args += flag + endif +endforeach + +base_lib = static_library('fm10k_base', sources, + dependencies: static_rte_eal, + c_args: c_args) +base_objs = base_lib.extract_all_objects() diff --git a/src/spdk/dpdk/drivers/net/fm10k/fm10k.h b/src/spdk/dpdk/drivers/net/fm10k/fm10k.h new file mode 100644 index 000000000..916b856ac --- /dev/null +++ b/src/spdk/dpdk/drivers/net/fm10k/fm10k.h @@ -0,0 +1,356 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2013-2015 Intel Corporation + */ + +#ifndef _FM10K_H_ +#define _FM10K_H_ + +#include +#include +#include +#include +#include +#include "fm10k_logs.h" +#include "base/fm10k_type.h" + +/* descriptor ring base addresses must be aligned to the following */ +#define FM10K_ALIGN_RX_DESC 128 +#define FM10K_ALIGN_TX_DESC 128 + +/* The maximum packet size that FM10K supports */ +#define FM10K_MAX_PKT_SIZE (15 * 1024) + +/* Minimum size of RX buffer FM10K supported */ +#define FM10K_MIN_RX_BUF_SIZE 256 + +/* The maximum of SRIOV VFs per port supported */ +#define FM10K_MAX_VF_NUM 64 + +/* number of descriptors must be a multiple of the following */ +#define FM10K_MULT_RX_DESC FM10K_REQ_RX_DESCRIPTOR_MULTIPLE +#define FM10K_MULT_TX_DESC FM10K_REQ_TX_DESCRIPTOR_MULTIPLE + +/* maximum size of descriptor rings */ +#define FM10K_MAX_RX_RING_SZ (512 * 1024) +#define FM10K_MAX_TX_RING_SZ (512 * 1024) + +/* minimum and maximum number of descriptors in a ring */ +#define FM10K_MIN_RX_DESC 32 +#define FM10K_MIN_TX_DESC 32 +#define FM10K_MAX_RX_DESC (FM10K_MAX_RX_RING_SZ / sizeof(union fm10k_rx_desc)) +#define FM10K_MAX_TX_DESC (FM10K_MAX_TX_RING_SZ / sizeof(struct fm10k_tx_desc)) + +#define FM10K_TX_MAX_SEG UINT8_MAX +#define FM10K_TX_MAX_MTU_SEG UINT8_MAX + +/* + 
* byte aligment for HW RX data buffer + * Datasheet requires RX buffer addresses shall either be 512-byte aligned or + * be 8-byte aligned but without crossing host memory pages (4KB alignment + * boundaries). Satisfy first option. + */ +#define FM10K_RX_DATABUF_ALIGN 512 + +/* + * threshold default, min, max, and divisor constraints + * the configured values must satisfy the following: + * MIN <= value <= MAX + * DIV % value == 0 + */ +#define FM10K_RX_FREE_THRESH_DEFAULT(rxq) 32 +#define FM10K_RX_FREE_THRESH_MIN(rxq) 1 +#define FM10K_RX_FREE_THRESH_MAX(rxq) ((rxq)->nb_desc - 1) +#define FM10K_RX_FREE_THRESH_DIV(rxq) ((rxq)->nb_desc) + +#define FM10K_TX_FREE_THRESH_DEFAULT(txq) 32 +#define FM10K_TX_FREE_THRESH_MIN(txq) 1 +#define FM10K_TX_FREE_THRESH_MAX(txq) ((txq)->nb_desc - 3) +#define FM10K_TX_FREE_THRESH_DIV(txq) 0 + +#define FM10K_DEFAULT_RX_PTHRESH 8 +#define FM10K_DEFAULT_RX_HTHRESH 8 +#define FM10K_DEFAULT_RX_WTHRESH 0 + +#define FM10K_DEFAULT_TX_PTHRESH 32 +#define FM10K_DEFAULT_TX_HTHRESH 0 +#define FM10K_DEFAULT_TX_WTHRESH 0 + +#define FM10K_TX_RS_THRESH_DEFAULT(txq) 32 +#define FM10K_TX_RS_THRESH_MIN(txq) 1 +#define FM10K_TX_RS_THRESH_MAX(txq) \ + RTE_MIN(((txq)->nb_desc - 2), (txq)->free_thresh) +#define FM10K_TX_RS_THRESH_DIV(txq) ((txq)->nb_desc) + +#define FM10K_VLAN_TAG_SIZE 4 + +/* Maximum number of MAC addresses per PF/VF */ +#define FM10K_MAX_MACADDR_NUM 64 + +#define FM10K_UINT32_BIT_SIZE (CHAR_BIT * sizeof(uint32_t)) +#define FM10K_VFTA_SIZE (4096 / FM10K_UINT32_BIT_SIZE) + +/* vlan_id is a 12 bit number. + * The VFTA array is actually a 4096 bit array, 128 of 32bit elements. + * 2^5 = 32. The val of lower 5 bits specifies the bit in the 32bit element. + * The higher 7 bit val specifies VFTA array index. + */ +#define FM10K_VFTA_BIT(vlan_id) (1 << ((vlan_id) & 0x1F)) +#define FM10K_VFTA_IDX(vlan_id) ((vlan_id) >> 5) + +#define RTE_FM10K_RXQ_REARM_THRESH 32 +#define RTE_FM10K_VPMD_TX_BURST 32 +#define RTE_FM10K_MAX_RX_BURST RTE_FM10K_RXQ_REARM_THRESH +#define RTE_FM10K_TX_MAX_FREE_BUF_SZ 64 +#define RTE_FM10K_DESCS_PER_LOOP 4 + +#define FM10K_MISC_VEC_ID RTE_INTR_VEC_ZERO_OFFSET +#define FM10K_RX_VEC_START RTE_INTR_VEC_RXTX_OFFSET + +struct fm10k_macvlan_filter_info { + uint16_t vlan_num; /* Total VLAN number */ + uint16_t mac_num; /* Total mac number */ + uint16_t nb_queue_pools; /* Active queue pools number */ + /* VMDQ ID for each MAC address */ + uint8_t mac_vmdq_id[FM10K_MAX_MACADDR_NUM]; + uint32_t vfta[FM10K_VFTA_SIZE]; /* VLAN bitmap */ +}; + +struct fm10k_dev_info { + volatile uint32_t enable; + volatile uint32_t glort; + /* Protect the mailbox to avoid race condition */ + rte_spinlock_t mbx_lock; + struct fm10k_macvlan_filter_info macvlan; + /* Flag to indicate if RX vector conditions satisfied */ + bool rx_vec_allowed; + bool sm_down; +}; + +/* + * Structure to store private data for each driver instance. 
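+ * Device callbacks recover the embedded handles through the
+ * FM10K_DEV_PRIVATE_TO_* macros below, e.g.
+ * FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private) for the fm10k_hw handle.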
+ */ +struct fm10k_adapter { + struct fm10k_hw hw; + struct fm10k_hw_stats stats; + struct fm10k_dev_info info; +}; + +#define FM10K_DEV_PRIVATE_TO_HW(adapter) \ + (&((struct fm10k_adapter *)adapter)->hw) + +#define FM10K_DEV_PRIVATE_TO_STATS(adapter) \ + (&((struct fm10k_adapter *)adapter)->stats) + +#define FM10K_DEV_PRIVATE_TO_INFO(adapter) \ + (&((struct fm10k_adapter *)adapter)->info) + +#define FM10K_DEV_PRIVATE_TO_MBXLOCK(adapter) \ + (&(((struct fm10k_adapter *)adapter)->info.mbx_lock)) + +#define FM10K_DEV_PRIVATE_TO_MACVLAN(adapter) \ + (&(((struct fm10k_adapter *)adapter)->info.macvlan)) + +struct fm10k_rx_queue { + struct rte_mempool *mp; + struct rte_mbuf **sw_ring; + volatile union fm10k_rx_desc *hw_ring; + struct rte_mbuf *pkt_first_seg; /* First segment of current packet. */ + struct rte_mbuf *pkt_last_seg; /* Last segment of current packet. */ + uint64_t hw_ring_phys_addr; + uint64_t mbuf_initializer; /* value to init mbufs */ + /* need to alloc dummy mbuf, for wraparound when scanning hw ring */ + struct rte_mbuf fake_mbuf; + uint16_t next_dd; + uint16_t next_alloc; + uint16_t next_trigger; + uint16_t alloc_thresh; + volatile uint32_t *tail_ptr; + uint16_t nb_desc; + /* Number of faked desc added at the tail for Vector RX function */ + uint16_t nb_fake_desc; + uint16_t queue_id; + /* Below 2 fields only valid in case vPMD is applied. */ + uint16_t rxrearm_nb; /* number of remaining to be re-armed */ + uint16_t rxrearm_start; /* the idx we start the re-arming from */ + uint16_t rx_using_sse; /* indicates that vector RX is in use */ + uint16_t port_id; + uint8_t drop_en; + uint8_t rx_deferred_start; /* don't start this queue in dev start. */ + uint16_t rx_ftag_en; /* indicates FTAG RX supported */ + uint64_t offloads; /* offloads of DEV_RX_OFFLOAD_* */ +}; + +/* + * a FIFO is used to track which descriptors have their RS bit set for Tx + * queues which are configured to allow multiple descriptors per packet + */ +struct fifo { + uint16_t *list; + uint16_t *head; + uint16_t *tail; + uint16_t *endp; +}; + +struct fm10k_txq_ops; + +struct fm10k_tx_queue { + struct rte_mbuf **sw_ring; + struct fm10k_tx_desc *hw_ring; + uint64_t hw_ring_phys_addr; + struct fifo rs_tracker; + const struct fm10k_txq_ops *ops; /* txq ops */ + uint16_t last_free; + uint16_t next_free; + uint16_t nb_free; + uint16_t nb_used; + uint16_t free_thresh; + uint16_t rs_thresh; + /* Below 2 fields only valid in case vPMD is applied. */ + uint16_t next_rs; /* Next pos to set RS flag */ + uint16_t next_dd; /* Next pos to check DD flag */ + volatile uint32_t *tail_ptr; + uint64_t offloads; /* Offloads of DEV_TX_OFFLOAD_* */ + uint16_t nb_desc; + uint16_t port_id; + uint8_t tx_deferred_start; /** don't start this queue in dev start. 
*/ + uint16_t queue_id; + uint16_t tx_ftag_en; /* indicates FTAG TX supported */ +}; + +struct fm10k_txq_ops { + void (*reset)(struct fm10k_tx_queue *txq); +}; + +#define MBUF_DMA_ADDR(mb) \ + ((uint64_t) ((mb)->buf_iova + (mb)->data_off)) + +/* enforce 512B alignment on default Rx DMA addresses */ +#define MBUF_DMA_ADDR_DEFAULT(mb) \ + ((uint64_t) RTE_ALIGN(((mb)->buf_iova + RTE_PKTMBUF_HEADROOM),\ + FM10K_RX_DATABUF_ALIGN)) + +static inline void fifo_reset(struct fifo *fifo, uint32_t len) +{ + fifo->head = fifo->tail = fifo->list; + fifo->endp = fifo->list + len; +} + +static inline void fifo_insert(struct fifo *fifo, uint16_t val) +{ + *fifo->head = val; + if (++fifo->head == fifo->endp) + fifo->head = fifo->list; +} + +/* do not worry about list being empty since we only check it once we know + * we have used enough descriptors to set the RS bit at least once */ +static inline uint16_t fifo_peek(struct fifo *fifo) +{ + return *fifo->tail; +} + +static inline uint16_t fifo_remove(struct fifo *fifo) +{ + uint16_t val; + val = *fifo->tail; + if (++fifo->tail == fifo->endp) + fifo->tail = fifo->list; + return val; +} + +static inline void +fm10k_pktmbuf_reset(struct rte_mbuf *mb, uint16_t in_port) +{ + rte_mbuf_refcnt_set(mb, 1); + mb->next = NULL; + mb->nb_segs = 1; + + /* enforce 512B alignment on default Rx virtual addresses */ + mb->data_off = (uint16_t)(RTE_PTR_ALIGN((char *)mb->buf_addr + + RTE_PKTMBUF_HEADROOM, FM10K_RX_DATABUF_ALIGN) + - (char *)mb->buf_addr); + mb->port = in_port; +} + +/* + * Verify Rx packet buffer alignment is valid. + * + * Hardware requires specific alignment for Rx packet buffers. At + * least one of the following two conditions must be satisfied. + * 1. Address is 512B aligned + * 2. Address is 8B aligned and buffer does not cross 4K boundary. + * + * Return 1 if buffer alignment satisfies at least one condition, + * otherwise return 0. + * + * Note: Alignment is checked by the driver when the Rx queue is reset. It + * is assumed that if an entire descriptor ring can be filled with + * buffers containing valid alignment, then all buffers in that mempool + * have valid address alignment. It is the responsibility of the user + * to ensure all buffers have valid alignment, as it is the user who + * creates the mempool. + * Note: It is assumed the buffer needs only to store a maximum size Ethernet + * frame. + */ +static inline int +fm10k_addr_alignment_valid(struct rte_mbuf *mb) +{ + uint64_t addr = MBUF_DMA_ADDR_DEFAULT(mb); + uint64_t boundary1, boundary2; + + /* 512B aligned? */ + if (RTE_ALIGN(addr, FM10K_RX_DATABUF_ALIGN) == addr) + return 1; + + /* 8B aligned, and max Ethernet frame would not cross a 4KB boundary? 
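+	 * (verified below by checking that addr and
+	 *  addr + RTE_ETHER_MAX_VLAN_FRAME_LEN round down to the same
+	 *  4 KB boundary)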
*/ + if (RTE_ALIGN(addr, 8) == addr) { + boundary1 = RTE_ALIGN_FLOOR(addr, 4096); + boundary2 = RTE_ALIGN_FLOOR(addr + RTE_ETHER_MAX_VLAN_FRAME_LEN, + 4096); + if (boundary1 == boundary2) + return 1; + } + + PMD_INIT_LOG(ERR, "Error: Invalid buffer alignment!"); + + return 0; +} + +/* Rx and Tx prototypes */ +uint16_t fm10k_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); + +uint16_t fm10k_recv_scattered_pkts(void *rx_queue, + struct rte_mbuf **rx_pkts, uint16_t nb_pkts); + +uint32_t +fm10k_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id); + +int +fm10k_dev_rx_descriptor_done(void *rx_queue, uint16_t offset); + +int +fm10k_dev_rx_descriptor_status(void *rx_queue, uint16_t offset); + +int +fm10k_dev_tx_descriptor_status(void *rx_queue, uint16_t offset); + + +uint16_t fm10k_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); + +uint16_t fm10k_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); + +int fm10k_rxq_vec_setup(struct fm10k_rx_queue *rxq); +int fm10k_rx_vec_condition_check(struct rte_eth_dev *); +void fm10k_rx_queue_release_mbufs_vec(struct fm10k_rx_queue *rxq); +uint16_t fm10k_recv_pkts_vec(void *, struct rte_mbuf **, uint16_t); +uint16_t fm10k_recv_scattered_pkts_vec(void *, struct rte_mbuf **, + uint16_t); +uint16_t fm10k_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); +void fm10k_txq_vec_setup(struct fm10k_tx_queue *txq); +int fm10k_tx_vec_condition_check(struct fm10k_tx_queue *txq); + +#endif diff --git a/src/spdk/dpdk/drivers/net/fm10k/fm10k_ethdev.c b/src/spdk/dpdk/drivers/net/fm10k/fm10k_ethdev.c new file mode 100644 index 000000000..f537ab286 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/fm10k/fm10k_ethdev.c @@ -0,0 +1,3348 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2013-2016 Intel Corporation + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "fm10k.h" +#include "base/fm10k_api.h" + +/* Default delay to acquire mailbox lock */ +#define FM10K_MBXLOCK_DELAY_US 20 +#define UINT64_LOWER_32BITS_MASK 0x00000000ffffffffULL + +#define MAIN_VSI_POOL_NUMBER 0 + +/* Max try times to acquire switch status */ +#define MAX_QUERY_SWITCH_STATE_TIMES 10 +/* Wait interval to get switch status */ +#define WAIT_SWITCH_MSG_US 100000 +/* A period of quiescence for switch */ +#define FM10K_SWITCH_QUIESCE_US 100000 +/* Number of chars per uint32 type */ +#define CHARS_PER_UINT32 (sizeof(uint32_t)) +#define BIT_MASK_PER_UINT32 ((1 << CHARS_PER_UINT32) - 1) + +/* default 1:1 map from queue ID to interrupt vector ID */ +#define Q2V(pci_dev, queue_id) ((pci_dev)->intr_handle.intr_vec[queue_id]) + +/* First 64 Logical ports for PF/VMDQ, second 64 for Flow director */ +#define MAX_LPORT_NUM 128 +#define GLORT_FD_Q_BASE 0x40 +#define GLORT_PF_MASK 0xFFC0 +#define GLORT_FD_MASK GLORT_PF_MASK +#define GLORT_FD_INDEX GLORT_FD_Q_BASE + +int fm10k_logtype_init; +int fm10k_logtype_driver; + +#ifdef RTE_LIBRTE_FM10K_DEBUG_RX +int fm10k_logtype_rx; +#endif +#ifdef RTE_LIBRTE_FM10K_DEBUG_TX +int fm10k_logtype_tx; +#endif +#ifdef RTE_LIBRTE_FM10K_DEBUG_TX_FREE +int fm10k_logtype_tx_free; +#endif + +static void fm10k_close_mbx_service(struct fm10k_hw *hw); +static int fm10k_dev_promiscuous_enable(struct rte_eth_dev *dev); +static int fm10k_dev_promiscuous_disable(struct rte_eth_dev *dev); +static int fm10k_dev_allmulticast_enable(struct rte_eth_dev *dev); +static int fm10k_dev_allmulticast_disable(struct rte_eth_dev *dev); 
+static inline int fm10k_glort_valid(struct fm10k_hw *hw); +static int +fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on); +static void fm10k_MAC_filter_set(struct rte_eth_dev *dev, + const u8 *mac, bool add, uint32_t pool); +static void fm10k_tx_queue_release(void *queue); +static void fm10k_rx_queue_release(void *queue); +static void fm10k_set_rx_function(struct rte_eth_dev *dev); +static void fm10k_set_tx_function(struct rte_eth_dev *dev); +static int fm10k_check_ftag(struct rte_devargs *devargs); +static int fm10k_link_update(struct rte_eth_dev *dev, int wait_to_complete); + +static int fm10k_dev_infos_get(struct rte_eth_dev *dev, + struct rte_eth_dev_info *dev_info); +static uint64_t fm10k_get_rx_queue_offloads_capa(struct rte_eth_dev *dev); +static uint64_t fm10k_get_rx_port_offloads_capa(struct rte_eth_dev *dev); +static uint64_t fm10k_get_tx_queue_offloads_capa(struct rte_eth_dev *dev); +static uint64_t fm10k_get_tx_port_offloads_capa(struct rte_eth_dev *dev); + +struct fm10k_xstats_name_off { + char name[RTE_ETH_XSTATS_NAME_SIZE]; + unsigned offset; +}; + +static const struct fm10k_xstats_name_off fm10k_hw_stats_strings[] = { + {"completion_timeout_count", offsetof(struct fm10k_hw_stats, timeout)}, + {"unsupported_requests_count", offsetof(struct fm10k_hw_stats, ur)}, + {"completer_abort_count", offsetof(struct fm10k_hw_stats, ca)}, + {"unsupported_message_count", offsetof(struct fm10k_hw_stats, um)}, + {"checksum_error_count", offsetof(struct fm10k_hw_stats, xec)}, + {"vlan_dropped", offsetof(struct fm10k_hw_stats, vlan_drop)}, + {"loopback_dropped", offsetof(struct fm10k_hw_stats, loopback_drop)}, + {"rx_mbuf_allocation_errors", offsetof(struct fm10k_hw_stats, + nodesc_drop)}, +}; + +#define FM10K_NB_HW_XSTATS (sizeof(fm10k_hw_stats_strings) / \ + sizeof(fm10k_hw_stats_strings[0])) + +static const struct fm10k_xstats_name_off fm10k_hw_stats_rx_q_strings[] = { + {"packets", offsetof(struct fm10k_hw_stats_q, rx_packets)}, + {"bytes", offsetof(struct fm10k_hw_stats_q, rx_bytes)}, + {"dropped", offsetof(struct fm10k_hw_stats_q, rx_drops)}, +}; + +#define FM10K_NB_RX_Q_XSTATS (sizeof(fm10k_hw_stats_rx_q_strings) / \ + sizeof(fm10k_hw_stats_rx_q_strings[0])) + +static const struct fm10k_xstats_name_off fm10k_hw_stats_tx_q_strings[] = { + {"packets", offsetof(struct fm10k_hw_stats_q, tx_packets)}, + {"bytes", offsetof(struct fm10k_hw_stats_q, tx_bytes)}, +}; + +#define FM10K_NB_TX_Q_XSTATS (sizeof(fm10k_hw_stats_tx_q_strings) / \ + sizeof(fm10k_hw_stats_tx_q_strings[0])) + +#define FM10K_NB_XSTATS (FM10K_NB_HW_XSTATS + FM10K_MAX_QUEUES_PF * \ + (FM10K_NB_RX_Q_XSTATS + FM10K_NB_TX_Q_XSTATS)) +static int +fm10k_dev_rxq_interrupt_setup(struct rte_eth_dev *dev); + +static void +fm10k_mbx_initlock(struct fm10k_hw *hw) +{ + rte_spinlock_init(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back)); +} + +static void +fm10k_mbx_lock(struct fm10k_hw *hw) +{ + while (!rte_spinlock_trylock(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back))) + rte_delay_us(FM10K_MBXLOCK_DELAY_US); +} + +static void +fm10k_mbx_unlock(struct fm10k_hw *hw) +{ + rte_spinlock_unlock(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back)); +} + +/* Stubs needed for linkage when vPMD is disabled */ +__rte_weak int +fm10k_rx_vec_condition_check(__rte_unused struct rte_eth_dev *dev) +{ + return -1; +} + +__rte_weak uint16_t +fm10k_recv_pkts_vec( + __rte_unused void *rx_queue, + __rte_unused struct rte_mbuf **rx_pkts, + __rte_unused uint16_t nb_pkts) +{ + return 0; +} + +__rte_weak uint16_t +fm10k_recv_scattered_pkts_vec( + __rte_unused void 
*rx_queue, + __rte_unused struct rte_mbuf **rx_pkts, + __rte_unused uint16_t nb_pkts) +{ + return 0; +} + +__rte_weak int +fm10k_rxq_vec_setup(__rte_unused struct fm10k_rx_queue *rxq) + +{ + return -1; +} + +__rte_weak void +fm10k_rx_queue_release_mbufs_vec( + __rte_unused struct fm10k_rx_queue *rxq) +{ + return; +} + +__rte_weak void +fm10k_txq_vec_setup(__rte_unused struct fm10k_tx_queue *txq) +{ + return; +} + +__rte_weak int +fm10k_tx_vec_condition_check(__rte_unused struct fm10k_tx_queue *txq) +{ + return -1; +} + +__rte_weak uint16_t +fm10k_xmit_fixed_burst_vec(__rte_unused void *tx_queue, + __rte_unused struct rte_mbuf **tx_pkts, + __rte_unused uint16_t nb_pkts) +{ + return 0; +} + +/* + * reset queue to initial state, allocate software buffers used when starting + * device. + * return 0 on success + * return -ENOMEM if buffers cannot be allocated + * return -EINVAL if buffers do not satisfy alignment condition + */ +static inline int +rx_queue_reset(struct fm10k_rx_queue *q) +{ + static const union fm10k_rx_desc zero = {{0} }; + uint64_t dma_addr; + int i, diag; + PMD_INIT_FUNC_TRACE(); + + diag = rte_mempool_get_bulk(q->mp, (void **)q->sw_ring, q->nb_desc); + if (diag != 0) + return -ENOMEM; + + for (i = 0; i < q->nb_desc; ++i) { + fm10k_pktmbuf_reset(q->sw_ring[i], q->port_id); + if (!fm10k_addr_alignment_valid(q->sw_ring[i])) { + rte_mempool_put_bulk(q->mp, (void **)q->sw_ring, + q->nb_desc); + return -EINVAL; + } + dma_addr = MBUF_DMA_ADDR_DEFAULT(q->sw_ring[i]); + q->hw_ring[i].q.pkt_addr = dma_addr; + q->hw_ring[i].q.hdr_addr = dma_addr; + } + + /* initialize extra software ring entries. Space for these extra + * entries is always allocated. + */ + memset(&q->fake_mbuf, 0x0, sizeof(q->fake_mbuf)); + for (i = 0; i < q->nb_fake_desc; ++i) { + q->sw_ring[q->nb_desc + i] = &q->fake_mbuf; + q->hw_ring[q->nb_desc + i] = zero; + } + + q->next_dd = 0; + q->next_alloc = 0; + q->next_trigger = q->alloc_thresh - 1; + FM10K_PCI_REG_WRITE(q->tail_ptr, q->nb_desc - 1); + q->rxrearm_start = 0; + q->rxrearm_nb = 0; + + return 0; +} + +/* + * clean queue, descriptor rings, free software buffers used when stopping + * device. + */ +static inline void +rx_queue_clean(struct fm10k_rx_queue *q) +{ + union fm10k_rx_desc zero = {.q = {0, 0, 0, 0} }; + uint32_t i; + PMD_INIT_FUNC_TRACE(); + + /* zero descriptor rings */ + for (i = 0; i < q->nb_desc; ++i) + q->hw_ring[i] = zero; + + /* zero faked descriptors */ + for (i = 0; i < q->nb_fake_desc; ++i) + q->hw_ring[q->nb_desc + i] = zero; + + /* vPMD driver has a different way of releasing mbufs. */ + if (q->rx_using_sse) { + fm10k_rx_queue_release_mbufs_vec(q); + return; + } + + /* free software buffers */ + for (i = 0; i < q->nb_desc; ++i) { + if (q->sw_ring[i]) { + rte_pktmbuf_free_seg(q->sw_ring[i]); + q->sw_ring[i] = NULL; + } + } +} + +/* + * free all queue memory used when releasing the queue (i.e. 
configure) + */ +static inline void +rx_queue_free(struct fm10k_rx_queue *q) +{ + PMD_INIT_FUNC_TRACE(); + if (q) { + PMD_INIT_LOG(DEBUG, "Freeing rx queue %p", q); + rx_queue_clean(q); + if (q->sw_ring) { + rte_free(q->sw_ring); + q->sw_ring = NULL; + } + rte_free(q); + q = NULL; + } +} + +/* + * disable RX queue, wait unitl HW finished necessary flush operation + */ +static inline int +rx_queue_disable(struct fm10k_hw *hw, uint16_t qnum) +{ + uint32_t reg, i; + + reg = FM10K_READ_REG(hw, FM10K_RXQCTL(qnum)); + FM10K_WRITE_REG(hw, FM10K_RXQCTL(qnum), + reg & ~FM10K_RXQCTL_ENABLE); + + /* Wait 100us at most */ + for (i = 0; i < FM10K_QUEUE_DISABLE_TIMEOUT; i++) { + rte_delay_us(1); + reg = FM10K_READ_REG(hw, FM10K_RXQCTL(qnum)); + if (!(reg & FM10K_RXQCTL_ENABLE)) + break; + } + + if (i == FM10K_QUEUE_DISABLE_TIMEOUT) + return -1; + + return 0; +} + +/* + * reset queue to initial state, allocate software buffers used when starting + * device + */ +static inline void +tx_queue_reset(struct fm10k_tx_queue *q) +{ + PMD_INIT_FUNC_TRACE(); + q->last_free = 0; + q->next_free = 0; + q->nb_used = 0; + q->nb_free = q->nb_desc - 1; + fifo_reset(&q->rs_tracker, (q->nb_desc + 1) / q->rs_thresh); + FM10K_PCI_REG_WRITE(q->tail_ptr, 0); +} + +/* + * clean queue, descriptor rings, free software buffers used when stopping + * device + */ +static inline void +tx_queue_clean(struct fm10k_tx_queue *q) +{ + struct fm10k_tx_desc zero = {0, 0, 0, 0, 0, 0}; + uint32_t i; + PMD_INIT_FUNC_TRACE(); + + /* zero descriptor rings */ + for (i = 0; i < q->nb_desc; ++i) + q->hw_ring[i] = zero; + + /* free software buffers */ + for (i = 0; i < q->nb_desc; ++i) { + if (q->sw_ring[i]) { + rte_pktmbuf_free_seg(q->sw_ring[i]); + q->sw_ring[i] = NULL; + } + } +} + +/* + * free all queue memory used when releasing the queue (i.e. 
configure) + */ +static inline void +tx_queue_free(struct fm10k_tx_queue *q) +{ + PMD_INIT_FUNC_TRACE(); + if (q) { + PMD_INIT_LOG(DEBUG, "Freeing tx queue %p", q); + tx_queue_clean(q); + if (q->rs_tracker.list) { + rte_free(q->rs_tracker.list); + q->rs_tracker.list = NULL; + } + if (q->sw_ring) { + rte_free(q->sw_ring); + q->sw_ring = NULL; + } + rte_free(q); + q = NULL; + } +} + +/* + * disable TX queue, wait unitl HW finished necessary flush operation + */ +static inline int +tx_queue_disable(struct fm10k_hw *hw, uint16_t qnum) +{ + uint32_t reg, i; + + reg = FM10K_READ_REG(hw, FM10K_TXDCTL(qnum)); + FM10K_WRITE_REG(hw, FM10K_TXDCTL(qnum), + reg & ~FM10K_TXDCTL_ENABLE); + + /* Wait 100us at most */ + for (i = 0; i < FM10K_QUEUE_DISABLE_TIMEOUT; i++) { + rte_delay_us(1); + reg = FM10K_READ_REG(hw, FM10K_TXDCTL(qnum)); + if (!(reg & FM10K_TXDCTL_ENABLE)) + break; + } + + if (i == FM10K_QUEUE_DISABLE_TIMEOUT) + return -1; + + return 0; +} + +static int +fm10k_check_mq_mode(struct rte_eth_dev *dev) +{ + enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode; + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_eth_vmdq_rx_conf *vmdq_conf; + uint16_t nb_rx_q = dev->data->nb_rx_queues; + + vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf; + + if (rx_mq_mode & ETH_MQ_RX_DCB_FLAG) { + PMD_INIT_LOG(ERR, "DCB mode is not supported."); + return -EINVAL; + } + + if (!(rx_mq_mode & ETH_MQ_RX_VMDQ_FLAG)) + return 0; + + if (hw->mac.type == fm10k_mac_vf) { + PMD_INIT_LOG(ERR, "VMDQ mode is not supported in VF."); + return -EINVAL; + } + + /* Check VMDQ queue pool number */ + if (vmdq_conf->nb_queue_pools > + sizeof(vmdq_conf->pool_map[0].pools) * CHAR_BIT || + vmdq_conf->nb_queue_pools > nb_rx_q) { + PMD_INIT_LOG(ERR, "Too many of queue pools: %d", + vmdq_conf->nb_queue_pools); + return -EINVAL; + } + + return 0; +} + +static const struct fm10k_txq_ops def_txq_ops = { + .reset = tx_queue_reset, +}; + +static int +fm10k_dev_configure(struct rte_eth_dev *dev) +{ + int ret; + + PMD_INIT_FUNC_TRACE(); + + if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) + dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH; + + /* multipe queue mode checking */ + ret = fm10k_check_mq_mode(dev); + if (ret != 0) { + PMD_DRV_LOG(ERR, "fm10k_check_mq_mode fails with %d.", + ret); + return ret; + } + + dev->data->scattered_rx = 0; + + return 0; +} + +static void +fm10k_dev_vmdq_rx_configure(struct rte_eth_dev *dev) +{ + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_eth_vmdq_rx_conf *vmdq_conf; + uint32_t i; + + vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf; + + for (i = 0; i < vmdq_conf->nb_pool_maps; i++) { + if (!vmdq_conf->pool_map[i].pools) + continue; + fm10k_mbx_lock(hw); + fm10k_update_vlan(hw, vmdq_conf->pool_map[i].vlan_id, 0, true); + fm10k_mbx_unlock(hw); + } +} + +static void +fm10k_dev_pf_main_vsi_reset(struct rte_eth_dev *dev) +{ + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* Add default mac address */ + fm10k_MAC_filter_set(dev, hw->mac.addr, true, + MAIN_VSI_POOL_NUMBER); +} + +static void +fm10k_dev_rss_configure(struct rte_eth_dev *dev) +{ + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_eth_conf *dev_conf = &dev->data->dev_conf; + uint32_t mrqc, *key, i, reta, j; + uint64_t hf; + +#define RSS_KEY_SIZE 40 + static uint8_t rss_intel_key[RSS_KEY_SIZE] = { + 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, + 0x41, 
0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0, + 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4, + 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, + 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA, + }; + + if (dev_conf->rxmode.mq_mode != ETH_MQ_RX_RSS || + dev_conf->rx_adv_conf.rss_conf.rss_hf == 0) { + FM10K_WRITE_REG(hw, FM10K_MRQC(0), 0); + return; + } + + /* random key is rss_intel_key (default) or user provided (rss_key) */ + if (dev_conf->rx_adv_conf.rss_conf.rss_key == NULL) + key = (uint32_t *)rss_intel_key; + else + key = (uint32_t *)dev_conf->rx_adv_conf.rss_conf.rss_key; + + /* Now fill our hash function seeds, 4 bytes at a time */ + for (i = 0; i < RSS_KEY_SIZE / sizeof(*key); ++i) + FM10K_WRITE_REG(hw, FM10K_RSSRK(0, i), key[i]); + + /* + * Fill in redirection table + * The byte-swap is needed because NIC registers are in + * little-endian order. + */ + reta = 0; + for (i = 0, j = 0; i < FM10K_MAX_RSS_INDICES; i++, j++) { + if (j == dev->data->nb_rx_queues) + j = 0; + reta = (reta << CHAR_BIT) | j; + if ((i & 3) == 3) + FM10K_WRITE_REG(hw, FM10K_RETA(0, i >> 2), + rte_bswap32(reta)); + } + + /* + * Generate RSS hash based on packet types, TCP/UDP + * port numbers and/or IPv4/v6 src and dst addresses + */ + hf = dev_conf->rx_adv_conf.rss_conf.rss_hf; + mrqc = 0; + mrqc |= (hf & ETH_RSS_IPV4) ? FM10K_MRQC_IPV4 : 0; + mrqc |= (hf & ETH_RSS_IPV6) ? FM10K_MRQC_IPV6 : 0; + mrqc |= (hf & ETH_RSS_IPV6_EX) ? FM10K_MRQC_IPV6 : 0; + mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? FM10K_MRQC_TCP_IPV4 : 0; + mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? FM10K_MRQC_TCP_IPV6 : 0; + mrqc |= (hf & ETH_RSS_IPV6_TCP_EX) ? FM10K_MRQC_TCP_IPV6 : 0; + mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_UDP) ? FM10K_MRQC_UDP_IPV4 : 0; + mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_UDP) ? FM10K_MRQC_UDP_IPV6 : 0; + mrqc |= (hf & ETH_RSS_IPV6_UDP_EX) ? FM10K_MRQC_UDP_IPV6 : 0; + + if (mrqc == 0) { + PMD_INIT_LOG(ERR, "Specified RSS mode 0x%"PRIx64"is not" + "supported", hf); + return; + } + + FM10K_WRITE_REG(hw, FM10K_MRQC(0), mrqc); +} + +static void +fm10k_dev_logic_port_update(struct rte_eth_dev *dev, uint16_t nb_lport_new) +{ + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t i; + + for (i = 0; i < nb_lport_new; i++) { + /* Set unicast mode by default. App can change + * to other mode in other API func. + */ + fm10k_mbx_lock(hw); + hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map + i, + FM10K_XCAST_MODE_NONE); + fm10k_mbx_unlock(hw); + } +} + +static void +fm10k_dev_mq_rx_configure(struct rte_eth_dev *dev) +{ + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_eth_vmdq_rx_conf *vmdq_conf; + struct rte_eth_conf *dev_conf = &dev->data->dev_conf; + struct fm10k_macvlan_filter_info *macvlan; + uint16_t nb_queue_pools = 0; /* pool number in configuration */ + uint16_t nb_lport_new; + + macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private); + vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf; + + fm10k_dev_rss_configure(dev); + + /* only PF supports VMDQ */ + if (hw->mac.type != fm10k_mac_pf) + return; + + if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) + nb_queue_pools = vmdq_conf->nb_queue_pools; + + /* no pool number change, no need to update logic port and VLAN/MAC */ + if (macvlan->nb_queue_pools == nb_queue_pools) + return; + + nb_lport_new = nb_queue_pools ? 
nb_queue_pools : 1; + fm10k_dev_logic_port_update(dev, nb_lport_new); + + /* reset MAC/VLAN as it's based on VMDQ or PF main VSI */ + memset(dev->data->mac_addrs, 0, + RTE_ETHER_ADDR_LEN * FM10K_MAX_MACADDR_NUM); + rte_ether_addr_copy((const struct rte_ether_addr *)hw->mac.addr, + &dev->data->mac_addrs[0]); + memset(macvlan, 0, sizeof(*macvlan)); + macvlan->nb_queue_pools = nb_queue_pools; + + if (nb_queue_pools) + fm10k_dev_vmdq_rx_configure(dev); + else + fm10k_dev_pf_main_vsi_reset(dev); +} + +static int +fm10k_dev_tx_init(struct rte_eth_dev *dev) +{ + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int i, ret; + struct fm10k_tx_queue *txq; + uint64_t base_addr; + uint32_t size; + + /* Disable TXINT to avoid possible interrupt */ + for (i = 0; i < hw->mac.max_queues; i++) + FM10K_WRITE_REG(hw, FM10K_TXINT(i), + 3 << FM10K_TXINT_TIMER_SHIFT); + + /* Setup TX queue */ + for (i = 0; i < dev->data->nb_tx_queues; ++i) { + txq = dev->data->tx_queues[i]; + base_addr = txq->hw_ring_phys_addr; + size = txq->nb_desc * sizeof(struct fm10k_tx_desc); + + /* disable queue to avoid issues while updating state */ + ret = tx_queue_disable(hw, i); + if (ret) { + PMD_INIT_LOG(ERR, "failed to disable queue %d", i); + return -1; + } + /* Enable use of FTAG bit in TX descriptor, PFVTCTL + * register is read-only for VF. + */ + if (fm10k_check_ftag(dev->device->devargs)) { + if (hw->mac.type == fm10k_mac_pf) { + FM10K_WRITE_REG(hw, FM10K_PFVTCTL(i), + FM10K_PFVTCTL_FTAG_DESC_ENABLE); + PMD_INIT_LOG(DEBUG, "FTAG mode is enabled"); + } else { + PMD_INIT_LOG(ERR, "VF FTAG is not supported."); + return -ENOTSUP; + } + } + + /* set location and size for descriptor ring */ + FM10K_WRITE_REG(hw, FM10K_TDBAL(i), + base_addr & UINT64_LOWER_32BITS_MASK); + FM10K_WRITE_REG(hw, FM10K_TDBAH(i), + base_addr >> (CHAR_BIT * sizeof(uint32_t))); + FM10K_WRITE_REG(hw, FM10K_TDLEN(i), size); + + /* assign default SGLORT for each TX queue by PF */ + if (hw->mac.type == fm10k_mac_pf) + FM10K_WRITE_REG(hw, FM10K_TX_SGLORT(i), hw->mac.dglort_map); + } + + /* set up vector or scalar TX function as appropriate */ + fm10k_set_tx_function(dev); + + return 0; +} + +static int +fm10k_dev_rx_init(struct rte_eth_dev *dev) +{ + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct fm10k_macvlan_filter_info *macvlan; + struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pdev->intr_handle; + int i, ret; + struct fm10k_rx_queue *rxq; + uint64_t base_addr; + uint32_t size; + uint32_t rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY; + uint32_t logic_port = hw->mac.dglort_map; + uint16_t buf_size; + uint16_t queue_stride = 0; + + /* enable RXINT for interrupt mode */ + i = 0; + if (rte_intr_dp_is_en(intr_handle)) { + for (; i < dev->data->nb_rx_queues; i++) { + FM10K_WRITE_REG(hw, FM10K_RXINT(i), Q2V(pdev, i)); + if (hw->mac.type == fm10k_mac_pf) + FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(pdev, i)), + FM10K_ITR_AUTOMASK | + FM10K_ITR_MASK_CLEAR); + else + FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(pdev, i)), + FM10K_ITR_AUTOMASK | + FM10K_ITR_MASK_CLEAR); + } + } + /* Disable other RXINT to avoid possible interrupt */ + for (; i < hw->mac.max_queues; i++) + FM10K_WRITE_REG(hw, FM10K_RXINT(i), + 3 << FM10K_RXINT_TIMER_SHIFT); + + /* Setup RX queues */ + for (i = 0; i < dev->data->nb_rx_queues; ++i) { + rxq = dev->data->rx_queues[i]; + base_addr = rxq->hw_ring_phys_addr; + size = rxq->nb_desc * sizeof(union fm10k_rx_desc); + + /* disable queue to avoid issues while updating 
state */ + ret = rx_queue_disable(hw, i); + if (ret) { + PMD_INIT_LOG(ERR, "failed to disable queue %d", i); + return -1; + } + + /* Setup the Base and Length of the Rx Descriptor Ring */ + FM10K_WRITE_REG(hw, FM10K_RDBAL(i), + base_addr & UINT64_LOWER_32BITS_MASK); + FM10K_WRITE_REG(hw, FM10K_RDBAH(i), + base_addr >> (CHAR_BIT * sizeof(uint32_t))); + FM10K_WRITE_REG(hw, FM10K_RDLEN(i), size); + + /* Configure the Rx buffer size for one buff without split */ + buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) - + RTE_PKTMBUF_HEADROOM); + /* As RX buffer is aligned to 512B within mbuf, some bytes are + * reserved for this purpose, and the worst case could be 511B. + * But SRR reg assumes all buffers have the same size. In order + * to fill the gap, we'll have to consider the worst case and + * assume 512B is reserved. If we don't do so, it's possible + * for HW to overwrite data to next mbuf. + */ + buf_size -= FM10K_RX_DATABUF_ALIGN; + + FM10K_WRITE_REG(hw, FM10K_SRRCTL(i), + (buf_size >> FM10K_SRRCTL_BSIZEPKT_SHIFT) | + FM10K_SRRCTL_LOOPBACK_SUPPRESS); + + /* It adds dual VLAN length for supporting dual VLAN */ + if ((dev->data->dev_conf.rxmode.max_rx_pkt_len + + 2 * FM10K_VLAN_TAG_SIZE) > buf_size || + rxq->offloads & DEV_RX_OFFLOAD_SCATTER) { + uint32_t reg; + dev->data->scattered_rx = 1; + reg = FM10K_READ_REG(hw, FM10K_SRRCTL(i)); + reg |= FM10K_SRRCTL_BUFFER_CHAINING_EN; + FM10K_WRITE_REG(hw, FM10K_SRRCTL(i), reg); + } + + /* Enable drop on empty, it's RO for VF */ + if (hw->mac.type == fm10k_mac_pf && rxq->drop_en) + rxdctl |= FM10K_RXDCTL_DROP_ON_EMPTY; + + FM10K_WRITE_REG(hw, FM10K_RXDCTL(i), rxdctl); + FM10K_WRITE_FLUSH(hw); + } + + /* Configure VMDQ/RSS if applicable */ + fm10k_dev_mq_rx_configure(dev); + + /* Decide the best RX function */ + fm10k_set_rx_function(dev); + + /* update RX_SGLORT for loopback suppress*/ + if (hw->mac.type != fm10k_mac_pf) + return 0; + macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private); + if (macvlan->nb_queue_pools) + queue_stride = dev->data->nb_rx_queues / macvlan->nb_queue_pools; + for (i = 0; i < dev->data->nb_rx_queues; ++i) { + if (i && queue_stride && !(i % queue_stride)) + logic_port++; + FM10K_WRITE_REG(hw, FM10K_RX_SGLORT(i), logic_port); + } + + return 0; +} + +static int +fm10k_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) +{ + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int err; + uint32_t reg; + struct fm10k_rx_queue *rxq; + + PMD_INIT_FUNC_TRACE(); + + rxq = dev->data->rx_queues[rx_queue_id]; + err = rx_queue_reset(rxq); + if (err == -ENOMEM) { + PMD_INIT_LOG(ERR, "Failed to alloc memory : %d", err); + return err; + } else if (err == -EINVAL) { + PMD_INIT_LOG(ERR, "Invalid buffer address alignment :" + " %d", err); + return err; + } + + /* Setup the HW Rx Head and Tail Descriptor Pointers + * Note: this must be done AFTER the queue is enabled on real + * hardware, but BEFORE the queue is enabled when using the + * emulation platform. Do it in both places for now and remove + * this comment and the following two register writes when the + * emulation platform is no longer being used. 
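+	 * Writing the tail to nb_desc - 1 while the head stays at zero hands
+	 * all but one descriptor to the hardware; as with other Intel-style
+	 * rings, one slot is deliberately left unused so that head == tail
+	 * always means the ring is empty.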
+ */ + FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0); + FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1); + + /* Set PF ownership flag for PF devices */ + reg = FM10K_READ_REG(hw, FM10K_RXQCTL(rx_queue_id)); + if (hw->mac.type == fm10k_mac_pf) + reg |= FM10K_RXQCTL_PF; + reg |= FM10K_RXQCTL_ENABLE; + /* enable RX queue */ + FM10K_WRITE_REG(hw, FM10K_RXQCTL(rx_queue_id), reg); + FM10K_WRITE_FLUSH(hw); + + /* Setup the HW Rx Head and Tail Descriptor Pointers + * Note: this must be done AFTER the queue is enabled + */ + FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0); + FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1); + dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; + + return 0; +} + +static int +fm10k_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id) +{ + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + PMD_INIT_FUNC_TRACE(); + + /* Disable RX queue */ + rx_queue_disable(hw, rx_queue_id); + + /* Free mbuf and clean HW ring */ + rx_queue_clean(dev->data->rx_queues[rx_queue_id]); + dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; + + return 0; +} + +static int +fm10k_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id) +{ + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + /** @todo - this should be defined in the shared code */ +#define FM10K_TXDCTL_WRITE_BACK_MIN_DELAY 0x00010000 + uint32_t txdctl = FM10K_TXDCTL_WRITE_BACK_MIN_DELAY; + struct fm10k_tx_queue *q = dev->data->tx_queues[tx_queue_id]; + + PMD_INIT_FUNC_TRACE(); + + q->ops->reset(q); + + /* reset head and tail pointers */ + FM10K_WRITE_REG(hw, FM10K_TDH(tx_queue_id), 0); + FM10K_WRITE_REG(hw, FM10K_TDT(tx_queue_id), 0); + + /* enable TX queue */ + FM10K_WRITE_REG(hw, FM10K_TXDCTL(tx_queue_id), + FM10K_TXDCTL_ENABLE | txdctl); + FM10K_WRITE_FLUSH(hw); + dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; + + return 0; +} + +static int +fm10k_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id) +{ + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + PMD_INIT_FUNC_TRACE(); + + tx_queue_disable(hw, tx_queue_id); + tx_queue_clean(dev->data->tx_queues[tx_queue_id]); + dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; + + return 0; +} + +static inline int fm10k_glort_valid(struct fm10k_hw *hw) +{ + return ((hw->mac.dglort_map & FM10K_DGLORTMAP_NONE) + != FM10K_DGLORTMAP_NONE); +} + +static int +fm10k_dev_promiscuous_enable(struct rte_eth_dev *dev) +{ + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int status; + + PMD_INIT_FUNC_TRACE(); + + /* Return if it didn't acquire valid glort range */ + if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw)) + return 0; + + fm10k_mbx_lock(hw); + status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map, + FM10K_XCAST_MODE_PROMISC); + fm10k_mbx_unlock(hw); + + if (status != FM10K_SUCCESS) { + PMD_INIT_LOG(ERR, "Failed to enable promiscuous mode"); + return -EAGAIN; + } + + return 0; +} + +static int +fm10k_dev_promiscuous_disable(struct rte_eth_dev *dev) +{ + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint8_t mode; + int status; + + PMD_INIT_FUNC_TRACE(); + + /* Return if it didn't acquire valid glort range */ + if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw)) + return 0; + + if (dev->data->all_multicast == 1) + mode = FM10K_XCAST_MODE_ALLMULTI; + else + mode = FM10K_XCAST_MODE_NONE; + + 
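+	/* A logical port carries a single xcast mode, so dropping promiscuous
+	 * restores the next-strongest setting chosen above: ALLMULTI if
+	 * allmulticast is still enabled, otherwise NONE (unicast only).
+	 */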
fm10k_mbx_lock(hw); + status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map, + mode); + fm10k_mbx_unlock(hw); + + if (status != FM10K_SUCCESS) { + PMD_INIT_LOG(ERR, "Failed to disable promiscuous mode"); + return -EAGAIN; + } + + return 0; +} + +static int +fm10k_dev_allmulticast_enable(struct rte_eth_dev *dev) +{ + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int status; + + PMD_INIT_FUNC_TRACE(); + + /* Return if it didn't acquire valid glort range */ + if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw)) + return 0; + + /* If promiscuous mode is enabled, it doesn't make sense to enable + * allmulticast and disable promiscuous since fm10k only can select + * one of the modes. + */ + if (dev->data->promiscuous) { + PMD_INIT_LOG(INFO, "Promiscuous mode is enabled, "\ + "needn't enable allmulticast"); + return 0; + } + + fm10k_mbx_lock(hw); + status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map, + FM10K_XCAST_MODE_ALLMULTI); + fm10k_mbx_unlock(hw); + + if (status != FM10K_SUCCESS) { + PMD_INIT_LOG(ERR, "Failed to enable allmulticast mode"); + return -EAGAIN; + } + + return 0; +} + +static int +fm10k_dev_allmulticast_disable(struct rte_eth_dev *dev) +{ + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int status; + + PMD_INIT_FUNC_TRACE(); + + /* Return if it didn't acquire valid glort range */ + if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw)) + return 0; + + if (dev->data->promiscuous) { + PMD_INIT_LOG(ERR, "Failed to disable allmulticast mode "\ + "since promisc mode is enabled"); + return -EINVAL; + } + + fm10k_mbx_lock(hw); + /* Change mode to unicast mode */ + status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map, + FM10K_XCAST_MODE_NONE); + fm10k_mbx_unlock(hw); + + if (status != FM10K_SUCCESS) { + PMD_INIT_LOG(ERR, "Failed to disable allmulticast mode"); + return -EAGAIN; + } + + return 0; +} + +static void +fm10k_dev_dglort_map_configure(struct rte_eth_dev *dev) +{ + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t dglortdec, pool_len, rss_len, i, dglortmask; + uint16_t nb_queue_pools; + struct fm10k_macvlan_filter_info *macvlan; + + macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private); + nb_queue_pools = macvlan->nb_queue_pools; + pool_len = nb_queue_pools ? rte_fls_u32(nb_queue_pools - 1) : 0; + rss_len = rte_fls_u32(dev->data->nb_rx_queues - 1) - pool_len; + + /* GLORT 0x0-0x3F are used by PF and VMDQ, 0x40-0x7F used by FD */ + dglortdec = (rss_len << FM10K_DGLORTDEC_RSSLENGTH_SHIFT) | pool_len; + dglortmask = (GLORT_PF_MASK << FM10K_DGLORTMAP_MASK_SHIFT) | + hw->mac.dglort_map; + FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(0), dglortmask); + /* Configure VMDQ/RSS DGlort Decoder */ + FM10K_WRITE_REG(hw, FM10K_DGLORTDEC(0), dglortdec); + + /* Flow Director configurations, only queue number is valid. 
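+	 * For example, with 32 Rx queues rte_fls_u32(31) = 5, so the flow
+	 * director entry below decodes the low five GLORT bits directly into
+	 * a queue index, while the VMDQ/RSS decoder above splits the same
+	 * bits into pool and RSS fields (8 pools of 4 queues gives
+	 * pool_len = 3 and rss_len = 2).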
*/ + dglortdec = rte_fls_u32(dev->data->nb_rx_queues - 1); + dglortmask = (GLORT_FD_MASK << FM10K_DGLORTMAP_MASK_SHIFT) | + (hw->mac.dglort_map + GLORT_FD_Q_BASE); + FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(1), dglortmask); + FM10K_WRITE_REG(hw, FM10K_DGLORTDEC(1), dglortdec); + + /* Invalidate all other GLORT entries */ + for (i = 2; i < FM10K_DGLORT_COUNT; i++) + FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(i), + FM10K_DGLORTMAP_NONE); +} + +#define BSIZEPKT_ROUNDUP ((1 << FM10K_SRRCTL_BSIZEPKT_SHIFT) - 1) +static int +fm10k_dev_start(struct rte_eth_dev *dev) +{ + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int i, diag; + + PMD_INIT_FUNC_TRACE(); + + /* stop, init, then start the hw */ + diag = fm10k_stop_hw(hw); + if (diag != FM10K_SUCCESS) { + PMD_INIT_LOG(ERR, "Hardware stop failed: %d", diag); + return -EIO; + } + + diag = fm10k_init_hw(hw); + if (diag != FM10K_SUCCESS) { + PMD_INIT_LOG(ERR, "Hardware init failed: %d", diag); + return -EIO; + } + + diag = fm10k_start_hw(hw); + if (diag != FM10K_SUCCESS) { + PMD_INIT_LOG(ERR, "Hardware start failed: %d", diag); + return -EIO; + } + + diag = fm10k_dev_tx_init(dev); + if (diag) { + PMD_INIT_LOG(ERR, "TX init failed: %d", diag); + return diag; + } + + if (fm10k_dev_rxq_interrupt_setup(dev)) + return -EIO; + + diag = fm10k_dev_rx_init(dev); + if (diag) { + PMD_INIT_LOG(ERR, "RX init failed: %d", diag); + return diag; + } + + if (hw->mac.type == fm10k_mac_pf) + fm10k_dev_dglort_map_configure(dev); + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + struct fm10k_rx_queue *rxq; + rxq = dev->data->rx_queues[i]; + + if (rxq->rx_deferred_start) + continue; + diag = fm10k_dev_rx_queue_start(dev, i); + if (diag != 0) { + int j; + for (j = 0; j < i; ++j) + rx_queue_clean(dev->data->rx_queues[j]); + return diag; + } + } + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + struct fm10k_tx_queue *txq; + txq = dev->data->tx_queues[i]; + + if (txq->tx_deferred_start) + continue; + diag = fm10k_dev_tx_queue_start(dev, i); + if (diag != 0) { + int j; + for (j = 0; j < i; ++j) + tx_queue_clean(dev->data->tx_queues[j]); + for (j = 0; j < dev->data->nb_rx_queues; ++j) + rx_queue_clean(dev->data->rx_queues[j]); + return diag; + } + } + + /* Update default vlan when not in VMDQ mode */ + if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)) + fm10k_vlan_filter_set(dev, hw->mac.default_vid, true); + + fm10k_link_update(dev, 0); + + return 0; +} + +static void +fm10k_dev_stop(struct rte_eth_dev *dev) +{ + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pdev->intr_handle; + int i; + + PMD_INIT_FUNC_TRACE(); + + if (dev->data->tx_queues) + for (i = 0; i < dev->data->nb_tx_queues; i++) + fm10k_dev_tx_queue_stop(dev, i); + + if (dev->data->rx_queues) + for (i = 0; i < dev->data->nb_rx_queues; i++) + fm10k_dev_rx_queue_stop(dev, i); + + /* Disable datapath event */ + if (rte_intr_dp_is_en(intr_handle)) { + for (i = 0; i < dev->data->nb_rx_queues; i++) { + FM10K_WRITE_REG(hw, FM10K_RXINT(i), + 3 << FM10K_RXINT_TIMER_SHIFT); + if (hw->mac.type == fm10k_mac_pf) + FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(pdev, i)), + FM10K_ITR_MASK_SET); + else + FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(pdev, i)), + FM10K_ITR_MASK_SET); + } + } + /* Clean datapath event and queue/vec mapping */ + rte_intr_efd_disable(intr_handle); + rte_free(intr_handle->intr_vec); + intr_handle->intr_vec = NULL; +} + +static void +fm10k_dev_queue_release(struct 
rte_eth_dev *dev) +{ + int i; + + PMD_INIT_FUNC_TRACE(); + + if (dev->data->tx_queues) { + for (i = 0; i < dev->data->nb_tx_queues; i++) { + struct fm10k_tx_queue *txq = dev->data->tx_queues[i]; + + tx_queue_free(txq); + } + } + + if (dev->data->rx_queues) { + for (i = 0; i < dev->data->nb_rx_queues; i++) + fm10k_rx_queue_release(dev->data->rx_queues[i]); + } +} + +static int +fm10k_link_update(struct rte_eth_dev *dev, + __rte_unused int wait_to_complete) +{ + struct fm10k_dev_info *dev_info = + FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private); + PMD_INIT_FUNC_TRACE(); + + dev->data->dev_link.link_speed = ETH_SPEED_NUM_50G; + dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX; + dev->data->dev_link.link_status = + dev_info->sm_down ? ETH_LINK_DOWN : ETH_LINK_UP; + dev->data->dev_link.link_autoneg = ETH_LINK_FIXED; + + return 0; +} + +static int fm10k_xstats_get_names(__rte_unused struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned limit) +{ + unsigned i, q; + unsigned count = 0; + + if (xstats_names != NULL) { + /* Note: limit checked in rte_eth_xstats_names() */ + + /* Global stats */ + for (i = 0; i < FM10K_NB_HW_XSTATS; i++) { + snprintf(xstats_names[count].name, + sizeof(xstats_names[count].name), + "%s", fm10k_hw_stats_strings[count].name); + count++; + } + + /* PF queue stats */ + for (q = 0; q < FM10K_MAX_QUEUES_PF; q++) { + for (i = 0; i < FM10K_NB_RX_Q_XSTATS; i++) { + snprintf(xstats_names[count].name, + sizeof(xstats_names[count].name), + "rx_q%u_%s", q, + fm10k_hw_stats_rx_q_strings[i].name); + count++; + } + for (i = 0; i < FM10K_NB_TX_Q_XSTATS; i++) { + snprintf(xstats_names[count].name, + sizeof(xstats_names[count].name), + "tx_q%u_%s", q, + fm10k_hw_stats_tx_q_strings[i].name); + count++; + } + } + } + return FM10K_NB_XSTATS; +} + +static int +fm10k_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, + unsigned n) +{ + struct fm10k_hw_stats *hw_stats = + FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private); + unsigned i, q, count = 0; + + if (n < FM10K_NB_XSTATS) + return FM10K_NB_XSTATS; + + /* Global stats */ + for (i = 0; i < FM10K_NB_HW_XSTATS; i++) { + xstats[count].value = *(uint64_t *)(((char *)hw_stats) + + fm10k_hw_stats_strings[count].offset); + xstats[count].id = count; + count++; + } + + /* PF queue stats */ + for (q = 0; q < FM10K_MAX_QUEUES_PF; q++) { + for (i = 0; i < FM10K_NB_RX_Q_XSTATS; i++) { + xstats[count].value = + *(uint64_t *)(((char *)&hw_stats->q[q]) + + fm10k_hw_stats_rx_q_strings[i].offset); + xstats[count].id = count; + count++; + } + for (i = 0; i < FM10K_NB_TX_Q_XSTATS; i++) { + xstats[count].value = + *(uint64_t *)(((char *)&hw_stats->q[q]) + + fm10k_hw_stats_tx_q_strings[i].offset); + xstats[count].id = count; + count++; + } + } + + return FM10K_NB_XSTATS; +} + +static int +fm10k_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) +{ + uint64_t ipackets, opackets, ibytes, obytes, imissed; + struct fm10k_hw *hw = + FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct fm10k_hw_stats *hw_stats = + FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private); + int i; + + PMD_INIT_FUNC_TRACE(); + + fm10k_update_hw_stats(hw, hw_stats); + + ipackets = opackets = ibytes = obytes = imissed = 0; + for (i = 0; (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) && + (i < hw->mac.max_queues); ++i) { + stats->q_ipackets[i] = hw_stats->q[i].rx_packets.count; + stats->q_opackets[i] = hw_stats->q[i].tx_packets.count; + stats->q_ibytes[i] = hw_stats->q[i].rx_bytes.count; + stats->q_obytes[i] = 
hw_stats->q[i].tx_bytes.count; + stats->q_errors[i] = hw_stats->q[i].rx_drops.count; + ipackets += stats->q_ipackets[i]; + opackets += stats->q_opackets[i]; + ibytes += stats->q_ibytes[i]; + obytes += stats->q_obytes[i]; + imissed += stats->q_errors[i]; + } + stats->ipackets = ipackets; + stats->opackets = opackets; + stats->ibytes = ibytes; + stats->obytes = obytes; + stats->imissed = imissed; + return 0; +} + +static int +fm10k_stats_reset(struct rte_eth_dev *dev) +{ + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct fm10k_hw_stats *hw_stats = + FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private); + + PMD_INIT_FUNC_TRACE(); + + memset(hw_stats, 0, sizeof(*hw_stats)); + fm10k_rebind_hw_stats(hw, hw_stats); + + return 0; +} + +static int +fm10k_dev_infos_get(struct rte_eth_dev *dev, + struct rte_eth_dev_info *dev_info) +{ + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev); + + PMD_INIT_FUNC_TRACE(); + + dev_info->min_rx_bufsize = FM10K_MIN_RX_BUF_SIZE; + dev_info->max_rx_pktlen = FM10K_MAX_PKT_SIZE; + dev_info->max_rx_queues = hw->mac.max_queues; + dev_info->max_tx_queues = hw->mac.max_queues; + dev_info->max_mac_addrs = FM10K_MAX_MACADDR_NUM; + dev_info->max_hash_mac_addrs = 0; + dev_info->max_vfs = pdev->max_vfs; + dev_info->vmdq_pool_base = 0; + dev_info->vmdq_queue_base = 0; + dev_info->max_vmdq_pools = ETH_32_POOLS; + dev_info->vmdq_queue_num = FM10K_MAX_QUEUES_PF; + dev_info->rx_queue_offload_capa = fm10k_get_rx_queue_offloads_capa(dev); + dev_info->rx_offload_capa = fm10k_get_rx_port_offloads_capa(dev) | + dev_info->rx_queue_offload_capa; + dev_info->tx_queue_offload_capa = fm10k_get_tx_queue_offloads_capa(dev); + dev_info->tx_offload_capa = fm10k_get_tx_port_offloads_capa(dev) | + dev_info->tx_queue_offload_capa; + + dev_info->hash_key_size = FM10K_RSSRK_SIZE * sizeof(uint32_t); + dev_info->reta_size = FM10K_MAX_RSS_INDICES; + dev_info->flow_type_rss_offloads = ETH_RSS_IPV4 | + ETH_RSS_IPV6 | + ETH_RSS_IPV6_EX | + ETH_RSS_NONFRAG_IPV4_TCP | + ETH_RSS_NONFRAG_IPV6_TCP | + ETH_RSS_IPV6_TCP_EX | + ETH_RSS_NONFRAG_IPV4_UDP | + ETH_RSS_NONFRAG_IPV6_UDP | + ETH_RSS_IPV6_UDP_EX; + + dev_info->default_rxconf = (struct rte_eth_rxconf) { + .rx_thresh = { + .pthresh = FM10K_DEFAULT_RX_PTHRESH, + .hthresh = FM10K_DEFAULT_RX_HTHRESH, + .wthresh = FM10K_DEFAULT_RX_WTHRESH, + }, + .rx_free_thresh = FM10K_RX_FREE_THRESH_DEFAULT(0), + .rx_drop_en = 0, + .offloads = 0, + }; + + dev_info->default_txconf = (struct rte_eth_txconf) { + .tx_thresh = { + .pthresh = FM10K_DEFAULT_TX_PTHRESH, + .hthresh = FM10K_DEFAULT_TX_HTHRESH, + .wthresh = FM10K_DEFAULT_TX_WTHRESH, + }, + .tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(0), + .tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(0), + .offloads = 0, + }; + + dev_info->rx_desc_lim = (struct rte_eth_desc_lim) { + .nb_max = FM10K_MAX_RX_DESC, + .nb_min = FM10K_MIN_RX_DESC, + .nb_align = FM10K_MULT_RX_DESC, + }; + + dev_info->tx_desc_lim = (struct rte_eth_desc_lim) { + .nb_max = FM10K_MAX_TX_DESC, + .nb_min = FM10K_MIN_TX_DESC, + .nb_align = FM10K_MULT_TX_DESC, + .nb_seg_max = FM10K_TX_MAX_SEG, + .nb_mtu_seg_max = FM10K_TX_MAX_MTU_SEG, + }; + + dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | + ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G | + ETH_LINK_SPEED_40G | ETH_LINK_SPEED_100G; + + return 0; +} + +#ifdef RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE +static const uint32_t * +fm10k_dev_supported_ptypes_get(struct rte_eth_dev *dev) +{ + if (dev->rx_pkt_burst 
== fm10k_recv_pkts || + dev->rx_pkt_burst == fm10k_recv_scattered_pkts) { + static uint32_t ptypes[] = { + /* refers to rx_desc_to_ol_flags() */ + RTE_PTYPE_L2_ETHER, + RTE_PTYPE_L3_IPV4, + RTE_PTYPE_L3_IPV4_EXT, + RTE_PTYPE_L3_IPV6, + RTE_PTYPE_L3_IPV6_EXT, + RTE_PTYPE_L4_TCP, + RTE_PTYPE_L4_UDP, + RTE_PTYPE_UNKNOWN + }; + + return ptypes; + } else if (dev->rx_pkt_burst == fm10k_recv_pkts_vec || + dev->rx_pkt_burst == fm10k_recv_scattered_pkts_vec) { + static uint32_t ptypes_vec[] = { + /* refers to fm10k_desc_to_pktype_v() */ + RTE_PTYPE_L3_IPV4, + RTE_PTYPE_L3_IPV4_EXT, + RTE_PTYPE_L3_IPV6, + RTE_PTYPE_L3_IPV6_EXT, + RTE_PTYPE_L4_TCP, + RTE_PTYPE_L4_UDP, + RTE_PTYPE_TUNNEL_GENEVE, + RTE_PTYPE_TUNNEL_NVGRE, + RTE_PTYPE_TUNNEL_VXLAN, + RTE_PTYPE_TUNNEL_GRE, + RTE_PTYPE_UNKNOWN + }; + + return ptypes_vec; + } + + return NULL; +} +#else +static const uint32_t * +fm10k_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused) +{ + return NULL; +} +#endif + +static int +fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) +{ + s32 result; + uint16_t mac_num = 0; + uint32_t vid_idx, vid_bit, mac_index; + struct fm10k_hw *hw; + struct fm10k_macvlan_filter_info *macvlan; + struct rte_eth_dev_data *data = dev->data; + + hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private); + + if (macvlan->nb_queue_pools > 0) { /* VMDQ mode */ + PMD_INIT_LOG(ERR, "Cannot change VLAN filter in VMDQ mode"); + return -EINVAL; + } + + if (vlan_id > ETH_VLAN_ID_MAX) { + PMD_INIT_LOG(ERR, "Invalid vlan_id: must be < 4096"); + return -EINVAL; + } + + vid_idx = FM10K_VFTA_IDX(vlan_id); + vid_bit = FM10K_VFTA_BIT(vlan_id); + /* this VLAN ID is already in the VLAN filter table, return SUCCESS */ + if (on && (macvlan->vfta[vid_idx] & vid_bit)) + return 0; + /* this VLAN ID is NOT in the VLAN filter table, cannot remove */ + if (!on && !(macvlan->vfta[vid_idx] & vid_bit)) { + PMD_INIT_LOG(ERR, "Invalid vlan_id: not existing " + "in the VLAN filter table"); + return -EINVAL; + } + + fm10k_mbx_lock(hw); + result = fm10k_update_vlan(hw, vlan_id, 0, on); + fm10k_mbx_unlock(hw); + if (result != FM10K_SUCCESS) { + PMD_INIT_LOG(ERR, "VLAN update failed: %d", result); + return -EIO; + } + + for (mac_index = 0; (mac_index < FM10K_MAX_MACADDR_NUM) && + (result == FM10K_SUCCESS); mac_index++) { + if (rte_is_zero_ether_addr(&data->mac_addrs[mac_index])) + continue; + if (mac_num > macvlan->mac_num - 1) { + PMD_INIT_LOG(ERR, "MAC address number " + "not match"); + break; + } + fm10k_mbx_lock(hw); + result = fm10k_update_uc_addr(hw, hw->mac.dglort_map, + data->mac_addrs[mac_index].addr_bytes, + vlan_id, on, 0); + fm10k_mbx_unlock(hw); + mac_num++; + } + if (result != FM10K_SUCCESS) { + PMD_INIT_LOG(ERR, "MAC address update failed: %d", result); + return -EIO; + } + + if (on) { + macvlan->vlan_num++; + macvlan->vfta[vid_idx] |= vid_bit; + } else { + macvlan->vlan_num--; + macvlan->vfta[vid_idx] &= ~vid_bit; + } + return 0; +} + +static int +fm10k_vlan_offload_set(struct rte_eth_dev *dev, int mask) +{ + if (mask & ETH_VLAN_STRIP_MASK) { + if (!(dev->data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_VLAN_STRIP)) + PMD_INIT_LOG(ERR, "VLAN stripping is " + "always on in fm10k"); + } + + if (mask & ETH_VLAN_EXTEND_MASK) { + if (dev->data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_VLAN_EXTEND) + PMD_INIT_LOG(ERR, "VLAN QinQ is not " + "supported in fm10k"); + } + + if (mask & ETH_VLAN_FILTER_MASK) { + if (!(dev->data->dev_conf.rxmode.offloads & + 
DEV_RX_OFFLOAD_VLAN_FILTER)) + PMD_INIT_LOG(ERR, "VLAN filter is always on in fm10k"); + } + + return 0; +} + +/* Add/Remove a MAC address, and update filters to main VSI */ +static void fm10k_MAC_filter_set_main_vsi(struct rte_eth_dev *dev, + const u8 *mac, bool add, uint32_t pool) +{ + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct fm10k_macvlan_filter_info *macvlan; + uint32_t i, j, k; + + macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private); + + if (pool != MAIN_VSI_POOL_NUMBER) { + PMD_DRV_LOG(ERR, "VMDQ not enabled, can't set " + "mac to pool %u", pool); + return; + } + for (i = 0, j = 0; j < FM10K_VFTA_SIZE; j++) { + if (!macvlan->vfta[j]) + continue; + for (k = 0; k < FM10K_UINT32_BIT_SIZE; k++) { + if (!(macvlan->vfta[j] & (1 << k))) + continue; + if (i + 1 > macvlan->vlan_num) { + PMD_INIT_LOG(ERR, "vlan number not match"); + return; + } + fm10k_mbx_lock(hw); + fm10k_update_uc_addr(hw, hw->mac.dglort_map, mac, + j * FM10K_UINT32_BIT_SIZE + k, add, 0); + fm10k_mbx_unlock(hw); + i++; + } + } +} + +/* Add/Remove a MAC address, and update filters to VMDQ */ +static void fm10k_MAC_filter_set_vmdq(struct rte_eth_dev *dev, + const u8 *mac, bool add, uint32_t pool) +{ + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct fm10k_macvlan_filter_info *macvlan; + struct rte_eth_vmdq_rx_conf *vmdq_conf; + uint32_t i; + + macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private); + vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf; + + if (pool > macvlan->nb_queue_pools) { + PMD_DRV_LOG(ERR, "Pool number %u invalid." + " Max pool is %u", + pool, macvlan->nb_queue_pools); + return; + } + for (i = 0; i < vmdq_conf->nb_pool_maps; i++) { + if (!(vmdq_conf->pool_map[i].pools & (1UL << pool))) + continue; + fm10k_mbx_lock(hw); + fm10k_update_uc_addr(hw, hw->mac.dglort_map + pool, mac, + vmdq_conf->pool_map[i].vlan_id, add, 0); + fm10k_mbx_unlock(hw); + } +} + +/* Add/Remove a MAC address, and update filters */ +static void fm10k_MAC_filter_set(struct rte_eth_dev *dev, + const u8 *mac, bool add, uint32_t pool) +{ + struct fm10k_macvlan_filter_info *macvlan; + + macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private); + + if (macvlan->nb_queue_pools > 0) /* VMDQ mode */ + fm10k_MAC_filter_set_vmdq(dev, mac, add, pool); + else + fm10k_MAC_filter_set_main_vsi(dev, mac, add, pool); + + if (add) + macvlan->mac_num++; + else + macvlan->mac_num--; +} + +/* Add a MAC address, and update filters */ +static int +fm10k_macaddr_add(struct rte_eth_dev *dev, + struct rte_ether_addr *mac_addr, + uint32_t index, + uint32_t pool) +{ + struct fm10k_macvlan_filter_info *macvlan; + + macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private); + fm10k_MAC_filter_set(dev, mac_addr->addr_bytes, TRUE, pool); + macvlan->mac_vmdq_id[index] = pool; + return 0; +} + +/* Remove a MAC address, and update filters */ +static void +fm10k_macaddr_remove(struct rte_eth_dev *dev, uint32_t index) +{ + struct rte_eth_dev_data *data = dev->data; + struct fm10k_macvlan_filter_info *macvlan; + + macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private); + fm10k_MAC_filter_set(dev, data->mac_addrs[index].addr_bytes, + FALSE, macvlan->mac_vmdq_id[index]); + macvlan->mac_vmdq_id[index] = 0; +} + +static inline int +check_nb_desc(uint16_t min, uint16_t max, uint16_t mult, uint16_t request) +{ + if ((request < min) || (request > max) || ((request % mult) != 0)) + return -1; + else + return 0; +} + + +static inline int +check_thresh(uint16_t min, 
uint16_t max, uint16_t div, uint16_t request) +{ + if ((request < min) || (request > max) || ((div % request) != 0)) + return -1; + else + return 0; +} + +static inline int +handle_rxconf(struct fm10k_rx_queue *q, const struct rte_eth_rxconf *conf) +{ + uint16_t rx_free_thresh; + + if (conf->rx_free_thresh == 0) + rx_free_thresh = FM10K_RX_FREE_THRESH_DEFAULT(q); + else + rx_free_thresh = conf->rx_free_thresh; + + /* make sure the requested threshold satisfies the constraints */ + if (check_thresh(FM10K_RX_FREE_THRESH_MIN(q), + FM10K_RX_FREE_THRESH_MAX(q), + FM10K_RX_FREE_THRESH_DIV(q), + rx_free_thresh)) { + PMD_INIT_LOG(ERR, "rx_free_thresh (%u) must be " + "less than or equal to %u, " + "greater than or equal to %u, " + "and a divisor of %u", + rx_free_thresh, FM10K_RX_FREE_THRESH_MAX(q), + FM10K_RX_FREE_THRESH_MIN(q), + FM10K_RX_FREE_THRESH_DIV(q)); + return -EINVAL; + } + + q->alloc_thresh = rx_free_thresh; + q->drop_en = conf->rx_drop_en; + q->rx_deferred_start = conf->rx_deferred_start; + + return 0; +} + +/* + * Hardware requires specific alignment for Rx packet buffers. At + * least one of the following two conditions must be satisfied. + * 1. Address is 512B aligned + * 2. Address is 8B aligned and buffer does not cross 4K boundary. + * + * As such, the driver may need to adjust the DMA address within the + * buffer by up to 512B. + * + * return 1 if the element size is valid, otherwise return 0. + */ +static int +mempool_element_size_valid(struct rte_mempool *mp) +{ + uint32_t min_size; + + /* elt_size includes mbuf header and headroom */ + min_size = mp->elt_size - sizeof(struct rte_mbuf) - + RTE_PKTMBUF_HEADROOM; + + /* account for up to 512B of alignment */ + min_size -= FM10K_RX_DATABUF_ALIGN; + + /* sanity check for overflow */ + if (min_size > mp->elt_size) + return 0; + + /* size is valid */ + return 1; +} + +static uint64_t fm10k_get_rx_queue_offloads_capa(struct rte_eth_dev *dev) +{ + RTE_SET_USED(dev); + + return (uint64_t)(DEV_RX_OFFLOAD_SCATTER); +} + +static uint64_t fm10k_get_rx_port_offloads_capa(struct rte_eth_dev *dev) +{ + RTE_SET_USED(dev); + + return (uint64_t)(DEV_RX_OFFLOAD_VLAN_STRIP | + DEV_RX_OFFLOAD_VLAN_FILTER | + DEV_RX_OFFLOAD_IPV4_CKSUM | + DEV_RX_OFFLOAD_UDP_CKSUM | + DEV_RX_OFFLOAD_TCP_CKSUM | + DEV_RX_OFFLOAD_JUMBO_FRAME | + DEV_RX_OFFLOAD_HEADER_SPLIT | + DEV_RX_OFFLOAD_RSS_HASH); +} + +static int +fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id, + uint16_t nb_desc, unsigned int socket_id, + const struct rte_eth_rxconf *conf, struct rte_mempool *mp) +{ + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct fm10k_dev_info *dev_info = + FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private); + struct fm10k_rx_queue *q; + const struct rte_memzone *mz; + uint64_t offloads; + + PMD_INIT_FUNC_TRACE(); + + offloads = conf->offloads | dev->data->dev_conf.rxmode.offloads; + + /* make sure the mempool element size can account for alignment. 
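+	 * For example, a pool created with RTE_MBUF_DEFAULT_BUF_SIZE (2048
+	 * bytes of room after the default 128-byte headroom) still leaves
+	 * roughly 1536 usable bytes per buffer after the worst-case 512-byte
+	 * alignment adjustment, so it passes the check below.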
*/ + if (!mempool_element_size_valid(mp)) { + PMD_INIT_LOG(ERR, "Error : Mempool element size is too small"); + return -EINVAL; + } + + /* make sure a valid number of descriptors have been requested */ + if (check_nb_desc(FM10K_MIN_RX_DESC, FM10K_MAX_RX_DESC, + FM10K_MULT_RX_DESC, nb_desc)) { + PMD_INIT_LOG(ERR, "Number of Rx descriptors (%u) must be " + "less than or equal to %"PRIu32", " + "greater than or equal to %u, " + "and a multiple of %u", + nb_desc, (uint32_t)FM10K_MAX_RX_DESC, FM10K_MIN_RX_DESC, + FM10K_MULT_RX_DESC); + return -EINVAL; + } + + /* + * if this queue existed already, free the associated memory. The + * queue cannot be reused in case we need to allocate memory on + * different socket than was previously used. + */ + if (dev->data->rx_queues[queue_id] != NULL) { + rx_queue_free(dev->data->rx_queues[queue_id]); + dev->data->rx_queues[queue_id] = NULL; + } + + /* allocate memory for the queue structure */ + q = rte_zmalloc_socket("fm10k", sizeof(*q), RTE_CACHE_LINE_SIZE, + socket_id); + if (q == NULL) { + PMD_INIT_LOG(ERR, "Cannot allocate queue structure"); + return -ENOMEM; + } + + /* setup queue */ + q->mp = mp; + q->nb_desc = nb_desc; + q->nb_fake_desc = FM10K_MULT_RX_DESC; + q->port_id = dev->data->port_id; + q->queue_id = queue_id; + q->tail_ptr = (volatile uint32_t *) + &((uint32_t *)hw->hw_addr)[FM10K_RDT(queue_id)]; + q->offloads = offloads; + if (handle_rxconf(q, conf)) + return -EINVAL; + + /* allocate memory for the software ring */ + q->sw_ring = rte_zmalloc_socket("fm10k sw ring", + (nb_desc + q->nb_fake_desc) * sizeof(struct rte_mbuf *), + RTE_CACHE_LINE_SIZE, socket_id); + if (q->sw_ring == NULL) { + PMD_INIT_LOG(ERR, "Cannot allocate software ring"); + rte_free(q); + return -ENOMEM; + } + + /* + * allocate memory for the hardware descriptor ring. A memzone large + * enough to hold the maximum ring size is requested to allow for + * resizing in later calls to the queue setup function. 
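+	 * rte_eth_dma_zone_reserve() looks the zone up by name first, so a
+	 * later setup call with a larger nb_desc can reuse the same
+	 * FM10K_MAX_RX_RING_SZ reservation instead of allocating a new one.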
+ */ + mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_id, + FM10K_MAX_RX_RING_SZ, FM10K_ALIGN_RX_DESC, + socket_id); + if (mz == NULL) { + PMD_INIT_LOG(ERR, "Cannot allocate hardware ring"); + rte_free(q->sw_ring); + rte_free(q); + return -ENOMEM; + } + q->hw_ring = mz->addr; + q->hw_ring_phys_addr = mz->iova; + + /* Check if number of descs satisfied Vector requirement */ + if (!rte_is_power_of_2(nb_desc)) { + PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Vector Rx " + "preconditions - canceling the feature for " + "the whole port[%d]", + q->queue_id, q->port_id); + dev_info->rx_vec_allowed = false; + } else + fm10k_rxq_vec_setup(q); + + dev->data->rx_queues[queue_id] = q; + return 0; +} + +static void +fm10k_rx_queue_release(void *queue) +{ + PMD_INIT_FUNC_TRACE(); + + rx_queue_free(queue); +} + +static inline int +handle_txconf(struct fm10k_tx_queue *q, const struct rte_eth_txconf *conf) +{ + uint16_t tx_free_thresh; + uint16_t tx_rs_thresh; + + /* constraint MACROs require that tx_free_thresh is configured + * before tx_rs_thresh */ + if (conf->tx_free_thresh == 0) + tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(q); + else + tx_free_thresh = conf->tx_free_thresh; + + /* make sure the requested threshold satisfies the constraints */ + if (check_thresh(FM10K_TX_FREE_THRESH_MIN(q), + FM10K_TX_FREE_THRESH_MAX(q), + FM10K_TX_FREE_THRESH_DIV(q), + tx_free_thresh)) { + PMD_INIT_LOG(ERR, "tx_free_thresh (%u) must be " + "less than or equal to %u, " + "greater than or equal to %u, " + "and a divisor of %u", + tx_free_thresh, FM10K_TX_FREE_THRESH_MAX(q), + FM10K_TX_FREE_THRESH_MIN(q), + FM10K_TX_FREE_THRESH_DIV(q)); + return -EINVAL; + } + + q->free_thresh = tx_free_thresh; + + if (conf->tx_rs_thresh == 0) + tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(q); + else + tx_rs_thresh = conf->tx_rs_thresh; + + q->tx_deferred_start = conf->tx_deferred_start; + + /* make sure the requested threshold satisfies the constraints */ + if (check_thresh(FM10K_TX_RS_THRESH_MIN(q), + FM10K_TX_RS_THRESH_MAX(q), + FM10K_TX_RS_THRESH_DIV(q), + tx_rs_thresh)) { + PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be " + "less than or equal to %u, " + "greater than or equal to %u, " + "and a divisor of %u", + tx_rs_thresh, FM10K_TX_RS_THRESH_MAX(q), + FM10K_TX_RS_THRESH_MIN(q), + FM10K_TX_RS_THRESH_DIV(q)); + return -EINVAL; + } + + q->rs_thresh = tx_rs_thresh; + + return 0; +} + +static uint64_t fm10k_get_tx_queue_offloads_capa(struct rte_eth_dev *dev) +{ + RTE_SET_USED(dev); + + return 0; +} + +static uint64_t fm10k_get_tx_port_offloads_capa(struct rte_eth_dev *dev) +{ + RTE_SET_USED(dev); + + return (uint64_t)(DEV_TX_OFFLOAD_VLAN_INSERT | + DEV_TX_OFFLOAD_MULTI_SEGS | + DEV_TX_OFFLOAD_IPV4_CKSUM | + DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_TCP_CKSUM | + DEV_TX_OFFLOAD_TCP_TSO); +} + +static int +fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id, + uint16_t nb_desc, unsigned int socket_id, + const struct rte_eth_txconf *conf) +{ + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct fm10k_tx_queue *q; + const struct rte_memzone *mz; + uint64_t offloads; + + PMD_INIT_FUNC_TRACE(); + + offloads = conf->offloads | dev->data->dev_conf.txmode.offloads; + + /* make sure a valid number of descriptors have been requested */ + if (check_nb_desc(FM10K_MIN_TX_DESC, FM10K_MAX_TX_DESC, + FM10K_MULT_TX_DESC, nb_desc)) { + PMD_INIT_LOG(ERR, "Number of Tx descriptors (%u) must be " + "less than or equal to %"PRIu32", " + "greater than or equal to %u, " + "and a multiple of %u", + nb_desc, 
(uint32_t)FM10K_MAX_TX_DESC, FM10K_MIN_TX_DESC, + FM10K_MULT_TX_DESC); + return -EINVAL; + } + + /* + * if this queue existed already, free the associated memory. The + * queue cannot be reused in case we need to allocate memory on + * different socket than was previously used. + */ + if (dev->data->tx_queues[queue_id] != NULL) { + struct fm10k_tx_queue *txq = dev->data->tx_queues[queue_id]; + + tx_queue_free(txq); + dev->data->tx_queues[queue_id] = NULL; + } + + /* allocate memory for the queue structure */ + q = rte_zmalloc_socket("fm10k", sizeof(*q), RTE_CACHE_LINE_SIZE, + socket_id); + if (q == NULL) { + PMD_INIT_LOG(ERR, "Cannot allocate queue structure"); + return -ENOMEM; + } + + /* setup queue */ + q->nb_desc = nb_desc; + q->port_id = dev->data->port_id; + q->queue_id = queue_id; + q->offloads = offloads; + q->ops = &def_txq_ops; + q->tail_ptr = (volatile uint32_t *) + &((uint32_t *)hw->hw_addr)[FM10K_TDT(queue_id)]; + if (handle_txconf(q, conf)) + return -EINVAL; + + /* allocate memory for the software ring */ + q->sw_ring = rte_zmalloc_socket("fm10k sw ring", + nb_desc * sizeof(struct rte_mbuf *), + RTE_CACHE_LINE_SIZE, socket_id); + if (q->sw_ring == NULL) { + PMD_INIT_LOG(ERR, "Cannot allocate software ring"); + rte_free(q); + return -ENOMEM; + } + + /* + * allocate memory for the hardware descriptor ring. A memzone large + * enough to hold the maximum ring size is requested to allow for + * resizing in later calls to the queue setup function. + */ + mz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_id, + FM10K_MAX_TX_RING_SZ, FM10K_ALIGN_TX_DESC, + socket_id); + if (mz == NULL) { + PMD_INIT_LOG(ERR, "Cannot allocate hardware ring"); + rte_free(q->sw_ring); + rte_free(q); + return -ENOMEM; + } + q->hw_ring = mz->addr; + q->hw_ring_phys_addr = mz->iova; + + /* + * allocate memory for the RS bit tracker. Enough slots to hold the + * descriptor index for each RS bit needing to be set are required. + */ + q->rs_tracker.list = rte_zmalloc_socket("fm10k rs tracker", + ((nb_desc + 1) / q->rs_thresh) * + sizeof(uint16_t), + RTE_CACHE_LINE_SIZE, socket_id); + if (q->rs_tracker.list == NULL) { + PMD_INIT_LOG(ERR, "Cannot allocate RS bit tracker"); + rte_free(q->sw_ring); + rte_free(q); + return -ENOMEM; + } + + dev->data->tx_queues[queue_id] = q; + return 0; +} + +static void +fm10k_tx_queue_release(void *queue) +{ + struct fm10k_tx_queue *q = queue; + PMD_INIT_FUNC_TRACE(); + + tx_queue_free(q); +} + +static int +fm10k_reta_update(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size) +{ + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint16_t i, j, idx, shift; + uint8_t mask; + uint32_t reta; + + PMD_INIT_FUNC_TRACE(); + + if (reta_size > FM10K_MAX_RSS_INDICES) { + PMD_INIT_LOG(ERR, "The size of hash lookup table configured " + "(%d) doesn't match the number hardware can supported " + "(%d)", reta_size, FM10K_MAX_RSS_INDICES); + return -EINVAL; + } + + /* + * Update Redirection Table RETA[n], n=0..31. 
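+	 * Each 32-bit RETA register packs four one-byte queue indexes, so
+	 * table entry i lives in register i / 4 at byte i % 4; when the mask
+	 * covers only part of a register the code below does a
+	 * read-modify-write instead of overwriting all four bytes.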
The redirection table has + * 128-entries in 32 registers + */ + for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) { + idx = i / RTE_RETA_GROUP_SIZE; + shift = i % RTE_RETA_GROUP_SIZE; + mask = (uint8_t)((reta_conf[idx].mask >> shift) & + BIT_MASK_PER_UINT32); + if (mask == 0) + continue; + + reta = 0; + if (mask != BIT_MASK_PER_UINT32) + reta = FM10K_READ_REG(hw, FM10K_RETA(0, i >> 2)); + + for (j = 0; j < CHARS_PER_UINT32; j++) { + if (mask & (0x1 << j)) { + if (mask != 0xF) + reta &= ~(UINT8_MAX << CHAR_BIT * j); + reta |= reta_conf[idx].reta[shift + j] << + (CHAR_BIT * j); + } + } + FM10K_WRITE_REG(hw, FM10K_RETA(0, i >> 2), reta); + } + + return 0; +} + +static int +fm10k_reta_query(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size) +{ + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint16_t i, j, idx, shift; + uint8_t mask; + uint32_t reta; + + PMD_INIT_FUNC_TRACE(); + + if (reta_size < FM10K_MAX_RSS_INDICES) { + PMD_INIT_LOG(ERR, "The size of hash lookup table configured " + "(%d) doesn't match the number hardware can supported " + "(%d)", reta_size, FM10K_MAX_RSS_INDICES); + return -EINVAL; + } + + /* + * Read Redirection Table RETA[n], n=0..31. The redirection table has + * 128-entries in 32 registers + */ + for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) { + idx = i / RTE_RETA_GROUP_SIZE; + shift = i % RTE_RETA_GROUP_SIZE; + mask = (uint8_t)((reta_conf[idx].mask >> shift) & + BIT_MASK_PER_UINT32); + if (mask == 0) + continue; + + reta = FM10K_READ_REG(hw, FM10K_RETA(0, i >> 2)); + for (j = 0; j < CHARS_PER_UINT32; j++) { + if (mask & (0x1 << j)) + reta_conf[idx].reta[shift + j] = ((reta >> + CHAR_BIT * j) & UINT8_MAX); + } + } + + return 0; +} + +static int +fm10k_rss_hash_update(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf) +{ + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t *key = (uint32_t *)rss_conf->rss_key; + uint32_t mrqc; + uint64_t hf = rss_conf->rss_hf; + int i; + + PMD_INIT_FUNC_TRACE(); + + if (key && (rss_conf->rss_key_len < FM10K_RSSRK_SIZE * + FM10K_RSSRK_ENTRIES_PER_REG)) + return -EINVAL; + + if (hf == 0) + return -EINVAL; + + mrqc = 0; + mrqc |= (hf & ETH_RSS_IPV4) ? FM10K_MRQC_IPV4 : 0; + mrqc |= (hf & ETH_RSS_IPV6) ? FM10K_MRQC_IPV6 : 0; + mrqc |= (hf & ETH_RSS_IPV6_EX) ? FM10K_MRQC_IPV6 : 0; + mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? FM10K_MRQC_TCP_IPV4 : 0; + mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? FM10K_MRQC_TCP_IPV6 : 0; + mrqc |= (hf & ETH_RSS_IPV6_TCP_EX) ? FM10K_MRQC_TCP_IPV6 : 0; + mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_UDP) ? FM10K_MRQC_UDP_IPV4 : 0; + mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_UDP) ? FM10K_MRQC_UDP_IPV6 : 0; + mrqc |= (hf & ETH_RSS_IPV6_UDP_EX) ? 
FM10K_MRQC_UDP_IPV6 : 0; + + /* If the mapping doesn't fit any supported, return */ + if (mrqc == 0) + return -EINVAL; + + if (key != NULL) + for (i = 0; i < FM10K_RSSRK_SIZE; ++i) + FM10K_WRITE_REG(hw, FM10K_RSSRK(0, i), key[i]); + + FM10K_WRITE_REG(hw, FM10K_MRQC(0), mrqc); + + return 0; +} + +static int +fm10k_rss_hash_conf_get(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf) +{ + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t *key = (uint32_t *)rss_conf->rss_key; + uint32_t mrqc; + uint64_t hf; + int i; + + PMD_INIT_FUNC_TRACE(); + + if (key && (rss_conf->rss_key_len < FM10K_RSSRK_SIZE * + FM10K_RSSRK_ENTRIES_PER_REG)) + return -EINVAL; + + if (key != NULL) + for (i = 0; i < FM10K_RSSRK_SIZE; ++i) + key[i] = FM10K_READ_REG(hw, FM10K_RSSRK(0, i)); + + mrqc = FM10K_READ_REG(hw, FM10K_MRQC(0)); + hf = 0; + hf |= (mrqc & FM10K_MRQC_IPV4) ? ETH_RSS_IPV4 : 0; + hf |= (mrqc & FM10K_MRQC_IPV6) ? ETH_RSS_IPV6 : 0; + hf |= (mrqc & FM10K_MRQC_IPV6) ? ETH_RSS_IPV6_EX : 0; + hf |= (mrqc & FM10K_MRQC_TCP_IPV4) ? ETH_RSS_NONFRAG_IPV4_TCP : 0; + hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_NONFRAG_IPV6_TCP : 0; + hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_IPV6_TCP_EX : 0; + hf |= (mrqc & FM10K_MRQC_UDP_IPV4) ? ETH_RSS_NONFRAG_IPV4_UDP : 0; + hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_NONFRAG_IPV6_UDP : 0; + hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_IPV6_UDP_EX : 0; + + rss_conf->rss_hf = hf; + + return 0; +} + +static void +fm10k_dev_enable_intr_pf(struct rte_eth_dev *dev) +{ + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t int_map = FM10K_INT_MAP_IMMEDIATE; + + /* Bind all local non-queue interrupt to vector 0 */ + int_map |= FM10K_MISC_VEC_ID; + + FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_mailbox), int_map); + FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_pcie_fault), int_map); + FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_switch_up_down), int_map); + FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_switch_event), int_map); + FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_sram), int_map); + FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_vflr), int_map); + + /* Enable misc causes */ + FM10K_WRITE_REG(hw, FM10K_EIMR, FM10K_EIMR_ENABLE(PCA_FAULT) | + FM10K_EIMR_ENABLE(THI_FAULT) | + FM10K_EIMR_ENABLE(FUM_FAULT) | + FM10K_EIMR_ENABLE(MAILBOX) | + FM10K_EIMR_ENABLE(SWITCHREADY) | + FM10K_EIMR_ENABLE(SWITCHNOTREADY) | + FM10K_EIMR_ENABLE(SRAMERROR) | + FM10K_EIMR_ENABLE(VFLR)); + + /* Enable ITR 0 */ + FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_AUTOMASK | + FM10K_ITR_MASK_CLEAR); + FM10K_WRITE_FLUSH(hw); +} + +static void +fm10k_dev_disable_intr_pf(struct rte_eth_dev *dev) +{ + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t int_map = FM10K_INT_MAP_DISABLE; + + int_map |= FM10K_MISC_VEC_ID; + + FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_mailbox), int_map); + FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_pcie_fault), int_map); + FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_switch_up_down), int_map); + FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_switch_event), int_map); + FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_sram), int_map); + FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_vflr), int_map); + + /* Disable misc causes */ + FM10K_WRITE_REG(hw, FM10K_EIMR, FM10K_EIMR_DISABLE(PCA_FAULT) | + FM10K_EIMR_DISABLE(THI_FAULT) | + FM10K_EIMR_DISABLE(FUM_FAULT) | + FM10K_EIMR_DISABLE(MAILBOX) | + FM10K_EIMR_DISABLE(SWITCHREADY) | + FM10K_EIMR_DISABLE(SWITCHNOTREADY) | + FM10K_EIMR_DISABLE(SRAMERROR) | + 
FM10K_EIMR_DISABLE(VFLR)); + + /* Disable ITR 0 */ + FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_MASK_SET); + FM10K_WRITE_FLUSH(hw); +} + +static void +fm10k_dev_enable_intr_vf(struct rte_eth_dev *dev) +{ + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t int_map = FM10K_INT_MAP_IMMEDIATE; + + /* Bind all local non-queue interrupt to vector 0 */ + int_map |= FM10K_MISC_VEC_ID; + + /* Only INT 0 available, other 15 are reserved. */ + FM10K_WRITE_REG(hw, FM10K_VFINT_MAP, int_map); + + /* Enable ITR 0 */ + FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_AUTOMASK | + FM10K_ITR_MASK_CLEAR); + FM10K_WRITE_FLUSH(hw); +} + +static void +fm10k_dev_disable_intr_vf(struct rte_eth_dev *dev) +{ + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t int_map = FM10K_INT_MAP_DISABLE; + + int_map |= FM10K_MISC_VEC_ID; + + /* Only INT 0 available, other 15 are reserved. */ + FM10K_WRITE_REG(hw, FM10K_VFINT_MAP, int_map); + + /* Disable ITR 0 */ + FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_MASK_SET); + FM10K_WRITE_FLUSH(hw); +} + +static int +fm10k_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) +{ + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev); + + /* Enable ITR */ + if (hw->mac.type == fm10k_mac_pf) + FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(pdev, queue_id)), + FM10K_ITR_AUTOMASK | FM10K_ITR_MASK_CLEAR); + else + FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(pdev, queue_id)), + FM10K_ITR_AUTOMASK | FM10K_ITR_MASK_CLEAR); + rte_intr_ack(&pdev->intr_handle); + return 0; +} + +static int +fm10k_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) +{ + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev); + + /* Disable ITR */ + if (hw->mac.type == fm10k_mac_pf) + FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(pdev, queue_id)), + FM10K_ITR_MASK_SET); + else + FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(pdev, queue_id)), + FM10K_ITR_MASK_SET); + return 0; +} + +static int +fm10k_dev_rxq_interrupt_setup(struct rte_eth_dev *dev) +{ + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pdev->intr_handle; + uint32_t intr_vector, vec; + uint16_t queue_id; + int result = 0; + + /* fm10k needs one separate interrupt for mailbox, + * so only drivers which support multiple interrupt vectors + * e.g. 
vfio-pci can work for fm10k interrupt mode + */ + if (!rte_intr_cap_multiple(intr_handle) || + dev->data->dev_conf.intr_conf.rxq == 0) + return result; + + intr_vector = dev->data->nb_rx_queues; + + /* disable interrupt first */ + rte_intr_disable(intr_handle); + if (hw->mac.type == fm10k_mac_pf) + fm10k_dev_disable_intr_pf(dev); + else + fm10k_dev_disable_intr_vf(dev); + + if (rte_intr_efd_enable(intr_handle, intr_vector)) { + PMD_INIT_LOG(ERR, "Failed to init event fd"); + result = -EIO; + } + + if (rte_intr_dp_is_en(intr_handle) && !result) { + intr_handle->intr_vec = rte_zmalloc("intr_vec", + dev->data->nb_rx_queues * sizeof(int), 0); + if (intr_handle->intr_vec) { + for (queue_id = 0, vec = FM10K_RX_VEC_START; + queue_id < dev->data->nb_rx_queues; + queue_id++) { + intr_handle->intr_vec[queue_id] = vec; + if (vec < intr_handle->nb_efd - 1 + + FM10K_RX_VEC_START) + vec++; + } + } else { + PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues" + " intr_vec", dev->data->nb_rx_queues); + rte_intr_efd_disable(intr_handle); + result = -ENOMEM; + } + } + + if (hw->mac.type == fm10k_mac_pf) + fm10k_dev_enable_intr_pf(dev); + else + fm10k_dev_enable_intr_vf(dev); + rte_intr_enable(intr_handle); + hw->mac.ops.update_int_moderator(hw); + return result; +} + +static int +fm10k_dev_handle_fault(struct fm10k_hw *hw, uint32_t eicr) +{ + struct fm10k_fault fault; + int err; + const char *estr = "Unknown error"; + + /* Process PCA fault */ + if (eicr & FM10K_EICR_PCA_FAULT) { + err = fm10k_get_fault(hw, FM10K_PCA_FAULT, &fault); + if (err) + goto error; + switch (fault.type) { + case PCA_NO_FAULT: + estr = "PCA_NO_FAULT"; break; + case PCA_UNMAPPED_ADDR: + estr = "PCA_UNMAPPED_ADDR"; break; + case PCA_BAD_QACCESS_PF: + estr = "PCA_BAD_QACCESS_PF"; break; + case PCA_BAD_QACCESS_VF: + estr = "PCA_BAD_QACCESS_VF"; break; + case PCA_MALICIOUS_REQ: + estr = "PCA_MALICIOUS_REQ"; break; + case PCA_POISONED_TLP: + estr = "PCA_POISONED_TLP"; break; + case PCA_TLP_ABORT: + estr = "PCA_TLP_ABORT"; break; + default: + goto error; + } + PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x", + estr, fault.func ? "VF" : "PF", fault.func, + fault.address, fault.specinfo); + } + + /* Process THI fault */ + if (eicr & FM10K_EICR_THI_FAULT) { + err = fm10k_get_fault(hw, FM10K_THI_FAULT, &fault); + if (err) + goto error; + switch (fault.type) { + case THI_NO_FAULT: + estr = "THI_NO_FAULT"; break; + case THI_MAL_DIS_Q_FAULT: + estr = "THI_MAL_DIS_Q_FAULT"; break; + default: + goto error; + } + PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x", + estr, fault.func ? 
"VF" : "PF", fault.func, + fault.address, fault.specinfo); + } + + /* Process FUM fault */ + if (eicr & FM10K_EICR_FUM_FAULT) { + err = fm10k_get_fault(hw, FM10K_FUM_FAULT, &fault); + if (err) + goto error; + switch (fault.type) { + case FUM_NO_FAULT: + estr = "FUM_NO_FAULT"; break; + case FUM_UNMAPPED_ADDR: + estr = "FUM_UNMAPPED_ADDR"; break; + case FUM_POISONED_TLP: + estr = "FUM_POISONED_TLP"; break; + case FUM_BAD_VF_QACCESS: + estr = "FUM_BAD_VF_QACCESS"; break; + case FUM_ADD_DECODE_ERR: + estr = "FUM_ADD_DECODE_ERR"; break; + case FUM_RO_ERROR: + estr = "FUM_RO_ERROR"; break; + case FUM_QPRC_CRC_ERROR: + estr = "FUM_QPRC_CRC_ERROR"; break; + case FUM_CSR_TIMEOUT: + estr = "FUM_CSR_TIMEOUT"; break; + case FUM_INVALID_TYPE: + estr = "FUM_INVALID_TYPE"; break; + case FUM_INVALID_LENGTH: + estr = "FUM_INVALID_LENGTH"; break; + case FUM_INVALID_BE: + estr = "FUM_INVALID_BE"; break; + case FUM_INVALID_ALIGN: + estr = "FUM_INVALID_ALIGN"; break; + default: + goto error; + } + PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x", + estr, fault.func ? "VF" : "PF", fault.func, + fault.address, fault.specinfo); + } + + return 0; +error: + PMD_INIT_LOG(ERR, "Failed to handle fault event."); + return err; +} + +/** + * PF interrupt handler triggered by NIC for handling specific interrupt. + * + * @param handle + * Pointer to interrupt handle. + * @param param + * The address of parameter (struct rte_eth_dev *) regsitered before. + * + * @return + * void + */ +static void +fm10k_dev_interrupt_handler_pf(void *param) +{ + struct rte_eth_dev *dev = (struct rte_eth_dev *)param; + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t cause, status; + struct fm10k_dev_info *dev_info = + FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private); + int status_mbx; + s32 err; + + if (hw->mac.type != fm10k_mac_pf) + return; + + cause = FM10K_READ_REG(hw, FM10K_EICR); + + /* Handle PCI fault cases */ + if (cause & FM10K_EICR_FAULT_MASK) { + PMD_INIT_LOG(ERR, "INT: find fault!"); + fm10k_dev_handle_fault(hw, cause); + } + + /* Handle switch up/down */ + if (cause & FM10K_EICR_SWITCHNOTREADY) + PMD_INIT_LOG(ERR, "INT: Switch is not ready"); + + if (cause & FM10K_EICR_SWITCHREADY) { + PMD_INIT_LOG(INFO, "INT: Switch is ready"); + if (dev_info->sm_down == 1) { + fm10k_mbx_lock(hw); + + /* For recreating logical ports */ + status_mbx = hw->mac.ops.update_lport_state(hw, + hw->mac.dglort_map, MAX_LPORT_NUM, 1); + if (status_mbx == FM10K_SUCCESS) + PMD_INIT_LOG(INFO, + "INT: Recreated Logical port"); + else + PMD_INIT_LOG(INFO, + "INT: Logical ports weren't recreated"); + + status_mbx = hw->mac.ops.update_xcast_mode(hw, + hw->mac.dglort_map, FM10K_XCAST_MODE_NONE); + if (status_mbx != FM10K_SUCCESS) + PMD_INIT_LOG(ERR, "Failed to set XCAST mode"); + + fm10k_mbx_unlock(hw); + + /* first clear the internal SW recording structure */ + if (!(dev->data->dev_conf.rxmode.mq_mode & + ETH_MQ_RX_VMDQ_FLAG)) + fm10k_vlan_filter_set(dev, hw->mac.default_vid, + false); + + fm10k_MAC_filter_set(dev, hw->mac.addr, false, + MAIN_VSI_POOL_NUMBER); + + /* + * Add default mac address and vlan for the logical + * ports that have been created, leave to the + * application to fully recover Rx filtering. 
+ */ + fm10k_MAC_filter_set(dev, hw->mac.addr, true, + MAIN_VSI_POOL_NUMBER); + + if (!(dev->data->dev_conf.rxmode.mq_mode & + ETH_MQ_RX_VMDQ_FLAG)) + fm10k_vlan_filter_set(dev, hw->mac.default_vid, + true); + + dev_info->sm_down = 0; + _rte_eth_dev_callback_process(dev, + RTE_ETH_EVENT_INTR_LSC, + NULL); + } + } + + /* Handle mailbox message */ + fm10k_mbx_lock(hw); + err = hw->mbx.ops.process(hw, &hw->mbx); + fm10k_mbx_unlock(hw); + + if (err == FM10K_ERR_RESET_REQUESTED) { + PMD_INIT_LOG(INFO, "INT: Switch is down"); + dev_info->sm_down = 1; + _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, + NULL); + } + + /* Handle SRAM error */ + if (cause & FM10K_EICR_SRAMERROR) { + PMD_INIT_LOG(ERR, "INT: SRAM error on PEP"); + + status = FM10K_READ_REG(hw, FM10K_SRAM_IP); + /* Write to clear pending bits */ + FM10K_WRITE_REG(hw, FM10K_SRAM_IP, status); + + /* Todo: print out error message after shared code updates */ + } + + /* Clear these 3 events if having any */ + cause &= FM10K_EICR_SWITCHNOTREADY | FM10K_EICR_MAILBOX | + FM10K_EICR_SWITCHREADY; + if (cause) + FM10K_WRITE_REG(hw, FM10K_EICR, cause); + + /* Re-enable interrupt from device side */ + FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_AUTOMASK | + FM10K_ITR_MASK_CLEAR); + /* Re-enable interrupt from host side */ + rte_intr_ack(dev->intr_handle); +} + +/** + * VF interrupt handler triggered by NIC for handling specific interrupt. + * + * @param handle + * Pointer to interrupt handle. + * @param param + * The address of parameter (struct rte_eth_dev *) regsitered before. + * + * @return + * void + */ +static void +fm10k_dev_interrupt_handler_vf(void *param) +{ + struct rte_eth_dev *dev = (struct rte_eth_dev *)param; + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct fm10k_mbx_info *mbx = &hw->mbx; + struct fm10k_dev_info *dev_info = + FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private); + const enum fm10k_mbx_state state = mbx->state; + int status_mbx; + + if (hw->mac.type != fm10k_mac_vf) + return; + + /* Handle mailbox message if lock is acquired */ + fm10k_mbx_lock(hw); + hw->mbx.ops.process(hw, &hw->mbx); + fm10k_mbx_unlock(hw); + + if (state == FM10K_STATE_OPEN && mbx->state == FM10K_STATE_CONNECT) { + PMD_INIT_LOG(INFO, "INT: Switch has gone down"); + + fm10k_mbx_lock(hw); + hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map, + MAX_LPORT_NUM, 1); + fm10k_mbx_unlock(hw); + + /* Setting reset flag */ + dev_info->sm_down = 1; + _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, + NULL); + } + + if (dev_info->sm_down == 1 && + hw->mac.dglort_map == FM10K_DGLORTMAP_ZERO) { + PMD_INIT_LOG(INFO, "INT: Switch has gone up"); + fm10k_mbx_lock(hw); + status_mbx = hw->mac.ops.update_xcast_mode(hw, + hw->mac.dglort_map, FM10K_XCAST_MODE_NONE); + if (status_mbx != FM10K_SUCCESS) + PMD_INIT_LOG(ERR, "Failed to set XCAST mode"); + fm10k_mbx_unlock(hw); + + /* first clear the internal SW recording structure */ + fm10k_vlan_filter_set(dev, hw->mac.default_vid, false); + fm10k_MAC_filter_set(dev, hw->mac.addr, false, + MAIN_VSI_POOL_NUMBER); + + /* + * Add default mac address and vlan for the logical ports that + * have been created, leave to the application to fully recover + * Rx filtering. 
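Both the PF and VF interrupt handlers in this hunk report switch-manager transitions to the application as RTE_ETH_EVENT_INTR_LSC events. A minimal sketch of how an application might subscribe to them through the public ethdev callback API (illustrative only, error handling omitted):

#include <stdio.h>
#include <rte_ethdev.h>

/* Called from the interrupt thread whenever the PMD raises an LSC event. */
static int
lsc_event_cb(uint16_t port_id, enum rte_eth_event_type event,
	     void *cb_arg, void *ret_param)
{
	struct rte_eth_link link;

	(void)cb_arg;
	(void)ret_param;
	if (event != RTE_ETH_EVENT_INTR_LSC)
		return 0;

	rte_eth_link_get_nowait(port_id, &link);
	printf("port %u link is %s\n", port_id,
	       link.link_status ? "up" : "down");
	return 0;
}

/* Register before rte_eth_dev_start(); RTE_PCI_DRV_INTR_LSC in the PMD's
 * drv_flags is what makes these events available for this driver.
 */
static void
watch_link(uint16_t port_id)
{
	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
				      lsc_event_cb, NULL);
}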
+ */ + fm10k_MAC_filter_set(dev, hw->mac.addr, true, + MAIN_VSI_POOL_NUMBER); + fm10k_vlan_filter_set(dev, hw->mac.default_vid, true); + + dev_info->sm_down = 0; + _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, + NULL); + } + + /* Re-enable interrupt from device side */ + FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_AUTOMASK | + FM10K_ITR_MASK_CLEAR); + /* Re-enable interrupt from host side */ + rte_intr_ack(dev->intr_handle); +} + +/* Mailbox message handler in VF */ +static const struct fm10k_msg_data fm10k_msgdata_vf[] = { + FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test), + FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_msg_mac_vlan_vf), + FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_msg_lport_state_vf), + FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error), +}; + +static int +fm10k_setup_mbx_service(struct fm10k_hw *hw) +{ + int err = 0; + + /* Initialize mailbox lock */ + fm10k_mbx_initlock(hw); + + /* Replace default message handler with new ones */ + if (hw->mac.type == fm10k_mac_vf) + err = hw->mbx.ops.register_handlers(&hw->mbx, fm10k_msgdata_vf); + + if (err) { + PMD_INIT_LOG(ERR, "Failed to register mailbox handler.err:%d", + err); + return err; + } + /* Connect to SM for PF device or PF for VF device */ + return hw->mbx.ops.connect(hw, &hw->mbx); +} + +static void +fm10k_close_mbx_service(struct fm10k_hw *hw) +{ + /* Disconnect from SM for PF device or PF for VF device */ + hw->mbx.ops.disconnect(hw, &hw->mbx); +} + +static void +fm10k_dev_close(struct rte_eth_dev *dev) +{ + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pdev->intr_handle; + + PMD_INIT_FUNC_TRACE(); + + fm10k_mbx_lock(hw); + hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map, + MAX_LPORT_NUM, false); + fm10k_mbx_unlock(hw); + + /* allow 100ms for device to quiesce */ + rte_delay_us(FM10K_SWITCH_QUIESCE_US); + + /* Stop mailbox service first */ + fm10k_close_mbx_service(hw); + fm10k_dev_stop(dev); + fm10k_dev_queue_release(dev); + fm10k_stop_hw(hw); + + dev->dev_ops = NULL; + dev->rx_pkt_burst = NULL; + dev->tx_pkt_burst = NULL; + + /* disable uio/vfio intr */ + rte_intr_disable(intr_handle); + + /*PF/VF has different interrupt handling mechanism */ + if (hw->mac.type == fm10k_mac_pf) { + /* disable interrupt */ + fm10k_dev_disable_intr_pf(dev); + + /* unregister callback func to eal lib */ + rte_intr_callback_unregister(intr_handle, + fm10k_dev_interrupt_handler_pf, (void *)dev); + } else { + /* disable interrupt */ + fm10k_dev_disable_intr_vf(dev); + + rte_intr_callback_unregister(intr_handle, + fm10k_dev_interrupt_handler_vf, (void *)dev); + } +} + +static const struct eth_dev_ops fm10k_eth_dev_ops = { + .dev_configure = fm10k_dev_configure, + .dev_start = fm10k_dev_start, + .dev_stop = fm10k_dev_stop, + .dev_close = fm10k_dev_close, + .promiscuous_enable = fm10k_dev_promiscuous_enable, + .promiscuous_disable = fm10k_dev_promiscuous_disable, + .allmulticast_enable = fm10k_dev_allmulticast_enable, + .allmulticast_disable = fm10k_dev_allmulticast_disable, + .stats_get = fm10k_stats_get, + .xstats_get = fm10k_xstats_get, + .xstats_get_names = fm10k_xstats_get_names, + .stats_reset = fm10k_stats_reset, + .xstats_reset = fm10k_stats_reset, + .link_update = fm10k_link_update, + .dev_infos_get = fm10k_dev_infos_get, + .dev_supported_ptypes_get = fm10k_dev_supported_ptypes_get, + .vlan_filter_set = fm10k_vlan_filter_set, + .vlan_offload_set = fm10k_vlan_offload_set, + .mac_addr_add = 
fm10k_macaddr_add, + .mac_addr_remove = fm10k_macaddr_remove, + .rx_queue_start = fm10k_dev_rx_queue_start, + .rx_queue_stop = fm10k_dev_rx_queue_stop, + .tx_queue_start = fm10k_dev_tx_queue_start, + .tx_queue_stop = fm10k_dev_tx_queue_stop, + .rx_queue_setup = fm10k_rx_queue_setup, + .rx_queue_release = fm10k_rx_queue_release, + .tx_queue_setup = fm10k_tx_queue_setup, + .tx_queue_release = fm10k_tx_queue_release, + .rx_queue_count = fm10k_dev_rx_queue_count, + .rx_descriptor_done = fm10k_dev_rx_descriptor_done, + .rx_descriptor_status = fm10k_dev_rx_descriptor_status, + .tx_descriptor_status = fm10k_dev_tx_descriptor_status, + .rx_queue_intr_enable = fm10k_dev_rx_queue_intr_enable, + .rx_queue_intr_disable = fm10k_dev_rx_queue_intr_disable, + .reta_update = fm10k_reta_update, + .reta_query = fm10k_reta_query, + .rss_hash_update = fm10k_rss_hash_update, + .rss_hash_conf_get = fm10k_rss_hash_conf_get, +}; + +static int ftag_check_handler(__rte_unused const char *key, + const char *value, __rte_unused void *opaque) +{ + if (strcmp(value, "1")) + return -1; + + return 0; +} + +static int +fm10k_check_ftag(struct rte_devargs *devargs) +{ + struct rte_kvargs *kvlist; + const char *ftag_key = "enable_ftag"; + + if (devargs == NULL) + return 0; + + kvlist = rte_kvargs_parse(devargs->args, NULL); + if (kvlist == NULL) + return 0; + + if (!rte_kvargs_count(kvlist, ftag_key)) { + rte_kvargs_free(kvlist); + return 0; + } + /* FTAG is enabled when there's key-value pair: enable_ftag=1 */ + if (rte_kvargs_process(kvlist, ftag_key, + ftag_check_handler, NULL) < 0) { + rte_kvargs_free(kvlist); + return 0; + } + rte_kvargs_free(kvlist); + + return 1; +} + +static uint16_t +fm10k_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + uint16_t nb_tx = 0; + struct fm10k_tx_queue *txq = (struct fm10k_tx_queue *)tx_queue; + + while (nb_pkts) { + uint16_t ret, num; + + num = (uint16_t)RTE_MIN(nb_pkts, txq->rs_thresh); + ret = fm10k_xmit_fixed_burst_vec(tx_queue, &tx_pkts[nb_tx], + num); + nb_tx += ret; + nb_pkts -= ret; + if (ret < num) + break; + } + + return nb_tx; +} + +static void __rte_cold +fm10k_set_tx_function(struct rte_eth_dev *dev) +{ + struct fm10k_tx_queue *txq; + int i; + int use_sse = 1; + uint16_t tx_ftag_en = 0; + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) { + /* primary process has set the ftag flag and offloads */ + txq = dev->data->tx_queues[0]; + if (fm10k_tx_vec_condition_check(txq)) { + dev->tx_pkt_burst = fm10k_xmit_pkts; + dev->tx_pkt_prepare = fm10k_prep_pkts; + PMD_INIT_LOG(DEBUG, "Use regular Tx func"); + } else { + PMD_INIT_LOG(DEBUG, "Use vector Tx func"); + dev->tx_pkt_burst = fm10k_xmit_pkts_vec; + dev->tx_pkt_prepare = NULL; + } + return; + } + + if (fm10k_check_ftag(dev->device->devargs)) + tx_ftag_en = 1; + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + txq = dev->data->tx_queues[i]; + txq->tx_ftag_en = tx_ftag_en; + /* Check if Vector Tx is satisfied */ + if (fm10k_tx_vec_condition_check(txq)) + use_sse = 0; + } + + if (use_sse) { + PMD_INIT_LOG(DEBUG, "Use vector Tx func"); + for (i = 0; i < dev->data->nb_tx_queues; i++) { + txq = dev->data->tx_queues[i]; + fm10k_txq_vec_setup(txq); + } + dev->tx_pkt_burst = fm10k_xmit_pkts_vec; + dev->tx_pkt_prepare = NULL; + } else { + dev->tx_pkt_burst = fm10k_xmit_pkts; + dev->tx_pkt_prepare = fm10k_prep_pkts; + PMD_INIT_LOG(DEBUG, "Use regular Tx func"); + } +} + +static void __rte_cold +fm10k_set_rx_function(struct rte_eth_dev *dev) +{ + struct fm10k_dev_info *dev_info = + 
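fm10k_check_ftag() above only reports FTAG as enabled when the devargs string carries exactly enable_ftag=1. A small stand-alone illustration of the same rte_kvargs pattern (hypothetical helper names and input string, not part of the patch):

#include <stdio.h>
#include <string.h>
#include <rte_kvargs.h>

static int
match_one(const char *key, const char *value, void *opaque)
{
	(void)key;
	(void)opaque;
	/* reject anything but "1", mirroring ftag_check_handler() above */
	return strcmp(value, "1") ? -1 : 0;
}

static int
devargs_flag_is_set(const char *args, const char *key)
{
	struct rte_kvargs *kvlist = rte_kvargs_parse(args, NULL);
	int set = 0;

	if (kvlist == NULL)
		return 0;
	if (rte_kvargs_count(kvlist, key) &&
	    rte_kvargs_process(kvlist, key, match_one, NULL) == 0)
		set = 1;
	rte_kvargs_free(kvlist);
	return set;
}

int main(void)
{
	/* e.g. a devargs string such as "enable_ftag=1" */
	printf("%d\n", devargs_flag_is_set("enable_ftag=1", "enable_ftag"));
	return 0;
}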
FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private); + uint16_t i, rx_using_sse; + uint16_t rx_ftag_en = 0; + + if (fm10k_check_ftag(dev->device->devargs)) + rx_ftag_en = 1; + + /* In order to allow Vector Rx there are a few configuration + * conditions to be met. + */ + if (!fm10k_rx_vec_condition_check(dev) && + dev_info->rx_vec_allowed && !rx_ftag_en) { + if (dev->data->scattered_rx) + dev->rx_pkt_burst = fm10k_recv_scattered_pkts_vec; + else + dev->rx_pkt_burst = fm10k_recv_pkts_vec; + } else if (dev->data->scattered_rx) + dev->rx_pkt_burst = fm10k_recv_scattered_pkts; + else + dev->rx_pkt_burst = fm10k_recv_pkts; + + rx_using_sse = + (dev->rx_pkt_burst == fm10k_recv_scattered_pkts_vec || + dev->rx_pkt_burst == fm10k_recv_pkts_vec); + + if (rx_using_sse) + PMD_INIT_LOG(DEBUG, "Use vector Rx func"); + else + PMD_INIT_LOG(DEBUG, "Use regular Rx func"); + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return; + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + struct fm10k_rx_queue *rxq = dev->data->rx_queues[i]; + + rxq->rx_using_sse = rx_using_sse; + rxq->rx_ftag_en = rx_ftag_en; + } +} + +static void +fm10k_params_init(struct rte_eth_dev *dev) +{ + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct fm10k_dev_info *info = + FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private); + + /* Inialize bus info. Normally we would call fm10k_get_bus_info(), but + * there is no way to get link status without reading BAR4. Until this + * works, assume we have maximum bandwidth. + * @todo - fix bus info + */ + hw->bus_caps.speed = fm10k_bus_speed_8000; + hw->bus_caps.width = fm10k_bus_width_pcie_x8; + hw->bus_caps.payload = fm10k_bus_payload_512; + hw->bus.speed = fm10k_bus_speed_8000; + hw->bus.width = fm10k_bus_width_pcie_x8; + hw->bus.payload = fm10k_bus_payload_256; + + info->rx_vec_allowed = true; + info->sm_down = false; +} + +static int +eth_fm10k_dev_init(struct rte_eth_dev *dev) +{ + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pdev->intr_handle; + int diag, i; + struct fm10k_macvlan_filter_info *macvlan; + + PMD_INIT_FUNC_TRACE(); + + dev->dev_ops = &fm10k_eth_dev_ops; + dev->rx_pkt_burst = &fm10k_recv_pkts; + dev->tx_pkt_burst = &fm10k_xmit_pkts; + dev->tx_pkt_prepare = &fm10k_prep_pkts; + + /* + * Primary process does the whole initialization, for secondary + * processes, we just select the same Rx and Tx function as primary. + */ + if (rte_eal_process_type() != RTE_PROC_PRIMARY) { + fm10k_set_rx_function(dev); + fm10k_set_tx_function(dev); + return 0; + } + + rte_eth_copy_pci_info(dev, pdev); + + macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private); + memset(macvlan, 0, sizeof(*macvlan)); + /* Vendor and Device ID need to be set before init of shared code */ + memset(hw, 0, sizeof(*hw)); + hw->device_id = pdev->id.device_id; + hw->vendor_id = pdev->id.vendor_id; + hw->subsystem_device_id = pdev->id.subsystem_device_id; + hw->subsystem_vendor_id = pdev->id.subsystem_vendor_id; + hw->revision_id = 0; + hw->hw_addr = (void *)pdev->mem_resource[0].addr; + if (hw->hw_addr == NULL) { + PMD_INIT_LOG(ERR, "Bad mem resource." 
+ " Try to blacklist unused devices."); + return -EIO; + } + + /* Store fm10k_adapter pointer */ + hw->back = dev->data->dev_private; + + /* Initialize the shared code */ + diag = fm10k_init_shared_code(hw); + if (diag != FM10K_SUCCESS) { + PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag); + return -EIO; + } + + /* Initialize parameters */ + fm10k_params_init(dev); + + /* Initialize the hw */ + diag = fm10k_init_hw(hw); + if (diag != FM10K_SUCCESS) { + PMD_INIT_LOG(ERR, "Hardware init failed: %d", diag); + return -EIO; + } + + /* Initialize MAC address(es) */ + dev->data->mac_addrs = rte_zmalloc("fm10k", + RTE_ETHER_ADDR_LEN * FM10K_MAX_MACADDR_NUM, 0); + if (dev->data->mac_addrs == NULL) { + PMD_INIT_LOG(ERR, "Cannot allocate memory for MAC addresses"); + return -ENOMEM; + } + + diag = fm10k_read_mac_addr(hw); + + rte_ether_addr_copy((const struct rte_ether_addr *)hw->mac.addr, + &dev->data->mac_addrs[0]); + + if (diag != FM10K_SUCCESS || + !rte_is_valid_assigned_ether_addr(dev->data->mac_addrs)) { + + /* Generate a random addr */ + rte_eth_random_addr(hw->mac.addr); + memcpy(hw->mac.perm_addr, hw->mac.addr, ETH_ALEN); + rte_ether_addr_copy((const struct rte_ether_addr *)hw->mac.addr, + &dev->data->mac_addrs[0]); + } + + /* Pass the information to the rte_eth_dev_close() that it should also + * release the private port resources. + */ + dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE; + + /* Reset the hw statistics */ + diag = fm10k_stats_reset(dev); + if (diag != 0) { + PMD_INIT_LOG(ERR, "Stats reset failed: %d", diag); + return diag; + } + + /* Reset the hw */ + diag = fm10k_reset_hw(hw); + if (diag != FM10K_SUCCESS) { + PMD_INIT_LOG(ERR, "Hardware reset failed: %d", diag); + return -EIO; + } + + /* Setup mailbox service */ + diag = fm10k_setup_mbx_service(hw); + if (diag != FM10K_SUCCESS) { + PMD_INIT_LOG(ERR, "Failed to setup mailbox: %d", diag); + return -EIO; + } + + /*PF/VF has different interrupt handling mechanism */ + if (hw->mac.type == fm10k_mac_pf) { + /* register callback func to eal lib */ + rte_intr_callback_register(intr_handle, + fm10k_dev_interrupt_handler_pf, (void *)dev); + + /* enable MISC interrupt */ + fm10k_dev_enable_intr_pf(dev); + } else { /* VF */ + rte_intr_callback_register(intr_handle, + fm10k_dev_interrupt_handler_vf, (void *)dev); + + fm10k_dev_enable_intr_vf(dev); + } + + /* Enable intr after callback registered */ + rte_intr_enable(intr_handle); + + hw->mac.ops.update_int_moderator(hw); + + /* Make sure Switch Manager is ready before going forward. */ + if (hw->mac.type == fm10k_mac_pf) { + bool switch_ready = false; + + for (i = 0; i < MAX_QUERY_SWITCH_STATE_TIMES; i++) { + fm10k_mbx_lock(hw); + hw->mac.ops.get_host_state(hw, &switch_ready); + fm10k_mbx_unlock(hw); + if (switch_ready == true) + break; + /* Delay some time to acquire async LPORT_MAP info. */ + rte_delay_us(WAIT_SWITCH_MSG_US); + } + + if (switch_ready == false) { + PMD_INIT_LOG(ERR, "switch is not ready"); + return -1; + } + } + + /* + * Below function will trigger operations on mailbox, acquire lock to + * avoid race condition from interrupt handler. Operations on mailbox + * FIFO will trigger interrupt to PF/SM, in which interrupt handler + * will handle and generate an interrupt to our side. Then, FIFO in + * mailbox will be touched. + */ + fm10k_mbx_lock(hw); + /* Enable port first */ + hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map, + MAX_LPORT_NUM, 1); + + /* Set unicast mode by default. App can change to other mode in other + * API func. 
+ */ + hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map, + FM10K_XCAST_MODE_NONE); + + fm10k_mbx_unlock(hw); + + /* Make sure default VID is ready before going forward. */ + if (hw->mac.type == fm10k_mac_pf) { + for (i = 0; i < MAX_QUERY_SWITCH_STATE_TIMES; i++) { + if (hw->mac.default_vid) + break; + /* Delay some time to acquire async port VLAN info. */ + rte_delay_us(WAIT_SWITCH_MSG_US); + } + + if (!hw->mac.default_vid) { + PMD_INIT_LOG(ERR, "default VID is not ready"); + return -1; + } + } + + /* Add default mac address */ + fm10k_MAC_filter_set(dev, hw->mac.addr, true, + MAIN_VSI_POOL_NUMBER); + + return 0; +} + +static int +eth_fm10k_dev_uninit(struct rte_eth_dev *dev) +{ + PMD_INIT_FUNC_TRACE(); + + /* only uninitialize in the primary process */ + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; + + /* safe to close dev here */ + fm10k_dev_close(dev); + + return 0; +} + +static int eth_fm10k_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, + struct rte_pci_device *pci_dev) +{ + return rte_eth_dev_pci_generic_probe(pci_dev, + sizeof(struct fm10k_adapter), eth_fm10k_dev_init); +} + +static int eth_fm10k_pci_remove(struct rte_pci_device *pci_dev) +{ + return rte_eth_dev_pci_generic_remove(pci_dev, eth_fm10k_dev_uninit); +} + +/* + * The set of PCI devices this driver supports. This driver will enable both PF + * and SRIOV-VF devices. + */ +static const struct rte_pci_id pci_id_fm10k_map[] = { + { RTE_PCI_DEVICE(FM10K_INTEL_VENDOR_ID, FM10K_DEV_ID_PF) }, + { RTE_PCI_DEVICE(FM10K_INTEL_VENDOR_ID, FM10K_DEV_ID_SDI_FM10420_QDA2) }, + { RTE_PCI_DEVICE(FM10K_INTEL_VENDOR_ID, FM10K_DEV_ID_VF) }, + { .vendor_id = 0, /* sentinel */ }, +}; + +static struct rte_pci_driver rte_pmd_fm10k = { + .id_table = pci_id_fm10k_map, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, + .probe = eth_fm10k_pci_probe, + .remove = eth_fm10k_pci_remove, +}; + +RTE_PMD_REGISTER_PCI(net_fm10k, rte_pmd_fm10k); +RTE_PMD_REGISTER_PCI_TABLE(net_fm10k, pci_id_fm10k_map); +RTE_PMD_REGISTER_KMOD_DEP(net_fm10k, "* igb_uio | uio_pci_generic | vfio-pci"); + +RTE_INIT(fm10k_init_log) +{ + fm10k_logtype_init = rte_log_register("pmd.net.fm10k.init"); + if (fm10k_logtype_init >= 0) + rte_log_set_level(fm10k_logtype_init, RTE_LOG_NOTICE); + fm10k_logtype_driver = rte_log_register("pmd.net.fm10k.driver"); + if (fm10k_logtype_driver >= 0) + rte_log_set_level(fm10k_logtype_driver, RTE_LOG_NOTICE); + +#ifdef RTE_LIBRTE_FM10K_DEBUG_RX + fm10k_logtype_rx = rte_log_register("pmd.net.fm10k.rx"); + if (fm10k_logtype_rx >= 0) + rte_log_set_level(fm10k_logtype_rx, RTE_LOG_DEBUG); +#endif + +#ifdef RTE_LIBRTE_FM10K_DEBUG_TX + fm10k_logtype_tx = rte_log_register("pmd.net.fm10k.tx"); + if (fm10k_logtype_tx >= 0) + rte_log_set_level(fm10k_logtype_tx, RTE_LOG_DEBUG); +#endif + +#ifdef RTE_LIBRTE_FM10K_DEBUG_TX_FREE + fm10k_logtype_tx_free = rte_log_register("pmd.net.fm10k.tx_free"); + if (fm10k_logtype_tx_free >= 0) + rte_log_set_level(fm10k_logtype_tx_free, RTE_LOG_DEBUG); +#endif +} diff --git a/src/spdk/dpdk/drivers/net/fm10k/fm10k_logs.h b/src/spdk/dpdk/drivers/net/fm10k/fm10k_logs.h new file mode 100644 index 000000000..9ae743d80 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/fm10k/fm10k_logs.h @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2013-2015 Intel Corporation + */ + +#ifndef _FM10K_LOGS_H_ +#define _FM10K_LOGS_H_ + +#include + +extern int fm10k_logtype_init; +#define PMD_INIT_LOG(level, fmt, args...) 
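The RTE_INIT constructor above registers the pmd.net.fm10k.* log types at NOTICE level; they can be raised at run time without rebuilding. A hedged sketch, assuming rte_log_set_level_pattern() is available in this DPDK version:

#include <rte_log.h>

/* Turn on verbose logging for every registered fm10k log type. */
static void enable_fm10k_debug_logs(void)
{
	rte_log_set_level_pattern("pmd.net.fm10k.*", RTE_LOG_DEBUG);
}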
\ + rte_log(RTE_LOG_ ## level, fm10k_logtype_init, \ + "%s(): " fmt "\n", __func__, ##args) + +#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>") + +#ifdef RTE_LIBRTE_FM10K_DEBUG_RX +extern int fm10k_logtype_rx; +#define PMD_RX_LOG(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, fm10k_logtype_rx, \ + "%s(): " fmt "\n", __func__, ## args) +#else +#define PMD_RX_LOG(level, fmt, args...) do { } while (0) +#endif + +#ifdef RTE_LIBRTE_FM10K_DEBUG_TX +extern int fm10k_logtype_tx; +#define PMD_TX_LOG(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, fm10k_logtype_tx, \ + "%s(): " fmt "\n", __func__, ## args) +#else +#define PMD_TX_LOG(level, fmt, args...) do { } while (0) +#endif + +#ifdef RTE_LIBRTE_FM10K_DEBUG_TX_FREE +extern int fm10k_logtype_tx_free; +#define PMD_TX_FREE_LOG(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, fm10k_logtype_tx_free, \ + "%s(): " fmt "\n", __func__, ## args) +#else +#define PMD_TX_FREE_LOG(level, fmt, args...) do { } while (0) +#endif + +extern int fm10k_logtype_driver; +#define PMD_DRV_LOG_RAW(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, fm10k_logtype_driver, "%s(): " fmt, \ + __func__, ## args) + +#define PMD_DRV_LOG(level, fmt, args...) \ + PMD_DRV_LOG_RAW(level, fmt "\n", ## args) + +#endif /* _FM10K_LOGS_H_ */ diff --git a/src/spdk/dpdk/drivers/net/fm10k/fm10k_rxtx.c b/src/spdk/dpdk/drivers/net/fm10k/fm10k_rxtx.c new file mode 100644 index 000000000..4accaa2cd --- /dev/null +++ b/src/spdk/dpdk/drivers/net/fm10k/fm10k_rxtx.c @@ -0,0 +1,728 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2013-2016 Intel Corporation + */ + +#include + +#include +#include +#include +#include "fm10k.h" +#include "base/fm10k_type.h" + +#ifdef RTE_PMD_PACKET_PREFETCH +#define rte_packet_prefetch(p) rte_prefetch1(p) +#else +#define rte_packet_prefetch(p) do {} while (0) +#endif + +#ifdef RTE_LIBRTE_FM10K_DEBUG_RX +static inline void dump_rxd(union fm10k_rx_desc *rxd) +{ + PMD_RX_LOG(DEBUG, "+----------------|----------------+"); + PMD_RX_LOG(DEBUG, "| GLORT | PKT HDR & TYPE |"); + PMD_RX_LOG(DEBUG, "| 0x%08x | 0x%08x |", rxd->d.glort, + rxd->d.data); + PMD_RX_LOG(DEBUG, "+----------------|----------------+"); + PMD_RX_LOG(DEBUG, "| VLAN & LEN | STATUS |"); + PMD_RX_LOG(DEBUG, "| 0x%08x | 0x%08x |", rxd->d.vlan_len, + rxd->d.staterr); + PMD_RX_LOG(DEBUG, "+----------------|----------------+"); + PMD_RX_LOG(DEBUG, "| RESERVED | RSS_HASH |"); + PMD_RX_LOG(DEBUG, "| 0x%08x | 0x%08x |", 0, rxd->d.rss); + PMD_RX_LOG(DEBUG, "+----------------|----------------+"); + PMD_RX_LOG(DEBUG, "| TIME TAG |"); + PMD_RX_LOG(DEBUG, "| 0x%016"PRIx64" |", rxd->q.timestamp); + PMD_RX_LOG(DEBUG, "+----------------|----------------+"); +} +#endif + +#define FM10K_TX_OFFLOAD_MASK ( \ + PKT_TX_VLAN_PKT | \ + PKT_TX_IPV6 | \ + PKT_TX_IPV4 | \ + PKT_TX_IP_CKSUM | \ + PKT_TX_L4_MASK | \ + PKT_TX_TCP_SEG) + +#define FM10K_TX_OFFLOAD_NOTSUP_MASK \ + (PKT_TX_OFFLOAD_MASK ^ FM10K_TX_OFFLOAD_MASK) + +/* @note: When this function is changed, make corresponding change to + * fm10k_dev_supported_ptypes_get() + */ +static inline void +rx_desc_to_ol_flags(struct rte_mbuf *m, const union fm10k_rx_desc *d) +{ + static const uint32_t + ptype_table[FM10K_RXD_PKTTYPE_MASK >> FM10K_RXD_PKTTYPE_SHIFT] + __rte_cache_aligned = { + [FM10K_PKTTYPE_OTHER] = RTE_PTYPE_L2_ETHER, + [FM10K_PKTTYPE_IPV4] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4, + [FM10K_PKTTYPE_IPV4_EX] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT, + [FM10K_PKTTYPE_IPV6] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6, + 
[FM10K_PKTTYPE_IPV6_EX] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6_EXT, + [FM10K_PKTTYPE_IPV4 | FM10K_PKTTYPE_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP, + [FM10K_PKTTYPE_IPV6 | FM10K_PKTTYPE_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP, + [FM10K_PKTTYPE_IPV4 | FM10K_PKTTYPE_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP, + [FM10K_PKTTYPE_IPV6 | FM10K_PKTTYPE_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP, + }; + + m->packet_type = ptype_table[(d->w.pkt_info & FM10K_RXD_PKTTYPE_MASK) + >> FM10K_RXD_PKTTYPE_SHIFT]; + + if (d->w.pkt_info & FM10K_RXD_RSSTYPE_MASK) + m->ol_flags |= PKT_RX_RSS_HASH; + + if (unlikely((d->d.staterr & + (FM10K_RXD_STATUS_IPCS | FM10K_RXD_STATUS_IPE)) == + (FM10K_RXD_STATUS_IPCS | FM10K_RXD_STATUS_IPE))) + m->ol_flags |= PKT_RX_IP_CKSUM_BAD; + else + m->ol_flags |= PKT_RX_IP_CKSUM_GOOD; + + if (unlikely((d->d.staterr & + (FM10K_RXD_STATUS_L4CS | FM10K_RXD_STATUS_L4E)) == + (FM10K_RXD_STATUS_L4CS | FM10K_RXD_STATUS_L4E))) + m->ol_flags |= PKT_RX_L4_CKSUM_BAD; + else + m->ol_flags |= PKT_RX_L4_CKSUM_GOOD; +} + +uint16_t +fm10k_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + struct rte_mbuf *mbuf; + union fm10k_rx_desc desc; + struct fm10k_rx_queue *q = rx_queue; + uint16_t count = 0; + int alloc = 0; + uint16_t next_dd; + int ret; + + next_dd = q->next_dd; + + nb_pkts = RTE_MIN(nb_pkts, q->alloc_thresh); + for (count = 0; count < nb_pkts; ++count) { + if (!(q->hw_ring[next_dd].d.staterr & FM10K_RXD_STATUS_DD)) + break; + mbuf = q->sw_ring[next_dd]; + desc = q->hw_ring[next_dd]; +#ifdef RTE_LIBRTE_FM10K_DEBUG_RX + dump_rxd(&desc); +#endif + rte_pktmbuf_pkt_len(mbuf) = desc.w.length; + rte_pktmbuf_data_len(mbuf) = desc.w.length; + + mbuf->ol_flags = 0; +#ifdef RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE + rx_desc_to_ol_flags(mbuf, &desc); +#endif + + mbuf->hash.rss = desc.d.rss; + /** + * Packets in fm10k device always carry at least one VLAN tag. + * For those packets coming in without VLAN tag, + * the port default VLAN tag will be used. + * So, always PKT_RX_VLAN flag is set and vlan_tci + * is valid for each RX packet's mbuf. + */ + mbuf->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED; + mbuf->vlan_tci = desc.w.vlan; + /** + * mbuf->vlan_tci_outer is an idle field in fm10k driver, + * so it can be selected to store sglort value. + */ + if (q->rx_ftag_en) + mbuf->vlan_tci_outer = rte_le_to_cpu_16(desc.w.sglort); + + rx_pkts[count] = mbuf; + if (++next_dd == q->nb_desc) { + next_dd = 0; + alloc = 1; + } + + /* Prefetch next mbuf while processing current one. */ + rte_prefetch0(q->sw_ring[next_dd]); + + /* + * When next RX descriptor is on a cache-line boundary, + * prefetch the next 4 RX descriptors and the next 8 pointers + * to mbufs. + */ + if ((next_dd & 0x3) == 0) { + rte_prefetch0(&q->hw_ring[next_dd]); + rte_prefetch0(&q->sw_ring[next_dd]); + } + } + + q->next_dd = next_dd; + + if ((q->next_dd > q->next_trigger) || (alloc == 1)) { + ret = rte_mempool_get_bulk(q->mp, + (void **)&q->sw_ring[q->next_alloc], + q->alloc_thresh); + + if (unlikely(ret != 0)) { + uint16_t port = q->port_id; + PMD_RX_LOG(ERR, "Failed to alloc mbuf"); + /* + * Need to restore next_dd if we cannot allocate new + * buffers to replenish the old ones. 
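When the bulk mbuf allocation fails, the receive path rewinds next_dd by the descriptors it already consumed; adding nb_desc before the modulo keeps the unsigned arithmetic from wrapping. A tiny sketch of that rewind with made-up values:

#include <assert.h>
#include <stdint.h>

/* Step an index backwards by 'count' slots on a ring of 'nb_desc' entries
 * without ever producing a negative intermediate value.
 */
static uint16_t ring_rewind(uint16_t idx, uint16_t count, uint16_t nb_desc)
{
	return (uint16_t)((idx + nb_desc - count) % nb_desc);
}

int main(void)
{
	assert(ring_rewind(5, 3, 512) == 2);    /* plain case        */
	assert(ring_rewind(1, 3, 512) == 510);  /* wraps past slot 0 */
	return 0;
}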
+ */ + q->next_dd = (q->next_dd + q->nb_desc - count) % + q->nb_desc; + rte_eth_devices[port].data->rx_mbuf_alloc_failed++; + return 0; + } + + for (; q->next_alloc <= q->next_trigger; ++q->next_alloc) { + mbuf = q->sw_ring[q->next_alloc]; + + /* setup static mbuf fields */ + fm10k_pktmbuf_reset(mbuf, q->port_id); + + /* write descriptor */ + desc.q.pkt_addr = MBUF_DMA_ADDR_DEFAULT(mbuf); + desc.q.hdr_addr = MBUF_DMA_ADDR_DEFAULT(mbuf); + q->hw_ring[q->next_alloc] = desc; + } + FM10K_PCI_REG_WRITE(q->tail_ptr, q->next_trigger); + q->next_trigger += q->alloc_thresh; + if (q->next_trigger >= q->nb_desc) { + q->next_trigger = q->alloc_thresh - 1; + q->next_alloc = 0; + } + } + + return count; +} + +uint16_t +fm10k_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + struct rte_mbuf *mbuf; + union fm10k_rx_desc desc; + struct fm10k_rx_queue *q = rx_queue; + uint16_t count = 0; + uint16_t nb_rcv, nb_seg; + int alloc = 0; + uint16_t next_dd; + struct rte_mbuf *first_seg = q->pkt_first_seg; + struct rte_mbuf *last_seg = q->pkt_last_seg; + int ret; + + next_dd = q->next_dd; + nb_rcv = 0; + + nb_seg = RTE_MIN(nb_pkts, q->alloc_thresh); + for (count = 0; count < nb_seg; count++) { + if (!(q->hw_ring[next_dd].d.staterr & FM10K_RXD_STATUS_DD)) + break; + mbuf = q->sw_ring[next_dd]; + desc = q->hw_ring[next_dd]; +#ifdef RTE_LIBRTE_FM10K_DEBUG_RX + dump_rxd(&desc); +#endif + + if (++next_dd == q->nb_desc) { + next_dd = 0; + alloc = 1; + } + + /* Prefetch next mbuf while processing current one. */ + rte_prefetch0(q->sw_ring[next_dd]); + + /* + * When next RX descriptor is on a cache-line boundary, + * prefetch the next 4 RX descriptors and the next 8 pointers + * to mbufs. + */ + if ((next_dd & 0x3) == 0) { + rte_prefetch0(&q->hw_ring[next_dd]); + rte_prefetch0(&q->sw_ring[next_dd]); + } + + /* Fill data length */ + rte_pktmbuf_data_len(mbuf) = desc.w.length; + + /* + * If this is the first buffer of the received packet, + * set the pointer to the first mbuf of the packet and + * initialize its context. + * Otherwise, update the total length and the number of segments + * of the current scattered packet, and update the pointer to + * the last mbuf of the current packet. + */ + if (!first_seg) { + first_seg = mbuf; + first_seg->pkt_len = desc.w.length; + } else { + first_seg->pkt_len = + (uint16_t)(first_seg->pkt_len + + rte_pktmbuf_data_len(mbuf)); + first_seg->nb_segs++; + last_seg->next = mbuf; + } + + /* + * If this is not the last buffer of the received packet, + * update the pointer to the last mbuf of the current scattered + * packet and continue to parse the RX ring. + */ + if (!(desc.d.staterr & FM10K_RXD_STATUS_EOP)) { + last_seg = mbuf; + continue; + } + + first_seg->ol_flags = 0; +#ifdef RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE + rx_desc_to_ol_flags(first_seg, &desc); +#endif + first_seg->hash.rss = desc.d.rss; + /** + * Packets in fm10k device always carry at least one VLAN tag. + * For those packets coming in without VLAN tag, + * the port default VLAN tag will be used. + * So, always PKT_RX_VLAN flag is set and vlan_tci + * is valid for each RX packet's mbuf. + */ + first_seg->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED; + first_seg->vlan_tci = desc.w.vlan; + /** + * mbuf->vlan_tci_outer is an idle field in fm10k driver, + * so it can be selected to store sglort value. + */ + if (q->rx_ftag_en) + first_seg->vlan_tci_outer = + rte_le_to_cpu_16(desc.w.sglort); + + /* Prefetch data of first segment, if configured to do so. 
*/ + rte_packet_prefetch((char *)first_seg->buf_addr + + first_seg->data_off); + + /* + * Store the mbuf address into the next entry of the array + * of returned packets. + */ + rx_pkts[nb_rcv++] = first_seg; + + /* + * Setup receipt context for a new packet. + */ + first_seg = NULL; + } + + q->next_dd = next_dd; + + if ((q->next_dd > q->next_trigger) || (alloc == 1)) { + ret = rte_mempool_get_bulk(q->mp, + (void **)&q->sw_ring[q->next_alloc], + q->alloc_thresh); + + if (unlikely(ret != 0)) { + uint16_t port = q->port_id; + PMD_RX_LOG(ERR, "Failed to alloc mbuf"); + /* + * Need to restore next_dd if we cannot allocate new + * buffers to replenish the old ones. + */ + q->next_dd = (q->next_dd + q->nb_desc - count) % + q->nb_desc; + rte_eth_devices[port].data->rx_mbuf_alloc_failed++; + return 0; + } + + for (; q->next_alloc <= q->next_trigger; ++q->next_alloc) { + mbuf = q->sw_ring[q->next_alloc]; + + /* setup static mbuf fields */ + fm10k_pktmbuf_reset(mbuf, q->port_id); + + /* write descriptor */ + desc.q.pkt_addr = MBUF_DMA_ADDR_DEFAULT(mbuf); + desc.q.hdr_addr = MBUF_DMA_ADDR_DEFAULT(mbuf); + q->hw_ring[q->next_alloc] = desc; + } + FM10K_PCI_REG_WRITE(q->tail_ptr, q->next_trigger); + q->next_trigger += q->alloc_thresh; + if (q->next_trigger >= q->nb_desc) { + q->next_trigger = q->alloc_thresh - 1; + q->next_alloc = 0; + } + } + + q->pkt_first_seg = first_seg; + q->pkt_last_seg = last_seg; + + return nb_rcv; +} + +uint32_t +fm10k_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id) +{ +#define FM10K_RXQ_SCAN_INTERVAL 4 + volatile union fm10k_rx_desc *rxdp; + struct fm10k_rx_queue *rxq; + uint16_t desc = 0; + + rxq = dev->data->rx_queues[rx_queue_id]; + rxdp = &rxq->hw_ring[rxq->next_dd]; + while ((desc < rxq->nb_desc) && + rxdp->w.status & rte_cpu_to_le_16(FM10K_RXD_STATUS_DD)) { + /** + * Check the DD bit of a rx descriptor of each group of 4 desc, + * to avoid checking too frequently and downgrading performance + * too much. 
+ */ + desc += FM10K_RXQ_SCAN_INTERVAL; + rxdp += FM10K_RXQ_SCAN_INTERVAL; + if (rxq->next_dd + desc >= rxq->nb_desc) + rxdp = &rxq->hw_ring[rxq->next_dd + desc - + rxq->nb_desc]; + } + + return desc; +} + +int +fm10k_dev_rx_descriptor_done(void *rx_queue, uint16_t offset) +{ + volatile union fm10k_rx_desc *rxdp; + struct fm10k_rx_queue *rxq = rx_queue; + uint16_t desc; + int ret; + + if (unlikely(offset >= rxq->nb_desc)) { + PMD_DRV_LOG(ERR, "Invalid RX descriptor offset %u", offset); + return 0; + } + + desc = rxq->next_dd + offset; + if (desc >= rxq->nb_desc) + desc -= rxq->nb_desc; + + rxdp = &rxq->hw_ring[desc]; + + ret = !!(rxdp->w.status & + rte_cpu_to_le_16(FM10K_RXD_STATUS_DD)); + + return ret; +} + +int +fm10k_dev_rx_descriptor_status(void *rx_queue, uint16_t offset) +{ + volatile union fm10k_rx_desc *rxdp; + struct fm10k_rx_queue *rxq = rx_queue; + uint16_t nb_hold, trigger_last; + uint16_t desc; + int ret; + + if (unlikely(offset >= rxq->nb_desc)) { + PMD_DRV_LOG(ERR, "Invalid RX descriptor offset %u", offset); + return 0; + } + + if (rxq->next_trigger < rxq->alloc_thresh) + trigger_last = rxq->next_trigger + + rxq->nb_desc - rxq->alloc_thresh; + else + trigger_last = rxq->next_trigger - rxq->alloc_thresh; + + if (rxq->next_dd < trigger_last) + nb_hold = rxq->next_dd + rxq->nb_desc - trigger_last; + else + nb_hold = rxq->next_dd - trigger_last; + + if (offset >= rxq->nb_desc - nb_hold) + return RTE_ETH_RX_DESC_UNAVAIL; + + desc = rxq->next_dd + offset; + if (desc >= rxq->nb_desc) + desc -= rxq->nb_desc; + + rxdp = &rxq->hw_ring[desc]; + + ret = !!(rxdp->w.status & + rte_cpu_to_le_16(FM10K_RXD_STATUS_DD)); + + return ret; +} + +int +fm10k_dev_tx_descriptor_status(void *tx_queue, uint16_t offset) +{ + volatile struct fm10k_tx_desc *txdp; + struct fm10k_tx_queue *txq = tx_queue; + uint16_t desc; + uint16_t next_rs = txq->nb_desc; + struct fifo rs_tracker = txq->rs_tracker; + struct fifo *r = &rs_tracker; + + if (unlikely(offset >= txq->nb_desc)) + return -EINVAL; + + desc = txq->next_free + offset; + /* go to next desc that has the RS bit */ + desc = (desc / txq->rs_thresh + 1) * + txq->rs_thresh - 1; + + if (desc >= txq->nb_desc) { + desc -= txq->nb_desc; + if (desc >= txq->nb_desc) + desc -= txq->nb_desc; + } + + r->head = r->list; + for ( ; r->head != r->endp; ) { + if (*r->head >= desc && *r->head < next_rs) + next_rs = *r->head; + ++r->head; + } + + txdp = &txq->hw_ring[next_rs]; + if (txdp->flags & FM10K_TXD_FLAG_DONE) + return RTE_ETH_TX_DESC_DONE; + + return RTE_ETH_TX_DESC_FULL; +} + +/* + * Free multiple TX mbuf at a time if they are in the same pool + * + * @txep: software desc ring index that starts to free + * @num: number of descs to free + * + */ +static inline void tx_free_bulk_mbuf(struct rte_mbuf **txep, int num) +{ + struct rte_mbuf *m, *free[RTE_FM10K_TX_MAX_FREE_BUF_SZ]; + int i; + int nb_free = 0; + + if (unlikely(num == 0)) + return; + + m = rte_pktmbuf_prefree_seg(txep[0]); + if (likely(m != NULL)) { + free[0] = m; + nb_free = 1; + for (i = 1; i < num; i++) { + m = rte_pktmbuf_prefree_seg(txep[i]); + if (likely(m != NULL)) { + if (likely(m->pool == free[0]->pool)) + free[nb_free++] = m; + else { + rte_mempool_put_bulk(free[0]->pool, + (void *)free, nb_free); + free[0] = m; + nb_free = 1; + } + } + txep[i] = NULL; + } + rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free); + } else { + for (i = 1; i < num; i++) { + m = rte_pktmbuf_prefree_seg(txep[i]); + if (m != NULL) + rte_mempool_put(m->pool, m); + txep[i] = NULL; + } + } +} + +static inline void 
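The Rx/Tx descriptor-status callbacks above are normally reached through the generic ethdev wrappers rather than called directly. A short usage sketch (hypothetical port and queue numbers):

#include <stdio.h>
#include <rte_ethdev.h>

/* Report how a descriptor 'offset' slots ahead of the next Rx/Tx position
 * looks to the driver; offsets beyond the ring size are rejected by the PMD.
 */
static void
probe_descriptors(uint16_t port_id, uint16_t queue_id, uint16_t offset)
{
	int rx = rte_eth_rx_descriptor_status(port_id, queue_id, offset);
	int tx = rte_eth_tx_descriptor_status(port_id, queue_id, offset);

	printf("rx: %s\n", rx == RTE_ETH_RX_DESC_DONE ? "done" :
	       rx == RTE_ETH_RX_DESC_AVAIL ? "available to hw" : "unavailable/error");
	printf("tx: %s\n", tx == RTE_ETH_TX_DESC_DONE ? "done" :
	       tx == RTE_ETH_TX_DESC_FULL ? "still in flight" : "unavailable/error");
}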
tx_free_descriptors(struct fm10k_tx_queue *q) +{ + uint16_t next_rs, count = 0; + + next_rs = fifo_peek(&q->rs_tracker); + if (!(q->hw_ring[next_rs].flags & FM10K_TXD_FLAG_DONE)) + return; + + /* the DONE flag is set on this descriptor so remove the ID + * from the RS bit tracker and free the buffers */ + fifo_remove(&q->rs_tracker); + + /* wrap around? if so, free buffers from last_free up to but NOT + * including nb_desc */ + if (q->last_free > next_rs) { + count = q->nb_desc - q->last_free; + tx_free_bulk_mbuf(&q->sw_ring[q->last_free], count); + q->last_free = 0; + } + + /* adjust free descriptor count before the next loop */ + q->nb_free += count + (next_rs + 1 - q->last_free); + + /* free buffers from last_free, up to and including next_rs */ + if (q->last_free <= next_rs) { + count = next_rs - q->last_free + 1; + tx_free_bulk_mbuf(&q->sw_ring[q->last_free], count); + q->last_free += count; + } + + if (q->last_free == q->nb_desc) + q->last_free = 0; +} + +static inline void tx_xmit_pkt(struct fm10k_tx_queue *q, struct rte_mbuf *mb) +{ + uint16_t last_id; + uint8_t flags, hdrlen; + + /* always set the LAST flag on the last descriptor used to + * transmit the packet */ + flags = FM10K_TXD_FLAG_LAST; + last_id = q->next_free + mb->nb_segs - 1; + if (last_id >= q->nb_desc) + last_id = last_id - q->nb_desc; + + /* but only set the RS flag on the last descriptor if rs_thresh + * descriptors will be used since the RS flag was last set */ + if ((q->nb_used + mb->nb_segs) >= q->rs_thresh) { + flags |= FM10K_TXD_FLAG_RS; + fifo_insert(&q->rs_tracker, last_id); + q->nb_used = 0; + } else { + q->nb_used = q->nb_used + mb->nb_segs; + } + + q->nb_free -= mb->nb_segs; + + q->hw_ring[q->next_free].flags = 0; + if (q->tx_ftag_en) + q->hw_ring[q->next_free].flags |= FM10K_TXD_FLAG_FTAG; + /* set checksum flags on first descriptor of packet. SCTP checksum + * offload is not supported, but we do not explicitly check for this + * case in favor of greatly simplified processing. */ + if (mb->ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK | PKT_TX_TCP_SEG)) + q->hw_ring[q->next_free].flags |= FM10K_TXD_FLAG_CSUM; + + /* set vlan if requested */ + if (mb->ol_flags & PKT_TX_VLAN_PKT) + q->hw_ring[q->next_free].vlan = mb->vlan_tci; + else + q->hw_ring[q->next_free].vlan = 0; + + q->sw_ring[q->next_free] = mb; + q->hw_ring[q->next_free].buffer_addr = + rte_cpu_to_le_64(MBUF_DMA_ADDR(mb)); + q->hw_ring[q->next_free].buflen = + rte_cpu_to_le_16(rte_pktmbuf_data_len(mb)); + + if (mb->ol_flags & PKT_TX_TCP_SEG) { + hdrlen = mb->l2_len + mb->l3_len + mb->l4_len; + hdrlen += (mb->ol_flags & PKT_TX_TUNNEL_MASK) ? 
+ mb->outer_l2_len + mb->outer_l3_len : 0; + if (q->hw_ring[q->next_free].flags & FM10K_TXD_FLAG_FTAG) + hdrlen += sizeof(struct fm10k_ftag); + + if (likely((hdrlen >= FM10K_TSO_MIN_HEADERLEN) && + (hdrlen <= FM10K_TSO_MAX_HEADERLEN) && + (mb->tso_segsz >= FM10K_TSO_MINMSS))) { + q->hw_ring[q->next_free].mss = mb->tso_segsz; + q->hw_ring[q->next_free].hdrlen = hdrlen; + } + } + + if (++q->next_free == q->nb_desc) + q->next_free = 0; + + /* fill up the rings */ + for (mb = mb->next; mb != NULL; mb = mb->next) { + q->sw_ring[q->next_free] = mb; + q->hw_ring[q->next_free].buffer_addr = + rte_cpu_to_le_64(MBUF_DMA_ADDR(mb)); + q->hw_ring[q->next_free].buflen = + rte_cpu_to_le_16(rte_pktmbuf_data_len(mb)); + q->hw_ring[q->next_free].flags = 0; + if (++q->next_free == q->nb_desc) + q->next_free = 0; + } + + q->hw_ring[last_id].flags |= flags; +} + +uint16_t +fm10k_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + struct fm10k_tx_queue *q = tx_queue; + struct rte_mbuf *mb; + uint16_t count; + + for (count = 0; count < nb_pkts; ++count) { + mb = tx_pkts[count]; + + /* running low on descriptors? try to free some... */ + if (q->nb_free < q->free_thresh) + tx_free_descriptors(q); + + /* make sure there are enough free descriptors to transmit the + * entire packet before doing anything */ + if (q->nb_free < mb->nb_segs) + break; + + /* sanity check to make sure the mbuf is valid */ + if ((mb->nb_segs == 0) || + ((mb->nb_segs > 1) && (mb->next == NULL))) + break; + + /* process the packet */ + tx_xmit_pkt(q, mb); + } + + /* update the tail pointer if any packets were processed */ + if (likely(count > 0)) + FM10K_PCI_REG_WRITE(q->tail_ptr, q->next_free); + + return count; +} + +uint16_t +fm10k_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + int i, ret; + struct rte_mbuf *m; + + for (i = 0; i < nb_pkts; i++) { + m = tx_pkts[i]; + + if ((m->ol_flags & PKT_TX_TCP_SEG) && + (m->tso_segsz < FM10K_TSO_MINMSS)) { + rte_errno = EINVAL; + return i; + } + + if (m->ol_flags & FM10K_TX_OFFLOAD_NOTSUP_MASK) { + rte_errno = ENOTSUP; + return i; + } + +#ifdef RTE_LIBRTE_ETHDEV_DEBUG + ret = rte_validate_tx_offload(m); + if (ret != 0) { + rte_errno = -ret; + return i; + } +#endif + ret = rte_net_intel_cksum_prepare(m); + if (ret != 0) { + rte_errno = -ret; + return i; + } + } + + return i; +} diff --git a/src/spdk/dpdk/drivers/net/fm10k/fm10k_rxtx_vec.c b/src/spdk/dpdk/drivers/net/fm10k/fm10k_rxtx_vec.c new file mode 100644 index 000000000..eff3933b5 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/fm10k/fm10k_rxtx_vec.c @@ -0,0 +1,892 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2013-2015 Intel Corporation + */ + +#include + +#include +#include +#include "fm10k.h" +#include "base/fm10k_type.h" + +#include + +#ifndef __INTEL_COMPILER +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif + +static void +fm10k_reset_tx_queue(struct fm10k_tx_queue *txq); + +/* Handling the offload flags (olflags) field takes computation + * time when receiving packets. Therefore we provide a flag to disable + * the processing of the olflags field when they are not needed. 
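fm10k_prep_pkts() is installed as the tx_pkt_prepare callback, so an application requesting TSO or checksum offload is expected to run its burst through rte_eth_tx_prepare() before rte_eth_tx_burst(). A minimal sketch (hypothetical wrapper, error handling trimmed):

#include <rte_ethdev.h>
#include <rte_mbuf.h>

/* Validate offload requests (and fix up pseudo-header checksums) before
 * handing the burst to the PMD; only the prepared prefix is transmitted.
 */
static uint16_t
send_burst(uint16_t port_id, uint16_t queue_id,
	   struct rte_mbuf **pkts, uint16_t nb_pkts)
{
	uint16_t nb_prep = rte_eth_tx_prepare(port_id, queue_id, pkts, nb_pkts);

	/* On failure rte_eth_tx_prepare() stops early and sets rte_errno
	 * (EINVAL/ENOTSUP in fm10k_prep_pkts); send only the prepared prefix.
	 */
	return rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
}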
This + * gives improved performance, at the cost of losing the offload info + * in the received packet + */ +#ifdef RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE + +/* Vlan present flag shift */ +#define VP_SHIFT (2) +/* L3 type shift */ +#define L3TYPE_SHIFT (4) +/* L4 type shift */ +#define L4TYPE_SHIFT (7) +/* HBO flag shift */ +#define HBOFLAG_SHIFT (10) +/* RXE flag shift */ +#define RXEFLAG_SHIFT (13) +/* IPE/L4E flag shift */ +#define L3L4EFLAG_SHIFT (14) +/* shift PKT_RX_L4_CKSUM_GOOD into one byte by 1 bit */ +#define CKSUM_SHIFT (1) + +static inline void +fm10k_desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts) +{ + __m128i ptype0, ptype1, vtag0, vtag1, eflag0, eflag1, cksumflag; + union { + uint16_t e[4]; + uint64_t dword; + } vol; + + const __m128i pkttype_msk = _mm_set_epi16( + 0x0000, 0x0000, 0x0000, 0x0000, + PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, + PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, + PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, + PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED); + + /* mask everything except rss type */ + const __m128i rsstype_msk = _mm_set_epi16( + 0x0000, 0x0000, 0x0000, 0x0000, + 0x000F, 0x000F, 0x000F, 0x000F); + + /* mask for HBO and RXE flag flags */ + const __m128i rxe_msk = _mm_set_epi16( + 0x0000, 0x0000, 0x0000, 0x0000, + 0x0001, 0x0001, 0x0001, 0x0001); + + /* mask the lower byte of ol_flags */ + const __m128i ol_flags_msk = _mm_set_epi16( + 0x0000, 0x0000, 0x0000, 0x0000, + 0x00FF, 0x00FF, 0x00FF, 0x00FF); + + const __m128i l3l4cksum_flag = _mm_set_epi8(0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + (PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD) >> CKSUM_SHIFT, + (PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD) >> CKSUM_SHIFT, + (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD) >> CKSUM_SHIFT, + (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> CKSUM_SHIFT); + + const __m128i rxe_flag = _mm_set_epi8(0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0); + + /* map rss type to rss hash flag */ + const __m128i rss_flags = _mm_set_epi8(0, 0, 0, 0, + 0, 0, 0, PKT_RX_RSS_HASH, + PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH, 0, + PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, 0); + + /* Calculate RSS_hash and Vlan fields */ + ptype0 = _mm_unpacklo_epi16(descs[0], descs[1]); + ptype1 = _mm_unpacklo_epi16(descs[2], descs[3]); + vtag0 = _mm_unpackhi_epi16(descs[0], descs[1]); + vtag1 = _mm_unpackhi_epi16(descs[2], descs[3]); + + ptype0 = _mm_unpacklo_epi32(ptype0, ptype1); + ptype0 = _mm_and_si128(ptype0, rsstype_msk); + ptype0 = _mm_shuffle_epi8(rss_flags, ptype0); + + vtag1 = _mm_unpacklo_epi32(vtag0, vtag1); + eflag0 = vtag1; + cksumflag = vtag1; + vtag1 = _mm_srli_epi16(vtag1, VP_SHIFT); + vtag1 = _mm_and_si128(vtag1, pkttype_msk); + + vtag1 = _mm_or_si128(ptype0, vtag1); + + /* Process err flags, simply set RECIP_ERR bit if HBO/IXE is set */ + eflag1 = _mm_srli_epi16(eflag0, RXEFLAG_SHIFT); + eflag0 = _mm_srli_epi16(eflag0, HBOFLAG_SHIFT); + eflag0 = _mm_or_si128(eflag0, eflag1); + eflag0 = _mm_and_si128(eflag0, rxe_msk); + eflag0 = _mm_shuffle_epi8(rxe_flag, eflag0); + + vtag1 = _mm_or_si128(eflag0, vtag1); + + /* Process L4/L3 checksum error flags */ + cksumflag = _mm_srli_epi16(cksumflag, L3L4EFLAG_SHIFT); + cksumflag = _mm_shuffle_epi8(l3l4cksum_flag, cksumflag); + + /* clean the higher byte and shift back the flag bits */ + cksumflag = _mm_and_si128(cksumflag, ol_flags_msk); + cksumflag = _mm_slli_epi16(cksumflag, CKSUM_SHIFT); + vtag1 = _mm_or_si128(cksumflag, vtag1); + + vol.dword = _mm_cvtsi128_si64(vtag1); + + rx_pkts[0]->ol_flags = vol.e[0]; + rx_pkts[1]->ol_flags = vol.e[1]; + 
rx_pkts[2]->ol_flags = vol.e[2]; + rx_pkts[3]->ol_flags = vol.e[3]; +} + +/* @note: When this function is changed, make corresponding change to + * fm10k_dev_supported_ptypes_get(). + */ +static inline void +fm10k_desc_to_pktype_v(__m128i descs[4], struct rte_mbuf **rx_pkts) +{ + __m128i l3l4type0, l3l4type1, l3type, l4type; + union { + uint16_t e[4]; + uint64_t dword; + } vol; + + /* L3 pkt type mask Bit4 to Bit6 */ + const __m128i l3type_msk = _mm_set_epi16( + 0x0000, 0x0000, 0x0000, 0x0000, + 0x0070, 0x0070, 0x0070, 0x0070); + + /* L4 pkt type mask Bit7 to Bit9 */ + const __m128i l4type_msk = _mm_set_epi16( + 0x0000, 0x0000, 0x0000, 0x0000, + 0x0380, 0x0380, 0x0380, 0x0380); + + /* convert RRC l3 type to mbuf format */ + const __m128i l3type_flags = _mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, RTE_PTYPE_L3_IPV6_EXT, + RTE_PTYPE_L3_IPV6, RTE_PTYPE_L3_IPV4_EXT, + RTE_PTYPE_L3_IPV4, 0); + + /* Convert RRC l4 type to mbuf format l4type_flags shift-left 8 bits + * to fill into8 bits length. + */ + const __m128i l4type_flags = _mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0, 0, + RTE_PTYPE_TUNNEL_GENEVE >> 8, + RTE_PTYPE_TUNNEL_NVGRE >> 8, + RTE_PTYPE_TUNNEL_VXLAN >> 8, + RTE_PTYPE_TUNNEL_GRE >> 8, + RTE_PTYPE_L4_UDP >> 8, + RTE_PTYPE_L4_TCP >> 8, + 0); + + l3l4type0 = _mm_unpacklo_epi16(descs[0], descs[1]); + l3l4type1 = _mm_unpacklo_epi16(descs[2], descs[3]); + l3l4type0 = _mm_unpacklo_epi32(l3l4type0, l3l4type1); + + l3type = _mm_and_si128(l3l4type0, l3type_msk); + l4type = _mm_and_si128(l3l4type0, l4type_msk); + + l3type = _mm_srli_epi16(l3type, L3TYPE_SHIFT); + l4type = _mm_srli_epi16(l4type, L4TYPE_SHIFT); + + l3type = _mm_shuffle_epi8(l3type_flags, l3type); + /* l4type_flags shift-left for 8 bits, need shift-right back */ + l4type = _mm_shuffle_epi8(l4type_flags, l4type); + + l4type = _mm_slli_epi16(l4type, 8); + l3l4type0 = _mm_or_si128(l3type, l4type); + vol.dword = _mm_cvtsi128_si64(l3l4type0); + + rx_pkts[0]->packet_type = vol.e[0]; + rx_pkts[1]->packet_type = vol.e[1]; + rx_pkts[2]->packet_type = vol.e[2]; + rx_pkts[3]->packet_type = vol.e[3]; +} +#else +#define fm10k_desc_to_olflags_v(desc, rx_pkts) do {} while (0) +#define fm10k_desc_to_pktype_v(desc, rx_pkts) do {} while (0) +#endif + +int __rte_cold +fm10k_rx_vec_condition_check(struct rte_eth_dev *dev) +{ +#ifndef RTE_LIBRTE_IEEE1588 + struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode; + struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf; + +#ifndef RTE_FM10K_RX_OLFLAGS_ENABLE + /* whithout rx ol_flags, no VP flag report */ + if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) + return -1; +#endif + + /* no fdir support */ + if (fconf->mode != RTE_FDIR_MODE_NONE) + return -1; + + /* no header split support */ + if (rxmode->offloads & DEV_RX_OFFLOAD_HEADER_SPLIT) + return -1; + + return 0; +#else + RTE_SET_USED(dev); + return -1; +#endif +} + +int __rte_cold +fm10k_rxq_vec_setup(struct fm10k_rx_queue *rxq) +{ + uintptr_t p; + struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */ + + mb_def.nb_segs = 1; + /* data_off will be ajusted after new mbuf allocated for 512-byte + * alignment. 
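fm10k_desc_to_olflags_v() and fm10k_desc_to_pktype_v() both use _mm_shuffle_epi8 as a 16-entry lookup table, with the masked field value acting as the byte index into a constant register. A stand-alone illustration of that trick, not fm10k-specific (compile with SSSE3 enabled):

#include <stdio.h>
#include <stdint.h>
#include <tmmintrin.h>   /* SSSE3: _mm_shuffle_epi8 */

int main(void)
{
	/* 16-entry byte table: entry i holds the "flag" for field value i */
	const __m128i table = _mm_setr_epi8(0, 10, 20, 30, 40, 50, 60, 70,
					    80, 90, 100, 110, 120, 125, 126, 127);
	/* four field values (0..15), one in each 32-bit lane's low byte */
	__m128i idx = _mm_setr_epi32(3, 0, 15, 7);
	__m128i out = _mm_shuffle_epi8(table, idx);

	uint8_t res[16];
	_mm_storeu_si128((__m128i *)res, out);
	/* prints "30 0 127 70": each lane was translated through the table */
	printf("%d %d %d %d\n", res[0], res[4], res[8], res[12]);
	return 0;
}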
+ */ + mb_def.data_off = RTE_PKTMBUF_HEADROOM; + mb_def.port = rxq->port_id; + rte_mbuf_refcnt_set(&mb_def, 1); + + /* prevent compiler reordering: rearm_data covers previous fields */ + rte_compiler_barrier(); + p = (uintptr_t)&mb_def.rearm_data; + rxq->mbuf_initializer = *(uint64_t *)p; + return 0; +} + +static inline void +fm10k_rxq_rearm(struct fm10k_rx_queue *rxq) +{ + int i; + uint16_t rx_id; + volatile union fm10k_rx_desc *rxdp; + struct rte_mbuf **mb_alloc = &rxq->sw_ring[rxq->rxrearm_start]; + struct rte_mbuf *mb0, *mb1; + __m128i head_off = _mm_set_epi64x( + RTE_PKTMBUF_HEADROOM + FM10K_RX_DATABUF_ALIGN - 1, + RTE_PKTMBUF_HEADROOM + FM10K_RX_DATABUF_ALIGN - 1); + __m128i dma_addr0, dma_addr1; + /* Rx buffer need to be aligned with 512 byte */ + const __m128i hba_msk = _mm_set_epi64x(0, + UINT64_MAX - FM10K_RX_DATABUF_ALIGN + 1); + + rxdp = rxq->hw_ring + rxq->rxrearm_start; + + /* Pull 'n' more MBUFs into the software ring */ + if (rte_mempool_get_bulk(rxq->mp, + (void *)mb_alloc, + RTE_FM10K_RXQ_REARM_THRESH) < 0) { + dma_addr0 = _mm_setzero_si128(); + /* Clean up all the HW/SW ring content */ + for (i = 0; i < RTE_FM10K_RXQ_REARM_THRESH; i++) { + mb_alloc[i] = &rxq->fake_mbuf; + _mm_store_si128((__m128i *)&rxdp[i].q, + dma_addr0); + } + + rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed += + RTE_FM10K_RXQ_REARM_THRESH; + return; + } + + /* Initialize the mbufs in vector, process 2 mbufs in one loop */ + for (i = 0; i < RTE_FM10K_RXQ_REARM_THRESH; i += 2, mb_alloc += 2) { + __m128i vaddr0, vaddr1; + uintptr_t p0, p1; + + mb0 = mb_alloc[0]; + mb1 = mb_alloc[1]; + + /* Flush mbuf with pkt template. + * Data to be rearmed is 6 bytes long. + */ + p0 = (uintptr_t)&mb0->rearm_data; + *(uint64_t *)p0 = rxq->mbuf_initializer; + p1 = (uintptr_t)&mb1->rearm_data; + *(uint64_t *)p1 = rxq->mbuf_initializer; + + /* load buf_addr(lo 64bit) and buf_iova(hi 64bit) */ + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_iova) != + offsetof(struct rte_mbuf, buf_addr) + 8); + vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr); + vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr); + + /* convert pa to dma_addr hdr/data */ + dma_addr0 = _mm_unpackhi_epi64(vaddr0, vaddr0); + dma_addr1 = _mm_unpackhi_epi64(vaddr1, vaddr1); + + /* add headroom to pa values */ + dma_addr0 = _mm_add_epi64(dma_addr0, head_off); + dma_addr1 = _mm_add_epi64(dma_addr1, head_off); + + /* Do 512 byte alignment to satisfy HW requirement, in the + * meanwhile, set Header Buffer Address to zero. + */ + dma_addr0 = _mm_and_si128(dma_addr0, hba_msk); + dma_addr1 = _mm_and_si128(dma_addr1, hba_msk); + + /* flush desc with pa dma_addr */ + _mm_store_si128((__m128i *)&rxdp++->q, dma_addr0); + _mm_store_si128((__m128i *)&rxdp++->q, dma_addr1); + + /* enforce 512B alignment on default Rx virtual addresses */ + mb0->data_off = (uint16_t)(RTE_PTR_ALIGN((char *)mb0->buf_addr + + RTE_PKTMBUF_HEADROOM, FM10K_RX_DATABUF_ALIGN) + - (char *)mb0->buf_addr); + mb1->data_off = (uint16_t)(RTE_PTR_ALIGN((char *)mb1->buf_addr + + RTE_PKTMBUF_HEADROOM, FM10K_RX_DATABUF_ALIGN) + - (char *)mb1->buf_addr); + } + + rxq->rxrearm_start += RTE_FM10K_RXQ_REARM_THRESH; + if (rxq->rxrearm_start >= rxq->nb_desc) + rxq->rxrearm_start = 0; + + rxq->rxrearm_nb -= RTE_FM10K_RXQ_REARM_THRESH; + + rx_id = (uint16_t)((rxq->rxrearm_start == 0) ? 
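The rearm path rounds each buffer's DMA address up to the device's 512-byte boundary by adding the headroom plus (align - 1) and masking off the low bits, and applies the same rounding to data_off so the virtual and physical views stay in sync. A sketch of that arithmetic, assuming the 512-byte requirement implied by FM10K_RX_DATABUF_ALIGN and a hypothetical headroom value:

#include <assert.h>
#include <stdint.h>

#define DATABUF_ALIGN 512u   /* assumed hardware Rx buffer alignment */
#define HEADROOM      128u   /* assumed mbuf headroom                */

/* Round (base + HEADROOM) up to the next DATABUF_ALIGN boundary. */
static uint64_t rx_data_addr(uint64_t base)
{
	return (base + HEADROOM + DATABUF_ALIGN - 1) &
	       ~(uint64_t)(DATABUF_ALIGN - 1);
}

int main(void)
{
	assert(rx_data_addr(0x1000) == 0x1200); /* 0x1080 rounds up to 0x1200 */
	assert(rx_data_addr(0x11f0) == 0x1400); /* 0x1270 rounds up to 0x1400 */
	return 0;
}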
+ (rxq->nb_desc - 1) : (rxq->rxrearm_start - 1)); + + /* Update the tail pointer on the NIC */ + FM10K_PCI_REG_WRITE(rxq->tail_ptr, rx_id); +} + +void __rte_cold +fm10k_rx_queue_release_mbufs_vec(struct fm10k_rx_queue *rxq) +{ + const unsigned mask = rxq->nb_desc - 1; + unsigned i; + + if (rxq->sw_ring == NULL || rxq->rxrearm_nb >= rxq->nb_desc) + return; + + /* free all mbufs that are valid in the ring */ + if (rxq->rxrearm_nb == 0) { + for (i = 0; i < rxq->nb_desc; i++) + if (rxq->sw_ring[i] != NULL) + rte_pktmbuf_free_seg(rxq->sw_ring[i]); + } else { + for (i = rxq->next_dd; i != rxq->rxrearm_start; + i = (i + 1) & mask) + rte_pktmbuf_free_seg(rxq->sw_ring[i]); + } + rxq->rxrearm_nb = rxq->nb_desc; + + /* set all entries to NULL */ + memset(rxq->sw_ring, 0, sizeof(rxq->sw_ring[0]) * rxq->nb_desc); +} + +static inline uint16_t +fm10k_recv_raw_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts, uint8_t *split_packet) +{ + volatile union fm10k_rx_desc *rxdp; + struct rte_mbuf **mbufp; + uint16_t nb_pkts_recd; + int pos; + struct fm10k_rx_queue *rxq = rx_queue; + uint64_t var; + __m128i shuf_msk; + __m128i dd_check, eop_check; + uint16_t next_dd; + + next_dd = rxq->next_dd; + + /* Just the act of getting into the function from the application is + * going to cost about 7 cycles + */ + rxdp = rxq->hw_ring + next_dd; + + rte_prefetch0(rxdp); + + /* See if we need to rearm the RX queue - gives the prefetch a bit + * of time to act + */ + if (rxq->rxrearm_nb > RTE_FM10K_RXQ_REARM_THRESH) + fm10k_rxq_rearm(rxq); + + /* Before we start moving massive data around, check to see if + * there is actually a packet available + */ + if (!(rxdp->d.staterr & FM10K_RXD_STATUS_DD)) + return 0; + + /* Vecotr RX will process 4 packets at a time, strip the unaligned + * tails in case it's not multiple of 4. + */ + nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_FM10K_DESCS_PER_LOOP); + + /* 4 packets DD mask */ + dd_check = _mm_set_epi64x(0x0000000100000001LL, 0x0000000100000001LL); + + /* 4 packets EOP mask */ + eop_check = _mm_set_epi64x(0x0000000200000002LL, 0x0000000200000002LL); + + /* mask to shuffle from desc. to mbuf */ + shuf_msk = _mm_set_epi8( + 7, 6, 5, 4, /* octet 4~7, 32bits rss */ + 15, 14, /* octet 14~15, low 16 bits vlan_macip */ + 13, 12, /* octet 12~13, 16 bits data_len */ + 0xFF, 0xFF, /* skip high 16 bits pkt_len, zero out */ + 13, 12, /* octet 12~13, low 16 bits pkt_len */ + 0xFF, 0xFF, /* skip high 16 bits pkt_type */ + 0xFF, 0xFF /* Skip pkt_type field in shuffle operation */ + ); + /* + * Compile-time verify the shuffle mask + * NOTE: some field positions already verified above, but duplicated + * here for completeness in case of future modifications. + */ + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) != + offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4); + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) != + offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8); + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, vlan_tci) != + offsetof(struct rte_mbuf, rx_descriptor_fields1) + 10); + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) != + offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12); + + /* Cache is empty -> need to scan the buffer rings, but first move + * the next 'n' mbufs into the cache + */ + mbufp = &rxq->sw_ring[next_dd]; + + /* A. load 4 packet in one loop + * [A*. mask out 4 unused dirty field in desc] + * B. copy 4 mbuf point from swring to rx_pkts + * C. calc the number of DD bits among the 4 packets + * [C*. 
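fm10k_rx_queue_release_mbufs_vec() frees only the occupied slots, walking from next_dd up to (but not including) rxrearm_start and relying on the ring size being a power of two so the index can wrap with a simple mask. A tiny illustration of that traversal (hypothetical ring size):

#include <stdio.h>

/* Visit every occupied slot of a power-of-two ring, wrapping with a mask. */
static void walk_occupied(unsigned int start, unsigned int end,
			  unsigned int nb_desc)
{
	const unsigned int mask = nb_desc - 1; /* nb_desc must be a power of two */
	unsigned int i;

	for (i = start; i != end; i = (i + 1) & mask)
		printf("slot %u is still holding an mbuf\n", i);
}

int main(void)
{
	walk_occupied(6, 2, 8);   /* visits slots 6, 7, 0, 1 */
	return 0;
}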
extract the end-of-packet bit, if requested] + * D. fill info. from desc to mbuf + */ + for (pos = 0, nb_pkts_recd = 0; pos < nb_pkts; + pos += RTE_FM10K_DESCS_PER_LOOP, + rxdp += RTE_FM10K_DESCS_PER_LOOP) { + __m128i descs0[RTE_FM10K_DESCS_PER_LOOP]; + __m128i pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4; + __m128i zero, staterr, sterr_tmp1, sterr_tmp2; + __m128i mbp1; + /* 2 64 bit or 4 32 bit mbuf pointers in one XMM reg. */ +#if defined(RTE_ARCH_X86_64) + __m128i mbp2; +#endif + + /* B.1 load 2 (64 bit) or 4 (32 bit) mbuf points */ + mbp1 = _mm_loadu_si128((__m128i *)&mbufp[pos]); + + /* Read desc statuses backwards to avoid race condition */ + /* A.1 load 4 pkts desc */ + descs0[3] = _mm_loadu_si128((__m128i *)(rxdp + 3)); + rte_compiler_barrier(); + + /* B.2 copy 2 64 bit or 4 32 bit mbuf point into rx_pkts */ + _mm_storeu_si128((__m128i *)&rx_pkts[pos], mbp1); + +#if defined(RTE_ARCH_X86_64) + /* B.1 load 2 64 bit mbuf poitns */ + mbp2 = _mm_loadu_si128((__m128i *)&mbufp[pos+2]); +#endif + + descs0[2] = _mm_loadu_si128((__m128i *)(rxdp + 2)); + rte_compiler_barrier(); + /* B.1 load 2 mbuf point */ + descs0[1] = _mm_loadu_si128((__m128i *)(rxdp + 1)); + rte_compiler_barrier(); + descs0[0] = _mm_loadu_si128((__m128i *)(rxdp)); + +#if defined(RTE_ARCH_X86_64) + /* B.2 copy 2 mbuf point into rx_pkts */ + _mm_storeu_si128((__m128i *)&rx_pkts[pos+2], mbp2); +#endif + + /* avoid compiler reorder optimization */ + rte_compiler_barrier(); + + if (split_packet) { + rte_mbuf_prefetch_part2(rx_pkts[pos]); + rte_mbuf_prefetch_part2(rx_pkts[pos + 1]); + rte_mbuf_prefetch_part2(rx_pkts[pos + 2]); + rte_mbuf_prefetch_part2(rx_pkts[pos + 3]); + } + + /* D.1 pkt 3,4 convert format from desc to pktmbuf */ + pkt_mb4 = _mm_shuffle_epi8(descs0[3], shuf_msk); + pkt_mb3 = _mm_shuffle_epi8(descs0[2], shuf_msk); + + /* C.1 4=>2 filter staterr info only */ + sterr_tmp2 = _mm_unpackhi_epi32(descs0[3], descs0[2]); + /* C.1 4=>2 filter staterr info only */ + sterr_tmp1 = _mm_unpackhi_epi32(descs0[1], descs0[0]); + + /* set ol_flags with vlan packet type */ + fm10k_desc_to_olflags_v(descs0, &rx_pkts[pos]); + + /* D.1 pkt 1,2 convert format from desc to pktmbuf */ + pkt_mb2 = _mm_shuffle_epi8(descs0[1], shuf_msk); + pkt_mb1 = _mm_shuffle_epi8(descs0[0], shuf_msk); + + /* C.2 get 4 pkts staterr value */ + zero = _mm_xor_si128(dd_check, dd_check); + staterr = _mm_unpacklo_epi32(sterr_tmp1, sterr_tmp2); + + /* D.3 copy final 3,4 data to rx_pkts */ + _mm_storeu_si128((void *)&rx_pkts[pos+3]->rx_descriptor_fields1, + pkt_mb4); + _mm_storeu_si128((void *)&rx_pkts[pos+2]->rx_descriptor_fields1, + pkt_mb3); + + /* C* extract and record EOP bit */ + if (split_packet) { + __m128i eop_shuf_mask = _mm_set_epi8( + 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, + 0x04, 0x0C, 0x00, 0x08 + ); + + /* and with mask to extract bits, flipping 1-0 */ + __m128i eop_bits = _mm_andnot_si128(staterr, eop_check); + /* the staterr values are not in order, as the count + * count of dd bits doesn't care. However, for end of + * packet tracking, we do care, so shuffle. 
This also + * compresses the 32-bit values to 8-bit + */ + eop_bits = _mm_shuffle_epi8(eop_bits, eop_shuf_mask); + /* store the resulting 32-bit value */ + *(int *)split_packet = _mm_cvtsi128_si32(eop_bits); + split_packet += RTE_FM10K_DESCS_PER_LOOP; + + /* zero-out next pointers */ + rx_pkts[pos]->next = NULL; + rx_pkts[pos + 1]->next = NULL; + rx_pkts[pos + 2]->next = NULL; + rx_pkts[pos + 3]->next = NULL; + } + + /* C.3 calc available number of desc */ + staterr = _mm_and_si128(staterr, dd_check); + staterr = _mm_packs_epi32(staterr, zero); + + /* D.3 copy final 1,2 data to rx_pkts */ + _mm_storeu_si128((void *)&rx_pkts[pos+1]->rx_descriptor_fields1, + pkt_mb2); + _mm_storeu_si128((void *)&rx_pkts[pos]->rx_descriptor_fields1, + pkt_mb1); + + fm10k_desc_to_pktype_v(descs0, &rx_pkts[pos]); + + /* C.4 calc avaialbe number of desc */ + var = __builtin_popcountll(_mm_cvtsi128_si64(staterr)); + nb_pkts_recd += var; + if (likely(var != RTE_FM10K_DESCS_PER_LOOP)) + break; + } + + /* Update our internal tail pointer */ + rxq->next_dd = (uint16_t)(rxq->next_dd + nb_pkts_recd); + rxq->next_dd = (uint16_t)(rxq->next_dd & (rxq->nb_desc - 1)); + rxq->rxrearm_nb = (uint16_t)(rxq->rxrearm_nb + nb_pkts_recd); + + return nb_pkts_recd; +} + +/* vPMD receive routine + * + * Notice: + * - don't support ol_flags for rss and csum err + */ +uint16_t +fm10k_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + return fm10k_recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL); +} + +static inline uint16_t +fm10k_reassemble_packets(struct fm10k_rx_queue *rxq, + struct rte_mbuf **rx_bufs, + uint16_t nb_bufs, uint8_t *split_flags) +{ + struct rte_mbuf *pkts[RTE_FM10K_MAX_RX_BURST]; /*finished pkts*/ + struct rte_mbuf *start = rxq->pkt_first_seg; + struct rte_mbuf *end = rxq->pkt_last_seg; + unsigned pkt_idx, buf_idx; + + for (buf_idx = 0, pkt_idx = 0; buf_idx < nb_bufs; buf_idx++) { + if (end != NULL) { + /* processing a split packet */ + end->next = rx_bufs[buf_idx]; + start->nb_segs++; + start->pkt_len += rx_bufs[buf_idx]->data_len; + end = end->next; + + if (!split_flags[buf_idx]) { + /* it's the last packet of the set */ +#ifdef RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE + start->hash = end->hash; + start->ol_flags = end->ol_flags; + start->packet_type = end->packet_type; +#endif + pkts[pkt_idx++] = start; + start = end = NULL; + } + } else { + /* not processing a split packet */ + if (!split_flags[buf_idx]) { + /* not a split packet, save and skip */ + pkts[pkt_idx++] = rx_bufs[buf_idx]; + continue; + } + end = start = rx_bufs[buf_idx]; + } + } + + /* save the partial packet for next time */ + rxq->pkt_first_seg = start; + rxq->pkt_last_seg = end; + memcpy(rx_bufs, pkts, pkt_idx * (sizeof(*pkts))); + return pkt_idx; +} + +/* + * vPMD receive routine that reassembles scattered packets + * + * Notice: + * - don't support ol_flags for rss and csum err + * - nb_pkts > RTE_FM10K_MAX_RX_BURST, only scan RTE_FM10K_MAX_RX_BURST + * numbers of DD bit + */ +uint16_t +fm10k_recv_scattered_pkts_vec(void *rx_queue, + struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + struct fm10k_rx_queue *rxq = rx_queue; + uint8_t split_flags[RTE_FM10K_MAX_RX_BURST] = {0}; + unsigned i = 0; + + /* Split_flags only can support max of RTE_FM10K_MAX_RX_BURST */ + nb_pkts = RTE_MIN(nb_pkts, RTE_FM10K_MAX_RX_BURST); + /* get some new buffers */ + uint16_t nb_bufs = fm10k_recv_raw_pkts_vec(rxq, rx_pkts, nb_pkts, + split_flags); + if (nb_bufs == 0) + return 0; + + /* happy day case, full burst + no packets to be joined */ + 
const uint64_t *split_fl64 = (uint64_t *)split_flags; + + if (rxq->pkt_first_seg == NULL && + split_fl64[0] == 0 && split_fl64[1] == 0 && + split_fl64[2] == 0 && split_fl64[3] == 0) + return nb_bufs; + + /* reassemble any packets that need reassembly*/ + if (rxq->pkt_first_seg == NULL) { + /* find the first split flag, and only reassemble then*/ + while (i < nb_bufs && !split_flags[i]) + i++; + if (i == nb_bufs) + return nb_bufs; + rxq->pkt_first_seg = rx_pkts[i]; + } + return i + fm10k_reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i, + &split_flags[i]); +} + +static const struct fm10k_txq_ops vec_txq_ops = { + .reset = fm10k_reset_tx_queue, +}; + +void __rte_cold +fm10k_txq_vec_setup(struct fm10k_tx_queue *txq) +{ + txq->ops = &vec_txq_ops; +} + +int __rte_cold +fm10k_tx_vec_condition_check(struct fm10k_tx_queue *txq) +{ + /* Vector TX can't offload any features yet */ + if (txq->offloads != 0) + return -1; + + if (txq->tx_ftag_en) + return -1; + + return 0; +} + +static inline void +vtx1(volatile struct fm10k_tx_desc *txdp, + struct rte_mbuf *pkt, uint64_t flags) +{ + __m128i descriptor = _mm_set_epi64x(flags << 56 | + (uint64_t)pkt->vlan_tci << 16 | (uint64_t)pkt->data_len, + MBUF_DMA_ADDR(pkt)); + _mm_store_si128((__m128i *)txdp, descriptor); +} + +static inline void +vtx(volatile struct fm10k_tx_desc *txdp, + struct rte_mbuf **pkt, uint16_t nb_pkts, uint64_t flags) +{ + int i; + + for (i = 0; i < nb_pkts; ++i, ++txdp, ++pkt) + vtx1(txdp, *pkt, flags); +} + +static __rte_always_inline int +fm10k_tx_free_bufs(struct fm10k_tx_queue *txq) +{ + struct rte_mbuf **txep; + uint8_t flags; + uint32_t n; + uint32_t i; + int nb_free = 0; + struct rte_mbuf *m, *free[RTE_FM10K_TX_MAX_FREE_BUF_SZ]; + + /* check DD bit on threshold descriptor */ + flags = txq->hw_ring[txq->next_dd].flags; + if (!(flags & FM10K_TXD_FLAG_DONE)) + return 0; + + n = txq->rs_thresh; + + /* First buffer to free from S/W ring is at index + * next_dd - (rs_thresh-1) + */ + txep = &txq->sw_ring[txq->next_dd - (n - 1)]; + m = rte_pktmbuf_prefree_seg(txep[0]); + if (likely(m != NULL)) { + free[0] = m; + nb_free = 1; + for (i = 1; i < n; i++) { + m = rte_pktmbuf_prefree_seg(txep[i]); + if (likely(m != NULL)) { + if (likely(m->pool == free[0]->pool)) + free[nb_free++] = m; + else { + rte_mempool_put_bulk(free[0]->pool, + (void *)free, nb_free); + free[0] = m; + nb_free = 1; + } + } + } + rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free); + } else { + for (i = 1; i < n; i++) { + m = rte_pktmbuf_prefree_seg(txep[i]); + if (m != NULL) + rte_mempool_put(m->pool, m); + } + } + + /* buffers were freed, update counters */ + txq->nb_free = (uint16_t)(txq->nb_free + txq->rs_thresh); + txq->next_dd = (uint16_t)(txq->next_dd + txq->rs_thresh); + if (txq->next_dd >= txq->nb_desc) + txq->next_dd = (uint16_t)(txq->rs_thresh - 1); + + return txq->rs_thresh; +} + +static __rte_always_inline void +tx_backlog_entry(struct rte_mbuf **txep, + struct rte_mbuf **tx_pkts, uint16_t nb_pkts) +{ + int i; + + for (i = 0; i < (int)nb_pkts; ++i) + txep[i] = tx_pkts[i]; +} + +uint16_t +fm10k_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + struct fm10k_tx_queue *txq = (struct fm10k_tx_queue *)tx_queue; + volatile struct fm10k_tx_desc *txdp; + struct rte_mbuf **txep; + uint16_t n, nb_commit, tx_id; + uint64_t flags = FM10K_TXD_FLAG_LAST; + uint64_t rs = FM10K_TXD_FLAG_RS | FM10K_TXD_FLAG_LAST; + int i; + + /* cross rx_thresh boundary is not allowed */ + nb_pkts = RTE_MIN(nb_pkts, txq->rs_thresh); + + if 
(txq->nb_free < txq->free_thresh) + fm10k_tx_free_bufs(txq); + + nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_free, nb_pkts); + if (unlikely(nb_pkts == 0)) + return 0; + + tx_id = txq->next_free; + txdp = &txq->hw_ring[tx_id]; + txep = &txq->sw_ring[tx_id]; + + txq->nb_free = (uint16_t)(txq->nb_free - nb_pkts); + + n = (uint16_t)(txq->nb_desc - tx_id); + if (nb_commit >= n) { + tx_backlog_entry(txep, tx_pkts, n); + + for (i = 0; i < n - 1; ++i, ++tx_pkts, ++txdp) + vtx1(txdp, *tx_pkts, flags); + + vtx1(txdp, *tx_pkts++, rs); + + nb_commit = (uint16_t)(nb_commit - n); + + tx_id = 0; + txq->next_rs = (uint16_t)(txq->rs_thresh - 1); + + /* avoid reach the end of ring */ + txdp = &(txq->hw_ring[tx_id]); + txep = &txq->sw_ring[tx_id]; + } + + tx_backlog_entry(txep, tx_pkts, nb_commit); + + vtx(txdp, tx_pkts, nb_commit, flags); + + tx_id = (uint16_t)(tx_id + nb_commit); + if (tx_id > txq->next_rs) { + txq->hw_ring[txq->next_rs].flags |= FM10K_TXD_FLAG_RS; + txq->next_rs = (uint16_t)(txq->next_rs + txq->rs_thresh); + } + + txq->next_free = tx_id; + + FM10K_PCI_REG_WRITE(txq->tail_ptr, txq->next_free); + + return nb_pkts; +} + +static void __rte_cold +fm10k_reset_tx_queue(struct fm10k_tx_queue *txq) +{ + static const struct fm10k_tx_desc zeroed_desc = {0}; + struct rte_mbuf **txe = txq->sw_ring; + uint16_t i; + + /* Zero out HW ring memory */ + for (i = 0; i < txq->nb_desc; i++) + txq->hw_ring[i] = zeroed_desc; + + /* Initialize SW ring entries */ + for (i = 0; i < txq->nb_desc; i++) + txe[i] = NULL; + + txq->next_dd = (uint16_t)(txq->rs_thresh - 1); + txq->next_rs = (uint16_t)(txq->rs_thresh - 1); + + txq->next_free = 0; + txq->nb_used = 0; + /* Always allow 1 descriptor to be un-allocated to avoid + * a H/W race condition + */ + txq->nb_free = (uint16_t)(txq->nb_desc - 1); + FM10K_PCI_REG_WRITE(txq->tail_ptr, 0); +} diff --git a/src/spdk/dpdk/drivers/net/fm10k/meson.build b/src/spdk/dpdk/drivers/net/fm10k/meson.build new file mode 100644 index 000000000..2772ea4df --- /dev/null +++ b/src/spdk/dpdk/drivers/net/fm10k/meson.build @@ -0,0 +1,16 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2017 Intel Corporation + +subdir('base') +objs = [base_objs] + +sources = files( + 'fm10k_ethdev.c', + 'fm10k_rxtx.c', +) +if arch_subdir == 'x86' + dpdk_conf.set('RTE_LIBRTE_FM10K_INC_VECTOR', 1) + sources += files('fm10k_rxtx_vec.c') +endif + +includes += include_directories('base') diff --git a/src/spdk/dpdk/drivers/net/fm10k/rte_pmd_fm10k_version.map b/src/spdk/dpdk/drivers/net/fm10k/rte_pmd_fm10k_version.map new file mode 100644 index 000000000..f9f17e4f6 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/fm10k/rte_pmd_fm10k_version.map @@ -0,0 +1,3 @@ +DPDK_20.0 { + local: *; +}; diff --git a/src/spdk/dpdk/drivers/net/hinic/Makefile b/src/spdk/dpdk/drivers/net/hinic/Makefile new file mode 100644 index 000000000..87fd843e4 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/hinic/Makefile @@ -0,0 +1,67 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2017 Huawei Technologies Co., Ltd + +include $(RTE_SDK)/mk/rte.vars.mk + +# +# library name +# +LIB = librte_pmd_hinic.a + +CFLAGS += -O3 +CFLAGS += $(WERROR_FLAGS) + +ifeq ($(CONFIG_RTE_ARCH_ARM64),y) +CFLAGS += -D__ARM64_NEON__ +else ifeq ($(CONFIG_RTE_ARCH_X86_64),y) +CFLAGS += -D__X86_64_SSE__ +endif + +LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring +LDLIBS += -lrte_ethdev -lrte_net -lrte_hash +LDLIBS += -lrte_bus_pci +LDLIBS += -lpthread + +EXPORT_MAP := rte_pmd_hinic_version.map + +# +# CFLAGS for 32-bits platforms +# +ifneq 
($(CONFIG_RTE_ARCH_64),y) +ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y) +# +# CFLAGS for icc +# +CFLAGS += -diag-disable 2259 +else +# +# CFLAGS for gcc +# +CFLAGS += -Wno-int-to-pointer-cast +CFLAGS += -Wno-pointer-to-int-cast +endif +endif + +VPATH += $(SRCDIR)/base + +# +# all source are stored in SRCS-y +# +SRCS-$(CONFIG_RTE_LIBRTE_HINIC_PMD) += hinic_pmd_api_cmd.c +SRCS-$(CONFIG_RTE_LIBRTE_HINIC_PMD) += hinic_pmd_cfg.c +SRCS-$(CONFIG_RTE_LIBRTE_HINIC_PMD) += hinic_pmd_cmdq.c +SRCS-$(CONFIG_RTE_LIBRTE_HINIC_PMD) += hinic_pmd_eqs.c +SRCS-$(CONFIG_RTE_LIBRTE_HINIC_PMD) += hinic_pmd_hwdev.c +SRCS-$(CONFIG_RTE_LIBRTE_HINIC_PMD) += hinic_pmd_hwif.c +SRCS-$(CONFIG_RTE_LIBRTE_HINIC_PMD) += hinic_pmd_mgmt.c +SRCS-$(CONFIG_RTE_LIBRTE_HINIC_PMD) += hinic_pmd_niccfg.c +SRCS-$(CONFIG_RTE_LIBRTE_HINIC_PMD) += hinic_pmd_nicio.c +SRCS-$(CONFIG_RTE_LIBRTE_HINIC_PMD) += hinic_pmd_wq.c +SRCS-$(CONFIG_RTE_LIBRTE_HINIC_PMD) += hinic_pmd_mbox.c +SRCS-$(CONFIG_RTE_LIBRTE_HINIC_PMD) += hinic_pmd_flow.c + +SRCS-$(CONFIG_RTE_LIBRTE_HINIC_PMD) += hinic_pmd_ethdev.c +SRCS-$(CONFIG_RTE_LIBRTE_HINIC_PMD) += hinic_pmd_rx.c +SRCS-$(CONFIG_RTE_LIBRTE_HINIC_PMD) += hinic_pmd_tx.c + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/src/spdk/dpdk/drivers/net/hinic/base/hinic_compat.h b/src/spdk/dpdk/drivers/net/hinic/base/hinic_compat.h new file mode 100644 index 000000000..921b83012 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/hinic/base/hinic_compat.h @@ -0,0 +1,279 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Huawei Technologies Co., Ltd + */ + +#ifndef _HINIC_COMPAT_H_ +#define _HINIC_COMPAT_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +typedef uint8_t u8; +typedef int8_t s8; +typedef uint16_t u16; +typedef uint32_t u32; +typedef int32_t s32; +typedef uint64_t u64; + +#ifndef dma_addr_t +typedef uint64_t dma_addr_t; +#endif + +#ifndef gfp_t +#define gfp_t unsigned +#endif + +#ifndef bool +#define bool int +#endif + +#ifndef FALSE +#define FALSE (0) +#endif + +#ifndef TRUE +#define TRUE (1) +#endif + +#ifndef false +#define false (0) +#endif + +#ifndef true +#define true (1) +#endif + +#ifndef NULL +#define NULL ((void *)0) +#endif + +#define HINIC_ERROR (-1) +#define HINIC_OK (0) + +#ifndef BIT +#define BIT(n) (1 << (n)) +#endif + +#define upper_32_bits(n) ((u32)(((n) >> 16) >> 16)) +#define lower_32_bits(n) ((u32)(n)) + +/* Returns X / Y, rounding up. X must be nonnegative to round correctly. */ +#define DIV_ROUND_UP(X, Y) (((X) + ((Y) - 1)) / (Y)) + +/* Returns X rounded up to the nearest multiple of Y. */ +#define ROUND_UP(X, Y) (DIV_ROUND_UP(X, Y) * (Y)) + +#undef ALIGN +#define ALIGN(x, a) RTE_ALIGN(x, a) + +#define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a))) + +/* Reported driver name. */ +#define HINIC_DRIVER_NAME "net_hinic" + +extern int hinic_logtype; + +#define PMD_DRV_LOG(level, fmt, args...) 
\ + rte_log(RTE_LOG_ ## level, hinic_logtype, \ + HINIC_DRIVER_NAME": " fmt "\n", ##args) + +/* common definition */ +#ifndef ETH_ALEN +#define ETH_ALEN 6 +#endif +#define ETH_HLEN 14 +#define ETH_CRC_LEN 4 +#define VLAN_PRIO_SHIFT 13 +#define VLAN_N_VID 4096 + +/* bit order interface */ +#define cpu_to_be16(o) rte_cpu_to_be_16(o) +#define cpu_to_be32(o) rte_cpu_to_be_32(o) +#define cpu_to_be64(o) rte_cpu_to_be_64(o) +#define cpu_to_le32(o) rte_cpu_to_le_32(o) +#define be16_to_cpu(o) rte_be_to_cpu_16(o) +#define be32_to_cpu(o) rte_be_to_cpu_32(o) +#define be64_to_cpu(o) rte_be_to_cpu_64(o) +#define le32_to_cpu(o) rte_le_to_cpu_32(o) + +/* virt memory and dma phy memory */ +#define __iomem +#define GFP_KERNEL RTE_MEMZONE_IOVA_CONTIG +#define HINIC_PAGE_SHIFT 12 +#define HINIC_PAGE_SIZE RTE_PGSIZE_4K +#define HINIC_MEM_ALLOC_ALIGN_MIN 8 + +#define HINIC_PAGE_SIZE_DPDK 6 + +static inline int hinic_test_bit(int nr, volatile unsigned long *addr) +{ + int res; + + res = ((*addr) & (1UL << nr)) != 0; + return res; +} + +static inline void hinic_set_bit(unsigned int nr, volatile unsigned long *addr) +{ + __sync_fetch_and_or(addr, (1UL << nr)); +} + +static inline void hinic_clear_bit(int nr, volatile unsigned long *addr) +{ + __sync_fetch_and_and(addr, ~(1UL << nr)); +} + +static inline int hinic_test_and_clear_bit(int nr, volatile unsigned long *addr) +{ + unsigned long mask = (1UL << nr); + + return __sync_fetch_and_and(addr, ~mask) & mask; +} + +static inline int hinic_test_and_set_bit(int nr, volatile unsigned long *addr) +{ + unsigned long mask = (1UL << nr); + + return __sync_fetch_and_or(addr, mask) & mask; +} + +void *dma_zalloc_coherent(void *dev, size_t size, dma_addr_t *dma_handle, + unsigned int socket_id); + +void *dma_zalloc_coherent_aligned(void *hwdev, size_t size, + dma_addr_t *dma_handle, unsigned int socket_id); + +void *dma_zalloc_coherent_aligned256k(void *hwdev, size_t size, + dma_addr_t *dma_handle, unsigned int socket_id); + +void dma_free_coherent(void *dev, size_t size, void *virt, dma_addr_t phys); + +/* dma pool alloc and free */ +#define pci_pool dma_pool +#define pci_pool_alloc(pool, handle) dma_pool_alloc(pool, handle) +#define pci_pool_free(pool, vaddr, addr) dma_pool_free(pool, vaddr, addr) + +struct dma_pool *dma_pool_create(const char *name, void *dev, size_t size, + size_t align, size_t boundary); +void dma_pool_destroy(struct dma_pool *pool); +void *dma_pool_alloc(struct pci_pool *pool, dma_addr_t *dma_addr); +void dma_pool_free(struct pci_pool *pool, void *vaddr, dma_addr_t dma); + +#define kzalloc(size, flag) rte_zmalloc(NULL, size, HINIC_MEM_ALLOC_ALIGN_MIN) +#define kzalloc_aligned(size, flag) rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE) +#define kfree(ptr) rte_free(ptr) + +/* mmio interface */ +static inline void writel(u32 value, volatile void *addr) +{ + *(volatile u32 *)addr = value; +} + +static inline u32 readl(const volatile void *addr) +{ + return *(const volatile u32 *)addr; +} + +#define __raw_writel(value, reg) writel((value), (reg)) +#define __raw_readl(reg) readl((reg)) + +/* Spinlock related interface */ +#define hinic_spinlock_t rte_spinlock_t + +#define spinlock_t rte_spinlock_t +#define spin_lock_init(spinlock_prt) rte_spinlock_init(spinlock_prt) +#define spin_lock_deinit(lock) +#define spin_lock(spinlock_prt) rte_spinlock_lock(spinlock_prt) +#define spin_unlock(spinlock_prt) rte_spinlock_unlock(spinlock_prt) + +static inline unsigned long get_timeofday_ms(void) +{ + struct timeval tv; + + (void)gettimeofday(&tv, NULL); + + return (unsigned 
long)tv.tv_sec * 1000 + tv.tv_usec / 1000; +} + +#define jiffies get_timeofday_ms() +#define msecs_to_jiffies(ms) (ms) +#define time_before(now, end) ((now) < (end)) + +/* misc kernel utils */ +static inline u16 ilog2(u32 n) +{ + u16 res = 0; + + while (n > 1) { + n >>= 1; + res++; + } + + return res; +} + +static inline int hinic_mutex_init(pthread_mutex_t *pthreadmutex, + const pthread_mutexattr_t *mattr) +{ + int err; + + err = pthread_mutex_init(pthreadmutex, mattr); + if (unlikely(err)) + PMD_DRV_LOG(ERR, "Fail to initialize mutex, error: %d", err); + + return err; +} + +static inline int hinic_mutex_destroy(pthread_mutex_t *pthreadmutex) +{ + int err; + + err = pthread_mutex_destroy(pthreadmutex); + if (unlikely(err)) + PMD_DRV_LOG(ERR, "Fail to destroy mutex, error: %d", err); + + return err; +} + +static inline int hinic_mutex_lock(pthread_mutex_t *pthreadmutex) +{ + int err; + + err = pthread_mutex_lock(pthreadmutex); + if (!err) { + return err; + } else if (err == EOWNERDEAD) { + PMD_DRV_LOG(ERR, "Mutex lock failed. (ErrorNo=%d)", errno); +#if defined(__GLIBC__) +#if __GLIBC_PREREQ(2, 12) + (void)pthread_mutex_consistent(pthreadmutex); +#else + (void)pthread_mutex_consistent_np(pthreadmutex); +#endif +#else + (void)pthread_mutex_consistent(pthreadmutex); +#endif + } else { + PMD_DRV_LOG(ERR, "Mutex lock failed. (ErrorNo=%d)", errno); + } + + return err; +} + +static inline int hinic_mutex_unlock(pthread_mutex_t *pthreadmutex) +{ + return pthread_mutex_unlock(pthreadmutex); +} + +#endif /* _HINIC_COMPAT_H_ */ diff --git a/src/spdk/dpdk/drivers/net/hinic/base/hinic_csr.h b/src/spdk/dpdk/drivers/net/hinic/base/hinic_csr.h new file mode 100644 index 000000000..2626f6960 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/hinic/base/hinic_csr.h @@ -0,0 +1,135 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Huawei Technologies Co., Ltd + */ + +#ifndef _HINIC_CSR_H_ +#define _HINIC_CSR_H_ + +#define HINIC_CSR_GLOBAL_BASE_ADDR 0x4000 + +/* HW interface registers */ +#define HINIC_CSR_FUNC_ATTR0_ADDR 0x0 +#define HINIC_CSR_FUNC_ATTR1_ADDR 0x4 +#define HINIC_CSR_FUNC_ATTR2_ADDR 0x8 +#define HINIC_CSR_FUNC_ATTR4_ADDR 0x10 +#define HINIC_CSR_FUNC_ATTR5_ADDR 0x14 + +#define HINIC_FUNC_CSR_MAILBOX_DATA_OFF 0x80 +#define HINIC_FUNC_CSR_MAILBOX_CONTROL_OFF 0x0100 +#define HINIC_FUNC_CSR_MAILBOX_INT_OFFSET_OFF 0x0104 +#define HINIC_FUNC_CSR_MAILBOX_RESULT_H_OFF 0x0108 +#define HINIC_FUNC_CSR_MAILBOX_RESULT_L_OFF 0x010C + +#define HINIC_CSR_DMA_ATTR_TBL_BASE 0xC80 + +#define HINIC_ELECTION_BASE 0x200 + +#define HINIC_CSR_DMA_ATTR_TBL_STRIDE 0x4 +#define HINIC_CSR_DMA_ATTR_TBL_ADDR(idx) \ + (HINIC_CSR_DMA_ATTR_TBL_BASE \ + + (idx) * HINIC_CSR_DMA_ATTR_TBL_STRIDE) + +#define HINIC_PPF_ELECTION_STRIDE 0x4 +#define HINIC_CSR_MAX_PORTS 4 +#define HINIC_CSR_PPF_ELECTION_ADDR \ + (HINIC_CSR_GLOBAL_BASE_ADDR + HINIC_ELECTION_BASE) + +/* MSI-X registers */ +#define HINIC_CSR_MSIX_CTRL_BASE 0x2000 +#define HINIC_CSR_MSIX_CNT_BASE 0x2004 + +#define HINIC_CSR_MSIX_STRIDE 0x8 + +#define HINIC_CSR_MSIX_CTRL_ADDR(idx) \ + (HINIC_CSR_MSIX_CTRL_BASE + (idx) * HINIC_CSR_MSIX_STRIDE) + +#define HINIC_CSR_MSIX_CNT_ADDR(idx) \ + (HINIC_CSR_MSIX_CNT_BASE + (idx) * HINIC_CSR_MSIX_STRIDE) + +/* EQ registers */ +#define HINIC_AEQ_MTT_OFF_BASE_ADDR 0x200 + +#define HINIC_EQ_MTT_OFF_STRIDE 0x40 + +#define HINIC_CSR_AEQ_MTT_OFF(id) \ + (HINIC_AEQ_MTT_OFF_BASE_ADDR + (id) * HINIC_EQ_MTT_OFF_STRIDE) + +#define HINIC_CSR_EQ_PAGE_OFF_STRIDE 8 + +#define HINIC_AEQ_HI_PHYS_ADDR_REG(q_id, pg_num) \ + 
(HINIC_CSR_AEQ_MTT_OFF(q_id) + \ + (pg_num) * HINIC_CSR_EQ_PAGE_OFF_STRIDE) + +#define HINIC_AEQ_LO_PHYS_ADDR_REG(q_id, pg_num) \ + (HINIC_CSR_AEQ_MTT_OFF(q_id) + \ + (pg_num) * HINIC_CSR_EQ_PAGE_OFF_STRIDE + 4) + +#define HINIC_EQ_HI_PHYS_ADDR_REG(type, q_id, pg_num) \ + ((u32)(HINIC_AEQ_HI_PHYS_ADDR_REG(q_id, pg_num))) + +#define HINIC_EQ_LO_PHYS_ADDR_REG(type, q_id, pg_num) \ + ((u32)(HINIC_AEQ_LO_PHYS_ADDR_REG(q_id, pg_num))) + +#define HINIC_AEQ_CTRL_0_ADDR_BASE 0xE00 +#define HINIC_AEQ_CTRL_1_ADDR_BASE 0xE04 +#define HINIC_AEQ_CONS_IDX_0_ADDR_BASE 0xE08 +#define HINIC_AEQ_CONS_IDX_1_ADDR_BASE 0xE0C + +#define HINIC_EQ_OFF_STRIDE 0x80 + +#define HINIC_CSR_AEQ_CTRL_0_ADDR(idx) \ + (HINIC_AEQ_CTRL_0_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE) + +#define HINIC_CSR_AEQ_CTRL_1_ADDR(idx) \ + (HINIC_AEQ_CTRL_1_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE) + +#define HINIC_CSR_AEQ_CONS_IDX_ADDR(idx) \ + (HINIC_AEQ_CONS_IDX_0_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE) + +#define HINIC_CSR_AEQ_PROD_IDX_ADDR(idx) \ + (HINIC_AEQ_CONS_IDX_1_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE) + +/* API CMD registers */ +#define HINIC_CSR_API_CMD_BASE 0xF000 + +#define HINIC_CSR_API_CMD_STRIDE 0x100 + +#define HINIC_CSR_API_CMD_CHAIN_HEAD_HI_ADDR(idx) \ + (HINIC_CSR_API_CMD_BASE + 0x0 + (idx) * HINIC_CSR_API_CMD_STRIDE) + +#define HINIC_CSR_API_CMD_CHAIN_HEAD_LO_ADDR(idx) \ + (HINIC_CSR_API_CMD_BASE + 0x4 + (idx) * HINIC_CSR_API_CMD_STRIDE) + +#define HINIC_CSR_API_CMD_STATUS_HI_ADDR(idx) \ + (HINIC_CSR_API_CMD_BASE + 0x8 + (idx) * HINIC_CSR_API_CMD_STRIDE) + +#define HINIC_CSR_API_CMD_STATUS_LO_ADDR(idx) \ + (HINIC_CSR_API_CMD_BASE + 0xC + (idx) * HINIC_CSR_API_CMD_STRIDE) + +#define HINIC_CSR_API_CMD_CHAIN_NUM_CELLS_ADDR(idx) \ + (HINIC_CSR_API_CMD_BASE + 0x10 + (idx) * HINIC_CSR_API_CMD_STRIDE) + +#define HINIC_CSR_API_CMD_CHAIN_CTRL_ADDR(idx) \ + (HINIC_CSR_API_CMD_BASE + 0x14 + (idx) * HINIC_CSR_API_CMD_STRIDE) + +#define HINIC_CSR_API_CMD_CHAIN_PI_ADDR(idx) \ + (HINIC_CSR_API_CMD_BASE + 0x1C + (idx) * HINIC_CSR_API_CMD_STRIDE) + +#define HINIC_CSR_API_CMD_CHAIN_REQ_ADDR(idx) \ + (HINIC_CSR_API_CMD_BASE + 0x20 + (idx) * HINIC_CSR_API_CMD_STRIDE) + +#define HINIC_CSR_API_CMD_STATUS_0_ADDR(idx) \ + (HINIC_CSR_API_CMD_BASE + 0x30 + (idx) * HINIC_CSR_API_CMD_STRIDE) + +/* VF control registers in pf */ +#define HINIC_PF_CSR_VF_FLUSH_BASE 0x1F400 +#define HINIC_PF_CSR_VF_FLUSH_STRIDE 0x4 + +#define HINIC_GLB_DMA_SO_RO_REPLACE_ADDR 0x488C + +#define HINIC_ICPL_RESERVD_ADDR 0x9204 + +#define HINIC_PF_CSR_VF_FLUSH_OFF(idx) \ + (HINIC_PF_CSR_VF_FLUSH_BASE + (idx) * HINIC_PF_CSR_VF_FLUSH_STRIDE) + +#endif /* _HINIC_CSR_H_ */ diff --git a/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_api_cmd.c b/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_api_cmd.c new file mode 100644 index 000000000..b72edc065 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_api_cmd.c @@ -0,0 +1,1041 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Huawei Technologies Co., Ltd + */ + +#include "hinic_compat.h" +#include "hinic_csr.h" +#include "hinic_pmd_hwdev.h" +#include "hinic_pmd_cmd.h" +#include "hinic_pmd_hwif.h" +#include "hinic_pmd_api_cmd.h" + +#define API_CMD_CHAIN_CELL_SIZE_SHIFT 6U + +#define API_CMD_CELL_DESC_SIZE 8 +#define API_CMD_CELL_DATA_ADDR_SIZE 8 + +#define API_CHAIN_NUM_CELLS 32 +#define API_CHAIN_CELL_SIZE 128 +#define API_CHAIN_RSP_DATA_SIZE 128 + +#define API_CHAIN_CELL_ALIGNMENT 8 + +#define API_CMD_TIMEOUT 10000 + +#define API_CMD_BUF_SIZE 2048UL + +#define API_CMD_NODE_ALIGN_SIZE 512UL 
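Editor's sketch: the buffer-size constants above (API_CMD_BUF_SIZE plus an alignment slack) are consumed later in this file by alloc_cmd_buf(), which over-allocates by API_PAYLOAD_ALIGN_SIZE, rounds the virtual address up with PTR_ALIGN(), and shifts the DMA address by the same offset. The standalone C sketch below only illustrates that over-allocate-and-align pattern; ALIGN_UP, main() and the example addresses are illustrative assumptions, not part of the driver.

	#include <stdint.h>
	#include <stdio.h>

	/* Round x up to the next multiple of the power-of-two alignment a. */
	#define ALIGN_UP(x, a)	(((x) + ((uint64_t)(a) - 1)) & ~((uint64_t)(a) - 1))

	int main(void)
	{
		uint64_t vaddr_alloc = 0x7f2c40001030ULL;     /* raw allocation (virtual) */
		uint64_t paddr_alloc = 0x00000002fd001030ULL; /* matching DMA address */
		uint64_t align = 64;                          /* e.g. API_PAYLOAD_ALIGN_SIZE */

		uint64_t vaddr = ALIGN_UP(vaddr_alloc, align);
		uint64_t paddr = paddr_alloc + (vaddr - vaddr_alloc);

		/* Both addresses move forward by the same 0x10 bytes of padding. */
		printf("virt %#llx -> %#llx, dma %#llx -> %#llx\n",
		       (unsigned long long)vaddr_alloc, (unsigned long long)vaddr,
		       (unsigned long long)paddr_alloc, (unsigned long long)paddr);
		return 0;
	}
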
+#define API_PAYLOAD_ALIGN_SIZE 64 + +#define API_CHAIN_RESP_ALIGNMENT 64ULL + +#define COMPLETION_TIMEOUT_DEFAULT 1000UL +#define POLLING_COMPLETION_TIMEOUT_DEFAULT 1000U + +#define API_CMD_RESPONSE_DATA_PADDR(val) be64_to_cpu(*((u64 *)(val))) + +#define READ_API_CMD_PRIV_DATA(id, token) (((id) << 16) + (token)) +#define WRITE_API_CMD_PRIV_DATA(id) ((id) << 16) + +#define MASKED_IDX(chain, idx) ((idx) & ((chain)->num_cells - 1)) + +#undef SIZE_4BYTES +#undef SIZE_8BYTES +#define SIZE_4BYTES(size) (ALIGN((u32)(size), 4U) >> 2) +#define SIZE_8BYTES(size) (ALIGN((u32)(size), 8U) >> 3) + +enum api_cmd_data_format { + SGL_DATA = 1, +}; + +enum api_cmd_type { + API_CMD_WRITE_TYPE = 0, + API_CMD_READ_TYPE = 1, +}; + +enum api_cmd_bypass { + NOT_BYPASS = 0, + BYPASS = 1, +}; + +enum api_cmd_resp_aeq { + NOT_TRIGGER = 0, + TRIGGER = 1, +}; + +static u8 xor_chksum_set(void *data) +{ + int idx; + u8 checksum = 0; + u8 *val = (u8 *)data; + + for (idx = 0; idx < 7; idx++) + checksum ^= val[idx]; + + return checksum; +} + +static void set_prod_idx(struct hinic_api_cmd_chain *chain) +{ + enum hinic_api_cmd_chain_type chain_type = chain->chain_type; + struct hinic_hwif *hwif = chain->hwdev->hwif; + u32 hw_prod_idx_addr = HINIC_CSR_API_CMD_CHAIN_PI_ADDR(chain_type); + u32 prod_idx = chain->prod_idx; + + hinic_hwif_write_reg(hwif, hw_prod_idx_addr, prod_idx); +} + +static u32 get_hw_cons_idx(struct hinic_api_cmd_chain *chain) +{ + u32 addr, val; + + addr = HINIC_CSR_API_CMD_STATUS_0_ADDR(chain->chain_type); + val = hinic_hwif_read_reg(chain->hwdev->hwif, addr); + + return HINIC_API_CMD_STATUS_GET(val, CONS_IDX); +} + +static void dump_api_chain_reg(struct hinic_api_cmd_chain *chain) +{ + u32 addr, val; + + addr = HINIC_CSR_API_CMD_STATUS_0_ADDR(chain->chain_type); + val = hinic_hwif_read_reg(chain->hwdev->hwif, addr); + + PMD_DRV_LOG(ERR, "chain type: 0x%x", chain->chain_type); + PMD_DRV_LOG(ERR, "chain hw cpld error: 0x%x", + HINIC_API_CMD_STATUS_GET(val, CPLD_ERR)); + PMD_DRV_LOG(ERR, "chain hw check error: 0x%x", + HINIC_API_CMD_STATUS_GET(val, CHKSUM_ERR)); + PMD_DRV_LOG(ERR, "chain hw current fsm: 0x%x", + HINIC_API_CMD_STATUS_GET(val, FSM)); + PMD_DRV_LOG(ERR, "chain hw current ci: 0x%x", + HINIC_API_CMD_STATUS_GET(val, CONS_IDX)); + + addr = HINIC_CSR_API_CMD_CHAIN_PI_ADDR(chain->chain_type); + val = hinic_hwif_read_reg(chain->hwdev->hwif, addr); + PMD_DRV_LOG(ERR, "Chain hw current pi: 0x%x", val); +} + +/** + * chain_busy - check if the chain is still processing last requests + * @chain: chain to check + */ +static int chain_busy(struct hinic_api_cmd_chain *chain) +{ + switch (chain->chain_type) { + case HINIC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU: + case HINIC_API_CMD_PMD_WRITE_TO_MGMT: + chain->cons_idx = get_hw_cons_idx(chain); + if (chain->cons_idx == MASKED_IDX(chain, chain->prod_idx + 1)) { + PMD_DRV_LOG(ERR, "API CMD chain %d is busy, cons_idx: %d, prod_idx: %d", + chain->chain_type, chain->cons_idx, + chain->prod_idx); + dump_api_chain_reg(chain); + return -EBUSY; + } + break; + default: + PMD_DRV_LOG(ERR, "Unknown Chain type"); + return -EINVAL; + } + + return 0; +} + +/** + * get_cell_data_size - get the data size of specific cell type + * @type: chain type + */ +static u16 get_cell_data_size(enum hinic_api_cmd_chain_type type, + __rte_unused u16 cmd_size) +{ + u16 cell_data_size = 0; + + switch (type) { + case HINIC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU: + case HINIC_API_CMD_PMD_WRITE_TO_MGMT: + cell_data_size = ALIGN(API_CMD_CELL_DESC_SIZE + + API_CMD_CELL_DATA_ADDR_SIZE, + 
API_CHAIN_CELL_ALIGNMENT); + break; + default: + break; + } + + return cell_data_size; +} + +/** + * prepare_cell_ctrl - prepare the ctrl of the cell for the command + * @cell_ctrl: the control of the cell to set the control into it + * @cell_len: the size of the cell + */ +static void prepare_cell_ctrl(u64 *cell_ctrl, u16 cell_len) +{ + u64 ctrl; + u8 chksum; + + /* Read Modify Write */ + ctrl = be64_to_cpu(*cell_ctrl); + ctrl = HINIC_API_CMD_CELL_CTRL_CLEAR(ctrl, CELL_LEN) & + HINIC_API_CMD_CELL_CTRL_CLEAR(ctrl, RD_DMA_ATTR_OFF) & + HINIC_API_CMD_CELL_CTRL_CLEAR(ctrl, WR_DMA_ATTR_OFF) & + HINIC_API_CMD_CELL_CTRL_CLEAR(ctrl, XOR_CHKSUM); + + ctrl |= HINIC_API_CMD_CELL_CTRL_SET(SIZE_8BYTES(cell_len), CELL_LEN) | + HINIC_API_CMD_CELL_CTRL_SET(0ULL, RD_DMA_ATTR_OFF) | + HINIC_API_CMD_CELL_CTRL_SET(0ULL, WR_DMA_ATTR_OFF); + + chksum = xor_chksum_set(&ctrl); + + ctrl |= HINIC_API_CMD_CELL_CTRL_SET(chksum, XOR_CHKSUM); + + /* The data in the HW should be in Big Endian Format */ + *cell_ctrl = cpu_to_be64(ctrl); +} + +/** + * prepare_api_cmd - prepare API CMD command + * @chain: chain for the command + * @cell: the cell of the command + * @dest: destination node on the card that will receive the command + * @cmd: command data + * @cmd_size: the command size + */ +static void prepare_api_cmd(struct hinic_api_cmd_chain *chain, + struct hinic_api_cmd_cell *cell, + enum hinic_node_id dest, + void *cmd, u16 cmd_size) +{ + struct hinic_api_cmd_cell_ctxt *cell_ctxt; + u32 priv; + + cell_ctxt = &chain->cell_ctxt[chain->prod_idx]; + + /* Clear all the members before changes */ + cell->desc = HINIC_API_CMD_DESC_CLEAR(cell->desc, API_TYPE) & + HINIC_API_CMD_DESC_CLEAR(cell->desc, RD_WR) & + HINIC_API_CMD_DESC_CLEAR(cell->desc, MGMT_BYPASS) & + HINIC_API_CMD_DESC_CLEAR(cell->desc, RESP_AEQE_EN) & + HINIC_API_CMD_DESC_CLEAR(cell->desc, DEST) & + HINIC_API_CMD_DESC_CLEAR(cell->desc, SIZE) & + HINIC_API_CMD_DESC_CLEAR(cell->desc, XOR_CHKSUM); + + switch (chain->chain_type) { + case HINIC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU: + case HINIC_API_CMD_PMD_WRITE_TO_MGMT: + priv = WRITE_API_CMD_PRIV_DATA(chain->chain_type); + cell->desc = HINIC_API_CMD_DESC_SET(SGL_DATA, API_TYPE) | + HINIC_API_CMD_DESC_SET(API_CMD_WRITE_TYPE, RD_WR) | + HINIC_API_CMD_DESC_SET(NOT_BYPASS, MGMT_BYPASS) | + HINIC_API_CMD_DESC_SET(TRIGGER, RESP_AEQE_EN) | + HINIC_API_CMD_DESC_SET(priv, PRIV_DATA); + break; + default: + PMD_DRV_LOG(ERR, "Unknown Chain type"); + return; + } + + cell->desc |= HINIC_API_CMD_DESC_SET(dest, DEST) | + HINIC_API_CMD_DESC_SET(SIZE_4BYTES(cmd_size), SIZE); + cell->desc |= HINIC_API_CMD_DESC_SET(xor_chksum_set(&cell->desc), + XOR_CHKSUM); + + /* The data in the HW should be in Big Endian Format */ + cell->desc = cpu_to_be64(cell->desc); + + memcpy(cell_ctxt->api_cmd_vaddr, cmd, cmd_size); +} + +/** + * prepare_cell - prepare cell ctrl and cmd in the current producer cell + * @chain: chain for the command + * @dest: destination node on the card that will receive the command + * @cmd: command data + * @cmd_size: the command size + */ +static void prepare_cell(struct hinic_api_cmd_chain *chain, + enum hinic_node_id dest, + void *cmd, u16 cmd_size) +{ + struct hinic_api_cmd_cell *curr_node; + u16 cell_size; + + curr_node = chain->curr_node; + + cell_size = get_cell_data_size(chain->chain_type, cmd_size); + + prepare_cell_ctrl(&curr_node->ctrl, cell_size); + prepare_api_cmd(chain, curr_node, dest, cmd, cmd_size); +} + +static inline void cmd_chain_prod_idx_inc(struct hinic_api_cmd_chain *chain) +{ + chain->prod_idx = 
MASKED_IDX(chain, chain->prod_idx + 1); +} + +static void issue_api_cmd(struct hinic_api_cmd_chain *chain) +{ + set_prod_idx(chain); +} + +/** + * api_cmd_status_update - update the status of the chain + * @chain: chain to update + */ +static void api_cmd_status_update(struct hinic_api_cmd_chain *chain) +{ + struct hinic_api_cmd_status *wb_status; + enum hinic_api_cmd_chain_type chain_type; + u64 status_header; + u32 buf_desc; + + wb_status = chain->wb_status; + + buf_desc = be32_to_cpu(wb_status->buf_desc); + if (HINIC_API_CMD_STATUS_GET(buf_desc, CHKSUM_ERR)) { + PMD_DRV_LOG(ERR, "API CMD status Xor check error"); + return; + } + + status_header = be64_to_cpu(wb_status->header); + chain_type = HINIC_API_CMD_STATUS_HEADER_GET(status_header, CHAIN_ID); + if (chain_type >= HINIC_API_CMD_MAX) + return; + + if (chain_type != chain->chain_type) + return; + + chain->cons_idx = HINIC_API_CMD_STATUS_GET(buf_desc, CONS_IDX); +} + +/** + * wait_for_status_poll - wait for write to mgmt command to complete + * @chain: the chain of the command + * Return: 0 - success, negative - failure + */ +static int wait_for_status_poll(struct hinic_api_cmd_chain *chain) +{ + unsigned long end; + int err = -ETIMEDOUT; + + end = jiffies + msecs_to_jiffies(API_CMD_TIMEOUT); + do { + api_cmd_status_update(chain); + + /* SYNC API CMD cmd should start after prev cmd finished */ + if (chain->cons_idx == chain->prod_idx) { + err = 0; + break; + } + + rte_delay_us(10); + } while (time_before(jiffies, end)); + + return err; +} + +/** + * wait_for_api_cmd_completion - wait for command to complete + * @chain: chain for the command + * Return: 0 - success, negative - failure + */ +static int wait_for_api_cmd_completion(struct hinic_api_cmd_chain *chain, + __rte_unused struct hinic_api_cmd_cell_ctxt *ctxt, + __rte_unused void *ack, __rte_unused u16 ack_size) +{ + int err = 0; + + /* poll api cmd status for debug*/ + switch (chain->chain_type) { + case HINIC_API_CMD_PMD_WRITE_TO_MGMT: + err = wait_for_status_poll(chain); + if (err) + PMD_DRV_LOG(ERR, "API CMD poll status timeout"); + break; + case HINIC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU: + break; + default: + PMD_DRV_LOG(ERR, "Unknown API CMD chain type"); + err = -EINVAL; + break; + } + + if (err) + dump_api_chain_reg(chain); + + return err; +} + +static inline void update_api_cmd_ctxt(struct hinic_api_cmd_chain *chain, + struct hinic_api_cmd_cell_ctxt *ctxt) +{ + ctxt->status = 1; + ctxt->saved_prod_idx = chain->prod_idx; +} + +/** + * api_cmd - API CMD command + * @chain: chain for the command + * @dest: destination node on the card that will receive the command + * @cmd: command data + * @cmd_size: the command size + * @ack: pointer to messages to response + * @ack_size: the size of ack message + * Return: 0 - success, negative - failure + */ +static int api_cmd(struct hinic_api_cmd_chain *chain, + enum hinic_node_id dest, + void *cmd, u16 cmd_size, void *ack, u16 ack_size) +{ + struct hinic_api_cmd_cell_ctxt *ctxt; + + spin_lock(&chain->async_lock); + + ctxt = &chain->cell_ctxt[chain->prod_idx]; + if (chain_busy(chain)) { + spin_unlock(&chain->async_lock); + return -EBUSY; + } + update_api_cmd_ctxt(chain, ctxt); + + prepare_cell(chain, dest, cmd, cmd_size); + + cmd_chain_prod_idx_inc(chain); + + rte_wmb();/* issue the command */ + + issue_api_cmd(chain); + + /* incremented prod idx, update ctxt */ + chain->curr_node = chain->cell_ctxt[chain->prod_idx].cell_vaddr; + + spin_unlock(&chain->async_lock); + + return wait_for_api_cmd_completion(chain, ctxt, ack, ack_size); +} + 
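Editor's sketch: api_cmd() above takes the chain spinlock, refuses new work while chain_busy() sees the next producer slot about to collide with the hardware consumer index, and only then advances prod_idx modulo the power-of-two cell count before ringing the doorbell and polling for completion. The self-contained C sketch below models just that index arithmetic, including the rule that one cell always stays empty; struct toy_chain, toy_chain_busy() and toy_chain_post() are illustrative names, and the doorbell/completion handling is deliberately elided.

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define NUM_CELLS	32U	/* power of two, mirroring API_CHAIN_NUM_CELLS */
	#define MASKED(idx)	((idx) & (NUM_CELLS - 1))

	struct toy_chain {
		uint32_t prod_idx;	/* next cell the driver will fill */
		uint32_t cons_idx;	/* last cell the hardware reports as consumed */
	};

	/* Full when advancing prod_idx would land on cons_idx: one cell stays empty. */
	static bool toy_chain_busy(const struct toy_chain *c)
	{
		return MASKED(c->prod_idx + 1) == c->cons_idx;
	}

	static int toy_chain_post(struct toy_chain *c)
	{
		if (toy_chain_busy(c))
			return -1;	/* the real code returns -EBUSY here */
		/* ... prepare the cell at prod_idx and publish it to hardware ... */
		c->prod_idx = MASKED(c->prod_idx + 1);
		return 0;
	}

	int main(void)
	{
		struct toy_chain c = { .prod_idx = 0, .cons_idx = 0 };
		int posted = 0;

		while (toy_chain_post(&c) == 0)
			posted++;

		/* Prints 31 of 32: one cell is held back so prod never overruns cons. */
		printf("posted %d of %u cells before the chain went busy\n",
		       posted, NUM_CELLS);
		return 0;
	}
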
+/** + * hinic_api_cmd_write - Write API CMD command + * @chain: chain for write command + * @dest: destination node on the card that will receive the command + * @cmd: command data + * @size: the command size + * Return: 0 - success, negative - failure + */ +int hinic_api_cmd_write(struct hinic_api_cmd_chain *chain, + enum hinic_node_id dest, void *cmd, u16 size) +{ + /* Verify the chain type */ + return api_cmd(chain, dest, cmd, size, NULL, 0); +} + +/** + * api_cmd_hw_restart - restart the chain in the HW + * @chain: the API CMD specific chain to restart + */ +static int api_cmd_hw_restart(struct hinic_api_cmd_chain *chain) +{ + struct hinic_hwif *hwif = chain->hwdev->hwif; + unsigned long end; + u32 reg_addr, val; + int err; + + /* Read Modify Write */ + reg_addr = HINIC_CSR_API_CMD_CHAIN_REQ_ADDR(chain->chain_type); + val = hinic_hwif_read_reg(hwif, reg_addr); + + val = HINIC_API_CMD_CHAIN_REQ_CLEAR(val, RESTART); + val |= HINIC_API_CMD_CHAIN_REQ_SET(1, RESTART); + + hinic_hwif_write_reg(hwif, reg_addr, val); + + end = jiffies + msecs_to_jiffies(API_CMD_TIMEOUT); + err = -ETIMEDOUT; + do { + val = hinic_hwif_read_reg(hwif, reg_addr); + + if (!HINIC_API_CMD_CHAIN_REQ_GET(val, RESTART)) { + err = 0; + break; + } + + rte_delay_ms(1); + } while (time_before(jiffies, end)); + + return err; +} + +/** + * api_cmd_ctrl_init - set the control register of a chain + * @chain: the API CMD specific chain to set control register for + */ +static void api_cmd_ctrl_init(struct hinic_api_cmd_chain *chain) +{ + struct hinic_hwif *hwif = chain->hwdev->hwif; + u32 reg_addr, ctrl; + u32 cell_size; + + /* Read Modify Write */ + reg_addr = HINIC_CSR_API_CMD_CHAIN_CTRL_ADDR(chain->chain_type); + + cell_size = (u32)ilog2(chain->cell_size >> + API_CMD_CHAIN_CELL_SIZE_SHIFT); + + ctrl = hinic_hwif_read_reg(hwif, reg_addr); + + ctrl = HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, AEQE_EN) & + HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, CELL_SIZE); + + ctrl |= HINIC_API_CMD_CHAIN_CTRL_SET(0, AEQE_EN) | + HINIC_API_CMD_CHAIN_CTRL_SET(cell_size, CELL_SIZE); + + hinic_hwif_write_reg(hwif, reg_addr, ctrl); +} + +/** + * api_cmd_set_status_addr - set the status address of a chain in the HW + * @chain: the API CMD specific chain to set status address for + */ +static void api_cmd_set_status_addr(struct hinic_api_cmd_chain *chain) +{ + struct hinic_hwif *hwif = chain->hwdev->hwif; + u32 addr, val; + + addr = HINIC_CSR_API_CMD_STATUS_HI_ADDR(chain->chain_type); + val = upper_32_bits(chain->wb_status_paddr); + hinic_hwif_write_reg(hwif, addr, val); + + addr = HINIC_CSR_API_CMD_STATUS_LO_ADDR(chain->chain_type); + val = lower_32_bits(chain->wb_status_paddr); + hinic_hwif_write_reg(hwif, addr, val); +} + +/** + * api_cmd_set_num_cells - set the number cells of a chain in the HW + * @chain: the API CMD specific chain to set the number of cells for + */ +static void api_cmd_set_num_cells(struct hinic_api_cmd_chain *chain) +{ + struct hinic_hwif *hwif = chain->hwdev->hwif; + u32 addr, val; + + addr = HINIC_CSR_API_CMD_CHAIN_NUM_CELLS_ADDR(chain->chain_type); + val = chain->num_cells; + hinic_hwif_write_reg(hwif, addr, val); +} + +/** + * api_cmd_head_init - set the head cell of a chain in the HW + * @chain: the API CMD specific chain to set the head for + */ +static void api_cmd_head_init(struct hinic_api_cmd_chain *chain) +{ + struct hinic_hwif *hwif = chain->hwdev->hwif; + u32 addr, val; + + addr = HINIC_CSR_API_CMD_CHAIN_HEAD_HI_ADDR(chain->chain_type); + val = upper_32_bits(chain->head_cell_paddr); + hinic_hwif_write_reg(hwif, addr, val); 
+ + addr = HINIC_CSR_API_CMD_CHAIN_HEAD_LO_ADDR(chain->chain_type); + val = lower_32_bits(chain->head_cell_paddr); + hinic_hwif_write_reg(hwif, addr, val); +} + +/** + * wait_for_ready_chain - wait for the chain to be ready + * @chain: the API CMD specific chain to wait for + * Return: 0 - success, negative - failure + */ +static int wait_for_ready_chain(struct hinic_api_cmd_chain *chain) +{ + struct hinic_hwif *hwif = chain->hwdev->hwif; + unsigned long end; + u32 addr, val; + u32 hw_cons_idx; + int err; + + end = jiffies + msecs_to_jiffies(API_CMD_TIMEOUT); + + addr = HINIC_CSR_API_CMD_STATUS_0_ADDR(chain->chain_type); + err = -ETIMEDOUT; + do { + val = hinic_hwif_read_reg(hwif, addr); + hw_cons_idx = HINIC_API_CMD_STATUS_GET(val, CONS_IDX); + + /* Wait for HW cons idx to be updated */ + if (hw_cons_idx == chain->cons_idx) { + err = 0; + break; + } + + rte_delay_ms(1); + } while (time_before(jiffies, end)); + + return err; +} + +/** + * api_cmd_chain_hw_clean - clean the HW + * @chain: the API CMD specific chain + */ +static void api_cmd_chain_hw_clean(struct hinic_api_cmd_chain *chain) +{ + struct hinic_hwif *hwif = chain->hwdev->hwif; + u32 addr, ctrl; + + addr = HINIC_CSR_API_CMD_CHAIN_CTRL_ADDR(chain->chain_type); + + ctrl = hinic_hwif_read_reg(hwif, addr); + ctrl = HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, RESTART_EN) & + HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, XOR_ERR) & + HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, AEQE_EN) & + HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, XOR_CHK_EN) & + HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, CELL_SIZE); + + hinic_hwif_write_reg(hwif, addr, ctrl); +} + +/** + * api_cmd_chain_hw_init - initialize the chain in the HW + *(initialize API command csr) + * @chain: the API CMD specific chain to initialize in HW + * Return: 0 - success, negative - failure + */ +static int api_cmd_chain_hw_init(struct hinic_api_cmd_chain *chain) +{ + api_cmd_chain_hw_clean(chain); + + api_cmd_set_status_addr(chain); + + if (api_cmd_hw_restart(chain)) { + PMD_DRV_LOG(ERR, "Restart api_cmd_hw failed"); + return -EBUSY; + } + + api_cmd_ctrl_init(chain); + api_cmd_set_num_cells(chain); + api_cmd_head_init(chain); + + return wait_for_ready_chain(chain); +} + +/** + * free_cmd_buf - free the dma buffer of API CMD command + * @chain: the API CMD specific chain of the cmd + * @cell_idx: the cell index of the cmd + */ +static void free_cmd_buf(struct hinic_api_cmd_chain *chain, u32 cell_idx) +{ + struct hinic_api_cmd_cell_ctxt *cell_ctxt; + void *dev = chain->hwdev; + + cell_ctxt = &chain->cell_ctxt[cell_idx]; + + dma_free_coherent(dev, (API_CMD_BUF_SIZE + API_PAYLOAD_ALIGN_SIZE), + cell_ctxt->api_cmd_vaddr_free, + cell_ctxt->api_cmd_paddr_free); +} + +/** + * alloc_cmd_buf - allocate a dma buffer for API CMD command + * @chain: the API CMD specific chain for the cmd + * @cell: the cell in the HW for the cmd + * @cell_idx: the index of the cell + * Return: 0 - success, negative - failure + */ +static int alloc_cmd_buf(struct hinic_api_cmd_chain *chain, + struct hinic_api_cmd_cell *cell, u32 cell_idx) +{ + void *dev = chain->hwdev; + struct hinic_api_cmd_cell_ctxt *cell_ctxt; + dma_addr_t cmd_paddr = 0; + void *cmd_vaddr; + void *cmd_vaddr_alloc; + int err = 0; + + cmd_vaddr_alloc = dma_zalloc_coherent(dev, (API_CMD_BUF_SIZE + + API_PAYLOAD_ALIGN_SIZE), + &cmd_paddr, SOCKET_ID_ANY); + if (!cmd_vaddr_alloc) { + PMD_DRV_LOG(ERR, "Allocate API CMD dma memory failed"); + return -ENOMEM; + } + + cell_ctxt = &chain->cell_ctxt[cell_idx]; + + cell_ctxt->api_cmd_paddr_free = cmd_paddr; + 
cell_ctxt->api_cmd_vaddr_free = cmd_vaddr_alloc; + cmd_vaddr = PTR_ALIGN(cmd_vaddr_alloc, API_PAYLOAD_ALIGN_SIZE); + cmd_paddr = cmd_paddr + ((u64)cmd_vaddr - (u64)cmd_vaddr_alloc); + + cell_ctxt->api_cmd_vaddr = cmd_vaddr; + cell_ctxt->api_cmd_paddr = cmd_paddr; + + /* set the cmd DMA address in the cell */ + switch (chain->chain_type) { + case HINIC_API_CMD_PMD_WRITE_TO_MGMT: + case HINIC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU: + cell->write.hw_cmd_paddr = cpu_to_be64(cmd_paddr); + break; + default: + PMD_DRV_LOG(ERR, "Unknown API CMD chain type"); + free_cmd_buf(chain, cell_idx); + err = -EINVAL; + break; + } + + return err; +} + +/** + * api_cmd_create_cell - create API CMD cell of specific chain + * @chain: the API CMD specific chain to create its cell + * @cell_idx: the cell index to create + * @pre_node: previous cell + * @node_vaddr: the virt addr of the cell + * Return: 0 - success, negative - failure + */ +static int api_cmd_create_cell(struct hinic_api_cmd_chain *chain, + u32 cell_idx, + struct hinic_api_cmd_cell *pre_node, + struct hinic_api_cmd_cell **node_vaddr) +{ + void *dev = chain->hwdev; + struct hinic_api_cmd_cell_ctxt *cell_ctxt; + struct hinic_api_cmd_cell *node; + dma_addr_t node_paddr = 0; + void *node_vaddr_alloc; + int err = 0; + + node_vaddr_alloc = dma_zalloc_coherent(dev, (chain->cell_size + + API_CMD_NODE_ALIGN_SIZE), + &node_paddr, SOCKET_ID_ANY); + if (!node_vaddr_alloc) { + PMD_DRV_LOG(ERR, "Allocate dma API CMD cell failed"); + return -ENOMEM; + } + + cell_ctxt = &chain->cell_ctxt[cell_idx]; + + cell_ctxt->cell_vaddr_free = node_vaddr_alloc; + cell_ctxt->cell_paddr_free = node_paddr; + node = (struct hinic_api_cmd_cell *)PTR_ALIGN(node_vaddr_alloc, + API_CMD_NODE_ALIGN_SIZE); + node_paddr = node_paddr + ((u64)node - (u64)node_vaddr_alloc); + + node->read.hw_wb_resp_paddr = 0; + + cell_ctxt->cell_vaddr = node; + cell_ctxt->cell_paddr = node_paddr; + + if (!pre_node) { + chain->head_node = node; + chain->head_cell_paddr = node_paddr; + } else { + /* The data in the HW should be in Big Endian Format */ + pre_node->next_cell_paddr = cpu_to_be64(node_paddr); + } + + /* Driver software should make sure that there is an empty + * API command cell at the end the chain + */ + node->next_cell_paddr = 0; + + switch (chain->chain_type) { + case HINIC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU: + case HINIC_API_CMD_PMD_WRITE_TO_MGMT: + err = alloc_cmd_buf(chain, node, cell_idx); + if (err) { + PMD_DRV_LOG(ERR, "Allocate cmd buffer failed"); + goto alloc_cmd_buf_err; + } + break; + default: + PMD_DRV_LOG(ERR, "Unsupported API CMD chain type"); + err = -EINVAL; + goto alloc_cmd_buf_err; + } + + *node_vaddr = node; + + return 0; + +alloc_cmd_buf_err: + dma_free_coherent(dev, (chain->cell_size + API_CMD_NODE_ALIGN_SIZE), + node_vaddr_alloc, cell_ctxt->cell_paddr_free); + + return err; +} + +/** + * api_cmd_destroy_cell - destroy API CMD cell of specific chain + * @chain: the API CMD specific chain to destroy its cell + * @cell_idx: the cell to destroy + */ +static void api_cmd_destroy_cell(struct hinic_api_cmd_chain *chain, + u32 cell_idx) +{ + void *dev = chain->hwdev; + struct hinic_api_cmd_cell_ctxt *cell_ctxt; + struct hinic_api_cmd_cell *node; + dma_addr_t node_paddr; + + cell_ctxt = &chain->cell_ctxt[cell_idx]; + + node = (struct hinic_api_cmd_cell *)(cell_ctxt->cell_vaddr_free); + node_paddr = cell_ctxt->cell_paddr_free; + + if (cell_ctxt->api_cmd_vaddr) { + switch (chain->chain_type) { + case HINIC_API_CMD_PMD_WRITE_TO_MGMT: + case HINIC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU: + 
free_cmd_buf(chain, cell_idx); + break; + default: + break; + } + + dma_free_coherent(dev, (chain->cell_size + + API_CMD_NODE_ALIGN_SIZE), + node, node_paddr); + } +} + +/** + * api_cmd_destroy_cells - destroy API CMD cells of specific chain + * @chain: the API CMD specific chain to destroy its cells + * @num_cells: number of cells to destroy + */ +static void api_cmd_destroy_cells(struct hinic_api_cmd_chain *chain, + u32 num_cells) +{ + u32 cell_idx; + + for (cell_idx = 0; cell_idx < num_cells; cell_idx++) + api_cmd_destroy_cell(chain, cell_idx); +} + +/** + * api_cmd_create_cells - create API CMD cells for specific chain + * @chain: the API CMD specific chain + * Return: 0 - success, negative - failure + */ +static int api_cmd_create_cells(struct hinic_api_cmd_chain *chain) +{ + struct hinic_api_cmd_cell *node = NULL, *pre_node = NULL; + u32 cell_idx; + int err; + + for (cell_idx = 0; cell_idx < chain->num_cells; cell_idx++) { + err = api_cmd_create_cell(chain, cell_idx, pre_node, &node); + if (err) { + PMD_DRV_LOG(ERR, "Create API CMD cell failed"); + goto create_cell_err; + } + + pre_node = node; + } + + if (!node) { + err = -EFAULT; + goto create_cell_err; + } + + /* set the Final node to point on the start */ + node->next_cell_paddr = cpu_to_be64(chain->head_cell_paddr); + + /* set the current node to be the head */ + chain->curr_node = chain->head_node; + return 0; + +create_cell_err: + api_cmd_destroy_cells(chain, cell_idx); + return err; +} + +/** + * api_chain_init - initialize API CMD specific chain + * @chain: the API CMD specific chain to initialize + * @attr: attributes to set in the chain + * Return: 0 - success, negative - failure + */ +static int api_chain_init(struct hinic_api_cmd_chain *chain, + struct hinic_api_cmd_chain_attr *attr) +{ + void *dev = chain->hwdev; + size_t cell_ctxt_size; + int err; + + chain->chain_type = attr->chain_type; + chain->num_cells = attr->num_cells; + chain->cell_size = attr->cell_size; + chain->rsp_size = attr->rsp_size; + + chain->prod_idx = 0; + chain->cons_idx = 0; + + spin_lock_init(&chain->async_lock); + + cell_ctxt_size = chain->num_cells * sizeof(*chain->cell_ctxt); + chain->cell_ctxt = kzalloc(cell_ctxt_size, GFP_KERNEL); + if (!chain->cell_ctxt) { + PMD_DRV_LOG(ERR, "Allocate cell contexts for a chain failed"); + err = -ENOMEM; + goto alloc_cell_ctxt_err; + } + + chain->wb_status = (struct hinic_api_cmd_status *) + dma_zalloc_coherent(dev, sizeof(*chain->wb_status), + &chain->wb_status_paddr, SOCKET_ID_ANY); + if (!chain->wb_status) { + PMD_DRV_LOG(ERR, "Allocate DMA wb status failed"); + err = -ENOMEM; + goto alloc_wb_status_err; + } + + return 0; + +alloc_wb_status_err: + kfree(chain->cell_ctxt); + +alloc_cell_ctxt_err: + + return err; +} + +/** + * api_chain_free - free API CMD specific chain + * @chain: the API CMD specific chain to free + */ +static void api_chain_free(struct hinic_api_cmd_chain *chain) +{ + void *dev = chain->hwdev; + + dma_free_coherent(dev, sizeof(*chain->wb_status), + chain->wb_status, chain->wb_status_paddr); + kfree(chain->cell_ctxt); +} + +/** + * api_cmd_create_chain - create API CMD specific chain + * @cmd_chain: the API CMD specific chain to create + * @attr: attributes to set in the chain + * Return: 0 - success, negative - failure + */ +static int api_cmd_create_chain(struct hinic_api_cmd_chain **cmd_chain, + struct hinic_api_cmd_chain_attr *attr) +{ + struct hinic_hwdev *hwdev = attr->hwdev; + struct hinic_api_cmd_chain *chain; + int err; + + if (attr->num_cells & (attr->num_cells - 1)) { + 
PMD_DRV_LOG(ERR, "Invalid number of cells, must be power of 2"); + return -EINVAL; + } + + chain = kzalloc(sizeof(*chain), GFP_KERNEL); + if (!chain) { + PMD_DRV_LOG(ERR, "Allocate memory for the chain failed"); + return -ENOMEM; + } + + chain->hwdev = hwdev; + + err = api_chain_init(chain, attr); + if (err) { + PMD_DRV_LOG(ERR, "Initialize chain failed"); + goto chain_init_err; + } + + err = api_cmd_create_cells(chain); + if (err) { + PMD_DRV_LOG(ERR, "Create cells for API CMD chain failed"); + goto create_cells_err; + } + + err = api_cmd_chain_hw_init(chain); + if (err) { + PMD_DRV_LOG(ERR, "Initialize chain hw info failed"); + goto chain_hw_init_err; + } + + *cmd_chain = chain; + return 0; + +chain_hw_init_err: + api_cmd_destroy_cells(chain, chain->num_cells); + +create_cells_err: + api_chain_free(chain); + +chain_init_err: + kfree(chain); + return err; +} + +/** + * api_cmd_destroy_chain - destroy API CMD specific chain + * @chain: the API CMD specific chain to destroy + */ +static void api_cmd_destroy_chain(struct hinic_api_cmd_chain *chain) +{ + api_cmd_destroy_cells(chain, chain->num_cells); + api_chain_free(chain); + kfree(chain); +} + +/** + * hinic_api_cmd_init - Initialize all the API CMD chains + * @hwdev: the hardware interface of a pci function device + * @chain: the API CMD chains that will be initialized + * Return: 0 - success, negative - failure + */ +int hinic_api_cmd_init(struct hinic_hwdev *hwdev, + struct hinic_api_cmd_chain **chain) +{ + struct hinic_api_cmd_chain_attr attr; + enum hinic_api_cmd_chain_type chain_type, i; + int err; + + attr.hwdev = hwdev; + attr.num_cells = API_CHAIN_NUM_CELLS; + attr.cell_size = API_CHAIN_CELL_SIZE; + attr.rsp_size = API_CHAIN_RSP_DATA_SIZE; + + chain_type = HINIC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU; + for ( ; chain_type < HINIC_API_CMD_MAX; chain_type++) { + attr.chain_type = chain_type; + err = api_cmd_create_chain(&chain[chain_type], &attr); + if (err) { + PMD_DRV_LOG(ERR, "Create chain %d failed", + chain_type); + goto create_chain_err; + } + } + + return 0; + +create_chain_err: + i = HINIC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU; + for (; i < chain_type; i++) + api_cmd_destroy_chain(chain[i]); + + return err; +} + +/** + * hinic_api_cmd_free - free the API CMD chains + * @chain: the API CMD chains that will be freed + */ +void hinic_api_cmd_free(struct hinic_api_cmd_chain **chain) +{ + enum hinic_api_cmd_chain_type chain_type; + + chain_type = HINIC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU; + for ( ; chain_type < HINIC_API_CMD_MAX; chain_type++) + api_cmd_destroy_chain(chain[chain_type]); +} diff --git a/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_api_cmd.h b/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_api_cmd.h new file mode 100644 index 000000000..a48c831bb --- /dev/null +++ b/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_api_cmd.h @@ -0,0 +1,271 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Huawei Technologies Co., Ltd + */ + +#ifndef _HINIC_PMD_API_CMD_H_ +#define _HINIC_PMD_API_CMD_H_ + +#define HINIC_API_CMD_CELL_CTRL_CELL_LEN_SHIFT 0 +#define HINIC_API_CMD_CELL_CTRL_RD_DMA_ATTR_OFF_SHIFT 16 +#define HINIC_API_CMD_CELL_CTRL_WR_DMA_ATTR_OFF_SHIFT 24 +#define HINIC_API_CMD_CELL_CTRL_XOR_CHKSUM_SHIFT 56 + +#define HINIC_API_CMD_CELL_CTRL_CELL_LEN_MASK 0x3FU +#define HINIC_API_CMD_CELL_CTRL_RD_DMA_ATTR_OFF_MASK 0x3FU +#define HINIC_API_CMD_CELL_CTRL_WR_DMA_ATTR_OFF_MASK 0x3FU +#define HINIC_API_CMD_CELL_CTRL_XOR_CHKSUM_MASK 0xFFU + +#define HINIC_API_CMD_CELL_CTRL_SET(val, member) \ + ((((u64)val) & 
HINIC_API_CMD_CELL_CTRL_##member##_MASK) << \ + HINIC_API_CMD_CELL_CTRL_##member##_SHIFT) + +#define HINIC_API_CMD_CELL_CTRL_CLEAR(val, member) \ + ((val) & (~((u64)HINIC_API_CMD_CELL_CTRL_##member##_MASK << \ + HINIC_API_CMD_CELL_CTRL_##member##_SHIFT))) + +#define HINIC_API_CMD_DESC_API_TYPE_SHIFT 0 +#define HINIC_API_CMD_DESC_RD_WR_SHIFT 1 +#define HINIC_API_CMD_DESC_MGMT_BYPASS_SHIFT 2 +#define HINIC_API_CMD_DESC_RESP_AEQE_EN_SHIFT 3 +#define HINIC_API_CMD_DESC_PRIV_DATA_SHIFT 8 +#define HINIC_API_CMD_DESC_DEST_SHIFT 32 +#define HINIC_API_CMD_DESC_SIZE_SHIFT 40 +#define HINIC_API_CMD_DESC_XOR_CHKSUM_SHIFT 56 + +#define HINIC_API_CMD_DESC_API_TYPE_MASK 0x1U +#define HINIC_API_CMD_DESC_RD_WR_MASK 0x1U +#define HINIC_API_CMD_DESC_MGMT_BYPASS_MASK 0x1U +#define HINIC_API_CMD_DESC_RESP_AEQE_EN_MASK 0x1U +#define HINIC_API_CMD_DESC_DEST_MASK 0x1FU +#define HINIC_API_CMD_DESC_SIZE_MASK 0x7FFU +#define HINIC_API_CMD_DESC_XOR_CHKSUM_MASK 0xFFU +#define HINIC_API_CMD_DESC_PRIV_DATA_MASK 0xFFFFFFU + +#define HINIC_API_CMD_DESC_SET(val, member) \ + ((((u64)val) & HINIC_API_CMD_DESC_##member##_MASK) << \ + HINIC_API_CMD_DESC_##member##_SHIFT) + +#define HINIC_API_CMD_DESC_CLEAR(val, member) \ + ((val) & (~((u64)HINIC_API_CMD_DESC_##member##_MASK << \ + HINIC_API_CMD_DESC_##member##_SHIFT))) + +#define HINIC_API_CMD_STATUS_HEADER_VALID_SHIFT 0 +#define HINIC_API_CMD_STATUS_HEADER_CHAIN_ID_SHIFT 16 + +#define HINIC_API_CMD_STATUS_HEADER_VALID_MASK 0xFFU +#define HINIC_API_CMD_STATUS_HEADER_CHAIN_ID_MASK 0xFFU + +#define HINIC_API_CMD_STATUS_VALID_CODE 0xFF + +#define HINIC_API_CMD_STATUS_HEADER_GET(val, member) \ + (((val) >> HINIC_API_CMD_STATUS_HEADER_##member##_SHIFT) & \ + HINIC_API_CMD_STATUS_HEADER_##member##_MASK) + +#define HINIC_API_CMD_CHAIN_REQ_RESTART_SHIFT 1 +#define HINIC_API_CMD_CHAIN_REQ_WB_TRIGGER_SHIFT 2 + +#define HINIC_API_CMD_CHAIN_REQ_RESTART_MASK 0x1U +#define HINIC_API_CMD_CHAIN_REQ_WB_TRIGGER_MASK 0x1U + +#define HINIC_API_CMD_CHAIN_REQ_SET(val, member) \ + (((val) & HINIC_API_CMD_CHAIN_REQ_##member##_MASK) << \ + HINIC_API_CMD_CHAIN_REQ_##member##_SHIFT) + +#define HINIC_API_CMD_CHAIN_REQ_GET(val, member) \ + (((val) >> HINIC_API_CMD_CHAIN_REQ_##member##_SHIFT) & \ + HINIC_API_CMD_CHAIN_REQ_##member##_MASK) + +#define HINIC_API_CMD_CHAIN_REQ_CLEAR(val, member) \ + ((val) & (~(HINIC_API_CMD_CHAIN_REQ_##member##_MASK << \ + HINIC_API_CMD_CHAIN_REQ_##member##_SHIFT))) + +#define HINIC_API_CMD_CHAIN_CTRL_RESTART_EN_SHIFT 1 +#define HINIC_API_CMD_CHAIN_CTRL_XOR_ERR_SHIFT 2 +#define HINIC_API_CMD_CHAIN_CTRL_AEQE_EN_SHIFT 4 +#define HINIC_API_CMD_CHAIN_CTRL_AEQ_ID_SHIFT 8 +#define HINIC_API_CMD_CHAIN_CTRL_XOR_CHK_EN_SHIFT 28 +#define HINIC_API_CMD_CHAIN_CTRL_CELL_SIZE_SHIFT 30 + +#define HINIC_API_CMD_CHAIN_CTRL_RESTART_EN_MASK 0x1U +#define HINIC_API_CMD_CHAIN_CTRL_XOR_ERR_MASK 0x1U +#define HINIC_API_CMD_CHAIN_CTRL_AEQE_EN_MASK 0x1U +#define HINIC_API_CMD_CHAIN_CTRL_AEQ_ID_MASK 0x3U +#define HINIC_API_CMD_CHAIN_CTRL_XOR_CHK_EN_MASK 0x3U +#define HINIC_API_CMD_CHAIN_CTRL_CELL_SIZE_MASK 0x3U + +#define HINIC_API_CMD_CHAIN_CTRL_SET(val, member) \ + (((val) & HINIC_API_CMD_CHAIN_CTRL_##member##_MASK) << \ + HINIC_API_CMD_CHAIN_CTRL_##member##_SHIFT) + +#define HINIC_API_CMD_CHAIN_CTRL_CLEAR(val, member) \ + ((val) & (~(HINIC_API_CMD_CHAIN_CTRL_##member##_MASK << \ + HINIC_API_CMD_CHAIN_CTRL_##member##_SHIFT))) + +#define HINIC_API_CMD_RESP_HEAD_VALID_MASK 0xFF +#define HINIC_API_CMD_RESP_HEAD_VALID_CODE 0xFF + +#define HINIC_API_CMD_RESP_HEADER_VALID(val) \ + (((val) & 
HINIC_API_CMD_RESP_HEAD_VALID_MASK) == \ + HINIC_API_CMD_RESP_HEAD_VALID_CODE) + +#define HINIC_API_CMD_RESP_HEAD_STATUS_SHIFT 8 +#define HINIC_API_CMD_RESP_HEAD_STATUS_MASK 0xFFU + +#define HINIC_API_CMD_RESP_HEAD_ERR_CODE 0x1 +#define HINIC_API_CMD_RESP_HEAD_ERR(val) \ + ((((val) >> HINIC_API_CMD_RESP_HEAD_STATUS_SHIFT) & \ + HINIC_API_CMD_RESP_HEAD_STATUS_MASK) == \ + HINIC_API_CMD_RESP_HEAD_ERR_CODE) + +#define HINIC_API_CMD_RESP_HEAD_CHAIN_ID_SHIFT 16 +#define HINIC_API_CMD_RESP_HEAD_CHAIN_ID_MASK 0xFF + +#define HINIC_API_CMD_RESP_RESERVED 3 +#define HINIC_API_CMD_RESP_HEAD_CHAIN_ID(val) \ + (((val) >> HINIC_API_CMD_RESP_HEAD_CHAIN_ID_SHIFT) & \ + HINIC_API_CMD_RESP_HEAD_CHAIN_ID_MASK) + +#define HINIC_API_CMD_RESP_HEAD_DRIVER_PRIV_SHIFT 40 +#define HINIC_API_CMD_RESP_HEAD_DRIVER_PRIV_MASK 0xFFFFFFU + +#define HINIC_API_CMD_RESP_HEAD_DRIVER_PRIV(val) \ + (u16)(((val) >> HINIC_API_CMD_RESP_HEAD_DRIVER_PRIV_SHIFT) & \ + HINIC_API_CMD_RESP_HEAD_DRIVER_PRIV_MASK) + +#define HINIC_API_CMD_STATUS_HEAD_VALID_MASK 0xFFU +#define HINIC_API_CMD_STATUS_HEAD_VALID_SHIFT 0 + +#define HINIC_API_CMD_STATUS_HEAD_CHAIN_ID_MASK 0xFFU +#define HINIC_API_CMD_STATUS_HEAD_CHAIN_ID_VALID_SHIFT 16 + +#define HINIC_API_CMD_STATUS_CONS_IDX_MASK 0xFFFFFFU +#define HINIC_API_CMD_STATUS_CONS_IDX_SHIFT 0 + +#define HINIC_API_CMD_STATUS_FSM_MASK 0xFU +#define HINIC_API_CMD_STATUS_FSM_SHIFT 24 + +#define HINIC_API_CMD_STATUS_CHKSUM_ERR_MASK 0x3U +#define HINIC_API_CMD_STATUS_CHKSUM_ERR_SHIFT 28 + +#define HINIC_API_CMD_STATUS_CPLD_ERR_MASK 0x1U +#define HINIC_API_CMD_STATUS_CPLD_ERR_SHIFT 30 + +#define HINIC_API_CMD_STATUS_CHAIN_ID(val) \ + (((val) >> HINIC_API_CMD_STATUS_HEAD_CHAIN_ID_VALID_SHIFT) & \ + HINIC_API_CMD_STATUS_HEAD_VALID_MASK) + +#define HINIC_API_CMD_STATUS_CONS_IDX(val) \ + ((val) & HINIC_API_CMD_STATUS_CONS_IDX_MASK) + +#define HINIC_API_CMD_STATUS_CHKSUM_ERR(val) \ + (((val) >> HINIC_API_CMD_STATUS_CHKSUM_ERR_SHIFT) & \ + HINIC_API_CMD_STATUS_CHKSUM_ERR_MASK) + +#define HINIC_API_CMD_STATUS_GET(val, member) \ + (((val) >> HINIC_API_CMD_STATUS_##member##_SHIFT) & \ + HINIC_API_CMD_STATUS_##member##_MASK) + +enum hinic_api_cmd_chain_type { + /* read from mgmt cpu command with completion */ + HINIC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU = 6, + /* PMD business api chain */ + HINIC_API_CMD_PMD_WRITE_TO_MGMT = 7, + HINIC_API_CMD_MAX +}; + +enum hinic_node_id { + HINIC_NODE_ID_MGMT_HOST = 21, +}; + +struct hinic_api_cmd_status { + u64 header; + u32 buf_desc; + u32 cell_addr_hi; + u32 cell_addr_lo; + u32 rsvd0; + u64 rsvd1; +}; + +/* HW struct */ +struct hinic_api_cmd_cell { + u64 ctrl; + + /* address is 64 bit in HW struct */ + u64 next_cell_paddr; + + u64 desc; + + /* HW struct */ + union { + struct { + u64 hw_cmd_paddr; + } write; + + struct { + u64 hw_wb_resp_paddr; + u64 hw_cmd_paddr; + } read; + }; +}; + +struct hinic_api_cmd_cell_ctxt { + dma_addr_t cell_paddr; + struct hinic_api_cmd_cell *cell_vaddr; + + dma_addr_t cell_paddr_free; + void *cell_vaddr_free; + + dma_addr_t api_cmd_paddr; + void *api_cmd_vaddr; + + dma_addr_t api_cmd_paddr_free; + void *api_cmd_vaddr_free; + + int status; + + u32 saved_prod_idx; +}; + +struct hinic_api_cmd_chain_attr { + struct hinic_hwdev *hwdev; + enum hinic_api_cmd_chain_type chain_type; + + u32 num_cells; + u16 rsp_size; + u16 cell_size; +}; + +struct hinic_api_cmd_chain { + struct hinic_hwdev *hwdev; + enum hinic_api_cmd_chain_type chain_type; + + u32 num_cells; + u16 cell_size; + u16 rsp_size; + + /* HW members is 24 bit format */ + u32 prod_idx; + u32 cons_idx; + + 
/* Async cmd can not be scheduled */ + spinlock_t async_lock; + + dma_addr_t wb_status_paddr; + struct hinic_api_cmd_status *wb_status; + + dma_addr_t head_cell_paddr; + struct hinic_api_cmd_cell *head_node; + + struct hinic_api_cmd_cell_ctxt *cell_ctxt; + struct hinic_api_cmd_cell *curr_node; +}; + +int hinic_api_cmd_write(struct hinic_api_cmd_chain *chain, + enum hinic_node_id dest, void *cmd, u16 size); + +int hinic_api_cmd_init(struct hinic_hwdev *hwdev, + struct hinic_api_cmd_chain **chain); + +void hinic_api_cmd_free(struct hinic_api_cmd_chain **chain); + +#endif /* _HINIC_PMD_API_CMD_H_ */ diff --git a/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_cfg.c b/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_cfg.c new file mode 100644 index 000000000..2d25dc9d5 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_cfg.c @@ -0,0 +1,244 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Huawei Technologies Co., Ltd + */ + +#include "hinic_compat.h" +#include "hinic_pmd_hwdev.h" +#include "hinic_pmd_hwif.h" +#include "hinic_pmd_mgmt.h" +#include "hinic_pmd_eqs.h" +#include "hinic_pmd_cfg.h" +#include "hinic_pmd_mbox.h" + +bool hinic_support_nic(struct hinic_hwdev *hwdev, struct nic_service_cap *cap) +{ + if (!IS_NIC_TYPE(hwdev)) + return false; + + if (cap) + memcpy(cap, &hwdev->cfg_mgmt->svc_cap.nic_cap, sizeof(*cap)); + + return true; +} + +static void hinic_parse_shared_res_cap(struct service_cap *cap, + struct hinic_dev_cap *dev_cap, + __rte_unused enum func_type type) +{ + struct host_shared_resource_cap *shared_cap = &cap->shared_res_cap; + + shared_cap->host_pctxs = dev_cap->host_pctx_num; + + if (dev_cap->host_sf_en) + cap->sf_en = true; + else + cap->sf_en = false; + + shared_cap->host_cctxs = dev_cap->host_ccxt_num; + shared_cap->host_scqs = dev_cap->host_scq_num; + shared_cap->host_srqs = dev_cap->host_srq_num; + shared_cap->host_mpts = dev_cap->host_mpt_num; + + PMD_DRV_LOG(INFO, "Get share resource capability:"); + PMD_DRV_LOG(INFO, "host_pctxs: 0x%x, host_cctxs: 0x%x, host_scqs: 0x%x, host_srqs: 0x%x, host_mpts: 0x%x", + shared_cap->host_pctxs, shared_cap->host_cctxs, + shared_cap->host_scqs, shared_cap->host_srqs, + shared_cap->host_mpts); +} + +static void hinic_parse_l2nic_res_cap(struct service_cap *cap, + struct hinic_dev_cap *dev_cap, + enum func_type type) +{ + struct nic_service_cap *nic_cap = &cap->nic_cap; + + if (type == TYPE_PF || type == TYPE_PPF) { + nic_cap->max_sqs = dev_cap->nic_max_sq + 1; + nic_cap->max_rqs = dev_cap->nic_max_rq + 1; + nic_cap->vf_max_sqs = dev_cap->nic_vf_max_sq + 1; + nic_cap->vf_max_rqs = dev_cap->nic_vf_max_rq + 1; + } else { + nic_cap->max_sqs = dev_cap->nic_max_sq; + nic_cap->max_rqs = dev_cap->nic_max_rq; + nic_cap->vf_max_sqs = 0; + nic_cap->vf_max_rqs = 0; + } + + if (dev_cap->nic_lro_en) + nic_cap->lro_en = true; + else + nic_cap->lro_en = false; + + nic_cap->lro_sz = dev_cap->nic_lro_sz; + nic_cap->tso_sz = dev_cap->nic_tso_sz; + + PMD_DRV_LOG(INFO, "Get l2nic resource capability:"); + PMD_DRV_LOG(INFO, "max_sqs: 0x%x, max_rqs: 0x%x, vf_max_sqs: 0x%x, vf_max_rqs: 0x%x", + nic_cap->max_sqs, nic_cap->max_rqs, + nic_cap->vf_max_sqs, nic_cap->vf_max_rqs); +} + +u16 hinic_func_max_qnum(void *hwdev) +{ + struct hinic_hwdev *dev = hwdev; + + return dev->cfg_mgmt->svc_cap.max_sqs; +} + +int init_cfg_mgmt(struct hinic_hwdev *hwdev) +{ + struct cfg_mgmt_info *cfg_mgmt; + + cfg_mgmt = kzalloc(sizeof(*cfg_mgmt), GFP_KERNEL); + if (!cfg_mgmt) + return -ENOMEM; + + hwdev->cfg_mgmt = cfg_mgmt; + cfg_mgmt->hwdev = 
hwdev; + + return 0; +} + +void free_cfg_mgmt(struct hinic_hwdev *hwdev) +{ + kfree(hwdev->cfg_mgmt); + hwdev->cfg_mgmt = NULL; +} + +static void hinic_parse_pub_res_cap(struct service_cap *cap, + struct hinic_dev_cap *dev_cap, + enum func_type type) +{ + cap->host_id = dev_cap->host_id; + cap->ep_id = dev_cap->ep_id; + cap->max_cos_id = dev_cap->max_cos_id; + cap->er_id = dev_cap->er_id; + cap->port_id = dev_cap->port_id; + + if (type == TYPE_PF || type == TYPE_PPF) { + cap->max_vf = dev_cap->max_vf; + cap->pf_num = dev_cap->pf_num; + cap->pf_id_start = dev_cap->pf_id_start; + cap->vf_num = dev_cap->vf_num; + cap->vf_id_start = dev_cap->vf_id_start; + cap->max_sqs = dev_cap->nic_max_sq + 1; + cap->max_rqs = dev_cap->nic_max_rq + 1; + } else { + cap->max_vf = 0; + cap->max_sqs = dev_cap->nic_max_sq; + cap->max_rqs = dev_cap->nic_max_rq; + } + + cap->chip_svc_type = dev_cap->svc_cap_en; + cap->host_total_function = dev_cap->host_total_func; + cap->host_oq_id_mask_val = dev_cap->host_oq_id_mask_val; + + PMD_DRV_LOG(INFO, "Get public resource capability:"); + PMD_DRV_LOG(INFO, "host_id: 0x%x, ep_id: 0x%x, intr_type: 0x%x, max_cos_id: 0x%x, er_id: 0x%x, port_id: 0x%x", + cap->host_id, cap->ep_id, cap->intr_chip_en, + cap->max_cos_id, cap->er_id, cap->port_id); + PMD_DRV_LOG(INFO, "host_total_function: 0x%x, host_oq_id_mask_val: 0x%x, max_vf: 0x%x", + cap->host_total_function, cap->host_oq_id_mask_val, + cap->max_vf); + PMD_DRV_LOG(INFO, "chip_svc_type: 0x%x", cap->chip_svc_type); + PMD_DRV_LOG(INFO, "pf_num: 0x%x, pf_id_start: 0x%x, vf_num: 0x%x, vf_id_start: 0x%x", + cap->pf_num, cap->pf_id_start, + cap->vf_num, cap->vf_id_start); +} + +static void parse_dev_cap(struct hinic_hwdev *dev, + struct hinic_dev_cap *dev_cap, + enum func_type type) +{ + struct service_cap *cap = &dev->cfg_mgmt->svc_cap; + + /* Public resource */ + hinic_parse_pub_res_cap(cap, dev_cap, type); + + /* PPF managed dynamic resource */ + if (type == TYPE_PPF) + hinic_parse_shared_res_cap(cap, dev_cap, type); + + /* L2 NIC resource */ + if (IS_NIC_TYPE(dev)) + hinic_parse_l2nic_res_cap(cap, dev_cap, type); +} + +static int get_cap_from_fw(struct hinic_hwdev *dev, enum func_type type) +{ + int err; + u16 in_len, out_len; + struct hinic_dev_cap dev_cap; + + memset(&dev_cap, 0, sizeof(dev_cap)); + in_len = sizeof(dev_cap); + out_len = in_len; + dev_cap.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1; + err = hinic_msg_to_mgmt_sync(dev, HINIC_MOD_CFGM, HINIC_CFG_NIC_CAP, + &dev_cap, in_len, &dev_cap, &out_len, 0); + if (err || dev_cap.mgmt_msg_head.status || !out_len) { + PMD_DRV_LOG(ERR, "Get capability from FW failed, err: %d, status: %d, out_len: %d", + err, dev_cap.mgmt_msg_head.status, out_len); + return -EFAULT; + } + + parse_dev_cap(dev, &dev_cap, type); + return 0; +} + +static int get_cap_from_pf(struct hinic_hwdev *dev, enum func_type type) +{ + int err; + u16 in_len, out_len; + struct hinic_dev_cap dev_cap; + + memset(&dev_cap, 0, sizeof(dev_cap)); + in_len = sizeof(dev_cap); + out_len = in_len; + err = hinic_mbox_to_pf(dev, HINIC_MOD_CFGM, HINIC_CFG_MBOX_CAP, + &dev_cap, in_len, &dev_cap, &out_len, + CFG_MAX_CMD_TIMEOUT); + if (err || dev_cap.mgmt_msg_head.status || !out_len) { + PMD_DRV_LOG(ERR, "Get capability from PF failed, err: %d, status: %d, out_len: %d", + err, dev_cap.mgmt_msg_head.status, out_len); + return -EFAULT; + } + + parse_dev_cap(dev, &dev_cap, type); + return 0; +} + +static int get_dev_cap(struct hinic_hwdev *dev) +{ + int err; + enum func_type type = HINIC_FUNC_TYPE(dev); + + switch (type) { + case 
TYPE_PF: + case TYPE_PPF: + err = get_cap_from_fw(dev, type); + if (err) { + PMD_DRV_LOG(ERR, "Get PF/PPF capability failed"); + return err; + } + break; + case TYPE_VF: + err = get_cap_from_pf(dev, type); + if (err) { + PMD_DRV_LOG(ERR, "Get VF capability failed, err: %d", + err); + return err; + } + break; + default: + PMD_DRV_LOG(ERR, "Unsupported PCI function type"); + return -EINVAL; + } + + return 0; +} + +int hinic_init_capability(struct hinic_hwdev *hwdev) +{ + return get_dev_cap(hwdev); +} diff --git a/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_cfg.h b/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_cfg.h new file mode 100644 index 000000000..1741ca44a --- /dev/null +++ b/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_cfg.h @@ -0,0 +1,145 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Huawei Technologies Co., Ltd + */ + +#ifndef _HINIC_PMD_CFG_H_ +#define _HINIC_PMD_CFG_H_ + +#define CFG_MAX_CMD_TIMEOUT 8000 /* ms */ + +#define IS_NIC_TYPE(dev) \ + ((dev)->cfg_mgmt->svc_cap.chip_svc_type & CFG_SVC_NIC_BIT0) + +struct host_shared_resource_cap { + u32 host_pctxs; /* Parent Context max 1M, IOE and FCoE max 8K flows */ + u32 host_cctxs; /* Child Context: max 8K */ + u32 host_scqs; /* shared CQ, chip interface module uses 1 SCQ + * TOE/IOE/FCoE each uses 1 SCQ + * RoCE/IWARP uses multiple SCQs + * So 6 SCQ least + */ + u32 host_srqs; /* SRQ number: 256K */ + u32 host_mpts; /* MR number:1M */ +}; + +struct nic_service_cap { + /* PF resources */ + u16 max_sqs; + u16 max_rqs; + + /* VF resources, VF obtain them through the MailBox mechanism from + * corresponding PF + */ + u16 vf_max_sqs; + u16 vf_max_rqs; + + bool lro_en; /* LRO feature enable bit */ + u8 lro_sz; /* LRO context space: n*16B */ + u8 tso_sz; /* TSO context space: n*16B */ +}; + +/* service type relates define */ +enum cfg_svc_type_en { + CFG_SVC_NIC_BIT0 = (1 << 0), +}; + +/* device capability */ +struct service_cap { + enum cfg_svc_type_en chip_svc_type; /* HW supported service type */ + + /* Host global resources */ + u16 host_total_function; + u8 host_oq_id_mask_val; + u8 host_id; + u8 ep_id; + u8 intr_chip_en; + u8 max_cos_id; /* PF/VF's max cos id */ + u8 er_id; /* PF/VF's ER */ + u8 port_id; /* PF/VF's physical port */ + u8 max_vf; /* max VF number that PF supported */ + bool sf_en; /* stateful business status */ + u16 max_sqs; + u16 max_rqs; + + u32 pf_num; + u32 pf_id_start; + u32 vf_num; + u32 vf_id_start; + + struct host_shared_resource_cap shared_res_cap; /* shared capability */ + struct nic_service_cap nic_cap; /* NIC capability */ +}; + +struct cfg_mgmt_info { + struct hinic_hwdev *hwdev; + struct service_cap svc_cap; +}; + +struct hinic_dev_cap { + struct hinic_mgmt_msg_head mgmt_msg_head; + + /* Public resource */ + u8 sf_svc_attr; + u8 host_id; + u8 sf_en_pf; + u8 sf_en_vf; + + u8 ep_id; + u8 intr_type; + u8 max_cos_id; + u8 er_id; + u8 port_id; + u8 max_vf; + u16 svc_cap_en; + u16 host_total_func; + u8 host_oq_id_mask_val; + u8 max_vf_cos_id; + + u32 max_conn_num; + u16 max_stick2cache_num; + u16 max_bfilter_start_addr; + u16 bfilter_len; + u16 hash_bucket_num; + u8 cfg_file_ver; + u8 net_port_mode; + u8 valid_cos_bitmap; /* every bit indicate cos is valid */ + u8 rsvd1; + u32 pf_num; + u32 pf_id_start; + u32 vf_num; + u32 vf_id_start; + + /* shared resource */ + u32 host_pctx_num; + u8 host_sf_en; + u8 rsvd2[3]; + u32 host_ccxt_num; + u32 host_scq_num; + u32 host_srq_num; + u32 host_mpt_num; + + /* l2nic */ + u16 nic_max_sq; + u16 nic_max_rq; + u16 nic_vf_max_sq; + u16 
nic_vf_max_rq; + u8 nic_lro_en; + u8 nic_lro_sz; + u8 nic_tso_sz; + u8 rsvd3; + + u32 rsvd4[50]; +}; + +/* Obtain service_cap.nic_cap.dev_nic_cap.max_sqs */ +u16 hinic_func_max_qnum(void *hwdev); + +int init_cfg_mgmt(struct hinic_hwdev *hwdev); + +void free_cfg_mgmt(struct hinic_hwdev *hwdev); + +int hinic_init_capability(struct hinic_hwdev *hwdev); + +bool hinic_support_nic(struct hinic_hwdev *hwdev, struct nic_service_cap *cap); + +#endif /* _HINIC_PMD_CFG_H_ */ diff --git a/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_cmd.h b/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_cmd.h new file mode 100644 index 000000000..09918a76f --- /dev/null +++ b/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_cmd.h @@ -0,0 +1,469 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Huawei Technologies Co., Ltd + */ + +#ifndef _HINIC_PORT_CMD_H_ +#define _HINIC_PORT_CMD_H_ + +#define HINIC_AEQ 0 + +enum hinic_resp_aeq_num { + HINIC_AEQ0 = 0, + HINIC_AEQ1 = 1, + HINIC_AEQ2 = 2, + HINIC_AEQ3 = 3, +}; + +enum hinic_mod_type { + HINIC_MOD_COMM = 0, /* HW communication module */ + HINIC_MOD_L2NIC = 1, /* L2NIC module */ + HINIC_MOD_CFGM = 7, /* Configuration module */ + HINIC_MOD_HILINK = 14, + HINIC_MOD_MAX = 15 +}; + +/* only used by VFD communicating with PFD to register or unregister, + * command mode type is HINIC_MOD_L2NIC + */ +#define HINIC_PORT_CMD_VF_REGISTER 0x0 +#define HINIC_PORT_CMD_VF_UNREGISTER 0x1 + +/* cmd of mgmt CPU message for NIC module */ +enum hinic_port_cmd { + HINIC_PORT_CMD_MGMT_RESET = 0x0, + + HINIC_PORT_CMD_CHANGE_MTU = 0x2, + + HINIC_PORT_CMD_ADD_VLAN = 0x3, + HINIC_PORT_CMD_DEL_VLAN, + + HINIC_PORT_CMD_SET_ETS = 0x7, + HINIC_PORT_CMD_GET_ETS, + + HINIC_PORT_CMD_SET_MAC = 0x9, + HINIC_PORT_CMD_GET_MAC, + HINIC_PORT_CMD_DEL_MAC, + + HINIC_PORT_CMD_SET_RX_MODE = 0xc, + HINIC_PORT_CMD_SET_ANTI_ATTACK_RATE = 0xd, + + HINIC_PORT_CMD_GET_PAUSE_INFO = 0x14, + HINIC_PORT_CMD_SET_PAUSE_INFO, + + HINIC_PORT_CMD_GET_LINK_STATE = 0x18, + HINIC_PORT_CMD_SET_LRO = 0x19, + HINIC_PORT_CMD_SET_RX_CSUM = 0x1a, + HINIC_PORT_CMD_SET_RX_VLAN_OFFLOAD = 0x1b, + + HINIC_PORT_CMD_GET_PORT_STATISTICS = 0x1c, + HINIC_PORT_CMD_CLEAR_PORT_STATISTICS, + HINIC_PORT_CMD_GET_VPORT_STAT, + HINIC_PORT_CMD_CLEAN_VPORT_STAT, + + HINIC_PORT_CMD_GET_RSS_TEMPLATE_INDIR_TBL = 0x25, + HINIC_PORT_CMD_SET_RSS_TEMPLATE_INDIR_TBL, + + HINIC_PORT_CMD_SET_PORT_ENABLE = 0x29, + HINIC_PORT_CMD_GET_PORT_ENABLE, + + HINIC_PORT_CMD_SET_RSS_TEMPLATE_TBL = 0x2b, + HINIC_PORT_CMD_GET_RSS_TEMPLATE_TBL, + HINIC_PORT_CMD_SET_RSS_HASH_ENGINE, + HINIC_PORT_CMD_GET_RSS_HASH_ENGINE, + HINIC_PORT_CMD_GET_RSS_CTX_TBL, + HINIC_PORT_CMD_SET_RSS_CTX_TBL, + HINIC_PORT_CMD_RSS_TEMP_MGR, + + HINIC_PORT_CMD_RSS_CFG = 0x42, + + HINIC_PORT_CMD_GET_PHY_TYPE = 0x44, + HINIC_PORT_CMD_INIT_FUNC = 0x45, + + HINIC_PORT_CMD_GET_JUMBO_FRAME_SIZE = 0x4a, + HINIC_PORT_CMD_SET_JUMBO_FRAME_SIZE, + + HINIC_PORT_CMD_GET_MGMT_VERSION = 0x58, + + HINIC_PORT_CMD_GET_PORT_TYPE = 0x5b, + + HINIC_PORT_CMD_GET_VPORT_ENABLE = 0x5c, + HINIC_PORT_CMD_SET_VPORT_ENABLE, + + HINIC_PORT_CMD_GET_PORT_ID_BY_FUNC_ID = 0x5e, + + HINIC_PORT_CMD_GET_LRO = 0x63, + + HINIC_PORT_CMD_GET_DMA_CS = 0x64, + HINIC_PORT_CMD_SET_DMA_CS, + + HINIC_PORT_CMD_GET_GLOBAL_QPN = 0x66, + + HINIC_PORT_CMD_SET_PFC_MISC = 0x67, + HINIC_PORT_CMD_GET_PFC_MISC, + + HINIC_PORT_CMD_SET_VF_RATE = 0x69, + HINIC_PORT_CMD_SET_VF_VLAN, + HINIC_PORT_CMD_CLR_VF_VLAN, + + HINIC_PORT_CMD_SET_RQ_IQ_MAP = 0x73, + HINIC_PORT_CMD_SET_PFC_THD = 0x75, + + HINIC_PORT_CMD_LINK_STATUS_REPORT = 0xa0, + + 
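+ /*
+  * Illustrative note, not part of the original code: these opcodes are
+  * carried to the mgmt CPU with module id HINIC_MOD_L2NIC. A sketch of
+  * a typical caller (the real callers live elsewhere in the base code):
+  *
+  *   err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_L2NIC,
+  *                                HINIC_PORT_CMD_GET_LINK_STATE,
+  *                                &cmd_in, sizeof(cmd_in),
+  *                                &cmd_out, &out_size, 0);
+  *
+  * cmd_in/cmd_out are request/response structs that begin with
+  * struct hinic_mgmt_msg_head; gaps in the opcode numbering are values
+  * reserved by the firmware interface and unused by the PMD.
+  */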
HINIC_PORT_CMD_SET_LOSSLESS_ETH = 0xa3, + HINIC_PORT_CMD_UPDATE_MAC = 0xa4, + + HINIC_PORT_CMD_GET_PORT_INFO = 0xaa, + + HINIC_PORT_CMD_UP_TC_ADD_FLOW = 0xaf, + HINIC_PORT_CMD_UP_TC_DEL_FLOW = 0xb0, + HINIC_PORT_CMD_UP_TC_GET_FLOW = 0xb1, + HINIC_PORT_CMD_UP_TC_FLUSH_TCAM = 0xb2, + HINIC_PORT_CMD_UP_TC_CTRL_TCAM_BLOCK = 0xb3, + + HINIC_PORT_CMD_SET_IPSU_MAC = 0xcb, + HINIC_PORT_CMD_GET_IPSU_MAC = 0xcc, + + HINIC_PORT_CMD_SET_XSFP_STATUS = 0xD4, + + HINIC_PORT_CMD_GET_LINK_MODE = 0xD9, + HINIC_PORT_CMD_SET_SPEED = 0xDA, + HINIC_PORT_CMD_SET_AUTONEG = 0xDB, + + HINIC_PORT_CMD_CLEAR_QP_RES = 0xDD, + HINIC_PORT_CMD_SET_SUPER_CQE = 0xDE, + HINIC_PORT_CMD_SET_VF_COS = 0xDF, + HINIC_PORT_CMD_GET_VF_COS = 0xE1, + + HINIC_PORT_CMD_CABLE_PLUG_EVENT = 0xE5, + HINIC_PORT_CMD_LINK_ERR_EVENT = 0xE6, + + HINIC_PORT_CMD_SET_COS_UP_MAP = 0xE8, + + HINIC_PORT_CMD_RESET_LINK_CFG = 0xEB, + + HINIC_PORT_CMD_FORCE_PKT_DROP = 0xF3, + HINIC_PORT_CMD_SET_LRO_TIMER = 0xF4, + + HINIC_PORT_CMD_SET_VHD_CFG = 0xF7, + HINIC_PORT_CMD_SET_LINK_FOLLOW = 0xF8, + HINIC_PORT_CMD_Q_FILTER = 0xFC, + HINIC_PORT_CMD_TCAM_FILTER = 0xFE, + HINIC_PORT_CMD_SET_VLAN_FILTER = 0xFF +}; + +/* cmd of mgmt CPU message for HW module */ +enum hinic_mgmt_cmd { + HINIC_MGMT_CMD_RESET_MGMT = 0x0, + HINIC_MGMT_CMD_START_FLR = 0x1, + HINIC_MGMT_CMD_FLUSH_DOORBELL = 0x2, + HINIC_MGMT_CMD_GET_IO_STATUS = 0x3, + HINIC_MGMT_CMD_DMA_ATTR_SET = 0x4, + + HINIC_MGMT_CMD_CMDQ_CTXT_SET = 0x10, + HINIC_MGMT_CMD_CMDQ_CTXT_GET, + + HINIC_MGMT_CMD_VAT_SET = 0x12, + HINIC_MGMT_CMD_VAT_GET, + + HINIC_MGMT_CMD_L2NIC_SQ_CI_ATTR_SET = 0x14, + HINIC_MGMT_CMD_L2NIC_SQ_CI_ATTR_GET, + + HINIC_MGMT_CMD_PPF_HT_GPA_SET = 0x23, + HINIC_MGMT_CMD_RES_STATE_SET = 0x24, + HINIC_MGMT_CMD_FUNC_CACHE_OUT = 0x25, + HINIC_MGMT_CMD_FFM_SET = 0x26, + + HINIC_MGMT_CMD_FUNC_RES_CLEAR = 0x29, + + HINIC_MGMT_CMD_CEQ_CTRL_REG_WR_BY_UP = 0x33, + HINIC_MGMT_CMD_MSI_CTRL_REG_WR_BY_UP, + HINIC_MGMT_CMD_MSI_CTRL_REG_RD_BY_UP, + + HINIC_MGMT_CMD_VF_RANDOM_ID_SET = 0x36, + HINIC_MGMT_CMD_FAULT_REPORT = 0x37, + + HINIC_MGMT_CMD_VPD_SET = 0x40, + HINIC_MGMT_CMD_VPD_GET, + HINIC_MGMT_CMD_LABEL_SET, + HINIC_MGMT_CMD_LABEL_GET, + HINIC_MGMT_CMD_SATIC_MAC_SET, + HINIC_MGMT_CMD_SATIC_MAC_GET, + HINIC_MGMT_CMD_SYNC_TIME = 0x46, + HINIC_MGMT_CMD_SET_LED_STATUS = 0x4A, + HINIC_MGMT_CMD_L2NIC_RESET = 0x4b, + HINIC_MGMT_CMD_FAST_RECYCLE_MODE_SET = 0x4d, + HINIC_MGMT_CMD_BIOS_NV_DATA_MGMT = 0x4E, + HINIC_MGMT_CMD_ACTIVATE_FW = 0x4F, + HINIC_MGMT_CMD_PAGESIZE_SET = 0x50, + HINIC_MGMT_CMD_PAGESIZE_GET = 0x51, + HINIC_MGMT_CMD_GET_BOARD_INFO = 0x52, + HINIC_MGMT_CMD_WATCHDOG_INFO = 0x56, + HINIC_MGMT_CMD_FMW_ACT_NTC = 0x57, + HINIC_MGMT_CMD_SET_VF_RANDOM_ID = 0x61, + HINIC_MGMT_CMD_GET_PPF_STATE = 0x63, + HINIC_MGMT_CMD_PCIE_DFX_NTC = 0x65, + HINIC_MGMT_CMD_PCIE_DFX_GET = 0x66, +}; + +/* cmd of mgmt CPU message for HILINK module */ +enum hinic_hilink_cmd { + HINIC_HILINK_CMD_GET_LINK_INFO = 0x3, + HINIC_HILINK_CMD_SET_LINK_SETTINGS = 0x8, +}; + +/* uCode related commands */ +enum hinic_ucode_cmd { + HINIC_UCODE_CMD_MDY_QUEUE_CONTEXT = 0, + HINIC_UCODE_CMD_CLEAN_QUEUE_CONTEXT, + HINIC_UCODE_CMD_ARM_SQ, + HINIC_UCODE_CMD_ARM_RQ, + HINIC_UCODE_CMD_SET_RSS_INDIR_TABLE, + HINIC_UCODE_CMD_SET_RSS_CONTEXT_TABLE, + HINIC_UCODE_CMD_GET_RSS_INDIR_TABLE, + HINIC_UCODE_CMD_GET_RSS_CONTEXT_TABLE, + HINIC_UCODE_CMD_SET_IQ_ENABLE, + HINIC_UCODE_CMD_SET_RQ_FLUSH = 10 +}; + +enum cfg_sub_cmd { + /* PPF(PF) <-> FW */ + HINIC_CFG_NIC_CAP = 0, + CFG_FW_VERSION, + CFG_UCODE_VERSION, + HINIC_CFG_MBOX_CAP = 6 +}; + +enum 
hinic_ack_type { + HINIC_ACK_TYPE_CMDQ, + HINIC_ACK_TYPE_SHARE_CQN, + HINIC_ACK_TYPE_APP_CQN, + + HINIC_MOD_ACK_MAX = 15, +}; + +enum sq_l4offload_type { + OFFLOAD_DISABLE = 0, + TCP_OFFLOAD_ENABLE = 1, + SCTP_OFFLOAD_ENABLE = 2, + UDP_OFFLOAD_ENABLE = 3, +}; + +enum sq_vlan_offload_flag { + VLAN_OFFLOAD_DISABLE = 0, + VLAN_OFFLOAD_ENABLE = 1, +}; + +enum sq_pkt_parsed_flag { + PKT_NOT_PARSED = 0, + PKT_PARSED = 1, +}; + +enum sq_l3_type { + UNKNOWN_L3TYPE = 0, + IPV6_PKT = 1, + IPV4_PKT_NO_CHKSUM_OFFLOAD = 2, + IPV4_PKT_WITH_CHKSUM_OFFLOAD = 3, +}; + +enum sq_md_type { + UNKNOWN_MD_TYPE = 0, +}; + +enum sq_l2type { + ETHERNET = 0, +}; + +enum sq_tunnel_l4_type { + NOT_TUNNEL, + TUNNEL_UDP_NO_CSUM, + TUNNEL_UDP_CSUM, +}; + +#define NIC_RSS_CMD_TEMP_ALLOC 0x01 +#define NIC_RSS_CMD_TEMP_FREE 0x02 + +#define HINIC_RSS_TYPE_VALID_SHIFT 23 +#define HINIC_RSS_TYPE_TCP_IPV6_EXT_SHIFT 24 +#define HINIC_RSS_TYPE_IPV6_EXT_SHIFT 25 +#define HINIC_RSS_TYPE_TCP_IPV6_SHIFT 26 +#define HINIC_RSS_TYPE_IPV6_SHIFT 27 +#define HINIC_RSS_TYPE_TCP_IPV4_SHIFT 28 +#define HINIC_RSS_TYPE_IPV4_SHIFT 29 +#define HINIC_RSS_TYPE_UDP_IPV6_SHIFT 30 +#define HINIC_RSS_TYPE_UDP_IPV4_SHIFT 31 + +#define HINIC_RSS_TYPE_SET(val, member) \ + (((u32)(val) & 0x1) << HINIC_RSS_TYPE_##member##_SHIFT) + +#define HINIC_RSS_TYPE_GET(val, member) \ + (((u32)(val) >> HINIC_RSS_TYPE_##member##_SHIFT) & 0x1) + +enum hinic_speed { + HINIC_SPEED_10MB_LINK = 0, + HINIC_SPEED_100MB_LINK, + HINIC_SPEED_1000MB_LINK, + HINIC_SPEED_10GB_LINK, + HINIC_SPEED_25GB_LINK, + HINIC_SPEED_40GB_LINK, + HINIC_SPEED_100GB_LINK, + HINIC_SPEED_UNKNOWN = 0xFF, +}; + +enum { + HINIC_IFLA_VF_LINK_STATE_AUTO, /* link state of the uplink */ + HINIC_IFLA_VF_LINK_STATE_ENABLE, /* link always up */ + HINIC_IFLA_VF_LINK_STATE_DISABLE, /* link always down */ +}; + +#define HINIC_AF0_FUNC_GLOBAL_IDX_SHIFT 0 +#define HINIC_AF0_P2P_IDX_SHIFT 10 +#define HINIC_AF0_PCI_INTF_IDX_SHIFT 14 +#define HINIC_AF0_VF_IN_PF_SHIFT 16 +#define HINIC_AF0_FUNC_TYPE_SHIFT 24 + +#define HINIC_AF0_FUNC_GLOBAL_IDX_MASK 0x3FF +#define HINIC_AF0_P2P_IDX_MASK 0xF +#define HINIC_AF0_PCI_INTF_IDX_MASK 0x3 +#define HINIC_AF0_VF_IN_PF_MASK 0xFF +#define HINIC_AF0_FUNC_TYPE_MASK 0x1 + +#define HINIC_AF0_GET(val, member) \ + (((val) >> HINIC_AF0_##member##_SHIFT) & HINIC_AF0_##member##_MASK) + +#define HINIC_AF1_PPF_IDX_SHIFT 0 +#define HINIC_AF1_AEQS_PER_FUNC_SHIFT 8 +#define HINIC_AF1_CEQS_PER_FUNC_SHIFT 12 +#define HINIC_AF1_IRQS_PER_FUNC_SHIFT 20 +#define HINIC_AF1_DMA_ATTR_PER_FUNC_SHIFT 24 +#define HINIC_AF1_MGMT_INIT_STATUS_SHIFT 30 +#define HINIC_AF1_PF_INIT_STATUS_SHIFT 31 + +#define HINIC_AF1_PPF_IDX_MASK 0x1F +#define HINIC_AF1_AEQS_PER_FUNC_MASK 0x3 +#define HINIC_AF1_CEQS_PER_FUNC_MASK 0x7 +#define HINIC_AF1_IRQS_PER_FUNC_MASK 0xF +#define HINIC_AF1_DMA_ATTR_PER_FUNC_MASK 0x7 +#define HINIC_AF1_MGMT_INIT_STATUS_MASK 0x1 +#define HINIC_AF1_PF_INIT_STATUS_MASK 0x1 + +#define HINIC_AF1_GET(val, member) \ + (((val) >> HINIC_AF1_##member##_SHIFT) & HINIC_AF1_##member##_MASK) + +#define HINIC_AF2_GLOBAL_VF_ID_OF_PF_SHIFT 16 +#define HINIC_AF2_GLOBAL_VF_ID_OF_PF_MASK 0x3FF + +#define HINIC_AF2_GET(val, member) \ + (((val) >> HINIC_AF2_##member##_SHIFT) & HINIC_AF2_##member##_MASK) + +#define HINIC_AF4_OUTBOUND_CTRL_SHIFT 0 +#define HINIC_AF4_DOORBELL_CTRL_SHIFT 1 +#define HINIC_AF4_OUTBOUND_CTRL_MASK 0x1 +#define HINIC_AF4_DOORBELL_CTRL_MASK 0x1 + +#define HINIC_AF4_GET(val, member) \ + (((val) >> HINIC_AF4_##member##_SHIFT) & HINIC_AF4_##member##_MASK) + +#define HINIC_AF4_SET(val, member) 
\ + (((val) & HINIC_AF4_##member##_MASK) << HINIC_AF4_##member##_SHIFT) + +#define HINIC_AF4_CLEAR(val, member) \ + ((val) & (~(HINIC_AF4_##member##_MASK << \ + HINIC_AF4_##member##_SHIFT))) + +#define HINIC_AF5_PF_STATUS_SHIFT 0 +#define HINIC_AF5_PF_STATUS_MASK 0xFFFF + +#define HINIC_AF5_SET(val, member) \ + (((val) & HINIC_AF5_##member##_MASK) << HINIC_AF5_##member##_SHIFT) + +#define HINIC_AF5_GET(val, member) \ + (((val) >> HINIC_AF5_##member##_SHIFT) & HINIC_AF5_##member##_MASK) + +#define HINIC_AF5_CLEAR(val, member) \ + ((val) & (~(HINIC_AF5_##member##_MASK << \ + HINIC_AF5_##member##_SHIFT))) + +#define HINIC_PPF_ELECTION_IDX_SHIFT 0 + +#define HINIC_PPF_ELECTION_IDX_MASK 0x1F + +#define HINIC_PPF_ELECTION_SET(val, member) \ + (((val) & HINIC_PPF_ELECTION_##member##_MASK) << \ + HINIC_PPF_ELECTION_##member##_SHIFT) + +#define HINIC_PPF_ELECTION_GET(val, member) \ + (((val) >> HINIC_PPF_ELECTION_##member##_SHIFT) & \ + HINIC_PPF_ELECTION_##member##_MASK) + +#define HINIC_PPF_ELECTION_CLEAR(val, member) \ + ((val) & (~(HINIC_PPF_ELECTION_##member##_MASK \ + << HINIC_PPF_ELECTION_##member##_SHIFT))) + +#define DB_IDX(db, db_base) \ + ((u32)(((unsigned long)(db) - (unsigned long)(db_base)) / \ + HINIC_DB_PAGE_SIZE)) + +enum hinic_pcie_nosnoop { + HINIC_PCIE_SNOOP = 0, + HINIC_PCIE_NO_SNOOP = 1, +}; + +enum hinic_pcie_tph { + HINIC_PCIE_TPH_DISABLE = 0, + HINIC_PCIE_TPH_ENABLE = 1, +}; + +enum hinic_outbound_ctrl { + ENABLE_OUTBOUND = 0x0, + DISABLE_OUTBOUND = 0x1, +}; + +enum hinic_doorbell_ctrl { + ENABLE_DOORBELL = 0x0, + DISABLE_DOORBELL = 0x1, +}; + +enum hinic_pf_status { + HINIC_PF_STATUS_INIT = 0X0, + HINIC_PF_STATUS_ACTIVE_FLAG = 0x11, + HINIC_PF_STATUS_FLR_START_FLAG = 0x12, + HINIC_PF_STATUS_FLR_FINISH_FLAG = 0x13, +}; + +/* total doorbell or direct wqe size is 512kB, db num: 128, dwqe: 128 */ +#define HINIC_DB_DWQE_SIZE 0x00080000 + +/* db page size: 4K */ +#define HINIC_DB_PAGE_SIZE 0x00001000ULL + +#define HINIC_DB_MAX_AREAS (HINIC_DB_DWQE_SIZE / HINIC_DB_PAGE_SIZE) + +#define HINIC_PCI_MSIX_ENTRY_SIZE 16 +#define HINIC_PCI_MSIX_ENTRY_VECTOR_CTRL 12 +#define HINIC_PCI_MSIX_ENTRY_CTRL_MASKBIT 1 + +struct hinic_mgmt_msg_head { + u8 status; + u8 version; + u8 resp_aeq_num; + u8 rsvd0[5]; +}; + +struct hinic_root_ctxt { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_idx; + u16 rsvd1; + u8 set_cmdq_depth; + u8 cmdq_depth; + u8 lro_en; + u8 rsvd2; + u8 ppf_idx; + u8 rsvd3; + u16 rq_depth; + u16 rx_buf_sz; + u16 sq_depth; +}; + +#endif /* _HINIC_PORT_CMD_H_ */ diff --git a/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_cmdq.c b/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_cmdq.c new file mode 100644 index 000000000..2e98b9c28 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_cmdq.c @@ -0,0 +1,855 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Huawei Technologies Co., Ltd + */ + +#include "hinic_compat.h" +#include "hinic_pmd_hwdev.h" +#include "hinic_pmd_hwif.h" +#include "hinic_pmd_wq.h" +#include "hinic_pmd_mgmt.h" +#include "hinic_pmd_mbox.h" +#include "hinic_pmd_cmdq.h" + +#define CMDQ_CMD_TIMEOUT 5000 /* millisecond */ + +#define UPPER_8_BITS(data) (((data) >> 8) & 0xFF) +#define LOWER_8_BITS(data) ((data) & 0xFF) + +#define CMDQ_DB_INFO_HI_PROD_IDX_SHIFT 0 +#define CMDQ_DB_INFO_QUEUE_TYPE_SHIFT 23 +#define CMDQ_DB_INFO_CMDQ_TYPE_SHIFT 24 +#define CMDQ_DB_INFO_SRC_TYPE_SHIFT 27 + +#define CMDQ_DB_INFO_HI_PROD_IDX_MASK 0xFFU +#define CMDQ_DB_INFO_QUEUE_TYPE_MASK 0x1U +#define CMDQ_DB_INFO_CMDQ_TYPE_MASK 0x7U +#define 
CMDQ_DB_INFO_SRC_TYPE_MASK 0x1FU + +#define CMDQ_DB_INFO_SET(val, member) \ + (((val) & CMDQ_DB_INFO_##member##_MASK) << \ + CMDQ_DB_INFO_##member##_SHIFT) + +#define CMDQ_CTRL_PI_SHIFT 0 +#define CMDQ_CTRL_CMD_SHIFT 16 +#define CMDQ_CTRL_MOD_SHIFT 24 +#define CMDQ_CTRL_ACK_TYPE_SHIFT 29 +#define CMDQ_CTRL_HW_BUSY_BIT_SHIFT 31 + +#define CMDQ_CTRL_PI_MASK 0xFFFFU +#define CMDQ_CTRL_CMD_MASK 0xFFU +#define CMDQ_CTRL_MOD_MASK 0x1FU +#define CMDQ_CTRL_ACK_TYPE_MASK 0x3U +#define CMDQ_CTRL_HW_BUSY_BIT_MASK 0x1U + +#define CMDQ_CTRL_SET(val, member) \ + (((val) & CMDQ_CTRL_##member##_MASK) << CMDQ_CTRL_##member##_SHIFT) + +#define CMDQ_CTRL_GET(val, member) \ + (((val) >> CMDQ_CTRL_##member##_SHIFT) & CMDQ_CTRL_##member##_MASK) + +#define CMDQ_WQE_HEADER_BUFDESC_LEN_SHIFT 0 +#define CMDQ_WQE_HEADER_COMPLETE_FMT_SHIFT 15 +#define CMDQ_WQE_HEADER_DATA_FMT_SHIFT 22 +#define CMDQ_WQE_HEADER_COMPLETE_REQ_SHIFT 23 +#define CMDQ_WQE_HEADER_COMPLETE_SECT_LEN_SHIFT 27 +#define CMDQ_WQE_HEADER_CTRL_LEN_SHIFT 29 +#define CMDQ_WQE_HEADER_HW_BUSY_BIT_SHIFT 31 + +#define CMDQ_WQE_HEADER_BUFDESC_LEN_MASK 0xFFU +#define CMDQ_WQE_HEADER_COMPLETE_FMT_MASK 0x1U +#define CMDQ_WQE_HEADER_DATA_FMT_MASK 0x1U +#define CMDQ_WQE_HEADER_COMPLETE_REQ_MASK 0x1U +#define CMDQ_WQE_HEADER_COMPLETE_SECT_LEN_MASK 0x3U +#define CMDQ_WQE_HEADER_CTRL_LEN_MASK 0x3U +#define CMDQ_WQE_HEADER_HW_BUSY_BIT_MASK 0x1U + +#define CMDQ_WQE_HEADER_SET(val, member) \ + (((val) & CMDQ_WQE_HEADER_##member##_MASK) << \ + CMDQ_WQE_HEADER_##member##_SHIFT) + +#define CMDQ_WQE_HEADER_GET(val, member) \ + (((val) >> CMDQ_WQE_HEADER_##member##_SHIFT) & \ + CMDQ_WQE_HEADER_##member##_MASK) + +#define CMDQ_CTXT_CURR_WQE_PAGE_PFN_SHIFT 0 +#define CMDQ_CTXT_EQ_ID_SHIFT 56 +#define CMDQ_CTXT_CEQ_ARM_SHIFT 61 +#define CMDQ_CTXT_CEQ_EN_SHIFT 62 +#define CMDQ_CTXT_HW_BUSY_BIT_SHIFT 63 + +#define CMDQ_CTXT_CURR_WQE_PAGE_PFN_MASK 0xFFFFFFFFFFFFF +#define CMDQ_CTXT_EQ_ID_MASK 0x1F +#define CMDQ_CTXT_CEQ_ARM_MASK 0x1 +#define CMDQ_CTXT_CEQ_EN_MASK 0x1 +#define CMDQ_CTXT_HW_BUSY_BIT_MASK 0x1 + +#define CMDQ_CTXT_PAGE_INFO_SET(val, member) \ + (((u64)(val) & CMDQ_CTXT_##member##_MASK) << CMDQ_CTXT_##member##_SHIFT) + +#define CMDQ_CTXT_PAGE_INFO_CLEAR(val, member) \ + ((val) & (~((u64)CMDQ_CTXT_##member##_MASK << \ + CMDQ_CTXT_##member##_SHIFT))) + +#define CMDQ_CTXT_WQ_BLOCK_PFN_SHIFT 0 +#define CMDQ_CTXT_CI_SHIFT 52 + +#define CMDQ_CTXT_WQ_BLOCK_PFN_MASK 0xFFFFFFFFFFFFF +#define CMDQ_CTXT_CI_MASK 0xFFF + +#define CMDQ_CTXT_BLOCK_INFO_SET(val, member) \ + (((u64)(val) & CMDQ_CTXT_##member##_MASK) << CMDQ_CTXT_##member##_SHIFT) + +#define SAVED_DATA_ARM_SHIFT 31 + +#define SAVED_DATA_ARM_MASK 0x1U + +#define SAVED_DATA_SET(val, member) \ + (((val) & SAVED_DATA_##member##_MASK) << SAVED_DATA_##member##_SHIFT) + +#define SAVED_DATA_CLEAR(val, member) \ + ((val) & (~(SAVED_DATA_##member##_MASK << SAVED_DATA_##member##_SHIFT))) + +#define WQE_ERRCODE_VAL_SHIFT 20 + +#define WQE_ERRCODE_VAL_MASK 0xF + +#define WQE_ERRCODE_GET(val, member) \ + (((val) >> WQE_ERRCODE_##member##_SHIFT) & WQE_ERRCODE_##member##_MASK) + +#define WQE_COMPLETED(ctrl_info) CMDQ_CTRL_GET(ctrl_info, HW_BUSY_BIT) + +#define WQE_HEADER(wqe) ((struct hinic_cmdq_header *)(wqe)) + +#define CMDQ_DB_PI_OFF(pi) (((u16)LOWER_8_BITS(pi)) << 3) + +#define CMDQ_DB_ADDR(db_base, pi) \ + (((u8 *)(db_base) + HINIC_DB_OFF) + CMDQ_DB_PI_OFF(pi)) + +#define CMDQ_PFN(addr, page_size) ((addr) >> (ilog2(page_size))) + +#define FIRST_DATA_TO_WRITE_LAST sizeof(u64) + +#define WQE_LCMD_SIZE 64 +#define WQE_SCMD_SIZE 64 
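+/*
+ * Worked example, illustrative only: for a producer index pi = 0x1234,
+ * CMDQ_DB_PI_OFF() keeps the low 8 bits and shifts them by 3, giving
+ * (0x34 << 3) = 0x1a0, so the doorbell is written at
+ * db_base + HINIC_DB_OFF (0x800) + 0x1a0.  The high 8 bits of the same
+ * index travel in the doorbell payload instead:
+ *
+ *   db.db_info = CMDQ_DB_INFO_SET(UPPER_8_BITS(pi), HI_PROD_IDX) |
+ *                CMDQ_DB_INFO_SET(HINIC_DB_CMDQ_TYPE, QUEUE_TYPE) |
+ *                CMDQ_DB_INFO_SET(cmdq_type, CMDQ_TYPE) |
+ *                CMDQ_DB_INFO_SET(HINIC_DB_SRC_CMDQ_TYPE, SRC_TYPE);
+ *
+ * which matches what cmdq_fill_db() below builds before cmdq_set_db()
+ * converts it to big endian and rings the doorbell.
+ */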
+ +#define COMPLETE_LEN 3 + +#define CMDQ_WQEBB_SIZE 64 +#define CMDQ_WQEBB_SHIFT 6 + +#define CMDQ_WQE_SIZE 64 + +#define HINIC_CMDQ_WQ_BUF_SIZE 4096 + +#define WQE_NUM_WQEBBS(wqe_size, wq) \ + ((u16)(ALIGN((u32)(wqe_size), (wq)->wqebb_size) / (wq)->wqebb_size)) + +#define cmdq_to_cmdqs(cmdq) container_of((cmdq) - (cmdq)->cmdq_type, \ + struct hinic_cmdqs, cmdq[0]) + +#define WAIT_CMDQ_ENABLE_TIMEOUT 300 + + +static void cmdq_init_queue_ctxt(struct hinic_cmdq *cmdq, + struct hinic_cmdq_ctxt *cmdq_ctxt); +static void hinic_cmdqs_free(struct hinic_hwdev *hwdev); + +bool hinic_cmdq_idle(struct hinic_cmdq *cmdq) +{ + struct hinic_wq *wq = cmdq->wq; + + return ((wq->delta) == wq->q_depth ? true : false); +} + +struct hinic_cmd_buf *hinic_alloc_cmd_buf(void *hwdev) +{ + struct hinic_cmdqs *cmdqs = ((struct hinic_hwdev *)hwdev)->cmdqs; + struct hinic_cmd_buf *cmd_buf; + + cmd_buf = kzalloc(sizeof(*cmd_buf), GFP_KERNEL); + if (!cmd_buf) { + PMD_DRV_LOG(ERR, "Allocate cmd buffer failed"); + return NULL; + } + + cmd_buf->buf = pci_pool_alloc(cmdqs->cmd_buf_pool, &cmd_buf->dma_addr); + if (!cmd_buf->buf) { + PMD_DRV_LOG(ERR, "Allocate cmd from the pool failed"); + goto alloc_pci_buf_err; + } + + return cmd_buf; + +alloc_pci_buf_err: + kfree(cmd_buf); + return NULL; +} + +void hinic_free_cmd_buf(void *hwdev, struct hinic_cmd_buf *cmd_buf) +{ + struct hinic_cmdqs *cmdqs = ((struct hinic_hwdev *)hwdev)->cmdqs; + + pci_pool_free(cmdqs->cmd_buf_pool, cmd_buf->buf, cmd_buf->dma_addr); + kfree(cmd_buf); +} + +static u32 cmdq_wqe_size(enum cmdq_wqe_type wqe_type) +{ + u32 wqe_size = 0; + + switch (wqe_type) { + case WQE_LCMD_TYPE: + wqe_size = WQE_LCMD_SIZE; + break; + case WQE_SCMD_TYPE: + wqe_size = WQE_SCMD_SIZE; + break; + } + + return wqe_size; +} + +static int cmdq_get_wqe_size(enum bufdesc_len len) +{ + int wqe_size = 0; + + switch (len) { + case BUFDESC_LCMD_LEN: + wqe_size = WQE_LCMD_SIZE; + break; + case BUFDESC_SCMD_LEN: + wqe_size = WQE_SCMD_SIZE; + break; + } + + return wqe_size; +} + +static void cmdq_set_completion(struct hinic_cmdq_completion *complete, + struct hinic_cmd_buf *buf_out) +{ + struct hinic_sge_resp *sge_resp = &complete->sge_resp; + + hinic_set_sge(&sge_resp->sge, buf_out->dma_addr, + HINIC_CMDQ_BUF_SIZE); +} + +static void cmdq_set_lcmd_bufdesc(struct hinic_cmdq_wqe_lcmd *wqe, + struct hinic_cmd_buf *buf_in) +{ + hinic_set_sge(&wqe->buf_desc.sge, buf_in->dma_addr, buf_in->size); +} + +static void cmdq_fill_db(struct hinic_cmdq_db *db, + enum hinic_cmdq_type cmdq_type, u16 prod_idx) +{ + db->db_info = CMDQ_DB_INFO_SET(UPPER_8_BITS(prod_idx), HI_PROD_IDX) | + CMDQ_DB_INFO_SET(HINIC_DB_CMDQ_TYPE, QUEUE_TYPE) | + CMDQ_DB_INFO_SET(cmdq_type, CMDQ_TYPE) | + CMDQ_DB_INFO_SET(HINIC_DB_SRC_CMDQ_TYPE, SRC_TYPE); +} + +static void cmdq_set_db(struct hinic_cmdq *cmdq, + enum hinic_cmdq_type cmdq_type, u16 prod_idx) +{ + struct hinic_cmdq_db db; + + cmdq_fill_db(&db, cmdq_type, prod_idx); + + /* The data that is written to HW should be in Big Endian Format */ + db.db_info = cpu_to_be32(db.db_info); + + rte_wmb(); /* write all before the doorbell */ + + writel(db.db_info, CMDQ_DB_ADDR(cmdq->db_base, prod_idx)); +} + +static void cmdq_wqe_fill(void *dst, void *src) +{ + memcpy((u8 *)dst + FIRST_DATA_TO_WRITE_LAST, + (u8 *)src + FIRST_DATA_TO_WRITE_LAST, + CMDQ_WQE_SIZE - FIRST_DATA_TO_WRITE_LAST); + + rte_wmb();/* The first 8 bytes should be written last */ + + *(u64 *)dst = *(u64 *)src; +} + +static void cmdq_prepare_wqe_ctrl(struct hinic_cmdq_wqe *wqe, int wrapped, + enum hinic_ack_type 
ack_type, + enum hinic_mod_type mod, u8 cmd, u16 prod_idx, + enum completion_format complete_format, + enum data_format local_data_format, + enum bufdesc_len buf_len) +{ + struct hinic_ctrl *ctrl; + enum ctrl_sect_len ctrl_len; + struct hinic_cmdq_wqe_lcmd *wqe_lcmd; + struct hinic_cmdq_wqe_scmd *wqe_scmd; + u32 saved_data = WQE_HEADER(wqe)->saved_data; + + if (local_data_format == DATA_SGE) { + wqe_lcmd = &wqe->wqe_lcmd; + + wqe_lcmd->status.status_info = 0; + ctrl = &wqe_lcmd->ctrl; + ctrl_len = CTRL_SECT_LEN; + } else { + wqe_scmd = &wqe->inline_wqe.wqe_scmd; + + wqe_scmd->status.status_info = 0; + ctrl = &wqe_scmd->ctrl; + ctrl_len = CTRL_DIRECT_SECT_LEN; + } + + ctrl->ctrl_info = CMDQ_CTRL_SET(prod_idx, PI) | + CMDQ_CTRL_SET(cmd, CMD) | + CMDQ_CTRL_SET(mod, MOD) | + CMDQ_CTRL_SET(ack_type, ACK_TYPE); + + WQE_HEADER(wqe)->header_info = + CMDQ_WQE_HEADER_SET(buf_len, BUFDESC_LEN) | + CMDQ_WQE_HEADER_SET(complete_format, COMPLETE_FMT) | + CMDQ_WQE_HEADER_SET(local_data_format, DATA_FMT) | + CMDQ_WQE_HEADER_SET(CEQ_SET, COMPLETE_REQ) | + CMDQ_WQE_HEADER_SET(COMPLETE_LEN, COMPLETE_SECT_LEN) | + CMDQ_WQE_HEADER_SET(ctrl_len, CTRL_LEN) | + CMDQ_WQE_HEADER_SET((u32)wrapped, HW_BUSY_BIT); + + if (cmd == CMDQ_SET_ARM_CMD && mod == HINIC_MOD_COMM) { + saved_data &= SAVED_DATA_CLEAR(saved_data, ARM); + WQE_HEADER(wqe)->saved_data = saved_data | + SAVED_DATA_SET(1, ARM); + } else { + saved_data &= SAVED_DATA_CLEAR(saved_data, ARM); + WQE_HEADER(wqe)->saved_data = saved_data; + } +} + +static void cmdq_set_lcmd_wqe(struct hinic_cmdq_wqe *wqe, + enum cmdq_cmd_type cmd_type, + struct hinic_cmd_buf *buf_in, + struct hinic_cmd_buf *buf_out, int wrapped, + enum hinic_ack_type ack_type, + enum hinic_mod_type mod, u8 cmd, u16 prod_idx) +{ + struct hinic_cmdq_wqe_lcmd *wqe_lcmd = &wqe->wqe_lcmd; + enum completion_format complete_format = COMPLETE_DIRECT; + + switch (cmd_type) { + case SYNC_CMD_SGE_RESP: + if (buf_out) { + complete_format = COMPLETE_SGE; + cmdq_set_completion(&wqe_lcmd->completion, buf_out); + } + break; + case SYNC_CMD_DIRECT_RESP: + complete_format = COMPLETE_DIRECT; + wqe_lcmd->completion.direct_resp = 0; + break; + case ASYNC_CMD: + complete_format = COMPLETE_DIRECT; + wqe_lcmd->completion.direct_resp = 0; + + wqe_lcmd->buf_desc.saved_async_buf = (u64)(buf_in); + break; + } + + cmdq_prepare_wqe_ctrl(wqe, wrapped, ack_type, mod, cmd, + prod_idx, complete_format, DATA_SGE, + BUFDESC_LCMD_LEN); + + cmdq_set_lcmd_bufdesc(wqe_lcmd, buf_in); +} + +static int cmdq_params_valid(struct hinic_cmd_buf *buf_in) +{ + if (buf_in->size > HINIC_CMDQ_MAX_DATA_SIZE) { + PMD_DRV_LOG(ERR, "Invalid CMDQ buffer size"); + return -EINVAL; + } + + return 0; +} + +static int wait_cmdqs_enable(struct hinic_cmdqs *cmdqs) +{ + unsigned long end; + + end = jiffies + msecs_to_jiffies(WAIT_CMDQ_ENABLE_TIMEOUT); + do { + if (cmdqs->status & HINIC_CMDQ_ENABLE) + return 0; + + } while (time_before(jiffies, end)); + + return -EBUSY; +} + +static void cmdq_update_errcode(struct hinic_cmdq *cmdq, u16 prod_idx, + int errcode) +{ + cmdq->errcode[prod_idx] = errcode; +} + +static void clear_wqe_complete_bit(struct hinic_cmdq *cmdq, + struct hinic_cmdq_wqe *wqe) +{ + struct hinic_cmdq_wqe_lcmd *wqe_lcmd; + struct hinic_cmdq_inline_wqe *inline_wqe; + struct hinic_cmdq_wqe_scmd *wqe_scmd; + struct hinic_ctrl *ctrl; + u32 header_info = be32_to_cpu(WQE_HEADER(wqe)->header_info); + int buf_len = CMDQ_WQE_HEADER_GET(header_info, BUFDESC_LEN); + int wqe_size = cmdq_get_wqe_size(buf_len); + u16 num_wqebbs; + + if (wqe_size == 
WQE_LCMD_SIZE) { + wqe_lcmd = &wqe->wqe_lcmd; + ctrl = &wqe_lcmd->ctrl; + } else { + inline_wqe = &wqe->inline_wqe; + wqe_scmd = &inline_wqe->wqe_scmd; + ctrl = &wqe_scmd->ctrl; + } + + /* clear HW busy bit */ + ctrl->ctrl_info = 0; + + rte_wmb(); /* verify wqe is clear */ + + num_wqebbs = WQE_NUM_WQEBBS(wqe_size, cmdq->wq); + hinic_put_wqe(cmdq->wq, num_wqebbs); +} + +static int hinic_set_cmdq_ctxts(struct hinic_hwdev *hwdev) +{ + struct hinic_cmdqs *cmdqs = hwdev->cmdqs; + struct hinic_cmdq_ctxt *cmdq_ctxt; + enum hinic_cmdq_type cmdq_type; + u16 in_size; + int err; + + cmdq_type = HINIC_CMDQ_SYNC; + for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++) { + cmdq_ctxt = &cmdqs->cmdq[cmdq_type].cmdq_ctxt; + cmdq_ctxt->resp_aeq_num = HINIC_AEQ1; + in_size = sizeof(*cmdq_ctxt); + err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM, + HINIC_MGMT_CMD_CMDQ_CTXT_SET, + cmdq_ctxt, in_size, NULL, + NULL, 0); + if (err) { + if (err == HINIC_MBOX_PF_BUSY_ACTIVE_FW || + err == HINIC_DEV_BUSY_ACTIVE_FW) { + cmdqs->status |= HINIC_CMDQ_SET_FAIL; + PMD_DRV_LOG(ERR, "PF or VF fw is hot active"); + } + PMD_DRV_LOG(ERR, "Set cmdq ctxt failed, err: %d", err); + return -EFAULT; + } + } + + cmdqs->status &= ~HINIC_CMDQ_SET_FAIL; + cmdqs->status |= HINIC_CMDQ_ENABLE; + + return 0; +} + +void hinic_comm_cmdqs_free(struct hinic_hwdev *hwdev) +{ + hinic_cmdqs_free(hwdev); +} + +int hinic_reinit_cmdq_ctxts(struct hinic_hwdev *hwdev) +{ + struct hinic_cmdqs *cmdqs = hwdev->cmdqs; + enum hinic_cmdq_type cmdq_type; + + cmdq_type = HINIC_CMDQ_SYNC; + for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++) { + cmdqs->cmdq[cmdq_type].wrapped = 1; + hinic_wq_wqe_pg_clear(cmdqs->cmdq[cmdq_type].wq); + } + + return hinic_set_cmdq_ctxts(hwdev); +} + +static int init_cmdq(struct hinic_cmdq *cmdq, struct hinic_hwdev *hwdev, + struct hinic_wq *wq, enum hinic_cmdq_type q_type) +{ + void __iomem *db_base; + int err = 0; + size_t errcode_size; + size_t cmd_infos_size; + + cmdq->wq = wq; + cmdq->cmdq_type = q_type; + cmdq->wrapped = 1; + + spin_lock_init(&cmdq->cmdq_lock); + + errcode_size = wq->q_depth * sizeof(*cmdq->errcode); + cmdq->errcode = kzalloc(errcode_size, GFP_KERNEL); + if (!cmdq->errcode) { + PMD_DRV_LOG(ERR, "Allocate errcode for cmdq failed"); + spin_lock_deinit(&cmdq->cmdq_lock); + return -ENOMEM; + } + + cmd_infos_size = wq->q_depth * sizeof(*cmdq->cmd_infos); + cmdq->cmd_infos = kzalloc(cmd_infos_size, GFP_KERNEL); + if (!cmdq->cmd_infos) { + PMD_DRV_LOG(ERR, "Allocate errcode for cmdq failed"); + err = -ENOMEM; + goto cmd_infos_err; + } + + err = hinic_alloc_db_addr(hwdev, &db_base); + if (err) + goto alloc_db_err; + + cmdq->db_base = (u8 *)db_base; + return 0; + +alloc_db_err: + kfree(cmdq->cmd_infos); + +cmd_infos_err: + kfree(cmdq->errcode); + spin_lock_deinit(&cmdq->cmdq_lock); + + return err; +} + +static void free_cmdq(struct hinic_hwdev *hwdev, struct hinic_cmdq *cmdq) +{ + hinic_free_db_addr(hwdev, cmdq->db_base); + kfree(cmdq->cmd_infos); + kfree(cmdq->errcode); + spin_lock_deinit(&cmdq->cmdq_lock); +} + +static int hinic_cmdqs_init(struct hinic_hwdev *hwdev) +{ + struct hinic_cmdqs *cmdqs; + struct hinic_cmdq_ctxt *cmdq_ctxt; + enum hinic_cmdq_type type, cmdq_type; + size_t saved_wqs_size; + int err; + + cmdqs = kzalloc(sizeof(*cmdqs), GFP_KERNEL); + if (!cmdqs) + return -ENOMEM; + + hwdev->cmdqs = cmdqs; + cmdqs->hwdev = hwdev; + + saved_wqs_size = HINIC_MAX_CMDQ_TYPES * sizeof(struct hinic_wq); + cmdqs->saved_wqs = kzalloc(saved_wqs_size, GFP_KERNEL); + if (!cmdqs->saved_wqs) { + PMD_DRV_LOG(ERR, 
"Allocate saved wqs failed"); + err = -ENOMEM; + goto alloc_wqs_err; + } + + cmdqs->cmd_buf_pool = dma_pool_create("hinic_cmdq", hwdev, + HINIC_CMDQ_BUF_SIZE, + HINIC_CMDQ_BUF_SIZE, 0ULL); + if (!cmdqs->cmd_buf_pool) { + PMD_DRV_LOG(ERR, "Create cmdq buffer pool failed"); + err = -ENOMEM; + goto pool_create_err; + } + + err = hinic_cmdq_alloc(cmdqs->saved_wqs, hwdev, + HINIC_MAX_CMDQ_TYPES, HINIC_CMDQ_WQ_BUF_SIZE, + CMDQ_WQEBB_SHIFT, HINIC_CMDQ_DEPTH); + if (err) { + PMD_DRV_LOG(ERR, "Allocate cmdq failed"); + goto cmdq_alloc_err; + } + + cmdq_type = HINIC_CMDQ_SYNC; + for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++) { + err = init_cmdq(&cmdqs->cmdq[cmdq_type], hwdev, + &cmdqs->saved_wqs[cmdq_type], cmdq_type); + if (err) { + PMD_DRV_LOG(ERR, "Initialize cmdq failed"); + goto init_cmdq_err; + } + + cmdq_ctxt = &cmdqs->cmdq[cmdq_type].cmdq_ctxt; + cmdq_init_queue_ctxt(&cmdqs->cmdq[cmdq_type], cmdq_ctxt); + } + + err = hinic_set_cmdq_ctxts(hwdev); + if (err) + goto init_cmdq_err; + + return 0; + +init_cmdq_err: + type = HINIC_CMDQ_SYNC; + for ( ; type < cmdq_type; type++) + free_cmdq(hwdev, &cmdqs->cmdq[type]); + + hinic_cmdq_free(hwdev, cmdqs->saved_wqs, HINIC_MAX_CMDQ_TYPES); + +cmdq_alloc_err: + dma_pool_destroy(cmdqs->cmd_buf_pool); + +pool_create_err: + kfree(cmdqs->saved_wqs); + +alloc_wqs_err: + kfree(cmdqs); + + return err; +} + +static void hinic_cmdqs_free(struct hinic_hwdev *hwdev) +{ + struct hinic_cmdqs *cmdqs = hwdev->cmdqs; + enum hinic_cmdq_type cmdq_type = HINIC_CMDQ_SYNC; + + cmdqs->status &= ~HINIC_CMDQ_ENABLE; + + for ( ; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++) + free_cmdq(cmdqs->hwdev, &cmdqs->cmdq[cmdq_type]); + + hinic_cmdq_free(hwdev, cmdqs->saved_wqs, + HINIC_MAX_CMDQ_TYPES); + + dma_pool_destroy(cmdqs->cmd_buf_pool); + + kfree(cmdqs->saved_wqs); + + kfree(cmdqs); +} + +static int hinic_set_cmdq_depth(struct hinic_hwdev *hwdev, u16 cmdq_depth) +{ + struct hinic_root_ctxt root_ctxt; + + memset(&root_ctxt, 0, sizeof(root_ctxt)); + root_ctxt.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1; + root_ctxt.func_idx = hinic_global_func_id(hwdev); + root_ctxt.ppf_idx = hinic_ppf_idx(hwdev); + root_ctxt.set_cmdq_depth = 1; + root_ctxt.cmdq_depth = (u8)ilog2(cmdq_depth); + return hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM, + HINIC_MGMT_CMD_VAT_SET, + &root_ctxt, sizeof(root_ctxt), + NULL, NULL, 0); +} + +int hinic_comm_cmdqs_init(struct hinic_hwdev *hwdev) +{ + int err; + + err = hinic_cmdqs_init(hwdev); + if (err) { + PMD_DRV_LOG(ERR, "Init cmd queues failed"); + return err; + } + + err = hinic_set_cmdq_depth(hwdev, HINIC_CMDQ_DEPTH); + if (err) { + PMD_DRV_LOG(ERR, "Set cmdq depth failed"); + goto set_cmdq_depth_err; + } + + return 0; + +set_cmdq_depth_err: + hinic_cmdqs_free(hwdev); + + return err; +} + +static void cmdq_init_queue_ctxt(struct hinic_cmdq *cmdq, + struct hinic_cmdq_ctxt *cmdq_ctxt) +{ + struct hinic_cmdqs *cmdqs = (struct hinic_cmdqs *)cmdq_to_cmdqs(cmdq); + struct hinic_hwdev *hwdev = cmdqs->hwdev; + struct hinic_wq *wq = cmdq->wq; + struct hinic_cmdq_ctxt_info *ctxt_info = &cmdq_ctxt->ctxt_info; + u64 wq_first_page_paddr, pfn; + + u16 start_ci = (u16)(wq->cons_idx); + + /* The data in the HW is in Big Endian Format */ + wq_first_page_paddr = wq->queue_buf_paddr; + + pfn = CMDQ_PFN(wq_first_page_paddr, HINIC_PAGE_SIZE); + ctxt_info->curr_wqe_page_pfn = + CMDQ_CTXT_PAGE_INFO_SET(1, HW_BUSY_BIT) | + CMDQ_CTXT_PAGE_INFO_SET(1, CEQ_EN) | + CMDQ_CTXT_PAGE_INFO_SET(0, CEQ_ARM) | + CMDQ_CTXT_PAGE_INFO_SET(HINIC_CEQ_ID_CMDQ, EQ_ID) | + 
CMDQ_CTXT_PAGE_INFO_SET(pfn, CURR_WQE_PAGE_PFN); + + ctxt_info->wq_block_pfn = CMDQ_CTXT_BLOCK_INFO_SET(start_ci, CI) | + CMDQ_CTXT_BLOCK_INFO_SET(pfn, WQ_BLOCK_PFN); + cmdq_ctxt->func_idx = HINIC_HWIF_GLOBAL_IDX(hwdev->hwif); + cmdq_ctxt->ppf_idx = HINIC_HWIF_PPF_IDX(hwdev->hwif); + cmdq_ctxt->cmdq_id = cmdq->cmdq_type; +} + +static int hinic_cmdq_poll_msg(struct hinic_cmdq *cmdq, u32 timeout) +{ + struct hinic_cmdq_wqe *wqe; + struct hinic_cmdq_wqe_lcmd *wqe_lcmd; + struct hinic_ctrl *ctrl; + struct hinic_cmdq_cmd_info *cmd_info; + u32 status_info, ctrl_info; + u16 ci; + int errcode; + unsigned long end; + int done = 0; + int rc = 0; + + wqe = hinic_read_wqe(cmdq->wq, 1, &ci); + if (wqe == NULL) { + PMD_DRV_LOG(ERR, "No outstanding cmdq msg"); + return -EINVAL; + } + + cmd_info = &cmdq->cmd_infos[ci]; + /* this cmd has not been filled and send to hw, or get TMO msg ack*/ + if (cmd_info->cmd_type == HINIC_CMD_TYPE_NONE) { + PMD_DRV_LOG(ERR, "Cmdq msg has not been filled and send to hw, or get TMO msg ack. cmdq ci: %u", + ci); + return -EINVAL; + } + + /* only arm bit is using scmd wqe, the wqe is lcmd */ + wqe_lcmd = &wqe->wqe_lcmd; + ctrl = &wqe_lcmd->ctrl; + end = jiffies + msecs_to_jiffies(timeout); + do { + ctrl_info = be32_to_cpu((ctrl)->ctrl_info); + if (WQE_COMPLETED(ctrl_info)) { + done = 1; + break; + } + + rte_delay_ms(1); + } while (time_before(jiffies, end)); + + if (done) { + status_info = be32_to_cpu(wqe_lcmd->status.status_info); + errcode = WQE_ERRCODE_GET(status_info, VAL); + cmdq_update_errcode(cmdq, ci, errcode); + clear_wqe_complete_bit(cmdq, wqe); + rc = 0; + } else { + PMD_DRV_LOG(ERR, "Poll cmdq msg time out, ci: %u", ci); + rc = -ETIMEDOUT; + } + + /* set this cmd invalid */ + cmd_info->cmd_type = HINIC_CMD_TYPE_NONE; + + return rc; +} + +static int cmdq_sync_cmd_direct_resp(struct hinic_cmdq *cmdq, + enum hinic_ack_type ack_type, + enum hinic_mod_type mod, u8 cmd, + struct hinic_cmd_buf *buf_in, + u64 *out_param, u32 timeout) +{ + struct hinic_wq *wq = cmdq->wq; + struct hinic_cmdq_wqe *curr_wqe, wqe; + struct hinic_cmdq_wqe_lcmd *wqe_lcmd; + u16 curr_prod_idx, next_prod_idx, num_wqebbs; + int wrapped; + u32 timeo, wqe_size; + int err; + + wqe_size = cmdq_wqe_size(WQE_LCMD_TYPE); + num_wqebbs = WQE_NUM_WQEBBS(wqe_size, wq); + + /* Keep wrapped and doorbell index correct. */ + spin_lock(&cmdq->cmdq_lock); + + curr_wqe = hinic_get_wqe(cmdq->wq, num_wqebbs, &curr_prod_idx); + if (!curr_wqe) { + err = -EBUSY; + goto cmdq_unlock; + } + + memset(&wqe, 0, sizeof(wqe)); + wrapped = cmdq->wrapped; + + next_prod_idx = curr_prod_idx + num_wqebbs; + if (next_prod_idx >= wq->q_depth) { + cmdq->wrapped = !cmdq->wrapped; + next_prod_idx -= wq->q_depth; + } + + cmdq_set_lcmd_wqe(&wqe, SYNC_CMD_DIRECT_RESP, buf_in, NULL, + wrapped, ack_type, mod, cmd, curr_prod_idx); + + /* The data that is written to HW should be in Big Endian Format */ + hinic_cpu_to_be32(&wqe, wqe_size); + + /* CMDQ WQE is not shadow, therefore wqe will be written to wq */ + cmdq_wqe_fill(curr_wqe, &wqe); + + cmdq->cmd_infos[curr_prod_idx].cmd_type = HINIC_CMD_TYPE_NORMAL; + + cmdq_set_db(cmdq, HINIC_CMDQ_SYNC, next_prod_idx); + + timeo = msecs_to_jiffies(timeout ? 
timeout : CMDQ_CMD_TIMEOUT); + err = hinic_cmdq_poll_msg(cmdq, timeo); + if (err) { + PMD_DRV_LOG(ERR, "Cmdq poll msg ack failed, prod idx: 0x%x", + curr_prod_idx); + err = -ETIMEDOUT; + goto cmdq_unlock; + } + + rte_smp_rmb(); /* read error code after completion */ + + if (out_param) { + wqe_lcmd = &curr_wqe->wqe_lcmd; + *out_param = cpu_to_be64(wqe_lcmd->completion.direct_resp); + } + + if (cmdq->errcode[curr_prod_idx] > 1) { + err = cmdq->errcode[curr_prod_idx]; + goto cmdq_unlock; + } + +cmdq_unlock: + spin_unlock(&cmdq->cmdq_lock); + + return err; +} + +int hinic_cmdq_direct_resp(void *hwdev, enum hinic_ack_type ack_type, + enum hinic_mod_type mod, u8 cmd, + struct hinic_cmd_buf *buf_in, + u64 *out_param, u32 timeout) +{ + struct hinic_cmdqs *cmdqs = ((struct hinic_hwdev *)hwdev)->cmdqs; + int err = cmdq_params_valid(buf_in); + + if (err) { + PMD_DRV_LOG(ERR, "Invalid CMDQ parameters"); + return err; + } + + err = wait_cmdqs_enable(cmdqs); + if (err) { + PMD_DRV_LOG(ERR, "Cmdq is disable"); + return err; + } + + return cmdq_sync_cmd_direct_resp(&cmdqs->cmdq[HINIC_CMDQ_SYNC], + ack_type, mod, cmd, buf_in, + out_param, timeout); +} diff --git a/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_cmdq.h b/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_cmdq.h new file mode 100644 index 000000000..0d5e38012 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_cmdq.h @@ -0,0 +1,243 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Huawei Technologies Co., Ltd + */ + +#ifndef _HINIC_PMD_CMDQ_H_ +#define _HINIC_PMD_CMDQ_H_ + +#define HINIC_DB_OFF 0x00000800 + +#define HINIC_SCMD_DATA_LEN 16 + +/* pmd driver uses 64, kernel l2nic use 4096 */ +#define HINIC_CMDQ_DEPTH 64 + +#define HINIC_CMDQ_BUF_SIZE 2048U +#define HINIC_CMDQ_BUF_HW_RSVD 8 +#define HINIC_CMDQ_MAX_DATA_SIZE (HINIC_CMDQ_BUF_SIZE \ + - HINIC_CMDQ_BUF_HW_RSVD) + +#define HINIC_CEQ_ID_CMDQ 0 + +enum cmdq_scmd_type { + CMDQ_SET_ARM_CMD = 2, +}; + +enum cmdq_wqe_type { + WQE_LCMD_TYPE, + WQE_SCMD_TYPE, +}; + +enum ctrl_sect_len { + CTRL_SECT_LEN = 1, + CTRL_DIRECT_SECT_LEN = 2, +}; + +enum bufdesc_len { + BUFDESC_LCMD_LEN = 2, + BUFDESC_SCMD_LEN = 3, +}; + +enum data_format { + DATA_SGE, +}; + +enum completion_format { + COMPLETE_DIRECT, + COMPLETE_SGE, +}; + +enum completion_request { + CEQ_SET = 1, +}; + +enum cmdq_cmd_type { + SYNC_CMD_DIRECT_RESP, + SYNC_CMD_SGE_RESP, + ASYNC_CMD, +}; + +enum hinic_cmdq_type { + HINIC_CMDQ_SYNC, + HINIC_CMDQ_ASYNC, + HINIC_MAX_CMDQ_TYPES, +}; + +enum hinic_db_src_type { + HINIC_DB_SRC_CMDQ_TYPE, + HINIC_DB_SRC_L2NIC_SQ_TYPE, +}; + +enum hinic_cmdq_db_type { + HINIC_DB_SQ_RQ_TYPE, + HINIC_DB_CMDQ_TYPE, +}; + +/* CMDQ WQE CTRLS */ +struct hinic_cmdq_header { + u32 header_info; + u32 saved_data; +}; + +struct hinic_scmd_bufdesc { + u32 buf_len; + u32 rsvd; + u8 data[HINIC_SCMD_DATA_LEN]; +}; + +struct hinic_lcmd_bufdesc { + struct hinic_sge sge; + u32 rsvd1; + u64 saved_async_buf; + u64 rsvd3; +}; + +struct hinic_cmdq_db { + u32 db_info; + u32 rsvd; +}; + +struct hinic_status { + u32 status_info; +}; + +struct hinic_ctrl { + u32 ctrl_info; +}; + +struct hinic_sge_resp { + struct hinic_sge sge; + u32 rsvd; +}; + +struct hinic_cmdq_completion { + /* HW Format */ + union { + struct hinic_sge_resp sge_resp; + u64 direct_resp; + }; +}; + +struct hinic_cmdq_wqe_scmd { + struct hinic_cmdq_header header; + struct hinic_cmdq_db db; + struct hinic_status status; + struct hinic_ctrl ctrl; + struct hinic_cmdq_completion completion; + struct hinic_scmd_bufdesc buf_desc; +}; + +struct 
hinic_cmdq_wqe_lcmd { + struct hinic_cmdq_header header; + struct hinic_status status; + struct hinic_ctrl ctrl; + struct hinic_cmdq_completion completion; + struct hinic_lcmd_bufdesc buf_desc; +}; + +struct hinic_cmdq_inline_wqe { + struct hinic_cmdq_wqe_scmd wqe_scmd; +}; + +struct hinic_cmdq_wqe { + /* HW Format */ + union{ + struct hinic_cmdq_inline_wqe inline_wqe; + struct hinic_cmdq_wqe_lcmd wqe_lcmd; + }; +}; + +struct hinic_cmdq_ctxt_info { + u64 curr_wqe_page_pfn; + u64 wq_block_pfn; +}; + +/* New interface */ +struct hinic_cmdq_ctxt { + u8 status; + u8 version; + u8 resp_aeq_num; + u8 rsvd0[5]; + + u16 func_idx; + u8 cmdq_id; + u8 ppf_idx; + + u8 rsvd1[4]; + + struct hinic_cmdq_ctxt_info ctxt_info; +}; + +enum hinic_cmdq_status { + HINIC_CMDQ_ENABLE = BIT(0), + HINIC_CMDQ_SET_FAIL = BIT(1) +}; + +enum hinic_cmdq_cmd_type { + HINIC_CMD_TYPE_NONE, + HINIC_CMD_TYPE_SET_ARM, + HINIC_CMD_TYPE_NORMAL, +}; + +struct hinic_cmdq_cmd_info { + enum hinic_cmdq_cmd_type cmd_type; +}; + +struct hinic_cmdq { + struct hinic_wq *wq; + + enum hinic_cmdq_type cmdq_type; + int wrapped; + + hinic_spinlock_t cmdq_lock; + + int *errcode; + + /* doorbell area */ + u8 __iomem *db_base; + + struct hinic_cmdq_ctxt cmdq_ctxt; + + struct hinic_cmdq_cmd_info *cmd_infos; +}; + +struct hinic_cmdqs { + struct hinic_hwdev *hwdev; + + struct pci_pool *cmd_buf_pool; + + struct hinic_wq *saved_wqs; + + struct hinic_cmdq cmdq[HINIC_MAX_CMDQ_TYPES]; + + u32 status; +}; + +struct hinic_cmd_buf { + void *buf; + dma_addr_t dma_addr; + struct rte_mbuf *mbuf; + u16 size; +}; + +int hinic_reinit_cmdq_ctxts(struct hinic_hwdev *hwdev); + +bool hinic_cmdq_idle(struct hinic_cmdq *cmdq); + +struct hinic_cmd_buf *hinic_alloc_cmd_buf(void *hwdev); + +void hinic_free_cmd_buf(void *hwdev, struct hinic_cmd_buf *cmd_buf); + +/* PF/VF send cmd to ucode by cmdq, and return if success. + * timeout=0, use default timeout. 
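+ *
+ * Illustrative call sequence (a sketch, not taken verbatim from the
+ * driver; req, cmd and out_param are placeholders chosen for the
+ * example):
+ *
+ *   struct hinic_cmd_buf *buf = hinic_alloc_cmd_buf(hwdev);
+ *   memcpy(buf->buf, &req, sizeof(req));
+ *   buf->size = sizeof(req);
+ *   err = hinic_cmdq_direct_resp(hwdev, HINIC_ACK_TYPE_CMDQ,
+ *                                HINIC_MOD_L2NIC, cmd, buf,
+ *                                &out_param, 0);
+ *   hinic_free_cmd_buf(hwdev, buf);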
+ */ +int hinic_cmdq_direct_resp(void *hwdev, enum hinic_ack_type ack_type, + enum hinic_mod_type mod, u8 cmd, + struct hinic_cmd_buf *buf_in, + u64 *out_param, u32 timeout); + +int hinic_comm_cmdqs_init(struct hinic_hwdev *hwdev); + +void hinic_comm_cmdqs_free(struct hinic_hwdev *hwdev); + +#endif /* _HINIC_PMD_CMDQ_H_ */ diff --git a/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_eqs.c b/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_eqs.c new file mode 100644 index 000000000..79e1b20bc --- /dev/null +++ b/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_eqs.c @@ -0,0 +1,490 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Huawei Technologies Co., Ltd + */ + +#include "hinic_compat.h" +#include "hinic_csr.h" +#include "hinic_pmd_hwdev.h" +#include "hinic_pmd_hwif.h" +#include "hinic_pmd_mgmt.h" +#include "hinic_pmd_eqs.h" + +#define AEQ_CTRL_0_INTR_IDX_SHIFT 0 +#define AEQ_CTRL_0_DMA_ATTR_SHIFT 12 +#define AEQ_CTRL_0_PCI_INTF_IDX_SHIFT 20 +#define AEQ_CTRL_0_INTR_MODE_SHIFT 31 + +#define AEQ_CTRL_0_INTR_IDX_MASK 0x3FFU +#define AEQ_CTRL_0_DMA_ATTR_MASK 0x3FU +#define AEQ_CTRL_0_PCI_INTF_IDX_MASK 0x3U +#define AEQ_CTRL_0_INTR_MODE_MASK 0x1U + +#define AEQ_CTRL_0_SET(val, member) \ + (((val) & AEQ_CTRL_0_##member##_MASK) << \ + AEQ_CTRL_0_##member##_SHIFT) + +#define AEQ_CTRL_0_CLEAR(val, member) \ + ((val) & (~(AEQ_CTRL_0_##member##_MASK \ + << AEQ_CTRL_0_##member##_SHIFT))) + +#define AEQ_CTRL_1_LEN_SHIFT 0 +#define AEQ_CTRL_1_ELEM_SIZE_SHIFT 24 +#define AEQ_CTRL_1_PAGE_SIZE_SHIFT 28 + +#define AEQ_CTRL_1_LEN_MASK 0x1FFFFFU +#define AEQ_CTRL_1_ELEM_SIZE_MASK 0x3U +#define AEQ_CTRL_1_PAGE_SIZE_MASK 0xFU + +#define AEQ_CTRL_1_SET(val, member) \ + (((val) & AEQ_CTRL_1_##member##_MASK) << \ + AEQ_CTRL_1_##member##_SHIFT) + +#define AEQ_CTRL_1_CLEAR(val, member) \ + ((val) & (~(AEQ_CTRL_1_##member##_MASK \ + << AEQ_CTRL_1_##member##_SHIFT))) + +#define EQ_CONS_IDX_CONS_IDX_SHIFT 0 +#define EQ_CONS_IDX_XOR_CHKSUM_SHIFT 24 +#define EQ_CONS_IDX_INT_ARMED_SHIFT 31 + +#define EQ_CONS_IDX_CONS_IDX_MASK 0x1FFFFFU +#define EQ_CONS_IDX_XOR_CHKSUM_MASK 0xFU +#define EQ_CONS_IDX_INT_ARMED_MASK 0x1U + +#define EQ_CONS_IDX_SET(val, member) \ + (((val) & EQ_CONS_IDX_##member##_MASK) << \ + EQ_CONS_IDX_##member##_SHIFT) + +#define EQ_CONS_IDX_CLEAR(val, member) \ + ((val) & (~(EQ_CONS_IDX_##member##_MASK \ + << EQ_CONS_IDX_##member##_SHIFT))) + +#define EQ_WRAPPED(eq) ((u32)(eq)->wrapped << EQ_VALID_SHIFT) + +#define EQ_CONS_IDX(eq) ((eq)->cons_idx | \ + ((u32)(eq)->wrapped << EQ_WRAPPED_SHIFT)) + +#define EQ_CONS_IDX_REG_ADDR(eq) \ + (HINIC_CSR_AEQ_CONS_IDX_ADDR((eq)->q_id)) + +#define EQ_PROD_IDX_REG_ADDR(eq) \ + (HINIC_CSR_AEQ_PROD_IDX_ADDR((eq)->q_id)) + +#define GET_EQ_NUM_PAGES(eq, size) \ + ((u16)(ALIGN((eq)->eq_len * (u32)(eq)->elem_size, (size)) \ + / (size))) + +#define GET_EQ_NUM_ELEMS(eq, pg_size) ((pg_size) / (u32)(eq)->elem_size) + +#define PAGE_IN_4K(page_size) ((page_size) >> 12) +#define EQ_SET_HW_PAGE_SIZE_VAL(eq) ((u32)ilog2(PAGE_IN_4K((eq)->page_size))) + +#define ELEMENT_SIZE_IN_32B(eq) (((eq)->elem_size) >> 5) +#define EQ_SET_HW_ELEM_SIZE_VAL(eq) ((u32)ilog2(ELEMENT_SIZE_IN_32B(eq))) + +#define AEQ_DMA_ATTR_DEFAULT 0 + +#define EQ_WRAPPED_SHIFT 20 + +#define EQ_VALID_SHIFT 31 + +#define aeq_to_aeqs(eq) \ + container_of((eq) - (eq)->q_id, struct hinic_aeqs, aeq[0]) + +static u8 eq_cons_idx_checksum_set(u32 val) +{ + u8 checksum = 0; + u8 idx; + + for (idx = 0; idx < 32; idx += 4) + checksum ^= ((val >> idx) & 0xF); + + return (checksum & 0xF); +} + +/** + * 
set_eq_cons_idx - write the cons idx to the hw + * @eq: The event queue to update the cons idx for + * @arm_state: indicate whether report interrupts when generate eq element + */ +static void set_eq_cons_idx(struct hinic_eq *eq, u32 arm_state) +{ + u32 eq_cons_idx, eq_wrap_ci, val; + u32 addr = EQ_CONS_IDX_REG_ADDR(eq); + + eq_wrap_ci = EQ_CONS_IDX(eq); + + /* Read Modify Write */ + val = hinic_hwif_read_reg(eq->hwdev->hwif, addr); + + val = EQ_CONS_IDX_CLEAR(val, CONS_IDX) & + EQ_CONS_IDX_CLEAR(val, INT_ARMED) & + EQ_CONS_IDX_CLEAR(val, XOR_CHKSUM); + + /* Just aeq0 use int_arm mode for pmd drv to recv + * asyn event&mbox recv data + */ + if (eq->q_id == 0) + eq_cons_idx = EQ_CONS_IDX_SET(eq_wrap_ci, CONS_IDX) | + EQ_CONS_IDX_SET(arm_state, INT_ARMED); + else + eq_cons_idx = EQ_CONS_IDX_SET(eq_wrap_ci, CONS_IDX) | + EQ_CONS_IDX_SET(HINIC_EQ_NOT_ARMED, INT_ARMED); + + val |= eq_cons_idx; + + val |= EQ_CONS_IDX_SET(eq_cons_idx_checksum_set(val), XOR_CHKSUM); + + hinic_hwif_write_reg(eq->hwdev->hwif, addr, val); +} + +/** + * eq_update_ci - update the cons idx of event queue + * @eq: the event queue to update the cons idx for + */ +void eq_update_ci(struct hinic_eq *eq) +{ + set_eq_cons_idx(eq, HINIC_EQ_ARMED); +} + +/** + * set_eq_ctrls - setting eq's ctrls registers + * @eq: the event queue for setting + */ +static void set_aeq_ctrls(struct hinic_eq *eq) +{ + struct hinic_hwif *hwif = eq->hwdev->hwif; + struct irq_info *eq_irq = &eq->eq_irq; + u32 addr, val, ctrl0, ctrl1, page_size_val, elem_size; + u32 pci_intf_idx = HINIC_PCI_INTF_IDX(hwif); + + /* set ctrl0 */ + addr = HINIC_CSR_AEQ_CTRL_0_ADDR(eq->q_id); + + val = hinic_hwif_read_reg(hwif, addr); + + val = AEQ_CTRL_0_CLEAR(val, INTR_IDX) & + AEQ_CTRL_0_CLEAR(val, DMA_ATTR) & + AEQ_CTRL_0_CLEAR(val, PCI_INTF_IDX) & + AEQ_CTRL_0_CLEAR(val, INTR_MODE); + + ctrl0 = AEQ_CTRL_0_SET(eq_irq->msix_entry_idx, INTR_IDX) | + AEQ_CTRL_0_SET(AEQ_DMA_ATTR_DEFAULT, DMA_ATTR) | + AEQ_CTRL_0_SET(pci_intf_idx, PCI_INTF_IDX) | + AEQ_CTRL_0_SET(HINIC_INTR_MODE_ARMED, INTR_MODE); + + val |= ctrl0; + + hinic_hwif_write_reg(hwif, addr, val); + + /* set ctrl1 */ + addr = HINIC_CSR_AEQ_CTRL_1_ADDR(eq->q_id); + + page_size_val = EQ_SET_HW_PAGE_SIZE_VAL(eq); + elem_size = EQ_SET_HW_ELEM_SIZE_VAL(eq); + + ctrl1 = AEQ_CTRL_1_SET(eq->eq_len, LEN) | + AEQ_CTRL_1_SET(elem_size, ELEM_SIZE) | + AEQ_CTRL_1_SET(page_size_val, PAGE_SIZE); + + hinic_hwif_write_reg(hwif, addr, ctrl1); +} + +/** + * aeq_elements_init - initialize all the elements in the aeq + * @eq: the event queue + * @init_val: value to init with it the elements + */ +static void aeq_elements_init(struct hinic_eq *eq, u32 init_val) +{ + struct hinic_aeq_elem *aeqe; + u16 i; + + for (i = 0; i < eq->eq_len; i++) { + aeqe = GET_AEQ_ELEM(eq, i); + aeqe->desc = cpu_to_be32(init_val); + } + + rte_wmb(); /* Write the init values */ +} + +/** + * alloc_eq_pages - allocate the pages for the queue + * @eq: the event queue + */ +static int alloc_eq_pages(struct hinic_eq *eq) +{ + struct hinic_hwif *hwif = eq->hwdev->hwif; + u32 init_val; + u64 dma_addr_size, virt_addr_size; + u16 pg_num, i; + int err; + + dma_addr_size = eq->num_pages * sizeof(*eq->dma_addr); + virt_addr_size = eq->num_pages * sizeof(*eq->virt_addr); + + eq->dma_addr = kzalloc(dma_addr_size, GFP_KERNEL); + if (!eq->dma_addr) { + PMD_DRV_LOG(ERR, "Allocate dma addr array failed"); + return -ENOMEM; + } + + eq->virt_addr = kzalloc(virt_addr_size, GFP_KERNEL); + if (!eq->virt_addr) { + PMD_DRV_LOG(ERR, "Allocate virt addr array failed"); + err = 
-ENOMEM; + goto virt_addr_alloc_err; + } + + for (pg_num = 0; pg_num < eq->num_pages; pg_num++) { + eq->virt_addr[pg_num] = + (u8 *)dma_zalloc_coherent_aligned(eq->hwdev, + eq->page_size, &eq->dma_addr[pg_num], + SOCKET_ID_ANY); + if (!eq->virt_addr[pg_num]) { + err = -ENOMEM; + goto dma_alloc_err; + } + + hinic_hwif_write_reg(hwif, + HINIC_EQ_HI_PHYS_ADDR_REG(eq->type, + eq->q_id, pg_num), + upper_32_bits(eq->dma_addr[pg_num])); + + hinic_hwif_write_reg(hwif, + HINIC_EQ_LO_PHYS_ADDR_REG(eq->type, + eq->q_id, pg_num), + lower_32_bits(eq->dma_addr[pg_num])); + } + + init_val = EQ_WRAPPED(eq); + + aeq_elements_init(eq, init_val); + + return 0; + +dma_alloc_err: + for (i = 0; i < pg_num; i++) + dma_free_coherent(eq->hwdev, eq->page_size, + eq->virt_addr[i], eq->dma_addr[i]); + +virt_addr_alloc_err: + kfree(eq->dma_addr); + return err; +} + +/** + * free_eq_pages - free the pages of the queue + * @eq: the event queue + */ +static void free_eq_pages(struct hinic_eq *eq) +{ + struct hinic_hwdev *hwdev = eq->hwdev; + u16 pg_num; + + for (pg_num = 0; pg_num < eq->num_pages; pg_num++) + dma_free_coherent(hwdev, eq->page_size, + eq->virt_addr[pg_num], + eq->dma_addr[pg_num]); + + kfree(eq->virt_addr); + kfree(eq->dma_addr); +} + +#define MSIX_ENTRY_IDX_0 (0) + +/** + * init_aeq - initialize aeq + * @eq: the event queue + * @hwdev: the pointer to the private hardware device object + * @q_id: Queue id number + * @q_len: the number of EQ elements + * @type: the type of the event queue, ceq or aeq + * @page_size: the page size of the event queue + * @entry: msix entry associated with the event queue + * Return: 0 - Success, Negative - failure + */ +static int init_aeq(struct hinic_eq *eq, struct hinic_hwdev *hwdev, u16 q_id, + u16 q_len, u32 page_size, + __rte_unused struct irq_info *entry) +{ + int err = 0; + + eq->hwdev = hwdev; + eq->q_id = q_id; + eq->type = HINIC_AEQ; + eq->page_size = page_size; + eq->eq_len = q_len; + + /* clear eq_len to force eqe drop in hardware */ + hinic_hwif_write_reg(eq->hwdev->hwif, + HINIC_CSR_AEQ_CTRL_1_ADDR(eq->q_id), 0); + + /* Clear PI and CI, also clear the ARM bit */ + hinic_hwif_write_reg(eq->hwdev->hwif, EQ_CONS_IDX_REG_ADDR(eq), 0); + hinic_hwif_write_reg(eq->hwdev->hwif, EQ_PROD_IDX_REG_ADDR(eq), 0); + + eq->cons_idx = 0; + eq->wrapped = 0; + + eq->elem_size = HINIC_AEQE_SIZE; + eq->num_pages = GET_EQ_NUM_PAGES(eq, page_size); + eq->num_elem_in_pg = GET_EQ_NUM_ELEMS(eq, page_size); + + if (eq->num_elem_in_pg & (eq->num_elem_in_pg - 1)) { + PMD_DRV_LOG(ERR, "Number element in eq page is not power of 2"); + return -EINVAL; + } + + if (eq->num_pages > HINIC_EQ_MAX_PAGES) { + PMD_DRV_LOG(ERR, "Too many pages for eq, num_pages: %d", + eq->num_pages); + return -EINVAL; + } + + err = alloc_eq_pages(eq); + if (err) { + PMD_DRV_LOG(ERR, "Allocate pages for eq failed"); + return err; + } + + /* pmd use MSIX_ENTRY_IDX_0 */ + eq->eq_irq.msix_entry_idx = MSIX_ENTRY_IDX_0; + + set_aeq_ctrls(eq); + set_eq_cons_idx(eq, HINIC_EQ_ARMED); + + if (eq->q_id == 0) + hinic_set_msix_state(hwdev, 0, HINIC_MSIX_ENABLE); + + eq->poll_retry_nr = HINIC_RETRY_NUM; + + return 0; +} + +/** + * remove_aeq - remove aeq + * @eq: the event queue + */ +static void remove_aeq(struct hinic_eq *eq) +{ + struct irq_info *entry = &eq->eq_irq; + + if (eq->q_id == 0) + hinic_set_msix_state(eq->hwdev, entry->msix_entry_idx, + HINIC_MSIX_DISABLE); + + /* clear eq_len to avoid hw access host memory */ + hinic_hwif_write_reg(eq->hwdev->hwif, + HINIC_CSR_AEQ_CTRL_1_ADDR(eq->q_id), 0); + + /* update cons_idx 
to avoid invalid interrupt */ + eq->cons_idx = (u16)hinic_hwif_read_reg(eq->hwdev->hwif, + EQ_PROD_IDX_REG_ADDR(eq)); + set_eq_cons_idx(eq, HINIC_EQ_NOT_ARMED); + + free_eq_pages(eq); +} + +/** + * hinic_aeqs_init - init all the aeqs + * @hwdev: the pointer to the private hardware device object + * @num_aeqs: number of aeq + * @msix_entries: msix entries associated with the event queues + * Return: 0 - Success, Negative - failure + */ +static int +hinic_aeqs_init(struct hinic_hwdev *hwdev, u16 num_aeqs, + struct irq_info *msix_entries) +{ + struct hinic_aeqs *aeqs; + int err; + u16 i, q_id; + + aeqs = kzalloc(sizeof(*aeqs), GFP_KERNEL); + if (!aeqs) + return -ENOMEM; + + hwdev->aeqs = aeqs; + aeqs->hwdev = hwdev; + aeqs->num_aeqs = num_aeqs; + + for (q_id = HINIC_AEQN_START; q_id < num_aeqs; q_id++) { + err = init_aeq(&aeqs->aeq[q_id], hwdev, q_id, + HINIC_DEFAULT_AEQ_LEN, HINIC_EQ_PAGE_SIZE, + &msix_entries[q_id]); + if (err) { + PMD_DRV_LOG(ERR, "Init aeq %d failed", q_id); + goto init_aeq_err; + } + } + + return 0; + +init_aeq_err: + for (i = 0; i < q_id; i++) + remove_aeq(&aeqs->aeq[i]); + + kfree(aeqs); + + return err; +} + +/** + * hinic_aeqs_free - free all the aeqs + * @hwdev: the pointer to the private hardware device object + */ +static void hinic_aeqs_free(struct hinic_hwdev *hwdev) +{ + struct hinic_aeqs *aeqs = hwdev->aeqs; + u16 q_id; + + /* hinic pmd use aeq[1~3], aeq[0] used in kernel only */ + for (q_id = HINIC_AEQN_START; q_id < aeqs->num_aeqs ; q_id++) + remove_aeq(&aeqs->aeq[q_id]); + + kfree(aeqs); +} + +void hinic_dump_aeq_info(struct hinic_hwdev *hwdev) +{ + struct hinic_eq *eq; + u32 addr, ci, pi; + int q_id; + + for (q_id = 0; q_id < hwdev->aeqs->num_aeqs; q_id++) { + eq = &hwdev->aeqs->aeq[q_id]; + addr = EQ_CONS_IDX_REG_ADDR(eq); + ci = hinic_hwif_read_reg(hwdev->hwif, addr); + addr = EQ_PROD_IDX_REG_ADDR(eq); + pi = hinic_hwif_read_reg(hwdev->hwif, addr); + PMD_DRV_LOG(ERR, "aeq id: %d, ci: 0x%x, pi: 0x%x", + q_id, ci, pi); + } +} + +int hinic_comm_aeqs_init(struct hinic_hwdev *hwdev) +{ + int rc; + u16 num_aeqs; + struct irq_info aeq_irqs[HINIC_MAX_AEQS]; + + num_aeqs = HINIC_HWIF_NUM_AEQS(hwdev->hwif); + if (num_aeqs < HINIC_MAX_AEQS) { + PMD_DRV_LOG(ERR, "Warning: PMD need %d AEQs, Chip have %d", + HINIC_MAX_AEQS, num_aeqs); + return HINIC_ERROR; + } + + memset(aeq_irqs, 0, sizeof(aeq_irqs)); + rc = hinic_aeqs_init(hwdev, num_aeqs, aeq_irqs); + if (rc != HINIC_OK) + PMD_DRV_LOG(ERR, "Initialize aeqs failed, rc: %d", rc); + + return rc; +} + +void hinic_comm_aeqs_free(struct hinic_hwdev *hwdev) +{ + hinic_aeqs_free(hwdev); +} diff --git a/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_eqs.h b/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_eqs.h new file mode 100644 index 000000000..16046ecde --- /dev/null +++ b/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_eqs.h @@ -0,0 +1,98 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Huawei Technologies Co., Ltd + */ + +#ifndef _HINIC_PMD_EQS_H_ +#define _HINIC_PMD_EQS_H_ + +#define HINIC_EQ_PAGE_SIZE 0x00001000 + +#define HINIC_AEQN_START 0 +#define HINIC_MAX_AEQS 4 + +#define HINIC_EQ_MAX_PAGES 8 + +#define HINIC_AEQE_SIZE 64 +#define HINIC_CEQE_SIZE 4 + +#define HINIC_AEQE_DESC_SIZE 4 +#define HINIC_AEQE_DATA_SIZE \ + (HINIC_AEQE_SIZE - HINIC_AEQE_DESC_SIZE) + +#define HINIC_DEFAULT_AEQ_LEN 64 + +#define GET_EQ_ELEMENT(eq, idx) \ + (((u8 *)(eq)->virt_addr[(idx) / (eq)->num_elem_in_pg]) + \ + (((u32)(idx) & ((eq)->num_elem_in_pg - 1)) * (eq)->elem_size)) + +#define GET_AEQ_ELEM(eq, idx) \ + ((struct 
hinic_aeq_elem *)GET_EQ_ELEMENT((eq), (idx)))
+
+#define GET_CEQ_ELEM(eq, idx)	((u32 *)GET_EQ_ELEMENT((eq), (idx)))
+
+enum hinic_eq_intr_mode {
+	HINIC_INTR_MODE_ARMED,
+	HINIC_INTR_MODE_ALWAYS,
+};
+
+enum hinic_eq_ci_arm_state {
+	HINIC_EQ_NOT_ARMED,
+	HINIC_EQ_ARMED,
+};
+
+enum hinic_aeq_type {
+	HINIC_HW_INTER_INT = 0,
+	HINIC_MBX_FROM_FUNC = 1,
+	HINIC_MSG_FROM_MGMT_CPU = 2,
+	HINIC_API_RSP = 3,
+	HINIC_API_CHAIN_STS = 4,
+	HINIC_MBX_SEND_RSLT = 5,
+	HINIC_MAX_AEQ_EVENTS
+};
+
+#define HINIC_RETRY_NUM	(10)
+
+struct hinic_eq {
+	struct hinic_hwdev *hwdev;
+	u16 q_id;
+	u16 type;
+	u32 page_size;
+	u16 eq_len;
+
+	u16 cons_idx;
+	u16 wrapped;
+
+	u16 elem_size;
+	u16 num_pages;
+	u32 num_elem_in_pg;
+
+	struct irq_info eq_irq;
+
+	dma_addr_t *dma_addr;
+	u8 **virt_addr;
+
+	u16 poll_retry_nr;
+};
+
+struct hinic_aeq_elem {
+	u8 aeqe_data[HINIC_AEQE_DATA_SIZE];
+	u32 desc;
+};
+
+struct hinic_aeqs {
+	struct hinic_hwdev *hwdev;
+	u16 poll_retry_nr;
+
+	struct hinic_eq aeq[HINIC_MAX_AEQS];
+	u16 num_aeqs;
+};
+
+void eq_update_ci(struct hinic_eq *eq);
+
+void hinic_dump_aeq_info(struct hinic_hwdev *hwdev);
+
+int hinic_comm_aeqs_init(struct hinic_hwdev *hwdev);
+
+void hinic_comm_aeqs_free(struct hinic_hwdev *hwdev);
+
+#endif /* _HINIC_PMD_EQS_H_ */
diff --git a/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_hwdev.c b/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_hwdev.c
new file mode 100644
index 000000000..cc4207678
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_hwdev.c
@@ -0,0 +1,1531 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#include <rte_ethdev_driver.h>
+#include <rte_bus_pci.h>
+#include <rte_hash.h>
+#include <rte_jhash.h>
+
+#include "hinic_compat.h"
+#include "hinic_csr.h"
+#include "hinic_pmd_hwdev.h"
+#include "hinic_pmd_hwif.h"
+#include "hinic_pmd_wq.h"
+#include "hinic_pmd_cmdq.h"
+#include "hinic_pmd_mgmt.h"
+#include "hinic_pmd_niccfg.h"
+#include "hinic_pmd_mbox.h"
+
+#define HINIC_DEAULT_EQ_MSIX_PENDING_LIMIT	0
+#define HINIC_DEAULT_EQ_MSIX_COALESC_TIMER_CFG	0xFF
+#define HINIC_DEAULT_EQ_MSIX_RESEND_TIMER_CFG	7
+
+#define HINIC_FLR_TIMEOUT	1000
+
+#define FFM_RECORD_NUM_MAX	32
+
+#define HINIC_DMA_ATTR_ENTRY_ST_SHIFT		0
+#define HINIC_DMA_ATTR_ENTRY_AT_SHIFT		8
+#define HINIC_DMA_ATTR_ENTRY_PH_SHIFT		10
+#define HINIC_DMA_ATTR_ENTRY_NO_SNOOPING_SHIFT	12
+#define HINIC_DMA_ATTR_ENTRY_TPH_EN_SHIFT	13
+
+#define HINIC_DMA_ATTR_ENTRY_ST_MASK		0xFF
+#define HINIC_DMA_ATTR_ENTRY_AT_MASK		0x3
+#define HINIC_DMA_ATTR_ENTRY_PH_MASK		0x3
+#define HINIC_DMA_ATTR_ENTRY_NO_SNOOPING_MASK	0x1
+#define HINIC_DMA_ATTR_ENTRY_TPH_EN_MASK	0x1
+
+#define HINIC_DMA_ATTR_ENTRY_SET(val, member)	\
+	(((u32)(val) & HINIC_DMA_ATTR_ENTRY_##member##_MASK) << \
+		HINIC_DMA_ATTR_ENTRY_##member##_SHIFT)
+
+#define HINIC_DMA_ATTR_ENTRY_CLEAR(val, member)	\
+	((val) & (~(HINIC_DMA_ATTR_ENTRY_##member##_MASK \
+		<< HINIC_DMA_ATTR_ENTRY_##member##_SHIFT)))
+
+#define HINIC_PCIE_ST_DISABLE	0
+#define HINIC_PCIE_AT_DISABLE	0
+#define HINIC_PCIE_PH_DISABLE	0
+#define PCIE_MSIX_ATTR_ENTRY	0
+
+#define HINIC_HASH_FUNC			rte_jhash
+#define HINIC_HASH_KEY_LEN		(sizeof(dma_addr_t))
+#define HINIC_HASH_FUNC_INIT_VAL	0
+
+static const char *__hw_to_char_fec[HILINK_FEC_MAX_TYPE] = {
+	"RS-FEC", "BASE-FEC", "NO-FEC"};
+
+static const char *__hw_to_char_port_type[LINK_PORT_MAX_TYPE] = {
+	"Unknown", "Fibre", "Electric", "Direct Attach Copper", "AOC",
+	"Back plane", "BaseT"
+};
+
+static const char *hinic_module_link_err[LINK_ERR_NUM] = {
+	"Unrecognized module",
+};
+
+struct hinic_vf_dma_attr_table {
+
struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_idx; + u8 func_dma_entry_num; + u8 entry_idx; + u8 st; + u8 at; + u8 ph; + u8 no_snooping; + u8 tph_en; + u8 resv1[3]; +}; + +/** + * hinic_cpu_to_be32 - convert data to big endian 32 bit format + * @data: the data to convert + * @len: length of data to convert, must be Multiple of 4B + */ +void hinic_cpu_to_be32(void *data, u32 len) +{ + u32 i; + u32 *mem = (u32 *)data; + + for (i = 0; i < (len >> 2); i++) { + *mem = cpu_to_be32(*mem); + mem++; + } +} + +/** + * hinic_be32_to_cpu - convert data from big endian 32 bit format + * @data: the data to convert + * @len: length of data to convert, must be Multiple of 4B + */ +void hinic_be32_to_cpu(void *data, u32 len) +{ + u32 i; + u32 *mem = (u32 *)data; + + for (i = 0; i < (len >> 2); i++) { + *mem = be32_to_cpu(*mem); + mem++; + } +} + +static void *hinic_dma_mem_zalloc(struct hinic_hwdev *hwdev, size_t size, + dma_addr_t *dma_handle, unsigned int align, + unsigned int socket_id) +{ + int rc, alloc_cnt; + const struct rte_memzone *mz; + char z_name[RTE_MEMZONE_NAMESIZE]; + hash_sig_t sig; + rte_iova_t iova; + + if (dma_handle == NULL || 0 == size) + return NULL; + + alloc_cnt = rte_atomic32_add_return(&hwdev->os_dep.dma_alloc_cnt, 1); + snprintf(z_name, sizeof(z_name), "%s_%d", + hwdev->pcidev_hdl->name, alloc_cnt); + + mz = rte_memzone_reserve_aligned(z_name, size, socket_id, + RTE_MEMZONE_IOVA_CONTIG, align); + if (!mz) { + PMD_DRV_LOG(ERR, "Alloc dma able memory failed, errno: %d, ma_name: %s, size: 0x%zx", + rte_errno, z_name, size); + return NULL; + } + + iova = mz->iova; + + /* check if phys_addr already exist */ + sig = HINIC_HASH_FUNC(&iova, HINIC_HASH_KEY_LEN, + HINIC_HASH_FUNC_INIT_VAL); + rc = rte_hash_lookup_with_hash(hwdev->os_dep.dma_addr_hash, + &iova, sig); + if (rc >= 0) { + PMD_DRV_LOG(ERR, "Dma addr: %p already in hash table, error: %d, mz_name: %s", + (void *)iova, rc, z_name); + goto phys_addr_hash_err; + } + + /* record paddr in hash table */ + rte_spinlock_lock(&hwdev->os_dep.dma_hash_lock); + rc = rte_hash_add_key_with_hash_data(hwdev->os_dep.dma_addr_hash, + &iova, sig, + (void *)(u64)mz); + rte_spinlock_unlock(&hwdev->os_dep.dma_hash_lock); + if (rc) { + PMD_DRV_LOG(ERR, "Insert dma addr: %p hash failed, error: %d, mz_name: %s", + (void *)iova, rc, z_name); + goto phys_addr_hash_err; + } + *dma_handle = iova; + memset(mz->addr, 0, size); + + return mz->addr; + +phys_addr_hash_err: + (void)rte_memzone_free(mz); + + return NULL; +} + +static void +hinic_dma_mem_free(struct hinic_hwdev *hwdev, size_t size, + void *virt, dma_addr_t phys) +{ + int rc; + struct rte_memzone *mz = NULL; + struct rte_hash *hash; + hash_sig_t sig; + + if (virt == NULL || phys == 0) + return; + + hash = hwdev->os_dep.dma_addr_hash; + sig = HINIC_HASH_FUNC(&phys, HINIC_HASH_KEY_LEN, + HINIC_HASH_FUNC_INIT_VAL); + rc = rte_hash_lookup_with_hash_data(hash, &phys, sig, (void **)&mz); + if (rc < 0) { + PMD_DRV_LOG(ERR, "Can not find phys_addr: %p, error: %d", + (void *)phys, rc); + return; + } + + if (virt != mz->addr || size > mz->len) { + PMD_DRV_LOG(ERR, "Match mz_info failed: " + "mz.name: %s, mz.phys: %p, mz.virt: %p, mz.len: %zu, " + "phys: %p, virt: %p, size: %zu", + mz->name, (void *)mz->iova, mz->addr, mz->len, + (void *)phys, virt, size); + } + + rte_spinlock_lock(&hwdev->os_dep.dma_hash_lock); + (void)rte_hash_del_key_with_hash(hash, &phys, sig); + rte_spinlock_unlock(&hwdev->os_dep.dma_hash_lock); + + (void)rte_memzone_free(mz); +} + +void *dma_zalloc_coherent(void *hwdev, size_t 
size, dma_addr_t *dma_handle, + unsigned int socket_id) +{ + return hinic_dma_mem_zalloc(hwdev, size, dma_handle, + RTE_CACHE_LINE_SIZE, socket_id); +} + +void *dma_zalloc_coherent_aligned(void *hwdev, size_t size, + dma_addr_t *dma_handle, unsigned int socket_id) +{ + return hinic_dma_mem_zalloc(hwdev, size, dma_handle, HINIC_PAGE_SIZE, + socket_id); +} + +void *dma_zalloc_coherent_aligned256k(void *hwdev, size_t size, + dma_addr_t *dma_handle, + unsigned int socket_id) +{ + return hinic_dma_mem_zalloc(hwdev, size, dma_handle, + HINIC_PAGE_SIZE * 64, socket_id); +} + +void dma_free_coherent(void *hwdev, size_t size, void *virt, dma_addr_t phys) +{ + hinic_dma_mem_free(hwdev, size, virt, phys); +} + +void dma_free_coherent_volatile(void *hwdev, size_t size, + volatile void *virt, dma_addr_t phys) +{ + int rc; + struct rte_memzone *mz = NULL; + struct hinic_hwdev *dev = hwdev; + struct rte_hash *hash; + hash_sig_t sig; + + if (virt == NULL || phys == 0) + return; + + hash = dev->os_dep.dma_addr_hash; + sig = HINIC_HASH_FUNC(&phys, HINIC_HASH_KEY_LEN, + HINIC_HASH_FUNC_INIT_VAL); + rc = rte_hash_lookup_with_hash_data(hash, &phys, sig, (void **)&mz); + if (rc < 0) { + PMD_DRV_LOG(ERR, "Can not find phys_addr: %p, error: %d", + (void *)phys, rc); + return; + } + + if (virt != mz->addr || size > mz->len) { + PMD_DRV_LOG(ERR, "Match mz_info failed: " + "mz.name:%s, mz.phys:%p, mz.virt:%p, mz.len:%zu, " + "phys:%p, virt:%p, size:%zu", + mz->name, (void *)mz->iova, mz->addr, mz->len, + (void *)phys, virt, size); + } + + rte_spinlock_lock(&dev->os_dep.dma_hash_lock); + (void)rte_hash_del_key_with_hash(hash, &phys, sig); + rte_spinlock_unlock(&dev->os_dep.dma_hash_lock); + + (void)rte_memzone_free(mz); +} + +struct dma_pool *dma_pool_create(const char *name, void *dev, + size_t size, size_t align, size_t boundary) +{ + struct pci_pool *pool; + + pool = rte_zmalloc(NULL, sizeof(*pool), HINIC_MEM_ALLOC_ALIGN_MIN); + if (!pool) + return NULL; + + rte_atomic32_set(&pool->inuse, 0); + pool->elem_size = size; + pool->align = align; + pool->boundary = boundary; + pool->hwdev = dev; + strncpy(pool->name, name, (sizeof(pool->name) - 1)); + + return pool; +} + +void dma_pool_destroy(struct dma_pool *pool) +{ + if (!pool) + return; + + if (rte_atomic32_read(&pool->inuse) != 0) { + PMD_DRV_LOG(ERR, "Leak memory, dma_pool: %s, inuse_count: %d", + pool->name, rte_atomic32_read(&pool->inuse)); + } + + rte_free(pool); +} + +void *dma_pool_alloc(struct pci_pool *pool, dma_addr_t *dma_addr) +{ + void *buf; + + buf = hinic_dma_mem_zalloc(pool->hwdev, pool->elem_size, dma_addr, + (u32)pool->align, SOCKET_ID_ANY); + if (buf) + rte_atomic32_inc(&pool->inuse); + + return buf; +} + +void dma_pool_free(struct pci_pool *pool, void *vaddr, dma_addr_t dma) +{ + rte_atomic32_dec(&pool->inuse); + hinic_dma_mem_free(pool->hwdev, pool->elem_size, vaddr, dma); +} + +#define HINIC_MAX_DMA_ENTRIES 8192 +int hinic_osdep_init(struct hinic_hwdev *hwdev) +{ + struct rte_hash_parameters dh_params = { 0 }; + struct rte_hash *paddr_hash = NULL; + + rte_atomic32_set(&hwdev->os_dep.dma_alloc_cnt, 0); + rte_spinlock_init(&hwdev->os_dep.dma_hash_lock); + + dh_params.name = hwdev->pcidev_hdl->name; + dh_params.entries = HINIC_MAX_DMA_ENTRIES; + dh_params.key_len = HINIC_HASH_KEY_LEN; + dh_params.hash_func = HINIC_HASH_FUNC; + dh_params.hash_func_init_val = HINIC_HASH_FUNC_INIT_VAL; + dh_params.socket_id = SOCKET_ID_ANY; + + paddr_hash = rte_hash_find_existing(dh_params.name); + if (paddr_hash == NULL) { + paddr_hash = 
rte_hash_create(&dh_params); + if (paddr_hash == NULL) { + PMD_DRV_LOG(ERR, "Create nic_dev phys_addr hash table failed"); + return -ENOMEM; + } + } else { + PMD_DRV_LOG(INFO, "Using existing dma hash table %s", + dh_params.name); + } + hwdev->os_dep.dma_addr_hash = paddr_hash; + + return 0; +} + +void hinic_osdep_deinit(struct hinic_hwdev *hwdev) +{ + uint32_t iter = 0; + dma_addr_t key_pa; + struct rte_memzone *data_mz = NULL; + struct rte_hash *paddr_hash = hwdev->os_dep.dma_addr_hash; + + if (paddr_hash) { + /* iterate through the hash table */ + while (rte_hash_iterate(paddr_hash, (const void **)&key_pa, + (void **)&data_mz, &iter) >= 0) { + if (data_mz) { + PMD_DRV_LOG(WARNING, "Free leaked dma_addr: %p, mz: %s", + (void *)key_pa, data_mz->name); + (void)rte_memzone_free(data_mz); + } + } + + /* free phys_addr hash table */ + rte_hash_free(paddr_hash); + } +} + +/** + * hinic_set_ci_table - set ci attribute table + * @hwdev: the hardware interface of a nic device + * @q_id: Queue id of SQ + * @attr: Point to SQ CI attribute table + * @return + * 0 on success and ci attribute table is filled, + * negative error value otherwise. + */ +int hinic_set_ci_table(void *hwdev, u16 q_id, struct hinic_sq_attr *attr) +{ + struct hinic_cons_idx_attr cons_idx_attr; + + memset(&cons_idx_attr, 0, sizeof(cons_idx_attr)); + cons_idx_attr.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1; + cons_idx_attr.func_idx = hinic_global_func_id(hwdev); + cons_idx_attr.dma_attr_off = attr->dma_attr_off; + cons_idx_attr.pending_limit = attr->pending_limit; + cons_idx_attr.coalescing_time = attr->coalescing_time; + if (attr->intr_en) { + cons_idx_attr.intr_en = attr->intr_en; + cons_idx_attr.intr_idx = attr->intr_idx; + } + + cons_idx_attr.l2nic_sqn = attr->l2nic_sqn; + cons_idx_attr.sq_id = q_id; + cons_idx_attr.ci_addr = attr->ci_dma_base; + + return hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM, + HINIC_MGMT_CMD_L2NIC_SQ_CI_ATTR_SET, + &cons_idx_attr, sizeof(cons_idx_attr), + NULL, NULL, 0); +} + +/** + * hinic_set_pagesize - set page size to vat table + * @hwdev: the hardware interface of a nic device + * @page_size: vat page size + * @return + * 0 on success, + * negative error value otherwise. 
+ */ +int hinic_set_pagesize(void *hwdev, u8 page_size) +{ + struct hinic_page_size cmd; + + if (page_size > HINIC_PAGE_SIZE_MAX) { + PMD_DRV_LOG(ERR, "Invalid page_size %u, bigger than %u", + page_size, HINIC_PAGE_SIZE_MAX); + return -EINVAL; + } + + memset(&cmd, 0, sizeof(cmd)); + cmd.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1; + cmd.func_idx = hinic_global_func_id(hwdev); + cmd.ppf_idx = hinic_ppf_idx(hwdev); + cmd.page_size = page_size; + + return hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM, + HINIC_MGMT_CMD_PAGESIZE_SET, + &cmd, sizeof(cmd), + NULL, NULL, 0); +} + +static int wait_for_flr_finish(struct hinic_hwif *hwif) +{ + unsigned long end; + enum hinic_pf_status status; + + end = jiffies + msecs_to_jiffies(HINIC_FLR_TIMEOUT); + do { + status = hinic_get_pf_status(hwif); + if (status == HINIC_PF_STATUS_FLR_FINISH_FLAG) { + return 0; + } + + rte_delay_ms(10); + } while (time_before(jiffies, end)); + + return -EFAULT; +} + +#define HINIC_WAIT_CMDQ_IDLE_TIMEOUT 1000 + +static int wait_cmdq_stop(struct hinic_hwdev *hwdev) +{ + enum hinic_cmdq_type cmdq_type; + struct hinic_cmdqs *cmdqs = hwdev->cmdqs; + unsigned long end; + int err = 0; + + if (!(cmdqs->status & HINIC_CMDQ_ENABLE)) + return 0; + + cmdqs->status &= ~HINIC_CMDQ_ENABLE; + + end = jiffies + msecs_to_jiffies(HINIC_WAIT_CMDQ_IDLE_TIMEOUT); + do { + err = 0; + cmdq_type = HINIC_CMDQ_SYNC; + for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++) { + if (!hinic_cmdq_idle(&cmdqs->cmdq[cmdq_type])) { + err = -EBUSY; + break; + } + } + + if (!err) + return 0; + + rte_delay_ms(1); + } while (time_before(jiffies, end)); + + cmdqs->status |= HINIC_CMDQ_ENABLE; + + return err; +} + +static int hinic_vf_rx_tx_flush(struct hinic_hwdev *hwdev) +{ + struct hinic_clear_resource clr_res; + int err; + + err = wait_cmdq_stop(hwdev); + if (err) { + PMD_DRV_LOG(WARNING, "Cmdq is still working"); + return err; + } + + memset(&clr_res, 0, sizeof(clr_res)); + clr_res.func_idx = HINIC_HWIF_GLOBAL_IDX(hwdev->hwif); + clr_res.ppf_idx = HINIC_HWIF_PPF_IDX(hwdev->hwif); + err = hinic_mbox_to_pf_no_ack(hwdev, HINIC_MOD_COMM, + HINIC_MGMT_CMD_START_FLR, &clr_res, sizeof(clr_res)); + if (err) + PMD_DRV_LOG(WARNING, "Notice flush message failed"); + + /* + * PF firstly set VF doorbell flush csr to be disabled. After PF finish + * VF resources flush, PF will set VF doorbell flush csr to be enabled. + */ + err = wait_until_doorbell_flush_states(hwdev->hwif, DISABLE_DOORBELL); + if (err) + PMD_DRV_LOG(WARNING, "Wait doorbell flush disable timeout"); + + err = wait_until_doorbell_flush_states(hwdev->hwif, ENABLE_DOORBELL); + if (err) + PMD_DRV_LOG(WARNING, "Wait doorbell flush enable timeout"); + + err = hinic_reinit_cmdq_ctxts(hwdev); + if (err) + PMD_DRV_LOG(WARNING, "Reinit cmdq failed when vf flush"); + + return err; +} + +/** + * hinic_pf_rx_tx_flush - clean up hardware resource + * @hwdev: the hardware interface of a nic device + * @return + * 0 on success, + * negative error value otherwise. 
+ */ +static int hinic_pf_rx_tx_flush(struct hinic_hwdev *hwdev) +{ + struct hinic_hwif *hwif = hwdev->hwif; + struct hinic_clear_doorbell clear_db; + struct hinic_clear_resource clr_res; + int err; + + rte_delay_ms(100); + + err = wait_cmdq_stop(hwdev); + if (err) { + PMD_DRV_LOG(ERR, "Cmdq is still working"); + return err; + } + + hinic_disable_doorbell(hwif); + memset(&clear_db, 0, sizeof(clear_db)); + clear_db.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1; + clear_db.func_idx = HINIC_HWIF_GLOBAL_IDX(hwif); + clear_db.ppf_idx = HINIC_HWIF_PPF_IDX(hwif); + err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM, + HINIC_MGMT_CMD_FLUSH_DOORBELL, &clear_db, + sizeof(clear_db), NULL, NULL, 0); + if (err) + PMD_DRV_LOG(WARNING, "Flush doorbell failed"); + + hinic_set_pf_status(hwif, HINIC_PF_STATUS_FLR_START_FLAG); + memset(&clr_res, 0, sizeof(clr_res)); + clr_res.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1; + clr_res.func_idx = HINIC_HWIF_GLOBAL_IDX(hwif); + clr_res.ppf_idx = HINIC_HWIF_PPF_IDX(hwif); + + err = hinic_msg_to_mgmt_no_ack(hwdev, HINIC_MOD_COMM, + HINIC_MGMT_CMD_START_FLR, &clr_res, + sizeof(clr_res), NULL, NULL); + if (err) + PMD_DRV_LOG(WARNING, "Notice flush message failed"); + + err = wait_for_flr_finish(hwif); + if (err) + PMD_DRV_LOG(WARNING, "Wait firmware FLR timeout"); + + hinic_enable_doorbell(hwif); + + err = hinic_reinit_cmdq_ctxts(hwdev); + if (err) + PMD_DRV_LOG(WARNING, "Reinit cmdq failed when pf flush"); + + return 0; +} + +int hinic_func_rx_tx_flush(struct hinic_hwdev *hwdev) +{ + if (HINIC_FUNC_TYPE(hwdev) == TYPE_VF) + return hinic_vf_rx_tx_flush(hwdev); + else + return hinic_pf_rx_tx_flush(hwdev); +} + +/** + * hinic_get_interrupt_cfg - get interrupt configuration from NIC + * @hwdev: the hardware interface of a nic device + * @interrupt_info: Information of Interrupt aggregation + * Return: 0 on success, negative error value otherwise. + */ +static int hinic_get_interrupt_cfg(struct hinic_hwdev *hwdev, + struct nic_interrupt_info *interrupt_info) +{ + struct hinic_msix_config msix_cfg; + u16 out_size = sizeof(msix_cfg); + int err; + + memset(&msix_cfg, 0, sizeof(msix_cfg)); + msix_cfg.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1; + msix_cfg.func_id = hinic_global_func_id(hwdev); + msix_cfg.msix_index = interrupt_info->msix_index; + + err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM, + HINIC_MGMT_CMD_MSI_CTRL_REG_RD_BY_UP, + &msix_cfg, sizeof(msix_cfg), + &msix_cfg, &out_size, 0); + if (err || !out_size || msix_cfg.mgmt_msg_head.status) { + PMD_DRV_LOG(ERR, "Get interrupt config failed, ret: %d", + msix_cfg.mgmt_msg_head.status); + return -EINVAL; + } + + interrupt_info->lli_credit_limit = msix_cfg.lli_credit_cnt; + interrupt_info->lli_timer_cfg = msix_cfg.lli_tmier_cnt; + interrupt_info->pending_limt = msix_cfg.pending_cnt; + interrupt_info->coalesc_timer_cfg = msix_cfg.coalesct_timer_cnt; + interrupt_info->resend_timer_cfg = msix_cfg.resend_timer_cnt; + return 0; +} + +/** + * hinic_set_interrupt_cfg - set interrupt configuration to NIC + * @hwdev: the hardware interface of a nic device + * @interrupt_info: Information of Interrupt aggregation + * Return: 0 on success, negative error value otherwise. 
+ */ +int hinic_set_interrupt_cfg(struct hinic_hwdev *hwdev, + struct nic_interrupt_info interrupt_info) +{ + struct hinic_msix_config msix_cfg; + struct nic_interrupt_info temp_info; + u16 out_size = sizeof(msix_cfg); + int err; + + memset(&msix_cfg, 0, sizeof(msix_cfg)); + msix_cfg.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1; + msix_cfg.func_id = hinic_global_func_id(hwdev); + msix_cfg.msix_index = (u16)interrupt_info.msix_index; + + temp_info.msix_index = interrupt_info.msix_index; + + err = hinic_get_interrupt_cfg(hwdev, &temp_info); + if (err) + return -EINVAL; + + msix_cfg.lli_credit_cnt = temp_info.lli_credit_limit; + msix_cfg.lli_tmier_cnt = temp_info.lli_timer_cfg; + msix_cfg.pending_cnt = temp_info.pending_limt; + msix_cfg.coalesct_timer_cnt = temp_info.coalesc_timer_cfg; + msix_cfg.resend_timer_cnt = temp_info.resend_timer_cfg; + + if (interrupt_info.lli_set) { + msix_cfg.lli_credit_cnt = interrupt_info.lli_credit_limit; + msix_cfg.lli_tmier_cnt = interrupt_info.lli_timer_cfg; + } + + if (interrupt_info.interrupt_coalesc_set) { + msix_cfg.pending_cnt = interrupt_info.pending_limt; + msix_cfg.coalesct_timer_cnt = interrupt_info.coalesc_timer_cfg; + msix_cfg.resend_timer_cnt = interrupt_info.resend_timer_cfg; + } + + err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM, + HINIC_MGMT_CMD_MSI_CTRL_REG_WR_BY_UP, + &msix_cfg, sizeof(msix_cfg), + &msix_cfg, &out_size, 0); + if (err || !out_size || msix_cfg.mgmt_msg_head.status) { + PMD_DRV_LOG(ERR, "Set interrupt config failed, ret: %d", + msix_cfg.mgmt_msg_head.status); + return -EINVAL; + } + + return 0; +} + +/** + * init_aeqs_msix_attr - Init interrupt attributes of aeq + * @hwdev: the hardware interface of a nic device + * @return + * 0 on success, + * negative error value otherwise. + */ +int init_aeqs_msix_attr(void *hwdev) +{ + struct hinic_hwdev *nic_hwdev = hwdev; + struct hinic_aeqs *aeqs = nic_hwdev->aeqs; + struct nic_interrupt_info info = {0}; + struct hinic_eq *eq; + u16 q_id; + int err; + + info.lli_set = 0; + info.interrupt_coalesc_set = 1; + info.pending_limt = HINIC_DEAULT_EQ_MSIX_PENDING_LIMIT; + info.coalesc_timer_cfg = HINIC_DEAULT_EQ_MSIX_COALESC_TIMER_CFG; + info.resend_timer_cfg = HINIC_DEAULT_EQ_MSIX_RESEND_TIMER_CFG; + + for (q_id = 0; q_id < aeqs->num_aeqs; q_id++) { + eq = &aeqs->aeq[q_id]; + info.msix_index = eq->eq_irq.msix_entry_idx; + err = hinic_set_interrupt_cfg(hwdev, info); + if (err) { + PMD_DRV_LOG(ERR, "Set msix attr for aeq %d failed", + q_id); + return -EFAULT; + } + } + + return 0; +} + +/** + * set_pf_dma_attr_entry - set the dma attributes for entry + * @hwdev: the pointer to the private hardware device object + * @entry_idx: the entry index in the dma table + * @st: PCIE TLP steering tag + * @at: PCIE TLP AT field + * @ph: PCIE TLP Processing Hint field + * @no_snooping: PCIE TLP No snooping + * @tph_en: PCIE TLP Processing Hint Enable + */ +static void set_pf_dma_attr_entry(struct hinic_hwdev *hwdev, u32 entry_idx, + u8 st, u8 at, u8 ph, + enum hinic_pcie_nosnoop no_snooping, + enum hinic_pcie_tph tph_en) +{ + u32 addr, val, dma_attr_entry; + + /* Read Modify Write */ + addr = HINIC_CSR_DMA_ATTR_TBL_ADDR(entry_idx); + + val = hinic_hwif_read_reg(hwdev->hwif, addr); + val = HINIC_DMA_ATTR_ENTRY_CLEAR(val, ST) & + HINIC_DMA_ATTR_ENTRY_CLEAR(val, AT) & + HINIC_DMA_ATTR_ENTRY_CLEAR(val, PH) & + HINIC_DMA_ATTR_ENTRY_CLEAR(val, NO_SNOOPING) & + HINIC_DMA_ATTR_ENTRY_CLEAR(val, TPH_EN); + + dma_attr_entry = HINIC_DMA_ATTR_ENTRY_SET(st, ST) | + HINIC_DMA_ATTR_ENTRY_SET(at, AT) | + 
HINIC_DMA_ATTR_ENTRY_SET(ph, PH) | + HINIC_DMA_ATTR_ENTRY_SET(no_snooping, NO_SNOOPING) | + HINIC_DMA_ATTR_ENTRY_SET(tph_en, TPH_EN); + + val |= dma_attr_entry; + hinic_hwif_write_reg(hwdev->hwif, addr, val); +} + +static int set_vf_dma_attr_entry(struct hinic_hwdev *hwdev, u8 entry_idx, + u8 st, u8 at, u8 ph, + enum hinic_pcie_nosnoop no_snooping, + enum hinic_pcie_tph tph_en) +{ + struct hinic_vf_dma_attr_table attr; + + memset(&attr, 0, sizeof(attr)); + attr.func_idx = hinic_global_func_id(hwdev); + attr.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1; + attr.func_dma_entry_num = hinic_dma_attr_entry_num(hwdev); + attr.entry_idx = entry_idx; + attr.st = st; + attr.at = at; + attr.ph = ph; + attr.no_snooping = no_snooping; + attr.tph_en = tph_en; + + return hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM, + HINIC_MGMT_CMD_DMA_ATTR_SET, + &attr, sizeof(attr), NULL, NULL, 0); +} + +/** + * dma_attr_table_init - initialize the the default dma attributes + * @hwdev: the pointer to the private hardware device object + */ +static int dma_attr_table_init(struct hinic_hwdev *hwdev) +{ + int err = 0; + + if (HINIC_IS_VF(hwdev)) + err = set_vf_dma_attr_entry(hwdev, PCIE_MSIX_ATTR_ENTRY, + HINIC_PCIE_ST_DISABLE, HINIC_PCIE_AT_DISABLE, + HINIC_PCIE_PH_DISABLE, HINIC_PCIE_SNOOP, + HINIC_PCIE_TPH_DISABLE); + else + set_pf_dma_attr_entry(hwdev, PCIE_MSIX_ATTR_ENTRY, + HINIC_PCIE_ST_DISABLE, HINIC_PCIE_AT_DISABLE, + HINIC_PCIE_PH_DISABLE, HINIC_PCIE_SNOOP, + HINIC_PCIE_TPH_DISABLE); + + return err; +} + +/** + * hinic_init_attr_table - init dma and aeq msix attribute table + * @hwdev: the pointer to the private hardware device object + */ +int hinic_init_attr_table(struct hinic_hwdev *hwdev) +{ + int err; + + err = dma_attr_table_init(hwdev); + if (err) { + PMD_DRV_LOG(ERR, "Initialize dma attribute table failed, err: %d", + err); + return err; + } + + err = init_aeqs_msix_attr(hwdev); + if (err) { + PMD_DRV_LOG(ERR, "Initialize aeqs msix attribute failed, err: %d", + err); + return err; + } + + return 0; +} + +#define FAULT_SHOW_STR_LEN 16 +static void fault_report_show(struct hinic_hwdev *hwdev, + struct hinic_fault_event *event) +{ + char fault_type[FAULT_TYPE_MAX][FAULT_SHOW_STR_LEN + 1] = { + "chip", "ucode", "mem rd timeout", "mem wr timeout", + "reg rd timeout", "reg wr timeout"}; + char fault_level[FAULT_LEVEL_MAX][FAULT_SHOW_STR_LEN + 1] = { + "fatal", "reset", "flr", "general", "suggestion"}; + char type_str[FAULT_SHOW_STR_LEN + 1] = { 0 }; + char level_str[FAULT_SHOW_STR_LEN + 1] = { 0 }; + u8 err_level; + + PMD_DRV_LOG(WARNING, "Fault event report received, func_id: %d", + hinic_global_func_id(hwdev)); + + if (event->type < FAULT_TYPE_MAX) + strncpy(type_str, fault_type[event->type], FAULT_SHOW_STR_LEN); + else + strncpy(type_str, "unknown", FAULT_SHOW_STR_LEN); + PMD_DRV_LOG(WARNING, "fault type: %d [%s]", + event->type, type_str); + PMD_DRV_LOG(WARNING, "fault val[0]: 0x%08x", + event->event.val[0]); + PMD_DRV_LOG(WARNING, "fault val[1]: 0x%08x", + event->event.val[1]); + PMD_DRV_LOG(WARNING, "fault val[2]: 0x%08x", + event->event.val[2]); + PMD_DRV_LOG(WARNING, "fault val[3]: 0x%08x", + event->event.val[3]); + + switch (event->type) { + case FAULT_TYPE_CHIP: + err_level = event->event.chip.err_level; + if (err_level < FAULT_LEVEL_MAX) + strncpy(level_str, fault_level[err_level], + FAULT_SHOW_STR_LEN); + else + strncpy(level_str, "unknown", + FAULT_SHOW_STR_LEN); + + PMD_DRV_LOG(WARNING, "err_level: %d [%s]", + err_level, level_str); + + if (err_level == FAULT_LEVEL_SERIOUS_FLR) { + 
PMD_DRV_LOG(WARNING, "flr func_id: %d", + event->event.chip.func_id); + } else { + PMD_DRV_LOG(WARNING, "node_id: %d", + event->event.chip.node_id); + PMD_DRV_LOG(WARNING, "err_type: %d", + event->event.chip.err_type); + PMD_DRV_LOG(WARNING, "err_csr_addr: %d", + event->event.chip.err_csr_addr); + PMD_DRV_LOG(WARNING, "err_csr_value: %d", + event->event.chip.err_csr_value); + } + break; + case FAULT_TYPE_UCODE: + PMD_DRV_LOG(WARNING, "cause_id: %d", + event->event.ucode.cause_id); + PMD_DRV_LOG(WARNING, "core_id: %d", + event->event.ucode.core_id); + PMD_DRV_LOG(WARNING, "c_id: %d", + event->event.ucode.c_id); + PMD_DRV_LOG(WARNING, "epc: %d", + event->event.ucode.epc); + break; + case FAULT_TYPE_MEM_RD_TIMEOUT: + case FAULT_TYPE_MEM_WR_TIMEOUT: + PMD_DRV_LOG(WARNING, "err_csr_ctrl: %d", + event->event.mem_timeout.err_csr_ctrl); + PMD_DRV_LOG(WARNING, "err_csr_data: %d", + event->event.mem_timeout.err_csr_data); + PMD_DRV_LOG(WARNING, "ctrl_tab: %d", + event->event.mem_timeout.ctrl_tab); + PMD_DRV_LOG(WARNING, "mem_index: %d", + event->event.mem_timeout.mem_index); + break; + case FAULT_TYPE_REG_RD_TIMEOUT: + case FAULT_TYPE_REG_WR_TIMEOUT: + PMD_DRV_LOG(WARNING, "err_csr: %d", + event->event.reg_timeout.err_csr); + break; + default: + break; + } +} + +static int resources_state_set(struct hinic_hwdev *hwdev, + enum hinic_res_state state) +{ + struct hinic_hwif *hwif = hwdev->hwif; + struct hinic_cmd_set_res_state res_state; + + memset(&res_state, 0, sizeof(res_state)); + res_state.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1; + res_state.func_idx = HINIC_HWIF_GLOBAL_IDX(hwif); + res_state.state = state; + + return hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM, + HINIC_MGMT_CMD_RES_STATE_SET, + &res_state, sizeof(res_state), NULL, NULL, 0); +} + +/** + * hinic_activate_hwdev_state - Active host nic state and notify mgmt channel + * that host nic is ready. + * @hwdev: the hardware interface of a nic device + * @return + * 0 on success, + * negative error value otherwise. + */ +int hinic_activate_hwdev_state(struct hinic_hwdev *hwdev) +{ + int rc = HINIC_OK; + + if (!hwdev) + return -EINVAL; + + hinic_set_pf_status(hwdev->hwif, HINIC_PF_STATUS_ACTIVE_FLAG); + + rc = resources_state_set(hwdev, HINIC_RES_ACTIVE); + if (rc) { + PMD_DRV_LOG(ERR, "Initialize resources state failed"); + return rc; + } + + return 0; +} + +/** + * hinic_deactivate_hwdev_state - Deactivate host nic state and notify mgmt + * channel that host nic is not ready. 
+ * @hwdev: the pointer to the private hardware device object + */ +void hinic_deactivate_hwdev_state(struct hinic_hwdev *hwdev) +{ + int rc = HINIC_OK; + + if (!hwdev) + return; + + rc = resources_state_set(hwdev, HINIC_RES_CLEAN); + if (rc) + PMD_DRV_LOG(ERR, "Deinit resources state failed"); + + hinic_set_pf_status(hwdev->hwif, HINIC_PF_STATUS_INIT); +} + +int hinic_get_board_info(void *hwdev, struct hinic_board_info *info) +{ + struct hinic_comm_board_info board_info; + u16 out_size = sizeof(board_info); + int err; + + if (!hwdev || !info) + return -EINVAL; + + memset(&board_info, 0, sizeof(board_info)); + board_info.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1; + err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM, + HINIC_MGMT_CMD_GET_BOARD_INFO, + &board_info, sizeof(board_info), + &board_info, &out_size, 0); + if (err || board_info.mgmt_msg_head.status || !out_size) { + PMD_DRV_LOG(ERR, "Failed to get board info, err: %d, status: 0x%x, out size: 0x%x", + err, board_info.mgmt_msg_head.status, out_size); + return -EFAULT; + } + + memcpy(info, &board_info.info, sizeof(*info)); + return 0; +} + +/** + * hinic_l2nic_reset - Restore the initial state of NIC + * @hwdev: the hardware interface of a nic device + * @return + * 0 on success, + * negative error value otherwise. + */ +int hinic_l2nic_reset(struct hinic_hwdev *hwdev) +{ + struct hinic_hwif *hwif = hwdev->hwif; + struct hinic_l2nic_reset l2nic_reset; + int err = 0; + + err = hinic_set_vport_enable(hwdev, false); + if (err) { + PMD_DRV_LOG(ERR, "Set vport disable failed"); + return err; + } + + rte_delay_ms(100); + + memset(&l2nic_reset, 0, sizeof(l2nic_reset)); + l2nic_reset.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1; + l2nic_reset.func_id = HINIC_HWIF_GLOBAL_IDX(hwif); + err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM, + HINIC_MGMT_CMD_L2NIC_RESET, + &l2nic_reset, sizeof(l2nic_reset), + NULL, NULL, 0); + if (err || l2nic_reset.mgmt_msg_head.status) { + PMD_DRV_LOG(ERR, "Reset L2NIC resources failed"); + return -EFAULT; + } + + return 0; +} + +static void +hinic_show_sw_watchdog_timeout_info(void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct hinic_mgmt_watchdog_info *watchdog_info; + u32 *dump_addr, *reg, stack_len, i, j; + + if (in_size != sizeof(*watchdog_info)) { + PMD_DRV_LOG(ERR, "Invalid mgmt watchdog report, length: %d, should be %zu", + in_size, sizeof(*watchdog_info)); + return; + } + + watchdog_info = (struct hinic_mgmt_watchdog_info *)buf_in; + + PMD_DRV_LOG(ERR, "Mgmt deadloop time: 0x%x 0x%x, task id: 0x%x, sp: 0x%x", + watchdog_info->curr_time_h, watchdog_info->curr_time_l, + watchdog_info->task_id, watchdog_info->sp); + PMD_DRV_LOG(ERR, "Stack current used: 0x%x, peak used: 0x%x, overflow flag: 0x%x, top: 0x%x, bottom: 0x%x", + watchdog_info->curr_used, watchdog_info->peak_used, + watchdog_info->is_overflow, watchdog_info->stack_top, + watchdog_info->stack_bottom); + + PMD_DRV_LOG(ERR, "Mgmt pc: 0x%08x, lr: 0x%08x, cpsr: 0x%08x", + watchdog_info->pc, watchdog_info->lr, watchdog_info->cpsr); + + PMD_DRV_LOG(ERR, "Mgmt register info"); + + for (i = 0; i < 3; i++) { + reg = watchdog_info->reg + (u64)(u32)(4 * i); + PMD_DRV_LOG(ERR, "0x%08x 0x%08x 0x%08x 0x%08x", + *(reg), *(reg + 1), *(reg + 2), *(reg + 3)); + } + + PMD_DRV_LOG(ERR, "0x%08x", watchdog_info->reg[12]); + + if (watchdog_info->stack_actlen <= 1024) { + stack_len = watchdog_info->stack_actlen; + } else { + PMD_DRV_LOG(ERR, "Oops stack length: 0x%x is wrong", + watchdog_info->stack_actlen); + stack_len = 1024; + } + + PMD_DRV_LOG(ERR, "Mgmt 
dump stack, 16Bytes per line(start from sp)"); + for (i = 0; i < (stack_len / 16); i++) { + dump_addr = (u32 *)(watchdog_info->data + ((u64)(u32)(i * 16))); + PMD_DRV_LOG(ERR, "0x%08x 0x%08x 0x%08x 0x%08x", + *dump_addr, *(dump_addr + 1), *(dump_addr + 2), + *(dump_addr + 3)); + } + + for (j = 0; j < ((stack_len % 16) / 4); j++) { + dump_addr = (u32 *)(watchdog_info->data + + ((u64)(u32)(i * 16 + j * 4))); + PMD_DRV_LOG(ERR, "0x%08x", *dump_addr); + } + + *out_size = sizeof(*watchdog_info); + watchdog_info = (struct hinic_mgmt_watchdog_info *)buf_out; + watchdog_info->mgmt_msg_head.status = 0; +} + +static void hinic_show_pcie_dfx_info(struct hinic_hwdev *hwdev, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct hinic_pcie_dfx_ntc *notice_info = + (struct hinic_pcie_dfx_ntc *)buf_in; + struct hinic_pcie_dfx_info dfx_info; + u16 size = 0; + u16 cnt = 0; + u32 num = 0; + u32 i, j; + int err; + u32 *reg; + + if (in_size != sizeof(*notice_info)) { + PMD_DRV_LOG(ERR, "Invalid pcie dfx notice info, length: %d, should be %zu.", + in_size, sizeof(*notice_info)); + return; + } + + ((struct hinic_pcie_dfx_ntc *)buf_out)->mgmt_msg_head.status = 0; + *out_size = sizeof(*notice_info); + memset(&dfx_info, 0, sizeof(dfx_info)); + num = (u32)(notice_info->len / 1024); + PMD_DRV_LOG(INFO, "INFO LEN: %d", notice_info->len); + PMD_DRV_LOG(INFO, "PCIE DFX:"); + dfx_info.host_id = 0; + dfx_info.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1; + for (i = 0; i < num; i++) { + dfx_info.offset = i * MAX_PCIE_DFX_BUF_SIZE; + if (i == (num - 1)) + dfx_info.last = 1; + size = sizeof(dfx_info); + err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM, + HINIC_MGMT_CMD_PCIE_DFX_GET, + &dfx_info, sizeof(dfx_info), + &dfx_info, &size, 0); + if (err || dfx_info.mgmt_msg_head.status || !size) { + PMD_DRV_LOG(ERR, "Failed to get pcie dfx info, err: %d, status: 0x%x, out size: 0x%x", + err, dfx_info.mgmt_msg_head.status, size); + return; + } + + reg = (u32 *)dfx_info.data; + for (j = 0; j < 256; j = j + 8) { + PMD_DRV_LOG(ERR, "0x%04x: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x", + cnt, reg[j], reg[(u32)(j + 1)], + reg[(u32)(j + 2)], reg[(u32)(j + 3)], + reg[(u32)(j + 4)], reg[(u32)(j + 5)], + reg[(u32)(j + 6)], reg[(u32)(j + 7)]); + cnt = cnt + 32; + } + memset(dfx_info.data, 0, MAX_PCIE_DFX_BUF_SIZE); + } +} + +static void +hinic_show_ffm_info(struct hinic_hwdev *hwdev, void *buf_in, u16 in_size) +{ + struct ffm_intr_info *intr; + + if (in_size != sizeof(struct ffm_intr_info)) { + PMD_DRV_LOG(ERR, "Invalid input buffer len, length: %d, should be %zu.", + in_size, sizeof(struct ffm_intr_info)); + return; + } + + if (hwdev->ffm_num < FFM_RECORD_NUM_MAX) { + hwdev->ffm_num++; + intr = (struct ffm_intr_info *)buf_in; + PMD_DRV_LOG(WARNING, "node_id(%d),err_csr_addr(0x%x),err_csr_val(0x%x),err_level(0x%x),err_type(0x%x)", + intr->node_id, + intr->err_csr_addr, + intr->err_csr_value, + intr->err_level, + intr->err_type); + } +} + +void hinic_comm_async_event_handle(struct hinic_hwdev *hwdev, u8 cmd, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct hinic_cmd_fault_event *fault_event, *ret_fault_event; + + if (!hwdev) + return; + + *out_size = 0; + + switch (cmd) { + case HINIC_MGMT_CMD_FAULT_REPORT: + if (in_size != sizeof(*fault_event)) { + PMD_DRV_LOG(ERR, "Invalid fault event report, length: %d, should be %zu", + in_size, sizeof(*fault_event)); + return; + } + + fault_event = (struct hinic_cmd_fault_event *)buf_in; + fault_report_show(hwdev, &fault_event->event); + + if 
(hinic_func_type(hwdev) != TYPE_VF) { + ret_fault_event = + (struct hinic_cmd_fault_event *)buf_out; + ret_fault_event->mgmt_msg_head.status = 0; + *out_size = sizeof(*ret_fault_event); + } + break; + + case HINIC_MGMT_CMD_WATCHDOG_INFO: + hinic_show_sw_watchdog_timeout_info(buf_in, in_size, + buf_out, out_size); + break; + + case HINIC_MGMT_CMD_PCIE_DFX_NTC: + hinic_show_pcie_dfx_info(hwdev, buf_in, in_size, + buf_out, out_size); + break; + + case HINIC_MGMT_CMD_FFM_SET: + hinic_show_ffm_info(hwdev, buf_in, in_size); + break; + + default: + break; + } +} + +static void +hinic_cable_status_event(u8 cmd, void *buf_in, __rte_unused u16 in_size, + void *buf_out, u16 *out_size) +{ + struct hinic_cable_plug_event *plug_event; + struct hinic_link_err_event *link_err; + + if (cmd == HINIC_PORT_CMD_CABLE_PLUG_EVENT) { + plug_event = (struct hinic_cable_plug_event *)buf_in; + PMD_DRV_LOG(INFO, "Port module event: Cable %s", + plug_event->plugged ? "plugged" : "unplugged"); + + *out_size = sizeof(*plug_event); + plug_event = (struct hinic_cable_plug_event *)buf_out; + plug_event->mgmt_msg_head.status = 0; + } else if (cmd == HINIC_PORT_CMD_LINK_ERR_EVENT) { + link_err = (struct hinic_link_err_event *)buf_in; + if (link_err->err_type >= LINK_ERR_NUM) { + PMD_DRV_LOG(ERR, "Link failed, Unknown type: 0x%x", + link_err->err_type); + } else { + PMD_DRV_LOG(INFO, "Link failed, type: 0x%x: %s", + link_err->err_type, + hinic_module_link_err[link_err->err_type]); + } + + *out_size = sizeof(*link_err); + link_err = (struct hinic_link_err_event *)buf_out; + link_err->mgmt_msg_head.status = 0; + } +} + +static int hinic_link_event_process(struct hinic_hwdev *hwdev, + struct rte_eth_dev *eth_dev, u8 status) +{ + uint32_t port_speed[LINK_SPEED_MAX] = {ETH_SPEED_NUM_10M, + ETH_SPEED_NUM_100M, ETH_SPEED_NUM_1G, + ETH_SPEED_NUM_10G, ETH_SPEED_NUM_25G, + ETH_SPEED_NUM_40G, ETH_SPEED_NUM_100G}; + struct nic_port_info port_info; + struct rte_eth_link link; + int rc = HINIC_OK; + + if (!status) { + link.link_status = ETH_LINK_DOWN; + link.link_speed = 0; + link.link_duplex = ETH_LINK_HALF_DUPLEX; + link.link_autoneg = ETH_LINK_FIXED; + } else { + link.link_status = ETH_LINK_UP; + + memset(&port_info, 0, sizeof(port_info)); + rc = hinic_get_port_info(hwdev, &port_info); + if (rc) { + link.link_speed = ETH_SPEED_NUM_NONE; + link.link_duplex = ETH_LINK_FULL_DUPLEX; + link.link_autoneg = ETH_LINK_FIXED; + } else { + link.link_speed = port_speed[port_info.speed % + LINK_SPEED_MAX]; + link.link_duplex = port_info.duplex; + link.link_autoneg = port_info.autoneg_state; + } + } + (void)rte_eth_linkstatus_set(eth_dev, &link); + + return rc; +} + +static void hinic_lsc_process(struct hinic_hwdev *hwdev, + struct rte_eth_dev *rte_dev, u8 status) +{ + int ret; + + ret = hinic_link_event_process(hwdev, rte_dev, status); + /* check if link has changed, notify callback */ + if (ret == 0) + _rte_eth_dev_callback_process(rte_dev, + RTE_ETH_EVENT_INTR_LSC, + NULL); +} + +void hinic_l2nic_async_event_handle(struct hinic_hwdev *hwdev, + void *param, u8 cmd, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct hinic_port_link_status *in_link; + struct rte_eth_dev *eth_dev; + + if (!hwdev) + return; + + *out_size = 0; + + switch (cmd) { + case HINIC_PORT_CMD_LINK_STATUS_REPORT: + eth_dev = param; + in_link = (struct hinic_port_link_status *)buf_in; + PMD_DRV_LOG(INFO, "Link status event report, dev_name: %s, port_id: %d, link_status: %s", + eth_dev->data->name, eth_dev->data->port_id, + in_link->link ? 
"UP" : "DOWN"); + + hinic_lsc_process(hwdev, eth_dev, in_link->link); + break; + + case HINIC_PORT_CMD_CABLE_PLUG_EVENT: + case HINIC_PORT_CMD_LINK_ERR_EVENT: + hinic_cable_status_event(cmd, buf_in, in_size, + buf_out, out_size); + break; + + case HINIC_PORT_CMD_MGMT_RESET: + PMD_DRV_LOG(WARNING, "Mgmt is reset"); + break; + + default: + PMD_DRV_LOG(ERR, "Unsupported event %d to process", + cmd); + break; + } +} + +static void print_cable_info(struct hinic_link_info *info) +{ + char tmp_str[512] = {0}; + char tmp_vendor[17] = {0}; + const char *port_type = "Unknown port type"; + int i; + + if (info->cable_absent) { + PMD_DRV_LOG(INFO, "Cable unpresent"); + return; + } + + if (info->port_type < LINK_PORT_MAX_TYPE) + port_type = __hw_to_char_port_type[info->port_type]; + else + PMD_DRV_LOG(INFO, "Unknown port type: %u", + info->port_type); + if (info->port_type == LINK_PORT_FIBRE) { + if (info->port_sub_type == FIBRE_SUBTYPE_SR) + port_type = "Fibre-SR"; + else if (info->port_sub_type == FIBRE_SUBTYPE_LR) + port_type = "Fibre-LR"; + } + + for (i = sizeof(info->vendor_name) - 1; i >= 0; i--) { + if (info->vendor_name[i] == ' ') + info->vendor_name[i] = '\0'; + else + break; + } + + memcpy(tmp_vendor, info->vendor_name, sizeof(info->vendor_name)); + snprintf(tmp_str, sizeof(tmp_str), + "Vendor: %s, %s, %s, length: %um, max_speed: %uGbps", + tmp_vendor, info->sfp_type ? "SFP" : "QSFP", port_type, + info->cable_length, info->cable_max_speed); + if (info->port_type != LINK_PORT_COPPER) + snprintf(tmp_str + strlen(tmp_str), + sizeof(tmp_str) - strlen(tmp_str), + ", Temperature: %u", info->cable_temp); + + PMD_DRV_LOG(INFO, "Cable information: %s", tmp_str); +} + +static void print_hi30_status(struct hinic_link_info *info) +{ + struct hi30_ffe_data *ffe_data; + struct hi30_ctle_data *ctle_data; + + ffe_data = (struct hi30_ffe_data *)info->hi30_ffe; + ctle_data = (struct hi30_ctle_data *)info->hi30_ctle; + + PMD_DRV_LOG(INFO, "TX_FFE: PRE2=%s%d; PRE1=%s%d; MAIN=%d; POST1=%s%d; POST1X=%s%d", + (ffe_data->PRE1 & 0x10) ? "-" : "", + (int)(ffe_data->PRE1 & 0xf), + (ffe_data->PRE2 & 0x10) ? "-" : "", + (int)(ffe_data->PRE2 & 0xf), + (int)ffe_data->MAIN, + (ffe_data->POST1 & 0x10) ? "-" : "", + (int)(ffe_data->POST1 & 0xf), + (ffe_data->POST2 & 0x10) ? "-" : "", + (int)(ffe_data->POST2 & 0xf)); + PMD_DRV_LOG(INFO, "RX_CTLE: Gain1~3=%u %u %u; Boost1~3=%u %u %u; Zero1~3=%u %u %u; Squelch1~3=%u %u %u", + ctle_data->ctlebst[0], ctle_data->ctlebst[1], + ctle_data->ctlebst[2], ctle_data->ctlecmband[0], + ctle_data->ctlecmband[1], ctle_data->ctlecmband[2], + ctle_data->ctlermband[0], ctle_data->ctlermband[1], + ctle_data->ctlermband[2], ctle_data->ctleza[0], + ctle_data->ctleza[1], ctle_data->ctleza[2]); +} + +static void print_link_info(struct hinic_link_info *info, + enum hilink_info_print_event type) +{ + const char *fec = "None"; + + if (info->fec < HILINK_FEC_MAX_TYPE) + fec = __hw_to_char_fec[info->fec]; + else + PMD_DRV_LOG(INFO, "Unknown fec type: %u", + info->fec); + + if (type == HILINK_EVENT_LINK_UP || !info->an_state) { + PMD_DRV_LOG(INFO, "Link information: speed %dGbps, %s, autoneg %s", + info->speed, fec, info->an_state ? "on" : "off"); + } else { + PMD_DRV_LOG(INFO, "Link information: antoneg: %s", + info->an_state ? 
"on" : "off"); + } +} + +static const char *hilink_info_report_type[HILINK_EVENT_MAX_TYPE] = { + "", "link up", "link down", "cable plugged" +}; + +static void hinic_print_hilink_info(void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct hinic_hilink_link_info *hilink_info = + (struct hinic_hilink_link_info *)buf_in; + struct hinic_link_info *info; + enum hilink_info_print_event type; + + if (in_size != sizeof(*hilink_info)) { + PMD_DRV_LOG(ERR, "Invalid hilink info message size %d, should be %zu", + in_size, sizeof(*hilink_info)); + return; + } + + ((struct hinic_hilink_link_info *)buf_out)->mgmt_msg_head.status = 0; + *out_size = sizeof(*hilink_info); + + info = &hilink_info->info; + type = hilink_info->info_type; + + if (type < HILINK_EVENT_LINK_UP || type >= HILINK_EVENT_MAX_TYPE) { + PMD_DRV_LOG(INFO, "Invalid hilink info report, type: %d", + type); + return; + } + + PMD_DRV_LOG(INFO, "Hilink info report after %s", + hilink_info_report_type[type]); + + print_cable_info(info); + + print_link_info(info, type); + + print_hi30_status(info); + + if (type == HILINK_EVENT_LINK_UP) + return; + + if (type == HILINK_EVENT_CABLE_PLUGGED) { + PMD_DRV_LOG(INFO, "alos: %u, rx_los: %u", + info->alos, info->rx_los); + return; + } + + PMD_DRV_LOG(INFO, "PMA ctrl: %s, MAC tx %s, MAC rx %s, PMA debug inforeg: 0x%x, PMA signal ok reg: 0x%x, RF/LF status reg: 0x%x", + info->pma_status ? "on" : "off", + info->mac_tx_en ? "enable" : "disable", + info->mac_rx_en ? "enable" : "disable", info->pma_dbg_info_reg, + info->pma_signal_ok_reg, info->rf_lf_status_reg); + PMD_DRV_LOG(INFO, "alos: %u, rx_los: %u, PCS block counter reg: 0x%x,PCS link: 0x%x, MAC link: 0x%x PCS_err_cnt: 0x%x", + info->alos, info->rx_los, info->pcs_err_blk_cnt_reg, + info->pcs_link_reg, info->mac_link_reg, info->pcs_err_cnt); +} + +void hinic_hilink_async_event_handle(struct hinic_hwdev *hwdev, u8 cmd, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + if (!hwdev) + return; + + *out_size = 0; + + switch (cmd) { + case HINIC_HILINK_CMD_GET_LINK_INFO: + hinic_print_hilink_info(buf_in, in_size, buf_out, + out_size); + break; + + default: + PMD_DRV_LOG(ERR, "Unsupported event %d to process", + cmd); + break; + } +} diff --git a/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_hwdev.h b/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_hwdev.h new file mode 100644 index 000000000..d6896b3f1 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_hwdev.h @@ -0,0 +1,491 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Huawei Technologies Co., Ltd + */ + +#ifndef _HINIC_PMD_HWDEV_H_ +#define _HINIC_PMD_HWDEV_H_ + +#include "hinic_pmd_cmd.h" + +#define HINIC_PAGE_SIZE_MAX 20 + +#define HINIC_MGMT_CMD_UNSUPPORTED 0xFF +#define HINIC_PF_SET_VF_ALREADY 0x4 + +#define MAX_PCIE_DFX_BUF_SIZE 1024 + +#define HINIC_DEV_BUSY_ACTIVE_FW 0xFE + +/* dma pool */ +struct dma_pool { + rte_atomic32_t inuse; + size_t elem_size; + size_t align; + size_t boundary; + void *hwdev; + + char name[32]; +}; + +enum hinic_res_state { + HINIC_RES_CLEAN = 0, + HINIC_RES_ACTIVE = 1, +}; + +enum hilink_info_print_event { + HILINK_EVENT_LINK_UP = 1, + HILINK_EVENT_LINK_DOWN, + HILINK_EVENT_CABLE_PLUGGED, + HILINK_EVENT_MAX_TYPE, +}; + +struct hinic_port_link_status { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u8 link; + u8 port_id; +}; + +enum link_err_status { + LINK_ERR_MODULE_UNRECOGENIZED, + LINK_ERR_NUM, +}; + +struct hinic_cable_plug_event { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 
func_id; + u8 plugged; /* 0: unplugged, 1: plugged */ + u8 port_id; +}; + +struct hinic_link_err_event { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u8 err_type; + u8 port_id; +}; + +struct hinic_cons_idx_attr { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_idx; + u8 dma_attr_off; + u8 pending_limit; + u8 coalescing_time; + u8 intr_en; + u16 intr_idx; + u32 l2nic_sqn; + u32 sq_id; + u64 ci_addr; +}; + +struct hinic_clear_doorbell { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_idx; + u8 ppf_idx; + u8 rsvd1; +}; + +struct hinic_clear_resource { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_idx; + u8 ppf_idx; + u8 rsvd1; +}; + +struct hinic_cmd_set_res_state { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_idx; + u8 state; + u8 rsvd1; + u32 rsvd2; +}; + +struct hinic_l2nic_reset { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u16 rsvd1; +}; + +struct hinic_page_size { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_idx; + u8 ppf_idx; + u8 page_size; + u32 rsvd; +}; + +struct hinic_msix_config { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u16 msix_index; + u8 pending_cnt; + u8 coalesct_timer_cnt; + u8 lli_tmier_cnt; + u8 lli_credit_cnt; + u8 resend_timer_cnt; + u8 rsvd1[3]; +}; + +/* defined by chip */ +enum hinic_fault_type { + FAULT_TYPE_CHIP, + FAULT_TYPE_UCODE, + FAULT_TYPE_MEM_RD_TIMEOUT, + FAULT_TYPE_MEM_WR_TIMEOUT, + FAULT_TYPE_REG_RD_TIMEOUT, + FAULT_TYPE_REG_WR_TIMEOUT, + FAULT_TYPE_MAX, +}; + +/* defined by chip */ +enum hinic_fault_err_level { + /* default err_level=FAULT_LEVEL_FATAL if + * type==FAULT_TYPE_MEM_RD_TIMEOUT || FAULT_TYPE_MEM_WR_TIMEOUT || + * FAULT_TYPE_REG_RD_TIMEOUT || FAULT_TYPE_REG_WR_TIMEOUT || + * FAULT_TYPE_UCODE + * other: err_level in event.chip.err_level if type==FAULT_TYPE_CHIP + */ + FAULT_LEVEL_FATAL, + FAULT_LEVEL_SERIOUS_RESET, + FAULT_LEVEL_SERIOUS_FLR, + FAULT_LEVEL_GENERAL, + FAULT_LEVEL_SUGGESTION, + FAULT_LEVEL_MAX +}; + +/* defined by chip */ +struct hinic_fault_event { + /* enum hinic_fault_type */ + u8 type; + u8 rsvd0[3]; + union { + u32 val[4]; + /* valid only type==FAULT_TYPE_CHIP */ + struct { + u8 node_id; + /* enum hinic_fault_err_level */ + u8 err_level; + u8 err_type; + u8 rsvd1; + u32 err_csr_addr; + u32 err_csr_value; + /* func_id valid only err_level==FAULT_LEVEL_SERIOUS_FLR */ + u16 func_id; + u16 rsvd2; + } chip; + + /* valid only type==FAULT_TYPE_UCODE */ + struct { + u8 cause_id; + u8 core_id; + u8 c_id; + u8 rsvd3; + u32 epc; + u32 rsvd4; + u32 rsvd5; + } ucode; + + /* valid only type==FAULT_TYPE_MEM_RD_TIMEOUT || + * FAULT_TYPE_MEM_WR_TIMEOUT + */ + struct { + u32 err_csr_ctrl; + u32 err_csr_data; + u32 ctrl_tab; + u32 mem_index; + } mem_timeout; + + /* valid only type==FAULT_TYPE_REG_RD_TIMEOUT || + * FAULT_TYPE_REG_WR_TIMEOUT + */ + struct { + u32 err_csr; + u32 rsvd6; + u32 rsvd7; + u32 rsvd8; + } reg_timeout; + } event; +}; + +struct hinic_cmd_fault_event { + struct hinic_mgmt_msg_head mgmt_msg_head; + + struct hinic_fault_event event; +}; + +struct hinic_mgmt_watchdog_info { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u32 curr_time_h; + u32 curr_time_l; + u32 task_id; + u32 rsv; + + u32 reg[13]; + u32 pc; + u32 lr; + u32 cpsr; + + u32 stack_top; + u32 stack_bottom; + u32 sp; + u32 curr_used; + u32 peak_used; + u32 is_overflow; + + u32 stack_actlen; + u8 data[1024]; +}; + +struct hinic_pcie_dfx_ntc { + struct hinic_mgmt_msg_head mgmt_msg_head; + + int len; + u32 rsvd; +}; + +struct hinic_pcie_dfx_info { + struct 
hinic_mgmt_msg_head mgmt_msg_head; + + u8 host_id; + u8 last; + u8 rsvd[2]; + u32 offset; + + u8 data[MAX_PCIE_DFX_BUF_SIZE]; +}; + +struct ffm_intr_info { + u8 node_id; + /* error level of the interrupt source */ + u8 err_level; + /* Classification by interrupt source properties */ + u16 err_type; + u32 err_csr_addr; + u32 err_csr_value; +}; + +struct hinic_board_info { + u32 board_type; + u32 port_num; + u32 port_speed; + u32 pcie_width; + u32 host_num; + u32 pf_num; + u32 vf_total_num; + u32 tile_num; + u32 qcm_num; + u32 core_num; + u32 work_mode; + u32 service_mode; + u32 pcie_mode; + u32 cfg_addr; + u32 boot_sel; +}; + +struct hinic_comm_board_info { + struct hinic_mgmt_msg_head mgmt_msg_head; + + struct hinic_board_info info; + + u32 rsvd1[5]; +}; + +struct hi30_ctle_data { + u8 ctlebst[3]; + u8 ctlecmband[3]; + u8 ctlermband[3]; + u8 ctleza[3]; + u8 ctlesqh[3]; + u8 ctleactgn[3]; + u8 ctlepassgn; +}; + +struct hi30_ffe_data { + u8 PRE2; + u8 PRE1; + u8 POST1; + u8 POST2; + u8 MAIN; +}; + +enum hilink_fec_type { + HILINK_FEC_RSFEC, + HILINK_FEC_BASEFEC, + HILINK_FEC_NOFEC, + HILINK_FEC_MAX_TYPE, +}; + +enum hinic_link_port_type { + LINK_PORT_FIBRE = 1, + LINK_PORT_ELECTRIC, + LINK_PORT_COPPER, + LINK_PORT_AOC, + LINK_PORT_BACKPLANE, + LINK_PORT_BASET, + LINK_PORT_MAX_TYPE, +}; + +enum hilink_fibre_subtype { + FIBRE_SUBTYPE_SR = 1, + FIBRE_SUBTYPE_LR, + FIBRE_SUBTYPE_MAX, +}; + +struct hinic_link_info { + u8 vendor_name[16]; + /* port type: + * 1 - fiber; 2 - electric; 3 - copper; 4 - AOC; 5 - backplane; + * 6 - baseT; 0xffff - unknown + * + * port subtype: + * Only when port_type is fiber: + * 1 - SR; 2 - LR + */ + u32 port_type; + u32 port_sub_type; + u32 cable_length; + u8 cable_temp; + u8 cable_max_speed;/* 1(G)/10(G)/25(G)... */ + u8 sfp_type; /* 0 - qsfp; 1 - sfp */ + u8 rsvd0; + u32 power[4]; /* uW; if is sfp, only power[2] is valid */ + + u8 an_state; /* 0 - off; 1 - on */ + u8 fec; /* 0 - RSFEC; 1 - BASEFEC; 2 - NOFEC */ + u16 speed; /* 1(G)/10(G)/25(G)... 
*/ + + u8 cable_absent; /* 0 - cable present; 1 - cable unpresent */ + u8 alos; /* 0 - yes; 1 - no */ + u8 rx_los; /* 0 - yes; 1 - no */ + u8 pma_status; + u32 pma_dbg_info_reg; /* pma debug info: */ + u32 pma_signal_ok_reg; /* signal ok: */ + + u32 pcs_err_blk_cnt_reg; /* error block counter: */ + u32 rf_lf_status_reg; /* RF/LF status: */ + u8 pcs_link_reg; /* pcs link: */ + u8 mac_link_reg; /* mac link: */ + u8 mac_tx_en; + u8 mac_rx_en; + u32 pcs_err_cnt; + + u8 lane_used; + u8 hi30_ffe[5]; + u8 hi30_ctle[19]; + u8 hi30_dfe[14]; + u8 rsvd4; +}; + +struct hinic_hilink_link_info { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 port_id; + u8 info_type; /* 1: link up 2: link down 3 cable plugged */ + u8 rsvd1; + + struct hinic_link_info info; + + u8 rsvd2[780]; +}; + +/* dma os dependency implementation */ +struct hinic_os_dep { + /* kernel dma alloc api */ + rte_atomic32_t dma_alloc_cnt; + rte_spinlock_t dma_hash_lock; + struct rte_hash *dma_addr_hash; +}; + +struct nic_interrupt_info { + u32 lli_set; + u32 interrupt_coalesc_set; + u16 msix_index; + u8 lli_credit_limit; + u8 lli_timer_cfg; + u8 pending_limt; + u8 coalesc_timer_cfg; + u8 resend_timer_cfg; +}; + +struct hinic_sq_attr { + u8 dma_attr_off; + u8 pending_limit; + u8 coalescing_time; + u8 intr_en; + u16 intr_idx; + u32 l2nic_sqn; + /* bit[63:2] is addr's high 62bit, bit[0] is valid flag */ + u64 ci_dma_base; +}; + +struct hinic_hwdev { + struct rte_pci_device *pcidev_hdl; + u32 ffm_num; + + /* dma memory allocator */ + struct hinic_os_dep os_dep; + struct hinic_hwif *hwif; + struct cfg_mgmt_info *cfg_mgmt; + struct hinic_aeqs *aeqs; + struct hinic_mbox_func_to_func *func_to_func; + struct hinic_msg_pf_to_mgmt *pf_to_mgmt; + struct hinic_cmdqs *cmdqs; + struct hinic_nic_io *nic_io; +}; + +int hinic_osdep_init(struct hinic_hwdev *hwdev); + +void hinic_osdep_deinit(struct hinic_hwdev *hwdev); + +void dma_free_coherent_volatile(void *hwdev, size_t size, + volatile void *virt, dma_addr_t phys); + +int hinic_get_board_info(void *hwdev, struct hinic_board_info *info); + +int hinic_set_ci_table(void *hwdev, u16 q_id, struct hinic_sq_attr *attr); + +int hinic_func_rx_tx_flush(struct hinic_hwdev *hwdev); + +int hinic_set_interrupt_cfg(struct hinic_hwdev *hwdev, + struct nic_interrupt_info interrupt_info); + +int init_aeqs_msix_attr(void *hwdev); + +void hinic_comm_async_event_handle(struct hinic_hwdev *hwdev, u8 cmd, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size); + +void hinic_l2nic_async_event_handle(struct hinic_hwdev *hwdev, void *param, + u8 cmd, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size); + +void hinic_hilink_async_event_handle(struct hinic_hwdev *hwdev, u8 cmd, + void *buf_in, u16 in_size, void *buf_out, + u16 *out_size); + +int hinic_init_attr_table(struct hinic_hwdev *hwdev); + +int hinic_activate_hwdev_state(struct hinic_hwdev *hwdev); + +void hinic_deactivate_hwdev_state(struct hinic_hwdev *hwdev); + +int hinic_l2nic_reset(struct hinic_hwdev *hwdev); + +int hinic_set_pagesize(void *hwdev, u8 page_size); + +void hinic_cpu_to_be32(void *data, u32 len); + +void hinic_be32_to_cpu(void *data, u32 len); + +#endif /* _HINIC_PMD_HWDEV_H_ */ diff --git a/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_hwif.c b/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_hwif.c new file mode 100644 index 000000000..4578b689d --- /dev/null +++ b/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_hwif.c @@ -0,0 +1,554 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Huawei Technologies Co., Ltd + */ + 
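/*
 * Editorial sketch, not part of the patch: hinic_pmd_hwdev.h above documents
 * hinic_sq_attr.ci_dma_base as "bit[63:2] is addr's high 62bit, bit[0] is
 * valid flag". The standalone helpers below show one plausible packing that
 * matches that comment; the helper names, the treatment of bit[1] as reserved
 * and the exact encoding the firmware expects are assumptions.
 */
#include <stdint.h>

static inline uint64_t ci_dma_base_pack(uint64_t ci_addr, int valid)
{
	/* keep address bits [63:2], place the valid flag in bit [0] */
	return (ci_addr & ~UINT64_C(0x3)) | (valid ? UINT64_C(1) : 0);
}

static inline uint64_t ci_dma_base_addr(uint64_t packed)
{
	/* recover the 4-byte-aligned completion-index DMA address */
	return packed & ~UINT64_C(0x3);
}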
+#include + +#include "hinic_compat.h" +#include "hinic_csr.h" +#include "hinic_pmd_hwdev.h" +#include "hinic_pmd_hwif.h" + +#define HINIC_CFG_REGS_BAR 0 +#define HINIC_INTR_MSI_BAR 2 +#define HINIC_DB_MEM_BAR 4 + +#define PAGE_SIZE_4K 0x1000 +#define PAGE_SIZE_64K 0x10000 + +#define HINIC_MSIX_CNT_RESEND_TIMER_SHIFT 29 +#define HINIC_MSIX_CNT_RESEND_TIMER_MASK 0x7U + +#define HINIC_MSIX_CNT_SET(val, member) \ + (((val) & HINIC_MSIX_CNT_##member##_MASK) << \ + HINIC_MSIX_CNT_##member##_SHIFT) + +/** + * hwif_ready - test if the HW initialization passed + * @hwdev: the pointer to the private hardware device object + * Return: 0 - success, negative - failure + */ +static int hwif_ready(struct hinic_hwdev *hwdev) +{ + u32 addr, attr0, attr1; + + addr = HINIC_CSR_FUNC_ATTR1_ADDR; + attr1 = hinic_hwif_read_reg(hwdev->hwif, addr); + if (!HINIC_AF1_GET(attr1, MGMT_INIT_STATUS)) + return -EBUSY; + + addr = HINIC_CSR_FUNC_ATTR0_ADDR; + attr0 = hinic_hwif_read_reg(hwdev->hwif, addr); + if ((HINIC_AF0_GET(attr0, FUNC_TYPE) == TYPE_VF) && + !HINIC_AF1_GET(attr1, PF_INIT_STATUS)) + return -EBUSY; + + return 0; +} + +/** + * set_hwif_attr - set the attributes as members in hwif + * @hwif: the hardware interface of a pci function device + * @attr0: the first attribute that was read from the hw + * @attr1: the second attribute that was read from the hw + * @attr2: the third attribute that was read from the hw + */ +static void set_hwif_attr(struct hinic_hwif *hwif, u32 attr0, u32 attr1, + u32 attr2) +{ + hwif->attr.func_global_idx = HINIC_AF0_GET(attr0, FUNC_GLOBAL_IDX); + hwif->attr.port_to_port_idx = HINIC_AF0_GET(attr0, P2P_IDX); + hwif->attr.pci_intf_idx = HINIC_AF0_GET(attr0, PCI_INTF_IDX); + hwif->attr.vf_in_pf = HINIC_AF0_GET(attr0, VF_IN_PF); + hwif->attr.func_type = HINIC_AF0_GET(attr0, FUNC_TYPE); + + hwif->attr.ppf_idx = HINIC_AF1_GET(attr1, PPF_IDX); + + hwif->attr.num_aeqs = BIT(HINIC_AF1_GET(attr1, AEQS_PER_FUNC)); + hwif->attr.num_ceqs = BIT(HINIC_AF1_GET(attr1, CEQS_PER_FUNC)); + hwif->attr.num_irqs = BIT(HINIC_AF1_GET(attr1, IRQS_PER_FUNC)); + hwif->attr.num_dma_attr = BIT(HINIC_AF1_GET(attr1, DMA_ATTR_PER_FUNC)); + + hwif->attr.global_vf_id_of_pf = HINIC_AF2_GET(attr2, + GLOBAL_VF_ID_OF_PF); +} + +/** + * get_hwif_attr - read and set the attributes as members in hwif + * @hwif: the hardware interface of a pci function device + */ +static void get_hwif_attr(struct hinic_hwif *hwif) +{ + u32 addr, attr0, attr1, attr2; + + addr = HINIC_CSR_FUNC_ATTR0_ADDR; + attr0 = hinic_hwif_read_reg(hwif, addr); + + addr = HINIC_CSR_FUNC_ATTR1_ADDR; + attr1 = hinic_hwif_read_reg(hwif, addr); + + addr = HINIC_CSR_FUNC_ATTR2_ADDR; + attr2 = hinic_hwif_read_reg(hwif, addr); + + set_hwif_attr(hwif, attr0, attr1, attr2); +} + +void hinic_set_pf_status(struct hinic_hwif *hwif, enum hinic_pf_status status) +{ + u32 attr5 = HINIC_AF5_SET(status, PF_STATUS); + u32 addr = HINIC_CSR_FUNC_ATTR5_ADDR; + + if (hwif->attr.func_type == TYPE_VF) { + PMD_DRV_LOG(INFO, "VF doesn't support to set attr5"); + return; + } + + hinic_hwif_write_reg(hwif, addr, attr5); +} + +enum hinic_pf_status hinic_get_pf_status(struct hinic_hwif *hwif) +{ + u32 attr5 = hinic_hwif_read_reg(hwif, HINIC_CSR_FUNC_ATTR5_ADDR); + + return HINIC_AF5_GET(attr5, PF_STATUS); +} + +static enum hinic_doorbell_ctrl +hinic_get_doorbell_ctrl_status(struct hinic_hwif *hwif) +{ + u32 attr4 = hinic_hwif_read_reg(hwif, HINIC_CSR_FUNC_ATTR4_ADDR); + + return HINIC_AF4_GET(attr4, DOORBELL_CTRL); +} + +static enum hinic_outbound_ctrl 
+hinic_get_outbound_ctrl_status(struct hinic_hwif *hwif) +{ + u32 attr4 = hinic_hwif_read_reg(hwif, HINIC_CSR_FUNC_ATTR4_ADDR); + + return HINIC_AF4_GET(attr4, OUTBOUND_CTRL); +} + +void hinic_enable_doorbell(struct hinic_hwif *hwif) +{ + u32 addr, attr4; + + addr = HINIC_CSR_FUNC_ATTR4_ADDR; + attr4 = hinic_hwif_read_reg(hwif, addr); + + attr4 = HINIC_AF4_CLEAR(attr4, DOORBELL_CTRL); + attr4 |= HINIC_AF4_SET(ENABLE_DOORBELL, DOORBELL_CTRL); + + hinic_hwif_write_reg(hwif, addr, attr4); +} + +void hinic_disable_doorbell(struct hinic_hwif *hwif) +{ + u32 addr, attr4; + + addr = HINIC_CSR_FUNC_ATTR4_ADDR; + attr4 = hinic_hwif_read_reg(hwif, addr); + + attr4 = HINIC_AF4_CLEAR(attr4, DOORBELL_CTRL); + attr4 |= HINIC_AF4_SET(DISABLE_DOORBELL, DOORBELL_CTRL); + + hinic_hwif_write_reg(hwif, addr, attr4); +} + +/** + * set_ppf - try to set hwif as ppf and set the type of hwif in this case + * @hwif: the hardware interface of a pci function device + */ +static void set_ppf(struct hinic_hwif *hwif) +{ + struct hinic_func_attr *attr = &hwif->attr; + u32 addr, val, ppf_election; + + /* Read Modify Write */ + addr = HINIC_CSR_PPF_ELECTION_ADDR; + + val = hinic_hwif_read_reg(hwif, addr); + val = HINIC_PPF_ELECTION_CLEAR(val, IDX); + + ppf_election = HINIC_PPF_ELECTION_SET(attr->func_global_idx, IDX); + val |= ppf_election; + + hinic_hwif_write_reg(hwif, addr, val); + + /* Check PPF */ + val = hinic_hwif_read_reg(hwif, addr); + + attr->ppf_idx = HINIC_PPF_ELECTION_GET(val, IDX); + if (attr->ppf_idx == attr->func_global_idx) + attr->func_type = TYPE_PPF; +} + +static void init_db_area_idx(struct hinic_hwif *hwif) +{ + struct hinic_free_db_area *free_db_area = &hwif->free_db_area; + u32 db_max_areas = hwif->db_max_areas; + u32 i; + + for (i = 0; i < db_max_areas; i++) + free_db_area->db_idx[i] = i; + + free_db_area->alloc_pos = 0; + free_db_area->return_pos = 0; + + free_db_area->num_free = db_max_areas; + + spin_lock_init(&free_db_area->idx_lock); +} + +static int get_db_idx(struct hinic_hwif *hwif, u32 *idx) +{ + struct hinic_free_db_area *free_db_area = &hwif->free_db_area; + u32 pos; + u32 pg_idx; + + spin_lock(&free_db_area->idx_lock); + + if (free_db_area->num_free == 0) { + spin_unlock(&free_db_area->idx_lock); + return -ENOMEM; + } + + free_db_area->num_free--; + + pos = free_db_area->alloc_pos++; + pos &= (hwif->db_max_areas - 1); + + pg_idx = free_db_area->db_idx[pos]; + + free_db_area->db_idx[pos] = 0xFFFFFFFF; + + spin_unlock(&free_db_area->idx_lock); + + *idx = pg_idx; + + return 0; +} + +static void free_db_idx(struct hinic_hwif *hwif, u32 idx) +{ + struct hinic_free_db_area *free_db_area = &hwif->free_db_area; + u32 pos; + + spin_lock(&free_db_area->idx_lock); + + pos = free_db_area->return_pos++; + pos &= (hwif->db_max_areas - 1); + + free_db_area->db_idx[pos] = idx; + + free_db_area->num_free++; + + spin_unlock(&free_db_area->idx_lock); +} + +void hinic_free_db_addr(void *hwdev, void __iomem *db_base) +{ + struct hinic_hwif *hwif = ((struct hinic_hwdev *)hwdev)->hwif; + u32 idx = DB_IDX(db_base, hwif->db_base); + + free_db_idx(hwif, idx); +} + +int hinic_alloc_db_addr(void *hwdev, void __iomem **db_base) +{ + struct hinic_hwif *hwif = ((struct hinic_hwdev *)hwdev)->hwif; + u32 idx; + int err; + + err = get_db_idx(hwif, &idx); + if (err) + return -EFAULT; + + *db_base = hwif->db_base + idx * HINIC_DB_PAGE_SIZE; + + return 0; +} + +void hinic_set_msix_state(void *hwdev, u16 msix_idx, enum hinic_msix_state flag) +{ + struct hinic_hwdev *hw = hwdev; + struct hinic_hwif *hwif = hw->hwif; + u32 
offset = msix_idx * HINIC_PCI_MSIX_ENTRY_SIZE + + HINIC_PCI_MSIX_ENTRY_VECTOR_CTRL; + u32 mask_bits; + + /* vfio-pci does not mmap msi-x vector table to user space, + * we can not access the space when kernel driver is vfio-pci + */ + if (hw->pcidev_hdl->kdrv == RTE_KDRV_VFIO) + return; + + mask_bits = readl(hwif->intr_regs_base + offset); + mask_bits &= ~HINIC_PCI_MSIX_ENTRY_CTRL_MASKBIT; + if (flag) + mask_bits |= HINIC_PCI_MSIX_ENTRY_CTRL_MASKBIT; + + writel(mask_bits, hwif->intr_regs_base + offset); +} + +static void disable_all_msix(struct hinic_hwdev *hwdev) +{ + u16 num_irqs = hwdev->hwif->attr.num_irqs; + u16 i; + + for (i = 0; i < num_irqs; i++) + hinic_set_msix_state(hwdev, i, HINIC_MSIX_DISABLE); +} + +/** + * Wait for up enable or disable doorbell flush finished. + * @hwif: the hardware interface of a pci function device. + * @states: Disable or Enable. + */ +int wait_until_doorbell_flush_states(struct hinic_hwif *hwif, + enum hinic_doorbell_ctrl states) +{ + unsigned long end; + enum hinic_doorbell_ctrl db_ctrl; + + end = jiffies + + msecs_to_jiffies(HINIC_WAIT_DOORBELL_AND_OUTBOUND_TIMEOUT); + do { + db_ctrl = hinic_get_doorbell_ctrl_status(hwif); + if (db_ctrl == states) + return 0; + + rte_delay_ms(1); + } while (time_before(jiffies, end)); + + return -EFAULT; +} + +static int wait_until_doorbell_and_outbound_enabled(struct hinic_hwif *hwif) +{ + unsigned long end; + enum hinic_doorbell_ctrl db_ctrl; + enum hinic_outbound_ctrl outbound_ctrl; + + end = jiffies + + msecs_to_jiffies(HINIC_WAIT_DOORBELL_AND_OUTBOUND_TIMEOUT); + do { + db_ctrl = hinic_get_doorbell_ctrl_status(hwif); + outbound_ctrl = hinic_get_outbound_ctrl_status(hwif); + + if (outbound_ctrl == ENABLE_OUTBOUND && + db_ctrl == ENABLE_DOORBELL) + return 0; + + rte_delay_ms(1); + } while (time_before(jiffies, end)); + + return -EFAULT; +} + +u16 hinic_global_func_id(void *hwdev) +{ + struct hinic_hwif *hwif = ((struct hinic_hwdev *)hwdev)->hwif; + + return hwif->attr.func_global_idx; +} + +enum func_type hinic_func_type(void *hwdev) +{ + struct hinic_hwif *hwif = ((struct hinic_hwdev *)hwdev)->hwif; + + return hwif->attr.func_type; +} + +u8 hinic_ppf_idx(void *hwdev) +{ + struct hinic_hwif *hwif = ((struct hinic_hwdev *)hwdev)->hwif; + + return hwif->attr.ppf_idx; +} + +/** + * hinic_dma_attr_entry_num - get number id of DMA attribute table. + * @hwdev: the pointer to the private hardware device object. + * Return: The number id of DMA attribute table. 
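 * Note (editorial addition): the value returned here is attr.num_dma_attr,
 * which set_hwif_attr() above derives from the AF1 register as
 * BIT(HINIC_AF1_GET(attr1, DMA_ATTR_PER_FUNC)).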
+ */ +u8 hinic_dma_attr_entry_num(void *hwdev) +{ + struct hinic_hwif *hwif = ((struct hinic_hwdev *)hwdev)->hwif; + return hwif->attr.num_dma_attr; +} + +/** + * hinic_init_hwif - initialize the hw interface + * @hwdev: the pointer to the private hardware device object + * @cfg_reg_base: base physical address of configuration registers + * @intr_reg_base: base physical address of msi-x vector table + * @db_base_phy: base physical address of doorbell registers + * @db_base: base virtual address of doorbell registers + * @dwqe_mapping: direct wqe io mapping address + * Return: 0 - success, negative - failure + */ +static int hinic_init_hwif(struct hinic_hwdev *hwdev, void *cfg_reg_base, + void *intr_reg_base, u64 db_base_phy, + void *db_base, __rte_unused void *dwqe_mapping) +{ + struct hinic_hwif *hwif; + struct rte_pci_device *pci_dev; + u64 db_bar_len; + int err; + + pci_dev = (struct rte_pci_device *)(hwdev->pcidev_hdl); + db_bar_len = pci_dev->mem_resource[HINIC_DB_MEM_BAR].len; + + hwif = hwdev->hwif; + + hwif->cfg_regs_base = (u8 __iomem *)cfg_reg_base; + hwif->intr_regs_base = (u8 __iomem *)intr_reg_base; + + hwif->db_base_phy = db_base_phy; + hwif->db_base = (u8 __iomem *)db_base; + hwif->db_max_areas = db_bar_len / HINIC_DB_PAGE_SIZE; + if (hwif->db_max_areas > HINIC_DB_MAX_AREAS) + hwif->db_max_areas = HINIC_DB_MAX_AREAS; + + init_db_area_idx(hwif); + + get_hwif_attr(hwif); + + err = hwif_ready(hwdev); + if (err) { + PMD_DRV_LOG(ERR, "Hwif is not ready"); + goto hwif_ready_err; + } + + err = wait_until_doorbell_and_outbound_enabled(hwif); + if (err) { + PMD_DRV_LOG(ERR, "Hw doorbell/outbound is disabled"); + goto hwif_ready_err; + } + + if (!HINIC_IS_VF(hwdev)) + set_ppf(hwif); + + /* disable mgmt cpu report any event */ + hinic_set_pf_status(hwdev->hwif, HINIC_PF_STATUS_INIT); + + return 0; + +hwif_ready_err: + spin_lock_deinit(&hwif->free_db_area.idx_lock); + + return err; +} + +#define HINIC_HWIF_ATTR_REG_PRINT_NUM (6) +#define HINIC_HWIF_APICMD_REG_PRINT_NUM (2) +#define HINIC_HWIF_EQ_REG_PRINT_NUM (2) + +static void hinic_parse_hwif_attr(struct hinic_hwdev *hwdev) +{ + struct hinic_hwif *hwif = hwdev->hwif; + + PMD_DRV_LOG(INFO, "Device %s hwif attribute:", hwdev->pcidev_hdl->name); + PMD_DRV_LOG(INFO, "func_idx: %u, p2p_idx: %u, pciintf_idx: %u, " + "vf_in_pf: %u, ppf_idx: %u, global_vf_id: %u, func_type: %u", + hwif->attr.func_global_idx, + hwif->attr.port_to_port_idx, hwif->attr.pci_intf_idx, + hwif->attr.vf_in_pf, hwif->attr.ppf_idx, + hwif->attr.global_vf_id_of_pf, hwif->attr.func_type); + PMD_DRV_LOG(INFO, "num_aeqs:%u, num_ceqs:%u, num_irqs:%u, dma_attr:%u", + hwif->attr.num_aeqs, hwif->attr.num_ceqs, + hwif->attr.num_irqs, hwif->attr.num_dma_attr); +} + +static void hinic_get_mmio(struct hinic_hwdev *hwdev, void **cfg_regs_base, + void **intr_base, void **db_base) +{ + struct rte_pci_device *pci_dev = hwdev->pcidev_hdl; + uint64_t bar0_size; + uint64_t bar2_size; + uint64_t bar0_phy_addr; + uint64_t pagesize = sysconf(_SC_PAGESIZE); + + *cfg_regs_base = pci_dev->mem_resource[HINIC_CFG_REGS_BAR].addr; + *intr_base = pci_dev->mem_resource[HINIC_INTR_MSI_BAR].addr; + *db_base = pci_dev->mem_resource[HINIC_DB_MEM_BAR].addr; + + bar0_size = pci_dev->mem_resource[HINIC_CFG_REGS_BAR].len; + bar2_size = pci_dev->mem_resource[HINIC_INTR_MSI_BAR].len; + + if (pagesize == PAGE_SIZE_64K && (bar0_size % pagesize != 0)) { + bar0_phy_addr = + pci_dev->mem_resource[HINIC_CFG_REGS_BAR].phys_addr; + if (bar0_phy_addr % pagesize != 0 && + (bar0_size + bar2_size <= pagesize) && + 
bar2_size >= bar0_size) { + *cfg_regs_base = (void *)((uint8_t *)(*intr_base) + + bar2_size); + } + } +} + +void hinic_hwif_res_free(struct hinic_hwdev *hwdev) +{ + rte_free(hwdev->hwif); + hwdev->hwif = NULL; +} + +int hinic_hwif_res_init(struct hinic_hwdev *hwdev) +{ + int err = HINIC_ERROR; + void *cfg_regs_base, *db_base, *intr_base = NULL; + + /* hinic related init */ + hwdev->hwif = rte_zmalloc("hinic_hwif", sizeof(*hwdev->hwif), + RTE_CACHE_LINE_SIZE); + if (!hwdev->hwif) { + PMD_DRV_LOG(ERR, "Allocate hwif failed, dev_name: %s", + hwdev->pcidev_hdl->name); + return -ENOMEM; + } + + hinic_get_mmio(hwdev, &cfg_regs_base, &intr_base, &db_base); + + err = hinic_init_hwif(hwdev, cfg_regs_base, + intr_base, 0, db_base, NULL); + if (err) { + PMD_DRV_LOG(ERR, "Initialize hwif failed, dev_name: %s", + hwdev->pcidev_hdl->name); + goto init_hwif_err; + } + + /* disable msix interrupt in hw device */ + disable_all_msix(hwdev); + + /* print hwif attributes */ + hinic_parse_hwif_attr(hwdev); + + return HINIC_OK; + +init_hwif_err: + rte_free(hwdev->hwif); + hwdev->hwif = NULL; + + return err; +} + +/** + * hinic_misx_intr_clear_resend_bit - clear interrupt resend configuration + * @hwdev: the hardware interface of a nic device + * @msix_idx: Index of msix interrupt + * @clear_resend_en: enable flag of clear resend configuration + */ +void hinic_misx_intr_clear_resend_bit(void *hwdev, u16 msix_idx, + u8 clear_resend_en) +{ + struct hinic_hwif *hwif = ((struct hinic_hwdev *)hwdev)->hwif; + u32 msix_ctrl = 0, addr; + + msix_ctrl = HINIC_MSIX_CNT_SET(clear_resend_en, RESEND_TIMER); + + addr = HINIC_CSR_MSIX_CNT_ADDR(msix_idx); + + hinic_hwif_write_reg(hwif, addr, msix_ctrl); +} diff --git a/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_hwif.h b/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_hwif.h new file mode 100644 index 000000000..de99507ac --- /dev/null +++ b/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_hwif.h @@ -0,0 +1,125 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Huawei Technologies Co., Ltd + */ + +#ifndef _HINIC_PMD_HWIF_H_ +#define _HINIC_PMD_HWIF_H_ + +#define HINIC_WAIT_DOORBELL_AND_OUTBOUND_TIMEOUT 30000 + +#define HINIC_HWIF_NUM_AEQS(hwif) ((hwif)->attr.num_aeqs) +#define HINIC_HWIF_NUM_CEQS(hwif) ((hwif)->attr.num_ceqs) +#define HINIC_HWIF_NUM_IRQS(hwif) ((hwif)->attr.num_irqs) +#define HINIC_HWIF_GLOBAL_IDX(hwif) ((hwif)->attr.func_global_idx) +#define HINIC_HWIF_GLOBAL_VF_OFFSET(hwif) ((hwif)->attr.global_vf_id_of_pf) +#define HINIC_HWIF_PPF_IDX(hwif) ((hwif)->attr.ppf_idx) +#define HINIC_PCI_INTF_IDX(hwif) ((hwif)->attr.pci_intf_idx) + +#define HINIC_FUNC_TYPE(dev) ((dev)->hwif->attr.func_type) +#define HINIC_IS_PF(dev) (HINIC_FUNC_TYPE(dev) == TYPE_PF) +#define HINIC_IS_VF(dev) (HINIC_FUNC_TYPE(dev) == TYPE_VF) +#define HINIC_IS_PPF(dev) (HINIC_FUNC_TYPE(dev) == TYPE_PPF) + +enum func_type { + TYPE_PF, + TYPE_VF, + TYPE_PPF, +}; + +enum hinic_msix_state { + HINIC_MSIX_ENABLE, + HINIC_MSIX_DISABLE, +}; + +/* Defines the IRQ information structure */ +struct irq_info { + u16 msix_entry_idx; /* IRQ corresponding index number */ + u32 irq_id; /* the IRQ number from OS */ +}; + +struct hinic_free_db_area { + u32 db_idx[HINIC_DB_MAX_AREAS]; + + u32 num_free; + + u32 alloc_pos; + u32 return_pos; + /* spinlock for idx */ + spinlock_t idx_lock; +}; + +struct hinic_func_attr { + u16 func_global_idx; + u8 port_to_port_idx; + u8 pci_intf_idx; + u8 vf_in_pf; + enum func_type func_type; + + u8 mpf_idx; + + u8 ppf_idx; + + u16 num_irqs; /* max: 2 ^ 15 */ + u8 num_aeqs; 
/* max: 2 ^ 3 */ + u8 num_ceqs; /* max: 2 ^ 7 */ + + u8 num_dma_attr; /* max: 2 ^ 6 */ + + u16 global_vf_id_of_pf; +}; + +struct hinic_hwif { + u8 __iomem *cfg_regs_base; + u8 __iomem *intr_regs_base; + u64 db_base_phy; + u8 __iomem *db_base; + u64 db_max_areas; + struct hinic_free_db_area free_db_area; + struct hinic_func_attr attr; +}; + +static inline u32 hinic_hwif_read_reg(struct hinic_hwif *hwif, u32 reg) +{ + return be32_to_cpu(readl(hwif->cfg_regs_base + reg)); +} + +static inline void +hinic_hwif_write_reg(struct hinic_hwif *hwif, u32 reg, u32 val) +{ + writel(cpu_to_be32(val), hwif->cfg_regs_base + reg); +} + +u16 hinic_global_func_id(void *hwdev); /* func_attr.glb_func_idx */ + +enum func_type hinic_func_type(void *hwdev); + +void hinic_set_pf_status(struct hinic_hwif *hwif, enum hinic_pf_status status); + +enum hinic_pf_status hinic_get_pf_status(struct hinic_hwif *hwif); + +void hinic_enable_doorbell(struct hinic_hwif *hwif); + +void hinic_disable_doorbell(struct hinic_hwif *hwif); + +int hinic_alloc_db_addr(void *hwdev, void __iomem **db_base); + +void hinic_free_db_addr(void *hwdev, void __iomem *db_base); + +int wait_until_doorbell_flush_states(struct hinic_hwif *hwif, + enum hinic_doorbell_ctrl states); + +void hinic_set_msix_state(void *hwdev, u16 msix_idx, + enum hinic_msix_state flag); + +void hinic_misx_intr_clear_resend_bit(void *hwdev, u16 msix_idx, + u8 clear_resend_en); + +u8 hinic_ppf_idx(void *hwdev); + +int hinic_hwif_res_init(struct hinic_hwdev *hwdev); + +void hinic_hwif_res_free(struct hinic_hwdev *hwdev); + +u8 hinic_dma_attr_entry_num(void *hwdev); + +#endif /* _HINIC_PMD_HWIF_H_ */ diff --git a/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_mbox.c b/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_mbox.c new file mode 100644 index 000000000..ab1106a37 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_mbox.c @@ -0,0 +1,933 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Huawei Technologies Co., Ltd + */ + +#include "hinic_compat.h" +#include "hinic_csr.h" +#include "hinic_pmd_hwdev.h" +#include "hinic_pmd_hwif.h" +#include "hinic_pmd_eqs.h" +#include "hinic_pmd_mgmt.h" +#include "hinic_pmd_mbox.h" + +#define HINIC_MBOX_INT_DST_FUNC_SHIFT 0 +#define HINIC_MBOX_INT_DST_AEQN_SHIFT 10 +#define HINIC_MBOX_INT_SRC_RESP_AEQN_SHIFT 12 +#define HINIC_MBOX_INT_STAT_DMA_SHIFT 14 +/* The size of data to be send (unit of 4 bytes) */ +#define HINIC_MBOX_INT_TX_SIZE_SHIFT 20 +/* SO_RO(strong order, relax order) */ +#define HINIC_MBOX_INT_STAT_DMA_SO_RO_SHIFT 25 +#define HINIC_MBOX_INT_WB_EN_SHIFT 28 + + +#define HINIC_MBOX_INT_DST_FUNC_MASK 0x3FF +#define HINIC_MBOX_INT_DST_AEQN_MASK 0x3 +#define HINIC_MBOX_INT_SRC_RESP_AEQN_MASK 0x3 +#define HINIC_MBOX_INT_STAT_DMA_MASK 0x3F +#define HINIC_MBOX_INT_TX_SIZE_MASK 0x1F +#define HINIC_MBOX_INT_STAT_DMA_SO_RO_MASK 0x3 +#define HINIC_MBOX_INT_WB_EN_MASK 0x1 + +#define HINIC_MBOX_INT_SET(val, field) \ + (((val) & HINIC_MBOX_INT_##field##_MASK) << \ + HINIC_MBOX_INT_##field##_SHIFT) + +enum hinic_mbox_tx_status { + TX_DONE = 0, + TX_IN_PROGRESS, +}; + +#define HINIC_MBOX_CTRL_TRIGGER_AEQE_SHIFT 0 +/* specifies the issue request for the message data. + * 0 - Tx request is done; + * 1 - Tx request is in process. 
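 * (Editorial note: write_mbox_msg_attr() below sets this field to
 * TX_IN_PROGRESS whenever a segment is handed to the hardware; completion
 * is detected by polling the separate write-back status word via
 * get_mbox_status(), not by re-reading this bit.)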
+ */ +#define HINIC_MBOX_CTRL_TX_STATUS_SHIFT 1 + +#define HINIC_MBOX_CTRL_TRIGGER_AEQE_MASK 0x1 +#define HINIC_MBOX_CTRL_TX_STATUS_MASK 0x1 + +#define HINIC_MBOX_CTRL_SET(val, field) \ + (((val) & HINIC_MBOX_CTRL_##field##_MASK) << \ + HINIC_MBOX_CTRL_##field##_SHIFT) + +#define HINIC_MBOX_HEADER_MSG_LEN_SHIFT 0 +#define HINIC_MBOX_HEADER_MODULE_SHIFT 11 +#define HINIC_MBOX_HEADER_SEG_LEN_SHIFT 16 +#define HINIC_MBOX_HEADER_NO_ACK_SHIFT 22 +#define HINIC_MBOX_HEADER_SEQID_SHIFT 24 +#define HINIC_MBOX_HEADER_LAST_SHIFT 30 + +#define HINIC_MBOX_HEADER_DIRECTION_SHIFT 31 +#define HINIC_MBOX_HEADER_CMD_SHIFT 32 +#define HINIC_MBOX_HEADER_MSG_ID_SHIFT 40 +#define HINIC_MBOX_HEADER_STATUS_SHIFT 48 +#define HINIC_MBOX_HEADER_SRC_GLB_FUNC_IDX_SHIFT 54 + +#define HINIC_MBOX_HEADER_MSG_LEN_MASK 0x7FF +#define HINIC_MBOX_HEADER_MODULE_MASK 0x1F +#define HINIC_MBOX_HEADER_SEG_LEN_MASK 0x3F +#define HINIC_MBOX_HEADER_NO_ACK_MASK 0x1 +#define HINIC_MBOX_HEADER_SEQID_MASK 0x3F +#define HINIC_MBOX_HEADER_LAST_MASK 0x1 +#define HINIC_MBOX_HEADER_DIRECTION_MASK 0x1 +#define HINIC_MBOX_HEADER_CMD_MASK 0xFF +#define HINIC_MBOX_HEADER_MSG_ID_MASK 0xFF +#define HINIC_MBOX_HEADER_STATUS_MASK 0x3F +#define HINIC_MBOX_HEADER_SRC_GLB_FUNC_IDX_MASK 0x3FF + +#define HINIC_MBOX_HEADER_GET(val, field) \ + (((val) >> HINIC_MBOX_HEADER_##field##_SHIFT) & \ + HINIC_MBOX_HEADER_##field##_MASK) +#define HINIC_MBOX_HEADER_SET(val, field) \ + ((u64)((val) & HINIC_MBOX_HEADER_##field##_MASK) << \ + HINIC_MBOX_HEADER_##field##_SHIFT) + +#define HINIC_MBOX_COMP_TIME_MS 8000U +#define MBOX_MSG_POLLING_TIMEOUT_MS 5000 + +/* The size unit is Bytes */ +#define HINIC_MBOX_DATA_SIZE 2040 +#define MBOX_MAX_BUF_SZ 2048UL +#define MBOX_HEADER_SZ 8 + +/* MBOX size is 64B, 8B for mbox_header, 4B reserved */ +#define MBOX_SEG_LEN 48 +#define MBOX_SEG_LEN_ALIGN 4 +#define MBOX_WB_STATUS_LEN 16UL +#define MBOX_SIZE 64 + +/* mbox write back status is 16B, only first 4B is used */ +#define MBOX_WB_STATUS_ERRCODE_MASK 0xFFFF +#define MBOX_WB_STATUS_MASK 0xFF +#define MBOX_WB_ERROR_CODE_MASK 0xFF00 +#define MBOX_WB_STATUS_FINISHED_SUCCESS 0xFF +#define MBOX_WB_STATUS_FINISHED_WITH_ERR 0xFE +#define MBOX_WB_STATUS_NOT_FINISHED 0x00 + +#define MBOX_STATUS_FINISHED(wb) \ + (((wb) & MBOX_WB_STATUS_MASK) != MBOX_WB_STATUS_NOT_FINISHED) +#define MBOX_STATUS_SUCCESS(wb) \ + (((wb) & MBOX_WB_STATUS_MASK) == MBOX_WB_STATUS_FINISHED_SUCCESS) +#define MBOX_STATUS_ERRCODE(wb) \ + ((wb) & MBOX_WB_ERROR_CODE_MASK) + +#define SEQ_ID_START_VAL 0 + +#define DST_AEQ_IDX_DEFAULT_VAL 0 +#define SRC_AEQ_IDX_DEFAULT_VAL 0 +#define NO_DMA_ATTRIBUTE_VAL 0 + +#define MBOX_MSG_NO_DATA_LEN 1 + +#define FUNC_ID_OFF_SET_8B 8 +#define FUNC_ID_OFF_SET_10B 10 + +#define MBOX_BODY_FROM_HDR(header) ((u8 *)(header) + MBOX_HEADER_SZ) +#define MBOX_AREA(hwif) \ + ((hwif)->cfg_regs_base + HINIC_FUNC_CSR_MAILBOX_DATA_OFF) + +#define MBOX_RESPONSE_ERROR 0x1 +#define MBOX_MSG_ID_MASK 0xFF +#define MBOX_MSG_ID(func_to_func) ((func_to_func)->send_msg_id) + +enum hinic_hwif_direction_type { + /* driver send msg to up or up send msg to driver*/ + HINIC_HWIF_DIRECT_SEND = 0, + /* after driver/up send msg to each other, then up/driver ack the msg */ + HINIC_HWIF_RESPONSE, +}; + +enum mbox_send_mod { + MBOX_SEND_MSG_POLL = 1 +}; + +enum mbox_seg_type { + NOT_LAST_SEG, + LAST_SEG, +}; + +enum mbox_ordering_type { + STRONG_ORDER, + RELAX_ORDER, +}; + +enum mbox_write_back_type { + NOT_WRITE_BACK = 0, + WRITE_BACK, +}; + +enum mbox_aeq_trig_type { + NOT_TRIGGER, + TRIGGER, +}; + +static int 
send_mbox_to_func(struct hinic_mbox_func_to_func *func_to_func, + enum hinic_mod_type mod, u16 cmd, void *msg, + u16 msg_len, u16 dst_func, + enum hinic_hwif_direction_type direction, + enum hinic_mbox_ack_type ack_type, + struct mbox_msg_info *msg_info); + +static int recv_vf_mbox_handler(struct hinic_mbox_func_to_func *func_to_func, + struct hinic_recv_mbox *recv_mbox, + void *buf_out, u16 *out_size, void *param) +{ + int rc = 0; + *out_size = 0; + + switch (recv_mbox->mod) { + case HINIC_MOD_COMM: + hinic_comm_async_event_handle(func_to_func->hwdev, + recv_mbox->cmd, recv_mbox->mbox, + recv_mbox->mbox_len, + buf_out, out_size); + break; + case HINIC_MOD_L2NIC: + hinic_l2nic_async_event_handle(func_to_func->hwdev, param, + recv_mbox->cmd, recv_mbox->mbox, + recv_mbox->mbox_len, + buf_out, out_size); + break; + default: + PMD_DRV_LOG(ERR, "No handler, mod: %d", recv_mbox->mod); + rc = HINIC_MBOX_VF_CMD_ERROR; + break; + } + + return rc; +} + +static void set_mbx_msg_status(struct mbox_msg_info *msg_info, int status) +{ + if (status == HINIC_DEV_BUSY_ACTIVE_FW) + msg_info->status = HINIC_MBOX_PF_BUSY_ACTIVE_FW; + else if (status == HINIC_MBOX_VF_CMD_ERROR) + msg_info->status = HINIC_MBOX_VF_CMD_ERROR; + else if (status) + msg_info->status = HINIC_MBOX_PF_SEND_ERR; +} + +static void recv_func_mbox_handler(struct hinic_mbox_func_to_func *func_to_func, + struct hinic_recv_mbox *recv_mbox, + u16 src_func_idx, void *param) +{ + struct hinic_hwdev *dev = func_to_func->hwdev; + struct mbox_msg_info msg_info = { 0 }; + u16 out_size = MBOX_MAX_BUF_SZ; + void *buf_out = recv_mbox->buf_out; + int err = 0; + + if (HINIC_IS_VF(dev)) { + err = recv_vf_mbox_handler(func_to_func, recv_mbox, buf_out, + &out_size, param); + } else { + err = -EINVAL; + PMD_DRV_LOG(ERR, "PMD doesn't support non-VF handle mailbox message"); + } + + if (!out_size || err) + out_size = MBOX_MSG_NO_DATA_LEN; + + if (recv_mbox->ack_type == MBOX_ACK) { + msg_info.msg_id = recv_mbox->msg_info.msg_id; + set_mbx_msg_status(&msg_info, err); + send_mbox_to_func(func_to_func, recv_mbox->mod, recv_mbox->cmd, + buf_out, out_size, src_func_idx, + HINIC_HWIF_RESPONSE, MBOX_ACK, &msg_info); + } +} + +static bool check_mbox_seq_id_and_seg_len(struct hinic_recv_mbox *recv_mbox, + u8 seq_id, u8 seg_len) +{ + if (seq_id > HINIC_SEQ_ID_MAX_VAL || seg_len > HINIC_MSG_SEG_LEN) + return false; + + if (seq_id == 0) { + recv_mbox->sed_id = seq_id; + } else { + if (seq_id != recv_mbox->sed_id + 1) { + recv_mbox->sed_id = 0; + return false; + } + + recv_mbox->sed_id = seq_id; + } + + return true; +} + +static void clear_mbox_status(struct hinic_send_mbox *mbox) +{ + /* clear mailbox write back status */ + *mbox->wb_status = 0; + rte_wmb(); +} + +static void mbox_copy_header(struct hinic_send_mbox *mbox, u64 *header) +{ + u32 *data = (u32 *)header; + u32 i, idx_max = MBOX_HEADER_SZ / sizeof(u32); + + for (i = 0; i < idx_max; i++) + __raw_writel(*(data + i), mbox->data + i * sizeof(u32)); +} + +static void +mbox_copy_send_data(struct hinic_send_mbox *mbox, void *seg, u16 seg_len) +{ + u32 *data = (u32 *)seg; + u32 data_len, chk_sz = sizeof(u32); + u32 i, idx_max; + u8 mbox_max_buf[MBOX_SEG_LEN] = {0}; + + /* The mbox message should be aligned in 4 bytes. 
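 * (Editorial note: segments whose length is not a multiple of 4 are first
 * copied into the zeroed bounce buffer mbox_max_buf below, so only whole
 * 32-bit words are read from the source and written to the mailbox area.)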
*/ + if (seg_len % chk_sz) { + memcpy(mbox_max_buf, seg, seg_len); + data = (u32 *)mbox_max_buf; + } + + data_len = seg_len; + idx_max = ALIGN(data_len, chk_sz) / chk_sz; + + for (i = 0; i < idx_max; i++) + __raw_writel(*(data + i), + mbox->data + MBOX_HEADER_SZ + i * sizeof(u32)); +} + +static void write_mbox_msg_attr(struct hinic_mbox_func_to_func *func_to_func, + u16 dst_func, u16 dst_aeqn, + __rte_unused u16 seg_len, int poll) +{ + u32 mbox_int, mbox_ctrl; + + mbox_int = HINIC_MBOX_INT_SET(dst_func, DST_FUNC) | + HINIC_MBOX_INT_SET(dst_aeqn, DST_AEQN) | + HINIC_MBOX_INT_SET(HINIC_MBOX_RSP_AEQN, SRC_RESP_AEQN) | + HINIC_MBOX_INT_SET(NO_DMA_ATTRIBUTE_VAL, STAT_DMA) | + HINIC_MBOX_INT_SET(ALIGN(MBOX_SIZE, MBOX_SEG_LEN_ALIGN) >> 2, + TX_SIZE) | + HINIC_MBOX_INT_SET(STRONG_ORDER, STAT_DMA_SO_RO) | + HINIC_MBOX_INT_SET(WRITE_BACK, WB_EN); + + hinic_hwif_write_reg(func_to_func->hwdev->hwif, + HINIC_FUNC_CSR_MAILBOX_INT_OFFSET_OFF, mbox_int); + + rte_wmb(); + mbox_ctrl = HINIC_MBOX_CTRL_SET(TX_IN_PROGRESS, TX_STATUS); + + if (poll) + mbox_ctrl |= HINIC_MBOX_CTRL_SET(NOT_TRIGGER, TRIGGER_AEQE); + else + mbox_ctrl |= HINIC_MBOX_CTRL_SET(TRIGGER, TRIGGER_AEQE); + + hinic_hwif_write_reg(func_to_func->hwdev->hwif, + HINIC_FUNC_CSR_MAILBOX_CONTROL_OFF, mbox_ctrl); +} + +static int init_mbox_info(struct hinic_recv_mbox *mbox_info) +{ + int err; + + mbox_info->mbox = kzalloc(MBOX_MAX_BUF_SZ, GFP_KERNEL); + if (!mbox_info->mbox) { + PMD_DRV_LOG(ERR, "Alloc mbox buf_in mem failed\n"); + return -ENOMEM; + } + + mbox_info->buf_out = kzalloc(MBOX_MAX_BUF_SZ, GFP_KERNEL); + if (!mbox_info->buf_out) { + PMD_DRV_LOG(ERR, "Alloc mbox buf_out mem failed\n"); + err = -ENOMEM; + goto alloc_buf_out_err; + } + + return 0; + +alloc_buf_out_err: + kfree(mbox_info->mbox); + + return err; +} + +static void clean_mbox_info(struct hinic_recv_mbox *mbox_info) +{ + kfree(mbox_info->buf_out); + kfree(mbox_info->mbox); +} + +static int alloc_mbox_info(struct hinic_recv_mbox *mbox_info) +{ + u16 func_idx, i; + int err; + + for (func_idx = 0; func_idx < HINIC_MAX_FUNCTIONS; func_idx++) { + err = init_mbox_info(&mbox_info[func_idx]); + if (err) { + PMD_DRV_LOG(ERR, "Initialize function[%d] mailbox information failed, err: %d", + func_idx, err); + goto init_mbox_info_err; + } + } + + return 0; + +init_mbox_info_err: + for (i = 0; i < func_idx; i++) + clean_mbox_info(&mbox_info[i]); + + return err; +} + +static void free_mbox_info(struct hinic_recv_mbox *mbox_info) +{ + u16 func_idx; + + for (func_idx = 0; func_idx < HINIC_MAX_FUNCTIONS; func_idx++) + clean_mbox_info(&mbox_info[func_idx]); +} + +static void prepare_send_mbox(struct hinic_mbox_func_to_func *func_to_func) +{ + struct hinic_send_mbox *send_mbox = &func_to_func->send_mbox; + + send_mbox->data = MBOX_AREA(func_to_func->hwdev->hwif); +} + +static int alloc_mbox_wb_status(struct hinic_mbox_func_to_func *func_to_func) +{ + struct hinic_send_mbox *send_mbox = &func_to_func->send_mbox; + struct hinic_hwdev *hwdev = func_to_func->hwdev; + struct hinic_hwif *hwif = hwdev->hwif; + u32 addr_h, addr_l; + + send_mbox->wb_vaddr = dma_zalloc_coherent(hwdev, MBOX_WB_STATUS_LEN, + &send_mbox->wb_paddr, SOCKET_ID_ANY); + if (!send_mbox->wb_vaddr) { + PMD_DRV_LOG(ERR, "Allocating memory for mailbox wb status failed"); + return -ENOMEM; + } + send_mbox->wb_status = (volatile u64 *)send_mbox->wb_vaddr; + + addr_h = upper_32_bits(send_mbox->wb_paddr); + addr_l = lower_32_bits(send_mbox->wb_paddr); + hinic_hwif_write_reg(hwif, HINIC_FUNC_CSR_MAILBOX_RESULT_H_OFF, addr_h); + 
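	/* Editorial note: the 64-bit DMA address of the 16-byte write-back
	 * area is programmed as two 32-bit halves (RESULT_H above, RESULT_L
	 * below); hardware writes the mailbox completion status there and
	 * send_mbox_seg() polls it through get_mbox_status().
	 */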
hinic_hwif_write_reg(hwif, HINIC_FUNC_CSR_MAILBOX_RESULT_L_OFF, addr_l); + + return 0; +} + +static void free_mbox_wb_status(struct hinic_mbox_func_to_func *func_to_func) +{ + struct hinic_send_mbox *send_mbox = &func_to_func->send_mbox; + struct hinic_hwdev *hwdev = func_to_func->hwdev; + struct hinic_hwif *hwif = hwdev->hwif; + + hinic_hwif_write_reg(hwif, HINIC_FUNC_CSR_MAILBOX_RESULT_H_OFF, 0); + hinic_hwif_write_reg(hwif, HINIC_FUNC_CSR_MAILBOX_RESULT_L_OFF, 0); + + dma_free_coherent(hwdev, MBOX_WB_STATUS_LEN, + send_mbox->wb_vaddr, send_mbox->wb_paddr); +} + +static int recv_mbox_handler(struct hinic_mbox_func_to_func *func_to_func, + void *header, struct hinic_recv_mbox *recv_mbox, void *param) +{ + u64 mbox_header = *((u64 *)header); + void *mbox_body = MBOX_BODY_FROM_HDR(header); + u16 src_func_idx; + enum hinic_hwif_direction_type direction; + u8 seq_id, seg_len; + + seq_id = HINIC_MBOX_HEADER_GET(mbox_header, SEQID); + seg_len = HINIC_MBOX_HEADER_GET(mbox_header, SEG_LEN); + direction = HINIC_MBOX_HEADER_GET(mbox_header, DIRECTION); + src_func_idx = HINIC_MBOX_HEADER_GET(mbox_header, SRC_GLB_FUNC_IDX); + + if (!check_mbox_seq_id_and_seg_len(recv_mbox, seq_id, seg_len)) { + PMD_DRV_LOG(ERR, + "Mailbox sequence and segment check failed, src func id: 0x%x, front id: 0x%x, current id: 0x%x, seg len: 0x%x\n", + src_func_idx, recv_mbox->sed_id, seq_id, seg_len); + return HINIC_ERROR; + } + + memcpy((u8 *)recv_mbox->mbox + seq_id * HINIC_MSG_SEG_LEN, + mbox_body, seg_len); + + if (!HINIC_MBOX_HEADER_GET(mbox_header, LAST)) + return HINIC_ERROR; + + recv_mbox->sed_id = 0; + recv_mbox->cmd = HINIC_MBOX_HEADER_GET(mbox_header, CMD); + recv_mbox->mod = HINIC_MBOX_HEADER_GET(mbox_header, MODULE); + recv_mbox->mbox_len = HINIC_MBOX_HEADER_GET(mbox_header, MSG_LEN); + recv_mbox->ack_type = HINIC_MBOX_HEADER_GET(mbox_header, NO_ACK); + recv_mbox->msg_info.msg_id = HINIC_MBOX_HEADER_GET(mbox_header, MSG_ID); + recv_mbox->msg_info.status = HINIC_MBOX_HEADER_GET(mbox_header, STATUS); + + if (direction == HINIC_HWIF_RESPONSE) { + if (recv_mbox->msg_info.msg_id == func_to_func->send_msg_id && + func_to_func->event_flag == EVENT_START) { + return HINIC_OK; + } + + PMD_DRV_LOG(ERR, "Mbox response timeout, current send msg id(0x%x), recv msg id(0x%x), status(0x%x)", + func_to_func->send_msg_id, recv_mbox->msg_info.msg_id, + recv_mbox->msg_info.status); + return HINIC_ERROR; + } + + recv_func_mbox_handler(func_to_func, recv_mbox, src_func_idx, param); + + return HINIC_ERROR; +} + +/** + * hinic_mbox_func_aeqe_handler - Process mbox info from func which is + * sent by aeqe. + * + * @param handle + * Pointer to hradware nic device. + * @param header + * Mbox header info. + * @param size + * The size of aeqe descriptor. + * @param param + * customized parameter. + * + * @return + * 0 on success, negative error value otherwise. + */ +int hinic_mbox_func_aeqe_handler(void *handle, u8 *header, + __rte_unused u8 size, void *param) +{ + struct hinic_mbox_func_to_func *func_to_func = + ((struct hinic_hwdev *)handle)->func_to_func; + struct hinic_recv_mbox *recv_mbox; + u64 mbox_header = *((u64 *)header); + u16 src = HINIC_MBOX_HEADER_GET(mbox_header, SRC_GLB_FUNC_IDX); + + if (src >= HINIC_MAX_FUNCTIONS) { + PMD_DRV_LOG(ERR, "Mailbox source function id: %d is invalid", + src); + return HINIC_ERROR; + } + + recv_mbox = (HINIC_MBOX_HEADER_GET(mbox_header, DIRECTION) == + HINIC_HWIF_DIRECT_SEND) ? 
+ &func_to_func->mbox_send[src] : + &func_to_func->mbox_resp[src]; + + return recv_mbox_handler(func_to_func, (u64 *)header, recv_mbox, param); +} + +static u16 get_mbox_status(struct hinic_send_mbox *mbox) +{ + /* write back is 16B, but only use first 4B */ + u64 wb_val = be64_to_cpu(*mbox->wb_status); + + rte_rmb(); /* verify reading before check */ + + return (u16)(wb_val & MBOX_WB_STATUS_ERRCODE_MASK); +} + +static void dump_mox_reg(struct hinic_hwdev *hwdev) +{ + u32 val; + + val = hinic_hwif_read_reg(hwdev->hwif, + HINIC_FUNC_CSR_MAILBOX_CONTROL_OFF); + PMD_DRV_LOG(WARNING, "Mailbox control reg: 0x%x", val); + val = hinic_hwif_read_reg(hwdev->hwif, + HINIC_FUNC_CSR_MAILBOX_INT_OFFSET_OFF); + PMD_DRV_LOG(WARNING, "Mailbox interrupt offset: 0x%x", val); +} + +static int send_mbox_seg(struct hinic_mbox_func_to_func *func_to_func, + u64 header, u16 dst_func, void *seg, u16 seg_len) +{ + struct hinic_send_mbox *send_mbox = &func_to_func->send_mbox; + struct hinic_hwdev *hwdev = func_to_func->hwdev; + u16 seq_dir = HINIC_MBOX_HEADER_GET(header, DIRECTION); + u16 dst_aeqn = (seq_dir == HINIC_HWIF_DIRECT_SEND) ? + HINIC_MBOX_RECV_AEQN : HINIC_MBOX_RSP_AEQN; + u16 err_code, wb_status = 0; + u32 cnt = 0; + + clear_mbox_status(send_mbox); + + mbox_copy_header(send_mbox, &header); + + mbox_copy_send_data(send_mbox, seg, seg_len); + + write_mbox_msg_attr(func_to_func, dst_func, dst_aeqn, seg_len, + MBOX_SEND_MSG_POLL); + + rte_wmb(); + + while (cnt < MBOX_MSG_POLLING_TIMEOUT_MS) { + wb_status = get_mbox_status(send_mbox); + if (MBOX_STATUS_FINISHED(wb_status)) + break; + + rte_delay_ms(1); /* loop every ms */ + cnt++; + } + + if (cnt == MBOX_MSG_POLLING_TIMEOUT_MS) { + PMD_DRV_LOG(ERR, "Send mailbox segment timeout, wb status: 0x%x", + wb_status); + dump_mox_reg(hwdev); + return -ETIMEDOUT; + } + + if (!MBOX_STATUS_SUCCESS(wb_status)) { + PMD_DRV_LOG(ERR, "Send mailbox segment to function %d error, wb status: 0x%x", + dst_func, wb_status); + /* + * err_code: 0 responses no errors, other values can + * refer to FS doc. + */ + err_code = MBOX_STATUS_ERRCODE(wb_status); + return err_code ? 
err_code : -EFAULT; + } + + return 0; +} + +static void set_mbox_to_func_event(struct hinic_mbox_func_to_func *func_to_func, + enum mbox_event_state event_flag) +{ + spin_lock(&func_to_func->mbox_lock); + func_to_func->event_flag = event_flag; + spin_unlock(&func_to_func->mbox_lock); +} + +static int send_mbox_to_func(struct hinic_mbox_func_to_func *func_to_func, + enum hinic_mod_type mod, u16 cmd, void *msg, + u16 msg_len, u16 dst_func, + enum hinic_hwif_direction_type direction, + enum hinic_mbox_ack_type ack_type, + struct mbox_msg_info *msg_info) +{ + struct hinic_hwdev *hwdev = func_to_func->hwdev; + int err = 0; + u32 seq_id = 0; + u16 seg_len = HINIC_MSG_SEG_LEN; + u16 left = msg_len; + u8 *msg_seg = (u8 *)msg; + u64 header = 0; + + err = hinic_mutex_lock(&func_to_func->msg_send_mutex); + if (err) + return err; + + header = HINIC_MBOX_HEADER_SET(msg_len, MSG_LEN) | + HINIC_MBOX_HEADER_SET(mod, MODULE) | + HINIC_MBOX_HEADER_SET(seg_len, SEG_LEN) | + HINIC_MBOX_HEADER_SET(ack_type, NO_ACK) | + HINIC_MBOX_HEADER_SET(SEQ_ID_START_VAL, SEQID) | + HINIC_MBOX_HEADER_SET(NOT_LAST_SEG, LAST) | + HINIC_MBOX_HEADER_SET(direction, DIRECTION) | + HINIC_MBOX_HEADER_SET(cmd, CMD) | + HINIC_MBOX_HEADER_SET(msg_info->msg_id, MSG_ID) | + HINIC_MBOX_HEADER_SET(msg_info->status, STATUS) | + HINIC_MBOX_HEADER_SET(hinic_global_func_id(hwdev), + SRC_GLB_FUNC_IDX); + + while (!(HINIC_MBOX_HEADER_GET(header, LAST))) { + if (left <= HINIC_MSG_SEG_LEN) { + header &= + ~(HINIC_MBOX_HEADER_SET(HINIC_MBOX_HEADER_SEG_LEN_MASK, + SEG_LEN)); + header |= HINIC_MBOX_HEADER_SET(left, SEG_LEN); + header |= HINIC_MBOX_HEADER_SET(LAST_SEG, LAST); + + seg_len = left; + } + + err = send_mbox_seg(func_to_func, header, dst_func, msg_seg, + seg_len); + if (err) { + PMD_DRV_LOG(ERR, "Fail to send mbox seg, err: %d", err); + goto send_err; + } + + left -= HINIC_MSG_SEG_LEN; + msg_seg += HINIC_MSG_SEG_LEN; + + seq_id++; + header &= ~(HINIC_MBOX_HEADER_SET(HINIC_MBOX_HEADER_SEQID_MASK, + SEQID)); + header |= HINIC_MBOX_HEADER_SET(seq_id, SEQID); + } + +send_err: + (void)hinic_mutex_unlock(&func_to_func->msg_send_mutex); + + return err; +} + +static int hinic_mbox_to_func(struct hinic_mbox_func_to_func *func_to_func, + enum hinic_mod_type mod, u16 cmd, u16 dst_func, + void *buf_in, u16 in_size, void *buf_out, u16 *out_size, + u32 timeout) +{ + struct hinic_recv_mbox *mbox_for_resp = + &func_to_func->mbox_resp[dst_func]; + struct mbox_msg_info msg_info = {0}; + u32 time; + int err; + + err = hinic_mutex_lock(&func_to_func->mbox_send_mutex); + if (err) + return err; + + msg_info.msg_id = (MBOX_MSG_ID(func_to_func) + 1) & MBOX_MSG_ID_MASK; + MBOX_MSG_ID(func_to_func) = msg_info.msg_id; + + set_mbox_to_func_event(func_to_func, EVENT_START); + + err = send_mbox_to_func(func_to_func, mod, cmd, buf_in, in_size, + dst_func, HINIC_HWIF_DIRECT_SEND, + MBOX_ACK, &msg_info); + if (err) + goto send_err; + + time = msecs_to_jiffies(timeout ? 
timeout : HINIC_MBOX_COMP_TIME_MS); + err = hinic_aeq_poll_msg(func_to_func->rsp_aeq, time, NULL); + if (err) { + set_mbox_to_func_event(func_to_func, EVENT_TIMEOUT); + PMD_DRV_LOG(ERR, "Send mailbox message time out"); + err = -ETIMEDOUT; + goto send_err; + } + + set_mbox_to_func_event(func_to_func, EVENT_END); + + if (mbox_for_resp->msg_info.status) { + err = mbox_for_resp->msg_info.status; + if (err != HINIC_MBOX_PF_BUSY_ACTIVE_FW) + PMD_DRV_LOG(ERR, "Mailbox response error: 0x%x", + mbox_for_resp->msg_info.status); + else + PMD_DRV_LOG(ERR, "Chip is in active, PF can't process VF message"); + goto send_err; + } + + rte_rmb(); + + if (mbox_for_resp->mbox_len && buf_out && out_size) { + if (mbox_for_resp->mbox_len <= *out_size) { + memcpy(buf_out, mbox_for_resp->mbox, + mbox_for_resp->mbox_len); + *out_size = mbox_for_resp->mbox_len; + } else { + PMD_DRV_LOG(ERR, "Mailbox response message len[%u] overflow", + mbox_for_resp->mbox_len); + err = -ERANGE; + } + } + +send_err: + if (err && out_size) + *out_size = 0; + (void)hinic_mutex_unlock(&func_to_func->mbox_send_mutex); + + return err; +} + +static int +mbox_func_params_valid(__rte_unused struct hinic_mbox_func_to_func *mbox_obj, + void *buf_in, u16 in_size) +{ + if (!buf_in || !in_size) + return -EINVAL; + + if (in_size > HINIC_MBOX_DATA_SIZE) { + PMD_DRV_LOG(ERR, "Mailbox message len(%d) exceed limit(%d)", + in_size, HINIC_MBOX_DATA_SIZE); + return -EINVAL; + } + + return 0; +} + +static u8 hinic_pf_id_of_vf(void *hwdev) +{ + struct hinic_hwif *hwif = ((struct hinic_hwdev *)hwdev)->hwif; + return hwif->attr.port_to_port_idx; +} + +/** + * hinic_mbox_to_pf - Send mbox info to pf and need pf to response. + * + * @param hwdev + * Pointer to hardware nic device. + * @param mod + * Mode type of hardware. + * @param cmd + * The command sent to pf. + * @param buf_in + * Input parameter. + * @param in_size + * Input parameter size. + * @param buf_out + * Output parameter. + * @param out_size + * Output parameter size. + * @param timeout + * Timeout. + * + * @return + * 0 on success, negative error value otherwise. + */ +int hinic_mbox_to_pf(struct hinic_hwdev *hwdev, + enum hinic_mod_type mod, u8 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size, u32 timeout) +{ + struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func; + int err; + + err = mbox_func_params_valid(func_to_func, buf_in, in_size); + if (err) { + PMD_DRV_LOG(ERR, "Mailbox parameters check failed: %d", err); + return err; + } + + if (!HINIC_IS_VF(hwdev)) { + PMD_DRV_LOG(ERR, "Input function type error, func_type: %d", + hinic_func_type(hwdev)); + return -EINVAL; + } + + return hinic_mbox_to_func(func_to_func, mod, cmd, + hinic_pf_id_of_vf(hwdev), buf_in, in_size, + buf_out, out_size, timeout); +} + +/** + * hinic_mbox_to_pf_no_ack - Send mbox info to pf and do not need pf to response + * + * @param hwdev + * Pointer to hardware nic device. + * @param mod + * Mode type of hardware. + * @param cmd + * The command sent to pf. + * @param buf_in + * Input parameter. + * @param in_size + * Input parameter size. + * + * @return + * 0 on success, negative error value otherwise. 
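 * Editorial note: unlike hinic_mbox_to_pf(), this path sends with MBOX_NO_ACK
 * and returns once the segments have been written, without waiting on the
 * response AEQ.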
+ */ +int hinic_mbox_to_pf_no_ack(struct hinic_hwdev *hwdev, enum hinic_mod_type mod, + u8 cmd, void *buf_in, u16 in_size) +{ + int err; + struct mbox_msg_info msg_info = {0}; + + err = hinic_mutex_lock(&hwdev->func_to_func->mbox_send_mutex); + if (err) + return err; + + err = send_mbox_to_func(hwdev->func_to_func, mod, cmd, buf_in, in_size, + hinic_pf_id_of_vf(hwdev), HINIC_HWIF_DIRECT_SEND, + MBOX_NO_ACK, &msg_info); + if (err) + PMD_DRV_LOG(ERR, "Send mailbox no ack failed, err: %d", err); + + (void)hinic_mutex_unlock(&hwdev->func_to_func->mbox_send_mutex); + + return err; +} + +static int hinic_func_to_func_init(struct hinic_hwdev *hwdev) +{ + struct hinic_mbox_func_to_func *func_to_func; + int err; + + func_to_func = kzalloc(sizeof(*func_to_func), GFP_KERNEL); + if (!func_to_func) { + PMD_DRV_LOG(ERR, "Allocating memory for func_to_func object failed"); + return -ENOMEM; + } + hwdev->func_to_func = func_to_func; + func_to_func->hwdev = hwdev; + (void)hinic_mutex_init(&func_to_func->mbox_send_mutex, NULL); + (void)hinic_mutex_init(&func_to_func->msg_send_mutex, NULL); + + err = alloc_mbox_info(func_to_func->mbox_send); + if (err) { + PMD_DRV_LOG(ERR, "Allocating memory for mailbox sending failed"); + goto alloc_mbox_for_send_err; + } + + err = alloc_mbox_info(func_to_func->mbox_resp); + if (err) { + PMD_DRV_LOG(ERR, "Allocating memory for mailbox responding failed"); + goto alloc_mbox_for_resp_err; + } + + err = alloc_mbox_wb_status(func_to_func); + if (err) + goto alloc_wb_status_err; + + prepare_send_mbox(func_to_func); + + return 0; + +alloc_wb_status_err: + free_mbox_info(func_to_func->mbox_resp); + +alloc_mbox_for_resp_err: + free_mbox_info(func_to_func->mbox_send); + +alloc_mbox_for_send_err: + kfree(func_to_func); + + return err; +} + +/** + * hinic_comm_func_to_func_free - Uninitialize func to func resource. + * + * @param hwdev + * Pointer to hardware nic device. + */ +void hinic_comm_func_to_func_free(struct hinic_hwdev *hwdev) +{ + struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func; + + free_mbox_wb_status(func_to_func); + free_mbox_info(func_to_func->mbox_resp); + free_mbox_info(func_to_func->mbox_send); + (void)hinic_mutex_destroy(&func_to_func->mbox_send_mutex); + (void)hinic_mutex_destroy(&func_to_func->msg_send_mutex); + kfree(func_to_func); +} + +/** + * hinic_comm_func_to_func_init - Initialize func to func resource. + * + * @param hwdev + * Pointer to hardware nic device. + */ +int hinic_comm_func_to_func_init(struct hinic_hwdev *hwdev) +{ + int rc; + + rc = hinic_func_to_func_init(hwdev); + if (rc) + return rc; + + hwdev->func_to_func->rsp_aeq = &hwdev->aeqs->aeq[HINIC_MBOX_RSP_AEQN]; + hwdev->func_to_func->recv_aeq = &hwdev->aeqs->aeq[HINIC_MBOX_RECV_AEQN]; + + return 0; +} + diff --git a/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_mbox.h b/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_mbox.h new file mode 100644 index 000000000..bf7b4906d --- /dev/null +++ b/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_mbox.h @@ -0,0 +1,93 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Huawei Technologies Co., Ltd + */ + +#ifndef _HINIC_PMD_MBOX_H_ +#define _HINIC_PMD_MBOX_H_ + +#define HINIC_MBOX_RECV_AEQN 0 +#define HINIC_MBOX_RSP_AEQN 2 + +#define HINIC_MBOX_PF_SEND_ERR 0x1 +#define HINIC_MBOX_PF_BUSY_ACTIVE_FW 0x2 +#define HINIC_MBOX_VF_CMD_ERROR 0x3 + +/* PFs do not support enable SR-IOV cap when PFs use PMD, VFs just receive + * mailbox message from PFs. 
The max number of PFs is 16, so the max number + * of mailbox buffer for functions is also 16. + */ +#define HINIC_MAX_FUNCTIONS 16 +#define HINIC_MAX_PF_FUNCS 16 + +#define HINIC_MGMT_CMD_UNSUPPORTED 0xFF + +#define HINIC_SEQ_ID_MAX_VAL 42 +#define HINIC_MSG_SEG_LEN 48 + +enum hinic_mbox_ack_type { + MBOX_ACK, + MBOX_NO_ACK, +}; + +struct mbox_msg_info { + u8 msg_id; + u8 status; /*can only use 6 bit*/ +}; + +struct hinic_recv_mbox { + void *mbox; + u8 cmd; + enum hinic_mod_type mod; + u16 mbox_len; + void *buf_out; + enum hinic_mbox_ack_type ack_type; + struct mbox_msg_info msg_info; + u8 sed_id; +}; + +struct hinic_send_mbox { + u8 *data; + volatile u64 *wb_status; + void *wb_vaddr; + dma_addr_t wb_paddr; +}; + +enum mbox_event_state { + EVENT_START = 0, + EVENT_TIMEOUT, + EVENT_END, +}; + +struct hinic_mbox_func_to_func { + struct hinic_hwdev *hwdev; + + pthread_mutex_t mbox_send_mutex; + pthread_mutex_t msg_send_mutex; + + struct hinic_send_mbox send_mbox; + + struct hinic_recv_mbox mbox_resp[HINIC_MAX_FUNCTIONS]; + struct hinic_recv_mbox mbox_send[HINIC_MAX_FUNCTIONS]; + + struct hinic_eq *rsp_aeq; + struct hinic_eq *recv_aeq; + + u8 send_msg_id; + enum mbox_event_state event_flag; + spinlock_t mbox_lock; /* lock for mbox event flag */ +}; + +/* + * mbox function prototypes + */ +int hinic_comm_func_to_func_init(struct hinic_hwdev *hwdev); +void hinic_comm_func_to_func_free(struct hinic_hwdev *hwdev); +int hinic_mbox_func_aeqe_handler(void *handle, u8 *header, + u8 size, void *param); +int hinic_mbox_to_pf(struct hinic_hwdev *hwdev, enum hinic_mod_type mod, u8 cmd, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size, u32 timeout); +int hinic_mbox_to_pf_no_ack(struct hinic_hwdev *hwdev, enum hinic_mod_type mod, + u8 cmd, void *buf_in, u16 in_size); + +#endif /* _HINIC_PMD_MBOX_H_ */ diff --git a/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_mgmt.c b/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_mgmt.c new file mode 100644 index 000000000..94bc45f83 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_mgmt.c @@ -0,0 +1,804 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Huawei Technologies Co., Ltd + */ + +#include "hinic_compat.h" +#include "hinic_csr.h" +#include "hinic_pmd_hwdev.h" +#include "hinic_pmd_hwif.h" +#include "hinic_pmd_mgmt.h" +#include "hinic_pmd_mbox.h" + +#define BUF_OUT_DEFAULT_SIZE 1 + +#define MAX_PF_MGMT_BUF_SIZE 2048UL + +#define MGMT_MSG_SIZE_MIN 20 +#define MGMT_MSG_SIZE_STEP 16 +#define MGMT_MSG_RSVD_FOR_DEV 8 + +#define MGMT_MSG_TIMEOUT 5000 /* millisecond */ + +#define SYNC_MSG_ID_MASK 0x1FF +#define ASYNC_MSG_ID_MASK 0x1FF +#define ASYNC_MSG_FLAG 0x200 + +#define MSG_NO_RESP 0xFFFF + +#define MAX_MSG_SZ 2016 + +#define MSG_SZ_IS_VALID(in_size) ((in_size) <= MAX_MSG_SZ) + +#define SYNC_MSG_ID(pf_to_mgmt) ((pf_to_mgmt)->sync_msg_id) + +#define SYNC_MSG_ID_INC(pf_to_mgmt) (SYNC_MSG_ID(pf_to_mgmt) = \ + (SYNC_MSG_ID(pf_to_mgmt) + 1) & SYNC_MSG_ID_MASK) + +#define ASYNC_MSG_ID(pf_to_mgmt) ((pf_to_mgmt)->async_msg_id) + +#define ASYNC_MSG_ID_INC(pf_to_mgmt) (ASYNC_MSG_ID(pf_to_mgmt) = \ + ((ASYNC_MSG_ID(pf_to_mgmt) + 1) & ASYNC_MSG_ID_MASK) \ + | ASYNC_MSG_FLAG) + +#define HINIC_SEQ_ID_MAX_VAL 42 +#define HINIC_MSG_SEG_LEN 48 + +#define GET_CURR_AEQ_ELEM(eq) GET_AEQ_ELEM((eq), (eq)->cons_idx) + +#define EQ_ELEM_DESC_TYPE_SHIFT 0 +#define EQ_ELEM_DESC_SRC_SHIFT 7 +#define EQ_ELEM_DESC_SIZE_SHIFT 8 +#define EQ_ELEM_DESC_WRAPPED_SHIFT 31 + +#define EQ_ELEM_DESC_TYPE_MASK 0x7FU +#define EQ_ELEM_DESC_SRC_MASK 0x1U +#define 
EQ_ELEM_DESC_SIZE_MASK 0xFFU +#define EQ_ELEM_DESC_WRAPPED_MASK 0x1U + +#define EQ_MSIX_RESEND_TIMER_CLEAR 1 + +#define EQ_ELEM_DESC_GET(val, member) \ + (((val) >> EQ_ELEM_DESC_##member##_SHIFT) & \ + EQ_ELEM_DESC_##member##_MASK) + +#define HINIC_MGMT_CHANNEL_STATUS_SHIFT 0x0 +#define HINIC_MGMT_CHANNEL_STATUS_MASK 0x1 + +#define HINIC_GET_MGMT_CHANNEL_STATUS(val, member) \ + (((val) >> HINIC_##member##_SHIFT) & HINIC_##member##_MASK) + +#define HINIC_MSG_TO_MGMT_MAX_LEN 2016 + +/** + * mgmt_msg_len - calculate the total message length + * @msg_data_len: the length of the message data + * Return: the total message length + */ +static u16 mgmt_msg_len(u16 msg_data_len) +{ + /* u64 - the size of the header */ + u16 msg_size = (u16)(MGMT_MSG_RSVD_FOR_DEV + sizeof(u64) + + msg_data_len); + + if (msg_size > MGMT_MSG_SIZE_MIN) + msg_size = MGMT_MSG_SIZE_MIN + + ALIGN((msg_size - MGMT_MSG_SIZE_MIN), + MGMT_MSG_SIZE_STEP); + else + msg_size = MGMT_MSG_SIZE_MIN; + + return msg_size; +} + +/** + * prepare_header - prepare the header of the message + * @pf_to_mgmt: PF to MGMT channel + * @header: pointer of the header to prepare + * @msg_len: the length of the message + * @mod: module in the chip that will get the message + * @ack_type: the type to response + * @direction: the direction of the original message + * @cmd: the command to do + * @msg_id: message id + */ +static void prepare_header(struct hinic_msg_pf_to_mgmt *pf_to_mgmt, + u64 *header, int msg_len, enum hinic_mod_type mod, + enum hinic_msg_ack_type ack_type, + enum hinic_msg_direction_type direction, + u8 cmd, u32 msg_id) +{ + struct hinic_hwif *hwif = pf_to_mgmt->hwdev->hwif; + + *header = HINIC_MSG_HEADER_SET(msg_len, MSG_LEN) | + HINIC_MSG_HEADER_SET(mod, MODULE) | + HINIC_MSG_HEADER_SET(msg_len, SEG_LEN) | + HINIC_MSG_HEADER_SET(ack_type, NO_ACK) | + HINIC_MSG_HEADER_SET(0, ASYNC_MGMT_TO_PF) | + HINIC_MSG_HEADER_SET(0, SEQID) | + HINIC_MSG_HEADER_SET(LAST_SEGMENT, LAST) | + HINIC_MSG_HEADER_SET(direction, DIRECTION) | + HINIC_MSG_HEADER_SET(cmd, CMD) | + HINIC_MSG_HEADER_SET(HINIC_PCI_INTF_IDX(hwif), PCI_INTF_IDX) | + HINIC_MSG_HEADER_SET(hwif->attr.port_to_port_idx, P2P_IDX) | + HINIC_MSG_HEADER_SET(msg_id, MSG_ID); +} + +/** + * prepare_mgmt_cmd - prepare the mgmt command + * @mgmt_cmd: pointer to the command to prepare + * @header: pointer of the header to prepare + * @msg: the data of the message + * @msg_len: the length of the message + */ +static void prepare_mgmt_cmd(u8 *mgmt_cmd, u64 *header, void *msg, + int msg_len) +{ + u32 cmd_buf_max = MAX_PF_MGMT_BUF_SIZE; + + memset(mgmt_cmd, 0, MGMT_MSG_RSVD_FOR_DEV); + + mgmt_cmd += MGMT_MSG_RSVD_FOR_DEV; + cmd_buf_max -= MGMT_MSG_RSVD_FOR_DEV; + memcpy(mgmt_cmd, header, sizeof(*header)); + + mgmt_cmd += sizeof(*header); + cmd_buf_max -= sizeof(*header); + memcpy(mgmt_cmd, msg, msg_len); +} + +/** + * alloc_recv_msg - allocate received message memory + * @recv_msg: pointer that will hold the allocated data + * Return: 0 - success, negative - failure + */ +static int alloc_recv_msg(struct hinic_recv_msg *recv_msg) +{ + int err; + + recv_msg->msg = kzalloc(MAX_PF_MGMT_BUF_SIZE, GFP_KERNEL); + if (!recv_msg->msg) { + PMD_DRV_LOG(ERR, "Allocate recv msg buf failed"); + return -ENOMEM; + } + + recv_msg->buf_out = kzalloc(MAX_PF_MGMT_BUF_SIZE, GFP_KERNEL); + if (!recv_msg->buf_out) { + PMD_DRV_LOG(ERR, "Allocate recv msg output buf failed"); + err = -ENOMEM; + goto alloc_buf_out_err; + } + + return 0; + +alloc_buf_out_err: + kfree(recv_msg->msg); + return err; +} + +/** + * free_recv_msg 
- free received message memory + * @recv_msg: pointer that holds the allocated data + */ +static void free_recv_msg(struct hinic_recv_msg *recv_msg) +{ + kfree(recv_msg->buf_out); + kfree(recv_msg->msg); +} + +/** + * alloc_msg_buf - allocate all the message buffers of PF to MGMT channel + * @pf_to_mgmt: PF to MGMT channel + * Return: 0 - success, negative - failure + */ +static int alloc_msg_buf(struct hinic_msg_pf_to_mgmt *pf_to_mgmt) +{ + int err; + + err = alloc_recv_msg(&pf_to_mgmt->recv_msg_from_mgmt); + if (err) { + PMD_DRV_LOG(ERR, "Allocate recv msg failed"); + return err; + } + + err = alloc_recv_msg(&pf_to_mgmt->recv_resp_msg_from_mgmt); + if (err) { + PMD_DRV_LOG(ERR, "Allocate resp recv msg failed"); + goto alloc_msg_for_resp_err; + } + + pf_to_mgmt->async_msg_buf = kzalloc(MAX_PF_MGMT_BUF_SIZE, GFP_KERNEL); + if (!pf_to_mgmt->async_msg_buf) { + PMD_DRV_LOG(ERR, "Allocate async msg buf failed"); + err = -ENOMEM; + goto async_msg_buf_err; + } + + pf_to_mgmt->sync_msg_buf = kzalloc(MAX_PF_MGMT_BUF_SIZE, GFP_KERNEL); + if (!pf_to_mgmt->sync_msg_buf) { + PMD_DRV_LOG(ERR, "Allocate sync msg buf failed"); + err = -ENOMEM; + goto sync_msg_buf_err; + } + + return 0; + +sync_msg_buf_err: + kfree(pf_to_mgmt->async_msg_buf); + +async_msg_buf_err: + free_recv_msg(&pf_to_mgmt->recv_resp_msg_from_mgmt); + +alloc_msg_for_resp_err: + free_recv_msg(&pf_to_mgmt->recv_msg_from_mgmt); + + return err; +} + +/** + * free_msg_buf - free all the message buffers of PF to MGMT channel + * @pf_to_mgmt: PF to MGMT channel + * Return: 0 - success, negative - failure + */ +static void free_msg_buf(struct hinic_msg_pf_to_mgmt *pf_to_mgmt) +{ + kfree(pf_to_mgmt->sync_msg_buf); + kfree(pf_to_mgmt->async_msg_buf); + + free_recv_msg(&pf_to_mgmt->recv_resp_msg_from_mgmt); + free_recv_msg(&pf_to_mgmt->recv_msg_from_mgmt); +} + +static int hinic_get_mgmt_channel_status(void *hwdev) +{ + struct hinic_hwif *hwif = ((struct hinic_hwdev *)hwdev)->hwif; + u32 val; + + if (hinic_func_type((struct hinic_hwdev *)hwdev) == TYPE_VF) + return false; + + val = hinic_hwif_read_reg(hwif, HINIC_ICPL_RESERVD_ADDR); + + return HINIC_GET_MGMT_CHANNEL_STATUS(val, MGMT_CHANNEL_STATUS); +} + +/** + * send_msg_to_mgmt_async - send async message + * @pf_to_mgmt: PF to MGMT channel + * @mod: module in the chip that will get the message + * @cmd: command of the message + * @msg: the data of the message + * @msg_len: the length of the message + * @direction: the direction of the original message + * @resp_msg_id: message id of response + * Return: 0 - success, negative - failure + */ +static int send_msg_to_mgmt_async(struct hinic_msg_pf_to_mgmt *pf_to_mgmt, + enum hinic_mod_type mod, u8 cmd, + void *msg, u16 msg_len, + enum hinic_msg_direction_type direction, + u16 resp_msg_id) +{ + void *mgmt_cmd = pf_to_mgmt->async_msg_buf; + struct hinic_api_cmd_chain *chain; + u64 header; + u16 cmd_size = mgmt_msg_len(msg_len); + + if (direction == HINIC_MSG_RESPONSE) + prepare_header(pf_to_mgmt, &header, msg_len, mod, HINIC_MSG_ACK, + direction, cmd, resp_msg_id); + else + prepare_header(pf_to_mgmt, &header, msg_len, mod, HINIC_MSG_ACK, + direction, cmd, ASYNC_MSG_ID(pf_to_mgmt)); + + prepare_mgmt_cmd((u8 *)mgmt_cmd, &header, msg, msg_len); + + chain = pf_to_mgmt->cmd_chain[HINIC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU]; + + return hinic_api_cmd_write(chain, HINIC_NODE_ID_MGMT_HOST, mgmt_cmd, + cmd_size); +} + +/** + * send_msg_to_mgmt_sync - send async message + * @pf_to_mgmt: PF to MGMT channel + * @mod: module in the chip that will get the message + * 
@cmd: command of the message + * @msg: the msg data + * @msg_len: the msg data length + * @ack_type: indicate mgmt command whether need ack or not + * @direction: the direction of the original message + * @resp_msg_id: msg id to response for + * Return: 0 - success, negative - failure + */ +static int send_msg_to_mgmt_sync(struct hinic_msg_pf_to_mgmt *pf_to_mgmt, + enum hinic_mod_type mod, u8 cmd, + void *msg, u16 msg_len, + enum hinic_msg_ack_type ack_type, + enum hinic_msg_direction_type direction, + __rte_unused u16 resp_msg_id) +{ + void *mgmt_cmd = pf_to_mgmt->sync_msg_buf; + struct hinic_api_cmd_chain *chain; + u64 header; + u16 cmd_size = mgmt_msg_len(msg_len); + + /* If fw is hot active, return failed */ + if (hinic_get_mgmt_channel_status(pf_to_mgmt->hwdev)) { + if (mod == HINIC_MOD_COMM || mod == HINIC_MOD_L2NIC) + return HINIC_DEV_BUSY_ACTIVE_FW; + else + return -EBUSY; + } + + if (direction == HINIC_MSG_RESPONSE) + prepare_header(pf_to_mgmt, &header, msg_len, mod, ack_type, + direction, cmd, resp_msg_id); + else + prepare_header(pf_to_mgmt, &header, msg_len, mod, ack_type, + direction, cmd, SYNC_MSG_ID(pf_to_mgmt)); + + prepare_mgmt_cmd((u8 *)mgmt_cmd, &header, msg, msg_len); + + chain = pf_to_mgmt->cmd_chain[HINIC_API_CMD_PMD_WRITE_TO_MGMT]; + + return hinic_api_cmd_write(chain, HINIC_NODE_ID_MGMT_HOST, + mgmt_cmd, cmd_size); +} + +/** + * hinic_pf_to_mgmt_init - initialize PF to MGMT channel + * @hwdev: the pointer to the private hardware device object + * Return: 0 - success, negative - failure + */ +static int hinic_pf_to_mgmt_init(struct hinic_hwdev *hwdev) +{ + struct hinic_msg_pf_to_mgmt *pf_to_mgmt; + int err; + + pf_to_mgmt = kzalloc(sizeof(*pf_to_mgmt), GFP_KERNEL); + if (!pf_to_mgmt) { + PMD_DRV_LOG(ERR, "Allocate pf to mgmt mem failed"); + return -ENOMEM; + } + + hwdev->pf_to_mgmt = pf_to_mgmt; + pf_to_mgmt->hwdev = hwdev; + + err = hinic_mutex_init(&pf_to_mgmt->sync_msg_mutex, NULL); + if (err) + goto mutex_init_err; + + err = alloc_msg_buf(pf_to_mgmt); + if (err) { + PMD_DRV_LOG(ERR, "Allocate msg buffers failed"); + goto alloc_msg_buf_err; + } + + err = hinic_api_cmd_init(hwdev, pf_to_mgmt->cmd_chain); + if (err) { + PMD_DRV_LOG(ERR, "Init the api cmd chains failed"); + goto api_cmd_init_err; + } + + return 0; + +api_cmd_init_err: + free_msg_buf(pf_to_mgmt); + +alloc_msg_buf_err: + hinic_mutex_destroy(&pf_to_mgmt->sync_msg_mutex); + +mutex_init_err: + kfree(pf_to_mgmt); + + return err; +} + +/** + * hinic_pf_to_mgmt_free - free PF to MGMT channel + * @hwdev: the pointer to the private hardware device object + */ +static void hinic_pf_to_mgmt_free(struct hinic_hwdev *hwdev) +{ + struct hinic_msg_pf_to_mgmt *pf_to_mgmt = hwdev->pf_to_mgmt; + + hinic_api_cmd_free(pf_to_mgmt->cmd_chain); + free_msg_buf(pf_to_mgmt); + hinic_mutex_destroy(&pf_to_mgmt->sync_msg_mutex); + kfree(pf_to_mgmt); +} + +static int +hinic_pf_to_mgmt_sync(struct hinic_hwdev *hwdev, + enum hinic_mod_type mod, u8 cmd, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size, u32 timeout) +{ + struct hinic_msg_pf_to_mgmt *pf_to_mgmt = hwdev->pf_to_mgmt; + struct hinic_recv_msg *recv_msg; + u32 timeo; + int err, i; + + err = hinic_mutex_lock(&pf_to_mgmt->sync_msg_mutex); + if (err) + return err; + + SYNC_MSG_ID_INC(pf_to_mgmt); + recv_msg = &pf_to_mgmt->recv_resp_msg_from_mgmt; + + err = send_msg_to_mgmt_sync(pf_to_mgmt, mod, cmd, buf_in, in_size, + HINIC_MSG_ACK, HINIC_MSG_DIRECT_SEND, + MSG_NO_RESP); + if (err) { + PMD_DRV_LOG(ERR, "Send msg to mgmt failed"); + goto unlock_sync_msg; + } + + timeo 
= msecs_to_jiffies(timeout ? timeout : MGMT_MSG_TIMEOUT); + for (i = 0; i < pf_to_mgmt->rx_aeq->poll_retry_nr; i++) { + err = hinic_aeq_poll_msg(pf_to_mgmt->rx_aeq, timeo, NULL); + if (err) { + PMD_DRV_LOG(ERR, "Poll mgmt rsp timeout, mod=%d cmd=%d msg_id=%u rc=%d", + mod, cmd, pf_to_mgmt->sync_msg_id, err); + err = -ETIMEDOUT; + hinic_dump_aeq_info(hwdev); + goto unlock_sync_msg; + } else { + if (mod == recv_msg->mod && cmd == recv_msg->cmd && + recv_msg->msg_id == pf_to_mgmt->sync_msg_id) { + /* the expected response polled */ + break; + } + PMD_DRV_LOG(ERR, "AEQ[%d] poll(mod=%d, cmd=%d, msg_id=%u) an " + "unexpected(mod=%d, cmd=%d, msg_id=%u) response", + pf_to_mgmt->rx_aeq->q_id, mod, cmd, + pf_to_mgmt->sync_msg_id, recv_msg->mod, + recv_msg->cmd, recv_msg->msg_id); + } + } + + if (i == pf_to_mgmt->rx_aeq->poll_retry_nr) { + PMD_DRV_LOG(ERR, "Get %d unexpected mgmt rsp from AEQ[%d], poll mgmt rsp failed", + i, pf_to_mgmt->rx_aeq->q_id); + err = -EBADMSG; + goto unlock_sync_msg; + } + + rte_smp_rmb(); + if (recv_msg->msg_len && buf_out && out_size) { + if (recv_msg->msg_len <= *out_size) { + memcpy(buf_out, recv_msg->msg, + recv_msg->msg_len); + *out_size = recv_msg->msg_len; + } else { + PMD_DRV_LOG(ERR, "Mgmt rsp's msg len: %u overflow.", + recv_msg->msg_len); + err = -ERANGE; + } + } + +unlock_sync_msg: + if (err && out_size) + *out_size = 0; + (void)hinic_mutex_unlock(&pf_to_mgmt->sync_msg_mutex); + return err; +} + +int hinic_msg_to_mgmt_sync(void *hwdev, enum hinic_mod_type mod, u8 cmd, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size, u32 timeout) +{ + int rc = HINIC_ERROR; + + if (!hwdev || in_size > HINIC_MSG_TO_MGMT_MAX_LEN) + return -EINVAL; + + if (hinic_func_type(hwdev) == TYPE_VF) { + rc = hinic_mbox_to_pf(hwdev, mod, cmd, buf_in, in_size, + buf_out, out_size, timeout); + } else { + rc = hinic_pf_to_mgmt_sync(hwdev, mod, cmd, buf_in, in_size, + buf_out, out_size, timeout); + } + + return rc; +} + +int hinic_msg_to_mgmt_no_ack(void *hwdev, enum hinic_mod_type mod, u8 cmd, + void *buf_in, u16 in_size, __rte_unused void *buf_out, + __rte_unused u16 *out_size) +{ + struct hinic_msg_pf_to_mgmt *pf_to_mgmt = + ((struct hinic_hwdev *)hwdev)->pf_to_mgmt; + int err = -EINVAL; + + if (!MSG_SZ_IS_VALID(in_size)) { + PMD_DRV_LOG(ERR, "Mgmt msg buffer size is invalid"); + return err; + } + + err = hinic_mutex_lock(&pf_to_mgmt->sync_msg_mutex); + if (err) + return err; + + err = send_msg_to_mgmt_sync(pf_to_mgmt, mod, cmd, buf_in, in_size, + HINIC_MSG_NO_ACK, HINIC_MSG_DIRECT_SEND, + MSG_NO_RESP); + + (void)hinic_mutex_unlock(&pf_to_mgmt->sync_msg_mutex); + + return err; +} + +static bool check_mgmt_seq_id_and_seg_len(struct hinic_recv_msg *recv_msg, + u8 seq_id, u8 seg_len) +{ + if (seq_id > HINIC_SEQ_ID_MAX_VAL || seg_len > HINIC_MSG_SEG_LEN) + return false; + + if (seq_id == 0) { + recv_msg->sed_id = seq_id; + } else { + if (seq_id != recv_msg->sed_id + 1) { + recv_msg->sed_id = 0; + return false; + } + recv_msg->sed_id = seq_id; + } + + return true; +} + +/** + * hinic_mgmt_recv_msg_handler - handler for message from mgmt cpu + * @pf_to_mgmt: PF to MGMT channel + * @recv_msg: received message details + * @param: customized parameter + */ +static void hinic_mgmt_recv_msg_handler(struct hinic_msg_pf_to_mgmt *pf_to_mgmt, + struct hinic_recv_msg *recv_msg, + void *param) +{ + void *buf_out = recv_msg->buf_out; + u16 out_size = 0; + + switch (recv_msg->mod) { + case HINIC_MOD_COMM: + hinic_comm_async_event_handle(pf_to_mgmt->hwdev, + recv_msg->cmd, recv_msg->msg, + 
recv_msg->msg_len, + buf_out, &out_size); + break; + case HINIC_MOD_L2NIC: + hinic_l2nic_async_event_handle(pf_to_mgmt->hwdev, param, + recv_msg->cmd, recv_msg->msg, + recv_msg->msg_len, + buf_out, &out_size); + break; + case HINIC_MOD_HILINK: + hinic_hilink_async_event_handle(pf_to_mgmt->hwdev, + recv_msg->cmd, recv_msg->msg, + recv_msg->msg_len, + buf_out, &out_size); + break; + default: + PMD_DRV_LOG(ERR, "No handler, mod: %d", recv_msg->mod); + break; + } + + if (!recv_msg->async_mgmt_to_pf) { + if (!out_size) + out_size = BUF_OUT_DEFAULT_SIZE; + + /* MGMT sent sync msg, send the response */ + (void)send_msg_to_mgmt_async(pf_to_mgmt, recv_msg->mod, + recv_msg->cmd, buf_out, out_size, + HINIC_MSG_RESPONSE, + recv_msg->msg_id); + } +} + +/** + * recv_mgmt_msg_handler - handler a message from mgmt cpu + * @pf_to_mgmt: PF to MGMT channel + * @header: the header of the message + * @recv_msg: received message details + * @param: customized parameter + * Return: 0 when aeq is response message, -1 default result, + * and when wrong message or not last message + */ +static int recv_mgmt_msg_handler(struct hinic_msg_pf_to_mgmt *pf_to_mgmt, + u8 *header, struct hinic_recv_msg *recv_msg, + void *param) +{ + u64 msg_header = *((u64 *)header); + void *msg_body = header + sizeof(msg_header); + u8 *dest_msg; + u8 seq_id, seq_len; + u32 msg_buf_max = MAX_PF_MGMT_BUF_SIZE; + + seq_id = HINIC_MSG_HEADER_GET(msg_header, SEQID); + seq_len = HINIC_MSG_HEADER_GET(msg_header, SEG_LEN); + + if (!check_mgmt_seq_id_and_seg_len(recv_msg, seq_id, seq_len)) { + PMD_DRV_LOG(ERR, + "Mgmt msg sequence and segment check failed, " + "func id: 0x%x, front id: 0x%x, current id: 0x%x, seg len: 0x%x", + hinic_global_func_id(pf_to_mgmt->hwdev), + recv_msg->sed_id, seq_id, seq_len); + return HINIC_ERROR; + } + + dest_msg = (u8 *)recv_msg->msg + seq_id * HINIC_MSG_SEG_LEN; + msg_buf_max -= seq_id * HINIC_MSG_SEG_LEN; + memcpy(dest_msg, msg_body, seq_len); + + if (!HINIC_MSG_HEADER_GET(msg_header, LAST)) + return HINIC_ERROR; + + recv_msg->cmd = HINIC_MSG_HEADER_GET(msg_header, CMD); + recv_msg->mod = HINIC_MSG_HEADER_GET(msg_header, MODULE); + recv_msg->async_mgmt_to_pf = HINIC_MSG_HEADER_GET(msg_header, + ASYNC_MGMT_TO_PF); + recv_msg->msg_len = HINIC_MSG_HEADER_GET(msg_header, MSG_LEN); + recv_msg->msg_id = HINIC_MSG_HEADER_GET(msg_header, MSG_ID); + + if (HINIC_MSG_HEADER_GET(msg_header, DIRECTION) == HINIC_MSG_RESPONSE) + return HINIC_OK; + + hinic_mgmt_recv_msg_handler(pf_to_mgmt, recv_msg, param); + + return HINIC_ERROR; +} + +/** + * hinic_mgmt_msg_aeqe_handler - handler for a mgmt message event + * @hwdev: the pointer to the private hardware device object + * @header: the header of the message + * @size: unused + * @param: customized parameter + * Return: 0 when aeq is response message, + * -1 default result, and when wrong message or not last message + */ +static int hinic_mgmt_msg_aeqe_handler(void *hwdev, u8 *header, + __rte_unused u8 size, void *param) +{ + struct hinic_msg_pf_to_mgmt *pf_to_mgmt = + ((struct hinic_hwdev *)hwdev)->pf_to_mgmt; + struct hinic_recv_msg *recv_msg; + + recv_msg = (HINIC_MSG_HEADER_GET(*(u64 *)header, DIRECTION) == + HINIC_MSG_DIRECT_SEND) ? 
+ &pf_to_mgmt->recv_msg_from_mgmt : + &pf_to_mgmt->recv_resp_msg_from_mgmt; + + return recv_mgmt_msg_handler(pf_to_mgmt, header, recv_msg, param); +} + +static int hinic_handle_aeqe(void *handle, enum hinic_aeq_type event, + u8 *data, u8 size, void *param) +{ + int rc = 0; + + switch (event) { + case HINIC_MSG_FROM_MGMT_CPU: + rc = hinic_mgmt_msg_aeqe_handler(handle, data, size, param); + break; + case HINIC_MBX_FROM_FUNC: + rc = hinic_mbox_func_aeqe_handler(handle, data, size, param); + break; + default: + PMD_DRV_LOG(ERR, "Unknown event type: 0x%x, size: %d", + event, size); + rc = HINIC_ERROR; + break; + } + + return rc; +} + +/** + * hinic_aeq_poll_msg - poll one or continue aeqe, and call dedicated process + * @eq: aeq of the chip + * @timeout: 0 - poll all aeqe in eq, used in interrupt mode, + * > 0 - poll aeq until get aeqe with 'last' field set to 1, + * used in polling mode. + * @param: customized parameter + * Return: 0 - Success, EIO - poll timeout, ENODEV - swe not support + */ +int hinic_aeq_poll_msg(struct hinic_eq *eq, u32 timeout, void *param) +{ + struct hinic_aeq_elem *aeqe_pos; + enum hinic_aeq_type event; + u32 aeqe_desc = 0; + u16 i; + u8 size; + int done = HINIC_ERROR; + int err = -EFAULT; + unsigned long end; + + for (i = 0; ((timeout == 0) && (i < eq->eq_len)) || + ((timeout > 0) && (done != HINIC_OK) && (i < eq->eq_len)); i++) { + err = -EIO; + end = jiffies + msecs_to_jiffies(timeout); + do { + aeqe_pos = GET_CURR_AEQ_ELEM(eq); + rte_rmb(); + + /* Data in HW is in Big endian Format */ + aeqe_desc = be32_to_cpu(aeqe_pos->desc); + + /* HW updates wrapped bit, + * when it adds eq element event + */ + if (EQ_ELEM_DESC_GET(aeqe_desc, WRAPPED) + != eq->wrapped) { + err = 0; + break; + } + + if (timeout != 0) + usleep(1000); + } while (time_before(jiffies, end)); + + if (err != HINIC_OK) /*poll time out*/ + break; + + event = EQ_ELEM_DESC_GET(aeqe_desc, TYPE); + if (EQ_ELEM_DESC_GET(aeqe_desc, SRC)) { + PMD_DRV_LOG(ERR, "AEQ sw event not support %d", event); + return -ENODEV; + + } else { + size = EQ_ELEM_DESC_GET(aeqe_desc, SIZE); + done = hinic_handle_aeqe(eq->hwdev, event, + aeqe_pos->aeqe_data, + size, param); + } + + eq->cons_idx++; + if (eq->cons_idx == eq->eq_len) { + eq->cons_idx = 0; + eq->wrapped = !eq->wrapped; + } + } + + eq_update_ci(eq); + + return err; +} + +int hinic_comm_pf_to_mgmt_init(struct hinic_hwdev *hwdev) +{ + int rc; + + /* VF do not support send msg to mgmt directly */ + if (hinic_func_type(hwdev) == TYPE_VF) + return 0; + + rc = hinic_pf_to_mgmt_init(hwdev); + if (rc) + return rc; + + hwdev->pf_to_mgmt->rx_aeq = &hwdev->aeqs->aeq[HINIC_MGMT_RSP_AEQN]; + + return 0; +} + +void hinic_comm_pf_to_mgmt_free(struct hinic_hwdev *hwdev) +{ + /* VF do not support send msg to mgmt directly */ + if (hinic_func_type(hwdev) == TYPE_VF) + return; + + hinic_pf_to_mgmt_free(hwdev); +} + +void hinic_dev_handle_aeq_event(struct hinic_hwdev *hwdev, void *param) +{ + struct hinic_eq *aeq = &hwdev->aeqs->aeq[0]; + + /* clear resend timer cnt register */ + hinic_misx_intr_clear_resend_bit(hwdev, aeq->eq_irq.msix_entry_idx, + EQ_MSIX_RESEND_TIMER_CLEAR); + (void)hinic_aeq_poll_msg(aeq, 0, param); +} diff --git a/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_mgmt.h b/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_mgmt.h new file mode 100644 index 000000000..52b319ead --- /dev/null +++ b/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_mgmt.h @@ -0,0 +1,119 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Huawei Technologies Co., Ltd + */ + +#ifndef 
_HINIC_PMD_MGMT_H_ +#define _HINIC_PMD_MGMT_H_ + +#include "hinic_pmd_api_cmd.h" +#include "hinic_pmd_eqs.h" + +#define HINIC_MSG_HEADER_MSG_LEN_SHIFT 0 +#define HINIC_MSG_HEADER_MODULE_SHIFT 11 +#define HINIC_MSG_HEADER_SEG_LEN_SHIFT 16 +#define HINIC_MSG_HEADER_NO_ACK_SHIFT 22 +#define HINIC_MSG_HEADER_ASYNC_MGMT_TO_PF_SHIFT 23 +#define HINIC_MSG_HEADER_SEQID_SHIFT 24 +#define HINIC_MSG_HEADER_LAST_SHIFT 30 +#define HINIC_MSG_HEADER_DIRECTION_SHIFT 31 +#define HINIC_MSG_HEADER_CMD_SHIFT 32 +#define HINIC_MSG_HEADER_PCI_INTF_IDX_SHIFT 48 +#define HINIC_MSG_HEADER_P2P_IDX_SHIFT 50 +#define HINIC_MSG_HEADER_MSG_ID_SHIFT 54 + +#define HINIC_MSG_HEADER_MSG_LEN_MASK 0x7FF +#define HINIC_MSG_HEADER_MODULE_MASK 0x1F +#define HINIC_MSG_HEADER_SEG_LEN_MASK 0x3F +#define HINIC_MSG_HEADER_NO_ACK_MASK 0x1 +#define HINIC_MSG_HEADER_ASYNC_MGMT_TO_PF_MASK 0x1 +#define HINIC_MSG_HEADER_SEQID_MASK 0x3F +#define HINIC_MSG_HEADER_LAST_MASK 0x1 +#define HINIC_MSG_HEADER_DIRECTION_MASK 0x1 +#define HINIC_MSG_HEADER_CMD_MASK 0xFF +#define HINIC_MSG_HEADER_PCI_INTF_IDX_MASK 0x3 +#define HINIC_MSG_HEADER_P2P_IDX_MASK 0xF +#define HINIC_MSG_HEADER_MSG_ID_MASK 0x3FF + +#define HINIC_DEV_BUSY_ACTIVE_FW 0xFE + +#define HINIC_MSG_HEADER_GET(val, member) \ + (((val) >> HINIC_MSG_HEADER_##member##_SHIFT) & \ + HINIC_MSG_HEADER_##member##_MASK) + +#define HINIC_MSG_HEADER_SET(val, member) \ + ((u64)((val) & HINIC_MSG_HEADER_##member##_MASK) << \ + HINIC_MSG_HEADER_##member##_SHIFT) + +#define HINIC_MGMT_RSP_AEQN (1) + +enum hinic_msg_direction_type { + HINIC_MSG_DIRECT_SEND = 0, + HINIC_MSG_RESPONSE = 1 +}; +enum hinic_msg_segment_type { + NOT_LAST_SEGMENT = 0, + LAST_SEGMENT = 1, +}; + +enum hinic_msg_ack_type { + HINIC_MSG_ACK = 0, + HINIC_MSG_NO_ACK = 1, +}; + +struct hinic_recv_msg { + void *msg; + void *buf_out; + + u16 msg_len; + enum hinic_mod_type mod; + u8 cmd; + u16 msg_id; + int async_mgmt_to_pf; + u8 sed_id; +}; + +#define HINIC_COMM_SELF_CMD_MAX 8 + +enum comm_pf_to_mgmt_event_state { + SEND_EVENT_START = 0, + SEND_EVENT_TIMEOUT, + SEND_EVENT_END, +}; + +struct hinic_msg_pf_to_mgmt { + struct hinic_hwdev *hwdev; + + /* mutex for sync message */ + pthread_mutex_t sync_msg_mutex; + + void *async_msg_buf; + void *sync_msg_buf; + + struct hinic_recv_msg recv_msg_from_mgmt; + struct hinic_recv_msg recv_resp_msg_from_mgmt; + + u16 async_msg_id; + u16 sync_msg_id; + + struct hinic_api_cmd_chain *cmd_chain[HINIC_API_CMD_MAX]; + + struct hinic_eq *rx_aeq; +}; + +int hinic_msg_to_mgmt_no_ack(void *hwdev, enum hinic_mod_type mod, u8 cmd, + void *buf_in, u16 in_size, void *buf_out, + u16 *out_size); + +int hinic_comm_pf_to_mgmt_init(struct hinic_hwdev *hwdev); + +void hinic_comm_pf_to_mgmt_free(struct hinic_hwdev *hwdev); + +int hinic_aeq_poll_msg(struct hinic_eq *eq, u32 timeout, void *param); + +int hinic_msg_to_mgmt_sync(void *hwdev, enum hinic_mod_type mod, u8 cmd, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size, u32 timeout); + +void hinic_dev_handle_aeq_event(struct hinic_hwdev *hwdev, void *param); + +#endif /* _HINIC_PMD_MGMT_H_ */ diff --git a/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_niccfg.c b/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_niccfg.c new file mode 100644 index 000000000..c5663dfab --- /dev/null +++ b/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_niccfg.c @@ -0,0 +1,2121 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Huawei Technologies Co., Ltd + */ + +#include "hinic_compat.h" +#include "hinic_pmd_hwdev.h" +#include "hinic_pmd_hwif.h" +#include 
"hinic_pmd_eqs.h" +#include "hinic_pmd_wq.h" +#include "hinic_pmd_mgmt.h" +#include "hinic_pmd_cmdq.h" +#include "hinic_pmd_niccfg.h" +#include "hinic_pmd_mbox.h" + +#define l2nic_msg_to_mgmt_sync(hwdev, cmd, buf_in, \ + in_size, buf_out, out_size) \ + hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_L2NIC, cmd, \ + buf_in, in_size, \ + buf_out, out_size, 0) + + +/** + * hinic_init_function_table - Initialize function table. + * + * @param hwdev + * The hardware interface of a nic device. + * @param rx_buf_sz + * Receive buffer size. + * + * @return + * 0 on success. + * negative error value otherwise. + */ +int hinic_init_function_table(void *hwdev, u16 rx_buf_sz) +{ + struct hinic_function_table function_table; + u16 out_size = sizeof(function_table); + int err; + + if (!hwdev) { + PMD_DRV_LOG(ERR, "Hwdev is NULL"); + return -EINVAL; + } + + memset(&function_table, 0, sizeof(function_table)); + function_table.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1; + function_table.func_id = hinic_global_func_id(hwdev); + function_table.mtu = 0x3FFF; /* default, max mtu */ + function_table.rx_wqe_buf_size = rx_buf_sz; + + err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_L2NIC, + HINIC_PORT_CMD_INIT_FUNC, + &function_table, sizeof(function_table), + &function_table, &out_size, 0); + if (err || function_table.mgmt_msg_head.status || !out_size) { + PMD_DRV_LOG(ERR, + "Failed to init func table, err: %d, status: 0x%x, out size: 0x%x", + err, function_table.mgmt_msg_head.status, out_size); + return -EFAULT; + } + + return 0; +} + +/** + * hinic_get_base_qpn - Get global queue number. + * + * @param hwdev + * The hardware interface of a nic device. + * @param global_qpn + * Global queue number. + * + * @return + * 0 on success. + * negative error value otherwise. + */ +int hinic_get_base_qpn(void *hwdev, u16 *global_qpn) +{ + struct hinic_cmd_qpn cmd_qpn; + u16 out_size = sizeof(cmd_qpn); + int err; + + if (!hwdev || !global_qpn) { + PMD_DRV_LOG(ERR, "Hwdev or global_qpn is NULL"); + return -EINVAL; + } + + memset(&cmd_qpn, 0, sizeof(cmd_qpn)); + cmd_qpn.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1; + cmd_qpn.func_id = hinic_global_func_id(hwdev); + + err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_L2NIC, + HINIC_PORT_CMD_GET_GLOBAL_QPN, + &cmd_qpn, sizeof(cmd_qpn), &cmd_qpn, + &out_size, 0); + if (err || !out_size || cmd_qpn.mgmt_msg_head.status) { + PMD_DRV_LOG(ERR, + "Failed to get base qpn, err: %d, status: 0x%x, out size: 0x%x", + err, cmd_qpn.mgmt_msg_head.status, out_size); + return -EINVAL; + } + + *global_qpn = cmd_qpn.base_qpn; + + return 0; +} + +/** + * hinic_set_mac - Init mac_vlan table in NIC. + * + * @param hwdev + * The hardware interface of a nic device. + * @param mac_addr + * MAC address. + * @param vlan_id + * Set 0 for mac_vlan table initialization. + * @param func_id + * Global function id of NIC. + * + * @return + * 0 on success. + * negative error value otherwise. 
+ */ +int hinic_set_mac(void *hwdev, u8 *mac_addr, u16 vlan_id, u16 func_id) +{ + struct hinic_port_mac_set mac_info; + u16 out_size = sizeof(mac_info); + int err; + + if (!hwdev || !mac_addr) { + PMD_DRV_LOG(ERR, "Hwdev or mac_addr is NULL"); + return -EINVAL; + } + + memset(&mac_info, 0, sizeof(mac_info)); + mac_info.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1; + mac_info.func_id = func_id; + mac_info.vlan_id = vlan_id; + memmove(mac_info.mac, mac_addr, ETH_ALEN); + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_MAC, &mac_info, + sizeof(mac_info), &mac_info, &out_size); + if (err || !out_size || (mac_info.mgmt_msg_head.status && + mac_info.mgmt_msg_head.status != HINIC_PF_SET_VF_ALREADY)) { + PMD_DRV_LOG(ERR, "Failed to set MAC, err: %d, status: 0x%x, out size: 0x%x", + err, mac_info.mgmt_msg_head.status, out_size); + return -EINVAL; + } + + if (mac_info.mgmt_msg_head.status == HINIC_PF_SET_VF_ALREADY) { + PMD_DRV_LOG(WARNING, "PF has already set vf mac, Ignore set operation."); + return HINIC_PF_SET_VF_ALREADY; + } + + return 0; +} + +/** + * hinic_del_mac - Uninit mac_vlan table in NIC. + * + * @param hwdev + * The hardware interface of a nic device. + * @param mac_addr + * MAC address. + * @param vlan_id + * Set 0 for mac_vlan table initialization. + * @param func_id + * Global function id of NIC. + * + * @return + * 0 on success. + * negative error value otherwise. + */ +int hinic_del_mac(void *hwdev, u8 *mac_addr, u16 vlan_id, u16 func_id) +{ + struct hinic_port_mac_set mac_info; + u16 out_size = sizeof(mac_info); + int err; + + if (!hwdev || !mac_addr) { + PMD_DRV_LOG(ERR, "Hwdev or mac_addr is NULL"); + return -EINVAL; + } + + if (vlan_id >= VLAN_N_VID) { + PMD_DRV_LOG(ERR, "Invalid VLAN number"); + return -EINVAL; + } + + memset(&mac_info, 0, sizeof(mac_info)); + mac_info.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1; + mac_info.func_id = func_id; + mac_info.vlan_id = vlan_id; + memmove(mac_info.mac, mac_addr, ETH_ALEN); + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_DEL_MAC, &mac_info, + sizeof(mac_info), &mac_info, &out_size); + if (err || !out_size || (mac_info.mgmt_msg_head.status && + mac_info.mgmt_msg_head.status != HINIC_PF_SET_VF_ALREADY)) { + PMD_DRV_LOG(ERR, "Failed to delete MAC, err: %d, status: 0x%x, out size: 0x%x", + err, mac_info.mgmt_msg_head.status, out_size); + return -EINVAL; + } + if (mac_info.mgmt_msg_head.status == HINIC_PF_SET_VF_ALREADY) { + PMD_DRV_LOG(WARNING, "PF has already set vf mac, Ignore delete operation."); + return HINIC_PF_SET_VF_ALREADY; + } + + return 0; +} + +/** + * hinic_get_default_mac - Get default mac address from hardware. + * + * @param hwdev + * The hardware interface of a nic device. + * @param mac_addr + * MAC address. + * + * @return + * 0 on success. + * negative error value otherwise. 
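+ *
+ * Minimal usage sketch (assumption: 'mac' is a caller-provided buffer of
+ * at least ETH_ALEN bytes):
+ *   u8 mac[ETH_ALEN];
+ *   err = hinic_get_default_mac(hwdev, mac);
+ * On success the default MAC address reported by the hardware is copied
+ * into 'mac'.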
+ */ +int hinic_get_default_mac(void *hwdev, u8 *mac_addr) +{ + struct hinic_port_mac_set mac_info; + u16 out_size = sizeof(mac_info); + int err; + + if (!hwdev || !mac_addr) { + PMD_DRV_LOG(ERR, "Hwdev or mac_addr is NULL"); + return -EINVAL; + } + + memset(&mac_info, 0, sizeof(mac_info)); + mac_info.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1; + mac_info.func_id = hinic_global_func_id(hwdev); + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_MAC, + &mac_info, sizeof(mac_info), + &mac_info, &out_size); + if (err || !out_size || mac_info.mgmt_msg_head.status) { + PMD_DRV_LOG(ERR, "Failed to get mac, err: %d, status: 0x%x, out size: 0x%x", + err, mac_info.mgmt_msg_head.status, out_size); + return -EINVAL; + } + + memmove(mac_addr, mac_info.mac, ETH_ALEN); + + return 0; +} + +/** +* hinic_update_mac - Update mac address to hardware. +* +* @param hwdev +* The hardware interface of a nic device. +* @param old_mac +* Old mac address. +* @param new_mac +* New mac address. +* @param vlan_id +* Set 0 for mac_vlan table initialization. +* @param func_id +* Global function id of NIC. +* +* @return +* 0 on success. +* negative error value otherwise. +*/ +int hinic_update_mac(void *hwdev, u8 *old_mac, u8 *new_mac, u16 vlan_id, + u16 func_id) +{ + struct hinic_port_mac_update mac_info; + u16 out_size = sizeof(mac_info); + int err; + + if (!hwdev || !old_mac || !new_mac) { + PMD_DRV_LOG(ERR, "Hwdev, old_mac or new_mac is NULL"); + return -EINVAL; + } + + memset(&mac_info, 0, sizeof(mac_info)); + mac_info.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1; + mac_info.func_id = func_id; + mac_info.vlan_id = vlan_id; + memcpy(mac_info.old_mac, old_mac, ETH_ALEN); + memcpy(mac_info.new_mac, new_mac, ETH_ALEN); + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_UPDATE_MAC, + &mac_info, sizeof(mac_info), + &mac_info, &out_size); + if (err || !out_size || + (mac_info.mgmt_msg_head.status && + mac_info.mgmt_msg_head.status != HINIC_PF_SET_VF_ALREADY)) { + PMD_DRV_LOG(ERR, "Failed to update MAC, err: %d, status: 0x%x, out size: 0x%x", + err, mac_info.mgmt_msg_head.status, out_size); + return -EINVAL; + } + if (mac_info.mgmt_msg_head.status == HINIC_PF_SET_VF_ALREADY) { + PMD_DRV_LOG(WARNING, "PF has already set vf mac, Ignore update operation"); + return HINIC_PF_SET_VF_ALREADY; + } + + return 0; +} + +/** + * hinic_set_port_mtu - Set MTU to port. + * + * @param hwdev + * The hardware interface of a nic device. + * @param new_mtu + * MTU size. + * + * @return + * 0 on success. + * negative error value otherwise. + */ +int hinic_set_port_mtu(void *hwdev, u32 new_mtu) +{ + struct hinic_mtu mtu_info; + u16 out_size = sizeof(mtu_info); + int err; + + if (!hwdev) { + PMD_DRV_LOG(ERR, "Hwdev is NULL"); + return -EINVAL; + } + + memset(&mtu_info, 0, sizeof(mtu_info)); + mtu_info.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1; + mtu_info.func_id = hinic_global_func_id(hwdev); + mtu_info.mtu = new_mtu; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_CHANGE_MTU, + &mtu_info, sizeof(mtu_info), + &mtu_info, &out_size); + if (err || !out_size || mtu_info.mgmt_msg_head.status) { + PMD_DRV_LOG(ERR, "Failed to set mtu, err: %d, status: 0x%x, out size: 0x%x", + err, mtu_info.mgmt_msg_head.status, out_size); + return -EINVAL; + } + + return 0; +} + +/** + * hinic_add_remove_vlan - Add or remove vlan id to vlan elb table. + * + * @param hwdev + * The hardware interface of a nic device. + * @param vlan_id + * Vlan id. + * @param func_id + * Global function id of NIC. + * @param add + * Add or remove operation. 
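+ *   true adds the vlan id to the vlan elb table, false removes it.
+ *   Illustrative calls (vlan_id and func_id supplied by the caller):
+ *     hinic_add_remove_vlan(hwdev, vlan_id, func_id, true);
+ *     hinic_add_remove_vlan(hwdev, vlan_id, func_id, false);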
+ * + * @return + * 0 on success. + * negative error value otherwise. + */ +int hinic_add_remove_vlan(void *hwdev, u16 vlan_id, u16 func_id, bool add) +{ + struct hinic_vlan_config vlan_info; + u16 out_size = sizeof(vlan_info); + u8 cmd; + int err; + + if (!hwdev) { + PMD_DRV_LOG(ERR, "Hwdev is NULL"); + return -EINVAL; + } + + cmd = add ? HINIC_PORT_CMD_ADD_VLAN : HINIC_PORT_CMD_DEL_VLAN; + + memset(&vlan_info, 0, sizeof(vlan_info)); + vlan_info.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1; + vlan_info.func_id = func_id; + vlan_info.vlan_id = vlan_id; + + err = l2nic_msg_to_mgmt_sync(hwdev, cmd, &vlan_info, + sizeof(vlan_info), &vlan_info, + &out_size); + if (err || !out_size || vlan_info.mgmt_msg_head.status) { + PMD_DRV_LOG(ERR, + "Failed to %s vlan, err: %d, status: 0x%x, out size: 0x%x", + add ? "add" : "remove", err, + vlan_info.mgmt_msg_head.status, out_size); + return -EINVAL; + } + + return 0; +} + +/** + * hinic_config_vlan_filter - Enable or Disable vlan filter. + * + * @param hwdev + * The hardware interface of a nic device. + * @param vlan_filter_ctrl + * Enable or Disable. + * + * @return + * 0 on success. + * negative error value otherwise. + */ +int hinic_config_vlan_filter(void *hwdev, u32 vlan_filter_ctrl) +{ + struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev; + struct hinic_vlan_filter vlan_filter; + u16 out_size = sizeof(vlan_filter); + int err; + + if (!hwdev) + return -EINVAL; + + memset(&vlan_filter, 0, sizeof(vlan_filter)); + vlan_filter.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1; + vlan_filter.func_id = hinic_global_func_id(nic_hwdev); + vlan_filter.vlan_filter_ctrl = vlan_filter_ctrl; + + err = l2nic_msg_to_mgmt_sync(nic_hwdev, HINIC_PORT_CMD_SET_VLAN_FILTER, + &vlan_filter, sizeof(vlan_filter), + &vlan_filter, &out_size); + if (vlan_filter.mgmt_msg_head.status == HINIC_MGMT_CMD_UNSUPPORTED) { + err = HINIC_MGMT_CMD_UNSUPPORTED; + } else if ((err == HINIC_MBOX_VF_CMD_ERROR) && + (HINIC_IS_VF(nic_hwdev))) { + err = HINIC_MGMT_CMD_UNSUPPORTED; + } else if (err || !out_size || vlan_filter.mgmt_msg_head.status) { + PMD_DRV_LOG(ERR, + "Failed to config vlan filter, vlan_filter_ctrl: 0x%x, err: %d, status: 0x%x, out size: 0x%x", + vlan_filter_ctrl, err, + vlan_filter.mgmt_msg_head.status, out_size); + err = -EINVAL; + } + + return err; +} + +/** + * hinic_set_rx_vlan_offload - Enable or Disable vlan offload. + * + * @param hwdev + * The hardware interface of a nic device. + * @param en + * Enable or Disable. + * + * @return + * 0 on success. + * negative error value otherwise. + */ +int hinic_set_rx_vlan_offload(void *hwdev, u8 en) +{ + struct hinic_vlan_offload vlan_cfg; + u16 out_size = sizeof(vlan_cfg); + int err; + + if (!hwdev) { + PMD_DRV_LOG(ERR, "Hwdev is NULL"); + return -EINVAL; + } + + memset(&vlan_cfg, 0, sizeof(vlan_cfg)); + vlan_cfg.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1; + vlan_cfg.func_id = hinic_global_func_id(hwdev); + vlan_cfg.vlan_rx_offload = en; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_RX_VLAN_OFFLOAD, + &vlan_cfg, sizeof(vlan_cfg), + &vlan_cfg, &out_size); + if (err || !out_size || vlan_cfg.mgmt_msg_head.status) { + PMD_DRV_LOG(ERR, + "Failed to set rx vlan offload, err: %d, status: 0x%x, out size: 0x%x", + err, vlan_cfg.mgmt_msg_head.status, out_size); + return -EINVAL; + } + + return 0; +} + +/** + * hinic_get_link_status - Get link status from hardware. + * + * @param hwdev + * The hardware interface of a nic device. + * @param link_state + * Link status. + * + * @return + * 0 on success. 
+ * negative error value otherwise. + */ +int hinic_get_link_status(void *hwdev, u8 *link_state) +{ + struct hinic_get_link get_link; + u16 out_size = sizeof(get_link); + int err; + + if (!hwdev || !link_state) { + PMD_DRV_LOG(ERR, "Hwdev or link_state is NULL"); + return -EINVAL; + } + + memset(&get_link, 0, sizeof(get_link)); + get_link.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1; + get_link.func_id = hinic_global_func_id(hwdev); + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_LINK_STATE, + &get_link, sizeof(get_link), + &get_link, &out_size); + if (err || !out_size || get_link.mgmt_msg_head.status) { + PMD_DRV_LOG(ERR, "Failed to get link state, err: %d, status: 0x%x, out size: 0x%x", + err, get_link.mgmt_msg_head.status, out_size); + return -EINVAL; + } + + *link_state = get_link.link_status; + + return 0; +} + +/** + * hinic_set_vport_enable - Notify firmware that driver is ready or not. + * + * @param hwdev + * The hardware interface of a nic device. + * @param enable + * 1: driver is ready; 0: driver is not ok. + * + * @return + * 0 on success. + * negative error value otherwise. + */ +int hinic_set_vport_enable(void *hwdev, bool enable) +{ + struct hinic_vport_state en_state; + u16 out_size = sizeof(en_state); + int err; + + if (!hwdev) { + PMD_DRV_LOG(ERR, "Hwdev is NULL"); + return -EINVAL; + } + + memset(&en_state, 0, sizeof(en_state)); + en_state.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1; + en_state.func_id = hinic_global_func_id(hwdev); + en_state.state = (enable ? 1 : 0); + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_VPORT_ENABLE, + &en_state, sizeof(en_state), + &en_state, &out_size); + if (err || !out_size || en_state.mgmt_msg_head.status) { + PMD_DRV_LOG(ERR, "Failed to set vport state, err: %d, status: 0x%x, out size: 0x%x", + err, en_state.mgmt_msg_head.status, out_size); + return -EINVAL; + } + + return 0; +} + +/** + * hinic_set_port_enable - Open MAG to receive packets. + * + * @param hwdev + * The hardware interface of a nic device. + * @param enable + * 1: open MAG; 0: close MAG. + * + * @return + * 0 on success. + * negative error value otherwise. + */ +int hinic_set_port_enable(void *hwdev, bool enable) +{ + struct hinic_port_state en_state; + u16 out_size = sizeof(en_state); + int err; + + if (!hwdev) { + PMD_DRV_LOG(ERR, "Hwdev is NULL"); + return -EINVAL; + } + + if (HINIC_IS_VF((struct hinic_hwdev *)hwdev)) + return 0; + + memset(&en_state, 0, sizeof(en_state)); + en_state.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1; + en_state.state = (enable ? 
HINIC_PORT_ENABLE : HINIC_PORT_DISABLE); + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_PORT_ENABLE, + &en_state, sizeof(en_state), + &en_state, &out_size); + if (err || !out_size || en_state.mgmt_msg_head.status) { + PMD_DRV_LOG(ERR, "Failed to set phy port state, err: %d, status: 0x%x, out size: 0x%x", + err, en_state.mgmt_msg_head.status, out_size); + return -EINVAL; + } + + return 0; +} + +int hinic_get_port_info(void *hwdev, struct nic_port_info *port_info) +{ + struct hinic_port_info port_msg; + u16 out_size = sizeof(port_msg); + int err; + + if (!hwdev || !port_info) { + PMD_DRV_LOG(ERR, "Hwdev or port_info is NULL"); + return -EINVAL; + } + + memset(&port_msg, 0, sizeof(port_msg)); + port_msg.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1; + port_msg.func_id = hinic_global_func_id(hwdev); + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_PORT_INFO, + &port_msg, sizeof(port_msg), + &port_msg, &out_size); + if (err || !out_size || port_msg.mgmt_msg_head.status) { + PMD_DRV_LOG(ERR, + "Failed to get port info, err: %d, status: 0x%x, out size: 0x%x", + err, port_msg.mgmt_msg_head.status, out_size); + return err; + } + + port_info->autoneg_cap = port_msg.autoneg_cap; + port_info->autoneg_state = port_msg.autoneg_state; + port_info->duplex = port_msg.duplex; + port_info->port_type = port_msg.port_type; + port_info->speed = port_msg.speed; + + return 0; +} + +int hinic_set_pause_config(void *hwdev, struct nic_pause_config nic_pause) +{ + struct hinic_pause_config pause_info; + u16 out_size = sizeof(pause_info); + int err; + + if (!hwdev) { + PMD_DRV_LOG(ERR, "Hwdev is NULL"); + return -EINVAL; + } + + memset(&pause_info, 0, sizeof(pause_info)); + pause_info.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1; + pause_info.func_id = hinic_global_func_id(hwdev); + pause_info.auto_neg = nic_pause.auto_neg; + pause_info.rx_pause = nic_pause.rx_pause; + pause_info.tx_pause = nic_pause.tx_pause; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_PAUSE_INFO, + &pause_info, sizeof(pause_info), + &pause_info, &out_size); + if (err || !out_size || pause_info.mgmt_msg_head.status) { + PMD_DRV_LOG(ERR, "Failed to set pause info, err: %d, status: 0x%x, out size: 0x%x", + err, pause_info.mgmt_msg_head.status, out_size); + return -EINVAL; + } + + return 0; +} + +int hinic_get_pause_info(void *hwdev, struct nic_pause_config *nic_pause) +{ + struct hinic_pause_config pause_info; + u16 out_size = sizeof(pause_info); + int err; + + if (!hwdev || !nic_pause) + return -EINVAL; + + memset(&pause_info, 0, sizeof(pause_info)); + pause_info.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1; + pause_info.func_id = hinic_global_func_id(hwdev); + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_PAUSE_INFO, + &pause_info, sizeof(pause_info), + &pause_info, &out_size); + if (err || !out_size || pause_info.mgmt_msg_head.status) { + PMD_DRV_LOG(ERR, "Failed to get pause info, err: %d, status: 0x%x, out size: 0x%x\n", + err, pause_info.mgmt_msg_head.status, out_size); + return -EINVAL; + } + + nic_pause->auto_neg = pause_info.auto_neg; + nic_pause->rx_pause = pause_info.rx_pause; + nic_pause->tx_pause = pause_info.tx_pause; + + return 0; +} + +int hinic_dcb_set_ets(void *hwdev, u8 *up_tc, u8 *pg_bw, + u8 *pgid, u8 *up_bw, u8 *prio) +{ + struct hinic_up_ets_cfg ets; + u16 out_size = sizeof(ets); + u16 up_bw_t = 0; + u8 pg_bw_t = 0; + int i, err; + + if (!hwdev || !up_tc || !pg_bw || !pgid || !up_bw || !prio) { + PMD_DRV_LOG(ERR, "Hwdev, up_tc, pg_bw, pgid, up_bw or prio is NULL"); + return -EINVAL; + } + + for (i 
= 0; i < HINIC_DCB_TC_MAX; i++) { + up_bw_t += *(up_bw + i); + pg_bw_t += *(pg_bw + i); + + if (*(up_tc + i) > HINIC_DCB_TC_MAX) { + PMD_DRV_LOG(ERR, "Invalid up %d mapping tc: %d", i, + *(up_tc + i)); + return -EINVAL; + } + } + + if (pg_bw_t != 100 || (up_bw_t % 100) != 0) { + PMD_DRV_LOG(ERR, + "Invalid pg_bw: %d or up_bw: %d", pg_bw_t, up_bw_t); + return -EINVAL; + } + + memset(&ets, 0, sizeof(ets)); + ets.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1; + ets.port_id = 0; /* reserved */ + memcpy(ets.up_tc, up_tc, HINIC_DCB_TC_MAX); + memcpy(ets.pg_bw, pg_bw, HINIC_DCB_UP_MAX); + memcpy(ets.pgid, pgid, HINIC_DCB_UP_MAX); + memcpy(ets.up_bw, up_bw, HINIC_DCB_UP_MAX); + memcpy(ets.prio, prio, HINIC_DCB_UP_MAX); + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_ETS, + &ets, sizeof(ets), &ets, &out_size); + if (err || ets.mgmt_msg_head.status || !out_size) { + PMD_DRV_LOG(ERR, + "Failed to set ets, err: %d, status: 0x%x, out size: 0x%x", + err, ets.mgmt_msg_head.status, out_size); + return -EINVAL; + } + + return 0; +} + +int hinic_get_vport_stats(void *hwdev, struct hinic_vport_stats *stats) +{ + struct hinic_port_stats_info vport_stats_cmd; + struct hinic_cmd_vport_stats vport_stats_rsp; + u16 out_size = sizeof(vport_stats_rsp); + int err; + + if (!hwdev || !stats) { + PMD_DRV_LOG(ERR, "Hwdev or stats is NULL"); + return -EINVAL; + } + + memset(&vport_stats_rsp, 0, sizeof(vport_stats_rsp)); + memset(&vport_stats_cmd, 0, sizeof(vport_stats_cmd)); + vport_stats_cmd.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1; + vport_stats_cmd.stats_version = HINIC_PORT_STATS_VERSION; + vport_stats_cmd.func_id = hinic_global_func_id(hwdev); + vport_stats_cmd.stats_size = sizeof(vport_stats_rsp); + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_VPORT_STAT, + &vport_stats_cmd, sizeof(vport_stats_cmd), + &vport_stats_rsp, &out_size); + if (err || !out_size || vport_stats_rsp.mgmt_msg_head.status) { + PMD_DRV_LOG(ERR, + "Get vport stats from fw failed, err: %d, status: 0x%x, out size: 0x%x", + err, vport_stats_rsp.mgmt_msg_head.status, out_size); + return -EFAULT; + } + + memcpy(stats, &vport_stats_rsp.stats, sizeof(*stats)); + + return 0; +} + +int hinic_get_phy_port_stats(void *hwdev, struct hinic_phy_port_stats *stats) +{ + struct hinic_port_stats_info port_stats_cmd; + struct hinic_port_stats port_stats_rsp; + u16 out_size = sizeof(port_stats_rsp); + int err; + + if (!hwdev || !stats) { + PMD_DRV_LOG(ERR, "Hwdev or stats is NULL"); + return -EINVAL; + } + + memset(&port_stats_rsp, 0, sizeof(port_stats_rsp)); + memset(&port_stats_cmd, 0, sizeof(port_stats_cmd)); + port_stats_cmd.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1; + port_stats_cmd.stats_version = HINIC_PORT_STATS_VERSION; + port_stats_cmd.stats_size = sizeof(port_stats_rsp); + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_PORT_STATISTICS, + &port_stats_cmd, sizeof(port_stats_cmd), + &port_stats_rsp, &out_size); + if (err || !out_size || port_stats_rsp.mgmt_msg_head.status) { + PMD_DRV_LOG(ERR, + "Failed to get port statistics, err: %d, status: 0x%x, out size: 0x%x", + err, port_stats_rsp.mgmt_msg_head.status, out_size); + return -EFAULT; + } + + memcpy(stats, &port_stats_rsp.stats, sizeof(*stats)); + + return 0; +} + +int hinic_set_rss_type(void *hwdev, u32 tmpl_idx, struct nic_rss_type rss_type) +{ + struct nic_rss_context_tbl *ctx_tbl; + struct hinic_cmd_buf *cmd_buf; + u32 ctx = 0; + u64 out_param; + int err; + + if (!hwdev) { + PMD_DRV_LOG(ERR, "Hwdev is NULL"); + return -EINVAL; + } + + cmd_buf = hinic_alloc_cmd_buf(hwdev); + if 
(!cmd_buf) { + PMD_DRV_LOG(ERR, "Failed to allocate cmd buf"); + return -ENOMEM; + } + + ctx |= HINIC_RSS_TYPE_SET(1, VALID) | + HINIC_RSS_TYPE_SET(rss_type.ipv4, IPV4) | + HINIC_RSS_TYPE_SET(rss_type.ipv6, IPV6) | + HINIC_RSS_TYPE_SET(rss_type.ipv6_ext, IPV6_EXT) | + HINIC_RSS_TYPE_SET(rss_type.tcp_ipv4, TCP_IPV4) | + HINIC_RSS_TYPE_SET(rss_type.tcp_ipv6, TCP_IPV6) | + HINIC_RSS_TYPE_SET(rss_type.tcp_ipv6_ext, TCP_IPV6_EXT) | + HINIC_RSS_TYPE_SET(rss_type.udp_ipv4, UDP_IPV4) | + HINIC_RSS_TYPE_SET(rss_type.udp_ipv6, UDP_IPV6); + + cmd_buf->size = sizeof(struct nic_rss_context_tbl); + + ctx_tbl = (struct nic_rss_context_tbl *)cmd_buf->buf; + ctx_tbl->group_index = cpu_to_be32(tmpl_idx); + ctx_tbl->offset = 0; + ctx_tbl->size = sizeof(u32); + ctx_tbl->size = cpu_to_be32(ctx_tbl->size); + ctx_tbl->rsvd = 0; + ctx_tbl->ctx = cpu_to_be32(ctx); + + /* cfg the rss context table by command queue */ + err = hinic_cmdq_direct_resp(hwdev, HINIC_ACK_TYPE_CMDQ, + HINIC_MOD_L2NIC, + HINIC_UCODE_CMD_SET_RSS_CONTEXT_TABLE, + cmd_buf, &out_param, 0); + + hinic_free_cmd_buf(hwdev, cmd_buf); + + if (err || out_param != 0) { + PMD_DRV_LOG(ERR, "Failed to set rss context table"); + return -EFAULT; + } + + return 0; +} + +int hinic_get_rss_type(void *hwdev, u32 tmpl_idx, struct nic_rss_type *rss_type) +{ + struct hinic_rss_context_table ctx_tbl; + u16 out_size = sizeof(ctx_tbl); + int err; + + if (!hwdev || !rss_type) { + PMD_DRV_LOG(ERR, "Hwdev or rss_type is NULL"); + return -EINVAL; + } + + ctx_tbl.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1; + ctx_tbl.func_id = hinic_global_func_id(hwdev); + ctx_tbl.template_id = (u8)tmpl_idx; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_RSS_CTX_TBL, + &ctx_tbl, sizeof(ctx_tbl), + &ctx_tbl, &out_size); + if (err || !out_size || ctx_tbl.mgmt_msg_head.status) { + PMD_DRV_LOG(ERR, + "Failed to get hash type, err: %d, status: 0x%x, out size: 0x%x", + err, ctx_tbl.mgmt_msg_head.status, out_size); + return -EINVAL; + } + + rss_type->ipv4 = HINIC_RSS_TYPE_GET(ctx_tbl.context, IPV4); + rss_type->ipv6 = HINIC_RSS_TYPE_GET(ctx_tbl.context, IPV6); + rss_type->ipv6_ext = HINIC_RSS_TYPE_GET(ctx_tbl.context, IPV6_EXT); + rss_type->tcp_ipv4 = HINIC_RSS_TYPE_GET(ctx_tbl.context, TCP_IPV4); + rss_type->tcp_ipv6 = HINIC_RSS_TYPE_GET(ctx_tbl.context, TCP_IPV6); + rss_type->tcp_ipv6_ext = + HINIC_RSS_TYPE_GET(ctx_tbl.context, TCP_IPV6_EXT); + rss_type->udp_ipv4 = HINIC_RSS_TYPE_GET(ctx_tbl.context, UDP_IPV4); + rss_type->udp_ipv6 = HINIC_RSS_TYPE_GET(ctx_tbl.context, UDP_IPV6); + + return 0; +} + +int hinic_rss_set_template_tbl(void *hwdev, u32 tmpl_idx, u8 *temp) +{ + struct hinic_rss_template_key temp_key; + u16 out_size = sizeof(temp_key); + int err; + + if (!hwdev || !temp) { + PMD_DRV_LOG(ERR, "Hwdev or temp is NULL"); + return -EINVAL; + } + + memset(&temp_key, 0, sizeof(temp_key)); + temp_key.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1; + temp_key.func_id = hinic_global_func_id(hwdev); + temp_key.template_id = (u8)tmpl_idx; + memcpy(temp_key.key, temp, HINIC_RSS_KEY_SIZE); + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_RSS_TEMPLATE_TBL, + &temp_key, sizeof(temp_key), + &temp_key, &out_size); + if (err || !out_size || temp_key.mgmt_msg_head.status) { + PMD_DRV_LOG(ERR, + "Failed to set hash key, err: %d, status: 0x%x, out size: 0x%x", + err, temp_key.mgmt_msg_head.status, out_size); + return -EINVAL; + } + + return 0; +} + +int hinic_rss_get_template_tbl(void *hwdev, u32 tmpl_idx, u8 *temp) +{ + struct hinic_rss_template_key temp_key; + u16 out_size = sizeof(temp_key); 
+ int err; + + if (!hwdev || !temp) { + PMD_DRV_LOG(ERR, "Hwdev or temp is NULL"); + return -EINVAL; + } + + memset(&temp_key, 0, sizeof(temp_key)); + temp_key.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1; + temp_key.func_id = hinic_global_func_id(hwdev); + temp_key.template_id = (u8)tmpl_idx; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_RSS_TEMPLATE_TBL, + &temp_key, sizeof(temp_key), + &temp_key, &out_size); + if (err || !out_size || temp_key.mgmt_msg_head.status) { + PMD_DRV_LOG(ERR, "Failed to get hash key, err: %d, status: 0x%x, out size: 0x%x", + err, temp_key.mgmt_msg_head.status, out_size); + return -EINVAL; + } + + memcpy(temp, temp_key.key, HINIC_RSS_KEY_SIZE); + + return 0; +} + +/** + * hinic_rss_set_hash_engine - Init rss hash function. + * + * @param hwdev + * The hardware interface of a nic device. + * @param tmpl_idx + * Index of rss template from NIC. + * @param type + * Hash function, such as Toeplitz or XOR. + * + * @return + * 0 on success. + * negative error value otherwise. + */ +int hinic_rss_set_hash_engine(void *hwdev, u8 tmpl_idx, u8 type) +{ + struct hinic_rss_engine_type hash_type; + u16 out_size = sizeof(hash_type); + int err; + + if (!hwdev) { + PMD_DRV_LOG(ERR, "Hwdev is NULL"); + return -EINVAL; + } + + memset(&hash_type, 0, sizeof(hash_type)); + hash_type.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1; + hash_type.func_id = hinic_global_func_id(hwdev); + hash_type.hash_engine = type; + hash_type.template_id = tmpl_idx; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_RSS_HASH_ENGINE, + &hash_type, sizeof(hash_type), + &hash_type, &out_size); + if (err || !out_size || hash_type.mgmt_msg_head.status) { + PMD_DRV_LOG(ERR, "Failed to get hash engine, err: %d, status: 0x%x, out size: 0x%x", + err, hash_type.mgmt_msg_head.status, out_size); + return -EINVAL; + } + + return 0; +} + +int hinic_rss_set_indir_tbl(void *hwdev, u32 tmpl_idx, u32 *indir_table) +{ + struct nic_rss_indirect_tbl *indir_tbl; + struct hinic_cmd_buf *cmd_buf; + int i; + u32 *temp; + u32 indir_size; + u64 out_param; + int err; + + if (!hwdev || !indir_table) { + PMD_DRV_LOG(ERR, "Hwdev or indir_table is NULL"); + return -EINVAL; + } + + cmd_buf = hinic_alloc_cmd_buf(hwdev); + if (!cmd_buf) { + PMD_DRV_LOG(ERR, "Failed to allocate cmd buf"); + return -ENOMEM; + } + + cmd_buf->size = sizeof(struct nic_rss_indirect_tbl); + indir_tbl = cmd_buf->buf; + indir_tbl->group_index = cpu_to_be32(tmpl_idx); + + for (i = 0; i < HINIC_RSS_INDIR_SIZE; i++) { + indir_tbl->entry[i] = (u8)(*(indir_table + i)); + + if (0x3 == (i & 0x3)) { + temp = (u32 *)&indir_tbl->entry[i - 3]; + *temp = cpu_to_be32(*temp); + } + } + + /* configure the rss indirect table by command queue */ + indir_size = HINIC_RSS_INDIR_SIZE / 2; + indir_tbl->offset = 0; + indir_tbl->size = cpu_to_be32(indir_size); + + err = hinic_cmdq_direct_resp(hwdev, HINIC_ACK_TYPE_CMDQ, + HINIC_MOD_L2NIC, + HINIC_UCODE_CMD_SET_RSS_INDIR_TABLE, + cmd_buf, &out_param, 0); + if (err || out_param != 0) { + PMD_DRV_LOG(ERR, "Failed to set rss indir table"); + err = -EFAULT; + goto free_buf; + } + + indir_tbl->offset = cpu_to_be32(indir_size); + indir_tbl->size = cpu_to_be32(indir_size); + memcpy(indir_tbl->entry, &indir_tbl->entry[indir_size], indir_size); + + err = hinic_cmdq_direct_resp(hwdev, HINIC_ACK_TYPE_CMDQ, + HINIC_MOD_L2NIC, + HINIC_UCODE_CMD_SET_RSS_INDIR_TABLE, + cmd_buf, &out_param, 0); + if (err || out_param != 0) { + PMD_DRV_LOG(ERR, "Failed to set rss indir table"); + err = -EFAULT; + } + +free_buf: + hinic_free_cmd_buf(hwdev, 
cmd_buf); + + return err; +} + +int hinic_rss_get_indir_tbl(void *hwdev, u32 tmpl_idx, u32 *indir_table) +{ + struct hinic_rss_indir_table rss_cfg; + u16 out_size = sizeof(rss_cfg); + int err = 0, i; + + if (!hwdev || !indir_table) { + PMD_DRV_LOG(ERR, "Hwdev or indir_table is NULL"); + return -EINVAL; + } + + memset(&rss_cfg, 0, sizeof(rss_cfg)); + rss_cfg.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1; + rss_cfg.func_id = hinic_global_func_id(hwdev); + rss_cfg.template_id = (u8)tmpl_idx; + + err = l2nic_msg_to_mgmt_sync(hwdev, + HINIC_PORT_CMD_GET_RSS_TEMPLATE_INDIR_TBL, + &rss_cfg, sizeof(rss_cfg), &rss_cfg, + &out_size); + if (err || !out_size || rss_cfg.mgmt_msg_head.status) { + PMD_DRV_LOG(ERR, "Failed to get indir table, err: %d, status: 0x%x, out size: 0x%x", + err, rss_cfg.mgmt_msg_head.status, out_size); + return -EINVAL; + } + + hinic_be32_to_cpu(rss_cfg.indir, HINIC_RSS_INDIR_SIZE); + for (i = 0; i < HINIC_RSS_INDIR_SIZE; i++) + indir_table[i] = rss_cfg.indir[i]; + + return 0; +} + +int hinic_rss_cfg(void *hwdev, u8 rss_en, u8 tmpl_idx, u8 tc_num, u8 *prio_tc) +{ + struct hinic_rss_config rss_cfg; + u16 out_size = sizeof(rss_cfg); + int err; + + /* micro code required: number of TC should be power of 2 */ + if (!hwdev || !prio_tc || (tc_num & (tc_num - 1))) { + PMD_DRV_LOG(ERR, "Hwdev or prio_tc is NULL, or tc_num: %u Not power of 2", + tc_num); + return -EINVAL; + } + + memset(&rss_cfg, 0, sizeof(rss_cfg)); + rss_cfg.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1; + rss_cfg.func_id = hinic_global_func_id(hwdev); + rss_cfg.rss_en = rss_en; + rss_cfg.template_id = tmpl_idx; + rss_cfg.rq_priority_number = tc_num ? (u8)ilog2(tc_num) : 0; + + memcpy(rss_cfg.prio_tc, prio_tc, HINIC_DCB_UP_MAX); + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_RSS_CFG, + &rss_cfg, sizeof(rss_cfg), &rss_cfg, + &out_size); + if (err || !out_size || rss_cfg.mgmt_msg_head.status) { + PMD_DRV_LOG(ERR, "Failed to set rss cfg, err: %d, status: 0x%x, out size: 0x%x", + err, rss_cfg.mgmt_msg_head.status, out_size); + return -EINVAL; + } + + return 0; +} + +/** + * hinic_rss_template_alloc - Get rss template id from the chip, + * all functions share 96 templates. + * + * @param hwdev + * The hardware interface of a nic device. + * @param tmpl_idx + * Index of rss template from chip. + * + * @return + * 0 on success. + * negative error value otherwise. + */ +int hinic_rss_template_alloc(void *hwdev, u8 *tmpl_idx) +{ + struct hinic_rss_template_mgmt template_mgmt; + u16 out_size = sizeof(template_mgmt); + int err; + + if (!hwdev || !tmpl_idx) { + PMD_DRV_LOG(ERR, "Hwdev or tmpl_idx is NULL"); + return -EINVAL; + } + + memset(&template_mgmt, 0, sizeof(template_mgmt)); + template_mgmt.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1; + template_mgmt.func_id = hinic_global_func_id(hwdev); + template_mgmt.cmd = NIC_RSS_CMD_TEMP_ALLOC; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_RSS_TEMP_MGR, + &template_mgmt, sizeof(template_mgmt), + &template_mgmt, &out_size); + if (err || !out_size || template_mgmt.mgmt_msg_head.status) { + PMD_DRV_LOG(ERR, "Failed to alloc rss template, err: %d, status: 0x%x, out size: 0x%x", + err, template_mgmt.mgmt_msg_head.status, out_size); + return -EINVAL; + } + + *tmpl_idx = template_mgmt.template_id; + + return 0; +} + +/** + * hinic_rss_template_free - Free rss template id to the chip. + * + * @param hwdev + * The hardware interface of a nic device. + * @param tmpl_idx + * Index of rss template from chip. + * + * @return + * 0 on success. + * negative error value otherwise. 
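+ *
+ * Typically paired with hinic_rss_template_alloc(): the template id
+ * obtained at setup time is released here on teardown. Illustrative
+ * sketch (error handling omitted):
+ *   u8 tmpl_idx;
+ *   err = hinic_rss_template_alloc(hwdev, &tmpl_idx);
+ *   ... configure and use the RSS template ...
+ *   hinic_rss_template_free(hwdev, tmpl_idx);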
+ */ +int hinic_rss_template_free(void *hwdev, u8 tmpl_idx) +{ + struct hinic_rss_template_mgmt template_mgmt; + u16 out_size = sizeof(template_mgmt); + int err; + + if (!hwdev) { + PMD_DRV_LOG(ERR, "Hwdev is NULL"); + return -EINVAL; + } + + memset(&template_mgmt, 0, sizeof(template_mgmt)); + template_mgmt.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1; + template_mgmt.func_id = hinic_global_func_id(hwdev); + template_mgmt.template_id = tmpl_idx; + template_mgmt.cmd = NIC_RSS_CMD_TEMP_FREE; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_RSS_TEMP_MGR, + &template_mgmt, sizeof(template_mgmt), + &template_mgmt, &out_size); + if (err || !out_size || template_mgmt.mgmt_msg_head.status) { + PMD_DRV_LOG(ERR, "Failed to free rss template, err: %d, status: 0x%x, out size: 0x%x", + err, template_mgmt.mgmt_msg_head.status, out_size); + return -EINVAL; + } + + return 0; +} + +/** + * hinic_set_rx_vhd_mode - Change rx buffer size after initialization. + * + * @param hwdev + * The hardware interface of a nic device. + * @param vhd_mode + * Not needed. + * @param rx_buf_sz + * receive buffer size. + * + * @return + * 0 on success. + * negative error value otherwise. + */ +int hinic_set_rx_vhd_mode(void *hwdev, u16 vhd_mode, u16 rx_buf_sz) +{ + struct hinic_set_vhd_mode vhd_mode_cfg; + u16 out_size = sizeof(vhd_mode_cfg); + int err; + + if (!hwdev) { + PMD_DRV_LOG(ERR, "Hwdev is NULL"); + return -EINVAL; + } + + memset(&vhd_mode_cfg, 0, sizeof(vhd_mode_cfg)); + + vhd_mode_cfg.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1; + vhd_mode_cfg.func_id = hinic_global_func_id(hwdev); + vhd_mode_cfg.vhd_type = vhd_mode; + vhd_mode_cfg.rx_wqe_buffer_size = rx_buf_sz; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_VHD_CFG, + &vhd_mode_cfg, sizeof(vhd_mode_cfg), + &vhd_mode_cfg, &out_size); + if (err || !out_size || vhd_mode_cfg.mgmt_msg_head.status) { + PMD_DRV_LOG(ERR, + "Failed to set vhd mode, err: %d, status: 0x%x, out size: 0x%x", + err, vhd_mode_cfg.mgmt_msg_head.status, out_size); + + return -EIO; + } + + return 0; +} + +int hinic_set_rx_mode(void *hwdev, u32 enable) +{ + struct hinic_rx_mode_config rx_mode_cfg; + u16 out_size = sizeof(rx_mode_cfg); + int err; + + if (!hwdev) { + PMD_DRV_LOG(ERR, "Hwdev is NULL"); + return -EINVAL; + } + + memset(&rx_mode_cfg, 0, sizeof(rx_mode_cfg)); + rx_mode_cfg.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1; + rx_mode_cfg.func_id = hinic_global_func_id(hwdev); + rx_mode_cfg.rx_mode = enable; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_RX_MODE, + &rx_mode_cfg, sizeof(rx_mode_cfg), + &rx_mode_cfg, &out_size); + if (err || !out_size || rx_mode_cfg.mgmt_msg_head.status) { + PMD_DRV_LOG(ERR, "Failed to set rx mode, err: %d, status: 0x%x, out size: 0x%x", + err, rx_mode_cfg.mgmt_msg_head.status, out_size); + return -EINVAL; + } + + return 0; +} + +/** + * hinic_get_mgmt_version - Get mgmt module version from chip. + * + * @param hwdev + * The hardware interface of a nic device. + * @param fw + * Firmware version. + * + * @return + * 0 on success. + * negative error value otherwise. 
+ */ +int hinic_get_mgmt_version(void *hwdev, char *fw) +{ + struct hinic_version_info fw_ver; + u16 out_size = sizeof(fw_ver); + int err; + + if (!hwdev || !fw) { + PMD_DRV_LOG(ERR, "Hwdev or fw is NULL"); + return -EINVAL; + } + + memset(&fw_ver, 0, sizeof(fw_ver)); + fw_ver.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_MGMT_VERSION, + &fw_ver, sizeof(fw_ver), &fw_ver, + &out_size); + if (err || !out_size || fw_ver.mgmt_msg_head.status) { + PMD_DRV_LOG(ERR, "Failed to get mgmt version, err: %d, status: 0x%x, out size: 0x%x\n", + err, fw_ver.mgmt_msg_head.status, out_size); + return -EINVAL; + } + + snprintf(fw, HINIC_MGMT_VERSION_MAX_LEN, "%s", fw_ver.ver); + + return 0; +} + +int hinic_set_rx_csum_offload(void *hwdev, u32 en) +{ + struct hinic_checksum_offload rx_csum_cfg; + u16 out_size = sizeof(rx_csum_cfg); + int err; + + if (!hwdev) { + PMD_DRV_LOG(ERR, "Hwdev is NULL"); + return -EINVAL; + } + + memset(&rx_csum_cfg, 0, sizeof(rx_csum_cfg)); + rx_csum_cfg.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1; + rx_csum_cfg.func_id = hinic_global_func_id(hwdev); + rx_csum_cfg.rx_csum_offload = en; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_RX_CSUM, + &rx_csum_cfg, sizeof(rx_csum_cfg), + &rx_csum_cfg, &out_size); + if (err || !out_size || rx_csum_cfg.mgmt_msg_head.status) { + PMD_DRV_LOG(ERR, + "Failed to set rx csum offload, err: %d, status: 0x%x, out size: 0x%x", + err, rx_csum_cfg.mgmt_msg_head.status, out_size); + return -EINVAL; + } + + return 0; +} + +int hinic_set_rx_lro(void *hwdev, u8 ipv4_en, u8 ipv6_en, u8 max_wqe_num) +{ + struct hinic_lro_config lro_cfg; + u16 out_size = sizeof(lro_cfg); + int err; + + if (!hwdev) { + PMD_DRV_LOG(ERR, "Hwdev is NULL"); + return -EINVAL; + } + + memset(&lro_cfg, 0, sizeof(lro_cfg)); + lro_cfg.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1; + lro_cfg.func_id = hinic_global_func_id(hwdev); + lro_cfg.lro_ipv4_en = ipv4_en; + lro_cfg.lro_ipv6_en = ipv6_en; + lro_cfg.lro_max_wqe_num = max_wqe_num; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_LRO, + &lro_cfg, sizeof(lro_cfg), &lro_cfg, + &out_size); + if (err || !out_size || lro_cfg.mgmt_msg_head.status) { + PMD_DRV_LOG(ERR, "Failed to set lro offload, err: %d, status: 0x%x, out size: 0x%x", + err, lro_cfg.mgmt_msg_head.status, out_size); + return -EINVAL; + } + + return 0; +} + +int hinic_set_anti_attack(void *hwdev, bool enable) +{ + struct hinic_port_anti_attack_rate rate; + u16 out_size = sizeof(rate); + int err; + + if (!hwdev) { + PMD_DRV_LOG(ERR, "Hwdev is NULL"); + return -EINVAL; + } + + memset(&rate, 0, sizeof(rate)); + rate.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1; + rate.func_id = hinic_global_func_id(hwdev); + rate.enable = enable; + rate.cir = ANTI_ATTACK_DEFAULT_CIR; + rate.xir = ANTI_ATTACK_DEFAULT_XIR; + rate.cbs = ANTI_ATTACK_DEFAULT_CBS; + rate.xbs = ANTI_ATTACK_DEFAULT_XBS; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_ANTI_ATTACK_RATE, + &rate, sizeof(rate), &rate, + &out_size); + if (err || !out_size || rate.mgmt_msg_head.status) { + PMD_DRV_LOG(ERR, "Can't %s port Anti-Attack rate limit, err: %d, status: 0x%x, out size: 0x%x", + (enable ? 
"enable" : "disable"), err, + rate.mgmt_msg_head.status, out_size); + return -EINVAL; + } + + return 0; +} + +/* Set autoneg status and restart port link status */ +int hinic_reset_port_link_cfg(void *hwdev) +{ + struct hinic_reset_link_cfg reset_cfg; + u16 out_size = sizeof(reset_cfg); + int err; + + memset(&reset_cfg, 0, sizeof(reset_cfg)); + reset_cfg.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1; + reset_cfg.func_id = hinic_global_func_id(hwdev); + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_RESET_LINK_CFG, + &reset_cfg, sizeof(reset_cfg), + &reset_cfg, &out_size); + if (err || !out_size || reset_cfg.mgmt_msg_head.status) { + PMD_DRV_LOG(ERR, "Reset port link configure failed, err: %d, status: 0x%x, out size: 0x%x", + err, reset_cfg.mgmt_msg_head.status, out_size); + return -EFAULT; + } + + return 0; +} + +/** + * hinic_vf_func_init - Register VF to PF. + * + * @param hwdev + * The hardware interface of a nic device. + * + * @return + * 0 on success. + * negative error value otherwise. + */ +int hinic_vf_func_init(struct hinic_hwdev *hwdev) +{ + int err, state = 0; + + if (!HINIC_IS_VF(hwdev)) + return 0; + + err = hinic_mbox_to_pf(hwdev, HINIC_MOD_L2NIC, + HINIC_PORT_CMD_VF_REGISTER, &state, sizeof(state), + NULL, NULL, 0); + if (err) { + PMD_DRV_LOG(ERR, "Fail to register vf"); + return err; + } + + return 0; +} + +/** + * hinic_vf_func_free - Unregister VF from PF. + * + * @param hwdev + * The hardware interface of a nic device. + */ +void hinic_vf_func_free(struct hinic_hwdev *hwdev) +{ + int err; + + if (hinic_func_type(hwdev) != TYPE_VF) + return; + + err = hinic_mbox_to_pf(hwdev, HINIC_MOD_L2NIC, + HINIC_PORT_CMD_VF_UNREGISTER, &err, sizeof(err), + NULL, NULL, 0); + if (err) + PMD_DRV_LOG(ERR, "Fail to unregister VF, err: %d", err); +} + +int hinic_set_fast_recycle_mode(void *hwdev, u8 mode) +{ + struct hinic_fast_recycled_mode fast_recycled_mode; + u16 out_size = sizeof(fast_recycled_mode); + int err; + + if (!hwdev) { + PMD_DRV_LOG(ERR, "Hwdev is NULL"); + return -EINVAL; + } + + memset(&fast_recycled_mode, 0, sizeof(fast_recycled_mode)); + fast_recycled_mode.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1; + fast_recycled_mode.func_id = hinic_global_func_id(hwdev); + fast_recycled_mode.fast_recycled_mode = mode; + + err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM, + HINIC_MGMT_CMD_FAST_RECYCLE_MODE_SET, + &fast_recycled_mode, + sizeof(fast_recycled_mode), + &fast_recycled_mode, &out_size, 0); + if (err || fast_recycled_mode.mgmt_msg_head.status || !out_size) { + PMD_DRV_LOG(ERR, "Failed to set recycle mode, ret: %d", + fast_recycled_mode.mgmt_msg_head.status); + return -EFAULT; + } + + return 0; +} + +int hinic_clear_vport_stats(struct hinic_hwdev *hwdev) +{ + struct hinic_clear_vport_stats clear_vport_stats; + u16 out_size = sizeof(clear_vport_stats); + int err; + + if (!hwdev) { + PMD_DRV_LOG(ERR, "Hwdev is NULL"); + return -EINVAL; + } + + memset(&clear_vport_stats, 0, sizeof(clear_vport_stats)); + clear_vport_stats.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1; + clear_vport_stats.func_id = hinic_global_func_id(hwdev); + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_CLEAN_VPORT_STAT, + &clear_vport_stats, + sizeof(clear_vport_stats), + &clear_vport_stats, &out_size); + if (err || !out_size || clear_vport_stats.mgmt_msg_head.status) { + PMD_DRV_LOG(ERR, "Failed to clear vport statistics, err: %d, status: 0x%x, out size: 0x%x", + err, clear_vport_stats.mgmt_msg_head.status, out_size); + return -EINVAL; + } + + return 0; +} + +int hinic_clear_phy_port_stats(struct hinic_hwdev 
*hwdev) +{ + struct hinic_clear_port_stats clear_phy_port_stats; + u16 out_size = sizeof(clear_phy_port_stats); + int err; + + if (!hwdev) { + PMD_DRV_LOG(ERR, "Hwdev is NULL"); + return -EINVAL; + } + + memset(&clear_phy_port_stats, 0, sizeof(clear_phy_port_stats)); + clear_phy_port_stats.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1; + clear_phy_port_stats.func_id = hinic_global_func_id(hwdev); + + err = l2nic_msg_to_mgmt_sync(hwdev, + HINIC_PORT_CMD_CLEAR_PORT_STATISTICS, + &clear_phy_port_stats, + sizeof(clear_phy_port_stats), + &clear_phy_port_stats, &out_size); + if (err || !out_size || clear_phy_port_stats.mgmt_msg_head.status) { + PMD_DRV_LOG(ERR, "Failed to clear phy port statistics, err: %d, status: 0x%x, out size: 0x%x", + err, clear_phy_port_stats.mgmt_msg_head.status, + out_size); + return -EINVAL; + } + + return 0; +} + +int hinic_set_link_status_follow(void *hwdev, + enum hinic_link_follow_status status) +{ + struct hinic_set_link_follow follow; + u16 out_size = sizeof(follow); + int err; + + if (!hwdev) + return -EINVAL; + + if (HINIC_IS_VF((struct hinic_hwdev *)hwdev)) + return 0; + + if (status >= HINIC_LINK_FOLLOW_STATUS_MAX) { + PMD_DRV_LOG(ERR, "Invalid link follow status: %d", status); + return -EINVAL; + } + + memset(&follow, 0, sizeof(follow)); + follow.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1; + follow.func_id = hinic_global_func_id(hwdev); + follow.follow_status = status; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_LINK_FOLLOW, + &follow, sizeof(follow), + &follow, &out_size); + if ((follow.mgmt_msg_head.status != HINIC_MGMT_CMD_UNSUPPORTED && + follow.mgmt_msg_head.status) || err || !out_size) { + PMD_DRV_LOG(ERR, + "Failed to set link status follow phy port status, err: %d, status: 0x%x, out size: 0x%x", + err, follow.mgmt_msg_head.status, out_size); + return -EFAULT; + } + + return follow.mgmt_msg_head.status; +} + +int hinic_get_link_mode(void *hwdev, u32 *supported, u32 *advertised) +{ + struct hinic_link_mode_cmd link_mode; + u16 out_size = sizeof(link_mode); + int err; + + if (!hwdev || !supported || !advertised) + return -EINVAL; + + memset(&link_mode, 0, sizeof(link_mode)); + link_mode.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1; + link_mode.func_id = hinic_global_func_id(hwdev); + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_LINK_MODE, + &link_mode, sizeof(link_mode), + &link_mode, &out_size); + if (err || !out_size || link_mode.mgmt_msg_head.status) { + PMD_DRV_LOG(ERR, + "Failed to get link mode, err: %d, status: 0x%x, out size: 0x%x", + err, link_mode.mgmt_msg_head.status, out_size); + return -EINVAL; + } + + *supported = link_mode.supported; + *advertised = link_mode.advertised; + + return 0; +} + +/** + * hinic_set_xsfp_tx_status - Enable or disable the fiber in + * tx direction when set link up or down. + * + * @param hwdev + * The hardware interface of a nic device. + * @param enable + * Enable or Disable. + * + * @return + * 0 on success. + * negative error value otherwise. + */ +int hinic_set_xsfp_tx_status(void *hwdev, bool enable) +{ + struct hinic_set_xsfp_status xsfp_status; + u16 out_size = sizeof(struct hinic_set_xsfp_status); + int err; + + memset(&xsfp_status, 0, sizeof(xsfp_status)); + xsfp_status.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1; + xsfp_status.port_id = hinic_global_func_id(hwdev); + xsfp_status.xsfp_tx_dis = ((enable == 0) ? 
1 : 0); + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_XSFP_STATUS, + &xsfp_status, sizeof(struct hinic_set_xsfp_status), + &xsfp_status, &out_size); + if (err || !out_size || xsfp_status.mgmt_msg_head.status) { + PMD_DRV_LOG(ERR, + "Failed to %s port xsfp status, err: %d, status: 0x%x, out size: 0x%x\n", + enable ? "Disable" : "Enable", err, + xsfp_status.mgmt_msg_head.status, out_size); + return -EFAULT; + } + + return 0; +} + +/** + * hinic_flush_qp_res - Flush tx && rx chip resources in case of set vport + * fake failed when device start. + * + * @param hwdev + * The hardware interface of a nic device. + * + * @return + * 0 on success. + * negative error value otherwise. + */ +int hinic_flush_qp_res(void *hwdev) +{ + struct hinic_clear_qp_resource qp_res; + u16 out_size = sizeof(qp_res); + int err; + + memset(&qp_res, 0, sizeof(qp_res)); + qp_res.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1; + qp_res.func_id = hinic_global_func_id(hwdev); + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_CLEAR_QP_RES, + &qp_res, sizeof(qp_res), &qp_res, + &out_size); + if (err || !out_size || qp_res.mgmt_msg_head.status) { + PMD_DRV_LOG(ERR, "Failed to clear sq resources, err: %d, status: 0x%x, out size: 0x%x", + err, qp_res.mgmt_msg_head.status, out_size); + return -EINVAL; + } + + return 0; +} + +/** + * hinic_vf_get_default_cos - Get default cos of VF. + * + * @param hwdev + * The hardware interface of a nic device. + * @param cos_id + * Cos value. + * + * @return + * 0 on success. + * negative error value otherwise. + */ +int hinic_vf_get_default_cos(struct hinic_hwdev *hwdev, u8 *cos_id) +{ + struct hinic_vf_default_cos vf_cos; + u16 out_size = sizeof(vf_cos); + int err; + + memset(&vf_cos, 0, sizeof(vf_cos)); + vf_cos.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1; + + err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_L2NIC, + HINIC_PORT_CMD_GET_VF_COS, &vf_cos, + sizeof(vf_cos), &vf_cos, + &out_size, 0); + if (err || !out_size || vf_cos.mgmt_msg_head.status) { + PMD_DRV_LOG(ERR, "Get VF default cos failed, err: %d, status: 0x%x, out size: 0x%x", + err, vf_cos.mgmt_msg_head.status, out_size); + return -EFAULT; + } + *cos_id = vf_cos.state.default_cos; + + return 0; +} + +/** + * hinic_set_fdir_filter - Set fdir filter for control path + * packet to notify firmware. + * + * @param hwdev + * The hardware interface of a nic device. + * @param filter_type + * Packet type to filter. + * @param qid + * Rx qid to filter. + * @param type_enable + * The status of pkt type filter. + * @param enable + * Fdir function Enable or Disable. + * @return + * 0 on success, + * negative error value otherwise. 
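+ *
+ * A minimal usage sketch (ctrl_pkt_type is a placeholder for the packet
+ * type id the caller wants to steer; queue 0 and type_enable = 1 are
+ * only example values):
+ *
+ *   err = hinic_set_fdir_filter(hwdev, ctrl_pkt_type, 0, 1, true);
+ *   if (err)
+ *       PMD_DRV_LOG(ERR, "Enable queue filter for control packets failed");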
+ */ +int hinic_set_fdir_filter(void *hwdev, u8 filter_type, u8 qid, u8 type_enable, + bool enable) +{ + struct hinic_port_qfilter_info port_filer_cmd; + u16 out_size = sizeof(port_filer_cmd); + int err; + + if (!hwdev) + return -EINVAL; + + memset(&port_filer_cmd, 0, sizeof(port_filer_cmd)); + port_filer_cmd.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1; + port_filer_cmd.func_id = hinic_global_func_id(hwdev); + port_filer_cmd.filter_enable = (u8)enable; + port_filer_cmd.filter_type = filter_type; + port_filer_cmd.qid = qid; + port_filer_cmd.filter_type_enable = type_enable; + port_filer_cmd.fdir_flag = 0; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_Q_FILTER, + &port_filer_cmd, sizeof(port_filer_cmd), + &port_filer_cmd, &out_size); + if (err || !out_size || port_filer_cmd.mgmt_msg_head.status) { + PMD_DRV_LOG(ERR, "Set port Q filter failed, err: %d, status: 0x%x, out size: 0x%x, type: 0x%x," + " enable: 0x%x, qid: 0x%x, filter_type_enable: 0x%x\n", + err, port_filer_cmd.mgmt_msg_head.status, out_size, + filter_type, enable, qid, type_enable); + return -EFAULT; + } + + return 0; +} + +/** + * hinic_set_normal_filter - Set fdir filter for IO path packet. + * + * @param hwdev + * The hardware interface of a nic device. + * @param qid + * Rx qid to filter. + * @param normal_type_enable + * IO path packet function Enable or Disable + * @param key + * IO path packet filter key value, such as DIP from pkt. + * @param enable + * Fdir function Enable or Disable. + * @param flag + * Filter flag, such as dip or others. + * @return + * 0 on success, + * negative error value otherwise. + */ +int hinic_set_normal_filter(void *hwdev, u8 qid, u8 normal_type_enable, + u32 key, bool enable, u8 flag) +{ + struct hinic_port_qfilter_info port_filer_cmd; + u16 out_size = sizeof(port_filer_cmd); + int err; + + if (!hwdev) + return -EINVAL; + + memset(&port_filer_cmd, 0, sizeof(port_filer_cmd)); + port_filer_cmd.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1; + port_filer_cmd.func_id = hinic_global_func_id(hwdev); + port_filer_cmd.filter_enable = (u8)enable; + port_filer_cmd.qid = qid; + port_filer_cmd.normal_type_enable = normal_type_enable; + port_filer_cmd.fdir_flag = flag; /* fdir flag: support dip */ + port_filer_cmd.key = key; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_Q_FILTER, + &port_filer_cmd, sizeof(port_filer_cmd), + &port_filer_cmd, &out_size); + if (err || !out_size || port_filer_cmd.mgmt_msg_head.status) { + PMD_DRV_LOG(ERR, "Set normal filter failed, err: %d, status: 0x%x, out size: 0x%x, fdir_flag: 0x%x," + " enable: 0x%x, qid: 0x%x, normal_type_enable: 0x%x, key:0x%x\n", + err, port_filer_cmd.mgmt_msg_head.status, out_size, + flag, enable, qid, normal_type_enable, key); + return -EFAULT; + } + + return 0; +} + +/** + * hinic_set_fdir_tcam - Set fdir filter for control packet + * by tcam table to notify hardware. + * + * @param hwdev + * The hardware interface of a nic device. + * @param type_mask + * Index of TCAM. + * @param filter_rule + * TCAM rule for control packet, such as lacp or bgp. + * @param filter_action + * TCAM action for control packet, such as accept or drop. + * @return + * 0 on success, + * negative error value otherwise. 
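+ *
+ * A minimal usage sketch, assuming the caller has already picked a TCAM
+ * index and knows how to encode the match and action (pkt_type and
+ * tcam_index below are placeholders, not constants from this driver):
+ *
+ *   struct tag_pa_rule rule;
+ *   struct tag_pa_action action;
+ *
+ *   memset(&rule, 0, sizeof(rule));
+ *   memset(&action, 0, sizeof(action));
+ *   action.pkt_type = pkt_type;
+ *   if (hinic_set_fdir_tcam(hwdev, tcam_index, &rule, &action))
+ *       PMD_DRV_LOG(ERR, "Install control packet TCAM rule failed");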
+ */ +int hinic_set_fdir_tcam(void *hwdev, u16 type_mask, + struct tag_pa_rule *filter_rule, + struct tag_pa_action *filter_action) +{ + struct hinic_fdir_tcam_info port_tcam_cmd; + u16 out_size = sizeof(port_tcam_cmd); + int err; + + if (!hwdev) + return -EINVAL; + + memset(&port_tcam_cmd, 0, sizeof(port_tcam_cmd)); + port_tcam_cmd.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1; + port_tcam_cmd.tcam_index = type_mask; + port_tcam_cmd.flag = TCAM_SET; + memcpy((void *)&port_tcam_cmd.filter_rule, + (void *)filter_rule, sizeof(struct tag_pa_rule)); + memcpy((void *)&port_tcam_cmd.filter_action, + (void *)filter_action, sizeof(struct tag_pa_action)); + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_TCAM_FILTER, + &port_tcam_cmd, sizeof(port_tcam_cmd), + &port_tcam_cmd, &out_size); + if (err || !out_size || port_tcam_cmd.mgmt_msg_head.status) { + PMD_DRV_LOG(ERR, "Set tcam table failed, err: %d, status: 0x%x, out size: 0x%x", + err, port_tcam_cmd.mgmt_msg_head.status, out_size); + return -EFAULT; + } + + return 0; +} + +/** + * hinic_clear_fdir_tcam - Clear fdir filter TCAM table for control packet. + * + * @param hwdev + * The hardware interface of a nic device. + * @param type_mask + * Index of TCAM. + * @return + * 0 on success, + * negative error value otherwise. + */ +int hinic_clear_fdir_tcam(void *hwdev, u16 type_mask) +{ + struct hinic_fdir_tcam_info port_tcam_cmd; + u16 out_size = sizeof(port_tcam_cmd); + int err; + + if (!hwdev) + return -EINVAL; + + memset(&port_tcam_cmd, 0, sizeof(port_tcam_cmd)); + port_tcam_cmd.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1; + port_tcam_cmd.tcam_index = type_mask; + port_tcam_cmd.flag = TCAM_CLEAR; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_TCAM_FILTER, + &port_tcam_cmd, sizeof(port_tcam_cmd), + &port_tcam_cmd, &out_size); + if (err || !out_size || port_tcam_cmd.mgmt_msg_head.status) { + PMD_DRV_LOG(ERR, "Clear tcam table failed, err: %d, status: 0x%x, out size: 0x%x", + err, port_tcam_cmd.mgmt_msg_head.status, out_size); + return -EFAULT; + } + + return 0; +} + +int hinic_add_tcam_rule(void *hwdev, struct tag_tcam_cfg_rule *tcam_rule) +{ + u16 out_size = sizeof(struct tag_fdir_add_rule_cmd); + struct tag_fdir_add_rule_cmd tcam_cmd; + int err; + + if (!hwdev) { + PMD_DRV_LOG(ERR, "Hwdev is NULL"); + return -EINVAL; + } + + if (tcam_rule->index >= HINIC_MAX_TCAM_RULES_NUM) { + PMD_DRV_LOG(ERR, "Tcam rules num to add is invalid"); + return -EFAULT; + } + + memset(&tcam_cmd, 0, sizeof(struct tag_fdir_add_rule_cmd)); + tcam_cmd.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1; + memcpy((void *)&tcam_cmd.rule, (void *)tcam_rule, + sizeof(struct tag_tcam_cfg_rule)); + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_UP_TC_ADD_FLOW, + &tcam_cmd, sizeof(tcam_cmd), + &tcam_cmd, &out_size); + if (err || tcam_cmd.mgmt_msg_head.status || !out_size) { + PMD_DRV_LOG(ERR, + "Add tcam rule failed, err: %d, status: 0x%x, out size: 0x%x", + err, tcam_cmd.mgmt_msg_head.status, out_size); + return -EFAULT; + } + + return 0; +} + +int hinic_del_tcam_rule(void *hwdev, u32 index) +{ + u16 out_size = sizeof(struct tag_fdir_del_rule_cmd); + struct tag_fdir_del_rule_cmd tcam_cmd; + int err; + + if (!hwdev) { + PMD_DRV_LOG(ERR, "Hwdev is NULL"); + return -EINVAL; + } + + if (index >= HINIC_MAX_TCAM_RULES_NUM) { + PMD_DRV_LOG(ERR, "Tcam rules num to del is invalid"); + return -EFAULT; + } + + memset(&tcam_cmd, 0, sizeof(struct tag_fdir_del_rule_cmd)); + tcam_cmd.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1; + tcam_cmd.index_start = index; + tcam_cmd.index_num = 1; + + err = 
l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_UP_TC_DEL_FLOW, + &tcam_cmd, sizeof(tcam_cmd), + &tcam_cmd, &out_size); + if (err || tcam_cmd.mgmt_msg_head.status || !out_size) { + PMD_DRV_LOG(ERR, + "Del tcam rule failed, err: %d, status: 0x%x, out size: 0x%x", + err, tcam_cmd.mgmt_msg_head.status, out_size); + return -EFAULT; + } + + return 0; +} + +static int hinic_mgmt_tcam_block(void *hwdev, u8 alloc_en, + u8 block_type, u16 *index) +{ + struct hinic_cmd_ctrl_tcam_block tcam_block_info; + u16 out_size = sizeof(struct hinic_cmd_ctrl_tcam_block); + struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev; + int err; + + if (!hwdev) { + PMD_DRV_LOG(ERR, "Hwdev is NULL"); + return -EINVAL; + } + + memset(&tcam_block_info, 0, sizeof(struct hinic_cmd_ctrl_tcam_block)); + tcam_block_info.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1; + tcam_block_info.func_id = hinic_global_func_id(hwdev); + tcam_block_info.alloc_en = alloc_en; + tcam_block_info.tcam_type = block_type; + tcam_block_info.tcam_block_index = *index; + + err = l2nic_msg_to_mgmt_sync(hwdev, + HINIC_PORT_CMD_UP_TC_CTRL_TCAM_BLOCK, + &tcam_block_info, sizeof(tcam_block_info), + &tcam_block_info, &out_size); + if (tcam_block_info.mgmt_msg_head.status == + HINIC_MGMT_CMD_UNSUPPORTED) { + err = HINIC_MGMT_CMD_UNSUPPORTED; + PMD_DRV_LOG(INFO, "Firmware/uP doesn't support alloc or del tcam block"); + return err; + } else if ((err == HINIC_MBOX_VF_CMD_ERROR) && + (HINIC_IS_VF(nic_hwdev))) { + err = HINIC_MGMT_CMD_UNSUPPORTED; + PMD_DRV_LOG(INFO, "VF doesn't support alloc and del tcam block."); + return err; + } else if (err || (!out_size) || tcam_block_info.mgmt_msg_head.status) { + PMD_DRV_LOG(ERR, + "Set tcam block failed, err: %d, status: 0x%x, out size: 0x%x", + err, tcam_block_info.mgmt_msg_head.status, out_size); + return -EFAULT; + } + + if (alloc_en) + *index = tcam_block_info.tcam_block_index; + + return 0; +} + +int hinic_alloc_tcam_block(void *hwdev, u8 block_type, u16 *index) +{ + return hinic_mgmt_tcam_block(hwdev, HINIC_TCAM_BLOCK_ENABLE, + block_type, index); +} + +int hinic_free_tcam_block(void *hwdev, u8 block_type, u16 *index) +{ + return hinic_mgmt_tcam_block(hwdev, HINIC_TCAM_BLOCK_DISABLE, + block_type, index); +} + +int hinic_flush_tcam_rule(void *hwdev) +{ + struct hinic_cmd_flush_tcam_rules tcam_flush; + u16 out_size = sizeof(struct hinic_cmd_flush_tcam_rules); + struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev; + int err; + + if (!hwdev) { + PMD_DRV_LOG(ERR, "Hwdev is NULL"); + return -EINVAL; + } + + memset(&tcam_flush, 0, sizeof(struct hinic_cmd_flush_tcam_rules)); + tcam_flush.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1; + tcam_flush.func_id = hinic_global_func_id(hwdev); + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_UP_TC_FLUSH_TCAM, + &tcam_flush, sizeof(struct hinic_cmd_flush_tcam_rules), + &tcam_flush, &out_size); + if (tcam_flush.mgmt_msg_head.status == HINIC_MGMT_CMD_UNSUPPORTED) { + err = HINIC_MGMT_CMD_UNSUPPORTED; + PMD_DRV_LOG(INFO, "Firmware/uP doesn't support flush tcam fdir"); + } else if ((err == HINIC_MBOX_VF_CMD_ERROR) && + (HINIC_IS_VF(nic_hwdev))) { + err = HINIC_MGMT_CMD_UNSUPPORTED; + PMD_DRV_LOG(INFO, "VF doesn't support flush tcam fdir"); + } else if (err || (!out_size) || tcam_flush.mgmt_msg_head.status) { + PMD_DRV_LOG(ERR, + "Flush tcam fdir rules failed, err: %d, status: 0x%x, out size: 0x%x", + err, tcam_flush.mgmt_msg_head.status, out_size); + err = -EFAULT; + } + + return err; +} + diff --git a/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_niccfg.h 
b/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_niccfg.h new file mode 100644 index 000000000..846b5973e --- /dev/null +++ b/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_niccfg.h @@ -0,0 +1,944 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Huawei Technologies Co., Ltd + */ + +#ifndef _HINIC_PMD_NICCFG_H_ +#define _HINIC_PMD_NICCFG_H_ + +#define OS_VF_ID_TO_HW(os_vf_id) ((os_vf_id) + 1) +#define HW_VF_ID_TO_OS(hw_vf_id) ((hw_vf_id) - 1) + +#define HINIC_VLAN_PRIORITY_SHIFT 13 + +#define HINIC_RSS_INDIR_SIZE 256 +#define HINIC_DCB_TC_MAX 0x8 +#define HINIC_DCB_UP_MAX 0x8 +#define HINIC_DCB_PG_MAX 0x8 +#define HINIC_RSS_KEY_SIZE 40 + +#define HINIC_MAX_NUM_RQ 64 + +#define ANTI_ATTACK_DEFAULT_CIR 500000 +#define ANTI_ATTACK_DEFAULT_XIR 600000 +#define ANTI_ATTACK_DEFAULT_CBS 10000000 +#define ANTI_ATTACK_DEFAULT_XBS 12000000 + +#define NIC_RSS_INDIR_SIZE 256 +#define NIC_RSS_KEY_SIZE 40 +#define NIC_RSS_CMD_TEMP_ALLOC 0x01 +#define NIC_RSS_CMD_TEMP_FREE 0x02 +#define NIC_DCB_UP_MAX 0x8 + +enum hinic_rss_hash_type { + HINIC_RSS_HASH_ENGINE_TYPE_XOR = 0, + HINIC_RSS_HASH_ENGINE_TYPE_TOEP, + + HINIC_RSS_HASH_ENGINE_TYPE_MAX, +}; + +struct nic_port_info { + u8 port_type; + u8 autoneg_cap; + u8 autoneg_state; + u8 duplex; + u8 speed; +}; + +enum nic_speed_level { + LINK_SPEED_10MB = 0, + LINK_SPEED_100MB, + LINK_SPEED_1GB, + LINK_SPEED_10GB, + LINK_SPEED_25GB, + LINK_SPEED_40GB, + LINK_SPEED_100GB, + LINK_SPEED_MAX +}; + +enum hinic_link_status { + HINIC_LINK_DOWN = 0, + HINIC_LINK_UP +}; + +struct hinic_up_ets_cfg { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u8 port_id; + u8 rsvd1[3]; + u8 up_tc[HINIC_DCB_UP_MAX]; + u8 pg_bw[HINIC_DCB_PG_MAX]; + u8 pgid[HINIC_DCB_UP_MAX]; + u8 up_bw[HINIC_DCB_UP_MAX]; + u8 prio[HINIC_DCB_PG_MAX]; +}; + +struct nic_pause_config { + u32 auto_neg; + u32 rx_pause; + u32 tx_pause; +}; + +struct nic_rss_type { + u8 tcp_ipv6_ext; + u8 ipv6_ext; + u8 tcp_ipv6; + u8 ipv6; + u8 tcp_ipv4; + u8 ipv4; + u8 udp_ipv6; + u8 udp_ipv4; +}; + +enum hinic_rx_mod { + HINIC_RX_MODE_UC = 1 << 0, + HINIC_RX_MODE_MC = 1 << 1, + HINIC_RX_MODE_BC = 1 << 2, + HINIC_RX_MODE_MC_ALL = 1 << 3, + HINIC_RX_MODE_PROMISC = 1 << 4, +}; + +enum hinic_link_mode { + HINIC_10GE_BASE_KR = 0, + HINIC_40GE_BASE_KR4 = 1, + HINIC_40GE_BASE_CR4 = 2, + HINIC_100GE_BASE_KR4 = 3, + HINIC_100GE_BASE_CR4 = 4, + HINIC_25GE_BASE_KR_S = 5, + HINIC_25GE_BASE_CR_S = 6, + HINIC_25GE_BASE_KR = 7, + HINIC_25GE_BASE_CR = 8, + HINIC_GE_BASE_KX = 9, + HINIC_LINK_MODE_NUMBERS, + + HINIC_SUPPORTED_UNKNOWN = 0xFFFF, +}; + +#define HINIC_DEFAULT_RX_MODE (HINIC_RX_MODE_UC | HINIC_RX_MODE_MC | \ + HINIC_RX_MODE_BC) + +#define HINIC_MAX_MTU_SIZE (9600) +#define HINIC_MIN_MTU_SIZE (256) + +/* MIN_MTU + ETH_HLEN + CRC (256+14+4) */ +#define HINIC_MIN_FRAME_SIZE 274 + +/* MAX_MTU + ETH_HLEN + CRC + VLAN(9600+14+4+4) */ +#define HINIC_MAX_JUMBO_FRAME_SIZE (9622) + +#define HINIC_PORT_DISABLE 0x0 +#define HINIC_PORT_ENABLE 0x3 + +struct hinic_vport_stats { + u64 tx_unicast_pkts_vport; + u64 tx_unicast_bytes_vport; + u64 tx_multicast_pkts_vport; + u64 tx_multicast_bytes_vport; + u64 tx_broadcast_pkts_vport; + u64 tx_broadcast_bytes_vport; + + u64 rx_unicast_pkts_vport; + u64 rx_unicast_bytes_vport; + u64 rx_multicast_pkts_vport; + u64 rx_multicast_bytes_vport; + u64 rx_broadcast_pkts_vport; + u64 rx_broadcast_bytes_vport; + + u64 tx_discard_vport; + u64 rx_discard_vport; + u64 tx_err_vport; + u64 rx_err_vport; /* rx checksum err pkts in ucode */ +}; + +struct hinic_phy_port_stats { + u64 mac_rx_total_pkt_num; 
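+ /* Per-physical-port MAC and HiGig2 counters; the whole structure is filled by hinic_get_phy_port_stats(). */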
+ u64 mac_rx_total_oct_num; + u64 mac_rx_bad_pkt_num; + u64 mac_rx_bad_oct_num; + u64 mac_rx_good_pkt_num; + u64 mac_rx_good_oct_num; + u64 mac_rx_uni_pkt_num; + u64 mac_rx_multi_pkt_num; + u64 mac_rx_broad_pkt_num; + + u64 mac_tx_total_pkt_num; + u64 mac_tx_total_oct_num; + u64 mac_tx_bad_pkt_num; + u64 mac_tx_bad_oct_num; + u64 mac_tx_good_pkt_num; + u64 mac_tx_good_oct_num; + u64 mac_tx_uni_pkt_num; + u64 mac_tx_multi_pkt_num; + u64 mac_tx_broad_pkt_num; + + u64 mac_rx_fragment_pkt_num; + u64 mac_rx_undersize_pkt_num; + u64 mac_rx_undermin_pkt_num; + u64 mac_rx_64_oct_pkt_num; + u64 mac_rx_65_127_oct_pkt_num; + u64 mac_rx_128_255_oct_pkt_num; + u64 mac_rx_256_511_oct_pkt_num; + u64 mac_rx_512_1023_oct_pkt_num; + u64 mac_rx_1024_1518_oct_pkt_num; + u64 mac_rx_1519_2047_oct_pkt_num; + u64 mac_rx_2048_4095_oct_pkt_num; + u64 mac_rx_4096_8191_oct_pkt_num; + u64 mac_rx_8192_9216_oct_pkt_num; + u64 mac_rx_9217_12287_oct_pkt_num; + u64 mac_rx_12288_16383_oct_pkt_num; + u64 mac_rx_1519_max_bad_pkt_num; + u64 mac_rx_1519_max_good_pkt_num; + u64 mac_rx_oversize_pkt_num; + u64 mac_rx_jabber_pkt_num; + + u64 mac_rx_mac_pause_num; + u64 mac_rx_pfc_pkt_num; + u64 mac_rx_pfc_pri0_pkt_num; + u64 mac_rx_pfc_pri1_pkt_num; + u64 mac_rx_pfc_pri2_pkt_num; + u64 mac_rx_pfc_pri3_pkt_num; + u64 mac_rx_pfc_pri4_pkt_num; + u64 mac_rx_pfc_pri5_pkt_num; + u64 mac_rx_pfc_pri6_pkt_num; + u64 mac_rx_pfc_pri7_pkt_num; + u64 mac_rx_mac_control_pkt_num; + u64 mac_rx_y1731_pkt_num; + u64 mac_rx_sym_err_pkt_num; + u64 mac_rx_fcs_err_pkt_num; + u64 mac_rx_send_app_good_pkt_num; + u64 mac_rx_send_app_bad_pkt_num; + + u64 mac_tx_fragment_pkt_num; + u64 mac_tx_undersize_pkt_num; + u64 mac_tx_undermin_pkt_num; + u64 mac_tx_64_oct_pkt_num; + u64 mac_tx_65_127_oct_pkt_num; + u64 mac_tx_128_255_oct_pkt_num; + u64 mac_tx_256_511_oct_pkt_num; + u64 mac_tx_512_1023_oct_pkt_num; + u64 mac_tx_1024_1518_oct_pkt_num; + u64 mac_tx_1519_2047_oct_pkt_num; + u64 mac_tx_2048_4095_oct_pkt_num; + u64 mac_tx_4096_8191_oct_pkt_num; + u64 mac_tx_8192_9216_oct_pkt_num; + u64 mac_tx_9217_12287_oct_pkt_num; + u64 mac_tx_12288_16383_oct_pkt_num; + u64 mac_tx_1519_max_bad_pkt_num; + u64 mac_tx_1519_max_good_pkt_num; + u64 mac_tx_oversize_pkt_num; + u64 mac_trans_jabber_pkt_num; + + u64 mac_tx_mac_pause_num; + u64 mac_tx_pfc_pkt_num; + u64 mac_tx_pfc_pri0_pkt_num; + u64 mac_tx_pfc_pri1_pkt_num; + u64 mac_tx_pfc_pri2_pkt_num; + u64 mac_tx_pfc_pri3_pkt_num; + u64 mac_tx_pfc_pri4_pkt_num; + u64 mac_tx_pfc_pri5_pkt_num; + u64 mac_tx_pfc_pri6_pkt_num; + u64 mac_tx_pfc_pri7_pkt_num; + u64 mac_tx_mac_control_pkt_num; + u64 mac_tx_y1731_pkt_num; + u64 mac_tx_1588_pkt_num; + u64 mac_tx_err_all_pkt_num; + u64 mac_tx_from_app_good_pkt_num; + u64 mac_tx_from_app_bad_pkt_num; + + u64 rx_higig2_ext_pkts_port; + u64 rx_higig2_message_pkts_port; + u64 rx_higig2_error_pkts_port; + u64 rx_higig2_cpu_ctrl_pkts_port; + u64 rx_higig2_unicast_pkts_port; + u64 rx_higig2_broadcast_pkts_port; + u64 rx_higig2_l2_multicast_pkts; + u64 rx_higig2_l3_multicast_pkts; + + u64 tx_higig2_message_pkts_port; + u64 tx_higig2_ext_pkts_port; + u64 tx_higig2_cpu_ctrl_pkts_port; + u64 tx_higig2_unicast_pkts_port; + u64 tx_higig2_broadcast_pkts_port; + u64 tx_higig2_l2_multicast_pkts; + u64 tx_higig2_l3_multicast_pkts; +}; + +enum hinic_link_follow_status { + HINIC_LINK_FOLLOW_DEFAULT, + HINIC_LINK_FOLLOW_PORT, + HINIC_LINK_FOLLOW_SEPARATE, + HINIC_LINK_FOLLOW_STATUS_MAX, +}; + +#define HINIC_PORT_STATS_VERSION 0 +struct hinic_port_stats_info { + struct hinic_mgmt_msg_head mgmt_msg_head; + + 
u16 func_id; + u16 rsvd1; + u32 stats_version; + u32 stats_size; +}; + +struct hinic_port_stats { + struct hinic_mgmt_msg_head mgmt_msg_head; + + struct hinic_phy_port_stats stats; +}; + +struct hinic_cmd_vport_stats { + struct hinic_mgmt_msg_head mgmt_msg_head; + + struct hinic_vport_stats stats; +}; + +struct hinic_clear_port_stats { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u16 rsvd; + u32 stats_version; + u32 stats_size; +}; + +struct hinic_clear_vport_stats { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u16 rsvd; + u32 stats_version; + u32 stats_size; +}; + +struct hinic_fast_recycled_mode { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + /* + * 1: enable fast recycle, available in dpdk mode, + * 0: normal mode, available in kernel nic mode + */ + u8 fast_recycled_mode; + u8 rsvd1; +}; + +struct hinic_function_table { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u16 rx_wqe_buf_size; + u32 mtu; +}; + +struct hinic_cmd_qpn { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u16 base_qpn; +}; + +struct hinic_port_mac_set { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u16 vlan_id; + u16 rsvd1; + u8 mac[ETH_ALEN]; +}; + +struct hinic_port_mac_update { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u16 vlan_id; + u16 rsvd1; + u8 old_mac[ETH_ALEN]; + u16 rsvd2; + u8 new_mac[ETH_ALEN]; +}; + +struct hinic_vport_state { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u16 rsvd1; + u8 state; + u8 rsvd2[3]; +}; + +struct hinic_port_state { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u8 state; + u8 rsvd1[3]; +}; + +struct hinic_mtu { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u16 rsvd1; + u32 mtu; +}; + +struct hinic_vlan_config { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u16 vlan_id; +}; + +struct hinic_vlan_filter { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u8 rsvd1[2]; + u32 vlan_filter_ctrl; +}; + +struct hinic_vlan_offload { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u8 vlan_rx_offload; + u8 rsvd1[5]; +}; + +struct hinic_get_link { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u8 link_status; + u8 rsvd1; +}; + +#define HINIC_DEFAUT_PAUSE_CONFIG 1 +struct hinic_pause_config { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u16 rsvd1; + u32 auto_neg; + u32 rx_pause; + u32 tx_pause; +}; + +struct hinic_port_info { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u16 rsvd1; + u8 port_type; + u8 autoneg_cap; + u8 autoneg_state; + u8 duplex; + u8 speed; + u8 resv2[3]; +}; + +struct hinic_tso_config { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u16 rsvd1; + u8 tso_en; + u8 resv2[3]; +}; + +struct hinic_lro_config { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u16 rsvd1; + u8 lro_ipv4_en; + u8 lro_ipv6_en; + u8 lro_max_wqe_num; + u8 resv2[13]; +}; + +struct hinic_checksum_offload { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u16 rsvd1; + u32 rx_csum_offload; +}; + +struct hinic_rx_mode_config { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u16 rsvd1; + u32 rx_mode; +}; + +#define HINIC_MGMT_VERSION_MAX_LEN 32 +#define HINIC_COMPILE_TIME_LEN 20 +#define HINIC_FW_VERSION_NAME 16 + +struct hinic_version_info { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u8 ver[HINIC_FW_VERSION_NAME]; + u8 time[HINIC_COMPILE_TIME_LEN]; +}; + +/* rss */ +struct nic_rss_indirect_tbl { + 
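+ /* 256-entry (NIC_RSS_INDIR_SIZE) RSS indirection table image; each entry holds the RX queue index selected for that hash bucket. */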
u32 group_index; + u32 offset; + u32 size; + u32 rsvd; + u8 entry[NIC_RSS_INDIR_SIZE]; +}; + +struct nic_rss_context_tbl { + u32 group_index; + u32 offset; + u32 size; + u32 rsvd; + u32 ctx; +}; + +struct hinic_rss_config { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u8 rss_en; + u8 template_id; + u8 rq_priority_number; + u8 rsvd1[3]; + u8 prio_tc[NIC_DCB_UP_MAX]; +}; + +struct hinic_rss_template_mgmt { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u8 cmd; + u8 template_id; + u8 rsvd1[4]; +}; + +struct hinic_rss_indir_table { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u8 template_id; + u8 rsvd1; + u8 indir[NIC_RSS_INDIR_SIZE]; +}; + +struct hinic_rss_template_key { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u8 template_id; + u8 rsvd1; + u8 key[NIC_RSS_KEY_SIZE]; +}; + +struct hinic_rss_engine_type { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u8 template_id; + u8 hash_engine; + u8 rsvd1[4]; +}; + +struct hinic_rss_context_table { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u8 template_id; + u8 rsvd1; + u32 context; +}; + +struct hinic_reset_link_cfg { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u16 rsvd1; +}; + +struct hinic_set_vhd_mode { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u16 vhd_type; + u16 rx_wqe_buffer_size; + u16 rsvd; +}; + +struct hinic_set_link_follow { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u16 rsvd0; + u8 follow_status; + u8 rsvd1[3]; +}; + +struct hinic_link_mode_cmd { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u16 rsvd1; + u16 supported; /* 0xFFFF represent Invalid value */ + u16 advertised; +}; + +struct hinic_set_xsfp_status { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u32 port_id; + u32 xsfp_tx_dis; /* 0: tx enable; 1: tx disable */ +}; + +struct hinic_clear_qp_resource { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u16 rsvd1; +}; + +struct hinic_dcb_state { + u8 dcb_on; + u8 default_cos; + u8 up_cos[8]; +}; + +struct hinic_vf_default_cos { + struct hinic_mgmt_msg_head mgmt_msg_head; + + struct hinic_dcb_state state; +}; + +/* set physical port Anti-Attack rate */ +struct hinic_port_anti_attack_rate { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u16 enable; /* 1: enable rate-limiting, 0: disable rate-limiting */ + u32 cir; /* Committed Information Rate */ + u32 xir; /* eXtended Information Rate */ + u32 cbs; /* Committed Burst Size */ + u32 xbs; /* eXtended Burst Size */ +}; + +struct pa_u8_s { + u8 val8; + u8 mask8; +}; + +struct pa_u16_s { + u16 val16; + u16 mask16; +}; + +struct pa_u32_s { + u32 val32; + u32 mask32; +}; + +struct pa_u48_s { + u8 val8[6]; + u8 mask8[6]; +}; + +struct pa_u64_s { + u8 val8[8]; + u8 mask8[8]; +}; + +struct tag_pa_eth_ip_header { + struct pa_u8_s ip_ver; /* 3bit */ + struct pa_u8_s ipv4_option_flag; /* 1bit */ + /* 8bit ipv4 option or ipv6 next header */ + struct pa_u8_s protocol; + struct pa_u8_s dscp; /* 6bit DSCP */ +}; + +struct tag_pa_common_l2_header { + struct pa_u48_s dmac; /* dmac 48bit */ + struct pa_u16_s eth_type; /* ethernet type/length 16bit */ + struct pa_u8_s tag_flag; /* tag flag: 4bit */ + struct pa_u8_s np2np_hdr_qindex; /* NP2NP Header Qindex 4bit */ + struct pa_u8_s e_tag_pcp; /* 3bit */ + struct pa_u8_s vlan_layer; /* 2bit */ + struct pa_u8_s s_tag; /* 3bit */ + struct pa_u8_s c_tag; /* 3bit */ + struct pa_u16_s vlan_id; /* 12bit */ +}; + +struct tag_pa_tcp { + struct pa_u16_s 
sport; /* 16bit */ + struct pa_u16_s dport; /* 16bit */ + struct pa_u16_s tcp_flag; /* 6bit */ +}; + +struct tag_pa_udp { + struct pa_u16_s sport; /* 16bit */ + struct pa_u16_s dport; /* 16bit */ + /* 8bit : + * 1.udp dport=67/68 && ipv4 protocol=0x11 + * 2.udp dport=546/547 && ipv6 next header=0x11 + * 3. do not care + */ + struct pa_u8_s dhcp_op_or_msg_type; +}; + +/* ICMP: + * ipv4 protocol = 0x1 + * ipv6 next header = 0x3A + */ +struct tag_pa_icmp { + struct pa_u8_s type; /* 8bit */ + struct pa_u8_s code; /* 8bit */ +}; + +/* IGMP: + * ipv4 protocol = 0x2 + */ +struct tag_pa_ipv4_igmp { + struct pa_u32_s dip; /* 32bit */ + struct pa_u8_s type; /* 8bit */ +}; + +struct tag_pa_rule { + struct pa_u8_s ncsi_flag; /* 1bit valid */ + struct tag_pa_common_l2_header l2_header; + + u8 eth_type; + + struct pa_u64_s eth_other; /* eth_type=other 64bit */ + struct pa_u8_s eth_roce_opcode; /* eth_type=roce 8bit opcode */ + + struct tag_pa_eth_ip_header ip_header; /* eth_type=ip */ + + u8 ip_protocol_type; + + struct tag_pa_tcp eth_ip_tcp; /* eth_type=ip && ip_protocol = tcp */ + struct tag_pa_udp eth_ip_udp; /* eth_type=ip && ip_protocol = udp */ + struct tag_pa_icmp eth_ip_icmp; /* eth_type=ip && ip_protocol = icmp */ + + /* eth_type=ip && ip_protocol = ipv4_igmp */ + struct tag_pa_ipv4_igmp eth_ipv4_igmp; + + /* eth_type=ip && ip_protocol = sctp; + * 16bit ipv4 protocol=0x84 or ipv6 nhr=0x84 + */ + struct pa_u16_s eth_ip_sctp; +}; + +struct tag_pa_action { + u16 pkt_type; + u8 err_type; + u8 pri; + u8 fwd_action; + u8 push_len; +}; + +struct hinic_fdir_tcam_info { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 tcam_index; + u8 flag; /* clear or set tcam table flag */ + u8 rsvd1; + struct tag_pa_rule filter_rule; + struct tag_pa_action filter_action; +}; + +#define TCAM_SET 0x1 +#define TCAM_CLEAR 0x2 + +struct hinic_port_qfilter_info { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u8 normal_type_enable; + u8 filter_type_enable; + u8 filter_enable; + u8 filter_type; + u8 qid; + u8 fdir_flag; + u32 key; +}; + +#define HINIC_MAX_TCAM_RULES_NUM (10240) +#define HINIC_TCAM_BLOCK_ENABLE 1 +#define HINIC_TCAM_BLOCK_DISABLE 0 + +struct tag_tcam_result { + u32 qid; + u32 rsvd; +}; + +#define TCAM_FLOW_KEY_SIZE 24 + +struct tag_tcam_key_x_y { + u8 x[TCAM_FLOW_KEY_SIZE]; + u8 y[TCAM_FLOW_KEY_SIZE]; +}; + +struct tag_tcam_cfg_rule { + u32 index; + struct tag_tcam_result data; + struct tag_tcam_key_x_y key; +}; + +struct tag_fdir_add_rule_cmd { + struct hinic_mgmt_msg_head mgmt_msg_head; + struct tag_tcam_cfg_rule rule; +}; + +struct tag_fdir_del_rule_cmd { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u32 index_start; + u32 index_num; +}; + +struct hinic_cmd_flush_tcam_rules { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u16 rsvd; +}; + +struct hinic_cmd_ctrl_tcam_block { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u8 alloc_en; /* 0: free tcam block, 1: alloc tcam block */ + /* + * 0: alloc 1k size tcam block, + * 1: alloc 128 size tcam block, others rsvd + */ + u8 tcam_type; + u16 tcam_block_index; + u16 rsvd; +}; + +int hinic_set_mac(void *hwdev, u8 *mac_addr, u16 vlan_id, u16 func_id); + +int hinic_del_mac(void *hwdev, u8 *mac_addr, u16 vlan_id, u16 func_id); + +int hinic_update_mac(void *hwdev, u8 *old_mac, u8 *new_mac, u16 vlan_id, + u16 func_id); + +int hinic_get_default_mac(void *hwdev, u8 *mac_addr); + +int hinic_set_port_mtu(void *hwdev, u32 new_mtu); + +int hinic_add_remove_vlan(void *hwdev, u16 vlan_id, u16 func_id, bool add); + +int 
hinic_config_vlan_filter(void *hwdev, u32 vlan_filter_ctrl); + +int hinic_set_rx_vlan_offload(void *hwdev, u8 en); + +int hinic_set_vport_enable(void *hwdev, bool enable); + +int hinic_set_port_enable(void *hwdev, bool enable); + +int hinic_get_link_status(void *hwdev, u8 *link_state); + +int hinic_get_port_info(void *hwdev, struct nic_port_info *port_info); + +int hinic_set_rx_vhd_mode(void *hwdev, u16 vhd_mode, u16 rx_buf_sz); + +int hinic_set_pause_config(void *hwdev, struct nic_pause_config nic_pause); + +int hinic_get_pause_info(void *hwdev, struct nic_pause_config *nic_pause); + +int hinic_reset_port_link_cfg(void *hwdev); + +int hinic_dcb_set_ets(void *hwdev, u8 *up_tc, u8 *pg_bw, u8 *pgid, u8 *up_bw, + u8 *prio); + +int hinic_set_anti_attack(void *hwdev, bool enable); + +/* offload feature */ +int hinic_set_rx_lro(void *hwdev, u8 ipv4_en, u8 ipv6_en, u8 max_wqe_num); + +int hinic_get_vport_stats(void *hwdev, struct hinic_vport_stats *stats); + +int hinic_get_phy_port_stats(void *hwdev, struct hinic_phy_port_stats *stats); + +/* rss */ +int hinic_set_rss_type(void *hwdev, u32 tmpl_idx, + struct nic_rss_type rss_type); + +int hinic_get_rss_type(void *hwdev, u32 tmpl_idx, + struct nic_rss_type *rss_type); + +int hinic_rss_set_template_tbl(void *hwdev, u32 tmpl_idx, u8 *temp); + +int hinic_rss_get_template_tbl(void *hwdev, u32 tmpl_idx, u8 *temp); + +int hinic_rss_set_hash_engine(void *hwdev, u8 tmpl_idx, u8 type); + +int hinic_rss_get_indir_tbl(void *hwdev, u32 tmpl_idx, u32 *indir_table); + +int hinic_rss_set_indir_tbl(void *hwdev, u32 tmpl_idx, u32 *indir_table); + +int hinic_rss_cfg(void *hwdev, u8 rss_en, u8 tmpl_idx, u8 tc_num, u8 *prio_tc); + +int hinic_rss_template_alloc(void *hwdev, u8 *tmpl_idx); + +int hinic_rss_template_free(void *hwdev, u8 tmpl_idx); + +int hinic_set_rx_mode(void *hwdev, u32 enable); + +int hinic_get_mgmt_version(void *hwdev, char *fw); + +int hinic_set_rx_csum_offload(void *hwdev, u32 en); + +int hinic_set_link_status_follow(void *hwdev, + enum hinic_link_follow_status status); + +int hinic_get_link_mode(void *hwdev, u32 *supported, u32 *advertised); + +int hinic_set_xsfp_tx_status(void *hwdev, bool enable); + +int hinic_flush_qp_res(void *hwdev); + +int hinic_init_function_table(void *hwdev, u16 rx_buf_sz); + +int hinic_set_fast_recycle_mode(void *hwdev, u8 mode); + +int hinic_get_base_qpn(void *hwdev, u16 *global_qpn); + +int hinic_clear_vport_stats(struct hinic_hwdev *hwdev); + +int hinic_clear_phy_port_stats(struct hinic_hwdev *hwdev); + +int hinic_vf_func_init(struct hinic_hwdev *hwdev); + +void hinic_vf_func_free(struct hinic_hwdev *hwdev); + +int hinic_vf_get_default_cos(struct hinic_hwdev *hwdev, u8 *cos_id); + +int hinic_set_fdir_filter(void *hwdev, u8 filter_type, u8 qid, + u8 type_enable, bool enable); + +int hinic_set_normal_filter(void *hwdev, u8 qid, u8 normal_type_enable, + u32 key, bool enable, u8 flag); + +int hinic_set_fdir_tcam(void *hwdev, u16 type_mask, + struct tag_pa_rule *filter_rule, struct tag_pa_action *filter_action); + +int hinic_clear_fdir_tcam(void *hwdev, u16 type_mask); + +int hinic_add_tcam_rule(void *hwdev, struct tag_tcam_cfg_rule *tcam_rule); + +int hinic_del_tcam_rule(void *hwdev, u32 index); + +int hinic_alloc_tcam_block(void *hwdev, u8 block_type, u16 *index); + +int hinic_free_tcam_block(void *hwdev, u8 block_type, u16 *index); + +int hinic_flush_tcam_rule(void *hwdev); + +#endif /* _HINIC_PMD_NICCFG_H_ */ diff --git a/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_nicio.c 
b/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_nicio.c new file mode 100644 index 000000000..7f7e11dbd --- /dev/null +++ b/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_nicio.c @@ -0,0 +1,907 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Huawei Technologies Co., Ltd + */ +#include + +#include "hinic_compat.h" +#include "hinic_pmd_hwdev.h" +#include "hinic_pmd_hwif.h" +#include "hinic_pmd_wq.h" +#include "hinic_pmd_mgmt.h" +#include "hinic_pmd_cmdq.h" +#include "hinic_pmd_cfg.h" +#include "hinic_pmd_niccfg.h" +#include "hinic_pmd_nicio.h" + +#define WQ_PREFETCH_MAX 6 +#define WQ_PREFETCH_MIN 1 +#define WQ_PREFETCH_THRESHOLD 256 + +#define DEFAULT_RX_BUF_SIZE ((u16)0xB) + +enum { + RECYCLE_MODE_NIC = 0x0, + RECYCLE_MODE_DPDK = 0x1, +}; + +/* Queue buffer related define */ +enum hinic_rx_buf_size { + HINIC_RX_BUF_SIZE_32B = 0x20, + HINIC_RX_BUF_SIZE_64B = 0x40, + HINIC_RX_BUF_SIZE_96B = 0x60, + HINIC_RX_BUF_SIZE_128B = 0x80, + HINIC_RX_BUF_SIZE_192B = 0xC0, + HINIC_RX_BUF_SIZE_256B = 0x100, + HINIC_RX_BUF_SIZE_384B = 0x180, + HINIC_RX_BUF_SIZE_512B = 0x200, + HINIC_RX_BUF_SIZE_768B = 0x300, + HINIC_RX_BUF_SIZE_1K = 0x400, + HINIC_RX_BUF_SIZE_1_5K = 0x600, + HINIC_RX_BUF_SIZE_2K = 0x800, + HINIC_RX_BUF_SIZE_3K = 0xC00, + HINIC_RX_BUF_SIZE_4K = 0x1000, + HINIC_RX_BUF_SIZE_8K = 0x2000, + HINIC_RX_BUF_SIZE_16K = 0x4000, +}; + +const u32 hinic_hw_rx_buf_size[] = { + HINIC_RX_BUF_SIZE_32B, + HINIC_RX_BUF_SIZE_64B, + HINIC_RX_BUF_SIZE_96B, + HINIC_RX_BUF_SIZE_128B, + HINIC_RX_BUF_SIZE_192B, + HINIC_RX_BUF_SIZE_256B, + HINIC_RX_BUF_SIZE_384B, + HINIC_RX_BUF_SIZE_512B, + HINIC_RX_BUF_SIZE_768B, + HINIC_RX_BUF_SIZE_1K, + HINIC_RX_BUF_SIZE_1_5K, + HINIC_RX_BUF_SIZE_2K, + HINIC_RX_BUF_SIZE_3K, + HINIC_RX_BUF_SIZE_4K, + HINIC_RX_BUF_SIZE_8K, + HINIC_RX_BUF_SIZE_16K, +}; + +struct hinic_qp_ctxt_header { + u16 num_queues; + u16 queue_type; + u32 addr_offset; +}; + +struct hinic_sq_ctxt { + u32 ceq_attr; + + u32 ci_owner; + + u32 wq_pfn_hi; + u32 wq_pfn_lo; + + u32 pref_cache; + u32 pref_owner; + u32 pref_wq_pfn_hi_ci; + u32 pref_wq_pfn_lo; + + u32 rsvd8; + u32 rsvd9; + + u32 wq_block_pfn_hi; + u32 wq_block_pfn_lo; +}; + +struct hinic_rq_ctxt { + u32 ceq_attr; + + u32 pi_intr_attr; + + u32 wq_pfn_hi_ci; + u32 wq_pfn_lo; + + u32 pref_cache; + u32 pref_owner; + + u32 pref_wq_pfn_hi_ci; + u32 pref_wq_pfn_lo; + + u32 pi_paddr_hi; + u32 pi_paddr_lo; + + u32 wq_block_pfn_hi; + u32 wq_block_pfn_lo; +}; + +struct hinic_sq_ctxt_block { + struct hinic_qp_ctxt_header cmdq_hdr; + struct hinic_sq_ctxt sq_ctxt[HINIC_Q_CTXT_MAX]; +}; + +struct hinic_rq_ctxt_block { + struct hinic_qp_ctxt_header cmdq_hdr; + struct hinic_rq_ctxt rq_ctxt[HINIC_Q_CTXT_MAX]; +}; + +struct hinic_clean_queue_ctxt { + struct hinic_qp_ctxt_header cmdq_hdr; + u32 ctxt_size; +}; + + +static void +hinic_qp_prepare_cmdq_header(struct hinic_qp_ctxt_header *qp_ctxt_hdr, + enum hinic_qp_ctxt_type ctxt_type, + u16 num_queues, u16 max_queues, u16 q_id) +{ + qp_ctxt_hdr->queue_type = ctxt_type; + qp_ctxt_hdr->num_queues = num_queues; + + if (ctxt_type == HINIC_QP_CTXT_TYPE_SQ) + qp_ctxt_hdr->addr_offset = + SQ_CTXT_OFFSET(max_queues, max_queues, q_id); + else + qp_ctxt_hdr->addr_offset = + RQ_CTXT_OFFSET(max_queues, max_queues, q_id); + + qp_ctxt_hdr->addr_offset = SIZE_16BYTES(qp_ctxt_hdr->addr_offset); + + hinic_cpu_to_be32(qp_ctxt_hdr, sizeof(*qp_ctxt_hdr)); +} + +static void hinic_sq_prepare_ctxt(struct hinic_sq *sq, u16 global_qpn, + struct hinic_sq_ctxt *sq_ctxt) +{ + struct hinic_wq *wq = sq->wq; + u64 wq_page_addr; + u64 
wq_page_pfn, wq_block_pfn; + u32 wq_page_pfn_hi, wq_page_pfn_lo; + u32 wq_block_pfn_hi, wq_block_pfn_lo; + u16 pi_start, ci_start; + + ci_start = (u16)(wq->cons_idx); + pi_start = (u16)(wq->prod_idx); + + /* read the first page from the HW table */ + wq_page_addr = wq->queue_buf_paddr; + + wq_page_pfn = WQ_PAGE_PFN(wq_page_addr); + wq_page_pfn_hi = upper_32_bits(wq_page_pfn); + wq_page_pfn_lo = lower_32_bits(wq_page_pfn); + + wq_block_pfn = WQ_BLOCK_PFN(wq_page_addr); + wq_block_pfn_hi = upper_32_bits(wq_block_pfn); + wq_block_pfn_lo = lower_32_bits(wq_block_pfn); + + /* must config as ceq disabled */ + sq_ctxt->ceq_attr = SQ_CTXT_CEQ_ATTR_SET(global_qpn, GLOBAL_SQ_ID) | + SQ_CTXT_CEQ_ATTR_SET(0, ARM) | + SQ_CTXT_CEQ_ATTR_SET(0, CEQ_ID) | + SQ_CTXT_CEQ_ATTR_SET(0, EN); + + sq_ctxt->ci_owner = SQ_CTXT_CI_SET(ci_start, IDX) | + SQ_CTXT_CI_SET(1, OWNER); + + sq_ctxt->wq_pfn_hi = + SQ_CTXT_WQ_PAGE_SET(wq_page_pfn_hi, HI_PFN) | + SQ_CTXT_WQ_PAGE_SET(pi_start, PI); + + sq_ctxt->wq_pfn_lo = wq_page_pfn_lo; + + sq_ctxt->pref_cache = + SQ_CTXT_PREF_SET(WQ_PREFETCH_MIN, CACHE_MIN) | + SQ_CTXT_PREF_SET(WQ_PREFETCH_MAX, CACHE_MAX) | + SQ_CTXT_PREF_SET(WQ_PREFETCH_THRESHOLD, CACHE_THRESHOLD); + + sq_ctxt->pref_owner = 1; + + sq_ctxt->pref_wq_pfn_hi_ci = + SQ_CTXT_PREF_SET(ci_start, CI) | + SQ_CTXT_PREF_SET(wq_page_pfn_hi, WQ_PFN_HI); + + sq_ctxt->pref_wq_pfn_lo = wq_page_pfn_lo; + + sq_ctxt->wq_block_pfn_hi = + SQ_CTXT_WQ_BLOCK_SET(wq_block_pfn_hi, PFN_HI); + + sq_ctxt->wq_block_pfn_lo = wq_block_pfn_lo; + + hinic_cpu_to_be32(sq_ctxt, sizeof(*sq_ctxt)); +} + +static void hinic_rq_prepare_ctxt(struct hinic_rq *rq, + struct hinic_rq_ctxt *rq_ctxt) +{ + struct hinic_wq *wq = rq->wq; + u64 wq_page_addr; + u64 wq_page_pfn, wq_block_pfn; + u32 wq_page_pfn_hi, wq_page_pfn_lo; + u32 wq_block_pfn_hi, wq_block_pfn_lo; + u16 pi_start, ci_start; + + ci_start = (u16)(wq->cons_idx); + pi_start = (u16)(wq->prod_idx); + + /* read the first page from the HW table */ + wq_page_addr = wq->queue_buf_paddr; + + wq_page_pfn = WQ_PAGE_PFN(wq_page_addr); + wq_page_pfn_hi = upper_32_bits(wq_page_pfn); + wq_page_pfn_lo = lower_32_bits(wq_page_pfn); + + wq_block_pfn = WQ_BLOCK_PFN(wq_page_addr); + wq_block_pfn_hi = upper_32_bits(wq_block_pfn); + wq_block_pfn_lo = lower_32_bits(wq_block_pfn); + + /* must config as ceq enable but do not generate ceq */ + rq_ctxt->ceq_attr = RQ_CTXT_CEQ_ATTR_SET(1, EN) | + RQ_CTXT_CEQ_ATTR_SET(1, OWNER); + + rq_ctxt->pi_intr_attr = RQ_CTXT_PI_SET(pi_start, IDX) | + RQ_CTXT_PI_SET(rq->msix_entry_idx, INTR) | + RQ_CTXT_PI_SET(0, CEQ_ARM); + + rq_ctxt->wq_pfn_hi_ci = RQ_CTXT_WQ_PAGE_SET(wq_page_pfn_hi, HI_PFN) | + RQ_CTXT_WQ_PAGE_SET(ci_start, CI); + + rq_ctxt->wq_pfn_lo = wq_page_pfn_lo; + + rq_ctxt->pref_cache = + RQ_CTXT_PREF_SET(WQ_PREFETCH_MIN, CACHE_MIN) | + RQ_CTXT_PREF_SET(WQ_PREFETCH_MAX, CACHE_MAX) | + RQ_CTXT_PREF_SET(WQ_PREFETCH_THRESHOLD, CACHE_THRESHOLD); + + rq_ctxt->pref_owner = 1; + + rq_ctxt->pref_wq_pfn_hi_ci = + RQ_CTXT_PREF_SET(wq_page_pfn_hi, WQ_PFN_HI) | + RQ_CTXT_PREF_SET(ci_start, CI); + + rq_ctxt->pref_wq_pfn_lo = wq_page_pfn_lo; + + rq_ctxt->pi_paddr_hi = upper_32_bits(rq->pi_dma_addr); + rq_ctxt->pi_paddr_lo = lower_32_bits(rq->pi_dma_addr); + + rq_ctxt->wq_block_pfn_hi = + RQ_CTXT_WQ_BLOCK_SET(wq_block_pfn_hi, PFN_HI); + + rq_ctxt->wq_block_pfn_lo = wq_block_pfn_lo; + + hinic_cpu_to_be32(rq_ctxt, sizeof(*rq_ctxt)); +} + +static int init_sq_ctxts(struct hinic_nic_io *nic_io) +{ + struct hinic_hwdev *hwdev = nic_io->hwdev; + struct hinic_sq_ctxt_block *sq_ctxt_block; + 
struct hinic_sq_ctxt *sq_ctxt; + struct hinic_cmd_buf *cmd_buf; + struct hinic_qp *qp; + u64 out_param = EIO; + u16 q_id, curr_id, global_qpn, max_ctxts, i; + int err = 0; + + cmd_buf = hinic_alloc_cmd_buf(hwdev); + if (!cmd_buf) { + PMD_DRV_LOG(ERR, "Failed to allocate cmd buf"); + return -ENOMEM; + } + + q_id = 0; + /* sq and rq number may not equal */ + while (q_id < nic_io->num_sqs) { + sq_ctxt_block = cmd_buf->buf; + sq_ctxt = sq_ctxt_block->sq_ctxt; + + max_ctxts = (nic_io->num_sqs - q_id) > HINIC_Q_CTXT_MAX ? + HINIC_Q_CTXT_MAX : (nic_io->num_sqs - q_id); + + hinic_qp_prepare_cmdq_header(&sq_ctxt_block->cmdq_hdr, + HINIC_QP_CTXT_TYPE_SQ, max_ctxts, + nic_io->max_qps, q_id); + + for (i = 0; i < max_ctxts; i++) { + curr_id = q_id + i; + qp = &nic_io->qps[curr_id]; + global_qpn = nic_io->global_qpn + curr_id; + + hinic_sq_prepare_ctxt(&qp->sq, global_qpn, &sq_ctxt[i]); + } + + cmd_buf->size = SQ_CTXT_SIZE(max_ctxts); + + err = hinic_cmdq_direct_resp(hwdev, HINIC_ACK_TYPE_CMDQ, + HINIC_MOD_L2NIC, + HINIC_UCODE_CMD_MDY_QUEUE_CONTEXT, + cmd_buf, &out_param, 0); + if (err || out_param != 0) { + PMD_DRV_LOG(ERR, "Failed to set SQ ctxts, err: %d", + err); + err = -EFAULT; + break; + } + + q_id += max_ctxts; + } + + hinic_free_cmd_buf(hwdev, cmd_buf); + + return err; +} + +static int init_rq_ctxts(struct hinic_nic_io *nic_io) +{ + struct hinic_hwdev *hwdev = nic_io->hwdev; + struct hinic_rq_ctxt_block *rq_ctxt_block; + struct hinic_rq_ctxt *rq_ctxt; + struct hinic_cmd_buf *cmd_buf; + struct hinic_qp *qp; + u64 out_param = 0; + u16 q_id, curr_id, max_ctxts, i; + int err = 0; + + cmd_buf = hinic_alloc_cmd_buf(hwdev); + if (!cmd_buf) { + PMD_DRV_LOG(ERR, "Failed to allocate cmd buf"); + return -ENOMEM; + } + + q_id = 0; + /* sq and rq number may not equal */ + while (q_id < nic_io->num_rqs) { + rq_ctxt_block = cmd_buf->buf; + rq_ctxt = rq_ctxt_block->rq_ctxt; + + max_ctxts = (nic_io->num_rqs - q_id) > HINIC_Q_CTXT_MAX ? 
+ HINIC_Q_CTXT_MAX : (nic_io->num_rqs - q_id); + + hinic_qp_prepare_cmdq_header(&rq_ctxt_block->cmdq_hdr, + HINIC_QP_CTXT_TYPE_RQ, max_ctxts, + nic_io->max_qps, q_id); + + for (i = 0; i < max_ctxts; i++) { + curr_id = q_id + i; + qp = &nic_io->qps[curr_id]; + + hinic_rq_prepare_ctxt(&qp->rq, &rq_ctxt[i]); + } + + cmd_buf->size = RQ_CTXT_SIZE(max_ctxts); + + err = hinic_cmdq_direct_resp(hwdev, HINIC_ACK_TYPE_CMDQ, + HINIC_MOD_L2NIC, + HINIC_UCODE_CMD_MDY_QUEUE_CONTEXT, + cmd_buf, &out_param, 0); + if ((err) || out_param != 0) { + PMD_DRV_LOG(ERR, "Failed to set RQ ctxts"); + err = -EFAULT; + break; + } + + q_id += max_ctxts; + } + + hinic_free_cmd_buf(hwdev, cmd_buf); + + return err; +} + +static int init_qp_ctxts(struct hinic_nic_io *nic_io) +{ + return (init_sq_ctxts(nic_io) || init_rq_ctxts(nic_io)); +} + +static int clean_queue_offload_ctxt(struct hinic_nic_io *nic_io, + enum hinic_qp_ctxt_type ctxt_type) +{ + struct hinic_hwdev *hwdev = nic_io->hwdev; + struct hinic_clean_queue_ctxt *ctxt_block; + struct hinic_cmd_buf *cmd_buf; + u64 out_param = 0; + int err; + + cmd_buf = hinic_alloc_cmd_buf(hwdev); + if (!cmd_buf) { + PMD_DRV_LOG(ERR, "Failed to allocate cmd buf"); + return -ENOMEM; + } + + ctxt_block = cmd_buf->buf; + ctxt_block->cmdq_hdr.num_queues = nic_io->max_qps; + ctxt_block->cmdq_hdr.queue_type = ctxt_type; + ctxt_block->cmdq_hdr.addr_offset = 0; + + /* TSO/LRO ctxt size: 0x0:0B; 0x1:160B; 0x2:200B; 0x3:240B */ + ctxt_block->ctxt_size = 0x3; + + hinic_cpu_to_be32(ctxt_block, sizeof(*ctxt_block)); + + cmd_buf->size = sizeof(*ctxt_block); + + err = hinic_cmdq_direct_resp(hwdev, HINIC_ACK_TYPE_CMDQ, + HINIC_MOD_L2NIC, + HINIC_UCODE_CMD_CLEAN_QUEUE_CONTEXT, + cmd_buf, &out_param, 0); + + if ((err) || (out_param)) { + PMD_DRV_LOG(ERR, "Failed to clean queue offload ctxts"); + err = -EFAULT; + } + + hinic_free_cmd_buf(hwdev, cmd_buf); + + return err; +} + +static int clean_qp_offload_ctxt(struct hinic_nic_io *nic_io) +{ + /* clean LRO/TSO context space */ + return (clean_queue_offload_ctxt(nic_io, HINIC_QP_CTXT_TYPE_SQ) || + clean_queue_offload_ctxt(nic_io, HINIC_QP_CTXT_TYPE_RQ)); +} + +/** + * get_hw_rx_buf_size - translate rx_buf_size into hw_rx_buf_size + * @rx_buf_sz: receive buffer size + * @return + * hw rx buffer size + */ +static u16 get_hw_rx_buf_size(u32 rx_buf_sz) +{ + u16 num_hw_types = sizeof(hinic_hw_rx_buf_size) + / sizeof(hinic_hw_rx_buf_size[0]); + u16 i; + + for (i = 0; i < num_hw_types; i++) { + if (hinic_hw_rx_buf_size[i] == rx_buf_sz) + return i; + } + + PMD_DRV_LOG(ERR, "Hw can't support rx buf size of %u", rx_buf_sz); + + return DEFAULT_RX_BUF_SIZE; /* default 2K */ +} + +/** + * hinic_set_root_ctxt - init root context in NIC + * @hwdev: the hardware interface of a nic device + * @rq_depth: the depth of receive queue + * @sq_depth: the depth of transmit queue + * @rx_buf_sz: receive buffer size from app + * Return: 0 on success, negative error value otherwise. 
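+ *
+ * Note: rq_depth and sq_depth are programmed as their log2 values and
+ * rx_buf_sz is translated by get_hw_rx_buf_size() into an index of
+ * hinic_hw_rx_buf_size[], e.g. a 2048 byte buffer becomes index 0xB
+ * (DEFAULT_RX_BUF_SIZE) and a queue depth of 4096 is stored as 12.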
+ */ +static int +hinic_set_root_ctxt(void *hwdev, u16 rq_depth, u16 sq_depth, int rx_buf_sz) +{ + struct hinic_root_ctxt root_ctxt; + + memset(&root_ctxt, 0, sizeof(root_ctxt)); + root_ctxt.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1; + root_ctxt.func_idx = hinic_global_func_id(hwdev); + root_ctxt.ppf_idx = hinic_ppf_idx(hwdev); + root_ctxt.set_cmdq_depth = 0; + root_ctxt.cmdq_depth = 0; + root_ctxt.lro_en = 1; + root_ctxt.rq_depth = (u16)ilog2(rq_depth); + root_ctxt.rx_buf_sz = get_hw_rx_buf_size(rx_buf_sz); + root_ctxt.sq_depth = (u16)ilog2(sq_depth); + + return hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM, + HINIC_MGMT_CMD_VAT_SET, + &root_ctxt, sizeof(root_ctxt), + NULL, NULL, 0); +} + +/** + * hinic_clean_root_ctxt - clean root context table in NIC + * @hwdev: the hardware interface of a nic device + * @return + * 0 on success, + * negative error value otherwise. + */ +static int hinic_clean_root_ctxt(void *hwdev) +{ + struct hinic_root_ctxt root_ctxt; + + memset(&root_ctxt, 0, sizeof(root_ctxt)); + root_ctxt.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1; + root_ctxt.func_idx = hinic_global_func_id(hwdev); + root_ctxt.ppf_idx = hinic_ppf_idx(hwdev); + root_ctxt.set_cmdq_depth = 0; + root_ctxt.cmdq_depth = 0; + root_ctxt.lro_en = 0; + root_ctxt.rq_depth = 0; + root_ctxt.rx_buf_sz = 0; + root_ctxt.sq_depth = 0; + + return hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM, + HINIC_MGMT_CMD_VAT_SET, + &root_ctxt, sizeof(root_ctxt), + NULL, NULL, 0); +} + +/* init qps ctxt and set sq ci attr and arm all sq and set vat page_size */ +int hinic_init_qp_ctxts(struct hinic_hwdev *hwdev) +{ + struct hinic_nic_io *nic_io = hwdev->nic_io; + struct hinic_sq_attr sq_attr; + u16 q_id; + int err, rx_buf_sz; + + /* set vat page size to max queue depth page_size */ + err = hinic_set_pagesize(hwdev, HINIC_PAGE_SIZE_DPDK); + if (err != HINIC_OK) { + PMD_DRV_LOG(ERR, "Set vat page size: %d failed, rc: %d", + HINIC_PAGE_SIZE_DPDK, err); + return err; + } + + if (hwdev->cmdqs->status & HINIC_CMDQ_SET_FAIL) { + err = hinic_reinit_cmdq_ctxts(hwdev); + if (err) { + PMD_DRV_LOG(ERR, "Reinit cmdq context failed when dev start, err: %d", + err); + return err; + } + } + + err = init_qp_ctxts(nic_io); + if (err) { + PMD_DRV_LOG(ERR, "Init QP ctxts failed, rc: %d", err); + return err; + } + + /* clean LRO/TSO context space */ + err = clean_qp_offload_ctxt(nic_io); + if (err) { + PMD_DRV_LOG(ERR, "Clean qp offload ctxts failed, rc: %d", err); + return err; + } + + rx_buf_sz = nic_io->rq_buf_size; + + /* update rx buf size to function table */ + err = hinic_set_rx_vhd_mode(hwdev, 0, rx_buf_sz); + if (err) { + PMD_DRV_LOG(ERR, "Set rx vhd mode failed, rc: %d", err); + return err; + } + + err = hinic_set_root_ctxt(hwdev, nic_io->rq_depth, + nic_io->sq_depth, rx_buf_sz); + if (err) { + PMD_DRV_LOG(ERR, "Set root context failed, rc: %d", err); + return err; + } + + for (q_id = 0; q_id < nic_io->num_sqs; q_id++) { + sq_attr.ci_dma_base = + HINIC_CI_PADDR(nic_io->ci_dma_base, q_id) >> 2; + /* performance: sq ci update threshold as 8 */ + sq_attr.pending_limit = 1; + sq_attr.coalescing_time = 1; + sq_attr.intr_en = 0; + sq_attr.l2nic_sqn = q_id; + sq_attr.dma_attr_off = 0; + err = hinic_set_ci_table(hwdev, q_id, &sq_attr); + if (err) { + PMD_DRV_LOG(ERR, "Set ci table failed, rc: %d", err); + goto set_cons_idx_table_err; + } + } + + return 0; + +set_cons_idx_table_err: + (void)hinic_clean_root_ctxt(hwdev); + return err; +} + +void hinic_free_qp_ctxts(struct hinic_hwdev *hwdev) +{ + int err; + + err = hinic_clean_root_ctxt(hwdev); + if 
(err) + PMD_DRV_LOG(ERR, "Failed to clean root ctxt"); +} + +static int hinic_init_nic_hwdev(struct hinic_hwdev *hwdev) +{ + struct hinic_nic_io *nic_io = hwdev->nic_io; + u16 global_qpn, rx_buf_sz; + int err; + + err = hinic_get_base_qpn(hwdev, &global_qpn); + if (err) { + PMD_DRV_LOG(ERR, "Failed to get base qpn"); + goto err_init_nic_hwdev; + } + + nic_io->global_qpn = global_qpn; + rx_buf_sz = HINIC_IS_VF(hwdev) ? RX_BUF_LEN_1_5K : RX_BUF_LEN_16K; + err = hinic_init_function_table(hwdev, rx_buf_sz); + if (err) { + PMD_DRV_LOG(ERR, "Failed to init function table"); + goto err_init_nic_hwdev; + } + + err = hinic_vf_func_init(hwdev); + if (err) { + PMD_DRV_LOG(ERR, "Failed to init nic mbox"); + goto err_init_nic_hwdev; + } + + err = hinic_set_fast_recycle_mode(hwdev, RECYCLE_MODE_DPDK); + if (err) { + PMD_DRV_LOG(ERR, "Failed to set fast recycle mode"); + goto err_init_nic_hwdev; + } + + return 0; + +err_init_nic_hwdev: + return err; +} + +static void hinic_free_nic_hwdev(struct hinic_hwdev *hwdev) +{ + hinic_vf_func_free(hwdev); + hwdev->nic_io = NULL; +} + +int hinic_rx_tx_flush(struct hinic_hwdev *hwdev) +{ + return hinic_func_rx_tx_flush(hwdev); +} + +int hinic_get_sq_free_wqebbs(struct hinic_hwdev *hwdev, u16 q_id) +{ + struct hinic_nic_io *nic_io = hwdev->nic_io; + struct hinic_wq *wq = &nic_io->sq_wq[q_id]; + + return (wq->delta) - 1; +} + +int hinic_get_rq_free_wqebbs(struct hinic_hwdev *hwdev, u16 q_id) +{ + struct hinic_nic_io *nic_io = hwdev->nic_io; + struct hinic_wq *wq = &nic_io->rq_wq[q_id]; + + return (wq->delta) - 1; +} + +u16 hinic_get_sq_local_ci(struct hinic_hwdev *hwdev, u16 q_id) +{ + struct hinic_nic_io *nic_io = hwdev->nic_io; + struct hinic_wq *wq = &nic_io->sq_wq[q_id]; + + return (wq->cons_idx) & wq->mask; +} + +void hinic_return_sq_wqe(struct hinic_hwdev *hwdev, u16 q_id, + int num_wqebbs, u16 owner) +{ + struct hinic_nic_io *nic_io = hwdev->nic_io; + struct hinic_sq *sq = &nic_io->qps[q_id].sq; + + if (owner != sq->owner) + sq->owner = owner; + + sq->wq->delta += num_wqebbs; + sq->wq->prod_idx -= num_wqebbs; +} + +void hinic_update_sq_local_ci(struct hinic_hwdev *hwdev, + u16 q_id, int wqebb_cnt) +{ + struct hinic_nic_io *nic_io = hwdev->nic_io; + struct hinic_sq *sq = &nic_io->qps[q_id].sq; + + hinic_put_wqe(sq->wq, wqebb_cnt); +} + +void *hinic_get_rq_wqe(struct hinic_hwdev *hwdev, u16 q_id, u16 *pi) +{ + struct hinic_nic_io *nic_io = hwdev->nic_io; + struct hinic_rq *rq = &nic_io->qps[q_id].rq; + + return hinic_get_wqe(rq->wq, 1, pi); +} + +void hinic_return_rq_wqe(struct hinic_hwdev *hwdev, u16 q_id, int num_wqebbs) +{ + struct hinic_nic_io *nic_io = hwdev->nic_io; + struct hinic_rq *rq = &nic_io->qps[q_id].rq; + + rq->wq->delta += num_wqebbs; + rq->wq->prod_idx -= num_wqebbs; +} + +u16 hinic_get_rq_local_ci(struct hinic_hwdev *hwdev, u16 q_id) +{ + struct hinic_nic_io *nic_io = hwdev->nic_io; + struct hinic_wq *wq = &nic_io->rq_wq[q_id]; + + return (wq->cons_idx) & wq->mask; +} + +void hinic_update_rq_local_ci(struct hinic_hwdev *hwdev, u16 q_id, int wqe_cnt) +{ + struct hinic_nic_io *nic_io = hwdev->nic_io; + struct hinic_rq *rq = &nic_io->qps[q_id].rq; + + hinic_put_wqe(rq->wq, wqe_cnt); +} + +static int hinic_alloc_nicio(struct hinic_hwdev *hwdev) +{ + struct hinic_nic_io *nic_io = hwdev->nic_io; + struct rte_pci_device *pdev = hwdev->pcidev_hdl; + u16 max_qps, num_qp; + int err; + + max_qps = hinic_func_max_qnum(hwdev); + if ((max_qps & (max_qps - 1))) { + PMD_DRV_LOG(ERR, "Wrong number of max_qps: %d", + max_qps); + return -EINVAL; + } + + 
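+ /*
+  * Reserve per-queue state for the maximum number of queue pairs: the qp
+  * array, the DMA-coherent consumer-index (CI) table and the SQ/RQ work
+  * queue arrays allocated below.
+  */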
nic_io->max_qps = max_qps; + nic_io->num_qps = max_qps; + num_qp = max_qps; + + nic_io->qps = kzalloc_aligned(num_qp * sizeof(*nic_io->qps), + GFP_KERNEL); + if (!nic_io->qps) { + PMD_DRV_LOG(ERR, "Failed to allocate qps"); + err = -ENOMEM; + goto alloc_qps_err; + } + + nic_io->ci_vaddr_base = dma_zalloc_coherent(hwdev, + CI_TABLE_SIZE(num_qp, HINIC_PAGE_SIZE), + &nic_io->ci_dma_base, + pdev->device.numa_node); + if (!nic_io->ci_vaddr_base) { + PMD_DRV_LOG(ERR, "Failed to allocate ci area"); + err = -ENOMEM; + goto ci_base_err; + } + + nic_io->sq_wq = kzalloc_aligned(num_qp * sizeof(*nic_io->sq_wq), + GFP_KERNEL); + if (!nic_io->sq_wq) { + PMD_DRV_LOG(ERR, "Failed to allocate sq wq array"); + err = -ENOMEM; + goto sq_wq_err; + } + + nic_io->rq_wq = kzalloc_aligned(num_qp * sizeof(*nic_io->rq_wq), + GFP_KERNEL); + if (!nic_io->rq_wq) { + PMD_DRV_LOG(ERR, "Failed to allocate rq wq array"); + err = -ENOMEM; + goto rq_wq_err; + } + + return HINIC_OK; + +rq_wq_err: + kfree(nic_io->sq_wq); + +sq_wq_err: + dma_free_coherent(hwdev, CI_TABLE_SIZE(num_qp, HINIC_PAGE_SIZE), + nic_io->ci_vaddr_base, nic_io->ci_dma_base); + +ci_base_err: + kfree(nic_io->qps); + +alloc_qps_err: + return err; +} + +static void hinic_free_nicio(struct hinic_hwdev *hwdev) +{ + struct hinic_nic_io *nic_io = hwdev->nic_io; + + /* nic_io->rq_wq */ + kfree(nic_io->rq_wq); + + /* nic_io->sq_wq */ + kfree(nic_io->sq_wq); + + /* nic_io->ci_vaddr_base */ + dma_free_coherent(hwdev, + CI_TABLE_SIZE(nic_io->max_qps, HINIC_PAGE_SIZE), + nic_io->ci_vaddr_base, nic_io->ci_dma_base); + + /* nic_io->qps */ + kfree(nic_io->qps); +} + +/* alloc nic hwdev and init function table */ +int hinic_init_nicio(struct hinic_hwdev *hwdev) +{ + int rc; + + hwdev->nic_io = rte_zmalloc("hinic_nicio", sizeof(*hwdev->nic_io), + RTE_CACHE_LINE_SIZE); + if (!hwdev->nic_io) { + PMD_DRV_LOG(ERR, "Allocate nic_io failed, dev_name: %s", + hwdev->pcidev_hdl->name); + return -ENOMEM; + } + hwdev->nic_io->hwdev = hwdev; + + /* alloc root working queue set */ + rc = hinic_alloc_nicio(hwdev); + if (rc) { + PMD_DRV_LOG(ERR, "Allocate nic_io failed, dev_name: %s", + hwdev->pcidev_hdl->name); + goto allc_nicio_fail; + } + + rc = hinic_init_nic_hwdev(hwdev); + if (rc) { + PMD_DRV_LOG(ERR, "Initialize hwdev failed, dev_name: %s", + hwdev->pcidev_hdl->name); + goto init_nic_hwdev_fail; + } + + return 0; + +init_nic_hwdev_fail: + hinic_free_nicio(hwdev); + +allc_nicio_fail: + rte_free(hwdev->nic_io); + return rc; +} + +void hinic_deinit_nicio(struct hinic_hwdev *hwdev) +{ + hinic_free_nicio(hwdev); + + hinic_free_nic_hwdev(hwdev); + + rte_free(hwdev->nic_io); + hwdev->nic_io = NULL; +} + +/** + * hinic_convert_rx_buf_size - convert rx buffer size to hw size + * @rx_buf_sz: receive buffer size of mbuf + * @match_sz: receive buffer size of hardware + * @return + * 0 on success, + * negative error value otherwise. 
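+ *
+ * The matched size is the largest hardware-supported buffer size that
+ * does not exceed rx_buf_sz, e.g. 2000 bytes is matched to
+ * HINIC_RX_BUF_SIZE_1_5K (1536), and anything of 16K or larger is
+ * clamped to HINIC_RX_BUF_SIZE_16K.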
+ */ +int hinic_convert_rx_buf_size(u32 rx_buf_sz, u32 *match_sz) +{ + u32 i, num_hw_types, best_match_sz; + + if (unlikely(!match_sz || rx_buf_sz < HINIC_RX_BUF_SIZE_32B)) + return -EINVAL; + + if (rx_buf_sz >= HINIC_RX_BUF_SIZE_16K) { + best_match_sz = HINIC_RX_BUF_SIZE_16K; + goto size_matched; + } + + num_hw_types = sizeof(hinic_hw_rx_buf_size) / + sizeof(hinic_hw_rx_buf_size[0]); + best_match_sz = hinic_hw_rx_buf_size[0]; + for (i = 0; i < num_hw_types; i++) { + if (rx_buf_sz == hinic_hw_rx_buf_size[i]) { + best_match_sz = hinic_hw_rx_buf_size[i]; + break; + } else if (rx_buf_sz < hinic_hw_rx_buf_size[i]) { + break; + } + best_match_sz = hinic_hw_rx_buf_size[i]; + } + +size_matched: + *match_sz = best_match_sz; + + return 0; +} diff --git a/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_nicio.h b/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_nicio.h new file mode 100644 index 000000000..9a487d024 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_nicio.h @@ -0,0 +1,264 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Huawei Technologies Co., Ltd + */ + +#ifndef _HINIC_PMD_NICIO_H_ +#define _HINIC_PMD_NICIO_H_ + +#define RX_BUF_LEN_16K 16384 +#define RX_BUF_LEN_1_5K 1536 + +#define HINIC_Q_CTXT_MAX 42 + +/* performance: ci addr RTE_CACHE_SIZE(64B) alignment */ +#define HINIC_CI_Q_ADDR_SIZE 64 + +#define CI_TABLE_SIZE(num_qps, pg_sz) \ + (ALIGN((num_qps) * HINIC_CI_Q_ADDR_SIZE, pg_sz)) + +#define HINIC_CI_VADDR(base_addr, q_id) \ + ((u8 *)(base_addr) + (q_id) * HINIC_CI_Q_ADDR_SIZE) + +#define HINIC_CI_PADDR(base_paddr, q_id) \ + ((base_paddr) + (q_id) * HINIC_CI_Q_ADDR_SIZE) + +#define Q_CTXT_SIZE 48 +#define TSO_LRO_CTXT_SIZE 240 + +#define SQ_CTXT_OFFSET(max_sqs, max_rqs, q_id) \ + (((max_rqs) + (max_sqs)) * TSO_LRO_CTXT_SIZE + \ + (q_id) * Q_CTXT_SIZE) + +#define RQ_CTXT_OFFSET(max_sqs, max_rqs, q_id) \ + (((max_rqs) + (max_sqs)) * TSO_LRO_CTXT_SIZE + \ + (max_sqs) * Q_CTXT_SIZE + (q_id) * Q_CTXT_SIZE) + +#define SQ_CTXT_SIZE(num_sqs) \ + ((u16)(sizeof(struct hinic_qp_ctxt_header) + \ + (num_sqs) * sizeof(struct hinic_sq_ctxt))) + +#define RQ_CTXT_SIZE(num_rqs) \ + ((u16)(sizeof(struct hinic_qp_ctxt_header) + \ + (num_rqs) * sizeof(struct hinic_rq_ctxt))) + +#define SQ_CTXT_CEQ_ATTR_CEQ_ID_SHIFT 8 +#define SQ_CTXT_CEQ_ATTR_GLOBAL_SQ_ID_SHIFT 13 +#define SQ_CTXT_CEQ_ATTR_EN_SHIFT 23 +#define SQ_CTXT_CEQ_ATTR_ARM_SHIFT 31 + +#define SQ_CTXT_CEQ_ATTR_CEQ_ID_MASK 0x1FU +#define SQ_CTXT_CEQ_ATTR_GLOBAL_SQ_ID_MASK 0x3FFU +#define SQ_CTXT_CEQ_ATTR_EN_MASK 0x1U +#define SQ_CTXT_CEQ_ATTR_ARM_MASK 0x1U + +#define SQ_CTXT_CEQ_ATTR_SET(val, member) \ + (((val) & SQ_CTXT_CEQ_ATTR_##member##_MASK) << \ + SQ_CTXT_CEQ_ATTR_##member##_SHIFT) + +#define SQ_CTXT_CI_IDX_SHIFT 11 +#define SQ_CTXT_CI_OWNER_SHIFT 23 + +#define SQ_CTXT_CI_IDX_MASK 0xFFFU +#define SQ_CTXT_CI_OWNER_MASK 0x1U + +#define SQ_CTXT_CI_SET(val, member) \ + (((val) & SQ_CTXT_CI_##member##_MASK) << SQ_CTXT_CI_##member##_SHIFT) + +#define SQ_CTXT_WQ_PAGE_HI_PFN_SHIFT 0 +#define SQ_CTXT_WQ_PAGE_PI_SHIFT 20 + +#define SQ_CTXT_WQ_PAGE_HI_PFN_MASK 0xFFFFFU +#define SQ_CTXT_WQ_PAGE_PI_MASK 0xFFFU + +#define SQ_CTXT_WQ_PAGE_SET(val, member) \ + (((val) & SQ_CTXT_WQ_PAGE_##member##_MASK) << \ + SQ_CTXT_WQ_PAGE_##member##_SHIFT) + +#define SQ_CTXT_PREF_CACHE_THRESHOLD_SHIFT 0 +#define SQ_CTXT_PREF_CACHE_MAX_SHIFT 14 +#define SQ_CTXT_PREF_CACHE_MIN_SHIFT 25 + +#define SQ_CTXT_PREF_CACHE_THRESHOLD_MASK 0x3FFFU +#define SQ_CTXT_PREF_CACHE_MAX_MASK 0x7FFU +#define SQ_CTXT_PREF_CACHE_MIN_MASK 0x7FU + +#define 
SQ_CTXT_PREF_WQ_PFN_HI_SHIFT 0 +#define SQ_CTXT_PREF_CI_SHIFT 20 + +#define SQ_CTXT_PREF_WQ_PFN_HI_MASK 0xFFFFFU +#define SQ_CTXT_PREF_CI_MASK 0xFFFU + +#define SQ_CTXT_PREF_SET(val, member) \ + (((val) & SQ_CTXT_PREF_##member##_MASK) << \ + SQ_CTXT_PREF_##member##_SHIFT) + +#define SQ_CTXT_WQ_BLOCK_PFN_HI_SHIFT 0 + +#define SQ_CTXT_WQ_BLOCK_PFN_HI_MASK 0x7FFFFFU + +#define SQ_CTXT_WQ_BLOCK_SET(val, member) \ + (((val) & SQ_CTXT_WQ_BLOCK_##member##_MASK) << \ + SQ_CTXT_WQ_BLOCK_##member##_SHIFT) + +#define RQ_CTXT_CEQ_ATTR_EN_SHIFT 0 +#define RQ_CTXT_CEQ_ATTR_OWNER_SHIFT 1 + +#define RQ_CTXT_CEQ_ATTR_EN_MASK 0x1U +#define RQ_CTXT_CEQ_ATTR_OWNER_MASK 0x1U + +#define RQ_CTXT_CEQ_ATTR_SET(val, member) \ + (((val) & RQ_CTXT_CEQ_ATTR_##member##_MASK) << \ + RQ_CTXT_CEQ_ATTR_##member##_SHIFT) + +#define RQ_CTXT_PI_IDX_SHIFT 0 +#define RQ_CTXT_PI_INTR_SHIFT 22 +#define RQ_CTXT_PI_CEQ_ARM_SHIFT 31 + +#define RQ_CTXT_PI_IDX_MASK 0xFFFU +#define RQ_CTXT_PI_INTR_MASK 0x3FFU +#define RQ_CTXT_PI_CEQ_ARM_MASK 0x1U + +#define RQ_CTXT_PI_SET(val, member) \ + (((val) & RQ_CTXT_PI_##member##_MASK) << RQ_CTXT_PI_##member##_SHIFT) + +#define RQ_CTXT_WQ_PAGE_HI_PFN_SHIFT 0 +#define RQ_CTXT_WQ_PAGE_CI_SHIFT 20 + +#define RQ_CTXT_WQ_PAGE_HI_PFN_MASK 0xFFFFFU +#define RQ_CTXT_WQ_PAGE_CI_MASK 0xFFFU + +#define RQ_CTXT_WQ_PAGE_SET(val, member) \ + (((val) & RQ_CTXT_WQ_PAGE_##member##_MASK) << \ + RQ_CTXT_WQ_PAGE_##member##_SHIFT) + +#define RQ_CTXT_PREF_CACHE_THRESHOLD_SHIFT 0 +#define RQ_CTXT_PREF_CACHE_MAX_SHIFT 14 +#define RQ_CTXT_PREF_CACHE_MIN_SHIFT 25 + +#define RQ_CTXT_PREF_CACHE_THRESHOLD_MASK 0x3FFFU +#define RQ_CTXT_PREF_CACHE_MAX_MASK 0x7FFU +#define RQ_CTXT_PREF_CACHE_MIN_MASK 0x7FU + +#define RQ_CTXT_PREF_WQ_PFN_HI_SHIFT 0 +#define RQ_CTXT_PREF_CI_SHIFT 20 + +#define RQ_CTXT_PREF_WQ_PFN_HI_MASK 0xFFFFFU +#define RQ_CTXT_PREF_CI_MASK 0xFFFU + +#define RQ_CTXT_PREF_SET(val, member) \ + (((val) & RQ_CTXT_PREF_##member##_MASK) << \ + RQ_CTXT_PREF_##member##_SHIFT) + +#define RQ_CTXT_WQ_BLOCK_PFN_HI_SHIFT 0 + +#define RQ_CTXT_WQ_BLOCK_PFN_HI_MASK 0x7FFFFFU + +#define RQ_CTXT_WQ_BLOCK_SET(val, member) \ + (((val) & RQ_CTXT_WQ_BLOCK_##member##_MASK) << \ + RQ_CTXT_WQ_BLOCK_##member##_SHIFT) + +#define SIZE_16BYTES(size) (ALIGN((size), 16) >> 4) + +enum hinic_qp_ctxt_type { + HINIC_QP_CTXT_TYPE_SQ, + HINIC_QP_CTXT_TYPE_RQ, +}; + +struct hinic_sq { + struct hinic_wq *wq; + volatile u16 *cons_idx_addr; + void __iomem *db_addr; + + u16 q_id; + u16 owner; + u16 sq_depth; +}; + +struct hinic_rq { + struct hinic_wq *wq; + volatile u16 *pi_virt_addr; + dma_addr_t pi_dma_addr; + + u16 irq_id; + u16 msix_entry_idx; + u16 q_id; + u16 rq_depth; +}; + +struct hinic_qp { + struct hinic_sq sq; + struct hinic_rq rq; +}; + +struct hinic_event { + void (*tx_ack)(void *handle, u16 q_id); + /* status: 0 - link down; 1 - link up */ + void (*link_change)(void *handle, int status); +}; + +struct hinic_nic_io { + struct hinic_hwdev *hwdev; + + u16 global_qpn; + + struct hinic_wq *sq_wq; + struct hinic_wq *rq_wq; + + u16 max_qps; + u16 num_qps; + + u16 num_sqs; + u16 num_rqs; + + u16 sq_depth; + u16 rq_depth; + + u16 rq_buf_size; + u16 vhd_mode; + + struct hinic_qp *qps; + /* sq ci mem base addr of the function */ + void *ci_vaddr_base; + dma_addr_t ci_dma_base; + + struct hinic_event event; + void *event_handle; +}; + +struct hinic_sq_db { + u32 db_info; +}; + +int hinic_init_qp_ctxts(struct hinic_hwdev *hwdev); + +void hinic_free_qp_ctxts(struct hinic_hwdev *hwdev); + +int hinic_rx_tx_flush(struct hinic_hwdev *hwdev); + +int 
hinic_get_sq_free_wqebbs(struct hinic_hwdev *hwdev, u16 q_id); + +u16 hinic_get_sq_local_ci(struct hinic_hwdev *hwdev, u16 q_id); + +void hinic_update_sq_local_ci(struct hinic_hwdev *hwdev, u16 q_id, + int wqebb_cnt); + +void hinic_return_sq_wqe(struct hinic_hwdev *hwdev, u16 q_id, + int num_wqebbs, u16 owner); + +int hinic_get_rq_free_wqebbs(struct hinic_hwdev *hwdev, u16 q_id); + +void *hinic_get_rq_wqe(struct hinic_hwdev *hwdev, u16 q_id, u16 *pi); + +void hinic_return_rq_wqe(struct hinic_hwdev *hwdev, u16 q_id, int num_wqebbs); + +u16 hinic_get_rq_local_ci(struct hinic_hwdev *hwdev, u16 q_id); + +void hinic_update_rq_local_ci(struct hinic_hwdev *hwdev, u16 q_id, int wqe_cnt); + +int hinic_init_nicio(struct hinic_hwdev *hwdev); + +void hinic_deinit_nicio(struct hinic_hwdev *hwdev); + +int hinic_convert_rx_buf_size(u32 rx_buf_sz, u32 *match_sz); + +#endif /* _HINIC_PMD_NICIO_H_ */ diff --git a/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_wq.c b/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_wq.c new file mode 100644 index 000000000..345248c3e --- /dev/null +++ b/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_wq.c @@ -0,0 +1,180 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Huawei Technologies Co., Ltd + */ + +#include "hinic_compat.h" +#include "hinic_pmd_hwdev.h" +#include "hinic_pmd_wq.h" + +static void free_wq_pages(struct hinic_hwdev *hwdev, struct hinic_wq *wq) +{ + dma_free_coherent(hwdev, wq->wq_buf_size, (void *)wq->queue_buf_vaddr, + (dma_addr_t)wq->queue_buf_paddr); + + wq->queue_buf_paddr = 0; + wq->queue_buf_vaddr = 0; +} + +static int alloc_wq_pages(struct hinic_hwdev *hwdev, struct hinic_wq *wq, + unsigned int socket_id) +{ + dma_addr_t dma_addr = 0; + + wq->queue_buf_vaddr = (u64)(u64 *) + dma_zalloc_coherent_aligned256k(hwdev, wq->wq_buf_size, + &dma_addr, socket_id); + if (!wq->queue_buf_vaddr) { + PMD_DRV_LOG(ERR, "Failed to allocate wq page"); + return -ENOMEM; + } + + if (!ADDR_256K_ALIGNED(dma_addr)) { + PMD_DRV_LOG(ERR, "Wqe pages is not 256k aligned!"); + dma_free_coherent(hwdev, wq->wq_buf_size, + (void *)wq->queue_buf_vaddr, + dma_addr); + return -ENOMEM; + } + wq->queue_buf_paddr = dma_addr; + + return 0; +} + +int hinic_wq_allocate(struct hinic_hwdev *hwdev, struct hinic_wq *wq, + u32 wqebb_shift, u16 q_depth, unsigned int socket_id) +{ + int err; + + if (q_depth & (q_depth - 1)) { + PMD_DRV_LOG(ERR, "WQ q_depth isn't power of 2"); + return -EINVAL; + } + + wq->wqebb_size = 1 << wqebb_shift; + wq->wqebb_shift = wqebb_shift; + wq->wq_buf_size = ((u32)q_depth) << wqebb_shift; + wq->q_depth = q_depth; + + if (wq->wq_buf_size > (HINIC_PAGE_SIZE << HINIC_PAGE_SIZE_DPDK)) { + PMD_DRV_LOG(ERR, "Invalid q_depth %u which one page_size can not hold", + q_depth); + return -EINVAL; + } + + err = alloc_wq_pages(hwdev, wq, socket_id); + if (err) { + PMD_DRV_LOG(ERR, "Failed to allocate wq pages"); + return err; + } + + wq->cons_idx = 0; + wq->prod_idx = 0; + wq->delta = q_depth; + wq->mask = q_depth - 1; + + return 0; +} + +void hinic_wq_free(struct hinic_hwdev *hwdev, struct hinic_wq *wq) +{ + free_wq_pages(hwdev, wq); +} + +void hinic_put_wqe(struct hinic_wq *wq, int num_wqebbs) +{ + wq->cons_idx += num_wqebbs; + wq->delta += num_wqebbs; +} + +void *hinic_read_wqe(struct hinic_wq *wq, int num_wqebbs, u16 *cons_idx) +{ + u16 curr_cons_idx; + + if ((wq->delta + num_wqebbs) > wq->q_depth) + return NULL; + + curr_cons_idx = (u16)(wq->cons_idx); + + curr_cons_idx = MASKED_WQE_IDX(wq, curr_cons_idx); + + *cons_idx = curr_cons_idx; + + return 
WQ_WQE_ADDR(wq, (u32)(*cons_idx)); +} + +int hinic_cmdq_alloc(struct hinic_wq *wq, struct hinic_hwdev *hwdev, + int cmdq_blocks, u32 wq_buf_size, u32 wqebb_shift, + u16 q_depth) +{ + int i, j, err = -ENOMEM; + + /* validate q_depth is power of 2 & wqebb_size is not 0 */ + for (i = 0; i < cmdq_blocks; i++) { + wq[i].wqebb_size = 1 << wqebb_shift; + wq[i].wqebb_shift = wqebb_shift; + wq[i].wq_buf_size = wq_buf_size; + wq[i].q_depth = q_depth; + + err = alloc_wq_pages(hwdev, &wq[i], SOCKET_ID_ANY); + if (err) { + PMD_DRV_LOG(ERR, "Failed to alloc CMDQ blocks"); + goto cmdq_block_err; + } + + wq[i].cons_idx = 0; + wq[i].prod_idx = 0; + wq[i].delta = q_depth; + + wq[i].mask = q_depth - 1; + } + + return 0; + +cmdq_block_err: + for (j = 0; j < i; j++) + free_wq_pages(hwdev, &wq[j]); + + return err; +} + +void hinic_cmdq_free(struct hinic_hwdev *hwdev, struct hinic_wq *wq, + int cmdq_blocks) +{ + int i; + + for (i = 0; i < cmdq_blocks; i++) + free_wq_pages(hwdev, &wq[i]); +} + +void hinic_wq_wqe_pg_clear(struct hinic_wq *wq) +{ + wq->cons_idx = 0; + wq->prod_idx = 0; + + memset((void *)wq->queue_buf_vaddr, 0, wq->wq_buf_size); +} + +void *hinic_get_wqe(struct hinic_wq *wq, int num_wqebbs, u16 *prod_idx) +{ + u16 curr_prod_idx; + + wq->delta -= num_wqebbs; + curr_prod_idx = wq->prod_idx; + wq->prod_idx += num_wqebbs; + *prod_idx = MASKED_WQE_IDX(wq, curr_prod_idx); + + return WQ_WQE_ADDR(wq, (u32)(*prod_idx)); +} + +/** + * hinic_set_sge - set dma area in scatter gather entry + * @sge: scatter gather entry + * @addr: dma address + * @len: length of relevant data in the dma address + **/ +void hinic_set_sge(struct hinic_sge *sge, dma_addr_t addr, u32 len) +{ + sge->hi_addr = upper_32_bits(addr); + sge->lo_addr = lower_32_bits(addr); + sge->len = len; +} diff --git a/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_wq.h b/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_wq.h new file mode 100644 index 000000000..354d0338d --- /dev/null +++ b/src/spdk/dpdk/drivers/net/hinic/base/hinic_pmd_wq.h @@ -0,0 +1,137 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Huawei Technologies Co., Ltd + */ + +#ifndef _HINIC_PMD_WQ_H_ +#define _HINIC_PMD_WQ_H_ + +#define WQS_BLOCKS_PER_PAGE 4 + +#define WQ_SIZE(wq) (u32)((u64)(wq)->q_depth * (wq)->wqebb_size) + +#define WQE_PAGE_NUM(wq, idx) (((idx) >> ((wq)->wqebbs_per_page_shift)) & \ + ((wq)->num_q_pages - 1)) + +#define WQE_PAGE_OFF(wq, idx) ((u64)((wq)->wqebb_size) * \ + ((idx) & ((wq)->num_wqebbs_per_page - 1))) + +#define WQ_PAGE_ADDR_SIZE sizeof(u64) +#define WQ_PAGE_ADDR_SIZE_SHIFT 3 +#define WQ_PAGE_ADDR(wq, idx) \ + (u8 *)(*(u64 *)((u64)((wq)->shadow_block_vaddr) + \ + (WQE_PAGE_NUM(wq, idx) << WQ_PAGE_ADDR_SIZE_SHIFT))) + +#define WQ_BLOCK_SIZE 4096UL +#define WQS_PAGE_SIZE (WQS_BLOCKS_PER_PAGE * WQ_BLOCK_SIZE) +#define WQ_MAX_PAGES (WQ_BLOCK_SIZE >> WQ_PAGE_ADDR_SIZE_SHIFT) + +#define CMDQ_BLOCKS_PER_PAGE 8 +#define CMDQ_BLOCK_SIZE 512UL +#define CMDQ_PAGE_SIZE ALIGN((CMDQ_BLOCKS_PER_PAGE * \ + CMDQ_BLOCK_SIZE), PAGE_SIZE) + +#define ADDR_4K_ALIGNED(addr) (0 == ((addr) & 0xfff)) +#define ADDR_256K_ALIGNED(addr) (0 == ((addr) & 0x3ffff)) + +#define WQ_BASE_VADDR(wqs, wq) \ + (u64 *)(((u64)((wqs)->page_vaddr[(wq)->page_idx])) \ + + (wq)->block_idx * WQ_BLOCK_SIZE) + +#define WQ_BASE_PADDR(wqs, wq) (((wqs)->page_paddr[(wq)->page_idx]) \ + + (u64)(wq)->block_idx * WQ_BLOCK_SIZE) + +#define WQ_BASE_ADDR(wqs, wq) \ + (u64 *)(((u64)((wqs)->shadow_page_vaddr[(wq)->page_idx])) \ + + (wq)->block_idx * WQ_BLOCK_SIZE) + +#define 
CMDQ_BASE_VADDR(cmdq_pages, wq) \ + (u64 *)(((u64)((cmdq_pages)->cmdq_page_vaddr)) \ + + (wq)->block_idx * CMDQ_BLOCK_SIZE) + +#define CMDQ_BASE_PADDR(cmdq_pages, wq) \ + (((u64)((cmdq_pages)->cmdq_page_paddr)) \ + + (u64)(wq)->block_idx * CMDQ_BLOCK_SIZE) + +#define CMDQ_BASE_ADDR(cmdq_pages, wq) \ + (u64 *)(((u64)((cmdq_pages)->cmdq_shadow_page_vaddr)) \ + + (wq)->block_idx * CMDQ_BLOCK_SIZE) + +#define MASKED_WQE_IDX(wq, idx) ((idx) & (wq)->mask) + +#define WQE_SHADOW_PAGE(wq, wqe) \ + (u16)(((unsigned long)(wqe) - (unsigned long)(wq)->shadow_wqe) \ + / (wq)->max_wqe_size) + +#define WQE_IN_RANGE(wqe, start, end) \ + (((unsigned long)(wqe) >= (unsigned long)(start)) && \ + ((unsigned long)(wqe) < (unsigned long)(end))) + +#define WQ_NUM_PAGES(num_wqs) \ + (ALIGN((u32)num_wqs, WQS_BLOCKS_PER_PAGE) / WQS_BLOCKS_PER_PAGE) + +#define WQ_WQE_ADDR(wq, idx) ((void *)((u64)((wq)->queue_buf_vaddr) + \ + ((idx) << (wq)->wqebb_shift))) + +#define WQ_PAGE_PFN_SHIFT 12 +#define WQ_BLOCK_PFN_SHIFT 9 + +#define WQ_PAGE_PFN(page_addr) ((page_addr) >> WQ_PAGE_PFN_SHIFT) +#define WQ_BLOCK_PFN(page_addr) ((page_addr) >> WQ_BLOCK_PFN_SHIFT) + + +#define HINIC_SQ_WQEBB_SIZE 64 +#define HINIC_RQ_WQE_SIZE 32 +#define HINIC_SQ_WQEBB_SHIFT 6 +#define HINIC_RQ_WQEBB_SHIFT 5 + +struct hinic_sge { + u32 hi_addr; + u32 lo_addr; + u32 len; +}; + +/* Working Queue */ +struct hinic_wq { + /* The addresses are 64 bit in the HW */ + u64 queue_buf_vaddr; + + u16 q_depth; + u16 mask; + u32 delta; + + u32 cons_idx; + u32 prod_idx; + + u64 queue_buf_paddr; + + u32 wqebb_size; + u32 wqebb_shift; + + u32 wq_buf_size; + + u32 rsvd[5]; +}; + +void hinic_wq_wqe_pg_clear(struct hinic_wq *wq); + +int hinic_cmdq_alloc(struct hinic_wq *wq, struct hinic_hwdev *hwdev, + int cmdq_blocks, u32 wq_buf_size, u32 wqebb_shift, + u16 q_depth); + +void hinic_cmdq_free(struct hinic_hwdev *hwdev, struct hinic_wq *wq, + int cmdq_blocks); + +int hinic_wq_allocate(struct hinic_hwdev *hwdev, struct hinic_wq *wq, + u32 wqebb_shift, u16 q_depth, unsigned int socket_id); + +void hinic_wq_free(struct hinic_hwdev *hwdev, struct hinic_wq *wq); + +void *hinic_get_wqe(struct hinic_wq *wq, int num_wqebbs, u16 *prod_idx); + +void hinic_put_wqe(struct hinic_wq *wq, int num_wqebbs); + +void *hinic_read_wqe(struct hinic_wq *wq, int num_wqebbs, u16 *cons_idx); + +void hinic_set_sge(struct hinic_sge *sge, dma_addr_t addr, u32 len); + +#endif /* _HINIC_PMD_WQ_H_ */ diff --git a/src/spdk/dpdk/drivers/net/hinic/base/meson.build b/src/spdk/dpdk/drivers/net/hinic/base/meson.build new file mode 100644 index 000000000..6cf947f84 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/hinic/base/meson.build @@ -0,0 +1,37 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2017 Huawei Technologies Co., Ltd + +sources = [ + 'hinic_pmd_api_cmd.c', + 'hinic_pmd_cfg.c', + 'hinic_pmd_cmdq.c', + 'hinic_pmd_eqs.c', + 'hinic_pmd_hwdev.c', + 'hinic_pmd_hwif.c', + 'hinic_pmd_mgmt.c', + 'hinic_pmd_niccfg.c', + 'hinic_pmd_nicio.c', + 'hinic_pmd_wq.c', + 'hinic_pmd_mbox.c', +] + +extra_flags = [] +# The driver runs only on arch64 machine, remove 32bit warnings +if not dpdk_conf.get('RTE_ARCH_64') + extra_flags += ['-Wno-int-to-pointer-cast', '-Wno-pointer-to-int-cast'] +endif + +foreach flag: extra_flags + if cc.has_argument(flag) + cflags += flag + endif +endforeach + +deps += ['hash'] + +c_args = cflags + +base_lib = static_library('hinic_base', sources, + dependencies: [static_rte_eal, static_rte_ethdev, static_rte_bus_pci, static_rte_hash], + c_args: c_args) +base_objs = 
base_lib.extract_all_objects() diff --git a/src/spdk/dpdk/drivers/net/hinic/hinic_pmd_ethdev.c b/src/spdk/dpdk/drivers/net/hinic/hinic_pmd_ethdev.c new file mode 100644 index 000000000..2f0f33a8d --- /dev/null +++ b/src/spdk/dpdk/drivers/net/hinic/hinic_pmd_ethdev.c @@ -0,0 +1,3257 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Huawei Technologies Co., Ltd + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "base/hinic_compat.h" +#include "base/hinic_pmd_hwdev.h" +#include "base/hinic_pmd_hwif.h" +#include "base/hinic_pmd_wq.h" +#include "base/hinic_pmd_cfg.h" +#include "base/hinic_pmd_mgmt.h" +#include "base/hinic_pmd_cmdq.h" +#include "base/hinic_pmd_niccfg.h" +#include "base/hinic_pmd_nicio.h" +#include "base/hinic_pmd_mbox.h" +#include "hinic_pmd_ethdev.h" +#include "hinic_pmd_tx.h" +#include "hinic_pmd_rx.h" + +/* Vendor ID used by Huawei devices */ +#define HINIC_HUAWEI_VENDOR_ID 0x19E5 + +/* Hinic devices */ +#define HINIC_DEV_ID_PRD 0x1822 +#define HINIC_DEV_ID_VF 0x375E +#define HINIC_DEV_ID_VF_HV 0x379E + +/* Mezz card for Blade Server */ +#define HINIC_DEV_ID_MEZZ_25GE 0x0210 +#define HINIC_DEV_ID_MEZZ_100GE 0x0205 + +/* 2*25G and 2*100G card */ +#define HINIC_DEV_ID_1822_DUAL_25GE 0x0206 +#define HINIC_DEV_ID_1822_100GE 0x0200 + +#define HINIC_SERVICE_MODE_NIC 2 + +#define HINIC_INTR_CB_UNREG_MAX_RETRIES 10 + +#define DEFAULT_BASE_COS 4 +#define NR_MAX_COS 8 + +#define HINIC_MIN_RX_BUF_SIZE 1024 +#define HINIC_MAX_UC_MAC_ADDRS 128 +#define HINIC_MAX_MC_MAC_ADDRS 2048 + +#define HINIC_DEFAULT_BURST_SIZE 32 +#define HINIC_DEFAULT_NB_QUEUES 1 +#define HINIC_DEFAULT_RING_SIZE 1024 +#define HINIC_MAX_LRO_SIZE 65536 + +/* + * vlan_id is a 12 bit number. + * The VFTA array is actually a 4096 bit array, 128 of 32bit elements. + * 2^5 = 32. The val of lower 5 bits specifies the bit in the 32bit element. + * The higher 7 bit val specifies VFTA array index. 
+ */ +#define HINIC_VFTA_BIT(vlan_id) (1 << ((vlan_id) & 0x1F)) +#define HINIC_VFTA_IDX(vlan_id) ((vlan_id) >> 5) + +#define HINIC_VLAN_FILTER_EN (1U << 0) + +#define HINIC_MTU_TO_PKTLEN(mtu) \ + ((mtu) + ETH_HLEN + ETH_CRC_LEN) + +#define HINIC_PKTLEN_TO_MTU(pktlen) \ + ((pktlen) - (ETH_HLEN + ETH_CRC_LEN)) + +/* lro numer limit for one packet */ +#define HINIC_LRO_WQE_NUM_DEFAULT 8 + +/* Driver-specific log messages type */ +int hinic_logtype; + +struct hinic_xstats_name_off { + char name[RTE_ETH_XSTATS_NAME_SIZE]; + u32 offset; +}; + +#define HINIC_FUNC_STAT(_stat_item) { \ + .name = #_stat_item, \ + .offset = offsetof(struct hinic_vport_stats, _stat_item) \ +} + +#define HINIC_PORT_STAT(_stat_item) { \ + .name = #_stat_item, \ + .offset = offsetof(struct hinic_phy_port_stats, _stat_item) \ +} + +static const struct hinic_xstats_name_off hinic_vport_stats_strings[] = { + HINIC_FUNC_STAT(tx_unicast_pkts_vport), + HINIC_FUNC_STAT(tx_unicast_bytes_vport), + HINIC_FUNC_STAT(tx_multicast_pkts_vport), + HINIC_FUNC_STAT(tx_multicast_bytes_vport), + HINIC_FUNC_STAT(tx_broadcast_pkts_vport), + HINIC_FUNC_STAT(tx_broadcast_bytes_vport), + + HINIC_FUNC_STAT(rx_unicast_pkts_vport), + HINIC_FUNC_STAT(rx_unicast_bytes_vport), + HINIC_FUNC_STAT(rx_multicast_pkts_vport), + HINIC_FUNC_STAT(rx_multicast_bytes_vport), + HINIC_FUNC_STAT(rx_broadcast_pkts_vport), + HINIC_FUNC_STAT(rx_broadcast_bytes_vport), + + HINIC_FUNC_STAT(tx_discard_vport), + HINIC_FUNC_STAT(rx_discard_vport), + HINIC_FUNC_STAT(tx_err_vport), + HINIC_FUNC_STAT(rx_err_vport), +}; + +#define HINIC_VPORT_XSTATS_NUM (sizeof(hinic_vport_stats_strings) / \ + sizeof(hinic_vport_stats_strings[0])) + +static const struct hinic_xstats_name_off hinic_phyport_stats_strings[] = { + HINIC_PORT_STAT(mac_rx_total_pkt_num), + HINIC_PORT_STAT(mac_rx_total_oct_num), + HINIC_PORT_STAT(mac_rx_bad_pkt_num), + HINIC_PORT_STAT(mac_rx_bad_oct_num), + HINIC_PORT_STAT(mac_rx_good_pkt_num), + HINIC_PORT_STAT(mac_rx_good_oct_num), + HINIC_PORT_STAT(mac_rx_uni_pkt_num), + HINIC_PORT_STAT(mac_rx_multi_pkt_num), + HINIC_PORT_STAT(mac_rx_broad_pkt_num), + HINIC_PORT_STAT(mac_tx_total_pkt_num), + HINIC_PORT_STAT(mac_tx_total_oct_num), + HINIC_PORT_STAT(mac_tx_bad_pkt_num), + HINIC_PORT_STAT(mac_tx_bad_oct_num), + HINIC_PORT_STAT(mac_tx_good_pkt_num), + HINIC_PORT_STAT(mac_tx_good_oct_num), + HINIC_PORT_STAT(mac_tx_uni_pkt_num), + HINIC_PORT_STAT(mac_tx_multi_pkt_num), + HINIC_PORT_STAT(mac_tx_broad_pkt_num), + HINIC_PORT_STAT(mac_rx_fragment_pkt_num), + HINIC_PORT_STAT(mac_rx_undersize_pkt_num), + HINIC_PORT_STAT(mac_rx_undermin_pkt_num), + HINIC_PORT_STAT(mac_rx_64_oct_pkt_num), + HINIC_PORT_STAT(mac_rx_65_127_oct_pkt_num), + HINIC_PORT_STAT(mac_rx_128_255_oct_pkt_num), + HINIC_PORT_STAT(mac_rx_256_511_oct_pkt_num), + HINIC_PORT_STAT(mac_rx_512_1023_oct_pkt_num), + HINIC_PORT_STAT(mac_rx_1024_1518_oct_pkt_num), + HINIC_PORT_STAT(mac_rx_1519_2047_oct_pkt_num), + HINIC_PORT_STAT(mac_rx_2048_4095_oct_pkt_num), + HINIC_PORT_STAT(mac_rx_4096_8191_oct_pkt_num), + HINIC_PORT_STAT(mac_rx_8192_9216_oct_pkt_num), + HINIC_PORT_STAT(mac_rx_9217_12287_oct_pkt_num), + HINIC_PORT_STAT(mac_rx_12288_16383_oct_pkt_num), + HINIC_PORT_STAT(mac_rx_1519_max_bad_pkt_num), + HINIC_PORT_STAT(mac_rx_1519_max_good_pkt_num), + HINIC_PORT_STAT(mac_rx_oversize_pkt_num), + HINIC_PORT_STAT(mac_rx_jabber_pkt_num), + HINIC_PORT_STAT(mac_rx_mac_pause_num), + HINIC_PORT_STAT(mac_rx_pfc_pkt_num), + HINIC_PORT_STAT(mac_rx_pfc_pri0_pkt_num), + HINIC_PORT_STAT(mac_rx_pfc_pri1_pkt_num), + 
HINIC_PORT_STAT(mac_rx_pfc_pri2_pkt_num), + HINIC_PORT_STAT(mac_rx_pfc_pri3_pkt_num), + HINIC_PORT_STAT(mac_rx_pfc_pri4_pkt_num), + HINIC_PORT_STAT(mac_rx_pfc_pri5_pkt_num), + HINIC_PORT_STAT(mac_rx_pfc_pri6_pkt_num), + HINIC_PORT_STAT(mac_rx_pfc_pri7_pkt_num), + HINIC_PORT_STAT(mac_rx_mac_control_pkt_num), + HINIC_PORT_STAT(mac_rx_sym_err_pkt_num), + HINIC_PORT_STAT(mac_rx_fcs_err_pkt_num), + HINIC_PORT_STAT(mac_rx_send_app_good_pkt_num), + HINIC_PORT_STAT(mac_rx_send_app_bad_pkt_num), + HINIC_PORT_STAT(mac_tx_fragment_pkt_num), + HINIC_PORT_STAT(mac_tx_undersize_pkt_num), + HINIC_PORT_STAT(mac_tx_undermin_pkt_num), + HINIC_PORT_STAT(mac_tx_64_oct_pkt_num), + HINIC_PORT_STAT(mac_tx_65_127_oct_pkt_num), + HINIC_PORT_STAT(mac_tx_128_255_oct_pkt_num), + HINIC_PORT_STAT(mac_tx_256_511_oct_pkt_num), + HINIC_PORT_STAT(mac_tx_512_1023_oct_pkt_num), + HINIC_PORT_STAT(mac_tx_1024_1518_oct_pkt_num), + HINIC_PORT_STAT(mac_tx_1519_2047_oct_pkt_num), + HINIC_PORT_STAT(mac_tx_2048_4095_oct_pkt_num), + HINIC_PORT_STAT(mac_tx_4096_8191_oct_pkt_num), + HINIC_PORT_STAT(mac_tx_8192_9216_oct_pkt_num), + HINIC_PORT_STAT(mac_tx_9217_12287_oct_pkt_num), + HINIC_PORT_STAT(mac_tx_12288_16383_oct_pkt_num), + HINIC_PORT_STAT(mac_tx_1519_max_bad_pkt_num), + HINIC_PORT_STAT(mac_tx_1519_max_good_pkt_num), + HINIC_PORT_STAT(mac_tx_oversize_pkt_num), + HINIC_PORT_STAT(mac_trans_jabber_pkt_num), + HINIC_PORT_STAT(mac_tx_mac_pause_num), + HINIC_PORT_STAT(mac_tx_pfc_pkt_num), + HINIC_PORT_STAT(mac_tx_pfc_pri0_pkt_num), + HINIC_PORT_STAT(mac_tx_pfc_pri1_pkt_num), + HINIC_PORT_STAT(mac_tx_pfc_pri2_pkt_num), + HINIC_PORT_STAT(mac_tx_pfc_pri3_pkt_num), + HINIC_PORT_STAT(mac_tx_pfc_pri4_pkt_num), + HINIC_PORT_STAT(mac_tx_pfc_pri5_pkt_num), + HINIC_PORT_STAT(mac_tx_pfc_pri6_pkt_num), + HINIC_PORT_STAT(mac_tx_pfc_pri7_pkt_num), + HINIC_PORT_STAT(mac_tx_mac_control_pkt_num), + HINIC_PORT_STAT(mac_tx_err_all_pkt_num), + HINIC_PORT_STAT(mac_tx_from_app_good_pkt_num), + HINIC_PORT_STAT(mac_tx_from_app_bad_pkt_num), +}; + +#define HINIC_PHYPORT_XSTATS_NUM (sizeof(hinic_phyport_stats_strings) / \ + sizeof(hinic_phyport_stats_strings[0])) + +static const struct hinic_xstats_name_off hinic_rxq_stats_strings[] = { + {"rx_nombuf", offsetof(struct hinic_rxq_stats, rx_nombuf)}, + {"burst_pkt", offsetof(struct hinic_rxq_stats, burst_pkts)}, +}; + +#define HINIC_RXQ_XSTATS_NUM (sizeof(hinic_rxq_stats_strings) / \ + sizeof(hinic_rxq_stats_strings[0])) + +static const struct hinic_xstats_name_off hinic_txq_stats_strings[] = { + {"tx_busy", offsetof(struct hinic_txq_stats, tx_busy)}, + {"offload_errors", offsetof(struct hinic_txq_stats, off_errs)}, + {"copy_pkts", offsetof(struct hinic_txq_stats, cpy_pkts)}, + {"rl_drop", offsetof(struct hinic_txq_stats, rl_drop)}, + {"burst_pkts", offsetof(struct hinic_txq_stats, burst_pkts)}, + {"sge_len0", offsetof(struct hinic_txq_stats, sge_len0)}, + {"mbuf_null", offsetof(struct hinic_txq_stats, mbuf_null)}, +}; + +#define HINIC_TXQ_XSTATS_NUM (sizeof(hinic_txq_stats_strings) / \ + sizeof(hinic_txq_stats_strings[0])) + +static int hinic_xstats_calc_num(struct hinic_nic_dev *nic_dev) +{ + if (HINIC_IS_VF(nic_dev->hwdev)) { + return (HINIC_VPORT_XSTATS_NUM + + HINIC_RXQ_XSTATS_NUM * nic_dev->num_rq + + HINIC_TXQ_XSTATS_NUM * nic_dev->num_sq); + } else { + return (HINIC_VPORT_XSTATS_NUM + + HINIC_PHYPORT_XSTATS_NUM + + HINIC_RXQ_XSTATS_NUM * nic_dev->num_rq + + HINIC_TXQ_XSTATS_NUM * nic_dev->num_sq); + } +} + +static const struct rte_eth_desc_lim hinic_rx_desc_lim = { + .nb_max = HINIC_MAX_QUEUE_DEPTH, + 
.nb_min = HINIC_MIN_QUEUE_DEPTH, + .nb_align = HINIC_RXD_ALIGN, +}; + +static const struct rte_eth_desc_lim hinic_tx_desc_lim = { + .nb_max = HINIC_MAX_QUEUE_DEPTH, + .nb_min = HINIC_MIN_QUEUE_DEPTH, + .nb_align = HINIC_TXD_ALIGN, +}; + +static int hinic_vlan_offload_set(struct rte_eth_dev *dev, int mask); + +/** + * Interrupt handler triggered by NIC for handling + * specific event. + * + * @param: The address of parameter (struct rte_eth_dev *) regsitered before. + */ +static void hinic_dev_interrupt_handler(void *param) +{ + struct rte_eth_dev *dev = param; + struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); + + if (!hinic_test_bit(HINIC_DEV_INTR_EN, &nic_dev->dev_status)) { + PMD_DRV_LOG(WARNING, "Device's interrupt is disabled, ignore interrupt event, dev_name: %s, port_id: %d", + nic_dev->proc_dev_name, dev->data->port_id); + return; + } + + /* aeq0 msg handler */ + hinic_dev_handle_aeq_event(nic_dev->hwdev, param); +} + +/** + * Ethernet device configuration. + * + * Prepare the driver for a given number of TX and RX queues, mtu size + * and configure RSS. + * + * @param dev + * Pointer to Ethernet device structure. + * + * @return + * 0 on success, negative error value otherwise. + */ +static int hinic_dev_configure(struct rte_eth_dev *dev) +{ + struct hinic_nic_dev *nic_dev; + struct hinic_nic_io *nic_io; + int err; + + nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); + nic_io = nic_dev->hwdev->nic_io; + + nic_dev->num_sq = dev->data->nb_tx_queues; + nic_dev->num_rq = dev->data->nb_rx_queues; + + nic_io->num_sqs = dev->data->nb_tx_queues; + nic_io->num_rqs = dev->data->nb_rx_queues; + + /* queue pair is max_num(sq, rq) */ + nic_dev->num_qps = (nic_dev->num_sq > nic_dev->num_rq) ? + nic_dev->num_sq : nic_dev->num_rq; + nic_io->num_qps = nic_dev->num_qps; + + if (nic_dev->num_qps > nic_io->max_qps) { + PMD_DRV_LOG(ERR, + "Queue number out of range, get queue_num:%d, max_queue_num:%d", + nic_dev->num_qps, nic_io->max_qps); + return -EINVAL; + } + + if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) + dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH; + + /* mtu size is 256~9600 */ + if (dev->data->dev_conf.rxmode.max_rx_pkt_len < HINIC_MIN_FRAME_SIZE || + dev->data->dev_conf.rxmode.max_rx_pkt_len > + HINIC_MAX_JUMBO_FRAME_SIZE) { + PMD_DRV_LOG(ERR, + "Max rx pkt len out of range, get max_rx_pkt_len:%d, " + "expect between %d and %d", + dev->data->dev_conf.rxmode.max_rx_pkt_len, + HINIC_MIN_FRAME_SIZE, HINIC_MAX_JUMBO_FRAME_SIZE); + return -EINVAL; + } + + nic_dev->mtu_size = + HINIC_PKTLEN_TO_MTU(dev->data->dev_conf.rxmode.max_rx_pkt_len); + + /* rss template */ + err = hinic_config_mq_mode(dev, TRUE); + if (err) { + PMD_DRV_LOG(ERR, "Config multi-queue failed"); + return err; + } + + /* init vlan offoad */ + err = hinic_vlan_offload_set(dev, + ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK); + if (err) { + PMD_DRV_LOG(ERR, "Initialize vlan filter and strip failed"); + (void)hinic_config_mq_mode(dev, FALSE); + return err; + } + + /* clear fdir filter flag in function table */ + hinic_free_fdir_filter(nic_dev); + + return HINIC_OK; +} + +/** + * DPDK callback to create the receive queue. + * + * @param dev + * Pointer to Ethernet device structure. + * @param queue_idx + * RX queue index. + * @param nb_desc + * Number of descriptors for receive queue. + * @param socket_id + * NUMA socket on which memory must be allocated. + * @param rx_conf + * Thresholds parameters (unused_). + * @param mp + * Memory pool for buffer allocations. 
+ * + * @return + * 0 on success, negative error value otherwise. + */ +static int hinic_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, + uint16_t nb_desc, unsigned int socket_id, + __rte_unused const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *mp) +{ + int rc; + struct hinic_nic_dev *nic_dev; + struct hinic_hwdev *hwdev; + struct hinic_rxq *rxq; + u16 rq_depth, rx_free_thresh; + u32 buf_size; + + nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); + hwdev = nic_dev->hwdev; + + /* queue depth must be power of 2, otherwise will be aligned up */ + rq_depth = (nb_desc & (nb_desc - 1)) ? + ((u16)(1U << (ilog2(nb_desc) + 1))) : nb_desc; + + /* + * Validate number of receive descriptors. + * It must not exceed hardware maximum and minimum. + */ + if (rq_depth > HINIC_MAX_QUEUE_DEPTH || + rq_depth < HINIC_MIN_QUEUE_DEPTH) { + PMD_DRV_LOG(ERR, "RX queue depth is out of range from %d to %d, (nb_desc=%d, q_depth=%d, port=%d queue=%d)", + HINIC_MIN_QUEUE_DEPTH, HINIC_MAX_QUEUE_DEPTH, + (int)nb_desc, (int)rq_depth, + (int)dev->data->port_id, (int)queue_idx); + return -EINVAL; + } + + /* + * The RX descriptor ring will be cleaned after rxq->rx_free_thresh + * descriptors are used or if the number of descriptors required + * to transmit a packet is greater than the number of free RX + * descriptors. + * The following constraints must be satisfied: + * rx_free_thresh must be greater than 0. + * rx_free_thresh must be less than the size of the ring minus 1. + * When set to zero use default values. + */ + rx_free_thresh = (u16)((rx_conf->rx_free_thresh) ? + rx_conf->rx_free_thresh : HINIC_DEFAULT_RX_FREE_THRESH); + if (rx_free_thresh >= (rq_depth - 1)) { + PMD_DRV_LOG(ERR, "rx_free_thresh must be less than the number of RX descriptors minus 1. (rx_free_thresh=%u port=%d queue=%d)", + (unsigned int)rx_free_thresh, + (int)dev->data->port_id, + (int)queue_idx); + return -EINVAL; + } + + rxq = rte_zmalloc_socket("hinic_rx_queue", sizeof(struct hinic_rxq), + RTE_CACHE_LINE_SIZE, socket_id); + if (!rxq) { + PMD_DRV_LOG(ERR, "Allocate rxq[%d] failed, dev_name: %s", + queue_idx, dev->data->name); + return -ENOMEM; + } + nic_dev->rxqs[queue_idx] = rxq; + + /* alloc rx sq hw wqe page */ + rc = hinic_create_rq(hwdev, queue_idx, rq_depth, socket_id); + if (rc) { + PMD_DRV_LOG(ERR, "Create rxq[%d] failed, dev_name: %s, rq_depth: %d", + queue_idx, dev->data->name, rq_depth); + goto ceate_rq_fail; + } + + /* mbuf pool must be assigned before setup rx resources */ + rxq->mb_pool = mp; + + rc = + hinic_convert_rx_buf_size(rte_pktmbuf_data_room_size(rxq->mb_pool) - + RTE_PKTMBUF_HEADROOM, &buf_size); + if (rc) { + PMD_DRV_LOG(ERR, "Adjust buf size failed, dev_name: %s", + dev->data->name); + goto adjust_bufsize_fail; + } + + /* rx queue info, rearm control */ + rxq->wq = &hwdev->nic_io->rq_wq[queue_idx]; + rxq->pi_virt_addr = hwdev->nic_io->qps[queue_idx].rq.pi_virt_addr; + rxq->nic_dev = nic_dev; + rxq->q_id = queue_idx; + rxq->q_depth = rq_depth; + rxq->buf_len = (u16)buf_size; + rxq->rx_free_thresh = rx_free_thresh; + rxq->socket_id = socket_id; + + /* the last point cant do mbuf rearm in bulk */ + rxq->rxinfo_align_end = rxq->q_depth - rxq->rx_free_thresh; + + /* device port identifier */ + rxq->port_id = dev->data->port_id; + + /* alloc rx_cqe and prepare rq_wqe */ + rc = hinic_setup_rx_resources(rxq); + if (rc) { + PMD_DRV_LOG(ERR, "Setup rxq[%d] rx_resources failed, dev_name: %s", + queue_idx, dev->data->name); + goto setup_rx_res_err; + } + + /* record nic_dev rxq in rte_eth rx_queues */ + 
dev->data->rx_queues[queue_idx] = rxq; + + return 0; + +setup_rx_res_err: +adjust_bufsize_fail: + hinic_destroy_rq(hwdev, queue_idx); + +ceate_rq_fail: + rte_free(rxq); + + return rc; +} + +static void hinic_reset_rx_queue(struct rte_eth_dev *dev) +{ + struct hinic_rxq *rxq; + struct hinic_nic_dev *nic_dev; + int q_id = 0; + + nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); + + for (q_id = 0; q_id < nic_dev->num_rq; q_id++) { + rxq = dev->data->rx_queues[q_id]; + + rxq->wq->cons_idx = 0; + rxq->wq->prod_idx = 0; + rxq->wq->delta = rxq->q_depth; + rxq->wq->mask = rxq->q_depth - 1; + + /* alloc mbuf to rq */ + hinic_rx_alloc_pkts(rxq); + } +} + +/** + * DPDK callback to configure the transmit queue. + * + * @param dev + * Pointer to Ethernet device structure. + * @param queue_idx + * Transmit queue index. + * @param nb_desc + * Number of descriptors for transmit queue. + * @param socket_id + * NUMA socket on which memory must be allocated. + * @param tx_conf + * Tx queue configuration parameters. + * + * @return + * 0 on success, negative error value otherwise. + */ +static int hinic_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, + uint16_t nb_desc, unsigned int socket_id, + __rte_unused const struct rte_eth_txconf *tx_conf) +{ + int rc; + struct hinic_nic_dev *nic_dev; + struct hinic_hwdev *hwdev; + struct hinic_txq *txq; + u16 sq_depth, tx_free_thresh; + + nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); + hwdev = nic_dev->hwdev; + + /* queue depth must be power of 2, otherwise will be aligned up */ + sq_depth = (nb_desc & (nb_desc - 1)) ? + ((u16)(1U << (ilog2(nb_desc) + 1))) : nb_desc; + + /* + * Validate number of transmit descriptors. + * It must not exceed hardware maximum and minimum. + */ + if (sq_depth > HINIC_MAX_QUEUE_DEPTH || + sq_depth < HINIC_MIN_QUEUE_DEPTH) { + PMD_DRV_LOG(ERR, "TX queue depth is out of range from %d to %d, (nb_desc=%d, q_depth=%d, port=%d queue=%d)", + HINIC_MIN_QUEUE_DEPTH, HINIC_MAX_QUEUE_DEPTH, + (int)nb_desc, (int)sq_depth, + (int)dev->data->port_id, (int)queue_idx); + return -EINVAL; + } + + /* + * The TX descriptor ring will be cleaned after txq->tx_free_thresh + * descriptors are used or if the number of descriptors required + * to transmit a packet is greater than the number of free TX + * descriptors. + * The following constraints must be satisfied: + * tx_free_thresh must be greater than 0. + * tx_free_thresh must be less than the size of the ring minus 1. + * When set to zero use default values. + */ + tx_free_thresh = (u16)((tx_conf->tx_free_thresh) ? + tx_conf->tx_free_thresh : HINIC_DEFAULT_TX_FREE_THRESH); + if (tx_free_thresh >= (sq_depth - 1)) { + PMD_DRV_LOG(ERR, "tx_free_thresh must be less than the number of TX descriptors minus 1. 
(tx_free_thresh=%u port=%d queue=%d)", + (unsigned int)tx_free_thresh, (int)dev->data->port_id, + (int)queue_idx); + return -EINVAL; + } + + txq = rte_zmalloc_socket("hinic_tx_queue", sizeof(struct hinic_txq), + RTE_CACHE_LINE_SIZE, socket_id); + if (!txq) { + PMD_DRV_LOG(ERR, "Allocate txq[%d] failed, dev_name: %s", + queue_idx, dev->data->name); + return -ENOMEM; + } + nic_dev->txqs[queue_idx] = txq; + + /* alloc tx sq hw wqepage */ + rc = hinic_create_sq(hwdev, queue_idx, sq_depth, socket_id); + if (rc) { + PMD_DRV_LOG(ERR, "Create txq[%d] failed, dev_name: %s, sq_depth: %d", + queue_idx, dev->data->name, sq_depth); + goto create_sq_fail; + } + + txq->q_id = queue_idx; + txq->q_depth = sq_depth; + txq->port_id = dev->data->port_id; + txq->tx_free_thresh = tx_free_thresh; + txq->nic_dev = nic_dev; + txq->wq = &hwdev->nic_io->sq_wq[queue_idx]; + txq->sq = &hwdev->nic_io->qps[queue_idx].sq; + txq->cons_idx_addr = hwdev->nic_io->qps[queue_idx].sq.cons_idx_addr; + txq->sq_head_addr = HINIC_GET_WQ_HEAD(txq); + txq->sq_bot_sge_addr = HINIC_GET_WQ_TAIL(txq) - + sizeof(struct hinic_sq_bufdesc); + txq->cos = nic_dev->default_cos; + txq->socket_id = socket_id; + + /* alloc software txinfo */ + rc = hinic_setup_tx_resources(txq); + if (rc) { + PMD_DRV_LOG(ERR, "Setup txq[%d] tx_resources failed, dev_name: %s", + queue_idx, dev->data->name); + goto setup_tx_res_fail; + } + + /* record nic_dev txq in rte_eth tx_queues */ + dev->data->tx_queues[queue_idx] = txq; + + return HINIC_OK; + +setup_tx_res_fail: + hinic_destroy_sq(hwdev, queue_idx); + +create_sq_fail: + rte_free(txq); + + return rc; +} + +static void hinic_reset_tx_queue(struct rte_eth_dev *dev) +{ + struct hinic_nic_dev *nic_dev; + struct hinic_txq *txq; + struct hinic_nic_io *nic_io; + struct hinic_hwdev *hwdev; + volatile u32 *ci_addr; + int q_id = 0; + + nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); + hwdev = nic_dev->hwdev; + nic_io = hwdev->nic_io; + + for (q_id = 0; q_id < nic_dev->num_sq; q_id++) { + txq = dev->data->tx_queues[q_id]; + + txq->wq->cons_idx = 0; + txq->wq->prod_idx = 0; + txq->wq->delta = txq->q_depth; + txq->wq->mask = txq->q_depth - 1; + + /* clear hardware ci */ + ci_addr = (volatile u32 *)HINIC_CI_VADDR(nic_io->ci_vaddr_base, + q_id); + *ci_addr = 0; + } +} + +/** + * Get link speed from NIC. + * + * @param dev + * Pointer to Ethernet device structure. + * @param speed_capa + * Pointer to link speed structure. 
+ */ +static void hinic_get_speed_capa(struct rte_eth_dev *dev, uint32_t *speed_capa) +{ + struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); + u32 supported_link, advertised_link; + int err; + +#define HINIC_LINK_MODE_SUPPORT_1G (1U << HINIC_GE_BASE_KX) + +#define HINIC_LINK_MODE_SUPPORT_10G (1U << HINIC_10GE_BASE_KR) + +#define HINIC_LINK_MODE_SUPPORT_25G ((1U << HINIC_25GE_BASE_KR_S) | \ + (1U << HINIC_25GE_BASE_CR_S) | \ + (1U << HINIC_25GE_BASE_KR) | \ + (1U << HINIC_25GE_BASE_CR)) + +#define HINIC_LINK_MODE_SUPPORT_40G ((1U << HINIC_40GE_BASE_KR4) | \ + (1U << HINIC_40GE_BASE_CR4)) + +#define HINIC_LINK_MODE_SUPPORT_100G ((1U << HINIC_100GE_BASE_KR4) | \ + (1U << HINIC_100GE_BASE_CR4)) + + err = hinic_get_link_mode(nic_dev->hwdev, + &supported_link, &advertised_link); + if (err || supported_link == HINIC_SUPPORTED_UNKNOWN || + advertised_link == HINIC_SUPPORTED_UNKNOWN) { + PMD_DRV_LOG(WARNING, "Get speed capability info failed, device: %s, port_id: %u", + nic_dev->proc_dev_name, dev->data->port_id); + } else { + *speed_capa = 0; + if (!!(supported_link & HINIC_LINK_MODE_SUPPORT_1G)) + *speed_capa |= ETH_LINK_SPEED_1G; + if (!!(supported_link & HINIC_LINK_MODE_SUPPORT_10G)) + *speed_capa |= ETH_LINK_SPEED_10G; + if (!!(supported_link & HINIC_LINK_MODE_SUPPORT_25G)) + *speed_capa |= ETH_LINK_SPEED_25G; + if (!!(supported_link & HINIC_LINK_MODE_SUPPORT_40G)) + *speed_capa |= ETH_LINK_SPEED_40G; + if (!!(supported_link & HINIC_LINK_MODE_SUPPORT_100G)) + *speed_capa |= ETH_LINK_SPEED_100G; + } +} + +/** + * DPDK callback to get information about the device. + * + * @param dev + * Pointer to Ethernet device structure. + * @param info + * Pointer to Info structure output buffer. + */ +static int +hinic_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info) +{ + struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); + + info->max_rx_queues = nic_dev->nic_cap.max_rqs; + info->max_tx_queues = nic_dev->nic_cap.max_sqs; + info->min_rx_bufsize = HINIC_MIN_RX_BUF_SIZE; + info->max_rx_pktlen = HINIC_MAX_JUMBO_FRAME_SIZE; + info->max_mac_addrs = HINIC_MAX_UC_MAC_ADDRS; + info->min_mtu = HINIC_MIN_MTU_SIZE; + info->max_mtu = HINIC_MAX_MTU_SIZE; + info->max_lro_pkt_size = HINIC_MAX_LRO_SIZE; + + hinic_get_speed_capa(dev, &info->speed_capa); + info->rx_queue_offload_capa = 0; + info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP | + DEV_RX_OFFLOAD_IPV4_CKSUM | + DEV_RX_OFFLOAD_UDP_CKSUM | + DEV_RX_OFFLOAD_TCP_CKSUM | + DEV_RX_OFFLOAD_VLAN_FILTER | + DEV_RX_OFFLOAD_SCATTER | + DEV_RX_OFFLOAD_JUMBO_FRAME | + DEV_RX_OFFLOAD_TCP_LRO | + DEV_RX_OFFLOAD_RSS_HASH; + + info->tx_queue_offload_capa = 0; + info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT | + DEV_TX_OFFLOAD_IPV4_CKSUM | + DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_TCP_CKSUM | + DEV_TX_OFFLOAD_SCTP_CKSUM | + DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | + DEV_TX_OFFLOAD_TCP_TSO | + DEV_TX_OFFLOAD_MULTI_SEGS; + + info->hash_key_size = HINIC_RSS_KEY_SIZE; + info->reta_size = HINIC_RSS_INDIR_SIZE; + info->flow_type_rss_offloads = HINIC_RSS_OFFLOAD_ALL; + info->rx_desc_lim = hinic_rx_desc_lim; + info->tx_desc_lim = hinic_tx_desc_lim; + + /* Driver-preferred Rx/Tx parameters */ + info->default_rxportconf.burst_size = HINIC_DEFAULT_BURST_SIZE; + info->default_txportconf.burst_size = HINIC_DEFAULT_BURST_SIZE; + info->default_rxportconf.nb_queues = HINIC_DEFAULT_NB_QUEUES; + info->default_txportconf.nb_queues = HINIC_DEFAULT_NB_QUEUES; + info->default_rxportconf.ring_size = HINIC_DEFAULT_RING_SIZE; + 
info->default_txportconf.ring_size = HINIC_DEFAULT_RING_SIZE; + + return 0; +} + +static int hinic_fw_version_get(struct rte_eth_dev *dev, char *fw_version, + size_t fw_size) +{ + struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); + char fw_ver[HINIC_MGMT_VERSION_MAX_LEN] = {0}; + int err; + + err = hinic_get_mgmt_version(nic_dev->hwdev, fw_ver); + if (err) { + PMD_DRV_LOG(ERR, "Failed to get fw version"); + return -EINVAL; + } + + if (fw_size < strlen(fw_ver) + 1) + return (strlen(fw_ver) + 1); + + snprintf(fw_version, fw_size, "%s", fw_ver); + + return 0; +} + +static int hinic_config_rx_mode(struct hinic_nic_dev *nic_dev, u32 rx_mode_ctrl) +{ + int err; + + err = hinic_set_rx_mode(nic_dev->hwdev, rx_mode_ctrl); + if (err) { + PMD_DRV_LOG(ERR, "Failed to set rx mode"); + return -EINVAL; + } + nic_dev->rx_mode_status = rx_mode_ctrl; + + return 0; +} + +static int hinic_rxtx_configure(struct rte_eth_dev *dev) +{ + struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); + int err; + + /* rx configure, if rss enable, need to init default configuration */ + err = hinic_rx_configure(dev); + if (err) { + PMD_DRV_LOG(ERR, "Configure rss failed"); + return err; + } + + /* rx mode init */ + err = hinic_config_rx_mode(nic_dev, HINIC_DEFAULT_RX_MODE); + if (err) { + PMD_DRV_LOG(ERR, "Configure rx_mode:0x%x failed", + HINIC_DEFAULT_RX_MODE); + goto set_rx_mode_fail; + } + + return HINIC_OK; + +set_rx_mode_fail: + hinic_rx_remove_configure(dev); + + return err; +} + +static void hinic_remove_rxtx_configure(struct rte_eth_dev *dev) +{ + struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); + + (void)hinic_config_rx_mode(nic_dev, 0); + hinic_rx_remove_configure(dev); +} + +static int hinic_priv_get_dev_link_status(struct hinic_nic_dev *nic_dev, + struct rte_eth_link *link) +{ + int rc; + u8 port_link_status = 0; + struct nic_port_info port_link_info; + struct hinic_hwdev *nic_hwdev = nic_dev->hwdev; + uint32_t port_speed[LINK_SPEED_MAX] = {ETH_SPEED_NUM_10M, + ETH_SPEED_NUM_100M, ETH_SPEED_NUM_1G, + ETH_SPEED_NUM_10G, ETH_SPEED_NUM_25G, + ETH_SPEED_NUM_40G, ETH_SPEED_NUM_100G}; + + rc = hinic_get_link_status(nic_hwdev, &port_link_status); + if (rc) + return rc; + + if (!port_link_status) { + link->link_status = ETH_LINK_DOWN; + link->link_speed = 0; + link->link_duplex = ETH_LINK_HALF_DUPLEX; + link->link_autoneg = ETH_LINK_FIXED; + return HINIC_OK; + } + + memset(&port_link_info, 0, sizeof(port_link_info)); + rc = hinic_get_port_info(nic_hwdev, &port_link_info); + if (rc) + return rc; + + link->link_speed = port_speed[port_link_info.speed % LINK_SPEED_MAX]; + link->link_duplex = port_link_info.duplex; + link->link_autoneg = port_link_info.autoneg_state; + link->link_status = port_link_status; + + return HINIC_OK; +} + +/** + * DPDK callback to retrieve physical link information. + * + * @param dev + * Pointer to Ethernet device structure. + * @param wait_to_complete + * Wait for request completion. 
+ * + * @return + * 0 link status changed, -1 link status not changed + */ +static int hinic_link_update(struct rte_eth_dev *dev, int wait_to_complete) +{ +#define CHECK_INTERVAL 10 /* 10ms */ +#define MAX_REPEAT_TIME 100 /* 1s (100 * 10ms) in total */ + int rc = HINIC_OK; + struct rte_eth_link link; + struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); + unsigned int rep_cnt = MAX_REPEAT_TIME; + + memset(&link, 0, sizeof(link)); + do { + /* Get link status information from hardware */ + rc = hinic_priv_get_dev_link_status(nic_dev, &link); + if (rc != HINIC_OK) { + link.link_speed = ETH_SPEED_NUM_NONE; + link.link_duplex = ETH_LINK_FULL_DUPLEX; + PMD_DRV_LOG(ERR, "Get link status failed"); + goto out; + } + + if (!wait_to_complete || link.link_status) + break; + + rte_delay_ms(CHECK_INTERVAL); + } while (rep_cnt--); + +out: + rc = rte_eth_linkstatus_set(dev, &link); + return rc; +} + +/** + * DPDK callback to bring the link UP. + * + * @param dev + * Pointer to Ethernet device structure. + * + * @return + * 0 on success, negative errno value on failure. + */ +static int hinic_dev_set_link_up(struct rte_eth_dev *dev) +{ + struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); + int ret; + + ret = hinic_set_xsfp_tx_status(nic_dev->hwdev, true); + if (ret) { + PMD_DRV_LOG(ERR, "Enable port tx xsfp failed, dev_name: %s, port_id: %d", + nic_dev->proc_dev_name, dev->data->port_id); + return ret; + } + + /* link status follow phy port status, up will open pma */ + ret = hinic_set_port_enable(nic_dev->hwdev, true); + if (ret) + PMD_DRV_LOG(ERR, "Set mac link up failed, dev_name: %s, port_id: %d", + nic_dev->proc_dev_name, dev->data->port_id); + + return ret; +} + +/** + * DPDK callback to bring the link DOWN. + * + * @param dev + * Pointer to Ethernet device structure. + * + * @return + * 0 on success, negative errno value on failure. + */ +static int hinic_dev_set_link_down(struct rte_eth_dev *dev) +{ + struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); + int ret; + + ret = hinic_set_xsfp_tx_status(nic_dev->hwdev, false); + if (ret) { + PMD_DRV_LOG(ERR, "Disable port tx xsfp failed, dev_name: %s, port_id: %d", + nic_dev->proc_dev_name, dev->data->port_id); + return ret; + } + + /* link status follow phy port status, up will close pma */ + ret = hinic_set_port_enable(nic_dev->hwdev, false); + if (ret) + PMD_DRV_LOG(ERR, "Set mac link down failed, dev_name: %s, port_id: %d", + nic_dev->proc_dev_name, dev->data->port_id); + + return ret; +} + +/** + * DPDK callback to start the device. + * + * @param dev + * Pointer to Ethernet device structure. + * + * @return + * 0 on success, negative errno value on failure. 
+ */ +static int hinic_dev_start(struct rte_eth_dev *dev) +{ + int rc; + char *name; + struct hinic_nic_dev *nic_dev; + + nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); + name = dev->data->name; + + /* reset rx and tx queue */ + hinic_reset_rx_queue(dev); + hinic_reset_tx_queue(dev); + + /* get func rx buf size */ + hinic_get_func_rx_buf_size(nic_dev); + + /* init txq and rxq context */ + rc = hinic_init_qp_ctxts(nic_dev->hwdev); + if (rc) { + PMD_DRV_LOG(ERR, "Initialize qp context failed, dev_name: %s", + name); + goto init_qp_fail; + } + + /* rss template */ + rc = hinic_config_mq_mode(dev, TRUE); + if (rc) { + PMD_DRV_LOG(ERR, "Configure mq mode failed, dev_name: %s", + name); + goto cfg_mq_mode_fail; + } + + /* set default mtu */ + rc = hinic_set_port_mtu(nic_dev->hwdev, nic_dev->mtu_size); + if (rc) { + PMD_DRV_LOG(ERR, "Set mtu_size[%d] failed, dev_name: %s", + nic_dev->mtu_size, name); + goto set_mtu_fail; + } + + /* configure rss rx_mode and other rx or tx default feature */ + rc = hinic_rxtx_configure(dev); + if (rc) { + PMD_DRV_LOG(ERR, "Configure tx and rx failed, dev_name: %s", + name); + goto cfg_rxtx_fail; + } + + /* reactive pf status, so that uP report asyn event */ + hinic_set_pf_status(nic_dev->hwdev->hwif, HINIC_PF_STATUS_ACTIVE_FLAG); + + /* open virtual port and ready to start packet receiving */ + rc = hinic_set_vport_enable(nic_dev->hwdev, true); + if (rc) { + PMD_DRV_LOG(ERR, "Enable vport failed, dev_name:%s", name); + goto en_vport_fail; + } + + /* open physical port and start packet receiving */ + rc = hinic_set_port_enable(nic_dev->hwdev, true); + if (rc) { + PMD_DRV_LOG(ERR, "Enable physical port failed, dev_name: %s", + name); + goto en_port_fail; + } + + /* update eth_dev link status */ + if (dev->data->dev_conf.intr_conf.lsc != 0) + (void)hinic_link_update(dev, 0); + + hinic_set_bit(HINIC_DEV_START, &nic_dev->dev_status); + + return 0; + +en_port_fail: + (void)hinic_set_vport_enable(nic_dev->hwdev, false); + +en_vport_fail: + hinic_set_pf_status(nic_dev->hwdev->hwif, HINIC_PF_STATUS_INIT); + + /* Flush tx && rx chip resources in case of set vport fake fail */ + (void)hinic_flush_qp_res(nic_dev->hwdev); + rte_delay_ms(100); + + hinic_remove_rxtx_configure(dev); + +cfg_rxtx_fail: +set_mtu_fail: +cfg_mq_mode_fail: + hinic_free_qp_ctxts(nic_dev->hwdev); + +init_qp_fail: + hinic_free_all_rx_mbuf(dev); + hinic_free_all_tx_mbuf(dev); + + return rc; +} + +/** + * DPDK callback to release the receive queue. + * + * @param queue + * Generic receive queue pointer. + */ +static void hinic_rx_queue_release(void *queue) +{ + struct hinic_rxq *rxq = queue; + struct hinic_nic_dev *nic_dev; + + if (!rxq) { + PMD_DRV_LOG(WARNING, "Rxq is null when release"); + return; + } + nic_dev = rxq->nic_dev; + + /* free rxq_pkt mbuf */ + hinic_free_all_rx_mbufs(rxq); + + /* free rxq_cqe, rxq_info */ + hinic_free_rx_resources(rxq); + + /* free root rq wq */ + hinic_destroy_rq(nic_dev->hwdev, rxq->q_id); + + nic_dev->rxqs[rxq->q_id] = NULL; + + /* free rxq */ + rte_free(rxq); +} + +/** + * DPDK callback to release the transmit queue. + * + * @param queue + * Generic transmit queue pointer. 
+ */ +static void hinic_tx_queue_release(void *queue) +{ + struct hinic_txq *txq = queue; + struct hinic_nic_dev *nic_dev; + + if (!txq) { + PMD_DRV_LOG(WARNING, "Txq is null when release"); + return; + } + nic_dev = txq->nic_dev; + + /* free txq_pkt mbuf */ + hinic_free_all_tx_mbufs(txq); + + /* free txq_info */ + hinic_free_tx_resources(txq); + + /* free root sq wq */ + hinic_destroy_sq(nic_dev->hwdev, txq->q_id); + nic_dev->txqs[txq->q_id] = NULL; + + /* free txq */ + rte_free(txq); +} + +static void hinic_free_all_rq(struct hinic_nic_dev *nic_dev) +{ + u16 q_id; + + for (q_id = 0; q_id < nic_dev->num_rq; q_id++) + hinic_destroy_rq(nic_dev->hwdev, q_id); +} + +static void hinic_free_all_sq(struct hinic_nic_dev *nic_dev) +{ + u16 q_id; + + for (q_id = 0; q_id < nic_dev->num_sq; q_id++) + hinic_destroy_sq(nic_dev->hwdev, q_id); +} + +/** + * DPDK callback to stop the device. + * + * @param dev + * Pointer to Ethernet device structure. + */ +static void hinic_dev_stop(struct rte_eth_dev *dev) +{ + int rc; + char *name; + uint16_t port_id; + struct hinic_nic_dev *nic_dev; + struct rte_eth_link link; + + nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); + name = dev->data->name; + port_id = dev->data->port_id; + + if (!hinic_test_and_clear_bit(HINIC_DEV_START, &nic_dev->dev_status)) { + PMD_DRV_LOG(INFO, "Device %s already stopped", name); + return; + } + + /* just stop phy port and vport */ + rc = hinic_set_port_enable(nic_dev->hwdev, false); + if (rc) + PMD_DRV_LOG(WARNING, "Disable phy port failed, error: %d, dev_name: %s, port_id: %d", + rc, name, port_id); + + rc = hinic_set_vport_enable(nic_dev->hwdev, false); + if (rc) + PMD_DRV_LOG(WARNING, "Disable vport failed, error: %d, dev_name: %s, port_id: %d", + rc, name, port_id); + + /* Clear recorded link status */ + memset(&link, 0, sizeof(link)); + (void)rte_eth_linkstatus_set(dev, &link); + + /* flush pending io request */ + rc = hinic_rx_tx_flush(nic_dev->hwdev); + if (rc) + PMD_DRV_LOG(WARNING, "Flush pending io failed, error: %d, dev_name: %s, port_id: %d", + rc, name, port_id); + + /* clean rss table and rx_mode */ + hinic_remove_rxtx_configure(dev); + + /* clean root context */ + hinic_free_qp_ctxts(nic_dev->hwdev); + + hinic_destroy_fdir_filter(dev); + + /* free mbuf */ + hinic_free_all_rx_mbuf(dev); + hinic_free_all_tx_mbuf(dev); +} + +static void hinic_disable_interrupt(struct rte_eth_dev *dev) +{ + struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + int ret, retries = 0; + + hinic_clear_bit(HINIC_DEV_INTR_EN, &nic_dev->dev_status); + + /* disable msix interrupt in hardware */ + hinic_set_msix_state(nic_dev->hwdev, 0, HINIC_MSIX_DISABLE); + + /* disable rte interrupt */ + ret = rte_intr_disable(&pci_dev->intr_handle); + if (ret) + PMD_DRV_LOG(ERR, "Disable intr failed: %d", ret); + + do { + ret = + rte_intr_callback_unregister(&pci_dev->intr_handle, + hinic_dev_interrupt_handler, dev); + if (ret >= 0) { + break; + } else if (ret == -EAGAIN) { + rte_delay_ms(100); + retries++; + } else { + PMD_DRV_LOG(ERR, "intr callback unregister failed: %d", + ret); + break; + } + } while (retries < HINIC_INTR_CB_UNREG_MAX_RETRIES); + + if (retries == HINIC_INTR_CB_UNREG_MAX_RETRIES) + PMD_DRV_LOG(ERR, "Unregister intr callback failed after %d retries", + retries); +} + +static int hinic_set_dev_promiscuous(struct hinic_nic_dev *nic_dev, bool enable) +{ + u32 rx_mode_ctrl = nic_dev->rx_mode_status; + + if (enable) + rx_mode_ctrl |= HINIC_RX_MODE_PROMISC; + else + 
rx_mode_ctrl &= (~HINIC_RX_MODE_PROMISC); + + return hinic_config_rx_mode(nic_dev, rx_mode_ctrl); +} + +/** + * DPDK callback to get device statistics. + * + * @param dev + * Pointer to Ethernet device structure. + * @param stats + * Stats structure output buffer. + * + * @return + * 0 on success and stats is filled, + * negative error value otherwise. + */ +static int +hinic_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) +{ + int i, err, q_num; + u64 rx_discards_pmd = 0; + struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); + struct hinic_vport_stats vport_stats; + struct hinic_rxq *rxq = NULL; + struct hinic_rxq_stats rxq_stats; + struct hinic_txq *txq = NULL; + struct hinic_txq_stats txq_stats; + + err = hinic_get_vport_stats(nic_dev->hwdev, &vport_stats); + if (err) { + PMD_DRV_LOG(ERR, "Get vport stats from fw failed, nic_dev: %s", + nic_dev->proc_dev_name); + return err; + } + + /* rx queue stats */ + q_num = (nic_dev->num_rq < RTE_ETHDEV_QUEUE_STAT_CNTRS) ? + nic_dev->num_rq : RTE_ETHDEV_QUEUE_STAT_CNTRS; + for (i = 0; i < q_num; i++) { + rxq = nic_dev->rxqs[i]; + hinic_rxq_get_stats(rxq, &rxq_stats); + stats->q_ipackets[i] = rxq_stats.packets; + stats->q_ibytes[i] = rxq_stats.bytes; + stats->q_errors[i] = rxq_stats.rx_discards; + + stats->ierrors += rxq_stats.errors; + rx_discards_pmd += rxq_stats.rx_discards; + dev->data->rx_mbuf_alloc_failed += rxq_stats.rx_nombuf; + } + + /* tx queue stats */ + q_num = (nic_dev->num_sq < RTE_ETHDEV_QUEUE_STAT_CNTRS) ? + nic_dev->num_sq : RTE_ETHDEV_QUEUE_STAT_CNTRS; + for (i = 0; i < q_num; i++) { + txq = nic_dev->txqs[i]; + hinic_txq_get_stats(txq, &txq_stats); + stats->q_opackets[i] = txq_stats.packets; + stats->q_obytes[i] = txq_stats.bytes; + stats->oerrors += (txq_stats.tx_busy + txq_stats.off_errs); + } + + /* vport stats */ + stats->oerrors += vport_stats.tx_discard_vport; + + stats->imissed = vport_stats.rx_discard_vport + rx_discards_pmd; + + stats->ipackets = (vport_stats.rx_unicast_pkts_vport + + vport_stats.rx_multicast_pkts_vport + + vport_stats.rx_broadcast_pkts_vport - + rx_discards_pmd); + + stats->opackets = (vport_stats.tx_unicast_pkts_vport + + vport_stats.tx_multicast_pkts_vport + + vport_stats.tx_broadcast_pkts_vport); + + stats->ibytes = (vport_stats.rx_unicast_bytes_vport + + vport_stats.rx_multicast_bytes_vport + + vport_stats.rx_broadcast_bytes_vport); + + stats->obytes = (vport_stats.tx_unicast_bytes_vport + + vport_stats.tx_multicast_bytes_vport + + vport_stats.tx_broadcast_bytes_vport); + return 0; +} + +/** + * DPDK callback to clear device statistics. + * + * @param dev + * Pointer to Ethernet device structure. + */ +static int hinic_dev_stats_reset(struct rte_eth_dev *dev) +{ + int qid; + struct hinic_rxq *rxq = NULL; + struct hinic_txq *txq = NULL; + struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); + int ret; + + ret = hinic_clear_vport_stats(nic_dev->hwdev); + if (ret != 0) + return ret; + + for (qid = 0; qid < nic_dev->num_rq; qid++) { + rxq = nic_dev->rxqs[qid]; + hinic_rxq_stats_reset(rxq); + } + + for (qid = 0; qid < nic_dev->num_sq; qid++) { + txq = nic_dev->txqs[qid]; + hinic_txq_stats_reset(txq); + } + + return 0; +} + +/** + * DPDK callback to clear device extended statistics. + * + * @param dev + * Pointer to Ethernet device structure. 
+ */ +static int hinic_dev_xstats_reset(struct rte_eth_dev *dev) +{ + struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); + int ret; + + ret = hinic_dev_stats_reset(dev); + if (ret != 0) + return ret; + + if (hinic_func_type(nic_dev->hwdev) != TYPE_VF) { + ret = hinic_clear_phy_port_stats(nic_dev->hwdev); + if (ret != 0) + return ret; + } + + return 0; +} + +static void hinic_gen_random_mac_addr(struct rte_ether_addr *mac_addr) +{ + uint64_t random_value; + + /* Set Organizationally Unique Identifier (OUI) prefix */ + mac_addr->addr_bytes[0] = 0x00; + mac_addr->addr_bytes[1] = 0x09; + mac_addr->addr_bytes[2] = 0xC0; + /* Force indication of locally assigned MAC address. */ + mac_addr->addr_bytes[0] |= RTE_ETHER_LOCAL_ADMIN_ADDR; + /* Generate the last 3 bytes of the MAC address with a random number. */ + random_value = rte_rand(); + memcpy(&mac_addr->addr_bytes[3], &random_value, 3); +} + +/** + * Init mac_vlan table in NIC. + * + * @param dev + * Pointer to Ethernet device structure. + * + * @return + * 0 on success and stats is filled, + * negative error value otherwise. + */ +static int hinic_init_mac_addr(struct rte_eth_dev *eth_dev) +{ + struct hinic_nic_dev *nic_dev = + HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(eth_dev); + uint8_t addr_bytes[RTE_ETHER_ADDR_LEN]; + u16 func_id = 0; + int rc = 0; + + rc = hinic_get_default_mac(nic_dev->hwdev, addr_bytes); + if (rc) + return rc; + + rte_ether_addr_copy((struct rte_ether_addr *)addr_bytes, + ð_dev->data->mac_addrs[0]); + if (rte_is_zero_ether_addr(ð_dev->data->mac_addrs[0])) + hinic_gen_random_mac_addr(ð_dev->data->mac_addrs[0]); + + func_id = hinic_global_func_id(nic_dev->hwdev); + rc = hinic_set_mac(nic_dev->hwdev, + eth_dev->data->mac_addrs[0].addr_bytes, + 0, func_id); + if (rc && rc != HINIC_PF_SET_VF_ALREADY) + return rc; + + rte_ether_addr_copy(ð_dev->data->mac_addrs[0], + &nic_dev->default_addr); + + return 0; +} + +static void hinic_delete_mc_addr_list(struct hinic_nic_dev *nic_dev) +{ + u16 func_id; + u32 i; + + func_id = hinic_global_func_id(nic_dev->hwdev); + + for (i = 0; i < HINIC_MAX_MC_MAC_ADDRS; i++) { + if (rte_is_zero_ether_addr(&nic_dev->mc_list[i])) + break; + + hinic_del_mac(nic_dev->hwdev, nic_dev->mc_list[i].addr_bytes, + 0, func_id); + memset(&nic_dev->mc_list[i], 0, sizeof(struct rte_ether_addr)); + } +} + +/** + * Deinit mac_vlan table in NIC. + * + * @param dev + * Pointer to Ethernet device structure. + * + * @return + * 0 on success and stats is filled, + * negative error value otherwise. 
+ */ +static void hinic_deinit_mac_addr(struct rte_eth_dev *eth_dev) +{ + struct hinic_nic_dev *nic_dev = + HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(eth_dev); + u16 func_id = 0; + int rc; + int i; + + func_id = hinic_global_func_id(nic_dev->hwdev); + + for (i = 0; i < HINIC_MAX_UC_MAC_ADDRS; i++) { + if (rte_is_zero_ether_addr(ð_dev->data->mac_addrs[i])) + continue; + + rc = hinic_del_mac(nic_dev->hwdev, + eth_dev->data->mac_addrs[i].addr_bytes, + 0, func_id); + if (rc && rc != HINIC_PF_SET_VF_ALREADY) + PMD_DRV_LOG(ERR, "Delete mac table failed, dev_name: %s", + eth_dev->data->name); + + memset(ð_dev->data->mac_addrs[i], 0, + sizeof(struct rte_ether_addr)); + } + + /* delete multicast mac addrs */ + hinic_delete_mc_addr_list(nic_dev); +} + +static int hinic_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) +{ + struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); + uint32_t frame_size; + int ret = 0; + + PMD_DRV_LOG(INFO, "Set port mtu, port_id: %d, mtu: %d, max_pkt_len: %d", + dev->data->port_id, mtu, HINIC_MTU_TO_PKTLEN(mtu)); + + if (mtu < HINIC_MIN_MTU_SIZE || mtu > HINIC_MAX_MTU_SIZE) { + PMD_DRV_LOG(ERR, "Invalid mtu: %d, must between %d and %d", + mtu, HINIC_MIN_MTU_SIZE, HINIC_MAX_MTU_SIZE); + return -EINVAL; + } + + ret = hinic_set_port_mtu(nic_dev->hwdev, mtu); + if (ret) { + PMD_DRV_LOG(ERR, "Set port mtu failed, ret: %d", ret); + return ret; + } + + /* update max frame size */ + frame_size = HINIC_MTU_TO_PKTLEN(mtu); + if (frame_size > RTE_ETHER_MAX_LEN) + dev->data->dev_conf.rxmode.offloads |= + DEV_RX_OFFLOAD_JUMBO_FRAME; + else + dev->data->dev_conf.rxmode.offloads &= + ~DEV_RX_OFFLOAD_JUMBO_FRAME; + + dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size; + nic_dev->mtu_size = mtu; + + return ret; +} + +static void hinic_store_vlan_filter(struct hinic_nic_dev *nic_dev, + u16 vlan_id, bool on) +{ + u32 vid_idx, vid_bit; + + vid_idx = HINIC_VFTA_IDX(vlan_id); + vid_bit = HINIC_VFTA_BIT(vlan_id); + + if (on) + nic_dev->vfta[vid_idx] |= vid_bit; + else + nic_dev->vfta[vid_idx] &= ~vid_bit; +} + +static bool hinic_find_vlan_filter(struct hinic_nic_dev *nic_dev, + uint16_t vlan_id) +{ + u32 vid_idx, vid_bit; + + vid_idx = HINIC_VFTA_IDX(vlan_id); + vid_bit = HINIC_VFTA_BIT(vlan_id); + + return (nic_dev->vfta[vid_idx] & vid_bit) ? TRUE : FALSE; +} + +/** + * DPDK callback to set vlan filter. + * + * @param dev + * Pointer to Ethernet device structure. 
+ * @param vlan_id + * vlan id is used to filter vlan packets + * @param enable + * enable disable or enable vlan filter function + */ +static int hinic_vlan_filter_set(struct rte_eth_dev *dev, + uint16_t vlan_id, int enable) +{ + struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); + int err = 0; + u16 func_id; + + if (vlan_id > RTE_ETHER_MAX_VLAN_ID) + return -EINVAL; + + func_id = hinic_global_func_id(nic_dev->hwdev); + + if (enable) { + /* If vlanid is already set, just return */ + if (hinic_find_vlan_filter(nic_dev, vlan_id)) { + PMD_DRV_LOG(INFO, "Vlan %u has been added, device: %s", + vlan_id, nic_dev->proc_dev_name); + return 0; + } + + err = hinic_add_remove_vlan(nic_dev->hwdev, vlan_id, + func_id, TRUE); + } else { + /* If vlanid can't be found, just return */ + if (!hinic_find_vlan_filter(nic_dev, vlan_id)) { + PMD_DRV_LOG(INFO, "Vlan %u is not in the vlan filter list, device: %s", + vlan_id, nic_dev->proc_dev_name); + return 0; + } + + err = hinic_add_remove_vlan(nic_dev->hwdev, vlan_id, + func_id, FALSE); + } + + if (err) { + PMD_DRV_LOG(ERR, "%s vlan failed, func_id: %d, vlan_id: %d, err: %d", + enable ? "Add" : "Remove", func_id, vlan_id, err); + return err; + } + + hinic_store_vlan_filter(nic_dev, vlan_id, enable); + + PMD_DRV_LOG(INFO, "%s vlan %u succeed, device: %s", + enable ? "Add" : "Remove", vlan_id, nic_dev->proc_dev_name); + return 0; +} + +/** + * DPDK callback to enable or disable vlan offload. + * + * @param dev + * Pointer to Ethernet device structure. + * @param mask + * Definitions used for VLAN setting + */ +static int hinic_vlan_offload_set(struct rte_eth_dev *dev, int mask) +{ + struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); + struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode; + bool on; + int err; + + /* Enable or disable VLAN filter */ + if (mask & ETH_VLAN_FILTER_MASK) { + on = (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) ? + TRUE : FALSE; + err = hinic_config_vlan_filter(nic_dev->hwdev, on); + if (err == HINIC_MGMT_CMD_UNSUPPORTED) { + PMD_DRV_LOG(WARNING, + "Current matching version does not support vlan filter configuration, device: %s, port_id: %d", + nic_dev->proc_dev_name, dev->data->port_id); + } else if (err) { + PMD_DRV_LOG(ERR, "Failed to %s vlan filter, device: %s, port_id: %d, err: %d", + on ? "enable" : "disable", + nic_dev->proc_dev_name, + dev->data->port_id, err); + return err; + } + + PMD_DRV_LOG(INFO, "%s vlan filter succeed, device: %s, port_id: %d", + on ? "Enable" : "Disable", + nic_dev->proc_dev_name, dev->data->port_id); + } + + /* Enable or disable VLAN stripping */ + if (mask & ETH_VLAN_STRIP_MASK) { + on = (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) ? + TRUE : FALSE; + err = hinic_set_rx_vlan_offload(nic_dev->hwdev, on); + if (err) { + PMD_DRV_LOG(ERR, "Failed to %s vlan strip, device: %s, port_id: %d, err: %d", + on ? "enable" : "disable", + nic_dev->proc_dev_name, + dev->data->port_id, err); + return err; + } + + PMD_DRV_LOG(INFO, "%s vlan strip succeed, device: %s, port_id: %d", + on ? 
"Enable" : "Disable", + nic_dev->proc_dev_name, dev->data->port_id); + } + + if (mask & ETH_VLAN_EXTEND_MASK) { + PMD_DRV_LOG(ERR, "Don't support vlan qinq, device: %s, port_id: %d", + nic_dev->proc_dev_name, dev->data->port_id); + return -ENOTSUP; + } + + return 0; +} + +static void hinic_remove_all_vlanid(struct rte_eth_dev *eth_dev) +{ + struct hinic_nic_dev *nic_dev = + HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(eth_dev); + u16 func_id; + int i; + + func_id = hinic_global_func_id(nic_dev->hwdev); + for (i = 0; i <= RTE_ETHER_MAX_VLAN_ID; i++) { + /* If can't find it, continue */ + if (!hinic_find_vlan_filter(nic_dev, i)) + continue; + + (void)hinic_add_remove_vlan(nic_dev->hwdev, i, func_id, FALSE); + hinic_store_vlan_filter(nic_dev, i, false); + } +} + +static int hinic_set_dev_allmulticast(struct hinic_nic_dev *nic_dev, + bool enable) +{ + u32 rx_mode_ctrl = nic_dev->rx_mode_status; + + if (enable) + rx_mode_ctrl |= HINIC_RX_MODE_MC_ALL; + else + rx_mode_ctrl &= (~HINIC_RX_MODE_MC_ALL); + + return hinic_config_rx_mode(nic_dev, rx_mode_ctrl); +} + +/** + * DPDK callback to enable allmulticast mode. + * + * @param dev + * Pointer to Ethernet device structure. + * + * @return + * 0 on success, + * negative error value otherwise. + */ +static int hinic_dev_allmulticast_enable(struct rte_eth_dev *dev) +{ + int ret = HINIC_OK; + struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); + + ret = hinic_set_dev_allmulticast(nic_dev, true); + if (ret) { + PMD_DRV_LOG(ERR, "Enable allmulticast failed, error: %d", ret); + return ret; + } + + PMD_DRV_LOG(INFO, "Enable allmulticast succeed, nic_dev: %s, port_id: %d", + nic_dev->proc_dev_name, dev->data->port_id); + return 0; +} + +/** + * DPDK callback to disable allmulticast mode. + * + * @param dev + * Pointer to Ethernet device structure. + * + * @return + * 0 on success, + * negative error value otherwise. + */ +static int hinic_dev_allmulticast_disable(struct rte_eth_dev *dev) +{ + int ret = HINIC_OK; + struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); + + ret = hinic_set_dev_allmulticast(nic_dev, false); + if (ret) { + PMD_DRV_LOG(ERR, "Disable allmulticast failed, error: %d", ret); + return ret; + } + + PMD_DRV_LOG(INFO, "Disable allmulticast succeed, nic_dev: %s, port_id: %d", + nic_dev->proc_dev_name, dev->data->port_id); + return 0; +} + +/** + * DPDK callback to enable promiscuous mode. + * + * @param dev + * Pointer to Ethernet device structure. + * + * @return + * 0 on success, + * negative error value otherwise. + */ +static int hinic_dev_promiscuous_enable(struct rte_eth_dev *dev) +{ + int rc = HINIC_OK; + struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); + + PMD_DRV_LOG(INFO, "Enable promiscuous, nic_dev: %s, port_id: %d, promisc: %d", + nic_dev->proc_dev_name, dev->data->port_id, + dev->data->promiscuous); + + rc = hinic_set_dev_promiscuous(nic_dev, true); + if (rc) + PMD_DRV_LOG(ERR, "Enable promiscuous failed"); + + return rc; +} + +/** + * DPDK callback to disable promiscuous mode. + * + * @param dev + * Pointer to Ethernet device structure. + * + * @return + * 0 on success, + * negative error value otherwise. 
+ */ +static int hinic_dev_promiscuous_disable(struct rte_eth_dev *dev) +{ + int rc = HINIC_OK; + struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); + + PMD_DRV_LOG(INFO, "Disable promiscuous, nic_dev: %s, port_id: %d, promisc: %d", + nic_dev->proc_dev_name, dev->data->port_id, + dev->data->promiscuous); + + rc = hinic_set_dev_promiscuous(nic_dev, false); + if (rc) + PMD_DRV_LOG(ERR, "Disable promiscuous failed"); + + return rc; +} + +static int hinic_flow_ctrl_get(struct rte_eth_dev *dev, + struct rte_eth_fc_conf *fc_conf) +{ + struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); + struct nic_pause_config nic_pause; + int err; + + memset(&nic_pause, 0, sizeof(nic_pause)); + + err = hinic_get_pause_info(nic_dev->hwdev, &nic_pause); + if (err) + return err; + + if (nic_dev->pause_set || !nic_pause.auto_neg) { + nic_pause.rx_pause = nic_dev->nic_pause.rx_pause; + nic_pause.tx_pause = nic_dev->nic_pause.tx_pause; + } + + fc_conf->autoneg = nic_pause.auto_neg; + + if (nic_pause.tx_pause && nic_pause.rx_pause) + fc_conf->mode = RTE_FC_FULL; + else if (nic_pause.tx_pause) + fc_conf->mode = RTE_FC_TX_PAUSE; + else if (nic_pause.rx_pause) + fc_conf->mode = RTE_FC_RX_PAUSE; + else + fc_conf->mode = RTE_FC_NONE; + + return 0; +} + +static int hinic_flow_ctrl_set(struct rte_eth_dev *dev, + struct rte_eth_fc_conf *fc_conf) +{ + struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); + struct nic_pause_config nic_pause; + int err; + + nic_pause.auto_neg = fc_conf->autoneg; + + if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) || + (fc_conf->mode & RTE_FC_TX_PAUSE)) + nic_pause.tx_pause = true; + else + nic_pause.tx_pause = false; + + if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) || + (fc_conf->mode & RTE_FC_RX_PAUSE)) + nic_pause.rx_pause = true; + else + nic_pause.rx_pause = false; + + err = hinic_set_pause_config(nic_dev->hwdev, nic_pause); + if (err) + return err; + + nic_dev->pause_set = true; + nic_dev->nic_pause.auto_neg = nic_pause.auto_neg; + nic_dev->nic_pause.rx_pause = nic_pause.rx_pause; + nic_dev->nic_pause.tx_pause = nic_pause.tx_pause; + + PMD_DRV_LOG(INFO, "Set pause options, tx: %s, rx: %s, auto: %s\n", + nic_pause.tx_pause ? "on" : "off", + nic_pause.rx_pause ? "on" : "off", + nic_pause.auto_neg ? "on" : "off"); + + return 0; +} + +/** + * DPDK callback to update the RSS hash key and RSS hash type. + * + * @param dev + * Pointer to Ethernet device structure. + * @param rss_conf + * RSS configuration data. + * + * @return + * 0 on success, negative error value otherwise. 
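+ *
+ * A minimal application-side sketch (port_id is assumed to be a hinic port
+ * configured with ETH_MQ_RX_RSS; the key is filled in by the caller and must
+ * not exceed the device RSS key size, 40 bytes here is illustrative):
+ *
+ *   uint8_t key[40];
+ *   struct rte_eth_rss_conf conf = {
+ *       .rss_key = key,
+ *       .rss_key_len = sizeof(key),
+ *       .rss_hf = ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_TCP,
+ *   };
+ *   int rc = rte_eth_dev_rss_hash_update(port_id, &conf);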
+ */ +static int hinic_rss_hash_update(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf) +{ + struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); + u8 tmpl_idx = nic_dev->rss_tmpl_idx; + u8 hashkey[HINIC_RSS_KEY_SIZE] = {0}; + u8 prio_tc[HINIC_DCB_UP_MAX] = {0}; + u64 rss_hf = rss_conf->rss_hf; + struct nic_rss_type rss_type = {0}; + int err = 0; + + if (!(nic_dev->flags & ETH_MQ_RX_RSS_FLAG)) { + PMD_DRV_LOG(WARNING, "RSS is not enabled"); + return HINIC_OK; + } + + if (rss_conf->rss_key_len > HINIC_RSS_KEY_SIZE) { + PMD_DRV_LOG(ERR, "Invalid rss key, rss_key_len: %d", + rss_conf->rss_key_len); + return HINIC_ERROR; + } + + if (rss_conf->rss_key) { + memcpy(hashkey, rss_conf->rss_key, rss_conf->rss_key_len); + err = hinic_rss_set_template_tbl(nic_dev->hwdev, tmpl_idx, + hashkey); + if (err) { + PMD_DRV_LOG(ERR, "Set rss template table failed"); + goto disable_rss; + } + } + + rss_type.ipv4 = (rss_hf & (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4)) ? 1 : 0; + rss_type.tcp_ipv4 = (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) ? 1 : 0; + rss_type.ipv6 = (rss_hf & (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6)) ? 1 : 0; + rss_type.ipv6_ext = (rss_hf & ETH_RSS_IPV6_EX) ? 1 : 0; + rss_type.tcp_ipv6 = (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) ? 1 : 0; + rss_type.tcp_ipv6_ext = (rss_hf & ETH_RSS_IPV6_TCP_EX) ? 1 : 0; + rss_type.udp_ipv4 = (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) ? 1 : 0; + rss_type.udp_ipv6 = (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) ? 1 : 0; + + err = hinic_set_rss_type(nic_dev->hwdev, tmpl_idx, rss_type); + if (err) { + PMD_DRV_LOG(ERR, "Set rss type table failed"); + goto disable_rss; + } + + return 0; + +disable_rss: + memset(prio_tc, 0, sizeof(prio_tc)); + (void)hinic_rss_cfg(nic_dev->hwdev, 0, tmpl_idx, 0, prio_tc); + return err; +} + +/** + * DPDK callback to get the RSS hash configuration. + * + * @param dev + * Pointer to Ethernet device structure. + * @param rss_conf + * RSS configuration data. + * + * @return + * 0 on success, negative error value otherwise. + */ +static int hinic_rss_conf_get(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf) +{ + struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); + u8 tmpl_idx = nic_dev->rss_tmpl_idx; + u8 hashkey[HINIC_RSS_KEY_SIZE] = {0}; + struct nic_rss_type rss_type = {0}; + int err; + + if (!(nic_dev->flags & ETH_MQ_RX_RSS_FLAG)) { + PMD_DRV_LOG(WARNING, "RSS is not enabled"); + return HINIC_ERROR; + } + + err = hinic_rss_get_template_tbl(nic_dev->hwdev, tmpl_idx, hashkey); + if (err) + return err; + + if (rss_conf->rss_key && + rss_conf->rss_key_len >= HINIC_RSS_KEY_SIZE) { + memcpy(rss_conf->rss_key, hashkey, sizeof(hashkey)); + rss_conf->rss_key_len = sizeof(hashkey); + } + + err = hinic_get_rss_type(nic_dev->hwdev, tmpl_idx, &rss_type); + if (err) + return err; + + rss_conf->rss_hf = 0; + rss_conf->rss_hf |= rss_type.ipv4 ? + (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4) : 0; + rss_conf->rss_hf |= rss_type.tcp_ipv4 ? ETH_RSS_NONFRAG_IPV4_TCP : 0; + rss_conf->rss_hf |= rss_type.ipv6 ? + (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6) : 0; + rss_conf->rss_hf |= rss_type.ipv6_ext ? ETH_RSS_IPV6_EX : 0; + rss_conf->rss_hf |= rss_type.tcp_ipv6 ? ETH_RSS_NONFRAG_IPV6_TCP : 0; + rss_conf->rss_hf |= rss_type.tcp_ipv6_ext ? ETH_RSS_IPV6_TCP_EX : 0; + rss_conf->rss_hf |= rss_type.udp_ipv4 ? ETH_RSS_NONFRAG_IPV4_UDP : 0; + rss_conf->rss_hf |= rss_type.udp_ipv6 ? ETH_RSS_NONFRAG_IPV6_UDP : 0; + + return HINIC_OK; +} + +/** + * DPDK callback to update the RSS redirection table. + * + * @param dev + * Pointer to Ethernet device structure. 
+ * @param reta_conf + * Pointer to RSS reta configuration data. + * @param reta_size + * Size of the RETA table. + * + * @return + * 0 on success, negative error value otherwise. + */ +static int hinic_rss_indirtbl_update(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size) +{ + struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); + u8 tmpl_idx = nic_dev->rss_tmpl_idx; + u8 prio_tc[HINIC_DCB_UP_MAX] = {0}; + u32 indirtbl[NIC_RSS_INDIR_SIZE] = {0}; + int err = 0; + u16 i = 0; + u16 idx, shift; + + if (!(nic_dev->flags & ETH_MQ_RX_RSS_FLAG)) + return HINIC_OK; + + if (reta_size != NIC_RSS_INDIR_SIZE) { + PMD_DRV_LOG(ERR, "Invalid reta size, reta_size: %d", reta_size); + return HINIC_ERROR; + } + + err = hinic_rss_get_indir_tbl(nic_dev->hwdev, tmpl_idx, indirtbl); + if (err) + return err; + + /* update rss indir_tbl */ + for (i = 0; i < reta_size; i++) { + idx = i / RTE_RETA_GROUP_SIZE; + shift = i % RTE_RETA_GROUP_SIZE; + + if (reta_conf[idx].reta[shift] >= nic_dev->num_rq) { + PMD_DRV_LOG(ERR, "Invalid reta entry, indirtbl[%d]: %d " + "exceeds the maximum rxq num: %d", i, + reta_conf[idx].reta[shift], nic_dev->num_rq); + return -EINVAL; + } + + if (reta_conf[idx].mask & (1ULL << shift)) + indirtbl[i] = reta_conf[idx].reta[shift]; + } + + err = hinic_rss_set_indir_tbl(nic_dev->hwdev, tmpl_idx, indirtbl); + if (err) + goto disable_rss; + + nic_dev->rss_indir_flag = true; + + return 0; + +disable_rss: + memset(prio_tc, 0, sizeof(prio_tc)); + (void)hinic_rss_cfg(nic_dev->hwdev, 0, tmpl_idx, 0, prio_tc); + + return HINIC_ERROR; +} + +/** + * DPDK callback to get the RSS indirection table. + * + * @param dev + * Pointer to Ethernet device structure. + * @param reta_conf + * Pointer to RSS reta configuration data. + * @param reta_size + * Size of the RETA table. + * + * @return + * 0 on success, negative error value otherwise. + */ +static int hinic_rss_indirtbl_query(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size) +{ + struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); + u8 tmpl_idx = nic_dev->rss_tmpl_idx; + int err = 0; + u32 indirtbl[NIC_RSS_INDIR_SIZE] = {0}; + u16 idx, shift; + u16 i = 0; + + if (reta_size != NIC_RSS_INDIR_SIZE) { + PMD_DRV_LOG(ERR, "Invalid reta size, reta_size: %d", reta_size); + return HINIC_ERROR; + } + + err = hinic_rss_get_indir_tbl(nic_dev->hwdev, tmpl_idx, indirtbl); + if (err) { + PMD_DRV_LOG(ERR, "Get rss indirect table failed, error: %d", + err); + return err; + } + + for (i = 0; i < reta_size; i++) { + idx = i / RTE_RETA_GROUP_SIZE; + shift = i % RTE_RETA_GROUP_SIZE; + if (reta_conf[idx].mask & (1ULL << shift)) + reta_conf[idx].reta[shift] = (uint16_t)indirtbl[i]; + } + + return HINIC_OK; +} + +/** + * DPDK callback to get extended device statistics. + * + * @param dev + * Pointer to Ethernet device. + * @param xstats + * Pointer to rte extended stats table. + * @param n + * The size of the stats table. + * + * @return + * Number of extended stats on success and stats is filled, + * negative error value otherwise. 
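+ *
+ * If @p n is smaller than the required count, only the required count is
+ * returned and nothing is filled, so the usual pattern is a two-step query
+ * (sketch; port_id assumed valid, error handling omitted):
+ *
+ *   int nb = rte_eth_xstats_get(port_id, NULL, 0);
+ *   struct rte_eth_xstat *xs = calloc(nb, sizeof(*xs));
+ *   nb = rte_eth_xstats_get(port_id, xs, nb);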
+ */ +static int hinic_dev_xstats_get(struct rte_eth_dev *dev, + struct rte_eth_xstat *xstats, + unsigned int n) +{ + u16 qid = 0; + u32 i; + int err, count; + struct hinic_nic_dev *nic_dev; + struct hinic_phy_port_stats port_stats; + struct hinic_vport_stats vport_stats; + struct hinic_rxq *rxq = NULL; + struct hinic_rxq_stats rxq_stats; + struct hinic_txq *txq = NULL; + struct hinic_txq_stats txq_stats; + + nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); + count = hinic_xstats_calc_num(nic_dev); + if ((int)n < count) + return count; + + count = 0; + + /* Get stats from hinic_rxq_stats */ + for (qid = 0; qid < nic_dev->num_rq; qid++) { + rxq = nic_dev->rxqs[qid]; + hinic_rxq_get_stats(rxq, &rxq_stats); + + for (i = 0; i < HINIC_RXQ_XSTATS_NUM; i++) { + xstats[count].value = + *(uint64_t *)(((char *)&rxq_stats) + + hinic_rxq_stats_strings[i].offset); + xstats[count].id = count; + count++; + } + } + + /* Get stats from hinic_txq_stats */ + for (qid = 0; qid < nic_dev->num_sq; qid++) { + txq = nic_dev->txqs[qid]; + hinic_txq_get_stats(txq, &txq_stats); + + for (i = 0; i < HINIC_TXQ_XSTATS_NUM; i++) { + xstats[count].value = + *(uint64_t *)(((char *)&txq_stats) + + hinic_txq_stats_strings[i].offset); + xstats[count].id = count; + count++; + } + } + + /* Get stats from hinic_vport_stats */ + err = hinic_get_vport_stats(nic_dev->hwdev, &vport_stats); + if (err) + return err; + + for (i = 0; i < HINIC_VPORT_XSTATS_NUM; i++) { + xstats[count].value = + *(uint64_t *)(((char *)&vport_stats) + + hinic_vport_stats_strings[i].offset); + xstats[count].id = count; + count++; + } + + if (HINIC_IS_VF(nic_dev->hwdev)) + return count; + + /* Get stats from hinic_phy_port_stats */ + err = hinic_get_phy_port_stats(nic_dev->hwdev, &port_stats); + if (err) + return err; + + for (i = 0; i < HINIC_PHYPORT_XSTATS_NUM; i++) { + xstats[count].value = *(uint64_t *)(((char *)&port_stats) + + hinic_phyport_stats_strings[i].offset); + xstats[count].id = count; + count++; + } + + return count; +} + +static void hinic_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, + struct rte_eth_rxq_info *qinfo) +{ + struct hinic_rxq *rxq = dev->data->rx_queues[queue_id]; + + qinfo->mp = rxq->mb_pool; + qinfo->nb_desc = rxq->q_depth; +} + +static void hinic_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, + struct rte_eth_txq_info *qinfo) +{ + struct hinic_txq *txq = dev->data->tx_queues[queue_id]; + + qinfo->nb_desc = txq->q_depth; +} + +/** + * DPDK callback to retrieve names of extended device statistics + * + * @param dev + * Pointer to Ethernet device structure. + * @param xstats_names + * Buffer to insert names into. + * + * @return + * Number of xstats names. 
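+ *
+ * Per-queue software counters are reported as "rxq<N>_<stat>_pmd" and
+ * "txq<N>_<stat>_pmd", followed by vport counters and, on PFs, physical port
+ * counters. A sketch of retrieving the names (port_id assumed valid):
+ *
+ *   int nb = rte_eth_xstats_get_names(port_id, NULL, 0);
+ *   struct rte_eth_xstat_name *names = calloc(nb, sizeof(*names));
+ *   rte_eth_xstats_get_names(port_id, names, nb);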
+ */ +static int hinic_dev_xstats_get_names(struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, + __rte_unused unsigned int limit) +{ + struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); + int count = 0; + u16 i = 0, q_num; + + if (xstats_names == NULL) + return hinic_xstats_calc_num(nic_dev); + + /* get pmd rxq stats */ + for (q_num = 0; q_num < nic_dev->num_rq; q_num++) { + for (i = 0; i < HINIC_RXQ_XSTATS_NUM; i++) { + snprintf(xstats_names[count].name, + sizeof(xstats_names[count].name), + "rxq%d_%s_pmd", + q_num, hinic_rxq_stats_strings[i].name); + count++; + } + } + + /* get pmd txq stats */ + for (q_num = 0; q_num < nic_dev->num_sq; q_num++) { + for (i = 0; i < HINIC_TXQ_XSTATS_NUM; i++) { + snprintf(xstats_names[count].name, + sizeof(xstats_names[count].name), + "txq%d_%s_pmd", + q_num, hinic_txq_stats_strings[i].name); + count++; + } + } + + /* get vport stats */ + for (i = 0; i < HINIC_VPORT_XSTATS_NUM; i++) { + snprintf(xstats_names[count].name, + sizeof(xstats_names[count].name), + "%s", hinic_vport_stats_strings[i].name); + count++; + } + + if (HINIC_IS_VF(nic_dev->hwdev)) + return count; + + /* get phy port stats */ + for (i = 0; i < HINIC_PHYPORT_XSTATS_NUM; i++) { + snprintf(xstats_names[count].name, + sizeof(xstats_names[count].name), + "%s", hinic_phyport_stats_strings[i].name); + count++; + } + + return count; +} + +/** + * DPDK callback to set mac address + * + * @param dev + * Pointer to Ethernet device structure. + * @param addr + * Pointer to mac address + * @return + * 0 on success, negative error value otherwise. + */ +static int hinic_set_mac_addr(struct rte_eth_dev *dev, + struct rte_ether_addr *addr) +{ + struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); + u16 func_id; + int err; + + func_id = hinic_global_func_id(nic_dev->hwdev); + err = hinic_update_mac(nic_dev->hwdev, nic_dev->default_addr.addr_bytes, + addr->addr_bytes, 0, func_id); + if (err) + return err; + + rte_ether_addr_copy(addr, &nic_dev->default_addr); + + PMD_DRV_LOG(INFO, "Set new mac address %02x:%02x:%02x:%02x:%02x:%02x", + addr->addr_bytes[0], addr->addr_bytes[1], + addr->addr_bytes[2], addr->addr_bytes[3], + addr->addr_bytes[4], addr->addr_bytes[5]); + + return 0; +} + +/** + * DPDK callback to remove a MAC address. + * + * @param dev + * Pointer to Ethernet device structure. + * @param index + * MAC address index, should less than 128. + */ +static void hinic_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index) +{ + struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); + u16 func_id; + int ret; + + if (index >= HINIC_MAX_UC_MAC_ADDRS) { + PMD_DRV_LOG(INFO, "Remove mac index(%u) is out of range", + index); + return; + } + + func_id = hinic_global_func_id(nic_dev->hwdev); + ret = hinic_del_mac(nic_dev->hwdev, + dev->data->mac_addrs[index].addr_bytes, 0, func_id); + if (ret) + return; + + memset(&dev->data->mac_addrs[index], 0, sizeof(struct rte_ether_addr)); +} + +/** + * DPDK callback to add a MAC address. + * + * @param dev + * Pointer to Ethernet device structure. + * @param mac_addr + * Pointer to MAC address + * @param index + * MAC address index, should less than 128. + * @param vmdq + * VMDq pool index(not used). + * + * @return + * 0 on success, negative error value otherwise. 
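+ *
+ * A minimal application-side sketch (port_id assumed valid; the address is an
+ * arbitrary locally administered unicast example):
+ *
+ *   struct rte_ether_addr mac = {
+ *       .addr_bytes = { 0x02, 0x09, 0xc0, 0x11, 0x22, 0x33 }
+ *   };
+ *   int rc = rte_eth_dev_mac_addr_add(port_id, &mac, 0);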
+ */ +static int hinic_mac_addr_add(struct rte_eth_dev *dev, + struct rte_ether_addr *mac_addr, uint32_t index, + __rte_unused uint32_t vmdq) +{ + struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); + unsigned int i; + u16 func_id; + int ret; + + if (index >= HINIC_MAX_UC_MAC_ADDRS) { + PMD_DRV_LOG(INFO, "Add mac index(%u) is out of range", index); + return -EINVAL; + } + + /* First, make sure this address isn't already configured. */ + for (i = 0; (i != HINIC_MAX_UC_MAC_ADDRS); ++i) { + /* Skip this index, it's going to be reconfigured. */ + if (i == index) + continue; + + if (memcmp(&dev->data->mac_addrs[i], + mac_addr, sizeof(*mac_addr))) + continue; + + PMD_DRV_LOG(INFO, "MAC address already configured"); + return -EADDRINUSE; + } + + func_id = hinic_global_func_id(nic_dev->hwdev); + ret = hinic_set_mac(nic_dev->hwdev, mac_addr->addr_bytes, 0, func_id); + if (ret) + return ret; + + dev->data->mac_addrs[index] = *mac_addr; + return 0; +} + +/** + * DPDK callback to set multicast mac address + * + * @param dev + * Pointer to Ethernet device structure. + * @param mc_addr_set + * Pointer to multicast mac address + * @param nb_mc_addr + * mc addr count + * @return + * 0 on success, negative error value otherwise. + */ +static int hinic_set_mc_addr_list(struct rte_eth_dev *dev, + struct rte_ether_addr *mc_addr_set, + uint32_t nb_mc_addr) +{ + struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); + u16 func_id; + int ret; + u32 i; + + func_id = hinic_global_func_id(nic_dev->hwdev); + + /* delete old multi_cast addrs firstly */ + hinic_delete_mc_addr_list(nic_dev); + + if (nb_mc_addr > HINIC_MAX_MC_MAC_ADDRS) + goto allmulti; + + for (i = 0; i < nb_mc_addr; i++) { + ret = hinic_set_mac(nic_dev->hwdev, mc_addr_set[i].addr_bytes, + 0, func_id); + /* if add mc addr failed, set all multi_cast */ + if (ret) { + hinic_delete_mc_addr_list(nic_dev); + goto allmulti; + } + + rte_ether_addr_copy(&mc_addr_set[i], &nic_dev->mc_list[i]); + } + + return 0; + +allmulti: + hinic_dev_allmulticast_enable(dev); + + return 0; +} + +/** + * DPDK callback to manage filter control operations + * + * @param dev + * Pointer to Ethernet device structure. + * @param filter_type + * Filter type, which just supports generic type. + * @param filter_op + * Filter operation to perform. + * @param arg + * Pointer to operation-specific structure. + * + * @return + * 0 on success, negative error value otherwise. 
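+ *
+ * Only RTE_ETH_FILTER_GENERIC with RTE_ETH_FILTER_GET is accepted; it hands
+ * back hinic_flow_ops so that flow rules are managed through the generic
+ * rte_flow API, e.g. (sketch; attr, pattern and actions are assumed to be
+ * filled in by the caller):
+ *
+ *   struct rte_flow *flow = NULL;
+ *   struct rte_flow_error err;
+ *   if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
+ *       flow = rte_flow_create(port_id, &attr, pattern, actions, &err);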
+ */ +static int hinic_dev_filter_ctrl(struct rte_eth_dev *dev, + enum rte_filter_type filter_type, + enum rte_filter_op filter_op, + void *arg) +{ + struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); + int func_id = hinic_global_func_id(nic_dev->hwdev); + + switch (filter_type) { + case RTE_ETH_FILTER_GENERIC: + if (filter_op != RTE_ETH_FILTER_GET) + return -EINVAL; + *(const void **)arg = &hinic_flow_ops; + break; + default: + PMD_DRV_LOG(INFO, "Filter type (%d) not supported", + filter_type); + return -EINVAL; + } + + PMD_DRV_LOG(INFO, "Set filter_ctrl succeed, func_id: 0x%x, filter_type: 0x%x," + "filter_op: 0x%x.", func_id, filter_type, filter_op); + return 0; +} + +static int hinic_set_default_pause_feature(struct hinic_nic_dev *nic_dev) +{ + struct nic_pause_config pause_config = {0}; + int err; + + pause_config.auto_neg = 0; + pause_config.rx_pause = HINIC_DEFAUT_PAUSE_CONFIG; + pause_config.tx_pause = HINIC_DEFAUT_PAUSE_CONFIG; + + err = hinic_set_pause_config(nic_dev->hwdev, pause_config); + if (err) + return err; + + nic_dev->pause_set = true; + nic_dev->nic_pause.auto_neg = pause_config.auto_neg; + nic_dev->nic_pause.rx_pause = pause_config.rx_pause; + nic_dev->nic_pause.tx_pause = pause_config.tx_pause; + + return 0; +} + +static int hinic_set_default_dcb_feature(struct hinic_nic_dev *nic_dev) +{ + u8 up_tc[HINIC_DCB_UP_MAX] = {0}; + u8 up_pgid[HINIC_DCB_UP_MAX] = {0}; + u8 up_bw[HINIC_DCB_UP_MAX] = {0}; + u8 pg_bw[HINIC_DCB_UP_MAX] = {0}; + u8 up_strict[HINIC_DCB_UP_MAX] = {0}; + int i = 0; + + pg_bw[0] = 100; + for (i = 0; i < HINIC_DCB_UP_MAX; i++) + up_bw[i] = 100; + + return hinic_dcb_set_ets(nic_dev->hwdev, up_tc, pg_bw, + up_pgid, up_bw, up_strict); +} + +static int hinic_init_default_cos(struct hinic_nic_dev *nic_dev) +{ + u8 cos_id = 0; + int err; + + if (!HINIC_IS_VF(nic_dev->hwdev)) { + nic_dev->default_cos = + (hinic_global_func_id(nic_dev->hwdev) + + DEFAULT_BASE_COS) % NR_MAX_COS; + } else { + err = hinic_vf_get_default_cos(nic_dev->hwdev, &cos_id); + if (err) { + PMD_DRV_LOG(ERR, "Get VF default cos failed, err: %d", + err); + return HINIC_ERROR; + } + + nic_dev->default_cos = cos_id; + } + + return 0; +} + +static int hinic_set_default_hw_feature(struct hinic_nic_dev *nic_dev) +{ + int err; + + err = hinic_init_default_cos(nic_dev); + if (err) + return err; + + if (hinic_func_type(nic_dev->hwdev) == TYPE_VF) + return 0; + + /* Restore DCB configure to default status */ + err = hinic_set_default_dcb_feature(nic_dev); + if (err) + return err; + + /* Set pause enable, and up will disable pfc. */ + err = hinic_set_default_pause_feature(nic_dev); + if (err) + return err; + + err = hinic_reset_port_link_cfg(nic_dev->hwdev); + if (err) + return err; + + err = hinic_set_link_status_follow(nic_dev->hwdev, + HINIC_LINK_FOLLOW_PORT); + if (err == HINIC_MGMT_CMD_UNSUPPORTED) + PMD_DRV_LOG(WARNING, "Don't support to set link status follow phy port status"); + else if (err) + return err; + + return hinic_set_anti_attack(nic_dev->hwdev, true); +} + +static int32_t hinic_card_workmode_check(struct hinic_nic_dev *nic_dev) +{ + struct hinic_board_info info = { 0 }; + int rc; + + if (hinic_func_type(nic_dev->hwdev) == TYPE_VF) + return 0; + + rc = hinic_get_board_info(nic_dev->hwdev, &info); + if (rc) + return rc; + + return (info.service_mode == HINIC_SERVICE_MODE_NIC ? 
HINIC_OK : + HINIC_ERROR); +} + +static int hinic_copy_mempool_init(struct hinic_nic_dev *nic_dev) +{ + nic_dev->cpy_mpool = rte_mempool_lookup(nic_dev->proc_dev_name); + if (nic_dev->cpy_mpool == NULL) { + nic_dev->cpy_mpool = + rte_pktmbuf_pool_create(nic_dev->proc_dev_name, + HINIC_COPY_MEMPOOL_DEPTH, + 0, 0, + HINIC_COPY_MBUF_SIZE, + rte_socket_id()); + if (!nic_dev->cpy_mpool) { + PMD_DRV_LOG(ERR, "Create copy mempool failed, errno: %d, dev_name: %s", + rte_errno, nic_dev->proc_dev_name); + return -ENOMEM; + } + } + + return 0; +} + +static void hinic_copy_mempool_uninit(struct hinic_nic_dev *nic_dev) +{ + if (nic_dev->cpy_mpool != NULL) + rte_mempool_free(nic_dev->cpy_mpool); +} + +static int hinic_init_sw_rxtxqs(struct hinic_nic_dev *nic_dev) +{ + u32 txq_size; + u32 rxq_size; + + /* allocate software txq array */ + txq_size = nic_dev->nic_cap.max_sqs * sizeof(*nic_dev->txqs); + nic_dev->txqs = kzalloc_aligned(txq_size, GFP_KERNEL); + if (!nic_dev->txqs) { + PMD_DRV_LOG(ERR, "Allocate txqs failed"); + return -ENOMEM; + } + + /* allocate software rxq array */ + rxq_size = nic_dev->nic_cap.max_rqs * sizeof(*nic_dev->rxqs); + nic_dev->rxqs = kzalloc_aligned(rxq_size, GFP_KERNEL); + if (!nic_dev->rxqs) { + /* free txqs */ + kfree(nic_dev->txqs); + nic_dev->txqs = NULL; + + PMD_DRV_LOG(ERR, "Allocate rxqs failed"); + return -ENOMEM; + } + + return HINIC_OK; +} + +static void hinic_deinit_sw_rxtxqs(struct hinic_nic_dev *nic_dev) +{ + kfree(nic_dev->txqs); + nic_dev->txqs = NULL; + + kfree(nic_dev->rxqs); + nic_dev->rxqs = NULL; +} + +static int hinic_nic_dev_create(struct rte_eth_dev *eth_dev) +{ + struct hinic_nic_dev *nic_dev = + HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(eth_dev); + int rc; + + nic_dev->hwdev = rte_zmalloc("hinic_hwdev", sizeof(*nic_dev->hwdev), + RTE_CACHE_LINE_SIZE); + if (!nic_dev->hwdev) { + PMD_DRV_LOG(ERR, "Allocate hinic hwdev memory failed, dev_name: %s", + eth_dev->data->name); + return -ENOMEM; + } + nic_dev->hwdev->pcidev_hdl = RTE_ETH_DEV_TO_PCI(eth_dev); + + /* init osdep*/ + rc = hinic_osdep_init(nic_dev->hwdev); + if (rc) { + PMD_DRV_LOG(ERR, "Initialize os_dep failed, dev_name: %s", + eth_dev->data->name); + goto init_osdep_fail; + } + + /* init_hwif */ + rc = hinic_hwif_res_init(nic_dev->hwdev); + if (rc) { + PMD_DRV_LOG(ERR, "Initialize hwif failed, dev_name: %s", + eth_dev->data->name); + goto init_hwif_fail; + } + + /* init_cfg_mgmt */ + rc = init_cfg_mgmt(nic_dev->hwdev); + if (rc) { + PMD_DRV_LOG(ERR, "Initialize cfg_mgmt failed, dev_name: %s", + eth_dev->data->name); + goto init_cfgmgnt_fail; + } + + /* init_aeqs */ + rc = hinic_comm_aeqs_init(nic_dev->hwdev); + if (rc) { + PMD_DRV_LOG(ERR, "Initialize aeqs failed, dev_name: %s", + eth_dev->data->name); + goto init_aeqs_fail; + } + + /* init_pf_to_mgnt */ + rc = hinic_comm_pf_to_mgmt_init(nic_dev->hwdev); + if (rc) { + PMD_DRV_LOG(ERR, "Initialize pf_to_mgmt failed, dev_name: %s", + eth_dev->data->name); + goto init_pf_to_mgmt_fail; + } + + /* init mailbox */ + rc = hinic_comm_func_to_func_init(nic_dev->hwdev); + if (rc) { + PMD_DRV_LOG(ERR, "Initialize func_to_func failed, dev_name: %s", + eth_dev->data->name); + goto init_func_to_func_fail; + } + + rc = hinic_card_workmode_check(nic_dev); + if (rc) { + PMD_DRV_LOG(ERR, "Check card workmode failed, dev_name: %s", + eth_dev->data->name); + goto workmode_check_fail; + } + + /* do l2nic reset to make chip clear */ + rc = hinic_l2nic_reset(nic_dev->hwdev); + if (rc) { + PMD_DRV_LOG(ERR, "Do l2nic reset failed, dev_name: %s", + eth_dev->data->name); + goto 
l2nic_reset_fail; + } + + /* init dma and aeq msix attribute table */ + (void)hinic_init_attr_table(nic_dev->hwdev); + + /* init_cmdqs */ + rc = hinic_comm_cmdqs_init(nic_dev->hwdev); + if (rc) { + PMD_DRV_LOG(ERR, "Initialize cmdq failed, dev_name: %s", + eth_dev->data->name); + goto init_cmdq_fail; + } + + /* set hardware state active */ + rc = hinic_activate_hwdev_state(nic_dev->hwdev); + if (rc) { + PMD_DRV_LOG(ERR, "Initialize resources state failed, dev_name: %s", + eth_dev->data->name); + goto init_resources_state_fail; + } + + /* init_capability */ + rc = hinic_init_capability(nic_dev->hwdev); + if (rc) { + PMD_DRV_LOG(ERR, "Initialize capability failed, dev_name: %s", + eth_dev->data->name); + goto init_cap_fail; + } + + /* get nic capability */ + if (!hinic_support_nic(nic_dev->hwdev, &nic_dev->nic_cap)) { + PMD_DRV_LOG(ERR, "Hw doesn't support nic, dev_name: %s", + eth_dev->data->name); + rc = -EINVAL; + goto nic_check_fail; + } + + /* init root cla and function table */ + rc = hinic_init_nicio(nic_dev->hwdev); + if (rc) { + PMD_DRV_LOG(ERR, "Initialize nic_io failed, dev_name: %s", + eth_dev->data->name); + goto init_nicio_fail; + } + + /* init_software_txrxq */ + rc = hinic_init_sw_rxtxqs(nic_dev); + if (rc) { + PMD_DRV_LOG(ERR, "Initialize sw_rxtxqs failed, dev_name: %s", + eth_dev->data->name); + goto init_sw_rxtxqs_fail; + } + + rc = hinic_copy_mempool_init(nic_dev); + if (rc) { + PMD_DRV_LOG(ERR, "Create copy mempool failed, dev_name: %s", + eth_dev->data->name); + goto init_mpool_fail; + } + + /* set hardware feature to default status */ + rc = hinic_set_default_hw_feature(nic_dev); + if (rc) { + PMD_DRV_LOG(ERR, "Initialize hardware default features failed, dev_name: %s", + eth_dev->data->name); + goto set_default_hw_feature_fail; + } + + return 0; + +set_default_hw_feature_fail: + hinic_copy_mempool_uninit(nic_dev); + +init_mpool_fail: + hinic_deinit_sw_rxtxqs(nic_dev); + +init_sw_rxtxqs_fail: + hinic_deinit_nicio(nic_dev->hwdev); + +nic_check_fail: +init_nicio_fail: +init_cap_fail: + hinic_deactivate_hwdev_state(nic_dev->hwdev); + +init_resources_state_fail: + hinic_comm_cmdqs_free(nic_dev->hwdev); + +init_cmdq_fail: +l2nic_reset_fail: +workmode_check_fail: + hinic_comm_func_to_func_free(nic_dev->hwdev); + +init_func_to_func_fail: + hinic_comm_pf_to_mgmt_free(nic_dev->hwdev); + +init_pf_to_mgmt_fail: + hinic_comm_aeqs_free(nic_dev->hwdev); + +init_aeqs_fail: + free_cfg_mgmt(nic_dev->hwdev); + +init_cfgmgnt_fail: + hinic_hwif_res_free(nic_dev->hwdev); + +init_hwif_fail: + hinic_osdep_deinit(nic_dev->hwdev); + +init_osdep_fail: + rte_free(nic_dev->hwdev); + nic_dev->hwdev = NULL; + + return rc; +} + +static void hinic_nic_dev_destroy(struct rte_eth_dev *eth_dev) +{ + struct hinic_nic_dev *nic_dev = + HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(eth_dev); + + (void)hinic_set_link_status_follow(nic_dev->hwdev, + HINIC_LINK_FOLLOW_DEFAULT); + hinic_copy_mempool_uninit(nic_dev); + hinic_deinit_sw_rxtxqs(nic_dev); + hinic_deinit_nicio(nic_dev->hwdev); + hinic_deactivate_hwdev_state(nic_dev->hwdev); + hinic_comm_cmdqs_free(nic_dev->hwdev); + hinic_comm_func_to_func_free(nic_dev->hwdev); + hinic_comm_pf_to_mgmt_free(nic_dev->hwdev); + hinic_comm_aeqs_free(nic_dev->hwdev); + free_cfg_mgmt(nic_dev->hwdev); + hinic_hwif_res_free(nic_dev->hwdev); + hinic_osdep_deinit(nic_dev->hwdev); + rte_free(nic_dev->hwdev); + nic_dev->hwdev = NULL; +} + +/** + * DPDK callback to close the device. + * + * @param dev + * Pointer to Ethernet device structure. 
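+ *
+ * Stops the port if it is still running, then releases RX/TX resources, the
+ * MAC/VLAN tables, interrupts and finally the underlying hardware device.
+ * The usual application-side teardown is simply (port_id assumed valid):
+ *
+ *   rte_eth_dev_stop(port_id);
+ *   rte_eth_dev_close(port_id);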
+ */ +static void hinic_dev_close(struct rte_eth_dev *dev) +{ + struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); + + if (hinic_test_and_set_bit(HINIC_DEV_CLOSE, &nic_dev->dev_status)) { + PMD_DRV_LOG(WARNING, "Device %s already closed", + dev->data->name); + return; + } + + /* stop device first */ + hinic_dev_stop(dev); + + /* rx_cqe, rx_info */ + hinic_free_all_rx_resources(dev); + + /* tx_info */ + hinic_free_all_tx_resources(dev); + + /* free wq, pi_dma_addr */ + hinic_free_all_rq(nic_dev); + + /* free wq, db_addr */ + hinic_free_all_sq(nic_dev); + + /* deinit mac vlan tbl */ + hinic_deinit_mac_addr(dev); + hinic_remove_all_vlanid(dev); + + /* disable hardware and uio interrupt */ + hinic_disable_interrupt(dev); + + /* deinit nic hardware device */ + hinic_nic_dev_destroy(dev); +} + +static const struct eth_dev_ops hinic_pmd_ops = { + .dev_configure = hinic_dev_configure, + .dev_infos_get = hinic_dev_infos_get, + .fw_version_get = hinic_fw_version_get, + .rx_queue_setup = hinic_rx_queue_setup, + .tx_queue_setup = hinic_tx_queue_setup, + .dev_start = hinic_dev_start, + .dev_set_link_up = hinic_dev_set_link_up, + .dev_set_link_down = hinic_dev_set_link_down, + .link_update = hinic_link_update, + .rx_queue_release = hinic_rx_queue_release, + .tx_queue_release = hinic_tx_queue_release, + .dev_stop = hinic_dev_stop, + .dev_close = hinic_dev_close, + .mtu_set = hinic_dev_set_mtu, + .vlan_filter_set = hinic_vlan_filter_set, + .vlan_offload_set = hinic_vlan_offload_set, + .allmulticast_enable = hinic_dev_allmulticast_enable, + .allmulticast_disable = hinic_dev_allmulticast_disable, + .promiscuous_enable = hinic_dev_promiscuous_enable, + .promiscuous_disable = hinic_dev_promiscuous_disable, + .flow_ctrl_get = hinic_flow_ctrl_get, + .flow_ctrl_set = hinic_flow_ctrl_set, + .rss_hash_update = hinic_rss_hash_update, + .rss_hash_conf_get = hinic_rss_conf_get, + .reta_update = hinic_rss_indirtbl_update, + .reta_query = hinic_rss_indirtbl_query, + .stats_get = hinic_dev_stats_get, + .stats_reset = hinic_dev_stats_reset, + .xstats_get = hinic_dev_xstats_get, + .xstats_reset = hinic_dev_xstats_reset, + .xstats_get_names = hinic_dev_xstats_get_names, + .rxq_info_get = hinic_rxq_info_get, + .txq_info_get = hinic_txq_info_get, + .mac_addr_set = hinic_set_mac_addr, + .mac_addr_remove = hinic_mac_addr_remove, + .mac_addr_add = hinic_mac_addr_add, + .set_mc_addr_list = hinic_set_mc_addr_list, + .filter_ctrl = hinic_dev_filter_ctrl, +}; + +static const struct eth_dev_ops hinic_pmd_vf_ops = { + .dev_configure = hinic_dev_configure, + .dev_infos_get = hinic_dev_infos_get, + .fw_version_get = hinic_fw_version_get, + .rx_queue_setup = hinic_rx_queue_setup, + .tx_queue_setup = hinic_tx_queue_setup, + .dev_start = hinic_dev_start, + .link_update = hinic_link_update, + .rx_queue_release = hinic_rx_queue_release, + .tx_queue_release = hinic_tx_queue_release, + .dev_stop = hinic_dev_stop, + .dev_close = hinic_dev_close, + .mtu_set = hinic_dev_set_mtu, + .vlan_filter_set = hinic_vlan_filter_set, + .vlan_offload_set = hinic_vlan_offload_set, + .allmulticast_enable = hinic_dev_allmulticast_enable, + .allmulticast_disable = hinic_dev_allmulticast_disable, + .rss_hash_update = hinic_rss_hash_update, + .rss_hash_conf_get = hinic_rss_conf_get, + .reta_update = hinic_rss_indirtbl_update, + .reta_query = hinic_rss_indirtbl_query, + .stats_get = hinic_dev_stats_get, + .stats_reset = hinic_dev_stats_reset, + .xstats_get = hinic_dev_xstats_get, + .xstats_reset = hinic_dev_xstats_reset, + .xstats_get_names = 
hinic_dev_xstats_get_names, + .rxq_info_get = hinic_rxq_info_get, + .txq_info_get = hinic_txq_info_get, + .mac_addr_set = hinic_set_mac_addr, + .mac_addr_remove = hinic_mac_addr_remove, + .mac_addr_add = hinic_mac_addr_add, + .set_mc_addr_list = hinic_set_mc_addr_list, + .filter_ctrl = hinic_dev_filter_ctrl, +}; + +static int hinic_func_init(struct rte_eth_dev *eth_dev) +{ + struct rte_pci_device *pci_dev; + struct rte_ether_addr *eth_addr; + struct hinic_nic_dev *nic_dev; + struct hinic_filter_info *filter_info; + struct hinic_tcam_info *tcam_info; + u32 mac_size; + int rc; + + pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); + + /* EAL is SECONDARY and eth_dev is already created */ + if (rte_eal_process_type() != RTE_PROC_PRIMARY) { + PMD_DRV_LOG(INFO, "Initialize %s in secondary process", + eth_dev->data->name); + + return 0; + } + + nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(eth_dev); + memset(nic_dev, 0, sizeof(*nic_dev)); + + snprintf(nic_dev->proc_dev_name, + sizeof(nic_dev->proc_dev_name), + "hinic-%.4x:%.2x:%.2x.%x", + pci_dev->addr.domain, pci_dev->addr.bus, + pci_dev->addr.devid, pci_dev->addr.function); + + /* alloc mac_addrs */ + mac_size = HINIC_MAX_UC_MAC_ADDRS * sizeof(struct rte_ether_addr); + eth_addr = rte_zmalloc("hinic_mac", mac_size, 0); + if (!eth_addr) { + PMD_DRV_LOG(ERR, "Allocate ethernet addresses' memory failed, dev_name: %s", + eth_dev->data->name); + rc = -ENOMEM; + goto eth_addr_fail; + } + eth_dev->data->mac_addrs = eth_addr; + + mac_size = HINIC_MAX_MC_MAC_ADDRS * sizeof(struct rte_ether_addr); + nic_dev->mc_list = rte_zmalloc("hinic_mc", mac_size, 0); + if (!nic_dev->mc_list) { + PMD_DRV_LOG(ERR, "Allocate mcast address' memory failed, dev_name: %s", + eth_dev->data->name); + rc = -ENOMEM; + goto mc_addr_fail; + } + + /* + * Pass the information to the rte_eth_dev_close() that it should also + * release the private port resources. 
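+ * With RTE_ETH_DEV_CLOSE_REMOVE set, rte_eth_dev_close() releases the ethdev
+ * port itself, including this private data, so no separate detach step is
+ * needed for the port.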
+ */ + eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE; + + /* create hardware nic_device */ + rc = hinic_nic_dev_create(eth_dev); + if (rc) { + PMD_DRV_LOG(ERR, "Create nic device failed, dev_name: %s", + eth_dev->data->name); + goto create_nic_dev_fail; + } + + if (HINIC_IS_VF(nic_dev->hwdev)) + eth_dev->dev_ops = &hinic_pmd_vf_ops; + else + eth_dev->dev_ops = &hinic_pmd_ops; + + rc = hinic_init_mac_addr(eth_dev); + if (rc) { + PMD_DRV_LOG(ERR, "Initialize mac table failed, dev_name: %s", + eth_dev->data->name); + goto init_mac_fail; + } + + /* register callback func to eal lib */ + rc = rte_intr_callback_register(&pci_dev->intr_handle, + hinic_dev_interrupt_handler, + (void *)eth_dev); + if (rc) { + PMD_DRV_LOG(ERR, "Register rte interrupt callback failed, dev_name: %s", + eth_dev->data->name); + goto reg_intr_cb_fail; + } + + /* enable uio/vfio intr/eventfd mapping */ + rc = rte_intr_enable(&pci_dev->intr_handle); + if (rc) { + PMD_DRV_LOG(ERR, "Enable rte interrupt failed, dev_name: %s", + eth_dev->data->name); + goto enable_intr_fail; + } + hinic_set_bit(HINIC_DEV_INTR_EN, &nic_dev->dev_status); + + /* initialize filter info */ + filter_info = &nic_dev->filter; + tcam_info = &nic_dev->tcam; + memset(filter_info, 0, sizeof(struct hinic_filter_info)); + memset(tcam_info, 0, sizeof(struct hinic_tcam_info)); + /* initialize 5tuple filter list */ + TAILQ_INIT(&filter_info->fivetuple_list); + TAILQ_INIT(&tcam_info->tcam_list); + TAILQ_INIT(&nic_dev->filter_ntuple_list); + TAILQ_INIT(&nic_dev->filter_ethertype_list); + TAILQ_INIT(&nic_dev->filter_fdir_rule_list); + TAILQ_INIT(&nic_dev->hinic_flow_list); + + hinic_set_bit(HINIC_DEV_INIT, &nic_dev->dev_status); + PMD_DRV_LOG(INFO, "Initialize %s in primary successfully", + eth_dev->data->name); + + return 0; + +enable_intr_fail: + (void)rte_intr_callback_unregister(&pci_dev->intr_handle, + hinic_dev_interrupt_handler, + (void *)eth_dev); + +reg_intr_cb_fail: + hinic_deinit_mac_addr(eth_dev); + +init_mac_fail: + eth_dev->dev_ops = NULL; + hinic_nic_dev_destroy(eth_dev); + +create_nic_dev_fail: + rte_free(nic_dev->mc_list); + nic_dev->mc_list = NULL; + +mc_addr_fail: + rte_free(eth_addr); + eth_dev->data->mac_addrs = NULL; + +eth_addr_fail: + PMD_DRV_LOG(ERR, "Initialize %s in primary failed", + eth_dev->data->name); + return rc; +} + +static int hinic_dev_init(struct rte_eth_dev *eth_dev) +{ + struct rte_pci_device *pci_dev; + + pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); + + PMD_DRV_LOG(INFO, "Initializing pf hinic-%.4x:%.2x:%.2x.%x in %s process", + pci_dev->addr.domain, pci_dev->addr.bus, + pci_dev->addr.devid, pci_dev->addr.function, + (rte_eal_process_type() == RTE_PROC_PRIMARY) ? 
+ "primary" : "secondary"); + + /* rte_eth_dev rx_burst and tx_burst */ + eth_dev->rx_pkt_burst = hinic_recv_pkts; + eth_dev->tx_pkt_burst = hinic_xmit_pkts; + + return hinic_func_init(eth_dev); +} + +static int hinic_dev_uninit(struct rte_eth_dev *dev) +{ + struct hinic_nic_dev *nic_dev; + + nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); + hinic_clear_bit(HINIC_DEV_INIT, &nic_dev->dev_status); + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; + + hinic_dev_close(dev); + + dev->dev_ops = NULL; + dev->rx_pkt_burst = NULL; + dev->tx_pkt_burst = NULL; + + rte_free(nic_dev->mc_list); + + rte_free(dev->data->mac_addrs); + dev->data->mac_addrs = NULL; + + return HINIC_OK; +} + +static struct rte_pci_id pci_id_hinic_map[] = { + { RTE_PCI_DEVICE(HINIC_HUAWEI_VENDOR_ID, HINIC_DEV_ID_PRD) }, + { RTE_PCI_DEVICE(HINIC_HUAWEI_VENDOR_ID, HINIC_DEV_ID_MEZZ_25GE) }, + { RTE_PCI_DEVICE(HINIC_HUAWEI_VENDOR_ID, HINIC_DEV_ID_MEZZ_100GE) }, + { RTE_PCI_DEVICE(HINIC_HUAWEI_VENDOR_ID, HINIC_DEV_ID_VF) }, + { RTE_PCI_DEVICE(HINIC_HUAWEI_VENDOR_ID, HINIC_DEV_ID_VF_HV) }, + { RTE_PCI_DEVICE(HINIC_HUAWEI_VENDOR_ID, HINIC_DEV_ID_1822_DUAL_25GE) }, + { RTE_PCI_DEVICE(HINIC_HUAWEI_VENDOR_ID, HINIC_DEV_ID_1822_100GE) }, + {.vendor_id = 0}, +}; + +static int hinic_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, + struct rte_pci_device *pci_dev) +{ + return rte_eth_dev_pci_generic_probe(pci_dev, + sizeof(struct hinic_nic_dev), hinic_dev_init); +} + +static int hinic_pci_remove(struct rte_pci_device *pci_dev) +{ + return rte_eth_dev_pci_generic_remove(pci_dev, hinic_dev_uninit); +} + +static struct rte_pci_driver rte_hinic_pmd = { + .id_table = pci_id_hinic_map, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, + .probe = hinic_pci_probe, + .remove = hinic_pci_remove, +}; + +RTE_PMD_REGISTER_PCI(net_hinic, rte_hinic_pmd); +RTE_PMD_REGISTER_PCI_TABLE(net_hinic, pci_id_hinic_map); + +RTE_INIT(hinic_init_log) +{ + hinic_logtype = rte_log_register("pmd.net.hinic"); + if (hinic_logtype >= 0) + rte_log_set_level(hinic_logtype, RTE_LOG_INFO); +} diff --git a/src/spdk/dpdk/drivers/net/hinic/hinic_pmd_ethdev.h b/src/spdk/dpdk/drivers/net/hinic/hinic_pmd_ethdev.h new file mode 100644 index 000000000..64b2c8105 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/hinic/hinic_pmd_ethdev.h @@ -0,0 +1,352 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Huawei Technologies Co., Ltd + */ + +#ifndef _HINIC_PMD_ETHDEV_H_ +#define _HINIC_PMD_ETHDEV_H_ + +#include +#include + +#include "base/hinic_compat.h" +#include "base/hinic_pmd_cfg.h" + +#define HINIC_DEV_NAME_LEN 32 +#define HINIC_MAX_RX_QUEUES 64 + +/* mbuf pool for copy invalid mbuf segs */ +#define HINIC_COPY_MEMPOOL_DEPTH 128 +#define HINIC_COPY_MBUF_SIZE 4096 + +#define SIZE_8BYTES(size) (ALIGN((u32)(size), 8) >> 3) + +#define HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev) \ + ((struct hinic_nic_dev *)(dev)->data->dev_private) + +#define HINIC_MAX_QUEUE_DEPTH 4096 +#define HINIC_MIN_QUEUE_DEPTH 128 +#define HINIC_TXD_ALIGN 1 +#define HINIC_RXD_ALIGN 1 + +#define HINIC_UINT32_BIT_SIZE (CHAR_BIT * sizeof(uint32_t)) +#define HINIC_VFTA_SIZE (4096 / HINIC_UINT32_BIT_SIZE) + +enum hinic_dev_status { + HINIC_DEV_INIT, + HINIC_DEV_CLOSE, + HINIC_DEV_START, + HINIC_DEV_INTR_EN, +}; + +#define HINIC_MAX_Q_FILTERS 64 /* hinic just support 64 filter types */ +#define HINIC_PKT_TYPE_FIND_ID(pkt_type) ((pkt_type) - HINIC_MAX_Q_FILTERS) + +/* 5tuple filter info */ +struct hinic_5tuple_filter_info { + uint32_t dst_ip; + uint32_t src_ip; + uint16_t dst_port; 
+ uint16_t src_port; + uint8_t proto; /* l4 protocol. */ + /* + * seven levels (001b-111b), 111b is highest, + * used when more than one filter matches. + */ + uint8_t priority; + + /* if mask is 1b, do not compare the response bit domain */ + uint8_t dst_ip_mask:1, + src_ip_mask:1, + dst_port_mask:1, + src_port_mask:1, + proto_mask:1; +}; + +/* 5tuple filter structure */ +struct hinic_5tuple_filter { + TAILQ_ENTRY(hinic_5tuple_filter) entries; + uint16_t index; /* the index of 5tuple filter */ + struct hinic_5tuple_filter_info filter_info; + uint16_t queue; /* rx queue assigned to */ +}; + +TAILQ_HEAD(hinic_5tuple_filter_list, hinic_5tuple_filter); + +/* + * If this filter is added by configuration, + * it should not be removed. + */ +struct hinic_pkt_filter { + uint16_t pkt_proto; + uint8_t qid; + bool enable; +}; + +/* Structure to store filters' info. */ +struct hinic_filter_info { + uint8_t pkt_type; + uint8_t qid; + uint64_t type_mask; /* Bit mask for every used filter */ + struct hinic_5tuple_filter_list fivetuple_list; + struct hinic_pkt_filter pkt_filters[HINIC_MAX_Q_FILTERS]; +}; + +/* Information about the fdir mode. */ +struct hinic_hw_fdir_mask { + uint32_t src_ipv4_mask; + uint32_t dst_ipv4_mask; + uint16_t src_port_mask; + uint16_t dst_port_mask; + uint16_t proto_mask; + uint16_t tunnel_flag; + uint16_t tunnel_inner_src_port_mask; + uint16_t tunnel_inner_dst_port_mask; + uint16_t dst_ipv6_mask; +}; + +/* Flow Director attribute */ +struct hinic_atr_input { + uint32_t dst_ip; + uint32_t src_ip; + uint16_t src_port; + uint16_t dst_port; + uint16_t proto; + uint16_t tunnel_flag; + uint16_t tunnel_inner_src_port; + uint16_t tunnel_inner_dst_port; + uint8_t dst_ipv6[16]; +}; + +enum hinic_fdir_mode { + HINIC_FDIR_MODE_NORMAL = 0, + HINIC_FDIR_MODE_TCAM = 1, +}; + +#define HINIC_PF_MAX_TCAM_FILTERS 1024 +#define HINIC_VF_MAX_TCAM_FILTERS 128 +#define HINIC_SUPPORT_PF_MAX_NUM 4 +#define HINIC_TOTAL_PF_MAX_NUM 16 +#define HINIC_SUPPORT_VF_MAX_NUM 32 +#define HINIC_TCAM_BLOCK_TYPE_PF 0 /* 1024 tcam index of a block */ +#define HINIC_TCAM_BLOCK_TYPE_VF 1 /* 128 tcam index of a block */ + +#define HINIC_PKT_VF_TCAM_INDEX_START(block_index) \ + (HINIC_PF_MAX_TCAM_FILTERS * HINIC_SUPPORT_PF_MAX_NUM + \ + HINIC_VF_MAX_TCAM_FILTERS * (block_index)) + +TAILQ_HEAD(hinic_tcam_filter_list, hinic_tcam_filter); + +struct hinic_tcam_info { + struct hinic_tcam_filter_list tcam_list; + u8 tcam_index_array[HINIC_PF_MAX_TCAM_FILTERS]; + u16 tcam_block_index; + u16 tcam_rule_nums; +}; + +struct tag_tcam_key_mem { +#if (RTE_BYTE_ORDER == RTE_BIG_ENDIAN) + + u32 rsvd0:16; + u32 function_id:16; + + u32 protocol:8; + /* + * tunnel packet, mask must be 0xff, spec value is 1; + * normal packet, mask must be 0, spec value is 0; + * if tunnal packet, ucode use + * sip/dip/protocol/src_port/dst_dport from inner packet + */ + u32 tunnel_flag:8; + u32 sip_h:16; + + u32 sip_l:16; + u32 dip_h:16; + + u32 dip_l:16; + u32 src_port:16; + + u32 dst_port:16; + /* + * tunnel packet and normal packet, + * ext_dip mask must be 0xffffffff + */ + u32 ext_dip_h:16; + u32 ext_dip_l:16; + u32 rsvd2:16; +#else + u32 function_id:16; + u32 rsvd0:16; + + u32 sip_h:16; + u32 tunnel_flag:8; + u32 protocol:8; + + u32 dip_h:16; + u32 sip_l:16; + + u32 src_port:16; + u32 dip_l:16; + + u32 ext_dip_h:16; + u32 dst_port:16; + + u32 rsvd2:16; + u32 ext_dip_l:16; +#endif +}; + +struct tag_tcam_key_ipv6_mem { +#if (RTE_BYTE_ORDER == RTE_BIG_ENDIAN) + u32 rsvd0:16; + u32 ipv6_flag:1; + u32 protocol:7; + u32 function_id:8; + + u32 dst_port:16; 
+ u32 ipv6_key0:16; + + u32 ipv6_key1:16; + u32 ipv6_key2:16; + + u32 ipv6_key3:16; + u32 ipv6_key4:16; + + u32 ipv6_key5:16; + u32 ipv6_key6:16; + + u32 ipv6_key7:16; + u32 rsvd2:16; +#else + u32 function_id:8; + u32 protocol:7; + u32 ipv6_flag:1; + u32 rsvd0:16; + + u32 ipv6_key0:16; + u32 dst_port:16; + + u32 ipv6_key2:16; + u32 ipv6_key1:16; + + u32 ipv6_key4:16; + u32 ipv6_key3:16; + + u32 ipv6_key6:16; + u32 ipv6_key5:16; + + u32 rsvd2:16; + u32 ipv6_key7:16; +#endif +}; + +struct tag_tcam_key { + union { + struct tag_tcam_key_mem key_info; + struct tag_tcam_key_ipv6_mem key_info_ipv6; + }; + + union { + struct tag_tcam_key_mem key_mask; + struct tag_tcam_key_ipv6_mem key_mask_ipv6; + }; +}; + +struct hinic_fdir_rule { + struct hinic_hw_fdir_mask mask; + struct hinic_atr_input hinic_fdir; /* key of fdir filter */ + uint8_t queue; /* queue assigned when matched */ + enum hinic_fdir_mode mode; /* fdir type */ + u16 tcam_index; +}; + +/* ntuple filter list structure */ +struct hinic_ntuple_filter_ele { + TAILQ_ENTRY(hinic_ntuple_filter_ele) entries; + struct rte_eth_ntuple_filter filter_info; +}; + +/* ethertype filter list structure */ +struct hinic_ethertype_filter_ele { + TAILQ_ENTRY(hinic_ethertype_filter_ele) entries; + struct rte_eth_ethertype_filter filter_info; +}; + +/* fdir filter list structure */ +struct hinic_fdir_rule_ele { + TAILQ_ENTRY(hinic_fdir_rule_ele) entries; + struct hinic_fdir_rule filter_info; +}; + +struct hinic_tcam_filter { + TAILQ_ENTRY(hinic_tcam_filter) entries; + uint16_t index; /* tcam index */ + struct tag_tcam_key tcam_key; + uint16_t queue; /* rx queue assigned to */ +}; + +struct rte_flow { + enum rte_filter_type filter_type; + void *rule; +}; + +/* hinic_flow memory list structure */ +struct hinic_flow_mem { + TAILQ_ENTRY(hinic_flow_mem) entries; + struct rte_flow *flow; +}; + +TAILQ_HEAD(hinic_ntuple_filter_list, hinic_ntuple_filter_ele); +TAILQ_HEAD(hinic_ethertype_filter_list, hinic_ethertype_filter_ele); +TAILQ_HEAD(hinic_fdir_rule_filter_list, hinic_fdir_rule_ele); +TAILQ_HEAD(hinic_flow_mem_list, hinic_flow_mem); + +extern const struct rte_flow_ops hinic_flow_ops; + +/* hinic nic_device */ +struct hinic_nic_dev { + /* hardware device */ + struct hinic_hwdev *hwdev; + struct hinic_txq **txqs; + struct hinic_rxq **rxqs; + struct rte_mempool *cpy_mpool; + u16 num_qps; + u16 num_sq; + u16 num_rq; + u16 mtu_size; + u8 rss_tmpl_idx; + u8 rss_indir_flag; + u8 num_rss; + u8 rx_queue_list[HINIC_MAX_RX_QUEUES]; + + bool pause_set; + struct nic_pause_config nic_pause; + + u32 vfta[HINIC_VFTA_SIZE]; /* VLAN bitmap */ + + struct rte_ether_addr default_addr; + struct rte_ether_addr *mc_list; + /* info */ + unsigned int flags; + struct nic_service_cap nic_cap; + u32 rx_mode_status; /* promisc or allmulticast */ + unsigned long dev_status; + + char proc_dev_name[HINIC_DEV_NAME_LEN]; + /* PF0->COS4, PF1->COS5, PF2->COS6, PF3->COS7, + * vf: the same with associate pf + */ + u32 default_cos; + u32 rx_csum_en; + + struct hinic_filter_info filter; + struct hinic_tcam_info tcam; + struct hinic_ntuple_filter_list filter_ntuple_list; + struct hinic_ethertype_filter_list filter_ethertype_list; + struct hinic_fdir_rule_filter_list filter_fdir_rule_list; + struct hinic_flow_mem_list hinic_flow_list; +}; + +void hinic_free_fdir_filter(struct hinic_nic_dev *nic_dev); + +void hinic_destroy_fdir_filter(struct rte_eth_dev *dev); +#endif /* _HINIC_PMD_ETHDEV_H_ */ diff --git a/src/spdk/dpdk/drivers/net/hinic/hinic_pmd_flow.c b/src/spdk/dpdk/drivers/net/hinic/hinic_pmd_flow.c 
new file mode 100644 index 000000000..cc0744da2 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/hinic/hinic_pmd_flow.c @@ -0,0 +1,3272 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Huawei Technologies Co., Ltd + */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include "base/hinic_compat.h" +#include "base/hinic_pmd_hwdev.h" +#include "base/hinic_pmd_hwif.h" +#include "base/hinic_pmd_wq.h" +#include "base/hinic_pmd_cmdq.h" +#include "base/hinic_pmd_niccfg.h" +#include "hinic_pmd_ethdev.h" + +#define HINIC_MAX_RX_QUEUE_NUM 64 + +#ifndef UINT8_MAX +#define UINT8_MAX (u8)(~((u8)0)) /* 0xFF */ +#define UINT16_MAX (u16)(~((u16)0)) /* 0xFFFF */ +#define UINT32_MAX (u32)(~((u32)0)) /* 0xFFFFFFFF */ +#define UINT64_MAX (u64)(~((u64)0)) /* 0xFFFFFFFFFFFFFFFF */ +#define ASCII_MAX (0x7F) +#endif + +/* IPSURX MACRO */ +#define PA_ETH_TYPE_ROCE 0 +#define PA_ETH_TYPE_IPV4 1 +#define PA_ETH_TYPE_IPV6 2 +#define PA_ETH_TYPE_OTHER 3 + +#define PA_IP_PROTOCOL_TYPE_TCP 1 +#define PA_IP_PROTOCOL_TYPE_UDP 2 +#define PA_IP_PROTOCOL_TYPE_ICMP 3 +#define PA_IP_PROTOCOL_TYPE_IPV4_IGMP 4 +#define PA_IP_PROTOCOL_TYPE_SCTP 5 +#define PA_IP_PROTOCOL_TYPE_VRRP 112 + +#define IP_HEADER_PROTOCOL_TYPE_TCP 6 +#define IP_HEADER_PROTOCOL_TYPE_UDP 17 +#define IP_HEADER_PROTOCOL_TYPE_ICMP 1 +#define IP_HEADER_PROTOCOL_TYPE_ICMPV6 58 + +#define FDIR_TCAM_NORMAL_PACKET 0 +#define FDIR_TCAM_TUNNEL_PACKET 1 + +#define HINIC_MIN_N_TUPLE_PRIO 1 +#define HINIC_MAX_N_TUPLE_PRIO 7 + +/* TCAM type mask in hardware */ +#define TCAM_PKT_BGP_SPORT 1 +#define TCAM_PKT_VRRP 2 +#define TCAM_PKT_BGP_DPORT 3 +#define TCAM_PKT_LACP 4 + +#define TCAM_DIP_IPV4_TYPE 0 +#define TCAM_DIP_IPV6_TYPE 1 + +#define BGP_DPORT_ID 179 +#define IPPROTO_VRRP 112 + +/* Packet type defined in hardware to perform filter */ +#define PKT_IGMP_IPV4_TYPE 64 +#define PKT_ICMP_IPV4_TYPE 65 +#define PKT_ICMP_IPV6_TYPE 66 +#define PKT_ICMP_IPV6RS_TYPE 67 +#define PKT_ICMP_IPV6RA_TYPE 68 +#define PKT_ICMP_IPV6NS_TYPE 69 +#define PKT_ICMP_IPV6NA_TYPE 70 +#define PKT_ICMP_IPV6RE_TYPE 71 +#define PKT_DHCP_IPV4_TYPE 72 +#define PKT_DHCP_IPV6_TYPE 73 +#define PKT_LACP_TYPE 74 +#define PKT_ARP_REQ_TYPE 79 +#define PKT_ARP_REP_TYPE 80 +#define PKT_ARP_TYPE 81 +#define PKT_BGPD_DPORT_TYPE 83 +#define PKT_BGPD_SPORT_TYPE 84 +#define PKT_VRRP_TYPE 85 + +#define HINIC_DEV_PRIVATE_TO_FILTER_INFO(nic_dev) \ + (&((struct hinic_nic_dev *)nic_dev)->filter) + +#define HINIC_DEV_PRIVATE_TO_TCAM_INFO(nic_dev) \ + (&((struct hinic_nic_dev *)nic_dev)->tcam) + + +enum hinic_atr_flow_type { + HINIC_ATR_FLOW_TYPE_IPV4_DIP = 0x1, + HINIC_ATR_FLOW_TYPE_IPV4_SIP = 0x2, + HINIC_ATR_FLOW_TYPE_DPORT = 0x3, + HINIC_ATR_FLOW_TYPE_SPORT = 0x4, +}; + +/* Structure to store fdir's info. */ +struct hinic_fdir_info { + uint8_t fdir_flag; + uint8_t qid; + uint32_t fdir_key; +}; + +/** + * Endless loop will never happen with below assumption + * 1. there is at least one no-void item(END) + * 2. cur is before END. + */ +static inline const struct rte_flow_item * +next_no_void_pattern(const struct rte_flow_item pattern[], + const struct rte_flow_item *cur) +{ + const struct rte_flow_item *next = + cur ? cur + 1 : &pattern[0]; + while (1) { + if (next->type != RTE_FLOW_ITEM_TYPE_VOID) + return next; + next++; + } +} + +static inline const struct rte_flow_action * +next_no_void_action(const struct rte_flow_action actions[], + const struct rte_flow_action *cur) +{ + const struct rte_flow_action *next = + cur ? 
cur + 1 : &actions[0]; + while (1) { + if (next->type != RTE_FLOW_ACTION_TYPE_VOID) + return next; + next++; + } +} + +static int hinic_check_ethertype_attr_ele(const struct rte_flow_attr *attr, + struct rte_flow_error *error) +{ + /* Must be input direction */ + if (!attr->ingress) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, + attr, "Only support ingress."); + return -rte_errno; + } + + if (attr->egress) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, + attr, "Not support egress."); + return -rte_errno; + } + + if (attr->priority) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, + attr, "Not support priority."); + return -rte_errno; + } + + if (attr->group) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_GROUP, + attr, "Not support group."); + return -rte_errno; + } + + return 0; +} + +static int hinic_check_filter_arg(const struct rte_flow_attr *attr, + const struct rte_flow_item *pattern, + const struct rte_flow_action *actions, + struct rte_flow_error *error) +{ + if (!pattern) { + rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM, + NULL, "NULL pattern."); + return -rte_errno; + } + + if (!actions) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_NUM, + NULL, "NULL action."); + return -rte_errno; + } + + if (!attr) { + rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR, + NULL, "NULL attribute."); + return -rte_errno; + } + + return 0; +} + +static int hinic_check_ethertype_first_item(const struct rte_flow_item *item, + struct rte_flow_error *error) +{ + /* The first non-void item should be MAC */ + if (item->type != RTE_FLOW_ITEM_TYPE_ETH) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by ethertype filter"); + return -rte_errno; + } + + /* Not supported last point for range */ + if (item->last) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + item, "Not supported last point for range"); + return -rte_errno; + } + + /* Get the MAC info. */ + if (!item->spec || !item->mask) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by ethertype filter"); + return -rte_errno; + } + return 0; +} + +static int +hinic_parse_ethertype_aciton(const struct rte_flow_action *actions, + const struct rte_flow_action *act, + const struct rte_flow_action_queue *act_q, + struct rte_eth_ethertype_filter *filter, + struct rte_flow_error *error) +{ + /* Parse action */ + act = next_no_void_action(actions, NULL); + if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE && + act->type != RTE_FLOW_ACTION_TYPE_DROP) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, "Not supported action."); + return -rte_errno; + } + + if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) { + act_q = (const struct rte_flow_action_queue *)act->conf; + filter->queue = act_q->index; + } else { + filter->flags |= RTE_ETHTYPE_FLAGS_DROP; + } + + /* Check if the next non-void item is END */ + act = next_no_void_action(actions, act); + if (act->type != RTE_FLOW_ACTION_TYPE_END) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, "Not supported action."); + return -rte_errno; + } + + return 0; +} + +/** + * Parse the rule to see if it is a ethertype rule. + * And get the ethertype filter info BTW. + * pattern: + * The first not void item can be ETH. + * The next not void item must be END. + * action: + * The first not void action should be QUEUE. 
+ * The next not void action should be END. + * pattern example: + * ITEM Spec Mask + * ETH type 0x0807 0xFFFF + * END + * other members in mask and spec should set to 0x00. + * item->last should be NULL. + */ +static int cons_parse_ethertype_filter(const struct rte_flow_attr *attr, + const struct rte_flow_item *pattern, + const struct rte_flow_action *actions, + struct rte_eth_ethertype_filter *filter, + struct rte_flow_error *error) +{ + const struct rte_flow_item *item; + const struct rte_flow_action *act = NULL; + const struct rte_flow_item_eth *eth_spec; + const struct rte_flow_item_eth *eth_mask; + const struct rte_flow_action_queue *act_q = NULL; + + if (hinic_check_filter_arg(attr, pattern, actions, error)) + return -rte_errno; + + item = next_no_void_pattern(pattern, NULL); + if (hinic_check_ethertype_first_item(item, error)) + return -rte_errno; + + eth_spec = (const struct rte_flow_item_eth *)item->spec; + eth_mask = (const struct rte_flow_item_eth *)item->mask; + + /* + * Mask bits of source MAC address must be full of 0. + * Mask bits of destination MAC address must be full + * of 1 or full of 0. + */ + if (!rte_is_zero_ether_addr(ð_mask->src) || + (!rte_is_zero_ether_addr(ð_mask->dst) && + !rte_is_broadcast_ether_addr(ð_mask->dst))) { + rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, + item, "Invalid ether address mask"); + return -rte_errno; + } + + if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) { + rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, + item, "Invalid ethertype mask"); + return -rte_errno; + } + + /* + * If mask bits of destination MAC address + * are full of 1, set RTE_ETHTYPE_FLAGS_MAC. + */ + if (rte_is_broadcast_ether_addr(ð_mask->dst)) { + filter->mac_addr = eth_spec->dst; + filter->flags |= RTE_ETHTYPE_FLAGS_MAC; + } else { + filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC; + } + filter->ether_type = rte_be_to_cpu_16(eth_spec->type); + + /* Check if the next non-void item is END. */ + item = next_no_void_pattern(pattern, item); + if (item->type != RTE_FLOW_ITEM_TYPE_END) { + rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by ethertype filter."); + return -rte_errno; + } + + if (hinic_parse_ethertype_aciton(actions, act, act_q, filter, error)) + return -rte_errno; + + if (hinic_check_ethertype_attr_ele(attr, error)) + return -rte_errno; + + return 0; +} + +static int hinic_parse_ethertype_filter(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_eth_ethertype_filter *filter, + struct rte_flow_error *error) +{ + if (cons_parse_ethertype_filter(attr, pattern, actions, filter, error)) + return -rte_errno; + + /* NIC doesn't support MAC address. 
*/ + if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) { + memset(filter, 0, sizeof(struct rte_eth_ethertype_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + NULL, "Not supported by ethertype filter"); + return -rte_errno; + } + + if (filter->queue >= dev->data->nb_rx_queues) { + memset(filter, 0, sizeof(struct rte_eth_ethertype_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + NULL, "Queue index much too big"); + return -rte_errno; + } + + if (filter->ether_type == RTE_ETHER_TYPE_IPV4 || + filter->ether_type == RTE_ETHER_TYPE_IPV6) { + memset(filter, 0, sizeof(struct rte_eth_ethertype_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + NULL, "IPv4/IPv6 not supported by ethertype filter"); + return -rte_errno; + } + + if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) { + memset(filter, 0, sizeof(struct rte_eth_ethertype_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + NULL, "Drop option is unsupported"); + return -rte_errno; + } + + /* Hinic only support LACP/ARP for ether type */ + if (filter->ether_type != RTE_ETHER_TYPE_SLOW && + filter->ether_type != RTE_ETHER_TYPE_ARP) { + memset(filter, 0, sizeof(struct rte_eth_ethertype_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, NULL, + "only lacp/arp type supported by ethertype filter"); + return -rte_errno; + } + + return 0; +} + +static int hinic_check_ntuple_attr_ele(const struct rte_flow_attr *attr, + struct rte_eth_ntuple_filter *filter, + struct rte_flow_error *error) +{ + /* Must be input direction */ + if (!attr->ingress) { + memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, + attr, "Only support ingress."); + return -rte_errno; + } + + if (attr->egress) { + memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, + attr, "Not support egress."); + return -rte_errno; + } + + if (attr->priority > 0xFFFF) { + memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, + attr, "Error priority."); + return -rte_errno; + } + + if (attr->priority < HINIC_MIN_N_TUPLE_PRIO || + attr->priority > HINIC_MAX_N_TUPLE_PRIO) + filter->priority = 1; + else + filter->priority = (uint16_t)attr->priority; + + return 0; +} + +static int +hinic_check_ntuple_act_ele(__rte_unused const struct rte_flow_item *item, + const struct rte_flow_action actions[], + struct rte_eth_ntuple_filter *filter, + struct rte_flow_error *error) +{ + const struct rte_flow_action *act; + /* + * n-tuple only supports forwarding, + * check if the first not void action is QUEUE. 
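For illustration only (this sketch is not part of the patch): the ethertype path above accepts one ETH item with a fully masked type, an END item and a single QUEUE action, and hinic_parse_ethertype_filter() further restricts the EtherType to LACP or ARP and rejects MAC matching and DROP. A minimal rte_flow call of that shape could look as follows; the port id, queue index and helper name are placeholders, and the port's Rx queues are assumed to be configured.

#include <rte_flow.h>
#include <rte_ether.h>
#include <rte_byteorder.h>
#include <rte_errno.h>

/* Hypothetical helper: steer LACP (EtherType 0x8809) frames to Rx queue 0. */
static int lacp_to_queue0(uint16_t port_id)
{
	struct rte_flow_error err;
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_eth eth_spec = {
		.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_SLOW),
	};
	struct rte_flow_item_eth eth_mask = {
		.type = RTE_BE16(0xffff),	/* EtherType must be fully masked */
	};
	struct rte_flow_action_queue queue = { .index = 0 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
		  .spec = &eth_spec, .mask = &eth_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	/* NULL return means the rule was rejected; rte_errno holds the cause. */
	return rte_flow_create(port_id, &attr, pattern, actions, &err) ?
	       0 : -rte_errno;
}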
+ */ + act = next_no_void_action(actions, NULL); + if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) { + memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, "Flow action type is not QUEUE."); + return -rte_errno; + } + filter->queue = + ((const struct rte_flow_action_queue *)act->conf)->index; + + /* Check if the next not void item is END */ + act = next_no_void_action(actions, act); + if (act->type != RTE_FLOW_ACTION_TYPE_END) { + memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, "Next not void item is not END."); + return -rte_errno; + } + + return 0; +} + +static int hinic_ntuple_item_check_ether(const struct rte_flow_item **ipv4_item, + const struct rte_flow_item pattern[], + struct rte_flow_error *error) +{ + const struct rte_flow_item *item; + + /* The first not void item can be MAC or IPv4 */ + item = next_no_void_pattern(pattern, NULL); + + if (item->type != RTE_FLOW_ITEM_TYPE_ETH && + item->type != RTE_FLOW_ITEM_TYPE_IPV4) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by ntuple filter"); + return -rte_errno; + } + + /* Skip Ethernet */ + if (item->type == RTE_FLOW_ITEM_TYPE_ETH) { + /* Not supported last point for range */ + if (item->last) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + item, "Not supported last point for range"); + return -rte_errno; + } + /* if the first item is MAC, the content should be NULL */ + if (item->spec || item->mask) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by ntuple filter"); + return -rte_errno; + } + /* check if the next not void item is IPv4 */ + item = next_no_void_pattern(pattern, item); + if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) { + rte_flow_error_set(error, + EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by ntuple filter"); + return -rte_errno; + } + } + + *ipv4_item = item; + return 0; +} + +static int +hinic_ntuple_item_check_ipv4(const struct rte_flow_item **in_out_item, + const struct rte_flow_item pattern[], + struct rte_eth_ntuple_filter *filter, + struct rte_flow_error *error) +{ + const struct rte_flow_item_ipv4 *ipv4_spec; + const struct rte_flow_item_ipv4 *ipv4_mask; + const struct rte_flow_item *item = *in_out_item; + + /* Get the IPv4 info */ + if (!item->spec || !item->mask) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Invalid ntuple mask"); + return -rte_errno; + } + /* Not supported last point for range */ + if (item->last) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + item, "Not supported last point for range"); + return -rte_errno; + } + + ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask; + /* + * Only support src & dst addresses, protocol, + * others should be masked. 
+ */ + if (ipv4_mask->hdr.version_ihl || + ipv4_mask->hdr.type_of_service || + ipv4_mask->hdr.total_length || + ipv4_mask->hdr.packet_id || + ipv4_mask->hdr.fragment_offset || + ipv4_mask->hdr.time_to_live || + ipv4_mask->hdr.hdr_checksum || + !ipv4_mask->hdr.next_proto_id) { + rte_flow_error_set(error, + EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by ntuple filter"); + return -rte_errno; + } + + filter->dst_ip_mask = ipv4_mask->hdr.dst_addr; + filter->src_ip_mask = ipv4_mask->hdr.src_addr; + filter->proto_mask = ipv4_mask->hdr.next_proto_id; + + ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec; + filter->dst_ip = ipv4_spec->hdr.dst_addr; + filter->src_ip = ipv4_spec->hdr.src_addr; + filter->proto = ipv4_spec->hdr.next_proto_id; + + /* Get next no void item */ + *in_out_item = next_no_void_pattern(pattern, item); + return 0; +} + +static int hinic_ntuple_item_check_l4(const struct rte_flow_item **in_out_item, + const struct rte_flow_item pattern[], + struct rte_eth_ntuple_filter *filter, + struct rte_flow_error *error) +{ + const struct rte_flow_item_tcp *tcp_spec; + const struct rte_flow_item_tcp *tcp_mask; + const struct rte_flow_item_icmp *icmp_mask; + const struct rte_flow_item *item = *in_out_item; + u32 ntuple_filter_size = sizeof(struct rte_eth_ntuple_filter); + + if (item->type == RTE_FLOW_ITEM_TYPE_END) + return 0; + + /* Get TCP or UDP info */ + if (item->type != RTE_FLOW_ITEM_TYPE_END && + (!item->spec || !item->mask)) { + memset(filter, 0, ntuple_filter_size); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Invalid ntuple mask"); + return -rte_errno; + } + + /* Not supported last point for range */ + if (item->last) { + memset(filter, 0, ntuple_filter_size); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + item, "Not supported last point for range"); + return -rte_errno; + } + + if (item->type == RTE_FLOW_ITEM_TYPE_TCP) { + tcp_mask = (const struct rte_flow_item_tcp *)item->mask; + + /* + * Only support src & dst ports, tcp flags, + * others should be masked. + */ + if (tcp_mask->hdr.sent_seq || + tcp_mask->hdr.recv_ack || + tcp_mask->hdr.data_off || + tcp_mask->hdr.rx_win || + tcp_mask->hdr.cksum || + tcp_mask->hdr.tcp_urp) { + memset(filter, 0, ntuple_filter_size); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by ntuple filter"); + return -rte_errno; + } + + filter->dst_port_mask = tcp_mask->hdr.dst_port; + filter->src_port_mask = tcp_mask->hdr.src_port; + if (tcp_mask->hdr.tcp_flags == 0xFF) { + filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG; + } else if (!tcp_mask->hdr.tcp_flags) { + filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG; + } else { + memset(filter, 0, ntuple_filter_size); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by ntuple filter"); + return -rte_errno; + } + + tcp_spec = (const struct rte_flow_item_tcp *)item->spec; + filter->dst_port = tcp_spec->hdr.dst_port; + filter->src_port = tcp_spec->hdr.src_port; + filter->tcp_flags = tcp_spec->hdr.tcp_flags; + } else if (item->type == RTE_FLOW_ITEM_TYPE_ICMP) { + icmp_mask = (const struct rte_flow_item_icmp *)item->mask; + + /* ICMP all should be masked. 
*/ + if (icmp_mask->hdr.icmp_cksum || + icmp_mask->hdr.icmp_ident || + icmp_mask->hdr.icmp_seq_nb || + icmp_mask->hdr.icmp_type || + icmp_mask->hdr.icmp_code) { + memset(filter, 0, ntuple_filter_size); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by ntuple filter"); + return -rte_errno; + } + } + + /* Get next no void item */ + *in_out_item = next_no_void_pattern(pattern, item); + return 0; +} + +static int hinic_ntuple_item_check_end(const struct rte_flow_item *item, + struct rte_eth_ntuple_filter *filter, + struct rte_flow_error *error) +{ + /* Check if the next not void item is END */ + if (item->type != RTE_FLOW_ITEM_TYPE_END) { + memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by ntuple filter"); + return -rte_errno; + } + return 0; +} + +static int hinic_check_ntuple_item_ele(const struct rte_flow_item *item, + const struct rte_flow_item pattern[], + struct rte_eth_ntuple_filter *filter, + struct rte_flow_error *error) +{ + if (hinic_ntuple_item_check_ether(&item, pattern, error) || + hinic_ntuple_item_check_ipv4(&item, pattern, filter, error) || + hinic_ntuple_item_check_l4(&item, pattern, filter, error) || + hinic_ntuple_item_check_end(item, filter, error)) + return -rte_errno; + + return 0; +} + +/** + * Parse the rule to see if it is a n-tuple rule. + * And get the n-tuple filter info BTW. + * pattern: + * The first not void item can be ETH or IPV4. + * The second not void item must be IPV4 if the first one is ETH. + * The third not void item must be UDP or TCP. + * The next not void item must be END. + * action: + * The first not void action should be QUEUE. + * The next not void action should be END. + * pattern example: + * ITEM Spec Mask + * ETH NULL NULL + * IPV4 src_addr 192.168.1.20 0xFFFFFFFF + * dst_addr 192.167.3.50 0xFFFFFFFF + * next_proto_id 17 0xFF + * UDP/TCP/ src_port 80 0xFFFF + * SCTP dst_port 80 0xFFFF + * END + * other members in mask and spec should set to 0x00. + * item->last should be NULL. + * Please aware there's an asumption for all the parsers. + * rte_flow_item is using big endian, rte_flow_attr and + * rte_flow_action are using CPU order. + * Because the pattern is used to describe the packets, + * normally the packets should use network order. 
+ */ +static int cons_parse_ntuple_filter(const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_eth_ntuple_filter *filter, + struct rte_flow_error *error) +{ + const struct rte_flow_item *item = NULL; + + if (hinic_check_filter_arg(attr, pattern, actions, error)) + return -rte_errno; + + if (hinic_check_ntuple_item_ele(item, pattern, filter, error)) + return -rte_errno; + + if (hinic_check_ntuple_act_ele(item, actions, filter, error)) + return -rte_errno; + + if (hinic_check_ntuple_attr_ele(attr, filter, error)) + return -rte_errno; + + return 0; +} + +static int hinic_parse_ntuple_filter(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_eth_ntuple_filter *filter, + struct rte_flow_error *error) +{ + int ret; + + ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error); + if (ret) + return ret; + + /* Hinic doesn't support tcp flags */ + if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) { + memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + NULL, "Not supported by ntuple filter"); + return -rte_errno; + } + + /* Hinic doesn't support many priorities */ + if (filter->priority < HINIC_MIN_N_TUPLE_PRIO || + filter->priority > HINIC_MAX_N_TUPLE_PRIO) { + memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + NULL, "Priority not supported by ntuple filter"); + return -rte_errno; + } + + if (filter->queue >= dev->data->nb_rx_queues) + return -rte_errno; + + /* Fixed value for hinic */ + filter->flags = RTE_5TUPLE_FLAGS; + return 0; +} + +static int hinic_normal_item_check_ether(const struct rte_flow_item **ip_item, + const struct rte_flow_item pattern[], + struct rte_flow_error *error) +{ + const struct rte_flow_item *item; + + /* The first not void item can be MAC or IPv4 or TCP or UDP */ + item = next_no_void_pattern(pattern, NULL); + + if (item->type != RTE_FLOW_ITEM_TYPE_ETH && + item->type != RTE_FLOW_ITEM_TYPE_IPV4 && + item->type != RTE_FLOW_ITEM_TYPE_TCP && + item->type != RTE_FLOW_ITEM_TYPE_UDP) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Not supported by fdir filter,support mac,ipv4,tcp,udp"); + return -rte_errno; + } + + /* Not supported last point for range */ + if (item->last) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, item, + "Not supported last point for range"); + return -rte_errno; + } + + /* Skip Ethernet */ + if (item->type == RTE_FLOW_ITEM_TYPE_ETH) { + /* All should be masked. 
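For illustration only (not part of the patch): the n-tuple pattern documented above maps to an rte_flow rule with an optional empty ETH item, an IPV4 item whose src/dst addresses and next_proto_id carry full masks, an L4 item carrying the ports, and a QUEUE action. A minimal sketch assuming port 0 with at least four Rx queues configured; the addresses, ports and helper name are placeholders, and the sketch only claims the shape the parsers above accept.

#include <netinet/in.h>
#include <rte_flow.h>
#include <rte_ip.h>
#include <rte_byteorder.h>

/* Hypothetical helper: 5-tuple match on a TCP flow, forwarded to queue 3. */
static struct rte_flow *tcp_5tuple_to_queue3(uint16_t port_id,
					     struct rte_flow_error *err)
{
	struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };
	struct rte_flow_item_ipv4 ip_spec = {
		.hdr = {
			.src_addr = rte_cpu_to_be_32(RTE_IPV4(192, 168, 1, 20)),
			.dst_addr = rte_cpu_to_be_32(RTE_IPV4(192, 167, 3, 50)),
			.next_proto_id = IPPROTO_TCP,
		},
	};
	struct rte_flow_item_ipv4 ip_mask = {
		.hdr = {
			.src_addr = RTE_BE32(0xffffffff),
			.dst_addr = RTE_BE32(0xffffffff),
			.next_proto_id = 0xff,	/* must not be left unmasked */
		},
	};
	struct rte_flow_item_tcp tcp_spec = {
		.hdr = { .dst_port = RTE_BE16(80) },
	};
	struct rte_flow_item_tcp tcp_mask = {
		/* tcp_flags mask left 0: the driver rejects TCP flag matching */
		.hdr = { .dst_port = RTE_BE16(0xffff) },
	};
	struct rte_flow_action_queue queue = { .index = 3 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },	/* spec/mask NULL: skipped */
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ip_spec, .mask = &ip_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_TCP,
		  .spec = &tcp_spec, .mask = &tcp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, err);
}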
*/ + if (item->spec || item->mask) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter,support mac"); + return -rte_errno; + } + /* Check if the next not void item is IPv4 */ + item = next_no_void_pattern(pattern, item); + if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 && + item->type != RTE_FLOW_ITEM_TYPE_IPV6) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Not supported by fdir filter,support mac,ipv4"); + return -rte_errno; + } + } + + *ip_item = item; + return 0; +} + +static int hinic_normal_item_check_ip(const struct rte_flow_item **in_out_item, + const struct rte_flow_item pattern[], + struct hinic_fdir_rule *rule, + struct rte_flow_error *error) +{ + const struct rte_flow_item_ipv4 *ipv4_spec; + const struct rte_flow_item_ipv4 *ipv4_mask; + const struct rte_flow_item_ipv6 *ipv6_spec; + const struct rte_flow_item_ipv6 *ipv6_mask; + const struct rte_flow_item *item = *in_out_item; + int i; + + /* Get the IPv4 info */ + if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) { + /* Not supported last point for range */ + if (item->last) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + item, "Not supported last point for range"); + return -rte_errno; + } + + if (!item->mask) { + memset(rule, 0, sizeof(struct hinic_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Invalid fdir filter mask"); + return -rte_errno; + } + + ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask; + /* + * Only support src & dst addresses, + * others should be masked. + */ + if (ipv4_mask->hdr.version_ihl || + ipv4_mask->hdr.type_of_service || + ipv4_mask->hdr.total_length || + ipv4_mask->hdr.packet_id || + ipv4_mask->hdr.fragment_offset || + ipv4_mask->hdr.time_to_live || + ipv4_mask->hdr.next_proto_id || + ipv4_mask->hdr.hdr_checksum) { + rte_flow_error_set(error, + EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, + "Not supported by fdir filter, support src,dst ip"); + return -rte_errno; + } + + rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr; + rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr; + rule->mode = HINIC_FDIR_MODE_NORMAL; + + if (item->spec) { + ipv4_spec = + (const struct rte_flow_item_ipv4 *)item->spec; + rule->hinic_fdir.dst_ip = ipv4_spec->hdr.dst_addr; + rule->hinic_fdir.src_ip = ipv4_spec->hdr.src_addr; + } + + /* + * Check if the next not void item is + * TCP or UDP or END. 
+ */ + item = next_no_void_pattern(pattern, item); + if (item->type != RTE_FLOW_ITEM_TYPE_TCP && + item->type != RTE_FLOW_ITEM_TYPE_UDP && + item->type != RTE_FLOW_ITEM_TYPE_ICMP && + item->type != RTE_FLOW_ITEM_TYPE_ANY && + item->type != RTE_FLOW_ITEM_TYPE_END) { + memset(rule, 0, sizeof(struct hinic_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Not supported by fdir filter, support tcp, udp, end"); + return -rte_errno; + } + } else if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) { + /* Not supported last point for range */ + if (item->last) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + item, "Not supported last point for range"); + return -rte_errno; + } + + if (!item->mask) { + memset(rule, 0, sizeof(struct hinic_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Invalid fdir filter mask"); + return -rte_errno; + } + + ipv6_mask = (const struct rte_flow_item_ipv6 *)item->mask; + + /* Only support dst addresses, others should be masked */ + if (ipv6_mask->hdr.vtc_flow || + ipv6_mask->hdr.payload_len || + ipv6_mask->hdr.proto || + ipv6_mask->hdr.hop_limits) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Not supported by fdir filter, support dst ipv6"); + return -rte_errno; + } + + /* check ipv6 src addr mask, ipv6 src addr is 16 bytes */ + for (i = 0; i < 16; i++) { + if (ipv6_mask->hdr.src_addr[i] == UINT8_MAX) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Not supported by fdir filter, do not support src ipv6"); + return -rte_errno; + } + } + + if (!item->spec) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Not supported by fdir filter, ipv6 spec is NULL"); + return -rte_errno; + } + + for (i = 0; i < 16; i++) { + if (ipv6_mask->hdr.dst_addr[i] == UINT8_MAX) + rule->mask.dst_ipv6_mask |= 1 << i; + } + + ipv6_spec = (const struct rte_flow_item_ipv6 *)item->spec; + rte_memcpy(rule->hinic_fdir.dst_ipv6, + ipv6_spec->hdr.dst_addr, 16); + + /* + * Check if the next not void item is TCP or UDP or ICMP. 
+ */ + item = next_no_void_pattern(pattern, item); + if (item->type != RTE_FLOW_ITEM_TYPE_TCP && + item->type != RTE_FLOW_ITEM_TYPE_UDP && + item->type != RTE_FLOW_ITEM_TYPE_ICMP && + item->type != RTE_FLOW_ITEM_TYPE_ICMP6){ + memset(rule, 0, sizeof(struct hinic_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Not supported by fdir filter, support tcp, udp, icmp"); + return -rte_errno; + } + } + + *in_out_item = item; + return 0; +} + +static int hinic_normal_item_check_l4(const struct rte_flow_item **in_out_item, + __rte_unused const struct rte_flow_item pattern[], + __rte_unused struct hinic_fdir_rule *rule, + struct rte_flow_error *error) +{ + const struct rte_flow_item *item = *in_out_item; + + if (item->type != RTE_FLOW_ITEM_TYPE_END) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by normal fdir filter, not support l4"); + return -rte_errno; + } + + return 0; +} + + +static int hinic_normal_item_check_end(const struct rte_flow_item *item, + struct hinic_fdir_rule *rule, + struct rte_flow_error *error) +{ + /* Check if the next not void item is END */ + if (item->type != RTE_FLOW_ITEM_TYPE_END) { + memset(rule, 0, sizeof(struct hinic_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter, support end"); + return -rte_errno; + } + + return 0; +} + +static int hinic_check_normal_item_ele(const struct rte_flow_item *item, + const struct rte_flow_item pattern[], + struct hinic_fdir_rule *rule, + struct rte_flow_error *error) +{ + if (hinic_normal_item_check_ether(&item, pattern, error) || + hinic_normal_item_check_ip(&item, pattern, rule, error) || + hinic_normal_item_check_l4(&item, pattern, rule, error) || + hinic_normal_item_check_end(item, rule, error)) + return -rte_errno; + + return 0; +} + +static int +hinic_tcam_normal_item_check_l4(const struct rte_flow_item **in_out_item, + const struct rte_flow_item pattern[], + struct hinic_fdir_rule *rule, + struct rte_flow_error *error) +{ + const struct rte_flow_item *item = *in_out_item; + const struct rte_flow_item_tcp *tcp_spec; + const struct rte_flow_item_tcp *tcp_mask; + const struct rte_flow_item_udp *udp_spec; + const struct rte_flow_item_udp *udp_mask; + + if (item->type == RTE_FLOW_ITEM_TYPE_ICMP) { + rule->mode = HINIC_FDIR_MODE_TCAM; + rule->mask.proto_mask = UINT16_MAX; + rule->hinic_fdir.proto = IP_HEADER_PROTOCOL_TYPE_ICMP; + } else if (item->type == RTE_FLOW_ITEM_TYPE_ICMP6) { + rule->mode = HINIC_FDIR_MODE_TCAM; + rule->mask.proto_mask = UINT16_MAX; + rule->hinic_fdir.proto = IP_HEADER_PROTOCOL_TYPE_ICMPV6; + } else if (item->type == RTE_FLOW_ITEM_TYPE_ANY) { + rule->mode = HINIC_FDIR_MODE_TCAM; + } else if (item->type == RTE_FLOW_ITEM_TYPE_TCP) { + if (!item->mask) { + (void)memset(rule, 0, sizeof(struct hinic_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter, support src, dst ports"); + return -rte_errno; + } + + tcp_mask = (const struct rte_flow_item_tcp *)item->mask; + + /* + * Only support src & dst ports, tcp flags, + * others should be masked. 
+ */ + if (tcp_mask->hdr.sent_seq || + tcp_mask->hdr.recv_ack || + tcp_mask->hdr.data_off || + tcp_mask->hdr.rx_win || + tcp_mask->hdr.cksum || + tcp_mask->hdr.tcp_urp) { + (void)memset(rule, 0, sizeof(struct hinic_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir normal tcam filter"); + return -rte_errno; + } + + rule->mode = HINIC_FDIR_MODE_TCAM; + rule->mask.proto_mask = UINT16_MAX; + rule->mask.dst_port_mask = tcp_mask->hdr.dst_port; + rule->mask.src_port_mask = tcp_mask->hdr.src_port; + + rule->hinic_fdir.proto = IP_HEADER_PROTOCOL_TYPE_TCP; + if (item->spec) { + tcp_spec = (const struct rte_flow_item_tcp *)item->spec; + rule->hinic_fdir.dst_port = tcp_spec->hdr.dst_port; + rule->hinic_fdir.src_port = tcp_spec->hdr.src_port; + } + } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) { + /* + * Only care about src & dst ports, + * others should be masked. + */ + if (!item->mask) { + (void)memset(rule, 0, sizeof(struct hinic_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter, support src, dst ports"); + return -rte_errno; + } + + udp_mask = (const struct rte_flow_item_udp *)item->mask; + if (udp_mask->hdr.dgram_len || + udp_mask->hdr.dgram_cksum) { + (void)memset(rule, 0, sizeof(struct hinic_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter, support udp"); + return -rte_errno; + } + + rule->mode = HINIC_FDIR_MODE_TCAM; + rule->mask.proto_mask = UINT16_MAX; + rule->mask.src_port_mask = udp_mask->hdr.src_port; + rule->mask.dst_port_mask = udp_mask->hdr.dst_port; + + rule->hinic_fdir.proto = IP_HEADER_PROTOCOL_TYPE_UDP; + if (item->spec) { + udp_spec = (const struct rte_flow_item_udp *)item->spec; + rule->hinic_fdir.src_port = udp_spec->hdr.src_port; + rule->hinic_fdir.dst_port = udp_spec->hdr.dst_port; + } + } else { + (void)memset(rule, 0, sizeof(struct hinic_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter tcam normal, l4 only support icmp, tcp"); + return -rte_errno; + } + + item = next_no_void_pattern(pattern, item); + if (item->type != RTE_FLOW_ITEM_TYPE_END) { + (void)memset(rule, 0, sizeof(struct hinic_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter tcam normal, support end"); + return -rte_errno; + } + + /* get next no void item */ + *in_out_item = item; + + return 0; +} + +static int hinic_check_tcam_normal_item_ele(const struct rte_flow_item *item, + const struct rte_flow_item pattern[], + struct hinic_fdir_rule *rule, + struct rte_flow_error *error) +{ + if (hinic_normal_item_check_ether(&item, pattern, error) || + hinic_normal_item_check_ip(&item, pattern, rule, error) || + hinic_tcam_normal_item_check_l4(&item, pattern, rule, error) || + hinic_normal_item_check_end(item, rule, error)) + return -rte_errno; + + return 0; +} + +static int hinic_tunnel_item_check_l4(const struct rte_flow_item **in_out_item, + const struct rte_flow_item pattern[], + struct hinic_fdir_rule *rule, + struct rte_flow_error *error) +{ + const struct rte_flow_item *item = *in_out_item; + + if (item->type == RTE_FLOW_ITEM_TYPE_UDP) { + item = next_no_void_pattern(pattern, item); + if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) { + (void)memset(rule, 0, sizeof(struct hinic_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter, support 
vxlan"); + return -rte_errno; + } + + *in_out_item = item; + } else { + (void)memset(rule, 0, sizeof(struct hinic_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter tcam tunnel, outer l4 only support udp"); + return -rte_errno; + } + + return 0; +} + +static int +hinic_tunnel_item_check_vxlan(const struct rte_flow_item **in_out_item, + const struct rte_flow_item pattern[], + struct hinic_fdir_rule *rule, + struct rte_flow_error *error) +{ + const struct rte_flow_item *item = *in_out_item; + + + if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) { + item = next_no_void_pattern(pattern, item); + if (item->type != RTE_FLOW_ITEM_TYPE_TCP && + item->type != RTE_FLOW_ITEM_TYPE_UDP && + item->type != RTE_FLOW_ITEM_TYPE_ANY) { + (void)memset(rule, 0, sizeof(struct hinic_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter, support tcp/udp"); + return -rte_errno; + } + + *in_out_item = item; + } + + return 0; +} + +static int +hinic_tunnel_inner_item_check_l4(const struct rte_flow_item **in_out_item, + const struct rte_flow_item pattern[], + struct hinic_fdir_rule *rule, + struct rte_flow_error *error) +{ + const struct rte_flow_item_tcp *tcp_spec; + const struct rte_flow_item_tcp *tcp_mask; + const struct rte_flow_item_udp *udp_spec; + const struct rte_flow_item_udp *udp_mask; + const struct rte_flow_item *item = *in_out_item; + + if (item->type != RTE_FLOW_ITEM_TYPE_END) { + /* Not supported last point for range */ + if (item->last) { + memset(rule, 0, sizeof(struct hinic_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + item, "Not supported last point for range"); + return -rte_errno; + } + + /* get the TCP/UDP info */ + if (item->type == RTE_FLOW_ITEM_TYPE_TCP) { + /* + * Only care about src & dst ports, + * others should be masked. + */ + if (!item->mask) { + memset(rule, 0, sizeof(struct hinic_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter, support src, dst ports"); + return -rte_errno; + } + + tcp_mask = (const struct rte_flow_item_tcp *)item->mask; + if (tcp_mask->hdr.sent_seq || + tcp_mask->hdr.recv_ack || + tcp_mask->hdr.data_off || + tcp_mask->hdr.tcp_flags || + tcp_mask->hdr.rx_win || + tcp_mask->hdr.cksum || + tcp_mask->hdr.tcp_urp) { + (void)memset(rule, 0, + sizeof(struct hinic_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter, support tcp"); + return -rte_errno; + } + + rule->mode = HINIC_FDIR_MODE_TCAM; + rule->mask.tunnel_flag = UINT16_MAX; + rule->mask.tunnel_inner_src_port_mask = + tcp_mask->hdr.src_port; + rule->mask.tunnel_inner_dst_port_mask = + tcp_mask->hdr.dst_port; + rule->mask.proto_mask = UINT16_MAX; + + rule->hinic_fdir.proto = IP_HEADER_PROTOCOL_TYPE_TCP; + if (item->spec) { + tcp_spec = + (const struct rte_flow_item_tcp *)item->spec; + rule->hinic_fdir.tunnel_inner_src_port = + tcp_spec->hdr.src_port; + rule->hinic_fdir.tunnel_inner_dst_port = + tcp_spec->hdr.dst_port; + } + } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) { + /* + * Only care about src & dst ports, + * others should be masked. 
+ */ + if (!item->mask) { + memset(rule, 0, sizeof(struct hinic_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter, support src, dst ports"); + return -rte_errno; + } + + udp_mask = (const struct rte_flow_item_udp *)item->mask; + if (udp_mask->hdr.dgram_len || + udp_mask->hdr.dgram_cksum) { + memset(rule, 0, sizeof(struct hinic_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter, support udp"); + return -rte_errno; + } + + rule->mode = HINIC_FDIR_MODE_TCAM; + rule->mask.tunnel_flag = UINT16_MAX; + rule->mask.tunnel_inner_src_port_mask = + udp_mask->hdr.src_port; + rule->mask.tunnel_inner_dst_port_mask = + udp_mask->hdr.dst_port; + rule->mask.proto_mask = UINT16_MAX; + + rule->hinic_fdir.proto = IP_HEADER_PROTOCOL_TYPE_UDP; + if (item->spec) { + udp_spec = + (const struct rte_flow_item_udp *)item->spec; + rule->hinic_fdir.tunnel_inner_src_port = + udp_spec->hdr.src_port; + rule->hinic_fdir.tunnel_inner_dst_port = + udp_spec->hdr.dst_port; + } + } else if (item->type == RTE_FLOW_ITEM_TYPE_ANY) { + rule->mode = HINIC_FDIR_MODE_TCAM; + rule->mask.tunnel_flag = UINT16_MAX; + } else { + memset(rule, 0, sizeof(struct hinic_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter, support tcp/udp"); + return -rte_errno; + } + + /* get next no void item */ + *in_out_item = next_no_void_pattern(pattern, item); + } + + return 0; +} + +static int hinic_check_tcam_tunnel_item_ele(const struct rte_flow_item *item, + const struct rte_flow_item pattern[], + struct hinic_fdir_rule *rule, + struct rte_flow_error *error) +{ + if (hinic_normal_item_check_ether(&item, pattern, error) || + hinic_normal_item_check_ip(&item, pattern, rule, error) || + hinic_tunnel_item_check_l4(&item, pattern, rule, error) || + hinic_tunnel_item_check_vxlan(&item, pattern, rule, error) || + hinic_tunnel_inner_item_check_l4(&item, pattern, rule, error) || + hinic_normal_item_check_end(item, rule, error)) + return -rte_errno; + + return 0; +} + +static int hinic_check_normal_attr_ele(const struct rte_flow_attr *attr, + struct hinic_fdir_rule *rule, + struct rte_flow_error *error) +{ + /* Must be input direction */ + if (!attr->ingress) { + memset(rule, 0, sizeof(struct hinic_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, + attr, "Only support ingress."); + return -rte_errno; + } + + /* Not supported */ + if (attr->egress) { + memset(rule, 0, sizeof(struct hinic_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, + attr, "Not support egress."); + return -rte_errno; + } + + /* Not supported */ + if (attr->priority) { + memset(rule, 0, sizeof(struct hinic_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, + attr, "Not support priority."); + return -rte_errno; + } + + return 0; +} + +static int hinic_check_normal_act_ele(const struct rte_flow_item *item, + const struct rte_flow_action actions[], + struct hinic_fdir_rule *rule, + struct rte_flow_error *error) +{ + const struct rte_flow_action *act; + + /* Check if the first not void action is QUEUE */ + act = next_no_void_action(actions, NULL); + if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) { + memset(rule, 0, sizeof(struct hinic_fdir_rule)); + rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, + item, "Not supported action."); + return -rte_errno; + } + + rule->queue = ((const struct 
rte_flow_action_queue *)act->conf)->index; + + /* Check if the next not void item is END */ + act = next_no_void_action(actions, act); + if (act->type != RTE_FLOW_ACTION_TYPE_END) { + memset(rule, 0, sizeof(struct hinic_fdir_rule)); + rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, + act, "Not supported action."); + return -rte_errno; + } + + return 0; +} + +/** + * Parse the rule to see if it is a IP or MAC VLAN flow director rule. + * And get the flow director filter info BTW. + * UDP/TCP/SCTP PATTERN: + * The first not void item can be ETH or IPV4 or IPV6 + * The second not void item must be IPV4 or IPV6 if the first one is ETH. + * The next not void item could be UDP or TCP(optional) + * The next not void item must be END. + * ACTION: + * The first not void action should be QUEUE. + * The second not void optional action should be MARK, + * mark_id is a uint32_t number. + * The next not void action should be END. + * UDP/TCP pattern example: + * ITEM Spec Mask + * ETH NULL NULL + * IPV4 src_addr 1.2.3.6 0xFFFFFFFF + * dst_addr 1.2.3.5 0xFFFFFFFF + * UDP/TCP src_port 80 0xFFFF + * dst_port 80 0xFFFF + * END + * Other members in mask and spec should set to 0x00. + * Item->last should be NULL. + */ +static int +hinic_parse_fdir_filter_normal(const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct hinic_fdir_rule *rule, + struct rte_flow_error *error) +{ + const struct rte_flow_item *item = NULL; + + if (hinic_check_filter_arg(attr, pattern, actions, error)) + return -rte_errno; + + if (hinic_check_normal_item_ele(item, pattern, rule, error)) + return -rte_errno; + + if (hinic_check_normal_attr_ele(attr, rule, error)) + return -rte_errno; + + if (hinic_check_normal_act_ele(item, actions, rule, error)) + return -rte_errno; + + return 0; +} + +/** + * Parse the rule to see if it is a IP or MAC VLAN flow director rule. + * And get the flow director filter info BTW. + * UDP/TCP/SCTP PATTERN: + * The first not void item can be ETH or IPV4 or IPV6 + * The second not void item must be IPV4 or IPV6 if the first one is ETH. + * The next not void item can be ANY/TCP/UDP + * ACTION: + * The first not void action should be QUEUE. + * The second not void optional action should be MARK, + * mark_id is a uint32_t number. + * The next not void action should be END. + * UDP/TCP pattern example: + * ITEM Spec Mask + * ETH NULL NULL + * IPV4 src_addr 1.2.3.6 0xFFFFFFFF + * dst_addr 1.2.3.5 0xFFFFFFFF + * UDP/TCP src_port 80 0xFFFF + * dst_port 80 0xFFFF + * END + * Other members in mask and spec should set to 0x00. + * Item->last should be NULL. + */ +static int +hinic_parse_fdir_filter_tcam_normal(const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct hinic_fdir_rule *rule, + struct rte_flow_error *error) +{ + const struct rte_flow_item *item = NULL; + + if (hinic_check_filter_arg(attr, pattern, actions, error)) + return -rte_errno; + + if (hinic_check_tcam_normal_item_ele(item, pattern, rule, error)) + return -rte_errno; + + if (hinic_check_normal_attr_ele(attr, rule, error)) + return -rte_errno; + + if (hinic_check_normal_act_ele(item, actions, rule, error)) + return -rte_errno; + + return 0; +} + +/** + * Parse the rule to see if it is a IP or MAC VLAN flow director rule. + * And get the flow director filter info BTW. 
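For illustration only (not part of the patch): a rule matching the normal-mode flow director pattern parsed by hinic_parse_fdir_filter_normal() above masks IPV4 addresses only, carries no L4 item, and uses a single QUEUE action. A minimal sketch with placeholder addresses, port id and queue index, assuming the queue exists on the port.

#include <rte_flow.h>
#include <rte_ip.h>
#include <rte_byteorder.h>

/* Hypothetical helper: normal-mode fdir rule on src/dst IPv4 addresses. */
static struct rte_flow *fdir_ipv4_to_queue2(uint16_t port_id,
					    struct rte_flow_error *err)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_ipv4 ip_spec = {
		.hdr = {
			.src_addr = rte_cpu_to_be_32(RTE_IPV4(1, 2, 3, 6)),
			.dst_addr = rte_cpu_to_be_32(RTE_IPV4(1, 2, 3, 5)),
		},
	};
	struct rte_flow_item_ipv4 ip_mask = {
		/* next_proto_id left unmasked so the n-tuple parser rejects it
		 * and the rule falls through to the normal fdir parser.
		 */
		.hdr = {
			.src_addr = RTE_BE32(0xffffffff),
			.dst_addr = RTE_BE32(0xffffffff),
		},
	};
	struct rte_flow_action_queue queue = { .index = 2 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ip_spec, .mask = &ip_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, err);
}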
+ * UDP/TCP/SCTP PATTERN: + * The first not void item can be ETH or IPV4 or IPV6 + * The second not void item must be IPV4 or IPV6 if the first one is ETH. + * The next not void item must be UDP + * The next not void item must be VXLAN(optional) + * The first not void item can be ETH or IPV4 or IPV6 + * The next not void item could be ANY or UDP or TCP(optional) + * The next not void item must be END. + * ACTION: + * The first not void action should be QUEUE. + * The second not void optional action should be MARK, + * mark_id is a uint32_t number. + * The next not void action should be END. + * UDP/TCP pattern example: + * ITEM Spec Mask + * ETH NULL NULL + * IPV4 src_addr 1.2.3.6 0xFFFFFFFF + * dst_addr 1.2.3.5 0xFFFFFFFF + * UDP NULL NULL + * VXLAN NULL NULL + * UDP/TCP src_port 80 0xFFFF + * dst_port 80 0xFFFF + * END + * Other members in mask and spec should set to 0x00. + * Item->last should be NULL. + */ +static int +hinic_parse_fdir_filter_tacm_tunnel(const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct hinic_fdir_rule *rule, + struct rte_flow_error *error) +{ + const struct rte_flow_item *item = NULL; + + if (hinic_check_filter_arg(attr, pattern, actions, error)) + return -rte_errno; + + if (hinic_check_tcam_tunnel_item_ele(item, pattern, rule, error)) + return -rte_errno; + + if (hinic_check_normal_attr_ele(attr, rule, error)) + return -rte_errno; + + if (hinic_check_normal_act_ele(item, actions, rule, error)) + return -rte_errno; + + return 0; +} + +static int hinic_parse_fdir_filter(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct hinic_fdir_rule *rule, + struct rte_flow_error *error) +{ + int ret; + + ret = hinic_parse_fdir_filter_normal(attr, pattern, actions, + rule, error); + if (!ret) + goto step_next; + + ret = hinic_parse_fdir_filter_tcam_normal(attr, pattern, actions, + rule, error); + if (!ret) + goto step_next; + + ret = hinic_parse_fdir_filter_tacm_tunnel(attr, pattern, actions, + rule, error); + if (ret) + return ret; + +step_next: + if (rule->queue >= dev->data->nb_rx_queues) + return -ENOTSUP; + + return ret; +} + +/** + * Check if the flow rule is supported by nic. + * It only checkes the format. Don't guarantee the rule can be programmed into + * the HW. Because there can be no enough room for the rule. 
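For illustration only (not part of the patch): a TCAM tunnel-mode rule of the shape documented above carries outer IPV4 + UDP + VXLAN items followed by an inner L4 item with the inner ports. Validating before creating exercises hinic_flow_validate() above, which tries the n-tuple, ethertype and the three fdir parsers in turn. Port id, queue index, addresses and the helper name are placeholders; a sketch, not a guarantee that the hardware has room for the rule.

#include <rte_flow.h>
#include <rte_ip.h>
#include <rte_byteorder.h>
#include <rte_errno.h>

/* Hypothetical helper: VXLAN-encapsulated inner TCP dst port 80 to queue 1. */
static int vxlan_inner_tcp_to_queue1(uint16_t port_id)
{
	struct rte_flow_error err;
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_ipv4 ip_spec = {
		.hdr = { .dst_addr = rte_cpu_to_be_32(RTE_IPV4(1, 2, 3, 5)) },
	};
	struct rte_flow_item_ipv4 ip_mask = {
		.hdr = { .dst_addr = RTE_BE32(0xffffffff) },
	};
	struct rte_flow_item_tcp inner_spec = {
		.hdr = { .dst_port = RTE_BE16(80) },
	};
	struct rte_flow_item_tcp inner_mask = {
		.hdr = { .dst_port = RTE_BE16(0xffff) },
	};
	struct rte_flow_action_queue queue = { .index = 1 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ip_spec, .mask = &ip_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP },	/* outer UDP, unmasked */
		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN },
		{ .type = RTE_FLOW_ITEM_TYPE_TCP,	/* inner L4 */
		  .spec = &inner_spec, .mask = &inner_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	/* Check the format first, then program the rule. */
	if (rte_flow_validate(port_id, &attr, pattern, actions, &err))
		return -rte_errno;

	return rte_flow_create(port_id, &attr, pattern, actions, &err) ?
	       0 : -rte_errno;
}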
+ */ +static int hinic_flow_validate(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + struct rte_eth_ethertype_filter ethertype_filter; + struct rte_eth_ntuple_filter ntuple_filter; + struct hinic_fdir_rule fdir_rule; + int ret; + + memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter)); + ret = hinic_parse_ntuple_filter(dev, attr, pattern, + actions, &ntuple_filter, error); + if (!ret) + return 0; + + memset(ðertype_filter, 0, sizeof(struct rte_eth_ethertype_filter)); + ret = hinic_parse_ethertype_filter(dev, attr, pattern, + actions, ðertype_filter, error); + + if (!ret) + return 0; + + memset(&fdir_rule, 0, sizeof(struct hinic_fdir_rule)); + ret = hinic_parse_fdir_filter(dev, attr, pattern, + actions, &fdir_rule, error); + + return ret; +} + +static inline int ntuple_ip_filter(struct rte_eth_ntuple_filter *filter, + struct hinic_5tuple_filter_info *hinic_filter_info) +{ + switch (filter->dst_ip_mask) { + case UINT32_MAX: + hinic_filter_info->dst_ip_mask = 0; + hinic_filter_info->dst_ip = filter->dst_ip; + break; + case 0: + hinic_filter_info->dst_ip_mask = 1; + hinic_filter_info->dst_ip = 0; + break; + default: + PMD_DRV_LOG(ERR, "Invalid dst_ip mask."); + return -EINVAL; + } + + switch (filter->src_ip_mask) { + case UINT32_MAX: + hinic_filter_info->src_ip_mask = 0; + hinic_filter_info->src_ip = filter->src_ip; + break; + case 0: + hinic_filter_info->src_ip_mask = 1; + hinic_filter_info->src_ip = 0; + break; + default: + PMD_DRV_LOG(ERR, "Invalid src_ip mask."); + return -EINVAL; + } + return 0; +} + +static inline int ntuple_port_filter(struct rte_eth_ntuple_filter *filter, + struct hinic_5tuple_filter_info *hinic_filter_info) +{ + switch (filter->dst_port_mask) { + case UINT16_MAX: + hinic_filter_info->dst_port_mask = 0; + hinic_filter_info->dst_port = filter->dst_port; + break; + case 0: + hinic_filter_info->dst_port_mask = 1; + hinic_filter_info->dst_port = 0; + break; + default: + PMD_DRV_LOG(ERR, "Invalid dst_port mask."); + return -EINVAL; + } + + switch (filter->src_port_mask) { + case UINT16_MAX: + hinic_filter_info->src_port_mask = 0; + hinic_filter_info->src_port = filter->src_port; + break; + case 0: + hinic_filter_info->src_port_mask = 1; + hinic_filter_info->src_port = 0; + break; + default: + PMD_DRV_LOG(ERR, "Invalid src_port mask."); + return -EINVAL; + } + + return 0; +} + +static inline int ntuple_proto_filter(struct rte_eth_ntuple_filter *filter, + struct hinic_5tuple_filter_info *hinic_filter_info) +{ + switch (filter->proto_mask) { + case UINT8_MAX: + hinic_filter_info->proto_mask = 0; + hinic_filter_info->proto = filter->proto; + break; + case 0: + hinic_filter_info->proto_mask = 1; + hinic_filter_info->proto = 0; + break; + default: + PMD_DRV_LOG(ERR, "Invalid protocol mask."); + return -EINVAL; + } + + return 0; +} + +static inline int ntuple_filter_to_5tuple(struct rte_eth_ntuple_filter *filter, + struct hinic_5tuple_filter_info *filter_info) +{ + if (filter->queue >= HINIC_MAX_RX_QUEUE_NUM || + filter->priority > HINIC_MAX_N_TUPLE_PRIO || + filter->priority < HINIC_MIN_N_TUPLE_PRIO) + return -EINVAL; + + if (ntuple_ip_filter(filter, filter_info) || + ntuple_port_filter(filter, filter_info) || + ntuple_proto_filter(filter, filter_info)) + return -EINVAL; + + filter_info->priority = (uint8_t)filter->priority; + return 0; +} + +static inline struct hinic_5tuple_filter * +hinic_5tuple_filter_lookup(struct 
hinic_5tuple_filter_list *filter_list, + struct hinic_5tuple_filter_info *key) +{ + struct hinic_5tuple_filter *it; + + TAILQ_FOREACH(it, filter_list, entries) { + if (memcmp(key, &it->filter_info, + sizeof(struct hinic_5tuple_filter_info)) == 0) { + return it; + } + } + + return NULL; +} + +static int hinic_set_lacp_tcam(struct hinic_nic_dev *nic_dev) +{ + struct tag_pa_rule lacp_rule; + struct tag_pa_action lacp_action; + + memset(&lacp_rule, 0, sizeof(lacp_rule)); + memset(&lacp_action, 0, sizeof(lacp_action)); + /* LACP TCAM rule */ + lacp_rule.eth_type = PA_ETH_TYPE_OTHER; + lacp_rule.l2_header.eth_type.val16 = 0x8809; + lacp_rule.l2_header.eth_type.mask16 = 0xffff; + + /* LACP TCAM action */ + lacp_action.err_type = 0x3f; /* err from ipsu, not convert */ + lacp_action.fwd_action = 0x7; /* 0x3:drop; 0x7: not convert */ + lacp_action.pkt_type = PKT_LACP_TYPE; + lacp_action.pri = 0x0; + lacp_action.push_len = 0xf; /* push_len:0xf, not convert */ + + return hinic_set_fdir_tcam(nic_dev->hwdev, TCAM_PKT_LACP, + &lacp_rule, &lacp_action); +} + +static int hinic_set_bgp_dport_tcam(struct hinic_nic_dev *nic_dev) +{ + struct tag_pa_rule bgp_rule; + struct tag_pa_action bgp_action; + + memset(&bgp_rule, 0, sizeof(bgp_rule)); + memset(&bgp_action, 0, sizeof(bgp_action)); + /* BGP TCAM rule */ + bgp_rule.eth_type = PA_ETH_TYPE_IPV4; /* Eth type is IPV4 */ + bgp_rule.ip_header.protocol.val8 = IP_HEADER_PROTOCOL_TYPE_TCP; + bgp_rule.ip_header.protocol.mask8 = UINT8_MAX; + bgp_rule.ip_protocol_type = PA_IP_PROTOCOL_TYPE_TCP; + bgp_rule.eth_ip_tcp.dport.val16 = BGP_DPORT_ID; /* Dport is 179 */ + bgp_rule.eth_ip_tcp.dport.mask16 = UINT16_MAX; + + /* BGP TCAM action */ + bgp_action.err_type = 0x3f; /* err from ipsu, not convert */ + bgp_action.fwd_action = 0x7; /* 0x3:drop; 0x7: not convert */ + bgp_action.pkt_type = PKT_BGPD_DPORT_TYPE; /* bgp_dport: 83 */ + bgp_action.pri = 0xf; /* pri of BGP is 0xf, result from ipsu parse + * results, not need to convert + */ + bgp_action.push_len = 0xf; /* push_len:0xf, not convert */ + + return hinic_set_fdir_tcam(nic_dev->hwdev, + TCAM_PKT_BGP_DPORT, &bgp_rule, &bgp_action); +} + +static int hinic_set_bgp_sport_tcam(struct hinic_nic_dev *nic_dev) +{ + struct tag_pa_rule bgp_rule; + struct tag_pa_action bgp_action; + + memset(&bgp_rule, 0, sizeof(bgp_rule)); + memset(&bgp_action, 0, sizeof(bgp_action)); + /* BGP TCAM rule */ + bgp_rule.eth_type = PA_ETH_TYPE_IPV4; + bgp_rule.ip_header.protocol.val8 = IP_HEADER_PROTOCOL_TYPE_TCP; + bgp_rule.ip_header.protocol.mask8 = UINT8_MAX; + bgp_rule.ip_protocol_type = PA_IP_PROTOCOL_TYPE_TCP; + bgp_rule.eth_ip_tcp.sport.val16 = BGP_DPORT_ID; + bgp_rule.eth_ip_tcp.sport.mask16 = UINT16_MAX; + + /* BGP TCAM action */ + bgp_action.err_type = 0x3f; /* err from ipsu, not convert */ + bgp_action.fwd_action = 0x7; /* 0x3:drop; 0x7: not convert */ + bgp_action.pkt_type = PKT_BGPD_SPORT_TYPE; /* bgp:sport: 84 */ + bgp_action.pri = 0xf; /* pri of BGP is 0xf, result from ipsu parse + * results, not need to convert + */ + bgp_action.push_len = 0xf; /* push_len:0xf, not convert */ + + return hinic_set_fdir_tcam(nic_dev->hwdev, TCAM_PKT_BGP_SPORT, + &bgp_rule, &bgp_action); +} + +static int hinic_set_vrrp_tcam(struct hinic_nic_dev *nic_dev) +{ + struct tag_pa_rule vrrp_rule; + struct tag_pa_action vrrp_action; + + memset(&vrrp_rule, 0, sizeof(vrrp_rule)); + memset(&vrrp_action, 0, sizeof(vrrp_action)); + /* VRRP TCAM rule */ + vrrp_rule.eth_type = PA_ETH_TYPE_IPV4; + vrrp_rule.ip_protocol_type = PA_IP_PROTOCOL_TYPE_TCP; + 
vrrp_rule.ip_header.protocol.mask8 = 0xff; + vrrp_rule.ip_header.protocol.val8 = PA_IP_PROTOCOL_TYPE_VRRP; + + /* VRRP TCAM action */ + vrrp_action.err_type = 0x3f; + vrrp_action.fwd_action = 0x7; + vrrp_action.pkt_type = PKT_VRRP_TYPE; /* VRRP: 85 */ + vrrp_action.pri = 0xf; + vrrp_action.push_len = 0xf; + + return hinic_set_fdir_tcam(nic_dev->hwdev, TCAM_PKT_VRRP, + &vrrp_rule, &vrrp_action); +} + +/** + * Clear all fdir configuration. + * + * @param nic_dev + * The hardware interface of a Ethernet device. + * + * @return + * 0 on success, + * negative error value otherwise. + */ +void hinic_free_fdir_filter(struct hinic_nic_dev *nic_dev) +{ + (void)hinic_set_fdir_filter(nic_dev->hwdev, 0, 0, 0, false); + + (void)hinic_clear_fdir_tcam(nic_dev->hwdev, TCAM_PKT_BGP_DPORT); + + (void)hinic_clear_fdir_tcam(nic_dev->hwdev, TCAM_PKT_BGP_SPORT); + + (void)hinic_clear_fdir_tcam(nic_dev->hwdev, TCAM_PKT_VRRP); + + (void)hinic_clear_fdir_tcam(nic_dev->hwdev, TCAM_PKT_LACP); + + (void)hinic_flush_tcam_rule(nic_dev->hwdev); +} + +static int hinic_filter_info_init(struct hinic_5tuple_filter *filter, + struct hinic_filter_info *filter_info) +{ + switch (filter->filter_info.proto) { + case IPPROTO_TCP: + /* Filter type is bgp type if dst_port or src_port is 179 */ + if (filter->filter_info.dst_port == RTE_BE16(BGP_DPORT_ID) && + !(filter->filter_info.dst_port_mask)) { + filter_info->pkt_type = PKT_BGPD_DPORT_TYPE; + } else if (filter->filter_info.src_port == + RTE_BE16(BGP_DPORT_ID) && + !(filter->filter_info.src_port_mask)) { + filter_info->pkt_type = PKT_BGPD_SPORT_TYPE; + } else { + PMD_DRV_LOG(INFO, "TCP PROTOCOL:5tuple filters" + " just support BGP now, proto:0x%x, " + "dst_port:0x%x, dst_port_mask:0x%x." + "src_port:0x%x, src_port_mask:0x%x.", + filter->filter_info.proto, + filter->filter_info.dst_port, + filter->filter_info.dst_port_mask, + filter->filter_info.src_port, + filter->filter_info.src_port_mask); + return -EINVAL; + } + break; + + case IPPROTO_VRRP: + filter_info->pkt_type = PKT_VRRP_TYPE; + break; + + case IPPROTO_ICMP: + filter_info->pkt_type = PKT_ICMP_IPV4_TYPE; + break; + + case IPPROTO_ICMPV6: + filter_info->pkt_type = PKT_ICMP_IPV6_TYPE; + break; + + default: + PMD_DRV_LOG(ERR, "5tuple filters just support BGP/VRRP/ICMP now, " + "proto: 0x%x, dst_port: 0x%x, dst_port_mask: 0x%x." + "src_port: 0x%x, src_port_mask: 0x%x.", + filter->filter_info.proto, filter->filter_info.dst_port, + filter->filter_info.dst_port_mask, + filter->filter_info.src_port, + filter->filter_info.src_port_mask); + return -EINVAL; + } + + return 0; +} + +static int hinic_lookup_new_filter(struct hinic_5tuple_filter *filter, + struct hinic_filter_info *filter_info, int *index) +{ + int type_id; + + type_id = HINIC_PKT_TYPE_FIND_ID(filter_info->pkt_type); + + if (type_id > HINIC_MAX_Q_FILTERS - 1) { + PMD_DRV_LOG(ERR, "Pkt filters only support 64 filter type."); + return -EINVAL; + } + + if (!(filter_info->type_mask & (1 << type_id))) { + filter_info->type_mask |= 1 << type_id; + filter->index = type_id; + filter_info->pkt_filters[type_id].enable = true; + filter_info->pkt_filters[type_id].pkt_proto = + filter->filter_info.proto; + TAILQ_INSERT_TAIL(&filter_info->fivetuple_list, + filter, entries); + } else { + PMD_DRV_LOG(ERR, "Filter type: %d exists.", type_id); + return -EIO; + } + + *index = type_id; + return 0; +} + +/* + * Add a 5tuple filter + * + * @param dev: + * Pointer to struct rte_eth_dev. + * @param filter: + * Pointer to the filter that will be added. + * @return + * - On success, zero. 
+ * - On failure, a negative value. + */ +static int hinic_add_5tuple_filter(struct rte_eth_dev *dev, + struct hinic_5tuple_filter *filter) +{ + struct hinic_filter_info *filter_info = + HINIC_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + int i, ret_fw; + struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); + + if (hinic_filter_info_init(filter, filter_info) || + hinic_lookup_new_filter(filter, filter_info, &i)) + return -EFAULT; + + ret_fw = hinic_set_fdir_filter(nic_dev->hwdev, filter_info->pkt_type, + filter_info->qid, + filter_info->pkt_filters[i].enable, + true); + if (ret_fw) { + PMD_DRV_LOG(ERR, "Set fdir filter failed, type: 0x%x, qid: 0x%x, enable: 0x%x", + filter_info->pkt_type, filter->queue, + filter_info->pkt_filters[i].enable); + return -EFAULT; + } + + PMD_DRV_LOG(INFO, "Add 5tuple succeed, type: 0x%x, qid: 0x%x, enable: 0x%x", + filter_info->pkt_type, filter_info->qid, + filter_info->pkt_filters[filter->index].enable); + + switch (filter->filter_info.proto) { + case IPPROTO_TCP: + if (filter->filter_info.dst_port == RTE_BE16(BGP_DPORT_ID)) { + ret_fw = hinic_set_bgp_dport_tcam(nic_dev); + if (ret_fw) { + PMD_DRV_LOG(ERR, "Set dport bgp failed, " + "type: 0x%x, qid: 0x%x, enable: 0x%x", + filter_info->pkt_type, filter->queue, + filter_info->pkt_filters[i].enable); + return -EFAULT; + } + + PMD_DRV_LOG(INFO, "Set dport bgp succeed, qid: 0x%x, enable: 0x%x", + filter->queue, + filter_info->pkt_filters[i].enable); + } else if (filter->filter_info.src_port == + RTE_BE16(BGP_DPORT_ID)) { + ret_fw = hinic_set_bgp_sport_tcam(nic_dev); + if (ret_fw) { + PMD_DRV_LOG(ERR, "Set sport bgp failed, " + "type: 0x%x, qid: 0x%x, enable: 0x%x", + filter_info->pkt_type, filter->queue, + filter_info->pkt_filters[i].enable); + return -EFAULT; + } + + PMD_DRV_LOG(INFO, "Set sport bgp succeed, qid: 0x%x, enable: 0x%x", + filter->queue, + filter_info->pkt_filters[i].enable); + } + + break; + + case IPPROTO_VRRP: + ret_fw = hinic_set_vrrp_tcam(nic_dev); + if (ret_fw) { + PMD_DRV_LOG(ERR, "Set VRRP failed, " + "type: 0x%x, qid: 0x%x, enable: 0x%x", + filter_info->pkt_type, filter->queue, + filter_info->pkt_filters[i].enable); + return -EFAULT; + } + PMD_DRV_LOG(INFO, "Set VRRP succeed, qid: 0x%x, enable: 0x%x", + filter->queue, + filter_info->pkt_filters[i].enable); + break; + + default: + break; + } + + return 0; +} + +/* + * Remove a 5tuple filter + * + * @param dev + * Pointer to struct rte_eth_dev. + * @param filter + * The pointer of the filter will be removed. 
+ */ +static void hinic_remove_5tuple_filter(struct rte_eth_dev *dev, + struct hinic_5tuple_filter *filter) +{ + struct hinic_filter_info *filter_info = + HINIC_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); + + switch (filter->filter_info.proto) { + case IPPROTO_VRRP: + (void)hinic_clear_fdir_tcam(nic_dev->hwdev, TCAM_PKT_VRRP); + break; + + case IPPROTO_TCP: + if (filter->filter_info.dst_port == RTE_BE16(BGP_DPORT_ID)) + (void)hinic_clear_fdir_tcam(nic_dev->hwdev, + TCAM_PKT_BGP_DPORT); + else if (filter->filter_info.src_port == RTE_BE16(BGP_DPORT_ID)) + (void)hinic_clear_fdir_tcam(nic_dev->hwdev, + TCAM_PKT_BGP_SPORT); + break; + + default: + break; + } + + hinic_filter_info_init(filter, filter_info); + + filter_info->pkt_filters[filter->index].enable = false; + filter_info->pkt_filters[filter->index].pkt_proto = 0; + + PMD_DRV_LOG(INFO, "Del 5tuple succeed, type: 0x%x, qid: 0x%x, enable: 0x%x", + filter_info->pkt_type, + filter_info->pkt_filters[filter->index].qid, + filter_info->pkt_filters[filter->index].enable); + (void)hinic_set_fdir_filter(nic_dev->hwdev, filter_info->pkt_type, + filter_info->pkt_filters[filter->index].qid, + filter_info->pkt_filters[filter->index].enable, + true); + + filter_info->pkt_type = 0; + filter_info->qid = 0; + filter_info->pkt_filters[filter->index].qid = 0; + filter_info->type_mask &= ~(1 << (filter->index)); + TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries); + + rte_free(filter); +} + +/* + * Add or delete a ntuple filter + * + * @param dev + * Pointer to struct rte_eth_dev. + * @param ntuple_filter + * Pointer to struct rte_eth_ntuple_filter + * @param add + * If true, add filter; if false, remove filter + * @return + * - On success, zero. + * - On failure, a negative value. 
+ */ +static int hinic_add_del_ntuple_filter(struct rte_eth_dev *dev, + struct rte_eth_ntuple_filter *ntuple_filter, + bool add) +{ + struct hinic_filter_info *filter_info = + HINIC_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + struct hinic_5tuple_filter_info filter_5tuple; + struct hinic_5tuple_filter *filter; + int ret; + + if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) { + PMD_DRV_LOG(ERR, "Only 5tuple is supported."); + return -EINVAL; + } + + memset(&filter_5tuple, 0, sizeof(struct hinic_5tuple_filter_info)); + ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple); + if (ret < 0) + return ret; + + filter = hinic_5tuple_filter_lookup(&filter_info->fivetuple_list, + &filter_5tuple); + if (filter != NULL && add) { + PMD_DRV_LOG(ERR, "Filter exists."); + return -EEXIST; + } + if (filter == NULL && !add) { + PMD_DRV_LOG(ERR, "Filter doesn't exist."); + return -ENOENT; + } + + if (add) { + filter = rte_zmalloc("hinic_5tuple_filter", + sizeof(struct hinic_5tuple_filter), 0); + if (filter == NULL) + return -ENOMEM; + rte_memcpy(&filter->filter_info, &filter_5tuple, + sizeof(struct hinic_5tuple_filter_info)); + filter->queue = ntuple_filter->queue; + + filter_info->qid = ntuple_filter->queue; + + ret = hinic_add_5tuple_filter(dev, filter); + if (ret) + rte_free(filter); + + return ret; + } + + hinic_remove_5tuple_filter(dev, filter); + + return 0; +} + +static inline int +hinic_check_ethertype_filter(struct rte_eth_ethertype_filter *filter) +{ + if (filter->queue >= HINIC_MAX_RX_QUEUE_NUM) + return -EINVAL; + + if (filter->ether_type == RTE_ETHER_TYPE_IPV4 || + filter->ether_type == RTE_ETHER_TYPE_IPV6) { + PMD_DRV_LOG(ERR, "Unsupported ether_type(0x%04x) in" + " ethertype filter", filter->ether_type); + return -EINVAL; + } + + if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) { + PMD_DRV_LOG(ERR, "Mac compare is not supported"); + return -EINVAL; + } + if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) { + PMD_DRV_LOG(ERR, "Drop option is not supported"); + return -EINVAL; + } + + return 0; +} + +static inline int +hinic_ethertype_filter_lookup(struct hinic_filter_info *filter_info, + struct hinic_pkt_filter *ethertype_filter) +{ + switch (ethertype_filter->pkt_proto) { + case RTE_ETHER_TYPE_SLOW: + filter_info->pkt_type = PKT_LACP_TYPE; + break; + + case RTE_ETHER_TYPE_ARP: + filter_info->pkt_type = PKT_ARP_TYPE; + break; + + default: + PMD_DRV_LOG(ERR, "Just support LACP/ARP for ethertype filters"); + return -EIO; + } + + return HINIC_PKT_TYPE_FIND_ID(filter_info->pkt_type); +} + +static inline int +hinic_ethertype_filter_insert(struct hinic_filter_info *filter_info, + struct hinic_pkt_filter *ethertype_filter) +{ + int id; + + /* Find LACP or VRRP type id */ + id = hinic_ethertype_filter_lookup(filter_info, ethertype_filter); + if (id < 0) + return -EINVAL; + + if (!(filter_info->type_mask & (1 << id))) { + filter_info->type_mask |= 1 << id; + filter_info->pkt_filters[id].pkt_proto = + ethertype_filter->pkt_proto; + filter_info->pkt_filters[id].enable = ethertype_filter->enable; + filter_info->qid = ethertype_filter->qid; + return id; + } + + PMD_DRV_LOG(ERR, "Filter type: %d exists", id); + return -EINVAL; +} + +static inline void +hinic_ethertype_filter_remove(struct hinic_filter_info *filter_info, + uint8_t idx) +{ + if (idx >= HINIC_MAX_Q_FILTERS) + return; + + filter_info->pkt_type = 0; + filter_info->type_mask &= ~(1 << idx); + filter_info->pkt_filters[idx].pkt_proto = (uint16_t)0; + filter_info->pkt_filters[idx].enable = FALSE; + filter_info->pkt_filters[idx].qid = 0; +} + +static 
inline int +hinic_add_del_ethertype_filter(struct rte_eth_dev *dev, + struct rte_eth_ethertype_filter *filter, + bool add) +{ + struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); + struct hinic_filter_info *filter_info = + HINIC_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + struct hinic_pkt_filter ethertype_filter; + int i; + int ret_fw; + + if (hinic_check_ethertype_filter(filter)) + return -EINVAL; + + if (add) { + ethertype_filter.pkt_proto = filter->ether_type; + ethertype_filter.enable = TRUE; + ethertype_filter.qid = (u8)filter->queue; + i = hinic_ethertype_filter_insert(filter_info, + &ethertype_filter); + if (i < 0) + return -ENOSPC; + + ret_fw = hinic_set_fdir_filter(nic_dev->hwdev, + filter_info->pkt_type, filter_info->qid, + filter_info->pkt_filters[i].enable, true); + if (ret_fw) { + PMD_DRV_LOG(ERR, "add ethertype failed, type: 0x%x, qid: 0x%x, enable: 0x%x", + filter_info->pkt_type, filter->queue, + filter_info->pkt_filters[i].enable); + + hinic_ethertype_filter_remove(filter_info, i); + return -ENOENT; + } + PMD_DRV_LOG(INFO, "Add ethertype succeed, type: 0x%x, qid: 0x%x, enable: 0x%x", + filter_info->pkt_type, filter->queue, + filter_info->pkt_filters[i].enable); + + switch (ethertype_filter.pkt_proto) { + case RTE_ETHER_TYPE_SLOW: + ret_fw = hinic_set_lacp_tcam(nic_dev); + if (ret_fw) { + PMD_DRV_LOG(ERR, "Add lacp tcam failed"); + hinic_ethertype_filter_remove(filter_info, i); + return -ENOENT; + } + + PMD_DRV_LOG(INFO, "Add lacp tcam succeed"); + break; + default: + break; + } + } else { + ethertype_filter.pkt_proto = filter->ether_type; + i = hinic_ethertype_filter_lookup(filter_info, + &ethertype_filter); + + if ((filter_info->type_mask & (1 << i))) { + filter_info->pkt_filters[i].enable = FALSE; + (void)hinic_set_fdir_filter(nic_dev->hwdev, + filter_info->pkt_type, + filter_info->pkt_filters[i].qid, + filter_info->pkt_filters[i].enable, + true); + + PMD_DRV_LOG(INFO, "Del ethertype succeed, type: 0x%x, qid: 0x%x, enable: 0x%x", + filter_info->pkt_type, + filter_info->pkt_filters[i].qid, + filter_info->pkt_filters[i].enable); + + switch (ethertype_filter.pkt_proto) { + case RTE_ETHER_TYPE_SLOW: + (void)hinic_clear_fdir_tcam(nic_dev->hwdev, + TCAM_PKT_LACP); + PMD_DRV_LOG(INFO, "Del lacp tcam succeed"); + break; + default: + break; + } + + hinic_ethertype_filter_remove(filter_info, i); + + } else { + PMD_DRV_LOG(ERR, "Ethertype doesn't exist, type: 0x%x, qid: 0x%x, enable: 0x%x", + filter_info->pkt_type, filter->queue, + filter_info->pkt_filters[i].enable); + return -ENOENT; + } + } + + return 0; +} + +static int hinic_fdir_info_init(struct hinic_fdir_rule *rule, + struct hinic_fdir_info *fdir_info) +{ + switch (rule->mask.src_ipv4_mask) { + case UINT32_MAX: + fdir_info->fdir_flag = HINIC_ATR_FLOW_TYPE_IPV4_SIP; + fdir_info->qid = rule->queue; + fdir_info->fdir_key = rule->hinic_fdir.src_ip; + return 0; + + case 0: + break; + + default: + PMD_DRV_LOG(ERR, "Invalid src_ip mask."); + return -EINVAL; + } + + switch (rule->mask.dst_ipv4_mask) { + case UINT32_MAX: + fdir_info->fdir_flag = HINIC_ATR_FLOW_TYPE_IPV4_DIP; + fdir_info->qid = rule->queue; + fdir_info->fdir_key = rule->hinic_fdir.dst_ip; + return 0; + + case 0: + break; + + default: + PMD_DRV_LOG(ERR, "Invalid dst_ip mask."); + return -EINVAL; + } + + if (fdir_info->fdir_flag == 0) { + PMD_DRV_LOG(ERR, "All support mask is NULL."); + return -EINVAL; + } + + return 0; +} + +static inline int hinic_add_del_fdir_filter(struct rte_eth_dev *dev, + struct hinic_fdir_rule *rule, bool add) +{ + struct
hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); + struct hinic_fdir_info fdir_info; + int ret; + + memset(&fdir_info, 0, sizeof(struct hinic_fdir_info)); + + ret = hinic_fdir_info_init(rule, &fdir_info); + if (ret) { + PMD_DRV_LOG(ERR, "Init hinic fdir info failed!"); + return ret; + } + + if (add) { + ret = hinic_set_normal_filter(nic_dev->hwdev, fdir_info.qid, + true, fdir_info.fdir_key, + true, fdir_info.fdir_flag); + if (ret) { + PMD_DRV_LOG(ERR, "Add fdir filter failed, flag: 0x%x, qid: 0x%x, key: 0x%x", + fdir_info.fdir_flag, fdir_info.qid, + fdir_info.fdir_key); + return -ENOENT; + } + PMD_DRV_LOG(INFO, "Add fdir filter succeed, flag: 0x%x, qid: 0x%x, key: 0x%x", + fdir_info.fdir_flag, fdir_info.qid, + fdir_info.fdir_key); + } else { + ret = hinic_set_normal_filter(nic_dev->hwdev, fdir_info.qid, + false, fdir_info.fdir_key, true, + fdir_info.fdir_flag); + if (ret) { + PMD_DRV_LOG(ERR, "Del fdir filter failed, flag: 0x%x, qid: 0x%x, key: 0x%x", + fdir_info.fdir_flag, fdir_info.qid, + fdir_info.fdir_key); + return -ENOENT; + } + PMD_DRV_LOG(INFO, "Del fdir filter succeed, flag: 0x%x, qid: 0x%x, key: 0x%x", + fdir_info.fdir_flag, fdir_info.qid, + fdir_info.fdir_key); + } + + return 0; +} + +static void tcam_translate_key_y(u8 *key_y, u8 *src_input, u8 *mask, u8 len) +{ + u8 idx; + + for (idx = 0; idx < len; idx++) + key_y[idx] = src_input[idx] & mask[idx]; +} + +static void tcam_translate_key_x(u8 *key_x, u8 *key_y, u8 *mask, u8 len) +{ + u8 idx; + + for (idx = 0; idx < len; idx++) + key_x[idx] = key_y[idx] ^ mask[idx]; +} + +static void tcam_key_calculate(struct tag_tcam_key *tcam_key, + struct tag_tcam_cfg_rule *fdir_tcam_rule) +{ + tcam_translate_key_y(fdir_tcam_rule->key.y, + (u8 *)(&tcam_key->key_info), + (u8 *)(&tcam_key->key_mask), + TCAM_FLOW_KEY_SIZE); + tcam_translate_key_x(fdir_tcam_rule->key.x, + fdir_tcam_rule->key.y, + (u8 *)(&tcam_key->key_mask), + TCAM_FLOW_KEY_SIZE); +} + +static int hinic_fdir_tcam_ipv4_init(struct rte_eth_dev *dev, + struct hinic_fdir_rule *rule, + struct tag_tcam_key *tcam_key) +{ + struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); + + switch (rule->mask.dst_ipv4_mask) { + case UINT32_MAX: + tcam_key->key_info.ext_dip_h = + (rule->hinic_fdir.dst_ip >> 16) & 0xffffU; + tcam_key->key_info.ext_dip_l = + rule->hinic_fdir.dst_ip & 0xffffU; + tcam_key->key_mask.ext_dip_h = + (rule->mask.dst_ipv4_mask >> 16) & 0xffffU; + tcam_key->key_mask.ext_dip_l = + rule->mask.dst_ipv4_mask & 0xffffU; + break; + + case 0: + break; + + default: + PMD_DRV_LOG(ERR, "invalid src_ip mask."); + return -EINVAL; + } + + if (rule->mask.dst_port_mask > 0) { + tcam_key->key_info.dst_port = rule->hinic_fdir.dst_port; + tcam_key->key_mask.dst_port = rule->mask.dst_port_mask; + } + + if (rule->mask.src_port_mask > 0) { + tcam_key->key_info.src_port = rule->hinic_fdir.src_port; + tcam_key->key_mask.src_port = rule->mask.src_port_mask; + } + + switch (rule->mask.tunnel_flag) { + case UINT16_MAX: + tcam_key->key_info.tunnel_flag = FDIR_TCAM_TUNNEL_PACKET; + tcam_key->key_mask.tunnel_flag = UINT8_MAX; + break; + + case 0: + tcam_key->key_info.tunnel_flag = FDIR_TCAM_NORMAL_PACKET; + tcam_key->key_mask.tunnel_flag = 0; + break; + + default: + PMD_DRV_LOG(ERR, "invalid tunnel flag mask."); + return -EINVAL; + } + + if (rule->mask.tunnel_inner_dst_port_mask > 0) { + tcam_key->key_info.dst_port = + rule->hinic_fdir.tunnel_inner_dst_port; + tcam_key->key_mask.dst_port = + rule->mask.tunnel_inner_dst_port_mask; + } + + if 
(rule->mask.tunnel_inner_src_port_mask > 0) { + tcam_key->key_info.src_port = + rule->hinic_fdir.tunnel_inner_src_port; + tcam_key->key_mask.src_port = + rule->mask.tunnel_inner_src_port_mask; + } + + switch (rule->mask.proto_mask) { + case UINT16_MAX: + tcam_key->key_info.protocol = rule->hinic_fdir.proto; + tcam_key->key_mask.protocol = UINT8_MAX; + break; + + case 0: + break; + + default: + PMD_DRV_LOG(ERR, "invalid tunnel flag mask."); + return -EINVAL; + } + + tcam_key->key_mask.function_id = UINT16_MAX; + tcam_key->key_info.function_id = + hinic_global_func_id(nic_dev->hwdev) & 0x7fff; + + return 0; +} + +static int hinic_fdir_tcam_ipv6_init(struct rte_eth_dev *dev, + struct hinic_fdir_rule *rule, + struct tag_tcam_key *tcam_key) +{ + struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); + + switch (rule->mask.dst_ipv6_mask) { + case UINT16_MAX: + tcam_key->key_info_ipv6.ipv6_key0 = + ((rule->hinic_fdir.dst_ipv6[0] << 8) & 0xff00) | + rule->hinic_fdir.dst_ipv6[1]; + tcam_key->key_info_ipv6.ipv6_key1 = + ((rule->hinic_fdir.dst_ipv6[2] << 8) & 0xff00) | + rule->hinic_fdir.dst_ipv6[3]; + tcam_key->key_info_ipv6.ipv6_key2 = + ((rule->hinic_fdir.dst_ipv6[4] << 8) & 0xff00) | + rule->hinic_fdir.dst_ipv6[5]; + tcam_key->key_info_ipv6.ipv6_key3 = + ((rule->hinic_fdir.dst_ipv6[6] << 8) & 0xff00) | + rule->hinic_fdir.dst_ipv6[7]; + tcam_key->key_info_ipv6.ipv6_key4 = + ((rule->hinic_fdir.dst_ipv6[8] << 8) & 0xff00) | + rule->hinic_fdir.dst_ipv6[9]; + tcam_key->key_info_ipv6.ipv6_key5 = + ((rule->hinic_fdir.dst_ipv6[10] << 8) & 0xff00) | + rule->hinic_fdir.dst_ipv6[11]; + tcam_key->key_info_ipv6.ipv6_key6 = + ((rule->hinic_fdir.dst_ipv6[12] << 8) & 0xff00) | + rule->hinic_fdir.dst_ipv6[13]; + tcam_key->key_info_ipv6.ipv6_key7 = + ((rule->hinic_fdir.dst_ipv6[14] << 8) & 0xff00) | + rule->hinic_fdir.dst_ipv6[15]; + tcam_key->key_mask_ipv6.ipv6_key0 = UINT16_MAX; + tcam_key->key_mask_ipv6.ipv6_key1 = UINT16_MAX; + tcam_key->key_mask_ipv6.ipv6_key2 = UINT16_MAX; + tcam_key->key_mask_ipv6.ipv6_key3 = UINT16_MAX; + tcam_key->key_mask_ipv6.ipv6_key4 = UINT16_MAX; + tcam_key->key_mask_ipv6.ipv6_key5 = UINT16_MAX; + tcam_key->key_mask_ipv6.ipv6_key6 = UINT16_MAX; + tcam_key->key_mask_ipv6.ipv6_key7 = UINT16_MAX; + break; + + case 0: + break; + + default: + PMD_DRV_LOG(ERR, "invalid dst_ipv6 mask"); + return -EINVAL; + } + + if (rule->mask.dst_port_mask > 0) { + tcam_key->key_info_ipv6.dst_port = rule->hinic_fdir.dst_port; + tcam_key->key_mask_ipv6.dst_port = rule->mask.dst_port_mask; + } + + switch (rule->mask.proto_mask) { + case UINT16_MAX: + tcam_key->key_info_ipv6.protocol = + (rule->hinic_fdir.proto) & 0x7F; + tcam_key->key_mask_ipv6.protocol = 0x7F; + break; + + case 0: + break; + + default: + PMD_DRV_LOG(ERR, "invalid tunnel flag mask"); + return -EINVAL; + } + + tcam_key->key_info_ipv6.ipv6_flag = 1; + tcam_key->key_mask_ipv6.ipv6_flag = 1; + + tcam_key->key_mask_ipv6.function_id = UINT8_MAX; + tcam_key->key_info_ipv6.function_id = + (u8)hinic_global_func_id(nic_dev->hwdev); + + return 0; +} + +static int hinic_fdir_tcam_info_init(struct rte_eth_dev *dev, + struct hinic_fdir_rule *rule, + struct tag_tcam_key *tcam_key, + struct tag_tcam_cfg_rule *fdir_tcam_rule) +{ + int ret = -1; + + if (rule->mask.dst_ipv4_mask == UINT32_MAX) + ret = hinic_fdir_tcam_ipv4_init(dev, rule, tcam_key); + else if (rule->mask.dst_ipv6_mask == UINT16_MAX) + ret = hinic_fdir_tcam_ipv6_init(dev, rule, tcam_key); + + if (ret < 0) + return ret; + + fdir_tcam_rule->data.qid = rule->queue; + + 
tcam_key_calculate(tcam_key, fdir_tcam_rule); + + return 0; +} + +static inline struct hinic_tcam_filter * +hinic_tcam_filter_lookup(struct hinic_tcam_filter_list *filter_list, + struct tag_tcam_key *key) +{ + struct hinic_tcam_filter *it; + + TAILQ_FOREACH(it, filter_list, entries) { + if (memcmp(key, &it->tcam_key, + sizeof(struct tag_tcam_key)) == 0) { + return it; + } + } + + return NULL; +} + +static int hinic_lookup_new_tcam_filter(struct rte_eth_dev *dev, + struct hinic_tcam_info *tcam_info, + struct hinic_tcam_filter *tcam_filter, + u16 *tcam_index) +{ + int index; + int max_index; + struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); + + if (hinic_func_type(nic_dev->hwdev) == TYPE_VF) + max_index = HINIC_VF_MAX_TCAM_FILTERS; + else + max_index = HINIC_PF_MAX_TCAM_FILTERS; + + for (index = 0; index < max_index; index++) { + if (tcam_info->tcam_index_array[index] == 0) + break; + } + + if (index == max_index) { + PMD_DRV_LOG(ERR, "function 0x%x tcam filters only support %d filter rules", + hinic_global_func_id(nic_dev->hwdev), max_index); + return -EINVAL; + } + + tcam_filter->index = index; + *tcam_index = index; + + return 0; +} + +static int hinic_add_tcam_filter(struct rte_eth_dev *dev, + struct hinic_tcam_filter *tcam_filter, + struct tag_tcam_cfg_rule *fdir_tcam_rule) +{ + struct hinic_tcam_info *tcam_info = + HINIC_DEV_PRIVATE_TO_TCAM_INFO(dev->data->dev_private); + struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); + u16 index = 0; + u16 tcam_block_index = 0; + int rc; + + if (hinic_lookup_new_tcam_filter(dev, tcam_info, tcam_filter, &index)) + return -EINVAL; + + if (tcam_info->tcam_rule_nums == 0) { + if (hinic_func_type(nic_dev->hwdev) == TYPE_VF) { + rc = hinic_alloc_tcam_block(nic_dev->hwdev, + HINIC_TCAM_BLOCK_TYPE_VF, &tcam_block_index); + if (rc != 0) { + PMD_DRV_LOG(ERR, "VF fdir filter tcam alloc block failed!"); + return -EFAULT; + } + } else { + rc = hinic_alloc_tcam_block(nic_dev->hwdev, + HINIC_TCAM_BLOCK_TYPE_PF, &tcam_block_index); + if (rc != 0) { + PMD_DRV_LOG(ERR, "PF fdir filter tcam alloc block failed!"); + return -EFAULT; + } + } + + tcam_info->tcam_block_index = tcam_block_index; + } else { + tcam_block_index = tcam_info->tcam_block_index; + } + + if (hinic_func_type(nic_dev->hwdev) == TYPE_VF) { + fdir_tcam_rule->index = + HINIC_PKT_VF_TCAM_INDEX_START(tcam_block_index) + index; + } else { + fdir_tcam_rule->index = + tcam_block_index * HINIC_PF_MAX_TCAM_FILTERS + index; + } + + rc = hinic_add_tcam_rule(nic_dev->hwdev, fdir_tcam_rule); + if (rc != 0) { + PMD_DRV_LOG(ERR, "Fdir_tcam_rule add failed!"); + return -EFAULT; + } + + PMD_DRV_LOG(INFO, "Add fdir_tcam_rule function_id: 0x%x," + "tcam_block_id: %d, index: %d, queue: %d, tcam_rule_nums: %d succeed", + hinic_global_func_id(nic_dev->hwdev), tcam_block_index, + fdir_tcam_rule->index, fdir_tcam_rule->data.qid, + tcam_info->tcam_rule_nums + 1); + + if (tcam_info->tcam_rule_nums == 0) { + rc = hinic_set_fdir_filter(nic_dev->hwdev, 0, 0, 0, true); + if (rc < 0) { + (void)hinic_del_tcam_rule(nic_dev->hwdev, + fdir_tcam_rule->index); + return rc; + } + } + + TAILQ_INSERT_TAIL(&tcam_info->tcam_list, tcam_filter, entries); + + tcam_info->tcam_index_array[index] = 1; + tcam_info->tcam_rule_nums++; + + return 0; +} + +static int hinic_del_tcam_filter(struct rte_eth_dev *dev, + struct hinic_tcam_filter *tcam_filter) +{ + struct hinic_tcam_info *tcam_info = + HINIC_DEV_PRIVATE_TO_TCAM_INFO(dev->data->dev_private); + struct hinic_nic_dev *nic_dev = 
HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); + u32 index = 0; + u16 tcam_block_index = tcam_info->tcam_block_index; + int rc; + u8 block_type = 0; + + if (hinic_func_type(nic_dev->hwdev) == TYPE_VF) { + index = HINIC_PKT_VF_TCAM_INDEX_START(tcam_block_index) + + tcam_filter->index; + block_type = HINIC_TCAM_BLOCK_TYPE_VF; + } else { + index = tcam_block_index * HINIC_PF_MAX_TCAM_FILTERS + + tcam_filter->index; + block_type = HINIC_TCAM_BLOCK_TYPE_PF; + } + + rc = hinic_del_tcam_rule(nic_dev->hwdev, index); + if (rc != 0) { + PMD_DRV_LOG(ERR, "fdir_tcam_rule del failed!"); + return -EFAULT; + } + + PMD_DRV_LOG(INFO, "Del fdir_tcam_rule function_id: 0x%x, " + "tcam_block_id: %d, index: %d, tcam_rule_nums: %d succeed", + hinic_global_func_id(nic_dev->hwdev), tcam_block_index, index, + tcam_info->tcam_rule_nums - 1); + + TAILQ_REMOVE(&tcam_info->tcam_list, tcam_filter, entries); + + tcam_info->tcam_index_array[tcam_filter->index] = 0; + + rte_free(tcam_filter); + + tcam_info->tcam_rule_nums--; + + if (tcam_info->tcam_rule_nums == 0) { + (void)hinic_free_tcam_block(nic_dev->hwdev, block_type, + &tcam_block_index); + } + + return 0; +} + +static int hinic_add_del_tcam_fdir_filter(struct rte_eth_dev *dev, + struct hinic_fdir_rule *rule, bool add) +{ + struct hinic_tcam_info *tcam_info = + HINIC_DEV_PRIVATE_TO_TCAM_INFO(dev->data->dev_private); + struct hinic_tcam_filter *tcam_filter; + struct tag_tcam_cfg_rule fdir_tcam_rule; + struct tag_tcam_key tcam_key; + int ret; + + memset(&fdir_tcam_rule, 0, sizeof(struct tag_tcam_cfg_rule)); + memset((void *)&tcam_key, 0, sizeof(struct tag_tcam_key)); + + ret = hinic_fdir_tcam_info_init(dev, rule, &tcam_key, &fdir_tcam_rule); + if (ret) { + PMD_DRV_LOG(ERR, "Init hinic fdir info failed!"); + return ret; + } + + tcam_filter = hinic_tcam_filter_lookup(&tcam_info->tcam_list, + &tcam_key); + if (tcam_filter != NULL && add) { + PMD_DRV_LOG(ERR, "Filter exists."); + return -EEXIST; + } + if (tcam_filter == NULL && !add) { + PMD_DRV_LOG(ERR, "Filter doesn't exist."); + return -ENOENT; + } + + if (add) { + tcam_filter = rte_zmalloc("hinic_5tuple_filter", + sizeof(struct hinic_tcam_filter), 0); + if (tcam_filter == NULL) + return -ENOMEM; + (void)rte_memcpy(&tcam_filter->tcam_key, + &tcam_key, sizeof(struct tag_tcam_key)); + tcam_filter->queue = fdir_tcam_rule.data.qid; + + ret = hinic_add_tcam_filter(dev, tcam_filter, &fdir_tcam_rule); + if (ret < 0) { + rte_free(tcam_filter); + return ret; + } + + rule->tcam_index = fdir_tcam_rule.index; + + } else { + PMD_DRV_LOG(INFO, "begin to hinic_del_tcam_filter"); + ret = hinic_del_tcam_filter(dev, tcam_filter); + if (ret < 0) + return ret; + } + + return 0; +} + +/** + * Create or destroy a flow rule. + * Theoretically one rule can match more than one filter. + * We will let it use the filter which it hits first. + * So, the sequence matters.
+ */ +static struct rte_flow *hinic_flow_create(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + int ret; + struct rte_eth_ntuple_filter ntuple_filter; + struct rte_eth_ethertype_filter ethertype_filter; + struct hinic_fdir_rule fdir_rule; + struct rte_flow *flow = NULL; + struct hinic_ethertype_filter_ele *ethertype_filter_ptr; + struct hinic_ntuple_filter_ele *ntuple_filter_ptr; + struct hinic_fdir_rule_ele *fdir_rule_ptr; + struct hinic_flow_mem *hinic_flow_mem_ptr; + struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); + + flow = rte_zmalloc("hinic_rte_flow", sizeof(struct rte_flow), 0); + if (!flow) { + PMD_DRV_LOG(ERR, "Failed to allocate flow memory"); + return NULL; + } + + hinic_flow_mem_ptr = rte_zmalloc("hinic_flow_mem", + sizeof(struct hinic_flow_mem), 0); + if (!hinic_flow_mem_ptr) { + PMD_DRV_LOG(ERR, "Failed to allocate hinic_flow_mem_ptr"); + rte_free(flow); + return NULL; + } + + hinic_flow_mem_ptr->flow = flow; + TAILQ_INSERT_TAIL(&nic_dev->hinic_flow_list, hinic_flow_mem_ptr, + entries); + + /* Add ntuple filter */ + memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter)); + ret = hinic_parse_ntuple_filter(dev, attr, pattern, + actions, &ntuple_filter, error); + if (!ret) { + ret = hinic_add_del_ntuple_filter(dev, &ntuple_filter, TRUE); + if (!ret) { + ntuple_filter_ptr = rte_zmalloc("hinic_ntuple_filter", + sizeof(struct hinic_ntuple_filter_ele), 0); + rte_memcpy(&ntuple_filter_ptr->filter_info, + &ntuple_filter, + sizeof(struct rte_eth_ntuple_filter)); + TAILQ_INSERT_TAIL(&nic_dev->filter_ntuple_list, + ntuple_filter_ptr, entries); + flow->rule = ntuple_filter_ptr; + flow->filter_type = RTE_ETH_FILTER_NTUPLE; + + PMD_DRV_LOG(INFO, "Create flow ntuple succeed, func_id: 0x%x", + hinic_global_func_id(nic_dev->hwdev)); + return flow; + } + goto out; + } + + /* Add ethertype filter */ + memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter)); + ret = hinic_parse_ethertype_filter(dev, attr, pattern, actions, + &ethertype_filter, error); + if (!ret) { + ret = hinic_add_del_ethertype_filter(dev, &ethertype_filter, + TRUE); + if (!ret) { + ethertype_filter_ptr = + rte_zmalloc("hinic_ethertype_filter", + sizeof(struct hinic_ethertype_filter_ele), 0); + rte_memcpy(&ethertype_filter_ptr->filter_info, + &ethertype_filter, + sizeof(struct rte_eth_ethertype_filter)); + TAILQ_INSERT_TAIL(&nic_dev->filter_ethertype_list, + ethertype_filter_ptr, entries); + flow->rule = ethertype_filter_ptr; + flow->filter_type = RTE_ETH_FILTER_ETHERTYPE; + + PMD_DRV_LOG(INFO, "Create flow ethertype succeed, func_id: 0x%x", + hinic_global_func_id(nic_dev->hwdev)); + return flow; + } + goto out; + } + + /* Add fdir filter */ + memset(&fdir_rule, 0, sizeof(struct hinic_fdir_rule)); + ret = hinic_parse_fdir_filter(dev, attr, pattern, + actions, &fdir_rule, error); + if (!ret) { + if (fdir_rule.mode == HINIC_FDIR_MODE_NORMAL) { + ret = hinic_add_del_fdir_filter(dev, + &fdir_rule, TRUE); + } else if (fdir_rule.mode == HINIC_FDIR_MODE_TCAM) { + ret = hinic_add_del_tcam_fdir_filter(dev, + &fdir_rule, TRUE); + } else { + PMD_DRV_LOG(INFO, "flow fdir rule create failed, rule mode wrong"); + goto out; + } + if (!ret) { + fdir_rule_ptr = rte_zmalloc("hinic_fdir_rule", + sizeof(struct hinic_fdir_rule_ele), 0); + rte_memcpy(&fdir_rule_ptr->filter_info, &fdir_rule, + sizeof(struct hinic_fdir_rule)); + TAILQ_INSERT_TAIL(&nic_dev->filter_fdir_rule_list, +
fdir_rule_ptr, entries); + flow->rule = fdir_rule_ptr; + flow->filter_type = RTE_ETH_FILTER_FDIR; + + PMD_DRV_LOG(INFO, "Create flow fdir rule succeed, func_id : 0x%x", + hinic_global_func_id(nic_dev->hwdev)); + return flow; + } + goto out; + } + +out: + TAILQ_REMOVE(&nic_dev->hinic_flow_list, hinic_flow_mem_ptr, entries); + rte_flow_error_set(error, -ret, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Failed to create flow."); + rte_free(hinic_flow_mem_ptr); + rte_free(flow); + return NULL; +} + +/* Destroy a flow rule on hinic. */ +static int hinic_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow, + struct rte_flow_error *error) +{ + int ret; + struct rte_flow *pmd_flow = flow; + enum rte_filter_type filter_type = pmd_flow->filter_type; + struct rte_eth_ntuple_filter ntuple_filter; + struct rte_eth_ethertype_filter ethertype_filter; + struct hinic_fdir_rule fdir_rule; + struct hinic_ntuple_filter_ele *ntuple_filter_ptr; + struct hinic_ethertype_filter_ele *ethertype_filter_ptr; + struct hinic_fdir_rule_ele *fdir_rule_ptr; + struct hinic_flow_mem *hinic_flow_mem_ptr; + struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); + + switch (filter_type) { + case RTE_ETH_FILTER_NTUPLE: + ntuple_filter_ptr = (struct hinic_ntuple_filter_ele *) + pmd_flow->rule; + rte_memcpy(&ntuple_filter, &ntuple_filter_ptr->filter_info, + sizeof(struct rte_eth_ntuple_filter)); + ret = hinic_add_del_ntuple_filter(dev, &ntuple_filter, FALSE); + if (!ret) { + TAILQ_REMOVE(&nic_dev->filter_ntuple_list, + ntuple_filter_ptr, entries); + rte_free(ntuple_filter_ptr); + } + break; + case RTE_ETH_FILTER_ETHERTYPE: + ethertype_filter_ptr = (struct hinic_ethertype_filter_ele *) + pmd_flow->rule; + rte_memcpy(&ethertype_filter, + &ethertype_filter_ptr->filter_info, + sizeof(struct rte_eth_ethertype_filter)); + ret = hinic_add_del_ethertype_filter(dev, + &ethertype_filter, FALSE); + if (!ret) { + TAILQ_REMOVE(&nic_dev->filter_ethertype_list, + ethertype_filter_ptr, entries); + rte_free(ethertype_filter_ptr); + } + break; + case RTE_ETH_FILTER_FDIR: + fdir_rule_ptr = (struct hinic_fdir_rule_ele *)pmd_flow->rule; + rte_memcpy(&fdir_rule, + &fdir_rule_ptr->filter_info, + sizeof(struct hinic_fdir_rule)); + if (fdir_rule.mode == HINIC_FDIR_MODE_NORMAL) { + ret = hinic_add_del_fdir_filter(dev, &fdir_rule, FALSE); + } else if (fdir_rule.mode == HINIC_FDIR_MODE_TCAM) { + ret = hinic_add_del_tcam_fdir_filter(dev, &fdir_rule, + FALSE); + } else { + PMD_DRV_LOG(ERR, "FDIR Filter type is wrong!"); + ret = -EINVAL; + } + if (!ret) { + TAILQ_REMOVE(&nic_dev->filter_fdir_rule_list, + fdir_rule_ptr, entries); + rte_free(fdir_rule_ptr); + } + break; + default: + PMD_DRV_LOG(WARNING, "Filter type (%d) is not supported", + filter_type); + ret = -EINVAL; + break; + } + + if (ret) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_HANDLE, + NULL, "Failed to destroy flow"); + return ret; + } + + TAILQ_FOREACH(hinic_flow_mem_ptr, &nic_dev->hinic_flow_list, entries) { + if (hinic_flow_mem_ptr->flow == pmd_flow) { + TAILQ_REMOVE(&nic_dev->hinic_flow_list, + hinic_flow_mem_ptr, entries); + rte_free(hinic_flow_mem_ptr); + break; + } + } + rte_free(flow); + + PMD_DRV_LOG(INFO, "Destroy flow succeed, func_id: 0x%x", + hinic_global_func_id(nic_dev->hwdev)); + + return ret; +} + +/* Remove all the n-tuple filters */ +static void hinic_clear_all_ntuple_filter(struct rte_eth_dev *dev) +{ + struct hinic_filter_info *filter_info = + HINIC_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + struct hinic_5tuple_filter *p_5tuple; + + while
((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list))) + hinic_remove_5tuple_filter(dev, p_5tuple); +} + +/* Remove all the ether type filters */ +static void hinic_clear_all_ethertype_filter(struct rte_eth_dev *dev) +{ + struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); + struct hinic_filter_info *filter_info = + HINIC_DEV_PRIVATE_TO_FILTER_INFO(nic_dev); + int ret = 0; + + if (filter_info->type_mask & + (1 << HINIC_PKT_TYPE_FIND_ID(PKT_LACP_TYPE))) { + hinic_ethertype_filter_remove(filter_info, + HINIC_PKT_TYPE_FIND_ID(PKT_LACP_TYPE)); + ret = hinic_set_fdir_filter(nic_dev->hwdev, PKT_LACP_TYPE, + filter_info->qid, false, true); + + (void)hinic_clear_fdir_tcam(nic_dev->hwdev, TCAM_PKT_LACP); + } + + if (filter_info->type_mask & + (1 << HINIC_PKT_TYPE_FIND_ID(PKT_ARP_TYPE))) { + hinic_ethertype_filter_remove(filter_info, + HINIC_PKT_TYPE_FIND_ID(PKT_ARP_TYPE)); + ret = hinic_set_fdir_filter(nic_dev->hwdev, PKT_ARP_TYPE, + filter_info->qid, false, true); + } + + if (ret) + PMD_DRV_LOG(ERR, "Clear ethertype failed, filter type: 0x%x", + filter_info->pkt_type); +} + +/* Remove all the ether type filters */ +static void hinic_clear_all_fdir_filter(struct rte_eth_dev *dev) +{ + struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); + struct hinic_tcam_info *tcam_info = + HINIC_DEV_PRIVATE_TO_TCAM_INFO(dev->data->dev_private); + struct hinic_tcam_filter *tcam_filter_ptr; + + while ((tcam_filter_ptr = TAILQ_FIRST(&tcam_info->tcam_list))) + (void)hinic_del_tcam_filter(dev, tcam_filter_ptr); + + (void)hinic_set_fdir_filter(nic_dev->hwdev, 0, 0, 0, false); + + (void)hinic_flush_tcam_rule(nic_dev->hwdev); +} + +static void hinic_filterlist_flush(struct rte_eth_dev *dev) +{ + struct hinic_ntuple_filter_ele *ntuple_filter_ptr; + struct hinic_ethertype_filter_ele *ethertype_filter_ptr; + struct hinic_fdir_rule_ele *fdir_rule_ptr; + struct hinic_flow_mem *hinic_flow_mem_ptr; + struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); + + while ((ntuple_filter_ptr = + TAILQ_FIRST(&nic_dev->filter_ntuple_list))) { + TAILQ_REMOVE(&nic_dev->filter_ntuple_list, ntuple_filter_ptr, + entries); + rte_free(ntuple_filter_ptr); + } + + while ((ethertype_filter_ptr = + TAILQ_FIRST(&nic_dev->filter_ethertype_list))) { + TAILQ_REMOVE(&nic_dev->filter_ethertype_list, + ethertype_filter_ptr, + entries); + rte_free(ethertype_filter_ptr); + } + + while ((fdir_rule_ptr = + TAILQ_FIRST(&nic_dev->filter_fdir_rule_list))) { + TAILQ_REMOVE(&nic_dev->filter_fdir_rule_list, fdir_rule_ptr, + entries); + rte_free(fdir_rule_ptr); + } + + while ((hinic_flow_mem_ptr = + TAILQ_FIRST(&nic_dev->hinic_flow_list))) { + TAILQ_REMOVE(&nic_dev->hinic_flow_list, hinic_flow_mem_ptr, + entries); + rte_free(hinic_flow_mem_ptr->flow); + rte_free(hinic_flow_mem_ptr); + } +} + +/* Destroy all flow rules associated with a port on hinic. 
*/ +static int hinic_flow_flush(struct rte_eth_dev *dev, + __rte_unused struct rte_flow_error *error) +{ + struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); + + hinic_clear_all_ntuple_filter(dev); + hinic_clear_all_ethertype_filter(dev); + hinic_clear_all_fdir_filter(dev); + hinic_filterlist_flush(dev); + + PMD_DRV_LOG(INFO, "Flush flow succeed, func_id: 0x%x", + hinic_global_func_id(nic_dev->hwdev)); + return 0; +} + +void hinic_destroy_fdir_filter(struct rte_eth_dev *dev) +{ + hinic_clear_all_ntuple_filter(dev); + hinic_clear_all_ethertype_filter(dev); + hinic_clear_all_fdir_filter(dev); + hinic_filterlist_flush(dev); +} + +const struct rte_flow_ops hinic_flow_ops = { + .validate = hinic_flow_validate, + .create = hinic_flow_create, + .destroy = hinic_flow_destroy, + .flush = hinic_flow_flush, +}; + diff --git a/src/spdk/dpdk/drivers/net/hinic/hinic_pmd_rx.c b/src/spdk/dpdk/drivers/net/hinic/hinic_pmd_rx.c new file mode 100644 index 000000000..a49769a86 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/hinic/hinic_pmd_rx.c @@ -0,0 +1,1089 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Huawei Technologies Co., Ltd + */ + +#include +#include +#ifdef __ARM64_NEON__ +#include +#endif + +#include "base/hinic_compat.h" +#include "base/hinic_pmd_hwdev.h" +#include "base/hinic_pmd_wq.h" +#include "base/hinic_pmd_niccfg.h" +#include "base/hinic_pmd_nicio.h" +#include "hinic_pmd_ethdev.h" +#include "hinic_pmd_rx.h" + +/* rxq wq operations */ +#define HINIC_GET_RQ_WQE_MASK(rxq) \ + ((rxq)->wq->mask) + +#define HINIC_GET_RQ_LOCAL_CI(rxq) \ + (((rxq)->wq->cons_idx) & HINIC_GET_RQ_WQE_MASK(rxq)) + +#define HINIC_GET_RQ_LOCAL_PI(rxq) \ + (((rxq)->wq->prod_idx) & HINIC_GET_RQ_WQE_MASK(rxq)) + +#define HINIC_UPDATE_RQ_LOCAL_CI(rxq, wqebb_cnt) \ + do { \ + (rxq)->wq->cons_idx += (wqebb_cnt); \ + (rxq)->wq->delta += (wqebb_cnt); \ + } while (0) + +#define HINIC_UPDATE_RQ_HW_PI(rxq, pi) \ + (*((rxq)->pi_virt_addr) = \ + cpu_to_be16((pi) & HINIC_GET_RQ_WQE_MASK(rxq))) + +#define HINIC_GET_RQ_FREE_WQEBBS(rxq) ((rxq)->wq->delta - 1) + +/* rxq cqe done and status bit */ +#define HINIC_GET_RX_DONE_BE(status) \ + ((status) & 0x80U) + +#define HINIC_RX_CSUM_OFFLOAD_EN 0xFFF + +#define RQ_CQE_SGE_VLAN_SHIFT 0 +#define RQ_CQE_SGE_LEN_SHIFT 16 + +#define RQ_CQE_SGE_VLAN_MASK 0xFFFFU +#define RQ_CQE_SGE_LEN_MASK 0xFFFFU + +#define RQ_CQE_SGE_GET(val, member) \ + (((val) >> RQ_CQE_SGE_##member##_SHIFT) & RQ_CQE_SGE_##member##_MASK) + +#define HINIC_GET_RX_VLAN_TAG(vlan_len) \ + RQ_CQE_SGE_GET(vlan_len, VLAN) + +#define HINIC_GET_RX_PKT_LEN(vlan_len) \ + RQ_CQE_SGE_GET(vlan_len, LEN) + +#define RQ_CQE_STATUS_CSUM_ERR_SHIFT 0 +#define RQ_CQE_STATUS_NUM_LRO_SHIFT 16 +#define RQ_CQE_STATUS_LRO_PUSH_SHIFT 25 +#define RQ_CQE_STATUS_LRO_ENTER_SHIFT 26 +#define RQ_CQE_STATUS_LRO_INTR_SHIFT 27 + +#define RQ_CQE_STATUS_BP_EN_SHIFT 30 +#define RQ_CQE_STATUS_RXDONE_SHIFT 31 +#define RQ_CQE_STATUS_FLUSH_SHIFT 28 + +#define RQ_CQE_STATUS_CSUM_ERR_MASK 0xFFFFU +#define RQ_CQE_STATUS_NUM_LRO_MASK 0xFFU +#define RQ_CQE_STATUS_LRO_PUSH_MASK 0X1U +#define RQ_CQE_STATUS_LRO_ENTER_MASK 0X1U +#define RQ_CQE_STATUS_LRO_INTR_MASK 0X1U +#define RQ_CQE_STATUS_BP_EN_MASK 0X1U +#define RQ_CQE_STATUS_RXDONE_MASK 0x1U +#define RQ_CQE_STATUS_FLUSH_MASK 0x1U + +#define RQ_CQE_STATUS_GET(val, member) \ + (((val) >> RQ_CQE_STATUS_##member##_SHIFT) & \ + RQ_CQE_STATUS_##member##_MASK) + +#define RQ_CQE_STATUS_CLEAR(val, member) \ + ((val) & (~(RQ_CQE_STATUS_##member##_MASK << \ + RQ_CQE_STATUS_##member##_SHIFT))) + 
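+/*
+ * Editorial example (not part of the upstream patch): a minimal sketch of
+ * how the SHIFT/MASK accessors defined above are meant to be used on a CQE
+ * status dword that has already been converted to CPU byte order. The macro
+ * expands to a plain shift-and-mask, e.g. RQ_CQE_STATUS_GET(status, RXDONE)
+ * becomes ((status >> 31) & 0x1U). The helper name below is hypothetical and
+ * only illustrates the pattern.
+ */
+static inline u32 example_decode_cqe_status(u32 status)
+{
+	/* RXDONE is carried in bit 31, NUM_LRO in bits [23:16] of the status */
+	u32 rx_done = RQ_CQE_STATUS_GET(status, RXDONE);
+	u32 num_lro = RQ_CQE_STATUS_GET(status, NUM_LRO);
+
+	return rx_done ? num_lro : 0;
+}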
+#define HINIC_GET_RX_CSUM_ERR(status) \ + RQ_CQE_STATUS_GET(status, CSUM_ERR) + +#define HINIC_GET_RX_DONE(status) \ + RQ_CQE_STATUS_GET(status, RXDONE) + +#define HINIC_GET_RX_FLUSH(status) \ + RQ_CQE_STATUS_GET(status, FLUSH) + +#define HINIC_GET_RX_BP_EN(status) \ + RQ_CQE_STATUS_GET(status, BP_EN) + +#define HINIC_GET_RX_NUM_LRO(status) \ + RQ_CQE_STATUS_GET(status, NUM_LRO) + +/* RQ_CTRL */ +#define RQ_CTRL_BUFDESC_SECT_LEN_SHIFT 0 +#define RQ_CTRL_COMPLETE_FORMAT_SHIFT 15 +#define RQ_CTRL_COMPLETE_LEN_SHIFT 27 +#define RQ_CTRL_LEN_SHIFT 29 + +#define RQ_CTRL_BUFDESC_SECT_LEN_MASK 0xFFU +#define RQ_CTRL_COMPLETE_FORMAT_MASK 0x1U +#define RQ_CTRL_COMPLETE_LEN_MASK 0x3U +#define RQ_CTRL_LEN_MASK 0x3U + +#define RQ_CTRL_SET(val, member) \ + (((val) & RQ_CTRL_##member##_MASK) << RQ_CTRL_##member##_SHIFT) + +#define RQ_CTRL_GET(val, member) \ + (((val) >> RQ_CTRL_##member##_SHIFT) & RQ_CTRL_##member##_MASK) + +#define RQ_CTRL_CLEAR(val, member) \ + ((val) & (~(RQ_CTRL_##member##_MASK << RQ_CTRL_##member##_SHIFT))) + +#define RQ_CQE_PKT_NUM_SHIFT 1 +#define RQ_CQE_PKT_FIRST_LEN_SHIFT 19 +#define RQ_CQE_PKT_LAST_LEN_SHIFT 6 +#define RQ_CQE_SUPER_CQE_EN_SHIFT 0 + +#define RQ_CQE_PKT_FIRST_LEN_MASK 0x1FFFU +#define RQ_CQE_PKT_LAST_LEN_MASK 0x1FFFU +#define RQ_CQE_PKT_NUM_MASK 0x1FU +#define RQ_CQE_SUPER_CQE_EN_MASK 0x1 + +#define RQ_CQE_PKT_NUM_GET(val, member) \ + (((val) >> RQ_CQE_PKT_##member##_SHIFT) & RQ_CQE_PKT_##member##_MASK) + +#define HINIC_GET_RQ_CQE_PKT_NUM(pkt_info) RQ_CQE_PKT_NUM_GET(pkt_info, NUM) + +#define RQ_CQE_SUPER_CQE_EN_GET(val, member) \ + (((val) >> RQ_CQE_##member##_SHIFT) & RQ_CQE_##member##_MASK) + +#define HINIC_GET_SUPER_CQE_EN(pkt_info) \ + RQ_CQE_SUPER_CQE_EN_GET(pkt_info, SUPER_CQE_EN) + +#define RQ_CQE_OFFOLAD_TYPE_VLAN_EN_SHIFT 21 +#define RQ_CQE_OFFOLAD_TYPE_VLAN_EN_MASK 0x1U + +#define RQ_CQE_OFFOLAD_TYPE_PKT_TYPE_SHIFT 0 +#define RQ_CQE_OFFOLAD_TYPE_PKT_TYPE_MASK 0xFFFU + +#define RQ_CQE_OFFOLAD_TYPE_PKT_UMBCAST_SHIFT 19 +#define RQ_CQE_OFFOLAD_TYPE_PKT_UMBCAST_MASK 0x3U + +#define RQ_CQE_OFFOLAD_TYPE_RSS_TYPE_SHIFT 24 +#define RQ_CQE_OFFOLAD_TYPE_RSS_TYPE_MASK 0xFFU + +#define RQ_CQE_OFFOLAD_TYPE_GET(val, member) (((val) >> \ + RQ_CQE_OFFOLAD_TYPE_##member##_SHIFT) & \ + RQ_CQE_OFFOLAD_TYPE_##member##_MASK) + +#define HINIC_GET_RX_VLAN_OFFLOAD_EN(offload_type) \ + RQ_CQE_OFFOLAD_TYPE_GET(offload_type, VLAN_EN) + +#define HINIC_GET_RSS_TYPES(offload_type) \ + RQ_CQE_OFFOLAD_TYPE_GET(offload_type, RSS_TYPE) + +#define HINIC_GET_RX_PKT_TYPE(offload_type) \ + RQ_CQE_OFFOLAD_TYPE_GET(offload_type, PKT_TYPE) + +#define HINIC_GET_RX_PKT_UMBCAST(offload_type) \ + RQ_CQE_OFFOLAD_TYPE_GET(offload_type, PKT_UMBCAST) + +#define RQ_CQE_STATUS_CSUM_BYPASS_VAL 0x80U +#define RQ_CQE_STATUS_CSUM_ERR_IP_MASK 0x39U +#define RQ_CQE_STATUS_CSUM_ERR_L4_MASK 0x46U +#define RQ_CQE_STATUS_CSUM_ERR_OTHER 0x100U + +#define HINIC_CSUM_ERR_BYPASSED(csum_err) \ + ((csum_err) == RQ_CQE_STATUS_CSUM_BYPASS_VAL) + +#define HINIC_CSUM_ERR_IP(csum_err) \ + ((csum_err) & RQ_CQE_STATUS_CSUM_ERR_IP_MASK) + +#define HINIC_CSUM_ERR_L4(csum_err) \ + ((csum_err) & RQ_CQE_STATUS_CSUM_ERR_L4_MASK) + +#define HINIC_CSUM_ERR_OTHER(csum_err) \ + ((csum_err) == RQ_CQE_STATUS_CSUM_ERR_OTHER) + + +void hinic_get_func_rx_buf_size(struct hinic_nic_dev *nic_dev) +{ + struct hinic_rxq *rxq; + u16 q_id; + u16 buf_size = 0; + + for (q_id = 0; q_id < nic_dev->num_rq; q_id++) { + rxq = nic_dev->rxqs[q_id]; + + if (rxq == NULL) + continue; + + if (q_id == 0) + buf_size = rxq->buf_len; + + buf_size = buf_size > 
rxq->buf_len ? rxq->buf_len : buf_size; + } + + nic_dev->hwdev->nic_io->rq_buf_size = buf_size; +} + +int hinic_create_rq(struct hinic_hwdev *hwdev, u16 q_id, + u16 rq_depth, unsigned int socket_id) +{ + int err; + struct hinic_nic_io *nic_io = hwdev->nic_io; + struct hinic_qp *qp = &nic_io->qps[q_id]; + struct hinic_rq *rq = &qp->rq; + + /* in case of hardware still generate interrupt, do not use msix 0 */ + rq->msix_entry_idx = 1; + rq->q_id = q_id; + rq->rq_depth = rq_depth; + nic_io->rq_depth = rq_depth; + + err = hinic_wq_allocate(hwdev, &nic_io->rq_wq[q_id], + HINIC_RQ_WQEBB_SHIFT, nic_io->rq_depth, socket_id); + if (err) { + PMD_DRV_LOG(ERR, "Failed to allocate WQ for RQ"); + return err; + } + rq->wq = &nic_io->rq_wq[q_id]; + + rq->pi_virt_addr = (volatile u16 *)dma_zalloc_coherent(hwdev, + HINIC_PAGE_SIZE, &rq->pi_dma_addr, socket_id); + if (!rq->pi_virt_addr) { + PMD_DRV_LOG(ERR, "Failed to allocate rq pi virt addr"); + err = -ENOMEM; + goto rq_pi_alloc_err; + } + + return HINIC_OK; + +rq_pi_alloc_err: + hinic_wq_free(hwdev, &nic_io->rq_wq[q_id]); + + return err; +} + +void hinic_destroy_rq(struct hinic_hwdev *hwdev, u16 q_id) +{ + struct hinic_nic_io *nic_io = hwdev->nic_io; + struct hinic_qp *qp = &nic_io->qps[q_id]; + struct hinic_rq *rq = &qp->rq; + + if (qp->rq.wq == NULL) + return; + + dma_free_coherent_volatile(hwdev, HINIC_PAGE_SIZE, + (volatile void *)rq->pi_virt_addr, + rq->pi_dma_addr); + hinic_wq_free(nic_io->hwdev, qp->rq.wq); + qp->rq.wq = NULL; +} + +static void +hinic_prepare_rq_wqe(void *wqe, __rte_unused u16 pi, dma_addr_t buf_addr, + dma_addr_t cqe_dma) +{ + struct hinic_rq_wqe *rq_wqe = wqe; + struct hinic_rq_ctrl *ctrl = &rq_wqe->ctrl; + struct hinic_rq_cqe_sect *cqe_sect = &rq_wqe->cqe_sect; + struct hinic_rq_bufdesc *buf_desc = &rq_wqe->buf_desc; + u32 rq_ceq_len = sizeof(struct hinic_rq_cqe); + + ctrl->ctrl_fmt = + RQ_CTRL_SET(SIZE_8BYTES(sizeof(*ctrl)), LEN) | + RQ_CTRL_SET(SIZE_8BYTES(sizeof(*cqe_sect)), COMPLETE_LEN) | + RQ_CTRL_SET(SIZE_8BYTES(sizeof(*buf_desc)), BUFDESC_SECT_LEN) | + RQ_CTRL_SET(RQ_COMPLETE_SGE, COMPLETE_FORMAT); + + hinic_set_sge(&cqe_sect->sge, cqe_dma, rq_ceq_len); + + buf_desc->addr_high = upper_32_bits(buf_addr); + buf_desc->addr_low = lower_32_bits(buf_addr); +} + +void hinic_rxq_get_stats(struct hinic_rxq *rxq, struct hinic_rxq_stats *stats) +{ + if (!rxq || !stats) + return; + + memcpy(stats, &rxq->rxq_stats, sizeof(rxq->rxq_stats)); +} + +void hinic_rxq_stats_reset(struct hinic_rxq *rxq) +{ + struct hinic_rxq_stats *rxq_stats; + + if (rxq == NULL) + return; + + rxq_stats = &rxq->rxq_stats; + memset(rxq_stats, 0, sizeof(*rxq_stats)); +} + +static int hinic_rx_alloc_cqe(struct hinic_rxq *rxq, unsigned int socket_id) +{ + size_t cqe_mem_size; + + cqe_mem_size = sizeof(struct hinic_rq_cqe) * rxq->q_depth; + rxq->cqe_start_vaddr = dma_zalloc_coherent(rxq->nic_dev->hwdev, + cqe_mem_size, &rxq->cqe_start_paddr, socket_id); + if (!rxq->cqe_start_vaddr) { + PMD_DRV_LOG(ERR, "Allocate cqe dma memory failed"); + return -ENOMEM; + } + + rxq->rx_cqe = (struct hinic_rq_cqe *)rxq->cqe_start_vaddr; + + return HINIC_OK; +} + +static void hinic_rx_free_cqe(struct hinic_rxq *rxq) +{ + size_t cqe_mem_size; + + cqe_mem_size = sizeof(struct hinic_rq_cqe) * rxq->q_depth; + dma_free_coherent(rxq->nic_dev->hwdev, cqe_mem_size, + rxq->cqe_start_vaddr, rxq->cqe_start_paddr); + rxq->cqe_start_vaddr = NULL; +} + +static int hinic_rx_fill_wqe(struct hinic_rxq *rxq) +{ + struct hinic_nic_dev *nic_dev = rxq->nic_dev; + struct hinic_rq_wqe *rq_wqe; + 
dma_addr_t buf_dma_addr, cqe_dma_addr; + u16 pi = 0; + int i; + + buf_dma_addr = 0; + cqe_dma_addr = rxq->cqe_start_paddr; + for (i = 0; i < rxq->q_depth; i++) { + rq_wqe = hinic_get_rq_wqe(nic_dev->hwdev, rxq->q_id, &pi); + if (!rq_wqe) { + PMD_DRV_LOG(ERR, "Get rq wqe failed"); + break; + } + + hinic_prepare_rq_wqe(rq_wqe, pi, buf_dma_addr, cqe_dma_addr); + cqe_dma_addr += sizeof(struct hinic_rq_cqe); + + hinic_cpu_to_be32(rq_wqe, sizeof(struct hinic_rq_wqe)); + } + + hinic_return_rq_wqe(nic_dev->hwdev, rxq->q_id, i); + + return i; +} + +/* alloc cqe and prepare rqe */ +int hinic_setup_rx_resources(struct hinic_rxq *rxq) +{ + u64 rx_info_sz; + int err, pkts; + + rx_info_sz = rxq->q_depth * sizeof(*rxq->rx_info); + rxq->rx_info = rte_zmalloc_socket("rx_info", rx_info_sz, + RTE_CACHE_LINE_SIZE, rxq->socket_id); + if (!rxq->rx_info) + return -ENOMEM; + + err = hinic_rx_alloc_cqe(rxq, rxq->socket_id); + if (err) { + PMD_DRV_LOG(ERR, "Allocate rx cqe failed"); + goto rx_cqe_err; + } + + pkts = hinic_rx_fill_wqe(rxq); + if (pkts != rxq->q_depth) { + PMD_DRV_LOG(ERR, "Fill rx wqe failed"); + err = -ENOMEM; + goto rx_fill_err; + } + + return 0; + +rx_fill_err: + hinic_rx_free_cqe(rxq); + +rx_cqe_err: + rte_free(rxq->rx_info); + rxq->rx_info = NULL; + + return err; +} + +void hinic_free_rx_resources(struct hinic_rxq *rxq) +{ + if (rxq->rx_info == NULL) + return; + + hinic_rx_free_cqe(rxq); + rte_free(rxq->rx_info); + rxq->rx_info = NULL; +} + +void hinic_free_all_rx_resources(struct rte_eth_dev *eth_dev) +{ + u16 q_id; + struct hinic_nic_dev *nic_dev = + HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(eth_dev); + + for (q_id = 0; q_id < nic_dev->num_rq; q_id++) { + if (eth_dev->data->rx_queues != NULL) + eth_dev->data->rx_queues[q_id] = NULL; + + if (nic_dev->rxqs[q_id] == NULL) + continue; + + hinic_free_all_rx_mbufs(nic_dev->rxqs[q_id]); + hinic_free_rx_resources(nic_dev->rxqs[q_id]); + kfree(nic_dev->rxqs[q_id]); + nic_dev->rxqs[q_id] = NULL; + } +} + +void hinic_free_all_rx_mbuf(struct rte_eth_dev *eth_dev) +{ + struct hinic_nic_dev *nic_dev = + HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(eth_dev); + u16 q_id; + + for (q_id = 0; q_id < nic_dev->num_rq; q_id++) + hinic_free_all_rx_mbufs(nic_dev->rxqs[q_id]); +} + +static void hinic_recv_jumbo_pkt(struct hinic_rxq *rxq, + struct rte_mbuf *head_mbuf, + u32 remain_pkt_len) +{ + struct hinic_nic_dev *nic_dev = rxq->nic_dev; + struct rte_mbuf *cur_mbuf, *rxm = NULL; + struct hinic_rx_info *rx_info; + u16 sw_ci, rx_buf_len = rxq->buf_len; + u32 pkt_len; + + while (remain_pkt_len > 0) { + sw_ci = hinic_get_rq_local_ci(nic_dev->hwdev, rxq->q_id); + rx_info = &rxq->rx_info[sw_ci]; + + hinic_update_rq_local_ci(nic_dev->hwdev, rxq->q_id, 1); + + pkt_len = remain_pkt_len > rx_buf_len ? 
+ rx_buf_len : remain_pkt_len; + remain_pkt_len -= pkt_len; + + cur_mbuf = rx_info->mbuf; + cur_mbuf->data_len = (u16)pkt_len; + cur_mbuf->next = NULL; + + head_mbuf->pkt_len += cur_mbuf->data_len; + head_mbuf->nb_segs++; + + if (!rxm) + head_mbuf->next = cur_mbuf; + else + rxm->next = cur_mbuf; + + rxm = cur_mbuf; + } +} + +static void hinic_rss_deinit(struct hinic_nic_dev *nic_dev) +{ + u8 prio_tc[HINIC_DCB_UP_MAX] = {0}; + (void)hinic_rss_cfg(nic_dev->hwdev, 0, + nic_dev->rss_tmpl_idx, 0, prio_tc); +} + +static int hinic_rss_key_init(struct hinic_nic_dev *nic_dev, + struct rte_eth_rss_conf *rss_conf) +{ + u8 default_rss_key[HINIC_RSS_KEY_SIZE] = { + 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, + 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, + 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4, + 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c, + 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa}; + u8 hashkey[HINIC_RSS_KEY_SIZE] = {0}; + u8 tmpl_idx = nic_dev->rss_tmpl_idx; + + if (rss_conf->rss_key == NULL) + memcpy(hashkey, default_rss_key, HINIC_RSS_KEY_SIZE); + else + memcpy(hashkey, rss_conf->rss_key, rss_conf->rss_key_len); + + return hinic_rss_set_template_tbl(nic_dev->hwdev, tmpl_idx, hashkey); +} + +static void hinic_fill_rss_type(struct nic_rss_type *rss_type, + struct rte_eth_rss_conf *rss_conf) +{ + u64 rss_hf = rss_conf->rss_hf; + + rss_type->ipv4 = (rss_hf & (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4)) ? 1 : 0; + rss_type->tcp_ipv4 = (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) ? 1 : 0; + rss_type->ipv6 = (rss_hf & (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6)) ? 1 : 0; + rss_type->ipv6_ext = (rss_hf & ETH_RSS_IPV6_EX) ? 1 : 0; + rss_type->tcp_ipv6 = (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) ? 1 : 0; + rss_type->tcp_ipv6_ext = (rss_hf & ETH_RSS_IPV6_TCP_EX) ? 1 : 0; + rss_type->udp_ipv4 = (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) ? 1 : 0; + rss_type->udp_ipv6 = (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) ? 
1 : 0; +} + +static void hinic_fillout_indir_tbl(struct hinic_nic_dev *nic_dev, u32 *indir) +{ + u8 rss_queue_count = nic_dev->num_rss; + int i = 0, j; + + if (rss_queue_count == 0) { + /* delete q_id from indir tbl */ + for (i = 0; i < HINIC_RSS_INDIR_SIZE; i++) + indir[i] = 0xFF; /* Invalid value in indir tbl */ + } else { + while (i < HINIC_RSS_INDIR_SIZE) + for (j = 0; (j < rss_queue_count) && + (i < HINIC_RSS_INDIR_SIZE); j++) + indir[i++] = nic_dev->rx_queue_list[j]; + } +} + +static int hinic_rss_init(struct hinic_nic_dev *nic_dev, + __rte_unused u8 *rq2iq_map, + struct rte_eth_rss_conf *rss_conf) +{ + u32 indir_tbl[HINIC_RSS_INDIR_SIZE] = {0}; + struct nic_rss_type rss_type = {0}; + u8 prio_tc[HINIC_DCB_UP_MAX] = {0}; + u8 tmpl_idx = 0xFF, num_tc = 0; + int err; + + tmpl_idx = nic_dev->rss_tmpl_idx; + + err = hinic_rss_key_init(nic_dev, rss_conf); + if (err) + return err; + + if (!nic_dev->rss_indir_flag) { + hinic_fillout_indir_tbl(nic_dev, indir_tbl); + err = hinic_rss_set_indir_tbl(nic_dev->hwdev, tmpl_idx, + indir_tbl); + if (err) + return err; + } + + hinic_fill_rss_type(&rss_type, rss_conf); + err = hinic_set_rss_type(nic_dev->hwdev, tmpl_idx, rss_type); + if (err) + return err; + + err = hinic_rss_set_hash_engine(nic_dev->hwdev, tmpl_idx, + HINIC_RSS_HASH_ENGINE_TYPE_TOEP); + if (err) + return err; + + return hinic_rss_cfg(nic_dev->hwdev, 1, tmpl_idx, num_tc, prio_tc); +} + +static void +hinic_add_rq_to_rx_queue_list(struct hinic_nic_dev *nic_dev, u16 queue_id) +{ + u8 rss_queue_count = nic_dev->num_rss; + + RTE_ASSERT(rss_queue_count <= (RTE_DIM(nic_dev->rx_queue_list) - 1)); + + nic_dev->rx_queue_list[rss_queue_count] = queue_id; + nic_dev->num_rss++; +} + +/** + * hinic_setup_num_qps - determine num_qps from rss_tmpl_id + * @nic_dev: pointer to the private ethernet device + * Return: 0 on Success, error code otherwise. 
+ **/ +static int hinic_setup_num_qps(struct hinic_nic_dev *nic_dev) +{ + int err, i; + + if (!(nic_dev->flags & ETH_MQ_RX_RSS_FLAG)) { + nic_dev->flags &= ~ETH_MQ_RX_RSS_FLAG; + nic_dev->num_rss = 0; + if (nic_dev->num_rq > 1) { + /* get rss template id */ + err = hinic_rss_template_alloc(nic_dev->hwdev, + &nic_dev->rss_tmpl_idx); + if (err) { + PMD_DRV_LOG(WARNING, "Alloc rss template failed"); + return err; + } + nic_dev->flags |= ETH_MQ_RX_RSS_FLAG; + for (i = 0; i < nic_dev->num_rq; i++) + hinic_add_rq_to_rx_queue_list(nic_dev, i); + } + } + + return 0; +} + +static void hinic_destroy_num_qps(struct hinic_nic_dev *nic_dev) +{ + if (nic_dev->flags & ETH_MQ_RX_RSS_FLAG) { + if (hinic_rss_template_free(nic_dev->hwdev, + nic_dev->rss_tmpl_idx)) + PMD_DRV_LOG(WARNING, "Free rss template failed"); + + nic_dev->flags &= ~ETH_MQ_RX_RSS_FLAG; + } +} + +static int hinic_config_mq_rx_rss(struct hinic_nic_dev *nic_dev, bool on) +{ + int ret = 0; + + if (on) { + ret = hinic_setup_num_qps(nic_dev); + if (ret) + PMD_DRV_LOG(ERR, "Setup num_qps failed"); + } else { + hinic_destroy_num_qps(nic_dev); + } + + return ret; +} + +int hinic_config_mq_mode(struct rte_eth_dev *dev, bool on) +{ + struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); + struct rte_eth_conf *dev_conf = &dev->data->dev_conf; + int ret = 0; + + switch (dev_conf->rxmode.mq_mode) { + case ETH_MQ_RX_RSS: + ret = hinic_config_mq_rx_rss(nic_dev, on); + break; + default: + break; + } + + return ret; +} + +int hinic_rx_configure(struct rte_eth_dev *dev) +{ + struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); + struct rte_eth_rss_conf rss_conf = + dev->data->dev_conf.rx_adv_conf.rss_conf; + int err; + bool lro_en; + int max_lro_size; + int lro_wqe_num; + int buf_size; + + if (nic_dev->flags & ETH_MQ_RX_RSS_FLAG) { + if (rss_conf.rss_hf == 0) { + rss_conf.rss_hf = HINIC_RSS_OFFLOAD_ALL; + } else if ((rss_conf.rss_hf & HINIC_RSS_OFFLOAD_ALL) == 0) { + PMD_DRV_LOG(ERR, "Do not support rss offload all"); + goto rss_config_err; + } + + err = hinic_rss_init(nic_dev, NULL, &rss_conf); + if (err) { + PMD_DRV_LOG(ERR, "Init rss failed"); + goto rss_config_err; + } + } + + /* Enable both L3/L4 rx checksum offload */ + if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_CHECKSUM) + nic_dev->rx_csum_en = HINIC_RX_CSUM_OFFLOAD_EN; + + err = hinic_set_rx_csum_offload(nic_dev->hwdev, + HINIC_RX_CSUM_OFFLOAD_EN); + if (err) + goto rx_csum_ofl_err; + + /* config lro */ + lro_en = dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO ? + true : false; + max_lro_size = dev->data->dev_conf.rxmode.max_lro_pkt_size; + buf_size = nic_dev->hwdev->nic_io->rq_buf_size; + lro_wqe_num = max_lro_size / buf_size ? (max_lro_size / buf_size) : 1; + + err = hinic_set_rx_lro(nic_dev->hwdev, lro_en, lro_en, lro_wqe_num); + if (err) { + PMD_DRV_LOG(ERR, "%s %s lro failed, err: %d, max_lro_size: %d", + dev->data->name, lro_en ? 
"Enable" : "Disable", + err, max_lro_size); + goto set_rx_lro_err; + } + + return 0; + +set_rx_lro_err: +rx_csum_ofl_err: +rss_config_err: + + hinic_destroy_num_qps(nic_dev); + + return HINIC_ERROR; +} + +static void hinic_rx_remove_lro(struct hinic_nic_dev *nic_dev) +{ + int err; + + err = hinic_set_rx_lro(nic_dev->hwdev, false, false, 0); + if (err) + PMD_DRV_LOG(ERR, "%s disable LRO failed", + nic_dev->proc_dev_name); +} + +void hinic_rx_remove_configure(struct rte_eth_dev *dev) +{ + struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); + + if (nic_dev->flags & ETH_MQ_RX_RSS_FLAG) { + hinic_rss_deinit(nic_dev); + hinic_destroy_num_qps(nic_dev); + } + + hinic_rx_remove_lro(nic_dev); +} + +void hinic_free_all_rx_mbufs(struct hinic_rxq *rxq) +{ + struct hinic_nic_dev *nic_dev = rxq->nic_dev; + struct hinic_rx_info *rx_info; + int free_wqebbs = + hinic_get_rq_free_wqebbs(nic_dev->hwdev, rxq->q_id) + 1; + volatile struct hinic_rq_cqe *rx_cqe; + u16 ci; + + while (free_wqebbs++ < rxq->q_depth) { + ci = hinic_get_rq_local_ci(nic_dev->hwdev, rxq->q_id); + + rx_cqe = &rxq->rx_cqe[ci]; + + /* clear done bit */ + rx_cqe->status = 0; + + rx_info = &rxq->rx_info[ci]; + rte_pktmbuf_free(rx_info->mbuf); + rx_info->mbuf = NULL; + + hinic_update_rq_local_ci(nic_dev->hwdev, rxq->q_id, 1); + } +} + +static inline void hinic_rq_cqe_be_to_cpu32(void *dst_le32, + volatile void *src_be32) +{ +#if defined(__X86_64_SSE__) + volatile __m128i *wqe_be = (volatile __m128i *)src_be32; + __m128i *wqe_le = (__m128i *)dst_le32; + __m128i shuf_mask = _mm_set_epi8(12, 13, 14, 15, 8, 9, 10, + 11, 4, 5, 6, 7, 0, 1, 2, 3); + + /* l2nic just use first 128 bits */ + wqe_le[0] = _mm_shuffle_epi8(wqe_be[0], shuf_mask); +#elif defined(__ARM64_NEON__) + volatile uint8x16_t *wqe_be = (volatile uint8x16_t *)src_be32; + uint8x16_t *wqe_le = (uint8x16_t *)dst_le32; + const uint8x16_t shuf_mask = {3, 2, 1, 0, 7, 6, 5, 4, 11, 10, + 9, 8, 15, 14, 13, 12}; + + /* l2nic just use first 128 bits */ + wqe_le[0] = vqtbl1q_u8(wqe_be[0], shuf_mask); +#else + u32 i; + volatile u32 *wqe_be = (volatile u32 *)src_be32; + u32 *wqe_le = (u32 *)dst_le32; + +#define HINIC_L2NIC_RQ_CQE_USED 4 /* 4Bytes unit */ + + for (i = 0; i < HINIC_L2NIC_RQ_CQE_USED; i++) { + *wqe_le = rte_be_to_cpu_32(*wqe_be); + wqe_be++; + wqe_le++; + } +#endif +} + +static inline uint64_t hinic_rx_rss_hash(uint32_t offload_type, + uint32_t cqe_hass_val, + uint32_t *rss_hash) +{ + uint32_t rss_type; + + rss_type = HINIC_GET_RSS_TYPES(offload_type); + if (likely(rss_type != 0)) { + *rss_hash = cqe_hass_val; + return PKT_RX_RSS_HASH; + } + + return 0; +} + +static inline uint64_t hinic_rx_csum(uint32_t status, struct hinic_rxq *rxq) +{ + uint32_t checksum_err; + uint64_t flags; + struct hinic_nic_dev *nic_dev = rxq->nic_dev; + + if (unlikely(!(nic_dev->rx_csum_en & HINIC_RX_CSUM_OFFLOAD_EN))) + return PKT_RX_IP_CKSUM_UNKNOWN; + + /* most case checksum is ok */ + checksum_err = HINIC_GET_RX_CSUM_ERR(status); + if (likely(checksum_err == 0)) + return (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD); + + /* If BYPASS bit set, all other status indications should be ignored */ + if (unlikely(HINIC_CSUM_ERR_BYPASSED(checksum_err))) + return PKT_RX_IP_CKSUM_UNKNOWN; + + flags = 0; + + /* IP checksum error */ + if (HINIC_CSUM_ERR_IP(checksum_err)) + flags |= PKT_RX_IP_CKSUM_BAD; + else + flags |= PKT_RX_IP_CKSUM_GOOD; + + /* L4 checksum error */ + if (HINIC_CSUM_ERR_L4(checksum_err)) + flags |= PKT_RX_L4_CKSUM_BAD; + else + flags |= PKT_RX_L4_CKSUM_GOOD; + + if 
(unlikely(HINIC_CSUM_ERR_OTHER(checksum_err))) + flags = PKT_RX_L4_CKSUM_NONE; + + rxq->rxq_stats.errors++; + + return flags; +} + +static inline uint64_t hinic_rx_vlan(uint32_t offload_type, uint32_t vlan_len, + uint16_t *vlan_tci) +{ + uint16_t vlan_tag; + + vlan_tag = HINIC_GET_RX_VLAN_TAG(vlan_len); + if (!HINIC_GET_RX_VLAN_OFFLOAD_EN(offload_type) || 0 == vlan_tag) { + *vlan_tci = 0; + return 0; + } + + *vlan_tci = vlan_tag; + + return PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED; +} + +static inline u32 hinic_rx_alloc_mbuf_bulk(struct hinic_rxq *rxq, + struct rte_mbuf **mbufs, + u32 exp_mbuf_cnt) +{ + int rc; + u32 avail_cnt; + + rc = rte_pktmbuf_alloc_bulk(rxq->mb_pool, mbufs, exp_mbuf_cnt); + if (likely(rc == HINIC_OK)) { + avail_cnt = exp_mbuf_cnt; + } else { + avail_cnt = 0; + rxq->rxq_stats.rx_nombuf += exp_mbuf_cnt; + } + + return avail_cnt; +} + +static struct rte_mbuf *hinic_rx_alloc_mbuf(struct hinic_rxq *rxq, + dma_addr_t *dma_addr) +{ + struct rte_mbuf *mbuf = NULL; + int rc; + + rc = rte_pktmbuf_alloc_bulk(rxq->mb_pool, &mbuf, 1); + if (unlikely(rc != HINIC_OK)) + return NULL; + + *dma_addr = rte_mbuf_data_iova_default(mbuf); + + return mbuf; +} + +static inline void hinic_rearm_rxq_mbuf(struct hinic_rxq *rxq) +{ + u16 pi; + u32 i, free_wqebbs, rearm_wqebbs, exp_wqebbs; + dma_addr_t dma_addr; + struct hinic_rq_wqe *rq_wqe; + struct rte_mbuf **rearm_mbufs; + + /* check free wqebb fo rearm */ + free_wqebbs = HINIC_GET_RQ_FREE_WQEBBS(rxq); + if (unlikely(free_wqebbs < rxq->rx_free_thresh)) + return; + + /* get rearm mbuf array */ + pi = HINIC_GET_RQ_LOCAL_PI(rxq); + rearm_mbufs = (struct rte_mbuf **)(&rxq->rx_info[pi]); + + /* check rxq free wqebbs turn around */ + exp_wqebbs = rxq->q_depth - pi; + if (free_wqebbs < exp_wqebbs) + exp_wqebbs = free_wqebbs; + + /* alloc mbuf in bulk */ + rearm_wqebbs = hinic_rx_alloc_mbuf_bulk(rxq, rearm_mbufs, exp_wqebbs); + if (unlikely(rearm_wqebbs == 0)) + return; + + /* rearm rx mbuf */ + rq_wqe = WQ_WQE_ADDR(rxq->wq, (u32)pi); + for (i = 0; i < rearm_wqebbs; i++) { + dma_addr = rte_mbuf_data_iova_default(rearm_mbufs[i]); + rq_wqe->buf_desc.addr_high = + cpu_to_be32(upper_32_bits(dma_addr)); + rq_wqe->buf_desc.addr_low = + cpu_to_be32(lower_32_bits(dma_addr)); + rq_wqe++; + } + rxq->wq->prod_idx += rearm_wqebbs; + rxq->wq->delta -= rearm_wqebbs; + + /* update rq hw_pi */ + rte_wmb(); + HINIC_UPDATE_RQ_HW_PI(rxq, pi + rearm_wqebbs); +} + +void hinic_rx_alloc_pkts(struct hinic_rxq *rxq) +{ + struct hinic_nic_dev *nic_dev = rxq->nic_dev; + struct hinic_rq_wqe *rq_wqe; + struct hinic_rx_info *rx_info; + struct rte_mbuf *mb; + dma_addr_t dma_addr; + u16 pi = 0; + int i, free_wqebbs; + + free_wqebbs = HINIC_GET_RQ_FREE_WQEBBS(rxq); + for (i = 0; i < free_wqebbs; i++) { + mb = hinic_rx_alloc_mbuf(rxq, &dma_addr); + if (unlikely(!mb)) { + rxq->rxq_stats.rx_nombuf++; + break; + } + + rq_wqe = hinic_get_rq_wqe(nic_dev->hwdev, rxq->q_id, &pi); + if (unlikely(!rq_wqe)) { + rte_pktmbuf_free(mb); + break; + } + + /* fill buffer address only */ + rq_wqe->buf_desc.addr_high = + cpu_to_be32(upper_32_bits(dma_addr)); + rq_wqe->buf_desc.addr_low = + cpu_to_be32(lower_32_bits(dma_addr)); + + rx_info = &rxq->rx_info[pi]; + rx_info->mbuf = mb; + } + + if (likely(i > 0)) { + rte_wmb(); + HINIC_UPDATE_RQ_HW_PI(rxq, pi + 1); + } +} + +u16 hinic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, u16 nb_pkts) +{ + struct rte_mbuf *rxm; + struct hinic_rxq *rxq = rx_queue; + struct hinic_rx_info *rx_info; + volatile struct hinic_rq_cqe *rx_cqe; + u16 rx_buf_len, pkts = 0; + 
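+ /* sw_ci is the software consumer index into the CQE ring, ci_mask
+ * wraps it at the queue depth, and wqebb_cnt counts the receive
+ * WQEBBs consumed since the last local CI update (a descriptive
+ * note on the locals declared below).
+ */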
u16 sw_ci, ci_mask, wqebb_cnt = 0; + u32 pkt_len, status, vlan_len, lro_num; + u64 rx_bytes = 0; + struct hinic_rq_cqe cqe; + u32 offload_type, rss_hash; + + rx_buf_len = rxq->buf_len; + + /* 1. get polling start ci */ + ci_mask = HINIC_GET_RQ_WQE_MASK(rxq); + sw_ci = HINIC_GET_RQ_LOCAL_CI(rxq); + + while (pkts < nb_pkts) { + /* 2. current ci is done */ + rx_cqe = &rxq->rx_cqe[sw_ci]; + status = __atomic_load_n(&rx_cqe->status, __ATOMIC_ACQUIRE); + if (!HINIC_GET_RX_DONE_BE(status)) + break; + + /* convert cqe and get packet length */ + hinic_rq_cqe_be_to_cpu32(&cqe, (volatile void *)rx_cqe); + vlan_len = cqe.vlan_len; + + rx_info = &rxq->rx_info[sw_ci]; + rxm = rx_info->mbuf; + + /* 3. next ci point and prefetch */ + sw_ci++; + sw_ci &= ci_mask; + + /* prefetch next mbuf first 64B */ + rte_prefetch0(rxq->rx_info[sw_ci].mbuf); + + /* 4. jumbo frame process */ + pkt_len = HINIC_GET_RX_PKT_LEN(vlan_len); + if (likely(pkt_len <= rx_buf_len)) { + rxm->data_len = pkt_len; + rxm->pkt_len = pkt_len; + wqebb_cnt++; + } else { + rxm->data_len = rx_buf_len; + rxm->pkt_len = rx_buf_len; + + /* if receive jumbo, updating ci will be done by + * hinic_recv_jumbo_pkt function. + */ + HINIC_UPDATE_RQ_LOCAL_CI(rxq, wqebb_cnt + 1); + wqebb_cnt = 0; + hinic_recv_jumbo_pkt(rxq, rxm, pkt_len - rx_buf_len); + sw_ci = HINIC_GET_RQ_LOCAL_CI(rxq); + } + + /* 5. vlan/checksum/rss/pkt_type/gro offload */ + rxm->data_off = RTE_PKTMBUF_HEADROOM; + rxm->port = rxq->port_id; + offload_type = cqe.offload_type; + + /* vlan offload */ + rxm->ol_flags |= hinic_rx_vlan(offload_type, vlan_len, + &rxm->vlan_tci); + + /* checksum offload */ + rxm->ol_flags |= hinic_rx_csum(cqe.status, rxq); + + /* rss hash offload */ + rss_hash = cqe.rss_hash; + rxm->ol_flags |= hinic_rx_rss_hash(offload_type, rss_hash, + &rxm->hash.rss); + + /* lro offload */ + lro_num = HINIC_GET_RX_NUM_LRO(cqe.status); + if (unlikely(lro_num != 0)) { + rxm->ol_flags |= PKT_RX_LRO; + rxm->tso_segsz = pkt_len / lro_num; + } + + /* 6. clear done bit */ + rx_cqe->status = 0; + + rx_bytes += pkt_len; + rx_pkts[pkts++] = rxm; + } + + if (pkts) { + /* 7. update ci */ + HINIC_UPDATE_RQ_LOCAL_CI(rxq, wqebb_cnt); + + /* do packet stats */ + rxq->rxq_stats.packets += pkts; + rxq->rxq_stats.bytes += rx_bytes; + } + rxq->rxq_stats.burst_pkts = pkts; + + /* 8. 
rearm mbuf to rxq */ + hinic_rearm_rxq_mbuf(rxq); + + return pkts; +} diff --git a/src/spdk/dpdk/drivers/net/hinic/hinic_pmd_rx.h b/src/spdk/dpdk/drivers/net/hinic/hinic_pmd_rx.h new file mode 100644 index 000000000..49fa56517 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/hinic/hinic_pmd_rx.h @@ -0,0 +1,131 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Huawei Technologies Co., Ltd + */ + +#ifndef _HINIC_PMD_RX_H_ +#define _HINIC_PMD_RX_H_ + +#define HINIC_DEFAULT_RX_FREE_THRESH 32 + +#define HINIC_RSS_OFFLOAD_ALL ( \ + ETH_RSS_IPV4 | \ + ETH_RSS_FRAG_IPV4 |\ + ETH_RSS_NONFRAG_IPV4_TCP | \ + ETH_RSS_NONFRAG_IPV4_UDP | \ + ETH_RSS_IPV6 | \ + ETH_RSS_FRAG_IPV6 | \ + ETH_RSS_NONFRAG_IPV6_TCP | \ + ETH_RSS_NONFRAG_IPV6_UDP | \ + ETH_RSS_IPV6_EX | \ + ETH_RSS_IPV6_TCP_EX | \ + ETH_RSS_IPV6_UDP_EX) + +enum rq_completion_fmt { + RQ_COMPLETE_SGE = 1 +}; + +struct hinic_rq_ctrl { + u32 ctrl_fmt; +}; + +struct hinic_rq_cqe { + u32 status; + u32 vlan_len; + u32 offload_type; + u32 rss_hash; + + u32 rsvd[4]; +} __rte_cache_aligned; + +struct hinic_rq_cqe_sect { + struct hinic_sge sge; + u32 rsvd; +}; + +struct hinic_rq_bufdesc { + u32 addr_high; + u32 addr_low; +}; + +struct hinic_rq_wqe { + struct hinic_rq_ctrl ctrl; + u32 rsvd; + struct hinic_rq_cqe_sect cqe_sect; + struct hinic_rq_bufdesc buf_desc; +}; + +struct hinic_rxq_stats { + u64 packets; + u64 bytes; + u64 rx_nombuf; + u64 errors; + u64 rx_discards; + u64 burst_pkts; +}; + +/* Attention, Do not add any member in hinic_rx_info + * as rxq bulk rearm mode will write mbuf in rx_info + */ +struct hinic_rx_info { + struct rte_mbuf *mbuf; +}; + +struct hinic_rxq { + struct hinic_wq *wq; + volatile u16 *pi_virt_addr; + + u16 port_id; + u16 q_id; + u16 q_depth; + u16 buf_len; + + u16 rx_free_thresh; + u16 rxinfo_align_end; + + u32 socket_id; + + unsigned long status; + struct hinic_rxq_stats rxq_stats; + + struct hinic_nic_dev *nic_dev; + + struct hinic_rx_info *rx_info; + volatile struct hinic_rq_cqe *rx_cqe; + + dma_addr_t cqe_start_paddr; + void *cqe_start_vaddr; + struct rte_mempool *mb_pool; +}; + +int hinic_setup_rx_resources(struct hinic_rxq *rxq); + +void hinic_free_all_rx_resources(struct rte_eth_dev *eth_dev); + +void hinic_free_all_rx_mbuf(struct rte_eth_dev *eth_dev); + +void hinic_free_rx_resources(struct hinic_rxq *rxq); + +u16 hinic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, u16 nb_pkts); + +void hinic_free_all_rx_mbufs(struct hinic_rxq *rxq); + +void hinic_rx_alloc_pkts(struct hinic_rxq *rxq); + +void hinic_rxq_get_stats(struct hinic_rxq *rxq, struct hinic_rxq_stats *stats); + +void hinic_rxq_stats_reset(struct hinic_rxq *rxq); + +int hinic_config_mq_mode(struct rte_eth_dev *dev, bool on); + +int hinic_rx_configure(struct rte_eth_dev *dev); + +void hinic_rx_remove_configure(struct rte_eth_dev *dev); + +void hinic_get_func_rx_buf_size(struct hinic_nic_dev *nic_dev); + +int hinic_create_rq(struct hinic_hwdev *hwdev, u16 q_id, + u16 rq_depth, unsigned int socket_id); + +void hinic_destroy_rq(struct hinic_hwdev *hwdev, u16 q_id); + +#endif /* _HINIC_PMD_RX_H_ */ diff --git a/src/spdk/dpdk/drivers/net/hinic/hinic_pmd_tx.c b/src/spdk/dpdk/drivers/net/hinic/hinic_pmd_tx.c new file mode 100644 index 000000000..4d999678f --- /dev/null +++ b/src/spdk/dpdk/drivers/net/hinic/hinic_pmd_tx.c @@ -0,0 +1,1334 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Huawei Technologies Co., Ltd + */ + +#include +#include +#include +#include +#include +#ifdef __ARM64_NEON__ +#include +#endif + +#include 
"base/hinic_compat.h" +#include "base/hinic_pmd_hwdev.h" +#include "base/hinic_pmd_hwif.h" +#include "base/hinic_pmd_wq.h" +#include "base/hinic_pmd_nicio.h" +#include "base/hinic_pmd_niccfg.h" +#include "hinic_pmd_ethdev.h" +#include "hinic_pmd_tx.h" + +/* packet header and tx offload info */ +#define ETHER_LEN_NO_VLAN 14 +#define ETHER_LEN_WITH_VLAN 18 +#define HEADER_LEN_OFFSET 2 +#define VXLANLEN 8 +#define MAX_PLD_OFFSET 221 +#define MAX_SINGLE_SGE_SIZE 65536 +#define TSO_ENABLE 1 +#define TX_MSS_DEFAULT 0x3E00 +#define TX_MSS_MIN 0x50 + +#define HINIC_NONTSO_PKT_MAX_SGE 17 /* non-tso max sge 17 */ +#define HINIC_NONTSO_SEG_NUM_INVALID(num) \ + ((num) > HINIC_NONTSO_PKT_MAX_SGE) + +#define HINIC_TSO_PKT_MAX_SGE 127 /* tso max sge 127 */ +#define HINIC_TSO_SEG_NUM_INVALID(num) ((num) > HINIC_TSO_PKT_MAX_SGE) + +#define HINIC_TX_OUTER_CHECKSUM_FLAG_SET 1 +#define HINIC_TX_OUTER_CHECKSUM_FLAG_NO_SET 0 + +/* sizeof(struct hinic_sq_bufdesc) == 16, shift 4 */ +#define HINIC_BUF_DESC_SIZE(nr_descs) (SIZE_8BYTES(((u32)nr_descs) << 4)) + +#define MASKED_SQ_IDX(sq, idx) ((idx) & (sq)->wq->mask) + +/* SQ_CTRL */ +#define SQ_CTRL_BUFDESC_SECT_LEN_SHIFT 0 +#define SQ_CTRL_TASKSECT_LEN_SHIFT 16 +#define SQ_CTRL_DATA_FORMAT_SHIFT 22 +#define SQ_CTRL_LEN_SHIFT 29 +#define SQ_CTRL_OWNER_SHIFT 31 + +#define SQ_CTRL_BUFDESC_SECT_LEN_MASK 0xFFU +#define SQ_CTRL_TASKSECT_LEN_MASK 0x1FU +#define SQ_CTRL_DATA_FORMAT_MASK 0x1U +#define SQ_CTRL_LEN_MASK 0x3U +#define SQ_CTRL_OWNER_MASK 0x1U + +#define SQ_CTRL_SET(val, member) \ + (((val) & SQ_CTRL_##member##_MASK) << SQ_CTRL_##member##_SHIFT) + +#define SQ_CTRL_QUEUE_INFO_PLDOFF_SHIFT 2 +#define SQ_CTRL_QUEUE_INFO_UFO_SHIFT 10 +#define SQ_CTRL_QUEUE_INFO_TSO_SHIFT 11 +#define SQ_CTRL_QUEUE_INFO_TCPUDP_CS_SHIFT 12 +#define SQ_CTRL_QUEUE_INFO_MSS_SHIFT 13 +#define SQ_CTRL_QUEUE_INFO_SCTP_SHIFT 27 +#define SQ_CTRL_QUEUE_INFO_UC_SHIFT 28 +#define SQ_CTRL_QUEUE_INFO_PRI_SHIFT 29 + +#define SQ_CTRL_QUEUE_INFO_PLDOFF_MASK 0xFFU +#define SQ_CTRL_QUEUE_INFO_UFO_MASK 0x1U +#define SQ_CTRL_QUEUE_INFO_TSO_MASK 0x1U +#define SQ_CTRL_QUEUE_INFO_TCPUDP_CS_MASK 0x1U +#define SQ_CTRL_QUEUE_INFO_MSS_MASK 0x3FFFU +#define SQ_CTRL_QUEUE_INFO_SCTP_MASK 0x1U +#define SQ_CTRL_QUEUE_INFO_UC_MASK 0x1U +#define SQ_CTRL_QUEUE_INFO_PRI_MASK 0x7U + +#define SQ_CTRL_QUEUE_INFO_SET(val, member) \ + (((u32)(val) & SQ_CTRL_QUEUE_INFO_##member##_MASK) << \ + SQ_CTRL_QUEUE_INFO_##member##_SHIFT) + +#define SQ_CTRL_QUEUE_INFO_GET(val, member) \ + (((val) >> SQ_CTRL_QUEUE_INFO_##member##_SHIFT) & \ + SQ_CTRL_QUEUE_INFO_##member##_MASK) + +#define SQ_CTRL_QUEUE_INFO_CLEAR(val, member) \ + ((val) & (~(SQ_CTRL_QUEUE_INFO_##member##_MASK << \ + SQ_CTRL_QUEUE_INFO_##member##_SHIFT))) + +#define SQ_TASK_INFO0_L2HDR_LEN_SHIFT 0 +#define SQ_TASK_INFO0_L4OFFLOAD_SHIFT 8 +#define SQ_TASK_INFO0_INNER_L3TYPE_SHIFT 10 +#define SQ_TASK_INFO0_VLAN_OFFLOAD_SHIFT 12 +#define SQ_TASK_INFO0_PARSE_FLAG_SHIFT 13 +#define SQ_TASK_INFO0_UFO_AVD_SHIFT 14 +#define SQ_TASK_INFO0_TSO_UFO_SHIFT 15 +#define SQ_TASK_INFO0_VLAN_TAG_SHIFT 16 + +#define SQ_TASK_INFO0_L2HDR_LEN_MASK 0xFFU +#define SQ_TASK_INFO0_L4OFFLOAD_MASK 0x3U +#define SQ_TASK_INFO0_INNER_L3TYPE_MASK 0x3U +#define SQ_TASK_INFO0_VLAN_OFFLOAD_MASK 0x1U +#define SQ_TASK_INFO0_PARSE_FLAG_MASK 0x1U +#define SQ_TASK_INFO0_UFO_AVD_MASK 0x1U +#define SQ_TASK_INFO0_TSO_UFO_MASK 0x1U +#define SQ_TASK_INFO0_VLAN_TAG_MASK 0xFFFFU + +#define SQ_TASK_INFO0_SET(val, member) \ + (((u32)(val) & SQ_TASK_INFO0_##member##_MASK) << \ + SQ_TASK_INFO0_##member##_SHIFT) + +#define 
SQ_TASK_INFO1_MD_TYPE_SHIFT 8 +#define SQ_TASK_INFO1_INNER_L4LEN_SHIFT 16 +#define SQ_TASK_INFO1_INNER_L3LEN_SHIFT 24 + +#define SQ_TASK_INFO1_MD_TYPE_MASK 0xFFU +#define SQ_TASK_INFO1_INNER_L4LEN_MASK 0xFFU +#define SQ_TASK_INFO1_INNER_L3LEN_MASK 0xFFU + +#define SQ_TASK_INFO1_SET(val, member) \ + (((val) & SQ_TASK_INFO1_##member##_MASK) << \ + SQ_TASK_INFO1_##member##_SHIFT) + +#define SQ_TASK_INFO2_TUNNEL_L4LEN_SHIFT 0 +#define SQ_TASK_INFO2_OUTER_L3LEN_SHIFT 8 +#define SQ_TASK_INFO2_TUNNEL_L4TYPE_SHIFT 16 +#define SQ_TASK_INFO2_OUTER_L3TYPE_SHIFT 24 + +#define SQ_TASK_INFO2_TUNNEL_L4LEN_MASK 0xFFU +#define SQ_TASK_INFO2_OUTER_L3LEN_MASK 0xFFU +#define SQ_TASK_INFO2_TUNNEL_L4TYPE_MASK 0x7U +#define SQ_TASK_INFO2_OUTER_L3TYPE_MASK 0x3U + +#define SQ_TASK_INFO2_SET(val, member) \ + (((val) & SQ_TASK_INFO2_##member##_MASK) << \ + SQ_TASK_INFO2_##member##_SHIFT) + +#define SQ_TASK_INFO4_L2TYPE_SHIFT 31 + +#define SQ_TASK_INFO4_L2TYPE_MASK 0x1U + +#define SQ_TASK_INFO4_SET(val, member) \ + (((u32)(val) & SQ_TASK_INFO4_##member##_MASK) << \ + SQ_TASK_INFO4_##member##_SHIFT) + +/* SQ_DB */ +#define SQ_DB_OFF 0x00000800 +#define SQ_DB_INFO_HI_PI_SHIFT 0 +#define SQ_DB_INFO_QID_SHIFT 8 +#define SQ_DB_INFO_CFLAG_SHIFT 23 +#define SQ_DB_INFO_COS_SHIFT 24 +#define SQ_DB_INFO_TYPE_SHIFT 27 + +#define SQ_DB_INFO_HI_PI_MASK 0xFFU +#define SQ_DB_INFO_QID_MASK 0x3FFU +#define SQ_DB_INFO_CFLAG_MASK 0x1U +#define SQ_DB_INFO_COS_MASK 0x7U +#define SQ_DB_INFO_TYPE_MASK 0x1FU +#define SQ_DB_INFO_SET(val, member) \ + (((u32)(val) & SQ_DB_INFO_##member##_MASK) << \ + SQ_DB_INFO_##member##_SHIFT) + +#define SQ_DB 1 +#define SQ_CFLAG_DP 0 /* CFLAG_DATA_PATH */ + +#define SQ_DB_PI_LOW_MASK 0xFF +#define SQ_DB_PI_LOW(pi) ((pi) & SQ_DB_PI_LOW_MASK) +#define SQ_DB_PI_HI_SHIFT 8 +#define SQ_DB_PI_HIGH(pi) ((pi) >> SQ_DB_PI_HI_SHIFT) +#define SQ_DB_ADDR(sq, pi) \ + ((u64 *)((u8 __iomem *)((sq)->db_addr) + SQ_DB_OFF) + SQ_DB_PI_LOW(pi)) + +/* txq wq operations */ +#define HINIC_GET_SQ_WQE_MASK(txq) ((txq)->wq->mask) + +#define HINIC_GET_SQ_HW_CI(txq) \ + ((be16_to_cpu(*(txq)->cons_idx_addr)) & HINIC_GET_SQ_WQE_MASK(txq)) + +#define HINIC_GET_SQ_LOCAL_CI(txq) \ + (((txq)->wq->cons_idx) & HINIC_GET_SQ_WQE_MASK(txq)) + +#define HINIC_UPDATE_SQ_LOCAL_CI(txq, wqebb_cnt) \ + do { \ + (txq)->wq->cons_idx += wqebb_cnt; \ + (txq)->wq->delta += wqebb_cnt; \ + } while (0) + +#define HINIC_GET_SQ_FREE_WQEBBS(txq) ((txq)->wq->delta - 1) + +#define HINIC_IS_SQ_EMPTY(txq) (((txq)->wq->delta) == ((txq)->q_depth)) + +#define BUF_DESC_SIZE_SHIFT 4 + +#define HINIC_SQ_WQE_SIZE(num_sge) \ + (sizeof(struct hinic_sq_ctrl) + sizeof(struct hinic_sq_task) + \ + (unsigned int)((num_sge) << BUF_DESC_SIZE_SHIFT)) + +#define HINIC_SQ_WQEBB_CNT(num_sge) \ + (int)(ALIGN(HINIC_SQ_WQE_SIZE((u32)num_sge), \ + HINIC_SQ_WQEBB_SIZE) >> HINIC_SQ_WQEBB_SHIFT) + + +static inline void hinic_sq_wqe_cpu_to_be32(void *data, int nr_wqebb) +{ +#if defined(__X86_64_SSE__) + int i; + __m128i *wqe_line = (__m128i *)data; + __m128i shuf_mask = _mm_set_epi8(12, 13, 14, 15, 8, 9, 10, + 11, 4, 5, 6, 7, 0, 1, 2, 3); + + for (i = 0; i < nr_wqebb; i++) { + /* convert 64B wqebb using 4 SSE instructions */ + wqe_line[0] = _mm_shuffle_epi8(wqe_line[0], shuf_mask); + wqe_line[1] = _mm_shuffle_epi8(wqe_line[1], shuf_mask); + wqe_line[2] = _mm_shuffle_epi8(wqe_line[2], shuf_mask); + wqe_line[3] = _mm_shuffle_epi8(wqe_line[3], shuf_mask); + wqe_line += 4; + } +#elif defined(__ARM64_NEON__) + int i; + uint8x16_t *wqe_line = (uint8x16_t *)data; + const uint8x16_t shuf_mask = {3, 2, 
1, 0, 7, 6, 5, 4, 11, 10, + 9, 8, 15, 14, 13, 12}; + + for (i = 0; i < nr_wqebb; i++) { + wqe_line[0] = vqtbl1q_u8(wqe_line[0], shuf_mask); + wqe_line[1] = vqtbl1q_u8(wqe_line[1], shuf_mask); + wqe_line[2] = vqtbl1q_u8(wqe_line[2], shuf_mask); + wqe_line[3] = vqtbl1q_u8(wqe_line[3], shuf_mask); + wqe_line += 4; + } +#else + hinic_cpu_to_be32(data, nr_wqebb * HINIC_SQ_WQEBB_SIZE); +#endif +} + +static inline void hinic_sge_cpu_to_be32(void *data, int nr_sge) +{ +#if defined(__X86_64_SSE__) + int i; + __m128i *sge_line = (__m128i *)data; + __m128i shuf_mask = _mm_set_epi8(12, 13, 14, 15, 8, 9, 10, + 11, 4, 5, 6, 7, 0, 1, 2, 3); + + for (i = 0; i < nr_sge; i++) { + /* convert 16B sge using 1 SSE instructions */ + *sge_line = _mm_shuffle_epi8(*sge_line, shuf_mask); + sge_line++; + } +#elif defined(__ARM64_NEON__) + int i; + uint8x16_t *sge_line = (uint8x16_t *)data; + const uint8x16_t shuf_mask = {3, 2, 1, 0, 7, 6, 5, 4, 11, 10, + 9, 8, 15, 14, 13, 12}; + + for (i = 0; i < nr_sge; i++) { + *sge_line = vqtbl1q_u8(*sge_line, shuf_mask); + sge_line++; + } +#else + hinic_cpu_to_be32(data, nr_sge * sizeof(struct hinic_sq_bufdesc)); +#endif +} + +void hinic_txq_get_stats(struct hinic_txq *txq, struct hinic_txq_stats *stats) +{ + if (!txq || !stats) { + PMD_DRV_LOG(ERR, "Txq or stats is NULL"); + return; + } + + memcpy(stats, &txq->txq_stats, sizeof(txq->txq_stats)); +} + +void hinic_txq_stats_reset(struct hinic_txq *txq) +{ + struct hinic_txq_stats *txq_stats; + + if (txq == NULL) + return; + + txq_stats = &txq->txq_stats; + memset(txq_stats, 0, sizeof(*txq_stats)); +} + +static inline struct rte_mbuf *hinic_copy_tx_mbuf(struct hinic_nic_dev *nic_dev, + struct rte_mbuf *mbuf, + u16 sge_cnt) +{ + struct rte_mbuf *dst_mbuf; + u32 offset = 0; + u16 i; + + if (unlikely(!nic_dev->cpy_mpool)) + return NULL; + + dst_mbuf = rte_pktmbuf_alloc(nic_dev->cpy_mpool); + if (unlikely(!dst_mbuf)) + return NULL; + + dst_mbuf->data_off = 0; + for (i = 0; i < sge_cnt; i++) { + rte_memcpy((char *)dst_mbuf->buf_addr + offset, + (char *)mbuf->buf_addr + mbuf->data_off, + mbuf->data_len); + dst_mbuf->data_len += mbuf->data_len; + offset += mbuf->data_len; + mbuf = mbuf->next; + } + + dst_mbuf->pkt_len = dst_mbuf->data_len; + + return dst_mbuf; +} + +static inline bool hinic_mbuf_dma_map_sge(struct hinic_txq *txq, + struct rte_mbuf *mbuf, + struct hinic_sq_bufdesc *sges, + struct hinic_wqe_info *sqe_info) +{ + dma_addr_t dma_addr; + u16 i, around_sges; + u16 nb_segs = sqe_info->sge_cnt - sqe_info->cpy_mbuf_cnt; + u16 real_nb_segs = mbuf->nb_segs; + struct hinic_sq_bufdesc *sge_idx = sges; + + if (unlikely(sqe_info->around)) { + /* parts of wqe is in sq bottom while parts + * of wqe is in sq head + */ + i = 0; + for (sge_idx = sges; (u64)sge_idx <= txq->sq_bot_sge_addr; + sge_idx++) { + if (unlikely(mbuf == NULL)) { + txq->txq_stats.mbuf_null++; + return false; + } + + dma_addr = rte_mbuf_data_iova(mbuf); + if (unlikely(mbuf->data_len == 0)) { + txq->txq_stats.sge_len0++; + return false; + } + hinic_set_sge((struct hinic_sge *)sge_idx, dma_addr, + mbuf->data_len); + mbuf = mbuf->next; + i++; + } + + around_sges = nb_segs - i; + sge_idx = (struct hinic_sq_bufdesc *) + ((void *)txq->sq_head_addr); + for (; i < nb_segs; i++) { + if (unlikely(mbuf == NULL)) { + txq->txq_stats.mbuf_null++; + return false; + } + + dma_addr = rte_mbuf_data_iova(mbuf); + if (unlikely(mbuf->data_len == 0)) { + txq->txq_stats.sge_len0++; + return false; + } + hinic_set_sge((struct hinic_sge *)sge_idx, dma_addr, + mbuf->data_len); + mbuf = mbuf->next; 
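+ /* step to the next SGE slot at the SQ head, in lockstep with the mbuf chain */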
+ sge_idx++; + } + + /* covert sges at head to big endian */ + hinic_sge_cpu_to_be32((void *)txq->sq_head_addr, around_sges); + } else { + /* wqe is in continuous space */ + for (i = 0; i < nb_segs; i++) { + if (unlikely(mbuf == NULL)) { + txq->txq_stats.mbuf_null++; + return false; + } + + dma_addr = rte_mbuf_data_iova(mbuf); + if (unlikely(mbuf->data_len == 0)) { + txq->txq_stats.sge_len0++; + return false; + } + hinic_set_sge((struct hinic_sge *)sge_idx, dma_addr, + mbuf->data_len); + mbuf = mbuf->next; + sge_idx++; + } + } + + /* for now: support non-tso over 17 sge, copy the last 2 mbuf */ + if (unlikely(sqe_info->cpy_mbuf_cnt != 0)) { + /* copy invalid mbuf segs to a valid buffer, lost performance */ + txq->txq_stats.cpy_pkts += 1; + mbuf = hinic_copy_tx_mbuf(txq->nic_dev, mbuf, + real_nb_segs - nb_segs); + if (unlikely(!mbuf)) + return false; + + txq->tx_info[sqe_info->pi].cpy_mbuf = mbuf; + + /* deal with the last mbuf */ + dma_addr = rte_mbuf_data_iova(mbuf); + if (unlikely(mbuf->data_len == 0)) { + txq->txq_stats.sge_len0++; + return false; + } + hinic_set_sge((struct hinic_sge *)sge_idx, dma_addr, + mbuf->data_len); + if (unlikely(sqe_info->around)) + hinic_sge_cpu_to_be32((void *)sge_idx, 1); + } + + return true; +} + +static inline void hinic_fill_sq_wqe_header(struct hinic_sq_ctrl *ctrl, + u32 queue_info, int nr_descs, + u8 owner) +{ + u32 ctrl_size, task_size, bufdesc_size; + + ctrl_size = SIZE_8BYTES(sizeof(struct hinic_sq_ctrl)); + task_size = SIZE_8BYTES(sizeof(struct hinic_sq_task)); + bufdesc_size = HINIC_BUF_DESC_SIZE(nr_descs); + + ctrl->ctrl_fmt = SQ_CTRL_SET(bufdesc_size, BUFDESC_SECT_LEN) | + SQ_CTRL_SET(task_size, TASKSECT_LEN) | + SQ_CTRL_SET(SQ_NORMAL_WQE, DATA_FORMAT) | + SQ_CTRL_SET(ctrl_size, LEN) | + SQ_CTRL_SET(owner, OWNER); + + ctrl->queue_info = queue_info; + ctrl->queue_info |= SQ_CTRL_QUEUE_INFO_SET(1U, UC); + + if (!SQ_CTRL_QUEUE_INFO_GET(ctrl->queue_info, MSS)) { + ctrl->queue_info |= + SQ_CTRL_QUEUE_INFO_SET(TX_MSS_DEFAULT, MSS); + } else if (SQ_CTRL_QUEUE_INFO_GET(ctrl->queue_info, MSS) < TX_MSS_MIN) { + /* mss should not be less than 80 */ + ctrl->queue_info = + SQ_CTRL_QUEUE_INFO_CLEAR(ctrl->queue_info, MSS); + ctrl->queue_info |= SQ_CTRL_QUEUE_INFO_SET(TX_MSS_MIN, MSS); + } +} + +static inline bool hinic_is_tso_sge_valid(struct rte_mbuf *mbuf, + struct hinic_tx_offload_info + *poff_info, + struct hinic_wqe_info *sqe_info) +{ + u32 total_len, limit_len, checked_len, left_len, adjust_mss; + u32 i, first_mss_sges, left_sges; + struct rte_mbuf *mbuf_head, *mbuf_pre; + + left_sges = mbuf->nb_segs; + mbuf_head = mbuf; + + /* tso sge number validation */ + if (unlikely(left_sges >= HINIC_NONTSO_PKT_MAX_SGE)) { + checked_len = 0; + adjust_mss = mbuf->tso_segsz >= TX_MSS_MIN ? 
+ mbuf->tso_segsz : TX_MSS_MIN; + limit_len = adjust_mss + poff_info->payload_offset; + first_mss_sges = HINIC_NONTSO_PKT_MAX_SGE; + + /* each continues 17 mbufs segmust do one check */ + while (left_sges >= HINIC_NONTSO_PKT_MAX_SGE) { + /* total len of first 16 mbufs must equal + * or more than limit_len + */ + total_len = 0; + for (i = 0; i < first_mss_sges; i++) { + total_len += mbuf->data_len; + mbuf_pre = mbuf; + mbuf = mbuf->next; + if (total_len >= limit_len) { + limit_len = adjust_mss; + break; + } + } + + checked_len += total_len; + + /* try to copy if not valid */ + if (unlikely(first_mss_sges == i)) { + left_sges -= first_mss_sges; + checked_len -= mbuf_pre->data_len; + + left_len = mbuf_head->pkt_len - checked_len; + if (left_len > HINIC_COPY_MBUF_SIZE) + return false; + + sqe_info->sge_cnt = mbuf_head->nb_segs - + left_sges; + sqe_info->cpy_mbuf_cnt = 1; + + return true; + } + first_mss_sges = (HINIC_NONTSO_PKT_MAX_SGE - 1); + + /* continue next 16 mbufs */ + left_sges -= (i + 1); + } /* end of while */ + } + + sqe_info->sge_cnt = mbuf_head->nb_segs; + return true; +} + +static inline void +hinic_set_l4_csum_info(struct hinic_sq_task *task, + u32 *queue_info, struct hinic_tx_offload_info *poff_info) +{ + u32 tcp_udp_cs, sctp = 0; + u16 l2hdr_len; + + if (unlikely(poff_info->inner_l4_type == SCTP_OFFLOAD_ENABLE)) + sctp = 1; + + tcp_udp_cs = poff_info->inner_l4_tcp_udp; + + if (poff_info->tunnel_type == TUNNEL_UDP_CSUM || + poff_info->tunnel_type == TUNNEL_UDP_NO_CSUM) { + l2hdr_len = poff_info->outer_l2_len; + + task->pkt_info2 |= + SQ_TASK_INFO2_SET(poff_info->outer_l3_type, OUTER_L3TYPE) | + SQ_TASK_INFO2_SET(poff_info->outer_l3_len, OUTER_L3LEN); + task->pkt_info2 |= + SQ_TASK_INFO2_SET(poff_info->tunnel_type, TUNNEL_L4TYPE) | + SQ_TASK_INFO2_SET(poff_info->tunnel_length, TUNNEL_L4LEN); + } else { + l2hdr_len = poff_info->inner_l2_len; + } + + task->pkt_info0 |= SQ_TASK_INFO0_SET(l2hdr_len, L2HDR_LEN); + task->pkt_info1 |= + SQ_TASK_INFO1_SET(poff_info->inner_l3_len, INNER_L3LEN); + task->pkt_info0 |= + SQ_TASK_INFO0_SET(poff_info->inner_l3_type, INNER_L3TYPE); + task->pkt_info1 |= + SQ_TASK_INFO1_SET(poff_info->inner_l4_len, INNER_L4LEN); + task->pkt_info0 |= + SQ_TASK_INFO0_SET(poff_info->inner_l4_type, L4OFFLOAD); + *queue_info |= + SQ_CTRL_QUEUE_INFO_SET(poff_info->payload_offset, PLDOFF) | + SQ_CTRL_QUEUE_INFO_SET(tcp_udp_cs, TCPUDP_CS) | + SQ_CTRL_QUEUE_INFO_SET(sctp, SCTP); +} + +static inline void +hinic_set_tso_info(struct hinic_sq_task *task, + u32 *queue_info, struct rte_mbuf *mbuf, + struct hinic_tx_offload_info *poff_info) +{ + hinic_set_l4_csum_info(task, queue_info, poff_info); + + /* wqe for tso */ + task->pkt_info0 |= + SQ_TASK_INFO0_SET(poff_info->inner_l3_type, INNER_L3TYPE); + task->pkt_info0 |= SQ_TASK_INFO0_SET(TSO_ENABLE, TSO_UFO); + *queue_info |= SQ_CTRL_QUEUE_INFO_SET(TSO_ENABLE, TSO); + /* qsf was initialized in prepare_sq_wqe */ + *queue_info = SQ_CTRL_QUEUE_INFO_CLEAR(*queue_info, MSS); + *queue_info |= SQ_CTRL_QUEUE_INFO_SET(mbuf->tso_segsz, MSS); +} + +static inline void +hinic_set_vlan_tx_offload(struct hinic_sq_task *task, + u32 *queue_info, u16 vlan_tag, u16 vlan_pri) +{ + task->pkt_info0 |= SQ_TASK_INFO0_SET(vlan_tag, VLAN_TAG) | + SQ_TASK_INFO0_SET(1U, VLAN_OFFLOAD); + + *queue_info |= SQ_CTRL_QUEUE_INFO_SET(vlan_pri, PRI); +} + +static inline void +hinic_fill_tx_offload_info(struct rte_mbuf *mbuf, + struct hinic_sq_task *task, u32 *queue_info, + struct hinic_tx_offload_info *tx_off_info) +{ + u16 vlan_tag; + uint64_t ol_flags = 
mbuf->ol_flags; + + /* clear DW0~2 of task section for offload */ + task->pkt_info0 = 0; + task->pkt_info1 = 0; + task->pkt_info2 = 0; + + /* Base VLAN */ + if (unlikely(ol_flags & PKT_TX_VLAN_PKT)) { + vlan_tag = mbuf->vlan_tci; + hinic_set_vlan_tx_offload(task, queue_info, vlan_tag, + vlan_tag >> VLAN_PRIO_SHIFT); + } + + /* non checksum or tso */ + if (unlikely(!(ol_flags & HINIC_TX_CKSUM_OFFLOAD_MASK))) + return; + + if ((ol_flags & PKT_TX_TCP_SEG)) + /* set tso info for task and qsf */ + hinic_set_tso_info(task, queue_info, mbuf, tx_off_info); + else /* just support l4 checksum offload */ + hinic_set_l4_csum_info(task, queue_info, tx_off_info); +} + +static inline void hinic_xmit_mbuf_cleanup(struct hinic_txq *txq) +{ + struct hinic_tx_info *tx_info; + struct rte_mbuf *mbuf, *m, *mbuf_free[HINIC_MAX_TX_FREE_BULK]; + int i, nb_free = 0; + u16 hw_ci, sw_ci, sq_mask; + int wqebb_cnt = 0; + + hw_ci = HINIC_GET_SQ_HW_CI(txq); + sw_ci = HINIC_GET_SQ_LOCAL_CI(txq); + sq_mask = HINIC_GET_SQ_WQE_MASK(txq); + + for (i = 0; i < txq->tx_free_thresh; ++i) { + tx_info = &txq->tx_info[sw_ci]; + if (hw_ci == sw_ci || + (((hw_ci - sw_ci) & sq_mask) < tx_info->wqebb_cnt)) + break; + + sw_ci = (sw_ci + tx_info->wqebb_cnt) & sq_mask; + + if (unlikely(tx_info->cpy_mbuf != NULL)) { + rte_pktmbuf_free(tx_info->cpy_mbuf); + tx_info->cpy_mbuf = NULL; + } + + wqebb_cnt += tx_info->wqebb_cnt; + mbuf = tx_info->mbuf; + + if (likely(mbuf->nb_segs == 1)) { + m = rte_pktmbuf_prefree_seg(mbuf); + tx_info->mbuf = NULL; + + if (unlikely(m == NULL)) + continue; + + mbuf_free[nb_free++] = m; + if (unlikely(m->pool != mbuf_free[0]->pool || + nb_free >= HINIC_MAX_TX_FREE_BULK)) { + rte_mempool_put_bulk(mbuf_free[0]->pool, + (void **)mbuf_free, (nb_free - 1)); + nb_free = 0; + mbuf_free[nb_free++] = m; + } + } else { + rte_pktmbuf_free(mbuf); + tx_info->mbuf = NULL; + } + } + + if (nb_free > 0) + rte_mempool_put_bulk(mbuf_free[0]->pool, (void **)mbuf_free, + nb_free); + + HINIC_UPDATE_SQ_LOCAL_CI(txq, wqebb_cnt); +} + +static inline struct hinic_sq_wqe * +hinic_get_sq_wqe(struct hinic_txq *txq, int wqebb_cnt, + struct hinic_wqe_info *wqe_info) +{ + u32 cur_pi, end_pi; + u16 remain_wqebbs; + struct hinic_sq *sq = txq->sq; + struct hinic_wq *wq = txq->wq; + + /* record current pi */ + cur_pi = MASKED_WQE_IDX(wq, wq->prod_idx); + end_pi = cur_pi + wqebb_cnt; + + /* update next pi and delta */ + wq->prod_idx += wqebb_cnt; + wq->delta -= wqebb_cnt; + + /* return current pi and owner */ + wqe_info->pi = cur_pi; + wqe_info->owner = sq->owner; + wqe_info->around = 0; + wqe_info->seq_wqebbs = wqebb_cnt; + + if (unlikely(end_pi >= txq->q_depth)) { + /* update owner of next prod_idx */ + sq->owner = !sq->owner; + + /* turn around to head */ + if (unlikely(end_pi > txq->q_depth)) { + wqe_info->around = 1; + remain_wqebbs = txq->q_depth - cur_pi; + wqe_info->seq_wqebbs = remain_wqebbs; + } + } + + return (struct hinic_sq_wqe *)WQ_WQE_ADDR(wq, cur_pi); +} + +static inline uint16_t +hinic_ipv4_phdr_cksum(const struct rte_ipv4_hdr *ipv4_hdr, uint64_t ol_flags) +{ + struct ipv4_psd_header { + uint32_t src_addr; /* IP address of source host. */ + uint32_t dst_addr; /* IP address of destination host. */ + uint8_t zero; /* zero. */ + uint8_t proto; /* L4 protocol type. */ + uint16_t len; /* L4 length. 
*/ + } psd_hdr; + uint8_t ihl; + + psd_hdr.src_addr = ipv4_hdr->src_addr; + psd_hdr.dst_addr = ipv4_hdr->dst_addr; + psd_hdr.zero = 0; + psd_hdr.proto = ipv4_hdr->next_proto_id; + if (ol_flags & PKT_TX_TCP_SEG) { + psd_hdr.len = 0; + } else { + /* ipv4_hdr->version_ihl is uint8_t big endian, ihl locates + * lower 4 bits and unit is 4 bytes + */ + ihl = (ipv4_hdr->version_ihl & 0xF) << 2; + psd_hdr.len = + rte_cpu_to_be_16(rte_be_to_cpu_16(ipv4_hdr->total_length) - + ihl); + } + return rte_raw_cksum(&psd_hdr, sizeof(psd_hdr)); +} + +static inline uint16_t +hinic_ipv6_phdr_cksum(const struct rte_ipv6_hdr *ipv6_hdr, uint64_t ol_flags) +{ + uint32_t sum; + struct { + uint32_t len; /* L4 length. */ + uint32_t proto; /* L4 protocol - top 3 bytes must be zero */ + } psd_hdr; + + psd_hdr.proto = (ipv6_hdr->proto << 24); + if (ol_flags & PKT_TX_TCP_SEG) + psd_hdr.len = 0; + else + psd_hdr.len = ipv6_hdr->payload_len; + + sum = __rte_raw_cksum(ipv6_hdr->src_addr, + sizeof(ipv6_hdr->src_addr) + sizeof(ipv6_hdr->dst_addr), 0); + sum = __rte_raw_cksum(&psd_hdr, sizeof(psd_hdr), sum); + return __rte_raw_cksum_reduce(sum); +} + +static inline void +hinic_get_pld_offset(struct rte_mbuf *m, struct hinic_tx_offload_info *off_info, + int outer_cs_flag) +{ + uint64_t ol_flags = m->ol_flags; + + if (outer_cs_flag == 1) { + if ((ol_flags & PKT_TX_UDP_CKSUM) == PKT_TX_UDP_CKSUM) { + off_info->payload_offset = m->outer_l2_len + + m->outer_l3_len + m->l2_len + m->l3_len; + } else if ((ol_flags & PKT_TX_TCP_CKSUM) || + (ol_flags & PKT_TX_TCP_SEG)) { + off_info->payload_offset = m->outer_l2_len + + m->outer_l3_len + m->l2_len + + m->l3_len + m->l4_len; + } + } else { + if ((ol_flags & PKT_TX_UDP_CKSUM) == PKT_TX_UDP_CKSUM) { + off_info->payload_offset = m->l2_len + m->l3_len; + } else if ((ol_flags & PKT_TX_TCP_CKSUM) || + (ol_flags & PKT_TX_TCP_SEG)) { + off_info->payload_offset = m->l2_len + m->l3_len + + m->l4_len; + } + } +} + +static inline void +hinic_analyze_tx_info(struct rte_mbuf *mbuf, + struct hinic_tx_offload_info *off_info) +{ + struct rte_ether_hdr *eth_hdr; + struct rte_vlan_hdr *vlan_hdr; + struct rte_ipv4_hdr *ip4h; + u16 pkt_type; + u8 *hdr; + + hdr = (u8 *)rte_pktmbuf_mtod(mbuf, u8*); + eth_hdr = (struct rte_ether_hdr *)hdr; + pkt_type = rte_be_to_cpu_16(eth_hdr->ether_type); + + if (pkt_type == RTE_ETHER_TYPE_VLAN) { + off_info->outer_l2_len = ETHER_LEN_WITH_VLAN; + vlan_hdr = (struct rte_vlan_hdr *)(hdr + 1); + pkt_type = rte_be_to_cpu_16(vlan_hdr->eth_proto); + } else { + off_info->outer_l2_len = ETHER_LEN_NO_VLAN; + } + + if (pkt_type == RTE_ETHER_TYPE_IPV4) { + ip4h = (struct rte_ipv4_hdr *)(hdr + off_info->outer_l2_len); + off_info->outer_l3_len = (ip4h->version_ihl & 0xf) << + HEADER_LEN_OFFSET; + } else if (pkt_type == RTE_ETHER_TYPE_IPV6) { + /* not support ipv6 extension header */ + off_info->outer_l3_len = sizeof(struct rte_ipv6_hdr); + } +} + +static inline int +hinic_tx_offload_pkt_prepare(struct rte_mbuf *m, + struct hinic_tx_offload_info *off_info) +{ + struct rte_ipv4_hdr *ipv4_hdr; + struct rte_ipv6_hdr *ipv6_hdr; + struct rte_tcp_hdr *tcp_hdr; + struct rte_udp_hdr *udp_hdr; + struct rte_ether_hdr *eth_hdr; + struct rte_vlan_hdr *vlan_hdr; + u16 eth_type = 0; + uint64_t inner_l3_offset; + uint64_t ol_flags = m->ol_flags; + + /* Check if the packets set available offload flags */ + if (!(ol_flags & HINIC_TX_CKSUM_OFFLOAD_MASK)) + return 0; + + /* Support only vxlan offload */ + if ((ol_flags & PKT_TX_TUNNEL_MASK) && + !(ol_flags & PKT_TX_TUNNEL_VXLAN)) + return -ENOTSUP; + +#ifdef 
RTE_LIBRTE_ETHDEV_DEBUG + if (rte_validate_tx_offload(m) != 0) + return -EINVAL; +#endif + + if (ol_flags & PKT_TX_TUNNEL_VXLAN) { + if ((ol_flags & PKT_TX_OUTER_IP_CKSUM) || + (ol_flags & PKT_TX_OUTER_IPV6) || + (ol_flags & PKT_TX_TCP_SEG)) { + inner_l3_offset = m->l2_len + m->outer_l2_len + + m->outer_l3_len; + off_info->outer_l2_len = m->outer_l2_len; + off_info->outer_l3_len = m->outer_l3_len; + /* just support vxlan tunneling pkt */ + off_info->inner_l2_len = m->l2_len - VXLANLEN - + sizeof(*udp_hdr); + off_info->inner_l3_len = m->l3_len; + off_info->inner_l4_len = m->l4_len; + off_info->tunnel_length = m->l2_len; + off_info->tunnel_type = TUNNEL_UDP_NO_CSUM; + + hinic_get_pld_offset(m, off_info, + HINIC_TX_OUTER_CHECKSUM_FLAG_SET); + } else { + inner_l3_offset = m->l2_len; + hinic_analyze_tx_info(m, off_info); + /* just support vxlan tunneling pkt */ + off_info->inner_l2_len = m->l2_len - VXLANLEN - + sizeof(*udp_hdr) - off_info->outer_l2_len - + off_info->outer_l3_len; + off_info->inner_l3_len = m->l3_len; + off_info->inner_l4_len = m->l4_len; + off_info->tunnel_length = m->l2_len - + off_info->outer_l2_len - off_info->outer_l3_len; + off_info->tunnel_type = TUNNEL_UDP_NO_CSUM; + + hinic_get_pld_offset(m, off_info, + HINIC_TX_OUTER_CHECKSUM_FLAG_NO_SET); + } + } else { + inner_l3_offset = m->l2_len; + off_info->inner_l2_len = m->l2_len; + off_info->inner_l3_len = m->l3_len; + off_info->inner_l4_len = m->l4_len; + off_info->tunnel_type = NOT_TUNNEL; + + hinic_get_pld_offset(m, off_info, + HINIC_TX_OUTER_CHECKSUM_FLAG_NO_SET); + } + + /* invalid udp or tcp header */ + if (unlikely(off_info->payload_offset > MAX_PLD_OFFSET)) + return -EINVAL; + + /* Process outter udp pseudo-header checksum */ + if ((ol_flags & PKT_TX_TUNNEL_VXLAN) && ((ol_flags & PKT_TX_TCP_SEG) || + (ol_flags & PKT_TX_OUTER_IP_CKSUM) || + (ol_flags & PKT_TX_OUTER_IPV6))) { + + /* inner_l4_tcp_udp csum should be setted to calculate outter + * udp checksum when vxlan packets without inner l3 and l4 + */ + off_info->inner_l4_tcp_udp = 1; + + eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *); + eth_type = rte_be_to_cpu_16(eth_hdr->ether_type); + + if (eth_type == RTE_ETHER_TYPE_VLAN) { + vlan_hdr = (struct rte_vlan_hdr *)(eth_hdr + 1); + eth_type = rte_be_to_cpu_16(vlan_hdr->eth_proto); + } + + if (eth_type == RTE_ETHER_TYPE_IPV4) { + ipv4_hdr = + rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *, + m->outer_l2_len); + off_info->outer_l3_type = IPV4_PKT_WITH_CHKSUM_OFFLOAD; + ipv4_hdr->hdr_checksum = 0; + + udp_hdr = (struct rte_udp_hdr *)((char *)ipv4_hdr + + m->outer_l3_len); + udp_hdr->dgram_cksum = 0; + } else if (eth_type == RTE_ETHER_TYPE_IPV6) { + off_info->outer_l3_type = IPV6_PKT; + ipv6_hdr = + rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *, + m->outer_l2_len); + + udp_hdr = + rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *, + (m->outer_l2_len + + m->outer_l3_len)); + udp_hdr->dgram_cksum = 0; + } + } else if (ol_flags & PKT_TX_OUTER_IPV4) { + off_info->tunnel_type = TUNNEL_UDP_NO_CSUM; + off_info->inner_l4_tcp_udp = 1; + off_info->outer_l3_type = IPV4_PKT_NO_CHKSUM_OFFLOAD; + } + + if (ol_flags & PKT_TX_IPV4) + off_info->inner_l3_type = (ol_flags & PKT_TX_IP_CKSUM) ? 
+ IPV4_PKT_WITH_CHKSUM_OFFLOAD : + IPV4_PKT_NO_CHKSUM_OFFLOAD; + else if (ol_flags & PKT_TX_IPV6) + off_info->inner_l3_type = IPV6_PKT; + + /* Process the pseudo-header checksum */ + if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_UDP_CKSUM) { + if (ol_flags & PKT_TX_IPV4) { + ipv4_hdr = + rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *, + inner_l3_offset); + + if (ol_flags & PKT_TX_IP_CKSUM) + ipv4_hdr->hdr_checksum = 0; + + udp_hdr = (struct rte_udp_hdr *)((char *)ipv4_hdr + + m->l3_len); + udp_hdr->dgram_cksum = + hinic_ipv4_phdr_cksum(ipv4_hdr, ol_flags); + } else { + ipv6_hdr = + rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *, + inner_l3_offset); + + udp_hdr = + rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *, + (inner_l3_offset + m->l3_len)); + udp_hdr->dgram_cksum = + hinic_ipv6_phdr_cksum(ipv6_hdr, ol_flags); + } + + off_info->inner_l4_type = UDP_OFFLOAD_ENABLE; + off_info->inner_l4_tcp_udp = 1; + } else if (((ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM) || + (ol_flags & PKT_TX_TCP_SEG)) { + if (ol_flags & PKT_TX_IPV4) { + ipv4_hdr = + rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *, + inner_l3_offset); + + if (ol_flags & PKT_TX_IP_CKSUM) + ipv4_hdr->hdr_checksum = 0; + + /* non-TSO tcp */ + tcp_hdr = (struct rte_tcp_hdr *)((char *)ipv4_hdr + + m->l3_len); + tcp_hdr->cksum = + hinic_ipv4_phdr_cksum(ipv4_hdr, ol_flags); + } else { + ipv6_hdr = + rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *, + inner_l3_offset); + /* non-TSO tcp */ + tcp_hdr = + rte_pktmbuf_mtod_offset(m, struct rte_tcp_hdr *, + (inner_l3_offset + m->l3_len)); + tcp_hdr->cksum = + hinic_ipv6_phdr_cksum(ipv6_hdr, ol_flags); + } + + off_info->inner_l4_type = TCP_OFFLOAD_ENABLE; + off_info->inner_l4_tcp_udp = 1; + } else if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_SCTP_CKSUM) { + off_info->inner_l4_type = SCTP_OFFLOAD_ENABLE; + off_info->inner_l4_tcp_udp = 0; + off_info->inner_l4_len = sizeof(struct rte_sctp_hdr); + } + + return 0; +} + +static inline bool hinic_get_sge_txoff_info(struct rte_mbuf *mbuf_pkt, + struct hinic_wqe_info *sqe_info, + struct hinic_tx_offload_info + *off_info) +{ + u16 i, total_len, sge_cnt = mbuf_pkt->nb_segs; + struct rte_mbuf *mbuf; + int ret; + + memset(off_info, 0, sizeof(*off_info)); + + ret = hinic_tx_offload_pkt_prepare(mbuf_pkt, off_info); + if (unlikely(ret)) + return false; + + sqe_info->cpy_mbuf_cnt = 0; + + /* non tso mbuf */ + if (likely(!(mbuf_pkt->ol_flags & PKT_TX_TCP_SEG))) { + if (unlikely(mbuf_pkt->pkt_len > MAX_SINGLE_SGE_SIZE)) { + /* non tso packet len must less than 64KB */ + return false; + } else if (unlikely(HINIC_NONTSO_SEG_NUM_INVALID(sge_cnt))) { + /* non tso packet buffer number must less than 17 + * the mbuf segs more than 17 must copy to one buffer + */ + total_len = 0; + mbuf = mbuf_pkt; + for (i = 0; i < (HINIC_NONTSO_PKT_MAX_SGE - 1) ; i++) { + total_len += mbuf->data_len; + mbuf = mbuf->next; + } + + /* default support copy total 4k mbuf segs */ + if ((u32)(total_len + (u16)HINIC_COPY_MBUF_SIZE) < + mbuf_pkt->pkt_len) + return false; + + sqe_info->sge_cnt = HINIC_NONTSO_PKT_MAX_SGE; + sqe_info->cpy_mbuf_cnt = 1; + return true; + } + + /* valid non tso mbuf */ + sqe_info->sge_cnt = sge_cnt; + } else { + /* tso mbuf */ + if (unlikely(HINIC_TSO_SEG_NUM_INVALID(sge_cnt))) + /* too many mbuf segs */ + return false; + + /* check tso mbuf segs are valid or not */ + if (unlikely(!hinic_is_tso_sge_valid(mbuf_pkt, + off_info, sqe_info))) + return false; + } + + return true; +} + +static inline void hinic_sq_write_db(struct hinic_sq *sq, int cos) +{ + u16 prod_idx; + 
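+ /* The masked 16-bit producer index is split for the doorbell: the low
+ * byte selects the 8-byte slot within the doorbell page (SQ_DB_ADDR)
+ * and the high byte is carried in db_info as HI_PI. For example, a
+ * masked pi of 0x1234 writes to slot 0x34 with HI_PI set to 0x12.
+ */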
u32 hi_prod_idx; + struct hinic_sq_db sq_db; + + prod_idx = MASKED_SQ_IDX(sq, sq->wq->prod_idx); + hi_prod_idx = SQ_DB_PI_HIGH(prod_idx); + + sq_db.db_info = SQ_DB_INFO_SET(hi_prod_idx, HI_PI) | + SQ_DB_INFO_SET(SQ_DB, TYPE) | + SQ_DB_INFO_SET(SQ_CFLAG_DP, CFLAG) | + SQ_DB_INFO_SET(cos, COS) | + SQ_DB_INFO_SET(sq->q_id, QID); + + /* Data should be written to HW in Big Endian Format */ + sq_db.db_info = cpu_to_be32(sq_db.db_info); + + /* Write all before the doorbell */ + rte_wmb(); + writel(sq_db.db_info, SQ_DB_ADDR(sq, prod_idx)); +} + +u16 hinic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, u16 nb_pkts) +{ + int free_wqebb_cnt, wqe_wqebb_cnt; + u32 queue_info, tx_bytes = 0; + u16 nb_tx; + struct hinic_wqe_info sqe_info; + struct hinic_tx_offload_info off_info; + struct rte_mbuf *mbuf_pkt; + struct hinic_txq *txq = tx_queue; + struct hinic_tx_info *tx_info; + struct hinic_sq_wqe *sq_wqe; + struct hinic_sq_task *task; + + /* reclaim tx mbuf before xmit new packet */ + if (HINIC_GET_SQ_FREE_WQEBBS(txq) < txq->tx_free_thresh) + hinic_xmit_mbuf_cleanup(txq); + + /* tx loop routine */ + for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) { + mbuf_pkt = *tx_pkts++; + queue_info = 0; + + /* 1. parse sge and tx offlod info from mbuf */ + if (unlikely(!hinic_get_sge_txoff_info(mbuf_pkt, + &sqe_info, &off_info))) { + txq->txq_stats.off_errs++; + break; + } + + /* 2. try to get enough wqebb */ + wqe_wqebb_cnt = HINIC_SQ_WQEBB_CNT(sqe_info.sge_cnt); + free_wqebb_cnt = HINIC_GET_SQ_FREE_WQEBBS(txq); + if (unlikely(wqe_wqebb_cnt > free_wqebb_cnt)) { + /* reclaim again */ + hinic_xmit_mbuf_cleanup(txq); + free_wqebb_cnt = HINIC_GET_SQ_FREE_WQEBBS(txq); + if (unlikely(wqe_wqebb_cnt > free_wqebb_cnt)) { + txq->txq_stats.tx_busy += (nb_pkts - nb_tx); + break; + } + } + + /* 3. get sq tail wqe address from wqe_page, + * sq have enough wqebb for this packet + */ + sq_wqe = hinic_get_sq_wqe(txq, wqe_wqebb_cnt, &sqe_info); + + /* 4. fill sq wqe sge section */ + if (unlikely(!hinic_mbuf_dma_map_sge(txq, mbuf_pkt, + sq_wqe->buf_descs, + &sqe_info))) { + hinic_return_sq_wqe(txq->nic_dev->hwdev, txq->q_id, + wqe_wqebb_cnt, sqe_info.owner); + txq->txq_stats.off_errs++; + break; + } + + /* 5. fill sq wqe task section and queue info */ + task = &sq_wqe->task; + + /* tx packet offload configure */ + hinic_fill_tx_offload_info(mbuf_pkt, task, &queue_info, + &off_info); + + /* 6. record tx info */ + tx_info = &txq->tx_info[sqe_info.pi]; + tx_info->mbuf = mbuf_pkt; + tx_info->wqebb_cnt = wqe_wqebb_cnt; + + /* 7. fill sq wqe header section */ + hinic_fill_sq_wqe_header(&sq_wqe->ctrl, queue_info, + sqe_info.sge_cnt, sqe_info.owner); + + /* 8.convert continue or bottom wqe byteorder to big endian */ + hinic_sq_wqe_cpu_to_be32(sq_wqe, sqe_info.seq_wqebbs); + + tx_bytes += mbuf_pkt->pkt_len; + } + + /* 9. 
write sq doorbell in burst mode */ + if (nb_tx) { + hinic_sq_write_db(txq->sq, txq->cos); + + txq->txq_stats.packets += nb_tx; + txq->txq_stats.bytes += tx_bytes; + } + txq->txq_stats.burst_pkts = nb_tx; + + return nb_tx; +} + +void hinic_free_all_tx_mbufs(struct hinic_txq *txq) +{ + u16 ci; + struct hinic_nic_dev *nic_dev = txq->nic_dev; + struct hinic_tx_info *tx_info; + int free_wqebbs = hinic_get_sq_free_wqebbs(nic_dev->hwdev, + txq->q_id) + 1; + + while (free_wqebbs < txq->q_depth) { + ci = hinic_get_sq_local_ci(nic_dev->hwdev, txq->q_id); + + tx_info = &txq->tx_info[ci]; + + if (unlikely(tx_info->cpy_mbuf != NULL)) { + rte_pktmbuf_free(tx_info->cpy_mbuf); + tx_info->cpy_mbuf = NULL; + } + + rte_pktmbuf_free(tx_info->mbuf); + hinic_update_sq_local_ci(nic_dev->hwdev, txq->q_id, + tx_info->wqebb_cnt); + + free_wqebbs += tx_info->wqebb_cnt; + tx_info->mbuf = NULL; + } +} + +void hinic_free_all_tx_resources(struct rte_eth_dev *eth_dev) +{ + u16 q_id; + struct hinic_nic_dev *nic_dev = + HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(eth_dev); + + for (q_id = 0; q_id < nic_dev->num_sq; q_id++) { + if (eth_dev->data->tx_queues != NULL) + eth_dev->data->tx_queues[q_id] = NULL; + + if (nic_dev->txqs[q_id] == NULL) + continue; + + /* stop tx queue free tx mbuf */ + hinic_free_all_tx_mbufs(nic_dev->txqs[q_id]); + hinic_free_tx_resources(nic_dev->txqs[q_id]); + + /* free txq */ + kfree(nic_dev->txqs[q_id]); + nic_dev->txqs[q_id] = NULL; + } +} + +void hinic_free_all_tx_mbuf(struct rte_eth_dev *eth_dev) +{ + u16 q_id; + struct hinic_nic_dev *nic_dev = + HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(eth_dev); + + for (q_id = 0; q_id < nic_dev->num_sq; q_id++) + /* stop tx queue free tx mbuf */ + hinic_free_all_tx_mbufs(nic_dev->txqs[q_id]); +} + +int hinic_setup_tx_resources(struct hinic_txq *txq) +{ + u64 tx_info_sz; + + tx_info_sz = txq->q_depth * sizeof(*txq->tx_info); + txq->tx_info = rte_zmalloc_socket("tx_info", tx_info_sz, + RTE_CACHE_LINE_SIZE, txq->socket_id); + if (!txq->tx_info) + return -ENOMEM; + + return HINIC_OK; +} + +void hinic_free_tx_resources(struct hinic_txq *txq) +{ + if (txq->tx_info == NULL) + return; + + rte_free(txq->tx_info); + txq->tx_info = NULL; +} + +int hinic_create_sq(struct hinic_hwdev *hwdev, u16 q_id, + u16 sq_depth, unsigned int socket_id) +{ + int err; + struct hinic_nic_io *nic_io = hwdev->nic_io; + struct hinic_qp *qp = &nic_io->qps[q_id]; + struct hinic_sq *sq = &qp->sq; + void __iomem *db_addr; + volatile u32 *ci_addr; + + sq->sq_depth = sq_depth; + nic_io->sq_depth = sq_depth; + + /* alloc wq */ + err = hinic_wq_allocate(nic_io->hwdev, &nic_io->sq_wq[q_id], + HINIC_SQ_WQEBB_SHIFT, nic_io->sq_depth, + socket_id); + if (err) { + PMD_DRV_LOG(ERR, "Failed to allocate WQ for SQ"); + return err; + } + + /* alloc sq doorbell space */ + err = hinic_alloc_db_addr(nic_io->hwdev, &db_addr); + if (err) { + PMD_DRV_LOG(ERR, "Failed to init db addr"); + goto alloc_db_err; + } + + /* clear hardware ci */ + ci_addr = (volatile u32 *)HINIC_CI_VADDR(nic_io->ci_vaddr_base, q_id); + *ci_addr = 0; + + sq->q_id = q_id; + sq->wq = &nic_io->sq_wq[q_id]; + sq->owner = 1; + sq->cons_idx_addr = (volatile u16 *)ci_addr; + sq->db_addr = db_addr; + + return HINIC_OK; + +alloc_db_err: + hinic_wq_free(nic_io->hwdev, &nic_io->sq_wq[q_id]); + + return err; +} + +void hinic_destroy_sq(struct hinic_hwdev *hwdev, u16 q_id) +{ + struct hinic_nic_io *nic_io; + struct hinic_qp *qp; + + nic_io = hwdev->nic_io; + qp = &nic_io->qps[q_id]; + + if (qp->sq.wq == NULL) + return; + + hinic_free_db_addr(nic_io->hwdev, 
qp->sq.db_addr); + hinic_wq_free(nic_io->hwdev, qp->sq.wq); + qp->sq.wq = NULL; +} diff --git a/src/spdk/dpdk/drivers/net/hinic/hinic_pmd_tx.h b/src/spdk/dpdk/drivers/net/hinic/hinic_pmd_tx.h new file mode 100644 index 000000000..d98abad8d --- /dev/null +++ b/src/spdk/dpdk/drivers/net/hinic/hinic_pmd_tx.h @@ -0,0 +1,148 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Huawei Technologies Co., Ltd + */ + +#ifndef _HINIC_PMD_TX_H_ +#define _HINIC_PMD_TX_H_ + +#define HINIC_DEFAULT_TX_FREE_THRESH 32 +#define HINIC_MAX_TX_FREE_BULK 64 + +#define HINIC_GET_WQ_HEAD(txq) ((txq)->wq->queue_buf_vaddr) + +#define HINIC_GET_WQ_TAIL(txq) \ + ((txq)->wq->queue_buf_vaddr + (txq)->wq->wq_buf_size) + +#define HINIC_TX_CKSUM_OFFLOAD_MASK ( \ + PKT_TX_IP_CKSUM | \ + PKT_TX_TCP_CKSUM | \ + PKT_TX_UDP_CKSUM | \ + PKT_TX_SCTP_CKSUM | \ + PKT_TX_OUTER_IP_CKSUM | \ + PKT_TX_TCP_SEG) + +enum sq_wqe_type { + SQ_NORMAL_WQE = 0, +}; + +/* tx offload info */ +struct hinic_tx_offload_info { + u8 outer_l2_len; + u8 outer_l3_type; + u16 outer_l3_len; + + u8 inner_l2_len; + u8 inner_l3_type; + u16 inner_l3_len; + + u8 tunnel_length; + u8 tunnel_type; + u8 inner_l4_type; + u8 inner_l4_len; + + u16 payload_offset; + u8 inner_l4_tcp_udp; + u8 rsvd0; +}; + +/* tx sge info */ +struct hinic_wqe_info { + u16 pi; + u16 owner; + u16 around; + u16 seq_wqebbs; + u16 sge_cnt; + u16 cpy_mbuf_cnt; +}; + +struct hinic_sq_ctrl { + u32 ctrl_fmt; + u32 queue_info; +}; + +struct hinic_sq_task { + u32 pkt_info0; + u32 pkt_info1; + u32 pkt_info2; + u32 ufo_v6_identify; + u32 pkt_info4; + u32 rsvd5; +}; + +struct hinic_sq_bufdesc { + struct hinic_sge sge; + u32 rsvd; +}; + +struct hinic_sq_wqe { + /* sq wqe control section */ + struct hinic_sq_ctrl ctrl; + + /* sq task control section */ + struct hinic_sq_task task; + + /* sq sge section start address, 1~127 sges */ + struct hinic_sq_bufdesc buf_descs[0]; +}; + +struct hinic_txq_stats { + u64 packets; + u64 bytes; + u64 rl_drop; + u64 tx_busy; + u64 off_errs; + u64 cpy_pkts; + u64 burst_pkts; + u64 sge_len0; + u64 mbuf_null; +}; + +struct hinic_tx_info { + struct rte_mbuf *mbuf; + int wqebb_cnt; + struct rte_mbuf *cpy_mbuf; +}; + +struct hinic_txq { + /* cacheline0 */ + struct hinic_nic_dev *nic_dev; + struct hinic_wq *wq; + struct hinic_sq *sq; + volatile u16 *cons_idx_addr; + struct hinic_tx_info *tx_info; + + u16 tx_free_thresh; + u16 port_id; + u16 q_id; + u16 q_depth; + u32 cos; + u32 socket_id; + + /* cacheline1 */ + struct hinic_txq_stats txq_stats; + u64 sq_head_addr; + u64 sq_bot_sge_addr; +}; + +int hinic_setup_tx_resources(struct hinic_txq *txq); + +void hinic_free_all_tx_resources(struct rte_eth_dev *eth_dev); + +void hinic_free_all_tx_mbuf(struct rte_eth_dev *eth_dev); + +void hinic_free_tx_resources(struct hinic_txq *txq); + +u16 hinic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, u16 nb_pkts); + +void hinic_free_all_tx_mbufs(struct hinic_txq *txq); + +void hinic_txq_get_stats(struct hinic_txq *txq, struct hinic_txq_stats *stats); + +void hinic_txq_stats_reset(struct hinic_txq *txq); + +int hinic_create_sq(struct hinic_hwdev *hwdev, u16 q_id, + u16 sq_depth, unsigned int socket_id); + +void hinic_destroy_sq(struct hinic_hwdev *hwdev, u16 q_id); + +#endif /* _HINIC_PMD_TX_H_ */ diff --git a/src/spdk/dpdk/drivers/net/hinic/meson.build b/src/spdk/dpdk/drivers/net/hinic/meson.build new file mode 100644 index 000000000..bc7e24639 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/hinic/meson.build @@ -0,0 +1,14 @@ +# SPDX-License-Identifier: BSD-3-Clause +# 
Copyright(c) 2017 Huawei Technologies Co., Ltd + +subdir('base') +objs = [base_objs] + +sources = files( + 'hinic_pmd_ethdev.c', + 'hinic_pmd_rx.c', + 'hinic_pmd_tx.c', + 'hinic_pmd_flow.c', + ) + +includes += include_directories('base') diff --git a/src/spdk/dpdk/drivers/net/hinic/rte_pmd_hinic_version.map b/src/spdk/dpdk/drivers/net/hinic/rte_pmd_hinic_version.map new file mode 100644 index 000000000..f9f17e4f6 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/hinic/rte_pmd_hinic_version.map @@ -0,0 +1,3 @@ +DPDK_20.0 { + local: *; +}; diff --git a/src/spdk/dpdk/drivers/net/hns3/Makefile b/src/spdk/dpdk/drivers/net/hns3/Makefile new file mode 100644 index 000000000..d7798a470 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/hns3/Makefile @@ -0,0 +1,42 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2018-2019 Hisilicon Limited. + +include $(RTE_SDK)/mk/rte.vars.mk + +# +# library name +# +LIB = librte_pmd_hns3.a + +CFLAGS += -O3 +CFLAGS += $(WERROR_FLAGS) +# Experimantal APIs: +# - rte_mp_action_register +# - rte_mp_action_unregister +# - rte_mp_reply +# - rte_mp_request_sync + +LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring +LDLIBS += -lrte_ethdev -lrte_net -lrte_hash +LDLIBS += -lrte_bus_pci + +EXPORT_MAP := rte_pmd_hns3_version.map + +# +# all source are stored in SRCS-y +# +SRCS-$(CONFIG_RTE_LIBRTE_HNS3_PMD) += hns3_ethdev.c +SRCS-$(CONFIG_RTE_LIBRTE_HNS3_PMD) += hns3_ethdev_vf.c +SRCS-$(CONFIG_RTE_LIBRTE_HNS3_PMD) += hns3_cmd.c +SRCS-$(CONFIG_RTE_LIBRTE_HNS3_PMD) += hns3_mbx.c +SRCS-$(CONFIG_RTE_LIBRTE_HNS3_PMD) += hns3_rxtx.c +SRCS-$(CONFIG_RTE_LIBRTE_HNS3_PMD) += hns3_rss.c +SRCS-$(CONFIG_RTE_LIBRTE_HNS3_PMD) += hns3_flow.c +SRCS-$(CONFIG_RTE_LIBRTE_HNS3_PMD) += hns3_fdir.c +SRCS-$(CONFIG_RTE_LIBRTE_HNS3_PMD) += hns3_intr.c +SRCS-$(CONFIG_RTE_LIBRTE_HNS3_PMD) += hns3_stats.c +SRCS-$(CONFIG_RTE_LIBRTE_HNS3_PMD) += hns3_regs.c +SRCS-$(CONFIG_RTE_LIBRTE_HNS3_PMD) += hns3_dcb.c +SRCS-$(CONFIG_RTE_LIBRTE_HNS3_PMD) += hns3_mp.c + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/src/spdk/dpdk/drivers/net/hns3/hns3_cmd.c b/src/spdk/dpdk/drivers/net/hns3/hns3_cmd.c new file mode 100644 index 000000000..cbb09887c --- /dev/null +++ b/src/spdk/dpdk/drivers/net/hns3/hns3_cmd.c @@ -0,0 +1,572 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018-2019 Hisilicon Limited. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "hns3_ethdev.h" +#include "hns3_regs.h" +#include "hns3_intr.h" +#include "hns3_logs.h" + +#define hns3_is_csq(ring) ((ring)->flag & HNS3_TYPE_CSQ) + +#define cmq_ring_to_dev(ring) (&(ring)->dev->pdev->dev) + +static int +hns3_ring_space(struct hns3_cmq_ring *ring) +{ + int ntu = ring->next_to_use; + int ntc = ring->next_to_clean; + int used = (ntu - ntc + ring->desc_num) % ring->desc_num; + + return ring->desc_num - used - 1; +} + +static bool +is_valid_csq_clean_head(struct hns3_cmq_ring *ring, int head) +{ + int ntu = ring->next_to_use; + int ntc = ring->next_to_clean; + + if (ntu > ntc) + return head >= ntc && head <= ntu; + + return head >= ntc || head <= ntu; +} + +/* + * hns3_allocate_dma_mem - Specific memory alloc for command function. + * Malloc a memzone, which is a contiguous portion of physical memory identified + * by a name. 
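+ * The zone is reserved IOVA-contiguous and bounded within a 2M page so
+ * that the device can DMA the whole descriptor ring as one block.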
+ * @ring: pointer to the ring structure + * @size: size of memory requested + * @alignment: what to align the allocation to + */ +static int +hns3_allocate_dma_mem(struct hns3_hw *hw, struct hns3_cmq_ring *ring, + uint64_t size, uint32_t alignment) +{ + const struct rte_memzone *mz = NULL; + char z_name[RTE_MEMZONE_NAMESIZE]; + + snprintf(z_name, sizeof(z_name), "hns3_dma_%" PRIu64, rte_rand()); + mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY, + RTE_MEMZONE_IOVA_CONTIG, alignment, + RTE_PGSIZE_2M); + if (mz == NULL) + return -ENOMEM; + + ring->buf_size = size; + ring->desc = mz->addr; + ring->desc_dma_addr = mz->iova; + ring->zone = (const void *)mz; + hns3_dbg(hw, "memzone %s allocated with physical address: %" PRIu64, + mz->name, ring->desc_dma_addr); + + return 0; +} + +static void +hns3_free_dma_mem(struct hns3_hw *hw, struct hns3_cmq_ring *ring) +{ + hns3_dbg(hw, "memzone %s to be freed with physical address: %" PRIu64, + ((const struct rte_memzone *)ring->zone)->name, + ring->desc_dma_addr); + rte_memzone_free((const struct rte_memzone *)ring->zone); + ring->buf_size = 0; + ring->desc = NULL; + ring->desc_dma_addr = 0; + ring->zone = NULL; +} + +static int +hns3_alloc_cmd_desc(struct hns3_hw *hw, struct hns3_cmq_ring *ring) +{ + int size = ring->desc_num * sizeof(struct hns3_cmd_desc); + + if (hns3_allocate_dma_mem(hw, ring, size, HNS3_CMD_DESC_ALIGNMENT)) { + hns3_err(hw, "allocate dma mem failed"); + return -ENOMEM; + } + + return 0; +} + +static void +hns3_free_cmd_desc(struct hns3_hw *hw, struct hns3_cmq_ring *ring) +{ + if (ring->desc) + hns3_free_dma_mem(hw, ring); +} + +static int +hns3_alloc_cmd_queue(struct hns3_hw *hw, int ring_type) +{ + struct hns3_cmq_ring *ring = + (ring_type == HNS3_TYPE_CSQ) ? &hw->cmq.csq : &hw->cmq.crq; + int ret; + + ring->ring_type = ring_type; + ring->hw = hw; + + ret = hns3_alloc_cmd_desc(hw, ring); + if (ret) + hns3_err(hw, "descriptor %s alloc error %d", + (ring_type == HNS3_TYPE_CSQ) ? 
"CSQ" : "CRQ", ret); + + return ret; +} + +void +hns3_cmd_reuse_desc(struct hns3_cmd_desc *desc, bool is_read) +{ + desc->flag = rte_cpu_to_le_16(HNS3_CMD_FLAG_NO_INTR | HNS3_CMD_FLAG_IN); + if (is_read) + desc->flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_WR); + else + desc->flag &= rte_cpu_to_le_16(~HNS3_CMD_FLAG_WR); +} + +void +hns3_cmd_setup_basic_desc(struct hns3_cmd_desc *desc, + enum hns3_opcode_type opcode, bool is_read) +{ + memset((void *)desc, 0, sizeof(struct hns3_cmd_desc)); + desc->opcode = rte_cpu_to_le_16(opcode); + desc->flag = rte_cpu_to_le_16(HNS3_CMD_FLAG_NO_INTR | HNS3_CMD_FLAG_IN); + + if (is_read) + desc->flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_WR); +} + +static void +hns3_cmd_clear_regs(struct hns3_hw *hw) +{ + hns3_write_dev(hw, HNS3_CMDQ_TX_ADDR_L_REG, 0); + hns3_write_dev(hw, HNS3_CMDQ_TX_ADDR_H_REG, 0); + hns3_write_dev(hw, HNS3_CMDQ_TX_DEPTH_REG, 0); + hns3_write_dev(hw, HNS3_CMDQ_TX_HEAD_REG, 0); + hns3_write_dev(hw, HNS3_CMDQ_TX_TAIL_REG, 0); + hns3_write_dev(hw, HNS3_CMDQ_RX_ADDR_L_REG, 0); + hns3_write_dev(hw, HNS3_CMDQ_RX_ADDR_H_REG, 0); + hns3_write_dev(hw, HNS3_CMDQ_RX_DEPTH_REG, 0); + hns3_write_dev(hw, HNS3_CMDQ_RX_HEAD_REG, 0); + hns3_write_dev(hw, HNS3_CMDQ_RX_TAIL_REG, 0); +} + +static void +hns3_cmd_config_regs(struct hns3_cmq_ring *ring) +{ + uint64_t dma = ring->desc_dma_addr; + + if (ring->ring_type == HNS3_TYPE_CSQ) { + hns3_write_dev(ring->hw, HNS3_CMDQ_TX_ADDR_L_REG, + lower_32_bits(dma)); + hns3_write_dev(ring->hw, HNS3_CMDQ_TX_ADDR_H_REG, + upper_32_bits(dma)); + hns3_write_dev(ring->hw, HNS3_CMDQ_TX_DEPTH_REG, + ring->desc_num >> HNS3_NIC_CMQ_DESC_NUM_S | + HNS3_NIC_SW_RST_RDY); + hns3_write_dev(ring->hw, HNS3_CMDQ_TX_HEAD_REG, 0); + hns3_write_dev(ring->hw, HNS3_CMDQ_TX_TAIL_REG, 0); + } else { + hns3_write_dev(ring->hw, HNS3_CMDQ_RX_ADDR_L_REG, + lower_32_bits(dma)); + hns3_write_dev(ring->hw, HNS3_CMDQ_RX_ADDR_H_REG, + upper_32_bits(dma)); + hns3_write_dev(ring->hw, HNS3_CMDQ_RX_DEPTH_REG, + ring->desc_num >> HNS3_NIC_CMQ_DESC_NUM_S); + hns3_write_dev(ring->hw, HNS3_CMDQ_RX_HEAD_REG, 0); + hns3_write_dev(ring->hw, HNS3_CMDQ_RX_TAIL_REG, 0); + } +} + +static void +hns3_cmd_init_regs(struct hns3_hw *hw) +{ + hns3_cmd_config_regs(&hw->cmq.csq); + hns3_cmd_config_regs(&hw->cmq.crq); +} + +static int +hns3_cmd_csq_clean(struct hns3_hw *hw) +{ + struct hns3_cmq_ring *csq = &hw->cmq.csq; + uint32_t head; + int clean; + + head = hns3_read_dev(hw, HNS3_CMDQ_TX_HEAD_REG); + + if (!is_valid_csq_clean_head(csq, head)) { + hns3_err(hw, "wrong cmd head (%u, %u-%u)", head, + csq->next_to_use, csq->next_to_clean); + if (rte_eal_process_type() == RTE_PROC_PRIMARY) { + rte_atomic16_set(&hw->reset.disable_cmd, 1); + hns3_schedule_delayed_reset(HNS3_DEV_HW_TO_ADAPTER(hw)); + } + + return -EIO; + } + + clean = (head - csq->next_to_clean + csq->desc_num) % csq->desc_num; + csq->next_to_clean = head; + return clean; +} + +static int +hns3_cmd_csq_done(struct hns3_hw *hw) +{ + uint32_t head = hns3_read_dev(hw, HNS3_CMDQ_TX_HEAD_REG); + + return head == hw->cmq.csq.next_to_use; +} + +static bool +hns3_is_special_opcode(uint16_t opcode) +{ + /* + * These commands have several descriptors, + * and use the first one to save opcode and return value. 
+ */ + uint16_t spec_opcode[] = {HNS3_OPC_STATS_64_BIT, + HNS3_OPC_STATS_32_BIT, + HNS3_OPC_STATS_MAC, + HNS3_OPC_STATS_MAC_ALL, + HNS3_OPC_QUERY_32_BIT_REG, + HNS3_OPC_QUERY_64_BIT_REG}; + uint32_t i; + + for (i = 0; i < ARRAY_SIZE(spec_opcode); i++) + if (spec_opcode[i] == opcode) + return true; + + return false; +} + +static int +hns3_cmd_convert_err_code(uint16_t desc_ret) +{ + switch (desc_ret) { + case HNS3_CMD_EXEC_SUCCESS: + return 0; + case HNS3_CMD_NO_AUTH: + return -EPERM; + case HNS3_CMD_NOT_SUPPORTED: + return -EOPNOTSUPP; + case HNS3_CMD_QUEUE_FULL: + return -EXFULL; + case HNS3_CMD_NEXT_ERR: + return -ENOSR; + case HNS3_CMD_UNEXE_ERR: + return -ENOTBLK; + case HNS3_CMD_PARA_ERR: + return -EINVAL; + case HNS3_CMD_RESULT_ERR: + return -ERANGE; + case HNS3_CMD_TIMEOUT: + return -ETIME; + case HNS3_CMD_HILINK_ERR: + return -ENOLINK; + case HNS3_CMD_QUEUE_ILLEGAL: + return -ENXIO; + case HNS3_CMD_INVALID: + return -EBADR; + default: + return -EREMOTEIO; + } +} + +static int +hns3_cmd_get_hardware_reply(struct hns3_hw *hw, + struct hns3_cmd_desc *desc, int num, int ntc) +{ + uint16_t opcode, desc_ret; + int current_ntc = ntc; + int handle; + + opcode = rte_le_to_cpu_16(desc[0].opcode); + for (handle = 0; handle < num; handle++) { + /* Get the result of hardware write back */ + desc[handle] = hw->cmq.csq.desc[current_ntc]; + + current_ntc++; + if (current_ntc == hw->cmq.csq.desc_num) + current_ntc = 0; + } + + if (likely(!hns3_is_special_opcode(opcode))) + desc_ret = rte_le_to_cpu_16(desc[num - 1].retval); + else + desc_ret = rte_le_to_cpu_16(desc[0].retval); + + hw->cmq.last_status = desc_ret; + return hns3_cmd_convert_err_code(desc_ret); +} + +static int hns3_cmd_poll_reply(struct hns3_hw *hw) +{ + struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); + uint32_t timeout = 0; + + do { + if (hns3_cmd_csq_done(hw)) + return 0; + + if (rte_atomic16_read(&hw->reset.disable_cmd)) { + hns3_err(hw, + "Don't wait for reply because of disable_cmd"); + return -EBUSY; + } + + if (is_reset_pending(hns)) { + hns3_err(hw, "Don't wait for reply because of reset pending"); + return -EIO; + } + + rte_delay_us(1); + timeout++; + } while (timeout < hw->cmq.tx_timeout); + hns3_err(hw, "Wait for reply timeout"); + return -ETIME; +} + +/* + * hns3_cmd_send - send command to command queue + * + * @param hw + * pointer to the hw struct + * @param desc + * prefilled descriptor for describing the command + * @param num + * the number of descriptors to be sent + * @return + * - -EBUSY if detect device is in resetting + * - -EIO if detect cmd csq corrupted (due to reset) or + * there is reset pending + * - -ENOMEM/-ETIME/...(Non-Zero) if other error case + * - Zero if operation completed successfully + * + * Note -BUSY/-EIO only used in reset case + * + * Note this is the main send command for command queue, it + * sends the queue, cleans the queue, etc + */ +int +hns3_cmd_send(struct hns3_hw *hw, struct hns3_cmd_desc *desc, int num) +{ + struct hns3_cmd_desc *desc_to_use; + int handle = 0; + int retval; + uint32_t ntc; + + if (rte_atomic16_read(&hw->reset.disable_cmd)) + return -EBUSY; + + rte_spinlock_lock(&hw->cmq.csq.lock); + + /* Clean the command send queue */ + retval = hns3_cmd_csq_clean(hw); + if (retval < 0) { + rte_spinlock_unlock(&hw->cmq.csq.lock); + return retval; + } + + if (num > hns3_ring_space(&hw->cmq.csq)) { + rte_spinlock_unlock(&hw->cmq.csq.lock); + return -ENOMEM; + } + + /* + * Record the location of desc in the ring for this time + * which will be use for hardware to write back + */ 
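+ /*
+ * The caller's descriptors are then copied into the CSQ ring, with
+ * next_to_use wrapping at desc_num, and the updated index is written
+ * to HNS3_CMDQ_TX_TAIL_REG as a doorbell so the firmware starts
+ * fetching them.
+ */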
+ ntc = hw->cmq.csq.next_to_use; + + while (handle < num) { + desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use]; + *desc_to_use = desc[handle]; + (hw->cmq.csq.next_to_use)++; + if (hw->cmq.csq.next_to_use == hw->cmq.csq.desc_num) + hw->cmq.csq.next_to_use = 0; + handle++; + } + + /* Write to hardware */ + hns3_write_dev(hw, HNS3_CMDQ_TX_TAIL_REG, hw->cmq.csq.next_to_use); + + /* + * If the command is sync, wait for the firmware to write back, + * if multi descriptors to be sent, use the first one to check. + */ + if (HNS3_CMD_SEND_SYNC(rte_le_to_cpu_16(desc->flag))) { + retval = hns3_cmd_poll_reply(hw); + if (!retval) + retval = hns3_cmd_get_hardware_reply(hw, desc, num, + ntc); + } + + rte_spinlock_unlock(&hw->cmq.csq.lock); + return retval; +} + +static enum hns3_cmd_status +hns3_cmd_query_firmware_version(struct hns3_hw *hw, uint32_t *version) +{ + struct hns3_query_version_cmd *resp; + struct hns3_cmd_desc desc; + int ret; + + hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_FW_VER, 1); + resp = (struct hns3_query_version_cmd *)desc.data; + + /* Initialize the cmd function */ + ret = hns3_cmd_send(hw, &desc, 1); + if (ret == 0) + *version = rte_le_to_cpu_32(resp->firmware); + + return ret; +} + +int +hns3_cmd_init_queue(struct hns3_hw *hw) +{ + int ret; + + /* Setup the lock for command queue */ + rte_spinlock_init(&hw->cmq.csq.lock); + rte_spinlock_init(&hw->cmq.crq.lock); + + /* + * Clear up all command register, + * in case there are some residual values + */ + hns3_cmd_clear_regs(hw); + + /* Setup the queue entries for use cmd queue */ + hw->cmq.csq.desc_num = HNS3_NIC_CMQ_DESC_NUM; + hw->cmq.crq.desc_num = HNS3_NIC_CMQ_DESC_NUM; + + /* Setup Tx write back timeout */ + hw->cmq.tx_timeout = HNS3_CMDQ_TX_TIMEOUT; + + /* Setup queue rings */ + ret = hns3_alloc_cmd_queue(hw, HNS3_TYPE_CSQ); + if (ret) { + PMD_INIT_LOG(ERR, "CSQ ring setup error %d", ret); + return ret; + } + + ret = hns3_alloc_cmd_queue(hw, HNS3_TYPE_CRQ); + if (ret) { + PMD_INIT_LOG(ERR, "CRQ ring setup error %d", ret); + goto err_crq; + } + + return 0; + +err_crq: + hns3_free_cmd_desc(hw, &hw->cmq.csq); + + return ret; +} + +int +hns3_cmd_init(struct hns3_hw *hw) +{ + uint32_t version; + int ret; + + rte_spinlock_lock(&hw->cmq.csq.lock); + rte_spinlock_lock(&hw->cmq.crq.lock); + + hw->cmq.csq.next_to_clean = 0; + hw->cmq.csq.next_to_use = 0; + hw->cmq.crq.next_to_clean = 0; + hw->cmq.crq.next_to_use = 0; + hw->mbx_resp.head = 0; + hw->mbx_resp.tail = 0; + hw->mbx_resp.lost = 0; + hns3_cmd_init_regs(hw); + + rte_spinlock_unlock(&hw->cmq.crq.lock); + rte_spinlock_unlock(&hw->cmq.csq.lock); + + /* + * Check if there is new reset pending, because the higher level + * reset may happen when lower level reset is being processed. 
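+ * In that case -EBUSY is returned and disable_cmd stays set, so no
+ * command can be sent until a later hns3_cmd_init() call clears it.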
+ */ + if (is_reset_pending(HNS3_DEV_HW_TO_ADAPTER(hw))) { + PMD_INIT_LOG(ERR, "New reset pending, keep disable cmd"); + ret = -EBUSY; + goto err_cmd_init; + } + rte_atomic16_clear(&hw->reset.disable_cmd); + + ret = hns3_cmd_query_firmware_version(hw, &version); + if (ret) { + PMD_INIT_LOG(ERR, "firmware version query failed %d", ret); + goto err_cmd_init; + } + + hw->fw_version = version; + PMD_INIT_LOG(INFO, "The firmware version is %lu.%lu.%lu.%lu", + hns3_get_field(version, HNS3_FW_VERSION_BYTE3_M, + HNS3_FW_VERSION_BYTE3_S), + hns3_get_field(version, HNS3_FW_VERSION_BYTE2_M, + HNS3_FW_VERSION_BYTE2_S), + hns3_get_field(version, HNS3_FW_VERSION_BYTE1_M, + HNS3_FW_VERSION_BYTE1_S), + hns3_get_field(version, HNS3_FW_VERSION_BYTE0_M, + HNS3_FW_VERSION_BYTE0_S)); + + return 0; + +err_cmd_init: + rte_atomic16_set(&hw->reset.disable_cmd, 1); + return ret; +} + +static void +hns3_destroy_queue(struct hns3_hw *hw, struct hns3_cmq_ring *ring) +{ + rte_spinlock_lock(&ring->lock); + + hns3_free_cmd_desc(hw, ring); + + rte_spinlock_unlock(&ring->lock); +} + +void +hns3_cmd_destroy_queue(struct hns3_hw *hw) +{ + hns3_destroy_queue(hw, &hw->cmq.csq); + hns3_destroy_queue(hw, &hw->cmq.crq); +} + +void +hns3_cmd_uninit(struct hns3_hw *hw) +{ + rte_spinlock_lock(&hw->cmq.csq.lock); + rte_spinlock_lock(&hw->cmq.crq.lock); + rte_atomic16_set(&hw->reset.disable_cmd, 1); + hns3_cmd_clear_regs(hw); + rte_spinlock_unlock(&hw->cmq.crq.lock); + rte_spinlock_unlock(&hw->cmq.csq.lock); +} diff --git a/src/spdk/dpdk/drivers/net/hns3/hns3_cmd.h b/src/spdk/dpdk/drivers/net/hns3/hns3_cmd.h new file mode 100644 index 000000000..da770ac95 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/hns3/hns3_cmd.h @@ -0,0 +1,814 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018-2019 Hisilicon Limited. 
+ */ + +#ifndef _HNS3_CMD_H_ +#define _HNS3_CMD_H_ + +#define HNS3_CMDQ_TX_TIMEOUT 30000 +#define HNS3_CMDQ_RX_INVLD_B 0 +#define HNS3_CMDQ_RX_OUTVLD_B 1 +#define HNS3_CMD_DESC_ALIGNMENT 4096 +#define HNS3_QUEUE_ID_MASK 0x1ff +#define HNS3_CMD_FLAG_NEXT BIT(2) + +struct hns3_hw; + +#define HNS3_CMD_DESC_DATA_NUM 6 +struct hns3_cmd_desc { + uint16_t opcode; + uint16_t flag; + uint16_t retval; + uint16_t rsv; + uint32_t data[HNS3_CMD_DESC_DATA_NUM]; +}; + +struct hns3_cmq_ring { + uint64_t desc_dma_addr; + struct hns3_cmd_desc *desc; + struct hns3_hw *hw; + + uint16_t buf_size; + uint16_t desc_num; /* max number of cmq descriptor */ + uint32_t next_to_use; + uint32_t next_to_clean; + uint8_t ring_type; /* cmq ring type */ + rte_spinlock_t lock; /* Command queue lock */ + + const void *zone; /* memory zone */ +}; + +enum hns3_cmd_return_status { + HNS3_CMD_EXEC_SUCCESS = 0, + HNS3_CMD_NO_AUTH = 1, + HNS3_CMD_NOT_SUPPORTED = 2, + HNS3_CMD_QUEUE_FULL = 3, + HNS3_CMD_NEXT_ERR = 4, + HNS3_CMD_UNEXE_ERR = 5, + HNS3_CMD_PARA_ERR = 6, + HNS3_CMD_RESULT_ERR = 7, + HNS3_CMD_TIMEOUT = 8, + HNS3_CMD_HILINK_ERR = 9, + HNS3_CMD_QUEUE_ILLEGAL = 10, + HNS3_CMD_INVALID = 11, +}; + +enum hns3_cmd_status { + HNS3_STATUS_SUCCESS = 0, + HNS3_ERR_CSQ_FULL = -1, + HNS3_ERR_CSQ_TIMEOUT = -2, + HNS3_ERR_CSQ_ERROR = -3, +}; + +struct hns3_misc_vector { + uint8_t *addr; + int vector_irq; +}; + +struct hns3_cmq { + struct hns3_cmq_ring csq; + struct hns3_cmq_ring crq; + uint16_t tx_timeout; + enum hns3_cmd_status last_status; +}; + +enum hns3_opcode_type { + /* Generic commands */ + HNS3_OPC_QUERY_FW_VER = 0x0001, + HNS3_OPC_CFG_RST_TRIGGER = 0x0020, + HNS3_OPC_GBL_RST_STATUS = 0x0021, + HNS3_OPC_QUERY_FUNC_STATUS = 0x0022, + HNS3_OPC_QUERY_PF_RSRC = 0x0023, + HNS3_OPC_QUERY_VF_RSRC = 0x0024, + HNS3_OPC_GET_CFG_PARAM = 0x0025, + HNS3_OPC_PF_RST_DONE = 0x0026, + + HNS3_OPC_STATS_64_BIT = 0x0030, + HNS3_OPC_STATS_32_BIT = 0x0031, + HNS3_OPC_STATS_MAC = 0x0032, + HNS3_OPC_QUERY_MAC_REG_NUM = 0x0033, + HNS3_OPC_STATS_MAC_ALL = 0x0034, + + HNS3_OPC_QUERY_REG_NUM = 0x0040, + HNS3_OPC_QUERY_32_BIT_REG = 0x0041, + HNS3_OPC_QUERY_64_BIT_REG = 0x0042, + + /* MAC command */ + HNS3_OPC_CONFIG_MAC_MODE = 0x0301, + HNS3_OPC_QUERY_LINK_STATUS = 0x0307, + HNS3_OPC_CONFIG_MAX_FRM_SIZE = 0x0308, + HNS3_OPC_CONFIG_SPEED_DUP = 0x0309, + HNS3_MAC_COMMON_INT_EN = 0x030E, + + /* PFC/Pause commands */ + HNS3_OPC_CFG_MAC_PAUSE_EN = 0x0701, + HNS3_OPC_CFG_PFC_PAUSE_EN = 0x0702, + HNS3_OPC_CFG_MAC_PARA = 0x0703, + HNS3_OPC_CFG_PFC_PARA = 0x0704, + HNS3_OPC_QUERY_MAC_TX_PKT_CNT = 0x0705, + HNS3_OPC_QUERY_MAC_RX_PKT_CNT = 0x0706, + HNS3_OPC_QUERY_PFC_TX_PKT_CNT = 0x0707, + HNS3_OPC_QUERY_PFC_RX_PKT_CNT = 0x0708, + HNS3_OPC_PRI_TO_TC_MAPPING = 0x0709, + HNS3_OPC_QOS_MAP = 0x070A, + + /* ETS/scheduler commands */ + HNS3_OPC_TM_PG_TO_PRI_LINK = 0x0804, + HNS3_OPC_TM_QS_TO_PRI_LINK = 0x0805, + HNS3_OPC_TM_NQ_TO_QS_LINK = 0x0806, + HNS3_OPC_TM_RQ_TO_QS_LINK = 0x0807, + HNS3_OPC_TM_PORT_WEIGHT = 0x0808, + HNS3_OPC_TM_PG_WEIGHT = 0x0809, + HNS3_OPC_TM_QS_WEIGHT = 0x080A, + HNS3_OPC_TM_PRI_WEIGHT = 0x080B, + HNS3_OPC_TM_PRI_C_SHAPPING = 0x080C, + HNS3_OPC_TM_PRI_P_SHAPPING = 0x080D, + HNS3_OPC_TM_PG_C_SHAPPING = 0x080E, + HNS3_OPC_TM_PG_P_SHAPPING = 0x080F, + HNS3_OPC_TM_PORT_SHAPPING = 0x0810, + HNS3_OPC_TM_PG_SCH_MODE_CFG = 0x0812, + HNS3_OPC_TM_PRI_SCH_MODE_CFG = 0x0813, + HNS3_OPC_TM_QS_SCH_MODE_CFG = 0x0814, + HNS3_OPC_TM_BP_TO_QSET_MAPPING = 0x0815, + HNS3_OPC_ETS_TC_WEIGHT = 0x0843, + HNS3_OPC_QSET_DFX_STS = 0x0844, + HNS3_OPC_PRI_DFX_STS = 0x0845, 
+ HNS3_OPC_PG_DFX_STS = 0x0846, + HNS3_OPC_PORT_DFX_STS = 0x0847, + HNS3_OPC_SCH_NQ_CNT = 0x0848, + HNS3_OPC_SCH_RQ_CNT = 0x0849, + HNS3_OPC_TM_INTERNAL_STS = 0x0850, + HNS3_OPC_TM_INTERNAL_CNT = 0x0851, + HNS3_OPC_TM_INTERNAL_STS_1 = 0x0852, + + /* Mailbox cmd */ + HNS3_OPC_MBX_VF_TO_PF = 0x2001, + + /* Packet buffer allocate commands */ + HNS3_OPC_TX_BUFF_ALLOC = 0x0901, + HNS3_OPC_RX_PRIV_BUFF_ALLOC = 0x0902, + HNS3_OPC_RX_PRIV_WL_ALLOC = 0x0903, + HNS3_OPC_RX_COM_THRD_ALLOC = 0x0904, + HNS3_OPC_RX_COM_WL_ALLOC = 0x0905, + + /* SSU module INT commands */ + HNS3_SSU_ECC_INT_CMD = 0x0989, + HNS3_SSU_COMMON_INT_CMD = 0x098C, + + /* TQP management command */ + HNS3_OPC_SET_TQP_MAP = 0x0A01, + + /* TQP commands */ + HNS3_OPC_QUERY_TX_STATUS = 0x0B03, + HNS3_OPC_QUERY_RX_STATUS = 0x0B13, + HNS3_OPC_CFG_COM_TQP_QUEUE = 0x0B20, + HNS3_OPC_RESET_TQP_QUEUE = 0x0B22, + + /* PPU module intr commands */ + HNS3_PPU_MPF_ECC_INT_CMD = 0x0B40, + HNS3_PPU_MPF_OTHER_INT_CMD = 0x0B41, + HNS3_PPU_PF_OTHER_INT_CMD = 0x0B42, + + /* TSO command */ + HNS3_OPC_TSO_GENERIC_CONFIG = 0x0C01, + HNS3_OPC_GRO_GENERIC_CONFIG = 0x0C10, + + /* RSS commands */ + HNS3_OPC_RSS_GENERIC_CONFIG = 0x0D01, + HNS3_OPC_RSS_INPUT_TUPLE = 0x0D02, + HNS3_OPC_RSS_INDIR_TABLE = 0x0D07, + HNS3_OPC_RSS_TC_MODE = 0x0D08, + + /* Promisuous mode command */ + HNS3_OPC_CFG_PROMISC_MODE = 0x0E01, + + /* Vlan offload commands */ + HNS3_OPC_VLAN_PORT_TX_CFG = 0x0F01, + HNS3_OPC_VLAN_PORT_RX_CFG = 0x0F02, + + /* MAC commands */ + HNS3_OPC_MAC_VLAN_ADD = 0x1000, + HNS3_OPC_MAC_VLAN_REMOVE = 0x1001, + HNS3_OPC_MAC_VLAN_TYPE_ID = 0x1002, + HNS3_OPC_MAC_VLAN_INSERT = 0x1003, + HNS3_OPC_MAC_VLAN_ALLOCATE = 0x1004, + HNS3_OPC_MAC_ETHTYPE_ADD = 0x1010, + + /* VLAN commands */ + HNS3_OPC_VLAN_FILTER_CTRL = 0x1100, + HNS3_OPC_VLAN_FILTER_PF_CFG = 0x1101, + HNS3_OPC_VLAN_FILTER_VF_CFG = 0x1102, + + /* Flow Director command */ + HNS3_OPC_FD_MODE_CTRL = 0x1200, + HNS3_OPC_FD_GET_ALLOCATION = 0x1201, + HNS3_OPC_FD_KEY_CONFIG = 0x1202, + HNS3_OPC_FD_TCAM_OP = 0x1203, + HNS3_OPC_FD_AD_OP = 0x1204, + HNS3_OPC_FD_COUNTER_OP = 0x1205, + + /* SFP command */ + HNS3_OPC_SFP_GET_SPEED = 0x7104, + + /* Interrupts commands */ + HNS3_OPC_ADD_RING_TO_VECTOR = 0x1503, + HNS3_OPC_DEL_RING_TO_VECTOR = 0x1504, + + /* Error INT commands */ + HNS3_QUERY_MSIX_INT_STS_BD_NUM = 0x1513, + HNS3_QUERY_CLEAR_ALL_MPF_MSIX_INT = 0x1514, + HNS3_QUERY_CLEAR_ALL_PF_MSIX_INT = 0x1515, + + /* PPP module intr commands */ + HNS3_PPP_CMD0_INT_CMD = 0x2100, + HNS3_PPP_CMD1_INT_CMD = 0x2101, +}; + +#define HNS3_CMD_FLAG_IN BIT(0) +#define HNS3_CMD_FLAG_OUT BIT(1) +#define HNS3_CMD_FLAG_NEXT BIT(2) +#define HNS3_CMD_FLAG_WR BIT(3) +#define HNS3_CMD_FLAG_NO_INTR BIT(4) +#define HNS3_CMD_FLAG_ERR_INTR BIT(5) + +#define HNS3_BUF_SIZE_UNIT 256 +#define HNS3_BUF_MUL_BY 2 +#define HNS3_BUF_DIV_BY 2 +#define NEED_RESERVE_TC_NUM 2 +#define BUF_MAX_PERCENT 100 +#define BUF_RESERVE_PERCENT 90 + +#define HNS3_MAX_TC_NUM 8 +#define HNS3_TC0_PRI_BUF_EN_B 15 /* Bit 15 indicate enable or not */ +#define HNS3_BUF_UNIT_S 7 /* Buf size is united by 128 bytes */ +#define HNS3_TX_BUFF_RSV_NUM 8 +struct hns3_tx_buff_alloc_cmd { + uint16_t tx_pkt_buff[HNS3_MAX_TC_NUM]; + uint8_t tx_buff_rsv[HNS3_TX_BUFF_RSV_NUM]; +}; + +struct hns3_rx_priv_buff_cmd { + uint16_t buf_num[HNS3_MAX_TC_NUM]; + uint16_t shared_buf; + uint8_t rsv[6]; +}; + +#define HNS3_FW_VERSION_BYTE3_S 24 +#define HNS3_FW_VERSION_BYTE3_M GENMASK(31, 24) +#define HNS3_FW_VERSION_BYTE2_S 16 +#define HNS3_FW_VERSION_BYTE2_M GENMASK(23, 16) +#define 
HNS3_FW_VERSION_BYTE1_S 8 +#define HNS3_FW_VERSION_BYTE1_M GENMASK(15, 8) +#define HNS3_FW_VERSION_BYTE0_S 0 +#define HNS3_FW_VERSION_BYTE0_M GENMASK(7, 0) +struct hns3_query_version_cmd { + uint32_t firmware; + uint32_t firmware_rsv[5]; +}; + +#define HNS3_RX_PRIV_EN_B 15 +#define HNS3_TC_NUM_ONE_DESC 4 +struct hns3_priv_wl { + uint16_t high; + uint16_t low; +}; + +struct hns3_rx_priv_wl_buf { + struct hns3_priv_wl tc_wl[HNS3_TC_NUM_ONE_DESC]; +}; + +struct hns3_rx_com_thrd { + struct hns3_priv_wl com_thrd[HNS3_TC_NUM_ONE_DESC]; +}; + +struct hns3_rx_com_wl { + struct hns3_priv_wl com_wl; +}; + +struct hns3_waterline { + uint32_t low; + uint32_t high; +}; + +struct hns3_tc_thrd { + uint32_t low; + uint32_t high; +}; + +struct hns3_priv_buf { + struct hns3_waterline wl; /* Waterline for low and high */ + uint32_t buf_size; /* TC private buffer size */ + uint32_t tx_buf_size; + uint32_t enable; /* Enable TC private buffer or not */ +}; + +struct hns3_shared_buf { + struct hns3_waterline self; + struct hns3_tc_thrd tc_thrd[HNS3_MAX_TC_NUM]; + uint32_t buf_size; +}; + +struct hns3_pkt_buf_alloc { + struct hns3_priv_buf priv_buf[HNS3_MAX_TC_NUM]; + struct hns3_shared_buf s_buf; +}; + +#define HNS3_RX_COM_WL_EN_B 15 +struct hns3_rx_com_wl_buf_cmd { + uint16_t high_wl; + uint16_t low_wl; + uint8_t rsv[20]; +}; + +#define HNS3_RX_PKT_EN_B 15 +struct hns3_rx_pkt_buf_cmd { + uint16_t high_pkt; + uint16_t low_pkt; + uint8_t rsv[20]; +}; + +#define HNS3_PF_STATE_DONE_B 0 +#define HNS3_PF_STATE_MAIN_B 1 +#define HNS3_PF_STATE_BOND_B 2 +#define HNS3_PF_STATE_MAC_N_B 6 +#define HNS3_PF_MAC_NUM_MASK 0x3 +#define HNS3_PF_STATE_MAIN BIT(HNS3_PF_STATE_MAIN_B) +#define HNS3_PF_STATE_DONE BIT(HNS3_PF_STATE_DONE_B) +#define HNS3_VF_RST_STATE_NUM 4 +struct hns3_func_status_cmd { + uint32_t vf_rst_state[HNS3_VF_RST_STATE_NUM]; + uint8_t pf_state; + uint8_t mac_id; + uint8_t rsv1; + uint8_t pf_cnt_in_mac; + uint8_t pf_num; + uint8_t vf_num; + uint8_t rsv[2]; +}; + +#define HNS3_VEC_NUM_S 0 +#define HNS3_VEC_NUM_M GENMASK(7, 0) +#define HNS3_MIN_VECTOR_NUM 2 /* one for msi-x, another for IO */ +struct hns3_pf_res_cmd { + uint16_t tqp_num; + uint16_t buf_size; + uint16_t msixcap_localid_ba_nic; + uint16_t msixcap_localid_ba_rocee; + uint16_t pf_intr_vector_number; + uint16_t pf_own_fun_number; + uint16_t tx_buf_size; + uint16_t dv_buf_size; + uint32_t rsv[2]; +}; + +struct hns3_vf_res_cmd { + uint16_t tqp_num; + uint16_t reserved; + uint16_t msixcap_localid_ba_nic; + uint16_t msixcap_localid_ba_rocee; + uint16_t vf_intr_vector_number; + uint16_t rsv[7]; +}; + +#define HNS3_UMV_SPC_ALC_B 0 +struct hns3_umv_spc_alc_cmd { + uint8_t allocate; + uint8_t rsv1[3]; + uint32_t space_size; + uint8_t rsv2[16]; +}; + +#define HNS3_CFG_OFFSET_S 0 +#define HNS3_CFG_OFFSET_M GENMASK(19, 0) +#define HNS3_CFG_RD_LEN_S 24 +#define HNS3_CFG_RD_LEN_M GENMASK(27, 24) +#define HNS3_CFG_RD_LEN_BYTES 16 +#define HNS3_CFG_RD_LEN_UNIT 4 + +#define HNS3_CFG_VMDQ_S 0 +#define HNS3_CFG_VMDQ_M GENMASK(7, 0) +#define HNS3_CFG_TC_NUM_S 8 +#define HNS3_CFG_TC_NUM_M GENMASK(15, 8) +#define HNS3_CFG_TQP_DESC_N_S 16 +#define HNS3_CFG_TQP_DESC_N_M GENMASK(31, 16) +#define HNS3_CFG_PHY_ADDR_S 0 +#define HNS3_CFG_PHY_ADDR_M GENMASK(7, 0) +#define HNS3_CFG_MEDIA_TP_S 8 +#define HNS3_CFG_MEDIA_TP_M GENMASK(15, 8) +#define HNS3_CFG_RX_BUF_LEN_S 16 +#define HNS3_CFG_RX_BUF_LEN_M GENMASK(31, 16) +#define HNS3_CFG_MAC_ADDR_H_S 0 +#define HNS3_CFG_MAC_ADDR_H_M GENMASK(15, 0) +#define HNS3_CFG_DEFAULT_SPEED_S 16 +#define HNS3_CFG_DEFAULT_SPEED_M GENMASK(23, 16) 
+#define HNS3_CFG_RSS_SIZE_S 24 +#define HNS3_CFG_RSS_SIZE_M GENMASK(31, 24) +#define HNS3_CFG_SPEED_ABILITY_S 0 +#define HNS3_CFG_SPEED_ABILITY_M GENMASK(7, 0) +#define HNS3_CFG_UMV_TBL_SPACE_S 16 +#define HNS3_CFG_UMV_TBL_SPACE_M GENMASK(31, 16) + +#define HNS3_ACCEPT_TAG1_B 0 +#define HNS3_ACCEPT_UNTAG1_B 1 +#define HNS3_PORT_INS_TAG1_EN_B 2 +#define HNS3_PORT_INS_TAG2_EN_B 3 +#define HNS3_CFG_NIC_ROCE_SEL_B 4 +#define HNS3_ACCEPT_TAG2_B 5 +#define HNS3_ACCEPT_UNTAG2_B 6 + +#define HNS3_REM_TAG1_EN_B 0 +#define HNS3_REM_TAG2_EN_B 1 +#define HNS3_SHOW_TAG1_EN_B 2 +#define HNS3_SHOW_TAG2_EN_B 3 + +/* Factor used to calculate offset and bitmap of VF num */ +#define HNS3_VF_NUM_PER_CMD 64 +#define HNS3_VF_NUM_PER_BYTE 8 + +struct hns3_cfg_param_cmd { + uint32_t offset; + uint32_t rsv; + uint32_t param[4]; +}; + +#define HNS3_VPORT_VTAG_RX_CFG_CMD_VF_BITMAP_NUM 8 +struct hns3_vport_vtag_rx_cfg_cmd { + uint8_t vport_vlan_cfg; + uint8_t vf_offset; + uint8_t rsv1[6]; + uint8_t vf_bitmap[HNS3_VPORT_VTAG_RX_CFG_CMD_VF_BITMAP_NUM]; + uint8_t rsv2[8]; +}; + +struct hns3_vport_vtag_tx_cfg_cmd { + uint8_t vport_vlan_cfg; + uint8_t vf_offset; + uint8_t rsv1[2]; + uint16_t def_vlan_tag1; + uint16_t def_vlan_tag2; + uint8_t vf_bitmap[8]; + uint8_t rsv2[8]; +}; + + +struct hns3_vlan_filter_ctrl_cmd { + uint8_t vlan_type; + uint8_t vlan_fe; + uint8_t rsv1[2]; + uint8_t vf_id; + uint8_t rsv2[19]; +}; + +#define HNS3_VLAN_OFFSET_BITMAP_NUM 20 +struct hns3_vlan_filter_pf_cfg_cmd { + uint8_t vlan_offset; + uint8_t vlan_cfg; + uint8_t rsv[2]; + uint8_t vlan_offset_bitmap[HNS3_VLAN_OFFSET_BITMAP_NUM]; +}; + +#define HNS3_VLAN_FILTER_VF_CFG_CMD_VF_BITMAP_NUM 16 +struct hns3_vlan_filter_vf_cfg_cmd { + uint16_t vlan_id; + uint8_t resp_code; + uint8_t rsv; + uint8_t vlan_cfg; + uint8_t rsv1[3]; + uint8_t vf_bitmap[HNS3_VLAN_FILTER_VF_CFG_CMD_VF_BITMAP_NUM]; +}; + +struct hns3_tx_vlan_type_cfg_cmd { + uint16_t ot_vlan_type; + uint16_t in_vlan_type; + uint8_t rsv[20]; +}; + +struct hns3_rx_vlan_type_cfg_cmd { + uint16_t ot_fst_vlan_type; + uint16_t ot_sec_vlan_type; + uint16_t in_fst_vlan_type; + uint16_t in_sec_vlan_type; + uint8_t rsv[16]; +}; + +#define HNS3_TSO_MSS_MIN_S 0 +#define HNS3_TSO_MSS_MIN_M GENMASK(13, 0) + +#define HNS3_TSO_MSS_MAX_S 16 +#define HNS3_TSO_MSS_MAX_M GENMASK(29, 16) + +struct hns3_cfg_tso_status_cmd { + rte_le16_t tso_mss_min; + rte_le16_t tso_mss_max; + uint8_t rsv[20]; +}; + +#define HNS3_GRO_EN_B 0 +struct hns3_cfg_gro_status_cmd { + rte_le16_t gro_en; + uint8_t rsv[22]; +}; + +#define HNS3_TSO_MSS_MIN 256 +#define HNS3_TSO_MSS_MAX 9668 + +#define HNS3_RSS_HASH_KEY_OFFSET_B 4 + +#define HNS3_RSS_CFG_TBL_SIZE 16 +#define HNS3_RSS_HASH_KEY_NUM 16 +/* Configure the algorithm mode and Hash Key, opcode:0x0D01 */ +struct hns3_rss_generic_config_cmd { + /* Hash_algorithm(8.0~8.3), hash_key_offset(8.4~8.7) */ + uint8_t hash_config; + uint8_t rsv[7]; + uint8_t hash_key[HNS3_RSS_HASH_KEY_NUM]; +}; + +/* Configure the tuple selection for RSS hash input, opcode:0x0D02 */ +struct hns3_rss_input_tuple_cmd { + uint8_t ipv4_tcp_en; + uint8_t ipv4_udp_en; + uint8_t ipv4_sctp_en; + uint8_t ipv4_fragment_en; + uint8_t ipv6_tcp_en; + uint8_t ipv6_udp_en; + uint8_t ipv6_sctp_en; + uint8_t ipv6_fragment_en; + uint8_t rsv[16]; +}; + +#define HNS3_RSS_CFG_TBL_SIZE 16 + +/* Configure the indirection table, opcode:0x0D07 */ +struct hns3_rss_indirection_table_cmd { + uint16_t start_table_index; /* Bit3~0 must be 0x0. 
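+ * The table is programmed HNS3_RSS_CFG_TBL_SIZE (16) entries per
+ * descriptor, so the start index advances in steps of 16.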
*/ + uint16_t rss_set_bitmap; + uint8_t rsv[4]; + uint8_t rss_result[HNS3_RSS_CFG_TBL_SIZE]; +}; + +#define HNS3_RSS_TC_OFFSET_S 0 +#define HNS3_RSS_TC_OFFSET_M (0x3ff << HNS3_RSS_TC_OFFSET_S) +#define HNS3_RSS_TC_SIZE_S 12 +#define HNS3_RSS_TC_SIZE_M (0x7 << HNS3_RSS_TC_SIZE_S) +#define HNS3_RSS_TC_VALID_B 15 + +/* Configure the tc_size and tc_offset, opcode:0x0D08 */ +struct hns3_rss_tc_mode_cmd { + uint16_t rss_tc_mode[HNS3_MAX_TC_NUM]; + uint8_t rsv[8]; +}; + +#define HNS3_LINK_STATUS_UP_B 0 +#define HNS3_LINK_STATUS_UP_M BIT(HNS3_LINK_STATUS_UP_B) +struct hns3_link_status_cmd { + uint8_t status; + uint8_t rsv[23]; +}; + +struct hns3_promisc_param { + uint8_t vf_id; + uint8_t enable; +}; + +#define HNS3_PROMISC_TX_EN_B BIT(4) +#define HNS3_PROMISC_RX_EN_B BIT(5) +#define HNS3_PROMISC_EN_B 1 +#define HNS3_PROMISC_EN_ALL 0x7 +#define HNS3_PROMISC_EN_UC 0x1 +#define HNS3_PROMISC_EN_MC 0x2 +#define HNS3_PROMISC_EN_BC 0x4 +struct hns3_promisc_cfg_cmd { + uint8_t flag; + uint8_t vf_id; + uint16_t rsv0; + uint8_t rsv1[20]; +}; + +enum hns3_promisc_type { + HNS3_UNICAST = 1, + HNS3_MULTICAST = 2, + HNS3_BROADCAST = 3, +}; + +#define HNS3_MAC_TX_EN_B 6 +#define HNS3_MAC_RX_EN_B 7 +#define HNS3_MAC_PAD_TX_B 11 +#define HNS3_MAC_PAD_RX_B 12 +#define HNS3_MAC_1588_TX_B 13 +#define HNS3_MAC_1588_RX_B 14 +#define HNS3_MAC_APP_LP_B 15 +#define HNS3_MAC_LINE_LP_B 16 +#define HNS3_MAC_FCS_TX_B 17 +#define HNS3_MAC_RX_OVERSIZE_TRUNCATE_B 18 +#define HNS3_MAC_RX_FCS_STRIP_B 19 +#define HNS3_MAC_RX_FCS_B 20 +#define HNS3_MAC_TX_UNDER_MIN_ERR_B 21 +#define HNS3_MAC_TX_OVERSIZE_TRUNCATE_B 22 + +struct hns3_config_mac_mode_cmd { + uint32_t txrx_pad_fcs_loop_en; + uint8_t rsv[20]; +}; + +#define HNS3_CFG_SPEED_10M 6 +#define HNS3_CFG_SPEED_100M 7 +#define HNS3_CFG_SPEED_1G 0 +#define HNS3_CFG_SPEED_10G 1 +#define HNS3_CFG_SPEED_25G 2 +#define HNS3_CFG_SPEED_40G 3 +#define HNS3_CFG_SPEED_50G 4 +#define HNS3_CFG_SPEED_100G 5 + +#define HNS3_CFG_SPEED_S 0 +#define HNS3_CFG_SPEED_M GENMASK(5, 0) +#define HNS3_CFG_DUPLEX_B 7 +#define HNS3_CFG_DUPLEX_M BIT(HNS3_CFG_DUPLEX_B) + +#define HNS3_CFG_MAC_SPEED_CHANGE_EN_B 0 + +struct hns3_config_mac_speed_dup_cmd { + uint8_t speed_dup; + uint8_t mac_change_fec_en; + uint8_t rsv[22]; +}; + +#define HNS3_RING_ID_MASK GENMASK(9, 0) +#define HNS3_TQP_ENABLE_B 0 + +#define HNS3_MAC_CFG_AN_EN_B 0 +#define HNS3_MAC_CFG_AN_INT_EN_B 1 +#define HNS3_MAC_CFG_AN_INT_MSK_B 2 +#define HNS3_MAC_CFG_AN_INT_CLR_B 3 +#define HNS3_MAC_CFG_AN_RST_B 4 + +#define HNS3_MAC_CFG_AN_EN BIT(HNS3_MAC_CFG_AN_EN_B) + +struct hns3_config_auto_neg_cmd { + uint32_t cfg_an_cmd_flag; + uint8_t rsv[20]; +}; + +struct hns3_sfp_speed_cmd { + uint32_t sfp_speed; + uint32_t rsv[5]; +}; + +#define HNS3_MAC_MGR_MASK_VLAN_B BIT(0) +#define HNS3_MAC_MGR_MASK_MAC_B BIT(1) +#define HNS3_MAC_MGR_MASK_ETHERTYPE_B BIT(2) +#define HNS3_MAC_ETHERTYPE_LLDP 0x88cc + +struct hns3_mac_mgr_tbl_entry_cmd { + uint8_t flags; + uint8_t resp_code; + uint16_t vlan_tag; + uint32_t mac_addr_hi32; + uint16_t mac_addr_lo16; + uint16_t rsv1; + uint16_t ethter_type; + uint16_t egress_port; + uint16_t egress_queue; + uint8_t sw_port_id_aware; + uint8_t rsv2; + uint8_t i_port_bitmap; + uint8_t i_port_direction; + uint8_t rsv3[2]; +}; + +struct hns3_cfg_com_tqp_queue_cmd { + uint16_t tqp_id; + uint16_t stream_id; + uint8_t enable; + uint8_t rsv[19]; +}; + +#define HNS3_TQP_MAP_TYPE_PF 0 +#define HNS3_TQP_MAP_TYPE_VF 1 +#define HNS3_TQP_MAP_TYPE_B 0 +#define HNS3_TQP_MAP_EN_B 1 + +struct hns3_tqp_map_cmd { + uint16_t tqp_id; /* Absolute tqp 
id for in this pf */ + uint8_t tqp_vf; /* VF id */ + uint8_t tqp_flag; /* Indicate it's pf or vf tqp */ + uint16_t tqp_vid; /* Virtual id in this pf/vf */ + uint8_t rsv[18]; +}; + +enum hns3_ring_type { + HNS3_RING_TYPE_TX, + HNS3_RING_TYPE_RX +}; + +enum hns3_int_gl_idx { + HNS3_RING_GL_RX, + HNS3_RING_GL_TX, + HNS3_RING_GL_IMMEDIATE = 3 +}; + +#define HNS3_RING_GL_IDX_S 0 +#define HNS3_RING_GL_IDX_M GENMASK(1, 0) + +#define HNS3_VECTOR_ELEMENTS_PER_CMD 10 + +#define HNS3_INT_TYPE_S 0 +#define HNS3_INT_TYPE_M GENMASK(1, 0) +#define HNS3_TQP_ID_S 2 +#define HNS3_TQP_ID_M GENMASK(12, 2) +#define HNS3_INT_GL_IDX_S 13 +#define HNS3_INT_GL_IDX_M GENMASK(14, 13) +struct hns3_ctrl_vector_chain_cmd { + uint8_t int_vector_id; + uint8_t int_cause_num; + uint16_t tqp_type_and_id[HNS3_VECTOR_ELEMENTS_PER_CMD]; + uint8_t vfid; + uint8_t rsv; +}; + +struct hns3_config_max_frm_size_cmd { + uint16_t max_frm_size; + uint8_t min_frm_size; + uint8_t rsv[21]; +}; + +enum hns3_mac_vlan_tbl_opcode { + HNS3_MAC_VLAN_ADD, /* Add new or modify mac_vlan */ + HNS3_MAC_VLAN_UPDATE, /* Modify other fields of this table */ + HNS3_MAC_VLAN_REMOVE, /* Remove a entry through mac_vlan key */ + HNS3_MAC_VLAN_LKUP, /* Lookup a entry through mac_vlan key */ +}; + +enum hns3_mac_vlan_add_resp_code { + HNS3_ADD_UC_OVERFLOW = 2, /* ADD failed for UC overflow */ + HNS3_ADD_MC_OVERFLOW, /* ADD failed for MC overflow */ +}; + +#define HNS3_MC_MAC_VLAN_ADD_DESC_NUM 3 + +#define HNS3_MAC_VLAN_BIT0_EN_B 0 +#define HNS3_MAC_VLAN_BIT1_EN_B 1 +#define HNS3_MAC_EPORT_SW_EN_B 12 +#define HNS3_MAC_EPORT_TYPE_B 11 +#define HNS3_MAC_EPORT_VFID_S 3 +#define HNS3_MAC_EPORT_VFID_M GENMASK(10, 3) +#define HNS3_MAC_EPORT_PFID_S 0 +#define HNS3_MAC_EPORT_PFID_M GENMASK(2, 0) +struct hns3_mac_vlan_tbl_entry_cmd { + uint8_t flags; + uint8_t resp_code; + uint16_t vlan_tag; + uint32_t mac_addr_hi32; + uint16_t mac_addr_lo16; + uint16_t rsv1; + uint8_t entry_type; + uint8_t mc_mac_en; + uint16_t egress_port; + uint16_t egress_queue; + uint8_t rsv2[6]; +}; + +#define HNS3_TQP_RESET_B 0 +struct hns3_reset_tqp_queue_cmd { + uint16_t tqp_id; + uint8_t reset_req; + uint8_t ready_to_reset; + uint8_t rsv[20]; +}; + +#define HNS3_CFG_RESET_MAC_B 3 +#define HNS3_CFG_RESET_FUNC_B 7 +struct hns3_reset_cmd { + uint8_t mac_func_reset; + uint8_t fun_reset_vfid; + uint8_t rsv[22]; +}; + +#define HNS3_MAX_TQP_NUM_PER_FUNC 64 +#define HNS3_DEFAULT_TX_BUF 0x4000 /* 16k bytes */ +#define HNS3_TOTAL_PKT_BUF 0x108000 /* 1.03125M bytes */ +#define HNS3_DEFAULT_DV 0xA000 /* 40k byte */ +#define HNS3_DEFAULT_NON_DCB_DV 0x7800 /* 30K byte */ +#define HNS3_NON_DCB_ADDITIONAL_BUF 0x1400 /* 5120 byte */ + +#define HNS3_TYPE_CRQ 0 +#define HNS3_TYPE_CSQ 1 + +#define HNS3_NIC_SW_RST_RDY_B 16 +#define HNS3_NIC_SW_RST_RDY BIT(HNS3_NIC_SW_RST_RDY_B) +#define HNS3_NIC_CMQ_DESC_NUM 1024 +#define HNS3_NIC_CMQ_DESC_NUM_S 3 + +#define HNS3_CMD_SEND_SYNC(flag) \ + ((flag) & HNS3_CMD_FLAG_NO_INTR) + +void hns3_cmd_reuse_desc(struct hns3_cmd_desc *desc, bool is_read); +void hns3_cmd_setup_basic_desc(struct hns3_cmd_desc *desc, + enum hns3_opcode_type opcode, bool is_read); +int hns3_cmd_send(struct hns3_hw *hw, struct hns3_cmd_desc *desc, int num); +int hns3_cmd_init_queue(struct hns3_hw *hw); +int hns3_cmd_init(struct hns3_hw *hw); +void hns3_cmd_destroy_queue(struct hns3_hw *hw); +void hns3_cmd_uninit(struct hns3_hw *hw); + +#endif /* _HNS3_CMD_H_ */ diff --git a/src/spdk/dpdk/drivers/net/hns3/hns3_dcb.c b/src/spdk/dpdk/drivers/net/hns3/hns3_dcb.c new file mode 100644 index 
000000000..02628b6b6 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/hns3/hns3_dcb.c @@ -0,0 +1,1690 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018-2019 Hisilicon Limited. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "hns3_logs.h" +#include "hns3_regs.h" +#include "hns3_ethdev.h" +#include "hns3_dcb.h" + +#define HNS3_SHAPER_BS_U_DEF 5 +#define HNS3_SHAPER_BS_S_DEF 20 +#define BW_MAX_PERCENT 100 +#define HNS3_ETHER_MAX_RATE 100000 + +/* + * hns3_shaper_para_calc: calculate ir parameter for the shaper + * @ir: Rate to be config, its unit is Mbps + * @shaper_level: the shaper level. eg: port, pg, priority, queueset + * @shaper_para: shaper parameter of IR shaper + * + * the formula: + * + * IR_b * (2 ^ IR_u) * 8 + * IR(Mbps) = ------------------------- * CLOCK(1000Mbps) + * Tick * (2 ^ IR_s) + * + * @return: 0: calculate sucessful, negative: fail + */ +static int +hns3_shaper_para_calc(struct hns3_hw *hw, uint32_t ir, uint8_t shaper_level, + struct hns3_shaper_parameter *shaper_para) +{ +#define SHAPER_DEFAULT_IR_B 126 +#define DIVISOR_CLK (1000 * 8) +#define DIVISOR_IR_B_126 (126 * DIVISOR_CLK) + + const uint16_t tick_array[HNS3_SHAPER_LVL_CNT] = { + 6 * 256, /* Prioriy level */ + 6 * 32, /* Prioriy group level */ + 6 * 8, /* Port level */ + 6 * 256 /* Qset level */ + }; + uint8_t ir_u_calc = 0; + uint8_t ir_s_calc = 0; + uint32_t denominator; + uint32_t ir_calc; + uint32_t tick; + + /* Calc tick */ + if (shaper_level >= HNS3_SHAPER_LVL_CNT) { + hns3_err(hw, + "shaper_level(%d) is greater than HNS3_SHAPER_LVL_CNT(%d)", + shaper_level, HNS3_SHAPER_LVL_CNT); + return -EINVAL; + } + + if (ir > HNS3_ETHER_MAX_RATE) { + hns3_err(hw, "rate(%d) exceeds the rate driver supported " + "HNS3_ETHER_MAX_RATE(%d)", ir, HNS3_ETHER_MAX_RATE); + return -EINVAL; + } + + tick = tick_array[shaper_level]; + + /* + * Calc the speed if ir_b = 126, ir_u = 0 and ir_s = 0 + * the formula is changed to: + * 126 * 1 * 8 + * ir_calc = ---------------- * 1000 + * tick * 1 + */ + ir_calc = (DIVISOR_IR_B_126 + (tick >> 1) - 1) / tick; + + if (ir_calc == ir) { + shaper_para->ir_b = SHAPER_DEFAULT_IR_B; + } else if (ir_calc > ir) { + /* Increasing the denominator to select ir_s value */ + do { + ir_s_calc++; + ir_calc = DIVISOR_IR_B_126 / (tick * (1 << ir_s_calc)); + } while (ir_calc > ir); + + if (ir_calc == ir) + shaper_para->ir_b = SHAPER_DEFAULT_IR_B; + else + shaper_para->ir_b = (ir * tick * (1 << ir_s_calc) + + (DIVISOR_CLK >> 1)) / DIVISOR_CLK; + } else { + /* + * Increasing the numerator to select ir_u value. ir_u_calc will + * get maximum value when ir_calc is minimum and ir is maximum. + * ir_calc gets minimum value when tick is the maximum value. + * At the same time, value of ir_u_calc can only be increased up + * to eight after the while loop if the value of ir is equal + * to HNS3_ETHER_MAX_RATE. + */ + uint32_t numerator; + do { + ir_u_calc++; + numerator = DIVISOR_IR_B_126 * (1 << ir_u_calc); + ir_calc = (numerator + (tick >> 1)) / tick; + } while (ir_calc < ir); + + if (ir_calc == ir) { + shaper_para->ir_b = SHAPER_DEFAULT_IR_B; + } else { + --ir_u_calc; + + /* + * The maximum value of ir_u_calc in this branch is + * seven in all cases. Thus, value of denominator can + * not be zero here. 
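+ * For example, with tick = 1536 (the priority/qset tick) and
+ * ir = HNS3_ETHER_MAX_RATE = 100000, the loop above stops at
+ * ir_u_calc = 8 with ir_calc = 168000; ir_u_calc is then decremented
+ * to 7, denominator = 8000 * 128 = 1024000 and
+ * ir_b = (100000 * 1536 + 512000) / 1024000 = 150, which encodes
+ * exactly 150 * 128 * 8 * 1000 / 1536 = 100000 Mbps.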
+ */ + denominator = DIVISOR_CLK * (1 << ir_u_calc); + shaper_para->ir_b = + (ir * tick + (denominator >> 1)) / denominator; + } + } + + shaper_para->ir_u = ir_u_calc; + shaper_para->ir_s = ir_s_calc; + + return 0; +} + +static int +hns3_fill_pri_array(struct hns3_hw *hw, uint8_t *pri, uint8_t pri_id) +{ +#define HNS3_HALF_BYTE_BIT_OFFSET 4 + uint8_t tc = hw->dcb_info.prio_tc[pri_id]; + + if (tc >= hw->dcb_info.num_tc) + return -EINVAL; + + /* + * The register for priority has four bytes, the first bytes includes + * priority0 and priority1, the higher 4bit stands for priority1 + * while the lower 4bit stands for priority0, as below: + * first byte: | pri_1 | pri_0 | + * second byte: | pri_3 | pri_2 | + * third byte: | pri_5 | pri_4 | + * fourth byte: | pri_7 | pri_6 | + */ + pri[pri_id >> 1] |= tc << ((pri_id & 1) * HNS3_HALF_BYTE_BIT_OFFSET); + + return 0; +} + +static int +hns3_up_to_tc_map(struct hns3_hw *hw) +{ + struct hns3_cmd_desc desc; + uint8_t *pri = (uint8_t *)desc.data; + uint8_t pri_id; + int ret; + + hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_PRI_TO_TC_MAPPING, false); + + for (pri_id = 0; pri_id < HNS3_MAX_USER_PRIO; pri_id++) { + ret = hns3_fill_pri_array(hw, pri, pri_id); + if (ret) + return ret; + } + + return hns3_cmd_send(hw, &desc, 1); +} + +static int +hns3_pg_to_pri_map_cfg(struct hns3_hw *hw, uint8_t pg_id, uint8_t pri_bit_map) +{ + struct hns3_pg_to_pri_link_cmd *map; + struct hns3_cmd_desc desc; + + hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PG_TO_PRI_LINK, false); + + map = (struct hns3_pg_to_pri_link_cmd *)desc.data; + + map->pg_id = pg_id; + map->pri_bit_map = pri_bit_map; + + return hns3_cmd_send(hw, &desc, 1); +} + +static int +hns3_pg_to_pri_map(struct hns3_hw *hw) +{ + struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); + struct hns3_pf *pf = &hns->pf; + struct hns3_pg_info *pg_info; + int ret, i; + + if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE) + return -EINVAL; + + for (i = 0; i < hw->dcb_info.num_pg; i++) { + /* Cfg pg to priority mapping */ + pg_info = &hw->dcb_info.pg_info[i]; + ret = hns3_pg_to_pri_map_cfg(hw, i, pg_info->tc_bit_map); + if (ret) + return ret; + } + + return 0; +} + +static int +hns3_qs_to_pri_map_cfg(struct hns3_hw *hw, uint16_t qs_id, uint8_t pri) +{ + struct hns3_qs_to_pri_link_cmd *map; + struct hns3_cmd_desc desc; + + hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_QS_TO_PRI_LINK, false); + + map = (struct hns3_qs_to_pri_link_cmd *)desc.data; + + map->qs_id = rte_cpu_to_le_16(qs_id); + map->priority = pri; + map->link_vld = HNS3_DCB_QS_PRI_LINK_VLD_MSK; + + return hns3_cmd_send(hw, &desc, 1); +} + +static int +hns3_dcb_qs_weight_cfg(struct hns3_hw *hw, uint16_t qs_id, uint8_t dwrr) +{ + struct hns3_qs_weight_cmd *weight; + struct hns3_cmd_desc desc; + + hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_QS_WEIGHT, false); + + weight = (struct hns3_qs_weight_cmd *)desc.data; + + weight->qs_id = rte_cpu_to_le_16(qs_id); + weight->dwrr = dwrr; + + return hns3_cmd_send(hw, &desc, 1); +} + +static int +hns3_dcb_ets_tc_dwrr_cfg(struct hns3_hw *hw) +{ +#define DEFAULT_TC_WEIGHT 1 +#define DEFAULT_TC_OFFSET 14 + struct hns3_ets_tc_weight_cmd *ets_weight; + struct hns3_cmd_desc desc; + uint8_t i; + + hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_ETS_TC_WEIGHT, false); + ets_weight = (struct hns3_ets_tc_weight_cmd *)desc.data; + + for (i = 0; i < HNS3_MAX_TC_NUM; i++) { + struct hns3_pg_info *pg_info; + + ets_weight->tc_weight[i] = DEFAULT_TC_WEIGHT; + + if (!(hw->hw_tc_map & BIT(i))) + continue; + + pg_info = 
&hw->dcb_info.pg_info[hw->dcb_info.tc_info[i].pgid]; + ets_weight->tc_weight[i] = pg_info->tc_dwrr[i]; + } + + ets_weight->weight_offset = DEFAULT_TC_OFFSET; + + return hns3_cmd_send(hw, &desc, 1); +} + +static int +hns3_dcb_pri_weight_cfg(struct hns3_hw *hw, uint8_t pri_id, uint8_t dwrr) +{ + struct hns3_priority_weight_cmd *weight; + struct hns3_cmd_desc desc; + + hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PRI_WEIGHT, false); + + weight = (struct hns3_priority_weight_cmd *)desc.data; + + weight->pri_id = pri_id; + weight->dwrr = dwrr; + + return hns3_cmd_send(hw, &desc, 1); +} + +static int +hns3_dcb_pg_weight_cfg(struct hns3_hw *hw, uint8_t pg_id, uint8_t dwrr) +{ + struct hns3_pg_weight_cmd *weight; + struct hns3_cmd_desc desc; + + hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PG_WEIGHT, false); + + weight = (struct hns3_pg_weight_cmd *)desc.data; + + weight->pg_id = pg_id; + weight->dwrr = dwrr; + + return hns3_cmd_send(hw, &desc, 1); +} +static int +hns3_dcb_pg_schd_mode_cfg(struct hns3_hw *hw, uint8_t pg_id) +{ + struct hns3_cmd_desc desc; + + hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PG_SCH_MODE_CFG, false); + + if (hw->dcb_info.pg_info[pg_id].pg_sch_mode == HNS3_SCH_MODE_DWRR) + desc.data[1] = rte_cpu_to_le_32(HNS3_DCB_TX_SCHD_DWRR_MSK); + else + desc.data[1] = 0; + + desc.data[0] = rte_cpu_to_le_32(pg_id); + + return hns3_cmd_send(hw, &desc, 1); +} + +static uint32_t +hns3_dcb_get_shapping_para(uint8_t ir_b, uint8_t ir_u, uint8_t ir_s, + uint8_t bs_b, uint8_t bs_s) +{ + uint32_t shapping_para = 0; + + hns3_dcb_set_field(shapping_para, IR_B, ir_b); + hns3_dcb_set_field(shapping_para, IR_U, ir_u); + hns3_dcb_set_field(shapping_para, IR_S, ir_s); + hns3_dcb_set_field(shapping_para, BS_B, bs_b); + hns3_dcb_set_field(shapping_para, BS_S, bs_s); + + return shapping_para; +} + +static int +hns3_dcb_port_shaper_cfg(struct hns3_hw *hw) +{ + struct hns3_port_shapping_cmd *shap_cfg_cmd; + struct hns3_shaper_parameter shaper_parameter; + uint32_t shapping_para; + uint32_t ir_u, ir_b, ir_s; + struct hns3_cmd_desc desc; + int ret; + + ret = hns3_shaper_para_calc(hw, hw->mac.link_speed, + HNS3_SHAPER_LVL_PORT, &shaper_parameter); + if (ret) { + hns3_err(hw, "calculate shaper parameter failed: %d", ret); + return ret; + } + + hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PORT_SHAPPING, false); + shap_cfg_cmd = (struct hns3_port_shapping_cmd *)desc.data; + + ir_b = shaper_parameter.ir_b; + ir_u = shaper_parameter.ir_u; + ir_s = shaper_parameter.ir_s; + shapping_para = hns3_dcb_get_shapping_para(ir_b, ir_u, ir_s, + HNS3_SHAPER_BS_U_DEF, + HNS3_SHAPER_BS_S_DEF); + + shap_cfg_cmd->port_shapping_para = rte_cpu_to_le_32(shapping_para); + + return hns3_cmd_send(hw, &desc, 1); +} + +static int +hns3_dcb_pg_shapping_cfg(struct hns3_hw *hw, enum hns3_shap_bucket bucket, + uint8_t pg_id, uint32_t shapping_para) +{ + struct hns3_pg_shapping_cmd *shap_cfg_cmd; + enum hns3_opcode_type opcode; + struct hns3_cmd_desc desc; + + opcode = bucket ? 
HNS3_OPC_TM_PG_P_SHAPPING : + HNS3_OPC_TM_PG_C_SHAPPING; + hns3_cmd_setup_basic_desc(&desc, opcode, false); + + shap_cfg_cmd = (struct hns3_pg_shapping_cmd *)desc.data; + + shap_cfg_cmd->pg_id = pg_id; + + shap_cfg_cmd->pg_shapping_para = rte_cpu_to_le_32(shapping_para); + + return hns3_cmd_send(hw, &desc, 1); +} + +static int +hns3_dcb_pg_shaper_cfg(struct hns3_hw *hw) +{ + struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); + struct hns3_shaper_parameter shaper_parameter; + struct hns3_pf *pf = &hns->pf; + uint32_t ir_u, ir_b, ir_s; + uint32_t shaper_para; + uint8_t i; + int ret; + + /* Cfg pg schd */ + if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE) + return -EINVAL; + + /* Pg to pri */ + for (i = 0; i < hw->dcb_info.num_pg; i++) { + /* Calc shaper para */ + ret = hns3_shaper_para_calc(hw, + hw->dcb_info.pg_info[i].bw_limit, + HNS3_SHAPER_LVL_PG, + &shaper_parameter); + if (ret) { + hns3_err(hw, "calculate shaper parameter failed: %d", + ret); + return ret; + } + + shaper_para = hns3_dcb_get_shapping_para(0, 0, 0, + HNS3_SHAPER_BS_U_DEF, + HNS3_SHAPER_BS_S_DEF); + + ret = hns3_dcb_pg_shapping_cfg(hw, HNS3_DCB_SHAP_C_BUCKET, i, + shaper_para); + if (ret) { + hns3_err(hw, + "config PG CIR shaper parameter failed: %d", + ret); + return ret; + } + + ir_b = shaper_parameter.ir_b; + ir_u = shaper_parameter.ir_u; + ir_s = shaper_parameter.ir_s; + shaper_para = hns3_dcb_get_shapping_para(ir_b, ir_u, ir_s, + HNS3_SHAPER_BS_U_DEF, + HNS3_SHAPER_BS_S_DEF); + + ret = hns3_dcb_pg_shapping_cfg(hw, HNS3_DCB_SHAP_P_BUCKET, i, + shaper_para); + if (ret) { + hns3_err(hw, + "config PG PIR shaper parameter failed: %d", + ret); + return ret; + } + } + + return 0; +} + +static int +hns3_dcb_qs_schd_mode_cfg(struct hns3_hw *hw, uint16_t qs_id, uint8_t mode) +{ + struct hns3_cmd_desc desc; + + hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_QS_SCH_MODE_CFG, false); + + if (mode == HNS3_SCH_MODE_DWRR) + desc.data[1] = rte_cpu_to_le_32(HNS3_DCB_TX_SCHD_DWRR_MSK); + else + desc.data[1] = 0; + + desc.data[0] = rte_cpu_to_le_32(qs_id); + + return hns3_cmd_send(hw, &desc, 1); +} + +static int +hns3_dcb_pri_schd_mode_cfg(struct hns3_hw *hw, uint8_t pri_id) +{ + struct hns3_cmd_desc desc; + + hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PRI_SCH_MODE_CFG, false); + + if (hw->dcb_info.tc_info[pri_id].tc_sch_mode == HNS3_SCH_MODE_DWRR) + desc.data[1] = rte_cpu_to_le_32(HNS3_DCB_TX_SCHD_DWRR_MSK); + else + desc.data[1] = 0; + + desc.data[0] = rte_cpu_to_le_32(pri_id); + + return hns3_cmd_send(hw, &desc, 1); +} + +static int +hns3_dcb_pri_shapping_cfg(struct hns3_hw *hw, enum hns3_shap_bucket bucket, + uint8_t pri_id, uint32_t shapping_para) +{ + struct hns3_pri_shapping_cmd *shap_cfg_cmd; + enum hns3_opcode_type opcode; + struct hns3_cmd_desc desc; + + opcode = bucket ? 
HNS3_OPC_TM_PRI_P_SHAPPING : + HNS3_OPC_TM_PRI_C_SHAPPING; + + hns3_cmd_setup_basic_desc(&desc, opcode, false); + + shap_cfg_cmd = (struct hns3_pri_shapping_cmd *)desc.data; + + shap_cfg_cmd->pri_id = pri_id; + + shap_cfg_cmd->pri_shapping_para = rte_cpu_to_le_32(shapping_para); + + return hns3_cmd_send(hw, &desc, 1); +} + +static int +hns3_dcb_pri_tc_base_shaper_cfg(struct hns3_hw *hw) +{ + struct hns3_shaper_parameter shaper_parameter; + uint32_t ir_u, ir_b, ir_s; + uint32_t shaper_para; + int ret, i; + + for (i = 0; i < hw->dcb_info.num_tc; i++) { + ret = hns3_shaper_para_calc(hw, + hw->dcb_info.tc_info[i].bw_limit, + HNS3_SHAPER_LVL_PRI, + &shaper_parameter); + if (ret) { + hns3_err(hw, "calculate shaper parameter failed: %d", + ret); + return ret; + } + + shaper_para = hns3_dcb_get_shapping_para(0, 0, 0, + HNS3_SHAPER_BS_U_DEF, + HNS3_SHAPER_BS_S_DEF); + + ret = hns3_dcb_pri_shapping_cfg(hw, HNS3_DCB_SHAP_C_BUCKET, i, + shaper_para); + if (ret) { + hns3_err(hw, + "config priority CIR shaper parameter failed: %d", + ret); + return ret; + } + + ir_b = shaper_parameter.ir_b; + ir_u = shaper_parameter.ir_u; + ir_s = shaper_parameter.ir_s; + shaper_para = hns3_dcb_get_shapping_para(ir_b, ir_u, ir_s, + HNS3_SHAPER_BS_U_DEF, + HNS3_SHAPER_BS_S_DEF); + + ret = hns3_dcb_pri_shapping_cfg(hw, HNS3_DCB_SHAP_P_BUCKET, i, + shaper_para); + if (ret) { + hns3_err(hw, + "config priority PIR shaper parameter failed: %d", + ret); + return ret; + } + } + + return 0; +} + + +static int +hns3_dcb_pri_shaper_cfg(struct hns3_hw *hw) +{ + struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); + struct hns3_pf *pf = &hns->pf; + int ret; + + if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE) + return -EINVAL; + + ret = hns3_dcb_pri_tc_base_shaper_cfg(hw); + if (ret) + hns3_err(hw, "config port shaper failed: %d", ret); + + return ret; +} + +void +hns3_set_rss_size(struct hns3_hw *hw, uint16_t nb_rx_q) +{ + struct hns3_rss_conf *rss_cfg = &hw->rss_info; + uint16_t rx_qnum_per_tc; + int i; + + rx_qnum_per_tc = nb_rx_q / hw->num_tc; + rx_qnum_per_tc = RTE_MIN(hw->rss_size_max, rx_qnum_per_tc); + if (hw->alloc_rss_size != rx_qnum_per_tc) { + hns3_info(hw, "rss size changes from %u to %u", + hw->alloc_rss_size, rx_qnum_per_tc); + hw->alloc_rss_size = rx_qnum_per_tc; + } + hw->used_rx_queues = hw->num_tc * hw->alloc_rss_size; + + /* + * When rss size is changed, we need to update rss redirection table + * maintained by driver. Besides, during the entire reset process, we + * need to ensure that the rss table information are not overwritten + * and configured directly to the hardware in the RESET_STAGE_RESTORE + * stage of the reset process. 
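+ * When no reset is in progress the table is refilled with a simple
+ * round-robin mapping, i.e. entry i points to queue
+ * i % alloc_rss_size.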
+ */ + if (rte_atomic16_read(&hw->reset.resetting) == 0) { + for (i = 0; i < HNS3_RSS_IND_TBL_SIZE; i++) + rss_cfg->rss_indirection_tbl[i] = + i % hw->alloc_rss_size; + } +} + +void +hns3_tc_queue_mapping_cfg(struct hns3_hw *hw, uint16_t nb_queue) +{ + struct hns3_tc_queue_info *tc_queue; + uint8_t i; + + hw->tx_qnum_per_tc = nb_queue / hw->num_tc; + for (i = 0; i < HNS3_MAX_TC_NUM; i++) { + tc_queue = &hw->tc_queue[i]; + if (hw->hw_tc_map & BIT(i) && i < hw->num_tc) { + tc_queue->enable = true; + tc_queue->tqp_offset = i * hw->tx_qnum_per_tc; + tc_queue->tqp_count = hw->tx_qnum_per_tc; + tc_queue->tc = i; + } else { + /* Set to default queue if TC is disable */ + tc_queue->enable = false; + tc_queue->tqp_offset = 0; + tc_queue->tqp_count = 0; + tc_queue->tc = 0; + } + } + hw->used_tx_queues = hw->num_tc * hw->tx_qnum_per_tc; +} + +static void +hns3_dcb_update_tc_queue_mapping(struct hns3_hw *hw, uint16_t nb_rx_q, + uint16_t nb_tx_q) +{ + struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); + struct hns3_pf *pf = &hns->pf; + + hw->num_tc = hw->dcb_info.num_tc; + hns3_set_rss_size(hw, nb_rx_q); + hns3_tc_queue_mapping_cfg(hw, nb_tx_q); + + if (!hns->is_vf) + memcpy(pf->prio_tc, hw->dcb_info.prio_tc, HNS3_MAX_USER_PRIO); +} + +int +hns3_dcb_info_init(struct hns3_hw *hw) +{ + struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); + struct hns3_pf *pf = &hns->pf; + int i, k; + + if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE && + hw->dcb_info.num_pg != 1) + return -EINVAL; + + /* Initializing PG information */ + memset(hw->dcb_info.pg_info, 0, + sizeof(struct hns3_pg_info) * HNS3_PG_NUM); + for (i = 0; i < hw->dcb_info.num_pg; i++) { + hw->dcb_info.pg_dwrr[i] = i ? 0 : BW_MAX_PERCENT; + hw->dcb_info.pg_info[i].pg_id = i; + hw->dcb_info.pg_info[i].pg_sch_mode = HNS3_SCH_MODE_DWRR; + hw->dcb_info.pg_info[i].bw_limit = HNS3_ETHER_MAX_RATE; + + if (i != 0) + continue; + + hw->dcb_info.pg_info[i].tc_bit_map = hw->hw_tc_map; + for (k = 0; k < hw->dcb_info.num_tc; k++) + hw->dcb_info.pg_info[i].tc_dwrr[k] = BW_MAX_PERCENT; + } + + /* All UPs mapping to TC0 */ + for (i = 0; i < HNS3_MAX_USER_PRIO; i++) + hw->dcb_info.prio_tc[i] = 0; + + /* Initializing tc information */ + memset(hw->dcb_info.tc_info, 0, + sizeof(struct hns3_tc_info) * HNS3_MAX_TC_NUM); + for (i = 0; i < hw->dcb_info.num_tc; i++) { + hw->dcb_info.tc_info[i].tc_id = i; + hw->dcb_info.tc_info[i].tc_sch_mode = HNS3_SCH_MODE_DWRR; + hw->dcb_info.tc_info[i].pgid = 0; + hw->dcb_info.tc_info[i].bw_limit = + hw->dcb_info.pg_info[0].bw_limit; + } + + return 0; +} + +static int +hns3_dcb_lvl2_schd_mode_cfg(struct hns3_hw *hw) +{ + struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); + struct hns3_pf *pf = &hns->pf; + int ret, i; + + /* Only being config on TC-Based scheduler mode */ + if (pf->tx_sch_mode == HNS3_FLAG_VNET_BASE_SCH_MODE) + return -EINVAL; + + for (i = 0; i < hw->dcb_info.num_pg; i++) { + ret = hns3_dcb_pg_schd_mode_cfg(hw, i); + if (ret) + return ret; + } + + return 0; +} + +static int +hns3_dcb_lvl34_schd_mode_cfg(struct hns3_hw *hw) +{ + struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); + struct hns3_pf *pf = &hns->pf; + uint8_t i; + int ret; + + if (pf->tx_sch_mode == HNS3_FLAG_TC_BASE_SCH_MODE) { + for (i = 0; i < hw->dcb_info.num_tc; i++) { + ret = hns3_dcb_pri_schd_mode_cfg(hw, i); + if (ret) + return ret; + + ret = hns3_dcb_qs_schd_mode_cfg(hw, i, + HNS3_SCH_MODE_DWRR); + if (ret) + return ret; + } + } + + return 0; +} + +static int +hns3_dcb_schd_mode_cfg(struct hns3_hw *hw) +{ + int ret; + + ret = 
hns3_dcb_lvl2_schd_mode_cfg(hw); + if (ret) { + hns3_err(hw, "config lvl2_schd_mode failed: %d", ret); + return ret; + } + + ret = hns3_dcb_lvl34_schd_mode_cfg(hw); + if (ret) + hns3_err(hw, "config lvl34_schd_mode failed: %d", ret); + + return ret; +} + +static int +hns3_dcb_pri_tc_base_dwrr_cfg(struct hns3_hw *hw) +{ + struct hns3_pg_info *pg_info; + uint8_t dwrr; + int ret, i; + + for (i = 0; i < hw->dcb_info.num_tc; i++) { + pg_info = &hw->dcb_info.pg_info[hw->dcb_info.tc_info[i].pgid]; + dwrr = pg_info->tc_dwrr[i]; + + ret = hns3_dcb_pri_weight_cfg(hw, i, dwrr); + if (ret) { + hns3_err(hw, + "fail to send priority weight cmd: %d, ret = %d", + i, ret); + return ret; + } + + ret = hns3_dcb_qs_weight_cfg(hw, i, BW_MAX_PERCENT); + if (ret) { + hns3_err(hw, "fail to send qs_weight cmd: %d, ret = %d", + i, ret); + return ret; + } + } + + return 0; +} + +static int +hns3_dcb_pri_dwrr_cfg(struct hns3_hw *hw) +{ + struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); + struct hns3_pf *pf = &hns->pf; + uint32_t version; + int ret; + + if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE) + return -EINVAL; + + ret = hns3_dcb_pri_tc_base_dwrr_cfg(hw); + if (ret) + return ret; + + if (!hns3_dev_dcb_supported(hw)) + return 0; + + ret = hns3_dcb_ets_tc_dwrr_cfg(hw); + if (ret == -EOPNOTSUPP) { + version = hw->fw_version; + hns3_warn(hw, + "fw %lu.%lu.%lu.%lu doesn't support ets tc weight cmd", + hns3_get_field(version, HNS3_FW_VERSION_BYTE3_M, + HNS3_FW_VERSION_BYTE3_S), + hns3_get_field(version, HNS3_FW_VERSION_BYTE2_M, + HNS3_FW_VERSION_BYTE2_S), + hns3_get_field(version, HNS3_FW_VERSION_BYTE1_M, + HNS3_FW_VERSION_BYTE1_S), + hns3_get_field(version, HNS3_FW_VERSION_BYTE0_M, + HNS3_FW_VERSION_BYTE0_S)); + ret = 0; + } + + return ret; +} + +static int +hns3_dcb_pg_dwrr_cfg(struct hns3_hw *hw) +{ + struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); + struct hns3_pf *pf = &hns->pf; + int ret, i; + + /* Cfg pg schd */ + if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE) + return -EINVAL; + + /* Cfg pg to prio */ + for (i = 0; i < hw->dcb_info.num_pg; i++) { + /* Cfg dwrr */ + ret = hns3_dcb_pg_weight_cfg(hw, i, hw->dcb_info.pg_dwrr[i]); + if (ret) + return ret; + } + + return 0; +} + +static int +hns3_dcb_dwrr_cfg(struct hns3_hw *hw) +{ + int ret; + + ret = hns3_dcb_pg_dwrr_cfg(hw); + if (ret) { + hns3_err(hw, "config pg_dwrr failed: %d", ret); + return ret; + } + + ret = hns3_dcb_pri_dwrr_cfg(hw); + if (ret) + hns3_err(hw, "config pri_dwrr failed: %d", ret); + + return ret; +} + +static int +hns3_dcb_shaper_cfg(struct hns3_hw *hw) +{ + int ret; + + ret = hns3_dcb_port_shaper_cfg(hw); + if (ret) { + hns3_err(hw, "config port shaper failed: %d", ret); + return ret; + } + + ret = hns3_dcb_pg_shaper_cfg(hw); + if (ret) { + hns3_err(hw, "config pg shaper failed: %d", ret); + return ret; + } + + return hns3_dcb_pri_shaper_cfg(hw); +} + +static int +hns3_q_to_qs_map_cfg(struct hns3_hw *hw, uint16_t q_id, uint16_t qs_id) +{ + struct hns3_nq_to_qs_link_cmd *map; + struct hns3_cmd_desc desc; + + hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_NQ_TO_QS_LINK, false); + + map = (struct hns3_nq_to_qs_link_cmd *)desc.data; + + map->nq_id = rte_cpu_to_le_16(q_id); + map->qset_id = rte_cpu_to_le_16(qs_id | HNS3_DCB_Q_QS_LINK_VLD_MSK); + + return hns3_cmd_send(hw, &desc, 1); +} + +static int +hns3_q_to_qs_map(struct hns3_hw *hw) +{ + struct hns3_tc_queue_info *tc_queue; + uint16_t q_id; + uint32_t i, j; + int ret; + + for (i = 0; i < hw->num_tc; i++) { + tc_queue = &hw->tc_queue[i]; + for (j = 0; j < 
tc_queue->tqp_count; j++) { + q_id = tc_queue->tqp_offset + j; + ret = hns3_q_to_qs_map_cfg(hw, q_id, i); + if (ret) + return ret; + } + } + + return 0; +} + +static int +hns3_pri_q_qs_cfg(struct hns3_hw *hw) +{ + struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); + struct hns3_pf *pf = &hns->pf; + uint32_t i; + int ret; + + if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE) + return -EINVAL; + + /* Cfg qs -> pri mapping */ + for (i = 0; i < hw->num_tc; i++) { + ret = hns3_qs_to_pri_map_cfg(hw, i, i); + if (ret) { + hns3_err(hw, "qs_to_pri mapping fail: %d", ret); + return ret; + } + } + + /* Cfg q -> qs mapping */ + ret = hns3_q_to_qs_map(hw); + if (ret) + hns3_err(hw, "nq_to_qs mapping fail: %d", ret); + + return ret; +} + +static int +hns3_dcb_map_cfg(struct hns3_hw *hw) +{ + int ret; + + ret = hns3_up_to_tc_map(hw); + if (ret) { + hns3_err(hw, "up_to_tc mapping fail: %d", ret); + return ret; + } + + ret = hns3_pg_to_pri_map(hw); + if (ret) { + hns3_err(hw, "pri_to_pg mapping fail: %d", ret); + return ret; + } + + return hns3_pri_q_qs_cfg(hw); +} + +static int +hns3_dcb_schd_setup_hw(struct hns3_hw *hw) +{ + int ret; + + /* Cfg dcb mapping */ + ret = hns3_dcb_map_cfg(hw); + if (ret) + return ret; + + /* Cfg dcb shaper */ + ret = hns3_dcb_shaper_cfg(hw); + if (ret) + return ret; + + /* Cfg dwrr */ + ret = hns3_dcb_dwrr_cfg(hw); + if (ret) + return ret; + + /* Cfg schd mode for each level schd */ + return hns3_dcb_schd_mode_cfg(hw); +} + +static int +hns3_pause_param_cfg(struct hns3_hw *hw, const uint8_t *addr, + uint8_t pause_trans_gap, uint16_t pause_trans_time) +{ + struct hns3_cfg_pause_param_cmd *pause_param; + struct hns3_cmd_desc desc; + + pause_param = (struct hns3_cfg_pause_param_cmd *)desc.data; + + hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_MAC_PARA, false); + + memcpy(pause_param->mac_addr, addr, RTE_ETHER_ADDR_LEN); + memcpy(pause_param->mac_addr_extra, addr, RTE_ETHER_ADDR_LEN); + pause_param->pause_trans_gap = pause_trans_gap; + pause_param->pause_trans_time = rte_cpu_to_le_16(pause_trans_time); + + return hns3_cmd_send(hw, &desc, 1); +} + +int +hns3_pause_addr_cfg(struct hns3_hw *hw, const uint8_t *mac_addr) +{ + struct hns3_cfg_pause_param_cmd *pause_param; + struct hns3_cmd_desc desc; + uint16_t trans_time; + uint8_t trans_gap; + int ret; + + pause_param = (struct hns3_cfg_pause_param_cmd *)desc.data; + + hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_MAC_PARA, true); + + ret = hns3_cmd_send(hw, &desc, 1); + if (ret) + return ret; + + trans_gap = pause_param->pause_trans_gap; + trans_time = rte_le_to_cpu_16(pause_param->pause_trans_time); + + return hns3_pause_param_cfg(hw, mac_addr, trans_gap, trans_time); +} + +static int +hns3_pause_param_setup_hw(struct hns3_hw *hw, uint16_t pause_time) +{ +#define PAUSE_TIME_DIV_BY 2 +#define PAUSE_TIME_MIN_VALUE 0x4 + + struct hns3_mac *mac = &hw->mac; + uint8_t pause_trans_gap; + + /* + * Pause transmit gap must be less than "pause_time / 2", otherwise + * the behavior of MAC is undefined. 
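+ * The gap is therefore clamped to HNS3_DEFAULT_PAUSE_TRANS_GAP for
+ * large pause_time values, while a pause_time below the minimum of 4
+ * is first raised to 4 before the gap is computed as
+ * pause_time / 2 - 1.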
+ */ + if (pause_time > PAUSE_TIME_DIV_BY * HNS3_DEFAULT_PAUSE_TRANS_GAP) + pause_trans_gap = HNS3_DEFAULT_PAUSE_TRANS_GAP; + else if (pause_time >= PAUSE_TIME_MIN_VALUE && + pause_time <= PAUSE_TIME_DIV_BY * HNS3_DEFAULT_PAUSE_TRANS_GAP) + pause_trans_gap = pause_time / PAUSE_TIME_DIV_BY - 1; + else { + hns3_warn(hw, "pause_time(%d) is adjusted to 4", pause_time); + pause_time = PAUSE_TIME_MIN_VALUE; + pause_trans_gap = pause_time / PAUSE_TIME_DIV_BY - 1; + } + + return hns3_pause_param_cfg(hw, mac->mac_addr, + pause_trans_gap, pause_time); +} + +static int +hns3_mac_pause_en_cfg(struct hns3_hw *hw, bool tx, bool rx) +{ + struct hns3_cmd_desc desc; + + hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_MAC_PAUSE_EN, false); + + desc.data[0] = rte_cpu_to_le_32((tx ? HNS3_TX_MAC_PAUSE_EN_MSK : 0) | + (rx ? HNS3_RX_MAC_PAUSE_EN_MSK : 0)); + + return hns3_cmd_send(hw, &desc, 1); +} + +static int +hns3_pfc_pause_en_cfg(struct hns3_hw *hw, uint8_t pfc_bitmap, bool tx, bool rx) +{ + struct hns3_cmd_desc desc; + struct hns3_pfc_en_cmd *pfc = (struct hns3_pfc_en_cmd *)desc.data; + + hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_PFC_PAUSE_EN, false); + + pfc->tx_rx_en_bitmap = (uint8_t)((tx ? HNS3_TX_MAC_PAUSE_EN_MSK : 0) | + (rx ? HNS3_RX_MAC_PAUSE_EN_MSK : 0)); + + pfc->pri_en_bitmap = pfc_bitmap; + + return hns3_cmd_send(hw, &desc, 1); +} + +static int +hns3_qs_bp_cfg(struct hns3_hw *hw, uint8_t tc, uint8_t grp_id, uint32_t bit_map) +{ + struct hns3_bp_to_qs_map_cmd *bp_to_qs_map_cmd; + struct hns3_cmd_desc desc; + + hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_BP_TO_QSET_MAPPING, false); + + bp_to_qs_map_cmd = (struct hns3_bp_to_qs_map_cmd *)desc.data; + + bp_to_qs_map_cmd->tc_id = tc; + bp_to_qs_map_cmd->qs_group_id = grp_id; + bp_to_qs_map_cmd->qs_bit_map = rte_cpu_to_le_32(bit_map); + + return hns3_cmd_send(hw, &desc, 1); +} + +static void +hns3_get_rx_tx_en_status(struct hns3_hw *hw, bool *tx_en, bool *rx_en) +{ + switch (hw->current_mode) { + case HNS3_FC_NONE: + *tx_en = false; + *rx_en = false; + break; + case HNS3_FC_RX_PAUSE: + *tx_en = false; + *rx_en = true; + break; + case HNS3_FC_TX_PAUSE: + *tx_en = true; + *rx_en = false; + break; + case HNS3_FC_FULL: + *tx_en = true; + *rx_en = true; + break; + default: + *tx_en = false; + *rx_en = false; + break; + } +} + +static int +hns3_mac_pause_setup_hw(struct hns3_hw *hw) +{ + bool tx_en, rx_en; + + if (hw->current_fc_status == HNS3_FC_STATUS_MAC_PAUSE) + hns3_get_rx_tx_en_status(hw, &tx_en, &rx_en); + else { + tx_en = false; + rx_en = false; + } + + return hns3_mac_pause_en_cfg(hw, tx_en, rx_en); +} + +static int +hns3_pfc_setup_hw(struct hns3_hw *hw) +{ + bool tx_en, rx_en; + + if (hw->current_fc_status == HNS3_FC_STATUS_PFC) + hns3_get_rx_tx_en_status(hw, &tx_en, &rx_en); + else { + tx_en = false; + rx_en = false; + } + + return hns3_pfc_pause_en_cfg(hw, hw->dcb_info.pfc_en, tx_en, rx_en); +} + +/* + * Each Tc has a 1024 queue sets to backpress, it divides to + * 32 group, each group contains 32 queue sets, which can be + * represented by uint32_t bitmap. 
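+ * For a given TC only the queue set with the matching index is
+ * flagged: the group index comes from the HNS3_BP_GRP_ID field of the
+ * TC id and the bit within that group's bitmap from the
+ * HNS3_BP_SUB_GRP_ID field.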
+ */ +static int +hns3_bp_setup_hw(struct hns3_hw *hw, uint8_t tc) +{ + uint32_t qs_bitmap; + int ret; + int i; + + for (i = 0; i < HNS3_BP_GRP_NUM; i++) { + uint8_t grp, sub_grp; + qs_bitmap = 0; + + grp = hns3_get_field(tc, HNS3_BP_GRP_ID_M, HNS3_BP_GRP_ID_S); + sub_grp = hns3_get_field(tc, HNS3_BP_SUB_GRP_ID_M, + HNS3_BP_SUB_GRP_ID_S); + if (i == grp) + qs_bitmap |= (1 << sub_grp); + + ret = hns3_qs_bp_cfg(hw, tc, i, qs_bitmap); + if (ret) + return ret; + } + + return 0; +} + +static int +hns3_dcb_bp_setup(struct hns3_hw *hw) +{ + int ret, i; + + for (i = 0; i < hw->dcb_info.num_tc; i++) { + ret = hns3_bp_setup_hw(hw, i); + if (ret) + return ret; + } + + return 0; +} + +static int +hns3_dcb_pause_setup_hw(struct hns3_hw *hw) +{ + struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); + struct hns3_pf *pf = &hns->pf; + int ret; + + ret = hns3_pause_param_setup_hw(hw, pf->pause_time); + if (ret) { + hns3_err(hw, "Fail to set pause parameter. ret = %d", ret); + return ret; + } + + ret = hns3_mac_pause_setup_hw(hw); + if (ret) { + hns3_err(hw, "Fail to setup MAC pause. ret = %d", ret); + return ret; + } + + /* Only DCB-supported dev supports qset back pressure and pfc cmd */ + if (!hns3_dev_dcb_supported(hw)) + return 0; + + ret = hns3_pfc_setup_hw(hw); + if (ret) { + hns3_err(hw, "config pfc failed! ret = %d", ret); + return ret; + } + + return hns3_dcb_bp_setup(hw); +} + +static uint8_t +hns3_dcb_undrop_tc_map(struct hns3_hw *hw, uint8_t pfc_en) +{ + uint8_t pfc_map = 0; + uint8_t *prio_tc; + uint8_t i, j; + + prio_tc = hw->dcb_info.prio_tc; + for (i = 0; i < hw->dcb_info.num_tc; i++) { + for (j = 0; j < HNS3_MAX_USER_PRIO; j++) { + if (prio_tc[j] == i && pfc_en & BIT(j)) { + pfc_map |= BIT(i); + break; + } + } + } + + return pfc_map; +} + +static void +hns3_dcb_cfg_validate(struct hns3_adapter *hns, uint8_t *tc, bool *changed) +{ + struct rte_eth_dcb_rx_conf *dcb_rx_conf; + struct hns3_hw *hw = &hns->hw; + uint8_t max_tc = 0; + uint8_t pfc_en; + int i; + + dcb_rx_conf = &hw->data->dev_conf.rx_adv_conf.dcb_rx_conf; + for (i = 0; i < HNS3_MAX_USER_PRIO; i++) { + if (dcb_rx_conf->dcb_tc[i] != hw->dcb_info.prio_tc[i]) + *changed = true; + + if (dcb_rx_conf->dcb_tc[i] > max_tc) + max_tc = dcb_rx_conf->dcb_tc[i]; + } + *tc = max_tc + 1; + if (*tc != hw->dcb_info.num_tc) + *changed = true; + + /* + * We ensure that dcb information can be reconfigured + * after the hns3_priority_flow_ctrl_set function called. 
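+ * As an illustration of the check below, RTE_LEN2MASK(nb_tcs, uint8_t) + * builds a mask with one bit per TC, e.g. nb_tcs == 4 gives a pfc_en of + * 0xF; any mismatch with the stored pfc_en marks the configuration as + * changed.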
+ */ + if (hw->current_mode != HNS3_FC_FULL) + *changed = true; + pfc_en = RTE_LEN2MASK((uint8_t)dcb_rx_conf->nb_tcs, uint8_t); + if (hw->dcb_info.pfc_en != pfc_en) + *changed = true; +} + +static void +hns3_dcb_info_cfg(struct hns3_adapter *hns) +{ + struct rte_eth_dcb_rx_conf *dcb_rx_conf; + struct hns3_pf *pf = &hns->pf; + struct hns3_hw *hw = &hns->hw; + uint8_t tc_bw, bw_rest; + uint8_t i, j; + + dcb_rx_conf = &hw->data->dev_conf.rx_adv_conf.dcb_rx_conf; + pf->local_max_tc = (uint8_t)dcb_rx_conf->nb_tcs; + pf->pfc_max = (uint8_t)dcb_rx_conf->nb_tcs; + + /* Config pg0 */ + memset(hw->dcb_info.pg_info, 0, + sizeof(struct hns3_pg_info) * HNS3_PG_NUM); + hw->dcb_info.pg_dwrr[0] = BW_MAX_PERCENT; + hw->dcb_info.pg_info[0].pg_id = 0; + hw->dcb_info.pg_info[0].pg_sch_mode = HNS3_SCH_MODE_DWRR; + hw->dcb_info.pg_info[0].bw_limit = HNS3_ETHER_MAX_RATE; + hw->dcb_info.pg_info[0].tc_bit_map = hw->hw_tc_map; + + /* By default, each valid tc gets the same bandwidth */ + tc_bw = BW_MAX_PERCENT / hw->dcb_info.num_tc; + for (i = 0; i < hw->dcb_info.num_tc; i++) + hw->dcb_info.pg_info[0].tc_dwrr[i] = tc_bw; + /* To ensure the sum of tc_dwrr is equal to 100 */ + bw_rest = BW_MAX_PERCENT % hw->dcb_info.num_tc; + for (j = 0; j < bw_rest; j++) + hw->dcb_info.pg_info[0].tc_dwrr[j]++; + for (; i < dcb_rx_conf->nb_tcs; i++) + hw->dcb_info.pg_info[0].tc_dwrr[i] = 0; + + /* All tcs map to pg0 */ + memset(hw->dcb_info.tc_info, 0, + sizeof(struct hns3_tc_info) * HNS3_MAX_TC_NUM); + for (i = 0; i < hw->dcb_info.num_tc; i++) { + hw->dcb_info.tc_info[i].tc_id = i; + hw->dcb_info.tc_info[i].tc_sch_mode = HNS3_SCH_MODE_DWRR; + hw->dcb_info.tc_info[i].pgid = 0; + hw->dcb_info.tc_info[i].bw_limit = + hw->dcb_info.pg_info[0].bw_limit; + } + + for (i = 0; i < HNS3_MAX_USER_PRIO; i++) + hw->dcb_info.prio_tc[i] = dcb_rx_conf->dcb_tc[i]; + + hns3_dcb_update_tc_queue_mapping(hw, hw->data->nb_rx_queues, + hw->data->nb_tx_queues); +} + +static int +hns3_dcb_info_update(struct hns3_adapter *hns, uint8_t num_tc) +{ + struct hns3_pf *pf = &hns->pf; + struct hns3_hw *hw = &hns->hw; + uint16_t nb_rx_q = hw->data->nb_rx_queues; + uint16_t nb_tx_q = hw->data->nb_tx_queues; + uint8_t bit_map = 0; + uint8_t i; + + if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE && + hw->dcb_info.num_pg != 1) + return -EINVAL; + + if (nb_rx_q < num_tc) { + hns3_err(hw, "number of Rx queues(%d) is less than tcs(%d).", + nb_rx_q, num_tc); + return -EINVAL; + } + + if (nb_tx_q < num_tc) { + hns3_err(hw, "number of Tx queues(%d) is less than tcs(%d).", + nb_tx_q, num_tc); + return -EINVAL; + } + + /* Discontinuous tc is not supported currently */ + hw->dcb_info.num_tc = num_tc; + for (i = 0; i < hw->dcb_info.num_tc; i++) + bit_map |= BIT(i); + + if (!bit_map) { + bit_map = 1; + hw->dcb_info.num_tc = 1; + } + hw->hw_tc_map = bit_map; + hns3_dcb_info_cfg(hns); + + return 0; +} + +static int +hns3_dcb_hw_configure(struct hns3_adapter *hns) +{ + struct rte_eth_dcb_rx_conf *dcb_rx_conf; + struct hns3_pf *pf = &hns->pf; + struct hns3_hw *hw = &hns->hw; + enum hns3_fc_status fc_status = hw->current_fc_status; + enum hns3_fc_mode current_mode = hw->current_mode; + uint8_t hw_pfc_map = hw->dcb_info.hw_pfc_map; + int ret, status; + + if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE && + pf->tx_sch_mode != HNS3_FLAG_VNET_BASE_SCH_MODE) + return -ENOTSUP; + + ret = hns3_dcb_schd_setup_hw(hw); + if (ret) { + hns3_err(hw, "dcb schedule configure failed! 
ret = %d", ret); + return ret; + } + + if (hw->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) { + dcb_rx_conf = &hw->data->dev_conf.rx_adv_conf.dcb_rx_conf; + if (dcb_rx_conf->nb_tcs == 0) + hw->dcb_info.pfc_en = 1; /* tc0 only */ + else + hw->dcb_info.pfc_en = + RTE_LEN2MASK((uint8_t)dcb_rx_conf->nb_tcs, uint8_t); + + hw->dcb_info.hw_pfc_map = + hns3_dcb_undrop_tc_map(hw, hw->dcb_info.pfc_en); + + ret = hns3_buffer_alloc(hw); + if (ret) + return ret; + + hw->current_fc_status = HNS3_FC_STATUS_PFC; + hw->current_mode = HNS3_FC_FULL; + ret = hns3_dcb_pause_setup_hw(hw); + if (ret) { + hns3_err(hw, "setup pfc failed! ret = %d", ret); + goto pfc_setup_fail; + } + } else { + /* + * Although dcb_capability_en is lack of ETH_DCB_PFC_SUPPORT + * flag, the DCB information is configured, such as tc numbers. + * Therefore, refreshing the allocation of packet buffer is + * necessary. + */ + ret = hns3_buffer_alloc(hw); + if (ret) + return ret; + } + + return 0; + +pfc_setup_fail: + hw->current_mode = current_mode; + hw->current_fc_status = fc_status; + hw->dcb_info.hw_pfc_map = hw_pfc_map; + status = hns3_buffer_alloc(hw); + if (status) + hns3_err(hw, "recover packet buffer fail! status = %d", status); + + return ret; +} + +/* + * hns3_dcb_configure - setup dcb related config + * @hns: pointer to hns3 adapter + * Returns 0 on success, negative value on failure. + */ +int +hns3_dcb_configure(struct hns3_adapter *hns) +{ + struct hns3_hw *hw = &hns->hw; + bool map_changed = false; + uint8_t num_tc = 0; + int ret; + + hns3_dcb_cfg_validate(hns, &num_tc, &map_changed); + if (map_changed || rte_atomic16_read(&hw->reset.resetting)) { + ret = hns3_dcb_info_update(hns, num_tc); + if (ret) { + hns3_err(hw, "dcb info update failed: %d", ret); + return ret; + } + + ret = hns3_dcb_hw_configure(hns); + if (ret) { + hns3_err(hw, "dcb sw configure failed: %d", ret); + return ret; + } + } + + return 0; +} + +int +hns3_dcb_init_hw(struct hns3_hw *hw) +{ + int ret; + + ret = hns3_dcb_schd_setup_hw(hw); + if (ret) { + hns3_err(hw, "dcb schedule setup failed: %d", ret); + return ret; + } + + ret = hns3_dcb_pause_setup_hw(hw); + if (ret) + hns3_err(hw, "PAUSE setup failed: %d", ret); + + return ret; +} + +int +hns3_dcb_init(struct hns3_hw *hw) +{ + struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); + struct hns3_pf *pf = &hns->pf; + int ret; + + PMD_INIT_FUNC_TRACE(); + + /* + * According to the 'adapter_state' identifier, the following branch + * is only executed to initialize default configurations of dcb during + * the initializing driver process. Due to driver saving dcb-related + * information before reset triggered, the reinit dev stage of the + * reset process can not access to the branch, or those information + * will be changed. + */ + if (hw->adapter_state == HNS3_NIC_UNINITIALIZED) { + hw->requested_mode = HNS3_FC_NONE; + hw->current_mode = hw->requested_mode; + pf->pause_time = HNS3_DEFAULT_PAUSE_TRANS_TIME; + hw->current_fc_status = HNS3_FC_STATUS_NONE; + + ret = hns3_dcb_info_init(hw); + if (ret) { + hns3_err(hw, "dcb info init failed: %d", ret); + return ret; + } + hns3_dcb_update_tc_queue_mapping(hw, hw->tqps_num, + hw->tqps_num); + } + + /* + * DCB hardware will be configured by following the function during + * the initializing driver process and the reset process. However, + * driver will restore directly configurations of dcb hardware based + * on dcb-related information soft maintained when driver + * initialization has finished and reset is coming. 
+ */ + ret = hns3_dcb_init_hw(hw); + if (ret) { + hns3_err(hw, "dcb init hardware failed: %d", ret); + return ret; + } + + return 0; +} + +static int +hns3_update_queue_map_configure(struct hns3_adapter *hns) +{ + struct hns3_hw *hw = &hns->hw; + uint16_t nb_rx_q = hw->data->nb_rx_queues; + uint16_t nb_tx_q = hw->data->nb_tx_queues; + int ret; + + hns3_dcb_update_tc_queue_mapping(hw, nb_rx_q, nb_tx_q); + ret = hns3_q_to_qs_map(hw); + if (ret) + hns3_err(hw, "failed to map nq to qs! ret = %d", ret); + + return ret; +} + +int +hns3_dcb_cfg_update(struct hns3_adapter *hns) +{ + struct hns3_hw *hw = &hns->hw; + enum rte_eth_rx_mq_mode mq_mode = hw->data->dev_conf.rxmode.mq_mode; + int ret; + + if ((uint32_t)mq_mode & ETH_MQ_RX_DCB_FLAG) { + ret = hns3_dcb_configure(hns); + if (ret) + hns3_err(hw, "Failed to config dcb: %d", ret); + } else { + /* + * Update the queue mapping without PFC configuration, + * because the queues were reconfigured by the user. + */ + ret = hns3_update_queue_map_configure(hns); + if (ret) + hns3_err(hw, + "Failed to update queue mapping configure: %d", + ret); + } + + return ret; +} + +/* + * hns3_dcb_pfc_enable - Enable priority flow control + * @dev: pointer to ethernet device + * + * Configures the PFC settings for one priority. + */ +int +hns3_dcb_pfc_enable(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *pfc_conf) +{ + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private); + enum hns3_fc_status fc_status = hw->current_fc_status; + enum hns3_fc_mode current_mode = hw->current_mode; + uint8_t hw_pfc_map = hw->dcb_info.hw_pfc_map; + uint8_t pfc_en = hw->dcb_info.pfc_en; + uint8_t priority = pfc_conf->priority; + uint16_t pause_time = pf->pause_time; + int ret, status; + + pf->pause_time = pfc_conf->fc.pause_time; + hw->current_mode = hw->requested_mode; + hw->current_fc_status = HNS3_FC_STATUS_PFC; + hw->dcb_info.pfc_en |= BIT(priority); + hw->dcb_info.hw_pfc_map = + hns3_dcb_undrop_tc_map(hw, hw->dcb_info.pfc_en); + ret = hns3_buffer_alloc(hw); + if (ret) + goto pfc_setup_fail; + + /* + * The flow control mode of all UPs will be changed based on + * current_mode coming from user. + */ + ret = hns3_dcb_pause_setup_hw(hw); + if (ret) { + hns3_err(hw, "enable pfc failed! ret = %d", ret); + goto pfc_setup_fail; + } + + return 0; + +pfc_setup_fail: + hw->current_mode = current_mode; + hw->current_fc_status = fc_status; + pf->pause_time = pause_time; + hw->dcb_info.pfc_en = pfc_en; + hw->dcb_info.hw_pfc_map = hw_pfc_map; + status = hns3_buffer_alloc(hw); + if (status) + hns3_err(hw, "recover packet buffer fail: %d", status); + + return ret; +} + +/* + * hns3_fc_enable - Enable MAC pause + * @dev: pointer to ethernet device + * + * Configures the MAC pause settings. + */ +int +hns3_fc_enable(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) +{ + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private); + enum hns3_fc_status fc_status = hw->current_fc_status; + enum hns3_fc_mode current_mode = hw->current_mode; + uint16_t pause_time = pf->pause_time; + int ret; + + pf->pause_time = fc_conf->pause_time; + hw->current_mode = hw->requested_mode; + + /* + * In fact, current_fc_status is HNS3_FC_STATUS_NONE when mode + * of flow control is configured to be HNS3_FC_NONE. 
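+ * For instance (see hns3_get_rx_tx_en_status()), a requested mode of + * HNS3_FC_RX_PAUSE becomes HNS3_FC_STATUS_MAC_PAUSE here and later + * enables only Rx pause at the MAC, while HNS3_FC_FULL enables both + * directions.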
+ */ + if (hw->current_mode == HNS3_FC_NONE) + hw->current_fc_status = HNS3_FC_STATUS_NONE; + else + hw->current_fc_status = HNS3_FC_STATUS_MAC_PAUSE; + + ret = hns3_dcb_pause_setup_hw(hw); + if (ret) { + hns3_err(hw, "enable MAC Pause failed! ret = %d", ret); + goto setup_fc_fail; + } + + return 0; + +setup_fc_fail: + hw->current_mode = current_mode; + hw->current_fc_status = fc_status; + pf->pause_time = pause_time; + + return ret; +} diff --git a/src/spdk/dpdk/drivers/net/hns3/hns3_dcb.h b/src/spdk/dpdk/drivers/net/hns3/hns3_dcb.h new file mode 100644 index 000000000..9c2c5f21c --- /dev/null +++ b/src/spdk/dpdk/drivers/net/hns3/hns3_dcb.h @@ -0,0 +1,168 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018-2019 Hisilicon Limited. + */ + +#ifndef _HNS3_DCB_H_ +#define _HNS3_DCB_H_ + +/* MAC Pause */ +#define HNS3_TX_MAC_PAUSE_EN_MSK BIT(0) +#define HNS3_RX_MAC_PAUSE_EN_MSK BIT(1) + +#define HNS3_DEFAULT_PAUSE_TRANS_GAP 0x18 +#define HNS3_DEFAULT_PAUSE_TRANS_TIME 0xFFFF + +/* SP or DWRR */ +#define HNS3_DCB_TX_SCHD_DWRR_MSK BIT(0) +#define HNS3_DCB_TX_SCHD_SP_MSK (0xFE) + +enum hns3_shap_bucket { + HNS3_DCB_SHAP_C_BUCKET = 0, + HNS3_DCB_SHAP_P_BUCKET, +}; + +struct hns3_priority_weight_cmd { + uint8_t pri_id; + uint8_t dwrr; +}; + +struct hns3_qs_weight_cmd { + uint16_t qs_id; + uint8_t dwrr; +}; + +struct hns3_pg_weight_cmd { + uint8_t pg_id; + uint8_t dwrr; +}; + +struct hns3_ets_tc_weight_cmd { + uint8_t tc_weight[HNS3_MAX_TC_NUM]; + uint8_t weight_offset; + uint8_t rsvd[15]; +}; + +struct hns3_qs_to_pri_link_cmd { + uint16_t qs_id; + uint16_t rsvd; + uint8_t priority; +#define HNS3_DCB_QS_PRI_LINK_VLD_MSK BIT(0) + uint8_t link_vld; +}; + +struct hns3_nq_to_qs_link_cmd { + uint16_t nq_id; + uint16_t rsvd; +#define HNS3_DCB_Q_QS_LINK_VLD_MSK BIT(10) + uint16_t qset_id; +}; + +#define HNS3_DCB_SHAP_IR_B_MSK GENMASK(7, 0) +#define HNS3_DCB_SHAP_IR_B_LSH 0 +#define HNS3_DCB_SHAP_IR_U_MSK GENMASK(11, 8) +#define HNS3_DCB_SHAP_IR_U_LSH 8 +#define HNS3_DCB_SHAP_IR_S_MSK GENMASK(15, 12) +#define HNS3_DCB_SHAP_IR_S_LSH 12 +#define HNS3_DCB_SHAP_BS_B_MSK GENMASK(20, 16) +#define HNS3_DCB_SHAP_BS_B_LSH 16 +#define HNS3_DCB_SHAP_BS_S_MSK GENMASK(25, 21) +#define HNS3_DCB_SHAP_BS_S_LSH 21 + +struct hns3_pri_shapping_cmd { + uint8_t pri_id; + uint8_t rsvd[3]; + uint32_t pri_shapping_para; +}; + +struct hns3_pg_shapping_cmd { + uint8_t pg_id; + uint8_t rsvd[3]; + uint32_t pg_shapping_para; +}; + +#define HNS3_BP_GRP_NUM 32 +#define HNS3_BP_SUB_GRP_ID_S 0 +#define HNS3_BP_SUB_GRP_ID_M GENMASK(4, 0) +#define HNS3_BP_GRP_ID_S 5 +#define HNS3_BP_GRP_ID_M GENMASK(9, 5) +struct hns3_bp_to_qs_map_cmd { + uint8_t tc_id; + uint8_t rsvd[2]; + uint8_t qs_group_id; + uint32_t qs_bit_map; + uint32_t rsvd1; +}; + +struct hns3_pfc_en_cmd { + uint8_t tx_rx_en_bitmap; + uint8_t pri_en_bitmap; +}; + +struct hns3_port_shapping_cmd { + uint32_t port_shapping_para; +}; + +struct hns3_cfg_pause_param_cmd { + uint8_t mac_addr[RTE_ETHER_ADDR_LEN]; + uint8_t pause_trans_gap; + uint8_t rsvd; + uint16_t pause_trans_time; + uint8_t rsvd1[6]; + /* extra mac address to do double check for pause frame */ + uint8_t mac_addr_extra[RTE_ETHER_ADDR_LEN]; + uint16_t rsvd2; +}; + +struct hns3_pg_to_pri_link_cmd { + uint8_t pg_id; + uint8_t rsvd1[3]; + uint8_t pri_bit_map; +}; + +enum hns3_shaper_level { + HNS3_SHAPER_LVL_PRI = 0, + HNS3_SHAPER_LVL_PG = 1, + HNS3_SHAPER_LVL_PORT = 2, + HNS3_SHAPER_LVL_QSET = 3, + HNS3_SHAPER_LVL_CNT = 4, + HNS3_SHAPER_LVL_VF = 0, + HNS3_SHAPER_LVL_PF = 1, +}; + +struct hns3_shaper_parameter { 
+ uint32_t ir_b; /* IR_B parameter of IR shaper */ + uint32_t ir_u; /* IR_U parameter of IR shaper */ + uint32_t ir_s; /* IR_S parameter of IR shaper */ +}; + +#define hns3_dcb_set_field(dest, string, val) \ + hns3_set_field((dest), \ + (HNS3_DCB_SHAP_##string##_MSK), \ + (HNS3_DCB_SHAP_##string##_LSH), val) +#define hns3_dcb_get_field(src, string) \ + hns3_get_field((src), (HNS3_DCB_SHAP_##string##_MSK), \ + (HNS3_DCB_SHAP_##string##_LSH)) + +int hns3_pause_addr_cfg(struct hns3_hw *hw, const uint8_t *mac_addr); + +int hns3_dcb_configure(struct hns3_adapter *hns); + +int hns3_dcb_init(struct hns3_hw *hw); + +int hns3_dcb_init_hw(struct hns3_hw *hw); + +int hns3_dcb_info_init(struct hns3_hw *hw); + +int +hns3_fc_enable(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf); + +int +hns3_dcb_pfc_enable(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *pfc_conf); + +void hns3_set_rss_size(struct hns3_hw *hw, uint16_t nb_rx_q); + +void hns3_tc_queue_mapping_cfg(struct hns3_hw *hw, uint16_t nb_queue); + +int hns3_dcb_cfg_update(struct hns3_adapter *hns); + +#endif /* _HNS3_DCB_H_ */ diff --git a/src/spdk/dpdk/drivers/net/hns3/hns3_ethdev.c b/src/spdk/dpdk/drivers/net/hns3/hns3_ethdev.c new file mode 100644 index 000000000..a09ac082e --- /dev/null +++ b/src/spdk/dpdk/drivers/net/hns3/hns3_ethdev.c @@ -0,0 +1,5512 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018-2019 Hisilicon Limited. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "hns3_ethdev.h" +#include "hns3_logs.h" +#include "hns3_rxtx.h" +#include "hns3_intr.h" +#include "hns3_regs.h" +#include "hns3_dcb.h" +#include "hns3_mp.h" + +#define HNS3_DEFAULT_PORT_CONF_BURST_SIZE 32 +#define HNS3_DEFAULT_PORT_CONF_QUEUES_NUM 1 + +#define HNS3_SERVICE_INTERVAL 1000000 /* us */ +#define HNS3_PORT_BASE_VLAN_DISABLE 0 +#define HNS3_PORT_BASE_VLAN_ENABLE 1 +#define HNS3_INVLID_PVID 0xFFFF + +#define HNS3_FILTER_TYPE_VF 0 +#define HNS3_FILTER_TYPE_PORT 1 +#define HNS3_FILTER_FE_EGRESS_V1_B BIT(0) +#define HNS3_FILTER_FE_NIC_INGRESS_B BIT(0) +#define HNS3_FILTER_FE_NIC_EGRESS_B BIT(1) +#define HNS3_FILTER_FE_ROCE_INGRESS_B BIT(2) +#define HNS3_FILTER_FE_ROCE_EGRESS_B BIT(3) +#define HNS3_FILTER_FE_EGRESS (HNS3_FILTER_FE_NIC_EGRESS_B \ + | HNS3_FILTER_FE_ROCE_EGRESS_B) +#define HNS3_FILTER_FE_INGRESS (HNS3_FILTER_FE_NIC_INGRESS_B \ + | HNS3_FILTER_FE_ROCE_INGRESS_B) + +/* Reset related Registers */ +#define HNS3_GLOBAL_RESET_BIT 0 +#define HNS3_CORE_RESET_BIT 1 +#define HNS3_IMP_RESET_BIT 2 +#define HNS3_FUN_RST_ING_B 0 + +#define HNS3_VECTOR0_IMP_RESET_INT_B 1 + +#define HNS3_RESET_WAIT_MS 100 +#define HNS3_RESET_WAIT_CNT 200 + +int hns3_logtype_init; +int hns3_logtype_driver; + +enum hns3_evt_cause { + HNS3_VECTOR0_EVENT_RST, + HNS3_VECTOR0_EVENT_MBX, + HNS3_VECTOR0_EVENT_ERR, + HNS3_VECTOR0_EVENT_OTHER, +}; + +static enum hns3_reset_level hns3_get_reset_level(struct hns3_adapter *hns, + uint64_t *levels); +static int hns3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu); +static int hns3_vlan_pvid_configure(struct hns3_adapter *hns, uint16_t pvid, + int on); +static int hns3_update_speed_duplex(struct rte_eth_dev *eth_dev); + +static int hns3_add_mc_addr(struct hns3_hw *hw, + struct rte_ether_addr *mac_addr); +static int hns3_remove_mc_addr(struct hns3_hw *hw, + struct rte_ether_addr *mac_addr); + +static void +hns3_pf_disable_irq0(struct hns3_hw *hw) +{ + 
hns3_write_dev(hw, HNS3_MISC_VECTOR_REG_BASE, 0); +} + +static void +hns3_pf_enable_irq0(struct hns3_hw *hw) +{ + hns3_write_dev(hw, HNS3_MISC_VECTOR_REG_BASE, 1); +} + +static enum hns3_evt_cause +hns3_check_event_cause(struct hns3_adapter *hns, uint32_t *clearval) +{ + struct hns3_hw *hw = &hns->hw; + uint32_t vector0_int_stats; + uint32_t cmdq_src_val; + uint32_t val; + enum hns3_evt_cause ret; + + /* fetch the events from their corresponding regs */ + vector0_int_stats = hns3_read_dev(hw, HNS3_VECTOR0_OTHER_INT_STS_REG); + cmdq_src_val = hns3_read_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG); + + /* + * Assumption: If by any chance reset and mailbox events are reported + * together then we will only process reset event and defer the + * processing of the mailbox events. Since we would not have cleared the + * RX CMDQ event this time, we would receive another interrupt + * from H/W just for the mailbox. + */ + if (BIT(HNS3_VECTOR0_IMPRESET_INT_B) & vector0_int_stats) { /* IMP */ + rte_atomic16_set(&hw->reset.disable_cmd, 1); + hns3_atomic_set_bit(HNS3_IMP_RESET, &hw->reset.pending); + val = BIT(HNS3_VECTOR0_IMPRESET_INT_B); + if (clearval) { + hw->reset.stats.imp_cnt++; + hns3_warn(hw, "IMP reset detected, clear reset status"); + } else { + hns3_schedule_delayed_reset(hns); + hns3_warn(hw, "IMP reset detected, don't clear reset status"); + } + + ret = HNS3_VECTOR0_EVENT_RST; + goto out; + } + + /* Global reset */ + if (BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) & vector0_int_stats) { + rte_atomic16_set(&hw->reset.disable_cmd, 1); + hns3_atomic_set_bit(HNS3_GLOBAL_RESET, &hw->reset.pending); + val = BIT(HNS3_VECTOR0_GLOBALRESET_INT_B); + if (clearval) { + hw->reset.stats.global_cnt++; + hns3_warn(hw, "Global reset detected, clear reset status"); + } else { + hns3_schedule_delayed_reset(hns); + hns3_warn(hw, "Global reset detected, don't clear reset status"); + } + + ret = HNS3_VECTOR0_EVENT_RST; + goto out; + } + + /* check for vector0 msix event source */ + if (vector0_int_stats & HNS3_VECTOR0_REG_MSIX_MASK) { + val = vector0_int_stats; + ret = HNS3_VECTOR0_EVENT_ERR; + goto out; + } + + /* check for vector0 mailbox(=CMDQ RX) event source */ + if (BIT(HNS3_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_val) { + cmdq_src_val &= ~BIT(HNS3_VECTOR0_RX_CMDQ_INT_B); + val = cmdq_src_val; + ret = HNS3_VECTOR0_EVENT_MBX; + goto out; + } + + if (clearval && (vector0_int_stats || cmdq_src_val)) + hns3_warn(hw, "surprise irq vector0_int_stats:0x%x cmdq_src_val:0x%x", + vector0_int_stats, cmdq_src_val); + val = vector0_int_stats; + ret = HNS3_VECTOR0_EVENT_OTHER; +out: + + if (clearval) + *clearval = val; + return ret; +} + +static void +hns3_clear_event_cause(struct hns3_hw *hw, uint32_t event_type, uint32_t regclr) +{ + if (event_type == HNS3_VECTOR0_EVENT_RST) + hns3_write_dev(hw, HNS3_MISC_RESET_STS_REG, regclr); + else if (event_type == HNS3_VECTOR0_EVENT_MBX) + hns3_write_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG, regclr); +} + +static void +hns3_clear_all_event_cause(struct hns3_hw *hw) +{ + uint32_t vector0_int_stats; + vector0_int_stats = hns3_read_dev(hw, HNS3_VECTOR0_OTHER_INT_STS_REG); + + if (BIT(HNS3_VECTOR0_IMPRESET_INT_B) & vector0_int_stats) + hns3_warn(hw, "Probe during IMP reset interrupt"); + + if (BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) & vector0_int_stats) + hns3_warn(hw, "Probe during Global reset interrupt"); + + hns3_clear_event_cause(hw, HNS3_VECTOR0_EVENT_RST, + BIT(HNS3_VECTOR0_IMPRESET_INT_B) | + BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) | + BIT(HNS3_VECTOR0_CORERESET_INT_B)); + hns3_clear_event_cause(hw, 
HNS3_VECTOR0_EVENT_MBX, 0); +} + +static void +hns3_interrupt_handler(void *param) +{ + struct rte_eth_dev *dev = (struct rte_eth_dev *)param; + struct hns3_adapter *hns = dev->data->dev_private; + struct hns3_hw *hw = &hns->hw; + enum hns3_evt_cause event_cause; + uint32_t clearval = 0; + + /* Disable interrupt */ + hns3_pf_disable_irq0(hw); + + event_cause = hns3_check_event_cause(hns, &clearval); + + /* vector 0 interrupt is shared with reset and mailbox source events. */ + if (event_cause == HNS3_VECTOR0_EVENT_ERR) { + hns3_handle_msix_error(hns, &hw->reset.request); + hns3_schedule_reset(hns); + } else if (event_cause == HNS3_VECTOR0_EVENT_RST) + hns3_schedule_reset(hns); + else if (event_cause == HNS3_VECTOR0_EVENT_MBX) + hns3_dev_handle_mbx_msg(hw); + else + hns3_err(hw, "Received unknown event"); + + hns3_clear_event_cause(hw, event_cause, clearval); + /* Enable interrupt if it is not cause by reset */ + hns3_pf_enable_irq0(hw); +} + +static int +hns3_set_port_vlan_filter(struct hns3_adapter *hns, uint16_t vlan_id, int on) +{ +#define HNS3_VLAN_ID_OFFSET_STEP 160 +#define HNS3_VLAN_BYTE_SIZE 8 + struct hns3_vlan_filter_pf_cfg_cmd *req; + struct hns3_hw *hw = &hns->hw; + uint8_t vlan_offset_byte_val; + struct hns3_cmd_desc desc; + uint8_t vlan_offset_byte; + uint8_t vlan_offset_base; + int ret; + + hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_VLAN_FILTER_PF_CFG, false); + + vlan_offset_base = vlan_id / HNS3_VLAN_ID_OFFSET_STEP; + vlan_offset_byte = (vlan_id % HNS3_VLAN_ID_OFFSET_STEP) / + HNS3_VLAN_BYTE_SIZE; + vlan_offset_byte_val = 1 << (vlan_id % HNS3_VLAN_BYTE_SIZE); + + req = (struct hns3_vlan_filter_pf_cfg_cmd *)desc.data; + req->vlan_offset = vlan_offset_base; + req->vlan_cfg = on ? 0 : 1; + req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val; + + ret = hns3_cmd_send(hw, &desc, 1); + if (ret) + hns3_err(hw, "set port vlan id failed, vlan_id =%u, ret =%d", + vlan_id, ret); + + return ret; +} + +static void +hns3_rm_dev_vlan_table(struct hns3_adapter *hns, uint16_t vlan_id) +{ + struct hns3_user_vlan_table *vlan_entry; + struct hns3_pf *pf = &hns->pf; + + LIST_FOREACH(vlan_entry, &pf->vlan_list, next) { + if (vlan_entry->vlan_id == vlan_id) { + if (vlan_entry->hd_tbl_status) + hns3_set_port_vlan_filter(hns, vlan_id, 0); + LIST_REMOVE(vlan_entry, next); + rte_free(vlan_entry); + break; + } + } +} + +static void +hns3_add_dev_vlan_table(struct hns3_adapter *hns, uint16_t vlan_id, + bool writen_to_tbl) +{ + struct hns3_user_vlan_table *vlan_entry; + struct hns3_hw *hw = &hns->hw; + struct hns3_pf *pf = &hns->pf; + + LIST_FOREACH(vlan_entry, &pf->vlan_list, next) { + if (vlan_entry->vlan_id == vlan_id) + return; + } + + vlan_entry = rte_zmalloc("hns3_vlan_tbl", sizeof(*vlan_entry), 0); + if (vlan_entry == NULL) { + hns3_err(hw, "Failed to malloc hns3 vlan table"); + return; + } + + vlan_entry->hd_tbl_status = writen_to_tbl; + vlan_entry->vlan_id = vlan_id; + + LIST_INSERT_HEAD(&pf->vlan_list, vlan_entry, next); +} + +static int +hns3_restore_vlan_table(struct hns3_adapter *hns) +{ + struct hns3_user_vlan_table *vlan_entry; + struct hns3_pf *pf = &hns->pf; + uint16_t vlan_id; + int ret = 0; + + if (pf->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_ENABLE) + return hns3_vlan_pvid_configure(hns, + pf->port_base_vlan_cfg.pvid, 1); + + LIST_FOREACH(vlan_entry, &pf->vlan_list, next) { + if (vlan_entry->hd_tbl_status) { + vlan_id = vlan_entry->vlan_id; + ret = hns3_set_port_vlan_filter(hns, vlan_id, 1); + if (ret) + break; + } + } + + return ret; +} + +static int 
+hns3_vlan_filter_configure(struct hns3_adapter *hns, uint16_t vlan_id, int on) +{ + struct hns3_pf *pf = &hns->pf; + bool writen_to_tbl = false; + int ret = 0; + + /* + * When vlan filter is enabled, hardware regards vlan id 0 as the entry + * for normal packet, deleting vlan id 0 is not allowed. + */ + if (on == 0 && vlan_id == 0) + return 0; + + /* + * When port base vlan enabled, we use port base vlan as the vlan + * filter condition. In this case, we don't update vlan filter table + * when user add new vlan or remove exist vlan, just update the + * vlan list. The vlan id in vlan list will be writen in vlan filter + * table until port base vlan disabled + */ + if (pf->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_DISABLE) { + ret = hns3_set_port_vlan_filter(hns, vlan_id, on); + writen_to_tbl = true; + } + + if (ret == 0 && vlan_id) { + if (on) + hns3_add_dev_vlan_table(hns, vlan_id, writen_to_tbl); + else + hns3_rm_dev_vlan_table(hns, vlan_id); + } + return ret; +} + +static int +hns3_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) +{ + struct hns3_adapter *hns = dev->data->dev_private; + struct hns3_hw *hw = &hns->hw; + int ret; + + rte_spinlock_lock(&hw->lock); + ret = hns3_vlan_filter_configure(hns, vlan_id, on); + rte_spinlock_unlock(&hw->lock); + return ret; +} + +static int +hns3_vlan_tpid_configure(struct hns3_adapter *hns, enum rte_vlan_type vlan_type, + uint16_t tpid) +{ + struct hns3_rx_vlan_type_cfg_cmd *rx_req; + struct hns3_tx_vlan_type_cfg_cmd *tx_req; + struct hns3_hw *hw = &hns->hw; + struct hns3_cmd_desc desc; + int ret; + + if ((vlan_type != ETH_VLAN_TYPE_INNER && + vlan_type != ETH_VLAN_TYPE_OUTER)) { + hns3_err(hw, "Unsupported vlan type, vlan_type =%d", vlan_type); + return -EINVAL; + } + + if (tpid != RTE_ETHER_TYPE_VLAN) { + hns3_err(hw, "Unsupported vlan tpid, vlan_type =%d", vlan_type); + return -EINVAL; + } + + hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_TYPE_ID, false); + rx_req = (struct hns3_rx_vlan_type_cfg_cmd *)desc.data; + + if (vlan_type == ETH_VLAN_TYPE_OUTER) { + rx_req->ot_fst_vlan_type = rte_cpu_to_le_16(tpid); + rx_req->ot_sec_vlan_type = rte_cpu_to_le_16(tpid); + } else if (vlan_type == ETH_VLAN_TYPE_INNER) { + rx_req->ot_fst_vlan_type = rte_cpu_to_le_16(tpid); + rx_req->ot_sec_vlan_type = rte_cpu_to_le_16(tpid); + rx_req->in_fst_vlan_type = rte_cpu_to_le_16(tpid); + rx_req->in_sec_vlan_type = rte_cpu_to_le_16(tpid); + } + + ret = hns3_cmd_send(hw, &desc, 1); + if (ret) { + hns3_err(hw, "Send rxvlan protocol type command fail, ret =%d", + ret); + return ret; + } + + hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_INSERT, false); + + tx_req = (struct hns3_tx_vlan_type_cfg_cmd *)desc.data; + tx_req->ot_vlan_type = rte_cpu_to_le_16(tpid); + tx_req->in_vlan_type = rte_cpu_to_le_16(tpid); + + ret = hns3_cmd_send(hw, &desc, 1); + if (ret) + hns3_err(hw, "Send txvlan protocol type command fail, ret =%d", + ret); + return ret; +} + +static int +hns3_vlan_tpid_set(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type, + uint16_t tpid) +{ + struct hns3_adapter *hns = dev->data->dev_private; + struct hns3_hw *hw = &hns->hw; + int ret; + + rte_spinlock_lock(&hw->lock); + ret = hns3_vlan_tpid_configure(hns, vlan_type, tpid); + rte_spinlock_unlock(&hw->lock); + return ret; +} + +static int +hns3_set_vlan_rx_offload_cfg(struct hns3_adapter *hns, + struct hns3_rx_vtag_cfg *vcfg) +{ + struct hns3_vport_vtag_rx_cfg_cmd *req; + struct hns3_hw *hw = &hns->hw; + struct hns3_cmd_desc desc; + uint16_t vport_id; + uint8_t bitmap; + int 
ret; + + hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_VLAN_PORT_RX_CFG, false); + + req = (struct hns3_vport_vtag_rx_cfg_cmd *)desc.data; + hns3_set_bit(req->vport_vlan_cfg, HNS3_REM_TAG1_EN_B, + vcfg->strip_tag1_en ? 1 : 0); + hns3_set_bit(req->vport_vlan_cfg, HNS3_REM_TAG2_EN_B, + vcfg->strip_tag2_en ? 1 : 0); + hns3_set_bit(req->vport_vlan_cfg, HNS3_SHOW_TAG1_EN_B, + vcfg->vlan1_vlan_prionly ? 1 : 0); + hns3_set_bit(req->vport_vlan_cfg, HNS3_SHOW_TAG2_EN_B, + vcfg->vlan2_vlan_prionly ? 1 : 0); + + /* + * In current version VF is not supported when PF is driven by DPDK + * driver, the PF-related vf_id is 0, just need to configure parameters + * for vport_id 0. + */ + vport_id = 0; + req->vf_offset = vport_id / HNS3_VF_NUM_PER_CMD; + bitmap = 1 << (vport_id % HNS3_VF_NUM_PER_BYTE); + req->vf_bitmap[req->vf_offset] = bitmap; + + ret = hns3_cmd_send(hw, &desc, 1); + if (ret) + hns3_err(hw, "Send port rxvlan cfg command fail, ret =%d", ret); + return ret; +} + +static void +hns3_update_rx_offload_cfg(struct hns3_adapter *hns, + struct hns3_rx_vtag_cfg *vcfg) +{ + struct hns3_pf *pf = &hns->pf; + memcpy(&pf->vtag_config.rx_vcfg, vcfg, sizeof(pf->vtag_config.rx_vcfg)); +} + +static void +hns3_update_tx_offload_cfg(struct hns3_adapter *hns, + struct hns3_tx_vtag_cfg *vcfg) +{ + struct hns3_pf *pf = &hns->pf; + memcpy(&pf->vtag_config.tx_vcfg, vcfg, sizeof(pf->vtag_config.tx_vcfg)); +} + +static int +hns3_en_hw_strip_rxvtag(struct hns3_adapter *hns, bool enable) +{ + struct hns3_rx_vtag_cfg rxvlan_cfg; + struct hns3_pf *pf = &hns->pf; + struct hns3_hw *hw = &hns->hw; + int ret; + + if (pf->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_DISABLE) { + rxvlan_cfg.strip_tag1_en = false; + rxvlan_cfg.strip_tag2_en = enable; + } else { + rxvlan_cfg.strip_tag1_en = enable; + rxvlan_cfg.strip_tag2_en = true; + } + + rxvlan_cfg.vlan1_vlan_prionly = false; + rxvlan_cfg.vlan2_vlan_prionly = false; + rxvlan_cfg.rx_vlan_offload_en = enable; + + ret = hns3_set_vlan_rx_offload_cfg(hns, &rxvlan_cfg); + if (ret) { + hns3_err(hw, "enable strip rx vtag failed, ret =%d", ret); + return ret; + } + + hns3_update_rx_offload_cfg(hns, &rxvlan_cfg); + + return ret; +} + +static int +hns3_set_vlan_filter_ctrl(struct hns3_hw *hw, uint8_t vlan_type, + uint8_t fe_type, bool filter_en, uint8_t vf_id) +{ + struct hns3_vlan_filter_ctrl_cmd *req; + struct hns3_cmd_desc desc; + int ret; + + hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_VLAN_FILTER_CTRL, false); + + req = (struct hns3_vlan_filter_ctrl_cmd *)desc.data; + req->vlan_type = vlan_type; + req->vlan_fe = filter_en ? fe_type : 0; + req->vf_id = vf_id; + + ret = hns3_cmd_send(hw, &desc, 1); + if (ret) + hns3_err(hw, "set vlan filter fail, ret =%d", ret); + + return ret; +} + +static int +hns3_vlan_filter_init(struct hns3_adapter *hns) +{ + struct hns3_hw *hw = &hns->hw; + int ret; + + ret = hns3_set_vlan_filter_ctrl(hw, HNS3_FILTER_TYPE_VF, + HNS3_FILTER_FE_EGRESS, false, 0); + if (ret) { + hns3_err(hw, "failed to init vf vlan filter, ret = %d", ret); + return ret; + } + + ret = hns3_set_vlan_filter_ctrl(hw, HNS3_FILTER_TYPE_PORT, + HNS3_FILTER_FE_INGRESS, false, 0); + if (ret) + hns3_err(hw, "failed to init port vlan filter, ret = %d", ret); + + return ret; +} + +static int +hns3_enable_vlan_filter(struct hns3_adapter *hns, bool enable) +{ + struct hns3_hw *hw = &hns->hw; + int ret; + + ret = hns3_set_vlan_filter_ctrl(hw, HNS3_FILTER_TYPE_PORT, + HNS3_FILTER_FE_INGRESS, enable, 0); + if (ret) + hns3_err(hw, "failed to %s port vlan filter, ret = %d", + enable ? 
"enable" : "disable", ret); + + return ret; +} + +static int +hns3_vlan_offload_set(struct rte_eth_dev *dev, int mask) +{ + struct hns3_adapter *hns = dev->data->dev_private; + struct hns3_hw *hw = &hns->hw; + struct rte_eth_rxmode *rxmode; + unsigned int tmp_mask; + bool enable; + int ret = 0; + + rte_spinlock_lock(&hw->lock); + rxmode = &dev->data->dev_conf.rxmode; + tmp_mask = (unsigned int)mask; + if (tmp_mask & ETH_VLAN_FILTER_MASK) { + /* ignore vlan filter configuration during promiscuous mode */ + if (!dev->data->promiscuous) { + /* Enable or disable VLAN filter */ + enable = rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER ? + true : false; + + ret = hns3_enable_vlan_filter(hns, enable); + if (ret) { + rte_spinlock_unlock(&hw->lock); + hns3_err(hw, "failed to %s rx filter, ret = %d", + enable ? "enable" : "disable", ret); + return ret; + } + } + } + + if (tmp_mask & ETH_VLAN_STRIP_MASK) { + /* Enable or disable VLAN stripping */ + enable = rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP ? + true : false; + + ret = hns3_en_hw_strip_rxvtag(hns, enable); + if (ret) { + rte_spinlock_unlock(&hw->lock); + hns3_err(hw, "failed to %s rx strip, ret = %d", + enable ? "enable" : "disable", ret); + return ret; + } + } + + rte_spinlock_unlock(&hw->lock); + + return ret; +} + +static int +hns3_set_vlan_tx_offload_cfg(struct hns3_adapter *hns, + struct hns3_tx_vtag_cfg *vcfg) +{ + struct hns3_vport_vtag_tx_cfg_cmd *req; + struct hns3_cmd_desc desc; + struct hns3_hw *hw = &hns->hw; + uint16_t vport_id; + uint8_t bitmap; + int ret; + + hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_VLAN_PORT_TX_CFG, false); + + req = (struct hns3_vport_vtag_tx_cfg_cmd *)desc.data; + req->def_vlan_tag1 = vcfg->default_tag1; + req->def_vlan_tag2 = vcfg->default_tag2; + hns3_set_bit(req->vport_vlan_cfg, HNS3_ACCEPT_TAG1_B, + vcfg->accept_tag1 ? 1 : 0); + hns3_set_bit(req->vport_vlan_cfg, HNS3_ACCEPT_UNTAG1_B, + vcfg->accept_untag1 ? 1 : 0); + hns3_set_bit(req->vport_vlan_cfg, HNS3_ACCEPT_TAG2_B, + vcfg->accept_tag2 ? 1 : 0); + hns3_set_bit(req->vport_vlan_cfg, HNS3_ACCEPT_UNTAG2_B, + vcfg->accept_untag2 ? 1 : 0); + hns3_set_bit(req->vport_vlan_cfg, HNS3_PORT_INS_TAG1_EN_B, + vcfg->insert_tag1_en ? 1 : 0); + hns3_set_bit(req->vport_vlan_cfg, HNS3_PORT_INS_TAG2_EN_B, + vcfg->insert_tag2_en ? 1 : 0); + hns3_set_bit(req->vport_vlan_cfg, HNS3_CFG_NIC_ROCE_SEL_B, 0); + + /* + * In current version VF is not supported when PF is driven by DPDK + * driver, the PF-related vf_id is 0, just need to configure parameters + * for vport_id 0. 
+ */ + vport_id = 0; + req->vf_offset = vport_id / HNS3_VF_NUM_PER_CMD; + bitmap = 1 << (vport_id % HNS3_VF_NUM_PER_BYTE); + req->vf_bitmap[req->vf_offset] = bitmap; + + ret = hns3_cmd_send(hw, &desc, 1); + if (ret) + hns3_err(hw, "Send port txvlan cfg command fail, ret =%d", ret); + + return ret; +} + +static int +hns3_vlan_txvlan_cfg(struct hns3_adapter *hns, uint16_t port_base_vlan_state, + uint16_t pvid) +{ + struct hns3_hw *hw = &hns->hw; + struct hns3_tx_vtag_cfg txvlan_cfg; + int ret; + + if (port_base_vlan_state == HNS3_PORT_BASE_VLAN_DISABLE) { + txvlan_cfg.accept_tag1 = true; + txvlan_cfg.insert_tag1_en = false; + txvlan_cfg.default_tag1 = 0; + } else { + txvlan_cfg.accept_tag1 = false; + txvlan_cfg.insert_tag1_en = true; + txvlan_cfg.default_tag1 = pvid; + } + + txvlan_cfg.accept_untag1 = true; + txvlan_cfg.accept_tag2 = true; + txvlan_cfg.accept_untag2 = true; + txvlan_cfg.insert_tag2_en = false; + txvlan_cfg.default_tag2 = 0; + + ret = hns3_set_vlan_tx_offload_cfg(hns, &txvlan_cfg); + if (ret) { + hns3_err(hw, "pf vlan set pvid failed, pvid =%u, ret =%d", pvid, + ret); + return ret; + } + + hns3_update_tx_offload_cfg(hns, &txvlan_cfg); + return ret; +} + +static void +hns3_store_port_base_vlan_info(struct hns3_adapter *hns, uint16_t pvid, int on) +{ + struct hns3_pf *pf = &hns->pf; + + pf->port_base_vlan_cfg.state = on ? + HNS3_PORT_BASE_VLAN_ENABLE : HNS3_PORT_BASE_VLAN_DISABLE; + + pf->port_base_vlan_cfg.pvid = pvid; +} + +static void +hns3_rm_all_vlan_table(struct hns3_adapter *hns, bool is_del_list) +{ + struct hns3_user_vlan_table *vlan_entry; + struct hns3_pf *pf = &hns->pf; + + LIST_FOREACH(vlan_entry, &pf->vlan_list, next) { + if (vlan_entry->hd_tbl_status) + hns3_set_port_vlan_filter(hns, vlan_entry->vlan_id, 0); + + vlan_entry->hd_tbl_status = false; + } + + if (is_del_list) { + vlan_entry = LIST_FIRST(&pf->vlan_list); + while (vlan_entry) { + LIST_REMOVE(vlan_entry, next); + rte_free(vlan_entry); + vlan_entry = LIST_FIRST(&pf->vlan_list); + } + } +} + +static void +hns3_add_all_vlan_table(struct hns3_adapter *hns) +{ + struct hns3_user_vlan_table *vlan_entry; + struct hns3_pf *pf = &hns->pf; + + LIST_FOREACH(vlan_entry, &pf->vlan_list, next) { + if (!vlan_entry->hd_tbl_status) + hns3_set_port_vlan_filter(hns, vlan_entry->vlan_id, 1); + + vlan_entry->hd_tbl_status = true; + } +} + +static void +hns3_remove_all_vlan_table(struct hns3_adapter *hns) +{ + struct hns3_hw *hw = &hns->hw; + struct hns3_pf *pf = &hns->pf; + int ret; + + hns3_rm_all_vlan_table(hns, true); + if (pf->port_base_vlan_cfg.pvid != HNS3_INVLID_PVID) { + ret = hns3_set_port_vlan_filter(hns, + pf->port_base_vlan_cfg.pvid, 0); + if (ret) { + hns3_err(hw, "Failed to remove all vlan table, ret =%d", + ret); + return; + } + } +} + +static int +hns3_update_vlan_filter_entries(struct hns3_adapter *hns, + uint16_t port_base_vlan_state, + uint16_t new_pvid, uint16_t old_pvid) +{ + struct hns3_pf *pf = &hns->pf; + struct hns3_hw *hw = &hns->hw; + int ret = 0; + + if (port_base_vlan_state == HNS3_PORT_BASE_VLAN_ENABLE) { + if (old_pvid != HNS3_INVLID_PVID && old_pvid != 0) { + ret = hns3_set_port_vlan_filter(hns, old_pvid, 0); + if (ret) { + hns3_err(hw, + "Failed to clear old pvid filter, ret =%d", + ret); + return ret; + } + } + + hns3_rm_all_vlan_table(hns, false); + return hns3_set_port_vlan_filter(hns, new_pvid, 1); + } + + if (new_pvid != 0) { + ret = hns3_set_port_vlan_filter(hns, new_pvid, 0); + if (ret) { + hns3_err(hw, "Failed to set port vlan filter, ret =%d", + ret); + return ret; + } + } + + 
if (new_pvid == pf->port_base_vlan_cfg.pvid) + hns3_add_all_vlan_table(hns); + + return ret; +} + +static int +hns3_en_rx_strip_all(struct hns3_adapter *hns, int on) +{ + struct hns3_rx_vtag_cfg rx_vlan_cfg; + struct hns3_hw *hw = &hns->hw; + bool rx_strip_en; + int ret; + + rx_strip_en = on ? true : false; + rx_vlan_cfg.strip_tag1_en = rx_strip_en; + rx_vlan_cfg.strip_tag2_en = rx_strip_en; + rx_vlan_cfg.vlan1_vlan_prionly = false; + rx_vlan_cfg.vlan2_vlan_prionly = false; + rx_vlan_cfg.rx_vlan_offload_en = rx_strip_en; + + ret = hns3_set_vlan_rx_offload_cfg(hns, &rx_vlan_cfg); + if (ret) { + hns3_err(hw, "enable strip rx failed, ret =%d", ret); + return ret; + } + + hns3_update_rx_offload_cfg(hns, &rx_vlan_cfg); + return ret; +} + +static int +hns3_vlan_pvid_configure(struct hns3_adapter *hns, uint16_t pvid, int on) +{ + struct hns3_pf *pf = &hns->pf; + struct hns3_hw *hw = &hns->hw; + uint16_t port_base_vlan_state; + uint16_t old_pvid; + int ret; + + if (on == 0 && pvid != pf->port_base_vlan_cfg.pvid) { + if (pf->port_base_vlan_cfg.pvid != HNS3_INVLID_PVID) + hns3_warn(hw, "Invalid operation! As current pvid set " + "is %u, disable pvid %u is invalid", + pf->port_base_vlan_cfg.pvid, pvid); + return 0; + } + + port_base_vlan_state = on ? HNS3_PORT_BASE_VLAN_ENABLE : + HNS3_PORT_BASE_VLAN_DISABLE; + ret = hns3_vlan_txvlan_cfg(hns, port_base_vlan_state, pvid); + if (ret) { + hns3_err(hw, "Failed to config tx vlan, ret =%d", ret); + return ret; + } + + ret = hns3_en_rx_strip_all(hns, on); + if (ret) { + hns3_err(hw, "Failed to config rx vlan strip, ret =%d", ret); + return ret; + } + + if (pvid == HNS3_INVLID_PVID) + goto out; + old_pvid = pf->port_base_vlan_cfg.pvid; + ret = hns3_update_vlan_filter_entries(hns, port_base_vlan_state, pvid, + old_pvid); + if (ret) { + hns3_err(hw, "Failed to update vlan filter entries, ret =%d", + ret); + return ret; + } + +out: + hns3_store_port_base_vlan_info(hns, pvid, on); + return ret; +} + +static int +hns3_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on) +{ + struct hns3_adapter *hns = dev->data->dev_private; + struct hns3_hw *hw = &hns->hw; + int ret; + + if (pvid > RTE_ETHER_MAX_VLAN_ID) { + hns3_err(hw, "Invalid vlan_id = %u > %d", pvid, + RTE_ETHER_MAX_VLAN_ID); + return -EINVAL; + } + + rte_spinlock_lock(&hw->lock); + ret = hns3_vlan_pvid_configure(hns, pvid, on); + rte_spinlock_unlock(&hw->lock); + return ret; +} + +static void +init_port_base_vlan_info(struct hns3_hw *hw) +{ + struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); + struct hns3_pf *pf = &hns->pf; + + pf->port_base_vlan_cfg.state = HNS3_PORT_BASE_VLAN_DISABLE; + pf->port_base_vlan_cfg.pvid = HNS3_INVLID_PVID; +} + +static int +hns3_default_vlan_config(struct hns3_adapter *hns) +{ + struct hns3_hw *hw = &hns->hw; + int ret; + + ret = hns3_set_port_vlan_filter(hns, 0, 1); + if (ret) + hns3_err(hw, "default vlan 0 config failed, ret =%d", ret); + return ret; +} + +static int +hns3_init_vlan_config(struct hns3_adapter *hns) +{ + struct hns3_hw *hw = &hns->hw; + int ret; + + /* + * This function can be called in the initialization and reset process, + * when in reset process, it means that hardware had been reseted + * successfully and we need to restore the hardware configuration to + * ensure that the hardware configuration remains unchanged before and + * after reset. 
+ */ + if (rte_atomic16_read(&hw->reset.resetting) == 0) + init_port_base_vlan_info(hw); + + ret = hns3_vlan_filter_init(hns); + if (ret) { + hns3_err(hw, "vlan init fail in pf, ret =%d", ret); + return ret; + } + + ret = hns3_vlan_tpid_configure(hns, ETH_VLAN_TYPE_INNER, + RTE_ETHER_TYPE_VLAN); + if (ret) { + hns3_err(hw, "tpid set fail in pf, ret =%d", ret); + return ret; + } + + /* + * When in the reinit dev stage of the reset process, the following + * vlan-related configurations may differ from those at initialization, + * we will restore configurations to hardware in hns3_restore_vlan_table + * and hns3_restore_vlan_conf later. + */ + if (rte_atomic16_read(&hw->reset.resetting) == 0) { + ret = hns3_vlan_pvid_configure(hns, HNS3_INVLID_PVID, 0); + if (ret) { + hns3_err(hw, "pvid set fail in pf, ret =%d", ret); + return ret; + } + + ret = hns3_en_hw_strip_rxvtag(hns, false); + if (ret) { + hns3_err(hw, "rx strip configure fail in pf, ret =%d", + ret); + return ret; + } + } + + return hns3_default_vlan_config(hns); +} + +static int +hns3_restore_vlan_conf(struct hns3_adapter *hns) +{ + struct hns3_pf *pf = &hns->pf; + struct hns3_hw *hw = &hns->hw; + uint64_t offloads; + bool enable; + int ret; + + if (!hw->data->promiscuous) { + /* restore vlan filter states */ + offloads = hw->data->dev_conf.rxmode.offloads; + enable = offloads & DEV_RX_OFFLOAD_VLAN_FILTER ? true : false; + ret = hns3_enable_vlan_filter(hns, enable); + if (ret) { + hns3_err(hw, "failed to restore vlan rx filter conf, " + "ret = %d", ret); + return ret; + } + } + + ret = hns3_set_vlan_rx_offload_cfg(hns, &pf->vtag_config.rx_vcfg); + if (ret) { + hns3_err(hw, "failed to restore vlan rx conf, ret = %d", ret); + return ret; + } + + ret = hns3_set_vlan_tx_offload_cfg(hns, &pf->vtag_config.tx_vcfg); + if (ret) + hns3_err(hw, "failed to restore vlan tx conf, ret = %d", ret); + + return ret; +} + +static int +hns3_dev_configure_vlan(struct rte_eth_dev *dev) +{ + struct hns3_adapter *hns = dev->data->dev_private; + struct rte_eth_dev_data *data = dev->data; + struct rte_eth_txmode *txmode; + struct hns3_hw *hw = &hns->hw; + int mask; + int ret; + + txmode = &data->dev_conf.txmode; + if (txmode->hw_vlan_reject_tagged || txmode->hw_vlan_reject_untagged) + hns3_warn(hw, + "hw_vlan_reject_tagged or hw_vlan_reject_untagged " + "configuration is not supported! Ignore these two " + "parameters: hw_vlan_reject_tagged(%d), " + "hw_vlan_reject_untagged(%d)", + txmode->hw_vlan_reject_tagged, + txmode->hw_vlan_reject_untagged); + + /* Apply vlan offload setting */ + mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK; + ret = hns3_vlan_offload_set(dev, mask); + if (ret) { + hns3_err(hw, "dev config rx vlan offload failed, ret = %d", + ret); + return ret; + } + + /* + * If pvid config is not set in rte_eth_conf, driver needn't to set + * VLAN pvid related configuration to hardware. 
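+ * Illustrative usage: an application that sets txmode->pvid = 100 and + * txmode->hw_vlan_insert_pvid = 1 in rte_eth_conf reaches + * hns3_vlan_pvid_set() below and gets VLAN 100 as the port based vlan; + * leaving both fields at zero skips any pvid programming.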
+ */ + if (txmode->pvid == 0 && txmode->hw_vlan_insert_pvid == 0) + return 0; + + /* Apply pvid setting */ + ret = hns3_vlan_pvid_set(dev, txmode->pvid, + txmode->hw_vlan_insert_pvid); + if (ret) + hns3_err(hw, "dev config vlan pvid(%d) failed, ret = %d", + txmode->pvid, ret); + + return ret; +} + +static int +hns3_config_tso(struct hns3_hw *hw, unsigned int tso_mss_min, + unsigned int tso_mss_max) +{ + struct hns3_cfg_tso_status_cmd *req; + struct hns3_cmd_desc desc; + uint16_t tso_mss; + + hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TSO_GENERIC_CONFIG, false); + + req = (struct hns3_cfg_tso_status_cmd *)desc.data; + + tso_mss = 0; + hns3_set_field(tso_mss, HNS3_TSO_MSS_MIN_M, HNS3_TSO_MSS_MIN_S, + tso_mss_min); + req->tso_mss_min = rte_cpu_to_le_16(tso_mss); + + tso_mss = 0; + hns3_set_field(tso_mss, HNS3_TSO_MSS_MIN_M, HNS3_TSO_MSS_MIN_S, + tso_mss_max); + req->tso_mss_max = rte_cpu_to_le_16(tso_mss); + + return hns3_cmd_send(hw, &desc, 1); +} + +int +hns3_config_gro(struct hns3_hw *hw, bool en) +{ + struct hns3_cfg_gro_status_cmd *req; + struct hns3_cmd_desc desc; + int ret; + + hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_GRO_GENERIC_CONFIG, false); + req = (struct hns3_cfg_gro_status_cmd *)desc.data; + + req->gro_en = rte_cpu_to_le_16(en ? 1 : 0); + + ret = hns3_cmd_send(hw, &desc, 1); + if (ret) + hns3_err(hw, "GRO hardware config cmd failed, ret = %d", ret); + + return ret; +} + +static int +hns3_set_umv_space(struct hns3_hw *hw, uint16_t space_size, + uint16_t *allocated_size, bool is_alloc) +{ + struct hns3_umv_spc_alc_cmd *req; + struct hns3_cmd_desc desc; + int ret; + + req = (struct hns3_umv_spc_alc_cmd *)desc.data; + hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_ALLOCATE, false); + hns3_set_bit(req->allocate, HNS3_UMV_SPC_ALC_B, is_alloc ? 0 : 1); + req->space_size = rte_cpu_to_le_32(space_size); + + ret = hns3_cmd_send(hw, &desc, 1); + if (ret) { + PMD_INIT_LOG(ERR, "%s umv space failed for cmd_send, ret =%d", + is_alloc ? "allocate" : "free", ret); + return ret; + } + + if (is_alloc && allocated_size) + *allocated_size = rte_le_to_cpu_32(desc.data[1]); + + return 0; +} + +static int +hns3_init_umv_space(struct hns3_hw *hw) +{ + struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); + struct hns3_pf *pf = &hns->pf; + uint16_t allocated_size = 0; + int ret; + + ret = hns3_set_umv_space(hw, pf->wanted_umv_size, &allocated_size, + true); + if (ret) + return ret; + + if (allocated_size < pf->wanted_umv_size) + PMD_INIT_LOG(WARNING, "Alloc umv space failed, want %u, get %u", + pf->wanted_umv_size, allocated_size); + + pf->max_umv_size = (!!allocated_size) ? 
allocated_size : + pf->wanted_umv_size; + pf->used_umv_size = 0; + return 0; +} + +static int +hns3_uninit_umv_space(struct hns3_hw *hw) +{ + struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); + struct hns3_pf *pf = &hns->pf; + int ret; + + if (pf->max_umv_size == 0) + return 0; + + ret = hns3_set_umv_space(hw, pf->max_umv_size, NULL, false); + if (ret) + return ret; + + pf->max_umv_size = 0; + + return 0; +} + +static bool +hns3_is_umv_space_full(struct hns3_hw *hw) +{ + struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); + struct hns3_pf *pf = &hns->pf; + bool is_full; + + is_full = (pf->used_umv_size >= pf->max_umv_size); + + return is_full; +} + +static void +hns3_update_umv_space(struct hns3_hw *hw, bool is_free) +{ + struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); + struct hns3_pf *pf = &hns->pf; + + if (is_free) { + if (pf->used_umv_size > 0) + pf->used_umv_size--; + } else + pf->used_umv_size++; +} + +static void +hns3_prepare_mac_addr(struct hns3_mac_vlan_tbl_entry_cmd *new_req, + const uint8_t *addr, bool is_mc) +{ + const unsigned char *mac_addr = addr; + uint32_t high_val = ((uint32_t)mac_addr[3] << 24) | + ((uint32_t)mac_addr[2] << 16) | + ((uint32_t)mac_addr[1] << 8) | + (uint32_t)mac_addr[0]; + uint32_t low_val = ((uint32_t)mac_addr[5] << 8) | (uint32_t)mac_addr[4]; + + hns3_set_bit(new_req->flags, HNS3_MAC_VLAN_BIT0_EN_B, 1); + if (is_mc) { + hns3_set_bit(new_req->entry_type, HNS3_MAC_VLAN_BIT0_EN_B, 0); + hns3_set_bit(new_req->entry_type, HNS3_MAC_VLAN_BIT1_EN_B, 1); + hns3_set_bit(new_req->mc_mac_en, HNS3_MAC_VLAN_BIT0_EN_B, 1); + } + + new_req->mac_addr_hi32 = rte_cpu_to_le_32(high_val); + new_req->mac_addr_lo16 = rte_cpu_to_le_16(low_val & 0xffff); +} + +static int +hns3_get_mac_vlan_cmd_status(struct hns3_hw *hw, uint16_t cmdq_resp, + uint8_t resp_code, + enum hns3_mac_vlan_tbl_opcode op) +{ + if (cmdq_resp) { + hns3_err(hw, "cmdq execute failed for get_mac_vlan_cmd_status,status=%u", + cmdq_resp); + return -EIO; + } + + if (op == HNS3_MAC_VLAN_ADD) { + if (resp_code == 0 || resp_code == 1) { + return 0; + } else if (resp_code == HNS3_ADD_UC_OVERFLOW) { + hns3_err(hw, "add mac addr failed for uc_overflow"); + return -ENOSPC; + } else if (resp_code == HNS3_ADD_MC_OVERFLOW) { + hns3_err(hw, "add mac addr failed for mc_overflow"); + return -ENOSPC; + } + + hns3_err(hw, "add mac addr failed for undefined, code=%u", + resp_code); + return -EIO; + } else if (op == HNS3_MAC_VLAN_REMOVE) { + if (resp_code == 0) { + return 0; + } else if (resp_code == 1) { + hns3_dbg(hw, "remove mac addr failed for miss"); + return -ENOENT; + } + + hns3_err(hw, "remove mac addr failed for undefined, code=%u", + resp_code); + return -EIO; + } else if (op == HNS3_MAC_VLAN_LKUP) { + if (resp_code == 0) { + return 0; + } else if (resp_code == 1) { + hns3_dbg(hw, "lookup mac addr failed for miss"); + return -ENOENT; + } + + hns3_err(hw, "lookup mac addr failed for undefined, code=%u", + resp_code); + return -EIO; + } + + hns3_err(hw, "unknown opcode for get_mac_vlan_cmd_status, opcode=%u", + op); + + return -EINVAL; +} + +static int +hns3_lookup_mac_vlan_tbl(struct hns3_hw *hw, + struct hns3_mac_vlan_tbl_entry_cmd *req, + struct hns3_cmd_desc *desc, bool is_mc) +{ + uint8_t resp_code; + uint16_t retval; + int ret; + + hns3_cmd_setup_basic_desc(&desc[0], HNS3_OPC_MAC_VLAN_ADD, true); + if (is_mc) { + desc[0].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); + memcpy(desc[0].data, req, + sizeof(struct hns3_mac_vlan_tbl_entry_cmd)); + hns3_cmd_setup_basic_desc(&desc[1], HNS3_OPC_MAC_VLAN_ADD, 
+ true); + desc[1].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); + hns3_cmd_setup_basic_desc(&desc[2], HNS3_OPC_MAC_VLAN_ADD, + true); + ret = hns3_cmd_send(hw, desc, HNS3_MC_MAC_VLAN_ADD_DESC_NUM); + } else { + memcpy(desc[0].data, req, + sizeof(struct hns3_mac_vlan_tbl_entry_cmd)); + ret = hns3_cmd_send(hw, desc, 1); + } + if (ret) { + hns3_err(hw, "lookup mac addr failed for cmd_send, ret =%d.", + ret); + return ret; + } + resp_code = (rte_le_to_cpu_32(desc[0].data[0]) >> 8) & 0xff; + retval = rte_le_to_cpu_16(desc[0].retval); + + return hns3_get_mac_vlan_cmd_status(hw, retval, resp_code, + HNS3_MAC_VLAN_LKUP); +} + +static int +hns3_add_mac_vlan_tbl(struct hns3_hw *hw, + struct hns3_mac_vlan_tbl_entry_cmd *req, + struct hns3_cmd_desc *mc_desc) +{ + uint8_t resp_code; + uint16_t retval; + int cfg_status; + int ret; + + if (mc_desc == NULL) { + struct hns3_cmd_desc desc; + + hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_ADD, false); + memcpy(desc.data, req, + sizeof(struct hns3_mac_vlan_tbl_entry_cmd)); + ret = hns3_cmd_send(hw, &desc, 1); + resp_code = (rte_le_to_cpu_32(desc.data[0]) >> 8) & 0xff; + retval = rte_le_to_cpu_16(desc.retval); + + cfg_status = hns3_get_mac_vlan_cmd_status(hw, retval, resp_code, + HNS3_MAC_VLAN_ADD); + } else { + hns3_cmd_reuse_desc(&mc_desc[0], false); + mc_desc[0].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); + hns3_cmd_reuse_desc(&mc_desc[1], false); + mc_desc[1].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); + hns3_cmd_reuse_desc(&mc_desc[2], false); + mc_desc[2].flag &= rte_cpu_to_le_16(~HNS3_CMD_FLAG_NEXT); + memcpy(mc_desc[0].data, req, + sizeof(struct hns3_mac_vlan_tbl_entry_cmd)); + mc_desc[0].retval = 0; + ret = hns3_cmd_send(hw, mc_desc, HNS3_MC_MAC_VLAN_ADD_DESC_NUM); + resp_code = (rte_le_to_cpu_32(mc_desc[0].data[0]) >> 8) & 0xff; + retval = rte_le_to_cpu_16(mc_desc[0].retval); + + cfg_status = hns3_get_mac_vlan_cmd_status(hw, retval, resp_code, + HNS3_MAC_VLAN_ADD); + } + + if (ret) { + hns3_err(hw, "add mac addr failed for cmd_send, ret =%d", ret); + return ret; + } + + return cfg_status; +} + +static int +hns3_remove_mac_vlan_tbl(struct hns3_hw *hw, + struct hns3_mac_vlan_tbl_entry_cmd *req) +{ + struct hns3_cmd_desc desc; + uint8_t resp_code; + uint16_t retval; + int ret; + + hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_REMOVE, false); + + memcpy(desc.data, req, sizeof(struct hns3_mac_vlan_tbl_entry_cmd)); + + ret = hns3_cmd_send(hw, &desc, 1); + if (ret) { + hns3_err(hw, "del mac addr failed for cmd_send, ret =%d", ret); + return ret; + } + resp_code = (rte_le_to_cpu_32(desc.data[0]) >> 8) & 0xff; + retval = rte_le_to_cpu_16(desc.retval); + + return hns3_get_mac_vlan_cmd_status(hw, retval, resp_code, + HNS3_MAC_VLAN_REMOVE); +} + +static int +hns3_add_uc_addr_common(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) +{ + struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); + struct hns3_mac_vlan_tbl_entry_cmd req; + struct hns3_pf *pf = &hns->pf; + struct hns3_cmd_desc desc; + char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; + uint16_t egress_port = 0; + uint8_t vf_id; + int ret; + + /* check if mac addr is valid */ + if (!rte_is_valid_assigned_ether_addr(mac_addr)) { + rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, + mac_addr); + hns3_err(hw, "Add unicast mac addr err! addr(%s) invalid", + mac_str); + return -EINVAL; + } + + memset(&req, 0, sizeof(req)); + + /* + * In current version VF is not supported when PF is driven by DPDK + * driver, the PF-related vf_id is 0, just need to configure parameters + * for vf_id 0. 
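+ * In other words, hns3_set_field() below simply writes 0 into the VFID + * field of egress_port, so the unicast entry is always installed for the + * PF itself.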
+ */ + vf_id = 0; + hns3_set_field(egress_port, HNS3_MAC_EPORT_VFID_M, + HNS3_MAC_EPORT_VFID_S, vf_id); + + req.egress_port = rte_cpu_to_le_16(egress_port); + + hns3_prepare_mac_addr(&req, mac_addr->addr_bytes, false); + + /* + * Lookup the mac address in the mac_vlan table, and add + * it if the entry is inexistent. Repeated unicast entry + * is not allowed in the mac vlan table. + */ + ret = hns3_lookup_mac_vlan_tbl(hw, &req, &desc, false); + if (ret == -ENOENT) { + if (!hns3_is_umv_space_full(hw)) { + ret = hns3_add_mac_vlan_tbl(hw, &req, NULL); + if (!ret) + hns3_update_umv_space(hw, false); + return ret; + } + + hns3_err(hw, "UC MAC table full(%u)", pf->used_umv_size); + + return -ENOSPC; + } + + rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, mac_addr); + + /* check if we just hit the duplicate */ + if (ret == 0) { + hns3_dbg(hw, "mac addr(%s) has been in the MAC table", mac_str); + return 0; + } + + hns3_err(hw, "PF failed to add unicast entry(%s) in the MAC table", + mac_str); + + return ret; +} + +static int +hns3_add_mc_addr_common(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) +{ + char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; + struct rte_ether_addr *addr; + int ret; + int i; + + for (i = 0; i < hw->mc_addrs_num; i++) { + addr = &hw->mc_addrs[i]; + /* Check if there are duplicate addresses */ + if (rte_is_same_ether_addr(addr, mac_addr)) { + rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, + addr); + hns3_err(hw, "failed to add mc mac addr, same addrs" + "(%s) is added by the set_mc_mac_addr_list " + "API", mac_str); + return -EINVAL; + } + } + + ret = hns3_add_mc_addr(hw, mac_addr); + if (ret) { + rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, + mac_addr); + hns3_err(hw, "failed to add mc mac addr(%s), ret = %d", + mac_str, ret); + } + return ret; +} + +static int +hns3_remove_mc_addr_common(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) +{ + char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; + int ret; + + ret = hns3_remove_mc_addr(hw, mac_addr); + if (ret) { + rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, + mac_addr); + hns3_err(hw, "failed to remove mc mac addr(%s), ret = %d", + mac_str, ret); + } + return ret; +} + +static int +hns3_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr, + uint32_t idx, __rte_unused uint32_t pool) +{ + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); + char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; + int ret; + + rte_spinlock_lock(&hw->lock); + + /* + * In hns3 network engine adding UC and MC mac address with different + * commands with firmware. We need to determine whether the input + * address is a UC or a MC address to call different commands. + * By the way, it is recommended calling the API function named + * rte_eth_dev_set_mc_addr_list to set the MC mac address, because + * using the rte_eth_dev_mac_addr_add API function to set MC mac address + * may affect the specifications of UC mac addresses. 
+ */ + if (rte_is_multicast_ether_addr(mac_addr)) + ret = hns3_add_mc_addr_common(hw, mac_addr); + else + ret = hns3_add_uc_addr_common(hw, mac_addr); + + if (ret) { + rte_spinlock_unlock(&hw->lock); + rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, + mac_addr); + hns3_err(hw, "failed to add mac addr(%s), ret = %d", mac_str, + ret); + return ret; + } + + if (idx == 0) + hw->mac.default_addr_setted = true; + rte_spinlock_unlock(&hw->lock); + + return ret; +} + +static int +hns3_remove_uc_addr_common(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) +{ + struct hns3_mac_vlan_tbl_entry_cmd req; + char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; + int ret; + + /* check if mac addr is valid */ + if (!rte_is_valid_assigned_ether_addr(mac_addr)) { + rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, + mac_addr); + hns3_err(hw, "remove unicast mac addr err! addr(%s) invalid", + mac_str); + return -EINVAL; + } + + memset(&req, 0, sizeof(req)); + hns3_set_bit(req.entry_type, HNS3_MAC_VLAN_BIT0_EN_B, 0); + hns3_prepare_mac_addr(&req, mac_addr->addr_bytes, false); + ret = hns3_remove_mac_vlan_tbl(hw, &req); + if (ret == -ENOENT) /* mac addr isn't existent in the mac vlan table. */ + return 0; + else if (ret == 0) + hns3_update_umv_space(hw, true); + + return ret; +} + +static void +hns3_remove_mac_addr(struct rte_eth_dev *dev, uint32_t idx) +{ + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); + /* index will be checked by upper level rte interface */ + struct rte_ether_addr *mac_addr = &dev->data->mac_addrs[idx]; + char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; + int ret; + + rte_spinlock_lock(&hw->lock); + + if (rte_is_multicast_ether_addr(mac_addr)) + ret = hns3_remove_mc_addr_common(hw, mac_addr); + else + ret = hns3_remove_uc_addr_common(hw, mac_addr); + rte_spinlock_unlock(&hw->lock); + if (ret) { + rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, + mac_addr); + hns3_err(hw, "failed to remove mac addr(%s), ret = %d", mac_str, + ret); + } +} + +static int +hns3_set_default_mac_addr(struct rte_eth_dev *dev, + struct rte_ether_addr *mac_addr) +{ + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_ether_addr *oaddr; + char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; + bool default_addr_setted; + bool rm_succes = false; + int ret, ret_val; + + /* + * It has been guaranteed that input parameter named mac_addr is valid + * address in the rte layer of DPDK framework. 
+ */ + oaddr = (struct rte_ether_addr *)hw->mac.mac_addr; + default_addr_setted = hw->mac.default_addr_setted; + if (default_addr_setted && !!rte_is_same_ether_addr(mac_addr, oaddr)) + return 0; + + rte_spinlock_lock(&hw->lock); + if (default_addr_setted) { + ret = hns3_remove_uc_addr_common(hw, oaddr); + if (ret) { + rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, + oaddr); + hns3_warn(hw, "Remove old uc mac address(%s) fail: %d", + mac_str, ret); + rm_succes = false; + } else + rm_succes = true; + } + + ret = hns3_add_uc_addr_common(hw, mac_addr); + if (ret) { + rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, + mac_addr); + hns3_err(hw, "Failed to set mac addr(%s): %d", mac_str, ret); + goto err_add_uc_addr; + } + + ret = hns3_pause_addr_cfg(hw, mac_addr->addr_bytes); + if (ret) { + hns3_err(hw, "Failed to configure mac pause address: %d", ret); + goto err_pause_addr_cfg; + } + + rte_ether_addr_copy(mac_addr, + (struct rte_ether_addr *)hw->mac.mac_addr); + hw->mac.default_addr_setted = true; + rte_spinlock_unlock(&hw->lock); + + return 0; + +err_pause_addr_cfg: + ret_val = hns3_remove_uc_addr_common(hw, mac_addr); + if (ret_val) { + rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, + mac_addr); + hns3_warn(hw, + "Failed to roll back to del setted mac addr(%s): %d", + mac_str, ret_val); + } + +err_add_uc_addr: + if (rm_succes) { + ret_val = hns3_add_uc_addr_common(hw, oaddr); + if (ret_val) { + rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, + oaddr); + hns3_warn(hw, + "Failed to restore old uc mac addr(%s): %d", + mac_str, ret_val); + hw->mac.default_addr_setted = false; + } + } + rte_spinlock_unlock(&hw->lock); + + return ret; +} + +static int +hns3_configure_all_mac_addr(struct hns3_adapter *hns, bool del) +{ + char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; + struct hns3_hw *hw = &hns->hw; + struct rte_ether_addr *addr; + int err = 0; + int ret; + int i; + + for (i = 0; i < HNS3_UC_MACADDR_NUM; i++) { + addr = &hw->data->mac_addrs[i]; + if (rte_is_zero_ether_addr(addr)) + continue; + if (rte_is_multicast_ether_addr(addr)) + ret = del ? hns3_remove_mc_addr(hw, addr) : + hns3_add_mc_addr(hw, addr); + else + ret = del ? hns3_remove_uc_addr_common(hw, addr) : + hns3_add_uc_addr_common(hw, addr); + + if (ret) { + err = ret; + rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, + addr); + hns3_err(hw, "failed to %s mac addr(%s) index:%d " + "ret = %d.", del ? 
"remove" : "restore", + mac_str, i, ret); + } + } + return err; +} + +static void +hns3_update_desc_vfid(struct hns3_cmd_desc *desc, uint8_t vfid, bool clr) +{ +#define HNS3_VF_NUM_IN_FIRST_DESC 192 + uint8_t word_num; + uint8_t bit_num; + + if (vfid < HNS3_VF_NUM_IN_FIRST_DESC) { + word_num = vfid / 32; + bit_num = vfid % 32; + if (clr) + desc[1].data[word_num] &= + rte_cpu_to_le_32(~(1UL << bit_num)); + else + desc[1].data[word_num] |= + rte_cpu_to_le_32(1UL << bit_num); + } else { + word_num = (vfid - HNS3_VF_NUM_IN_FIRST_DESC) / 32; + bit_num = vfid % 32; + if (clr) + desc[2].data[word_num] &= + rte_cpu_to_le_32(~(1UL << bit_num)); + else + desc[2].data[word_num] |= + rte_cpu_to_le_32(1UL << bit_num); + } +} + +static int +hns3_add_mc_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) +{ + struct hns3_mac_vlan_tbl_entry_cmd req; + struct hns3_cmd_desc desc[3]; + char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; + uint8_t vf_id; + int ret; + + /* Check if mac addr is valid */ + if (!rte_is_multicast_ether_addr(mac_addr)) { + rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, + mac_addr); + hns3_err(hw, "failed to add mc mac addr, addr(%s) invalid", + mac_str); + return -EINVAL; + } + + memset(&req, 0, sizeof(req)); + hns3_set_bit(req.entry_type, HNS3_MAC_VLAN_BIT0_EN_B, 0); + hns3_prepare_mac_addr(&req, mac_addr->addr_bytes, true); + ret = hns3_lookup_mac_vlan_tbl(hw, &req, desc, true); + if (ret) { + /* This mac addr do not exist, add new entry for it */ + memset(desc[0].data, 0, sizeof(desc[0].data)); + memset(desc[1].data, 0, sizeof(desc[0].data)); + memset(desc[2].data, 0, sizeof(desc[0].data)); + } + + /* + * In current version VF is not supported when PF is driven by DPDK + * driver, the PF-related vf_id is 0, just need to configure parameters + * for vf_id 0. + */ + vf_id = 0; + hns3_update_desc_vfid(desc, vf_id, false); + ret = hns3_add_mac_vlan_tbl(hw, &req, desc); + if (ret) { + if (ret == -ENOSPC) + hns3_err(hw, "mc mac vlan table is full"); + rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, + mac_addr); + hns3_err(hw, "failed to add mc mac addr(%s): %d", mac_str, ret); + } + + return ret; +} + +static int +hns3_remove_mc_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) +{ + struct hns3_mac_vlan_tbl_entry_cmd req; + struct hns3_cmd_desc desc[3]; + char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; + uint8_t vf_id; + int ret; + + /* Check if mac addr is valid */ + if (!rte_is_multicast_ether_addr(mac_addr)) { + rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, + mac_addr); + hns3_err(hw, "Failed to rm mc mac addr, addr(%s) invalid", + mac_str); + return -EINVAL; + } + + memset(&req, 0, sizeof(req)); + hns3_set_bit(req.entry_type, HNS3_MAC_VLAN_BIT0_EN_B, 0); + hns3_prepare_mac_addr(&req, mac_addr->addr_bytes, true); + ret = hns3_lookup_mac_vlan_tbl(hw, &req, desc, true); + if (ret == 0) { + /* + * This mac addr exist, remove this handle's VFID for it. + * In current version VF is not supported when PF is driven by + * DPDK driver, the PF-related vf_id is 0, just need to + * configure parameters for vf_id 0. + */ + vf_id = 0; + hns3_update_desc_vfid(desc, vf_id, true); + + /* All the vfid is zero, so need to delete this entry */ + ret = hns3_remove_mac_vlan_tbl(hw, &req); + } else if (ret == -ENOENT) { + /* This mac addr doesn't exist. 
*/ + return 0; + } + + if (ret) { + rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, + mac_addr); + hns3_err(hw, "Failed to rm mc mac addr(%s): %d", mac_str, ret); + } + + return ret; +} + +static int +hns3_set_mc_addr_chk_param(struct hns3_hw *hw, + struct rte_ether_addr *mc_addr_set, + uint32_t nb_mc_addr) +{ + char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; + struct rte_ether_addr *addr; + uint32_t i; + uint32_t j; + + if (nb_mc_addr > HNS3_MC_MACADDR_NUM) { + hns3_err(hw, "failed to set mc mac addr, nb_mc_addr(%d) " + "invalid. valid range: 0~%d", + nb_mc_addr, HNS3_MC_MACADDR_NUM); + return -EINVAL; + } + + /* Check if input mac addresses are valid */ + for (i = 0; i < nb_mc_addr; i++) { + addr = &mc_addr_set[i]; + if (!rte_is_multicast_ether_addr(addr)) { + rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, + addr); + hns3_err(hw, + "failed to set mc mac addr, addr(%s) invalid.", + mac_str); + return -EINVAL; + } + + /* Check if there are duplicate addresses */ + for (j = i + 1; j < nb_mc_addr; j++) { + if (rte_is_same_ether_addr(addr, &mc_addr_set[j])) { + rte_ether_format_addr(mac_str, + RTE_ETHER_ADDR_FMT_SIZE, + addr); + hns3_err(hw, "failed to set mc mac addr, " + "addrs invalid. two same addrs(%s).", + mac_str); + return -EINVAL; + } + } + + /* + * Check if there are duplicate addresses between mac_addrs + * and mc_addr_set + */ + for (j = 0; j < HNS3_UC_MACADDR_NUM; j++) { + if (rte_is_same_ether_addr(addr, + &hw->data->mac_addrs[j])) { + rte_ether_format_addr(mac_str, + RTE_ETHER_ADDR_FMT_SIZE, + addr); + hns3_err(hw, "failed to set mc mac addr, " + "addrs invalid. addrs(%s) has already " + "configured in mac_addr add API", + mac_str); + return -EINVAL; + } + } + } + + return 0; +} + +static void +hns3_set_mc_addr_calc_addr(struct hns3_hw *hw, + struct rte_ether_addr *mc_addr_set, + int mc_addr_num, + struct rte_ether_addr *reserved_addr_list, + int *reserved_addr_num, + struct rte_ether_addr *add_addr_list, + int *add_addr_num, + struct rte_ether_addr *rm_addr_list, + int *rm_addr_num) +{ + struct rte_ether_addr *addr; + int current_addr_num; + int reserved_num = 0; + int add_num = 0; + int rm_num = 0; + int num; + int i; + int j; + bool same_addr; + + /* Calculate the mc mac address list that should be removed */ + current_addr_num = hw->mc_addrs_num; + for (i = 0; i < current_addr_num; i++) { + addr = &hw->mc_addrs[i]; + same_addr = false; + for (j = 0; j < mc_addr_num; j++) { + if (rte_is_same_ether_addr(addr, &mc_addr_set[j])) { + same_addr = true; + break; + } + } + + if (!same_addr) { + rte_ether_addr_copy(addr, &rm_addr_list[rm_num]); + rm_num++; + } else { + rte_ether_addr_copy(addr, + &reserved_addr_list[reserved_num]); + reserved_num++; + } + } + + /* Calculate the mc mac address list that should be added */ + for (i = 0; i < mc_addr_num; i++) { + addr = &mc_addr_set[i]; + same_addr = false; + for (j = 0; j < current_addr_num; j++) { + if (rte_is_same_ether_addr(addr, &hw->mc_addrs[j])) { + same_addr = true; + break; + } + } + + if (!same_addr) { + rte_ether_addr_copy(addr, &add_addr_list[add_num]); + add_num++; + } + } + + /* Reorder the mc mac address list maintained by driver */ + for (i = 0; i < reserved_num; i++) + rte_ether_addr_copy(&reserved_addr_list[i], &hw->mc_addrs[i]); + + for (i = 0; i < rm_num; i++) { + num = reserved_num + i; + rte_ether_addr_copy(&rm_addr_list[i], &hw->mc_addrs[num]); + } + + *reserved_addr_num = reserved_num; + *add_addr_num = add_num; + *rm_addr_num = rm_num; +} + +static int +hns3_set_mc_mac_addr_list(struct rte_eth_dev *dev, 
+ struct rte_ether_addr *mc_addr_set, + uint32_t nb_mc_addr) +{ + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_ether_addr reserved_addr_list[HNS3_MC_MACADDR_NUM]; + struct rte_ether_addr add_addr_list[HNS3_MC_MACADDR_NUM]; + struct rte_ether_addr rm_addr_list[HNS3_MC_MACADDR_NUM]; + struct rte_ether_addr *addr; + int reserved_addr_num; + int add_addr_num; + int rm_addr_num; + int mc_addr_num; + int num; + int ret; + int i; + + /* Check if input parameters are valid */ + ret = hns3_set_mc_addr_chk_param(hw, mc_addr_set, nb_mc_addr); + if (ret) + return ret; + + rte_spinlock_lock(&hw->lock); + + /* + * Calculate the mc mac address lists those should be removed and be + * added, Reorder the mc mac address list maintained by driver. + */ + mc_addr_num = (int)nb_mc_addr; + hns3_set_mc_addr_calc_addr(hw, mc_addr_set, mc_addr_num, + reserved_addr_list, &reserved_addr_num, + add_addr_list, &add_addr_num, + rm_addr_list, &rm_addr_num); + + /* Remove mc mac addresses */ + for (i = 0; i < rm_addr_num; i++) { + num = rm_addr_num - i - 1; + addr = &rm_addr_list[num]; + ret = hns3_remove_mc_addr(hw, addr); + if (ret) { + rte_spinlock_unlock(&hw->lock); + return ret; + } + hw->mc_addrs_num--; + } + + /* Add mc mac addresses */ + for (i = 0; i < add_addr_num; i++) { + addr = &add_addr_list[i]; + ret = hns3_add_mc_addr(hw, addr); + if (ret) { + rte_spinlock_unlock(&hw->lock); + return ret; + } + + num = reserved_addr_num + i; + rte_ether_addr_copy(addr, &hw->mc_addrs[num]); + hw->mc_addrs_num++; + } + rte_spinlock_unlock(&hw->lock); + + return 0; +} + +static int +hns3_configure_all_mc_mac_addr(struct hns3_adapter *hns, bool del) +{ + char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; + struct hns3_hw *hw = &hns->hw; + struct rte_ether_addr *addr; + int err = 0; + int ret; + int i; + + for (i = 0; i < hw->mc_addrs_num; i++) { + addr = &hw->mc_addrs[i]; + if (!rte_is_multicast_ether_addr(addr)) + continue; + if (del) + ret = hns3_remove_mc_addr(hw, addr); + else + ret = hns3_add_mc_addr(hw, addr); + if (ret) { + err = ret; + rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, + addr); + hns3_dbg(hw, "%s mc mac addr: %s failed for pf: ret = %d", + del ? "Remove" : "Restore", mac_str, ret); + } + } + return err; +} + +static int +hns3_check_mq_mode(struct rte_eth_dev *dev) +{ + enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode; + enum rte_eth_tx_mq_mode tx_mq_mode = dev->data->dev_conf.txmode.mq_mode; + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct rte_eth_dcb_rx_conf *dcb_rx_conf; + struct rte_eth_dcb_tx_conf *dcb_tx_conf; + uint8_t num_tc; + int max_tc = 0; + int i; + + dcb_rx_conf = &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf; + dcb_tx_conf = &dev->data->dev_conf.tx_adv_conf.dcb_tx_conf; + + if (rx_mq_mode == ETH_MQ_RX_VMDQ_DCB_RSS) { + hns3_err(hw, "ETH_MQ_RX_VMDQ_DCB_RSS is not supported. " + "rx_mq_mode = %d", rx_mq_mode); + return -EINVAL; + } + + if (rx_mq_mode == ETH_MQ_RX_VMDQ_DCB || + tx_mq_mode == ETH_MQ_TX_VMDQ_DCB) { + hns3_err(hw, "ETH_MQ_RX_VMDQ_DCB and ETH_MQ_TX_VMDQ_DCB " + "is not supported. 
rx_mq_mode = %d, tx_mq_mode = %d", + rx_mq_mode, tx_mq_mode); + return -EINVAL; + } + + if (rx_mq_mode == ETH_MQ_RX_DCB_RSS) { + if (dcb_rx_conf->nb_tcs > pf->tc_max) { + hns3_err(hw, "nb_tcs(%u) > max_tc(%u) driver supported.", + dcb_rx_conf->nb_tcs, pf->tc_max); + return -EINVAL; + } + + if (!(dcb_rx_conf->nb_tcs == HNS3_4_TCS || + dcb_rx_conf->nb_tcs == HNS3_8_TCS)) { + hns3_err(hw, "on ETH_MQ_RX_DCB_RSS mode, " + "nb_tcs(%d) != %d or %d in rx direction.", + dcb_rx_conf->nb_tcs, HNS3_4_TCS, HNS3_8_TCS); + return -EINVAL; + } + + if (dcb_rx_conf->nb_tcs != dcb_tx_conf->nb_tcs) { + hns3_err(hw, "num_tcs(%d) of tx is not equal to rx(%d)", + dcb_tx_conf->nb_tcs, dcb_rx_conf->nb_tcs); + return -EINVAL; + } + + for (i = 0; i < HNS3_MAX_USER_PRIO; i++) { + if (dcb_rx_conf->dcb_tc[i] != dcb_tx_conf->dcb_tc[i]) { + hns3_err(hw, "dcb_tc[%d] = %d in rx direction, " + "is not equal to one in tx direction.", + i, dcb_rx_conf->dcb_tc[i]); + return -EINVAL; + } + if (dcb_rx_conf->dcb_tc[i] > max_tc) + max_tc = dcb_rx_conf->dcb_tc[i]; + } + + num_tc = max_tc + 1; + if (num_tc > dcb_rx_conf->nb_tcs) { + hns3_err(hw, "max num_tc(%u) mapped > nb_tcs(%u)", + num_tc, dcb_rx_conf->nb_tcs); + return -EINVAL; + } + } + + return 0; +} + +static int +hns3_check_dcb_cfg(struct rte_eth_dev *dev) +{ + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (!hns3_dev_dcb_supported(hw)) { + hns3_err(hw, "this port does not support dcb configurations."); + return -EOPNOTSUPP; + } + + if (hw->current_fc_status == HNS3_FC_STATUS_MAC_PAUSE) { + hns3_err(hw, "MAC pause enabled, cannot config dcb info."); + return -EOPNOTSUPP; + } + + /* Check multiple queue mode */ + return hns3_check_mq_mode(dev); +} + +static int +hns3_bind_ring_with_vector(struct hns3_hw *hw, uint8_t vector_id, bool mmap, + enum hns3_ring_type queue_type, uint16_t queue_id) +{ + struct hns3_cmd_desc desc; + struct hns3_ctrl_vector_chain_cmd *req = + (struct hns3_ctrl_vector_chain_cmd *)desc.data; + enum hns3_cmd_status status; + enum hns3_opcode_type op; + uint16_t tqp_type_and_id = 0; + const char *op_str; + uint16_t type; + uint16_t gl; + + op = mmap ? HNS3_OPC_ADD_RING_TO_VECTOR : HNS3_OPC_DEL_RING_TO_VECTOR; + hns3_cmd_setup_basic_desc(&desc, op, false); + req->int_vector_id = vector_id; + + if (queue_type == HNS3_RING_TYPE_RX) + gl = HNS3_RING_GL_RX; + else + gl = HNS3_RING_GL_TX; + + type = queue_type; + + hns3_set_field(tqp_type_and_id, HNS3_INT_TYPE_M, HNS3_INT_TYPE_S, + type); + hns3_set_field(tqp_type_and_id, HNS3_TQP_ID_M, HNS3_TQP_ID_S, queue_id); + hns3_set_field(tqp_type_and_id, HNS3_INT_GL_IDX_M, HNS3_INT_GL_IDX_S, + gl); + req->tqp_type_and_id[0] = rte_cpu_to_le_16(tqp_type_and_id); + req->int_cause_num = 1; + op_str = mmap ? "Map" : "Unmap"; + status = hns3_cmd_send(hw, &desc, 1); + if (status) { + hns3_err(hw, "%s TQP %d fail, vector_id is %d, status is %d.", + op_str, queue_id, req->int_vector_id, status); + return status; + } + + return 0; +} + +static int +hns3_init_ring_with_vector(struct hns3_hw *hw) +{ + uint8_t vec; + int ret; + int i; + + /* + * In hns3 network engine, vector 0 is always the misc interrupt of this + * function, vector 1~N can be used respectively for the queues of the + * function. Tx and Rx queues with the same number share the interrupt + * vector. 
In the initialization clearing the all hardware mapping + * relationship configurations between queues and interrupt vectors is + * needed, so some error caused by the residual configurations, such as + * the unexpected Tx interrupt, can be avoid. Because of the hardware + * constraints in hns3 hardware engine, we have to implement clearing + * the mapping relationship configurations by binding all queues to the + * last interrupt vector and reserving the last interrupt vector. This + * method results in a decrease of the maximum queues when upper + * applications call the rte_eth_dev_configure API function to enable + * Rx interrupt. + */ + vec = hw->num_msi - 1; /* vector 0 for misc interrupt, not for queue */ + /* vec - 1: the last interrupt is reserved */ + hw->intr_tqps_num = vec > hw->tqps_num ? hw->tqps_num : vec - 1; + for (i = 0; i < hw->intr_tqps_num; i++) { + /* + * Set gap limiter and rate limiter configuration of queue's + * interrupt. + */ + hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_RX, + HNS3_TQP_INTR_GL_DEFAULT); + hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_TX, + HNS3_TQP_INTR_GL_DEFAULT); + hns3_set_queue_intr_rl(hw, i, HNS3_TQP_INTR_RL_DEFAULT); + + ret = hns3_bind_ring_with_vector(hw, vec, false, + HNS3_RING_TYPE_TX, i); + if (ret) { + PMD_INIT_LOG(ERR, "PF fail to unbind TX ring(%d) with " + "vector: %d, ret=%d", i, vec, ret); + return ret; + } + + ret = hns3_bind_ring_with_vector(hw, vec, false, + HNS3_RING_TYPE_RX, i); + if (ret) { + PMD_INIT_LOG(ERR, "PF fail to unbind RX ring(%d) with " + "vector: %d, ret=%d", i, vec, ret); + return ret; + } + } + + return 0; +} + +static int +hns3_dev_configure(struct rte_eth_dev *dev) +{ + struct hns3_adapter *hns = dev->data->dev_private; + struct rte_eth_conf *conf = &dev->data->dev_conf; + enum rte_eth_rx_mq_mode mq_mode = conf->rxmode.mq_mode; + struct hns3_hw *hw = &hns->hw; + struct hns3_rss_conf *rss_cfg = &hw->rss_info; + uint16_t nb_rx_q = dev->data->nb_rx_queues; + uint16_t nb_tx_q = dev->data->nb_tx_queues; + struct rte_eth_rss_conf rss_conf; + uint16_t mtu; + int ret; + + /* + * Hardware does not support individually enable/disable/reset the Tx or + * Rx queue in hns3 network engine. Driver must enable/disable/reset Tx + * and Rx queues at the same time. When the numbers of Tx queues + * allocated by upper applications are not equal to the numbers of Rx + * queues, driver needs to setup fake Tx or Rx queues to adjust numbers + * of Tx/Rx queues. otherwise, network engine can not work as usual. But + * these fake queues are imperceptible, and can not be used by upper + * applications. + */ + ret = hns3_set_fake_rx_or_tx_queues(dev, nb_rx_q, nb_tx_q); + if (ret) { + hns3_err(hw, "Failed to set rx/tx fake queues: %d", ret); + return ret; + } + + hw->adapter_state = HNS3_NIC_CONFIGURING; + if (conf->link_speeds & ETH_LINK_SPEED_FIXED) { + hns3_err(hw, "setting link speed/duplex not supported"); + ret = -EINVAL; + goto cfg_err; + } + + if ((uint32_t)mq_mode & ETH_MQ_RX_DCB_FLAG) { + ret = hns3_check_dcb_cfg(dev); + if (ret) + goto cfg_err; + } + + /* When RSS is not configured, redirect the packet queue 0 */ + if ((uint32_t)mq_mode & ETH_MQ_RX_RSS_FLAG) { + rss_conf = conf->rx_adv_conf.rss_conf; + if (rss_conf.rss_key == NULL) { + rss_conf.rss_key = rss_cfg->key; + rss_conf.rss_key_len = HNS3_RSS_KEY_SIZE; + } + + ret = hns3_dev_rss_hash_update(dev, &rss_conf); + if (ret) + goto cfg_err; + } + + /* + * If jumbo frames are enabled, MTU needs to be refreshed + * according to the maximum RX packet length. 
+ */ + if (conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) { + /* + * Security of max_rx_pkt_len is guaranteed in dpdk frame. + * Maximum value of max_rx_pkt_len is HNS3_MAX_FRAME_LEN, so it + * can safely assign to "uint16_t" type variable. + */ + mtu = (uint16_t)HNS3_PKTLEN_TO_MTU(conf->rxmode.max_rx_pkt_len); + ret = hns3_dev_mtu_set(dev, mtu); + if (ret) + goto cfg_err; + dev->data->mtu = mtu; + } + + ret = hns3_dev_configure_vlan(dev); + if (ret) + goto cfg_err; + + hw->adapter_state = HNS3_NIC_CONFIGURED; + + return 0; + +cfg_err: + (void)hns3_set_fake_rx_or_tx_queues(dev, 0, 0); + hw->adapter_state = HNS3_NIC_INITIALIZED; + + return ret; +} + +static int +hns3_set_mac_mtu(struct hns3_hw *hw, uint16_t new_mps) +{ + struct hns3_config_max_frm_size_cmd *req; + struct hns3_cmd_desc desc; + + hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_MAX_FRM_SIZE, false); + + req = (struct hns3_config_max_frm_size_cmd *)desc.data; + req->max_frm_size = rte_cpu_to_le_16(new_mps); + req->min_frm_size = RTE_ETHER_MIN_LEN; + + return hns3_cmd_send(hw, &desc, 1); +} + +static int +hns3_config_mtu(struct hns3_hw *hw, uint16_t mps) +{ + int ret; + + ret = hns3_set_mac_mtu(hw, mps); + if (ret) { + hns3_err(hw, "Failed to set mtu, ret = %d", ret); + return ret; + } + + ret = hns3_buffer_alloc(hw); + if (ret) + hns3_err(hw, "Failed to allocate buffer, ret = %d", ret); + + return ret; +} + +static int +hns3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) +{ + struct hns3_adapter *hns = dev->data->dev_private; + uint32_t frame_size = mtu + HNS3_ETH_OVERHEAD; + struct hns3_hw *hw = &hns->hw; + bool is_jumbo_frame; + int ret; + + if (dev->data->dev_started) { + hns3_err(hw, "Failed to set mtu, port %u must be stopped " + "before configuration", dev->data->port_id); + return -EBUSY; + } + + rte_spinlock_lock(&hw->lock); + is_jumbo_frame = frame_size > RTE_ETHER_MAX_LEN ? true : false; + frame_size = RTE_MAX(frame_size, HNS3_DEFAULT_FRAME_LEN); + + /* + * Maximum value of frame_size is HNS3_MAX_FRAME_LEN, so it can safely + * assign to "uint16_t" type variable. + */ + ret = hns3_config_mtu(hw, (uint16_t)frame_size); + if (ret) { + rte_spinlock_unlock(&hw->lock); + hns3_err(hw, "Failed to set mtu, port %u mtu %u: %d", + dev->data->port_id, mtu, ret); + return ret; + } + hns->pf.mps = (uint16_t)frame_size; + if (is_jumbo_frame) + dev->data->dev_conf.rxmode.offloads |= + DEV_RX_OFFLOAD_JUMBO_FRAME; + else + dev->data->dev_conf.rxmode.offloads &= + ~DEV_RX_OFFLOAD_JUMBO_FRAME; + dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size; + rte_spinlock_unlock(&hw->lock); + + return 0; +} + +static int +hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info) +{ + struct hns3_adapter *hns = eth_dev->data->dev_private; + struct hns3_hw *hw = &hns->hw; + uint16_t queue_num = hw->tqps_num; + + /* + * In interrupt mode, 'max_rx_queues' is set based on the number of + * MSI-X interrupt resources of the hardware. 
+ */ + if (hw->data->dev_conf.intr_conf.rxq == 1) + queue_num = hw->intr_tqps_num; + + info->max_rx_queues = queue_num; + info->max_tx_queues = hw->tqps_num; + info->max_rx_pktlen = HNS3_MAX_FRAME_LEN; /* CRC included */ + info->min_rx_bufsize = hw->rx_buf_len; + info->max_mac_addrs = HNS3_UC_MACADDR_NUM; + info->max_mtu = info->max_rx_pktlen - HNS3_ETH_OVERHEAD; + info->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM | + DEV_RX_OFFLOAD_TCP_CKSUM | + DEV_RX_OFFLOAD_UDP_CKSUM | + DEV_RX_OFFLOAD_SCTP_CKSUM | + DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | + DEV_RX_OFFLOAD_OUTER_UDP_CKSUM | + DEV_RX_OFFLOAD_KEEP_CRC | + DEV_RX_OFFLOAD_SCATTER | + DEV_RX_OFFLOAD_VLAN_STRIP | + DEV_RX_OFFLOAD_QINQ_STRIP | + DEV_RX_OFFLOAD_VLAN_FILTER | + DEV_RX_OFFLOAD_VLAN_EXTEND | + DEV_RX_OFFLOAD_JUMBO_FRAME | + DEV_RX_OFFLOAD_RSS_HASH); + info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE; + info->tx_offload_capa = (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | + DEV_TX_OFFLOAD_IPV4_CKSUM | + DEV_TX_OFFLOAD_TCP_CKSUM | + DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_SCTP_CKSUM | + DEV_TX_OFFLOAD_VLAN_INSERT | + DEV_TX_OFFLOAD_QINQ_INSERT | + DEV_TX_OFFLOAD_MULTI_SEGS | + DEV_TX_OFFLOAD_TCP_TSO | + DEV_TX_OFFLOAD_VXLAN_TNL_TSO | + DEV_TX_OFFLOAD_GRE_TNL_TSO | + DEV_TX_OFFLOAD_GENEVE_TNL_TSO | + info->tx_queue_offload_capa); + + info->rx_desc_lim = (struct rte_eth_desc_lim) { + .nb_max = HNS3_MAX_RING_DESC, + .nb_min = HNS3_MIN_RING_DESC, + .nb_align = HNS3_ALIGN_RING_DESC, + }; + + info->tx_desc_lim = (struct rte_eth_desc_lim) { + .nb_max = HNS3_MAX_RING_DESC, + .nb_min = HNS3_MIN_RING_DESC, + .nb_align = HNS3_ALIGN_RING_DESC, + }; + + info->vmdq_queue_num = 0; + + info->reta_size = HNS3_RSS_IND_TBL_SIZE; + info->hash_key_size = HNS3_RSS_KEY_SIZE; + info->flow_type_rss_offloads = HNS3_ETH_RSS_SUPPORT; + + info->default_rxportconf.burst_size = HNS3_DEFAULT_PORT_CONF_BURST_SIZE; + info->default_txportconf.burst_size = HNS3_DEFAULT_PORT_CONF_BURST_SIZE; + info->default_rxportconf.nb_queues = HNS3_DEFAULT_PORT_CONF_QUEUES_NUM; + info->default_txportconf.nb_queues = HNS3_DEFAULT_PORT_CONF_QUEUES_NUM; + info->default_rxportconf.ring_size = HNS3_DEFAULT_RING_DESC; + info->default_txportconf.ring_size = HNS3_DEFAULT_RING_DESC; + + return 0; +} + +static int +hns3_fw_version_get(struct rte_eth_dev *eth_dev, char *fw_version, + size_t fw_size) +{ + struct hns3_adapter *hns = eth_dev->data->dev_private; + struct hns3_hw *hw = &hns->hw; + uint32_t version = hw->fw_version; + int ret; + + ret = snprintf(fw_version, fw_size, "%lu.%lu.%lu.%lu", + hns3_get_field(version, HNS3_FW_VERSION_BYTE3_M, + HNS3_FW_VERSION_BYTE3_S), + hns3_get_field(version, HNS3_FW_VERSION_BYTE2_M, + HNS3_FW_VERSION_BYTE2_S), + hns3_get_field(version, HNS3_FW_VERSION_BYTE1_M, + HNS3_FW_VERSION_BYTE1_S), + hns3_get_field(version, HNS3_FW_VERSION_BYTE0_M, + HNS3_FW_VERSION_BYTE0_S)); + ret += 1; /* add the size of '\0' */ + if (fw_size < (uint32_t)ret) + return ret; + else + return 0; +} + +static int +hns3_dev_link_update(struct rte_eth_dev *eth_dev, + __rte_unused int wait_to_complete) +{ + struct hns3_adapter *hns = eth_dev->data->dev_private; + struct hns3_hw *hw = &hns->hw; + struct hns3_mac *mac = &hw->mac; + struct rte_eth_link new_link; + + if (!hns3_is_reset_pending(hns)) { + hns3_update_speed_duplex(eth_dev); + hns3_update_link_status(hw); + } + + memset(&new_link, 0, sizeof(new_link)); + switch (mac->link_speed) { + case ETH_SPEED_NUM_10M: + case ETH_SPEED_NUM_100M: + case ETH_SPEED_NUM_1G: + case ETH_SPEED_NUM_10G: + case ETH_SPEED_NUM_25G: + case 
ETH_SPEED_NUM_40G: + case ETH_SPEED_NUM_50G: + case ETH_SPEED_NUM_100G: + new_link.link_speed = mac->link_speed; + break; + default: + new_link.link_speed = ETH_SPEED_NUM_100M; + break; + } + + new_link.link_duplex = mac->link_duplex; + new_link.link_status = mac->link_status ? ETH_LINK_UP : ETH_LINK_DOWN; + new_link.link_autoneg = + !(eth_dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED); + + return rte_eth_linkstatus_set(eth_dev, &new_link); +} + +static int +hns3_parse_func_status(struct hns3_hw *hw, struct hns3_func_status_cmd *status) +{ + struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); + struct hns3_pf *pf = &hns->pf; + + if (!(status->pf_state & HNS3_PF_STATE_DONE)) + return -EINVAL; + + pf->is_main_pf = (status->pf_state & HNS3_PF_STATE_MAIN) ? true : false; + + return 0; +} + +static int +hns3_query_function_status(struct hns3_hw *hw) +{ +#define HNS3_QUERY_MAX_CNT 10 +#define HNS3_QUERY_SLEEP_MSCOEND 1 + struct hns3_func_status_cmd *req; + struct hns3_cmd_desc desc; + int timeout = 0; + int ret; + + hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_FUNC_STATUS, true); + req = (struct hns3_func_status_cmd *)desc.data; + + do { + ret = hns3_cmd_send(hw, &desc, 1); + if (ret) { + PMD_INIT_LOG(ERR, "query function status failed %d", + ret); + return ret; + } + + /* Check pf reset is done */ + if (req->pf_state) + break; + + rte_delay_ms(HNS3_QUERY_SLEEP_MSCOEND); + } while (timeout++ < HNS3_QUERY_MAX_CNT); + + return hns3_parse_func_status(hw, req); +} + +static int +hns3_query_pf_resource(struct hns3_hw *hw) +{ + struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); + struct hns3_pf *pf = &hns->pf; + struct hns3_pf_res_cmd *req; + struct hns3_cmd_desc desc; + int ret; + + hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_PF_RSRC, true); + ret = hns3_cmd_send(hw, &desc, 1); + if (ret) { + PMD_INIT_LOG(ERR, "query pf resource failed %d", ret); + return ret; + } + + req = (struct hns3_pf_res_cmd *)desc.data; + hw->total_tqps_num = rte_le_to_cpu_16(req->tqp_num); + pf->pkt_buf_size = rte_le_to_cpu_16(req->buf_size) << HNS3_BUF_UNIT_S; + hw->tqps_num = RTE_MIN(hw->total_tqps_num, HNS3_MAX_TQP_NUM_PER_FUNC); + pf->func_num = rte_le_to_cpu_16(req->pf_own_fun_number); + + if (req->tx_buf_size) + pf->tx_buf_size = + rte_le_to_cpu_16(req->tx_buf_size) << HNS3_BUF_UNIT_S; + else + pf->tx_buf_size = HNS3_DEFAULT_TX_BUF; + + pf->tx_buf_size = roundup(pf->tx_buf_size, HNS3_BUF_SIZE_UNIT); + + if (req->dv_buf_size) + pf->dv_buf_size = + rte_le_to_cpu_16(req->dv_buf_size) << HNS3_BUF_UNIT_S; + else + pf->dv_buf_size = HNS3_DEFAULT_DV; + + pf->dv_buf_size = roundup(pf->dv_buf_size, HNS3_BUF_SIZE_UNIT); + + hw->num_msi = + hns3_get_field(rte_le_to_cpu_16(req->pf_intr_vector_number), + HNS3_VEC_NUM_M, HNS3_VEC_NUM_S); + + return 0; +} + +static void +hns3_parse_cfg(struct hns3_cfg *cfg, struct hns3_cmd_desc *desc) +{ + struct hns3_cfg_param_cmd *req; + uint64_t mac_addr_tmp_high; + uint64_t mac_addr_tmp; + uint32_t i; + + req = (struct hns3_cfg_param_cmd *)desc[0].data; + + /* get the configuration */ + cfg->vmdq_vport_num = hns3_get_field(rte_le_to_cpu_32(req->param[0]), + HNS3_CFG_VMDQ_M, HNS3_CFG_VMDQ_S); + cfg->tc_num = hns3_get_field(rte_le_to_cpu_32(req->param[0]), + HNS3_CFG_TC_NUM_M, HNS3_CFG_TC_NUM_S); + cfg->tqp_desc_num = hns3_get_field(rte_le_to_cpu_32(req->param[0]), + HNS3_CFG_TQP_DESC_N_M, + HNS3_CFG_TQP_DESC_N_S); + + cfg->phy_addr = hns3_get_field(rte_le_to_cpu_32(req->param[1]), + HNS3_CFG_PHY_ADDR_M, + HNS3_CFG_PHY_ADDR_S); + cfg->media_type = 
hns3_get_field(rte_le_to_cpu_32(req->param[1]), + HNS3_CFG_MEDIA_TP_M, + HNS3_CFG_MEDIA_TP_S); + cfg->rx_buf_len = hns3_get_field(rte_le_to_cpu_32(req->param[1]), + HNS3_CFG_RX_BUF_LEN_M, + HNS3_CFG_RX_BUF_LEN_S); + /* get mac address */ + mac_addr_tmp = rte_le_to_cpu_32(req->param[2]); + mac_addr_tmp_high = hns3_get_field(rte_le_to_cpu_32(req->param[3]), + HNS3_CFG_MAC_ADDR_H_M, + HNS3_CFG_MAC_ADDR_H_S); + + mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1; + + cfg->default_speed = hns3_get_field(rte_le_to_cpu_32(req->param[3]), + HNS3_CFG_DEFAULT_SPEED_M, + HNS3_CFG_DEFAULT_SPEED_S); + cfg->rss_size_max = hns3_get_field(rte_le_to_cpu_32(req->param[3]), + HNS3_CFG_RSS_SIZE_M, + HNS3_CFG_RSS_SIZE_S); + + for (i = 0; i < RTE_ETHER_ADDR_LEN; i++) + cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff; + + req = (struct hns3_cfg_param_cmd *)desc[1].data; + cfg->numa_node_map = rte_le_to_cpu_32(req->param[0]); + + cfg->speed_ability = hns3_get_field(rte_le_to_cpu_32(req->param[1]), + HNS3_CFG_SPEED_ABILITY_M, + HNS3_CFG_SPEED_ABILITY_S); + cfg->umv_space = hns3_get_field(rte_le_to_cpu_32(req->param[1]), + HNS3_CFG_UMV_TBL_SPACE_M, + HNS3_CFG_UMV_TBL_SPACE_S); + if (!cfg->umv_space) + cfg->umv_space = HNS3_DEFAULT_UMV_SPACE_PER_PF; +} + +/* hns3_get_board_cfg: query the static parameter from NCL_config file in flash + * @hw: pointer to struct hns3_hw + * @hcfg: the config structure to be getted + */ +static int +hns3_get_board_cfg(struct hns3_hw *hw, struct hns3_cfg *hcfg) +{ + struct hns3_cmd_desc desc[HNS3_PF_CFG_DESC_NUM]; + struct hns3_cfg_param_cmd *req; + uint32_t offset; + uint32_t i; + int ret; + + for (i = 0; i < HNS3_PF_CFG_DESC_NUM; i++) { + offset = 0; + req = (struct hns3_cfg_param_cmd *)desc[i].data; + hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_GET_CFG_PARAM, + true); + hns3_set_field(offset, HNS3_CFG_OFFSET_M, HNS3_CFG_OFFSET_S, + i * HNS3_CFG_RD_LEN_BYTES); + /* Len should be divided by 4 when send to hardware */ + hns3_set_field(offset, HNS3_CFG_RD_LEN_M, HNS3_CFG_RD_LEN_S, + HNS3_CFG_RD_LEN_BYTES / HNS3_CFG_RD_LEN_UNIT); + req->offset = rte_cpu_to_le_32(offset); + } + + ret = hns3_cmd_send(hw, desc, HNS3_PF_CFG_DESC_NUM); + if (ret) { + PMD_INIT_LOG(ERR, "get config failed %d.", ret); + return ret; + } + + hns3_parse_cfg(hcfg, desc); + + return 0; +} + +static int +hns3_parse_speed(int speed_cmd, uint32_t *speed) +{ + switch (speed_cmd) { + case HNS3_CFG_SPEED_10M: + *speed = ETH_SPEED_NUM_10M; + break; + case HNS3_CFG_SPEED_100M: + *speed = ETH_SPEED_NUM_100M; + break; + case HNS3_CFG_SPEED_1G: + *speed = ETH_SPEED_NUM_1G; + break; + case HNS3_CFG_SPEED_10G: + *speed = ETH_SPEED_NUM_10G; + break; + case HNS3_CFG_SPEED_25G: + *speed = ETH_SPEED_NUM_25G; + break; + case HNS3_CFG_SPEED_40G: + *speed = ETH_SPEED_NUM_40G; + break; + case HNS3_CFG_SPEED_50G: + *speed = ETH_SPEED_NUM_50G; + break; + case HNS3_CFG_SPEED_100G: + *speed = ETH_SPEED_NUM_100G; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int +hns3_get_board_configuration(struct hns3_hw *hw) +{ + struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); + struct hns3_pf *pf = &hns->pf; + struct hns3_cfg cfg; + int ret; + + ret = hns3_get_board_cfg(hw, &cfg); + if (ret) { + PMD_INIT_LOG(ERR, "get board config failed %d", ret); + return ret; + } + + if (cfg.media_type == HNS3_MEDIA_TYPE_COPPER) { + PMD_INIT_LOG(ERR, "media type is copper, not supported."); + return -EOPNOTSUPP; + } + + hw->mac.media_type = cfg.media_type; + hw->rss_size_max = cfg.rss_size_max; + hw->rss_dis_flag = false; + hw->rx_buf_len 
= cfg.rx_buf_len; + memcpy(hw->mac.mac_addr, cfg.mac_addr, RTE_ETHER_ADDR_LEN); + hw->mac.phy_addr = cfg.phy_addr; + hw->mac.default_addr_setted = false; + hw->num_tx_desc = cfg.tqp_desc_num; + hw->num_rx_desc = cfg.tqp_desc_num; + hw->dcb_info.num_pg = 1; + hw->dcb_info.hw_pfc_map = 0; + + ret = hns3_parse_speed(cfg.default_speed, &hw->mac.link_speed); + if (ret) { + PMD_INIT_LOG(ERR, "Get wrong speed %d, ret = %d", + cfg.default_speed, ret); + return ret; + } + + pf->tc_max = cfg.tc_num; + if (pf->tc_max > HNS3_MAX_TC_NUM || pf->tc_max < 1) { + PMD_INIT_LOG(WARNING, + "Get TC num(%u) from flash, set TC num to 1", + pf->tc_max); + pf->tc_max = 1; + } + + /* Dev does not support DCB */ + if (!hns3_dev_dcb_supported(hw)) { + pf->tc_max = 1; + pf->pfc_max = 0; + } else + pf->pfc_max = pf->tc_max; + + hw->dcb_info.num_tc = 1; + hw->alloc_rss_size = RTE_MIN(hw->rss_size_max, + hw->tqps_num / hw->dcb_info.num_tc); + hns3_set_bit(hw->hw_tc_map, 0, 1); + pf->tx_sch_mode = HNS3_FLAG_TC_BASE_SCH_MODE; + + pf->wanted_umv_size = cfg.umv_space; + + return ret; +} + +static int +hns3_get_configuration(struct hns3_hw *hw) +{ + int ret; + + ret = hns3_query_function_status(hw); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to query function status: %d.", ret); + return ret; + } + + /* Get pf resource */ + ret = hns3_query_pf_resource(hw); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to query pf resource: %d", ret); + return ret; + } + + ret = hns3_get_board_configuration(hw); + if (ret) + PMD_INIT_LOG(ERR, "Failed to get board configuration: %d", ret); + + return ret; +} + +static int +hns3_map_tqps_to_func(struct hns3_hw *hw, uint16_t func_id, uint16_t tqp_pid, + uint16_t tqp_vid, bool is_pf) +{ + struct hns3_tqp_map_cmd *req; + struct hns3_cmd_desc desc; + int ret; + + hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_SET_TQP_MAP, false); + + req = (struct hns3_tqp_map_cmd *)desc.data; + req->tqp_id = rte_cpu_to_le_16(tqp_pid); + req->tqp_vf = func_id; + req->tqp_flag = 1 << HNS3_TQP_MAP_EN_B; + if (!is_pf) + req->tqp_flag |= (1 << HNS3_TQP_MAP_TYPE_B); + req->tqp_vid = rte_cpu_to_le_16(tqp_vid); + + ret = hns3_cmd_send(hw, &desc, 1); + if (ret) + PMD_INIT_LOG(ERR, "TQP map failed %d", ret); + + return ret; +} + +static int +hns3_map_tqp(struct hns3_hw *hw) +{ + uint16_t tqps_num = hw->total_tqps_num; + uint16_t func_id; + uint16_t tqp_id; + bool is_pf; + int num; + int ret; + int i; + + /* + * In current version VF is not supported when PF is driven by DPDK + * driver, so we allocate tqps to PF as much as possible. + */ + tqp_id = 0; + num = DIV_ROUND_UP(hw->total_tqps_num, HNS3_MAX_TQP_NUM_PER_FUNC); + for (func_id = 0; func_id < num; func_id++) { + is_pf = func_id == 0 ? true : false; + for (i = 0; + i < HNS3_MAX_TQP_NUM_PER_FUNC && tqp_id < tqps_num; i++) { + ret = hns3_map_tqps_to_func(hw, func_id, tqp_id++, i, + is_pf); + if (ret) + return ret; + } + } + + return 0; +} + +static int +hns3_cfg_mac_speed_dup_hw(struct hns3_hw *hw, uint32_t speed, uint8_t duplex) +{ + struct hns3_config_mac_speed_dup_cmd *req; + struct hns3_cmd_desc desc; + int ret; + + req = (struct hns3_config_mac_speed_dup_cmd *)desc.data; + + hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_SPEED_DUP, false); + + hns3_set_bit(req->speed_dup, HNS3_CFG_DUPLEX_B, !!duplex ? 
1 : 0); + + switch (speed) { + case ETH_SPEED_NUM_10M: + hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, + HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_10M); + break; + case ETH_SPEED_NUM_100M: + hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, + HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_100M); + break; + case ETH_SPEED_NUM_1G: + hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, + HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_1G); + break; + case ETH_SPEED_NUM_10G: + hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, + HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_10G); + break; + case ETH_SPEED_NUM_25G: + hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, + HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_25G); + break; + case ETH_SPEED_NUM_40G: + hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, + HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_40G); + break; + case ETH_SPEED_NUM_50G: + hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, + HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_50G); + break; + case ETH_SPEED_NUM_100G: + hns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M, + HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_100G); + break; + default: + PMD_INIT_LOG(ERR, "invalid speed (%u)", speed); + return -EINVAL; + } + + hns3_set_bit(req->mac_change_fec_en, HNS3_CFG_MAC_SPEED_CHANGE_EN_B, 1); + + ret = hns3_cmd_send(hw, &desc, 1); + if (ret) + PMD_INIT_LOG(ERR, "mac speed/duplex config cmd failed %d", ret); + + return ret; +} + +static int +hns3_tx_buffer_calc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc) +{ + struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); + struct hns3_pf *pf = &hns->pf; + struct hns3_priv_buf *priv; + uint32_t i, total_size; + + total_size = pf->pkt_buf_size; + + /* alloc tx buffer for all enabled tc */ + for (i = 0; i < HNS3_MAX_TC_NUM; i++) { + priv = &buf_alloc->priv_buf[i]; + + if (hw->hw_tc_map & BIT(i)) { + if (total_size < pf->tx_buf_size) + return -ENOMEM; + + priv->tx_buf_size = pf->tx_buf_size; + } else + priv->tx_buf_size = 0; + + total_size -= priv->tx_buf_size; + } + + return 0; +} + +static int +hns3_tx_buffer_alloc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc) +{ +/* TX buffer size is unit by 128 byte */ +#define HNS3_BUF_SIZE_UNIT_SHIFT 7 +#define HNS3_BUF_SIZE_UPDATE_EN_MSK BIT(15) + struct hns3_tx_buff_alloc_cmd *req; + struct hns3_cmd_desc desc; + uint32_t buf_size; + uint32_t i; + int ret; + + req = (struct hns3_tx_buff_alloc_cmd *)desc.data; + + hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TX_BUFF_ALLOC, 0); + for (i = 0; i < HNS3_MAX_TC_NUM; i++) { + buf_size = buf_alloc->priv_buf[i].tx_buf_size; + + buf_size = buf_size >> HNS3_BUF_SIZE_UNIT_SHIFT; + req->tx_pkt_buff[i] = rte_cpu_to_le_16(buf_size | + HNS3_BUF_SIZE_UPDATE_EN_MSK); + } + + ret = hns3_cmd_send(hw, &desc, 1); + if (ret) + PMD_INIT_LOG(ERR, "tx buffer alloc cmd failed %d", ret); + + return ret; +} + +static int +hns3_get_tc_num(struct hns3_hw *hw) +{ + int cnt = 0; + uint8_t i; + + for (i = 0; i < HNS3_MAX_TC_NUM; i++) + if (hw->hw_tc_map & BIT(i)) + cnt++; + return cnt; +} + +static uint32_t +hns3_get_rx_priv_buff_alloced(struct hns3_pkt_buf_alloc *buf_alloc) +{ + struct hns3_priv_buf *priv; + uint32_t rx_priv = 0; + int i; + + for (i = 0; i < HNS3_MAX_TC_NUM; i++) { + priv = &buf_alloc->priv_buf[i]; + if (priv->enable) + rx_priv += priv->buf_size; + } + return rx_priv; +} + +static uint32_t +hns3_get_tx_buff_alloced(struct hns3_pkt_buf_alloc *buf_alloc) +{ + uint32_t total_tx_size = 0; + uint32_t i; + + for (i = 0; i < HNS3_MAX_TC_NUM; i++) + total_tx_size += buf_alloc->priv_buf[i].tx_buf_size; + + return total_tx_size; +} + +/* Get the number of pfc enabled 
TCs, which have private buffer */ +static int +hns3_get_pfc_priv_num(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc) +{ + struct hns3_priv_buf *priv; + int cnt = 0; + uint8_t i; + + for (i = 0; i < HNS3_MAX_TC_NUM; i++) { + priv = &buf_alloc->priv_buf[i]; + if ((hw->dcb_info.hw_pfc_map & BIT(i)) && priv->enable) + cnt++; + } + + return cnt; +} + +/* Get the number of pfc disabled TCs, which have private buffer */ +static int +hns3_get_no_pfc_priv_num(struct hns3_hw *hw, + struct hns3_pkt_buf_alloc *buf_alloc) +{ + struct hns3_priv_buf *priv; + int cnt = 0; + uint8_t i; + + for (i = 0; i < HNS3_MAX_TC_NUM; i++) { + priv = &buf_alloc->priv_buf[i]; + if (hw->hw_tc_map & BIT(i) && + !(hw->dcb_info.hw_pfc_map & BIT(i)) && priv->enable) + cnt++; + } + + return cnt; +} + +static bool +hns3_is_rx_buf_ok(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc, + uint32_t rx_all) +{ + uint32_t shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd; + struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); + struct hns3_pf *pf = &hns->pf; + uint32_t shared_buf, aligned_mps; + uint32_t rx_priv; + uint8_t tc_num; + uint8_t i; + + tc_num = hns3_get_tc_num(hw); + aligned_mps = roundup(pf->mps, HNS3_BUF_SIZE_UNIT); + + if (hns3_dev_dcb_supported(hw)) + shared_buf_min = HNS3_BUF_MUL_BY * aligned_mps + + pf->dv_buf_size; + else + shared_buf_min = aligned_mps + HNS3_NON_DCB_ADDITIONAL_BUF + + pf->dv_buf_size; + + shared_buf_tc = tc_num * aligned_mps + aligned_mps; + shared_std = roundup(max_t(uint32_t, shared_buf_min, shared_buf_tc), + HNS3_BUF_SIZE_UNIT); + + rx_priv = hns3_get_rx_priv_buff_alloced(buf_alloc); + if (rx_all < rx_priv + shared_std) + return false; + + shared_buf = rounddown(rx_all - rx_priv, HNS3_BUF_SIZE_UNIT); + buf_alloc->s_buf.buf_size = shared_buf; + if (hns3_dev_dcb_supported(hw)) { + buf_alloc->s_buf.self.high = shared_buf - pf->dv_buf_size; + buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high + - roundup(aligned_mps / HNS3_BUF_DIV_BY, + HNS3_BUF_SIZE_UNIT); + } else { + buf_alloc->s_buf.self.high = + aligned_mps + HNS3_NON_DCB_ADDITIONAL_BUF; + buf_alloc->s_buf.self.low = aligned_mps; + } + + if (hns3_dev_dcb_supported(hw)) { + hi_thrd = shared_buf - pf->dv_buf_size; + + if (tc_num <= NEED_RESERVE_TC_NUM) + hi_thrd = hi_thrd * BUF_RESERVE_PERCENT + / BUF_MAX_PERCENT; + + if (tc_num) + hi_thrd = hi_thrd / tc_num; + + hi_thrd = max_t(uint32_t, hi_thrd, + HNS3_BUF_MUL_BY * aligned_mps); + hi_thrd = rounddown(hi_thrd, HNS3_BUF_SIZE_UNIT); + lo_thrd = hi_thrd - aligned_mps / HNS3_BUF_DIV_BY; + } else { + hi_thrd = aligned_mps + HNS3_NON_DCB_ADDITIONAL_BUF; + lo_thrd = aligned_mps; + } + + for (i = 0; i < HNS3_MAX_TC_NUM; i++) { + buf_alloc->s_buf.tc_thrd[i].low = lo_thrd; + buf_alloc->s_buf.tc_thrd[i].high = hi_thrd; + } + + return true; +} + +static bool +hns3_rx_buf_calc_all(struct hns3_hw *hw, bool max, + struct hns3_pkt_buf_alloc *buf_alloc) +{ + struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); + struct hns3_pf *pf = &hns->pf; + struct hns3_priv_buf *priv; + uint32_t aligned_mps; + uint32_t rx_all; + uint8_t i; + + rx_all = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc); + aligned_mps = roundup(pf->mps, HNS3_BUF_SIZE_UNIT); + + for (i = 0; i < HNS3_MAX_TC_NUM; i++) { + priv = &buf_alloc->priv_buf[i]; + + priv->enable = 0; + priv->wl.low = 0; + priv->wl.high = 0; + priv->buf_size = 0; + + if (!(hw->hw_tc_map & BIT(i))) + continue; + + priv->enable = 1; + if (hw->dcb_info.hw_pfc_map & BIT(i)) { + priv->wl.low = max ? 
aligned_mps : HNS3_BUF_SIZE_UNIT; + priv->wl.high = roundup(priv->wl.low + aligned_mps, + HNS3_BUF_SIZE_UNIT); + } else { + priv->wl.low = 0; + priv->wl.high = max ? (aligned_mps * HNS3_BUF_MUL_BY) : + aligned_mps; + } + + priv->buf_size = priv->wl.high + pf->dv_buf_size; + } + + return hns3_is_rx_buf_ok(hw, buf_alloc, rx_all); +} + +static bool +hns3_drop_nopfc_buf_till_fit(struct hns3_hw *hw, + struct hns3_pkt_buf_alloc *buf_alloc) +{ + struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); + struct hns3_pf *pf = &hns->pf; + struct hns3_priv_buf *priv; + int no_pfc_priv_num; + uint32_t rx_all; + uint8_t mask; + int i; + + rx_all = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc); + no_pfc_priv_num = hns3_get_no_pfc_priv_num(hw, buf_alloc); + + /* let the last to be cleared first */ + for (i = HNS3_MAX_TC_NUM - 1; i >= 0; i--) { + priv = &buf_alloc->priv_buf[i]; + mask = BIT((uint8_t)i); + + if (hw->hw_tc_map & mask && + !(hw->dcb_info.hw_pfc_map & mask)) { + /* Clear the no pfc TC private buffer */ + priv->wl.low = 0; + priv->wl.high = 0; + priv->buf_size = 0; + priv->enable = 0; + no_pfc_priv_num--; + } + + if (hns3_is_rx_buf_ok(hw, buf_alloc, rx_all) || + no_pfc_priv_num == 0) + break; + } + + return hns3_is_rx_buf_ok(hw, buf_alloc, rx_all); +} + +static bool +hns3_drop_pfc_buf_till_fit(struct hns3_hw *hw, + struct hns3_pkt_buf_alloc *buf_alloc) +{ + struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); + struct hns3_pf *pf = &hns->pf; + struct hns3_priv_buf *priv; + uint32_t rx_all; + int pfc_priv_num; + uint8_t mask; + int i; + + rx_all = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc); + pfc_priv_num = hns3_get_pfc_priv_num(hw, buf_alloc); + + /* let the last to be cleared first */ + for (i = HNS3_MAX_TC_NUM - 1; i >= 0; i--) { + priv = &buf_alloc->priv_buf[i]; + mask = BIT((uint8_t)i); + + if (hw->hw_tc_map & mask && + hw->dcb_info.hw_pfc_map & mask) { + /* Reduce the number of pfc TC with private buffer */ + priv->wl.low = 0; + priv->enable = 0; + priv->wl.high = 0; + priv->buf_size = 0; + pfc_priv_num--; + } + if (hns3_is_rx_buf_ok(hw, buf_alloc, rx_all) || + pfc_priv_num == 0) + break; + } + + return hns3_is_rx_buf_ok(hw, buf_alloc, rx_all); +} + +static bool +hns3_only_alloc_priv_buff(struct hns3_hw *hw, + struct hns3_pkt_buf_alloc *buf_alloc) +{ +#define COMPENSATE_BUFFER 0x3C00 +#define COMPENSATE_HALF_MPS_NUM 5 +#define PRIV_WL_GAP 0x1800 + struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); + struct hns3_pf *pf = &hns->pf; + uint32_t tc_num = hns3_get_tc_num(hw); + uint32_t half_mps = pf->mps >> 1; + struct hns3_priv_buf *priv; + uint32_t min_rx_priv; + uint32_t rx_priv; + uint8_t i; + + rx_priv = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc); + if (tc_num) + rx_priv = rx_priv / tc_num; + + if (tc_num <= NEED_RESERVE_TC_NUM) + rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT; + + /* + * Minimum value of private buffer in rx direction (min_rx_priv) is + * equal to "DV + 2.5 * MPS + 15KB". Driver only allocates rx private + * buffer if rx_priv is greater than min_rx_priv. 
+ */
+ min_rx_priv = pf->dv_buf_size + COMPENSATE_BUFFER +
+ COMPENSATE_HALF_MPS_NUM * half_mps;
+ min_rx_priv = roundup(min_rx_priv, HNS3_BUF_SIZE_UNIT);
+ rx_priv = rounddown(rx_priv, HNS3_BUF_SIZE_UNIT);
+
+ if (rx_priv < min_rx_priv)
+ return false;
+
+ for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
+ priv = &buf_alloc->priv_buf[i];
+
+ priv->enable = 0;
+ priv->wl.low = 0;
+ priv->wl.high = 0;
+ priv->buf_size = 0;
+
+ if (!(hw->hw_tc_map & BIT(i)))
+ continue;
+
+ priv->enable = 1;
+ priv->buf_size = rx_priv;
+ priv->wl.high = rx_priv - pf->dv_buf_size;
+ priv->wl.low = priv->wl.high - PRIV_WL_GAP;
+ }
+
+ buf_alloc->s_buf.buf_size = 0;
+
+ return true;
+}
+
+/*
+ * hns3_rx_buffer_calc: calculate the rx private buffer size for all TCs
+ * @hw: pointer to struct hns3_hw
+ * @buf_alloc: pointer to buffer calculation data
+ * @return: 0: calculation successful, negative: fail
+ */
+static int
+hns3_rx_buffer_calc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)
+{
+ /* When DCB is not supported, rx private buffer is not allocated. */
+ if (!hns3_dev_dcb_supported(hw)) {
+ struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
+ struct hns3_pf *pf = &hns->pf;
+ uint32_t rx_all = pf->pkt_buf_size;
+
+ rx_all -= hns3_get_tx_buff_alloced(buf_alloc);
+ if (!hns3_is_rx_buf_ok(hw, buf_alloc, rx_all))
+ return -ENOMEM;
+
+ return 0;
+ }
+
+ /*
+ * Try to allocate private packet buffer for all TCs without a shared
+ * buffer.
+ */
+ if (hns3_only_alloc_priv_buff(hw, buf_alloc))
+ return 0;
+
+ /*
+ * Try to allocate private packet buffer for all TCs with a shared
+ * buffer.
+ */
+ if (hns3_rx_buf_calc_all(hw, true, buf_alloc))
+ return 0;
+
+ /*
+ * For different application scenarios, the enabled port number, TC
+ * number and no_drop TC number are different. In order to obtain better
+ * performance, software can allocate the buffer size and configure the
+ * waterline by trying to decrease the private buffer size in the
+ * following order: waterline of valid TCs, then PFC-disabled TCs, then
+ * PFC-enabled TCs.
+ */ + if (hns3_rx_buf_calc_all(hw, false, buf_alloc)) + return 0; + + if (hns3_drop_nopfc_buf_till_fit(hw, buf_alloc)) + return 0; + + if (hns3_drop_pfc_buf_till_fit(hw, buf_alloc)) + return 0; + + return -ENOMEM; +} + +static int +hns3_rx_priv_buf_alloc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc) +{ + struct hns3_rx_priv_buff_cmd *req; + struct hns3_cmd_desc desc; + uint32_t buf_size; + int ret; + int i; + + hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RX_PRIV_BUFF_ALLOC, false); + req = (struct hns3_rx_priv_buff_cmd *)desc.data; + + /* Alloc private buffer TCs */ + for (i = 0; i < HNS3_MAX_TC_NUM; i++) { + struct hns3_priv_buf *priv = &buf_alloc->priv_buf[i]; + + req->buf_num[i] = + rte_cpu_to_le_16(priv->buf_size >> HNS3_BUF_UNIT_S); + req->buf_num[i] |= rte_cpu_to_le_16(1 << HNS3_TC0_PRI_BUF_EN_B); + } + + buf_size = buf_alloc->s_buf.buf_size; + req->shared_buf = rte_cpu_to_le_16((buf_size >> HNS3_BUF_UNIT_S) | + (1 << HNS3_TC0_PRI_BUF_EN_B)); + + ret = hns3_cmd_send(hw, &desc, 1); + if (ret) + PMD_INIT_LOG(ERR, "rx private buffer alloc cmd failed %d", ret); + + return ret; +} + +static int +hns3_rx_priv_wl_config(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc) +{ +#define HNS3_RX_PRIV_WL_ALLOC_DESC_NUM 2 + struct hns3_rx_priv_wl_buf *req; + struct hns3_priv_buf *priv; + struct hns3_cmd_desc desc[HNS3_RX_PRIV_WL_ALLOC_DESC_NUM]; + int i, j; + int ret; + + for (i = 0; i < HNS3_RX_PRIV_WL_ALLOC_DESC_NUM; i++) { + hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_RX_PRIV_WL_ALLOC, + false); + req = (struct hns3_rx_priv_wl_buf *)desc[i].data; + + /* The first descriptor set the NEXT bit to 1 */ + if (i == 0) + desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); + else + desc[i].flag &= ~rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); + + for (j = 0; j < HNS3_TC_NUM_ONE_DESC; j++) { + uint32_t idx = i * HNS3_TC_NUM_ONE_DESC + j; + + priv = &buf_alloc->priv_buf[idx]; + req->tc_wl[j].high = rte_cpu_to_le_16(priv->wl.high >> + HNS3_BUF_UNIT_S); + req->tc_wl[j].high |= + rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B)); + req->tc_wl[j].low = rte_cpu_to_le_16(priv->wl.low >> + HNS3_BUF_UNIT_S); + req->tc_wl[j].low |= + rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B)); + } + } + + /* Send 2 descriptor at one time */ + ret = hns3_cmd_send(hw, desc, HNS3_RX_PRIV_WL_ALLOC_DESC_NUM); + if (ret) + PMD_INIT_LOG(ERR, "rx private waterline config cmd failed %d", + ret); + return ret; +} + +static int +hns3_common_thrd_config(struct hns3_hw *hw, + struct hns3_pkt_buf_alloc *buf_alloc) +{ +#define HNS3_RX_COM_THRD_ALLOC_DESC_NUM 2 + struct hns3_shared_buf *s_buf = &buf_alloc->s_buf; + struct hns3_rx_com_thrd *req; + struct hns3_cmd_desc desc[HNS3_RX_COM_THRD_ALLOC_DESC_NUM]; + struct hns3_tc_thrd *tc; + int tc_idx; + int i, j; + int ret; + + for (i = 0; i < HNS3_RX_COM_THRD_ALLOC_DESC_NUM; i++) { + hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_RX_COM_THRD_ALLOC, + false); + req = (struct hns3_rx_com_thrd *)&desc[i].data; + + /* The first descriptor set the NEXT bit to 1 */ + if (i == 0) + desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); + else + desc[i].flag &= ~rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); + + for (j = 0; j < HNS3_TC_NUM_ONE_DESC; j++) { + tc_idx = i * HNS3_TC_NUM_ONE_DESC + j; + tc = &s_buf->tc_thrd[tc_idx]; + + req->com_thrd[j].high = + rte_cpu_to_le_16(tc->high >> HNS3_BUF_UNIT_S); + req->com_thrd[j].high |= + rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B)); + req->com_thrd[j].low = + rte_cpu_to_le_16(tc->low >> HNS3_BUF_UNIT_S); + req->com_thrd[j].low |= + rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B)); + } 
+ } + + /* Send 2 descriptors at one time */ + ret = hns3_cmd_send(hw, desc, HNS3_RX_COM_THRD_ALLOC_DESC_NUM); + if (ret) + PMD_INIT_LOG(ERR, "common threshold config cmd failed %d", ret); + + return ret; +} + +static int +hns3_common_wl_config(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc) +{ + struct hns3_shared_buf *buf = &buf_alloc->s_buf; + struct hns3_rx_com_wl *req; + struct hns3_cmd_desc desc; + int ret; + + hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RX_COM_WL_ALLOC, false); + + req = (struct hns3_rx_com_wl *)desc.data; + req->com_wl.high = rte_cpu_to_le_16(buf->self.high >> HNS3_BUF_UNIT_S); + req->com_wl.high |= rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B)); + + req->com_wl.low = rte_cpu_to_le_16(buf->self.low >> HNS3_BUF_UNIT_S); + req->com_wl.low |= rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B)); + + ret = hns3_cmd_send(hw, &desc, 1); + if (ret) + PMD_INIT_LOG(ERR, "common waterline config cmd failed %d", ret); + + return ret; +} + +int +hns3_buffer_alloc(struct hns3_hw *hw) +{ + struct hns3_pkt_buf_alloc pkt_buf; + int ret; + + memset(&pkt_buf, 0, sizeof(pkt_buf)); + ret = hns3_tx_buffer_calc(hw, &pkt_buf); + if (ret) { + PMD_INIT_LOG(ERR, + "could not calc tx buffer size for all TCs %d", + ret); + return ret; + } + + ret = hns3_tx_buffer_alloc(hw, &pkt_buf); + if (ret) { + PMD_INIT_LOG(ERR, "could not alloc tx buffers %d", ret); + return ret; + } + + ret = hns3_rx_buffer_calc(hw, &pkt_buf); + if (ret) { + PMD_INIT_LOG(ERR, + "could not calc rx priv buffer size for all TCs %d", + ret); + return ret; + } + + ret = hns3_rx_priv_buf_alloc(hw, &pkt_buf); + if (ret) { + PMD_INIT_LOG(ERR, "could not alloc rx priv buffer %d", ret); + return ret; + } + + if (hns3_dev_dcb_supported(hw)) { + ret = hns3_rx_priv_wl_config(hw, &pkt_buf); + if (ret) { + PMD_INIT_LOG(ERR, + "could not configure rx private waterline %d", + ret); + return ret; + } + + ret = hns3_common_thrd_config(hw, &pkt_buf); + if (ret) { + PMD_INIT_LOG(ERR, + "could not configure common threshold %d", + ret); + return ret; + } + } + + ret = hns3_common_wl_config(hw, &pkt_buf); + if (ret) + PMD_INIT_LOG(ERR, "could not configure common waterline %d", + ret); + + return ret; +} + +static int +hns3_mac_init(struct hns3_hw *hw) +{ + struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); + struct hns3_mac *mac = &hw->mac; + struct hns3_pf *pf = &hns->pf; + int ret; + + pf->support_sfp_query = true; + mac->link_duplex = ETH_LINK_FULL_DUPLEX; + ret = hns3_cfg_mac_speed_dup_hw(hw, mac->link_speed, mac->link_duplex); + if (ret) { + PMD_INIT_LOG(ERR, "Config mac speed dup fail ret = %d", ret); + return ret; + } + + mac->link_status = ETH_LINK_DOWN; + + return hns3_config_mtu(hw, pf->mps); +} + +static int +hns3_get_mac_ethertype_cmd_status(uint16_t cmdq_resp, uint8_t resp_code) +{ +#define HNS3_ETHERTYPE_SUCCESS_ADD 0 +#define HNS3_ETHERTYPE_ALREADY_ADD 1 +#define HNS3_ETHERTYPE_MGR_TBL_OVERFLOW 2 +#define HNS3_ETHERTYPE_KEY_CONFLICT 3 + int return_status; + + if (cmdq_resp) { + PMD_INIT_LOG(ERR, + "cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n", + cmdq_resp); + return -EIO; + } + + switch (resp_code) { + case HNS3_ETHERTYPE_SUCCESS_ADD: + case HNS3_ETHERTYPE_ALREADY_ADD: + return_status = 0; + break; + case HNS3_ETHERTYPE_MGR_TBL_OVERFLOW: + PMD_INIT_LOG(ERR, + "add mac ethertype failed for manager table overflow."); + return_status = -EIO; + break; + case HNS3_ETHERTYPE_KEY_CONFLICT: + PMD_INIT_LOG(ERR, "add mac ethertype failed for key conflict."); + return_status = -EIO; + break; + default: + 
PMD_INIT_LOG(ERR, + "add mac ethertype failed for undefined, code=%d.", + resp_code); + return_status = -EIO; + break; + } + + return return_status; +} + +static int +hns3_add_mgr_tbl(struct hns3_hw *hw, + const struct hns3_mac_mgr_tbl_entry_cmd *req) +{ + struct hns3_cmd_desc desc; + uint8_t resp_code; + uint16_t retval; + int ret; + + hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_ETHTYPE_ADD, false); + memcpy(desc.data, req, sizeof(struct hns3_mac_mgr_tbl_entry_cmd)); + + ret = hns3_cmd_send(hw, &desc, 1); + if (ret) { + PMD_INIT_LOG(ERR, + "add mac ethertype failed for cmd_send, ret =%d.", + ret); + return ret; + } + + resp_code = (rte_le_to_cpu_32(desc.data[0]) >> 8) & 0xff; + retval = rte_le_to_cpu_16(desc.retval); + + return hns3_get_mac_ethertype_cmd_status(retval, resp_code); +} + +static void +hns3_prepare_mgr_tbl(struct hns3_mac_mgr_tbl_entry_cmd *mgr_table, + int *table_item_num) +{ + struct hns3_mac_mgr_tbl_entry_cmd *tbl; + + /* + * In current version, we add one item in management table as below: + * 0x0180C200000E -- LLDP MC address + */ + tbl = mgr_table; + tbl->flags = HNS3_MAC_MGR_MASK_VLAN_B; + tbl->ethter_type = rte_cpu_to_le_16(HNS3_MAC_ETHERTYPE_LLDP); + tbl->mac_addr_hi32 = rte_cpu_to_le_32(htonl(0x0180C200)); + tbl->mac_addr_lo16 = rte_cpu_to_le_16(htons(0x000E)); + tbl->i_port_bitmap = 0x1; + *table_item_num = 1; +} + +static int +hns3_init_mgr_tbl(struct hns3_hw *hw) +{ +#define HNS_MAC_MGR_TBL_MAX_SIZE 16 + struct hns3_mac_mgr_tbl_entry_cmd mgr_table[HNS_MAC_MGR_TBL_MAX_SIZE]; + int table_item_num; + int ret; + int i; + + memset(mgr_table, 0, sizeof(mgr_table)); + hns3_prepare_mgr_tbl(mgr_table, &table_item_num); + for (i = 0; i < table_item_num; i++) { + ret = hns3_add_mgr_tbl(hw, &mgr_table[i]); + if (ret) { + PMD_INIT_LOG(ERR, "add mac ethertype failed, ret =%d", + ret); + return ret; + } + } + + return 0; +} + +static void +hns3_promisc_param_init(struct hns3_promisc_param *param, bool en_uc, + bool en_mc, bool en_bc, int vport_id) +{ + if (!param) + return; + + memset(param, 0, sizeof(struct hns3_promisc_param)); + if (en_uc) + param->enable = HNS3_PROMISC_EN_UC; + if (en_mc) + param->enable |= HNS3_PROMISC_EN_MC; + if (en_bc) + param->enable |= HNS3_PROMISC_EN_BC; + param->vf_id = vport_id; +} + +static int +hns3_cmd_set_promisc_mode(struct hns3_hw *hw, struct hns3_promisc_param *param) +{ + struct hns3_promisc_cfg_cmd *req; + struct hns3_cmd_desc desc; + int ret; + + hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_PROMISC_MODE, false); + + req = (struct hns3_promisc_cfg_cmd *)desc.data; + req->vf_id = param->vf_id; + req->flag = (param->enable << HNS3_PROMISC_EN_B) | + HNS3_PROMISC_TX_EN_B | HNS3_PROMISC_RX_EN_B; + + ret = hns3_cmd_send(hw, &desc, 1); + if (ret) + PMD_INIT_LOG(ERR, "Set promisc mode fail, ret = %d", ret); + + return ret; +} + +static int +hns3_set_promisc_mode(struct hns3_hw *hw, bool en_uc_pmc, bool en_mc_pmc) +{ + struct hns3_promisc_param param; + bool en_bc_pmc = true; + uint8_t vf_id; + + /* + * In current version VF is not supported when PF is driven by DPDK + * driver, the PF-related vf_id is 0, just need to configure parameters + * for vf_id 0. 
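+	 * Broadcast promiscuous (en_bc_pmc) is fixed to true above, so
+	 * broadcast frames are always received regardless of the
+	 * unicast/multicast promiscuous settings requested here.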
+ */ + vf_id = 0; + + hns3_promisc_param_init(¶m, en_uc_pmc, en_mc_pmc, en_bc_pmc, vf_id); + return hns3_cmd_set_promisc_mode(hw, ¶m); +} + +static int +hns3_clear_all_vfs_promisc_mode(struct hns3_hw *hw) +{ + struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); + struct hns3_pf *pf = &hns->pf; + struct hns3_promisc_param param; + uint16_t func_id; + int ret; + + /* func_id 0 is denoted PF, the VFs start from 1 */ + for (func_id = 1; func_id < pf->func_num; func_id++) { + hns3_promisc_param_init(¶m, false, false, false, func_id); + ret = hns3_cmd_set_promisc_mode(hw, ¶m); + if (ret) + return ret; + } + + return 0; +} + +static int +hns3_dev_promiscuous_enable(struct rte_eth_dev *dev) +{ + bool allmulti = dev->data->all_multicast ? true : false; + struct hns3_adapter *hns = dev->data->dev_private; + struct hns3_hw *hw = &hns->hw; + uint64_t offloads; + int err; + int ret; + + rte_spinlock_lock(&hw->lock); + ret = hns3_set_promisc_mode(hw, true, true); + if (ret) { + rte_spinlock_unlock(&hw->lock); + hns3_err(hw, "failed to enable promiscuous mode, ret = %d", + ret); + return ret; + } + + /* + * When promiscuous mode was enabled, disable the vlan filter to let + * all packets coming in in the receiving direction. + */ + offloads = dev->data->dev_conf.rxmode.offloads; + if (offloads & DEV_RX_OFFLOAD_VLAN_FILTER) { + ret = hns3_enable_vlan_filter(hns, false); + if (ret) { + hns3_err(hw, "failed to enable promiscuous mode due to " + "failure to disable vlan filter, ret = %d", + ret); + err = hns3_set_promisc_mode(hw, false, allmulti); + if (err) + hns3_err(hw, "failed to restore promiscuous " + "status after disable vlan filter " + "failed during enabling promiscuous " + "mode, ret = %d", ret); + } + } + + rte_spinlock_unlock(&hw->lock); + + return ret; +} + +static int +hns3_dev_promiscuous_disable(struct rte_eth_dev *dev) +{ + bool allmulti = dev->data->all_multicast ? true : false; + struct hns3_adapter *hns = dev->data->dev_private; + struct hns3_hw *hw = &hns->hw; + uint64_t offloads; + int err; + int ret; + + /* If now in all_multicast mode, must remain in all_multicast mode. 
*/ + rte_spinlock_lock(&hw->lock); + ret = hns3_set_promisc_mode(hw, false, allmulti); + if (ret) { + rte_spinlock_unlock(&hw->lock); + hns3_err(hw, "failed to disable promiscuous mode, ret = %d", + ret); + return ret; + } + /* when promiscuous mode was disabled, restore the vlan filter status */ + offloads = dev->data->dev_conf.rxmode.offloads; + if (offloads & DEV_RX_OFFLOAD_VLAN_FILTER) { + ret = hns3_enable_vlan_filter(hns, true); + if (ret) { + hns3_err(hw, "failed to disable promiscuous mode due to" + " failure to restore vlan filter, ret = %d", + ret); + err = hns3_set_promisc_mode(hw, true, true); + if (err) + hns3_err(hw, "failed to restore promiscuous " + "status after enabling vlan filter " + "failed during disabling promiscuous " + "mode, ret = %d", ret); + } + } + rte_spinlock_unlock(&hw->lock); + + return ret; +} + +static int +hns3_dev_allmulticast_enable(struct rte_eth_dev *dev) +{ + struct hns3_adapter *hns = dev->data->dev_private; + struct hns3_hw *hw = &hns->hw; + int ret; + + if (dev->data->promiscuous) + return 0; + + rte_spinlock_lock(&hw->lock); + ret = hns3_set_promisc_mode(hw, false, true); + rte_spinlock_unlock(&hw->lock); + if (ret) + hns3_err(hw, "failed to enable allmulticast mode, ret = %d", + ret); + + return ret; +} + +static int +hns3_dev_allmulticast_disable(struct rte_eth_dev *dev) +{ + struct hns3_adapter *hns = dev->data->dev_private; + struct hns3_hw *hw = &hns->hw; + int ret; + + /* If now in promiscuous mode, must remain in all_multicast mode. */ + if (dev->data->promiscuous) + return 0; + + rte_spinlock_lock(&hw->lock); + ret = hns3_set_promisc_mode(hw, false, false); + rte_spinlock_unlock(&hw->lock); + if (ret) + hns3_err(hw, "failed to disable allmulticast mode, ret = %d", + ret); + + return ret; +} + +static int +hns3_dev_promisc_restore(struct hns3_adapter *hns) +{ + struct hns3_hw *hw = &hns->hw; + bool allmulti = hw->data->all_multicast ? 
true : false; + int ret; + + if (hw->data->promiscuous) { + ret = hns3_set_promisc_mode(hw, true, true); + if (ret) + hns3_err(hw, "failed to restore promiscuous mode, " + "ret = %d", ret); + return ret; + } + + ret = hns3_set_promisc_mode(hw, false, allmulti); + if (ret) + hns3_err(hw, "failed to restore allmulticast mode, ret = %d", + ret); + return ret; +} + +static int +hns3_get_sfp_speed(struct hns3_hw *hw, uint32_t *speed) +{ + struct hns3_sfp_speed_cmd *resp; + struct hns3_cmd_desc desc; + int ret; + + hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_SFP_GET_SPEED, true); + resp = (struct hns3_sfp_speed_cmd *)desc.data; + ret = hns3_cmd_send(hw, &desc, 1); + if (ret == -EOPNOTSUPP) { + hns3_err(hw, "IMP do not support get SFP speed %d", ret); + return ret; + } else if (ret) { + hns3_err(hw, "get sfp speed failed %d", ret); + return ret; + } + + *speed = resp->sfp_speed; + + return 0; +} + +static uint8_t +hns3_check_speed_dup(uint8_t duplex, uint32_t speed) +{ + if (!(speed == ETH_SPEED_NUM_10M || speed == ETH_SPEED_NUM_100M)) + duplex = ETH_LINK_FULL_DUPLEX; + + return duplex; +} + +static int +hns3_cfg_mac_speed_dup(struct hns3_hw *hw, uint32_t speed, uint8_t duplex) +{ + struct hns3_mac *mac = &hw->mac; + int ret; + + duplex = hns3_check_speed_dup(duplex, speed); + if (mac->link_speed == speed && mac->link_duplex == duplex) + return 0; + + ret = hns3_cfg_mac_speed_dup_hw(hw, speed, duplex); + if (ret) + return ret; + + mac->link_speed = speed; + mac->link_duplex = duplex; + + return 0; +} + +static int +hns3_update_speed_duplex(struct rte_eth_dev *eth_dev) +{ + struct hns3_adapter *hns = eth_dev->data->dev_private; + struct hns3_hw *hw = &hns->hw; + struct hns3_pf *pf = &hns->pf; + uint32_t speed; + int ret; + + /* If IMP do not support get SFP/qSFP speed, return directly */ + if (!pf->support_sfp_query) + return 0; + + ret = hns3_get_sfp_speed(hw, &speed); + if (ret == -EOPNOTSUPP) { + pf->support_sfp_query = false; + return ret; + } else if (ret) + return ret; + + if (speed == ETH_SPEED_NUM_NONE) + return 0; /* do nothing if no SFP */ + + /* Config full duplex for SFP */ + return hns3_cfg_mac_speed_dup(hw, speed, ETH_LINK_FULL_DUPLEX); +} + +static int +hns3_cfg_mac_mode(struct hns3_hw *hw, bool enable) +{ + struct hns3_config_mac_mode_cmd *req; + struct hns3_cmd_desc desc; + uint32_t loop_en = 0; + uint8_t val = 0; + int ret; + + req = (struct hns3_config_mac_mode_cmd *)desc.data; + + hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_MAC_MODE, false); + if (enable) + val = 1; + hns3_set_bit(loop_en, HNS3_MAC_TX_EN_B, val); + hns3_set_bit(loop_en, HNS3_MAC_RX_EN_B, val); + hns3_set_bit(loop_en, HNS3_MAC_PAD_TX_B, val); + hns3_set_bit(loop_en, HNS3_MAC_PAD_RX_B, val); + hns3_set_bit(loop_en, HNS3_MAC_1588_TX_B, 0); + hns3_set_bit(loop_en, HNS3_MAC_1588_RX_B, 0); + hns3_set_bit(loop_en, HNS3_MAC_APP_LP_B, 0); + hns3_set_bit(loop_en, HNS3_MAC_LINE_LP_B, 0); + hns3_set_bit(loop_en, HNS3_MAC_FCS_TX_B, val); + hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_B, val); + hns3_set_bit(loop_en, HNS3_MAC_RX_FCS_STRIP_B, val); + hns3_set_bit(loop_en, HNS3_MAC_TX_OVERSIZE_TRUNCATE_B, val); + hns3_set_bit(loop_en, HNS3_MAC_RX_OVERSIZE_TRUNCATE_B, val); + hns3_set_bit(loop_en, HNS3_MAC_TX_UNDER_MIN_ERR_B, val); + req->txrx_pad_fcs_loop_en = rte_cpu_to_le_32(loop_en); + + ret = hns3_cmd_send(hw, &desc, 1); + if (ret) + PMD_INIT_LOG(ERR, "mac enable fail, ret =%d.", ret); + + return ret; +} + +static int +hns3_get_mac_link_status(struct hns3_hw *hw) +{ + struct hns3_link_status_cmd *req; + struct hns3_cmd_desc 
desc; + int link_status; + int ret; + + hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_LINK_STATUS, true); + ret = hns3_cmd_send(hw, &desc, 1); + if (ret) { + hns3_err(hw, "get link status cmd failed %d", ret); + return ETH_LINK_DOWN; + } + + req = (struct hns3_link_status_cmd *)desc.data; + link_status = req->status & HNS3_LINK_STATUS_UP_M; + + return !!link_status; +} + +void +hns3_update_link_status(struct hns3_hw *hw) +{ + int state; + + state = hns3_get_mac_link_status(hw); + if (state != hw->mac.link_status) { + hw->mac.link_status = state; + hns3_warn(hw, "Link status change to %s!", state ? "up" : "down"); + } +} + +static void +hns3_service_handler(void *param) +{ + struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param; + struct hns3_adapter *hns = eth_dev->data->dev_private; + struct hns3_hw *hw = &hns->hw; + + if (!hns3_is_reset_pending(hns)) { + hns3_update_speed_duplex(eth_dev); + hns3_update_link_status(hw); + } else + hns3_warn(hw, "Cancel the query when reset is pending"); + + rte_eal_alarm_set(HNS3_SERVICE_INTERVAL, hns3_service_handler, eth_dev); +} + +static int +hns3_init_hardware(struct hns3_adapter *hns) +{ + struct hns3_hw *hw = &hns->hw; + int ret; + + ret = hns3_map_tqp(hw); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to map tqp: %d", ret); + return ret; + } + + ret = hns3_init_umv_space(hw); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to init umv space: %d", ret); + return ret; + } + + ret = hns3_mac_init(hw); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to init MAC: %d", ret); + goto err_mac_init; + } + + ret = hns3_init_mgr_tbl(hw); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to init manager table: %d", ret); + goto err_mac_init; + } + + ret = hns3_set_promisc_mode(hw, false, false); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to set promisc mode: %d", ret); + goto err_mac_init; + } + + ret = hns3_clear_all_vfs_promisc_mode(hw); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to clear all vfs promisc mode: %d", + ret); + goto err_mac_init; + } + + ret = hns3_init_vlan_config(hns); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to init vlan: %d", ret); + goto err_mac_init; + } + + ret = hns3_dcb_init(hw); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to init dcb: %d", ret); + goto err_mac_init; + } + + ret = hns3_init_fd_config(hns); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to init flow director: %d", ret); + goto err_mac_init; + } + + ret = hns3_config_tso(hw, HNS3_TSO_MSS_MIN, HNS3_TSO_MSS_MAX); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to config tso: %d", ret); + goto err_mac_init; + } + + ret = hns3_config_gro(hw, false); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to config gro: %d", ret); + goto err_mac_init; + } + + /* + * In the initialization clearing the all hardware mapping relationship + * configurations between queues and interrupt vectors is needed, so + * some error caused by the residual configurations, such as the + * unexpected interrupt, can be avoid. 
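+	 * The per-queue bindings are set up again by hns3_map_rx_interrupt()
+	 * when the port is started with Rx interrupts enabled.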
+ */ + ret = hns3_init_ring_with_vector(hw); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to init ring intr vector: %d", ret); + goto err_mac_init; + } + + return 0; + +err_mac_init: + hns3_uninit_umv_space(hw); + return ret; +} + +static int +hns3_init_pf(struct rte_eth_dev *eth_dev) +{ + struct rte_device *dev = eth_dev->device; + struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev); + struct hns3_adapter *hns = eth_dev->data->dev_private; + struct hns3_hw *hw = &hns->hw; + int ret; + + PMD_INIT_FUNC_TRACE(); + + /* Get hardware io base address from pcie BAR2 IO space */ + hw->io_base = pci_dev->mem_resource[2].addr; + + /* Firmware command queue initialize */ + ret = hns3_cmd_init_queue(hw); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to init cmd queue: %d", ret); + goto err_cmd_init_queue; + } + + hns3_clear_all_event_cause(hw); + + /* Firmware command initialize */ + ret = hns3_cmd_init(hw); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to init cmd: %d", ret); + goto err_cmd_init; + } + + ret = rte_intr_callback_register(&pci_dev->intr_handle, + hns3_interrupt_handler, + eth_dev); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to register intr: %d", ret); + goto err_intr_callback_register; + } + + /* Enable interrupt */ + rte_intr_enable(&pci_dev->intr_handle); + hns3_pf_enable_irq0(hw); + + /* Get configuration */ + ret = hns3_get_configuration(hw); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to fetch configuration: %d", ret); + goto err_get_config; + } + + ret = hns3_init_hardware(hns); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to init hardware: %d", ret); + goto err_get_config; + } + + /* Initialize flow director filter list & hash */ + ret = hns3_fdir_filter_init(hns); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to alloc hashmap for fdir: %d", ret); + goto err_hw_init; + } + + hns3_set_default_rss_args(hw); + + ret = hns3_enable_hw_error_intr(hns, true); + if (ret) { + PMD_INIT_LOG(ERR, "fail to enable hw error interrupts: %d", + ret); + goto err_fdir; + } + + return 0; + +err_fdir: + hns3_fdir_filter_uninit(hns); +err_hw_init: + hns3_uninit_umv_space(hw); + +err_get_config: + hns3_pf_disable_irq0(hw); + rte_intr_disable(&pci_dev->intr_handle); + hns3_intr_unregister(&pci_dev->intr_handle, hns3_interrupt_handler, + eth_dev); +err_intr_callback_register: +err_cmd_init: + hns3_cmd_uninit(hw); + hns3_cmd_destroy_queue(hw); +err_cmd_init_queue: + hw->io_base = NULL; + + return ret; +} + +static void +hns3_uninit_pf(struct rte_eth_dev *eth_dev) +{ + struct hns3_adapter *hns = eth_dev->data->dev_private; + struct rte_device *dev = eth_dev->device; + struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev); + struct hns3_hw *hw = &hns->hw; + + PMD_INIT_FUNC_TRACE(); + + hns3_enable_hw_error_intr(hns, false); + hns3_rss_uninit(hns); + hns3_fdir_filter_uninit(hns); + hns3_uninit_umv_space(hw); + hns3_pf_disable_irq0(hw); + rte_intr_disable(&pci_dev->intr_handle); + hns3_intr_unregister(&pci_dev->intr_handle, hns3_interrupt_handler, + eth_dev); + hns3_cmd_uninit(hw); + hns3_cmd_destroy_queue(hw); + hw->io_base = NULL; +} + +static int +hns3_do_start(struct hns3_adapter *hns, bool reset_queue) +{ + struct hns3_hw *hw = &hns->hw; + int ret; + + ret = hns3_dcb_cfg_update(hns); + if (ret) + return ret; + + /* Enable queues */ + ret = hns3_start_queues(hns, reset_queue); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to start queues: %d", ret); + return ret; + } + + /* Enable MAC */ + ret = hns3_cfg_mac_mode(hw, true); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to enable MAC: %d", ret); + goto err_config_mac_mode; + } + return 0; + 
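+/* Error path: enabling the MAC failed, so stop the queues started above. */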
+err_config_mac_mode: + hns3_stop_queues(hns, true); + return ret; +} + +static int +hns3_map_rx_interrupt(struct rte_eth_dev *dev) +{ + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint8_t base = RTE_INTR_VEC_ZERO_OFFSET; + uint8_t vec = RTE_INTR_VEC_ZERO_OFFSET; + uint32_t intr_vector; + uint16_t q_id; + int ret; + + if (dev->data->dev_conf.intr_conf.rxq == 0) + return 0; + + /* disable uio/vfio intr/eventfd mapping */ + rte_intr_disable(intr_handle); + + /* check and configure queue intr-vector mapping */ + if (rte_intr_cap_multiple(intr_handle) || + !RTE_ETH_DEV_SRIOV(dev).active) { + intr_vector = hw->used_rx_queues; + /* creates event fd for each intr vector when MSIX is used */ + if (rte_intr_efd_enable(intr_handle, intr_vector)) + return -EINVAL; + } + if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) { + intr_handle->intr_vec = + rte_zmalloc("intr_vec", + hw->used_rx_queues * sizeof(int), 0); + if (intr_handle->intr_vec == NULL) { + hns3_err(hw, "Failed to allocate %d rx_queues" + " intr_vec", hw->used_rx_queues); + ret = -ENOMEM; + goto alloc_intr_vec_error; + } + } + + if (rte_intr_allow_others(intr_handle)) { + vec = RTE_INTR_VEC_RXTX_OFFSET; + base = RTE_INTR_VEC_RXTX_OFFSET; + } + if (rte_intr_dp_is_en(intr_handle)) { + for (q_id = 0; q_id < hw->used_rx_queues; q_id++) { + ret = hns3_bind_ring_with_vector(hw, vec, true, + HNS3_RING_TYPE_RX, + q_id); + if (ret) + goto bind_vector_error; + intr_handle->intr_vec[q_id] = vec; + if (vec < base + intr_handle->nb_efd - 1) + vec++; + } + } + rte_intr_enable(intr_handle); + return 0; + +bind_vector_error: + rte_intr_efd_disable(intr_handle); + if (intr_handle->intr_vec) { + free(intr_handle->intr_vec); + intr_handle->intr_vec = NULL; + } + return ret; +alloc_intr_vec_error: + rte_intr_efd_disable(intr_handle); + return ret; +} + +static int +hns3_restore_rx_interrupt(struct hns3_hw *hw) +{ + struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id]; + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + uint16_t q_id; + int ret; + + if (dev->data->dev_conf.intr_conf.rxq == 0) + return 0; + + if (rte_intr_dp_is_en(intr_handle)) { + for (q_id = 0; q_id < hw->used_rx_queues; q_id++) { + ret = hns3_bind_ring_with_vector(hw, + intr_handle->intr_vec[q_id], true, + HNS3_RING_TYPE_RX, q_id); + if (ret) + return ret; + } + } + + return 0; +} + +static void +hns3_restore_filter(struct rte_eth_dev *dev) +{ + hns3_restore_rss_filter(dev); +} + +static int +hns3_dev_start(struct rte_eth_dev *dev) +{ + struct hns3_adapter *hns = dev->data->dev_private; + struct hns3_hw *hw = &hns->hw; + int ret; + + PMD_INIT_FUNC_TRACE(); + if (rte_atomic16_read(&hw->reset.resetting)) + return -EBUSY; + + rte_spinlock_lock(&hw->lock); + hw->adapter_state = HNS3_NIC_STARTING; + + ret = hns3_do_start(hns, true); + if (ret) { + hw->adapter_state = HNS3_NIC_CONFIGURED; + rte_spinlock_unlock(&hw->lock); + return ret; + } + ret = hns3_map_rx_interrupt(dev); + if (ret) { + hw->adapter_state = HNS3_NIC_CONFIGURED; + rte_spinlock_unlock(&hw->lock); + return ret; + } + + hw->adapter_state = HNS3_NIC_STARTED; + rte_spinlock_unlock(&hw->lock); + + hns3_set_rxtx_function(dev); + hns3_mp_req_start_rxtx(dev); + rte_eal_alarm_set(HNS3_SERVICE_INTERVAL, hns3_service_handler, dev); + + hns3_restore_filter(dev); + + /* Enable interrupt of all rx queues 
before enabling queues */ + hns3_dev_all_rx_queue_intr_enable(hw, true); + /* + * When finished the initialization, enable queues to receive/transmit + * packets. + */ + hns3_enable_all_queues(hw, true); + + hns3_info(hw, "hns3 dev start successful!"); + return 0; +} + +static int +hns3_do_stop(struct hns3_adapter *hns) +{ + struct hns3_hw *hw = &hns->hw; + bool reset_queue; + int ret; + + ret = hns3_cfg_mac_mode(hw, false); + if (ret) + return ret; + hw->mac.link_status = ETH_LINK_DOWN; + + if (rte_atomic16_read(&hw->reset.disable_cmd) == 0) { + hns3_configure_all_mac_addr(hns, true); + reset_queue = true; + } else + reset_queue = false; + hw->mac.default_addr_setted = false; + return hns3_stop_queues(hns, reset_queue); +} + +static void +hns3_unmap_rx_interrupt(struct rte_eth_dev *dev) +{ + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + struct hns3_adapter *hns = dev->data->dev_private; + struct hns3_hw *hw = &hns->hw; + uint8_t base = RTE_INTR_VEC_ZERO_OFFSET; + uint8_t vec = RTE_INTR_VEC_ZERO_OFFSET; + uint16_t q_id; + + if (dev->data->dev_conf.intr_conf.rxq == 0) + return; + + /* unmap the ring with vector */ + if (rte_intr_allow_others(intr_handle)) { + vec = RTE_INTR_VEC_RXTX_OFFSET; + base = RTE_INTR_VEC_RXTX_OFFSET; + } + if (rte_intr_dp_is_en(intr_handle)) { + for (q_id = 0; q_id < hw->used_rx_queues; q_id++) { + (void)hns3_bind_ring_with_vector(hw, vec, false, + HNS3_RING_TYPE_RX, + q_id); + if (vec < base + intr_handle->nb_efd - 1) + vec++; + } + } + /* Clean datapath event and queue/vec mapping */ + rte_intr_efd_disable(intr_handle); + if (intr_handle->intr_vec) { + rte_free(intr_handle->intr_vec); + intr_handle->intr_vec = NULL; + } +} + +static void +hns3_dev_stop(struct rte_eth_dev *dev) +{ + struct hns3_adapter *hns = dev->data->dev_private; + struct hns3_hw *hw = &hns->hw; + + PMD_INIT_FUNC_TRACE(); + + hw->adapter_state = HNS3_NIC_STOPPING; + hns3_set_rxtx_function(dev); + rte_wmb(); + /* Disable datapath on secondary process. */ + hns3_mp_req_stop_rxtx(dev); + /* Prevent crashes when queues are still in use. 
*/ + rte_delay_ms(hw->tqps_num); + + rte_spinlock_lock(&hw->lock); + if (rte_atomic16_read(&hw->reset.resetting) == 0) { + hns3_do_stop(hns); + hns3_unmap_rx_interrupt(dev); + hns3_dev_release_mbufs(hns); + hw->adapter_state = HNS3_NIC_CONFIGURED; + } + rte_eal_alarm_cancel(hns3_service_handler, dev); + rte_spinlock_unlock(&hw->lock); +} + +static void +hns3_dev_close(struct rte_eth_dev *eth_dev) +{ + struct hns3_adapter *hns = eth_dev->data->dev_private; + struct hns3_hw *hw = &hns->hw; + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) { + rte_free(eth_dev->process_private); + eth_dev->process_private = NULL; + return; + } + + if (hw->adapter_state == HNS3_NIC_STARTED) + hns3_dev_stop(eth_dev); + + hw->adapter_state = HNS3_NIC_CLOSING; + hns3_reset_abort(hns); + hw->adapter_state = HNS3_NIC_CLOSED; + + hns3_configure_all_mc_mac_addr(hns, true); + hns3_remove_all_vlan_table(hns); + hns3_vlan_txvlan_cfg(hns, HNS3_PORT_BASE_VLAN_DISABLE, 0); + hns3_uninit_pf(eth_dev); + hns3_free_all_queues(eth_dev); + rte_free(hw->reset.wait_data); + rte_free(eth_dev->process_private); + eth_dev->process_private = NULL; + hns3_mp_uninit_primary(); + hns3_warn(hw, "Close port %d finished", hw->data->port_id); +} + +static int +hns3_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) +{ + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private); + + fc_conf->pause_time = pf->pause_time; + + /* return fc current mode */ + switch (hw->current_mode) { + case HNS3_FC_FULL: + fc_conf->mode = RTE_FC_FULL; + break; + case HNS3_FC_TX_PAUSE: + fc_conf->mode = RTE_FC_TX_PAUSE; + break; + case HNS3_FC_RX_PAUSE: + fc_conf->mode = RTE_FC_RX_PAUSE; + break; + case HNS3_FC_NONE: + default: + fc_conf->mode = RTE_FC_NONE; + break; + } + + return 0; +} + +static void +hns3_get_fc_mode(struct hns3_hw *hw, enum rte_eth_fc_mode mode) +{ + switch (mode) { + case RTE_FC_NONE: + hw->requested_mode = HNS3_FC_NONE; + break; + case RTE_FC_RX_PAUSE: + hw->requested_mode = HNS3_FC_RX_PAUSE; + break; + case RTE_FC_TX_PAUSE: + hw->requested_mode = HNS3_FC_TX_PAUSE; + break; + case RTE_FC_FULL: + hw->requested_mode = HNS3_FC_FULL; + break; + default: + hw->requested_mode = HNS3_FC_NONE; + hns3_warn(hw, "fc_mode(%u) exceeds member scope and is " + "configured to RTE_FC_NONE", mode); + break; + } +} + +static int +hns3_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) +{ + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private); + int ret; + + if (fc_conf->high_water || fc_conf->low_water || + fc_conf->send_xon || fc_conf->mac_ctrl_frame_fwd) { + hns3_err(hw, "Unsupported flow control settings specified, " + "high_water(%u), low_water(%u), send_xon(%u) and " + "mac_ctrl_frame_fwd(%u) must be set to '0'", + fc_conf->high_water, fc_conf->low_water, + fc_conf->send_xon, fc_conf->mac_ctrl_frame_fwd); + return -EINVAL; + } + if (fc_conf->autoneg) { + hns3_err(hw, "Unsupported fc auto-negotiation setting."); + return -EINVAL; + } + if (!fc_conf->pause_time) { + hns3_err(hw, "Invalid pause time %d setting.", + fc_conf->pause_time); + return -EINVAL; + } + + if (!(hw->current_fc_status == HNS3_FC_STATUS_NONE || + hw->current_fc_status == HNS3_FC_STATUS_MAC_PAUSE)) { + hns3_err(hw, "PFC is enabled. Cannot set MAC pause. 
" + "current_fc_status = %d", hw->current_fc_status); + return -EOPNOTSUPP; + } + + hns3_get_fc_mode(hw, fc_conf->mode); + if (hw->requested_mode == hw->current_mode && + pf->pause_time == fc_conf->pause_time) + return 0; + + rte_spinlock_lock(&hw->lock); + ret = hns3_fc_enable(dev, fc_conf); + rte_spinlock_unlock(&hw->lock); + + return ret; +} + +static int +hns3_priority_flow_ctrl_set(struct rte_eth_dev *dev, + struct rte_eth_pfc_conf *pfc_conf) +{ + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private); + uint8_t priority; + int ret; + + if (!hns3_dev_dcb_supported(hw)) { + hns3_err(hw, "This port does not support dcb configurations."); + return -EOPNOTSUPP; + } + + if (pfc_conf->fc.high_water || pfc_conf->fc.low_water || + pfc_conf->fc.send_xon || pfc_conf->fc.mac_ctrl_frame_fwd) { + hns3_err(hw, "Unsupported flow control settings specified, " + "high_water(%u), low_water(%u), send_xon(%u) and " + "mac_ctrl_frame_fwd(%u) must be set to '0'", + pfc_conf->fc.high_water, pfc_conf->fc.low_water, + pfc_conf->fc.send_xon, + pfc_conf->fc.mac_ctrl_frame_fwd); + return -EINVAL; + } + if (pfc_conf->fc.autoneg) { + hns3_err(hw, "Unsupported fc auto-negotiation setting."); + return -EINVAL; + } + if (pfc_conf->fc.pause_time == 0) { + hns3_err(hw, "Invalid pause time %d setting.", + pfc_conf->fc.pause_time); + return -EINVAL; + } + + if (!(hw->current_fc_status == HNS3_FC_STATUS_NONE || + hw->current_fc_status == HNS3_FC_STATUS_PFC)) { + hns3_err(hw, "MAC pause is enabled. Cannot set PFC." + "current_fc_status = %d", hw->current_fc_status); + return -EOPNOTSUPP; + } + + priority = pfc_conf->priority; + hns3_get_fc_mode(hw, pfc_conf->fc.mode); + if (hw->dcb_info.pfc_en & BIT(priority) && + hw->requested_mode == hw->current_mode && + pfc_conf->fc.pause_time == pf->pause_time) + return 0; + + rte_spinlock_lock(&hw->lock); + ret = hns3_dcb_pfc_enable(dev, pfc_conf); + rte_spinlock_unlock(&hw->lock); + + return ret; +} + +static int +hns3_get_dcb_info(struct rte_eth_dev *dev, struct rte_eth_dcb_info *dcb_info) +{ + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private); + enum rte_eth_rx_mq_mode mq_mode = dev->data->dev_conf.rxmode.mq_mode; + int i; + + rte_spinlock_lock(&hw->lock); + if ((uint32_t)mq_mode & ETH_MQ_RX_DCB_FLAG) + dcb_info->nb_tcs = pf->local_max_tc; + else + dcb_info->nb_tcs = 1; + + for (i = 0; i < HNS3_MAX_USER_PRIO; i++) + dcb_info->prio_tc[i] = hw->dcb_info.prio_tc[i]; + for (i = 0; i < dcb_info->nb_tcs; i++) + dcb_info->tc_bws[i] = hw->dcb_info.pg_info[0].tc_dwrr[i]; + + for (i = 0; i < hw->num_tc; i++) { + dcb_info->tc_queue.tc_rxq[0][i].base = hw->alloc_rss_size * i; + dcb_info->tc_queue.tc_txq[0][i].base = + hw->tc_queue[i].tqp_offset; + dcb_info->tc_queue.tc_rxq[0][i].nb_queue = hw->alloc_rss_size; + dcb_info->tc_queue.tc_txq[0][i].nb_queue = + hw->tc_queue[i].tqp_count; + } + rte_spinlock_unlock(&hw->lock); + + return 0; +} + +static int +hns3_reinit_dev(struct hns3_adapter *hns) +{ + struct hns3_hw *hw = &hns->hw; + int ret; + + ret = hns3_cmd_init(hw); + if (ret) { + hns3_err(hw, "Failed to init cmd: %d", ret); + return ret; + } + + ret = hns3_reset_all_queues(hns); + if (ret) { + hns3_err(hw, "Failed to reset all queues: %d", ret); + return ret; + } + + ret = hns3_init_hardware(hns); + if (ret) { + hns3_err(hw, "Failed to init hardware: %d", ret); + return ret; + } + + ret = hns3_enable_hw_error_intr(hns, 
true); + if (ret) { + hns3_err(hw, "fail to enable hw error interrupts: %d", + ret); + return ret; + } + hns3_info(hw, "Reset done, driver initialization finished."); + + return 0; +} + +static bool +is_pf_reset_done(struct hns3_hw *hw) +{ + uint32_t val, reg, reg_bit; + + switch (hw->reset.level) { + case HNS3_IMP_RESET: + reg = HNS3_GLOBAL_RESET_REG; + reg_bit = HNS3_IMP_RESET_BIT; + break; + case HNS3_GLOBAL_RESET: + reg = HNS3_GLOBAL_RESET_REG; + reg_bit = HNS3_GLOBAL_RESET_BIT; + break; + case HNS3_FUNC_RESET: + reg = HNS3_FUN_RST_ING; + reg_bit = HNS3_FUN_RST_ING_B; + break; + case HNS3_FLR_RESET: + default: + hns3_err(hw, "Wait for unsupported reset level: %d", + hw->reset.level); + return true; + } + val = hns3_read_dev(hw, reg); + if (hns3_get_bit(val, reg_bit)) + return false; + else + return true; +} + +bool +hns3_is_reset_pending(struct hns3_adapter *hns) +{ + struct hns3_hw *hw = &hns->hw; + enum hns3_reset_level reset; + + hns3_check_event_cause(hns, NULL); + reset = hns3_get_reset_level(hns, &hw->reset.pending); + if (hw->reset.level != HNS3_NONE_RESET && hw->reset.level < reset) { + hns3_warn(hw, "High level reset %d is pending", reset); + return true; + } + reset = hns3_get_reset_level(hns, &hw->reset.request); + if (hw->reset.level != HNS3_NONE_RESET && hw->reset.level < reset) { + hns3_warn(hw, "High level reset %d is request", reset); + return true; + } + return false; +} + +static int +hns3_wait_hardware_ready(struct hns3_adapter *hns) +{ + struct hns3_hw *hw = &hns->hw; + struct hns3_wait_data *wait_data = hw->reset.wait_data; + struct timeval tv; + + if (wait_data->result == HNS3_WAIT_SUCCESS) + return 0; + else if (wait_data->result == HNS3_WAIT_TIMEOUT) { + gettimeofday(&tv, NULL); + hns3_warn(hw, "Reset step4 hardware not ready after reset time=%ld.%.6ld", + tv.tv_sec, tv.tv_usec); + return -ETIME; + } else if (wait_data->result == HNS3_WAIT_REQUEST) + return -EAGAIN; + + wait_data->hns = hns; + wait_data->check_completion = is_pf_reset_done; + wait_data->end_ms = (uint64_t)HNS3_RESET_WAIT_CNT * + HNS3_RESET_WAIT_MS + get_timeofday_ms(); + wait_data->interval = HNS3_RESET_WAIT_MS * USEC_PER_MSEC; + wait_data->count = HNS3_RESET_WAIT_CNT; + wait_data->result = HNS3_WAIT_REQUEST; + rte_eal_alarm_set(wait_data->interval, hns3_wait_callback, wait_data); + return -EAGAIN; +} + +static int +hns3_func_reset_cmd(struct hns3_hw *hw, int func_id) +{ + struct hns3_cmd_desc desc; + struct hns3_reset_cmd *req = (struct hns3_reset_cmd *)desc.data; + + hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_RST_TRIGGER, false); + hns3_set_bit(req->mac_func_reset, HNS3_CFG_RESET_FUNC_B, 1); + req->fun_reset_vfid = func_id; + + return hns3_cmd_send(hw, &desc, 1); +} + +static int +hns3_imp_reset_cmd(struct hns3_hw *hw) +{ + struct hns3_cmd_desc desc; + + hns3_cmd_setup_basic_desc(&desc, 0xFFFE, false); + desc.data[0] = 0xeedd; + + return hns3_cmd_send(hw, &desc, 1); +} + +static void +hns3_msix_process(struct hns3_adapter *hns, enum hns3_reset_level reset_level) +{ + struct hns3_hw *hw = &hns->hw; + struct timeval tv; + uint32_t val; + + gettimeofday(&tv, NULL); + if (hns3_read_dev(hw, HNS3_GLOBAL_RESET_REG) || + hns3_read_dev(hw, HNS3_FUN_RST_ING)) { + hns3_warn(hw, "Don't process msix during resetting time=%ld.%.6ld", + tv.tv_sec, tv.tv_usec); + return; + } + + switch (reset_level) { + case HNS3_IMP_RESET: + hns3_imp_reset_cmd(hw); + hns3_warn(hw, "IMP Reset requested time=%ld.%.6ld", + tv.tv_sec, tv.tv_usec); + break; + case HNS3_GLOBAL_RESET: + val = hns3_read_dev(hw, 
HNS3_GLOBAL_RESET_REG); + hns3_set_bit(val, HNS3_GLOBAL_RESET_BIT, 1); + hns3_write_dev(hw, HNS3_GLOBAL_RESET_REG, val); + hns3_warn(hw, "Global Reset requested time=%ld.%.6ld", + tv.tv_sec, tv.tv_usec); + break; + case HNS3_FUNC_RESET: + hns3_warn(hw, "PF Reset requested time=%ld.%.6ld", + tv.tv_sec, tv.tv_usec); + /* schedule again to check later */ + hns3_atomic_set_bit(HNS3_FUNC_RESET, &hw->reset.pending); + hns3_schedule_reset(hns); + break; + default: + hns3_warn(hw, "Unsupported reset level: %d", reset_level); + return; + } + hns3_atomic_clear_bit(reset_level, &hw->reset.request); +} + +static enum hns3_reset_level +hns3_get_reset_level(struct hns3_adapter *hns, uint64_t *levels) +{ + struct hns3_hw *hw = &hns->hw; + enum hns3_reset_level reset_level = HNS3_NONE_RESET; + + /* Return the highest priority reset level amongst all */ + if (hns3_atomic_test_bit(HNS3_IMP_RESET, levels)) + reset_level = HNS3_IMP_RESET; + else if (hns3_atomic_test_bit(HNS3_GLOBAL_RESET, levels)) + reset_level = HNS3_GLOBAL_RESET; + else if (hns3_atomic_test_bit(HNS3_FUNC_RESET, levels)) + reset_level = HNS3_FUNC_RESET; + else if (hns3_atomic_test_bit(HNS3_FLR_RESET, levels)) + reset_level = HNS3_FLR_RESET; + + if (hw->reset.level != HNS3_NONE_RESET && reset_level < hw->reset.level) + return HNS3_NONE_RESET; + + return reset_level; +} + +static int +hns3_prepare_reset(struct hns3_adapter *hns) +{ + struct hns3_hw *hw = &hns->hw; + uint32_t reg_val; + int ret; + + switch (hw->reset.level) { + case HNS3_FUNC_RESET: + ret = hns3_func_reset_cmd(hw, 0); + if (ret) + return ret; + + /* + * After performaning pf reset, it is not necessary to do the + * mailbox handling or send any command to firmware, because + * any mailbox handling or command to firmware is only valid + * after hns3_cmd_init is called. + */ + rte_atomic16_set(&hw->reset.disable_cmd, 1); + hw->reset.stats.request_cnt++; + break; + case HNS3_IMP_RESET: + reg_val = hns3_read_dev(hw, HNS3_VECTOR0_OTER_EN_REG); + hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, reg_val | + BIT(HNS3_VECTOR0_IMP_RESET_INT_B)); + break; + default: + break; + } + return 0; +} + +static int +hns3_set_rst_done(struct hns3_hw *hw) +{ + struct hns3_pf_rst_done_cmd *req; + struct hns3_cmd_desc desc; + + req = (struct hns3_pf_rst_done_cmd *)desc.data; + hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_PF_RST_DONE, false); + req->pf_rst_done |= HNS3_PF_RESET_DONE_BIT; + return hns3_cmd_send(hw, &desc, 1); +} + +static int +hns3_stop_service(struct hns3_adapter *hns) +{ + struct hns3_hw *hw = &hns->hw; + struct rte_eth_dev *eth_dev; + + eth_dev = &rte_eth_devices[hw->data->port_id]; + if (hw->adapter_state == HNS3_NIC_STARTED) + rte_eal_alarm_cancel(hns3_service_handler, eth_dev); + hw->mac.link_status = ETH_LINK_DOWN; + + hns3_set_rxtx_function(eth_dev); + rte_wmb(); + /* Disable datapath on secondary process. */ + hns3_mp_req_stop_rxtx(eth_dev); + rte_delay_ms(hw->tqps_num); + + rte_spinlock_lock(&hw->lock); + if (hns->hw.adapter_state == HNS3_NIC_STARTED || + hw->adapter_state == HNS3_NIC_STOPPING) { + hns3_do_stop(hns); + hw->reset.mbuf_deferred_free = true; + } else + hw->reset.mbuf_deferred_free = false; + + /* + * It is cumbersome for hardware to pick-and-choose entries for deletion + * from table space. 
Hence, for function reset software intervention is + * required to delete the entries + */ + if (rte_atomic16_read(&hw->reset.disable_cmd) == 0) + hns3_configure_all_mc_mac_addr(hns, true); + rte_spinlock_unlock(&hw->lock); + + return 0; +} + +static int +hns3_start_service(struct hns3_adapter *hns) +{ + struct hns3_hw *hw = &hns->hw; + struct rte_eth_dev *eth_dev; + + if (hw->reset.level == HNS3_IMP_RESET || + hw->reset.level == HNS3_GLOBAL_RESET) + hns3_set_rst_done(hw); + eth_dev = &rte_eth_devices[hw->data->port_id]; + hns3_set_rxtx_function(eth_dev); + hns3_mp_req_start_rxtx(eth_dev); + if (hw->adapter_state == HNS3_NIC_STARTED) { + hns3_service_handler(eth_dev); + + /* Enable interrupt of all rx queues before enabling queues */ + hns3_dev_all_rx_queue_intr_enable(hw, true); + /* + * When finished the initialization, enable queues to receive + * and transmit packets. + */ + hns3_enable_all_queues(hw, true); + } + + return 0; +} + +static int +hns3_restore_conf(struct hns3_adapter *hns) +{ + struct hns3_hw *hw = &hns->hw; + int ret; + + ret = hns3_configure_all_mac_addr(hns, false); + if (ret) + return ret; + + ret = hns3_configure_all_mc_mac_addr(hns, false); + if (ret) + goto err_mc_mac; + + ret = hns3_dev_promisc_restore(hns); + if (ret) + goto err_promisc; + + ret = hns3_restore_vlan_table(hns); + if (ret) + goto err_promisc; + + ret = hns3_restore_vlan_conf(hns); + if (ret) + goto err_promisc; + + ret = hns3_restore_all_fdir_filter(hns); + if (ret) + goto err_promisc; + + ret = hns3_restore_rx_interrupt(hw); + if (ret) + goto err_promisc; + + if (hns->hw.adapter_state == HNS3_NIC_STARTED) { + ret = hns3_do_start(hns, false); + if (ret) + goto err_promisc; + hns3_info(hw, "hns3 dev restart successful!"); + } else if (hw->adapter_state == HNS3_NIC_STOPPING) + hw->adapter_state = HNS3_NIC_CONFIGURED; + return 0; + +err_promisc: + hns3_configure_all_mc_mac_addr(hns, true); +err_mc_mac: + hns3_configure_all_mac_addr(hns, true); + return ret; +} + +static void +hns3_reset_service(void *param) +{ + struct hns3_adapter *hns = (struct hns3_adapter *)param; + struct hns3_hw *hw = &hns->hw; + enum hns3_reset_level reset_level; + struct timeval tv_delta; + struct timeval tv_start; + struct timeval tv; + uint64_t msec; + int ret; + + /* + * The interrupt is not triggered within the delay time. + * The interrupt may have been lost. It is necessary to handle + * the interrupt to recover from the error. + */ + if (rte_atomic16_read(&hns->hw.reset.schedule) == SCHEDULE_DEFERRED) { + rte_atomic16_set(&hns->hw.reset.schedule, SCHEDULE_REQUESTED); + hns3_err(hw, "Handling interrupts in delayed tasks"); + hns3_interrupt_handler(&rte_eth_devices[hw->data->port_id]); + reset_level = hns3_get_reset_level(hns, &hw->reset.pending); + if (reset_level == HNS3_NONE_RESET) { + hns3_err(hw, "No reset level is set, try IMP reset"); + hns3_atomic_set_bit(HNS3_IMP_RESET, &hw->reset.pending); + } + } + rte_atomic16_set(&hns->hw.reset.schedule, SCHEDULE_NONE); + + /* + * Check if there is any ongoing reset in the hardware. This status can + * be checked from reset_pending. If there is then, we need to wait for + * hardware to complete reset. + * a. If we are able to figure out in reasonable time that hardware + * has fully resetted then, we can proceed with driver, client + * reset. + * b. else, we can come back later to check this status so re-sched + * now. 
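+	 * The -EAGAIN return from hns3_reset_process() below corresponds to
+	 * case b: the handler returns here and the check is re-scheduled to
+	 * run again later.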
+ */ + reset_level = hns3_get_reset_level(hns, &hw->reset.pending); + if (reset_level != HNS3_NONE_RESET) { + gettimeofday(&tv_start, NULL); + ret = hns3_reset_process(hns, reset_level); + gettimeofday(&tv, NULL); + timersub(&tv, &tv_start, &tv_delta); + msec = tv_delta.tv_sec * MSEC_PER_SEC + + tv_delta.tv_usec / USEC_PER_MSEC; + if (msec > HNS3_RESET_PROCESS_MS) + hns3_err(hw, "%d handle long time delta %" PRIx64 + " ms time=%ld.%.6ld", + hw->reset.level, msec, + tv.tv_sec, tv.tv_usec); + if (ret == -EAGAIN) + return; + } + + /* Check if we got any *new* reset requests to be honored */ + reset_level = hns3_get_reset_level(hns, &hw->reset.request); + if (reset_level != HNS3_NONE_RESET) + hns3_msix_process(hns, reset_level); +} + +static const struct eth_dev_ops hns3_eth_dev_ops = { + .dev_start = hns3_dev_start, + .dev_stop = hns3_dev_stop, + .dev_close = hns3_dev_close, + .promiscuous_enable = hns3_dev_promiscuous_enable, + .promiscuous_disable = hns3_dev_promiscuous_disable, + .allmulticast_enable = hns3_dev_allmulticast_enable, + .allmulticast_disable = hns3_dev_allmulticast_disable, + .mtu_set = hns3_dev_mtu_set, + .stats_get = hns3_stats_get, + .stats_reset = hns3_stats_reset, + .xstats_get = hns3_dev_xstats_get, + .xstats_get_names = hns3_dev_xstats_get_names, + .xstats_reset = hns3_dev_xstats_reset, + .xstats_get_by_id = hns3_dev_xstats_get_by_id, + .xstats_get_names_by_id = hns3_dev_xstats_get_names_by_id, + .dev_infos_get = hns3_dev_infos_get, + .fw_version_get = hns3_fw_version_get, + .rx_queue_setup = hns3_rx_queue_setup, + .tx_queue_setup = hns3_tx_queue_setup, + .rx_queue_release = hns3_dev_rx_queue_release, + .tx_queue_release = hns3_dev_tx_queue_release, + .rx_queue_intr_enable = hns3_dev_rx_queue_intr_enable, + .rx_queue_intr_disable = hns3_dev_rx_queue_intr_disable, + .dev_configure = hns3_dev_configure, + .flow_ctrl_get = hns3_flow_ctrl_get, + .flow_ctrl_set = hns3_flow_ctrl_set, + .priority_flow_ctrl_set = hns3_priority_flow_ctrl_set, + .mac_addr_add = hns3_add_mac_addr, + .mac_addr_remove = hns3_remove_mac_addr, + .mac_addr_set = hns3_set_default_mac_addr, + .set_mc_addr_list = hns3_set_mc_mac_addr_list, + .link_update = hns3_dev_link_update, + .rss_hash_update = hns3_dev_rss_hash_update, + .rss_hash_conf_get = hns3_dev_rss_hash_conf_get, + .reta_update = hns3_dev_rss_reta_update, + .reta_query = hns3_dev_rss_reta_query, + .filter_ctrl = hns3_dev_filter_ctrl, + .vlan_filter_set = hns3_vlan_filter_set, + .vlan_tpid_set = hns3_vlan_tpid_set, + .vlan_offload_set = hns3_vlan_offload_set, + .vlan_pvid_set = hns3_vlan_pvid_set, + .get_reg = hns3_get_regs, + .get_dcb_info = hns3_get_dcb_info, + .dev_supported_ptypes_get = hns3_dev_supported_ptypes_get, +}; + +static const struct hns3_reset_ops hns3_reset_ops = { + .reset_service = hns3_reset_service, + .stop_service = hns3_stop_service, + .prepare_reset = hns3_prepare_reset, + .wait_hardware_ready = hns3_wait_hardware_ready, + .reinit_dev = hns3_reinit_dev, + .restore_conf = hns3_restore_conf, + .start_service = hns3_start_service, +}; + +static int +hns3_dev_init(struct rte_eth_dev *eth_dev) +{ + struct rte_device *dev = eth_dev->device; + struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev); + struct hns3_adapter *hns = eth_dev->data->dev_private; + struct hns3_hw *hw = &hns->hw; + uint16_t device_id = pci_dev->id.device_id; + uint8_t revision; + int ret; + + PMD_INIT_FUNC_TRACE(); + + /* Get PCI revision id */ + ret = rte_pci_read_config(pci_dev, &revision, HNS3_PCI_REVISION_ID_LEN, + HNS3_PCI_REVISION_ID); + if (ret != 
HNS3_PCI_REVISION_ID_LEN) { + PMD_INIT_LOG(ERR, "Failed to read pci revision id, ret = %d", + ret); + return -EIO; + } + hw->revision = revision; + + eth_dev->process_private = (struct hns3_process_private *) + rte_zmalloc_socket("hns3_filter_list", + sizeof(struct hns3_process_private), + RTE_CACHE_LINE_SIZE, eth_dev->device->numa_node); + if (eth_dev->process_private == NULL) { + PMD_INIT_LOG(ERR, "Failed to alloc memory for process private"); + return -ENOMEM; + } + /* initialize flow filter lists */ + hns3_filterlist_init(eth_dev); + + hns3_set_rxtx_function(eth_dev); + eth_dev->dev_ops = &hns3_eth_dev_ops; + if (rte_eal_process_type() != RTE_PROC_PRIMARY) { + hns3_mp_init_secondary(); + hw->secondary_cnt++; + return 0; + } + + hns3_mp_init_primary(); + hw->adapter_state = HNS3_NIC_UNINITIALIZED; + + if (device_id == HNS3_DEV_ID_25GE_RDMA || + device_id == HNS3_DEV_ID_50GE_RDMA || + device_id == HNS3_DEV_ID_100G_RDMA_MACSEC) + hns3_set_bit(hw->flag, HNS3_DEV_SUPPORT_DCB_B, 1); + + hns->is_vf = false; + hw->data = eth_dev->data; + + /* + * Set default max packet size according to the mtu + * default vale in DPDK frame. + */ + hns->pf.mps = hw->data->mtu + HNS3_ETH_OVERHEAD; + + ret = hns3_reset_init(hw); + if (ret) + goto err_init_reset; + hw->reset.ops = &hns3_reset_ops; + + ret = hns3_init_pf(eth_dev); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to init pf: %d", ret); + goto err_init_pf; + } + + /* Allocate memory for storing MAC addresses */ + eth_dev->data->mac_addrs = rte_zmalloc("hns3-mac", + sizeof(struct rte_ether_addr) * + HNS3_UC_MACADDR_NUM, 0); + if (eth_dev->data->mac_addrs == NULL) { + PMD_INIT_LOG(ERR, "Failed to allocate %zx bytes needed " + "to store MAC addresses", + sizeof(struct rte_ether_addr) * + HNS3_UC_MACADDR_NUM); + ret = -ENOMEM; + goto err_rte_zmalloc; + } + + rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.mac_addr, + ð_dev->data->mac_addrs[0]); + + hw->adapter_state = HNS3_NIC_INITIALIZED; + /* + * Pass the information to the rte_eth_dev_close() that it should also + * release the private port resources. 
+ */ + eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE; + + if (rte_atomic16_read(&hns->hw.reset.schedule) == SCHEDULE_PENDING) { + hns3_err(hw, "Reschedule reset service after dev_init"); + hns3_schedule_reset(hns); + } else { + /* IMP will wait ready flag before reset */ + hns3_notify_reset_ready(hw, false); + } + + hns3_info(hw, "hns3 dev initialization successful!"); + return 0; + +err_rte_zmalloc: + hns3_uninit_pf(eth_dev); + +err_init_pf: + rte_free(hw->reset.wait_data); +err_init_reset: + eth_dev->dev_ops = NULL; + eth_dev->rx_pkt_burst = NULL; + eth_dev->tx_pkt_burst = NULL; + eth_dev->tx_pkt_prepare = NULL; + rte_free(eth_dev->process_private); + eth_dev->process_private = NULL; + return ret; +} + +static int +hns3_dev_uninit(struct rte_eth_dev *eth_dev) +{ + struct hns3_adapter *hns = eth_dev->data->dev_private; + struct hns3_hw *hw = &hns->hw; + + PMD_INIT_FUNC_TRACE(); + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return -EPERM; + + eth_dev->dev_ops = NULL; + eth_dev->rx_pkt_burst = NULL; + eth_dev->tx_pkt_burst = NULL; + eth_dev->tx_pkt_prepare = NULL; + if (hw->adapter_state < HNS3_NIC_CLOSING) + hns3_dev_close(eth_dev); + + hw->adapter_state = HNS3_NIC_REMOVED; + return 0; +} + +static int +eth_hns3_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, + struct rte_pci_device *pci_dev) +{ + return rte_eth_dev_pci_generic_probe(pci_dev, + sizeof(struct hns3_adapter), + hns3_dev_init); +} + +static int +eth_hns3_pci_remove(struct rte_pci_device *pci_dev) +{ + return rte_eth_dev_pci_generic_remove(pci_dev, hns3_dev_uninit); +} + +static const struct rte_pci_id pci_id_hns3_map[] = { + { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_GE) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_25GE) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_25GE_RDMA) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_50GE_RDMA) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_100G_RDMA_MACSEC) }, + { .vendor_id = 0, /* sentinel */ }, +}; + +static struct rte_pci_driver rte_hns3_pmd = { + .id_table = pci_id_hns3_map, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING, + .probe = eth_hns3_pci_probe, + .remove = eth_hns3_pci_remove, +}; + +RTE_PMD_REGISTER_PCI(net_hns3, rte_hns3_pmd); +RTE_PMD_REGISTER_PCI_TABLE(net_hns3, pci_id_hns3_map); +RTE_PMD_REGISTER_KMOD_DEP(net_hns3, "* igb_uio | vfio-pci"); + +RTE_INIT(hns3_init_log) +{ + hns3_logtype_init = rte_log_register("pmd.net.hns3.init"); + if (hns3_logtype_init >= 0) + rte_log_set_level(hns3_logtype_init, RTE_LOG_NOTICE); + hns3_logtype_driver = rte_log_register("pmd.net.hns3.driver"); + if (hns3_logtype_driver >= 0) + rte_log_set_level(hns3_logtype_driver, RTE_LOG_NOTICE); +} diff --git a/src/spdk/dpdk/drivers/net/hns3/hns3_ethdev.h b/src/spdk/dpdk/drivers/net/hns3/hns3_ethdev.h new file mode 100644 index 000000000..06a186451 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/hns3/hns3_ethdev.h @@ -0,0 +1,670 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018-2019 Hisilicon Limited. 
+ */ + +#ifndef _HNS3_ETHDEV_H_ +#define _HNS3_ETHDEV_H_ + +#include +#include + +#include "hns3_cmd.h" +#include "hns3_mbx.h" +#include "hns3_rss.h" +#include "hns3_fdir.h" +#include "hns3_stats.h" + +/* Vendor ID */ +#define PCI_VENDOR_ID_HUAWEI 0x19e5 + +/* Device IDs */ +#define HNS3_DEV_ID_GE 0xA220 +#define HNS3_DEV_ID_25GE 0xA221 +#define HNS3_DEV_ID_25GE_RDMA 0xA222 +#define HNS3_DEV_ID_50GE_RDMA 0xA224 +#define HNS3_DEV_ID_100G_RDMA_MACSEC 0xA226 +#define HNS3_DEV_ID_100G_VF 0xA22E +#define HNS3_DEV_ID_100G_RDMA_PFC_VF 0xA22F + +/* PCI Config offsets */ +#define HNS3_PCI_REVISION_ID 0x08 +#define HNS3_PCI_REVISION_ID_LEN 1 + +#define HNS3_UC_MACADDR_NUM 128 +#define HNS3_VF_UC_MACADDR_NUM 48 +#define HNS3_MC_MACADDR_NUM 128 + +#define HNS3_MAX_BD_SIZE 65535 +#define HNS3_MAX_NON_TSO_BD_PER_PKT 8 +#define HNS3_MAX_TSO_BD_PER_PKT 63 +#define HNS3_MAX_FRAME_LEN 9728 +#define HNS3_VLAN_TAG_SIZE 4 +#define HNS3_DEFAULT_RX_BUF_LEN 2048 +#define HNS3_MAX_BD_PAYLEN (1024 * 1024 - 1) +#define HNS3_MAX_TSO_HDR_SIZE 512 +#define HNS3_MAX_TSO_HDR_BD_NUM 3 + +#define HNS3_ETH_OVERHEAD \ + (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + HNS3_VLAN_TAG_SIZE * 2) +#define HNS3_PKTLEN_TO_MTU(pktlen) ((pktlen) - HNS3_ETH_OVERHEAD) +#define HNS3_MAX_MTU (HNS3_MAX_FRAME_LEN - HNS3_ETH_OVERHEAD) +#define HNS3_DEFAULT_MTU 1500UL +#define HNS3_DEFAULT_FRAME_LEN (HNS3_DEFAULT_MTU + HNS3_ETH_OVERHEAD) +#define HNS3_MIN_PKT_SIZE 60 + +#define HNS3_4_TCS 4 +#define HNS3_8_TCS 8 + +#define HNS3_MAX_PF_NUM 8 +#define HNS3_UMV_TBL_SIZE 3072 +#define HNS3_DEFAULT_UMV_SPACE_PER_PF \ + (HNS3_UMV_TBL_SIZE / HNS3_MAX_PF_NUM) + +#define HNS3_PF_CFG_BLOCK_SIZE 32 +#define HNS3_PF_CFG_DESC_NUM \ + (HNS3_PF_CFG_BLOCK_SIZE / HNS3_CFG_RD_LEN_BYTES) + +#define HNS3_DEFAULT_ENABLE_PFC_NUM 0 + +#define HNS3_INTR_UNREG_FAIL_RETRY_CNT 5 +#define HNS3_INTR_UNREG_FAIL_DELAY_MS 500 + +#define HNS3_QUIT_RESET_CNT 10 +#define HNS3_QUIT_RESET_DELAY_MS 100 + +#define HNS3_POLL_RESPONE_MS 1 + +#define HNS3_MAX_USER_PRIO 8 +#define HNS3_PG_NUM 4 +enum hns3_fc_mode { + HNS3_FC_NONE, + HNS3_FC_RX_PAUSE, + HNS3_FC_TX_PAUSE, + HNS3_FC_FULL, + HNS3_FC_DEFAULT +}; + +#define HNS3_SCH_MODE_SP 0 +#define HNS3_SCH_MODE_DWRR 1 +struct hns3_pg_info { + uint8_t pg_id; + uint8_t pg_sch_mode; /* 0: sp; 1: dwrr */ + uint8_t tc_bit_map; + uint32_t bw_limit; + uint8_t tc_dwrr[HNS3_MAX_TC_NUM]; +}; + +struct hns3_tc_info { + uint8_t tc_id; + uint8_t tc_sch_mode; /* 0: sp; 1: dwrr */ + uint8_t pgid; + uint32_t bw_limit; + uint8_t up_to_tc_map; /* user priority maping on the TC */ +}; + +struct hns3_dcb_info { + uint8_t num_tc; + uint8_t num_pg; /* It must be 1 if vNET-Base schd */ + uint8_t pg_dwrr[HNS3_PG_NUM]; + uint8_t prio_tc[HNS3_MAX_USER_PRIO]; + struct hns3_pg_info pg_info[HNS3_PG_NUM]; + struct hns3_tc_info tc_info[HNS3_MAX_TC_NUM]; + uint8_t hw_pfc_map; /* Allow for packet drop or not on this TC */ + uint8_t pfc_en; /* Pfc enabled or not for user priority */ +}; + +enum hns3_fc_status { + HNS3_FC_STATUS_NONE, + HNS3_FC_STATUS_MAC_PAUSE, + HNS3_FC_STATUS_PFC, +}; + +struct hns3_tc_queue_info { + uint8_t tqp_offset; /* TQP offset from base TQP */ + uint8_t tqp_count; /* Total TQPs */ + uint8_t tc; /* TC index */ + bool enable; /* If this TC is enable or not */ +}; + +struct hns3_cfg { + uint8_t vmdq_vport_num; + uint8_t tc_num; + uint16_t tqp_desc_num; + uint16_t rx_buf_len; + uint16_t rss_size_max; + uint8_t phy_addr; + uint8_t media_type; + uint8_t mac_addr[RTE_ETHER_ADDR_LEN]; + uint8_t default_speed; + uint32_t numa_node_map; + uint8_t speed_ability; 
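+	/* unicast MAC VLAN (UMV) table space for this function */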
+ uint16_t umv_space; +}; + +/* mac media type */ +enum hns3_media_type { + HNS3_MEDIA_TYPE_UNKNOWN, + HNS3_MEDIA_TYPE_FIBER, + HNS3_MEDIA_TYPE_COPPER, + HNS3_MEDIA_TYPE_BACKPLANE, + HNS3_MEDIA_TYPE_NONE, +}; + +struct hns3_mac { + uint8_t mac_addr[RTE_ETHER_ADDR_LEN]; + bool default_addr_setted; /* whether default addr(mac_addr) is setted */ + uint8_t media_type; + uint8_t phy_addr; + uint8_t link_duplex : 1; /* ETH_LINK_[HALF/FULL]_DUPLEX */ + uint8_t link_autoneg : 1; /* ETH_LINK_[AUTONEG/FIXED] */ + uint8_t link_status : 1; /* ETH_LINK_[DOWN/UP] */ + uint32_t link_speed; /* ETH_SPEED_NUM_ */ +}; + +struct hns3_fake_queue_data { + void **rx_queues; /* Array of pointers to fake RX queues. */ + void **tx_queues; /* Array of pointers to fake TX queues. */ + uint16_t nb_fake_rx_queues; /* Number of fake RX queues. */ + uint16_t nb_fake_tx_queues; /* Number of fake TX queues. */ +}; + +/* Primary process maintains driver state in main thread. + * + * +---------------+ + * | UNINITIALIZED |<-----------+ + * +---------------+ | + * |.eth_dev_init |.eth_dev_uninit + * V | + * +---------------+------------+ + * | INITIALIZED | + * +---------------+<-----------<---------------+ + * |.dev_configure | | + * V |failed | + * +---------------+------------+ | + * | CONFIGURING | | + * +---------------+----+ | + * |success | | + * | | +---------------+ + * | | | CLOSING | + * | | +---------------+ + * | | ^ + * V |.dev_configure | + * +---------------+----+ |.dev_close + * | CONFIGURED |----------------------------+ + * +---------------+<-----------+ + * |.dev_start | + * V | + * +---------------+ | + * | STARTING |------------^ + * +---------------+ failed | + * |success | + * | +---------------+ + * | | STOPPING | + * | +---------------+ + * | ^ + * V |.dev_stop + * +---------------+------------+ + * | STARTED | + * +---------------+ + */ +enum hns3_adapter_state { + HNS3_NIC_UNINITIALIZED = 0, + HNS3_NIC_INITIALIZED, + HNS3_NIC_CONFIGURING, + HNS3_NIC_CONFIGURED, + HNS3_NIC_STARTING, + HNS3_NIC_STARTED, + HNS3_NIC_STOPPING, + HNS3_NIC_CLOSING, + HNS3_NIC_CLOSED, + HNS3_NIC_REMOVED, + HNS3_NIC_NSTATES +}; + +/* Reset various stages, execute in order */ +enum hns3_reset_stage { + /* Stop query services, stop transceiver, disable MAC */ + RESET_STAGE_DOWN, + /* Clear reset completion flags, disable send command */ + RESET_STAGE_PREWAIT, + /* Inform IMP to start resetting */ + RESET_STAGE_REQ_HW_RESET, + /* Waiting for hardware reset to complete */ + RESET_STAGE_WAIT, + /* Reinitialize hardware */ + RESET_STAGE_DEV_INIT, + /* Restore user settings and enable MAC */ + RESET_STAGE_RESTORE, + /* Restart query services, start transceiver */ + RESET_STAGE_DONE, + /* Not in reset state */ + RESET_STAGE_NONE, +}; + +enum hns3_reset_level { + HNS3_NONE_RESET, + HNS3_VF_FUNC_RESET, /* A VF function reset */ + /* + * All VFs under a PF perform function reset. + * Kernel PF driver use mailbox to inform DPDK VF to do reset, the value + * of the reset level and the one defined in kernel driver should be + * same. + */ + HNS3_VF_PF_FUNC_RESET = 2, + /* + * All VFs under a PF perform FLR reset. + * Kernel PF driver use mailbox to inform DPDK VF to do reset, the value + * of the reset level and the one defined in kernel driver should be + * same. 
+ */ + HNS3_VF_FULL_RESET = 3, + HNS3_FLR_RESET, /* A VF perform FLR reset */ + /* All VFs under the rootport perform a global or IMP reset */ + HNS3_VF_RESET, + HNS3_FUNC_RESET, /* A PF function reset */ + /* All PFs under the rootport perform a global reset */ + HNS3_GLOBAL_RESET, + HNS3_IMP_RESET, /* All PFs under the rootport perform a IMP reset */ + HNS3_MAX_RESET +}; + +enum hns3_wait_result { + HNS3_WAIT_UNKNOWN, + HNS3_WAIT_REQUEST, + HNS3_WAIT_SUCCESS, + HNS3_WAIT_TIMEOUT +}; + +#define HNS3_RESET_SYNC_US 100000 + +struct hns3_reset_stats { + uint64_t request_cnt; /* Total request reset times */ + uint64_t global_cnt; /* Total GLOBAL reset times */ + uint64_t imp_cnt; /* Total IMP reset times */ + uint64_t exec_cnt; /* Total reset executive times */ + uint64_t success_cnt; /* Total reset successful times */ + uint64_t fail_cnt; /* Total reset failed times */ + uint64_t merge_cnt; /* Total merged in high reset times */ +}; + +typedef bool (*check_completion_func)(struct hns3_hw *hw); + +struct hns3_wait_data { + void *hns; + uint64_t end_ms; + uint64_t interval; + int16_t count; + enum hns3_wait_result result; + check_completion_func check_completion; +}; + +struct hns3_reset_ops { + void (*reset_service)(void *arg); + int (*stop_service)(struct hns3_adapter *hns); + int (*prepare_reset)(struct hns3_adapter *hns); + int (*wait_hardware_ready)(struct hns3_adapter *hns); + int (*reinit_dev)(struct hns3_adapter *hns); + int (*restore_conf)(struct hns3_adapter *hns); + int (*start_service)(struct hns3_adapter *hns); +}; + +enum hns3_schedule { + SCHEDULE_NONE, + SCHEDULE_PENDING, + SCHEDULE_REQUESTED, + SCHEDULE_DEFERRED, +}; + +struct hns3_reset_data { + enum hns3_reset_stage stage; + rte_atomic16_t schedule; + /* Reset flag, covering the entire reset process */ + rte_atomic16_t resetting; + /* Used to disable sending cmds during reset */ + rte_atomic16_t disable_cmd; + /* The reset level being processed */ + enum hns3_reset_level level; + /* Reset level set, each bit represents a reset level */ + uint64_t pending; + /* Request reset level set, from interrupt or mailbox */ + uint64_t request; + int attempts; /* Reset failure retry */ + int retries; /* Timeout failure retry in reset_post */ + /* + * At the time of global or IMP reset, the command cannot be sent to + * stop the tx/rx queues. Tx/Rx queues may be access mbuf during the + * reset process, so the mbuf is required to be released after the reset + * is completed.The mbuf_deferred_free is used to mark whether mbuf + * needs to be released. + */ + bool mbuf_deferred_free; + struct timeval start_time; + struct hns3_reset_stats stats; + const struct hns3_reset_ops *ops; + struct hns3_wait_data *wait_data; +}; + +struct hns3_hw { + struct rte_eth_dev_data *data; + void *io_base; + uint8_t revision; /* PCI revision, low byte of class word */ + struct hns3_cmq cmq; + struct hns3_mbx_resp_status mbx_resp; /* mailbox response */ + struct hns3_mbx_arq_ring arq; /* mailbox async rx queue */ + pthread_t irq_thread_id; + struct hns3_mac mac; + unsigned int secondary_cnt; /* Number of secondary processes init'd. 
*/ + struct hns3_tqp_stats tqp_stats; + /* Include Mac stats | Rx stats | Tx stats */ + struct hns3_mac_stats mac_stats; + uint32_t fw_version; + + uint16_t num_msi; + uint16_t total_tqps_num; /* total task queue pairs of this PF */ + uint16_t tqps_num; /* num task queue pairs of this function */ + uint16_t intr_tqps_num; /* num queue pairs mapping interrupt */ + uint16_t rss_size_max; /* HW defined max RSS task queue */ + uint16_t rx_buf_len; + uint16_t num_tx_desc; /* desc num of per tx queue */ + uint16_t num_rx_desc; /* desc num of per rx queue */ + + struct rte_ether_addr mc_addrs[HNS3_MC_MACADDR_NUM]; + int mc_addrs_num; /* Multicast mac addresses number */ + + /* The configuration info of RSS */ + struct hns3_rss_conf rss_info; + bool rss_dis_flag; /* disable rss flag. true: disable, false: enable */ + + uint8_t num_tc; /* Total number of enabled TCs */ + uint8_t hw_tc_map; + enum hns3_fc_mode current_mode; + enum hns3_fc_mode requested_mode; + struct hns3_dcb_info dcb_info; + enum hns3_fc_status current_fc_status; /* current flow control status */ + struct hns3_tc_queue_info tc_queue[HNS3_MAX_TC_NUM]; + uint16_t used_rx_queues; + uint16_t used_tx_queues; + + /* Config max queue numbers between rx and tx queues from user */ + uint16_t cfg_max_queues; + struct hns3_fake_queue_data fkq_data; /* fake queue data */ + uint16_t alloc_rss_size; /* RX queue number per TC */ + uint16_t tx_qnum_per_tc; /* TX queue number per TC */ + + uint32_t flag; + /* + * PMD setup and configuration is not thread safe. Since it is not + * performance sensitive, it is better to guarantee thread-safety + * and add device level lock. Adapter control operations which + * change its state should acquire the lock. + */ + rte_spinlock_t lock; + enum hns3_adapter_state adapter_state; + struct hns3_reset_data reset; +}; + +#define HNS3_FLAG_TC_BASE_SCH_MODE 1 +#define HNS3_FLAG_VNET_BASE_SCH_MODE 2 + +struct hns3_err_msix_intr_stats { + uint64_t mac_afifo_tnl_intr_cnt; + uint64_t ppu_mpf_abnormal_intr_st2_cnt; + uint64_t ssu_port_based_pf_intr_cnt; + uint64_t ppp_pf_abnormal_intr_cnt; + uint64_t ppu_pf_abnormal_intr_cnt; +}; + +/* vlan entry information. */ +struct hns3_user_vlan_table { + LIST_ENTRY(hns3_user_vlan_table) next; + bool hd_tbl_status; + uint16_t vlan_id; +}; + +struct hns3_port_base_vlan_config { + uint16_t state; + uint16_t pvid; +}; + +/* Vlan tag configuration for RX direction */ +struct hns3_rx_vtag_cfg { + uint8_t rx_vlan_offload_en; /* Whether enable rx vlan offload */ + uint8_t strip_tag1_en; /* Whether strip inner vlan tag */ + uint8_t strip_tag2_en; /* Whether strip outer vlan tag */ + uint8_t vlan1_vlan_prionly; /* Inner VLAN Tag up to descriptor Enable */ + uint8_t vlan2_vlan_prionly; /* Outer VLAN Tag up to descriptor Enable */ +}; + +/* Vlan tag configuration for TX direction */ +struct hns3_tx_vtag_cfg { + bool accept_tag1; /* Whether accept tag1 packet from host */ + bool accept_untag1; /* Whether accept untag1 packet from host */ + bool accept_tag2; + bool accept_untag2; + bool insert_tag1_en; /* Whether insert inner vlan tag */ + bool insert_tag2_en; /* Whether insert outer vlan tag */ + uint16_t default_tag1; /* The default inner vlan tag to insert */ + uint16_t default_tag2; /* The default outer vlan tag to insert */ +}; + +struct hns3_vtag_cfg { + struct hns3_rx_vtag_cfg rx_vcfg; + struct hns3_tx_vtag_cfg tx_vcfg; +}; + +/* Request types for IPC. */ +enum hns3_mp_req_type { + HNS3_MP_REQ_START_RXTX = 1, + HNS3_MP_REQ_STOP_RXTX, + HNS3_MP_REQ_MAX +}; + +/* Pameters for IPC. 
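+ * Carried in the request/response messages exchanged between the primary
+ * and secondary processes (see hns3_mp.c).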
*/ +struct hns3_mp_param { + enum hns3_mp_req_type type; + int port_id; + int result; +}; + +/* Request timeout for IPC. */ +#define HNS3_MP_REQ_TIMEOUT_SEC 5 + +/* Key string for IPC. */ +#define HNS3_MP_NAME "net_hns3_mp" + +struct hns3_pf { + struct hns3_adapter *adapter; + bool is_main_pf; + uint16_t func_num; /* num functions of this pf, include pf and vfs */ + + uint32_t pkt_buf_size; /* Total pf buf size for tx/rx */ + uint32_t tx_buf_size; /* Tx buffer size for each TC */ + uint32_t dv_buf_size; /* Dv buffer size for each TC */ + + uint16_t mps; /* Max packet size */ + + uint8_t tx_sch_mode; + uint8_t tc_max; /* max number of tc driver supported */ + uint8_t local_max_tc; /* max number of local tc */ + uint8_t pfc_max; + uint8_t prio_tc[HNS3_MAX_USER_PRIO]; /* TC indexed by prio */ + uint16_t pause_time; + bool support_fc_autoneg; /* support FC autonegotiate */ + + uint16_t wanted_umv_size; + uint16_t max_umv_size; + uint16_t used_umv_size; + + /* Statistics information for abnormal interrupt */ + struct hns3_err_msix_intr_stats abn_int_stats; + + bool support_sfp_query; + + struct hns3_vtag_cfg vtag_config; + struct hns3_port_base_vlan_config port_base_vlan_cfg; + LIST_HEAD(vlan_tbl, hns3_user_vlan_table) vlan_list; + + struct hns3_fdir_info fdir; /* flow director info */ + LIST_HEAD(counters, hns3_flow_counter) flow_counters; +}; + +struct hns3_vf { + struct hns3_adapter *adapter; +}; + +struct hns3_adapter { + struct hns3_hw hw; + + /* Specific for PF or VF */ + bool is_vf; /* false - PF, true - VF */ + union { + struct hns3_pf pf; + struct hns3_vf vf; + }; +}; + +#define HNS3_DEV_SUPPORT_DCB_B 0x0 + +#define hns3_dev_dcb_supported(hw) \ + hns3_get_bit((hw)->flag, HNS3_DEV_SUPPORT_DCB_B) + +#define HNS3_DEV_PRIVATE_TO_HW(adapter) \ + (&((struct hns3_adapter *)adapter)->hw) +#define HNS3_DEV_PRIVATE_TO_ADAPTER(adapter) \ + ((struct hns3_adapter *)adapter) +#define HNS3_DEV_PRIVATE_TO_PF(adapter) \ + (&((struct hns3_adapter *)adapter)->pf) +#define HNS3VF_DEV_PRIVATE_TO_VF(adapter) \ + (&((struct hns3_adapter *)adapter)->vf) +#define HNS3_DEV_HW_TO_ADAPTER(hw) \ + container_of(hw, struct hns3_adapter, hw) + +#define hns3_set_field(origin, mask, shift, val) \ + do { \ + (origin) &= (~(mask)); \ + (origin) |= ((val) << (shift)) & (mask); \ + } while (0) +#define hns3_get_field(origin, mask, shift) \ + (((origin) & (mask)) >> (shift)) +#define hns3_set_bit(origin, shift, val) \ + hns3_set_field((origin), (0x1UL << (shift)), (shift), (val)) +#define hns3_get_bit(origin, shift) \ + hns3_get_field((origin), (0x1UL << (shift)), (shift)) + +/* + * upper_32_bits - return bits 32-63 of a number + * A basic shift-right of a 64- or 32-bit quantity. Use this to suppress + * the "right shift count >= width of type" warning when that quantity is + * 32-bits. + */ +#define upper_32_bits(n) ((uint32_t)(((n) >> 16) >> 16)) + +/* lower_32_bits - return bits 0-31 of a number */ +#define lower_32_bits(n) ((uint32_t)(n)) + +#define BIT(nr) (1UL << (nr)) + +#define BITS_PER_LONG (__SIZEOF_LONG__ * 8) +#define GENMASK(h, l) \ + (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h)))) + +#define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y)) +#define rounddown(x, y) ((x) - ((x) % (y))) + +#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d)) + +#define max_t(type, x, y) ({ \ + type __max1 = (x); \ + type __max2 = (y); \ + __max1 > __max2 ? 
__max1 : __max2; }) + +static inline void hns3_write_reg(void *base, uint32_t reg, uint32_t value) +{ + rte_write32(value, (volatile void *)((char *)base + reg)); +} + +static inline uint32_t hns3_read_reg(void *base, uint32_t reg) +{ + return rte_read32((volatile void *)((char *)base + reg)); +} + +#define hns3_write_dev(a, reg, value) \ + hns3_write_reg((a)->io_base, (reg), (value)) + +#define hns3_read_dev(a, reg) \ + hns3_read_reg((a)->io_base, (reg)) + +#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) + +#define NEXT_ITEM_OF_ACTION(act, actions, index) \ + do { \ + act = (actions) + (index); \ + while (act->type == RTE_FLOW_ACTION_TYPE_VOID) { \ + (index)++; \ + act = actions + index; \ + } \ + } while (0) + +#define MSEC_PER_SEC 1000L +#define USEC_PER_MSEC 1000L + +static inline uint64_t +get_timeofday_ms(void) +{ + struct timeval tv; + + (void)gettimeofday(&tv, NULL); + + return (uint64_t)tv.tv_sec * MSEC_PER_SEC + tv.tv_usec / USEC_PER_MSEC; +} + +static inline uint64_t +hns3_atomic_test_bit(unsigned int nr, volatile uint64_t *addr) +{ + uint64_t res; + + res = (__atomic_load_n(addr, __ATOMIC_RELAXED) & (1UL << nr)) != 0; + return res; +} + +static inline void +hns3_atomic_set_bit(unsigned int nr, volatile uint64_t *addr) +{ + __atomic_fetch_or(addr, (1UL << nr), __ATOMIC_RELAXED); +} + +static inline void +hns3_atomic_clear_bit(unsigned int nr, volatile uint64_t *addr) +{ + __atomic_fetch_and(addr, ~(1UL << nr), __ATOMIC_RELAXED); +} + +static inline int64_t +hns3_test_and_clear_bit(unsigned int nr, volatile uint64_t *addr) +{ + uint64_t mask = (1UL << nr); + + return __atomic_fetch_and(addr, ~mask, __ATOMIC_RELAXED) & mask; +} + +int hns3_buffer_alloc(struct hns3_hw *hw); +int hns3_config_gro(struct hns3_hw *hw, bool en); +int hns3_dev_filter_ctrl(struct rte_eth_dev *dev, + enum rte_filter_type filter_type, + enum rte_filter_op filter_op, void *arg); +bool hns3_is_reset_pending(struct hns3_adapter *hns); +bool hns3vf_is_reset_pending(struct hns3_adapter *hns); +void hns3_update_link_status(struct hns3_hw *hw); + +static inline bool +is_reset_pending(struct hns3_adapter *hns) +{ + bool ret; + if (hns->is_vf) + ret = hns3vf_is_reset_pending(hns); + else + ret = hns3_is_reset_pending(hns); + return ret; +} + +#endif /* _HNS3_ETHDEV_H_ */ diff --git a/src/spdk/dpdk/drivers/net/hns3/hns3_ethdev_vf.c b/src/spdk/dpdk/drivers/net/hns3/hns3_ethdev_vf.c new file mode 100644 index 000000000..904562e03 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/hns3/hns3_ethdev_vf.c @@ -0,0 +1,2572 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018-2019 Hisilicon Limited. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "hns3_ethdev.h" +#include "hns3_logs.h" +#include "hns3_rxtx.h" +#include "hns3_regs.h" +#include "hns3_intr.h" +#include "hns3_dcb.h" +#include "hns3_mp.h" + +#define HNS3VF_KEEP_ALIVE_INTERVAL 2000000 /* us */ +#define HNS3VF_SERVICE_INTERVAL 1000000 /* us */ + +#define HNS3VF_RESET_WAIT_MS 20 +#define HNS3VF_RESET_WAIT_CNT 2000 + +/* Reset related Registers */ +#define HNS3_GLOBAL_RESET_BIT 0 +#define HNS3_CORE_RESET_BIT 1 +#define HNS3_IMP_RESET_BIT 2 +#define HNS3_FUN_RST_ING_B 0 + +enum hns3vf_evt_cause { + HNS3VF_VECTOR0_EVENT_RST, + HNS3VF_VECTOR0_EVENT_MBX, + HNS3VF_VECTOR0_EVENT_OTHER, +}; + +static enum hns3_reset_level hns3vf_get_reset_level(struct hns3_hw *hw, + uint64_t *levels); +static int hns3vf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu); +static int hns3vf_dev_configure_vlan(struct rte_eth_dev *dev); + +static int hns3vf_add_mc_mac_addr(struct hns3_hw *hw, + struct rte_ether_addr *mac_addr); +static int hns3vf_remove_mc_mac_addr(struct hns3_hw *hw, + struct rte_ether_addr *mac_addr); +/* set PCI bus mastering */ +static void +hns3vf_set_bus_master(const struct rte_pci_device *device, bool op) +{ + uint16_t reg; + + rte_pci_read_config(device, ®, sizeof(reg), PCI_COMMAND); + + if (op) + /* set the master bit */ + reg |= PCI_COMMAND_MASTER; + else + reg &= ~(PCI_COMMAND_MASTER); + + rte_pci_write_config(device, ®, sizeof(reg), PCI_COMMAND); +} + +/** + * hns3vf_find_pci_capability - lookup a capability in the PCI capability list + * @cap: the capability + * + * Return the address of the given capability within the PCI capability list. 
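+ * Returns 0 when the capability is not found. Typical use, as in
+ * hns3vf_enable_msix() below:
+ *   pos = hns3vf_find_pci_capability(device, PCI_CAP_ID_MSIX);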
+ */ +static int +hns3vf_find_pci_capability(const struct rte_pci_device *device, int cap) +{ +#define MAX_PCIE_CAPABILITY 48 + uint16_t status; + uint8_t pos; + uint8_t id; + int ttl; + + rte_pci_read_config(device, &status, sizeof(status), PCI_STATUS); + if (!(status & PCI_STATUS_CAP_LIST)) + return 0; + + ttl = MAX_PCIE_CAPABILITY; + rte_pci_read_config(device, &pos, sizeof(pos), PCI_CAPABILITY_LIST); + while (ttl-- && pos >= PCI_STD_HEADER_SIZEOF) { + rte_pci_read_config(device, &id, sizeof(id), + (pos + PCI_CAP_LIST_ID)); + + if (id == 0xFF) + break; + + if (id == cap) + return (int)pos; + + rte_pci_read_config(device, &pos, sizeof(pos), + (pos + PCI_CAP_LIST_NEXT)); + } + return 0; +} + +static int +hns3vf_enable_msix(const struct rte_pci_device *device, bool op) +{ + uint16_t control; + int pos; + + pos = hns3vf_find_pci_capability(device, PCI_CAP_ID_MSIX); + if (pos) { + rte_pci_read_config(device, &control, sizeof(control), + (pos + PCI_MSIX_FLAGS)); + if (op) + control |= PCI_MSIX_FLAGS_ENABLE; + else + control &= ~PCI_MSIX_FLAGS_ENABLE; + rte_pci_write_config(device, &control, sizeof(control), + (pos + PCI_MSIX_FLAGS)); + return 0; + } + return -ENXIO; +} + +static int +hns3vf_add_uc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) +{ + /* mac address was checked by upper level interface */ + char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; + int ret; + + ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_UNICAST, + HNS3_MBX_MAC_VLAN_UC_ADD, mac_addr->addr_bytes, + RTE_ETHER_ADDR_LEN, false, NULL, 0); + if (ret) { + rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, + mac_addr); + hns3_err(hw, "failed to add uc mac addr(%s), ret = %d", + mac_str, ret); + } + return ret; +} + +static int +hns3vf_remove_uc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) +{ + /* mac address was checked by upper level interface */ + char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; + int ret; + + ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_UNICAST, + HNS3_MBX_MAC_VLAN_UC_REMOVE, + mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN, + false, NULL, 0); + if (ret) { + rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, + mac_addr); + hns3_err(hw, "failed to add uc mac addr(%s), ret = %d", + mac_str, ret); + } + return ret; +} + +static int +hns3vf_add_mc_addr_common(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) +{ + char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; + struct rte_ether_addr *addr; + int ret; + int i; + + for (i = 0; i < hw->mc_addrs_num; i++) { + addr = &hw->mc_addrs[i]; + /* Check if there are duplicate addresses */ + if (rte_is_same_ether_addr(addr, mac_addr)) { + rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, + addr); + hns3_err(hw, "failed to add mc mac addr, same addrs" + "(%s) is added by the set_mc_mac_addr_list " + "API", mac_str); + return -EINVAL; + } + } + + ret = hns3vf_add_mc_mac_addr(hw, mac_addr); + if (ret) { + rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, + mac_addr); + hns3_err(hw, "failed to add mc mac addr(%s), ret = %d", + mac_str, ret); + } + return ret; +} + +static int +hns3vf_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr, + __rte_unused uint32_t idx, + __rte_unused uint32_t pool) +{ + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); + char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; + int ret; + + rte_spinlock_lock(&hw->lock); + + /* + * In hns3 network engine adding UC and MC mac address with different + * commands with firmware. 
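+ * (Multicast addresses are sent with the HNS3_MBX_SET_MULTICAST mailbox
+ * command, unicast addresses with HNS3_MBX_SET_UNICAST.)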
We need to determine whether the input + * address is a UC or a MC address to call different commands. + * By the way, it is recommended calling the API function named + * rte_eth_dev_set_mc_addr_list to set the MC mac address, because + * using the rte_eth_dev_mac_addr_add API function to set MC mac address + * may affect the specifications of UC mac addresses. + */ + if (rte_is_multicast_ether_addr(mac_addr)) + ret = hns3vf_add_mc_addr_common(hw, mac_addr); + else + ret = hns3vf_add_uc_mac_addr(hw, mac_addr); + + rte_spinlock_unlock(&hw->lock); + if (ret) { + rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, + mac_addr); + hns3_err(hw, "failed to add mac addr(%s), ret = %d", mac_str, + ret); + } + + return ret; +} + +static void +hns3vf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t idx) +{ + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); + /* index will be checked by upper level rte interface */ + struct rte_ether_addr *mac_addr = &dev->data->mac_addrs[idx]; + char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; + int ret; + + rte_spinlock_lock(&hw->lock); + + if (rte_is_multicast_ether_addr(mac_addr)) + ret = hns3vf_remove_mc_mac_addr(hw, mac_addr); + else + ret = hns3vf_remove_uc_mac_addr(hw, mac_addr); + + rte_spinlock_unlock(&hw->lock); + if (ret) { + rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, + mac_addr); + hns3_err(hw, "failed to remove mac addr(%s), ret = %d", + mac_str, ret); + } +} + +static int +hns3vf_set_default_mac_addr(struct rte_eth_dev *dev, + struct rte_ether_addr *mac_addr) +{ +#define HNS3_TWO_ETHER_ADDR_LEN (RTE_ETHER_ADDR_LEN * 2) + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_ether_addr *old_addr; + uint8_t addr_bytes[HNS3_TWO_ETHER_ADDR_LEN]; /* for 2 MAC addresses */ + char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; + int ret; + + /* + * It has been guaranteed that input parameter named mac_addr is valid + * address in the rte layer of DPDK framework. + */ + old_addr = (struct rte_ether_addr *)hw->mac.mac_addr; + rte_spinlock_lock(&hw->lock); + memcpy(addr_bytes, mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN); + memcpy(&addr_bytes[RTE_ETHER_ADDR_LEN], old_addr->addr_bytes, + RTE_ETHER_ADDR_LEN); + + ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_UNICAST, + HNS3_MBX_MAC_VLAN_UC_MODIFY, addr_bytes, + HNS3_TWO_ETHER_ADDR_LEN, true, NULL, 0); + if (ret) { + /* + * The hns3 VF PMD driver depends on the hns3 PF kernel ethdev + * driver. When user has configured a MAC address for VF device + * by "ip link set ..." command based on the PF device, the hns3 + * PF kernel ethdev driver does not allow VF driver to request + * reconfiguring a different default MAC address, and return + * -EPREM to VF driver through mailbox. 
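+ * In that case the request is only logged with a warning and the MAC
+ * address assigned by the PF stays in effect on the hardware.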
+ */ + if (ret == -EPERM) { + rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, + old_addr); + hns3_warn(hw, "Has permanet mac addr(%s) for vf", + mac_str); + } else { + rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, + mac_addr); + hns3_err(hw, "Failed to set mac addr(%s) for vf: %d", + mac_str, ret); + } + } + + rte_ether_addr_copy(mac_addr, + (struct rte_ether_addr *)hw->mac.mac_addr); + rte_spinlock_unlock(&hw->lock); + + return ret; +} + +static int +hns3vf_configure_mac_addr(struct hns3_adapter *hns, bool del) +{ + struct hns3_hw *hw = &hns->hw; + struct rte_ether_addr *addr; + char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; + int err = 0; + int ret; + int i; + + for (i = 0; i < HNS3_VF_UC_MACADDR_NUM; i++) { + addr = &hw->data->mac_addrs[i]; + if (rte_is_zero_ether_addr(addr)) + continue; + if (rte_is_multicast_ether_addr(addr)) + ret = del ? hns3vf_remove_mc_mac_addr(hw, addr) : + hns3vf_add_mc_mac_addr(hw, addr); + else + ret = del ? hns3vf_remove_uc_mac_addr(hw, addr) : + hns3vf_add_uc_mac_addr(hw, addr); + + if (ret) { + err = ret; + rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, + addr); + hns3_err(hw, "failed to %s mac addr(%s) index:%d " + "ret = %d.", del ? "remove" : "restore", + mac_str, i, ret); + } + } + return err; +} + +static int +hns3vf_add_mc_mac_addr(struct hns3_hw *hw, + struct rte_ether_addr *mac_addr) +{ + char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; + int ret; + + ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_MULTICAST, + HNS3_MBX_MAC_VLAN_MC_ADD, + mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN, false, + NULL, 0); + if (ret) { + rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, + mac_addr); + hns3_err(hw, "Failed to add mc mac addr(%s) for vf: %d", + mac_str, ret); + } + + return ret; +} + +static int +hns3vf_remove_mc_mac_addr(struct hns3_hw *hw, + struct rte_ether_addr *mac_addr) +{ + char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; + int ret; + + ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_MULTICAST, + HNS3_MBX_MAC_VLAN_MC_REMOVE, + mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN, false, + NULL, 0); + if (ret) { + rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, + mac_addr); + hns3_err(hw, "Failed to remove mc mac addr(%s) for vf: %d", + mac_str, ret); + } + + return ret; +} + +static int +hns3vf_set_mc_addr_chk_param(struct hns3_hw *hw, + struct rte_ether_addr *mc_addr_set, + uint32_t nb_mc_addr) +{ + char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; + struct rte_ether_addr *addr; + uint32_t i; + uint32_t j; + + if (nb_mc_addr > HNS3_MC_MACADDR_NUM) { + hns3_err(hw, "failed to set mc mac addr, nb_mc_addr(%d) " + "invalid. valid range: 0~%d", + nb_mc_addr, HNS3_MC_MACADDR_NUM); + return -EINVAL; + } + + /* Check if input mac addresses are valid */ + for (i = 0; i < nb_mc_addr; i++) { + addr = &mc_addr_set[i]; + if (!rte_is_multicast_ether_addr(addr)) { + rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, + addr); + hns3_err(hw, + "failed to set mc mac addr, addr(%s) invalid.", + mac_str); + return -EINVAL; + } + + /* Check if there are duplicate addresses */ + for (j = i + 1; j < nb_mc_addr; j++) { + if (rte_is_same_ether_addr(addr, &mc_addr_set[j])) { + rte_ether_format_addr(mac_str, + RTE_ETHER_ADDR_FMT_SIZE, + addr); + hns3_err(hw, "failed to set mc mac addr, " + "addrs invalid. 
two same addrs(%s).", + mac_str); + return -EINVAL; + } + } + + /* + * Check if there are duplicate addresses between mac_addrs + * and mc_addr_set + */ + for (j = 0; j < HNS3_VF_UC_MACADDR_NUM; j++) { + if (rte_is_same_ether_addr(addr, + &hw->data->mac_addrs[j])) { + rte_ether_format_addr(mac_str, + RTE_ETHER_ADDR_FMT_SIZE, + addr); + hns3_err(hw, "failed to set mc mac addr, " + "addrs invalid. addrs(%s) has already " + "configured in mac_addr add API", + mac_str); + return -EINVAL; + } + } + } + + return 0; +} + +static int +hns3vf_set_mc_mac_addr_list(struct rte_eth_dev *dev, + struct rte_ether_addr *mc_addr_set, + uint32_t nb_mc_addr) +{ + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_ether_addr *addr; + int cur_addr_num; + int set_addr_num; + int num; + int ret; + int i; + + ret = hns3vf_set_mc_addr_chk_param(hw, mc_addr_set, nb_mc_addr); + if (ret) + return ret; + + rte_spinlock_lock(&hw->lock); + cur_addr_num = hw->mc_addrs_num; + for (i = 0; i < cur_addr_num; i++) { + num = cur_addr_num - i - 1; + addr = &hw->mc_addrs[num]; + ret = hns3vf_remove_mc_mac_addr(hw, addr); + if (ret) { + rte_spinlock_unlock(&hw->lock); + return ret; + } + + hw->mc_addrs_num--; + } + + set_addr_num = (int)nb_mc_addr; + for (i = 0; i < set_addr_num; i++) { + addr = &mc_addr_set[i]; + ret = hns3vf_add_mc_mac_addr(hw, addr); + if (ret) { + rte_spinlock_unlock(&hw->lock); + return ret; + } + + rte_ether_addr_copy(addr, &hw->mc_addrs[hw->mc_addrs_num]); + hw->mc_addrs_num++; + } + rte_spinlock_unlock(&hw->lock); + + return 0; +} + +static int +hns3vf_configure_all_mc_mac_addr(struct hns3_adapter *hns, bool del) +{ + char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; + struct hns3_hw *hw = &hns->hw; + struct rte_ether_addr *addr; + int err = 0; + int ret; + int i; + + for (i = 0; i < hw->mc_addrs_num; i++) { + addr = &hw->mc_addrs[i]; + if (!rte_is_multicast_ether_addr(addr)) + continue; + if (del) + ret = hns3vf_remove_mc_mac_addr(hw, addr); + else + ret = hns3vf_add_mc_mac_addr(hw, addr); + if (ret) { + err = ret; + rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, + addr); + hns3_err(hw, "Failed to %s mc mac addr: %s for vf: %d", + del ? "Remove" : "Restore", mac_str, ret); + } + } + return err; +} + +static int +hns3vf_set_promisc_mode(struct hns3_hw *hw, bool en_bc_pmc, + bool en_uc_pmc, bool en_mc_pmc) +{ + struct hns3_mbx_vf_to_pf_cmd *req; + struct hns3_cmd_desc desc; + int ret; + + req = (struct hns3_mbx_vf_to_pf_cmd *)desc.data; + + /* + * The hns3 VF PMD driver depends on the hns3 PF kernel ethdev driver, + * so there are some features for promiscuous/allmulticast mode in hns3 + * VF PMD driver as below: + * 1. The promiscuous/allmulticast mode can be configured successfully + * only based on the trusted VF device. If based on the non trusted + * VF device, configuring promiscuous/allmulticast mode will fail. + * The hns3 VF device can be confiruged as trusted device by hns3 PF + * kernel ethdev driver on the host by the following command: + * "ip link set vf turst on" + * 2. After the promiscuous mode is configured successfully, hns3 VF PMD + * driver can receive the ingress and outgoing traffic. In the words, + * all the ingress packets, all the packets sent from the PF and + * other VFs on the same physical port. + * 3. Note: Because of the hardware constraints, By default vlan filter + * is enabled and couldn't be turned off based on VF device, so vlan + * filter is still effective even in promiscuous mode. 
If upper + * applications don't call rte_eth_dev_vlan_filter API function to + * set vlan based on VF device, hns3 VF PMD driver will can't receive + * the packets with vlan tag in promiscuoue mode. + */ + hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MBX_VF_TO_PF, false); + req->msg[0] = HNS3_MBX_SET_PROMISC_MODE; + req->msg[1] = en_bc_pmc ? 1 : 0; + req->msg[2] = en_uc_pmc ? 1 : 0; + req->msg[3] = en_mc_pmc ? 1 : 0; + + ret = hns3_cmd_send(hw, &desc, 1); + if (ret) + hns3_err(hw, "Set promisc mode fail, ret = %d", ret); + + return ret; +} + +static int +hns3vf_dev_promiscuous_enable(struct rte_eth_dev *dev) +{ + struct hns3_adapter *hns = dev->data->dev_private; + struct hns3_hw *hw = &hns->hw; + int ret; + + ret = hns3vf_set_promisc_mode(hw, true, true, true); + if (ret) + hns3_err(hw, "Failed to enable promiscuous mode, ret = %d", + ret); + return ret; +} + +static int +hns3vf_dev_promiscuous_disable(struct rte_eth_dev *dev) +{ + bool allmulti = dev->data->all_multicast ? true : false; + struct hns3_adapter *hns = dev->data->dev_private; + struct hns3_hw *hw = &hns->hw; + int ret; + + ret = hns3vf_set_promisc_mode(hw, true, false, allmulti); + if (ret) + hns3_err(hw, "Failed to disable promiscuous mode, ret = %d", + ret); + return ret; +} + +static int +hns3vf_dev_allmulticast_enable(struct rte_eth_dev *dev) +{ + struct hns3_adapter *hns = dev->data->dev_private; + struct hns3_hw *hw = &hns->hw; + int ret; + + if (dev->data->promiscuous) + return 0; + + ret = hns3vf_set_promisc_mode(hw, true, false, true); + if (ret) + hns3_err(hw, "Failed to enable allmulticast mode, ret = %d", + ret); + return ret; +} + +static int +hns3vf_dev_allmulticast_disable(struct rte_eth_dev *dev) +{ + struct hns3_adapter *hns = dev->data->dev_private; + struct hns3_hw *hw = &hns->hw; + int ret; + + if (dev->data->promiscuous) + return 0; + + ret = hns3vf_set_promisc_mode(hw, true, false, false); + if (ret) + hns3_err(hw, "Failed to disable allmulticast mode, ret = %d", + ret); + return ret; +} + +static int +hns3vf_restore_promisc(struct hns3_adapter *hns) +{ + struct hns3_hw *hw = &hns->hw; + bool allmulti = hw->data->all_multicast ? true : false; + + if (hw->data->promiscuous) + return hns3vf_set_promisc_mode(hw, true, true, true); + + return hns3vf_set_promisc_mode(hw, true, false, allmulti); +} + +static int +hns3vf_bind_ring_with_vector(struct hns3_hw *hw, uint8_t vector_id, + bool mmap, enum hns3_ring_type queue_type, + uint16_t queue_id) +{ + struct hns3_vf_bind_vector_msg bind_msg; + const char *op_str; + uint16_t code; + int ret; + + memset(&bind_msg, 0, sizeof(bind_msg)); + code = mmap ? HNS3_MBX_MAP_RING_TO_VECTOR : + HNS3_MBX_UNMAP_RING_TO_VECTOR; + bind_msg.vector_id = vector_id; + + if (queue_type == HNS3_RING_TYPE_RX) + bind_msg.param[0].int_gl_index = HNS3_RING_GL_RX; + else + bind_msg.param[0].int_gl_index = HNS3_RING_GL_TX; + + bind_msg.param[0].ring_type = queue_type; + bind_msg.ring_num = 1; + bind_msg.param[0].tqp_index = queue_id; + op_str = mmap ? "Map" : "Unmap"; + ret = hns3_send_mbx_msg(hw, code, 0, (uint8_t *)&bind_msg, + sizeof(bind_msg), false, NULL, 0); + if (ret) + hns3_err(hw, "%s TQP %d fail, vector_id is %d, ret is %d.", + op_str, queue_id, bind_msg.vector_id, ret); + + return ret; +} + +static int +hns3vf_init_ring_with_vector(struct hns3_hw *hw) +{ + uint8_t vec; + int ret; + int i; + + /* + * In hns3 network engine, vector 0 is always the misc interrupt of this + * function, vector 1~N can be used respectively for the queues of the + * function. 
Tx and Rx queues with the same number share the interrupt + * vector. In the initialization clearing the all hardware mapping + * relationship configurations between queues and interrupt vectors is + * needed, so some error caused by the residual configurations, such as + * the unexpected Tx interrupt, can be avoid. Because of the hardware + * constraints in hns3 hardware engine, we have to implement clearing + * the mapping relationship configurations by binding all queues to the + * last interrupt vector and reserving the last interrupt vector. This + * method results in a decrease of the maximum queues when upper + * applications call the rte_eth_dev_configure API function to enable + * Rx interrupt. + */ + vec = hw->num_msi - 1; /* vector 0 for misc interrupt, not for queue */ + /* vec - 1: the last interrupt is reserved */ + hw->intr_tqps_num = vec > hw->tqps_num ? hw->tqps_num : vec - 1; + for (i = 0; i < hw->intr_tqps_num; i++) { + /* + * Set gap limiter and rate limiter configuration of queue's + * interrupt. + */ + hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_RX, + HNS3_TQP_INTR_GL_DEFAULT); + hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_TX, + HNS3_TQP_INTR_GL_DEFAULT); + hns3_set_queue_intr_rl(hw, i, HNS3_TQP_INTR_RL_DEFAULT); + + ret = hns3vf_bind_ring_with_vector(hw, vec, false, + HNS3_RING_TYPE_TX, i); + if (ret) { + PMD_INIT_LOG(ERR, "VF fail to unbind TX ring(%d) with " + "vector: %d, ret=%d", i, vec, ret); + return ret; + } + + ret = hns3vf_bind_ring_with_vector(hw, vec, false, + HNS3_RING_TYPE_RX, i); + if (ret) { + PMD_INIT_LOG(ERR, "VF fail to unbind RX ring(%d) with " + "vector: %d, ret=%d", i, vec, ret); + return ret; + } + } + + return 0; +} + +static int +hns3vf_dev_configure(struct rte_eth_dev *dev) +{ + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct hns3_rss_conf *rss_cfg = &hw->rss_info; + struct rte_eth_conf *conf = &dev->data->dev_conf; + enum rte_eth_rx_mq_mode mq_mode = conf->rxmode.mq_mode; + uint16_t nb_rx_q = dev->data->nb_rx_queues; + uint16_t nb_tx_q = dev->data->nb_tx_queues; + struct rte_eth_rss_conf rss_conf; + uint16_t mtu; + int ret; + + /* + * Hardware does not support individually enable/disable/reset the Tx or + * Rx queue in hns3 network engine. Driver must enable/disable/reset Tx + * and Rx queues at the same time. When the numbers of Tx queues + * allocated by upper applications are not equal to the numbers of Rx + * queues, driver needs to setup fake Tx or Rx queues to adjust numbers + * of Tx/Rx queues. otherwise, network engine can not work as usual. But + * these fake queues are imperceptible, and can not be used by upper + * applications. + */ + ret = hns3_set_fake_rx_or_tx_queues(dev, nb_rx_q, nb_tx_q); + if (ret) { + hns3_err(hw, "Failed to set rx/tx fake queues: %d", ret); + return ret; + } + + hw->adapter_state = HNS3_NIC_CONFIGURING; + if (conf->link_speeds & ETH_LINK_SPEED_FIXED) { + hns3_err(hw, "setting link speed/duplex not supported"); + ret = -EINVAL; + goto cfg_err; + } + + /* When RSS is not configured, redirect the packet queue 0 */ + if ((uint32_t)mq_mode & ETH_MQ_RX_RSS_FLAG) { + rss_conf = conf->rx_adv_conf.rss_conf; + if (rss_conf.rss_key == NULL) { + rss_conf.rss_key = rss_cfg->key; + rss_conf.rss_key_len = HNS3_RSS_KEY_SIZE; + } + + ret = hns3_dev_rss_hash_update(dev, &rss_conf); + if (ret) + goto cfg_err; + } + + /* + * If jumbo frames are enabled, MTU needs to be refreshed + * according to the maximum RX packet length. 
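+ * e.g. with DEV_RX_OFFLOAD_JUMBO_FRAME set and max_rx_pkt_len = 9000, the
+ * driver passes HNS3_PKTLEN_TO_MTU(9000) to hns3vf_dev_mtu_set(), which
+ * asks the PF to apply it through the HNS3_MBX_SET_MTU mailbox message.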
+ */ + if (conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) { + /* + * Security of max_rx_pkt_len is guaranteed in dpdk frame. + * Maximum value of max_rx_pkt_len is HNS3_MAX_FRAME_LEN, so it + * can safely assign to "uint16_t" type variable. + */ + mtu = (uint16_t)HNS3_PKTLEN_TO_MTU(conf->rxmode.max_rx_pkt_len); + ret = hns3vf_dev_mtu_set(dev, mtu); + if (ret) + goto cfg_err; + dev->data->mtu = mtu; + } + + ret = hns3vf_dev_configure_vlan(dev); + if (ret) + goto cfg_err; + + hw->adapter_state = HNS3_NIC_CONFIGURED; + return 0; + +cfg_err: + (void)hns3_set_fake_rx_or_tx_queues(dev, 0, 0); + hw->adapter_state = HNS3_NIC_INITIALIZED; + + return ret; +} + +static int +hns3vf_config_mtu(struct hns3_hw *hw, uint16_t mtu) +{ + int ret; + + ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_MTU, 0, (const uint8_t *)&mtu, + sizeof(mtu), true, NULL, 0); + if (ret) + hns3_err(hw, "Failed to set mtu (%u) for vf: %d", mtu, ret); + + return ret; +} + +static int +hns3vf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) +{ + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t frame_size = mtu + HNS3_ETH_OVERHEAD; + int ret; + + /* + * The hns3 PF/VF devices on the same port share the hardware MTU + * configuration. Currently, we send mailbox to inform hns3 PF kernel + * ethdev driver to finish hardware MTU configuration in hns3 VF PMD + * driver, there is no need to stop the port for hns3 VF device, and the + * MTU value issued by hns3 VF PMD driver must be less than or equal to + * PF's MTU. + */ + if (rte_atomic16_read(&hw->reset.resetting)) { + hns3_err(hw, "Failed to set mtu during resetting"); + return -EIO; + } + + rte_spinlock_lock(&hw->lock); + ret = hns3vf_config_mtu(hw, mtu); + if (ret) { + rte_spinlock_unlock(&hw->lock); + return ret; + } + if (frame_size > RTE_ETHER_MAX_LEN) + dev->data->dev_conf.rxmode.offloads |= + DEV_RX_OFFLOAD_JUMBO_FRAME; + else + dev->data->dev_conf.rxmode.offloads &= + ~DEV_RX_OFFLOAD_JUMBO_FRAME; + dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size; + rte_spinlock_unlock(&hw->lock); + + return 0; +} + +static int +hns3vf_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info) +{ + struct hns3_adapter *hns = eth_dev->data->dev_private; + struct hns3_hw *hw = &hns->hw; + uint16_t q_num = hw->tqps_num; + + /* + * In interrupt mode, 'max_rx_queues' is set based on the number of + * MSI-X interrupt resources of the hardware. 
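+ * i.e. intr_tqps_num (the number of queue pairs that can be mapped to an
+ * interrupt vector) is reported instead of the full tqps_num.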
+ */ + if (hw->data->dev_conf.intr_conf.rxq == 1) + q_num = hw->intr_tqps_num; + + info->max_rx_queues = q_num; + info->max_tx_queues = hw->tqps_num; + info->max_rx_pktlen = HNS3_MAX_FRAME_LEN; /* CRC included */ + info->min_rx_bufsize = hw->rx_buf_len; + info->max_mac_addrs = HNS3_VF_UC_MACADDR_NUM; + info->max_mtu = info->max_rx_pktlen - HNS3_ETH_OVERHEAD; + + info->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM | + DEV_RX_OFFLOAD_UDP_CKSUM | + DEV_RX_OFFLOAD_TCP_CKSUM | + DEV_RX_OFFLOAD_SCTP_CKSUM | + DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | + DEV_RX_OFFLOAD_OUTER_UDP_CKSUM | + DEV_RX_OFFLOAD_KEEP_CRC | + DEV_RX_OFFLOAD_SCATTER | + DEV_RX_OFFLOAD_VLAN_STRIP | + DEV_RX_OFFLOAD_QINQ_STRIP | + DEV_RX_OFFLOAD_VLAN_FILTER | + DEV_RX_OFFLOAD_JUMBO_FRAME | + DEV_RX_OFFLOAD_RSS_HASH); + info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE; + info->tx_offload_capa = (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | + DEV_TX_OFFLOAD_IPV4_CKSUM | + DEV_TX_OFFLOAD_TCP_CKSUM | + DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_SCTP_CKSUM | + DEV_TX_OFFLOAD_VLAN_INSERT | + DEV_TX_OFFLOAD_QINQ_INSERT | + DEV_TX_OFFLOAD_MULTI_SEGS | + DEV_TX_OFFLOAD_TCP_TSO | + DEV_TX_OFFLOAD_VXLAN_TNL_TSO | + DEV_TX_OFFLOAD_GRE_TNL_TSO | + DEV_TX_OFFLOAD_GENEVE_TNL_TSO | + info->tx_queue_offload_capa); + + info->rx_desc_lim = (struct rte_eth_desc_lim) { + .nb_max = HNS3_MAX_RING_DESC, + .nb_min = HNS3_MIN_RING_DESC, + .nb_align = HNS3_ALIGN_RING_DESC, + }; + + info->tx_desc_lim = (struct rte_eth_desc_lim) { + .nb_max = HNS3_MAX_RING_DESC, + .nb_min = HNS3_MIN_RING_DESC, + .nb_align = HNS3_ALIGN_RING_DESC, + }; + + info->vmdq_queue_num = 0; + + info->reta_size = HNS3_RSS_IND_TBL_SIZE; + info->hash_key_size = HNS3_RSS_KEY_SIZE; + info->flow_type_rss_offloads = HNS3_ETH_RSS_SUPPORT; + info->default_rxportconf.ring_size = HNS3_DEFAULT_RING_DESC; + info->default_txportconf.ring_size = HNS3_DEFAULT_RING_DESC; + + return 0; +} + +static void +hns3vf_clear_event_cause(struct hns3_hw *hw, uint32_t regclr) +{ + hns3_write_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG, regclr); +} + +static void +hns3vf_disable_irq0(struct hns3_hw *hw) +{ + hns3_write_dev(hw, HNS3_MISC_VECTOR_REG_BASE, 0); +} + +static void +hns3vf_enable_irq0(struct hns3_hw *hw) +{ + hns3_write_dev(hw, HNS3_MISC_VECTOR_REG_BASE, 1); +} + +static enum hns3vf_evt_cause +hns3vf_check_event_cause(struct hns3_adapter *hns, uint32_t *clearval) +{ + struct hns3_hw *hw = &hns->hw; + enum hns3vf_evt_cause ret; + uint32_t cmdq_stat_reg; + uint32_t rst_ing_reg; + uint32_t val; + + /* Fetch the events from their corresponding regs */ + cmdq_stat_reg = hns3_read_dev(hw, HNS3_VECTOR0_CMDQ_STAT_REG); + + if (BIT(HNS3_VECTOR0_RST_INT_B) & cmdq_stat_reg) { + rst_ing_reg = hns3_read_dev(hw, HNS3_FUN_RST_ING); + hns3_warn(hw, "resetting reg: 0x%x", rst_ing_reg); + hns3_atomic_set_bit(HNS3_VF_RESET, &hw->reset.pending); + rte_atomic16_set(&hw->reset.disable_cmd, 1); + val = hns3_read_dev(hw, HNS3_VF_RST_ING); + hns3_write_dev(hw, HNS3_VF_RST_ING, val | HNS3_VF_RST_ING_BIT); + val = cmdq_stat_reg & ~BIT(HNS3_VECTOR0_RST_INT_B); + if (clearval) { + hw->reset.stats.global_cnt++; + hns3_warn(hw, "Global reset detected, clear reset status"); + } else { + hns3_schedule_delayed_reset(hns); + hns3_warn(hw, "Global reset detected, don't clear reset status"); + } + + ret = HNS3VF_VECTOR0_EVENT_RST; + goto out; + } + + /* Check for vector0 mailbox(=CMDQ RX) event source */ + if (BIT(HNS3_VECTOR0_RX_CMDQ_INT_B) & cmdq_stat_reg) { + val = cmdq_stat_reg & ~BIT(HNS3_VECTOR0_RX_CMDQ_INT_B); + ret = HNS3VF_VECTOR0_EVENT_MBX; + 
goto out; + } + + val = 0; + ret = HNS3VF_VECTOR0_EVENT_OTHER; +out: + if (clearval) + *clearval = val; + return ret; +} + +static void +hns3vf_interrupt_handler(void *param) +{ + struct rte_eth_dev *dev = (struct rte_eth_dev *)param; + struct hns3_adapter *hns = dev->data->dev_private; + struct hns3_hw *hw = &hns->hw; + enum hns3vf_evt_cause event_cause; + uint32_t clearval; + + if (hw->irq_thread_id == 0) + hw->irq_thread_id = pthread_self(); + + /* Disable interrupt */ + hns3vf_disable_irq0(hw); + + /* Read out interrupt causes */ + event_cause = hns3vf_check_event_cause(hns, &clearval); + + switch (event_cause) { + case HNS3VF_VECTOR0_EVENT_RST: + hns3_schedule_reset(hns); + break; + case HNS3VF_VECTOR0_EVENT_MBX: + hns3_dev_handle_mbx_msg(hw); + break; + default: + break; + } + + /* Clear interrupt causes */ + hns3vf_clear_event_cause(hw, clearval); + + /* Enable interrupt */ + hns3vf_enable_irq0(hw); +} + +static int +hns3vf_check_tqp_info(struct hns3_hw *hw) +{ + uint16_t tqps_num; + + tqps_num = hw->tqps_num; + if (tqps_num > HNS3_MAX_TQP_NUM_PER_FUNC || tqps_num == 0) { + PMD_INIT_LOG(ERR, "Get invalid tqps_num(%u) from PF. valid " + "range: 1~%d", + tqps_num, HNS3_MAX_TQP_NUM_PER_FUNC); + return -EINVAL; + } + + if (hw->rx_buf_len == 0) + hw->rx_buf_len = HNS3_DEFAULT_RX_BUF_LEN; + hw->alloc_rss_size = RTE_MIN(hw->rss_size_max, hw->tqps_num); + + return 0; +} + +static int +hns3vf_get_queue_info(struct hns3_hw *hw) +{ +#define HNS3VF_TQPS_RSS_INFO_LEN 6 + uint8_t resp_msg[HNS3VF_TQPS_RSS_INFO_LEN]; + int ret; + + ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_QINFO, 0, NULL, 0, true, + resp_msg, HNS3VF_TQPS_RSS_INFO_LEN); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to get tqp info from PF: %d", ret); + return ret; + } + + memcpy(&hw->tqps_num, &resp_msg[0], sizeof(uint16_t)); + memcpy(&hw->rss_size_max, &resp_msg[2], sizeof(uint16_t)); + memcpy(&hw->rx_buf_len, &resp_msg[4], sizeof(uint16_t)); + + return hns3vf_check_tqp_info(hw); +} + +static int +hns3vf_get_queue_depth(struct hns3_hw *hw) +{ +#define HNS3VF_TQPS_DEPTH_INFO_LEN 4 + uint8_t resp_msg[HNS3VF_TQPS_DEPTH_INFO_LEN]; + int ret; + + ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_QDEPTH, 0, NULL, 0, true, + resp_msg, HNS3VF_TQPS_DEPTH_INFO_LEN); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to get tqp depth info from PF: %d", + ret); + return ret; + } + + memcpy(&hw->num_tx_desc, &resp_msg[0], sizeof(uint16_t)); + memcpy(&hw->num_rx_desc, &resp_msg[2], sizeof(uint16_t)); + + return 0; +} + +static int +hns3vf_get_tc_info(struct hns3_hw *hw) +{ + uint8_t resp_msg; + int ret; + + ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_TCINFO, 0, NULL, 0, + true, &resp_msg, sizeof(resp_msg)); + if (ret) { + hns3_err(hw, "VF request to get TC info from PF failed %d", + ret); + return ret; + } + + hw->hw_tc_map = resp_msg; + + return 0; +} + +static int +hns3vf_get_host_mac_addr(struct hns3_hw *hw) +{ + uint8_t host_mac[RTE_ETHER_ADDR_LEN]; + int ret; + + ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_MAC_ADDR, 0, NULL, 0, + true, host_mac, RTE_ETHER_ADDR_LEN); + if (ret) { + hns3_err(hw, "Failed to get mac addr from PF: %d", ret); + return ret; + } + + memcpy(hw->mac.mac_addr, host_mac, RTE_ETHER_ADDR_LEN); + + return 0; +} + +static int +hns3vf_get_configuration(struct hns3_hw *hw) +{ + int ret; + + hw->mac.media_type = HNS3_MEDIA_TYPE_NONE; + hw->rss_dis_flag = false; + + /* Get queue configuration from PF */ + ret = hns3vf_get_queue_info(hw); + if (ret) + return ret; + + /* Get queue depth info from PF */ + ret = hns3vf_get_queue_depth(hw); + if (ret) + return 
ret; + + /* Get user defined VF MAC addr from PF */ + ret = hns3vf_get_host_mac_addr(hw); + if (ret) + return ret; + + /* Get tc configuration from PF */ + return hns3vf_get_tc_info(hw); +} + +static int +hns3vf_set_tc_info(struct hns3_adapter *hns) +{ + struct hns3_hw *hw = &hns->hw; + uint16_t nb_rx_q = hw->data->nb_rx_queues; + uint16_t nb_tx_q = hw->data->nb_tx_queues; + uint8_t i; + + hw->num_tc = 0; + for (i = 0; i < HNS3_MAX_TC_NUM; i++) + if (hw->hw_tc_map & BIT(i)) + hw->num_tc++; + + if (nb_rx_q < hw->num_tc) { + hns3_err(hw, "number of Rx queues(%d) is less than tcs(%d).", + nb_rx_q, hw->num_tc); + return -EINVAL; + } + + if (nb_tx_q < hw->num_tc) { + hns3_err(hw, "number of Tx queues(%d) is less than tcs(%d).", + nb_tx_q, hw->num_tc); + return -EINVAL; + } + + hns3_set_rss_size(hw, nb_rx_q); + hns3_tc_queue_mapping_cfg(hw, nb_tx_q); + + return 0; +} + +static void +hns3vf_request_link_info(struct hns3_hw *hw) +{ + uint8_t resp_msg; + int ret; + + if (rte_atomic16_read(&hw->reset.resetting)) + return; + ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_LINK_STATUS, 0, NULL, 0, false, + &resp_msg, sizeof(resp_msg)); + if (ret) + hns3_err(hw, "Failed to fetch link status from PF: %d", ret); +} + +static int +hns3vf_vlan_filter_configure(struct hns3_adapter *hns, uint16_t vlan_id, int on) +{ +#define HNS3VF_VLAN_MBX_MSG_LEN 5 + struct hns3_hw *hw = &hns->hw; + uint8_t msg_data[HNS3VF_VLAN_MBX_MSG_LEN]; + uint16_t proto = htons(RTE_ETHER_TYPE_VLAN); + uint8_t is_kill = on ? 0 : 1; + + msg_data[0] = is_kill; + memcpy(&msg_data[1], &vlan_id, sizeof(vlan_id)); + memcpy(&msg_data[3], &proto, sizeof(proto)); + + return hns3_send_mbx_msg(hw, HNS3_MBX_SET_VLAN, HNS3_MBX_VLAN_FILTER, + msg_data, HNS3VF_VLAN_MBX_MSG_LEN, true, NULL, + 0); +} + +static int +hns3vf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) +{ + struct hns3_adapter *hns = dev->data->dev_private; + struct hns3_hw *hw = &hns->hw; + int ret; + + if (rte_atomic16_read(&hw->reset.resetting)) { + hns3_err(hw, + "vf set vlan id failed during resetting, vlan_id =%u", + vlan_id); + return -EIO; + } + rte_spinlock_lock(&hw->lock); + ret = hns3vf_vlan_filter_configure(hns, vlan_id, on); + rte_spinlock_unlock(&hw->lock); + if (ret) + hns3_err(hw, "vf set vlan id failed, vlan_id =%u, ret =%d", + vlan_id, ret); + + return ret; +} + +static int +hns3vf_en_hw_strip_rxvtag(struct hns3_hw *hw, bool enable) +{ + uint8_t msg_data; + int ret; + + msg_data = enable ? 
1 : 0; + ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_VLAN, HNS3_MBX_VLAN_RX_OFF_CFG, + &msg_data, sizeof(msg_data), false, NULL, 0); + if (ret) + hns3_err(hw, "vf enable strip failed, ret =%d", ret); + + return ret; +} + +static int +hns3vf_vlan_offload_set(struct rte_eth_dev *dev, int mask) +{ + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_eth_conf *dev_conf = &dev->data->dev_conf; + unsigned int tmp_mask; + int ret = 0; + + if (rte_atomic16_read(&hw->reset.resetting)) { + hns3_err(hw, "vf set vlan offload failed during resetting, " + "mask = 0x%x", mask); + return -EIO; + } + + tmp_mask = (unsigned int)mask; + /* Vlan stripping setting */ + if (tmp_mask & ETH_VLAN_STRIP_MASK) { + rte_spinlock_lock(&hw->lock); + /* Enable or disable VLAN stripping */ + if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP) + ret = hns3vf_en_hw_strip_rxvtag(hw, true); + else + ret = hns3vf_en_hw_strip_rxvtag(hw, false); + rte_spinlock_unlock(&hw->lock); + } + + return ret; +} + +static int +hns3vf_handle_all_vlan_table(struct hns3_adapter *hns, int on) +{ + struct rte_vlan_filter_conf *vfc; + struct hns3_hw *hw = &hns->hw; + uint16_t vlan_id; + uint64_t vbit; + uint64_t ids; + int ret = 0; + uint32_t i; + + vfc = &hw->data->vlan_filter_conf; + for (i = 0; i < RTE_DIM(vfc->ids); i++) { + if (vfc->ids[i] == 0) + continue; + ids = vfc->ids[i]; + while (ids) { + /* + * 64 means the num bits of ids, one bit corresponds to + * one vlan id + */ + vlan_id = 64 * i; + /* count trailing zeroes */ + vbit = ~ids & (ids - 1); + /* clear least significant bit set */ + ids ^= (ids ^ (ids - 1)) ^ vbit; + for (; vbit;) { + vbit >>= 1; + vlan_id++; + } + ret = hns3vf_vlan_filter_configure(hns, vlan_id, on); + if (ret) { + hns3_err(hw, + "VF handle vlan table failed, ret =%d, on = %d", + ret, on); + return ret; + } + } + } + + return ret; +} + +static int +hns3vf_remove_all_vlan_table(struct hns3_adapter *hns) +{ + return hns3vf_handle_all_vlan_table(hns, 0); +} + +static int +hns3vf_restore_vlan_conf(struct hns3_adapter *hns) +{ + struct hns3_hw *hw = &hns->hw; + struct rte_eth_conf *dev_conf; + bool en; + int ret; + + dev_conf = &hw->data->dev_conf; + en = dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP ? true + : false; + ret = hns3vf_en_hw_strip_rxvtag(hw, en); + if (ret) + hns3_err(hw, "VF restore vlan conf fail, en =%d, ret =%d", en, + ret); + return ret; +} + +static int +hns3vf_dev_configure_vlan(struct rte_eth_dev *dev) +{ + struct hns3_adapter *hns = dev->data->dev_private; + struct rte_eth_dev_data *data = dev->data; + struct hns3_hw *hw = &hns->hw; + int ret; + + if (data->dev_conf.txmode.hw_vlan_reject_tagged || + data->dev_conf.txmode.hw_vlan_reject_untagged || + data->dev_conf.txmode.hw_vlan_insert_pvid) { + hns3_warn(hw, "hw_vlan_reject_tagged, hw_vlan_reject_untagged " + "or hw_vlan_insert_pvid is not support!"); + } + + /* Apply vlan offload setting */ + ret = hns3vf_vlan_offload_set(dev, ETH_VLAN_STRIP_MASK); + if (ret) + hns3_err(hw, "dev config vlan offload failed, ret =%d", ret); + + return ret; +} + +static int +hns3vf_set_alive(struct hns3_hw *hw, bool alive) +{ + uint8_t msg_data; + + msg_data = alive ? 
1 : 0; + return hns3_send_mbx_msg(hw, HNS3_MBX_SET_ALIVE, 0, &msg_data, + sizeof(msg_data), false, NULL, 0); +} + +static void +hns3vf_keep_alive_handler(void *param) +{ + struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param; + struct hns3_adapter *hns = eth_dev->data->dev_private; + struct hns3_hw *hw = &hns->hw; + uint8_t respmsg; + int ret; + + ret = hns3_send_mbx_msg(hw, HNS3_MBX_KEEP_ALIVE, 0, NULL, 0, + false, &respmsg, sizeof(uint8_t)); + if (ret) + hns3_err(hw, "VF sends keeping alive cmd failed(=%d)", + ret); + + rte_eal_alarm_set(HNS3VF_KEEP_ALIVE_INTERVAL, hns3vf_keep_alive_handler, + eth_dev); +} + +static void +hns3vf_service_handler(void *param) +{ + struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param; + struct hns3_adapter *hns = eth_dev->data->dev_private; + struct hns3_hw *hw = &hns->hw; + + /* + * The query link status and reset processing are executed in the + * interrupt thread.When the IMP reset occurs, IMP will not respond, + * and the query operation will time out after 30ms. In the case of + * multiple PF/VFs, each query failure timeout causes the IMP reset + * interrupt to fail to respond within 100ms. + * Before querying the link status, check whether there is a reset + * pending, and if so, abandon the query. + */ + if (!hns3vf_is_reset_pending(hns)) + hns3vf_request_link_info(hw); + else + hns3_warn(hw, "Cancel the query when reset is pending"); + + rte_eal_alarm_set(HNS3VF_SERVICE_INTERVAL, hns3vf_service_handler, + eth_dev); +} + +static int +hns3_query_vf_resource(struct hns3_hw *hw) +{ + struct hns3_vf_res_cmd *req; + struct hns3_cmd_desc desc; + uint16_t num_msi; + int ret; + + hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_VF_RSRC, true); + ret = hns3_cmd_send(hw, &desc, 1); + if (ret) { + hns3_err(hw, "query vf resource failed, ret = %d", ret); + return ret; + } + + req = (struct hns3_vf_res_cmd *)desc.data; + num_msi = hns3_get_field(rte_le_to_cpu_16(req->vf_intr_vector_number), + HNS3_VEC_NUM_M, HNS3_VEC_NUM_S); + if (num_msi < HNS3_MIN_VECTOR_NUM) { + hns3_err(hw, "Just %u msi resources, not enough for vf(min:%d)", + num_msi, HNS3_MIN_VECTOR_NUM); + return -EINVAL; + } + + hw->num_msi = num_msi; + + return 0; +} + +static int +hns3vf_init_hardware(struct hns3_adapter *hns) +{ + struct hns3_hw *hw = &hns->hw; + uint16_t mtu = hw->data->mtu; + int ret; + + ret = hns3vf_set_promisc_mode(hw, true, false, false); + if (ret) + return ret; + + ret = hns3vf_config_mtu(hw, mtu); + if (ret) + goto err_init_hardware; + + ret = hns3vf_vlan_filter_configure(hns, 0, 1); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to initialize VLAN config: %d", ret); + goto err_init_hardware; + } + + ret = hns3_config_gro(hw, false); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to config gro: %d", ret); + goto err_init_hardware; + } + + /* + * In the initialization clearing the all hardware mapping relationship + * configurations between queues and interrupt vectors is needed, so + * some error caused by the residual configurations, such as the + * unexpected interrupt, can be avoid. 
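+ * hns3vf_init_ring_with_vector() below does that clearing by sending
+ * unmap requests for each queue to the PF over the mailbox.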
+ */ + ret = hns3vf_init_ring_with_vector(hw); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to init ring intr vector: %d", ret); + goto err_init_hardware; + } + + ret = hns3vf_set_alive(hw, true); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to VF send alive to PF: %d", ret); + goto err_init_hardware; + } + + hns3vf_request_link_info(hw); + return 0; + +err_init_hardware: + (void)hns3vf_set_promisc_mode(hw, false, false, false); + return ret; +} + +static int +hns3vf_clear_vport_list(struct hns3_hw *hw) +{ + return hns3_send_mbx_msg(hw, HNS3_MBX_HANDLE_VF_TBL, + HNS3_MBX_VPORT_LIST_CLEAR, NULL, 0, false, + NULL, 0); +} + +static int +hns3vf_init_vf(struct rte_eth_dev *eth_dev) +{ + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); + struct hns3_adapter *hns = eth_dev->data->dev_private; + struct hns3_hw *hw = &hns->hw; + int ret; + + PMD_INIT_FUNC_TRACE(); + + /* Get hardware io base address from pcie BAR2 IO space */ + hw->io_base = pci_dev->mem_resource[2].addr; + + /* Firmware command queue initialize */ + ret = hns3_cmd_init_queue(hw); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to init cmd queue: %d", ret); + goto err_cmd_init_queue; + } + + /* Firmware command initialize */ + ret = hns3_cmd_init(hw); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to init cmd: %d", ret); + goto err_cmd_init; + } + + /* Get VF resource */ + ret = hns3_query_vf_resource(hw); + if (ret) + goto err_cmd_init; + + rte_spinlock_init(&hw->mbx_resp.lock); + + hns3vf_clear_event_cause(hw, 0); + + ret = rte_intr_callback_register(&pci_dev->intr_handle, + hns3vf_interrupt_handler, eth_dev); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to register intr: %d", ret); + goto err_intr_callback_register; + } + + /* Enable interrupt */ + rte_intr_enable(&pci_dev->intr_handle); + hns3vf_enable_irq0(hw); + + /* Get configuration from PF */ + ret = hns3vf_get_configuration(hw); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to fetch configuration: %d", ret); + goto err_get_config; + } + + /* + * The hns3 PF ethdev driver in kernel support setting VF MAC address + * on the host by "ip link set ..." command. To avoid some incorrect + * scenes, for example, hns3 VF PMD driver fails to receive and send + * packets after user configure the MAC address by using the + * "ip link set ..." command, hns3 VF PMD driver keep the same MAC + * address strategy as the hns3 kernel ethdev driver in the + * initialization. If user configure a MAC address by the ip command + * for VF device, then hns3 VF PMD driver will start with it, otherwise + * start with a random MAC address in the initialization. 
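+ * A host MAC address of all zeroes therefore means "not configured" and
+ * makes the driver fall back to rte_eth_random_addr() just below.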
+ */ + ret = rte_is_zero_ether_addr((struct rte_ether_addr *)hw->mac.mac_addr); + if (ret) + rte_eth_random_addr(hw->mac.mac_addr); + + ret = hns3vf_clear_vport_list(hw); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to clear tbl list: %d", ret); + goto err_get_config; + } + + ret = hns3vf_init_hardware(hns); + if (ret) + goto err_get_config; + + hns3_set_default_rss_args(hw); + + return 0; + +err_get_config: + hns3vf_disable_irq0(hw); + rte_intr_disable(&pci_dev->intr_handle); + hns3_intr_unregister(&pci_dev->intr_handle, hns3vf_interrupt_handler, + eth_dev); +err_intr_callback_register: +err_cmd_init: + hns3_cmd_uninit(hw); + hns3_cmd_destroy_queue(hw); +err_cmd_init_queue: + hw->io_base = NULL; + + return ret; +} + +static void +hns3vf_uninit_vf(struct rte_eth_dev *eth_dev) +{ + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); + struct hns3_adapter *hns = eth_dev->data->dev_private; + struct hns3_hw *hw = &hns->hw; + + PMD_INIT_FUNC_TRACE(); + + hns3_rss_uninit(hns); + (void)hns3vf_set_alive(hw, false); + (void)hns3vf_set_promisc_mode(hw, false, false, false); + hns3vf_disable_irq0(hw); + rte_intr_disable(&pci_dev->intr_handle); + hns3_intr_unregister(&pci_dev->intr_handle, hns3vf_interrupt_handler, + eth_dev); + hns3_cmd_uninit(hw); + hns3_cmd_destroy_queue(hw); + hw->io_base = NULL; +} + +static int +hns3vf_do_stop(struct hns3_adapter *hns) +{ + struct hns3_hw *hw = &hns->hw; + bool reset_queue; + + hw->mac.link_status = ETH_LINK_DOWN; + + if (rte_atomic16_read(&hw->reset.disable_cmd) == 0) { + hns3vf_configure_mac_addr(hns, true); + reset_queue = true; + } else + reset_queue = false; + return hns3_stop_queues(hns, reset_queue); +} + +static void +hns3vf_unmap_rx_interrupt(struct rte_eth_dev *dev) +{ + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + uint8_t base = RTE_INTR_VEC_ZERO_OFFSET; + uint8_t vec = RTE_INTR_VEC_ZERO_OFFSET; + uint16_t q_id; + + if (dev->data->dev_conf.intr_conf.rxq == 0) + return; + + /* unmap the ring with vector */ + if (rte_intr_allow_others(intr_handle)) { + vec = RTE_INTR_VEC_RXTX_OFFSET; + base = RTE_INTR_VEC_RXTX_OFFSET; + } + if (rte_intr_dp_is_en(intr_handle)) { + for (q_id = 0; q_id < hw->used_rx_queues; q_id++) { + (void)hns3vf_bind_ring_with_vector(hw, vec, false, + HNS3_RING_TYPE_RX, + q_id); + if (vec < base + intr_handle->nb_efd - 1) + vec++; + } + } + /* Clean datapath event and queue/vec mapping */ + rte_intr_efd_disable(intr_handle); + if (intr_handle->intr_vec) { + rte_free(intr_handle->intr_vec); + intr_handle->intr_vec = NULL; + } +} + +static void +hns3vf_dev_stop(struct rte_eth_dev *dev) +{ + struct hns3_adapter *hns = dev->data->dev_private; + struct hns3_hw *hw = &hns->hw; + + PMD_INIT_FUNC_TRACE(); + + hw->adapter_state = HNS3_NIC_STOPPING; + hns3_set_rxtx_function(dev); + rte_wmb(); + /* Disable datapath on secondary process. */ + hns3_mp_req_stop_rxtx(dev); + /* Prevent crashes when queues are still in use. 
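+ * The delay below waits roughly one millisecond per TQP so that any
+ * datapath calls still in flight can finish first.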
*/ + rte_delay_ms(hw->tqps_num); + + rte_spinlock_lock(&hw->lock); + if (rte_atomic16_read(&hw->reset.resetting) == 0) { + hns3vf_do_stop(hns); + hns3vf_unmap_rx_interrupt(dev); + hns3_dev_release_mbufs(hns); + hw->adapter_state = HNS3_NIC_CONFIGURED; + } + rte_eal_alarm_cancel(hns3vf_service_handler, dev); + rte_spinlock_unlock(&hw->lock); +} + +static void +hns3vf_dev_close(struct rte_eth_dev *eth_dev) +{ + struct hns3_adapter *hns = eth_dev->data->dev_private; + struct hns3_hw *hw = &hns->hw; + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return; + + if (hw->adapter_state == HNS3_NIC_STARTED) + hns3vf_dev_stop(eth_dev); + + hw->adapter_state = HNS3_NIC_CLOSING; + hns3_reset_abort(hns); + hw->adapter_state = HNS3_NIC_CLOSED; + rte_eal_alarm_cancel(hns3vf_keep_alive_handler, eth_dev); + hns3vf_configure_all_mc_mac_addr(hns, true); + hns3vf_remove_all_vlan_table(hns); + hns3vf_uninit_vf(eth_dev); + hns3_free_all_queues(eth_dev); + rte_free(hw->reset.wait_data); + rte_free(eth_dev->process_private); + eth_dev->process_private = NULL; + hns3_mp_uninit_primary(); + hns3_warn(hw, "Close port %d finished", hw->data->port_id); +} + +static int +hns3vf_fw_version_get(struct rte_eth_dev *eth_dev, char *fw_version, + size_t fw_size) +{ + struct hns3_adapter *hns = eth_dev->data->dev_private; + struct hns3_hw *hw = &hns->hw; + uint32_t version = hw->fw_version; + int ret; + + ret = snprintf(fw_version, fw_size, "%lu.%lu.%lu.%lu", + hns3_get_field(version, HNS3_FW_VERSION_BYTE3_M, + HNS3_FW_VERSION_BYTE3_S), + hns3_get_field(version, HNS3_FW_VERSION_BYTE2_M, + HNS3_FW_VERSION_BYTE2_S), + hns3_get_field(version, HNS3_FW_VERSION_BYTE1_M, + HNS3_FW_VERSION_BYTE1_S), + hns3_get_field(version, HNS3_FW_VERSION_BYTE0_M, + HNS3_FW_VERSION_BYTE0_S)); + ret += 1; /* add the size of '\0' */ + if (fw_size < (uint32_t)ret) + return ret; + else + return 0; +} + +static int +hns3vf_dev_link_update(struct rte_eth_dev *eth_dev, + __rte_unused int wait_to_complete) +{ + struct hns3_adapter *hns = eth_dev->data->dev_private; + struct hns3_hw *hw = &hns->hw; + struct hns3_mac *mac = &hw->mac; + struct rte_eth_link new_link; + + memset(&new_link, 0, sizeof(new_link)); + switch (mac->link_speed) { + case ETH_SPEED_NUM_10M: + case ETH_SPEED_NUM_100M: + case ETH_SPEED_NUM_1G: + case ETH_SPEED_NUM_10G: + case ETH_SPEED_NUM_25G: + case ETH_SPEED_NUM_40G: + case ETH_SPEED_NUM_50G: + case ETH_SPEED_NUM_100G: + new_link.link_speed = mac->link_speed; + break; + default: + new_link.link_speed = ETH_SPEED_NUM_100M; + break; + } + + new_link.link_duplex = mac->link_duplex; + new_link.link_status = mac->link_status ? 
ETH_LINK_UP : ETH_LINK_DOWN; + new_link.link_autoneg = + !(eth_dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED); + + return rte_eth_linkstatus_set(eth_dev, &new_link); +} + +static int +hns3vf_do_start(struct hns3_adapter *hns, bool reset_queue) +{ + struct hns3_hw *hw = &hns->hw; + int ret; + + ret = hns3vf_set_tc_info(hns); + if (ret) + return ret; + + ret = hns3_start_queues(hns, reset_queue); + if (ret) + hns3_err(hw, "Failed to start queues: %d", ret); + + return ret; +} + +static int +hns3vf_map_rx_interrupt(struct rte_eth_dev *dev) +{ + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint8_t base = RTE_INTR_VEC_ZERO_OFFSET; + uint8_t vec = RTE_INTR_VEC_ZERO_OFFSET; + uint32_t intr_vector; + uint16_t q_id; + int ret; + + if (dev->data->dev_conf.intr_conf.rxq == 0) + return 0; + + /* disable uio/vfio intr/eventfd mapping */ + rte_intr_disable(intr_handle); + + /* check and configure queue intr-vector mapping */ + if (rte_intr_cap_multiple(intr_handle) || + !RTE_ETH_DEV_SRIOV(dev).active) { + intr_vector = hw->used_rx_queues; + /* It creates event fd for each intr vector when MSIX is used */ + if (rte_intr_efd_enable(intr_handle, intr_vector)) + return -EINVAL; + } + if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) { + intr_handle->intr_vec = + rte_zmalloc("intr_vec", + hw->used_rx_queues * sizeof(int), 0); + if (intr_handle->intr_vec == NULL) { + hns3_err(hw, "Failed to allocate %d rx_queues" + " intr_vec", hw->used_rx_queues); + ret = -ENOMEM; + goto vf_alloc_intr_vec_error; + } + } + + if (rte_intr_allow_others(intr_handle)) { + vec = RTE_INTR_VEC_RXTX_OFFSET; + base = RTE_INTR_VEC_RXTX_OFFSET; + } + if (rte_intr_dp_is_en(intr_handle)) { + for (q_id = 0; q_id < hw->used_rx_queues; q_id++) { + ret = hns3vf_bind_ring_with_vector(hw, vec, true, + HNS3_RING_TYPE_RX, + q_id); + if (ret) + goto vf_bind_vector_error; + intr_handle->intr_vec[q_id] = vec; + if (vec < base + intr_handle->nb_efd - 1) + vec++; + } + } + rte_intr_enable(intr_handle); + return 0; + +vf_bind_vector_error: + rte_intr_efd_disable(intr_handle); + if (intr_handle->intr_vec) { + free(intr_handle->intr_vec); + intr_handle->intr_vec = NULL; + } + return ret; +vf_alloc_intr_vec_error: + rte_intr_efd_disable(intr_handle); + return ret; +} + +static int +hns3vf_restore_rx_interrupt(struct hns3_hw *hw) +{ + struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id]; + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + uint16_t q_id; + int ret; + + if (dev->data->dev_conf.intr_conf.rxq == 0) + return 0; + + if (rte_intr_dp_is_en(intr_handle)) { + for (q_id = 0; q_id < hw->used_rx_queues; q_id++) { + ret = hns3vf_bind_ring_with_vector(hw, + intr_handle->intr_vec[q_id], true, + HNS3_RING_TYPE_RX, q_id); + if (ret) + return ret; + } + } + + return 0; +} + +static void +hns3vf_restore_filter(struct rte_eth_dev *dev) +{ + hns3_restore_rss_filter(dev); +} + +static int +hns3vf_dev_start(struct rte_eth_dev *dev) +{ + struct hns3_adapter *hns = dev->data->dev_private; + struct hns3_hw *hw = &hns->hw; + int ret; + + PMD_INIT_FUNC_TRACE(); + if (rte_atomic16_read(&hw->reset.resetting)) + return -EBUSY; + + rte_spinlock_lock(&hw->lock); + hw->adapter_state = HNS3_NIC_STARTING; + ret = hns3vf_do_start(hns, true); + if (ret) { + hw->adapter_state = HNS3_NIC_CONFIGURED; + rte_spinlock_unlock(&hw->lock); 
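The Rx interrupt mapping above binds each Rx queue to an MSI-X vector: vectors start at RTE_INTR_VEC_RXTX_OFFSET when non-datapath interrupts are allowed (RTE_INTR_VEC_ZERO_OFFSET otherwise) and stop advancing at the last available event fd, so any surplus queues share the final vector. A minimal standalone sketch of that assignment follows; the helper name and the sample queue/efd counts are illustrative only and are not part of the driver.

#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical helper mirroring the loop in hns3vf_map_rx_interrupt():
 * 'base' stands in for RTE_INTR_VEC_RXTX_OFFSET (1) or
 * RTE_INTR_VEC_ZERO_OFFSET (0), 'nb_efd' for intr_handle->nb_efd.
 */
static uint8_t
example_rx_vec(uint16_t q_id, uint8_t base, uint32_t nb_efd)
{
	uint32_t last = base + nb_efd - 1;	/* highest usable vector */
	uint32_t vec = base + q_id;

	/* queues beyond the last event fd are clamped to the last vector */
	return (uint8_t)(vec < last ? vec : last);
}

int
main(void)
{
	uint16_t q;

	/* 8 Rx queues, 4 event fds, base = 1: prints 1 2 3 4 4 4 4 4 */
	for (q = 0; q < 8; q++)
		printf("%u ", example_rx_vec(q, 1, 4));
	printf("\n");
	return 0;
}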
+ return ret; + } + ret = hns3vf_map_rx_interrupt(dev); + if (ret) { + hw->adapter_state = HNS3_NIC_CONFIGURED; + rte_spinlock_unlock(&hw->lock); + return ret; + } + hw->adapter_state = HNS3_NIC_STARTED; + rte_spinlock_unlock(&hw->lock); + + hns3_set_rxtx_function(dev); + hns3_mp_req_start_rxtx(dev); + rte_eal_alarm_set(HNS3VF_SERVICE_INTERVAL, hns3vf_service_handler, dev); + + hns3vf_restore_filter(dev); + + /* Enable interrupt of all rx queues before enabling queues */ + hns3_dev_all_rx_queue_intr_enable(hw, true); + /* + * When finished the initialization, enable queues to receive/transmit + * packets. + */ + hns3_enable_all_queues(hw, true); + + return ret; +} + +static bool +is_vf_reset_done(struct hns3_hw *hw) +{ +#define HNS3_FUN_RST_ING_BITS \ + (BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) | \ + BIT(HNS3_VECTOR0_CORERESET_INT_B) | \ + BIT(HNS3_VECTOR0_IMPRESET_INT_B) | \ + BIT(HNS3_VECTOR0_FUNCRESET_INT_B)) + + uint32_t val; + + if (hw->reset.level == HNS3_VF_RESET) { + val = hns3_read_dev(hw, HNS3_VF_RST_ING); + if (val & HNS3_VF_RST_ING_BIT) + return false; + } else { + val = hns3_read_dev(hw, HNS3_FUN_RST_ING); + if (val & HNS3_FUN_RST_ING_BITS) + return false; + } + return true; +} + +bool +hns3vf_is_reset_pending(struct hns3_adapter *hns) +{ + struct hns3_hw *hw = &hns->hw; + enum hns3_reset_level reset; + + hns3vf_check_event_cause(hns, NULL); + reset = hns3vf_get_reset_level(hw, &hw->reset.pending); + if (hw->reset.level != HNS3_NONE_RESET && hw->reset.level < reset) { + hns3_warn(hw, "High level reset %d is pending", reset); + return true; + } + return false; +} + +static int +hns3vf_wait_hardware_ready(struct hns3_adapter *hns) +{ + struct hns3_hw *hw = &hns->hw; + struct hns3_wait_data *wait_data = hw->reset.wait_data; + struct timeval tv; + + if (wait_data->result == HNS3_WAIT_SUCCESS) { + /* + * After vf reset is ready, the PF may not have completed + * the reset processing. The vf sending mbox to PF may fail + * during the pf reset, so it is better to add extra delay. + */ + if (hw->reset.level == HNS3_VF_FUNC_RESET || + hw->reset.level == HNS3_FLR_RESET) + return 0; + /* Reset retry process, no need to add extra delay. 
*/ + if (hw->reset.attempts) + return 0; + if (wait_data->check_completion == NULL) + return 0; + + wait_data->check_completion = NULL; + wait_data->interval = 1 * MSEC_PER_SEC * USEC_PER_MSEC; + wait_data->count = 1; + wait_data->result = HNS3_WAIT_REQUEST; + rte_eal_alarm_set(wait_data->interval, hns3_wait_callback, + wait_data); + hns3_warn(hw, "hardware is ready, delay 1 sec for PF reset complete"); + return -EAGAIN; + } else if (wait_data->result == HNS3_WAIT_TIMEOUT) { + gettimeofday(&tv, NULL); + hns3_warn(hw, "Reset step4 hardware not ready after reset time=%ld.%.6ld", + tv.tv_sec, tv.tv_usec); + return -ETIME; + } else if (wait_data->result == HNS3_WAIT_REQUEST) + return -EAGAIN; + + wait_data->hns = hns; + wait_data->check_completion = is_vf_reset_done; + wait_data->end_ms = (uint64_t)HNS3VF_RESET_WAIT_CNT * + HNS3VF_RESET_WAIT_MS + get_timeofday_ms(); + wait_data->interval = HNS3VF_RESET_WAIT_MS * USEC_PER_MSEC; + wait_data->count = HNS3VF_RESET_WAIT_CNT; + wait_data->result = HNS3_WAIT_REQUEST; + rte_eal_alarm_set(wait_data->interval, hns3_wait_callback, wait_data); + return -EAGAIN; +} + +static int +hns3vf_prepare_reset(struct hns3_adapter *hns) +{ + struct hns3_hw *hw = &hns->hw; + int ret = 0; + + if (hw->reset.level == HNS3_VF_FUNC_RESET) { + ret = hns3_send_mbx_msg(hw, HNS3_MBX_RESET, 0, NULL, + 0, true, NULL, 0); + } + rte_atomic16_set(&hw->reset.disable_cmd, 1); + + return ret; +} + +static int +hns3vf_stop_service(struct hns3_adapter *hns) +{ + struct hns3_hw *hw = &hns->hw; + struct rte_eth_dev *eth_dev; + + eth_dev = &rte_eth_devices[hw->data->port_id]; + if (hw->adapter_state == HNS3_NIC_STARTED) + rte_eal_alarm_cancel(hns3vf_service_handler, eth_dev); + hw->mac.link_status = ETH_LINK_DOWN; + + hns3_set_rxtx_function(eth_dev); + rte_wmb(); + /* Disable datapath on secondary process. */ + hns3_mp_req_stop_rxtx(eth_dev); + rte_delay_ms(hw->tqps_num); + + rte_spinlock_lock(&hw->lock); + if (hw->adapter_state == HNS3_NIC_STARTED || + hw->adapter_state == HNS3_NIC_STOPPING) { + hns3vf_do_stop(hns); + hw->reset.mbuf_deferred_free = true; + } else + hw->reset.mbuf_deferred_free = false; + + /* + * It is cumbersome for hardware to pick-and-choose entries for deletion + * from table space. Hence, for function reset software intervention is + * required to delete the entries. + */ + if (rte_atomic16_read(&hw->reset.disable_cmd) == 0) + hns3vf_configure_all_mc_mac_addr(hns, true); + rte_spinlock_unlock(&hw->lock); + + return 0; +} + +static int +hns3vf_start_service(struct hns3_adapter *hns) +{ + struct hns3_hw *hw = &hns->hw; + struct rte_eth_dev *eth_dev; + + eth_dev = &rte_eth_devices[hw->data->port_id]; + hns3_set_rxtx_function(eth_dev); + hns3_mp_req_start_rxtx(eth_dev); + if (hw->adapter_state == HNS3_NIC_STARTED) { + hns3vf_service_handler(eth_dev); + + /* Enable interrupt of all rx queues before enabling queues */ + hns3_dev_all_rx_queue_intr_enable(hw, true); + /* + * When finished the initialization, enable queues to receive + * and transmit packets. + */ + hns3_enable_all_queues(hw, true); + } + + return 0; +} + +static int +hns3vf_check_default_mac_change(struct hns3_hw *hw) +{ + char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; + struct rte_ether_addr *hw_mac; + int ret; + + /* + * The hns3 PF ethdev driver in kernel support setting VF MAC address + * on the host by "ip link set ..." command. 
If the hns3 PF kernel + * ethdev driver sets the MAC address for VF device after the + * initialization of the related VF device, the PF driver will notify + * VF driver to reset VF device to make the new MAC address effective + * immediately. The hns3 VF PMD driver should check whether the MAC + * address has been changed by the PF kernel ethdev driver, if changed + * VF driver should configure hardware using the new MAC address in the + * recovering hardware configuration stage of the reset process. + */ + ret = hns3vf_get_host_mac_addr(hw); + if (ret) + return ret; + + hw_mac = (struct rte_ether_addr *)hw->mac.mac_addr; + ret = rte_is_zero_ether_addr(hw_mac); + if (ret) { + rte_ether_addr_copy(&hw->data->mac_addrs[0], hw_mac); + } else { + ret = rte_is_same_ether_addr(&hw->data->mac_addrs[0], hw_mac); + if (!ret) { + rte_ether_addr_copy(hw_mac, &hw->data->mac_addrs[0]); + rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, + &hw->data->mac_addrs[0]); + hns3_warn(hw, "Default MAC address has been changed to:" + " %s by the host PF kernel ethdev driver", + mac_str); + } + } + + return 0; +} + +static int +hns3vf_restore_conf(struct hns3_adapter *hns) +{ + struct hns3_hw *hw = &hns->hw; + int ret; + + ret = hns3vf_check_default_mac_change(hw); + if (ret) + return ret; + + ret = hns3vf_configure_mac_addr(hns, false); + if (ret) + return ret; + + ret = hns3vf_configure_all_mc_mac_addr(hns, false); + if (ret) + goto err_mc_mac; + + ret = hns3vf_restore_promisc(hns); + if (ret) + goto err_vlan_table; + + ret = hns3vf_restore_vlan_conf(hns); + if (ret) + goto err_vlan_table; + + ret = hns3vf_restore_rx_interrupt(hw); + if (ret) + goto err_vlan_table; + + if (hw->adapter_state == HNS3_NIC_STARTED) { + ret = hns3vf_do_start(hns, false); + if (ret) + goto err_vlan_table; + hns3_info(hw, "hns3vf dev restart successful!"); + } else if (hw->adapter_state == HNS3_NIC_STOPPING) + hw->adapter_state = HNS3_NIC_CONFIGURED; + return 0; + +err_vlan_table: + hns3vf_configure_all_mc_mac_addr(hns, true); +err_mc_mac: + hns3vf_configure_mac_addr(hns, true); + return ret; +} + +static enum hns3_reset_level +hns3vf_get_reset_level(struct hns3_hw *hw, uint64_t *levels) +{ + enum hns3_reset_level reset_level; + + /* return the highest priority reset level amongst all */ + if (hns3_atomic_test_bit(HNS3_VF_RESET, levels)) + reset_level = HNS3_VF_RESET; + else if (hns3_atomic_test_bit(HNS3_VF_FULL_RESET, levels)) + reset_level = HNS3_VF_FULL_RESET; + else if (hns3_atomic_test_bit(HNS3_VF_PF_FUNC_RESET, levels)) + reset_level = HNS3_VF_PF_FUNC_RESET; + else if (hns3_atomic_test_bit(HNS3_VF_FUNC_RESET, levels)) + reset_level = HNS3_VF_FUNC_RESET; + else if (hns3_atomic_test_bit(HNS3_FLR_RESET, levels)) + reset_level = HNS3_FLR_RESET; + else + reset_level = HNS3_NONE_RESET; + + if (hw->reset.level != HNS3_NONE_RESET && reset_level < hw->reset.level) + return HNS3_NONE_RESET; + + return reset_level; +} + +static void +hns3vf_reset_service(void *param) +{ + struct hns3_adapter *hns = (struct hns3_adapter *)param; + struct hns3_hw *hw = &hns->hw; + enum hns3_reset_level reset_level; + struct timeval tv_delta; + struct timeval tv_start; + struct timeval tv; + uint64_t msec; + + /* + * The interrupt is not triggered within the delay time. + * The interrupt may have been lost. It is necessary to handle + * the interrupt to recover from the error. 
+ */ + if (rte_atomic16_read(&hns->hw.reset.schedule) == SCHEDULE_DEFERRED) { + rte_atomic16_set(&hns->hw.reset.schedule, SCHEDULE_REQUESTED); + hns3_err(hw, "Handling interrupts in delayed tasks"); + hns3vf_interrupt_handler(&rte_eth_devices[hw->data->port_id]); + reset_level = hns3vf_get_reset_level(hw, &hw->reset.pending); + if (reset_level == HNS3_NONE_RESET) { + hns3_err(hw, "No reset level is set, try global reset"); + hns3_atomic_set_bit(HNS3_VF_RESET, &hw->reset.pending); + } + } + rte_atomic16_set(&hns->hw.reset.schedule, SCHEDULE_NONE); + + /* + * Hardware reset has been notified, we now have to poll & check if + * hardware has actually completed the reset sequence. + */ + reset_level = hns3vf_get_reset_level(hw, &hw->reset.pending); + if (reset_level != HNS3_NONE_RESET) { + gettimeofday(&tv_start, NULL); + hns3_reset_process(hns, reset_level); + gettimeofday(&tv, NULL); + timersub(&tv, &tv_start, &tv_delta); + msec = tv_delta.tv_sec * MSEC_PER_SEC + + tv_delta.tv_usec / USEC_PER_MSEC; + if (msec > HNS3_RESET_PROCESS_MS) + hns3_err(hw, "%d handle long time delta %" PRIx64 + " ms time=%ld.%.6ld", + hw->reset.level, msec, tv.tv_sec, tv.tv_usec); + } +} + +static int +hns3vf_reinit_dev(struct hns3_adapter *hns) +{ + struct rte_eth_dev *eth_dev = &rte_eth_devices[hns->hw.data->port_id]; + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); + struct hns3_hw *hw = &hns->hw; + int ret; + + if (hw->reset.level == HNS3_VF_FULL_RESET) { + rte_intr_disable(&pci_dev->intr_handle); + hns3vf_set_bus_master(pci_dev, true); + } + + /* Firmware command initialize */ + ret = hns3_cmd_init(hw); + if (ret) { + hns3_err(hw, "Failed to init cmd: %d", ret); + return ret; + } + + if (hw->reset.level == HNS3_VF_FULL_RESET) { + /* + * UIO enables msix by writing the pcie configuration space + * vfio_pci enables msix in rte_intr_enable. 
+ */ + if (pci_dev->kdrv == RTE_KDRV_IGB_UIO || + pci_dev->kdrv == RTE_KDRV_UIO_GENERIC) { + if (hns3vf_enable_msix(pci_dev, true)) + hns3_err(hw, "Failed to enable msix"); + } + + rte_intr_enable(&pci_dev->intr_handle); + } + + ret = hns3_reset_all_queues(hns); + if (ret) { + hns3_err(hw, "Failed to reset all queues: %d", ret); + return ret; + } + + ret = hns3vf_init_hardware(hns); + if (ret) { + hns3_err(hw, "Failed to init hardware: %d", ret); + return ret; + } + + return 0; +} + +static const struct eth_dev_ops hns3vf_eth_dev_ops = { + .dev_start = hns3vf_dev_start, + .dev_stop = hns3vf_dev_stop, + .dev_close = hns3vf_dev_close, + .mtu_set = hns3vf_dev_mtu_set, + .promiscuous_enable = hns3vf_dev_promiscuous_enable, + .promiscuous_disable = hns3vf_dev_promiscuous_disable, + .allmulticast_enable = hns3vf_dev_allmulticast_enable, + .allmulticast_disable = hns3vf_dev_allmulticast_disable, + .stats_get = hns3_stats_get, + .stats_reset = hns3_stats_reset, + .xstats_get = hns3_dev_xstats_get, + .xstats_get_names = hns3_dev_xstats_get_names, + .xstats_reset = hns3_dev_xstats_reset, + .xstats_get_by_id = hns3_dev_xstats_get_by_id, + .xstats_get_names_by_id = hns3_dev_xstats_get_names_by_id, + .dev_infos_get = hns3vf_dev_infos_get, + .fw_version_get = hns3vf_fw_version_get, + .rx_queue_setup = hns3_rx_queue_setup, + .tx_queue_setup = hns3_tx_queue_setup, + .rx_queue_release = hns3_dev_rx_queue_release, + .tx_queue_release = hns3_dev_tx_queue_release, + .rx_queue_intr_enable = hns3_dev_rx_queue_intr_enable, + .rx_queue_intr_disable = hns3_dev_rx_queue_intr_disable, + .dev_configure = hns3vf_dev_configure, + .mac_addr_add = hns3vf_add_mac_addr, + .mac_addr_remove = hns3vf_remove_mac_addr, + .mac_addr_set = hns3vf_set_default_mac_addr, + .set_mc_addr_list = hns3vf_set_mc_mac_addr_list, + .link_update = hns3vf_dev_link_update, + .rss_hash_update = hns3_dev_rss_hash_update, + .rss_hash_conf_get = hns3_dev_rss_hash_conf_get, + .reta_update = hns3_dev_rss_reta_update, + .reta_query = hns3_dev_rss_reta_query, + .filter_ctrl = hns3_dev_filter_ctrl, + .vlan_filter_set = hns3vf_vlan_filter_set, + .vlan_offload_set = hns3vf_vlan_offload_set, + .get_reg = hns3_get_regs, + .dev_supported_ptypes_get = hns3_dev_supported_ptypes_get, +}; + +static const struct hns3_reset_ops hns3vf_reset_ops = { + .reset_service = hns3vf_reset_service, + .stop_service = hns3vf_stop_service, + .prepare_reset = hns3vf_prepare_reset, + .wait_hardware_ready = hns3vf_wait_hardware_ready, + .reinit_dev = hns3vf_reinit_dev, + .restore_conf = hns3vf_restore_conf, + .start_service = hns3vf_start_service, +}; + +static int +hns3vf_dev_init(struct rte_eth_dev *eth_dev) +{ + struct rte_device *dev = eth_dev->device; + struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev); + struct hns3_adapter *hns = eth_dev->data->dev_private; + struct hns3_hw *hw = &hns->hw; + uint8_t revision; + int ret; + + PMD_INIT_FUNC_TRACE(); + + /* Get PCI revision id */ + ret = rte_pci_read_config(pci_dev, &revision, HNS3_PCI_REVISION_ID_LEN, + HNS3_PCI_REVISION_ID); + if (ret != HNS3_PCI_REVISION_ID_LEN) { + PMD_INIT_LOG(ERR, "Failed to read pci revision id, ret = %d", + ret); + return -EIO; + } + hw->revision = revision; + + eth_dev->process_private = (struct hns3_process_private *) + rte_zmalloc_socket("hns3_filter_list", + sizeof(struct hns3_process_private), + RTE_CACHE_LINE_SIZE, eth_dev->device->numa_node); + if (eth_dev->process_private == NULL) { + PMD_INIT_LOG(ERR, "Failed to alloc memory for process private"); + return -ENOMEM; + } + + /* initialize flow 
filter lists */ + hns3_filterlist_init(eth_dev); + + hns3_set_rxtx_function(eth_dev); + eth_dev->dev_ops = &hns3vf_eth_dev_ops; + if (rte_eal_process_type() != RTE_PROC_PRIMARY) { + hns3_mp_init_secondary(); + hw->secondary_cnt++; + return 0; + } + + hns3_mp_init_primary(); + + hw->adapter_state = HNS3_NIC_UNINITIALIZED; + hns->is_vf = true; + hw->data = eth_dev->data; + + ret = hns3_reset_init(hw); + if (ret) + goto err_init_reset; + hw->reset.ops = &hns3vf_reset_ops; + + ret = hns3vf_init_vf(eth_dev); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to init vf: %d", ret); + goto err_init_vf; + } + + /* Allocate memory for storing MAC addresses */ + eth_dev->data->mac_addrs = rte_zmalloc("hns3vf-mac", + sizeof(struct rte_ether_addr) * + HNS3_VF_UC_MACADDR_NUM, 0); + if (eth_dev->data->mac_addrs == NULL) { + PMD_INIT_LOG(ERR, "Failed to allocate %zx bytes needed " + "to store MAC addresses", + sizeof(struct rte_ether_addr) * + HNS3_VF_UC_MACADDR_NUM); + ret = -ENOMEM; + goto err_rte_zmalloc; + } + + rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.mac_addr, + ð_dev->data->mac_addrs[0]); + hw->adapter_state = HNS3_NIC_INITIALIZED; + /* + * Pass the information to the rte_eth_dev_close() that it should also + * release the private port resources. + */ + eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE; + + if (rte_atomic16_read(&hns->hw.reset.schedule) == SCHEDULE_PENDING) { + hns3_err(hw, "Reschedule reset service after dev_init"); + hns3_schedule_reset(hns); + } else { + /* IMP will wait ready flag before reset */ + hns3_notify_reset_ready(hw, false); + } + rte_eal_alarm_set(HNS3VF_KEEP_ALIVE_INTERVAL, hns3vf_keep_alive_handler, + eth_dev); + return 0; + +err_rte_zmalloc: + hns3vf_uninit_vf(eth_dev); + +err_init_vf: + rte_free(hw->reset.wait_data); + +err_init_reset: + eth_dev->dev_ops = NULL; + eth_dev->rx_pkt_burst = NULL; + eth_dev->tx_pkt_burst = NULL; + eth_dev->tx_pkt_prepare = NULL; + rte_free(eth_dev->process_private); + eth_dev->process_private = NULL; + + return ret; +} + +static int +hns3vf_dev_uninit(struct rte_eth_dev *eth_dev) +{ + struct hns3_adapter *hns = eth_dev->data->dev_private; + struct hns3_hw *hw = &hns->hw; + + PMD_INIT_FUNC_TRACE(); + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return -EPERM; + + eth_dev->dev_ops = NULL; + eth_dev->rx_pkt_burst = NULL; + eth_dev->tx_pkt_burst = NULL; + eth_dev->tx_pkt_prepare = NULL; + + if (hw->adapter_state < HNS3_NIC_CLOSING) + hns3vf_dev_close(eth_dev); + + hw->adapter_state = HNS3_NIC_REMOVED; + return 0; +} + +static int +eth_hns3vf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, + struct rte_pci_device *pci_dev) +{ + return rte_eth_dev_pci_generic_probe(pci_dev, + sizeof(struct hns3_adapter), + hns3vf_dev_init); +} + +static int +eth_hns3vf_pci_remove(struct rte_pci_device *pci_dev) +{ + return rte_eth_dev_pci_generic_remove(pci_dev, hns3vf_dev_uninit); +} + +static const struct rte_pci_id pci_id_hns3vf_map[] = { + { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_100G_VF) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_100G_RDMA_PFC_VF) }, + { .vendor_id = 0, /* sentinel */ }, +}; + +static struct rte_pci_driver rte_hns3vf_pmd = { + .id_table = pci_id_hns3vf_map, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING, + .probe = eth_hns3vf_pci_probe, + .remove = eth_hns3vf_pci_remove, +}; + +RTE_PMD_REGISTER_PCI(net_hns3_vf, rte_hns3vf_pmd); +RTE_PMD_REGISTER_PCI_TABLE(net_hns3_vf, pci_id_hns3vf_map); +RTE_PMD_REGISTER_KMOD_DEP(net_hns3_vf, "* igb_uio | vfio-pci"); diff --git 
a/src/spdk/dpdk/drivers/net/hns3/hns3_fdir.c b/src/spdk/dpdk/drivers/net/hns3/hns3_fdir.c new file mode 100644 index 000000000..4c5928ffc --- /dev/null +++ b/src/spdk/dpdk/drivers/net/hns3/hns3_fdir.c @@ -0,0 +1,1080 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018-2019 Hisilicon Limited. + */ + +#include +#include +#include +#include +#include +#include + +#include "hns3_ethdev.h" +#include "hns3_logs.h" + +#define HNS3_VLAN_TAG_TYPE_NONE 0 +#define HNS3_VLAN_TAG_TYPE_TAG2 1 +#define HNS3_VLAN_TAG_TYPE_TAG1 2 +#define HNS3_VLAN_TAG_TYPE_TAG1_2 3 + +#define HNS3_PF_ID_S 0 +#define HNS3_PF_ID_M GENMASK(2, 0) +#define HNS3_VF_ID_S 3 +#define HNS3_VF_ID_M GENMASK(10, 3) +#define HNS3_PORT_TYPE_B 11 +#define HNS3_NETWORK_PORT_ID_S 0 +#define HNS3_NETWORK_PORT_ID_M GENMASK(3, 0) + +#define HNS3_FD_EPORT_SW_EN_B 0 + +#define HNS3_FD_AD_DATA_S 32 +#define HNS3_FD_AD_DROP_B 0 +#define HNS3_FD_AD_DIRECT_QID_B 1 +#define HNS3_FD_AD_QID_S 2 +#define HNS3_FD_AD_QID_M GENMASK(12, 2) +#define HNS3_FD_AD_USE_COUNTER_B 12 +#define HNS3_FD_AD_COUNTER_NUM_S 13 +#define HNS3_FD_AD_COUNTER_NUM_M GENMASK(20, 13) +#define HNS3_FD_AD_NXT_STEP_B 20 +#define HNS3_FD_AD_NXT_KEY_S 21 +#define HNS3_FD_AD_NXT_KEY_M GENMASK(26, 21) +#define HNS3_FD_AD_WR_RULE_ID_B 0 +#define HNS3_FD_AD_RULE_ID_S 1 +#define HNS3_FD_AD_RULE_ID_M GENMASK(13, 1) + +enum HNS3_PORT_TYPE { + HOST_PORT, + NETWORK_PORT +}; + +enum HNS3_FD_MODE { + HNS3_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1, + HNS3_FD_MODE_DEPTH_1K_WIDTH_400B_STAGE_2, + HNS3_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1, + HNS3_FD_MODE_DEPTH_2K_WIDTH_200B_STAGE_2, +}; + +enum HNS3_FD_KEY_TYPE { + HNS3_FD_KEY_BASE_ON_PTYPE, + HNS3_FD_KEY_BASE_ON_TUPLE, +}; + +enum HNS3_FD_META_DATA { + PACKET_TYPE_ID, + IP_FRAGEMENT, + ROCE_TYPE, + NEXT_KEY, + VLAN_NUMBER, + SRC_VPORT, + DST_VPORT, + TUNNEL_PACKET, + MAX_META_DATA, +}; + +struct key_info { + uint8_t key_type; + uint8_t key_length; +}; + +static const struct key_info meta_data_key_info[] = { + {PACKET_TYPE_ID, 6}, + {IP_FRAGEMENT, 1}, + {ROCE_TYPE, 1}, + {NEXT_KEY, 5}, + {VLAN_NUMBER, 2}, + {SRC_VPORT, 12}, + {DST_VPORT, 12}, + {TUNNEL_PACKET, 1}, +}; + +static const struct key_info tuple_key_info[] = { + {OUTER_DST_MAC, 48}, + {OUTER_SRC_MAC, 48}, + {OUTER_VLAN_TAG_FST, 16}, + {OUTER_VLAN_TAG_SEC, 16}, + {OUTER_ETH_TYPE, 16}, + {OUTER_L2_RSV, 16}, + {OUTER_IP_TOS, 8}, + {OUTER_IP_PROTO, 8}, + {OUTER_SRC_IP, 32}, + {OUTER_DST_IP, 32}, + {OUTER_L3_RSV, 16}, + {OUTER_SRC_PORT, 16}, + {OUTER_DST_PORT, 16}, + {OUTER_L4_RSV, 32}, + {OUTER_TUN_VNI, 24}, + {OUTER_TUN_FLOW_ID, 8}, + {INNER_DST_MAC, 48}, + {INNER_SRC_MAC, 48}, + {INNER_VLAN_TAG1, 16}, + {INNER_VLAN_TAG2, 16}, + {INNER_ETH_TYPE, 16}, + {INNER_L2_RSV, 16}, + {INNER_IP_TOS, 8}, + {INNER_IP_PROTO, 8}, + {INNER_SRC_IP, 32}, + {INNER_DST_IP, 32}, + {INNER_L3_RSV, 16}, + {INNER_SRC_PORT, 16}, + {INNER_DST_PORT, 16}, + {INNER_SCTP_TAG, 32}, +}; + +#define HNS3_BITS_PER_BYTE 8 +#define MAX_KEY_LENGTH 400 +#define MAX_200B_KEY_LENGTH 200 +#define MAX_META_DATA_LENGTH 16 +#define MAX_KEY_DWORDS DIV_ROUND_UP(MAX_KEY_LENGTH / HNS3_BITS_PER_BYTE, 4) +#define MAX_KEY_BYTES (MAX_KEY_DWORDS * 4) + +enum HNS3_FD_PACKET_TYPE { + NIC_PACKET, + ROCE_PACKET, +}; + +/* For each bit of TCAM entry, it uses a pair of 'x' and + * 'y' to indicate which value to match, like below: + * ---------------------------------- + * | bit x | bit y | search value | + * ---------------------------------- + * | 0 | 0 | always hit | + * ---------------------------------- + * | 1 | 0 | match '0' | + * 
---------------------------------- + * | 0 | 1 | match '1' | + * ---------------------------------- + * | 1 | 1 | invalid | + * ---------------------------------- + * Then for input key(k) and mask(v), we can calculate the value by + * the formulae: + * x = (~k) & v + * y = k & v + */ +#define calc_x(x, k, v) ((x) = (~(k) & (v))) +#define calc_y(y, k, v) ((y) = ((k) & (v))) + +struct hns3_fd_tcam_config_1_cmd { + uint8_t stage; + uint8_t xy_sel; + uint8_t port_info; + uint8_t rsv1[1]; + rte_le32_t index; + uint8_t entry_vld; + uint8_t rsv2[7]; + uint8_t tcam_data[8]; +}; + +struct hns3_fd_tcam_config_2_cmd { + uint8_t tcam_data[24]; +}; + +struct hns3_fd_tcam_config_3_cmd { + uint8_t tcam_data[20]; + uint8_t rsv[4]; +}; + +struct hns3_get_fd_mode_cmd { + uint8_t mode; + uint8_t enable; + uint8_t rsv[22]; +}; + +struct hns3_get_fd_allocation_cmd { + rte_le32_t stage1_entry_num; + rte_le32_t stage2_entry_num; + rte_le16_t stage1_counter_num; + rte_le16_t stage2_counter_num; + uint8_t rsv[12]; +}; + +struct hns3_set_fd_key_config_cmd { + uint8_t stage; + uint8_t key_select; + uint8_t inner_sipv6_word_en; + uint8_t inner_dipv6_word_en; + uint8_t outer_sipv6_word_en; + uint8_t outer_dipv6_word_en; + uint8_t rsv1[2]; + rte_le32_t tuple_mask; + rte_le32_t meta_data_mask; + uint8_t rsv2[8]; +}; + +struct hns3_fd_ad_config_cmd { + uint8_t stage; + uint8_t rsv1[3]; + rte_le32_t index; + rte_le64_t ad_data; + uint8_t rsv2[8]; +}; + +struct hns3_fd_get_cnt_cmd { + uint8_t stage; + uint8_t rsv1[3]; + rte_le16_t index; + uint8_t rsv2[2]; + rte_le64_t value; + uint8_t rsv3[8]; +}; + +static int hns3_get_fd_mode(struct hns3_hw *hw, uint8_t *fd_mode) +{ + struct hns3_get_fd_mode_cmd *req; + struct hns3_cmd_desc desc; + int ret; + + hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_FD_MODE_CTRL, true); + + req = (struct hns3_get_fd_mode_cmd *)desc.data; + + ret = hns3_cmd_send(hw, &desc, 1); + if (ret) { + hns3_err(hw, "Get fd mode fail, ret=%d", ret); + return ret; + } + + *fd_mode = req->mode; + + return ret; +} + +static int hns3_get_fd_allocation(struct hns3_hw *hw, + uint32_t *stage1_entry_num, + uint32_t *stage2_entry_num, + uint16_t *stage1_counter_num, + uint16_t *stage2_counter_num) +{ + struct hns3_get_fd_allocation_cmd *req; + struct hns3_cmd_desc desc; + int ret; + + hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_FD_GET_ALLOCATION, true); + + req = (struct hns3_get_fd_allocation_cmd *)desc.data; + + ret = hns3_cmd_send(hw, &desc, 1); + if (ret) { + hns3_err(hw, "Query fd allocation fail, ret=%d", ret); + return ret; + } + + *stage1_entry_num = rte_le_to_cpu_32(req->stage1_entry_num); + *stage2_entry_num = rte_le_to_cpu_32(req->stage2_entry_num); + *stage1_counter_num = rte_le_to_cpu_16(req->stage1_counter_num); + *stage2_counter_num = rte_le_to_cpu_16(req->stage2_counter_num); + + return ret; +} + +static int hns3_set_fd_key_config(struct hns3_adapter *hns) +{ + struct hns3_set_fd_key_config_cmd *req; + struct hns3_fd_key_cfg *key_cfg; + struct hns3_pf *pf = &hns->pf; + struct hns3_hw *hw = &hns->hw; + struct hns3_cmd_desc desc; + int ret; + + hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_FD_KEY_CONFIG, false); + + req = (struct hns3_set_fd_key_config_cmd *)desc.data; + key_cfg = &pf->fdir.fd_cfg.key_cfg[HNS3_FD_STAGE_1]; + req->stage = HNS3_FD_STAGE_1; + req->key_select = key_cfg->key_sel; + req->inner_sipv6_word_en = key_cfg->inner_sipv6_word_en; + req->inner_dipv6_word_en = key_cfg->inner_dipv6_word_en; + req->outer_sipv6_word_en = key_cfg->outer_sipv6_word_en; + req->outer_dipv6_word_en = 
key_cfg->outer_dipv6_word_en; + req->tuple_mask = rte_cpu_to_le_32(~key_cfg->tuple_active); + req->meta_data_mask = rte_cpu_to_le_32(~key_cfg->meta_data_active); + + ret = hns3_cmd_send(hw, &desc, 1); + if (ret) + hns3_err(hw, "Set fd key fail, ret=%d", ret); + + return ret; +} + +int hns3_init_fd_config(struct hns3_adapter *hns) +{ + struct hns3_pf *pf = &hns->pf; + struct hns3_hw *hw = &hns->hw; + struct hns3_fd_key_cfg *key_cfg; + int ret; + + ret = hns3_get_fd_mode(hw, &pf->fdir.fd_cfg.fd_mode); + if (ret) + return ret; + + switch (pf->fdir.fd_cfg.fd_mode) { + case HNS3_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1: + pf->fdir.fd_cfg.max_key_length = MAX_KEY_LENGTH; + break; + case HNS3_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1: + pf->fdir.fd_cfg.max_key_length = MAX_200B_KEY_LENGTH; + hns3_warn(hw, "Unsupported tunnel filter in 4K*200Bit"); + break; + default: + hns3_err(hw, "Unsupported flow director mode %d", + pf->fdir.fd_cfg.fd_mode); + return -EOPNOTSUPP; + } + + key_cfg = &pf->fdir.fd_cfg.key_cfg[HNS3_FD_STAGE_1]; + key_cfg->key_sel = HNS3_FD_KEY_BASE_ON_TUPLE; + key_cfg->inner_sipv6_word_en = IPV6_ADDR_WORD_MASK; + key_cfg->inner_dipv6_word_en = IPV6_ADDR_WORD_MASK; + key_cfg->outer_sipv6_word_en = 0; + key_cfg->outer_dipv6_word_en = 0; + + key_cfg->tuple_active = BIT(INNER_VLAN_TAG1) | BIT(INNER_ETH_TYPE) | + BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) | + BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) | + BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT); + + /* If use max 400bit key, we can support tuples for ether type */ + if (pf->fdir.fd_cfg.max_key_length == MAX_KEY_LENGTH) { + key_cfg->tuple_active |= + BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC) | + BIT(OUTER_SRC_PORT) | BIT(INNER_SCTP_TAG) | + BIT(OUTER_DST_PORT) | BIT(INNER_VLAN_TAG2) | + BIT(OUTER_TUN_VNI) | BIT(OUTER_TUN_FLOW_ID) | + BIT(OUTER_ETH_TYPE) | BIT(OUTER_IP_PROTO); + } + + /* roce_type is used to filter roce frames + * dst_vport is used to specify the rule + */ + key_cfg->meta_data_active = BIT(DST_VPORT) | BIT(TUNNEL_PACKET) | + BIT(VLAN_NUMBER); + + ret = hns3_get_fd_allocation(hw, + &pf->fdir.fd_cfg.rule_num[HNS3_FD_STAGE_1], + &pf->fdir.fd_cfg.rule_num[HNS3_FD_STAGE_2], + &pf->fdir.fd_cfg.cnt_num[HNS3_FD_STAGE_1], + &pf->fdir.fd_cfg.cnt_num[HNS3_FD_STAGE_2]); + if (ret) + return ret; + + return hns3_set_fd_key_config(hns); +} + +static int hns3_fd_tcam_config(struct hns3_hw *hw, bool sel_x, int loc, + uint8_t *key, bool is_add) +{ +#define FD_TCAM_CMD_NUM 3 + struct hns3_fd_tcam_config_1_cmd *req1; + struct hns3_fd_tcam_config_2_cmd *req2; + struct hns3_fd_tcam_config_3_cmd *req3; + struct hns3_cmd_desc desc[FD_TCAM_CMD_NUM]; + int len; + int ret; + + hns3_cmd_setup_basic_desc(&desc[0], HNS3_OPC_FD_TCAM_OP, false); + desc[0].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); + hns3_cmd_setup_basic_desc(&desc[1], HNS3_OPC_FD_TCAM_OP, false); + desc[1].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); + hns3_cmd_setup_basic_desc(&desc[2], HNS3_OPC_FD_TCAM_OP, false); + + req1 = (struct hns3_fd_tcam_config_1_cmd *)desc[0].data; + req2 = (struct hns3_fd_tcam_config_2_cmd *)desc[1].data; + req3 = (struct hns3_fd_tcam_config_3_cmd *)desc[2].data; + + req1->stage = HNS3_FD_STAGE_1; + req1->xy_sel = sel_x ? 1 : 0; + hns3_set_bit(req1->port_info, HNS3_FD_EPORT_SW_EN_B, 0); + req1->index = rte_cpu_to_le_32(loc); + req1->entry_vld = sel_x ? 
is_add : 0; + + if (key) { + len = sizeof(req1->tcam_data); + memcpy(req1->tcam_data, key, len); + key += len; + + len = sizeof(req2->tcam_data); + memcpy(req2->tcam_data, key, len); + key += len; + + len = sizeof(req3->tcam_data); + memcpy(req3->tcam_data, key, len); + } + + ret = hns3_cmd_send(hw, desc, FD_TCAM_CMD_NUM); + if (ret) + hns3_err(hw, "Config tcam key fail, ret=%d loc=%d add=%d", + ret, loc, is_add); + return ret; +} + +static int hns3_fd_ad_config(struct hns3_hw *hw, int loc, + struct hns3_fd_ad_data *action) +{ + struct hns3_fd_ad_config_cmd *req; + struct hns3_cmd_desc desc; + uint64_t ad_data = 0; + int ret; + + hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_FD_AD_OP, false); + + req = (struct hns3_fd_ad_config_cmd *)desc.data; + req->index = rte_cpu_to_le_32(loc); + req->stage = HNS3_FD_STAGE_1; + + hns3_set_bit(ad_data, HNS3_FD_AD_WR_RULE_ID_B, + action->write_rule_id_to_bd); + hns3_set_field(ad_data, HNS3_FD_AD_RULE_ID_M, HNS3_FD_AD_RULE_ID_S, + action->rule_id); + ad_data <<= HNS3_FD_AD_DATA_S; + hns3_set_bit(ad_data, HNS3_FD_AD_DROP_B, action->drop_packet); + hns3_set_bit(ad_data, HNS3_FD_AD_DIRECT_QID_B, + action->forward_to_direct_queue); + hns3_set_field(ad_data, HNS3_FD_AD_QID_M, HNS3_FD_AD_QID_S, + action->queue_id); + hns3_set_bit(ad_data, HNS3_FD_AD_USE_COUNTER_B, action->use_counter); + hns3_set_field(ad_data, HNS3_FD_AD_COUNTER_NUM_M, + HNS3_FD_AD_COUNTER_NUM_S, action->counter_id); + hns3_set_bit(ad_data, HNS3_FD_AD_NXT_STEP_B, action->use_next_stage); + hns3_set_field(ad_data, HNS3_FD_AD_NXT_KEY_M, HNS3_FD_AD_NXT_KEY_S, + action->counter_id); + + req->ad_data = rte_cpu_to_le_64(ad_data); + ret = hns3_cmd_send(hw, &desc, 1); + if (ret) + hns3_err(hw, "Config fd ad fail, ret=%d loc=%d", ret, loc); + + return ret; +} + +static inline void hns3_fd_convert_mac(uint8_t *key, uint8_t *mask, + uint8_t *mac_x, uint8_t *mac_y) +{ + uint8_t tmp; + int i; + + for (i = 0; i < RTE_ETHER_ADDR_LEN; i++) { + tmp = RTE_ETHER_ADDR_LEN - 1 - i; + calc_x(mac_x[tmp], key[i], mask[i]); + calc_y(mac_y[tmp], key[i], mask[i]); + } +} + +static void hns3_fd_convert_int16(uint32_t tuple, struct hns3_fdir_rule *rule, + uint8_t *val_x, uint8_t *val_y) +{ + uint16_t tmp_x_s; + uint16_t tmp_y_s; + uint16_t mask; + uint16_t key; + + switch (tuple) { + case OUTER_SRC_PORT: + key = rule->key_conf.spec.outer_src_port; + mask = rule->key_conf.mask.outer_src_port; + break; + case OUTER_DST_PORT: + key = rule->key_conf.spec.tunnel_type; + mask = rule->key_conf.mask.tunnel_type; + break; + case OUTER_ETH_TYPE: + key = rule->key_conf.spec.outer_ether_type; + mask = rule->key_conf.mask.outer_ether_type; + break; + case INNER_SRC_PORT: + key = rule->key_conf.spec.src_port; + mask = rule->key_conf.mask.src_port; + break; + case INNER_DST_PORT: + key = rule->key_conf.spec.dst_port; + mask = rule->key_conf.mask.dst_port; + break; + case INNER_VLAN_TAG1: + key = rule->key_conf.spec.vlan_tag1; + mask = rule->key_conf.mask.vlan_tag1; + break; + case INNER_VLAN_TAG2: + key = rule->key_conf.spec.vlan_tag2; + mask = rule->key_conf.mask.vlan_tag2; + break; + default: + /* INNER_ETH_TYPE: */ + key = rule->key_conf.spec.ether_type; + mask = rule->key_conf.mask.ether_type; + break; + } + calc_x(tmp_x_s, key, mask); + calc_y(tmp_y_s, key, mask); + val_x[0] = rte_cpu_to_le_16(tmp_x_s) & 0xFF; + val_x[1] = rte_cpu_to_le_16(tmp_x_s) >> HNS3_BITS_PER_BYTE; + val_y[0] = rte_cpu_to_le_16(tmp_y_s) & 0xFF; + val_y[1] = rte_cpu_to_le_16(tmp_y_s) >> HNS3_BITS_PER_BYTE; +} + +static inline void hns3_fd_convert_int32(uint32_t 
key, uint32_t mask, + uint8_t *val_x, uint8_t *val_y) +{ + uint32_t tmp_x_l; + uint32_t tmp_y_l; + + calc_x(tmp_x_l, key, mask); + calc_y(tmp_y_l, key, mask); + memcpy(val_x, &tmp_x_l, sizeof(tmp_x_l)); + memcpy(val_y, &tmp_y_l, sizeof(tmp_y_l)); +} + +static bool hns3_fd_convert_tuple(uint32_t tuple, uint8_t *key_x, + uint8_t *key_y, struct hns3_fdir_rule *rule) +{ + struct hns3_fdir_key_conf *key_conf; + int tmp; + int i; + + if ((rule->input_set & BIT(tuple)) == 0) + return true; + + key_conf = &rule->key_conf; + switch (tuple) { + case INNER_DST_MAC: + hns3_fd_convert_mac(key_conf->spec.dst_mac, + key_conf->mask.dst_mac, key_x, key_y); + break; + case INNER_SRC_MAC: + hns3_fd_convert_mac(key_conf->spec.src_mac, + key_conf->mask.src_mac, key_x, key_y); + break; + case OUTER_SRC_PORT: + case OUTER_DST_PORT: + case OUTER_ETH_TYPE: + case INNER_SRC_PORT: + case INNER_DST_PORT: + case INNER_VLAN_TAG1: + case INNER_VLAN_TAG2: + case INNER_ETH_TYPE: + hns3_fd_convert_int16(tuple, rule, key_x, key_y); + break; + case INNER_SRC_IP: + hns3_fd_convert_int32(key_conf->spec.src_ip[IP_ADDR_KEY_ID], + key_conf->mask.src_ip[IP_ADDR_KEY_ID], + key_x, key_y); + break; + case INNER_DST_IP: + hns3_fd_convert_int32(key_conf->spec.dst_ip[IP_ADDR_KEY_ID], + key_conf->mask.dst_ip[IP_ADDR_KEY_ID], + key_x, key_y); + break; + case INNER_SCTP_TAG: + hns3_fd_convert_int32(key_conf->spec.sctp_tag, + key_conf->mask.sctp_tag, key_x, key_y); + break; + case OUTER_TUN_VNI: + for (i = 0; i < VNI_OR_TNI_LEN; i++) { + tmp = VNI_OR_TNI_LEN - 1 - i; + calc_x(key_x[tmp], + key_conf->spec.outer_tun_vni[i], + key_conf->mask.outer_tun_vni[i]); + calc_y(key_y[tmp], + key_conf->spec.outer_tun_vni[i], + key_conf->mask.outer_tun_vni[i]); + } + break; + case OUTER_TUN_FLOW_ID: + calc_x(*key_x, key_conf->spec.outer_tun_flow_id, + key_conf->mask.outer_tun_flow_id); + calc_y(*key_y, key_conf->spec.outer_tun_flow_id, + key_conf->mask.outer_tun_flow_id); + break; + case INNER_IP_TOS: + calc_x(*key_x, key_conf->spec.ip_tos, key_conf->mask.ip_tos); + calc_y(*key_y, key_conf->spec.ip_tos, key_conf->mask.ip_tos); + break; + case OUTER_IP_PROTO: + calc_x(*key_x, key_conf->spec.outer_proto, + key_conf->mask.outer_proto); + calc_y(*key_y, key_conf->spec.outer_proto, + key_conf->mask.outer_proto); + break; + case INNER_IP_PROTO: + calc_x(*key_x, key_conf->spec.ip_proto, + key_conf->mask.ip_proto); + calc_y(*key_y, key_conf->spec.ip_proto, + key_conf->mask.ip_proto); + break; + } + return true; +} + +static uint32_t hns3_get_port_number(uint8_t pf_id, uint8_t vf_id) +{ + uint32_t port_number = 0; + + hns3_set_field(port_number, HNS3_PF_ID_M, HNS3_PF_ID_S, pf_id); + hns3_set_field(port_number, HNS3_VF_ID_M, HNS3_VF_ID_S, vf_id); + hns3_set_bit(port_number, HNS3_PORT_TYPE_B, HOST_PORT); + + return port_number; +} + +static void hns3_fd_convert_meta_data(struct hns3_fd_key_cfg *cfg, + uint8_t vf_id, + struct hns3_fdir_rule *rule, + uint8_t *key_x, uint8_t *key_y) +{ + uint16_t meta_data = 0; + uint16_t port_number; + uint8_t cur_pos = 0; + uint8_t tuple_size; + uint8_t shift_bits; + uint32_t tmp_x; + uint32_t tmp_y; + uint8_t i; + + for (i = 0; i < MAX_META_DATA; i++) { + if ((cfg->meta_data_active & BIT(i)) == 0) + continue; + + tuple_size = meta_data_key_info[i].key_length; + if (i == TUNNEL_PACKET) { + hns3_set_bit(meta_data, cur_pos, + rule->key_conf.spec.tunnel_type ? 
1 : 0); + cur_pos += tuple_size; + } else if (i == VLAN_NUMBER) { + uint8_t vlan_tag; + uint8_t vlan_num; + if (rule->key_conf.spec.tunnel_type == 0) + vlan_num = rule->key_conf.vlan_num; + else + vlan_num = rule->key_conf.outer_vlan_num; + if (vlan_num == 1) + vlan_tag = HNS3_VLAN_TAG_TYPE_TAG1; + else if (vlan_num == VLAN_TAG_NUM_MAX) + vlan_tag = HNS3_VLAN_TAG_TYPE_TAG1_2; + else + vlan_tag = HNS3_VLAN_TAG_TYPE_NONE; + hns3_set_field(meta_data, + GENMASK(cur_pos + tuple_size, + cur_pos), cur_pos, vlan_tag); + cur_pos += tuple_size; + } else if (i == DST_VPORT) { + port_number = hns3_get_port_number(0, vf_id); + hns3_set_field(meta_data, + GENMASK(cur_pos + tuple_size, cur_pos), + cur_pos, port_number); + cur_pos += tuple_size; + } + } + + calc_x(tmp_x, meta_data, 0xFFFF); + calc_y(tmp_y, meta_data, 0xFFFF); + shift_bits = sizeof(meta_data) * HNS3_BITS_PER_BYTE - cur_pos; + + tmp_x = rte_cpu_to_le_32(tmp_x << shift_bits); + tmp_y = rte_cpu_to_le_32(tmp_y << shift_bits); + key_x[0] = tmp_x & 0xFF; + key_x[1] = (tmp_x >> HNS3_BITS_PER_BYTE) & 0xFF; + key_y[0] = tmp_y & 0xFF; + key_y[1] = (tmp_y >> HNS3_BITS_PER_BYTE) & 0xFF; +} + +/* A complete key is combined with meta data key and tuple key. + * Meta data key is stored at the MSB region, and tuple key is stored at + * the LSB region, unused bits will be filled 0. + */ +static int hns3_config_key(struct hns3_adapter *hns, + struct hns3_fdir_rule *rule) +{ + struct hns3_pf *pf = &hns->pf; + struct hns3_hw *hw = &hns->hw; + struct hns3_fd_key_cfg *key_cfg; + uint8_t *cur_key_x; + uint8_t *cur_key_y; + uint8_t key_x[MAX_KEY_BYTES] __rte_aligned(4); + uint8_t key_y[MAX_KEY_BYTES] __rte_aligned(4); + uint8_t vf_id = rule->vf_id; + uint8_t meta_data_region; + uint8_t tuple_size; + uint8_t i; + int ret; + + memset(key_x, 0, sizeof(key_x)); + memset(key_y, 0, sizeof(key_y)); + cur_key_x = key_x; + cur_key_y = key_y; + + key_cfg = &pf->fdir.fd_cfg.key_cfg[HNS3_FD_STAGE_1]; + for (i = 0; i < MAX_TUPLE; i++) { + bool tuple_valid; + + tuple_size = tuple_key_info[i].key_length / HNS3_BITS_PER_BYTE; + if (key_cfg->tuple_active & BIT(i)) { + tuple_valid = hns3_fd_convert_tuple(i, cur_key_x, + cur_key_y, rule); + if (tuple_valid) { + cur_key_x += tuple_size; + cur_key_y += tuple_size; + } + } + } + + meta_data_region = pf->fdir.fd_cfg.max_key_length / HNS3_BITS_PER_BYTE - + MAX_META_DATA_LENGTH / HNS3_BITS_PER_BYTE; + + hns3_fd_convert_meta_data(key_cfg, vf_id, rule, + key_x + meta_data_region, + key_y + meta_data_region); + + ret = hns3_fd_tcam_config(hw, false, rule->location, key_y, true); + if (ret) { + hns3_err(hw, "Config fd key_y fail, loc=%d, ret=%d", + rule->queue_id, ret); + return ret; + } + + ret = hns3_fd_tcam_config(hw, true, rule->location, key_x, true); + if (ret) + hns3_err(hw, "Config fd key_x fail, loc=%d, ret=%d", + rule->queue_id, ret); + return ret; +} + +static int hns3_config_action(struct hns3_hw *hw, struct hns3_fdir_rule *rule) +{ + struct hns3_fd_ad_data ad_data; + + ad_data.ad_id = rule->location; + + if (rule->action == HNS3_FD_ACTION_DROP_PACKET) { + ad_data.drop_packet = true; + ad_data.forward_to_direct_queue = false; + ad_data.queue_id = 0; + } else { + ad_data.drop_packet = false; + ad_data.forward_to_direct_queue = true; + ad_data.queue_id = rule->queue_id; + } + + if (unlikely(rule->flags & HNS3_RULE_FLAG_COUNTER)) { + ad_data.use_counter = true; + ad_data.counter_id = rule->act_cnt.id; + } else { + ad_data.use_counter = false; + ad_data.counter_id = 0; + } + + if (unlikely(rule->flags & HNS3_RULE_FLAG_FDID)) + 
ad_data.rule_id = rule->fd_id; + else + ad_data.rule_id = rule->location; + + ad_data.use_next_stage = false; + ad_data.next_input_key = 0; + + ad_data.write_rule_id_to_bd = true; + + return hns3_fd_ad_config(hw, ad_data.ad_id, &ad_data); +} + +static int hns3_fd_clear_all_rules(struct hns3_hw *hw, uint32_t rule_num) +{ + uint32_t i; + int ret; + + for (i = 0; i < rule_num; i++) { + ret = hns3_fd_tcam_config(hw, true, i, NULL, false); + if (ret) + return ret; + } + + return 0; +} + +int hns3_fdir_filter_init(struct hns3_adapter *hns) +{ + struct hns3_pf *pf = &hns->pf; + struct hns3_fdir_info *fdir_info = &pf->fdir; + uint32_t rule_num = fdir_info->fd_cfg.rule_num[HNS3_FD_STAGE_1]; + char fdir_hash_name[RTE_HASH_NAMESIZE]; + struct rte_hash_parameters fdir_hash_params = { + .name = fdir_hash_name, + .entries = rule_num, + .key_len = sizeof(struct hns3_fdir_key_conf), + .hash_func = rte_hash_crc, + .hash_func_init_val = 0, + }; + int ret; + + ret = hns3_fd_clear_all_rules(&hns->hw, rule_num); + if (ret) { + PMD_INIT_LOG(ERR, "Clear all fd rules fail! ret = %d", ret); + return ret; + } + + fdir_hash_params.socket_id = rte_socket_id(); + TAILQ_INIT(&fdir_info->fdir_list); + rte_spinlock_init(&fdir_info->flows_lock); + snprintf(fdir_hash_name, RTE_HASH_NAMESIZE, "%s", hns->hw.data->name); + fdir_info->hash_handle = rte_hash_create(&fdir_hash_params); + if (fdir_info->hash_handle == NULL) { + PMD_INIT_LOG(ERR, "Create FDIR hash handle fail!"); + return -EINVAL; + } + fdir_info->hash_map = rte_zmalloc("hns3 FDIR hash", + rule_num * + sizeof(struct hns3_fdir_rule_ele *), + 0); + if (fdir_info->hash_map == NULL) { + PMD_INIT_LOG(ERR, "Allocate memory for FDIR hash map fail!"); + rte_hash_free(fdir_info->hash_handle); + return -ENOMEM; + } + + return 0; +} + +void hns3_fdir_filter_uninit(struct hns3_adapter *hns) +{ + struct hns3_pf *pf = &hns->pf; + struct hns3_fdir_info *fdir_info = &pf->fdir; + struct hns3_fdir_rule_ele *fdir_filter; + + rte_spinlock_lock(&fdir_info->flows_lock); + if (fdir_info->hash_map) { + rte_free(fdir_info->hash_map); + fdir_info->hash_map = NULL; + } + if (fdir_info->hash_handle) { + rte_hash_free(fdir_info->hash_handle); + fdir_info->hash_handle = NULL; + } + rte_spinlock_unlock(&fdir_info->flows_lock); + + fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list); + while (fdir_filter) { + TAILQ_REMOVE(&fdir_info->fdir_list, fdir_filter, entries); + hns3_fd_tcam_config(&hns->hw, true, + fdir_filter->fdir_conf.location, NULL, + false); + rte_free(fdir_filter); + fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list); + } +} + +/* + * Find a key in the hash table. + * @return + * - Zero and positive values are key location. + * - -EINVAL if the parameters are invalid. + * - -ENOENT if the key is not found. 
+ */ +static int hns3_fdir_filter_lookup(struct hns3_fdir_info *fdir_info, + struct hns3_fdir_key_conf *key) +{ + hash_sig_t sig; + int ret; + + rte_spinlock_lock(&fdir_info->flows_lock); + sig = rte_hash_crc(key, sizeof(*key), 0); + ret = rte_hash_lookup_with_hash(fdir_info->hash_handle, key, sig); + rte_spinlock_unlock(&fdir_info->flows_lock); + + return ret; +} + +static int hns3_insert_fdir_filter(struct hns3_hw *hw, + struct hns3_fdir_info *fdir_info, + struct hns3_fdir_rule_ele *fdir_filter) +{ + struct hns3_fdir_key_conf *key; + hash_sig_t sig; + int ret; + + key = &fdir_filter->fdir_conf.key_conf; + rte_spinlock_lock(&fdir_info->flows_lock); + sig = rte_hash_crc(key, sizeof(*key), 0); + ret = rte_hash_add_key_with_hash(fdir_info->hash_handle, key, sig); + if (ret < 0) { + rte_spinlock_unlock(&fdir_info->flows_lock); + hns3_err(hw, "Hash table full? err:%d(%s)!", ret, + strerror(ret)); + return ret; + } + + fdir_info->hash_map[ret] = fdir_filter; + TAILQ_INSERT_TAIL(&fdir_info->fdir_list, fdir_filter, entries); + rte_spinlock_unlock(&fdir_info->flows_lock); + + return ret; +} + +static int hns3_remove_fdir_filter(struct hns3_hw *hw, + struct hns3_fdir_info *fdir_info, + struct hns3_fdir_key_conf *key) +{ + struct hns3_fdir_rule_ele *fdir_filter; + hash_sig_t sig; + int ret; + + rte_spinlock_lock(&fdir_info->flows_lock); + sig = rte_hash_crc(key, sizeof(*key), 0); + ret = rte_hash_del_key_with_hash(fdir_info->hash_handle, key, sig); + if (ret < 0) { + rte_spinlock_unlock(&fdir_info->flows_lock); + hns3_err(hw, "Delete hash key fail ret=%d", ret); + return ret; + } + + fdir_filter = fdir_info->hash_map[ret]; + fdir_info->hash_map[ret] = NULL; + TAILQ_REMOVE(&fdir_info->fdir_list, fdir_filter, entries); + rte_spinlock_unlock(&fdir_info->flows_lock); + + rte_free(fdir_filter); + + return 0; +} + +int hns3_fdir_filter_program(struct hns3_adapter *hns, + struct hns3_fdir_rule *rule, bool del) +{ + struct hns3_pf *pf = &hns->pf; + struct hns3_fdir_info *fdir_info = &pf->fdir; + struct hns3_fdir_rule_ele *node; + struct hns3_hw *hw = &hns->hw; + int ret; + + if (del) { + ret = hns3_fd_tcam_config(hw, true, rule->location, NULL, + false); + if (ret) + hns3_err(hw, "Failed to delete fdir: %d src_ip:%x " + "dst_ip:%x src_port:%d dst_port:%d ret = %d", + rule->location, + rule->key_conf.spec.src_ip[IP_ADDR_KEY_ID], + rule->key_conf.spec.dst_ip[IP_ADDR_KEY_ID], + rule->key_conf.spec.src_port, + rule->key_conf.spec.dst_port, ret); + else + hns3_remove_fdir_filter(hw, fdir_info, &rule->key_conf); + + return ret; + } + + ret = hns3_fdir_filter_lookup(fdir_info, &rule->key_conf); + if (ret >= 0) { + hns3_err(hw, "Conflict with existing fdir loc: %d", ret); + return -EINVAL; + } + + node = rte_zmalloc("hns3 fdir rule", sizeof(struct hns3_fdir_rule_ele), + 0); + if (node == NULL) { + hns3_err(hw, "Failed to allocate fdir_rule memory"); + return -ENOMEM; + } + + rte_memcpy(&node->fdir_conf, rule, sizeof(struct hns3_fdir_rule)); + ret = hns3_insert_fdir_filter(hw, fdir_info, node); + if (ret < 0) { + rte_free(node); + return ret; + } + rule->location = ret; + node->fdir_conf.location = ret; + + rte_spinlock_lock(&fdir_info->flows_lock); + ret = hns3_config_action(hw, rule); + if (!ret) + ret = hns3_config_key(hns, rule); + rte_spinlock_unlock(&fdir_info->flows_lock); + if (ret) { + hns3_err(hw, "Failed to config fdir: %d src_ip:%x dst_ip:%x " + "src_port:%d dst_port:%d ret = %d", + rule->location, + rule->key_conf.spec.src_ip[IP_ADDR_KEY_ID], + rule->key_conf.spec.dst_ip[IP_ADDR_KEY_ID], + 
rule->key_conf.spec.src_port, + rule->key_conf.spec.dst_port, ret); + (void)hns3_remove_fdir_filter(hw, fdir_info, &rule->key_conf); + } + + return ret; +} + +/* remove all the flow director filters */ +int hns3_clear_all_fdir_filter(struct hns3_adapter *hns) +{ + struct hns3_pf *pf = &hns->pf; + struct hns3_fdir_info *fdir_info = &pf->fdir; + struct hns3_fdir_rule_ele *fdir_filter; + struct hns3_hw *hw = &hns->hw; + int ret = 0; + + /* flush flow director */ + rte_spinlock_lock(&fdir_info->flows_lock); + rte_hash_reset(fdir_info->hash_handle); + rte_spinlock_unlock(&fdir_info->flows_lock); + + fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list); + while (fdir_filter) { + TAILQ_REMOVE(&fdir_info->fdir_list, fdir_filter, entries); + ret += hns3_fd_tcam_config(hw, true, + fdir_filter->fdir_conf.location, + NULL, false); + rte_free(fdir_filter); + fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list); + } + + if (ret) { + hns3_err(hw, "Fail to delete FDIR filter, ret = %d", ret); + ret = -EIO; + } + return ret; +} + +int hns3_restore_all_fdir_filter(struct hns3_adapter *hns) +{ + struct hns3_pf *pf = &hns->pf; + struct hns3_fdir_info *fdir_info = &pf->fdir; + struct hns3_fdir_rule_ele *fdir_filter; + struct hns3_hw *hw = &hns->hw; + bool err = false; + int ret; + + TAILQ_FOREACH(fdir_filter, &fdir_info->fdir_list, entries) { + ret = hns3_config_action(hw, &fdir_filter->fdir_conf); + if (!ret) + ret = hns3_config_key(hns, &fdir_filter->fdir_conf); + if (ret) { + err = true; + if (ret == -EBUSY) + break; + } + } + + if (err) { + hns3_err(hw, "Fail to restore FDIR filter, ret = %d", ret); + return -EIO; + } + return 0; +} + +int hns3_get_count(struct hns3_hw *hw, uint32_t id, uint64_t *value) +{ + struct hns3_fd_get_cnt_cmd *req; + struct hns3_cmd_desc desc; + int ret; + + hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_FD_COUNTER_OP, true); + + req = (struct hns3_fd_get_cnt_cmd *)desc.data; + req->stage = HNS3_FD_STAGE_1; + req->index = rte_cpu_to_le_32(id); + + ret = hns3_cmd_send(hw, &desc, 1); + if (ret) { + hns3_err(hw, "Read counter fail, ret=%d", ret); + return ret; + } + + *value = req->value; + + return ret; +} diff --git a/src/spdk/dpdk/drivers/net/hns3/hns3_fdir.h b/src/spdk/dpdk/drivers/net/hns3/hns3_fdir.h new file mode 100644 index 000000000..f7b421613 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/hns3/hns3_fdir.h @@ -0,0 +1,205 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018-2019 Hisilicon Limited. 
+ */ + +#ifndef _HNS3_FDIR_H_ +#define _HNS3_FDIR_H_ + +#include + +struct hns3_fd_key_cfg { + uint8_t key_sel; + uint8_t inner_sipv6_word_en; + uint8_t inner_dipv6_word_en; + uint8_t outer_sipv6_word_en; + uint8_t outer_dipv6_word_en; + uint32_t tuple_active; + uint32_t meta_data_active; +}; + +enum HNS3_FD_STAGE { + HNS3_FD_STAGE_1, + HNS3_FD_STAGE_2, + HNS3_FD_STAGE_NUM, +}; + +enum HNS3_FD_ACTION { + HNS3_FD_ACTION_ACCEPT_PACKET, + HNS3_FD_ACTION_DROP_PACKET, +}; + +struct hns3_fd_cfg { + uint8_t fd_mode; + uint16_t max_key_length; + uint32_t rule_num[HNS3_FD_STAGE_NUM]; /* rule entry number */ + uint16_t cnt_num[HNS3_FD_STAGE_NUM]; /* rule hit counter number */ + struct hns3_fd_key_cfg key_cfg[HNS3_FD_STAGE_NUM]; +}; + +/* OUTER_XXX indicates tuples in tunnel header of tunnel packet + * INNER_XXX indicate tuples in tunneled header of tunnel packet or + * tuples of non-tunnel packet + */ +enum HNS3_FD_TUPLE { + OUTER_DST_MAC, + OUTER_SRC_MAC, + OUTER_VLAN_TAG_FST, + OUTER_VLAN_TAG_SEC, + OUTER_ETH_TYPE, + OUTER_L2_RSV, + OUTER_IP_TOS, + OUTER_IP_PROTO, + OUTER_SRC_IP, + OUTER_DST_IP, + OUTER_L3_RSV, + OUTER_SRC_PORT, + OUTER_DST_PORT, + OUTER_L4_RSV, + OUTER_TUN_VNI, + OUTER_TUN_FLOW_ID, + INNER_DST_MAC, + INNER_SRC_MAC, + INNER_VLAN_TAG1, + INNER_VLAN_TAG2, + INNER_ETH_TYPE, + INNER_L2_RSV, + INNER_IP_TOS, + INNER_IP_PROTO, + INNER_SRC_IP, + INNER_DST_IP, + INNER_L3_RSV, + INNER_SRC_PORT, + INNER_DST_PORT, + INNER_SCTP_TAG, + MAX_TUPLE, +}; + +#define VLAN_TAG_NUM_MAX 2 +#define VNI_OR_TNI_LEN 3 +#define IP_ADDR_LEN 4 /* Length of IPv6 address. */ +#define IP_ADDR_KEY_ID 3 /* The last 32bit of IP address as FDIR search key */ +#define IPV6_ADDR_WORD_MASK 3 /* The last two word of IPv6 as FDIR search key */ + +struct hns3_fd_rule_tuples { + uint8_t src_mac[RTE_ETHER_ADDR_LEN]; + uint8_t dst_mac[RTE_ETHER_ADDR_LEN]; + uint32_t src_ip[IP_ADDR_LEN]; + uint32_t dst_ip[IP_ADDR_LEN]; + uint16_t src_port; + uint16_t dst_port; + uint16_t vlan_tag1; + uint16_t vlan_tag2; + uint16_t ether_type; + uint8_t ip_tos; + uint8_t ip_proto; + uint32_t sctp_tag; + uint16_t outer_src_port; + uint16_t tunnel_type; + uint16_t outer_ether_type; + uint8_t outer_proto; + uint8_t outer_tun_vni[VNI_OR_TNI_LEN]; + uint8_t outer_tun_flow_id; +}; + +struct hns3_fd_ad_data { + uint16_t ad_id; + uint8_t drop_packet; + uint8_t forward_to_direct_queue; + uint16_t queue_id; + uint8_t use_counter; + uint8_t counter_id; + uint8_t use_next_stage; + uint8_t write_rule_id_to_bd; + uint8_t next_input_key; + uint16_t rule_id; +}; + +struct hns3_flow_counter { + LIST_ENTRY(hns3_flow_counter) next; /* Pointer to the next counter. */ + uint32_t shared:1; /* Share counter ID with other flow rules. */ + uint32_t ref_cnt:31; /* Reference counter. */ + uint16_t id; /* Counter ID. */ + uint64_t hits; /* Number of packets matched by the rule. */ +}; + +#define HNS3_RULE_FLAG_FDID 0x1 +#define HNS3_RULE_FLAG_VF_ID 0x2 +#define HNS3_RULE_FLAG_COUNTER 0x4 + +struct hns3_fdir_key_conf { + struct hns3_fd_rule_tuples spec; + struct hns3_fd_rule_tuples mask; + uint8_t vlan_num; + uint8_t outer_vlan_num; +}; + +struct hns3_fdir_rule { + struct hns3_fdir_key_conf key_conf; + uint32_t input_set; + uint32_t flags; + uint32_t fd_id; /* APP marked unique value for this rule. */ + uint8_t action; + /* VF id, avaiblable when flags with HNS3_RULE_FLAG_VF_ID. 
*/ + uint8_t vf_id; + uint16_t queue_id; + uint16_t location; + struct rte_flow_action_count act_cnt; +}; + +/* FDIR filter list structure */ +struct hns3_fdir_rule_ele { + TAILQ_ENTRY(hns3_fdir_rule_ele) entries; + struct hns3_fdir_rule fdir_conf; +}; + +/* rss filter list structure */ +struct hns3_rss_conf_ele { + TAILQ_ENTRY(hns3_rss_conf_ele) entries; + struct hns3_rss_conf filter_info; +}; + +/* hns3_flow memory list structure */ +struct hns3_flow_mem { + TAILQ_ENTRY(hns3_flow_mem) entries; + struct rte_flow *flow; +}; + +TAILQ_HEAD(hns3_fdir_rule_list, hns3_fdir_rule_ele); +TAILQ_HEAD(hns3_rss_filter_list, hns3_rss_conf_ele); +TAILQ_HEAD(hns3_flow_mem_list, hns3_flow_mem); + +struct hns3_process_private { + struct hns3_fdir_rule_list fdir_list; + struct hns3_rss_filter_list filter_rss_list; + struct hns3_flow_mem_list flow_list; +}; + +/* + * A structure used to define fields of a FDIR related info. + */ +struct hns3_fdir_info { + rte_spinlock_t flows_lock; + struct hns3_fdir_rule_list fdir_list; + struct hns3_fdir_rule_ele **hash_map; + struct rte_hash *hash_handle; + struct hns3_fd_cfg fd_cfg; +}; + +struct rte_flow { + enum rte_filter_type filter_type; + void *rule; + uint32_t counter_id; +}; +struct hns3_adapter; + +int hns3_init_fd_config(struct hns3_adapter *hns); +int hns3_fdir_filter_init(struct hns3_adapter *hns); +void hns3_fdir_filter_uninit(struct hns3_adapter *hns); +int hns3_fdir_filter_program(struct hns3_adapter *hns, + struct hns3_fdir_rule *rule, bool del); +int hns3_clear_all_fdir_filter(struct hns3_adapter *hns); +int hns3_get_count(struct hns3_hw *hw, uint32_t id, uint64_t *value); +void hns3_filterlist_init(struct rte_eth_dev *dev); +int hns3_restore_all_fdir_filter(struct hns3_adapter *hns); + +#endif /* _HNS3_FDIR_H_ */ diff --git a/src/spdk/dpdk/drivers/net/hns3/hns3_flow.c b/src/spdk/dpdk/drivers/net/hns3/hns3_flow.c new file mode 100644 index 000000000..aef301a8a --- /dev/null +++ b/src/spdk/dpdk/drivers/net/hns3/hns3_flow.c @@ -0,0 +1,1923 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018-2019 Hisilicon Limited. + */ + +#include +#include +#include +#include +#include + +#include "hns3_ethdev.h" +#include "hns3_logs.h" + +/* Default default keys */ +static uint8_t hns3_hash_key[] = { + 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, + 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0, + 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4, + 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, + 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA +}; + +static const uint8_t full_mask[VNI_OR_TNI_LEN] = { 0xFF, 0xFF, 0xFF }; +static const uint8_t zero_mask[VNI_OR_TNI_LEN] = { 0x00, 0x00, 0x00 }; + +/* Special Filter id for non-specific packet flagging. 
Don't change value */ +#define HNS3_MAX_FILTER_ID 0x0FFF + +#define ETHER_TYPE_MASK 0xFFFF +#define IPPROTO_MASK 0xFF +#define TUNNEL_TYPE_MASK 0xFFFF + +#define HNS3_TUNNEL_TYPE_VXLAN 0x12B5 +#define HNS3_TUNNEL_TYPE_VXLAN_GPE 0x12B6 +#define HNS3_TUNNEL_TYPE_GENEVE 0x17C1 +#define HNS3_TUNNEL_TYPE_NVGRE 0x6558 + +static enum rte_flow_item_type first_items[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_ICMP, + RTE_FLOW_ITEM_TYPE_NVGRE, + RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_GENEVE, + RTE_FLOW_ITEM_TYPE_VXLAN_GPE, + RTE_FLOW_ITEM_TYPE_MPLS +}; + +static enum rte_flow_item_type L2_next_items[] = { + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_IPV6 +}; + +static enum rte_flow_item_type L3_next_items[] = { + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_NVGRE, + RTE_FLOW_ITEM_TYPE_ICMP +}; + +static enum rte_flow_item_type L4_next_items[] = { + RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_GENEVE, + RTE_FLOW_ITEM_TYPE_VXLAN_GPE, + RTE_FLOW_ITEM_TYPE_MPLS +}; + +static enum rte_flow_item_type tunnel_next_items[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN +}; + +struct items_step_mngr { + enum rte_flow_item_type *items; + int count; +}; + +static inline void +net_addr_to_host(uint32_t *dst, const rte_be32_t *src, size_t len) +{ + size_t i; + + for (i = 0; i < len; i++) + dst[i] = rte_be_to_cpu_32(src[i]); +} + +static inline const struct rte_flow_action * +find_rss_action(const struct rte_flow_action actions[]) +{ + const struct rte_flow_action *next = &actions[0]; + + for (; next->type != RTE_FLOW_ACTION_TYPE_END; next++) { + if (next->type == RTE_FLOW_ACTION_TYPE_RSS) + return next; + } + return NULL; +} + +static inline struct hns3_flow_counter * +hns3_counter_lookup(struct rte_eth_dev *dev, uint32_t id) +{ + struct hns3_adapter *hns = dev->data->dev_private; + struct hns3_pf *pf = &hns->pf; + struct hns3_flow_counter *cnt; + + LIST_FOREACH(cnt, &pf->flow_counters, next) { + if (cnt->id == id) + return cnt; + } + return NULL; +} + +static int +hns3_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id, + struct rte_flow_error *error) +{ + struct hns3_adapter *hns = dev->data->dev_private; + struct hns3_pf *pf = &hns->pf; + struct hns3_flow_counter *cnt; + + cnt = hns3_counter_lookup(dev, id); + if (cnt) { + if (!cnt->shared || cnt->shared != shared) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + cnt, + "Counter id is used,shared flag not match"); + cnt->ref_cnt++; + return 0; + } + + cnt = rte_zmalloc("hns3 counter", sizeof(*cnt), 0); + if (cnt == NULL) + return rte_flow_error_set(error, ENOMEM, + RTE_FLOW_ERROR_TYPE_ACTION, cnt, + "Alloc mem for counter failed"); + cnt->id = id; + cnt->shared = shared; + cnt->ref_cnt = 1; + cnt->hits = 0; + LIST_INSERT_HEAD(&pf->flow_counters, cnt, next); + return 0; +} + +static int +hns3_counter_query(struct rte_eth_dev *dev, struct rte_flow *flow, + struct rte_flow_query_count *qc, + struct rte_flow_error *error) +{ + struct hns3_adapter *hns = dev->data->dev_private; + struct hns3_flow_counter *cnt; + uint64_t value; + int ret; + + /* FDIR is available only in PF driver */ + if (hns->is_vf) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Fdir is not supported in VF"); + cnt = hns3_counter_lookup(dev, flow->counter_id); + if (cnt == NULL) + 
return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "Can't find counter id"); + + ret = hns3_get_count(&hns->hw, flow->counter_id, &value); + if (ret) { + rte_flow_error_set(error, -ret, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, "Read counter fail."); + return ret; + } + qc->hits_set = 1; + qc->hits = value; + + return 0; +} + +static int +hns3_counter_release(struct rte_eth_dev *dev, uint32_t id) +{ + struct hns3_adapter *hns = dev->data->dev_private; + struct hns3_hw *hw = &hns->hw; + struct hns3_flow_counter *cnt; + + cnt = hns3_counter_lookup(dev, id); + if (cnt == NULL) { + hns3_err(hw, "Can't find available counter to release"); + return -EINVAL; + } + cnt->ref_cnt--; + if (cnt->ref_cnt == 0) { + LIST_REMOVE(cnt, next); + rte_free(cnt); + } + return 0; +} + +static void +hns3_counter_flush(struct rte_eth_dev *dev) +{ + struct hns3_adapter *hns = dev->data->dev_private; + struct hns3_pf *pf = &hns->pf; + struct hns3_flow_counter *cnt_ptr; + + cnt_ptr = LIST_FIRST(&pf->flow_counters); + while (cnt_ptr) { + LIST_REMOVE(cnt_ptr, next); + rte_free(cnt_ptr); + cnt_ptr = LIST_FIRST(&pf->flow_counters); + } +} + +static int +hns3_handle_action_queue(struct rte_eth_dev *dev, + const struct rte_flow_action *action, + struct hns3_fdir_rule *rule, + struct rte_flow_error *error) +{ + struct hns3_adapter *hns = dev->data->dev_private; + const struct rte_flow_action_queue *queue; + struct hns3_hw *hw = &hns->hw; + + queue = (const struct rte_flow_action_queue *)action->conf; + if (queue->index >= hw->used_rx_queues) { + hns3_err(hw, "queue ID(%d) is greater than number of " + "available queue (%d) in driver.", + queue->index, hw->used_rx_queues); + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, action, + "Invalid queue ID in PF"); + } + + rule->queue_id = queue->index; + rule->action = HNS3_FD_ACTION_ACCEPT_PACKET; + return 0; +} + +/* + * Parse actions structure from the provided pattern. + * The pattern is validated as the items are copied. + * + * @param actions[in] + * @param rule[out] + * NIC specfilc actions derived from the actions. 
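 *   For example (illustrative values, assuming counter id 0 lies within
 *   the stage-1 counter range), the action list
 *
 *      struct rte_flow_action_queue queue = { .index = 3 };
 *      struct rte_flow_action_count cnt = { .id = 0 };
 *      struct rte_flow_action actions[] = {
 *              { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *              { .type = RTE_FLOW_ACTION_TYPE_COUNT, .conf = &cnt },
 *              { .type = RTE_FLOW_ACTION_TYPE_END },
 *      };
 *
 *   would yield HNS3_FD_ACTION_ACCEPT_PACKET, queue_id 3 and the
 *   HNS3_RULE_FLAG_COUNTER flag in the resulting rule.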
+ * @param error[out] + */ +static int +hns3_handle_actions(struct rte_eth_dev *dev, + const struct rte_flow_action actions[], + struct hns3_fdir_rule *rule, struct rte_flow_error *error) +{ + struct hns3_adapter *hns = dev->data->dev_private; + const struct rte_flow_action_count *act_count; + const struct rte_flow_action_mark *mark; + struct hns3_pf *pf = &hns->pf; + uint32_t counter_num; + int ret; + + for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { + switch (actions->type) { + case RTE_FLOW_ACTION_TYPE_QUEUE: + ret = hns3_handle_action_queue(dev, actions, rule, + error); + if (ret) + return ret; + break; + case RTE_FLOW_ACTION_TYPE_DROP: + rule->action = HNS3_FD_ACTION_DROP_PACKET; + break; + case RTE_FLOW_ACTION_TYPE_MARK: + mark = + (const struct rte_flow_action_mark *)actions->conf; + if (mark->id >= HNS3_MAX_FILTER_ID) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + actions, + "Invalid Mark ID"); + rule->fd_id = mark->id; + rule->flags |= HNS3_RULE_FLAG_FDID; + break; + case RTE_FLOW_ACTION_TYPE_FLAG: + rule->fd_id = HNS3_MAX_FILTER_ID; + rule->flags |= HNS3_RULE_FLAG_FDID; + break; + case RTE_FLOW_ACTION_TYPE_COUNT: + act_count = + (const struct rte_flow_action_count *)actions->conf; + counter_num = pf->fdir.fd_cfg.cnt_num[HNS3_FD_STAGE_1]; + if (act_count->id >= counter_num) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + actions, + "Invalid counter id"); + rule->act_cnt = *act_count; + rule->flags |= HNS3_RULE_FLAG_COUNTER; + break; + case RTE_FLOW_ACTION_TYPE_VOID: + break; + default: + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, "Unsupported action"); + } + } + + return 0; +} + +/* Parse to get the attr and action info of flow director rule. */ +static int +hns3_check_attr(const struct rte_flow_attr *attr, struct rte_flow_error *error) +{ + if (!attr->ingress) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, + attr, "Ingress can't be zero"); + if (attr->egress) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, + attr, "Not support egress"); + if (attr->transfer) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, + attr, "No support for transfer"); + if (attr->priority) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, + attr, "Not support priority"); + if (attr->group) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_GROUP, + attr, "Not support group"); + return 0; +} + +static int +hns3_parse_eth(const struct rte_flow_item *item, + struct hns3_fdir_rule *rule, struct rte_flow_error *error) +{ + const struct rte_flow_item_eth *eth_spec; + const struct rte_flow_item_eth *eth_mask; + + if (item->spec == NULL && item->mask) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Can't configure FDIR with mask but without spec"); + + /* Only used to describe the protocol stack. 
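 * A spec/mask pair is what actually narrows the match; for example
 * (values illustrative), constraining only the EtherType:
 *
 *      struct rte_flow_item_eth spec = {
 *              .type = RTE_BE16(RTE_ETHER_TYPE_IPV4),
 *      };
 *      struct rte_flow_item_eth mask = {
 *              .type = RTE_BE16(0xffff),
 *      };
 *
 * Only fields with a non-zero mask contribute bits to rule->input_set.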
*/ + if (item->spec == NULL && item->mask == NULL) + return 0; + + if (item->mask) { + eth_mask = item->mask; + if (eth_mask->type) { + hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 1); + rule->key_conf.mask.ether_type = + rte_be_to_cpu_16(eth_mask->type); + } + if (!rte_is_zero_ether_addr(ð_mask->src)) { + hns3_set_bit(rule->input_set, INNER_SRC_MAC, 1); + memcpy(rule->key_conf.mask.src_mac, + eth_mask->src.addr_bytes, RTE_ETHER_ADDR_LEN); + } + if (!rte_is_zero_ether_addr(ð_mask->dst)) { + hns3_set_bit(rule->input_set, INNER_DST_MAC, 1); + memcpy(rule->key_conf.mask.dst_mac, + eth_mask->dst.addr_bytes, RTE_ETHER_ADDR_LEN); + } + } + + eth_spec = item->spec; + rule->key_conf.spec.ether_type = rte_be_to_cpu_16(eth_spec->type); + memcpy(rule->key_conf.spec.src_mac, eth_spec->src.addr_bytes, + RTE_ETHER_ADDR_LEN); + memcpy(rule->key_conf.spec.dst_mac, eth_spec->dst.addr_bytes, + RTE_ETHER_ADDR_LEN); + return 0; +} + +static int +hns3_parse_vlan(const struct rte_flow_item *item, struct hns3_fdir_rule *rule, + struct rte_flow_error *error) +{ + const struct rte_flow_item_vlan *vlan_spec; + const struct rte_flow_item_vlan *vlan_mask; + + if (item->spec == NULL && item->mask) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Can't configure FDIR with mask but without spec"); + + rule->key_conf.vlan_num++; + if (rule->key_conf.vlan_num > VLAN_TAG_NUM_MAX) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Vlan_num is more than 2"); + + /* Only used to describe the protocol stack. */ + if (item->spec == NULL && item->mask == NULL) + return 0; + + if (item->mask) { + vlan_mask = item->mask; + if (vlan_mask->tci) { + if (rule->key_conf.vlan_num == 1) { + hns3_set_bit(rule->input_set, INNER_VLAN_TAG1, + 1); + rule->key_conf.mask.vlan_tag1 = + rte_be_to_cpu_16(vlan_mask->tci); + } else { + hns3_set_bit(rule->input_set, INNER_VLAN_TAG2, + 1); + rule->key_conf.mask.vlan_tag2 = + rte_be_to_cpu_16(vlan_mask->tci); + } + } + } + + vlan_spec = item->spec; + if (rule->key_conf.vlan_num == 1) + rule->key_conf.spec.vlan_tag1 = + rte_be_to_cpu_16(vlan_spec->tci); + else + rule->key_conf.spec.vlan_tag2 = + rte_be_to_cpu_16(vlan_spec->tci); + return 0; +} + +static int +hns3_parse_ipv4(const struct rte_flow_item *item, struct hns3_fdir_rule *rule, + struct rte_flow_error *error) +{ + const struct rte_flow_item_ipv4 *ipv4_spec; + const struct rte_flow_item_ipv4 *ipv4_mask; + + if (item->spec == NULL && item->mask) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Can't configure FDIR with mask but without spec"); + + hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 1); + rule->key_conf.spec.ether_type = RTE_ETHER_TYPE_IPV4; + rule->key_conf.mask.ether_type = ETHER_TYPE_MASK; + /* Only used to describe the protocol stack. 
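 * For example (addresses illustrative; the RTE_IPV4() helper from
 * rte_ip.h is assumed), a spec/mask pair matching one source address:
 *
 *      struct rte_flow_item_ipv4 spec = {
 *              .hdr.src_addr = RTE_BE32(RTE_IPV4(192, 168, 1, 20)),
 *      };
 *      struct rte_flow_item_ipv4 mask = {
 *              .hdr.src_addr = RTE_BE32(0xffffffff),
 *      };
 *
 * Masks on total_length, packet_id, fragment_offset, time_to_live or
 * hdr_checksum are rejected below.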
*/ + if (item->spec == NULL && item->mask == NULL) + return 0; + + if (item->mask) { + ipv4_mask = item->mask; + + if (ipv4_mask->hdr.total_length || + ipv4_mask->hdr.packet_id || + ipv4_mask->hdr.fragment_offset || + ipv4_mask->hdr.time_to_live || + ipv4_mask->hdr.hdr_checksum) { + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Only support src & dst ip,tos,proto in IPV4"); + } + + if (ipv4_mask->hdr.src_addr) { + hns3_set_bit(rule->input_set, INNER_SRC_IP, 1); + rule->key_conf.mask.src_ip[IP_ADDR_KEY_ID] = + rte_be_to_cpu_32(ipv4_mask->hdr.src_addr); + } + + if (ipv4_mask->hdr.dst_addr) { + hns3_set_bit(rule->input_set, INNER_DST_IP, 1); + rule->key_conf.mask.dst_ip[IP_ADDR_KEY_ID] = + rte_be_to_cpu_32(ipv4_mask->hdr.dst_addr); + } + + if (ipv4_mask->hdr.type_of_service) { + hns3_set_bit(rule->input_set, INNER_IP_TOS, 1); + rule->key_conf.mask.ip_tos = + ipv4_mask->hdr.type_of_service; + } + + if (ipv4_mask->hdr.next_proto_id) { + hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1); + rule->key_conf.mask.ip_proto = + ipv4_mask->hdr.next_proto_id; + } + } + + ipv4_spec = item->spec; + rule->key_conf.spec.src_ip[IP_ADDR_KEY_ID] = + rte_be_to_cpu_32(ipv4_spec->hdr.src_addr); + rule->key_conf.spec.dst_ip[IP_ADDR_KEY_ID] = + rte_be_to_cpu_32(ipv4_spec->hdr.dst_addr); + rule->key_conf.spec.ip_tos = ipv4_spec->hdr.type_of_service; + rule->key_conf.spec.ip_proto = ipv4_spec->hdr.next_proto_id; + return 0; +} + +static int +hns3_parse_ipv6(const struct rte_flow_item *item, struct hns3_fdir_rule *rule, + struct rte_flow_error *error) +{ + const struct rte_flow_item_ipv6 *ipv6_spec; + const struct rte_flow_item_ipv6 *ipv6_mask; + + if (item->spec == NULL && item->mask) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Can't configure FDIR with mask but without spec"); + + hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 1); + rule->key_conf.spec.ether_type = RTE_ETHER_TYPE_IPV6; + rule->key_conf.mask.ether_type = ETHER_TYPE_MASK; + + /* Only used to describe the protocol stack. 
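 * For example (illustrative), constraining only the next-header field:
 *
 *      struct rte_flow_item_ipv6 spec = { .hdr.proto = IPPROTO_TCP };
 *      struct rte_flow_item_ipv6 mask = { .hdr.proto = 0xff };
 *
 * Masks on vtc_flow, payload_len or hop_limits are rejected below.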
*/ + if (item->spec == NULL && item->mask == NULL) + return 0; + + if (item->mask) { + ipv6_mask = item->mask; + if (ipv6_mask->hdr.vtc_flow || + ipv6_mask->hdr.payload_len || ipv6_mask->hdr.hop_limits) { + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Only support src & dst ip,proto in IPV6"); + } + net_addr_to_host(rule->key_conf.mask.src_ip, + (const rte_be32_t *)ipv6_mask->hdr.src_addr, + IP_ADDR_LEN); + net_addr_to_host(rule->key_conf.mask.dst_ip, + (const rte_be32_t *)ipv6_mask->hdr.dst_addr, + IP_ADDR_LEN); + rule->key_conf.mask.ip_proto = ipv6_mask->hdr.proto; + if (rule->key_conf.mask.src_ip[IP_ADDR_KEY_ID]) + hns3_set_bit(rule->input_set, INNER_SRC_IP, 1); + if (rule->key_conf.mask.dst_ip[IP_ADDR_KEY_ID]) + hns3_set_bit(rule->input_set, INNER_DST_IP, 1); + if (ipv6_mask->hdr.proto) + hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1); + } + + ipv6_spec = item->spec; + net_addr_to_host(rule->key_conf.spec.src_ip, + (const rte_be32_t *)ipv6_spec->hdr.src_addr, + IP_ADDR_LEN); + net_addr_to_host(rule->key_conf.spec.dst_ip, + (const rte_be32_t *)ipv6_spec->hdr.dst_addr, + IP_ADDR_LEN); + rule->key_conf.spec.ip_proto = ipv6_spec->hdr.proto; + + return 0; +} + +static int +hns3_parse_tcp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule, + struct rte_flow_error *error) +{ + const struct rte_flow_item_tcp *tcp_spec; + const struct rte_flow_item_tcp *tcp_mask; + + if (item->spec == NULL && item->mask) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Can't configure FDIR with mask but without spec"); + + hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1); + rule->key_conf.spec.ip_proto = IPPROTO_TCP; + rule->key_conf.mask.ip_proto = IPPROTO_MASK; + + /* Only used to describe the protocol stack. */ + if (item->spec == NULL && item->mask == NULL) + return 0; + + if (item->mask) { + tcp_mask = item->mask; + if (tcp_mask->hdr.sent_seq || + tcp_mask->hdr.recv_ack || + tcp_mask->hdr.data_off || + tcp_mask->hdr.tcp_flags || + tcp_mask->hdr.rx_win || + tcp_mask->hdr.cksum || tcp_mask->hdr.tcp_urp) { + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Only support src & dst port in TCP"); + } + + if (tcp_mask->hdr.src_port) { + hns3_set_bit(rule->input_set, INNER_SRC_PORT, 1); + rule->key_conf.mask.src_port = + rte_be_to_cpu_16(tcp_mask->hdr.src_port); + } + if (tcp_mask->hdr.dst_port) { + hns3_set_bit(rule->input_set, INNER_DST_PORT, 1); + rule->key_conf.mask.dst_port = + rte_be_to_cpu_16(tcp_mask->hdr.dst_port); + } + } + + tcp_spec = item->spec; + rule->key_conf.spec.src_port = rte_be_to_cpu_16(tcp_spec->hdr.src_port); + rule->key_conf.spec.dst_port = rte_be_to_cpu_16(tcp_spec->hdr.dst_port); + + return 0; +} + +static int +hns3_parse_udp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule, + struct rte_flow_error *error) +{ + const struct rte_flow_item_udp *udp_spec; + const struct rte_flow_item_udp *udp_mask; + + if (item->spec == NULL && item->mask) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Can't configure FDIR with mask but without spec"); + + hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1); + rule->key_conf.spec.ip_proto = IPPROTO_UDP; + rule->key_conf.mask.ip_proto = IPPROTO_MASK; + /* Only used to describe the protocol stack. 
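 * For example (the port value is illustrative), matching one destination
 * port:
 *
 *      struct rte_flow_item_udp spec = { .hdr.dst_port = RTE_BE16(4789) };
 *      struct rte_flow_item_udp mask = { .hdr.dst_port = RTE_BE16(0xffff) };
 *
 * Masks on dgram_len or dgram_cksum are rejected below.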
*/ + if (item->spec == NULL && item->mask == NULL) + return 0; + + if (item->mask) { + udp_mask = item->mask; + if (udp_mask->hdr.dgram_len || udp_mask->hdr.dgram_cksum) { + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Only support src & dst port in UDP"); + } + if (udp_mask->hdr.src_port) { + hns3_set_bit(rule->input_set, INNER_SRC_PORT, 1); + rule->key_conf.mask.src_port = + rte_be_to_cpu_16(udp_mask->hdr.src_port); + } + if (udp_mask->hdr.dst_port) { + hns3_set_bit(rule->input_set, INNER_DST_PORT, 1); + rule->key_conf.mask.dst_port = + rte_be_to_cpu_16(udp_mask->hdr.dst_port); + } + } + + udp_spec = item->spec; + rule->key_conf.spec.src_port = rte_be_to_cpu_16(udp_spec->hdr.src_port); + rule->key_conf.spec.dst_port = rte_be_to_cpu_16(udp_spec->hdr.dst_port); + + return 0; +} + +static int +hns3_parse_sctp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule, + struct rte_flow_error *error) +{ + const struct rte_flow_item_sctp *sctp_spec; + const struct rte_flow_item_sctp *sctp_mask; + + if (item->spec == NULL && item->mask) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Can't configure FDIR with mask but without spec"); + + hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1); + rule->key_conf.spec.ip_proto = IPPROTO_SCTP; + rule->key_conf.mask.ip_proto = IPPROTO_MASK; + + /* Only used to describe the protocol stack. */ + if (item->spec == NULL && item->mask == NULL) + return 0; + + if (item->mask) { + sctp_mask = item->mask; + if (sctp_mask->hdr.cksum) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Only support src & dst port in SCTP"); + + if (sctp_mask->hdr.src_port) { + hns3_set_bit(rule->input_set, INNER_SRC_PORT, 1); + rule->key_conf.mask.src_port = + rte_be_to_cpu_16(sctp_mask->hdr.src_port); + } + if (sctp_mask->hdr.dst_port) { + hns3_set_bit(rule->input_set, INNER_DST_PORT, 1); + rule->key_conf.mask.dst_port = + rte_be_to_cpu_16(sctp_mask->hdr.dst_port); + } + if (sctp_mask->hdr.tag) { + hns3_set_bit(rule->input_set, INNER_SCTP_TAG, 1); + rule->key_conf.mask.sctp_tag = + rte_be_to_cpu_32(sctp_mask->hdr.tag); + } + } + + sctp_spec = item->spec; + rule->key_conf.spec.src_port = + rte_be_to_cpu_16(sctp_spec->hdr.src_port); + rule->key_conf.spec.dst_port = + rte_be_to_cpu_16(sctp_spec->hdr.dst_port); + rule->key_conf.spec.sctp_tag = rte_be_to_cpu_32(sctp_spec->hdr.tag); + + return 0; +} + +/* + * Check items before tunnel, save inner configs to outer configs,and clear + * inner configs. + * The key consists of two parts: meta_data and tuple keys. + * Meta data uses 15 bits, including vlan_num(2bit), des_port(12bit) and tunnel + * packet(1bit). + * Tuple keys uses 384bit, including ot_dst-mac(48bit), ot_dst-port(16bit), + * ot_tun_vni(24bit), ot_flow_id(8bit), src-mac(48bit), dst-mac(48bit), + * src-ip(32/128bit), dst-ip(32/128bit), src-port(16bit), dst-port(16bit), + * tos(8bit), ether-proto(16bit), ip-proto(8bit), vlantag1(16bit), + * Vlantag2(16bit) and sctp-tag(32bit). 
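 * As a quick consistency check on the widths above: the meta data fields
 * add up to 2 + 12 + 1 = 15 bits, and with 32-bit (IPv4) addresses the
 * tuple fields add up to 48 + 16 + 24 + 8 + 48 + 48 + 32 + 32 + 16 + 16 +
 * 8 + 16 + 8 + 16 + 16 + 32 = 384 bits, i.e. the full tuple key. Full
 * 128-bit IPv6 addresses would not both fit in that budget, which is
 * consistent with only part of an IPv6 address taking part in the key
 * (see IP_ADDR_KEY_ID in hns3_fdir.h).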
+ */ +static int +hns3_handle_tunnel(const struct rte_flow_item *item, + struct hns3_fdir_rule *rule, struct rte_flow_error *error) +{ + /* check eth config */ + if (rule->input_set & (BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC))) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Outer eth mac is unsupported"); + if (rule->input_set & BIT(INNER_ETH_TYPE)) { + hns3_set_bit(rule->input_set, OUTER_ETH_TYPE, 1); + rule->key_conf.spec.outer_ether_type = + rule->key_conf.spec.ether_type; + rule->key_conf.mask.outer_ether_type = + rule->key_conf.mask.ether_type; + hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 0); + rule->key_conf.spec.ether_type = 0; + rule->key_conf.mask.ether_type = 0; + } + + /* check vlan config */ + if (rule->input_set & (BIT(INNER_VLAN_TAG1) | BIT(INNER_VLAN_TAG2))) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Outer vlan tags is unsupported"); + + /* clear vlan_num for inner vlan select */ + rule->key_conf.outer_vlan_num = rule->key_conf.vlan_num; + rule->key_conf.vlan_num = 0; + + /* check L3 config */ + if (rule->input_set & + (BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) | BIT(INNER_IP_TOS))) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Outer ip is unsupported"); + if (rule->input_set & BIT(INNER_IP_PROTO)) { + hns3_set_bit(rule->input_set, OUTER_IP_PROTO, 1); + rule->key_conf.spec.outer_proto = rule->key_conf.spec.ip_proto; + rule->key_conf.mask.outer_proto = rule->key_conf.mask.ip_proto; + hns3_set_bit(rule->input_set, INNER_IP_PROTO, 0); + rule->key_conf.spec.ip_proto = 0; + rule->key_conf.mask.ip_proto = 0; + } + + /* check L4 config */ + if (rule->input_set & BIT(INNER_SCTP_TAG)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Outer sctp tag is unsupported"); + + if (rule->input_set & BIT(INNER_SRC_PORT)) { + hns3_set_bit(rule->input_set, OUTER_SRC_PORT, 1); + rule->key_conf.spec.outer_src_port = + rule->key_conf.spec.src_port; + rule->key_conf.mask.outer_src_port = + rule->key_conf.mask.src_port; + hns3_set_bit(rule->input_set, INNER_SRC_PORT, 0); + rule->key_conf.spec.src_port = 0; + rule->key_conf.mask.src_port = 0; + } + if (rule->input_set & BIT(INNER_DST_PORT)) { + hns3_set_bit(rule->input_set, INNER_DST_PORT, 0); + rule->key_conf.spec.dst_port = 0; + rule->key_conf.mask.dst_port = 0; + } + return 0; +} + +static int +hns3_parse_vxlan(const struct rte_flow_item *item, struct hns3_fdir_rule *rule, + struct rte_flow_error *error) +{ + const struct rte_flow_item_vxlan *vxlan_spec; + const struct rte_flow_item_vxlan *vxlan_mask; + + if (item->spec == NULL && item->mask) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Can't configure FDIR with mask but without spec"); + else if (item->spec && (item->mask == NULL)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Tunnel packets must configure with mask"); + + hns3_set_bit(rule->input_set, OUTER_DST_PORT, 1); + rule->key_conf.mask.tunnel_type = TUNNEL_TYPE_MASK; + if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) + rule->key_conf.spec.tunnel_type = HNS3_TUNNEL_TYPE_VXLAN; + else + rule->key_conf.spec.tunnel_type = HNS3_TUNNEL_TYPE_VXLAN_GPE; + + /* Only used to describe the protocol stack. 
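 * For example (the VNI value is illustrative), matching a single VXLAN
 * VNI, which per the check below must be masked in full:
 *
 *      struct rte_flow_item_vxlan spec = { .vni = { 0x12, 0x34, 0x56 } };
 *      struct rte_flow_item_vxlan mask = { .vni = { 0xff, 0xff, 0xff } };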
*/ + if (item->spec == NULL && item->mask == NULL) + return 0; + + vxlan_mask = item->mask; + vxlan_spec = item->spec; + + if (vxlan_mask->flags) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Flags is not supported in VxLAN"); + + /* VNI must be totally masked or not. */ + if (memcmp(vxlan_mask->vni, full_mask, VNI_OR_TNI_LEN) && + memcmp(vxlan_mask->vni, zero_mask, VNI_OR_TNI_LEN)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "VNI must be totally masked or not in VxLAN"); + if (vxlan_mask->vni[0]) { + hns3_set_bit(rule->input_set, OUTER_TUN_VNI, 1); + memcpy(rule->key_conf.mask.outer_tun_vni, vxlan_mask->vni, + VNI_OR_TNI_LEN); + } + memcpy(rule->key_conf.spec.outer_tun_vni, vxlan_spec->vni, + VNI_OR_TNI_LEN); + return 0; +} + +static int +hns3_parse_nvgre(const struct rte_flow_item *item, struct hns3_fdir_rule *rule, + struct rte_flow_error *error) +{ + const struct rte_flow_item_nvgre *nvgre_spec; + const struct rte_flow_item_nvgre *nvgre_mask; + + if (item->spec == NULL && item->mask) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Can't configure FDIR with mask but without spec"); + else if (item->spec && (item->mask == NULL)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Tunnel packets must configure with mask"); + + hns3_set_bit(rule->input_set, OUTER_IP_PROTO, 1); + rule->key_conf.spec.outer_proto = IPPROTO_GRE; + rule->key_conf.mask.outer_proto = IPPROTO_MASK; + + hns3_set_bit(rule->input_set, OUTER_DST_PORT, 1); + rule->key_conf.spec.tunnel_type = HNS3_TUNNEL_TYPE_NVGRE; + rule->key_conf.mask.tunnel_type = ~HNS3_TUNNEL_TYPE_NVGRE; + /* Only used to describe the protocol stack. */ + if (item->spec == NULL && item->mask == NULL) + return 0; + + nvgre_mask = item->mask; + nvgre_spec = item->spec; + + if (nvgre_mask->protocol || nvgre_mask->c_k_s_rsvd0_ver) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Ver/protocal is not supported in NVGRE"); + + /* TNI must be totally masked or not. 
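 * For example (TNI and flow id values illustrative), NVGRE additionally
 * exposes the 8-bit flow id:
 *
 *      struct rte_flow_item_nvgre spec = {
 *              .tni = { 0x00, 0x10, 0x01 },
 *              .flow_id = 7,
 *      };
 *      struct rte_flow_item_nvgre mask = {
 *              .tni = { 0xff, 0xff, 0xff },
 *              .flow_id = 0xff,
 *      };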
*/ + if (memcmp(nvgre_mask->tni, full_mask, VNI_OR_TNI_LEN) && + memcmp(nvgre_mask->tni, zero_mask, VNI_OR_TNI_LEN)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "TNI must be totally masked or not in NVGRE"); + + if (nvgre_mask->tni[0]) { + hns3_set_bit(rule->input_set, OUTER_TUN_VNI, 1); + memcpy(rule->key_conf.mask.outer_tun_vni, nvgre_mask->tni, + VNI_OR_TNI_LEN); + } + memcpy(rule->key_conf.spec.outer_tun_vni, nvgre_spec->tni, + VNI_OR_TNI_LEN); + + if (nvgre_mask->flow_id) { + hns3_set_bit(rule->input_set, OUTER_TUN_FLOW_ID, 1); + rule->key_conf.mask.outer_tun_flow_id = nvgre_mask->flow_id; + } + rule->key_conf.spec.outer_tun_flow_id = nvgre_spec->flow_id; + return 0; +} + +static int +hns3_parse_geneve(const struct rte_flow_item *item, struct hns3_fdir_rule *rule, + struct rte_flow_error *error) +{ + const struct rte_flow_item_geneve *geneve_spec; + const struct rte_flow_item_geneve *geneve_mask; + + if (item->spec == NULL && item->mask) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Can't configure FDIR with mask but without spec"); + else if (item->spec && (item->mask == NULL)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Tunnel packets must configure with mask"); + + hns3_set_bit(rule->input_set, OUTER_DST_PORT, 1); + rule->key_conf.spec.tunnel_type = HNS3_TUNNEL_TYPE_GENEVE; + rule->key_conf.mask.tunnel_type = TUNNEL_TYPE_MASK; + /* Only used to describe the protocol stack. */ + if (item->spec == NULL && item->mask == NULL) + return 0; + + geneve_mask = item->mask; + geneve_spec = item->spec; + + if (geneve_mask->ver_opt_len_o_c_rsvd0 || geneve_mask->protocol) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Ver/protocal is not supported in GENEVE"); + /* VNI must be totally masked or not. 
*/ + if (memcmp(geneve_mask->vni, full_mask, VNI_OR_TNI_LEN) && + memcmp(geneve_mask->vni, zero_mask, VNI_OR_TNI_LEN)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "VNI must be totally masked or not in GENEVE"); + if (geneve_mask->vni[0]) { + hns3_set_bit(rule->input_set, OUTER_TUN_VNI, 1); + memcpy(rule->key_conf.mask.outer_tun_vni, geneve_mask->vni, + VNI_OR_TNI_LEN); + } + memcpy(rule->key_conf.spec.outer_tun_vni, geneve_spec->vni, + VNI_OR_TNI_LEN); + return 0; +} + +static int +hns3_parse_tunnel(const struct rte_flow_item *item, struct hns3_fdir_rule *rule, + struct rte_flow_error *error) +{ + int ret; + + switch (item->type) { + case RTE_FLOW_ITEM_TYPE_VXLAN: + case RTE_FLOW_ITEM_TYPE_VXLAN_GPE: + ret = hns3_parse_vxlan(item, rule, error); + break; + case RTE_FLOW_ITEM_TYPE_NVGRE: + ret = hns3_parse_nvgre(item, rule, error); + break; + case RTE_FLOW_ITEM_TYPE_GENEVE: + ret = hns3_parse_geneve(item, rule, error); + break; + default: + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_HANDLE, + NULL, "Unsupported tunnel type!"); + } + if (ret) + return ret; + return hns3_handle_tunnel(item, rule, error); +} + +static int +hns3_parse_normal(const struct rte_flow_item *item, + struct hns3_fdir_rule *rule, + struct items_step_mngr *step_mngr, + struct rte_flow_error *error) +{ + int ret; + + switch (item->type) { + case RTE_FLOW_ITEM_TYPE_ETH: + ret = hns3_parse_eth(item, rule, error); + step_mngr->items = L2_next_items; + step_mngr->count = ARRAY_SIZE(L2_next_items); + break; + case RTE_FLOW_ITEM_TYPE_VLAN: + ret = hns3_parse_vlan(item, rule, error); + step_mngr->items = L2_next_items; + step_mngr->count = ARRAY_SIZE(L2_next_items); + break; + case RTE_FLOW_ITEM_TYPE_IPV4: + ret = hns3_parse_ipv4(item, rule, error); + step_mngr->items = L3_next_items; + step_mngr->count = ARRAY_SIZE(L3_next_items); + break; + case RTE_FLOW_ITEM_TYPE_IPV6: + ret = hns3_parse_ipv6(item, rule, error); + step_mngr->items = L3_next_items; + step_mngr->count = ARRAY_SIZE(L3_next_items); + break; + case RTE_FLOW_ITEM_TYPE_TCP: + ret = hns3_parse_tcp(item, rule, error); + step_mngr->items = L4_next_items; + step_mngr->count = ARRAY_SIZE(L4_next_items); + break; + case RTE_FLOW_ITEM_TYPE_UDP: + ret = hns3_parse_udp(item, rule, error); + step_mngr->items = L4_next_items; + step_mngr->count = ARRAY_SIZE(L4_next_items); + break; + case RTE_FLOW_ITEM_TYPE_SCTP: + ret = hns3_parse_sctp(item, rule, error); + step_mngr->items = L4_next_items; + step_mngr->count = ARRAY_SIZE(L4_next_items); + break; + default: + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_HANDLE, + NULL, "Unsupported normal type!"); + } + + return ret; +} + +static int +hns3_validate_item(const struct rte_flow_item *item, + struct items_step_mngr step_mngr, + struct rte_flow_error *error) +{ + int i; + + if (item->last) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, item, + "Not supported last point for range"); + + for (i = 0; i < step_mngr.count; i++) { + if (item->type == step_mngr.items[i]) + break; + } + + if (i == step_mngr.count) { + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Inval or missing item"); + } + return 0; +} + +static inline bool +is_tunnel_packet(enum rte_flow_item_type type) +{ + if (type == RTE_FLOW_ITEM_TYPE_VXLAN_GPE || + type == RTE_FLOW_ITEM_TYPE_VXLAN || + type == RTE_FLOW_ITEM_TYPE_NVGRE || + type == RTE_FLOW_ITEM_TYPE_GENEVE || + type == RTE_FLOW_ITEM_TYPE_MPLS) + return true; + return false; 
+} + +/* + * Parse the rule to see if it is a IP or MAC VLAN flow director rule. + * And get the flow director filter info BTW. + * UDP/TCP/SCTP PATTERN: + * The first not void item can be ETH or IPV4 or IPV6 + * The second not void item must be IPV4 or IPV6 if the first one is ETH. + * The next not void item could be UDP or TCP or SCTP (optional) + * The next not void item could be RAW (for flexbyte, optional) + * The next not void item must be END. + * A Fuzzy Match pattern can appear at any place before END. + * Fuzzy Match is optional for IPV4 but is required for IPV6 + * MAC VLAN PATTERN: + * The first not void item must be ETH. + * The second not void item must be MAC VLAN. + * The next not void item must be END. + * ACTION: + * The first not void action should be QUEUE or DROP. + * The second not void optional action should be MARK, + * mark_id is a uint32_t number. + * The next not void action should be END. + * UDP/TCP/SCTP pattern example: + * ITEM Spec Mask + * ETH NULL NULL + * IPV4 src_addr 192.168.1.20 0xFFFFFFFF + * dst_addr 192.167.3.50 0xFFFFFFFF + * UDP/TCP/SCTP src_port 80 0xFFFF + * dst_port 80 0xFFFF + * END + * MAC VLAN pattern example: + * ITEM Spec Mask + * ETH dst_addr + {0xAC, 0x7B, 0xA1, {0xFF, 0xFF, 0xFF, + 0x2C, 0x6D, 0x36} 0xFF, 0xFF, 0xFF} + * MAC VLAN tci 0x2016 0xEFFF + * END + * Other members in mask and spec should set to 0x00. + * Item->last should be NULL. + */ +static int +hns3_parse_fdir_filter(struct rte_eth_dev *dev, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct hns3_fdir_rule *rule, + struct rte_flow_error *error) +{ + struct hns3_adapter *hns = dev->data->dev_private; + const struct rte_flow_item *item; + struct items_step_mngr step_mngr; + int ret; + + /* FDIR is available only in PF driver */ + if (hns->is_vf) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Fdir not supported in VF"); + + if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL, + "fdir_conf.mode isn't perfect"); + + step_mngr.items = first_items; + step_mngr.count = ARRAY_SIZE(first_items); + for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { + if (item->type == RTE_FLOW_ITEM_TYPE_VOID) + continue; + + ret = hns3_validate_item(item, step_mngr, error); + if (ret) + return ret; + + if (is_tunnel_packet(item->type)) { + ret = hns3_parse_tunnel(item, rule, error); + if (ret) + return ret; + step_mngr.items = tunnel_next_items; + step_mngr.count = ARRAY_SIZE(tunnel_next_items); + } else { + ret = hns3_parse_normal(item, rule, &step_mngr, error); + if (ret) + return ret; + } + } + + return hns3_handle_actions(dev, actions, rule, error); +} + +void +hns3_filterlist_init(struct rte_eth_dev *dev) +{ + struct hns3_process_private *process_list = dev->process_private; + + TAILQ_INIT(&process_list->fdir_list); + TAILQ_INIT(&process_list->filter_rss_list); + TAILQ_INIT(&process_list->flow_list); +} + +static void +hns3_filterlist_flush(struct rte_eth_dev *dev) +{ + struct hns3_process_private *process_list = dev->process_private; + struct hns3_fdir_rule_ele *fdir_rule_ptr; + struct hns3_rss_conf_ele *rss_filter_ptr; + struct hns3_flow_mem *flow_node; + + fdir_rule_ptr = TAILQ_FIRST(&process_list->fdir_list); + while (fdir_rule_ptr) { + TAILQ_REMOVE(&process_list->fdir_list, fdir_rule_ptr, entries); + rte_free(fdir_rule_ptr); + fdir_rule_ptr = TAILQ_FIRST(&process_list->fdir_list); + } + + rss_filter_ptr = 
TAILQ_FIRST(&process_list->filter_rss_list); + while (rss_filter_ptr) { + TAILQ_REMOVE(&process_list->filter_rss_list, rss_filter_ptr, + entries); + rte_free(rss_filter_ptr); + rss_filter_ptr = TAILQ_FIRST(&process_list->filter_rss_list); + } + + flow_node = TAILQ_FIRST(&process_list->flow_list); + while (flow_node) { + TAILQ_REMOVE(&process_list->flow_list, flow_node, entries); + rte_free(flow_node->flow); + rte_free(flow_node); + flow_node = TAILQ_FIRST(&process_list->flow_list); + } +} + +static bool +hns3_action_rss_same(const struct rte_flow_action_rss *comp, + const struct rte_flow_action_rss *with) +{ + return (comp->func == with->func && + comp->level == with->level && + comp->types == with->types && + comp->key_len == with->key_len && + comp->queue_num == with->queue_num && + !memcmp(comp->key, with->key, with->key_len) && + !memcmp(comp->queue, with->queue, + sizeof(*with->queue) * with->queue_num)); +} + +static int +hns3_rss_conf_copy(struct hns3_rss_conf *out, + const struct rte_flow_action_rss *in) +{ + if (in->key_len > RTE_DIM(out->key) || + in->queue_num > RTE_DIM(out->queue)) + return -EINVAL; + if (in->key == NULL && in->key_len) + return -EINVAL; + out->conf = (struct rte_flow_action_rss) { + .func = in->func, + .level = in->level, + .types = in->types, + .key_len = in->key_len, + .queue_num = in->queue_num, + }; + out->conf.queue = + memcpy(out->queue, in->queue, + sizeof(*in->queue) * in->queue_num); + if (in->key) + out->conf.key = memcpy(out->key, in->key, in->key_len); + + return 0; +} + +/* + * This function is used to parse rss action validatation. + */ +static int +hns3_parse_rss_filter(struct rte_eth_dev *dev, + const struct rte_flow_action *actions, + struct rte_flow_error *error) +{ + struct hns3_adapter *hns = dev->data->dev_private; + struct hns3_hw *hw = &hns->hw; + struct hns3_rss_conf *rss_conf = &hw->rss_info; + const struct rte_flow_action_rss *rss; + const struct rte_flow_action *act; + uint32_t act_index = 0; + uint64_t flow_types; + uint16_t n; + + NEXT_ITEM_OF_ACTION(act, actions, act_index); + /* Get configuration args from APP cmdline input */ + rss = act->conf; + + if (rss == NULL || rss->queue_num == 0) { + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, "no valid queues"); + } + + for (n = 0; n < rss->queue_num; n++) { + if (rss->queue[n] < dev->data->nb_rx_queues) + continue; + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, + "queue id > max number of queues"); + } + + /* Parse flow types of RSS */ + if (!(rss->types & HNS3_ETH_RSS_SUPPORT) && rss->types) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, + "Flow types is unsupported by " + "hns3's RSS"); + + flow_types = rss->types & HNS3_ETH_RSS_SUPPORT; + if (flow_types != rss->types) + hns3_warn(hw, "RSS flow types(%" PRIx64 ") include unsupported " + "flow types", rss->types); + + /* Parse RSS related parameters from RSS configuration */ + switch (rss->func) { + case RTE_ETH_HASH_FUNCTION_DEFAULT: + case RTE_ETH_HASH_FUNCTION_TOEPLITZ: + case RTE_ETH_HASH_FUNCTION_SIMPLE_XOR: + break; + default: + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, act, + "input RSS hash functions are not supported"); + } + + if (rss->level) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, act, + "a nonzero RSS encapsulation level is not supported"); + if (rss->key_len && rss->key_len != RTE_DIM(rss_conf->key)) + return rte_flow_error_set(error, ENOTSUP, + 
RTE_FLOW_ERROR_TYPE_ACTION, act, + "RSS hash key must be exactly 40 bytes"); + if (rss->queue_num > RTE_DIM(rss_conf->queue)) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, act, + "too many queues for RSS context"); + + act_index++; + + /* Check if the next not void action is END */ + NEXT_ITEM_OF_ACTION(act, actions, act_index); + if (act->type != RTE_FLOW_ACTION_TYPE_END) { + memset(rss_conf, 0, sizeof(struct hns3_rss_conf)); + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, "Not supported action."); + } + + return 0; +} + +static int +hns3_disable_rss(struct hns3_hw *hw) +{ + int ret; + + /* Redirected the redirection table to queue 0 */ + ret = hns3_rss_reset_indir_table(hw); + if (ret) + return ret; + + /* Disable RSS */ + hw->rss_info.conf.types = 0; + hw->rss_dis_flag = true; + + return 0; +} + +static void +hns3_parse_rss_key(struct hns3_hw *hw, struct rte_flow_action_rss *rss_conf) +{ + if (rss_conf->key == NULL || + rss_conf->key_len < HNS3_RSS_KEY_SIZE) { + hns3_info(hw, "Default RSS hash key to be set"); + rss_conf->key = hns3_hash_key; + rss_conf->key_len = HNS3_RSS_KEY_SIZE; + } +} + +static int +hns3_parse_rss_algorithm(struct hns3_hw *hw, enum rte_eth_hash_function *func, + uint8_t *hash_algo) +{ + enum rte_eth_hash_function algo_func = *func; + switch (algo_func) { + case RTE_ETH_HASH_FUNCTION_DEFAULT: + /* Keep *hash_algo as what it used to be */ + algo_func = hw->rss_info.conf.func; + break; + case RTE_ETH_HASH_FUNCTION_TOEPLITZ: + *hash_algo = HNS3_RSS_HASH_ALGO_TOEPLITZ; + break; + case RTE_ETH_HASH_FUNCTION_SIMPLE_XOR: + *hash_algo = HNS3_RSS_HASH_ALGO_SIMPLE; + break; + default: + hns3_err(hw, "Invalid RSS algorithm configuration(%u)", + algo_func); + return -EINVAL; + } + *func = algo_func; + + return 0; +} + +static int +hns3_hw_rss_hash_set(struct hns3_hw *hw, struct rte_flow_action_rss *rss_config) +{ + uint8_t hash_algo = + (hw->rss_info.conf.func == RTE_ETH_HASH_FUNCTION_TOEPLITZ ? 
+ HNS3_RSS_HASH_ALGO_TOEPLITZ : HNS3_RSS_HASH_ALGO_SIMPLE); + struct hns3_rss_tuple_cfg *tuple; + int ret; + + /* Parse hash key */ + hns3_parse_rss_key(hw, rss_config); + + /* Parse hash algorithm */ + ret = hns3_parse_rss_algorithm(hw, &rss_config->func, &hash_algo); + if (ret) + return ret; + + ret = hns3_set_rss_algo_key(hw, hash_algo, rss_config->key); + if (ret) + return ret; + + /* Update algorithm of hw */ + hw->rss_info.conf.func = rss_config->func; + + /* Set flow type supported */ + tuple = &hw->rss_info.rss_tuple_sets; + ret = hns3_set_rss_tuple_by_rss_hf(hw, tuple, rss_config->types); + if (ret) + hns3_err(hw, "Update RSS tuples by rss hf failed %d", ret); + + return ret; +} + +static int +hns3_update_indir_table(struct rte_eth_dev *dev, + const struct rte_flow_action_rss *conf, uint16_t num) +{ + struct hns3_adapter *hns = dev->data->dev_private; + struct hns3_hw *hw = &hns->hw; + uint8_t indir_tbl[HNS3_RSS_IND_TBL_SIZE]; + uint16_t j, allow_rss_queues; + uint8_t queue_id; + uint32_t i; + + if (num == 0) { + hns3_err(hw, "No PF queues are configured to enable RSS"); + return -ENOTSUP; + } + + allow_rss_queues = RTE_MIN(dev->data->nb_rx_queues, hw->rss_size_max); + /* Fill in redirection table */ + memcpy(indir_tbl, hw->rss_info.rss_indirection_tbl, + HNS3_RSS_IND_TBL_SIZE); + for (i = 0, j = 0; i < HNS3_RSS_IND_TBL_SIZE; i++, j++) { + j %= num; + if (conf->queue[j] >= allow_rss_queues) { + hns3_err(hw, "Invalid queue id(%u) to be set in " + "redirection table, max number of rss " + "queues: %u", conf->queue[j], + allow_rss_queues); + return -EINVAL; + } + queue_id = conf->queue[j]; + indir_tbl[i] = queue_id; + } + + return hns3_set_rss_indir_table(hw, indir_tbl, HNS3_RSS_IND_TBL_SIZE); +} + +static int +hns3_config_rss_filter(struct rte_eth_dev *dev, + const struct hns3_rss_conf *conf, bool add) +{ + struct hns3_adapter *hns = dev->data->dev_private; + struct hns3_hw *hw = &hns->hw; + struct hns3_rss_conf *rss_info; + uint64_t flow_types; + uint16_t num; + int ret; + + struct rte_flow_action_rss rss_flow_conf = { + .func = conf->conf.func, + .level = conf->conf.level, + .types = conf->conf.types, + .key_len = conf->conf.key_len, + .queue_num = conf->conf.queue_num, + .key = conf->conf.key_len ? 
+ (void *)(uintptr_t)conf->conf.key : NULL, + .queue = conf->conf.queue, + }; + + /* The types is Unsupported by hns3' RSS */ + if (!(rss_flow_conf.types & HNS3_ETH_RSS_SUPPORT) && + rss_flow_conf.types) { + hns3_err(hw, + "Flow types(%" PRIx64 ") is unsupported by hns3's RSS", + rss_flow_conf.types); + return -EINVAL; + } + + /* Filter the unsupported flow types */ + flow_types = rss_flow_conf.types & HNS3_ETH_RSS_SUPPORT; + if (flow_types != rss_flow_conf.types) + hns3_warn(hw, "modified RSS types based on hardware support, " + "requested:%" PRIx64 " configured:%" PRIx64, + rss_flow_conf.types, flow_types); + /* Update the useful flow types */ + rss_flow_conf.types = flow_types; + + if ((rss_flow_conf.types & ETH_RSS_PROTO_MASK) == 0) + return hns3_disable_rss(hw); + + rss_info = &hw->rss_info; + if (!add) { + if (hns3_action_rss_same(&rss_info->conf, &rss_flow_conf)) { + ret = hns3_disable_rss(hw); + if (ret) { + hns3_err(hw, "RSS disable failed(%d)", ret); + return ret; + } + memset(rss_info, 0, sizeof(struct hns3_rss_conf)); + return 0; + } + return -EINVAL; + } + + /* Get rx queues num */ + num = dev->data->nb_rx_queues; + + /* Set rx queues to use */ + num = RTE_MIN(num, rss_flow_conf.queue_num); + if (rss_flow_conf.queue_num > num) + hns3_warn(hw, "Config queue numbers %u are beyond the scope of truncated", + rss_flow_conf.queue_num); + hns3_info(hw, "Max of contiguous %u PF queues are configured", num); + + rte_spinlock_lock(&hw->lock); + /* Update redirection talbe of rss */ + ret = hns3_update_indir_table(dev, &rss_flow_conf, num); + if (ret) + goto rss_config_err; + + /* Set hash algorithm and flow types by the user's config */ + ret = hns3_hw_rss_hash_set(hw, &rss_flow_conf); + if (ret) + goto rss_config_err; + + ret = hns3_rss_conf_copy(rss_info, &rss_flow_conf); + if (ret) { + hns3_err(hw, "RSS config init fail(%d)", ret); + goto rss_config_err; + } + +rss_config_err: + rte_spinlock_unlock(&hw->lock); + + return ret; +} + +/* Remove the rss filter */ +static int +hns3_clear_rss_filter(struct rte_eth_dev *dev) +{ + struct hns3_adapter *hns = dev->data->dev_private; + struct hns3_hw *hw = &hns->hw; + + if (hw->rss_info.conf.queue_num == 0) + return 0; + + return hns3_config_rss_filter(dev, &hw->rss_info, false); +} + +/* Restore the rss filter */ +int +hns3_restore_rss_filter(struct rte_eth_dev *dev) +{ + struct hns3_adapter *hns = dev->data->dev_private; + struct hns3_hw *hw = &hns->hw; + + if (hw->rss_info.conf.queue_num == 0) + return 0; + + return hns3_config_rss_filter(dev, &hw->rss_info, true); +} + +static int +hns3_flow_parse_rss(struct rte_eth_dev *dev, + const struct hns3_rss_conf *conf, bool add) +{ + struct hns3_adapter *hns = dev->data->dev_private; + struct hns3_hw *hw = &hns->hw; + bool ret; + + /* Action rss same */ + ret = hns3_action_rss_same(&hw->rss_info.conf, &conf->conf); + if (ret) { + hns3_err(hw, "Enter duplicate RSS configuration : %d", ret); + return -EINVAL; + } + + return hns3_config_rss_filter(dev, conf, add); +} + +static int +hns3_flow_args_check(const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + if (pattern == NULL) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM_NUM, + NULL, "NULL pattern."); + + if (actions == NULL) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_NUM, + NULL, "NULL action."); + + if (attr == NULL) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR, + NULL, "NULL 
attribute."); + + return hns3_check_attr(attr, error); +} + +/* + * Check if the flow rule is supported by hns3. + * It only checkes the format. Don't guarantee the rule can be programmed into + * the HW. Because there can be no enough room for the rule. + */ +static int +hns3_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + struct hns3_fdir_rule fdir_rule; + int ret; + + ret = hns3_flow_args_check(attr, pattern, actions, error); + if (ret) + return ret; + + if (find_rss_action(actions)) + return hns3_parse_rss_filter(dev, actions, error); + + memset(&fdir_rule, 0, sizeof(struct hns3_fdir_rule)); + return hns3_parse_fdir_filter(dev, pattern, actions, &fdir_rule, error); +} + +/* + * Create or destroy a flow rule. + * Theorically one rule can match more than one filters. + * We will let it use the filter which it hitt first. + * So, the sequence matters. + */ +static struct rte_flow * +hns3_flow_create(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + struct hns3_process_private *process_list = dev->process_private; + struct hns3_adapter *hns = dev->data->dev_private; + struct hns3_hw *hw = &hns->hw; + const struct hns3_rss_conf *rss_conf; + struct hns3_fdir_rule_ele *fdir_rule_ptr; + struct hns3_rss_conf_ele *rss_filter_ptr; + struct hns3_flow_mem *flow_node; + const struct rte_flow_action *act; + struct rte_flow *flow; + struct hns3_fdir_rule fdir_rule; + int ret; + + ret = hns3_flow_args_check(attr, pattern, actions, error); + if (ret) + return NULL; + + flow = rte_zmalloc("hns3 flow", sizeof(struct rte_flow), 0); + if (flow == NULL) { + rte_flow_error_set(error, ENOMEM, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Failed to allocate flow memory"); + return NULL; + } + flow_node = rte_zmalloc("hns3 flow node", + sizeof(struct hns3_flow_mem), 0); + if (flow_node == NULL) { + rte_flow_error_set(error, ENOMEM, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Failed to allocate flow list memory"); + rte_free(flow); + return NULL; + } + + flow_node->flow = flow; + TAILQ_INSERT_TAIL(&process_list->flow_list, flow_node, entries); + + act = find_rss_action(actions); + if (act) { + rss_conf = act->conf; + + ret = hns3_flow_parse_rss(dev, rss_conf, true); + if (ret) + goto err; + + rss_filter_ptr = rte_zmalloc("hns3 rss filter", + sizeof(struct hns3_rss_conf_ele), + 0); + if (rss_filter_ptr == NULL) { + hns3_err(hw, + "Failed to allocate hns3_rss_filter memory"); + ret = -ENOMEM; + goto err; + } + memcpy(&rss_filter_ptr->filter_info, rss_conf, + sizeof(struct hns3_rss_conf)); + TAILQ_INSERT_TAIL(&process_list->filter_rss_list, + rss_filter_ptr, entries); + + flow->rule = rss_filter_ptr; + flow->filter_type = RTE_ETH_FILTER_HASH; + return flow; + } + + memset(&fdir_rule, 0, sizeof(struct hns3_fdir_rule)); + ret = hns3_parse_fdir_filter(dev, pattern, actions, &fdir_rule, error); + if (ret) + goto out; + + if (fdir_rule.flags & HNS3_RULE_FLAG_COUNTER) { + ret = hns3_counter_new(dev, fdir_rule.act_cnt.shared, + fdir_rule.act_cnt.id, error); + if (ret) + goto out; + + flow->counter_id = fdir_rule.act_cnt.id; + } + ret = hns3_fdir_filter_program(hns, &fdir_rule, false); + if (!ret) { + fdir_rule_ptr = rte_zmalloc("hns3 fdir rule", + sizeof(struct hns3_fdir_rule_ele), + 0); + if (fdir_rule_ptr == NULL) { + hns3_err(hw, "Failed to allocate fdir_rule 
memory"); + ret = -ENOMEM; + goto err_fdir; + } + memcpy(&fdir_rule_ptr->fdir_conf, &fdir_rule, + sizeof(struct hns3_fdir_rule)); + TAILQ_INSERT_TAIL(&process_list->fdir_list, + fdir_rule_ptr, entries); + flow->rule = fdir_rule_ptr; + flow->filter_type = RTE_ETH_FILTER_FDIR; + + return flow; + } + +err_fdir: + if (fdir_rule.flags & HNS3_RULE_FLAG_COUNTER) + hns3_counter_release(dev, fdir_rule.act_cnt.id); + +err: + rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Failed to create flow"); +out: + TAILQ_REMOVE(&process_list->flow_list, flow_node, entries); + rte_free(flow_node); + rte_free(flow); + return NULL; +} + +/* Destroy a flow rule on hns3. */ +static int +hns3_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow, + struct rte_flow_error *error) +{ + struct hns3_process_private *process_list = dev->process_private; + struct hns3_adapter *hns = dev->data->dev_private; + struct hns3_fdir_rule_ele *fdir_rule_ptr; + struct hns3_rss_conf_ele *rss_filter_ptr; + struct hns3_flow_mem *flow_node; + struct hns3_hw *hw = &hns->hw; + enum rte_filter_type filter_type; + struct hns3_fdir_rule fdir_rule; + int ret; + + if (flow == NULL) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_HANDLE, + flow, "Flow is NULL"); + filter_type = flow->filter_type; + switch (filter_type) { + case RTE_ETH_FILTER_FDIR: + fdir_rule_ptr = (struct hns3_fdir_rule_ele *)flow->rule; + memcpy(&fdir_rule, &fdir_rule_ptr->fdir_conf, + sizeof(struct hns3_fdir_rule)); + + ret = hns3_fdir_filter_program(hns, &fdir_rule, true); + if (ret) + return rte_flow_error_set(error, EIO, + RTE_FLOW_ERROR_TYPE_HANDLE, + flow, + "Destroy FDIR fail.Try again"); + if (fdir_rule.flags & HNS3_RULE_FLAG_COUNTER) + hns3_counter_release(dev, fdir_rule.act_cnt.id); + TAILQ_REMOVE(&process_list->fdir_list, fdir_rule_ptr, entries); + rte_free(fdir_rule_ptr); + fdir_rule_ptr = NULL; + break; + case RTE_ETH_FILTER_HASH: + rss_filter_ptr = (struct hns3_rss_conf_ele *)flow->rule; + ret = hns3_config_rss_filter(dev, &hw->rss_info, false); + if (ret) + return rte_flow_error_set(error, EIO, + RTE_FLOW_ERROR_TYPE_HANDLE, + flow, + "Destroy RSS fail.Try again"); + TAILQ_REMOVE(&process_list->filter_rss_list, rss_filter_ptr, + entries); + rte_free(rss_filter_ptr); + rss_filter_ptr = NULL; + break; + default: + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_HANDLE, flow, + "Unsupported filter type"); + } + + TAILQ_FOREACH(flow_node, &process_list->flow_list, entries) { + if (flow_node->flow == flow) { + TAILQ_REMOVE(&process_list->flow_list, flow_node, + entries); + rte_free(flow_node); + flow_node = NULL; + break; + } + } + rte_free(flow); + flow = NULL; + + return 0; +} + +/* Destroy all flow rules associated with a port on hns3. */ +static int +hns3_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error) +{ + struct hns3_adapter *hns = dev->data->dev_private; + int ret; + + /* FDIR is available only in PF driver */ + if (!hns->is_vf) { + ret = hns3_clear_all_fdir_filter(hns); + if (ret) { + rte_flow_error_set(error, ret, + RTE_FLOW_ERROR_TYPE_HANDLE, + NULL, "Failed to flush rule"); + return ret; + } + hns3_counter_flush(dev); + } + + ret = hns3_clear_rss_filter(dev); + if (ret) { + rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_HANDLE, + NULL, "Failed to flush rss filter"); + return ret; + } + + hns3_filterlist_flush(dev); + + return 0; +} + +/* Query an existing flow rule. 
*/ +static int +hns3_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow, + const struct rte_flow_action *actions, void *data, + struct rte_flow_error *error) +{ + struct rte_flow_query_count *qc; + int ret; + + for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { + switch (actions->type) { + case RTE_FLOW_ACTION_TYPE_VOID: + break; + case RTE_FLOW_ACTION_TYPE_COUNT: + qc = (struct rte_flow_query_count *)data; + ret = hns3_counter_query(dev, flow, qc, error); + if (ret) + return ret; + break; + default: + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + actions, + "Query action only support count"); + } + } + return 0; +} + +static const struct rte_flow_ops hns3_flow_ops = { + .validate = hns3_flow_validate, + .create = hns3_flow_create, + .destroy = hns3_flow_destroy, + .flush = hns3_flow_flush, + .query = hns3_flow_query, + .isolate = NULL, +}; + +/* + * The entry of flow API. + * @param dev + * Pointer to Ethernet device. + * @return + * 0 on success, a negative errno value otherwise is set. + */ +int +hns3_dev_filter_ctrl(struct rte_eth_dev *dev, enum rte_filter_type filter_type, + enum rte_filter_op filter_op, void *arg) +{ + struct hns3_hw *hw; + int ret = 0; + + hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); + switch (filter_type) { + case RTE_ETH_FILTER_GENERIC: + if (filter_op != RTE_ETH_FILTER_GET) + return -EINVAL; + if (hw->adapter_state >= HNS3_NIC_CLOSED) + return -ENODEV; + *(const void **)arg = &hns3_flow_ops; + break; + default: + hns3_err(hw, "Filter type (%d) not supported", filter_type); + ret = -EOPNOTSUPP; + break; + } + + return ret; +} diff --git a/src/spdk/dpdk/drivers/net/hns3/hns3_intr.c b/src/spdk/dpdk/drivers/net/hns3/hns3_intr.c new file mode 100644 index 000000000..9953a1d98 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/hns3/hns3_intr.c @@ -0,0 +1,1169 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018-2019 Hisilicon Limited. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "hns3_ethdev.h" +#include "hns3_logs.h" +#include "hns3_intr.h" +#include "hns3_regs.h" +#include "hns3_rxtx.h" + +#define SWITCH_CONTEXT_US 10 + +/* offset in MSIX bd */ +#define MAC_ERROR_OFFSET 1 +#define PPP_PF_ERROR_OFFSET 2 +#define PPU_PF_ERROR_OFFSET 3 +#define RCB_ERROR_OFFSET 5 +#define RCB_ERROR_STATUS_OFFSET 2 + +#define HNS3_CHECK_MERGE_CNT(val) \ + do { \ + if (val) \ + hw->reset.stats.merge_cnt++; \ + } while (0) + +static const char *reset_string[HNS3_MAX_RESET] = { + "none", "vf_func", "vf_pf_func", "vf_full", "flr", + "vf_global", "pf_func", "global", "IMP", +}; + +const struct hns3_hw_error mac_afifo_tnl_int[] = { + { .int_msk = BIT(0), .msg = "egu_cge_afifo_ecc_1bit_err", + .reset_level = HNS3_NONE_RESET }, + { .int_msk = BIT(1), .msg = "egu_cge_afifo_ecc_mbit_err", + .reset_level = HNS3_GLOBAL_RESET }, + { .int_msk = BIT(2), .msg = "egu_lge_afifo_ecc_1bit_err", + .reset_level = HNS3_NONE_RESET }, + { .int_msk = BIT(3), .msg = "egu_lge_afifo_ecc_mbit_err", + .reset_level = HNS3_GLOBAL_RESET }, + { .int_msk = BIT(4), .msg = "cge_igu_afifo_ecc_1bit_err", + .reset_level = HNS3_NONE_RESET }, + { .int_msk = BIT(5), .msg = "cge_igu_afifo_ecc_mbit_err", + .reset_level = HNS3_GLOBAL_RESET }, + { .int_msk = BIT(6), .msg = "lge_igu_afifo_ecc_1bit_err", + .reset_level = HNS3_NONE_RESET }, + { .int_msk = BIT(7), .msg = "lge_igu_afifo_ecc_mbit_err", + .reset_level = HNS3_GLOBAL_RESET }, + { .int_msk = BIT(8), .msg = "cge_igu_afifo_overflow_err", + .reset_level = HNS3_GLOBAL_RESET }, + { .int_msk = BIT(9), .msg = "lge_igu_afifo_overflow_err", + .reset_level = HNS3_GLOBAL_RESET }, + { .int_msk = BIT(10), .msg = "egu_cge_afifo_underrun_err", + .reset_level = HNS3_GLOBAL_RESET }, + { .int_msk = BIT(11), .msg = "egu_lge_afifo_underrun_err", + .reset_level = HNS3_GLOBAL_RESET }, + { .int_msk = BIT(12), .msg = "egu_ge_afifo_underrun_err", + .reset_level = HNS3_GLOBAL_RESET }, + { .int_msk = BIT(13), .msg = "ge_igu_afifo_overflow_err", + .reset_level = HNS3_GLOBAL_RESET }, + { .int_msk = 0, .msg = NULL, + .reset_level = HNS3_NONE_RESET} +}; + +const struct hns3_hw_error ppu_mpf_abnormal_int_st2[] = { + { .int_msk = BIT(13), .msg = "rpu_rx_pkt_bit32_ecc_mbit_err", + .reset_level = HNS3_GLOBAL_RESET }, + { .int_msk = BIT(14), .msg = "rpu_rx_pkt_bit33_ecc_mbit_err", + .reset_level = HNS3_GLOBAL_RESET }, + { .int_msk = BIT(15), .msg = "rpu_rx_pkt_bit34_ecc_mbit_err", + .reset_level = HNS3_GLOBAL_RESET }, + { .int_msk = BIT(16), .msg = "rpu_rx_pkt_bit35_ecc_mbit_err", + .reset_level = HNS3_GLOBAL_RESET }, + { .int_msk = BIT(17), .msg = "rcb_tx_ring_ecc_mbit_err", + .reset_level = HNS3_GLOBAL_RESET }, + { .int_msk = BIT(18), .msg = "rcb_rx_ring_ecc_mbit_err", + .reset_level = HNS3_GLOBAL_RESET }, + { .int_msk = BIT(19), .msg = "rcb_tx_fbd_ecc_mbit_err", + .reset_level = HNS3_GLOBAL_RESET }, + { .int_msk = BIT(20), .msg = "rcb_rx_ebd_ecc_mbit_err", + .reset_level = HNS3_GLOBAL_RESET }, + { .int_msk = BIT(21), .msg = "rcb_tso_info_ecc_mbit_err", + .reset_level = HNS3_GLOBAL_RESET }, + { .int_msk = BIT(22), .msg = "rcb_tx_int_info_ecc_mbit_err", + .reset_level = HNS3_GLOBAL_RESET }, + { .int_msk = BIT(23), .msg = "rcb_rx_int_info_ecc_mbit_err", + .reset_level = HNS3_GLOBAL_RESET }, + { .int_msk = BIT(24), .msg = "tpu_tx_pkt_0_ecc_mbit_err", + .reset_level = HNS3_GLOBAL_RESET }, + { .int_msk = BIT(25), .msg = "tpu_tx_pkt_1_ecc_mbit_err", + .reset_level = HNS3_GLOBAL_RESET }, + { .int_msk = 
BIT(26), .msg = "rd_bus_err", + .reset_level = HNS3_GLOBAL_RESET }, + { .int_msk = BIT(27), .msg = "wr_bus_err", + .reset_level = HNS3_GLOBAL_RESET }, + { .int_msk = BIT(28), .msg = "reg_search_miss", + .reset_level = HNS3_GLOBAL_RESET }, + { .int_msk = BIT(29), .msg = "rx_q_search_miss", + .reset_level = HNS3_NONE_RESET }, + { .int_msk = BIT(30), .msg = "ooo_ecc_err_detect", + .reset_level = HNS3_NONE_RESET }, + { .int_msk = BIT(31), .msg = "ooo_ecc_err_multpl", + .reset_level = HNS3_GLOBAL_RESET }, + { .int_msk = 0, .msg = NULL, + .reset_level = HNS3_NONE_RESET} +}; + +const struct hns3_hw_error ssu_port_based_pf_int[] = { + { .int_msk = BIT(0), .msg = "roc_pkt_without_key_port", + .reset_level = HNS3_GLOBAL_RESET }, + { .int_msk = BIT(9), .msg = "low_water_line_err_port", + .reset_level = HNS3_NONE_RESET }, + { .int_msk = BIT(10), .msg = "hi_water_line_err_port", + .reset_level = HNS3_GLOBAL_RESET }, + { .int_msk = 0, .msg = NULL, + .reset_level = HNS3_NONE_RESET} +}; + +const struct hns3_hw_error ppp_pf_abnormal_int[] = { + { .int_msk = BIT(0), .msg = "tx_vlan_tag_err", + .reset_level = HNS3_NONE_RESET }, + { .int_msk = BIT(1), .msg = "rss_list_tc_unassigned_queue_err", + .reset_level = HNS3_NONE_RESET }, + { .int_msk = 0, .msg = NULL, + .reset_level = HNS3_NONE_RESET} +}; + +const struct hns3_hw_error ppu_pf_abnormal_int[] = { + { .int_msk = BIT(0), .msg = "over_8bd_no_fe", + .reset_level = HNS3_FUNC_RESET }, + { .int_msk = BIT(1), .msg = "tso_mss_cmp_min_err", + .reset_level = HNS3_NONE_RESET }, + { .int_msk = BIT(2), .msg = "tso_mss_cmp_max_err", + .reset_level = HNS3_NONE_RESET }, + { .int_msk = BIT(3), .msg = "tx_rd_fbd_poison", + .reset_level = HNS3_FUNC_RESET }, + { .int_msk = BIT(4), .msg = "rx_rd_ebd_poison", + .reset_level = HNS3_FUNC_RESET }, + { .int_msk = BIT(5), .msg = "buf_wait_timeout", + .reset_level = HNS3_NONE_RESET }, + { .int_msk = 0, .msg = NULL, + .reset_level = HNS3_NONE_RESET} +}; + +static int +config_ppp_err_intr(struct hns3_adapter *hns, uint32_t cmd, bool en) +{ + struct hns3_hw *hw = &hns->hw; + struct hns3_cmd_desc desc[2]; + int ret; + + /* configure PPP error interrupts */ + hns3_cmd_setup_basic_desc(&desc[0], cmd, false); + desc[0].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); + hns3_cmd_setup_basic_desc(&desc[1], cmd, false); + + if (cmd == HNS3_PPP_CMD0_INT_CMD) { + if (en) { + desc[0].data[0] = + rte_cpu_to_le_32(HNS3_PPP_MPF_ECC_ERR_INT0_EN); + desc[0].data[1] = + rte_cpu_to_le_32(HNS3_PPP_MPF_ECC_ERR_INT1_EN); + desc[0].data[4] = + rte_cpu_to_le_32(HNS3_PPP_PF_ERR_INT_EN); + } + + desc[1].data[0] = + rte_cpu_to_le_32(HNS3_PPP_MPF_ECC_ERR_INT0_EN_MASK); + desc[1].data[1] = + rte_cpu_to_le_32(HNS3_PPP_MPF_ECC_ERR_INT1_EN_MASK); + desc[1].data[2] = + rte_cpu_to_le_32(HNS3_PPP_PF_ERR_INT_EN_MASK); + } else if (cmd == HNS3_PPP_CMD1_INT_CMD) { + if (en) { + desc[0].data[0] = + rte_cpu_to_le_32(HNS3_PPP_MPF_ECC_ERR_INT2_EN); + desc[0].data[1] = + rte_cpu_to_le_32(HNS3_PPP_MPF_ECC_ERR_INT3_EN); + } + + desc[1].data[0] = + rte_cpu_to_le_32(HNS3_PPP_MPF_ECC_ERR_INT2_EN_MASK); + desc[1].data[1] = + rte_cpu_to_le_32(HNS3_PPP_MPF_ECC_ERR_INT3_EN_MASK); + } + + ret = hns3_cmd_send(hw, &desc[0], 2); + if (ret) + hns3_err(hw, "fail to configure PPP error int: %d", ret); + + return ret; +} + +static int +enable_ppp_err_intr(struct hns3_adapter *hns, bool en) +{ + int ret; + + ret = config_ppp_err_intr(hns, HNS3_PPP_CMD0_INT_CMD, en); + if (ret) + return ret; + + return config_ppp_err_intr(hns, HNS3_PPP_CMD1_INT_CMD, en); +} + +static int 
+enable_ssu_err_intr(struct hns3_adapter *hns, bool en) +{ + struct hns3_hw *hw = &hns->hw; + struct hns3_cmd_desc desc[2]; + int ret; + + /* configure SSU ecc error interrupts */ + hns3_cmd_setup_basic_desc(&desc[0], HNS3_SSU_ECC_INT_CMD, false); + desc[0].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); + hns3_cmd_setup_basic_desc(&desc[1], HNS3_SSU_ECC_INT_CMD, false); + if (en) { + desc[0].data[0] = + rte_cpu_to_le_32(HNS3_SSU_1BIT_ECC_ERR_INT_EN); + desc[0].data[1] = + rte_cpu_to_le_32(HNS3_SSU_MULTI_BIT_ECC_ERR_INT_EN); + desc[0].data[4] = + rte_cpu_to_le_32(HNS3_SSU_BIT32_ECC_ERR_INT_EN); + } + + desc[1].data[0] = rte_cpu_to_le_32(HNS3_SSU_1BIT_ECC_ERR_INT_EN_MASK); + desc[1].data[1] = + rte_cpu_to_le_32(HNS3_SSU_MULTI_BIT_ECC_ERR_INT_EN_MASK); + desc[1].data[2] = rte_cpu_to_le_32(HNS3_SSU_BIT32_ECC_ERR_INT_EN_MASK); + + ret = hns3_cmd_send(hw, &desc[0], 2); + if (ret) { + hns3_err(hw, "fail to configure SSU ECC error interrupt: %d", + ret); + return ret; + } + + /* configure SSU common error interrupts */ + hns3_cmd_setup_basic_desc(&desc[0], HNS3_SSU_COMMON_INT_CMD, false); + desc[0].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); + hns3_cmd_setup_basic_desc(&desc[1], HNS3_SSU_COMMON_INT_CMD, false); + + if (en) { + desc[0].data[0] = rte_cpu_to_le_32(HNS3_SSU_COMMON_INT_EN); + desc[0].data[1] = + rte_cpu_to_le_32(HNS3_SSU_PORT_BASED_ERR_INT_EN); + desc[0].data[2] = + rte_cpu_to_le_32(HNS3_SSU_FIFO_OVERFLOW_ERR_INT_EN); + } + + desc[1].data[0] = rte_cpu_to_le_32(HNS3_SSU_COMMON_INT_EN_MASK | + HNS3_SSU_PORT_BASED_ERR_INT_EN_MASK); + desc[1].data[1] = + rte_cpu_to_le_32(HNS3_SSU_FIFO_OVERFLOW_ERR_INT_EN_MASK); + + ret = hns3_cmd_send(hw, &desc[0], 2); + if (ret) + hns3_err(hw, "fail to configure SSU COMMON error intr: %d", + ret); + + return ret; +} + +static int +config_ppu_err_intrs(struct hns3_adapter *hns, uint32_t cmd, bool en) +{ + struct hns3_hw *hw = &hns->hw; + struct hns3_cmd_desc desc[2]; + int num = 1; + + /* configure PPU error interrupts */ + switch (cmd) { + case HNS3_PPU_MPF_ECC_INT_CMD: + hns3_cmd_setup_basic_desc(&desc[0], cmd, false); + desc[0].flag |= HNS3_CMD_FLAG_NEXT; + hns3_cmd_setup_basic_desc(&desc[1], cmd, false); + if (en) { + desc[0].data[0] = HNS3_PPU_MPF_ABNORMAL_INT0_EN; + desc[0].data[1] = HNS3_PPU_MPF_ABNORMAL_INT1_EN; + desc[1].data[3] = HNS3_PPU_MPF_ABNORMAL_INT3_EN; + desc[1].data[4] = HNS3_PPU_MPF_ABNORMAL_INT2_EN; + } + + desc[1].data[0] = HNS3_PPU_MPF_ABNORMAL_INT0_EN_MASK; + desc[1].data[1] = HNS3_PPU_MPF_ABNORMAL_INT1_EN_MASK; + desc[1].data[2] = HNS3_PPU_MPF_ABNORMAL_INT2_EN_MASK; + desc[1].data[3] |= HNS3_PPU_MPF_ABNORMAL_INT3_EN_MASK; + num = 2; + break; + case HNS3_PPU_MPF_OTHER_INT_CMD: + hns3_cmd_setup_basic_desc(&desc[0], cmd, false); + if (en) + desc[0].data[0] = HNS3_PPU_MPF_ABNORMAL_INT2_EN2; + + desc[0].data[2] = HNS3_PPU_MPF_ABNORMAL_INT2_EN2_MASK; + break; + case HNS3_PPU_PF_OTHER_INT_CMD: + hns3_cmd_setup_basic_desc(&desc[0], cmd, false); + if (en) + desc[0].data[0] = HNS3_PPU_PF_ABNORMAL_INT_EN; + + desc[0].data[2] = HNS3_PPU_PF_ABNORMAL_INT_EN_MASK; + break; + default: + hns3_err(hw, + "Invalid cmd(%u) to configure PPU error interrupts.", + cmd); + return -EINVAL; + } + + return hns3_cmd_send(hw, &desc[0], num); +} + +static int +enable_ppu_err_intr(struct hns3_adapter *hns, bool en) +{ + struct hns3_hw *hw = &hns->hw; + int ret; + + ret = config_ppu_err_intrs(hns, HNS3_PPU_MPF_ECC_INT_CMD, en); + if (ret) { + hns3_err(hw, "fail to configure PPU MPF ECC error intr: %d", + ret); + return ret; + } + + ret = config_ppu_err_intrs(hns, 
HNS3_PPU_MPF_OTHER_INT_CMD, en); + if (ret) { + hns3_err(hw, "fail to configure PPU MPF other intr: %d", + ret); + return ret; + } + + ret = config_ppu_err_intrs(hns, HNS3_PPU_PF_OTHER_INT_CMD, en); + if (ret) + hns3_err(hw, "fail to configure PPU PF error interrupts: %d", + ret); + return ret; +} + +static int +enable_mac_err_intr(struct hns3_adapter *hns, bool en) +{ + struct hns3_hw *hw = &hns->hw; + struct hns3_cmd_desc desc; + int ret; + + /* configure MAC common error interrupts */ + hns3_cmd_setup_basic_desc(&desc, HNS3_MAC_COMMON_INT_EN, false); + if (en) + desc.data[0] = rte_cpu_to_le_32(HNS3_MAC_COMMON_ERR_INT_EN); + + desc.data[1] = rte_cpu_to_le_32(HNS3_MAC_COMMON_ERR_INT_EN_MASK); + + ret = hns3_cmd_send(hw, &desc, 1); + if (ret) + hns3_err(hw, "fail to configure MAC COMMON error intr: %d", + ret); + + return ret; +} + +static const struct hns3_hw_blk hw_blk[] = { + { + .name = "PPP", + .enable_err_intr = enable_ppp_err_intr, + }, + { + .name = "SSU", + .enable_err_intr = enable_ssu_err_intr, + }, + { + .name = "PPU", + .enable_err_intr = enable_ppu_err_intr, + }, + { + .name = "MAC", + .enable_err_intr = enable_mac_err_intr, + }, + { + .name = NULL, + .enable_err_intr = NULL, + } +}; + +int +hns3_enable_hw_error_intr(struct hns3_adapter *hns, bool en) +{ + const struct hns3_hw_blk *module = hw_blk; + int ret = 0; + + while (module->enable_err_intr) { + ret = module->enable_err_intr(hns, en); + if (ret) + return ret; + + module++; + } + + return ret; +} + +static enum hns3_reset_level +hns3_find_highest_level(struct hns3_adapter *hns, const char *reg, + const struct hns3_hw_error *err, uint32_t err_sts) +{ + enum hns3_reset_level reset_level = HNS3_FUNC_RESET; + struct hns3_hw *hw = &hns->hw; + bool need_reset = false; + + while (err->msg) { + if (err->int_msk & err_sts) { + hns3_warn(hw, "%s %s found [error status=0x%x]", + reg, err->msg, err_sts); + if (err->reset_level != HNS3_NONE_RESET && + err->reset_level >= reset_level) { + reset_level = err->reset_level; + need_reset = true; + } + } + err++; + } + if (need_reset) + return reset_level; + else + return HNS3_NONE_RESET; +} + +static int +query_num_bds_in_msix(struct hns3_hw *hw, struct hns3_cmd_desc *desc_bd) +{ + int ret; + + hns3_cmd_setup_basic_desc(desc_bd, HNS3_QUERY_MSIX_INT_STS_BD_NUM, + true); + ret = hns3_cmd_send(hw, desc_bd, 1); + if (ret) + hns3_err(hw, "query num bds in msix failed: %d", ret); + + return ret; +} + +static int +query_all_mpf_msix_err(struct hns3_hw *hw, struct hns3_cmd_desc *desc, + uint32_t mpf_bd_num) +{ + int ret; + + hns3_cmd_setup_basic_desc(desc, HNS3_QUERY_CLEAR_ALL_MPF_MSIX_INT, + true); + desc[0].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); + + ret = hns3_cmd_send(hw, &desc[0], mpf_bd_num); + if (ret) + hns3_err(hw, "query all mpf msix err failed: %d", ret); + + return ret; +} + +static int +clear_all_mpf_msix_err(struct hns3_hw *hw, struct hns3_cmd_desc *desc, + uint32_t mpf_bd_num) +{ + int ret; + + hns3_cmd_reuse_desc(desc, false); + desc[0].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); + + ret = hns3_cmd_send(hw, desc, mpf_bd_num); + if (ret) + hns3_err(hw, "clear all mpf msix err failed: %d", ret); + + return ret; +} + +static int +query_all_pf_msix_err(struct hns3_hw *hw, struct hns3_cmd_desc *desc, + uint32_t pf_bd_num) +{ + int ret; + + hns3_cmd_setup_basic_desc(desc, HNS3_QUERY_CLEAR_ALL_PF_MSIX_INT, true); + desc[0].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); + + ret = hns3_cmd_send(hw, desc, pf_bd_num); + if (ret) + hns3_err(hw, "query all pf msix int cmd failed: %d", 
ret); + + return ret; +} + +static int +clear_all_pf_msix_err(struct hns3_hw *hw, struct hns3_cmd_desc *desc, + uint32_t pf_bd_num) +{ + int ret; + + hns3_cmd_reuse_desc(desc, false); + desc[0].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); + + ret = hns3_cmd_send(hw, desc, pf_bd_num); + if (ret) + hns3_err(hw, "clear all pf msix err failed: %d", ret); + + return ret; +} + +void +hns3_intr_unregister(const struct rte_intr_handle *hdl, + rte_intr_callback_fn cb_fn, void *cb_arg) +{ + int retry_cnt = 0; + int ret; + + do { + ret = rte_intr_callback_unregister(hdl, cb_fn, cb_arg); + if (ret >= 0) { + break; + } else if (ret != -EAGAIN) { + PMD_INIT_LOG(ERR, "Failed to unregister intr: %d", ret); + break; + } + rte_delay_ms(HNS3_INTR_UNREG_FAIL_DELAY_MS); + } while (retry_cnt++ < HNS3_INTR_UNREG_FAIL_RETRY_CNT); +} + +void +hns3_handle_msix_error(struct hns3_adapter *hns, uint64_t *levels) +{ + uint32_t mpf_bd_num, pf_bd_num, bd_num; + enum hns3_reset_level req_level; + struct hns3_hw *hw = &hns->hw; + struct hns3_pf *pf = &hns->pf; + struct hns3_cmd_desc desc_bd; + struct hns3_cmd_desc *desc; + uint32_t *desc_data; + uint32_t status; + int ret; + + /* query the number of bds for the MSIx int status */ + ret = query_num_bds_in_msix(hw, &desc_bd); + if (ret) { + hns3_err(hw, "fail to query msix int status bd num: %d", ret); + return; + } + + mpf_bd_num = rte_le_to_cpu_32(desc_bd.data[0]); + pf_bd_num = rte_le_to_cpu_32(desc_bd.data[1]); + bd_num = max_t(uint32_t, mpf_bd_num, pf_bd_num); + if (bd_num < RCB_ERROR_OFFSET) { + hns3_err(hw, "bd_num is less than RCB_ERROR_OFFSET: %u", + bd_num); + return; + } + + desc = rte_zmalloc(NULL, bd_num * sizeof(struct hns3_cmd_desc), 0); + if (desc == NULL) { + hns3_err(hw, "fail to zmalloc desc"); + return; + } + + /* query all main PF MSIx errors */ + ret = query_all_mpf_msix_err(hw, &desc[0], mpf_bd_num); + if (ret) { + hns3_err(hw, "query all mpf msix int cmd failed: %d", ret); + goto out; + } + + /* log MAC errors */ + desc_data = (uint32_t *)&desc[MAC_ERROR_OFFSET]; + status = rte_le_to_cpu_32(*desc_data); + if (status) { + req_level = hns3_find_highest_level(hns, "MAC_AFIFO_TNL_INT_R", + mac_afifo_tnl_int, + status); + hns3_atomic_set_bit(req_level, levels); + pf->abn_int_stats.mac_afifo_tnl_intr_cnt++; + } + + /* log PPU(RCB) errors */ + desc_data = (uint32_t *)&desc[RCB_ERROR_OFFSET]; + status = rte_le_to_cpu_32(*(desc_data + RCB_ERROR_STATUS_OFFSET)) & + HNS3_PPU_MPF_INT_ST2_MSIX_MASK; + if (status) { + req_level = hns3_find_highest_level(hns, + "PPU_MPF_ABNORMAL_INT_ST2", + ppu_mpf_abnormal_int_st2, + status); + hns3_atomic_set_bit(req_level, levels); + pf->abn_int_stats.ppu_mpf_abnormal_intr_st2_cnt++; + } + + /* clear all main PF MSIx errors */ + ret = clear_all_mpf_msix_err(hw, desc, mpf_bd_num); + if (ret) { + hns3_err(hw, "clear all mpf msix int cmd failed: %d", ret); + goto out; + } + + /* query all PF MSIx errors */ + memset(desc, 0, bd_num * sizeof(struct hns3_cmd_desc)); + ret = query_all_pf_msix_err(hw, &desc[0], pf_bd_num); + if (ret) { + hns3_err(hw, "query all pf msix int cmd failed (%d)", ret); + goto out; + } + + /* log SSU PF errors */ + status = rte_le_to_cpu_32(desc[0].data[0]) & + HNS3_SSU_PORT_INT_MSIX_MASK; + if (status) { + req_level = hns3_find_highest_level(hns, + "SSU_PORT_BASED_ERR_INT", + ssu_port_based_pf_int, + status); + hns3_atomic_set_bit(req_level, levels); + pf->abn_int_stats.ssu_port_based_pf_intr_cnt++; + } + + /* log PPP PF errors */ + desc_data = (uint32_t *)&desc[PPP_PF_ERROR_OFFSET]; + status = 
rte_le_to_cpu_32(*desc_data); + if (status) { + req_level = hns3_find_highest_level(hns, + "PPP_PF_ABNORMAL_INT_ST0", + ppp_pf_abnormal_int, + status); + hns3_atomic_set_bit(req_level, levels); + pf->abn_int_stats.ppp_pf_abnormal_intr_cnt++; + } + + /* log PPU(RCB) PF errors */ + desc_data = (uint32_t *)&desc[PPU_PF_ERROR_OFFSET]; + status = rte_le_to_cpu_32(*desc_data) & HNS3_PPU_PF_INT_MSIX_MASK; + if (status) { + req_level = hns3_find_highest_level(hns, + "PPU_PF_ABNORMAL_INT_ST", + ppu_pf_abnormal_int, + status); + hns3_atomic_set_bit(req_level, levels); + pf->abn_int_stats.ppu_pf_abnormal_intr_cnt++; + } + + /* clear all PF MSIx errors */ + ret = clear_all_pf_msix_err(hw, desc, pf_bd_num); + if (ret) + hns3_err(hw, "clear all pf msix int cmd failed: %d", ret); +out: + rte_free(desc); +} + +int +hns3_reset_init(struct hns3_hw *hw) +{ + rte_spinlock_init(&hw->lock); + hw->reset.level = HNS3_NONE_RESET; + hw->reset.stage = RESET_STAGE_NONE; + hw->reset.request = 0; + hw->reset.pending = 0; + rte_atomic16_init(&hw->reset.resetting); + rte_atomic16_init(&hw->reset.disable_cmd); + hw->reset.wait_data = rte_zmalloc("wait_data", + sizeof(struct hns3_wait_data), 0); + if (!hw->reset.wait_data) { + PMD_INIT_LOG(ERR, "Failed to allocate memory for wait_data"); + return -ENOMEM; + } + return 0; +} + +void +hns3_schedule_reset(struct hns3_adapter *hns) +{ + struct hns3_hw *hw = &hns->hw; + + /* Reschedule the reset process after successful initialization */ + if (hw->adapter_state == HNS3_NIC_UNINITIALIZED) { + rte_atomic16_set(&hns->hw.reset.schedule, SCHEDULE_PENDING); + return; + } + + if (hw->adapter_state >= HNS3_NIC_CLOSED) + return; + + /* Schedule restart alarm if it is not scheduled yet */ + if (rte_atomic16_read(&hns->hw.reset.schedule) == SCHEDULE_REQUESTED) + return; + if (rte_atomic16_read(&hns->hw.reset.schedule) == SCHEDULE_DEFERRED) + rte_eal_alarm_cancel(hw->reset.ops->reset_service, hns); + rte_atomic16_set(&hns->hw.reset.schedule, SCHEDULE_REQUESTED); + + rte_eal_alarm_set(SWITCH_CONTEXT_US, hw->reset.ops->reset_service, hns); +} + +void +hns3_schedule_delayed_reset(struct hns3_adapter *hns) +{ +#define DEFERRED_SCHED_US (3 * MSEC_PER_SEC * USEC_PER_MSEC) + struct hns3_hw *hw = &hns->hw; + + /* Do nothing if it is uninited or closed */ + if (hw->adapter_state == HNS3_NIC_UNINITIALIZED || + hw->adapter_state >= HNS3_NIC_CLOSED) { + return; + } + + if (rte_atomic16_read(&hns->hw.reset.schedule) != SCHEDULE_NONE) + return; + rte_atomic16_set(&hns->hw.reset.schedule, SCHEDULE_DEFERRED); + rte_eal_alarm_set(DEFERRED_SCHED_US, hw->reset.ops->reset_service, hns); +} + +void +hns3_wait_callback(void *param) +{ + struct hns3_wait_data *data = (struct hns3_wait_data *)param; + struct hns3_adapter *hns = data->hns; + struct hns3_hw *hw = &hns->hw; + uint64_t msec; + bool done; + + data->count--; + if (data->check_completion) { + /* + * Check if the current time exceeds the deadline + * or a pending reset coming, or reset during close. 
+ */ + msec = get_timeofday_ms(); + if (msec > data->end_ms || is_reset_pending(hns) || + hw->adapter_state == HNS3_NIC_CLOSING) { + done = false; + data->count = 0; + } else + done = data->check_completion(hw); + } else + done = true; + + if (!done && data->count > 0) { + rte_eal_alarm_set(data->interval, hns3_wait_callback, data); + return; + } + if (done) + data->result = HNS3_WAIT_SUCCESS; + else { + hns3_err(hw, "%s wait timeout at stage %d", + reset_string[hw->reset.level], hw->reset.stage); + data->result = HNS3_WAIT_TIMEOUT; + } + hns3_schedule_reset(hns); +} + +void +hns3_notify_reset_ready(struct hns3_hw *hw, bool enable) +{ + uint32_t reg_val; + + reg_val = hns3_read_dev(hw, HNS3_CMDQ_TX_DEPTH_REG); + if (enable) + reg_val |= HNS3_NIC_SW_RST_RDY; + else + reg_val &= ~HNS3_NIC_SW_RST_RDY; + + hns3_write_dev(hw, HNS3_CMDQ_TX_DEPTH_REG, reg_val); +} + +int +hns3_reset_req_hw_reset(struct hns3_adapter *hns) +{ + struct hns3_hw *hw = &hns->hw; + + if (hw->reset.wait_data->result == HNS3_WAIT_UNKNOWN) { + hw->reset.wait_data->hns = hns; + hw->reset.wait_data->check_completion = NULL; + hw->reset.wait_data->interval = HNS3_RESET_SYNC_US; + hw->reset.wait_data->count = 1; + hw->reset.wait_data->result = HNS3_WAIT_REQUEST; + rte_eal_alarm_set(hw->reset.wait_data->interval, + hns3_wait_callback, hw->reset.wait_data); + return -EAGAIN; + } else if (hw->reset.wait_data->result == HNS3_WAIT_REQUEST) + return -EAGAIN; + + /* inform hardware that preparatory work is done */ + hns3_notify_reset_ready(hw, true); + return 0; +} + +static void +hns3_clear_reset_level(struct hns3_hw *hw, uint64_t *levels) +{ + uint64_t merge_cnt = hw->reset.stats.merge_cnt; + int64_t tmp; + + switch (hw->reset.level) { + case HNS3_IMP_RESET: + hns3_atomic_clear_bit(HNS3_IMP_RESET, levels); + tmp = hns3_test_and_clear_bit(HNS3_GLOBAL_RESET, levels); + HNS3_CHECK_MERGE_CNT(tmp); + tmp = hns3_test_and_clear_bit(HNS3_FUNC_RESET, levels); + HNS3_CHECK_MERGE_CNT(tmp); + break; + case HNS3_GLOBAL_RESET: + hns3_atomic_clear_bit(HNS3_GLOBAL_RESET, levels); + tmp = hns3_test_and_clear_bit(HNS3_FUNC_RESET, levels); + HNS3_CHECK_MERGE_CNT(tmp); + break; + case HNS3_FUNC_RESET: + hns3_atomic_clear_bit(HNS3_FUNC_RESET, levels); + break; + case HNS3_VF_RESET: + hns3_atomic_clear_bit(HNS3_VF_RESET, levels); + tmp = hns3_test_and_clear_bit(HNS3_VF_PF_FUNC_RESET, levels); + HNS3_CHECK_MERGE_CNT(tmp); + tmp = hns3_test_and_clear_bit(HNS3_VF_FUNC_RESET, levels); + HNS3_CHECK_MERGE_CNT(tmp); + break; + case HNS3_VF_FULL_RESET: + hns3_atomic_clear_bit(HNS3_VF_FULL_RESET, levels); + tmp = hns3_test_and_clear_bit(HNS3_VF_FUNC_RESET, levels); + HNS3_CHECK_MERGE_CNT(tmp); + break; + case HNS3_VF_PF_FUNC_RESET: + hns3_atomic_clear_bit(HNS3_VF_PF_FUNC_RESET, levels); + tmp = hns3_test_and_clear_bit(HNS3_VF_FUNC_RESET, levels); + HNS3_CHECK_MERGE_CNT(tmp); + break; + case HNS3_VF_FUNC_RESET: + hns3_atomic_clear_bit(HNS3_VF_FUNC_RESET, levels); + break; + case HNS3_FLR_RESET: + hns3_atomic_clear_bit(HNS3_FLR_RESET, levels); + break; + case HNS3_NONE_RESET: + default: + return; + }; + if (merge_cnt != hw->reset.stats.merge_cnt) + hns3_warn(hw, + "No need to do low-level reset after %s reset. 
" + "merge cnt: %" PRIx64 " total merge cnt: %" PRIx64, + reset_string[hw->reset.level], + hw->reset.stats.merge_cnt - merge_cnt, + hw->reset.stats.merge_cnt); +} + +static bool +hns3_reset_err_handle(struct hns3_adapter *hns) +{ +#define MAX_RESET_FAIL_CNT 5 + + struct hns3_hw *hw = &hns->hw; + + if (hw->adapter_state == HNS3_NIC_CLOSING) + goto reset_fail; + + if (is_reset_pending(hns)) { + hw->reset.attempts = 0; + hw->reset.stats.fail_cnt++; + hns3_warn(hw, "%s reset fail because new Reset is pending " + "attempts:%" PRIx64, + reset_string[hw->reset.level], + hw->reset.stats.fail_cnt); + hw->reset.level = HNS3_NONE_RESET; + return true; + } + + hw->reset.attempts++; + if (hw->reset.attempts < MAX_RESET_FAIL_CNT) { + hns3_atomic_set_bit(hw->reset.level, &hw->reset.pending); + hns3_warn(hw, "%s retry to reset attempts: %d", + reset_string[hw->reset.level], + hw->reset.attempts); + return true; + } + + if (rte_atomic16_read(&hw->reset.disable_cmd)) + hns3_cmd_init(hw); +reset_fail: + hw->reset.attempts = 0; + hw->reset.stats.fail_cnt++; + hns3_warn(hw, "%s reset fail fail_cnt:%" PRIx64 " success_cnt:%" PRIx64 + " global_cnt:%" PRIx64 " imp_cnt:%" PRIx64 + " request_cnt:%" PRIx64 " exec_cnt:%" PRIx64 + " merge_cnt:%" PRIx64 "adapter_state:%d", + reset_string[hw->reset.level], hw->reset.stats.fail_cnt, + hw->reset.stats.success_cnt, hw->reset.stats.global_cnt, + hw->reset.stats.imp_cnt, hw->reset.stats.request_cnt, + hw->reset.stats.exec_cnt, hw->reset.stats.merge_cnt, + hw->adapter_state); + + /* IMP no longer waiting the ready flag */ + hns3_notify_reset_ready(hw, true); + return false; +} + +static int +hns3_reset_pre(struct hns3_adapter *hns) +{ + struct hns3_hw *hw = &hns->hw; + struct timeval tv; + int ret; + + if (hw->reset.stage == RESET_STAGE_NONE) { + rte_atomic16_set(&hns->hw.reset.resetting, 1); + hw->reset.stage = RESET_STAGE_DOWN; + ret = hw->reset.ops->stop_service(hns); + gettimeofday(&tv, NULL); + if (ret) { + hns3_warn(hw, "Reset step1 down fail=%d time=%ld.%.6ld", + ret, tv.tv_sec, tv.tv_usec); + return ret; + } + hns3_warn(hw, "Reset step1 down success time=%ld.%.6ld", + tv.tv_sec, tv.tv_usec); + hw->reset.stage = RESET_STAGE_PREWAIT; + } + if (hw->reset.stage == RESET_STAGE_PREWAIT) { + ret = hw->reset.ops->prepare_reset(hns); + gettimeofday(&tv, NULL); + if (ret) { + hns3_warn(hw, + "Reset step2 prepare wait fail=%d time=%ld.%.6ld", + ret, tv.tv_sec, tv.tv_usec); + return ret; + } + hns3_warn(hw, "Reset step2 prepare wait success time=%ld.%.6ld", + tv.tv_sec, tv.tv_usec); + hw->reset.stage = RESET_STAGE_REQ_HW_RESET; + hw->reset.wait_data->result = HNS3_WAIT_UNKNOWN; + } + return 0; +} + +static int +hns3_reset_post(struct hns3_adapter *hns) +{ +#define TIMEOUT_RETRIES_CNT 5 + struct hns3_hw *hw = &hns->hw; + struct timeval tv_delta; + struct timeval tv; + int ret = 0; + + if (hw->adapter_state == HNS3_NIC_CLOSING) { + hns3_warn(hw, "Don't do reset_post during closing, just uninit cmd"); + hns3_cmd_uninit(hw); + return -EPERM; + } + + if (hw->reset.stage == RESET_STAGE_DEV_INIT) { + rte_spinlock_lock(&hw->lock); + if (hw->reset.mbuf_deferred_free) { + hns3_dev_release_mbufs(hns); + hw->reset.mbuf_deferred_free = false; + } + ret = hw->reset.ops->reinit_dev(hns); + rte_spinlock_unlock(&hw->lock); + gettimeofday(&tv, NULL); + if (ret) { + hns3_warn(hw, "Reset step5 devinit fail=%d retries=%d", + ret, hw->reset.retries); + goto err; + } + hns3_warn(hw, "Reset step5 devinit success time=%ld.%.6ld", + tv.tv_sec, tv.tv_usec); + hw->reset.retries = 0; + hw->reset.stage = 
RESET_STAGE_RESTORE; + rte_eal_alarm_set(SWITCH_CONTEXT_US, + hw->reset.ops->reset_service, hns); + return -EAGAIN; + } + if (hw->reset.stage == RESET_STAGE_RESTORE) { + rte_spinlock_lock(&hw->lock); + ret = hw->reset.ops->restore_conf(hns); + rte_spinlock_unlock(&hw->lock); + gettimeofday(&tv, NULL); + if (ret) { + hns3_warn(hw, + "Reset step6 restore fail=%d retries=%d", + ret, hw->reset.retries); + goto err; + } + hns3_warn(hw, "Reset step6 restore success time=%ld.%.6ld", + tv.tv_sec, tv.tv_usec); + hw->reset.retries = 0; + hw->reset.stage = RESET_STAGE_DONE; + } + if (hw->reset.stage == RESET_STAGE_DONE) { + /* IMP will wait ready flag before reset */ + hns3_notify_reset_ready(hw, false); + hns3_clear_reset_level(hw, &hw->reset.pending); + rte_atomic16_clear(&hns->hw.reset.resetting); + hw->reset.attempts = 0; + hw->reset.stats.success_cnt++; + hw->reset.stage = RESET_STAGE_NONE; + rte_spinlock_lock(&hw->lock); + hw->reset.ops->start_service(hns); + rte_spinlock_unlock(&hw->lock); + gettimeofday(&tv, NULL); + timersub(&tv, &hw->reset.start_time, &tv_delta); + hns3_warn(hw, "%s reset done fail_cnt:%" PRIx64 + " success_cnt:%" PRIx64 " global_cnt:%" PRIx64 + " imp_cnt:%" PRIx64 " request_cnt:%" PRIx64 + " exec_cnt:%" PRIx64 " merge_cnt:%" PRIx64, + reset_string[hw->reset.level], + hw->reset.stats.fail_cnt, hw->reset.stats.success_cnt, + hw->reset.stats.global_cnt, hw->reset.stats.imp_cnt, + hw->reset.stats.request_cnt, hw->reset.stats.exec_cnt, + hw->reset.stats.merge_cnt); + hns3_warn(hw, + "%s reset done delta %ld ms time=%ld.%.6ld", + reset_string[hw->reset.level], + tv_delta.tv_sec * MSEC_PER_SEC + + tv_delta.tv_usec / USEC_PER_MSEC, + tv.tv_sec, tv.tv_usec); + hw->reset.level = HNS3_NONE_RESET; + } + return 0; + +err: + if (ret == -ETIME) { + hw->reset.retries++; + if (hw->reset.retries < TIMEOUT_RETRIES_CNT) { + rte_eal_alarm_set(HNS3_RESET_SYNC_US, + hw->reset.ops->reset_service, hns); + return -EAGAIN; + } + } + hw->reset.retries = 0; + return -EIO; +} + +/* + * There are three scenarios as follows: + * When the reset is not in progress, the reset process starts. + * During the reset process, if the reset level has not changed, + * the reset process continues; otherwise, the reset process is aborted. 
+ * hw->reset.level new_level action + * HNS3_NONE_RESET HNS3_XXXX_RESET start reset + * HNS3_XXXX_RESET HNS3_XXXX_RESET continue reset + * HNS3_LOW_RESET HNS3_HIGH_RESET abort + */ +int +hns3_reset_process(struct hns3_adapter *hns, enum hns3_reset_level new_level) +{ + struct hns3_hw *hw = &hns->hw; + struct timeval tv_delta; + struct timeval tv; + int ret; + + if (hw->reset.level == HNS3_NONE_RESET) { + hw->reset.level = new_level; + hw->reset.stats.exec_cnt++; + gettimeofday(&hw->reset.start_time, NULL); + hns3_warn(hw, "Start %s reset time=%ld.%.6ld", + reset_string[hw->reset.level], + hw->reset.start_time.tv_sec, + hw->reset.start_time.tv_usec); + } + + if (is_reset_pending(hns)) { + gettimeofday(&tv, NULL); + hns3_warn(hw, + "%s reset is aborted by high level time=%ld.%.6ld", + reset_string[hw->reset.level], tv.tv_sec, tv.tv_usec); + if (hw->reset.wait_data->result == HNS3_WAIT_REQUEST) + rte_eal_alarm_cancel(hns3_wait_callback, + hw->reset.wait_data); + ret = -EBUSY; + goto err; + } + + ret = hns3_reset_pre(hns); + if (ret) + goto err; + + if (hw->reset.stage == RESET_STAGE_REQ_HW_RESET) { + ret = hns3_reset_req_hw_reset(hns); + if (ret == -EAGAIN) + return ret; + gettimeofday(&tv, NULL); + hns3_warn(hw, + "Reset step3 request IMP reset success time=%ld.%.6ld", + tv.tv_sec, tv.tv_usec); + hw->reset.stage = RESET_STAGE_WAIT; + hw->reset.wait_data->result = HNS3_WAIT_UNKNOWN; + } + if (hw->reset.stage == RESET_STAGE_WAIT) { + ret = hw->reset.ops->wait_hardware_ready(hns); + if (ret) + goto retry; + gettimeofday(&tv, NULL); + hns3_warn(hw, "Reset step4 reset wait success time=%ld.%.6ld", + tv.tv_sec, tv.tv_usec); + hw->reset.stage = RESET_STAGE_DEV_INIT; + } + + ret = hns3_reset_post(hns); + if (ret) + goto retry; + + return 0; +retry: + if (ret == -EAGAIN) + return ret; +err: + hns3_clear_reset_level(hw, &hw->reset.pending); + if (hns3_reset_err_handle(hns)) { + hw->reset.stage = RESET_STAGE_PREWAIT; + hns3_schedule_reset(hns); + } else { + rte_spinlock_lock(&hw->lock); + if (hw->reset.mbuf_deferred_free) { + hns3_dev_release_mbufs(hns); + hw->reset.mbuf_deferred_free = false; + } + rte_spinlock_unlock(&hw->lock); + rte_atomic16_clear(&hns->hw.reset.resetting); + hw->reset.stage = RESET_STAGE_NONE; + gettimeofday(&tv, NULL); + timersub(&tv, &hw->reset.start_time, &tv_delta); + hns3_warn(hw, "%s reset fail delta %ld ms time=%ld.%.6ld", + reset_string[hw->reset.level], + tv_delta.tv_sec * MSEC_PER_SEC + + tv_delta.tv_usec / USEC_PER_MSEC, + tv.tv_sec, tv.tv_usec); + hw->reset.level = HNS3_NONE_RESET; + } + + return -EIO; +} + +/* + * The reset process can only be terminated after handshake with IMP(step3), + * so that IMP can complete the reset process normally. 
+ */ +void +hns3_reset_abort(struct hns3_adapter *hns) +{ + struct hns3_hw *hw = &hns->hw; + struct timeval tv; + int i; + + for (i = 0; i < HNS3_QUIT_RESET_CNT; i++) { + if (hw->reset.level == HNS3_NONE_RESET) + break; + rte_delay_ms(HNS3_QUIT_RESET_DELAY_MS); + } + + /* IMP no longer waiting the ready flag */ + hns3_notify_reset_ready(hw, true); + + rte_eal_alarm_cancel(hw->reset.ops->reset_service, hns); + rte_eal_alarm_cancel(hns3_wait_callback, hw->reset.wait_data); + + if (hw->reset.level != HNS3_NONE_RESET) { + gettimeofday(&tv, NULL); + hns3_err(hw, "Failed to terminate reset: %s time=%ld.%.6ld", + reset_string[hw->reset.level], tv.tv_sec, tv.tv_usec); + } +} diff --git a/src/spdk/dpdk/drivers/net/hns3/hns3_intr.h b/src/spdk/dpdk/drivers/net/hns3/hns3_intr.h new file mode 100644 index 000000000..d0af16c50 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/hns3/hns3_intr.h @@ -0,0 +1,79 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018-2019 Hisilicon Limited. + */ + +#ifndef _HNS3_INTR_H_ +#define _HNS3_INTR_H_ + +#define HNS3_PPP_MPF_ECC_ERR_INT0_EN 0xFFFFFFFF +#define HNS3_PPP_MPF_ECC_ERR_INT0_EN_MASK 0xFFFFFFFF +#define HNS3_PPP_MPF_ECC_ERR_INT1_EN 0xFFFFFFFF +#define HNS3_PPP_MPF_ECC_ERR_INT1_EN_MASK 0xFFFFFFFF +#define HNS3_PPP_PF_ERR_INT_EN 0x0003 +#define HNS3_PPP_PF_ERR_INT_EN_MASK 0x0003 +#define HNS3_PPP_MPF_ECC_ERR_INT2_EN 0x003F +#define HNS3_PPP_MPF_ECC_ERR_INT2_EN_MASK 0x003F +#define HNS3_PPP_MPF_ECC_ERR_INT3_EN 0x003F +#define HNS3_PPP_MPF_ECC_ERR_INT3_EN_MASK 0x003F + +#define HNS3_MAC_COMMON_ERR_INT_EN 0x107FF +#define HNS3_MAC_COMMON_ERR_INT_EN_MASK 0x107FF + +#define HNS3_PPU_MPF_ABNORMAL_INT0_EN GENMASK(31, 0) +#define HNS3_PPU_MPF_ABNORMAL_INT0_EN_MASK GENMASK(31, 0) +#define HNS3_PPU_MPF_ABNORMAL_INT1_EN GENMASK(31, 0) +#define HNS3_PPU_MPF_ABNORMAL_INT1_EN_MASK GENMASK(31, 0) +#define HNS3_PPU_MPF_ABNORMAL_INT2_EN 0x3FFF3FFF +#define HNS3_PPU_MPF_ABNORMAL_INT2_EN_MASK 0x3FFF3FFF +#define HNS3_PPU_MPF_ABNORMAL_INT2_EN2 0xB +#define HNS3_PPU_MPF_ABNORMAL_INT2_EN2_MASK 0xB +#define HNS3_PPU_MPF_ABNORMAL_INT3_EN GENMASK(7, 0) +#define HNS3_PPU_MPF_ABNORMAL_INT3_EN_MASK GENMASK(23, 16) +#define HNS3_PPU_PF_ABNORMAL_INT_EN GENMASK(5, 0) +#define HNS3_PPU_PF_ABNORMAL_INT_EN_MASK GENMASK(5, 0) +#define HNS3_PPU_PF_INT_MSIX_MASK 0x27 +#define HNS3_PPU_MPF_INT_ST2_MSIX_MASK GENMASK(29, 28) + +#define HNS3_SSU_1BIT_ECC_ERR_INT_EN GENMASK(31, 0) +#define HNS3_SSU_1BIT_ECC_ERR_INT_EN_MASK GENMASK(31, 0) +#define HNS3_SSU_MULTI_BIT_ECC_ERR_INT_EN GENMASK(31, 0) +#define HNS3_SSU_MULTI_BIT_ECC_ERR_INT_EN_MASK GENMASK(31, 0) +#define HNS3_SSU_BIT32_ECC_ERR_INT_EN 0x0101 +#define HNS3_SSU_BIT32_ECC_ERR_INT_EN_MASK 0x0101 +#define HNS3_SSU_COMMON_INT_EN GENMASK(9, 0) +#define HNS3_SSU_COMMON_INT_EN_MASK GENMASK(9, 0) +#define HNS3_SSU_PORT_BASED_ERR_INT_EN 0x0BFF +#define HNS3_SSU_PORT_BASED_ERR_INT_EN_MASK 0x0BFF0000 +#define HNS3_SSU_FIFO_OVERFLOW_ERR_INT_EN GENMASK(23, 0) +#define HNS3_SSU_FIFO_OVERFLOW_ERR_INT_EN_MASK GENMASK(23, 0) +#define HNS3_SSU_COMMON_ERR_INT_MASK GENMASK(9, 0) +#define HNS3_SSU_PORT_INT_MSIX_MASK 0x7BFF + +#define HNS3_RESET_PROCESS_MS 200 + +struct hns3_hw_blk { + const char *name; + int (*enable_err_intr)(struct hns3_adapter *hns, bool en); +}; + +struct hns3_hw_error { + uint32_t int_msk; + const char *msg; + enum hns3_reset_level reset_level; +}; + +int hns3_enable_hw_error_intr(struct hns3_adapter *hns, bool state); +void hns3_handle_msix_error(struct hns3_adapter *hns, uint64_t *levels); +void hns3_intr_unregister(const struct 
rte_intr_handle *hdl, + rte_intr_callback_fn cb_fn, void *cb_arg); +void hns3_notify_reset_ready(struct hns3_hw *hw, bool enable); +int hns3_reset_init(struct hns3_hw *hw); +void hns3_wait_callback(void *param); +void hns3_schedule_reset(struct hns3_adapter *hns); +void hns3_schedule_delayed_reset(struct hns3_adapter *hns); +int hns3_reset_req_hw_reset(struct hns3_adapter *hns); +int hns3_reset_process(struct hns3_adapter *hns, + enum hns3_reset_level reset_level); +void hns3_reset_abort(struct hns3_adapter *hns); + +#endif /* _HNS3_INTR_H_ */ diff --git a/src/spdk/dpdk/drivers/net/hns3/hns3_logs.h b/src/spdk/dpdk/drivers/net/hns3/hns3_logs.h new file mode 100644 index 000000000..f3fc7b51d --- /dev/null +++ b/src/spdk/dpdk/drivers/net/hns3/hns3_logs.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018-2019 Hisilicon Limited. + */ + +#ifndef _HNS3_LOGS_H_ +#define _HNS3_LOGS_H_ + +extern int hns3_logtype_init; +#define PMD_INIT_LOG(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, hns3_logtype_init, "%s(): " fmt "\n", \ + __func__, ##args) +#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>") + +extern int hns3_logtype_driver; +#define PMD_DRV_LOG_RAW(hw, level, fmt, args...) \ + rte_log(level, hns3_logtype_driver, "%s %s(): " fmt, \ + (hw)->data->name, __func__, ## args) + +#define hns3_err(hw, fmt, args...) \ + PMD_DRV_LOG_RAW(hw, RTE_LOG_ERR, fmt "\n", ## args) + +#define hns3_warn(hw, fmt, args...) \ + PMD_DRV_LOG_RAW(hw, RTE_LOG_WARNING, fmt "\n", ## args) + +#define hns3_notice(hw, fmt, args...) \ + PMD_DRV_LOG_RAW(hw, RTE_LOG_NOTICE, fmt "\n", ## args) + +#define hns3_info(hw, fmt, args...) \ + PMD_DRV_LOG_RAW(hw, RTE_LOG_INFO, fmt "\n", ## args) + +#define hns3_dbg(hw, fmt, args...) \ + PMD_DRV_LOG_RAW(hw, RTE_LOG_DEBUG, fmt "\n", ## args) + +#endif /* _HNS3_LOGS_H_ */ diff --git a/src/spdk/dpdk/drivers/net/hns3/hns3_mbx.c b/src/spdk/dpdk/drivers/net/hns3/hns3_mbx.c new file mode 100644 index 000000000..34c8c688f --- /dev/null +++ b/src/spdk/dpdk/drivers/net/hns3/hns3_mbx.c @@ -0,0 +1,423 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018-2019 Hisilicon Limited. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "hns3_ethdev.h" +#include "hns3_regs.h" +#include "hns3_logs.h" +#include "hns3_intr.h" + +#define HNS3_CMD_CODE_OFFSET 2 + +static const struct errno_respcode_map err_code_map[] = { + {0, 0}, + {1, -EPERM}, + {2, -ENOENT}, + {5, -EIO}, + {11, -EAGAIN}, + {12, -ENOMEM}, + {16, -EBUSY}, + {22, -EINVAL}, + {28, -ENOSPC}, + {95, -EOPNOTSUPP}, +}; + +static int +hns3_resp_to_errno(uint16_t resp_code) +{ + uint32_t i, num; + + num = sizeof(err_code_map) / sizeof(struct errno_respcode_map); + for (i = 0; i < num; i++) { + if (err_code_map[i].resp_code == resp_code) + return err_code_map[i].err_no; + } + + return -EIO; +} + +static void +hns3_poll_all_sync_msg(void) +{ + struct rte_eth_dev *eth_dev; + struct hns3_adapter *adapter; + const char *name; + uint16_t port_id; + + RTE_ETH_FOREACH_DEV(port_id) { + eth_dev = &rte_eth_devices[port_id]; + name = eth_dev->device->driver->name; + if (strcmp(name, "net_hns3") && strcmp(name, "net_hns3_vf")) + continue; + adapter = eth_dev->data->dev_private; + if (!adapter || adapter->hw.adapter_state == HNS3_NIC_CLOSED) + continue; + /* Synchronous msg, the mbx_resp.req_msg_data is non-zero */ + if (adapter->hw.mbx_resp.req_msg_data) + hns3_dev_handle_mbx_msg(&adapter->hw); + } +} + +static int +hns3_get_mbx_resp(struct hns3_hw *hw, uint16_t code0, uint16_t code1, + uint8_t *resp_data, uint16_t resp_len) +{ +#define HNS3_MAX_RETRY_MS 500 + struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); + struct hns3_mbx_resp_status *mbx_resp; + bool in_irq = false; + uint64_t now; + uint64_t end; + + if (resp_len > HNS3_MBX_MAX_RESP_DATA_SIZE) { + hns3_err(hw, "VF mbx response len(=%d) exceeds maximum(=%d)", + resp_len, HNS3_MBX_MAX_RESP_DATA_SIZE); + return -EINVAL; + } + + now = get_timeofday_ms(); + end = now + HNS3_MAX_RETRY_MS; + while ((hw->mbx_resp.head != hw->mbx_resp.tail + hw->mbx_resp.lost) && + (now < end)) { + if (rte_atomic16_read(&hw->reset.disable_cmd)) { + hns3_err(hw, "Don't wait for mbx response because of " + "disable_cmd"); + return -EBUSY; + } + + if (is_reset_pending(hns)) { + hw->mbx_resp.req_msg_data = 0; + hns3_err(hw, "Don't wait for mbx response because of " + "reset pending"); + return -EIO; + } + + /* + * The mbox response is handled on the interrupt thread. A mailbox + * sent from the interrupt thread cannot wait for its response, so + * poll the mbox response on the interrupt thread instead.
+ */ + if (pthread_equal(hw->irq_thread_id, pthread_self())) { + in_irq = true; + hns3_poll_all_sync_msg(); + } else { + rte_delay_ms(HNS3_POLL_RESPONE_MS); + } + now = get_timeofday_ms(); + } + hw->mbx_resp.req_msg_data = 0; + if (now >= end) { + hw->mbx_resp.lost++; + hns3_err(hw, + "VF could not get mbx(%d,%d) head(%d) tail(%d) lost(%d) from PF in_irq:%d", + code0, code1, hw->mbx_resp.head, hw->mbx_resp.tail, + hw->mbx_resp.lost, in_irq); + return -ETIME; + } + rte_io_rmb(); + mbx_resp = &hw->mbx_resp; + + if (mbx_resp->resp_status) + return mbx_resp->resp_status; + + if (resp_data) + memcpy(resp_data, &mbx_resp->additional_info[0], resp_len); + + return 0; +} + +int +hns3_send_mbx_msg(struct hns3_hw *hw, uint16_t code, uint16_t subcode, + const uint8_t *msg_data, uint8_t msg_len, bool need_resp, + uint8_t *resp_data, uint16_t resp_len) +{ + struct hns3_mbx_vf_to_pf_cmd *req; + struct hns3_cmd_desc desc; + bool is_ring_vector_msg; + int offset; + int ret; + + req = (struct hns3_mbx_vf_to_pf_cmd *)desc.data; + + /* first two bytes are reserved for code & subcode */ + if (msg_len > (HNS3_MBX_MAX_MSG_SIZE - HNS3_CMD_CODE_OFFSET)) { + hns3_err(hw, + "VF send mbx msg fail, msg len %d exceeds max payload len %d", + msg_len, HNS3_MBX_MAX_MSG_SIZE - HNS3_CMD_CODE_OFFSET); + return -EINVAL; + } + + hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MBX_VF_TO_PF, false); + req->msg[0] = code; + is_ring_vector_msg = (code == HNS3_MBX_MAP_RING_TO_VECTOR) || + (code == HNS3_MBX_UNMAP_RING_TO_VECTOR) || + (code == HNS3_MBX_GET_RING_VECTOR_MAP); + if (!is_ring_vector_msg) + req->msg[1] = subcode; + if (msg_data) { + offset = is_ring_vector_msg ? 1 : HNS3_CMD_CODE_OFFSET; + memcpy(&req->msg[offset], msg_data, msg_len); + } + + /* synchronous send */ + if (need_resp) { + req->mbx_need_resp |= HNS3_MBX_NEED_RESP_BIT; + rte_spinlock_lock(&hw->mbx_resp.lock); + hw->mbx_resp.req_msg_data = (uint32_t)code << 16 | subcode; + hw->mbx_resp.head++; + ret = hns3_cmd_send(hw, &desc, 1); + if (ret) { + rte_spinlock_unlock(&hw->mbx_resp.lock); + hns3_err(hw, "VF failed(=%d) to send mbx message to PF", + ret); + return ret; + } + + ret = hns3_get_mbx_resp(hw, code, subcode, resp_data, resp_len); + rte_spinlock_unlock(&hw->mbx_resp.lock); + } else { + /* asynchronous send */ + ret = hns3_cmd_send(hw, &desc, 1); + if (ret) { + hns3_err(hw, "VF failed(=%d) to send mbx message to PF", + ret); + return ret; + } + } + + return ret; +} + +static bool +hns3_cmd_crq_empty(struct hns3_hw *hw) +{ + uint32_t tail = hns3_read_dev(hw, HNS3_CMDQ_RX_TAIL_REG); + + return tail == hw->cmq.crq.next_to_use; +} + +static void +hns3_mbx_handler(struct hns3_hw *hw) +{ + struct hns3_mac *mac = &hw->mac; + enum hns3_reset_level reset_level; + uint16_t *msg_q; + uint8_t opcode; + uint32_t tail; + + tail = hw->arq.tail; + + /* process all the async queue messages */ + while (tail != hw->arq.head) { + msg_q = hw->arq.msg_q[hw->arq.head]; + + opcode = msg_q[0] & 0xff; + switch (opcode) { + case HNS3_MBX_LINK_STAT_CHANGE: + memcpy(&mac->link_speed, &msg_q[2], + sizeof(mac->link_speed)); + mac->link_status = rte_le_to_cpu_16(msg_q[1]); + mac->link_duplex = (uint8_t)rte_le_to_cpu_16(msg_q[4]); + break; + case HNS3_MBX_ASSERTING_RESET: + /* PF has asserted reset hence VF should go in pending + * state and poll for the hardware reset status till it + * has been completely reset. After this stack should + * eventually be re-initialized. 
+ */ + reset_level = rte_le_to_cpu_16(msg_q[1]); + hns3_atomic_set_bit(reset_level, &hw->reset.pending); + + hns3_warn(hw, "PF inform reset level %d", reset_level); + hw->reset.stats.request_cnt++; + hns3_schedule_reset(HNS3_DEV_HW_TO_ADAPTER(hw)); + break; + default: + hns3_err(hw, "Fetched unsupported(%d) message from arq", + opcode); + break; + } + + hns3_mbx_head_ptr_move_arq(hw->arq); + msg_q = hw->arq.msg_q[hw->arq.head]; + } +} + +/* + * Case 1: a response is received after the timeout; req_msg_data is 0 and + * does not equal resp_msg, so decrement lost. + * Case 2: the previous response arrives while a new send_mbx_msg is in + * progress; req_msg_data differs from resp_msg, so decrement lost and + * continue to wait for the response. + */ +static void +hns3_update_resp_position(struct hns3_hw *hw, uint32_t resp_msg) +{ + struct hns3_mbx_resp_status *resp = &hw->mbx_resp; + uint32_t tail = resp->tail + 1; + + if (tail > resp->head) + tail = resp->head; + if (resp->req_msg_data != resp_msg) { + if (resp->lost) + resp->lost--; + hns3_warn(hw, "Received a mismatched response req_msg(%x) " + "resp_msg(%x) head(%d) tail(%d) lost(%d)", + resp->req_msg_data, resp_msg, resp->head, tail, + resp->lost); + } else if (tail + resp->lost > resp->head) { + resp->lost--; + hns3_warn(hw, "Received a new response again resp_msg(%x) " + "head(%d) tail(%d) lost(%d)", resp_msg, + resp->head, tail, resp->lost); + } + rte_io_wmb(); + resp->tail = tail; +} + +static void +hns3_link_fail_parse(struct hns3_hw *hw, uint8_t link_fail_code) +{ + switch (link_fail_code) { + case HNS3_MBX_LF_NORMAL: + break; + case HNS3_MBX_LF_REF_CLOCK_LOST: + hns3_warn(hw, "Reference clock lost!"); + break; + case HNS3_MBX_LF_XSFP_TX_DISABLE: + hns3_warn(hw, "SFP tx is disabled!"); + break; + case HNS3_MBX_LF_XSFP_ABSENT: + hns3_warn(hw, "SFP is absent!"); + break; + default: + hns3_warn(hw, "Unknown fail code:%u!", link_fail_code); + break; + } +} + +static void +hns3_handle_link_change_event(struct hns3_hw *hw, + struct hns3_mbx_pf_to_vf_cmd *req) +{ +#define LINK_STATUS_OFFSET 1 +#define LINK_FAIL_CODE_OFFSET 2 + + if (!req->msg[LINK_STATUS_OFFSET]) + hns3_link_fail_parse(hw, req->msg[LINK_FAIL_CODE_OFFSET]); + + hns3_update_link_status(hw); +} + +static void +hns3_handle_promisc_info(struct hns3_hw *hw, uint16_t promisc_en) +{ + if (!promisc_en) { + /* + * When promisc/allmulti mode is closed by the hns3 PF kernel + * ethdev driver for an untrusted VF, update the VF's related status.
+ */ + hns3_warn(hw, "Promisc mode will be closed by host for being " + "untrusted."); + hw->data->promiscuous = 0; + hw->data->all_multicast = 0; + } +} + +void +hns3_dev_handle_mbx_msg(struct hns3_hw *hw) +{ + struct hns3_mbx_resp_status *resp = &hw->mbx_resp; + struct hns3_cmq_ring *crq = &hw->cmq.crq; + struct hns3_mbx_pf_to_vf_cmd *req; + struct hns3_cmd_desc *desc; + uint32_t msg_data; + uint16_t *msg_q; + uint8_t opcode; + uint16_t flag; + uint8_t *temp; + int i; + + while (!hns3_cmd_crq_empty(hw)) { + if (rte_atomic16_read(&hw->reset.disable_cmd)) + return; + + desc = &crq->desc[crq->next_to_use]; + req = (struct hns3_mbx_pf_to_vf_cmd *)desc->data; + opcode = req->msg[0] & 0xff; + + flag = rte_le_to_cpu_16(crq->desc[crq->next_to_use].flag); + if (unlikely(!hns3_get_bit(flag, HNS3_CMDQ_RX_OUTVLD_B))) { + hns3_warn(hw, + "dropped invalid mailbox message, code = %d", + opcode); + + /* dropping/not processing this invalid message */ + crq->desc[crq->next_to_use].flag = 0; + hns3_mbx_ring_ptr_move_crq(crq); + continue; + } + + switch (opcode) { + case HNS3_MBX_PF_VF_RESP: + resp->resp_status = hns3_resp_to_errno(req->msg[3]); + + temp = (uint8_t *)&req->msg[4]; + for (i = 0; i < HNS3_MBX_MAX_RESP_DATA_SIZE; i++) { + resp->additional_info[i] = *temp; + temp++; + } + msg_data = (uint32_t)req->msg[1] << 16 | req->msg[2]; + hns3_update_resp_position(hw, msg_data); + break; + case HNS3_MBX_LINK_STAT_CHANGE: + case HNS3_MBX_ASSERTING_RESET: + msg_q = hw->arq.msg_q[hw->arq.tail]; + memcpy(&msg_q[0], req->msg, + HNS3_MBX_MAX_ARQ_MSG_SIZE * sizeof(uint16_t)); + hns3_mbx_tail_ptr_move_arq(hw->arq); + + hns3_mbx_handler(hw); + break; + case HNS3_MBX_PUSH_LINK_STATUS: + hns3_handle_link_change_event(hw, req); + break; + case HNS3_MBX_PUSH_PROMISC_INFO: + /* + * When the trust status of VF device changed by the + * hns3 PF kernel driver, VF driver will receive this + * mailbox message from PF driver. + */ + hns3_handle_promisc_info(hw, req->msg[1]); + break; + default: + hns3_err(hw, + "VF received unsupported(%d) mbx msg from PF", + req->msg[0]); + break; + } + + crq->desc[crq->next_to_use].flag = 0; + hns3_mbx_ring_ptr_move_crq(crq); + } + + /* Write back CMDQ_RQ header pointer, IMP need this pointer */ + hns3_write_dev(hw, HNS3_CMDQ_RX_HEAD_REG, crq->next_to_use); +} diff --git a/src/spdk/dpdk/drivers/net/hns3/hns3_mbx.h b/src/spdk/dpdk/drivers/net/hns3/hns3_mbx.h new file mode 100644 index 000000000..d6d70f686 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/hns3/hns3_mbx.h @@ -0,0 +1,166 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018-2019 Hisilicon Limited. 
+ */ + +#ifndef _HNS3_MBX_H_ +#define _HNS3_MBX_H_ + +#define HNS3_MBX_VF_MSG_DATA_NUM 16 + +enum HNS3_MBX_OPCODE { + HNS3_MBX_RESET = 0x01, /* (VF -> PF) assert reset */ + HNS3_MBX_ASSERTING_RESET, /* (PF -> VF) PF is asserting reset */ + HNS3_MBX_SET_UNICAST, /* (VF -> PF) set UC addr */ + HNS3_MBX_SET_MULTICAST, /* (VF -> PF) set MC addr */ + HNS3_MBX_SET_VLAN, /* (VF -> PF) set VLAN */ + HNS3_MBX_MAP_RING_TO_VECTOR, /* (VF -> PF) map ring-to-vector */ + HNS3_MBX_UNMAP_RING_TO_VECTOR, /* (VF -> PF) unmap ring-to-vector */ + HNS3_MBX_SET_PROMISC_MODE, /* (VF -> PF) set promiscuous mode */ + HNS3_MBX_SET_MACVLAN, /* (VF -> PF) set unicast filter */ + HNS3_MBX_API_NEGOTIATE, /* (VF -> PF) negotiate API version */ + HNS3_MBX_GET_QINFO, /* (VF -> PF) get queue config */ + HNS3_MBX_GET_QDEPTH, /* (VF -> PF) get queue depth */ + HNS3_MBX_GET_TCINFO, /* (VF -> PF) get TC config */ + HNS3_MBX_GET_RETA, /* (VF -> PF) get RETA */ + HNS3_MBX_GET_RSS_KEY, /* (VF -> PF) get RSS key */ + HNS3_MBX_GET_MAC_ADDR, /* (VF -> PF) get MAC addr */ + HNS3_MBX_PF_VF_RESP, /* (PF -> VF) generate response to VF */ + HNS3_MBX_GET_BDNUM, /* (VF -> PF) get BD num */ + HNS3_MBX_GET_BUFSIZE, /* (VF -> PF) get buffer size */ + HNS3_MBX_GET_STREAMID, /* (VF -> PF) get stream id */ + HNS3_MBX_SET_AESTART, /* (VF -> PF) start ae */ + HNS3_MBX_SET_TSOSTATS, /* (VF -> PF) get tso stats */ + HNS3_MBX_LINK_STAT_CHANGE, /* (PF -> VF) link status has changed */ + HNS3_MBX_GET_BASE_CONFIG, /* (VF -> PF) get config */ + HNS3_MBX_BIND_FUNC_QUEUE, /* (VF -> PF) bind function and queue */ + HNS3_MBX_GET_LINK_STATUS, /* (VF -> PF) get link status */ + HNS3_MBX_QUEUE_RESET, /* (VF -> PF) reset queue */ + HNS3_MBX_KEEP_ALIVE, /* (VF -> PF) send keep alive cmd */ + HNS3_MBX_SET_ALIVE, /* (VF -> PF) set alive state */ + HNS3_MBX_SET_MTU, /* (VF -> PF) set mtu */ + HNS3_MBX_GET_QID_IN_PF, /* (VF -> PF) get queue id in pf */ + + HNS3_MBX_PUSH_PROMISC_INFO = 36, /* (PF -> VF) push vf promisc info */ + + HNS3_MBX_HANDLE_VF_TBL = 38, /* (VF -> PF) store/clear hw cfg tbl */ + HNS3_MBX_GET_RING_VECTOR_MAP, /* (VF -> PF) get ring-to-vector map */ + HNS3_MBX_PUSH_LINK_STATUS = 201, /* (IMP -> PF) get port link status */ +}; + +/* below are per-VF mac-vlan subcodes */ +enum hns3_mbx_mac_vlan_subcode { + HNS3_MBX_MAC_VLAN_UC_MODIFY = 0, /* modify UC mac addr */ + HNS3_MBX_MAC_VLAN_UC_ADD, /* add a new UC mac addr */ + HNS3_MBX_MAC_VLAN_UC_REMOVE, /* remove a UC mac addr */ + HNS3_MBX_MAC_VLAN_MC_MODIFY, /* modify MC mac addr */ + HNS3_MBX_MAC_VLAN_MC_ADD, /* add new MC mac addr */ + HNS3_MBX_MAC_VLAN_MC_REMOVE, /* remove MC mac addr */ +}; + +/* below are per-VF vlan cfg subcodes */ +enum hns3_mbx_vlan_cfg_subcode { + HNS3_MBX_VLAN_FILTER = 0, /* set vlan filter */ + HNS3_MBX_VLAN_TX_OFF_CFG, /* set tx side vlan offload */ + HNS3_MBX_VLAN_RX_OFF_CFG, /* set rx side vlan offload */ +}; + +enum hns3_mbx_tbl_cfg_subcode { + HNS3_MBX_VPORT_LIST_CLEAR = 0, +}; + +enum hns3_mbx_link_fail_subcode { + HNS3_MBX_LF_NORMAL = 0, + HNS3_MBX_LF_REF_CLOCK_LOST, + HNS3_MBX_LF_XSFP_TX_DISABLE, + HNS3_MBX_LF_XSFP_ABSENT, +}; + +#define HNS3_MBX_MAX_MSG_SIZE 16 +#define HNS3_MBX_MAX_RESP_DATA_SIZE 8 +#define HNS3_MBX_RING_MAP_BASIC_MSG_NUM 3 +#define HNS3_MBX_RING_NODE_VARIABLE_NUM 3 + +struct hns3_mbx_resp_status { + rte_spinlock_t lock; /* protects against contending sync cmd resp */ + uint32_t req_msg_data; + uint32_t head; + uint32_t tail; + uint32_t lost; + int resp_status; + uint8_t additional_info[HNS3_MBX_MAX_RESP_DATA_SIZE]; +}; + +struct 
errno_respcode_map { + uint16_t resp_code; + int err_no; +}; + +#define HNS3_MBX_NEED_RESP_BIT BIT(0) + +struct hns3_mbx_vf_to_pf_cmd { + uint8_t rsv; + uint8_t mbx_src_vfid; /* Auto filled by IMP */ + uint8_t mbx_need_resp; + uint8_t rsv1; + uint8_t msg_len; + uint8_t rsv2[3]; + uint8_t msg[HNS3_MBX_MAX_MSG_SIZE]; +}; + +struct hns3_mbx_pf_to_vf_cmd { + uint8_t dest_vfid; + uint8_t rsv[3]; + uint8_t msg_len; + uint8_t rsv1[3]; + uint16_t msg[8]; +}; + +struct hns3_ring_chain_param { + uint8_t ring_type; + uint8_t tqp_index; + uint8_t int_gl_index; +}; + +#define HNS3_MBX_MAX_RING_CHAIN_PARAM_NUM 4 +struct hns3_vf_bind_vector_msg { + uint8_t vector_id; + uint8_t ring_num; + struct hns3_ring_chain_param param[HNS3_MBX_MAX_RING_CHAIN_PARAM_NUM]; +}; + +struct hns3_vf_rst_cmd { + uint8_t dest_vfid; + uint8_t vf_rst; + uint8_t rsv[22]; +}; + +struct hns3_pf_rst_done_cmd { + uint8_t pf_rst_done; + uint8_t rsv[23]; +}; + +#define HNS3_PF_RESET_DONE_BIT BIT(0) + +/* used by VF to store the received Async responses from PF */ +struct hns3_mbx_arq_ring { +#define HNS3_MBX_MAX_ARQ_MSG_SIZE 8 +#define HNS3_MBX_MAX_ARQ_MSG_NUM 1024 + uint32_t head; + uint32_t tail; + uint32_t count; + uint16_t msg_q[HNS3_MBX_MAX_ARQ_MSG_NUM][HNS3_MBX_MAX_ARQ_MSG_SIZE]; +}; + +#define hns3_mbx_ring_ptr_move_crq(crq) \ + ((crq)->next_to_use = ((crq)->next_to_use + 1) % (crq)->desc_num) +#define hns3_mbx_tail_ptr_move_arq(arq) \ + ((arq).tail = ((arq).tail + 1) % HNS3_MBX_MAX_ARQ_MSG_SIZE) +#define hns3_mbx_head_ptr_move_arq(arq) \ + ((arq).head = ((arq).head + 1) % HNS3_MBX_MAX_ARQ_MSG_SIZE) + +struct hns3_hw; +void hns3_dev_handle_mbx_msg(struct hns3_hw *hw); +int hns3_send_mbx_msg(struct hns3_hw *hw, uint16_t code, uint16_t subcode, + const uint8_t *msg_data, uint8_t msg_len, bool need_resp, + uint8_t *resp_data, uint16_t resp_len); +#endif /* _HNS3_MBX_H_ */ diff --git a/src/spdk/dpdk/drivers/net/hns3/hns3_mp.c b/src/spdk/dpdk/drivers/net/hns3/hns3_mp.c new file mode 100644 index 000000000..596c31064 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/hns3/hns3_mp.c @@ -0,0 +1,214 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018-2019 Hisilicon Limited. + */ + +#include + +#include +#include +#include +#include + +#include "hns3_ethdev.h" +#include "hns3_logs.h" +#include "hns3_rxtx.h" +#include "hns3_mp.h" + +/* + * Initialize IPC message. + * + * @param[in] dev + * Pointer to Ethernet structure. + * @param[out] msg + * Pointer to message to fill in. + * @param[in] type + * Message type. + */ +static inline void +mp_init_msg(struct rte_eth_dev *dev, struct rte_mp_msg *msg, + enum hns3_mp_req_type type) +{ + struct hns3_mp_param *param = (struct hns3_mp_param *)msg->param; + + memset(msg, 0, sizeof(*msg)); + strlcpy(msg->name, HNS3_MP_NAME, sizeof(msg->name)); + msg->len_param = sizeof(*param); + param->type = type; + param->port_id = dev->data->port_id; +} + +/* + * IPC message handler of primary process. + * + * @param[in] dev + * Pointer to Ethernet structure. + * @param[in] peer + * Pointer to the peer socket path. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +mp_primary_handle(const struct rte_mp_msg *mp_msg __rte_unused, + const void *peer __rte_unused) +{ + return 0; +} + +/* + * IPC message handler of a secondary process. + * + * @param[in] dev + * Pointer to Ethernet structure. + * @param[in] peer + * Pointer to the peer socket path. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. 
+ */ +static int +mp_secondary_handle(const struct rte_mp_msg *mp_msg, const void *peer) +{ + struct rte_mp_msg mp_res; + struct hns3_mp_param *res = (struct hns3_mp_param *)mp_res.param; + const struct hns3_mp_param *param = + (const struct hns3_mp_param *)mp_msg->param; + struct rte_eth_dev *dev; + int ret; + + if (!rte_eth_dev_is_valid_port(param->port_id)) { + rte_errno = ENODEV; + PMD_INIT_LOG(ERR, "port %u invalid port ID", param->port_id); + return -rte_errno; + } + dev = &rte_eth_devices[param->port_id]; + switch (param->type) { + case HNS3_MP_REQ_START_RXTX: + PMD_INIT_LOG(INFO, "port %u starting datapath", + dev->data->port_id); + rte_mb(); + hns3_set_rxtx_function(dev); + mp_init_msg(dev, &mp_res, param->type); + res->result = 0; + ret = rte_mp_reply(&mp_res, peer); + break; + case HNS3_MP_REQ_STOP_RXTX: + PMD_INIT_LOG(INFO, "port %u stopping datapath", + dev->data->port_id); + hns3_set_rxtx_function(dev); + rte_mb(); + mp_init_msg(dev, &mp_res, param->type); + res->result = 0; + ret = rte_mp_reply(&mp_res, peer); + break; + default: + rte_errno = EINVAL; + PMD_INIT_LOG(ERR, "port %u invalid mp request type", + dev->data->port_id); + return -rte_errno; + } + return ret; +} + +/* + * Broadcast request of stopping/starting data-path to secondary processes. + * + * @param[in] dev + * Pointer to Ethernet structure. + * @param[in] type + * Request type. + */ +static void +mp_req_on_rxtx(struct rte_eth_dev *dev, enum hns3_mp_req_type type) +{ + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_mp_msg mp_req; + struct rte_mp_msg *mp_res; + struct rte_mp_reply mp_rep; + struct hns3_mp_param *res; + struct timespec ts; + int ret; + int i; + + if (!hw->secondary_cnt) + return; + if (type != HNS3_MP_REQ_START_RXTX && type != HNS3_MP_REQ_STOP_RXTX) { + hns3_err(hw, "port %u unknown request (req_type %d)", + dev->data->port_id, type); + return; + } + mp_init_msg(dev, &mp_req, type); + ts.tv_sec = HNS3_MP_REQ_TIMEOUT_SEC; + ts.tv_nsec = 0; + ret = rte_mp_request_sync(&mp_req, &mp_rep, &ts); + if (ret) { + hns3_err(hw, "port %u failed to request stop/start Rx/Tx (%d)", + dev->data->port_id, type); + goto exit; + } + if (mp_rep.nb_sent != mp_rep.nb_received) { + PMD_INIT_LOG(ERR, + "port %u not all secondaries responded (req_type %d)", + dev->data->port_id, type); + goto exit; + } + for (i = 0; i < mp_rep.nb_received; i++) { + mp_res = &mp_rep.msgs[i]; + res = (struct hns3_mp_param *)mp_res->param; + if (res->result) { + hns3_err(hw, "port %u request failed on secondary #%d", + dev->data->port_id, i); + goto exit; + } + } +exit: + free(mp_rep.msgs); +} + +/* + * Broadcast request of starting data-path to secondary processes. The request + * is synchronous. + * + * @param[in] dev + * Pointer to Ethernet structure. + */ +void hns3_mp_req_start_rxtx(struct rte_eth_dev *dev) +{ + mp_req_on_rxtx(dev, HNS3_MP_REQ_START_RXTX); +} + +/* + * Broadcast request of stopping data-path to secondary processes. The request + * is synchronous. + * + * @param[in] dev + * Pointer to Ethernet structure. + */ +void hns3_mp_req_stop_rxtx(struct rte_eth_dev *dev) +{ + mp_req_on_rxtx(dev, HNS3_MP_REQ_STOP_RXTX); +} + +/* + * Initialize by primary process. + */ +void hns3_mp_init_primary(void) +{ + rte_mp_action_register(HNS3_MP_NAME, mp_primary_handle); +} + +/* + * Un-initialize by primary process. + */ +void hns3_mp_uninit_primary(void) +{ + rte_mp_action_unregister(HNS3_MP_NAME); +} + +/* + * Initialize by secondary process. 
+ */ +void hns3_mp_init_secondary(void) +{ + rte_mp_action_register(HNS3_MP_NAME, mp_secondary_handle); +} diff --git a/src/spdk/dpdk/drivers/net/hns3/hns3_mp.h b/src/spdk/dpdk/drivers/net/hns3/hns3_mp.h new file mode 100644 index 000000000..aefbeb140 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/hns3/hns3_mp.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018-2019 Hisilicon Limited. + */ + +#ifndef _HNS3_MP_H_ +#define _HNS3_MP_H_ + +void hns3_mp_req_start_rxtx(struct rte_eth_dev *dev); +void hns3_mp_req_stop_rxtx(struct rte_eth_dev *dev); +void hns3_mp_init_primary(void); +void hns3_mp_uninit_primary(void); +void hns3_mp_init_secondary(void); + +#endif /* _HNS3_MP_H_ */ diff --git a/src/spdk/dpdk/drivers/net/hns3/hns3_regs.c b/src/spdk/dpdk/drivers/net/hns3/hns3_regs.c new file mode 100644 index 000000000..a3f2a51f9 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/hns3/hns3_regs.c @@ -0,0 +1,375 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018-2019 Hisilicon Limited. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "hns3_ethdev.h" +#include "hns3_logs.h" +#include "hns3_rxtx.h" +#include "hns3_regs.h" + +#define MAX_SEPARATE_NUM 4 +#define SEPARATOR_VALUE 0xFFFFFFFF +#define REG_NUM_PER_LINE 4 +#define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(uint32_t)) + +static const uint32_t cmdq_reg_addrs[] = {HNS3_CMDQ_TX_ADDR_L_REG, + HNS3_CMDQ_TX_ADDR_H_REG, + HNS3_CMDQ_TX_DEPTH_REG, + HNS3_CMDQ_TX_TAIL_REG, + HNS3_CMDQ_TX_HEAD_REG, + HNS3_CMDQ_RX_ADDR_L_REG, + HNS3_CMDQ_RX_ADDR_H_REG, + HNS3_CMDQ_RX_DEPTH_REG, + HNS3_CMDQ_RX_TAIL_REG, + HNS3_CMDQ_RX_HEAD_REG, + HNS3_VECTOR0_CMDQ_SRC_REG, + HNS3_CMDQ_INTR_STS_REG, + HNS3_CMDQ_INTR_EN_REG, + HNS3_CMDQ_INTR_GEN_REG}; + +static const uint32_t common_reg_addrs[] = {HNS3_MISC_VECTOR_REG_BASE, + HNS3_VECTOR0_OTER_EN_REG, + HNS3_MISC_RESET_STS_REG, + HNS3_VECTOR0_OTHER_INT_STS_REG, + HNS3_GLOBAL_RESET_REG, + HNS3_FUN_RST_ING, + HNS3_GRO_EN_REG}; + +static const uint32_t common_vf_reg_addrs[] = {HNS3_MISC_VECTOR_REG_BASE, + HNS3_FUN_RST_ING, + HNS3_GRO_EN_REG}; + +static const uint32_t ring_reg_addrs[] = {HNS3_RING_RX_BASEADDR_L_REG, + HNS3_RING_RX_BASEADDR_H_REG, + HNS3_RING_RX_BD_NUM_REG, + HNS3_RING_RX_BD_LEN_REG, + HNS3_RING_RX_MERGE_EN_REG, + HNS3_RING_RX_TAIL_REG, + HNS3_RING_RX_HEAD_REG, + HNS3_RING_RX_FBDNUM_REG, + HNS3_RING_RX_OFFSET_REG, + HNS3_RING_RX_FBD_OFFSET_REG, + HNS3_RING_RX_STASH_REG, + HNS3_RING_RX_BD_ERR_REG, + HNS3_RING_TX_BASEADDR_L_REG, + HNS3_RING_TX_BASEADDR_H_REG, + HNS3_RING_TX_BD_NUM_REG, + HNS3_RING_TX_PRIORITY_REG, + HNS3_RING_TX_TC_REG, + HNS3_RING_TX_MERGE_EN_REG, + HNS3_RING_TX_TAIL_REG, + HNS3_RING_TX_HEAD_REG, + HNS3_RING_TX_FBDNUM_REG, + HNS3_RING_TX_OFFSET_REG, + HNS3_RING_TX_EBD_NUM_REG, + HNS3_RING_TX_EBD_OFFSET_REG, + HNS3_RING_TX_BD_ERR_REG, + HNS3_RING_EN_REG}; + +static const uint32_t tqp_intr_reg_addrs[] = {HNS3_TQP_INTR_CTRL_REG, + HNS3_TQP_INTR_GL0_REG, + HNS3_TQP_INTR_GL1_REG, + HNS3_TQP_INTR_GL2_REG, + HNS3_TQP_INTR_RL_REG}; + +static int +hns3_get_regs_num(struct hns3_hw *hw, uint32_t *regs_num_32_bit, + uint32_t *regs_num_64_bit) +{ + struct hns3_cmd_desc desc; + int ret; + + hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_REG_NUM, true); + ret = hns3_cmd_send(hw, &desc, 1); + if (ret) { + hns3_err(hw, "Query register number cmd failed, ret = %d", + ret); + return ret; + } + + *regs_num_32_bit = 
rte_le_to_cpu_32(desc.data[0]); + *regs_num_64_bit = rte_le_to_cpu_32(desc.data[1]); + + return 0; +} + +static int +hns3_get_regs_length(struct hns3_hw *hw, uint32_t *length) +{ + struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); + int cmdq_lines, common_lines, ring_lines, tqp_intr_lines; + uint32_t regs_num_32_bit, regs_num_64_bit; + uint32_t len; + int ret; + + cmdq_lines = sizeof(cmdq_reg_addrs) / REG_LEN_PER_LINE + 1; + if (hns->is_vf) + common_lines = + sizeof(common_vf_reg_addrs) / REG_LEN_PER_LINE + 1; + else + common_lines = sizeof(common_reg_addrs) / REG_LEN_PER_LINE + 1; + ring_lines = sizeof(ring_reg_addrs) / REG_LEN_PER_LINE + 1; + tqp_intr_lines = sizeof(tqp_intr_reg_addrs) / REG_LEN_PER_LINE + 1; + + len = (cmdq_lines + common_lines + ring_lines * hw->tqps_num + + tqp_intr_lines * hw->num_msi) * REG_LEN_PER_LINE; + + if (!hns->is_vf) { + ret = hns3_get_regs_num(hw, ®s_num_32_bit, ®s_num_64_bit); + if (ret) { + hns3_err(hw, "Get register number failed, ret = %d.", + ret); + return -ENOTSUP; + } + len += regs_num_32_bit * sizeof(uint32_t) + + regs_num_64_bit * sizeof(uint64_t); + } + + *length = len; + return 0; +} + +static int +hns3_get_32_bit_regs(struct hns3_hw *hw, uint32_t regs_num, void *data) +{ +#define HNS3_32_BIT_REG_RTN_DATANUM 8 +#define HNS3_32_BIT_DESC_NODATA_LEN 2 + struct hns3_cmd_desc *desc; + uint32_t *reg_val = data; + uint32_t *desc_data; + int cmd_num; + int i, k, n; + int ret; + + if (regs_num == 0) + return 0; + + cmd_num = DIV_ROUND_UP(regs_num + HNS3_32_BIT_DESC_NODATA_LEN, + HNS3_32_BIT_REG_RTN_DATANUM); + desc = rte_zmalloc("hns3-32bit-regs", + sizeof(struct hns3_cmd_desc) * cmd_num, 0); + if (desc == NULL) { + hns3_err(hw, "Failed to allocate %zx bytes needed to " + "store 32bit regs", + sizeof(struct hns3_cmd_desc) * cmd_num); + return -ENOMEM; + } + + hns3_cmd_setup_basic_desc(&desc[0], HNS3_OPC_QUERY_32_BIT_REG, true); + ret = hns3_cmd_send(hw, desc, cmd_num); + if (ret) { + hns3_err(hw, "Query 32 bit register cmd failed, ret = %d", + ret); + rte_free(desc); + return ret; + } + + for (i = 0; i < cmd_num; i++) { + if (i == 0) { + desc_data = &desc[i].data[0]; + n = HNS3_32_BIT_REG_RTN_DATANUM - + HNS3_32_BIT_DESC_NODATA_LEN; + } else { + desc_data = (uint32_t *)(&desc[i]); + n = HNS3_32_BIT_REG_RTN_DATANUM; + } + for (k = 0; k < n; k++) { + *reg_val++ = rte_le_to_cpu_32(*desc_data++); + + regs_num--; + if (regs_num == 0) + break; + } + } + + rte_free(desc); + return 0; +} + +static int +hns3_get_64_bit_regs(struct hns3_hw *hw, uint32_t regs_num, void *data) +{ +#define HNS3_64_BIT_REG_RTN_DATANUM 4 +#define HNS3_64_BIT_DESC_NODATA_LEN 1 + struct hns3_cmd_desc *desc; + uint64_t *reg_val = data; + uint64_t *desc_data; + int cmd_num; + int i, k, n; + int ret; + + if (regs_num == 0) + return 0; + + cmd_num = DIV_ROUND_UP(regs_num + HNS3_64_BIT_DESC_NODATA_LEN, + HNS3_64_BIT_REG_RTN_DATANUM); + desc = rte_zmalloc("hns3-64bit-regs", + sizeof(struct hns3_cmd_desc) * cmd_num, 0); + if (desc == NULL) { + hns3_err(hw, "Failed to allocate %zx bytes needed to " + "store 64bit regs", + sizeof(struct hns3_cmd_desc) * cmd_num); + return -ENOMEM; + } + + hns3_cmd_setup_basic_desc(&desc[0], HNS3_OPC_QUERY_64_BIT_REG, true); + ret = hns3_cmd_send(hw, desc, cmd_num); + if (ret) { + hns3_err(hw, "Query 64 bit register cmd failed, ret = %d", + ret); + rte_free(desc); + return ret; + } + + for (i = 0; i < cmd_num; i++) { + if (i == 0) { + desc_data = (uint64_t *)(&desc[i].data[0]); + n = HNS3_64_BIT_REG_RTN_DATANUM - + HNS3_64_BIT_DESC_NODATA_LEN; + } else { + 
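+ /*
+ * Descriptors after the first one carry register values starting at
+ * the descriptor base, HNS3_64_BIT_REG_RTN_DATANUM values each.
+ */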
desc_data = (uint64_t *)(&desc[i]); + n = HNS3_64_BIT_REG_RTN_DATANUM; + } + for (k = 0; k < n; k++) { + *reg_val++ = rte_le_to_cpu_64(*desc_data++); + + regs_num--; + if (!regs_num) + break; + } + } + + rte_free(desc); + return 0; +} + +static void +hns3_direct_access_regs(struct hns3_hw *hw, uint32_t *data) +{ + struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); + uint32_t reg_offset; + int separator_num; + int reg_um; + int i, j; + + /* fetching per-PF registers values from PF PCIe register space */ + reg_um = sizeof(cmdq_reg_addrs) / sizeof(uint32_t); + separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; + for (i = 0; i < reg_um; i++) + *data++ = hns3_read_dev(hw, cmdq_reg_addrs[i]); + for (i = 0; i < separator_num; i++) + *data++ = SEPARATOR_VALUE; + + if (hns->is_vf) + reg_um = sizeof(common_vf_reg_addrs) / sizeof(uint32_t); + else + reg_um = sizeof(common_reg_addrs) / sizeof(uint32_t); + separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; + for (i = 0; i < reg_um; i++) + if (hns->is_vf) + *data++ = hns3_read_dev(hw, common_vf_reg_addrs[i]); + else + *data++ = hns3_read_dev(hw, common_reg_addrs[i]); + for (i = 0; i < separator_num; i++) + *data++ = SEPARATOR_VALUE; + + reg_um = sizeof(ring_reg_addrs) / sizeof(uint32_t); + separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; + for (j = 0; j < hw->tqps_num; j++) { + reg_offset = HNS3_TQP_REG_OFFSET + HNS3_TQP_REG_SIZE * j; + for (i = 0; i < reg_um; i++) + *data++ = hns3_read_dev(hw, + ring_reg_addrs[i] + reg_offset); + for (i = 0; i < separator_num; i++) + *data++ = SEPARATOR_VALUE; + } + + reg_um = sizeof(tqp_intr_reg_addrs) / sizeof(uint32_t); + separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; + for (j = 0; j < hw->num_msi; j++) { + reg_offset = HNS3_TQP_INTR_REG_SIZE * j; + for (i = 0; i < reg_um; i++) + *data++ = hns3_read_dev(hw, + tqp_intr_reg_addrs[i] + + reg_offset); + for (i = 0; i < separator_num; i++) + *data++ = SEPARATOR_VALUE; + } +} + +int +hns3_get_regs(struct rte_eth_dev *eth_dev, struct rte_dev_reg_info *regs) +{ + struct hns3_adapter *hns = eth_dev->data->dev_private; + struct hns3_hw *hw = &hns->hw; + uint32_t regs_num_32_bit; + uint32_t regs_num_64_bit; + uint32_t length; + uint32_t *data; + int ret; + + if (regs == NULL) { + hns3_err(hw, "the input parameter regs is NULL!"); + return -EINVAL; + } + + ret = hns3_get_regs_length(hw, &length); + if (ret) + return ret; + + data = regs->data; + if (data == NULL) { + regs->length = length; + regs->width = sizeof(uint32_t); + return 0; + } + + /* Only full register dump is supported */ + if (regs->length && regs->length != length) + return -ENOTSUP; + + /* fetching per-PF registers values from PF PCIe register space */ + hns3_direct_access_regs(hw, data); + + if (hns->is_vf) + return 0; + + ret = hns3_get_regs_num(hw, ®s_num_32_bit, ®s_num_64_bit); + if (ret) { + hns3_err(hw, "Get register number failed, ret = %d", ret); + return ret; + } + + /* fetching PF common registers values from firmware */ + ret = hns3_get_32_bit_regs(hw, regs_num_32_bit, data); + if (ret) { + hns3_err(hw, "Get 32 bit register failed, ret = %d", ret); + return ret; + } + + data += regs_num_32_bit; + ret = hns3_get_64_bit_regs(hw, regs_num_64_bit, data); + if (ret) + hns3_err(hw, "Get 64 bit register failed, ret = %d", ret); + + return ret; +} diff --git a/src/spdk/dpdk/drivers/net/hns3/hns3_regs.h b/src/spdk/dpdk/drivers/net/hns3/hns3_regs.h new file mode 100644 index 000000000..64bd6931b --- /dev/null +++ b/src/spdk/dpdk/drivers/net/hns3/hns3_regs.h 
@@ -0,0 +1,109 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018-2019 Hisilicon Limited. + */ + +#ifndef _HNS3_REGS_H_ +#define _HNS3_REGS_H_ + +/* bar registers for cmdq */ +#define HNS3_CMDQ_TX_ADDR_L_REG 0x27000 +#define HNS3_CMDQ_TX_ADDR_H_REG 0x27004 +#define HNS3_CMDQ_TX_DEPTH_REG 0x27008 +#define HNS3_CMDQ_TX_TAIL_REG 0x27010 +#define HNS3_CMDQ_TX_HEAD_REG 0x27014 +#define HNS3_CMDQ_RX_ADDR_L_REG 0x27018 +#define HNS3_CMDQ_RX_ADDR_H_REG 0x2701c +#define HNS3_CMDQ_RX_DEPTH_REG 0x27020 +#define HNS3_CMDQ_RX_TAIL_REG 0x27024 +#define HNS3_CMDQ_RX_HEAD_REG 0x27028 +#define HNS3_CMDQ_INTR_STS_REG 0x27104 +#define HNS3_CMDQ_INTR_EN_REG 0x27108 +#define HNS3_CMDQ_INTR_GEN_REG 0x2710C + +/* Vector0 interrupt CMDQ event source register(RW) */ +#define HNS3_VECTOR0_CMDQ_SRC_REG 0x27100 +/* Vector0 interrupt CMDQ event status register(RO) */ +#define HNS3_VECTOR0_CMDQ_STAT_REG 0x27104 + +#define HNS3_VECTOR0_OTHER_INT_STS_REG 0x20800 + +#define HNS3_MISC_VECTOR_REG_BASE 0x20400 +#define HNS3_VECTOR0_OTER_EN_REG 0x20600 +#define HNS3_MISC_RESET_STS_REG 0x20700 +#define HNS3_GLOBAL_RESET_REG 0x20A00 +#define HNS3_FUN_RST_ING 0x20C00 +#define HNS3_GRO_EN_REG 0x28000 + +/* Vector0 register bits for reset */ +#define HNS3_VECTOR0_FUNCRESET_INT_B 0 +#define HNS3_VECTOR0_GLOBALRESET_INT_B 5 +#define HNS3_VECTOR0_CORERESET_INT_B 6 +#define HNS3_VECTOR0_IMPRESET_INT_B 7 + +/* CMDQ register bits for RX event(=MBX event) */ +#define HNS3_VECTOR0_RX_CMDQ_INT_B 1 +#define HNS3_VECTOR0_REG_MSIX_MASK 0x1FF00 +/* RST register bits for RESET event */ +#define HNS3_VECTOR0_RST_INT_B 2 + +#define HNS3_VF_RST_ING 0x07008 +#define HNS3_VF_RST_ING_BIT BIT(16) + +/* bar registers for rcb */ +#define HNS3_RING_RX_BASEADDR_L_REG 0x00000 +#define HNS3_RING_RX_BASEADDR_H_REG 0x00004 +#define HNS3_RING_RX_BD_NUM_REG 0x00008 +#define HNS3_RING_RX_BD_LEN_REG 0x0000C +#define HNS3_RING_RX_MERGE_EN_REG 0x00014 +#define HNS3_RING_RX_TAIL_REG 0x00018 +#define HNS3_RING_RX_HEAD_REG 0x0001C +#define HNS3_RING_RX_FBDNUM_REG 0x00020 +#define HNS3_RING_RX_OFFSET_REG 0x00024 +#define HNS3_RING_RX_FBD_OFFSET_REG 0x00028 +#define HNS3_RING_RX_PKTNUM_RECORD_REG 0x0002C +#define HNS3_RING_RX_STASH_REG 0x00030 +#define HNS3_RING_RX_BD_ERR_REG 0x00034 + +#define HNS3_RING_TX_BASEADDR_L_REG 0x00040 +#define HNS3_RING_TX_BASEADDR_H_REG 0x00044 +#define HNS3_RING_TX_BD_NUM_REG 0x00048 +#define HNS3_RING_TX_PRIORITY_REG 0x0004C +#define HNS3_RING_TX_TC_REG 0x00050 +#define HNS3_RING_TX_MERGE_EN_REG 0x00054 +#define HNS3_RING_TX_TAIL_REG 0x00058 +#define HNS3_RING_TX_HEAD_REG 0x0005C +#define HNS3_RING_TX_FBDNUM_REG 0x00060 +#define HNS3_RING_TX_OFFSET_REG 0x00064 +#define HNS3_RING_TX_EBD_NUM_REG 0x00068 +#define HNS3_RING_TX_PKTNUM_RECORD_REG 0x0006C +#define HNS3_RING_TX_EBD_OFFSET_REG 0x00070 +#define HNS3_RING_TX_BD_ERR_REG 0x00074 + +#define HNS3_RING_EN_REG 0x00090 + +#define HNS3_RING_EN_B 0 + +#define HNS3_TQP_REG_OFFSET 0x80000 +#define HNS3_TQP_REG_SIZE 0x200 + +/* bar registers for tqp interrupt */ +#define HNS3_TQP_INTR_CTRL_REG 0x20000 +#define HNS3_TQP_INTR_GL0_REG 0x20100 +#define HNS3_TQP_INTR_GL1_REG 0x20200 +#define HNS3_TQP_INTR_GL2_REG 0x20300 +#define HNS3_TQP_INTR_RL_REG 0x20900 + +#define HNS3_TQP_INTR_REG_SIZE 4 +#define HNS3_TQP_INTR_GL_MAX 0x1FE0 +#define HNS3_TQP_INTR_GL_DEFAULT 20 +#define HNS3_TQP_INTR_RL_MAX 0xEC +#define HNS3_TQP_INTR_RL_ENABLE_MASK 0x40 +#define HNS3_TQP_INTR_RL_DEFAULT 0 + +/* gl_usec convert to hardware count, as writing each 1 represents 2us */ +#define 
HNS3_GL_USEC_TO_REG(gl_usec) ((gl_usec) >> 1) +/* rl_usec convert to hardware count, as writing each 1 represents 4us */ +#define HNS3_RL_USEC_TO_REG(rl_usec) ((rl_usec) >> 2) + +int hns3_get_regs(struct rte_eth_dev *eth_dev, struct rte_dev_reg_info *regs); +#endif /* _HNS3_REGS_H_ */ diff --git a/src/spdk/dpdk/drivers/net/hns3/hns3_rss.c b/src/spdk/dpdk/drivers/net/hns3/hns3_rss.c new file mode 100644 index 000000000..a6cab29c9 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/hns3/hns3_rss.c @@ -0,0 +1,603 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018-2019 Hisilicon Limited. + */ + +#include +#include +#include +#include +#include +#include + +#include "hns3_ethdev.h" +#include "hns3_logs.h" + +/* + * The hash key used for rss initialization. + */ +static const uint8_t hns3_hash_key[] = { + 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, + 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0, + 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4, + 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, + 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA +}; + +/* + * rss_generic_config command function, opcode:0x0D01. + * Used to set algorithm, key_offset and hash key of rss. + */ +int +hns3_set_rss_algo_key(struct hns3_hw *hw, uint8_t hash_algo, const uint8_t *key) +{ +#define HNS3_KEY_OFFSET_MAX 3 +#define HNS3_SET_HASH_KEY_BYTE_FOUR 2 + + struct hns3_rss_generic_config_cmd *req; + struct hns3_cmd_desc desc; + uint32_t key_offset, key_size; + const uint8_t *key_cur; + uint8_t cur_offset; + int ret; + + req = (struct hns3_rss_generic_config_cmd *)desc.data; + + /* + * key_offset=0, hash key byte0~15 is set to hardware. + * key_offset=1, hash key byte16~31 is set to hardware. + * key_offset=2, hash key byte32~39 is set to hardware. + */ + for (key_offset = 0; key_offset < HNS3_KEY_OFFSET_MAX; key_offset++) { + hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RSS_GENERIC_CONFIG, + false); + + req->hash_config |= (hash_algo & HNS3_RSS_HASH_ALGO_MASK); + req->hash_config |= (key_offset << HNS3_RSS_HASH_KEY_OFFSET_B); + + if (key_offset == HNS3_SET_HASH_KEY_BYTE_FOUR) + key_size = HNS3_RSS_KEY_SIZE - HNS3_RSS_HASH_KEY_NUM * + HNS3_SET_HASH_KEY_BYTE_FOUR; + else + key_size = HNS3_RSS_HASH_KEY_NUM; + + cur_offset = key_offset * HNS3_RSS_HASH_KEY_NUM; + key_cur = key + cur_offset; + memcpy(req->hash_key, key_cur, key_size); + + ret = hns3_cmd_send(hw, &desc, 1); + if (ret) { + hns3_err(hw, "Configure RSS algo key failed %d", ret); + return ret; + } + } + /* Update the shadow RSS key with user specified */ + memcpy(hw->rss_info.key, key, HNS3_RSS_KEY_SIZE); + return 0; +} + +/* + * Used to configure the tuple selection for RSS hash input. 
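+ * The per-flow-type enable fields are taken from the shadow configuration in
+ * hw->rss_info.rss_tuple_sets and written to hardware with the
+ * HNS3_OPC_RSS_INPUT_TUPLE command.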
+ */ +static int +hns3_set_rss_input_tuple(struct hns3_hw *hw) +{ + struct hns3_rss_conf *rss_config = &hw->rss_info; + struct hns3_rss_input_tuple_cmd *req; + struct hns3_cmd_desc desc_tuple; + int ret; + + hns3_cmd_setup_basic_desc(&desc_tuple, HNS3_OPC_RSS_INPUT_TUPLE, false); + + req = (struct hns3_rss_input_tuple_cmd *)desc_tuple.data; + + req->ipv4_tcp_en = rss_config->rss_tuple_sets.ipv4_tcp_en; + req->ipv4_udp_en = rss_config->rss_tuple_sets.ipv4_udp_en; + req->ipv4_sctp_en = rss_config->rss_tuple_sets.ipv4_sctp_en; + req->ipv4_fragment_en = rss_config->rss_tuple_sets.ipv4_fragment_en; + req->ipv6_tcp_en = rss_config->rss_tuple_sets.ipv6_tcp_en; + req->ipv6_udp_en = rss_config->rss_tuple_sets.ipv6_udp_en; + req->ipv6_sctp_en = rss_config->rss_tuple_sets.ipv6_sctp_en; + req->ipv6_fragment_en = rss_config->rss_tuple_sets.ipv6_fragment_en; + + ret = hns3_cmd_send(hw, &desc_tuple, 1); + if (ret) + hns3_err(hw, "Configure RSS input tuple mode failed %d", ret); + + return ret; +} + +/* + * rss_indirection_table command function, opcode:0x0D07. + * Used to configure the indirection table of rss. + */ +int +hns3_set_rss_indir_table(struct hns3_hw *hw, uint8_t *indir, uint16_t size) +{ + struct hns3_rss_indirection_table_cmd *req; + struct hns3_cmd_desc desc; + int ret, i, j, num; + + req = (struct hns3_rss_indirection_table_cmd *)desc.data; + + for (i = 0; i < size / HNS3_RSS_CFG_TBL_SIZE; i++) { + hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RSS_INDIR_TABLE, + false); + req->start_table_index = + rte_cpu_to_le_16(i * HNS3_RSS_CFG_TBL_SIZE); + req->rss_set_bitmap = rte_cpu_to_le_16(HNS3_RSS_SET_BITMAP_MSK); + for (j = 0; j < HNS3_RSS_CFG_TBL_SIZE; j++) { + num = i * HNS3_RSS_CFG_TBL_SIZE + j; + req->rss_result[j] = indir[num]; + } + ret = hns3_cmd_send(hw, &desc, 1); + if (ret) { + hns3_err(hw, + "Sets RSS indirection table failed %d size %u", + ret, size); + return ret; + } + } + + /* Update redirection table of hw */ + memcpy(hw->rss_info.rss_indirection_tbl, indir, HNS3_RSS_IND_TBL_SIZE); + + return 0; +} + +int +hns3_rss_reset_indir_table(struct hns3_hw *hw) +{ + uint8_t *lut; + int ret; + + lut = rte_zmalloc("hns3_rss_lut", HNS3_RSS_IND_TBL_SIZE, 0); + if (lut == NULL) { + hns3_err(hw, "No hns3_rss_lut memory can be allocated"); + return -ENOMEM; + } + + ret = hns3_set_rss_indir_table(hw, lut, HNS3_RSS_IND_TBL_SIZE); + if (ret) + hns3_err(hw, "RSS uninit indir table failed: %d", ret); + rte_free(lut); + + return ret; +} + +int +hns3_set_rss_tuple_by_rss_hf(struct hns3_hw *hw, + struct hns3_rss_tuple_cfg *tuple, uint64_t rss_hf) +{ + struct hns3_rss_input_tuple_cmd *req; + struct hns3_cmd_desc desc; + uint32_t i; + int ret; + + hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RSS_INPUT_TUPLE, false); + + req = (struct hns3_rss_input_tuple_cmd *)desc.data; + + /* Enable ipv4 or ipv6 tuple by flow type */ + for (i = 0; i < RTE_ETH_FLOW_MAX; i++) { + switch (rss_hf & (1ULL << i)) { + case ETH_RSS_NONFRAG_IPV4_TCP: + req->ipv4_tcp_en = HNS3_RSS_INPUT_TUPLE_OTHER; + break; + case ETH_RSS_NONFRAG_IPV4_UDP: + req->ipv4_udp_en = HNS3_RSS_INPUT_TUPLE_OTHER; + break; + case ETH_RSS_NONFRAG_IPV4_SCTP: + req->ipv4_sctp_en = HNS3_RSS_INPUT_TUPLE_SCTP; + break; + case ETH_RSS_FRAG_IPV4: + req->ipv4_fragment_en |= HNS3_IP_FRAG_BIT_MASK; + break; + case ETH_RSS_NONFRAG_IPV4_OTHER: + req->ipv4_fragment_en |= HNS3_IP_OTHER_BIT_MASK; + break; + case ETH_RSS_NONFRAG_IPV6_TCP: + req->ipv6_tcp_en = HNS3_RSS_INPUT_TUPLE_OTHER; + break; + case ETH_RSS_NONFRAG_IPV6_UDP: + req->ipv6_udp_en = HNS3_RSS_INPUT_TUPLE_OTHER; + 
break; + case ETH_RSS_NONFRAG_IPV6_SCTP: + req->ipv6_sctp_en = HNS3_RSS_INPUT_TUPLE_SCTP; + break; + case ETH_RSS_FRAG_IPV6: + req->ipv6_fragment_en |= HNS3_IP_FRAG_BIT_MASK; + break; + case ETH_RSS_NONFRAG_IPV6_OTHER: + req->ipv6_fragment_en |= HNS3_IP_OTHER_BIT_MASK; + break; + default: + /* + * rss_hf doesn't include unsupported flow types + * because the API framework has checked it, and + * this branch will never go unless rss_hf is zero. + */ + break; + } + } + + ret = hns3_cmd_send(hw, &desc, 1); + if (ret) { + hns3_err(hw, "Update RSS flow types tuples failed %d", ret); + return ret; + } + + tuple->ipv4_tcp_en = req->ipv4_tcp_en; + tuple->ipv4_udp_en = req->ipv4_udp_en; + tuple->ipv4_sctp_en = req->ipv4_sctp_en; + tuple->ipv4_fragment_en = req->ipv4_fragment_en; + tuple->ipv6_tcp_en = req->ipv6_tcp_en; + tuple->ipv6_udp_en = req->ipv6_udp_en; + tuple->ipv6_sctp_en = req->ipv6_sctp_en; + tuple->ipv6_fragment_en = req->ipv6_fragment_en; + + return 0; +} + +/* + * Configure RSS hash protocols and hash key. + * @param dev + * Pointer to Ethernet device. + * @praram rss_conf + * The configuration select of rss key size and tuple flow_types. + * @return + * 0 on success, a negative errno value otherwise is set. + */ +int +hns3_dev_rss_hash_update(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf) +{ + struct hns3_adapter *hns = dev->data->dev_private; + struct hns3_hw *hw = &hns->hw; + struct hns3_rss_tuple_cfg *tuple = &hw->rss_info.rss_tuple_sets; + struct hns3_rss_conf *rss_cfg = &hw->rss_info; + uint8_t key_len = rss_conf->rss_key_len; + uint8_t algo; + uint64_t rss_hf = rss_conf->rss_hf; + uint8_t *key = rss_conf->rss_key; + int ret; + + if (hw->rss_dis_flag) + return -EINVAL; + + rte_spinlock_lock(&hw->lock); + ret = hns3_set_rss_tuple_by_rss_hf(hw, tuple, rss_hf); + if (ret) + goto conf_err; + + if (rss_cfg->conf.types && rss_hf == 0) { + /* Disable RSS, reset indirection table by local variable */ + ret = hns3_rss_reset_indir_table(hw); + if (ret) + goto conf_err; + } else if (rss_hf && rss_cfg->conf.types == 0) { + /* Enable RSS, restore indirection table by hw's config */ + ret = hns3_set_rss_indir_table(hw, rss_cfg->rss_indirection_tbl, + HNS3_RSS_IND_TBL_SIZE); + if (ret) + goto conf_err; + } + + /* Update supported flow types when set tuple success */ + rss_cfg->conf.types = rss_hf; + + if (key) { + if (key_len != HNS3_RSS_KEY_SIZE) { + hns3_err(hw, "The hash key len(%u) is invalid", + key_len); + ret = -EINVAL; + goto conf_err; + } + algo = rss_cfg->conf.func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR ? + HNS3_RSS_HASH_ALGO_SIMPLE : HNS3_RSS_HASH_ALGO_TOEPLITZ; + ret = hns3_set_rss_algo_key(hw, algo, key); + if (ret) + goto conf_err; + } + rte_spinlock_unlock(&hw->lock); + + return 0; + +conf_err: + rte_spinlock_unlock(&hw->lock); + return ret; +} + +/* + * Get rss key and rss_hf types set of RSS hash configuration. + * @param dev + * Pointer to Ethernet device. + * @praram rss_conf + * The buffer to get rss key size and tuple types. + * @return + * 0 on success. 
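+ * The hash key is copied back only when rss_key is non-NULL and rss_key_len
+ * is at least HNS3_RSS_KEY_SIZE bytes; rss_key_len is then set to the actual
+ * key size.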
+ */ +int +hns3_dev_rss_hash_conf_get(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf) +{ + struct hns3_adapter *hns = dev->data->dev_private; + struct hns3_hw *hw = &hns->hw; + struct hns3_rss_conf *rss_cfg = &hw->rss_info; + + rte_spinlock_lock(&hw->lock); + rss_conf->rss_hf = rss_cfg->conf.types; + + /* Get the RSS Key required by the user */ + if (rss_conf->rss_key && rss_conf->rss_key_len >= HNS3_RSS_KEY_SIZE) { + memcpy(rss_conf->rss_key, rss_cfg->key, HNS3_RSS_KEY_SIZE); + rss_conf->rss_key_len = HNS3_RSS_KEY_SIZE; + } + rte_spinlock_unlock(&hw->lock); + + return 0; +} + +/* + * Update rss redirection table of RSS. + * @param dev + * Pointer to Ethernet device. + * @praram reta_conf + * Pointer to the configuration select of mask and redirection tables. + * @param reta_size + * Redirection table size. + * @return + * 0 on success, a negative errno value otherwise is set. + */ +int +hns3_dev_rss_reta_update(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size) +{ + struct hns3_adapter *hns = dev->data->dev_private; + struct hns3_hw *hw = &hns->hw; + struct hns3_rss_conf *rss_cfg = &hw->rss_info; + uint16_t i, indir_size = HNS3_RSS_IND_TBL_SIZE; /* Table size is 512 */ + uint8_t indirection_tbl[HNS3_RSS_IND_TBL_SIZE]; + uint16_t idx, shift, allow_rss_queues; + int ret; + + if (reta_size != indir_size || reta_size > ETH_RSS_RETA_SIZE_512) { + hns3_err(hw, "The size of hash lookup table configured (%u)" + "doesn't match the number hardware can supported" + "(%u)", reta_size, indir_size); + return -EINVAL; + } + rte_spinlock_lock(&hw->lock); + memcpy(indirection_tbl, rss_cfg->rss_indirection_tbl, + HNS3_RSS_IND_TBL_SIZE); + allow_rss_queues = RTE_MIN(dev->data->nb_rx_queues, hw->rss_size_max); + for (i = 0; i < reta_size; i++) { + idx = i / RTE_RETA_GROUP_SIZE; + shift = i % RTE_RETA_GROUP_SIZE; + if (reta_conf[idx].reta[shift] >= allow_rss_queues) { + rte_spinlock_unlock(&hw->lock); + hns3_err(hw, "Invalid queue id(%u) to be set in " + "redirection table, max number of rss " + "queues: %u", reta_conf[idx].reta[shift], + allow_rss_queues); + return -EINVAL; + } + + if (reta_conf[idx].mask & (1ULL << shift)) + indirection_tbl[i] = reta_conf[idx].reta[shift]; + } + + ret = hns3_set_rss_indir_table(hw, indirection_tbl, + HNS3_RSS_IND_TBL_SIZE); + + rte_spinlock_unlock(&hw->lock); + return ret; +} + +/* + * Get rss redirection table of RSS hash configuration. + * @param dev + * Pointer to Ethernet device. + * @praram reta_conf + * Pointer to the configuration select of mask and redirection tables. + * @param reta_size + * Redirection table size. + * @return + * 0 on success, a negative errno value otherwise is set. 
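+ * Only entries whose bit is set in reta_conf[].mask are filled in, and
+ * reta_size must equal HNS3_RSS_IND_TBL_SIZE.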
+ */ +int +hns3_dev_rss_reta_query(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size) +{ + struct hns3_adapter *hns = dev->data->dev_private; + struct hns3_hw *hw = &hns->hw; + struct hns3_rss_conf *rss_cfg = &hw->rss_info; + uint16_t i, indir_size = HNS3_RSS_IND_TBL_SIZE; /* Table size is 512 */ + uint16_t idx, shift; + + if (reta_size != indir_size || reta_size > ETH_RSS_RETA_SIZE_512) { + hns3_err(hw, "The size of hash lookup table configured (%u)" + " doesn't match the number hardware can supported" + "(%u)", reta_size, indir_size); + return -EINVAL; + } + rte_spinlock_lock(&hw->lock); + for (i = 0; i < reta_size; i++) { + idx = i / RTE_RETA_GROUP_SIZE; + shift = i % RTE_RETA_GROUP_SIZE; + if (reta_conf[idx].mask & (1ULL << shift)) + reta_conf[idx].reta[shift] = + rss_cfg->rss_indirection_tbl[i]; + } + rte_spinlock_unlock(&hw->lock); + return 0; +} + +/* + * Used to configure the tc_size and tc_offset. + */ +static int +hns3_set_rss_tc_mode(struct hns3_hw *hw) +{ + uint16_t rss_size = hw->alloc_rss_size; + struct hns3_rss_tc_mode_cmd *req; + uint16_t tc_offset[HNS3_MAX_TC_NUM]; + uint8_t tc_valid[HNS3_MAX_TC_NUM]; + uint16_t tc_size[HNS3_MAX_TC_NUM]; + struct hns3_cmd_desc desc; + uint16_t roundup_size; + uint16_t i; + int ret; + + req = (struct hns3_rss_tc_mode_cmd *)desc.data; + + roundup_size = roundup_pow_of_two(rss_size); + roundup_size = ilog2(roundup_size); + + for (i = 0; i < HNS3_MAX_TC_NUM; i++) { + tc_valid[i] = !!(hw->hw_tc_map & BIT(i)); + tc_size[i] = roundup_size; + tc_offset[i] = rss_size * i; + } + + hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RSS_TC_MODE, false); + for (i = 0; i < HNS3_MAX_TC_NUM; i++) { + uint16_t mode = 0; + + hns3_set_bit(mode, HNS3_RSS_TC_VALID_B, (tc_valid[i] & 0x1)); + hns3_set_field(mode, HNS3_RSS_TC_SIZE_M, HNS3_RSS_TC_SIZE_S, + tc_size[i]); + hns3_set_field(mode, HNS3_RSS_TC_OFFSET_M, HNS3_RSS_TC_OFFSET_S, + tc_offset[i]); + + req->rss_tc_mode[i] = rte_cpu_to_le_16(mode); + } + ret = hns3_cmd_send(hw, &desc, 1); + if (ret) + hns3_err(hw, "Sets rss tc mode failed %d", ret); + + return ret; +} + +static void +hns3_rss_tuple_uninit(struct hns3_hw *hw) +{ + struct hns3_rss_input_tuple_cmd *req; + struct hns3_cmd_desc desc; + int ret; + + hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RSS_INPUT_TUPLE, false); + + req = (struct hns3_rss_input_tuple_cmd *)desc.data; + + memset(req, 0, sizeof(struct hns3_rss_tuple_cfg)); + + ret = hns3_cmd_send(hw, &desc, 1); + if (ret) { + hns3_err(hw, "RSS uninit tuple failed %d", ret); + return; + } +} + +/* + * Set the default rss configuration in the init of driver. + */ +void +hns3_set_default_rss_args(struct hns3_hw *hw) +{ + struct hns3_rss_conf *rss_cfg = &hw->rss_info; + uint16_t queue_num = hw->alloc_rss_size; + int i; + + /* Default hash algorithm */ + rss_cfg->conf.func = RTE_ETH_HASH_FUNCTION_TOEPLITZ; + + /* Default RSS key */ + memcpy(rss_cfg->key, hns3_hash_key, HNS3_RSS_KEY_SIZE); + + /* Initialize RSS indirection table */ + for (i = 0; i < HNS3_RSS_IND_TBL_SIZE; i++) + rss_cfg->rss_indirection_tbl[i] = i % queue_num; +} + +/* + * RSS initialization for hns3 pmd driver. + */ +int +hns3_config_rss(struct hns3_adapter *hns) +{ + struct hns3_hw *hw = &hns->hw; + struct hns3_rss_conf *rss_cfg = &hw->rss_info; + uint8_t hash_algo = + (hw->rss_info.conf.func == RTE_ETH_HASH_FUNCTION_TOEPLITZ ? 
+ HNS3_RSS_HASH_ALGO_TOEPLITZ : HNS3_RSS_HASH_ALGO_SIMPLE); + uint8_t *hash_key = rss_cfg->key; + int ret, ret1; + + enum rte_eth_rx_mq_mode mq_mode = hw->data->dev_conf.rxmode.mq_mode; + + /* When RSS is off, redirect the packet queue 0 */ + if (((uint32_t)mq_mode & ETH_MQ_RX_RSS_FLAG) == 0) + hns3_rss_uninit(hns); + + /* Configure RSS hash algorithm and hash key offset */ + ret = hns3_set_rss_algo_key(hw, hash_algo, hash_key); + if (ret) + return ret; + + /* Configure the tuple selection for RSS hash input */ + ret = hns3_set_rss_input_tuple(hw); + if (ret) + return ret; + + /* + * When RSS is off, it doesn't need to configure rss redirection table + * to hardware. + */ + if (((uint32_t)mq_mode & ETH_MQ_RX_RSS_FLAG)) { + ret = hns3_set_rss_indir_table(hw, rss_cfg->rss_indirection_tbl, + HNS3_RSS_IND_TBL_SIZE); + if (ret) + goto rss_tuple_uninit; + } + + ret = hns3_set_rss_tc_mode(hw); + if (ret) + goto rss_indir_table_uninit; + + return ret; + +rss_indir_table_uninit: + if (((uint32_t)mq_mode & ETH_MQ_RX_RSS_FLAG)) { + ret1 = hns3_rss_reset_indir_table(hw); + if (ret1 != 0) + return ret; + } + +rss_tuple_uninit: + hns3_rss_tuple_uninit(hw); + + /* Disable RSS */ + hw->rss_info.conf.types = 0; + + return ret; +} + +/* + * RSS uninitialization for hns3 pmd driver. + */ +void +hns3_rss_uninit(struct hns3_adapter *hns) +{ + struct hns3_hw *hw = &hns->hw; + int ret; + + hns3_rss_tuple_uninit(hw); + ret = hns3_rss_reset_indir_table(hw); + if (ret != 0) + return; + + /* Disable RSS */ + hw->rss_info.conf.types = 0; +} diff --git a/src/spdk/dpdk/drivers/net/hns3/hns3_rss.h b/src/spdk/dpdk/drivers/net/hns3/hns3_rss.h new file mode 100644 index 000000000..3c7905132 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/hns3/hns3_rss.h @@ -0,0 +1,116 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018-2019 Hisilicon Limited. 
+ */ + +#ifndef _HNS3_RSS_H_ +#define _HNS3_RSS_H_ +#include +#include + +#define HNS3_ETH_RSS_SUPPORT ( \ + ETH_RSS_FRAG_IPV4 | \ + ETH_RSS_NONFRAG_IPV4_TCP | \ + ETH_RSS_NONFRAG_IPV4_UDP | \ + ETH_RSS_NONFRAG_IPV4_SCTP | \ + ETH_RSS_NONFRAG_IPV4_OTHER | \ + ETH_RSS_FRAG_IPV6 | \ + ETH_RSS_NONFRAG_IPV6_TCP | \ + ETH_RSS_NONFRAG_IPV6_UDP | \ + ETH_RSS_NONFRAG_IPV6_SCTP | \ + ETH_RSS_NONFRAG_IPV6_OTHER) + +#define HNS3_RSS_IND_TBL_SIZE 512 /* The size of hash lookup table */ +#define HNS3_RSS_KEY_SIZE 40 +#define HNS3_RSS_CFG_TBL_NUM \ + (HNS3_RSS_IND_TBL_SIZE / HNS3_RSS_CFG_TBL_SIZE) +#define HNS3_RSS_SET_BITMAP_MSK 0xffff + +#define HNS3_RSS_HASH_ALGO_TOEPLITZ 0 +#define HNS3_RSS_HASH_ALGO_SIMPLE 1 +#define HNS3_RSS_HASH_ALGO_MASK 0xf + +#define HNS3_RSS_INPUT_TUPLE_OTHER GENMASK(3, 0) +#define HNS3_RSS_INPUT_TUPLE_SCTP GENMASK(4, 0) +#define HNS3_IP_FRAG_BIT_MASK GENMASK(3, 2) +#define HNS3_IP_OTHER_BIT_MASK GENMASK(1, 0) + +struct hns3_rss_tuple_cfg { + uint8_t ipv4_tcp_en; /* Bit8.0~8.3 */ + uint8_t ipv4_udp_en; /* Bit9.0~9.3 */ + uint8_t ipv4_sctp_en; /* Bit10.0~10.4 */ + uint8_t ipv4_fragment_en; /* Bit11.0~11.3 */ + uint8_t ipv6_tcp_en; /* Bit12.0~12.3 */ + uint8_t ipv6_udp_en; /* Bit13.0~13.3 */ + uint8_t ipv6_sctp_en; /* Bit14.0~14.4 */ + uint8_t ipv6_fragment_en; /* Bit15.0~15.3 */ +}; + +#define HNS3_RSS_QUEUES_BUFFER_NUM 64 /* Same as the Max rx/tx queue num */ +struct hns3_rss_conf { + /* RSS parameters :algorithm, flow_types, key, queue */ + struct rte_flow_action_rss conf; + uint8_t key[HNS3_RSS_KEY_SIZE]; /* Hash key */ + struct hns3_rss_tuple_cfg rss_tuple_sets; + uint8_t rss_indirection_tbl[HNS3_RSS_IND_TBL_SIZE]; /* Shadow table */ + uint16_t queue[HNS3_RSS_QUEUES_BUFFER_NUM]; /* Queues indices to use */ +}; + +#ifndef ilog2 +static inline int rss_ilog2(uint32_t x) +{ + int log = 0; + x >>= 1; + + while (x) { + log++; + x >>= 1; + } + return log; +} +#define ilog2(x) rss_ilog2(x) +#endif + +static inline uint32_t fls(uint32_t x) +{ + uint32_t position; + uint32_t i; + + if (x == 0) + return 0; + + for (i = (x >> 1), position = 0; i != 0; ++position) + i >>= 1; + + return position + 1; +} + +static inline uint32_t roundup_pow_of_two(uint32_t x) +{ + return 1UL << fls(x - 1); +} + +struct hns3_adapter; + +int hns3_dev_rss_hash_update(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf); +int hns3_dev_rss_hash_conf_get(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf); +int hns3_dev_rss_reta_update(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size); +int hns3_dev_rss_reta_query(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size); +void hns3_set_default_rss_args(struct hns3_hw *hw); +int hns3_set_rss_indir_table(struct hns3_hw *hw, uint8_t *indir, uint16_t size); +int hns3_rss_reset_indir_table(struct hns3_hw *hw); +int hns3_config_rss(struct hns3_adapter *hns); +void hns3_rss_uninit(struct hns3_adapter *hns); +int hns3_set_rss_tuple_by_rss_hf(struct hns3_hw *hw, + struct hns3_rss_tuple_cfg *tuple, + uint64_t rss_hf); +int hns3_set_rss_algo_key(struct hns3_hw *hw, uint8_t hash_algo, + const uint8_t *key); +int hns3_restore_rss_filter(struct rte_eth_dev *dev); + +#endif /* _HNS3_RSS_H_ */ diff --git a/src/spdk/dpdk/drivers/net/hns3/hns3_rxtx.c b/src/spdk/dpdk/drivers/net/hns3/hns3_rxtx.c new file mode 100644 index 000000000..8b3ced116 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/hns3/hns3_rxtx.c @@ -0,0 +1,2515 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 
2018-2019 Hisilicon Limited. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "hns3_ethdev.h" +#include "hns3_rxtx.h" +#include "hns3_regs.h" +#include "hns3_logs.h" + +#define HNS3_CFG_DESC_NUM(num) ((num) / 8 - 1) +#define DEFAULT_RX_FREE_THRESH 32 + +static void +hns3_rx_queue_release_mbufs(struct hns3_rx_queue *rxq) +{ + uint16_t i; + + /* Note: Fake rx queue will not enter here */ + if (rxq->sw_ring) { + for (i = 0; i < rxq->nb_rx_desc; i++) { + if (rxq->sw_ring[i].mbuf) { + rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf); + rxq->sw_ring[i].mbuf = NULL; + } + } + } +} + +static void +hns3_tx_queue_release_mbufs(struct hns3_tx_queue *txq) +{ + uint16_t i; + + /* Note: Fake rx queue will not enter here */ + if (txq->sw_ring) { + for (i = 0; i < txq->nb_tx_desc; i++) { + if (txq->sw_ring[i].mbuf) { + rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf); + txq->sw_ring[i].mbuf = NULL; + } + } + } +} + +static void +hns3_rx_queue_release(void *queue) +{ + struct hns3_rx_queue *rxq = queue; + if (rxq) { + hns3_rx_queue_release_mbufs(rxq); + if (rxq->mz) + rte_memzone_free(rxq->mz); + if (rxq->sw_ring) + rte_free(rxq->sw_ring); + rte_free(rxq); + } +} + +static void +hns3_tx_queue_release(void *queue) +{ + struct hns3_tx_queue *txq = queue; + if (txq) { + hns3_tx_queue_release_mbufs(txq); + if (txq->mz) + rte_memzone_free(txq->mz); + if (txq->sw_ring) + rte_free(txq->sw_ring); + rte_free(txq); + } +} + +void +hns3_dev_rx_queue_release(void *queue) +{ + struct hns3_rx_queue *rxq = queue; + struct hns3_adapter *hns; + + if (rxq == NULL) + return; + + hns = rxq->hns; + rte_spinlock_lock(&hns->hw.lock); + hns3_rx_queue_release(queue); + rte_spinlock_unlock(&hns->hw.lock); +} + +void +hns3_dev_tx_queue_release(void *queue) +{ + struct hns3_tx_queue *txq = queue; + struct hns3_adapter *hns; + + if (txq == NULL) + return; + + hns = txq->hns; + rte_spinlock_lock(&hns->hw.lock); + hns3_tx_queue_release(queue); + rte_spinlock_unlock(&hns->hw.lock); +} + +static void +hns3_fake_rx_queue_release(struct hns3_rx_queue *queue) +{ + struct hns3_rx_queue *rxq = queue; + struct hns3_adapter *hns; + struct hns3_hw *hw; + uint16_t idx; + + if (rxq == NULL) + return; + + hns = rxq->hns; + hw = &hns->hw; + idx = rxq->queue_id; + if (hw->fkq_data.rx_queues[idx]) { + hns3_rx_queue_release(hw->fkq_data.rx_queues[idx]); + hw->fkq_data.rx_queues[idx] = NULL; + } + + /* free fake rx queue arrays */ + if (idx == (hw->fkq_data.nb_fake_rx_queues - 1)) { + hw->fkq_data.nb_fake_rx_queues = 0; + rte_free(hw->fkq_data.rx_queues); + hw->fkq_data.rx_queues = NULL; + } +} + +static void +hns3_fake_tx_queue_release(struct hns3_tx_queue *queue) +{ + struct hns3_tx_queue *txq = queue; + struct hns3_adapter *hns; + struct hns3_hw *hw; + uint16_t idx; + + if (txq == NULL) + return; + + hns = txq->hns; + hw = &hns->hw; + idx = txq->queue_id; + if (hw->fkq_data.tx_queues[idx]) { + hns3_tx_queue_release(hw->fkq_data.tx_queues[idx]); + hw->fkq_data.tx_queues[idx] = NULL; + } + + /* free fake tx queue arrays */ + if (idx == (hw->fkq_data.nb_fake_tx_queues - 1)) { + hw->fkq_data.nb_fake_tx_queues = 0; + rte_free(hw->fkq_data.tx_queues); + hw->fkq_data.tx_queues = NULL; + } +} + +static void +hns3_free_rx_queues(struct rte_eth_dev *dev) +{ + struct hns3_adapter *hns = dev->data->dev_private; + struct hns3_fake_queue_data *fkq_data; + struct hns3_hw *hw = &hns->hw; + uint16_t 
nb_rx_q; + uint16_t i; + + nb_rx_q = hw->data->nb_rx_queues; + for (i = 0; i < nb_rx_q; i++) { + if (dev->data->rx_queues[i]) { + hns3_rx_queue_release(dev->data->rx_queues[i]); + dev->data->rx_queues[i] = NULL; + } + } + + /* Free fake Rx queues */ + fkq_data = &hw->fkq_data; + for (i = 0; i < fkq_data->nb_fake_rx_queues; i++) { + if (fkq_data->rx_queues[i]) + hns3_fake_rx_queue_release(fkq_data->rx_queues[i]); + } +} + +static void +hns3_free_tx_queues(struct rte_eth_dev *dev) +{ + struct hns3_adapter *hns = dev->data->dev_private; + struct hns3_fake_queue_data *fkq_data; + struct hns3_hw *hw = &hns->hw; + uint16_t nb_tx_q; + uint16_t i; + + nb_tx_q = hw->data->nb_tx_queues; + for (i = 0; i < nb_tx_q; i++) { + if (dev->data->tx_queues[i]) { + hns3_tx_queue_release(dev->data->tx_queues[i]); + dev->data->tx_queues[i] = NULL; + } + } + + /* Free fake Tx queues */ + fkq_data = &hw->fkq_data; + for (i = 0; i < fkq_data->nb_fake_tx_queues; i++) { + if (fkq_data->tx_queues[i]) + hns3_fake_tx_queue_release(fkq_data->tx_queues[i]); + } +} + +void +hns3_free_all_queues(struct rte_eth_dev *dev) +{ + hns3_free_rx_queues(dev); + hns3_free_tx_queues(dev); +} + +static int +hns3_alloc_rx_queue_mbufs(struct hns3_hw *hw, struct hns3_rx_queue *rxq) +{ + struct rte_mbuf *mbuf; + uint64_t dma_addr; + uint16_t i; + + for (i = 0; i < rxq->nb_rx_desc; i++) { + mbuf = rte_mbuf_raw_alloc(rxq->mb_pool); + if (unlikely(mbuf == NULL)) { + hns3_err(hw, "Failed to allocate RXD[%d] for rx queue!", + i); + hns3_rx_queue_release_mbufs(rxq); + return -ENOMEM; + } + + rte_mbuf_refcnt_set(mbuf, 1); + mbuf->next = NULL; + mbuf->data_off = RTE_PKTMBUF_HEADROOM; + mbuf->nb_segs = 1; + mbuf->port = rxq->port_id; + + rxq->sw_ring[i].mbuf = mbuf; + dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf)); + rxq->rx_ring[i].addr = dma_addr; + rxq->rx_ring[i].rx.bd_base_info = 0; + } + + return 0; +} + +static int +hns3_buf_size2type(uint32_t buf_size) +{ + int bd_size_type; + + switch (buf_size) { + case 512: + bd_size_type = HNS3_BD_SIZE_512_TYPE; + break; + case 1024: + bd_size_type = HNS3_BD_SIZE_1024_TYPE; + break; + case 4096: + bd_size_type = HNS3_BD_SIZE_4096_TYPE; + break; + default: + bd_size_type = HNS3_BD_SIZE_2048_TYPE; + } + + return bd_size_type; +} + +static void +hns3_init_rx_queue_hw(struct hns3_rx_queue *rxq) +{ + uint32_t rx_buf_len = rxq->rx_buf_len; + uint64_t dma_addr = rxq->rx_ring_phys_addr; + + hns3_write_dev(rxq, HNS3_RING_RX_BASEADDR_L_REG, (uint32_t)dma_addr); + hns3_write_dev(rxq, HNS3_RING_RX_BASEADDR_H_REG, + (uint32_t)((dma_addr >> 31) >> 1)); + + hns3_write_dev(rxq, HNS3_RING_RX_BD_LEN_REG, + hns3_buf_size2type(rx_buf_len)); + hns3_write_dev(rxq, HNS3_RING_RX_BD_NUM_REG, + HNS3_CFG_DESC_NUM(rxq->nb_rx_desc)); +} + +static void +hns3_init_tx_queue_hw(struct hns3_tx_queue *txq) +{ + uint64_t dma_addr = txq->tx_ring_phys_addr; + + hns3_write_dev(txq, HNS3_RING_TX_BASEADDR_L_REG, (uint32_t)dma_addr); + hns3_write_dev(txq, HNS3_RING_TX_BASEADDR_H_REG, + (uint32_t)((dma_addr >> 31) >> 1)); + + hns3_write_dev(txq, HNS3_RING_TX_BD_NUM_REG, + HNS3_CFG_DESC_NUM(txq->nb_tx_desc)); +} + +void +hns3_enable_all_queues(struct hns3_hw *hw, bool en) +{ + uint16_t nb_rx_q = hw->data->nb_rx_queues; + uint16_t nb_tx_q = hw->data->nb_tx_queues; + struct hns3_rx_queue *rxq; + struct hns3_tx_queue *txq; + uint32_t rcb_reg; + int i; + + for (i = 0; i < hw->cfg_max_queues; i++) { + if (i < nb_rx_q) + rxq = hw->data->rx_queues[i]; + else + rxq = hw->fkq_data.rx_queues[i - nb_rx_q]; + if (i < nb_tx_q) + txq = 
hw->data->tx_queues[i]; + else + txq = hw->fkq_data.tx_queues[i - nb_tx_q]; + if (rxq == NULL || txq == NULL || + (en && (rxq->rx_deferred_start || txq->tx_deferred_start))) + continue; + + rcb_reg = hns3_read_dev(rxq, HNS3_RING_EN_REG); + if (en) + rcb_reg |= BIT(HNS3_RING_EN_B); + else + rcb_reg &= ~BIT(HNS3_RING_EN_B); + hns3_write_dev(rxq, HNS3_RING_EN_REG, rcb_reg); + } +} + +static int +hns3_tqp_enable(struct hns3_hw *hw, uint16_t queue_id, bool enable) +{ + struct hns3_cfg_com_tqp_queue_cmd *req; + struct hns3_cmd_desc desc; + int ret; + + req = (struct hns3_cfg_com_tqp_queue_cmd *)desc.data; + + hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_COM_TQP_QUEUE, false); + req->tqp_id = rte_cpu_to_le_16(queue_id & HNS3_RING_ID_MASK); + req->stream_id = 0; + hns3_set_bit(req->enable, HNS3_TQP_ENABLE_B, enable ? 1 : 0); + + ret = hns3_cmd_send(hw, &desc, 1); + if (ret) + hns3_err(hw, "TQP enable fail, ret = %d", ret); + + return ret; +} + +static int +hns3_send_reset_tqp_cmd(struct hns3_hw *hw, uint16_t queue_id, bool enable) +{ + struct hns3_reset_tqp_queue_cmd *req; + struct hns3_cmd_desc desc; + int ret; + + hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RESET_TQP_QUEUE, false); + + req = (struct hns3_reset_tqp_queue_cmd *)desc.data; + req->tqp_id = rte_cpu_to_le_16(queue_id & HNS3_RING_ID_MASK); + hns3_set_bit(req->reset_req, HNS3_TQP_RESET_B, enable ? 1 : 0); + + ret = hns3_cmd_send(hw, &desc, 1); + if (ret) + hns3_err(hw, "Send tqp reset cmd error, ret = %d", ret); + + return ret; +} + +static int +hns3_get_reset_status(struct hns3_hw *hw, uint16_t queue_id) +{ + struct hns3_reset_tqp_queue_cmd *req; + struct hns3_cmd_desc desc; + int ret; + + hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RESET_TQP_QUEUE, true); + + req = (struct hns3_reset_tqp_queue_cmd *)desc.data; + req->tqp_id = rte_cpu_to_le_16(queue_id & HNS3_RING_ID_MASK); + + ret = hns3_cmd_send(hw, &desc, 1); + if (ret) { + hns3_err(hw, "Get reset status error, ret =%d", ret); + return ret; + } + + return hns3_get_bit(req->ready_to_reset, HNS3_TQP_RESET_B); +} + +static int +hns3_reset_tqp(struct hns3_hw *hw, uint16_t queue_id) +{ +#define HNS3_TQP_RESET_TRY_MS 200 + uint64_t end; + int reset_status; + int ret; + + ret = hns3_tqp_enable(hw, queue_id, false); + if (ret) + return ret; + + /* + * In current version VF is not supported when PF is driven by DPDK + * driver, all task queue pairs are mapped to PF function, so PF's queue + * id is equals to the global queue id in PF range. 
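+ * The reset is asserted below with HNS3_OPC_RESET_TQP_QUEUE, polled via
+ * hns3_get_reset_status() for up to HNS3_TQP_RESET_TRY_MS, and then
+ * de-asserted by sending the same command with the reset bit cleared.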
+ */ + ret = hns3_send_reset_tqp_cmd(hw, queue_id, true); + if (ret) { + hns3_err(hw, "Send reset tqp cmd fail, ret = %d", ret); + return ret; + } + ret = -ETIMEDOUT; + end = get_timeofday_ms() + HNS3_TQP_RESET_TRY_MS; + do { + /* Wait for tqp hw reset */ + rte_delay_ms(HNS3_POLL_RESPONE_MS); + reset_status = hns3_get_reset_status(hw, queue_id); + if (reset_status) { + ret = 0; + break; + } + } while (get_timeofday_ms() < end); + + if (ret) { + hns3_err(hw, "Reset TQP fail, ret = %d", ret); + return ret; + } + + ret = hns3_send_reset_tqp_cmd(hw, queue_id, false); + if (ret) + hns3_err(hw, "Deassert the soft reset fail, ret = %d", ret); + + return ret; +} + +static int +hns3vf_reset_tqp(struct hns3_hw *hw, uint16_t queue_id) +{ + uint8_t msg_data[2]; + int ret; + + /* Disable VF's queue before send queue reset msg to PF */ + ret = hns3_tqp_enable(hw, queue_id, false); + if (ret) + return ret; + + memcpy(msg_data, &queue_id, sizeof(uint16_t)); + + return hns3_send_mbx_msg(hw, HNS3_MBX_QUEUE_RESET, 0, msg_data, + sizeof(msg_data), true, NULL, 0); +} + +static int +hns3_reset_queue(struct hns3_adapter *hns, uint16_t queue_id) +{ + struct hns3_hw *hw = &hns->hw; + if (hns->is_vf) + return hns3vf_reset_tqp(hw, queue_id); + else + return hns3_reset_tqp(hw, queue_id); +} + +int +hns3_reset_all_queues(struct hns3_adapter *hns) +{ + struct hns3_hw *hw = &hns->hw; + int ret, i; + + for (i = 0; i < hw->cfg_max_queues; i++) { + ret = hns3_reset_queue(hns, i); + if (ret) { + hns3_err(hw, "Failed to reset No.%d queue: %d", i, ret); + return ret; + } + } + return 0; +} + +void +hns3_set_queue_intr_gl(struct hns3_hw *hw, uint16_t queue_id, + uint8_t gl_idx, uint16_t gl_value) +{ + uint32_t offset[] = {HNS3_TQP_INTR_GL0_REG, + HNS3_TQP_INTR_GL1_REG, + HNS3_TQP_INTR_GL2_REG}; + uint32_t addr, value; + + if (gl_idx >= RTE_DIM(offset) || gl_value > HNS3_TQP_INTR_GL_MAX) + return; + + addr = offset[gl_idx] + queue_id * HNS3_TQP_INTR_REG_SIZE; + value = HNS3_GL_USEC_TO_REG(gl_value); + + hns3_write_dev(hw, addr, value); +} + +void +hns3_set_queue_intr_rl(struct hns3_hw *hw, uint16_t queue_id, uint16_t rl_value) +{ + uint32_t addr, value; + + if (rl_value > HNS3_TQP_INTR_RL_MAX) + return; + + addr = HNS3_TQP_INTR_RL_REG + queue_id * HNS3_TQP_INTR_REG_SIZE; + value = HNS3_RL_USEC_TO_REG(rl_value); + if (value > 0) + value |= HNS3_TQP_INTR_RL_ENABLE_MASK; + + hns3_write_dev(hw, addr, value); +} + +static void +hns3_queue_intr_enable(struct hns3_hw *hw, uint16_t queue_id, bool en) +{ + uint32_t addr, value; + + addr = HNS3_TQP_INTR_CTRL_REG + queue_id * HNS3_TQP_INTR_REG_SIZE; + value = en ? 1 : 0; + + hns3_write_dev(hw, addr, value); +} + +/* + * Enable all rx queue interrupt when in interrupt rx mode. + * This api was called before enable queue rx&tx (in normal start or reset + * recover scenes), used to fix hardware rx queue interrupt enable was clear + * when FLR. 
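+ * It is a no-op when the application has not enabled Rx interrupt mode
+ * (dev_conf.intr_conf.rxq == 0).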
+ */ +void +hns3_dev_all_rx_queue_intr_enable(struct hns3_hw *hw, bool en) +{ + struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id]; + uint16_t nb_rx_q = hw->data->nb_rx_queues; + int i; + + if (dev->data->dev_conf.intr_conf.rxq == 0) + return; + + for (i = 0; i < nb_rx_q; i++) + hns3_queue_intr_enable(hw, i, en); +} + +int +hns3_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) +{ + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (dev->data->dev_conf.intr_conf.rxq == 0) + return -ENOTSUP; + + hns3_queue_intr_enable(hw, queue_id, true); + + return rte_intr_ack(intr_handle); +} + +int +hns3_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) +{ + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (dev->data->dev_conf.intr_conf.rxq == 0) + return -ENOTSUP; + + hns3_queue_intr_enable(hw, queue_id, false); + + return 0; +} + +static int +hns3_dev_rx_queue_start(struct hns3_adapter *hns, uint16_t idx) +{ + struct hns3_hw *hw = &hns->hw; + struct hns3_rx_queue *rxq; + int ret; + + PMD_INIT_FUNC_TRACE(); + + rxq = (struct hns3_rx_queue *)hw->data->rx_queues[idx]; + ret = hns3_alloc_rx_queue_mbufs(hw, rxq); + if (ret) { + hns3_err(hw, "Failed to alloc mbuf for No.%d rx queue: %d", + idx, ret); + return ret; + } + + rxq->next_to_use = 0; + rxq->next_to_clean = 0; + rxq->nb_rx_hold = 0; + hns3_init_rx_queue_hw(rxq); + + return 0; +} + +static void +hns3_fake_rx_queue_start(struct hns3_adapter *hns, uint16_t idx) +{ + struct hns3_hw *hw = &hns->hw; + struct hns3_rx_queue *rxq; + + rxq = (struct hns3_rx_queue *)hw->fkq_data.rx_queues[idx]; + rxq->next_to_use = 0; + rxq->next_to_clean = 0; + rxq->nb_rx_hold = 0; + hns3_init_rx_queue_hw(rxq); +} + +static void +hns3_init_tx_queue(struct hns3_tx_queue *queue) +{ + struct hns3_tx_queue *txq = queue; + struct hns3_desc *desc; + int i; + + /* Clear tx bd */ + desc = txq->tx_ring; + for (i = 0; i < txq->nb_tx_desc; i++) { + desc->tx.tp_fe_sc_vld_ra_ri = 0; + desc++; + } + + txq->next_to_use = 0; + txq->next_to_clean = 0; + txq->tx_bd_ready = txq->nb_tx_desc - 1; + hns3_init_tx_queue_hw(txq); +} + +static void +hns3_dev_tx_queue_start(struct hns3_adapter *hns, uint16_t idx) +{ + struct hns3_hw *hw = &hns->hw; + struct hns3_tx_queue *txq; + + txq = (struct hns3_tx_queue *)hw->data->tx_queues[idx]; + hns3_init_tx_queue(txq); +} + +static void +hns3_fake_tx_queue_start(struct hns3_adapter *hns, uint16_t idx) +{ + struct hns3_hw *hw = &hns->hw; + struct hns3_tx_queue *txq; + + txq = (struct hns3_tx_queue *)hw->fkq_data.tx_queues[idx]; + hns3_init_tx_queue(txq); +} + +static void +hns3_init_tx_ring_tc(struct hns3_adapter *hns) +{ + struct hns3_hw *hw = &hns->hw; + struct hns3_tx_queue *txq; + int i, num; + + for (i = 0; i < HNS3_MAX_TC_NUM; i++) { + struct hns3_tc_queue_info *tc_queue = &hw->tc_queue[i]; + int j; + + if (!tc_queue->enable) + continue; + + for (j = 0; j < tc_queue->tqp_count; j++) { + num = tc_queue->tqp_offset + j; + txq = (struct hns3_tx_queue *)hw->data->tx_queues[num]; + if (txq == NULL) + continue; + + hns3_write_dev(txq, HNS3_RING_TX_TC_REG, tc_queue->tc); + } + } +} + +static int +hns3_start_rx_queues(struct hns3_adapter *hns) +{ + struct hns3_hw *hw = &hns->hw; + struct hns3_rx_queue *rxq; + int i, j; + int ret; + + /* Initialize RSS for queues */ + ret = hns3_config_rss(hns); + if (ret) { + hns3_err(hw, "Failed to 
configure rss %d", ret); + return ret; + } + + for (i = 0; i < hw->data->nb_rx_queues; i++) { + rxq = (struct hns3_rx_queue *)hw->data->rx_queues[i]; + if (rxq == NULL || rxq->rx_deferred_start) + continue; + ret = hns3_dev_rx_queue_start(hns, i); + if (ret) { + hns3_err(hw, "Failed to start No.%d rx queue: %d", i, + ret); + goto out; + } + } + + for (i = 0; i < hw->fkq_data.nb_fake_rx_queues; i++) { + rxq = (struct hns3_rx_queue *)hw->fkq_data.rx_queues[i]; + if (rxq == NULL || rxq->rx_deferred_start) + continue; + hns3_fake_rx_queue_start(hns, i); + } + return 0; + +out: + for (j = 0; j < i; j++) { + rxq = (struct hns3_rx_queue *)hw->data->rx_queues[j]; + hns3_rx_queue_release_mbufs(rxq); + } + + return ret; +} + +static void +hns3_start_tx_queues(struct hns3_adapter *hns) +{ + struct hns3_hw *hw = &hns->hw; + struct hns3_tx_queue *txq; + int i; + + for (i = 0; i < hw->data->nb_tx_queues; i++) { + txq = (struct hns3_tx_queue *)hw->data->tx_queues[i]; + if (txq == NULL || txq->tx_deferred_start) + continue; + hns3_dev_tx_queue_start(hns, i); + } + + for (i = 0; i < hw->fkq_data.nb_fake_tx_queues; i++) { + txq = (struct hns3_tx_queue *)hw->fkq_data.tx_queues[i]; + if (txq == NULL || txq->tx_deferred_start) + continue; + hns3_fake_tx_queue_start(hns, i); + } + + hns3_init_tx_ring_tc(hns); +} + +/* + * Start all queues. + * Note: just init and setup queues, and don't enable queue rx&tx. + */ +int +hns3_start_queues(struct hns3_adapter *hns, bool reset_queue) +{ + struct hns3_hw *hw = &hns->hw; + int ret; + + if (reset_queue) { + ret = hns3_reset_all_queues(hns); + if (ret) { + hns3_err(hw, "Failed to reset all queues %d", ret); + return ret; + } + } + + ret = hns3_start_rx_queues(hns); + if (ret) { + hns3_err(hw, "Failed to start rx queues: %d", ret); + return ret; + } + + hns3_start_tx_queues(hns); + + return 0; +} + +int +hns3_stop_queues(struct hns3_adapter *hns, bool reset_queue) +{ + struct hns3_hw *hw = &hns->hw; + int ret; + + hns3_enable_all_queues(hw, false); + if (reset_queue) { + ret = hns3_reset_all_queues(hns); + if (ret) { + hns3_err(hw, "Failed to reset all queues %d", ret); + return ret; + } + } + return 0; +} + +static void* +hns3_alloc_rxq_and_dma_zone(struct rte_eth_dev *dev, + struct hns3_queue_info *q_info) +{ + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); + const struct rte_memzone *rx_mz; + struct hns3_rx_queue *rxq; + unsigned int rx_desc; + + rxq = rte_zmalloc_socket(q_info->type, sizeof(struct hns3_rx_queue), + RTE_CACHE_LINE_SIZE, q_info->socket_id); + if (rxq == NULL) { + hns3_err(hw, "Failed to allocate memory for No.%d rx ring!", + q_info->idx); + return NULL; + } + + /* Allocate rx ring hardware descriptors. 
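+ * The ring is reserved from a DMA memzone; its IOVA is stored in
+ * rx_ring_phys_addr and later written to the HNS3_RING_RX_BASEADDR_L/H
+ * registers when the queue is initialized.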
*/ + rxq->queue_id = q_info->idx; + rxq->nb_rx_desc = q_info->nb_desc; + rx_desc = rxq->nb_rx_desc * sizeof(struct hns3_desc); + rx_mz = rte_eth_dma_zone_reserve(dev, q_info->ring_name, q_info->idx, + rx_desc, HNS3_RING_BASE_ALIGN, + q_info->socket_id); + if (rx_mz == NULL) { + hns3_err(hw, "Failed to reserve DMA memory for No.%d rx ring!", + q_info->idx); + hns3_rx_queue_release(rxq); + return NULL; + } + rxq->mz = rx_mz; + rxq->rx_ring = (struct hns3_desc *)rx_mz->addr; + rxq->rx_ring_phys_addr = rx_mz->iova; + + hns3_dbg(hw, "No.%d rx descriptors iova 0x%" PRIx64, q_info->idx, + rxq->rx_ring_phys_addr); + + return rxq; +} + +static int +hns3_fake_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, + uint16_t nb_desc, unsigned int socket_id) +{ + struct hns3_adapter *hns = dev->data->dev_private; + struct hns3_hw *hw = &hns->hw; + struct hns3_queue_info q_info; + struct hns3_rx_queue *rxq; + uint16_t nb_rx_q; + + if (hw->fkq_data.rx_queues[idx]) { + hns3_rx_queue_release(hw->fkq_data.rx_queues[idx]); + hw->fkq_data.rx_queues[idx] = NULL; + } + + q_info.idx = idx; + q_info.socket_id = socket_id; + q_info.nb_desc = nb_desc; + q_info.type = "hns3 fake RX queue"; + q_info.ring_name = "rx_fake_ring"; + rxq = hns3_alloc_rxq_and_dma_zone(dev, &q_info); + if (rxq == NULL) { + hns3_err(hw, "Failed to setup No.%d fake rx ring.", idx); + return -ENOMEM; + } + + /* Don't need alloc sw_ring, because upper applications don't use it */ + rxq->sw_ring = NULL; + + rxq->hns = hns; + rxq->rx_deferred_start = false; + rxq->port_id = dev->data->port_id; + rxq->configured = true; + nb_rx_q = dev->data->nb_rx_queues; + rxq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET + + (nb_rx_q + idx) * HNS3_TQP_REG_SIZE); + rxq->rx_buf_len = hw->rx_buf_len; + + rte_spinlock_lock(&hw->lock); + hw->fkq_data.rx_queues[idx] = rxq; + rte_spinlock_unlock(&hw->lock); + + return 0; +} + +static void* +hns3_alloc_txq_and_dma_zone(struct rte_eth_dev *dev, + struct hns3_queue_info *q_info) +{ + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); + const struct rte_memzone *tx_mz; + struct hns3_tx_queue *txq; + struct hns3_desc *desc; + unsigned int tx_desc; + int i; + + txq = rte_zmalloc_socket(q_info->type, sizeof(struct hns3_tx_queue), + RTE_CACHE_LINE_SIZE, q_info->socket_id); + if (txq == NULL) { + hns3_err(hw, "Failed to allocate memory for No.%d tx ring!", + q_info->idx); + return NULL; + } + + /* Allocate tx ring hardware descriptors. 
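+ * The Tx BDs are cleared below (tp_fe_sc_vld_ra_ri = 0) right after the
+ * ring is reserved, so the queue starts with no valid descriptors.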
*/ + txq->queue_id = q_info->idx; + txq->nb_tx_desc = q_info->nb_desc; + tx_desc = txq->nb_tx_desc * sizeof(struct hns3_desc); + tx_mz = rte_eth_dma_zone_reserve(dev, q_info->ring_name, q_info->idx, + tx_desc, HNS3_RING_BASE_ALIGN, + q_info->socket_id); + if (tx_mz == NULL) { + hns3_err(hw, "Failed to reserve DMA memory for No.%d tx ring!", + q_info->idx); + hns3_tx_queue_release(txq); + return NULL; + } + txq->mz = tx_mz; + txq->tx_ring = (struct hns3_desc *)tx_mz->addr; + txq->tx_ring_phys_addr = tx_mz->iova; + + hns3_dbg(hw, "No.%d tx descriptors iova 0x%" PRIx64, q_info->idx, + txq->tx_ring_phys_addr); + + /* Clear tx bd */ + desc = txq->tx_ring; + for (i = 0; i < txq->nb_tx_desc; i++) { + desc->tx.tp_fe_sc_vld_ra_ri = 0; + desc++; + } + + return txq; +} + +static int +hns3_fake_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, + uint16_t nb_desc, unsigned int socket_id) +{ + struct hns3_adapter *hns = dev->data->dev_private; + struct hns3_hw *hw = &hns->hw; + struct hns3_queue_info q_info; + struct hns3_tx_queue *txq; + uint16_t nb_tx_q; + + if (hw->fkq_data.tx_queues[idx] != NULL) { + hns3_tx_queue_release(hw->fkq_data.tx_queues[idx]); + hw->fkq_data.tx_queues[idx] = NULL; + } + + q_info.idx = idx; + q_info.socket_id = socket_id; + q_info.nb_desc = nb_desc; + q_info.type = "hns3 fake TX queue"; + q_info.ring_name = "tx_fake_ring"; + txq = hns3_alloc_txq_and_dma_zone(dev, &q_info); + if (txq == NULL) { + hns3_err(hw, "Failed to setup No.%d fake tx ring.", idx); + return -ENOMEM; + } + + /* Don't need alloc sw_ring, because upper applications don't use it */ + txq->sw_ring = NULL; + + txq->hns = hns; + txq->tx_deferred_start = false; + txq->port_id = dev->data->port_id; + txq->configured = true; + nb_tx_q = dev->data->nb_tx_queues; + txq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET + + (nb_tx_q + idx) * HNS3_TQP_REG_SIZE); + + rte_spinlock_lock(&hw->lock); + hw->fkq_data.tx_queues[idx] = txq; + rte_spinlock_unlock(&hw->lock); + + return 0; +} + +static int +hns3_fake_rx_queue_config(struct hns3_hw *hw, uint16_t nb_queues) +{ + uint16_t old_nb_queues = hw->fkq_data.nb_fake_rx_queues; + void **rxq; + uint8_t i; + + if (hw->fkq_data.rx_queues == NULL && nb_queues != 0) { + /* first time configuration */ + uint32_t size; + size = sizeof(hw->fkq_data.rx_queues[0]) * nb_queues; + hw->fkq_data.rx_queues = rte_zmalloc("fake_rx_queues", size, + RTE_CACHE_LINE_SIZE); + if (hw->fkq_data.rx_queues == NULL) { + hw->fkq_data.nb_fake_rx_queues = 0; + return -ENOMEM; + } + } else if (hw->fkq_data.rx_queues != NULL && nb_queues != 0) { + /* re-configure */ + rxq = hw->fkq_data.rx_queues; + for (i = nb_queues; i < old_nb_queues; i++) + hns3_dev_rx_queue_release(rxq[i]); + + rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues, + RTE_CACHE_LINE_SIZE); + if (rxq == NULL) + return -ENOMEM; + if (nb_queues > old_nb_queues) { + uint16_t new_qs = nb_queues - old_nb_queues; + memset(rxq + old_nb_queues, 0, sizeof(rxq[0]) * new_qs); + } + + hw->fkq_data.rx_queues = rxq; + } else if (hw->fkq_data.rx_queues != NULL && nb_queues == 0) { + rxq = hw->fkq_data.rx_queues; + for (i = nb_queues; i < old_nb_queues; i++) + hns3_dev_rx_queue_release(rxq[i]); + + rte_free(hw->fkq_data.rx_queues); + hw->fkq_data.rx_queues = NULL; + } + + hw->fkq_data.nb_fake_rx_queues = nb_queues; + + return 0; +} + +static int +hns3_fake_tx_queue_config(struct hns3_hw *hw, uint16_t nb_queues) +{ + uint16_t old_nb_queues = hw->fkq_data.nb_fake_tx_queues; + void **txq; + uint8_t i; + + if (hw->fkq_data.tx_queues == NULL && 
nb_queues != 0) { + /* first time configuration */ + uint32_t size; + size = sizeof(hw->fkq_data.tx_queues[0]) * nb_queues; + hw->fkq_data.tx_queues = rte_zmalloc("fake_tx_queues", size, + RTE_CACHE_LINE_SIZE); + if (hw->fkq_data.tx_queues == NULL) { + hw->fkq_data.nb_fake_tx_queues = 0; + return -ENOMEM; + } + } else if (hw->fkq_data.tx_queues != NULL && nb_queues != 0) { + /* re-configure */ + txq = hw->fkq_data.tx_queues; + for (i = nb_queues; i < old_nb_queues; i++) + hns3_dev_tx_queue_release(txq[i]); + txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues, + RTE_CACHE_LINE_SIZE); + if (txq == NULL) + return -ENOMEM; + if (nb_queues > old_nb_queues) { + uint16_t new_qs = nb_queues - old_nb_queues; + memset(txq + old_nb_queues, 0, sizeof(txq[0]) * new_qs); + } + + hw->fkq_data.tx_queues = txq; + } else if (hw->fkq_data.tx_queues != NULL && nb_queues == 0) { + txq = hw->fkq_data.tx_queues; + for (i = nb_queues; i < old_nb_queues; i++) + hns3_dev_tx_queue_release(txq[i]); + + rte_free(hw->fkq_data.tx_queues); + hw->fkq_data.tx_queues = NULL; + } + hw->fkq_data.nb_fake_tx_queues = nb_queues; + + return 0; +} + +int +hns3_set_fake_rx_or_tx_queues(struct rte_eth_dev *dev, uint16_t nb_rx_q, + uint16_t nb_tx_q) +{ + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint16_t rx_need_add_nb_q; + uint16_t tx_need_add_nb_q; + uint16_t port_id; + uint16_t q; + int ret; + + /* Setup new number of fake RX/TX queues and reconfigure device. */ + hw->cfg_max_queues = RTE_MAX(nb_rx_q, nb_tx_q); + rx_need_add_nb_q = hw->cfg_max_queues - nb_rx_q; + tx_need_add_nb_q = hw->cfg_max_queues - nb_tx_q; + ret = hns3_fake_rx_queue_config(hw, rx_need_add_nb_q); + if (ret) { + hns3_err(hw, "Fail to configure fake rx queues: %d", ret); + goto cfg_fake_rx_q_fail; + } + + ret = hns3_fake_tx_queue_config(hw, tx_need_add_nb_q); + if (ret) { + hns3_err(hw, "Fail to configure fake rx queues: %d", ret); + goto cfg_fake_tx_q_fail; + } + + /* Allocate and set up fake RX queue per Ethernet port. */ + port_id = hw->data->port_id; + for (q = 0; q < rx_need_add_nb_q; q++) { + ret = hns3_fake_rx_queue_setup(dev, q, HNS3_MIN_RING_DESC, + rte_eth_dev_socket_id(port_id)); + if (ret) + goto setup_fake_rx_q_fail; + } + + /* Allocate and set up fake TX queue per Ethernet port. 
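+ * Fake queues only pad the shorter side, so hardware is always configured
+ * with cfg_max_queues (i.e. max(nb_rx_q, nb_tx_q)) Rx/Tx queue pairs.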
*/ + for (q = 0; q < tx_need_add_nb_q; q++) { + ret = hns3_fake_tx_queue_setup(dev, q, HNS3_MIN_RING_DESC, + rte_eth_dev_socket_id(port_id)); + if (ret) + goto setup_fake_tx_q_fail; + } + + return 0; + +setup_fake_tx_q_fail: +setup_fake_rx_q_fail: + (void)hns3_fake_tx_queue_config(hw, 0); +cfg_fake_tx_q_fail: + (void)hns3_fake_rx_queue_config(hw, 0); +cfg_fake_rx_q_fail: + hw->cfg_max_queues = 0; + + return ret; +} + +void +hns3_dev_release_mbufs(struct hns3_adapter *hns) +{ + struct rte_eth_dev_data *dev_data = hns->hw.data; + struct hns3_rx_queue *rxq; + struct hns3_tx_queue *txq; + int i; + + if (dev_data->rx_queues) + for (i = 0; i < dev_data->nb_rx_queues; i++) { + rxq = dev_data->rx_queues[i]; + if (rxq == NULL || rxq->rx_deferred_start) + continue; + hns3_rx_queue_release_mbufs(rxq); + } + + if (dev_data->tx_queues) + for (i = 0; i < dev_data->nb_tx_queues; i++) { + txq = dev_data->tx_queues[i]; + if (txq == NULL || txq->tx_deferred_start) + continue; + hns3_tx_queue_release_mbufs(txq); + } +} + +int +hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc, + unsigned int socket_id, const struct rte_eth_rxconf *conf, + struct rte_mempool *mp) +{ + struct hns3_adapter *hns = dev->data->dev_private; + struct hns3_hw *hw = &hns->hw; + struct hns3_queue_info q_info; + struct hns3_rx_queue *rxq; + int rx_entry_len; + + if (dev->data->dev_started) { + hns3_err(hw, "rx_queue_setup after dev_start no supported"); + return -EINVAL; + } + + if (nb_desc > HNS3_MAX_RING_DESC || nb_desc < HNS3_MIN_RING_DESC || + nb_desc % HNS3_ALIGN_RING_DESC) { + hns3_err(hw, "Number (%u) of rx descriptors is invalid", + nb_desc); + return -EINVAL; + } + + if (dev->data->rx_queues[idx]) { + hns3_rx_queue_release(dev->data->rx_queues[idx]); + dev->data->rx_queues[idx] = NULL; + } + + q_info.idx = idx; + q_info.socket_id = socket_id; + q_info.nb_desc = nb_desc; + q_info.type = "hns3 RX queue"; + q_info.ring_name = "rx_ring"; + rxq = hns3_alloc_rxq_and_dma_zone(dev, &q_info); + if (rxq == NULL) { + hns3_err(hw, + "Failed to alloc mem and reserve DMA mem for rx ring!"); + return -ENOMEM; + } + + rxq->hns = hns; + rxq->mb_pool = mp; + if (conf->rx_free_thresh <= 0) + rxq->rx_free_thresh = DEFAULT_RX_FREE_THRESH; + else + rxq->rx_free_thresh = conf->rx_free_thresh; + rxq->rx_deferred_start = conf->rx_deferred_start; + + rx_entry_len = sizeof(struct hns3_entry) * rxq->nb_rx_desc; + rxq->sw_ring = rte_zmalloc_socket("hns3 RX sw ring", rx_entry_len, + RTE_CACHE_LINE_SIZE, socket_id); + if (rxq->sw_ring == NULL) { + hns3_err(hw, "Failed to allocate memory for rx sw ring!"); + hns3_rx_queue_release(rxq); + return -ENOMEM; + } + + rxq->next_to_use = 0; + rxq->next_to_clean = 0; + rxq->nb_rx_hold = 0; + rxq->pkt_first_seg = NULL; + rxq->pkt_last_seg = NULL; + rxq->port_id = dev->data->port_id; + rxq->configured = true; + rxq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET + + idx * HNS3_TQP_REG_SIZE); + rxq->rx_buf_len = hw->rx_buf_len; + rxq->l2_errors = 0; + rxq->pkt_len_errors = 0; + rxq->l3_csum_erros = 0; + rxq->l4_csum_erros = 0; + rxq->ol3_csum_erros = 0; + rxq->ol4_csum_erros = 0; + + rte_spinlock_lock(&hw->lock); + dev->data->rx_queues[idx] = rxq; + rte_spinlock_unlock(&hw->lock); + + return 0; +} + +static inline uint32_t +rxd_pkt_info_to_pkt_type(uint32_t pkt_info, uint32_t ol_info) +{ +#define HNS3_L2TBL_NUM 4 +#define HNS3_L3TBL_NUM 16 +#define HNS3_L4TBL_NUM 16 +#define HNS3_OL3TBL_NUM 16 +#define HNS3_OL4TBL_NUM 16 + uint32_t pkt_type = 0; + uint32_t l2id, l3id, l4id; + 
uint32_t ol3id, ol4id; + + static const uint32_t l2table[HNS3_L2TBL_NUM] = { + RTE_PTYPE_L2_ETHER, + RTE_PTYPE_L2_ETHER_VLAN, + RTE_PTYPE_L2_ETHER_QINQ, + 0 + }; + + static const uint32_t l3table[HNS3_L3TBL_NUM] = { + RTE_PTYPE_L3_IPV4, + RTE_PTYPE_L3_IPV6, + RTE_PTYPE_L2_ETHER_ARP, + RTE_PTYPE_L2_ETHER, + RTE_PTYPE_L3_IPV4_EXT, + RTE_PTYPE_L3_IPV6_EXT, + RTE_PTYPE_L2_ETHER_LLDP, + 0, 0, 0, 0, 0, 0, 0, 0, 0 + }; + + static const uint32_t l4table[HNS3_L4TBL_NUM] = { + RTE_PTYPE_L4_UDP, + RTE_PTYPE_L4_TCP, + RTE_PTYPE_TUNNEL_GRE, + RTE_PTYPE_L4_SCTP, + RTE_PTYPE_L4_IGMP, + RTE_PTYPE_L4_ICMP, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 + }; + + static const uint32_t inner_l2table[HNS3_L2TBL_NUM] = { + RTE_PTYPE_INNER_L2_ETHER, + RTE_PTYPE_INNER_L2_ETHER_VLAN, + RTE_PTYPE_INNER_L2_ETHER_QINQ, + 0 + }; + + static const uint32_t inner_l3table[HNS3_L3TBL_NUM] = { + RTE_PTYPE_INNER_L3_IPV4, + RTE_PTYPE_INNER_L3_IPV6, + 0, + RTE_PTYPE_INNER_L2_ETHER, + RTE_PTYPE_INNER_L3_IPV4_EXT, + RTE_PTYPE_INNER_L3_IPV6_EXT, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 + }; + + static const uint32_t inner_l4table[HNS3_L4TBL_NUM] = { + RTE_PTYPE_INNER_L4_UDP, + RTE_PTYPE_INNER_L4_TCP, + RTE_PTYPE_TUNNEL_GRE, + RTE_PTYPE_INNER_L4_SCTP, + RTE_PTYPE_L4_IGMP, + RTE_PTYPE_INNER_L4_ICMP, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 + }; + + static const uint32_t ol3table[HNS3_OL3TBL_NUM] = { + RTE_PTYPE_L3_IPV4, + RTE_PTYPE_L3_IPV6, + 0, 0, + RTE_PTYPE_L3_IPV4_EXT, + RTE_PTYPE_L3_IPV6_EXT, + 0, 0, 0, 0, 0, 0, 0, 0, 0, + RTE_PTYPE_UNKNOWN + }; + + static const uint32_t ol4table[HNS3_OL4TBL_NUM] = { + 0, + RTE_PTYPE_TUNNEL_VXLAN, + RTE_PTYPE_TUNNEL_NVGRE, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 + }; + + l2id = hns3_get_field(pkt_info, HNS3_RXD_STRP_TAGP_M, + HNS3_RXD_STRP_TAGP_S); + l3id = hns3_get_field(pkt_info, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S); + l4id = hns3_get_field(pkt_info, HNS3_RXD_L4ID_M, HNS3_RXD_L4ID_S); + ol3id = hns3_get_field(ol_info, HNS3_RXD_OL3ID_M, HNS3_RXD_OL3ID_S); + ol4id = hns3_get_field(ol_info, HNS3_RXD_OL4ID_M, HNS3_RXD_OL4ID_S); + + if (ol4table[ol4id]) + pkt_type |= (inner_l2table[l2id] | inner_l3table[l3id] | + inner_l4table[l4id] | ol3table[ol3id] | + ol4table[ol4id]); + else + pkt_type |= (l2table[l2id] | l3table[l3id] | l4table[l4id]); + return pkt_type; +} + +const uint32_t * +hns3_dev_supported_ptypes_get(struct rte_eth_dev *dev) +{ + static const uint32_t ptypes[] = { + RTE_PTYPE_L2_ETHER, + RTE_PTYPE_L2_ETHER_VLAN, + RTE_PTYPE_L2_ETHER_QINQ, + RTE_PTYPE_L2_ETHER_LLDP, + RTE_PTYPE_L2_ETHER_ARP, + RTE_PTYPE_L3_IPV4, + RTE_PTYPE_L3_IPV4_EXT, + RTE_PTYPE_L3_IPV6, + RTE_PTYPE_L3_IPV6_EXT, + RTE_PTYPE_L4_IGMP, + RTE_PTYPE_L4_ICMP, + RTE_PTYPE_L4_SCTP, + RTE_PTYPE_L4_TCP, + RTE_PTYPE_L4_UDP, + RTE_PTYPE_TUNNEL_GRE, + RTE_PTYPE_UNKNOWN + }; + + if (dev->rx_pkt_burst == hns3_recv_pkts) + return ptypes; + + return NULL; +} + +static void +hns3_clean_rx_buffers(struct hns3_rx_queue *rxq, int count) +{ + rxq->next_to_use += count; + if (rxq->next_to_use >= rxq->nb_rx_desc) + rxq->next_to_use -= rxq->nb_rx_desc; + + hns3_write_dev(rxq, HNS3_RING_RX_HEAD_REG, count); +} + +static int +hns3_handle_bdinfo(struct hns3_rx_queue *rxq, struct rte_mbuf *rxm, + uint32_t bd_base_info, uint32_t l234_info, + uint32_t *cksum_err) +{ + uint32_t tmp = 0; + + if (unlikely(l234_info & BIT(HNS3_RXD_L2E_B))) { + rxq->l2_errors++; + return -EINVAL; + } + + if (unlikely(rxm->pkt_len == 0 || + (l234_info & BIT(HNS3_RXD_TRUNCAT_B)))) { + rxq->pkt_len_errors++; + return -EINVAL; + } + + if (bd_base_info & BIT(HNS3_RXD_L3L4P_B)) { + if (unlikely(l234_info & 
BIT(HNS3_RXD_L3E_B))) { + rxm->ol_flags |= PKT_RX_IP_CKSUM_BAD; + rxq->l3_csum_erros++; + tmp |= HNS3_L3_CKSUM_ERR; + } + + if (unlikely(l234_info & BIT(HNS3_RXD_L4E_B))) { + rxm->ol_flags |= PKT_RX_L4_CKSUM_BAD; + rxq->l4_csum_erros++; + tmp |= HNS3_L4_CKSUM_ERR; + } + + if (unlikely(l234_info & BIT(HNS3_RXD_OL3E_B))) { + rxq->ol3_csum_erros++; + tmp |= HNS3_OUTER_L3_CKSUM_ERR; + } + + if (unlikely(l234_info & BIT(HNS3_RXD_OL4E_B))) { + rxm->ol_flags |= PKT_RX_OUTER_L4_CKSUM_BAD; + rxq->ol4_csum_erros++; + tmp |= HNS3_OUTER_L4_CKSUM_ERR; + } + } + *cksum_err = tmp; + + return 0; +} + +static void +hns3_rx_set_cksum_flag(struct rte_mbuf *rxm, uint64_t packet_type, + const uint32_t cksum_err) +{ + if (unlikely((packet_type & RTE_PTYPE_TUNNEL_MASK))) { + if (likely(packet_type & RTE_PTYPE_INNER_L3_MASK) && + (cksum_err & HNS3_L3_CKSUM_ERR) == 0) + rxm->ol_flags |= PKT_RX_IP_CKSUM_GOOD; + if (likely(packet_type & RTE_PTYPE_INNER_L4_MASK) && + (cksum_err & HNS3_L4_CKSUM_ERR) == 0) + rxm->ol_flags |= PKT_RX_L4_CKSUM_GOOD; + if (likely(packet_type & RTE_PTYPE_L4_MASK) && + (cksum_err & HNS3_OUTER_L4_CKSUM_ERR) == 0) + rxm->ol_flags |= PKT_RX_OUTER_L4_CKSUM_GOOD; + } else { + if (likely(packet_type & RTE_PTYPE_L3_MASK) && + (cksum_err & HNS3_L3_CKSUM_ERR) == 0) + rxm->ol_flags |= PKT_RX_IP_CKSUM_GOOD; + if (likely(packet_type & RTE_PTYPE_L4_MASK) && + (cksum_err & HNS3_L4_CKSUM_ERR) == 0) + rxm->ol_flags |= PKT_RX_L4_CKSUM_GOOD; + } +} + +uint16_t +hns3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) +{ + volatile struct hns3_desc *rx_ring; /* RX ring (desc) */ + volatile struct hns3_desc *rxdp; /* pointer of the current desc */ + struct hns3_rx_queue *rxq; /* RX queue */ + struct hns3_entry *sw_ring; + struct hns3_entry *rxe; + struct rte_mbuf *first_seg; + struct rte_mbuf *last_seg; + struct hns3_desc rxd; + struct rte_mbuf *nmb; /* pointer of the new mbuf */ + struct rte_mbuf *rxm; + struct rte_eth_dev *dev; + uint32_t bd_base_info; + uint32_t cksum_err; + uint32_t l234_info; + uint32_t ol_info; + uint64_t dma_addr; + uint16_t data_len; + uint16_t nb_rx_bd; + uint16_t pkt_len; + uint16_t nb_rx; + uint16_t rx_id; + int ret; + + nb_rx = 0; + nb_rx_bd = 0; + rxq = rx_queue; + dev = &rte_eth_devices[rxq->port_id]; + + rx_id = rxq->next_to_clean; + rx_ring = rxq->rx_ring; + first_seg = rxq->pkt_first_seg; + last_seg = rxq->pkt_last_seg; + sw_ring = rxq->sw_ring; + + while (nb_rx < nb_pkts) { + rxdp = &rx_ring[rx_id]; + bd_base_info = rte_le_to_cpu_32(rxdp->rx.bd_base_info); + if (unlikely(!hns3_get_bit(bd_base_info, HNS3_RXD_VLD_B))) + break; + /* + * The interactive process between software and hardware of + * receiving a new packet in hns3 network engine: + * 1. Hardware network engine firstly writes the packet content + * to the memory pointed by the 'addr' field of the Rx Buffer + * Descriptor, secondly fills the result of parsing the + * packet include the valid field into the Rx Buffer + * Descriptor in one write operation. + * 2. Driver reads the Rx BD's valid field in the loop to check + * whether it's valid, if valid then assign a new address to + * the addr field, clear the valid field, get the other + * information of the packet by parsing Rx BD's other fields, + * finally write back the number of Rx BDs processed by the + * driver to the HNS3_RING_RX_HEAD_REG register to inform + * hardware. + * In the above process, the ordering is very important. We must + * make sure that CPU read Rx BD's other fields only after the + * Rx BD is valid. 
+ * + * There are two type of re-ordering: compiler re-ordering and + * CPU re-ordering under the ARMv8 architecture. + * 1. we use volatile to deal with compiler re-ordering, so you + * can see that rx_ring/rxdp defined with volatile. + * 2. we commonly use memory barrier to deal with CPU + * re-ordering, but the cost is high. + * + * In order to solve the high cost of using memory barrier, we + * use the data dependency order under the ARMv8 architecture, + * for example: + * instr01: load A + * instr02: load B <- A + * the instr02 will always execute after instr01. + * + * To construct the data dependency ordering, we use the + * following assignment: + * rxd = rxdp[(bd_base_info & (1u << HNS3_RXD_VLD_B)) - + * (1u<mb_pool); + if (unlikely(nmb == NULL)) { + dev->data->rx_mbuf_alloc_failed++; + break; + } + + nb_rx_bd++; + rxe = &sw_ring[rx_id]; + rx_id++; + if (unlikely(rx_id == rxq->nb_rx_desc)) + rx_id = 0; + + rte_prefetch0(sw_ring[rx_id].mbuf); + if ((rx_id & 0x3) == 0) { + rte_prefetch0(&rx_ring[rx_id]); + rte_prefetch0(&sw_ring[rx_id]); + } + + rxm = rxe->mbuf; + rxe->mbuf = nmb; + + dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb)); + rxdp->rx.bd_base_info = 0; + rxdp->addr = dma_addr; + + /* Load remained descriptor data and extract necessary fields */ + data_len = (uint16_t)(rte_le_to_cpu_16(rxd.rx.size)); + l234_info = rte_le_to_cpu_32(rxd.rx.l234_info); + ol_info = rte_le_to_cpu_32(rxd.rx.ol_info); + + if (first_seg == NULL) { + first_seg = rxm; + first_seg->nb_segs = 1; + } else { + first_seg->nb_segs++; + last_seg->next = rxm; + } + + rxm->data_off = RTE_PKTMBUF_HEADROOM; + rxm->data_len = data_len; + + if (!hns3_get_bit(bd_base_info, HNS3_RXD_FE_B)) { + last_seg = rxm; + continue; + } + + /* The last buffer of the received packet */ + pkt_len = (uint16_t)(rte_le_to_cpu_16(rxd.rx.pkt_len)); + first_seg->pkt_len = pkt_len; + first_seg->port = rxq->port_id; + first_seg->hash.rss = rte_le_to_cpu_32(rxd.rx.rss_hash); + first_seg->ol_flags = PKT_RX_RSS_HASH; + if (unlikely(hns3_get_bit(bd_base_info, HNS3_RXD_LUM_B))) { + first_seg->hash.fdir.hi = + rte_le_to_cpu_32(rxd.rx.fd_id); + first_seg->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID; + } + rxm->next = NULL; + + ret = hns3_handle_bdinfo(rxq, first_seg, bd_base_info, + l234_info, &cksum_err); + if (unlikely(ret)) + goto pkt_err; + + first_seg->packet_type = rxd_pkt_info_to_pkt_type(l234_info, + ol_info); + + if (bd_base_info & BIT(HNS3_RXD_L3L4P_B)) + hns3_rx_set_cksum_flag(first_seg, + first_seg->packet_type, + cksum_err); + + first_seg->vlan_tci = rte_le_to_cpu_16(rxd.rx.vlan_tag); + first_seg->vlan_tci_outer = + rte_le_to_cpu_16(rxd.rx.ot_vlan_tag); + rx_pkts[nb_rx++] = first_seg; + first_seg = NULL; + continue; +pkt_err: + rte_pktmbuf_free(first_seg); + first_seg = NULL; + } + + rxq->next_to_clean = rx_id; + rxq->pkt_first_seg = first_seg; + rxq->pkt_last_seg = last_seg; + + nb_rx_bd = nb_rx_bd + rxq->nb_rx_hold; + if (nb_rx_bd > rxq->rx_free_thresh) { + hns3_clean_rx_buffers(rxq, nb_rx_bd); + nb_rx_bd = 0; + } + rxq->nb_rx_hold = nb_rx_bd; + + return nb_rx; +} + +int +hns3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc, + unsigned int socket_id, const struct rte_eth_txconf *conf) +{ + struct hns3_adapter *hns = dev->data->dev_private; + struct hns3_hw *hw = &hns->hw; + struct hns3_queue_info q_info; + struct hns3_tx_queue *txq; + int tx_entry_len; + + if (dev->data->dev_started) { + hns3_err(hw, "tx_queue_setup after dev_start no supported"); + return -EINVAL; + } + + if (nb_desc > 
HNS3_MAX_RING_DESC || nb_desc < HNS3_MIN_RING_DESC || + nb_desc % HNS3_ALIGN_RING_DESC) { + hns3_err(hw, "Number (%u) of tx descriptors is invalid", + nb_desc); + return -EINVAL; + } + + if (dev->data->tx_queues[idx] != NULL) { + hns3_tx_queue_release(dev->data->tx_queues[idx]); + dev->data->tx_queues[idx] = NULL; + } + + q_info.idx = idx; + q_info.socket_id = socket_id; + q_info.nb_desc = nb_desc; + q_info.type = "hns3 TX queue"; + q_info.ring_name = "tx_ring"; + txq = hns3_alloc_txq_and_dma_zone(dev, &q_info); + if (txq == NULL) { + hns3_err(hw, + "Failed to alloc mem and reserve DMA mem for tx ring!"); + return -ENOMEM; + } + + txq->tx_deferred_start = conf->tx_deferred_start; + tx_entry_len = sizeof(struct hns3_entry) * txq->nb_tx_desc; + txq->sw_ring = rte_zmalloc_socket("hns3 TX sw ring", tx_entry_len, + RTE_CACHE_LINE_SIZE, socket_id); + if (txq->sw_ring == NULL) { + hns3_err(hw, "Failed to allocate memory for tx sw ring!"); + hns3_tx_queue_release(txq); + return -ENOMEM; + } + + txq->hns = hns; + txq->next_to_use = 0; + txq->next_to_clean = 0; + txq->tx_bd_ready = txq->nb_tx_desc - 1; + txq->port_id = dev->data->port_id; + txq->configured = true; + txq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET + + idx * HNS3_TQP_REG_SIZE); + txq->over_length_pkt_cnt = 0; + txq->exceed_limit_bd_pkt_cnt = 0; + txq->exceed_limit_bd_reassem_fail = 0; + txq->unsupported_tunnel_pkt_cnt = 0; + txq->queue_full_cnt = 0; + txq->pkt_padding_fail_cnt = 0; + rte_spinlock_lock(&hw->lock); + dev->data->tx_queues[idx] = txq; + rte_spinlock_unlock(&hw->lock); + + return 0; +} + +static inline void +hns3_queue_xmit(struct hns3_tx_queue *txq, uint32_t buf_num) +{ + hns3_write_dev(txq, HNS3_RING_TX_TAIL_REG, buf_num); +} + +static void +hns3_tx_free_useless_buffer(struct hns3_tx_queue *txq) +{ + uint16_t tx_next_clean = txq->next_to_clean; + uint16_t tx_next_use = txq->next_to_use; + uint16_t tx_bd_ready = txq->tx_bd_ready; + uint16_t tx_bd_max = txq->nb_tx_desc; + struct hns3_entry *tx_bak_pkt = &txq->sw_ring[tx_next_clean]; + struct hns3_desc *desc = &txq->tx_ring[tx_next_clean]; + struct rte_mbuf *mbuf; + + while ((!hns3_get_bit(desc->tx.tp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B)) && + tx_next_use != tx_next_clean) { + mbuf = tx_bak_pkt->mbuf; + if (mbuf) { + rte_pktmbuf_free_seg(mbuf); + tx_bak_pkt->mbuf = NULL; + } + + desc++; + tx_bak_pkt++; + tx_next_clean++; + tx_bd_ready++; + + if (tx_next_clean >= tx_bd_max) { + tx_next_clean = 0; + desc = txq->tx_ring; + tx_bak_pkt = txq->sw_ring; + } + } + + txq->next_to_clean = tx_next_clean; + txq->tx_bd_ready = tx_bd_ready; +} + +static int +hns3_tso_proc_tunnel(struct hns3_desc *desc, uint64_t ol_flags, + struct rte_mbuf *rxm, uint8_t *l2_len) +{ + uint64_t tun_flags; + uint8_t ol4_len; + uint32_t otmp; + + tun_flags = ol_flags & PKT_TX_TUNNEL_MASK; + if (tun_flags == 0) + return 0; + + otmp = rte_le_to_cpu_32(desc->tx.ol_type_vlan_len_msec); + switch (tun_flags) { + case PKT_TX_TUNNEL_GENEVE: + case PKT_TX_TUNNEL_VXLAN: + *l2_len = rxm->l2_len - RTE_ETHER_VXLAN_HLEN; + break; + case PKT_TX_TUNNEL_GRE: + /* + * OL4 header size, defined in 4 Bytes, it contains outer + * L4(GRE) length and tunneling length. 
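+ * A worked example with an illustrative value: if the L4LEN field read
+ * back below is 2, the outer GRE and tunnel headers occupy
+ * 2 << HNS3_L4_LEN_UNIT = 8 bytes, so the inner L2 length becomes
+ * rxm->l2_len - 8.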
+ */ + ol4_len = hns3_get_field(otmp, HNS3_TXD_L4LEN_M, + HNS3_TXD_L4LEN_S); + *l2_len = rxm->l2_len - (ol4_len << HNS3_L4_LEN_UNIT); + break; + default: + /* For non UDP / GRE tunneling, drop the tunnel packet */ + return -EINVAL; + } + hns3_set_field(otmp, HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S, + rxm->outer_l2_len >> HNS3_L2_LEN_UNIT); + desc->tx.ol_type_vlan_len_msec = rte_cpu_to_le_32(otmp); + + return 0; +} + +static void +hns3_set_tso(struct hns3_desc *desc, + uint64_t ol_flags, struct rte_mbuf *rxm) +{ + uint32_t paylen, hdr_len; + uint32_t tmp; + uint8_t l2_len = rxm->l2_len; + + if (!(ol_flags & PKT_TX_TCP_SEG)) + return; + + if (hns3_tso_proc_tunnel(desc, ol_flags, rxm, &l2_len)) + return; + + hdr_len = rxm->l2_len + rxm->l3_len + rxm->l4_len; + hdr_len += (ol_flags & PKT_TX_TUNNEL_MASK) ? + rxm->outer_l2_len + rxm->outer_l3_len : 0; + paylen = rxm->pkt_len - hdr_len; + if (paylen <= rxm->tso_segsz) + return; + + tmp = rte_le_to_cpu_32(desc->tx.type_cs_vlan_tso_len); + hns3_set_bit(tmp, HNS3_TXD_TSO_B, 1); + hns3_set_bit(tmp, HNS3_TXD_L3CS_B, 1); + hns3_set_field(tmp, HNS3_TXD_L4T_M, HNS3_TXD_L4T_S, HNS3_L4T_TCP); + hns3_set_bit(tmp, HNS3_TXD_L4CS_B, 1); + hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S, + sizeof(struct rte_tcp_hdr) >> HNS3_L4_LEN_UNIT); + hns3_set_field(tmp, HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S, + l2_len >> HNS3_L2_LEN_UNIT); + desc->tx.type_cs_vlan_tso_len = rte_cpu_to_le_32(tmp); + desc->tx.mss = rte_cpu_to_le_16(rxm->tso_segsz); +} + +static void +fill_desc(struct hns3_tx_queue *txq, uint16_t tx_desc_id, struct rte_mbuf *rxm, + bool first, int offset) +{ + struct hns3_desc *tx_ring = txq->tx_ring; + struct hns3_desc *desc = &tx_ring[tx_desc_id]; + uint8_t frag_end = rxm->next == NULL ? 1 : 0; + uint64_t ol_flags = rxm->ol_flags; + uint16_t size = rxm->data_len; + uint16_t rrcfv = 0; + uint32_t hdr_len; + uint32_t paylen; + uint32_t tmp; + + desc->addr = rte_mbuf_data_iova(rxm) + offset; + desc->tx.send_size = rte_cpu_to_le_16(size); + hns3_set_bit(rrcfv, HNS3_TXD_VLD_B, 1); + + if (first) { + hdr_len = rxm->l2_len + rxm->l3_len + rxm->l4_len; + hdr_len += (ol_flags & PKT_TX_TUNNEL_MASK) ? 
+ rxm->outer_l2_len + rxm->outer_l3_len : 0; + paylen = rxm->pkt_len - hdr_len; + desc->tx.paylen = rte_cpu_to_le_32(paylen); + hns3_set_tso(desc, ol_flags, rxm); + } + + hns3_set_bit(rrcfv, HNS3_TXD_FE_B, frag_end); + desc->tx.tp_fe_sc_vld_ra_ri = rte_cpu_to_le_16(rrcfv); + + if (frag_end) { + if (ol_flags & (PKT_TX_VLAN_PKT | PKT_TX_QINQ_PKT)) { + tmp = rte_le_to_cpu_32(desc->tx.type_cs_vlan_tso_len); + hns3_set_bit(tmp, HNS3_TXD_VLAN_B, 1); + desc->tx.type_cs_vlan_tso_len = rte_cpu_to_le_32(tmp); + desc->tx.vlan_tag = rte_cpu_to_le_16(rxm->vlan_tci); + } + + if (ol_flags & PKT_TX_QINQ_PKT) { + tmp = rte_le_to_cpu_32(desc->tx.ol_type_vlan_len_msec); + hns3_set_bit(tmp, HNS3_TXD_OVLAN_B, 1); + desc->tx.ol_type_vlan_len_msec = rte_cpu_to_le_32(tmp); + desc->tx.outer_vlan_tag = + rte_cpu_to_le_16(rxm->vlan_tci_outer); + } + } +} + +static int +hns3_tx_alloc_mbufs(struct hns3_tx_queue *txq, struct rte_mempool *mb_pool, + uint16_t nb_new_buf, struct rte_mbuf **alloc_mbuf) +{ + struct rte_mbuf *new_mbuf = NULL; + struct rte_eth_dev *dev; + struct rte_mbuf *temp; + struct hns3_hw *hw; + uint16_t i; + + /* Allocate enough mbufs */ + for (i = 0; i < nb_new_buf; i++) { + temp = rte_pktmbuf_alloc(mb_pool); + if (unlikely(temp == NULL)) { + dev = &rte_eth_devices[txq->port_id]; + hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); + hns3_err(hw, "Failed to alloc TX mbuf port_id=%d," + "queue_id=%d in reassemble tx pkts.", + txq->port_id, txq->queue_id); + rte_pktmbuf_free(new_mbuf); + return -ENOMEM; + } + temp->next = new_mbuf; + new_mbuf = temp; + } + + if (new_mbuf == NULL) + return -ENOMEM; + + new_mbuf->nb_segs = nb_new_buf; + *alloc_mbuf = new_mbuf; + + return 0; +} + +static int +hns3_reassemble_tx_pkts(void *tx_queue, struct rte_mbuf *tx_pkt, + struct rte_mbuf **new_pkt) +{ + struct hns3_tx_queue *txq = tx_queue; + struct rte_mempool *mb_pool; + struct rte_mbuf *new_mbuf; + struct rte_mbuf *temp_new; + struct rte_mbuf *temp; + uint16_t last_buf_len; + uint16_t nb_new_buf; + uint16_t buf_size; + uint16_t buf_len; + uint16_t len_s; + uint16_t len_d; + uint16_t len; + uint16_t i; + int ret; + char *s; + char *d; + + mb_pool = tx_pkt->pool; + buf_size = tx_pkt->buf_len - RTE_PKTMBUF_HEADROOM; + nb_new_buf = (tx_pkt->pkt_len - 1) / buf_size + 1; + + last_buf_len = tx_pkt->pkt_len % buf_size; + if (last_buf_len == 0) + last_buf_len = buf_size; + + /* Allocate enough mbufs */ + ret = hns3_tx_alloc_mbufs(txq, mb_pool, nb_new_buf, &new_mbuf); + if (ret) + return ret; + + /* Copy the original packet content to the new mbufs */ + temp = tx_pkt; + s = rte_pktmbuf_mtod(temp, char *); + len_s = temp->data_len; + temp_new = new_mbuf; + for (i = 0; i < nb_new_buf; i++) { + d = rte_pktmbuf_mtod(temp_new, char *); + if (i < nb_new_buf - 1) + buf_len = buf_size; + else + buf_len = last_buf_len; + len_d = buf_len; + + while (len_d) { + len = RTE_MIN(len_s, len_d); + memcpy(d, s, len); + s = s + len; + d = d + len; + len_d = len_d - len; + len_s = len_s - len; + + if (len_s == 0) { + temp = temp->next; + if (temp == NULL) + break; + s = rte_pktmbuf_mtod(temp, char *); + len_s = temp->data_len; + } + } + + temp_new->data_len = buf_len; + temp_new = temp_new->next; + } + + /* free original mbufs */ + rte_pktmbuf_free(tx_pkt); + + *new_pkt = new_mbuf; + + return 0; +} + +static void +hns3_parse_outer_params(uint64_t ol_flags, uint32_t *ol_type_vlan_len_msec) +{ + uint32_t tmp = *ol_type_vlan_len_msec; + + /* (outer) IP header type */ + if (ol_flags & PKT_TX_OUTER_IPV4) { + /* OL3 header size, defined in 4 bytes */ 
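+ /*
+ * A worked example: sizeof(struct rte_ipv4_hdr) is 20 bytes, so the
+ * value programmed below is 20 >> HNS3_L3_LEN_UNIT = 5, i.e. the outer
+ * L3 header length expressed in 4-byte units.
+ */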
+ hns3_set_field(tmp, HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S, + sizeof(struct rte_ipv4_hdr) >> HNS3_L3_LEN_UNIT); + if (ol_flags & PKT_TX_OUTER_IP_CKSUM) + hns3_set_field(tmp, HNS3_TXD_OL3T_M, + HNS3_TXD_OL3T_S, HNS3_OL3T_IPV4_CSUM); + else + hns3_set_field(tmp, HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S, + HNS3_OL3T_IPV4_NO_CSUM); + } else if (ol_flags & PKT_TX_OUTER_IPV6) { + hns3_set_field(tmp, HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S, + HNS3_OL3T_IPV6); + /* OL3 header size, defined in 4 bytes */ + hns3_set_field(tmp, HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S, + sizeof(struct rte_ipv6_hdr) >> HNS3_L3_LEN_UNIT); + } + + *ol_type_vlan_len_msec = tmp; +} + +static int +hns3_parse_inner_params(uint64_t ol_flags, uint32_t *ol_type_vlan_len_msec, + struct rte_net_hdr_lens *hdr_lens) +{ + uint32_t tmp = *ol_type_vlan_len_msec; + uint8_t l4_len; + + /* OL2 header size, defined in 2 bytes */ + hns3_set_field(tmp, HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S, + sizeof(struct rte_ether_hdr) >> HNS3_L2_LEN_UNIT); + + /* L4TUNT: L4 Tunneling Type */ + switch (ol_flags & PKT_TX_TUNNEL_MASK) { + case PKT_TX_TUNNEL_GENEVE: + case PKT_TX_TUNNEL_VXLAN: + /* MAC in UDP tunnelling packet, include VxLAN */ + hns3_set_field(tmp, HNS3_TXD_TUNTYPE_M, HNS3_TXD_TUNTYPE_S, + HNS3_TUN_MAC_IN_UDP); + /* + * OL4 header size, defined in 4 Bytes, it contains outer + * L4(UDP) length and tunneling length. + */ + hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S, + (uint8_t)RTE_ETHER_VXLAN_HLEN >> + HNS3_L4_LEN_UNIT); + break; + case PKT_TX_TUNNEL_GRE: + hns3_set_field(tmp, HNS3_TXD_TUNTYPE_M, HNS3_TXD_TUNTYPE_S, + HNS3_TUN_NVGRE); + /* + * OL4 header size, defined in 4 Bytes, it contains outer + * L4(GRE) length and tunneling length. + */ + l4_len = hdr_lens->l4_len + hdr_lens->tunnel_len; + hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S, + l4_len >> HNS3_L4_LEN_UNIT); + break; + default: + /* For non UDP / GRE tunneling, drop the tunnel packet */ + return -EINVAL; + } + + *ol_type_vlan_len_msec = tmp; + + return 0; +} + +static int +hns3_parse_tunneling_params(struct hns3_tx_queue *txq, uint16_t tx_desc_id, + uint64_t ol_flags, + struct rte_net_hdr_lens *hdr_lens) +{ + struct hns3_desc *tx_ring = txq->tx_ring; + struct hns3_desc *desc = &tx_ring[tx_desc_id]; + uint32_t value = 0; + int ret; + + hns3_parse_outer_params(ol_flags, &value); + ret = hns3_parse_inner_params(ol_flags, &value, hdr_lens); + if (ret) + return -EINVAL; + + desc->tx.ol_type_vlan_len_msec |= rte_cpu_to_le_32(value); + + return 0; +} + +static void +hns3_parse_l3_cksum_params(uint64_t ol_flags, uint32_t *type_cs_vlan_tso_len) +{ + uint32_t tmp; + + /* Enable L3 checksum offloads */ + if (ol_flags & PKT_TX_IPV4) { + tmp = *type_cs_vlan_tso_len; + hns3_set_field(tmp, HNS3_TXD_L3T_M, HNS3_TXD_L3T_S, + HNS3_L3T_IPV4); + /* inner(/normal) L3 header size, defined in 4 bytes */ + hns3_set_field(tmp, HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S, + sizeof(struct rte_ipv4_hdr) >> HNS3_L3_LEN_UNIT); + if (ol_flags & PKT_TX_IP_CKSUM) + hns3_set_bit(tmp, HNS3_TXD_L3CS_B, 1); + *type_cs_vlan_tso_len = tmp; + } else if (ol_flags & PKT_TX_IPV6) { + tmp = *type_cs_vlan_tso_len; + /* L3T, IPv6 don't do checksum */ + hns3_set_field(tmp, HNS3_TXD_L3T_M, HNS3_TXD_L3T_S, + HNS3_L3T_IPV6); + /* inner(/normal) L3 header size, defined in 4 bytes */ + hns3_set_field(tmp, HNS3_TXD_L3LEN_M, HNS3_TXD_L3LEN_S, + sizeof(struct rte_ipv6_hdr) >> HNS3_L3_LEN_UNIT); + *type_cs_vlan_tso_len = tmp; + } +} + +static void +hns3_parse_l4_cksum_params(uint64_t ol_flags, uint32_t *type_cs_vlan_tso_len) +{ + uint32_t tmp; + 
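+ /*
+ * A worked example: for PKT_TX_TCP_CKSUM the L4 length written to the
+ * descriptor below is sizeof(struct rte_tcp_hdr) >> HNS3_L4_LEN_UNIT,
+ * i.e. 20 >> 2 = 5 units of 4 bytes.
+ */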
+ /* Enable L4 checksum offloads */ + switch (ol_flags & PKT_TX_L4_MASK) { + case PKT_TX_TCP_CKSUM: + tmp = *type_cs_vlan_tso_len; + hns3_set_field(tmp, HNS3_TXD_L4T_M, HNS3_TXD_L4T_S, + HNS3_L4T_TCP); + hns3_set_bit(tmp, HNS3_TXD_L4CS_B, 1); + hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S, + sizeof(struct rte_tcp_hdr) >> HNS3_L4_LEN_UNIT); + *type_cs_vlan_tso_len = tmp; + break; + case PKT_TX_UDP_CKSUM: + tmp = *type_cs_vlan_tso_len; + hns3_set_field(tmp, HNS3_TXD_L4T_M, HNS3_TXD_L4T_S, + HNS3_L4T_UDP); + hns3_set_bit(tmp, HNS3_TXD_L4CS_B, 1); + hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S, + sizeof(struct rte_udp_hdr) >> HNS3_L4_LEN_UNIT); + *type_cs_vlan_tso_len = tmp; + break; + case PKT_TX_SCTP_CKSUM: + tmp = *type_cs_vlan_tso_len; + hns3_set_field(tmp, HNS3_TXD_L4T_M, HNS3_TXD_L4T_S, + HNS3_L4T_SCTP); + hns3_set_bit(tmp, HNS3_TXD_L4CS_B, 1); + hns3_set_field(tmp, HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S, + sizeof(struct rte_sctp_hdr) >> HNS3_L4_LEN_UNIT); + *type_cs_vlan_tso_len = tmp; + break; + default: + break; + } +} + +static void +hns3_txd_enable_checksum(struct hns3_tx_queue *txq, uint16_t tx_desc_id, + uint64_t ol_flags) +{ + struct hns3_desc *tx_ring = txq->tx_ring; + struct hns3_desc *desc = &tx_ring[tx_desc_id]; + uint32_t value = 0; + + /* inner(/normal) L2 header size, defined in 2 bytes */ + hns3_set_field(value, HNS3_TXD_L2LEN_M, HNS3_TXD_L2LEN_S, + sizeof(struct rte_ether_hdr) >> HNS3_L2_LEN_UNIT); + + hns3_parse_l3_cksum_params(ol_flags, &value); + hns3_parse_l4_cksum_params(ol_flags, &value); + + desc->tx.type_cs_vlan_tso_len |= rte_cpu_to_le_32(value); +} + +static bool +hns3_pkt_need_linearized(struct rte_mbuf *tx_pkts, uint32_t bd_num) +{ + struct rte_mbuf *m_first = tx_pkts; + struct rte_mbuf *m_last = tx_pkts; + uint32_t tot_len = 0; + uint32_t hdr_len; + uint32_t i; + + /* + * Hardware requires that the sum of the data length of every 8 + * consecutive buffers is greater than MSS in hns3 network engine. + * We simplify it by ensuring pkt_headlen + the first 8 consecutive + * frags greater than gso header len + mss, and the remaining 7 + * consecutive frags greater than MSS except the last 7 frags. + */ + if (bd_num <= HNS3_MAX_NON_TSO_BD_PER_PKT) + return false; + + for (i = 0; m_last && i < HNS3_MAX_NON_TSO_BD_PER_PKT - 1; + i++, m_last = m_last->next) + tot_len += m_last->data_len; + + if (!m_last) + return true; + + /* ensure the first 8 frags is greater than mss + header */ + hdr_len = tx_pkts->l2_len + tx_pkts->l3_len + tx_pkts->l4_len; + hdr_len += (tx_pkts->ol_flags & PKT_TX_TUNNEL_MASK) ? + tx_pkts->outer_l2_len + tx_pkts->outer_l3_len : 0; + if (tot_len + m_last->data_len < tx_pkts->tso_segsz + hdr_len) + return true; + + /* + * ensure the sum of the data length of every 7 consecutive buffer + * is greater than mss except the last one. 
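+ * For example, if any such window of seven consecutive fragments carries
+ * less data than tso_segsz, this function returns true and
+ * hns3_prep_pkts() then rejects the packet with EINVAL.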
+ */ + for (i = 0; m_last && i < bd_num - HNS3_MAX_NON_TSO_BD_PER_PKT; i++) { + tot_len -= m_first->data_len; + tot_len += m_last->data_len; + + if (tot_len < tx_pkts->tso_segsz) + return true; + + m_first = m_first->next; + m_last = m_last->next; + } + + return false; +} + +static void +hns3_outer_header_cksum_prepare(struct rte_mbuf *m) +{ + uint64_t ol_flags = m->ol_flags; + struct rte_ipv4_hdr *ipv4_hdr; + struct rte_udp_hdr *udp_hdr; + uint32_t paylen, hdr_len; + + if (!(ol_flags & (PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IPV6))) + return; + + if (ol_flags & PKT_TX_IPV4) { + ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *, + m->outer_l2_len); + + if (ol_flags & PKT_TX_IP_CKSUM) + ipv4_hdr->hdr_checksum = 0; + } + + if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_UDP_CKSUM && + ol_flags & PKT_TX_TCP_SEG) { + hdr_len = m->l2_len + m->l3_len + m->l4_len; + hdr_len += (ol_flags & PKT_TX_TUNNEL_MASK) ? + m->outer_l2_len + m->outer_l3_len : 0; + paylen = m->pkt_len - hdr_len; + if (paylen <= m->tso_segsz) + return; + udp_hdr = rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *, + m->outer_l2_len + + m->outer_l3_len); + udp_hdr->dgram_cksum = 0; + } +} + +static inline bool +hns3_pkt_is_tso(struct rte_mbuf *m) +{ + return (m->tso_segsz != 0 && m->ol_flags & PKT_TX_TCP_SEG); +} + +static int +hns3_check_tso_pkt_valid(struct rte_mbuf *m) +{ + uint32_t tmp_data_len_sum = 0; + uint16_t nb_buf = m->nb_segs; + uint32_t paylen, hdr_len; + struct rte_mbuf *m_seg; + int i; + + if (nb_buf > HNS3_MAX_TSO_BD_PER_PKT) + return -EINVAL; + + hdr_len = m->l2_len + m->l3_len + m->l4_len; + hdr_len += (m->ol_flags & PKT_TX_TUNNEL_MASK) ? + m->outer_l2_len + m->outer_l3_len : 0; + if (hdr_len > HNS3_MAX_TSO_HDR_SIZE) + return -EINVAL; + + paylen = m->pkt_len - hdr_len; + if (paylen > HNS3_MAX_BD_PAYLEN) + return -EINVAL; + + /* + * The TSO header (include outer and inner L2, L3 and L4 header) + * should be provided by three descriptors in maximum in hns3 network + * engine. 
+ */ + m_seg = m; + for (i = 0; m_seg != NULL && i < HNS3_MAX_TSO_HDR_BD_NUM && i < nb_buf; + i++, m_seg = m_seg->next) { + tmp_data_len_sum += m_seg->data_len; + } + + if (hdr_len > tmp_data_len_sum) + return -EINVAL; + + return 0; +} + +uint16_t +hns3_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + struct rte_mbuf *m; + uint16_t i; + int ret; + + for (i = 0; i < nb_pkts; i++) { + m = tx_pkts[i]; + + /* check the size of packet */ + if (m->pkt_len < RTE_ETHER_MIN_LEN) { + rte_errno = EINVAL; + return i; + } + + if (hns3_pkt_is_tso(m) && + (hns3_pkt_need_linearized(m, m->nb_segs) || + hns3_check_tso_pkt_valid(m))) { + rte_errno = EINVAL; + return i; + } + +#ifdef RTE_LIBRTE_ETHDEV_DEBUG + ret = rte_validate_tx_offload(m); + if (ret != 0) { + rte_errno = -ret; + return i; + } +#endif + ret = rte_net_intel_cksum_prepare(m); + if (ret != 0) { + rte_errno = -ret; + return i; + } + + hns3_outer_header_cksum_prepare(m); + } + + return i; +} + +static int +hns3_parse_cksum(struct hns3_tx_queue *txq, uint16_t tx_desc_id, + const struct rte_mbuf *m, struct rte_net_hdr_lens *hdr_lens) +{ + /* Fill in tunneling parameters if necessary */ + if (m->ol_flags & PKT_TX_TUNNEL_MASK) { + (void)rte_net_get_ptype(m, hdr_lens, RTE_PTYPE_ALL_MASK); + if (hns3_parse_tunneling_params(txq, tx_desc_id, m->ol_flags, + hdr_lens)) { + txq->unsupported_tunnel_pkt_cnt++; + return -EINVAL; + } + } + /* Enable checksum offloading */ + if (m->ol_flags & HNS3_TX_CKSUM_OFFLOAD_MASK) + hns3_txd_enable_checksum(txq, tx_desc_id, m->ol_flags); + + return 0; +} + +static int +hns3_check_non_tso_pkt(uint16_t nb_buf, struct rte_mbuf **m_seg, + struct rte_mbuf *tx_pkt, struct hns3_tx_queue *txq) +{ + struct rte_mbuf *new_pkt; + int ret; + + if (hns3_pkt_is_tso(*m_seg)) + return 0; + + /* + * If packet length is greater than HNS3_MAX_FRAME_LEN + * driver support, the packet will be ignored. + */ + if (unlikely(rte_pktmbuf_pkt_len(tx_pkt) > HNS3_MAX_FRAME_LEN)) { + txq->over_length_pkt_cnt++; + return -EINVAL; + } + + if (unlikely(nb_buf > HNS3_MAX_NON_TSO_BD_PER_PKT)) { + txq->exceed_limit_bd_pkt_cnt++; + ret = hns3_reassemble_tx_pkts(txq, tx_pkt, &new_pkt); + if (ret) { + txq->exceed_limit_bd_reassem_fail++; + return ret; + } + *m_seg = new_pkt; + } + + return 0; +} + +uint16_t +hns3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) +{ + struct rte_net_hdr_lens hdr_lens = {0}; + struct hns3_tx_queue *txq = tx_queue; + struct hns3_entry *tx_bak_pkt; + struct rte_mbuf *tx_pkt; + struct rte_mbuf *m_seg; + uint32_t nb_hold = 0; + uint16_t tx_next_use; + uint16_t tx_pkt_num; + uint16_t tx_bd_max; + uint16_t nb_buf; + uint16_t nb_tx; + uint16_t i; + + /* free useless buffer */ + hns3_tx_free_useless_buffer(txq); + + tx_next_use = txq->next_to_use; + tx_bd_max = txq->nb_tx_desc; + tx_pkt_num = nb_pkts; + + /* send packets */ + tx_bak_pkt = &txq->sw_ring[tx_next_use]; + for (nb_tx = 0; nb_tx < tx_pkt_num; nb_tx++) { + tx_pkt = *tx_pkts++; + + nb_buf = tx_pkt->nb_segs; + + if (nb_buf > txq->tx_bd_ready) { + txq->queue_full_cnt++; + if (nb_tx == 0) + return 0; + + goto end_of_tx; + } + + /* + * If packet length is less than minimum packet size, driver + * need to pad it. 
+ */ + if (unlikely(rte_pktmbuf_pkt_len(tx_pkt) < HNS3_MIN_PKT_SIZE)) { + uint16_t add_len; + char *appended; + + add_len = HNS3_MIN_PKT_SIZE - + rte_pktmbuf_pkt_len(tx_pkt); + appended = rte_pktmbuf_append(tx_pkt, add_len); + if (appended == NULL) { + txq->pkt_padding_fail_cnt++; + break; + } + + memset(appended, 0, add_len); + } + + m_seg = tx_pkt; + + if (hns3_check_non_tso_pkt(nb_buf, &m_seg, tx_pkt, txq)) + goto end_of_tx; + + if (hns3_parse_cksum(txq, tx_next_use, m_seg, &hdr_lens)) + goto end_of_tx; + + i = 0; + do { + fill_desc(txq, tx_next_use, m_seg, (i == 0), 0); + tx_bak_pkt->mbuf = m_seg; + m_seg = m_seg->next; + tx_next_use++; + tx_bak_pkt++; + if (tx_next_use >= tx_bd_max) { + tx_next_use = 0; + tx_bak_pkt = txq->sw_ring; + } + + i++; + } while (m_seg != NULL); + + nb_hold += i; + txq->next_to_use = tx_next_use; + txq->tx_bd_ready -= i; + } + +end_of_tx: + + if (likely(nb_tx)) + hns3_queue_xmit(txq, nb_hold); + + return nb_tx; +} + +static uint16_t +hns3_dummy_rxtx_burst(void *dpdk_txq __rte_unused, + struct rte_mbuf **pkts __rte_unused, + uint16_t pkts_n __rte_unused) +{ + return 0; +} + +void hns3_set_rxtx_function(struct rte_eth_dev *eth_dev) +{ + struct hns3_adapter *hns = eth_dev->data->dev_private; + + if (hns->hw.adapter_state == HNS3_NIC_STARTED && + rte_atomic16_read(&hns->hw.reset.resetting) == 0) { + eth_dev->rx_pkt_burst = hns3_recv_pkts; + eth_dev->tx_pkt_burst = hns3_xmit_pkts; + eth_dev->tx_pkt_prepare = hns3_prep_pkts; + } else { + eth_dev->rx_pkt_burst = hns3_dummy_rxtx_burst; + eth_dev->tx_pkt_burst = hns3_dummy_rxtx_burst; + eth_dev->tx_pkt_prepare = hns3_dummy_rxtx_burst; + } +} diff --git a/src/spdk/dpdk/drivers/net/hns3/hns3_rxtx.h b/src/spdk/dpdk/drivers/net/hns3/hns3_rxtx.h new file mode 100644 index 000000000..0cb92ce9b --- /dev/null +++ b/src/spdk/dpdk/drivers/net/hns3/hns3_rxtx.h @@ -0,0 +1,380 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018-2019 Hisilicon Limited. 
+ */ + +#ifndef _HNS3_RXTX_H_ +#define _HNS3_RXTX_H_ + +#define HNS3_MIN_RING_DESC 64 +#define HNS3_MAX_RING_DESC 32768 +#define HNS3_DEFAULT_RING_DESC 1024 +#define HNS3_ALIGN_RING_DESC 32 +#define HNS3_RING_BASE_ALIGN 128 + +#define HNS3_BD_SIZE_512_TYPE 0 +#define HNS3_BD_SIZE_1024_TYPE 1 +#define HNS3_BD_SIZE_2048_TYPE 2 +#define HNS3_BD_SIZE_4096_TYPE 3 + +#define HNS3_RX_FLAG_VLAN_PRESENT 0x1 +#define HNS3_RX_FLAG_L3ID_IPV4 0x0 +#define HNS3_RX_FLAG_L3ID_IPV6 0x1 +#define HNS3_RX_FLAG_L4ID_UDP 0x0 +#define HNS3_RX_FLAG_L4ID_TCP 0x1 + +#define HNS3_RXD_DMAC_S 0 +#define HNS3_RXD_DMAC_M (0x3 << HNS3_RXD_DMAC_S) +#define HNS3_RXD_VLAN_S 2 +#define HNS3_RXD_VLAN_M (0x3 << HNS3_RXD_VLAN_S) +#define HNS3_RXD_L3ID_S 4 +#define HNS3_RXD_L3ID_M (0xf << HNS3_RXD_L3ID_S) +#define HNS3_RXD_L4ID_S 8 +#define HNS3_RXD_L4ID_M (0xf << HNS3_RXD_L4ID_S) +#define HNS3_RXD_FRAG_B 12 +#define HNS3_RXD_STRP_TAGP_S 13 +#define HNS3_RXD_STRP_TAGP_M (0x3 << HNS3_RXD_STRP_TAGP_S) + +#define HNS3_RXD_L2E_B 16 +#define HNS3_RXD_L3E_B 17 +#define HNS3_RXD_L4E_B 18 +#define HNS3_RXD_TRUNCAT_B 19 +#define HNS3_RXD_HOI_B 20 +#define HNS3_RXD_DOI_B 21 +#define HNS3_RXD_OL3E_B 22 +#define HNS3_RXD_OL4E_B 23 +#define HNS3_RXD_GRO_COUNT_S 24 +#define HNS3_RXD_GRO_COUNT_M (0x3f << HNS3_RXD_GRO_COUNT_S) +#define HNS3_RXD_GRO_FIXID_B 30 +#define HNS3_RXD_GRO_ECN_B 31 + +#define HNS3_RXD_ODMAC_S 0 +#define HNS3_RXD_ODMAC_M (0x3 << HNS3_RXD_ODMAC_S) +#define HNS3_RXD_OVLAN_S 2 +#define HNS3_RXD_OVLAN_M (0x3 << HNS3_RXD_OVLAN_S) +#define HNS3_RXD_OL3ID_S 4 +#define HNS3_RXD_OL3ID_M (0xf << HNS3_RXD_OL3ID_S) +#define HNS3_RXD_OL4ID_S 8 +#define HNS3_RXD_OL4ID_M (0xf << HNS3_RXD_OL4ID_S) +#define HNS3_RXD_FBHI_S 12 +#define HNS3_RXD_FBHI_M (0x3 << HNS3_RXD_FBHI_S) +#define HNS3_RXD_FBLI_S 14 +#define HNS3_RXD_FBLI_M (0x3 << HNS3_RXD_FBLI_S) + +#define HNS3_RXD_BDTYPE_S 0 +#define HNS3_RXD_BDTYPE_M (0xf << HNS3_RXD_BDTYPE_S) +#define HNS3_RXD_VLD_B 4 +#define HNS3_RXD_UDP0_B 5 +#define HNS3_RXD_EXTEND_B 7 +#define HNS3_RXD_FE_B 8 +#define HNS3_RXD_LUM_B 9 +#define HNS3_RXD_CRCP_B 10 +#define HNS3_RXD_L3L4P_B 11 +#define HNS3_RXD_TSIND_S 12 +#define HNS3_RXD_TSIND_M (0x7 << HNS3_RXD_TSIND_S) +#define HNS3_RXD_LKBK_B 15 +#define HNS3_RXD_GRO_SIZE_S 16 +#define HNS3_RXD_GRO_SIZE_M (0x3ff << HNS3_RXD_GRO_SIZE_S) + +#define HNS3_TXD_L3T_S 0 +#define HNS3_TXD_L3T_M (0x3 << HNS3_TXD_L3T_S) +#define HNS3_TXD_L4T_S 2 +#define HNS3_TXD_L4T_M (0x3 << HNS3_TXD_L4T_S) +#define HNS3_TXD_L3CS_B 4 +#define HNS3_TXD_L4CS_B 5 +#define HNS3_TXD_VLAN_B 6 +#define HNS3_TXD_TSO_B 7 + +#define HNS3_TXD_L2LEN_S 8 +#define HNS3_TXD_L2LEN_M (0xff << HNS3_TXD_L2LEN_S) +#define HNS3_TXD_L3LEN_S 16 +#define HNS3_TXD_L3LEN_M (0xff << HNS3_TXD_L3LEN_S) +#define HNS3_TXD_L4LEN_S 24 +#define HNS3_TXD_L4LEN_M (0xffUL << HNS3_TXD_L4LEN_S) + +#define HNS3_TXD_OL3T_S 0 +#define HNS3_TXD_OL3T_M (0x3 << HNS3_TXD_OL3T_S) +#define HNS3_TXD_OVLAN_B 2 +#define HNS3_TXD_MACSEC_B 3 +#define HNS3_TXD_TUNTYPE_S 4 +#define HNS3_TXD_TUNTYPE_M (0xf << HNS3_TXD_TUNTYPE_S) + +#define HNS3_TXD_BDTYPE_S 0 +#define HNS3_TXD_BDTYPE_M (0xf << HNS3_TXD_BDTYPE_S) +#define HNS3_TXD_FE_B 4 +#define HNS3_TXD_SC_S 5 +#define HNS3_TXD_SC_M (0x3 << HNS3_TXD_SC_S) +#define HNS3_TXD_EXTEND_B 7 +#define HNS3_TXD_VLD_B 8 +#define HNS3_TXD_RI_B 9 +#define HNS3_TXD_RA_B 10 +#define HNS3_TXD_TSYN_B 11 +#define HNS3_TXD_DECTTL_S 12 +#define HNS3_TXD_DECTTL_M (0xf << HNS3_TXD_DECTTL_S) + +#define HNS3_TXD_MSS_S 0 +#define HNS3_TXD_MSS_M (0x3fff << HNS3_TXD_MSS_S) + +#define HNS3_L2_LEN_UNIT 1UL 
+#define HNS3_L3_LEN_UNIT 2UL +#define HNS3_L4_LEN_UNIT 2UL + +enum hns3_pkt_l2t_type { + HNS3_L2_TYPE_UNICAST, + HNS3_L2_TYPE_MULTICAST, + HNS3_L2_TYPE_BROADCAST, + HNS3_L2_TYPE_INVALID, +}; + +enum hns3_pkt_l3t_type { + HNS3_L3T_NONE, + HNS3_L3T_IPV6, + HNS3_L3T_IPV4, + HNS3_L3T_RESERVED +}; + +enum hns3_pkt_l4t_type { + HNS3_L4T_UNKNOWN, + HNS3_L4T_TCP, + HNS3_L4T_UDP, + HNS3_L4T_SCTP +}; + +enum hns3_pkt_ol3t_type { + HNS3_OL3T_NONE, + HNS3_OL3T_IPV6, + HNS3_OL3T_IPV4_NO_CSUM, + HNS3_OL3T_IPV4_CSUM +}; + +enum hns3_pkt_tun_type { + HNS3_TUN_NONE, + HNS3_TUN_MAC_IN_UDP, + HNS3_TUN_NVGRE, + HNS3_TUN_OTHER +}; + +/* hardware spec ring buffer format */ +struct hns3_desc { + union { + uint64_t addr; + struct { + uint32_t addr0; + uint32_t addr1; + }; + }; + union { + struct { + uint16_t vlan_tag; + uint16_t send_size; + union { + /* + * L3T | L4T | L3CS | L4CS | VLAN | TSO | + * L2_LEN + */ + uint32_t type_cs_vlan_tso_len; + struct { + uint8_t type_cs_vlan_tso; + uint8_t l2_len; + uint8_t l3_len; + uint8_t l4_len; + }; + }; + uint16_t outer_vlan_tag; + uint16_t tv; + union { + /* OL3T | OVALAN | MACSEC */ + uint32_t ol_type_vlan_len_msec; + struct { + uint8_t ol_type_vlan_msec; + uint8_t ol2_len; + uint8_t ol3_len; + uint8_t ol4_len; + }; + }; + + uint32_t paylen; + uint16_t tp_fe_sc_vld_ra_ri; + uint16_t mss; + } tx; + + struct { + uint32_t l234_info; + uint16_t pkt_len; + uint16_t size; + uint32_t rss_hash; + uint16_t fd_id; + uint16_t vlan_tag; + union { + uint32_t ol_info; + struct { + uint16_t o_dm_vlan_id_fb; + uint16_t ot_vlan_tag; + }; + }; + uint32_t bd_base_info; + } rx; + }; +} __rte_packed; + +struct hns3_entry { + struct rte_mbuf *mbuf; +}; + +struct hns3_rx_queue { + void *io_base; + struct hns3_adapter *hns; + struct rte_mempool *mb_pool; + struct hns3_desc *rx_ring; + uint64_t rx_ring_phys_addr; /* RX ring DMA address */ + const struct rte_memzone *mz; + struct hns3_entry *sw_ring; + + struct rte_mbuf *pkt_first_seg; + struct rte_mbuf *pkt_last_seg; + + uint16_t queue_id; + uint16_t port_id; + uint16_t nb_rx_desc; + uint16_t nb_rx_hold; + uint16_t rx_tail; + uint16_t next_to_clean; + uint16_t next_to_use; + uint16_t rx_buf_len; + uint16_t rx_free_thresh; + + bool rx_deferred_start; /* don't start this queue in dev start */ + bool configured; /* indicate if rx queue has been configured */ + + uint64_t l2_errors; + uint64_t pkt_len_errors; + uint64_t l3_csum_erros; + uint64_t l4_csum_erros; + uint64_t ol3_csum_erros; + uint64_t ol4_csum_erros; +}; + +struct hns3_tx_queue { + void *io_base; + struct hns3_adapter *hns; + struct hns3_desc *tx_ring; + uint64_t tx_ring_phys_addr; /* TX ring DMA address */ + const struct rte_memzone *mz; + struct hns3_entry *sw_ring; + + uint16_t queue_id; + uint16_t port_id; + uint16_t nb_tx_desc; + uint16_t next_to_clean; + uint16_t next_to_use; + uint16_t tx_bd_ready; + + bool tx_deferred_start; /* don't start this queue in dev start */ + bool configured; /* indicate if tx queue has been configured */ + + /* + * The following items are used for the abnormal errors statistics in + * the Tx datapath. When upper level application calls the + * rte_eth_tx_burst API function to send multiple packets at a time with + * burst mode based on hns3 network engine, there are some abnormal + * conditions that cause the driver to fail to operate the hardware to + * send packets correctly. + * Note: When using burst mode to call the rte_eth_tx_burst API function + * to send multiple packets at a time. 
When the first abnormal error is + * detected, add one to the relevant error statistics item, and then + * exit the loop of sending multiple packets of the function. That is to + * say, even if there are multiple packets in which abnormal errors may + * be detected in the burst, the relevant error statistics in the driver + * will only be increased by one. + * The detail description of the Tx abnormal errors statistic items as + * below: + * - over_length_pkt_cnt + * Total number of greater than HNS3_MAX_FRAME_LEN the driver + * supported. + * + * - exceed_limit_bd_pkt_cnt + * Total number of exceeding the hardware limited bd which process + * a packet needed bd numbers. + * + * - exceed_limit_bd_reassem_fail + * Total number of exceeding the hardware limited bd fail which + * process a packet needed bd numbers and reassemble fail. + * + * - unsupported_tunnel_pkt_cnt + * Total number of unsupported tunnel packet. The unsupported tunnel + * type: vxlan_gpe, gtp, ipip and MPLSINUDP, MPLSINUDP is a packet + * with MPLS-in-UDP RFC 7510 header. + * + * - queue_full_cnt + * Total count which the available bd numbers in current bd queue is + * less than the bd numbers with the pkt process needed. + * + * - pkt_padding_fail_cnt + * Total count which the packet length is less than minimum packet + * size HNS3_MIN_PKT_SIZE and fail to be appended with 0. + */ + uint64_t over_length_pkt_cnt; + uint64_t exceed_limit_bd_pkt_cnt; + uint64_t exceed_limit_bd_reassem_fail; + uint64_t unsupported_tunnel_pkt_cnt; + uint64_t queue_full_cnt; + uint64_t pkt_padding_fail_cnt; +}; + +struct hns3_queue_info { + const char *type; /* point to queue memory name */ + const char *ring_name; /* point to hardware ring name */ + uint16_t idx; + uint16_t nb_desc; + unsigned int socket_id; +}; + +#define HNS3_TX_CKSUM_OFFLOAD_MASK ( \ + PKT_TX_OUTER_IPV6 | \ + PKT_TX_OUTER_IPV4 | \ + PKT_TX_OUTER_IP_CKSUM | \ + PKT_TX_IPV6 | \ + PKT_TX_IPV4 | \ + PKT_TX_IP_CKSUM | \ + PKT_TX_L4_MASK | \ + PKT_TX_TUNNEL_MASK) + +enum hns3_cksum_status { + HNS3_CKSUM_NONE = 0, + HNS3_L3_CKSUM_ERR = 1, + HNS3_L4_CKSUM_ERR = 2, + HNS3_OUTER_L3_CKSUM_ERR = 4, + HNS3_OUTER_L4_CKSUM_ERR = 8 +}; + +void hns3_dev_rx_queue_release(void *queue); +void hns3_dev_tx_queue_release(void *queue); +void hns3_free_all_queues(struct rte_eth_dev *dev); +int hns3_reset_all_queues(struct hns3_adapter *hns); +void hns3_dev_all_rx_queue_intr_enable(struct hns3_hw *hw, bool en); +int hns3_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id); +int hns3_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id); +void hns3_enable_all_queues(struct hns3_hw *hw, bool en); +int hns3_start_queues(struct hns3_adapter *hns, bool reset_queue); +int hns3_stop_queues(struct hns3_adapter *hns, bool reset_queue); +void hns3_dev_release_mbufs(struct hns3_adapter *hns); +int hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc, + unsigned int socket, const struct rte_eth_rxconf *conf, + struct rte_mempool *mp); +int hns3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc, + unsigned int socket, const struct rte_eth_txconf *conf); +uint16_t hns3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); +uint16_t hns3_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); +uint16_t hns3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); +const uint32_t *hns3_dev_supported_ptypes_get(struct rte_eth_dev *dev); +void hns3_set_rxtx_function(struct 
rte_eth_dev *eth_dev); +void hns3_set_queue_intr_gl(struct hns3_hw *hw, uint16_t queue_id, + uint8_t gl_idx, uint16_t gl_value); +void hns3_set_queue_intr_rl(struct hns3_hw *hw, uint16_t queue_id, + uint16_t rl_value); +int hns3_set_fake_rx_or_tx_queues(struct rte_eth_dev *dev, uint16_t nb_rx_q, + uint16_t nb_tx_q); + +#endif /* _HNS3_RXTX_H_ */ diff --git a/src/spdk/dpdk/drivers/net/hns3/hns3_stats.c b/src/spdk/dpdk/drivers/net/hns3/hns3_stats.c new file mode 100644 index 000000000..d2467a484 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/hns3/hns3_stats.c @@ -0,0 +1,1010 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018-2019 Hisilicon Limited. + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "hns3_ethdev.h" +#include "hns3_rxtx.h" +#include "hns3_logs.h" +#include "hns3_regs.h" + +/* MAC statistics */ +static const struct hns3_xstats_name_offset hns3_mac_strings[] = { + {"mac_tx_mac_pause_num", + HNS3_MAC_STATS_OFFSET(mac_tx_mac_pause_num)}, + {"mac_rx_mac_pause_num", + HNS3_MAC_STATS_OFFSET(mac_rx_mac_pause_num)}, + {"mac_tx_control_pkt_num", + HNS3_MAC_STATS_OFFSET(mac_tx_ctrl_pkt_num)}, + {"mac_rx_control_pkt_num", + HNS3_MAC_STATS_OFFSET(mac_rx_ctrl_pkt_num)}, + {"mac_tx_pfc_pkt_num", + HNS3_MAC_STATS_OFFSET(mac_tx_pfc_pause_pkt_num)}, + {"mac_tx_pfc_pri0_pkt_num", + HNS3_MAC_STATS_OFFSET(mac_tx_pfc_pri0_pkt_num)}, + {"mac_tx_pfc_pri1_pkt_num", + HNS3_MAC_STATS_OFFSET(mac_tx_pfc_pri1_pkt_num)}, + {"mac_tx_pfc_pri2_pkt_num", + HNS3_MAC_STATS_OFFSET(mac_tx_pfc_pri2_pkt_num)}, + {"mac_tx_pfc_pri3_pkt_num", + HNS3_MAC_STATS_OFFSET(mac_tx_pfc_pri3_pkt_num)}, + {"mac_tx_pfc_pri4_pkt_num", + HNS3_MAC_STATS_OFFSET(mac_tx_pfc_pri4_pkt_num)}, + {"mac_tx_pfc_pri5_pkt_num", + HNS3_MAC_STATS_OFFSET(mac_tx_pfc_pri5_pkt_num)}, + {"mac_tx_pfc_pri6_pkt_num", + HNS3_MAC_STATS_OFFSET(mac_tx_pfc_pri6_pkt_num)}, + {"mac_tx_pfc_pri7_pkt_num", + HNS3_MAC_STATS_OFFSET(mac_tx_pfc_pri7_pkt_num)}, + {"mac_rx_pfc_pkt_num", + HNS3_MAC_STATS_OFFSET(mac_rx_pfc_pause_pkt_num)}, + {"mac_rx_pfc_pri0_pkt_num", + HNS3_MAC_STATS_OFFSET(mac_rx_pfc_pri0_pkt_num)}, + {"mac_rx_pfc_pri1_pkt_num", + HNS3_MAC_STATS_OFFSET(mac_rx_pfc_pri1_pkt_num)}, + {"mac_rx_pfc_pri2_pkt_num", + HNS3_MAC_STATS_OFFSET(mac_rx_pfc_pri2_pkt_num)}, + {"mac_rx_pfc_pri3_pkt_num", + HNS3_MAC_STATS_OFFSET(mac_rx_pfc_pri3_pkt_num)}, + {"mac_rx_pfc_pri4_pkt_num", + HNS3_MAC_STATS_OFFSET(mac_rx_pfc_pri4_pkt_num)}, + {"mac_rx_pfc_pri5_pkt_num", + HNS3_MAC_STATS_OFFSET(mac_rx_pfc_pri5_pkt_num)}, + {"mac_rx_pfc_pri6_pkt_num", + HNS3_MAC_STATS_OFFSET(mac_rx_pfc_pri6_pkt_num)}, + {"mac_rx_pfc_pri7_pkt_num", + HNS3_MAC_STATS_OFFSET(mac_rx_pfc_pri7_pkt_num)}, + {"mac_tx_total_pkt_num", + HNS3_MAC_STATS_OFFSET(mac_tx_total_pkt_num)}, + {"mac_tx_total_oct_num", + HNS3_MAC_STATS_OFFSET(mac_tx_total_oct_num)}, + {"mac_tx_good_pkt_num", + HNS3_MAC_STATS_OFFSET(mac_tx_good_pkt_num)}, + {"mac_tx_bad_pkt_num", + HNS3_MAC_STATS_OFFSET(mac_tx_bad_pkt_num)}, + {"mac_tx_good_oct_num", + HNS3_MAC_STATS_OFFSET(mac_tx_good_oct_num)}, + {"mac_tx_bad_oct_num", + HNS3_MAC_STATS_OFFSET(mac_tx_bad_oct_num)}, + {"mac_tx_uni_pkt_num", + HNS3_MAC_STATS_OFFSET(mac_tx_uni_pkt_num)}, + {"mac_tx_multi_pkt_num", + HNS3_MAC_STATS_OFFSET(mac_tx_multi_pkt_num)}, + {"mac_tx_broad_pkt_num", + HNS3_MAC_STATS_OFFSET(mac_tx_broad_pkt_num)}, + {"mac_tx_undersize_pkt_num", + HNS3_MAC_STATS_OFFSET(mac_tx_undersize_pkt_num)}, + {"mac_tx_oversize_pkt_num", + HNS3_MAC_STATS_OFFSET(mac_tx_oversize_pkt_num)}, + {"mac_tx_64_oct_pkt_num", + 
HNS3_MAC_STATS_OFFSET(mac_tx_64_oct_pkt_num)}, + {"mac_tx_65_127_oct_pkt_num", + HNS3_MAC_STATS_OFFSET(mac_tx_65_127_oct_pkt_num)}, + {"mac_tx_128_255_oct_pkt_num", + HNS3_MAC_STATS_OFFSET(mac_tx_128_255_oct_pkt_num)}, + {"mac_tx_256_511_oct_pkt_num", + HNS3_MAC_STATS_OFFSET(mac_tx_256_511_oct_pkt_num)}, + {"mac_tx_512_1023_oct_pkt_num", + HNS3_MAC_STATS_OFFSET(mac_tx_512_1023_oct_pkt_num)}, + {"mac_tx_1024_1518_oct_pkt_num", + HNS3_MAC_STATS_OFFSET(mac_tx_1024_1518_oct_pkt_num)}, + {"mac_tx_1519_2047_oct_pkt_num", + HNS3_MAC_STATS_OFFSET(mac_tx_1519_2047_oct_pkt_num)}, + {"mac_tx_2048_4095_oct_pkt_num", + HNS3_MAC_STATS_OFFSET(mac_tx_2048_4095_oct_pkt_num)}, + {"mac_tx_4096_8191_oct_pkt_num", + HNS3_MAC_STATS_OFFSET(mac_tx_4096_8191_oct_pkt_num)}, + {"mac_tx_8192_9216_oct_pkt_num", + HNS3_MAC_STATS_OFFSET(mac_tx_8192_9216_oct_pkt_num)}, + {"mac_tx_9217_12287_oct_pkt_num", + HNS3_MAC_STATS_OFFSET(mac_tx_9217_12287_oct_pkt_num)}, + {"mac_tx_12288_16383_oct_pkt_num", + HNS3_MAC_STATS_OFFSET(mac_tx_12288_16383_oct_pkt_num)}, + {"mac_tx_1519_max_good_pkt_num", + HNS3_MAC_STATS_OFFSET(mac_tx_1519_max_good_oct_pkt_num)}, + {"mac_tx_1519_max_bad_pkt_num", + HNS3_MAC_STATS_OFFSET(mac_tx_1519_max_bad_oct_pkt_num)}, + {"mac_rx_total_pkt_num", + HNS3_MAC_STATS_OFFSET(mac_rx_total_pkt_num)}, + {"mac_rx_total_oct_num", + HNS3_MAC_STATS_OFFSET(mac_rx_total_oct_num)}, + {"mac_rx_good_pkt_num", + HNS3_MAC_STATS_OFFSET(mac_rx_good_pkt_num)}, + {"mac_rx_bad_pkt_num", + HNS3_MAC_STATS_OFFSET(mac_rx_bad_pkt_num)}, + {"mac_rx_good_oct_num", + HNS3_MAC_STATS_OFFSET(mac_rx_good_oct_num)}, + {"mac_rx_bad_oct_num", + HNS3_MAC_STATS_OFFSET(mac_rx_bad_oct_num)}, + {"mac_rx_uni_pkt_num", + HNS3_MAC_STATS_OFFSET(mac_rx_uni_pkt_num)}, + {"mac_rx_multi_pkt_num", + HNS3_MAC_STATS_OFFSET(mac_rx_multi_pkt_num)}, + {"mac_rx_broad_pkt_num", + HNS3_MAC_STATS_OFFSET(mac_rx_broad_pkt_num)}, + {"mac_rx_undersize_pkt_num", + HNS3_MAC_STATS_OFFSET(mac_rx_undersize_pkt_num)}, + {"mac_rx_oversize_pkt_num", + HNS3_MAC_STATS_OFFSET(mac_rx_oversize_pkt_num)}, + {"mac_rx_64_oct_pkt_num", + HNS3_MAC_STATS_OFFSET(mac_rx_64_oct_pkt_num)}, + {"mac_rx_65_127_oct_pkt_num", + HNS3_MAC_STATS_OFFSET(mac_rx_65_127_oct_pkt_num)}, + {"mac_rx_128_255_oct_pkt_num", + HNS3_MAC_STATS_OFFSET(mac_rx_128_255_oct_pkt_num)}, + {"mac_rx_256_511_oct_pkt_num", + HNS3_MAC_STATS_OFFSET(mac_rx_256_511_oct_pkt_num)}, + {"mac_rx_512_1023_oct_pkt_num", + HNS3_MAC_STATS_OFFSET(mac_rx_512_1023_oct_pkt_num)}, + {"mac_rx_1024_1518_oct_pkt_num", + HNS3_MAC_STATS_OFFSET(mac_rx_1024_1518_oct_pkt_num)}, + {"mac_rx_1519_2047_oct_pkt_num", + HNS3_MAC_STATS_OFFSET(mac_rx_1519_2047_oct_pkt_num)}, + {"mac_rx_2048_4095_oct_pkt_num", + HNS3_MAC_STATS_OFFSET(mac_rx_2048_4095_oct_pkt_num)}, + {"mac_rx_4096_8191_oct_pkt_num", + HNS3_MAC_STATS_OFFSET(mac_rx_4096_8191_oct_pkt_num)}, + {"mac_rx_8192_9216_oct_pkt_num", + HNS3_MAC_STATS_OFFSET(mac_rx_8192_9216_oct_pkt_num)}, + {"mac_rx_9217_12287_oct_pkt_num", + HNS3_MAC_STATS_OFFSET(mac_rx_9217_12287_oct_pkt_num)}, + {"mac_rx_12288_16383_oct_pkt_num", + HNS3_MAC_STATS_OFFSET(mac_rx_12288_16383_oct_pkt_num)}, + {"mac_rx_1519_max_good_pkt_num", + HNS3_MAC_STATS_OFFSET(mac_rx_1519_max_good_oct_pkt_num)}, + {"mac_rx_1519_max_bad_pkt_num", + HNS3_MAC_STATS_OFFSET(mac_rx_1519_max_bad_oct_pkt_num)}, + {"mac_tx_fragment_pkt_num", + HNS3_MAC_STATS_OFFSET(mac_tx_fragment_pkt_num)}, + {"mac_tx_undermin_pkt_num", + HNS3_MAC_STATS_OFFSET(mac_tx_undermin_pkt_num)}, + {"mac_tx_jabber_pkt_num", + HNS3_MAC_STATS_OFFSET(mac_tx_jabber_pkt_num)}, + 
{"mac_tx_err_all_pkt_num", + HNS3_MAC_STATS_OFFSET(mac_tx_err_all_pkt_num)}, + {"mac_tx_from_app_good_pkt_num", + HNS3_MAC_STATS_OFFSET(mac_tx_from_app_good_pkt_num)}, + {"mac_tx_from_app_bad_pkt_num", + HNS3_MAC_STATS_OFFSET(mac_tx_from_app_bad_pkt_num)}, + {"mac_rx_fragment_pkt_num", + HNS3_MAC_STATS_OFFSET(mac_rx_fragment_pkt_num)}, + {"mac_rx_undermin_pkt_num", + HNS3_MAC_STATS_OFFSET(mac_rx_undermin_pkt_num)}, + {"mac_rx_jabber_pkt_num", + HNS3_MAC_STATS_OFFSET(mac_rx_jabber_pkt_num)}, + {"mac_rx_fcs_err_pkt_num", + HNS3_MAC_STATS_OFFSET(mac_rx_fcs_err_pkt_num)}, + {"mac_rx_send_app_good_pkt_num", + HNS3_MAC_STATS_OFFSET(mac_rx_send_app_good_pkt_num)}, + {"mac_rx_send_app_bad_pkt_num", + HNS3_MAC_STATS_OFFSET(mac_rx_send_app_bad_pkt_num)} +}; + +static const struct hns3_xstats_name_offset hns3_error_int_stats_strings[] = { + {"MAC_AFIFO_TNL_INT_R", + HNS3_ERR_INT_STATS_FIELD_OFFSET(mac_afifo_tnl_intr_cnt)}, + {"PPU_MPF_ABNORMAL_INT_ST2", + HNS3_ERR_INT_STATS_FIELD_OFFSET(ppu_mpf_abnormal_intr_st2_cnt)}, + {"SSU_PORT_BASED_ERR_INT", + HNS3_ERR_INT_STATS_FIELD_OFFSET(ssu_port_based_pf_intr_cnt)}, + {"PPP_PF_ABNORMAL_INT_ST0", + HNS3_ERR_INT_STATS_FIELD_OFFSET(ppp_pf_abnormal_intr_cnt)}, + {"PPU_PF_ABNORMAL_INT_ST", + HNS3_ERR_INT_STATS_FIELD_OFFSET(ppu_pf_abnormal_intr_cnt)} +}; + +/* The statistic of reset */ +static const struct hns3_xstats_name_offset hns3_reset_stats_strings[] = { + {"REQ_RESET_CNT", + HNS3_RESET_STATS_FIELD_OFFSET(request_cnt)}, + {"GLOBAL_RESET_CNT", + HNS3_RESET_STATS_FIELD_OFFSET(global_cnt)}, + {"IMP_RESET_CNT", + HNS3_RESET_STATS_FIELD_OFFSET(imp_cnt)}, + {"RESET_EXEC_CNT", + HNS3_RESET_STATS_FIELD_OFFSET(exec_cnt)}, + {"RESET_SUCCESS_CNT", + HNS3_RESET_STATS_FIELD_OFFSET(success_cnt)}, + {"RESET_FAIL_CNT", + HNS3_RESET_STATS_FIELD_OFFSET(fail_cnt)}, + {"RESET_MERGE_CNT", + HNS3_RESET_STATS_FIELD_OFFSET(merge_cnt)} +}; + +/* The statistic of errors in Rx BD */ +static const struct hns3_xstats_name_offset hns3_rx_bd_error_strings[] = { + {"RX_PKT_LEN_ERRORS", + HNS3_RX_BD_ERROR_STATS_FIELD_OFFSET(pkt_len_errors)}, + {"L2_RX_ERRORS", + HNS3_RX_BD_ERROR_STATS_FIELD_OFFSET(l2_errors)}, + {"RX_L3_CHECKSUM_ERRORS", + HNS3_RX_BD_ERROR_STATS_FIELD_OFFSET(l3_csum_erros)}, + {"RX_L4_CHECKSUM_ERRORS", + HNS3_RX_BD_ERROR_STATS_FIELD_OFFSET(l4_csum_erros)}, + {"RX_OL3_CHECKSUM_ERRORS", + HNS3_RX_BD_ERROR_STATS_FIELD_OFFSET(ol3_csum_erros)}, + {"RX_OL4_CHECKSUM_ERRORS", + HNS3_RX_BD_ERROR_STATS_FIELD_OFFSET(ol4_csum_erros)} +}; + +/* The statistic of the Tx errors */ +static const struct hns3_xstats_name_offset hns3_tx_errors_strings[] = { + {"TX_OVER_LENGTH_PKT_CNT", + HNS3_TX_ERROR_STATS_FIELD_OFFSET(over_length_pkt_cnt)}, + {"TX_EXCEED_LIMITED_BD_PKT_CNT", + HNS3_TX_ERROR_STATS_FIELD_OFFSET(exceed_limit_bd_pkt_cnt)}, + {"TX_EXCEED_LIMITED_BD_PKT_REASSEMBLE_FAIL_CNT", + HNS3_TX_ERROR_STATS_FIELD_OFFSET(exceed_limit_bd_reassem_fail)}, + {"TX_UNSUPPORTED_TUNNEL_PKT_CNT", + HNS3_TX_ERROR_STATS_FIELD_OFFSET(unsupported_tunnel_pkt_cnt)}, + {"TX_QUEUE_FULL_CNT", + HNS3_TX_ERROR_STATS_FIELD_OFFSET(queue_full_cnt)}, + {"TX_SHORT_PKT_PAD_FAIL_CNT", + HNS3_TX_ERROR_STATS_FIELD_OFFSET(pkt_padding_fail_cnt)} +}; + +/* The statistic of rx queue */ +static const struct hns3_xstats_name_offset hns3_rx_queue_strings[] = { + {"RX_QUEUE_FBD", HNS3_RING_RX_FBDNUM_REG} +}; + +/* The statistic of tx queue */ +static const struct hns3_xstats_name_offset hns3_tx_queue_strings[] = { + {"TX_QUEUE_FBD", HNS3_RING_TX_FBDNUM_REG} +}; + +#define HNS3_NUM_MAC_STATS (sizeof(hns3_mac_strings) / \ + 
sizeof(hns3_mac_strings[0])) + +#define HNS3_NUM_ERROR_INT_XSTATS (sizeof(hns3_error_int_stats_strings) / \ + sizeof(hns3_error_int_stats_strings[0])) + +#define HNS3_NUM_RESET_XSTATS (sizeof(hns3_reset_stats_strings) / \ + sizeof(hns3_reset_stats_strings[0])) + +#define HNS3_NUM_RX_BD_ERROR_XSTATS (sizeof(hns3_rx_bd_error_strings) / \ + sizeof(hns3_rx_bd_error_strings[0])) + +#define HNS3_NUM_TX_ERRORS_XSTATS (sizeof(hns3_tx_errors_strings) / \ + sizeof(hns3_tx_errors_strings[0])) + +#define HNS3_NUM_RX_QUEUE_STATS (sizeof(hns3_rx_queue_strings) / \ + sizeof(hns3_rx_queue_strings[0])) + +#define HNS3_NUM_TX_QUEUE_STATS (sizeof(hns3_tx_queue_strings) / \ + sizeof(hns3_tx_queue_strings[0])) + +#define HNS3_FIX_NUM_STATS (HNS3_NUM_MAC_STATS + HNS3_NUM_ERROR_INT_XSTATS + \ + HNS3_NUM_RESET_XSTATS) + +/* + * Query all the MAC statistics data of Network ICL command ,opcode id: 0x0034. + * This command is used before send 'query_mac_stat command', the descriptor + * number of 'query_mac_stat command' must match with reg_num in this command. + * @praram hw + * Pointer to structure hns3_hw. + * @return + * 0 on success. + */ +static int +hns3_update_mac_stats(struct hns3_hw *hw, const uint32_t desc_num) +{ + uint64_t *data = (uint64_t *)(&hw->mac_stats); + struct hns3_cmd_desc *desc; + uint64_t *desc_data; + uint16_t i, k, n; + int ret; + + desc = rte_malloc("hns3_mac_desc", + desc_num * sizeof(struct hns3_cmd_desc), 0); + if (desc == NULL) { + hns3_err(hw, "Mac_update_stats alloced desc malloc fail"); + return -ENOMEM; + } + + hns3_cmd_setup_basic_desc(desc, HNS3_OPC_STATS_MAC_ALL, true); + ret = hns3_cmd_send(hw, desc, desc_num); + if (ret) { + hns3_err(hw, "Update complete MAC pkt stats fail : %d", ret); + rte_free(desc); + return ret; + } + + for (i = 0; i < desc_num; i++) { + /* For special opcode 0034, only the first desc has the head */ + if (i == 0) { + desc_data = (uint64_t *)(&desc[i].data[0]); + n = HNS3_RD_FIRST_STATS_NUM; + } else { + desc_data = (uint64_t *)(&desc[i]); + n = HNS3_RD_OTHER_STATS_NUM; + } + + for (k = 0; k < n; k++) { + *data += rte_le_to_cpu_64(*desc_data); + data++; + desc_data++; + } + } + rte_free(desc); + + return 0; +} + +/* + * Query Mac stat reg num command ,opcode id: 0x0033. + * This command is used before send 'query_mac_stat command', the descriptor + * number of 'query_mac_stat command' must match with reg_num in this command. + * @praram rte_stats + * Pointer to structure rte_eth_stats. + * @return + * 0 on success. + */ +static int +hns3_mac_query_reg_num(struct rte_eth_dev *dev, uint32_t *desc_num) +{ + struct hns3_adapter *hns = dev->data->dev_private; + struct hns3_hw *hw = &hns->hw; + struct hns3_cmd_desc desc; + uint32_t *desc_data; + uint32_t reg_num; + int ret; + + hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_MAC_REG_NUM, true); + ret = hns3_cmd_send(hw, &desc, 1); + if (ret) + return ret; + + /* + * The num of MAC statistics registers that are provided by IMP in this + * version. + */ + desc_data = (uint32_t *)(&desc.data[0]); + reg_num = rte_le_to_cpu_32(*desc_data); + + /* + * The descriptor number of 'query_additional_mac_stat command' is + * '1 + (reg_num-3)/4 + ((reg_num-3)%4 !=0)'; + * This value is 83 in this version + */ + *desc_num = 1 + ((reg_num - 3) >> 2) + + (uint32_t)(((reg_num - 3) & 0x3) ? 
1 : 0); + + return 0; +} + +static int +hns3_query_update_mac_stats(struct rte_eth_dev *dev) +{ + struct hns3_adapter *hns = dev->data->dev_private; + struct hns3_hw *hw = &hns->hw; + uint32_t desc_num; + int ret; + + ret = hns3_mac_query_reg_num(dev, &desc_num); + if (ret == 0) + ret = hns3_update_mac_stats(hw, desc_num); + else + hns3_err(hw, "Query mac reg num fail : %d", ret); + return ret; +} + +/* Get tqp stats from register */ +static int +hns3_update_tqp_stats(struct hns3_hw *hw) +{ + struct hns3_tqp_stats *stats = &hw->tqp_stats; + struct hns3_cmd_desc desc; + uint64_t cnt; + uint16_t i; + int ret; + + for (i = 0; i < hw->tqps_num; i++) { + hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_RX_STATUS, + true); + + desc.data[0] = rte_cpu_to_le_32((uint32_t)i & + HNS3_QUEUE_ID_MASK); + ret = hns3_cmd_send(hw, &desc, 1); + if (ret) { + hns3_err(hw, "Failed to query RX No.%d queue stat: %d", + i, ret); + return ret; + } + cnt = rte_le_to_cpu_32(desc.data[1]); + stats->rcb_rx_ring_pktnum_rcd += cnt; + stats->rcb_rx_ring_pktnum[i] += cnt; + + hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_TX_STATUS, + true); + + desc.data[0] = rte_cpu_to_le_32((uint32_t)i & + HNS3_QUEUE_ID_MASK); + ret = hns3_cmd_send(hw, &desc, 1); + if (ret) { + hns3_err(hw, "Failed to query TX No.%d queue stat: %d", + i, ret); + return ret; + } + cnt = rte_le_to_cpu_32(desc.data[1]); + stats->rcb_tx_ring_pktnum_rcd += cnt; + stats->rcb_tx_ring_pktnum[i] += cnt; + } + + return 0; +} + +/* + * Query tqp tx queue statistics ,opcode id: 0x0B03. + * Query tqp rx queue statistics ,opcode id: 0x0B13. + * Get all statistics of a port. + * @param eth_dev + * Pointer to Ethernet device. + * @praram rte_stats + * Pointer to structure rte_eth_stats. + * @return + * 0 on success. + */ +int +hns3_stats_get(struct rte_eth_dev *eth_dev, struct rte_eth_stats *rte_stats) +{ + struct hns3_adapter *hns = eth_dev->data->dev_private; + struct hns3_hw *hw = &hns->hw; + struct hns3_tqp_stats *stats = &hw->tqp_stats; + struct hns3_rx_queue *rxq; + struct hns3_tx_queue *txq; + uint64_t cnt; + uint64_t num; + uint16_t i; + int ret; + + /* Update tqp stats by read register */ + ret = hns3_update_tqp_stats(hw); + if (ret) { + hns3_err(hw, "Update tqp stats fail : %d", ret); + return ret; + } + + /* Get the error stats of received packets */ + num = RTE_MIN(RTE_ETHDEV_QUEUE_STAT_CNTRS, eth_dev->data->nb_rx_queues); + for (i = 0; i != num; ++i) { + rxq = eth_dev->data->rx_queues[i]; + if (rxq) { + cnt = rxq->l2_errors + rxq->pkt_len_errors; + rte_stats->q_errors[i] = cnt; + rte_stats->q_ipackets[i] = + stats->rcb_rx_ring_pktnum[i] - cnt; + rte_stats->ierrors += cnt; + } + } + /* Get the error stats of transmitted packets */ + num = RTE_MIN(RTE_ETHDEV_QUEUE_STAT_CNTRS, eth_dev->data->nb_tx_queues); + for (i = 0; i < num; i++) { + txq = eth_dev->data->tx_queues[i]; + if (txq) + rte_stats->q_opackets[i] = stats->rcb_tx_ring_pktnum[i]; + } + + rte_stats->oerrors = 0; + rte_stats->ipackets = stats->rcb_rx_ring_pktnum_rcd - + rte_stats->ierrors; + rte_stats->opackets = stats->rcb_tx_ring_pktnum_rcd - + rte_stats->oerrors; + rte_stats->rx_nombuf = eth_dev->data->rx_mbuf_alloc_failed; + + return 0; +} + +int +hns3_stats_reset(struct rte_eth_dev *eth_dev) +{ + struct hns3_adapter *hns = eth_dev->data->dev_private; + struct hns3_hw *hw = &hns->hw; + struct hns3_tqp_stats *stats = &hw->tqp_stats; + struct hns3_cmd_desc desc_reset; + struct hns3_rx_queue *rxq; + struct hns3_tx_queue *txq; + uint16_t i; + int ret; + + /* + * If this is a reset xstats is NULL, 
and we have cleared the + * registers by reading them. + */ + for (i = 0; i < hw->tqps_num; i++) { + hns3_cmd_setup_basic_desc(&desc_reset, HNS3_OPC_QUERY_RX_STATUS, + true); + desc_reset.data[0] = rte_cpu_to_le_32((uint32_t)i & + HNS3_QUEUE_ID_MASK); + ret = hns3_cmd_send(hw, &desc_reset, 1); + if (ret) { + hns3_err(hw, "Failed to reset RX No.%d queue stat: %d", + i, ret); + return ret; + } + + hns3_cmd_setup_basic_desc(&desc_reset, HNS3_OPC_QUERY_TX_STATUS, + true); + desc_reset.data[0] = rte_cpu_to_le_32((uint32_t)i & + HNS3_QUEUE_ID_MASK); + ret = hns3_cmd_send(hw, &desc_reset, 1); + if (ret) { + hns3_err(hw, "Failed to reset TX No.%d queue stat: %d", + i, ret); + return ret; + } + } + + /* Clear the Rx BD errors stats */ + for (i = 0; i != eth_dev->data->nb_rx_queues; ++i) { + rxq = eth_dev->data->rx_queues[i]; + if (rxq) { + rxq->pkt_len_errors = 0; + rxq->l2_errors = 0; + rxq->l3_csum_erros = 0; + rxq->l4_csum_erros = 0; + rxq->ol3_csum_erros = 0; + rxq->ol4_csum_erros = 0; + } + } + + /* Clear the Tx errors stats */ + for (i = 0; i != eth_dev->data->nb_tx_queues; ++i) { + txq = eth_dev->data->tx_queues[i]; + if (txq) { + txq->over_length_pkt_cnt = 0; + txq->exceed_limit_bd_pkt_cnt = 0; + txq->exceed_limit_bd_reassem_fail = 0; + txq->unsupported_tunnel_pkt_cnt = 0; + txq->queue_full_cnt = 0; + txq->pkt_padding_fail_cnt = 0; + } + } + + memset(stats, 0, sizeof(struct hns3_tqp_stats)); + + return 0; +} + +static int +hns3_mac_stats_reset(__rte_unused struct rte_eth_dev *dev) +{ + struct hns3_adapter *hns = dev->data->dev_private; + struct hns3_hw *hw = &hns->hw; + struct hns3_mac_stats *mac_stats = &hw->mac_stats; + int ret; + + ret = hns3_query_update_mac_stats(dev); + if (ret) { + hns3_err(hw, "Clear Mac stats fail : %d", ret); + return ret; + } + + memset(mac_stats, 0, sizeof(struct hns3_mac_stats)); + + return 0; +} + +/* This function calculates the number of xstats based on the current config */ +static int +hns3_xstats_calc_num(struct rte_eth_dev *dev) +{ + struct hns3_adapter *hns = dev->data->dev_private; + int bderr_stats = dev->data->nb_rx_queues * HNS3_NUM_RX_BD_ERROR_XSTATS; + int tx_err_stats = dev->data->nb_tx_queues * HNS3_NUM_TX_ERRORS_XSTATS; + int rx_queue_stats = dev->data->nb_rx_queues * HNS3_NUM_RX_QUEUE_STATS; + int tx_queue_stats = dev->data->nb_tx_queues * HNS3_NUM_TX_QUEUE_STATS; + + if (hns->is_vf) + return bderr_stats + tx_err_stats + rx_queue_stats + + tx_queue_stats + HNS3_NUM_RESET_XSTATS; + else + return bderr_stats + tx_err_stats + rx_queue_stats + + tx_queue_stats + HNS3_FIX_NUM_STATS; +} + +static void +hns3_get_queue_stats(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, + int *count) +{ + struct hns3_adapter *hns = dev->data->dev_private; + struct hns3_hw *hw = &hns->hw; + uint32_t reg_offset; + uint16_t i, j; + + /* Get rx queue stats */ + for (j = 0; j < dev->data->nb_rx_queues; j++) { + for (i = 0; i < HNS3_NUM_RX_QUEUE_STATS; i++) { + reg_offset = HNS3_TQP_REG_OFFSET + + HNS3_TQP_REG_SIZE * j; + xstats[*count].value = hns3_read_dev(hw, + reg_offset + hns3_rx_queue_strings[i].offset); + xstats[*count].id = *count; + (*count)++; + } + } + + /* Get tx queue stats */ + for (j = 0; j < dev->data->nb_tx_queues; j++) { + for (i = 0; i < HNS3_NUM_TX_QUEUE_STATS; i++) { + reg_offset = HNS3_TQP_REG_OFFSET + + HNS3_TQP_REG_SIZE * j; + xstats[*count].value = hns3_read_dev(hw, + reg_offset + hns3_tx_queue_strings[i].offset); + xstats[*count].id = *count; + (*count)++; + } + } + +} + +/* + * Retrieve extended(tqp | Mac) statistics of an Ethernet 
device. + * @param dev + * Pointer to Ethernet device. + * @praram xstats + * A pointer to a table of structure of type *rte_eth_xstat* + * to be filled with device statistics ids and values. + * This parameter can be set to NULL if n is 0. + * @param n + * The size of the xstats array (number of elements). + * @return + * 0 on fail, count(The size of the statistics elements) on success. + */ +int +hns3_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, + unsigned int n) +{ + struct hns3_adapter *hns = dev->data->dev_private; + struct hns3_pf *pf = &hns->pf; + struct hns3_hw *hw = &hns->hw; + struct hns3_mac_stats *mac_stats = &hw->mac_stats; + struct hns3_reset_stats *reset_stats = &hw->reset.stats; + struct hns3_rx_queue *rxq; + struct hns3_tx_queue *txq; + uint16_t i, j; + char *addr; + int count; + int ret; + + if (xstats == NULL) + return 0; + + count = hns3_xstats_calc_num(dev); + if ((int)n < count) + return count; + + count = 0; + + if (!hns->is_vf) { + /* Update Mac stats */ + ret = hns3_query_update_mac_stats(dev); + if (ret) { + hns3_err(hw, "Update Mac stats fail : %d", ret); + return 0; + } + + /* Get MAC stats from hw->hw_xstats.mac_stats struct */ + for (i = 0; i < HNS3_NUM_MAC_STATS; i++) { + addr = (char *)mac_stats + hns3_mac_strings[i].offset; + xstats[count].value = *(uint64_t *)addr; + xstats[count].id = count; + count++; + } + + for (i = 0; i < HNS3_NUM_ERROR_INT_XSTATS; i++) { + addr = (char *)&pf->abn_int_stats + + hns3_error_int_stats_strings[i].offset; + xstats[count].value = *(uint64_t *)addr; + xstats[count].id = count; + count++; + } + } + + /* Get the reset stat */ + for (i = 0; i < HNS3_NUM_RESET_XSTATS; i++) { + addr = (char *)reset_stats + hns3_reset_stats_strings[i].offset; + xstats[count].value = *(uint64_t *)addr; + xstats[count].id = count; + count++; + } + + /* Get the Rx BD errors stats */ + for (j = 0; j < dev->data->nb_rx_queues; j++) { + for (i = 0; i < HNS3_NUM_RX_BD_ERROR_XSTATS; i++) { + rxq = dev->data->rx_queues[j]; + addr = (char *)rxq + hns3_rx_bd_error_strings[i].offset; + xstats[count].value = *(uint64_t *)addr; + xstats[count].id = count; + count++; + } + } + + /* Get the Tx errors stats */ + for (j = 0; j < dev->data->nb_tx_queues; j++) { + for (i = 0; i < HNS3_NUM_TX_ERRORS_XSTATS; i++) { + txq = dev->data->tx_queues[j]; + addr = (char *)txq + hns3_tx_errors_strings[i].offset; + xstats[count].value = *(uint64_t *)addr; + xstats[count].id = count; + count++; + } + } + + hns3_get_queue_stats(dev, xstats, &count); + return count; +} + +/* + * Retrieve names of extended statistics of an Ethernet device. + * + * There is an assumption that 'xstat_names' and 'xstats' arrays are matched + * by array index: + * xstats_names[i].name => xstats[i].value + * + * And the array index is same with id field of 'struct rte_eth_xstat': + * xstats[i].id == i + * + * This assumption makes key-value pair matching less flexible but simpler. + * + * @param dev + * Pointer to Ethernet device. + * @param xstats_names + * An rte_eth_xstat_name array of at least *size* elements to + * be filled. If set to NULL, the function returns the required number + * of elements. + * @param size + * The size of the xstats_names array (number of elements). + * @return + * - A positive value lower or equal to size: success. The return value + * is the number of entries filled in the stats table. 
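+ *
+ * Illustrative usage sketch (editor's note, not upstream code): an
+ * application pairs names with values through the shared index, roughly:
+ *
+ *   int nb = rte_eth_xstats_get_names(port_id, NULL, 0);
+ *   struct rte_eth_xstat_name *names = malloc(nb * sizeof(*names));
+ *   struct rte_eth_xstat *vals = malloc(nb * sizeof(*vals));
+ *   rte_eth_xstats_get_names(port_id, names, nb);
+ *   rte_eth_xstats_get(port_id, vals, nb);
+ *   for (int i = 0; i < nb; i++)
+ *       printf("%s: %" PRIu64 "\n", names[i].name, vals[i].value);
+ *
+ * 'port_id' and the malloc()ed buffers are hypothetical; error handling
+ * is omitted for brevity.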
+ */ +int +hns3_dev_xstats_get_names(struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, + __rte_unused unsigned int size) +{ + struct hns3_adapter *hns = dev->data->dev_private; + int cnt_stats = hns3_xstats_calc_num(dev); + uint32_t count = 0; + uint16_t i, j; + + if (xstats_names == NULL) + return cnt_stats; + + /* Note: size limited checked in rte_eth_xstats_get_names() */ + if (!hns->is_vf) { + /* Get MAC name from hw->hw_xstats.mac_stats struct */ + for (i = 0; i < HNS3_NUM_MAC_STATS; i++) { + snprintf(xstats_names[count].name, + sizeof(xstats_names[count].name), + "%s", hns3_mac_strings[i].name); + count++; + } + + for (i = 0; i < HNS3_NUM_ERROR_INT_XSTATS; i++) { + snprintf(xstats_names[count].name, + sizeof(xstats_names[count].name), + "%s", hns3_error_int_stats_strings[i].name); + count++; + } + } + for (i = 0; i < HNS3_NUM_RESET_XSTATS; i++) { + snprintf(xstats_names[count].name, + sizeof(xstats_names[count].name), + "%s", hns3_reset_stats_strings[i].name); + count++; + } + + for (j = 0; j < dev->data->nb_rx_queues; j++) { + for (i = 0; i < HNS3_NUM_RX_BD_ERROR_XSTATS; i++) { + snprintf(xstats_names[count].name, + sizeof(xstats_names[count].name), + "rx_q%u%s", j, + hns3_rx_bd_error_strings[i].name); + count++; + } + } + + for (j = 0; j < dev->data->nb_tx_queues; j++) { + for (i = 0; i < HNS3_NUM_TX_ERRORS_XSTATS; i++) { + snprintf(xstats_names[count].name, + sizeof(xstats_names[count].name), + "tx_q%u%s", j, + hns3_tx_errors_strings[i].name); + count++; + } + } + + for (j = 0; j < dev->data->nb_rx_queues; j++) { + for (i = 0; i < HNS3_NUM_RX_QUEUE_STATS; i++) { + snprintf(xstats_names[count].name, + sizeof(xstats_names[count].name), + "rx_q%u%s", j, hns3_rx_queue_strings[i].name); + count++; + } + } + + for (j = 0; j < dev->data->nb_tx_queues; j++) { + for (i = 0; i < HNS3_NUM_TX_QUEUE_STATS; i++) { + snprintf(xstats_names[count].name, + sizeof(xstats_names[count].name), + "tx_q%u%s", j, hns3_tx_queue_strings[i].name); + count++; + } + } + + return count; +} + +/* + * Retrieve extended statistics of an Ethernet device. + * + * @param dev + * Pointer to Ethernet device. + * @param ids + * A pointer to an ids array passed by application. This tells which + * statistics values function should retrieve. This parameter + * can be set to NULL if size is 0. In this case function will retrieve + * all avalible statistics. + * @param values + * A pointer to a table to be filled with device statistics values. + * @param size + * The size of the ids array (number of elements). + * @return + * - A positive value lower or equal to size: success. The return value + * is the number of entries filled in the stats table. + * - A positive value higher than size: error, the given statistics table + * is too small. The return value corresponds to the size that should + * be given to succeed. The entries in the table are not valid and + * shall not be used by the caller. + * - 0 on no ids. 
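+ *
+ * Illustrative usage sketch (editor's note, not upstream code): once the
+ * interesting ids are known, only those counters need to be polled,
+ * roughly:
+ *
+ *   uint64_t ids[2] = {0, 1};            (hypothetical ids)
+ *   uint64_t values[2];
+ *   int ret = rte_eth_xstats_get_by_id(port_id, ids, values, 2);
+ *
+ * 'port_id' and the chosen ids are hypothetical; in practice the ids are
+ * discovered through the name lookup helpers below.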
+ */ +int +hns3_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids, + uint64_t *values, uint32_t size) +{ + const uint32_t cnt_stats = hns3_xstats_calc_num(dev); + struct hns3_adapter *hns = dev->data->dev_private; + struct rte_eth_xstat *values_copy; + struct hns3_hw *hw = &hns->hw; + uint32_t count_value; + uint64_t len; + uint32_t i; + int ret; + + if (ids == NULL || size < cnt_stats) + return cnt_stats; + + /* Update tqp stats by read register */ + ret = hns3_update_tqp_stats(hw); + if (ret) { + hns3_err(hw, "Update tqp stats fail : %d", ret); + return ret; + } + + len = cnt_stats * sizeof(struct rte_eth_xstat); + values_copy = rte_zmalloc("hns3_xstats_values", len, 0); + if (values_copy == NULL) { + hns3_err(hw, "Failed to allocate %" PRIx64 " bytes needed " + "to store statistics values", len); + return -ENOMEM; + } + + count_value = hns3_dev_xstats_get(dev, values_copy, cnt_stats); + if (count_value != cnt_stats) { + rte_free(values_copy); + return -EINVAL; + } + + for (i = 0; i < size; i++) { + if (ids[i] >= cnt_stats) { + hns3_err(hw, "ids[%d] (%" PRIx64 ") is invalid, " + "should < %u", i, ids[i], cnt_stats); + rte_free(values_copy); + return -EINVAL; + } + memcpy(&values[i], &values_copy[ids[i]].value, + sizeof(values[i])); + } + + rte_free(values_copy); + return size; +} + +/* + * Retrieve names of extended statistics of an Ethernet device. + * + * @param dev + * Pointer to Ethernet device. + * @param xstats_names + * An rte_eth_xstat_name array of at least *size* elements to + * be filled. If set to NULL, the function returns the required number + * of elements. + * @param ids + * IDs array given by app to retrieve specific statistics + * @param size + * The size of the xstats_names array (number of elements). + * @return + * - A positive value lower or equal to size: success. The return value + * is the number of entries filled in the stats table. + * - A positive value higher than size: error, the given statistics table + * is too small. The return value corresponds to the size that should + * be given to succeed. The entries in the table are not valid and + * shall not be used by the caller. 
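+ *
+ * Implementation note (editor's addition): the requested ids are resolved
+ * by building the full name table once and indexing into it, conceptually:
+ *
+ *   full = all cnt_stats names (the names_copy buffer below)
+ *   for (i = 0; i < size; i++)
+ *       xstats_names[i] = full[ids[i]]    (after bounds-checking ids[i])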
+ */ +int +hns3_dev_xstats_get_names_by_id(struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, + const uint64_t *ids, uint32_t size) +{ + const uint32_t cnt_stats = hns3_xstats_calc_num(dev); + struct hns3_adapter *hns = dev->data->dev_private; + struct rte_eth_xstat_name *names_copy; + struct hns3_hw *hw = &hns->hw; + uint64_t len; + uint32_t i; + + if (ids == NULL || xstats_names == NULL) + return cnt_stats; + + len = cnt_stats * sizeof(struct rte_eth_xstat_name); + names_copy = rte_zmalloc("hns3_xstats_names", len, 0); + if (names_copy == NULL) { + hns3_err(hw, "Failed to allocate %" PRIx64 " bytes needed " + "to store statistics names", len); + return -ENOMEM; + } + + (void)hns3_dev_xstats_get_names(dev, names_copy, cnt_stats); + + for (i = 0; i < size; i++) { + if (ids[i] >= cnt_stats) { + hns3_err(hw, "ids[%d] (%" PRIx64 ") is invalid, " + "should < %u", i, ids[i], cnt_stats); + rte_free(names_copy); + return -EINVAL; + } + snprintf(xstats_names[i].name, sizeof(xstats_names[i].name), + "%s", names_copy[ids[i]].name); + } + + rte_free(names_copy); + return size; +} + +int +hns3_dev_xstats_reset(struct rte_eth_dev *dev) +{ + struct hns3_adapter *hns = dev->data->dev_private; + struct hns3_pf *pf = &hns->pf; + int ret; + + /* Clear tqp stats */ + ret = hns3_stats_reset(dev); + if (ret) + return ret; + + /* Clear reset stats */ + memset(&hns->hw.reset.stats, 0, sizeof(struct hns3_reset_stats)); + + if (hns->is_vf) + return 0; + + /* HW registers are cleared on read */ + ret = hns3_mac_stats_reset(dev); + if (ret) + return ret; + + /* Clear error stats */ + memset(&pf->abn_int_stats, 0, sizeof(struct hns3_err_msix_intr_stats)); + + return 0; +} diff --git a/src/spdk/dpdk/drivers/net/hns3/hns3_stats.h b/src/spdk/dpdk/drivers/net/hns3/hns3_stats.h new file mode 100644 index 000000000..0993c5f57 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/hns3/hns3_stats.h @@ -0,0 +1,151 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018-2019 Hisilicon Limited. 
+ */ + +#ifndef _HNS3_STATS_H_ +#define _HNS3_STATS_H_ + +/* stats macro */ +#define HNS3_MAC_CMD_NUM 21 +#define HNS3_RD_FIRST_STATS_NUM 2 +#define HNS3_RD_OTHER_STATS_NUM 4 + +/* TQP stats */ +struct hns3_tqp_stats { + uint64_t rcb_tx_ring_pktnum_rcd; /* Total num of transmitted packets */ + uint64_t rcb_rx_ring_pktnum_rcd; /* Total num of received packets */ + uint64_t rcb_tx_ring_pktnum[HNS3_MAX_TQP_NUM_PER_FUNC]; + uint64_t rcb_rx_ring_pktnum[HNS3_MAX_TQP_NUM_PER_FUNC]; +}; + +/* mac stats, Statistics counters collected by the MAC, opcode id: 0x0032 */ +struct hns3_mac_stats { + uint64_t mac_tx_mac_pause_num; + uint64_t mac_rx_mac_pause_num; + uint64_t mac_tx_pfc_pri0_pkt_num; + uint64_t mac_tx_pfc_pri1_pkt_num; + uint64_t mac_tx_pfc_pri2_pkt_num; + uint64_t mac_tx_pfc_pri3_pkt_num; + uint64_t mac_tx_pfc_pri4_pkt_num; + uint64_t mac_tx_pfc_pri5_pkt_num; + uint64_t mac_tx_pfc_pri6_pkt_num; + uint64_t mac_tx_pfc_pri7_pkt_num; + uint64_t mac_rx_pfc_pri0_pkt_num; + uint64_t mac_rx_pfc_pri1_pkt_num; + uint64_t mac_rx_pfc_pri2_pkt_num; + uint64_t mac_rx_pfc_pri3_pkt_num; + uint64_t mac_rx_pfc_pri4_pkt_num; + uint64_t mac_rx_pfc_pri5_pkt_num; + uint64_t mac_rx_pfc_pri6_pkt_num; + uint64_t mac_rx_pfc_pri7_pkt_num; + uint64_t mac_tx_total_pkt_num; + uint64_t mac_tx_total_oct_num; + uint64_t mac_tx_good_pkt_num; + uint64_t mac_tx_bad_pkt_num; + uint64_t mac_tx_good_oct_num; + uint64_t mac_tx_bad_oct_num; + uint64_t mac_tx_uni_pkt_num; + uint64_t mac_tx_multi_pkt_num; + uint64_t mac_tx_broad_pkt_num; + uint64_t mac_tx_undersize_pkt_num; + uint64_t mac_tx_oversize_pkt_num; + uint64_t mac_tx_64_oct_pkt_num; + uint64_t mac_tx_65_127_oct_pkt_num; + uint64_t mac_tx_128_255_oct_pkt_num; + uint64_t mac_tx_256_511_oct_pkt_num; + uint64_t mac_tx_512_1023_oct_pkt_num; + uint64_t mac_tx_1024_1518_oct_pkt_num; + uint64_t mac_tx_1519_2047_oct_pkt_num; + uint64_t mac_tx_2048_4095_oct_pkt_num; + uint64_t mac_tx_4096_8191_oct_pkt_num; + uint64_t rsv0; + uint64_t mac_tx_8192_9216_oct_pkt_num; + uint64_t mac_tx_9217_12287_oct_pkt_num; + uint64_t mac_tx_12288_16383_oct_pkt_num; + uint64_t mac_tx_1519_max_good_oct_pkt_num; + uint64_t mac_tx_1519_max_bad_oct_pkt_num; + + uint64_t mac_rx_total_pkt_num; + uint64_t mac_rx_total_oct_num; + uint64_t mac_rx_good_pkt_num; + uint64_t mac_rx_bad_pkt_num; + uint64_t mac_rx_good_oct_num; + uint64_t mac_rx_bad_oct_num; + uint64_t mac_rx_uni_pkt_num; + uint64_t mac_rx_multi_pkt_num; + uint64_t mac_rx_broad_pkt_num; + uint64_t mac_rx_undersize_pkt_num; + uint64_t mac_rx_oversize_pkt_num; + uint64_t mac_rx_64_oct_pkt_num; + uint64_t mac_rx_65_127_oct_pkt_num; + uint64_t mac_rx_128_255_oct_pkt_num; + uint64_t mac_rx_256_511_oct_pkt_num; + uint64_t mac_rx_512_1023_oct_pkt_num; + uint64_t mac_rx_1024_1518_oct_pkt_num; + uint64_t mac_rx_1519_2047_oct_pkt_num; + uint64_t mac_rx_2048_4095_oct_pkt_num; + uint64_t mac_rx_4096_8191_oct_pkt_num; + uint64_t rsv1; + uint64_t mac_rx_8192_9216_oct_pkt_num; + uint64_t mac_rx_9217_12287_oct_pkt_num; + uint64_t mac_rx_12288_16383_oct_pkt_num; + uint64_t mac_rx_1519_max_good_oct_pkt_num; + uint64_t mac_rx_1519_max_bad_oct_pkt_num; + + uint64_t mac_tx_fragment_pkt_num; + uint64_t mac_tx_undermin_pkt_num; + uint64_t mac_tx_jabber_pkt_num; + uint64_t mac_tx_err_all_pkt_num; + uint64_t mac_tx_from_app_good_pkt_num; + uint64_t mac_tx_from_app_bad_pkt_num; + uint64_t mac_rx_fragment_pkt_num; + uint64_t mac_rx_undermin_pkt_num; + uint64_t mac_rx_jabber_pkt_num; + uint64_t mac_rx_fcs_err_pkt_num; + uint64_t mac_rx_send_app_good_pkt_num; + uint64_t 
mac_rx_send_app_bad_pkt_num; + uint64_t mac_tx_pfc_pause_pkt_num; + uint64_t mac_rx_pfc_pause_pkt_num; + uint64_t mac_tx_ctrl_pkt_num; + uint64_t mac_rx_ctrl_pkt_num; +}; + +/* store statistics names and its offset in stats structure */ +struct hns3_xstats_name_offset { + char name[RTE_ETH_XSTATS_NAME_SIZE]; + uint32_t offset; +}; + +#define HNS3_MAC_STATS_OFFSET(f) \ + (offsetof(struct hns3_mac_stats, f)) + +#define HNS3_ERR_INT_STATS_FIELD_OFFSET(f) \ + (offsetof(struct hns3_err_msix_intr_stats, f)) + +struct hns3_reset_stats; +#define HNS3_RESET_STATS_FIELD_OFFSET(f) \ + (offsetof(struct hns3_reset_stats, f)) + +#define HNS3_RX_BD_ERROR_STATS_FIELD_OFFSET(f) \ + (offsetof(struct hns3_rx_queue, f)) + +#define HNS3_TX_ERROR_STATS_FIELD_OFFSET(f) \ + (offsetof(struct hns3_tx_queue, f)) + +int hns3_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats); +int hns3_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, + unsigned int n); +int hns3_dev_xstats_reset(struct rte_eth_dev *dev); +int hns3_dev_xstats_get_names(struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, + __rte_unused unsigned int size); +int hns3_dev_xstats_get_by_id(struct rte_eth_dev *dev, + __rte_unused const uint64_t *ids, + __rte_unused uint64_t *values, + uint32_t size); +int hns3_dev_xstats_get_names_by_id(struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, + const uint64_t *ids, + uint32_t size); +int hns3_stats_reset(struct rte_eth_dev *dev); +#endif /* _HNS3_STATS_H_ */ diff --git a/src/spdk/dpdk/drivers/net/hns3/meson.build b/src/spdk/dpdk/drivers/net/hns3/meson.build new file mode 100644 index 000000000..e01e6ce60 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/hns3/meson.build @@ -0,0 +1,30 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2018-2019 Hisilicon Limited + +if not is_linux + build = false + reason = 'only supported on Linux' + subdir_done() +endif + +if arch_subdir != 'x86' and arch_subdir != 'arm' or not dpdk_conf.get('RTE_ARCH_64') + build = false + reason = 'only supported on x86_64 and arm64' + subdir_done() +endif + +sources = files('hns3_cmd.c', + 'hns3_dcb.c', + 'hns3_intr.c', + 'hns3_ethdev.c', + 'hns3_ethdev_vf.c', + 'hns3_fdir.c', + 'hns3_flow.c', + 'hns3_mbx.c', + 'hns3_regs.c', + 'hns3_rss.c', + 'hns3_rxtx.c', + 'hns3_stats.c', + 'hns3_mp.c') + +deps += ['hash'] diff --git a/src/spdk/dpdk/drivers/net/hns3/rte_pmd_hns3_version.map b/src/spdk/dpdk/drivers/net/hns3/rte_pmd_hns3_version.map new file mode 100644 index 000000000..f9f17e4f6 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/hns3/rte_pmd_hns3_version.map @@ -0,0 +1,3 @@ +DPDK_20.0 { + local: *; +}; diff --git a/src/spdk/dpdk/drivers/net/i40e/Makefile b/src/spdk/dpdk/drivers/net/i40e/Makefile new file mode 100644 index 000000000..7ec8d9533 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/i40e/Makefile @@ -0,0 +1,112 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2010-2017 Intel Corporation + +include $(RTE_SDK)/mk/rte.vars.mk + +# +# library name +# +LIB = librte_pmd_i40e.a + +CFLAGS += -O3 +CFLAGS += $(WERROR_FLAGS) -DPF_DRIVER -DVF_DRIVER -DINTEGRATED_VF +CFLAGS += -DX722_A0_SUPPORT + +LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring +LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs -lrte_hash +LDLIBS += -lrte_bus_pci + +EXPORT_MAP := rte_pmd_i40e_version.map + +# +# Add extra flags for base driver files (also known as shared code) +# to disable warnings +# +ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y) +CFLAGS_BASE_DRIVER = -diag-disable 593 +else ifeq 
($(CONFIG_RTE_TOOLCHAIN_CLANG),y) +CFLAGS_BASE_DRIVER += -Wno-sign-compare +CFLAGS_BASE_DRIVER += -Wno-unused-value +CFLAGS_BASE_DRIVER += -Wno-unused-parameter +CFLAGS_BASE_DRIVER += -Wno-strict-aliasing +CFLAGS_BASE_DRIVER += -Wno-format +CFLAGS_BASE_DRIVER += -Wno-pointer-to-int-cast +CFLAGS_BASE_DRIVER += -Wno-format-nonliteral +CFLAGS_BASE_DRIVER += -Wno-unused-variable +else +CFLAGS_BASE_DRIVER = -Wno-sign-compare +CFLAGS_BASE_DRIVER += -Wno-unused-value +CFLAGS_BASE_DRIVER += -Wno-unused-parameter +CFLAGS_BASE_DRIVER += -Wno-strict-aliasing +CFLAGS_BASE_DRIVER += -Wno-format +CFLAGS_BASE_DRIVER += -Wno-pointer-to-int-cast +CFLAGS_BASE_DRIVER += -Wno-format-nonliteral +CFLAGS_BASE_DRIVER += -Wno-format-security +CFLAGS_BASE_DRIVER += -Wno-unused-variable + +ifeq ($(shell test $(GCC_VERSION) -ge 44 && echo 1), 1) +CFLAGS_BASE_DRIVER += -Wno-unused-but-set-variable +endif + +CFLAGS_i40e_lan_hmc.o += -Wno-error +endif +OBJS_BASE_DRIVER=$(sort $(patsubst %.c,%.o,$(notdir $(wildcard $(SRCDIR)/base/*.c)))) +$(foreach obj, $(OBJS_BASE_DRIVER), $(eval CFLAGS_$(obj)+=$(CFLAGS_BASE_DRIVER))) + +VPATH += $(SRCDIR)/base + +# +# all source are stored in SRCS-y +# base driver is based on the package of dpdk-i40e.2016.04.18.12.tar.gz. +# +SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_adminq.c +SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_common.c +SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_diag.c +SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_hmc.c +SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_lan_hmc.c +SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_nvm.c +SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_dcb.c + +SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_ethdev.c +SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_rxtx.c +ifeq ($(CONFIG_RTE_ARCH_ARM64),y) +SRCS-$(CONFIG_RTE_LIBRTE_I40E_INC_VECTOR) += i40e_rxtx_vec_neon.c +else ifeq ($(CONFIG_RTE_ARCH_PPC_64),y) +SRCS-$(CONFIG_RTE_LIBRTE_I40E_INC_VECTOR) += i40e_rxtx_vec_altivec.c +else +SRCS-$(CONFIG_RTE_LIBRTE_I40E_INC_VECTOR) += i40e_rxtx_vec_sse.c +endif +SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_ethdev_vf.c +SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_pf.c +SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_fdir.c +SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_flow.c +SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += rte_pmd_i40e.c +SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_tm.c +SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_vf_representor.c + +ifeq ($(CONFIG_RTE_LIBRTE_I40E_INC_VECTOR),y) +ifeq ($(findstring RTE_MACHINE_CPUFLAG_AVX2,$(CFLAGS)),RTE_MACHINE_CPUFLAG_AVX2) + CC_AVX2_SUPPORT=1 +else + CC_AVX2_SUPPORT=\ + $(shell $(CC) -march=core-avx2 -dM -E - &1 | \ + grep -q AVX2 && echo 1) + ifeq ($(CC_AVX2_SUPPORT), 1) + ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y) + CFLAGS_i40e_rxtx_vec_avx2.o += -march=core-avx2 + else + CFLAGS_i40e_rxtx_vec_avx2.o += -mavx2 + endif + endif +endif +endif + +ifeq ($(CC_AVX2_SUPPORT), 1) + SRCS-$(CONFIG_RTE_LIBRTE_I40E_INC_VECTOR) += i40e_rxtx_vec_avx2.c + CFLAGS_i40e_rxtx.o += -DCC_AVX2_SUPPORT +endif + +# install this header file +SYMLINK-$(CONFIG_RTE_LIBRTE_I40E_PMD)-include := rte_pmd_i40e.h + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/src/spdk/dpdk/drivers/net/i40e/base/README b/src/spdk/dpdk/drivers/net/i40e/base/README new file mode 100644 index 000000000..6baca4360 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/i40e/base/README @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017-2020 Intel Corporation + */ + +Intel® I40E driver +================== + +This directory contains source code of FreeBSD i40e driver of version 
+cid-i40e.2020.03.04.tar.gz released by the team which develops +basic drivers for any i40e NIC. The directory of base/ contains the +original source package. +This driver is valid for the product(s) listed below + +* Intel® Ethernet Converged Network Adapters X710 +* Intel® Ethernet Converged Network Adapters XL710 +* Intel® Ethernet Network Adapter XXV710 +* Intel® Ethernet Connection X722 for 10GBASE-T +* Intel® Ethernet Connection X722 for 10GbE backplane +* Intel® Ethernet Connection X722 for 10GbE SFP+ +* Intel® Ethernet Connection X722 for 1GbE +* Intel® Ethernet Controller X710 and XL710 Family +* Intel® Ethernet Controller XXV710 for 25GbE backplane +* Intel® Ethernet Controller XXV710 for 25GbE SFP28 + +Updating the driver +=================== + +NOTE: The source code in this directory should not be modified apart from +the following file(s): + + i40e_osdep.h diff --git a/src/spdk/dpdk/drivers/net/i40e/base/i40e_adminq.c b/src/spdk/dpdk/drivers/net/i40e/base/i40e_adminq.c new file mode 100644 index 000000000..c89e1fb3f --- /dev/null +++ b/src/spdk/dpdk/drivers/net/i40e/base/i40e_adminq.c @@ -0,0 +1,1179 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + +#include "i40e_status.h" +#include "i40e_type.h" +#include "i40e_register.h" +#include "i40e_adminq.h" +#include "i40e_prototype.h" + +/** + * i40e_adminq_init_regs - Initialize AdminQ registers + * @hw: pointer to the hardware structure + * + * This assumes the alloc_asq and alloc_arq functions have already been called + **/ +STATIC void i40e_adminq_init_regs(struct i40e_hw *hw) +{ + /* set head and tail registers in our local struct */ + if (i40e_is_vf(hw)) { + hw->aq.asq.tail = I40E_VF_ATQT1; + hw->aq.asq.head = I40E_VF_ATQH1; + hw->aq.asq.len = I40E_VF_ATQLEN1; + hw->aq.asq.bal = I40E_VF_ATQBAL1; + hw->aq.asq.bah = I40E_VF_ATQBAH1; + hw->aq.arq.tail = I40E_VF_ARQT1; + hw->aq.arq.head = I40E_VF_ARQH1; + hw->aq.arq.len = I40E_VF_ARQLEN1; + hw->aq.arq.bal = I40E_VF_ARQBAL1; + hw->aq.arq.bah = I40E_VF_ARQBAH1; +#ifdef PF_DRIVER + } else { + hw->aq.asq.tail = I40E_PF_ATQT; + hw->aq.asq.head = I40E_PF_ATQH; + hw->aq.asq.len = I40E_PF_ATQLEN; + hw->aq.asq.bal = I40E_PF_ATQBAL; + hw->aq.asq.bah = I40E_PF_ATQBAH; + hw->aq.arq.tail = I40E_PF_ARQT; + hw->aq.arq.head = I40E_PF_ARQH; + hw->aq.arq.len = I40E_PF_ARQLEN; + hw->aq.arq.bal = I40E_PF_ARQBAL; + hw->aq.arq.bah = I40E_PF_ARQBAH; +#endif + } +} + +/** + * i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings + * @hw: pointer to the hardware structure + **/ +enum i40e_status_code i40e_alloc_adminq_asq_ring(struct i40e_hw *hw) +{ + enum i40e_status_code ret_code; + + ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf, + i40e_mem_atq_ring, + (hw->aq.num_asq_entries * + sizeof(struct i40e_aq_desc)), + I40E_ADMINQ_DESC_ALIGNMENT); + if (ret_code) + return ret_code; + + ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf, + (hw->aq.num_asq_entries * + sizeof(struct i40e_asq_cmd_details))); + if (ret_code) { + i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf); + return ret_code; + } + + return ret_code; +} + +/** + * i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings + * @hw: pointer to the hardware structure + **/ +enum i40e_status_code i40e_alloc_adminq_arq_ring(struct i40e_hw *hw) +{ + enum i40e_status_code ret_code; + + ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf, + i40e_mem_arq_ring, + (hw->aq.num_arq_entries * + sizeof(struct i40e_aq_desc)), + I40E_ADMINQ_DESC_ALIGNMENT); + + return ret_code; +} + 
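+/*
+ * Editor's note (illustrative, not upstream code): both admin queue rings
+ * are sized as num_entries * sizeof(struct i40e_aq_desc) and aligned to
+ * I40E_ADMINQ_DESC_ALIGNMENT. With a hypothetical 32-entry queue and the
+ * 32-byte admin queue descriptor that is a 1 KB DMA ring:
+ *
+ *   ring_bytes = 32 * sizeof(struct i40e_aq_desc) = 32 * 32 = 1024
+ */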
+/** + * i40e_free_adminq_asq - Free Admin Queue send rings + * @hw: pointer to the hardware structure + * + * This assumes the posted send buffers have already been cleaned + * and de-allocated + **/ +void i40e_free_adminq_asq(struct i40e_hw *hw) +{ + i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf); + i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf); +} + +/** + * i40e_free_adminq_arq - Free Admin Queue receive rings + * @hw: pointer to the hardware structure + * + * This assumes the posted receive buffers have already been cleaned + * and de-allocated + **/ +void i40e_free_adminq_arq(struct i40e_hw *hw) +{ + i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf); +} + +/** + * i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue + * @hw: pointer to the hardware structure + **/ +STATIC enum i40e_status_code i40e_alloc_arq_bufs(struct i40e_hw *hw) +{ + enum i40e_status_code ret_code; + struct i40e_aq_desc *desc; + struct i40e_dma_mem *bi; + int i; + + /* We'll be allocating the buffer info memory first, then we can + * allocate the mapped buffers for the event processing + */ + + /* buffer_info structures do not need alignment */ + ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head, + (hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem))); + if (ret_code) + goto alloc_arq_bufs; + hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va; + + /* allocate the mapped buffers */ + for (i = 0; i < hw->aq.num_arq_entries; i++) { + bi = &hw->aq.arq.r.arq_bi[i]; + ret_code = i40e_allocate_dma_mem(hw, bi, + i40e_mem_arq_buf, + hw->aq.arq_buf_size, + I40E_ADMINQ_DESC_ALIGNMENT); + if (ret_code) + goto unwind_alloc_arq_bufs; + + /* now configure the descriptors for use */ + desc = I40E_ADMINQ_DESC(hw->aq.arq, i); + + desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF); + if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF) + desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB); + desc->opcode = 0; + /* This is in accordance with Admin queue design, there is no + * register for buffer size configuration + */ + desc->datalen = CPU_TO_LE16((u16)bi->size); + desc->retval = 0; + desc->cookie_high = 0; + desc->cookie_low = 0; + desc->params.external.addr_high = + CPU_TO_LE32(I40E_HI_DWORD(bi->pa)); + desc->params.external.addr_low = + CPU_TO_LE32(I40E_LO_DWORD(bi->pa)); + desc->params.external.param0 = 0; + desc->params.external.param1 = 0; + } + +alloc_arq_bufs: + return ret_code; + +unwind_alloc_arq_bufs: + /* don't try to free the one that failed... 
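+	 * entry i is the one whose DMA allocation just failed, so it is
+	 * skipped; the loop below releases entries i-1 down to 0 and then
+	 * frees the dma_head bookkeeping array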
*/ + i--; + for (; i >= 0; i--) + i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]); + i40e_free_virt_mem(hw, &hw->aq.arq.dma_head); + + return ret_code; +} + +/** + * i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue + * @hw: pointer to the hardware structure + **/ +STATIC enum i40e_status_code i40e_alloc_asq_bufs(struct i40e_hw *hw) +{ + enum i40e_status_code ret_code; + struct i40e_dma_mem *bi; + int i; + + /* No mapped memory needed yet, just the buffer info structures */ + ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head, + (hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem))); + if (ret_code) + goto alloc_asq_bufs; + hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va; + + /* allocate the mapped buffers */ + for (i = 0; i < hw->aq.num_asq_entries; i++) { + bi = &hw->aq.asq.r.asq_bi[i]; + ret_code = i40e_allocate_dma_mem(hw, bi, + i40e_mem_asq_buf, + hw->aq.asq_buf_size, + I40E_ADMINQ_DESC_ALIGNMENT); + if (ret_code) + goto unwind_alloc_asq_bufs; + } +alloc_asq_bufs: + return ret_code; + +unwind_alloc_asq_bufs: + /* don't try to free the one that failed... */ + i--; + for (; i >= 0; i--) + i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]); + i40e_free_virt_mem(hw, &hw->aq.asq.dma_head); + + return ret_code; +} + +/** + * i40e_free_arq_bufs - Free receive queue buffer info elements + * @hw: pointer to the hardware structure + **/ +STATIC void i40e_free_arq_bufs(struct i40e_hw *hw) +{ + int i; + + /* free descriptors */ + for (i = 0; i < hw->aq.num_arq_entries; i++) + i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]); + + /* free the descriptor memory */ + i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf); + + /* free the dma header */ + i40e_free_virt_mem(hw, &hw->aq.arq.dma_head); +} + +/** + * i40e_free_asq_bufs - Free send queue buffer info elements + * @hw: pointer to the hardware structure + **/ +STATIC void i40e_free_asq_bufs(struct i40e_hw *hw) +{ + int i; + + /* only unmap if the address is non-NULL */ + for (i = 0; i < hw->aq.num_asq_entries; i++) + if (hw->aq.asq.r.asq_bi[i].pa) + i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]); + + /* free the buffer info list */ + i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf); + + /* free the descriptor memory */ + i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf); + + /* free the dma header */ + i40e_free_virt_mem(hw, &hw->aq.asq.dma_head); +} + +/** + * i40e_config_asq_regs - configure ASQ registers + * @hw: pointer to the hardware structure + * + * Configure base address and length registers for the transmit queue + **/ +STATIC enum i40e_status_code i40e_config_asq_regs(struct i40e_hw *hw) +{ + enum i40e_status_code ret_code = I40E_SUCCESS; + u32 reg = 0; + + /* Clear Head and Tail */ + wr32(hw, hw->aq.asq.head, 0); + wr32(hw, hw->aq.asq.tail, 0); + + /* set starting point */ +#ifdef PF_DRIVER +#ifdef INTEGRATED_VF + if (!i40e_is_vf(hw)) + wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries | + I40E_PF_ATQLEN_ATQENABLE_MASK)); +#else + wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries | + I40E_PF_ATQLEN_ATQENABLE_MASK)); +#endif /* INTEGRATED_VF */ +#endif /* PF_DRIVER */ +#ifdef VF_DRIVER +#ifdef INTEGRATED_VF + if (i40e_is_vf(hw)) + wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries | + I40E_VF_ATQLEN1_ATQENABLE_MASK)); +#else + wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries | + I40E_VF_ATQLEN1_ATQENABLE_MASK)); +#endif /* INTEGRATED_VF */ +#endif /* VF_DRIVER */ + wr32(hw, hw->aq.asq.bal, I40E_LO_DWORD(hw->aq.asq.desc_buf.pa)); + wr32(hw, hw->aq.asq.bah, I40E_HI_DWORD(hw->aq.asq.desc_buf.pa)); + + /* Check one 
register to verify that config was applied */ + reg = rd32(hw, hw->aq.asq.bal); + if (reg != I40E_LO_DWORD(hw->aq.asq.desc_buf.pa)) + ret_code = I40E_ERR_ADMIN_QUEUE_ERROR; + + return ret_code; +} + +/** + * i40e_config_arq_regs - ARQ register configuration + * @hw: pointer to the hardware structure + * + * Configure base address and length registers for the receive (event queue) + **/ +STATIC enum i40e_status_code i40e_config_arq_regs(struct i40e_hw *hw) +{ + enum i40e_status_code ret_code = I40E_SUCCESS; + u32 reg = 0; + + /* Clear Head and Tail */ + wr32(hw, hw->aq.arq.head, 0); + wr32(hw, hw->aq.arq.tail, 0); + + /* set starting point */ +#ifdef PF_DRIVER +#ifdef INTEGRATED_VF + if (!i40e_is_vf(hw)) + wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries | + I40E_PF_ARQLEN_ARQENABLE_MASK)); +#else + wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries | + I40E_PF_ARQLEN_ARQENABLE_MASK)); +#endif /* INTEGRATED_VF */ +#endif /* PF_DRIVER */ +#ifdef VF_DRIVER +#ifdef INTEGRATED_VF + if (i40e_is_vf(hw)) + wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries | + I40E_VF_ARQLEN1_ARQENABLE_MASK)); +#else + wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries | + I40E_VF_ARQLEN1_ARQENABLE_MASK)); +#endif /* INTEGRATED_VF */ +#endif /* VF_DRIVER */ + wr32(hw, hw->aq.arq.bal, I40E_LO_DWORD(hw->aq.arq.desc_buf.pa)); + wr32(hw, hw->aq.arq.bah, I40E_HI_DWORD(hw->aq.arq.desc_buf.pa)); + + /* Update tail in the HW to post pre-allocated buffers */ + wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1); + + /* Check one register to verify that config was applied */ + reg = rd32(hw, hw->aq.arq.bal); + if (reg != I40E_LO_DWORD(hw->aq.arq.desc_buf.pa)) + ret_code = I40E_ERR_ADMIN_QUEUE_ERROR; + + return ret_code; +} + +/** + * i40e_init_asq - main initialization routine for ASQ + * @hw: pointer to the hardware structure + * + * This is the main initialization routine for the Admin Send Queue + * Prior to calling this function, drivers *MUST* set the following fields + * in the hw->aq structure: + * - hw->aq.num_asq_entries + * - hw->aq.arq_buf_size + * + * Do *NOT* hold the lock when calling this as the memory allocation routines + * called are not going to be atomic context safe + **/ +enum i40e_status_code i40e_init_asq(struct i40e_hw *hw) +{ + enum i40e_status_code ret_code = I40E_SUCCESS; + + if (hw->aq.asq.count > 0) { + /* queue already initialized */ + ret_code = I40E_ERR_NOT_READY; + goto init_adminq_exit; + } + + /* verify input for valid configuration */ + if ((hw->aq.num_asq_entries == 0) || + (hw->aq.asq_buf_size == 0)) { + ret_code = I40E_ERR_CONFIG; + goto init_adminq_exit; + } + + hw->aq.asq.next_to_use = 0; + hw->aq.asq.next_to_clean = 0; + + /* allocate the ring memory */ + ret_code = i40e_alloc_adminq_asq_ring(hw); + if (ret_code != I40E_SUCCESS) + goto init_adminq_exit; + + /* allocate buffers in the rings */ + ret_code = i40e_alloc_asq_bufs(hw); + if (ret_code != I40E_SUCCESS) + goto init_adminq_free_rings; + + /* initialize base registers */ + ret_code = i40e_config_asq_regs(hw); + if (ret_code != I40E_SUCCESS) + goto init_config_regs; + + /* success! */ + hw->aq.asq.count = hw->aq.num_asq_entries; + goto init_adminq_exit; + +init_adminq_free_rings: + i40e_free_adminq_asq(hw); + return ret_code; + +init_config_regs: + i40e_free_asq_bufs(hw); + +init_adminq_exit: + return ret_code; +} + +/** + * i40e_init_arq - initialize ARQ + * @hw: pointer to the hardware structure + * + * The main initialization routine for the Admin Receive (Event) Queue. 
+ * Prior to calling this function, drivers *MUST* set the following fields + * in the hw->aq structure: + * - hw->aq.num_asq_entries + * - hw->aq.arq_buf_size + * + * Do *NOT* hold the lock when calling this as the memory allocation routines + * called are not going to be atomic context safe + **/ +enum i40e_status_code i40e_init_arq(struct i40e_hw *hw) +{ + enum i40e_status_code ret_code = I40E_SUCCESS; + + if (hw->aq.arq.count > 0) { + /* queue already initialized */ + ret_code = I40E_ERR_NOT_READY; + goto init_adminq_exit; + } + + /* verify input for valid configuration */ + if ((hw->aq.num_arq_entries == 0) || + (hw->aq.arq_buf_size == 0)) { + ret_code = I40E_ERR_CONFIG; + goto init_adminq_exit; + } + + hw->aq.arq.next_to_use = 0; + hw->aq.arq.next_to_clean = 0; + + /* allocate the ring memory */ + ret_code = i40e_alloc_adminq_arq_ring(hw); + if (ret_code != I40E_SUCCESS) + goto init_adminq_exit; + + /* allocate buffers in the rings */ + ret_code = i40e_alloc_arq_bufs(hw); + if (ret_code != I40E_SUCCESS) + goto init_adminq_free_rings; + + /* initialize base registers */ + ret_code = i40e_config_arq_regs(hw); + if (ret_code != I40E_SUCCESS) + goto init_adminq_free_rings; + + /* success! */ + hw->aq.arq.count = hw->aq.num_arq_entries; + goto init_adminq_exit; + +init_adminq_free_rings: + i40e_free_adminq_arq(hw); + +init_adminq_exit: + return ret_code; +} + +/** + * i40e_shutdown_asq - shutdown the ASQ + * @hw: pointer to the hardware structure + * + * The main shutdown routine for the Admin Send Queue + **/ +enum i40e_status_code i40e_shutdown_asq(struct i40e_hw *hw) +{ + enum i40e_status_code ret_code = I40E_SUCCESS; + + i40e_acquire_spinlock(&hw->aq.asq_spinlock); + + if (hw->aq.asq.count == 0) { + ret_code = I40E_ERR_NOT_READY; + goto shutdown_asq_out; + } + + /* Stop firmware AdminQ processing */ + wr32(hw, hw->aq.asq.head, 0); + wr32(hw, hw->aq.asq.tail, 0); + wr32(hw, hw->aq.asq.len, 0); + wr32(hw, hw->aq.asq.bal, 0); + wr32(hw, hw->aq.asq.bah, 0); + + hw->aq.asq.count = 0; /* to indicate uninitialized queue */ + + /* free ring buffers */ + i40e_free_asq_bufs(hw); + +shutdown_asq_out: + i40e_release_spinlock(&hw->aq.asq_spinlock); + return ret_code; +} + +/** + * i40e_shutdown_arq - shutdown ARQ + * @hw: pointer to the hardware structure + * + * The main shutdown routine for the Admin Receive Queue + **/ +enum i40e_status_code i40e_shutdown_arq(struct i40e_hw *hw) +{ + enum i40e_status_code ret_code = I40E_SUCCESS; + + i40e_acquire_spinlock(&hw->aq.arq_spinlock); + + if (hw->aq.arq.count == 0) { + ret_code = I40E_ERR_NOT_READY; + goto shutdown_arq_out; + } + + /* Stop firmware AdminQ processing */ + wr32(hw, hw->aq.arq.head, 0); + wr32(hw, hw->aq.arq.tail, 0); + wr32(hw, hw->aq.arq.len, 0); + wr32(hw, hw->aq.arq.bal, 0); + wr32(hw, hw->aq.arq.bah, 0); + + hw->aq.arq.count = 0; /* to indicate uninitialized queue */ + + /* free ring buffers */ + i40e_free_arq_bufs(hw); + +shutdown_arq_out: + i40e_release_spinlock(&hw->aq.arq_spinlock); + return ret_code; +} +#ifdef PF_DRIVER + +/** + * i40e_resume_aq - resume AQ processing from 0 + * @hw: pointer to the hardware structure + **/ +STATIC void i40e_resume_aq(struct i40e_hw *hw) +{ + /* Registers are reset after PF reset */ + hw->aq.asq.next_to_use = 0; + hw->aq.asq.next_to_clean = 0; + + i40e_config_asq_regs(hw); + + hw->aq.arq.next_to_use = 0; + hw->aq.arq.next_to_clean = 0; + + i40e_config_arq_regs(hw); +} +#endif /* PF_DRIVER */ + +/** + * i40e_set_hw_flags - set HW flags + * @hw: pointer to the hardware structure + **/ +STATIC 
void i40e_set_hw_flags(struct i40e_hw *hw) +{ + struct i40e_adminq_info *aq = &hw->aq; + + hw->flags = 0; + + switch (hw->mac.type) { + case I40E_MAC_XL710: + if (aq->api_maj_ver > 1 || + (aq->api_maj_ver == 1 && + aq->api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710)) { + hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE; + hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE; + /* The ability to RX (not drop) 802.1ad frames */ + hw->flags |= I40E_HW_FLAG_802_1AD_CAPABLE; + } + break; + case I40E_MAC_X722: + hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE | + I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK; + + if (aq->api_maj_ver > 1 || + (aq->api_maj_ver == 1 && + aq->api_min_ver >= I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722)) + hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE; + + if (aq->api_maj_ver > 1 || + (aq->api_maj_ver == 1 && + aq->api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_X722)) + hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE; + /* fall through */ + default: + break; + } + + /* Newer versions of firmware require lock when reading the NVM */ + if (aq->api_maj_ver > 1 || + (aq->api_maj_ver == 1 && + aq->api_min_ver >= 5)) + hw->flags |= I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK; + + if (aq->api_maj_ver > 1 || + (aq->api_maj_ver == 1 && + aq->api_min_ver >= 8)) { + hw->flags |= I40E_HW_FLAG_FW_LLDP_PERSISTENT; + hw->flags |= I40E_HW_FLAG_DROP_MODE; + } + + if (aq->api_maj_ver > 1 || + (aq->api_maj_ver == 1 && + aq->api_min_ver >= 9)) + hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_EXTENDED; +} + +/** + * i40e_init_adminq - main initialization routine for Admin Queue + * @hw: pointer to the hardware structure + * + * Prior to calling this function, drivers *MUST* set the following fields + * in the hw->aq structure: + * - hw->aq.num_asq_entries + * - hw->aq.num_arq_entries + * - hw->aq.arq_buf_size + * - hw->aq.asq_buf_size + **/ +enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw) +{ + struct i40e_adminq_info *aq = &hw->aq; + enum i40e_status_code ret_code; + u16 cfg_ptr, oem_hi, oem_lo; + u16 eetrack_lo, eetrack_hi; + int retry = 0; + + /* verify input for valid configuration */ + if (aq->num_arq_entries == 0 || + aq->num_asq_entries == 0 || + aq->arq_buf_size == 0 || + aq->asq_buf_size == 0) { + ret_code = I40E_ERR_CONFIG; + goto init_adminq_exit; + } + i40e_init_spinlock(&aq->asq_spinlock); + i40e_init_spinlock(&aq->arq_spinlock); + + /* Set up register offsets */ + i40e_adminq_init_regs(hw); + + /* setup ASQ command write back timeout */ + hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT; + + /* allocate the ASQ */ + ret_code = i40e_init_asq(hw); + if (ret_code != I40E_SUCCESS) + goto init_adminq_destroy_spinlocks; + + /* allocate the ARQ */ + ret_code = i40e_init_arq(hw); + if (ret_code != I40E_SUCCESS) + goto init_adminq_free_asq; + + /* VF has no need of firmware */ + if (i40e_is_vf(hw)) + goto init_adminq_exit; + + /* There are some cases where the firmware may not be quite ready + * for AdminQ operations, so we retry the AdminQ setup a few times + * if we see timeouts in this first AQ call. + */ + do { + ret_code = i40e_aq_get_firmware_version(hw, + &aq->fw_maj_ver, + &aq->fw_min_ver, + &aq->fw_build, + &aq->api_maj_ver, + &aq->api_min_ver, + NULL); + if (ret_code != I40E_ERR_ADMIN_QUEUE_TIMEOUT) + break; + retry++; + i40e_msec_delay(100); + i40e_resume_aq(hw); + } while (retry < 10); + if (ret_code != I40E_SUCCESS) + goto init_adminq_free_arq; + + /* + * Some features were introduced in different FW API version + * for different MAC type. 
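+	 * Each check in i40e_set_hw_flags() follows the same gating pattern:
+	 * a capability tied to API version 1.N is enabled when
+	 *   aq->api_maj_ver > 1 ||
+	 *   (aq->api_maj_ver == 1 && aq->api_min_ver >= N)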
+ */ + i40e_set_hw_flags(hw); + + /* get the NVM version info */ + i40e_read_nvm_word(hw, I40E_SR_NVM_DEV_STARTER_VERSION, + &hw->nvm.version); + i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo); + i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi); + hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo; + i40e_read_nvm_word(hw, I40E_SR_BOOT_CONFIG_PTR, &cfg_ptr); + i40e_read_nvm_word(hw, (cfg_ptr + I40E_NVM_OEM_VER_OFF), + &oem_hi); + i40e_read_nvm_word(hw, (cfg_ptr + (I40E_NVM_OEM_VER_OFF + 1)), + &oem_lo); + hw->nvm.oem_ver = ((u32)oem_hi << 16) | oem_lo; + + if (aq->api_maj_ver > I40E_FW_API_VERSION_MAJOR) { + ret_code = I40E_ERR_FIRMWARE_API_VERSION; + goto init_adminq_free_arq; + } + + /* pre-emptive resource lock release */ + i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL); + hw->nvm_release_on_done = false; + hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; + + ret_code = I40E_SUCCESS; + + /* success! */ + goto init_adminq_exit; + +init_adminq_free_arq: + i40e_shutdown_arq(hw); +init_adminq_free_asq: + i40e_shutdown_asq(hw); +init_adminq_destroy_spinlocks: + i40e_destroy_spinlock(&aq->asq_spinlock); + i40e_destroy_spinlock(&aq->arq_spinlock); + +init_adminq_exit: + return ret_code; +} + +/** + * i40e_shutdown_adminq - shutdown routine for the Admin Queue + * @hw: pointer to the hardware structure + **/ +enum i40e_status_code i40e_shutdown_adminq(struct i40e_hw *hw) +{ + enum i40e_status_code ret_code = I40E_SUCCESS; + + if (i40e_check_asq_alive(hw)) + i40e_aq_queue_shutdown(hw, true); + + i40e_shutdown_asq(hw); + i40e_shutdown_arq(hw); + i40e_destroy_spinlock(&hw->aq.asq_spinlock); + i40e_destroy_spinlock(&hw->aq.arq_spinlock); + + if (hw->nvm_buff.va) + i40e_free_virt_mem(hw, &hw->nvm_buff); + + return ret_code; +} + +/** + * i40e_clean_asq - cleans Admin send queue + * @hw: pointer to the hardware structure + * + * returns the number of free desc + **/ +u16 i40e_clean_asq(struct i40e_hw *hw) +{ + struct i40e_adminq_ring *asq = &(hw->aq.asq); + struct i40e_asq_cmd_details *details; + u16 ntc = asq->next_to_clean; + struct i40e_aq_desc desc_cb; + struct i40e_aq_desc *desc; + + desc = I40E_ADMINQ_DESC(*asq, ntc); + details = I40E_ADMINQ_DETAILS(*asq, ntc); + while (rd32(hw, hw->aq.asq.head) != ntc) { + i40e_debug(hw, I40E_DEBUG_AQ_COMMAND, + "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head)); + + if (details->callback) { + I40E_ADMINQ_CALLBACK cb_func = + (I40E_ADMINQ_CALLBACK)details->callback; + i40e_memcpy(&desc_cb, desc, sizeof(struct i40e_aq_desc), + I40E_DMA_TO_DMA); + cb_func(hw, &desc_cb); + } + i40e_memset(desc, 0, sizeof(*desc), I40E_DMA_MEM); + i40e_memset(details, 0, sizeof(*details), I40E_NONDMA_MEM); + ntc++; + if (ntc == asq->count) + ntc = 0; + desc = I40E_ADMINQ_DESC(*asq, ntc); + details = I40E_ADMINQ_DETAILS(*asq, ntc); + } + + asq->next_to_clean = ntc; + + return I40E_DESC_UNUSED(asq); +} + +/** + * i40e_asq_done - check if FW has processed the Admin Send Queue + * @hw: pointer to the hw struct + * + * Returns true if the firmware has processed all descriptors on the + * admin send queue. Returns false if there are still requests pending. 
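+ *
+ * Editor's note (illustrative): callers poll this predicate with a bounded
+ * delay rather than checking the descriptor DD bit, roughly:
+ *
+ *   u32 total_delay = 0;
+ *   while (!i40e_asq_done(hw) && total_delay < hw->aq.asq_cmd_timeout) {
+ *       i40e_usec_delay(50);
+ *       total_delay += 50;
+ *   }
+ *
+ * which mirrors the wait loop in i40e_asq_send_command() below.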
+ **/ +#ifdef VF_DRIVER +bool i40e_asq_done(struct i40e_hw *hw) +#else +STATIC bool i40e_asq_done(struct i40e_hw *hw) +#endif +{ + /* AQ designers suggest use of head for better + * timing reliability than DD bit + */ + return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use; + +} + +/** + * i40e_asq_send_command - send command to Admin Queue + * @hw: pointer to the hw struct + * @desc: prefilled descriptor describing the command (non DMA mem) + * @buff: buffer to use for indirect commands + * @buff_size: size of buffer for indirect commands + * @cmd_details: pointer to command details structure + * + * This is the main send command driver routine for the Admin Queue send + * queue. It runs the queue, cleans the queue, etc + **/ +enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw, + struct i40e_aq_desc *desc, + void *buff, /* can be NULL */ + u16 buff_size, + struct i40e_asq_cmd_details *cmd_details) +{ + enum i40e_status_code status = I40E_SUCCESS; + struct i40e_dma_mem *dma_buff = NULL; + struct i40e_asq_cmd_details *details; + struct i40e_aq_desc *desc_on_ring; + bool cmd_completed = false; + u16 retval = 0; + u32 val = 0; + + i40e_acquire_spinlock(&hw->aq.asq_spinlock); + + hw->aq.asq_last_status = I40E_AQ_RC_OK; + + if (hw->aq.asq.count == 0) { + i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, + "AQTX: Admin queue not initialized.\n"); + status = I40E_ERR_QUEUE_EMPTY; + goto asq_send_command_error; + } + + val = rd32(hw, hw->aq.asq.head); + if (val >= hw->aq.num_asq_entries) { + i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, + "AQTX: head overrun at %d\n", val); + status = I40E_ERR_ADMIN_QUEUE_FULL; + goto asq_send_command_error; + } + + details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use); + if (cmd_details) { + i40e_memcpy(details, + cmd_details, + sizeof(struct i40e_asq_cmd_details), + I40E_NONDMA_TO_NONDMA); + + /* If the cmd_details are defined copy the cookie. 
The + * CPU_TO_LE32 is not needed here because the data is ignored + * by the FW, only used by the driver + */ + if (details->cookie) { + desc->cookie_high = + CPU_TO_LE32(I40E_HI_DWORD(details->cookie)); + desc->cookie_low = + CPU_TO_LE32(I40E_LO_DWORD(details->cookie)); + } + } else { + i40e_memset(details, 0, + sizeof(struct i40e_asq_cmd_details), + I40E_NONDMA_MEM); + } + + /* clear requested flags and then set additional flags if defined */ + desc->flags &= ~CPU_TO_LE16(details->flags_dis); + desc->flags |= CPU_TO_LE16(details->flags_ena); + + if (buff_size > hw->aq.asq_buf_size) { + i40e_debug(hw, + I40E_DEBUG_AQ_MESSAGE, + "AQTX: Invalid buffer size: %d.\n", + buff_size); + status = I40E_ERR_INVALID_SIZE; + goto asq_send_command_error; + } + + if (details->postpone && !details->async) { + i40e_debug(hw, + I40E_DEBUG_AQ_MESSAGE, + "AQTX: Async flag not set along with postpone flag"); + status = I40E_ERR_PARAM; + goto asq_send_command_error; + } + + /* call clean and check queue available function to reclaim the + * descriptors that were processed by FW, the function returns the + * number of desc available + */ + /* the clean function called here could be called in a separate thread + * in case of asynchronous completions + */ + if (i40e_clean_asq(hw) == 0) { + i40e_debug(hw, + I40E_DEBUG_AQ_MESSAGE, + "AQTX: Error queue is full.\n"); + status = I40E_ERR_ADMIN_QUEUE_FULL; + goto asq_send_command_error; + } + + /* initialize the temp desc pointer with the right desc */ + desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use); + + /* if the desc is available copy the temp desc to the right place */ + i40e_memcpy(desc_on_ring, desc, sizeof(struct i40e_aq_desc), + I40E_NONDMA_TO_DMA); + + /* if buff is not NULL assume indirect command */ + if (buff != NULL) { + dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]); + /* copy the user buff into the respective DMA buff */ + i40e_memcpy(dma_buff->va, buff, buff_size, + I40E_NONDMA_TO_DMA); + desc_on_ring->datalen = CPU_TO_LE16(buff_size); + + /* Update the address values in the desc with the pa value + * for respective buffer + */ + desc_on_ring->params.external.addr_high = + CPU_TO_LE32(I40E_HI_DWORD(dma_buff->pa)); + desc_on_ring->params.external.addr_low = + CPU_TO_LE32(I40E_LO_DWORD(dma_buff->pa)); + } + + /* bump the tail */ + i40e_debug(hw, I40E_DEBUG_AQ_COMMAND, "AQTX: desc and buffer:\n"); + i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring, + buff, buff_size); + (hw->aq.asq.next_to_use)++; + if (hw->aq.asq.next_to_use == hw->aq.asq.count) + hw->aq.asq.next_to_use = 0; + if (!details->postpone) + wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use); + + /* if cmd_details are not defined or async flag is not set, + * we need to wait for desc write back + */ + if (!details->async && !details->postpone) { + u32 total_delay = 0; + + do { + /* AQ designers suggest use of head for better + * timing reliability than DD bit + */ + if (i40e_asq_done(hw)) + break; + i40e_usec_delay(50); + total_delay += 50; + } while (total_delay < hw->aq.asq_cmd_timeout); + } + + /* if ready, copy the desc back to temp */ + if (i40e_asq_done(hw)) { + i40e_memcpy(desc, desc_on_ring, sizeof(struct i40e_aq_desc), + I40E_DMA_TO_NONDMA); + if (buff != NULL) + i40e_memcpy(buff, dma_buff->va, buff_size, + I40E_DMA_TO_NONDMA); + retval = LE16_TO_CPU(desc->retval); + if (retval != 0) { + i40e_debug(hw, + I40E_DEBUG_AQ_MESSAGE, + "AQTX: Command completed with error 0x%X.\n", + retval); + + /* strip off FW internal code */ + retval &= 0xff; + 
} + cmd_completed = true; + if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK) + status = I40E_SUCCESS; + else if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_EBUSY) + status = I40E_ERR_NOT_READY; + else + status = I40E_ERR_ADMIN_QUEUE_ERROR; + hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval; + } + + i40e_debug(hw, I40E_DEBUG_AQ_COMMAND, + "AQTX: desc and buffer writeback:\n"); + i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size); + + /* save writeback aq if requested */ + if (details->wb_desc) + i40e_memcpy(details->wb_desc, desc_on_ring, + sizeof(struct i40e_aq_desc), I40E_DMA_TO_NONDMA); + + /* update the error if time out occurred */ + if ((!cmd_completed) && + (!details->async && !details->postpone)) { +#ifdef PF_DRIVER + if (rd32(hw, hw->aq.asq.len) & I40E_GL_ATQLEN_ATQCRIT_MASK) { +#else + if (rd32(hw, hw->aq.asq.len) & I40E_VF_ATQLEN1_ATQCRIT_MASK) { +#endif + i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, + "AQTX: AQ Critical error.\n"); + status = I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR; + } else { + i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, + "AQTX: Writeback timeout.\n"); + status = I40E_ERR_ADMIN_QUEUE_TIMEOUT; + } + } + +asq_send_command_error: + i40e_release_spinlock(&hw->aq.asq_spinlock); + return status; +} + +/** + * i40e_fill_default_direct_cmd_desc - AQ descriptor helper function + * @desc: pointer to the temp descriptor (non DMA mem) + * @opcode: the opcode can be used to decide which flags to turn off or on + * + * Fill the desc with default values + **/ +void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc, + u16 opcode) +{ + /* zero out the desc */ + i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc), + I40E_NONDMA_MEM); + desc->opcode = CPU_TO_LE16(opcode); + desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_SI); +} + +/** + * i40e_clean_arq_element + * @hw: pointer to the hw struct + * @e: event info from the receive descriptor, includes any buffers + * @pending: number of events that could be left to process + * + * This function cleans one Admin Receive Queue element and returns + * the contents through e. 
It can also return how many events are + * left to process through 'pending' + **/ +enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw, + struct i40e_arq_event_info *e, + u16 *pending) +{ + enum i40e_status_code ret_code = I40E_SUCCESS; + u16 ntc = hw->aq.arq.next_to_clean; + struct i40e_aq_desc *desc; + struct i40e_dma_mem *bi; + u16 desc_idx; + u16 datalen; + u16 flags; + u16 ntu; + + /* pre-clean the event info */ + i40e_memset(&e->desc, 0, sizeof(e->desc), I40E_NONDMA_MEM); + + /* take the lock before we start messing with the ring */ + i40e_acquire_spinlock(&hw->aq.arq_spinlock); + + if (hw->aq.arq.count == 0) { + i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, + "AQRX: Admin queue not initialized.\n"); + ret_code = I40E_ERR_QUEUE_EMPTY; + goto clean_arq_element_err; + } + + /* set next_to_use to head */ +#ifdef INTEGRATED_VF + if (!i40e_is_vf(hw)) + ntu = rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK; + else + ntu = rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK; +#else +#ifdef PF_DRIVER + ntu = rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK; +#endif /* PF_DRIVER */ +#ifdef VF_DRIVER + ntu = rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK; +#endif /* VF_DRIVER */ +#endif /* INTEGRATED_VF */ + if (ntu == ntc) { + /* nothing to do - shouldn't need to update ring's values */ + ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK; + goto clean_arq_element_out; + } + + /* now clean the next descriptor */ + desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc); + desc_idx = ntc; + + hw->aq.arq_last_status = + (enum i40e_admin_queue_err)LE16_TO_CPU(desc->retval); + flags = LE16_TO_CPU(desc->flags); + if (flags & I40E_AQ_FLAG_ERR) { + ret_code = I40E_ERR_ADMIN_QUEUE_ERROR; + i40e_debug(hw, + I40E_DEBUG_AQ_MESSAGE, + "AQRX: Event received with error 0x%X.\n", + hw->aq.arq_last_status); + } + + i40e_memcpy(&e->desc, desc, sizeof(struct i40e_aq_desc), + I40E_DMA_TO_NONDMA); + datalen = LE16_TO_CPU(desc->datalen); + e->msg_len = min(datalen, e->buf_len); + if (e->msg_buf != NULL && (e->msg_len != 0)) + i40e_memcpy(e->msg_buf, + hw->aq.arq.r.arq_bi[desc_idx].va, + e->msg_len, I40E_DMA_TO_NONDMA); + + i40e_debug(hw, I40E_DEBUG_AQ_COMMAND, "AQRX: desc and buffer:\n"); + i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf, + hw->aq.arq_buf_size); + + /* Restore the original datalen and buffer address in the desc, + * FW updates datalen to indicate the event message + * size + */ + bi = &hw->aq.arq.r.arq_bi[ntc]; + i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc), I40E_DMA_MEM); + + desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF); + if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF) + desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB); + desc->datalen = CPU_TO_LE16((u16)bi->size); + desc->params.external.addr_high = CPU_TO_LE32(I40E_HI_DWORD(bi->pa)); + desc->params.external.addr_low = CPU_TO_LE32(I40E_LO_DWORD(bi->pa)); + + /* set tail = the last cleaned desc index. */ + wr32(hw, hw->aq.arq.tail, ntc); + /* ntc is updated to tail + 1 */ + ntc++; + if (ntc == hw->aq.num_arq_entries) + ntc = 0; + hw->aq.arq.next_to_clean = ntc; + hw->aq.arq.next_to_use = ntu; + +#ifdef PF_DRIVER + i40e_nvmupd_check_wait_event(hw, LE16_TO_CPU(e->desc.opcode), &e->desc); +#endif /* PF_DRIVER */ +clean_arq_element_out: + /* Set pending if needed, unlock and return */ + if (pending != NULL) + *pending = (ntc > ntu ? 
hw->aq.arq.count : 0) + (ntu - ntc); +clean_arq_element_err: + i40e_release_spinlock(&hw->aq.arq_spinlock); + + return ret_code; +} + diff --git a/src/spdk/dpdk/drivers/net/i40e/base/i40e_adminq.h b/src/spdk/dpdk/drivers/net/i40e/base/i40e_adminq.h new file mode 100644 index 000000000..6ce262ad4 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/i40e/base/i40e_adminq.h @@ -0,0 +1,137 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + +#ifndef _I40E_ADMINQ_H_ +#define _I40E_ADMINQ_H_ + +#include "i40e_osdep.h" +#include "i40e_status.h" +#include "i40e_adminq_cmd.h" + +#define I40E_ADMINQ_DESC(R, i) \ + (&(((struct i40e_aq_desc *)((R).desc_buf.va))[i])) + +#define I40E_ADMINQ_DESC_ALIGNMENT 4096 + +struct i40e_adminq_ring { + struct i40e_virt_mem dma_head; /* space for dma structures */ + struct i40e_dma_mem desc_buf; /* descriptor ring memory */ + struct i40e_virt_mem cmd_buf; /* command buffer memory */ + + union { + struct i40e_dma_mem *asq_bi; + struct i40e_dma_mem *arq_bi; + } r; + + u16 count; /* Number of descriptors */ + u16 rx_buf_len; /* Admin Receive Queue buffer length */ + + /* used for interrupt processing */ + u16 next_to_use; + u16 next_to_clean; + + /* used for queue tracking */ + u32 head; + u32 tail; + u32 len; + u32 bah; + u32 bal; +}; + +/* ASQ transaction details */ +struct i40e_asq_cmd_details { + void *callback; /* cast from type I40E_ADMINQ_CALLBACK */ + u64 cookie; + u16 flags_ena; + u16 flags_dis; + bool async; + bool postpone; + struct i40e_aq_desc *wb_desc; +}; + +#define I40E_ADMINQ_DETAILS(R, i) \ + (&(((struct i40e_asq_cmd_details *)((R).cmd_buf.va))[i])) + +/* ARQ event information */ +struct i40e_arq_event_info { + struct i40e_aq_desc desc; + u16 msg_len; + u16 buf_len; + u8 *msg_buf; +}; + +/* Admin Queue information */ +struct i40e_adminq_info { + struct i40e_adminq_ring arq; /* receive queue */ + struct i40e_adminq_ring asq; /* send queue */ + u32 asq_cmd_timeout; /* send queue cmd write back timeout*/ + u16 num_arq_entries; /* receive queue depth */ + u16 num_asq_entries; /* send queue depth */ + u16 arq_buf_size; /* receive queue buffer size */ + u16 asq_buf_size; /* send queue buffer size */ + u16 fw_maj_ver; /* firmware major version */ + u16 fw_min_ver; /* firmware minor version */ + u32 fw_build; /* firmware build number */ + u16 api_maj_ver; /* api major version */ + u16 api_min_ver; /* api minor version */ + + struct i40e_spinlock asq_spinlock; /* Send queue spinlock */ + struct i40e_spinlock arq_spinlock; /* Receive queue spinlock */ + + /* last status values on send and receive queues */ + enum i40e_admin_queue_err asq_last_status; + enum i40e_admin_queue_err arq_last_status; +}; + +/** + * i40e_aq_rc_to_posix - convert errors to user-land codes + * aq_ret: AdminQ handler error code can override aq_rc + * aq_rc: AdminQ firmware error code to convert + **/ +STATIC INLINE int i40e_aq_rc_to_posix(int aq_ret, int aq_rc) +{ + int aq_to_posix[] = { + 0, /* I40E_AQ_RC_OK */ + -EPERM, /* I40E_AQ_RC_EPERM */ + -ENOENT, /* I40E_AQ_RC_ENOENT */ + -ESRCH, /* I40E_AQ_RC_ESRCH */ + -EINTR, /* I40E_AQ_RC_EINTR */ + -EIO, /* I40E_AQ_RC_EIO */ + -ENXIO, /* I40E_AQ_RC_ENXIO */ + -E2BIG, /* I40E_AQ_RC_E2BIG */ + -EAGAIN, /* I40E_AQ_RC_EAGAIN */ + -ENOMEM, /* I40E_AQ_RC_ENOMEM */ + -EACCES, /* I40E_AQ_RC_EACCES */ + -EFAULT, /* I40E_AQ_RC_EFAULT */ + -EBUSY, /* I40E_AQ_RC_EBUSY */ + -EEXIST, /* I40E_AQ_RC_EEXIST */ + -EINVAL, /* I40E_AQ_RC_EINVAL */ + -ENOTTY, /* I40E_AQ_RC_ENOTTY */ + -ENOSPC, /* I40E_AQ_RC_ENOSPC */ + 
-ENOSYS, /* I40E_AQ_RC_ENOSYS */ + -ERANGE, /* I40E_AQ_RC_ERANGE */ + -EPIPE, /* I40E_AQ_RC_EFLUSHED */ + -ESPIPE, /* I40E_AQ_RC_BAD_ADDR */ + -EROFS, /* I40E_AQ_RC_EMODE */ + -EFBIG, /* I40E_AQ_RC_EFBIG */ + }; + + /* aq_rc is invalid if AQ timed out */ + if (aq_ret == I40E_ERR_ADMIN_QUEUE_TIMEOUT) + return -EAGAIN; + + if (!((u32)aq_rc < (sizeof(aq_to_posix) / sizeof((aq_to_posix)[0])))) + return -ERANGE; + + return aq_to_posix[aq_rc]; +} + +/* general information */ +#define I40E_AQ_LARGE_BUF 512 +#define I40E_ASQ_CMD_TIMEOUT 250000 /* usecs */ + +void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc, + u16 opcode); + +#endif /* _I40E_ADMINQ_H_ */ diff --git a/src/spdk/dpdk/drivers/net/i40e/base/i40e_adminq_cmd.h b/src/spdk/dpdk/drivers/net/i40e/base/i40e_adminq_cmd.h new file mode 100644 index 000000000..1905167f5 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/i40e/base/i40e_adminq_cmd.h @@ -0,0 +1,2973 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + +#ifndef _I40E_ADMINQ_CMD_H_ +#define _I40E_ADMINQ_CMD_H_ + +/* This header file defines the i40e Admin Queue commands and is shared between + * i40e Firmware and Software. + * + * This file needs to comply with the Linux Kernel coding style. + */ + +#define I40E_FW_API_VERSION_MAJOR 0x0001 +#define I40E_FW_API_VERSION_MINOR_X722 0x000A +#define I40E_FW_API_VERSION_MINOR_X710 0x000A + +#define I40E_FW_MINOR_VERSION(_h) ((_h)->mac.type == I40E_MAC_XL710 ? \ + I40E_FW_API_VERSION_MINOR_X710 : \ + I40E_FW_API_VERSION_MINOR_X722) + +/* API version 1.7 implements additional link and PHY-specific APIs */ +#define I40E_MINOR_VER_GET_LINK_INFO_XL710 0x0007 +/* API version 1.9 for X722 implements additional link and PHY-specific APIs */ +#define I40E_MINOR_VER_GET_LINK_INFO_X722 0x0009 +/* API version 1.6 for X722 devices adds ability to stop FW LLDP agent */ +#define I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722 0x0006 + +struct i40e_aq_desc { + __le16 flags; + __le16 opcode; + __le16 datalen; + __le16 retval; + __le32 cookie_high; + __le32 cookie_low; + union { + struct { + __le32 param0; + __le32 param1; + __le32 param2; + __le32 param3; + } internal; + struct { + __le32 param0; + __le32 param1; + __le32 addr_high; + __le32 addr_low; + } external; + u8 raw[16]; + } params; +}; + +/* Flags sub-structure + * |0 |1 |2 |3 |4 |5 |6 |7 |8 |9 |10 |11 |12 |13 |14 |15 | + * |DD |CMP|ERR|VFE| * * RESERVED * * |LB |RD |VFC|BUF|SI |EI |FE | + */ + +/* command flags and offsets*/ +#define I40E_AQ_FLAG_DD_SHIFT 0 +#define I40E_AQ_FLAG_CMP_SHIFT 1 +#define I40E_AQ_FLAG_ERR_SHIFT 2 +#define I40E_AQ_FLAG_VFE_SHIFT 3 +#define I40E_AQ_FLAG_LB_SHIFT 9 +#define I40E_AQ_FLAG_RD_SHIFT 10 +#define I40E_AQ_FLAG_VFC_SHIFT 11 +#define I40E_AQ_FLAG_BUF_SHIFT 12 +#define I40E_AQ_FLAG_SI_SHIFT 13 +#define I40E_AQ_FLAG_EI_SHIFT 14 +#define I40E_AQ_FLAG_FE_SHIFT 15 + +#define I40E_AQ_FLAG_DD (1 << I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */ +#define I40E_AQ_FLAG_CMP (1 << I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */ +#define I40E_AQ_FLAG_ERR (1 << I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */ +#define I40E_AQ_FLAG_VFE (1 << I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */ +#define I40E_AQ_FLAG_LB (1 << I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */ +#define I40E_AQ_FLAG_RD (1 << I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */ +#define I40E_AQ_FLAG_VFC (1 << I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */ +#define I40E_AQ_FLAG_BUF (1 << I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */ +#define I40E_AQ_FLAG_SI (1 << I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */ +#define I40E_AQ_FLAG_EI (1 << I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */ 
+#define I40E_AQ_FLAG_FE (1 << I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */ + +/* error codes */ +enum i40e_admin_queue_err { + I40E_AQ_RC_OK = 0, /* success */ + I40E_AQ_RC_EPERM = 1, /* Operation not permitted */ + I40E_AQ_RC_ENOENT = 2, /* No such element */ + I40E_AQ_RC_ESRCH = 3, /* Bad opcode */ + I40E_AQ_RC_EINTR = 4, /* operation interrupted */ + I40E_AQ_RC_EIO = 5, /* I/O error */ + I40E_AQ_RC_ENXIO = 6, /* No such resource */ + I40E_AQ_RC_E2BIG = 7, /* Arg too long */ + I40E_AQ_RC_EAGAIN = 8, /* Try again */ + I40E_AQ_RC_ENOMEM = 9, /* Out of memory */ + I40E_AQ_RC_EACCES = 10, /* Permission denied */ + I40E_AQ_RC_EFAULT = 11, /* Bad address */ + I40E_AQ_RC_EBUSY = 12, /* Device or resource busy */ + I40E_AQ_RC_EEXIST = 13, /* object already exists */ + I40E_AQ_RC_EINVAL = 14, /* Invalid argument */ + I40E_AQ_RC_ENOTTY = 15, /* Not a typewriter */ + I40E_AQ_RC_ENOSPC = 16, /* No space left or alloc failure */ + I40E_AQ_RC_ENOSYS = 17, /* Function not implemented */ + I40E_AQ_RC_ERANGE = 18, /* Parameter out of range */ + I40E_AQ_RC_EFLUSHED = 19, /* Cmd flushed due to prev cmd error */ + I40E_AQ_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */ + I40E_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */ + I40E_AQ_RC_EFBIG = 22, /* File too large */ +}; + +/* Admin Queue command opcodes */ +enum i40e_admin_queue_opc { + /* aq commands */ + i40e_aqc_opc_get_version = 0x0001, + i40e_aqc_opc_driver_version = 0x0002, + i40e_aqc_opc_queue_shutdown = 0x0003, + i40e_aqc_opc_set_pf_context = 0x0004, + + /* resource ownership */ + i40e_aqc_opc_request_resource = 0x0008, + i40e_aqc_opc_release_resource = 0x0009, + + i40e_aqc_opc_list_func_capabilities = 0x000A, + i40e_aqc_opc_list_dev_capabilities = 0x000B, + + /* Proxy commands */ + i40e_aqc_opc_set_proxy_config = 0x0104, + i40e_aqc_opc_set_ns_proxy_table_entry = 0x0105, + + /* LAA */ + i40e_aqc_opc_mac_address_read = 0x0107, + i40e_aqc_opc_mac_address_write = 0x0108, + + /* PXE */ + i40e_aqc_opc_clear_pxe_mode = 0x0110, + + /* WoL commands */ + i40e_aqc_opc_set_wol_filter = 0x0120, + i40e_aqc_opc_get_wake_reason = 0x0121, + i40e_aqc_opc_clear_all_wol_filters = 0x025E, + + /* internal switch commands */ + i40e_aqc_opc_get_switch_config = 0x0200, + i40e_aqc_opc_add_statistics = 0x0201, + i40e_aqc_opc_remove_statistics = 0x0202, + i40e_aqc_opc_set_port_parameters = 0x0203, + i40e_aqc_opc_get_switch_resource_alloc = 0x0204, + i40e_aqc_opc_set_switch_config = 0x0205, + i40e_aqc_opc_rx_ctl_reg_read = 0x0206, + i40e_aqc_opc_rx_ctl_reg_write = 0x0207, + + i40e_aqc_opc_add_vsi = 0x0210, + i40e_aqc_opc_update_vsi_parameters = 0x0211, + i40e_aqc_opc_get_vsi_parameters = 0x0212, + + i40e_aqc_opc_add_pv = 0x0220, + i40e_aqc_opc_update_pv_parameters = 0x0221, + i40e_aqc_opc_get_pv_parameters = 0x0222, + + i40e_aqc_opc_add_veb = 0x0230, + i40e_aqc_opc_update_veb_parameters = 0x0231, + i40e_aqc_opc_get_veb_parameters = 0x0232, + + i40e_aqc_opc_delete_element = 0x0243, + + i40e_aqc_opc_add_macvlan = 0x0250, + i40e_aqc_opc_remove_macvlan = 0x0251, + i40e_aqc_opc_add_vlan = 0x0252, + i40e_aqc_opc_remove_vlan = 0x0253, + i40e_aqc_opc_set_vsi_promiscuous_modes = 0x0254, + i40e_aqc_opc_add_tag = 0x0255, + i40e_aqc_opc_remove_tag = 0x0256, + i40e_aqc_opc_add_multicast_etag = 0x0257, + i40e_aqc_opc_remove_multicast_etag = 0x0258, + i40e_aqc_opc_update_tag = 0x0259, + i40e_aqc_opc_add_control_packet_filter = 0x025A, + i40e_aqc_opc_remove_control_packet_filter = 0x025B, + i40e_aqc_opc_add_cloud_filters = 0x025C, + i40e_aqc_opc_remove_cloud_filters = 0x025D, + 
i40e_aqc_opc_clear_wol_switch_filters = 0x025E, + i40e_aqc_opc_replace_cloud_filters = 0x025F, + + i40e_aqc_opc_add_mirror_rule = 0x0260, + i40e_aqc_opc_delete_mirror_rule = 0x0261, + + /* Dynamic Device Personalization */ + i40e_aqc_opc_write_personalization_profile = 0x0270, + i40e_aqc_opc_get_personalization_profile_list = 0x0271, + + /* DCB commands */ + i40e_aqc_opc_dcb_ignore_pfc = 0x0301, + i40e_aqc_opc_dcb_updated = 0x0302, + i40e_aqc_opc_set_dcb_parameters = 0x0303, + + /* TX scheduler */ + i40e_aqc_opc_configure_vsi_bw_limit = 0x0400, + i40e_aqc_opc_configure_vsi_ets_sla_bw_limit = 0x0406, + i40e_aqc_opc_configure_vsi_tc_bw = 0x0407, + i40e_aqc_opc_query_vsi_bw_config = 0x0408, + i40e_aqc_opc_query_vsi_ets_sla_config = 0x040A, + i40e_aqc_opc_configure_switching_comp_bw_limit = 0x0410, + + i40e_aqc_opc_enable_switching_comp_ets = 0x0413, + i40e_aqc_opc_modify_switching_comp_ets = 0x0414, + i40e_aqc_opc_disable_switching_comp_ets = 0x0415, + i40e_aqc_opc_configure_switching_comp_ets_bw_limit = 0x0416, + i40e_aqc_opc_configure_switching_comp_bw_config = 0x0417, + i40e_aqc_opc_query_switching_comp_ets_config = 0x0418, + i40e_aqc_opc_query_port_ets_config = 0x0419, + i40e_aqc_opc_query_switching_comp_bw_config = 0x041A, + i40e_aqc_opc_suspend_port_tx = 0x041B, + i40e_aqc_opc_resume_port_tx = 0x041C, + i40e_aqc_opc_configure_partition_bw = 0x041D, + /* hmc */ + i40e_aqc_opc_query_hmc_resource_profile = 0x0500, + i40e_aqc_opc_set_hmc_resource_profile = 0x0501, + + /* phy commands*/ + i40e_aqc_opc_get_phy_abilities = 0x0600, + i40e_aqc_opc_set_phy_config = 0x0601, + i40e_aqc_opc_set_mac_config = 0x0603, + i40e_aqc_opc_set_link_restart_an = 0x0605, + i40e_aqc_opc_get_link_status = 0x0607, + i40e_aqc_opc_set_phy_int_mask = 0x0613, + i40e_aqc_opc_get_local_advt_reg = 0x0614, + i40e_aqc_opc_set_local_advt_reg = 0x0615, + i40e_aqc_opc_get_partner_advt = 0x0616, + i40e_aqc_opc_set_lb_modes = 0x0618, + i40e_aqc_opc_get_phy_wol_caps = 0x0621, + i40e_aqc_opc_set_phy_debug = 0x0622, + i40e_aqc_opc_upload_ext_phy_fm = 0x0625, + i40e_aqc_opc_run_phy_activity = 0x0626, + i40e_aqc_opc_set_phy_register = 0x0628, + i40e_aqc_opc_get_phy_register = 0x0629, + + /* NVM commands */ + i40e_aqc_opc_nvm_read = 0x0701, + i40e_aqc_opc_nvm_erase = 0x0702, + i40e_aqc_opc_nvm_update = 0x0703, + i40e_aqc_opc_nvm_config_read = 0x0704, + i40e_aqc_opc_nvm_config_write = 0x0705, + i40e_aqc_opc_nvm_progress = 0x0706, + i40e_aqc_opc_oem_post_update = 0x0720, + i40e_aqc_opc_thermal_sensor = 0x0721, + + /* virtualization commands */ + i40e_aqc_opc_send_msg_to_pf = 0x0801, + i40e_aqc_opc_send_msg_to_vf = 0x0802, + i40e_aqc_opc_send_msg_to_peer = 0x0803, + + /* alternate structure */ + i40e_aqc_opc_alternate_write = 0x0900, + i40e_aqc_opc_alternate_write_indirect = 0x0901, + i40e_aqc_opc_alternate_read = 0x0902, + i40e_aqc_opc_alternate_read_indirect = 0x0903, + i40e_aqc_opc_alternate_write_done = 0x0904, + i40e_aqc_opc_alternate_set_mode = 0x0905, + i40e_aqc_opc_alternate_clear_port = 0x0906, + + /* LLDP commands */ + i40e_aqc_opc_lldp_get_mib = 0x0A00, + i40e_aqc_opc_lldp_update_mib = 0x0A01, + i40e_aqc_opc_lldp_add_tlv = 0x0A02, + i40e_aqc_opc_lldp_update_tlv = 0x0A03, + i40e_aqc_opc_lldp_delete_tlv = 0x0A04, + i40e_aqc_opc_lldp_stop = 0x0A05, + i40e_aqc_opc_lldp_start = 0x0A06, + i40e_aqc_opc_get_cee_dcb_cfg = 0x0A07, + i40e_aqc_opc_lldp_set_local_mib = 0x0A08, + i40e_aqc_opc_lldp_stop_start_spec_agent = 0x0A09, + i40e_aqc_opc_lldp_restore = 0x0A0A, + + /* Tunnel commands */ + i40e_aqc_opc_add_udp_tunnel = 0x0B00, + 
i40e_aqc_opc_del_udp_tunnel = 0x0B01, + i40e_aqc_opc_set_rss_key = 0x0B02, + i40e_aqc_opc_set_rss_lut = 0x0B03, + i40e_aqc_opc_get_rss_key = 0x0B04, + i40e_aqc_opc_get_rss_lut = 0x0B05, + + /* Async Events */ + i40e_aqc_opc_event_lan_overflow = 0x1001, + + /* OEM commands */ + i40e_aqc_opc_oem_parameter_change = 0xFE00, + i40e_aqc_opc_oem_device_status_change = 0xFE01, + i40e_aqc_opc_oem_ocsd_initialize = 0xFE02, + i40e_aqc_opc_oem_ocbb_initialize = 0xFE03, + + /* debug commands */ + i40e_aqc_opc_debug_read_reg = 0xFF03, + i40e_aqc_opc_debug_write_reg = 0xFF04, + i40e_aqc_opc_debug_modify_reg = 0xFF07, + i40e_aqc_opc_debug_dump_internals = 0xFF08, +}; + +/* command structures and indirect data structures */ + +/* Structure naming conventions: + * - no suffix for direct command descriptor structures + * - _data for indirect sent data + * - _resp for indirect return data (data which is both will use _data) + * - _completion for direct return data + * - _element_ for repeated elements (may also be _data or _resp) + * + * Command structures are expected to overlay the params.raw member of the basic + * descriptor, and as such cannot exceed 16 bytes in length. + */ + +/* This macro is used to generate a compilation error if a structure + * is not exactly the correct length. It gives a divide by zero error if the + * structure is not of the correct size, otherwise it creates an enum that is + * never used. + */ +#define I40E_CHECK_STRUCT_LEN(n, X) enum i40e_static_assert_enum_##X \ + { i40e_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) } + +/* This macro is used extensively to ensure that command structures are 16 + * bytes in length as they have to map to the raw array of that size. + */ +#define I40E_CHECK_CMD_LENGTH(X) I40E_CHECK_STRUCT_LEN(16, X) + +/* internal (0x00XX) commands */ + +/* Get version (direct 0x0001) */ +struct i40e_aqc_get_version { + __le32 rom_ver; + __le32 fw_build; + __le16 fw_major; + __le16 fw_minor; + __le16 api_major; + __le16 api_minor; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_get_version); + +/* Send driver version (indirect 0x0002) */ +struct i40e_aqc_driver_version { + u8 driver_major_ver; + u8 driver_minor_ver; + u8 driver_build_ver; + u8 driver_subbuild_ver; + u8 reserved[4]; + __le32 address_high; + __le32 address_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_driver_version); + +/* Queue Shutdown (direct 0x0003) */ +struct i40e_aqc_queue_shutdown { + __le32 driver_unloading; +#define I40E_AQ_DRIVER_UNLOADING 0x1 + u8 reserved[12]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_queue_shutdown); + +/* Set PF context (0x0004, direct) */ +struct i40e_aqc_set_pf_context { + u8 pf_id; + u8 reserved[15]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_set_pf_context); + +/* Request resource ownership (direct 0x0008) + * Release resource ownership (direct 0x0009) + */ +#define I40E_AQ_RESOURCE_NVM 1 +#define I40E_AQ_RESOURCE_SDP 2 +#define I40E_AQ_RESOURCE_ACCESS_READ 1 +#define I40E_AQ_RESOURCE_ACCESS_WRITE 2 +#define I40E_AQ_RESOURCE_NVM_READ_TIMEOUT 3000 +#define I40E_AQ_RESOURCE_NVM_WRITE_TIMEOUT 180000 + +struct i40e_aqc_request_resource { + __le16 resource_id; + __le16 access_type; + __le32 timeout; + __le32 resource_number; + u8 reserved[4]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_request_resource); + +/* Get function capabilities (indirect 0x000A) + * Get device capabilities (indirect 0x000B) + */ +struct i40e_aqc_list_capabilites { + u8 command_flags; +#define I40E_AQ_LIST_CAP_PF_INDEX_EN 1 + u8 pf_index; + u8 reserved[2]; + __le32 count; + __le32 addr_high; + __le32 
addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_list_capabilites); + +struct i40e_aqc_list_capabilities_element_resp { + __le16 id; + u8 major_rev; + u8 minor_rev; + __le32 number; + __le32 logical_id; + __le32 phys_id; + u8 reserved[16]; +}; + +/* list of caps */ + +#define I40E_AQ_CAP_ID_SWITCH_MODE 0x0001 +#define I40E_AQ_CAP_ID_MNG_MODE 0x0002 +#define I40E_AQ_CAP_ID_NPAR_ACTIVE 0x0003 +#define I40E_AQ_CAP_ID_OS2BMC_CAP 0x0004 +#define I40E_AQ_CAP_ID_FUNCTIONS_VALID 0x0005 +#define I40E_AQ_CAP_ID_ALTERNATE_RAM 0x0006 +#define I40E_AQ_CAP_ID_WOL_AND_PROXY 0x0008 +#define I40E_AQ_CAP_ID_SRIOV 0x0012 +#define I40E_AQ_CAP_ID_VF 0x0013 +#define I40E_AQ_CAP_ID_VMDQ 0x0014 +#define I40E_AQ_CAP_ID_8021QBG 0x0015 +#define I40E_AQ_CAP_ID_8021QBR 0x0016 +#define I40E_AQ_CAP_ID_VSI 0x0017 +#define I40E_AQ_CAP_ID_DCB 0x0018 +#define I40E_AQ_CAP_ID_FCOE 0x0021 +#define I40E_AQ_CAP_ID_ISCSI 0x0022 +#define I40E_AQ_CAP_ID_RSS 0x0040 +#define I40E_AQ_CAP_ID_RXQ 0x0041 +#define I40E_AQ_CAP_ID_TXQ 0x0042 +#define I40E_AQ_CAP_ID_MSIX 0x0043 +#define I40E_AQ_CAP_ID_VF_MSIX 0x0044 +#define I40E_AQ_CAP_ID_FLOW_DIRECTOR 0x0045 +#define I40E_AQ_CAP_ID_1588 0x0046 +#define I40E_AQ_CAP_ID_IWARP 0x0051 +#define I40E_AQ_CAP_ID_LED 0x0061 +#define I40E_AQ_CAP_ID_SDP 0x0062 +#define I40E_AQ_CAP_ID_MDIO 0x0063 +#define I40E_AQ_CAP_ID_WSR_PROT 0x0064 +#define I40E_AQ_CAP_ID_NVM_MGMT 0x0080 +#define I40E_AQ_CAP_ID_FLEX10 0x00F1 +#define I40E_AQ_CAP_ID_CEM 0x00F2 + +/* Set CPPM Configuration (direct 0x0103) */ +struct i40e_aqc_cppm_configuration { + __le16 command_flags; +#define I40E_AQ_CPPM_EN_LTRC 0x0800 +#define I40E_AQ_CPPM_EN_DMCTH 0x1000 +#define I40E_AQ_CPPM_EN_DMCTLX 0x2000 +#define I40E_AQ_CPPM_EN_HPTC 0x4000 +#define I40E_AQ_CPPM_EN_DMARC 0x8000 + __le16 ttlx; + __le32 dmacr; + __le16 dmcth; + u8 hptc; + u8 reserved; + __le32 pfltrc; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_cppm_configuration); + +/* Set ARP Proxy command / response (indirect 0x0104) */ +struct i40e_aqc_arp_proxy_data { + __le16 command_flags; +#define I40E_AQ_ARP_INIT_IPV4 0x0800 +#define I40E_AQ_ARP_UNSUP_CTL 0x1000 +#define I40E_AQ_ARP_ENA 0x2000 +#define I40E_AQ_ARP_ADD_IPV4 0x4000 +#define I40E_AQ_ARP_DEL_IPV4 0x8000 + __le16 table_id; + __le32 enabled_offloads; +#define I40E_AQ_ARP_DIRECTED_OFFLOAD_ENABLE 0x00000020 +#define I40E_AQ_ARP_OFFLOAD_ENABLE 0x00000800 + __le32 ip_addr; + u8 mac_addr[6]; + u8 reserved[2]; +}; + +I40E_CHECK_STRUCT_LEN(0x14, i40e_aqc_arp_proxy_data); + +/* Set NS Proxy Table Entry Command (indirect 0x0105) */ +struct i40e_aqc_ns_proxy_data { + __le16 table_idx_mac_addr_0; + __le16 table_idx_mac_addr_1; + __le16 table_idx_ipv6_0; + __le16 table_idx_ipv6_1; + __le16 control; +#define I40E_AQ_NS_PROXY_ADD_0 0x0001 +#define I40E_AQ_NS_PROXY_DEL_0 0x0002 +#define I40E_AQ_NS_PROXY_ADD_1 0x0004 +#define I40E_AQ_NS_PROXY_DEL_1 0x0008 +#define I40E_AQ_NS_PROXY_ADD_IPV6_0 0x0010 +#define I40E_AQ_NS_PROXY_DEL_IPV6_0 0x0020 +#define I40E_AQ_NS_PROXY_ADD_IPV6_1 0x0040 +#define I40E_AQ_NS_PROXY_DEL_IPV6_1 0x0080 +#define I40E_AQ_NS_PROXY_COMMAND_SEQ 0x0100 +#define I40E_AQ_NS_PROXY_INIT_IPV6_TBL 0x0200 +#define I40E_AQ_NS_PROXY_INIT_MAC_TBL 0x0400 +#define I40E_AQ_NS_PROXY_OFFLOAD_ENABLE 0x0800 +#define I40E_AQ_NS_PROXY_DIRECTED_OFFLOAD_ENABLE 0x1000 + u8 mac_addr_0[6]; + u8 mac_addr_1[6]; + u8 local_mac_addr[6]; + u8 ipv6_addr_0[16]; /* Warning! 
spec specifies BE byte order */ + u8 ipv6_addr_1[16]; +}; + +I40E_CHECK_STRUCT_LEN(0x3c, i40e_aqc_ns_proxy_data); + +/* Manage LAA Command (0x0106) - obsolete */ +struct i40e_aqc_mng_laa { + __le16 command_flags; +#define I40E_AQ_LAA_FLAG_WR 0x8000 + u8 reserved[2]; + __le32 sal; + __le16 sah; + u8 reserved2[6]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_mng_laa); + +/* Manage MAC Address Read Command (indirect 0x0107) */ +struct i40e_aqc_mac_address_read { + __le16 command_flags; +#define I40E_AQC_LAN_ADDR_VALID 0x10 +#define I40E_AQC_SAN_ADDR_VALID 0x20 +#define I40E_AQC_PORT_ADDR_VALID 0x40 +#define I40E_AQC_WOL_ADDR_VALID 0x80 +#define I40E_AQC_MC_MAG_EN_VALID 0x100 +#define I40E_AQC_WOL_PRESERVE_STATUS 0x200 +#define I40E_AQC_ADDR_VALID_MASK 0x3F0 + u8 reserved[6]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_read); + +struct i40e_aqc_mac_address_read_data { + u8 pf_lan_mac[6]; + u8 pf_san_mac[6]; + u8 port_mac[6]; + u8 pf_wol_mac[6]; +}; + +I40E_CHECK_STRUCT_LEN(24, i40e_aqc_mac_address_read_data); + +/* Manage MAC Address Write Command (0x0108) */ +struct i40e_aqc_mac_address_write { + __le16 command_flags; +#define I40E_AQC_MC_MAG_EN 0x0100 +#define I40E_AQC_WOL_PRESERVE_ON_PFR 0x0200 +#define I40E_AQC_WRITE_TYPE_LAA_ONLY 0x0000 +#define I40E_AQC_WRITE_TYPE_LAA_WOL 0x4000 +#define I40E_AQC_WRITE_TYPE_PORT 0x8000 +#define I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG 0xC000 +#define I40E_AQC_WRITE_TYPE_MASK 0xC000 + + __le16 mac_sah; + __le32 mac_sal; + u8 reserved[8]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_write); + +/* PXE commands (0x011x) */ + +/* Clear PXE Command and response (direct 0x0110) */ +struct i40e_aqc_clear_pxe { + u8 rx_cnt; + u8 reserved[15]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_clear_pxe); + +/* Set WoL Filter (0x0120) */ + +struct i40e_aqc_set_wol_filter { + __le16 filter_index; +#define I40E_AQC_MAX_NUM_WOL_FILTERS 8 +#define I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_SHIFT 15 +#define I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_MASK (0x1 << \ + I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_SHIFT) + +#define I40E_AQC_SET_WOL_FILTER_INDEX_SHIFT 0 +#define I40E_AQC_SET_WOL_FILTER_INDEX_MASK (0x7 << \ + I40E_AQC_SET_WOL_FILTER_INDEX_SHIFT) + __le16 cmd_flags; +#define I40E_AQC_SET_WOL_FILTER 0x8000 +#define I40E_AQC_SET_WOL_FILTER_NO_TCO_WOL 0x4000 +#define I40E_AQC_SET_WOL_FILTER_WOL_PRESERVE_ON_PFR 0x2000 +#define I40E_AQC_SET_WOL_FILTER_ACTION_CLEAR 0 +#define I40E_AQC_SET_WOL_FILTER_ACTION_SET 1 + __le16 valid_flags; +#define I40E_AQC_SET_WOL_FILTER_ACTION_VALID 0x8000 +#define I40E_AQC_SET_WOL_FILTER_NO_TCO_ACTION_VALID 0x4000 + u8 reserved[2]; + __le32 address_high; + __le32 address_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_set_wol_filter); + +struct i40e_aqc_set_wol_filter_data { + u8 filter[128]; + u8 mask[16]; +}; + +I40E_CHECK_STRUCT_LEN(0x90, i40e_aqc_set_wol_filter_data); + +/* Get Wake Reason (0x0121) */ + +struct i40e_aqc_get_wake_reason_completion { + u8 reserved_1[2]; + __le16 wake_reason; +#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_SHIFT 0 +#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_MASK (0xFF << \ + I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_SHIFT) +#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_SHIFT 8 +#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_MASK (0xFF << \ + I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_SHIFT) + u8 reserved_2[12]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_get_wake_reason_completion); + +/* Switch configuration commands (0x02xx) */ + +/* Used by 
many indirect commands that only pass an seid and a buffer in the + * command + */ +struct i40e_aqc_switch_seid { + __le16 seid; + u8 reserved[6]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_switch_seid); + +/* Get Switch Configuration command (indirect 0x0200) + * uses i40e_aqc_switch_seid for the descriptor + */ +struct i40e_aqc_get_switch_config_header_resp { + __le16 num_reported; + __le16 num_total; + u8 reserved[12]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_config_header_resp); + +struct i40e_aqc_switch_config_element_resp { + u8 element_type; +#define I40E_AQ_SW_ELEM_TYPE_MAC 1 +#define I40E_AQ_SW_ELEM_TYPE_PF 2 +#define I40E_AQ_SW_ELEM_TYPE_VF 3 +#define I40E_AQ_SW_ELEM_TYPE_EMP 4 +#define I40E_AQ_SW_ELEM_TYPE_BMC 5 +#define I40E_AQ_SW_ELEM_TYPE_PV 16 +#define I40E_AQ_SW_ELEM_TYPE_VEB 17 +#define I40E_AQ_SW_ELEM_TYPE_PA 18 +#define I40E_AQ_SW_ELEM_TYPE_VSI 19 + u8 revision; +#define I40E_AQ_SW_ELEM_REV_1 1 + __le16 seid; + __le16 uplink_seid; + __le16 downlink_seid; + u8 reserved[3]; + u8 connection_type; +#define I40E_AQ_CONN_TYPE_REGULAR 0x1 +#define I40E_AQ_CONN_TYPE_DEFAULT 0x2 +#define I40E_AQ_CONN_TYPE_CASCADED 0x3 + __le16 scheduler_id; + __le16 element_info; +}; + +I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_switch_config_element_resp); + +/* Get Switch Configuration (indirect 0x0200) + * an array of elements are returned in the response buffer + * the first in the array is the header, remainder are elements + */ +struct i40e_aqc_get_switch_config_resp { + struct i40e_aqc_get_switch_config_header_resp header; + struct i40e_aqc_switch_config_element_resp element[1]; +}; + +I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_get_switch_config_resp); + +/* Add Statistics (direct 0x0201) + * Remove Statistics (direct 0x0202) + */ +struct i40e_aqc_add_remove_statistics { + __le16 seid; + __le16 vlan; + __le16 stat_index; + u8 reserved[10]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_statistics); + +/* Set Port Parameters command (direct 0x0203) */ +struct i40e_aqc_set_port_parameters { + __le16 command_flags; +#define I40E_AQ_SET_P_PARAMS_SAVE_BAD_PACKETS 1 +#define I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS 2 /* must set! 
*/ +#define I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA 4 + __le16 bad_frame_vsi; +#define I40E_AQ_SET_P_PARAMS_BFRAME_SEID_SHIFT 0x0 +#define I40E_AQ_SET_P_PARAMS_BFRAME_SEID_MASK 0x3FF + __le16 default_seid; /* reserved for command */ + u8 reserved[10]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_set_port_parameters); + +/* Get Switch Resource Allocation (indirect 0x0204) */ +struct i40e_aqc_get_switch_resource_alloc { + u8 num_entries; /* reserved for command */ + u8 reserved[7]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_resource_alloc); + +/* expect an array of these structs in the response buffer */ +struct i40e_aqc_switch_resource_alloc_element_resp { + u8 resource_type; +#define I40E_AQ_RESOURCE_TYPE_VEB 0x0 +#define I40E_AQ_RESOURCE_TYPE_VSI 0x1 +#define I40E_AQ_RESOURCE_TYPE_MACADDR 0x2 +#define I40E_AQ_RESOURCE_TYPE_STAG 0x3 +#define I40E_AQ_RESOURCE_TYPE_ETAG 0x4 +#define I40E_AQ_RESOURCE_TYPE_MULTICAST_HASH 0x5 +#define I40E_AQ_RESOURCE_TYPE_UNICAST_HASH 0x6 +#define I40E_AQ_RESOURCE_TYPE_VLAN 0x7 +#define I40E_AQ_RESOURCE_TYPE_VSI_LIST_ENTRY 0x8 +#define I40E_AQ_RESOURCE_TYPE_ETAG_LIST_ENTRY 0x9 +#define I40E_AQ_RESOURCE_TYPE_VLAN_STAT_POOL 0xA +#define I40E_AQ_RESOURCE_TYPE_MIRROR_RULE 0xB +#define I40E_AQ_RESOURCE_TYPE_QUEUE_SETS 0xC +#define I40E_AQ_RESOURCE_TYPE_VLAN_FILTERS 0xD +#define I40E_AQ_RESOURCE_TYPE_INNER_MAC_FILTERS 0xF +#define I40E_AQ_RESOURCE_TYPE_IP_FILTERS 0x10 +#define I40E_AQ_RESOURCE_TYPE_GRE_VN_KEYS 0x11 +#define I40E_AQ_RESOURCE_TYPE_VN2_KEYS 0x12 +#define I40E_AQ_RESOURCE_TYPE_TUNNEL_PORTS 0x13 + u8 reserved1; + __le16 guaranteed; + __le16 total; + __le16 used; + __le16 total_unalloced; + u8 reserved2[6]; +}; + +I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_switch_resource_alloc_element_resp); + +/* Set Switch Configuration (direct 0x0205) */ +struct i40e_aqc_set_switch_config { + __le16 flags; +/* flags used for both fields below */ +#define I40E_AQ_SET_SWITCH_CFG_PROMISC 0x0001 +#define I40E_AQ_SET_SWITCH_CFG_L2_FILTER 0x0002 +#define I40E_AQ_SET_SWITCH_CFG_HW_ATR_EVICT 0x0004 + __le16 valid_flags; + /* The ethertype in switch_tag is dropped on ingress and used + * internally by the switch. Set this to zero for the default + * of 0x88a8 (802.1ad). Should be zero for firmware API + * versions lower than 1.7. + */ + __le16 switch_tag; + /* The ethertypes in first_tag and second_tag are used to + * match the outer and inner VLAN tags (respectively) when HW + * double VLAN tagging is enabled via the set port parameters + * AQ command. Otherwise these are both ignored. Set them to + * zero for their defaults of 0x8100 (802.1Q). Should be zero + * for firmware API versions lower than 1.7. 
+ */ + __le16 first_tag; + __le16 second_tag; + /* Next byte is split into following: + * Bit 7 : 0 : No action, 1: Switch to mode defined by bits 6:0 + * Bit 6 : 0 : Destination Port, 1: source port + * Bit 5..4 : L4 type + * 0: rsvd + * 1: TCP + * 2: UDP + * 3: Both TCP and UDP + * Bits 3:0 Mode + * 0: default mode + * 1: L4 port only mode + * 2: non-tunneled mode + * 3: tunneled mode + */ +#define I40E_AQ_SET_SWITCH_BIT7_VALID 0x80 + +#define I40E_AQ_SET_SWITCH_L4_SRC_PORT 0x40 + +#define I40E_AQ_SET_SWITCH_L4_TYPE_RSVD 0x00 +#define I40E_AQ_SET_SWITCH_L4_TYPE_TCP 0x10 +#define I40E_AQ_SET_SWITCH_L4_TYPE_UDP 0x20 +#define I40E_AQ_SET_SWITCH_L4_TYPE_BOTH 0x30 + +#define I40E_AQ_SET_SWITCH_MODE_DEFAULT 0x00 +#define I40E_AQ_SET_SWITCH_MODE_L4_PORT 0x01 +#define I40E_AQ_SET_SWITCH_MODE_NON_TUNNEL 0x02 +#define I40E_AQ_SET_SWITCH_MODE_TUNNEL 0x03 + u8 mode; + u8 rsvd5[5]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_set_switch_config); + +/* Read Receive control registers (direct 0x0206) + * Write Receive control registers (direct 0x0207) + * used for accessing Rx control registers that can be + * slow and need special handling when under high Rx load + */ +struct i40e_aqc_rx_ctl_reg_read_write { + __le32 reserved1; + __le32 address; + __le32 reserved2; + __le32 value; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_rx_ctl_reg_read_write); + +/* Add VSI (indirect 0x0210) + * this indirect command uses struct i40e_aqc_vsi_properties_data + * as the indirect buffer (128 bytes) + * + * Update VSI (indirect 0x211) + * uses the same data structure as Add VSI + * + * Get VSI (indirect 0x0212) + * uses the same completion and data structure as Add VSI + */ +struct i40e_aqc_add_get_update_vsi { + __le16 uplink_seid; + u8 connection_type; +#define I40E_AQ_VSI_CONN_TYPE_NORMAL 0x1 +#define I40E_AQ_VSI_CONN_TYPE_DEFAULT 0x2 +#define I40E_AQ_VSI_CONN_TYPE_CASCADED 0x3 + u8 reserved1; + u8 vf_id; + u8 reserved2; + __le16 vsi_flags; +#define I40E_AQ_VSI_TYPE_SHIFT 0x0 +#define I40E_AQ_VSI_TYPE_MASK (0x3 << I40E_AQ_VSI_TYPE_SHIFT) +#define I40E_AQ_VSI_TYPE_VF 0x0 +#define I40E_AQ_VSI_TYPE_VMDQ2 0x1 +#define I40E_AQ_VSI_TYPE_PF 0x2 +#define I40E_AQ_VSI_TYPE_EMP_MNG 0x3 +#define I40E_AQ_VSI_FLAG_CASCADED_PV 0x4 + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi); + +struct i40e_aqc_add_get_update_vsi_completion { + __le16 seid; + __le16 vsi_number; + __le16 vsi_used; + __le16 vsi_free; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi_completion); + +struct i40e_aqc_vsi_properties_data { + /* first 96 byte are written by SW */ + __le16 valid_sections; +#define I40E_AQ_VSI_PROP_SWITCH_VALID 0x0001 +#define I40E_AQ_VSI_PROP_SECURITY_VALID 0x0002 +#define I40E_AQ_VSI_PROP_VLAN_VALID 0x0004 +#define I40E_AQ_VSI_PROP_CAS_PV_VALID 0x0008 +#define I40E_AQ_VSI_PROP_INGRESS_UP_VALID 0x0010 +#define I40E_AQ_VSI_PROP_EGRESS_UP_VALID 0x0020 +#define I40E_AQ_VSI_PROP_QUEUE_MAP_VALID 0x0040 +#define I40E_AQ_VSI_PROP_QUEUE_OPT_VALID 0x0080 +#define I40E_AQ_VSI_PROP_OUTER_UP_VALID 0x0100 +#define I40E_AQ_VSI_PROP_SCHED_VALID 0x0200 + /* switch section */ + __le16 switch_id; /* 12bit id combined with flags below */ +#define I40E_AQ_VSI_SW_ID_SHIFT 0x0000 +#define I40E_AQ_VSI_SW_ID_MASK (0xFFF << I40E_AQ_VSI_SW_ID_SHIFT) +#define I40E_AQ_VSI_SW_ID_FLAG_NOT_STAG 0x1000 +#define I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB 0x2000 +#define I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB 0x4000 + u8 sw_reserved[2]; + /* security section */ + u8 sec_flags; +#define 
I40E_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD 0x01 +#define I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK 0x02 +#define I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK 0x04 + u8 sec_reserved; + /* VLAN section */ + __le16 pvid; /* VLANS include priority bits */ + __le16 fcoe_pvid; + u8 port_vlan_flags; +#define I40E_AQ_VSI_PVLAN_MODE_SHIFT 0x00 +#define I40E_AQ_VSI_PVLAN_MODE_MASK (0x03 << \ + I40E_AQ_VSI_PVLAN_MODE_SHIFT) +#define I40E_AQ_VSI_PVLAN_MODE_TAGGED 0x01 +#define I40E_AQ_VSI_PVLAN_MODE_UNTAGGED 0x02 +#define I40E_AQ_VSI_PVLAN_MODE_ALL 0x03 +#define I40E_AQ_VSI_PVLAN_INSERT_PVID 0x04 +#define I40E_AQ_VSI_PVLAN_EMOD_SHIFT 0x03 +#define I40E_AQ_VSI_PVLAN_EMOD_MASK (0x3 << \ + I40E_AQ_VSI_PVLAN_EMOD_SHIFT) +#define I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH 0x0 +#define I40E_AQ_VSI_PVLAN_EMOD_STR_UP 0x08 +#define I40E_AQ_VSI_PVLAN_EMOD_STR 0x10 +#define I40E_AQ_VSI_PVLAN_EMOD_NOTHING 0x18 + u8 pvlan_reserved[3]; + /* ingress egress up sections */ + __le32 ingress_table; /* bitmap, 3 bits per up */ +#define I40E_AQ_VSI_UP_TABLE_UP0_SHIFT 0 +#define I40E_AQ_VSI_UP_TABLE_UP0_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP0_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP1_SHIFT 3 +#define I40E_AQ_VSI_UP_TABLE_UP1_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP1_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP2_SHIFT 6 +#define I40E_AQ_VSI_UP_TABLE_UP2_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP2_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP3_SHIFT 9 +#define I40E_AQ_VSI_UP_TABLE_UP3_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP3_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP4_SHIFT 12 +#define I40E_AQ_VSI_UP_TABLE_UP4_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP4_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP5_SHIFT 15 +#define I40E_AQ_VSI_UP_TABLE_UP5_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP5_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP6_SHIFT 18 +#define I40E_AQ_VSI_UP_TABLE_UP6_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP6_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP7_SHIFT 21 +#define I40E_AQ_VSI_UP_TABLE_UP7_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP7_SHIFT) + __le32 egress_table; /* same defines as for ingress table */ + /* cascaded PV section */ + __le16 cas_pv_tag; + u8 cas_pv_flags; +#define I40E_AQ_VSI_CAS_PV_TAGX_SHIFT 0x00 +#define I40E_AQ_VSI_CAS_PV_TAGX_MASK (0x03 << \ + I40E_AQ_VSI_CAS_PV_TAGX_SHIFT) +#define I40E_AQ_VSI_CAS_PV_TAGX_LEAVE 0x00 +#define I40E_AQ_VSI_CAS_PV_TAGX_REMOVE 0x01 +#define I40E_AQ_VSI_CAS_PV_TAGX_COPY 0x02 +#define I40E_AQ_VSI_CAS_PV_INSERT_TAG 0x10 +#define I40E_AQ_VSI_CAS_PV_ETAG_PRUNE 0x20 +#define I40E_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG 0x40 + u8 cas_pv_reserved; + /* queue mapping section */ + __le16 mapping_flags; +#define I40E_AQ_VSI_QUE_MAP_CONTIG 0x0 +#define I40E_AQ_VSI_QUE_MAP_NONCONTIG 0x1 + __le16 queue_mapping[16]; +#define I40E_AQ_VSI_QUEUE_SHIFT 0x0 +#define I40E_AQ_VSI_QUEUE_MASK (0x7FF << I40E_AQ_VSI_QUEUE_SHIFT) + __le16 tc_mapping[8]; +#define I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT 0 +#define I40E_AQ_VSI_TC_QUE_OFFSET_MASK (0x1FF << \ + I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) +#define I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT 9 +#define I40E_AQ_VSI_TC_QUE_NUMBER_MASK (0x7 << \ + I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT) + /* queueing option section */ + u8 queueing_opt_flags; +#define I40E_AQ_VSI_QUE_OPT_MULTICAST_UDP_ENA 0x04 +#define I40E_AQ_VSI_QUE_OPT_UNICAST_UDP_ENA 0x08 +#define I40E_AQ_VSI_QUE_OPT_TCP_ENA 0x10 +#define I40E_AQ_VSI_QUE_OPT_FCOE_ENA 0x20 +#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_PF 0x00 +#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI 0x40 + u8 queueing_opt_reserved[3]; + /* scheduler section */ + u8 up_enable_bits; + u8 sched_reserved; + /* outer up section */ + __le32 
outer_up_table; /* same structure and defines as ingress tbl */ + u8 cmd_reserved[8]; + /* last 32 bytes are written by FW */ + __le16 qs_handle[8]; +#define I40E_AQ_VSI_QS_HANDLE_INVALID 0xFFFF + __le16 stat_counter_idx; + __le16 sched_id; + u8 resp_reserved[12]; +}; + +I40E_CHECK_STRUCT_LEN(128, i40e_aqc_vsi_properties_data); + +/* Add Port Virtualizer (direct 0x0220) + * also used for update PV (direct 0x0221) but only flags are used + * (IS_CTRL_PORT only works on add PV) + */ +struct i40e_aqc_add_update_pv { + __le16 command_flags; +#define I40E_AQC_PV_FLAG_PV_TYPE 0x1 +#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_STAG_EN 0x2 +#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_ETAG_EN 0x4 +#define I40E_AQC_PV_FLAG_IS_CTRL_PORT 0x8 + __le16 uplink_seid; + __le16 connected_seid; + u8 reserved[10]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv); + +struct i40e_aqc_add_update_pv_completion { + /* reserved for update; for add also encodes error if rc == ENOSPC */ + __le16 pv_seid; +#define I40E_AQC_PV_ERR_FLAG_NO_PV 0x1 +#define I40E_AQC_PV_ERR_FLAG_NO_SCHED 0x2 +#define I40E_AQC_PV_ERR_FLAG_NO_COUNTER 0x4 +#define I40E_AQC_PV_ERR_FLAG_NO_ENTRY 0x8 + u8 reserved[14]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv_completion); + +/* Get PV Params (direct 0x0222) + * uses i40e_aqc_switch_seid for the descriptor + */ + +struct i40e_aqc_get_pv_params_completion { + __le16 seid; + __le16 default_stag; + __le16 pv_flags; /* same flags as add_pv */ +#define I40E_AQC_GET_PV_PV_TYPE 0x1 +#define I40E_AQC_GET_PV_FRWD_UNKNOWN_STAG 0x2 +#define I40E_AQC_GET_PV_FRWD_UNKNOWN_ETAG 0x4 + u8 reserved[8]; + __le16 default_port_seid; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_get_pv_params_completion); + +/* Add VEB (direct 0x0230) */ +struct i40e_aqc_add_veb { + __le16 uplink_seid; + __le16 downlink_seid; + __le16 veb_flags; +#define I40E_AQC_ADD_VEB_FLOATING 0x1 +#define I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT 1 +#define I40E_AQC_ADD_VEB_PORT_TYPE_MASK (0x3 << \ + I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT) +#define I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT 0x2 +#define I40E_AQC_ADD_VEB_PORT_TYPE_DATA 0x4 +#define I40E_AQC_ADD_VEB_ENABLE_L2_FILTER 0x8 /* deprecated */ +#define I40E_AQC_ADD_VEB_ENABLE_DISABLE_STATS 0x10 + u8 enable_tcs; + u8 reserved[9]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb); + +struct i40e_aqc_add_veb_completion { + u8 reserved[6]; + __le16 switch_seid; + /* also encodes error if rc == ENOSPC; codes are the same as add_pv */ + __le16 veb_seid; +#define I40E_AQC_VEB_ERR_FLAG_NO_VEB 0x1 +#define I40E_AQC_VEB_ERR_FLAG_NO_SCHED 0x2 +#define I40E_AQC_VEB_ERR_FLAG_NO_COUNTER 0x4 +#define I40E_AQC_VEB_ERR_FLAG_NO_ENTRY 0x8 + __le16 statistic_index; + __le16 vebs_used; + __le16 vebs_free; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb_completion); + +/* Get VEB Parameters (direct 0x0232) + * uses i40e_aqc_switch_seid for the descriptor + */ +struct i40e_aqc_get_veb_parameters_completion { + __le16 seid; + __le16 switch_id; + __le16 veb_flags; /* only the first/last flags from 0x0230 is valid */ + __le16 statistic_index; + __le16 vebs_used; + __le16 vebs_free; + u8 reserved[4]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_get_veb_parameters_completion); + +/* Delete Element (direct 0x0243) + * uses the generic i40e_aqc_switch_seid + */ + +/* Add MAC-VLAN (indirect 0x0250) */ + +/* used for the command for most vlan commands */ +struct i40e_aqc_macvlan { + __le16 num_addresses; + __le16 seid[3]; +#define I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT 0 +#define I40E_AQC_MACVLAN_CMD_SEID_NUM_MASK (0x3FF << \ + I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT) 
+#define I40E_AQC_MACVLAN_CMD_SEID_VALID 0x8000 + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_macvlan); + +/* indirect data for command and response */ +struct i40e_aqc_add_macvlan_element_data { + u8 mac_addr[6]; + __le16 vlan_tag; + __le16 flags; +#define I40E_AQC_MACVLAN_ADD_PERFECT_MATCH 0x0001 +#define I40E_AQC_MACVLAN_ADD_HASH_MATCH 0x0002 +#define I40E_AQC_MACVLAN_ADD_IGNORE_VLAN 0x0004 +#define I40E_AQC_MACVLAN_ADD_TO_QUEUE 0x0008 +#define I40E_AQC_MACVLAN_ADD_USE_SHARED_MAC 0x0010 + __le16 queue_number; +#define I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT 0 +#define I40E_AQC_MACVLAN_CMD_QUEUE_MASK (0x7FF << \ + I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT) + /* response section */ + u8 match_method; +#define I40E_AQC_MM_PERFECT_MATCH 0x01 +#define I40E_AQC_MM_HASH_MATCH 0x02 +#define I40E_AQC_MM_ERR_NO_RES 0xFF + u8 reserved1[3]; +}; + +struct i40e_aqc_add_remove_macvlan_completion { + __le16 perfect_mac_used; + __le16 perfect_mac_free; + __le16 unicast_hash_free; + __le16 multicast_hash_free; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_macvlan_completion); + +/* Remove MAC-VLAN (indirect 0x0251) + * uses i40e_aqc_macvlan for the descriptor + * data points to an array of num_addresses of elements + */ + +struct i40e_aqc_remove_macvlan_element_data { + u8 mac_addr[6]; + __le16 vlan_tag; + u8 flags; +#define I40E_AQC_MACVLAN_DEL_PERFECT_MATCH 0x01 +#define I40E_AQC_MACVLAN_DEL_HASH_MATCH 0x02 +#define I40E_AQC_MACVLAN_DEL_IGNORE_VLAN 0x08 +#define I40E_AQC_MACVLAN_DEL_ALL_VSIS 0x10 + u8 reserved[3]; + /* reply section */ + u8 error_code; +#define I40E_AQC_REMOVE_MACVLAN_SUCCESS 0x0 +#define I40E_AQC_REMOVE_MACVLAN_FAIL 0xFF + u8 reply_reserved[3]; +}; + +/* Add VLAN (indirect 0x0252) + * Remove VLAN (indirect 0x0253) + * use the generic i40e_aqc_macvlan for the command + */ +struct i40e_aqc_add_remove_vlan_element_data { + __le16 vlan_tag; + u8 vlan_flags; +/* flags for add VLAN */ +#define I40E_AQC_ADD_VLAN_LOCAL 0x1 +#define I40E_AQC_ADD_PVLAN_TYPE_SHIFT 1 +#define I40E_AQC_ADD_PVLAN_TYPE_MASK (0x3 << I40E_AQC_ADD_PVLAN_TYPE_SHIFT) +#define I40E_AQC_ADD_PVLAN_TYPE_REGULAR 0x0 +#define I40E_AQC_ADD_PVLAN_TYPE_PRIMARY 0x2 +#define I40E_AQC_ADD_PVLAN_TYPE_SECONDARY 0x4 +#define I40E_AQC_VLAN_PTYPE_SHIFT 3 +#define I40E_AQC_VLAN_PTYPE_MASK (0x3 << I40E_AQC_VLAN_PTYPE_SHIFT) +#define I40E_AQC_VLAN_PTYPE_REGULAR_VSI 0x0 +#define I40E_AQC_VLAN_PTYPE_PROMISC_VSI 0x8 +#define I40E_AQC_VLAN_PTYPE_COMMUNITY_VSI 0x10 +#define I40E_AQC_VLAN_PTYPE_ISOLATED_VSI 0x18 +/* flags for remove VLAN */ +#define I40E_AQC_REMOVE_VLAN_ALL 0x1 + u8 reserved; + u8 result; +/* flags for add VLAN */ +#define I40E_AQC_ADD_VLAN_SUCCESS 0x0 +#define I40E_AQC_ADD_VLAN_FAIL_REQUEST 0xFE +#define I40E_AQC_ADD_VLAN_FAIL_RESOURCE 0xFF +/* flags for remove VLAN */ +#define I40E_AQC_REMOVE_VLAN_SUCCESS 0x0 +#define I40E_AQC_REMOVE_VLAN_FAIL 0xFF + u8 reserved1[3]; +}; + +struct i40e_aqc_add_remove_vlan_completion { + u8 reserved[4]; + __le16 vlans_used; + __le16 vlans_free; + __le32 addr_high; + __le32 addr_low; +}; + +/* Set VSI Promiscuous Modes (direct 0x0254) */ +struct i40e_aqc_set_vsi_promiscuous_modes { + __le16 promiscuous_flags; + __le16 valid_flags; +/* flags used for both fields above */ +#define I40E_AQC_SET_VSI_PROMISC_UNICAST 0x01 +#define I40E_AQC_SET_VSI_PROMISC_MULTICAST 0x02 +#define I40E_AQC_SET_VSI_PROMISC_BROADCAST 0x04 +#define I40E_AQC_SET_VSI_DEFAULT 0x08 +#define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10 +#define I40E_AQC_SET_VSI_PROMISC_TX 0x8000 
+ __le16 seid; +#define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF + __le16 vlan_tag; +#define I40E_AQC_SET_VSI_VLAN_MASK 0x0FFF +#define I40E_AQC_SET_VSI_VLAN_VALID 0x8000 + u8 reserved[8]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_set_vsi_promiscuous_modes); + +/* Add S/E-tag command (direct 0x0255) + * Uses generic i40e_aqc_add_remove_tag_completion for completion + */ +struct i40e_aqc_add_tag { + __le16 flags; +#define I40E_AQC_ADD_TAG_FLAG_TO_QUEUE 0x0001 + __le16 seid; +#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT 0 +#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_MASK (0x3FF << \ + I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT) + __le16 tag; + __le16 queue_number; + u8 reserved[8]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_tag); + +struct i40e_aqc_add_remove_tag_completion { + u8 reserved[12]; + __le16 tags_used; + __le16 tags_free; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_tag_completion); + +/* Remove S/E-tag command (direct 0x0256) + * Uses generic i40e_aqc_add_remove_tag_completion for completion + */ +struct i40e_aqc_remove_tag { + __le16 seid; +#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT 0 +#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_MASK (0x3FF << \ + I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT) + __le16 tag; + u8 reserved[12]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_tag); + +/* Add multicast E-Tag (direct 0x0257) + * del multicast E-Tag (direct 0x0258) only uses pv_seid and etag fields + * and no external data + */ +struct i40e_aqc_add_remove_mcast_etag { + __le16 pv_seid; + __le16 etag; + u8 num_unicast_etags; + u8 reserved[3]; + __le32 addr_high; /* address of array of 2-byte s-tags */ + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag); + +struct i40e_aqc_add_remove_mcast_etag_completion { + u8 reserved[4]; + __le16 mcast_etags_used; + __le16 mcast_etags_free; + __le32 addr_high; + __le32 addr_low; + +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag_completion); + +/* Update S/E-Tag (direct 0x0259) */ +struct i40e_aqc_update_tag { + __le16 seid; +#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT 0 +#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_MASK (0x3FF << \ + I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT) + __le16 old_tag; + __le16 new_tag; + u8 reserved[10]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag); + +struct i40e_aqc_update_tag_completion { + u8 reserved[12]; + __le16 tags_used; + __le16 tags_free; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag_completion); + +/* Add Control Packet filter (direct 0x025A) + * Remove Control Packet filter (direct 0x025B) + * uses the i40e_aqc_add_oveb_cloud, + * and the generic direct completion structure + */ +struct i40e_aqc_add_remove_control_packet_filter { + u8 mac[6]; + __le16 etype; + __le16 flags; +#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC 0x0001 +#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP 0x0002 +#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE 0x0004 +#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX 0x0008 +#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_RX 0x0000 + __le16 seid; +#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT 0 +#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_MASK (0x3FF << \ + I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT) + __le16 queue; + u8 reserved[2]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter); + +struct i40e_aqc_add_remove_control_packet_filter_completion { + __le16 mac_etype_used; + __le16 etype_used; + __le16 mac_etype_free; + __le16 etype_free; + u8 reserved[8]; +}; + 
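To make the descriptor-overlay convention concrete, here is a minimal usage sketch of a direct Admin Queue command such as Add Control Packet Filter: the command structure is laid over the 16-byte params.raw area of a descriptor, the fields are filled in little-endian order, and the descriptor is handed to the send routine whose implementation appears earlier in this patch. The wrapper name and its parameters are hypothetical, and the helpers it leans on (i40e_fill_default_direct_cmd_desc, i40e_asq_send_command from i40e_prototype.h, i40e_memcpy with I40E_NONDMA_TO_NONDMA from i40e_osdep.h) are assumed from the rest of the i40e base code rather than shown in this hunk.

#include "i40e_type.h"
#include "i40e_prototype.h"

/* Hypothetical wrapper, for illustration only: issue the direct
 * Add Control Packet Filter command (opcode 0x025A) by overlaying
 * the command structure on the descriptor's 16-byte params.raw area.
 */
static enum i40e_status_code
example_add_control_packet_filter(struct i40e_hw *hw, const u8 *mac,
				  u16 ethtype, u16 flags, u16 seid,
				  u16 queue)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_add_remove_control_packet_filter *cmd =
		(struct i40e_aqc_add_remove_control_packet_filter *)
			&desc.params.raw;

	/* zero the descriptor, set the opcode and the SI flag */
	i40e_fill_default_direct_cmd_desc(&desc,
				i40e_aqc_opc_add_control_packet_filter);

	/* command fields are little-endian on the wire */
	if (mac)
		i40e_memcpy(cmd->mac, mac, 6, I40E_NONDMA_TO_NONDMA);
	cmd->etype = CPU_TO_LE16(ethtype);
	cmd->flags = CPU_TO_LE16(flags);
	cmd->seid = CPU_TO_LE16(seid);
	cmd->queue = CPU_TO_LE16(queue);

	/* direct command: no indirect buffer, default completion handling */
	return i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
}

The same pattern, with a non-NULL buffer and buff_size, covers the indirect commands; the send routine then copies the caller's buffer into the ring's DMA area and records its address in params.external, as shown in the asq_send_command body earlier in this patch.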
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter_completion); + +/* Add Cloud filters (indirect 0x025C) + * Remove Cloud filters (indirect 0x025D) + * uses the i40e_aqc_add_remove_cloud_filters, + * and the generic indirect completion structure + */ +struct i40e_aqc_add_remove_cloud_filters { + u8 num_filters; + u8 reserved; + __le16 seid; +#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT 0 +#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_MASK (0x3FF << \ + I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT) + u8 big_buffer_flag; +#define I40E_AQC_ADD_REM_CLOUD_CMD_BIG_BUFFER 1 +#define I40E_AQC_ADD_CLOUD_CMD_BB 1 + u8 reserved2[3]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_cloud_filters); + +struct i40e_aqc_cloud_filters_element_data { + u8 outer_mac[6]; + u8 inner_mac[6]; + __le16 inner_vlan; + union { + struct { + u8 reserved[12]; + u8 data[4]; + } v4; + struct { + u8 data[16]; + } v6; + struct { + __le16 data[8]; + } raw_v6; + } ipaddr; + __le16 flags; +#define I40E_AQC_ADD_CLOUD_FILTER_SHIFT 0 +#define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \ + I40E_AQC_ADD_CLOUD_FILTER_SHIFT) +/* 0x0000 reserved */ +#define I40E_AQC_ADD_CLOUD_FILTER_OIP 0x0001 +/* 0x0002 reserved */ +#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN 0x0003 +#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID 0x0004 +/* 0x0005 reserved */ +#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID 0x0006 +/* 0x0007 reserved */ +/* 0x0008 reserved */ +#define I40E_AQC_ADD_CLOUD_FILTER_OMAC 0x0009 +#define I40E_AQC_ADD_CLOUD_FILTER_IMAC 0x000A +#define I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC 0x000B +#define I40E_AQC_ADD_CLOUD_FILTER_IIP 0x000C +/* 0x000D reserved */ +/* 0x000E reserved */ +/* 0x000F reserved */ +/* 0x0010 to 0x0017 is for custom filters */ +#define I40E_AQC_ADD_CLOUD_FILTER_IP_PORT 0x0010 /* Dest IP + L4 Port */ +#define I40E_AQC_ADD_CLOUD_FILTER_MAC_PORT 0x0011 /* Dest MAC + L4 Port */ +#define I40E_AQC_ADD_CLOUD_FILTER_MAC_VLAN_PORT 0x0012 /* Dest MAC + VLAN + L4 Port */ + +#define I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE 0x0080 +#define I40E_AQC_ADD_CLOUD_VNK_SHIFT 6 +#define I40E_AQC_ADD_CLOUD_VNK_MASK 0x00C0 +#define I40E_AQC_ADD_CLOUD_FLAGS_IPV4 0 +#define I40E_AQC_ADD_CLOUD_FLAGS_IPV6 0x0100 + +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT 9 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK 0x1E00 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN 0 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC 1 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE 2 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP 3 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_RESERVED 4 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN_GPE 5 + +#define I40E_AQC_ADD_CLOUD_FLAGS_SHARED_OUTER_MAC 0x2000 +#define I40E_AQC_ADD_CLOUD_FLAGS_SHARED_INNER_MAC 0x4000 +#define I40E_AQC_ADD_CLOUD_FLAGS_SHARED_OUTER_IP 0x8000 + + __le32 tenant_id; + u8 reserved[4]; + __le16 queue_number; +#define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT 0 +#define I40E_AQC_ADD_CLOUD_QUEUE_MASK (0x7FF << \ + I40E_AQC_ADD_CLOUD_QUEUE_SHIFT) + u8 reserved2[14]; + /* response section */ + u8 allocation_result; +#define I40E_AQC_ADD_CLOUD_FILTER_SUCCESS 0x0 +#define I40E_AQC_ADD_CLOUD_FILTER_FAIL 0xFF + u8 response_reserved[7]; +}; + +/* i40e_aqc_add_rm_cloud_filt_elem_ext is used when + * I40E_AQC_ADD_REM_CLOUD_CMD_BIG_BUFFER flag is set. 
+ */ +struct i40e_aqc_add_rm_cloud_filt_elem_ext { + struct i40e_aqc_cloud_filters_element_data element; + u16 general_fields[32]; +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD0 0 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1 1 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD2 2 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0 3 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1 4 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2 5 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD0 6 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD1 7 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD2 8 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD0 9 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD1 10 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD2 11 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD0 12 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD1 13 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD2 14 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD0 15 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD1 16 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD2 17 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD3 18 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD4 19 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD5 20 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD6 21 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD7 22 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD0 23 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD1 24 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD2 25 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD3 26 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD4 27 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD5 28 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD6 29 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD7 30 +}; + +I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_cloud_filters_element_data); + +/* i40e_aqc_cloud_filters_element_bb is used when + * I40E_AQC_CLOUD_CMD_BB flag is set. 
+ */ +struct i40e_aqc_cloud_filters_element_bb { + struct i40e_aqc_cloud_filters_element_data element; + u16 general_fields[32]; +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD0 0 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1 1 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD2 2 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0 3 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1 4 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2 5 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD0 6 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD1 7 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD2 8 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD0 9 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD1 10 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD2 11 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD0 12 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD1 13 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD2 14 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD0 15 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD1 16 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD2 17 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD3 18 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD4 19 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD5 20 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD6 21 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD7 22 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD0 23 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD1 24 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD2 25 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD3 26 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD4 27 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD5 28 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD6 29 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD7 30 +}; + +I40E_CHECK_STRUCT_LEN(0x80, i40e_aqc_cloud_filters_element_bb); + +struct i40e_aqc_remove_cloud_filters_completion { + __le16 perfect_ovlan_used; + __le16 perfect_ovlan_free; + __le16 vlan_used; + __le16 vlan_free; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_cloud_filters_completion); + +/* Replace filter Command 0x025F + * uses the i40e_aqc_replace_cloud_filters, + * and the generic indirect completion structure + */ +struct i40e_filter_data { + u8 filter_type; + u8 input[3]; +}; + +I40E_CHECK_STRUCT_LEN(4, i40e_filter_data); + +struct i40e_aqc_replace_cloud_filters_cmd { + u8 valid_flags; +#define I40E_AQC_REPLACE_L1_FILTER 0x0 +#define I40E_AQC_REPLACE_CLOUD_FILTER 0x1 +#define I40E_AQC_GET_CLOUD_FILTERS 0x2 +#define I40E_AQC_MIRROR_CLOUD_FILTER 0x4 +#define I40E_AQC_HIGH_PRIORITY_CLOUD_FILTER 0x8 + u8 old_filter_type; + u8 new_filter_type; + u8 tr_bit; + u8 tr_bit2; + u8 reserved[3]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_replace_cloud_filters_cmd); + +struct i40e_aqc_replace_cloud_filters_cmd_buf { + u8 data[32]; +/* Filter type INPUT codes*/ +#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_ENTRIES_MAX 3 +#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED (1 << 7UL) + +/* Field Vector offsets */ +#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_MAC_DA 0 +#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_ETH 6 +#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG 7 +#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_VLAN 8 +#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_OVLAN 9 +#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN 10 +#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TUNNLE_KEY 11 +#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC 12 +/* big FLU */ +#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IP_DA 14 +/* big FLU */ +#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_OIP_DA 15 + +#define 
I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_INNER_VLAN 37 + struct i40e_filter_data filters[8]; +}; + +I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_replace_cloud_filters_cmd_buf); + +/* Add Mirror Rule (indirect or direct 0x0260) + * Delete Mirror Rule (indirect or direct 0x0261) + * note: some rule types (4,5) do not use an external buffer. + * take care to set the flags correctly. + */ +struct i40e_aqc_add_delete_mirror_rule { + __le16 seid; + __le16 rule_type; +#define I40E_AQC_MIRROR_RULE_TYPE_SHIFT 0 +#define I40E_AQC_MIRROR_RULE_TYPE_MASK (0x7 << \ + I40E_AQC_MIRROR_RULE_TYPE_SHIFT) +#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS 1 +#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS 2 +#define I40E_AQC_MIRROR_RULE_TYPE_VLAN 3 +#define I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS 4 +#define I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS 5 + __le16 num_entries; + __le16 destination; /* VSI for add, rule id for delete */ + __le32 addr_high; /* address of array of 2-byte VSI or VLAN ids */ + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule); + +struct i40e_aqc_add_delete_mirror_rule_completion { + u8 reserved[2]; + __le16 rule_id; /* only used on add */ + __le16 mirror_rules_used; + __le16 mirror_rules_free; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion); + +/* Dynamic Device Personalization */ +struct i40e_aqc_write_personalization_profile { + u8 flags; + u8 reserved[3]; + __le32 profile_track_id; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_write_personalization_profile); + +struct i40e_aqc_write_ddp_resp { + __le32 error_offset; + __le32 error_info; + __le32 addr_high; + __le32 addr_low; +}; + +struct i40e_aqc_get_applied_profiles { + u8 flags; +#define I40E_AQC_GET_DDP_GET_CONF 0x1 +#define I40E_AQC_GET_DDP_GET_RDPU_CONF 0x2 + u8 rsv[3]; + __le32 reserved; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_get_applied_profiles); + +/* DCB 0x03xx*/ + +/* PFC Ignore (direct 0x0301) + * the command and response use the same descriptor structure + */ +struct i40e_aqc_pfc_ignore { + u8 tc_bitmap; + u8 command_flags; /* unused on response */ +#define I40E_AQC_PFC_IGNORE_SET 0x80 +#define I40E_AQC_PFC_IGNORE_CLEAR 0x0 + u8 reserved[14]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_pfc_ignore); + +/* DCB Update (direct 0x0302) uses the i40e_aq_desc structure + * with no parameters + */ + +/* TX scheduler 0x04xx */ + +/* Almost all the indirect commands use + * this generic struct to pass the SEID in param0 + */ +struct i40e_aqc_tx_sched_ind { + __le16 vsi_seid; + u8 reserved[6]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_tx_sched_ind); + +/* Several commands respond with a set of queue set handles */ +struct i40e_aqc_qs_handles_resp { + __le16 qs_handles[8]; +}; + +/* Configure VSI BW limits (direct 0x0400) */ +struct i40e_aqc_configure_vsi_bw_limit { + __le16 vsi_seid; + u8 reserved[2]; + __le16 credit; + u8 reserved1[2]; + u8 max_credit; /* 0-3, limit = 2^max */ + u8 reserved2[7]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_vsi_bw_limit); + +/* Configure VSI Bandwidth Limit per Traffic Type (indirect 0x0406) + * responds with i40e_aqc_qs_handles_resp + */ +struct i40e_aqc_configure_vsi_ets_sla_bw_data { + u8 tc_valid_bits; + u8 reserved[15]; + __le16 tc_bw_credits[8]; /* FW writesback QS handles here */ + + /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */ + __le16 tc_bw_max[2]; + u8 reserved1[28]; +}; + +I40E_CHECK_STRUCT_LEN(0x40, 
i40e_aqc_configure_vsi_ets_sla_bw_data); + +/* Configure VSI Bandwidth Allocation per Traffic Type (indirect 0x0407) + * responds with i40e_aqc_qs_handles_resp + */ +struct i40e_aqc_configure_vsi_tc_bw_data { + u8 tc_valid_bits; + u8 reserved[3]; + u8 tc_bw_credits[8]; + u8 reserved1[4]; + __le16 qs_handles[8]; +}; + +I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_configure_vsi_tc_bw_data); + +/* Query vsi bw configuration (indirect 0x0408) */ +struct i40e_aqc_query_vsi_bw_config_resp { + u8 tc_valid_bits; + u8 tc_suspended_bits; + u8 reserved[14]; + __le16 qs_handles[8]; + u8 reserved1[4]; + __le16 port_bw_limit; + u8 reserved2[2]; + u8 max_bw; /* 0-3, limit = 2^max */ + u8 reserved3[23]; +}; + +I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_query_vsi_bw_config_resp); + +/* Query VSI Bandwidth Allocation per Traffic Type (indirect 0x040A) */ +struct i40e_aqc_query_vsi_ets_sla_config_resp { + u8 tc_valid_bits; + u8 reserved[3]; + u8 share_credits[8]; + __le16 credits[8]; + + /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */ + __le16 tc_bw_max[2]; +}; + +I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_query_vsi_ets_sla_config_resp); + +/* Configure Switching Component Bandwidth Limit (direct 0x0410) */ +struct i40e_aqc_configure_switching_comp_bw_limit { + __le16 seid; + u8 reserved[2]; + __le16 credit; + u8 reserved1[2]; + u8 max_bw; /* 0-3, limit = 2^max */ + u8 reserved2[7]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_switching_comp_bw_limit); + +/* Enable Physical Port ETS (indirect 0x0413) + * Modify Physical Port ETS (indirect 0x0414) + * Disable Physical Port ETS (indirect 0x0415) + */ +struct i40e_aqc_configure_switching_comp_ets_data { + u8 reserved[4]; + u8 tc_valid_bits; + u8 seepage; +#define I40E_AQ_ETS_SEEPAGE_EN_MASK 0x1 + u8 tc_strict_priority_flags; + u8 reserved1[17]; + u8 tc_bw_share_credits[8]; + u8 reserved2[96]; +}; + +I40E_CHECK_STRUCT_LEN(0x80, i40e_aqc_configure_switching_comp_ets_data); + +/* Configure Switching Component Bandwidth Limits per Tc (indirect 0x0416) */ +struct i40e_aqc_configure_switching_comp_ets_bw_limit_data { + u8 tc_valid_bits; + u8 reserved[15]; + __le16 tc_bw_credit[8]; + + /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */ + __le16 tc_bw_max[2]; + u8 reserved1[28]; +}; + +I40E_CHECK_STRUCT_LEN(0x40, + i40e_aqc_configure_switching_comp_ets_bw_limit_data); + +/* Configure Switching Component Bandwidth Allocation per Tc + * (indirect 0x0417) + */ +struct i40e_aqc_configure_switching_comp_bw_config_data { + u8 tc_valid_bits; + u8 reserved[2]; + u8 absolute_credits; /* bool */ + u8 tc_bw_share_credits[8]; + u8 reserved1[20]; +}; + +I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_configure_switching_comp_bw_config_data); + +/* Query Switching Component Configuration (indirect 0x0418) */ +struct i40e_aqc_query_switching_comp_ets_config_resp { + u8 tc_valid_bits; + u8 reserved[35]; + __le16 port_bw_limit; + u8 reserved1[2]; + u8 tc_bw_max; /* 0-3, limit = 2^max */ + u8 reserved2[23]; +}; + +I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_query_switching_comp_ets_config_resp); + +/* Query PhysicalPort ETS Configuration (indirect 0x0419) */ +struct i40e_aqc_query_port_ets_config_resp { + u8 reserved[4]; + u8 tc_valid_bits; + u8 reserved1; + u8 tc_strict_priority_bits; + u8 reserved2; + u8 tc_bw_share_credits[8]; + __le16 tc_bw_limits[8]; + + /* 4 bits per tc 0-7, 4th bit reserved, limit = 2^max */ + __le16 tc_bw_max[2]; + u8 reserved3[32]; +}; + +I40E_CHECK_STRUCT_LEN(0x44, i40e_aqc_query_port_ets_config_resp); + +/* Query Switching Component Bandwidth Allocation per Traffic Type + 
* (indirect 0x041A) + */ +struct i40e_aqc_query_switching_comp_bw_config_resp { + u8 tc_valid_bits; + u8 reserved[2]; + u8 absolute_credits_enable; /* bool */ + u8 tc_bw_share_credits[8]; + __le16 tc_bw_limits[8]; + + /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */ + __le16 tc_bw_max[2]; +}; + +I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_query_switching_comp_bw_config_resp); + +/* Suspend/resume port TX traffic + * (direct 0x041B and 0x041C) uses the generic SEID struct + */ + +/* Configure partition BW + * (indirect 0x041D) + */ +struct i40e_aqc_configure_partition_bw_data { + __le16 pf_valid_bits; + u8 min_bw[16]; /* guaranteed bandwidth */ + u8 max_bw[16]; /* bandwidth limit */ +}; + +I40E_CHECK_STRUCT_LEN(0x22, i40e_aqc_configure_partition_bw_data); + +/* Get and set the active HMC resource profile and status. + * (direct 0x0500) and (direct 0x0501) + */ +struct i40e_aq_get_set_hmc_resource_profile { + u8 pm_profile; + u8 pe_vf_enabled; + u8 reserved[14]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aq_get_set_hmc_resource_profile); + +enum i40e_aq_hmc_profile { + /* I40E_HMC_PROFILE_NO_CHANGE = 0, reserved */ + I40E_HMC_PROFILE_DEFAULT = 1, + I40E_HMC_PROFILE_FAVOR_VF = 2, + I40E_HMC_PROFILE_EQUAL = 3, +}; + +/* Get PHY Abilities (indirect 0x0600) uses the generic indirect struct */ + +/* set in param0 for get phy abilities to report qualified modules */ +#define I40E_AQ_PHY_REPORT_QUALIFIED_MODULES 0x0001 +#define I40E_AQ_PHY_REPORT_INITIAL_VALUES 0x0002 + +enum i40e_aq_phy_type { + I40E_PHY_TYPE_SGMII = 0x0, + I40E_PHY_TYPE_1000BASE_KX = 0x1, + I40E_PHY_TYPE_10GBASE_KX4 = 0x2, + I40E_PHY_TYPE_10GBASE_KR = 0x3, + I40E_PHY_TYPE_40GBASE_KR4 = 0x4, + I40E_PHY_TYPE_XAUI = 0x5, + I40E_PHY_TYPE_XFI = 0x6, + I40E_PHY_TYPE_SFI = 0x7, + I40E_PHY_TYPE_XLAUI = 0x8, + I40E_PHY_TYPE_XLPPI = 0x9, + I40E_PHY_TYPE_40GBASE_CR4_CU = 0xA, + I40E_PHY_TYPE_10GBASE_CR1_CU = 0xB, + I40E_PHY_TYPE_10GBASE_AOC = 0xC, + I40E_PHY_TYPE_40GBASE_AOC = 0xD, + I40E_PHY_TYPE_UNRECOGNIZED = 0xE, + I40E_PHY_TYPE_UNSUPPORTED = 0xF, + I40E_PHY_TYPE_100BASE_TX = 0x11, + I40E_PHY_TYPE_1000BASE_T = 0x12, + I40E_PHY_TYPE_10GBASE_T = 0x13, + I40E_PHY_TYPE_10GBASE_SR = 0x14, + I40E_PHY_TYPE_10GBASE_LR = 0x15, + I40E_PHY_TYPE_10GBASE_SFPP_CU = 0x16, + I40E_PHY_TYPE_10GBASE_CR1 = 0x17, + I40E_PHY_TYPE_40GBASE_CR4 = 0x18, + I40E_PHY_TYPE_40GBASE_SR4 = 0x19, + I40E_PHY_TYPE_40GBASE_LR4 = 0x1A, + I40E_PHY_TYPE_1000BASE_SX = 0x1B, + I40E_PHY_TYPE_1000BASE_LX = 0x1C, + I40E_PHY_TYPE_1000BASE_T_OPTICAL = 0x1D, + I40E_PHY_TYPE_20GBASE_KR2 = 0x1E, + I40E_PHY_TYPE_25GBASE_KR = 0x1F, + I40E_PHY_TYPE_25GBASE_CR = 0x20, + I40E_PHY_TYPE_25GBASE_SR = 0x21, + I40E_PHY_TYPE_25GBASE_LR = 0x22, + I40E_PHY_TYPE_25GBASE_AOC = 0x23, + I40E_PHY_TYPE_25GBASE_ACC = 0x24, + I40E_PHY_TYPE_2_5GBASE_T = 0x30, + I40E_PHY_TYPE_5GBASE_T = 0x31, + I40E_PHY_TYPE_MAX, + I40E_PHY_TYPE_NOT_SUPPORTED_HIGH_TEMP = 0xFD, + I40E_PHY_TYPE_EMPTY = 0xFE, + I40E_PHY_TYPE_DEFAULT = 0xFF, +}; + +#define I40E_PHY_TYPES_BITMASK (BIT_ULL(I40E_PHY_TYPE_SGMII) | \ + BIT_ULL(I40E_PHY_TYPE_1000BASE_KX) | \ + BIT_ULL(I40E_PHY_TYPE_10GBASE_KX4) | \ + BIT_ULL(I40E_PHY_TYPE_10GBASE_KR) | \ + BIT_ULL(I40E_PHY_TYPE_40GBASE_KR4) | \ + BIT_ULL(I40E_PHY_TYPE_XAUI) | \ + BIT_ULL(I40E_PHY_TYPE_XFI) | \ + BIT_ULL(I40E_PHY_TYPE_SFI) | \ + BIT_ULL(I40E_PHY_TYPE_XLAUI) | \ + BIT_ULL(I40E_PHY_TYPE_XLPPI) | \ + BIT_ULL(I40E_PHY_TYPE_40GBASE_CR4_CU) | \ + BIT_ULL(I40E_PHY_TYPE_10GBASE_CR1_CU) | \ + BIT_ULL(I40E_PHY_TYPE_10GBASE_AOC) | \ + BIT_ULL(I40E_PHY_TYPE_40GBASE_AOC) | \ + 
BIT_ULL(I40E_PHY_TYPE_UNRECOGNIZED) | \ + BIT_ULL(I40E_PHY_TYPE_UNSUPPORTED) | \ + BIT_ULL(I40E_PHY_TYPE_100BASE_TX) | \ + BIT_ULL(I40E_PHY_TYPE_1000BASE_T) | \ + BIT_ULL(I40E_PHY_TYPE_10GBASE_T) | \ + BIT_ULL(I40E_PHY_TYPE_10GBASE_SR) | \ + BIT_ULL(I40E_PHY_TYPE_10GBASE_LR) | \ + BIT_ULL(I40E_PHY_TYPE_10GBASE_SFPP_CU) | \ + BIT_ULL(I40E_PHY_TYPE_10GBASE_CR1) | \ + BIT_ULL(I40E_PHY_TYPE_40GBASE_CR4) | \ + BIT_ULL(I40E_PHY_TYPE_40GBASE_SR4) | \ + BIT_ULL(I40E_PHY_TYPE_40GBASE_LR4) | \ + BIT_ULL(I40E_PHY_TYPE_1000BASE_SX) | \ + BIT_ULL(I40E_PHY_TYPE_1000BASE_LX) | \ + BIT_ULL(I40E_PHY_TYPE_1000BASE_T_OPTICAL) | \ + BIT_ULL(I40E_PHY_TYPE_20GBASE_KR2) | \ + BIT_ULL(I40E_PHY_TYPE_25GBASE_KR) | \ + BIT_ULL(I40E_PHY_TYPE_25GBASE_CR) | \ + BIT_ULL(I40E_PHY_TYPE_25GBASE_SR) | \ + BIT_ULL(I40E_PHY_TYPE_25GBASE_LR) | \ + BIT_ULL(I40E_PHY_TYPE_25GBASE_AOC) | \ + BIT_ULL(I40E_PHY_TYPE_25GBASE_ACC) | \ + BIT_ULL(I40E_PHY_TYPE_2_5GBASE_T) | \ + BIT_ULL(I40E_PHY_TYPE_5GBASE_T)) + +#define I40E_LINK_SPEED_2_5GB_SHIFT 0x0 +#define I40E_LINK_SPEED_100MB_SHIFT 0x1 +#define I40E_LINK_SPEED_1000MB_SHIFT 0x2 +#define I40E_LINK_SPEED_10GB_SHIFT 0x3 +#define I40E_LINK_SPEED_40GB_SHIFT 0x4 +#define I40E_LINK_SPEED_20GB_SHIFT 0x5 +#define I40E_LINK_SPEED_25GB_SHIFT 0x6 +#define I40E_LINK_SPEED_5GB_SHIFT 0x7 + +enum i40e_aq_link_speed { + I40E_LINK_SPEED_UNKNOWN = 0, + I40E_LINK_SPEED_100MB = (1 << I40E_LINK_SPEED_100MB_SHIFT), + I40E_LINK_SPEED_1GB = (1 << I40E_LINK_SPEED_1000MB_SHIFT), + I40E_LINK_SPEED_2_5GB = (1 << I40E_LINK_SPEED_2_5GB_SHIFT), + I40E_LINK_SPEED_5GB = (1 << I40E_LINK_SPEED_5GB_SHIFT), + I40E_LINK_SPEED_10GB = (1 << I40E_LINK_SPEED_10GB_SHIFT), + I40E_LINK_SPEED_40GB = (1 << I40E_LINK_SPEED_40GB_SHIFT), + I40E_LINK_SPEED_20GB = (1 << I40E_LINK_SPEED_20GB_SHIFT), + I40E_LINK_SPEED_25GB = (1 << I40E_LINK_SPEED_25GB_SHIFT), +}; + +struct i40e_aqc_module_desc { + u8 oui[3]; + u8 reserved1; + u8 part_number[16]; + u8 revision[4]; + u8 reserved2[8]; +}; + +I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_module_desc); + +struct i40e_aq_get_phy_abilities_resp { + __le32 phy_type; /* bitmap using the above enum for offsets */ + u8 link_speed; /* bitmap using the above enum bit patterns */ + u8 abilities; +#define I40E_AQ_PHY_FLAG_PAUSE_TX 0x01 +#define I40E_AQ_PHY_FLAG_PAUSE_RX 0x02 +#define I40E_AQ_PHY_FLAG_LOW_POWER 0x04 +#define I40E_AQ_PHY_LINK_ENABLED 0x08 +#define I40E_AQ_PHY_AN_ENABLED 0x10 +#define I40E_AQ_PHY_FLAG_MODULE_QUAL 0x20 +#define I40E_AQ_PHY_FEC_ABILITY_KR 0x40 +#define I40E_AQ_PHY_FEC_ABILITY_RS 0x80 + __le16 eee_capability; +#define I40E_AQ_EEE_AUTO 0x0001 +#define I40E_AQ_EEE_100BASE_TX 0x0002 +#define I40E_AQ_EEE_1000BASE_T 0x0004 +#define I40E_AQ_EEE_10GBASE_T 0x0008 +#define I40E_AQ_EEE_1000BASE_KX 0x0010 +#define I40E_AQ_EEE_10GBASE_KX4 0x0020 +#define I40E_AQ_EEE_10GBASE_KR 0x0040 +#define I40E_AQ_EEE_2_5GBASE_T 0x0100 +#define I40E_AQ_EEE_5GBASE_T 0x0200 + __le32 eeer_val; + u8 d3_lpan; +#define I40E_AQ_SET_PHY_D3_LPAN_ENA 0x01 + u8 phy_type_ext; +#define I40E_AQ_PHY_TYPE_EXT_25G_KR 0x01 +#define I40E_AQ_PHY_TYPE_EXT_25G_CR 0x02 +#define I40E_AQ_PHY_TYPE_EXT_25G_SR 0x04 +#define I40E_AQ_PHY_TYPE_EXT_25G_LR 0x08 +#define I40E_AQ_PHY_TYPE_EXT_25G_AOC 0x10 +#define I40E_AQ_PHY_TYPE_EXT_25G_ACC 0x20 +#define I40E_AQ_PHY_TYPE_EXT_2_5GBASE_T 0x40 +#define I40E_AQ_PHY_TYPE_EXT_5GBASE_T 0x80 + u8 fec_cfg_curr_mod_ext_info; +#define I40E_AQ_ENABLE_FEC_KR 0x01 +#define I40E_AQ_ENABLE_FEC_RS 0x02 +#define I40E_AQ_REQUEST_FEC_KR 0x04 +#define I40E_AQ_REQUEST_FEC_RS 0x08 +#define 
I40E_AQ_ENABLE_FEC_AUTO 0x10 +#define I40E_AQ_FEC +#define I40E_AQ_MODULE_TYPE_EXT_MASK 0xE0 +#define I40E_AQ_MODULE_TYPE_EXT_SHIFT 5 + + u8 ext_comp_code; + u8 phy_id[4]; + u8 module_type[3]; + u8 qualified_module_count; +#define I40E_AQ_PHY_MAX_QMS 16 + struct i40e_aqc_module_desc qualified_module[I40E_AQ_PHY_MAX_QMS]; +}; + +I40E_CHECK_STRUCT_LEN(0x218, i40e_aq_get_phy_abilities_resp); + +/* Set PHY Config (direct 0x0601) */ +struct i40e_aq_set_phy_config { /* same bits as above in all */ + __le32 phy_type; + u8 link_speed; + u8 abilities; +/* bits 0-2 use the values from get_phy_abilities_resp */ +#define I40E_AQ_PHY_ENABLE_LINK 0x08 +#define I40E_AQ_PHY_ENABLE_AN 0x10 +#define I40E_AQ_PHY_ENABLE_ATOMIC_LINK 0x20 + __le16 eee_capability; + __le32 eeer; + u8 low_power_ctrl; + u8 phy_type_ext; + u8 fec_config; +#define I40E_AQ_SET_FEC_ABILITY_KR BIT(0) +#define I40E_AQ_SET_FEC_ABILITY_RS BIT(1) +#define I40E_AQ_SET_FEC_REQUEST_KR BIT(2) +#define I40E_AQ_SET_FEC_REQUEST_RS BIT(3) +#define I40E_AQ_SET_FEC_AUTO BIT(4) +#define I40E_AQ_PHY_FEC_CONFIG_SHIFT 0x0 +#define I40E_AQ_PHY_FEC_CONFIG_MASK (0x1F << I40E_AQ_PHY_FEC_CONFIG_SHIFT) + u8 reserved; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aq_set_phy_config); + +/* Set MAC Config command data structure (direct 0x0603) */ +struct i40e_aq_set_mac_config { + __le16 max_frame_size; + u8 params; +#define I40E_AQ_SET_MAC_CONFIG_CRC_EN 0x04 +#define I40E_AQ_SET_MAC_CONFIG_PACING_MASK 0x78 +#define I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT 3 +#define I40E_AQ_SET_MAC_CONFIG_PACING_NONE 0x0 +#define I40E_AQ_SET_MAC_CONFIG_PACING_1B_13TX 0xF +#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_9TX 0x9 +#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_4TX 0x8 +#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_7TX 0x7 +#define I40E_AQ_SET_MAC_CONFIG_PACING_2DW_3TX 0x6 +#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_1TX 0x5 +#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_2TX 0x4 +#define I40E_AQ_SET_MAC_CONFIG_PACING_7DW_3TX 0x3 +#define I40E_AQ_SET_MAC_CONFIG_PACING_4DW_1TX 0x2 +#define I40E_AQ_SET_MAC_CONFIG_PACING_9DW_1TX 0x1 +#define I40E_AQ_SET_MAC_CONFIG_DROP_BLOCKING_PACKET_EN 0x80 + u8 tx_timer_priority; /* bitmap */ + __le16 tx_timer_value; + __le16 fc_refresh_threshold; + u8 reserved[8]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aq_set_mac_config); + +/* Restart Auto-Negotiation (direct 0x605) */ +struct i40e_aqc_set_link_restart_an { + u8 command; +#define I40E_AQ_PHY_RESTART_AN 0x02 +#define I40E_AQ_PHY_LINK_ENABLE 0x04 + u8 reserved[15]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_set_link_restart_an); + +/* Get Link Status cmd & response data structure (direct 0x0607) */ +struct i40e_aqc_get_link_status { + __le16 command_flags; /* only field set on command */ +#define I40E_AQ_LSE_MASK 0x3 +#define I40E_AQ_LSE_NOP 0x0 +#define I40E_AQ_LSE_DISABLE 0x2 +#define I40E_AQ_LSE_ENABLE 0x3 +/* only response uses this flag */ +#define I40E_AQ_LSE_IS_ENABLED 0x1 + u8 phy_type; /* i40e_aq_phy_type */ + u8 link_speed; /* i40e_aq_link_speed */ + u8 link_info; +#define I40E_AQ_LINK_UP 0x01 /* obsolete */ +#define I40E_AQ_LINK_UP_FUNCTION 0x01 +#define I40E_AQ_LINK_FAULT 0x02 +#define I40E_AQ_LINK_FAULT_TX 0x04 +#define I40E_AQ_LINK_FAULT_RX 0x08 +#define I40E_AQ_LINK_FAULT_REMOTE 0x10 +#define I40E_AQ_LINK_UP_PORT 0x20 +#define I40E_AQ_MEDIA_AVAILABLE 0x40 +#define I40E_AQ_SIGNAL_DETECT 0x80 + u8 an_info; +#define I40E_AQ_AN_COMPLETED 0x01 +#define I40E_AQ_LP_AN_ABILITY 0x02 +#define I40E_AQ_PD_FAULT 0x04 +#define I40E_AQ_FEC_EN 0x08 +#define I40E_AQ_PHY_LOW_POWER 0x10 +#define I40E_AQ_LINK_PAUSE_TX 0x20 +#define 
I40E_AQ_LINK_PAUSE_RX 0x40 +#define I40E_AQ_QUALIFIED_MODULE 0x80 + u8 ext_info; +#define I40E_AQ_LINK_PHY_TEMP_ALARM 0x01 +#define I40E_AQ_LINK_XCESSIVE_ERRORS 0x02 +#define I40E_AQ_LINK_TX_SHIFT 0x02 +#define I40E_AQ_LINK_TX_MASK (0x03 << I40E_AQ_LINK_TX_SHIFT) +#define I40E_AQ_LINK_TX_ACTIVE 0x00 +#define I40E_AQ_LINK_TX_DRAINED 0x01 +#define I40E_AQ_LINK_TX_FLUSHED 0x03 +#define I40E_AQ_LINK_FORCED_40G 0x10 +/* 25G Error Codes */ +#define I40E_AQ_25G_NO_ERR 0X00 +#define I40E_AQ_25G_NOT_PRESENT 0X01 +#define I40E_AQ_25G_NVM_CRC_ERR 0X02 +#define I40E_AQ_25G_SBUS_UCODE_ERR 0X03 +#define I40E_AQ_25G_SERDES_UCODE_ERR 0X04 +#define I40E_AQ_25G_NIMB_UCODE_ERR 0X05 + u8 loopback; /* use defines from i40e_aqc_set_lb_mode */ +/* Since firmware API 1.7 loopback field keeps power class info as well */ +#define I40E_AQ_LOOPBACK_MASK 0x07 +#define I40E_AQ_PWR_CLASS_SHIFT_LB 6 +#define I40E_AQ_PWR_CLASS_MASK_LB (0x03 << I40E_AQ_PWR_CLASS_SHIFT_LB) + __le16 max_frame_size; + u8 config; +#define I40E_AQ_CONFIG_FEC_KR_ENA 0x01 +#define I40E_AQ_CONFIG_FEC_RS_ENA 0x02 +#define I40E_AQ_CONFIG_CRC_ENA 0x04 +#define I40E_AQ_CONFIG_PACING_MASK 0x78 + union { + struct { + u8 power_desc; +#define I40E_AQ_LINK_POWER_CLASS_1 0x00 +#define I40E_AQ_LINK_POWER_CLASS_2 0x01 +#define I40E_AQ_LINK_POWER_CLASS_3 0x02 +#define I40E_AQ_LINK_POWER_CLASS_4 0x03 +#define I40E_AQ_PWR_CLASS_MASK 0x03 + u8 reserved[4]; + }; + struct { + u8 link_type[4]; + u8 link_type_ext; + }; + }; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_get_link_status); + +/* Set event mask command (direct 0x613) */ +struct i40e_aqc_set_phy_int_mask { + u8 reserved[8]; + __le16 event_mask; +#define I40E_AQ_EVENT_LINK_UPDOWN 0x0002 +#define I40E_AQ_EVENT_MEDIA_NA 0x0004 +#define I40E_AQ_EVENT_LINK_FAULT 0x0008 +#define I40E_AQ_EVENT_PHY_TEMP_ALARM 0x0010 +#define I40E_AQ_EVENT_EXCESSIVE_ERRORS 0x0020 +#define I40E_AQ_EVENT_SIGNAL_DETECT 0x0040 +#define I40E_AQ_EVENT_AN_COMPLETED 0x0080 +#define I40E_AQ_EVENT_MODULE_QUAL_FAIL 0x0100 +#define I40E_AQ_EVENT_PORT_TX_SUSPENDED 0x0200 + u8 reserved1[6]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_int_mask); + +/* Get Local AN advt register (direct 0x0614) + * Set Local AN advt register (direct 0x0615) + * Get Link Partner AN advt register (direct 0x0616) + */ +struct i40e_aqc_an_advt_reg { + __le32 local_an_reg0; + __le16 local_an_reg1; + u8 reserved[10]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_an_advt_reg); + +/* Set Loopback mode (0x0618) */ +struct i40e_aqc_set_lb_mode { + __le16 lb_mode; +#define I40E_AQ_LB_PHY_LOCAL 0x01 +#define I40E_AQ_LB_PHY_REMOTE 0x02 +#define I40E_AQ_LB_MAC_LOCAL 0x04 + u8 reserved[14]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_set_lb_mode); + +/* Set PHY Debug command (0x0622) */ +struct i40e_aqc_set_phy_debug { + u8 command_flags; +#define I40E_AQ_PHY_DEBUG_RESET_INTERNAL 0x02 +#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT 2 +#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_MASK (0x03 << \ + I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT) +#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_NONE 0x00 +#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_HARD 0x01 +#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SOFT 0x02 +/* Disable link manageability on a single port */ +#define I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW 0x10 +/* Disable link manageability on all ports needs both bits 4 and 5 */ +#define I40E_AQ_PHY_DEBUG_DISABLE_ALL_LINK_FW 0x20 + u8 reserved[15]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_debug); + +enum i40e_aq_phy_reg_type { + I40E_AQC_PHY_REG_INTERNAL = 0x1, + I40E_AQC_PHY_REG_EXERNAL_BASET = 0x2, + 
I40E_AQC_PHY_REG_EXERNAL_MODULE = 0x3 +}; + +#pragma pack(1) +/* Run PHY Activity (0x0626) */ +struct i40e_aqc_run_phy_activity { + u8 cmd_flags; + __le16 activity_id; +#define I40E_AQ_RUN_PHY_ACT_ID_USR_DFND 0x10 + u8 reserved; + union { + struct { + __le32 dnl_opcode; +#define I40E_AQ_RUN_PHY_ACT_DNL_OPCODE_GET_EEE_STAT_DUR 0x801a +#define I40E_AQ_RUN_PHY_ACT_DNL_OPCODE_GET_EEE_STAT 0x801b +#define I40E_AQ_RUN_PHY_ACT_DNL_OPCODE_GET_EEE_DUR 0x1801b + __le32 data; + u8 reserved2[4]; + } cmd; + struct { + __le32 cmd_status; +#define I40E_AQ_RUN_PHY_ACT_CMD_STAT_SUCC 0x4 +#define I40E_AQ_RUN_PHY_ACT_CMD_STAT_MASK 0xFFFF + __le32 data0; + __le32 data1; + } resp; + } params; +}; +#pragma pack() + +I40E_CHECK_CMD_LENGTH(i40e_aqc_run_phy_activity); + +/* Set PHY Register command (0x0628) */ +/* Get PHY Register command (0x0629) */ +struct i40e_aqc_phy_register_access { + u8 phy_interface; +#define I40E_AQ_PHY_REG_ACCESS_INTERNAL 0 +#define I40E_AQ_PHY_REG_ACCESS_EXTERNAL 1 +#define I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE 2 + u8 dev_addres; + u8 cmd_flags; +#define I40E_AQ_PHY_REG_ACCESS_DONT_CHANGE_QSFP_PAGE 0x01 +#define I40E_AQ_PHY_REG_ACCESS_SET_MDIO_IF_NUMBER 0x02 +#define I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_SHIFT 2 +#define I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_MASK (0x3 << \ + I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_SHIFT) + u8 reserved1; + __le32 reg_address; + __le32 reg_value; + u8 reserved2[4]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_phy_register_access); + +/* NVM Read command (indirect 0x0701) + * NVM Erase commands (direct 0x0702) + * NVM Update commands (indirect 0x0703) + */ +struct i40e_aqc_nvm_update { + u8 command_flags; +#define I40E_AQ_NVM_LAST_CMD 0x01 +#define I40E_AQ_NVM_REARRANGE_TO_FLAT 0x20 +#define I40E_AQ_NVM_REARRANGE_TO_STRUCT 0x40 +#define I40E_AQ_NVM_FLASH_ONLY 0x80 +#define I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT 1 +#define I40E_AQ_NVM_PRESERVATION_FLAGS_MASK 0x03 +#define I40E_AQ_NVM_PRESERVATION_FLAGS_SELECTED 0x03 +#define I40E_AQ_NVM_PRESERVATION_FLAGS_ALL 0x01 + u8 module_pointer; + __le16 length; + __le32 offset; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_update); + +/* NVM Config Read (indirect 0x0704) */ +struct i40e_aqc_nvm_config_read { + __le16 cmd_flags; +#define I40E_AQ_ANVM_SINGLE_OR_MULTIPLE_FEATURES_MASK 1 +#define I40E_AQ_ANVM_READ_SINGLE_FEATURE 0 +#define I40E_AQ_ANVM_READ_MULTIPLE_FEATURES 1 + __le16 element_count; + __le16 element_id; /* Feature/field ID */ + __le16 element_id_msw; /* MSWord of field ID */ + __le32 address_high; + __le32 address_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_read); + +/* NVM Config Write (indirect 0x0705) */ +struct i40e_aqc_nvm_config_write { + __le16 cmd_flags; + __le16 element_count; + u8 reserved[4]; + __le32 address_high; + __le32 address_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write); + +/* Used for 0x0704 as well as for 0x0705 commands */ +#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT 1 +#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_MASK \ + (1 << I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT) +#define I40E_AQ_ANVM_FEATURE 0 +#define I40E_AQ_ANVM_IMMEDIATE_FIELD (1 << FEATURE_OR_IMMEDIATE_SHIFT) +struct i40e_aqc_nvm_config_data_feature { + __le16 feature_id; +#define I40E_AQ_ANVM_FEATURE_OPTION_OEM_ONLY 0x01 +#define I40E_AQ_ANVM_FEATURE_OPTION_DWORD_MAP 0x08 +#define I40E_AQ_ANVM_FEATURE_OPTION_POR_CSR 0x10 + __le16 feature_options; + __le16 feature_selection; +}; + +I40E_CHECK_STRUCT_LEN(0x6, i40e_aqc_nvm_config_data_feature); + +struct 
i40e_aqc_nvm_config_data_immediate_field { + __le32 field_id; + __le32 field_value; + __le16 field_options; + __le16 reserved; +}; + +I40E_CHECK_STRUCT_LEN(0xc, i40e_aqc_nvm_config_data_immediate_field); + +/* OEM Post Update (indirect 0x0720) + * no command data struct used + */ +struct i40e_aqc_nvm_oem_post_update { +#define I40E_AQ_NVM_OEM_POST_UPDATE_EXTERNAL_DATA 0x01 + u8 sel_data; + u8 reserved[7]; +}; + +I40E_CHECK_STRUCT_LEN(0x8, i40e_aqc_nvm_oem_post_update); + +struct i40e_aqc_nvm_oem_post_update_buffer { + u8 str_len; + u8 dev_addr; + __le16 eeprom_addr; + u8 data[36]; +}; + +I40E_CHECK_STRUCT_LEN(0x28, i40e_aqc_nvm_oem_post_update_buffer); + +/* Thermal Sensor (indirect 0x0721) + * read or set thermal sensor configs and values + * takes a sensor and command specific data buffer, not detailed here + */ +struct i40e_aqc_thermal_sensor { + u8 sensor_action; +#define I40E_AQ_THERMAL_SENSOR_READ_CONFIG 0 +#define I40E_AQ_THERMAL_SENSOR_SET_CONFIG 1 +#define I40E_AQ_THERMAL_SENSOR_READ_TEMP 2 + u8 reserved[7]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_thermal_sensor); + +/* Send to PF command (indirect 0x0801) id is only used by PF + * Send to VF command (indirect 0x0802) id is only used by PF + * Send to Peer PF command (indirect 0x0803) + */ +struct i40e_aqc_pf_vf_message { + __le32 id; + u8 reserved[4]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_pf_vf_message); + +/* Alternate structure */ + +/* Direct write (direct 0x0900) + * Direct read (direct 0x0902) + */ +struct i40e_aqc_alternate_write { + __le32 address0; + __le32 data0; + __le32 address1; + __le32 data1; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write); + +/* Indirect write (indirect 0x0901) + * Indirect read (indirect 0x0903) + */ + +struct i40e_aqc_alternate_ind_write { + __le32 address; + __le32 length; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_ind_write); + +/* Done alternate write (direct 0x0904) + * uses i40e_aq_desc + */ +struct i40e_aqc_alternate_write_done { + __le16 cmd_flags; +#define I40E_AQ_ALTERNATE_MODE_BIOS_MASK 1 +#define I40E_AQ_ALTERNATE_MODE_BIOS_LEGACY 0 +#define I40E_AQ_ALTERNATE_MODE_BIOS_UEFI 1 +#define I40E_AQ_ALTERNATE_RESET_NEEDED 2 + u8 reserved[14]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write_done); + +/* Set OEM mode (direct 0x0905) */ +struct i40e_aqc_alternate_set_mode { + __le32 mode; +#define I40E_AQ_ALTERNATE_MODE_NONE 0 +#define I40E_AQ_ALTERNATE_MODE_OEM 1 + u8 reserved[12]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_set_mode); + +/* Clear port Alternate RAM (direct 0x0906) uses i40e_aq_desc */ + +/* async events 0x10xx */ + +/* Lan Queue Overflow Event (direct, 0x1001) */ +struct i40e_aqc_lan_overflow { + __le32 prtdcb_rupto; + __le32 otx_ctl; + u8 reserved[8]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_lan_overflow); + +/* Get LLDP MIB (indirect 0x0A00) */ +struct i40e_aqc_lldp_get_mib { + u8 type; + u8 reserved1; +#define I40E_AQ_LLDP_MIB_TYPE_MASK 0x3 +#define I40E_AQ_LLDP_MIB_LOCAL 0x0 +#define I40E_AQ_LLDP_MIB_REMOTE 0x1 +#define I40E_AQ_LLDP_MIB_LOCAL_AND_REMOTE 0x2 +#define I40E_AQ_LLDP_BRIDGE_TYPE_MASK 0xC +#define I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT 0x2 +#define I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE 0x0 +#define I40E_AQ_LLDP_BRIDGE_TYPE_NON_TPMR 0x1 +#define I40E_AQ_LLDP_TX_SHIFT 0x4 +#define I40E_AQ_LLDP_TX_MASK (0x03 << I40E_AQ_LLDP_TX_SHIFT) +/* TX pause flags use I40E_AQ_LINK_TX_* above */ + __le16 local_len; + __le16 remote_len; + u8 
reserved2[2]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_get_mib); + +/* Configure LLDP MIB Change Event (direct 0x0A01) + * also used for the event (with type in the command field) + */ +struct i40e_aqc_lldp_update_mib { + u8 command; +#define I40E_AQ_LLDP_MIB_UPDATE_ENABLE 0x0 +#define I40E_AQ_LLDP_MIB_UPDATE_DISABLE 0x1 + u8 reserved[7]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_mib); + +/* Add LLDP TLV (indirect 0x0A02) + * Delete LLDP TLV (indirect 0x0A04) + */ +struct i40e_aqc_lldp_add_tlv { + u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */ + u8 reserved1[1]; + __le16 len; + u8 reserved2[4]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_add_tlv); + +/* Update LLDP TLV (indirect 0x0A03) */ +struct i40e_aqc_lldp_update_tlv { + u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */ + u8 reserved; + __le16 old_len; + __le16 new_offset; + __le16 new_len; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_tlv); + +/* Stop LLDP (direct 0x0A05) */ +struct i40e_aqc_lldp_stop { + u8 command; +#define I40E_AQ_LLDP_AGENT_STOP 0x0 +#define I40E_AQ_LLDP_AGENT_SHUTDOWN 0x1 +#define I40E_AQ_LLDP_AGENT_STOP_PERSIST 0x2 + u8 reserved[15]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop); + +/* Start LLDP (direct 0x0A06) */ + +struct i40e_aqc_lldp_start { + u8 command; +#define I40E_AQ_LLDP_AGENT_START 0x1 +#define I40E_AQ_LLDP_AGENT_START_PERSIST 0x2 + u8 reserved[15]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start); + +/* Set DCB (direct 0x0303) */ +struct i40e_aqc_set_dcb_parameters { + u8 command; +#define I40E_AQ_DCB_SET_AGENT 0x1 +#define I40E_DCB_VALID 0x1 + u8 valid_flags; + u8 reserved[14]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_set_dcb_parameters); + +/* Get CEE DCBX Oper Config (0x0A07) + * uses the generic descriptor struct + * returns below as indirect response + */ + +#define I40E_AQC_CEE_APP_FCOE_SHIFT 0x0 +#define I40E_AQC_CEE_APP_FCOE_MASK (0x7 << I40E_AQC_CEE_APP_FCOE_SHIFT) +#define I40E_AQC_CEE_APP_ISCSI_SHIFT 0x3 +#define I40E_AQC_CEE_APP_ISCSI_MASK (0x7 << I40E_AQC_CEE_APP_ISCSI_SHIFT) +#define I40E_AQC_CEE_APP_FIP_SHIFT 0x8 +#define I40E_AQC_CEE_APP_FIP_MASK (0x7 << I40E_AQC_CEE_APP_FIP_SHIFT) + +#define I40E_AQC_CEE_PG_STATUS_SHIFT 0x0 +#define I40E_AQC_CEE_PG_STATUS_MASK (0x7 << I40E_AQC_CEE_PG_STATUS_SHIFT) +#define I40E_AQC_CEE_PFC_STATUS_SHIFT 0x3 +#define I40E_AQC_CEE_PFC_STATUS_MASK (0x7 << I40E_AQC_CEE_PFC_STATUS_SHIFT) +#define I40E_AQC_CEE_APP_STATUS_SHIFT 0x8 +#define I40E_AQC_CEE_APP_STATUS_MASK (0x7 << I40E_AQC_CEE_APP_STATUS_SHIFT) +#define I40E_AQC_CEE_FCOE_STATUS_SHIFT 0x8 +#define I40E_AQC_CEE_FCOE_STATUS_MASK (0x7 << I40E_AQC_CEE_FCOE_STATUS_SHIFT) +#define I40E_AQC_CEE_ISCSI_STATUS_SHIFT 0xB +#define I40E_AQC_CEE_ISCSI_STATUS_MASK (0x7 << I40E_AQC_CEE_ISCSI_STATUS_SHIFT) +#define I40E_AQC_CEE_FIP_STATUS_SHIFT 0x10 +#define I40E_AQC_CEE_FIP_STATUS_MASK (0x7 << I40E_AQC_CEE_FIP_STATUS_SHIFT) + +/* struct i40e_aqc_get_cee_dcb_cfg_v1_resp was originally defined with + * word boundary layout issues, which the Linux compilers silently deal + * with by adding padding, making the actual struct larger than designed. + * However, the FW compiler for the NIC is less lenient and complains + * about the struct. Hence, the struct defined here has an extra byte in + * fields reserved3 and reserved4 to directly acknowledge that padding, + * and the new length is used in the length check macro. 
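+ *
+ * (Editorial note, not part of the upstream comment: with a single byte in
+ * each of reserved3 and reserved4 the struct is 0x16 bytes as designed, but
+ * alignment padding before the two __le16 fields makes it 0x18 in practice;
+ * declaring reserved3[2] and reserved4[2] makes that padding explicit, and
+ * 0x18 is the value checked by I40E_CHECK_STRUCT_LEN below.)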
+ */ +struct i40e_aqc_get_cee_dcb_cfg_v1_resp { + u8 reserved1; + u8 oper_num_tc; + u8 oper_prio_tc[4]; + u8 reserved2; + u8 oper_tc_bw[8]; + u8 oper_pfc_en; + u8 reserved3[2]; + __le16 oper_app_prio; + u8 reserved4[2]; + __le16 tlv_status; +}; + +I40E_CHECK_STRUCT_LEN(0x18, i40e_aqc_get_cee_dcb_cfg_v1_resp); + +struct i40e_aqc_get_cee_dcb_cfg_resp { + u8 oper_num_tc; + u8 oper_prio_tc[4]; + u8 oper_tc_bw[8]; + u8 oper_pfc_en; + __le16 oper_app_prio; + __le32 tlv_status; + u8 reserved[12]; +}; + +I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_get_cee_dcb_cfg_resp); + +/* Set Local LLDP MIB (indirect 0x0A08) + * Used to replace the local MIB of a given LLDP agent. e.g. DCBx + */ +struct i40e_aqc_lldp_set_local_mib { +#define SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT 0 +#define SET_LOCAL_MIB_AC_TYPE_DCBX_MASK (1 << \ + SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT) +#define SET_LOCAL_MIB_AC_TYPE_LOCAL_MIB 0x0 +#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT (1) +#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_MASK (1 << \ + SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT) +#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS 0x1 + u8 type; + u8 reserved0; + __le16 length; + u8 reserved1[4]; + __le32 address_high; + __le32 address_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_set_local_mib); + +struct i40e_aqc_lldp_set_local_mib_resp { +#define SET_LOCAL_MIB_RESP_EVENT_TRIGGERED_MASK 0x01 + u8 status; + u8 reserved[15]; +}; + +I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_lldp_set_local_mib_resp); + +/* Stop/Start LLDP Agent (direct 0x0A09) + * Used for stopping/starting specific LLDP agent. e.g. DCBx + */ +struct i40e_aqc_lldp_stop_start_specific_agent { +#define I40E_AQC_START_SPECIFIC_AGENT_SHIFT 0 +#define I40E_AQC_START_SPECIFIC_AGENT_MASK \ + (1 << I40E_AQC_START_SPECIFIC_AGENT_SHIFT) + u8 command; + u8 reserved[15]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop_start_specific_agent); + +/* Restore LLDP Agent factory settings (direct 0x0A0A) */ +struct i40e_aqc_lldp_restore { + u8 command; +#define I40E_AQ_LLDP_AGENT_RESTORE_NOT 0x0 +#define I40E_AQ_LLDP_AGENT_RESTORE 0x1 + u8 reserved[15]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_restore); + +/* Add Udp Tunnel command and completion (direct 0x0B00) */ +struct i40e_aqc_add_udp_tunnel { + __le16 udp_port; + u8 reserved0[3]; + u8 protocol_type; +#define I40E_AQC_TUNNEL_TYPE_VXLAN 0x00 +#define I40E_AQC_TUNNEL_TYPE_NGE 0x01 +#define I40E_AQC_TUNNEL_TYPE_TEREDO 0x10 +#define I40E_AQC_TUNNEL_TYPE_VXLAN_GPE 0x11 + u8 reserved1[10]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel); + +struct i40e_aqc_add_udp_tunnel_completion { + __le16 udp_port; + u8 filter_entry_index; + u8 multiple_pfs; +#define I40E_AQC_SINGLE_PF 0x0 +#define I40E_AQC_MULTIPLE_PFS 0x1 + u8 total_filters; + u8 reserved[11]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel_completion); + +/* remove UDP Tunnel command (0x0B01) */ +struct i40e_aqc_remove_udp_tunnel { + u8 reserved[2]; + u8 index; /* 0 to 15 */ + u8 reserved2[13]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_udp_tunnel); + +struct i40e_aqc_del_udp_tunnel_completion { + __le16 udp_port; + u8 index; /* 0 to 15 */ + u8 multiple_pfs; + u8 total_filters_used; + u8 reserved1[11]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion); + +struct i40e_aqc_get_set_rss_key { +#define I40E_AQC_SET_RSS_KEY_VSI_VALID (0x1 << 15) +#define I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT 0 +#define I40E_AQC_SET_RSS_KEY_VSI_ID_MASK (0x3FF << \ + I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) + __le16 vsi_id; + u8 reserved[6]; + __le32 addr_high; + __le32 addr_low; +}; + 
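+/* Editorial sketch, not part of the upstream header: the vsi_id field above
+ * carries a 10-bit VSI index plus a valid bit, packed as
+ *
+ *	vsi_id = ((fw_vsi_id << I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) &
+ *		  I40E_AQC_SET_RSS_KEY_VSI_ID_MASK) |
+ *		 I40E_AQC_SET_RSS_KEY_VSI_VALID;
+ *
+ * (fw_vsi_id is a hypothetical caller variable; i40e_aq_get_set_rss_key()
+ * later in this patch builds the field exactly this way, via CPU_TO_LE16.)
+ */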
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_key); + +struct i40e_aqc_get_set_rss_key_data { + u8 standard_rss_key[0x28]; + u8 extended_hash_key[0xc]; +}; + +I40E_CHECK_STRUCT_LEN(0x34, i40e_aqc_get_set_rss_key_data); + +struct i40e_aqc_get_set_rss_lut { +#define I40E_AQC_SET_RSS_LUT_VSI_VALID (0x1 << 15) +#define I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT 0 +#define I40E_AQC_SET_RSS_LUT_VSI_ID_MASK (0x3FF << \ + I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) + __le16 vsi_id; +#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT 0 +#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK (0x1 << \ + I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) + +#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI 0 +#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF 1 + __le16 flags; + u8 reserved[4]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_lut); + +/* tunnel key structure 0x0B10 */ + +struct i40e_aqc_tunnel_key_structure { + u8 key1_off; + u8 key2_off; + u8 key1_len; /* 0 to 15 */ + u8 key2_len; /* 0 to 15 */ + u8 flags; +#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDE 0x01 +/* response flags */ +#define I40E_AQC_TUNNEL_KEY_STRUCT_SUCCESS 0x01 +#define I40E_AQC_TUNNEL_KEY_STRUCT_MODIFIED 0x02 +#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN 0x03 + u8 network_key_index; +#define I40E_AQC_NETWORK_KEY_INDEX_VXLAN 0x0 +#define I40E_AQC_NETWORK_KEY_INDEX_NGE 0x1 +#define I40E_AQC_NETWORK_KEY_INDEX_FLEX_MAC_IN_UDP 0x2 +#define I40E_AQC_NETWORK_KEY_INDEX_GRE 0x3 + u8 reserved[10]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_tunnel_key_structure); + +/* OEM mode commands (direct 0xFE0x) */ +struct i40e_aqc_oem_param_change { + __le32 param_type; +#define I40E_AQ_OEM_PARAM_TYPE_PF_CTL 0 +#define I40E_AQ_OEM_PARAM_TYPE_BW_CTL 1 +#define I40E_AQ_OEM_PARAM_MAC 2 + __le32 param_value1; + __le16 param_value2; + u8 reserved[6]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_param_change); + +struct i40e_aqc_oem_state_change { + __le32 state; +#define I40E_AQ_OEM_STATE_LINK_DOWN 0x0 +#define I40E_AQ_OEM_STATE_LINK_UP 0x1 + u8 reserved[12]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_state_change); + +/* Initialize OCSD (0xFE02, direct) */ +struct i40e_aqc_opc_oem_ocsd_initialize { + u8 type_status; + u8 reserved1[3]; + __le32 ocsd_memory_block_addr_high; + __le32 ocsd_memory_block_addr_low; + __le32 requested_update_interval; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_opc_oem_ocsd_initialize); + +/* Initialize OCBB (0xFE03, direct) */ +struct i40e_aqc_opc_oem_ocbb_initialize { + u8 type_status; + u8 reserved1[3]; + __le32 ocbb_memory_block_addr_high; + __le32 ocbb_memory_block_addr_low; + u8 reserved2[4]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_opc_oem_ocbb_initialize); + +/* debug commands */ + +/* get device id (0xFF00) uses the generic structure */ + +/* set test more (0xFF01, internal) */ + +struct i40e_acq_set_test_mode { + u8 mode; +#define I40E_AQ_TEST_PARTIAL 0 +#define I40E_AQ_TEST_FULL 1 +#define I40E_AQ_TEST_NVM 2 + u8 reserved[3]; + u8 command; +#define I40E_AQ_TEST_OPEN 0 +#define I40E_AQ_TEST_CLOSE 1 +#define I40E_AQ_TEST_INC 2 + u8 reserved2[3]; + __le32 address_high; + __le32 address_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_acq_set_test_mode); + +/* Debug Read Register command (0xFF03) + * Debug Write Register command (0xFF04) + */ +struct i40e_aqc_debug_reg_read_write { + __le32 reserved; + __le32 address; + __le32 value_high; + __le32 value_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_reg_read_write); + +/* Scatter/gather Reg Read (indirect 0xFF05) + * Scatter/gather Reg Write (indirect 0xFF06) + */ + +/* i40e_aq_desc is used for the 
command */ +struct i40e_aqc_debug_reg_sg_element_data { + __le32 address; + __le32 value; +}; + +/* Debug Modify register (direct 0xFF07) */ +struct i40e_aqc_debug_modify_reg { + __le32 address; + __le32 value; + __le32 clear_mask; + __le32 set_mask; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_reg); + +/* dump internal data (0xFF08, indirect) */ + +#define I40E_AQ_CLUSTER_ID_AUX 0 +#define I40E_AQ_CLUSTER_ID_SWITCH_FLU 1 +#define I40E_AQ_CLUSTER_ID_TXSCHED 2 +#define I40E_AQ_CLUSTER_ID_HMC 3 +#define I40E_AQ_CLUSTER_ID_MAC0 4 +#define I40E_AQ_CLUSTER_ID_MAC1 5 +#define I40E_AQ_CLUSTER_ID_MAC2 6 +#define I40E_AQ_CLUSTER_ID_MAC3 7 +#define I40E_AQ_CLUSTER_ID_DCB 8 +#define I40E_AQ_CLUSTER_ID_EMP_MEM 9 +#define I40E_AQ_CLUSTER_ID_PKT_BUF 10 +#define I40E_AQ_CLUSTER_ID_ALTRAM 11 + +struct i40e_aqc_debug_dump_internals { + u8 cluster_id; + u8 table_id; + __le16 data_size; + __le32 idx; + __le32 address_high; + __le32 address_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_dump_internals); + +struct i40e_aqc_debug_modify_internals { + u8 cluster_id; + u8 cluster_specific_params[7]; + __le32 address_high; + __le32 address_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_internals); + +#endif /* _I40E_ADMINQ_CMD_H_ */ diff --git a/src/spdk/dpdk/drivers/net/i40e/base/i40e_alloc.h b/src/spdk/dpdk/drivers/net/i40e/base/i40e_alloc.h new file mode 100644 index 000000000..ae14e4d93 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/i40e/base/i40e_alloc.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + +#ifndef _I40E_ALLOC_H_ +#define _I40E_ALLOC_H_ + +struct i40e_hw; + +/* Memory allocation types */ +enum i40e_memory_type { + i40e_mem_arq_buf = 0, /* ARQ indirect command buffer */ + i40e_mem_asq_buf = 1, + i40e_mem_atq_buf = 2, /* ATQ indirect command buffer */ + i40e_mem_arq_ring = 3, /* ARQ descriptor ring */ + i40e_mem_atq_ring = 4, /* ATQ descriptor ring */ + i40e_mem_pd = 5, /* Page Descriptor */ + i40e_mem_bp = 6, /* Backing Page - 4KB */ + i40e_mem_bp_jumbo = 7, /* Backing Page - > 4KB */ + i40e_mem_reserved +}; + +/* prototype for functions used for dynamic memory allocation */ +enum i40e_status_code i40e_allocate_dma_mem(struct i40e_hw *hw, + struct i40e_dma_mem *mem, + enum i40e_memory_type type, + u64 size, u32 alignment); +enum i40e_status_code i40e_free_dma_mem(struct i40e_hw *hw, + struct i40e_dma_mem *mem); +enum i40e_status_code i40e_allocate_virt_mem(struct i40e_hw *hw, + struct i40e_virt_mem *mem, + u32 size); +enum i40e_status_code i40e_free_virt_mem(struct i40e_hw *hw, + struct i40e_virt_mem *mem); + +#endif /* _I40E_ALLOC_H_ */ diff --git a/src/spdk/dpdk/drivers/net/i40e/base/i40e_common.c b/src/spdk/dpdk/drivers/net/i40e/base/i40e_common.c new file mode 100644 index 000000000..46a0b7881 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/i40e/base/i40e_common.c @@ -0,0 +1,8209 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + +#include "i40e_type.h" +#include "i40e_adminq.h" +#include "i40e_prototype.h" +#include "virtchnl.h" + +/** + * i40e_set_mac_type - Sets MAC type + * @hw: pointer to the HW structure + * + * This function sets the mac type of the adapter based on the + * vendor ID and device ID stored in the hw structure. 
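+ *
+ * (Editorial note, not part of the upstream comment: for example, XL710-family
+ * device IDs such as I40E_DEV_ID_SFP_XL710 map to I40E_MAC_XL710, X722-family
+ * IDs such as I40E_DEV_ID_SFP_X722 map to I40E_MAC_X722, and unrecognised
+ * Intel device IDs fall back to I40E_MAC_GENERIC, as the switch below shows.)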
+ **/ +enum i40e_status_code i40e_set_mac_type(struct i40e_hw *hw) +{ + enum i40e_status_code status = I40E_SUCCESS; + + DEBUGFUNC("i40e_set_mac_type\n"); + + if (hw->vendor_id == I40E_INTEL_VENDOR_ID) { + switch (hw->device_id) { + case I40E_DEV_ID_SFP_XL710: + case I40E_DEV_ID_QEMU: + case I40E_DEV_ID_KX_B: + case I40E_DEV_ID_KX_C: + case I40E_DEV_ID_QSFP_A: + case I40E_DEV_ID_QSFP_B: + case I40E_DEV_ID_QSFP_C: + case I40E_DEV_ID_10G_BASE_T: + case I40E_DEV_ID_10G_BASE_T4: + case I40E_DEV_ID_10G_BASE_T_BC: + case I40E_DEV_ID_10G_B: + case I40E_DEV_ID_10G_SFP: + case I40E_DEV_ID_5G_BASE_T_BC: + case I40E_DEV_ID_20G_KR2: + case I40E_DEV_ID_20G_KR2_A: + case I40E_DEV_ID_25G_B: + case I40E_DEV_ID_25G_SFP28: + case I40E_DEV_ID_X710_N3000: + case I40E_DEV_ID_XXV710_N3000: + hw->mac.type = I40E_MAC_XL710; + break; +#ifdef X722_A0_SUPPORT + case I40E_DEV_ID_X722_A0: +#endif + case I40E_DEV_ID_KX_X722: + case I40E_DEV_ID_QSFP_X722: + case I40E_DEV_ID_SFP_X722: + case I40E_DEV_ID_1G_BASE_T_X722: + case I40E_DEV_ID_10G_BASE_T_X722: + case I40E_DEV_ID_SFP_I_X722: + hw->mac.type = I40E_MAC_X722; + break; +#if defined(INTEGRATED_VF) || defined(VF_DRIVER) + case I40E_DEV_ID_X722_VF: +#ifdef X722_A0_SUPPORT + case I40E_DEV_ID_X722_A0_VF: +#endif + hw->mac.type = I40E_MAC_X722_VF; + break; +#endif /* INTEGRATED_VF || VF_DRIVER */ +#if defined(INTEGRATED_VF) || defined(VF_DRIVER) + case I40E_DEV_ID_VF: + case I40E_DEV_ID_VF_HV: + case I40E_DEV_ID_ADAPTIVE_VF: + hw->mac.type = I40E_MAC_VF; + break; +#endif + default: + hw->mac.type = I40E_MAC_GENERIC; + break; + } + } else { + status = I40E_ERR_DEVICE_NOT_SUPPORTED; + } + + DEBUGOUT2("i40e_set_mac_type found mac: %d, returns: %d\n", + hw->mac.type, status); + return status; +} + +/** + * i40e_aq_str - convert AQ err code to a string + * @hw: pointer to the HW structure + * @aq_err: the AQ error code to convert + **/ +const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err) +{ + switch (aq_err) { + case I40E_AQ_RC_OK: + return "OK"; + case I40E_AQ_RC_EPERM: + return "I40E_AQ_RC_EPERM"; + case I40E_AQ_RC_ENOENT: + return "I40E_AQ_RC_ENOENT"; + case I40E_AQ_RC_ESRCH: + return "I40E_AQ_RC_ESRCH"; + case I40E_AQ_RC_EINTR: + return "I40E_AQ_RC_EINTR"; + case I40E_AQ_RC_EIO: + return "I40E_AQ_RC_EIO"; + case I40E_AQ_RC_ENXIO: + return "I40E_AQ_RC_ENXIO"; + case I40E_AQ_RC_E2BIG: + return "I40E_AQ_RC_E2BIG"; + case I40E_AQ_RC_EAGAIN: + return "I40E_AQ_RC_EAGAIN"; + case I40E_AQ_RC_ENOMEM: + return "I40E_AQ_RC_ENOMEM"; + case I40E_AQ_RC_EACCES: + return "I40E_AQ_RC_EACCES"; + case I40E_AQ_RC_EFAULT: + return "I40E_AQ_RC_EFAULT"; + case I40E_AQ_RC_EBUSY: + return "I40E_AQ_RC_EBUSY"; + case I40E_AQ_RC_EEXIST: + return "I40E_AQ_RC_EEXIST"; + case I40E_AQ_RC_EINVAL: + return "I40E_AQ_RC_EINVAL"; + case I40E_AQ_RC_ENOTTY: + return "I40E_AQ_RC_ENOTTY"; + case I40E_AQ_RC_ENOSPC: + return "I40E_AQ_RC_ENOSPC"; + case I40E_AQ_RC_ENOSYS: + return "I40E_AQ_RC_ENOSYS"; + case I40E_AQ_RC_ERANGE: + return "I40E_AQ_RC_ERANGE"; + case I40E_AQ_RC_EFLUSHED: + return "I40E_AQ_RC_EFLUSHED"; + case I40E_AQ_RC_BAD_ADDR: + return "I40E_AQ_RC_BAD_ADDR"; + case I40E_AQ_RC_EMODE: + return "I40E_AQ_RC_EMODE"; + case I40E_AQ_RC_EFBIG: + return "I40E_AQ_RC_EFBIG"; + } + + snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err); + return hw->err_str; +} + +/** + * i40e_stat_str - convert status err code to a string + * @hw: pointer to the HW structure + * @stat_err: the status error code to convert + **/ +const char *i40e_stat_str(struct i40e_hw *hw, enum i40e_status_code 
stat_err) +{ + switch (stat_err) { + case I40E_SUCCESS: + return "OK"; + case I40E_ERR_NVM: + return "I40E_ERR_NVM"; + case I40E_ERR_NVM_CHECKSUM: + return "I40E_ERR_NVM_CHECKSUM"; + case I40E_ERR_PHY: + return "I40E_ERR_PHY"; + case I40E_ERR_CONFIG: + return "I40E_ERR_CONFIG"; + case I40E_ERR_PARAM: + return "I40E_ERR_PARAM"; + case I40E_ERR_MAC_TYPE: + return "I40E_ERR_MAC_TYPE"; + case I40E_ERR_UNKNOWN_PHY: + return "I40E_ERR_UNKNOWN_PHY"; + case I40E_ERR_LINK_SETUP: + return "I40E_ERR_LINK_SETUP"; + case I40E_ERR_ADAPTER_STOPPED: + return "I40E_ERR_ADAPTER_STOPPED"; + case I40E_ERR_INVALID_MAC_ADDR: + return "I40E_ERR_INVALID_MAC_ADDR"; + case I40E_ERR_DEVICE_NOT_SUPPORTED: + return "I40E_ERR_DEVICE_NOT_SUPPORTED"; + case I40E_ERR_MASTER_REQUESTS_PENDING: + return "I40E_ERR_MASTER_REQUESTS_PENDING"; + case I40E_ERR_INVALID_LINK_SETTINGS: + return "I40E_ERR_INVALID_LINK_SETTINGS"; + case I40E_ERR_AUTONEG_NOT_COMPLETE: + return "I40E_ERR_AUTONEG_NOT_COMPLETE"; + case I40E_ERR_RESET_FAILED: + return "I40E_ERR_RESET_FAILED"; + case I40E_ERR_SWFW_SYNC: + return "I40E_ERR_SWFW_SYNC"; + case I40E_ERR_NO_AVAILABLE_VSI: + return "I40E_ERR_NO_AVAILABLE_VSI"; + case I40E_ERR_NO_MEMORY: + return "I40E_ERR_NO_MEMORY"; + case I40E_ERR_BAD_PTR: + return "I40E_ERR_BAD_PTR"; + case I40E_ERR_RING_FULL: + return "I40E_ERR_RING_FULL"; + case I40E_ERR_INVALID_PD_ID: + return "I40E_ERR_INVALID_PD_ID"; + case I40E_ERR_INVALID_QP_ID: + return "I40E_ERR_INVALID_QP_ID"; + case I40E_ERR_INVALID_CQ_ID: + return "I40E_ERR_INVALID_CQ_ID"; + case I40E_ERR_INVALID_CEQ_ID: + return "I40E_ERR_INVALID_CEQ_ID"; + case I40E_ERR_INVALID_AEQ_ID: + return "I40E_ERR_INVALID_AEQ_ID"; + case I40E_ERR_INVALID_SIZE: + return "I40E_ERR_INVALID_SIZE"; + case I40E_ERR_INVALID_ARP_INDEX: + return "I40E_ERR_INVALID_ARP_INDEX"; + case I40E_ERR_INVALID_FPM_FUNC_ID: + return "I40E_ERR_INVALID_FPM_FUNC_ID"; + case I40E_ERR_QP_INVALID_MSG_SIZE: + return "I40E_ERR_QP_INVALID_MSG_SIZE"; + case I40E_ERR_QP_TOOMANY_WRS_POSTED: + return "I40E_ERR_QP_TOOMANY_WRS_POSTED"; + case I40E_ERR_INVALID_FRAG_COUNT: + return "I40E_ERR_INVALID_FRAG_COUNT"; + case I40E_ERR_QUEUE_EMPTY: + return "I40E_ERR_QUEUE_EMPTY"; + case I40E_ERR_INVALID_ALIGNMENT: + return "I40E_ERR_INVALID_ALIGNMENT"; + case I40E_ERR_FLUSHED_QUEUE: + return "I40E_ERR_FLUSHED_QUEUE"; + case I40E_ERR_INVALID_PUSH_PAGE_INDEX: + return "I40E_ERR_INVALID_PUSH_PAGE_INDEX"; + case I40E_ERR_INVALID_IMM_DATA_SIZE: + return "I40E_ERR_INVALID_IMM_DATA_SIZE"; + case I40E_ERR_TIMEOUT: + return "I40E_ERR_TIMEOUT"; + case I40E_ERR_OPCODE_MISMATCH: + return "I40E_ERR_OPCODE_MISMATCH"; + case I40E_ERR_CQP_COMPL_ERROR: + return "I40E_ERR_CQP_COMPL_ERROR"; + case I40E_ERR_INVALID_VF_ID: + return "I40E_ERR_INVALID_VF_ID"; + case I40E_ERR_INVALID_HMCFN_ID: + return "I40E_ERR_INVALID_HMCFN_ID"; + case I40E_ERR_BACKING_PAGE_ERROR: + return "I40E_ERR_BACKING_PAGE_ERROR"; + case I40E_ERR_NO_PBLCHUNKS_AVAILABLE: + return "I40E_ERR_NO_PBLCHUNKS_AVAILABLE"; + case I40E_ERR_INVALID_PBLE_INDEX: + return "I40E_ERR_INVALID_PBLE_INDEX"; + case I40E_ERR_INVALID_SD_INDEX: + return "I40E_ERR_INVALID_SD_INDEX"; + case I40E_ERR_INVALID_PAGE_DESC_INDEX: + return "I40E_ERR_INVALID_PAGE_DESC_INDEX"; + case I40E_ERR_INVALID_SD_TYPE: + return "I40E_ERR_INVALID_SD_TYPE"; + case I40E_ERR_MEMCPY_FAILED: + return "I40E_ERR_MEMCPY_FAILED"; + case I40E_ERR_INVALID_HMC_OBJ_INDEX: + return "I40E_ERR_INVALID_HMC_OBJ_INDEX"; + case I40E_ERR_INVALID_HMC_OBJ_COUNT: + return "I40E_ERR_INVALID_HMC_OBJ_COUNT"; + case 
I40E_ERR_INVALID_SRQ_ARM_LIMIT: + return "I40E_ERR_INVALID_SRQ_ARM_LIMIT"; + case I40E_ERR_SRQ_ENABLED: + return "I40E_ERR_SRQ_ENABLED"; + case I40E_ERR_ADMIN_QUEUE_ERROR: + return "I40E_ERR_ADMIN_QUEUE_ERROR"; + case I40E_ERR_ADMIN_QUEUE_TIMEOUT: + return "I40E_ERR_ADMIN_QUEUE_TIMEOUT"; + case I40E_ERR_BUF_TOO_SHORT: + return "I40E_ERR_BUF_TOO_SHORT"; + case I40E_ERR_ADMIN_QUEUE_FULL: + return "I40E_ERR_ADMIN_QUEUE_FULL"; + case I40E_ERR_ADMIN_QUEUE_NO_WORK: + return "I40E_ERR_ADMIN_QUEUE_NO_WORK"; + case I40E_ERR_BAD_IWARP_CQE: + return "I40E_ERR_BAD_IWARP_CQE"; + case I40E_ERR_NVM_BLANK_MODE: + return "I40E_ERR_NVM_BLANK_MODE"; + case I40E_ERR_NOT_IMPLEMENTED: + return "I40E_ERR_NOT_IMPLEMENTED"; + case I40E_ERR_PE_DOORBELL_NOT_ENABLED: + return "I40E_ERR_PE_DOORBELL_NOT_ENABLED"; + case I40E_ERR_DIAG_TEST_FAILED: + return "I40E_ERR_DIAG_TEST_FAILED"; + case I40E_ERR_NOT_READY: + return "I40E_ERR_NOT_READY"; + case I40E_NOT_SUPPORTED: + return "I40E_NOT_SUPPORTED"; + case I40E_ERR_FIRMWARE_API_VERSION: + return "I40E_ERR_FIRMWARE_API_VERSION"; + case I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR: + return "I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR"; + } + + snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err); + return hw->err_str; +} + +/** + * i40e_debug_aq + * @hw: debug mask related to admin queue + * @mask: debug mask + * @desc: pointer to admin queue descriptor + * @buffer: pointer to command buffer + * @buf_len: max length of buffer + * + * Dumps debug log about adminq command with descriptor contents. + **/ +void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc, + void *buffer, u16 buf_len) +{ + struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc; + u32 effective_mask = hw->debug_mask & mask; + u8 *buf = (u8 *)buffer; + u16 len; + u16 i; + + if (!effective_mask || !desc) + return; + + len = LE16_TO_CPU(aq_desc->datalen); + + i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR, + "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n", + LE16_TO_CPU(aq_desc->opcode), + LE16_TO_CPU(aq_desc->flags), + LE16_TO_CPU(aq_desc->datalen), + LE16_TO_CPU(aq_desc->retval)); + i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR, + "\tcookie (h,l) 0x%08X 0x%08X\n", + LE32_TO_CPU(aq_desc->cookie_high), + LE32_TO_CPU(aq_desc->cookie_low)); + i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR, + "\tparam (0,1) 0x%08X 0x%08X\n", + LE32_TO_CPU(aq_desc->params.internal.param0), + LE32_TO_CPU(aq_desc->params.internal.param1)); + i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR, + "\taddr (h,l) 0x%08X 0x%08X\n", + LE32_TO_CPU(aq_desc->params.external.addr_high), + LE32_TO_CPU(aq_desc->params.external.addr_low)); + + if (buffer && (buf_len != 0) && (len != 0) && + (effective_mask & I40E_DEBUG_AQ_DESC_BUFFER)) { + i40e_debug(hw, mask, "AQ CMD Buffer:\n"); + if (buf_len < len) + len = buf_len; + /* write the full 16-byte chunks */ + for (i = 0; i < (len - 16); i += 16) + i40e_debug(hw, mask, + "\t0x%04X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n", + i, buf[i], buf[i+1], buf[i+2], buf[i+3], + buf[i+4], buf[i+5], buf[i+6], buf[i+7], + buf[i+8], buf[i+9], buf[i+10], buf[i+11], + buf[i+12], buf[i+13], buf[i+14], buf[i+15]); + /* the most we could have left is 16 bytes, pad with zeros */ + if (i < len) { + char d_buf[16]; + int j, i_sav; + + i_sav = i; + memset(d_buf, 0, sizeof(d_buf)); + for (j = 0; i < len; j++, i++) + d_buf[j] = buf[i]; + i40e_debug(hw, mask, + "\t0x%04X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X 
%02X\n", + i_sav, d_buf[0], d_buf[1], d_buf[2], d_buf[3], + d_buf[4], d_buf[5], d_buf[6], d_buf[7], + d_buf[8], d_buf[9], d_buf[10], d_buf[11], + d_buf[12], d_buf[13], d_buf[14], d_buf[15]); + } + } +} + +/** + * i40e_check_asq_alive + * @hw: pointer to the hw struct + * + * Returns true if Queue is enabled else false. + **/ +bool i40e_check_asq_alive(struct i40e_hw *hw) +{ + if (hw->aq.asq.len) +#ifdef PF_DRIVER +#ifdef INTEGRATED_VF + if (!i40e_is_vf(hw)) + return !!(rd32(hw, hw->aq.asq.len) & + I40E_PF_ATQLEN_ATQENABLE_MASK); +#else + return !!(rd32(hw, hw->aq.asq.len) & + I40E_PF_ATQLEN_ATQENABLE_MASK); +#endif /* INTEGRATED_VF */ +#endif /* PF_DRIVER */ +#ifdef VF_DRIVER +#ifdef INTEGRATED_VF + if (i40e_is_vf(hw)) + return !!(rd32(hw, hw->aq.asq.len) & + I40E_VF_ATQLEN1_ATQENABLE_MASK); +#else + return !!(rd32(hw, hw->aq.asq.len) & + I40E_VF_ATQLEN1_ATQENABLE_MASK); +#endif /* INTEGRATED_VF */ +#endif /* VF_DRIVER */ + return false; +} + +/** + * i40e_aq_queue_shutdown + * @hw: pointer to the hw struct + * @unloading: is the driver unloading itself + * + * Tell the Firmware that we're shutting down the AdminQ and whether + * or not the driver is unloading as well. + **/ +enum i40e_status_code i40e_aq_queue_shutdown(struct i40e_hw *hw, + bool unloading) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_queue_shutdown *cmd = + (struct i40e_aqc_queue_shutdown *)&desc.params.raw; + enum i40e_status_code status; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_queue_shutdown); + + if (unloading) + cmd->driver_unloading = CPU_TO_LE32(I40E_AQ_DRIVER_UNLOADING); + status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL); + + return status; +} + +/** + * i40e_aq_get_set_rss_lut + * @hw: pointer to the hardware structure + * @vsi_id: vsi fw index + * @pf_lut: for PF table set true, for VSI table set false + * @lut: pointer to the lut buffer provided by the caller + * @lut_size: size of the lut buffer + * @set: set true to set the table, false to get the table + * + * Internal function to get or set RSS look up table + **/ +STATIC enum i40e_status_code i40e_aq_get_set_rss_lut(struct i40e_hw *hw, + u16 vsi_id, bool pf_lut, + u8 *lut, u16 lut_size, + bool set) +{ + enum i40e_status_code status; + struct i40e_aq_desc desc; + struct i40e_aqc_get_set_rss_lut *cmd_resp = + (struct i40e_aqc_get_set_rss_lut *)&desc.params.raw; + + if (set) + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_set_rss_lut); + else + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_get_rss_lut); + + /* Indirect command */ + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD); + + cmd_resp->vsi_id = + CPU_TO_LE16((u16)((vsi_id << + I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) & + I40E_AQC_SET_RSS_LUT_VSI_ID_MASK)); + cmd_resp->vsi_id |= CPU_TO_LE16((u16)I40E_AQC_SET_RSS_LUT_VSI_VALID); + + if (pf_lut) + cmd_resp->flags |= CPU_TO_LE16((u16) + ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF << + I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) & + I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK)); + else + cmd_resp->flags |= CPU_TO_LE16((u16) + ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI << + I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) & + I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK)); + + status = i40e_asq_send_command(hw, &desc, lut, lut_size, NULL); + + return status; +} + +/** + * i40e_aq_get_rss_lut + * @hw: pointer to the hardware structure + * @vsi_id: vsi fw index + * @pf_lut: for PF table set true, for VSI table set false + * @lut: pointer to the lut buffer provided by the caller + * @lut_size: size of the 
lut buffer + * + * get the RSS lookup table, PF or VSI type + **/ +enum i40e_status_code i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id, + bool pf_lut, u8 *lut, u16 lut_size) +{ + return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, + false); +} + +/** + * i40e_aq_set_rss_lut + * @hw: pointer to the hardware structure + * @vsi_id: vsi fw index + * @pf_lut: for PF table set true, for VSI table set false + * @lut: pointer to the lut buffer provided by the caller + * @lut_size: size of the lut buffer + * + * set the RSS lookup table, PF or VSI type + **/ +enum i40e_status_code i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id, + bool pf_lut, u8 *lut, u16 lut_size) +{ + return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true); +} + +/** + * i40e_aq_get_set_rss_key + * @hw: pointer to the hw struct + * @vsi_id: vsi fw index + * @key: pointer to key info struct + * @set: set true to set the key, false to get the key + * + * get the RSS key per VSI + **/ +STATIC enum i40e_status_code i40e_aq_get_set_rss_key(struct i40e_hw *hw, + u16 vsi_id, + struct i40e_aqc_get_set_rss_key_data *key, + bool set) +{ + enum i40e_status_code status; + struct i40e_aq_desc desc; + struct i40e_aqc_get_set_rss_key *cmd_resp = + (struct i40e_aqc_get_set_rss_key *)&desc.params.raw; + u16 key_size = sizeof(struct i40e_aqc_get_set_rss_key_data); + + if (set) + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_set_rss_key); + else + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_get_rss_key); + + /* Indirect command */ + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD); + + cmd_resp->vsi_id = + CPU_TO_LE16((u16)((vsi_id << + I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) & + I40E_AQC_SET_RSS_KEY_VSI_ID_MASK)); + cmd_resp->vsi_id |= CPU_TO_LE16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID); + + status = i40e_asq_send_command(hw, &desc, key, key_size, NULL); + + return status; +} + +/** + * i40e_aq_get_rss_key + * @hw: pointer to the hw struct + * @vsi_id: vsi fw index + * @key: pointer to key info struct + * + **/ +enum i40e_status_code i40e_aq_get_rss_key(struct i40e_hw *hw, + u16 vsi_id, + struct i40e_aqc_get_set_rss_key_data *key) +{ + return i40e_aq_get_set_rss_key(hw, vsi_id, key, false); +} + +/** + * i40e_aq_set_rss_key + * @hw: pointer to the hw struct + * @vsi_id: vsi fw index + * @key: pointer to key info struct + * + * set the RSS key per VSI + **/ +enum i40e_status_code i40e_aq_set_rss_key(struct i40e_hw *hw, + u16 vsi_id, + struct i40e_aqc_get_set_rss_key_data *key) +{ + return i40e_aq_get_set_rss_key(hw, vsi_id, key, true); +} + +/* The i40e_ptype_lookup table is used to convert from the 8-bit ptype in the + * hardware to a bit-field that can be used by SW to more easily determine the + * packet type. + * + * Macros are used to shorten the table lines and make this table human + * readable. + * + * We store the PTYPE in the top byte of the bit field - this is just so that + * we can check that the table doesn't have a row missing, as the index into + * the table should be the PTYPE. 
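+ * Rows for PTYPEs that the hardware never produces are filled with I40E_PTT_UNUSED_ENTRY, whose 'known' field is zero, so lookups on them take the unknown-packet branch of the work flow below.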
+ * + * Typical work flow: + * + * IF NOT i40e_ptype_lookup[ptype].known + * THEN + * Packet is unknown + * ELSE IF i40e_ptype_lookup[ptype].outer_ip == I40E_RX_PTYPE_OUTER_IP + * Use the rest of the fields to look at the tunnels, inner protocols, etc + * ELSE + * Use the enum i40e_rx_l2_ptype to decode the packet type + * ENDIF + */ + +/* macro to make the table lines short */ +#define I40E_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\ + { PTYPE, \ + 1, \ + I40E_RX_PTYPE_OUTER_##OUTER_IP, \ + I40E_RX_PTYPE_OUTER_##OUTER_IP_VER, \ + I40E_RX_PTYPE_##OUTER_FRAG, \ + I40E_RX_PTYPE_TUNNEL_##T, \ + I40E_RX_PTYPE_TUNNEL_END_##TE, \ + I40E_RX_PTYPE_##TEF, \ + I40E_RX_PTYPE_INNER_PROT_##I, \ + I40E_RX_PTYPE_PAYLOAD_LAYER_##PL } + +#define I40E_PTT_UNUSED_ENTRY(PTYPE) \ + { PTYPE, 0, 0, 0, 0, 0, 0, 0, 0, 0 } + +/* shorter macros makes the table fit but are terse */ +#define I40E_RX_PTYPE_NOF I40E_RX_PTYPE_NOT_FRAG +#define I40E_RX_PTYPE_FRG I40E_RX_PTYPE_FRAG +#define I40E_RX_PTYPE_INNER_PROT_TS I40E_RX_PTYPE_INNER_PROT_TIMESYNC + +/* Lookup table mapping the HW PTYPE to the bit field for decoding */ +struct i40e_rx_ptype_decoded i40e_ptype_lookup[] = { + /* L2 Packet types */ + I40E_PTT_UNUSED_ENTRY(0), + I40E_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), + I40E_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, TS, PAY2), + I40E_PTT(3, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), + I40E_PTT_UNUSED_ENTRY(4), + I40E_PTT_UNUSED_ENTRY(5), + I40E_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), + I40E_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), + I40E_PTT_UNUSED_ENTRY(8), + I40E_PTT_UNUSED_ENTRY(9), + I40E_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), + I40E_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), + I40E_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(17, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + + /* Non Tunneled IPv4 */ + I40E_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(25), + I40E_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4), + I40E_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4), + I40E_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4), + + /* IPv4 --> IPv4 */ + I40E_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3), + I40E_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3), + I40E_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(32), + I40E_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4), + I40E_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), + I40E_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), + + /* IPv4 --> IPv6 */ + I40E_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3), + I40E_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3), + I40E_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(39), + I40E_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4), + I40E_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), + 
I40E_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), + + /* IPv4 --> GRE/NAT */ + I40E_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), + + /* IPv4 --> GRE/NAT --> IPv4 */ + I40E_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), + I40E_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), + I40E_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(47), + I40E_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), + I40E_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), + I40E_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), + + /* IPv4 --> GRE/NAT --> IPv6 */ + I40E_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), + I40E_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), + I40E_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(54), + I40E_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), + I40E_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), + I40E_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), + + /* IPv4 --> GRE/NAT --> MAC */ + I40E_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), + + /* IPv4 --> GRE/NAT --> MAC --> IPv4 */ + I40E_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), + I40E_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), + I40E_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(62), + I40E_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), + I40E_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), + I40E_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), + + /* IPv4 --> GRE/NAT -> MAC --> IPv6 */ + I40E_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), + I40E_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), + I40E_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(69), + I40E_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), + I40E_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), + I40E_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), + + /* IPv4 --> GRE/NAT --> MAC/VLAN */ + I40E_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), + + /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */ + I40E_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), + I40E_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), + I40E_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(77), + I40E_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4), + I40E_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), + I40E_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), + + /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */ + I40E_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), + I40E_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), + I40E_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(84), + I40E_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), + I40E_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), + I40E_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), + + /* Non Tunneled IPv6 */ + I40E_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(91), + I40E_PTT(92, IP, IPV6, NOF, NONE, 
NONE, NOF, TCP, PAY4), + I40E_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4), + I40E_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4), + + /* IPv6 --> IPv4 */ + I40E_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3), + I40E_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3), + I40E_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(98), + I40E_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4), + I40E_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), + I40E_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), + + /* IPv6 --> IPv6 */ + I40E_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3), + I40E_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3), + I40E_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(105), + I40E_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4), + I40E_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), + I40E_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), + + /* IPv6 --> GRE/NAT */ + I40E_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), + + /* IPv6 --> GRE/NAT -> IPv4 */ + I40E_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), + I40E_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), + I40E_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(113), + I40E_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), + I40E_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), + I40E_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), + + /* IPv6 --> GRE/NAT -> IPv6 */ + I40E_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), + I40E_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), + I40E_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(120), + I40E_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), + I40E_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), + I40E_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), + + /* IPv6 --> GRE/NAT -> MAC */ + I40E_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), + + /* IPv6 --> GRE/NAT -> MAC -> IPv4 */ + I40E_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), + I40E_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), + I40E_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(128), + I40E_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), + I40E_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), + I40E_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), + + /* IPv6 --> GRE/NAT -> MAC -> IPv6 */ + I40E_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), + I40E_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), + I40E_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(135), + I40E_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), + I40E_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), + I40E_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), + + /* IPv6 --> GRE/NAT -> MAC/VLAN */ + I40E_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), + + /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */ + I40E_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), + I40E_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), + I40E_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(143), + I40E_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, 
NOF, TCP, PAY4), + I40E_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), + I40E_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), + + /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */ + I40E_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), + I40E_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), + I40E_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(150), + I40E_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), + I40E_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), + I40E_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), + + /* unused entries */ + I40E_PTT_UNUSED_ENTRY(154), + I40E_PTT_UNUSED_ENTRY(155), + I40E_PTT_UNUSED_ENTRY(156), + I40E_PTT_UNUSED_ENTRY(157), + I40E_PTT_UNUSED_ENTRY(158), + I40E_PTT_UNUSED_ENTRY(159), + + I40E_PTT_UNUSED_ENTRY(160), + I40E_PTT_UNUSED_ENTRY(161), + I40E_PTT_UNUSED_ENTRY(162), + I40E_PTT_UNUSED_ENTRY(163), + I40E_PTT_UNUSED_ENTRY(164), + I40E_PTT_UNUSED_ENTRY(165), + I40E_PTT_UNUSED_ENTRY(166), + I40E_PTT_UNUSED_ENTRY(167), + I40E_PTT_UNUSED_ENTRY(168), + I40E_PTT_UNUSED_ENTRY(169), + + I40E_PTT_UNUSED_ENTRY(170), + I40E_PTT_UNUSED_ENTRY(171), + I40E_PTT_UNUSED_ENTRY(172), + I40E_PTT_UNUSED_ENTRY(173), + I40E_PTT_UNUSED_ENTRY(174), + I40E_PTT_UNUSED_ENTRY(175), + I40E_PTT_UNUSED_ENTRY(176), + I40E_PTT_UNUSED_ENTRY(177), + I40E_PTT_UNUSED_ENTRY(178), + I40E_PTT_UNUSED_ENTRY(179), + + I40E_PTT_UNUSED_ENTRY(180), + I40E_PTT_UNUSED_ENTRY(181), + I40E_PTT_UNUSED_ENTRY(182), + I40E_PTT_UNUSED_ENTRY(183), + I40E_PTT_UNUSED_ENTRY(184), + I40E_PTT_UNUSED_ENTRY(185), + I40E_PTT_UNUSED_ENTRY(186), + I40E_PTT_UNUSED_ENTRY(187), + I40E_PTT_UNUSED_ENTRY(188), + I40E_PTT_UNUSED_ENTRY(189), + + I40E_PTT_UNUSED_ENTRY(190), + I40E_PTT_UNUSED_ENTRY(191), + I40E_PTT_UNUSED_ENTRY(192), + I40E_PTT_UNUSED_ENTRY(193), + I40E_PTT_UNUSED_ENTRY(194), + I40E_PTT_UNUSED_ENTRY(195), + I40E_PTT_UNUSED_ENTRY(196), + I40E_PTT_UNUSED_ENTRY(197), + I40E_PTT_UNUSED_ENTRY(198), + I40E_PTT_UNUSED_ENTRY(199), + + I40E_PTT_UNUSED_ENTRY(200), + I40E_PTT_UNUSED_ENTRY(201), + I40E_PTT_UNUSED_ENTRY(202), + I40E_PTT_UNUSED_ENTRY(203), + I40E_PTT_UNUSED_ENTRY(204), + I40E_PTT_UNUSED_ENTRY(205), + I40E_PTT_UNUSED_ENTRY(206), + I40E_PTT_UNUSED_ENTRY(207), + I40E_PTT_UNUSED_ENTRY(208), + I40E_PTT_UNUSED_ENTRY(209), + + I40E_PTT_UNUSED_ENTRY(210), + I40E_PTT_UNUSED_ENTRY(211), + I40E_PTT_UNUSED_ENTRY(212), + I40E_PTT_UNUSED_ENTRY(213), + I40E_PTT_UNUSED_ENTRY(214), + I40E_PTT_UNUSED_ENTRY(215), + I40E_PTT_UNUSED_ENTRY(216), + I40E_PTT_UNUSED_ENTRY(217), + I40E_PTT_UNUSED_ENTRY(218), + I40E_PTT_UNUSED_ENTRY(219), + + I40E_PTT_UNUSED_ENTRY(220), + I40E_PTT_UNUSED_ENTRY(221), + I40E_PTT_UNUSED_ENTRY(222), + I40E_PTT_UNUSED_ENTRY(223), + I40E_PTT_UNUSED_ENTRY(224), + I40E_PTT_UNUSED_ENTRY(225), + I40E_PTT_UNUSED_ENTRY(226), + I40E_PTT_UNUSED_ENTRY(227), + I40E_PTT_UNUSED_ENTRY(228), + I40E_PTT_UNUSED_ENTRY(229), + + I40E_PTT_UNUSED_ENTRY(230), + I40E_PTT_UNUSED_ENTRY(231), + I40E_PTT_UNUSED_ENTRY(232), + I40E_PTT_UNUSED_ENTRY(233), + I40E_PTT_UNUSED_ENTRY(234), + I40E_PTT_UNUSED_ENTRY(235), + I40E_PTT_UNUSED_ENTRY(236), + I40E_PTT_UNUSED_ENTRY(237), + I40E_PTT_UNUSED_ENTRY(238), + I40E_PTT_UNUSED_ENTRY(239), + + I40E_PTT_UNUSED_ENTRY(240), + I40E_PTT_UNUSED_ENTRY(241), + I40E_PTT_UNUSED_ENTRY(242), + I40E_PTT_UNUSED_ENTRY(243), + I40E_PTT_UNUSED_ENTRY(244), + I40E_PTT_UNUSED_ENTRY(245), + I40E_PTT_UNUSED_ENTRY(246), + 
I40E_PTT_UNUSED_ENTRY(247), + I40E_PTT_UNUSED_ENTRY(248), + I40E_PTT_UNUSED_ENTRY(249), + + I40E_PTT_UNUSED_ENTRY(250), + I40E_PTT_UNUSED_ENTRY(251), + I40E_PTT_UNUSED_ENTRY(252), + I40E_PTT_UNUSED_ENTRY(253), + I40E_PTT_UNUSED_ENTRY(254), + I40E_PTT_UNUSED_ENTRY(255) +}; + + +/** + * i40e_validate_mac_addr - Validate unicast MAC address + * @mac_addr: pointer to MAC address + * + * Tests a MAC address to ensure it is a valid Individual Address + **/ +enum i40e_status_code i40e_validate_mac_addr(u8 *mac_addr) +{ + enum i40e_status_code status = I40E_SUCCESS; + + DEBUGFUNC("i40e_validate_mac_addr"); + + /* Broadcast addresses ARE multicast addresses + * Make sure it is not a multicast address + * Reject the zero address + */ + if (I40E_IS_MULTICAST(mac_addr) || + (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 && + mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0)) + status = I40E_ERR_INVALID_MAC_ADDR; + + return status; +} +#ifdef PF_DRIVER + +/** + * i40e_init_shared_code - Initialize the shared code + * @hw: pointer to hardware structure + * + * This assigns the MAC type and PHY code and inits the NVM. + * Does not touch the hardware. This function must be called prior to any + * other function in the shared code. The i40e_hw structure should be + * memset to 0 prior to calling this function. The following fields in + * hw structure should be filled in prior to calling this function: + * hw_addr, back, device_id, vendor_id, subsystem_device_id, + * subsystem_vendor_id, and revision_id + **/ +enum i40e_status_code i40e_init_shared_code(struct i40e_hw *hw) +{ + enum i40e_status_code status = I40E_SUCCESS; + u32 port, ari, func_rid; + + DEBUGFUNC("i40e_init_shared_code"); + + i40e_set_mac_type(hw); + + switch (hw->mac.type) { + case I40E_MAC_XL710: + case I40E_MAC_X722: + break; + default: + return I40E_ERR_DEVICE_NOT_SUPPORTED; + } + + hw->phy.get_link_info = true; + + /* Determine port number and PF number*/ + port = (rd32(hw, I40E_PFGEN_PORTNUM) & I40E_PFGEN_PORTNUM_PORT_NUM_MASK) + >> I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT; + hw->port = (u8)port; + ari = (rd32(hw, I40E_GLPCI_CAPSUP) & I40E_GLPCI_CAPSUP_ARI_EN_MASK) >> + I40E_GLPCI_CAPSUP_ARI_EN_SHIFT; + func_rid = rd32(hw, I40E_PF_FUNC_RID); + if (ari) + hw->pf_id = (u8)(func_rid & 0xff); + else + hw->pf_id = (u8)(func_rid & 0x7); + + if (hw->mac.type == I40E_MAC_X722) + hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE | + I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK; + /* NVMUpdate features structure initialization */ + hw->nvmupd_features.major = I40E_NVMUPD_FEATURES_API_VER_MAJOR; + hw->nvmupd_features.minor = I40E_NVMUPD_FEATURES_API_VER_MINOR; + hw->nvmupd_features.size = sizeof(hw->nvmupd_features); + i40e_memset(hw->nvmupd_features.features, 0x0, + I40E_NVMUPD_FEATURES_API_FEATURES_ARRAY_LEN * + sizeof(*hw->nvmupd_features.features), + I40E_NONDMA_MEM); + + /* No features supported at the moment */ + hw->nvmupd_features.features[0] = 0; + + status = i40e_init_nvm(hw); + return status; +} + +/** + * i40e_aq_mac_address_read - Retrieve the MAC addresses + * @hw: pointer to the hw struct + * @flags: a return indicator of what addresses were added to the addr store + * @addrs: the requestor's mac addr store + * @cmd_details: pointer to command details structure or NULL + **/ +STATIC enum i40e_status_code i40e_aq_mac_address_read(struct i40e_hw *hw, + u16 *flags, + struct i40e_aqc_mac_address_read_data *addrs, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_mac_address_read *cmd_data = 
+ (struct i40e_aqc_mac_address_read *)&desc.params.raw; + enum i40e_status_code status; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_mac_address_read); + desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_BUF); + + status = i40e_asq_send_command(hw, &desc, addrs, + sizeof(*addrs), cmd_details); + *flags = LE16_TO_CPU(cmd_data->command_flags); + + return status; +} + +/** + * i40e_aq_mac_address_write - Change the MAC addresses + * @hw: pointer to the hw struct + * @flags: indicates which MAC to be written + * @mac_addr: address to write + * @cmd_details: pointer to command details structure or NULL + **/ +enum i40e_status_code i40e_aq_mac_address_write(struct i40e_hw *hw, + u16 flags, u8 *mac_addr, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_mac_address_write *cmd_data = + (struct i40e_aqc_mac_address_write *)&desc.params.raw; + enum i40e_status_code status; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_mac_address_write); + cmd_data->command_flags = CPU_TO_LE16(flags); + cmd_data->mac_sah = CPU_TO_LE16((u16)mac_addr[0] << 8 | mac_addr[1]); + cmd_data->mac_sal = CPU_TO_LE32(((u32)mac_addr[2] << 24) | + ((u32)mac_addr[3] << 16) | + ((u32)mac_addr[4] << 8) | + mac_addr[5]); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_get_mac_addr - get MAC address + * @hw: pointer to the HW structure + * @mac_addr: pointer to MAC address + * + * Reads the adapter's MAC address from register + **/ +enum i40e_status_code i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr) +{ + struct i40e_aqc_mac_address_read_data addrs; + enum i40e_status_code status; + u16 flags = 0; + + status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL); + + if (flags & I40E_AQC_LAN_ADDR_VALID) + i40e_memcpy(mac_addr, &addrs.pf_lan_mac, sizeof(addrs.pf_lan_mac), + I40E_NONDMA_TO_NONDMA); + + return status; +} + +/** + * i40e_get_port_mac_addr - get Port MAC address + * @hw: pointer to the HW structure + * @mac_addr: pointer to Port MAC address + * + * Reads the adapter's Port MAC address + **/ +enum i40e_status_code i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr) +{ + struct i40e_aqc_mac_address_read_data addrs; + enum i40e_status_code status; + u16 flags = 0; + + status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL); + if (status) + return status; + + if (flags & I40E_AQC_PORT_ADDR_VALID) + i40e_memcpy(mac_addr, &addrs.port_mac, sizeof(addrs.port_mac), + I40E_NONDMA_TO_NONDMA); + else + status = I40E_ERR_INVALID_MAC_ADDR; + + return status; +} + +/** + * i40e_pre_tx_queue_cfg - pre tx queue configure + * @hw: pointer to the HW structure + * @queue: target pf queue index + * @enable: state change request + * + * Handles hw requirement to indicate intention to enable + * or disable target queue. 
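+ * Queues are addressed in blocks of 128: the absolute queue index selects the I40E_GLLAN_TXPRE_QDIS register block and the QINDX within it, and the enable flag chooses between the CLEAR_QDIS and SET_QDIS bits.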
+ **/ +void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable) +{ + u32 abs_queue_idx = hw->func_caps.base_queue + queue; + u32 reg_block = 0; + u32 reg_val; + + if (abs_queue_idx >= 128) { + reg_block = abs_queue_idx / 128; + abs_queue_idx %= 128; + } + + reg_val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block)); + reg_val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK; + reg_val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT); + + if (enable) + reg_val |= I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK; + else + reg_val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK; + + wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), reg_val); +} + +/** + * i40e_get_san_mac_addr - get SAN MAC address + * @hw: pointer to the HW structure + * @mac_addr: pointer to SAN MAC address + * + * Reads the adapter's SAN MAC address from NVM + **/ +enum i40e_status_code i40e_get_san_mac_addr(struct i40e_hw *hw, + u8 *mac_addr) +{ + struct i40e_aqc_mac_address_read_data addrs; + enum i40e_status_code status; + u16 flags = 0; + + status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL); + if (status) + return status; + + if (flags & I40E_AQC_SAN_ADDR_VALID) + i40e_memcpy(mac_addr, &addrs.pf_san_mac, sizeof(addrs.pf_san_mac), + I40E_NONDMA_TO_NONDMA); + else + status = I40E_ERR_INVALID_MAC_ADDR; + + return status; +} + +/** + * i40e_read_pba_string - Reads part number string from EEPROM + * @hw: pointer to hardware structure + * @pba_num: stores the part number string from the EEPROM + * @pba_num_size: part number string buffer length + * + * Reads the part number string from the EEPROM. + **/ +enum i40e_status_code i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num, + u32 pba_num_size) +{ + enum i40e_status_code status = I40E_SUCCESS; + u16 pba_word = 0; + u16 pba_size = 0; + u16 pba_ptr = 0; + u16 i = 0; + + status = i40e_read_nvm_word(hw, I40E_SR_PBA_FLAGS, &pba_word); + if ((status != I40E_SUCCESS) || (pba_word != 0xFAFA)) { + DEBUGOUT("Failed to read PBA flags or flag is invalid.\n"); + return status; + } + + status = i40e_read_nvm_word(hw, I40E_SR_PBA_BLOCK_PTR, &pba_ptr); + if (status != I40E_SUCCESS) { + DEBUGOUT("Failed to read PBA Block pointer.\n"); + return status; + } + + status = i40e_read_nvm_word(hw, pba_ptr, &pba_size); + if (status != I40E_SUCCESS) { + DEBUGOUT("Failed to read PBA Block size.\n"); + return status; + } + + /* Subtract one to get PBA word count (PBA Size word is included in + * total size) + */ + pba_size--; + if (pba_num_size < (((u32)pba_size * 2) + 1)) { + DEBUGOUT("Buffer to small for PBA data.\n"); + return I40E_ERR_PARAM; + } + + for (i = 0; i < pba_size; i++) { + status = i40e_read_nvm_word(hw, (pba_ptr + 1) + i, &pba_word); + if (status != I40E_SUCCESS) { + DEBUGOUT1("Failed to read PBA Block word %d.\n", i); + return status; + } + + pba_num[(i * 2)] = (pba_word >> 8) & 0xFF; + pba_num[(i * 2) + 1] = pba_word & 0xFF; + } + pba_num[(pba_size * 2)] = '\0'; + + return status; +} + +/** + * i40e_get_media_type - Gets media type + * @hw: pointer to the hardware structure + **/ +STATIC enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw) +{ + enum i40e_media_type media; + + switch (hw->phy.link_info.phy_type) { + case I40E_PHY_TYPE_10GBASE_SR: + case I40E_PHY_TYPE_10GBASE_LR: + case I40E_PHY_TYPE_1000BASE_SX: + case I40E_PHY_TYPE_1000BASE_LX: + case I40E_PHY_TYPE_40GBASE_SR4: + case I40E_PHY_TYPE_40GBASE_LR4: + case I40E_PHY_TYPE_25GBASE_LR: + case I40E_PHY_TYPE_25GBASE_SR: + media = I40E_MEDIA_TYPE_FIBER; + break; + case I40E_PHY_TYPE_100BASE_TX: + case I40E_PHY_TYPE_1000BASE_T: + 
case I40E_PHY_TYPE_2_5GBASE_T: + case I40E_PHY_TYPE_5GBASE_T: + case I40E_PHY_TYPE_10GBASE_T: + media = I40E_MEDIA_TYPE_BASET; + break; + case I40E_PHY_TYPE_10GBASE_CR1_CU: + case I40E_PHY_TYPE_40GBASE_CR4_CU: + case I40E_PHY_TYPE_10GBASE_CR1: + case I40E_PHY_TYPE_40GBASE_CR4: + case I40E_PHY_TYPE_10GBASE_SFPP_CU: + case I40E_PHY_TYPE_40GBASE_AOC: + case I40E_PHY_TYPE_10GBASE_AOC: + case I40E_PHY_TYPE_25GBASE_CR: + case I40E_PHY_TYPE_25GBASE_AOC: + case I40E_PHY_TYPE_25GBASE_ACC: + media = I40E_MEDIA_TYPE_DA; + break; + case I40E_PHY_TYPE_1000BASE_KX: + case I40E_PHY_TYPE_10GBASE_KX4: + case I40E_PHY_TYPE_10GBASE_KR: + case I40E_PHY_TYPE_40GBASE_KR4: + case I40E_PHY_TYPE_20GBASE_KR2: + case I40E_PHY_TYPE_25GBASE_KR: + media = I40E_MEDIA_TYPE_BACKPLANE; + break; + case I40E_PHY_TYPE_SGMII: + case I40E_PHY_TYPE_XAUI: + case I40E_PHY_TYPE_XFI: + case I40E_PHY_TYPE_XLAUI: + case I40E_PHY_TYPE_XLPPI: + default: + media = I40E_MEDIA_TYPE_UNKNOWN; + break; + } + + return media; +} + +/** + * i40e_poll_globr - Poll for Global Reset completion + * @hw: pointer to the hardware structure + * @retry_limit: how many times to retry before failure + **/ +STATIC enum i40e_status_code i40e_poll_globr(struct i40e_hw *hw, + u32 retry_limit) +{ + u32 cnt, reg = 0; + + for (cnt = 0; cnt < retry_limit; cnt++) { + reg = rd32(hw, I40E_GLGEN_RSTAT); + if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK)) + return I40E_SUCCESS; + i40e_msec_delay(100); + } + + DEBUGOUT("Global reset failed.\n"); + DEBUGOUT1("I40E_GLGEN_RSTAT = 0x%x\n", reg); + + return I40E_ERR_RESET_FAILED; +} + +#define I40E_PF_RESET_WAIT_COUNT 200 +/** + * i40e_pf_reset - Reset the PF + * @hw: pointer to the hardware structure + * + * Assuming someone else has triggered a global reset, + * assure the global reset is complete and then reset the PF + **/ +enum i40e_status_code i40e_pf_reset(struct i40e_hw *hw) +{ + u32 cnt = 0; + u32 cnt1 = 0; + u32 reg = 0; + u32 grst_del; + + /* Poll for Global Reset steady state in case of recent GRST. + * The grst delay value is in 100ms units, and we'll wait a + * couple counts longer to be sure we don't just miss the end. 
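+ * The delay read from I40E_GLGEN_RSTCTL is scaled by 20 and capped at 160 polls of 100 ms each, i.e. at most about 16 seconds.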
+ */ + grst_del = (rd32(hw, I40E_GLGEN_RSTCTL) & + I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >> + I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT; + + grst_del = min(grst_del * 20, 160U); + + for (cnt = 0; cnt < grst_del; cnt++) { + reg = rd32(hw, I40E_GLGEN_RSTAT); + if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK)) + break; + i40e_msec_delay(100); + } + if (reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK) { + DEBUGOUT("Global reset polling failed to complete.\n"); + return I40E_ERR_RESET_FAILED; + } + + /* Now Wait for the FW to be ready */ + for (cnt1 = 0; cnt1 < I40E_PF_RESET_WAIT_COUNT; cnt1++) { + reg = rd32(hw, I40E_GLNVM_ULD); + reg &= (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK | + I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK); + if (reg == (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK | + I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK)) { + DEBUGOUT1("Core and Global modules ready %d\n", cnt1); + break; + } + i40e_msec_delay(10); + } + if (!(reg & (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK | + I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))) { + DEBUGOUT("wait for FW Reset complete timedout\n"); + DEBUGOUT1("I40E_GLNVM_ULD = 0x%x\n", reg); + return I40E_ERR_RESET_FAILED; + } + + /* If there was a Global Reset in progress when we got here, + * we don't need to do the PF Reset + */ + if (!cnt) { + u32 reg2 = 0; + + reg = rd32(hw, I40E_PFGEN_CTRL); + wr32(hw, I40E_PFGEN_CTRL, + (reg | I40E_PFGEN_CTRL_PFSWR_MASK)); + for (cnt = 0; cnt < I40E_PF_RESET_WAIT_COUNT; cnt++) { + reg = rd32(hw, I40E_PFGEN_CTRL); + if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK)) + break; + reg2 = rd32(hw, I40E_GLGEN_RSTAT); + if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) + break; + i40e_msec_delay(1); + } + if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) { + if (i40e_poll_globr(hw, grst_del) != I40E_SUCCESS) + return I40E_ERR_RESET_FAILED; + } else if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) { + DEBUGOUT("PF reset polling failed to complete.\n"); + return I40E_ERR_RESET_FAILED; + } + } + + i40e_clear_pxe_mode(hw); + + + return I40E_SUCCESS; +} + +/** + * i40e_clear_hw - clear out any left over hw state + * @hw: pointer to the hw struct + * + * Clear queues and interrupts, typically called at init time, + * but after the capabilities have been found so we know how many + * queues and msix vectors have been allocated. 
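+ * The interrupt, queue and VF counts come from I40E_GLPCI_CNF2, I40E_PFLAN_QALLOC and I40E_PF_VT_PFALLOC; interrupts are disabled, the interrupt linked lists are set to end-of-list (0x7FF) and every queue is disabled.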
+ **/ +void i40e_clear_hw(struct i40e_hw *hw) +{ + u32 num_queues, base_queue; + u32 num_pf_int; + u32 num_vf_int; + u32 num_vfs; + u32 i, j; + u32 val; + u32 eol = 0x7ff; + + /* get number of interrupts, queues, and vfs */ + val = rd32(hw, I40E_GLPCI_CNF2); + num_pf_int = (val & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >> + I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT; + num_vf_int = (val & I40E_GLPCI_CNF2_MSI_X_VF_N_MASK) >> + I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT; + + val = rd32(hw, I40E_PFLAN_QALLOC); + base_queue = (val & I40E_PFLAN_QALLOC_FIRSTQ_MASK) >> + I40E_PFLAN_QALLOC_FIRSTQ_SHIFT; + j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >> + I40E_PFLAN_QALLOC_LASTQ_SHIFT; + if (val & I40E_PFLAN_QALLOC_VALID_MASK) + num_queues = (j - base_queue) + 1; + else + num_queues = 0; + + val = rd32(hw, I40E_PF_VT_PFALLOC); + i = (val & I40E_PF_VT_PFALLOC_FIRSTVF_MASK) >> + I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT; + j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK) >> + I40E_PF_VT_PFALLOC_LASTVF_SHIFT; + if (val & I40E_PF_VT_PFALLOC_VALID_MASK) + num_vfs = (j - i) + 1; + else + num_vfs = 0; + + /* stop all the interrupts */ + wr32(hw, I40E_PFINT_ICR0_ENA, 0); + val = 0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT; + for (i = 0; i < num_pf_int - 2; i++) + wr32(hw, I40E_PFINT_DYN_CTLN(i), val); + + /* Set the FIRSTQ_INDX field to 0x7FF in PFINT_LNKLSTx */ + val = eol << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT; + wr32(hw, I40E_PFINT_LNKLST0, val); + for (i = 0; i < num_pf_int - 2; i++) + wr32(hw, I40E_PFINT_LNKLSTN(i), val); + val = eol << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT; + for (i = 0; i < num_vfs; i++) + wr32(hw, I40E_VPINT_LNKLST0(i), val); + for (i = 0; i < num_vf_int - 2; i++) + wr32(hw, I40E_VPINT_LNKLSTN(i), val); + + /* warn the HW of the coming Tx disables */ + for (i = 0; i < num_queues; i++) { + u32 abs_queue_idx = base_queue + i; + u32 reg_block = 0; + + if (abs_queue_idx >= 128) { + reg_block = abs_queue_idx / 128; + abs_queue_idx %= 128; + } + + val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block)); + val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK; + val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT); + val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK; + + wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), val); + } + i40e_usec_delay(400); + + /* stop all the queues */ + for (i = 0; i < num_queues; i++) { + wr32(hw, I40E_QINT_TQCTL(i), 0); + wr32(hw, I40E_QTX_ENA(i), 0); + wr32(hw, I40E_QINT_RQCTL(i), 0); + wr32(hw, I40E_QRX_ENA(i), 0); + } + + /* short wait for all queue disables to settle */ + i40e_usec_delay(50); +} + +/** + * i40e_clear_pxe_mode - clear pxe operations mode + * @hw: pointer to the hw struct + * + * Make sure all PXE mode settings are cleared, including things + * like descriptor fetch/write-back mode. 
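+ * The Clear PXE Mode admin command is only sent when the admin send queue is alive (see i40e_check_asq_alive).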
+ **/ +void i40e_clear_pxe_mode(struct i40e_hw *hw) +{ + if (i40e_check_asq_alive(hw)) + i40e_aq_clear_pxe_mode(hw, NULL); +} + +/** + * i40e_led_is_mine - helper to find matching led + * @hw: pointer to the hw struct + * @idx: index into GPIO registers + * + * returns: 0 if no match, otherwise the value of the GPIO_CTL register + */ +static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx) +{ + u32 gpio_val = 0; + u32 port; + + if (!I40E_IS_X710TL_DEVICE(hw->device_id) && + !hw->func_caps.led[idx]) + return 0; + gpio_val = rd32(hw, I40E_GLGEN_GPIO_CTL(idx)); + port = (gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK) >> + I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT; + + /* if PRT_NUM_NA is 1 then this LED is not port specific, OR + * if it is not our port then ignore + */ + if ((gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK) || + (port != hw->port)) + return 0; + + return gpio_val; +} + +#define I40E_COMBINED_ACTIVITY 0xA +#define I40E_FILTER_ACTIVITY 0xE +#define I40E_LINK_ACTIVITY 0xC +#define I40E_MAC_ACTIVITY 0xD +#define I40E_FW_LED BIT(4) +#define I40E_LED_MODE_VALID (I40E_GLGEN_GPIO_CTL_LED_MODE_MASK >> \ + I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) + +#define I40E_LED0 22 + +#define I40E_PIN_FUNC_SDP 0x0 +#define I40E_PIN_FUNC_LED 0x1 + +/** + * i40e_led_get - return current on/off mode + * @hw: pointer to the hw struct + * + * The value returned is the 'mode' field as defined in the + * GPIO register definitions: 0x0 = off, 0xf = on, and other + * values are variations of possible behaviors relating to + * blink, link, and wire. + **/ +u32 i40e_led_get(struct i40e_hw *hw) +{ + u32 current_mode = 0; + u32 mode = 0; + int i; + + /* as per the documentation GPIO 22-29 are the LED + * GPIO pins named LED0..LED7 + */ + for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) { + u32 gpio_val = i40e_led_is_mine(hw, i); + + if (!gpio_val) + continue; + + /* ignore gpio LED src mode entries related to the activity + * LEDs + */ + current_mode = ((gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) + >> I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT); + switch (current_mode) { + case I40E_COMBINED_ACTIVITY: + case I40E_FILTER_ACTIVITY: + case I40E_MAC_ACTIVITY: + case I40E_LINK_ACTIVITY: + continue; + default: + break; + } + + mode = (gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) >> + I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT; + break; + } + + return mode; +} + +/** + * i40e_led_set - set new on/off mode + * @hw: pointer to the hw struct + * @mode: 0=off, 0xf=on (else see manual for mode details) + * @blink: true if the LED should blink when on, false if steady + * + * if this function is used to turn on the blink it should + * be used to disable the blink when restoring the original state. 
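+ * Only the LED GPIO pins (LED0..LED7, GPIO 22-29) owned by this port are touched, and pins configured for activity-LED modes are skipped.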
+ **/ +void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink) +{ + u32 current_mode = 0; + int i; + + if (mode & ~I40E_LED_MODE_VALID) { + DEBUGOUT1("invalid mode passed in %X\n", mode); + return; + } + + /* as per the documentation GPIO 22-29 are the LED + * GPIO pins named LED0..LED7 + */ + for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) { + u32 gpio_val = i40e_led_is_mine(hw, i); + + if (!gpio_val) + continue; + + /* ignore gpio LED src mode entries related to the activity + * LEDs + */ + current_mode = ((gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) + >> I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT); + switch (current_mode) { + case I40E_COMBINED_ACTIVITY: + case I40E_FILTER_ACTIVITY: + case I40E_MAC_ACTIVITY: + case I40E_LINK_ACTIVITY: + continue; + default: + break; + } + + if (I40E_IS_X710TL_DEVICE(hw->device_id)) { + u32 pin_func = 0; + + if (mode & I40E_FW_LED) + pin_func = I40E_PIN_FUNC_SDP; + else + pin_func = I40E_PIN_FUNC_LED; + + gpio_val &= ~I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK; + gpio_val |= ((pin_func << + I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT) & + I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK); + } + gpio_val &= ~I40E_GLGEN_GPIO_CTL_LED_MODE_MASK; + /* this & is a bit of paranoia, but serves as a range check */ + gpio_val |= ((mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) & + I40E_GLGEN_GPIO_CTL_LED_MODE_MASK); + + if (blink) + gpio_val |= BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT); + else + gpio_val &= ~BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT); + + wr32(hw, I40E_GLGEN_GPIO_CTL(i), gpio_val); + break; + } +} + +/* Admin command wrappers */ + +/** + * i40e_aq_get_phy_capabilities + * @hw: pointer to the hw struct + * @abilities: structure for PHY capabilities to be filled + * @qualified_modules: report Qualified Modules + * @report_init: report init capabilities (active are default) + * @cmd_details: pointer to command details structure or NULL + * + * Returns the various PHY abilities supported on the Port. 
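+ * While the firmware returns EAGAIN the command is retried at 1 ms intervals, up to I40E_MAX_PHY_TIMEOUT attempts.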
+ **/ +enum i40e_status_code i40e_aq_get_phy_capabilities(struct i40e_hw *hw, + bool qualified_modules, bool report_init, + struct i40e_aq_get_phy_abilities_resp *abilities, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + enum i40e_status_code status; + u16 max_delay = I40E_MAX_PHY_TIMEOUT, total_delay = 0; + u16 abilities_size = sizeof(struct i40e_aq_get_phy_abilities_resp); + + if (!abilities) + return I40E_ERR_PARAM; + + do { + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_get_phy_abilities); + + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); + if (abilities_size > I40E_AQ_LARGE_BUF) + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); + + if (qualified_modules) + desc.params.external.param0 |= + CPU_TO_LE32(I40E_AQ_PHY_REPORT_QUALIFIED_MODULES); + + if (report_init) + desc.params.external.param0 |= + CPU_TO_LE32(I40E_AQ_PHY_REPORT_INITIAL_VALUES); + + status = i40e_asq_send_command(hw, &desc, abilities, + abilities_size, cmd_details); + + switch (hw->aq.asq_last_status) { + case I40E_AQ_RC_EIO: + status = I40E_ERR_UNKNOWN_PHY; + break; + case I40E_AQ_RC_EAGAIN: + i40e_msec_delay(1); + total_delay++; + status = I40E_ERR_TIMEOUT; + break; + /* also covers I40E_AQ_RC_OK */ + default: + break; + } + + } while ((hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN) && + (total_delay < max_delay)); + + if (status != I40E_SUCCESS) + return status; + + if (report_init) { + if (hw->mac.type == I40E_MAC_XL710 && + hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR && + hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) { + status = i40e_aq_get_link_info(hw, true, NULL, NULL); + } else { + hw->phy.phy_types = LE32_TO_CPU(abilities->phy_type); + hw->phy.phy_types |= + ((u64)abilities->phy_type_ext << 32); + } + } + + return status; +} + +/** + * i40e_aq_set_phy_config + * @hw: pointer to the hw struct + * @config: structure with PHY configuration to be set + * @cmd_details: pointer to command details structure or NULL + * + * Set the various PHY configuration parameters + * supported on the Port.One or more of the Set PHY config parameters may be + * ignored in an MFP mode as the PF may not have the privilege to set some + * of the PHY Config parameters. This status will be indicated by the + * command response. + **/ +enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw, + struct i40e_aq_set_phy_config *config, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aq_set_phy_config *cmd = + (struct i40e_aq_set_phy_config *)&desc.params.raw; + enum i40e_status_code status; + + if (!config) + return I40E_ERR_PARAM; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_set_phy_config); + + *cmd = *config; + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_set_fc + * @hw: pointer to the hw struct + * @aq_failures: buffer to return AdminQ failure information + * @atomic_restart: whether to enable atomic link restart + * + * Set the requested flow control mode using set_phy_config. 
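+ * aq_failures reports which step failed: reading the current PHY capabilities, writing the new PHY config, or the link info update afterwards.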
+ **/ +enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures, + bool atomic_restart) +{ + enum i40e_fc_mode fc_mode = hw->fc.requested_mode; + struct i40e_aq_get_phy_abilities_resp abilities; + struct i40e_aq_set_phy_config config; + enum i40e_status_code status; + u8 pause_mask = 0x0; + + *aq_failures = 0x0; + + switch (fc_mode) { + case I40E_FC_FULL: + pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX; + pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_RX; + break; + case I40E_FC_RX_PAUSE: + pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_RX; + break; + case I40E_FC_TX_PAUSE: + pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX; + break; + default: + break; + } + + /* Get the current phy config */ + status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, + NULL); + if (status) { + *aq_failures |= I40E_SET_FC_AQ_FAIL_GET; + return status; + } + + memset(&config, 0, sizeof(config)); + /* clear the old pause settings */ + config.abilities = abilities.abilities & ~(I40E_AQ_PHY_FLAG_PAUSE_TX) & + ~(I40E_AQ_PHY_FLAG_PAUSE_RX); + /* set the new abilities */ + config.abilities |= pause_mask; + /* If the abilities have changed, then set the new config */ + if (config.abilities != abilities.abilities) { + /* Auto restart link so settings take effect */ + if (atomic_restart) + config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK; + /* Copy over all the old settings */ + config.phy_type = abilities.phy_type; + config.phy_type_ext = abilities.phy_type_ext; + config.link_speed = abilities.link_speed; + config.eee_capability = abilities.eee_capability; + config.eeer = abilities.eeer_val; + config.low_power_ctrl = abilities.d3_lpan; + config.fec_config = abilities.fec_cfg_curr_mod_ext_info & + I40E_AQ_PHY_FEC_CONFIG_MASK; + status = i40e_aq_set_phy_config(hw, &config, NULL); + + if (status) + *aq_failures |= I40E_SET_FC_AQ_FAIL_SET; + } + /* Update the link info */ + status = i40e_update_link_info(hw); + if (status) { + /* Wait a little bit (on 40G cards it sometimes takes a really + * long time for link to come back from the atomic reset) + * and try once more + */ + i40e_msec_delay(1000); + status = i40e_update_link_info(hw); + } + if (status) + *aq_failures |= I40E_SET_FC_AQ_FAIL_UPDATE; + + return status; +} + +/** + * i40e_aq_set_mac_config + * @hw: pointer to the hw struct + * @max_frame_size: Maximum Frame Size to be supported by the port + * @crc_en: Tell HW to append a CRC to outgoing frames + * @pacing: Pacing configurations + * @auto_drop_blocking_packets: Tell HW to drop packets if TC queue is blocked + * @cmd_details: pointer to command details structure or NULL + * + * Configure MAC settings for frame size, jumbo frame support and the + * addition of a CRC by the hardware. 
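+ * A max_frame_size of zero is rejected as invalid, and the flow control refresh threshold is always programmed to the default value 0x7FFF.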
+ **/ +enum i40e_status_code i40e_aq_set_mac_config(struct i40e_hw *hw, + u16 max_frame_size, + bool crc_en, u16 pacing, + bool auto_drop_blocking_packets, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aq_set_mac_config *cmd = + (struct i40e_aq_set_mac_config *)&desc.params.raw; + enum i40e_status_code status; + + if (max_frame_size == 0) + return I40E_ERR_PARAM; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_set_mac_config); + + cmd->max_frame_size = CPU_TO_LE16(max_frame_size); + cmd->params = ((u8)pacing & 0x0F) << 3; + if (crc_en) + cmd->params |= I40E_AQ_SET_MAC_CONFIG_CRC_EN; + + if (auto_drop_blocking_packets) { + if (hw->flags & I40E_HW_FLAG_DROP_MODE) + cmd->params |= + I40E_AQ_SET_MAC_CONFIG_DROP_BLOCKING_PACKET_EN; + else + i40e_debug(hw, I40E_DEBUG_ALL, + "This FW api version does not support drop mode.\n"); + } + +#define I40E_AQ_SET_MAC_CONFIG_FC_DEFAULT_THRESHOLD 0x7FFF + cmd->fc_refresh_threshold = + CPU_TO_LE16(I40E_AQ_SET_MAC_CONFIG_FC_DEFAULT_THRESHOLD); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_aq_clear_pxe_mode + * @hw: pointer to the hw struct + * @cmd_details: pointer to command details structure or NULL + * + * Tell the firmware that the driver is taking over from PXE + **/ +enum i40e_status_code i40e_aq_clear_pxe_mode(struct i40e_hw *hw, + struct i40e_asq_cmd_details *cmd_details) +{ + enum i40e_status_code status; + struct i40e_aq_desc desc; + struct i40e_aqc_clear_pxe *cmd = + (struct i40e_aqc_clear_pxe *)&desc.params.raw; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_clear_pxe_mode); + + cmd->rx_cnt = 0x2; + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + wr32(hw, I40E_GLLAN_RCTL_0, 0x1); + + return status; +} + +/** + * i40e_aq_set_link_restart_an + * @hw: pointer to the hw struct + * @enable_link: if true: enable link, if false: disable link + * @cmd_details: pointer to command details structure or NULL + * + * Sets up the link and restarts the Auto-Negotiation over the link. + **/ +enum i40e_status_code i40e_aq_set_link_restart_an(struct i40e_hw *hw, + bool enable_link, struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_set_link_restart_an *cmd = + (struct i40e_aqc_set_link_restart_an *)&desc.params.raw; + enum i40e_status_code status; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_set_link_restart_an); + + cmd->command = I40E_AQ_PHY_RESTART_AN; + if (enable_link) + cmd->command |= I40E_AQ_PHY_LINK_ENABLE; + else + cmd->command &= ~I40E_AQ_PHY_LINK_ENABLE; + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_aq_get_link_info + * @hw: pointer to the hw struct + * @enable_lse: enable/disable LinkStatusEvent reporting + * @link: pointer to link status structure - optional + * @cmd_details: pointer to command details structure or NULL + * + * Returns the link status of the adapter. 
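+ * The previous status is preserved in hw->phy.link_info_old, the new status is cached in hw->phy.link_info (and copied to 'link' when provided), and hw->phy.get_link_info is cleared on success.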
+ **/ +enum i40e_status_code i40e_aq_get_link_info(struct i40e_hw *hw, + bool enable_lse, struct i40e_link_status *link, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_get_link_status *resp = + (struct i40e_aqc_get_link_status *)&desc.params.raw; + struct i40e_link_status *hw_link_info = &hw->phy.link_info; + enum i40e_status_code status; + bool tx_pause, rx_pause; + u16 command_flags; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status); + + if (enable_lse) + command_flags = I40E_AQ_LSE_ENABLE; + else + command_flags = I40E_AQ_LSE_DISABLE; + resp->command_flags = CPU_TO_LE16(command_flags); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + if (status != I40E_SUCCESS) + goto aq_get_link_info_exit; + + /* save off old link status information */ + i40e_memcpy(&hw->phy.link_info_old, hw_link_info, + sizeof(*hw_link_info), I40E_NONDMA_TO_NONDMA); + + /* update link status */ + hw_link_info->phy_type = (enum i40e_aq_phy_type)resp->phy_type; + hw->phy.media_type = i40e_get_media_type(hw); + hw_link_info->link_speed = (enum i40e_aq_link_speed)resp->link_speed; + hw_link_info->link_info = resp->link_info; + hw_link_info->an_info = resp->an_info; + hw_link_info->fec_info = resp->config & (I40E_AQ_CONFIG_FEC_KR_ENA | + I40E_AQ_CONFIG_FEC_RS_ENA); + hw_link_info->ext_info = resp->ext_info; + hw_link_info->loopback = resp->loopback & I40E_AQ_LOOPBACK_MASK; + hw_link_info->max_frame_size = LE16_TO_CPU(resp->max_frame_size); + hw_link_info->pacing = resp->config & I40E_AQ_CONFIG_PACING_MASK; + + /* update fc info */ + tx_pause = !!(resp->an_info & I40E_AQ_LINK_PAUSE_TX); + rx_pause = !!(resp->an_info & I40E_AQ_LINK_PAUSE_RX); + if (tx_pause & rx_pause) + hw->fc.current_mode = I40E_FC_FULL; + else if (tx_pause) + hw->fc.current_mode = I40E_FC_TX_PAUSE; + else if (rx_pause) + hw->fc.current_mode = I40E_FC_RX_PAUSE; + else + hw->fc.current_mode = I40E_FC_NONE; + + if (resp->config & I40E_AQ_CONFIG_CRC_ENA) + hw_link_info->crc_enable = true; + else + hw_link_info->crc_enable = false; + + if (resp->command_flags & CPU_TO_LE16(I40E_AQ_LSE_IS_ENABLED)) + hw_link_info->lse_enable = true; + else + hw_link_info->lse_enable = false; + + if ((hw->mac.type == I40E_MAC_XL710) && + (hw->aq.fw_maj_ver < 4 || (hw->aq.fw_maj_ver == 4 && + hw->aq.fw_min_ver < 40)) && hw_link_info->phy_type == 0xE) + hw_link_info->phy_type = I40E_PHY_TYPE_10GBASE_SFPP_CU; + + if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE && + hw->mac.type != I40E_MAC_X722) { + __le32 tmp; + + i40e_memcpy(&tmp, resp->link_type, sizeof(tmp), + I40E_NONDMA_TO_NONDMA); + hw->phy.phy_types = LE32_TO_CPU(tmp); + hw->phy.phy_types |= ((u64)resp->link_type_ext << 32); + } + + /* save link status information */ + if (link) + i40e_memcpy(link, hw_link_info, sizeof(*hw_link_info), + I40E_NONDMA_TO_NONDMA); + + /* flag cleared so helper functions don't call AQ again */ + hw->phy.get_link_info = false; + +aq_get_link_info_exit: + return status; +} + +/** + * i40e_aq_set_phy_int_mask + * @hw: pointer to the hw struct + * @mask: interrupt mask to be set + * @cmd_details: pointer to command details structure or NULL + * + * Set link interrupt mask. 
+ **/ +enum i40e_status_code i40e_aq_set_phy_int_mask(struct i40e_hw *hw, + u16 mask, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_set_phy_int_mask *cmd = + (struct i40e_aqc_set_phy_int_mask *)&desc.params.raw; + enum i40e_status_code status; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_set_phy_int_mask); + + cmd->event_mask = CPU_TO_LE16(mask); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_aq_get_local_advt_reg + * @hw: pointer to the hw struct + * @advt_reg: local AN advertisement register value + * @cmd_details: pointer to command details structure or NULL + * + * Get the Local AN advertisement register value. + **/ +enum i40e_status_code i40e_aq_get_local_advt_reg(struct i40e_hw *hw, + u64 *advt_reg, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_an_advt_reg *resp = + (struct i40e_aqc_an_advt_reg *)&desc.params.raw; + enum i40e_status_code status; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_get_local_advt_reg); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + if (status != I40E_SUCCESS) + goto aq_get_local_advt_reg_exit; + + *advt_reg = (u64)(LE16_TO_CPU(resp->local_an_reg1)) << 32; + *advt_reg |= LE32_TO_CPU(resp->local_an_reg0); + +aq_get_local_advt_reg_exit: + return status; +} + +/** + * i40e_aq_set_local_advt_reg + * @hw: pointer to the hw struct + * @advt_reg: local AN advertisement register value + * @cmd_details: pointer to command details structure or NULL + * + * Get the Local AN advertisement register value. + **/ +enum i40e_status_code i40e_aq_set_local_advt_reg(struct i40e_hw *hw, + u64 advt_reg, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_an_advt_reg *cmd = + (struct i40e_aqc_an_advt_reg *)&desc.params.raw; + enum i40e_status_code status; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_get_local_advt_reg); + + cmd->local_an_reg0 = CPU_TO_LE32(I40E_LO_DWORD(advt_reg)); + cmd->local_an_reg1 = CPU_TO_LE16(I40E_HI_DWORD(advt_reg)); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_aq_get_partner_advt + * @hw: pointer to the hw struct + * @advt_reg: AN partner advertisement register value + * @cmd_details: pointer to command details structure or NULL + * + * Get the link partner AN advertisement register value. + **/ +enum i40e_status_code i40e_aq_get_partner_advt(struct i40e_hw *hw, + u64 *advt_reg, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_an_advt_reg *resp = + (struct i40e_aqc_an_advt_reg *)&desc.params.raw; + enum i40e_status_code status; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_get_partner_advt); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + if (status != I40E_SUCCESS) + goto aq_get_partner_advt_exit; + + *advt_reg = (u64)(LE16_TO_CPU(resp->local_an_reg1)) << 32; + *advt_reg |= LE32_TO_CPU(resp->local_an_reg0); + +aq_get_partner_advt_exit: + return status; +} + +/** + * i40e_aq_set_lb_modes + * @hw: pointer to the hw struct + * @lb_modes: loopback mode to be set + * @cmd_details: pointer to command details structure or NULL + * + * Sets loopback modes. 
+ **/ +enum i40e_status_code i40e_aq_set_lb_modes(struct i40e_hw *hw, + u16 lb_modes, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_set_lb_mode *cmd = + (struct i40e_aqc_set_lb_mode *)&desc.params.raw; + enum i40e_status_code status; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_set_lb_modes); + + cmd->lb_mode = CPU_TO_LE16(lb_modes); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_aq_set_phy_debug + * @hw: pointer to the hw struct + * @cmd_flags: debug command flags + * @cmd_details: pointer to command details structure or NULL + * + * Reset the external PHY. + **/ +enum i40e_status_code i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_set_phy_debug *cmd = + (struct i40e_aqc_set_phy_debug *)&desc.params.raw; + enum i40e_status_code status; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_set_phy_debug); + + cmd->command_flags = cmd_flags; + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_aq_add_vsi + * @hw: pointer to the hw struct + * @vsi_ctx: pointer to a vsi context struct + * @cmd_details: pointer to command details structure or NULL + * + * Add a VSI context to the hardware. +**/ +enum i40e_status_code i40e_aq_add_vsi(struct i40e_hw *hw, + struct i40e_vsi_context *vsi_ctx, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_add_get_update_vsi *cmd = + (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw; + struct i40e_aqc_add_get_update_vsi_completion *resp = + (struct i40e_aqc_add_get_update_vsi_completion *) + &desc.params.raw; + enum i40e_status_code status; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_add_vsi); + + cmd->uplink_seid = CPU_TO_LE16(vsi_ctx->uplink_seid); + cmd->connection_type = vsi_ctx->connection_type; + cmd->vf_id = vsi_ctx->vf_num; + cmd->vsi_flags = CPU_TO_LE16(vsi_ctx->flags); + + desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); + + status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info, + sizeof(vsi_ctx->info), cmd_details); + + if (status != I40E_SUCCESS) + goto aq_add_vsi_exit; + + vsi_ctx->seid = LE16_TO_CPU(resp->seid); + vsi_ctx->vsi_number = LE16_TO_CPU(resp->vsi_number); + vsi_ctx->vsis_allocated = LE16_TO_CPU(resp->vsi_used); + vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free); + +aq_add_vsi_exit: + return status; +} + +/** + * i40e_aq_set_default_vsi + * @hw: pointer to the hw struct + * @seid: vsi number + * @cmd_details: pointer to command details structure or NULL + **/ +enum i40e_status_code i40e_aq_set_default_vsi(struct i40e_hw *hw, + u16 seid, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_set_vsi_promiscuous_modes *cmd = + (struct i40e_aqc_set_vsi_promiscuous_modes *) + &desc.params.raw; + enum i40e_status_code status; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_set_vsi_promiscuous_modes); + + cmd->promiscuous_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_DEFAULT); + cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_DEFAULT); + cmd->seid = CPU_TO_LE16(seid); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_aq_clear_default_vsi + * @hw: pointer to the hw struct + * @seid: vsi number + * @cmd_details: pointer to command details structure or NULL + **/ +enum 
i40e_status_code i40e_aq_clear_default_vsi(struct i40e_hw *hw, + u16 seid, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_set_vsi_promiscuous_modes *cmd = + (struct i40e_aqc_set_vsi_promiscuous_modes *) + &desc.params.raw; + enum i40e_status_code status; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_set_vsi_promiscuous_modes); + + cmd->promiscuous_flags = CPU_TO_LE16(0); + cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_DEFAULT); + cmd->seid = CPU_TO_LE16(seid); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_aq_set_vsi_unicast_promiscuous + * @hw: pointer to the hw struct + * @seid: vsi number + * @set: set unicast promiscuous enable/disable + * @cmd_details: pointer to command details structure or NULL + * @rx_only_promisc: flag to decide if egress traffic gets mirrored in promisc + **/ +enum i40e_status_code i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw, + u16 seid, bool set, + struct i40e_asq_cmd_details *cmd_details, + bool rx_only_promisc) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_set_vsi_promiscuous_modes *cmd = + (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; + enum i40e_status_code status; + u16 flags = 0; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_set_vsi_promiscuous_modes); + + if (set) { + flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST; + if (rx_only_promisc && + (((hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver >= 5)) || + (hw->aq.api_maj_ver > 1))) + flags |= I40E_AQC_SET_VSI_PROMISC_TX; + } + + cmd->promiscuous_flags = CPU_TO_LE16(flags); + + cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_UNICAST); + if (((hw->aq.api_maj_ver >= 1) && (hw->aq.api_min_ver >= 5)) || + (hw->aq.api_maj_ver > 1)) + cmd->valid_flags |= CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_TX); + + cmd->seid = CPU_TO_LE16(seid); + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_aq_set_vsi_multicast_promiscuous + * @hw: pointer to the hw struct + * @seid: vsi number + * @set: set multicast promiscuous enable/disable + * @cmd_details: pointer to command details structure or NULL + **/ +enum i40e_status_code i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw, + u16 seid, bool set, struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_set_vsi_promiscuous_modes *cmd = + (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; + enum i40e_status_code status; + u16 flags = 0; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_set_vsi_promiscuous_modes); + + if (set) + flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST; + + cmd->promiscuous_flags = CPU_TO_LE16(flags); + + cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_MULTICAST); + + cmd->seid = CPU_TO_LE16(seid); + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** +* i40e_aq_set_vsi_full_promiscuous +* @hw: pointer to the hw struct +* @seid: VSI number +* @set: set promiscuous enable/disable +* @cmd_details: pointer to command details structure or NULL +**/ +enum i40e_status_code i40e_aq_set_vsi_full_promiscuous(struct i40e_hw *hw, + u16 seid, bool set, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_set_vsi_promiscuous_modes *cmd = + (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; + enum i40e_status_code status; + u16 flags = 0; + + i40e_fill_default_direct_cmd_desc(&desc, + 
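/*
 * Editor's note, not part of the upstream patch: the unicast
 * promiscuous helper above only uses I40E_AQC_SET_VSI_PROMISC_TX when
 * the AdminQ API is version 1.5 or newer.  The same gate, factored into
 * a small predicate purely for illustration:
 */
static inline bool aq_api_is_1_5_or_newer_sketch(struct i40e_hw *hw)
{
	return (hw->aq.api_maj_ver > 1) ||
	       (hw->aq.api_maj_ver == 1 && hw->aq.api_min_ver >= 5);
}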
i40e_aqc_opc_set_vsi_promiscuous_modes); + + if (set) + flags = I40E_AQC_SET_VSI_PROMISC_UNICAST | + I40E_AQC_SET_VSI_PROMISC_MULTICAST | + I40E_AQC_SET_VSI_PROMISC_BROADCAST; + + cmd->promiscuous_flags = CPU_TO_LE16(flags); + + cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_UNICAST | + I40E_AQC_SET_VSI_PROMISC_MULTICAST | + I40E_AQC_SET_VSI_PROMISC_BROADCAST); + + cmd->seid = CPU_TO_LE16(seid); + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_aq_set_vsi_mc_promisc_on_vlan + * @hw: pointer to the hw struct + * @seid: vsi number + * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN + * @vid: The VLAN tag filter - capture any multicast packet with this VLAN tag + * @cmd_details: pointer to command details structure or NULL + **/ +enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw, + u16 seid, bool enable, u16 vid, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_set_vsi_promiscuous_modes *cmd = + (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; + enum i40e_status_code status; + u16 flags = 0; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_set_vsi_promiscuous_modes); + + if (enable) + flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST; + + cmd->promiscuous_flags = CPU_TO_LE16(flags); + cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_MULTICAST); + cmd->seid = CPU_TO_LE16(seid); + cmd->vlan_tag = CPU_TO_LE16(vid | I40E_AQC_SET_VSI_VLAN_VALID); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_aq_set_vsi_uc_promisc_on_vlan + * @hw: pointer to the hw struct + * @seid: vsi number + * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN + * @vid: The VLAN tag filter - capture any unicast packet with this VLAN tag + * @cmd_details: pointer to command details structure or NULL + **/ +enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw, + u16 seid, bool enable, u16 vid, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_set_vsi_promiscuous_modes *cmd = + (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; + enum i40e_status_code status; + u16 flags = 0; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_set_vsi_promiscuous_modes); + + if (enable) + flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST; + + cmd->promiscuous_flags = CPU_TO_LE16(flags); + cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_UNICAST); + cmd->seid = CPU_TO_LE16(seid); + cmd->vlan_tag = CPU_TO_LE16(vid | I40E_AQC_SET_VSI_VLAN_VALID); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_aq_set_vsi_bc_promisc_on_vlan + * @hw: pointer to the hw struct + * @seid: vsi number + * @enable: set broadcast promiscuous enable/disable for a given VLAN + * @vid: The VLAN tag filter - capture any broadcast packet with this VLAN tag + * @cmd_details: pointer to command details structure or NULL + **/ +enum i40e_status_code i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw, + u16 seid, bool enable, u16 vid, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_set_vsi_promiscuous_modes *cmd = + (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; + enum i40e_status_code status; + u16 flags = 0; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_set_vsi_promiscuous_modes); + + if 
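/*
 * Editor's sketch, not upstream code: enabling per-VLAN promiscuous
 * reception with the helpers above.  The VSI SEID and VLAN ID are
 * caller-supplied placeholders; the helpers OR in
 * I40E_AQC_SET_VSI_VLAN_VALID themselves, so the caller passes the raw
 * VLAN ID.
 */
static enum i40e_status_code vlan_promisc_sketch(struct i40e_hw *hw,
						 u16 vsi_seid, u16 vlan_id)
{
	enum i40e_status_code status;

	/* match unicast traffic carrying this VLAN tag */
	status = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, vsi_seid, true,
						    vlan_id, NULL);
	if (status != I40E_SUCCESS)
		return status;

	/* and multicast traffic carrying the same tag */
	return i40e_aq_set_vsi_mc_promisc_on_vlan(hw, vsi_seid, true,
						  vlan_id, NULL);
}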
(enable) + flags |= I40E_AQC_SET_VSI_PROMISC_BROADCAST; + + cmd->promiscuous_flags = CPU_TO_LE16(flags); + cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_BROADCAST); + cmd->seid = CPU_TO_LE16(seid); + cmd->vlan_tag = CPU_TO_LE16(vid | I40E_AQC_SET_VSI_VLAN_VALID); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_aq_set_vsi_broadcast + * @hw: pointer to the hw struct + * @seid: vsi number + * @set_filter: true to set filter, false to clear filter + * @cmd_details: pointer to command details structure or NULL + * + * Set or clear the broadcast promiscuous flag (filter) for a given VSI. + **/ +enum i40e_status_code i40e_aq_set_vsi_broadcast(struct i40e_hw *hw, + u16 seid, bool set_filter, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_set_vsi_promiscuous_modes *cmd = + (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; + enum i40e_status_code status; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_set_vsi_promiscuous_modes); + + if (set_filter) + cmd->promiscuous_flags + |= CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_BROADCAST); + else + cmd->promiscuous_flags + &= CPU_TO_LE16(~I40E_AQC_SET_VSI_PROMISC_BROADCAST); + + cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_BROADCAST); + cmd->seid = CPU_TO_LE16(seid); + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_aq_set_vsi_vlan_promisc - control the VLAN promiscuous setting + * @hw: pointer to the hw struct + * @seid: vsi number + * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN + * @cmd_details: pointer to command details structure or NULL + **/ +enum i40e_status_code i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw, + u16 seid, bool enable, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_set_vsi_promiscuous_modes *cmd = + (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; + enum i40e_status_code status; + u16 flags = 0; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_set_vsi_promiscuous_modes); + if (enable) + flags |= I40E_AQC_SET_VSI_PROMISC_VLAN; + + cmd->promiscuous_flags = CPU_TO_LE16(flags); + cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_VLAN); + cmd->seid = CPU_TO_LE16(seid); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_get_vsi_params - get VSI configuration info + * @hw: pointer to the hw struct + * @vsi_ctx: pointer to a vsi context struct + * @cmd_details: pointer to command details structure or NULL + **/ +enum i40e_status_code i40e_aq_get_vsi_params(struct i40e_hw *hw, + struct i40e_vsi_context *vsi_ctx, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_add_get_update_vsi *cmd = + (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw; + struct i40e_aqc_add_get_update_vsi_completion *resp = + (struct i40e_aqc_add_get_update_vsi_completion *) + &desc.params.raw; + enum i40e_status_code status; + + UNREFERENCED_1PARAMETER(cmd_details); + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_get_vsi_parameters); + + cmd->uplink_seid = CPU_TO_LE16(vsi_ctx->seid); + + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); + + status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info, + sizeof(vsi_ctx->info), NULL); + + if (status != I40E_SUCCESS) + goto aq_get_vsi_params_exit; + + vsi_ctx->seid = LE16_TO_CPU(resp->seid); + 
vsi_ctx->vsi_number = LE16_TO_CPU(resp->vsi_number); + vsi_ctx->vsis_allocated = LE16_TO_CPU(resp->vsi_used); + vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free); + +aq_get_vsi_params_exit: + return status; +} + +/** + * i40e_aq_update_vsi_params + * @hw: pointer to the hw struct + * @vsi_ctx: pointer to a vsi context struct + * @cmd_details: pointer to command details structure or NULL + * + * Update a VSI context. + **/ +enum i40e_status_code i40e_aq_update_vsi_params(struct i40e_hw *hw, + struct i40e_vsi_context *vsi_ctx, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_add_get_update_vsi *cmd = + (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw; + struct i40e_aqc_add_get_update_vsi_completion *resp = + (struct i40e_aqc_add_get_update_vsi_completion *) + &desc.params.raw; + enum i40e_status_code status; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_update_vsi_parameters); + cmd->uplink_seid = CPU_TO_LE16(vsi_ctx->seid); + + desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); + + status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info, + sizeof(vsi_ctx->info), cmd_details); + + vsi_ctx->vsis_allocated = LE16_TO_CPU(resp->vsi_used); + vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free); + + return status; +} + +/** + * i40e_aq_get_switch_config + * @hw: pointer to the hardware structure + * @buf: pointer to the result buffer + * @buf_size: length of input buffer + * @start_seid: seid to start for the report, 0 == beginning + * @cmd_details: pointer to command details structure or NULL + * + * Fill the buf with switch configuration returned from AdminQ command + **/ +enum i40e_status_code i40e_aq_get_switch_config(struct i40e_hw *hw, + struct i40e_aqc_get_switch_config_resp *buf, + u16 buf_size, u16 *start_seid, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_switch_seid *scfg = + (struct i40e_aqc_switch_seid *)&desc.params.raw; + enum i40e_status_code status; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_get_switch_config); + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); + if (buf_size > I40E_AQ_LARGE_BUF) + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); + scfg->seid = CPU_TO_LE16(*start_seid); + + status = i40e_asq_send_command(hw, &desc, buf, buf_size, cmd_details); + *start_seid = LE16_TO_CPU(scfg->seid); + + return status; +} + +/** + * i40e_aq_set_switch_config + * @hw: pointer to the hardware structure + * @flags: bit flag values to set + * @mode: cloud filter mode + * @valid_flags: which bit flags to set + * @cmd_details: pointer to command details structure or NULL + * + * Set switch configuration bits + **/ +enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw, + u16 flags, u16 valid_flags, u8 mode, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_set_switch_config *scfg = + (struct i40e_aqc_set_switch_config *)&desc.params.raw; + enum i40e_status_code status; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_set_switch_config); + scfg->flags = CPU_TO_LE16(flags); + scfg->valid_flags = CPU_TO_LE16(valid_flags); + scfg->mode = mode; + if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) { + scfg->switch_tag = CPU_TO_LE16(hw->switch_tag); + scfg->first_tag = CPU_TO_LE16(hw->first_tag); + scfg->second_tag = CPU_TO_LE16(hw->second_tag); + } + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_aq_get_firmware_version + 
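/*
 * Editor's sketch, not upstream code: i40e_aq_get_switch_config() above
 * reports the switch elements in chunks.  A caller starts with
 * *start_seid == 0 and keeps feeding the SEID written back by the
 * function into the next request until it comes back as 0.  The
 * on-stack buffer below is only big enough for illustration; real
 * callers allocate a larger one.
 */
static enum i40e_status_code walk_switch_config_sketch(struct i40e_hw *hw)
{
	struct i40e_aqc_get_switch_config_resp resp;
	enum i40e_status_code status;
	u16 next_seid = 0;

	do {
		status = i40e_aq_get_switch_config(hw, &resp,
						   (u16)sizeof(resp),
						   &next_seid, NULL);
		if (status != I40E_SUCCESS)
			return status;
		/* consume the reported elements here */
	} while (next_seid != 0);

	return I40E_SUCCESS;
}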
* @hw: pointer to the hw struct + * @fw_major_version: firmware major version + * @fw_minor_version: firmware minor version + * @fw_build: firmware build number + * @api_major_version: major queue version + * @api_minor_version: minor queue version + * @cmd_details: pointer to command details structure or NULL + * + * Get the firmware version from the admin queue commands + **/ +enum i40e_status_code i40e_aq_get_firmware_version(struct i40e_hw *hw, + u16 *fw_major_version, u16 *fw_minor_version, + u32 *fw_build, + u16 *api_major_version, u16 *api_minor_version, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_get_version *resp = + (struct i40e_aqc_get_version *)&desc.params.raw; + enum i40e_status_code status; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_version); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + if (status == I40E_SUCCESS) { + if (fw_major_version != NULL) + *fw_major_version = LE16_TO_CPU(resp->fw_major); + if (fw_minor_version != NULL) + *fw_minor_version = LE16_TO_CPU(resp->fw_minor); + if (fw_build != NULL) + *fw_build = LE32_TO_CPU(resp->fw_build); + if (api_major_version != NULL) + *api_major_version = LE16_TO_CPU(resp->api_major); + if (api_minor_version != NULL) + *api_minor_version = LE16_TO_CPU(resp->api_minor); + + /* A workaround to fix the API version in SW */ + if (api_major_version && api_minor_version && + fw_major_version && fw_minor_version && + ((*api_major_version == 1) && (*api_minor_version == 1)) && + (((*fw_major_version == 4) && (*fw_minor_version >= 2)) || + (*fw_major_version > 4))) + *api_minor_version = 2; + } + + return status; +} + +/** + * i40e_aq_send_driver_version + * @hw: pointer to the hw struct + * @dv: driver's major, minor version + * @cmd_details: pointer to command details structure or NULL + * + * Send the driver version to the firmware + **/ +enum i40e_status_code i40e_aq_send_driver_version(struct i40e_hw *hw, + struct i40e_driver_version *dv, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_driver_version *cmd = + (struct i40e_aqc_driver_version *)&desc.params.raw; + enum i40e_status_code status; + u16 len; + + if (dv == NULL) + return I40E_ERR_PARAM; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_driver_version); + + desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD); + cmd->driver_major_ver = dv->major_version; + cmd->driver_minor_ver = dv->minor_version; + cmd->driver_build_ver = dv->build_version; + cmd->driver_subbuild_ver = dv->subbuild_version; + + len = 0; + while (len < sizeof(dv->driver_string) && + (dv->driver_string[len] < 0x80) && + dv->driver_string[len]) + len++; + status = i40e_asq_send_command(hw, &desc, dv->driver_string, + len, cmd_details); + + return status; +} + +/** + * i40e_get_link_status - get status of the HW network link + * @hw: pointer to the hw struct + * @link_up: pointer to bool (true/false = linkup/linkdown) + * + * Variable link_up true if link is up, false if link is down. 
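/*
 * Editor's sketch, not upstream code: i40e_aq_send_driver_version()
 * above measures the driver string itself rather than calling strlen(),
 * so the buffer handed to firmware ends at the first NUL byte or at the
 * first byte that is not 7-bit ASCII, whichever comes first.  The same
 * scan as a standalone helper:
 */
static u16 ascii_strnlen_sketch(const u8 *s, u16 max)
{
	u16 len = 0;

	while (len < max && s[len] && s[len] < 0x80)
		len++;

	return len;	/* "DPDK" -> 4; a byte >= 0x80 cuts the string short */
}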
+ * The variable link_up is invalid if returned value of status != I40E_SUCCESS + * + * Side effect: LinkStatusEvent reporting becomes enabled + **/ +enum i40e_status_code i40e_get_link_status(struct i40e_hw *hw, bool *link_up) +{ + enum i40e_status_code status = I40E_SUCCESS; + + if (hw->phy.get_link_info) { + status = i40e_update_link_info(hw); + + if (status != I40E_SUCCESS) + i40e_debug(hw, I40E_DEBUG_LINK, "get link failed: status %d\n", + status); + } + + *link_up = hw->phy.link_info.link_info & I40E_AQ_LINK_UP; + + return status; +} + +/** + * i40e_updatelink_status - update status of the HW network link + * @hw: pointer to the hw struct + **/ +enum i40e_status_code i40e_update_link_info(struct i40e_hw *hw) +{ + struct i40e_aq_get_phy_abilities_resp abilities; + enum i40e_status_code status = I40E_SUCCESS; + + status = i40e_aq_get_link_info(hw, true, NULL, NULL); + if (status) + return status; + + /* extra checking needed to ensure link info to user is timely */ + if ((hw->phy.link_info.link_info & I40E_AQ_MEDIA_AVAILABLE) && + ((hw->phy.link_info.link_info & I40E_AQ_LINK_UP) || + !(hw->phy.link_info_old.link_info & I40E_AQ_LINK_UP))) { + status = i40e_aq_get_phy_capabilities(hw, false, false, + &abilities, NULL); + if (status) + return status; + + if (abilities.fec_cfg_curr_mod_ext_info & + I40E_AQ_ENABLE_FEC_AUTO) + hw->phy.link_info.req_fec_info = + (I40E_AQ_REQUEST_FEC_KR | + I40E_AQ_REQUEST_FEC_RS); + else + hw->phy.link_info.req_fec_info = + abilities.fec_cfg_curr_mod_ext_info & + (I40E_AQ_REQUEST_FEC_KR | + I40E_AQ_REQUEST_FEC_RS); + + i40e_memcpy(hw->phy.link_info.module_type, &abilities.module_type, + sizeof(hw->phy.link_info.module_type), I40E_NONDMA_TO_NONDMA); + } + return status; +} + + +/** + * i40e_get_link_speed + * @hw: pointer to the hw struct + * + * Returns the link speed of the adapter. + **/ +enum i40e_aq_link_speed i40e_get_link_speed(struct i40e_hw *hw) +{ + enum i40e_aq_link_speed speed = I40E_LINK_SPEED_UNKNOWN; + enum i40e_status_code status = I40E_SUCCESS; + + if (hw->phy.get_link_info) { + status = i40e_aq_get_link_info(hw, true, NULL, NULL); + + if (status != I40E_SUCCESS) + goto i40e_link_speed_exit; + } + + speed = hw->phy.link_info.link_speed; + +i40e_link_speed_exit: + return speed; +} + +/** + * i40e_aq_add_veb - Insert a VEB between the VSI and the MAC + * @hw: pointer to the hw struct + * @uplink_seid: the MAC or other gizmo SEID + * @downlink_seid: the VSI SEID + * @enabled_tc: bitmap of TCs to be enabled + * @default_port: true for default port VSI, false for control port + * @veb_seid: pointer to where to put the resulting VEB SEID + * @enable_stats: true to turn on VEB stats + * @cmd_details: pointer to command details structure or NULL + * + * This asks the FW to add a VEB between the uplink and downlink + * elements. If the uplink SEID is 0, this will be a floating VEB. 
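/*
 * Editor's sketch, not upstream code: per the comment on
 * i40e_get_link_status() above, *link_up is only meaningful when the
 * call returns I40E_SUCCESS, so callers should check the status before
 * trusting the flag.
 */
static bool link_is_up_sketch(struct i40e_hw *hw)
{
	bool link_up = false;

	if (i40e_get_link_status(hw, &link_up) != I40E_SUCCESS)
		return false;	/* treat "unknown" as link down */

	return link_up;
}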
+ **/ +enum i40e_status_code i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid, + u16 downlink_seid, u8 enabled_tc, + bool default_port, u16 *veb_seid, + bool enable_stats, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_add_veb *cmd = + (struct i40e_aqc_add_veb *)&desc.params.raw; + struct i40e_aqc_add_veb_completion *resp = + (struct i40e_aqc_add_veb_completion *)&desc.params.raw; + enum i40e_status_code status; + u16 veb_flags = 0; + + /* SEIDs need to either both be set or both be 0 for floating VEB */ + if (!!uplink_seid != !!downlink_seid) + return I40E_ERR_PARAM; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_veb); + + cmd->uplink_seid = CPU_TO_LE16(uplink_seid); + cmd->downlink_seid = CPU_TO_LE16(downlink_seid); + cmd->enable_tcs = enabled_tc; + if (!uplink_seid) + veb_flags |= I40E_AQC_ADD_VEB_FLOATING; + if (default_port) + veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT; + else + veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DATA; + + /* reverse logic here: set the bitflag to disable the stats */ + if (!enable_stats) + veb_flags |= I40E_AQC_ADD_VEB_ENABLE_DISABLE_STATS; + + cmd->veb_flags = CPU_TO_LE16(veb_flags); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + if (!status && veb_seid) + *veb_seid = LE16_TO_CPU(resp->veb_seid); + + return status; +} + +/** + * i40e_aq_get_veb_parameters - Retrieve VEB parameters + * @hw: pointer to the hw struct + * @veb_seid: the SEID of the VEB to query + * @switch_id: the uplink switch id + * @floating: set to true if the VEB is floating + * @statistic_index: index of the stats counter block for this VEB + * @vebs_used: number of VEB's used by function + * @vebs_free: total VEB's not reserved by any function + * @cmd_details: pointer to command details structure or NULL + * + * This retrieves the parameters for a particular VEB, specified by + * uplink_seid, and returns them to the caller. 
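/*
 * Editor's note, not part of the upstream patch: i40e_aq_add_veb()
 * above rejects calls where exactly one of the two SEIDs is zero.  The
 * "!!a != !!b" expression collapses each SEID to 0 or 1, so it acts as
 * a logical XOR: both zero (floating VEB) and both non-zero are fine.
 */
static inline bool exactly_one_seid_zero_sketch(u16 uplink_seid,
						u16 downlink_seid)
{
	/* true for (0, x) or (x, 0) with x != 0; false otherwise */
	return (!!uplink_seid) != (!!downlink_seid);
}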
+ **/ +enum i40e_status_code i40e_aq_get_veb_parameters(struct i40e_hw *hw, + u16 veb_seid, u16 *switch_id, + bool *floating, u16 *statistic_index, + u16 *vebs_used, u16 *vebs_free, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_get_veb_parameters_completion *cmd_resp = + (struct i40e_aqc_get_veb_parameters_completion *) + &desc.params.raw; + enum i40e_status_code status; + + if (veb_seid == 0) + return I40E_ERR_PARAM; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_get_veb_parameters); + cmd_resp->seid = CPU_TO_LE16(veb_seid); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + if (status) + goto get_veb_exit; + + if (switch_id) + *switch_id = LE16_TO_CPU(cmd_resp->switch_id); + if (statistic_index) + *statistic_index = LE16_TO_CPU(cmd_resp->statistic_index); + if (vebs_used) + *vebs_used = LE16_TO_CPU(cmd_resp->vebs_used); + if (vebs_free) + *vebs_free = LE16_TO_CPU(cmd_resp->vebs_free); + if (floating) { + u16 flags = LE16_TO_CPU(cmd_resp->veb_flags); + + if (flags & I40E_AQC_ADD_VEB_FLOATING) + *floating = true; + else + *floating = false; + } + +get_veb_exit: + return status; +} + +/** + * i40e_aq_add_macvlan + * @hw: pointer to the hw struct + * @seid: VSI for the mac address + * @mv_list: list of macvlans to be added + * @count: length of the list + * @cmd_details: pointer to command details structure or NULL + * + * Add MAC/VLAN addresses to the HW filtering + **/ +enum i40e_status_code i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid, + struct i40e_aqc_add_macvlan_element_data *mv_list, + u16 count, struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_macvlan *cmd = + (struct i40e_aqc_macvlan *)&desc.params.raw; + enum i40e_status_code status; + u16 buf_size; + int i; + + if (count == 0 || !mv_list || !hw) + return I40E_ERR_PARAM; + + buf_size = count * sizeof(*mv_list); + + /* prep the rest of the request */ + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_macvlan); + cmd->num_addresses = CPU_TO_LE16(count); + cmd->seid[0] = CPU_TO_LE16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid); + cmd->seid[1] = 0; + cmd->seid[2] = 0; + + for (i = 0; i < count; i++) + if (I40E_IS_MULTICAST(mv_list[i].mac_addr)) + mv_list[i].flags |= + CPU_TO_LE16(I40E_AQC_MACVLAN_ADD_USE_SHARED_MAC); + + desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); + if (buf_size > I40E_AQ_LARGE_BUF) + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); + + status = i40e_asq_send_command(hw, &desc, mv_list, buf_size, + cmd_details); + + return status; +} + +/** + * i40e_aq_remove_macvlan + * @hw: pointer to the hw struct + * @seid: VSI for the mac address + * @mv_list: list of macvlans to be removed + * @count: length of the list + * @cmd_details: pointer to command details structure or NULL + * + * Remove MAC/VLAN addresses from the HW filtering + **/ +enum i40e_status_code i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid, + struct i40e_aqc_remove_macvlan_element_data *mv_list, + u16 count, struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_macvlan *cmd = + (struct i40e_aqc_macvlan *)&desc.params.raw; + enum i40e_status_code status; + u16 buf_size; + + if (count == 0 || !mv_list || !hw) + return I40E_ERR_PARAM; + + buf_size = count * sizeof(*mv_list); + + /* prep the rest of the request */ + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_macvlan); + cmd->num_addresses = CPU_TO_LE16(count); + cmd->seid[0] = 
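/*
 * Editor's sketch, not upstream code: nearly every indirect command in
 * this file repeats the same descriptor-flag pattern seen in
 * i40e_aq_add_macvlan() above: I40E_AQ_FLAG_BUF for an attached buffer,
 * I40E_AQ_FLAG_RD when the buffer carries data to the firmware, and
 * I40E_AQ_FLAG_LB once the buffer exceeds I40E_AQ_LARGE_BUF.  Factored
 * out here purely for illustration:
 */
static void set_indirect_buf_flags_sketch(struct i40e_aq_desc *desc,
					  u16 buf_size, bool host_writes)
{
	u16 flags = I40E_AQ_FLAG_BUF;

	if (host_writes)
		flags |= I40E_AQ_FLAG_RD;
	if (buf_size > I40E_AQ_LARGE_BUF)
		flags |= I40E_AQ_FLAG_LB;

	desc->flags |= CPU_TO_LE16(flags);
}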
CPU_TO_LE16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid); + cmd->seid[1] = 0; + cmd->seid[2] = 0; + + desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); + if (buf_size > I40E_AQ_LARGE_BUF) + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); + + status = i40e_asq_send_command(hw, &desc, mv_list, buf_size, + cmd_details); + + return status; +} + +/** + * i40e_mirrorrule_op - Internal helper function to add/delete mirror rule + * @hw: pointer to the hw struct + * @opcode: AQ opcode for add or delete mirror rule + * @sw_seid: Switch SEID (to which rule refers) + * @rule_type: Rule Type (ingress/egress/VLAN) + * @id: Destination VSI SEID or Rule ID + * @count: length of the list + * @mr_list: list of mirrored VSI SEIDs or VLAN IDs + * @cmd_details: pointer to command details structure or NULL + * @rule_id: Rule ID returned from FW + * @rules_used: Number of rules used in internal switch + * @rules_free: Number of rules free in internal switch + * + * Add/Delete a mirror rule to a specific switch. Mirror rules are supported for + * VEBs/VEPA elements only + **/ +static enum i40e_status_code i40e_mirrorrule_op(struct i40e_hw *hw, + u16 opcode, u16 sw_seid, u16 rule_type, u16 id, + u16 count, __le16 *mr_list, + struct i40e_asq_cmd_details *cmd_details, + u16 *rule_id, u16 *rules_used, u16 *rules_free) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_add_delete_mirror_rule *cmd = + (struct i40e_aqc_add_delete_mirror_rule *)&desc.params.raw; + struct i40e_aqc_add_delete_mirror_rule_completion *resp = + (struct i40e_aqc_add_delete_mirror_rule_completion *)&desc.params.raw; + enum i40e_status_code status; + u16 buf_size; + + buf_size = count * sizeof(*mr_list); + + /* prep the rest of the request */ + i40e_fill_default_direct_cmd_desc(&desc, opcode); + cmd->seid = CPU_TO_LE16(sw_seid); + cmd->rule_type = CPU_TO_LE16(rule_type & + I40E_AQC_MIRROR_RULE_TYPE_MASK); + cmd->num_entries = CPU_TO_LE16(count); + /* Dest VSI for add, rule_id for delete */ + cmd->destination = CPU_TO_LE16(id); + if (mr_list) { + desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | + I40E_AQ_FLAG_RD)); + if (buf_size > I40E_AQ_LARGE_BUF) + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); + } + + status = i40e_asq_send_command(hw, &desc, mr_list, buf_size, + cmd_details); + if (status == I40E_SUCCESS || + hw->aq.asq_last_status == I40E_AQ_RC_ENOSPC) { + if (rule_id) + *rule_id = LE16_TO_CPU(resp->rule_id); + if (rules_used) + *rules_used = LE16_TO_CPU(resp->mirror_rules_used); + if (rules_free) + *rules_free = LE16_TO_CPU(resp->mirror_rules_free); + } + return status; +} + +/** + * i40e_aq_add_mirrorrule - add a mirror rule + * @hw: pointer to the hw struct + * @sw_seid: Switch SEID (to which rule refers) + * @rule_type: Rule Type (ingress/egress/VLAN) + * @dest_vsi: SEID of VSI to which packets will be mirrored + * @count: length of the list + * @mr_list: list of mirrored VSI SEIDs or VLAN IDs + * @cmd_details: pointer to command details structure or NULL + * @rule_id: Rule ID returned from FW + * @rules_used: Number of rules used in internal switch + * @rules_free: Number of rules free in internal switch + * + * Add mirror rule. 
Mirror rules are supported for VEBs or VEPA elements only + **/ +enum i40e_status_code i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid, + u16 rule_type, u16 dest_vsi, u16 count, __le16 *mr_list, + struct i40e_asq_cmd_details *cmd_details, + u16 *rule_id, u16 *rules_used, u16 *rules_free) +{ + if (!(rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS || + rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS)) { + if (count == 0 || !mr_list) + return I40E_ERR_PARAM; + } + + return i40e_mirrorrule_op(hw, i40e_aqc_opc_add_mirror_rule, sw_seid, + rule_type, dest_vsi, count, mr_list, + cmd_details, rule_id, rules_used, rules_free); +} + +/** + * i40e_aq_delete_mirrorrule - delete a mirror rule + * @hw: pointer to the hw struct + * @sw_seid: Switch SEID (to which rule refers) + * @rule_type: Rule Type (ingress/egress/VLAN) + * @count: length of the list + * @rule_id: Rule ID that is returned in the receive desc as part of + * add_mirrorrule. + * @mr_list: list of mirrored VLAN IDs to be removed + * @cmd_details: pointer to command details structure or NULL + * @rules_used: Number of rules used in internal switch + * @rules_free: Number of rules free in internal switch + * + * Delete a mirror rule. Mirror rules are supported for VEBs/VEPA elements only + **/ +enum i40e_status_code i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid, + u16 rule_type, u16 rule_id, u16 count, __le16 *mr_list, + struct i40e_asq_cmd_details *cmd_details, + u16 *rules_used, u16 *rules_free) +{ + /* Rule ID has to be valid except rule_type: INGRESS VLAN mirroring */ + if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) { + /* count and mr_list shall be valid for rule_type INGRESS VLAN + * mirroring. For other rule_type, count and rule_type should + * not matter. + */ + if (count == 0 || !mr_list) + return I40E_ERR_PARAM; + } + + return i40e_mirrorrule_op(hw, i40e_aqc_opc_delete_mirror_rule, sw_seid, + rule_type, rule_id, count, mr_list, + cmd_details, NULL, rules_used, rules_free); +} + +/** + * i40e_aq_add_vlan - Add VLAN ids to the HW filtering + * @hw: pointer to the hw struct + * @seid: VSI for the vlan filters + * @v_list: list of vlan filters to be added + * @count: length of the list + * @cmd_details: pointer to command details structure or NULL + **/ +enum i40e_status_code i40e_aq_add_vlan(struct i40e_hw *hw, u16 seid, + struct i40e_aqc_add_remove_vlan_element_data *v_list, + u8 count, struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_macvlan *cmd = + (struct i40e_aqc_macvlan *)&desc.params.raw; + enum i40e_status_code status; + u16 buf_size; + + if (count == 0 || !v_list || !hw) + return I40E_ERR_PARAM; + + buf_size = count * sizeof(*v_list); + + /* prep the rest of the request */ + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_vlan); + cmd->num_addresses = CPU_TO_LE16(count); + cmd->seid[0] = CPU_TO_LE16(seid | I40E_AQC_MACVLAN_CMD_SEID_VALID); + cmd->seid[1] = 0; + cmd->seid[2] = 0; + + desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); + if (buf_size > I40E_AQ_LARGE_BUF) + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); + + status = i40e_asq_send_command(hw, &desc, v_list, buf_size, + cmd_details); + + return status; +} + +/** + * i40e_aq_remove_vlan - Remove VLANs from the HW filtering + * @hw: pointer to the hw struct + * @seid: VSI for the vlan filters + * @v_list: list of macvlans to be removed + * @count: length of the list + * @cmd_details: pointer to command details structure or NULL + **/ +enum i40e_status_code 
i40e_aq_remove_vlan(struct i40e_hw *hw, u16 seid, + struct i40e_aqc_add_remove_vlan_element_data *v_list, + u8 count, struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_macvlan *cmd = + (struct i40e_aqc_macvlan *)&desc.params.raw; + enum i40e_status_code status; + u16 buf_size; + + if (count == 0 || !v_list || !hw) + return I40E_ERR_PARAM; + + buf_size = count * sizeof(*v_list); + + /* prep the rest of the request */ + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_vlan); + cmd->num_addresses = CPU_TO_LE16(count); + cmd->seid[0] = CPU_TO_LE16(seid | I40E_AQC_MACVLAN_CMD_SEID_VALID); + cmd->seid[1] = 0; + cmd->seid[2] = 0; + + desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); + if (buf_size > I40E_AQ_LARGE_BUF) + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); + + status = i40e_asq_send_command(hw, &desc, v_list, buf_size, + cmd_details); + + return status; +} + +/** + * i40e_aq_send_msg_to_vf + * @hw: pointer to the hardware structure + * @vfid: vf id to send msg + * @v_opcode: opcodes for VF-PF communication + * @v_retval: return error code + * @msg: pointer to the msg buffer + * @msglen: msg length + * @cmd_details: pointer to command details + * + * send msg to vf + **/ +enum i40e_status_code i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid, + u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_pf_vf_message *cmd = + (struct i40e_aqc_pf_vf_message *)&desc.params.raw; + enum i40e_status_code status; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_vf); + cmd->id = CPU_TO_LE32(vfid); + desc.cookie_high = CPU_TO_LE32(v_opcode); + desc.cookie_low = CPU_TO_LE32(v_retval); + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_SI); + if (msglen) { + desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | + I40E_AQ_FLAG_RD)); + if (msglen > I40E_AQ_LARGE_BUF) + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); + desc.datalen = CPU_TO_LE16(msglen); + } + status = i40e_asq_send_command(hw, &desc, msg, msglen, cmd_details); + + return status; +} + +/** + * i40e_aq_debug_read_register + * @hw: pointer to the hw struct + * @reg_addr: register address + * @reg_val: register value + * @cmd_details: pointer to command details structure or NULL + * + * Read the register using the admin queue commands + **/ +enum i40e_status_code i40e_aq_debug_read_register(struct i40e_hw *hw, + u32 reg_addr, u64 *reg_val, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_debug_reg_read_write *cmd_resp = + (struct i40e_aqc_debug_reg_read_write *)&desc.params.raw; + enum i40e_status_code status; + + if (reg_val == NULL) + return I40E_ERR_PARAM; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_read_reg); + + cmd_resp->address = CPU_TO_LE32(reg_addr); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + if (status == I40E_SUCCESS) { + *reg_val = ((u64)LE32_TO_CPU(cmd_resp->value_high) << 32) | + (u64)LE32_TO_CPU(cmd_resp->value_low); + } + + return status; +} + +/** + * i40e_aq_debug_write_register + * @hw: pointer to the hw struct + * @reg_addr: register address + * @reg_val: register value + * @cmd_details: pointer to command details structure or NULL + * + * Write to a register using the admin queue commands + **/ +enum i40e_status_code i40e_aq_debug_write_register(struct i40e_hw *hw, + u32 reg_addr, u64 reg_val, + struct i40e_asq_cmd_details *cmd_details) +{ + struct 
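/*
 * Editor's sketch, not upstream code: i40e_aq_debug_read_register()
 * above already reassembles the 64-bit value from the value_high and
 * value_low halves of the completion, so a caller only ever sees one
 * u64.  A trivial wrapper that treats failures as "no value":
 */
static u64 debug_read_csr64_sketch(struct i40e_hw *hw, u32 reg_addr)
{
	u64 val = 0;

	if (i40e_aq_debug_read_register(hw, reg_addr, &val, NULL) !=
	    I40E_SUCCESS)
		return 0;	/* placeholder "unavailable" value */

	return val;
}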
i40e_aq_desc desc; + struct i40e_aqc_debug_reg_read_write *cmd = + (struct i40e_aqc_debug_reg_read_write *)&desc.params.raw; + enum i40e_status_code status; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_write_reg); + + cmd->address = CPU_TO_LE32(reg_addr); + cmd->value_high = CPU_TO_LE32((u32)(reg_val >> 32)); + cmd->value_low = CPU_TO_LE32((u32)(reg_val & 0xFFFFFFFF)); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_aq_request_resource + * @hw: pointer to the hw struct + * @resource: resource id + * @access: access type + * @sdp_number: resource number + * @timeout: the maximum time in ms that the driver may hold the resource + * @cmd_details: pointer to command details structure or NULL + * + * requests common resource using the admin queue commands + **/ +enum i40e_status_code i40e_aq_request_resource(struct i40e_hw *hw, + enum i40e_aq_resources_ids resource, + enum i40e_aq_resource_access_type access, + u8 sdp_number, u64 *timeout, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_request_resource *cmd_resp = + (struct i40e_aqc_request_resource *)&desc.params.raw; + enum i40e_status_code status; + + DEBUGFUNC("i40e_aq_request_resource"); + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_request_resource); + + cmd_resp->resource_id = CPU_TO_LE16(resource); + cmd_resp->access_type = CPU_TO_LE16(access); + cmd_resp->resource_number = CPU_TO_LE32(sdp_number); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + /* The completion specifies the maximum time in ms that the driver + * may hold the resource in the Timeout field. + * If the resource is held by someone else, the command completes with + * busy return value and the timeout field indicates the maximum time + * the current owner of the resource has to free it. 
+ */ + if (status == I40E_SUCCESS || hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) + *timeout = LE32_TO_CPU(cmd_resp->timeout); + + return status; +} + +/** + * i40e_aq_release_resource + * @hw: pointer to the hw struct + * @resource: resource id + * @sdp_number: resource number + * @cmd_details: pointer to command details structure or NULL + * + * release common resource using the admin queue commands + **/ +enum i40e_status_code i40e_aq_release_resource(struct i40e_hw *hw, + enum i40e_aq_resources_ids resource, + u8 sdp_number, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_request_resource *cmd = + (struct i40e_aqc_request_resource *)&desc.params.raw; + enum i40e_status_code status; + + DEBUGFUNC("i40e_aq_release_resource"); + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_release_resource); + + cmd->resource_id = CPU_TO_LE16(resource); + cmd->resource_number = CPU_TO_LE32(sdp_number); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_aq_read_nvm + * @hw: pointer to the hw struct + * @module_pointer: module pointer location in words from the NVM beginning + * @offset: byte offset from the module beginning + * @length: length of the section to be read (in bytes from the offset) + * @data: command buffer (size [bytes] = length) + * @last_command: tells if this is the last command in a series + * @cmd_details: pointer to command details structure or NULL + * + * Read the NVM using the admin queue commands + **/ +enum i40e_status_code i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer, + u32 offset, u16 length, void *data, + bool last_command, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_nvm_update *cmd = + (struct i40e_aqc_nvm_update *)&desc.params.raw; + enum i40e_status_code status; + + DEBUGFUNC("i40e_aq_read_nvm"); + + /* In offset the highest byte must be zeroed. */ + if (offset & 0xFF000000) { + status = I40E_ERR_PARAM; + goto i40e_aq_read_nvm_exit; + } + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_read); + + /* If this is the last command in a series, set the proper flag. 
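/*
 * Editor's sketch, not upstream code: a hypothetical user of the
 * request/release pair above.  On success, *timeout is how long this
 * driver may hold the resource; on I40E_AQ_RC_EBUSY it is how long the
 * current owner may still hold it, so a caller could sleep and retry.
 * I40E_NVM_RESOURCE_ID is assumed here to be the resource id from
 * i40e_type.h, and sdp_number 0 is a placeholder.
 */
static enum i40e_status_code hold_nvm_resource_sketch(struct i40e_hw *hw)
{
	enum i40e_status_code status;
	u64 timeout_ms = 0;

	status = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID,
					  I40E_RESOURCE_READ, 0,
					  &timeout_ms, NULL);
	if (status != I40E_SUCCESS)
		return status;	/* on EBUSY, timeout_ms hints when to retry */

	/* ... read the NVM with i40e_aq_read_nvm() here ... */

	return i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
}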
*/ + if (last_command) + cmd->command_flags |= I40E_AQ_NVM_LAST_CMD; + cmd->module_pointer = module_pointer; + cmd->offset = CPU_TO_LE32(offset); + cmd->length = CPU_TO_LE16(length); + + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); + if (length > I40E_AQ_LARGE_BUF) + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); + + status = i40e_asq_send_command(hw, &desc, data, length, cmd_details); + +i40e_aq_read_nvm_exit: + return status; +} + +/** + * i40e_aq_read_nvm_config - read an nvm config block + * @hw: pointer to the hw struct + * @cmd_flags: NVM access admin command bits + * @field_id: field or feature id + * @data: buffer for result + * @buf_size: buffer size + * @element_count: pointer to count of elements read by FW + * @cmd_details: pointer to command details structure or NULL + **/ +enum i40e_status_code i40e_aq_read_nvm_config(struct i40e_hw *hw, + u8 cmd_flags, u32 field_id, void *data, + u16 buf_size, u16 *element_count, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_nvm_config_read *cmd = + (struct i40e_aqc_nvm_config_read *)&desc.params.raw; + enum i40e_status_code status; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_config_read); + desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF)); + if (buf_size > I40E_AQ_LARGE_BUF) + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); + + cmd->cmd_flags = CPU_TO_LE16(cmd_flags); + cmd->element_id = CPU_TO_LE16((u16)(0xffff & field_id)); + if (cmd_flags & I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_MASK) + cmd->element_id_msw = CPU_TO_LE16((u16)(field_id >> 16)); + else + cmd->element_id_msw = 0; + + status = i40e_asq_send_command(hw, &desc, data, buf_size, cmd_details); + + if (!status && element_count) + *element_count = LE16_TO_CPU(cmd->element_count); + + return status; +} + +/** + * i40e_aq_write_nvm_config - write an nvm config block + * @hw: pointer to the hw struct + * @cmd_flags: NVM access admin command bits + * @data: buffer for result + * @buf_size: buffer size + * @element_count: count of elements to be written + * @cmd_details: pointer to command details structure or NULL + **/ +enum i40e_status_code i40e_aq_write_nvm_config(struct i40e_hw *hw, + u8 cmd_flags, void *data, u16 buf_size, + u16 element_count, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_nvm_config_write *cmd = + (struct i40e_aqc_nvm_config_write *)&desc.params.raw; + enum i40e_status_code status; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_config_write); + desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); + if (buf_size > I40E_AQ_LARGE_BUF) + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); + + cmd->element_count = CPU_TO_LE16(element_count); + cmd->cmd_flags = CPU_TO_LE16(cmd_flags); + status = i40e_asq_send_command(hw, &desc, data, buf_size, cmd_details); + + return status; +} + +/** + * i40e_aq_oem_post_update - triggers an OEM specific flow after update + * @hw: pointer to the hw struct + * @buff: buffer for result + * @buff_size: buffer size + * @cmd_details: pointer to command details structure or NULL + **/ +enum i40e_status_code i40e_aq_oem_post_update(struct i40e_hw *hw, + void *buff, u16 buff_size, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + enum i40e_status_code status; + + UNREFERENCED_2PARAMETER(buff, buff_size); + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_oem_post_update); + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + if (status && 
LE16_TO_CPU(desc.retval) == I40E_AQ_RC_ESRCH) + status = I40E_ERR_NOT_IMPLEMENTED; + + return status; +} + +/** + * i40e_aq_erase_nvm + * @hw: pointer to the hw struct + * @module_pointer: module pointer location in words from the NVM beginning + * @offset: offset in the module (expressed in 4 KB from module's beginning) + * @length: length of the section to be erased (expressed in 4 KB) + * @last_command: tells if this is the last command in a series + * @cmd_details: pointer to command details structure or NULL + * + * Erase the NVM sector using the admin queue commands + **/ +enum i40e_status_code i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer, + u32 offset, u16 length, bool last_command, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_nvm_update *cmd = + (struct i40e_aqc_nvm_update *)&desc.params.raw; + enum i40e_status_code status; + + DEBUGFUNC("i40e_aq_erase_nvm"); + + /* In offset the highest byte must be zeroed. */ + if (offset & 0xFF000000) { + status = I40E_ERR_PARAM; + goto i40e_aq_erase_nvm_exit; + } + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_erase); + + /* If this is the last command in a series, set the proper flag. */ + if (last_command) + cmd->command_flags |= I40E_AQ_NVM_LAST_CMD; + cmd->module_pointer = module_pointer; + cmd->offset = CPU_TO_LE32(offset); + cmd->length = CPU_TO_LE16(length); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + +i40e_aq_erase_nvm_exit: + return status; +} + +/** + * i40e_parse_discover_capabilities + * @hw: pointer to the hw struct + * @buff: pointer to a buffer containing device/function capability records + * @cap_count: number of capability records in the list + * @list_type_opc: type of capabilities list to parse + * + * Parse the device/function capabilities list. 
+ **/ +STATIC void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff, + u32 cap_count, + enum i40e_admin_queue_opc list_type_opc) +{ + struct i40e_aqc_list_capabilities_element_resp *cap; + u32 valid_functions, num_functions; + u32 number, logical_id, phys_id; + struct i40e_hw_capabilities *p; + enum i40e_status_code status; + u16 id, ocp_cfg_word0; + u8 major_rev; + u32 i = 0; + + cap = (struct i40e_aqc_list_capabilities_element_resp *) buff; + + if (list_type_opc == i40e_aqc_opc_list_dev_capabilities) + p = (struct i40e_hw_capabilities *)&hw->dev_caps; + else if (list_type_opc == i40e_aqc_opc_list_func_capabilities) + p = (struct i40e_hw_capabilities *)&hw->func_caps; + else + return; + + for (i = 0; i < cap_count; i++, cap++) { + id = LE16_TO_CPU(cap->id); + number = LE32_TO_CPU(cap->number); + logical_id = LE32_TO_CPU(cap->logical_id); + phys_id = LE32_TO_CPU(cap->phys_id); + major_rev = cap->major_rev; + + switch (id) { + case I40E_AQ_CAP_ID_SWITCH_MODE: + p->switch_mode = number; + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: Switch mode = %d\n", + p->switch_mode); + break; + case I40E_AQ_CAP_ID_MNG_MODE: + p->management_mode = number; + if (major_rev > 1) { + p->mng_protocols_over_mctp = logical_id; + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: Protocols over MCTP = %d\n", + p->mng_protocols_over_mctp); + } else { + p->mng_protocols_over_mctp = 0; + } + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: Management Mode = %d\n", + p->management_mode); + break; + case I40E_AQ_CAP_ID_NPAR_ACTIVE: + p->npar_enable = number; + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: NPAR enable = %d\n", + p->npar_enable); + break; + case I40E_AQ_CAP_ID_OS2BMC_CAP: + p->os2bmc = number; + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: OS2BMC = %d\n", p->os2bmc); + break; + case I40E_AQ_CAP_ID_FUNCTIONS_VALID: + p->valid_functions = number; + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: Valid Functions = %d\n", + p->valid_functions); + break; + case I40E_AQ_CAP_ID_SRIOV: + if (number == 1) + p->sr_iov_1_1 = true; + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: SR-IOV = %d\n", + p->sr_iov_1_1); + break; + case I40E_AQ_CAP_ID_VF: + p->num_vfs = number; + p->vf_base_id = logical_id; + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: VF count = %d\n", + p->num_vfs); + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: VF base_id = %d\n", + p->vf_base_id); + break; + case I40E_AQ_CAP_ID_VMDQ: + if (number == 1) + p->vmdq = true; + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: VMDQ = %d\n", p->vmdq); + break; + case I40E_AQ_CAP_ID_8021QBG: + if (number == 1) + p->evb_802_1_qbg = true; + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: 802.1Qbg = %d\n", number); + break; + case I40E_AQ_CAP_ID_8021QBR: + if (number == 1) + p->evb_802_1_qbh = true; + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: 802.1Qbh = %d\n", number); + break; + case I40E_AQ_CAP_ID_VSI: + p->num_vsis = number; + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: VSI count = %d\n", + p->num_vsis); + break; + case I40E_AQ_CAP_ID_DCB: + if (number == 1) { + p->dcb = true; + p->enabled_tcmap = logical_id; + p->maxtc = phys_id; + } + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: DCB = %d\n", p->dcb); + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: TC Mapping = %d\n", + logical_id); + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: TC Max = %d\n", p->maxtc); + break; + case I40E_AQ_CAP_ID_FCOE: + if (number == 1) + p->fcoe = true; + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: FCOE 
= %d\n", p->fcoe); + break; + case I40E_AQ_CAP_ID_ISCSI: + if (number == 1) + p->iscsi = true; + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: iSCSI = %d\n", p->iscsi); + break; + case I40E_AQ_CAP_ID_RSS: + p->rss = true; + p->rss_table_size = number; + p->rss_table_entry_width = logical_id; + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: RSS = %d\n", p->rss); + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: RSS table size = %d\n", + p->rss_table_size); + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: RSS table width = %d\n", + p->rss_table_entry_width); + break; + case I40E_AQ_CAP_ID_RXQ: + p->num_rx_qp = number; + p->base_queue = phys_id; + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: Rx QP = %d\n", number); + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: base_queue = %d\n", + p->base_queue); + break; + case I40E_AQ_CAP_ID_TXQ: + p->num_tx_qp = number; + p->base_queue = phys_id; + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: Tx QP = %d\n", number); + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: base_queue = %d\n", + p->base_queue); + break; + case I40E_AQ_CAP_ID_MSIX: + p->num_msix_vectors = number; + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: MSIX vector count = %d\n", + p->num_msix_vectors); + break; + case I40E_AQ_CAP_ID_VF_MSIX: + p->num_msix_vectors_vf = number; + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: MSIX VF vector count = %d\n", + p->num_msix_vectors_vf); + break; + case I40E_AQ_CAP_ID_FLEX10: + if (major_rev == 1) { + if (number == 1) { + p->flex10_enable = true; + p->flex10_capable = true; + } + } else { + /* Capability revision >= 2 */ + if (number & 1) + p->flex10_enable = true; + if (number & 2) + p->flex10_capable = true; + } + p->flex10_mode = logical_id; + p->flex10_status = phys_id; + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: Flex10 mode = %d\n", + p->flex10_mode); + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: Flex10 status = %d\n", + p->flex10_status); + break; + case I40E_AQ_CAP_ID_CEM: + if (number == 1) + p->mgmt_cem = true; + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: CEM = %d\n", p->mgmt_cem); + break; + case I40E_AQ_CAP_ID_IWARP: + if (number == 1) + p->iwarp = true; + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: iWARP = %d\n", p->iwarp); + break; + case I40E_AQ_CAP_ID_LED: + if (phys_id < I40E_HW_CAP_MAX_GPIO) + p->led[phys_id] = true; + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: LED - PIN %d\n", phys_id); + break; + case I40E_AQ_CAP_ID_SDP: + if (phys_id < I40E_HW_CAP_MAX_GPIO) + p->sdp[phys_id] = true; + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: SDP - PIN %d\n", phys_id); + break; + case I40E_AQ_CAP_ID_MDIO: + if (number == 1) { + p->mdio_port_num = phys_id; + p->mdio_port_mode = logical_id; + } + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: MDIO port number = %d\n", + p->mdio_port_num); + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: MDIO port mode = %d\n", + p->mdio_port_mode); + break; + case I40E_AQ_CAP_ID_1588: + if (number == 1) + p->ieee_1588 = true; + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: IEEE 1588 = %d\n", + p->ieee_1588); + break; + case I40E_AQ_CAP_ID_FLOW_DIRECTOR: + p->fd = true; + p->fd_filters_guaranteed = number; + p->fd_filters_best_effort = logical_id; + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: Flow Director = 1\n"); + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: Guaranteed FD filters = %d\n", + p->fd_filters_guaranteed); + break; + case I40E_AQ_CAP_ID_WSR_PROT: + p->wr_csr_prot = (u64)number; + p->wr_csr_prot |= 
(u64)logical_id << 32; + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: wr_csr_prot = 0x%llX\n\n", + (p->wr_csr_prot & 0xffff)); + break; + case I40E_AQ_CAP_ID_NVM_MGMT: + if (number & I40E_NVM_MGMT_SEC_REV_DISABLED) + p->sec_rev_disabled = true; + if (number & I40E_NVM_MGMT_UPDATE_DISABLED) + p->update_disabled = true; + break; + case I40E_AQ_CAP_ID_WOL_AND_PROXY: + hw->num_wol_proxy_filters = (u16)number; + hw->wol_proxy_vsi_seid = (u16)logical_id; + p->apm_wol_support = phys_id & I40E_WOL_SUPPORT_MASK; + if (phys_id & I40E_ACPI_PROGRAMMING_METHOD_MASK) + p->acpi_prog_method = I40E_ACPI_PROGRAMMING_METHOD_AQC_FPK; + else + p->acpi_prog_method = I40E_ACPI_PROGRAMMING_METHOD_HW_FVL; + p->proxy_support = (phys_id & I40E_PROXY_SUPPORT_MASK) ? 1 : 0; + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: WOL proxy filters = %d\n", + hw->num_wol_proxy_filters); + break; + default: + break; + } + } + + if (p->fcoe) + i40e_debug(hw, I40E_DEBUG_ALL, "device is FCoE capable\n"); + + /* Always disable FCoE if compiled without the I40E_FCOE_ENA flag */ + p->fcoe = false; + + /* count the enabled ports (aka the "not disabled" ports) */ + hw->num_ports = 0; + for (i = 0; i < 4; i++) { + u32 port_cfg_reg = I40E_PRTGEN_CNF + (4 * i); + u64 port_cfg = 0; + + /* use AQ read to get the physical register offset instead + * of the port relative offset + */ + i40e_aq_debug_read_register(hw, port_cfg_reg, &port_cfg, NULL); + if (!(port_cfg & I40E_PRTGEN_CNF_PORT_DIS_MASK)) + hw->num_ports++; + } + + /* OCP cards case: if a mezz is removed the ethernet port is at + * disabled state in PRTGEN_CNF register. Additional NVM read is + * needed in order to check if we are dealing with OCP card. + * Those cards have 4 PFs at minimum, so using PRTGEN_CNF for counting + * physical ports results in wrong partition id calculation and thus + * not supporting WoL. 
+ */ + if (hw->mac.type == I40E_MAC_X722) { + if (i40e_acquire_nvm(hw, I40E_RESOURCE_READ) == I40E_SUCCESS) { + status = i40e_aq_read_nvm(hw, I40E_SR_EMP_MODULE_PTR, + 2 * I40E_SR_OCP_CFG_WORD0, + sizeof(ocp_cfg_word0), + &ocp_cfg_word0, true, NULL); + if (status == I40E_SUCCESS && + (ocp_cfg_word0 & I40E_SR_OCP_ENABLED)) + hw->num_ports = 4; + i40e_release_nvm(hw); + } + } + + valid_functions = p->valid_functions; + num_functions = 0; + while (valid_functions) { + if (valid_functions & 1) + num_functions++; + valid_functions >>= 1; + } + + /* partition id is 1-based, and functions are evenly spread + * across the ports as partitions + */ + if (hw->num_ports != 0) { + hw->partition_id = (hw->pf_id / hw->num_ports) + 1; + hw->num_partitions = num_functions / hw->num_ports; + } + + /* additional HW specific goodies that might + * someday be HW version specific + */ + p->rx_buf_chain_len = I40E_MAX_CHAINED_RX_BUFFERS; +} + +/** + * i40e_aq_discover_capabilities + * @hw: pointer to the hw struct + * @buff: a virtual buffer to hold the capabilities + * @buff_size: Size of the virtual buffer + * @data_size: Size of the returned data, or buff size needed if AQ err==ENOMEM + * @list_type_opc: capabilities type to discover - pass in the command opcode + * @cmd_details: pointer to command details structure or NULL + * + * Get the device capabilities descriptions from the firmware + **/ +enum i40e_status_code i40e_aq_discover_capabilities(struct i40e_hw *hw, + void *buff, u16 buff_size, u16 *data_size, + enum i40e_admin_queue_opc list_type_opc, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aqc_list_capabilites *cmd; + struct i40e_aq_desc desc; + enum i40e_status_code status = I40E_SUCCESS; + + cmd = (struct i40e_aqc_list_capabilites *)&desc.params.raw; + + if (list_type_opc != i40e_aqc_opc_list_func_capabilities && + list_type_opc != i40e_aqc_opc_list_dev_capabilities) { + status = I40E_ERR_PARAM; + goto exit; + } + + i40e_fill_default_direct_cmd_desc(&desc, list_type_opc); + + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); + if (buff_size > I40E_AQ_LARGE_BUF) + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); + + status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); + *data_size = LE16_TO_CPU(desc.datalen); + + if (status) + goto exit; + + i40e_parse_discover_capabilities(hw, buff, LE32_TO_CPU(cmd->count), + list_type_opc); + +exit: + return status; +} + +/** + * i40e_aq_update_nvm + * @hw: pointer to the hw struct + * @module_pointer: module pointer location in words from the NVM beginning + * @offset: byte offset from the module beginning + * @length: length of the section to be written (in bytes from the offset) + * @data: command buffer (size [bytes] = length) + * @last_command: tells if this is the last command in a series + * @preservation_flags: Preservation mode flags + * @cmd_details: pointer to command details structure or NULL + * + * Update the NVM using the admin queue commands + **/ +enum i40e_status_code i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer, + u32 offset, u16 length, void *data, + bool last_command, u8 preservation_flags, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_nvm_update *cmd = + (struct i40e_aqc_nvm_update *)&desc.params.raw; + enum i40e_status_code status; + + DEBUGFUNC("i40e_aq_update_nvm"); + + /* In offset the highest byte must be zeroed. 
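/*
 * Editor's note, not part of the upstream patch: a worked example of
 * the partition math above.  With valid_functions == 0xFF (eight
 * enabled functions) spread over num_ports == 4, a PF with pf_id == 5
 * lands in partition (5 / 4) + 1 == 2, and each port carries
 * 8 / 4 == 2 partitions.  The while loop that derives num_functions is
 * an ordinary population count:
 */
static u32 count_set_bits_sketch(u32 valid_functions)
{
	u32 num_functions = 0;

	while (valid_functions) {
		if (valid_functions & 1)
			num_functions++;
		valid_functions >>= 1;
	}

	return num_functions;	/* 0xFF -> 8 */
}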
*/ + if (offset & 0xFF000000) { + status = I40E_ERR_PARAM; + goto i40e_aq_update_nvm_exit; + } + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update); + + /* If this is the last command in a series, set the proper flag. */ + if (last_command) + cmd->command_flags |= I40E_AQ_NVM_LAST_CMD; + if (hw->mac.type == I40E_MAC_X722) { + if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_SELECTED) + cmd->command_flags |= + (I40E_AQ_NVM_PRESERVATION_FLAGS_SELECTED << + I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT); + else if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_ALL) + cmd->command_flags |= + (I40E_AQ_NVM_PRESERVATION_FLAGS_ALL << + I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT); + } + cmd->module_pointer = module_pointer; + cmd->offset = CPU_TO_LE32(offset); + cmd->length = CPU_TO_LE16(length); + + desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); + if (length > I40E_AQ_LARGE_BUF) + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); + + status = i40e_asq_send_command(hw, &desc, data, length, cmd_details); + +i40e_aq_update_nvm_exit: + return status; +} + +/** + * i40e_aq_rearrange_nvm + * @hw: pointer to the hw struct + * @rearrange_nvm: defines direction of rearrangement + * @cmd_details: pointer to command details structure or NULL + * + * Rearrange NVM structure, available only for transition FW + **/ +enum i40e_status_code i40e_aq_rearrange_nvm(struct i40e_hw *hw, + u8 rearrange_nvm, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aqc_nvm_update *cmd; + enum i40e_status_code status; + struct i40e_aq_desc desc; + + DEBUGFUNC("i40e_aq_rearrange_nvm"); + + cmd = (struct i40e_aqc_nvm_update *)&desc.params.raw; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update); + + rearrange_nvm &= (I40E_AQ_NVM_REARRANGE_TO_FLAT | + I40E_AQ_NVM_REARRANGE_TO_STRUCT); + + if (!rearrange_nvm) { + status = I40E_ERR_PARAM; + goto i40e_aq_rearrange_nvm_exit; + } + + cmd->command_flags |= rearrange_nvm; + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + +i40e_aq_rearrange_nvm_exit: + return status; +} + +/** + * i40e_aq_nvm_progress + * @hw: pointer to the hw struct + * @progress: pointer to progress returned from AQ + * @cmd_details: pointer to command details structure or NULL + * + * Gets progress of flash rearrangement process + **/ +enum i40e_status_code i40e_aq_nvm_progress(struct i40e_hw *hw, u8 *progress, + struct i40e_asq_cmd_details *cmd_details) +{ + enum i40e_status_code status; + struct i40e_aq_desc desc; + + DEBUGFUNC("i40e_aq_nvm_progress"); + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_progress); + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + *progress = desc.params.raw[0]; + return status; +} + +/** + * i40e_aq_get_lldp_mib + * @hw: pointer to the hw struct + * @bridge_type: type of bridge requested + * @mib_type: Local, Remote or both Local and Remote MIBs + * @buff: pointer to a user supplied buffer to store the MIB block + * @buff_size: size of the buffer (in bytes) + * @local_len : length of the returned Local LLDP MIB + * @remote_len: length of the returned Remote LLDP MIB + * @cmd_details: pointer to command details structure or NULL + * + * Requests the complete LLDP MIB (entire packet). 
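+ *
+ * Usage sketch (illustrative only; it assumes the usual MIB-type and
+ * bridge-type constants from i40e_adminq_cmd.h):
+ *
+ *    u8 mib[1024];
+ *    u16 local_len = 0, remote_len = 0;
+ *    enum i40e_status_code ret;
+ *
+ *    ret = i40e_aq_get_lldp_mib(hw, I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
+ *                               I40E_AQ_LLDP_MIB_LOCAL, mib, sizeof(mib),
+ *                               &local_len, &remote_len, NULL);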
+ **/ +enum i40e_status_code i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type, + u8 mib_type, void *buff, u16 buff_size, + u16 *local_len, u16 *remote_len, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_lldp_get_mib *cmd = + (struct i40e_aqc_lldp_get_mib *)&desc.params.raw; + struct i40e_aqc_lldp_get_mib *resp = + (struct i40e_aqc_lldp_get_mib *)&desc.params.raw; + enum i40e_status_code status; + + if (buff_size == 0 || !buff) + return I40E_ERR_PARAM; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_get_mib); + /* Indirect Command */ + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); + + cmd->type = mib_type & I40E_AQ_LLDP_MIB_TYPE_MASK; + cmd->type |= ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) & + I40E_AQ_LLDP_BRIDGE_TYPE_MASK); + + desc.datalen = CPU_TO_LE16(buff_size); + + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); + if (buff_size > I40E_AQ_LARGE_BUF) + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); + + status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); + if (!status) { + if (local_len != NULL) + *local_len = LE16_TO_CPU(resp->local_len); + if (remote_len != NULL) + *remote_len = LE16_TO_CPU(resp->remote_len); + } + + return status; +} + + /** + * i40e_aq_set_lldp_mib - Set the LLDP MIB + * @hw: pointer to the hw struct + * @mib_type: Local, Remote or both Local and Remote MIBs + * @buff: pointer to a user supplied buffer to store the MIB block + * @buff_size: size of the buffer (in bytes) + * @cmd_details: pointer to command details structure or NULL + * + * Set the LLDP MIB. + **/ +enum i40e_status_code i40e_aq_set_lldp_mib(struct i40e_hw *hw, + u8 mib_type, void *buff, u16 buff_size, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_lldp_set_local_mib *cmd = + (struct i40e_aqc_lldp_set_local_mib *)&desc.params.raw; + enum i40e_status_code status; + + if (buff_size == 0 || !buff) + return I40E_ERR_PARAM; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_lldp_set_local_mib); + /* Indirect Command */ + desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); + if (buff_size > I40E_AQ_LARGE_BUF) + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); + desc.datalen = CPU_TO_LE16(buff_size); + + cmd->type = mib_type; + cmd->length = CPU_TO_LE16(buff_size); + cmd->address_high = CPU_TO_LE32(I40E_HI_DWORD((u64)buff)); + cmd->address_low = CPU_TO_LE32(I40E_LO_DWORD((u64)buff)); + + status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); + return status; +} + +/** + * i40e_aq_cfg_lldp_mib_change_event + * @hw: pointer to the hw struct + * @enable_update: Enable or Disable event posting + * @cmd_details: pointer to command details structure or NULL + * + * Enable or Disable posting of an event on ARQ when LLDP MIB + * associated with the interface changes + **/ +enum i40e_status_code i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw, + bool enable_update, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_lldp_update_mib *cmd = + (struct i40e_aqc_lldp_update_mib *)&desc.params.raw; + enum i40e_status_code status; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_update_mib); + + if (!enable_update) + cmd->command |= I40E_AQ_LLDP_MIB_UPDATE_DISABLE; + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_aq_restore_lldp + * @hw: pointer to the hw struct + * @setting: pointer to factory setting variable or 
NULL + * @restore: True if factory settings should be restored + * @cmd_details: pointer to command details structure or NULL + * + * Restore LLDP Agent factory settings if @restore set to True. In other case + * only returns factory setting in AQ response. + **/ +enum i40e_status_code +i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_lldp_restore *cmd = + (struct i40e_aqc_lldp_restore *)&desc.params.raw; + enum i40e_status_code status; + + if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)) { + i40e_debug(hw, I40E_DEBUG_ALL, + "Restore LLDP not supported by current FW version.\n"); + return I40E_ERR_DEVICE_NOT_SUPPORTED; + } + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_restore); + + if (restore) + cmd->command |= I40E_AQ_LLDP_AGENT_RESTORE; + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + if (setting) + *setting = cmd->command & 1; + + return status; +} + +/** + * i40e_aq_stop_lldp + * @hw: pointer to the hw struct + * @shutdown_agent: True if LLDP Agent needs to be Shutdown + * @persist: True if stop of LLDP should be persistent across power cycles + * @cmd_details: pointer to command details structure or NULL + * + * Stop or Shutdown the embedded LLDP Agent + **/ +enum i40e_status_code i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent, + bool persist, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_lldp_stop *cmd = + (struct i40e_aqc_lldp_stop *)&desc.params.raw; + enum i40e_status_code status; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_stop); + + if (shutdown_agent) + cmd->command |= I40E_AQ_LLDP_AGENT_SHUTDOWN; + + if (persist) { + if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT) + cmd->command |= I40E_AQ_LLDP_AGENT_STOP_PERSIST; + else + i40e_debug(hw, I40E_DEBUG_ALL, + "Persistent Stop LLDP not supported by current FW version.\n"); + } + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_aq_start_lldp + * @hw: pointer to the hw struct + * @persist: True if start of LLDP should be persistent across power cycles + * @cmd_details: pointer to command details structure or NULL + * + * Start the embedded LLDP Agent on all ports. 
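+ *
+ * Usage sketch (illustrative only): start the agent and, on FW that
+ * advertises I40E_HW_FLAG_FW_LLDP_PERSISTENT, keep it running across
+ * power cycles:
+ *
+ *    enum i40e_status_code ret = i40e_aq_start_lldp(hw, true, NULL);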
+ **/ +enum i40e_status_code i40e_aq_start_lldp(struct i40e_hw *hw, + bool persist, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_lldp_start *cmd = + (struct i40e_aqc_lldp_start *)&desc.params.raw; + enum i40e_status_code status; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_start); + + cmd->command = I40E_AQ_LLDP_AGENT_START; + + if (persist) { + if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT) + cmd->command |= I40E_AQ_LLDP_AGENT_START_PERSIST; + else + i40e_debug(hw, I40E_DEBUG_ALL, + "Persistent Start LLDP not supported by current FW version.\n"); + } + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_aq_set_dcb_parameters + * @hw: pointer to the hw struct + * @cmd_details: pointer to command details structure or NULL + * @dcb_enable: True if DCB configuration needs to be applied + * + **/ +enum i40e_status_code +i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_set_dcb_parameters *cmd = + (struct i40e_aqc_set_dcb_parameters *)&desc.params.raw; + enum i40e_status_code status; + + if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE)) + return I40E_ERR_DEVICE_NOT_SUPPORTED; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_set_dcb_parameters); + + if (dcb_enable) { + cmd->valid_flags = I40E_DCB_VALID; + cmd->command = I40E_AQ_DCB_SET_AGENT; + } + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_aq_get_cee_dcb_config + * @hw: pointer to the hw struct + * @buff: response buffer that stores CEE operational configuration + * @buff_size: size of the buffer passed + * @cmd_details: pointer to command details structure or NULL + * + * Get CEE DCBX mode operational configuration from firmware + **/ +enum i40e_status_code i40e_aq_get_cee_dcb_config(struct i40e_hw *hw, + void *buff, u16 buff_size, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + enum i40e_status_code status; + + if (buff_size == 0 || !buff) + return I40E_ERR_PARAM; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_cee_dcb_cfg); + + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); + status = i40e_asq_send_command(hw, &desc, (void *)buff, buff_size, + cmd_details); + + return status; +} + +/** + * i40e_aq_start_stop_dcbx - Start/Stop DCBx service in FW + * @hw: pointer to the hw struct + * @start_agent: True if DCBx Agent needs to be Started + * False if DCBx Agent needs to be Stopped + * @cmd_details: pointer to command details structure or NULL + * + * Start/Stop the embedded dcbx Agent + **/ +enum i40e_status_code i40e_aq_start_stop_dcbx(struct i40e_hw *hw, + bool start_agent, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_lldp_stop_start_specific_agent *cmd = + (struct i40e_aqc_lldp_stop_start_specific_agent *) + &desc.params.raw; + enum i40e_status_code status; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_lldp_stop_start_spec_agent); + + if (start_agent) + cmd->command = I40E_AQC_START_SPECIFIC_AGENT_MASK; + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_aq_add_udp_tunnel + * @hw: pointer to the hw struct + * @udp_port: the UDP port to add in Host byte order + * @protocol_index: protocol index type + * @filter_index: pointer to filter index + * @cmd_details: pointer to command details 
structure or NULL + * + * Note: Firmware expects the udp_port value to be in Little Endian format, + * and this function will call CPU_TO_LE16 to convert from Host byte order to + * Little Endian order. + **/ +enum i40e_status_code i40e_aq_add_udp_tunnel(struct i40e_hw *hw, + u16 udp_port, u8 protocol_index, + u8 *filter_index, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_add_udp_tunnel *cmd = + (struct i40e_aqc_add_udp_tunnel *)&desc.params.raw; + struct i40e_aqc_del_udp_tunnel_completion *resp = + (struct i40e_aqc_del_udp_tunnel_completion *)&desc.params.raw; + enum i40e_status_code status; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_udp_tunnel); + + cmd->udp_port = CPU_TO_LE16(udp_port); + cmd->protocol_type = protocol_index; + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + if (!status && filter_index) + *filter_index = resp->index; + + return status; +} + +/** + * i40e_aq_del_udp_tunnel + * @hw: pointer to the hw struct + * @index: filter index + * @cmd_details: pointer to command details structure or NULL + **/ +enum i40e_status_code i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_remove_udp_tunnel *cmd = + (struct i40e_aqc_remove_udp_tunnel *)&desc.params.raw; + enum i40e_status_code status; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_del_udp_tunnel); + + cmd->index = index; + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_aq_get_switch_resource_alloc (0x0204) + * @hw: pointer to the hw struct + * @num_entries: pointer to u8 to store the number of resource entries returned + * @buf: pointer to a user supplied buffer. This buffer must be large enough + * to store the resource information for all resource types. Each + * resource type is a i40e_aqc_switch_resource_alloc_data structure. + * @count: size, in bytes, of the buffer provided + * @cmd_details: pointer to command details structure or NULL + * + * Query the resources allocated to a function. + **/ +enum i40e_status_code i40e_aq_get_switch_resource_alloc(struct i40e_hw *hw, + u8 *num_entries, + struct i40e_aqc_switch_resource_alloc_element_resp *buf, + u16 count, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_get_switch_resource_alloc *cmd_resp = + (struct i40e_aqc_get_switch_resource_alloc *)&desc.params.raw; + enum i40e_status_code status; + u16 length = count * sizeof(*buf); + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_get_switch_resource_alloc); + + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); + if (length > I40E_AQ_LARGE_BUF) + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); + + status = i40e_asq_send_command(hw, &desc, buf, length, cmd_details); + + if (!status && num_entries) + *num_entries = cmd_resp->num_entries; + + return status; +} + +/** + * i40e_aq_delete_element - Delete switch element + * @hw: pointer to the hw struct + * @seid: the SEID to delete from the switch + * @cmd_details: pointer to command details structure or NULL + * + * This deletes a switch element from the switch. 
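+ *
+ * A SEID of 0 is rejected with I40E_ERR_PARAM before any AQ traffic is
+ * generated. Usage sketch (illustrative; 'veb_seid' stands for an SEID
+ * returned by an earlier add operation):
+ *
+ *    enum i40e_status_code ret = i40e_aq_delete_element(hw, veb_seid, NULL);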
+ **/ +enum i40e_status_code i40e_aq_delete_element(struct i40e_hw *hw, u16 seid, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_switch_seid *cmd = + (struct i40e_aqc_switch_seid *)&desc.params.raw; + enum i40e_status_code status; + + if (seid == 0) + return I40E_ERR_PARAM; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_delete_element); + + cmd->seid = CPU_TO_LE16(seid); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_aq_add_pvirt - Instantiate a Port Virtualizer on a port + * @hw: pointer to the hw struct + * @flags: component flags + * @mac_seid: uplink seid (MAC SEID) + * @vsi_seid: connected vsi seid + * @ret_seid: seid of create pv component + * + * This instantiates an i40e port virtualizer with specified flags. + * Depending on specified flags the port virtualizer can act as a + * 802.1Qbr port virtualizer or a 802.1Qbg S-component. + */ +enum i40e_status_code i40e_aq_add_pvirt(struct i40e_hw *hw, u16 flags, + u16 mac_seid, u16 vsi_seid, + u16 *ret_seid) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_add_update_pv *cmd = + (struct i40e_aqc_add_update_pv *)&desc.params.raw; + struct i40e_aqc_add_update_pv_completion *resp = + (struct i40e_aqc_add_update_pv_completion *)&desc.params.raw; + enum i40e_status_code status; + + if (vsi_seid == 0) + return I40E_ERR_PARAM; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_pv); + cmd->command_flags = CPU_TO_LE16(flags); + cmd->uplink_seid = CPU_TO_LE16(mac_seid); + cmd->connected_seid = CPU_TO_LE16(vsi_seid); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL); + if (!status && ret_seid) + *ret_seid = LE16_TO_CPU(resp->pv_seid); + + return status; +} + +/** + * i40e_aq_add_tag - Add an S/E-tag + * @hw: pointer to the hw struct + * @direct_to_queue: should s-tag direct flow to a specific queue + * @vsi_seid: VSI SEID to use this tag + * @tag: value of the tag + * @queue_num: queue number, only valid is direct_to_queue is true + * @tags_used: return value, number of tags in use by this PF + * @tags_free: return value, number of unallocated tags + * @cmd_details: pointer to command details structure or NULL + * + * This associates an S- or E-tag to a VSI in the switch complex. It returns + * the number of tags allocated by the PF, and the number of unallocated + * tags available. 
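+ *
+ * Usage sketch (illustrative; vsi_seid, the tag value and the queue number
+ * are placeholders):
+ *
+ *    u16 tags_used = 0, tags_free = 0;
+ *    enum i40e_status_code ret;
+ *
+ *    ret = i40e_aq_add_tag(hw, true, vsi_seid, 100, 3,
+ *                          &tags_used, &tags_free, NULL);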
+ **/ +enum i40e_status_code i40e_aq_add_tag(struct i40e_hw *hw, bool direct_to_queue, + u16 vsi_seid, u16 tag, u16 queue_num, + u16 *tags_used, u16 *tags_free, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_add_tag *cmd = + (struct i40e_aqc_add_tag *)&desc.params.raw; + struct i40e_aqc_add_remove_tag_completion *resp = + (struct i40e_aqc_add_remove_tag_completion *)&desc.params.raw; + enum i40e_status_code status; + + if (vsi_seid == 0) + return I40E_ERR_PARAM; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_tag); + + cmd->seid = CPU_TO_LE16(vsi_seid); + cmd->tag = CPU_TO_LE16(tag); + if (direct_to_queue) { + cmd->flags = CPU_TO_LE16(I40E_AQC_ADD_TAG_FLAG_TO_QUEUE); + cmd->queue_number = CPU_TO_LE16(queue_num); + } + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + if (!status) { + if (tags_used != NULL) + *tags_used = LE16_TO_CPU(resp->tags_used); + if (tags_free != NULL) + *tags_free = LE16_TO_CPU(resp->tags_free); + } + + return status; +} + +/** + * i40e_aq_remove_tag - Remove an S- or E-tag + * @hw: pointer to the hw struct + * @vsi_seid: VSI SEID this tag is associated with + * @tag: value of the S-tag to delete + * @tags_used: return value, number of tags in use by this PF + * @tags_free: return value, number of unallocated tags + * @cmd_details: pointer to command details structure or NULL + * + * This deletes an S- or E-tag from a VSI in the switch complex. It returns + * the number of tags allocated by the PF, and the number of unallocated + * tags available. + **/ +enum i40e_status_code i40e_aq_remove_tag(struct i40e_hw *hw, u16 vsi_seid, + u16 tag, u16 *tags_used, u16 *tags_free, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_remove_tag *cmd = + (struct i40e_aqc_remove_tag *)&desc.params.raw; + struct i40e_aqc_add_remove_tag_completion *resp = + (struct i40e_aqc_add_remove_tag_completion *)&desc.params.raw; + enum i40e_status_code status; + + if (vsi_seid == 0) + return I40E_ERR_PARAM; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_tag); + + cmd->seid = CPU_TO_LE16(vsi_seid); + cmd->tag = CPU_TO_LE16(tag); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + if (!status) { + if (tags_used != NULL) + *tags_used = LE16_TO_CPU(resp->tags_used); + if (tags_free != NULL) + *tags_free = LE16_TO_CPU(resp->tags_free); + } + + return status; +} + +/** + * i40e_aq_add_mcast_etag - Add a multicast E-tag + * @hw: pointer to the hw struct + * @pv_seid: Port Virtualizer of this SEID to associate E-tag with + * @etag: value of E-tag to add + * @num_tags_in_buf: number of unicast E-tags in indirect buffer + * @buf: address of indirect buffer + * @tags_used: return value, number of E-tags in use by this port + * @tags_free: return value, number of unallocated M-tags + * @cmd_details: pointer to command details structure or NULL + * + * This associates a multicast E-tag to a port virtualizer. It will return + * the number of tags allocated by the PF, and the number of unallocated + * tags available. + * + * The indirect buffer pointed to by buf is a list of 2-byte E-tags, + * num_tags_in_buf long. 
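+ *
+ * Buffer sketch (illustrative; whether FW expects the E-tags in little
+ * endian order is an assumption to verify against the AQ spec):
+ *
+ *    u16 unicast_etags[2] = { CPU_TO_LE16(0x10), CPU_TO_LE16(0x11) };
+ *    u16 tags_used = 0, tags_free = 0;
+ *
+ *    i40e_aq_add_mcast_etag(hw, pv_seid, mcast_etag, 2, unicast_etags,
+ *                           &tags_used, &tags_free, NULL);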
+ **/ +enum i40e_status_code i40e_aq_add_mcast_etag(struct i40e_hw *hw, u16 pv_seid, + u16 etag, u8 num_tags_in_buf, void *buf, + u16 *tags_used, u16 *tags_free, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_add_remove_mcast_etag *cmd = + (struct i40e_aqc_add_remove_mcast_etag *)&desc.params.raw; + struct i40e_aqc_add_remove_mcast_etag_completion *resp = + (struct i40e_aqc_add_remove_mcast_etag_completion *)&desc.params.raw; + enum i40e_status_code status; + u16 length = sizeof(u16) * num_tags_in_buf; + + if ((pv_seid == 0) || (buf == NULL) || (num_tags_in_buf == 0)) + return I40E_ERR_PARAM; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_add_multicast_etag); + + cmd->pv_seid = CPU_TO_LE16(pv_seid); + cmd->etag = CPU_TO_LE16(etag); + cmd->num_unicast_etags = num_tags_in_buf; + + desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); + + status = i40e_asq_send_command(hw, &desc, buf, length, cmd_details); + + if (!status) { + if (tags_used != NULL) + *tags_used = LE16_TO_CPU(resp->mcast_etags_used); + if (tags_free != NULL) + *tags_free = LE16_TO_CPU(resp->mcast_etags_free); + } + + return status; +} + +/** + * i40e_aq_remove_mcast_etag - Remove a multicast E-tag + * @hw: pointer to the hw struct + * @pv_seid: Port Virtualizer SEID this M-tag is associated with + * @etag: value of the E-tag to remove + * @tags_used: return value, number of tags in use by this port + * @tags_free: return value, number of unallocated tags + * @cmd_details: pointer to command details structure or NULL + * + * This deletes an E-tag from the port virtualizer. It will return + * the number of tags allocated by the port, and the number of unallocated + * tags available. + **/ +enum i40e_status_code i40e_aq_remove_mcast_etag(struct i40e_hw *hw, u16 pv_seid, + u16 etag, u16 *tags_used, u16 *tags_free, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_add_remove_mcast_etag *cmd = + (struct i40e_aqc_add_remove_mcast_etag *)&desc.params.raw; + struct i40e_aqc_add_remove_mcast_etag_completion *resp = + (struct i40e_aqc_add_remove_mcast_etag_completion *)&desc.params.raw; + enum i40e_status_code status; + + + if (pv_seid == 0) + return I40E_ERR_PARAM; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_remove_multicast_etag); + + cmd->pv_seid = CPU_TO_LE16(pv_seid); + cmd->etag = CPU_TO_LE16(etag); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + if (!status) { + if (tags_used != NULL) + *tags_used = LE16_TO_CPU(resp->mcast_etags_used); + if (tags_free != NULL) + *tags_free = LE16_TO_CPU(resp->mcast_etags_free); + } + + return status; +} + +/** + * i40e_aq_update_tag - Update an S/E-tag + * @hw: pointer to the hw struct + * @vsi_seid: VSI SEID using this S-tag + * @old_tag: old tag value + * @new_tag: new tag value + * @tags_used: return value, number of tags in use by this PF + * @tags_free: return value, number of unallocated tags + * @cmd_details: pointer to command details structure or NULL + * + * This updates the value of the tag currently attached to this VSI + * in the switch complex. It will return the number of tags allocated + * by the PF, and the number of unallocated tags available. 
+ **/ +enum i40e_status_code i40e_aq_update_tag(struct i40e_hw *hw, u16 vsi_seid, + u16 old_tag, u16 new_tag, u16 *tags_used, + u16 *tags_free, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_update_tag *cmd = + (struct i40e_aqc_update_tag *)&desc.params.raw; + struct i40e_aqc_update_tag_completion *resp = + (struct i40e_aqc_update_tag_completion *)&desc.params.raw; + enum i40e_status_code status; + + if (vsi_seid == 0) + return I40E_ERR_PARAM; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_update_tag); + + cmd->seid = CPU_TO_LE16(vsi_seid); + cmd->old_tag = CPU_TO_LE16(old_tag); + cmd->new_tag = CPU_TO_LE16(new_tag); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + if (!status) { + if (tags_used != NULL) + *tags_used = LE16_TO_CPU(resp->tags_used); + if (tags_free != NULL) + *tags_free = LE16_TO_CPU(resp->tags_free); + } + + return status; +} + +/** + * i40e_aq_dcb_ignore_pfc - Ignore PFC for given TCs + * @hw: pointer to the hw struct + * @tcmap: TC map for request/release any ignore PFC condition + * @request: request or release ignore PFC condition + * @tcmap_ret: return TCs for which PFC is currently ignored + * @cmd_details: pointer to command details structure or NULL + * + * This sends out request/release to ignore PFC condition for a TC. + * It will return the TCs for which PFC is currently ignored. + **/ +enum i40e_status_code i40e_aq_dcb_ignore_pfc(struct i40e_hw *hw, u8 tcmap, + bool request, u8 *tcmap_ret, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_pfc_ignore *cmd_resp = + (struct i40e_aqc_pfc_ignore *)&desc.params.raw; + enum i40e_status_code status; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_dcb_ignore_pfc); + + if (request) + cmd_resp->command_flags = I40E_AQC_PFC_IGNORE_SET; + + cmd_resp->tc_bitmap = tcmap; + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + if (!status) { + if (tcmap_ret != NULL) + *tcmap_ret = cmd_resp->tc_bitmap; + } + + return status; +} + +/** + * i40e_aq_dcb_updated - DCB Updated Command + * @hw: pointer to the hw struct + * @cmd_details: pointer to command details structure or NULL + * + * When LLDP is handled in PF this command is used by the PF + * to notify EMP that a DCB setting is modified. + * When LLDP is handled in EMP this command is used by the PF + * to notify EMP whenever one of the following parameters get + * modified: + * - PFCLinkDelayAllowance in PRTDCB_GENC.PFCLDA + * - PCIRTT in PRTDCB_GENC.PCIRTT + * - Maximum Frame Size for non-FCoE TCs set by PRTDCB_TDPUC.MAX_TXFRAME. + * EMP will return when the shared RPB settings have been + * recomputed and modified. The retval field in the descriptor + * will be set to 0 when RPB is modified. + **/ +enum i40e_status_code i40e_aq_dcb_updated(struct i40e_hw *hw, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + enum i40e_status_code status; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_dcb_updated); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_aq_add_statistics - Add a statistics block to a VLAN in a switch. 
+ * @hw: pointer to the hw struct + * @seid: defines the SEID of the switch for which the stats are requested + * @vlan_id: the VLAN ID for which the statistics are requested + * @stat_index: index of the statistics counters block assigned to this VLAN + * @cmd_details: pointer to command details structure or NULL + * + * XL710 supports 128 smonVlanStats counters.This command is used to + * allocate a set of smonVlanStats counters to a specific VLAN in a specific + * switch. + **/ +enum i40e_status_code i40e_aq_add_statistics(struct i40e_hw *hw, u16 seid, + u16 vlan_id, u16 *stat_index, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_add_remove_statistics *cmd_resp = + (struct i40e_aqc_add_remove_statistics *)&desc.params.raw; + enum i40e_status_code status; + + if ((seid == 0) || (stat_index == NULL)) + return I40E_ERR_PARAM; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_statistics); + + cmd_resp->seid = CPU_TO_LE16(seid); + cmd_resp->vlan = CPU_TO_LE16(vlan_id); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + if (!status && stat_index) + *stat_index = LE16_TO_CPU(cmd_resp->stat_index); + + return status; +} + +/** + * i40e_aq_remove_statistics - Remove a statistics block to a VLAN in a switch. + * @hw: pointer to the hw struct + * @seid: defines the SEID of the switch for which the stats are requested + * @vlan_id: the VLAN ID for which the statistics are requested + * @stat_index: index of the statistics counters block assigned to this VLAN + * @cmd_details: pointer to command details structure or NULL + * + * XL710 supports 128 smonVlanStats counters.This command is used to + * deallocate a set of smonVlanStats counters to a specific VLAN in a specific + * switch. + **/ +enum i40e_status_code i40e_aq_remove_statistics(struct i40e_hw *hw, u16 seid, + u16 vlan_id, u16 stat_index, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_add_remove_statistics *cmd = + (struct i40e_aqc_add_remove_statistics *)&desc.params.raw; + enum i40e_status_code status; + + if (seid == 0) + return I40E_ERR_PARAM; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_remove_statistics); + + cmd->seid = CPU_TO_LE16(seid); + cmd->vlan = CPU_TO_LE16(vlan_id); + cmd->stat_index = CPU_TO_LE16(stat_index); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_aq_set_port_parameters - set physical port parameters. 
+ * @hw: pointer to the hw struct + * @bad_frame_vsi: defines the VSI to which bad frames are forwarded + * @save_bad_pac: if set packets with errors are forwarded to the bad frames VSI + * @pad_short_pac: if set transmit packets smaller than 60 bytes are padded + * @double_vlan: if set double VLAN is enabled + * @cmd_details: pointer to command details structure or NULL + **/ +enum i40e_status_code i40e_aq_set_port_parameters(struct i40e_hw *hw, + u16 bad_frame_vsi, bool save_bad_pac, + bool pad_short_pac, bool double_vlan, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aqc_set_port_parameters *cmd; + enum i40e_status_code status; + struct i40e_aq_desc desc; + u16 command_flags = 0; + + cmd = (struct i40e_aqc_set_port_parameters *)&desc.params.raw; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_set_port_parameters); + + cmd->bad_frame_vsi = CPU_TO_LE16(bad_frame_vsi); + if (save_bad_pac) + command_flags |= I40E_AQ_SET_P_PARAMS_SAVE_BAD_PACKETS; + if (pad_short_pac) + command_flags |= I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS; + if (double_vlan) + command_flags |= I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA; + cmd->command_flags = CPU_TO_LE16(command_flags); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_aq_tx_sched_cmd - generic Tx scheduler AQ command handler + * @hw: pointer to the hw struct + * @seid: seid for the physical port/switching component/vsi + * @buff: Indirect buffer to hold data parameters and response + * @buff_size: Indirect buffer size + * @opcode: Tx scheduler AQ command opcode + * @cmd_details: pointer to command details structure or NULL + * + * Generic command handler for Tx scheduler AQ commands + **/ +static enum i40e_status_code i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid, + void *buff, u16 buff_size, + enum i40e_admin_queue_opc opcode, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_tx_sched_ind *cmd = + (struct i40e_aqc_tx_sched_ind *)&desc.params.raw; + enum i40e_status_code status; + bool cmd_param_flag = false; + + switch (opcode) { + case i40e_aqc_opc_configure_vsi_ets_sla_bw_limit: + case i40e_aqc_opc_configure_vsi_tc_bw: + case i40e_aqc_opc_enable_switching_comp_ets: + case i40e_aqc_opc_modify_switching_comp_ets: + case i40e_aqc_opc_disable_switching_comp_ets: + case i40e_aqc_opc_configure_switching_comp_ets_bw_limit: + case i40e_aqc_opc_configure_switching_comp_bw_config: + cmd_param_flag = true; + break; + case i40e_aqc_opc_query_vsi_bw_config: + case i40e_aqc_opc_query_vsi_ets_sla_config: + case i40e_aqc_opc_query_switching_comp_ets_config: + case i40e_aqc_opc_query_port_ets_config: + case i40e_aqc_opc_query_switching_comp_bw_config: + cmd_param_flag = false; + break; + default: + return I40E_ERR_PARAM; + } + + i40e_fill_default_direct_cmd_desc(&desc, opcode); + + /* Indirect command */ + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); + if (cmd_param_flag) + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD); + if (buff_size > I40E_AQ_LARGE_BUF) + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); + + desc.datalen = CPU_TO_LE16(buff_size); + + cmd->vsi_seid = CPU_TO_LE16(seid); + + status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); + + return status; +} + +/** + * i40e_aq_config_vsi_bw_limit - Configure VSI BW Limit + * @hw: pointer to the hw struct + * @seid: VSI seid + * @credit: BW limit credits (0 = disabled) + * @max_credit: Max BW limit credits + * @cmd_details: pointer to command details structure or 
NULL + **/ +enum i40e_status_code i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw, + u16 seid, u16 credit, u8 max_credit, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_configure_vsi_bw_limit *cmd = + (struct i40e_aqc_configure_vsi_bw_limit *)&desc.params.raw; + enum i40e_status_code status; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_configure_vsi_bw_limit); + + cmd->vsi_seid = CPU_TO_LE16(seid); + cmd->credit = CPU_TO_LE16(credit); + cmd->max_credit = max_credit; + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_aq_config_switch_comp_bw_limit - Configure Switching component BW Limit + * @hw: pointer to the hw struct + * @seid: switching component seid + * @credit: BW limit credits (0 = disabled) + * @max_bw: Max BW limit credits + * @cmd_details: pointer to command details structure or NULL + **/ +enum i40e_status_code i40e_aq_config_switch_comp_bw_limit(struct i40e_hw *hw, + u16 seid, u16 credit, u8 max_bw, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_configure_switching_comp_bw_limit *cmd = + (struct i40e_aqc_configure_switching_comp_bw_limit *)&desc.params.raw; + enum i40e_status_code status; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_configure_switching_comp_bw_limit); + + cmd->seid = CPU_TO_LE16(seid); + cmd->credit = CPU_TO_LE16(credit); + cmd->max_bw = max_bw; + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_aq_config_vsi_ets_sla_bw_limit - Config VSI BW Limit per TC + * @hw: pointer to the hw struct + * @seid: VSI seid + * @bw_data: Buffer holding enabled TCs, per TC BW limit/credits + * @cmd_details: pointer to command details structure or NULL + **/ +enum i40e_status_code i40e_aq_config_vsi_ets_sla_bw_limit(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_configure_vsi_ets_sla_bw_data *bw_data, + struct i40e_asq_cmd_details *cmd_details) +{ + return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), + i40e_aqc_opc_configure_vsi_ets_sla_bw_limit, + cmd_details); +} + +/** + * i40e_aq_config_vsi_tc_bw - Config VSI BW Allocation per TC + * @hw: pointer to the hw struct + * @seid: VSI seid + * @bw_data: Buffer holding enabled TCs, relative TC BW limit/credits + * @cmd_details: pointer to command details structure or NULL + **/ +enum i40e_status_code i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_configure_vsi_tc_bw_data *bw_data, + struct i40e_asq_cmd_details *cmd_details) +{ + return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), + i40e_aqc_opc_configure_vsi_tc_bw, + cmd_details); +} + +/** + * i40e_aq_config_switch_comp_ets - Enable/Disable/Modify ETS on the port + * @hw: pointer to the hw struct + * @seid: seid of the switching component connected to Physical Port + * @ets_data: Buffer holding ETS parameters + * @opcode: Tx scheduler AQ command opcode + * @cmd_details: pointer to command details structure or NULL + **/ +enum i40e_status_code i40e_aq_config_switch_comp_ets(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_configure_switching_comp_ets_data *ets_data, + enum i40e_admin_queue_opc opcode, + struct i40e_asq_cmd_details *cmd_details) +{ + return i40e_aq_tx_sched_cmd(hw, seid, (void *)ets_data, + sizeof(*ets_data), opcode, cmd_details); +} + +/** + * i40e_aq_config_switch_comp_bw_config - Config Switch comp BW Alloc per TC + * @hw: pointer to the hw struct + * @seid: seid 
of the switching component + * @bw_data: Buffer holding enabled TCs, relative/absolute TC BW limit/credits + * @cmd_details: pointer to command details structure or NULL + **/ +enum i40e_status_code i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_configure_switching_comp_bw_config_data *bw_data, + struct i40e_asq_cmd_details *cmd_details) +{ + return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), + i40e_aqc_opc_configure_switching_comp_bw_config, + cmd_details); +} + +/** + * i40e_aq_config_switch_comp_ets_bw_limit - Config Switch comp BW Limit per TC + * @hw: pointer to the hw struct + * @seid: seid of the switching component + * @bw_data: Buffer holding enabled TCs, per TC BW limit/credits + * @cmd_details: pointer to command details structure or NULL + **/ +enum i40e_status_code i40e_aq_config_switch_comp_ets_bw_limit( + struct i40e_hw *hw, u16 seid, + struct i40e_aqc_configure_switching_comp_ets_bw_limit_data *bw_data, + struct i40e_asq_cmd_details *cmd_details) +{ + return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), + i40e_aqc_opc_configure_switching_comp_ets_bw_limit, + cmd_details); +} + +/** + * i40e_aq_query_vsi_bw_config - Query VSI BW configuration + * @hw: pointer to the hw struct + * @seid: seid of the VSI + * @bw_data: Buffer to hold VSI BW configuration + * @cmd_details: pointer to command details structure or NULL + **/ +enum i40e_status_code i40e_aq_query_vsi_bw_config(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_query_vsi_bw_config_resp *bw_data, + struct i40e_asq_cmd_details *cmd_details) +{ + return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), + i40e_aqc_opc_query_vsi_bw_config, + cmd_details); +} + +/** + * i40e_aq_query_vsi_ets_sla_config - Query VSI BW configuration per TC + * @hw: pointer to the hw struct + * @seid: seid of the VSI + * @bw_data: Buffer to hold VSI BW configuration per TC + * @cmd_details: pointer to command details structure or NULL + **/ +enum i40e_status_code i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data, + struct i40e_asq_cmd_details *cmd_details) +{ + return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), + i40e_aqc_opc_query_vsi_ets_sla_config, + cmd_details); +} + +/** + * i40e_aq_query_switch_comp_ets_config - Query Switch comp BW config per TC + * @hw: pointer to the hw struct + * @seid: seid of the switching component + * @bw_data: Buffer to hold switching component's per TC BW config + * @cmd_details: pointer to command details structure or NULL + **/ +enum i40e_status_code i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data, + struct i40e_asq_cmd_details *cmd_details) +{ + return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), + i40e_aqc_opc_query_switching_comp_ets_config, + cmd_details); +} + +/** + * i40e_aq_query_port_ets_config - Query Physical Port ETS configuration + * @hw: pointer to the hw struct + * @seid: seid of the VSI or switching component connected to Physical Port + * @bw_data: Buffer to hold current ETS configuration for the Physical Port + * @cmd_details: pointer to command details structure or NULL + **/ +enum i40e_status_code i40e_aq_query_port_ets_config(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_query_port_ets_config_resp *bw_data, + struct i40e_asq_cmd_details *cmd_details) +{ + return i40e_aq_tx_sched_cmd(hw, 
seid, (void *)bw_data, sizeof(*bw_data), + i40e_aqc_opc_query_port_ets_config, + cmd_details); +} + +/** + * i40e_aq_query_switch_comp_bw_config - Query Switch comp BW configuration + * @hw: pointer to the hw struct + * @seid: seid of the switching component + * @bw_data: Buffer to hold switching component's BW configuration + * @cmd_details: pointer to command details structure or NULL + **/ +enum i40e_status_code i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data, + struct i40e_asq_cmd_details *cmd_details) +{ + return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), + i40e_aqc_opc_query_switching_comp_bw_config, + cmd_details); +} + +/** + * i40e_validate_filter_settings + * @hw: pointer to the hardware structure + * @settings: Filter control settings + * + * Check and validate the filter control settings passed. + * The function checks for the valid filter/context sizes being + * passed for FCoE and PE. + * + * Returns I40E_SUCCESS if the values passed are valid and within + * range else returns an error. + **/ +STATIC enum i40e_status_code i40e_validate_filter_settings(struct i40e_hw *hw, + struct i40e_filter_control_settings *settings) +{ + u32 fcoe_cntx_size, fcoe_filt_size; + u32 pe_cntx_size, pe_filt_size; + u32 fcoe_fmax; + + u32 val; + + /* Validate FCoE settings passed */ + switch (settings->fcoe_filt_num) { + case I40E_HASH_FILTER_SIZE_1K: + case I40E_HASH_FILTER_SIZE_2K: + case I40E_HASH_FILTER_SIZE_4K: + case I40E_HASH_FILTER_SIZE_8K: + case I40E_HASH_FILTER_SIZE_16K: + case I40E_HASH_FILTER_SIZE_32K: + fcoe_filt_size = I40E_HASH_FILTER_BASE_SIZE; + fcoe_filt_size <<= (u32)settings->fcoe_filt_num; + break; + default: + return I40E_ERR_PARAM; + } + + switch (settings->fcoe_cntx_num) { + case I40E_DMA_CNTX_SIZE_512: + case I40E_DMA_CNTX_SIZE_1K: + case I40E_DMA_CNTX_SIZE_2K: + case I40E_DMA_CNTX_SIZE_4K: + fcoe_cntx_size = I40E_DMA_CNTX_BASE_SIZE; + fcoe_cntx_size <<= (u32)settings->fcoe_cntx_num; + break; + default: + return I40E_ERR_PARAM; + } + + /* Validate PE settings passed */ + switch (settings->pe_filt_num) { + case I40E_HASH_FILTER_SIZE_1K: + case I40E_HASH_FILTER_SIZE_2K: + case I40E_HASH_FILTER_SIZE_4K: + case I40E_HASH_FILTER_SIZE_8K: + case I40E_HASH_FILTER_SIZE_16K: + case I40E_HASH_FILTER_SIZE_32K: + case I40E_HASH_FILTER_SIZE_64K: + case I40E_HASH_FILTER_SIZE_128K: + case I40E_HASH_FILTER_SIZE_256K: + case I40E_HASH_FILTER_SIZE_512K: + case I40E_HASH_FILTER_SIZE_1M: + pe_filt_size = I40E_HASH_FILTER_BASE_SIZE; + pe_filt_size <<= (u32)settings->pe_filt_num; + break; + default: + return I40E_ERR_PARAM; + } + + switch (settings->pe_cntx_num) { + case I40E_DMA_CNTX_SIZE_512: + case I40E_DMA_CNTX_SIZE_1K: + case I40E_DMA_CNTX_SIZE_2K: + case I40E_DMA_CNTX_SIZE_4K: + case I40E_DMA_CNTX_SIZE_8K: + case I40E_DMA_CNTX_SIZE_16K: + case I40E_DMA_CNTX_SIZE_32K: + case I40E_DMA_CNTX_SIZE_64K: + case I40E_DMA_CNTX_SIZE_128K: + case I40E_DMA_CNTX_SIZE_256K: + pe_cntx_size = I40E_DMA_CNTX_BASE_SIZE; + pe_cntx_size <<= (u32)settings->pe_cntx_num; + break; + default: + return I40E_ERR_PARAM; + } + + /* FCHSIZE + FCDSIZE should not be greater than PMFCOEFMAX */ + val = rd32(hw, I40E_GLHMC_FCOEFMAX); + fcoe_fmax = (val & I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK) + >> I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT; + if (fcoe_filt_size + fcoe_cntx_size > fcoe_fmax) + return I40E_ERR_INVALID_SIZE; + + return I40E_SUCCESS; +} + +/** + * i40e_set_filter_control + * @hw: pointer to the hardware structure 
+ * @settings: Filter control settings + * + * Set the Queue Filters for PE/FCoE and enable filters required + * for a single PF. It is expected that these settings are programmed + * at the driver initialization time. + **/ +enum i40e_status_code i40e_set_filter_control(struct i40e_hw *hw, + struct i40e_filter_control_settings *settings) +{ + enum i40e_status_code ret = I40E_SUCCESS; + u32 hash_lut_size = 0; + u32 val; + + if (!settings) + return I40E_ERR_PARAM; + + /* Validate the input settings */ + ret = i40e_validate_filter_settings(hw, settings); + if (ret) + return ret; + + /* Read the PF Queue Filter control register */ + val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0); + + /* Program required PE hash buckets for the PF */ + val &= ~I40E_PFQF_CTL_0_PEHSIZE_MASK; + val |= ((u32)settings->pe_filt_num << I40E_PFQF_CTL_0_PEHSIZE_SHIFT) & + I40E_PFQF_CTL_0_PEHSIZE_MASK; + /* Program required PE contexts for the PF */ + val &= ~I40E_PFQF_CTL_0_PEDSIZE_MASK; + val |= ((u32)settings->pe_cntx_num << I40E_PFQF_CTL_0_PEDSIZE_SHIFT) & + I40E_PFQF_CTL_0_PEDSIZE_MASK; + + /* Program required FCoE hash buckets for the PF */ + val &= ~I40E_PFQF_CTL_0_PFFCHSIZE_MASK; + val |= ((u32)settings->fcoe_filt_num << + I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT) & + I40E_PFQF_CTL_0_PFFCHSIZE_MASK; + /* Program required FCoE DDP contexts for the PF */ + val &= ~I40E_PFQF_CTL_0_PFFCDSIZE_MASK; + val |= ((u32)settings->fcoe_cntx_num << + I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT) & + I40E_PFQF_CTL_0_PFFCDSIZE_MASK; + + /* Program Hash LUT size for the PF */ + val &= ~I40E_PFQF_CTL_0_HASHLUTSIZE_MASK; + if (settings->hash_lut_size == I40E_HASH_LUT_SIZE_512) + hash_lut_size = 1; + val |= (hash_lut_size << I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT) & + I40E_PFQF_CTL_0_HASHLUTSIZE_MASK; + + /* Enable FDIR, Ethertype and MACVLAN filters for PF and VFs */ + if (settings->enable_fdir) + val |= I40E_PFQF_CTL_0_FD_ENA_MASK; + if (settings->enable_ethtype) + val |= I40E_PFQF_CTL_0_ETYPE_ENA_MASK; + if (settings->enable_macvlan) + val |= I40E_PFQF_CTL_0_MACVLAN_ENA_MASK; + + i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, val); + + return I40E_SUCCESS; +} + +/** + * i40e_aq_add_rem_control_packet_filter - Add or Remove Control Packet Filter + * @hw: pointer to the hw struct + * @mac_addr: MAC address to use in the filter + * @ethtype: Ethertype to use in the filter + * @flags: Flags that needs to be applied to the filter + * @vsi_seid: seid of the control VSI + * @queue: VSI queue number to send the packet to + * @is_add: Add control packet filter if True else remove + * @stats: Structure to hold information on control filter counts + * @cmd_details: pointer to command details structure or NULL + * + * This command will Add or Remove control packet filter for a control VSI. + * In return it will update the total number of perfect filter count in + * the stats member. 
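+ *
+ * i40e_add_filter_to_drop_tx_flow_control_frames() just below is a concrete
+ * add-side caller (Tx-direction drop filter for ethertype 0x8808 with the
+ * MAC ignored). A matching removal would typically pass is_add = false with
+ * the same ethertype, flags and SEID (placeholders here), e.g.:
+ *
+ *    i40e_aq_add_rem_control_packet_filter(hw, NULL, 0x8808, flags,
+ *                                          seid, 0, false, NULL, NULL);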
+ **/ +enum i40e_status_code i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw, + u8 *mac_addr, u16 ethtype, u16 flags, + u16 vsi_seid, u16 queue, bool is_add, + struct i40e_control_filter_stats *stats, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_add_remove_control_packet_filter *cmd = + (struct i40e_aqc_add_remove_control_packet_filter *) + &desc.params.raw; + struct i40e_aqc_add_remove_control_packet_filter_completion *resp = + (struct i40e_aqc_add_remove_control_packet_filter_completion *) + &desc.params.raw; + enum i40e_status_code status; + + if (vsi_seid == 0) + return I40E_ERR_PARAM; + + if (is_add) { + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_add_control_packet_filter); + cmd->queue = CPU_TO_LE16(queue); + } else { + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_remove_control_packet_filter); + } + + if (mac_addr) + i40e_memcpy(cmd->mac, mac_addr, ETH_ALEN, + I40E_NONDMA_TO_NONDMA); + + cmd->etype = CPU_TO_LE16(ethtype); + cmd->flags = CPU_TO_LE16(flags); + cmd->seid = CPU_TO_LE16(vsi_seid); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + if (!status && stats) { + stats->mac_etype_used = LE16_TO_CPU(resp->mac_etype_used); + stats->etype_used = LE16_TO_CPU(resp->etype_used); + stats->mac_etype_free = LE16_TO_CPU(resp->mac_etype_free); + stats->etype_free = LE16_TO_CPU(resp->etype_free); + } + + return status; +} + +/** + * i40e_add_filter_to_drop_tx_flow_control_frames- filter to drop flow control + * @hw: pointer to the hw struct + * @seid: VSI seid to add ethertype filter from + **/ +void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw, + u16 seid) +{ +#define I40E_FLOW_CONTROL_ETHTYPE 0x8808 + u16 flag = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC | + I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP | + I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX; + u16 ethtype = I40E_FLOW_CONTROL_ETHTYPE; + enum i40e_status_code status; + + status = i40e_aq_add_rem_control_packet_filter(hw, NULL, ethtype, flag, + seid, 0, true, NULL, + NULL); + if (status) + DEBUGOUT("Ethtype Filter Add failed: Error pruning Tx flow control frames\n"); +} + +/** + * i40e_fix_up_geneve_vni - adjust Geneve VNI for HW issue + * @filters: list of cloud filters + * @filter_count: length of list + * + * There's an issue in the device where the Geneve VNI layout needs + * to be shifted 1 byte over from the VxLAN VNI + **/ +STATIC void i40e_fix_up_geneve_vni( + struct i40e_aqc_cloud_filters_element_data *filters, + u8 filter_count) +{ + struct i40e_aqc_cloud_filters_element_data *f = filters; + int i; + + for (i = 0; i < filter_count; i++) { + u16 tnl_type; + u32 ti; + + tnl_type = (LE16_TO_CPU(f[i].flags) & + I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >> + I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT; + if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) { + ti = LE32_TO_CPU(f[i].tenant_id); + f[i].tenant_id = CPU_TO_LE32(ti << 8); + } + } +} + +/** + * i40e_aq_add_cloud_filters + * @hw: pointer to the hardware structure + * @seid: VSI seid to add cloud filters from + * @filters: Buffer which contains the filters to be added + * @filter_count: number of filters contained in the buffer + * + * Set the cloud filters for a given VSI. The contents of the + * i40e_aqc_cloud_filters_element_data are filled + * in by the caller of the function. 
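+ *
+ * Minimal single-filter sketch (illustrative; the inner_mac field and the
+ * I40E_AQC_ADD_CLOUD_FILTER_IMAC flag are assumed to match the element
+ * layout in i40e_adminq_cmd.h):
+ *
+ *    struct i40e_aqc_cloud_filters_element_data filter = {0};
+ *
+ *    i40e_memcpy(filter.inner_mac, mac, ETH_ALEN, I40E_NONDMA_TO_NONDMA);
+ *    filter.flags = CPU_TO_LE16(I40E_AQC_ADD_CLOUD_FILTER_IMAC);
+ *    ret = i40e_aq_add_cloud_filters(hw, vsi_seid, &filter, 1);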
+ * + **/ +enum i40e_status_code i40e_aq_add_cloud_filters(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_cloud_filters_element_data *filters, + u8 filter_count) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_add_remove_cloud_filters *cmd = + (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw; + enum i40e_status_code status; + u16 buff_len; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_add_cloud_filters); + + buff_len = filter_count * sizeof(*filters); + desc.datalen = CPU_TO_LE16(buff_len); + desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); + cmd->num_filters = filter_count; + cmd->seid = CPU_TO_LE16(seid); + + i40e_fix_up_geneve_vni(filters, filter_count); + + status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL); + + return status; +} + +/** + * i40e_aq_add_cloud_filters_bb + * @hw: pointer to the hardware structure + * @seid: VSI seid to add cloud filters from + * @filters: Buffer which contains the filters in big buffer to be added + * @filter_count: number of filters contained in the buffer + * + * Set the cloud filters for a given VSI. The contents of the + * i40e_aqc_cloud_filters_element_bb are filled in by the caller of the + * the function. + * + **/ +enum i40e_status_code +i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid, + struct i40e_aqc_cloud_filters_element_bb *filters, + u8 filter_count) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_add_remove_cloud_filters *cmd = + (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw; + enum i40e_status_code status; + u16 buff_len; + int i; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_add_cloud_filters); + + buff_len = filter_count * sizeof(*filters); + desc.datalen = CPU_TO_LE16(buff_len); + desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); + cmd->num_filters = filter_count; + cmd->seid = CPU_TO_LE16(seid); + cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB; + + for (i = 0; i < filter_count; i++) { + u16 tnl_type; + u32 ti; + + tnl_type = (LE16_TO_CPU(filters[i].element.flags) & + I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >> + I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT; + + /* Due to hardware eccentricities, the VNI for Geneve is shifted + * one more byte further than normally used for Tenant ID in + * other tunnel types. + */ + if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) { + ti = LE32_TO_CPU(filters[i].element.tenant_id); + filters[i].element.tenant_id = CPU_TO_LE32(ti << 8); + } + } + + status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL); + + return status; +} + +/** + * i40e_aq_rem_cloud_filters + * @hw: pointer to the hardware structure + * @seid: VSI seid to remove cloud filters from + * @filters: Buffer which contains the filters to be removed + * @filter_count: number of filters contained in the buffer + * + * Remove the cloud filters for a given VSI. The contents of the + * i40e_aqc_cloud_filters_element_data are filled in by the caller + * of the function. 
+ * + **/ +enum i40e_status_code +i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 seid, + struct i40e_aqc_cloud_filters_element_data *filters, + u8 filter_count) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_add_remove_cloud_filters *cmd = + (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw; + enum i40e_status_code status; + u16 buff_len; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_remove_cloud_filters); + + buff_len = filter_count * sizeof(*filters); + desc.datalen = CPU_TO_LE16(buff_len); + desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); + cmd->num_filters = filter_count; + cmd->seid = CPU_TO_LE16(seid); + + i40e_fix_up_geneve_vni(filters, filter_count); + + status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL); + + return status; +} + +/** + * i40e_aq_rem_cloud_filters_bb + * @hw: pointer to the hardware structure + * @seid: VSI seid to remove cloud filters from + * @filters: Buffer which contains the filters in big buffer to be removed + * @filter_count: number of filters contained in the buffer + * + * Remove the big buffer cloud filters for a given VSI. The contents of the + * i40e_aqc_cloud_filters_element_bb are filled in by the caller of the + * function. + * + **/ +enum i40e_status_code +i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid, + struct i40e_aqc_cloud_filters_element_bb *filters, + u8 filter_count) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_add_remove_cloud_filters *cmd = + (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw; + enum i40e_status_code status; + u16 buff_len; + int i; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_remove_cloud_filters); + + buff_len = filter_count * sizeof(*filters); + desc.datalen = CPU_TO_LE16(buff_len); + desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); + cmd->num_filters = filter_count; + cmd->seid = CPU_TO_LE16(seid); + cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB; + + for (i = 0; i < filter_count; i++) { + u16 tnl_type; + u32 ti; + + tnl_type = (LE16_TO_CPU(filters[i].element.flags) & + I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >> + I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT; + + /* Due to hardware eccentricities, the VNI for Geneve is shifted + * one more byte further than normally used for Tenant ID in + * other tunnel types. 
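+ * For instance a Geneve VNI that the caller stored as tenant_id 0x00000012
+ * is rewritten to 0x00001200 here before the buffer is handed to FW.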
+ */ + if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) { + ti = LE32_TO_CPU(filters[i].element.tenant_id); + filters[i].element.tenant_id = CPU_TO_LE32(ti << 8); + } + } + + status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL); + + return status; +} + +/** + * i40e_aq_replace_cloud_filters - Replace cloud filter command + * @hw: pointer to the hw struct + * @filters: pointer to the i40e_aqc_replace_cloud_filter_cmd struct + * @cmd_buf: pointer to the i40e_aqc_replace_cloud_filter_cmd_buf struct + * + **/ +enum +i40e_status_code i40e_aq_replace_cloud_filters(struct i40e_hw *hw, + struct i40e_aqc_replace_cloud_filters_cmd *filters, + struct i40e_aqc_replace_cloud_filters_cmd_buf *cmd_buf) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_replace_cloud_filters_cmd *cmd = + (struct i40e_aqc_replace_cloud_filters_cmd *)&desc.params.raw; + enum i40e_status_code status = I40E_SUCCESS; + int i = 0; + + /* X722 doesn't support this command */ + if (hw->mac.type == I40E_MAC_X722) + return I40E_ERR_DEVICE_NOT_SUPPORTED; + + /* need FW version greater than 6.00 */ + if (hw->aq.fw_maj_ver < 6) + return I40E_NOT_SUPPORTED; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_replace_cloud_filters); + + desc.datalen = CPU_TO_LE16(32); + desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); + cmd->old_filter_type = filters->old_filter_type; + cmd->new_filter_type = filters->new_filter_type; + cmd->valid_flags = filters->valid_flags; + cmd->tr_bit = filters->tr_bit; + cmd->tr_bit2 = filters->tr_bit2; + + status = i40e_asq_send_command(hw, &desc, cmd_buf, + sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf), NULL); + + /* for get cloud filters command */ + for (i = 0; i < 32; i += 4) { + cmd_buf->filters[i / 4].filter_type = cmd_buf->data[i]; + cmd_buf->filters[i / 4].input[0] = cmd_buf->data[i + 1]; + cmd_buf->filters[i / 4].input[1] = cmd_buf->data[i + 2]; + cmd_buf->filters[i / 4].input[2] = cmd_buf->data[i + 3]; + } + + return status; +} + + +/** + * i40e_aq_alternate_write + * @hw: pointer to the hardware structure + * @reg_addr0: address of first dword to be read + * @reg_val0: value to be written under 'reg_addr0' + * @reg_addr1: address of second dword to be read + * @reg_val1: value to be written under 'reg_addr1' + * + * Write one or two dwords to alternate structure. Fields are indicated + * by 'reg_addr0' and 'reg_addr1' register numbers. + * + **/ +enum i40e_status_code i40e_aq_alternate_write(struct i40e_hw *hw, + u32 reg_addr0, u32 reg_val0, + u32 reg_addr1, u32 reg_val1) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_alternate_write *cmd_resp = + (struct i40e_aqc_alternate_write *)&desc.params.raw; + enum i40e_status_code status; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_write); + cmd_resp->address0 = CPU_TO_LE32(reg_addr0); + cmd_resp->address1 = CPU_TO_LE32(reg_addr1); + cmd_resp->data0 = CPU_TO_LE32(reg_val0); + cmd_resp->data1 = CPU_TO_LE32(reg_val1); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL); + + return status; +} + +/** + * i40e_aq_alternate_write_indirect + * @hw: pointer to the hardware structure + * @addr: address of a first register to be modified + * @dw_count: number of alternate structure fields to write + * @buffer: pointer to the command buffer + * + * Write 'dw_count' dwords from 'buffer' to alternate structure + * starting at 'addr'. 
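+ *
+ * The command transfers 4 * dw_count bytes, so 'buffer' must be at least
+ * that large. Sketch (illustrative; 'alt_addr' is a placeholder field
+ * address):
+ *
+ *    u32 dwords[8] = { 0 };
+ *
+ *    ret = i40e_aq_alternate_write_indirect(hw, alt_addr, 8, dwords);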
+ * + **/ +enum i40e_status_code i40e_aq_alternate_write_indirect(struct i40e_hw *hw, + u32 addr, u32 dw_count, void *buffer) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_alternate_ind_write *cmd_resp = + (struct i40e_aqc_alternate_ind_write *)&desc.params.raw; + enum i40e_status_code status; + + if (buffer == NULL) + return I40E_ERR_PARAM; + + /* Indirect command */ + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_alternate_write_indirect); + + desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_RD); + desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_BUF); + if (dw_count > (I40E_AQ_LARGE_BUF/4)) + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); + + cmd_resp->address = CPU_TO_LE32(addr); + cmd_resp->length = CPU_TO_LE32(dw_count); + + status = i40e_asq_send_command(hw, &desc, buffer, + I40E_LO_DWORD(4*dw_count), NULL); + + return status; +} + +/** + * i40e_aq_alternate_read + * @hw: pointer to the hardware structure + * @reg_addr0: address of first dword to be read + * @reg_val0: pointer for data read from 'reg_addr0' + * @reg_addr1: address of second dword to be read + * @reg_val1: pointer for data read from 'reg_addr1' + * + * Read one or two dwords from alternate structure. Fields are indicated + * by 'reg_addr0' and 'reg_addr1' register numbers. If 'reg_val1' pointer + * is not passed then only register at 'reg_addr0' is read. + * + **/ +enum i40e_status_code i40e_aq_alternate_read(struct i40e_hw *hw, + u32 reg_addr0, u32 *reg_val0, + u32 reg_addr1, u32 *reg_val1) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_alternate_write *cmd_resp = + (struct i40e_aqc_alternate_write *)&desc.params.raw; + enum i40e_status_code status; + + if (reg_val0 == NULL) + return I40E_ERR_PARAM; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_read); + cmd_resp->address0 = CPU_TO_LE32(reg_addr0); + cmd_resp->address1 = CPU_TO_LE32(reg_addr1); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL); + + if (status == I40E_SUCCESS) { + *reg_val0 = LE32_TO_CPU(cmd_resp->data0); + + if (reg_val1 != NULL) + *reg_val1 = LE32_TO_CPU(cmd_resp->data1); + } + + return status; +} + +/** + * i40e_aq_alternate_read_indirect + * @hw: pointer to the hardware structure + * @addr: address of the alternate structure field + * @dw_count: number of alternate structure fields to read + * @buffer: pointer to the command buffer + * + * Read 'dw_count' dwords from alternate structure starting at 'addr' and + * place them in 'buffer'. The buffer should be allocated by caller. + * + **/ +enum i40e_status_code i40e_aq_alternate_read_indirect(struct i40e_hw *hw, + u32 addr, u32 dw_count, void *buffer) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_alternate_ind_write *cmd_resp = + (struct i40e_aqc_alternate_ind_write *)&desc.params.raw; + enum i40e_status_code status; + + if (buffer == NULL) + return I40E_ERR_PARAM; + + /* Indirect command */ + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_alternate_read_indirect); + + desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_RD); + desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_BUF); + if (dw_count > (I40E_AQ_LARGE_BUF/4)) + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); + + cmd_resp->address = CPU_TO_LE32(addr); + cmd_resp->length = CPU_TO_LE32(dw_count); + + status = i40e_asq_send_command(hw, &desc, buffer, + I40E_LO_DWORD(4*dw_count), NULL); + + return status; +} + +/** + * i40e_aq_alternate_clear + * @hw: pointer to the HW structure. + * + * Clear the alternate structures of the port from which the function + * is called. 
+ * + **/ +enum i40e_status_code i40e_aq_alternate_clear(struct i40e_hw *hw) +{ + struct i40e_aq_desc desc; + enum i40e_status_code status; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_alternate_clear_port); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL); + + return status; +} + +/** + * i40e_aq_alternate_write_done + * @hw: pointer to the HW structure. + * @bios_mode: indicates whether the command is executed by UEFI or legacy BIOS + * @reset_needed: indicates the SW should trigger GLOBAL reset + * + * Indicates to the FW that alternate structures have been changed. + * + **/ +enum i40e_status_code i40e_aq_alternate_write_done(struct i40e_hw *hw, + u8 bios_mode, bool *reset_needed) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_alternate_write_done *cmd = + (struct i40e_aqc_alternate_write_done *)&desc.params.raw; + enum i40e_status_code status; + + if (reset_needed == NULL) + return I40E_ERR_PARAM; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_alternate_write_done); + + cmd->cmd_flags = CPU_TO_LE16(bios_mode); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL); + if (!status && reset_needed) + *reset_needed = ((LE16_TO_CPU(cmd->cmd_flags) & + I40E_AQ_ALTERNATE_RESET_NEEDED) != 0); + + return status; +} + +/** + * i40e_aq_set_oem_mode + * @hw: pointer to the HW structure. + * @oem_mode: the OEM mode to be used + * + * Sets the device to a specific operating mode. Currently the only supported + * mode is no_clp, which causes FW to refrain from using Alternate RAM. + * + **/ +enum i40e_status_code i40e_aq_set_oem_mode(struct i40e_hw *hw, + u8 oem_mode) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_alternate_write_done *cmd = + (struct i40e_aqc_alternate_write_done *)&desc.params.raw; + enum i40e_status_code status; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_alternate_set_mode); + + cmd->cmd_flags = CPU_TO_LE16(oem_mode); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL); + + return status; +} + +/** + * i40e_aq_resume_port_tx + * @hw: pointer to the hardware structure + * @cmd_details: pointer to command details structure or NULL + * + * Resume port's Tx traffic + **/ +enum i40e_status_code i40e_aq_resume_port_tx(struct i40e_hw *hw, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + enum i40e_status_code status; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_resume_port_tx); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_set_pci_config_data - store PCI bus info + * @hw: pointer to hardware structure + * @link_status: the link status word from PCI config space + * + * Stores the PCI bus info (speed, width, type) within the i40e_hw structure + **/ +void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status) +{ + hw->bus.type = i40e_bus_type_pci_express; + + switch (link_status & I40E_PCI_LINK_WIDTH) { + case I40E_PCI_LINK_WIDTH_1: + hw->bus.width = i40e_bus_width_pcie_x1; + break; + case I40E_PCI_LINK_WIDTH_2: + hw->bus.width = i40e_bus_width_pcie_x2; + break; + case I40E_PCI_LINK_WIDTH_4: + hw->bus.width = i40e_bus_width_pcie_x4; + break; + case I40E_PCI_LINK_WIDTH_8: + hw->bus.width = i40e_bus_width_pcie_x8; + break; + default: + hw->bus.width = i40e_bus_width_unknown; + break; + } + + switch (link_status & I40E_PCI_LINK_SPEED) { + case I40E_PCI_LINK_SPEED_2500: + hw->bus.speed = i40e_bus_speed_2500; + break; + case I40E_PCI_LINK_SPEED_5000: + hw->bus.speed = i40e_bus_speed_5000; + break; + case 
I40E_PCI_LINK_SPEED_8000: + hw->bus.speed = i40e_bus_speed_8000; + break; + default: + hw->bus.speed = i40e_bus_speed_unknown; + break; + } +} + +/** + * i40e_aq_debug_dump + * @hw: pointer to the hardware structure + * @cluster_id: specific cluster to dump + * @table_id: table id within cluster + * @start_index: index of line in the block to read + * @buff_size: dump buffer size + * @buff: dump buffer + * @ret_buff_size: actual buffer size returned + * @ret_next_table: next block to read + * @ret_next_index: next index to read + * @cmd_details: pointer to command details structure or NULL + * + * Dump internal FW/HW data for debug purposes. + * + **/ +enum i40e_status_code i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id, + u8 table_id, u32 start_index, u16 buff_size, + void *buff, u16 *ret_buff_size, + u8 *ret_next_table, u32 *ret_next_index, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_debug_dump_internals *cmd = + (struct i40e_aqc_debug_dump_internals *)&desc.params.raw; + struct i40e_aqc_debug_dump_internals *resp = + (struct i40e_aqc_debug_dump_internals *)&desc.params.raw; + enum i40e_status_code status; + + if (buff_size == 0 || !buff) + return I40E_ERR_PARAM; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_debug_dump_internals); + /* Indirect Command */ + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); + if (buff_size > I40E_AQ_LARGE_BUF) + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); + + cmd->cluster_id = cluster_id; + cmd->table_id = table_id; + cmd->idx = CPU_TO_LE32(start_index); + + desc.datalen = CPU_TO_LE16(buff_size); + + status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); + if (!status) { + if (ret_buff_size != NULL) + *ret_buff_size = LE16_TO_CPU(desc.datalen); + if (ret_next_table != NULL) + *ret_next_table = resp->table_id; + if (ret_next_index != NULL) + *ret_next_index = LE32_TO_CPU(resp->idx); + } + + return status; +} + + +/** + * i40e_enable_eee + * @hw: pointer to the hardware structure + * @enable: state of Energy Efficient Ethernet mode to be set + * + * Enables or disables Energy Efficient Ethernet (EEE) mode + * accordingly to @enable parameter. + **/ +enum i40e_status_code i40e_enable_eee(struct i40e_hw *hw, bool enable) +{ + struct i40e_aq_get_phy_abilities_resp abilities; + struct i40e_aq_set_phy_config config; + enum i40e_status_code status; + __le16 eee_capability; + + /* Get initial PHY capabilities */ + status = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, + NULL); + if (status) + goto err; + + /* Check whether NIC configuration is compatible with Energy Efficient + * Ethernet (EEE) mode. 
+ */ + if (abilities.eee_capability == 0) { + status = I40E_ERR_CONFIG; + goto err; + } + + /* Cache initial EEE capability */ + eee_capability = abilities.eee_capability; + + /* Get current configuration */ + status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, + NULL); + if (status) + goto err; + + /* Cache current configuration */ + config.phy_type = abilities.phy_type; + config.link_speed = abilities.link_speed; + config.abilities = abilities.abilities | + I40E_AQ_PHY_ENABLE_ATOMIC_LINK; + config.eeer = abilities.eeer_val; + config.low_power_ctrl = abilities.d3_lpan; + config.fec_config = abilities.fec_cfg_curr_mod_ext_info & + I40E_AQ_PHY_FEC_CONFIG_MASK; + + /* Set desired EEE state */ + if (enable) { + config.eee_capability = eee_capability; + config.eeer |= I40E_PRTPM_EEER_TX_LPI_EN_MASK; + } else { + config.eee_capability = 0; + config.eeer &= ~I40E_PRTPM_EEER_TX_LPI_EN_MASK; + } + + /* Save modified config */ + status = i40e_aq_set_phy_config(hw, &config, NULL); +err: + return status; +} + +/** + * i40e_read_bw_from_alt_ram + * @hw: pointer to the hardware structure + * @max_bw: pointer for max_bw read + * @min_bw: pointer for min_bw read + * @min_valid: pointer for bool that is true if min_bw is a valid value + * @max_valid: pointer for bool that is true if max_bw is a valid value + * + * Read bw from the alternate ram for the given pf + **/ +enum i40e_status_code i40e_read_bw_from_alt_ram(struct i40e_hw *hw, + u32 *max_bw, u32 *min_bw, + bool *min_valid, bool *max_valid) +{ + enum i40e_status_code status; + u32 max_bw_addr, min_bw_addr; + + /* Calculate the address of the min/max bw registers */ + max_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET + + I40E_ALT_STRUCT_MAX_BW_OFFSET + + (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id); + min_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET + + I40E_ALT_STRUCT_MIN_BW_OFFSET + + (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id); + + /* Read the bandwidths from alt ram */ + status = i40e_aq_alternate_read(hw, max_bw_addr, max_bw, + min_bw_addr, min_bw); + + if (*min_bw & I40E_ALT_BW_VALID_MASK) + *min_valid = true; + else + *min_valid = false; + + if (*max_bw & I40E_ALT_BW_VALID_MASK) + *max_valid = true; + else + *max_valid = false; + + return status; +} + +/** + * i40e_aq_configure_partition_bw + * @hw: pointer to the hardware structure + * @bw_data: Buffer holding valid pfs and bw limits + * @cmd_details: pointer to command details + * + * Configure partitions guaranteed/max bw + **/ +enum i40e_status_code i40e_aq_configure_partition_bw(struct i40e_hw *hw, + struct i40e_aqc_configure_partition_bw_data *bw_data, + struct i40e_asq_cmd_details *cmd_details) +{ + enum i40e_status_code status; + struct i40e_aq_desc desc; + u16 bwd_size = sizeof(*bw_data); + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_configure_partition_bw); + + /* Indirect command */ + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD); + + desc.datalen = CPU_TO_LE16(bwd_size); + + status = i40e_asq_send_command(hw, &desc, bw_data, bwd_size, cmd_details); + + return status; +} + +/** + * i40e_read_phy_register_clause22 + * @hw: pointer to the HW structure + * @reg: register address in the page + * @phy_addr: PHY address on MDIO interface + * @value: PHY register value + * + * Reads specified PHY register value + **/ +enum i40e_status_code i40e_read_phy_register_clause22(struct i40e_hw *hw, + u16 reg, u8 phy_addr, u16 *value) +{ + enum i40e_status_code status = I40E_ERR_TIMEOUT; + u8 port_num = 
(u8)hw->func_caps.mdio_port_num; + u32 command = 0; + u16 retry = 1000; + + command = (reg << I40E_GLGEN_MSCA_DEVADD_SHIFT) | + (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | + (I40E_MDIO_CLAUSE22_OPCODE_READ_MASK) | + (I40E_MDIO_CLAUSE22_STCODE_MASK) | + (I40E_GLGEN_MSCA_MDICMD_MASK); + wr32(hw, I40E_GLGEN_MSCA(port_num), command); + do { + command = rd32(hw, I40E_GLGEN_MSCA(port_num)); + if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { + status = I40E_SUCCESS; + break; + } + i40e_usec_delay(10); + retry--; + } while (retry); + + if (status) { + i40e_debug(hw, I40E_DEBUG_PHY, + "PHY: Can't write command to external PHY.\n"); + } else { + command = rd32(hw, I40E_GLGEN_MSRWD(port_num)); + *value = (command & I40E_GLGEN_MSRWD_MDIRDDATA_MASK) >> + I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT; + } + + return status; +} + +/** + * i40e_write_phy_register_clause22 + * @hw: pointer to the HW structure + * @reg: register address in the page + * @phy_addr: PHY address on MDIO interface + * @value: PHY register value + * + * Writes specified PHY register value + **/ +enum i40e_status_code i40e_write_phy_register_clause22(struct i40e_hw *hw, + u16 reg, u8 phy_addr, u16 value) +{ + enum i40e_status_code status = I40E_ERR_TIMEOUT; + u8 port_num = (u8)hw->func_caps.mdio_port_num; + u32 command = 0; + u16 retry = 1000; + + command = value << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT; + wr32(hw, I40E_GLGEN_MSRWD(port_num), command); + + command = (reg << I40E_GLGEN_MSCA_DEVADD_SHIFT) | + (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | + (I40E_MDIO_CLAUSE22_OPCODE_WRITE_MASK) | + (I40E_MDIO_CLAUSE22_STCODE_MASK) | + (I40E_GLGEN_MSCA_MDICMD_MASK); + + wr32(hw, I40E_GLGEN_MSCA(port_num), command); + do { + command = rd32(hw, I40E_GLGEN_MSCA(port_num)); + if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { + status = I40E_SUCCESS; + break; + } + i40e_usec_delay(10); + retry--; + } while (retry); + + return status; +} + +/** + * i40e_read_phy_register_clause45 + * @hw: pointer to the HW structure + * @page: registers page number + * @reg: register address in the page + * @phy_addr: PHY address on MDIO interface + * @value: PHY register value + * + * Reads specified PHY register value + **/ +enum i40e_status_code i40e_read_phy_register_clause45(struct i40e_hw *hw, + u8 page, u16 reg, u8 phy_addr, u16 *value) +{ + enum i40e_status_code status = I40E_ERR_TIMEOUT; + u32 command = 0; + u16 retry = 1000; + u8 port_num = (u8)hw->func_caps.mdio_port_num; + + command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) | + (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) | + (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | + (I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK) | + (I40E_MDIO_CLAUSE45_STCODE_MASK) | + (I40E_GLGEN_MSCA_MDICMD_MASK) | + (I40E_GLGEN_MSCA_MDIINPROGEN_MASK); + wr32(hw, I40E_GLGEN_MSCA(port_num), command); + do { + command = rd32(hw, I40E_GLGEN_MSCA(port_num)); + if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { + status = I40E_SUCCESS; + break; + } + i40e_usec_delay(10); + retry--; + } while (retry); + + if (status) { + i40e_debug(hw, I40E_DEBUG_PHY, + "PHY: Can't write command to external PHY.\n"); + goto phy_read_end; + } + + command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) | + (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | + (I40E_MDIO_CLAUSE45_OPCODE_READ_MASK) | + (I40E_MDIO_CLAUSE45_STCODE_MASK) | + (I40E_GLGEN_MSCA_MDICMD_MASK) | + (I40E_GLGEN_MSCA_MDIINPROGEN_MASK); + status = I40E_ERR_TIMEOUT; + retry = 1000; + wr32(hw, I40E_GLGEN_MSCA(port_num), command); + do { + command = rd32(hw, I40E_GLGEN_MSCA(port_num)); + if (!(command & 
I40E_GLGEN_MSCA_MDICMD_MASK)) { + status = I40E_SUCCESS; + break; + } + i40e_usec_delay(10); + retry--; + } while (retry); + + if (!status) { + command = rd32(hw, I40E_GLGEN_MSRWD(port_num)); + *value = (command & I40E_GLGEN_MSRWD_MDIRDDATA_MASK) >> + I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT; + } else { + i40e_debug(hw, I40E_DEBUG_PHY, + "PHY: Can't read register value from external PHY.\n"); + } + +phy_read_end: + return status; +} + +/** + * i40e_write_phy_register_clause45 + * @hw: pointer to the HW structure + * @page: registers page number + * @reg: register address in the page + * @phy_addr: PHY address on MDIO interface + * @value: PHY register value + * + * Writes value to specified PHY register + **/ +enum i40e_status_code i40e_write_phy_register_clause45(struct i40e_hw *hw, + u8 page, u16 reg, u8 phy_addr, u16 value) +{ + enum i40e_status_code status = I40E_ERR_TIMEOUT; + u32 command = 0; + u16 retry = 1000; + u8 port_num = (u8)hw->func_caps.mdio_port_num; + + command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) | + (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) | + (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | + (I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK) | + (I40E_MDIO_CLAUSE45_STCODE_MASK) | + (I40E_GLGEN_MSCA_MDICMD_MASK) | + (I40E_GLGEN_MSCA_MDIINPROGEN_MASK); + wr32(hw, I40E_GLGEN_MSCA(port_num), command); + do { + command = rd32(hw, I40E_GLGEN_MSCA(port_num)); + if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { + status = I40E_SUCCESS; + break; + } + i40e_usec_delay(10); + retry--; + } while (retry); + if (status) { + i40e_debug(hw, I40E_DEBUG_PHY, + "PHY: Can't write command to external PHY.\n"); + goto phy_write_end; + } + + command = value << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT; + wr32(hw, I40E_GLGEN_MSRWD(port_num), command); + + command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) | + (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | + (I40E_MDIO_CLAUSE45_OPCODE_WRITE_MASK) | + (I40E_MDIO_CLAUSE45_STCODE_MASK) | + (I40E_GLGEN_MSCA_MDICMD_MASK) | + (I40E_GLGEN_MSCA_MDIINPROGEN_MASK); + status = I40E_ERR_TIMEOUT; + retry = 1000; + wr32(hw, I40E_GLGEN_MSCA(port_num), command); + do { + command = rd32(hw, I40E_GLGEN_MSCA(port_num)); + if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { + status = I40E_SUCCESS; + break; + } + i40e_usec_delay(10); + retry--; + } while (retry); + +phy_write_end: + return status; +} + +/** + * i40e_write_phy_register + * @hw: pointer to the HW structure + * @page: registers page number + * @reg: register address in the page + * @phy_addr: PHY address on MDIO interface + * @value: PHY register value + * + * Writes value to specified PHY register + **/ +enum i40e_status_code i40e_write_phy_register(struct i40e_hw *hw, + u8 page, u16 reg, u8 phy_addr, u16 value) +{ + enum i40e_status_code status; + + switch (hw->device_id) { + case I40E_DEV_ID_1G_BASE_T_X722: + status = i40e_write_phy_register_clause22(hw, + reg, phy_addr, value); + break; + case I40E_DEV_ID_10G_BASE_T: + case I40E_DEV_ID_10G_BASE_T4: + case I40E_DEV_ID_10G_BASE_T_BC: + case I40E_DEV_ID_5G_BASE_T_BC: + case I40E_DEV_ID_10G_BASE_T_X722: + case I40E_DEV_ID_25G_B: + case I40E_DEV_ID_25G_SFP28: + status = i40e_write_phy_register_clause45(hw, + page, reg, phy_addr, value); + break; + default: + status = I40E_ERR_UNKNOWN_PHY; + break; + } + + return status; +} + +/** + * i40e_read_phy_register + * @hw: pointer to the HW structure + * @page: registers page number + * @reg: register address in the page + * @phy_addr: PHY address on MDIO interface + * @value: PHY register value + * + * Reads specified PHY register value + **/ 
+enum i40e_status_code i40e_read_phy_register(struct i40e_hw *hw, + u8 page, u16 reg, u8 phy_addr, u16 *value) +{ + enum i40e_status_code status; + + switch (hw->device_id) { + case I40E_DEV_ID_1G_BASE_T_X722: + status = i40e_read_phy_register_clause22(hw, reg, phy_addr, + value); + break; + case I40E_DEV_ID_10G_BASE_T: + case I40E_DEV_ID_10G_BASE_T4: + case I40E_DEV_ID_5G_BASE_T_BC: + case I40E_DEV_ID_10G_BASE_T_X722: + case I40E_DEV_ID_25G_B: + case I40E_DEV_ID_25G_SFP28: + status = i40e_read_phy_register_clause45(hw, page, reg, + phy_addr, value); + break; + default: + status = I40E_ERR_UNKNOWN_PHY; + break; + } + + return status; +} + +/** + * i40e_get_phy_address + * @hw: pointer to the HW structure + * @dev_num: PHY port num that address we want + * + * Gets PHY address for current port + **/ +u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num) +{ + u8 port_num = (u8)hw->func_caps.mdio_port_num; + u32 reg_val = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(port_num)); + + return (u8)(reg_val >> ((dev_num + 1) * 5)) & 0x1f; +} + +/** + * i40e_blink_phy_led + * @hw: pointer to the HW structure + * @time: time how long led will blinks in secs + * @interval: gap between LED on and off in msecs + * + * Blinks PHY link LED + **/ +enum i40e_status_code i40e_blink_phy_link_led(struct i40e_hw *hw, + u32 time, u32 interval) +{ + enum i40e_status_code status = I40E_SUCCESS; + u32 i; + u16 led_ctl = 0; + u16 gpio_led_port; + u16 led_reg; + u16 led_addr = I40E_PHY_LED_PROV_REG_1; + u8 phy_addr = 0; + u8 port_num; + + i = rd32(hw, I40E_PFGEN_PORTNUM); + port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK); + phy_addr = i40e_get_phy_address(hw, port_num); + + for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++, + led_addr++) { + status = i40e_read_phy_register_clause45(hw, + I40E_PHY_COM_REG_PAGE, + led_addr, phy_addr, + &led_reg); + if (status) + goto phy_blinking_end; + led_ctl = led_reg; + if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) { + led_reg = 0; + status = i40e_write_phy_register_clause45(hw, + I40E_PHY_COM_REG_PAGE, + led_addr, phy_addr, + led_reg); + if (status) + goto phy_blinking_end; + break; + } + } + + if (time > 0 && interval > 0) { + for (i = 0; i < time * 1000; i += interval) { + status = i40e_read_phy_register_clause45(hw, + I40E_PHY_COM_REG_PAGE, + led_addr, phy_addr, &led_reg); + if (status) + goto restore_config; + if (led_reg & I40E_PHY_LED_MANUAL_ON) + led_reg = 0; + else + led_reg = I40E_PHY_LED_MANUAL_ON; + status = i40e_write_phy_register_clause45(hw, + I40E_PHY_COM_REG_PAGE, + led_addr, phy_addr, led_reg); + if (status) + goto restore_config; + i40e_msec_delay(interval); + } + } + +restore_config: + status = i40e_write_phy_register_clause45(hw, + I40E_PHY_COM_REG_PAGE, + led_addr, phy_addr, led_ctl); + +phy_blinking_end: + return status; +} + +/** + * i40e_led_get_reg - read LED register + * @hw: pointer to the HW structure + * @led_addr: LED register address + * @reg_val: read register value + **/ +enum i40e_status_code i40e_led_get_reg(struct i40e_hw *hw, u16 led_addr, + u32 *reg_val) +{ + enum i40e_status_code status; + u8 phy_addr = 0; + + *reg_val = 0; + if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) { + status = i40e_aq_get_phy_register(hw, + I40E_AQ_PHY_REG_ACCESS_EXTERNAL, + I40E_PHY_COM_REG_PAGE, true, + I40E_PHY_LED_PROV_REG_1, + reg_val, NULL); + } else { + phy_addr = i40e_get_phy_address(hw, hw->port); + status = i40e_read_phy_register_clause45(hw, + I40E_PHY_COM_REG_PAGE, + led_addr, phy_addr, + (u16 *)reg_val); + } + return status; +} + +/** + * 
i40e_led_set_reg - write LED register
+ * @hw: pointer to the HW structure
+ * @led_addr: LED register address
+ * @reg_val: register value to write
+ **/
+enum i40e_status_code i40e_led_set_reg(struct i40e_hw *hw, u16 led_addr,
+				       u32 reg_val)
+{
+	enum i40e_status_code status;
+	u8 phy_addr = 0;
+
+	if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
+		status = i40e_aq_set_phy_register(hw,
+						  I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
+						  I40E_PHY_COM_REG_PAGE, true,
+						  I40E_PHY_LED_PROV_REG_1,
+						  reg_val, NULL);
+	} else {
+		phy_addr = i40e_get_phy_address(hw, hw->port);
+		status = i40e_write_phy_register_clause45(hw,
+							  I40E_PHY_COM_REG_PAGE,
+							  led_addr, phy_addr,
+							  (u16)reg_val);
+	}
+
+	return status;
+}
+
+/**
+ * i40e_led_get_phy - return current on/off mode
+ * @hw: pointer to the hw struct
+ * @led_addr: address of led register to use
+ * @val: original value of register to use
+ *
+ **/
+enum i40e_status_code i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
+				       u16 *val)
+{
+	enum i40e_status_code status = I40E_SUCCESS;
+	u16 gpio_led_port;
+	u32 reg_val_aq;
+	u16 temp_addr;
+	u8 phy_addr = 0;
+	u16 reg_val;
+
+	if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
+		status = i40e_aq_get_phy_register(hw,
+						  I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
+						  I40E_PHY_COM_REG_PAGE, true,
+						  I40E_PHY_LED_PROV_REG_1,
+						  &reg_val_aq, NULL);
+		if (status == I40E_SUCCESS)
+			*val = (u16)reg_val_aq;
+		return status;
+	}
+	temp_addr = I40E_PHY_LED_PROV_REG_1;
+	phy_addr = i40e_get_phy_address(hw, hw->port);
+	for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++,
+	     temp_addr++) {
+		status = i40e_read_phy_register_clause45(hw,
+							 I40E_PHY_COM_REG_PAGE,
+							 temp_addr, phy_addr,
+							 &reg_val);
+		if (status)
+			return status;
+		*val = reg_val;
+		if (reg_val & I40E_PHY_LED_LINK_MODE_MASK) {
+			*led_addr = temp_addr;
+			break;
+		}
+	}
+	return status;
+}
+
+/**
+ * i40e_led_set_phy
+ * @hw: pointer to the HW structure
+ * @on: true or false
+ * @led_addr: address of led register to use
+ * @mode: original val plus bit for set or ignore
+ *
+ * Set LEDs on or off when controlled by the PHY
+ *
+ **/
+enum i40e_status_code i40e_led_set_phy(struct i40e_hw *hw, bool on,
+				       u16 led_addr, u32 mode)
+{
+	enum i40e_status_code status = I40E_SUCCESS;
+	u32 led_ctl = 0;
+	u32 led_reg = 0;
+
+	status = i40e_led_get_reg(hw, led_addr, &led_reg);
+	if (status)
+		return status;
+	led_ctl = led_reg;
+	if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) {
+		led_reg = 0;
+		status = i40e_led_set_reg(hw, led_addr, led_reg);
+		if (status)
+			return status;
+	}
+	status = i40e_led_get_reg(hw, led_addr, &led_reg);
+	if (status)
+		goto restore_config;
+	if (on)
+		led_reg = I40E_PHY_LED_MANUAL_ON;
+	else
+		led_reg = 0;
+	status = i40e_led_set_reg(hw, led_addr, led_reg);
+	if (status)
+		goto restore_config;
+	if (mode & I40E_PHY_LED_MODE_ORIG) {
+		led_ctl = (mode & I40E_PHY_LED_MODE_MASK);
+		status = i40e_led_set_reg(hw, led_addr, led_ctl);
+	}
+	return status;
+
+restore_config:
+	status = i40e_led_set_reg(hw, led_addr, led_ctl);
+	return status;
+}
+#endif /* PF_DRIVER */
+/**
+ * i40e_get_phy_lpi_status - read LPI status from PHY or MAC register
+ * @hw: pointer to the hw struct
+ * @stat: pointer to structure with status of rx and tx lpi
+ *
+ * Read LPI state directly from external PHY register or from MAC
+ * register, depending on device ID and current link speed.
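+ *
+ * Usage sketch (illustrative only; 'hw' is assumed to be an initialized
+ * HW struct):
+ *
+ *	struct i40e_hw_port_stats stats;
+ *	enum i40e_status_code ret;
+ *
+ *	ret = i40e_get_phy_lpi_status(hw, &stats);
+ *	if (ret == I40E_SUCCESS &&
+ *	    (stats.tx_lpi_status || stats.rx_lpi_status))
+ *		i40e_debug(hw, I40E_DEBUG_PHY, "port is signalling LPI\n");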
+ */ +enum i40e_status_code i40e_get_phy_lpi_status(struct i40e_hw *hw, + struct i40e_hw_port_stats *stat) +{ + enum i40e_status_code ret = I40E_SUCCESS; + u32 val; + + stat->rx_lpi_status = 0; + stat->tx_lpi_status = 0; + + if ((hw->device_id == I40E_DEV_ID_10G_BASE_T_BC || + hw->device_id == I40E_DEV_ID_5G_BASE_T_BC) && + (hw->phy.link_info.link_speed == I40E_LINK_SPEED_2_5GB || + hw->phy.link_info.link_speed == I40E_LINK_SPEED_5GB)) { + ret = i40e_aq_get_phy_register(hw, + I40E_AQ_PHY_REG_ACCESS_EXTERNAL, + I40E_BCM_PHY_PCS_STATUS1_PAGE, + true, + I40E_BCM_PHY_PCS_STATUS1_REG, + &val, NULL); + + if (ret != I40E_SUCCESS) + return ret; + + stat->rx_lpi_status = !!(val & I40E_BCM_PHY_PCS_STATUS1_RX_LPI); + stat->tx_lpi_status = !!(val & I40E_BCM_PHY_PCS_STATUS1_TX_LPI); + + return ret; + } + + val = rd32(hw, I40E_PRTPM_EEE_STAT); + stat->rx_lpi_status = (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >> + I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT; + stat->tx_lpi_status = (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >> + I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT; + + return ret; +} + +/** + * i40e_get_lpi_counters - read LPI counters from EEE statistics + * @hw: pointer to the hw struct + * @tx_counter: pointer to memory for TX LPI counter + * @rx_counter: pointer to memory for RX LPI counter + * @is_clear: returns true if counters are clear after read + * + * Read Low Power Idle (LPI) mode counters from Energy Efficient + * Ethernet (EEE) statistics. + **/ +enum i40e_status_code i40e_get_lpi_counters(struct i40e_hw *hw, + u32 *tx_counter, u32 *rx_counter, + bool *is_clear) +{ + /* only X710-T*L requires special handling of counters + * for other devices we just read the MAC registers + */ + if ((hw->device_id == I40E_DEV_ID_10G_BASE_T_BC || + hw->device_id == I40E_DEV_ID_5G_BASE_T_BC) && + hw->phy.link_info.link_speed != I40E_LINK_SPEED_1GB) { + enum i40e_status_code retval; + u32 cmd_status = 0; + + *is_clear = false; + retval = i40e_aq_run_phy_activity(hw, + I40E_AQ_RUN_PHY_ACT_ID_USR_DFND, + I40E_AQ_RUN_PHY_ACT_DNL_OPCODE_GET_EEE_STAT, + &cmd_status, tx_counter, rx_counter, NULL); + + if (cmd_status != I40E_AQ_RUN_PHY_ACT_CMD_STAT_SUCC) + retval = I40E_ERR_ADMIN_QUEUE_ERROR; + + return retval; + } + + *is_clear = true; + *tx_counter = rd32(hw, I40E_PRTPM_TLPIC); + *rx_counter = rd32(hw, I40E_PRTPM_RLPIC); + + return I40E_SUCCESS; +} + +/** + * i40e_get_lpi_duration - read LPI time duration from EEE statistics + * @hw: pointer to the hw struct + * @stat: pointer to structure with status of rx and tx lpi + * @tx_duration: pointer to memory for TX LPI time duration + * @rx_duration: pointer to memory for RX LPI time duration + * + * Read Low Power Idle (LPI) mode time duration from Energy Efficient + * Ethernet (EEE) statistics. 
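+ *
+ * Illustrative usage (assumes 'stats' was previously filled in by
+ * i40e_get_phy_lpi_status() and that 'hw' is valid):
+ *
+ *	u64 tx_dur = 0, rx_dur = 0;
+ *	enum i40e_status_code ret;
+ *
+ *	ret = i40e_get_lpi_duration(hw, &stats, &tx_dur, &rx_dur);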
+ */ +enum i40e_status_code i40e_get_lpi_duration(struct i40e_hw *hw, + struct i40e_hw_port_stats *stat, + u64 *tx_duration, u64 *rx_duration) +{ + u32 tx_time_dur, rx_time_dur; + enum i40e_status_code retval; + u32 cmd_status; + + if (hw->device_id != I40E_DEV_ID_10G_BASE_T_BC && + hw->device_id != I40E_DEV_ID_5G_BASE_T_BC) + return I40E_ERR_NOT_IMPLEMENTED; + + retval = i40e_aq_run_phy_activity + (hw, I40E_AQ_RUN_PHY_ACT_ID_USR_DFND, + I40E_AQ_RUN_PHY_ACT_DNL_OPCODE_GET_EEE_DUR, + &cmd_status, &tx_time_dur, &rx_time_dur, NULL); + + if (retval) + return retval; + if ((cmd_status & I40E_AQ_RUN_PHY_ACT_CMD_STAT_MASK) != + I40E_AQ_RUN_PHY_ACT_CMD_STAT_SUCC) + return I40E_ERR_ADMIN_QUEUE_ERROR; + + if (hw->phy.link_info.link_speed == I40E_LINK_SPEED_1GB && + !tx_time_dur && !rx_time_dur && + stat->tx_lpi_status && stat->rx_lpi_status) { + retval = i40e_aq_run_phy_activity + (hw, I40E_AQ_RUN_PHY_ACT_ID_USR_DFND, + I40E_AQ_RUN_PHY_ACT_DNL_OPCODE_GET_EEE_STAT_DUR, + &cmd_status, + &tx_time_dur, &rx_time_dur, NULL); + + if (retval) + return retval; + if ((cmd_status & I40E_AQ_RUN_PHY_ACT_CMD_STAT_MASK) != + I40E_AQ_RUN_PHY_ACT_CMD_STAT_SUCC) + return I40E_ERR_ADMIN_QUEUE_ERROR; + tx_time_dur = 0; + rx_time_dur = 0; + } + + *tx_duration = tx_time_dur; + *rx_duration = rx_time_dur; + + return retval; +} + +/** + * i40e_lpi_stat_update - update LPI counters with values relative to offset + * @hw: pointer to the hw struct + * @offset_loaded: flag indicating need of writing current value to offset + * @tx_offset: pointer to offset of TX LPI counter + * @tx_stat: pointer to value of TX LPI counter + * @rx_offset: pointer to offset of RX LPI counter + * @rx_stat: pointer to value of RX LPI counter + * + * Update Low Power Idle (LPI) mode counters while having regard to passed + * offsets. + **/ +enum i40e_status_code i40e_lpi_stat_update(struct i40e_hw *hw, + bool offset_loaded, u64 *tx_offset, + u64 *tx_stat, u64 *rx_offset, + u64 *rx_stat) +{ + enum i40e_status_code retval; + u32 tx_counter, rx_counter; + bool is_clear; + + retval = i40e_get_lpi_counters(hw, &tx_counter, &rx_counter, &is_clear); + if (retval) + goto err; + + if (is_clear) { + *tx_stat += tx_counter; + *rx_stat += rx_counter; + } else { + if (!offset_loaded) { + *tx_offset = tx_counter; + *rx_offset = rx_counter; + } + + *tx_stat = (tx_counter >= *tx_offset) ? + (u32)(tx_counter - *tx_offset) : + (u32)((tx_counter + BIT_ULL(32)) - *tx_offset); + *rx_stat = (rx_counter >= *rx_offset) ? 
+ (u32)(rx_counter - *rx_offset) : + (u32)((rx_counter + BIT_ULL(32)) - *rx_offset); + } +err: + return retval; +} + +/** + * i40e_aq_rx_ctl_read_register - use FW to read from an Rx control register + * @hw: pointer to the hw struct + * @reg_addr: register address + * @reg_val: ptr to register value + * @cmd_details: pointer to command details structure or NULL + * + * Use the firmware to read the Rx control register, + * especially useful if the Rx unit is under heavy pressure + **/ +enum i40e_status_code i40e_aq_rx_ctl_read_register(struct i40e_hw *hw, + u32 reg_addr, u32 *reg_val, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_rx_ctl_reg_read_write *cmd_resp = + (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw; + enum i40e_status_code status; + + if (reg_val == NULL) + return I40E_ERR_PARAM; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_read); + + cmd_resp->address = CPU_TO_LE32(reg_addr); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + if (status == I40E_SUCCESS) + *reg_val = LE32_TO_CPU(cmd_resp->value); + + return status; +} + +/** + * i40e_read_rx_ctl - read from an Rx control register + * @hw: pointer to the hw struct + * @reg_addr: register address + **/ +u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr) +{ + enum i40e_status_code status = I40E_SUCCESS; + bool use_register; + int retry = 5; + u32 val = 0; + + use_register = (((hw->aq.api_maj_ver == 1) && + (hw->aq.api_min_ver < 5)) || + (hw->mac.type == I40E_MAC_X722)); + if (!use_register) { +do_retry: + status = i40e_aq_rx_ctl_read_register(hw, reg_addr, &val, NULL); + if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) { + i40e_msec_delay(1); + retry--; + goto do_retry; + } + } + + /* if the AQ access failed, try the old-fashioned way */ + if (status || use_register) + val = rd32(hw, reg_addr); + + return val; +} + +/** + * i40e_aq_rx_ctl_write_register + * @hw: pointer to the hw struct + * @reg_addr: register address + * @reg_val: register value + * @cmd_details: pointer to command details structure or NULL + * + * Use the firmware to write to an Rx control register, + * especially useful if the Rx unit is under heavy pressure + **/ +enum i40e_status_code i40e_aq_rx_ctl_write_register(struct i40e_hw *hw, + u32 reg_addr, u32 reg_val, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_rx_ctl_reg_read_write *cmd = + (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw; + enum i40e_status_code status; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_write); + + cmd->address = CPU_TO_LE32(reg_addr); + cmd->value = CPU_TO_LE32(reg_val); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_write_rx_ctl - write to an Rx control register + * @hw: pointer to the hw struct + * @reg_addr: register address + * @reg_val: register value + **/ +void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val) +{ + enum i40e_status_code status = I40E_SUCCESS; + bool use_register; + int retry = 5; + + use_register = (((hw->aq.api_maj_ver == 1) && + (hw->aq.api_min_ver < 5)) || + (hw->mac.type == I40E_MAC_X722)); + if (!use_register) { +do_retry: + status = i40e_aq_rx_ctl_write_register(hw, reg_addr, + reg_val, NULL); + if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) { + i40e_msec_delay(1); + retry--; + goto do_retry; + } + } + + /* if the AQ access failed, try the old-fashioned way */ + if (status 
|| use_register) + wr32(hw, reg_addr, reg_val); +} + +/** + * i40e_mdio_if_number_selection - MDIO I/F number selection + * @hw: pointer to the hw struct + * @set_mdio: use MDIO I/F number specified by mdio_num + * @mdio_num: MDIO I/F number + * @cmd: pointer to PHY Register command structure + **/ +static void +i40e_mdio_if_number_selection(struct i40e_hw *hw, bool set_mdio, u8 mdio_num, + struct i40e_aqc_phy_register_access *cmd) +{ + if (set_mdio && cmd->phy_interface == I40E_AQ_PHY_REG_ACCESS_EXTERNAL) { + if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_EXTENDED) + cmd->cmd_flags |= + I40E_AQ_PHY_REG_ACCESS_SET_MDIO_IF_NUMBER | + ((mdio_num << + I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_SHIFT) & + I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_MASK); + else + i40e_debug(hw, I40E_DEBUG_PHY, + "MDIO I/F number selection not supported by current FW version.\n"); + } +} + +/** + * i40e_aq_set_phy_register_ext + * @hw: pointer to the hw struct + * @phy_select: select which phy should be accessed + * @dev_addr: PHY device address + * @page_change: enable auto page change + * @set_mdio: use MDIO I/F number specified by mdio_num + * @mdio_num: MDIO I/F number + * @reg_addr: PHY register address + * @reg_val: new register value + * @cmd_details: pointer to command details structure or NULL + * + * Write the external PHY register. + * NOTE: In common cases MDIO I/F number should not be changed, thats why you + * may use simple wrapper i40e_aq_set_phy_register. + **/ +enum i40e_status_code +i40e_aq_set_phy_register_ext(struct i40e_hw *hw, + u8 phy_select, u8 dev_addr, bool page_change, + bool set_mdio, u8 mdio_num, + u32 reg_addr, u32 reg_val, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_phy_register_access *cmd = + (struct i40e_aqc_phy_register_access *)&desc.params.raw; + enum i40e_status_code status; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_set_phy_register); + + cmd->phy_interface = phy_select; + cmd->dev_addres = dev_addr; + cmd->reg_address = CPU_TO_LE32(reg_addr); + cmd->reg_value = CPU_TO_LE32(reg_val); + + if (!page_change) + cmd->cmd_flags = I40E_AQ_PHY_REG_ACCESS_DONT_CHANGE_QSFP_PAGE; + + i40e_mdio_if_number_selection(hw, set_mdio, mdio_num, cmd); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_aq_get_phy_register_ext + * @hw: pointer to the hw struct + * @phy_select: select which phy should be accessed + * @dev_addr: PHY device address + * @page_change: enable auto page change + * @set_mdio: use MDIO I/F number specified by mdio_num + * @mdio_num: MDIO I/F number + * @reg_addr: PHY register address + * @reg_val: read register value + * @cmd_details: pointer to command details structure or NULL + * + * Read the external PHY register. + * NOTE: In common cases MDIO I/F number should not be changed, thats why you + * may use simple wrapper i40e_aq_get_phy_register. 
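+ *
+ * An illustrative call (mirrors how the wrapper is used elsewhere in this
+ * file; 'hw' is assumed to be valid):
+ *
+ *	u32 val = 0;
+ *	enum i40e_status_code ret;
+ *
+ *	ret = i40e_aq_get_phy_register_ext(hw,
+ *			I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
+ *			I40E_PHY_COM_REG_PAGE, true, false, 0,
+ *			I40E_PHY_LED_PROV_REG_1, &val, NULL);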
+ **/ +enum i40e_status_code +i40e_aq_get_phy_register_ext(struct i40e_hw *hw, + u8 phy_select, u8 dev_addr, bool page_change, + bool set_mdio, u8 mdio_num, + u32 reg_addr, u32 *reg_val, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_phy_register_access *cmd = + (struct i40e_aqc_phy_register_access *)&desc.params.raw; + enum i40e_status_code status; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_get_phy_register); + + cmd->phy_interface = phy_select; + cmd->dev_addres = dev_addr; + cmd->reg_address = CPU_TO_LE32(reg_addr); + + if (!page_change) + cmd->cmd_flags = I40E_AQ_PHY_REG_ACCESS_DONT_CHANGE_QSFP_PAGE; + + i40e_mdio_if_number_selection(hw, set_mdio, mdio_num, cmd); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + if (!status) + *reg_val = LE32_TO_CPU(cmd->reg_value); + + return status; +} + +/** + * i40e_aq_run_phy_activity + * @hw: pointer to the hw struct + * @activity_id: ID of DNL activity to run + * @dnl_opcode: opcode passed to DNL script + * @cmd_status: pointer to memory to write return value of DNL script + * @data0: pointer to memory for first 4 bytes of data returned by DNL script + * @data1: pointer to memory for last 4 bytes of data returned by DNL script + * @cmd_details: pointer to command details structure or NULL + * + * Run DNL admin command. + **/ +enum i40e_status_code +i40e_aq_run_phy_activity(struct i40e_hw *hw, u16 activity_id, u32 dnl_opcode, + u32 *cmd_status, u32 *data0, u32 *data1, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aqc_run_phy_activity *cmd; + enum i40e_status_code retval; + struct i40e_aq_desc desc; + + cmd = (struct i40e_aqc_run_phy_activity *)&desc.params.raw; + + if (!cmd_status || !data0 || !data1) { + retval = I40E_ERR_PARAM; + goto err; + } + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_run_phy_activity); + + cmd->activity_id = CPU_TO_LE16(activity_id); + cmd->params.cmd.dnl_opcode = CPU_TO_LE32(dnl_opcode); + + retval = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + if (retval) + goto err; + + *cmd_status = LE32_TO_CPU(cmd->params.resp.cmd_status); + *data0 = LE32_TO_CPU(cmd->params.resp.data0); + *data1 = LE32_TO_CPU(cmd->params.resp.data1); +err: + return retval; +} + +#ifdef VF_DRIVER + +/** + * i40e_aq_send_msg_to_pf + * @hw: pointer to the hardware structure + * @v_opcode: opcodes for VF-PF communication + * @v_retval: return error code + * @msg: pointer to the msg buffer + * @msglen: msg length + * @cmd_details: pointer to command details + * + * Send message to PF driver using admin queue. By default, this message + * is sent asynchronously, i.e. i40e_asq_send_command() does not wait for + * completion before returning. 
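+ *
+ * A sketch of a typical VF->PF message (illustrative only; 'vvi' is assumed
+ * to be a populated struct virtchnl_version_info):
+ *
+ *	ret = i40e_aq_send_msg_to_pf(hw, VIRTCHNL_OP_VERSION, I40E_SUCCESS,
+ *				     (u8 *)&vvi, sizeof(vvi), NULL);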
+ **/ +enum i40e_status_code i40e_aq_send_msg_to_pf(struct i40e_hw *hw, + enum virtchnl_ops v_opcode, + enum i40e_status_code v_retval, + u8 *msg, u16 msglen, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_asq_cmd_details details; + enum i40e_status_code status; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_pf); + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_SI); + desc.cookie_high = CPU_TO_LE32(v_opcode); + desc.cookie_low = CPU_TO_LE32(v_retval); + if (msglen) { + desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF + | I40E_AQ_FLAG_RD)); + if (msglen > I40E_AQ_LARGE_BUF) + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); + desc.datalen = CPU_TO_LE16(msglen); + } + if (!cmd_details) { + i40e_memset(&details, 0, sizeof(details), I40E_NONDMA_MEM); + details.async = true; + cmd_details = &details; + } + status = i40e_asq_send_command(hw, (struct i40e_aq_desc *)&desc, msg, + msglen, cmd_details); + return status; +} + +/** + * i40e_vf_parse_hw_config + * @hw: pointer to the hardware structure + * @msg: pointer to the virtual channel VF resource structure + * + * Given a VF resource message from the PF, populate the hw struct + * with appropriate information. + **/ +void i40e_vf_parse_hw_config(struct i40e_hw *hw, + struct virtchnl_vf_resource *msg) +{ + struct virtchnl_vsi_resource *vsi_res; + int i; + + vsi_res = &msg->vsi_res[0]; + + hw->dev_caps.num_vsis = msg->num_vsis; + hw->dev_caps.num_rx_qp = msg->num_queue_pairs; + hw->dev_caps.num_tx_qp = msg->num_queue_pairs; + hw->dev_caps.num_msix_vectors_vf = msg->max_vectors; + hw->dev_caps.dcb = msg->vf_cap_flags & + VIRTCHNL_VF_OFFLOAD_L2; + hw->dev_caps.iwarp = (msg->vf_cap_flags & + VIRTCHNL_VF_OFFLOAD_IWARP) ? 1 : 0; + for (i = 0; i < msg->num_vsis; i++) { + if (vsi_res->vsi_type == VIRTCHNL_VSI_SRIOV) { + i40e_memcpy(hw->mac.perm_addr, + vsi_res->default_mac_addr, + ETH_ALEN, + I40E_NONDMA_TO_NONDMA); + i40e_memcpy(hw->mac.addr, vsi_res->default_mac_addr, + ETH_ALEN, + I40E_NONDMA_TO_NONDMA); + } + vsi_res++; + } +} + +/** + * i40e_vf_reset + * @hw: pointer to the hardware structure + * + * Send a VF_RESET message to the PF. Does not wait for response from PF + * as none will be forthcoming. Immediately after calling this function, + * the admin queue should be shut down and (optionally) reinitialized. 
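+ *
+ * Typical sequence (illustrative sketch; i40e_shutdown_adminq() here refers
+ * to the shared-code admin queue shutdown helper):
+ *
+ *	ret = i40e_vf_reset(hw);
+ *	if (ret == I40E_SUCCESS)
+ *		ret = i40e_shutdown_adminq(hw);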
+ **/ +enum i40e_status_code i40e_vf_reset(struct i40e_hw *hw) +{ + return i40e_aq_send_msg_to_pf(hw, VIRTCHNL_OP_RESET_VF, + I40E_SUCCESS, NULL, 0, NULL); +} +#endif /* VF_DRIVER */ + +/** + * i40e_aq_set_arp_proxy_config + * @hw: pointer to the HW structure + * @proxy_config: pointer to proxy config command table struct + * @cmd_details: pointer to command details + * + * Set ARP offload parameters from pre-populated + * i40e_aqc_arp_proxy_data struct + **/ +enum i40e_status_code i40e_aq_set_arp_proxy_config(struct i40e_hw *hw, + struct i40e_aqc_arp_proxy_data *proxy_config, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + enum i40e_status_code status; + + if (!proxy_config) + return I40E_ERR_PARAM; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_proxy_config); + + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD); + desc.params.external.addr_high = + CPU_TO_LE32(I40E_HI_DWORD((u64)proxy_config)); + desc.params.external.addr_low = + CPU_TO_LE32(I40E_LO_DWORD((u64)proxy_config)); + desc.datalen = CPU_TO_LE16(sizeof(struct i40e_aqc_arp_proxy_data)); + + status = i40e_asq_send_command(hw, &desc, proxy_config, + sizeof(struct i40e_aqc_arp_proxy_data), + cmd_details); + + return status; +} + +/** + * i40e_aq_opc_set_ns_proxy_table_entry + * @hw: pointer to the HW structure + * @ns_proxy_table_entry: pointer to NS table entry command struct + * @cmd_details: pointer to command details + * + * Set IPv6 Neighbor Solicitation (NS) protocol offload parameters + * from pre-populated i40e_aqc_ns_proxy_data struct + **/ +enum i40e_status_code i40e_aq_set_ns_proxy_table_entry(struct i40e_hw *hw, + struct i40e_aqc_ns_proxy_data *ns_proxy_table_entry, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + enum i40e_status_code status; + + if (!ns_proxy_table_entry) + return I40E_ERR_PARAM; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_set_ns_proxy_table_entry); + + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD); + desc.params.external.addr_high = + CPU_TO_LE32(I40E_HI_DWORD((u64)ns_proxy_table_entry)); + desc.params.external.addr_low = + CPU_TO_LE32(I40E_LO_DWORD((u64)ns_proxy_table_entry)); + desc.datalen = CPU_TO_LE16(sizeof(struct i40e_aqc_ns_proxy_data)); + + status = i40e_asq_send_command(hw, &desc, ns_proxy_table_entry, + sizeof(struct i40e_aqc_ns_proxy_data), + cmd_details); + + return status; +} + +/** + * i40e_aq_set_clear_wol_filter + * @hw: pointer to the hw struct + * @filter_index: index of filter to modify (0-7) + * @filter: buffer containing filter to be set + * @set_filter: true to set filter, false to clear filter + * @no_wol_tco: if true, pass through packets cannot cause wake-up + * if false, pass through packets may cause wake-up + * @filter_valid: true if filter action is valid + * @no_wol_tco_valid: true if no WoL in TCO traffic action valid + * @cmd_details: pointer to command details structure or NULL + * + * Set or clear WoL filter for port attached to the PF + **/ +enum i40e_status_code i40e_aq_set_clear_wol_filter(struct i40e_hw *hw, + u8 filter_index, + struct i40e_aqc_set_wol_filter_data *filter, + bool set_filter, bool no_wol_tco, + bool filter_valid, bool no_wol_tco_valid, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_set_wol_filter *cmd = + (struct i40e_aqc_set_wol_filter *)&desc.params.raw; + enum i40e_status_code status; + u16 cmd_flags = 0; + 
u16 valid_flags = 0; + u16 buff_len = 0; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_wol_filter); + + if (filter_index >= I40E_AQC_MAX_NUM_WOL_FILTERS) + return I40E_ERR_PARAM; + cmd->filter_index = CPU_TO_LE16(filter_index); + + if (set_filter) { + if (!filter) + return I40E_ERR_PARAM; + + cmd_flags |= I40E_AQC_SET_WOL_FILTER; + cmd_flags |= I40E_AQC_SET_WOL_FILTER_WOL_PRESERVE_ON_PFR; + } + + if (no_wol_tco) + cmd_flags |= I40E_AQC_SET_WOL_FILTER_NO_TCO_WOL; + cmd->cmd_flags = CPU_TO_LE16(cmd_flags); + + if (filter_valid) + valid_flags |= I40E_AQC_SET_WOL_FILTER_ACTION_VALID; + if (no_wol_tco_valid) + valid_flags |= I40E_AQC_SET_WOL_FILTER_NO_TCO_ACTION_VALID; + cmd->valid_flags = CPU_TO_LE16(valid_flags); + + buff_len = sizeof(*filter); + desc.datalen = CPU_TO_LE16(buff_len); + + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD); + + cmd->address_high = CPU_TO_LE32(I40E_HI_DWORD((u64)filter)); + cmd->address_low = CPU_TO_LE32(I40E_LO_DWORD((u64)filter)); + + status = i40e_asq_send_command(hw, &desc, filter, + buff_len, cmd_details); + + return status; +} + +/** + * i40e_aq_get_wake_event_reason + * @hw: pointer to the hw struct + * @wake_reason: return value, index of matching filter + * @cmd_details: pointer to command details structure or NULL + * + * Get information for the reason of a Wake Up event + **/ +enum i40e_status_code i40e_aq_get_wake_event_reason(struct i40e_hw *hw, + u16 *wake_reason, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_get_wake_reason_completion *resp = + (struct i40e_aqc_get_wake_reason_completion *)&desc.params.raw; + enum i40e_status_code status; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_wake_reason); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + if (status == I40E_SUCCESS) + *wake_reason = LE16_TO_CPU(resp->wake_reason); + + return status; +} + +/** +* i40e_aq_clear_all_wol_filters +* @hw: pointer to the hw struct +* @cmd_details: pointer to command details structure or NULL +* +* Get information for the reason of a Wake Up event +**/ +enum i40e_status_code i40e_aq_clear_all_wol_filters(struct i40e_hw *hw, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + enum i40e_status_code status; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_clear_all_wol_filters); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_aq_write_ddp - Write dynamic device personalization (ddp) + * @hw: pointer to the hw struct + * @buff: command buffer (size in bytes = buff_size) + * @buff_size: buffer size in bytes + * @track_id: package tracking id + * @error_offset: returns error offset + * @error_info: returns error information + * @cmd_details: pointer to command details structure or NULL + **/ +enum +i40e_status_code i40e_aq_write_ddp(struct i40e_hw *hw, void *buff, + u16 buff_size, u32 track_id, + u32 *error_offset, u32 *error_info, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_write_personalization_profile *cmd = + (struct i40e_aqc_write_personalization_profile *) + &desc.params.raw; + struct i40e_aqc_write_ddp_resp *resp; + enum i40e_status_code status; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_write_personalization_profile); + + desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD); + if (buff_size > I40E_AQ_LARGE_BUF) + desc.flags |= 
CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); + + desc.datalen = CPU_TO_LE16(buff_size); + + cmd->profile_track_id = CPU_TO_LE32(track_id); + + status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); + if (!status) { + resp = (struct i40e_aqc_write_ddp_resp *)&desc.params.raw; + if (error_offset) + *error_offset = LE32_TO_CPU(resp->error_offset); + if (error_info) + *error_info = LE32_TO_CPU(resp->error_info); + } + + return status; +} + +/** + * i40e_aq_get_ddp_list - Read dynamic device personalization (ddp) + * @hw: pointer to the hw struct + * @buff: command buffer (size in bytes = buff_size) + * @buff_size: buffer size in bytes + * @flags: AdminQ command flags + * @cmd_details: pointer to command details structure or NULL + **/ +enum +i40e_status_code i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff, + u16 buff_size, u8 flags, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_get_applied_profiles *cmd = + (struct i40e_aqc_get_applied_profiles *)&desc.params.raw; + enum i40e_status_code status; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_get_personalization_profile_list); + + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); + if (buff_size > I40E_AQ_LARGE_BUF) + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); + desc.datalen = CPU_TO_LE16(buff_size); + + cmd->flags = flags; + + status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); + + return status; +} + +/** + * i40e_find_segment_in_package + * @segment_type: the segment type to search for (i.e., SEGMENT_TYPE_I40E) + * @pkg_hdr: pointer to the package header to be searched + * + * This function searches a package file for a particular segment type. On + * success it returns a pointer to the segment header, otherwise it will + * return NULL. + **/ +struct i40e_generic_seg_header * +i40e_find_segment_in_package(u32 segment_type, + struct i40e_package_header *pkg_hdr) +{ + struct i40e_generic_seg_header *segment; + u32 i; + + /* Search all package segments for the requested segment type */ + for (i = 0; i < pkg_hdr->segment_count; i++) { + segment = + (struct i40e_generic_seg_header *)((u8 *)pkg_hdr + + pkg_hdr->segment_offset[i]); + + if (segment->type == segment_type) + return segment; + } + + return NULL; +} + +/* Get section table in profile */ +#define I40E_SECTION_TABLE(profile, sec_tbl) \ + do { \ + struct i40e_profile_segment *p = (profile); \ + u32 count; \ + u32 *nvm; \ + count = p->device_table_count; \ + nvm = (u32 *)&p->device_table[count]; \ + sec_tbl = (struct i40e_section_table *)&nvm[nvm[0] + 1]; \ + } while (0) + +/* Get section header in profile */ +#define I40E_SECTION_HEADER(profile, offset) \ + (struct i40e_profile_section_header *)((u8 *)(profile) + (offset)) + +/** + * i40e_find_section_in_profile + * @section_type: the section type to search for (i.e., SECTION_TYPE_NOTE) + * @profile: pointer to the i40e segment header to be searched + * + * This function searches i40e segment for a particular section type. On + * success it returns a pointer to the section header, otherwise it will + * return NULL. 
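+ *
+ * Example lookup (illustrative only; 'profile' is assumed to point at a
+ * valid i40e profile segment):
+ *
+ *	struct i40e_profile_section_header *sec;
+ *
+ *	sec = i40e_find_section_in_profile(SECTION_TYPE_NOTE, profile);
+ *	if (!sec)
+ *		return I40E_NOT_SUPPORTED;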
+ **/ +struct i40e_profile_section_header * +i40e_find_section_in_profile(u32 section_type, + struct i40e_profile_segment *profile) +{ + struct i40e_profile_section_header *sec; + struct i40e_section_table *sec_tbl; + u32 sec_off; + u32 i; + + if (profile->header.type != SEGMENT_TYPE_I40E) + return NULL; + + I40E_SECTION_TABLE(profile, sec_tbl); + + for (i = 0; i < sec_tbl->section_count; i++) { + sec_off = sec_tbl->section_offset[i]; + sec = I40E_SECTION_HEADER(profile, sec_off); + if (sec->section.type == section_type) + return sec; + } + + return NULL; +} + +/** + * i40e_ddp_exec_aq_section - Execute generic AQ for DDP + * @hw: pointer to the hw struct + * @aq: command buffer containing all data to execute AQ + **/ +STATIC enum +i40e_status_code i40e_ddp_exec_aq_section(struct i40e_hw *hw, + struct i40e_profile_aq_section *aq) +{ + enum i40e_status_code status; + struct i40e_aq_desc desc; + u8 *msg = NULL; + u16 msglen; + + i40e_fill_default_direct_cmd_desc(&desc, aq->opcode); + desc.flags |= CPU_TO_LE16(aq->flags); + i40e_memcpy(desc.params.raw, aq->param, sizeof(desc.params.raw), + I40E_NONDMA_TO_NONDMA); + + msglen = aq->datalen; + if (msglen) { + desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | + I40E_AQ_FLAG_RD)); + if (msglen > I40E_AQ_LARGE_BUF) + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); + desc.datalen = CPU_TO_LE16(msglen); + msg = &aq->data[0]; + } + + status = i40e_asq_send_command(hw, &desc, msg, msglen, NULL); + + if (status != I40E_SUCCESS) { + i40e_debug(hw, I40E_DEBUG_PACKAGE, + "unable to exec DDP AQ opcode %u, error %d\n", + aq->opcode, status); + return status; + } + + /* copy returned desc to aq_buf */ + i40e_memcpy(aq->param, desc.params.raw, sizeof(desc.params.raw), + I40E_NONDMA_TO_NONDMA); + + return I40E_SUCCESS; +} + +/** + * i40e_validate_profile + * @hw: pointer to the hardware structure + * @profile: pointer to the profile segment of the package to be validated + * @track_id: package tracking id + * @rollback: flag if the profile is for rollback. + * + * Validates supported devices and profile's sections. 
+ */ +STATIC enum i40e_status_code +i40e_validate_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile, + u32 track_id, bool rollback) +{ + struct i40e_profile_section_header *sec = NULL; + enum i40e_status_code status = I40E_SUCCESS; + struct i40e_section_table *sec_tbl; + u32 vendor_dev_id; + u32 dev_cnt; + u32 sec_off; + u32 i; + + if (track_id == I40E_DDP_TRACKID_INVALID) { + i40e_debug(hw, I40E_DEBUG_PACKAGE, "Invalid track_id\n"); + return I40E_NOT_SUPPORTED; + } + + dev_cnt = profile->device_table_count; + for (i = 0; i < dev_cnt; i++) { + vendor_dev_id = profile->device_table[i].vendor_dev_id; + if ((vendor_dev_id >> 16) == I40E_INTEL_VENDOR_ID && + hw->device_id == (vendor_dev_id & 0xFFFF)) + break; + } + if (dev_cnt && (i == dev_cnt)) { + i40e_debug(hw, I40E_DEBUG_PACKAGE, + "Device doesn't support DDP\n"); + return I40E_ERR_DEVICE_NOT_SUPPORTED; + } + + I40E_SECTION_TABLE(profile, sec_tbl); + + /* Validate sections types */ + for (i = 0; i < sec_tbl->section_count; i++) { + sec_off = sec_tbl->section_offset[i]; + sec = I40E_SECTION_HEADER(profile, sec_off); + if (rollback) { + if (sec->section.type == SECTION_TYPE_MMIO || + sec->section.type == SECTION_TYPE_AQ || + sec->section.type == SECTION_TYPE_RB_AQ) { + i40e_debug(hw, I40E_DEBUG_PACKAGE, + "Not a roll-back package\n"); + return I40E_NOT_SUPPORTED; + } + } else { + if (sec->section.type == SECTION_TYPE_RB_AQ || + sec->section.type == SECTION_TYPE_RB_MMIO) { + i40e_debug(hw, I40E_DEBUG_PACKAGE, + "Not an original package\n"); + return I40E_NOT_SUPPORTED; + } + } + } + + return status; +} + +/** + * i40e_write_profile + * @hw: pointer to the hardware structure + * @profile: pointer to the profile segment of the package to be downloaded + * @track_id: package tracking id + * + * Handles the download of a complete package. 
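+ *
+ * Typical flow (illustrative sketch; 'pkg_hdr' and 'track_id' come from the
+ * DDP package being applied):
+ *
+ *	struct i40e_profile_segment *seg;
+ *
+ *	seg = (struct i40e_profile_segment *)
+ *	      i40e_find_segment_in_package(SEGMENT_TYPE_I40E, pkg_hdr);
+ *	if (seg)
+ *		status = i40e_write_profile(hw, seg, track_id);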
+ */ +enum i40e_status_code +i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile, + u32 track_id) +{ + enum i40e_status_code status = I40E_SUCCESS; + struct i40e_section_table *sec_tbl; + struct i40e_profile_section_header *sec = NULL; + struct i40e_profile_aq_section *ddp_aq; + u32 section_size = 0; + u32 offset = 0, info = 0; + u32 sec_off; + u32 i; + + status = i40e_validate_profile(hw, profile, track_id, false); + if (status) + return status; + + I40E_SECTION_TABLE(profile, sec_tbl); + + for (i = 0; i < sec_tbl->section_count; i++) { + sec_off = sec_tbl->section_offset[i]; + sec = I40E_SECTION_HEADER(profile, sec_off); + /* Process generic admin command */ + if (sec->section.type == SECTION_TYPE_AQ) { + ddp_aq = (struct i40e_profile_aq_section *)&sec[1]; + status = i40e_ddp_exec_aq_section(hw, ddp_aq); + if (status) { + i40e_debug(hw, I40E_DEBUG_PACKAGE, + "Failed to execute aq: section %d, opcode %u\n", + i, ddp_aq->opcode); + break; + } + sec->section.type = SECTION_TYPE_RB_AQ; + } + + /* Skip any non-mmio sections */ + if (sec->section.type != SECTION_TYPE_MMIO) + continue; + + section_size = sec->section.size + + sizeof(struct i40e_profile_section_header); + + /* Write MMIO section */ + status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size, + track_id, &offset, &info, NULL); + if (status) { + i40e_debug(hw, I40E_DEBUG_PACKAGE, + "Failed to write profile: section %d, offset %d, info %d\n", + i, offset, info); + break; + } + } + return status; +} + +/** + * i40e_rollback_profile + * @hw: pointer to the hardware structure + * @profile: pointer to the profile segment of the package to be removed + * @track_id: package tracking id + * + * Rolls back previously loaded package. + */ +enum i40e_status_code +i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile, + u32 track_id) +{ + struct i40e_profile_section_header *sec = NULL; + enum i40e_status_code status = I40E_SUCCESS; + struct i40e_section_table *sec_tbl; + u32 offset = 0, info = 0; + u32 section_size = 0; + u32 sec_off; + int i; + + status = i40e_validate_profile(hw, profile, track_id, true); + if (status) + return status; + + I40E_SECTION_TABLE(profile, sec_tbl); + + /* For rollback write sections in reverse */ + for (i = sec_tbl->section_count - 1; i >= 0; i--) { + sec_off = sec_tbl->section_offset[i]; + sec = I40E_SECTION_HEADER(profile, sec_off); + + /* Skip any non-rollback sections */ + if (sec->section.type != SECTION_TYPE_RB_MMIO) + continue; + + section_size = sec->section.size + + sizeof(struct i40e_profile_section_header); + + /* Write roll-back MMIO section */ + status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size, + track_id, &offset, &info, NULL); + if (status) { + i40e_debug(hw, I40E_DEBUG_PACKAGE, + "Failed to write profile: section %d, offset %d, info %d\n", + i, offset, info); + break; + } + } + return status; +} + +/** + * i40e_add_pinfo_to_list + * @hw: pointer to the hardware structure + * @profile: pointer to the profile segment of the package + * @profile_info_sec: buffer for information section + * @track_id: package tracking id + * + * Register a profile to the list of loaded profiles. 
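+ *
+ * Usage sketch (editorial illustration, not part of the upstream code):
+ * the caller provides the section buffer, which per the body below must
+ * hold one section header plus one profile info record. "profile_seg" and
+ * "track_id" are placeholders.
+ *
+ *	u8 pinfo_sec[sizeof(struct i40e_profile_section_header) +
+ *		     sizeof(struct i40e_profile_info)];
+ *	enum i40e_status_code st;
+ *
+ *	st = i40e_write_profile(hw, profile_seg, track_id);
+ *	if (!st)
+ *		st = i40e_add_pinfo_to_list(hw, profile_seg, pinfo_sec,
+ *					    track_id);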
+ */ +enum i40e_status_code +i40e_add_pinfo_to_list(struct i40e_hw *hw, + struct i40e_profile_segment *profile, + u8 *profile_info_sec, u32 track_id) +{ + enum i40e_status_code status = I40E_SUCCESS; + struct i40e_profile_section_header *sec = NULL; + struct i40e_profile_info *pinfo; + u32 offset = 0, info = 0; + + sec = (struct i40e_profile_section_header *)profile_info_sec; + sec->tbl_size = 1; + sec->data_end = sizeof(struct i40e_profile_section_header) + + sizeof(struct i40e_profile_info); + sec->section.type = SECTION_TYPE_INFO; + sec->section.offset = sizeof(struct i40e_profile_section_header); + sec->section.size = sizeof(struct i40e_profile_info); + pinfo = (struct i40e_profile_info *)(profile_info_sec + + sec->section.offset); + pinfo->track_id = track_id; + pinfo->version = profile->version; + pinfo->op = I40E_DDP_ADD_TRACKID; + i40e_memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE, + I40E_NONDMA_TO_NONDMA); + + status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end, + track_id, &offset, &info, NULL); + return status; +} diff --git a/src/spdk/dpdk/drivers/net/i40e/base/i40e_dcb.c b/src/spdk/dpdk/drivers/net/i40e/base/i40e_dcb.c new file mode 100644 index 000000000..a07c61e67 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/i40e/base/i40e_dcb.c @@ -0,0 +1,1410 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + +#include "i40e_adminq.h" +#include "i40e_prototype.h" +#include "i40e_dcb.h" + +/** + * i40e_get_dcbx_status + * @hw: pointer to the hw struct + * @status: Embedded DCBX Engine Status + * + * Get the DCBX status from the Firmware + **/ +enum i40e_status_code i40e_get_dcbx_status(struct i40e_hw *hw, u16 *status) +{ + u32 reg; + + if (!status) + return I40E_ERR_PARAM; + + reg = rd32(hw, I40E_PRTDCB_GENS); + *status = (u16)((reg & I40E_PRTDCB_GENS_DCBX_STATUS_MASK) >> + I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT); + + return I40E_SUCCESS; +} + +/** + * i40e_parse_ieee_etscfg_tlv + * @tlv: IEEE 802.1Qaz ETS CFG TLV + * @dcbcfg: Local store to update ETS CFG data + * + * Parses IEEE 802.1Qaz ETS CFG TLV + **/ +static void i40e_parse_ieee_etscfg_tlv(struct i40e_lldp_org_tlv *tlv, + struct i40e_dcbx_config *dcbcfg) +{ + struct i40e_dcb_ets_config *etscfg; + u8 *buf = tlv->tlvinfo; + u16 offset = 0; + u8 priority; + int i; + + /* First Octet post subtype + * -------------------------- + * |will-|CBS | Re- | Max | + * |ing | |served| TCs | + * -------------------------- + * |1bit | 1bit|3 bits|3bits| + */ + etscfg = &dcbcfg->etscfg; + etscfg->willing = (u8)((buf[offset] & I40E_IEEE_ETS_WILLING_MASK) >> + I40E_IEEE_ETS_WILLING_SHIFT); + etscfg->cbs = (u8)((buf[offset] & I40E_IEEE_ETS_CBS_MASK) >> + I40E_IEEE_ETS_CBS_SHIFT); + etscfg->maxtcs = (u8)((buf[offset] & I40E_IEEE_ETS_MAXTC_MASK) >> + I40E_IEEE_ETS_MAXTC_SHIFT); + + /* Move offset to Priority Assignment Table */ + offset++; + + /* Priority Assignment Table (4 octets) + * Octets:| 1 | 2 | 3 | 4 | + * ----------------------------------------- + * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7| + * ----------------------------------------- + * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0| + * ----------------------------------------- + */ + for (i = 0; i < 4; i++) { + priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_1_MASK) >> + I40E_IEEE_ETS_PRIO_1_SHIFT); + etscfg->prioritytable[i * 2] = priority; + priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_0_MASK) >> + I40E_IEEE_ETS_PRIO_0_SHIFT); + etscfg->prioritytable[i * 2 + 1] = priority; + offset++; + } + + /* TC Bandwidth Table (8 octets) + * 
Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | + * --------------------------------- + * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7| + * --------------------------------- + */ + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) + etscfg->tcbwtable[i] = buf[offset++]; + + /* TSA Assignment Table (8 octets) + * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | + * --------------------------------- + * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7| + * --------------------------------- + */ + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) + etscfg->tsatable[i] = buf[offset++]; +} + +/** + * i40e_parse_ieee_etsrec_tlv + * @tlv: IEEE 802.1Qaz ETS REC TLV + * @dcbcfg: Local store to update ETS REC data + * + * Parses IEEE 802.1Qaz ETS REC TLV + **/ +static void i40e_parse_ieee_etsrec_tlv(struct i40e_lldp_org_tlv *tlv, + struct i40e_dcbx_config *dcbcfg) +{ + u8 *buf = tlv->tlvinfo; + u16 offset = 0; + u8 priority; + int i; + + /* Move offset to priority table */ + offset++; + + /* Priority Assignment Table (4 octets) + * Octets:| 1 | 2 | 3 | 4 | + * ----------------------------------------- + * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7| + * ----------------------------------------- + * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0| + * ----------------------------------------- + */ + for (i = 0; i < 4; i++) { + priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_1_MASK) >> + I40E_IEEE_ETS_PRIO_1_SHIFT); + dcbcfg->etsrec.prioritytable[i*2] = priority; + priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_0_MASK) >> + I40E_IEEE_ETS_PRIO_0_SHIFT); + dcbcfg->etsrec.prioritytable[i*2 + 1] = priority; + offset++; + } + + /* TC Bandwidth Table (8 octets) + * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | + * --------------------------------- + * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7| + * --------------------------------- + */ + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) + dcbcfg->etsrec.tcbwtable[i] = buf[offset++]; + + /* TSA Assignment Table (8 octets) + * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | + * --------------------------------- + * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7| + * --------------------------------- + */ + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) + dcbcfg->etsrec.tsatable[i] = buf[offset++]; +} + +/** + * i40e_parse_ieee_pfccfg_tlv + * @tlv: IEEE 802.1Qaz PFC CFG TLV + * @dcbcfg: Local store to update PFC CFG data + * + * Parses IEEE 802.1Qaz PFC CFG TLV + **/ +static void i40e_parse_ieee_pfccfg_tlv(struct i40e_lldp_org_tlv *tlv, + struct i40e_dcbx_config *dcbcfg) +{ + u8 *buf = tlv->tlvinfo; + + /* ---------------------------------------- + * |will-|MBC | Re- | PFC | PFC Enable | + * |ing | |served| cap | | + * ----------------------------------------- + * |1bit | 1bit|2 bits|4bits| 1 octet | + */ + dcbcfg->pfc.willing = (u8)((buf[0] & I40E_IEEE_PFC_WILLING_MASK) >> + I40E_IEEE_PFC_WILLING_SHIFT); + dcbcfg->pfc.mbc = (u8)((buf[0] & I40E_IEEE_PFC_MBC_MASK) >> + I40E_IEEE_PFC_MBC_SHIFT); + dcbcfg->pfc.pfccap = (u8)((buf[0] & I40E_IEEE_PFC_CAP_MASK) >> + I40E_IEEE_PFC_CAP_SHIFT); + dcbcfg->pfc.pfcenable = buf[1]; +} + +/** + * i40e_parse_ieee_app_tlv + * @tlv: IEEE 802.1Qaz APP TLV + * @dcbcfg: Local store to update APP PRIO data + * + * Parses IEEE 802.1Qaz APP PRIO TLV + **/ +static void i40e_parse_ieee_app_tlv(struct i40e_lldp_org_tlv *tlv, + struct i40e_dcbx_config *dcbcfg) +{ + u16 typelength; + u16 offset = 0; + u16 length; + int i = 0; + u8 *buf; + + typelength = I40E_NTOHS(tlv->typelength); + length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >> + I40E_LLDP_TLV_LEN_SHIFT); + buf = tlv->tlvinfo; + + /* The App priority table starts 5 octets after 
TLV header */ + length -= (sizeof(tlv->ouisubtype) + 1); + + /* Move offset to App Priority Table */ + offset++; + + /* Application Priority Table (3 octets) + * Octets:| 1 | 2 | 3 | + * ----------------------------------------- + * |Priority|Rsrvd| Sel | Protocol ID | + * ----------------------------------------- + * Bits:|23 21|20 19|18 16|15 0| + * ----------------------------------------- + */ + while (offset < length) { + dcbcfg->app[i].priority = (u8)((buf[offset] & + I40E_IEEE_APP_PRIO_MASK) >> + I40E_IEEE_APP_PRIO_SHIFT); + dcbcfg->app[i].selector = (u8)((buf[offset] & + I40E_IEEE_APP_SEL_MASK) >> + I40E_IEEE_APP_SEL_SHIFT); + dcbcfg->app[i].protocolid = (buf[offset + 1] << 0x8) | + buf[offset + 2]; + /* Move to next app */ + offset += 3; + i++; + if (i >= I40E_DCBX_MAX_APPS) + break; + } + + dcbcfg->numapps = i; +} + +/** + * i40e_parse_ieee_etsrec_tlv + * @tlv: IEEE 802.1Qaz TLV + * @dcbcfg: Local store to update ETS REC data + * + * Get the TLV subtype and send it to parsing function + * based on the subtype value + **/ +static void i40e_parse_ieee_tlv(struct i40e_lldp_org_tlv *tlv, + struct i40e_dcbx_config *dcbcfg) +{ + u32 ouisubtype; + u8 subtype; + + ouisubtype = I40E_NTOHL(tlv->ouisubtype); + subtype = (u8)((ouisubtype & I40E_LLDP_TLV_SUBTYPE_MASK) >> + I40E_LLDP_TLV_SUBTYPE_SHIFT); + switch (subtype) { + case I40E_IEEE_SUBTYPE_ETS_CFG: + i40e_parse_ieee_etscfg_tlv(tlv, dcbcfg); + break; + case I40E_IEEE_SUBTYPE_ETS_REC: + i40e_parse_ieee_etsrec_tlv(tlv, dcbcfg); + break; + case I40E_IEEE_SUBTYPE_PFC_CFG: + i40e_parse_ieee_pfccfg_tlv(tlv, dcbcfg); + break; + case I40E_IEEE_SUBTYPE_APP_PRI: + i40e_parse_ieee_app_tlv(tlv, dcbcfg); + break; + default: + break; + } +} + +/** + * i40e_parse_cee_pgcfg_tlv + * @tlv: CEE DCBX PG CFG TLV + * @dcbcfg: Local store to update ETS CFG data + * + * Parses CEE DCBX PG CFG TLV + **/ +static void i40e_parse_cee_pgcfg_tlv(struct i40e_cee_feat_tlv *tlv, + struct i40e_dcbx_config *dcbcfg) +{ + struct i40e_dcb_ets_config *etscfg; + u8 *buf = tlv->tlvinfo; + u16 offset = 0; + u8 priority; + int i; + + etscfg = &dcbcfg->etscfg; + + if (tlv->en_will_err & I40E_CEE_FEAT_TLV_WILLING_MASK) + etscfg->willing = 1; + + etscfg->cbs = 0; + /* Priority Group Table (4 octets) + * Octets:| 1 | 2 | 3 | 4 | + * ----------------------------------------- + * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7| + * ----------------------------------------- + * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0| + * ----------------------------------------- + */ + for (i = 0; i < 4; i++) { + priority = (u8)((buf[offset] & I40E_CEE_PGID_PRIO_1_MASK) >> + I40E_CEE_PGID_PRIO_1_SHIFT); + etscfg->prioritytable[i * 2] = priority; + priority = (u8)((buf[offset] & I40E_CEE_PGID_PRIO_0_MASK) >> + I40E_CEE_PGID_PRIO_0_SHIFT); + etscfg->prioritytable[i * 2 + 1] = priority; + offset++; + } + + /* PG Percentage Table (8 octets) + * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | + * --------------------------------- + * |pg0|pg1|pg2|pg3|pg4|pg5|pg6|pg7| + * --------------------------------- + */ + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) + etscfg->tcbwtable[i] = buf[offset++]; + + /* Number of TCs supported (1 octet) */ + etscfg->maxtcs = buf[offset]; +} + +/** + * i40e_parse_cee_pfccfg_tlv + * @tlv: CEE DCBX PFC CFG TLV + * @dcbcfg: Local store to update PFC CFG data + * + * Parses CEE DCBX PFC CFG TLV + **/ +static void i40e_parse_cee_pfccfg_tlv(struct i40e_cee_feat_tlv *tlv, + struct i40e_dcbx_config *dcbcfg) +{ + u8 *buf = tlv->tlvinfo; + + if (tlv->en_will_err & I40E_CEE_FEAT_TLV_WILLING_MASK) + 
dcbcfg->pfc.willing = 1; + + /* ------------------------ + * | PFC Enable | PFC TCs | + * ------------------------ + * | 1 octet | 1 octet | + */ + dcbcfg->pfc.pfcenable = buf[0]; + dcbcfg->pfc.pfccap = buf[1]; +} + +/** + * i40e_parse_cee_app_tlv + * @tlv: CEE DCBX APP TLV + * @dcbcfg: Local store to update APP PRIO data + * + * Parses CEE DCBX APP PRIO TLV + **/ +static void i40e_parse_cee_app_tlv(struct i40e_cee_feat_tlv *tlv, + struct i40e_dcbx_config *dcbcfg) +{ + u16 length, typelength, offset = 0; + struct i40e_cee_app_prio *app; + u8 i; + + typelength = I40E_NTOHS(tlv->hdr.typelen); + length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >> + I40E_LLDP_TLV_LEN_SHIFT); + + dcbcfg->numapps = length / sizeof(*app); + if (!dcbcfg->numapps) + return; + if (dcbcfg->numapps > I40E_DCBX_MAX_APPS) + dcbcfg->numapps = I40E_DCBX_MAX_APPS; + + for (i = 0; i < dcbcfg->numapps; i++) { + u8 up, selector; + + app = (struct i40e_cee_app_prio *)(tlv->tlvinfo + offset); + for (up = 0; up < I40E_MAX_USER_PRIORITY; up++) { + if (app->prio_map & BIT(up)) + break; + } + dcbcfg->app[i].priority = up; + + /* Get Selector from lower 2 bits, and convert to IEEE */ + selector = (app->upper_oui_sel & I40E_CEE_APP_SELECTOR_MASK); + switch (selector) { + case I40E_CEE_APP_SEL_ETHTYPE: + dcbcfg->app[i].selector = I40E_APP_SEL_ETHTYPE; + break; + case I40E_CEE_APP_SEL_TCPIP: + dcbcfg->app[i].selector = I40E_APP_SEL_TCPIP; + break; + default: + /* Keep selector as it is for unknown types */ + dcbcfg->app[i].selector = selector; + } + + dcbcfg->app[i].protocolid = I40E_NTOHS(app->protocol); + /* Move to next app */ + offset += sizeof(*app); + } +} + +/** + * i40e_parse_cee_tlv + * @tlv: CEE DCBX TLV + * @dcbcfg: Local store to update DCBX config data + * + * Get the TLV subtype and send it to parsing function + * based on the subtype value + **/ +static void i40e_parse_cee_tlv(struct i40e_lldp_org_tlv *tlv, + struct i40e_dcbx_config *dcbcfg) +{ + u16 len, tlvlen, sublen, typelength; + struct i40e_cee_feat_tlv *sub_tlv; + u8 subtype, feat_tlv_count = 0; + u32 ouisubtype; + + ouisubtype = I40E_NTOHL(tlv->ouisubtype); + subtype = (u8)((ouisubtype & I40E_LLDP_TLV_SUBTYPE_MASK) >> + I40E_LLDP_TLV_SUBTYPE_SHIFT); + /* Return if not CEE DCBX */ + if (subtype != I40E_CEE_DCBX_TYPE) + return; + + typelength = I40E_NTOHS(tlv->typelength); + tlvlen = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >> + I40E_LLDP_TLV_LEN_SHIFT); + len = sizeof(tlv->typelength) + sizeof(ouisubtype) + + sizeof(struct i40e_cee_ctrl_tlv); + /* Return if no CEE DCBX Feature TLVs */ + if (tlvlen <= len) + return; + + sub_tlv = (struct i40e_cee_feat_tlv *)((char *)tlv + len); + while (feat_tlv_count < I40E_CEE_MAX_FEAT_TYPE) { + typelength = I40E_NTOHS(sub_tlv->hdr.typelen); + sublen = (u16)((typelength & + I40E_LLDP_TLV_LEN_MASK) >> + I40E_LLDP_TLV_LEN_SHIFT); + subtype = (u8)((typelength & I40E_LLDP_TLV_TYPE_MASK) >> + I40E_LLDP_TLV_TYPE_SHIFT); + switch (subtype) { + case I40E_CEE_SUBTYPE_PG_CFG: + i40e_parse_cee_pgcfg_tlv(sub_tlv, dcbcfg); + break; + case I40E_CEE_SUBTYPE_PFC_CFG: + i40e_parse_cee_pfccfg_tlv(sub_tlv, dcbcfg); + break; + case I40E_CEE_SUBTYPE_APP_PRI: + i40e_parse_cee_app_tlv(sub_tlv, dcbcfg); + break; + default: + return; /* Invalid Sub-type return */ + } + feat_tlv_count++; + /* Move to next sub TLV */ + sub_tlv = (struct i40e_cee_feat_tlv *)((char *)sub_tlv + + sizeof(sub_tlv->hdr.typelen) + + sublen); + } +} + +/** + * i40e_parse_org_tlv + * @tlv: Organization specific TLV + * @dcbcfg: Local store to update ETS REC data + * + * 
Currently only IEEE 802.1Qaz TLV is supported, all others + * will be returned + **/ +static void i40e_parse_org_tlv(struct i40e_lldp_org_tlv *tlv, + struct i40e_dcbx_config *dcbcfg) +{ + u32 ouisubtype; + u32 oui; + + ouisubtype = I40E_NTOHL(tlv->ouisubtype); + oui = (u32)((ouisubtype & I40E_LLDP_TLV_OUI_MASK) >> + I40E_LLDP_TLV_OUI_SHIFT); + switch (oui) { + case I40E_IEEE_8021QAZ_OUI: + i40e_parse_ieee_tlv(tlv, dcbcfg); + break; + case I40E_CEE_DCBX_OUI: + i40e_parse_cee_tlv(tlv, dcbcfg); + break; + default: + break; + } +} + +/** + * i40e_lldp_to_dcb_config + * @lldpmib: LLDPDU to be parsed + * @dcbcfg: store for LLDPDU data + * + * Parse DCB configuration from the LLDPDU + **/ +enum i40e_status_code i40e_lldp_to_dcb_config(u8 *lldpmib, + struct i40e_dcbx_config *dcbcfg) +{ + enum i40e_status_code ret = I40E_SUCCESS; + struct i40e_lldp_org_tlv *tlv; + u16 type; + u16 length; + u16 typelength; + u16 offset = 0; + + if (!lldpmib || !dcbcfg) + return I40E_ERR_PARAM; + + /* set to the start of LLDPDU */ + lldpmib += I40E_LLDP_MIB_HLEN; + tlv = (struct i40e_lldp_org_tlv *)lldpmib; + while (1) { + typelength = I40E_NTOHS(tlv->typelength); + type = (u16)((typelength & I40E_LLDP_TLV_TYPE_MASK) >> + I40E_LLDP_TLV_TYPE_SHIFT); + length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >> + I40E_LLDP_TLV_LEN_SHIFT); + offset += sizeof(typelength) + length; + + /* END TLV or beyond LLDPDU size */ + if ((type == I40E_TLV_TYPE_END) || (offset > I40E_LLDPDU_SIZE)) + break; + + switch (type) { + case I40E_TLV_TYPE_ORG: + i40e_parse_org_tlv(tlv, dcbcfg); + break; + default: + break; + } + + /* Move to next TLV */ + tlv = (struct i40e_lldp_org_tlv *)((char *)tlv + + sizeof(tlv->typelength) + + length); + } + + return ret; +} + +/** + * i40e_aq_get_dcb_config + * @hw: pointer to the hw struct + * @mib_type: mib type for the query + * @bridgetype: bridge type for the query (remote) + * @dcbcfg: store for LLDPDU data + * + * Query DCB configuration from the Firmware + **/ +enum i40e_status_code i40e_aq_get_dcb_config(struct i40e_hw *hw, u8 mib_type, + u8 bridgetype, + struct i40e_dcbx_config *dcbcfg) +{ + enum i40e_status_code ret = I40E_SUCCESS; + struct i40e_virt_mem mem; + u8 *lldpmib; + + /* Allocate the LLDPDU */ + ret = i40e_allocate_virt_mem(hw, &mem, I40E_LLDPDU_SIZE); + if (ret) + return ret; + + lldpmib = (u8 *)mem.va; + ret = i40e_aq_get_lldp_mib(hw, bridgetype, mib_type, + (void *)lldpmib, I40E_LLDPDU_SIZE, + NULL, NULL, NULL); + if (ret) + goto free_mem; + + /* Parse LLDP MIB to get dcb configuration */ + ret = i40e_lldp_to_dcb_config(lldpmib, dcbcfg); + +free_mem: + i40e_free_virt_mem(hw, &mem); + return ret; +} + +/** + * i40e_cee_to_dcb_v1_config + * @cee_cfg: pointer to CEE v1 response configuration struct + * @dcbcfg: DCB configuration struct + * + * Convert CEE v1 configuration from firmware to DCB configuration + **/ +static void i40e_cee_to_dcb_v1_config( + struct i40e_aqc_get_cee_dcb_cfg_v1_resp *cee_cfg, + struct i40e_dcbx_config *dcbcfg) +{ + u16 status, tlv_status = LE16_TO_CPU(cee_cfg->tlv_status); + u16 app_prio = LE16_TO_CPU(cee_cfg->oper_app_prio); + u8 i, tc, err; + + /* CEE PG data to ETS config */ + dcbcfg->etscfg.maxtcs = cee_cfg->oper_num_tc; + + /* Note that the FW creates the oper_prio_tc nibbles reversed + * from those in the CEE Priority Group sub-TLV. 
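+ *
+ * Worked example (editorial note, not in the original comment): with a
+ * hypothetical cee_cfg->oper_prio_tc[0] of 0x21, the low nibble
+ * (I40E_CEE_PGID_PRIO_0) yields prioritytable[0] = 1 and the high nibble
+ * (I40E_CEE_PGID_PRIO_1) yields prioritytable[1] = 2, i.e. the opposite
+ * nibble order from the one used when parsing the CEE Priority Group
+ * sub-TLV.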
+ */ + for (i = 0; i < 4; i++) { + tc = (u8)((cee_cfg->oper_prio_tc[i] & + I40E_CEE_PGID_PRIO_0_MASK) >> + I40E_CEE_PGID_PRIO_0_SHIFT); + dcbcfg->etscfg.prioritytable[i*2] = tc; + tc = (u8)((cee_cfg->oper_prio_tc[i] & + I40E_CEE_PGID_PRIO_1_MASK) >> + I40E_CEE_PGID_PRIO_1_SHIFT); + dcbcfg->etscfg.prioritytable[i*2 + 1] = tc; + } + + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) + dcbcfg->etscfg.tcbwtable[i] = cee_cfg->oper_tc_bw[i]; + + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + if (dcbcfg->etscfg.prioritytable[i] == I40E_CEE_PGID_STRICT) { + /* Map it to next empty TC */ + dcbcfg->etscfg.prioritytable[i] = + cee_cfg->oper_num_tc - 1; + dcbcfg->etscfg.tsatable[i] = I40E_IEEE_TSA_STRICT; + } else { + dcbcfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS; + } + } + + /* CEE PFC data to ETS config */ + dcbcfg->pfc.pfcenable = cee_cfg->oper_pfc_en; + dcbcfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS; + + status = (tlv_status & I40E_AQC_CEE_APP_STATUS_MASK) >> + I40E_AQC_CEE_APP_STATUS_SHIFT; + err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0; + /* Add APPs if Error is False */ + if (!err) { + /* CEE operating configuration supports FCoE/iSCSI/FIP only */ + dcbcfg->numapps = I40E_CEE_OPER_MAX_APPS; + + /* FCoE APP */ + dcbcfg->app[0].priority = + (app_prio & I40E_AQC_CEE_APP_FCOE_MASK) >> + I40E_AQC_CEE_APP_FCOE_SHIFT; + dcbcfg->app[0].selector = I40E_APP_SEL_ETHTYPE; + dcbcfg->app[0].protocolid = I40E_APP_PROTOID_FCOE; + + /* iSCSI APP */ + dcbcfg->app[1].priority = + (app_prio & I40E_AQC_CEE_APP_ISCSI_MASK) >> + I40E_AQC_CEE_APP_ISCSI_SHIFT; + dcbcfg->app[1].selector = I40E_APP_SEL_TCPIP; + dcbcfg->app[1].protocolid = I40E_APP_PROTOID_ISCSI; + + /* FIP APP */ + dcbcfg->app[2].priority = + (app_prio & I40E_AQC_CEE_APP_FIP_MASK) >> + I40E_AQC_CEE_APP_FIP_SHIFT; + dcbcfg->app[2].selector = I40E_APP_SEL_ETHTYPE; + dcbcfg->app[2].protocolid = I40E_APP_PROTOID_FIP; + } +} + +/** + * i40e_cee_to_dcb_config + * @cee_cfg: pointer to CEE configuration struct + * @dcbcfg: DCB configuration struct + * + * Convert CEE configuration from firmware to DCB configuration + **/ +static void i40e_cee_to_dcb_config( + struct i40e_aqc_get_cee_dcb_cfg_resp *cee_cfg, + struct i40e_dcbx_config *dcbcfg) +{ + u32 status, tlv_status = LE32_TO_CPU(cee_cfg->tlv_status); + u16 app_prio = LE16_TO_CPU(cee_cfg->oper_app_prio); + u8 i, tc, err, sync, oper; + + /* CEE PG data to ETS config */ + dcbcfg->etscfg.maxtcs = cee_cfg->oper_num_tc; + + /* Note that the FW creates the oper_prio_tc nibbles reversed + * from those in the CEE Priority Group sub-TLV. 
+ */ + for (i = 0; i < 4; i++) { + tc = (u8)((cee_cfg->oper_prio_tc[i] & + I40E_CEE_PGID_PRIO_0_MASK) >> + I40E_CEE_PGID_PRIO_0_SHIFT); + dcbcfg->etscfg.prioritytable[i*2] = tc; + tc = (u8)((cee_cfg->oper_prio_tc[i] & + I40E_CEE_PGID_PRIO_1_MASK) >> + I40E_CEE_PGID_PRIO_1_SHIFT); + dcbcfg->etscfg.prioritytable[i*2 + 1] = tc; + } + + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) + dcbcfg->etscfg.tcbwtable[i] = cee_cfg->oper_tc_bw[i]; + + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + if (dcbcfg->etscfg.prioritytable[i] == I40E_CEE_PGID_STRICT) { + /* Map it to next empty TC */ + dcbcfg->etscfg.prioritytable[i] = + cee_cfg->oper_num_tc - 1; + dcbcfg->etscfg.tsatable[i] = I40E_IEEE_TSA_STRICT; + } else { + dcbcfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS; + } + } + + /* CEE PFC data to ETS config */ + dcbcfg->pfc.pfcenable = cee_cfg->oper_pfc_en; + dcbcfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS; + + i = 0; + status = (tlv_status & I40E_AQC_CEE_FCOE_STATUS_MASK) >> + I40E_AQC_CEE_FCOE_STATUS_SHIFT; + err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0; + sync = (status & I40E_TLV_STATUS_SYNC) ? 1 : 0; + oper = (status & I40E_TLV_STATUS_OPER) ? 1 : 0; + /* Add FCoE APP if Error is False and Oper/Sync is True */ + if (!err && sync && oper) { + /* FCoE APP */ + dcbcfg->app[i].priority = + (app_prio & I40E_AQC_CEE_APP_FCOE_MASK) >> + I40E_AQC_CEE_APP_FCOE_SHIFT; + dcbcfg->app[i].selector = I40E_APP_SEL_ETHTYPE; + dcbcfg->app[i].protocolid = I40E_APP_PROTOID_FCOE; + i++; + } + + status = (tlv_status & I40E_AQC_CEE_ISCSI_STATUS_MASK) >> + I40E_AQC_CEE_ISCSI_STATUS_SHIFT; + err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0; + sync = (status & I40E_TLV_STATUS_SYNC) ? 1 : 0; + oper = (status & I40E_TLV_STATUS_OPER) ? 1 : 0; + /* Add iSCSI APP if Error is False and Oper/Sync is True */ + if (!err && sync && oper) { + /* iSCSI APP */ + dcbcfg->app[i].priority = + (app_prio & I40E_AQC_CEE_APP_ISCSI_MASK) >> + I40E_AQC_CEE_APP_ISCSI_SHIFT; + dcbcfg->app[i].selector = I40E_APP_SEL_TCPIP; + dcbcfg->app[i].protocolid = I40E_APP_PROTOID_ISCSI; + i++; + } + + status = (tlv_status & I40E_AQC_CEE_FIP_STATUS_MASK) >> + I40E_AQC_CEE_FIP_STATUS_SHIFT; + err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0; + sync = (status & I40E_TLV_STATUS_SYNC) ? 1 : 0; + oper = (status & I40E_TLV_STATUS_OPER) ? 
1 : 0; + /* Add FIP APP if Error is False and Oper/Sync is True */ + if (!err && sync && oper) { + /* FIP APP */ + dcbcfg->app[i].priority = + (app_prio & I40E_AQC_CEE_APP_FIP_MASK) >> + I40E_AQC_CEE_APP_FIP_SHIFT; + dcbcfg->app[i].selector = I40E_APP_SEL_ETHTYPE; + dcbcfg->app[i].protocolid = I40E_APP_PROTOID_FIP; + i++; + } + dcbcfg->numapps = i; +} + +/** + * i40e_get_ieee_dcb_config + * @hw: pointer to the hw struct + * + * Get IEEE mode DCB configuration from the Firmware + **/ +STATIC enum i40e_status_code i40e_get_ieee_dcb_config(struct i40e_hw *hw) +{ + enum i40e_status_code ret = I40E_SUCCESS; + + /* IEEE mode */ + hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_IEEE; + /* Get Local DCB Config */ + ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0, + &hw->local_dcbx_config); + if (ret) + goto out; + + /* Get Remote DCB Config */ + ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE, + I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE, + &hw->remote_dcbx_config); + /* Don't treat ENOENT as an error for Remote MIBs */ + if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT) + ret = I40E_SUCCESS; + +out: + return ret; +} + +/** + * i40e_get_dcb_config + * @hw: pointer to the hw struct + * + * Get DCB configuration from the Firmware + **/ +enum i40e_status_code i40e_get_dcb_config(struct i40e_hw *hw) +{ + enum i40e_status_code ret = I40E_SUCCESS; + struct i40e_aqc_get_cee_dcb_cfg_resp cee_cfg; + struct i40e_aqc_get_cee_dcb_cfg_v1_resp cee_v1_cfg; + + /* If Firmware version < v4.33 on X710/XL710, IEEE only */ + if ((hw->mac.type == I40E_MAC_XL710) && + (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) || + (hw->aq.fw_maj_ver < 4))) + return i40e_get_ieee_dcb_config(hw); + + /* If Firmware version == v4.33 on X710/XL710, use old CEE struct */ + if ((hw->mac.type == I40E_MAC_XL710) && + ((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver == 33))) { + ret = i40e_aq_get_cee_dcb_config(hw, &cee_v1_cfg, + sizeof(cee_v1_cfg), NULL); + if (ret == I40E_SUCCESS) { + /* CEE mode */ + hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_CEE; + hw->local_dcbx_config.tlv_status = + LE16_TO_CPU(cee_v1_cfg.tlv_status); + i40e_cee_to_dcb_v1_config(&cee_v1_cfg, + &hw->local_dcbx_config); + } + } else { + ret = i40e_aq_get_cee_dcb_config(hw, &cee_cfg, + sizeof(cee_cfg), NULL); + if (ret == I40E_SUCCESS) { + /* CEE mode */ + hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_CEE; + hw->local_dcbx_config.tlv_status = + LE32_TO_CPU(cee_cfg.tlv_status); + i40e_cee_to_dcb_config(&cee_cfg, + &hw->local_dcbx_config); + } + } + + /* CEE mode not enabled try querying IEEE data */ + if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT) + return i40e_get_ieee_dcb_config(hw); + + if (ret != I40E_SUCCESS) + goto out; + + /* Get CEE DCB Desired Config */ + ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0, + &hw->desired_dcbx_config); + if (ret) + goto out; + + /* Get Remote DCB Config */ + ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE, + I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE, + &hw->remote_dcbx_config); + /* Don't treat ENOENT as an error for Remote MIBs */ + if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT) + ret = I40E_SUCCESS; + +out: + return ret; +} + +/** + * i40e_init_dcb + * @hw: pointer to the hw struct + * @enable_mib_change: enable mib change event + * + * Update DCB configuration from the Firmware + **/ +enum i40e_status_code i40e_init_dcb(struct i40e_hw *hw, bool enable_mib_change) +{ + enum i40e_status_code ret = I40E_SUCCESS; + struct i40e_lldp_variables lldp_cfg; + u8 adminstatus = 0; + + 
if (!hw->func_caps.dcb) + return I40E_NOT_SUPPORTED; + + /* Read LLDP NVM area */ + if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT) { + u8 offset = 0; + + if (hw->mac.type == I40E_MAC_XL710) + offset = I40E_LLDP_CURRENT_STATUS_XL710_OFFSET; + else if (hw->mac.type == I40E_MAC_X722) + offset = I40E_LLDP_CURRENT_STATUS_X722_OFFSET; + else + return I40E_NOT_SUPPORTED; + + ret = i40e_read_nvm_module_data(hw, + I40E_SR_EMP_SR_SETTINGS_PTR, + offset, + I40E_LLDP_CURRENT_STATUS_OFFSET, + I40E_LLDP_CURRENT_STATUS_SIZE, + &lldp_cfg.adminstatus); + } else { + ret = i40e_read_lldp_cfg(hw, &lldp_cfg); + } + if (ret) + return I40E_ERR_NOT_READY; + + /* Get the LLDP AdminStatus for the current port */ + adminstatus = lldp_cfg.adminstatus >> (hw->port * 4); + adminstatus &= 0xF; + + /* LLDP agent disabled */ + if (!adminstatus) { + hw->dcbx_status = I40E_DCBX_STATUS_DISABLED; + return I40E_ERR_NOT_READY; + } + + /* Get DCBX status */ + ret = i40e_get_dcbx_status(hw, &hw->dcbx_status); + if (ret) + return ret; + + /* Check the DCBX Status */ + if (hw->dcbx_status == I40E_DCBX_STATUS_DONE || + hw->dcbx_status == I40E_DCBX_STATUS_IN_PROGRESS) { + /* Get current DCBX configuration */ + ret = i40e_get_dcb_config(hw); + if (ret) + return ret; + } else if (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED) { + return I40E_ERR_NOT_READY; + } + + /* Configure the LLDP MIB change event */ + if (enable_mib_change) + ret = i40e_aq_cfg_lldp_mib_change_event(hw, true, NULL); + + return ret; +} + +/** + * i40e_get_fw_lldp_status + * @hw: pointer to the hw struct + * @lldp_status: pointer to the status enum + * + * Get status of FW Link Layer Discovery Protocol (LLDP) Agent. + * Status of agent is reported via @lldp_status parameter. + **/ +enum i40e_status_code +i40e_get_fw_lldp_status(struct i40e_hw *hw, + enum i40e_get_fw_lldp_status_resp *lldp_status) +{ + enum i40e_status_code ret; + struct i40e_virt_mem mem; + u8 *lldpmib; + + if (!lldp_status) + return I40E_ERR_PARAM; + + /* Allocate buffer for the LLDPDU */ + ret = i40e_allocate_virt_mem(hw, &mem, I40E_LLDPDU_SIZE); + if (ret) + return ret; + + lldpmib = (u8 *)mem.va; + ret = i40e_aq_get_lldp_mib(hw, 0, 0, (void *)lldpmib, + I40E_LLDPDU_SIZE, NULL, NULL, NULL); + + if (ret == I40E_SUCCESS) { + *lldp_status = I40E_GET_FW_LLDP_STATUS_ENABLED; + } else if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT) { + /* MIB is not available yet but the agent is running */ + *lldp_status = I40E_GET_FW_LLDP_STATUS_ENABLED; + ret = I40E_SUCCESS; + } else if (hw->aq.asq_last_status == I40E_AQ_RC_EPERM) { + *lldp_status = I40E_GET_FW_LLDP_STATUS_DISABLED; + ret = I40E_SUCCESS; + } + + i40e_free_virt_mem(hw, &mem); + return ret; +} + +/** + * i40e_add_ieee_ets_tlv - Prepare ETS TLV in IEEE format + * @tlv: Fill the ETS config data in IEEE format + * @dcbcfg: Local store which holds the DCB Config + * + * Prepare IEEE 802.1Qaz ETS CFG TLV + **/ +static void i40e_add_ieee_ets_tlv(struct i40e_lldp_org_tlv *tlv, + struct i40e_dcbx_config *dcbcfg) +{ + u8 priority0, priority1, maxtcwilling = 0; + struct i40e_dcb_ets_config *etscfg; + u16 offset = 0, typelength, i; + u8 *buf = tlv->tlvinfo; + u32 ouisubtype; + + typelength = (u16)((I40E_TLV_TYPE_ORG << I40E_LLDP_TLV_TYPE_SHIFT) | + I40E_IEEE_ETS_TLV_LENGTH); + tlv->typelength = I40E_HTONS(typelength); + + ouisubtype = (u32)((I40E_IEEE_8021QAZ_OUI << I40E_LLDP_TLV_OUI_SHIFT) | + I40E_IEEE_SUBTYPE_ETS_CFG); + tlv->ouisubtype = I40E_HTONL(ouisubtype); + + /* First Octet post subtype + * -------------------------- + * |will-|CBS | Re- | Max | + * 
|ing | |served| TCs | + * -------------------------- + * |1bit | 1bit|3 bits|3bits| + */ + etscfg = &dcbcfg->etscfg; + if (etscfg->willing) + maxtcwilling = BIT(I40E_IEEE_ETS_WILLING_SHIFT); + maxtcwilling |= etscfg->maxtcs & I40E_IEEE_ETS_MAXTC_MASK; + buf[offset] = maxtcwilling; + + /* Move offset to Priority Assignment Table */ + offset++; + + /* Priority Assignment Table (4 octets) + * Octets:| 1 | 2 | 3 | 4 | + * ----------------------------------------- + * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7| + * ----------------------------------------- + * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0| + * ----------------------------------------- + */ + for (i = 0; i < 4; i++) { + priority0 = etscfg->prioritytable[i * 2] & 0xF; + priority1 = etscfg->prioritytable[i * 2 + 1] & 0xF; + buf[offset] = (priority0 << I40E_IEEE_ETS_PRIO_1_SHIFT) | + priority1; + offset++; + } + + /* TC Bandwidth Table (8 octets) + * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | + * --------------------------------- + * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7| + * --------------------------------- + */ + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) + buf[offset++] = etscfg->tcbwtable[i]; + + /* TSA Assignment Table (8 octets) + * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | + * --------------------------------- + * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7| + * --------------------------------- + */ + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) + buf[offset++] = etscfg->tsatable[i]; +} + +/** + * i40e_add_ieee_etsrec_tlv - Prepare ETS Recommended TLV in IEEE format + * @tlv: Fill ETS Recommended TLV in IEEE format + * @dcbcfg: Local store which holds the DCB Config + * + * Prepare IEEE 802.1Qaz ETS REC TLV + **/ +static void i40e_add_ieee_etsrec_tlv(struct i40e_lldp_org_tlv *tlv, + struct i40e_dcbx_config *dcbcfg) +{ + struct i40e_dcb_ets_config *etsrec; + u16 offset = 0, typelength, i; + u8 priority0, priority1; + u8 *buf = tlv->tlvinfo; + u32 ouisubtype; + + typelength = (u16)((I40E_TLV_TYPE_ORG << I40E_LLDP_TLV_TYPE_SHIFT) | + I40E_IEEE_ETS_TLV_LENGTH); + tlv->typelength = I40E_HTONS(typelength); + + ouisubtype = (u32)((I40E_IEEE_8021QAZ_OUI << I40E_LLDP_TLV_OUI_SHIFT) | + I40E_IEEE_SUBTYPE_ETS_REC); + tlv->ouisubtype = I40E_HTONL(ouisubtype); + + etsrec = &dcbcfg->etsrec; + /* First Octet is reserved */ + /* Move offset to Priority Assignment Table */ + offset++; + + /* Priority Assignment Table (4 octets) + * Octets:| 1 | 2 | 3 | 4 | + * ----------------------------------------- + * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7| + * ----------------------------------------- + * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0| + * ----------------------------------------- + */ + for (i = 0; i < 4; i++) { + priority0 = etsrec->prioritytable[i * 2] & 0xF; + priority1 = etsrec->prioritytable[i * 2 + 1] & 0xF; + buf[offset] = (priority0 << I40E_IEEE_ETS_PRIO_1_SHIFT) | + priority1; + offset++; + } + + /* TC Bandwidth Table (8 octets) + * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | + * --------------------------------- + * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7| + * --------------------------------- + */ + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) + buf[offset++] = etsrec->tcbwtable[i]; + + /* TSA Assignment Table (8 octets) + * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | + * --------------------------------- + * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7| + * --------------------------------- + */ + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) + buf[offset++] = etsrec->tsatable[i]; +} + + /** + * i40e_add_ieee_pfc_tlv - Prepare PFC TLV in IEEE format + * @tlv: Fill PFC TLV in IEEE 
format + * @dcbcfg: Local store to get PFC CFG data + * + * Prepare IEEE 802.1Qaz PFC CFG TLV + **/ +static void i40e_add_ieee_pfc_tlv(struct i40e_lldp_org_tlv *tlv, + struct i40e_dcbx_config *dcbcfg) +{ + u8 *buf = tlv->tlvinfo; + u32 ouisubtype; + u16 typelength; + + typelength = (u16)((I40E_TLV_TYPE_ORG << I40E_LLDP_TLV_TYPE_SHIFT) | + I40E_IEEE_PFC_TLV_LENGTH); + tlv->typelength = I40E_HTONS(typelength); + + ouisubtype = (u32)((I40E_IEEE_8021QAZ_OUI << I40E_LLDP_TLV_OUI_SHIFT) | + I40E_IEEE_SUBTYPE_PFC_CFG); + tlv->ouisubtype = I40E_HTONL(ouisubtype); + + /* ---------------------------------------- + * |will-|MBC | Re- | PFC | PFC Enable | + * |ing | |served| cap | | + * ----------------------------------------- + * |1bit | 1bit|2 bits|4bits| 1 octet | + */ + if (dcbcfg->pfc.willing) + buf[0] = BIT(I40E_IEEE_PFC_WILLING_SHIFT); + + if (dcbcfg->pfc.mbc) + buf[0] |= BIT(I40E_IEEE_PFC_MBC_SHIFT); + + buf[0] |= dcbcfg->pfc.pfccap & 0xF; + buf[1] = dcbcfg->pfc.pfcenable; +} + +/** + * i40e_add_ieee_app_pri_tlv - Prepare APP TLV in IEEE format + * @tlv: Fill APP TLV in IEEE format + * @dcbcfg: Local store to get APP CFG data + * + * Prepare IEEE 802.1Qaz APP CFG TLV + **/ +static void i40e_add_ieee_app_pri_tlv(struct i40e_lldp_org_tlv *tlv, + struct i40e_dcbx_config *dcbcfg) +{ + u16 typelength, length, offset = 0; + u8 priority, selector, i = 0; + u8 *buf = tlv->tlvinfo; + u32 ouisubtype; + + /* No APP TLVs then just return */ + if (dcbcfg->numapps == 0) + return; + ouisubtype = (u32)((I40E_IEEE_8021QAZ_OUI << I40E_LLDP_TLV_OUI_SHIFT) | + I40E_IEEE_SUBTYPE_APP_PRI); + tlv->ouisubtype = I40E_HTONL(ouisubtype); + + /* Move offset to App Priority Table */ + offset++; + /* Application Priority Table (3 octets) + * Octets:| 1 | 2 | 3 | + * ----------------------------------------- + * |Priority|Rsrvd| Sel | Protocol ID | + * ----------------------------------------- + * Bits:|23 21|20 19|18 16|15 0| + * ----------------------------------------- + */ + while (i < dcbcfg->numapps) { + priority = dcbcfg->app[i].priority & 0x7; + selector = dcbcfg->app[i].selector & 0x7; + buf[offset] = (priority << I40E_IEEE_APP_PRIO_SHIFT) | selector; + buf[offset + 1] = (dcbcfg->app[i].protocolid >> 0x8) & 0xFF; + buf[offset + 2] = dcbcfg->app[i].protocolid & 0xFF; + /* Move to next app */ + offset += 3; + i++; + if (i >= I40E_DCBX_MAX_APPS) + break; + } + /* length includes size of ouisubtype + 1 reserved + 3*numapps */ + length = sizeof(tlv->ouisubtype) + 1 + (i*3); + typelength = (u16)((I40E_TLV_TYPE_ORG << I40E_LLDP_TLV_TYPE_SHIFT) | + (length & 0x1FF)); + tlv->typelength = I40E_HTONS(typelength); +} + + /** + * i40e_add_dcb_tlv - Add all IEEE TLVs + * @tlv: pointer to org tlv + * + * add tlv information + **/ +static void i40e_add_dcb_tlv(struct i40e_lldp_org_tlv *tlv, + struct i40e_dcbx_config *dcbcfg, + u16 tlvid) +{ + switch (tlvid) { + case I40E_IEEE_TLV_ID_ETS_CFG: + i40e_add_ieee_ets_tlv(tlv, dcbcfg); + break; + case I40E_IEEE_TLV_ID_ETS_REC: + i40e_add_ieee_etsrec_tlv(tlv, dcbcfg); + break; + case I40E_IEEE_TLV_ID_PFC_CFG: + i40e_add_ieee_pfc_tlv(tlv, dcbcfg); + break; + case I40E_IEEE_TLV_ID_APP_PRI: + i40e_add_ieee_app_pri_tlv(tlv, dcbcfg); + break; + default: + break; + } +} + + /** + * i40e_set_dcb_config - Set the local LLDP MIB to FW + * @hw: pointer to the hw struct + * + * Set DCB configuration to the Firmware + **/ +enum i40e_status_code i40e_set_dcb_config(struct i40e_hw *hw) +{ + enum i40e_status_code ret = I40E_SUCCESS; + struct i40e_dcbx_config *dcbcfg; + struct i40e_virt_mem mem; + u8 
mib_type, *lldpmib; + u16 miblen; + + /* update the hw local config */ + dcbcfg = &hw->local_dcbx_config; + /* Allocate the LLDPDU */ + ret = i40e_allocate_virt_mem(hw, &mem, I40E_LLDPDU_SIZE); + if (ret) + return ret; + + mib_type = SET_LOCAL_MIB_AC_TYPE_LOCAL_MIB; + if (dcbcfg->app_mode == I40E_DCBX_APPS_NON_WILLING) { + mib_type |= SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS << + SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT; + } + lldpmib = (u8 *)mem.va; + ret = i40e_dcb_config_to_lldp(lldpmib, &miblen, dcbcfg); + ret = i40e_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, miblen, NULL); + + i40e_free_virt_mem(hw, &mem); + return ret; +} + +/** + * i40e_dcb_config_to_lldp - Convert Dcbconfig to MIB format + * @hw: pointer to the hw struct + * @dcbcfg: store for LLDPDU data + * + * send DCB configuration to FW + **/ +enum i40e_status_code i40e_dcb_config_to_lldp(u8 *lldpmib, u16 *miblen, + struct i40e_dcbx_config *dcbcfg) +{ + u16 length, offset = 0, tlvid = I40E_TLV_ID_START; + enum i40e_status_code ret = I40E_SUCCESS; + struct i40e_lldp_org_tlv *tlv; + u16 typelength; + + tlv = (struct i40e_lldp_org_tlv *)lldpmib; + while (1) { + i40e_add_dcb_tlv(tlv, dcbcfg, tlvid++); + typelength = I40E_NTOHS(tlv->typelength); + length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >> + I40E_LLDP_TLV_LEN_SHIFT); + if (length) + offset += length + 2; + /* END TLV or beyond LLDPDU size */ + if ((tlvid >= I40E_TLV_ID_END_OF_LLDPPDU) || + (offset > I40E_LLDPDU_SIZE)) + break; + /* Move to next TLV */ + if (length) + tlv = (struct i40e_lldp_org_tlv *)((char *)tlv + + sizeof(tlv->typelength) + length); + } + *miblen = offset; + return ret; +} + + +/** + * _i40e_read_lldp_cfg - generic read of LLDP Configuration data from NVM + * @hw: pointer to the HW structure + * @lldp_cfg: pointer to hold lldp configuration variables + * @module: address of the module pointer + * @word_offset: offset of LLDP configuration + * + * Reads the LLDP configuration data from NVM using passed addresses + **/ +static enum i40e_status_code _i40e_read_lldp_cfg(struct i40e_hw *hw, + struct i40e_lldp_variables *lldp_cfg, + u8 module, u32 word_offset) +{ + u32 address, offset = (2 * word_offset); + enum i40e_status_code ret; + __le16 raw_mem; + u16 mem; + + ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); + if (ret != I40E_SUCCESS) + return ret; + + ret = i40e_aq_read_nvm(hw, 0x0, module * 2, sizeof(raw_mem), &raw_mem, + true, NULL); + i40e_release_nvm(hw); + if (ret != I40E_SUCCESS) + return ret; + + mem = LE16_TO_CPU(raw_mem); + /* Check if this pointer needs to be read in word size or 4K sector + * units. 
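+ *
+ * Worked example (editorial note): a hypothetical pointer word of 0x8002
+ * has I40E_PTR_TYPE set, so it addresses 4K sectors and resolves to byte
+ * address (0x8002 & 0x7FFF) * 4096 = 8192, while a plain word pointer such
+ * as 0x0040 resolves to (0x0040 & 0x7FFF) * 2 = 0x80 bytes, matching the
+ * two branches below.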
+ */ + if (mem & I40E_PTR_TYPE) + address = (0x7FFF & mem) * 4096; + else + address = (0x7FFF & mem) * 2; + + ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); + if (ret != I40E_SUCCESS) + goto err_lldp_cfg; + + ret = i40e_aq_read_nvm(hw, module, offset, sizeof(raw_mem), &raw_mem, + true, NULL); + i40e_release_nvm(hw); + if (ret != I40E_SUCCESS) + return ret; + + mem = LE16_TO_CPU(raw_mem); + offset = mem + word_offset; + offset *= 2; + + ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); + if (ret != I40E_SUCCESS) + goto err_lldp_cfg; + + ret = i40e_aq_read_nvm(hw, 0, address + offset, + sizeof(struct i40e_lldp_variables), lldp_cfg, + true, NULL); + i40e_release_nvm(hw); + +err_lldp_cfg: + return ret; +} + +/** + * i40e_read_lldp_cfg - read LLDP Configuration data from NVM + * @hw: pointer to the HW structure + * @lldp_cfg: pointer to hold lldp configuration variables + * + * Reads the LLDP configuration data from NVM + **/ +enum i40e_status_code i40e_read_lldp_cfg(struct i40e_hw *hw, + struct i40e_lldp_variables *lldp_cfg) +{ + enum i40e_status_code ret = I40E_SUCCESS; + u32 mem; + + if (!lldp_cfg) + return I40E_ERR_PARAM; + + ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); + if (ret != I40E_SUCCESS) + return ret; + + ret = i40e_aq_read_nvm(hw, I40E_SR_NVM_CONTROL_WORD, 0, sizeof(mem), + &mem, true, NULL); + i40e_release_nvm(hw); + if (ret != I40E_SUCCESS) + return ret; + + /* Read a bit that holds information whether we are running flat or + * structured NVM image. Flat image has LLDP configuration in shadow + * ram, so there is a need to pass different addresses for both cases. + */ + if (mem & I40E_SR_NVM_MAP_STRUCTURE_TYPE) { + /* Flat NVM case */ + ret = _i40e_read_lldp_cfg(hw, lldp_cfg, I40E_SR_EMP_MODULE_PTR, + I40E_SR_LLDP_CFG_PTR); + } else { + /* Good old structured NVM image */ + ret = _i40e_read_lldp_cfg(hw, lldp_cfg, I40E_EMP_MODULE_PTR, + I40E_NVM_LLDP_CFG_PTR); + } + + return ret; +} diff --git a/src/spdk/dpdk/drivers/net/i40e/base/i40e_dcb.h b/src/spdk/dpdk/drivers/net/i40e/base/i40e_dcb.h new file mode 100644 index 000000000..0409fd3e1 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/i40e/base/i40e_dcb.h @@ -0,0 +1,208 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + +#ifndef _I40E_DCB_H_ +#define _I40E_DCB_H_ + +#include "i40e_type.h" + +#define I40E_DCBX_OFFLOAD_DISABLED 0 +#define I40E_DCBX_OFFLOAD_ENABLED 1 + +#define I40E_DCBX_STATUS_NOT_STARTED 0 +#define I40E_DCBX_STATUS_IN_PROGRESS 1 +#define I40E_DCBX_STATUS_DONE 2 +#define I40E_DCBX_STATUS_MULTIPLE_PEERS 3 +#define I40E_DCBX_STATUS_DISABLED 7 + +#define I40E_TLV_TYPE_END 0 +#define I40E_TLV_TYPE_ORG 127 + +#define I40E_IEEE_8021QAZ_OUI 0x0080C2 +#define I40E_IEEE_SUBTYPE_ETS_CFG 9 +#define I40E_IEEE_SUBTYPE_ETS_REC 10 +#define I40E_IEEE_SUBTYPE_PFC_CFG 11 +#define I40E_IEEE_SUBTYPE_APP_PRI 12 + +#define I40E_CEE_DCBX_OUI 0x001b21 +#define I40E_CEE_DCBX_TYPE 2 + +#define I40E_CEE_SUBTYPE_CTRL 1 +#define I40E_CEE_SUBTYPE_PG_CFG 2 +#define I40E_CEE_SUBTYPE_PFC_CFG 3 +#define I40E_CEE_SUBTYPE_APP_PRI 4 + +#define I40E_CEE_MAX_FEAT_TYPE 3 +#define I40E_LLDP_ADMINSTATUS_DISABLED 0 +#define I40E_LLDP_ADMINSTATUS_ENABLED_RX 1 +#define I40E_LLDP_ADMINSTATUS_ENABLED_TX 2 +#define I40E_LLDP_ADMINSTATUS_ENABLED_RXTX 3 + +#define I40E_LLDP_CURRENT_STATUS_XL710_OFFSET 0x2B +#define I40E_LLDP_CURRENT_STATUS_X722_OFFSET 0x31 +#define I40E_LLDP_CURRENT_STATUS_OFFSET 1 +#define I40E_LLDP_CURRENT_STATUS_SIZE 1 + +/* Defines for LLDP TLV header */ +#define I40E_LLDP_MIB_HLEN 14 +#define 
I40E_LLDP_TLV_LEN_SHIFT 0 +#define I40E_LLDP_TLV_LEN_MASK (0x01FF << I40E_LLDP_TLV_LEN_SHIFT) +#define I40E_LLDP_TLV_TYPE_SHIFT 9 +#define I40E_LLDP_TLV_TYPE_MASK (0x7F << I40E_LLDP_TLV_TYPE_SHIFT) +#define I40E_LLDP_TLV_SUBTYPE_SHIFT 0 +#define I40E_LLDP_TLV_SUBTYPE_MASK (0xFF << I40E_LLDP_TLV_SUBTYPE_SHIFT) +#define I40E_LLDP_TLV_OUI_SHIFT 8 +#define I40E_LLDP_TLV_OUI_MASK (0xFFFFFF << I40E_LLDP_TLV_OUI_SHIFT) + +/* Defines for IEEE ETS TLV */ +#define I40E_IEEE_ETS_MAXTC_SHIFT 0 +#define I40E_IEEE_ETS_MAXTC_MASK (0x7 << I40E_IEEE_ETS_MAXTC_SHIFT) +#define I40E_IEEE_ETS_CBS_SHIFT 6 +#define I40E_IEEE_ETS_CBS_MASK BIT(I40E_IEEE_ETS_CBS_SHIFT) +#define I40E_IEEE_ETS_WILLING_SHIFT 7 +#define I40E_IEEE_ETS_WILLING_MASK BIT(I40E_IEEE_ETS_WILLING_SHIFT) +#define I40E_IEEE_ETS_PRIO_0_SHIFT 0 +#define I40E_IEEE_ETS_PRIO_0_MASK (0x7 << I40E_IEEE_ETS_PRIO_0_SHIFT) +#define I40E_IEEE_ETS_PRIO_1_SHIFT 4 +#define I40E_IEEE_ETS_PRIO_1_MASK (0x7 << I40E_IEEE_ETS_PRIO_1_SHIFT) +#define I40E_CEE_PGID_PRIO_0_SHIFT 0 +#define I40E_CEE_PGID_PRIO_0_MASK (0xF << I40E_CEE_PGID_PRIO_0_SHIFT) +#define I40E_CEE_PGID_PRIO_1_SHIFT 4 +#define I40E_CEE_PGID_PRIO_1_MASK (0xF << I40E_CEE_PGID_PRIO_1_SHIFT) +#define I40E_CEE_PGID_STRICT 15 + +/* Defines for IEEE TSA types */ +#define I40E_IEEE_TSA_STRICT 0 +#define I40E_IEEE_TSA_CBS 1 +#define I40E_IEEE_TSA_ETS 2 +#define I40E_IEEE_TSA_VENDOR 255 + +/* Defines for IEEE PFC TLV */ +#define I40E_IEEE_PFC_CAP_SHIFT 0 +#define I40E_IEEE_PFC_CAP_MASK (0xF << I40E_IEEE_PFC_CAP_SHIFT) +#define I40E_IEEE_PFC_MBC_SHIFT 6 +#define I40E_IEEE_PFC_MBC_MASK BIT(I40E_IEEE_PFC_MBC_SHIFT) +#define I40E_IEEE_PFC_WILLING_SHIFT 7 +#define I40E_IEEE_PFC_WILLING_MASK BIT(I40E_IEEE_PFC_WILLING_SHIFT) + +/* Defines for IEEE APP TLV */ +#define I40E_IEEE_APP_SEL_SHIFT 0 +#define I40E_IEEE_APP_SEL_MASK (0x7 << I40E_IEEE_APP_SEL_SHIFT) +#define I40E_IEEE_APP_PRIO_SHIFT 5 +#define I40E_IEEE_APP_PRIO_MASK (0x7 << I40E_IEEE_APP_PRIO_SHIFT) + +/* TLV definitions for preparing MIB */ +#define I40E_TLV_ID_CHASSIS_ID 0 +#define I40E_TLV_ID_PORT_ID 1 +#define I40E_TLV_ID_TIME_TO_LIVE 2 +#define I40E_IEEE_TLV_ID_ETS_CFG 3 +#define I40E_IEEE_TLV_ID_ETS_REC 4 +#define I40E_IEEE_TLV_ID_PFC_CFG 5 +#define I40E_IEEE_TLV_ID_APP_PRI 6 +#define I40E_TLV_ID_END_OF_LLDPPDU 7 +#define I40E_TLV_ID_START I40E_IEEE_TLV_ID_ETS_CFG + +#define I40E_IEEE_ETS_TLV_LENGTH 25 +#define I40E_IEEE_PFC_TLV_LENGTH 6 +#define I40E_IEEE_APP_TLV_LENGTH 11 + +#pragma pack(1) + +/* IEEE 802.1AB LLDP TLV structure */ +struct i40e_lldp_generic_tlv { + __be16 typelength; + u8 tlvinfo[1]; +}; + +/* IEEE 802.1AB LLDP Organization specific TLV */ +struct i40e_lldp_org_tlv { + __be16 typelength; + __be32 ouisubtype; + u8 tlvinfo[1]; +}; + +struct i40e_cee_tlv_hdr { + __be16 typelen; + u8 operver; + u8 maxver; +}; + +struct i40e_cee_ctrl_tlv { + struct i40e_cee_tlv_hdr hdr; + __be32 seqno; + __be32 ackno; +}; + +struct i40e_cee_feat_tlv { + struct i40e_cee_tlv_hdr hdr; + u8 en_will_err; /* Bits: |En|Will|Err|Reserved(5)| */ +#define I40E_CEE_FEAT_TLV_ENABLE_MASK 0x80 +#define I40E_CEE_FEAT_TLV_WILLING_MASK 0x40 +#define I40E_CEE_FEAT_TLV_ERR_MASK 0x20 + u8 subtype; + u8 tlvinfo[1]; +}; + +struct i40e_cee_app_prio { + __be16 protocol; + u8 upper_oui_sel; /* Bits: |Upper OUI(6)|Selector(2)| */ +#define I40E_CEE_APP_SELECTOR_MASK 0x03 + __be16 lower_oui; + u8 prio_map; +}; +#pragma pack() + +/* + * TODO: The below structures related LLDP/DCBX variables + * and statistics are defined but need to find how to get + * the required information from the 
Firmware to use them + */ + +/* IEEE 802.1AB LLDP Agent Statistics */ +struct i40e_lldp_stats { + u64 remtablelastchangetime; + u64 remtableinserts; + u64 remtabledeletes; + u64 remtabledrops; + u64 remtableageouts; + u64 txframestotal; + u64 rxframesdiscarded; + u64 rxportframeerrors; + u64 rxportframestotal; + u64 rxporttlvsdiscardedtotal; + u64 rxporttlvsunrecognizedtotal; + u64 remtoomanyneighbors; +}; + +/* IEEE 802.1Qaz DCBX variables */ +struct i40e_dcbx_variables { + u32 defmaxtrafficclasses; + u32 defprioritytcmapping; + u32 deftcbandwidth; + u32 deftsaassignment; +}; + + +enum i40e_get_fw_lldp_status_resp { + I40E_GET_FW_LLDP_STATUS_DISABLED = 0, + I40E_GET_FW_LLDP_STATUS_ENABLED = 1 +}; + +enum i40e_status_code i40e_get_dcbx_status(struct i40e_hw *hw, + u16 *status); +enum i40e_status_code i40e_lldp_to_dcb_config(u8 *lldpmib, + struct i40e_dcbx_config *dcbcfg); +enum i40e_status_code i40e_aq_get_dcb_config(struct i40e_hw *hw, u8 mib_type, + u8 bridgetype, + struct i40e_dcbx_config *dcbcfg); +enum i40e_status_code i40e_get_dcb_config(struct i40e_hw *hw); +enum i40e_status_code i40e_init_dcb(struct i40e_hw *hw, + bool enable_mib_change); +enum i40e_status_code +i40e_get_fw_lldp_status(struct i40e_hw *hw, + enum i40e_get_fw_lldp_status_resp *lldp_status); +enum i40e_status_code i40e_set_dcb_config(struct i40e_hw *hw); +enum i40e_status_code i40e_dcb_config_to_lldp(u8 *lldpmib, u16 *miblen, + struct i40e_dcbx_config *dcbcfg); +#endif /* _I40E_DCB_H_ */ diff --git a/src/spdk/dpdk/drivers/net/i40e/base/i40e_devids.h b/src/spdk/dpdk/drivers/net/i40e/base/i40e_devids.h new file mode 100644 index 000000000..02ae7be55 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/i40e/base/i40e_devids.h @@ -0,0 +1,62 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + +#ifndef _I40E_DEVIDS_H_ +#define _I40E_DEVIDS_H_ + +/* Vendor ID */ +#define I40E_INTEL_VENDOR_ID 0x8086 + +/* Device IDs */ +#define I40E_DEV_ID_SFP_XL710 0x1572 +#define I40E_DEV_ID_QEMU 0x1574 +#define I40E_DEV_ID_KX_B 0x1580 +#define I40E_DEV_ID_KX_C 0x1581 +#define I40E_DEV_ID_QSFP_A 0x1583 +#define I40E_DEV_ID_QSFP_B 0x1584 +#define I40E_DEV_ID_QSFP_C 0x1585 +#define I40E_DEV_ID_10G_BASE_T 0x1586 +#define I40E_DEV_ID_20G_KR2 0x1587 +#define I40E_DEV_ID_20G_KR2_A 0x1588 +#define I40E_DEV_ID_10G_BASE_T4 0x1589 +#define I40E_DEV_ID_25G_B 0x158A +#define I40E_DEV_ID_25G_SFP28 0x158B +#define I40E_DEV_ID_X710_N3000 0x0CF8 +#define I40E_DEV_ID_XXV710_N3000 0x0D58 +#define I40E_DEV_ID_10G_BASE_T_BC 0x15FF +#define I40E_DEV_ID_5G_BASE_T_BC 0x101F +#if defined(INTEGRATED_VF) || defined(VF_DRIVER) || defined(I40E_NDIS_SUPPORT) +#define I40E_DEV_ID_VF 0x154C +#define I40E_DEV_ID_VF_HV 0x1571 +#define I40E_DEV_ID_ADAPTIVE_VF 0x1889 +#endif /* VF_DRIVER */ +#ifdef X722_A0_SUPPORT +#define I40E_DEV_ID_X722_A0 0x374C +#if defined(INTEGRATED_VF) || defined(VF_DRIVER) +#define I40E_DEV_ID_X722_A0_VF 0x374D +#endif +#endif +#define I40E_DEV_ID_10G_B 0x104F +#define I40E_DEV_ID_10G_SFP 0x104E +#define I40E_IS_X710TL_DEVICE(d) \ + (((d) == I40E_DEV_ID_10G_BASE_T_BC) || \ + ((d) == I40E_DEV_ID_5G_BASE_T_BC)) +#define I40E_DEV_ID_KX_X722 0x37CE +#define I40E_DEV_ID_QSFP_X722 0x37CF +#define I40E_DEV_ID_SFP_X722 0x37D0 +#define I40E_DEV_ID_1G_BASE_T_X722 0x37D1 +#define I40E_DEV_ID_10G_BASE_T_X722 0x37D2 +#define I40E_DEV_ID_SFP_I_X722 0x37D3 +#if defined(INTEGRATED_VF) || defined(VF_DRIVER) || defined(I40E_NDIS_SUPPORT) +#define I40E_DEV_ID_X722_VF 0x37CD +#endif /* VF_DRIVER */ + +#define i40e_is_40G_device(d) 
((d) == I40E_DEV_ID_QSFP_A || \ + (d) == I40E_DEV_ID_QSFP_B || \ + (d) == I40E_DEV_ID_QSFP_C) + +#define i40e_is_25G_device(d) ((d) == I40E_DEV_ID_25G_B || \ + (d) == I40E_DEV_ID_25G_SFP28) + +#endif /* _I40E_DEVIDS_H_ */ diff --git a/src/spdk/dpdk/drivers/net/i40e/base/i40e_diag.c b/src/spdk/dpdk/drivers/net/i40e/base/i40e_diag.c new file mode 100644 index 000000000..b3c4cfd3a --- /dev/null +++ b/src/spdk/dpdk/drivers/net/i40e/base/i40e_diag.c @@ -0,0 +1,146 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + +#include "i40e_diag.h" +#include "i40e_prototype.h" + +/** + * i40e_diag_set_loopback + * @hw: pointer to the hw struct + * @mode: loopback mode + * + * Set chosen loopback mode + **/ +enum i40e_status_code i40e_diag_set_loopback(struct i40e_hw *hw, + enum i40e_lb_mode mode) +{ + enum i40e_status_code ret_code = I40E_SUCCESS; + + if (i40e_aq_set_lb_modes(hw, mode, NULL)) + ret_code = I40E_ERR_DIAG_TEST_FAILED; + + return ret_code; +} + +/** + * i40e_diag_reg_pattern_test + * @hw: pointer to the hw struct + * @reg: reg to be tested + * @mask: bits to be touched + **/ +static enum i40e_status_code i40e_diag_reg_pattern_test(struct i40e_hw *hw, + u32 reg, u32 mask) +{ + const u32 patterns[] = {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; + u32 pat, val, orig_val; + int i; + + orig_val = rd32(hw, reg); + for (i = 0; i < ARRAY_SIZE(patterns); i++) { + pat = patterns[i]; + wr32(hw, reg, (pat & mask)); + val = rd32(hw, reg); + if ((val & mask) != (pat & mask)) { + return I40E_ERR_DIAG_TEST_FAILED; + } + } + + wr32(hw, reg, orig_val); + val = rd32(hw, reg); + if (val != orig_val) { + return I40E_ERR_DIAG_TEST_FAILED; + } + + return I40E_SUCCESS; +} + +static struct i40e_diag_reg_test_info i40e_reg_list[] = { + /* offset mask elements stride */ + {I40E_QTX_CTL(0), 0x0000FFBF, 1, I40E_QTX_CTL(1) - I40E_QTX_CTL(0)}, + {I40E_PFINT_ITR0(0), 0x00000FFF, 3, I40E_PFINT_ITR0(1) - I40E_PFINT_ITR0(0)}, + {I40E_PFINT_ITRN(0, 0), 0x00000FFF, 1, I40E_PFINT_ITRN(0, 1) - I40E_PFINT_ITRN(0, 0)}, + {I40E_PFINT_ITRN(1, 0), 0x00000FFF, 1, I40E_PFINT_ITRN(1, 1) - I40E_PFINT_ITRN(1, 0)}, + {I40E_PFINT_ITRN(2, 0), 0x00000FFF, 1, I40E_PFINT_ITRN(2, 1) - I40E_PFINT_ITRN(2, 0)}, + {I40E_PFINT_STAT_CTL0, 0x0000000C, 1, 0}, + {I40E_PFINT_LNKLST0, 0x00001FFF, 1, 0}, + {I40E_PFINT_LNKLSTN(0), 0x000007FF, 1, I40E_PFINT_LNKLSTN(1) - I40E_PFINT_LNKLSTN(0)}, + {I40E_QINT_TQCTL(0), 0x000000FF, 1, I40E_QINT_TQCTL(1) - I40E_QINT_TQCTL(0)}, + {I40E_QINT_RQCTL(0), 0x000000FF, 1, I40E_QINT_RQCTL(1) - I40E_QINT_RQCTL(0)}, + {I40E_PFINT_ICR0_ENA, 0xF7F20000, 1, 0}, + { 0 } +}; + +/** + * i40e_diag_reg_test + * @hw: pointer to the hw struct + * + * Perform registers diagnostic test + **/ +enum i40e_status_code i40e_diag_reg_test(struct i40e_hw *hw) +{ + enum i40e_status_code ret_code = I40E_SUCCESS; + u32 reg, mask; + u32 i, j; + + for (i = 0; i40e_reg_list[i].offset != 0 && + ret_code == I40E_SUCCESS; i++) { + + /* set actual reg range for dynamically allocated resources */ + if (i40e_reg_list[i].offset == I40E_QTX_CTL(0) && + hw->func_caps.num_tx_qp != 0) + i40e_reg_list[i].elements = hw->func_caps.num_tx_qp; + if ((i40e_reg_list[i].offset == I40E_PFINT_ITRN(0, 0) || + i40e_reg_list[i].offset == I40E_PFINT_ITRN(1, 0) || + i40e_reg_list[i].offset == I40E_PFINT_ITRN(2, 0) || + i40e_reg_list[i].offset == I40E_QINT_TQCTL(0) || + i40e_reg_list[i].offset == I40E_QINT_RQCTL(0)) && + hw->func_caps.num_msix_vectors != 0) + i40e_reg_list[i].elements = + hw->func_caps.num_msix_vectors - 1; + + 
/* test register access */ + mask = i40e_reg_list[i].mask; + for (j = 0; j < i40e_reg_list[i].elements && + ret_code == I40E_SUCCESS; j++) { + reg = i40e_reg_list[i].offset + + (j * i40e_reg_list[i].stride); + ret_code = i40e_diag_reg_pattern_test(hw, reg, mask); + } + } + + return ret_code; +} + +/** + * i40e_diag_eeprom_test + * @hw: pointer to the hw struct + * + * Perform EEPROM diagnostic test + **/ +enum i40e_status_code i40e_diag_eeprom_test(struct i40e_hw *hw) +{ + enum i40e_status_code ret_code; + u16 reg_val; + + /* read NVM control word and if NVM valid, validate EEPROM checksum*/ + ret_code = i40e_read_nvm_word(hw, I40E_SR_NVM_CONTROL_WORD, ®_val); + if ((ret_code == I40E_SUCCESS) && + ((reg_val & I40E_SR_CONTROL_WORD_1_MASK) == + BIT(I40E_SR_CONTROL_WORD_1_SHIFT))) + return i40e_validate_nvm_checksum(hw, NULL); + else + return I40E_ERR_DIAG_TEST_FAILED; +} + +/** + * i40e_diag_fw_alive_test + * @hw: pointer to the hw struct + * + * Perform FW alive diagnostic test + **/ +enum i40e_status_code i40e_diag_fw_alive_test(struct i40e_hw *hw) +{ + UNREFERENCED_1PARAMETER(hw); + return I40E_SUCCESS; +} diff --git a/src/spdk/dpdk/drivers/net/i40e/base/i40e_diag.h b/src/spdk/dpdk/drivers/net/i40e/base/i40e_diag.h new file mode 100644 index 000000000..cb59285d9 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/i40e/base/i40e_diag.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + +#ifndef _I40E_DIAG_H_ +#define _I40E_DIAG_H_ + +#include "i40e_type.h" + +enum i40e_lb_mode { + I40E_LB_MODE_NONE = 0x0, + I40E_LB_MODE_PHY_LOCAL = I40E_AQ_LB_PHY_LOCAL, + I40E_LB_MODE_PHY_REMOTE = I40E_AQ_LB_PHY_REMOTE, + I40E_LB_MODE_MAC_LOCAL = I40E_AQ_LB_MAC_LOCAL, +}; + +struct i40e_diag_reg_test_info { + u32 offset; /* the base register */ + u32 mask; /* bits that can be tested */ + u32 elements; /* number of elements if array */ + u32 stride; /* bytes between each element */ +}; + +enum i40e_status_code i40e_diag_set_loopback(struct i40e_hw *hw, + enum i40e_lb_mode mode); +enum i40e_status_code i40e_diag_fw_alive_test(struct i40e_hw *hw); +enum i40e_status_code i40e_diag_reg_test(struct i40e_hw *hw); +enum i40e_status_code i40e_diag_eeprom_test(struct i40e_hw *hw); + +#endif /* _I40E_DIAG_H_ */ diff --git a/src/spdk/dpdk/drivers/net/i40e/base/i40e_hmc.c b/src/spdk/dpdk/drivers/net/i40e/base/i40e_hmc.c new file mode 100644 index 000000000..a47d6e0d7 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/i40e/base/i40e_hmc.c @@ -0,0 +1,340 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + +#include "i40e_osdep.h" +#include "i40e_register.h" +#include "i40e_status.h" +#include "i40e_alloc.h" +#include "i40e_hmc.h" +#include "i40e_type.h" + +/** + * i40e_add_sd_table_entry - Adds a segment descriptor to the table + * @hw: pointer to our hw struct + * @hmc_info: pointer to the HMC configuration information struct + * @sd_index: segment descriptor index to manipulate + * @type: what type of segment descriptor we're manipulating + * @direct_mode_sz: size to alloc in direct mode + **/ +enum i40e_status_code i40e_add_sd_table_entry(struct i40e_hw *hw, + struct i40e_hmc_info *hmc_info, + u32 sd_index, + enum i40e_sd_entry_type type, + u64 direct_mode_sz) +{ + enum i40e_status_code ret_code = I40E_SUCCESS; + struct i40e_hmc_sd_entry *sd_entry; + enum i40e_memory_type mem_type; + bool dma_mem_alloc_done = false; + struct i40e_dma_mem mem; + u64 alloc_len; + + if (NULL == hmc_info->sd_table.sd_entry) { + ret_code = 
I40E_ERR_BAD_PTR; + DEBUGOUT("i40e_add_sd_table_entry: bad sd_entry\n"); + goto exit; + } + + if (sd_index >= hmc_info->sd_table.sd_cnt) { + ret_code = I40E_ERR_INVALID_SD_INDEX; + DEBUGOUT("i40e_add_sd_table_entry: bad sd_index\n"); + goto exit; + } + + sd_entry = &hmc_info->sd_table.sd_entry[sd_index]; + if (!sd_entry->valid) { + if (I40E_SD_TYPE_PAGED == type) { + mem_type = i40e_mem_pd; + alloc_len = I40E_HMC_PAGED_BP_SIZE; + } else { + mem_type = i40e_mem_bp_jumbo; + alloc_len = direct_mode_sz; + } + + /* allocate a 4K pd page or 2M backing page */ + ret_code = i40e_allocate_dma_mem(hw, &mem, mem_type, alloc_len, + I40E_HMC_PD_BP_BUF_ALIGNMENT); + if (ret_code) + goto exit; + dma_mem_alloc_done = true; + if (I40E_SD_TYPE_PAGED == type) { + ret_code = i40e_allocate_virt_mem(hw, + &sd_entry->u.pd_table.pd_entry_virt_mem, + sizeof(struct i40e_hmc_pd_entry) * 512); + if (ret_code) + goto exit; + sd_entry->u.pd_table.pd_entry = + (struct i40e_hmc_pd_entry *) + sd_entry->u.pd_table.pd_entry_virt_mem.va; + i40e_memcpy(&sd_entry->u.pd_table.pd_page_addr, + &mem, sizeof(struct i40e_dma_mem), + I40E_NONDMA_TO_NONDMA); + } else { + i40e_memcpy(&sd_entry->u.bp.addr, + &mem, sizeof(struct i40e_dma_mem), + I40E_NONDMA_TO_NONDMA); + sd_entry->u.bp.sd_pd_index = sd_index; + } + /* initialize the sd entry */ + hmc_info->sd_table.sd_entry[sd_index].entry_type = type; + + /* increment the ref count */ + I40E_INC_SD_REFCNT(&hmc_info->sd_table); + } + /* Increment backing page reference count */ + if (I40E_SD_TYPE_DIRECT == sd_entry->entry_type) + I40E_INC_BP_REFCNT(&sd_entry->u.bp); +exit: + if (I40E_SUCCESS != ret_code) + if (dma_mem_alloc_done) + i40e_free_dma_mem(hw, &mem); + + return ret_code; +} + +/** + * i40e_add_pd_table_entry - Adds page descriptor to the specified table + * @hw: pointer to our HW structure + * @hmc_info: pointer to the HMC configuration information structure + * @pd_index: which page descriptor index to manipulate + * @rsrc_pg: if not NULL, use preallocated page instead of allocating new one. + * + * This function: + * 1. Initializes the pd entry + * 2. Adds pd_entry in the pd_table + * 3. Mark the entry valid in i40e_hmc_pd_entry structure + * 4. Initializes the pd_entry's ref count to 1 + * assumptions: + * 1. The memory for pd should be pinned down, physically contiguous and + * aligned on 4K boundary and zeroed memory. + * 2. It should be 4K in size. 
+ **/ +enum i40e_status_code i40e_add_pd_table_entry(struct i40e_hw *hw, + struct i40e_hmc_info *hmc_info, + u32 pd_index, + struct i40e_dma_mem *rsrc_pg) +{ + enum i40e_status_code ret_code = I40E_SUCCESS; + struct i40e_hmc_pd_table *pd_table; + struct i40e_hmc_pd_entry *pd_entry; + struct i40e_dma_mem mem; + struct i40e_dma_mem *page = &mem; + u32 sd_idx, rel_pd_idx; + u64 *pd_addr; + u64 page_desc; + + if (pd_index / I40E_HMC_PD_CNT_IN_SD >= hmc_info->sd_table.sd_cnt) { + ret_code = I40E_ERR_INVALID_PAGE_DESC_INDEX; + DEBUGOUT("i40e_add_pd_table_entry: bad pd_index\n"); + goto exit; + } + + /* find corresponding sd */ + sd_idx = (pd_index / I40E_HMC_PD_CNT_IN_SD); + if (I40E_SD_TYPE_PAGED != + hmc_info->sd_table.sd_entry[sd_idx].entry_type) + goto exit; + + rel_pd_idx = (pd_index % I40E_HMC_PD_CNT_IN_SD); + pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table; + pd_entry = &pd_table->pd_entry[rel_pd_idx]; + if (!pd_entry->valid) { + if (rsrc_pg) { + pd_entry->rsrc_pg = true; + page = rsrc_pg; + } else { + /* allocate a 4K backing page */ + ret_code = i40e_allocate_dma_mem(hw, page, i40e_mem_bp, + I40E_HMC_PAGED_BP_SIZE, + I40E_HMC_PD_BP_BUF_ALIGNMENT); + if (ret_code) + goto exit; + pd_entry->rsrc_pg = false; + } + + i40e_memcpy(&pd_entry->bp.addr, page, + sizeof(struct i40e_dma_mem), I40E_NONDMA_TO_NONDMA); + pd_entry->bp.sd_pd_index = pd_index; + pd_entry->bp.entry_type = I40E_SD_TYPE_PAGED; + /* Set page address and valid bit */ + page_desc = page->pa | 0x1; + + pd_addr = (u64 *)pd_table->pd_page_addr.va; + pd_addr += rel_pd_idx; + + /* Add the backing page physical address in the pd entry */ + i40e_memcpy(pd_addr, &page_desc, sizeof(u64), + I40E_NONDMA_TO_DMA); + + pd_entry->sd_index = sd_idx; + pd_entry->valid = true; + I40E_INC_PD_REFCNT(pd_table); + } + I40E_INC_BP_REFCNT(&pd_entry->bp); +exit: + return ret_code; +} + +/** + * i40e_remove_pd_bp - remove a backing page from a page descriptor + * @hw: pointer to our HW structure + * @hmc_info: pointer to the HMC configuration information structure + * @idx: the page index + * + * This function: + * 1. Marks the entry in pd tabe (for paged address mode) or in sd table + * (for direct address mode) invalid. + * 2. Write to register PMPDINV to invalidate the backing page in FV cache + * 3. Decrement the ref count for the pd _entry + * assumptions: + * 1. Caller can deallocate the memory used by backing storage after this + * function returns. 
+ **/ +enum i40e_status_code i40e_remove_pd_bp(struct i40e_hw *hw, + struct i40e_hmc_info *hmc_info, + u32 idx) +{ + enum i40e_status_code ret_code = I40E_SUCCESS; + struct i40e_hmc_pd_entry *pd_entry; + struct i40e_hmc_pd_table *pd_table; + struct i40e_hmc_sd_entry *sd_entry; + u32 sd_idx, rel_pd_idx; + u64 *pd_addr; + + /* calculate index */ + sd_idx = idx / I40E_HMC_PD_CNT_IN_SD; + rel_pd_idx = idx % I40E_HMC_PD_CNT_IN_SD; + if (sd_idx >= hmc_info->sd_table.sd_cnt) { + ret_code = I40E_ERR_INVALID_PAGE_DESC_INDEX; + DEBUGOUT("i40e_remove_pd_bp: bad idx\n"); + goto exit; + } + sd_entry = &hmc_info->sd_table.sd_entry[sd_idx]; + if (I40E_SD_TYPE_PAGED != sd_entry->entry_type) { + ret_code = I40E_ERR_INVALID_SD_TYPE; + DEBUGOUT("i40e_remove_pd_bp: wrong sd_entry type\n"); + goto exit; + } + /* get the entry and decrease its ref counter */ + pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table; + pd_entry = &pd_table->pd_entry[rel_pd_idx]; + I40E_DEC_BP_REFCNT(&pd_entry->bp); + if (pd_entry->bp.ref_cnt) + goto exit; + + /* mark the entry invalid */ + pd_entry->valid = false; + I40E_DEC_PD_REFCNT(pd_table); + pd_addr = (u64 *)pd_table->pd_page_addr.va; + pd_addr += rel_pd_idx; + i40e_memset(pd_addr, 0, sizeof(u64), I40E_DMA_MEM); + I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, idx); + + /* free memory here */ + if (!pd_entry->rsrc_pg) + ret_code = i40e_free_dma_mem(hw, &(pd_entry->bp.addr)); + if (I40E_SUCCESS != ret_code) + goto exit; + if (!pd_table->ref_cnt) + i40e_free_virt_mem(hw, &pd_table->pd_entry_virt_mem); +exit: + return ret_code; +} + +/** + * i40e_prep_remove_sd_bp - Prepares to remove a backing page from a sd entry + * @hmc_info: pointer to the HMC configuration information structure + * @idx: the page index + **/ +enum i40e_status_code i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info, + u32 idx) +{ + enum i40e_status_code ret_code = I40E_SUCCESS; + struct i40e_hmc_sd_entry *sd_entry; + + /* get the entry and decrease its ref counter */ + sd_entry = &hmc_info->sd_table.sd_entry[idx]; + I40E_DEC_BP_REFCNT(&sd_entry->u.bp); + if (sd_entry->u.bp.ref_cnt) { + ret_code = I40E_ERR_NOT_READY; + goto exit; + } + I40E_DEC_SD_REFCNT(&hmc_info->sd_table); + + /* mark the entry invalid */ + sd_entry->valid = false; +exit: + return ret_code; +} + +/** + * i40e_remove_sd_bp_new - Removes a backing page from a segment descriptor + * @hw: pointer to our hw struct + * @hmc_info: pointer to the HMC configuration information structure + * @idx: the page index + * @is_pf: used to distinguish between VF and PF + **/ +enum i40e_status_code i40e_remove_sd_bp_new(struct i40e_hw *hw, + struct i40e_hmc_info *hmc_info, + u32 idx, bool is_pf) +{ + struct i40e_hmc_sd_entry *sd_entry; + + if (!is_pf) + return I40E_NOT_SUPPORTED; + + /* get the entry and decrease its ref counter */ + sd_entry = &hmc_info->sd_table.sd_entry[idx]; + I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_DIRECT); + + return i40e_free_dma_mem(hw, &(sd_entry->u.bp.addr)); +} + +/** + * i40e_prep_remove_pd_page - Prepares to remove a PD page from sd entry. 
+ * @hmc_info: pointer to the HMC configuration information structure + * @idx: segment descriptor index to find the relevant page descriptor + **/ +enum i40e_status_code i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info, + u32 idx) +{ + enum i40e_status_code ret_code = I40E_SUCCESS; + struct i40e_hmc_sd_entry *sd_entry; + + sd_entry = &hmc_info->sd_table.sd_entry[idx]; + + if (sd_entry->u.pd_table.ref_cnt) { + ret_code = I40E_ERR_NOT_READY; + goto exit; + } + + /* mark the entry invalid */ + sd_entry->valid = false; + + I40E_DEC_SD_REFCNT(&hmc_info->sd_table); +exit: + return ret_code; +} + +/** + * i40e_remove_pd_page_new - Removes a PD page from sd entry. + * @hw: pointer to our hw struct + * @hmc_info: pointer to the HMC configuration information structure + * @idx: segment descriptor index to find the relevant page descriptor + * @is_pf: used to distinguish between VF and PF + **/ +enum i40e_status_code i40e_remove_pd_page_new(struct i40e_hw *hw, + struct i40e_hmc_info *hmc_info, + u32 idx, bool is_pf) +{ + struct i40e_hmc_sd_entry *sd_entry; + + if (!is_pf) + return I40E_NOT_SUPPORTED; + + sd_entry = &hmc_info->sd_table.sd_entry[idx]; + I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_PAGED); + + return i40e_free_dma_mem(hw, &(sd_entry->u.pd_table.pd_page_addr)); +} diff --git a/src/spdk/dpdk/drivers/net/i40e/base/i40e_hmc.h b/src/spdk/dpdk/drivers/net/i40e/base/i40e_hmc.h new file mode 100644 index 000000000..f9aad7dc3 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/i40e/base/i40e_hmc.h @@ -0,0 +1,216 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + +#ifndef _I40E_HMC_H_ +#define _I40E_HMC_H_ + +#define I40E_HMC_MAX_BP_COUNT 512 + +/* forward-declare the HW struct for the compiler */ +struct i40e_hw; + +#define I40E_HMC_INFO_SIGNATURE 0x484D5347 /* HMSG */ +#define I40E_HMC_PD_CNT_IN_SD 512 +#define I40E_HMC_DIRECT_BP_SIZE 0x200000 /* 2M */ +#define I40E_HMC_PAGED_BP_SIZE 4096 +#define I40E_HMC_PD_BP_BUF_ALIGNMENT 4096 +#define I40E_FIRST_VF_FPM_ID 16 + +struct i40e_hmc_obj_info { + u64 base; /* base addr in FPM */ + u32 max_cnt; /* max count available for this hmc func */ + u32 cnt; /* count of objects driver actually wants to create */ + u64 size; /* size in bytes of one object */ +}; + +enum i40e_sd_entry_type { + I40E_SD_TYPE_INVALID = 0, + I40E_SD_TYPE_PAGED = 1, + I40E_SD_TYPE_DIRECT = 2 +}; + +struct i40e_hmc_bp { + enum i40e_sd_entry_type entry_type; + struct i40e_dma_mem addr; /* populate to be used by hw */ + u32 sd_pd_index; + u32 ref_cnt; +}; + +struct i40e_hmc_pd_entry { + struct i40e_hmc_bp bp; + u32 sd_index; + bool rsrc_pg; + bool valid; +}; + +struct i40e_hmc_pd_table { + struct i40e_dma_mem pd_page_addr; /* populate to be used by hw */ + struct i40e_hmc_pd_entry *pd_entry; /* [512] for sw book keeping */ + struct i40e_virt_mem pd_entry_virt_mem; /* virt mem for pd_entry */ + + u32 ref_cnt; + u32 sd_index; +}; + +struct i40e_hmc_sd_entry { + enum i40e_sd_entry_type entry_type; + bool valid; + + union { + struct i40e_hmc_pd_table pd_table; + struct i40e_hmc_bp bp; + } u; +}; + +struct i40e_hmc_sd_table { + struct i40e_virt_mem addr; /* used to track sd_entry allocations */ + u32 sd_cnt; + u32 ref_cnt; + struct i40e_hmc_sd_entry *sd_entry; /* (sd_cnt*512) entries max */ +}; + +struct i40e_hmc_info { + u32 signature; + /* equals to pci func num for PF and dynamically allocated for VFs */ + u8 hmc_fn_id; + u16 first_sd_index; /* index of the first available SD */ + + /* hmc objects */ + struct i40e_hmc_obj_info *hmc_obj; + 
struct i40e_virt_mem hmc_obj_virt_mem; + struct i40e_hmc_sd_table sd_table; +}; + +#define I40E_INC_SD_REFCNT(sd_table) ((sd_table)->ref_cnt++) +#define I40E_INC_PD_REFCNT(pd_table) ((pd_table)->ref_cnt++) +#define I40E_INC_BP_REFCNT(bp) ((bp)->ref_cnt++) + +#define I40E_DEC_SD_REFCNT(sd_table) ((sd_table)->ref_cnt--) +#define I40E_DEC_PD_REFCNT(pd_table) ((pd_table)->ref_cnt--) +#define I40E_DEC_BP_REFCNT(bp) ((bp)->ref_cnt--) + +/** + * I40E_SET_PF_SD_ENTRY - marks the sd entry as valid in the hardware + * @hw: pointer to our hw struct + * @pa: pointer to physical address + * @sd_index: segment descriptor index + * @type: if sd entry is direct or paged + **/ +#define I40E_SET_PF_SD_ENTRY(hw, pa, sd_index, type) \ +{ \ + u32 val1, val2, val3; \ + val1 = (u32)(I40E_HI_DWORD(pa)); \ + val2 = (u32)(pa) | (I40E_HMC_MAX_BP_COUNT << \ + I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \ + ((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) << \ + I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) | \ + BIT(I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT); \ + val3 = (sd_index) | BIT_ULL(I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \ + wr32((hw), I40E_PFHMC_SDDATAHIGH, val1); \ + wr32((hw), I40E_PFHMC_SDDATALOW, val2); \ + wr32((hw), I40E_PFHMC_SDCMD, val3); \ +} + +/** + * I40E_CLEAR_PF_SD_ENTRY - marks the sd entry as invalid in the hardware + * @hw: pointer to our hw struct + * @sd_index: segment descriptor index + * @type: if sd entry is direct or paged + **/ +#define I40E_CLEAR_PF_SD_ENTRY(hw, sd_index, type) \ +{ \ + u32 val2, val3; \ + val2 = (I40E_HMC_MAX_BP_COUNT << \ + I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \ + ((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) << \ + I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT); \ + val3 = (sd_index) | BIT_ULL(I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \ + wr32((hw), I40E_PFHMC_SDDATAHIGH, 0); \ + wr32((hw), I40E_PFHMC_SDDATALOW, val2); \ + wr32((hw), I40E_PFHMC_SDCMD, val3); \ +} + +/** + * I40E_INVALIDATE_PF_HMC_PD - Invalidates the pd cache in the hardware + * @hw: pointer to our hw struct + * @sd_idx: segment descriptor index + * @pd_idx: page descriptor index + **/ +#define I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, pd_idx) \ + wr32((hw), I40E_PFHMC_PDINV, \ + (((sd_idx) << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) | \ + ((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT))) + +/** + * I40E_FIND_SD_INDEX_LIMIT - finds segment descriptor index limit + * @hmc_info: pointer to the HMC configuration information structure + * @type: type of HMC resources we're searching + * @index: starting index for the object + * @cnt: number of objects we're trying to create + * @sd_idx: pointer to return index of the segment descriptor in question + * @sd_limit: pointer to return the maximum number of segment descriptors + * + * This function calculates the segment descriptor index and index limit + * for the resource defined by i40e_hmc_rsrc_type. 
+ **/ +#define I40E_FIND_SD_INDEX_LIMIT(hmc_info, type, index, cnt, sd_idx, sd_limit)\ +{ \ + u64 fpm_addr, fpm_limit; \ + fpm_addr = (hmc_info)->hmc_obj[(type)].base + \ + (hmc_info)->hmc_obj[(type)].size * (index); \ + fpm_limit = fpm_addr + (hmc_info)->hmc_obj[(type)].size * (cnt);\ + *(sd_idx) = (u32)(fpm_addr / I40E_HMC_DIRECT_BP_SIZE); \ + *(sd_limit) = (u32)((fpm_limit - 1) / I40E_HMC_DIRECT_BP_SIZE); \ + /* add one more to the limit to correct our range */ \ + *(sd_limit) += 1; \ +} + +/** + * I40E_FIND_PD_INDEX_LIMIT - finds page descriptor index limit + * @hmc_info: pointer to the HMC configuration information struct + * @type: HMC resource type we're examining + * @idx: starting index for the object + * @cnt: number of objects we're trying to create + * @pd_index: pointer to return page descriptor index + * @pd_limit: pointer to return page descriptor index limit + * + * Calculates the page descriptor index and index limit for the resource + * defined by i40e_hmc_rsrc_type. + **/ +#define I40E_FIND_PD_INDEX_LIMIT(hmc_info, type, idx, cnt, pd_index, pd_limit)\ +{ \ + u64 fpm_adr, fpm_limit; \ + fpm_adr = (hmc_info)->hmc_obj[(type)].base + \ + (hmc_info)->hmc_obj[(type)].size * (idx); \ + fpm_limit = fpm_adr + (hmc_info)->hmc_obj[(type)].size * (cnt); \ + *(pd_index) = (u32)(fpm_adr / I40E_HMC_PAGED_BP_SIZE); \ + *(pd_limit) = (u32)((fpm_limit - 1) / I40E_HMC_PAGED_BP_SIZE); \ + /* add one more to the limit to correct our range */ \ + *(pd_limit) += 1; \ +} +enum i40e_status_code i40e_add_sd_table_entry(struct i40e_hw *hw, + struct i40e_hmc_info *hmc_info, + u32 sd_index, + enum i40e_sd_entry_type type, + u64 direct_mode_sz); + +enum i40e_status_code i40e_add_pd_table_entry(struct i40e_hw *hw, + struct i40e_hmc_info *hmc_info, + u32 pd_index, + struct i40e_dma_mem *rsrc_pg); +enum i40e_status_code i40e_remove_pd_bp(struct i40e_hw *hw, + struct i40e_hmc_info *hmc_info, + u32 idx); +enum i40e_status_code i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info, + u32 idx); +enum i40e_status_code i40e_remove_sd_bp_new(struct i40e_hw *hw, + struct i40e_hmc_info *hmc_info, + u32 idx, bool is_pf); +enum i40e_status_code i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info, + u32 idx); +enum i40e_status_code i40e_remove_pd_page_new(struct i40e_hw *hw, + struct i40e_hmc_info *hmc_info, + u32 idx, bool is_pf); + +#endif /* _I40E_HMC_H_ */ diff --git a/src/spdk/dpdk/drivers/net/i40e/base/i40e_lan_hmc.c b/src/spdk/dpdk/drivers/net/i40e/base/i40e_lan_hmc.c new file mode 100644 index 000000000..d3969396f --- /dev/null +++ b/src/spdk/dpdk/drivers/net/i40e/base/i40e_lan_hmc.c @@ -0,0 +1,1382 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + +#include "i40e_osdep.h" +#include "i40e_register.h" +#include "i40e_type.h" +#include "i40e_hmc.h" +#include "i40e_lan_hmc.h" +#include "i40e_prototype.h" + +/* lan specific interface functions */ + +/** + * i40e_align_l2obj_base - aligns base object pointer to 512 bytes + * @offset: base address offset needing alignment + * + * Aligns the layer 2 function private memory so it's 512-byte aligned. 
+ **/ +STATIC u64 i40e_align_l2obj_base(u64 offset) +{ + u64 aligned_offset = offset; + + if ((offset % I40E_HMC_L2OBJ_BASE_ALIGNMENT) > 0) + aligned_offset += (I40E_HMC_L2OBJ_BASE_ALIGNMENT - + (offset % I40E_HMC_L2OBJ_BASE_ALIGNMENT)); + + return aligned_offset; +} + +/** + * i40e_calculate_l2fpm_size - calculates layer 2 FPM memory size + * @txq_num: number of Tx queues needing backing context + * @rxq_num: number of Rx queues needing backing context + * @fcoe_cntx_num: amount of FCoE statefull contexts needing backing context + * @fcoe_filt_num: number of FCoE filters needing backing context + * + * Calculates the maximum amount of memory for the function required, based + * on the number of resources it must provide context for. + **/ +u64 i40e_calculate_l2fpm_size(u32 txq_num, u32 rxq_num, + u32 fcoe_cntx_num, u32 fcoe_filt_num) +{ + u64 fpm_size = 0; + + fpm_size = txq_num * I40E_HMC_OBJ_SIZE_TXQ; + fpm_size = i40e_align_l2obj_base(fpm_size); + + fpm_size += (rxq_num * I40E_HMC_OBJ_SIZE_RXQ); + fpm_size = i40e_align_l2obj_base(fpm_size); + + fpm_size += (fcoe_cntx_num * I40E_HMC_OBJ_SIZE_FCOE_CNTX); + fpm_size = i40e_align_l2obj_base(fpm_size); + + fpm_size += (fcoe_filt_num * I40E_HMC_OBJ_SIZE_FCOE_FILT); + fpm_size = i40e_align_l2obj_base(fpm_size); + + return fpm_size; +} + +/** + * i40e_init_lan_hmc - initialize i40e_hmc_info struct + * @hw: pointer to the HW structure + * @txq_num: number of Tx queues needing backing context + * @rxq_num: number of Rx queues needing backing context + * @fcoe_cntx_num: amount of FCoE statefull contexts needing backing context + * @fcoe_filt_num: number of FCoE filters needing backing context + * + * This function will be called once per physical function initialization. + * It will fill out the i40e_hmc_obj_info structure for LAN objects based on + * the driver's provided input, as well as information from the HMC itself + * loaded from NVRAM. + * + * Assumptions: + * - HMC Resource Profile has been selected before calling this function. 
+ **/ +enum i40e_status_code i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num, + u32 rxq_num, u32 fcoe_cntx_num, + u32 fcoe_filt_num) +{ + struct i40e_hmc_obj_info *obj, *full_obj; + enum i40e_status_code ret_code = I40E_SUCCESS; + u64 l2fpm_size; + u32 size_exp; + + hw->hmc.signature = I40E_HMC_INFO_SIGNATURE; + hw->hmc.hmc_fn_id = hw->pf_id; + + /* allocate memory for hmc_obj */ + ret_code = i40e_allocate_virt_mem(hw, &hw->hmc.hmc_obj_virt_mem, + sizeof(struct i40e_hmc_obj_info) * I40E_HMC_LAN_MAX); + if (ret_code) + goto init_lan_hmc_out; + hw->hmc.hmc_obj = (struct i40e_hmc_obj_info *) + hw->hmc.hmc_obj_virt_mem.va; + + /* The full object will be used to create the LAN HMC SD */ + full_obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_FULL]; + full_obj->max_cnt = 0; + full_obj->cnt = 0; + full_obj->base = 0; + full_obj->size = 0; + + /* Tx queue context information */ + obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_TX]; + obj->max_cnt = rd32(hw, I40E_GLHMC_LANQMAX); + obj->cnt = txq_num; + obj->base = 0; + size_exp = rd32(hw, I40E_GLHMC_LANTXOBJSZ); + obj->size = BIT_ULL(size_exp); + + /* validate values requested by driver don't exceed HMC capacity */ + if (txq_num > obj->max_cnt) { + ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT; + DEBUGOUT3("i40e_init_lan_hmc: Tx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n", + txq_num, obj->max_cnt, ret_code); + goto free_hmc_out; + } + + /* aggregate values into the full LAN object for later */ + full_obj->max_cnt += obj->max_cnt; + full_obj->cnt += obj->cnt; + + /* Rx queue context information */ + obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_RX]; + obj->max_cnt = rd32(hw, I40E_GLHMC_LANQMAX); + obj->cnt = rxq_num; + obj->base = hw->hmc.hmc_obj[I40E_HMC_LAN_TX].base + + (hw->hmc.hmc_obj[I40E_HMC_LAN_TX].cnt * + hw->hmc.hmc_obj[I40E_HMC_LAN_TX].size); + obj->base = i40e_align_l2obj_base(obj->base); + size_exp = rd32(hw, I40E_GLHMC_LANRXOBJSZ); + obj->size = BIT_ULL(size_exp); + + /* validate values requested by driver don't exceed HMC capacity */ + if (rxq_num > obj->max_cnt) { + ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT; + DEBUGOUT3("i40e_init_lan_hmc: Rx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n", + rxq_num, obj->max_cnt, ret_code); + goto free_hmc_out; + } + + /* aggregate values into the full LAN object for later */ + full_obj->max_cnt += obj->max_cnt; + full_obj->cnt += obj->cnt; + + /* FCoE context information */ + obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX]; + obj->max_cnt = rd32(hw, I40E_GLHMC_FCOEMAX); + obj->cnt = fcoe_cntx_num; + obj->base = hw->hmc.hmc_obj[I40E_HMC_LAN_RX].base + + (hw->hmc.hmc_obj[I40E_HMC_LAN_RX].cnt * + hw->hmc.hmc_obj[I40E_HMC_LAN_RX].size); + obj->base = i40e_align_l2obj_base(obj->base); + size_exp = rd32(hw, I40E_GLHMC_FCOEDDPOBJSZ); + obj->size = BIT_ULL(size_exp); + + /* validate values requested by driver don't exceed HMC capacity */ + if (fcoe_cntx_num > obj->max_cnt) { + ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT; + DEBUGOUT3("i40e_init_lan_hmc: FCoE context: asks for 0x%x but max allowed is 0x%x, returns error %d\n", + fcoe_cntx_num, obj->max_cnt, ret_code); + goto free_hmc_out; + } + + /* aggregate values into the full LAN object for later */ + full_obj->max_cnt += obj->max_cnt; + full_obj->cnt += obj->cnt; + + /* FCoE filter information */ + obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_FILT]; + obj->max_cnt = rd32(hw, I40E_GLHMC_FCOEFMAX); + obj->cnt = fcoe_filt_num; + obj->base = hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].base + + (hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].cnt * + 
hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].size); + obj->base = i40e_align_l2obj_base(obj->base); + size_exp = rd32(hw, I40E_GLHMC_FCOEFOBJSZ); + obj->size = BIT_ULL(size_exp); + + /* validate values requested by driver don't exceed HMC capacity */ + if (fcoe_filt_num > obj->max_cnt) { + ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT; + DEBUGOUT3("i40e_init_lan_hmc: FCoE filter: asks for 0x%x but max allowed is 0x%x, returns error %d\n", + fcoe_filt_num, obj->max_cnt, ret_code); + goto free_hmc_out; + } + + /* aggregate values into the full LAN object for later */ + full_obj->max_cnt += obj->max_cnt; + full_obj->cnt += obj->cnt; + + hw->hmc.first_sd_index = 0; + hw->hmc.sd_table.ref_cnt = 0; + l2fpm_size = i40e_calculate_l2fpm_size(txq_num, rxq_num, fcoe_cntx_num, + fcoe_filt_num); + if (NULL == hw->hmc.sd_table.sd_entry) { + hw->hmc.sd_table.sd_cnt = (u32) + (l2fpm_size + I40E_HMC_DIRECT_BP_SIZE - 1) / + I40E_HMC_DIRECT_BP_SIZE; + + /* allocate the sd_entry members in the sd_table */ + ret_code = i40e_allocate_virt_mem(hw, &hw->hmc.sd_table.addr, + (sizeof(struct i40e_hmc_sd_entry) * + hw->hmc.sd_table.sd_cnt)); + if (ret_code) + goto free_hmc_out; + hw->hmc.sd_table.sd_entry = + (struct i40e_hmc_sd_entry *)hw->hmc.sd_table.addr.va; + } + /* store in the LAN full object for later */ + full_obj->size = l2fpm_size; + +init_lan_hmc_out: + return ret_code; +free_hmc_out: + if (hw->hmc.hmc_obj_virt_mem.va) + i40e_free_virt_mem(hw, &hw->hmc.hmc_obj_virt_mem); + + return ret_code; +} + +/** + * i40e_remove_pd_page - Remove a page from the page descriptor table + * @hw: pointer to the HW structure + * @hmc_info: pointer to the HMC configuration information structure + * @idx: segment descriptor index to find the relevant page descriptor + * + * This function: + * 1. Marks the entry in pd table (for paged address mode) invalid + * 2. write to register PMPDINV to invalidate the backing page in FV cache + * 3. Decrement the ref count for pd_entry + * assumptions: + * 1. caller can deallocate the memory used by pd after this function + * returns. + **/ +STATIC enum i40e_status_code i40e_remove_pd_page(struct i40e_hw *hw, + struct i40e_hmc_info *hmc_info, + u32 idx) +{ + enum i40e_status_code ret_code = I40E_SUCCESS; + + if (i40e_prep_remove_pd_page(hmc_info, idx) == I40E_SUCCESS) + ret_code = i40e_remove_pd_page_new(hw, hmc_info, idx, true); + + return ret_code; +} + +/** + * i40e_remove_sd_bp - remove a backing page from a segment descriptor + * @hw: pointer to our HW structure + * @hmc_info: pointer to the HMC configuration information structure + * @idx: the page index + * + * This function: + * 1. Marks the entry in sd table (for direct address mode) invalid + * 2. write to register PMSDCMD, PMSDDATALOW(PMSDDATALOW.PMSDVALID set + * to 0) and PMSDDATAHIGH to invalidate the sd page + * 3. Decrement the ref count for the sd_entry + * assumptions: + * 1. caller can deallocate the memory used by backing storage after this + * function returns. 
+ **/ +STATIC enum i40e_status_code i40e_remove_sd_bp(struct i40e_hw *hw, + struct i40e_hmc_info *hmc_info, + u32 idx) +{ + enum i40e_status_code ret_code = I40E_SUCCESS; + + if (i40e_prep_remove_sd_bp(hmc_info, idx) == I40E_SUCCESS) + ret_code = i40e_remove_sd_bp_new(hw, hmc_info, idx, true); + + return ret_code; +} + +/** + * i40e_create_lan_hmc_object - allocate backing store for hmc objects + * @hw: pointer to the HW structure + * @info: pointer to i40e_hmc_create_obj_info struct + * + * This will allocate memory for PDs and backing pages and populate + * the sd and pd entries. + **/ +enum i40e_status_code i40e_create_lan_hmc_object(struct i40e_hw *hw, + struct i40e_hmc_lan_create_obj_info *info) +{ + enum i40e_status_code ret_code = I40E_SUCCESS; + struct i40e_hmc_sd_entry *sd_entry; + u32 pd_idx1 = 0, pd_lmt1 = 0; + u32 pd_idx = 0, pd_lmt = 0; + bool pd_error = false; + u32 sd_idx, sd_lmt; + u64 sd_size; + u32 i, j; + + if (NULL == info) { + ret_code = I40E_ERR_BAD_PTR; + DEBUGOUT("i40e_create_lan_hmc_object: bad info ptr\n"); + goto exit; + } + if (NULL == info->hmc_info) { + ret_code = I40E_ERR_BAD_PTR; + DEBUGOUT("i40e_create_lan_hmc_object: bad hmc_info ptr\n"); + goto exit; + } + if (I40E_HMC_INFO_SIGNATURE != info->hmc_info->signature) { + ret_code = I40E_ERR_BAD_PTR; + DEBUGOUT("i40e_create_lan_hmc_object: bad signature\n"); + goto exit; + } + + if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) { + ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX; + DEBUGOUT1("i40e_create_lan_hmc_object: returns error %d\n", + ret_code); + goto exit; + } + if ((info->start_idx + info->count) > + info->hmc_info->hmc_obj[info->rsrc_type].cnt) { + ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT; + DEBUGOUT1("i40e_create_lan_hmc_object: returns error %d\n", + ret_code); + goto exit; + } + + /* find sd index and limit */ + I40E_FIND_SD_INDEX_LIMIT(info->hmc_info, info->rsrc_type, + info->start_idx, info->count, + &sd_idx, &sd_lmt); + if (sd_idx >= info->hmc_info->sd_table.sd_cnt || + sd_lmt > info->hmc_info->sd_table.sd_cnt) { + ret_code = I40E_ERR_INVALID_SD_INDEX; + goto exit; + } + /* find pd index */ + I40E_FIND_PD_INDEX_LIMIT(info->hmc_info, info->rsrc_type, + info->start_idx, info->count, &pd_idx, + &pd_lmt); + + /* This is to cover for cases where you may not want to have an SD with + * the full 2M memory but something smaller. By not filling out any + * size, the function will default the SD size to be 2M. + */ + if (info->direct_mode_sz == 0) + sd_size = I40E_HMC_DIRECT_BP_SIZE; + else + sd_size = info->direct_mode_sz; + + /* check if all the sds are valid. If not, allocate a page and + * initialize it. + */ + for (j = sd_idx; j < sd_lmt; j++) { + /* update the sd table entry */ + ret_code = i40e_add_sd_table_entry(hw, info->hmc_info, j, + info->entry_type, + sd_size); + if (I40E_SUCCESS != ret_code) + goto exit_sd_error; + sd_entry = &info->hmc_info->sd_table.sd_entry[j]; + if (I40E_SD_TYPE_PAGED == sd_entry->entry_type) { + /* check if all the pds in this sd are valid. If not, + * allocate a page and initialize it. 
+ */ + + /* find pd_idx and pd_lmt in this sd */ + pd_idx1 = max(pd_idx, (j * I40E_HMC_MAX_BP_COUNT)); + pd_lmt1 = min(pd_lmt, + ((j + 1) * I40E_HMC_MAX_BP_COUNT)); + for (i = pd_idx1; i < pd_lmt1; i++) { + /* update the pd table entry */ + ret_code = i40e_add_pd_table_entry(hw, + info->hmc_info, + i, NULL); + if (I40E_SUCCESS != ret_code) { + pd_error = true; + break; + } + } + if (pd_error) { + /* remove the backing pages from pd_idx1 to i */ + while (i && (i > pd_idx1)) { + i40e_remove_pd_bp(hw, info->hmc_info, + (i - 1)); + i--; + } + } + } + if (!sd_entry->valid) { + sd_entry->valid = true; + switch (sd_entry->entry_type) { + case I40E_SD_TYPE_PAGED: + I40E_SET_PF_SD_ENTRY(hw, + sd_entry->u.pd_table.pd_page_addr.pa, + j, sd_entry->entry_type); + break; + case I40E_SD_TYPE_DIRECT: + I40E_SET_PF_SD_ENTRY(hw, sd_entry->u.bp.addr.pa, + j, sd_entry->entry_type); + break; + default: + ret_code = I40E_ERR_INVALID_SD_TYPE; + goto exit; + } + } + } + goto exit; + +exit_sd_error: + /* cleanup for sd entries from j to sd_idx */ + while (j && (j > sd_idx)) { + sd_entry = &info->hmc_info->sd_table.sd_entry[j - 1]; + switch (sd_entry->entry_type) { + case I40E_SD_TYPE_PAGED: + pd_idx1 = max(pd_idx, + ((j - 1) * I40E_HMC_MAX_BP_COUNT)); + pd_lmt1 = min(pd_lmt, (j * I40E_HMC_MAX_BP_COUNT)); + for (i = pd_idx1; i < pd_lmt1; i++) + i40e_remove_pd_bp(hw, info->hmc_info, i); + i40e_remove_pd_page(hw, info->hmc_info, (j - 1)); + break; + case I40E_SD_TYPE_DIRECT: + i40e_remove_sd_bp(hw, info->hmc_info, (j - 1)); + break; + default: + ret_code = I40E_ERR_INVALID_SD_TYPE; + break; + } + j--; + } +exit: + return ret_code; +} + +/** + * i40e_configure_lan_hmc - prepare the HMC backing store + * @hw: pointer to the hw structure + * @model: the model for the layout of the SD/PD tables + * + * - This function will be called once per physical function initialization. + * - This function will be called after i40e_init_lan_hmc() and before + * any LAN/FCoE HMC objects can be created. 
+ **/ +enum i40e_status_code i40e_configure_lan_hmc(struct i40e_hw *hw, + enum i40e_hmc_model model) +{ + struct i40e_hmc_lan_create_obj_info info; + u8 hmc_fn_id = hw->hmc.hmc_fn_id; + struct i40e_hmc_obj_info *obj; + enum i40e_status_code ret_code = I40E_SUCCESS; + + /* Initialize part of the create object info struct */ + info.hmc_info = &hw->hmc; + info.rsrc_type = I40E_HMC_LAN_FULL; + info.start_idx = 0; + info.direct_mode_sz = hw->hmc.hmc_obj[I40E_HMC_LAN_FULL].size; + + /* Build the SD entry for the LAN objects */ + switch (model) { + case I40E_HMC_MODEL_DIRECT_PREFERRED: + case I40E_HMC_MODEL_DIRECT_ONLY: + info.entry_type = I40E_SD_TYPE_DIRECT; + /* Make one big object, a single SD */ + info.count = 1; + ret_code = i40e_create_lan_hmc_object(hw, &info); + if ((ret_code != I40E_SUCCESS) && (model == I40E_HMC_MODEL_DIRECT_PREFERRED)) + goto try_type_paged; + else if (ret_code != I40E_SUCCESS) + goto configure_lan_hmc_out; + /* else clause falls through the break */ + break; + case I40E_HMC_MODEL_PAGED_ONLY: +try_type_paged: + info.entry_type = I40E_SD_TYPE_PAGED; + /* Make one big object in the PD table */ + info.count = 1; + ret_code = i40e_create_lan_hmc_object(hw, &info); + if (ret_code != I40E_SUCCESS) + goto configure_lan_hmc_out; + break; + default: + /* unsupported type */ + ret_code = I40E_ERR_INVALID_SD_TYPE; + DEBUGOUT1("i40e_configure_lan_hmc: Unknown SD type: %d\n", + ret_code); + goto configure_lan_hmc_out; + } + + /* Configure and program the FPM registers so objects can be created */ + + /* Tx contexts */ + obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_TX]; + wr32(hw, I40E_GLHMC_LANTXBASE(hmc_fn_id), + (u32)((obj->base & I40E_GLHMC_LANTXBASE_FPMLANTXBASE_MASK) / 512)); + wr32(hw, I40E_GLHMC_LANTXCNT(hmc_fn_id), obj->cnt); + + /* Rx contexts */ + obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_RX]; + wr32(hw, I40E_GLHMC_LANRXBASE(hmc_fn_id), + (u32)((obj->base & I40E_GLHMC_LANRXBASE_FPMLANRXBASE_MASK) / 512)); + wr32(hw, I40E_GLHMC_LANRXCNT(hmc_fn_id), obj->cnt); + + /* FCoE contexts */ + obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX]; + wr32(hw, I40E_GLHMC_FCOEDDPBASE(hmc_fn_id), + (u32)((obj->base & I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_MASK) / 512)); + wr32(hw, I40E_GLHMC_FCOEDDPCNT(hmc_fn_id), obj->cnt); + + /* FCoE filters */ + obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_FILT]; + wr32(hw, I40E_GLHMC_FCOEFBASE(hmc_fn_id), + (u32)((obj->base & I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_MASK) / 512)); + wr32(hw, I40E_GLHMC_FCOEFCNT(hmc_fn_id), obj->cnt); + +configure_lan_hmc_out: + return ret_code; +} + +/** + * i40e_delete_hmc_object - remove hmc objects + * @hw: pointer to the HW structure + * @info: pointer to i40e_hmc_delete_obj_info struct + * + * This will de-populate the SDs and PDs. It frees + * the memory for PDS and backing storage. After this function is returned, + * caller should deallocate memory allocated previously for + * book-keeping information about PDs and backing storage. 
+ **/ +enum i40e_status_code i40e_delete_lan_hmc_object(struct i40e_hw *hw, + struct i40e_hmc_lan_delete_obj_info *info) +{ + enum i40e_status_code ret_code = I40E_SUCCESS; + struct i40e_hmc_pd_table *pd_table; + u32 pd_idx, pd_lmt, rel_pd_idx; + u32 sd_idx, sd_lmt; + u32 i, j; + + if (NULL == info) { + ret_code = I40E_ERR_BAD_PTR; + DEBUGOUT("i40e_delete_hmc_object: bad info ptr\n"); + goto exit; + } + if (NULL == info->hmc_info) { + ret_code = I40E_ERR_BAD_PTR; + DEBUGOUT("i40e_delete_hmc_object: bad info->hmc_info ptr\n"); + goto exit; + } + if (I40E_HMC_INFO_SIGNATURE != info->hmc_info->signature) { + ret_code = I40E_ERR_BAD_PTR; + DEBUGOUT("i40e_delete_hmc_object: bad hmc_info->signature\n"); + goto exit; + } + + if (NULL == info->hmc_info->sd_table.sd_entry) { + ret_code = I40E_ERR_BAD_PTR; + DEBUGOUT("i40e_delete_hmc_object: bad sd_entry\n"); + goto exit; + } + + if (NULL == info->hmc_info->hmc_obj) { + ret_code = I40E_ERR_BAD_PTR; + DEBUGOUT("i40e_delete_hmc_object: bad hmc_info->hmc_obj\n"); + goto exit; + } + if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) { + ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX; + DEBUGOUT1("i40e_delete_hmc_object: returns error %d\n", + ret_code); + goto exit; + } + + if ((info->start_idx + info->count) > + info->hmc_info->hmc_obj[info->rsrc_type].cnt) { + ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT; + DEBUGOUT1("i40e_delete_hmc_object: returns error %d\n", + ret_code); + goto exit; + } + + I40E_FIND_PD_INDEX_LIMIT(info->hmc_info, info->rsrc_type, + info->start_idx, info->count, &pd_idx, + &pd_lmt); + + for (j = pd_idx; j < pd_lmt; j++) { + sd_idx = j / I40E_HMC_PD_CNT_IN_SD; + + if (I40E_SD_TYPE_PAGED != + info->hmc_info->sd_table.sd_entry[sd_idx].entry_type) + continue; + + rel_pd_idx = j % I40E_HMC_PD_CNT_IN_SD; + + pd_table = + &info->hmc_info->sd_table.sd_entry[sd_idx].u.pd_table; + if (pd_table->pd_entry[rel_pd_idx].valid) { + ret_code = i40e_remove_pd_bp(hw, info->hmc_info, j); + if (I40E_SUCCESS != ret_code) + goto exit; + } + } + + /* find sd index and limit */ + I40E_FIND_SD_INDEX_LIMIT(info->hmc_info, info->rsrc_type, + info->start_idx, info->count, + &sd_idx, &sd_lmt); + if (sd_idx >= info->hmc_info->sd_table.sd_cnt || + sd_lmt > info->hmc_info->sd_table.sd_cnt) { + ret_code = I40E_ERR_INVALID_SD_INDEX; + goto exit; + } + + for (i = sd_idx; i < sd_lmt; i++) { + if (!info->hmc_info->sd_table.sd_entry[i].valid) + continue; + switch (info->hmc_info->sd_table.sd_entry[i].entry_type) { + case I40E_SD_TYPE_DIRECT: + ret_code = i40e_remove_sd_bp(hw, info->hmc_info, i); + if (I40E_SUCCESS != ret_code) + goto exit; + break; + case I40E_SD_TYPE_PAGED: + ret_code = i40e_remove_pd_page(hw, info->hmc_info, i); + if (I40E_SUCCESS != ret_code) + goto exit; + break; + default: + break; + } + } +exit: + return ret_code; +} + +/** + * i40e_shutdown_lan_hmc - Remove HMC backing store, free allocated memory + * @hw: pointer to the hw structure + * + * This must be called by drivers as they are shutting down and being + * removed from the OS. 
+ **/ +enum i40e_status_code i40e_shutdown_lan_hmc(struct i40e_hw *hw) +{ + struct i40e_hmc_lan_delete_obj_info info; + enum i40e_status_code ret_code; + + info.hmc_info = &hw->hmc; + info.rsrc_type = I40E_HMC_LAN_FULL; + info.start_idx = 0; + info.count = 1; + + /* delete the object */ + ret_code = i40e_delete_lan_hmc_object(hw, &info); + + /* free the SD table entry for LAN */ + i40e_free_virt_mem(hw, &hw->hmc.sd_table.addr); + hw->hmc.sd_table.sd_cnt = 0; + hw->hmc.sd_table.sd_entry = NULL; + + /* free memory used for hmc_obj */ + i40e_free_virt_mem(hw, &hw->hmc.hmc_obj_virt_mem); + hw->hmc.hmc_obj = NULL; + + return ret_code; +} + +#define I40E_HMC_STORE(_struct, _ele) \ + offsetof(struct _struct, _ele), \ + FIELD_SIZEOF(struct _struct, _ele) + +struct i40e_context_ele { + u16 offset; + u16 size_of; + u16 width; + u16 lsb; +}; + +/* LAN Tx Queue Context */ +static struct i40e_context_ele i40e_hmc_txq_ce_info[] = { + /* Field Width LSB */ + {I40E_HMC_STORE(i40e_hmc_obj_txq, head), 13, 0 }, + {I40E_HMC_STORE(i40e_hmc_obj_txq, new_context), 1, 30 }, + {I40E_HMC_STORE(i40e_hmc_obj_txq, base), 57, 32 }, + {I40E_HMC_STORE(i40e_hmc_obj_txq, fc_ena), 1, 89 }, + {I40E_HMC_STORE(i40e_hmc_obj_txq, timesync_ena), 1, 90 }, + {I40E_HMC_STORE(i40e_hmc_obj_txq, fd_ena), 1, 91 }, + {I40E_HMC_STORE(i40e_hmc_obj_txq, alt_vlan_ena), 1, 92 }, + {I40E_HMC_STORE(i40e_hmc_obj_txq, cpuid), 8, 96 }, +/* line 1 */ + {I40E_HMC_STORE(i40e_hmc_obj_txq, thead_wb), 13, 0 + 128 }, + {I40E_HMC_STORE(i40e_hmc_obj_txq, head_wb_ena), 1, 32 + 128 }, + {I40E_HMC_STORE(i40e_hmc_obj_txq, qlen), 13, 33 + 128 }, + {I40E_HMC_STORE(i40e_hmc_obj_txq, tphrdesc_ena), 1, 46 + 128 }, + {I40E_HMC_STORE(i40e_hmc_obj_txq, tphrpacket_ena), 1, 47 + 128 }, + {I40E_HMC_STORE(i40e_hmc_obj_txq, tphwdesc_ena), 1, 48 + 128 }, + {I40E_HMC_STORE(i40e_hmc_obj_txq, head_wb_addr), 64, 64 + 128 }, +/* line 7 */ + {I40E_HMC_STORE(i40e_hmc_obj_txq, crc), 32, 0 + (7 * 128) }, + {I40E_HMC_STORE(i40e_hmc_obj_txq, rdylist), 10, 84 + (7 * 128) }, + {I40E_HMC_STORE(i40e_hmc_obj_txq, rdylist_act), 1, 94 + (7 * 128) }, + { 0 } +}; + +/* LAN Rx Queue Context */ +static struct i40e_context_ele i40e_hmc_rxq_ce_info[] = { + /* Field Width LSB */ + { I40E_HMC_STORE(i40e_hmc_obj_rxq, head), 13, 0 }, + { I40E_HMC_STORE(i40e_hmc_obj_rxq, cpuid), 8, 13 }, + { I40E_HMC_STORE(i40e_hmc_obj_rxq, base), 57, 32 }, + { I40E_HMC_STORE(i40e_hmc_obj_rxq, qlen), 13, 89 }, + { I40E_HMC_STORE(i40e_hmc_obj_rxq, dbuff), 7, 102 }, + { I40E_HMC_STORE(i40e_hmc_obj_rxq, hbuff), 5, 109 }, + { I40E_HMC_STORE(i40e_hmc_obj_rxq, dtype), 2, 114 }, + { I40E_HMC_STORE(i40e_hmc_obj_rxq, dsize), 1, 116 }, + { I40E_HMC_STORE(i40e_hmc_obj_rxq, crcstrip), 1, 117 }, + { I40E_HMC_STORE(i40e_hmc_obj_rxq, fc_ena), 1, 118 }, + { I40E_HMC_STORE(i40e_hmc_obj_rxq, l2tsel), 1, 119 }, + { I40E_HMC_STORE(i40e_hmc_obj_rxq, hsplit_0), 4, 120 }, + { I40E_HMC_STORE(i40e_hmc_obj_rxq, hsplit_1), 2, 124 }, + { I40E_HMC_STORE(i40e_hmc_obj_rxq, showiv), 1, 127 }, + { I40E_HMC_STORE(i40e_hmc_obj_rxq, rxmax), 14, 174 }, + { I40E_HMC_STORE(i40e_hmc_obj_rxq, tphrdesc_ena), 1, 193 }, + { I40E_HMC_STORE(i40e_hmc_obj_rxq, tphwdesc_ena), 1, 194 }, + { I40E_HMC_STORE(i40e_hmc_obj_rxq, tphdata_ena), 1, 195 }, + { I40E_HMC_STORE(i40e_hmc_obj_rxq, tphhead_ena), 1, 196 }, + { I40E_HMC_STORE(i40e_hmc_obj_rxq, lrxqthresh), 3, 198 }, + { I40E_HMC_STORE(i40e_hmc_obj_rxq, prefena), 1, 201 }, + { 0 } +}; + +/** + * i40e_write_byte - replace HMC context byte + * @hmc_bits: pointer to the HMC memory + * @ce_info: a description of the struct 
to be read from + * @src: the struct to be read from + **/ +static void i40e_write_byte(u8 *hmc_bits, + struct i40e_context_ele *ce_info, + u8 *src) +{ + u8 src_byte, dest_byte, mask; + u8 *from, *dest; + u16 shift_width; + + /* copy from the next struct field */ + from = src + ce_info->offset; + + /* prepare the bits and mask */ + shift_width = ce_info->lsb % 8; + mask = (u8)(BIT(ce_info->width) - 1); + + src_byte = *from; + src_byte &= mask; + + /* shift to correct alignment */ + mask <<= shift_width; + src_byte <<= shift_width; + + /* get the current bits from the target bit string */ + dest = hmc_bits + (ce_info->lsb / 8); + + i40e_memcpy(&dest_byte, dest, sizeof(dest_byte), I40E_DMA_TO_NONDMA); + + dest_byte &= ~mask; /* get the bits not changing */ + dest_byte |= src_byte; /* add in the new bits */ + + /* put it all back */ + i40e_memcpy(dest, &dest_byte, sizeof(dest_byte), I40E_NONDMA_TO_DMA); +} + +/** + * i40e_write_word - replace HMC context word + * @hmc_bits: pointer to the HMC memory + * @ce_info: a description of the struct to be read from + * @src: the struct to be read from + **/ +static void i40e_write_word(u8 *hmc_bits, + struct i40e_context_ele *ce_info, + u8 *src) +{ + u16 src_word, mask; + u8 *from, *dest; + u16 shift_width; + __le16 dest_word; + + /* copy from the next struct field */ + from = src + ce_info->offset; + + /* prepare the bits and mask */ + shift_width = ce_info->lsb % 8; + mask = BIT(ce_info->width) - 1; + + /* don't swizzle the bits until after the mask because the mask bits + * will be in a different bit position on big endian machines + */ + src_word = *(u16 *)from; + src_word &= mask; + + /* shift to correct alignment */ + mask <<= shift_width; + src_word <<= shift_width; + + /* get the current bits from the target bit string */ + dest = hmc_bits + (ce_info->lsb / 8); + + i40e_memcpy(&dest_word, dest, sizeof(dest_word), I40E_DMA_TO_NONDMA); + + dest_word &= ~(CPU_TO_LE16(mask)); /* get the bits not changing */ + dest_word |= CPU_TO_LE16(src_word); /* add in the new bits */ + + /* put it all back */ + i40e_memcpy(dest, &dest_word, sizeof(dest_word), I40E_NONDMA_TO_DMA); +} + +/** + * i40e_write_dword - replace HMC context dword + * @hmc_bits: pointer to the HMC memory + * @ce_info: a description of the struct to be read from + * @src: the struct to be read from + **/ +static void i40e_write_dword(u8 *hmc_bits, + struct i40e_context_ele *ce_info, + u8 *src) +{ + u32 src_dword, mask; + u8 *from, *dest; + u16 shift_width; + __le32 dest_dword; + + /* copy from the next struct field */ + from = src + ce_info->offset; + + /* prepare the bits and mask */ + shift_width = ce_info->lsb % 8; + + /* if the field width is exactly 32 on an x86 machine, then the shift + * operation will not work because the SHL instructions count is masked + * to 5 bits so the shift will do nothing + */ + if (ce_info->width < 32) + mask = BIT(ce_info->width) - 1; + else + mask = ~(u32)0; + + /* don't swizzle the bits until after the mask because the mask bits + * will be in a different bit position on big endian machines + */ + src_dword = *(u32 *)from; + src_dword &= mask; + + /* shift to correct alignment */ + mask <<= shift_width; + src_dword <<= shift_width; + + /* get the current bits from the target bit string */ + dest = hmc_bits + (ce_info->lsb / 8); + + i40e_memcpy(&dest_dword, dest, sizeof(dest_dword), I40E_DMA_TO_NONDMA); + + dest_dword &= ~(CPU_TO_LE32(mask)); /* get the bits not changing */ + dest_dword |= CPU_TO_LE32(src_dword); /* add in the new bits */ + + /* put it 
all back */ + i40e_memcpy(dest, &dest_dword, sizeof(dest_dword), I40E_NONDMA_TO_DMA); +} + +/** + * i40e_write_qword - replace HMC context qword + * @hmc_bits: pointer to the HMC memory + * @ce_info: a description of the struct to be read from + * @src: the struct to be read from + **/ +static void i40e_write_qword(u8 *hmc_bits, + struct i40e_context_ele *ce_info, + u8 *src) +{ + u64 src_qword, mask; + u8 *from, *dest; + u16 shift_width; + __le64 dest_qword; + + /* copy from the next struct field */ + from = src + ce_info->offset; + + /* prepare the bits and mask */ + shift_width = ce_info->lsb % 8; + + /* if the field width is exactly 64 on an x86 machine, then the shift + * operation will not work because the SHL instructions count is masked + * to 6 bits so the shift will do nothing + */ + if (ce_info->width < 64) + mask = BIT_ULL(ce_info->width) - 1; + else + mask = ~(u64)0; + + /* don't swizzle the bits until after the mask because the mask bits + * will be in a different bit position on big endian machines + */ + src_qword = *(u64 *)from; + src_qword &= mask; + + /* shift to correct alignment */ + mask <<= shift_width; + src_qword <<= shift_width; + + /* get the current bits from the target bit string */ + dest = hmc_bits + (ce_info->lsb / 8); + + i40e_memcpy(&dest_qword, dest, sizeof(dest_qword), I40E_DMA_TO_NONDMA); + + dest_qword &= ~(CPU_TO_LE64(mask)); /* get the bits not changing */ + dest_qword |= CPU_TO_LE64(src_qword); /* add in the new bits */ + + /* put it all back */ + i40e_memcpy(dest, &dest_qword, sizeof(dest_qword), I40E_NONDMA_TO_DMA); +} + +/** + * i40e_read_byte - read HMC context byte into struct + * @hmc_bits: pointer to the HMC memory + * @ce_info: a description of the struct to be filled + * @dest: the struct to be filled + **/ +static void i40e_read_byte(u8 *hmc_bits, + struct i40e_context_ele *ce_info, + u8 *dest) +{ + u8 dest_byte, mask; + u8 *src, *target; + u16 shift_width; + + /* prepare the bits and mask */ + shift_width = ce_info->lsb % 8; + mask = (u8)(BIT(ce_info->width) - 1); + + /* shift to correct alignment */ + mask <<= shift_width; + + /* get the current bits from the src bit string */ + src = hmc_bits + (ce_info->lsb / 8); + + i40e_memcpy(&dest_byte, src, sizeof(dest_byte), I40E_DMA_TO_NONDMA); + + dest_byte &= ~(mask); + + dest_byte >>= shift_width; + + /* get the address from the struct field */ + target = dest + ce_info->offset; + + /* put it back in the struct */ + i40e_memcpy(target, &dest_byte, sizeof(dest_byte), I40E_NONDMA_TO_DMA); +} + +/** + * i40e_read_word - read HMC context word into struct + * @hmc_bits: pointer to the HMC memory + * @ce_info: a description of the struct to be filled + * @dest: the struct to be filled + **/ +static void i40e_read_word(u8 *hmc_bits, + struct i40e_context_ele *ce_info, + u8 *dest) +{ + u16 dest_word, mask; + u8 *src, *target; + u16 shift_width; + __le16 src_word; + + /* prepare the bits and mask */ + shift_width = ce_info->lsb % 8; + mask = BIT(ce_info->width) - 1; + + /* shift to correct alignment */ + mask <<= shift_width; + + /* get the current bits from the src bit string */ + src = hmc_bits + (ce_info->lsb / 8); + + i40e_memcpy(&src_word, src, sizeof(src_word), I40E_DMA_TO_NONDMA); + + /* the data in the memory is stored as little endian so mask it + * correctly + */ + src_word &= ~(CPU_TO_LE16(mask)); + + /* get the data back into host order before shifting */ + dest_word = LE16_TO_CPU(src_word); + + dest_word >>= shift_width; + + /* get the address from the struct field */ + target = dest + 
ce_info->offset; + + /* put it back in the struct */ + i40e_memcpy(target, &dest_word, sizeof(dest_word), I40E_NONDMA_TO_DMA); +} + +/** + * i40e_read_dword - read HMC context dword into struct + * @hmc_bits: pointer to the HMC memory + * @ce_info: a description of the struct to be filled + * @dest: the struct to be filled + **/ +static void i40e_read_dword(u8 *hmc_bits, + struct i40e_context_ele *ce_info, + u8 *dest) +{ + u32 dest_dword, mask; + u8 *src, *target; + u16 shift_width; + __le32 src_dword; + + /* prepare the bits and mask */ + shift_width = ce_info->lsb % 8; + + /* if the field width is exactly 32 on an x86 machine, then the shift + * operation will not work because the SHL instructions count is masked + * to 5 bits so the shift will do nothing + */ + if (ce_info->width < 32) + mask = BIT(ce_info->width) - 1; + else + mask = ~(u32)0; + + /* shift to correct alignment */ + mask <<= shift_width; + + /* get the current bits from the src bit string */ + src = hmc_bits + (ce_info->lsb / 8); + + i40e_memcpy(&src_dword, src, sizeof(src_dword), I40E_DMA_TO_NONDMA); + + /* the data in the memory is stored as little endian so mask it + * correctly + */ + src_dword &= ~(CPU_TO_LE32(mask)); + + /* get the data back into host order before shifting */ + dest_dword = LE32_TO_CPU(src_dword); + + dest_dword >>= shift_width; + + /* get the address from the struct field */ + target = dest + ce_info->offset; + + /* put it back in the struct */ + i40e_memcpy(target, &dest_dword, sizeof(dest_dword), + I40E_NONDMA_TO_DMA); +} + +/** + * i40e_read_qword - read HMC context qword into struct + * @hmc_bits: pointer to the HMC memory + * @ce_info: a description of the struct to be filled + * @dest: the struct to be filled + **/ +static void i40e_read_qword(u8 *hmc_bits, + struct i40e_context_ele *ce_info, + u8 *dest) +{ + u64 dest_qword, mask; + u8 *src, *target; + u16 shift_width; + __le64 src_qword; + + /* prepare the bits and mask */ + shift_width = ce_info->lsb % 8; + + /* if the field width is exactly 64 on an x86 machine, then the shift + * operation will not work because the SHL instructions count is masked + * to 6 bits so the shift will do nothing + */ + if (ce_info->width < 64) + mask = BIT_ULL(ce_info->width) - 1; + else + mask = ~(u64)0; + + /* shift to correct alignment */ + mask <<= shift_width; + + /* get the current bits from the src bit string */ + src = hmc_bits + (ce_info->lsb / 8); + + i40e_memcpy(&src_qword, src, sizeof(src_qword), I40E_DMA_TO_NONDMA); + + /* the data in the memory is stored as little endian so mask it + * correctly + */ + src_qword &= ~(CPU_TO_LE64(mask)); + + /* get the data back into host order before shifting */ + dest_qword = LE64_TO_CPU(src_qword); + + dest_qword >>= shift_width; + + /* get the address from the struct field */ + target = dest + ce_info->offset; + + /* put it back in the struct */ + i40e_memcpy(target, &dest_qword, sizeof(dest_qword), + I40E_NONDMA_TO_DMA); +} + +/** + * i40e_get_hmc_context - extract HMC context bits + * @context_bytes: pointer to the context bit array + * @ce_info: a description of the struct to be filled + * @dest: the struct to be filled + **/ +static enum i40e_status_code i40e_get_hmc_context(u8 *context_bytes, + struct i40e_context_ele *ce_info, + u8 *dest) +{ + int f; + + for (f = 0; ce_info[f].width != 0; f++) { + switch (ce_info[f].size_of) { + case 1: + i40e_read_byte(context_bytes, &ce_info[f], dest); + break; + case 2: + i40e_read_word(context_bytes, &ce_info[f], dest); + break; + case 4: + 
i40e_read_dword(context_bytes, &ce_info[f], dest); + break; + case 8: + i40e_read_qword(context_bytes, &ce_info[f], dest); + break; + default: + /* nothing to do, just keep going */ + break; + } + } + + return I40E_SUCCESS; +} + +/** + * i40e_clear_hmc_context - zero out the HMC context bits + * @hw: the hardware struct + * @context_bytes: pointer to the context bit array (DMA memory) + * @hmc_type: the type of HMC resource + **/ +static enum i40e_status_code i40e_clear_hmc_context(struct i40e_hw *hw, + u8 *context_bytes, + enum i40e_hmc_lan_rsrc_type hmc_type) +{ + /* clean the bit array */ + i40e_memset(context_bytes, 0, (u32)hw->hmc.hmc_obj[hmc_type].size, + I40E_DMA_MEM); + + return I40E_SUCCESS; +} + +/** + * i40e_set_hmc_context - replace HMC context bits + * @context_bytes: pointer to the context bit array + * @ce_info: a description of the struct to be filled + * @dest: the struct to be filled + **/ +static enum i40e_status_code i40e_set_hmc_context(u8 *context_bytes, + struct i40e_context_ele *ce_info, + u8 *dest) +{ + int f; + + for (f = 0; ce_info[f].width != 0; f++) { + + /* we have to deal with each element of the HMC using the + * correct size so that we are correct regardless of the + * endianness of the machine + */ + switch (ce_info[f].size_of) { + case 1: + i40e_write_byte(context_bytes, &ce_info[f], dest); + break; + case 2: + i40e_write_word(context_bytes, &ce_info[f], dest); + break; + case 4: + i40e_write_dword(context_bytes, &ce_info[f], dest); + break; + case 8: + i40e_write_qword(context_bytes, &ce_info[f], dest); + break; + } + } + + return I40E_SUCCESS; +} + +/** + * i40e_hmc_get_object_va - retrieves an object's virtual address + * @hw: pointer to the hw structure + * @object_base: pointer to u64 to get the va + * @rsrc_type: the hmc resource type + * @obj_idx: hmc object index + * + * This function retrieves the object's virtual address from the object + * base pointer. This function is used for LAN Queue contexts. 
+ **/ +STATIC +enum i40e_status_code i40e_hmc_get_object_va(struct i40e_hw *hw, + u8 **object_base, + enum i40e_hmc_lan_rsrc_type rsrc_type, + u32 obj_idx) +{ + u32 obj_offset_in_sd, obj_offset_in_pd; + struct i40e_hmc_info *hmc_info = &hw->hmc; + struct i40e_hmc_sd_entry *sd_entry; + struct i40e_hmc_pd_entry *pd_entry; + u32 pd_idx, pd_lmt, rel_pd_idx; + enum i40e_status_code ret_code = I40E_SUCCESS; + u64 obj_offset_in_fpm; + u32 sd_idx, sd_lmt; + + if (NULL == hmc_info->hmc_obj) { + ret_code = I40E_ERR_BAD_PTR; + DEBUGOUT("i40e_hmc_get_object_va: bad hmc_info->hmc_obj ptr\n"); + goto exit; + } + if (NULL == object_base) { + ret_code = I40E_ERR_BAD_PTR; + DEBUGOUT("i40e_hmc_get_object_va: bad object_base ptr\n"); + goto exit; + } + if (I40E_HMC_INFO_SIGNATURE != hmc_info->signature) { + ret_code = I40E_ERR_BAD_PTR; + DEBUGOUT("i40e_hmc_get_object_va: bad hmc_info->signature\n"); + goto exit; + } + if (obj_idx >= hmc_info->hmc_obj[rsrc_type].cnt) { + DEBUGOUT1("i40e_hmc_get_object_va: returns error %d\n", + ret_code); + ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX; + goto exit; + } + /* find sd index and limit */ + I40E_FIND_SD_INDEX_LIMIT(hmc_info, rsrc_type, obj_idx, 1, + &sd_idx, &sd_lmt); + + sd_entry = &hmc_info->sd_table.sd_entry[sd_idx]; + obj_offset_in_fpm = hmc_info->hmc_obj[rsrc_type].base + + hmc_info->hmc_obj[rsrc_type].size * obj_idx; + + if (I40E_SD_TYPE_PAGED == sd_entry->entry_type) { + I40E_FIND_PD_INDEX_LIMIT(hmc_info, rsrc_type, obj_idx, 1, + &pd_idx, &pd_lmt); + rel_pd_idx = pd_idx % I40E_HMC_PD_CNT_IN_SD; + pd_entry = &sd_entry->u.pd_table.pd_entry[rel_pd_idx]; + obj_offset_in_pd = (u32)(obj_offset_in_fpm % + I40E_HMC_PAGED_BP_SIZE); + *object_base = (u8 *)pd_entry->bp.addr.va + obj_offset_in_pd; + } else { + obj_offset_in_sd = (u32)(obj_offset_in_fpm % + I40E_HMC_DIRECT_BP_SIZE); + *object_base = (u8 *)sd_entry->u.bp.addr.va + obj_offset_in_sd; + } +exit: + return ret_code; +} + +/** + * i40e_get_lan_tx_queue_context - return the HMC context for the queue + * @hw: the hardware struct + * @queue: the queue we care about + * @s: the struct to be filled + **/ +enum i40e_status_code i40e_get_lan_tx_queue_context(struct i40e_hw *hw, + u16 queue, + struct i40e_hmc_obj_txq *s) +{ + enum i40e_status_code err; + u8 *context_bytes; + + err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_TX, queue); + if (err < 0) + return err; + + return i40e_get_hmc_context(context_bytes, + i40e_hmc_txq_ce_info, (u8 *)s); +} + +/** + * i40e_clear_lan_tx_queue_context - clear the HMC context for the queue + * @hw: the hardware struct + * @queue: the queue we care about + **/ +enum i40e_status_code i40e_clear_lan_tx_queue_context(struct i40e_hw *hw, + u16 queue) +{ + enum i40e_status_code err; + u8 *context_bytes; + + err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_TX, queue); + if (err < 0) + return err; + + return i40e_clear_hmc_context(hw, context_bytes, I40E_HMC_LAN_TX); +} + +/** + * i40e_set_lan_tx_queue_context - set the HMC context for the queue + * @hw: the hardware struct + * @queue: the queue we care about + * @s: the struct to be filled + **/ +enum i40e_status_code i40e_set_lan_tx_queue_context(struct i40e_hw *hw, + u16 queue, + struct i40e_hmc_obj_txq *s) +{ + enum i40e_status_code err; + u8 *context_bytes; + + err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_TX, queue); + if (err < 0) + return err; + + return i40e_set_hmc_context(context_bytes, + i40e_hmc_txq_ce_info, (u8 *)s); +} + +/** + * i40e_get_lan_rx_queue_context - return the HMC context 
for the queue + * @hw: the hardware struct + * @queue: the queue we care about + * @s: the struct to be filled + **/ +enum i40e_status_code i40e_get_lan_rx_queue_context(struct i40e_hw *hw, + u16 queue, + struct i40e_hmc_obj_rxq *s) +{ + enum i40e_status_code err; + u8 *context_bytes; + + err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_RX, queue); + if (err < 0) + return err; + + return i40e_get_hmc_context(context_bytes, + i40e_hmc_rxq_ce_info, (u8 *)s); +} + +/** + * i40e_clear_lan_rx_queue_context - clear the HMC context for the queue + * @hw: the hardware struct + * @queue: the queue we care about + **/ +enum i40e_status_code i40e_clear_lan_rx_queue_context(struct i40e_hw *hw, + u16 queue) +{ + enum i40e_status_code err; + u8 *context_bytes; + + err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_RX, queue); + if (err < 0) + return err; + + return i40e_clear_hmc_context(hw, context_bytes, I40E_HMC_LAN_RX); +} + +/** + * i40e_set_lan_rx_queue_context - set the HMC context for the queue + * @hw: the hardware struct + * @queue: the queue we care about + * @s: the struct to be filled + **/ +enum i40e_status_code i40e_set_lan_rx_queue_context(struct i40e_hw *hw, + u16 queue, + struct i40e_hmc_obj_rxq *s) +{ + enum i40e_status_code err; + u8 *context_bytes; + + err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_RX, queue); + if (err < 0) + return err; + + return i40e_set_hmc_context(context_bytes, + i40e_hmc_rxq_ce_info, (u8 *)s); +} diff --git a/src/spdk/dpdk/drivers/net/i40e/base/i40e_lan_hmc.h b/src/spdk/dpdk/drivers/net/i40e/base/i40e_lan_hmc.h new file mode 100644 index 000000000..aa5dceb79 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/i40e/base/i40e_lan_hmc.h @@ -0,0 +1,171 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + +#ifndef _I40E_LAN_HMC_H_ +#define _I40E_LAN_HMC_H_ + +/* forward-declare the HW struct for the compiler */ +struct i40e_hw; + +/* HMC element context information */ + +/* Rx queue context data + * + * The sizes of the variables may be larger than needed due to crossing byte + * boundaries. If we do not have the width of the variable set to the correct + * size then we could end up shifting bits off the top of the variable when the + * variable is at the top of a byte and crosses over into the next byte. + */ +struct i40e_hmc_obj_rxq { + u16 head; + u16 cpuid; /* bigger than needed, see above for reason */ + u64 base; + u16 qlen; +#define I40E_RXQ_CTX_DBUFF_SHIFT 7 + u16 dbuff; /* bigger than needed, see above for reason */ +#define I40E_RXQ_CTX_HBUFF_SHIFT 6 + u16 hbuff; /* bigger than needed, see above for reason */ + u8 dtype; + u8 dsize; + u8 crcstrip; + u8 fc_ena; + u8 l2tsel; + u8 hsplit_0; + u8 hsplit_1; + u8 showiv; + u32 rxmax; /* bigger than needed, see above for reason */ + u8 tphrdesc_ena; + u8 tphwdesc_ena; + u8 tphdata_ena; + u8 tphhead_ena; + u16 lrxqthresh; /* bigger than needed, see above for reason */ + u8 prefena; /* NOTE: normally must be set to 1 at init */ +}; + +/* Tx queue context data +* +* The sizes of the variables may be larger than needed due to crossing byte +* boundaries. If we do not have the width of the variable set to the correct +* size then we could end up shifting bits off the top of the variable when the +* variable is at the top of a byte and crosses over into the next byte. 
+*/ +struct i40e_hmc_obj_txq { + u16 head; + u8 new_context; + u64 base; + u8 fc_ena; + u8 timesync_ena; + u8 fd_ena; + u8 alt_vlan_ena; + u16 thead_wb; + u8 cpuid; + u8 head_wb_ena; + u16 qlen; + u8 tphrdesc_ena; + u8 tphrpacket_ena; + u8 tphwdesc_ena; + u64 head_wb_addr; + u32 crc; + u16 rdylist; + u8 rdylist_act; +}; + +/* for hsplit_0 field of Rx HMC context */ +enum i40e_hmc_obj_rx_hsplit_0 { + I40E_HMC_OBJ_RX_HSPLIT_0_NO_SPLIT = 0, + I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_L2 = 1, + I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_IP = 2, + I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_TCP_UDP = 4, + I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_SCTP = 8, +}; + +/* fcoe_cntx and fcoe_filt are for debugging purpose only */ +struct i40e_hmc_obj_fcoe_cntx { + u32 rsv[32]; +}; + +struct i40e_hmc_obj_fcoe_filt { + u32 rsv[8]; +}; + +/* Context sizes for LAN objects */ +enum i40e_hmc_lan_object_size { + I40E_HMC_LAN_OBJ_SZ_8 = 0x3, + I40E_HMC_LAN_OBJ_SZ_16 = 0x4, + I40E_HMC_LAN_OBJ_SZ_32 = 0x5, + I40E_HMC_LAN_OBJ_SZ_64 = 0x6, + I40E_HMC_LAN_OBJ_SZ_128 = 0x7, + I40E_HMC_LAN_OBJ_SZ_256 = 0x8, + I40E_HMC_LAN_OBJ_SZ_512 = 0x9, +}; + +#define I40E_HMC_L2OBJ_BASE_ALIGNMENT 512 +#define I40E_HMC_OBJ_SIZE_TXQ 128 +#define I40E_HMC_OBJ_SIZE_RXQ 32 +#define I40E_HMC_OBJ_SIZE_FCOE_CNTX 64 +#define I40E_HMC_OBJ_SIZE_FCOE_FILT 64 + +enum i40e_hmc_lan_rsrc_type { + I40E_HMC_LAN_FULL = 0, + I40E_HMC_LAN_TX = 1, + I40E_HMC_LAN_RX = 2, + I40E_HMC_FCOE_CTX = 3, + I40E_HMC_FCOE_FILT = 4, + I40E_HMC_LAN_MAX = 5 +}; + +enum i40e_hmc_model { + I40E_HMC_MODEL_DIRECT_PREFERRED = 0, + I40E_HMC_MODEL_DIRECT_ONLY = 1, + I40E_HMC_MODEL_PAGED_ONLY = 2, + I40E_HMC_MODEL_UNKNOWN, +}; + +struct i40e_hmc_lan_create_obj_info { + struct i40e_hmc_info *hmc_info; + u32 rsrc_type; + u32 start_idx; + u32 count; + enum i40e_sd_entry_type entry_type; + u64 direct_mode_sz; +}; + +struct i40e_hmc_lan_delete_obj_info { + struct i40e_hmc_info *hmc_info; + u32 rsrc_type; + u32 start_idx; + u32 count; +}; + +enum i40e_status_code i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num, + u32 rxq_num, u32 fcoe_cntx_num, + u32 fcoe_filt_num); +enum i40e_status_code i40e_configure_lan_hmc(struct i40e_hw *hw, + enum i40e_hmc_model model); +enum i40e_status_code i40e_shutdown_lan_hmc(struct i40e_hw *hw); + +u64 i40e_calculate_l2fpm_size(u32 txq_num, u32 rxq_num, + u32 fcoe_cntx_num, u32 fcoe_filt_num); +enum i40e_status_code i40e_get_lan_tx_queue_context(struct i40e_hw *hw, + u16 queue, + struct i40e_hmc_obj_txq *s); +enum i40e_status_code i40e_clear_lan_tx_queue_context(struct i40e_hw *hw, + u16 queue); +enum i40e_status_code i40e_set_lan_tx_queue_context(struct i40e_hw *hw, + u16 queue, + struct i40e_hmc_obj_txq *s); +enum i40e_status_code i40e_get_lan_rx_queue_context(struct i40e_hw *hw, + u16 queue, + struct i40e_hmc_obj_rxq *s); +enum i40e_status_code i40e_clear_lan_rx_queue_context(struct i40e_hw *hw, + u16 queue); +enum i40e_status_code i40e_set_lan_rx_queue_context(struct i40e_hw *hw, + u16 queue, + struct i40e_hmc_obj_rxq *s); +enum i40e_status_code i40e_create_lan_hmc_object(struct i40e_hw *hw, + struct i40e_hmc_lan_create_obj_info *info); +enum i40e_status_code i40e_delete_lan_hmc_object(struct i40e_hw *hw, + struct i40e_hmc_lan_delete_obj_info *info); + +#endif /* _I40E_LAN_HMC_H_ */ diff --git a/src/spdk/dpdk/drivers/net/i40e/base/i40e_nvm.c b/src/spdk/dpdk/drivers/net/i40e/base/i40e_nvm.c new file mode 100644 index 000000000..6466d8648 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/i40e/base/i40e_nvm.c @@ -0,0 +1,1792 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 
Intel Corporation + */ + +#include "i40e_prototype.h" + +/** + * i40e_init_nvm_ops - Initialize NVM function pointers + * @hw: pointer to the HW structure + * + * Setup the function pointers and the NVM info structure. Should be called + * once per NVM initialization, e.g. inside the i40e_init_shared_code(). + * Please notice that the NVM term is used here (& in all methods covered + * in this file) as an equivalent of the FLASH part mapped into the SR. + * We are accessing FLASH always through the Shadow RAM. + **/ +enum i40e_status_code i40e_init_nvm(struct i40e_hw *hw) +{ + struct i40e_nvm_info *nvm = &hw->nvm; + enum i40e_status_code ret_code = I40E_SUCCESS; + u32 fla, gens; + u8 sr_size; + + DEBUGFUNC("i40e_init_nvm"); + + /* The SR size is stored regardless of the nvm programming mode + * as the blank mode may be used in the factory line. + */ + gens = rd32(hw, I40E_GLNVM_GENS); + sr_size = ((gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >> + I40E_GLNVM_GENS_SR_SIZE_SHIFT); + /* Switching to words (sr_size contains power of 2KB) */ + nvm->sr_size = BIT(sr_size) * I40E_SR_WORDS_IN_1KB; + + /* Check if we are in the normal or blank NVM programming mode */ + fla = rd32(hw, I40E_GLNVM_FLA); + if (fla & I40E_GLNVM_FLA_LOCKED_MASK) { /* Normal programming mode */ + /* Max NVM timeout */ + nvm->timeout = I40E_MAX_NVM_TIMEOUT; + nvm->blank_nvm_mode = false; + } else { /* Blank programming mode */ + nvm->blank_nvm_mode = true; + ret_code = I40E_ERR_NVM_BLANK_MODE; + i40e_debug(hw, I40E_DEBUG_NVM, "NVM init error: unsupported blank mode.\n"); + } + + return ret_code; +} + +/** + * i40e_acquire_nvm - Generic request for acquiring the NVM ownership + * @hw: pointer to the HW structure + * @access: NVM access type (read or write) + * + * This function will request NVM ownership for reading + * via the proper Admin Command. + **/ +enum i40e_status_code i40e_acquire_nvm(struct i40e_hw *hw, + enum i40e_aq_resource_access_type access) +{ + enum i40e_status_code ret_code = I40E_SUCCESS; + u64 gtime, timeout; + u64 time_left = 0; + + DEBUGFUNC("i40e_acquire_nvm"); + + if (hw->nvm.blank_nvm_mode) + goto i40e_i40e_acquire_nvm_exit; + + ret_code = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID, access, + 0, &time_left, NULL); + /* Reading the Global Device Timer */ + gtime = rd32(hw, I40E_GLVFGEN_TIMER); + + /* Store the timeout */ + hw->nvm.hw_semaphore_timeout = I40E_MS_TO_GTIME(time_left) + gtime; + + if (ret_code) + i40e_debug(hw, I40E_DEBUG_NVM, + "NVM acquire type %d failed time_left=%llu ret=%d aq_err=%d\n", + access, time_left, ret_code, hw->aq.asq_last_status); + + if (ret_code && time_left) { + /* Poll until the current NVM owner timeouts */ + timeout = I40E_MS_TO_GTIME(I40E_MAX_NVM_TIMEOUT) + gtime; + while ((gtime < timeout) && time_left) { + i40e_msec_delay(10); + gtime = rd32(hw, I40E_GLVFGEN_TIMER); + ret_code = i40e_aq_request_resource(hw, + I40E_NVM_RESOURCE_ID, + access, 0, &time_left, + NULL); + if (ret_code == I40E_SUCCESS) { + hw->nvm.hw_semaphore_timeout = + I40E_MS_TO_GTIME(time_left) + gtime; + break; + } + } + if (ret_code != I40E_SUCCESS) { + hw->nvm.hw_semaphore_timeout = 0; + i40e_debug(hw, I40E_DEBUG_NVM, + "NVM acquire timed out, wait %llu ms before trying again. 
status=%d aq_err=%d\n", + time_left, ret_code, hw->aq.asq_last_status); + } + } + +i40e_i40e_acquire_nvm_exit: + return ret_code; +} + +/** + * i40e_release_nvm - Generic request for releasing the NVM ownership + * @hw: pointer to the HW structure + * + * This function will release NVM resource via the proper Admin Command. + **/ +void i40e_release_nvm(struct i40e_hw *hw) +{ + enum i40e_status_code ret_code = I40E_SUCCESS; + u32 total_delay = 0; + + DEBUGFUNC("i40e_release_nvm"); + + if (hw->nvm.blank_nvm_mode) + return; + + ret_code = i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL); + + /* there are some rare cases when trying to release the resource + * results in an admin Q timeout, so handle them correctly + */ + while ((ret_code == I40E_ERR_ADMIN_QUEUE_TIMEOUT) && + (total_delay < hw->aq.asq_cmd_timeout)) { + i40e_msec_delay(1); + ret_code = i40e_aq_release_resource(hw, + I40E_NVM_RESOURCE_ID, 0, NULL); + total_delay++; + } +} + +/** + * i40e_poll_sr_srctl_done_bit - Polls the GLNVM_SRCTL done bit + * @hw: pointer to the HW structure + * + * Polls the SRCTL Shadow RAM register done bit. + **/ +static enum i40e_status_code i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw) +{ + enum i40e_status_code ret_code = I40E_ERR_TIMEOUT; + u32 srctl, wait_cnt; + + DEBUGFUNC("i40e_poll_sr_srctl_done_bit"); + + /* Poll the I40E_GLNVM_SRCTL until the done bit is set */ + for (wait_cnt = 0; wait_cnt < I40E_SRRD_SRCTL_ATTEMPTS; wait_cnt++) { + srctl = rd32(hw, I40E_GLNVM_SRCTL); + if (srctl & I40E_GLNVM_SRCTL_DONE_MASK) { + ret_code = I40E_SUCCESS; + break; + } + i40e_usec_delay(5); + } + if (ret_code == I40E_ERR_TIMEOUT) + i40e_debug(hw, I40E_DEBUG_NVM, "Done bit in GLNVM_SRCTL not set"); + return ret_code; +} + +/** + * i40e_read_nvm_word_srctl - Reads Shadow RAM via SRCTL register + * @hw: pointer to the HW structure + * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF) + * @data: word read from the Shadow RAM + * + * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register. + **/ +STATIC enum i40e_status_code i40e_read_nvm_word_srctl(struct i40e_hw *hw, + u16 offset, + u16 *data) +{ + enum i40e_status_code ret_code = I40E_ERR_TIMEOUT; + u32 sr_reg; + + DEBUGFUNC("i40e_read_nvm_word_srctl"); + + if (offset >= hw->nvm.sr_size) { + i40e_debug(hw, I40E_DEBUG_NVM, + "NVM read error: Offset %d beyond Shadow RAM limit %d\n", + offset, hw->nvm.sr_size); + ret_code = I40E_ERR_PARAM; + goto read_nvm_exit; + } + + /* Poll the done bit first */ + ret_code = i40e_poll_sr_srctl_done_bit(hw); + if (ret_code == I40E_SUCCESS) { + /* Write the address and start reading */ + sr_reg = ((u32)offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) | + BIT(I40E_GLNVM_SRCTL_START_SHIFT); + wr32(hw, I40E_GLNVM_SRCTL, sr_reg); + + /* Poll I40E_GLNVM_SRCTL until the done bit is set */ + ret_code = i40e_poll_sr_srctl_done_bit(hw); + if (ret_code == I40E_SUCCESS) { + sr_reg = rd32(hw, I40E_GLNVM_SRDATA); + *data = (u16)((sr_reg & + I40E_GLNVM_SRDATA_RDDATA_MASK) + >> I40E_GLNVM_SRDATA_RDDATA_SHIFT); + } + } + if (ret_code != I40E_SUCCESS) + i40e_debug(hw, I40E_DEBUG_NVM, + "NVM read error: Couldn't access Shadow RAM address: 0x%x\n", + offset); + +read_nvm_exit: + return ret_code; +} + +/** + * i40e_read_nvm_aq - Read Shadow RAM. + * @hw: pointer to the HW structure. 
+ * @module_pointer: module pointer location in words from the NVM beginning + * @offset: offset in words from module start + * @words: number of words to write + * @data: buffer with words to write to the Shadow RAM + * @last_command: tells the AdminQ that this is the last command + * + * Writes a 16 bit words buffer to the Shadow RAM using the admin command. + **/ +STATIC enum i40e_status_code i40e_read_nvm_aq(struct i40e_hw *hw, + u8 module_pointer, u32 offset, + u16 words, void *data, + bool last_command) +{ + enum i40e_status_code ret_code = I40E_ERR_NVM; + struct i40e_asq_cmd_details cmd_details; + + DEBUGFUNC("i40e_read_nvm_aq"); + + memset(&cmd_details, 0, sizeof(cmd_details)); + cmd_details.wb_desc = &hw->nvm_wb_desc; + + /* Here we are checking the SR limit only for the flat memory model. + * We cannot do it for the module-based model, as we did not acquire + * the NVM resource yet (we cannot get the module pointer value). + * Firmware will check the module-based model. + */ + if ((offset + words) > hw->nvm.sr_size) + i40e_debug(hw, I40E_DEBUG_NVM, + "NVM write error: offset %d beyond Shadow RAM limit %d\n", + (offset + words), hw->nvm.sr_size); + else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS) + /* We can write only up to 4KB (one sector), in one AQ write */ + i40e_debug(hw, I40E_DEBUG_NVM, + "NVM write fail error: tried to write %d words, limit is %d.\n", + words, I40E_SR_SECTOR_SIZE_IN_WORDS); + else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS) + != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS)) + /* A single write cannot spread over two sectors */ + i40e_debug(hw, I40E_DEBUG_NVM, + "NVM write error: cannot spread over two sectors in a single write offset=%d words=%d\n", + offset, words); + else + ret_code = i40e_aq_read_nvm(hw, module_pointer, + 2 * offset, /*bytes*/ + 2 * words, /*bytes*/ + data, last_command, &cmd_details); + + return ret_code; +} + +/** + * i40e_read_nvm_word_aq - Reads Shadow RAM via AQ + * @hw: pointer to the HW structure + * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF) + * @data: word read from the Shadow RAM + * + * Reads one 16 bit word from the Shadow RAM using the AdminQ + **/ +STATIC enum i40e_status_code i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset, + u16 *data) +{ + enum i40e_status_code ret_code = I40E_ERR_TIMEOUT; + + DEBUGFUNC("i40e_read_nvm_word_aq"); + + ret_code = i40e_read_nvm_aq(hw, 0x0, offset, 1, data, true); + *data = LE16_TO_CPU(*(__le16 *)data); + + return ret_code; +} + +/** + * __i40e_read_nvm_word - Reads NVM word, assumes caller does the locking + * @hw: pointer to the HW structure + * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF) + * @data: word read from the Shadow RAM + * + * Reads one 16 bit word from the Shadow RAM. + * + * Do not use this function except in cases where the nvm lock is already + * taken via i40e_acquire_nvm(). + **/ +enum i40e_status_code __i40e_read_nvm_word(struct i40e_hw *hw, + u16 offset, + u16 *data) +{ + + if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) + return i40e_read_nvm_word_aq(hw, offset, data); + + return i40e_read_nvm_word_srctl(hw, offset, data); +} + +/** + * i40e_read_nvm_word - Reads NVM word, acquires lock if necessary + * @hw: pointer to the HW structure + * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF) + * @data: word read from the Shadow RAM + * + * Reads one 16 bit word from the Shadow RAM. 
+ **/ +enum i40e_status_code i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, + u16 *data) +{ + enum i40e_status_code ret_code = I40E_SUCCESS; + + if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK) + ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); + + if (ret_code) + return ret_code; + ret_code = __i40e_read_nvm_word(hw, offset, data); + + if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK) + i40e_release_nvm(hw); + return ret_code; +} + +/** + * i40e_read_nvm_module_data - Reads NVM Buffer to specified memory location + * @hw: Pointer to the HW structure + * @module_ptr: Pointer to module in words with respect to NVM beginning + * @module_offset: Offset in words from module start + * @data_offset: Offset in words from reading data area start + * @words_data_size: Words to read from NVM + * @data_ptr: Pointer to memory location where resulting buffer will be stored + **/ +enum i40e_status_code +i40e_read_nvm_module_data(struct i40e_hw *hw, u8 module_ptr, u16 module_offset, + u16 data_offset, u16 words_data_size, u16 *data_ptr) +{ + enum i40e_status_code status; + u16 specific_ptr = 0; + u16 ptr_value = 0; + u16 offset = 0; + + if (module_ptr != 0) { + status = i40e_read_nvm_word(hw, module_ptr, &ptr_value); + if (status != I40E_SUCCESS) { + i40e_debug(hw, I40E_DEBUG_ALL, + "Reading nvm word failed.Error code: %d.\n", + status); + return I40E_ERR_NVM; + } + } +#define I40E_NVM_INVALID_PTR_VAL 0x7FFF +#define I40E_NVM_INVALID_VAL 0xFFFF + + /* Pointer not initialized */ + if (ptr_value == I40E_NVM_INVALID_PTR_VAL || + ptr_value == I40E_NVM_INVALID_VAL) { + i40e_debug(hw, I40E_DEBUG_ALL, "Pointer not initialized.\n"); + return I40E_ERR_BAD_PTR; + } + + /* Check whether the module is in SR mapped area or outside */ + if (ptr_value & I40E_PTR_TYPE) { + /* Pointer points outside of the Shared RAM mapped area */ + i40e_debug(hw, I40E_DEBUG_ALL, + "Reading nvm data failed. Pointer points outside of the Shared RAM mapped area.\n"); + + return I40E_ERR_PARAM; + } else { + /* Read from the Shadow RAM */ + + status = i40e_read_nvm_word(hw, ptr_value + module_offset, + &specific_ptr); + if (status != I40E_SUCCESS) { + i40e_debug(hw, I40E_DEBUG_ALL, + "Reading nvm word failed.Error code: %d.\n", + status); + return I40E_ERR_NVM; + } + + offset = ptr_value + module_offset + specific_ptr + + data_offset; + + status = i40e_read_nvm_buffer(hw, offset, &words_data_size, + data_ptr); + if (status != I40E_SUCCESS) { + i40e_debug(hw, I40E_DEBUG_ALL, + "Reading nvm buffer failed.Error code: %d.\n", + status); + } + } + + return status; +} + +/** + * i40e_read_nvm_buffer_srctl - Reads Shadow RAM buffer via SRCTL register + * @hw: pointer to the HW structure + * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF). + * @words: (in) number of words to read; (out) number of words actually read + * @data: words read from the Shadow RAM + * + * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd() + * method. The buffer read is preceded by the NVM ownership take + * and followed by the release. 
+ **/ +STATIC enum i40e_status_code i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset, + u16 *words, u16 *data) +{ + enum i40e_status_code ret_code = I40E_SUCCESS; + u16 index, word; + + DEBUGFUNC("i40e_read_nvm_buffer_srctl"); + + /* Loop through the selected region */ + for (word = 0; word < *words; word++) { + index = offset + word; + ret_code = i40e_read_nvm_word_srctl(hw, index, &data[word]); + if (ret_code != I40E_SUCCESS) + break; + } + + /* Update the number of words read from the Shadow RAM */ + *words = word; + + return ret_code; +} + +/** + * i40e_read_nvm_buffer_aq - Reads Shadow RAM buffer via AQ + * @hw: pointer to the HW structure + * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF). + * @words: (in) number of words to read; (out) number of words actually read + * @data: words read from the Shadow RAM + * + * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_aq() + * method. The buffer read is preceded by the NVM ownership take + * and followed by the release. + **/ +STATIC enum i40e_status_code i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset, + u16 *words, u16 *data) +{ + enum i40e_status_code ret_code; + u16 read_size = *words; + bool last_cmd = false; + u16 words_read = 0; + u16 i = 0; + + DEBUGFUNC("i40e_read_nvm_buffer_aq"); + + do { + /* Calculate number of bytes we should read in this step. + * FVL AQ do not allow to read more than one page at a time or + * to cross page boundaries. + */ + if (offset % I40E_SR_SECTOR_SIZE_IN_WORDS) + read_size = min(*words, + (u16)(I40E_SR_SECTOR_SIZE_IN_WORDS - + (offset % I40E_SR_SECTOR_SIZE_IN_WORDS))); + else + read_size = min((*words - words_read), + I40E_SR_SECTOR_SIZE_IN_WORDS); + + /* Check if this is last command, if so set proper flag */ + if ((words_read + read_size) >= *words) + last_cmd = true; + + ret_code = i40e_read_nvm_aq(hw, 0x0, offset, read_size, + data + words_read, last_cmd); + if (ret_code != I40E_SUCCESS) + goto read_nvm_buffer_aq_exit; + + /* Increment counter for words already read and move offset to + * new read location + */ + words_read += read_size; + offset += read_size; + } while (words_read < *words); + + for (i = 0; i < *words; i++) + data[i] = LE16_TO_CPU(((__le16 *)data)[i]); + +read_nvm_buffer_aq_exit: + *words = words_read; + return ret_code; +} + +/** + * __i40e_read_nvm_buffer - Reads NVM buffer, caller must acquire lock + * @hw: pointer to the HW structure + * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF). + * @words: (in) number of words to read; (out) number of words actually read + * @data: words read from the Shadow RAM + * + * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd() + * method. + **/ +enum i40e_status_code __i40e_read_nvm_buffer(struct i40e_hw *hw, + u16 offset, + u16 *words, u16 *data) +{ + if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) + return i40e_read_nvm_buffer_aq(hw, offset, words, data); + + return i40e_read_nvm_buffer_srctl(hw, offset, words, data); +} + +/** + * i40e_read_nvm_buffer - Reads Shadow RAM buffer and acquire lock if necessary + * @hw: pointer to the HW structure + * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF). + * @words: (in) number of words to read; (out) number of words actually read + * @data: words read from the Shadow RAM + * + * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd() + * method. The buffer read is preceded by the NVM ownership take + * and followed by the release. 
+ **/ +enum i40e_status_code i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, + u16 *words, u16 *data) +{ + enum i40e_status_code ret_code = I40E_SUCCESS; + + if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) { + ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); + if (!ret_code) { + ret_code = i40e_read_nvm_buffer_aq(hw, offset, words, + data); + i40e_release_nvm(hw); + } + } else { + ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data); + } + + return ret_code; +} + +/** + * i40e_write_nvm_aq - Writes Shadow RAM. + * @hw: pointer to the HW structure. + * @module_pointer: module pointer location in words from the NVM beginning + * @offset: offset in words from module start + * @words: number of words to write + * @data: buffer with words to write to the Shadow RAM + * @last_command: tells the AdminQ that this is the last command + * + * Writes a 16 bit words buffer to the Shadow RAM using the admin command. + **/ +enum i40e_status_code i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer, + u32 offset, u16 words, void *data, + bool last_command) +{ + enum i40e_status_code ret_code = I40E_ERR_NVM; + struct i40e_asq_cmd_details cmd_details; + + DEBUGFUNC("i40e_write_nvm_aq"); + + memset(&cmd_details, 0, sizeof(cmd_details)); + cmd_details.wb_desc = &hw->nvm_wb_desc; + + /* Here we are checking the SR limit only for the flat memory model. + * We cannot do it for the module-based model, as we did not acquire + * the NVM resource yet (we cannot get the module pointer value). + * Firmware will check the module-based model. + */ + if ((offset + words) > hw->nvm.sr_size) + DEBUGOUT("NVM write error: offset beyond Shadow RAM limit.\n"); + else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS) + /* We can write only up to 4KB (one sector), in one AQ write */ + DEBUGOUT("NVM write fail error: cannot write more than 4KB in a single write.\n"); + else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS) + != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS)) + /* A single write cannot spread over two sectors */ + DEBUGOUT("NVM write error: cannot spread over two sectors in a single write.\n"); + else + ret_code = i40e_aq_update_nvm(hw, module_pointer, + 2 * offset, /*bytes*/ + 2 * words, /*bytes*/ + data, last_command, 0, + &cmd_details); + + return ret_code; +} + +/** + * __i40e_write_nvm_word - Writes Shadow RAM word + * @hw: pointer to the HW structure + * @offset: offset of the Shadow RAM word to write + * @data: word to write to the Shadow RAM + * + * Writes a 16 bit word to the SR using the i40e_write_nvm_aq() method. + * NVM ownership have to be acquired and released (on ARQ completion event + * reception) by caller. To commit SR to NVM update checksum function + * should be called. + **/ +enum i40e_status_code __i40e_write_nvm_word(struct i40e_hw *hw, u32 offset, + void *data) +{ + DEBUGFUNC("i40e_write_nvm_word"); + + *((__le16 *)data) = CPU_TO_LE16(*((u16 *)data)); + + /* Value 0x00 below means that we treat SR as a flat mem */ + return i40e_write_nvm_aq(hw, 0x00, offset, 1, data, false); +} + +/** + * __i40e_write_nvm_buffer - Writes Shadow RAM buffer + * @hw: pointer to the HW structure + * @module_pointer: module pointer location in words from the NVM beginning + * @offset: offset of the Shadow RAM buffer to write + * @words: number of words to write + * @data: words to write to the Shadow RAM + * + * Writes a 16 bit words buffer to the Shadow RAM using the admin command. 
+ * NVM ownership must be acquired before calling this function and released + * on ARQ completion event reception by caller. To commit SR to NVM update + * checksum function should be called. + **/ +enum i40e_status_code __i40e_write_nvm_buffer(struct i40e_hw *hw, + u8 module_pointer, u32 offset, + u16 words, void *data) +{ + __le16 *le_word_ptr = (__le16 *)data; + u16 *word_ptr = (u16 *)data; + u32 i = 0; + + DEBUGFUNC("i40e_write_nvm_buffer"); + + for (i = 0; i < words; i++) + le_word_ptr[i] = CPU_TO_LE16(word_ptr[i]); + + /* Here we will only write one buffer as the size of the modules + * mirrored in the Shadow RAM is always less than 4K. + */ + return i40e_write_nvm_aq(hw, module_pointer, offset, words, + data, false); +} + +/** + * i40e_calc_nvm_checksum - Calculates and returns the checksum + * @hw: pointer to hardware structure + * @checksum: pointer to the checksum + * + * This function calculates SW Checksum that covers the whole 64kB shadow RAM + * except the VPD and PCIe ALT Auto-load modules. The structure and size of VPD + * is customer specific and unknown. Therefore, this function skips all maximum + * possible size of VPD (1kB). + **/ +enum i40e_status_code i40e_calc_nvm_checksum(struct i40e_hw *hw, u16 *checksum) +{ + enum i40e_status_code ret_code = I40E_SUCCESS; + struct i40e_virt_mem vmem; + u16 pcie_alt_module = 0; + u16 checksum_local = 0; + u16 vpd_module = 0; + u16 *data; + u16 i = 0; + + DEBUGFUNC("i40e_calc_nvm_checksum"); + + ret_code = i40e_allocate_virt_mem(hw, &vmem, + I40E_SR_SECTOR_SIZE_IN_WORDS * sizeof(u16)); + if (ret_code) + goto i40e_calc_nvm_checksum_exit; + data = (u16 *)vmem.va; + + /* read pointer to VPD area */ + ret_code = __i40e_read_nvm_word(hw, I40E_SR_VPD_PTR, &vpd_module); + if (ret_code != I40E_SUCCESS) { + ret_code = I40E_ERR_NVM_CHECKSUM; + goto i40e_calc_nvm_checksum_exit; + } + + /* read pointer to PCIe Alt Auto-load module */ + ret_code = __i40e_read_nvm_word(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR, + &pcie_alt_module); + if (ret_code != I40E_SUCCESS) { + ret_code = I40E_ERR_NVM_CHECKSUM; + goto i40e_calc_nvm_checksum_exit; + } + + /* Calculate SW checksum that covers the whole 64kB shadow RAM + * except the VPD and PCIe ALT Auto-load modules + */ + for (i = 0; i < hw->nvm.sr_size; i++) { + /* Read SR page */ + if ((i % I40E_SR_SECTOR_SIZE_IN_WORDS) == 0) { + u16 words = I40E_SR_SECTOR_SIZE_IN_WORDS; + + ret_code = __i40e_read_nvm_buffer(hw, i, &words, data); + if (ret_code != I40E_SUCCESS) { + ret_code = I40E_ERR_NVM_CHECKSUM; + goto i40e_calc_nvm_checksum_exit; + } + } + + /* Skip Checksum word */ + if (i == I40E_SR_SW_CHECKSUM_WORD) + continue; + /* Skip VPD module (convert byte size to word count) */ + if ((i >= (u32)vpd_module) && + (i < ((u32)vpd_module + + (I40E_SR_VPD_MODULE_MAX_SIZE / 2)))) { + continue; + } + /* Skip PCIe ALT module (convert byte size to word count) */ + if ((i >= (u32)pcie_alt_module) && + (i < ((u32)pcie_alt_module + + (I40E_SR_PCIE_ALT_MODULE_MAX_SIZE / 2)))) { + continue; + } + + checksum_local += data[i % I40E_SR_SECTOR_SIZE_IN_WORDS]; + } + + *checksum = (u16)I40E_SR_SW_CHECKSUM_BASE - checksum_local; + +i40e_calc_nvm_checksum_exit: + i40e_free_virt_mem(hw, &vmem); + return ret_code; +} + +/** + * i40e_update_nvm_checksum - Updates the NVM checksum + * @hw: pointer to hardware structure + * + * NVM ownership must be acquired before calling this function and released + * on ARQ completion event reception by caller. + * This function will commit SR to NVM. 
+ **/ +enum i40e_status_code i40e_update_nvm_checksum(struct i40e_hw *hw) +{ + enum i40e_status_code ret_code = I40E_SUCCESS; + u16 checksum; + __le16 le_sum; + + DEBUGFUNC("i40e_update_nvm_checksum"); + + ret_code = i40e_calc_nvm_checksum(hw, &checksum); + le_sum = CPU_TO_LE16(checksum); + if (ret_code == I40E_SUCCESS) + ret_code = i40e_write_nvm_aq(hw, 0x00, I40E_SR_SW_CHECKSUM_WORD, + 1, &le_sum, true); + + return ret_code; +} + +/** + * i40e_validate_nvm_checksum - Validate EEPROM checksum + * @hw: pointer to hardware structure + * @checksum: calculated checksum + * + * Performs checksum calculation and validates the NVM SW checksum. If the + * caller does not need checksum, the value can be NULL. + **/ +enum i40e_status_code i40e_validate_nvm_checksum(struct i40e_hw *hw, + u16 *checksum) +{ + enum i40e_status_code ret_code = I40E_SUCCESS; + u16 checksum_sr = 0; + u16 checksum_local = 0; + + DEBUGFUNC("i40e_validate_nvm_checksum"); + + /* We must acquire the NVM lock in order to correctly synchronize the + * NVM accesses across multiple PFs. Without doing so it is possible + * for one of the PFs to read invalid data potentially indicating that + * the checksum is invalid. + */ + ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); + if (ret_code) + return ret_code; + ret_code = i40e_calc_nvm_checksum(hw, &checksum_local); + __i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr); + i40e_release_nvm(hw); + if (ret_code) + return ret_code; + + /* Verify read checksum from EEPROM is the same as + * calculated checksum + */ + if (checksum_local != checksum_sr) + ret_code = I40E_ERR_NVM_CHECKSUM; + + /* If the user cares, return the calculated checksum */ + if (checksum) + *checksum = checksum_local; + + return ret_code; +} + +STATIC enum i40e_status_code i40e_nvmupd_state_init(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *perrno); +STATIC enum i40e_status_code i40e_nvmupd_state_reading(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *perrno); +STATIC enum i40e_status_code i40e_nvmupd_state_writing(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *perrno); +STATIC enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + int *perrno); +STATIC enum i40e_status_code i40e_nvmupd_nvm_erase(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + int *perrno); +STATIC enum i40e_status_code i40e_nvmupd_nvm_write(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *perrno); +STATIC enum i40e_status_code i40e_nvmupd_nvm_read(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *perrno); +STATIC enum i40e_status_code i40e_nvmupd_exec_aq(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *perrno); +STATIC enum i40e_status_code i40e_nvmupd_get_aq_result(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *perrno); +STATIC enum i40e_status_code i40e_nvmupd_get_aq_event(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *perrno); +STATIC INLINE u8 i40e_nvmupd_get_module(u32 val) +{ + return (u8)(val & I40E_NVM_MOD_PNT_MASK); +} +STATIC INLINE u8 i40e_nvmupd_get_transaction(u32 val) +{ + return (u8)((val & I40E_NVM_TRANS_MASK) >> I40E_NVM_TRANS_SHIFT); +} + +STATIC INLINE u8 i40e_nvmupd_get_preservation_flags(u32 val) +{ + return (u8)((val & I40E_NVM_PRESERVATION_FLAGS_MASK) >> + I40E_NVM_PRESERVATION_FLAGS_SHIFT); +} + +STATIC const char *i40e_nvm_update_state_str[] = { + "I40E_NVMUPD_INVALID", + 
"I40E_NVMUPD_READ_CON", + "I40E_NVMUPD_READ_SNT", + "I40E_NVMUPD_READ_LCB", + "I40E_NVMUPD_READ_SA", + "I40E_NVMUPD_WRITE_ERA", + "I40E_NVMUPD_WRITE_CON", + "I40E_NVMUPD_WRITE_SNT", + "I40E_NVMUPD_WRITE_LCB", + "I40E_NVMUPD_WRITE_SA", + "I40E_NVMUPD_CSUM_CON", + "I40E_NVMUPD_CSUM_SA", + "I40E_NVMUPD_CSUM_LCB", + "I40E_NVMUPD_STATUS", + "I40E_NVMUPD_EXEC_AQ", + "I40E_NVMUPD_GET_AQ_RESULT", + "I40E_NVMUPD_GET_AQ_EVENT", + "I40E_NVMUPD_GET_FEATURES", +}; + +/** + * i40e_nvmupd_command - Process an NVM update command + * @hw: pointer to hardware structure + * @cmd: pointer to nvm update command + * @bytes: pointer to the data buffer + * @perrno: pointer to return error code + * + * Dispatches command depending on what update state is current + **/ +enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *perrno) +{ + enum i40e_status_code status; + enum i40e_nvmupd_cmd upd_cmd; + + DEBUGFUNC("i40e_nvmupd_command"); + + /* assume success */ + *perrno = 0; + + /* early check for status command and debug msgs */ + upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno); + + i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d opc 0x%04x cmd 0x%08x config 0x%08x offset 0x%08x data_size 0x%08x\n", + i40e_nvm_update_state_str[upd_cmd], + hw->nvmupd_state, + hw->nvm_release_on_done, hw->nvm_wait_opcode, + cmd->command, cmd->config, cmd->offset, cmd->data_size); + + if (upd_cmd == I40E_NVMUPD_INVALID) { + *perrno = -EFAULT; + i40e_debug(hw, I40E_DEBUG_NVM, + "i40e_nvmupd_validate_command returns %d errno %d\n", + upd_cmd, *perrno); + } + + /* a status request returns immediately rather than + * going into the state machine + */ + if (upd_cmd == I40E_NVMUPD_STATUS) { + if (!cmd->data_size) { + *perrno = -EFAULT; + return I40E_ERR_BUF_TOO_SHORT; + } + + bytes[0] = hw->nvmupd_state; + + if (cmd->data_size >= 4) { + bytes[1] = 0; + *((u16 *)&bytes[2]) = hw->nvm_wait_opcode; + } + + /* Clear error status on read */ + if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR) + hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; + + return I40E_SUCCESS; + } + + /* + * A supported features request returns immediately + * rather than going into state machine + */ + if (upd_cmd == I40E_NVMUPD_FEATURES) { + if (cmd->data_size < hw->nvmupd_features.size) { + *perrno = -EFAULT; + return I40E_ERR_BUF_TOO_SHORT; + } + + /* + * If buffer is bigger than i40e_nvmupd_features structure, + * make sure the trailing bytes are set to 0x0. + */ + if (cmd->data_size > hw->nvmupd_features.size) + i40e_memset(bytes + hw->nvmupd_features.size, 0x0, + cmd->data_size - hw->nvmupd_features.size, + I40E_NONDMA_MEM); + + i40e_memcpy(bytes, &hw->nvmupd_features, + hw->nvmupd_features.size, I40E_NONDMA_MEM); + + return I40E_SUCCESS; + } + + /* Clear status even it is not read and log */ + if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR) { + i40e_debug(hw, I40E_DEBUG_NVM, + "Clearing I40E_NVMUPD_STATE_ERROR state without reading\n"); + hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; + } + + /* Acquire lock to prevent race condition where adminq_task + * can execute after i40e_nvmupd_nvm_read/write but before state + * variables (nvm_wait_opcode, nvm_release_on_done) are updated. + * + * During NVMUpdate, it is observed that lock could be held for + * ~5ms for most commands. However lock is held for ~60ms for + * NVMUPD_CSUM_LCB command. 
+ */ + i40e_acquire_spinlock(&hw->aq.arq_spinlock); + switch (hw->nvmupd_state) { + case I40E_NVMUPD_STATE_INIT: + status = i40e_nvmupd_state_init(hw, cmd, bytes, perrno); + break; + + case I40E_NVMUPD_STATE_READING: + status = i40e_nvmupd_state_reading(hw, cmd, bytes, perrno); + break; + + case I40E_NVMUPD_STATE_WRITING: + status = i40e_nvmupd_state_writing(hw, cmd, bytes, perrno); + break; + + case I40E_NVMUPD_STATE_INIT_WAIT: + case I40E_NVMUPD_STATE_WRITE_WAIT: + /* if we need to stop waiting for an event, clear + * the wait info and return before doing anything else + */ + if (cmd->offset == 0xffff) { + i40e_nvmupd_clear_wait_state(hw); + status = I40E_SUCCESS; + break; + } + + status = I40E_ERR_NOT_READY; + *perrno = -EBUSY; + break; + + default: + /* invalid state, should never happen */ + i40e_debug(hw, I40E_DEBUG_NVM, + "NVMUPD: no such state %d\n", hw->nvmupd_state); + status = I40E_NOT_SUPPORTED; + *perrno = -ESRCH; + break; + } + + i40e_release_spinlock(&hw->aq.arq_spinlock); + return status; +} + +/** + * i40e_nvmupd_state_init - Handle NVM update state Init + * @hw: pointer to hardware structure + * @cmd: pointer to nvm update command buffer + * @bytes: pointer to the data buffer + * @perrno: pointer to return error code + * + * Process legitimate commands of the Init state and conditionally set next + * state. Reject all other commands. + **/ +STATIC enum i40e_status_code i40e_nvmupd_state_init(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *perrno) +{ + enum i40e_status_code status = I40E_SUCCESS; + enum i40e_nvmupd_cmd upd_cmd; + + DEBUGFUNC("i40e_nvmupd_state_init"); + + upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno); + + switch (upd_cmd) { + case I40E_NVMUPD_READ_SA: + status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); + if (status) { + *perrno = i40e_aq_rc_to_posix(status, + hw->aq.asq_last_status); + } else { + status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno); + i40e_release_nvm(hw); + } + break; + + case I40E_NVMUPD_READ_SNT: + status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); + if (status) { + *perrno = i40e_aq_rc_to_posix(status, + hw->aq.asq_last_status); + } else { + status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno); + if (status) + i40e_release_nvm(hw); + else + hw->nvmupd_state = I40E_NVMUPD_STATE_READING; + } + break; + + case I40E_NVMUPD_WRITE_ERA: + status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE); + if (status) { + *perrno = i40e_aq_rc_to_posix(status, + hw->aq.asq_last_status); + } else { + status = i40e_nvmupd_nvm_erase(hw, cmd, perrno); + if (status) { + i40e_release_nvm(hw); + } else { + hw->nvm_release_on_done = true; + hw->nvm_wait_opcode = i40e_aqc_opc_nvm_erase; + hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT; + } + } + break; + + case I40E_NVMUPD_WRITE_SA: + status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE); + if (status) { + *perrno = i40e_aq_rc_to_posix(status, + hw->aq.asq_last_status); + } else { + status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno); + if (status) { + i40e_release_nvm(hw); + } else { + hw->nvm_release_on_done = true; + hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update; + hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT; + } + } + break; + + case I40E_NVMUPD_WRITE_SNT: + status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE); + if (status) { + *perrno = i40e_aq_rc_to_posix(status, + hw->aq.asq_last_status); + } else { + status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno); + if (status) { + i40e_release_nvm(hw); + } else { + hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update; + hw->nvmupd_state = 
I40E_NVMUPD_STATE_WRITE_WAIT; + } + } + break; + + case I40E_NVMUPD_CSUM_SA: + status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE); + if (status) { + *perrno = i40e_aq_rc_to_posix(status, + hw->aq.asq_last_status); + } else { + status = i40e_update_nvm_checksum(hw); + if (status) { + *perrno = hw->aq.asq_last_status ? + i40e_aq_rc_to_posix(status, + hw->aq.asq_last_status) : + -EIO; + i40e_release_nvm(hw); + } else { + hw->nvm_release_on_done = true; + hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update; + hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT; + } + } + break; + + case I40E_NVMUPD_EXEC_AQ: + status = i40e_nvmupd_exec_aq(hw, cmd, bytes, perrno); + break; + + case I40E_NVMUPD_GET_AQ_RESULT: + status = i40e_nvmupd_get_aq_result(hw, cmd, bytes, perrno); + break; + + case I40E_NVMUPD_GET_AQ_EVENT: + status = i40e_nvmupd_get_aq_event(hw, cmd, bytes, perrno); + break; + + default: + i40e_debug(hw, I40E_DEBUG_NVM, + "NVMUPD: bad cmd %s in init state\n", + i40e_nvm_update_state_str[upd_cmd]); + status = I40E_ERR_NVM; + *perrno = -ESRCH; + break; + } + return status; +} + +/** + * i40e_nvmupd_state_reading - Handle NVM update state Reading + * @hw: pointer to hardware structure + * @cmd: pointer to nvm update command buffer + * @bytes: pointer to the data buffer + * @perrno: pointer to return error code + * + * NVM ownership is already held. Process legitimate commands and set any + * change in state; reject all other commands. + **/ +STATIC enum i40e_status_code i40e_nvmupd_state_reading(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *perrno) +{ + enum i40e_status_code status = I40E_SUCCESS; + enum i40e_nvmupd_cmd upd_cmd; + + DEBUGFUNC("i40e_nvmupd_state_reading"); + + upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno); + + switch (upd_cmd) { + case I40E_NVMUPD_READ_SA: + case I40E_NVMUPD_READ_CON: + status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno); + break; + + case I40E_NVMUPD_READ_LCB: + status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno); + i40e_release_nvm(hw); + hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; + break; + + default: + i40e_debug(hw, I40E_DEBUG_NVM, + "NVMUPD: bad cmd %s in reading state.\n", + i40e_nvm_update_state_str[upd_cmd]); + status = I40E_NOT_SUPPORTED; + *perrno = -ESRCH; + break; + } + return status; +} + +/** + * i40e_nvmupd_state_writing - Handle NVM update state Writing + * @hw: pointer to hardware structure + * @cmd: pointer to nvm update command buffer + * @bytes: pointer to the data buffer + * @perrno: pointer to return error code + * + * NVM ownership is already held. Process legitimate commands and set any + * change in state; reject all other commands + **/ +STATIC enum i40e_status_code i40e_nvmupd_state_writing(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *perrno) +{ + enum i40e_status_code status = I40E_SUCCESS; + enum i40e_nvmupd_cmd upd_cmd; + bool retry_attempt = false; + + DEBUGFUNC("i40e_nvmupd_state_writing"); + + upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno); + +retry: + switch (upd_cmd) { + case I40E_NVMUPD_WRITE_CON: + status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno); + if (!status) { + hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update; + hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT; + } + break; + + case I40E_NVMUPD_WRITE_LCB: + status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno); + if (status) { + *perrno = hw->aq.asq_last_status ? 
+ i40e_aq_rc_to_posix(status, + hw->aq.asq_last_status) : + -EIO; + hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; + } else { + hw->nvm_release_on_done = true; + hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update; + hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT; + } + break; + + case I40E_NVMUPD_CSUM_CON: + /* Assumes the caller has acquired the nvm */ + status = i40e_update_nvm_checksum(hw); + if (status) { + *perrno = hw->aq.asq_last_status ? + i40e_aq_rc_to_posix(status, + hw->aq.asq_last_status) : + -EIO; + hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; + } else { + hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update; + hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT; + } + break; + + case I40E_NVMUPD_CSUM_LCB: + /* Assumes the caller has acquired the nvm */ + status = i40e_update_nvm_checksum(hw); + if (status) { + *perrno = hw->aq.asq_last_status ? + i40e_aq_rc_to_posix(status, + hw->aq.asq_last_status) : + -EIO; + hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; + } else { + hw->nvm_release_on_done = true; + hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update; + hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT; + } + break; + + default: + i40e_debug(hw, I40E_DEBUG_NVM, + "NVMUPD: bad cmd %s in writing state.\n", + i40e_nvm_update_state_str[upd_cmd]); + status = I40E_NOT_SUPPORTED; + *perrno = -ESRCH; + break; + } + + /* In some circumstances, a multi-write transaction takes longer + * than the default 3 minute timeout on the write semaphore. If + * the write failed with an EBUSY status, this is likely the problem, + * so here we try to reacquire the semaphore then retry the write. + * We only do one retry, then give up. + */ + if (status && (hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) && + !retry_attempt) { + enum i40e_status_code old_status = status; + u32 old_asq_status = hw->aq.asq_last_status; + u32 gtime; + + gtime = rd32(hw, I40E_GLVFGEN_TIMER); + if (gtime >= hw->nvm.hw_semaphore_timeout) { + i40e_debug(hw, I40E_DEBUG_ALL, + "NVMUPD: write semaphore expired (%d >= %lld), retrying\n", + gtime, hw->nvm.hw_semaphore_timeout); + i40e_release_nvm(hw); + status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE); + if (status) { + i40e_debug(hw, I40E_DEBUG_ALL, + "NVMUPD: write semaphore reacquire failed aq_err = %d\n", + hw->aq.asq_last_status); + status = old_status; + hw->aq.asq_last_status = old_asq_status; + } else { + retry_attempt = true; + goto retry; + } + } + } + + return status; +} + +/** + * i40e_nvmupd_clear_wait_state - clear wait state on hw + * @hw: pointer to the hardware structure + **/ +void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw) +{ + i40e_debug(hw, I40E_DEBUG_NVM, + "NVMUPD: clearing wait on opcode 0x%04x\n", + hw->nvm_wait_opcode); + + if (hw->nvm_release_on_done) { + i40e_release_nvm(hw); + hw->nvm_release_on_done = false; + } + hw->nvm_wait_opcode = 0; + + if (hw->aq.arq_last_status) { + hw->nvmupd_state = I40E_NVMUPD_STATE_ERROR; + return; + } + + switch (hw->nvmupd_state) { + case I40E_NVMUPD_STATE_INIT_WAIT: + hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; + break; + + case I40E_NVMUPD_STATE_WRITE_WAIT: + hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING; + break; + + default: + break; + } +} + +/** + * i40e_nvmupd_check_wait_event - handle NVM update operation events + * @hw: pointer to the hardware structure + * @opcode: the event that just happened + * @desc: AdminQ descriptor + **/ +void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode, + struct i40e_aq_desc *desc) +{ + u32 aq_desc_len = sizeof(struct i40e_aq_desc); + + if (opcode == hw->nvm_wait_opcode) { + 
i40e_memcpy(&hw->nvm_aq_event_desc, desc, + aq_desc_len, I40E_NONDMA_TO_NONDMA); + i40e_nvmupd_clear_wait_state(hw); + } +} + +/** + * i40e_nvmupd_validate_command - Validate given command + * @hw: pointer to hardware structure + * @cmd: pointer to nvm update command buffer + * @perrno: pointer to return error code + * + * Return one of the valid command types or I40E_NVMUPD_INVALID + **/ +STATIC enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + int *perrno) +{ + enum i40e_nvmupd_cmd upd_cmd; + u8 module, transaction; + + DEBUGFUNC("i40e_nvmupd_validate_command\n"); + + /* anything that doesn't match a recognized case is an error */ + upd_cmd = I40E_NVMUPD_INVALID; + + transaction = i40e_nvmupd_get_transaction(cmd->config); + module = i40e_nvmupd_get_module(cmd->config); + + /* limits on data size */ + if ((cmd->data_size < 1) || + (cmd->data_size > I40E_NVMUPD_MAX_DATA)) { + i40e_debug(hw, I40E_DEBUG_NVM, + "i40e_nvmupd_validate_command data_size %d\n", + cmd->data_size); + *perrno = -EFAULT; + return I40E_NVMUPD_INVALID; + } + + switch (cmd->command) { + case I40E_NVM_READ: + switch (transaction) { + case I40E_NVM_CON: + upd_cmd = I40E_NVMUPD_READ_CON; + break; + case I40E_NVM_SNT: + upd_cmd = I40E_NVMUPD_READ_SNT; + break; + case I40E_NVM_LCB: + upd_cmd = I40E_NVMUPD_READ_LCB; + break; + case I40E_NVM_SA: + upd_cmd = I40E_NVMUPD_READ_SA; + break; + case I40E_NVM_EXEC: + switch (module) { + case I40E_NVM_EXEC_GET_AQ_RESULT: + upd_cmd = I40E_NVMUPD_GET_AQ_RESULT; + break; + case I40E_NVM_EXEC_FEATURES: + upd_cmd = I40E_NVMUPD_FEATURES; + break; + case I40E_NVM_EXEC_STATUS: + upd_cmd = I40E_NVMUPD_STATUS; + break; + default: + *perrno = -EFAULT; + return I40E_NVMUPD_INVALID; + } + break; + case I40E_NVM_AQE: + upd_cmd = I40E_NVMUPD_GET_AQ_EVENT; + break; + } + break; + + case I40E_NVM_WRITE: + switch (transaction) { + case I40E_NVM_CON: + upd_cmd = I40E_NVMUPD_WRITE_CON; + break; + case I40E_NVM_SNT: + upd_cmd = I40E_NVMUPD_WRITE_SNT; + break; + case I40E_NVM_LCB: + upd_cmd = I40E_NVMUPD_WRITE_LCB; + break; + case I40E_NVM_SA: + upd_cmd = I40E_NVMUPD_WRITE_SA; + break; + case I40E_NVM_ERA: + upd_cmd = I40E_NVMUPD_WRITE_ERA; + break; + case I40E_NVM_CSUM: + upd_cmd = I40E_NVMUPD_CSUM_CON; + break; + case (I40E_NVM_CSUM|I40E_NVM_SA): + upd_cmd = I40E_NVMUPD_CSUM_SA; + break; + case (I40E_NVM_CSUM|I40E_NVM_LCB): + upd_cmd = I40E_NVMUPD_CSUM_LCB; + break; + case I40E_NVM_EXEC: + if (module == 0) + upd_cmd = I40E_NVMUPD_EXEC_AQ; + break; + } + break; + } + + return upd_cmd; +} + +/** + * i40e_nvmupd_exec_aq - Run an AQ command + * @hw: pointer to hardware structure + * @cmd: pointer to nvm update command buffer + * @bytes: pointer to the data buffer + * @perrno: pointer to return error code + * + * cmd structure contains identifiers and data buffer + **/ +STATIC enum i40e_status_code i40e_nvmupd_exec_aq(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *perrno) +{ + struct i40e_asq_cmd_details cmd_details; + enum i40e_status_code status; + struct i40e_aq_desc *aq_desc; + u32 buff_size = 0; + u8 *buff = NULL; + u32 aq_desc_len; + u32 aq_data_len; + + i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__); + if (cmd->offset == 0xffff) + return I40E_SUCCESS; + + memset(&cmd_details, 0, sizeof(cmd_details)); + cmd_details.wb_desc = &hw->nvm_wb_desc; + + aq_desc_len = sizeof(struct i40e_aq_desc); + memset(&hw->nvm_wb_desc, 0, aq_desc_len); + + /* get the aq descriptor */ + if (cmd->data_size < aq_desc_len) { + i40e_debug(hw, 
I40E_DEBUG_NVM, + "NVMUPD: not enough aq desc bytes for exec, size %d < %d\n", + cmd->data_size, aq_desc_len); + *perrno = -EINVAL; + return I40E_ERR_PARAM; + } + aq_desc = (struct i40e_aq_desc *)bytes; + + /* if data buffer needed, make sure it's ready */ + aq_data_len = cmd->data_size - aq_desc_len; + buff_size = max(aq_data_len, (u32)LE16_TO_CPU(aq_desc->datalen)); + if (buff_size) { + if (!hw->nvm_buff.va) { + status = i40e_allocate_virt_mem(hw, &hw->nvm_buff, + hw->aq.asq_buf_size); + if (status) + i40e_debug(hw, I40E_DEBUG_NVM, + "NVMUPD: i40e_allocate_virt_mem for exec buff failed, %d\n", + status); + } + + if (hw->nvm_buff.va) { + buff = hw->nvm_buff.va; + i40e_memcpy(buff, &bytes[aq_desc_len], aq_data_len, + I40E_NONDMA_TO_NONDMA); + } + } + + if (cmd->offset) + memset(&hw->nvm_aq_event_desc, 0, aq_desc_len); + + /* and away we go! */ + status = i40e_asq_send_command(hw, aq_desc, buff, + buff_size, &cmd_details); + if (status) { + i40e_debug(hw, I40E_DEBUG_NVM, + "i40e_nvmupd_exec_aq err %s aq_err %s\n", + i40e_stat_str(hw, status), + i40e_aq_str(hw, hw->aq.asq_last_status)); + *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); + return status; + } + + /* should we wait for a followup event? */ + if (cmd->offset) { + hw->nvm_wait_opcode = cmd->offset; + hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT; + } + + return status; +} + +/** + * i40e_nvmupd_get_aq_result - Get the results from the previous exec_aq + * @hw: pointer to hardware structure + * @cmd: pointer to nvm update command buffer + * @bytes: pointer to the data buffer + * @perrno: pointer to return error code + * + * cmd structure contains identifiers and data buffer + **/ +STATIC enum i40e_status_code i40e_nvmupd_get_aq_result(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *perrno) +{ + u32 aq_total_len; + u32 aq_desc_len; + int remainder; + u8 *buff; + + i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__); + + aq_desc_len = sizeof(struct i40e_aq_desc); + aq_total_len = aq_desc_len + LE16_TO_CPU(hw->nvm_wb_desc.datalen); + + /* check offset range */ + if (cmd->offset > aq_total_len) { + i40e_debug(hw, I40E_DEBUG_NVM, "%s: offset too big %d > %d\n", + __func__, cmd->offset, aq_total_len); + *perrno = -EINVAL; + return I40E_ERR_PARAM; + } + + /* check copylength range */ + if (cmd->data_size > (aq_total_len - cmd->offset)) { + int new_len = aq_total_len - cmd->offset; + + i40e_debug(hw, I40E_DEBUG_NVM, "%s: copy length %d too big, trimming to %d\n", + __func__, cmd->data_size, new_len); + cmd->data_size = new_len; + } + + remainder = cmd->data_size; + if (cmd->offset < aq_desc_len) { + u32 len = aq_desc_len - cmd->offset; + + len = min(len, cmd->data_size); + i40e_debug(hw, I40E_DEBUG_NVM, "%s: aq_desc bytes %d to %d\n", + __func__, cmd->offset, cmd->offset + len); + + buff = ((u8 *)&hw->nvm_wb_desc) + cmd->offset; + i40e_memcpy(bytes, buff, len, I40E_NONDMA_TO_NONDMA); + + bytes += len; + remainder -= len; + buff = hw->nvm_buff.va; + } else { + buff = (u8 *)hw->nvm_buff.va + (cmd->offset - aq_desc_len); + } + + if (remainder > 0) { + int start_byte = buff - (u8 *)hw->nvm_buff.va; + + i40e_debug(hw, I40E_DEBUG_NVM, "%s: databuf bytes %d to %d\n", + __func__, start_byte, start_byte + remainder); + i40e_memcpy(bytes, buff, remainder, I40E_NONDMA_TO_NONDMA); + } + + return I40E_SUCCESS; +} + +/** + * i40e_nvmupd_get_aq_event - Get the Admin Queue event from previous exec_aq + * @hw: pointer to hardware structure + * @cmd: pointer to nvm update command buffer + * @bytes: pointer to the 
data buffer + * @perrno: pointer to return error code + * + * cmd structure contains identifiers and data buffer + **/ +STATIC enum i40e_status_code i40e_nvmupd_get_aq_event(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *perrno) +{ + u32 aq_total_len; + u32 aq_desc_len; + + i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__); + + aq_desc_len = sizeof(struct i40e_aq_desc); + aq_total_len = aq_desc_len + LE16_TO_CPU(hw->nvm_aq_event_desc.datalen); + + /* check copylength range */ + if (cmd->data_size > aq_total_len) { + i40e_debug(hw, I40E_DEBUG_NVM, + "%s: copy length %d too big, trimming to %d\n", + __func__, cmd->data_size, aq_total_len); + cmd->data_size = aq_total_len; + } + + i40e_memcpy(bytes, &hw->nvm_aq_event_desc, cmd->data_size, + I40E_NONDMA_TO_NONDMA); + + return I40E_SUCCESS; +} + +/** + * i40e_nvmupd_nvm_read - Read NVM + * @hw: pointer to hardware structure + * @cmd: pointer to nvm update command buffer + * @bytes: pointer to the data buffer + * @perrno: pointer to return error code + * + * cmd structure contains identifiers and data buffer + **/ +STATIC enum i40e_status_code i40e_nvmupd_nvm_read(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *perrno) +{ + struct i40e_asq_cmd_details cmd_details; + enum i40e_status_code status; + u8 module, transaction; + bool last; + + transaction = i40e_nvmupd_get_transaction(cmd->config); + module = i40e_nvmupd_get_module(cmd->config); + last = (transaction == I40E_NVM_LCB) || (transaction == I40E_NVM_SA); + + memset(&cmd_details, 0, sizeof(cmd_details)); + cmd_details.wb_desc = &hw->nvm_wb_desc; + + status = i40e_aq_read_nvm(hw, module, cmd->offset, (u16)cmd->data_size, + bytes, last, &cmd_details); + if (status) { + i40e_debug(hw, I40E_DEBUG_NVM, + "i40e_nvmupd_nvm_read mod 0x%x off 0x%x len 0x%x\n", + module, cmd->offset, cmd->data_size); + i40e_debug(hw, I40E_DEBUG_NVM, + "i40e_nvmupd_nvm_read status %d aq %d\n", + status, hw->aq.asq_last_status); + *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); + } + + return status; +} + +/** + * i40e_nvmupd_nvm_erase - Erase an NVM module + * @hw: pointer to hardware structure + * @cmd: pointer to nvm update command buffer + * @perrno: pointer to return error code + * + * module, offset, data_size and data are in cmd structure + **/ +STATIC enum i40e_status_code i40e_nvmupd_nvm_erase(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + int *perrno) +{ + enum i40e_status_code status = I40E_SUCCESS; + struct i40e_asq_cmd_details cmd_details; + u8 module, transaction; + bool last; + + transaction = i40e_nvmupd_get_transaction(cmd->config); + module = i40e_nvmupd_get_module(cmd->config); + last = (transaction & I40E_NVM_LCB); + + memset(&cmd_details, 0, sizeof(cmd_details)); + cmd_details.wb_desc = &hw->nvm_wb_desc; + + status = i40e_aq_erase_nvm(hw, module, cmd->offset, (u16)cmd->data_size, + last, &cmd_details); + if (status) { + i40e_debug(hw, I40E_DEBUG_NVM, + "i40e_nvmupd_nvm_erase mod 0x%x off 0x%x len 0x%x\n", + module, cmd->offset, cmd->data_size); + i40e_debug(hw, I40E_DEBUG_NVM, + "i40e_nvmupd_nvm_erase status %d aq %d\n", + status, hw->aq.asq_last_status); + *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); + } + + return status; +} + +/** + * i40e_nvmupd_nvm_write - Write NVM + * @hw: pointer to hardware structure + * @cmd: pointer to nvm update command buffer + * @bytes: pointer to the data buffer + * @perrno: pointer to return error code + * + * module, offset, data_size and data are in cmd structure + **/ +STATIC 
enum i40e_status_code i40e_nvmupd_nvm_write(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
+{
+ enum i40e_status_code status = I40E_SUCCESS;
+ struct i40e_asq_cmd_details cmd_details;
+ u8 module, transaction;
+ u8 preservation_flags;
+ bool last;
+
+ transaction = i40e_nvmupd_get_transaction(cmd->config);
+ module = i40e_nvmupd_get_module(cmd->config);
+ last = (transaction & I40E_NVM_LCB);
+ preservation_flags = i40e_nvmupd_get_preservation_flags(cmd->config);
+
+ memset(&cmd_details, 0, sizeof(cmd_details));
+ cmd_details.wb_desc = &hw->nvm_wb_desc;
+
+ status = i40e_aq_update_nvm(hw, module, cmd->offset,
+ (u16)cmd->data_size, bytes, last,
+ preservation_flags, &cmd_details);
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n",
+ module, cmd->offset, cmd->data_size);
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "i40e_nvmupd_nvm_write status %d aq %d\n",
+ status, hw->aq.asq_last_status);
+ *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
+ }
+
+ return status;
+}
diff --git a/src/spdk/dpdk/drivers/net/i40e/base/i40e_osdep.h b/src/spdk/dpdk/drivers/net/i40e/base/i40e_osdep.h
new file mode 100644
index 000000000..58be39677
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/i40e/base/i40e_osdep.h
@@ -0,0 +1,215 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2020 Intel Corporation
+ */
+
+#ifndef _I40E_OSDEP_H_
+#define _I40E_OSDEP_H_
+
+#include <string.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdarg.h>
+#include <inttypes.h>
+
+#include <rte_common.h>
+#include <rte_memcpy.h>
+#include <rte_byteorder.h>
+#include <rte_cycles.h>
+#include <rte_spinlock.h>
+#include <rte_log.h>
+#include <rte_io.h>
+
+#include "../i40e_logs.h"
+
+#define INLINE inline
+#define STATIC static
+
+typedef uint8_t u8;
+typedef int8_t s8;
+typedef uint16_t u16;
+typedef uint32_t u32;
+typedef int32_t s32;
+typedef uint64_t u64;
+
+typedef enum i40e_status_code i40e_status;
+#define __iomem
+#define hw_dbg(hw, S, A...) do {} while (0)
+#define upper_32_bits(n) ((u32)(((n) >> 16) >> 16))
+#define lower_32_bits(n) ((u32)(n))
+#define low_16_bits(x) ((x) & 0xFFFF)
+#define high_16_bits(x) (((x) & 0xFFFF0000) >> 16)
+
+#ifndef ETH_ADDR_LEN
+#define ETH_ADDR_LEN 6
+#endif
+
+#ifndef __le16
+#define __le16 uint16_t
+#endif
+#ifndef __le32
+#define __le32 uint32_t
+#endif
+#ifndef __le64
+#define __le64 uint64_t
+#endif
+#ifndef __be16
+#define __be16 uint16_t
+#endif
+#ifndef __be32
+#define __be32 uint32_t
+#endif
+#ifndef __be64
+#define __be64 uint64_t
+#endif
+
+#define FALSE 0
+#define TRUE 1
+#define false 0
+#define true 1
+
+#define min(a,b) RTE_MIN(a,b)
+#define max(a,b) RTE_MAX(a,b)
+
+#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
+
+#define DEBUGOUT(S) PMD_DRV_LOG_RAW(DEBUG, S)
+#define DEBUGOUT1(S, A...) PMD_DRV_LOG_RAW(DEBUG, S, ##A)
+
+#define DEBUGFUNC(F) DEBUGOUT(F "\n")
+#define DEBUGOUT2 DEBUGOUT1
+#define DEBUGOUT3 DEBUGOUT2
+#define DEBUGOUT6 DEBUGOUT3
+#define DEBUGOUT7 DEBUGOUT6
+
+#define i40e_debug(h, m, s, ...) \
+do { \
+ if (((m) & (h)->debug_mask)) \
+ PMD_DRV_LOG_RAW(DEBUG, "i40e %02x.%x " s, \
+ (h)->bus.device, (h)->bus.func, \
+ ##__VA_ARGS__); \
+} while (0)
+
+/* AQ commands based interfaces of i40e_read_rx_ctl() and i40e_write_rx_ctl()
+ * are required for reading/writing below registers, as reading/writing it
+ * directly may not function correctly if the device is under heavy small
+ * packet traffic. Note that those interfaces are available from FVL5 and not
+ * suitable before the AdminQ is ready during initialization.
+ * + * I40E_PFQF_CTL_0 + * I40E_PFQF_HENA + * I40E_PFQF_FDALLOC + * I40E_PFQF_HREGION + * I40E_PFLAN_QALLOC + * I40E_VPQF_CTL + * I40E_VFQF_HENA + * I40E_VFQF_HREGION + * I40E_VSIQF_CTL + * I40E_VSILAN_QBASE + * I40E_VSILAN_QTABLE + * I40E_VSIQF_TCREGION + * I40E_PFQF_HKEY + * I40E_VFQF_HKEY + * I40E_PRTQF_CTL_0 + * I40E_GLFCOE_RCTL + * I40E_GLFCOE_RSOF + * I40E_GLQF_CTL + * I40E_GLQF_SWAP + * I40E_GLQF_HASH_MSK + * I40E_GLQF_HASH_INSET + * I40E_GLQF_HSYM + * I40E_GLQF_FC_MSK + * I40E_GLQF_FC_INSET + * I40E_GLQF_FD_MSK + * I40E_PRTQF_FD_INSET + * I40E_PRTQF_FD_FLXINSET + * I40E_PRTQF_FD_MSK + */ + +#define I40E_PCI_REG(reg) rte_read32(reg) +#define I40E_PCI_REG_ADDR(a, reg) \ + ((volatile uint32_t *)((char *)(a)->hw_addr + (reg))) +static inline uint32_t i40e_read_addr(volatile void *addr) +{ + return rte_le_to_cpu_32(I40E_PCI_REG(addr)); +} + +#define I40E_PCI_REG_WRITE(reg, value) \ + rte_write32((rte_cpu_to_le_32(value)), reg) +#define I40E_PCI_REG_WRITE_RELAXED(reg, value) \ + rte_write32_relaxed((rte_cpu_to_le_32(value)), reg) + +#define I40E_WRITE_FLUSH(a) I40E_READ_REG(a, I40E_GLGEN_STAT) +#define I40EVF_WRITE_FLUSH(a) I40E_READ_REG(a, I40E_VFGEN_RSTAT) + +#define I40E_READ_REG(hw, reg) i40e_read_addr(I40E_PCI_REG_ADDR((hw), (reg))) +#define I40E_WRITE_REG(hw, reg, value) \ + I40E_PCI_REG_WRITE(I40E_PCI_REG_ADDR((hw), (reg)), (value)) + +#define rd32(a, reg) i40e_read_addr(I40E_PCI_REG_ADDR((a), (reg))) +#define wr32(a, reg, value) \ + I40E_PCI_REG_WRITE(I40E_PCI_REG_ADDR((a), (reg)), (value)) +#define flush(a) i40e_read_addr(I40E_PCI_REG_ADDR((a), (I40E_GLGEN_STAT))) + +#define ARRAY_SIZE(arr) (sizeof(arr)/sizeof(arr[0])) + +/* memory allocation tracking */ +struct i40e_dma_mem { + void *va; + u64 pa; + u32 size; + const void *zone; +} __rte_packed; + +#define i40e_allocate_dma_mem(h, m, unused, s, a) \ + i40e_allocate_dma_mem_d(h, m, s, a) +#define i40e_free_dma_mem(h, m) i40e_free_dma_mem_d(h, m) + +struct i40e_virt_mem { + void *va; + u32 size; +} __rte_packed; + +#define i40e_allocate_virt_mem(h, m, s) i40e_allocate_virt_mem_d(h, m, s) +#define i40e_free_virt_mem(h, m) i40e_free_virt_mem_d(h, m) + +#define CPU_TO_LE16(o) rte_cpu_to_le_16(o) +#define CPU_TO_LE32(s) rte_cpu_to_le_32(s) +#define CPU_TO_LE64(h) rte_cpu_to_le_64(h) +#define LE16_TO_CPU(a) rte_le_to_cpu_16(a) +#define LE32_TO_CPU(c) rte_le_to_cpu_32(c) +#define LE64_TO_CPU(k) rte_le_to_cpu_64(k) + +#define cpu_to_le16(o) rte_cpu_to_le_16(o) +#define cpu_to_le32(s) rte_cpu_to_le_32(s) +#define cpu_to_le64(h) rte_cpu_to_le_64(h) +#define le16_to_cpu(a) rte_le_to_cpu_16(a) +#define le32_to_cpu(c) rte_le_to_cpu_32(c) +#define le64_to_cpu(k) rte_le_to_cpu_64(k) + +/* SW spinlock */ +struct i40e_spinlock { + rte_spinlock_t spinlock; +}; + +#define i40e_init_spinlock(_sp) i40e_init_spinlock_d(_sp) +#define i40e_acquire_spinlock(_sp) i40e_acquire_spinlock_d(_sp) +#define i40e_release_spinlock(_sp) i40e_release_spinlock_d(_sp) +#define i40e_destroy_spinlock(_sp) i40e_destroy_spinlock_d(_sp) + +#define I40E_NTOHS(a) rte_be_to_cpu_16(a) +#define I40E_NTOHL(a) rte_be_to_cpu_32(a) +#define I40E_HTONS(a) rte_cpu_to_be_16(a) +#define I40E_HTONL(a) rte_cpu_to_be_32(a) + +#define i40e_memset(a, b, c, d) memset((a), (b), (c)) +#define i40e_memcpy(a, b, c, d) rte_memcpy((a), (b), (c)) + +#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d)) +#define DELAY(x) rte_delay_us_sleep(x) +#define i40e_usec_delay(x) DELAY(x) +#define i40e_msec_delay(x) DELAY(1000 * (x)) +#define udelay(x) DELAY(x) +#define msleep(x) DELAY(1000*(x)) +#define 
usleep_range(min, max) msleep(DIV_ROUND_UP(min, 1000)) + +#endif /* _I40E_OSDEP_H_ */ diff --git a/src/spdk/dpdk/drivers/net/i40e/base/i40e_prototype.h b/src/spdk/dpdk/drivers/net/i40e/base/i40e_prototype.h new file mode 100644 index 000000000..91fa23491 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/i40e/base/i40e_prototype.h @@ -0,0 +1,644 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + +#ifndef _I40E_PROTOTYPE_H_ +#define _I40E_PROTOTYPE_H_ + +#include "i40e_type.h" +#include "i40e_alloc.h" +#include "virtchnl.h" + +/* Prototypes for shared code functions that are not in + * the standard function pointer structures. These are + * mostly because they are needed even before the init + * has happened and will assist in the early SW and FW + * setup. + */ + +/* adminq functions */ +enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw); +enum i40e_status_code i40e_shutdown_adminq(struct i40e_hw *hw); +enum i40e_status_code i40e_init_asq(struct i40e_hw *hw); +enum i40e_status_code i40e_init_arq(struct i40e_hw *hw); +enum i40e_status_code i40e_alloc_adminq_asq_ring(struct i40e_hw *hw); +enum i40e_status_code i40e_alloc_adminq_arq_ring(struct i40e_hw *hw); +enum i40e_status_code i40e_shutdown_asq(struct i40e_hw *hw); +enum i40e_status_code i40e_shutdown_arq(struct i40e_hw *hw); +u16 i40e_clean_asq(struct i40e_hw *hw); +void i40e_free_adminq_asq(struct i40e_hw *hw); +void i40e_free_adminq_arq(struct i40e_hw *hw); +enum i40e_status_code i40e_validate_mac_addr(u8 *mac_addr); +void i40e_adminq_init_ring_data(struct i40e_hw *hw); +enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw, + struct i40e_arq_event_info *e, + u16 *events_pending); +enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw, + struct i40e_aq_desc *desc, + void *buff, /* can be NULL */ + u16 buff_size, + struct i40e_asq_cmd_details *cmd_details); +#ifdef VF_DRIVER +bool i40e_asq_done(struct i40e_hw *hw); +#endif + +/* debug function for adminq */ +void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, + void *desc, void *buffer, u16 buf_len); + +void i40e_idle_aq(struct i40e_hw *hw); +bool i40e_check_asq_alive(struct i40e_hw *hw); +enum i40e_status_code i40e_aq_queue_shutdown(struct i40e_hw *hw, bool unloading); + +enum i40e_status_code i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 seid, + bool pf_lut, u8 *lut, u16 lut_size); +enum i40e_status_code i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 seid, + bool pf_lut, u8 *lut, u16 lut_size); +enum i40e_status_code i40e_aq_get_rss_key(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_get_set_rss_key_data *key); +enum i40e_status_code i40e_aq_set_rss_key(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_get_set_rss_key_data *key); +const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err); +const char *i40e_stat_str(struct i40e_hw *hw, enum i40e_status_code stat_err); + +#ifdef PF_DRIVER + +u32 i40e_led_get(struct i40e_hw *hw); +void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink); +enum i40e_status_code i40e_led_set_phy(struct i40e_hw *hw, bool on, + u16 led_addr, u32 mode); +enum i40e_status_code i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr, + u16 *val); +enum i40e_status_code i40e_blink_phy_link_led(struct i40e_hw *hw, + u32 time, u32 interval); +enum i40e_status_code i40e_led_get_reg(struct i40e_hw *hw, u16 led_addr, + u32 *reg_val); +enum i40e_status_code i40e_led_set_reg(struct i40e_hw *hw, u16 led_addr, + u32 reg_val); +enum i40e_status_code 
i40e_get_phy_lpi_status(struct i40e_hw *hw, + struct i40e_hw_port_stats *stats); +enum i40e_status_code i40e_get_lpi_counters(struct i40e_hw *hw, u32 *tx_counter, + u32 *rx_counter, bool *is_clear); +enum i40e_status_code i40e_lpi_stat_update(struct i40e_hw *hw, + bool offset_loaded, u64 *tx_offset, + u64 *tx_stat, u64 *rx_offset, + u64 *rx_stat); +enum i40e_status_code i40e_get_lpi_duration(struct i40e_hw *hw, + struct i40e_hw_port_stats *stat, + u64 *tx_duration, u64 *rx_duration); +/* admin send queue commands */ + +enum i40e_status_code i40e_aq_get_firmware_version(struct i40e_hw *hw, + u16 *fw_major_version, u16 *fw_minor_version, + u32 *fw_build, + u16 *api_major_version, u16 *api_minor_version, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_debug_write_register(struct i40e_hw *hw, + u32 reg_addr, u64 reg_val, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_debug_read_register(struct i40e_hw *hw, + u32 reg_addr, u64 *reg_val, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 vsi_id, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_clear_default_vsi(struct i40e_hw *hw, u16 vsi_id, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_get_phy_capabilities(struct i40e_hw *hw, + bool qualified_modules, bool report_init, + struct i40e_aq_get_phy_abilities_resp *abilities, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw, + struct i40e_aq_set_phy_config *config, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures, + bool atomic_reset); +enum i40e_status_code i40e_aq_set_phy_int_mask(struct i40e_hw *hw, u16 mask, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_set_mac_config(struct i40e_hw *hw, + u16 max_frame_size, bool crc_en, u16 pacing, + bool auto_drop_blocking_packets, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_get_local_advt_reg(struct i40e_hw *hw, + u64 *advt_reg, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_get_partner_advt(struct i40e_hw *hw, + u64 *advt_reg, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_set_lb_modes(struct i40e_hw *hw, u16 lb_modes, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_clear_pxe_mode(struct i40e_hw *hw, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_set_link_restart_an(struct i40e_hw *hw, + bool enable_link, struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_get_link_info(struct i40e_hw *hw, + bool enable_lse, struct i40e_link_status *link, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_set_local_advt_reg(struct i40e_hw *hw, + u64 advt_reg, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_send_driver_version(struct i40e_hw *hw, + struct i40e_driver_version *dv, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_add_vsi(struct i40e_hw *hw, + struct i40e_vsi_context *vsi_ctx, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_set_vsi_broadcast(struct i40e_hw *hw, + u16 vsi_id, bool set_filter, + struct i40e_asq_cmd_details *cmd_details); 
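For orientation only, here is a minimal usage sketch — not part of the upstream sources — of how a PF driver typically drives these AdminQ wrappers: issue the request, check the returned i40e_status_code, and on failure decode both the status and the last AdminQ return code with the i40e_stat_str()/i40e_aq_str() helpers declared above. The function name below is hypothetical, and the call assumes the AdminQ has already been brought up with i40e_init_adminq() and that i40e_prototype.h/i40e_osdep.h are in scope.

/* Hypothetical caller, shown only to illustrate the calling convention. */
static void example_query_link(struct i40e_hw *hw)
{
	struct i40e_link_status link_status;
	enum i40e_status_code status;

	/* enable_lse = true also requests link status change events */
	status = i40e_aq_get_link_info(hw, true, &link_status, NULL);
	if (status != I40E_SUCCESS)
		DEBUGOUT2("get_link_info failed: %s, aq_err %s\n",
			  i40e_stat_str(hw, status),
			  i40e_aq_str(hw, hw->aq.asq_last_status));
}

Passing NULL for the cmd_details argument is the common case when the caller does not need a write-back descriptor or completion callback; the same status/aq_err decoding pattern applies to the other i40e_aq_*() wrappers declared in this header.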
+enum i40e_status_code i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw, + u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details, + bool rx_only_promisc); +enum i40e_status_code i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw, + u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_set_vsi_full_promiscuous(struct i40e_hw *hw, + u16 seid, bool set, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw, + u16 seid, bool enable, u16 vid, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw, + u16 seid, bool enable, u16 vid, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw, + u16 seid, bool enable, u16 vid, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw, + u16 seid, bool enable, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_get_vsi_params(struct i40e_hw *hw, + struct i40e_vsi_context *vsi_ctx, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_update_vsi_params(struct i40e_hw *hw, + struct i40e_vsi_context *vsi_ctx, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid, + u16 downlink_seid, u8 enabled_tc, + bool default_port, u16 *pveb_seid, + bool enable_stats, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_get_veb_parameters(struct i40e_hw *hw, + u16 veb_seid, u16 *switch_id, bool *floating, + u16 *statistic_index, u16 *vebs_used, + u16 *vebs_free, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_add_macvlan(struct i40e_hw *hw, u16 vsi_id, + struct i40e_aqc_add_macvlan_element_data *mv_list, + u16 count, struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 vsi_id, + struct i40e_aqc_remove_macvlan_element_data *mv_list, + u16 count, struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid, + u16 rule_type, u16 dest_vsi, u16 count, __le16 *mr_list, + struct i40e_asq_cmd_details *cmd_details, + u16 *rule_id, u16 *rules_used, u16 *rules_free); +enum i40e_status_code i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid, + u16 rule_type, u16 rule_id, u16 count, __le16 *mr_list, + struct i40e_asq_cmd_details *cmd_details, + u16 *rules_used, u16 *rules_free); + +enum i40e_status_code i40e_aq_add_vlan(struct i40e_hw *hw, u16 vsi_id, + struct i40e_aqc_add_remove_vlan_element_data *v_list, + u8 count, struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_remove_vlan(struct i40e_hw *hw, u16 vsi_id, + struct i40e_aqc_add_remove_vlan_element_data *v_list, + u8 count, struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid, + u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_get_switch_config(struct i40e_hw *hw, + struct i40e_aqc_get_switch_config_resp *buf, + u16 buf_size, u16 *start_seid, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw, + u16 flags, u16 valid_flags, u8 mode, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code 
i40e_aq_request_resource(struct i40e_hw *hw, + enum i40e_aq_resources_ids resource, + enum i40e_aq_resource_access_type access, + u8 sdp_number, u64 *timeout, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_release_resource(struct i40e_hw *hw, + enum i40e_aq_resources_ids resource, + u8 sdp_number, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer, + u32 offset, u16 length, void *data, + bool last_command, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer, + u32 offset, u16 length, bool last_command, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_read_nvm_config(struct i40e_hw *hw, + u8 cmd_flags, u32 field_id, void *data, + u16 buf_size, u16 *element_count, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_write_nvm_config(struct i40e_hw *hw, + u8 cmd_flags, void *data, u16 buf_size, + u16 element_count, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_oem_post_update(struct i40e_hw *hw, + void *buff, u16 buff_size, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_discover_capabilities(struct i40e_hw *hw, + void *buff, u16 buff_size, u16 *data_size, + enum i40e_admin_queue_opc list_type_opc, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer, + u32 offset, u16 length, void *data, + bool last_command, u8 preservation_flags, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_rearrange_nvm(struct i40e_hw *hw, + u8 rearrange_nvm, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_nvm_progress(struct i40e_hw *hw, u8 *progress, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type, + u8 mib_type, void *buff, u16 buff_size, + u16 *local_len, u16 *remote_len, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_set_lldp_mib(struct i40e_hw *hw, + u8 mib_type, void *buff, u16 buff_size, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw, + bool enable_update, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code +i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent, + bool persist, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_set_dcb_parameters(struct i40e_hw *hw, + bool dcb_enable, + struct i40e_asq_cmd_details + *cmd_details); +enum i40e_status_code i40e_aq_start_lldp(struct i40e_hw *hw, + bool persist, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_get_cee_dcb_config(struct i40e_hw *hw, + void *buff, u16 buff_size, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_start_stop_dcbx(struct i40e_hw *hw, + bool start_agent, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_add_udp_tunnel(struct i40e_hw *hw, + u16 udp_port, u8 protocol_index, + u8 *filter_index, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code 
i40e_aq_get_switch_resource_alloc(struct i40e_hw *hw, + u8 *num_entries, + struct i40e_aqc_switch_resource_alloc_element_resp *buf, + u16 count, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_add_pvirt(struct i40e_hw *hw, u16 flags, + u16 mac_seid, u16 vsi_seid, + u16 *ret_seid); +enum i40e_status_code i40e_aq_add_tag(struct i40e_hw *hw, bool direct_to_queue, + u16 vsi_seid, u16 tag, u16 queue_num, + u16 *tags_used, u16 *tags_free, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_remove_tag(struct i40e_hw *hw, u16 vsi_seid, + u16 tag, u16 *tags_used, u16 *tags_free, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_add_mcast_etag(struct i40e_hw *hw, u16 pe_seid, + u16 etag, u8 num_tags_in_buf, void *buf, + u16 *tags_used, u16 *tags_free, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_remove_mcast_etag(struct i40e_hw *hw, u16 pe_seid, + u16 etag, u16 *tags_used, u16 *tags_free, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_update_tag(struct i40e_hw *hw, u16 vsi_seid, + u16 old_tag, u16 new_tag, u16 *tags_used, + u16 *tags_free, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_add_statistics(struct i40e_hw *hw, u16 seid, + u16 vlan_id, u16 *stat_index, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_remove_statistics(struct i40e_hw *hw, u16 seid, + u16 vlan_id, u16 stat_index, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_set_port_parameters(struct i40e_hw *hw, + u16 bad_frame_vsi, bool save_bad_pac, + bool pad_short_pac, bool double_vlan, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_delete_element(struct i40e_hw *hw, u16 seid, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_mac_address_write(struct i40e_hw *hw, + u16 flags, u8 *mac_addr, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw, + u16 seid, u16 credit, u8 max_credit, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_dcb_ignore_pfc(struct i40e_hw *hw, + u8 tcmap, bool request, u8 *tcmap_ret, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_config_switch_comp_ets_bw_limit( + struct i40e_hw *hw, u16 seid, + struct i40e_aqc_configure_switching_comp_ets_bw_limit_data *bw_data, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_config_vsi_ets_sla_bw_limit(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_configure_vsi_ets_sla_bw_data *bw_data, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_dcb_updated(struct i40e_hw *hw, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_config_switch_comp_bw_limit(struct i40e_hw *hw, + u16 seid, u16 credit, u8 max_bw, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw, u16 seid, + struct i40e_aqc_configure_vsi_tc_bw_data *bw_data, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_config_switch_comp_ets(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_configure_switching_comp_ets_data *ets_data, + enum i40e_admin_queue_opc opcode, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_configure_switching_comp_bw_config_data *bw_data, + struct 
i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_query_vsi_bw_config(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_query_vsi_bw_config_resp *bw_data, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_query_port_ets_config(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_query_port_ets_config_resp *bw_data, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_resume_port_tx(struct i40e_hw *hw, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code +i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid, + struct i40e_aqc_cloud_filters_element_bb *filters, + u8 filter_count); +enum i40e_status_code +i40e_aq_add_cloud_filters(struct i40e_hw *hw, u16 vsi, + struct i40e_aqc_cloud_filters_element_data *filters, + u8 filter_count); +enum i40e_status_code +i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 vsi, + struct i40e_aqc_cloud_filters_element_data *filters, + u8 filter_count); +enum i40e_status_code +i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid, + struct i40e_aqc_cloud_filters_element_bb *filters, + u8 filter_count); +enum i40e_status_code i40e_read_lldp_cfg(struct i40e_hw *hw, + struct i40e_lldp_variables *lldp_cfg); +enum i40e_status_code i40e_aq_replace_cloud_filters(struct i40e_hw *hw, + struct i40e_aqc_replace_cloud_filters_cmd *filters, + struct i40e_aqc_replace_cloud_filters_cmd_buf *cmd_buf); +enum i40e_status_code i40e_aq_alternate_read(struct i40e_hw *hw, + u32 reg_addr0, u32 *reg_val0, + u32 reg_addr1, u32 *reg_val1); +enum i40e_status_code i40e_aq_alternate_read_indirect(struct i40e_hw *hw, + u32 addr, u32 dw_count, void *buffer); +enum i40e_status_code i40e_aq_alternate_write(struct i40e_hw *hw, + u32 reg_addr0, u32 reg_val0, + u32 reg_addr1, u32 reg_val1); +enum i40e_status_code i40e_aq_alternate_write_indirect(struct i40e_hw *hw, + u32 addr, u32 dw_count, void *buffer); +enum i40e_status_code i40e_aq_alternate_clear(struct i40e_hw *hw); +enum i40e_status_code i40e_aq_alternate_write_done(struct i40e_hw *hw, + u8 bios_mode, bool *reset_needed); +enum i40e_status_code i40e_aq_set_oem_mode(struct i40e_hw *hw, + u8 oem_mode); + +/* i40e_common */ +enum i40e_status_code i40e_init_shared_code(struct i40e_hw *hw); +enum i40e_status_code i40e_pf_reset(struct i40e_hw *hw); +void i40e_clear_hw(struct i40e_hw *hw); +void i40e_clear_pxe_mode(struct i40e_hw *hw); +enum i40e_status_code i40e_get_link_status(struct i40e_hw *hw, bool *link_up); +enum i40e_status_code i40e_update_link_info(struct i40e_hw *hw); +enum i40e_status_code i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr); +enum i40e_status_code i40e_read_bw_from_alt_ram(struct i40e_hw *hw, + u32 *max_bw, u32 *min_bw, bool *min_valid, bool *max_valid); +enum i40e_status_code i40e_aq_configure_partition_bw(struct i40e_hw *hw, + struct i40e_aqc_configure_partition_bw_data *bw_data, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code 
i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr); +enum i40e_status_code i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num, + u32 pba_num_size); +void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable); +enum i40e_status_code i40e_get_san_mac_addr(struct i40e_hw *hw, u8 *mac_addr); +enum i40e_aq_link_speed i40e_get_link_speed(struct i40e_hw *hw); +/* prototype for functions used for NVM access */ +enum i40e_status_code i40e_init_nvm(struct i40e_hw *hw); +enum i40e_status_code i40e_acquire_nvm(struct i40e_hw *hw, + enum i40e_aq_resource_access_type access); +void i40e_release_nvm(struct i40e_hw *hw); +enum i40e_status_code i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, + u16 *data); +enum i40e_status_code +i40e_read_nvm_module_data(struct i40e_hw *hw, u8 module_ptr, u16 module_offset, + u16 data_offset, u16 words_data_size, u16 *data_ptr); +enum i40e_status_code i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, + u16 *words, u16 *data); +enum i40e_status_code i40e_write_nvm_aq(struct i40e_hw *hw, u8 module, + u32 offset, u16 words, void *data, + bool last_command); +enum i40e_status_code __i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, + u16 *data); +enum i40e_status_code __i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, + u16 *words, u16 *data); +enum i40e_status_code __i40e_write_nvm_word(struct i40e_hw *hw, u32 offset, + void *data); +enum i40e_status_code __i40e_write_nvm_buffer(struct i40e_hw *hw, u8 module, + u32 offset, u16 words, void *data); +enum i40e_status_code i40e_calc_nvm_checksum(struct i40e_hw *hw, u16 *checksum); +enum i40e_status_code i40e_update_nvm_checksum(struct i40e_hw *hw); +enum i40e_status_code i40e_validate_nvm_checksum(struct i40e_hw *hw, + u16 *checksum); +enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *); +void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode, + struct i40e_aq_desc *desc); +void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw); +void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status); +#endif /* PF_DRIVER */ +enum i40e_status_code i40e_enable_eee(struct i40e_hw *hw, bool enable); + +enum i40e_status_code i40e_set_mac_type(struct i40e_hw *hw); + +extern struct i40e_rx_ptype_decoded i40e_ptype_lookup[]; + +STATIC INLINE struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype) +{ + return i40e_ptype_lookup[ptype]; +} + +#ifdef PF_DRIVER +/** + * i40e_virtchnl_link_speed - Convert AdminQ link_speed to virtchnl definition + * @link_speed: the speed to convert + * + * Returns the link_speed in terms of the virtchnl interface, for use in + * converting link_speed as reported by the AdminQ into the format used for + * talking to virtchnl devices. If we can't represent the link speed properly, + * report LINK_SPEED_UNKNOWN. 
+ **/ +STATIC INLINE enum virtchnl_link_speed +i40e_virtchnl_link_speed(enum i40e_aq_link_speed link_speed) +{ + switch (link_speed) { + case I40E_LINK_SPEED_100MB: + return VIRTCHNL_LINK_SPEED_100MB; + case I40E_LINK_SPEED_1GB: + return VIRTCHNL_LINK_SPEED_1GB; + case I40E_LINK_SPEED_2_5GB: + return VIRTCHNL_LINK_SPEED_2_5GB; + case I40E_LINK_SPEED_5GB: + return VIRTCHNL_LINK_SPEED_5GB; + case I40E_LINK_SPEED_10GB: + return VIRTCHNL_LINK_SPEED_10GB; + case I40E_LINK_SPEED_40GB: + return VIRTCHNL_LINK_SPEED_40GB; + case I40E_LINK_SPEED_20GB: + return VIRTCHNL_LINK_SPEED_20GB; + case I40E_LINK_SPEED_25GB: + return VIRTCHNL_LINK_SPEED_25GB; + case I40E_LINK_SPEED_UNKNOWN: + default: + return VIRTCHNL_LINK_SPEED_UNKNOWN; + } +} +#endif /* PF_DRIVER */ +/* prototype for functions used for SW spinlocks */ +void i40e_init_spinlock(struct i40e_spinlock *sp); +void i40e_acquire_spinlock(struct i40e_spinlock *sp); +void i40e_release_spinlock(struct i40e_spinlock *sp); +void i40e_destroy_spinlock(struct i40e_spinlock *sp); + +/* i40e_common for VF drivers*/ +void i40e_vf_parse_hw_config(struct i40e_hw *hw, + struct virtchnl_vf_resource *msg); +enum i40e_status_code i40e_vf_reset(struct i40e_hw *hw); +enum i40e_status_code i40e_aq_send_msg_to_pf(struct i40e_hw *hw, + enum virtchnl_ops v_opcode, + enum i40e_status_code v_retval, + u8 *msg, u16 msglen, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_set_filter_control(struct i40e_hw *hw, + struct i40e_filter_control_settings *settings); +enum i40e_status_code i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw, + u8 *mac_addr, u16 ethtype, u16 flags, + u16 vsi_seid, u16 queue, bool is_add, + struct i40e_control_filter_stats *stats, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id, + u8 table_id, u32 start_index, u16 buff_size, + void *buff, u16 *ret_buff_size, + u8 *ret_next_table, u32 *ret_next_index, + struct i40e_asq_cmd_details *cmd_details); +void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw, + u16 vsi_seid); +enum i40e_status_code i40e_aq_rx_ctl_read_register(struct i40e_hw *hw, + u32 reg_addr, u32 *reg_val, + struct i40e_asq_cmd_details *cmd_details); +u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr); +enum i40e_status_code i40e_aq_rx_ctl_write_register(struct i40e_hw *hw, + u32 reg_addr, u32 reg_val, + struct i40e_asq_cmd_details *cmd_details); +void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val); +enum i40e_status_code +i40e_aq_set_phy_register_ext(struct i40e_hw *hw, + u8 phy_select, u8 dev_addr, bool page_change, + bool set_mdio, u8 mdio_num, + u32 reg_addr, u32 reg_val, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code +i40e_aq_get_phy_register_ext(struct i40e_hw *hw, + u8 phy_select, u8 dev_addr, bool page_change, + bool set_mdio, u8 mdio_num, + u32 reg_addr, u32 *reg_val, + struct i40e_asq_cmd_details *cmd_details); + +/* Convenience wrappers for most common use case */ +#define i40e_aq_set_phy_register(hw, ps, da, pc, ra, rv, cd) \ + i40e_aq_set_phy_register_ext(hw, ps, da, pc, false, 0, ra, rv, cd) +#define i40e_aq_get_phy_register(hw, ps, da, pc, ra, rv, cd) \ + i40e_aq_get_phy_register_ext(hw, ps, da, pc, false, 0, ra, rv, cd) + +enum i40e_status_code +i40e_aq_run_phy_activity(struct i40e_hw *hw, u16 activity_id, u32 opcode, + u32 *cmd_status, u32 *data0, u32 *data1, + struct i40e_asq_cmd_details *cmd_details); + +enum i40e_status_code i40e_aq_set_arp_proxy_config(struct 
i40e_hw *hw, + struct i40e_aqc_arp_proxy_data *proxy_config, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_set_ns_proxy_table_entry(struct i40e_hw *hw, + struct i40e_aqc_ns_proxy_data *ns_proxy_table_entry, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_set_clear_wol_filter(struct i40e_hw *hw, + u8 filter_index, + struct i40e_aqc_set_wol_filter_data *filter, + bool set_filter, bool no_wol_tco, + bool filter_valid, bool no_wol_tco_valid, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_get_wake_event_reason(struct i40e_hw *hw, + u16 *wake_reason, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_clear_all_wol_filters(struct i40e_hw *hw, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_read_phy_register_clause22(struct i40e_hw *hw, + u16 reg, u8 phy_addr, u16 *value); +enum i40e_status_code i40e_write_phy_register_clause22(struct i40e_hw *hw, + u16 reg, u8 phy_addr, u16 value); +enum i40e_status_code i40e_read_phy_register_clause45(struct i40e_hw *hw, + u8 page, u16 reg, u8 phy_addr, u16 *value); +enum i40e_status_code i40e_write_phy_register_clause45(struct i40e_hw *hw, + u8 page, u16 reg, u8 phy_addr, u16 value); +enum i40e_status_code i40e_read_phy_register(struct i40e_hw *hw, + u8 page, u16 reg, u8 phy_addr, u16 *value); +enum i40e_status_code i40e_write_phy_register(struct i40e_hw *hw, + u8 page, u16 reg, u8 phy_addr, u16 value); +u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num); +enum i40e_status_code i40e_blink_phy_link_led(struct i40e_hw *hw, + u32 time, u32 interval); +enum i40e_status_code i40e_aq_write_ddp(struct i40e_hw *hw, void *buff, + u16 buff_size, u32 track_id, + u32 *error_offset, u32 *error_info, + struct i40e_asq_cmd_details * + cmd_details); +enum i40e_status_code i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff, + u16 buff_size, u8 flags, + struct i40e_asq_cmd_details * + cmd_details); +struct i40e_generic_seg_header * +i40e_find_segment_in_package(u32 segment_type, + struct i40e_package_header *pkg_header); +struct i40e_profile_section_header * +i40e_find_section_in_profile(u32 section_type, + struct i40e_profile_segment *profile); +enum i40e_status_code +i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *i40e_seg, + u32 track_id); +enum i40e_status_code +i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *i40e_seg, + u32 track_id); +enum i40e_status_code +i40e_add_pinfo_to_list(struct i40e_hw *hw, + struct i40e_profile_segment *profile, + u8 *profile_info_sec, u32 track_id); +#endif /* _I40E_PROTOTYPE_H_ */ diff --git a/src/spdk/dpdk/drivers/net/i40e/base/i40e_register.h b/src/spdk/dpdk/drivers/net/i40e/base/i40e_register.h new file mode 100644 index 000000000..ee4f333f9 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/i40e/base/i40e_register.h @@ -0,0 +1,5438 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + +#ifndef _I40E_REGISTER_H_ +#define _I40E_REGISTER_H_ + + +#ifdef PF_DRIVER +#define I40E_GL_ARQBAH 0x000801C0 /* Reset: EMPR */ +#define I40E_GL_ARQBAH_ARQBAH_SHIFT 0 +#define I40E_GL_ARQBAH_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ARQBAH_ARQBAH_SHIFT) +#define I40E_GL_ARQBAL 0x000800C0 /* Reset: EMPR */ +#define I40E_GL_ARQBAL_ARQBAL_SHIFT 0 +#define I40E_GL_ARQBAL_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ARQBAL_ARQBAL_SHIFT) +#define I40E_GL_ARQH 0x000803C0 /* Reset: EMPR */ +#define I40E_GL_ARQH_ARQH_SHIFT 0 +#define 
I40E_GL_ARQH_ARQH_MASK I40E_MASK(0x3FF, I40E_GL_ARQH_ARQH_SHIFT) +#define I40E_GL_ARQT 0x000804C0 /* Reset: EMPR */ +#define I40E_GL_ARQT_ARQT_SHIFT 0 +#define I40E_GL_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_GL_ARQT_ARQT_SHIFT) +#define I40E_GL_ATQBAH 0x00080140 /* Reset: EMPR */ +#define I40E_GL_ATQBAH_ATQBAH_SHIFT 0 +#define I40E_GL_ATQBAH_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ATQBAH_ATQBAH_SHIFT) +#define I40E_GL_ATQBAL 0x00080040 /* Reset: EMPR */ +#define I40E_GL_ATQBAL_ATQBAL_SHIFT 0 +#define I40E_GL_ATQBAL_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ATQBAL_ATQBAL_SHIFT) +#define I40E_GL_ATQH 0x00080340 /* Reset: EMPR */ +#define I40E_GL_ATQH_ATQH_SHIFT 0 +#define I40E_GL_ATQH_ATQH_MASK I40E_MASK(0x3FF, I40E_GL_ATQH_ATQH_SHIFT) +#define I40E_GL_ATQLEN 0x00080240 /* Reset: EMPR */ +#define I40E_GL_ATQLEN_ATQLEN_SHIFT 0 +#define I40E_GL_ATQLEN_ATQLEN_MASK I40E_MASK(0x3FF, I40E_GL_ATQLEN_ATQLEN_SHIFT) +#define I40E_GL_ATQLEN_ATQVFE_SHIFT 28 +#define I40E_GL_ATQLEN_ATQVFE_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQVFE_SHIFT) +#define I40E_GL_ATQLEN_ATQOVFL_SHIFT 29 +#define I40E_GL_ATQLEN_ATQOVFL_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQOVFL_SHIFT) +#define I40E_GL_ATQLEN_ATQCRIT_SHIFT 30 +#define I40E_GL_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQCRIT_SHIFT) +#define I40E_GL_ATQLEN_ATQENABLE_SHIFT 31 +#define I40E_GL_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQENABLE_SHIFT) +#define I40E_GL_ATQT 0x00080440 /* Reset: EMPR */ +#define I40E_GL_ATQT_ATQT_SHIFT 0 +#define I40E_GL_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_GL_ATQT_ATQT_SHIFT) +#define I40E_PF_ARQBAH 0x00080180 /* Reset: EMPR */ +#define I40E_PF_ARQBAH_ARQBAH_SHIFT 0 +#define I40E_PF_ARQBAH_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ARQBAH_ARQBAH_SHIFT) +#define I40E_PF_ARQBAL 0x00080080 /* Reset: EMPR */ +#define I40E_PF_ARQBAL_ARQBAL_SHIFT 0 +#define I40E_PF_ARQBAL_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ARQBAL_ARQBAL_SHIFT) +#define I40E_PF_ARQH 0x00080380 /* Reset: EMPR */ +#define I40E_PF_ARQH_ARQH_SHIFT 0 +#define I40E_PF_ARQH_ARQH_MASK I40E_MASK(0x3FF, I40E_PF_ARQH_ARQH_SHIFT) +#define I40E_PF_ARQLEN 0x00080280 /* Reset: EMPR */ +#define I40E_PF_ARQLEN_ARQLEN_SHIFT 0 +#define I40E_PF_ARQLEN_ARQLEN_MASK I40E_MASK(0x3FF, I40E_PF_ARQLEN_ARQLEN_SHIFT) +#define I40E_PF_ARQLEN_ARQVFE_SHIFT 28 +#define I40E_PF_ARQLEN_ARQVFE_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQVFE_SHIFT) +#define I40E_PF_ARQLEN_ARQOVFL_SHIFT 29 +#define I40E_PF_ARQLEN_ARQOVFL_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQOVFL_SHIFT) +#define I40E_PF_ARQLEN_ARQCRIT_SHIFT 30 +#define I40E_PF_ARQLEN_ARQCRIT_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQCRIT_SHIFT) +#define I40E_PF_ARQLEN_ARQENABLE_SHIFT 31 +#define I40E_PF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1u, I40E_PF_ARQLEN_ARQENABLE_SHIFT) +#define I40E_PF_ARQT 0x00080480 /* Reset: EMPR */ +#define I40E_PF_ARQT_ARQT_SHIFT 0 +#define I40E_PF_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_PF_ARQT_ARQT_SHIFT) +#define I40E_PF_ATQBAH 0x00080100 /* Reset: EMPR */ +#define I40E_PF_ATQBAH_ATQBAH_SHIFT 0 +#define I40E_PF_ATQBAH_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ATQBAH_ATQBAH_SHIFT) +#define I40E_PF_ATQBAL 0x00080000 /* Reset: EMPR */ +#define I40E_PF_ATQBAL_ATQBAL_SHIFT 0 +#define I40E_PF_ATQBAL_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ATQBAL_ATQBAL_SHIFT) +#define I40E_PF_ATQH 0x00080300 /* Reset: EMPR */ +#define I40E_PF_ATQH_ATQH_SHIFT 0 +#define I40E_PF_ATQH_ATQH_MASK I40E_MASK(0x3FF, I40E_PF_ATQH_ATQH_SHIFT) +#define I40E_PF_ATQLEN 0x00080200 /* Reset: EMPR */ +#define I40E_PF_ATQLEN_ATQLEN_SHIFT 0 +#define 
I40E_PF_ATQLEN_ATQLEN_MASK I40E_MASK(0x3FF, I40E_PF_ATQLEN_ATQLEN_SHIFT) +#define I40E_PF_ATQLEN_ATQVFE_SHIFT 28 +#define I40E_PF_ATQLEN_ATQVFE_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQVFE_SHIFT) +#define I40E_PF_ATQLEN_ATQOVFL_SHIFT 29 +#define I40E_PF_ATQLEN_ATQOVFL_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQOVFL_SHIFT) +#define I40E_PF_ATQLEN_ATQCRIT_SHIFT 30 +#define I40E_PF_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQCRIT_SHIFT) +#define I40E_PF_ATQLEN_ATQENABLE_SHIFT 31 +#define I40E_PF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1u, I40E_PF_ATQLEN_ATQENABLE_SHIFT) +#define I40E_PF_ATQT 0x00080400 /* Reset: EMPR */ +#define I40E_PF_ATQT_ATQT_SHIFT 0 +#define I40E_PF_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_PF_ATQT_ATQT_SHIFT) +#define I40E_VF_ARQBAH(_VF) (0x00081400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ +#define I40E_VF_ARQBAH_MAX_INDEX 127 +#define I40E_VF_ARQBAH_ARQBAH_SHIFT 0 +#define I40E_VF_ARQBAH_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAH_ARQBAH_SHIFT) +#define I40E_VF_ARQBAL(_VF) (0x00080C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ +#define I40E_VF_ARQBAL_MAX_INDEX 127 +#define I40E_VF_ARQBAL_ARQBAL_SHIFT 0 +#define I40E_VF_ARQBAL_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAL_ARQBAL_SHIFT) +#define I40E_VF_ARQH(_VF) (0x00082400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ +#define I40E_VF_ARQH_MAX_INDEX 127 +#define I40E_VF_ARQH_ARQH_SHIFT 0 +#define I40E_VF_ARQH_ARQH_MASK I40E_MASK(0x3FF, I40E_VF_ARQH_ARQH_SHIFT) +#define I40E_VF_ARQLEN(_VF) (0x00081C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ +#define I40E_VF_ARQLEN_MAX_INDEX 127 +#define I40E_VF_ARQLEN_ARQLEN_SHIFT 0 +#define I40E_VF_ARQLEN_ARQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ARQLEN_ARQLEN_SHIFT) +#define I40E_VF_ARQLEN_ARQVFE_SHIFT 28 +#define I40E_VF_ARQLEN_ARQVFE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQVFE_SHIFT) +#define I40E_VF_ARQLEN_ARQOVFL_SHIFT 29 +#define I40E_VF_ARQLEN_ARQOVFL_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQOVFL_SHIFT) +#define I40E_VF_ARQLEN_ARQCRIT_SHIFT 30 +#define I40E_VF_ARQLEN_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQCRIT_SHIFT) +#define I40E_VF_ARQLEN_ARQENABLE_SHIFT 31 +#define I40E_VF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ARQLEN_ARQENABLE_SHIFT) +#define I40E_VF_ARQT(_VF) (0x00082C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ +#define I40E_VF_ARQT_MAX_INDEX 127 +#define I40E_VF_ARQT_ARQT_SHIFT 0 +#define I40E_VF_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_VF_ARQT_ARQT_SHIFT) +#define I40E_VF_ATQBAH(_VF) (0x00081000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ +#define I40E_VF_ATQBAH_MAX_INDEX 127 +#define I40E_VF_ATQBAH_ATQBAH_SHIFT 0 +#define I40E_VF_ATQBAH_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAH_ATQBAH_SHIFT) +#define I40E_VF_ATQBAL(_VF) (0x00080800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ +#define I40E_VF_ATQBAL_MAX_INDEX 127 +#define I40E_VF_ATQBAL_ATQBAL_SHIFT 0 +#define I40E_VF_ATQBAL_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAL_ATQBAL_SHIFT) +#define I40E_VF_ATQH(_VF) (0x00082000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ +#define I40E_VF_ATQH_MAX_INDEX 127 +#define I40E_VF_ATQH_ATQH_SHIFT 0 +#define I40E_VF_ATQH_ATQH_MASK I40E_MASK(0x3FF, I40E_VF_ATQH_ATQH_SHIFT) +#define I40E_VF_ATQLEN(_VF) (0x00081800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ +#define I40E_VF_ATQLEN_MAX_INDEX 127 +#define I40E_VF_ATQLEN_ATQLEN_SHIFT 0 +#define I40E_VF_ATQLEN_ATQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ATQLEN_ATQLEN_SHIFT) +#define I40E_VF_ATQLEN_ATQVFE_SHIFT 28 +#define I40E_VF_ATQLEN_ATQVFE_MASK I40E_MASK(0x1, 
I40E_VF_ATQLEN_ATQVFE_SHIFT) +#define I40E_VF_ATQLEN_ATQOVFL_SHIFT 29 +#define I40E_VF_ATQLEN_ATQOVFL_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQOVFL_SHIFT) +#define I40E_VF_ATQLEN_ATQCRIT_SHIFT 30 +#define I40E_VF_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQCRIT_SHIFT) +#define I40E_VF_ATQLEN_ATQENABLE_SHIFT 31 +#define I40E_VF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ATQLEN_ATQENABLE_SHIFT) +#define I40E_VF_ATQT(_VF) (0x00082800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ +#define I40E_VF_ATQT_MAX_INDEX 127 +#define I40E_VF_ATQT_ATQT_SHIFT 0 +#define I40E_VF_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_VF_ATQT_ATQT_SHIFT) +#define I40E_PRT_L2TAGSEN 0x001C0B20 /* Reset: CORER */ +#define I40E_PRT_L2TAGSEN_ENABLE_SHIFT 0 +#define I40E_PRT_L2TAGSEN_ENABLE_MASK I40E_MASK(0xFF, I40E_PRT_L2TAGSEN_ENABLE_SHIFT) +#define I40E_PFCM_LAN_ERRDATA 0x0010C080 /* Reset: PFR */ +#define I40E_PFCM_LAN_ERRDATA_ERROR_CODE_SHIFT 0 +#define I40E_PFCM_LAN_ERRDATA_ERROR_CODE_MASK I40E_MASK(0xF, I40E_PFCM_LAN_ERRDATA_ERROR_CODE_SHIFT) +#define I40E_PFCM_LAN_ERRDATA_Q_TYPE_SHIFT 4 +#define I40E_PFCM_LAN_ERRDATA_Q_TYPE_MASK I40E_MASK(0x7, I40E_PFCM_LAN_ERRDATA_Q_TYPE_SHIFT) +#define I40E_PFCM_LAN_ERRDATA_Q_NUM_SHIFT 8 +#define I40E_PFCM_LAN_ERRDATA_Q_NUM_MASK I40E_MASK(0xFFF, I40E_PFCM_LAN_ERRDATA_Q_NUM_SHIFT) +#define I40E_PFCM_LAN_ERRINFO 0x0010C000 /* Reset: PFR */ +#define I40E_PFCM_LAN_ERRINFO_ERROR_VALID_SHIFT 0 +#define I40E_PFCM_LAN_ERRINFO_ERROR_VALID_MASK I40E_MASK(0x1, I40E_PFCM_LAN_ERRINFO_ERROR_VALID_SHIFT) +#define I40E_PFCM_LAN_ERRINFO_ERROR_INST_SHIFT 4 +#define I40E_PFCM_LAN_ERRINFO_ERROR_INST_MASK I40E_MASK(0x7, I40E_PFCM_LAN_ERRINFO_ERROR_INST_SHIFT) +#define I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_SHIFT 8 +#define I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_SHIFT) +#define I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_SHIFT 16 +#define I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_SHIFT) +#define I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_SHIFT 24 +#define I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_SHIFT) +#define I40E_PFCM_LANCTXCTL 0x0010C300 /* Reset: CORER */ +#define I40E_PFCM_LANCTXCTL_QUEUE_NUM_SHIFT 0 +#define I40E_PFCM_LANCTXCTL_QUEUE_NUM_MASK I40E_MASK(0xFFF, I40E_PFCM_LANCTXCTL_QUEUE_NUM_SHIFT) +#define I40E_PFCM_LANCTXCTL_SUB_LINE_SHIFT 12 +#define I40E_PFCM_LANCTXCTL_SUB_LINE_MASK I40E_MASK(0x7, I40E_PFCM_LANCTXCTL_SUB_LINE_SHIFT) +#define I40E_PFCM_LANCTXCTL_QUEUE_TYPE_SHIFT 15 +#define I40E_PFCM_LANCTXCTL_QUEUE_TYPE_MASK I40E_MASK(0x3, I40E_PFCM_LANCTXCTL_QUEUE_TYPE_SHIFT) +#define I40E_PFCM_LANCTXCTL_OP_CODE_SHIFT 17 +#define I40E_PFCM_LANCTXCTL_OP_CODE_MASK I40E_MASK(0x3, I40E_PFCM_LANCTXCTL_OP_CODE_SHIFT) +#define I40E_PFCM_LANCTXDATA(_i) (0x0010C100 + ((_i) * 128)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_PFCM_LANCTXDATA_MAX_INDEX 3 +#define I40E_PFCM_LANCTXDATA_DATA_SHIFT 0 +#define I40E_PFCM_LANCTXDATA_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_PFCM_LANCTXDATA_DATA_SHIFT) +#define I40E_PFCM_LANCTXSTAT 0x0010C380 /* Reset: CORER */ +#define I40E_PFCM_LANCTXSTAT_CTX_DONE_SHIFT 0 +#define I40E_PFCM_LANCTXSTAT_CTX_DONE_MASK I40E_MASK(0x1, I40E_PFCM_LANCTXSTAT_CTX_DONE_SHIFT) +#define I40E_PFCM_LANCTXSTAT_CTX_MISS_SHIFT 1 +#define I40E_PFCM_LANCTXSTAT_CTX_MISS_MASK I40E_MASK(0x1, I40E_PFCM_LANCTXSTAT_CTX_MISS_SHIFT) +#define I40E_VFCM_PE_ERRDATA1(_VF) (0x00138800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define 
I40E_VFCM_PE_ERRDATA1_MAX_INDEX 127 +#define I40E_VFCM_PE_ERRDATA1_ERROR_CODE_SHIFT 0 +#define I40E_VFCM_PE_ERRDATA1_ERROR_CODE_MASK I40E_MASK(0xF, I40E_VFCM_PE_ERRDATA1_ERROR_CODE_SHIFT) +#define I40E_VFCM_PE_ERRDATA1_Q_TYPE_SHIFT 4 +#define I40E_VFCM_PE_ERRDATA1_Q_TYPE_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRDATA1_Q_TYPE_SHIFT) +#define I40E_VFCM_PE_ERRDATA1_Q_NUM_SHIFT 8 +#define I40E_VFCM_PE_ERRDATA1_Q_NUM_MASK I40E_MASK(0x3FFFF, I40E_VFCM_PE_ERRDATA1_Q_NUM_SHIFT) +#define I40E_VFCM_PE_ERRINFO1(_VF) (0x00138400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VFCM_PE_ERRINFO1_MAX_INDEX 127 +#define I40E_VFCM_PE_ERRINFO1_ERROR_VALID_SHIFT 0 +#define I40E_VFCM_PE_ERRINFO1_ERROR_VALID_MASK I40E_MASK(0x1, I40E_VFCM_PE_ERRINFO1_ERROR_VALID_SHIFT) +#define I40E_VFCM_PE_ERRINFO1_ERROR_INST_SHIFT 4 +#define I40E_VFCM_PE_ERRINFO1_ERROR_INST_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRINFO1_ERROR_INST_SHIFT) +#define I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_SHIFT 8 +#define I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_SHIFT) +#define I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_SHIFT 16 +#define I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_SHIFT) +#define I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_SHIFT 24 +#define I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_SHIFT) +#define I40E_GLDCB_GENC 0x00083044 /* Reset: CORER */ +#define I40E_GLDCB_GENC_PCIRTT_SHIFT 0 +#define I40E_GLDCB_GENC_PCIRTT_MASK I40E_MASK(0xFFFF, I40E_GLDCB_GENC_PCIRTT_SHIFT) +#define I40E_GLDCB_RUPTI 0x00122618 /* Reset: CORER */ +#define I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_SHIFT 0 +#define I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_SHIFT) +#define I40E_PRTDCB_FCCFG 0x001E4640 /* Reset: GLOBR */ +#define I40E_PRTDCB_FCCFG_TFCE_SHIFT 3 +#define I40E_PRTDCB_FCCFG_TFCE_MASK I40E_MASK(0x3, I40E_PRTDCB_FCCFG_TFCE_SHIFT) +#define I40E_PRTDCB_FCRTV 0x001E4600 /* Reset: GLOBR */ +#define I40E_PRTDCB_FCRTV_FC_REFRESH_TH_SHIFT 0 +#define I40E_PRTDCB_FCRTV_FC_REFRESH_TH_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_FCRTV_FC_REFRESH_TH_SHIFT) +#define I40E_PRTDCB_FCTTVN(_i) (0x001E4580 + ((_i) * 32)) /* _i=0...3 */ /* Reset: GLOBR */ +#define I40E_PRTDCB_FCTTVN_MAX_INDEX 3 +#define I40E_PRTDCB_FCTTVN_TTV_2N_SHIFT 0 +#define I40E_PRTDCB_FCTTVN_TTV_2N_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_FCTTVN_TTV_2N_SHIFT) +#define I40E_PRTDCB_FCTTVN_TTV_2N_P1_SHIFT 16 +#define I40E_PRTDCB_FCTTVN_TTV_2N_P1_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_FCTTVN_TTV_2N_P1_SHIFT) +#define I40E_PRTDCB_GENC 0x00083000 /* Reset: CORER */ +#define I40E_PRTDCB_GENC_RESERVED_1_SHIFT 0 +#define I40E_PRTDCB_GENC_RESERVED_1_MASK I40E_MASK(0x3, I40E_PRTDCB_GENC_RESERVED_1_SHIFT) +#define I40E_PRTDCB_GENC_NUMTC_SHIFT 2 +#define I40E_PRTDCB_GENC_NUMTC_MASK I40E_MASK(0xF, I40E_PRTDCB_GENC_NUMTC_SHIFT) +#define I40E_PRTDCB_GENC_FCOEUP_SHIFT 6 +#define I40E_PRTDCB_GENC_FCOEUP_MASK I40E_MASK(0x7, I40E_PRTDCB_GENC_FCOEUP_SHIFT) +#define I40E_PRTDCB_GENC_FCOEUP_VALID_SHIFT 9 +#define I40E_PRTDCB_GENC_FCOEUP_VALID_MASK I40E_MASK(0x1, I40E_PRTDCB_GENC_FCOEUP_VALID_SHIFT) +#define I40E_PRTDCB_GENC_PFCLDA_SHIFT 16 +#define I40E_PRTDCB_GENC_PFCLDA_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_GENC_PFCLDA_SHIFT) +#define I40E_PRTDCB_GENS 0x00083020 /* Reset: CORER */ +#define I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT 0 +#define I40E_PRTDCB_GENS_DCBX_STATUS_MASK I40E_MASK(0x7, I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT) +#define I40E_PRTDCB_MFLCN 0x001E2400 /* Reset: 
GLOBR */ +#define I40E_PRTDCB_MFLCN_PMCF_SHIFT 0 +#define I40E_PRTDCB_MFLCN_PMCF_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_PMCF_SHIFT) +#define I40E_PRTDCB_MFLCN_DPF_SHIFT 1 +#define I40E_PRTDCB_MFLCN_DPF_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_DPF_SHIFT) +#define I40E_PRTDCB_MFLCN_RPFCM_SHIFT 2 +#define I40E_PRTDCB_MFLCN_RPFCM_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_RPFCM_SHIFT) +#define I40E_PRTDCB_MFLCN_RFCE_SHIFT 3 +#define I40E_PRTDCB_MFLCN_RFCE_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_RFCE_SHIFT) +#define I40E_PRTDCB_MFLCN_RPFCE_SHIFT 4 +#define I40E_PRTDCB_MFLCN_RPFCE_MASK I40E_MASK(0xFF, I40E_PRTDCB_MFLCN_RPFCE_SHIFT) +#define I40E_PRTDCB_RETSC 0x001223E0 /* Reset: CORER */ +#define I40E_PRTDCB_RETSC_ETS_MODE_SHIFT 0 +#define I40E_PRTDCB_RETSC_ETS_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSC_ETS_MODE_SHIFT) +#define I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT 1 +#define I40E_PRTDCB_RETSC_NON_ETS_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT) +#define I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT 2 +#define I40E_PRTDCB_RETSC_ETS_MAX_EXP_MASK I40E_MASK(0xF, I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT) +#define I40E_PRTDCB_RETSC_LLTC_SHIFT 8 +#define I40E_PRTDCB_RETSC_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_RETSC_LLTC_SHIFT) +#define I40E_PRTDCB_RETSTCC(_i) (0x00122180 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ +#define I40E_PRTDCB_RETSTCC_MAX_INDEX 7 +#define I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT 0 +#define I40E_PRTDCB_RETSTCC_BWSHARE_MASK I40E_MASK(0x7F, I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT) +#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT 30 +#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT) +#define I40E_PRTDCB_RETSTCC_ETSTC_SHIFT 31 +#define I40E_PRTDCB_RETSTCC_ETSTC_MASK I40E_MASK(0x1u, I40E_PRTDCB_RETSTCC_ETSTC_SHIFT) +#define I40E_PRTDCB_RPPMC 0x001223A0 /* Reset: CORER */ +#define I40E_PRTDCB_RPPMC_LANRPPM_SHIFT 0 +#define I40E_PRTDCB_RPPMC_LANRPPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_LANRPPM_SHIFT) +#define I40E_PRTDCB_RPPMC_RDMARPPM_SHIFT 8 +#define I40E_PRTDCB_RPPMC_RDMARPPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_RDMARPPM_SHIFT) +#define I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT 16 +#define I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT) +#define I40E_PRTDCB_RUP 0x001C0B00 /* Reset: CORER */ +#define I40E_PRTDCB_RUP_NOVLANUP_SHIFT 0 +#define I40E_PRTDCB_RUP_NOVLANUP_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP_NOVLANUP_SHIFT) +#define I40E_PRTDCB_RUP2TC 0x001C09A0 /* Reset: CORER */ +#define I40E_PRTDCB_RUP2TC_UP0TC_SHIFT 0 +#define I40E_PRTDCB_RUP2TC_UP0TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP0TC_SHIFT) +#define I40E_PRTDCB_RUP2TC_UP1TC_SHIFT 3 +#define I40E_PRTDCB_RUP2TC_UP1TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP1TC_SHIFT) +#define I40E_PRTDCB_RUP2TC_UP2TC_SHIFT 6 +#define I40E_PRTDCB_RUP2TC_UP2TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP2TC_SHIFT) +#define I40E_PRTDCB_RUP2TC_UP3TC_SHIFT 9 +#define I40E_PRTDCB_RUP2TC_UP3TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP3TC_SHIFT) +#define I40E_PRTDCB_RUP2TC_UP4TC_SHIFT 12 +#define I40E_PRTDCB_RUP2TC_UP4TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP4TC_SHIFT) +#define I40E_PRTDCB_RUP2TC_UP5TC_SHIFT 15 +#define I40E_PRTDCB_RUP2TC_UP5TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP5TC_SHIFT) +#define I40E_PRTDCB_RUP2TC_UP6TC_SHIFT 18 +#define I40E_PRTDCB_RUP2TC_UP6TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP6TC_SHIFT) +#define I40E_PRTDCB_RUP2TC_UP7TC_SHIFT 21 +#define I40E_PRTDCB_RUP2TC_UP7TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP7TC_SHIFT) +#define I40E_PRTDCB_RUPTQ(_i) 
(0x00122400 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ +#define I40E_PRTDCB_RUPTQ_MAX_INDEX 7 +#define I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT 0 +#define I40E_PRTDCB_RUPTQ_RXQNUM_MASK I40E_MASK(0x3FFF, I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT) +#define I40E_PRTDCB_TC2PFC 0x001C0980 /* Reset: CORER */ +#define I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT 0 +#define I40E_PRTDCB_TC2PFC_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT) +#define I40E_PRTDCB_TCMSTC(_i) (0x000A0040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ +#define I40E_PRTDCB_TCMSTC_MAX_INDEX 7 +#define I40E_PRTDCB_TCMSTC_MSTC_SHIFT 0 +#define I40E_PRTDCB_TCMSTC_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TCMSTC_MSTC_SHIFT) +#define I40E_PRTDCB_TCPMC 0x000A21A0 /* Reset: CORER */ +#define I40E_PRTDCB_TCPMC_CPM_SHIFT 0 +#define I40E_PRTDCB_TCPMC_CPM_MASK I40E_MASK(0x1FFF, I40E_PRTDCB_TCPMC_CPM_SHIFT) +#define I40E_PRTDCB_TCPMC_LLTC_SHIFT 13 +#define I40E_PRTDCB_TCPMC_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TCPMC_LLTC_SHIFT) +#define I40E_PRTDCB_TCPMC_TCPM_MODE_SHIFT 30 +#define I40E_PRTDCB_TCPMC_TCPM_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_TCPMC_TCPM_MODE_SHIFT) +#define I40E_PRTDCB_TCWSTC(_i) (0x000A2040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ +#define I40E_PRTDCB_TCWSTC_MAX_INDEX 7 +#define I40E_PRTDCB_TCWSTC_MSTC_SHIFT 0 +#define I40E_PRTDCB_TCWSTC_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TCWSTC_MSTC_SHIFT) +#define I40E_PRTDCB_TDPMC 0x000A0180 /* Reset: CORER */ +#define I40E_PRTDCB_TDPMC_DPM_SHIFT 0 +#define I40E_PRTDCB_TDPMC_DPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_TDPMC_DPM_SHIFT) +#define I40E_PRTDCB_TDPMC_TCPM_MODE_SHIFT 30 +#define I40E_PRTDCB_TDPMC_TCPM_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_TDPMC_TCPM_MODE_SHIFT) +#define I40E_PRTDCB_TETSC_TCB 0x000AE060 /* Reset: CORER */ +#define I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_SHIFT 0 +#define I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_MASK I40E_MASK(0x1, I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_SHIFT) +#define I40E_PRTDCB_TETSC_TCB_LLTC_SHIFT 8 +#define I40E_PRTDCB_TETSC_TCB_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TETSC_TCB_LLTC_SHIFT) +#define I40E_PRTDCB_TETSC_TPB 0x00098060 /* Reset: CORER */ +#define I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_SHIFT 0 +#define I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_MASK I40E_MASK(0x1, I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_SHIFT) +#define I40E_PRTDCB_TETSC_TPB_LLTC_SHIFT 8 +#define I40E_PRTDCB_TETSC_TPB_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TETSC_TPB_LLTC_SHIFT) +#define I40E_PRTDCB_TFCS 0x001E4560 /* Reset: GLOBR */ +#define I40E_PRTDCB_TFCS_TXOFF_SHIFT 0 +#define I40E_PRTDCB_TFCS_TXOFF_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF_SHIFT) +#define I40E_PRTDCB_TFCS_TXOFF0_SHIFT 8 +#define I40E_PRTDCB_TFCS_TXOFF0_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF0_SHIFT) +#define I40E_PRTDCB_TFCS_TXOFF1_SHIFT 9 +#define I40E_PRTDCB_TFCS_TXOFF1_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF1_SHIFT) +#define I40E_PRTDCB_TFCS_TXOFF2_SHIFT 10 +#define I40E_PRTDCB_TFCS_TXOFF2_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF2_SHIFT) +#define I40E_PRTDCB_TFCS_TXOFF3_SHIFT 11 +#define I40E_PRTDCB_TFCS_TXOFF3_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF3_SHIFT) +#define I40E_PRTDCB_TFCS_TXOFF4_SHIFT 12 +#define I40E_PRTDCB_TFCS_TXOFF4_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF4_SHIFT) +#define I40E_PRTDCB_TFCS_TXOFF5_SHIFT 13 +#define I40E_PRTDCB_TFCS_TXOFF5_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF5_SHIFT) +#define I40E_PRTDCB_TFCS_TXOFF6_SHIFT 14 +#define I40E_PRTDCB_TFCS_TXOFF6_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF6_SHIFT) +#define 
I40E_PRTDCB_TFCS_TXOFF7_SHIFT 15 +#define I40E_PRTDCB_TFCS_TXOFF7_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF7_SHIFT) +#define I40E_PRTDCB_TPFCTS(_i) (0x001E4660 + ((_i) * 32)) /* _i=0...7 */ /* Reset: GLOBR */ +#define I40E_PRTDCB_TPFCTS_MAX_INDEX 7 +#define I40E_PRTDCB_TPFCTS_PFCTIMER_SHIFT 0 +#define I40E_PRTDCB_TPFCTS_PFCTIMER_MASK I40E_MASK(0x3FFF, I40E_PRTDCB_TPFCTS_PFCTIMER_SHIFT) +#define I40E_GLFCOE_RCTL 0x00269B94 /* Reset: CORER */ +#define I40E_GLFCOE_RCTL_FCOEVER_SHIFT 0 +#define I40E_GLFCOE_RCTL_FCOEVER_MASK I40E_MASK(0xF, I40E_GLFCOE_RCTL_FCOEVER_SHIFT) +#define I40E_GLFCOE_RCTL_SAVBAD_SHIFT 4 +#define I40E_GLFCOE_RCTL_SAVBAD_MASK I40E_MASK(0x1, I40E_GLFCOE_RCTL_SAVBAD_SHIFT) +#define I40E_GLFCOE_RCTL_ICRC_SHIFT 5 +#define I40E_GLFCOE_RCTL_ICRC_MASK I40E_MASK(0x1, I40E_GLFCOE_RCTL_ICRC_SHIFT) +#define I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT 16 +#define I40E_GLFCOE_RCTL_MAX_SIZE_MASK I40E_MASK(0x3FFF, I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT) +#define I40E_GL_FWSTS 0x00083048 /* Reset: POR */ +#define I40E_GL_FWSTS_FWS0B_SHIFT 0 +#define I40E_GL_FWSTS_FWS0B_MASK I40E_MASK(0xFF, I40E_GL_FWSTS_FWS0B_SHIFT) +#define I40E_GL_FWSTS_FWRI_SHIFT 9 +#define I40E_GL_FWSTS_FWRI_MASK I40E_MASK(0x1, I40E_GL_FWSTS_FWRI_SHIFT) +#define I40E_GL_FWSTS_FWS1B_SHIFT 16 +#define I40E_GL_FWSTS_FWS1B_MASK I40E_MASK(0xFF, I40E_GL_FWSTS_FWS1B_SHIFT) +#define I40E_GL_FWSTS_FWS1B_EMPR_0 I40E_MASK(0x20, I40E_GL_FWSTS_FWS1B_SHIFT) +#define I40E_GL_FWSTS_FWS1B_EMPR_10 I40E_MASK(0x2A, I40E_GL_FWSTS_FWS1B_SHIFT) +#define I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK \ + I40E_MASK(0x30, I40E_GL_FWSTS_FWS1B_SHIFT) +#define I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK \ + I40E_MASK(0x31, I40E_GL_FWSTS_FWS1B_SHIFT) +#define I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_TRANSITION_MASK \ + I40E_MASK(0x32, I40E_GL_FWSTS_FWS1B_SHIFT) +#define I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_NVM_MASK \ + I40E_MASK(0x33, I40E_GL_FWSTS_FWS1B_SHIFT) +#define I40E_X722_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK \ + I40E_MASK(0xB, I40E_GL_FWSTS_FWS1B_SHIFT) +#define I40E_X722_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK \ + I40E_MASK(0xC, I40E_GL_FWSTS_FWS1B_SHIFT) +#define I40E_GLGEN_CLKSTAT 0x000B8184 /* Reset: POR */ +#define I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT 0 +#define I40E_GLGEN_CLKSTAT_CLKMODE_MASK I40E_MASK(0x1, I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT) +#define I40E_GLGEN_CLKSTAT_U_CLK_SPEED_SHIFT 4 +#define I40E_GLGEN_CLKSTAT_U_CLK_SPEED_MASK I40E_MASK(0x3, I40E_GLGEN_CLKSTAT_U_CLK_SPEED_SHIFT) +#define I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_SHIFT 8 +#define I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_SHIFT) +#define I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_SHIFT 12 +#define I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_SHIFT) +#define I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_SHIFT 16 +#define I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_SHIFT) +#define I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_SHIFT 20 +#define I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_SHIFT) +#define I40E_GLGEN_GPIO_CTL(_i) (0x00088100 + ((_i) * 4)) /* _i=0...29 */ /* Reset: POR */ +#define I40E_GLGEN_GPIO_CTL_MAX_INDEX 29 +#define I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT 0 +#define I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK I40E_MASK(0x3, I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT) +#define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT 3 +#define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT) +#define I40E_GLGEN_GPIO_CTL_PIN_DIR_SHIFT 4 +#define I40E_GLGEN_GPIO_CTL_PIN_DIR_MASK 
I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_PIN_DIR_SHIFT) +#define I40E_GLGEN_GPIO_CTL_TRI_CTL_SHIFT 5 +#define I40E_GLGEN_GPIO_CTL_TRI_CTL_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_TRI_CTL_SHIFT) +#define I40E_GLGEN_GPIO_CTL_OUT_CTL_SHIFT 6 +#define I40E_GLGEN_GPIO_CTL_OUT_CTL_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_OUT_CTL_SHIFT) +#define I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT 7 +#define I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK I40E_MASK(0x7, I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT) +#define I40E_GLGEN_GPIO_CTL_LED_INVRT_SHIFT 10 +#define I40E_GLGEN_GPIO_CTL_LED_INVRT_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_LED_INVRT_SHIFT) +#define I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT 11 +#define I40E_GLGEN_GPIO_CTL_LED_BLINK_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT) +#define I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT 12 +#define I40E_GLGEN_GPIO_CTL_LED_MODE_MASK I40E_MASK(0x1F, I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) +#define I40E_GLGEN_GPIO_CTL_INT_MODE_SHIFT 17 +#define I40E_GLGEN_GPIO_CTL_INT_MODE_MASK I40E_MASK(0x3, I40E_GLGEN_GPIO_CTL_INT_MODE_SHIFT) +#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT 19 +#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT) +#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT 20 +#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_MASK I40E_MASK(0x3F, I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT) +#define I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_SHIFT 26 +#define I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_MASK I40E_MASK(0xF, I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_SHIFT) +#define I40E_GLGEN_GPIO_SET 0x00088184 /* Reset: POR */ +#define I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT 0 +#define I40E_GLGEN_GPIO_SET_GPIO_INDX_MASK I40E_MASK(0x1F, I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT) +#define I40E_GLGEN_GPIO_SET_SDP_DATA_SHIFT 5 +#define I40E_GLGEN_GPIO_SET_SDP_DATA_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_SET_SDP_DATA_SHIFT) +#define I40E_GLGEN_GPIO_SET_DRIVE_SDP_SHIFT 6 +#define I40E_GLGEN_GPIO_SET_DRIVE_SDP_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_SET_DRIVE_SDP_SHIFT) +#define I40E_GLGEN_GPIO_STAT 0x0008817C /* Reset: POR */ +#define I40E_GLGEN_GPIO_STAT_GPIO_VALUE_SHIFT 0 +#define I40E_GLGEN_GPIO_STAT_GPIO_VALUE_MASK I40E_MASK(0x3FFFFFFF, I40E_GLGEN_GPIO_STAT_GPIO_VALUE_SHIFT) +#define I40E_GLGEN_GPIO_TRANSIT 0x00088180 /* Reset: POR */ +#define I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_SHIFT 0 +#define I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_MASK I40E_MASK(0x3FFFFFFF, I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_SHIFT) +#define I40E_GLGEN_I2CCMD(_i) (0x000881E0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */ +#define I40E_GLGEN_I2CCMD_MAX_INDEX 3 +#define I40E_GLGEN_I2CCMD_DATA_SHIFT 0 +#define I40E_GLGEN_I2CCMD_DATA_MASK I40E_MASK(0xFFFF, I40E_GLGEN_I2CCMD_DATA_SHIFT) +#define I40E_GLGEN_I2CCMD_REGADD_SHIFT 16 +#define I40E_GLGEN_I2CCMD_REGADD_MASK I40E_MASK(0xFF, I40E_GLGEN_I2CCMD_REGADD_SHIFT) +#define I40E_GLGEN_I2CCMD_PHYADD_SHIFT 24 +#define I40E_GLGEN_I2CCMD_PHYADD_MASK I40E_MASK(0x7, I40E_GLGEN_I2CCMD_PHYADD_SHIFT) +#define I40E_GLGEN_I2CCMD_OP_SHIFT 27 +#define I40E_GLGEN_I2CCMD_OP_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_OP_SHIFT) +#define I40E_GLGEN_I2CCMD_RESET_SHIFT 28 +#define I40E_GLGEN_I2CCMD_RESET_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_RESET_SHIFT) +#define I40E_GLGEN_I2CCMD_R_SHIFT 29 +#define I40E_GLGEN_I2CCMD_R_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_R_SHIFT) +#define I40E_GLGEN_I2CCMD_E_SHIFT 31 +#define I40E_GLGEN_I2CCMD_E_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_E_SHIFT) +#define I40E_GLGEN_I2CPARAMS(_i) (0x000881AC + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */ +#define I40E_GLGEN_I2CPARAMS_MAX_INDEX 3 +#define 
I40E_GLGEN_I2CPARAMS_WRITE_TIME_SHIFT 0 +#define I40E_GLGEN_I2CPARAMS_WRITE_TIME_MASK I40E_MASK(0x1F, I40E_GLGEN_I2CPARAMS_WRITE_TIME_SHIFT) +#define I40E_GLGEN_I2CPARAMS_READ_TIME_SHIFT 5 +#define I40E_GLGEN_I2CPARAMS_READ_TIME_MASK I40E_MASK(0x7, I40E_GLGEN_I2CPARAMS_READ_TIME_SHIFT) +#define I40E_GLGEN_I2CPARAMS_I2CBB_EN_SHIFT 8 +#define I40E_GLGEN_I2CPARAMS_I2CBB_EN_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_I2CBB_EN_SHIFT) +#define I40E_GLGEN_I2CPARAMS_CLK_SHIFT 9 +#define I40E_GLGEN_I2CPARAMS_CLK_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_SHIFT) +#define I40E_GLGEN_I2CPARAMS_DATA_OUT_SHIFT 10 +#define I40E_GLGEN_I2CPARAMS_DATA_OUT_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_DATA_OUT_SHIFT) +#define I40E_GLGEN_I2CPARAMS_DATA_OE_N_SHIFT 11 +#define I40E_GLGEN_I2CPARAMS_DATA_OE_N_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_DATA_OE_N_SHIFT) +#define I40E_GLGEN_I2CPARAMS_DATA_IN_SHIFT 12 +#define I40E_GLGEN_I2CPARAMS_DATA_IN_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_DATA_IN_SHIFT) +#define I40E_GLGEN_I2CPARAMS_CLK_OE_N_SHIFT 13 +#define I40E_GLGEN_I2CPARAMS_CLK_OE_N_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_OE_N_SHIFT) +#define I40E_GLGEN_I2CPARAMS_CLK_IN_SHIFT 14 +#define I40E_GLGEN_I2CPARAMS_CLK_IN_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_IN_SHIFT) +#define I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_SHIFT 15 +#define I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_SHIFT) +#define I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_SHIFT 31 +#define I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_SHIFT) +#define I40E_GLGEN_LED_CTL 0x00088178 /* Reset: POR */ +#define I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_SHIFT 0 +#define I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_MASK I40E_MASK(0x1, I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_SHIFT) +#define I40E_GLGEN_MDIO_CTRL(_i) (0x000881D0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */ +#define I40E_GLGEN_MDIO_CTRL_MAX_INDEX 3 +#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_SHIFT 0 +#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_MASK I40E_MASK(0x1FFFF, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_SHIFT) +#define I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT 17 +#define I40E_GLGEN_MDIO_CTRL_CONTMDC_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT) +#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT 18 +#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_MASK I40E_MASK(0x7FF, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT) +#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_SHIFT 29 +#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_MASK I40E_MASK(0x7, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_SHIFT) +#define I40E_GLGEN_MDIO_I2C_SEL(_i) (0x000881C0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */ +#define I40E_GLGEN_MDIO_I2C_SEL_MAX_INDEX 3 +#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT 0 +#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT) +#define I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT 1 +#define I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK I40E_MASK(0xF, I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT) +#define I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_SHIFT 5 +#define I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_SHIFT) +#define I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_SHIFT 10 +#define I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_SHIFT) +#define I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_SHIFT 15 +#define I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_SHIFT) +#define 
I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_SHIFT 20 +#define I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_SHIFT) +#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_SHIFT 25 +#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_MASK I40E_MASK(0xF, I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_SHIFT) +#define I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_SHIFT 31 +#define I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_SHIFT) +#define I40E_GLGEN_MSCA(_i) (0x0008818C + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */ +#define I40E_GLGEN_MSCA_MAX_INDEX 3 +#define I40E_GLGEN_MSCA_MDIADD_SHIFT 0 +#define I40E_GLGEN_MSCA_MDIADD_MASK I40E_MASK(0xFFFF, I40E_GLGEN_MSCA_MDIADD_SHIFT) +#define I40E_GLGEN_MSCA_DEVADD_SHIFT 16 +#define I40E_GLGEN_MSCA_DEVADD_MASK I40E_MASK(0x1F, I40E_GLGEN_MSCA_DEVADD_SHIFT) +#define I40E_GLGEN_MSCA_PHYADD_SHIFT 21 +#define I40E_GLGEN_MSCA_PHYADD_MASK I40E_MASK(0x1F, I40E_GLGEN_MSCA_PHYADD_SHIFT) +#define I40E_GLGEN_MSCA_OPCODE_SHIFT 26 +#define I40E_GLGEN_MSCA_OPCODE_MASK I40E_MASK(0x3, I40E_GLGEN_MSCA_OPCODE_SHIFT) +#define I40E_GLGEN_MSCA_STCODE_SHIFT 28 +#define I40E_GLGEN_MSCA_STCODE_MASK I40E_MASK(0x3, I40E_GLGEN_MSCA_STCODE_SHIFT) +#define I40E_GLGEN_MSCA_MDICMD_SHIFT 30 +#define I40E_GLGEN_MSCA_MDICMD_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDICMD_SHIFT) +#define I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT 31 +#define I40E_GLGEN_MSCA_MDIINPROGEN_MASK I40E_MASK(0x1u, I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT) +#define I40E_GLGEN_MSRWD(_i) (0x0008819C + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */ +#define I40E_GLGEN_MSRWD_MAX_INDEX 3 +#define I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT 0 +#define I40E_GLGEN_MSRWD_MDIWRDATA_MASK I40E_MASK(0xFFFF, I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT) +#define I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT 16 +#define I40E_GLGEN_MSRWD_MDIRDDATA_MASK I40E_MASK(0xFFFF, I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT) +#define I40E_GLGEN_PCIFCNCNT 0x001C0AB4 /* Reset: PCIR */ +#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT 0 +#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_MASK I40E_MASK(0x1F, I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT) +#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT 16 +#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_MASK I40E_MASK(0xFF, I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT) +#define I40E_GLGEN_RSTAT 0x000B8188 /* Reset: POR */ +#define I40E_GLGEN_RSTAT_DEVSTATE_SHIFT 0 +#define I40E_GLGEN_RSTAT_DEVSTATE_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_DEVSTATE_SHIFT) +#define I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT 2 +#define I40E_GLGEN_RSTAT_RESET_TYPE_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT) +#define I40E_GLGEN_RSTAT_CORERCNT_SHIFT 4 +#define I40E_GLGEN_RSTAT_CORERCNT_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_CORERCNT_SHIFT) +#define I40E_GLGEN_RSTAT_GLOBRCNT_SHIFT 6 +#define I40E_GLGEN_RSTAT_GLOBRCNT_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_GLOBRCNT_SHIFT) +#define I40E_GLGEN_RSTAT_EMPRCNT_SHIFT 8 +#define I40E_GLGEN_RSTAT_EMPRCNT_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_EMPRCNT_SHIFT) +#define I40E_GLGEN_RSTAT_TIME_TO_RST_SHIFT 10 +#define I40E_GLGEN_RSTAT_TIME_TO_RST_MASK I40E_MASK(0x3F, I40E_GLGEN_RSTAT_TIME_TO_RST_SHIFT) +#define I40E_GLGEN_RSTCTL 0x000B8180 /* Reset: POR */ +#define I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT 0 +#define I40E_GLGEN_RSTCTL_GRSTDEL_MASK I40E_MASK(0x3F, I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT) +#define I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT 8 +#define I40E_GLGEN_RSTCTL_ECC_RST_ENA_MASK I40E_MASK(0x1, I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT) +#define I40E_GLGEN_RTRIG 0x000B8190 /* Reset: CORER */ +#define I40E_GLGEN_RTRIG_CORER_SHIFT 0 +#define 
I40E_GLGEN_RTRIG_CORER_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_CORER_SHIFT) +#define I40E_GLGEN_RTRIG_GLOBR_SHIFT 1 +#define I40E_GLGEN_RTRIG_GLOBR_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_GLOBR_SHIFT) +#define I40E_GLGEN_RTRIG_EMPFWR_SHIFT 2 +#define I40E_GLGEN_RTRIG_EMPFWR_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_EMPFWR_SHIFT) +#define I40E_GLGEN_STAT 0x000B612C /* Reset: POR */ +#define I40E_GLGEN_STAT_HWRSVD0_SHIFT 0 +#define I40E_GLGEN_STAT_HWRSVD0_MASK I40E_MASK(0x3, I40E_GLGEN_STAT_HWRSVD0_SHIFT) +#define I40E_GLGEN_STAT_DCBEN_SHIFT 2 +#define I40E_GLGEN_STAT_DCBEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_DCBEN_SHIFT) +#define I40E_GLGEN_STAT_VTEN_SHIFT 3 +#define I40E_GLGEN_STAT_VTEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_VTEN_SHIFT) +#define I40E_GLGEN_STAT_FCOEN_SHIFT 4 +#define I40E_GLGEN_STAT_FCOEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_FCOEN_SHIFT) +#define I40E_GLGEN_STAT_EVBEN_SHIFT 5 +#define I40E_GLGEN_STAT_EVBEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_EVBEN_SHIFT) +#define I40E_GLGEN_STAT_HWRSVD1_SHIFT 6 +#define I40E_GLGEN_STAT_HWRSVD1_MASK I40E_MASK(0x3, I40E_GLGEN_STAT_HWRSVD1_SHIFT) +#define I40E_GLGEN_VFLRSTAT(_i) (0x00092600 + ((_i) * 4)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLGEN_VFLRSTAT_MAX_INDEX 3 +#define I40E_GLGEN_VFLRSTAT_VFLRE_SHIFT 0 +#define I40E_GLGEN_VFLRSTAT_VFLRE_MASK I40E_MASK(0xFFFFFFFF, I40E_GLGEN_VFLRSTAT_VFLRE_SHIFT) +#define I40E_GLVFGEN_TIMER 0x000881BC /* Reset: CORER */ +#define I40E_GLVFGEN_TIMER_GTIME_SHIFT 0 +#define I40E_GLVFGEN_TIMER_GTIME_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVFGEN_TIMER_GTIME_SHIFT) +#define I40E_PFGEN_CTRL 0x00092400 /* Reset: PFR */ +#define I40E_PFGEN_CTRL_PFSWR_SHIFT 0 +#define I40E_PFGEN_CTRL_PFSWR_MASK I40E_MASK(0x1, I40E_PFGEN_CTRL_PFSWR_SHIFT) +#define I40E_PFGEN_DRUN 0x00092500 /* Reset: CORER */ +#define I40E_PFGEN_DRUN_DRVUNLD_SHIFT 0 +#define I40E_PFGEN_DRUN_DRVUNLD_MASK I40E_MASK(0x1, I40E_PFGEN_DRUN_DRVUNLD_SHIFT) +#define I40E_PFGEN_PORTNUM 0x001C0480 /* Reset: CORER */ +#define I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT 0 +#define I40E_PFGEN_PORTNUM_PORT_NUM_MASK I40E_MASK(0x3, I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT) +#define I40E_PFGEN_STATE 0x00088000 /* Reset: CORER */ +#define I40E_PFGEN_STATE_RESERVED_0_SHIFT 0 +#define I40E_PFGEN_STATE_RESERVED_0_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_RESERVED_0_SHIFT) +#define I40E_PFGEN_STATE_PFFCEN_SHIFT 1 +#define I40E_PFGEN_STATE_PFFCEN_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_PFFCEN_SHIFT) +#define I40E_PFGEN_STATE_PFLINKEN_SHIFT 2 +#define I40E_PFGEN_STATE_PFLINKEN_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_PFLINKEN_SHIFT) +#define I40E_PFGEN_STATE_PFSCEN_SHIFT 3 +#define I40E_PFGEN_STATE_PFSCEN_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_PFSCEN_SHIFT) +#define I40E_PRTGEN_CNF 0x000B8120 /* Reset: POR */ +#define I40E_PRTGEN_CNF_PORT_DIS_SHIFT 0 +#define I40E_PRTGEN_CNF_PORT_DIS_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF_PORT_DIS_SHIFT) +#define I40E_PRTGEN_CNF_ALLOW_PORT_DIS_SHIFT 1 +#define I40E_PRTGEN_CNF_ALLOW_PORT_DIS_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF_ALLOW_PORT_DIS_SHIFT) +#define I40E_PRTGEN_CNF_EMP_PORT_DIS_SHIFT 2 +#define I40E_PRTGEN_CNF_EMP_PORT_DIS_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF_EMP_PORT_DIS_SHIFT) +#define I40E_PRTGEN_CNF2 0x000B8160 /* Reset: POR */ +#define I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_SHIFT 0 +#define I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_SHIFT) +#define I40E_PRTGEN_STATUS 0x000B8100 /* Reset: POR */ +#define I40E_PRTGEN_STATUS_PORT_VALID_SHIFT 0 +#define I40E_PRTGEN_STATUS_PORT_VALID_MASK I40E_MASK(0x1, 
I40E_PRTGEN_STATUS_PORT_VALID_SHIFT) +#define I40E_PRTGEN_STATUS_PORT_ACTIVE_SHIFT 1 +#define I40E_PRTGEN_STATUS_PORT_ACTIVE_MASK I40E_MASK(0x1, I40E_PRTGEN_STATUS_PORT_ACTIVE_SHIFT) +#define I40E_VFGEN_RSTAT1(_VF) (0x00074400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VFGEN_RSTAT1_MAX_INDEX 127 +#define I40E_VFGEN_RSTAT1_VFR_STATE_SHIFT 0 +#define I40E_VFGEN_RSTAT1_VFR_STATE_MASK I40E_MASK(0x3, I40E_VFGEN_RSTAT1_VFR_STATE_SHIFT) +#define I40E_VPGEN_VFRSTAT(_VF) (0x00091C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_VPGEN_VFRSTAT_MAX_INDEX 127 +#define I40E_VPGEN_VFRSTAT_VFRD_SHIFT 0 +#define I40E_VPGEN_VFRSTAT_VFRD_MASK I40E_MASK(0x1, I40E_VPGEN_VFRSTAT_VFRD_SHIFT) +#define I40E_VPGEN_VFRTRIG(_VF) (0x00091800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_VPGEN_VFRTRIG_MAX_INDEX 127 +#define I40E_VPGEN_VFRTRIG_VFSWR_SHIFT 0 +#define I40E_VPGEN_VFRTRIG_VFSWR_MASK I40E_MASK(0x1, I40E_VPGEN_VFRTRIG_VFSWR_SHIFT) +#define I40E_VSIGEN_RSTAT(_VSI) (0x00090800 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_VSIGEN_RSTAT_MAX_INDEX 383 +#define I40E_VSIGEN_RSTAT_VMRD_SHIFT 0 +#define I40E_VSIGEN_RSTAT_VMRD_MASK I40E_MASK(0x1, I40E_VSIGEN_RSTAT_VMRD_SHIFT) +#define I40E_VSIGEN_RTRIG(_VSI) (0x00090000 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_VSIGEN_RTRIG_MAX_INDEX 383 +#define I40E_VSIGEN_RTRIG_VMSWR_SHIFT 0 +#define I40E_VSIGEN_RTRIG_VMSWR_MASK I40E_MASK(0x1, I40E_VSIGEN_RTRIG_VMSWR_SHIFT) +#define I40E_GLHMC_FCOEDDPBASE(_i) (0x000C6600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_FCOEDDPBASE_MAX_INDEX 15 +#define I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_SHIFT 0 +#define I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_SHIFT) +#define I40E_GLHMC_FCOEDDPCNT(_i) (0x000C6700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_FCOEDDPCNT_MAX_INDEX 15 +#define I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_SHIFT 0 +#define I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_MASK I40E_MASK(0xFFFFF, I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_SHIFT) +#define I40E_GLHMC_FCOEDDPOBJSZ 0x000C2010 /* Reset: CORER */ +#define I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_SHIFT 0 +#define I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_SHIFT) +#define I40E_GLHMC_FCOEFBASE(_i) (0x000C6800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_FCOEFBASE_MAX_INDEX 15 +#define I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_SHIFT 0 +#define I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_SHIFT) +#define I40E_GLHMC_FCOEFCNT(_i) (0x000C6900 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_FCOEFCNT_MAX_INDEX 15 +#define I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_SHIFT 0 +#define I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_MASK I40E_MASK(0x7FFFFF, I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_SHIFT) +#define I40E_GLHMC_FCOEFMAX 0x000C20D0 /* Reset: CORER */ +#define I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT 0 +#define I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK I40E_MASK(0xFFFF, I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT) +#define I40E_GLHMC_FCOEFOBJSZ 0x000C2018 /* Reset: CORER */ +#define I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_SHIFT 0 +#define I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_SHIFT) +#define I40E_GLHMC_FCOEMAX 0x000C2014 /* Reset: CORER */ +#define I40E_GLHMC_FCOEMAX_PMFCOEMAX_SHIFT 0 +#define I40E_GLHMC_FCOEMAX_PMFCOEMAX_MASK I40E_MASK(0x1FFF, 
I40E_GLHMC_FCOEMAX_PMFCOEMAX_SHIFT) +#define I40E_GLHMC_FSIAVBASE(_i) (0x000C5600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_FSIAVBASE_MAX_INDEX 15 +#define I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_SHIFT 0 +#define I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_SHIFT) +#define I40E_GLHMC_FSIAVCNT(_i) (0x000C5700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_FSIAVCNT_MAX_INDEX 15 +#define I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_SHIFT 0 +#define I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_SHIFT) +#define I40E_GLHMC_FSIAVCNT_RSVD_SHIFT 29 +#define I40E_GLHMC_FSIAVCNT_RSVD_MASK I40E_MASK(0x7, I40E_GLHMC_FSIAVCNT_RSVD_SHIFT) +#define I40E_GLHMC_FSIAVMAX 0x000C2068 /* Reset: CORER */ +#define I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_SHIFT 0 +#define I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_MASK I40E_MASK(0x1FFFF, I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_SHIFT) +#define I40E_GLHMC_FSIAVOBJSZ 0x000C2064 /* Reset: CORER */ +#define I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_SHIFT 0 +#define I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_SHIFT) +#define I40E_GLHMC_FSIMCBASE(_i) (0x000C6000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_FSIMCBASE_MAX_INDEX 15 +#define I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_SHIFT 0 +#define I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_SHIFT) +#define I40E_GLHMC_FSIMCCNT(_i) (0x000C6100 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_FSIMCCNT_MAX_INDEX 15 +#define I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_SHIFT 0 +#define I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_SHIFT) +#define I40E_GLHMC_FSIMCMAX 0x000C2060 /* Reset: CORER */ +#define I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_SHIFT 0 +#define I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_MASK I40E_MASK(0x3FFF, I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_SHIFT) +#define I40E_GLHMC_FSIMCOBJSZ 0x000C205c /* Reset: CORER */ +#define I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_SHIFT 0 +#define I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_SHIFT) +#define I40E_GLHMC_LANQMAX 0x000C2008 /* Reset: CORER */ +#define I40E_GLHMC_LANQMAX_PMLANQMAX_SHIFT 0 +#define I40E_GLHMC_LANQMAX_PMLANQMAX_MASK I40E_MASK(0x7FF, I40E_GLHMC_LANQMAX_PMLANQMAX_SHIFT) +#define I40E_GLHMC_LANRXBASE(_i) (0x000C6400 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_LANRXBASE_MAX_INDEX 15 +#define I40E_GLHMC_LANRXBASE_FPMLANRXBASE_SHIFT 0 +#define I40E_GLHMC_LANRXBASE_FPMLANRXBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_LANRXBASE_FPMLANRXBASE_SHIFT) +#define I40E_GLHMC_LANRXCNT(_i) (0x000C6500 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_LANRXCNT_MAX_INDEX 15 +#define I40E_GLHMC_LANRXCNT_FPMLANRXCNT_SHIFT 0 +#define I40E_GLHMC_LANRXCNT_FPMLANRXCNT_MASK I40E_MASK(0x7FF, I40E_GLHMC_LANRXCNT_FPMLANRXCNT_SHIFT) +#define I40E_GLHMC_LANRXOBJSZ 0x000C200c /* Reset: CORER */ +#define I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_SHIFT 0 +#define I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_SHIFT) +#define I40E_GLHMC_LANTXBASE(_i) (0x000C6200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_LANTXBASE_MAX_INDEX 15 +#define I40E_GLHMC_LANTXBASE_FPMLANTXBASE_SHIFT 0 +#define I40E_GLHMC_LANTXBASE_FPMLANTXBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_LANTXBASE_FPMLANTXBASE_SHIFT) +#define I40E_GLHMC_LANTXBASE_RSVD_SHIFT 24 +#define 
I40E_GLHMC_LANTXBASE_RSVD_MASK I40E_MASK(0xFF, I40E_GLHMC_LANTXBASE_RSVD_SHIFT) +#define I40E_GLHMC_LANTXCNT(_i) (0x000C6300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_LANTXCNT_MAX_INDEX 15 +#define I40E_GLHMC_LANTXCNT_FPMLANTXCNT_SHIFT 0 +#define I40E_GLHMC_LANTXCNT_FPMLANTXCNT_MASK I40E_MASK(0x7FF, I40E_GLHMC_LANTXCNT_FPMLANTXCNT_SHIFT) +#define I40E_GLHMC_LANTXOBJSZ 0x000C2004 /* Reset: CORER */ +#define I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_SHIFT 0 +#define I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_SHIFT) +#define I40E_GLHMC_PFASSIGN(_i) (0x000C0c00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PFASSIGN_MAX_INDEX 15 +#define I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_SHIFT 0 +#define I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_MASK I40E_MASK(0xF, I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_SHIFT) +#define I40E_GLHMC_SDPART(_i) (0x000C0800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_SDPART_MAX_INDEX 15 +#define I40E_GLHMC_SDPART_PMSDBASE_SHIFT 0 +#define I40E_GLHMC_SDPART_PMSDBASE_MASK I40E_MASK(0xFFF, I40E_GLHMC_SDPART_PMSDBASE_SHIFT) +#define I40E_GLHMC_SDPART_PMSDSIZE_SHIFT 16 +#define I40E_GLHMC_SDPART_PMSDSIZE_MASK I40E_MASK(0x1FFF, I40E_GLHMC_SDPART_PMSDSIZE_SHIFT) +#define I40E_PFHMC_ERRORDATA 0x000C0500 /* Reset: PFR */ +#define I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_SHIFT 0 +#define I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_MASK I40E_MASK(0x3FFFFFFF, I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_SHIFT) +#define I40E_PFHMC_ERRORINFO 0x000C0400 /* Reset: PFR */ +#define I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT 0 +#define I40E_PFHMC_ERRORINFO_PMF_INDEX_MASK I40E_MASK(0x1F, I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT) +#define I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT 7 +#define I40E_PFHMC_ERRORINFO_PMF_ISVF_MASK I40E_MASK(0x1, I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT) +#define I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT 8 +#define I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_MASK I40E_MASK(0xF, I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT) +#define I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT 16 +#define I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_MASK I40E_MASK(0x1F, I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT) +#define I40E_PFHMC_ERRORINFO_ERROR_DETECTED_SHIFT 31 +#define I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK I40E_MASK(0x1, I40E_PFHMC_ERRORINFO_ERROR_DETECTED_SHIFT) +#define I40E_PFHMC_PDINV 0x000C0300 /* Reset: PFR */ +#define I40E_PFHMC_PDINV_PMSDIDX_SHIFT 0 +#define I40E_PFHMC_PDINV_PMSDIDX_MASK I40E_MASK(0xFFF, I40E_PFHMC_PDINV_PMSDIDX_SHIFT) +#define I40E_PFHMC_PDINV_PMPDIDX_SHIFT 16 +#define I40E_PFHMC_PDINV_PMPDIDX_MASK I40E_MASK(0x1FF, I40E_PFHMC_PDINV_PMPDIDX_SHIFT) +#define I40E_PFHMC_SDCMD 0x000C0000 /* Reset: PFR */ +#define I40E_PFHMC_SDCMD_PMSDIDX_SHIFT 0 +#define I40E_PFHMC_SDCMD_PMSDIDX_MASK I40E_MASK(0xFFF, I40E_PFHMC_SDCMD_PMSDIDX_SHIFT) +#define I40E_PFHMC_SDCMD_PMSDWR_SHIFT 31 +#define I40E_PFHMC_SDCMD_PMSDWR_MASK I40E_MASK(0x1, I40E_PFHMC_SDCMD_PMSDWR_SHIFT) +#define I40E_PFHMC_SDDATAHIGH 0x000C0200 /* Reset: PFR */ +#define I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_SHIFT 0 +#define I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_SHIFT) +#define I40E_PFHMC_SDDATALOW 0x000C0100 /* Reset: PFR */ +#define I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT 0 +#define I40E_PFHMC_SDDATALOW_PMSDVALID_MASK I40E_MASK(0x1, I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT) +#define I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT 1 +#define I40E_PFHMC_SDDATALOW_PMSDTYPE_MASK I40E_MASK(0x1, I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) +#define 
I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT 2 +#define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_MASK I40E_MASK(0x3FF, I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) +#define I40E_PFHMC_SDDATALOW_PMSDDATALOW_SHIFT 12 +#define I40E_PFHMC_SDDATALOW_PMSDDATALOW_MASK I40E_MASK(0xFFFFF, I40E_PFHMC_SDDATALOW_PMSDDATALOW_SHIFT) +#define I40E_GL_GP_FUSE(_i) (0x0009400C + ((_i) * 4)) /* _i=0...28 */ /* Reset: POR */ +#define I40E_GL_GP_FUSE_MAX_INDEX 28 +#define I40E_GL_GP_FUSE_GL_GP_FUSE_SHIFT 0 +#define I40E_GL_GP_FUSE_GL_GP_FUSE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_GP_FUSE_GL_GP_FUSE_SHIFT) +#define I40E_GL_UFUSE 0x00094008 /* Reset: POR */ +#define I40E_GL_UFUSE_FOUR_PORT_ENABLE_SHIFT 1 +#define I40E_GL_UFUSE_FOUR_PORT_ENABLE_MASK I40E_MASK(0x1, I40E_GL_UFUSE_FOUR_PORT_ENABLE_SHIFT) +#define I40E_GL_UFUSE_NIC_ID_SHIFT 2 +#define I40E_GL_UFUSE_NIC_ID_MASK I40E_MASK(0x1, I40E_GL_UFUSE_NIC_ID_SHIFT) +#define I40E_GL_UFUSE_ULT_LOCKOUT_SHIFT 10 +#define I40E_GL_UFUSE_ULT_LOCKOUT_MASK I40E_MASK(0x1, I40E_GL_UFUSE_ULT_LOCKOUT_SHIFT) +#define I40E_GL_UFUSE_CLS_LOCKOUT_SHIFT 11 +#define I40E_GL_UFUSE_CLS_LOCKOUT_MASK I40E_MASK(0x1, I40E_GL_UFUSE_CLS_LOCKOUT_SHIFT) +#define I40E_EMPINT_GPIO_ENA 0x00088188 /* Reset: POR */ +#define I40E_EMPINT_GPIO_ENA_GPIO0_ENA_SHIFT 0 +#define I40E_EMPINT_GPIO_ENA_GPIO0_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO0_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO1_ENA_SHIFT 1 +#define I40E_EMPINT_GPIO_ENA_GPIO1_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO1_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO2_ENA_SHIFT 2 +#define I40E_EMPINT_GPIO_ENA_GPIO2_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO2_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO3_ENA_SHIFT 3 +#define I40E_EMPINT_GPIO_ENA_GPIO3_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO3_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO4_ENA_SHIFT 4 +#define I40E_EMPINT_GPIO_ENA_GPIO4_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO4_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO5_ENA_SHIFT 5 +#define I40E_EMPINT_GPIO_ENA_GPIO5_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO5_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO6_ENA_SHIFT 6 +#define I40E_EMPINT_GPIO_ENA_GPIO6_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO6_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO7_ENA_SHIFT 7 +#define I40E_EMPINT_GPIO_ENA_GPIO7_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO7_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO8_ENA_SHIFT 8 +#define I40E_EMPINT_GPIO_ENA_GPIO8_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO8_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO9_ENA_SHIFT 9 +#define I40E_EMPINT_GPIO_ENA_GPIO9_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO9_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO10_ENA_SHIFT 10 +#define I40E_EMPINT_GPIO_ENA_GPIO10_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO10_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO11_ENA_SHIFT 11 +#define I40E_EMPINT_GPIO_ENA_GPIO11_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO11_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO12_ENA_SHIFT 12 +#define I40E_EMPINT_GPIO_ENA_GPIO12_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO12_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO13_ENA_SHIFT 13 +#define I40E_EMPINT_GPIO_ENA_GPIO13_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO13_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO14_ENA_SHIFT 14 +#define I40E_EMPINT_GPIO_ENA_GPIO14_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO14_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO15_ENA_SHIFT 15 +#define I40E_EMPINT_GPIO_ENA_GPIO15_ENA_MASK I40E_MASK(0x1, 
I40E_EMPINT_GPIO_ENA_GPIO15_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO16_ENA_SHIFT 16 +#define I40E_EMPINT_GPIO_ENA_GPIO16_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO16_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO17_ENA_SHIFT 17 +#define I40E_EMPINT_GPIO_ENA_GPIO17_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO17_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO18_ENA_SHIFT 18 +#define I40E_EMPINT_GPIO_ENA_GPIO18_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO18_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO19_ENA_SHIFT 19 +#define I40E_EMPINT_GPIO_ENA_GPIO19_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO19_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO20_ENA_SHIFT 20 +#define I40E_EMPINT_GPIO_ENA_GPIO20_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO20_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO21_ENA_SHIFT 21 +#define I40E_EMPINT_GPIO_ENA_GPIO21_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO21_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO22_ENA_SHIFT 22 +#define I40E_EMPINT_GPIO_ENA_GPIO22_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO22_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO23_ENA_SHIFT 23 +#define I40E_EMPINT_GPIO_ENA_GPIO23_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO23_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO24_ENA_SHIFT 24 +#define I40E_EMPINT_GPIO_ENA_GPIO24_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO24_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO25_ENA_SHIFT 25 +#define I40E_EMPINT_GPIO_ENA_GPIO25_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO25_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO26_ENA_SHIFT 26 +#define I40E_EMPINT_GPIO_ENA_GPIO26_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO26_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO27_ENA_SHIFT 27 +#define I40E_EMPINT_GPIO_ENA_GPIO27_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO27_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO28_ENA_SHIFT 28 +#define I40E_EMPINT_GPIO_ENA_GPIO28_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO28_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO29_ENA_SHIFT 29 +#define I40E_EMPINT_GPIO_ENA_GPIO29_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO29_ENA_SHIFT) +#define I40E_PFGEN_PORTMDIO_NUM 0x0003F100 /* Reset: CORER */ +#define I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_SHIFT 0 +#define I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_MASK I40E_MASK(0x3, I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_SHIFT) +#define I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_SHIFT 4 +#define I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK I40E_MASK(0x1, I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_SHIFT) +#define I40E_PFINT_AEQCTL 0x00038700 /* Reset: CORER */ +#define I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT 0 +#define I40E_PFINT_AEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT) +#define I40E_PFINT_AEQCTL_ITR_INDX_SHIFT 11 +#define I40E_PFINT_AEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_AEQCTL_ITR_INDX_SHIFT) +#define I40E_PFINT_AEQCTL_MSIX0_INDX_SHIFT 13 +#define I40E_PFINT_AEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_PFINT_AEQCTL_MSIX0_INDX_SHIFT) +#define I40E_PFINT_AEQCTL_CAUSE_ENA_SHIFT 30 +#define I40E_PFINT_AEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_PFINT_AEQCTL_CAUSE_ENA_SHIFT) +#define I40E_PFINT_AEQCTL_INTEVENT_SHIFT 31 +#define I40E_PFINT_AEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_AEQCTL_INTEVENT_SHIFT) +#define I40E_PFINT_CEQCTL(_INTPF) (0x00036800 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: CORER */ +#define I40E_PFINT_CEQCTL_MAX_INDEX 511 +#define I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT 0 +#define I40E_PFINT_CEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT) 
+#define I40E_PFINT_CEQCTL_ITR_INDX_SHIFT 11 +#define I40E_PFINT_CEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_CEQCTL_ITR_INDX_SHIFT) +#define I40E_PFINT_CEQCTL_MSIX0_INDX_SHIFT 13 +#define I40E_PFINT_CEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_PFINT_CEQCTL_MSIX0_INDX_SHIFT) +#define I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT 16 +#define I40E_PFINT_CEQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT) +#define I40E_PFINT_CEQCTL_NEXTQ_TYPE_SHIFT 27 +#define I40E_PFINT_CEQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_PFINT_CEQCTL_NEXTQ_TYPE_SHIFT) +#define I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT 30 +#define I40E_PFINT_CEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT) +#define I40E_PFINT_CEQCTL_INTEVENT_SHIFT 31 +#define I40E_PFINT_CEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_CEQCTL_INTEVENT_SHIFT) +#define I40E_GLINT_CTL 0x0003F800 /* Reset: CORER */ +#define I40E_GLINT_CTL_DIS_AUTOMASK_PF0_SHIFT 0 +#define I40E_GLINT_CTL_DIS_AUTOMASK_PF0_MASK I40E_MASK(0x1, I40E_GLINT_CTL_DIS_AUTOMASK_PF0_SHIFT) +#define I40E_GLINT_CTL_DIS_AUTOMASK_VF0_SHIFT 1 +#define I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK I40E_MASK(0x1, I40E_GLINT_CTL_DIS_AUTOMASK_VF0_SHIFT) +#define I40E_GLINT_CTL_DIS_AUTOMASK_N_SHIFT 2 +#define I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK I40E_MASK(0x1, I40E_GLINT_CTL_DIS_AUTOMASK_N_SHIFT) +#define I40E_PFINT_DYN_CTL0 0x00038480 /* Reset: PFR */ +#define I40E_PFINT_DYN_CTL0_INTENA_SHIFT 0 +#define I40E_PFINT_DYN_CTL0_INTENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_INTENA_SHIFT) +#define I40E_PFINT_DYN_CTL0_CLEARPBA_SHIFT 1 +#define I40E_PFINT_DYN_CTL0_CLEARPBA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_CLEARPBA_SHIFT) +#define I40E_PFINT_DYN_CTL0_SWINT_TRIG_SHIFT 2 +#define I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_SWINT_TRIG_SHIFT) +#define I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT 3 +#define I40E_PFINT_DYN_CTL0_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT) +#define I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT 5 +#define I40E_PFINT_DYN_CTL0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT) +#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT 24 +#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT) +#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_SHIFT 25 +#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_SHIFT) +#define I40E_PFINT_DYN_CTL0_INTENA_MSK_SHIFT 31 +#define I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_INTENA_MSK_SHIFT) +#define I40E_PFINT_DYN_CTLN(_INTPF) (0x00034800 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: PFR */ +#define I40E_PFINT_DYN_CTLN_MAX_INDEX 511 +#define I40E_PFINT_DYN_CTLN_INTENA_SHIFT 0 +#define I40E_PFINT_DYN_CTLN_INTENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_INTENA_SHIFT) +#define I40E_PFINT_DYN_CTLN_CLEARPBA_SHIFT 1 +#define I40E_PFINT_DYN_CTLN_CLEARPBA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_CLEARPBA_SHIFT) +#define I40E_PFINT_DYN_CTLN_SWINT_TRIG_SHIFT 2 +#define I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_SWINT_TRIG_SHIFT) +#define I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT 3 +#define I40E_PFINT_DYN_CTLN_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) +#define I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT 5 +#define I40E_PFINT_DYN_CTLN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT) +#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT 24 +#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT) 
+#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT 25 +#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT) +#define I40E_PFINT_DYN_CTLN_INTENA_MSK_SHIFT 31 +#define I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_INTENA_MSK_SHIFT) +#define I40E_PFINT_GPIO_ENA 0x00088080 /* Reset: CORER */ +#define I40E_PFINT_GPIO_ENA_GPIO0_ENA_SHIFT 0 +#define I40E_PFINT_GPIO_ENA_GPIO0_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO0_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO1_ENA_SHIFT 1 +#define I40E_PFINT_GPIO_ENA_GPIO1_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO1_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO2_ENA_SHIFT 2 +#define I40E_PFINT_GPIO_ENA_GPIO2_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO2_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO3_ENA_SHIFT 3 +#define I40E_PFINT_GPIO_ENA_GPIO3_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO3_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO4_ENA_SHIFT 4 +#define I40E_PFINT_GPIO_ENA_GPIO4_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO4_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO5_ENA_SHIFT 5 +#define I40E_PFINT_GPIO_ENA_GPIO5_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO5_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO6_ENA_SHIFT 6 +#define I40E_PFINT_GPIO_ENA_GPIO6_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO6_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO7_ENA_SHIFT 7 +#define I40E_PFINT_GPIO_ENA_GPIO7_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO7_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO8_ENA_SHIFT 8 +#define I40E_PFINT_GPIO_ENA_GPIO8_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO8_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO9_ENA_SHIFT 9 +#define I40E_PFINT_GPIO_ENA_GPIO9_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO9_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO10_ENA_SHIFT 10 +#define I40E_PFINT_GPIO_ENA_GPIO10_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO10_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO11_ENA_SHIFT 11 +#define I40E_PFINT_GPIO_ENA_GPIO11_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO11_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO12_ENA_SHIFT 12 +#define I40E_PFINT_GPIO_ENA_GPIO12_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO12_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO13_ENA_SHIFT 13 +#define I40E_PFINT_GPIO_ENA_GPIO13_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO13_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO14_ENA_SHIFT 14 +#define I40E_PFINT_GPIO_ENA_GPIO14_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO14_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO15_ENA_SHIFT 15 +#define I40E_PFINT_GPIO_ENA_GPIO15_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO15_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO16_ENA_SHIFT 16 +#define I40E_PFINT_GPIO_ENA_GPIO16_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO16_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO17_ENA_SHIFT 17 +#define I40E_PFINT_GPIO_ENA_GPIO17_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO17_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO18_ENA_SHIFT 18 +#define I40E_PFINT_GPIO_ENA_GPIO18_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO18_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO19_ENA_SHIFT 19 +#define I40E_PFINT_GPIO_ENA_GPIO19_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO19_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO20_ENA_SHIFT 20 +#define I40E_PFINT_GPIO_ENA_GPIO20_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO20_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO21_ENA_SHIFT 21 +#define I40E_PFINT_GPIO_ENA_GPIO21_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO21_ENA_SHIFT) 
+#define I40E_PFINT_GPIO_ENA_GPIO22_ENA_SHIFT 22 +#define I40E_PFINT_GPIO_ENA_GPIO22_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO22_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO23_ENA_SHIFT 23 +#define I40E_PFINT_GPIO_ENA_GPIO23_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO23_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO24_ENA_SHIFT 24 +#define I40E_PFINT_GPIO_ENA_GPIO24_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO24_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO25_ENA_SHIFT 25 +#define I40E_PFINT_GPIO_ENA_GPIO25_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO25_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO26_ENA_SHIFT 26 +#define I40E_PFINT_GPIO_ENA_GPIO26_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO26_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO27_ENA_SHIFT 27 +#define I40E_PFINT_GPIO_ENA_GPIO27_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO27_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO28_ENA_SHIFT 28 +#define I40E_PFINT_GPIO_ENA_GPIO28_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO28_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO29_ENA_SHIFT 29 +#define I40E_PFINT_GPIO_ENA_GPIO29_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO29_ENA_SHIFT) +#define I40E_PFINT_ICR0 0x00038780 /* Reset: CORER */ +#define I40E_PFINT_ICR0_INTEVENT_SHIFT 0 +#define I40E_PFINT_ICR0_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_INTEVENT_SHIFT) +#define I40E_PFINT_ICR0_QUEUE_0_SHIFT 1 +#define I40E_PFINT_ICR0_QUEUE_0_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_0_SHIFT) +#define I40E_PFINT_ICR0_QUEUE_1_SHIFT 2 +#define I40E_PFINT_ICR0_QUEUE_1_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_1_SHIFT) +#define I40E_PFINT_ICR0_QUEUE_2_SHIFT 3 +#define I40E_PFINT_ICR0_QUEUE_2_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_2_SHIFT) +#define I40E_PFINT_ICR0_QUEUE_3_SHIFT 4 +#define I40E_PFINT_ICR0_QUEUE_3_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_3_SHIFT) +#define I40E_PFINT_ICR0_QUEUE_4_SHIFT 5 +#define I40E_PFINT_ICR0_QUEUE_4_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_4_SHIFT) +#define I40E_PFINT_ICR0_QUEUE_5_SHIFT 6 +#define I40E_PFINT_ICR0_QUEUE_5_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_5_SHIFT) +#define I40E_PFINT_ICR0_QUEUE_6_SHIFT 7 +#define I40E_PFINT_ICR0_QUEUE_6_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_6_SHIFT) +#define I40E_PFINT_ICR0_QUEUE_7_SHIFT 8 +#define I40E_PFINT_ICR0_QUEUE_7_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_7_SHIFT) +#define I40E_PFINT_ICR0_ECC_ERR_SHIFT 16 +#define I40E_PFINT_ICR0_ECC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ECC_ERR_SHIFT) +#define I40E_PFINT_ICR0_MAL_DETECT_SHIFT 19 +#define I40E_PFINT_ICR0_MAL_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_MAL_DETECT_SHIFT) +#define I40E_PFINT_ICR0_GRST_SHIFT 20 +#define I40E_PFINT_ICR0_GRST_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_GRST_SHIFT) +#define I40E_PFINT_ICR0_PCI_EXCEPTION_SHIFT 21 +#define I40E_PFINT_ICR0_PCI_EXCEPTION_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_PCI_EXCEPTION_SHIFT) +#define I40E_PFINT_ICR0_GPIO_SHIFT 22 +#define I40E_PFINT_ICR0_GPIO_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_GPIO_SHIFT) +#define I40E_PFINT_ICR0_TIMESYNC_SHIFT 23 +#define I40E_PFINT_ICR0_TIMESYNC_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_TIMESYNC_SHIFT) +#define I40E_PFINT_ICR0_STORM_DETECT_SHIFT 24 +#define I40E_PFINT_ICR0_STORM_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_STORM_DETECT_SHIFT) +#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25 +#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT) +#define I40E_PFINT_ICR0_HMC_ERR_SHIFT 26 +#define I40E_PFINT_ICR0_HMC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_HMC_ERR_SHIFT) 
+#define I40E_PFINT_ICR0_PE_CRITERR_SHIFT 28 +#define I40E_PFINT_ICR0_PE_CRITERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_PE_CRITERR_SHIFT) +#define I40E_PFINT_ICR0_VFLR_SHIFT 29 +#define I40E_PFINT_ICR0_VFLR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_VFLR_SHIFT) +#define I40E_PFINT_ICR0_ADMINQ_SHIFT 30 +#define I40E_PFINT_ICR0_ADMINQ_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ADMINQ_SHIFT) +#define I40E_PFINT_ICR0_SWINT_SHIFT 31 +#define I40E_PFINT_ICR0_SWINT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_SWINT_SHIFT) +#define I40E_PFINT_ICR0_ENA 0x00038800 /* Reset: CORER */ +#define I40E_PFINT_ICR0_ENA_ECC_ERR_SHIFT 16 +#define I40E_PFINT_ICR0_ENA_ECC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_ECC_ERR_SHIFT) +#define I40E_PFINT_ICR0_ENA_MAL_DETECT_SHIFT 19 +#define I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_MAL_DETECT_SHIFT) +#define I40E_PFINT_ICR0_ENA_GRST_SHIFT 20 +#define I40E_PFINT_ICR0_ENA_GRST_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_GRST_SHIFT) +#define I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_SHIFT 21 +#define I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_SHIFT) +#define I40E_PFINT_ICR0_ENA_GPIO_SHIFT 22 +#define I40E_PFINT_ICR0_ENA_GPIO_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_GPIO_SHIFT) +#define I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT 23 +#define I40E_PFINT_ICR0_ENA_TIMESYNC_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT) +#define I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT 24 +#define I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT) +#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25 +#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT) +#define I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT 26 +#define I40E_PFINT_ICR0_ENA_HMC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT) +#define I40E_PFINT_ICR0_ENA_PE_CRITERR_SHIFT 28 +#define I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_PE_CRITERR_SHIFT) +#define I40E_PFINT_ICR0_ENA_VFLR_SHIFT 29 +#define I40E_PFINT_ICR0_ENA_VFLR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_VFLR_SHIFT) +#define I40E_PFINT_ICR0_ENA_ADMINQ_SHIFT 30 +#define I40E_PFINT_ICR0_ENA_ADMINQ_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_ADMINQ_SHIFT) +#define I40E_PFINT_ICR0_ENA_RSVD_SHIFT 31 +#define I40E_PFINT_ICR0_ENA_RSVD_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_RSVD_SHIFT) +#define I40E_PFINT_ITR0(_i) (0x00038000 + ((_i) * 128)) /* _i=0...2 */ /* Reset: PFR */ +#define I40E_PFINT_ITR0_MAX_INDEX 2 +#define I40E_PFINT_ITR0_INTERVAL_SHIFT 0 +#define I40E_PFINT_ITR0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_ITR0_INTERVAL_SHIFT) +#define I40E_PFINT_ITRN(_i, _INTPF) (0x00030000 + ((_i) * 2048 + (_INTPF) * 4)) /* _i=0...2, _INTPF=0...511 */ /* Reset: PFR */ +#define I40E_PFINT_ITRN_MAX_INDEX 2 +#define I40E_PFINT_ITRN_INTERVAL_SHIFT 0 +#define I40E_PFINT_ITRN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_ITRN_INTERVAL_SHIFT) +#define I40E_PFINT_LNKLST0 0x00038500 /* Reset: PFR */ +#define I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT 0 +#define I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT) +#define I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT 11 +#define I40E_PFINT_LNKLST0_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT) +#define I40E_PFINT_LNKLSTN(_INTPF) (0x00035000 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: PFR */ +#define I40E_PFINT_LNKLSTN_MAX_INDEX 511 +#define I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT 0 +#define I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, 
I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) +#define I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT 11 +#define I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) +#define I40E_PFINT_RATE0 0x00038580 /* Reset: PFR */ +#define I40E_PFINT_RATE0_INTERVAL_SHIFT 0 +#define I40E_PFINT_RATE0_INTERVAL_MASK I40E_MASK(0x3F, I40E_PFINT_RATE0_INTERVAL_SHIFT) +#define I40E_PFINT_RATE0_INTRL_ENA_SHIFT 6 +#define I40E_PFINT_RATE0_INTRL_ENA_MASK I40E_MASK(0x1, I40E_PFINT_RATE0_INTRL_ENA_SHIFT) +#define I40E_PFINT_RATEN(_INTPF) (0x00035800 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: PFR */ +#define I40E_PFINT_RATEN_MAX_INDEX 511 +#define I40E_PFINT_RATEN_INTERVAL_SHIFT 0 +#define I40E_PFINT_RATEN_INTERVAL_MASK I40E_MASK(0x3F, I40E_PFINT_RATEN_INTERVAL_SHIFT) +#define I40E_PFINT_RATEN_INTRL_ENA_SHIFT 6 +#define I40E_PFINT_RATEN_INTRL_ENA_MASK I40E_MASK(0x1, I40E_PFINT_RATEN_INTRL_ENA_SHIFT) +#define I40E_PFINT_STAT_CTL0 0x00038400 /* Reset: CORER */ +#define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2 +#define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT) +#define I40E_QINT_RQCTL(_Q) (0x0003A000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */ +#define I40E_QINT_RQCTL_MAX_INDEX 1535 +#define I40E_QINT_RQCTL_MSIX_INDX_SHIFT 0 +#define I40E_QINT_RQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_QINT_RQCTL_MSIX_INDX_SHIFT) +#define I40E_QINT_RQCTL_ITR_INDX_SHIFT 11 +#define I40E_QINT_RQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_QINT_RQCTL_ITR_INDX_SHIFT) +#define I40E_QINT_RQCTL_MSIX0_INDX_SHIFT 13 +#define I40E_QINT_RQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_QINT_RQCTL_MSIX0_INDX_SHIFT) +#define I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT 16 +#define I40E_QINT_RQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) +#define I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT 27 +#define I40E_QINT_RQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) +#define I40E_QINT_RQCTL_CAUSE_ENA_SHIFT 30 +#define I40E_QINT_RQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) +#define I40E_QINT_RQCTL_INTEVENT_SHIFT 31 +#define I40E_QINT_RQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_QINT_RQCTL_INTEVENT_SHIFT) +#define I40E_QINT_TQCTL(_Q) (0x0003C000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */ +#define I40E_QINT_TQCTL_MAX_INDEX 1535 +#define I40E_QINT_TQCTL_MSIX_INDX_SHIFT 0 +#define I40E_QINT_TQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_QINT_TQCTL_MSIX_INDX_SHIFT) +#define I40E_QINT_TQCTL_ITR_INDX_SHIFT 11 +#define I40E_QINT_TQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_QINT_TQCTL_ITR_INDX_SHIFT) +#define I40E_QINT_TQCTL_MSIX0_INDX_SHIFT 13 +#define I40E_QINT_TQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_QINT_TQCTL_MSIX0_INDX_SHIFT) +#define I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT 16 +#define I40E_QINT_TQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) +#define I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT 27 +#define I40E_QINT_TQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT) +#define I40E_QINT_TQCTL_CAUSE_ENA_SHIFT 30 +#define I40E_QINT_TQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_QINT_TQCTL_CAUSE_ENA_SHIFT) +#define I40E_QINT_TQCTL_INTEVENT_SHIFT 31 +#define I40E_QINT_TQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_QINT_TQCTL_INTEVENT_SHIFT) +#define I40E_VFINT_DYN_CTL0(_VF) (0x0002A400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VFINT_DYN_CTL0_MAX_INDEX 127 +#define I40E_VFINT_DYN_CTL0_INTENA_SHIFT 0 +#define I40E_VFINT_DYN_CTL0_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_INTENA_SHIFT) +#define 
I40E_VFINT_DYN_CTL0_CLEARPBA_SHIFT 1 +#define I40E_VFINT_DYN_CTL0_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_CLEARPBA_SHIFT) +#define I40E_VFINT_DYN_CTL0_SWINT_TRIG_SHIFT 2 +#define I40E_VFINT_DYN_CTL0_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_SWINT_TRIG_SHIFT) +#define I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT 3 +#define I40E_VFINT_DYN_CTL0_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT) +#define I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT 5 +#define I40E_VFINT_DYN_CTL0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT) +#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT 24 +#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT) +#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_SHIFT 25 +#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL0_SW_ITR_INDX_SHIFT) +#define I40E_VFINT_DYN_CTL0_INTENA_MSK_SHIFT 31 +#define I40E_VFINT_DYN_CTL0_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_INTENA_MSK_SHIFT) +#define I40E_VFINT_DYN_CTLN(_INTVF) (0x00024800 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: VFR */ +#define I40E_VFINT_DYN_CTLN_MAX_INDEX 511 +#define I40E_VFINT_DYN_CTLN_INTENA_SHIFT 0 +#define I40E_VFINT_DYN_CTLN_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_INTENA_SHIFT) +#define I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT 1 +#define I40E_VFINT_DYN_CTLN_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT) +#define I40E_VFINT_DYN_CTLN_SWINT_TRIG_SHIFT 2 +#define I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_SWINT_TRIG_SHIFT) +#define I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT 3 +#define I40E_VFINT_DYN_CTLN_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT) +#define I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT 5 +#define I40E_VFINT_DYN_CTLN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT) +#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT 24 +#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT) +#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_SHIFT 25 +#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN_SW_ITR_INDX_SHIFT) +#define I40E_VFINT_DYN_CTLN_INTENA_MSK_SHIFT 31 +#define I40E_VFINT_DYN_CTLN_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_INTENA_MSK_SHIFT) +#define I40E_VFINT_ICR0(_VF) (0x0002BC00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_VFINT_ICR0_MAX_INDEX 127 +#define I40E_VFINT_ICR0_INTEVENT_SHIFT 0 +#define I40E_VFINT_ICR0_INTEVENT_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_INTEVENT_SHIFT) +#define I40E_VFINT_ICR0_QUEUE_0_SHIFT 1 +#define I40E_VFINT_ICR0_QUEUE_0_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_0_SHIFT) +#define I40E_VFINT_ICR0_QUEUE_1_SHIFT 2 +#define I40E_VFINT_ICR0_QUEUE_1_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_1_SHIFT) +#define I40E_VFINT_ICR0_QUEUE_2_SHIFT 3 +#define I40E_VFINT_ICR0_QUEUE_2_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_2_SHIFT) +#define I40E_VFINT_ICR0_QUEUE_3_SHIFT 4 +#define I40E_VFINT_ICR0_QUEUE_3_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_3_SHIFT) +#define I40E_VFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25 +#define I40E_VFINT_ICR0_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_LINK_STAT_CHANGE_SHIFT) +#define I40E_VFINT_ICR0_ADMINQ_SHIFT 30 +#define I40E_VFINT_ICR0_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ADMINQ_SHIFT) +#define I40E_VFINT_ICR0_SWINT_SHIFT 31 +#define I40E_VFINT_ICR0_SWINT_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_SWINT_SHIFT) +#define I40E_VFINT_ICR0_ENA(_VF) (0x0002C000 + ((_VF) * 4)) /* _i=0...127 
*/ /* Reset: CORER */ +#define I40E_VFINT_ICR0_ENA_MAX_INDEX 127 +#define I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25 +#define I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT) +#define I40E_VFINT_ICR0_ENA_ADMINQ_SHIFT 30 +#define I40E_VFINT_ICR0_ENA_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA_ADMINQ_SHIFT) +#define I40E_VFINT_ICR0_ENA_RSVD_SHIFT 31 +#define I40E_VFINT_ICR0_ENA_RSVD_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA_RSVD_SHIFT) +#define I40E_VFINT_ITR0(_i, _VF) (0x00028000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...2, _VF=0...127 */ /* Reset: VFR */ +#define I40E_VFINT_ITR0_MAX_INDEX 2 +#define I40E_VFINT_ITR0_INTERVAL_SHIFT 0 +#define I40E_VFINT_ITR0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITR0_INTERVAL_SHIFT) +#define I40E_VFINT_ITRN(_i, _INTVF) (0x00020000 + ((_i) * 2048 + (_INTVF) * 4)) /* _i=0...2, _INTVF=0...511 */ /* Reset: VFR */ +#define I40E_VFINT_ITRN_MAX_INDEX 2 +#define I40E_VFINT_ITRN_INTERVAL_SHIFT 0 +#define I40E_VFINT_ITRN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN_INTERVAL_SHIFT) +#define I40E_VFINT_STAT_CTL0(_VF) (0x0002A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_VFINT_STAT_CTL0_MAX_INDEX 127 +#define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2 +#define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT) +#define I40E_VPINT_AEQCTL(_VF) (0x0002B800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_VPINT_AEQCTL_MAX_INDEX 127 +#define I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT 0 +#define I40E_VPINT_AEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT) +#define I40E_VPINT_AEQCTL_ITR_INDX_SHIFT 11 +#define I40E_VPINT_AEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_VPINT_AEQCTL_ITR_INDX_SHIFT) +#define I40E_VPINT_AEQCTL_MSIX0_INDX_SHIFT 13 +#define I40E_VPINT_AEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_VPINT_AEQCTL_MSIX0_INDX_SHIFT) +#define I40E_VPINT_AEQCTL_CAUSE_ENA_SHIFT 30 +#define I40E_VPINT_AEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_VPINT_AEQCTL_CAUSE_ENA_SHIFT) +#define I40E_VPINT_AEQCTL_INTEVENT_SHIFT 31 +#define I40E_VPINT_AEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_VPINT_AEQCTL_INTEVENT_SHIFT) +#define I40E_VPINT_CEQCTL(_INTVF) (0x00026800 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: CORER */ +#define I40E_VPINT_CEQCTL_MAX_INDEX 511 +#define I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT 0 +#define I40E_VPINT_CEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT) +#define I40E_VPINT_CEQCTL_ITR_INDX_SHIFT 11 +#define I40E_VPINT_CEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_VPINT_CEQCTL_ITR_INDX_SHIFT) +#define I40E_VPINT_CEQCTL_MSIX0_INDX_SHIFT 13 +#define I40E_VPINT_CEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_VPINT_CEQCTL_MSIX0_INDX_SHIFT) +#define I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT 16 +#define I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT) +#define I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT 27 +#define I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT) +#define I40E_VPINT_CEQCTL_CAUSE_ENA_SHIFT 30 +#define I40E_VPINT_CEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_VPINT_CEQCTL_CAUSE_ENA_SHIFT) +#define I40E_VPINT_CEQCTL_INTEVENT_SHIFT 31 +#define I40E_VPINT_CEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_VPINT_CEQCTL_INTEVENT_SHIFT) +#define I40E_VPINT_LNKLST0(_VF) (0x0002A800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VPINT_LNKLST0_MAX_INDEX 127 +#define I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT 0 +#define I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK 
I40E_MASK(0x7FF, I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) +#define I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT 11 +#define I40E_VPINT_LNKLST0_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT) +#define I40E_VPINT_LNKLSTN(_INTVF) (0x00025000 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: VFR */ +#define I40E_VPINT_LNKLSTN_MAX_INDEX 511 +#define I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT 0 +#define I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT) +#define I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT 11 +#define I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) +#define I40E_VPINT_RATE0(_VF) (0x0002AC00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VPINT_RATE0_MAX_INDEX 127 +#define I40E_VPINT_RATE0_INTERVAL_SHIFT 0 +#define I40E_VPINT_RATE0_INTERVAL_MASK I40E_MASK(0x3F, I40E_VPINT_RATE0_INTERVAL_SHIFT) +#define I40E_VPINT_RATE0_INTRL_ENA_SHIFT 6 +#define I40E_VPINT_RATE0_INTRL_ENA_MASK I40E_MASK(0x1, I40E_VPINT_RATE0_INTRL_ENA_SHIFT) +#define I40E_VPINT_RATEN(_INTVF) (0x00025800 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: VFR */ +#define I40E_VPINT_RATEN_MAX_INDEX 511 +#define I40E_VPINT_RATEN_INTERVAL_SHIFT 0 +#define I40E_VPINT_RATEN_INTERVAL_MASK I40E_MASK(0x3F, I40E_VPINT_RATEN_INTERVAL_SHIFT) +#define I40E_VPINT_RATEN_INTRL_ENA_SHIFT 6 +#define I40E_VPINT_RATEN_INTRL_ENA_MASK I40E_MASK(0x1, I40E_VPINT_RATEN_INTRL_ENA_SHIFT) +#define I40E_GL_RDPU_CNTRL 0x00051060 /* Reset: CORER */ +#define I40E_GL_RDPU_CNTRL_RX_PAD_EN_SHIFT 0 +#define I40E_GL_RDPU_CNTRL_RX_PAD_EN_MASK I40E_MASK(0x1, I40E_GL_RDPU_CNTRL_RX_PAD_EN_SHIFT) +#define I40E_GL_RDPU_CNTRL_ECO_SHIFT 1 +#define I40E_GL_RDPU_CNTRL_ECO_MASK I40E_MASK(0x7FFFFFFF, I40E_GL_RDPU_CNTRL_ECO_SHIFT) +#define I40E_GLLAN_RCTL_0 0x0012A500 /* Reset: CORER */ +#define I40E_GLLAN_RCTL_0_PXE_MODE_SHIFT 0 +#define I40E_GLLAN_RCTL_0_PXE_MODE_MASK I40E_MASK(0x1, I40E_GLLAN_RCTL_0_PXE_MODE_SHIFT) +#define I40E_GLLAN_TSOMSK_F 0x000442D8 /* Reset: CORER */ +#define I40E_GLLAN_TSOMSK_F_TCPMSKF_SHIFT 0 +#define I40E_GLLAN_TSOMSK_F_TCPMSKF_MASK I40E_MASK(0xFFF, I40E_GLLAN_TSOMSK_F_TCPMSKF_SHIFT) +#define I40E_GLLAN_TSOMSK_L 0x000442E0 /* Reset: CORER */ +#define I40E_GLLAN_TSOMSK_L_TCPMSKL_SHIFT 0 +#define I40E_GLLAN_TSOMSK_L_TCPMSKL_MASK I40E_MASK(0xFFF, I40E_GLLAN_TSOMSK_L_TCPMSKL_SHIFT) +#define I40E_GLLAN_TSOMSK_M 0x000442DC /* Reset: CORER */ +#define I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT 0 +#define I40E_GLLAN_TSOMSK_M_TCPMSKM_MASK I40E_MASK(0xFFF, I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT) +#define I40E_GLLAN_TXPRE_QDIS(_i) (0x000e6500 + ((_i) * 4)) /* _i=0...11 */ /* Reset: CORER */ +#define I40E_GLLAN_TXPRE_QDIS_MAX_INDEX 11 +#define I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT 0 +#define I40E_GLLAN_TXPRE_QDIS_QINDX_MASK I40E_MASK(0x7FF, I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT) +#define I40E_GLLAN_TXPRE_QDIS_QDIS_STAT_SHIFT 16 +#define I40E_GLLAN_TXPRE_QDIS_QDIS_STAT_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_QDIS_STAT_SHIFT) +#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT 30 +#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT) +#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT 31 +#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK I40E_MASK(0x1u, I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT) +#define I40E_PFLAN_QALLOC 0x001C0400 /* Reset: CORER */ +#define I40E_PFLAN_QALLOC_FIRSTQ_SHIFT 0 +#define I40E_PFLAN_QALLOC_FIRSTQ_MASK I40E_MASK(0x7FF, I40E_PFLAN_QALLOC_FIRSTQ_SHIFT) +#define I40E_PFLAN_QALLOC_LASTQ_SHIFT 16 +#define I40E_PFLAN_QALLOC_LASTQ_MASK 
I40E_MASK(0x7FF, I40E_PFLAN_QALLOC_LASTQ_SHIFT) +#define I40E_PFLAN_QALLOC_VALID_SHIFT 31 +#define I40E_PFLAN_QALLOC_VALID_MASK I40E_MASK(0x1u, I40E_PFLAN_QALLOC_VALID_SHIFT) +#define I40E_QRX_ENA(_Q) (0x00120000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */ +#define I40E_QRX_ENA_MAX_INDEX 1535 +#define I40E_QRX_ENA_QENA_REQ_SHIFT 0 +#define I40E_QRX_ENA_QENA_REQ_MASK I40E_MASK(0x1, I40E_QRX_ENA_QENA_REQ_SHIFT) +#define I40E_QRX_ENA_FAST_QDIS_SHIFT 1 +#define I40E_QRX_ENA_FAST_QDIS_MASK I40E_MASK(0x1, I40E_QRX_ENA_FAST_QDIS_SHIFT) +#define I40E_QRX_ENA_QENA_STAT_SHIFT 2 +#define I40E_QRX_ENA_QENA_STAT_MASK I40E_MASK(0x1, I40E_QRX_ENA_QENA_STAT_SHIFT) +#define I40E_QRX_TAIL(_Q) (0x00128000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */ +#define I40E_QRX_TAIL_MAX_INDEX 1535 +#define I40E_QRX_TAIL_TAIL_SHIFT 0 +#define I40E_QRX_TAIL_TAIL_MASK I40E_MASK(0x1FFF, I40E_QRX_TAIL_TAIL_SHIFT) +#define I40E_QTX_CTL(_Q) (0x00104000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */ +#define I40E_QTX_CTL_MAX_INDEX 1535 +#define I40E_QTX_CTL_PFVF_Q_SHIFT 0 +#define I40E_QTX_CTL_PFVF_Q_MASK I40E_MASK(0x3, I40E_QTX_CTL_PFVF_Q_SHIFT) +#define I40E_QTX_CTL_PF_INDX_SHIFT 2 +#define I40E_QTX_CTL_PF_INDX_MASK I40E_MASK(0xF, I40E_QTX_CTL_PF_INDX_SHIFT) +#define I40E_QTX_CTL_VFVM_INDX_SHIFT 7 +#define I40E_QTX_CTL_VFVM_INDX_MASK I40E_MASK(0x1FF, I40E_QTX_CTL_VFVM_INDX_SHIFT) +#define I40E_QTX_ENA(_Q) (0x00100000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */ +#define I40E_QTX_ENA_MAX_INDEX 1535 +#define I40E_QTX_ENA_QENA_REQ_SHIFT 0 +#define I40E_QTX_ENA_QENA_REQ_MASK I40E_MASK(0x1, I40E_QTX_ENA_QENA_REQ_SHIFT) +#define I40E_QTX_ENA_FAST_QDIS_SHIFT 1 +#define I40E_QTX_ENA_FAST_QDIS_MASK I40E_MASK(0x1, I40E_QTX_ENA_FAST_QDIS_SHIFT) +#define I40E_QTX_ENA_QENA_STAT_SHIFT 2 +#define I40E_QTX_ENA_QENA_STAT_MASK I40E_MASK(0x1, I40E_QTX_ENA_QENA_STAT_SHIFT) +#define I40E_QTX_HEAD(_Q) (0x000E4000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */ +#define I40E_QTX_HEAD_MAX_INDEX 1535 +#define I40E_QTX_HEAD_HEAD_SHIFT 0 +#define I40E_QTX_HEAD_HEAD_MASK I40E_MASK(0x1FFF, I40E_QTX_HEAD_HEAD_SHIFT) +#define I40E_QTX_HEAD_RS_PENDING_SHIFT 16 +#define I40E_QTX_HEAD_RS_PENDING_MASK I40E_MASK(0x1, I40E_QTX_HEAD_RS_PENDING_SHIFT) +#define I40E_QTX_TAIL(_Q) (0x00108000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */ +#define I40E_QTX_TAIL_MAX_INDEX 1535 +#define I40E_QTX_TAIL_TAIL_SHIFT 0 +#define I40E_QTX_TAIL_TAIL_MASK I40E_MASK(0x1FFF, I40E_QTX_TAIL_TAIL_SHIFT) +#define I40E_VPLAN_MAPENA(_VF) (0x00074000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VPLAN_MAPENA_MAX_INDEX 127 +#define I40E_VPLAN_MAPENA_TXRX_ENA_SHIFT 0 +#define I40E_VPLAN_MAPENA_TXRX_ENA_MASK I40E_MASK(0x1, I40E_VPLAN_MAPENA_TXRX_ENA_SHIFT) +#define I40E_VPLAN_QTABLE(_i, _VF) (0x00070000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */ /* Reset: VFR */ +#define I40E_VPLAN_QTABLE_MAX_INDEX 15 +#define I40E_VPLAN_QTABLE_QINDEX_SHIFT 0 +#define I40E_VPLAN_QTABLE_QINDEX_MASK I40E_MASK(0x7FF, I40E_VPLAN_QTABLE_QINDEX_SHIFT) +#define I40E_VSILAN_QBASE(_VSI) (0x0020C800 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: PFR */ +#define I40E_VSILAN_QBASE_MAX_INDEX 383 +#define I40E_VSILAN_QBASE_VSIBASE_SHIFT 0 +#define I40E_VSILAN_QBASE_VSIBASE_MASK I40E_MASK(0x7FF, I40E_VSILAN_QBASE_VSIBASE_SHIFT) +#define I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT 11 +#define I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK I40E_MASK(0x1, I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT) +#define I40E_VSILAN_QTABLE(_i, _VSI) (0x00200000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...7, 
_VSI=0...383 */ /* Reset: PFR */ +#define I40E_VSILAN_QTABLE_MAX_INDEX 7 +#define I40E_VSILAN_QTABLE_QINDEX_0_SHIFT 0 +#define I40E_VSILAN_QTABLE_QINDEX_0_MASK I40E_MASK(0x7FF, I40E_VSILAN_QTABLE_QINDEX_0_SHIFT) +#define I40E_VSILAN_QTABLE_QINDEX_1_SHIFT 16 +#define I40E_VSILAN_QTABLE_QINDEX_1_MASK I40E_MASK(0x7FF, I40E_VSILAN_QTABLE_QINDEX_1_SHIFT) +#define I40E_PRTGL_SAH 0x001E2140 /* Reset: GLOBR */ +#define I40E_PRTGL_SAH_FC_SAH_SHIFT 0 +#define I40E_PRTGL_SAH_FC_SAH_MASK I40E_MASK(0xFFFF, I40E_PRTGL_SAH_FC_SAH_SHIFT) +#define I40E_PRTGL_SAH_MFS_SHIFT 16 +#define I40E_PRTGL_SAH_MFS_MASK I40E_MASK(0xFFFF, I40E_PRTGL_SAH_MFS_SHIFT) +#define I40E_PRTGL_SAL 0x001E2120 /* Reset: GLOBR */ +#define I40E_PRTGL_SAL_FC_SAL_SHIFT 0 +#define I40E_PRTGL_SAL_FC_SAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTGL_SAL_FC_SAL_SHIFT) +#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP 0x001E30E0 /* Reset: GLOBR */ +#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_SHIFT 0 +#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_SHIFT) +#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP 0x001E3260 /* Reset: GLOBR */ +#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_SHIFT 0 +#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_SHIFT) +#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP 0x001E32E0 /* Reset: GLOBR */ +#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_SHIFT 0 +#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_SHIFT) +#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL 0x001E3360 /* Reset: GLOBR */ +#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_SHIFT 0 +#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_SHIFT) +#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1 0x001E3110 /* Reset: GLOBR */ +#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_SHIFT 0 +#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_SHIFT) +#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2 0x001E3120 /* Reset: GLOBR */ +#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_SHIFT 0 +#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_SHIFT) +#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE 0x001E30C0 /* Reset: GLOBR */ +#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT 0 +#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_MASK I40E_MASK(0x1FF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT) +#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1 0x001E3140 /* Reset: GLOBR */ +#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_SHIFT 0 +#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_SHIFT) +#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2 0x001E3150 /* Reset: GLOBR */ +#define 
I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_SHIFT 0 +#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_SHIFT) +#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE 0x001E30D0 /* Reset: GLOBR */ +#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT 0 +#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_MASK I40E_MASK(0x1FF, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT) +#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(_i) (0x001E3370 + ((_i) * 16)) /* _i=0...8 */ /* Reset: GLOBR */ +#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX 8 +#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_SHIFT 0 +#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_SHIFT) +#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(_i) (0x001E3400 + ((_i) * 16)) /* _i=0...8 */ /* Reset: GLOBR */ +#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MAX_INDEX 8 +#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT 0 +#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT) +#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1 0x001E34B0 /* Reset: GLOBR */ +#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_SHIFT 0 +#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_SHIFT) +#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2 0x001E34C0 /* Reset: GLOBR */ +#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_SHIFT 0 +#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_SHIFT) +#define I40E_PRTMAC_PCS_XAUI_SWAP_A 0x0008C480 /* Reset: GLOBR */ +#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_SHIFT 0 +#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_SHIFT) +#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_SHIFT 2 +#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_SHIFT) +#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_SHIFT 4 +#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_SHIFT) +#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_SHIFT 6 +#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_SHIFT) +#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_SHIFT 8 +#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_SHIFT) +#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_SHIFT 10 +#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_SHIFT) +#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_SHIFT 12 +#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_SHIFT) +#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_SHIFT 14 +#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_SHIFT) +#define 
I40E_PRTMAC_PCS_XAUI_SWAP_B 0x0008C484 /* Reset: GLOBR */ +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_SHIFT 0 +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_SHIFT) +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_SHIFT 2 +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_SHIFT) +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_SHIFT 4 +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_SHIFT) +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_SHIFT 6 +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_SHIFT) +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_SHIFT 8 +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_SHIFT) +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_SHIFT 10 +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_SHIFT) +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_SHIFT 12 +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_SHIFT) +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT 14 +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT) +#define I40E_GL_FWRESETCNT 0x00083100 /* Reset: POR */ +#define I40E_GL_FWRESETCNT_FWRESETCNT_SHIFT 0 +#define I40E_GL_FWRESETCNT_FWRESETCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FWRESETCNT_FWRESETCNT_SHIFT) +#define I40E_GL_MNG_FWSM 0x000B6134 /* Reset: POR */ +#define I40E_GL_MNG_FWSM_FW_MODES_SHIFT 0 +#define I40E_GL_MNG_FWSM_FW_MODES_MASK I40E_MASK(0x3, I40E_GL_MNG_FWSM_FW_MODES_SHIFT) +#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT 10 +#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT) +#define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT 11 +#define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_MASK I40E_MASK(0xF, I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT) +#define I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT 15 +#define I40E_GL_MNG_FWSM_FW_STATUS_VALID_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT) +#define I40E_GL_MNG_FWSM_RESET_CNT_SHIFT 16 +#define I40E_GL_MNG_FWSM_RESET_CNT_MASK I40E_MASK(0x7, I40E_GL_MNG_FWSM_RESET_CNT_SHIFT) +#define I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT 19 +#define I40E_GL_MNG_FWSM_EXT_ERR_IND_MASK I40E_MASK(0x3F, I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT) +#define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT 26 +#define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT) +#define I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_SHIFT 27 +#define I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_SHIFT) +#define I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_SHIFT 28 +#define I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_SHIFT) +#define I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_SHIFT 29 +#define I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_SHIFT) +#define I40E_GL_MNG_HWARB_CTRL 0x000B6130 /* Reset: POR */ +#define I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_SHIFT 0 +#define I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_MASK I40E_MASK(0x1, I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_SHIFT) +#define 
I40E_PRT_MNG_FTFT_DATA(_i) (0x000852A0 + ((_i) * 32)) /* _i=0...31 */ /* Reset: POR */ +#define I40E_PRT_MNG_FTFT_DATA_MAX_INDEX 31 +#define I40E_PRT_MNG_FTFT_DATA_DWORD_SHIFT 0 +#define I40E_PRT_MNG_FTFT_DATA_DWORD_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_FTFT_DATA_DWORD_SHIFT) +#define I40E_PRT_MNG_FTFT_LENGTH 0x00085260 /* Reset: POR */ +#define I40E_PRT_MNG_FTFT_LENGTH_LENGTH_SHIFT 0 +#define I40E_PRT_MNG_FTFT_LENGTH_LENGTH_MASK I40E_MASK(0xFF, I40E_PRT_MNG_FTFT_LENGTH_LENGTH_SHIFT) +#define I40E_PRT_MNG_FTFT_MASK(_i) (0x00085160 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */ +#define I40E_PRT_MNG_FTFT_MASK_MAX_INDEX 7 +#define I40E_PRT_MNG_FTFT_MASK_MASK_SHIFT 0 +#define I40E_PRT_MNG_FTFT_MASK_MASK_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_FTFT_MASK_MASK_SHIFT) +#define I40E_PRT_MNG_MANC 0x00256A20 /* Reset: POR */ +#define I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_SHIFT 0 +#define I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_SHIFT) +#define I40E_PRT_MNG_MANC_NCSI_DISCARD_SHIFT 1 +#define I40E_PRT_MNG_MANC_NCSI_DISCARD_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_NCSI_DISCARD_SHIFT) +#define I40E_PRT_MNG_MANC_RCV_TCO_EN_SHIFT 17 +#define I40E_PRT_MNG_MANC_RCV_TCO_EN_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_RCV_TCO_EN_SHIFT) +#define I40E_PRT_MNG_MANC_RCV_ALL_SHIFT 19 +#define I40E_PRT_MNG_MANC_RCV_ALL_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_RCV_ALL_SHIFT) +#define I40E_PRT_MNG_MANC_FIXED_NET_TYPE_SHIFT 25 +#define I40E_PRT_MNG_MANC_FIXED_NET_TYPE_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_FIXED_NET_TYPE_SHIFT) +#define I40E_PRT_MNG_MANC_NET_TYPE_SHIFT 26 +#define I40E_PRT_MNG_MANC_NET_TYPE_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_NET_TYPE_SHIFT) +#define I40E_PRT_MNG_MANC_EN_BMC2OS_SHIFT 28 +#define I40E_PRT_MNG_MANC_EN_BMC2OS_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_EN_BMC2OS_SHIFT) +#define I40E_PRT_MNG_MANC_EN_BMC2NET_SHIFT 29 +#define I40E_PRT_MNG_MANC_EN_BMC2NET_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_EN_BMC2NET_SHIFT) +#define I40E_PRT_MNG_MAVTV(_i) (0x00255900 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */ +#define I40E_PRT_MNG_MAVTV_MAX_INDEX 7 +#define I40E_PRT_MNG_MAVTV_VID_SHIFT 0 +#define I40E_PRT_MNG_MAVTV_VID_MASK I40E_MASK(0xFFF, I40E_PRT_MNG_MAVTV_VID_SHIFT) +#define I40E_PRT_MNG_MDEF(_i) (0x00255D00 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */ +#define I40E_PRT_MNG_MDEF_MAX_INDEX 7 +#define I40E_PRT_MNG_MDEF_MAC_EXACT_AND_SHIFT 0 +#define I40E_PRT_MNG_MDEF_MAC_EXACT_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_MAC_EXACT_AND_SHIFT) +#define I40E_PRT_MNG_MDEF_BROADCAST_AND_SHIFT 4 +#define I40E_PRT_MNG_MDEF_BROADCAST_AND_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_BROADCAST_AND_SHIFT) +#define I40E_PRT_MNG_MDEF_VLAN_AND_SHIFT 5 +#define I40E_PRT_MNG_MDEF_VLAN_AND_MASK I40E_MASK(0xFF, I40E_PRT_MNG_MDEF_VLAN_AND_SHIFT) +#define I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_SHIFT 13 +#define I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_SHIFT) +#define I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_SHIFT 17 +#define I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_SHIFT) +#define I40E_PRT_MNG_MDEF_MAC_EXACT_OR_SHIFT 21 +#define I40E_PRT_MNG_MDEF_MAC_EXACT_OR_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_MAC_EXACT_OR_SHIFT) +#define I40E_PRT_MNG_MDEF_BROADCAST_OR_SHIFT 25 +#define I40E_PRT_MNG_MDEF_BROADCAST_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_BROADCAST_OR_SHIFT) +#define I40E_PRT_MNG_MDEF_MULTICAST_AND_SHIFT 26 +#define I40E_PRT_MNG_MDEF_MULTICAST_AND_MASK I40E_MASK(0x1, 
I40E_PRT_MNG_MDEF_MULTICAST_AND_SHIFT) +#define I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_SHIFT 27 +#define I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_SHIFT) +#define I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_SHIFT 28 +#define I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_SHIFT) +#define I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_SHIFT 29 +#define I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_SHIFT) +#define I40E_PRT_MNG_MDEF_PORT_0X298_OR_SHIFT 30 +#define I40E_PRT_MNG_MDEF_PORT_0X298_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_PORT_0X298_OR_SHIFT) +#define I40E_PRT_MNG_MDEF_PORT_0X26F_OR_SHIFT 31 +#define I40E_PRT_MNG_MDEF_PORT_0X26F_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_PORT_0X26F_OR_SHIFT) +#define I40E_PRT_MNG_MDEF_EXT(_i) (0x00255F00 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */ +#define I40E_PRT_MNG_MDEF_EXT_MAX_INDEX 7 +#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_SHIFT 0 +#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_SHIFT) +#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_SHIFT 4 +#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_SHIFT) +#define I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_SHIFT 8 +#define I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_SHIFT) +#define I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_SHIFT 24 +#define I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_SHIFT) +#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_SHIFT 25 +#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_SHIFT) +#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_SHIFT 26 +#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_SHIFT) +#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_SHIFT 27 +#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_SHIFT) +#define I40E_PRT_MNG_MDEF_EXT_ICMP_OR_SHIFT 28 +#define I40E_PRT_MNG_MDEF_EXT_ICMP_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_ICMP_OR_SHIFT) +#define I40E_PRT_MNG_MDEF_EXT_MLD_SHIFT 29 +#define I40E_PRT_MNG_MDEF_EXT_MLD_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_MLD_SHIFT) +#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_SHIFT 30 +#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_SHIFT) +#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_SHIFT 31 +#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_SHIFT) +#define I40E_PRT_MNG_MDEFVSI(_i) (0x00256580 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */ +#define I40E_PRT_MNG_MDEFVSI_MAX_INDEX 3 +#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_SHIFT 0 +#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_SHIFT) +#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_SHIFT 16 +#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_SHIFT) +#define I40E_PRT_MNG_METF(_i) (0x00256780 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */ +#define I40E_PRT_MNG_METF_MAX_INDEX 3 +#define I40E_PRT_MNG_METF_ETYPE_SHIFT 0 +#define I40E_PRT_MNG_METF_ETYPE_MASK I40E_MASK(0xFFFF, 
I40E_PRT_MNG_METF_ETYPE_SHIFT) +#define I40E_PRT_MNG_METF_POLARITY_SHIFT 30 +#define I40E_PRT_MNG_METF_POLARITY_MASK I40E_MASK(0x1, I40E_PRT_MNG_METF_POLARITY_SHIFT) +#define I40E_PRT_MNG_MFUTP(_i) (0x00254E00 + ((_i) * 32)) /* _i=0...15 */ /* Reset: POR */ +#define I40E_PRT_MNG_MFUTP_MAX_INDEX 15 +#define I40E_PRT_MNG_MFUTP_MFUTP_N_SHIFT 0 +#define I40E_PRT_MNG_MFUTP_MFUTP_N_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MFUTP_MFUTP_N_SHIFT) +#define I40E_PRT_MNG_MFUTP_UDP_SHIFT 16 +#define I40E_PRT_MNG_MFUTP_UDP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MFUTP_UDP_SHIFT) +#define I40E_PRT_MNG_MFUTP_TCP_SHIFT 17 +#define I40E_PRT_MNG_MFUTP_TCP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MFUTP_TCP_SHIFT) +#define I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_SHIFT 18 +#define I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_MASK I40E_MASK(0x1, I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_SHIFT) +#define I40E_PRT_MNG_MIPAF4(_i) (0x00256280 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */ +#define I40E_PRT_MNG_MIPAF4_MAX_INDEX 3 +#define I40E_PRT_MNG_MIPAF4_MIPAF_SHIFT 0 +#define I40E_PRT_MNG_MIPAF4_MIPAF_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_MIPAF4_MIPAF_SHIFT) +#define I40E_PRT_MNG_MIPAF6(_i) (0x00254200 + ((_i) * 32)) /* _i=0...15 */ /* Reset: POR */ +#define I40E_PRT_MNG_MIPAF6_MAX_INDEX 15 +#define I40E_PRT_MNG_MIPAF6_MIPAF_SHIFT 0 +#define I40E_PRT_MNG_MIPAF6_MIPAF_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_MIPAF6_MIPAF_SHIFT) +#define I40E_PRT_MNG_MMAH(_i) (0x00256380 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */ +#define I40E_PRT_MNG_MMAH_MAX_INDEX 3 +#define I40E_PRT_MNG_MMAH_MMAH_SHIFT 0 +#define I40E_PRT_MNG_MMAH_MMAH_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MMAH_MMAH_SHIFT) +#define I40E_PRT_MNG_MMAL(_i) (0x00256480 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */ +#define I40E_PRT_MNG_MMAL_MAX_INDEX 3 +#define I40E_PRT_MNG_MMAL_MMAL_SHIFT 0 +#define I40E_PRT_MNG_MMAL_MMAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_MMAL_MMAL_SHIFT) +#define I40E_PRT_MNG_MNGONLY 0x00256A60 /* Reset: POR */ +#define I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_SHIFT 0 +#define I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_MASK I40E_MASK(0xFF, I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_SHIFT) +#define I40E_PRT_MNG_MSFM 0x00256AA0 /* Reset: POR */ +#define I40E_PRT_MNG_MSFM_PORT_26F_UDP_SHIFT 0 +#define I40E_PRT_MNG_MSFM_PORT_26F_UDP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_26F_UDP_SHIFT) +#define I40E_PRT_MNG_MSFM_PORT_26F_TCP_SHIFT 1 +#define I40E_PRT_MNG_MSFM_PORT_26F_TCP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_26F_TCP_SHIFT) +#define I40E_PRT_MNG_MSFM_PORT_298_UDP_SHIFT 2 +#define I40E_PRT_MNG_MSFM_PORT_298_UDP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_298_UDP_SHIFT) +#define I40E_PRT_MNG_MSFM_PORT_298_TCP_SHIFT 3 +#define I40E_PRT_MNG_MSFM_PORT_298_TCP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_298_TCP_SHIFT) +#define I40E_PRT_MNG_MSFM_IPV6_0_MASK_SHIFT 4 +#define I40E_PRT_MNG_MSFM_IPV6_0_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_0_MASK_SHIFT) +#define I40E_PRT_MNG_MSFM_IPV6_1_MASK_SHIFT 5 +#define I40E_PRT_MNG_MSFM_IPV6_1_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_1_MASK_SHIFT) +#define I40E_PRT_MNG_MSFM_IPV6_2_MASK_SHIFT 6 +#define I40E_PRT_MNG_MSFM_IPV6_2_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_2_MASK_SHIFT) +#define I40E_PRT_MNG_MSFM_IPV6_3_MASK_SHIFT 7 +#define I40E_PRT_MNG_MSFM_IPV6_3_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_3_MASK_SHIFT) +#define I40E_MSIX_PBA(_i) (0x00001000 + ((_i) * 4)) /* _i=0...5 */ /* Reset: FLR */ +#define I40E_MSIX_PBA_MAX_INDEX 5 +#define I40E_MSIX_PBA_PENBIT_SHIFT 0 +#define 
I40E_MSIX_PBA_PENBIT_MASK I40E_MASK(0xFFFFFFFF, I40E_MSIX_PBA_PENBIT_SHIFT) +#define I40E_MSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */ +#define I40E_MSIX_TADD_MAX_INDEX 128 +#define I40E_MSIX_TADD_MSIXTADD10_SHIFT 0 +#define I40E_MSIX_TADD_MSIXTADD10_MASK I40E_MASK(0x3, I40E_MSIX_TADD_MSIXTADD10_SHIFT) +#define I40E_MSIX_TADD_MSIXTADD_SHIFT 2 +#define I40E_MSIX_TADD_MSIXTADD_MASK I40E_MASK(0x3FFFFFFF, I40E_MSIX_TADD_MSIXTADD_SHIFT) +#define I40E_MSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */ +#define I40E_MSIX_TMSG_MAX_INDEX 128 +#define I40E_MSIX_TMSG_MSIXTMSG_SHIFT 0 +#define I40E_MSIX_TMSG_MSIXTMSG_MASK I40E_MASK(0xFFFFFFFF, I40E_MSIX_TMSG_MSIXTMSG_SHIFT) +#define I40E_MSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */ +#define I40E_MSIX_TUADD_MAX_INDEX 128 +#define I40E_MSIX_TUADD_MSIXTUADD_SHIFT 0 +#define I40E_MSIX_TUADD_MSIXTUADD_MASK I40E_MASK(0xFFFFFFFF, I40E_MSIX_TUADD_MSIXTUADD_SHIFT) +#define I40E_MSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */ +#define I40E_MSIX_TVCTRL_MAX_INDEX 128 +#define I40E_MSIX_TVCTRL_MASK_SHIFT 0 +#define I40E_MSIX_TVCTRL_MASK_MASK I40E_MASK(0x1, I40E_MSIX_TVCTRL_MASK_SHIFT) +#endif /* PF_DRIVER */ +#define I40E_VFMSIX_PBA1(_i) (0x00002000 + ((_i) * 4)) /* _i=0...19 */ /* Reset: VFLR */ +#define I40E_VFMSIX_PBA1_MAX_INDEX 19 +#define I40E_VFMSIX_PBA1_PENBIT_SHIFT 0 +#define I40E_VFMSIX_PBA1_PENBIT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_PBA1_PENBIT_SHIFT) +#define I40E_VFMSIX_TADD1(_i) (0x00002100 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */ +#define I40E_VFMSIX_TADD1_MAX_INDEX 639 +#define I40E_VFMSIX_TADD1_MSIXTADD10_SHIFT 0 +#define I40E_VFMSIX_TADD1_MSIXTADD10_MASK I40E_MASK(0x3, I40E_VFMSIX_TADD1_MSIXTADD10_SHIFT) +#define I40E_VFMSIX_TADD1_MSIXTADD_SHIFT 2 +#define I40E_VFMSIX_TADD1_MSIXTADD_MASK I40E_MASK(0x3FFFFFFF, I40E_VFMSIX_TADD1_MSIXTADD_SHIFT) +#define I40E_VFMSIX_TMSG1(_i) (0x00002108 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */ +#define I40E_VFMSIX_TMSG1_MAX_INDEX 639 +#define I40E_VFMSIX_TMSG1_MSIXTMSG_SHIFT 0 +#define I40E_VFMSIX_TMSG1_MSIXTMSG_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TMSG1_MSIXTMSG_SHIFT) +#define I40E_VFMSIX_TUADD1(_i) (0x00002104 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */ +#define I40E_VFMSIX_TUADD1_MAX_INDEX 639 +#define I40E_VFMSIX_TUADD1_MSIXTUADD_SHIFT 0 +#define I40E_VFMSIX_TUADD1_MSIXTUADD_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TUADD1_MSIXTUADD_SHIFT) +#define I40E_VFMSIX_TVCTRL1(_i) (0x0000210C + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */ +#define I40E_VFMSIX_TVCTRL1_MAX_INDEX 639 +#define I40E_VFMSIX_TVCTRL1_MASK_SHIFT 0 +#define I40E_VFMSIX_TVCTRL1_MASK_MASK I40E_MASK(0x1, I40E_VFMSIX_TVCTRL1_MASK_SHIFT) +#ifdef PF_DRIVER +#define I40E_GLNVM_FLA 0x000B6108 /* Reset: POR */ +#define I40E_GLNVM_FLA_FL_SCK_SHIFT 0 +#define I40E_GLNVM_FLA_FL_SCK_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_SCK_SHIFT) +#define I40E_GLNVM_FLA_FL_CE_SHIFT 1 +#define I40E_GLNVM_FLA_FL_CE_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_CE_SHIFT) +#define I40E_GLNVM_FLA_FL_SI_SHIFT 2 +#define I40E_GLNVM_FLA_FL_SI_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_SI_SHIFT) +#define I40E_GLNVM_FLA_FL_SO_SHIFT 3 +#define I40E_GLNVM_FLA_FL_SO_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_SO_SHIFT) +#define I40E_GLNVM_FLA_FL_REQ_SHIFT 4 +#define I40E_GLNVM_FLA_FL_REQ_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_REQ_SHIFT) +#define I40E_GLNVM_FLA_FL_GNT_SHIFT 5 +#define I40E_GLNVM_FLA_FL_GNT_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_GNT_SHIFT) +#define 
I40E_GLNVM_FLA_LOCKED_SHIFT 6 +#define I40E_GLNVM_FLA_LOCKED_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_LOCKED_SHIFT) +#define I40E_GLNVM_FLA_FL_SADDR_SHIFT 18 +#define I40E_GLNVM_FLA_FL_SADDR_MASK I40E_MASK(0x7FF, I40E_GLNVM_FLA_FL_SADDR_SHIFT) +#define I40E_GLNVM_FLA_FL_BUSY_SHIFT 30 +#define I40E_GLNVM_FLA_FL_BUSY_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_BUSY_SHIFT) +#define I40E_GLNVM_FLA_FL_DER_SHIFT 31 +#define I40E_GLNVM_FLA_FL_DER_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_DER_SHIFT) +#define I40E_GLNVM_FLASHID 0x000B6104 /* Reset: POR */ +#define I40E_GLNVM_FLASHID_FLASHID_SHIFT 0 +#define I40E_GLNVM_FLASHID_FLASHID_MASK I40E_MASK(0xFFFFFF, I40E_GLNVM_FLASHID_FLASHID_SHIFT) +#define I40E_GLNVM_FLASHID_FLEEP_PERF_SHIFT 31 +#define I40E_GLNVM_FLASHID_FLEEP_PERF_MASK I40E_MASK(0x1, I40E_GLNVM_FLASHID_FLEEP_PERF_SHIFT) +#define I40E_GLNVM_GENS 0x000B6100 /* Reset: POR */ +#define I40E_GLNVM_GENS_NVM_PRES_SHIFT 0 +#define I40E_GLNVM_GENS_NVM_PRES_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_NVM_PRES_SHIFT) +#define I40E_GLNVM_GENS_SR_SIZE_SHIFT 5 +#define I40E_GLNVM_GENS_SR_SIZE_MASK I40E_MASK(0x7, I40E_GLNVM_GENS_SR_SIZE_SHIFT) +#define I40E_GLNVM_GENS_BANK1VAL_SHIFT 8 +#define I40E_GLNVM_GENS_BANK1VAL_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_BANK1VAL_SHIFT) +#define I40E_GLNVM_GENS_ALT_PRST_SHIFT 23 +#define I40E_GLNVM_GENS_ALT_PRST_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_ALT_PRST_SHIFT) +#define I40E_GLNVM_GENS_FL_AUTO_RD_SHIFT 25 +#define I40E_GLNVM_GENS_FL_AUTO_RD_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_FL_AUTO_RD_SHIFT) +#define I40E_GLNVM_PROTCSR(_i) (0x000B6010 + ((_i) * 4)) /* _i=0...59 */ /* Reset: POR */ +#define I40E_GLNVM_PROTCSR_MAX_INDEX 59 +#define I40E_GLNVM_PROTCSR_ADDR_BLOCK_SHIFT 0 +#define I40E_GLNVM_PROTCSR_ADDR_BLOCK_MASK I40E_MASK(0xFFFFFF, I40E_GLNVM_PROTCSR_ADDR_BLOCK_SHIFT) +#define I40E_GLNVM_SRCTL 0x000B6110 /* Reset: POR */ +#define I40E_GLNVM_SRCTL_SRBUSY_SHIFT 0 +#define I40E_GLNVM_SRCTL_SRBUSY_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_SRBUSY_SHIFT) +#define I40E_GLNVM_SRCTL_ADDR_SHIFT 14 +#define I40E_GLNVM_SRCTL_ADDR_MASK I40E_MASK(0x7FFF, I40E_GLNVM_SRCTL_ADDR_SHIFT) +#define I40E_GLNVM_SRCTL_WRITE_SHIFT 29 +#define I40E_GLNVM_SRCTL_WRITE_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_WRITE_SHIFT) +#define I40E_GLNVM_SRCTL_START_SHIFT 30 +#define I40E_GLNVM_SRCTL_START_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_START_SHIFT) +#define I40E_GLNVM_SRCTL_DONE_SHIFT 31 +#define I40E_GLNVM_SRCTL_DONE_MASK I40E_MASK(0x1u, I40E_GLNVM_SRCTL_DONE_SHIFT) +#define I40E_GLNVM_SRDATA 0x000B6114 /* Reset: POR */ +#define I40E_GLNVM_SRDATA_WRDATA_SHIFT 0 +#define I40E_GLNVM_SRDATA_WRDATA_MASK I40E_MASK(0xFFFF, I40E_GLNVM_SRDATA_WRDATA_SHIFT) +#define I40E_GLNVM_SRDATA_RDDATA_SHIFT 16 +#define I40E_GLNVM_SRDATA_RDDATA_MASK I40E_MASK(0xFFFF, I40E_GLNVM_SRDATA_RDDATA_SHIFT) +#define I40E_GLNVM_ULD 0x000B6008 /* Reset: POR */ +#define I40E_GLNVM_ULD_CONF_PCIR_DONE_SHIFT 0 +#define I40E_GLNVM_ULD_CONF_PCIR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIR_DONE_SHIFT) +#define I40E_GLNVM_ULD_CONF_PCIRTL_DONE_SHIFT 1 +#define I40E_GLNVM_ULD_CONF_PCIRTL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIRTL_DONE_SHIFT) +#define I40E_GLNVM_ULD_CONF_LCB_DONE_SHIFT 2 +#define I40E_GLNVM_ULD_CONF_LCB_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_LCB_DONE_SHIFT) +#define I40E_GLNVM_ULD_CONF_CORE_DONE_SHIFT 3 +#define I40E_GLNVM_ULD_CONF_CORE_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_CORE_DONE_SHIFT) +#define I40E_GLNVM_ULD_CONF_GLOBAL_DONE_SHIFT 4 +#define I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK I40E_MASK(0x1, 
I40E_GLNVM_ULD_CONF_GLOBAL_DONE_SHIFT) +#define I40E_GLNVM_ULD_CONF_POR_DONE_SHIFT 5 +#define I40E_GLNVM_ULD_CONF_POR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_POR_DONE_SHIFT) +#define I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_SHIFT 6 +#define I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_SHIFT) +#define I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_SHIFT 7 +#define I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_SHIFT) +#define I40E_GLNVM_ULD_CONF_EMP_DONE_SHIFT 8 +#define I40E_GLNVM_ULD_CONF_EMP_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_EMP_DONE_SHIFT) +#define I40E_GLNVM_ULD_CONF_PCIALT_DONE_SHIFT 9 +#define I40E_GLNVM_ULD_CONF_PCIALT_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIALT_DONE_SHIFT) +#define I40E_GLPCI_BYTCTH 0x0009C484 /* Reset: PCIR */ +#define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT 0 +#define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT) +#define I40E_GLPCI_BYTCTL 0x0009C488 /* Reset: PCIR */ +#define I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_SHIFT 0 +#define I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_SHIFT) +#define I40E_GLPCI_CAPCTRL 0x000BE4A4 /* Reset: PCIR */ +#define I40E_GLPCI_CAPCTRL_VPD_EN_SHIFT 0 +#define I40E_GLPCI_CAPCTRL_VPD_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPCTRL_VPD_EN_SHIFT) +#define I40E_GLPCI_CAPSUP 0x000BE4A8 /* Reset: PCIR */ +#define I40E_GLPCI_CAPSUP_PCIE_VER_SHIFT 0 +#define I40E_GLPCI_CAPSUP_PCIE_VER_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_PCIE_VER_SHIFT) +#define I40E_GLPCI_CAPSUP_LTR_EN_SHIFT 2 +#define I40E_GLPCI_CAPSUP_LTR_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_LTR_EN_SHIFT) +#define I40E_GLPCI_CAPSUP_TPH_EN_SHIFT 3 +#define I40E_GLPCI_CAPSUP_TPH_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_TPH_EN_SHIFT) +#define I40E_GLPCI_CAPSUP_ARI_EN_SHIFT 4 +#define I40E_GLPCI_CAPSUP_ARI_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ARI_EN_SHIFT) +#define I40E_GLPCI_CAPSUP_IOV_EN_SHIFT 5 +#define I40E_GLPCI_CAPSUP_IOV_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_IOV_EN_SHIFT) +#define I40E_GLPCI_CAPSUP_ACS_EN_SHIFT 6 +#define I40E_GLPCI_CAPSUP_ACS_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ACS_EN_SHIFT) +#define I40E_GLPCI_CAPSUP_SEC_EN_SHIFT 7 +#define I40E_GLPCI_CAPSUP_SEC_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_SEC_EN_SHIFT) +#define I40E_GLPCI_CAPSUP_ECRC_GEN_EN_SHIFT 16 +#define I40E_GLPCI_CAPSUP_ECRC_GEN_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ECRC_GEN_EN_SHIFT) +#define I40E_GLPCI_CAPSUP_ECRC_CHK_EN_SHIFT 17 +#define I40E_GLPCI_CAPSUP_ECRC_CHK_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ECRC_CHK_EN_SHIFT) +#define I40E_GLPCI_CAPSUP_IDO_EN_SHIFT 18 +#define I40E_GLPCI_CAPSUP_IDO_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_IDO_EN_SHIFT) +#define I40E_GLPCI_CAPSUP_MSI_MASK_SHIFT 19 +#define I40E_GLPCI_CAPSUP_MSI_MASK_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_MSI_MASK_SHIFT) +#define I40E_GLPCI_CAPSUP_CSR_CONF_EN_SHIFT 20 +#define I40E_GLPCI_CAPSUP_CSR_CONF_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_CSR_CONF_EN_SHIFT) +#define I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_SHIFT 30 +#define I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_SHIFT) +#define I40E_GLPCI_CAPSUP_LOAD_DEV_ID_SHIFT 31 +#define I40E_GLPCI_CAPSUP_LOAD_DEV_ID_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_LOAD_DEV_ID_SHIFT) +#define I40E_GLPCI_CNF 0x000BE4C0 /* Reset: POR */ +#define I40E_GLPCI_CNF_FLEX10_SHIFT 1 +#define I40E_GLPCI_CNF_FLEX10_MASK I40E_MASK(0x1, I40E_GLPCI_CNF_FLEX10_SHIFT) +#define 
I40E_GLPCI_CNF_WAKE_PIN_EN_SHIFT 2 +#define I40E_GLPCI_CNF_WAKE_PIN_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CNF_WAKE_PIN_EN_SHIFT) +#define I40E_GLPCI_CNF2 0x000BE494 /* Reset: PCIR */ +#define I40E_GLPCI_CNF2_RO_DIS_SHIFT 0 +#define I40E_GLPCI_CNF2_RO_DIS_MASK I40E_MASK(0x1, I40E_GLPCI_CNF2_RO_DIS_SHIFT) +#define I40E_GLPCI_CNF2_CACHELINE_SIZE_SHIFT 1 +#define I40E_GLPCI_CNF2_CACHELINE_SIZE_MASK I40E_MASK(0x1, I40E_GLPCI_CNF2_CACHELINE_SIZE_SHIFT) +#define I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT 2 +#define I40E_GLPCI_CNF2_MSI_X_PF_N_MASK I40E_MASK(0x7FF, I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT) +#define I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT 13 +#define I40E_GLPCI_CNF2_MSI_X_VF_N_MASK I40E_MASK(0x7FF, I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT) +#define I40E_GLPCI_DREVID 0x0009C480 /* Reset: PCIR */ +#define I40E_GLPCI_DREVID_DEFAULT_REVID_SHIFT 0 +#define I40E_GLPCI_DREVID_DEFAULT_REVID_MASK I40E_MASK(0xFF, I40E_GLPCI_DREVID_DEFAULT_REVID_SHIFT) +#define I40E_GLPCI_GSCL_1 0x0009C48C /* Reset: PCIR */ +#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_SHIFT 0 +#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_SHIFT) +#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_SHIFT 1 +#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_SHIFT) +#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_SHIFT 2 +#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_SHIFT) +#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_SHIFT 3 +#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_SHIFT) +#define I40E_GLPCI_GSCL_1_LBC_ENABLE_0_SHIFT 4 +#define I40E_GLPCI_GSCL_1_LBC_ENABLE_0_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_0_SHIFT) +#define I40E_GLPCI_GSCL_1_LBC_ENABLE_1_SHIFT 5 +#define I40E_GLPCI_GSCL_1_LBC_ENABLE_1_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_1_SHIFT) +#define I40E_GLPCI_GSCL_1_LBC_ENABLE_2_SHIFT 6 +#define I40E_GLPCI_GSCL_1_LBC_ENABLE_2_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_2_SHIFT) +#define I40E_GLPCI_GSCL_1_LBC_ENABLE_3_SHIFT 7 +#define I40E_GLPCI_GSCL_1_LBC_ENABLE_3_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_3_SHIFT) +#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_SHIFT 8 +#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_SHIFT) +#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_SHIFT 9 +#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_MASK I40E_MASK(0x1F, I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_SHIFT) +#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_SHIFT 14 +#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_SHIFT) +#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_SHIFT 15 +#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_MASK I40E_MASK(0x1F, I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_SHIFT) +#define I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_SHIFT 28 +#define I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_SHIFT) +#define I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_SHIFT 29 +#define I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_SHIFT) +#define I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_SHIFT 30 +#define I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_SHIFT) +#define I40E_GLPCI_GSCL_1_GIO_COUNT_START_SHIFT 31 +#define I40E_GLPCI_GSCL_1_GIO_COUNT_START_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_START_SHIFT) +#define I40E_GLPCI_GSCL_2 0x0009C490 /* Reset: PCIR */ +#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_SHIFT 0 +#define 
I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_SHIFT) +#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_SHIFT 8 +#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_SHIFT) +#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_SHIFT 16 +#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_SHIFT) +#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_SHIFT 24 +#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_SHIFT) +#define I40E_GLPCI_GSCL_5_8(_i) (0x0009C494 + ((_i) * 4)) /* _i=0...3 */ /* Reset: PCIR */ +#define I40E_GLPCI_GSCL_5_8_MAX_INDEX 3 +#define I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_SHIFT 0 +#define I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_MASK I40E_MASK(0xFFFF, I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_SHIFT) +#define I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_SHIFT 16 +#define I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_MASK I40E_MASK(0xFFFF, I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_SHIFT) +#define I40E_GLPCI_GSCN_0_3(_i) (0x0009C4A4 + ((_i) * 4)) /* _i=0...3 */ /* Reset: PCIR */ +#define I40E_GLPCI_GSCN_0_3_MAX_INDEX 3 +#define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT 0 +#define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT) +#define I40E_GLPCI_LBARCTRL 0x000BE484 /* Reset: POR */ +#define I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT 0 +#define I40E_GLPCI_LBARCTRL_PREFBAR_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT) +#define I40E_GLPCI_LBARCTRL_BAR32_SHIFT 1 +#define I40E_GLPCI_LBARCTRL_BAR32_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_BAR32_SHIFT) +#define I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_SHIFT 3 +#define I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_SHIFT) +#define I40E_GLPCI_LBARCTRL_RSVD_4_SHIFT 4 +#define I40E_GLPCI_LBARCTRL_RSVD_4_MASK I40E_MASK(0x3, I40E_GLPCI_LBARCTRL_RSVD_4_SHIFT) +#define I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT 6 +#define I40E_GLPCI_LBARCTRL_FL_SIZE_MASK I40E_MASK(0x7, I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT) +#define I40E_GLPCI_LBARCTRL_RSVD_10_SHIFT 10 +#define I40E_GLPCI_LBARCTRL_RSVD_10_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_RSVD_10_SHIFT) +#define I40E_GLPCI_LBARCTRL_EXROM_SIZE_SHIFT 11 +#define I40E_GLPCI_LBARCTRL_EXROM_SIZE_MASK I40E_MASK(0x7, I40E_GLPCI_LBARCTRL_EXROM_SIZE_SHIFT) +#define I40E_GLPCI_LINKCAP 0x000BE4AC /* Reset: PCIR */ +#define I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_SHIFT 0 +#define I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_MASK I40E_MASK(0x3F, I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_SHIFT) +#define I40E_GLPCI_LINKCAP_MAX_PAYLOAD_SHIFT 6 +#define I40E_GLPCI_LINKCAP_MAX_PAYLOAD_MASK I40E_MASK(0x7, I40E_GLPCI_LINKCAP_MAX_PAYLOAD_SHIFT) +#define I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_SHIFT 9 +#define I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_MASK I40E_MASK(0xF, I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_SHIFT) +#define I40E_GLPCI_PCIERR 0x000BE4FC /* Reset: PCIR */ +#define I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT 0 +#define I40E_GLPCI_PCIERR_PCIE_ERR_REP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT) +#define I40E_GLPCI_PKTCT 0x0009C4BC /* Reset: PCIR */ +#define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT 0 +#define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT) +#define I40E_GLPCI_PM_MUX_NPQ 0x0009C4F4 /* Reset: PCIR */ +#define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT 0 +#define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_MASK I40E_MASK(0x7, I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT) +#define 
I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_SHIFT 16 +#define I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_MASK I40E_MASK(0x1F, I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_SHIFT) +#define I40E_GLPCI_PM_MUX_PFB 0x0009C4F0 /* Reset: PCIR */ +#define I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_SHIFT 0 +#define I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_MASK I40E_MASK(0x1F, I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_SHIFT) +#define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT 16 +#define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_MASK I40E_MASK(0x7, I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT) +#define I40E_GLPCI_PMSUP 0x000BE4B0 /* Reset: PCIR */ +#define I40E_GLPCI_PMSUP_ASPM_SUP_SHIFT 0 +#define I40E_GLPCI_PMSUP_ASPM_SUP_MASK I40E_MASK(0x3, I40E_GLPCI_PMSUP_ASPM_SUP_SHIFT) +#define I40E_GLPCI_PMSUP_L0S_EXIT_LAT_SHIFT 2 +#define I40E_GLPCI_PMSUP_L0S_EXIT_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L0S_EXIT_LAT_SHIFT) +#define I40E_GLPCI_PMSUP_L1_EXIT_LAT_SHIFT 5 +#define I40E_GLPCI_PMSUP_L1_EXIT_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L1_EXIT_LAT_SHIFT) +#define I40E_GLPCI_PMSUP_L0S_ACC_LAT_SHIFT 8 +#define I40E_GLPCI_PMSUP_L0S_ACC_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L0S_ACC_LAT_SHIFT) +#define I40E_GLPCI_PMSUP_L1_ACC_LAT_SHIFT 11 +#define I40E_GLPCI_PMSUP_L1_ACC_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L1_ACC_LAT_SHIFT) +#define I40E_GLPCI_PMSUP_SLOT_CLK_SHIFT 14 +#define I40E_GLPCI_PMSUP_SLOT_CLK_MASK I40E_MASK(0x1, I40E_GLPCI_PMSUP_SLOT_CLK_SHIFT) +#define I40E_GLPCI_PMSUP_OBFF_SUP_SHIFT 15 +#define I40E_GLPCI_PMSUP_OBFF_SUP_MASK I40E_MASK(0x3, I40E_GLPCI_PMSUP_OBFF_SUP_SHIFT) +#define I40E_GLPCI_PQ_MAX_USED_SPC 0x0009C4EC /* Reset: PCIR */ +#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_SHIFT 0 +#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_MASK I40E_MASK(0xFF, I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_SHIFT) +#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_SHIFT 8 +#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_MASK I40E_MASK(0xFF, I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_SHIFT) +#define I40E_GLPCI_PWRDATA 0x000BE490 /* Reset: PCIR */ +#define I40E_GLPCI_PWRDATA_D0_POWER_SHIFT 0 +#define I40E_GLPCI_PWRDATA_D0_POWER_MASK I40E_MASK(0xFF, I40E_GLPCI_PWRDATA_D0_POWER_SHIFT) +#define I40E_GLPCI_PWRDATA_COMM_POWER_SHIFT 8 +#define I40E_GLPCI_PWRDATA_COMM_POWER_MASK I40E_MASK(0xFF, I40E_GLPCI_PWRDATA_COMM_POWER_SHIFT) +#define I40E_GLPCI_PWRDATA_D3_POWER_SHIFT 16 +#define I40E_GLPCI_PWRDATA_D3_POWER_MASK I40E_MASK(0xFF, I40E_GLPCI_PWRDATA_D3_POWER_SHIFT) +#define I40E_GLPCI_PWRDATA_DATA_SCALE_SHIFT 24 +#define I40E_GLPCI_PWRDATA_DATA_SCALE_MASK I40E_MASK(0x3, I40E_GLPCI_PWRDATA_DATA_SCALE_SHIFT) +#define I40E_GLPCI_REVID 0x000BE4B4 /* Reset: PCIR */ +#define I40E_GLPCI_REVID_NVM_REVID_SHIFT 0 +#define I40E_GLPCI_REVID_NVM_REVID_MASK I40E_MASK(0xFF, I40E_GLPCI_REVID_NVM_REVID_SHIFT) +#define I40E_GLPCI_SERH 0x000BE49C /* Reset: PCIR */ +#define I40E_GLPCI_SERH_SER_NUM_H_SHIFT 0 +#define I40E_GLPCI_SERH_SER_NUM_H_MASK I40E_MASK(0xFFFF, I40E_GLPCI_SERH_SER_NUM_H_SHIFT) +#define I40E_GLPCI_SERL 0x000BE498 /* Reset: PCIR */ +#define I40E_GLPCI_SERL_SER_NUM_L_SHIFT 0 +#define I40E_GLPCI_SERL_SER_NUM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_SERL_SER_NUM_L_SHIFT) +#define I40E_GLPCI_SPARE_BITS_0 0x0009C4F8 /* Reset: PCIR */ +#define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT 0 +#define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT) +#define I40E_GLPCI_SPARE_BITS_1 0x0009C4FC /* Reset: PCIR */ +#define 
I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_SHIFT 0 +#define I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_SHIFT) +#define I40E_GLPCI_SUBVENID 0x000BE48C /* Reset: PCIR */ +#define I40E_GLPCI_SUBVENID_SUB_VEN_ID_SHIFT 0 +#define I40E_GLPCI_SUBVENID_SUB_VEN_ID_MASK I40E_MASK(0xFFFF, I40E_GLPCI_SUBVENID_SUB_VEN_ID_SHIFT) +#define I40E_GLPCI_UPADD 0x000BE4F8 /* Reset: PCIR */ +#define I40E_GLPCI_UPADD_ADDRESS_SHIFT 1 +#define I40E_GLPCI_UPADD_ADDRESS_MASK I40E_MASK(0x7FFFFFFF, I40E_GLPCI_UPADD_ADDRESS_SHIFT) +#define I40E_GLPCI_VENDORID 0x000BE518 /* Reset: PCIR */ +#define I40E_GLPCI_VENDORID_VENDORID_SHIFT 0 +#define I40E_GLPCI_VENDORID_VENDORID_MASK I40E_MASK(0xFFFF, I40E_GLPCI_VENDORID_VENDORID_SHIFT) +#define I40E_GLPCI_VFSUP 0x000BE4B8 /* Reset: PCIR */ +#define I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT 0 +#define I40E_GLPCI_VFSUP_VF_PREFETCH_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT) +#define I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT 1 +#define I40E_GLPCI_VFSUP_VR_BAR_TYPE_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT) +#define I40E_GLTPH_CTRL 0x000BE480 /* Reset: PCIR */ +#define I40E_GLTPH_CTRL_DESC_PH_SHIFT 9 +#define I40E_GLTPH_CTRL_DESC_PH_MASK I40E_MASK(0x3, I40E_GLTPH_CTRL_DESC_PH_SHIFT) +#define I40E_GLTPH_CTRL_DATA_PH_SHIFT 11 +#define I40E_GLTPH_CTRL_DATA_PH_MASK I40E_MASK(0x3, I40E_GLTPH_CTRL_DATA_PH_SHIFT) +#define I40E_PF_FUNC_RID 0x0009C000 /* Reset: PCIR */ +#define I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT 0 +#define I40E_PF_FUNC_RID_FUNCTION_NUMBER_MASK I40E_MASK(0x7, I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT) +#define I40E_PF_FUNC_RID_DEVICE_NUMBER_SHIFT 3 +#define I40E_PF_FUNC_RID_DEVICE_NUMBER_MASK I40E_MASK(0x1F, I40E_PF_FUNC_RID_DEVICE_NUMBER_SHIFT) +#define I40E_PF_FUNC_RID_BUS_NUMBER_SHIFT 8 +#define I40E_PF_FUNC_RID_BUS_NUMBER_MASK I40E_MASK(0xFF, I40E_PF_FUNC_RID_BUS_NUMBER_SHIFT) +#define I40E_PF_PCI_CIAA 0x0009C080 /* Reset: FLR */ +#define I40E_PF_PCI_CIAA_ADDRESS_SHIFT 0 +#define I40E_PF_PCI_CIAA_ADDRESS_MASK I40E_MASK(0xFFF, I40E_PF_PCI_CIAA_ADDRESS_SHIFT) +#define I40E_PF_PCI_CIAA_VF_NUM_SHIFT 12 +#define I40E_PF_PCI_CIAA_VF_NUM_MASK I40E_MASK(0x7F, I40E_PF_PCI_CIAA_VF_NUM_SHIFT) +#define I40E_PF_PCI_CIAD 0x0009C100 /* Reset: FLR */ +#define I40E_PF_PCI_CIAD_DATA_SHIFT 0 +#define I40E_PF_PCI_CIAD_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_PCI_CIAD_DATA_SHIFT) +#define I40E_PFPCI_CLASS 0x000BE400 /* Reset: PCIR */ +#define I40E_PFPCI_CLASS_STORAGE_CLASS_SHIFT 0 +#define I40E_PFPCI_CLASS_STORAGE_CLASS_MASK I40E_MASK(0x1, I40E_PFPCI_CLASS_STORAGE_CLASS_SHIFT) +#define I40E_PFPCI_CLASS_RESERVED_1_SHIFT 1 +#define I40E_PFPCI_CLASS_RESERVED_1_MASK I40E_MASK(0x1, I40E_PFPCI_CLASS_RESERVED_1_SHIFT) +#define I40E_PFPCI_CLASS_PF_IS_LAN_SHIFT 2 +#define I40E_PFPCI_CLASS_PF_IS_LAN_MASK I40E_MASK(0x1, I40E_PFPCI_CLASS_PF_IS_LAN_SHIFT) +#define I40E_PFPCI_CNF 0x000BE000 /* Reset: PCIR */ +#define I40E_PFPCI_CNF_MSI_EN_SHIFT 2 +#define I40E_PFPCI_CNF_MSI_EN_MASK I40E_MASK(0x1, I40E_PFPCI_CNF_MSI_EN_SHIFT) +#define I40E_PFPCI_CNF_EXROM_DIS_SHIFT 3 +#define I40E_PFPCI_CNF_EXROM_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_CNF_EXROM_DIS_SHIFT) +#define I40E_PFPCI_CNF_IO_BAR_SHIFT 4 +#define I40E_PFPCI_CNF_IO_BAR_MASK I40E_MASK(0x1, I40E_PFPCI_CNF_IO_BAR_SHIFT) +#define I40E_PFPCI_CNF_INT_PIN_SHIFT 5 +#define I40E_PFPCI_CNF_INT_PIN_MASK I40E_MASK(0x3, I40E_PFPCI_CNF_INT_PIN_SHIFT) +#define I40E_PFPCI_DEVID 0x000BE080 /* Reset: PCIR */ +#define I40E_PFPCI_DEVID_PF_DEV_ID_SHIFT 0 +#define I40E_PFPCI_DEVID_PF_DEV_ID_MASK I40E_MASK(0xFFFF, 
I40E_PFPCI_DEVID_PF_DEV_ID_SHIFT) +#define I40E_PFPCI_DEVID_VF_DEV_ID_SHIFT 16 +#define I40E_PFPCI_DEVID_VF_DEV_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_DEVID_VF_DEV_ID_SHIFT) +#define I40E_PFPCI_FACTPS 0x0009C180 /* Reset: FLR */ +#define I40E_PFPCI_FACTPS_FUNC_POWER_STATE_SHIFT 0 +#define I40E_PFPCI_FACTPS_FUNC_POWER_STATE_MASK I40E_MASK(0x3, I40E_PFPCI_FACTPS_FUNC_POWER_STATE_SHIFT) +#define I40E_PFPCI_FACTPS_FUNC_AUX_EN_SHIFT 3 +#define I40E_PFPCI_FACTPS_FUNC_AUX_EN_MASK I40E_MASK(0x1, I40E_PFPCI_FACTPS_FUNC_AUX_EN_SHIFT) +#define I40E_PFPCI_FUNC 0x000BE200 /* Reset: POR */ +#define I40E_PFPCI_FUNC_FUNC_DIS_SHIFT 0 +#define I40E_PFPCI_FUNC_FUNC_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC_FUNC_DIS_SHIFT) +#define I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_SHIFT 1 +#define I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_SHIFT) +#define I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_SHIFT 2 +#define I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_SHIFT) +#define I40E_PFPCI_FUNC2 0x000BE180 /* Reset: PCIR */ +#define I40E_PFPCI_FUNC2_EMP_FUNC_DIS_SHIFT 0 +#define I40E_PFPCI_FUNC2_EMP_FUNC_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC2_EMP_FUNC_DIS_SHIFT) +#define I40E_PFPCI_ICAUSE 0x0009C200 /* Reset: PFR */ +#define I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_SHIFT 0 +#define I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_SHIFT) +#define I40E_PFPCI_IENA 0x0009C280 /* Reset: PFR */ +#define I40E_PFPCI_IENA_PCIE_ERR_EN_SHIFT 0 +#define I40E_PFPCI_IENA_PCIE_ERR_EN_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPCI_IENA_PCIE_ERR_EN_SHIFT) +#define I40E_PFPCI_PF_FLUSH_DONE 0x0009C800 /* Reset: PCIR */ +#define I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_SHIFT 0 +#define I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_SHIFT) +#define I40E_PFPCI_PM 0x000BE300 /* Reset: POR */ +#define I40E_PFPCI_PM_PME_EN_SHIFT 0 +#define I40E_PFPCI_PM_PME_EN_MASK I40E_MASK(0x1, I40E_PFPCI_PM_PME_EN_SHIFT) +#define I40E_PFPCI_STATUS1 0x000BE280 /* Reset: POR */ +#define I40E_PFPCI_STATUS1_FUNC_VALID_SHIFT 0 +#define I40E_PFPCI_STATUS1_FUNC_VALID_MASK I40E_MASK(0x1, I40E_PFPCI_STATUS1_FUNC_VALID_SHIFT) +#define I40E_PFPCI_SUBSYSID 0x000BE100 /* Reset: PCIR */ +#define I40E_PFPCI_SUBSYSID_PF_SUBSYS_ID_SHIFT 0 +#define I40E_PFPCI_SUBSYSID_PF_SUBSYS_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_SUBSYSID_PF_SUBSYS_ID_SHIFT) +#define I40E_PFPCI_SUBSYSID_VF_SUBSYS_ID_SHIFT 16 +#define I40E_PFPCI_SUBSYSID_VF_SUBSYS_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_SUBSYSID_VF_SUBSYS_ID_SHIFT) +#define I40E_PFPCI_VF_FLUSH_DONE 0x0000E400 /* Reset: PCIR */ +#define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT 0 +#define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT) +#define I40E_PFPCI_VF_FLUSH_DONE1(_VF) (0x0009C600 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: PCIR */ +#define I40E_PFPCI_VF_FLUSH_DONE1_MAX_INDEX 127 +#define I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_SHIFT 0 +#define I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_SHIFT) +#define I40E_PFPCI_VM_FLUSH_DONE 0x0009C880 /* Reset: PCIR */ +#define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT 0 +#define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT) +#define I40E_PFPCI_VMINDEX 0x0009C300 /* Reset: PCIR */ +#define I40E_PFPCI_VMINDEX_VMINDEX_SHIFT 0 +#define I40E_PFPCI_VMINDEX_VMINDEX_MASK I40E_MASK(0x1FF, I40E_PFPCI_VMINDEX_VMINDEX_SHIFT) +#define 
I40E_PFPCI_VMPEND 0x0009C380 /* Reset: PCIR */ +#define I40E_PFPCI_VMPEND_PENDING_SHIFT 0 +#define I40E_PFPCI_VMPEND_PENDING_MASK I40E_MASK(0x1, I40E_PFPCI_VMPEND_PENDING_SHIFT) +#define I40E_PRTPM_EEE_STAT 0x001E4320 /* Reset: GLOBR */ +#define I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT 29 +#define I40E_PRTPM_EEE_STAT_EEE_NEG_MASK I40E_MASK(0x1, I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT) +#define I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT 30 +#define I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK I40E_MASK(0x1, I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT) +#define I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT 31 +#define I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK I40E_MASK(0x1, I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT) +#define I40E_PRTPM_EEEC 0x001E4380 /* Reset: GLOBR */ +#define I40E_PRTPM_EEEC_TW_WAKE_MIN_SHIFT 16 +#define I40E_PRTPM_EEEC_TW_WAKE_MIN_MASK I40E_MASK(0x3F, I40E_PRTPM_EEEC_TW_WAKE_MIN_SHIFT) +#define I40E_PRTPM_EEEC_TX_LU_LPI_DLY_SHIFT 24 +#define I40E_PRTPM_EEEC_TX_LU_LPI_DLY_MASK I40E_MASK(0x3, I40E_PRTPM_EEEC_TX_LU_LPI_DLY_SHIFT) +#define I40E_PRTPM_EEEC_TEEE_DLY_SHIFT 26 +#define I40E_PRTPM_EEEC_TEEE_DLY_MASK I40E_MASK(0x3F, I40E_PRTPM_EEEC_TEEE_DLY_SHIFT) +#define I40E_PRTPM_EEEFWD 0x001E4400 /* Reset: GLOBR */ +#define I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_SHIFT 31 +#define I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_MASK I40E_MASK(0x1, I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_SHIFT) +#define I40E_PRTPM_EEER 0x001E4360 /* Reset: GLOBR */ +#define I40E_PRTPM_EEER_TW_SYSTEM_SHIFT 0 +#define I40E_PRTPM_EEER_TW_SYSTEM_MASK I40E_MASK(0xFFFF, I40E_PRTPM_EEER_TW_SYSTEM_SHIFT) +#define I40E_PRTPM_EEER_TX_LPI_EN_SHIFT 16 +#define I40E_PRTPM_EEER_TX_LPI_EN_MASK I40E_MASK(0x1, I40E_PRTPM_EEER_TX_LPI_EN_SHIFT) +#define I40E_PRTPM_EEETXC 0x001E43E0 /* Reset: GLOBR */ +#define I40E_PRTPM_EEETXC_TW_PHY_SHIFT 0 +#define I40E_PRTPM_EEETXC_TW_PHY_MASK I40E_MASK(0xFFFF, I40E_PRTPM_EEETXC_TW_PHY_SHIFT) +#define I40E_PRTPM_GC 0x000B8140 /* Reset: POR */ +#define I40E_PRTPM_GC_EMP_LINK_ON_SHIFT 0 +#define I40E_PRTPM_GC_EMP_LINK_ON_MASK I40E_MASK(0x1, I40E_PRTPM_GC_EMP_LINK_ON_SHIFT) +#define I40E_PRTPM_GC_MNG_VETO_SHIFT 1 +#define I40E_PRTPM_GC_MNG_VETO_MASK I40E_MASK(0x1, I40E_PRTPM_GC_MNG_VETO_SHIFT) +#define I40E_PRTPM_GC_RATD_SHIFT 2 +#define I40E_PRTPM_GC_RATD_MASK I40E_MASK(0x1, I40E_PRTPM_GC_RATD_SHIFT) +#define I40E_PRTPM_GC_LCDMP_SHIFT 3 +#define I40E_PRTPM_GC_LCDMP_MASK I40E_MASK(0x1, I40E_PRTPM_GC_LCDMP_SHIFT) +#define I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT 31 +#define I40E_PRTPM_GC_LPLU_ASSERTED_MASK I40E_MASK(0x1, I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT) +#define I40E_PRTPM_RLPIC 0x001E43A0 /* Reset: GLOBR */ +#define I40E_PRTPM_RLPIC_ERLPIC_SHIFT 0 +#define I40E_PRTPM_RLPIC_ERLPIC_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTPM_RLPIC_ERLPIC_SHIFT) +#define I40E_PRTPM_TLPIC 0x001E43C0 /* Reset: GLOBR */ +#define I40E_PRTPM_TLPIC_ETLPIC_SHIFT 0 +#define I40E_PRTPM_TLPIC_ETLPIC_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTPM_TLPIC_ETLPIC_SHIFT) +#define I40E_GL_PRS_FVBM(_i) (0x00269760 + ((_i) * 4)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GL_PRS_FVBM_MAX_INDEX 3 +#define I40E_GL_PRS_FVBM_FV_BYTE_INDX_SHIFT 0 +#define I40E_GL_PRS_FVBM_FV_BYTE_INDX_MASK I40E_MASK(0x7F, I40E_GL_PRS_FVBM_FV_BYTE_INDX_SHIFT) +#define I40E_GL_PRS_FVBM_RULE_BUS_INDX_SHIFT 8 +#define I40E_GL_PRS_FVBM_RULE_BUS_INDX_MASK I40E_MASK(0x3F, I40E_GL_PRS_FVBM_RULE_BUS_INDX_SHIFT) +#define I40E_GL_PRS_FVBM_MSK_ENA_SHIFT 31 +#define I40E_GL_PRS_FVBM_MSK_ENA_MASK I40E_MASK(0x1, I40E_GL_PRS_FVBM_MSK_ENA_SHIFT) +#define I40E_GLRPB_DPSS 0x000AC828 /* Reset: CORER */ +#define 
I40E_GLRPB_DPSS_DPS_TCN_SHIFT 0 +#define I40E_GLRPB_DPSS_DPS_TCN_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_DPSS_DPS_TCN_SHIFT) +#define I40E_GLRPB_GHW 0x000AC830 /* Reset: CORER */ +#define I40E_GLRPB_GHW_GHW_SHIFT 0 +#define I40E_GLRPB_GHW_GHW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_GHW_GHW_SHIFT) +#define I40E_GLRPB_GLW 0x000AC834 /* Reset: CORER */ +#define I40E_GLRPB_GLW_GLW_SHIFT 0 +#define I40E_GLRPB_GLW_GLW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_GLW_GLW_SHIFT) +#define I40E_GLRPB_PHW 0x000AC844 /* Reset: CORER */ +#define I40E_GLRPB_PHW_PHW_SHIFT 0 +#define I40E_GLRPB_PHW_PHW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_PHW_PHW_SHIFT) +#define I40E_GLRPB_PLW 0x000AC848 /* Reset: CORER */ +#define I40E_GLRPB_PLW_PLW_SHIFT 0 +#define I40E_GLRPB_PLW_PLW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_PLW_PLW_SHIFT) +#define I40E_PRTRPB_DHW(_i) (0x000AC100 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ +#define I40E_PRTRPB_DHW_MAX_INDEX 7 +#define I40E_PRTRPB_DHW_DHW_TCN_SHIFT 0 +#define I40E_PRTRPB_DHW_DHW_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_DHW_DHW_TCN_SHIFT) +#define I40E_PRTRPB_DLW(_i) (0x000AC220 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ +#define I40E_PRTRPB_DLW_MAX_INDEX 7 +#define I40E_PRTRPB_DLW_DLW_TCN_SHIFT 0 +#define I40E_PRTRPB_DLW_DLW_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_DLW_DLW_TCN_SHIFT) +#define I40E_PRTRPB_DPS(_i) (0x000AC320 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ +#define I40E_PRTRPB_DPS_MAX_INDEX 7 +#define I40E_PRTRPB_DPS_DPS_TCN_SHIFT 0 +#define I40E_PRTRPB_DPS_DPS_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_DPS_DPS_TCN_SHIFT) +#define I40E_PRTRPB_SHT(_i) (0x000AC480 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ +#define I40E_PRTRPB_SHT_MAX_INDEX 7 +#define I40E_PRTRPB_SHT_SHT_TCN_SHIFT 0 +#define I40E_PRTRPB_SHT_SHT_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SHT_SHT_TCN_SHIFT) +#define I40E_PRTRPB_SHW 0x000AC580 /* Reset: CORER */ +#define I40E_PRTRPB_SHW_SHW_SHIFT 0 +#define I40E_PRTRPB_SHW_SHW_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SHW_SHW_SHIFT) +#define I40E_PRTRPB_SLT(_i) (0x000AC5A0 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ +#define I40E_PRTRPB_SLT_MAX_INDEX 7 +#define I40E_PRTRPB_SLT_SLT_TCN_SHIFT 0 +#define I40E_PRTRPB_SLT_SLT_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SLT_SLT_TCN_SHIFT) +#define I40E_PRTRPB_SLW 0x000AC6A0 /* Reset: CORER */ +#define I40E_PRTRPB_SLW_SLW_SHIFT 0 +#define I40E_PRTRPB_SLW_SLW_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SLW_SLW_SHIFT) +#define I40E_PRTRPB_SPS 0x000AC7C0 /* Reset: CORER */ +#define I40E_PRTRPB_SPS_SPS_SHIFT 0 +#define I40E_PRTRPB_SPS_SPS_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SPS_SPS_SHIFT) +#define I40E_GLQF_CTL 0x00269BA4 /* Reset: CORER */ +#define I40E_GLQF_CTL_HTOEP_SHIFT 1 +#define I40E_GLQF_CTL_HTOEP_MASK I40E_MASK(0x1, I40E_GLQF_CTL_HTOEP_SHIFT) +#define I40E_GLQF_CTL_HTOEP_FCOE_SHIFT 2 +#define I40E_GLQF_CTL_HTOEP_FCOE_MASK I40E_MASK(0x1, I40E_GLQF_CTL_HTOEP_FCOE_SHIFT) +#define I40E_GLQF_CTL_PCNT_ALLOC_SHIFT 3 +#define I40E_GLQF_CTL_PCNT_ALLOC_MASK I40E_MASK(0x7, I40E_GLQF_CTL_PCNT_ALLOC_SHIFT) +#define I40E_GLQF_CTL_FD_AUTO_PCTYPE_SHIFT 6 +#define I40E_GLQF_CTL_FD_AUTO_PCTYPE_MASK I40E_MASK(0x1, I40E_GLQF_CTL_FD_AUTO_PCTYPE_SHIFT) +#define I40E_GLQF_CTL_RSVD_SHIFT 7 +#define I40E_GLQF_CTL_RSVD_MASK I40E_MASK(0x1, I40E_GLQF_CTL_RSVD_SHIFT) +#define I40E_GLQF_CTL_MAXPEBLEN_SHIFT 8 +#define I40E_GLQF_CTL_MAXPEBLEN_MASK I40E_MASK(0x7, I40E_GLQF_CTL_MAXPEBLEN_SHIFT) +#define I40E_GLQF_CTL_MAXFCBLEN_SHIFT 11 +#define I40E_GLQF_CTL_MAXFCBLEN_MASK I40E_MASK(0x7, I40E_GLQF_CTL_MAXFCBLEN_SHIFT) +#define I40E_GLQF_CTL_MAXFDBLEN_SHIFT 14 
+#define I40E_GLQF_CTL_MAXFDBLEN_MASK I40E_MASK(0x7, I40E_GLQF_CTL_MAXFDBLEN_SHIFT) +#define I40E_GLQF_CTL_FDBEST_SHIFT 17 +#define I40E_GLQF_CTL_FDBEST_MASK I40E_MASK(0xFF, I40E_GLQF_CTL_FDBEST_SHIFT) +#define I40E_GLQF_CTL_PROGPRIO_SHIFT 25 +#define I40E_GLQF_CTL_PROGPRIO_MASK I40E_MASK(0x1, I40E_GLQF_CTL_PROGPRIO_SHIFT) +#define I40E_GLQF_CTL_INVALPRIO_SHIFT 26 +#define I40E_GLQF_CTL_INVALPRIO_MASK I40E_MASK(0x1, I40E_GLQF_CTL_INVALPRIO_SHIFT) +#define I40E_GLQF_CTL_IGNORE_IP_SHIFT 27 +#define I40E_GLQF_CTL_IGNORE_IP_MASK I40E_MASK(0x1, I40E_GLQF_CTL_IGNORE_IP_SHIFT) +#define I40E_GLQF_FDCNT_0 0x00269BAC /* Reset: CORER */ +#define I40E_GLQF_FDCNT_0_GUARANT_CNT_SHIFT 0 +#define I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK I40E_MASK(0x1FFF, I40E_GLQF_FDCNT_0_GUARANT_CNT_SHIFT) +#define I40E_GLQF_FDCNT_0_BESTCNT_SHIFT 13 +#define I40E_GLQF_FDCNT_0_BESTCNT_MASK I40E_MASK(0x1FFF, I40E_GLQF_FDCNT_0_BESTCNT_SHIFT) +#define I40E_GLQF_HKEY(_i) (0x00270140 + ((_i) * 4)) /* _i=0...12 */ /* Reset: CORER */ +#define I40E_GLQF_HKEY_MAX_INDEX 12 +#define I40E_GLQF_HKEY_KEY_0_SHIFT 0 +#define I40E_GLQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_0_SHIFT) +#define I40E_GLQF_HKEY_KEY_1_SHIFT 8 +#define I40E_GLQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_1_SHIFT) +#define I40E_GLQF_HKEY_KEY_2_SHIFT 16 +#define I40E_GLQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_2_SHIFT) +#define I40E_GLQF_HKEY_KEY_3_SHIFT 24 +#define I40E_GLQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_3_SHIFT) +#define I40E_GLQF_HSYM(_i) (0x00269D00 + ((_i) * 4)) /* _i=0...63 */ /* Reset: CORER */ +#define I40E_GLQF_HSYM_MAX_INDEX 63 +#define I40E_GLQF_HSYM_SYMH_ENA_SHIFT 0 +#define I40E_GLQF_HSYM_SYMH_ENA_MASK I40E_MASK(0x1, I40E_GLQF_HSYM_SYMH_ENA_SHIFT) +#define I40E_GLQF_PCNT(_i) (0x00266800 + ((_i) * 4)) /* _i=0...511 */ /* Reset: CORER */ +#define I40E_GLQF_PCNT_MAX_INDEX 511 +#define I40E_GLQF_PCNT_PCNT_SHIFT 0 +#define I40E_GLQF_PCNT_PCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_PCNT_PCNT_SHIFT) +#define I40E_GLQF_SWAP(_i, _j) (0x00267E00 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */ /* Reset: CORER */ +#define I40E_GLQF_SWAP_MAX_INDEX 1 +#define I40E_GLQF_SWAP_OFF0_SRC0_SHIFT 0 +#define I40E_GLQF_SWAP_OFF0_SRC0_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF0_SRC0_SHIFT) +#define I40E_GLQF_SWAP_OFF0_SRC1_SHIFT 6 +#define I40E_GLQF_SWAP_OFF0_SRC1_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF0_SRC1_SHIFT) +#define I40E_GLQF_SWAP_FLEN0_SHIFT 12 +#define I40E_GLQF_SWAP_FLEN0_MASK I40E_MASK(0xF, I40E_GLQF_SWAP_FLEN0_SHIFT) +#define I40E_GLQF_SWAP_OFF1_SRC0_SHIFT 16 +#define I40E_GLQF_SWAP_OFF1_SRC0_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF1_SRC0_SHIFT) +#define I40E_GLQF_SWAP_OFF1_SRC1_SHIFT 22 +#define I40E_GLQF_SWAP_OFF1_SRC1_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF1_SRC1_SHIFT) +#define I40E_GLQF_SWAP_FLEN1_SHIFT 28 +#define I40E_GLQF_SWAP_FLEN1_MASK I40E_MASK(0xF, I40E_GLQF_SWAP_FLEN1_SHIFT) +#define I40E_PFQF_CTL_0 0x001C0AC0 /* Reset: CORER */ +#define I40E_PFQF_CTL_0_PEHSIZE_SHIFT 0 +#define I40E_PFQF_CTL_0_PEHSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_0_PEHSIZE_SHIFT) +#define I40E_PFQF_CTL_0_PEDSIZE_SHIFT 5 +#define I40E_PFQF_CTL_0_PEDSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_0_PEDSIZE_SHIFT) +#define I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT 10 +#define I40E_PFQF_CTL_0_PFFCHSIZE_MASK I40E_MASK(0xF, I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT) +#define I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT 14 +#define I40E_PFQF_CTL_0_PFFCDSIZE_MASK I40E_MASK(0x3, I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT) +#define I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT 16 +#define 
I40E_PFQF_CTL_0_HASHLUTSIZE_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT) +#define I40E_PFQF_CTL_0_FD_ENA_SHIFT 17 +#define I40E_PFQF_CTL_0_FD_ENA_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_FD_ENA_SHIFT) +#define I40E_PFQF_CTL_0_ETYPE_ENA_SHIFT 18 +#define I40E_PFQF_CTL_0_ETYPE_ENA_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_ETYPE_ENA_SHIFT) +#define I40E_PFQF_CTL_0_MACVLAN_ENA_SHIFT 19 +#define I40E_PFQF_CTL_0_MACVLAN_ENA_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_MACVLAN_ENA_SHIFT) +#define I40E_PFQF_CTL_0_VFFCHSIZE_SHIFT 20 +#define I40E_PFQF_CTL_0_VFFCHSIZE_MASK I40E_MASK(0xF, I40E_PFQF_CTL_0_VFFCHSIZE_SHIFT) +#define I40E_PFQF_CTL_0_VFFCDSIZE_SHIFT 24 +#define I40E_PFQF_CTL_0_VFFCDSIZE_MASK I40E_MASK(0x3, I40E_PFQF_CTL_0_VFFCDSIZE_SHIFT) +#define I40E_PFQF_CTL_1 0x00245D80 /* Reset: CORER */ +#define I40E_PFQF_CTL_1_CLEARFDTABLE_SHIFT 0 +#define I40E_PFQF_CTL_1_CLEARFDTABLE_MASK I40E_MASK(0x1, I40E_PFQF_CTL_1_CLEARFDTABLE_SHIFT) +#define I40E_PFQF_FDALLOC 0x00246280 /* Reset: CORER */ +#define I40E_PFQF_FDALLOC_FDALLOC_SHIFT 0 +#define I40E_PFQF_FDALLOC_FDALLOC_MASK I40E_MASK(0xFF, I40E_PFQF_FDALLOC_FDALLOC_SHIFT) +#define I40E_PFQF_FDALLOC_FDBEST_SHIFT 8 +#define I40E_PFQF_FDALLOC_FDBEST_MASK I40E_MASK(0xFF, I40E_PFQF_FDALLOC_FDBEST_SHIFT) +#define I40E_PFQF_FDSTAT 0x00246380 /* Reset: CORER */ +#define I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT 0 +#define I40E_PFQF_FDSTAT_GUARANT_CNT_MASK I40E_MASK(0x1FFF, I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT) +#define I40E_PFQF_FDSTAT_BEST_CNT_SHIFT 16 +#define I40E_PFQF_FDSTAT_BEST_CNT_MASK I40E_MASK(0x1FFF, I40E_PFQF_FDSTAT_BEST_CNT_SHIFT) +#define I40E_PFQF_HENA(_i) (0x00245900 + ((_i) * 128)) /* _i=0...1 */ /* Reset: CORER */ +#define I40E_PFQF_HENA_MAX_INDEX 1 +#define I40E_PFQF_HENA_PTYPE_ENA_SHIFT 0 +#define I40E_PFQF_HENA_PTYPE_ENA_MASK I40E_MASK(0xFFFFFFFF, I40E_PFQF_HENA_PTYPE_ENA_SHIFT) +#define I40E_PFQF_HKEY(_i) (0x00244800 + ((_i) * 128)) /* _i=0...12 */ /* Reset: CORER */ +#define I40E_PFQF_HKEY_MAX_INDEX 12 +#define I40E_PFQF_HKEY_KEY_0_SHIFT 0 +#define I40E_PFQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_0_SHIFT) +#define I40E_PFQF_HKEY_KEY_1_SHIFT 8 +#define I40E_PFQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_1_SHIFT) +#define I40E_PFQF_HKEY_KEY_2_SHIFT 16 +#define I40E_PFQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_2_SHIFT) +#define I40E_PFQF_HKEY_KEY_3_SHIFT 24 +#define I40E_PFQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_3_SHIFT) +#define I40E_PFQF_HLUT(_i) (0x00240000 + ((_i) * 128)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_PFQF_HLUT_MAX_INDEX 127 +#define I40E_PFQF_HLUT_LUT0_SHIFT 0 +#define I40E_PFQF_HLUT_LUT0_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT0_SHIFT) +#define I40E_PFQF_HLUT_LUT1_SHIFT 8 +#define I40E_PFQF_HLUT_LUT1_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT1_SHIFT) +#define I40E_PFQF_HLUT_LUT2_SHIFT 16 +#define I40E_PFQF_HLUT_LUT2_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT2_SHIFT) +#define I40E_PFQF_HLUT_LUT3_SHIFT 24 +#define I40E_PFQF_HLUT_LUT3_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT3_SHIFT) +#define I40E_PRTQF_CTL_0 0x00256E60 /* Reset: CORER */ +#define I40E_PRTQF_CTL_0_HSYM_ENA_SHIFT 0 +#define I40E_PRTQF_CTL_0_HSYM_ENA_MASK I40E_MASK(0x1, I40E_PRTQF_CTL_0_HSYM_ENA_SHIFT) +#define I40E_PRTQF_FD_FLXINSET(_i) (0x00253800 + ((_i) * 32)) /* _i=0...63 */ /* Reset: CORER */ +#define I40E_PRTQF_FD_FLXINSET_MAX_INDEX 63 +#define I40E_PRTQF_FD_FLXINSET_INSET_SHIFT 0 +#define I40E_PRTQF_FD_FLXINSET_INSET_MASK I40E_MASK(0xFF, I40E_PRTQF_FD_FLXINSET_INSET_SHIFT) +#define I40E_PRTQF_FD_INSET(_i, _j) (0x00250000 
+ ((_i) * 64 + (_j) * 32)) /* _i=0...63, _j=0...1 */ /* Reset: CORER */ +#define I40E_PRTQF_FD_INSET_MAX_INDEX 63 +#define I40E_PRTQF_FD_INSET_INSET_SHIFT 0 +#define I40E_PRTQF_FD_INSET_INSET_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTQF_FD_INSET_INSET_SHIFT) +#define I40E_PRTQF_FD_INSET(_i, _j) (0x00250000 + ((_i) * 64 + (_j) * 32)) /* _i=0...63, _j=0...1 */ /* Reset: CORER */ +#define I40E_PRTQF_FD_INSET_MAX_INDEX 63 +#define I40E_PRTQF_FD_INSET_INSET_SHIFT 0 +#define I40E_PRTQF_FD_INSET_INSET_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTQF_FD_INSET_INSET_SHIFT) +#define I40E_PRTQF_FD_MSK(_i, _j) (0x00252000 + ((_i) * 64 + (_j) * 32)) /* _i=0...63, _j=0...1 */ /* Reset: CORER */ +#define I40E_PRTQF_FD_MSK_MAX_INDEX 63 +#define I40E_PRTQF_FD_MSK_MASK_SHIFT 0 +#define I40E_PRTQF_FD_MSK_MASK_MASK I40E_MASK(0xFFFF, I40E_PRTQF_FD_MSK_MASK_SHIFT) +#define I40E_PRTQF_FD_MSK_OFFSET_SHIFT 16 +#define I40E_PRTQF_FD_MSK_OFFSET_MASK I40E_MASK(0x3F, I40E_PRTQF_FD_MSK_OFFSET_SHIFT) +#define I40E_PRTQF_FLX_PIT(_i) (0x00255200 + ((_i) * 32)) /* _i=0...8 */ /* Reset: CORER */ +#define I40E_PRTQF_FLX_PIT_MAX_INDEX 8 +#define I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT 0 +#define I40E_PRTQF_FLX_PIT_SOURCE_OFF_MASK I40E_MASK(0x1F, I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT) +#define I40E_PRTQF_FLX_PIT_FSIZE_SHIFT 5 +#define I40E_PRTQF_FLX_PIT_FSIZE_MASK I40E_MASK(0x1F, I40E_PRTQF_FLX_PIT_FSIZE_SHIFT) +#define I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT 10 +#define I40E_PRTQF_FLX_PIT_DEST_OFF_MASK I40E_MASK(0x3F, I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT) +#define I40E_VFQF_HENA1(_i, _VF) (0x00230800 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...1, _VF=0...127 */ /* Reset: CORER */ +#define I40E_VFQF_HENA1_MAX_INDEX 1 +#define I40E_VFQF_HENA1_PTYPE_ENA_SHIFT 0 +#define I40E_VFQF_HENA1_PTYPE_ENA_MASK I40E_MASK(0xFFFFFFFF, I40E_VFQF_HENA1_PTYPE_ENA_SHIFT) +#define I40E_VFQF_HKEY1(_i, _VF) (0x00228000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...12, _VF=0...127 */ /* Reset: CORER */ +#define I40E_VFQF_HKEY1_MAX_INDEX 12 +#define I40E_VFQF_HKEY1_KEY_0_SHIFT 0 +#define I40E_VFQF_HKEY1_KEY_0_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_0_SHIFT) +#define I40E_VFQF_HKEY1_KEY_1_SHIFT 8 +#define I40E_VFQF_HKEY1_KEY_1_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_1_SHIFT) +#define I40E_VFQF_HKEY1_KEY_2_SHIFT 16 +#define I40E_VFQF_HKEY1_KEY_2_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_2_SHIFT) +#define I40E_VFQF_HKEY1_KEY_3_SHIFT 24 +#define I40E_VFQF_HKEY1_KEY_3_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_3_SHIFT) +#define I40E_VFQF_HLUT1(_i, _VF) (0x00220000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */ /* Reset: CORER */ +#define I40E_VFQF_HLUT1_MAX_INDEX 15 +#define I40E_VFQF_HLUT1_LUT0_SHIFT 0 +#define I40E_VFQF_HLUT1_LUT0_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT0_SHIFT) +#define I40E_VFQF_HLUT1_LUT1_SHIFT 8 +#define I40E_VFQF_HLUT1_LUT1_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT1_SHIFT) +#define I40E_VFQF_HLUT1_LUT2_SHIFT 16 +#define I40E_VFQF_HLUT1_LUT2_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT2_SHIFT) +#define I40E_VFQF_HLUT1_LUT3_SHIFT 24 +#define I40E_VFQF_HLUT1_LUT3_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT3_SHIFT) +#define I40E_VFQF_HREGION1(_i, _VF) (0x0022E000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...7, _VF=0...127 */ /* Reset: CORER */ +#define I40E_VFQF_HREGION1_MAX_INDEX 7 +#define I40E_VFQF_HREGION1_OVERRIDE_ENA_0_SHIFT 0 +#define I40E_VFQF_HREGION1_OVERRIDE_ENA_0_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_0_SHIFT) +#define I40E_VFQF_HREGION1_REGION_0_SHIFT 1 +#define I40E_VFQF_HREGION1_REGION_0_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_0_SHIFT) +#define 
I40E_VFQF_HREGION1_OVERRIDE_ENA_1_SHIFT 4 +#define I40E_VFQF_HREGION1_OVERRIDE_ENA_1_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_1_SHIFT) +#define I40E_VFQF_HREGION1_REGION_1_SHIFT 5 +#define I40E_VFQF_HREGION1_REGION_1_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_1_SHIFT) +#define I40E_VFQF_HREGION1_OVERRIDE_ENA_2_SHIFT 8 +#define I40E_VFQF_HREGION1_OVERRIDE_ENA_2_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_2_SHIFT) +#define I40E_VFQF_HREGION1_REGION_2_SHIFT 9 +#define I40E_VFQF_HREGION1_REGION_2_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_2_SHIFT) +#define I40E_VFQF_HREGION1_OVERRIDE_ENA_3_SHIFT 12 +#define I40E_VFQF_HREGION1_OVERRIDE_ENA_3_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_3_SHIFT) +#define I40E_VFQF_HREGION1_REGION_3_SHIFT 13 +#define I40E_VFQF_HREGION1_REGION_3_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_3_SHIFT) +#define I40E_VFQF_HREGION1_OVERRIDE_ENA_4_SHIFT 16 +#define I40E_VFQF_HREGION1_OVERRIDE_ENA_4_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_4_SHIFT) +#define I40E_VFQF_HREGION1_REGION_4_SHIFT 17 +#define I40E_VFQF_HREGION1_REGION_4_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_4_SHIFT) +#define I40E_VFQF_HREGION1_OVERRIDE_ENA_5_SHIFT 20 +#define I40E_VFQF_HREGION1_OVERRIDE_ENA_5_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_5_SHIFT) +#define I40E_VFQF_HREGION1_REGION_5_SHIFT 21 +#define I40E_VFQF_HREGION1_REGION_5_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_5_SHIFT) +#define I40E_VFQF_HREGION1_OVERRIDE_ENA_6_SHIFT 24 +#define I40E_VFQF_HREGION1_OVERRIDE_ENA_6_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_6_SHIFT) +#define I40E_VFQF_HREGION1_REGION_6_SHIFT 25 +#define I40E_VFQF_HREGION1_REGION_6_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_6_SHIFT) +#define I40E_VFQF_HREGION1_OVERRIDE_ENA_7_SHIFT 28 +#define I40E_VFQF_HREGION1_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_7_SHIFT) +#define I40E_VFQF_HREGION1_REGION_7_SHIFT 29 +#define I40E_VFQF_HREGION1_REGION_7_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_7_SHIFT) +#define I40E_VPQF_CTL(_VF) (0x001C0000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VPQF_CTL_MAX_INDEX 127 +#define I40E_VPQF_CTL_PEHSIZE_SHIFT 0 +#define I40E_VPQF_CTL_PEHSIZE_MASK I40E_MASK(0x1F, I40E_VPQF_CTL_PEHSIZE_SHIFT) +#define I40E_VPQF_CTL_PEDSIZE_SHIFT 5 +#define I40E_VPQF_CTL_PEDSIZE_MASK I40E_MASK(0x1F, I40E_VPQF_CTL_PEDSIZE_SHIFT) +#define I40E_VPQF_CTL_FCHSIZE_SHIFT 10 +#define I40E_VPQF_CTL_FCHSIZE_MASK I40E_MASK(0xF, I40E_VPQF_CTL_FCHSIZE_SHIFT) +#define I40E_VPQF_CTL_FCDSIZE_SHIFT 14 +#define I40E_VPQF_CTL_FCDSIZE_MASK I40E_MASK(0x3, I40E_VPQF_CTL_FCDSIZE_SHIFT) +#define I40E_VSIQF_CTL(_VSI) (0x0020D800 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: PFR */ +#define I40E_VSIQF_CTL_MAX_INDEX 383 +#define I40E_VSIQF_CTL_FCOE_ENA_SHIFT 0 +#define I40E_VSIQF_CTL_FCOE_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_FCOE_ENA_SHIFT) +#define I40E_VSIQF_CTL_PETCP_ENA_SHIFT 1 +#define I40E_VSIQF_CTL_PETCP_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PETCP_ENA_SHIFT) +#define I40E_VSIQF_CTL_PEUUDP_ENA_SHIFT 2 +#define I40E_VSIQF_CTL_PEUUDP_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEUUDP_ENA_SHIFT) +#define I40E_VSIQF_CTL_PEMUDP_ENA_SHIFT 3 +#define I40E_VSIQF_CTL_PEMUDP_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEMUDP_ENA_SHIFT) +#define I40E_VSIQF_CTL_PEUFRAG_ENA_SHIFT 4 +#define I40E_VSIQF_CTL_PEUFRAG_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEUFRAG_ENA_SHIFT) +#define I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT 5 +#define I40E_VSIQF_CTL_PEMFRAG_ENA_MASK I40E_MASK(0x1, 
I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT) +#define I40E_VSIQF_TCREGION(_i, _VSI) (0x00206000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...3, _VSI=0...383 */ /* Reset: PFR */ +#define I40E_VSIQF_TCREGION_MAX_INDEX 3 +#define I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT 0 +#define I40E_VSIQF_TCREGION_TC_OFFSET_MASK I40E_MASK(0x1FF, I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT) +#define I40E_VSIQF_TCREGION_TC_SIZE_SHIFT 9 +#define I40E_VSIQF_TCREGION_TC_SIZE_MASK I40E_MASK(0x7, I40E_VSIQF_TCREGION_TC_SIZE_SHIFT) +#define I40E_VSIQF_TCREGION_TC_OFFSET2_SHIFT 16 +#define I40E_VSIQF_TCREGION_TC_OFFSET2_MASK I40E_MASK(0x1FF, I40E_VSIQF_TCREGION_TC_OFFSET2_SHIFT) +#define I40E_VSIQF_TCREGION_TC_SIZE2_SHIFT 25 +#define I40E_VSIQF_TCREGION_TC_SIZE2_MASK I40E_MASK(0x7, I40E_VSIQF_TCREGION_TC_SIZE2_SHIFT) +#define I40E_GL_FCOECRC(_i) (0x00314d80 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ +#define I40E_GL_FCOECRC_MAX_INDEX 143 +#define I40E_GL_FCOECRC_FCOECRC_SHIFT 0 +#define I40E_GL_FCOECRC_FCOECRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOECRC_FCOECRC_SHIFT) +#define I40E_GL_FCOEDDPC(_i) (0x00314480 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ +#define I40E_GL_FCOEDDPC_MAX_INDEX 143 +#define I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT 0 +#define I40E_GL_FCOEDDPC_FCOEDDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT) +#define I40E_GL_FCOEDIFEC(_i) (0x00318480 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ +#define I40E_GL_FCOEDIFEC_MAX_INDEX 143 +#define I40E_GL_FCOEDIFEC_FCOEDIFRC_SHIFT 0 +#define I40E_GL_FCOEDIFEC_FCOEDIFRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIFEC_FCOEDIFRC_SHIFT) +#define I40E_GL_FCOEDIFTCL(_i) (0x00354000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ +#define I40E_GL_FCOEDIFTCL_MAX_INDEX 143 +#define I40E_GL_FCOEDIFTCL_FCOEDIFTC_SHIFT 0 +#define I40E_GL_FCOEDIFTCL_FCOEDIFTC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIFTCL_FCOEDIFTC_SHIFT) +#define I40E_GL_FCOEDIXEC(_i) (0x0034c000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ +#define I40E_GL_FCOEDIXEC_MAX_INDEX 143 +#define I40E_GL_FCOEDIXEC_FCOEDIXEC_SHIFT 0 +#define I40E_GL_FCOEDIXEC_FCOEDIXEC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIXEC_FCOEDIXEC_SHIFT) +#define I40E_GL_FCOEDIXVC(_i) (0x00350000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ +#define I40E_GL_FCOEDIXVC_MAX_INDEX 143 +#define I40E_GL_FCOEDIXVC_FCOEDIXVC_SHIFT 0 +#define I40E_GL_FCOEDIXVC_FCOEDIXVC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIXVC_FCOEDIXVC_SHIFT) +#define I40E_GL_FCOEDWRCH(_i) (0x00320004 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ +#define I40E_GL_FCOEDWRCH_MAX_INDEX 143 +#define I40E_GL_FCOEDWRCH_FCOEDWRCH_SHIFT 0 +#define I40E_GL_FCOEDWRCH_FCOEDWRCH_MASK I40E_MASK(0xFFFF, I40E_GL_FCOEDWRCH_FCOEDWRCH_SHIFT) +#define I40E_GL_FCOEDWRCL(_i) (0x00320000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ +#define I40E_GL_FCOEDWRCL_MAX_INDEX 143 +#define I40E_GL_FCOEDWRCL_FCOEDWRCL_SHIFT 0 +#define I40E_GL_FCOEDWRCL_FCOEDWRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDWRCL_FCOEDWRCL_SHIFT) +#define I40E_GL_FCOEDWTCH(_i) (0x00348084 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ +#define I40E_GL_FCOEDWTCH_MAX_INDEX 143 +#define I40E_GL_FCOEDWTCH_FCOEDWTCH_SHIFT 0 +#define I40E_GL_FCOEDWTCH_FCOEDWTCH_MASK I40E_MASK(0xFFFF, I40E_GL_FCOEDWTCH_FCOEDWTCH_SHIFT) +#define I40E_GL_FCOEDWTCL(_i) (0x00348080 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ +#define I40E_GL_FCOEDWTCL_MAX_INDEX 143 +#define I40E_GL_FCOEDWTCL_FCOEDWTCL_SHIFT 0 +#define I40E_GL_FCOEDWTCL_FCOEDWTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDWTCL_FCOEDWTCL_SHIFT) +#define 
I40E_GL_FCOELAST(_i) (0x00314000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ +#define I40E_GL_FCOELAST_MAX_INDEX 143 +#define I40E_GL_FCOELAST_FCOELAST_SHIFT 0 +#define I40E_GL_FCOELAST_FCOELAST_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOELAST_FCOELAST_SHIFT) +#define I40E_GL_FCOEPRC(_i) (0x00315200 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ +#define I40E_GL_FCOEPRC_MAX_INDEX 143 +#define I40E_GL_FCOEPRC_FCOEPRC_SHIFT 0 +#define I40E_GL_FCOEPRC_FCOEPRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEPRC_FCOEPRC_SHIFT) +#define I40E_GL_FCOEPTC(_i) (0x00344C00 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ +#define I40E_GL_FCOEPTC_MAX_INDEX 143 +#define I40E_GL_FCOEPTC_FCOEPTC_SHIFT 0 +#define I40E_GL_FCOEPTC_FCOEPTC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEPTC_FCOEPTC_SHIFT) +#define I40E_GL_FCOERPDC(_i) (0x00324000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ +#define I40E_GL_FCOERPDC_MAX_INDEX 143 +#define I40E_GL_FCOERPDC_FCOERPDC_SHIFT 0 +#define I40E_GL_FCOERPDC_FCOERPDC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOERPDC_FCOERPDC_SHIFT) +#define I40E_GL_RXERR1_L(_i) (0x00318000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ +#define I40E_GL_RXERR1_L_MAX_INDEX 143 +#define I40E_GL_RXERR1_L_FCOEDIFRC_SHIFT 0 +#define I40E_GL_RXERR1_L_FCOEDIFRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR1_L_FCOEDIFRC_SHIFT) +#define I40E_GL_RXERR2_L(_i) (0x0031c000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ +#define I40E_GL_RXERR2_L_MAX_INDEX 143 +#define I40E_GL_RXERR2_L_FCOEDIXAC_SHIFT 0 +#define I40E_GL_RXERR2_L_FCOEDIXAC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR2_L_FCOEDIXAC_SHIFT) +#define I40E_GLPRT_BPRCH(_i) (0x003005E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_BPRCH_MAX_INDEX 3 +#define I40E_GLPRT_BPRCH_BPRCH_SHIFT 0 +#define I40E_GLPRT_BPRCH_BPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPRCH_BPRCH_SHIFT) +#define I40E_GLPRT_BPRCL(_i) (0x003005E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_BPRCL_MAX_INDEX 3 +#define I40E_GLPRT_BPRCL_BPRCL_SHIFT 0 +#define I40E_GLPRT_BPRCL_BPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPRCL_BPRCL_SHIFT) +#define I40E_GLPRT_BPTCH(_i) (0x00300A04 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_BPTCH_MAX_INDEX 3 +#define I40E_GLPRT_BPTCH_BPTCH_SHIFT 0 +#define I40E_GLPRT_BPTCH_BPTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPTCH_BPTCH_SHIFT) +#define I40E_GLPRT_BPTCL(_i) (0x00300A00 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_BPTCL_MAX_INDEX 3 +#define I40E_GLPRT_BPTCL_BPTCL_SHIFT 0 +#define I40E_GLPRT_BPTCL_BPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPTCL_BPTCL_SHIFT) +#define I40E_GLPRT_CRCERRS(_i) (0x00300080 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_CRCERRS_MAX_INDEX 3 +#define I40E_GLPRT_CRCERRS_CRCERRS_SHIFT 0 +#define I40E_GLPRT_CRCERRS_CRCERRS_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_CRCERRS_CRCERRS_SHIFT) +#define I40E_GLPRT_GORCH(_i) (0x00300004 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_GORCH_MAX_INDEX 3 +#define I40E_GLPRT_GORCH_GORCH_SHIFT 0 +#define I40E_GLPRT_GORCH_GORCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_GORCH_GORCH_SHIFT) +#define I40E_GLPRT_GORCL(_i) (0x00300000 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_GORCL_MAX_INDEX 3 +#define I40E_GLPRT_GORCL_GORCL_SHIFT 0 +#define I40E_GLPRT_GORCL_GORCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_GORCL_GORCL_SHIFT) +#define I40E_GLPRT_GOTCH(_i) (0x00300684 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_GOTCH_MAX_INDEX 3 +#define 
I40E_GLPRT_GOTCH_GOTCH_SHIFT 0 +#define I40E_GLPRT_GOTCH_GOTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_GOTCH_GOTCH_SHIFT) +#define I40E_GLPRT_GOTCL(_i) (0x00300680 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_GOTCL_MAX_INDEX 3 +#define I40E_GLPRT_GOTCL_GOTCL_SHIFT 0 +#define I40E_GLPRT_GOTCL_GOTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_GOTCL_GOTCL_SHIFT) +#define I40E_GLPRT_ILLERRC(_i) (0x003000E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_ILLERRC_MAX_INDEX 3 +#define I40E_GLPRT_ILLERRC_ILLERRC_SHIFT 0 +#define I40E_GLPRT_ILLERRC_ILLERRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_ILLERRC_ILLERRC_SHIFT) +#define I40E_GLPRT_LDPC(_i) (0x00300620 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_LDPC_MAX_INDEX 3 +#define I40E_GLPRT_LDPC_LDPC_SHIFT 0 +#define I40E_GLPRT_LDPC_LDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LDPC_LDPC_SHIFT) +#define I40E_GLPRT_LXOFFRXC(_i) (0x00300160 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_LXOFFRXC_MAX_INDEX 3 +#define I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_SHIFT 0 +#define I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_SHIFT) +#define I40E_GLPRT_LXOFFTXC(_i) (0x003009A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_LXOFFTXC_MAX_INDEX 3 +#define I40E_GLPRT_LXOFFTXC_LXOFFTXC_SHIFT 0 +#define I40E_GLPRT_LXOFFTXC_LXOFFTXC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXOFFTXC_LXOFFTXC_SHIFT) +#define I40E_GLPRT_LXONRXC(_i) (0x00300140 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_LXONRXC_MAX_INDEX 3 +#define I40E_GLPRT_LXONRXC_LXONRXCNT_SHIFT 0 +#define I40E_GLPRT_LXONRXC_LXONRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXONRXC_LXONRXCNT_SHIFT) +#define I40E_GLPRT_LXONTXC(_i) (0x00300980 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_LXONTXC_MAX_INDEX 3 +#define I40E_GLPRT_LXONTXC_LXONTXC_SHIFT 0 +#define I40E_GLPRT_LXONTXC_LXONTXC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXONTXC_LXONTXC_SHIFT) +#define I40E_GLPRT_MLFC(_i) (0x00300020 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_MLFC_MAX_INDEX 3 +#define I40E_GLPRT_MLFC_MLFC_SHIFT 0 +#define I40E_GLPRT_MLFC_MLFC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MLFC_MLFC_SHIFT) +#define I40E_GLPRT_MPRCH(_i) (0x003005C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_MPRCH_MAX_INDEX 3 +#define I40E_GLPRT_MPRCH_MPRCH_SHIFT 0 +#define I40E_GLPRT_MPRCH_MPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_MPRCH_MPRCH_SHIFT) +#define I40E_GLPRT_MPRCL(_i) (0x003005C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_MPRCL_MAX_INDEX 3 +#define I40E_GLPRT_MPRCL_MPRCL_SHIFT 0 +#define I40E_GLPRT_MPRCL_MPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MPRCL_MPRCL_SHIFT) +#define I40E_GLPRT_MPTCH(_i) (0x003009E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_MPTCH_MAX_INDEX 3 +#define I40E_GLPRT_MPTCH_MPTCH_SHIFT 0 +#define I40E_GLPRT_MPTCH_MPTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_MPTCH_MPTCH_SHIFT) +#define I40E_GLPRT_MPTCL(_i) (0x003009E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_MPTCL_MAX_INDEX 3 +#define I40E_GLPRT_MPTCL_MPTCL_SHIFT 0 +#define I40E_GLPRT_MPTCL_MPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MPTCL_MPTCL_SHIFT) +#define I40E_GLPRT_MRFC(_i) (0x00300040 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_MRFC_MAX_INDEX 3 +#define I40E_GLPRT_MRFC_MRFC_SHIFT 0 +#define I40E_GLPRT_MRFC_MRFC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MRFC_MRFC_SHIFT) +#define 
I40E_GLPRT_PRC1023H(_i) (0x00300504 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PRC1023H_MAX_INDEX 3 +#define I40E_GLPRT_PRC1023H_PRC1023H_SHIFT 0 +#define I40E_GLPRT_PRC1023H_PRC1023H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC1023H_PRC1023H_SHIFT) +#define I40E_GLPRT_PRC1023L(_i) (0x00300500 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PRC1023L_MAX_INDEX 3 +#define I40E_GLPRT_PRC1023L_PRC1023L_SHIFT 0 +#define I40E_GLPRT_PRC1023L_PRC1023L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC1023L_PRC1023L_SHIFT) +#define I40E_GLPRT_PRC127H(_i) (0x003004A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PRC127H_MAX_INDEX 3 +#define I40E_GLPRT_PRC127H_PRC127H_SHIFT 0 +#define I40E_GLPRT_PRC127H_PRC127H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC127H_PRC127H_SHIFT) +#define I40E_GLPRT_PRC127L(_i) (0x003004A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PRC127L_MAX_INDEX 3 +#define I40E_GLPRT_PRC127L_PRC127L_SHIFT 0 +#define I40E_GLPRT_PRC127L_PRC127L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC127L_PRC127L_SHIFT) +#define I40E_GLPRT_PRC1522H(_i) (0x00300524 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PRC1522H_MAX_INDEX 3 +#define I40E_GLPRT_PRC1522H_PRC1522H_SHIFT 0 +#define I40E_GLPRT_PRC1522H_PRC1522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC1522H_PRC1522H_SHIFT) +#define I40E_GLPRT_PRC1522L(_i) (0x00300520 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PRC1522L_MAX_INDEX 3 +#define I40E_GLPRT_PRC1522L_PRC1522L_SHIFT 0 +#define I40E_GLPRT_PRC1522L_PRC1522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC1522L_PRC1522L_SHIFT) +#define I40E_GLPRT_PRC255H(_i) (0x003004C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PRC255H_MAX_INDEX 3 +#define I40E_GLPRT_PRC255H_PRTPRC255H_SHIFT 0 +#define I40E_GLPRT_PRC255H_PRTPRC255H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC255H_PRTPRC255H_SHIFT) +#define I40E_GLPRT_PRC255L(_i) (0x003004C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PRC255L_MAX_INDEX 3 +#define I40E_GLPRT_PRC255L_PRC255L_SHIFT 0 +#define I40E_GLPRT_PRC255L_PRC255L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC255L_PRC255L_SHIFT) +#define I40E_GLPRT_PRC511H(_i) (0x003004E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PRC511H_MAX_INDEX 3 +#define I40E_GLPRT_PRC511H_PRC511H_SHIFT 0 +#define I40E_GLPRT_PRC511H_PRC511H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC511H_PRC511H_SHIFT) +#define I40E_GLPRT_PRC511L(_i) (0x003004E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PRC511L_MAX_INDEX 3 +#define I40E_GLPRT_PRC511L_PRC511L_SHIFT 0 +#define I40E_GLPRT_PRC511L_PRC511L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC511L_PRC511L_SHIFT) +#define I40E_GLPRT_PRC64H(_i) (0x00300484 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PRC64H_MAX_INDEX 3 +#define I40E_GLPRT_PRC64H_PRC64H_SHIFT 0 +#define I40E_GLPRT_PRC64H_PRC64H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC64H_PRC64H_SHIFT) +#define I40E_GLPRT_PRC64L(_i) (0x00300480 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PRC64L_MAX_INDEX 3 +#define I40E_GLPRT_PRC64L_PRC64L_SHIFT 0 +#define I40E_GLPRT_PRC64L_PRC64L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC64L_PRC64L_SHIFT) +#define I40E_GLPRT_PRC9522H(_i) (0x00300544 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PRC9522H_MAX_INDEX 3 +#define I40E_GLPRT_PRC9522H_PRC1522H_SHIFT 0 +#define I40E_GLPRT_PRC9522H_PRC1522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC9522H_PRC1522H_SHIFT) +#define 
I40E_GLPRT_PRC9522L(_i) (0x00300540 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PRC9522L_MAX_INDEX 3 +#define I40E_GLPRT_PRC9522L_PRC1522L_SHIFT 0 +#define I40E_GLPRT_PRC9522L_PRC1522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC9522L_PRC1522L_SHIFT) +#define I40E_GLPRT_PTC1023H(_i) (0x00300724 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PTC1023H_MAX_INDEX 3 +#define I40E_GLPRT_PTC1023H_PTC1023H_SHIFT 0 +#define I40E_GLPRT_PTC1023H_PTC1023H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC1023H_PTC1023H_SHIFT) +#define I40E_GLPRT_PTC1023L(_i) (0x00300720 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PTC1023L_MAX_INDEX 3 +#define I40E_GLPRT_PTC1023L_PTC1023L_SHIFT 0 +#define I40E_GLPRT_PTC1023L_PTC1023L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC1023L_PTC1023L_SHIFT) +#define I40E_GLPRT_PTC127H(_i) (0x003006C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PTC127H_MAX_INDEX 3 +#define I40E_GLPRT_PTC127H_PTC127H_SHIFT 0 +#define I40E_GLPRT_PTC127H_PTC127H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC127H_PTC127H_SHIFT) +#define I40E_GLPRT_PTC127L(_i) (0x003006C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PTC127L_MAX_INDEX 3 +#define I40E_GLPRT_PTC127L_PTC127L_SHIFT 0 +#define I40E_GLPRT_PTC127L_PTC127L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC127L_PTC127L_SHIFT) +#define I40E_GLPRT_PTC1522H(_i) (0x00300744 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PTC1522H_MAX_INDEX 3 +#define I40E_GLPRT_PTC1522H_PTC1522H_SHIFT 0 +#define I40E_GLPRT_PTC1522H_PTC1522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC1522H_PTC1522H_SHIFT) +#define I40E_GLPRT_PTC1522L(_i) (0x00300740 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PTC1522L_MAX_INDEX 3 +#define I40E_GLPRT_PTC1522L_PTC1522L_SHIFT 0 +#define I40E_GLPRT_PTC1522L_PTC1522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC1522L_PTC1522L_SHIFT) +#define I40E_GLPRT_PTC255H(_i) (0x003006E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PTC255H_MAX_INDEX 3 +#define I40E_GLPRT_PTC255H_PTC255H_SHIFT 0 +#define I40E_GLPRT_PTC255H_PTC255H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC255H_PTC255H_SHIFT) +#define I40E_GLPRT_PTC255L(_i) (0x003006E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PTC255L_MAX_INDEX 3 +#define I40E_GLPRT_PTC255L_PTC255L_SHIFT 0 +#define I40E_GLPRT_PTC255L_PTC255L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC255L_PTC255L_SHIFT) +#define I40E_GLPRT_PTC511H(_i) (0x00300704 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PTC511H_MAX_INDEX 3 +#define I40E_GLPRT_PTC511H_PTC511H_SHIFT 0 +#define I40E_GLPRT_PTC511H_PTC511H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC511H_PTC511H_SHIFT) +#define I40E_GLPRT_PTC511L(_i) (0x00300700 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PTC511L_MAX_INDEX 3 +#define I40E_GLPRT_PTC511L_PTC511L_SHIFT 0 +#define I40E_GLPRT_PTC511L_PTC511L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC511L_PTC511L_SHIFT) +#define I40E_GLPRT_PTC64H(_i) (0x003006A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PTC64H_MAX_INDEX 3 +#define I40E_GLPRT_PTC64H_PTC64H_SHIFT 0 +#define I40E_GLPRT_PTC64H_PTC64H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC64H_PTC64H_SHIFT) +#define I40E_GLPRT_PTC64L(_i) (0x003006A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PTC64L_MAX_INDEX 3 +#define I40E_GLPRT_PTC64L_PTC64L_SHIFT 0 +#define I40E_GLPRT_PTC64L_PTC64L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC64L_PTC64L_SHIFT) +#define 
I40E_GLPRT_PTC9522H(_i) (0x00300764 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PTC9522H_MAX_INDEX 3 +#define I40E_GLPRT_PTC9522H_PTC9522H_SHIFT 0 +#define I40E_GLPRT_PTC9522H_PTC9522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC9522H_PTC9522H_SHIFT) +#define I40E_GLPRT_PTC9522L(_i) (0x00300760 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PTC9522L_MAX_INDEX 3 +#define I40E_GLPRT_PTC9522L_PTC9522L_SHIFT 0 +#define I40E_GLPRT_PTC9522L_PTC9522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC9522L_PTC9522L_SHIFT) +#define I40E_GLPRT_PXOFFRXC(_i, _j) (0x00300280 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */ +#define I40E_GLPRT_PXOFFRXC_MAX_INDEX 3 +#define I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_SHIFT 0 +#define I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_SHIFT) +#define I40E_GLPRT_PXOFFTXC(_i, _j) (0x00300880 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */ +#define I40E_GLPRT_PXOFFTXC_MAX_INDEX 3 +#define I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_SHIFT 0 +#define I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_SHIFT) +#define I40E_GLPRT_PXONRXC(_i, _j) (0x00300180 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */ +#define I40E_GLPRT_PXONRXC_MAX_INDEX 3 +#define I40E_GLPRT_PXONRXC_PRPXONRXCNT_SHIFT 0 +#define I40E_GLPRT_PXONRXC_PRPXONRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXONRXC_PRPXONRXCNT_SHIFT) +#define I40E_GLPRT_PXONTXC(_i, _j) (0x00300780 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */ +#define I40E_GLPRT_PXONTXC_MAX_INDEX 3 +#define I40E_GLPRT_PXONTXC_PRPXONTXC_SHIFT 0 +#define I40E_GLPRT_PXONTXC_PRPXONTXC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXONTXC_PRPXONTXC_SHIFT) +#define I40E_GLPRT_RDPC(_i) (0x00300600 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_RDPC_MAX_INDEX 3 +#define I40E_GLPRT_RDPC_RDPC_SHIFT 0 +#define I40E_GLPRT_RDPC_RDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RDPC_RDPC_SHIFT) +#define I40E_GLPRT_RFC(_i) (0x00300560 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_RFC_MAX_INDEX 3 +#define I40E_GLPRT_RFC_RFC_SHIFT 0 +#define I40E_GLPRT_RFC_RFC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RFC_RFC_SHIFT) +#define I40E_GLPRT_RJC(_i) (0x00300580 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_RJC_MAX_INDEX 3 +#define I40E_GLPRT_RJC_RJC_SHIFT 0 +#define I40E_GLPRT_RJC_RJC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RJC_RJC_SHIFT) +#define I40E_GLPRT_RLEC(_i) (0x003000A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_RLEC_MAX_INDEX 3 +#define I40E_GLPRT_RLEC_RLEC_SHIFT 0 +#define I40E_GLPRT_RLEC_RLEC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RLEC_RLEC_SHIFT) +#define I40E_GLPRT_ROC(_i) (0x00300120 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_ROC_MAX_INDEX 3 +#define I40E_GLPRT_ROC_ROC_SHIFT 0 +#define I40E_GLPRT_ROC_ROC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_ROC_ROC_SHIFT) +#define I40E_GLPRT_RUC(_i) (0x00300100 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_RUC_MAX_INDEX 3 +#define I40E_GLPRT_RUC_RUC_SHIFT 0 +#define I40E_GLPRT_RUC_RUC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RUC_RUC_SHIFT) +#define I40E_GLPRT_RUPP(_i) (0x00300660 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_RUPP_MAX_INDEX 3 +#define I40E_GLPRT_RUPP_RUPP_SHIFT 0 +#define I40E_GLPRT_RUPP_RUPP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RUPP_RUPP_SHIFT) +#define I40E_GLPRT_RXON2OFFCNT(_i, _j) 
(0x00300380 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */ +#define I40E_GLPRT_RXON2OFFCNT_MAX_INDEX 3 +#define I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_SHIFT 0 +#define I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_SHIFT) +#define I40E_GLPRT_TDOLD(_i) (0x00300A20 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_TDOLD_MAX_INDEX 3 +#define I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT 0 +#define I40E_GLPRT_TDOLD_GLPRT_TDOLD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT) +#define I40E_GLPRT_UPRCH(_i) (0x003005A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_UPRCH_MAX_INDEX 3 +#define I40E_GLPRT_UPRCH_UPRCH_SHIFT 0 +#define I40E_GLPRT_UPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_UPRCH_UPRCH_SHIFT) +#define I40E_GLPRT_UPRCL(_i) (0x003005A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_UPRCL_MAX_INDEX 3 +#define I40E_GLPRT_UPRCL_UPRCL_SHIFT 0 +#define I40E_GLPRT_UPRCL_UPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_UPRCL_UPRCL_SHIFT) +#define I40E_GLPRT_UPTCH(_i) (0x003009C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_UPTCH_MAX_INDEX 3 +#define I40E_GLPRT_UPTCH_UPTCH_SHIFT 0 +#define I40E_GLPRT_UPTCH_UPTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_UPTCH_UPTCH_SHIFT) +#define I40E_GLPRT_UPTCL(_i) (0x003009C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_UPTCL_MAX_INDEX 3 +#define I40E_GLPRT_UPTCL_VUPTCH_SHIFT 0 +#define I40E_GLPRT_UPTCL_VUPTCH_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_UPTCL_VUPTCH_SHIFT) +#define I40E_GLSW_BPRCH(_i) (0x00370104 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLSW_BPRCH_MAX_INDEX 15 +#define I40E_GLSW_BPRCH_BPRCH_SHIFT 0 +#define I40E_GLSW_BPRCH_BPRCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_BPRCH_BPRCH_SHIFT) +#define I40E_GLSW_BPRCL(_i) (0x00370100 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLSW_BPRCL_MAX_INDEX 15 +#define I40E_GLSW_BPRCL_BPRCL_SHIFT 0 +#define I40E_GLSW_BPRCL_BPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_BPRCL_BPRCL_SHIFT) +#define I40E_GLSW_BPTCH(_i) (0x00340104 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLSW_BPTCH_MAX_INDEX 15 +#define I40E_GLSW_BPTCH_BPTCH_SHIFT 0 +#define I40E_GLSW_BPTCH_BPTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_BPTCH_BPTCH_SHIFT) +#define I40E_GLSW_BPTCL(_i) (0x00340100 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLSW_BPTCL_MAX_INDEX 15 +#define I40E_GLSW_BPTCL_BPTCL_SHIFT 0 +#define I40E_GLSW_BPTCL_BPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_BPTCL_BPTCL_SHIFT) +#define I40E_GLSW_GORCH(_i) (0x0035C004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLSW_GORCH_MAX_INDEX 15 +#define I40E_GLSW_GORCH_GORCH_SHIFT 0 +#define I40E_GLSW_GORCH_GORCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_GORCH_GORCH_SHIFT) +#define I40E_GLSW_GORCL(_i) (0x0035c000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLSW_GORCL_MAX_INDEX 15 +#define I40E_GLSW_GORCL_GORCL_SHIFT 0 +#define I40E_GLSW_GORCL_GORCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_GORCL_GORCL_SHIFT) +#define I40E_GLSW_GOTCH(_i) (0x0032C004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLSW_GOTCH_MAX_INDEX 15 +#define I40E_GLSW_GOTCH_GOTCH_SHIFT 0 +#define I40E_GLSW_GOTCH_GOTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_GOTCH_GOTCH_SHIFT) +#define I40E_GLSW_GOTCL(_i) (0x0032c000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLSW_GOTCL_MAX_INDEX 15 +#define I40E_GLSW_GOTCL_GOTCL_SHIFT 0 +#define 
I40E_GLSW_GOTCL_GOTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_GOTCL_GOTCL_SHIFT) +#define I40E_GLSW_MPRCH(_i) (0x00370084 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLSW_MPRCH_MAX_INDEX 15 +#define I40E_GLSW_MPRCH_MPRCH_SHIFT 0 +#define I40E_GLSW_MPRCH_MPRCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_MPRCH_MPRCH_SHIFT) +#define I40E_GLSW_MPRCL(_i) (0x00370080 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLSW_MPRCL_MAX_INDEX 15 +#define I40E_GLSW_MPRCL_MPRCL_SHIFT 0 +#define I40E_GLSW_MPRCL_MPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_MPRCL_MPRCL_SHIFT) +#define I40E_GLSW_MPTCH(_i) (0x00340084 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLSW_MPTCH_MAX_INDEX 15 +#define I40E_GLSW_MPTCH_MPTCH_SHIFT 0 +#define I40E_GLSW_MPTCH_MPTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_MPTCH_MPTCH_SHIFT) +#define I40E_GLSW_MPTCL(_i) (0x00340080 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLSW_MPTCL_MAX_INDEX 15 +#define I40E_GLSW_MPTCL_MPTCL_SHIFT 0 +#define I40E_GLSW_MPTCL_MPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_MPTCL_MPTCL_SHIFT) +#define I40E_GLSW_RUPP(_i) (0x00370180 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLSW_RUPP_MAX_INDEX 15 +#define I40E_GLSW_RUPP_RUPP_SHIFT 0 +#define I40E_GLSW_RUPP_RUPP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_RUPP_RUPP_SHIFT) +#define I40E_GLSW_TDPC(_i) (0x00348000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLSW_TDPC_MAX_INDEX 15 +#define I40E_GLSW_TDPC_TDPC_SHIFT 0 +#define I40E_GLSW_TDPC_TDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_TDPC_TDPC_SHIFT) +#define I40E_GLSW_UPRCH(_i) (0x00370004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLSW_UPRCH_MAX_INDEX 15 +#define I40E_GLSW_UPRCH_UPRCH_SHIFT 0 +#define I40E_GLSW_UPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_UPRCH_UPRCH_SHIFT) +#define I40E_GLSW_UPRCL(_i) (0x00370000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLSW_UPRCL_MAX_INDEX 15 +#define I40E_GLSW_UPRCL_UPRCL_SHIFT 0 +#define I40E_GLSW_UPRCL_UPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_UPRCL_UPRCL_SHIFT) +#define I40E_GLSW_UPTCH(_i) (0x00340004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLSW_UPTCH_MAX_INDEX 15 +#define I40E_GLSW_UPTCH_UPTCH_SHIFT 0 +#define I40E_GLSW_UPTCH_UPTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_UPTCH_UPTCH_SHIFT) +#define I40E_GLSW_UPTCL(_i) (0x00340000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLSW_UPTCL_MAX_INDEX 15 +#define I40E_GLSW_UPTCL_UPTCL_SHIFT 0 +#define I40E_GLSW_UPTCL_UPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_UPTCL_UPTCL_SHIFT) +#define I40E_GLV_BPRCH(_i) (0x0036D804 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_BPRCH_MAX_INDEX 383 +#define I40E_GLV_BPRCH_BPRCH_SHIFT 0 +#define I40E_GLV_BPRCH_BPRCH_MASK I40E_MASK(0xFFFF, I40E_GLV_BPRCH_BPRCH_SHIFT) +#define I40E_GLV_BPRCL(_i) (0x0036d800 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_BPRCL_MAX_INDEX 383 +#define I40E_GLV_BPRCL_BPRCL_SHIFT 0 +#define I40E_GLV_BPRCL_BPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_BPRCL_BPRCL_SHIFT) +#define I40E_GLV_BPTCH(_i) (0x0033D804 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_BPTCH_MAX_INDEX 383 +#define I40E_GLV_BPTCH_BPTCH_SHIFT 0 +#define I40E_GLV_BPTCH_BPTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_BPTCH_BPTCH_SHIFT) +#define I40E_GLV_BPTCL(_i) (0x0033d800 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_BPTCL_MAX_INDEX 383 +#define I40E_GLV_BPTCL_BPTCL_SHIFT 0 +#define I40E_GLV_BPTCL_BPTCL_MASK I40E_MASK(0xFFFFFFFF, 
I40E_GLV_BPTCL_BPTCL_SHIFT) +#define I40E_GLV_GORCH(_i) (0x00358004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_GORCH_MAX_INDEX 383 +#define I40E_GLV_GORCH_GORCH_SHIFT 0 +#define I40E_GLV_GORCH_GORCH_MASK I40E_MASK(0xFFFF, I40E_GLV_GORCH_GORCH_SHIFT) +#define I40E_GLV_GORCL(_i) (0x00358000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_GORCL_MAX_INDEX 383 +#define I40E_GLV_GORCL_GORCL_SHIFT 0 +#define I40E_GLV_GORCL_GORCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_GORCL_GORCL_SHIFT) +#define I40E_GLV_GOTCH(_i) (0x00328004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_GOTCH_MAX_INDEX 383 +#define I40E_GLV_GOTCH_GOTCH_SHIFT 0 +#define I40E_GLV_GOTCH_GOTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_GOTCH_GOTCH_SHIFT) +#define I40E_GLV_GOTCL(_i) (0x00328000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_GOTCL_MAX_INDEX 383 +#define I40E_GLV_GOTCL_GOTCL_SHIFT 0 +#define I40E_GLV_GOTCL_GOTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_GOTCL_GOTCL_SHIFT) +#define I40E_GLV_MPRCH(_i) (0x0036CC04 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_MPRCH_MAX_INDEX 383 +#define I40E_GLV_MPRCH_MPRCH_SHIFT 0 +#define I40E_GLV_MPRCH_MPRCH_MASK I40E_MASK(0xFFFF, I40E_GLV_MPRCH_MPRCH_SHIFT) +#define I40E_GLV_MPRCL(_i) (0x0036cc00 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_MPRCL_MAX_INDEX 383 +#define I40E_GLV_MPRCL_MPRCL_SHIFT 0 +#define I40E_GLV_MPRCL_MPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_MPRCL_MPRCL_SHIFT) +#define I40E_GLV_MPTCH(_i) (0x0033CC04 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_MPTCH_MAX_INDEX 383 +#define I40E_GLV_MPTCH_MPTCH_SHIFT 0 +#define I40E_GLV_MPTCH_MPTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_MPTCH_MPTCH_SHIFT) +#define I40E_GLV_MPTCL(_i) (0x0033cc00 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_MPTCL_MAX_INDEX 383 +#define I40E_GLV_MPTCL_MPTCL_SHIFT 0 +#define I40E_GLV_MPTCL_MPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_MPTCL_MPTCL_SHIFT) +#define I40E_GLV_RDPC(_i) (0x00310000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_RDPC_MAX_INDEX 383 +#define I40E_GLV_RDPC_RDPC_SHIFT 0 +#define I40E_GLV_RDPC_RDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_RDPC_RDPC_SHIFT) +#define I40E_GLV_RUPP(_i) (0x0036E400 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_RUPP_MAX_INDEX 383 +#define I40E_GLV_RUPP_RUPP_SHIFT 0 +#define I40E_GLV_RUPP_RUPP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_RUPP_RUPP_SHIFT) +#define I40E_GLV_TEPC(_i) (0x00344000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_TEPC_MAX_INDEX 383 +#define I40E_GLV_TEPC_TEPC_SHIFT 0 +#define I40E_GLV_TEPC_TEPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_TEPC_TEPC_SHIFT) +#define I40E_GLV_UPRCH(_i) (0x0036C004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_UPRCH_MAX_INDEX 383 +#define I40E_GLV_UPRCH_UPRCH_SHIFT 0 +#define I40E_GLV_UPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLV_UPRCH_UPRCH_SHIFT) +#define I40E_GLV_UPRCL(_i) (0x0036c000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_UPRCL_MAX_INDEX 383 +#define I40E_GLV_UPRCL_UPRCL_SHIFT 0 +#define I40E_GLV_UPRCL_UPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_UPRCL_UPRCL_SHIFT) +#define I40E_GLV_UPTCH(_i) (0x0033C004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_UPTCH_MAX_INDEX 383 +#define I40E_GLV_UPTCH_GLVUPTCH_SHIFT 0 +#define I40E_GLV_UPTCH_GLVUPTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_UPTCH_GLVUPTCH_SHIFT) +#define I40E_GLV_UPTCL(_i) (0x0033c000 + ((_i) * 
8)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_UPTCL_MAX_INDEX 383 +#define I40E_GLV_UPTCL_UPTCL_SHIFT 0 +#define I40E_GLV_UPTCL_UPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_UPTCL_UPTCL_SHIFT) +#define I40E_GLVEBTC_RBCH(_i, _j) (0x00364004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */ +#define I40E_GLVEBTC_RBCH_MAX_INDEX 7 +#define I40E_GLVEBTC_RBCH_TCBCH_SHIFT 0 +#define I40E_GLVEBTC_RBCH_TCBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_RBCH_TCBCH_SHIFT) +#define I40E_GLVEBTC_RBCL(_i, _j) (0x00364000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */ +#define I40E_GLVEBTC_RBCL_MAX_INDEX 7 +#define I40E_GLVEBTC_RBCL_TCBCL_SHIFT 0 +#define I40E_GLVEBTC_RBCL_TCBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_RBCL_TCBCL_SHIFT) +#define I40E_GLVEBTC_RPCH(_i, _j) (0x00368004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */ +#define I40E_GLVEBTC_RPCH_MAX_INDEX 7 +#define I40E_GLVEBTC_RPCH_TCPCH_SHIFT 0 +#define I40E_GLVEBTC_RPCH_TCPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_RPCH_TCPCH_SHIFT) +#define I40E_GLVEBTC_RPCL(_i, _j) (0x00368000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */ +#define I40E_GLVEBTC_RPCL_MAX_INDEX 7 +#define I40E_GLVEBTC_RPCL_TCPCL_SHIFT 0 +#define I40E_GLVEBTC_RPCL_TCPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_RPCL_TCPCL_SHIFT) +#define I40E_GLVEBTC_TBCH(_i, _j) (0x00334004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */ +#define I40E_GLVEBTC_TBCH_MAX_INDEX 7 +#define I40E_GLVEBTC_TBCH_TCBCH_SHIFT 0 +#define I40E_GLVEBTC_TBCH_TCBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_TBCH_TCBCH_SHIFT) +#define I40E_GLVEBTC_TBCL(_i, _j) (0x00334000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */ +#define I40E_GLVEBTC_TBCL_MAX_INDEX 7 +#define I40E_GLVEBTC_TBCL_TCBCL_SHIFT 0 +#define I40E_GLVEBTC_TBCL_TCBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_TBCL_TCBCL_SHIFT) +#define I40E_GLVEBTC_TPCH(_i, _j) (0x00338004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */ +#define I40E_GLVEBTC_TPCH_MAX_INDEX 7 +#define I40E_GLVEBTC_TPCH_TCPCH_SHIFT 0 +#define I40E_GLVEBTC_TPCH_TCPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_TPCH_TCPCH_SHIFT) +#define I40E_GLVEBTC_TPCL(_i, _j) (0x00338000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */ +#define I40E_GLVEBTC_TPCL_MAX_INDEX 7 +#define I40E_GLVEBTC_TPCL_TCPCL_SHIFT 0 +#define I40E_GLVEBTC_TPCL_TCPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_TPCL_TCPCL_SHIFT) +#define I40E_GLVEBVL_BPCH(_i) (0x00374804 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_GLVEBVL_BPCH_MAX_INDEX 127 +#define I40E_GLVEBVL_BPCH_VLBPCH_SHIFT 0 +#define I40E_GLVEBVL_BPCH_VLBPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_BPCH_VLBPCH_SHIFT) +#define I40E_GLVEBVL_BPCL(_i) (0x00374800 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_GLVEBVL_BPCL_MAX_INDEX 127 +#define I40E_GLVEBVL_BPCL_VLBPCL_SHIFT 0 +#define I40E_GLVEBVL_BPCL_VLBPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_BPCL_VLBPCL_SHIFT) +#define I40E_GLVEBVL_GORCH(_i) (0x00360004 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_GLVEBVL_GORCH_MAX_INDEX 127 +#define I40E_GLVEBVL_GORCH_VLBCH_SHIFT 0 +#define I40E_GLVEBVL_GORCH_VLBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_GORCH_VLBCH_SHIFT) +#define I40E_GLVEBVL_GORCL(_i) (0x00360000 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_GLVEBVL_GORCL_MAX_INDEX 127 +#define I40E_GLVEBVL_GORCL_VLBCL_SHIFT 0 +#define I40E_GLVEBVL_GORCL_VLBCL_MASK I40E_MASK(0xFFFFFFFF, 
I40E_GLVEBVL_GORCL_VLBCL_SHIFT) +#define I40E_GLVEBVL_GOTCH(_i) (0x00330004 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_GLVEBVL_GOTCH_MAX_INDEX 127 +#define I40E_GLVEBVL_GOTCH_VLBCH_SHIFT 0 +#define I40E_GLVEBVL_GOTCH_VLBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_GOTCH_VLBCH_SHIFT) +#define I40E_GLVEBVL_GOTCL(_i) (0x00330000 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_GLVEBVL_GOTCL_MAX_INDEX 127 +#define I40E_GLVEBVL_GOTCL_VLBCL_SHIFT 0 +#define I40E_GLVEBVL_GOTCL_VLBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_GOTCL_VLBCL_SHIFT) +#define I40E_GLVEBVL_MPCH(_i) (0x00374404 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_GLVEBVL_MPCH_MAX_INDEX 127 +#define I40E_GLVEBVL_MPCH_VLMPCH_SHIFT 0 +#define I40E_GLVEBVL_MPCH_VLMPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_MPCH_VLMPCH_SHIFT) +#define I40E_GLVEBVL_MPCL(_i) (0x00374400 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_GLVEBVL_MPCL_MAX_INDEX 127 +#define I40E_GLVEBVL_MPCL_VLMPCL_SHIFT 0 +#define I40E_GLVEBVL_MPCL_VLMPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_MPCL_VLMPCL_SHIFT) +#define I40E_GLVEBVL_UPCH(_i) (0x00374004 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_GLVEBVL_UPCH_MAX_INDEX 127 +#define I40E_GLVEBVL_UPCH_VLUPCH_SHIFT 0 +#define I40E_GLVEBVL_UPCH_VLUPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_UPCH_VLUPCH_SHIFT) +#define I40E_GLVEBVL_UPCL(_i) (0x00374000 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_GLVEBVL_UPCL_MAX_INDEX 127 +#define I40E_GLVEBVL_UPCL_VLUPCL_SHIFT 0 +#define I40E_GLVEBVL_UPCL_VLUPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_UPCL_VLUPCL_SHIFT) +#define I40E_GL_MTG_FLU_MSK_H 0x00269F4C /* Reset: CORER */ +#define I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_SHIFT 0 +#define I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_MASK I40E_MASK(0xFFFF, I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_SHIFT) +#define I40E_GL_SWR_DEF_ACT(_i) (0x00270200 + ((_i) * 4)) /* _i=0...35 */ /* Reset: CORER */ +#define I40E_GL_SWR_DEF_ACT_MAX_INDEX 35 +#define I40E_GL_SWR_DEF_ACT_DEF_ACTION_SHIFT 0 +#define I40E_GL_SWR_DEF_ACT_DEF_ACTION_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_SWR_DEF_ACT_DEF_ACTION_SHIFT) +#define I40E_GL_SWR_DEF_ACT_EN(_i) (0x0026CFB8 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */ +#define I40E_GL_SWR_DEF_ACT_EN_MAX_INDEX 1 +#define I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_SHIFT 0 +#define I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_SHIFT) +#define I40E_PRTTSYN_ADJ 0x001E4280 /* Reset: GLOBR */ +#define I40E_PRTTSYN_ADJ_TSYNADJ_SHIFT 0 +#define I40E_PRTTSYN_ADJ_TSYNADJ_MASK I40E_MASK(0x7FFFFFFF, I40E_PRTTSYN_ADJ_TSYNADJ_SHIFT) +#define I40E_PRTTSYN_ADJ_SIGN_SHIFT 31 +#define I40E_PRTTSYN_ADJ_SIGN_MASK I40E_MASK(0x1, I40E_PRTTSYN_ADJ_SIGN_SHIFT) +#define I40E_PRTTSYN_AUX_0(_i) (0x001E42A0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */ +#define I40E_PRTTSYN_AUX_0_MAX_INDEX 1 +#define I40E_PRTTSYN_AUX_0_OUT_ENA_SHIFT 0 +#define I40E_PRTTSYN_AUX_0_OUT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_0_OUT_ENA_SHIFT) +#define I40E_PRTTSYN_AUX_0_OUTMOD_SHIFT 1 +#define I40E_PRTTSYN_AUX_0_OUTMOD_MASK I40E_MASK(0x3, I40E_PRTTSYN_AUX_0_OUTMOD_SHIFT) +#define I40E_PRTTSYN_AUX_0_OUTLVL_SHIFT 3 +#define I40E_PRTTSYN_AUX_0_OUTLVL_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_0_OUTLVL_SHIFT) +#define I40E_PRTTSYN_AUX_0_PULSEW_SHIFT 8 +#define I40E_PRTTSYN_AUX_0_PULSEW_MASK I40E_MASK(0xF, I40E_PRTTSYN_AUX_0_PULSEW_SHIFT) +#define I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT 16 +#define I40E_PRTTSYN_AUX_0_EVNTLVL_MASK I40E_MASK(0x3, 
I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT) +#define I40E_PRTTSYN_AUX_0_PTPFLAG_SHIFT 17 +#define I40E_PRTTSYN_AUX_0_PTPFLAG_MASK \ + I40E_MASK(0x1, I40E_PRTTSYN_AUX_0_PTPFLAG_SHIFT) +#define I40E_PRTTSYN_AUX_0_PTP_OUT_SYNC_CLK_IO 0xF +#define I40E_PRTTSYN_AUX_1(_i) (0x001E42E0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */ +#define I40E_PRTTSYN_AUX_1_MAX_INDEX 1 +#define I40E_PRTTSYN_AUX_1_INSTNT_SHIFT 0 +#define I40E_PRTTSYN_AUX_1_INSTNT_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_1_INSTNT_SHIFT) +#define I40E_PRTTSYN_AUX_1_SAMPLE_TIME_SHIFT 1 +#define I40E_PRTTSYN_AUX_1_SAMPLE_TIME_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_1_SAMPLE_TIME_SHIFT) +#define I40E_PRTTSYN_CLKO(_i) (0x001E4240 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */ +#define I40E_PRTTSYN_CLKO_MAX_INDEX 1 +#define I40E_PRTTSYN_CLKO_TSYNCLKO_SHIFT 0 +#define I40E_PRTTSYN_CLKO_TSYNCLKO_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_CLKO_TSYNCLKO_SHIFT) +#define I40E_PRTTSYN_CTL0 0x001E4200 /* Reset: GLOBR */ +#define I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_SHIFT 0 +#define I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_SHIFT) +#define I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_SHIFT 1 +#define I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_SHIFT) +#define I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT 2 +#define I40E_PRTTSYN_CTL0_EVENT_INT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT) +#define I40E_PRTTSYN_CTL0_TGT_INT_ENA_SHIFT 3 +#define I40E_PRTTSYN_CTL0_TGT_INT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_TGT_INT_ENA_SHIFT) +#define I40E_PRTTSYN_CTL0_PF_ID_SHIFT 8 +#define I40E_PRTTSYN_CTL0_PF_ID_MASK I40E_MASK(0xF, I40E_PRTTSYN_CTL0_PF_ID_SHIFT) +#define I40E_PRTTSYN_CTL0_TSYNACT_SHIFT 12 +#define I40E_PRTTSYN_CTL0_TSYNACT_MASK I40E_MASK(0x3, I40E_PRTTSYN_CTL0_TSYNACT_SHIFT) +#define I40E_PRTTSYN_CTL0_TSYNENA_SHIFT 31 +#define I40E_PRTTSYN_CTL0_TSYNENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_TSYNENA_SHIFT) +#define I40E_PRTTSYN_CTL1 0x00085020 /* Reset: CORER */ +#define I40E_PRTTSYN_CTL1_V1MESSTYPE0_SHIFT 0 +#define I40E_PRTTSYN_CTL1_V1MESSTYPE0_MASK I40E_MASK(0xFF, I40E_PRTTSYN_CTL1_V1MESSTYPE0_SHIFT) +#define I40E_PRTTSYN_CTL1_V1MESSTYPE1_SHIFT 8 +#define I40E_PRTTSYN_CTL1_V1MESSTYPE1_MASK I40E_MASK(0xFF, I40E_PRTTSYN_CTL1_V1MESSTYPE1_SHIFT) +#define I40E_PRTTSYN_CTL1_V2MESSTYPE0_SHIFT 16 +#define I40E_PRTTSYN_CTL1_V2MESSTYPE0_MASK I40E_MASK(0xF, I40E_PRTTSYN_CTL1_V2MESSTYPE0_SHIFT) +#define I40E_PRTTSYN_CTL1_V2MESSTYPE1_SHIFT 20 +#define I40E_PRTTSYN_CTL1_V2MESSTYPE1_MASK I40E_MASK(0xF, I40E_PRTTSYN_CTL1_V2MESSTYPE1_SHIFT) +#define I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT 24 +#define I40E_PRTTSYN_CTL1_TSYNTYPE_MASK I40E_MASK(0x3, I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT) +#define I40E_PRTTSYN_CTL1_UDP_ENA_SHIFT 26 +#define I40E_PRTTSYN_CTL1_UDP_ENA_MASK I40E_MASK(0x3, I40E_PRTTSYN_CTL1_UDP_ENA_SHIFT) +#define I40E_PRTTSYN_CTL1_TSYNENA_SHIFT 31 +#define I40E_PRTTSYN_CTL1_TSYNENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL1_TSYNENA_SHIFT) +#define I40E_PRTTSYN_EVNT_H(_i) (0x001E40C0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */ +#define I40E_PRTTSYN_EVNT_H_MAX_INDEX 1 +#define I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_SHIFT 0 +#define I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_SHIFT) +#define I40E_PRTTSYN_EVNT_L(_i) (0x001E4080 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */ +#define I40E_PRTTSYN_EVNT_L_MAX_INDEX 1 +#define I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_SHIFT 0 +#define I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_MASK I40E_MASK(0xFFFFFFFF, 
I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_SHIFT) +#define I40E_PRTTSYN_INC_H 0x001E4060 /* Reset: GLOBR */ +#define I40E_PRTTSYN_INC_H_TSYNINC_H_SHIFT 0 +#define I40E_PRTTSYN_INC_H_TSYNINC_H_MASK I40E_MASK(0x3F, I40E_PRTTSYN_INC_H_TSYNINC_H_SHIFT) +#define I40E_PRTTSYN_INC_L 0x001E4040 /* Reset: GLOBR */ +#define I40E_PRTTSYN_INC_L_TSYNINC_L_SHIFT 0 +#define I40E_PRTTSYN_INC_L_TSYNINC_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_INC_L_TSYNINC_L_SHIFT) +#define I40E_PRTTSYN_RXTIME_H(_i) (0x00085040 + ((_i) * 32)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_PRTTSYN_RXTIME_H_MAX_INDEX 3 +#define I40E_PRTTSYN_RXTIME_H_RXTIEM_H_SHIFT 0 +#define I40E_PRTTSYN_RXTIME_H_RXTIEM_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_RXTIME_H_RXTIEM_H_SHIFT) +#define I40E_PRTTSYN_RXTIME_L(_i) (0x000850C0 + ((_i) * 32)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_PRTTSYN_RXTIME_L_MAX_INDEX 3 +#define I40E_PRTTSYN_RXTIME_L_RXTIEM_L_SHIFT 0 +#define I40E_PRTTSYN_RXTIME_L_RXTIEM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_RXTIME_L_RXTIEM_L_SHIFT) +#define I40E_PRTTSYN_STAT_0 0x001E4220 /* Reset: GLOBR */ +#define I40E_PRTTSYN_STAT_0_EVENT0_SHIFT 0 +#define I40E_PRTTSYN_STAT_0_EVENT0_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_EVENT0_SHIFT) +#define I40E_PRTTSYN_STAT_0_EVENT1_SHIFT 1 +#define I40E_PRTTSYN_STAT_0_EVENT1_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_EVENT1_SHIFT) +#define I40E_PRTTSYN_STAT_0_TGT0_SHIFT 2 +#define I40E_PRTTSYN_STAT_0_TGT0_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_TGT0_SHIFT) +#define I40E_PRTTSYN_STAT_0_TGT1_SHIFT 3 +#define I40E_PRTTSYN_STAT_0_TGT1_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_TGT1_SHIFT) +#define I40E_PRTTSYN_STAT_0_TXTIME_SHIFT 4 +#define I40E_PRTTSYN_STAT_0_TXTIME_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_TXTIME_SHIFT) +#define I40E_PRTTSYN_STAT_1 0x00085140 /* Reset: CORER */ +#define I40E_PRTTSYN_STAT_1_RXT0_SHIFT 0 +#define I40E_PRTTSYN_STAT_1_RXT0_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT0_SHIFT) +#define I40E_PRTTSYN_STAT_1_RXT1_SHIFT 1 +#define I40E_PRTTSYN_STAT_1_RXT1_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT1_SHIFT) +#define I40E_PRTTSYN_STAT_1_RXT2_SHIFT 2 +#define I40E_PRTTSYN_STAT_1_RXT2_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT2_SHIFT) +#define I40E_PRTTSYN_STAT_1_RXT3_SHIFT 3 +#define I40E_PRTTSYN_STAT_1_RXT3_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT3_SHIFT) +#define I40E_PRTTSYN_TGT_H(_i) (0x001E4180 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */ +#define I40E_PRTTSYN_TGT_H_MAX_INDEX 1 +#define I40E_PRTTSYN_TGT_H_TSYNTGTT_H_SHIFT 0 +#define I40E_PRTTSYN_TGT_H_TSYNTGTT_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TGT_H_TSYNTGTT_H_SHIFT) +#define I40E_PRTTSYN_TGT_L(_i) (0x001E4140 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */ +#define I40E_PRTTSYN_TGT_L_MAX_INDEX 1 +#define I40E_PRTTSYN_TGT_L_TSYNTGTT_L_SHIFT 0 +#define I40E_PRTTSYN_TGT_L_TSYNTGTT_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TGT_L_TSYNTGTT_L_SHIFT) +#define I40E_PRTTSYN_TIME_H 0x001E4120 /* Reset: GLOBR */ +#define I40E_PRTTSYN_TIME_H_TSYNTIME_H_SHIFT 0 +#define I40E_PRTTSYN_TIME_H_TSYNTIME_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TIME_H_TSYNTIME_H_SHIFT) +#define I40E_PRTTSYN_TIME_L 0x001E4100 /* Reset: GLOBR */ +#define I40E_PRTTSYN_TIME_L_TSYNTIME_L_SHIFT 0 +#define I40E_PRTTSYN_TIME_L_TSYNTIME_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TIME_L_TSYNTIME_L_SHIFT) +#define I40E_PRTTSYN_TXTIME_H 0x001E41E0 /* Reset: GLOBR */ +#define I40E_PRTTSYN_TXTIME_H_TXTIEM_H_SHIFT 0 +#define I40E_PRTTSYN_TXTIME_H_TXTIEM_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TXTIME_H_TXTIEM_H_SHIFT) +#define I40E_PRTTSYN_TXTIME_L 
0x001E41C0 /* Reset: GLOBR */ +#define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT 0 +#define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT) +#define I40E_GL_MDET_RX 0x0012A510 /* Reset: CORER */ +#define I40E_GL_MDET_RX_FUNCTION_SHIFT 0 +#define I40E_GL_MDET_RX_FUNCTION_MASK I40E_MASK(0xFF, I40E_GL_MDET_RX_FUNCTION_SHIFT) +#define I40E_GL_MDET_RX_EVENT_SHIFT 8 +#define I40E_GL_MDET_RX_EVENT_MASK I40E_MASK(0x1FF, I40E_GL_MDET_RX_EVENT_SHIFT) +#define I40E_GL_MDET_RX_QUEUE_SHIFT 17 +#define I40E_GL_MDET_RX_QUEUE_MASK I40E_MASK(0x3FFF, I40E_GL_MDET_RX_QUEUE_SHIFT) +#define I40E_GL_MDET_RX_VALID_SHIFT 31 +#define I40E_GL_MDET_RX_VALID_MASK I40E_MASK(0x1, I40E_GL_MDET_RX_VALID_SHIFT) +#define I40E_GL_MDET_TX 0x000E6480 /* Reset: CORER */ +#define I40E_GL_MDET_TX_QUEUE_SHIFT 0 +#define I40E_GL_MDET_TX_QUEUE_MASK I40E_MASK(0xFFF, I40E_GL_MDET_TX_QUEUE_SHIFT) +#define I40E_GL_MDET_TX_VF_NUM_SHIFT 12 +#define I40E_GL_MDET_TX_VF_NUM_MASK I40E_MASK(0x1FF, I40E_GL_MDET_TX_VF_NUM_SHIFT) +#define I40E_GL_MDET_TX_PF_NUM_SHIFT 21 +#define I40E_GL_MDET_TX_PF_NUM_MASK I40E_MASK(0xF, I40E_GL_MDET_TX_PF_NUM_SHIFT) +#define I40E_GL_MDET_TX_EVENT_SHIFT 25 +#define I40E_GL_MDET_TX_EVENT_MASK I40E_MASK(0x1F, I40E_GL_MDET_TX_EVENT_SHIFT) +#define I40E_GL_MDET_TX_VALID_SHIFT 31 +#define I40E_GL_MDET_TX_VALID_MASK I40E_MASK(0x1, I40E_GL_MDET_TX_VALID_SHIFT) +#define I40E_PF_MDET_RX 0x0012A400 /* Reset: CORER */ +#define I40E_PF_MDET_RX_VALID_SHIFT 0 +#define I40E_PF_MDET_RX_VALID_MASK I40E_MASK(0x1, I40E_PF_MDET_RX_VALID_SHIFT) +#define I40E_PF_MDET_TX 0x000E6400 /* Reset: CORER */ +#define I40E_PF_MDET_TX_VALID_SHIFT 0 +#define I40E_PF_MDET_TX_VALID_MASK I40E_MASK(0x1, I40E_PF_MDET_TX_VALID_SHIFT) +#define I40E_PF_VT_PFALLOC 0x001C0500 /* Reset: CORER */ +#define I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT 0 +#define I40E_PF_VT_PFALLOC_FIRSTVF_MASK I40E_MASK(0xFF, I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT) +#define I40E_PF_VT_PFALLOC_LASTVF_SHIFT 8 +#define I40E_PF_VT_PFALLOC_LASTVF_MASK I40E_MASK(0xFF, I40E_PF_VT_PFALLOC_LASTVF_SHIFT) +#define I40E_PF_VT_PFALLOC_VALID_SHIFT 31 +#define I40E_PF_VT_PFALLOC_VALID_MASK I40E_MASK(0x1u, I40E_PF_VT_PFALLOC_VALID_SHIFT) +#define I40E_VP_MDET_RX(_VF) (0x0012A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_VP_MDET_RX_MAX_INDEX 127 +#define I40E_VP_MDET_RX_VALID_SHIFT 0 +#define I40E_VP_MDET_RX_VALID_MASK I40E_MASK(0x1, I40E_VP_MDET_RX_VALID_SHIFT) +#define I40E_VP_MDET_TX(_VF) (0x000E6000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_VP_MDET_TX_MAX_INDEX 127 +#define I40E_VP_MDET_TX_VALID_SHIFT 0 +#define I40E_VP_MDET_TX_VALID_MASK I40E_MASK(0x1, I40E_VP_MDET_TX_VALID_SHIFT) +#define I40E_GLPM_WUMC 0x0006C800 /* Reset: POR */ +#define I40E_GLPM_WUMC_NOTCO_SHIFT 0 +#define I40E_GLPM_WUMC_NOTCO_MASK I40E_MASK(0x1, I40E_GLPM_WUMC_NOTCO_SHIFT) +#define I40E_GLPM_WUMC_SRST_PIN_VAL_SHIFT 1 +#define I40E_GLPM_WUMC_SRST_PIN_VAL_MASK I40E_MASK(0x1, I40E_GLPM_WUMC_SRST_PIN_VAL_SHIFT) +#define I40E_GLPM_WUMC_ROL_MODE_SHIFT 2 +#define I40E_GLPM_WUMC_ROL_MODE_MASK I40E_MASK(0x1, I40E_GLPM_WUMC_ROL_MODE_SHIFT) +#define I40E_GLPM_WUMC_RESERVED_4_SHIFT 3 +#define I40E_GLPM_WUMC_RESERVED_4_MASK I40E_MASK(0x1FFF, I40E_GLPM_WUMC_RESERVED_4_SHIFT) +#define I40E_GLPM_WUMC_MNG_WU_PF_SHIFT 16 +#define I40E_GLPM_WUMC_MNG_WU_PF_MASK I40E_MASK(0xFFFF, I40E_GLPM_WUMC_MNG_WU_PF_SHIFT) +#define I40E_PFPM_APM 0x000B8080 /* Reset: POR */ +#define I40E_PFPM_APM_APME_SHIFT 0 +#define I40E_PFPM_APM_APME_MASK I40E_MASK(0x1, 
I40E_PFPM_APM_APME_SHIFT) +#define I40E_PFPM_FHFT_LENGTH(_i) (0x0006A000 + ((_i) * 128)) /* _i=0...7 */ /* Reset: POR */ +#define I40E_PFPM_FHFT_LENGTH_MAX_INDEX 7 +#define I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT 0 +#define I40E_PFPM_FHFT_LENGTH_LENGTH_MASK I40E_MASK(0xFF, I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT) +#define I40E_PFPM_WUC 0x0006B200 /* Reset: POR */ +#define I40E_PFPM_WUC_EN_APM_D0_SHIFT 5 +#define I40E_PFPM_WUC_EN_APM_D0_MASK I40E_MASK(0x1, I40E_PFPM_WUC_EN_APM_D0_SHIFT) +#define I40E_PFPM_WUFC 0x0006B400 /* Reset: POR */ +#define I40E_PFPM_WUFC_LNKC_SHIFT 0 +#define I40E_PFPM_WUFC_LNKC_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_LNKC_SHIFT) +#define I40E_PFPM_WUFC_MAG_SHIFT 1 +#define I40E_PFPM_WUFC_MAG_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_MAG_SHIFT) +#define I40E_PFPM_WUFC_MNG_SHIFT 3 +#define I40E_PFPM_WUFC_MNG_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_MNG_SHIFT) +#define I40E_PFPM_WUFC_FLX0_ACT_SHIFT 4 +#define I40E_PFPM_WUFC_FLX0_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX0_ACT_SHIFT) +#define I40E_PFPM_WUFC_FLX1_ACT_SHIFT 5 +#define I40E_PFPM_WUFC_FLX1_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX1_ACT_SHIFT) +#define I40E_PFPM_WUFC_FLX2_ACT_SHIFT 6 +#define I40E_PFPM_WUFC_FLX2_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX2_ACT_SHIFT) +#define I40E_PFPM_WUFC_FLX3_ACT_SHIFT 7 +#define I40E_PFPM_WUFC_FLX3_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX3_ACT_SHIFT) +#define I40E_PFPM_WUFC_FLX4_ACT_SHIFT 8 +#define I40E_PFPM_WUFC_FLX4_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX4_ACT_SHIFT) +#define I40E_PFPM_WUFC_FLX5_ACT_SHIFT 9 +#define I40E_PFPM_WUFC_FLX5_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX5_ACT_SHIFT) +#define I40E_PFPM_WUFC_FLX6_ACT_SHIFT 10 +#define I40E_PFPM_WUFC_FLX6_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX6_ACT_SHIFT) +#define I40E_PFPM_WUFC_FLX7_ACT_SHIFT 11 +#define I40E_PFPM_WUFC_FLX7_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX7_ACT_SHIFT) +#define I40E_PFPM_WUFC_FLX0_SHIFT 16 +#define I40E_PFPM_WUFC_FLX0_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX0_SHIFT) +#define I40E_PFPM_WUFC_FLX1_SHIFT 17 +#define I40E_PFPM_WUFC_FLX1_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX1_SHIFT) +#define I40E_PFPM_WUFC_FLX2_SHIFT 18 +#define I40E_PFPM_WUFC_FLX2_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX2_SHIFT) +#define I40E_PFPM_WUFC_FLX3_SHIFT 19 +#define I40E_PFPM_WUFC_FLX3_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX3_SHIFT) +#define I40E_PFPM_WUFC_FLX4_SHIFT 20 +#define I40E_PFPM_WUFC_FLX4_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX4_SHIFT) +#define I40E_PFPM_WUFC_FLX5_SHIFT 21 +#define I40E_PFPM_WUFC_FLX5_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX5_SHIFT) +#define I40E_PFPM_WUFC_FLX6_SHIFT 22 +#define I40E_PFPM_WUFC_FLX6_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX6_SHIFT) +#define I40E_PFPM_WUFC_FLX7_SHIFT 23 +#define I40E_PFPM_WUFC_FLX7_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX7_SHIFT) +#define I40E_PFPM_WUFC_FW_RST_WK_SHIFT 31 +#define I40E_PFPM_WUFC_FW_RST_WK_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FW_RST_WK_SHIFT) +#define I40E_PFPM_WUS 0x0006B600 /* Reset: POR */ +#define I40E_PFPM_WUS_LNKC_SHIFT 0 +#define I40E_PFPM_WUS_LNKC_MASK I40E_MASK(0x1, I40E_PFPM_WUS_LNKC_SHIFT) +#define I40E_PFPM_WUS_MAG_SHIFT 1 +#define I40E_PFPM_WUS_MAG_MASK I40E_MASK(0x1, I40E_PFPM_WUS_MAG_SHIFT) +#define I40E_PFPM_WUS_PME_STATUS_SHIFT 2 +#define I40E_PFPM_WUS_PME_STATUS_MASK I40E_MASK(0x1, I40E_PFPM_WUS_PME_STATUS_SHIFT) +#define I40E_PFPM_WUS_MNG_SHIFT 3 +#define I40E_PFPM_WUS_MNG_MASK I40E_MASK(0x1, I40E_PFPM_WUS_MNG_SHIFT) +#define I40E_PFPM_WUS_FLX0_SHIFT 16 +#define I40E_PFPM_WUS_FLX0_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX0_SHIFT) +#define 
I40E_PFPM_WUS_FLX1_SHIFT 17 +#define I40E_PFPM_WUS_FLX1_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX1_SHIFT) +#define I40E_PFPM_WUS_FLX2_SHIFT 18 +#define I40E_PFPM_WUS_FLX2_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX2_SHIFT) +#define I40E_PFPM_WUS_FLX3_SHIFT 19 +#define I40E_PFPM_WUS_FLX3_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX3_SHIFT) +#define I40E_PFPM_WUS_FLX4_SHIFT 20 +#define I40E_PFPM_WUS_FLX4_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX4_SHIFT) +#define I40E_PFPM_WUS_FLX5_SHIFT 21 +#define I40E_PFPM_WUS_FLX5_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX5_SHIFT) +#define I40E_PFPM_WUS_FLX6_SHIFT 22 +#define I40E_PFPM_WUS_FLX6_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX6_SHIFT) +#define I40E_PFPM_WUS_FLX7_SHIFT 23 +#define I40E_PFPM_WUS_FLX7_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX7_SHIFT) +#define I40E_PFPM_WUS_FW_RST_WK_SHIFT 31 +#define I40E_PFPM_WUS_FW_RST_WK_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FW_RST_WK_SHIFT) +#define I40E_PRTPM_FHFHR 0x0006C000 /* Reset: POR */ +#define I40E_PRTPM_FHFHR_UNICAST_SHIFT 0 +#define I40E_PRTPM_FHFHR_UNICAST_MASK I40E_MASK(0x1, I40E_PRTPM_FHFHR_UNICAST_SHIFT) +#define I40E_PRTPM_FHFHR_MULTICAST_SHIFT 1 +#define I40E_PRTPM_FHFHR_MULTICAST_MASK I40E_MASK(0x1, I40E_PRTPM_FHFHR_MULTICAST_SHIFT) +#define I40E_PRTPM_SAH(_i) (0x001E44C0 + ((_i) * 32)) /* _i=0...3 */ /* Reset: PFR */ +#define I40E_PRTPM_SAH_MAX_INDEX 3 +#define I40E_PRTPM_SAH_PFPM_SAH_SHIFT 0 +#define I40E_PRTPM_SAH_PFPM_SAH_MASK I40E_MASK(0xFFFF, I40E_PRTPM_SAH_PFPM_SAH_SHIFT) +#define I40E_PRTPM_SAH_PF_NUM_SHIFT 26 +#define I40E_PRTPM_SAH_PF_NUM_MASK I40E_MASK(0xF, I40E_PRTPM_SAH_PF_NUM_SHIFT) +#define I40E_PRTPM_SAH_MC_MAG_EN_SHIFT 30 +#define I40E_PRTPM_SAH_MC_MAG_EN_MASK I40E_MASK(0x1, I40E_PRTPM_SAH_MC_MAG_EN_SHIFT) +#define I40E_PRTPM_SAH_AV_SHIFT 31 +#define I40E_PRTPM_SAH_AV_MASK I40E_MASK(0x1, I40E_PRTPM_SAH_AV_SHIFT) +#define I40E_PRTPM_SAL(_i) (0x001E4440 + ((_i) * 32)) /* _i=0...3 */ /* Reset: PFR */ +#define I40E_PRTPM_SAL_MAX_INDEX 3 +#define I40E_PRTPM_SAL_PFPM_SAL_SHIFT 0 +#define I40E_PRTPM_SAL_PFPM_SAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTPM_SAL_PFPM_SAL_SHIFT) +#endif /* PF_DRIVER */ +#define I40E_VF_ARQBAH1 0x00006000 /* Reset: EMPR */ +#define I40E_VF_ARQBAH1_ARQBAH_SHIFT 0 +#define I40E_VF_ARQBAH1_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAH1_ARQBAH_SHIFT) +#define I40E_VF_ARQBAL1 0x00006C00 /* Reset: EMPR */ +#define I40E_VF_ARQBAL1_ARQBAL_SHIFT 0 +#define I40E_VF_ARQBAL1_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAL1_ARQBAL_SHIFT) +#define I40E_VF_ARQH1 0x00007400 /* Reset: EMPR */ +#define I40E_VF_ARQH1_ARQH_SHIFT 0 +#define I40E_VF_ARQH1_ARQH_MASK I40E_MASK(0x3FF, I40E_VF_ARQH1_ARQH_SHIFT) +#define I40E_VF_ARQLEN1 0x00008000 /* Reset: EMPR */ +#define I40E_VF_ARQLEN1_ARQLEN_SHIFT 0 +#define I40E_VF_ARQLEN1_ARQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ARQLEN1_ARQLEN_SHIFT) +#define I40E_VF_ARQLEN1_ARQVFE_SHIFT 28 +#define I40E_VF_ARQLEN1_ARQVFE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQVFE_SHIFT) +#define I40E_VF_ARQLEN1_ARQOVFL_SHIFT 29 +#define I40E_VF_ARQLEN1_ARQOVFL_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQOVFL_SHIFT) +#define I40E_VF_ARQLEN1_ARQCRIT_SHIFT 30 +#define I40E_VF_ARQLEN1_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQCRIT_SHIFT) +#define I40E_VF_ARQLEN1_ARQENABLE_SHIFT 31 +#define I40E_VF_ARQLEN1_ARQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ARQLEN1_ARQENABLE_SHIFT) +#define I40E_VF_ARQT1 0x00007000 /* Reset: EMPR */ +#define I40E_VF_ARQT1_ARQT_SHIFT 0 +#define I40E_VF_ARQT1_ARQT_MASK I40E_MASK(0x3FF, I40E_VF_ARQT1_ARQT_SHIFT) +#define I40E_VF_ATQBAH1 0x00007800 /* Reset: EMPR */ +#define 
I40E_VF_ATQBAH1_ATQBAH_SHIFT 0 +#define I40E_VF_ATQBAH1_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAH1_ATQBAH_SHIFT) +#define I40E_VF_ATQBAL1 0x00007C00 /* Reset: EMPR */ +#define I40E_VF_ATQBAL1_ATQBAL_SHIFT 0 +#define I40E_VF_ATQBAL1_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAL1_ATQBAL_SHIFT) +#define I40E_VF_ATQH1 0x00006400 /* Reset: EMPR */ +#define I40E_VF_ATQH1_ATQH_SHIFT 0 +#define I40E_VF_ATQH1_ATQH_MASK I40E_MASK(0x3FF, I40E_VF_ATQH1_ATQH_SHIFT) +#define I40E_VF_ATQLEN1 0x00006800 /* Reset: EMPR */ +#define I40E_VF_ATQLEN1_ATQLEN_SHIFT 0 +#define I40E_VF_ATQLEN1_ATQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ATQLEN1_ATQLEN_SHIFT) +#define I40E_VF_ATQLEN1_ATQVFE_SHIFT 28 +#define I40E_VF_ATQLEN1_ATQVFE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQVFE_SHIFT) +#define I40E_VF_ATQLEN1_ATQOVFL_SHIFT 29 +#define I40E_VF_ATQLEN1_ATQOVFL_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQOVFL_SHIFT) +#define I40E_VF_ATQLEN1_ATQCRIT_SHIFT 30 +#define I40E_VF_ATQLEN1_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQCRIT_SHIFT) +#define I40E_VF_ATQLEN1_ATQENABLE_SHIFT 31 +#define I40E_VF_ATQLEN1_ATQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ATQLEN1_ATQENABLE_SHIFT) +#define I40E_VF_ATQT1 0x00008400 /* Reset: EMPR */ +#define I40E_VF_ATQT1_ATQT_SHIFT 0 +#define I40E_VF_ATQT1_ATQT_MASK I40E_MASK(0x3FF, I40E_VF_ATQT1_ATQT_SHIFT) +#define I40E_VFGEN_RSTAT 0x00008800 /* Reset: VFR */ +#define I40E_VFGEN_RSTAT_VFR_STATE_SHIFT 0 +#define I40E_VFGEN_RSTAT_VFR_STATE_MASK I40E_MASK(0x3, I40E_VFGEN_RSTAT_VFR_STATE_SHIFT) +#define I40E_VFINT_DYN_CTL01 0x00005C00 /* Reset: VFR */ +#define I40E_VFINT_DYN_CTL01_INTENA_SHIFT 0 +#define I40E_VFINT_DYN_CTL01_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_INTENA_SHIFT) +#define I40E_VFINT_DYN_CTL01_CLEARPBA_SHIFT 1 +#define I40E_VFINT_DYN_CTL01_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_CLEARPBA_SHIFT) +#define I40E_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT 2 +#define I40E_VFINT_DYN_CTL01_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT) +#define I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT 3 +#define I40E_VFINT_DYN_CTL01_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT) +#define I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT 5 +#define I40E_VFINT_DYN_CTL01_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT) +#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT 24 +#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT) +#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT 25 +#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT) +#define I40E_VFINT_DYN_CTL01_INTENA_MSK_SHIFT 31 +#define I40E_VFINT_DYN_CTL01_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_INTENA_MSK_SHIFT) +#define I40E_VFINT_DYN_CTLN1(_INTVF) (0x00003800 + ((_INTVF) * 4)) /* _i=0...15 */ /* Reset: VFR */ +#define I40E_VFINT_DYN_CTLN1_MAX_INDEX 15 +#define I40E_VFINT_DYN_CTLN1_INTENA_SHIFT 0 +#define I40E_VFINT_DYN_CTLN1_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_INTENA_SHIFT) +#define I40E_VFINT_DYN_CTLN1_CLEARPBA_SHIFT 1 +#define I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_CLEARPBA_SHIFT) +#define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT 2 +#define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT) +#define I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT 3 +#define I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) +#define I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT 5 +#define I40E_VFINT_DYN_CTLN1_INTERVAL_MASK 
I40E_MASK(0xFFF, I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT) +#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT 24 +#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT) +#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT 25 +#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT) +#define I40E_VFINT_DYN_CTLN1_INTENA_MSK_SHIFT 31 +#define I40E_VFINT_DYN_CTLN1_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_INTENA_MSK_SHIFT) +#define I40E_VFINT_ICR0_ENA1 0x00005000 /* Reset: CORER */ +#define I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT 25 +#define I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT) +#define I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT 30 +#define I40E_VFINT_ICR0_ENA1_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT) +#define I40E_VFINT_ICR0_ENA1_RSVD_SHIFT 31 +#define I40E_VFINT_ICR0_ENA1_RSVD_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA1_RSVD_SHIFT) +#define I40E_VFINT_ICR01 0x00004800 /* Reset: CORER */ +#define I40E_VFINT_ICR01_INTEVENT_SHIFT 0 +#define I40E_VFINT_ICR01_INTEVENT_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_INTEVENT_SHIFT) +#define I40E_VFINT_ICR01_QUEUE_0_SHIFT 1 +#define I40E_VFINT_ICR01_QUEUE_0_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_0_SHIFT) +#define I40E_VFINT_ICR01_QUEUE_1_SHIFT 2 +#define I40E_VFINT_ICR01_QUEUE_1_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_1_SHIFT) +#define I40E_VFINT_ICR01_QUEUE_2_SHIFT 3 +#define I40E_VFINT_ICR01_QUEUE_2_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_2_SHIFT) +#define I40E_VFINT_ICR01_QUEUE_3_SHIFT 4 +#define I40E_VFINT_ICR01_QUEUE_3_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_3_SHIFT) +#define I40E_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT 25 +#define I40E_VFINT_ICR01_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT) +#define I40E_VFINT_ICR01_ADMINQ_SHIFT 30 +#define I40E_VFINT_ICR01_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_ADMINQ_SHIFT) +#define I40E_VFINT_ICR01_SWINT_SHIFT 31 +#define I40E_VFINT_ICR01_SWINT_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_SWINT_SHIFT) +#define I40E_VFINT_ITR01(_i) (0x00004C00 + ((_i) * 4)) /* _i=0...2 */ /* Reset: VFR */ +#define I40E_VFINT_ITR01_MAX_INDEX 2 +#define I40E_VFINT_ITR01_INTERVAL_SHIFT 0 +#define I40E_VFINT_ITR01_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITR01_INTERVAL_SHIFT) +#define I40E_VFINT_ITRN1(_i, _INTVF) (0x00002800 + ((_i) * 64 + (_INTVF) * 4)) /* _i=0...2, _INTVF=0...15 */ /* Reset: VFR */ +#define I40E_VFINT_ITRN1_MAX_INDEX 2 +#define I40E_VFINT_ITRN1_INTERVAL_SHIFT 0 +#define I40E_VFINT_ITRN1_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN1_INTERVAL_SHIFT) +#define I40E_VFINT_STAT_CTL01 0x00005400 /* Reset: CORER */ +#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT 2 +#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT) +#define I40E_QRX_TAIL1(_Q) (0x00002000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_QRX_TAIL1_MAX_INDEX 15 +#define I40E_QRX_TAIL1_TAIL_SHIFT 0 +#define I40E_QRX_TAIL1_TAIL_MASK I40E_MASK(0x1FFF, I40E_QRX_TAIL1_TAIL_SHIFT) +#define I40E_QTX_TAIL1(_Q) (0x00000000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: PFR */ +#define I40E_QTX_TAIL1_MAX_INDEX 15 +#define I40E_QTX_TAIL1_TAIL_SHIFT 0 +#define I40E_QTX_TAIL1_TAIL_MASK I40E_MASK(0x1FFF, I40E_QTX_TAIL1_TAIL_SHIFT) +#define I40E_VFMSIX_PBA 0x00002000 /* Reset: VFLR */ +#define I40E_VFMSIX_PBA_PENBIT_SHIFT 0 +#define I40E_VFMSIX_PBA_PENBIT_MASK I40E_MASK(0xFFFFFFFF, 
I40E_VFMSIX_PBA_PENBIT_SHIFT) +#define I40E_VFMSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */ +#define I40E_VFMSIX_TADD_MAX_INDEX 16 +#define I40E_VFMSIX_TADD_MSIXTADD10_SHIFT 0 +#define I40E_VFMSIX_TADD_MSIXTADD10_MASK I40E_MASK(0x3, I40E_VFMSIX_TADD_MSIXTADD10_SHIFT) +#define I40E_VFMSIX_TADD_MSIXTADD_SHIFT 2 +#define I40E_VFMSIX_TADD_MSIXTADD_MASK I40E_MASK(0x3FFFFFFF, I40E_VFMSIX_TADD_MSIXTADD_SHIFT) +#define I40E_VFMSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */ +#define I40E_VFMSIX_TMSG_MAX_INDEX 16 +#define I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT 0 +#define I40E_VFMSIX_TMSG_MSIXTMSG_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT) +#define I40E_VFMSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */ +#define I40E_VFMSIX_TUADD_MAX_INDEX 16 +#define I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT 0 +#define I40E_VFMSIX_TUADD_MSIXTUADD_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT) +#define I40E_VFMSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */ +#define I40E_VFMSIX_TVCTRL_MAX_INDEX 16 +#define I40E_VFMSIX_TVCTRL_MASK_SHIFT 0 +#define I40E_VFMSIX_TVCTRL_MASK_MASK I40E_MASK(0x1, I40E_VFMSIX_TVCTRL_MASK_SHIFT) +#define I40E_VFCM_PE_ERRDATA 0x0000DC00 /* Reset: VFR */ +#define I40E_VFCM_PE_ERRDATA_ERROR_CODE_SHIFT 0 +#define I40E_VFCM_PE_ERRDATA_ERROR_CODE_MASK I40E_MASK(0xF, I40E_VFCM_PE_ERRDATA_ERROR_CODE_SHIFT) +#define I40E_VFCM_PE_ERRDATA_Q_TYPE_SHIFT 4 +#define I40E_VFCM_PE_ERRDATA_Q_TYPE_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRDATA_Q_TYPE_SHIFT) +#define I40E_VFCM_PE_ERRDATA_Q_NUM_SHIFT 8 +#define I40E_VFCM_PE_ERRDATA_Q_NUM_MASK I40E_MASK(0x3FFFF, I40E_VFCM_PE_ERRDATA_Q_NUM_SHIFT) +#define I40E_VFCM_PE_ERRINFO 0x0000D800 /* Reset: VFR */ +#define I40E_VFCM_PE_ERRINFO_ERROR_VALID_SHIFT 0 +#define I40E_VFCM_PE_ERRINFO_ERROR_VALID_MASK I40E_MASK(0x1, I40E_VFCM_PE_ERRINFO_ERROR_VALID_SHIFT) +#define I40E_VFCM_PE_ERRINFO_ERROR_INST_SHIFT 4 +#define I40E_VFCM_PE_ERRINFO_ERROR_INST_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRINFO_ERROR_INST_SHIFT) +#define I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT 8 +#define I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT) +#define I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT 16 +#define I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT) +#define I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT 24 +#define I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT) +#define I40E_VFQF_HENA(_i) (0x0000C400 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */ +#define I40E_VFQF_HENA_MAX_INDEX 1 +#define I40E_VFQF_HENA_PTYPE_ENA_SHIFT 0 +#define I40E_VFQF_HENA_PTYPE_ENA_MASK I40E_MASK(0xFFFFFFFF, I40E_VFQF_HENA_PTYPE_ENA_SHIFT) +#define I40E_VFQF_HKEY(_i) (0x0000CC00 + ((_i) * 4)) /* _i=0...12 */ /* Reset: CORER */ +#define I40E_VFQF_HKEY_MAX_INDEX 12 +#define I40E_VFQF_HKEY_KEY_0_SHIFT 0 +#define I40E_VFQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_0_SHIFT) +#define I40E_VFQF_HKEY_KEY_1_SHIFT 8 +#define I40E_VFQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_1_SHIFT) +#define I40E_VFQF_HKEY_KEY_2_SHIFT 16 +#define I40E_VFQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_2_SHIFT) +#define I40E_VFQF_HKEY_KEY_3_SHIFT 24 +#define I40E_VFQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_3_SHIFT) +#define I40E_VFQF_HLUT(_i) (0x0000D000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_VFQF_HLUT_MAX_INDEX 15 +#define 
I40E_VFQF_HLUT_LUT0_SHIFT 0 +#define I40E_VFQF_HLUT_LUT0_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT0_SHIFT) +#define I40E_VFQF_HLUT_LUT1_SHIFT 8 +#define I40E_VFQF_HLUT_LUT1_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT1_SHIFT) +#define I40E_VFQF_HLUT_LUT2_SHIFT 16 +#define I40E_VFQF_HLUT_LUT2_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT2_SHIFT) +#define I40E_VFQF_HLUT_LUT3_SHIFT 24 +#define I40E_VFQF_HLUT_LUT3_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT3_SHIFT) +#define I40E_VFQF_HREGION(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...7 */ /* Reset: CORER */ +#define I40E_VFQF_HREGION_MAX_INDEX 7 +#define I40E_VFQF_HREGION_OVERRIDE_ENA_0_SHIFT 0 +#define I40E_VFQF_HREGION_OVERRIDE_ENA_0_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_0_SHIFT) +#define I40E_VFQF_HREGION_REGION_0_SHIFT 1 +#define I40E_VFQF_HREGION_REGION_0_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_0_SHIFT) +#define I40E_VFQF_HREGION_OVERRIDE_ENA_1_SHIFT 4 +#define I40E_VFQF_HREGION_OVERRIDE_ENA_1_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_1_SHIFT) +#define I40E_VFQF_HREGION_REGION_1_SHIFT 5 +#define I40E_VFQF_HREGION_REGION_1_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_1_SHIFT) +#define I40E_VFQF_HREGION_OVERRIDE_ENA_2_SHIFT 8 +#define I40E_VFQF_HREGION_OVERRIDE_ENA_2_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_2_SHIFT) +#define I40E_VFQF_HREGION_REGION_2_SHIFT 9 +#define I40E_VFQF_HREGION_REGION_2_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_2_SHIFT) +#define I40E_VFQF_HREGION_OVERRIDE_ENA_3_SHIFT 12 +#define I40E_VFQF_HREGION_OVERRIDE_ENA_3_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_3_SHIFT) +#define I40E_VFQF_HREGION_REGION_3_SHIFT 13 +#define I40E_VFQF_HREGION_REGION_3_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_3_SHIFT) +#define I40E_VFQF_HREGION_OVERRIDE_ENA_4_SHIFT 16 +#define I40E_VFQF_HREGION_OVERRIDE_ENA_4_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_4_SHIFT) +#define I40E_VFQF_HREGION_REGION_4_SHIFT 17 +#define I40E_VFQF_HREGION_REGION_4_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_4_SHIFT) +#define I40E_VFQF_HREGION_OVERRIDE_ENA_5_SHIFT 20 +#define I40E_VFQF_HREGION_OVERRIDE_ENA_5_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_5_SHIFT) +#define I40E_VFQF_HREGION_REGION_5_SHIFT 21 +#define I40E_VFQF_HREGION_REGION_5_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_5_SHIFT) +#define I40E_VFQF_HREGION_OVERRIDE_ENA_6_SHIFT 24 +#define I40E_VFQF_HREGION_OVERRIDE_ENA_6_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_6_SHIFT) +#define I40E_VFQF_HREGION_REGION_6_SHIFT 25 +#define I40E_VFQF_HREGION_REGION_6_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_6_SHIFT) +#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT 28 +#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT) +#define I40E_VFQF_HREGION_REGION_7_SHIFT 29 +#define I40E_VFQF_HREGION_REGION_7_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_7_SHIFT) + +#ifdef PF_DRIVER +#define I40E_MNGSB_FDCRC 0x000B7050 /* Reset: POR */ +#define I40E_MNGSB_FDCRC_CRC_RES_SHIFT 0 +#define I40E_MNGSB_FDCRC_CRC_RES_MASK I40E_MASK(0xFF, I40E_MNGSB_FDCRC_CRC_RES_SHIFT) +#define I40E_MNGSB_FDCS 0x000B7040 /* Reset: POR */ +#define I40E_MNGSB_FDCS_CRC_CONT_SHIFT 2 +#define I40E_MNGSB_FDCS_CRC_CONT_MASK I40E_MASK(0x1, I40E_MNGSB_FDCS_CRC_CONT_SHIFT) +#define I40E_MNGSB_FDCS_CRC_SEED_EN_SHIFT 3 +#define I40E_MNGSB_FDCS_CRC_SEED_EN_MASK I40E_MASK(0x1, I40E_MNGSB_FDCS_CRC_SEED_EN_SHIFT) +#define I40E_MNGSB_FDCS_CRC_WR_INH_SHIFT 4 +#define I40E_MNGSB_FDCS_CRC_WR_INH_MASK I40E_MASK(0x1, I40E_MNGSB_FDCS_CRC_WR_INH_SHIFT) +#define 
I40E_MNGSB_FDCS_CRC_SEED_SHIFT 8 +#define I40E_MNGSB_FDCS_CRC_SEED_MASK I40E_MASK(0xFF, I40E_MNGSB_FDCS_CRC_SEED_SHIFT) +#define I40E_MNGSB_FDS 0x000B7048 /* Reset: POR */ +#define I40E_MNGSB_FDS_START_BC_SHIFT 0 +#define I40E_MNGSB_FDS_START_BC_MASK I40E_MASK(0xFFF, I40E_MNGSB_FDS_START_BC_SHIFT) +#define I40E_MNGSB_FDS_LAST_BC_SHIFT 16 +#define I40E_MNGSB_FDS_LAST_BC_MASK I40E_MASK(0xFFF, I40E_MNGSB_FDS_LAST_BC_SHIFT) + +#define I40E_GL_VF_CTRL_RX(_VF) (0x00083600 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ +#define I40E_GL_VF_CTRL_RX_MAX_INDEX 127 +#define I40E_GL_VF_CTRL_RX_AQ_RX_EN_SHIFT 0 +#define I40E_GL_VF_CTRL_RX_AQ_RX_EN_MASK I40E_MASK(0x1, I40E_GL_VF_CTRL_RX_AQ_RX_EN_SHIFT) +#define I40E_GL_VF_CTRL_TX(_VF) (0x00083400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ +#define I40E_GL_VF_CTRL_TX_MAX_INDEX 127 +#define I40E_GL_VF_CTRL_TX_AQ_TX_EN_SHIFT 0 +#define I40E_GL_VF_CTRL_TX_AQ_TX_EN_MASK I40E_MASK(0x1, I40E_GL_VF_CTRL_TX_AQ_TX_EN_SHIFT) + +#define I40E_GLCM_LAN_CACHESIZE 0x0010C4D8 /* Reset: CORER */ +#define I40E_GLCM_LAN_CACHESIZE_WORD_SIZE_SHIFT 0 +#define I40E_GLCM_LAN_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFFF, I40E_GLCM_LAN_CACHESIZE_WORD_SIZE_SHIFT) +#define I40E_GLCM_LAN_CACHESIZE_SETS_SHIFT 12 +#define I40E_GLCM_LAN_CACHESIZE_SETS_MASK I40E_MASK(0xF, I40E_GLCM_LAN_CACHESIZE_SETS_SHIFT) +#define I40E_GLCM_LAN_CACHESIZE_WAYS_SHIFT 16 +#define I40E_GLCM_LAN_CACHESIZE_WAYS_MASK I40E_MASK(0x3FF, I40E_GLCM_LAN_CACHESIZE_WAYS_SHIFT) +#define I40E_GLCM_PE_CACHESIZE 0x00138FE4 /* Reset: CORER */ +#define I40E_GLCM_PE_CACHESIZE_WORD_SIZE_SHIFT 0 +#define I40E_GLCM_PE_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFFF, I40E_GLCM_PE_CACHESIZE_WORD_SIZE_SHIFT) +#define I40E_GLCM_PE_CACHESIZE_SETS_SHIFT 12 +#define I40E_GLCM_PE_CACHESIZE_SETS_MASK I40E_MASK(0xF, I40E_GLCM_PE_CACHESIZE_SETS_SHIFT) +#define I40E_GLCM_PE_CACHESIZE_WAYS_SHIFT 16 +#define I40E_GLCM_PE_CACHESIZE_WAYS_MASK I40E_MASK(0x1FF, I40E_GLCM_PE_CACHESIZE_WAYS_SHIFT) +#define I40E_PFCM_PE_ERRDATA 0x00138D00 /* Reset: PFR */ +#define I40E_PFCM_PE_ERRDATA_ERROR_CODE_SHIFT 0 +#define I40E_PFCM_PE_ERRDATA_ERROR_CODE_MASK I40E_MASK(0xF, I40E_PFCM_PE_ERRDATA_ERROR_CODE_SHIFT) +#define I40E_PFCM_PE_ERRDATA_Q_TYPE_SHIFT 4 +#define I40E_PFCM_PE_ERRDATA_Q_TYPE_MASK I40E_MASK(0x7, I40E_PFCM_PE_ERRDATA_Q_TYPE_SHIFT) +#define I40E_PFCM_PE_ERRDATA_Q_NUM_SHIFT 8 +#define I40E_PFCM_PE_ERRDATA_Q_NUM_MASK I40E_MASK(0x3FFFF, I40E_PFCM_PE_ERRDATA_Q_NUM_SHIFT) +#define I40E_PFCM_PE_ERRINFO 0x00138C80 /* Reset: PFR */ +#define I40E_PFCM_PE_ERRINFO_ERROR_VALID_SHIFT 0 +#define I40E_PFCM_PE_ERRINFO_ERROR_VALID_MASK I40E_MASK(0x1, I40E_PFCM_PE_ERRINFO_ERROR_VALID_SHIFT) +#define I40E_PFCM_PE_ERRINFO_ERROR_INST_SHIFT 4 +#define I40E_PFCM_PE_ERRINFO_ERROR_INST_MASK I40E_MASK(0x7, I40E_PFCM_PE_ERRINFO_ERROR_INST_SHIFT) +#define I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT 8 +#define I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT) +#define I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT 16 +#define I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT) +#define I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT 24 +#define I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT) + +#define I40E_PRTDCB_TFMSTC(_i) (0x000A0040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ +#define I40E_PRTDCB_TFMSTC_MAX_INDEX 7 +#define I40E_PRTDCB_TFMSTC_MSTC_SHIFT 0 +#define I40E_PRTDCB_TFMSTC_MSTC_MASK I40E_MASK(0xFFFFF, 
I40E_PRTDCB_TFMSTC_MSTC_SHIFT) +#define I40E_GL_FWSTS_FWROWD_SHIFT 8 +#define I40E_GL_FWSTS_FWROWD_MASK I40E_MASK(0x1, I40E_GL_FWSTS_FWROWD_SHIFT) +#define I40E_GLFOC_CACHESIZE 0x000AA0DC /* Reset: CORER */ +#define I40E_GLFOC_CACHESIZE_WORD_SIZE_SHIFT 0 +#define I40E_GLFOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLFOC_CACHESIZE_WORD_SIZE_SHIFT) +#define I40E_GLFOC_CACHESIZE_SETS_SHIFT 8 +#define I40E_GLFOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLFOC_CACHESIZE_SETS_SHIFT) +#define I40E_GLFOC_CACHESIZE_WAYS_SHIFT 20 +#define I40E_GLFOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLFOC_CACHESIZE_WAYS_SHIFT) +#define I40E_GLHMC_APBVTINUSEBASE(_i) (0x000C4a00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_APBVTINUSEBASE_MAX_INDEX 15 +#define I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT 0 +#define I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT) +#define I40E_GLHMC_CEQPART(_i) (0x001312C0 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_CEQPART_MAX_INDEX 15 +#define I40E_GLHMC_CEQPART_PMCEQBASE_SHIFT 0 +#define I40E_GLHMC_CEQPART_PMCEQBASE_MASK I40E_MASK(0xFF, I40E_GLHMC_CEQPART_PMCEQBASE_SHIFT) +#define I40E_GLHMC_CEQPART_PMCEQSIZE_SHIFT 16 +#define I40E_GLHMC_CEQPART_PMCEQSIZE_MASK I40E_MASK(0x1FF, I40E_GLHMC_CEQPART_PMCEQSIZE_SHIFT) +#define I40E_GLHMC_DBCQMAX 0x000C20F0 /* Reset: CORER */ +#define I40E_GLHMC_DBCQMAX_GLHMC_DBCQMAX_SHIFT 0 +#define I40E_GLHMC_DBCQMAX_GLHMC_DBCQMAX_MASK I40E_MASK(0x3FFFF, I40E_GLHMC_DBCQMAX_GLHMC_DBCQMAX_SHIFT) +#define I40E_GLHMC_DBCQPART(_i) (0x00131240 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_DBCQPART_MAX_INDEX 15 +#define I40E_GLHMC_DBCQPART_PMDBCQBASE_SHIFT 0 +#define I40E_GLHMC_DBCQPART_PMDBCQBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_DBCQPART_PMDBCQBASE_SHIFT) +#define I40E_GLHMC_DBCQPART_PMDBCQSIZE_SHIFT 16 +#define I40E_GLHMC_DBCQPART_PMDBCQSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_DBCQPART_PMDBCQSIZE_SHIFT) +#define I40E_GLHMC_DBQPMAX 0x000C20EC /* Reset: CORER */ +#define I40E_GLHMC_DBQPMAX_GLHMC_DBQPMAX_SHIFT 0 +#define I40E_GLHMC_DBQPMAX_GLHMC_DBQPMAX_MASK I40E_MASK(0x7FFFF, I40E_GLHMC_DBQPMAX_GLHMC_DBQPMAX_SHIFT) +#define I40E_GLHMC_DBQPPART(_i) (0x00138D80 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_DBQPPART_MAX_INDEX 15 +#define I40E_GLHMC_DBQPPART_PMDBQPBASE_SHIFT 0 +#define I40E_GLHMC_DBQPPART_PMDBQPBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_DBQPPART_PMDBQPBASE_SHIFT) +#define I40E_GLHMC_DBQPPART_PMDBQPSIZE_SHIFT 16 +#define I40E_GLHMC_DBQPPART_PMDBQPSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_DBQPPART_PMDBQPSIZE_SHIFT) +#define I40E_GLHMC_PEARPBASE(_i) (0x000C4800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PEARPBASE_MAX_INDEX 15 +#define I40E_GLHMC_PEARPBASE_FPMPEARPBASE_SHIFT 0 +#define I40E_GLHMC_PEARPBASE_FPMPEARPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEARPBASE_FPMPEARPBASE_SHIFT) +#define I40E_GLHMC_PEARPCNT(_i) (0x000C4900 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PEARPCNT_MAX_INDEX 15 +#define I40E_GLHMC_PEARPCNT_FPMPEARPCNT_SHIFT 0 +#define I40E_GLHMC_PEARPCNT_FPMPEARPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEARPCNT_FPMPEARPCNT_SHIFT) +#define I40E_GLHMC_PEARPMAX 0x000C2038 /* Reset: CORER */ +#define I40E_GLHMC_PEARPMAX_PMPEARPMAX_SHIFT 0 +#define I40E_GLHMC_PEARPMAX_PMPEARPMAX_MASK I40E_MASK(0x1FFFF, I40E_GLHMC_PEARPMAX_PMPEARPMAX_SHIFT) +#define I40E_GLHMC_PEARPOBJSZ 0x000C2034 /* Reset: CORER */ +#define 
I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_SHIFT 0 +#define I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_MASK I40E_MASK(0x7, I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_SHIFT) +#define I40E_GLHMC_PECQBASE(_i) (0x000C4200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PECQBASE_MAX_INDEX 15 +#define I40E_GLHMC_PECQBASE_FPMPECQBASE_SHIFT 0 +#define I40E_GLHMC_PECQBASE_FPMPECQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PECQBASE_FPMPECQBASE_SHIFT) +#define I40E_GLHMC_PECQCNT(_i) (0x000C4300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PECQCNT_MAX_INDEX 15 +#define I40E_GLHMC_PECQCNT_FPMPECQCNT_SHIFT 0 +#define I40E_GLHMC_PECQCNT_FPMPECQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PECQCNT_FPMPECQCNT_SHIFT) +#define I40E_GLHMC_PECQOBJSZ 0x000C2020 /* Reset: CORER */ +#define I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_SHIFT 0 +#define I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_SHIFT) +#define I40E_GLHMC_PEHTCNT(_i) (0x000C4700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PEHTCNT_MAX_INDEX 15 +#define I40E_GLHMC_PEHTCNT_FPMPEHTCNT_SHIFT 0 +#define I40E_GLHMC_PEHTCNT_FPMPEHTCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEHTCNT_FPMPEHTCNT_SHIFT) +#define I40E_GLHMC_PEHTEBASE(_i) (0x000C4600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PEHTEBASE_MAX_INDEX 15 +#define I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_SHIFT 0 +#define I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_SHIFT) +#define I40E_GLHMC_PEHTEOBJSZ 0x000C202c /* Reset: CORER */ +#define I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_SHIFT 0 +#define I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_SHIFT) +#define I40E_GLHMC_PEHTMAX 0x000C2030 /* Reset: CORER */ +#define I40E_GLHMC_PEHTMAX_PMPEHTMAX_SHIFT 0 +#define I40E_GLHMC_PEHTMAX_PMPEHTMAX_MASK I40E_MASK(0x1FFFFF, I40E_GLHMC_PEHTMAX_PMPEHTMAX_SHIFT) +#define I40E_GLHMC_PEMRBASE(_i) (0x000C4c00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PEMRBASE_MAX_INDEX 15 +#define I40E_GLHMC_PEMRBASE_FPMPEMRBASE_SHIFT 0 +#define I40E_GLHMC_PEMRBASE_FPMPEMRBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEMRBASE_FPMPEMRBASE_SHIFT) +#define I40E_GLHMC_PEMRCNT(_i) (0x000C4d00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PEMRCNT_MAX_INDEX 15 +#define I40E_GLHMC_PEMRCNT_FPMPEMRSZ_SHIFT 0 +#define I40E_GLHMC_PEMRCNT_FPMPEMRSZ_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEMRCNT_FPMPEMRSZ_SHIFT) +#define I40E_GLHMC_PEMRMAX 0x000C2040 /* Reset: CORER */ +#define I40E_GLHMC_PEMRMAX_PMPEMRMAX_SHIFT 0 +#define I40E_GLHMC_PEMRMAX_PMPEMRMAX_MASK I40E_MASK(0x7FFFFF, I40E_GLHMC_PEMRMAX_PMPEMRMAX_SHIFT) +#define I40E_GLHMC_PEMROBJSZ 0x000C203c /* Reset: CORER */ +#define I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_SHIFT 0 +#define I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_SHIFT) +#define I40E_GLHMC_PEPBLBASE(_i) (0x000C5800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PEPBLBASE_MAX_INDEX 15 +#define I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_SHIFT 0 +#define I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_SHIFT) +#define I40E_GLHMC_PEPBLCNT(_i) (0x000C5900 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PEPBLCNT_MAX_INDEX 15 +#define I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_SHIFT 0 +#define I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_SHIFT) +#define I40E_GLHMC_PEPBLMAX 0x000C206c 
/* Reset: CORER */ +#define I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_SHIFT 0 +#define I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_SHIFT) +#define I40E_GLHMC_PEPFFIRSTSD 0x000C20E4 /* Reset: CORER */ +#define I40E_GLHMC_PEPFFIRSTSD_GLHMC_PEPFFIRSTSD_SHIFT 0 +#define I40E_GLHMC_PEPFFIRSTSD_GLHMC_PEPFFIRSTSD_MASK I40E_MASK(0xFFF, I40E_GLHMC_PEPFFIRSTSD_GLHMC_PEPFFIRSTSD_SHIFT) +#define I40E_GLHMC_PEQ1BASE(_i) (0x000C5200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PEQ1BASE_MAX_INDEX 15 +#define I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_SHIFT 0 +#define I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_SHIFT) +#define I40E_GLHMC_PEQ1CNT(_i) (0x000C5300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PEQ1CNT_MAX_INDEX 15 +#define I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_SHIFT 0 +#define I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_SHIFT) +#define I40E_GLHMC_PEQ1FLBASE(_i) (0x000C5400 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PEQ1FLBASE_MAX_INDEX 15 +#define I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_SHIFT 0 +#define I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_SHIFT) +#define I40E_GLHMC_PEQ1FLMAX 0x000C2058 /* Reset: CORER */ +#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT 0 +#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_MASK I40E_MASK(0x3FFFFFF, I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT) +#define I40E_GLHMC_PEQ1MAX 0x000C2054 /* Reset: CORER */ +#define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT 0 +#define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_MASK I40E_MASK(0x3FFFFFF, I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT) +#define I40E_GLHMC_PEQ1OBJSZ 0x000C2050 /* Reset: CORER */ +#define I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_SHIFT 0 +#define I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_SHIFT) +#define I40E_GLHMC_PEQPBASE(_i) (0x000C4000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PEQPBASE_MAX_INDEX 15 +#define I40E_GLHMC_PEQPBASE_FPMPEQPBASE_SHIFT 0 +#define I40E_GLHMC_PEQPBASE_FPMPEQPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEQPBASE_FPMPEQPBASE_SHIFT) +#define I40E_GLHMC_PEQPCNT(_i) (0x000C4100 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PEQPCNT_MAX_INDEX 15 +#define I40E_GLHMC_PEQPCNT_FPMPEQPCNT_SHIFT 0 +#define I40E_GLHMC_PEQPCNT_FPMPEQPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEQPCNT_FPMPEQPCNT_SHIFT) +#define I40E_GLHMC_PEQPOBJSZ 0x000C201c /* Reset: CORER */ +#define I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_SHIFT 0 +#define I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_SHIFT) +#define I40E_GLHMC_PESRQBASE(_i) (0x000C4400 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PESRQBASE_MAX_INDEX 15 +#define I40E_GLHMC_PESRQBASE_FPMPESRQBASE_SHIFT 0 +#define I40E_GLHMC_PESRQBASE_FPMPESRQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PESRQBASE_FPMPESRQBASE_SHIFT) +#define I40E_GLHMC_PESRQCNT(_i) (0x000C4500 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PESRQCNT_MAX_INDEX 15 +#define I40E_GLHMC_PESRQCNT_FPMPESRQCNT_SHIFT 0 +#define I40E_GLHMC_PESRQCNT_FPMPESRQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PESRQCNT_FPMPESRQCNT_SHIFT) +#define I40E_GLHMC_PESRQMAX 0x000C2028 /* Reset: CORER */ +#define I40E_GLHMC_PESRQMAX_PMPESRQMAX_SHIFT 0 +#define I40E_GLHMC_PESRQMAX_PMPESRQMAX_MASK I40E_MASK(0xFFFF, I40E_GLHMC_PESRQMAX_PMPESRQMAX_SHIFT) +#define I40E_GLHMC_PESRQOBJSZ 0x000C2024 /* 
Reset: CORER */ +#define I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_SHIFT 0 +#define I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_SHIFT) +#define I40E_GLHMC_PETIMERBASE(_i) (0x000C5A00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PETIMERBASE_MAX_INDEX 15 +#define I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_SHIFT 0 +#define I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_SHIFT) +#define I40E_GLHMC_PETIMERCNT(_i) (0x000C5B00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PETIMERCNT_MAX_INDEX 15 +#define I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_SHIFT 0 +#define I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_SHIFT) +#define I40E_GLHMC_PETIMERMAX 0x000C2084 /* Reset: CORER */ +#define I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_SHIFT 0 +#define I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_SHIFT) +#define I40E_GLHMC_PETIMEROBJSZ 0x000C2080 /* Reset: CORER */ +#define I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_SHIFT 0 +#define I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_SHIFT) +#define I40E_GLHMC_PEXFBASE(_i) (0x000C4e00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PEXFBASE_MAX_INDEX 15 +#define I40E_GLHMC_PEXFBASE_FPMPEXFBASE_SHIFT 0 +#define I40E_GLHMC_PEXFBASE_FPMPEXFBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEXFBASE_FPMPEXFBASE_SHIFT) +#define I40E_GLHMC_PEXFCNT(_i) (0x000C4f00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PEXFCNT_MAX_INDEX 15 +#define I40E_GLHMC_PEXFCNT_FPMPEXFCNT_SHIFT 0 +#define I40E_GLHMC_PEXFCNT_FPMPEXFCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEXFCNT_FPMPEXFCNT_SHIFT) +#define I40E_GLHMC_PEXFFLBASE(_i) (0x000C5000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PEXFFLBASE_MAX_INDEX 15 +#define I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_SHIFT 0 +#define I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_SHIFT) +#define I40E_GLHMC_PEXFFLMAX 0x000C204c /* Reset: CORER */ +#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT 0 +#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_MASK I40E_MASK(0x1FFFFFF, I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT) +#define I40E_GLHMC_PEXFMAX 0x000C2048 /* Reset: CORER */ +#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT 0 +#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_MASK I40E_MASK(0x3FFFFFF, I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT) +#define I40E_GLHMC_PEXFOBJSZ 0x000C2044 /* Reset: CORER */ +#define I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_SHIFT 0 +#define I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_SHIFT) +#define I40E_GLHMC_PFPESDPART(_i) (0x000C0880 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PFPESDPART_MAX_INDEX 15 +#define I40E_GLHMC_PFPESDPART_PMSDBASE_SHIFT 0 +#define I40E_GLHMC_PFPESDPART_PMSDBASE_MASK I40E_MASK(0xFFF, I40E_GLHMC_PFPESDPART_PMSDBASE_SHIFT) +#define I40E_GLHMC_PFPESDPART_PMSDSIZE_SHIFT 16 +#define I40E_GLHMC_PFPESDPART_PMSDSIZE_MASK I40E_MASK(0x1FFF, I40E_GLHMC_PFPESDPART_PMSDSIZE_SHIFT) +#define I40E_GLHMC_VFAPBVTINUSEBASE(_i) (0x000Cca00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFAPBVTINUSEBASE_MAX_INDEX 31 +#define I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT 0 +#define I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT) 
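
The definitions above all follow one convention: a register offset (optionally indexed by `_i`, `_VF`, or `_Q`), a per-field `_SHIFT`, and a per-field `_MASK` built with `I40E_MASK`. As an illustrative aside (not part of the patch), and assuming `I40E_MASK(mask, shift)` expands to the usual pre-shifted form `((uint32_t)(mask) << (shift))` defined elsewhere in this header, a field such as the TX malicious-driver-detection queue number defined above could be decoded from a raw 32-bit register read like this:

    #include <stdint.h>

    /* Illustrative sketch: decode a raw I40E_GL_MDET_TX value using the
     * SHIFT/MASK pairs defined above. "value" would come from an MMIO read
     * of the I40E_GL_MDET_TX offset; the read helper itself is out of scope. */
    static inline int example_mdet_tx_queue(uint32_t value)
    {
    	if (!(value & I40E_GL_MDET_TX_VALID_MASK))
    		return -1;	/* no malicious-driver event latched */
    	return (int)((value & I40E_GL_MDET_TX_QUEUE_MASK) >>
    		     I40E_GL_MDET_TX_QUEUE_SHIFT);
    }

Writing a field works the same way in reverse: shift the new value left by the `_SHIFT`, AND it with the `_MASK`, and merge it into the register image before writing it back.
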
+#define I40E_GLHMC_VFCEQPART(_i) (0x00132240 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFCEQPART_MAX_INDEX 31 +#define I40E_GLHMC_VFCEQPART_PMCEQBASE_SHIFT 0 +#define I40E_GLHMC_VFCEQPART_PMCEQBASE_MASK I40E_MASK(0xFF, I40E_GLHMC_VFCEQPART_PMCEQBASE_SHIFT) +#define I40E_GLHMC_VFCEQPART_PMCEQSIZE_SHIFT 16 +#define I40E_GLHMC_VFCEQPART_PMCEQSIZE_MASK I40E_MASK(0x1FF, I40E_GLHMC_VFCEQPART_PMCEQSIZE_SHIFT) +#define I40E_GLHMC_VFDBCQPART(_i) (0x00132140 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFDBCQPART_MAX_INDEX 31 +#define I40E_GLHMC_VFDBCQPART_PMDBCQBASE_SHIFT 0 +#define I40E_GLHMC_VFDBCQPART_PMDBCQBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_VFDBCQPART_PMDBCQBASE_SHIFT) +#define I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_SHIFT 16 +#define I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_SHIFT) +#define I40E_GLHMC_VFDBQPPART(_i) (0x00138E00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFDBQPPART_MAX_INDEX 31 +#define I40E_GLHMC_VFDBQPPART_PMDBQPBASE_SHIFT 0 +#define I40E_GLHMC_VFDBQPPART_PMDBQPBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_VFDBQPPART_PMDBQPBASE_SHIFT) +#define I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_SHIFT 16 +#define I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_SHIFT) +#define I40E_GLHMC_VFFSIAVBASE(_i) (0x000Cd600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFFSIAVBASE_MAX_INDEX 31 +#define I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_SHIFT 0 +#define I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_SHIFT) +#define I40E_GLHMC_VFFSIAVCNT(_i) (0x000Cd700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFFSIAVCNT_MAX_INDEX 31 +#define I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_SHIFT 0 +#define I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_SHIFT) +#define I40E_GLHMC_VFPDINV(_i) (0x000C8300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPDINV_MAX_INDEX 31 +#define I40E_GLHMC_VFPDINV_PMSDIDX_SHIFT 0 +#define I40E_GLHMC_VFPDINV_PMSDIDX_MASK I40E_MASK(0xFFF, I40E_GLHMC_VFPDINV_PMSDIDX_SHIFT) +#define I40E_GLHMC_VFPDINV_PMSDPARTSEL_SHIFT 15 +#define I40E_GLHMC_VFPDINV_PMSDPARTSEL_MASK I40E_MASK(0x1, I40E_GLHMC_VFPDINV_PMSDPARTSEL_SHIFT) +#define I40E_GLHMC_VFPDINV_PMPDIDX_SHIFT 16 +#define I40E_GLHMC_VFPDINV_PMPDIDX_MASK I40E_MASK(0x1FF, I40E_GLHMC_VFPDINV_PMPDIDX_SHIFT) +#define I40E_GLHMC_VFPEARPBASE(_i) (0x000Cc800 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPEARPBASE_MAX_INDEX 31 +#define I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_SHIFT 0 +#define I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_SHIFT) +#define I40E_GLHMC_VFPEARPCNT(_i) (0x000Cc900 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPEARPCNT_MAX_INDEX 31 +#define I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_SHIFT 0 +#define I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_SHIFT) +#define I40E_GLHMC_VFPECQBASE(_i) (0x000Cc200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPECQBASE_MAX_INDEX 31 +#define I40E_GLHMC_VFPECQBASE_FPMPECQBASE_SHIFT 0 +#define I40E_GLHMC_VFPECQBASE_FPMPECQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPECQBASE_FPMPECQBASE_SHIFT) +#define I40E_GLHMC_VFPECQCNT(_i) (0x000Cc300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPECQCNT_MAX_INDEX 31 
+#define I40E_GLHMC_VFPECQCNT_FPMPECQCNT_SHIFT 0 +#define I40E_GLHMC_VFPECQCNT_FPMPECQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPECQCNT_FPMPECQCNT_SHIFT) +#define I40E_GLHMC_VFPEHTCNT(_i) (0x000Cc700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPEHTCNT_MAX_INDEX 31 +#define I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_SHIFT 0 +#define I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_SHIFT) +#define I40E_GLHMC_VFPEHTEBASE(_i) (0x000Cc600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPEHTEBASE_MAX_INDEX 31 +#define I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_SHIFT 0 +#define I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_SHIFT) +#define I40E_GLHMC_VFPEMRBASE(_i) (0x000Ccc00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPEMRBASE_MAX_INDEX 31 +#define I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_SHIFT 0 +#define I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_SHIFT) +#define I40E_GLHMC_VFPEMRCNT(_i) (0x000Ccd00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPEMRCNT_MAX_INDEX 31 +#define I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_SHIFT 0 +#define I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_SHIFT) +#define I40E_GLHMC_VFPEPBLBASE(_i) (0x000Cd800 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPEPBLBASE_MAX_INDEX 31 +#define I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_SHIFT 0 +#define I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_SHIFT) +#define I40E_GLHMC_VFPEPBLCNT(_i) (0x000Cd900 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPEPBLCNT_MAX_INDEX 31 +#define I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_SHIFT 0 +#define I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_SHIFT) +#define I40E_GLHMC_VFPEQ1BASE(_i) (0x000Cd200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPEQ1BASE_MAX_INDEX 31 +#define I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_SHIFT 0 +#define I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_SHIFT) +#define I40E_GLHMC_VFPEQ1CNT(_i) (0x000Cd300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPEQ1CNT_MAX_INDEX 31 +#define I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_SHIFT 0 +#define I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_SHIFT) +#define I40E_GLHMC_VFPEQ1FLBASE(_i) (0x000Cd400 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPEQ1FLBASE_MAX_INDEX 31 +#define I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_SHIFT 0 +#define I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_SHIFT) +#define I40E_GLHMC_VFPEQPBASE(_i) (0x000Cc000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPEQPBASE_MAX_INDEX 31 +#define I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_SHIFT 0 +#define I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_SHIFT) +#define I40E_GLHMC_VFPEQPCNT(_i) (0x000Cc100 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPEQPCNT_MAX_INDEX 31 +#define I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_SHIFT 0 +#define I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_SHIFT) +#define I40E_GLHMC_VFPESRQBASE(_i) (0x000Cc400 + ((_i) * 4)) /* _i=0...31 */ /* Reset: 
CORER */ +#define I40E_GLHMC_VFPESRQBASE_MAX_INDEX 31 +#define I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_SHIFT 0 +#define I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_SHIFT) +#define I40E_GLHMC_VFPESRQCNT(_i) (0x000Cc500 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPESRQCNT_MAX_INDEX 31 +#define I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_SHIFT 0 +#define I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_SHIFT) +#define I40E_GLHMC_VFPETIMERBASE(_i) (0x000CDA00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPETIMERBASE_MAX_INDEX 31 +#define I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_SHIFT 0 +#define I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_SHIFT) +#define I40E_GLHMC_VFPETIMERCNT(_i) (0x000CDB00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPETIMERCNT_MAX_INDEX 31 +#define I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_SHIFT 0 +#define I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_SHIFT) +#define I40E_GLHMC_VFPEXFBASE(_i) (0x000Cce00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPEXFBASE_MAX_INDEX 31 +#define I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_SHIFT 0 +#define I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_SHIFT) +#define I40E_GLHMC_VFPEXFCNT(_i) (0x000Ccf00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPEXFCNT_MAX_INDEX 31 +#define I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_SHIFT 0 +#define I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_SHIFT) +#define I40E_GLHMC_VFPEXFFLBASE(_i) (0x000Cd000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPEXFFLBASE_MAX_INDEX 31 +#define I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_SHIFT 0 +#define I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_SHIFT) +#define I40E_GLHMC_VFSDPART(_i) (0x000C8800 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFSDPART_MAX_INDEX 31 +#define I40E_GLHMC_VFSDPART_PMSDBASE_SHIFT 0 +#define I40E_GLHMC_VFSDPART_PMSDBASE_MASK I40E_MASK(0xFFF, I40E_GLHMC_VFSDPART_PMSDBASE_SHIFT) +#define I40E_GLHMC_VFSDPART_PMSDSIZE_SHIFT 16 +#define I40E_GLHMC_VFSDPART_PMSDSIZE_MASK I40E_MASK(0x1FFF, I40E_GLHMC_VFSDPART_PMSDSIZE_SHIFT) +#define I40E_GLPBLOC_CACHESIZE 0x000A80BC /* Reset: CORER */ +#define I40E_GLPBLOC_CACHESIZE_WORD_SIZE_SHIFT 0 +#define I40E_GLPBLOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLPBLOC_CACHESIZE_WORD_SIZE_SHIFT) +#define I40E_GLPBLOC_CACHESIZE_SETS_SHIFT 8 +#define I40E_GLPBLOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLPBLOC_CACHESIZE_SETS_SHIFT) +#define I40E_GLPBLOC_CACHESIZE_WAYS_SHIFT 20 +#define I40E_GLPBLOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLPBLOC_CACHESIZE_WAYS_SHIFT) +#define I40E_GLPDOC_CACHESIZE 0x000D0088 /* Reset: CORER */ +#define I40E_GLPDOC_CACHESIZE_WORD_SIZE_SHIFT 0 +#define I40E_GLPDOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLPDOC_CACHESIZE_WORD_SIZE_SHIFT) +#define I40E_GLPDOC_CACHESIZE_SETS_SHIFT 8 +#define I40E_GLPDOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLPDOC_CACHESIZE_SETS_SHIFT) +#define I40E_GLPDOC_CACHESIZE_WAYS_SHIFT 20 +#define I40E_GLPDOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLPDOC_CACHESIZE_WAYS_SHIFT) +#define I40E_GLPEOC_CACHESIZE 0x000A60E8 /* Reset: CORER */ +#define 
I40E_GLPEOC_CACHESIZE_WORD_SIZE_SHIFT 0 +#define I40E_GLPEOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLPEOC_CACHESIZE_WORD_SIZE_SHIFT) +#define I40E_GLPEOC_CACHESIZE_SETS_SHIFT 8 +#define I40E_GLPEOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLPEOC_CACHESIZE_SETS_SHIFT) +#define I40E_GLPEOC_CACHESIZE_WAYS_SHIFT 20 +#define I40E_GLPEOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLPEOC_CACHESIZE_WAYS_SHIFT) +#define I40E_PFHMC_PDINV_PMSDPARTSEL_SHIFT 15 +#define I40E_PFHMC_PDINV_PMSDPARTSEL_MASK I40E_MASK(0x1, I40E_PFHMC_PDINV_PMSDPARTSEL_SHIFT) +#define I40E_PFHMC_SDCMD_PMSDPARTSEL_SHIFT 15 +#define I40E_PFHMC_SDCMD_PMSDPARTSEL_MASK I40E_MASK(0x1, I40E_PFHMC_SDCMD_PMSDPARTSEL_SHIFT) +#define I40E_GL_PPRS_SPARE 0x000856E0 /* Reset: CORER */ +#define I40E_GL_PPRS_SPARE_GL_PPRS_SPARE_SHIFT 0 +#define I40E_GL_PPRS_SPARE_GL_PPRS_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_PPRS_SPARE_GL_PPRS_SPARE_SHIFT) +#define I40E_GL_TLAN_SPARE 0x000E64E0 /* Reset: CORER */ +#define I40E_GL_TLAN_SPARE_GL_TLAN_SPARE_SHIFT 0 +#define I40E_GL_TLAN_SPARE_GL_TLAN_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_TLAN_SPARE_GL_TLAN_SPARE_SHIFT) +#define I40E_GL_TUPM_SPARE 0x000a2230 /* Reset: CORER */ +#define I40E_GL_TUPM_SPARE_GL_TUPM_SPARE_SHIFT 0 +#define I40E_GL_TUPM_SPARE_GL_TUPM_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_TUPM_SPARE_GL_TUPM_SPARE_SHIFT) +#define I40E_GLGEN_CAR_DEBUG 0x000B81C0 /* Reset: POR */ +#define I40E_GLGEN_CAR_DEBUG_CAR_UPPER_CORE_CLK_EN_SHIFT 0 +#define I40E_GLGEN_CAR_DEBUG_CAR_UPPER_CORE_CLK_EN_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_UPPER_CORE_CLK_EN_SHIFT) +#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_HIU_CLK_EN_SHIFT 1 +#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_HIU_CLK_EN_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_HIU_CLK_EN_SHIFT) +#define I40E_GLGEN_CAR_DEBUG_CAR_PE_CLK_EN_SHIFT 2 +#define I40E_GLGEN_CAR_DEBUG_CAR_PE_CLK_EN_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PE_CLK_EN_SHIFT) +#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_PRIM_CLK_ACTIVE_SHIFT 3 +#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_PRIM_CLK_ACTIVE_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_PRIM_CLK_ACTIVE_SHIFT) +#define I40E_GLGEN_CAR_DEBUG_CDC_PE_ACTIVE_SHIFT 4 +#define I40E_GLGEN_CAR_DEBUG_CDC_PE_ACTIVE_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CDC_PE_ACTIVE_SHIFT) +#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_PRST_RESET_N_SHIFT 5 +#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_PRST_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_PRST_RESET_N_SHIFT) +#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_SCLR_RESET_N_SHIFT 6 +#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_SCLR_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_SCLR_RESET_N_SHIFT) +#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IB_RESET_N_SHIFT 7 +#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IB_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IB_RESET_N_SHIFT) +#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IMIB_RESET_N_SHIFT 8 +#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IMIB_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IMIB_RESET_N_SHIFT) +#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_EMP_RESET_N_SHIFT 9 +#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_EMP_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_RAW_EMP_RESET_N_SHIFT) +#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_GLOBAL_RESET_N_SHIFT 10 +#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_GLOBAL_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_RAW_GLOBAL_RESET_N_SHIFT) +#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_LAN_POWER_GOOD_SHIFT 11 +#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_LAN_POWER_GOOD_MASK I40E_MASK(0x1, 
I40E_GLGEN_CAR_DEBUG_CAR_RAW_LAN_POWER_GOOD_SHIFT) +#define I40E_GLGEN_CAR_DEBUG_CDC_IOSF_PRIMERY_RST_B_SHIFT 12 +#define I40E_GLGEN_CAR_DEBUG_CDC_IOSF_PRIMERY_RST_B_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CDC_IOSF_PRIMERY_RST_B_SHIFT) +#define I40E_GLGEN_CAR_DEBUG_GBE_GLOBALRST_B_SHIFT 13 +#define I40E_GLGEN_CAR_DEBUG_GBE_GLOBALRST_B_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_GBE_GLOBALRST_B_SHIFT) +#define I40E_GLGEN_CAR_DEBUG_FLEEP_AL_GLOBR_DONE_SHIFT 14 +#define I40E_GLGEN_CAR_DEBUG_FLEEP_AL_GLOBR_DONE_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_FLEEP_AL_GLOBR_DONE_SHIFT) +#define I40E_GLGEN_MISC_SPARE 0x000880E0 /* Reset: POR */ +#define I40E_GLGEN_MISC_SPARE_GLGEN_MISC_SPARE_SHIFT 0 +#define I40E_GLGEN_MISC_SPARE_GLGEN_MISC_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GLGEN_MISC_SPARE_GLGEN_MISC_SPARE_SHIFT) +#define I40E_GL_UFUSE_SOC 0x000BE550 /* Reset: POR */ +#define I40E_GL_UFUSE_SOC_PORT_MODE_SHIFT 0 +#define I40E_GL_UFUSE_SOC_PORT_MODE_MASK I40E_MASK(0x3, I40E_GL_UFUSE_SOC_PORT_MODE_SHIFT) +#define I40E_GL_UFUSE_SOC_NIC_ID_SHIFT 2 +#define I40E_GL_UFUSE_SOC_NIC_ID_MASK I40E_MASK(0x1, I40E_GL_UFUSE_SOC_NIC_ID_SHIFT) +#define I40E_GL_UFUSE_SOC_SPARE_FUSES_SHIFT 3 +#define I40E_GL_UFUSE_SOC_SPARE_FUSES_MASK I40E_MASK(0x1FFF, I40E_GL_UFUSE_SOC_SPARE_FUSES_SHIFT) +#define I40E_PFINT_DYN_CTL0_WB_ON_ITR_SHIFT 30 +#define I40E_PFINT_DYN_CTL0_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_WB_ON_ITR_SHIFT) +#define I40E_PFINT_DYN_CTLN_WB_ON_ITR_SHIFT 30 +#define I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_WB_ON_ITR_SHIFT) +#define I40E_VFINT_DYN_CTL0_WB_ON_ITR_SHIFT 30 +#define I40E_VFINT_DYN_CTL0_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_WB_ON_ITR_SHIFT) +#define I40E_VFINT_DYN_CTLN_WB_ON_ITR_SHIFT 30 +#define I40E_VFINT_DYN_CTLN_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_WB_ON_ITR_SHIFT) +#define I40E_VPLAN_QBASE(_VF) (0x00074800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VPLAN_QBASE_MAX_INDEX 127 +#define I40E_VPLAN_QBASE_VFFIRSTQ_SHIFT 0 +#define I40E_VPLAN_QBASE_VFFIRSTQ_MASK I40E_MASK(0x7FF, I40E_VPLAN_QBASE_VFFIRSTQ_SHIFT) +#define I40E_VPLAN_QBASE_VFNUMQ_SHIFT 11 +#define I40E_VPLAN_QBASE_VFNUMQ_MASK I40E_MASK(0xFF, I40E_VPLAN_QBASE_VFNUMQ_SHIFT) +#define I40E_VPLAN_QBASE_VFQTABLE_ENA_SHIFT 31 +#define I40E_VPLAN_QBASE_VFQTABLE_ENA_MASK I40E_MASK(0x1, I40E_VPLAN_QBASE_VFQTABLE_ENA_SHIFT) +#define I40E_PRTMAC_LINK_DOWN_COUNTER 0x001E2440 /* Reset: GLOBR */ +#define I40E_PRTMAC_LINK_DOWN_COUNTER_LINK_DOWN_COUNTER_SHIFT 0 +#define I40E_PRTMAC_LINK_DOWN_COUNTER_LINK_DOWN_COUNTER_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_LINK_DOWN_COUNTER_LINK_DOWN_COUNTER_SHIFT) +#define I40E_GLNVM_AL_REQ 0x000B6164 /* Reset: POR */ +#define I40E_GLNVM_AL_REQ_POR_SHIFT 0 +#define I40E_GLNVM_AL_REQ_POR_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_POR_SHIFT) +#define I40E_GLNVM_AL_REQ_PCIE_IMIB_SHIFT 1 +#define I40E_GLNVM_AL_REQ_PCIE_IMIB_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_PCIE_IMIB_SHIFT) +#define I40E_GLNVM_AL_REQ_GLOBR_SHIFT 2 +#define I40E_GLNVM_AL_REQ_GLOBR_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_GLOBR_SHIFT) +#define I40E_GLNVM_AL_REQ_CORER_SHIFT 3 +#define I40E_GLNVM_AL_REQ_CORER_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_CORER_SHIFT) +#define I40E_GLNVM_AL_REQ_PE_SHIFT 4 +#define I40E_GLNVM_AL_REQ_PE_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_PE_SHIFT) +#define I40E_GLNVM_AL_REQ_PCIE_IMIB_ASSERT_SHIFT 5 +#define I40E_GLNVM_AL_REQ_PCIE_IMIB_ASSERT_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_PCIE_IMIB_ASSERT_SHIFT) +#define I40E_GLNVM_ALTIMERS 0x000B6140 /* Reset: POR */ 
+#define I40E_GLNVM_ALTIMERS_PCI_ALTIMER_SHIFT 0 +#define I40E_GLNVM_ALTIMERS_PCI_ALTIMER_MASK I40E_MASK(0xFFF, I40E_GLNVM_ALTIMERS_PCI_ALTIMER_SHIFT) +#define I40E_GLNVM_ALTIMERS_GEN_ALTIMER_SHIFT 12 +#define I40E_GLNVM_ALTIMERS_GEN_ALTIMER_MASK I40E_MASK(0xFFFFF, I40E_GLNVM_ALTIMERS_GEN_ALTIMER_SHIFT) +#define I40E_GLNVM_FLA 0x000B6108 /* Reset: POR */ +#define I40E_GLNVM_FLA_LOCKED_SHIFT 6 +#define I40E_GLNVM_FLA_LOCKED_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_LOCKED_SHIFT) + +#define I40E_GLNVM_ULD 0x000B6008 /* Reset: POR */ +#define I40E_GLNVM_ULD_PCIER_DONE_SHIFT 0 +#define I40E_GLNVM_ULD_PCIER_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PCIER_DONE_SHIFT) +#define I40E_GLNVM_ULD_PCIER_DONE_1_SHIFT 1 +#define I40E_GLNVM_ULD_PCIER_DONE_1_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PCIER_DONE_1_SHIFT) +#define I40E_GLNVM_ULD_CORER_DONE_SHIFT 3 +#define I40E_GLNVM_ULD_CORER_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CORER_DONE_SHIFT) +#define I40E_GLNVM_ULD_GLOBR_DONE_SHIFT 4 +#define I40E_GLNVM_ULD_GLOBR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_GLOBR_DONE_SHIFT) +#define I40E_GLNVM_ULD_POR_DONE_SHIFT 5 +#define I40E_GLNVM_ULD_POR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_POR_DONE_SHIFT) +#define I40E_GLNVM_ULD_POR_DONE_1_SHIFT 8 +#define I40E_GLNVM_ULD_POR_DONE_1_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_POR_DONE_1_SHIFT) +#define I40E_GLNVM_ULD_PCIER_DONE_2_SHIFT 9 +#define I40E_GLNVM_ULD_PCIER_DONE_2_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PCIER_DONE_2_SHIFT) +#define I40E_GLNVM_ULD_PE_DONE_SHIFT 10 +#define I40E_GLNVM_ULD_PE_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PE_DONE_SHIFT) +#define I40E_GLNVM_ULT 0x000B6154 /* Reset: POR */ +#define I40E_GLNVM_ULT_CONF_PCIR_AE_SHIFT 0 +#define I40E_GLNVM_ULT_CONF_PCIR_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_PCIR_AE_SHIFT) +#define I40E_GLNVM_ULT_CONF_PCIRTL_AE_SHIFT 1 +#define I40E_GLNVM_ULT_CONF_PCIRTL_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_PCIRTL_AE_SHIFT) +#define I40E_GLNVM_ULT_RESERVED_1_SHIFT 2 +#define I40E_GLNVM_ULT_RESERVED_1_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_RESERVED_1_SHIFT) +#define I40E_GLNVM_ULT_CONF_CORE_AE_SHIFT 3 +#define I40E_GLNVM_ULT_CONF_CORE_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_CORE_AE_SHIFT) +#define I40E_GLNVM_ULT_CONF_GLOBAL_AE_SHIFT 4 +#define I40E_GLNVM_ULT_CONF_GLOBAL_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_GLOBAL_AE_SHIFT) +#define I40E_GLNVM_ULT_CONF_POR_AE_SHIFT 5 +#define I40E_GLNVM_ULT_CONF_POR_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_POR_AE_SHIFT) +#define I40E_GLNVM_ULT_RESERVED_2_SHIFT 6 +#define I40E_GLNVM_ULT_RESERVED_2_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_RESERVED_2_SHIFT) +#define I40E_GLNVM_ULT_RESERVED_3_SHIFT 7 +#define I40E_GLNVM_ULT_RESERVED_3_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_RESERVED_3_SHIFT) +#define I40E_GLNVM_ULT_CONF_EMP_AE_SHIFT 8 +#define I40E_GLNVM_ULT_CONF_EMP_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_EMP_AE_SHIFT) +#define I40E_GLNVM_ULT_CONF_PCIALT_AE_SHIFT 9 +#define I40E_GLNVM_ULT_CONF_PCIALT_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_PCIALT_AE_SHIFT) +#define I40E_GLNVM_ULT_RESERVED_4_SHIFT 10 +#define I40E_GLNVM_ULT_RESERVED_4_MASK I40E_MASK(0x3FFFFF, I40E_GLNVM_ULT_RESERVED_4_SHIFT) +#define I40E_MEM_INIT_DONE_STAT 0x000B615C /* Reset: POR */ +#define I40E_MEM_INIT_DONE_STAT_CMLAN_MEM_INIT_DONE_SHIFT 0 +#define I40E_MEM_INIT_DONE_STAT_CMLAN_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_CMLAN_MEM_INIT_DONE_SHIFT) +#define I40E_MEM_INIT_DONE_STAT_PMAT_MEM_INIT_DONE_SHIFT 1 +#define I40E_MEM_INIT_DONE_STAT_PMAT_MEM_INIT_DONE_MASK I40E_MASK(0x1, 
I40E_MEM_INIT_DONE_STAT_PMAT_MEM_INIT_DONE_SHIFT) +#define I40E_MEM_INIT_DONE_STAT_RCU_MEM_INIT_DONE_SHIFT 2 +#define I40E_MEM_INIT_DONE_STAT_RCU_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RCU_MEM_INIT_DONE_SHIFT) +#define I40E_MEM_INIT_DONE_STAT_TDPU_MEM_INIT_DONE_SHIFT 3 +#define I40E_MEM_INIT_DONE_STAT_TDPU_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TDPU_MEM_INIT_DONE_SHIFT) +#define I40E_MEM_INIT_DONE_STAT_TLAN_MEM_INIT_DONE_SHIFT 4 +#define I40E_MEM_INIT_DONE_STAT_TLAN_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TLAN_MEM_INIT_DONE_SHIFT) +#define I40E_MEM_INIT_DONE_STAT_RLAN_MEM_INIT_DONE_SHIFT 5 +#define I40E_MEM_INIT_DONE_STAT_RLAN_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RLAN_MEM_INIT_DONE_SHIFT) +#define I40E_MEM_INIT_DONE_STAT_RDPU_MEM_INIT_DONE_SHIFT 6 +#define I40E_MEM_INIT_DONE_STAT_RDPU_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RDPU_MEM_INIT_DONE_SHIFT) +#define I40E_MEM_INIT_DONE_STAT_PPRS_MEM_INIT_DONE_SHIFT 7 +#define I40E_MEM_INIT_DONE_STAT_PPRS_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_PPRS_MEM_INIT_DONE_SHIFT) +#define I40E_MEM_INIT_DONE_STAT_RPB_MEM_INIT_DONE_SHIFT 8 +#define I40E_MEM_INIT_DONE_STAT_RPB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RPB_MEM_INIT_DONE_SHIFT) +#define I40E_MEM_INIT_DONE_STAT_TPB_MEM_INIT_DONE_SHIFT 9 +#define I40E_MEM_INIT_DONE_STAT_TPB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TPB_MEM_INIT_DONE_SHIFT) +#define I40E_MEM_INIT_DONE_STAT_FOC_MEM_INIT_DONE_SHIFT 10 +#define I40E_MEM_INIT_DONE_STAT_FOC_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_FOC_MEM_INIT_DONE_SHIFT) +#define I40E_MEM_INIT_DONE_STAT_TSCD_MEM_INIT_DONE_SHIFT 11 +#define I40E_MEM_INIT_DONE_STAT_TSCD_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TSCD_MEM_INIT_DONE_SHIFT) +#define I40E_MEM_INIT_DONE_STAT_TCB_MEM_INIT_DONE_SHIFT 12 +#define I40E_MEM_INIT_DONE_STAT_TCB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TCB_MEM_INIT_DONE_SHIFT) +#define I40E_MEM_INIT_DONE_STAT_RCB_MEM_INIT_DONE_SHIFT 13 +#define I40E_MEM_INIT_DONE_STAT_RCB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RCB_MEM_INIT_DONE_SHIFT) +#define I40E_MEM_INIT_DONE_STAT_WUC_MEM_INIT_DONE_SHIFT 14 +#define I40E_MEM_INIT_DONE_STAT_WUC_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_WUC_MEM_INIT_DONE_SHIFT) +#define I40E_MEM_INIT_DONE_STAT_STAT_MEM_INIT_DONE_SHIFT 15 +#define I40E_MEM_INIT_DONE_STAT_STAT_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_STAT_MEM_INIT_DONE_SHIFT) +#define I40E_MEM_INIT_DONE_STAT_ITR_MEM_INIT_DONE_SHIFT 16 +#define I40E_MEM_INIT_DONE_STAT_ITR_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_ITR_MEM_INIT_DONE_SHIFT) +#define I40E_MNGSB_DADD 0x000B7030 /* Reset: POR */ +#define I40E_MNGSB_DADD_ADDR_SHIFT 0 +#define I40E_MNGSB_DADD_ADDR_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_DADD_ADDR_SHIFT) +#define I40E_MNGSB_DCNT 0x000B7034 /* Reset: POR */ +#define I40E_MNGSB_DCNT_BYTE_CNT_SHIFT 0 +#define I40E_MNGSB_DCNT_BYTE_CNT_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_DCNT_BYTE_CNT_SHIFT) +#define I40E_MNGSB_MSGCTL 0x000B7020 /* Reset: POR */ +#define I40E_MNGSB_MSGCTL_HDR_DWS_SHIFT 0 +#define I40E_MNGSB_MSGCTL_HDR_DWS_MASK I40E_MASK(0x3, I40E_MNGSB_MSGCTL_HDR_DWS_SHIFT) +#define I40E_MNGSB_MSGCTL_EXP_RDW_SHIFT 8 +#define I40E_MNGSB_MSGCTL_EXP_RDW_MASK I40E_MASK(0x1FF, I40E_MNGSB_MSGCTL_EXP_RDW_SHIFT) +#define I40E_MNGSB_MSGCTL_MSG_MODE_SHIFT 26 +#define I40E_MNGSB_MSGCTL_MSG_MODE_MASK I40E_MASK(0x3, 
I40E_MNGSB_MSGCTL_MSG_MODE_SHIFT) +#define I40E_MNGSB_MSGCTL_TOKEN_MODE_SHIFT 28 +#define I40E_MNGSB_MSGCTL_TOKEN_MODE_MASK I40E_MASK(0x3, I40E_MNGSB_MSGCTL_TOKEN_MODE_SHIFT) +#define I40E_MNGSB_MSGCTL_BARCLR_SHIFT 30 +#define I40E_MNGSB_MSGCTL_BARCLR_MASK I40E_MASK(0x1, I40E_MNGSB_MSGCTL_BARCLR_SHIFT) +#define I40E_MNGSB_MSGCTL_CMDV_SHIFT 31 +#define I40E_MNGSB_MSGCTL_CMDV_MASK I40E_MASK(0x1, I40E_MNGSB_MSGCTL_CMDV_SHIFT) +#define I40E_MNGSB_RDATA 0x000B7300 /* Reset: POR */ +#define I40E_MNGSB_RDATA_DATA_SHIFT 0 +#define I40E_MNGSB_RDATA_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_RDATA_DATA_SHIFT) +#define I40E_MNGSB_RHDR0 0x000B72FC /* Reset: POR */ +#define I40E_MNGSB_RHDR0_DESTINATION_SHIFT 0 +#define I40E_MNGSB_RHDR0_DESTINATION_MASK I40E_MASK(0xFF, I40E_MNGSB_RHDR0_DESTINATION_SHIFT) +#define I40E_MNGSB_RHDR0_SOURCE_SHIFT 8 +#define I40E_MNGSB_RHDR0_SOURCE_MASK I40E_MASK(0xFF, I40E_MNGSB_RHDR0_SOURCE_SHIFT) +#define I40E_MNGSB_RHDR0_OPCODE_SHIFT 16 +#define I40E_MNGSB_RHDR0_OPCODE_MASK I40E_MASK(0xFF, I40E_MNGSB_RHDR0_OPCODE_SHIFT) +#define I40E_MNGSB_RHDR0_TAG_SHIFT 24 +#define I40E_MNGSB_RHDR0_TAG_MASK I40E_MASK(0x7, I40E_MNGSB_RHDR0_TAG_SHIFT) +#define I40E_MNGSB_RHDR0_RESPONSE_SHIFT 27 +#define I40E_MNGSB_RHDR0_RESPONSE_MASK I40E_MASK(0x7, I40E_MNGSB_RHDR0_RESPONSE_SHIFT) +#define I40E_MNGSB_RHDR0_EH_SHIFT 31 +#define I40E_MNGSB_RHDR0_EH_MASK I40E_MASK(0x1, I40E_MNGSB_RHDR0_EH_SHIFT) +#define I40E_MNGSB_RSPCTL 0x000B7024 /* Reset: POR */ +#define I40E_MNGSB_RSPCTL_DMA_MSG_DWORDS_SHIFT 0 +#define I40E_MNGSB_RSPCTL_DMA_MSG_DWORDS_MASK I40E_MASK(0x1FF, I40E_MNGSB_RSPCTL_DMA_MSG_DWORDS_SHIFT) +#define I40E_MNGSB_RSPCTL_RSP_MODE_SHIFT 26 +#define I40E_MNGSB_RSPCTL_RSP_MODE_MASK I40E_MASK(0x3, I40E_MNGSB_RSPCTL_RSP_MODE_SHIFT) +#define I40E_MNGSB_RSPCTL_RSP_BAD_LEN_SHIFT 30 +#define I40E_MNGSB_RSPCTL_RSP_BAD_LEN_MASK I40E_MASK(0x1, I40E_MNGSB_RSPCTL_RSP_BAD_LEN_SHIFT) +#define I40E_MNGSB_RSPCTL_RSP_ERR_SHIFT 31 +#define I40E_MNGSB_RSPCTL_RSP_ERR_MASK I40E_MASK(0x1, I40E_MNGSB_RSPCTL_RSP_ERR_SHIFT) +#define I40E_MNGSB_WDATA 0x000B7100 /* Reset: POR */ +#define I40E_MNGSB_WDATA_DATA_SHIFT 0 +#define I40E_MNGSB_WDATA_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_WDATA_DATA_SHIFT) +#define I40E_MNGSB_WHDR0 0x000B70F4 /* Reset: POR */ +#define I40E_MNGSB_WHDR0_RAW_DEST_SHIFT 0 +#define I40E_MNGSB_WHDR0_RAW_DEST_MASK I40E_MASK(0xFF, I40E_MNGSB_WHDR0_RAW_DEST_SHIFT) +#define I40E_MNGSB_WHDR0_DEST_SEL_SHIFT 12 +#define I40E_MNGSB_WHDR0_DEST_SEL_MASK I40E_MASK(0xF, I40E_MNGSB_WHDR0_DEST_SEL_SHIFT) +#define I40E_MNGSB_WHDR0_OPCODE_SEL_SHIFT 16 +#define I40E_MNGSB_WHDR0_OPCODE_SEL_MASK I40E_MASK(0xFF, I40E_MNGSB_WHDR0_OPCODE_SEL_SHIFT) +#define I40E_MNGSB_WHDR0_TAG_SHIFT 24 +#define I40E_MNGSB_WHDR0_TAG_MASK I40E_MASK(0x7F, I40E_MNGSB_WHDR0_TAG_SHIFT) +#define I40E_MNGSB_WHDR1 0x000B70F8 /* Reset: POR */ +#define I40E_MNGSB_WHDR1_ADDR_SHIFT 0 +#define I40E_MNGSB_WHDR1_ADDR_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_WHDR1_ADDR_SHIFT) +#define I40E_MNGSB_WHDR2 0x000B70FC /* Reset: POR */ +#define I40E_MNGSB_WHDR2_LENGTH_SHIFT 0 +#define I40E_MNGSB_WHDR2_LENGTH_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_WHDR2_LENGTH_SHIFT) + +#define I40E_GLPCI_CAPSUP_WAKUP_EN_SHIFT 21 +#define I40E_GLPCI_CAPSUP_WAKUP_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_WAKUP_EN_SHIFT) + +#define I40E_GLPCI_CUR_CLNT_COMMON 0x0009CA18 /* Reset: PCIR */ +#define I40E_GLPCI_CUR_CLNT_COMMON_DATA_LINES_SHIFT 0 +#define I40E_GLPCI_CUR_CLNT_COMMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_CLNT_COMMON_DATA_LINES_SHIFT) +#define 
I40E_GLPCI_CUR_CLNT_COMMON_OSR_SHIFT 16 +#define I40E_GLPCI_CUR_CLNT_COMMON_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_CLNT_COMMON_OSR_SHIFT) +#define I40E_GLPCI_CUR_CLNT_PIPEMON 0x0009CA20 /* Reset: PCIR */ +#define I40E_GLPCI_CUR_CLNT_PIPEMON_DATA_LINES_SHIFT 0 +#define I40E_GLPCI_CUR_CLNT_PIPEMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_CLNT_PIPEMON_DATA_LINES_SHIFT) +#define I40E_GLPCI_CUR_MNG_ALWD 0x0009c514 /* Reset: PCIR */ +#define I40E_GLPCI_CUR_MNG_ALWD_DATA_LINES_SHIFT 0 +#define I40E_GLPCI_CUR_MNG_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_ALWD_DATA_LINES_SHIFT) +#define I40E_GLPCI_CUR_MNG_ALWD_OSR_SHIFT 16 +#define I40E_GLPCI_CUR_MNG_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_ALWD_OSR_SHIFT) +#define I40E_GLPCI_CUR_MNG_RSVD 0x0009c594 /* Reset: PCIR */ +#define I40E_GLPCI_CUR_MNG_RSVD_DATA_LINES_SHIFT 0 +#define I40E_GLPCI_CUR_MNG_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_RSVD_DATA_LINES_SHIFT) +#define I40E_GLPCI_CUR_MNG_RSVD_OSR_SHIFT 16 +#define I40E_GLPCI_CUR_MNG_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_RSVD_OSR_SHIFT) +#define I40E_GLPCI_CUR_PMAT_ALWD 0x0009c510 /* Reset: PCIR */ +#define I40E_GLPCI_CUR_PMAT_ALWD_DATA_LINES_SHIFT 0 +#define I40E_GLPCI_CUR_PMAT_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_ALWD_DATA_LINES_SHIFT) +#define I40E_GLPCI_CUR_PMAT_ALWD_OSR_SHIFT 16 +#define I40E_GLPCI_CUR_PMAT_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_ALWD_OSR_SHIFT) +#define I40E_GLPCI_CUR_PMAT_RSVD 0x0009c590 /* Reset: PCIR */ +#define I40E_GLPCI_CUR_PMAT_RSVD_DATA_LINES_SHIFT 0 +#define I40E_GLPCI_CUR_PMAT_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_RSVD_DATA_LINES_SHIFT) +#define I40E_GLPCI_CUR_PMAT_RSVD_OSR_SHIFT 16 +#define I40E_GLPCI_CUR_PMAT_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_RSVD_OSR_SHIFT) +#define I40E_GLPCI_CUR_RLAN_ALWD 0x0009c500 /* Reset: PCIR */ +#define I40E_GLPCI_CUR_RLAN_ALWD_DATA_LINES_SHIFT 0 +#define I40E_GLPCI_CUR_RLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_ALWD_DATA_LINES_SHIFT) +#define I40E_GLPCI_CUR_RLAN_ALWD_OSR_SHIFT 16 +#define I40E_GLPCI_CUR_RLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_ALWD_OSR_SHIFT) +#define I40E_GLPCI_CUR_RLAN_RSVD 0x0009c580 /* Reset: PCIR */ +#define I40E_GLPCI_CUR_RLAN_RSVD_DATA_LINES_SHIFT 0 +#define I40E_GLPCI_CUR_RLAN_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_RSVD_DATA_LINES_SHIFT) +#define I40E_GLPCI_CUR_RLAN_RSVD_OSR_SHIFT 16 +#define I40E_GLPCI_CUR_RLAN_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_RSVD_OSR_SHIFT) +#define I40E_GLPCI_CUR_RXPE_ALWD 0x0009c508 /* Reset: PCIR */ +#define I40E_GLPCI_CUR_RXPE_ALWD_DATA_LINES_SHIFT 0 +#define I40E_GLPCI_CUR_RXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_ALWD_DATA_LINES_SHIFT) +#define I40E_GLPCI_CUR_RXPE_ALWD_OSR_SHIFT 16 +#define I40E_GLPCI_CUR_RXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_ALWD_OSR_SHIFT) +#define I40E_GLPCI_CUR_RXPE_RSVD 0x0009c588 /* Reset: PCIR */ +#define I40E_GLPCI_CUR_RXPE_RSVD_DATA_LINES_SHIFT 0 +#define I40E_GLPCI_CUR_RXPE_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_RSVD_DATA_LINES_SHIFT) +#define I40E_GLPCI_CUR_RXPE_RSVD_OSR_SHIFT 16 +#define I40E_GLPCI_CUR_RXPE_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_RSVD_OSR_SHIFT) +#define I40E_GLPCI_CUR_TDPU_ALWD 0x0009c518 /* Reset: PCIR */ +#define I40E_GLPCI_CUR_TDPU_ALWD_DATA_LINES_SHIFT 0 +#define I40E_GLPCI_CUR_TDPU_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, 
I40E_GLPCI_CUR_TDPU_ALWD_DATA_LINES_SHIFT) +#define I40E_GLPCI_CUR_TDPU_ALWD_OSR_SHIFT 16 +#define I40E_GLPCI_CUR_TDPU_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_ALWD_OSR_SHIFT) +#define I40E_GLPCI_CUR_TDPU_RSVD 0x0009c598 /* Reset: PCIR */ +#define I40E_GLPCI_CUR_TDPU_RSVD_DATA_LINES_SHIFT 0 +#define I40E_GLPCI_CUR_TDPU_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_RSVD_DATA_LINES_SHIFT) +#define I40E_GLPCI_CUR_TDPU_RSVD_OSR_SHIFT 16 +#define I40E_GLPCI_CUR_TDPU_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_RSVD_OSR_SHIFT) +#define I40E_GLPCI_CUR_TLAN_ALWD 0x0009c504 /* Reset: PCIR */ +#define I40E_GLPCI_CUR_TLAN_ALWD_DATA_LINES_SHIFT 0 +#define I40E_GLPCI_CUR_TLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_ALWD_DATA_LINES_SHIFT) +#define I40E_GLPCI_CUR_TLAN_ALWD_OSR_SHIFT 16 +#define I40E_GLPCI_CUR_TLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_ALWD_OSR_SHIFT) +#define I40E_GLPCI_CUR_TLAN_RSVD 0x0009c584 /* Reset: PCIR */ +#define I40E_GLPCI_CUR_TLAN_RSVD_DATA_LINES_SHIFT 0 +#define I40E_GLPCI_CUR_TLAN_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_RSVD_DATA_LINES_SHIFT) +#define I40E_GLPCI_CUR_TLAN_RSVD_OSR_SHIFT 16 +#define I40E_GLPCI_CUR_TLAN_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_RSVD_OSR_SHIFT) +#define I40E_GLPCI_CUR_TXPE_ALWD 0x0009c50C /* Reset: PCIR */ +#define I40E_GLPCI_CUR_TXPE_ALWD_DATA_LINES_SHIFT 0 +#define I40E_GLPCI_CUR_TXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_ALWD_DATA_LINES_SHIFT) +#define I40E_GLPCI_CUR_TXPE_ALWD_OSR_SHIFT 16 +#define I40E_GLPCI_CUR_TXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_ALWD_OSR_SHIFT) +#define I40E_GLPCI_CUR_TXPE_RSVD 0x0009c58c /* Reset: PCIR */ +#define I40E_GLPCI_CUR_TXPE_RSVD_DATA_LINES_SHIFT 0 +#define I40E_GLPCI_CUR_TXPE_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_RSVD_DATA_LINES_SHIFT) +#define I40E_GLPCI_CUR_TXPE_RSVD_OSR_SHIFT 16 +#define I40E_GLPCI_CUR_TXPE_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_RSVD_OSR_SHIFT) +#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON 0x0009CA28 /* Reset: PCIR */ +#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_DATA_LINES_SHIFT 0 +#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_WATMK_CLNT_COMMON_DATA_LINES_SHIFT) +#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_OSR_SHIFT 16 +#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_WATMK_CLNT_COMMON_OSR_SHIFT) + +#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT 4 +#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_MASK I40E_MASK(0x3, I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT) +#define I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_SHIFT 10 +#define I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_SHIFT) +#define I40E_GLPCI_NPQ_CFG 0x0009CA00 /* Reset: PCIR */ +#define I40E_GLPCI_NPQ_CFG_EXTEND_TO_SHIFT 0 +#define I40E_GLPCI_NPQ_CFG_EXTEND_TO_MASK I40E_MASK(0x1, I40E_GLPCI_NPQ_CFG_EXTEND_TO_SHIFT) +#define I40E_GLPCI_NPQ_CFG_SMALL_TO_SHIFT 1 +#define I40E_GLPCI_NPQ_CFG_SMALL_TO_MASK I40E_MASK(0x1, I40E_GLPCI_NPQ_CFG_SMALL_TO_SHIFT) +#define I40E_GLPCI_NPQ_CFG_WEIGHT_AVG_SHIFT 2 +#define I40E_GLPCI_NPQ_CFG_WEIGHT_AVG_MASK I40E_MASK(0xF, I40E_GLPCI_NPQ_CFG_WEIGHT_AVG_SHIFT) +#define I40E_GLPCI_NPQ_CFG_NPQ_SPARE_SHIFT 6 +#define I40E_GLPCI_NPQ_CFG_NPQ_SPARE_MASK I40E_MASK(0x3FF, I40E_GLPCI_NPQ_CFG_NPQ_SPARE_SHIFT) +#define I40E_GLPCI_NPQ_CFG_NPQ_ERR_STAT_SHIFT 16 +#define I40E_GLPCI_NPQ_CFG_NPQ_ERR_STAT_MASK I40E_MASK(0xF, I40E_GLPCI_NPQ_CFG_NPQ_ERR_STAT_SHIFT) +#define 
I40E_GLPCI_WATMK_CLNT_PIPEMON 0x0009CA30 /* Reset: PCIR */ +#define I40E_GLPCI_WATMK_CLNT_PIPEMON_DATA_LINES_SHIFT 0 +#define I40E_GLPCI_WATMK_CLNT_PIPEMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_CLNT_PIPEMON_DATA_LINES_SHIFT) +#define I40E_GLPCI_WATMK_MNG_ALWD 0x0009CB14 /* Reset: PCIR */ +#define I40E_GLPCI_WATMK_MNG_ALWD_DATA_LINES_SHIFT 0 +#define I40E_GLPCI_WATMK_MNG_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_MNG_ALWD_DATA_LINES_SHIFT) +#define I40E_GLPCI_WATMK_MNG_ALWD_OSR_SHIFT 16 +#define I40E_GLPCI_WATMK_MNG_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_MNG_ALWD_OSR_SHIFT) +#define I40E_GLPCI_WATMK_PMAT_ALWD 0x0009CB10 /* Reset: PCIR */ +#define I40E_GLPCI_WATMK_PMAT_ALWD_DATA_LINES_SHIFT 0 +#define I40E_GLPCI_WATMK_PMAT_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_PMAT_ALWD_DATA_LINES_SHIFT) +#define I40E_GLPCI_WATMK_PMAT_ALWD_OSR_SHIFT 16 +#define I40E_GLPCI_WATMK_PMAT_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_PMAT_ALWD_OSR_SHIFT) +#define I40E_GLPCI_WATMK_RLAN_ALWD 0x0009CB00 /* Reset: PCIR */ +#define I40E_GLPCI_WATMK_RLAN_ALWD_DATA_LINES_SHIFT 0 +#define I40E_GLPCI_WATMK_RLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RLAN_ALWD_DATA_LINES_SHIFT) +#define I40E_GLPCI_WATMK_RLAN_ALWD_OSR_SHIFT 16 +#define I40E_GLPCI_WATMK_RLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RLAN_ALWD_OSR_SHIFT) +#define I40E_GLPCI_WATMK_RXPE_ALWD 0x0009CB08 /* Reset: PCIR */ +#define I40E_GLPCI_WATMK_RXPE_ALWD_DATA_LINES_SHIFT 0 +#define I40E_GLPCI_WATMK_RXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RXPE_ALWD_DATA_LINES_SHIFT) +#define I40E_GLPCI_WATMK_RXPE_ALWD_OSR_SHIFT 16 +#define I40E_GLPCI_WATMK_RXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RXPE_ALWD_OSR_SHIFT) +#define I40E_GLPCI_WATMK_TLAN_ALWD 0x0009CB04 /* Reset: PCIR */ +#define I40E_GLPCI_WATMK_TLAN_ALWD_DATA_LINES_SHIFT 0 +#define I40E_GLPCI_WATMK_TLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TLAN_ALWD_DATA_LINES_SHIFT) +#define I40E_GLPCI_WATMK_TLAN_ALWD_OSR_SHIFT 16 +#define I40E_GLPCI_WATMK_TLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TLAN_ALWD_OSR_SHIFT) +#define I40E_GLPCI_WATMK_TPDU_ALWD 0x0009CB18 /* Reset: PCIR */ +#define I40E_GLPCI_WATMK_TPDU_ALWD_DATA_LINES_SHIFT 0 +#define I40E_GLPCI_WATMK_TPDU_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TPDU_ALWD_DATA_LINES_SHIFT) +#define I40E_GLPCI_WATMK_TPDU_ALWD_OSR_SHIFT 16 +#define I40E_GLPCI_WATMK_TPDU_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TPDU_ALWD_OSR_SHIFT) +#define I40E_GLPCI_WATMK_TXPE_ALWD 0x0009CB0c /* Reset: PCIR */ +#define I40E_GLPCI_WATMK_TXPE_ALWD_DATA_LINES_SHIFT 0 +#define I40E_GLPCI_WATMK_TXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TXPE_ALWD_DATA_LINES_SHIFT) +#define I40E_GLPCI_WATMK_TXPE_ALWD_OSR_SHIFT 16 +#define I40E_GLPCI_WATMK_TXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TXPE_ALWD_OSR_SHIFT) +#define I40E_GLPE_CPUSTATUS0 0x0000D040 /* Reset: PE_CORER */ +#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT 0 +#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT) +#define I40E_GLPE_CPUSTATUS1 0x0000D044 /* Reset: PE_CORER */ +#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT 0 +#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT) +#define I40E_GLPE_CPUSTATUS2 0x0000D048 /* Reset: PE_CORER */ +#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT 0 +#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_MASK 
I40E_MASK(0xFFFFFFFF, I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT) +#define I40E_GLPE_CPUTRIG0 0x0000D060 /* Reset: PE_CORER */ +#define I40E_GLPE_CPUTRIG0_PECPUTRIG0_SHIFT 0 +#define I40E_GLPE_CPUTRIG0_PECPUTRIG0_MASK I40E_MASK(0xFFFF, I40E_GLPE_CPUTRIG0_PECPUTRIG0_SHIFT) +#define I40E_GLPE_CPUTRIG0_TEPREQUEST0_SHIFT 17 +#define I40E_GLPE_CPUTRIG0_TEPREQUEST0_MASK I40E_MASK(0x1, I40E_GLPE_CPUTRIG0_TEPREQUEST0_SHIFT) +#define I40E_GLPE_CPUTRIG0_OOPREQUEST0_SHIFT 18 +#define I40E_GLPE_CPUTRIG0_OOPREQUEST0_MASK I40E_MASK(0x1, I40E_GLPE_CPUTRIG0_OOPREQUEST0_SHIFT) +#define I40E_GLPE_DUAL40_RUPM 0x0000DA04 /* Reset: PE_CORER */ +#define I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_SHIFT 0 +#define I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_MASK I40E_MASK(0x1, I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_SHIFT) +#define I40E_GLPE_PFAEQEDROPCNT(_i) (0x00131440 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLPE_PFAEQEDROPCNT_MAX_INDEX 15 +#define I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_SHIFT 0 +#define I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_SHIFT) +#define I40E_GLPE_PFCEQEDROPCNT(_i) (0x001313C0 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLPE_PFCEQEDROPCNT_MAX_INDEX 15 +#define I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_SHIFT 0 +#define I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_SHIFT) +#define I40E_GLPE_PFCQEDROPCNT(_i) (0x00131340 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLPE_PFCQEDROPCNT_MAX_INDEX 15 +#define I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_SHIFT 0 +#define I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_SHIFT) +#define I40E_GLPE_RUPM_CQPPOOL 0x0000DACC /* Reset: PE_CORER */ +#define I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_SHIFT 0 +#define I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_SHIFT) +#define I40E_GLPE_RUPM_FLRPOOL 0x0000DAC4 /* Reset: PE_CORER */ +#define I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_SHIFT 0 +#define I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_SHIFT) +#define I40E_GLPE_RUPM_GCTL 0x0000DA00 /* Reset: PE_CORER */ +#define I40E_GLPE_RUPM_GCTL_ALLOFFTH_SHIFT 0 +#define I40E_GLPE_RUPM_GCTL_ALLOFFTH_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_GCTL_ALLOFFTH_SHIFT) +#define I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_SHIFT 26 +#define I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_SHIFT) +#define I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_SHIFT 27 +#define I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_SHIFT) +#define I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_SHIFT 28 +#define I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_SHIFT) +#define I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_SHIFT 29 +#define I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_SHIFT) +#define I40E_GLPE_RUPM_GCTL_RUPM_DIS_SHIFT 30 +#define I40E_GLPE_RUPM_GCTL_RUPM_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_DIS_SHIFT) +#define I40E_GLPE_RUPM_GCTL_SWLB_MODE_SHIFT 31 +#define I40E_GLPE_RUPM_GCTL_SWLB_MODE_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_SWLB_MODE_SHIFT) +#define I40E_GLPE_RUPM_PTXPOOL 0x0000DAC8 /* Reset: PE_CORER */ +#define I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_SHIFT 0 +#define I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_SHIFT) +#define I40E_GLPE_RUPM_PUSHPOOL 0x0000DAC0 /* Reset: PE_CORER */ +#define I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_SHIFT 0 
+#define I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_SHIFT) +#define I40E_GLPE_RUPM_TXHOST_EN 0x0000DA08 /* Reset: PE_CORER */ +#define I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_SHIFT 0 +#define I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_SHIFT) +#define I40E_GLPE_VFAEQEDROPCNT(_i) (0x00132540 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLPE_VFAEQEDROPCNT_MAX_INDEX 31 +#define I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_SHIFT 0 +#define I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_SHIFT) +#define I40E_GLPE_VFCEQEDROPCNT(_i) (0x00132440 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLPE_VFCEQEDROPCNT_MAX_INDEX 31 +#define I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_SHIFT 0 +#define I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_SHIFT) +#define I40E_GLPE_VFCQEDROPCNT(_i) (0x00132340 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLPE_VFCQEDROPCNT_MAX_INDEX 31 +#define I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_SHIFT 0 +#define I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_SHIFT) +#define I40E_GLPE_VFFLMOBJCTRL(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPE_VFFLMOBJCTRL_MAX_INDEX 31 +#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT 0 +#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_MASK I40E_MASK(0x7, I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT) +#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT 8 +#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_MASK I40E_MASK(0x7, I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT) +#define I40E_GLPE_VFFLMQ1ALLOCERR(_i) (0x0000C700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPE_VFFLMQ1ALLOCERR_MAX_INDEX 31 +#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0 +#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT) +#define I40E_GLPE_VFFLMXMITALLOCERR(_i) (0x0000C600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPE_VFFLMXMITALLOCERR_MAX_INDEX 31 +#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT 0 +#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT) +#define I40E_GLPE_VFUDACTRL(_i) (0x0000C000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPE_VFUDACTRL_MAX_INDEX 31 +#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT 0 +#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT) +#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT 1 +#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT) +#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT 2 +#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT) +#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT 3 +#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT) +#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT 4 +#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT) +#define I40E_GLPE_VFUDAUCFBQPN(_i) (0x0000C100 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPE_VFUDAUCFBQPN_MAX_INDEX 31 +#define I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT 0 +#define I40E_GLPE_VFUDAUCFBQPN_QPN_MASK 
I40E_MASK(0x3FFFF, I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT) +#define I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT 31 +#define I40E_GLPE_VFUDAUCFBQPN_VALID_MASK I40E_MASK(0x1, I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT) +#define I40E_PFPE_AEQALLOC 0x00131180 /* Reset: PFR */ +#define I40E_PFPE_AEQALLOC_AECOUNT_SHIFT 0 +#define I40E_PFPE_AEQALLOC_AECOUNT_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_AEQALLOC_AECOUNT_SHIFT) +#define I40E_PFPE_CCQPHIGH 0x00008200 /* Reset: PFR */ +#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0 +#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT) +#define I40E_PFPE_CCQPLOW 0x00008180 /* Reset: PFR */ +#define I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT 0 +#define I40E_PFPE_CCQPLOW_PECCQPLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT) +#define I40E_PFPE_CCQPSTATUS 0x00008100 /* Reset: PFR */ +#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0 +#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_MASK I40E_MASK(0x1, I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT) +#define I40E_PFPE_CCQPSTATUS_HMC_PROFILE_SHIFT 4 +#define I40E_PFPE_CCQPSTATUS_HMC_PROFILE_MASK I40E_MASK(0x7, I40E_PFPE_CCQPSTATUS_HMC_PROFILE_SHIFT) +#define I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT 16 +#define I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_MASK I40E_MASK(0x3F, I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT) +#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT 31 +#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_MASK I40E_MASK(0x1, I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT) +#define I40E_PFPE_CQACK 0x00131100 /* Reset: PFR */ +#define I40E_PFPE_CQACK_PECQID_SHIFT 0 +#define I40E_PFPE_CQACK_PECQID_MASK I40E_MASK(0x1FFFF, I40E_PFPE_CQACK_PECQID_SHIFT) +#define I40E_PFPE_CQARM 0x00131080 /* Reset: PFR */ +#define I40E_PFPE_CQARM_PECQID_SHIFT 0 +#define I40E_PFPE_CQARM_PECQID_MASK I40E_MASK(0x1FFFF, I40E_PFPE_CQARM_PECQID_SHIFT) +#define I40E_PFPE_CQPDB 0x00008000 /* Reset: PFR */ +#define I40E_PFPE_CQPDB_WQHEAD_SHIFT 0 +#define I40E_PFPE_CQPDB_WQHEAD_MASK I40E_MASK(0x7FF, I40E_PFPE_CQPDB_WQHEAD_SHIFT) +#define I40E_PFPE_CQPERRCODES 0x00008880 /* Reset: PFR */ +#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0 +#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_MASK I40E_MASK(0xFFFF, I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT) +#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16 +#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK I40E_MASK(0xFFFF, I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT) +#define I40E_PFPE_CQPTAIL 0x00008080 /* Reset: PFR */ +#define I40E_PFPE_CQPTAIL_WQTAIL_SHIFT 0 +#define I40E_PFPE_CQPTAIL_WQTAIL_MASK I40E_MASK(0x7FF, I40E_PFPE_CQPTAIL_WQTAIL_SHIFT) +#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31 +#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_MASK I40E_MASK(0x1, I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT) +#define I40E_PFPE_FLMQ1ALLOCERR 0x00008980 /* Reset: PFR */ +#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0 +#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT) +#define I40E_PFPE_FLMXMITALLOCERR 0x00008900 /* Reset: PFR */ +#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT 0 +#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT) +#define I40E_PFPE_IPCONFIG0 0x00008280 /* Reset: PFR */ +#define I40E_PFPE_IPCONFIG0_PEIPID_SHIFT 0 +#define I40E_PFPE_IPCONFIG0_PEIPID_MASK I40E_MASK(0xFFFF, I40E_PFPE_IPCONFIG0_PEIPID_SHIFT) +#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16 +#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_MASK I40E_MASK(0x1, I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT) +#define I40E_PFPE_MRTEIDXMASK 
0x00008600 /* Reset: PFR */ +#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0 +#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK I40E_MASK(0x1F, I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT) +#define I40E_PFPE_RCVUNEXPECTEDERROR 0x00008680 /* Reset: PFR */ +#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0 +#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK I40E_MASK(0xFFFFFF, I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT) +#define I40E_PFPE_TCPNOWTIMER 0x00008580 /* Reset: PFR */ +#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0 +#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT) +#define I40E_PFPE_UDACTRL 0x00008700 /* Reset: PFR */ +#define I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_SHIFT 0 +#define I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_SHIFT) +#define I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_SHIFT 1 +#define I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_SHIFT) +#define I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_SHIFT 2 +#define I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_SHIFT) +#define I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_SHIFT 3 +#define I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_SHIFT) +#define I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_SHIFT 4 +#define I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_SHIFT) +#define I40E_PFPE_UDAUCFBQPN 0x00008780 /* Reset: PFR */ +#define I40E_PFPE_UDAUCFBQPN_QPN_SHIFT 0 +#define I40E_PFPE_UDAUCFBQPN_QPN_MASK I40E_MASK(0x3FFFF, I40E_PFPE_UDAUCFBQPN_QPN_SHIFT) +#define I40E_PFPE_UDAUCFBQPN_VALID_SHIFT 31 +#define I40E_PFPE_UDAUCFBQPN_VALID_MASK I40E_MASK(0x1, I40E_PFPE_UDAUCFBQPN_VALID_SHIFT) +#define I40E_PFPE_WQEALLOC 0x00138C00 /* Reset: PFR */ +#define I40E_PFPE_WQEALLOC_PEQPID_SHIFT 0 +#define I40E_PFPE_WQEALLOC_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_PFPE_WQEALLOC_PEQPID_SHIFT) +#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20 +#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT) +#define I40E_PRTDCB_RLPMC 0x0001F140 /* Reset: PE_CORER */ +#define I40E_PRTDCB_RLPMC_TC2PFC_SHIFT 0 +#define I40E_PRTDCB_RLPMC_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTDCB_RLPMC_TC2PFC_SHIFT) +#define I40E_PRTDCB_TCMSTC_RLPM(_i) (0x0001F040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: PE_CORER */ +#define I40E_PRTDCB_TCMSTC_RLPM_MAX_INDEX 7 +#define I40E_PRTDCB_TCMSTC_RLPM_MSTC_SHIFT 0 +#define I40E_PRTDCB_TCMSTC_RLPM_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TCMSTC_RLPM_MSTC_SHIFT) +#define I40E_PRTDCB_TCPMC_RLPM 0x0001F1A0 /* Reset: PE_CORER */ +#define I40E_PRTDCB_TCPMC_RLPM_CPM_SHIFT 0 +#define I40E_PRTDCB_TCPMC_RLPM_CPM_MASK I40E_MASK(0x1FFF, I40E_PRTDCB_TCPMC_RLPM_CPM_SHIFT) +#define I40E_PRTDCB_TCPMC_RLPM_LLTC_SHIFT 13 +#define I40E_PRTDCB_TCPMC_RLPM_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TCPMC_RLPM_LLTC_SHIFT) +#define I40E_PRTDCB_TCPMC_RLPM_TCPM_MODE_SHIFT 30 +#define I40E_PRTDCB_TCPMC_RLPM_TCPM_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_TCPMC_RLPM_TCPM_MODE_SHIFT) +#define I40E_PRTE_RUPM_TCCNTR03 0x0000DAE0 /* Reset: PE_CORER */ +#define I40E_PRTE_RUPM_TCCNTR03_TC0COUNT_SHIFT 0 +#define I40E_PRTE_RUPM_TCCNTR03_TC0COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC0COUNT_SHIFT) +#define I40E_PRTE_RUPM_TCCNTR03_TC1COUNT_SHIFT 8 +#define I40E_PRTE_RUPM_TCCNTR03_TC1COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC1COUNT_SHIFT) +#define 
I40E_PRTE_RUPM_TCCNTR03_TC2COUNT_SHIFT 16 +#define I40E_PRTE_RUPM_TCCNTR03_TC2COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC2COUNT_SHIFT) +#define I40E_PRTE_RUPM_TCCNTR03_TC3COUNT_SHIFT 24 +#define I40E_PRTE_RUPM_TCCNTR03_TC3COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC3COUNT_SHIFT) +#define I40E_PRTPE_RUPM_CNTR 0x0000DB20 /* Reset: PE_CORER */ +#define I40E_PRTPE_RUPM_CNTR_COUNT_SHIFT 0 +#define I40E_PRTPE_RUPM_CNTR_COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_CNTR_COUNT_SHIFT) +#define I40E_PRTPE_RUPM_CTL 0x0000DA40 /* Reset: PE_CORER */ +#define I40E_PRTPE_RUPM_CTL_LLTC_SHIFT 13 +#define I40E_PRTPE_RUPM_CTL_LLTC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_CTL_LLTC_SHIFT) +#define I40E_PRTPE_RUPM_CTL_RUPM_MODE_SHIFT 30 +#define I40E_PRTPE_RUPM_CTL_RUPM_MODE_MASK I40E_MASK(0x1, I40E_PRTPE_RUPM_CTL_RUPM_MODE_SHIFT) +#define I40E_PRTPE_RUPM_PFCCTL 0x0000DA60 /* Reset: PE_CORER */ +#define I40E_PRTPE_RUPM_PFCCTL_TC2PFC_SHIFT 0 +#define I40E_PRTPE_RUPM_PFCCTL_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCCTL_TC2PFC_SHIFT) +#define I40E_PRTPE_RUPM_PFCPC 0x0000DA80 /* Reset: PE_CORER */ +#define I40E_PRTPE_RUPM_PFCPC_PORTOFFTH_SHIFT 0 +#define I40E_PRTPE_RUPM_PFCPC_PORTOFFTH_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCPC_PORTOFFTH_SHIFT) +#define I40E_PRTPE_RUPM_PFCTCC 0x0000DAA0 /* Reset: PE_CORER */ +#define I40E_PRTPE_RUPM_PFCTCC_TCOFFTH_SHIFT 0 +#define I40E_PRTPE_RUPM_PFCTCC_TCOFFTH_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCTCC_TCOFFTH_SHIFT) +#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_TH_SHIFT 16 +#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_TH_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCTCC_LL_PRI_TH_SHIFT) +#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_EN_SHIFT 31 +#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_EN_MASK I40E_MASK(0x1, I40E_PRTPE_RUPM_PFCTCC_LL_PRI_EN_SHIFT) +#define I40E_PRTPE_RUPM_PTCTCCNTR47 0x0000DB60 /* Reset: PE_CORER */ +#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC4COUNT_SHIFT 0 +#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC4COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC4COUNT_SHIFT) +#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC5COUNT_SHIFT 8 +#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC5COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC5COUNT_SHIFT) +#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC6COUNT_SHIFT 16 +#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC6COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC6COUNT_SHIFT) +#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC7COUNT_SHIFT 24 +#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC7COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC7COUNT_SHIFT) +#define I40E_PRTPE_RUPM_PTXTCCNTR03 0x0000DB40 /* Reset: PE_CORER */ +#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC0COUNT_SHIFT 0 +#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC0COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC0COUNT_SHIFT) +#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC1COUNT_SHIFT 8 +#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC1COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC1COUNT_SHIFT) +#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC2COUNT_SHIFT 16 +#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC2COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC2COUNT_SHIFT) +#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC3COUNT_SHIFT 24 +#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC3COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC3COUNT_SHIFT) +#define I40E_PRTPE_RUPM_TCCNTR47 0x0000DB00 /* Reset: PE_CORER */ +#define I40E_PRTPE_RUPM_TCCNTR47_TC4COUNT_SHIFT 0 +#define I40E_PRTPE_RUPM_TCCNTR47_TC4COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC4COUNT_SHIFT) +#define I40E_PRTPE_RUPM_TCCNTR47_TC5COUNT_SHIFT 8 +#define 
I40E_PRTPE_RUPM_TCCNTR47_TC5COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC5COUNT_SHIFT) +#define I40E_PRTPE_RUPM_TCCNTR47_TC6COUNT_SHIFT 16 +#define I40E_PRTPE_RUPM_TCCNTR47_TC6COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC6COUNT_SHIFT) +#define I40E_PRTPE_RUPM_TCCNTR47_TC7COUNT_SHIFT 24 +#define I40E_PRTPE_RUPM_TCCNTR47_TC7COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC7COUNT_SHIFT) +#define I40E_PRTPE_RUPM_THRES 0x0000DA20 /* Reset: PE_CORER */ +#define I40E_PRTPE_RUPM_THRES_MINSPADSPERTC_SHIFT 0 +#define I40E_PRTPE_RUPM_THRES_MINSPADSPERTC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_THRES_MINSPADSPERTC_SHIFT) +#define I40E_PRTPE_RUPM_THRES_MAXSPADS_SHIFT 8 +#define I40E_PRTPE_RUPM_THRES_MAXSPADS_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_THRES_MAXSPADS_SHIFT) +#define I40E_PRTPE_RUPM_THRES_MAXSPADSPERTC_SHIFT 16 +#define I40E_PRTPE_RUPM_THRES_MAXSPADSPERTC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_THRES_MAXSPADSPERTC_SHIFT) +#define I40E_VFPE_AEQALLOC(_VF) (0x00130C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VFPE_AEQALLOC_MAX_INDEX 127 +#define I40E_VFPE_AEQALLOC_AECOUNT_SHIFT 0 +#define I40E_VFPE_AEQALLOC_AECOUNT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_AEQALLOC_AECOUNT_SHIFT) +#define I40E_VFPE_CCQPHIGH(_VF) (0x00001000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VFPE_CCQPHIGH_MAX_INDEX 127 +#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0 +#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT) +#define I40E_VFPE_CCQPLOW(_VF) (0x00000C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VFPE_CCQPLOW_MAX_INDEX 127 +#define I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT 0 +#define I40E_VFPE_CCQPLOW_PECCQPLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT) +#define I40E_VFPE_CCQPSTATUS(_VF) (0x00000800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VFPE_CCQPSTATUS_MAX_INDEX 127 +#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0 +#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT) +#define I40E_VFPE_CCQPSTATUS_HMC_PROFILE_SHIFT 4 +#define I40E_VFPE_CCQPSTATUS_HMC_PROFILE_MASK I40E_MASK(0x7, I40E_VFPE_CCQPSTATUS_HMC_PROFILE_SHIFT) +#define I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT 16 +#define I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_MASK I40E_MASK(0x3F, I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT) +#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT 31 +#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT) +#define I40E_VFPE_CQACK(_VF) (0x00130800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VFPE_CQACK_MAX_INDEX 127 +#define I40E_VFPE_CQACK_PECQID_SHIFT 0 +#define I40E_VFPE_CQACK_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQACK_PECQID_SHIFT) +#define I40E_VFPE_CQARM(_VF) (0x00130400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VFPE_CQARM_MAX_INDEX 127 +#define I40E_VFPE_CQARM_PECQID_SHIFT 0 +#define I40E_VFPE_CQARM_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQARM_PECQID_SHIFT) +#define I40E_VFPE_CQPDB(_VF) (0x00000000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VFPE_CQPDB_MAX_INDEX 127 +#define I40E_VFPE_CQPDB_WQHEAD_SHIFT 0 +#define I40E_VFPE_CQPDB_WQHEAD_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPDB_WQHEAD_SHIFT) +#define I40E_VFPE_CQPERRCODES(_VF) (0x00001800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VFPE_CQPERRCODES_MAX_INDEX 127 +#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0 +#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_MASK I40E_MASK(0xFFFF, 
I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT) +#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16 +#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT) +#define I40E_VFPE_CQPTAIL(_VF) (0x00000400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VFPE_CQPTAIL_MAX_INDEX 127 +#define I40E_VFPE_CQPTAIL_WQTAIL_SHIFT 0 +#define I40E_VFPE_CQPTAIL_WQTAIL_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPTAIL_WQTAIL_SHIFT) +#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31 +#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT) +#define I40E_VFPE_IPCONFIG0(_VF) (0x00001400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VFPE_IPCONFIG0_MAX_INDEX 127 +#define I40E_VFPE_IPCONFIG0_PEIPID_SHIFT 0 +#define I40E_VFPE_IPCONFIG0_PEIPID_MASK I40E_MASK(0xFFFF, I40E_VFPE_IPCONFIG0_PEIPID_SHIFT) +#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16 +#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_MASK I40E_MASK(0x1, I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT) +#define I40E_VFPE_MRTEIDXMASK(_VF) (0x00003000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VFPE_MRTEIDXMASK_MAX_INDEX 127 +#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0 +#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK I40E_MASK(0x1F, I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT) +#define I40E_VFPE_RCVUNEXPECTEDERROR(_VF) (0x00003400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VFPE_RCVUNEXPECTEDERROR_MAX_INDEX 127 +#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0 +#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK I40E_MASK(0xFFFFFF, I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT) +#define I40E_VFPE_TCPNOWTIMER(_VF) (0x00002C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VFPE_TCPNOWTIMER_MAX_INDEX 127 +#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0 +#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT) +#define I40E_VFPE_WQEALLOC(_VF) (0x00138000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VFPE_WQEALLOC_MAX_INDEX 127 +#define I40E_VFPE_WQEALLOC_PEQPID_SHIFT 0 +#define I40E_VFPE_WQEALLOC_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_VFPE_WQEALLOC_PEQPID_SHIFT) +#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20 +#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT) +#define I40E_GLPES_PFIP4RXDISCARD(_i) (0x00010600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4RXDISCARD_MAX_INDEX 15 +#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0 +#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT) +#define I40E_GLPES_PFIP4RXFRAGSHI(_i) (0x00010804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4RXFRAGSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0 +#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT) +#define I40E_GLPES_PFIP4RXFRAGSLO(_i) (0x00010800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4RXFRAGSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0 +#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT) +#define I40E_GLPES_PFIP4RXMCOCTSHI(_i) (0x00010A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define 
I40E_GLPES_PFIP4RXMCOCTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0 +#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT) +#define I40E_GLPES_PFIP4RXMCOCTSLO(_i) (0x00010A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4RXMCOCTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0 +#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT) +#define I40E_GLPES_PFIP4RXMCPKTSHI(_i) (0x00010C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4RXMCPKTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0 +#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT) +#define I40E_GLPES_PFIP4RXMCPKTSLO(_i) (0x00010C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4RXMCPKTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0 +#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT) +#define I40E_GLPES_PFIP4RXOCTSHI(_i) (0x00010204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4RXOCTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0 +#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT) +#define I40E_GLPES_PFIP4RXOCTSLO(_i) (0x00010200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4RXOCTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0 +#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT) +#define I40E_GLPES_PFIP4RXPKTSHI(_i) (0x00010404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4RXPKTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0 +#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT) +#define I40E_GLPES_PFIP4RXPKTSLO(_i) (0x00010400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4RXPKTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0 +#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT) +#define I40E_GLPES_PFIP4RXTRUNC(_i) (0x00010700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4RXTRUNC_MAX_INDEX 15 +#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0 +#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT) +#define I40E_GLPES_PFIP4TXFRAGSHI(_i) (0x00011E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4TXFRAGSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0 +#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT) +#define I40E_GLPES_PFIP4TXFRAGSLO(_i) (0x00011E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4TXFRAGSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0 +#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT) +#define I40E_GLPES_PFIP4TXMCOCTSHI(_i) (0x00012004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: 
PE_CORER */ +#define I40E_GLPES_PFIP4TXMCOCTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0 +#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT) +#define I40E_GLPES_PFIP4TXMCOCTSLO(_i) (0x00012000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4TXMCOCTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0 +#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT) +#define I40E_GLPES_PFIP4TXMCPKTSHI(_i) (0x00012204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4TXMCPKTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0 +#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT) +#define I40E_GLPES_PFIP4TXMCPKTSLO(_i) (0x00012200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4TXMCPKTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0 +#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT) +#define I40E_GLPES_PFIP4TXNOROUTE(_i) (0x00012E00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4TXNOROUTE_MAX_INDEX 15 +#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0 +#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT) +#define I40E_GLPES_PFIP4TXOCTSHI(_i) (0x00011A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4TXOCTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0 +#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT) +#define I40E_GLPES_PFIP4TXOCTSLO(_i) (0x00011A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4TXOCTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0 +#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT) +#define I40E_GLPES_PFIP4TXPKTSHI(_i) (0x00011C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4TXPKTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0 +#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT) +#define I40E_GLPES_PFIP4TXPKTSLO(_i) (0x00011C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4TXPKTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0 +#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT) +#define I40E_GLPES_PFIP6RXDISCARD(_i) (0x00011200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6RXDISCARD_MAX_INDEX 15 +#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0 +#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT) +#define I40E_GLPES_PFIP6RXFRAGSHI(_i) (0x00011404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6RXFRAGSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0 +#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT) +#define I40E_GLPES_PFIP6RXFRAGSLO(_i) (0x00011400 + ((_i) * 
8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6RXFRAGSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0 +#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT) +#define I40E_GLPES_PFIP6RXMCOCTSHI(_i) (0x00011604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6RXMCOCTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0 +#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT) +#define I40E_GLPES_PFIP6RXMCOCTSLO(_i) (0x00011600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6RXMCOCTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0 +#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT) +#define I40E_GLPES_PFIP6RXMCPKTSHI(_i) (0x00011804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6RXMCPKTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0 +#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT) +#define I40E_GLPES_PFIP6RXMCPKTSLO(_i) (0x00011800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6RXMCPKTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0 +#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT) +#define I40E_GLPES_PFIP6RXOCTSHI(_i) (0x00010E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6RXOCTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0 +#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT) +#define I40E_GLPES_PFIP6RXOCTSLO(_i) (0x00010E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6RXOCTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0 +#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT) +#define I40E_GLPES_PFIP6RXPKTSHI(_i) (0x00011004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6RXPKTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0 +#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT) +#define I40E_GLPES_PFIP6RXPKTSLO(_i) (0x00011000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6RXPKTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0 +#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT) +#define I40E_GLPES_PFIP6RXTRUNC(_i) (0x00011300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6RXTRUNC_MAX_INDEX 15 +#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0 +#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT) +#define I40E_GLPES_PFIP6TXFRAGSHI(_i) (0x00012804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6TXFRAGSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0 +#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT) +#define I40E_GLPES_PFIP6TXFRAGSLO(_i) 
(0x00012800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6TXFRAGSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0 +#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT) +#define I40E_GLPES_PFIP6TXMCOCTSHI(_i) (0x00012A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6TXMCOCTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0 +#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT) +#define I40E_GLPES_PFIP6TXMCOCTSLO(_i) (0x00012A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6TXMCOCTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0 +#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT) +#define I40E_GLPES_PFIP6TXMCPKTSHI(_i) (0x00012C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6TXMCPKTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0 +#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT) +#define I40E_GLPES_PFIP6TXMCPKTSLO(_i) (0x00012C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6TXMCPKTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0 +#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT) +#define I40E_GLPES_PFIP6TXNOROUTE(_i) (0x00012F00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6TXNOROUTE_MAX_INDEX 15 +#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0 +#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT) +#define I40E_GLPES_PFIP6TXOCTSHI(_i) (0x00012404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6TXOCTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0 +#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT) +#define I40E_GLPES_PFIP6TXOCTSLO(_i) (0x00012400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6TXOCTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0 +#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT) +#define I40E_GLPES_PFIP6TXPKTSHI(_i) (0x00012604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6TXPKTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0 +#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT) +#define I40E_GLPES_PFIP6TXPKTSLO(_i) (0x00012600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6TXPKTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0 +#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT) +#define I40E_GLPES_PFRDMARXRDSHI(_i) (0x00013E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRDMARXRDSHI_MAX_INDEX 15 +#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0 +#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT) +#define 
I40E_GLPES_PFRDMARXRDSLO(_i) (0x00013E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRDMARXRDSLO_MAX_INDEX 15 +#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0 +#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT) +#define I40E_GLPES_PFRDMARXSNDSHI(_i) (0x00014004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRDMARXSNDSHI_MAX_INDEX 15 +#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0 +#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT) +#define I40E_GLPES_PFRDMARXSNDSLO(_i) (0x00014000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRDMARXSNDSLO_MAX_INDEX 15 +#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0 +#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT) +#define I40E_GLPES_PFRDMARXWRSHI(_i) (0x00013C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRDMARXWRSHI_MAX_INDEX 15 +#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0 +#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT) +#define I40E_GLPES_PFRDMARXWRSLO(_i) (0x00013C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRDMARXWRSLO_MAX_INDEX 15 +#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0 +#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT) +#define I40E_GLPES_PFRDMATXRDSHI(_i) (0x00014404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRDMATXRDSHI_MAX_INDEX 15 +#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0 +#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT) +#define I40E_GLPES_PFRDMATXRDSLO(_i) (0x00014400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRDMATXRDSLO_MAX_INDEX 15 +#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0 +#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT) +#define I40E_GLPES_PFRDMATXSNDSHI(_i) (0x00014604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRDMATXSNDSHI_MAX_INDEX 15 +#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0 +#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT) +#define I40E_GLPES_PFRDMATXSNDSLO(_i) (0x00014600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRDMATXSNDSLO_MAX_INDEX 15 +#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0 +#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT) +#define I40E_GLPES_PFRDMATXWRSHI(_i) (0x00014204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRDMATXWRSHI_MAX_INDEX 15 +#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0 +#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT) +#define I40E_GLPES_PFRDMATXWRSLO(_i) (0x00014200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRDMATXWRSLO_MAX_INDEX 15 +#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0 +#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT) +#define 
I40E_GLPES_PFRDMAVBNDHI(_i) (0x00014804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRDMAVBNDHI_MAX_INDEX 15 +#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0 +#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT) +#define I40E_GLPES_PFRDMAVBNDLO(_i) (0x00014800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRDMAVBNDLO_MAX_INDEX 15 +#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0 +#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT) +#define I40E_GLPES_PFRDMAVINVHI(_i) (0x00014A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRDMAVINVHI_MAX_INDEX 15 +#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT 0 +#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT) +#define I40E_GLPES_PFRDMAVINVLO(_i) (0x00014A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRDMAVINVLO_MAX_INDEX 15 +#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT 0 +#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT) +#define I40E_GLPES_PFRXVLANERR(_i) (0x00010000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRXVLANERR_MAX_INDEX 15 +#define I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT 0 +#define I40E_GLPES_PFRXVLANERR_RXVLANERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT) +#define I40E_GLPES_PFTCPRTXSEG(_i) (0x00013600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFTCPRTXSEG_MAX_INDEX 15 +#define I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT 0 +#define I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT) +#define I40E_GLPES_PFTCPRXOPTERR(_i) (0x00013200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFTCPRXOPTERR_MAX_INDEX 15 +#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0 +#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT) +#define I40E_GLPES_PFTCPRXPROTOERR(_i) (0x00013300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFTCPRXPROTOERR_MAX_INDEX 15 +#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0 +#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT) +#define I40E_GLPES_PFTCPRXSEGSHI(_i) (0x00013004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFTCPRXSEGSHI_MAX_INDEX 15 +#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0 +#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT) +#define I40E_GLPES_PFTCPRXSEGSLO(_i) (0x00013000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFTCPRXSEGSLO_MAX_INDEX 15 +#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0 +#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT) +#define I40E_GLPES_PFTCPTXSEGHI(_i) (0x00013404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFTCPTXSEGHI_MAX_INDEX 15 +#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0 +#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT) +#define I40E_GLPES_PFTCPTXSEGLO(_i) (0x00013400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define 
I40E_GLPES_PFTCPTXSEGLO_MAX_INDEX 15 +#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0 +#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT) +#define I40E_GLPES_PFUDPRXPKTSHI(_i) (0x00013804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFUDPRXPKTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0 +#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT) +#define I40E_GLPES_PFUDPRXPKTSLO(_i) (0x00013800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFUDPRXPKTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0 +#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT) +#define I40E_GLPES_PFUDPTXPKTSHI(_i) (0x00013A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFUDPTXPKTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0 +#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT) +#define I40E_GLPES_PFUDPTXPKTSLO(_i) (0x00013A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFUDPTXPKTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0 +#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT) +#define I40E_GLPES_RDMARXMULTFPDUSHI 0x0001E014 /* Reset: PE_CORER */ +#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT 0 +#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT) +#define I40E_GLPES_RDMARXMULTFPDUSLO 0x0001E010 /* Reset: PE_CORER */ +#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT 0 +#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT) +#define I40E_GLPES_RDMARXOOODDPHI 0x0001E01C /* Reset: PE_CORER */ +#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT 0 +#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT) +#define I40E_GLPES_RDMARXOOODDPLO 0x0001E018 /* Reset: PE_CORER */ +#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT 0 +#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT) +#define I40E_GLPES_RDMARXOOONOMARK 0x0001E004 /* Reset: PE_CORER */ +#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT 0 +#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT) +#define I40E_GLPES_RDMARXUNALIGN 0x0001E000 /* Reset: PE_CORER */ +#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT 0 +#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT) +#define I40E_GLPES_TCPRXFOURHOLEHI 0x0001E044 /* Reset: PE_CORER */ +#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT 0 +#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT) +#define I40E_GLPES_TCPRXFOURHOLELO 0x0001E040 /* Reset: PE_CORER */ +#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT 0 +#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT) +#define 
I40E_GLPES_TCPRXONEHOLEHI 0x0001E02C /* Reset: PE_CORER */ +#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT 0 +#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT) +#define I40E_GLPES_TCPRXONEHOLELO 0x0001E028 /* Reset: PE_CORER */ +#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT 0 +#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT) +#define I40E_GLPES_TCPRXPUREACKHI 0x0001E024 /* Reset: PE_CORER */ +#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT 0 +#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT) +#define I40E_GLPES_TCPRXPUREACKSLO 0x0001E020 /* Reset: PE_CORER */ +#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT 0 +#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT) +#define I40E_GLPES_TCPRXTHREEHOLEHI 0x0001E03C /* Reset: PE_CORER */ +#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT 0 +#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT) +#define I40E_GLPES_TCPRXTHREEHOLELO 0x0001E038 /* Reset: PE_CORER */ +#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT 0 +#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT) +#define I40E_GLPES_TCPRXTWOHOLEHI 0x0001E034 /* Reset: PE_CORER */ +#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT 0 +#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT) +#define I40E_GLPES_TCPRXTWOHOLELO 0x0001E030 /* Reset: PE_CORER */ +#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT 0 +#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT) +#define I40E_GLPES_TCPTXRETRANSFASTHI 0x0001E04C /* Reset: PE_CORER */ +#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT 0 +#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT) +#define I40E_GLPES_TCPTXRETRANSFASTLO 0x0001E048 /* Reset: PE_CORER */ +#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT 0 +#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT) +#define I40E_GLPES_TCPTXTOUTSFASTHI 0x0001E054 /* Reset: PE_CORER */ +#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT 0 +#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT) +#define I40E_GLPES_TCPTXTOUTSFASTLO 0x0001E050 /* Reset: PE_CORER */ +#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT 0 +#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT) +#define I40E_GLPES_TCPTXTOUTSHI 0x0001E05C /* Reset: PE_CORER */ +#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT 0 +#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT) +#define I40E_GLPES_TCPTXTOUTSLO 0x0001E058 /* Reset: PE_CORER */ +#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT 0 +#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_MASK I40E_MASK(0xFFFFFFFF, 
I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT) +#define I40E_GLPES_VFIP4RXDISCARD(_i) (0x00018600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4RXDISCARD_MAX_INDEX 31 +#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0 +#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT) +#define I40E_GLPES_VFIP4RXFRAGSHI(_i) (0x00018804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4RXFRAGSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0 +#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT) +#define I40E_GLPES_VFIP4RXFRAGSLO(_i) (0x00018800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4RXFRAGSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0 +#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT) +#define I40E_GLPES_VFIP4RXMCOCTSHI(_i) (0x00018A04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4RXMCOCTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0 +#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT) +#define I40E_GLPES_VFIP4RXMCOCTSLO(_i) (0x00018A00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4RXMCOCTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0 +#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT) +#define I40E_GLPES_VFIP4RXMCPKTSHI(_i) (0x00018C04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4RXMCPKTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0 +#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT) +#define I40E_GLPES_VFIP4RXMCPKTSLO(_i) (0x00018C00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4RXMCPKTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0 +#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT) +#define I40E_GLPES_VFIP4RXOCTSHI(_i) (0x00018204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4RXOCTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0 +#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT) +#define I40E_GLPES_VFIP4RXOCTSLO(_i) (0x00018200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4RXOCTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0 +#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT) +#define I40E_GLPES_VFIP4RXPKTSHI(_i) (0x00018404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4RXPKTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0 +#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT) +#define I40E_GLPES_VFIP4RXPKTSLO(_i) (0x00018400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4RXPKTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0 +#define 
I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT) +#define I40E_GLPES_VFIP4RXTRUNC(_i) (0x00018700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4RXTRUNC_MAX_INDEX 31 +#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0 +#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT) +#define I40E_GLPES_VFIP4TXFRAGSHI(_i) (0x00019E04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4TXFRAGSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0 +#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT) +#define I40E_GLPES_VFIP4TXFRAGSLO(_i) (0x00019E00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4TXFRAGSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0 +#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT) +#define I40E_GLPES_VFIP4TXMCOCTSHI(_i) (0x0001A004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4TXMCOCTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0 +#define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT) +#define I40E_GLPES_VFIP4TXMCOCTSLO(_i) (0x0001A000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4TXMCOCTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0 +#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT) +#define I40E_GLPES_VFIP4TXMCPKTSHI(_i) (0x0001A204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4TXMCPKTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0 +#define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT) +#define I40E_GLPES_VFIP4TXMCPKTSLO(_i) (0x0001A200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4TXMCPKTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0 +#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT) +#define I40E_GLPES_VFIP4TXNOROUTE(_i) (0x0001AE00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4TXNOROUTE_MAX_INDEX 31 +#define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0 +#define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT) +#define I40E_GLPES_VFIP4TXOCTSHI(_i) (0x00019A04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4TXOCTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0 +#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT) +#define I40E_GLPES_VFIP4TXOCTSLO(_i) (0x00019A00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4TXOCTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0 +#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT) +#define I40E_GLPES_VFIP4TXPKTSHI(_i) (0x00019C04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4TXPKTSHI_MAX_INDEX 31 +#define 
I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0 +#define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT) +#define I40E_GLPES_VFIP4TXPKTSLO(_i) (0x00019C00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4TXPKTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0 +#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT) +#define I40E_GLPES_VFIP6RXDISCARD(_i) (0x00019200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6RXDISCARD_MAX_INDEX 31 +#define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0 +#define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT) +#define I40E_GLPES_VFIP6RXFRAGSHI(_i) (0x00019404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6RXFRAGSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0 +#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT) +#define I40E_GLPES_VFIP6RXFRAGSLO(_i) (0x00019400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6RXFRAGSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0 +#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT) +#define I40E_GLPES_VFIP6RXMCOCTSHI(_i) (0x00019604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6RXMCOCTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0 +#define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT) +#define I40E_GLPES_VFIP6RXMCOCTSLO(_i) (0x00019600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6RXMCOCTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0 +#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT) +#define I40E_GLPES_VFIP6RXMCPKTSHI(_i) (0x00019804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6RXMCPKTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0 +#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT) +#define I40E_GLPES_VFIP6RXMCPKTSLO(_i) (0x00019800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6RXMCPKTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0 +#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT) +#define I40E_GLPES_VFIP6RXOCTSHI(_i) (0x00018E04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6RXOCTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0 +#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT) +#define I40E_GLPES_VFIP6RXOCTSLO(_i) (0x00018E00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6RXOCTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0 +#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT) +#define I40E_GLPES_VFIP6RXPKTSHI(_i) (0x00019004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define 
I40E_GLPES_VFIP6RXPKTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0 +#define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT) +#define I40E_GLPES_VFIP6RXPKTSLO(_i) (0x00019000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6RXPKTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0 +#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT) +#define I40E_GLPES_VFIP6RXTRUNC(_i) (0x00019300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6RXTRUNC_MAX_INDEX 31 +#define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0 +#define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT) +#define I40E_GLPES_VFIP6TXFRAGSHI(_i) (0x0001A804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6TXFRAGSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0 +#define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT) +#define I40E_GLPES_VFIP6TXFRAGSLO(_i) (0x0001A800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6TXFRAGSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0 +#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT) +#define I40E_GLPES_VFIP6TXMCOCTSHI(_i) (0x0001AA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6TXMCOCTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0 +#define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT) +#define I40E_GLPES_VFIP6TXMCOCTSLO(_i) (0x0001AA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6TXMCOCTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0 +#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT) +#define I40E_GLPES_VFIP6TXMCPKTSHI(_i) (0x0001AC04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6TXMCPKTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0 +#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT) +#define I40E_GLPES_VFIP6TXMCPKTSLO(_i) (0x0001AC00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6TXMCPKTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0 +#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT) +#define I40E_GLPES_VFIP6TXNOROUTE(_i) (0x0001AF00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6TXNOROUTE_MAX_INDEX 31 +#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0 +#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT) +#define I40E_GLPES_VFIP6TXOCTSHI(_i) (0x0001A404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6TXOCTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0 +#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT) +#define I40E_GLPES_VFIP6TXOCTSLO(_i) (0x0001A400 + ((_i) * 8)) /* _i=0...31 */ /* 
Reset: PE_CORER */ +#define I40E_GLPES_VFIP6TXOCTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0 +#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT) +#define I40E_GLPES_VFIP6TXPKTSHI(_i) (0x0001A604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6TXPKTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0 +#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT) +#define I40E_GLPES_VFIP6TXPKTSLO(_i) (0x0001A600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6TXPKTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0 +#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT) +#define I40E_GLPES_VFRDMARXRDSHI(_i) (0x0001BE04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRDMARXRDSHI_MAX_INDEX 31 +#define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0 +#define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT) +#define I40E_GLPES_VFRDMARXRDSLO(_i) (0x0001BE00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRDMARXRDSLO_MAX_INDEX 31 +#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0 +#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT) +#define I40E_GLPES_VFRDMARXSNDSHI(_i) (0x0001C004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRDMARXSNDSHI_MAX_INDEX 31 +#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0 +#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT) +#define I40E_GLPES_VFRDMARXSNDSLO(_i) (0x0001C000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRDMARXSNDSLO_MAX_INDEX 31 +#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0 +#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT) +#define I40E_GLPES_VFRDMARXWRSHI(_i) (0x0001BC04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRDMARXWRSHI_MAX_INDEX 31 +#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0 +#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT) +#define I40E_GLPES_VFRDMARXWRSLO(_i) (0x0001BC00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRDMARXWRSLO_MAX_INDEX 31 +#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0 +#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT) +#define I40E_GLPES_VFRDMATXRDSHI(_i) (0x0001C404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRDMATXRDSHI_MAX_INDEX 31 +#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0 +#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT) +#define I40E_GLPES_VFRDMATXRDSLO(_i) (0x0001C400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRDMATXRDSLO_MAX_INDEX 31 +#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0 +#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT) +#define I40E_GLPES_VFRDMATXSNDSHI(_i) (0x0001C604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define 
I40E_GLPES_VFRDMATXSNDSHI_MAX_INDEX 31 +#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0 +#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT) +#define I40E_GLPES_VFRDMATXSNDSLO(_i) (0x0001C600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRDMATXSNDSLO_MAX_INDEX 31 +#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0 +#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT) +#define I40E_GLPES_VFRDMATXWRSHI(_i) (0x0001C204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRDMATXWRSHI_MAX_INDEX 31 +#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0 +#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT) +#define I40E_GLPES_VFRDMATXWRSLO(_i) (0x0001C200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRDMATXWRSLO_MAX_INDEX 31 +#define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0 +#define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT) +#define I40E_GLPES_VFRDMAVBNDHI(_i) (0x0001C804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRDMAVBNDHI_MAX_INDEX 31 +#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0 +#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT) +#define I40E_GLPES_VFRDMAVBNDLO(_i) (0x0001C800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRDMAVBNDLO_MAX_INDEX 31 +#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0 +#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT) +#define I40E_GLPES_VFRDMAVINVHI(_i) (0x0001CA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRDMAVINVHI_MAX_INDEX 31 +#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT 0 +#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT) +#define I40E_GLPES_VFRDMAVINVLO(_i) (0x0001CA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRDMAVINVLO_MAX_INDEX 31 +#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT 0 +#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT) +#define I40E_GLPES_VFRXVLANERR(_i) (0x00018000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRXVLANERR_MAX_INDEX 31 +#define I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT 0 +#define I40E_GLPES_VFRXVLANERR_RXVLANERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT) +#define I40E_GLPES_VFTCPRTXSEG(_i) (0x0001B600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFTCPRTXSEG_MAX_INDEX 31 +#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT 0 +#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT) +#define I40E_GLPES_VFTCPRXOPTERR(_i) (0x0001B200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFTCPRXOPTERR_MAX_INDEX 31 +#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0 +#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT) +#define I40E_GLPES_VFTCPRXPROTOERR(_i) (0x0001B300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFTCPRXPROTOERR_MAX_INDEX 31 +#define 
I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0 +#define I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT) +#define I40E_GLPES_VFTCPRXSEGSHI(_i) (0x0001B004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFTCPRXSEGSHI_MAX_INDEX 31 +#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0 +#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT) +#define I40E_GLPES_VFTCPRXSEGSLO(_i) (0x0001B000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFTCPRXSEGSLO_MAX_INDEX 31 +#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0 +#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT) +#define I40E_GLPES_VFTCPTXSEGHI(_i) (0x0001B404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFTCPTXSEGHI_MAX_INDEX 31 +#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0 +#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT) +#define I40E_GLPES_VFTCPTXSEGLO(_i) (0x0001B400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFTCPTXSEGLO_MAX_INDEX 31 +#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0 +#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT) +#define I40E_GLPES_VFUDPRXPKTSHI(_i) (0x0001B804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFUDPRXPKTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0 +#define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT) +#define I40E_GLPES_VFUDPRXPKTSLO(_i) (0x0001B800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFUDPRXPKTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0 +#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT) +#define I40E_GLPES_VFUDPTXPKTSHI(_i) (0x0001BA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFUDPTXPKTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0 +#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT) +#define I40E_GLPES_VFUDPTXPKTSLO(_i) (0x0001BA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFUDPTXPKTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0 +#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT) +#define I40E_GLGEN_PME_TO 0x000B81BC /* Reset: POR */ +#define I40E_GLGEN_PME_TO_PME_TO_FOR_PE_SHIFT 0 +#define I40E_GLGEN_PME_TO_PME_TO_FOR_PE_MASK I40E_MASK(0x1, I40E_GLGEN_PME_TO_PME_TO_FOR_PE_SHIFT) +#define I40E_GLQF_APBVT(_i) (0x00260000 + ((_i) * 4)) /* _i=0...2047 */ /* Reset: CORER */ +#define I40E_GLQF_APBVT_MAX_INDEX 2047 +#define I40E_GLQF_APBVT_APBVT_SHIFT 0 +#define I40E_GLQF_APBVT_APBVT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_APBVT_APBVT_SHIFT) +#define I40E_GLQF_FD_PCTYPES(_i) (0x00268000 + ((_i) * 4)) /* _i=0...63 */ /* Reset: POR */ +#define I40E_GLQF_FD_PCTYPES_MAX_INDEX 63 +#define I40E_GLQF_FD_PCTYPES_FD_PCTYPE_SHIFT 0 +#define I40E_GLQF_FD_PCTYPES_FD_PCTYPE_MASK I40E_MASK(0x3F, I40E_GLQF_FD_PCTYPES_FD_PCTYPE_SHIFT) +#define I40E_GLQF_FD_MSK(_i, _j) (0x00267200 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */ /* Reset: 
CORER */ +#define I40E_GLQF_FD_MSK_MAX_INDEX 1 +#define I40E_GLQF_FD_MSK_MASK_SHIFT 0 +#define I40E_GLQF_FD_MSK_MASK_MASK I40E_MASK(0xFFFF, I40E_GLQF_FD_MSK_MASK_SHIFT) +#define I40E_GLQF_FD_MSK_OFFSET_SHIFT 16 +#define I40E_GLQF_FD_MSK_OFFSET_MASK I40E_MASK(0x3F, I40E_GLQF_FD_MSK_OFFSET_SHIFT) +#define I40E_GLQF_HASH_INSET(_i, _j) (0x00267600 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */ /* Reset: CORER */ +#define I40E_GLQF_HASH_INSET_MAX_INDEX 1 +#define I40E_GLQF_HASH_INSET_INSET_SHIFT 0 +#define I40E_GLQF_HASH_INSET_INSET_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_HASH_INSET_INSET_SHIFT) +#define I40E_GLQF_HASH_MSK(_i, _j) (0x00267A00 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */ /* Reset: CORER */ +#define I40E_GLQF_HASH_MSK_MAX_INDEX 1 +#define I40E_GLQF_HASH_MSK_MASK_SHIFT 0 +#define I40E_GLQF_HASH_MSK_MASK_MASK I40E_MASK(0xFFFF, I40E_GLQF_HASH_MSK_MASK_SHIFT) +#define I40E_GLQF_HASH_MSK_OFFSET_SHIFT 16 +#define I40E_GLQF_HASH_MSK_OFFSET_MASK I40E_MASK(0x3F, I40E_GLQF_HASH_MSK_OFFSET_SHIFT) +#define I40E_GLQF_ORT(_i) (0x00268900 + ((_i) * 4)) /* _i=0...63 */ /* Reset: CORER */ +#define I40E_GLQF_ORT_MAX_INDEX 63 +#define I40E_GLQF_ORT_PIT_INDX_SHIFT 0 +#define I40E_GLQF_ORT_PIT_INDX_MASK I40E_MASK(0x1F, I40E_GLQF_ORT_PIT_INDX_SHIFT) +#define I40E_GLQF_ORT_FIELD_CNT_SHIFT 5 +#define I40E_GLQF_ORT_FIELD_CNT_MASK I40E_MASK(0x3, I40E_GLQF_ORT_FIELD_CNT_SHIFT) +#define I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT 7 +#define I40E_GLQF_ORT_FLX_PAYLOAD_MASK I40E_MASK(0x1, I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT) +#define I40E_GLQF_PIT(_i) (0x00268C80 + ((_i) * 4)) /* _i=0...23 */ /* Reset: CORER */ +#define I40E_GLQF_PIT_MAX_INDEX 23 +#define I40E_GLQF_PIT_SOURCE_OFF_SHIFT 0 +#define I40E_GLQF_PIT_SOURCE_OFF_MASK I40E_MASK(0x1F, I40E_GLQF_PIT_SOURCE_OFF_SHIFT) +#define I40E_GLQF_PIT_FSIZE_SHIFT 5 +#define I40E_GLQF_PIT_FSIZE_MASK I40E_MASK(0x1F, I40E_GLQF_PIT_FSIZE_SHIFT) +#define I40E_GLQF_PIT_DEST_OFF_SHIFT 10 +#define I40E_GLQF_PIT_DEST_OFF_MASK I40E_MASK(0x3F, I40E_GLQF_PIT_DEST_OFF_SHIFT) +#define I40E_GLQF_FDEVICTENA(_i) (0x00270384 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */ +#define I40E_GLQF_FDEVICTENA_MAX_INDEX 1 +#define I40E_GLQF_FDEVICTENA_GLQF_FDEVICTENA_SHIFT 0 +#define I40E_GLQF_FDEVICTENA_GLQF_FDEVICTENA_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_FDEVICTENA_GLQF_FDEVICTENA_SHIFT) +#define I40E_GLQF_FDEVICTFLAG 0x00270280 /* Reset: CORER */ +#define I40E_GLQF_FDEVICTFLAG_TX_FLAGS_SHIFT 0 +#define I40E_GLQF_FDEVICTFLAG_TX_FLAGS_MASK I40E_MASK(0xFF, I40E_GLQF_FDEVICTFLAG_TX_FLAGS_SHIFT) +#define I40E_GLQF_FDEVICTFLAG_RX_FLAGS_SHIFT 8 +#define I40E_GLQF_FDEVICTFLAG_RX_FLAGS_MASK I40E_MASK(0xFF, I40E_GLQF_FDEVICTFLAG_RX_FLAGS_SHIFT) +#define I40E_PFQF_CTL_2 0x00270300 /* Reset: CORER */ +#define I40E_PFQF_CTL_2_PEHSIZE_SHIFT 0 +#define I40E_PFQF_CTL_2_PEHSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_2_PEHSIZE_SHIFT) +#define I40E_PFQF_CTL_2_PEDSIZE_SHIFT 5 +#define I40E_PFQF_CTL_2_PEDSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_2_PEDSIZE_SHIFT) +/* Redefined for X722 family */ +#define I40E_X722_PFQF_HLUT(_i) (0x00240000 + ((_i) * 128)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_X722_PFQF_HLUT_MAX_INDEX 127 +#define I40E_X722_PFQF_HLUT_LUT0_SHIFT 0 +#define I40E_X722_PFQF_HLUT_LUT0_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT0_SHIFT) +#define I40E_X722_PFQF_HLUT_LUT1_SHIFT 8 +#define I40E_X722_PFQF_HLUT_LUT1_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT1_SHIFT) +#define I40E_X722_PFQF_HLUT_LUT2_SHIFT 16 +#define I40E_X722_PFQF_HLUT_LUT2_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT2_SHIFT) +#define 
I40E_X722_PFQF_HLUT_LUT3_SHIFT 24 +#define I40E_X722_PFQF_HLUT_LUT3_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT3_SHIFT) +#define I40E_PFQF_HREGION(_i) (0x00245400 + ((_i) * 128)) /* _i=0...7 */ /* Reset: CORER */ +#define I40E_PFQF_HREGION_MAX_INDEX 7 +#define I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT 0 +#define I40E_PFQF_HREGION_OVERRIDE_ENA_0_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT) +#define I40E_PFQF_HREGION_REGION_0_SHIFT 1 +#define I40E_PFQF_HREGION_REGION_0_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_0_SHIFT) +#define I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT 4 +#define I40E_PFQF_HREGION_OVERRIDE_ENA_1_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT) +#define I40E_PFQF_HREGION_REGION_1_SHIFT 5 +#define I40E_PFQF_HREGION_REGION_1_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_1_SHIFT) +#define I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT 8 +#define I40E_PFQF_HREGION_OVERRIDE_ENA_2_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT) +#define I40E_PFQF_HREGION_REGION_2_SHIFT 9 +#define I40E_PFQF_HREGION_REGION_2_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_2_SHIFT) +#define I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT 12 +#define I40E_PFQF_HREGION_OVERRIDE_ENA_3_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT) +#define I40E_PFQF_HREGION_REGION_3_SHIFT 13 +#define I40E_PFQF_HREGION_REGION_3_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_3_SHIFT) +#define I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT 16 +#define I40E_PFQF_HREGION_OVERRIDE_ENA_4_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT) +#define I40E_PFQF_HREGION_REGION_4_SHIFT 17 +#define I40E_PFQF_HREGION_REGION_4_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_4_SHIFT) +#define I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT 20 +#define I40E_PFQF_HREGION_OVERRIDE_ENA_5_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT) +#define I40E_PFQF_HREGION_REGION_5_SHIFT 21 +#define I40E_PFQF_HREGION_REGION_5_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_5_SHIFT) +#define I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT 24 +#define I40E_PFQF_HREGION_OVERRIDE_ENA_6_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT) +#define I40E_PFQF_HREGION_REGION_6_SHIFT 25 +#define I40E_PFQF_HREGION_REGION_6_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_6_SHIFT) +#define I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT 28 +#define I40E_PFQF_HREGION_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT) +#define I40E_PFQF_HREGION_REGION_7_SHIFT 29 +#define I40E_PFQF_HREGION_REGION_7_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_7_SHIFT) +#define I40E_VSIQF_CTL_RSS_LUT_TYPE_SHIFT 8 +#define I40E_VSIQF_CTL_RSS_LUT_TYPE_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_RSS_LUT_TYPE_SHIFT) +#define I40E_VSIQF_HKEY(_i, _VSI) (0x002A0000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...12, _VSI=0...383 */ /* Reset: CORER */ +#define I40E_VSIQF_HKEY_MAX_INDEX 12 +#define I40E_VSIQF_HKEY_KEY_0_SHIFT 0 +#define I40E_VSIQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_0_SHIFT) +#define I40E_VSIQF_HKEY_KEY_1_SHIFT 8 +#define I40E_VSIQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_1_SHIFT) +#define I40E_VSIQF_HKEY_KEY_2_SHIFT 16 +#define I40E_VSIQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_2_SHIFT) +#define I40E_VSIQF_HKEY_KEY_3_SHIFT 24 +#define I40E_VSIQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_3_SHIFT) +#define I40E_VSIQF_HLUT(_i, _VSI) (0x00220000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...15, _VSI=0...383 */ /* Reset: CORER */ +#define I40E_VSIQF_HLUT_MAX_INDEX 15 +#define I40E_VSIQF_HLUT_LUT0_SHIFT 0 +#define 
I40E_VSIQF_HLUT_LUT0_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT0_SHIFT) +#define I40E_VSIQF_HLUT_LUT1_SHIFT 8 +#define I40E_VSIQF_HLUT_LUT1_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT1_SHIFT) +#define I40E_VSIQF_HLUT_LUT2_SHIFT 16 +#define I40E_VSIQF_HLUT_LUT2_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT2_SHIFT) +#define I40E_VSIQF_HLUT_LUT3_SHIFT 24 +#define I40E_VSIQF_HLUT_LUT3_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT3_SHIFT) +#define I40E_GLGEN_STAT_CLEAR 0x00390004 /* Reset: CORER */ +#define I40E_GLGEN_STAT_CLEAR_GLGEN_STAT_CLEAR_SHIFT 0 +#define I40E_GLGEN_STAT_CLEAR_GLGEN_STAT_CLEAR_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_CLEAR_GLGEN_STAT_CLEAR_SHIFT) +#define I40E_GLGEN_STAT_HALT 0x00390000 /* Reset: CORER */ +#define I40E_GLGEN_STAT_HALT_HALT_CELLS_SHIFT 0 +#define I40E_GLGEN_STAT_HALT_HALT_CELLS_MASK I40E_MASK(0x3FFFFFFF, I40E_GLGEN_STAT_HALT_HALT_CELLS_SHIFT) +#endif /* PF_DRIVER */ +/* Flow Director */ +#define I40E_REG_INSET_L2_DMAC_SHIFT 60 +#define I40E_REG_INSET_L2_DMAC_MASK I40E_MASK(0xEULL, I40E_REG_INSET_L2_DMAC_SHIFT) +#define I40E_REG_INSET_L2_SMAC_SHIFT 56 +#define I40E_REG_INSET_L2_SMAC_MASK I40E_MASK(0x1CULL, I40E_REG_INSET_L2_SMAC_SHIFT) +#define I40E_REG_INSET_L2_OUTER_VLAN_SHIFT 26 +#define I40E_REG_INSET_L2_OUTER_VLAN_MASK I40E_MASK(0x1ULL, I40E_REG_INSET_L2_OUTER_VLAN_SHIFT) +#define I40E_REG_INSET_L2_INNER_VLAN_SHIFT 55 +#define I40E_REG_INSET_L2_INNER_VLAN_MASK I40E_MASK(0x1ULL, I40E_REG_INSET_L2_INNER_VLAN_SHIFT) +#define I40E_REG_INSET_TUNNEL_VLAN_SHIFT 56 +#define I40E_REG_INSET_TUNNEL_VLAN_MASK I40E_MASK(0x1ULL, I40E_REG_INSET_TUNNEL_VLAN_SHIFT) +#define I40E_REG_INSET_L3_SRC_IP4_SHIFT 47 +#define I40E_REG_INSET_L3_SRC_IP4_MASK I40E_MASK(0x3ULL, I40E_REG_INSET_L3_SRC_IP4_SHIFT) +#define I40E_REG_INSET_L3_DST_IP4_SHIFT 35 +#define I40E_REG_INSET_L3_DST_IP4_MASK I40E_MASK(0x3ULL, I40E_REG_INSET_L3_DST_IP4_SHIFT) +#define I40E_X722_REG_INSET_L3_SRC_IP4_SHIFT 49 +#define I40E_X722_REG_INSET_L3_SRC_IP4_MASK I40E_MASK(0x3ULL, I40E_X722_REG_INSET_L3_SRC_IP4_SHIFT) +#define I40E_X722_REG_INSET_L3_DST_IP4_SHIFT 41 +#define I40E_X722_REG_INSET_L3_DST_IP4_MASK I40E_MASK(0x3ULL, I40E_X722_REG_INSET_L3_DST_IP4_SHIFT) +#define I40E_X722_REG_INSET_L3_IP4_PROTO_SHIFT 52 +#define I40E_X722_REG_INSET_L3_IP4_PROTO_MASK I40E_MASK(0x1ULL, I40E_X722_REG_INSET_L3_IP4_PROTO_SHIFT) +#define I40E_X722_REG_INSET_L3_IP4_TTL_SHIFT 52 +#define I40E_X722_REG_INSET_L3_IP4_TTL_MASK I40E_MASK(0x1ULL, I40E_X722_REG_INSET_L3_IP4_TTL_SHIFT) +#define I40E_REG_INSET_L3_IP4_TOS_SHIFT 54 +#define I40E_REG_INSET_L3_IP4_TOS_MASK I40E_MASK(0x1ULL, I40E_REG_INSET_L3_IP4_TOS_SHIFT) +#define I40E_REG_INSET_L3_IP4_PROTO_SHIFT 50 +#define I40E_REG_INSET_L3_IP4_PROTO_MASK I40E_MASK(0x1ULL, I40E_REG_INSET_L3_IP4_PROTO_SHIFT) +#define I40E_REG_INSET_L3_IP4_TTL_SHIFT 50 +#define I40E_REG_INSET_L3_IP4_TTL_MASK I40E_MASK(0x1ULL, I40E_REG_INSET_L3_IP4_TTL_SHIFT) +#define I40E_REG_INSET_L3_SRC_IP6_SHIFT 43 +#define I40E_REG_INSET_L3_SRC_IP6_MASK I40E_MASK(0xFFULL, I40E_REG_INSET_L3_SRC_IP6_SHIFT) +#define I40E_REG_INSET_L3_DST_IP6_SHIFT 35 +#define I40E_REG_INSET_L3_DST_IP6_MASK I40E_MASK(0xFFULL, I40E_REG_INSET_L3_DST_IP6_SHIFT) +#define I40E_REG_INSET_L3_IP6_TC_SHIFT 54 +#define I40E_REG_INSET_L3_IP6_TC_MASK I40E_MASK(0x1ULL, I40E_REG_INSET_L3_IP6_TC_SHIFT) +#define I40E_REG_INSET_L3_IP6_NEXT_HDR_SHIFT 51 +#define I40E_REG_INSET_L3_IP6_NEXT_HDR_MASK I40E_MASK(0x1ULL, I40E_REG_INSET_L3_IP6_NEXT_HDR_SHIFT) +#define I40E_REG_INSET_L3_IP6_HOP_LIMIT_SHIFT 51 +#define I40E_REG_INSET_L3_IP6_HOP_LIMIT_MASK I40E_MASK(0x1ULL, 
I40E_REG_INSET_L3_IP6_HOP_LIMIT_SHIFT) +#define I40E_REG_INSET_L4_SRC_PORT_SHIFT 34 +#define I40E_REG_INSET_L4_SRC_PORT_MASK I40E_MASK(0x1ULL, I40E_REG_INSET_L4_SRC_PORT_SHIFT) +#define I40E_REG_INSET_L4_DST_PORT_SHIFT 33 +#define I40E_REG_INSET_L4_DST_PORT_MASK I40E_MASK(0x1ULL, I40E_REG_INSET_L4_DST_PORT_SHIFT) +#define I40E_REG_INSET_L4_SCTP_VERIFICATION_TAG_SHIFT 31 +#define I40E_REG_INSET_L4_SCTP_VERIFICATION_TAG_MASK I40E_MASK(0x3ULL, I40E_REG_INSET_L4_SCTP_VERIFICATION_TAG_SHIFT) +#define I40E_REG_INSET_TUNNEL_L2_INNER_DST_MAC_SHIFT 22 +#define I40E_REG_INSET_TUNNEL_L2_INNER_DST_MAC_MASK I40E_MASK(0x7ULL, I40E_REG_INSET_TUNNEL_L2_INNER_DST_MAC_SHIFT) +#define I40E_REG_INSET_TUNNEL_L2_INNER_SRC_MAC_SHIFT 11 +#define I40E_REG_INSET_TUNNEL_L2_INNER_SRC_MAC_MASK I40E_MASK(0x7ULL, I40E_REG_INSET_TUNNEL_L2_INNER_SRC_MAC_SHIFT) +#define I40E_REG_INSET_TUNNEL_L4_UDP_DST_PORT_SHIFT 21 +#define I40E_REG_INSET_TUNNEL_L4_UDP_DST_PORT_MASK I40E_MASK(0x1ULL, I40E_REG_INSET_TUNNEL_L4_UDP_DST_PORT_SHIFT) +#define I40E_REG_INSET_TUNNEL_ID_SHIFT 18 +#define I40E_REG_INSET_TUNNEL_ID_MASK I40E_MASK(0x3ULL, I40E_REG_INSET_TUNNEL_ID_SHIFT) +#define I40E_REG_INSET_LAST_ETHER_TYPE_SHIFT 14 +#define I40E_REG_INSET_LAST_ETHER_TYPE_MASK I40E_MASK(0x1ULL, I40E_REG_INSET_LAST_ETHER_TYPE_SHIFT) +#define I40E_REG_INSET_TUNNEL_L3_SRC_IP4_SHIFT 8 +#define I40E_REG_INSET_TUNNEL_L3_SRC_IP4_MASK I40E_MASK(0x3ULL, I40E_REG_INSET_TUNNEL_L3_SRC_IP4_SHIFT) +#define I40E_REG_INSET_TUNNEL_L3_DST_IP4_SHIFT 6 +#define I40E_REG_INSET_TUNNEL_L3_DST_IP4_MASK I40E_MASK(0x3ULL, I40E_REG_INSET_TUNNEL_L3_DST_IP4_SHIFT) +#define I40E_REG_INSET_TUNNEL_L3_DST_IP6_SHIFT 6 +#define I40E_REG_INSET_TUNNEL_L3_DST_IP6_MASK I40E_MASK(0xFFULL, I40E_REG_INSET_TUNNEL_L3_DST_IP6_SHIFT) +#define I40E_REG_INSET_FLEX_PAYLOAD_WORD1_SHIFT 13 +#define I40E_REG_INSET_FLEX_PAYLOAD_WORD1_MASK I40E_MASK(0x1ULL, I40E_REG_INSET_FLEX_PAYLOAD_WORD1_SHIFT) +#define I40E_REG_INSET_FLEX_PAYLOAD_WORD2_SHIFT 12 +#define I40E_REG_INSET_FLEX_PAYLOAD_WORD2_MASK I40E_MASK(0x1ULL, I40E_REG_INSET_FLEX_PAYLOAD_WORD2_SHIFT) +#define I40E_REG_INSET_FLEX_PAYLOAD_WORD3_SHIFT 11 +#define I40E_REG_INSET_FLEX_PAYLOAD_WORD3_MASK I40E_MASK(0x1ULL, I40E_REG_INSET_FLEX_PAYLOAD_WORD3_SHIFT) +#define I40E_REG_INSET_FLEX_PAYLOAD_WORD4_SHIFT 10 +#define I40E_REG_INSET_FLEX_PAYLOAD_WORD4_MASK I40E_MASK(0x1ULL, I40E_REG_INSET_FLEX_PAYLOAD_WORD4_SHIFT) +#define I40E_REG_INSET_FLEX_PAYLOAD_WORD5_SHIFT 9 +#define I40E_REG_INSET_FLEX_PAYLOAD_WORD5_MASK I40E_MASK(0x1ULL, I40E_REG_INSET_FLEX_PAYLOAD_WORD5_SHIFT) +#define I40E_REG_INSET_FLEX_PAYLOAD_WORD6_SHIFT 8 +#define I40E_REG_INSET_FLEX_PAYLOAD_WORD6_MASK I40E_MASK(0x1ULL, I40E_REG_INSET_FLEX_PAYLOAD_WORD6_SHIFT) +#define I40E_REG_INSET_FLEX_PAYLOAD_WORD7_SHIFT 7 +#define I40E_REG_INSET_FLEX_PAYLOAD_WORD7_MASK I40E_MASK(0x1ULL, I40E_REG_INSET_FLEX_PAYLOAD_WORD7_SHIFT) +#define I40E_REG_INSET_FLEX_PAYLOAD_WORD8_SHIFT 6 +#define I40E_REG_INSET_FLEX_PAYLOAD_WORD8_MASK I40E_MASK(0x1ULL, I40E_REG_INSET_FLEX_PAYLOAD_WORD8_SHIFT) +#define I40E_REG_INSET_FLEX_PAYLOAD_WORDS_SHIFT 6 +#define I40E_REG_INSET_FLEX_PAYLOAD_WORDS_MASK I40E_MASK(0xFFULL, I40E_REG_INSET_FLEX_PAYLOAD_WORDS_SHIFT) +#define I40E_REG_INSET_MASK_DEFAULT 0x0000000000000000ULL + +#define I40E_VFINT_DYN_CTL01_WB_ON_ITR_SHIFT 30 +#define I40E_VFINT_DYN_CTL01_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_WB_ON_ITR_SHIFT) +#define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT 30 +#define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT) +#define 
I40E_VFPE_AEQALLOC1 0x0000A400 /* Reset: VFR */ +#define I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT 0 +#define I40E_VFPE_AEQALLOC1_AECOUNT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT) +#define I40E_VFPE_CCQPHIGH1 0x00009800 /* Reset: VFR */ +#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT 0 +#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT) +#define I40E_VFPE_CCQPLOW1 0x0000AC00 /* Reset: VFR */ +#define I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT 0 +#define I40E_VFPE_CCQPLOW1_PECCQPLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT) +#define I40E_VFPE_CCQPSTATUS1 0x0000B800 /* Reset: VFR */ +#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT 0 +#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT) +#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT 4 +#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_MASK I40E_MASK(0x7, I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT) +#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT 16 +#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_MASK I40E_MASK(0x3F, I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT) +#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT 31 +#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT) +#define I40E_VFPE_CQACK1 0x0000B000 /* Reset: VFR */ +#define I40E_VFPE_CQACK1_PECQID_SHIFT 0 +#define I40E_VFPE_CQACK1_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQACK1_PECQID_SHIFT) +#define I40E_VFPE_CQARM1 0x0000B400 /* Reset: VFR */ +#define I40E_VFPE_CQARM1_PECQID_SHIFT 0 +#define I40E_VFPE_CQARM1_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQARM1_PECQID_SHIFT) +#define I40E_VFPE_CQPDB1 0x0000BC00 /* Reset: VFR */ +#define I40E_VFPE_CQPDB1_WQHEAD_SHIFT 0 +#define I40E_VFPE_CQPDB1_WQHEAD_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPDB1_WQHEAD_SHIFT) +#define I40E_VFPE_CQPERRCODES1 0x00009C00 /* Reset: VFR */ +#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT 0 +#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT) +#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT 16 +#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT) +#define I40E_VFPE_CQPTAIL1 0x0000A000 /* Reset: VFR */ +#define I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT 0 +#define I40E_VFPE_CQPTAIL1_WQTAIL_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT) +#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT 31 +#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT) +#define I40E_VFPE_IPCONFIG01 0x00008C00 /* Reset: VFR */ +#define I40E_VFPE_IPCONFIG01_PEIPID_SHIFT 0 +#define I40E_VFPE_IPCONFIG01_PEIPID_MASK I40E_MASK(0xFFFF, I40E_VFPE_IPCONFIG01_PEIPID_SHIFT) +#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT 16 +#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_MASK I40E_MASK(0x1, I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT) +#define I40E_VFPE_MRTEIDXMASK1 0x00009000 /* Reset: VFR */ +#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT 0 +#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_MASK I40E_MASK(0x1F, I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT) +#define I40E_VFPE_RCVUNEXPECTEDERROR1 0x00009400 /* Reset: VFR */ +#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT 0 +#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_MASK I40E_MASK(0xFFFFFF, I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT) +#define I40E_VFPE_TCPNOWTIMER1 0x0000A800 /* Reset: VFR */ +#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT 0 +#define 
I40E_VFPE_TCPNOWTIMER1_TCP_NOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT) +#define I40E_VFPE_WQEALLOC1 0x0000C000 /* Reset: VFR */ +#define I40E_VFPE_WQEALLOC1_PEQPID_SHIFT 0 +#define I40E_VFPE_WQEALLOC1_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_VFPE_WQEALLOC1_PEQPID_SHIFT) +#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT 20 +#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT) + +#endif /* _I40E_REGISTER_H_ */ diff --git a/src/spdk/dpdk/drivers/net/i40e/base/i40e_status.h b/src/spdk/dpdk/drivers/net/i40e/base/i40e_status.h new file mode 100644 index 000000000..cd72169f1 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/i40e/base/i40e_status.h @@ -0,0 +1,79 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + +#ifndef _I40E_STATUS_H_ +#define _I40E_STATUS_H_ + +/* Error Codes */ +enum i40e_status_code { + I40E_SUCCESS = 0, + I40E_ERR_NVM = -1, + I40E_ERR_NVM_CHECKSUM = -2, + I40E_ERR_PHY = -3, + I40E_ERR_CONFIG = -4, + I40E_ERR_PARAM = -5, + I40E_ERR_MAC_TYPE = -6, + I40E_ERR_UNKNOWN_PHY = -7, + I40E_ERR_LINK_SETUP = -8, + I40E_ERR_ADAPTER_STOPPED = -9, + I40E_ERR_INVALID_MAC_ADDR = -10, + I40E_ERR_DEVICE_NOT_SUPPORTED = -11, + I40E_ERR_MASTER_REQUESTS_PENDING = -12, + I40E_ERR_INVALID_LINK_SETTINGS = -13, + I40E_ERR_AUTONEG_NOT_COMPLETE = -14, + I40E_ERR_RESET_FAILED = -15, + I40E_ERR_SWFW_SYNC = -16, + I40E_ERR_NO_AVAILABLE_VSI = -17, + I40E_ERR_NO_MEMORY = -18, + I40E_ERR_BAD_PTR = -19, + I40E_ERR_RING_FULL = -20, + I40E_ERR_INVALID_PD_ID = -21, + I40E_ERR_INVALID_QP_ID = -22, + I40E_ERR_INVALID_CQ_ID = -23, + I40E_ERR_INVALID_CEQ_ID = -24, + I40E_ERR_INVALID_AEQ_ID = -25, + I40E_ERR_INVALID_SIZE = -26, + I40E_ERR_INVALID_ARP_INDEX = -27, + I40E_ERR_INVALID_FPM_FUNC_ID = -28, + I40E_ERR_QP_INVALID_MSG_SIZE = -29, + I40E_ERR_QP_TOOMANY_WRS_POSTED = -30, + I40E_ERR_INVALID_FRAG_COUNT = -31, + I40E_ERR_QUEUE_EMPTY = -32, + I40E_ERR_INVALID_ALIGNMENT = -33, + I40E_ERR_FLUSHED_QUEUE = -34, + I40E_ERR_INVALID_PUSH_PAGE_INDEX = -35, + I40E_ERR_INVALID_IMM_DATA_SIZE = -36, + I40E_ERR_TIMEOUT = -37, + I40E_ERR_OPCODE_MISMATCH = -38, + I40E_ERR_CQP_COMPL_ERROR = -39, + I40E_ERR_INVALID_VF_ID = -40, + I40E_ERR_INVALID_HMCFN_ID = -41, + I40E_ERR_BACKING_PAGE_ERROR = -42, + I40E_ERR_NO_PBLCHUNKS_AVAILABLE = -43, + I40E_ERR_INVALID_PBLE_INDEX = -44, + I40E_ERR_INVALID_SD_INDEX = -45, + I40E_ERR_INVALID_PAGE_DESC_INDEX = -46, + I40E_ERR_INVALID_SD_TYPE = -47, + I40E_ERR_MEMCPY_FAILED = -48, + I40E_ERR_INVALID_HMC_OBJ_INDEX = -49, + I40E_ERR_INVALID_HMC_OBJ_COUNT = -50, + I40E_ERR_INVALID_SRQ_ARM_LIMIT = -51, + I40E_ERR_SRQ_ENABLED = -52, + I40E_ERR_ADMIN_QUEUE_ERROR = -53, + I40E_ERR_ADMIN_QUEUE_TIMEOUT = -54, + I40E_ERR_BUF_TOO_SHORT = -55, + I40E_ERR_ADMIN_QUEUE_FULL = -56, + I40E_ERR_ADMIN_QUEUE_NO_WORK = -57, + I40E_ERR_BAD_IWARP_CQE = -58, + I40E_ERR_NVM_BLANK_MODE = -59, + I40E_ERR_NOT_IMPLEMENTED = -60, + I40E_ERR_PE_DOORBELL_NOT_ENABLED = -61, + I40E_ERR_DIAG_TEST_FAILED = -62, + I40E_ERR_NOT_READY = -63, + I40E_NOT_SUPPORTED = -64, + I40E_ERR_FIRMWARE_API_VERSION = -65, + I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR = -66, +}; + +#endif /* _I40E_STATUS_H_ */ diff --git a/src/spdk/dpdk/drivers/net/i40e/base/i40e_type.h b/src/spdk/dpdk/drivers/net/i40e/base/i40e_type.h new file mode 100644 index 000000000..014a4c132 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/i40e/base/i40e_type.h @@ -0,0 +1,2051 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + 
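The register dump above follows one convention throughout: each field is described by an address macro (optionally indexed per PF, _i=0...15, or per VF, _i=0...31), a _SHIFT giving the field's bit position, and a _MASK built with I40E_MASK(). The wide GLPES statistics counters are split into a 32-bit LO register and a 16-bit HI register at LO + 4. The sketch below is illustrative only and is not part of the upstream patch; ex_rd32, ex_get_field, ex_read_stat48 and the bar0 pointer are hypothetical stand-ins for the driver's own MMIO helpers, and the 48-bit LO/HI combination follows the split that the LO/HI register pairs above imply.

/*
 * Minimal sketch (assumptions noted above): how the *_SHIFT/*_MASK pairs
 * and the LO/HI counter registers from i40e_register.h are typically read.
 */
#include <stdint.h>

/* Hypothetical 32-bit MMIO read: 'bar0' is the mapped register space and
 * 'offset' is a byte offset such as I40E_GLPES_PFIP6TXOCTSLO(pf). */
static uint32_t ex_rd32(const volatile uint32_t *bar0, uint32_t offset)
{
	return bar0[offset / sizeof(uint32_t)];
}

/* Extract a field from a register value with its SHIFT/MASK pair, e.g.
 *   pedsize = ex_get_field(ctl2, I40E_PFQF_CTL_2_PEDSIZE_MASK,
 *                                I40E_PFQF_CTL_2_PEDSIZE_SHIFT);
 */
static uint32_t ex_get_field(uint32_t reg, uint32_t mask, uint32_t shift)
{
	return (reg & mask) >> shift;
}

/* Combine a 48-bit counter split across a 32-bit LO register and a 16-bit
 * HI register, e.g. IPv6 Tx octets for PF 3:
 *   octs = ex_read_stat48(bar0, I40E_GLPES_PFIP6TXOCTSLO(3),
 *                               I40E_GLPES_PFIP6TXOCTSHI(3));
 */
static uint64_t ex_read_stat48(const volatile uint32_t *bar0,
			       uint32_t lo_reg, uint32_t hi_reg)
{
	uint64_t lo = ex_rd32(bar0, lo_reg);
	uint64_t hi = ex_rd32(bar0, hi_reg) & 0xFFFF;	/* HI field is 16 bits wide */
	return (hi << 32) | lo;
}
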
+#ifndef _I40E_TYPE_H_ +#define _I40E_TYPE_H_ + +#include "i40e_status.h" +#include "i40e_osdep.h" +#include "i40e_register.h" +#include "i40e_adminq.h" +#include "i40e_hmc.h" +#include "i40e_lan_hmc.h" +#include "i40e_devids.h" + +#define UNREFERENCED_XPARAMETER +#define UNREFERENCED_1PARAMETER(_p) (_p); +#define UNREFERENCED_2PARAMETER(_p, _q) (_p); (_q); +#define UNREFERENCED_3PARAMETER(_p, _q, _r) (_p); (_q); (_r); +#define UNREFERENCED_4PARAMETER(_p, _q, _r, _s) (_p); (_q); (_r); (_s); +#define UNREFERENCED_5PARAMETER(_p, _q, _r, _s, _t) (_p); (_q); (_r); (_s); (_t); + +#ifndef LINUX_MACROS +#ifndef BIT +#define BIT(a) (1UL << (a)) +#endif /* BIT */ +#ifndef BIT_ULL +#define BIT_ULL(a) (1ULL << (a)) +#endif /* BIT_ULL */ +#endif /* LINUX_MACROS */ + +#ifndef I40E_MASK +/* I40E_MASK is a macro used on 32 bit registers */ +#define I40E_MASK(mask, shift) (mask << shift) +#endif + +#define I40E_MAX_PF 16 +#define I40E_MAX_PF_VSI 64 +#define I40E_MAX_PF_QP 128 +#define I40E_MAX_VSI_QP 16 +#define I40E_MAX_VF_VSI 4 +#define I40E_MAX_CHAINED_RX_BUFFERS 5 +#define I40E_MAX_PF_UDP_OFFLOAD_PORTS 16 + +/* something less than 1 minute */ +#define I40E_HEARTBEAT_TIMEOUT (HZ * 50) + +/* Max default timeout in ms, */ +#define I40E_MAX_NVM_TIMEOUT 18000 + +/* Max timeout in ms for the phy to respond */ +#define I40E_MAX_PHY_TIMEOUT 500 + +/* Check whether address is multicast. */ +#define I40E_IS_MULTICAST(address) (bool)(((u8 *)(address))[0] & ((u8)0x01)) + +/* Check whether an address is broadcast. */ +#define I40E_IS_BROADCAST(address) \ + ((((u8 *)(address))[0] == ((u8)0xff)) && \ + (((u8 *)(address))[1] == ((u8)0xff))) + +/* Switch from ms to the 1usec global time (this is the GTIME resolution) */ +#define I40E_MS_TO_GTIME(time) ((time) * 1000) + +/* forward declaration */ +struct i40e_hw; +typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *); + +#ifndef ETH_ALEN +#define ETH_ALEN 6 +#endif +/* Data type manipulation macros. */ +#define I40E_HI_DWORD(x) ((u32)((((x) >> 16) >> 16) & 0xFFFFFFFF)) +#define I40E_LO_DWORD(x) ((u32)((x) & 0xFFFFFFFF)) + +#define I40E_HI_WORD(x) ((u16)(((x) >> 16) & 0xFFFF)) +#define I40E_LO_WORD(x) ((u16)((x) & 0xFFFF)) + +#define I40E_HI_BYTE(x) ((u8)(((x) >> 8) & 0xFF)) +#define I40E_LO_BYTE(x) ((u8)((x) & 0xFF)) + +/* Number of Transmit Descriptors must be a multiple of 32. */ +#define I40E_REQ_TX_DESCRIPTOR_MULTIPLE 32 +/* Number of Receive Descriptors must be a multiple of 32 if + * the number of descriptors is greater than 32. + */ +#define I40E_REQ_RX_DESCRIPTOR_MULTIPLE 32 + +#define I40E_DESC_UNUSED(R) \ + ((((R)->next_to_clean > (R)->next_to_use) ? 
0 : (R)->count) + \ + (R)->next_to_clean - (R)->next_to_use - 1) + +/* bitfields for Tx queue mapping in QTX_CTL */ +#define I40E_QTX_CTL_VF_QUEUE 0x0 +#define I40E_QTX_CTL_VM_QUEUE 0x1 +#define I40E_QTX_CTL_PF_QUEUE 0x2 + +/* debug masks - set these bits in hw->debug_mask to control output */ +enum i40e_debug_mask { + I40E_DEBUG_INIT = 0x00000001, + I40E_DEBUG_RELEASE = 0x00000002, + + I40E_DEBUG_LINK = 0x00000010, + I40E_DEBUG_PHY = 0x00000020, + I40E_DEBUG_HMC = 0x00000040, + I40E_DEBUG_NVM = 0x00000080, + I40E_DEBUG_LAN = 0x00000100, + I40E_DEBUG_FLOW = 0x00000200, + I40E_DEBUG_DCB = 0x00000400, + I40E_DEBUG_DIAG = 0x00000800, + I40E_DEBUG_FD = 0x00001000, + I40E_DEBUG_PACKAGE = 0x00002000, + + I40E_DEBUG_AQ_MESSAGE = 0x01000000, + I40E_DEBUG_AQ_DESCRIPTOR = 0x02000000, + I40E_DEBUG_AQ_DESC_BUFFER = 0x04000000, + I40E_DEBUG_AQ_COMMAND = 0x06000000, + I40E_DEBUG_AQ = 0x0F000000, + + I40E_DEBUG_USER = 0xF0000000, + + I40E_DEBUG_ALL = 0xFFFFFFFF +}; + +/* PCI Bus Info */ +#define I40E_PCI_LINK_STATUS 0xB2 +#define I40E_PCI_LINK_WIDTH 0x3F0 +#define I40E_PCI_LINK_WIDTH_1 0x10 +#define I40E_PCI_LINK_WIDTH_2 0x20 +#define I40E_PCI_LINK_WIDTH_4 0x40 +#define I40E_PCI_LINK_WIDTH_8 0x80 +#define I40E_PCI_LINK_SPEED 0xF +#define I40E_PCI_LINK_SPEED_2500 0x1 +#define I40E_PCI_LINK_SPEED_5000 0x2 +#define I40E_PCI_LINK_SPEED_8000 0x3 + +#define I40E_MDIO_CLAUSE22_STCODE_MASK I40E_MASK(1, \ + I40E_GLGEN_MSCA_STCODE_SHIFT) +#define I40E_MDIO_CLAUSE22_OPCODE_WRITE_MASK I40E_MASK(1, \ + I40E_GLGEN_MSCA_OPCODE_SHIFT) +#define I40E_MDIO_CLAUSE22_OPCODE_READ_MASK I40E_MASK(2, \ + I40E_GLGEN_MSCA_OPCODE_SHIFT) + +#define I40E_MDIO_CLAUSE45_STCODE_MASK I40E_MASK(0, \ + I40E_GLGEN_MSCA_STCODE_SHIFT) +#define I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK I40E_MASK(0, \ + I40E_GLGEN_MSCA_OPCODE_SHIFT) +#define I40E_MDIO_CLAUSE45_OPCODE_WRITE_MASK I40E_MASK(1, \ + I40E_GLGEN_MSCA_OPCODE_SHIFT) +#define I40E_MDIO_CLAUSE45_OPCODE_READ_INC_ADDR_MASK I40E_MASK(2, \ + I40E_GLGEN_MSCA_OPCODE_SHIFT) +#define I40E_MDIO_CLAUSE45_OPCODE_READ_MASK I40E_MASK(3, \ + I40E_GLGEN_MSCA_OPCODE_SHIFT) + +#define I40E_PHY_COM_REG_PAGE 0x1E +#define I40E_PHY_LED_LINK_MODE_MASK 0xF0 +#define I40E_PHY_LED_MANUAL_ON 0x100 +#define I40E_PHY_LED_PROV_REG_1 0xC430 +#define I40E_PHY_LED_MODE_MASK 0xFFFF +#define I40E_PHY_LED_MODE_ORIG 0x80000000 + +/* Memory types */ +enum i40e_memset_type { + I40E_NONDMA_MEM = 0, + I40E_DMA_MEM +}; + +/* Memcpy types */ +enum i40e_memcpy_type { + I40E_NONDMA_TO_NONDMA = 0, + I40E_NONDMA_TO_DMA, + I40E_DMA_TO_DMA, + I40E_DMA_TO_NONDMA +}; + +/* These are structs for managing the hardware information and the operations. + * The structures of function pointers are filled out at init time when we + * know for sure exactly which hardware we're working with. This gives us the + * flexibility of using the same main driver code but adapting to slightly + * different hardware needs as new parts are developed. For this architecture, + * the Firmware and AdminQ are intended to insulate the driver from most of the + * future changes, but these structures will also do part of the job. 
+ */ +enum i40e_mac_type { + I40E_MAC_UNKNOWN = 0, + I40E_MAC_XL710, + I40E_MAC_VF, + I40E_MAC_X722, + I40E_MAC_X722_VF, + I40E_MAC_GENERIC, +}; + +enum i40e_media_type { + I40E_MEDIA_TYPE_UNKNOWN = 0, + I40E_MEDIA_TYPE_FIBER, + I40E_MEDIA_TYPE_BASET, + I40E_MEDIA_TYPE_BACKPLANE, + I40E_MEDIA_TYPE_CX4, + I40E_MEDIA_TYPE_DA, + I40E_MEDIA_TYPE_VIRTUAL +}; + +enum i40e_fc_mode { + I40E_FC_NONE = 0, + I40E_FC_RX_PAUSE, + I40E_FC_TX_PAUSE, + I40E_FC_FULL, + I40E_FC_PFC, + I40E_FC_DEFAULT +}; + +enum i40e_set_fc_aq_failures { + I40E_SET_FC_AQ_FAIL_NONE = 0, + I40E_SET_FC_AQ_FAIL_GET = 1, + I40E_SET_FC_AQ_FAIL_SET = 2, + I40E_SET_FC_AQ_FAIL_UPDATE = 4, + I40E_SET_FC_AQ_FAIL_SET_UPDATE = 6 +}; + +enum i40e_vsi_type { + I40E_VSI_MAIN = 0, + I40E_VSI_VMDQ1 = 1, + I40E_VSI_VMDQ2 = 2, + I40E_VSI_CTRL = 3, + I40E_VSI_FCOE = 4, + I40E_VSI_MIRROR = 5, + I40E_VSI_SRIOV = 6, + I40E_VSI_FDIR = 7, + I40E_VSI_TYPE_UNKNOWN +}; + +enum i40e_queue_type { + I40E_QUEUE_TYPE_RX = 0, + I40E_QUEUE_TYPE_TX, + I40E_QUEUE_TYPE_PE_CEQ, + I40E_QUEUE_TYPE_UNKNOWN +}; + +struct i40e_link_status { + enum i40e_aq_phy_type phy_type; + enum i40e_aq_link_speed link_speed; + u8 link_info; + u8 an_info; + u8 req_fec_info; + u8 fec_info; + u8 ext_info; + u8 loopback; + /* is Link Status Event notification to SW enabled */ + bool lse_enable; + u16 max_frame_size; + bool crc_enable; + u8 pacing; + u8 requested_speeds; + u8 module_type[3]; + /* 1st byte: module identifier */ +#define I40E_MODULE_TYPE_SFP 0x03 +#define I40E_MODULE_TYPE_QSFP 0x0D + /* 2nd byte: ethernet compliance codes for 10/40G */ +#define I40E_MODULE_TYPE_40G_ACTIVE 0x01 +#define I40E_MODULE_TYPE_40G_LR4 0x02 +#define I40E_MODULE_TYPE_40G_SR4 0x04 +#define I40E_MODULE_TYPE_40G_CR4 0x08 +#define I40E_MODULE_TYPE_10G_BASE_SR 0x10 +#define I40E_MODULE_TYPE_10G_BASE_LR 0x20 +#define I40E_MODULE_TYPE_10G_BASE_LRM 0x40 +#define I40E_MODULE_TYPE_10G_BASE_ER 0x80 + /* 3rd byte: ethernet compliance codes for 1G */ +#define I40E_MODULE_TYPE_1000BASE_SX 0x01 +#define I40E_MODULE_TYPE_1000BASE_LX 0x02 +#define I40E_MODULE_TYPE_1000BASE_CX 0x04 +#define I40E_MODULE_TYPE_1000BASE_T 0x08 +}; + +struct i40e_phy_info { + struct i40e_link_status link_info; + struct i40e_link_status link_info_old; + bool get_link_info; + enum i40e_media_type media_type; + /* all the phy types the NVM is capable of */ + u64 phy_types; +}; + +#define I40E_CAP_PHY_TYPE_SGMII BIT_ULL(I40E_PHY_TYPE_SGMII) +#define I40E_CAP_PHY_TYPE_1000BASE_KX BIT_ULL(I40E_PHY_TYPE_1000BASE_KX) +#define I40E_CAP_PHY_TYPE_10GBASE_KX4 BIT_ULL(I40E_PHY_TYPE_10GBASE_KX4) +#define I40E_CAP_PHY_TYPE_10GBASE_KR BIT_ULL(I40E_PHY_TYPE_10GBASE_KR) +#define I40E_CAP_PHY_TYPE_40GBASE_KR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_KR4) +#define I40E_CAP_PHY_TYPE_XAUI BIT_ULL(I40E_PHY_TYPE_XAUI) +#define I40E_CAP_PHY_TYPE_XFI BIT_ULL(I40E_PHY_TYPE_XFI) +#define I40E_CAP_PHY_TYPE_SFI BIT_ULL(I40E_PHY_TYPE_SFI) +#define I40E_CAP_PHY_TYPE_XLAUI BIT_ULL(I40E_PHY_TYPE_XLAUI) +#define I40E_CAP_PHY_TYPE_XLPPI BIT_ULL(I40E_PHY_TYPE_XLPPI) +#define I40E_CAP_PHY_TYPE_40GBASE_CR4_CU BIT_ULL(I40E_PHY_TYPE_40GBASE_CR4_CU) +#define I40E_CAP_PHY_TYPE_10GBASE_CR1_CU BIT_ULL(I40E_PHY_TYPE_10GBASE_CR1_CU) +#define I40E_CAP_PHY_TYPE_10GBASE_AOC BIT_ULL(I40E_PHY_TYPE_10GBASE_AOC) +#define I40E_CAP_PHY_TYPE_40GBASE_AOC BIT_ULL(I40E_PHY_TYPE_40GBASE_AOC) +#define I40E_CAP_PHY_TYPE_100BASE_TX BIT_ULL(I40E_PHY_TYPE_100BASE_TX) +#define I40E_CAP_PHY_TYPE_1000BASE_T BIT_ULL(I40E_PHY_TYPE_1000BASE_T) +#define I40E_CAP_PHY_TYPE_10GBASE_T BIT_ULL(I40E_PHY_TYPE_10GBASE_T) 
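/*
 * Illustrative sketch only, not part of the imported header: phy_types in
 * struct i40e_phy_info above is a 64-bit capability bitmap, and the
 * I40E_CAP_PHY_TYPE_* masks are intended to be ANDed against it.  The helper
 * name is hypothetical; only macros already defined above are used.
 */
static inline bool
i40e_example_nvm_supports_10gbase_t(const struct i40e_phy_info *phy)
{
	/* Non-zero means the NVM advertises 10GBASE-T as a supported type. */
	return (phy->phy_types & I40E_CAP_PHY_TYPE_10GBASE_T) != 0;
}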
+#define I40E_CAP_PHY_TYPE_10GBASE_SR BIT_ULL(I40E_PHY_TYPE_10GBASE_SR) +#define I40E_CAP_PHY_TYPE_10GBASE_LR BIT_ULL(I40E_PHY_TYPE_10GBASE_LR) +#define I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU BIT_ULL(I40E_PHY_TYPE_10GBASE_SFPP_CU) +#define I40E_CAP_PHY_TYPE_10GBASE_CR1 BIT_ULL(I40E_PHY_TYPE_10GBASE_CR1) +#define I40E_CAP_PHY_TYPE_40GBASE_CR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_CR4) +#define I40E_CAP_PHY_TYPE_40GBASE_SR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_SR4) +#define I40E_CAP_PHY_TYPE_40GBASE_LR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_LR4) +#define I40E_CAP_PHY_TYPE_1000BASE_SX BIT_ULL(I40E_PHY_TYPE_1000BASE_SX) +#define I40E_CAP_PHY_TYPE_1000BASE_LX BIT_ULL(I40E_PHY_TYPE_1000BASE_LX) +#define I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL \ + BIT_ULL(I40E_PHY_TYPE_1000BASE_T_OPTICAL) +#define I40E_CAP_PHY_TYPE_20GBASE_KR2 BIT_ULL(I40E_PHY_TYPE_20GBASE_KR2) +/* + * Defining the macro I40E_TYPE_OFFSET to implement a bit shift for some + * PHY types. There is an unused bit (31) in the I40E_CAP_PHY_TYPE_* bit + * fields but no corresponding gap in the i40e_aq_phy_type enumeration. So, + * a shift is needed to adjust for this with values larger than 31. The + * only affected values are I40E_PHY_TYPE_25GBASE_*. + */ +#define I40E_PHY_TYPE_OFFSET 1 +#define I40E_CAP_PHY_TYPE_25GBASE_KR BIT_ULL(I40E_PHY_TYPE_25GBASE_KR + \ + I40E_PHY_TYPE_OFFSET) +#define I40E_CAP_PHY_TYPE_25GBASE_CR BIT_ULL(I40E_PHY_TYPE_25GBASE_CR + \ + I40E_PHY_TYPE_OFFSET) +#define I40E_CAP_PHY_TYPE_25GBASE_SR BIT_ULL(I40E_PHY_TYPE_25GBASE_SR + \ + I40E_PHY_TYPE_OFFSET) +#define I40E_CAP_PHY_TYPE_25GBASE_LR BIT_ULL(I40E_PHY_TYPE_25GBASE_LR + \ + I40E_PHY_TYPE_OFFSET) +#define I40E_CAP_PHY_TYPE_25GBASE_AOC BIT_ULL(I40E_PHY_TYPE_25GBASE_AOC + \ + I40E_PHY_TYPE_OFFSET) +#define I40E_CAP_PHY_TYPE_25GBASE_ACC BIT_ULL(I40E_PHY_TYPE_25GBASE_ACC + \ + I40E_PHY_TYPE_OFFSET) +/* Offset for 2.5G/5G PHY Types value to bit number conversion */ +#define I40E_PHY_TYPE_OFFSET2 (-10) +#define I40E_CAP_PHY_TYPE_2_5GBASE_T BIT_ULL(I40E_PHY_TYPE_2_5GBASE_T + \ + I40E_PHY_TYPE_OFFSET2) +#define I40E_CAP_PHY_TYPE_5GBASE_T BIT_ULL(I40E_PHY_TYPE_5GBASE_T + \ + I40E_PHY_TYPE_OFFSET2) +#define I40E_HW_CAP_MAX_GPIO 30 +#define I40E_HW_CAP_MDIO_PORT_MODE_MDIO 0 +#define I40E_HW_CAP_MDIO_PORT_MODE_I2C 1 + +enum i40e_acpi_programming_method { + I40E_ACPI_PROGRAMMING_METHOD_HW_FVL = 0, + I40E_ACPI_PROGRAMMING_METHOD_AQC_FPK = 1 +}; + +#define I40E_WOL_SUPPORT_MASK 0x1 +#define I40E_ACPI_PROGRAMMING_METHOD_MASK 0x2 +#define I40E_PROXY_SUPPORT_MASK 0x4 + +/* Capabilities of a PF or a VF or the whole device */ +struct i40e_hw_capabilities { + u32 switch_mode; +#define I40E_NVM_IMAGE_TYPE_EVB 0x0 +#define I40E_NVM_IMAGE_TYPE_CLOUD 0x2 +#define I40E_NVM_IMAGE_TYPE_UDP_CLOUD 0x3 + + /* Cloud filter modes: + * Mode1: Filter on L4 port only + * Mode2: Filter for non-tunneled traffic + * Mode3: Filter for tunnel traffic + */ +#define I40E_CLOUD_FILTER_MODE1 0x6 +#define I40E_CLOUD_FILTER_MODE2 0x7 +#define I40E_CLOUD_FILTER_MODE3 0x8 +#define I40E_SWITCH_MODE_MASK 0xF + + u32 management_mode; + u32 mng_protocols_over_mctp; +#define I40E_MNG_PROTOCOL_PLDM 0x2 +#define I40E_MNG_PROTOCOL_OEM_COMMANDS 0x4 +#define I40E_MNG_PROTOCOL_NCSI 0x8 + u32 npar_enable; + u32 os2bmc; + u32 valid_functions; + bool sr_iov_1_1; + bool vmdq; + bool evb_802_1_qbg; /* Edge Virtual Bridging */ + bool evb_802_1_qbh; /* Bridge Port Extension */ + bool dcb; + bool fcoe; + bool iscsi; /* Indicates iSCSI enabled */ + bool flex10_enable; + bool flex10_capable; + u32 flex10_mode; +#define I40E_FLEX10_MODE_UNKNOWN 0x0 +#define 
I40E_FLEX10_MODE_DCC 0x1 +#define I40E_FLEX10_MODE_DCI 0x2 + + u32 flex10_status; +#define I40E_FLEX10_STATUS_DCC_ERROR 0x1 +#define I40E_FLEX10_STATUS_VC_MODE 0x2 + + bool sec_rev_disabled; + bool update_disabled; +#define I40E_NVM_MGMT_SEC_REV_DISABLED 0x1 +#define I40E_NVM_MGMT_UPDATE_DISABLED 0x2 + + bool mgmt_cem; + bool ieee_1588; + bool iwarp; + bool fd; + u32 fd_filters_guaranteed; + u32 fd_filters_best_effort; + bool rss; + u32 rss_table_size; + u32 rss_table_entry_width; + bool led[I40E_HW_CAP_MAX_GPIO]; + bool sdp[I40E_HW_CAP_MAX_GPIO]; + u32 nvm_image_type; + u32 num_flow_director_filters; + u32 num_vfs; + u32 vf_base_id; + u32 num_vsis; + u32 num_rx_qp; + u32 num_tx_qp; + u32 base_queue; + u32 num_msix_vectors; + u32 num_msix_vectors_vf; + u32 led_pin_num; + u32 sdp_pin_num; + u32 mdio_port_num; + u32 mdio_port_mode; + u8 rx_buf_chain_len; + u32 enabled_tcmap; + u32 maxtc; + u64 wr_csr_prot; + bool apm_wol_support; + enum i40e_acpi_programming_method acpi_prog_method; + bool proxy_support; +}; + +struct i40e_mac_info { + enum i40e_mac_type type; + u8 addr[ETH_ALEN]; + u8 perm_addr[ETH_ALEN]; + u8 san_addr[ETH_ALEN]; + u8 port_addr[ETH_ALEN]; + u16 max_fcoeq; +}; + +enum i40e_aq_resources_ids { + I40E_NVM_RESOURCE_ID = 1 +}; + +enum i40e_aq_resource_access_type { + I40E_RESOURCE_READ = 1, + I40E_RESOURCE_WRITE +}; + +struct i40e_nvm_info { + u64 hw_semaphore_timeout; /* usec global time (GTIME resolution) */ + u32 timeout; /* [ms] */ + u16 sr_size; /* Shadow RAM size in words */ + bool blank_nvm_mode; /* is NVM empty (no FW present)*/ + u16 version; /* NVM package version */ + u32 eetrack; /* NVM data version */ + u32 oem_ver; /* OEM version info */ +}; + +/* definitions used in NVM update support */ + +enum i40e_nvmupd_cmd { + I40E_NVMUPD_INVALID, + I40E_NVMUPD_READ_CON, + I40E_NVMUPD_READ_SNT, + I40E_NVMUPD_READ_LCB, + I40E_NVMUPD_READ_SA, + I40E_NVMUPD_WRITE_ERA, + I40E_NVMUPD_WRITE_CON, + I40E_NVMUPD_WRITE_SNT, + I40E_NVMUPD_WRITE_LCB, + I40E_NVMUPD_WRITE_SA, + I40E_NVMUPD_CSUM_CON, + I40E_NVMUPD_CSUM_SA, + I40E_NVMUPD_CSUM_LCB, + I40E_NVMUPD_STATUS, + I40E_NVMUPD_EXEC_AQ, + I40E_NVMUPD_GET_AQ_RESULT, + I40E_NVMUPD_GET_AQ_EVENT, + I40E_NVMUPD_FEATURES, +}; + +enum i40e_nvmupd_state { + I40E_NVMUPD_STATE_INIT, + I40E_NVMUPD_STATE_READING, + I40E_NVMUPD_STATE_WRITING, + I40E_NVMUPD_STATE_INIT_WAIT, + I40E_NVMUPD_STATE_WRITE_WAIT, + I40E_NVMUPD_STATE_ERROR +}; + +/* nvm_access definition and its masks/shifts need to be accessible to + * application, core driver, and shared code. Where is the right file? 
+ */ +#define I40E_NVM_READ 0xB +#define I40E_NVM_WRITE 0xC + +#define I40E_NVM_MOD_PNT_MASK 0xFF + +#define I40E_NVM_TRANS_SHIFT 8 +#define I40E_NVM_TRANS_MASK (0xf << I40E_NVM_TRANS_SHIFT) +#define I40E_NVM_PRESERVATION_FLAGS_SHIFT 12 +#define I40E_NVM_PRESERVATION_FLAGS_MASK \ + (0x3 << I40E_NVM_PRESERVATION_FLAGS_SHIFT) +#define I40E_NVM_PRESERVATION_FLAGS_SELECTED 0x01 +#define I40E_NVM_PRESERVATION_FLAGS_ALL 0x02 +#define I40E_NVM_CON 0x0 +#define I40E_NVM_SNT 0x1 +#define I40E_NVM_LCB 0x2 +#define I40E_NVM_SA (I40E_NVM_SNT | I40E_NVM_LCB) +#define I40E_NVM_ERA 0x4 +#define I40E_NVM_CSUM 0x8 +#define I40E_NVM_AQE 0xe +#define I40E_NVM_EXEC 0xf + +#define I40E_NVM_EXEC_GET_AQ_RESULT 0x0 +#define I40E_NVM_EXEC_FEATURES 0xe +#define I40E_NVM_EXEC_STATUS 0xf + +#define I40E_NVM_ADAPT_SHIFT 16 +#define I40E_NVM_ADAPT_MASK (0xffffULL << I40E_NVM_ADAPT_SHIFT) + +#define I40E_NVMUPD_MAX_DATA 4096 +#define I40E_NVMUPD_IFACE_TIMEOUT 2 /* seconds */ + +struct i40e_nvm_access { + u32 command; + u32 config; + u32 offset; /* in bytes */ + u32 data_size; /* in bytes */ + u8 data[1]; +}; + +/* NVMUpdate features API */ +#define I40E_NVMUPD_FEATURES_API_VER_MAJOR 0 +#define I40E_NVMUPD_FEATURES_API_VER_MINOR 14 +#define I40E_NVMUPD_FEATURES_API_FEATURES_ARRAY_LEN 12 + +#define I40E_NVMUPD_FEATURE_FLAT_NVM_SUPPORT BIT(0) + +struct i40e_nvmupd_features { + u8 major; + u8 minor; + u16 size; + u8 features[I40E_NVMUPD_FEATURES_API_FEATURES_ARRAY_LEN]; +}; + +/* (Q)SFP module access definitions */ +#define I40E_I2C_EEPROM_DEV_ADDR 0xA0 +#define I40E_I2C_EEPROM_DEV_ADDR2 0xA2 +#define I40E_MODULE_TYPE_ADDR 0x00 +#define I40E_MODULE_REVISION_ADDR 0x01 +#define I40E_MODULE_SFF_8472_COMP 0x5E +#define I40E_MODULE_SFF_8472_SWAP 0x5C +#define I40E_MODULE_SFF_ADDR_MODE 0x04 +#define I40E_MODULE_SFF_DIAG_CAPAB 0x40 +#define I40E_MODULE_TYPE_QSFP_PLUS 0x0D +#define I40E_MODULE_TYPE_QSFP28 0x11 +#define I40E_MODULE_QSFP_MAX_LEN 640 + +/* PCI bus types */ +enum i40e_bus_type { + i40e_bus_type_unknown = 0, + i40e_bus_type_pci, + i40e_bus_type_pcix, + i40e_bus_type_pci_express, + i40e_bus_type_reserved +}; + +/* PCI bus speeds */ +enum i40e_bus_speed { + i40e_bus_speed_unknown = 0, + i40e_bus_speed_33 = 33, + i40e_bus_speed_66 = 66, + i40e_bus_speed_100 = 100, + i40e_bus_speed_120 = 120, + i40e_bus_speed_133 = 133, + i40e_bus_speed_2500 = 2500, + i40e_bus_speed_5000 = 5000, + i40e_bus_speed_8000 = 8000, + i40e_bus_speed_reserved +}; + +/* PCI bus widths */ +enum i40e_bus_width { + i40e_bus_width_unknown = 0, + i40e_bus_width_pcie_x1 = 1, + i40e_bus_width_pcie_x2 = 2, + i40e_bus_width_pcie_x4 = 4, + i40e_bus_width_pcie_x8 = 8, + i40e_bus_width_32 = 32, + i40e_bus_width_64 = 64, + i40e_bus_width_reserved +}; + +/* Bus parameters */ +struct i40e_bus_info { + enum i40e_bus_speed speed; + enum i40e_bus_width width; + enum i40e_bus_type type; + + u16 func; + u16 device; + u16 lan_id; + u16 bus_id; +}; + +/* Flow control (FC) parameters */ +struct i40e_fc_info { + enum i40e_fc_mode current_mode; /* FC mode in effect */ + enum i40e_fc_mode requested_mode; /* FC mode requested by caller */ +}; + +#define I40E_MAX_TRAFFIC_CLASS 8 +#define I40E_MAX_USER_PRIORITY 8 +#define I40E_DCBX_MAX_APPS 32 +#define I40E_LLDPDU_SIZE 1500 +#define I40E_TLV_STATUS_OPER 0x1 +#define I40E_TLV_STATUS_SYNC 0x2 +#define I40E_TLV_STATUS_ERR 0x4 +#define I40E_CEE_OPER_MAX_APPS 3 +#define I40E_APP_PROTOID_FCOE 0x8906 +#define I40E_APP_PROTOID_ISCSI 0x0cbc +#define I40E_APP_PROTOID_FIP 0x8914 +#define I40E_APP_SEL_ETHTYPE 0x1 +#define I40E_APP_SEL_TCPIP 
0x2 +#define I40E_CEE_APP_SEL_ETHTYPE 0x0 +#define I40E_CEE_APP_SEL_TCPIP 0x1 + +/* CEE or IEEE 802.1Qaz ETS Configuration data */ +struct i40e_dcb_ets_config { + u8 willing; + u8 cbs; + u8 maxtcs; + u8 prioritytable[I40E_MAX_TRAFFIC_CLASS]; + u8 tcbwtable[I40E_MAX_TRAFFIC_CLASS]; + u8 tsatable[I40E_MAX_TRAFFIC_CLASS]; +}; + +/* CEE or IEEE 802.1Qaz PFC Configuration data */ +struct i40e_dcb_pfc_config { + u8 willing; + u8 mbc; + u8 pfccap; + u8 pfcenable; +}; + +/* CEE or IEEE 802.1Qaz Application Priority data */ +struct i40e_dcb_app_priority_table { + u8 priority; + u8 selector; + u16 protocolid; +}; + +struct i40e_dcbx_config { + u8 dcbx_mode; +#define I40E_DCBX_MODE_CEE 0x1 +#define I40E_DCBX_MODE_IEEE 0x2 + u8 app_mode; +#define I40E_DCBX_APPS_NON_WILLING 0x1 + u32 numapps; + u32 tlv_status; /* CEE mode TLV status */ + struct i40e_dcb_ets_config etscfg; + struct i40e_dcb_ets_config etsrec; + struct i40e_dcb_pfc_config pfc; + struct i40e_dcb_app_priority_table app[I40E_DCBX_MAX_APPS]; +}; + +/* Port hardware description */ +struct i40e_hw { + u8 *hw_addr; + void *back; + + /* subsystem structs */ + struct i40e_phy_info phy; + struct i40e_mac_info mac; + struct i40e_bus_info bus; + struct i40e_nvm_info nvm; + struct i40e_fc_info fc; + + /* switch device is used to get link status when i40e is in ipn3ke */ + struct rte_eth_dev *switch_dev; + + /* pci info */ + u16 device_id; + u16 vendor_id; + u16 subsystem_device_id; + u16 subsystem_vendor_id; + u8 revision_id; + u8 port; + bool adapter_stopped; + bool adapter_closed; + + /* capabilities for entire device and PCI func */ + struct i40e_hw_capabilities dev_caps; + struct i40e_hw_capabilities func_caps; + + /* Flow Director shared filter space */ + u16 fdir_shared_filter_count; + + /* device profile info */ + u8 pf_id; + u16 main_vsi_seid; + + /* for multi-function MACs */ + u16 partition_id; + u16 num_partitions; + u16 num_ports; + + /* Closest numa node to the device */ + u16 numa_node; + + /* Admin Queue info */ + struct i40e_adminq_info aq; + + /* state of nvm update process */ + enum i40e_nvmupd_state nvmupd_state; + struct i40e_aq_desc nvm_wb_desc; + struct i40e_aq_desc nvm_aq_event_desc; + struct i40e_virt_mem nvm_buff; + bool nvm_release_on_done; + u16 nvm_wait_opcode; + + /* HMC info */ + struct i40e_hmc_info hmc; /* HMC info struct */ + + /* LLDP/DCBX Status */ + u16 dcbx_status; + + /* DCBX info */ + struct i40e_dcbx_config local_dcbx_config; /* Oper/Local Cfg */ + struct i40e_dcbx_config remote_dcbx_config; /* Peer Cfg */ + struct i40e_dcbx_config desired_dcbx_config; /* CEE Desired Cfg */ + + /* WoL and proxy support */ + u16 num_wol_proxy_filters; + u16 wol_proxy_vsi_seid; + +#define I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE BIT_ULL(0) +#define I40E_HW_FLAG_802_1AD_CAPABLE BIT_ULL(1) +#define I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE BIT_ULL(2) +#define I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK BIT_ULL(3) +#define I40E_HW_FLAG_FW_LLDP_STOPPABLE BIT_ULL(4) +#define I40E_HW_FLAG_FW_LLDP_PERSISTENT BIT_ULL(5) +#define I40E_HW_FLAG_AQ_PHY_ACCESS_EXTENDED BIT_ULL(6) +#define I40E_HW_FLAG_DROP_MODE BIT_ULL(7) + u64 flags; + + /* Used in set switch config AQ command */ + u16 switch_tag; + u16 first_tag; + u16 second_tag; + + /* NVMUpdate features */ + struct i40e_nvmupd_features nvmupd_features; + + /* debug mask */ + u32 debug_mask; + char err_str[16]; +}; + +STATIC INLINE bool i40e_is_vf(struct i40e_hw *hw) +{ + return (hw->mac.type == I40E_MAC_VF || + hw->mac.type == I40E_MAC_X722_VF); +} + +struct i40e_driver_version { + u8 major_version; + u8 
minor_version; + u8 build_version; + u8 subbuild_version; + u8 driver_string[32]; +}; + +/* RX Descriptors */ +union i40e_16byte_rx_desc { + struct { + __le64 pkt_addr; /* Packet buffer address */ + __le64 hdr_addr; /* Header buffer address */ + } read; + struct { + struct { + struct { + union { + __le16 mirroring_status; + __le16 fcoe_ctx_id; + } mirr_fcoe; + __le16 l2tag1; + } lo_dword; + union { + __le32 rss; /* RSS Hash */ + __le32 fd_id; /* Flow director filter id */ + __le32 fcoe_param; /* FCoE DDP Context id */ + } hi_dword; + } qword0; + struct { + /* ext status/error/pktype/length */ + __le64 status_error_len; + } qword1; + } wb; /* writeback */ +}; + +union i40e_32byte_rx_desc { + struct { + __le64 pkt_addr; /* Packet buffer address */ + __le64 hdr_addr; /* Header buffer address */ + /* bit 0 of hdr_buffer_addr is DD bit */ + __le64 rsvd1; + __le64 rsvd2; + } read; + struct { + struct { + struct { + union { + __le16 mirroring_status; + __le16 fcoe_ctx_id; + } mirr_fcoe; + __le16 l2tag1; + } lo_dword; + union { + __le32 rss; /* RSS Hash */ + __le32 fcoe_param; /* FCoE DDP Context id */ + /* Flow director filter id in case of + * Programming status desc WB + */ + __le32 fd_id; + } hi_dword; + } qword0; + struct { + /* status/error/pktype/length */ + __le64 status_error_len; + } qword1; + struct { + __le16 ext_status; /* extended status */ + __le16 rsvd; + __le16 l2tag2_1; + __le16 l2tag2_2; + } qword2; + struct { + union { + __le32 flex_bytes_lo; + __le32 pe_status; + } lo_dword; + union { + __le32 flex_bytes_hi; + __le32 fd_id; + } hi_dword; + } qword3; + } wb; /* writeback */ +}; + +#define I40E_RXD_QW0_MIRROR_STATUS_SHIFT 8 +#define I40E_RXD_QW0_MIRROR_STATUS_MASK (0x3FUL << \ + I40E_RXD_QW0_MIRROR_STATUS_SHIFT) +#define I40E_RXD_QW0_FCOEINDX_SHIFT 0 +#define I40E_RXD_QW0_FCOEINDX_MASK (0xFFFUL << \ + I40E_RXD_QW0_FCOEINDX_SHIFT) + +enum i40e_rx_desc_status_bits { + /* Note: These are predefined bit offsets */ + I40E_RX_DESC_STATUS_DD_SHIFT = 0, + I40E_RX_DESC_STATUS_EOF_SHIFT = 1, + I40E_RX_DESC_STATUS_L2TAG1P_SHIFT = 2, + I40E_RX_DESC_STATUS_L3L4P_SHIFT = 3, + I40E_RX_DESC_STATUS_CRCP_SHIFT = 4, + I40E_RX_DESC_STATUS_TSYNINDX_SHIFT = 5, /* 2 BITS */ + I40E_RX_DESC_STATUS_TSYNVALID_SHIFT = 7, + I40E_RX_DESC_STATUS_EXT_UDP_0_SHIFT = 8, + + I40E_RX_DESC_STATUS_UMBCAST_SHIFT = 9, /* 2 BITS */ + I40E_RX_DESC_STATUS_FLM_SHIFT = 11, + I40E_RX_DESC_STATUS_FLTSTAT_SHIFT = 12, /* 2 BITS */ + I40E_RX_DESC_STATUS_LPBK_SHIFT = 14, + I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT = 15, + I40E_RX_DESC_STATUS_RESERVED2_SHIFT = 16, /* 2 BITS */ + I40E_RX_DESC_STATUS_INT_UDP_0_SHIFT = 18, + I40E_RX_DESC_STATUS_LAST /* this entry must be last!!! */ +}; + +#define I40E_RXD_QW1_STATUS_SHIFT 0 +#define I40E_RXD_QW1_STATUS_MASK ((BIT(I40E_RX_DESC_STATUS_LAST) - 1) << \ + I40E_RXD_QW1_STATUS_SHIFT) + +#define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT I40E_RX_DESC_STATUS_TSYNINDX_SHIFT +#define I40E_RXD_QW1_STATUS_TSYNINDX_MASK (0x3UL << \ + I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT) + +#define I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT I40E_RX_DESC_STATUS_TSYNVALID_SHIFT +#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK BIT_ULL(I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT) + +#define I40E_RXD_QW1_STATUS_UMBCAST_SHIFT I40E_RX_DESC_STATUS_UMBCAST_SHIFT +#define I40E_RXD_QW1_STATUS_UMBCAST_MASK (0x3UL << \ + I40E_RXD_QW1_STATUS_UMBCAST_SHIFT) + +enum i40e_rx_desc_fltstat_values { + I40E_RX_DESC_FLTSTAT_NO_DATA = 0, + I40E_RX_DESC_FLTSTAT_RSV_FD_ID = 1, /* 16byte desc?
FD_ID : RSV */ + I40E_RX_DESC_FLTSTAT_RSV = 2, + I40E_RX_DESC_FLTSTAT_RSS_HASH = 3, +}; + +#define I40E_RXD_PACKET_TYPE_UNICAST 0 +#define I40E_RXD_PACKET_TYPE_MULTICAST 1 +#define I40E_RXD_PACKET_TYPE_BROADCAST 2 +#define I40E_RXD_PACKET_TYPE_MIRRORED 3 + +#define I40E_RXD_QW1_ERROR_SHIFT 19 +#define I40E_RXD_QW1_ERROR_MASK (0xFFUL << I40E_RXD_QW1_ERROR_SHIFT) + +enum i40e_rx_desc_error_bits { + /* Note: These are predefined bit offsets */ + I40E_RX_DESC_ERROR_RXE_SHIFT = 0, + I40E_RX_DESC_ERROR_RECIPE_SHIFT = 1, + I40E_RX_DESC_ERROR_HBO_SHIFT = 2, + I40E_RX_DESC_ERROR_L3L4E_SHIFT = 3, /* 3 BITS */ + I40E_RX_DESC_ERROR_IPE_SHIFT = 3, + I40E_RX_DESC_ERROR_L4E_SHIFT = 4, + I40E_RX_DESC_ERROR_EIPE_SHIFT = 5, + I40E_RX_DESC_ERROR_OVERSIZE_SHIFT = 6, + I40E_RX_DESC_ERROR_PPRS_SHIFT = 7 +}; + +enum i40e_rx_desc_error_l3l4e_fcoe_masks { + I40E_RX_DESC_ERROR_L3L4E_NONE = 0, + I40E_RX_DESC_ERROR_L3L4E_PROT = 1, + I40E_RX_DESC_ERROR_L3L4E_FC = 2, + I40E_RX_DESC_ERROR_L3L4E_DMAC_ERR = 3, + I40E_RX_DESC_ERROR_L3L4E_DMAC_WARN = 4 +}; + +#define I40E_RXD_QW1_PTYPE_SHIFT 30 +#define I40E_RXD_QW1_PTYPE_MASK (0xFFULL << I40E_RXD_QW1_PTYPE_SHIFT) + +/* Packet type non-ip values */ +enum i40e_rx_l2_ptype { + I40E_RX_PTYPE_L2_RESERVED = 0, + I40E_RX_PTYPE_L2_MAC_PAY2 = 1, + I40E_RX_PTYPE_L2_TIMESYNC_PAY2 = 2, + I40E_RX_PTYPE_L2_FIP_PAY2 = 3, + I40E_RX_PTYPE_L2_OUI_PAY2 = 4, + I40E_RX_PTYPE_L2_MACCNTRL_PAY2 = 5, + I40E_RX_PTYPE_L2_LLDP_PAY2 = 6, + I40E_RX_PTYPE_L2_ECP_PAY2 = 7, + I40E_RX_PTYPE_L2_EVB_PAY2 = 8, + I40E_RX_PTYPE_L2_QCN_PAY2 = 9, + I40E_RX_PTYPE_L2_EAPOL_PAY2 = 10, + I40E_RX_PTYPE_L2_ARP = 11, + I40E_RX_PTYPE_L2_FCOE_PAY3 = 12, + I40E_RX_PTYPE_L2_FCOE_FCDATA_PAY3 = 13, + I40E_RX_PTYPE_L2_FCOE_FCRDY_PAY3 = 14, + I40E_RX_PTYPE_L2_FCOE_FCRSP_PAY3 = 15, + I40E_RX_PTYPE_L2_FCOE_FCOTHER_PA = 16, + I40E_RX_PTYPE_L2_FCOE_VFT_PAY3 = 17, + I40E_RX_PTYPE_L2_FCOE_VFT_FCDATA = 18, + I40E_RX_PTYPE_L2_FCOE_VFT_FCRDY = 19, + I40E_RX_PTYPE_L2_FCOE_VFT_FCRSP = 20, + I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER = 21, + I40E_RX_PTYPE_GRENAT4_MAC_PAY3 = 58, + I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4 = 87, + I40E_RX_PTYPE_GRENAT6_MAC_PAY3 = 124, + I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4 = 153 +}; + +struct i40e_rx_ptype_decoded { + u32 ptype:8; + u32 known:1; + u32 outer_ip:1; + u32 outer_ip_ver:1; + u32 outer_frag:1; + u32 tunnel_type:3; + u32 tunnel_end_prot:2; + u32 tunnel_end_frag:1; + u32 inner_prot:4; + u32 payload_layer:3; +}; + +enum i40e_rx_ptype_outer_ip { + I40E_RX_PTYPE_OUTER_L2 = 0, + I40E_RX_PTYPE_OUTER_IP = 1 +}; + +enum i40e_rx_ptype_outer_ip_ver { + I40E_RX_PTYPE_OUTER_NONE = 0, + I40E_RX_PTYPE_OUTER_IPV4 = 0, + I40E_RX_PTYPE_OUTER_IPV6 = 1 +}; + +enum i40e_rx_ptype_outer_fragmented { + I40E_RX_PTYPE_NOT_FRAG = 0, + I40E_RX_PTYPE_FRAG = 1 +}; + +enum i40e_rx_ptype_tunnel_type { + I40E_RX_PTYPE_TUNNEL_NONE = 0, + I40E_RX_PTYPE_TUNNEL_IP_IP = 1, + I40E_RX_PTYPE_TUNNEL_IP_GRENAT = 2, + I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC = 3, + I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN = 4, +}; + +enum i40e_rx_ptype_tunnel_end_prot { + I40E_RX_PTYPE_TUNNEL_END_NONE = 0, + I40E_RX_PTYPE_TUNNEL_END_IPV4 = 1, + I40E_RX_PTYPE_TUNNEL_END_IPV6 = 2, +}; + +enum i40e_rx_ptype_inner_prot { + I40E_RX_PTYPE_INNER_PROT_NONE = 0, + I40E_RX_PTYPE_INNER_PROT_UDP = 1, + I40E_RX_PTYPE_INNER_PROT_TCP = 2, + I40E_RX_PTYPE_INNER_PROT_SCTP = 3, + I40E_RX_PTYPE_INNER_PROT_ICMP = 4, + I40E_RX_PTYPE_INNER_PROT_TIMESYNC = 5 +}; + +enum i40e_rx_ptype_payload_layer { + I40E_RX_PTYPE_PAYLOAD_LAYER_NONE = 0, + I40E_RX_PTYPE_PAYLOAD_LAYER_PAY2 = 1, + 
I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3 = 2, + I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3, +}; + +#define I40E_RX_PTYPE_BIT_MASK 0x0FFFFFFF +#define I40E_RX_PTYPE_SHIFT 56 + +#define I40E_RXD_QW1_LENGTH_PBUF_SHIFT 38 +#define I40E_RXD_QW1_LENGTH_PBUF_MASK (0x3FFFULL << \ + I40E_RXD_QW1_LENGTH_PBUF_SHIFT) + +#define I40E_RXD_QW1_LENGTH_HBUF_SHIFT 52 +#define I40E_RXD_QW1_LENGTH_HBUF_MASK (0x7FFULL << \ + I40E_RXD_QW1_LENGTH_HBUF_SHIFT) + +#define I40E_RXD_QW1_LENGTH_SPH_SHIFT 63 +#define I40E_RXD_QW1_LENGTH_SPH_MASK BIT_ULL(I40E_RXD_QW1_LENGTH_SPH_SHIFT) + +#define I40E_RXD_QW1_NEXTP_SHIFT 38 +#define I40E_RXD_QW1_NEXTP_MASK (0x1FFFULL << I40E_RXD_QW1_NEXTP_SHIFT) + +#define I40E_RXD_QW2_EXT_STATUS_SHIFT 0 +#define I40E_RXD_QW2_EXT_STATUS_MASK (0xFFFFFUL << \ + I40E_RXD_QW2_EXT_STATUS_SHIFT) + +enum i40e_rx_desc_ext_status_bits { + /* Note: These are predefined bit offsets */ + I40E_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT = 0, + I40E_RX_DESC_EXT_STATUS_L2TAG3P_SHIFT = 1, + I40E_RX_DESC_EXT_STATUS_FLEXBL_SHIFT = 2, /* 2 BITS */ + I40E_RX_DESC_EXT_STATUS_FLEXBH_SHIFT = 4, /* 2 BITS */ + I40E_RX_DESC_EXT_STATUS_FDLONGB_SHIFT = 9, + I40E_RX_DESC_EXT_STATUS_FCOELONGB_SHIFT = 10, + I40E_RX_DESC_EXT_STATUS_PELONGB_SHIFT = 11, +}; + +#define I40E_RXD_QW2_L2TAG2_SHIFT 0 +#define I40E_RXD_QW2_L2TAG2_MASK (0xFFFFUL << I40E_RXD_QW2_L2TAG2_SHIFT) + +#define I40E_RXD_QW2_L2TAG3_SHIFT 16 +#define I40E_RXD_QW2_L2TAG3_MASK (0xFFFFUL << I40E_RXD_QW2_L2TAG3_SHIFT) + +enum i40e_rx_desc_pe_status_bits { + /* Note: These are predefined bit offsets */ + I40E_RX_DESC_PE_STATUS_QPID_SHIFT = 0, /* 18 BITS */ + I40E_RX_DESC_PE_STATUS_L4PORT_SHIFT = 0, /* 16 BITS */ + I40E_RX_DESC_PE_STATUS_IPINDEX_SHIFT = 16, /* 8 BITS */ + I40E_RX_DESC_PE_STATUS_QPIDHIT_SHIFT = 24, + I40E_RX_DESC_PE_STATUS_APBVTHIT_SHIFT = 25, + I40E_RX_DESC_PE_STATUS_PORTV_SHIFT = 26, + I40E_RX_DESC_PE_STATUS_URG_SHIFT = 27, + I40E_RX_DESC_PE_STATUS_IPFRAG_SHIFT = 28, + I40E_RX_DESC_PE_STATUS_IPOPT_SHIFT = 29 +}; + +#define I40E_RX_PROG_STATUS_DESC_LENGTH_SHIFT 38 +#define I40E_RX_PROG_STATUS_DESC_LENGTH 0x2000000 + +#define I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT 2 +#define I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK (0x7UL << \ + I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT) + +#define I40E_RX_PROG_STATUS_DESC_QW1_STATUS_SHIFT 0 +#define I40E_RX_PROG_STATUS_DESC_QW1_STATUS_MASK (0x7FFFUL << \ + I40E_RX_PROG_STATUS_DESC_QW1_STATUS_SHIFT) + +#define I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT 19 +#define I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK (0x3FUL << \ + I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT) + +enum i40e_rx_prog_status_desc_status_bits { + /* Note: These are predefined bit offsets */ + I40E_RX_PROG_STATUS_DESC_DD_SHIFT = 0, + I40E_RX_PROG_STATUS_DESC_PROG_ID_SHIFT = 2 /* 3 BITS */ +}; + +enum i40e_rx_prog_status_desc_prog_id_masks { + I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS = 1, + I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS = 2, + I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS = 4, +}; + +enum i40e_rx_prog_status_desc_error_bits { + /* Note: These are predefined bit offsets */ + I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT = 0, + I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT = 1, + I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT = 2, + I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT = 3 +}; + +#define I40E_TWO_BIT_MASK 0x3 +#define I40E_THREE_BIT_MASK 0x7 +#define I40E_FOUR_BIT_MASK 0xF +#define I40E_EIGHTEEN_BIT_MASK 0x3FFFF + +/* TX Descriptor */ +struct i40e_tx_desc { + __le64 buffer_addr; /* Address of descriptor's data buf */ + __le64 cmd_type_offset_bsz; +}; + 
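/*
 * Illustrative sketch only, not part of the imported header: on write-back,
 * qword1 (status_error_len) of the RX descriptors above packs the status
 * bits, error bits, packet type and packet buffer length into one 64-bit
 * word.  A minimal decode using the QW1 masks defined earlier might look
 * like the sketch below; the helper name and the le64_to_cpu() byte-order
 * conversion (whatever the osdep layer provides) are assumptions.
 */
static inline bool
i40e_example_decode_rx_qw1(const union i40e_32byte_rx_desc *rxd,
			   u16 *pkt_len, u8 *ptype)
{
	u64 qw1 = le64_to_cpu(rxd->wb.qword1.status_error_len);

	/* DD (Descriptor Done) must be set before the other fields are valid. */
	if (!(qw1 & BIT_ULL(I40E_RX_DESC_STATUS_DD_SHIFT)))
		return false;

	*pkt_len = (u16)((qw1 & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
			 I40E_RXD_QW1_LENGTH_PBUF_SHIFT);
	*ptype = (u8)((qw1 & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT);
	return true;
}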
+#define I40E_TXD_QW1_DTYPE_SHIFT 0 +#define I40E_TXD_QW1_DTYPE_MASK (0xFUL << I40E_TXD_QW1_DTYPE_SHIFT) + +enum i40e_tx_desc_dtype_value { + I40E_TX_DESC_DTYPE_DATA = 0x0, + I40E_TX_DESC_DTYPE_NOP = 0x1, /* same as Context desc */ + I40E_TX_DESC_DTYPE_CONTEXT = 0x1, + I40E_TX_DESC_DTYPE_FCOE_CTX = 0x2, + I40E_TX_DESC_DTYPE_FILTER_PROG = 0x8, + I40E_TX_DESC_DTYPE_DDP_CTX = 0x9, + I40E_TX_DESC_DTYPE_FLEX_DATA = 0xB, + I40E_TX_DESC_DTYPE_FLEX_CTX_1 = 0xC, + I40E_TX_DESC_DTYPE_FLEX_CTX_2 = 0xD, + I40E_TX_DESC_DTYPE_DESC_DONE = 0xF +}; + +#define I40E_TXD_QW1_CMD_SHIFT 4 +#define I40E_TXD_QW1_CMD_MASK (0x3FFUL << I40E_TXD_QW1_CMD_SHIFT) + +enum i40e_tx_desc_cmd_bits { + I40E_TX_DESC_CMD_EOP = 0x0001, + I40E_TX_DESC_CMD_RS = 0x0002, + I40E_TX_DESC_CMD_ICRC = 0x0004, + I40E_TX_DESC_CMD_IL2TAG1 = 0x0008, + I40E_TX_DESC_CMD_DUMMY = 0x0010, + I40E_TX_DESC_CMD_IIPT_NONIP = 0x0000, /* 2 BITS */ + I40E_TX_DESC_CMD_IIPT_IPV6 = 0x0020, /* 2 BITS */ + I40E_TX_DESC_CMD_IIPT_IPV4 = 0x0040, /* 2 BITS */ + I40E_TX_DESC_CMD_IIPT_IPV4_CSUM = 0x0060, /* 2 BITS */ + I40E_TX_DESC_CMD_FCOET = 0x0080, + I40E_TX_DESC_CMD_L4T_EOFT_UNK = 0x0000, /* 2 BITS */ + I40E_TX_DESC_CMD_L4T_EOFT_TCP = 0x0100, /* 2 BITS */ + I40E_TX_DESC_CMD_L4T_EOFT_SCTP = 0x0200, /* 2 BITS */ + I40E_TX_DESC_CMD_L4T_EOFT_UDP = 0x0300, /* 2 BITS */ + I40E_TX_DESC_CMD_L4T_EOFT_EOF_N = 0x0000, /* 2 BITS */ + I40E_TX_DESC_CMD_L4T_EOFT_EOF_T = 0x0100, /* 2 BITS */ + I40E_TX_DESC_CMD_L4T_EOFT_EOF_NI = 0x0200, /* 2 BITS */ + I40E_TX_DESC_CMD_L4T_EOFT_EOF_A = 0x0300, /* 2 BITS */ +}; + +#define I40E_TXD_QW1_OFFSET_SHIFT 16 +#define I40E_TXD_QW1_OFFSET_MASK (0x3FFFFULL << \ + I40E_TXD_QW1_OFFSET_SHIFT) + +enum i40e_tx_desc_length_fields { + /* Note: These are predefined bit offsets */ + I40E_TX_DESC_LENGTH_MACLEN_SHIFT = 0, /* 7 BITS */ + I40E_TX_DESC_LENGTH_IPLEN_SHIFT = 7, /* 7 BITS */ + I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT = 14 /* 4 BITS */ +}; + +#define I40E_TXD_QW1_MACLEN_MASK (0x7FUL << I40E_TX_DESC_LENGTH_MACLEN_SHIFT) +#define I40E_TXD_QW1_IPLEN_MASK (0x7FUL << I40E_TX_DESC_LENGTH_IPLEN_SHIFT) +#define I40E_TXD_QW1_L4LEN_MASK (0xFUL << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT) +#define I40E_TXD_QW1_FCLEN_MASK (0xFUL << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT) + +#define I40E_TXD_QW1_TX_BUF_SZ_SHIFT 34 +#define I40E_TXD_QW1_TX_BUF_SZ_MASK (0x3FFFULL << \ + I40E_TXD_QW1_TX_BUF_SZ_SHIFT) + +#define I40E_TXD_QW1_L2TAG1_SHIFT 48 +#define I40E_TXD_QW1_L2TAG1_MASK (0xFFFFULL << I40E_TXD_QW1_L2TAG1_SHIFT) + +/* Context descriptors */ +struct i40e_tx_context_desc { + __le32 tunneling_params; + __le16 l2tag2; + __le16 rsvd; + __le64 type_cmd_tso_mss; +}; + +#define I40E_TXD_CTX_QW1_DTYPE_SHIFT 0 +#define I40E_TXD_CTX_QW1_DTYPE_MASK (0xFUL << I40E_TXD_CTX_QW1_DTYPE_SHIFT) + +#define I40E_TXD_CTX_QW1_CMD_SHIFT 4 +#define I40E_TXD_CTX_QW1_CMD_MASK (0xFFFFUL << I40E_TXD_CTX_QW1_CMD_SHIFT) + +enum i40e_tx_ctx_desc_cmd_bits { + I40E_TX_CTX_DESC_TSO = 0x01, + I40E_TX_CTX_DESC_TSYN = 0x02, + I40E_TX_CTX_DESC_IL2TAG2 = 0x04, + I40E_TX_CTX_DESC_IL2TAG2_IL2H = 0x08, + I40E_TX_CTX_DESC_SWTCH_NOTAG = 0x00, + I40E_TX_CTX_DESC_SWTCH_UPLINK = 0x10, + I40E_TX_CTX_DESC_SWTCH_LOCAL = 0x20, + I40E_TX_CTX_DESC_SWTCH_VSI = 0x30, + I40E_TX_CTX_DESC_SWPE = 0x40 +}; + +#define I40E_TXD_CTX_QW1_TSO_LEN_SHIFT 30 +#define I40E_TXD_CTX_QW1_TSO_LEN_MASK (0x3FFFFULL << \ + I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) + +#define I40E_TXD_CTX_QW1_MSS_SHIFT 50 +#define I40E_TXD_CTX_QW1_MSS_MASK (0x3FFFULL << \ + I40E_TXD_CTX_QW1_MSS_SHIFT) + +#define I40E_TXD_CTX_QW1_VSI_SHIFT 50 +#define 
I40E_TXD_CTX_QW1_VSI_MASK (0x1FFULL << I40E_TXD_CTX_QW1_VSI_SHIFT) + +#define I40E_TXD_CTX_QW0_EXT_IP_SHIFT 0 +#define I40E_TXD_CTX_QW0_EXT_IP_MASK (0x3ULL << \ + I40E_TXD_CTX_QW0_EXT_IP_SHIFT) + +enum i40e_tx_ctx_desc_eipt_offload { + I40E_TX_CTX_EXT_IP_NONE = 0x0, + I40E_TX_CTX_EXT_IP_IPV6 = 0x1, + I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM = 0x2, + I40E_TX_CTX_EXT_IP_IPV4 = 0x3 +}; + +#define I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT 2 +#define I40E_TXD_CTX_QW0_EXT_IPLEN_MASK (0x3FULL << \ + I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT) + +#define I40E_TXD_CTX_QW0_NATT_SHIFT 9 +#define I40E_TXD_CTX_QW0_NATT_MASK (0x3ULL << I40E_TXD_CTX_QW0_NATT_SHIFT) + +#define I40E_TXD_CTX_UDP_TUNNELING BIT_ULL(I40E_TXD_CTX_QW0_NATT_SHIFT) +#define I40E_TXD_CTX_GRE_TUNNELING (0x2ULL << I40E_TXD_CTX_QW0_NATT_SHIFT) + +#define I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT 11 +#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK BIT_ULL(I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT) + +#define I40E_TXD_CTX_EIP_NOINC_IPID_CONST I40E_TXD_CTX_QW0_EIP_NOINC_MASK + +#define I40E_TXD_CTX_QW0_NATLEN_SHIFT 12 +#define I40E_TXD_CTX_QW0_NATLEN_MASK (0X7FULL << \ + I40E_TXD_CTX_QW0_NATLEN_SHIFT) + +#define I40E_TXD_CTX_QW0_DECTTL_SHIFT 19 +#define I40E_TXD_CTX_QW0_DECTTL_MASK (0xFULL << \ + I40E_TXD_CTX_QW0_DECTTL_SHIFT) + +#define I40E_TXD_CTX_QW0_L4T_CS_SHIFT 23 +#define I40E_TXD_CTX_QW0_L4T_CS_MASK BIT_ULL(I40E_TXD_CTX_QW0_L4T_CS_SHIFT) +struct i40e_nop_desc { + __le64 rsvd; + __le64 dtype_cmd; +}; + +#define I40E_TXD_NOP_QW1_DTYPE_SHIFT 0 +#define I40E_TXD_NOP_QW1_DTYPE_MASK (0xFUL << I40E_TXD_NOP_QW1_DTYPE_SHIFT) + +#define I40E_TXD_NOP_QW1_CMD_SHIFT 4 +#define I40E_TXD_NOP_QW1_CMD_MASK (0x7FUL << I40E_TXD_NOP_QW1_CMD_SHIFT) + +enum i40e_tx_nop_desc_cmd_bits { + /* Note: These are predefined bit offsets */ + I40E_TX_NOP_DESC_EOP_SHIFT = 0, + I40E_TX_NOP_DESC_RS_SHIFT = 1, + I40E_TX_NOP_DESC_RSV_SHIFT = 2 /* 5 bits */ +}; + +struct i40e_filter_program_desc { + __le32 qindex_flex_ptype_vsi; + __le32 rsvd; + __le32 dtype_cmd_cntindex; + __le32 fd_id; +}; +#define I40E_TXD_FLTR_QW0_QINDEX_SHIFT 0 +#define I40E_TXD_FLTR_QW0_QINDEX_MASK (0x7FFUL << \ + I40E_TXD_FLTR_QW0_QINDEX_SHIFT) +#define I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT 11 +#define I40E_TXD_FLTR_QW0_FLEXOFF_MASK (0x7UL << \ + I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) +#define I40E_TXD_FLTR_QW0_PCTYPE_SHIFT 17 +#define I40E_TXD_FLTR_QW0_PCTYPE_MASK (0x3FUL << \ + I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) + +/* Packet Classifier Types for filters */ +enum i40e_filter_pctype { + /* Note: Values 0-28 are reserved for future use. + * Value 29, 30, 32 are not supported on XL710 and X710. + */ + I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP = 29, + I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP = 30, + I40E_FILTER_PCTYPE_NONF_IPV4_UDP = 31, + I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK = 32, + I40E_FILTER_PCTYPE_NONF_IPV4_TCP = 33, + I40E_FILTER_PCTYPE_NONF_IPV4_SCTP = 34, + I40E_FILTER_PCTYPE_NONF_IPV4_OTHER = 35, + I40E_FILTER_PCTYPE_FRAG_IPV4 = 36, + /* Note: Values 37-38 are reserved for future use. + * Value 39, 40, 42 are not supported on XL710 and X710. 
+ */ + I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP = 39, + I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP = 40, + I40E_FILTER_PCTYPE_NONF_IPV6_UDP = 41, + I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK = 42, + I40E_FILTER_PCTYPE_NONF_IPV6_TCP = 43, + I40E_FILTER_PCTYPE_NONF_IPV6_SCTP = 44, + I40E_FILTER_PCTYPE_NONF_IPV6_OTHER = 45, + I40E_FILTER_PCTYPE_FRAG_IPV6 = 46, + /* Note: Value 47 is reserved for future use */ + I40E_FILTER_PCTYPE_FCOE_OX = 48, + I40E_FILTER_PCTYPE_FCOE_RX = 49, + I40E_FILTER_PCTYPE_FCOE_OTHER = 50, + /* Note: Values 51-62 are reserved for future use */ + I40E_FILTER_PCTYPE_L2_PAYLOAD = 63, +}; + +enum i40e_filter_program_desc_dest { + I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET = 0x0, + I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX = 0x1, + I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_OTHER = 0x2, +}; + +enum i40e_filter_program_desc_fd_status { + I40E_FILTER_PROGRAM_DESC_FD_STATUS_NONE = 0x0, + I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID = 0x1, + I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID_4FLEX_BYTES = 0x2, + I40E_FILTER_PROGRAM_DESC_FD_STATUS_8FLEX_BYTES = 0x3, +}; + +#define I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT 23 +#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK (0x1FFUL << \ + I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) + +#define I40E_TXD_FLTR_QW1_DTYPE_SHIFT 0 +#define I40E_TXD_FLTR_QW1_DTYPE_MASK (0xFUL << I40E_TXD_FLTR_QW1_DTYPE_SHIFT) + +#define I40E_TXD_FLTR_QW1_CMD_SHIFT 4 +#define I40E_TXD_FLTR_QW1_CMD_MASK (0xFFFFULL << \ + I40E_TXD_FLTR_QW1_CMD_SHIFT) + +#define I40E_TXD_FLTR_QW1_PCMD_SHIFT (0x0ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT) +#define I40E_TXD_FLTR_QW1_PCMD_MASK (0x7ULL << I40E_TXD_FLTR_QW1_PCMD_SHIFT) + +enum i40e_filter_program_desc_pcmd { + I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE = 0x1, + I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE = 0x2, +}; + +#define I40E_TXD_FLTR_QW1_DEST_SHIFT (0x3ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT) +#define I40E_TXD_FLTR_QW1_DEST_MASK (0x3ULL << I40E_TXD_FLTR_QW1_DEST_SHIFT) + +#define I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT (0x7ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT) +#define I40E_TXD_FLTR_QW1_CNT_ENA_MASK BIT_ULL(I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT) + +#define I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT (0x9ULL + \ + I40E_TXD_FLTR_QW1_CMD_SHIFT) +#define I40E_TXD_FLTR_QW1_FD_STATUS_MASK (0x3ULL << \ + I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) + +#define I40E_TXD_FLTR_QW1_ATR_SHIFT (0xEULL + \ + I40E_TXD_FLTR_QW1_CMD_SHIFT) +#define I40E_TXD_FLTR_QW1_ATR_MASK BIT_ULL(I40E_TXD_FLTR_QW1_ATR_SHIFT) + +#define I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT 20 +#define I40E_TXD_FLTR_QW1_CNTINDEX_MASK (0x1FFUL << \ + I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) + +enum i40e_filter_type { + I40E_FLOW_DIRECTOR_FLTR = 0, + I40E_PE_QUAD_HASH_FLTR = 1, + I40E_ETHERTYPE_FLTR, + I40E_FCOE_CTX_FLTR, + I40E_MAC_VLAN_FLTR, + I40E_HASH_FLTR +}; + +struct i40e_vsi_context { + u16 seid; + u16 uplink_seid; + u16 vsi_number; + u16 vsis_allocated; + u16 vsis_unallocated; + u16 flags; + u8 pf_num; + u8 vf_num; + u8 connection_type; + struct i40e_aqc_vsi_properties_data info; +}; + +struct i40e_veb_context { + u16 seid; + u16 uplink_seid; + u16 veb_number; + u16 vebs_allocated; + u16 vebs_unallocated; + u16 flags; + struct i40e_aqc_get_veb_parameters_completion info; +}; + +/* Statistics collected by each port, VSI, VEB, and S-channel */ +struct i40e_eth_stats { + u64 rx_bytes; /* gorc */ + u64 rx_unicast; /* uprc */ + u64 rx_multicast; /* mprc */ + u64 rx_broadcast; /* bprc */ + u64 rx_discards; /* rdpc */ + u64 rx_unknown_protocol; /* rupp */ + u64 tx_bytes; /* gotc */ + u64 tx_unicast; /* uptc */ + u64 tx_multicast; /* mptc */ + u64 
tx_broadcast; /* bptc */ + u64 tx_discards; /* tdpc */ + u64 tx_errors; /* tepc */ +}; + +/* Statistics collected per VEB per TC */ +struct i40e_veb_tc_stats { + u64 tc_rx_packets[I40E_MAX_TRAFFIC_CLASS]; + u64 tc_rx_bytes[I40E_MAX_TRAFFIC_CLASS]; + u64 tc_tx_packets[I40E_MAX_TRAFFIC_CLASS]; + u64 tc_tx_bytes[I40E_MAX_TRAFFIC_CLASS]; +}; + +/* Statistics collected per function for FCoE */ +struct i40e_fcoe_stats { + u64 rx_fcoe_packets; /* fcoeprc */ + u64 rx_fcoe_dwords; /* focedwrc */ + u64 rx_fcoe_dropped; /* fcoerpdc */ + u64 tx_fcoe_packets; /* fcoeptc */ + u64 tx_fcoe_dwords; /* focedwtc */ + u64 fcoe_bad_fccrc; /* fcoecrc */ + u64 fcoe_last_error; /* fcoelast */ + u64 fcoe_ddp_count; /* fcoeddpc */ +}; + +/* offset to per function FCoE statistics block */ +#define I40E_FCOE_VF_STAT_OFFSET 0 +#define I40E_FCOE_PF_STAT_OFFSET 128 +#define I40E_FCOE_STAT_MAX (I40E_FCOE_PF_STAT_OFFSET + I40E_MAX_PF) + +/* Statistics collected by the MAC */ +struct i40e_hw_port_stats { + /* eth stats collected by the port */ + struct i40e_eth_stats eth; + + /* additional port specific stats */ + u64 tx_dropped_link_down; /* tdold */ + u64 crc_errors; /* crcerrs */ + u64 illegal_bytes; /* illerrc */ + u64 error_bytes; /* errbc */ + u64 mac_local_faults; /* mlfc */ + u64 mac_remote_faults; /* mrfc */ + u64 rx_length_errors; /* rlec */ + u64 link_xon_rx; /* lxonrxc */ + u64 link_xoff_rx; /* lxoffrxc */ + u64 priority_xon_rx[8]; /* pxonrxc[8] */ + u64 priority_xoff_rx[8]; /* pxoffrxc[8] */ + u64 link_xon_tx; /* lxontxc */ + u64 link_xoff_tx; /* lxofftxc */ + u64 priority_xon_tx[8]; /* pxontxc[8] */ + u64 priority_xoff_tx[8]; /* pxofftxc[8] */ + u64 priority_xon_2_xoff[8]; /* pxon2offc[8] */ + u64 rx_size_64; /* prc64 */ + u64 rx_size_127; /* prc127 */ + u64 rx_size_255; /* prc255 */ + u64 rx_size_511; /* prc511 */ + u64 rx_size_1023; /* prc1023 */ + u64 rx_size_1522; /* prc1522 */ + u64 rx_size_big; /* prc9522 */ + u64 rx_undersize; /* ruc */ + u64 rx_fragments; /* rfc */ + u64 rx_oversize; /* roc */ + u64 rx_jabber; /* rjc */ + u64 tx_size_64; /* ptc64 */ + u64 tx_size_127; /* ptc127 */ + u64 tx_size_255; /* ptc255 */ + u64 tx_size_511; /* ptc511 */ + u64 tx_size_1023; /* ptc1023 */ + u64 tx_size_1522; /* ptc1522 */ + u64 tx_size_big; /* ptc9522 */ + u64 mac_short_packet_dropped; /* mspdc */ + u64 checksum_error; /* xec */ + /* flow director stats */ + u64 fd_atr_match; + u64 fd_sb_match; + u64 fd_atr_tunnel_match; + u32 fd_atr_status; + u32 fd_sb_status; + /* EEE LPI */ + u32 tx_lpi_status; + u32 rx_lpi_status; + u64 tx_lpi_count; /* etlpic */ + u64 rx_lpi_count; /* erlpic */ + u64 tx_lpi_duration; + u64 rx_lpi_duration; +}; + +/* Checksum and Shadow RAM pointers */ +#define I40E_SR_NVM_CONTROL_WORD 0x00 +#define I40E_SR_PCIE_ANALOG_CONFIG_PTR 0x03 +#define I40E_SR_PHY_ANALOG_CONFIG_PTR 0x04 +#define I40E_SR_OPTION_ROM_PTR 0x05 +#define I40E_SR_RO_PCIR_REGS_AUTO_LOAD_PTR 0x06 +#define I40E_SR_AUTO_GENERATED_POINTERS_PTR 0x07 +#define I40E_SR_PCIR_REGS_AUTO_LOAD_PTR 0x08 +#define I40E_SR_EMP_GLOBAL_MODULE_PTR 0x09 +#define I40E_SR_RO_PCIE_LCB_PTR 0x0A +#define I40E_SR_EMP_IMAGE_PTR 0x0B +#define I40E_SR_PE_IMAGE_PTR 0x0C +#define I40E_SR_CSR_PROTECTED_LIST_PTR 0x0D +#define I40E_SR_MNG_CONFIG_PTR 0x0E +#define I40E_EMP_MODULE_PTR 0x0F +#define I40E_SR_EMP_MODULE_PTR 0x48 +#define I40E_SR_PBA_FLAGS 0x15 +#define I40E_SR_PBA_BLOCK_PTR 0x16 +#define I40E_SR_BOOT_CONFIG_PTR 0x17 +#define I40E_NVM_OEM_VER_OFF 0x83 +#define I40E_SR_NVM_DEV_STARTER_VERSION 0x18 +#define I40E_SR_NVM_WAKE_ON_LAN 0x19 +#define 
I40E_SR_ALTERNATE_SAN_MAC_ADDRESS_PTR 0x27 +#define I40E_SR_PERMANENT_SAN_MAC_ADDRESS_PTR 0x28 +#define I40E_SR_NVM_MAP_VERSION 0x29 +#define I40E_SR_NVM_IMAGE_VERSION 0x2A +#define I40E_SR_NVM_STRUCTURE_VERSION 0x2B +#define I40E_SR_NVM_EETRACK_LO 0x2D +#define I40E_SR_NVM_EETRACK_HI 0x2E +#define I40E_SR_VPD_PTR 0x2F +#define I40E_SR_PXE_SETUP_PTR 0x30 +#define I40E_SR_PXE_CONFIG_CUST_OPTIONS_PTR 0x31 +#define I40E_SR_NVM_ORIGINAL_EETRACK_LO 0x34 +#define I40E_SR_NVM_ORIGINAL_EETRACK_HI 0x35 +#define I40E_SR_SW_ETHERNET_MAC_ADDRESS_PTR 0x37 +#define I40E_SR_POR_REGS_AUTO_LOAD_PTR 0x38 +#define I40E_SR_EMPR_REGS_AUTO_LOAD_PTR 0x3A +#define I40E_SR_GLOBR_REGS_AUTO_LOAD_PTR 0x3B +#define I40E_SR_CORER_REGS_AUTO_LOAD_PTR 0x3C +#define I40E_SR_PHY_ACTIVITY_LIST_PTR 0x3D +#define I40E_SR_PCIE_ALT_AUTO_LOAD_PTR 0x3E +#define I40E_SR_SW_CHECKSUM_WORD 0x3F +#define I40E_SR_1ST_FREE_PROVISION_AREA_PTR 0x40 +#define I40E_SR_4TH_FREE_PROVISION_AREA_PTR 0x42 +#define I40E_SR_3RD_FREE_PROVISION_AREA_PTR 0x44 +#define I40E_SR_2ND_FREE_PROVISION_AREA_PTR 0x46 +#define I40E_SR_EMP_SR_SETTINGS_PTR 0x48 +#define I40E_SR_FEATURE_CONFIGURATION_PTR 0x49 +#define I40E_SR_CONFIGURATION_METADATA_PTR 0x4D +#define I40E_SR_IMMEDIATE_VALUES_PTR 0x4E + +/* Auxiliary field, mask and shift definition for Shadow RAM and NVM Flash */ +#define I40E_SR_VPD_MODULE_MAX_SIZE 1024 +#define I40E_SR_PCIE_ALT_MODULE_MAX_SIZE 1024 +#define I40E_SR_CONTROL_WORD_1_SHIFT 0x06 +#define I40E_SR_CONTROL_WORD_1_MASK (0x03 << I40E_SR_CONTROL_WORD_1_SHIFT) +#define I40E_SR_CONTROL_WORD_1_NVM_BANK_VALID BIT(5) +#define I40E_SR_NVM_MAP_STRUCTURE_TYPE BIT(12) +#define I40E_PTR_TYPE BIT(15) +#define I40E_SR_OCP_CFG_WORD0 0x2B +#define I40E_SR_OCP_ENABLED BIT(15) + +/* Shadow RAM related */ +#define I40E_SR_SECTOR_SIZE_IN_WORDS 0x800 +#define I40E_SR_BUF_ALIGNMENT 4096 +#define I40E_SR_WORDS_IN_1KB 512 +/* Checksum should be calculated such that after adding all the words, + * including the checksum word itself, the sum should be 0xBABA. 
+ */ +#define I40E_SR_SW_CHECKSUM_BASE 0xBABA + +#define I40E_SRRD_SRCTL_ATTEMPTS 100000 + +/* FCoE Tx context descriptor - Use the i40e_tx_context_desc struct */ + +enum i40E_fcoe_tx_ctx_desc_cmd_bits { + I40E_FCOE_TX_CTX_DESC_OPCODE_SINGLE_SEND = 0x00, /* 4 BITS */ + I40E_FCOE_TX_CTX_DESC_OPCODE_TSO_FC_CLASS2 = 0x01, /* 4 BITS */ + I40E_FCOE_TX_CTX_DESC_OPCODE_TSO_FC_CLASS3 = 0x05, /* 4 BITS */ + I40E_FCOE_TX_CTX_DESC_OPCODE_ETSO_FC_CLASS2 = 0x02, /* 4 BITS */ + I40E_FCOE_TX_CTX_DESC_OPCODE_ETSO_FC_CLASS3 = 0x06, /* 4 BITS */ + I40E_FCOE_TX_CTX_DESC_OPCODE_DWO_FC_CLASS2 = 0x03, /* 4 BITS */ + I40E_FCOE_TX_CTX_DESC_OPCODE_DWO_FC_CLASS3 = 0x07, /* 4 BITS */ + I40E_FCOE_TX_CTX_DESC_OPCODE_DDP_CTX_INVL = 0x08, /* 4 BITS */ + I40E_FCOE_TX_CTX_DESC_OPCODE_DWO_CTX_INVL = 0x09, /* 4 BITS */ + I40E_FCOE_TX_CTX_DESC_RELOFF = 0x10, + I40E_FCOE_TX_CTX_DESC_CLRSEQ = 0x20, + I40E_FCOE_TX_CTX_DESC_DIFENA = 0x40, + I40E_FCOE_TX_CTX_DESC_IL2TAG2 = 0x80 +}; + +/* FCoE DIF/DIX Context descriptor */ +struct i40e_fcoe_difdix_context_desc { + __le64 flags_buff0_buff1_ref; + __le64 difapp_msk_bias; +}; + +#define I40E_FCOE_DIFDIX_CTX_QW0_FLAGS_SHIFT 0 +#define I40E_FCOE_DIFDIX_CTX_QW0_FLAGS_MASK (0xFFFULL << \ + I40E_FCOE_DIFDIX_CTX_QW0_FLAGS_SHIFT) + +enum i40e_fcoe_difdix_ctx_desc_flags_bits { + /* 2 BITS */ + I40E_FCOE_DIFDIX_CTX_DESC_RSVD = 0x0000, + /* 1 BIT */ + I40E_FCOE_DIFDIX_CTX_DESC_APPTYPE_TAGCHK = 0x0000, + /* 1 BIT */ + I40E_FCOE_DIFDIX_CTX_DESC_APPTYPE_TAGNOTCHK = 0x0004, + /* 2 BITS */ + I40E_FCOE_DIFDIX_CTX_DESC_GTYPE_OPAQUE = 0x0000, + /* 2 BITS */ + I40E_FCOE_DIFDIX_CTX_DESC_GTYPE_CHKINTEGRITY = 0x0008, + /* 2 BITS */ + I40E_FCOE_DIFDIX_CTX_DESC_GTYPE_CHKINTEGRITY_APPTAG = 0x0010, + /* 2 BITS */ + I40E_FCOE_DIFDIX_CTX_DESC_GTYPE_CHKINTEGRITY_APPREFTAG = 0x0018, + /* 2 BITS */ + I40E_FCOE_DIFDIX_CTX_DESC_REFTYPE_CNST = 0x0000, + /* 2 BITS */ + I40E_FCOE_DIFDIX_CTX_DESC_REFTYPE_INC1BLK = 0x0020, + /* 2 BITS */ + I40E_FCOE_DIFDIX_CTX_DESC_REFTYPE_APPTAG = 0x0040, + /* 2 BITS */ + I40E_FCOE_DIFDIX_CTX_DESC_REFTYPE_RSVD = 0x0060, + /* 1 BIT */ + I40E_FCOE_DIFDIX_CTX_DESC_DIXMODE_XSUM = 0x0000, + /* 1 BIT */ + I40E_FCOE_DIFDIX_CTX_DESC_DIXMODE_CRC = 0x0080, + /* 2 BITS */ + I40E_FCOE_DIFDIX_CTX_DESC_DIFHOST_UNTAG = 0x0000, + /* 2 BITS */ + I40E_FCOE_DIFDIX_CTX_DESC_DIFHOST_BUF = 0x0100, + /* 2 BITS */ + I40E_FCOE_DIFDIX_CTX_DESC_DIFHOST_RSVD = 0x0200, + /* 2 BITS */ + I40E_FCOE_DIFDIX_CTX_DESC_DIFHOST_EMBDTAGS = 0x0300, + /* 1 BIT */ + I40E_FCOE_DIFDIX_CTX_DESC_DIFLAN_UNTAG = 0x0000, + /* 1 BIT */ + I40E_FCOE_DIFDIX_CTX_DESC_DIFLAN_TAG = 0x0400, + /* 1 BIT */ + I40E_FCOE_DIFDIX_CTX_DESC_DIFBLK_512B = 0x0000, + /* 1 BIT */ + I40E_FCOE_DIFDIX_CTX_DESC_DIFBLK_4K = 0x0800 +}; + +#define I40E_FCOE_DIFDIX_CTX_QW0_BUFF0_SHIFT 12 +#define I40E_FCOE_DIFDIX_CTX_QW0_BUFF0_MASK (0x3FFULL << \ + I40E_FCOE_DIFDIX_CTX_QW0_BUFF0_SHIFT) + +#define I40E_FCOE_DIFDIX_CTX_QW0_BUFF1_SHIFT 22 +#define I40E_FCOE_DIFDIX_CTX_QW0_BUFF1_MASK (0x3FFULL << \ + I40E_FCOE_DIFDIX_CTX_QW0_BUFF1_SHIFT) + +#define I40E_FCOE_DIFDIX_CTX_QW0_REF_SHIFT 32 +#define I40E_FCOE_DIFDIX_CTX_QW0_REF_MASK (0xFFFFFFFFULL << \ + I40E_FCOE_DIFDIX_CTX_QW0_REF_SHIFT) + +#define I40E_FCOE_DIFDIX_CTX_QW1_APP_SHIFT 0 +#define I40E_FCOE_DIFDIX_CTX_QW1_APP_MASK (0xFFFFULL << \ + I40E_FCOE_DIFDIX_CTX_QW1_APP_SHIFT) + +#define I40E_FCOE_DIFDIX_CTX_QW1_APP_MSK_SHIFT 16 +#define I40E_FCOE_DIFDIX_CTX_QW1_APP_MSK_MASK (0xFFFFULL << \ + I40E_FCOE_DIFDIX_CTX_QW1_APP_MSK_SHIFT) + +#define I40E_FCOE_DIFDIX_CTX_QW1_REF_BIAS_SHIFT 32 +#define 
I40E_FCOE_DIFDIX_CTX_QW0_REF_BIAS_MASK (0xFFFFFFFFULL << \ + I40E_FCOE_DIFDIX_CTX_QW1_REF_BIAS_SHIFT) + +/* FCoE DIF/DIX Buffers descriptor */ +struct i40e_fcoe_difdix_buffers_desc { + __le64 buff_addr0; + __le64 buff_addr1; +}; + +/* FCoE DDP Context descriptor */ +struct i40e_fcoe_ddp_context_desc { + __le64 rsvd; + __le64 type_cmd_foff_lsize; +}; + +#define I40E_FCOE_DDP_CTX_QW1_DTYPE_SHIFT 0 +#define I40E_FCOE_DDP_CTX_QW1_DTYPE_MASK (0xFULL << \ + I40E_FCOE_DDP_CTX_QW1_DTYPE_SHIFT) + +#define I40E_FCOE_DDP_CTX_QW1_CMD_SHIFT 4 +#define I40E_FCOE_DDP_CTX_QW1_CMD_MASK (0xFULL << \ + I40E_FCOE_DDP_CTX_QW1_CMD_SHIFT) + +enum i40e_fcoe_ddp_ctx_desc_cmd_bits { + I40E_FCOE_DDP_CTX_DESC_BSIZE_512B = 0x00, /* 2 BITS */ + I40E_FCOE_DDP_CTX_DESC_BSIZE_4K = 0x01, /* 2 BITS */ + I40E_FCOE_DDP_CTX_DESC_BSIZE_8K = 0x02, /* 2 BITS */ + I40E_FCOE_DDP_CTX_DESC_BSIZE_16K = 0x03, /* 2 BITS */ + I40E_FCOE_DDP_CTX_DESC_DIFENA = 0x04, /* 1 BIT */ + I40E_FCOE_DDP_CTX_DESC_LASTSEQH = 0x08, /* 1 BIT */ +}; + +#define I40E_FCOE_DDP_CTX_QW1_FOFF_SHIFT 16 +#define I40E_FCOE_DDP_CTX_QW1_FOFF_MASK (0x3FFFULL << \ + I40E_FCOE_DDP_CTX_QW1_FOFF_SHIFT) + +#define I40E_FCOE_DDP_CTX_QW1_LSIZE_SHIFT 32 +#define I40E_FCOE_DDP_CTX_QW1_LSIZE_MASK (0x3FFFULL << \ + I40E_FCOE_DDP_CTX_QW1_LSIZE_SHIFT) + +/* FCoE DDP/DWO Queue Context descriptor */ +struct i40e_fcoe_queue_context_desc { + __le64 dmaindx_fbase; /* 0:11 DMAINDX, 12:63 FBASE */ + __le64 flen_tph; /* 0:12 FLEN, 13:15 TPH */ +}; + +#define I40E_FCOE_QUEUE_CTX_QW0_DMAINDX_SHIFT 0 +#define I40E_FCOE_QUEUE_CTX_QW0_DMAINDX_MASK (0xFFFULL << \ + I40E_FCOE_QUEUE_CTX_QW0_DMAINDX_SHIFT) + +#define I40E_FCOE_QUEUE_CTX_QW0_FBASE_SHIFT 12 +#define I40E_FCOE_QUEUE_CTX_QW0_FBASE_MASK (0xFFFFFFFFFFFFFULL << \ + I40E_FCOE_QUEUE_CTX_QW0_FBASE_SHIFT) + +#define I40E_FCOE_QUEUE_CTX_QW1_FLEN_SHIFT 0 +#define I40E_FCOE_QUEUE_CTX_QW1_FLEN_MASK (0x1FFFULL << \ + I40E_FCOE_QUEUE_CTX_QW1_FLEN_SHIFT) + +#define I40E_FCOE_QUEUE_CTX_QW1_TPH_SHIFT 13 +#define I40E_FCOE_QUEUE_CTX_QW1_TPH_MASK (0x7ULL << \ + I40E_FCOE_QUEUE_CTX_QW1_FLEN_SHIFT) + +enum i40e_fcoe_queue_ctx_desc_tph_bits { + I40E_FCOE_QUEUE_CTX_DESC_TPHRDESC = 0x1, + I40E_FCOE_QUEUE_CTX_DESC_TPHDATA = 0x2 +}; + +#define I40E_FCOE_QUEUE_CTX_QW1_RECIPE_SHIFT 30 +#define I40E_FCOE_QUEUE_CTX_QW1_RECIPE_MASK (0x3ULL << \ + I40E_FCOE_QUEUE_CTX_QW1_RECIPE_SHIFT) + +/* FCoE DDP/DWO Filter Context descriptor */ +struct i40e_fcoe_filter_context_desc { + __le32 param; + __le16 seqn; + + /* 48:51(0:3) RSVD, 52:63(4:15) DMAINDX */ + __le16 rsvd_dmaindx; + + /* 0:7 FLAGS, 8:52 RSVD, 53:63 LANQ */ + __le64 flags_rsvd_lanq; +}; + +#define I40E_FCOE_FILTER_CTX_QW0_DMAINDX_SHIFT 4 +#define I40E_FCOE_FILTER_CTX_QW0_DMAINDX_MASK (0xFFF << \ + I40E_FCOE_FILTER_CTX_QW0_DMAINDX_SHIFT) + +enum i40e_fcoe_filter_ctx_desc_flags_bits { + I40E_FCOE_FILTER_CTX_DESC_CTYP_DDP = 0x00, + I40E_FCOE_FILTER_CTX_DESC_CTYP_DWO = 0x01, + I40E_FCOE_FILTER_CTX_DESC_ENODE_INIT = 0x00, + I40E_FCOE_FILTER_CTX_DESC_ENODE_RSP = 0x02, + I40E_FCOE_FILTER_CTX_DESC_FC_CLASS2 = 0x00, + I40E_FCOE_FILTER_CTX_DESC_FC_CLASS3 = 0x04 +}; + +#define I40E_FCOE_FILTER_CTX_QW1_FLAGS_SHIFT 0 +#define I40E_FCOE_FILTER_CTX_QW1_FLAGS_MASK (0xFFULL << \ + I40E_FCOE_FILTER_CTX_QW1_FLAGS_SHIFT) + +#define I40E_FCOE_FILTER_CTX_QW1_PCTYPE_SHIFT 8 +#define I40E_FCOE_FILTER_CTX_QW1_PCTYPE_MASK (0x3FULL << \ + I40E_FCOE_FILTER_CTX_QW1_PCTYPE_SHIFT) + +#define I40E_FCOE_FILTER_CTX_QW1_LANQINDX_SHIFT 53 +#define I40E_FCOE_FILTER_CTX_QW1_LANQINDX_MASK (0x7FFULL << \ + I40E_FCOE_FILTER_CTX_QW1_LANQINDX_SHIFT) + 
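/*
 * Illustrative sketch only, not part of the imported header: the Shadow RAM
 * software checksum described a little earlier (I40E_SR_SW_CHECKSUM_WORD /
 * I40E_SR_SW_CHECKSUM_BASE) is defined so that the 16-bit sum of all Shadow
 * RAM words, including the checksum word itself, equals 0xBABA.  A
 * simplified verification over an in-memory copy could look like this; the
 * real NVM code also excludes certain modules (e.g. the VPD area) from the
 * sum, which this sketch ignores.
 */
static inline bool
i40e_example_sr_checksum_ok(const u16 *sr_words, u32 nwords)
{
	u16 sum = 0;
	u32 i;

	for (i = 0; i < nwords; i++)
		sum = (u16)(sum + sr_words[i]);	/* 16-bit wrapping sum */

	return sum == I40E_SR_SW_CHECKSUM_BASE;
}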
+enum i40e_switch_element_types { + I40E_SWITCH_ELEMENT_TYPE_MAC = 1, + I40E_SWITCH_ELEMENT_TYPE_PF = 2, + I40E_SWITCH_ELEMENT_TYPE_VF = 3, + I40E_SWITCH_ELEMENT_TYPE_EMP = 4, + I40E_SWITCH_ELEMENT_TYPE_BMC = 6, + I40E_SWITCH_ELEMENT_TYPE_PE = 16, + I40E_SWITCH_ELEMENT_TYPE_VEB = 17, + I40E_SWITCH_ELEMENT_TYPE_PA = 18, + I40E_SWITCH_ELEMENT_TYPE_VSI = 19, +}; + +/* Supported EtherType filters */ +enum i40e_ether_type_index { + I40E_ETHER_TYPE_1588 = 0, + I40E_ETHER_TYPE_FIP = 1, + I40E_ETHER_TYPE_OUI_EXTENDED = 2, + I40E_ETHER_TYPE_MAC_CONTROL = 3, + I40E_ETHER_TYPE_LLDP = 4, + I40E_ETHER_TYPE_EVB_PROTOCOL1 = 5, + I40E_ETHER_TYPE_EVB_PROTOCOL2 = 6, + I40E_ETHER_TYPE_QCN_CNM = 7, + I40E_ETHER_TYPE_8021X = 8, + I40E_ETHER_TYPE_ARP = 9, + I40E_ETHER_TYPE_RSV1 = 10, + I40E_ETHER_TYPE_RSV2 = 11, +}; + +/* Filter context base size is 1K */ +#define I40E_HASH_FILTER_BASE_SIZE 1024 +/* Supported Hash filter values */ +enum i40e_hash_filter_size { + I40E_HASH_FILTER_SIZE_1K = 0, + I40E_HASH_FILTER_SIZE_2K = 1, + I40E_HASH_FILTER_SIZE_4K = 2, + I40E_HASH_FILTER_SIZE_8K = 3, + I40E_HASH_FILTER_SIZE_16K = 4, + I40E_HASH_FILTER_SIZE_32K = 5, + I40E_HASH_FILTER_SIZE_64K = 6, + I40E_HASH_FILTER_SIZE_128K = 7, + I40E_HASH_FILTER_SIZE_256K = 8, + I40E_HASH_FILTER_SIZE_512K = 9, + I40E_HASH_FILTER_SIZE_1M = 10, +}; + +/* DMA context base size is 0.5K */ +#define I40E_DMA_CNTX_BASE_SIZE 512 +/* Supported DMA context values */ +enum i40e_dma_cntx_size { + I40E_DMA_CNTX_SIZE_512 = 0, + I40E_DMA_CNTX_SIZE_1K = 1, + I40E_DMA_CNTX_SIZE_2K = 2, + I40E_DMA_CNTX_SIZE_4K = 3, + I40E_DMA_CNTX_SIZE_8K = 4, + I40E_DMA_CNTX_SIZE_16K = 5, + I40E_DMA_CNTX_SIZE_32K = 6, + I40E_DMA_CNTX_SIZE_64K = 7, + I40E_DMA_CNTX_SIZE_128K = 8, + I40E_DMA_CNTX_SIZE_256K = 9, +}; + +/* Supported Hash look up table (LUT) sizes */ +enum i40e_hash_lut_size { + I40E_HASH_LUT_SIZE_128 = 0, + I40E_HASH_LUT_SIZE_512 = 1, +}; + +/* Structure to hold a per PF filter control settings */ +struct i40e_filter_control_settings { + /* number of PE Quad Hash filter buckets */ + enum i40e_hash_filter_size pe_filt_num; + /* number of PE Quad Hash contexts */ + enum i40e_dma_cntx_size pe_cntx_num; + /* number of FCoE filter buckets */ + enum i40e_hash_filter_size fcoe_filt_num; + /* number of FCoE DDP contexts */ + enum i40e_dma_cntx_size fcoe_cntx_num; + /* size of the Hash LUT */ + enum i40e_hash_lut_size hash_lut_size; + /* enable FDIR filters for PF and its VFs */ + bool enable_fdir; + /* enable Ethertype filters for PF and its VFs */ + bool enable_ethtype; + /* enable MAC/VLAN filters for PF and its VFs */ + bool enable_macvlan; +}; + +/* Structure to hold device level control filter counts */ +struct i40e_control_filter_stats { + u16 mac_etype_used; /* Used perfect match MAC/EtherType filters */ + u16 etype_used; /* Used perfect EtherType filters */ + u16 mac_etype_free; /* Un-used perfect match MAC/EtherType filters */ + u16 etype_free; /* Un-used perfect EtherType filters */ +}; + +enum i40e_reset_type { + I40E_RESET_POR = 0, + I40E_RESET_CORER = 1, + I40E_RESET_GLOBR = 2, + I40E_RESET_EMPR = 3, +}; + +/* IEEE 802.1AB LLDP Agent Variables from NVM */ +#define I40E_NVM_LLDP_CFG_PTR 0x06 +#define I40E_SR_LLDP_CFG_PTR 0x31 +struct i40e_lldp_variables { + u16 length; + u16 adminstatus; + u16 msgfasttx; + u16 msgtxinterval; + u16 txparams; + u16 timers; + u16 crc8; +}; + +/* Offsets into Alternate Ram */ +#define I40E_ALT_STRUCT_FIRST_PF_OFFSET 0 /* in dwords */ +#define I40E_ALT_STRUCT_DWORDS_PER_PF 64 /* in dwords */ +#define 
I40E_ALT_STRUCT_OUTER_VLAN_TAG_OFFSET 0xD /* in dwords */ +#define I40E_ALT_STRUCT_USER_PRIORITY_OFFSET 0xC /* in dwords */ +#define I40E_ALT_STRUCT_MIN_BW_OFFSET 0xE /* in dwords */ +#define I40E_ALT_STRUCT_MAX_BW_OFFSET 0xF /* in dwords */ + +/* Alternate Ram Bandwidth Masks */ +#define I40E_ALT_BW_VALUE_MASK 0xFF +#define I40E_ALT_BW_RELATIVE_MASK 0x40000000 +#define I40E_ALT_BW_VALID_MASK 0x80000000 + +/* RSS Hash Table Size */ +#define I40E_PFQF_CTL_0_HASHLUTSIZE_512 0x00010000 + +/* INPUT SET MASK for RSS, flow director, and flexible payload */ +#define I40E_L3_SRC_SHIFT 47 +#define I40E_L3_SRC_MASK (0x3ULL << I40E_L3_SRC_SHIFT) +#define I40E_L3_V6_SRC_SHIFT 43 +#define I40E_L3_V6_SRC_MASK (0xFFULL << I40E_L3_V6_SRC_SHIFT) +#define I40E_L3_DST_SHIFT 35 +#define I40E_L3_DST_MASK (0x3ULL << I40E_L3_DST_SHIFT) +#define I40E_L3_V6_DST_SHIFT 35 +#define I40E_L3_V6_DST_MASK (0xFFULL << I40E_L3_V6_DST_SHIFT) +#define I40E_L4_SRC_SHIFT 34 +#define I40E_L4_SRC_MASK (0x1ULL << I40E_L4_SRC_SHIFT) +#define I40E_L4_DST_SHIFT 33 +#define I40E_L4_DST_MASK (0x1ULL << I40E_L4_DST_SHIFT) +#define I40E_VERIFY_TAG_SHIFT 31 +#define I40E_VERIFY_TAG_MASK (0x3ULL << I40E_VERIFY_TAG_SHIFT) + +#define I40E_FLEX_50_SHIFT 13 +#define I40E_FLEX_50_MASK (0x1ULL << I40E_FLEX_50_SHIFT) +#define I40E_FLEX_51_SHIFT 12 +#define I40E_FLEX_51_MASK (0x1ULL << I40E_FLEX_51_SHIFT) +#define I40E_FLEX_52_SHIFT 11 +#define I40E_FLEX_52_MASK (0x1ULL << I40E_FLEX_52_SHIFT) +#define I40E_FLEX_53_SHIFT 10 +#define I40E_FLEX_53_MASK (0x1ULL << I40E_FLEX_53_SHIFT) +#define I40E_FLEX_54_SHIFT 9 +#define I40E_FLEX_54_MASK (0x1ULL << I40E_FLEX_54_SHIFT) +#define I40E_FLEX_55_SHIFT 8 +#define I40E_FLEX_55_MASK (0x1ULL << I40E_FLEX_55_SHIFT) +#define I40E_FLEX_56_SHIFT 7 +#define I40E_FLEX_56_MASK (0x1ULL << I40E_FLEX_56_SHIFT) +#define I40E_FLEX_57_SHIFT 6 +#define I40E_FLEX_57_MASK (0x1ULL << I40E_FLEX_57_SHIFT) + +/* Version format for Dynamic Device Personalization(DDP) */ +struct i40e_ddp_version { + u8 major; + u8 minor; + u8 update; + u8 draft; +}; + +#define I40E_DDP_NAME_SIZE 32 + +/* Package header */ +struct i40e_package_header { + struct i40e_ddp_version version; + u32 segment_count; + u32 segment_offset[1]; +}; + +/* Generic segment header */ +struct i40e_generic_seg_header { +#define SEGMENT_TYPE_METADATA 0x00000001 +#define SEGMENT_TYPE_NOTES 0x00000002 +#define SEGMENT_TYPE_I40E 0x00000011 +#define SEGMENT_TYPE_X722 0x00000012 + u32 type; + struct i40e_ddp_version version; + u32 size; + char name[I40E_DDP_NAME_SIZE]; +}; + +struct i40e_metadata_segment { + struct i40e_generic_seg_header header; + struct i40e_ddp_version version; +#define I40E_DDP_TRACKID_RDONLY 0 +#define I40E_DDP_TRACKID_INVALID 0xFFFFFFFF + u32 track_id; + char name[I40E_DDP_NAME_SIZE]; +}; + +struct i40e_device_id_entry { + u32 vendor_dev_id; + u32 sub_vendor_dev_id; +}; + +struct i40e_profile_segment { + struct i40e_generic_seg_header header; + struct i40e_ddp_version version; + char name[I40E_DDP_NAME_SIZE]; + u32 device_table_count; + struct i40e_device_id_entry device_table[1]; +}; + +struct i40e_section_table { + u32 section_count; + u32 section_offset[1]; +}; + +struct i40e_profile_section_header { + u16 tbl_size; + u16 data_end; + struct { +#define SECTION_TYPE_INFO 0x00000010 +#define SECTION_TYPE_MMIO 0x00000800 +#define SECTION_TYPE_RB_MMIO 0x00001800 +#define SECTION_TYPE_AQ 0x00000801 +#define SECTION_TYPE_RB_AQ 0x00001801 +#define SECTION_TYPE_NOTE 0x80000000 +#define SECTION_TYPE_NAME 0x80000001 +#define SECTION_TYPE_PROTO 0x80000002 
+#define SECTION_TYPE_PCTYPE 0x80000003 +#define SECTION_TYPE_PTYPE 0x80000004 + u32 type; + u32 offset; + u32 size; + } section; +}; + +struct i40e_profile_tlv_section_record { + u8 rtype; + u8 type; + u16 len; + u8 data[12]; +}; + +/* Generic AQ section in proflie */ +struct i40e_profile_aq_section { + u16 opcode; + u16 flags; + u8 param[16]; + u16 datalen; + u8 data[1]; +}; + +struct i40e_profile_info { + u32 track_id; + struct i40e_ddp_version version; + u8 op; +#define I40E_DDP_ADD_TRACKID 0x01 +#define I40E_DDP_REMOVE_TRACKID 0x02 + u8 reserved[7]; + u8 name[I40E_DDP_NAME_SIZE]; +}; + +#define I40E_BCM_PHY_PCS_STATUS1_PAGE 0x3 +#define I40E_BCM_PHY_PCS_STATUS1_REG 0x0001 +#define I40E_BCM_PHY_PCS_STATUS1_RX_LPI BIT(8) +#define I40E_BCM_PHY_PCS_STATUS1_TX_LPI BIT(9) + +#endif /* _I40E_TYPE_H_ */ diff --git a/src/spdk/dpdk/drivers/net/i40e/base/meson.build b/src/spdk/dpdk/drivers/net/i40e/base/meson.build new file mode 100644 index 000000000..8bc6a0fa0 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/i40e/base/meson.build @@ -0,0 +1,30 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2017-2020 Intel Corporation + +sources = [ + 'i40e_adminq.c', + 'i40e_common.c', + 'i40e_dcb.c', + 'i40e_diag.c', + 'i40e_hmc.c', + 'i40e_lan_hmc.c', + 'i40e_nvm.c' +] + +error_cflags = ['-Wno-sign-compare', '-Wno-unused-value', + '-Wno-format', '-Wno-format-security', + '-Wno-format-nonliteral', + '-Wno-strict-aliasing', '-Wno-unused-but-set-variable', + '-Wno-unused-parameter', +] +c_args = cflags +foreach flag: error_cflags + if cc.has_argument(flag) + c_args += flag + endif +endforeach + +base_lib = static_library('i40e_base', sources, + dependencies: static_rte_eal, + c_args: c_args) +base_objs = base_lib.extract_all_objects() diff --git a/src/spdk/dpdk/drivers/net/i40e/base/virtchnl.h b/src/spdk/dpdk/drivers/net/i40e/base/virtchnl.h new file mode 100644 index 000000000..4f498ca45 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/i40e/base/virtchnl.h @@ -0,0 +1,761 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + +#ifndef _VIRTCHNL_H_ +#define _VIRTCHNL_H_ + +/* Description: + * This header file describes the VF-PF communication protocol used + * by the drivers for all devices starting from our 40G product line + * + * Admin queue buffer usage: + * desc->opcode is always aqc_opc_send_msg_to_pf + * flags, retval, datalen, and data addr are all used normally. + * The Firmware copies the cookie fields when sending messages between the + * PF and VF, but uses all other fields internally. Due to this limitation, + * we must send all messages as "indirect", i.e. using an external buffer. + * + * All the VSI indexes are relative to the VF. Each VF can have maximum of + * three VSIs. All the queue indexes are relative to the VSI. Each VF can + * have a maximum of sixteen queues for all of its VSIs. + * + * The PF is required to return a status code in v_retval for all messages + * except RESET_VF, which does not require any response. The return value + * is of status_code type, defined in the shared type.h. + * + * In general, VF driver initialization should roughly follow the order of + * these opcodes. The VF driver must first validate the API version of the + * PF driver, then request a reset, then get resources, then configure + * queues and interrupts. After these operations are complete, the VF + * driver may start its queues, optionally add MAC and VLAN filters, and + * process traffic. 
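+ *
+ * As an illustrative, non-normative sketch, a minimal VF bring-up would
+ * issue opcodes roughly in this order:
+ *
+ *	VIRTCHNL_OP_VERSION		(check major/minor compatibility)
+ *	VIRTCHNL_OP_RESET_VF		(then poll VFGEN_RSTAT until complete)
+ *	VIRTCHNL_OP_GET_VF_RESOURCES
+ *	VIRTCHNL_OP_CONFIG_VSI_QUEUES
+ *	VIRTCHNL_OP_CONFIG_IRQ_MAP
+ *	VIRTCHNL_OP_ENABLE_QUEUES
+ *	VIRTCHNL_OP_ADD_ETH_ADDR / VIRTCHNL_OP_ADD_VLAN	(optional)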
+ */ + +/* START GENERIC DEFINES + * Need to ensure the following enums and defines hold the same meaning and + * value in current and future projects + */ + +/* Error Codes */ +enum virtchnl_status_code { + VIRTCHNL_STATUS_SUCCESS = 0, + VIRTCHNL_STATUS_ERR_PARAM = -5, + VIRTCHNL_STATUS_ERR_NO_MEMORY = -18, + VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH = -38, + VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR = -39, + VIRTCHNL_STATUS_ERR_INVALID_VF_ID = -40, + VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR = -53, + VIRTCHNL_STATUS_ERR_NOT_SUPPORTED = -64, +}; + +/* Backward compatibility */ +#define VIRTCHNL_ERR_PARAM VIRTCHNL_STATUS_ERR_PARAM +#define VIRTCHNL_STATUS_NOT_SUPPORTED VIRTCHNL_STATUS_ERR_NOT_SUPPORTED + +#define VIRTCHNL_LINK_SPEED_2_5GB_SHIFT 0x0 +#define VIRTCHNL_LINK_SPEED_100MB_SHIFT 0x1 +#define VIRTCHNL_LINK_SPEED_1000MB_SHIFT 0x2 +#define VIRTCHNL_LINK_SPEED_10GB_SHIFT 0x3 +#define VIRTCHNL_LINK_SPEED_40GB_SHIFT 0x4 +#define VIRTCHNL_LINK_SPEED_20GB_SHIFT 0x5 +#define VIRTCHNL_LINK_SPEED_25GB_SHIFT 0x6 +#define VIRTCHNL_LINK_SPEED_5GB_SHIFT 0x7 + +enum virtchnl_link_speed { + VIRTCHNL_LINK_SPEED_UNKNOWN = 0, + VIRTCHNL_LINK_SPEED_100MB = BIT(VIRTCHNL_LINK_SPEED_100MB_SHIFT), + VIRTCHNL_LINK_SPEED_1GB = BIT(VIRTCHNL_LINK_SPEED_1000MB_SHIFT), + VIRTCHNL_LINK_SPEED_10GB = BIT(VIRTCHNL_LINK_SPEED_10GB_SHIFT), + VIRTCHNL_LINK_SPEED_40GB = BIT(VIRTCHNL_LINK_SPEED_40GB_SHIFT), + VIRTCHNL_LINK_SPEED_20GB = BIT(VIRTCHNL_LINK_SPEED_20GB_SHIFT), + VIRTCHNL_LINK_SPEED_25GB = BIT(VIRTCHNL_LINK_SPEED_25GB_SHIFT), + VIRTCHNL_LINK_SPEED_2_5GB = BIT(VIRTCHNL_LINK_SPEED_2_5GB_SHIFT), + VIRTCHNL_LINK_SPEED_5GB = BIT(VIRTCHNL_LINK_SPEED_5GB_SHIFT), +}; + +/* for hsplit_0 field of Rx HMC context */ +/* deprecated with AVF 1.0 */ +enum virtchnl_rx_hsplit { + VIRTCHNL_RX_HSPLIT_NO_SPLIT = 0, + VIRTCHNL_RX_HSPLIT_SPLIT_L2 = 1, + VIRTCHNL_RX_HSPLIT_SPLIT_IP = 2, + VIRTCHNL_RX_HSPLIT_SPLIT_TCP_UDP = 4, + VIRTCHNL_RX_HSPLIT_SPLIT_SCTP = 8, +}; + +#define VIRTCHNL_ETH_LENGTH_OF_ADDRESS 6 +/* END GENERIC DEFINES */ + +/* Opcodes for VF-PF communication. These are placed in the v_opcode field + * of the virtchnl_msg structure. + */ +enum virtchnl_ops { +/* The PF sends status change events to VFs using + * the VIRTCHNL_OP_EVENT opcode. + * VFs send requests to the PF using the other ops. + * Use of "advanced opcode" features must be negotiated as part of capabilities + * exchange and are not considered part of base mode feature set. 
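+ * For example, a VF is expected to issue VIRTCHNL_OP_IWARP only after the
+ * PF has reported VIRTCHNL_VF_OFFLOAD_IWARP in vf_cap_flags.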
+ */ + VIRTCHNL_OP_UNKNOWN = 0, + VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */ + VIRTCHNL_OP_RESET_VF = 2, + VIRTCHNL_OP_GET_VF_RESOURCES = 3, + VIRTCHNL_OP_CONFIG_TX_QUEUE = 4, + VIRTCHNL_OP_CONFIG_RX_QUEUE = 5, + VIRTCHNL_OP_CONFIG_VSI_QUEUES = 6, + VIRTCHNL_OP_CONFIG_IRQ_MAP = 7, + VIRTCHNL_OP_ENABLE_QUEUES = 8, + VIRTCHNL_OP_DISABLE_QUEUES = 9, + VIRTCHNL_OP_ADD_ETH_ADDR = 10, + VIRTCHNL_OP_DEL_ETH_ADDR = 11, + VIRTCHNL_OP_ADD_VLAN = 12, + VIRTCHNL_OP_DEL_VLAN = 13, + VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14, + VIRTCHNL_OP_GET_STATS = 15, + VIRTCHNL_OP_RSVD = 16, + VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */ +#ifdef VIRTCHNL_SOL_VF_SUPPORT + VIRTCHNL_OP_GET_ADDNL_SOL_CONFIG = 19, +#endif +#ifdef VIRTCHNL_IWARP + VIRTCHNL_OP_IWARP = 20, /* advanced opcode */ + VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP = 21, /* advanced opcode */ + VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP = 22, /* advanced opcode */ +#endif + VIRTCHNL_OP_CONFIG_RSS_KEY = 23, + VIRTCHNL_OP_CONFIG_RSS_LUT = 24, + VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25, + VIRTCHNL_OP_SET_RSS_HENA = 26, + VIRTCHNL_OP_ENABLE_VLAN_STRIPPING = 27, + VIRTCHNL_OP_DISABLE_VLAN_STRIPPING = 28, + VIRTCHNL_OP_REQUEST_QUEUES = 29, + +}; + +/* These macros are used to generate compilation errors if a structure/union + * is not exactly the correct length. It gives a divide by zero error if the + * structure is not of the correct size, otherwise it creates an enum that is + * never used. + */ +#define VIRTCHNL_CHECK_STRUCT_LEN(n, X) enum virtchnl_static_assert_enum_##X \ + { virtchnl_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) } + +/* Virtual channel message descriptor. This overlays the admin queue + * descriptor. All other data is passed in external buffers. + */ + +struct virtchnl_msg { + u8 pad[8]; /* AQ flags/opcode/len/retval fields */ + enum virtchnl_ops v_opcode; /* avoid confusion with desc->opcode */ + enum virtchnl_status_code v_retval; /* ditto for desc->retval */ + u32 vfid; /* used by PF when sending to VF */ +}; + +VIRTCHNL_CHECK_STRUCT_LEN(20, virtchnl_msg); + +/* Message descriptions and data structures.*/ + +/* VIRTCHNL_OP_VERSION + * VF posts its version number to the PF. PF responds with its version number + * in the same format, along with a return code. + * Reply from PF has its major/minor versions also in param0 and param1. + * If there is a major version mismatch, then the VF cannot operate. + * If there is a minor version mismatch, then the VF can operate but should + * add a warning to the system log. + * + * This enum element MUST always be specified as == 1, regardless of other + * changes in the API. The PF must always respond to this message without + * error regardless of version mismatch. + */ +#define VIRTCHNL_VERSION_MAJOR 1 +#define VIRTCHNL_VERSION_MINOR 1 +#define VIRTCHNL_VERSION_MINOR_NO_VF_CAPS 0 + +struct virtchnl_version_info { + u32 major; + u32 minor; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_version_info); + +#define VF_IS_V10(_v) (((_v)->major == 1) && ((_v)->minor == 0)) +#define VF_IS_V11(_ver) (((_ver)->major == 1) && ((_ver)->minor == 1)) + +/* VIRTCHNL_OP_RESET_VF + * VF sends this request to PF with no parameters + * PF does NOT respond! VF driver must delay then poll VFGEN_RSTAT register + * until reset completion is indicated. The admin queue must be reinitialized + * after this operation. 
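+ *
+ * A rough VF-side sketch; rd32() and the 0x3 state mask are placeholders,
+ * not definitions from this header:
+ *
+ *	send VIRTCHNL_OP_RESET_VF;
+ *	do {
+ *		delay();
+ *		rstat = rd32(hw, VFGEN_RSTAT) & 0x3;
+ *	} while (rstat != VIRTCHNL_VFR_COMPLETED &&
+ *		 rstat != VIRTCHNL_VFR_VFACTIVE);
+ *	reinitialize admin queue;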
+ * + * When reset is complete, PF must ensure that all queues in all VSIs associated + * with the VF are stopped, all queue configurations in the HMC are set to 0, + * and all MAC and VLAN filters (except the default MAC address) on all VSIs + * are cleared. + */ + +/* VSI types that use VIRTCHNL interface for VF-PF communication. VSI_SRIOV + * vsi_type should always be 6 for backward compatibility. Add other fields + * as needed. + */ +enum virtchnl_vsi_type { + VIRTCHNL_VSI_TYPE_INVALID = 0, + VIRTCHNL_VSI_SRIOV = 6, +}; + +/* VIRTCHNL_OP_GET_VF_RESOURCES + * Version 1.0 VF sends this request to PF with no parameters + * Version 1.1 VF sends this request to PF with u32 bitmap of its capabilities + * PF responds with an indirect message containing + * virtchnl_vf_resource and one or more + * virtchnl_vsi_resource structures. + */ + +struct virtchnl_vsi_resource { + u16 vsi_id; + u16 num_queue_pairs; + enum virtchnl_vsi_type vsi_type; + u16 qset_handle; + u8 default_mac_addr[VIRTCHNL_ETH_LENGTH_OF_ADDRESS]; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource); + +/* VF capability flags + * VIRTCHNL_VF_OFFLOAD_L2 flag is inclusive of base mode L2 offloads including + * TX/RX Checksum offloading and TSO for non-tunnelled packets. + */ +#define VIRTCHNL_VF_OFFLOAD_L2 0x00000001 +#define VIRTCHNL_VF_OFFLOAD_IWARP 0x00000002 +#define VIRTCHNL_VF_OFFLOAD_RSVD 0x00000004 +#define VIRTCHNL_VF_OFFLOAD_RSS_AQ 0x00000008 +#define VIRTCHNL_VF_OFFLOAD_RSS_REG 0x00000010 +#define VIRTCHNL_VF_OFFLOAD_WB_ON_ITR 0x00000020 +#define VIRTCHNL_VF_OFFLOAD_REQ_QUEUES 0x00000040 +#define VIRTCHNL_VF_OFFLOAD_CRC 0x00000080 +#define VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000 +#define VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000 +#define VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000 +#define VIRTCHNL_VF_OFFLOAD_RSS_PF 0X00080000 +#define VIRTCHNL_VF_OFFLOAD_ENCAP 0X00100000 +#define VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0X00200000 +#define VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM 0X00400000 + +#define VF_BASE_MODE_OFFLOADS (VIRTCHNL_VF_OFFLOAD_L2 | \ + VIRTCHNL_VF_OFFLOAD_VLAN | \ + VIRTCHNL_VF_OFFLOAD_RSS_PF) + +struct virtchnl_vf_resource { + u16 num_vsis; + u16 num_queue_pairs; + u16 max_vectors; + u16 max_mtu; + + u32 vf_cap_flags; + u32 rss_key_size; + u32 rss_lut_size; + + struct virtchnl_vsi_resource vsi_res[1]; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(36, virtchnl_vf_resource); + +/* VIRTCHNL_OP_CONFIG_TX_QUEUE + * VF sends this message to set up parameters for one TX queue. + * External data buffer contains one instance of virtchnl_txq_info. + * PF configures requested queue and returns a status code. + */ + +/* Tx queue config info */ +struct virtchnl_txq_info { + u16 vsi_id; + u16 queue_id; + u16 ring_len; /* number of descriptors, multiple of 8 */ + u16 headwb_enabled; /* deprecated with AVF 1.0 */ + u64 dma_ring_addr; + u64 dma_headwb_addr; /* deprecated with AVF 1.0 */ +}; + +VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_txq_info); + +/* VIRTCHNL_OP_CONFIG_RX_QUEUE + * VF sends this message to set up parameters for one RX queue. + * External data buffer contains one instance of virtchnl_rxq_info. + * PF configures requested queue and returns a status code. The + * crc_disable flag disables CRC stripping on the VF. Setting + * the crc_disable flag to 1 will disable CRC stripping for each + * queue in the VF where the flag is set. The VIRTCHNL_VF_OFFLOAD_CRC + * offload must have been set prior to sending this info or the PF + * will ignore the request. This flag should be set the same for + * all of the queues for a VF. 
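+ *
+ * Illustrative use of the structure defined below; the numeric values and
+ * the 'ring_iova' variable are placeholders, not recommendations:
+ *
+ *	struct virtchnl_rxq_info rxq = {0};
+ *
+ *	rxq.vsi_id = vsi_res->vsi_id;
+ *	rxq.queue_id = 0;
+ *	rxq.ring_len = 512;		(multiple of 32)
+ *	rxq.databuffer_size = 2048;
+ *	rxq.max_pkt_size = 1518;
+ *	rxq.dma_ring_addr = ring_iova;
+ *	rxq.crc_disable = 0;	(1 only if VIRTCHNL_VF_OFFLOAD_CRC negotiated)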
+ */ + +/* Rx queue config info */ +struct virtchnl_rxq_info { + u16 vsi_id; + u16 queue_id; + u32 ring_len; /* number of descriptors, multiple of 32 */ + u16 hdr_size; + u16 splithdr_enabled; /* deprecated with AVF 1.0 */ + u32 databuffer_size; + u32 max_pkt_size; + u8 crc_disable; + u8 pad1[3]; + u64 dma_ring_addr; + enum virtchnl_rx_hsplit rx_split_pos; /* deprecated with AVF 1.0 */ + u32 pad2; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_rxq_info); + +/* VIRTCHNL_OP_CONFIG_VSI_QUEUES + * VF sends this message to set parameters for all active TX and RX queues + * associated with the specified VSI. + * PF configures queues and returns status. + * If the number of queues specified is greater than the number of queues + * associated with the VSI, an error is returned and no queues are configured. + */ +struct virtchnl_queue_pair_info { + /* NOTE: vsi_id and queue_id should be identical for both queues. */ + struct virtchnl_txq_info txq; + struct virtchnl_rxq_info rxq; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(64, virtchnl_queue_pair_info); + +struct virtchnl_vsi_queue_config_info { + u16 vsi_id; + u16 num_queue_pairs; + u32 pad; + struct virtchnl_queue_pair_info qpair[1]; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_vsi_queue_config_info); + +/* VIRTCHNL_OP_REQUEST_QUEUES + * VF sends this message to request the PF to allocate additional queues to + * this VF. Each VF gets a guaranteed number of queues on init but asking for + * additional queues must be negotiated. This is a best effort request as it + * is possible the PF does not have enough queues left to support the request. + * If the PF cannot support the number requested it will respond with the + * maximum number it is able to support. If the request is successful, PF will + * then reset the VF to institute required changes. + */ + +/* VF resource request */ +struct virtchnl_vf_res_request { + u16 num_queue_pairs; +}; + +/* VIRTCHNL_OP_CONFIG_IRQ_MAP + * VF uses this message to map vectors to queues. + * The rxq_map and txq_map fields are bitmaps used to indicate which queues + * are to be associated with the specified vector. + * The "other" causes are always mapped to vector 0. + * PF configures interrupt mapping and returns status. + */ +struct virtchnl_vector_map { + u16 vsi_id; + u16 vector_id; + u16 rxq_map; + u16 txq_map; + u16 rxitr_idx; + u16 txitr_idx; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_vector_map); + +struct virtchnl_irq_map_info { + u16 num_vectors; + struct virtchnl_vector_map vecmap[1]; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(14, virtchnl_irq_map_info); + +/* VIRTCHNL_OP_ENABLE_QUEUES + * VIRTCHNL_OP_DISABLE_QUEUES + * VF sends these message to enable or disable TX/RX queue pairs. + * The queues fields are bitmaps indicating which queues to act upon. + * (Currently, we only support 16 queues per VF, but we make the field + * u32 to allow for expansion.) + * PF performs requested action and returns status. + */ +struct virtchnl_queue_select { + u16 vsi_id; + u16 pad; + u32 rx_queues; + u32 tx_queues; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_select); + +/* VIRTCHNL_OP_ADD_ETH_ADDR + * VF sends this message in order to add one or more unicast or multicast + * address filters for the specified VSI. + * PF adds the filters and returns status. + */ + +/* VIRTCHNL_OP_DEL_ETH_ADDR + * VF sends this message in order to remove one or more unicast or multicast + * filters for the specified VSI. + * PF removes the filters and returns status. 
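+ *
+ * The address list is variable-length.  A sketch of building an n-entry
+ * request; alloc_zeroed() and 'mac' are placeholders:
+ *
+ *	len = sizeof(struct virtchnl_ether_addr_list) +
+ *	      n * sizeof(struct virtchnl_ether_addr);
+ *	veal = alloc_zeroed(len);
+ *	veal->vsi_id = vsi_id;
+ *	veal->num_elements = n;
+ *	memcpy(veal->list[0].addr, mac, VIRTCHNL_ETH_LENGTH_OF_ADDRESS);
+ *
+ * This sizing matches the length check in virtchnl_vc_validate_vf_msg().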
+ */ + +struct virtchnl_ether_addr { + u8 addr[VIRTCHNL_ETH_LENGTH_OF_ADDRESS]; + u8 pad[2]; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_ether_addr); + +struct virtchnl_ether_addr_list { + u16 vsi_id; + u16 num_elements; + struct virtchnl_ether_addr list[1]; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_ether_addr_list); + +#ifdef VIRTCHNL_SOL_VF_SUPPORT +/* VIRTCHNL_OP_GET_ADDNL_SOL_CONFIG + * VF sends this message to get the default MTU and list of additional ethernet + * addresses it is allowed to use. + * PF responds with an indirect message containing + * virtchnl_addnl_solaris_config with zero or more + * virtchnl_ether_addr structures. + * + * It is expected that this operation will only ever be needed for Solaris VFs + * running under a Solaris PF. + */ +struct virtchnl_addnl_solaris_config { + u16 default_mtu; + struct virtchnl_ether_addr_list al; +}; + +#endif +/* VIRTCHNL_OP_ADD_VLAN + * VF sends this message to add one or more VLAN tag filters for receives. + * PF adds the filters and returns status. + * If a port VLAN is configured by the PF, this operation will return an + * error to the VF. + */ + +/* VIRTCHNL_OP_DEL_VLAN + * VF sends this message to remove one or more VLAN tag filters for receives. + * PF removes the filters and returns status. + * If a port VLAN is configured by the PF, this operation will return an + * error to the VF. + */ + +struct virtchnl_vlan_filter_list { + u16 vsi_id; + u16 num_elements; + u16 vlan_id[1]; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_vlan_filter_list); + +/* VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE + * VF sends VSI id and flags. + * PF returns status code in retval. + * Note: we assume that broadcast accept mode is always enabled. + */ +struct virtchnl_promisc_info { + u16 vsi_id; + u16 flags; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_promisc_info); + +#define FLAG_VF_UNICAST_PROMISC 0x00000001 +#define FLAG_VF_MULTICAST_PROMISC 0x00000002 + +/* VIRTCHNL_OP_GET_STATS + * VF sends this message to request stats for the selected VSI. VF uses + * the virtchnl_queue_select struct to specify the VSI. The queue_id + * field is ignored by the PF. + * + * PF replies with struct eth_stats in an external buffer. + */ + +/* VIRTCHNL_OP_CONFIG_RSS_KEY + * VIRTCHNL_OP_CONFIG_RSS_LUT + * VF sends these messages to configure RSS. Only supported if both PF + * and VF drivers set the VIRTCHNL_VF_OFFLOAD_RSS_PF bit during + * configuration negotiation. If this is the case, then the RSS fields in + * the VF resource struct are valid. + * Both the key and LUT are initialized to 0 by the PF, meaning that + * RSS is effectively disabled until set up by the VF. + */ +struct virtchnl_rss_key { + u16 vsi_id; + u16 key_len; + u8 key[1]; /* RSS hash key, packed bytes */ +}; + +VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_key); + +struct virtchnl_rss_lut { + u16 vsi_id; + u16 lut_entries; + u8 lut[1]; /* RSS lookup table */ +}; + +VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_lut); + +/* VIRTCHNL_OP_GET_RSS_HENA_CAPS + * VIRTCHNL_OP_SET_RSS_HENA + * VF sends these messages to get and set the hash filter enable bits for RSS. + * By default, the PF sets these to all possible traffic types that the + * hardware supports. The VF can query this value if it wants to change the + * traffic types that are hashed by the hardware. + */ +struct virtchnl_rss_hena { + u64 hena; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_rss_hena); + +/* VIRTCHNL_OP_EVENT + * PF sends this message to inform the VF driver of events that may affect it. 
+ * No direct response is expected from the VF, though it may generate other + * messages in response to this one. + */ +enum virtchnl_event_codes { + VIRTCHNL_EVENT_UNKNOWN = 0, + VIRTCHNL_EVENT_LINK_CHANGE, + VIRTCHNL_EVENT_RESET_IMPENDING, + VIRTCHNL_EVENT_PF_DRIVER_CLOSE, +}; + +#define PF_EVENT_SEVERITY_INFO 0 +#define PF_EVENT_SEVERITY_ATTENTION 1 +#define PF_EVENT_SEVERITY_ACTION_REQUIRED 2 +#define PF_EVENT_SEVERITY_CERTAIN_DOOM 255 + +struct virtchnl_pf_event { + enum virtchnl_event_codes event; + union { + struct { + enum virtchnl_link_speed link_speed; + bool link_status; + } link_event; + } event_data; + + int severity; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_pf_event); + +#ifdef VIRTCHNL_IWARP + +/* VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP + * VF uses this message to request PF to map IWARP vectors to IWARP queues. + * The request for this originates from the VF IWARP driver through + * a client interface between VF LAN and VF IWARP driver. + * A vector could have an AEQ and CEQ attached to it although + * there is a single AEQ per VF IWARP instance in which case + * most vectors will have an INVALID_IDX for aeq and valid idx for ceq. + * There will never be a case where there will be multiple CEQs attached + * to a single vector. + * PF configures interrupt mapping and returns status. + */ + +/* HW does not define a type value for AEQ; only for RX/TX and CEQ. + * In order for us to keep the interface simple, SW will define a + * unique type value for AEQ. + */ +#define QUEUE_TYPE_PE_AEQ 0x80 +#define QUEUE_INVALID_IDX 0xFFFF + +struct virtchnl_iwarp_qv_info { + u32 v_idx; /* msix_vector */ + u16 ceq_idx; + u16 aeq_idx; + u8 itr_idx; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_iwarp_qv_info); + +struct virtchnl_iwarp_qvlist_info { + u32 num_vectors; + struct virtchnl_iwarp_qv_info qv_info[1]; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_iwarp_qvlist_info); + +#endif + +/* VF reset states - these are written into the RSTAT register: + * VFGEN_RSTAT on the VF + * When the PF initiates a reset, it writes 0 + * When the reset is complete, it writes 1 + * When the PF detects that the VF has recovered, it writes 2 + * VF checks this register periodically to determine if a reset has occurred, + * then polls it to know when the reset is complete. + * If either the PF or VF reads the register while the hardware + * is in a reset state, it will return DEADBEEF, which, when masked + * will result in 3. + */ +enum virtchnl_vfr_states { + VIRTCHNL_VFR_INPROGRESS = 0, + VIRTCHNL_VFR_COMPLETED, + VIRTCHNL_VFR_VFACTIVE, +}; + +/** + * virtchnl_vc_validate_vf_msg + * @ver: Virtchnl version info + * @v_opcode: Opcode for the message + * @msg: pointer to the msg buffer + * @msglen: msg length + * + * validate msg format against struct for each opcode + */ +static inline int +virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode, + u8 *msg, u16 msglen) +{ + bool err_msg_format = false; + int valid_len = 0; + + /* Validate message length. 
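+	 * Fixed-size opcodes expect msglen to equal sizeof() of their payload
+	 * struct.  Variable-length opcodes extend valid_len using the element
+	 * count carried in the message itself, and a zero element count is
+	 * treated as a malformed message.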
*/ + switch (v_opcode) { + case VIRTCHNL_OP_VERSION: + valid_len = sizeof(struct virtchnl_version_info); + break; + case VIRTCHNL_OP_RESET_VF: + break; + case VIRTCHNL_OP_GET_VF_RESOURCES: + if (VF_IS_V11(ver)) + valid_len = sizeof(u32); + break; + case VIRTCHNL_OP_CONFIG_TX_QUEUE: + valid_len = sizeof(struct virtchnl_txq_info); + break; + case VIRTCHNL_OP_CONFIG_RX_QUEUE: + valid_len = sizeof(struct virtchnl_rxq_info); + break; + case VIRTCHNL_OP_CONFIG_VSI_QUEUES: + valid_len = sizeof(struct virtchnl_vsi_queue_config_info); + if (msglen >= valid_len) { + struct virtchnl_vsi_queue_config_info *vqc = + (struct virtchnl_vsi_queue_config_info *)msg; + valid_len += (vqc->num_queue_pairs * + sizeof(struct + virtchnl_queue_pair_info)); + if (vqc->num_queue_pairs == 0) + err_msg_format = true; + } + break; + case VIRTCHNL_OP_CONFIG_IRQ_MAP: + valid_len = sizeof(struct virtchnl_irq_map_info); + if (msglen >= valid_len) { + struct virtchnl_irq_map_info *vimi = + (struct virtchnl_irq_map_info *)msg; + valid_len += (vimi->num_vectors * + sizeof(struct virtchnl_vector_map)); + if (vimi->num_vectors == 0) + err_msg_format = true; + } + break; + case VIRTCHNL_OP_ENABLE_QUEUES: + case VIRTCHNL_OP_DISABLE_QUEUES: + valid_len = sizeof(struct virtchnl_queue_select); + break; + case VIRTCHNL_OP_ADD_ETH_ADDR: + case VIRTCHNL_OP_DEL_ETH_ADDR: + valid_len = sizeof(struct virtchnl_ether_addr_list); + if (msglen >= valid_len) { + struct virtchnl_ether_addr_list *veal = + (struct virtchnl_ether_addr_list *)msg; + valid_len += veal->num_elements * + sizeof(struct virtchnl_ether_addr); + if (veal->num_elements == 0) + err_msg_format = true; + } + break; + case VIRTCHNL_OP_ADD_VLAN: + case VIRTCHNL_OP_DEL_VLAN: + valid_len = sizeof(struct virtchnl_vlan_filter_list); + if (msglen >= valid_len) { + struct virtchnl_vlan_filter_list *vfl = + (struct virtchnl_vlan_filter_list *)msg; + valid_len += vfl->num_elements * sizeof(u16); + if (vfl->num_elements == 0) + err_msg_format = true; + } + break; + case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: + valid_len = sizeof(struct virtchnl_promisc_info); + break; + case VIRTCHNL_OP_GET_STATS: + valid_len = sizeof(struct virtchnl_queue_select); + break; +#ifdef VIRTCHNL_IWARP + case VIRTCHNL_OP_IWARP: + /* These messages are opaque to us and will be validated in + * the RDMA client code. We just need to check for nonzero + * length. The firmware will enforce max length restrictions. 
+ */ + if (msglen) + valid_len = msglen; + else + err_msg_format = true; + break; + case VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP: + break; + case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP: + valid_len = sizeof(struct virtchnl_iwarp_qvlist_info); + if (msglen >= valid_len) { + struct virtchnl_iwarp_qvlist_info *qv = + (struct virtchnl_iwarp_qvlist_info *)msg; + if (qv->num_vectors == 0) { + err_msg_format = true; + break; + } + valid_len += ((qv->num_vectors - 1) * + sizeof(struct virtchnl_iwarp_qv_info)); + } + break; +#endif + case VIRTCHNL_OP_CONFIG_RSS_KEY: + valid_len = sizeof(struct virtchnl_rss_key); + if (msglen >= valid_len) { + struct virtchnl_rss_key *vrk = + (struct virtchnl_rss_key *)msg; + valid_len += vrk->key_len - 1; + } + break; + case VIRTCHNL_OP_CONFIG_RSS_LUT: + valid_len = sizeof(struct virtchnl_rss_lut); + if (msglen >= valid_len) { + struct virtchnl_rss_lut *vrl = + (struct virtchnl_rss_lut *)msg; + valid_len += vrl->lut_entries - 1; + } + break; + case VIRTCHNL_OP_GET_RSS_HENA_CAPS: + break; + case VIRTCHNL_OP_SET_RSS_HENA: + valid_len = sizeof(struct virtchnl_rss_hena); + break; + case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING: + case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING: + break; + case VIRTCHNL_OP_REQUEST_QUEUES: + valid_len = sizeof(struct virtchnl_vf_res_request); + break; + /* These are always errors coming from the VF. */ + case VIRTCHNL_OP_EVENT: + case VIRTCHNL_OP_UNKNOWN: + default: + return VIRTCHNL_STATUS_ERR_PARAM; + } + /* few more checks */ + if (err_msg_format || valid_len != msglen) + return VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH; + + return 0; +} +#endif /* _VIRTCHNL_H_ */ diff --git a/src/spdk/dpdk/drivers/net/i40e/i40e_ethdev.c b/src/spdk/dpdk/drivers/net/i40e/i40e_ethdev.c new file mode 100644 index 000000000..970a31cb2 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/i40e/i40e_ethdev.c @@ -0,0 +1,13447 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2017 Intel Corporation + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "i40e_logs.h" +#include "base/i40e_prototype.h" +#include "base/i40e_adminq_cmd.h" +#include "base/i40e_type.h" +#include "base/i40e_register.h" +#include "base/i40e_dcb.h" +#include "i40e_ethdev.h" +#include "i40e_rxtx.h" +#include "i40e_pf.h" +#include "i40e_regs.h" +#include "rte_pmd_i40e.h" + +#define ETH_I40E_FLOATING_VEB_ARG "enable_floating_veb" +#define ETH_I40E_FLOATING_VEB_LIST_ARG "floating_veb_list" +#define ETH_I40E_SUPPORT_MULTI_DRIVER "support-multi-driver" +#define ETH_I40E_QUEUE_NUM_PER_VF_ARG "queue-num-per-vf" +#define ETH_I40E_USE_LATEST_VEC "use-latest-supported-vec" +#define ETH_I40E_VF_MSG_CFG "vf_msg_cfg" + +#define I40E_CLEAR_PXE_WAIT_MS 200 + +/* Maximun number of capability elements */ +#define I40E_MAX_CAP_ELE_NUM 128 + +/* Wait count and interval */ +#define I40E_CHK_Q_ENA_COUNT 1000 +#define I40E_CHK_Q_ENA_INTERVAL_US 1000 + +/* Maximun number of VSI */ +#define I40E_MAX_NUM_VSIS (384UL) + +#define I40E_PRE_TX_Q_CFG_WAIT_US 10 /* 10 us */ + +/* Flow control default timer */ +#define I40E_DEFAULT_PAUSE_TIME 0xFFFFU + +/* Flow control enable fwd bit */ +#define I40E_PRTMAC_FWD_CTRL 0x00000001 + +/* Receive Packet Buffer size */ +#define I40E_RXPBSIZE (968 * 1024) + +/* Kilobytes shift */ +#define I40E_KILOSHIFT 10 + +/* Flow control default high water */ +#define I40E_DEFAULT_HIGH_WATER (0xF2000 >> I40E_KILOSHIFT) 
+ +/* Flow control default low water */ +#define I40E_DEFAULT_LOW_WATER (0xF2000 >> I40E_KILOSHIFT) + +/* Receive Average Packet Size in Byte*/ +#define I40E_PACKET_AVERAGE_SIZE 128 + +/* Mask of PF interrupt causes */ +#define I40E_PFINT_ICR0_ENA_MASK ( \ + I40E_PFINT_ICR0_ENA_ECC_ERR_MASK | \ + I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK | \ + I40E_PFINT_ICR0_ENA_GRST_MASK | \ + I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK | \ + I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK | \ + I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | \ + I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK | \ + I40E_PFINT_ICR0_ENA_VFLR_MASK | \ + I40E_PFINT_ICR0_ENA_ADMINQ_MASK) + +#define I40E_FLOW_TYPES ( \ + (1UL << RTE_ETH_FLOW_FRAG_IPV4) | \ + (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \ + (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \ + (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \ + (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \ + (1UL << RTE_ETH_FLOW_FRAG_IPV6) | \ + (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \ + (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \ + (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \ + (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) | \ + (1UL << RTE_ETH_FLOW_L2_PAYLOAD)) + +/* Additional timesync values. */ +#define I40E_PTP_40GB_INCVAL 0x0199999999ULL +#define I40E_PTP_10GB_INCVAL 0x0333333333ULL +#define I40E_PTP_1GB_INCVAL 0x2000000000ULL +#define I40E_PRTTSYN_TSYNENA 0x80000000 +#define I40E_PRTTSYN_TSYNTYPE 0x0e000000 +#define I40E_CYCLECOUNTER_MASK 0xffffffffffffffffULL + +/** + * Below are values for writing un-exposed registers suggested + * by silicon experts + */ +/* Destination MAC address */ +#define I40E_REG_INSET_L2_DMAC 0xE000000000000000ULL +/* Source MAC address */ +#define I40E_REG_INSET_L2_SMAC 0x1C00000000000000ULL +/* Outer (S-Tag) VLAN tag in the outer L2 header */ +#define I40E_REG_INSET_L2_OUTER_VLAN 0x0000000004000000ULL +/* Inner (C-Tag) or single VLAN tag in the outer L2 header */ +#define I40E_REG_INSET_L2_INNER_VLAN 0x0080000000000000ULL +/* Single VLAN tag in the inner L2 header */ +#define I40E_REG_INSET_TUNNEL_VLAN 0x0100000000000000ULL +/* Source IPv4 address */ +#define I40E_REG_INSET_L3_SRC_IP4 0x0001800000000000ULL +/* Destination IPv4 address */ +#define I40E_REG_INSET_L3_DST_IP4 0x0000001800000000ULL +/* Source IPv4 address for X722 */ +#define I40E_X722_REG_INSET_L3_SRC_IP4 0x0006000000000000ULL +/* Destination IPv4 address for X722 */ +#define I40E_X722_REG_INSET_L3_DST_IP4 0x0000060000000000ULL +/* IPv4 Protocol for X722 */ +#define I40E_X722_REG_INSET_L3_IP4_PROTO 0x0010000000000000ULL +/* IPv4 Time to Live for X722 */ +#define I40E_X722_REG_INSET_L3_IP4_TTL 0x0010000000000000ULL +/* IPv4 Type of Service (TOS) */ +#define I40E_REG_INSET_L3_IP4_TOS 0x0040000000000000ULL +/* IPv4 Protocol */ +#define I40E_REG_INSET_L3_IP4_PROTO 0x0004000000000000ULL +/* IPv4 Time to Live */ +#define I40E_REG_INSET_L3_IP4_TTL 0x0004000000000000ULL +/* Source IPv6 address */ +#define I40E_REG_INSET_L3_SRC_IP6 0x0007F80000000000ULL +/* Destination IPv6 address */ +#define I40E_REG_INSET_L3_DST_IP6 0x000007F800000000ULL +/* IPv6 Traffic Class (TC) */ +#define I40E_REG_INSET_L3_IP6_TC 0x0040000000000000ULL +/* IPv6 Next Header */ +#define I40E_REG_INSET_L3_IP6_NEXT_HDR 0x0008000000000000ULL +/* IPv6 Hop Limit */ +#define I40E_REG_INSET_L3_IP6_HOP_LIMIT 0x0008000000000000ULL +/* Source L4 port */ +#define I40E_REG_INSET_L4_SRC_PORT 0x0000000400000000ULL +/* Destination L4 port */ +#define I40E_REG_INSET_L4_DST_PORT 0x0000000200000000ULL +/* SCTP verification tag */ +#define I40E_REG_INSET_L4_SCTP_VERIFICATION_TAG 0x0000000180000000ULL 
+/* Inner destination MAC address (MAC-in-UDP/MAC-in-GRE)*/ +#define I40E_REG_INSET_TUNNEL_L2_INNER_DST_MAC 0x0000000001C00000ULL +/* Source port of tunneling UDP */ +#define I40E_REG_INSET_TUNNEL_L4_UDP_SRC_PORT 0x0000000000200000ULL +/* Destination port of tunneling UDP */ +#define I40E_REG_INSET_TUNNEL_L4_UDP_DST_PORT 0x0000000000100000ULL +/* UDP Tunneling ID, NVGRE/GRE key */ +#define I40E_REG_INSET_TUNNEL_ID 0x00000000000C0000ULL +/* Last ether type */ +#define I40E_REG_INSET_LAST_ETHER_TYPE 0x0000000000004000ULL +/* Tunneling outer destination IPv4 address */ +#define I40E_REG_INSET_TUNNEL_L3_DST_IP4 0x00000000000000C0ULL +/* Tunneling outer destination IPv6 address */ +#define I40E_REG_INSET_TUNNEL_L3_DST_IP6 0x0000000000003FC0ULL +/* 1st word of flex payload */ +#define I40E_REG_INSET_FLEX_PAYLOAD_WORD1 0x0000000000002000ULL +/* 2nd word of flex payload */ +#define I40E_REG_INSET_FLEX_PAYLOAD_WORD2 0x0000000000001000ULL +/* 3rd word of flex payload */ +#define I40E_REG_INSET_FLEX_PAYLOAD_WORD3 0x0000000000000800ULL +/* 4th word of flex payload */ +#define I40E_REG_INSET_FLEX_PAYLOAD_WORD4 0x0000000000000400ULL +/* 5th word of flex payload */ +#define I40E_REG_INSET_FLEX_PAYLOAD_WORD5 0x0000000000000200ULL +/* 6th word of flex payload */ +#define I40E_REG_INSET_FLEX_PAYLOAD_WORD6 0x0000000000000100ULL +/* 7th word of flex payload */ +#define I40E_REG_INSET_FLEX_PAYLOAD_WORD7 0x0000000000000080ULL +/* 8th word of flex payload */ +#define I40E_REG_INSET_FLEX_PAYLOAD_WORD8 0x0000000000000040ULL +/* all 8 words flex payload */ +#define I40E_REG_INSET_FLEX_PAYLOAD_WORDS 0x0000000000003FC0ULL +#define I40E_REG_INSET_MASK_DEFAULT 0x0000000000000000ULL + +#define I40E_TRANSLATE_INSET 0 +#define I40E_TRANSLATE_REG 1 + +#define I40E_INSET_IPV4_TOS_MASK 0x0009FF00UL +#define I40E_INSET_IPv4_TTL_MASK 0x000D00FFUL +#define I40E_INSET_IPV4_PROTO_MASK 0x000DFF00UL +#define I40E_INSET_IPV6_TC_MASK 0x0009F00FUL +#define I40E_INSET_IPV6_HOP_LIMIT_MASK 0x000CFF00UL +#define I40E_INSET_IPV6_NEXT_HDR_MASK 0x000C00FFUL + +/* PCI offset for querying capability */ +#define PCI_DEV_CAP_REG 0xA4 +/* PCI offset for enabling/disabling Extended Tag */ +#define PCI_DEV_CTRL_REG 0xA8 +/* Bit mask of Extended Tag capability */ +#define PCI_DEV_CAP_EXT_TAG_MASK 0x20 +/* Bit shift of Extended Tag enable/disable */ +#define PCI_DEV_CTRL_EXT_TAG_SHIFT 8 +/* Bit mask of Extended Tag enable/disable */ +#define PCI_DEV_CTRL_EXT_TAG_MASK (1 << PCI_DEV_CTRL_EXT_TAG_SHIFT) + +static int eth_i40e_dev_init(struct rte_eth_dev *eth_dev, void *init_params); +static int eth_i40e_dev_uninit(struct rte_eth_dev *eth_dev); +static int i40e_dev_configure(struct rte_eth_dev *dev); +static int i40e_dev_start(struct rte_eth_dev *dev); +static void i40e_dev_stop(struct rte_eth_dev *dev); +static void i40e_dev_close(struct rte_eth_dev *dev); +static int i40e_dev_reset(struct rte_eth_dev *dev); +static int i40e_dev_promiscuous_enable(struct rte_eth_dev *dev); +static int i40e_dev_promiscuous_disable(struct rte_eth_dev *dev); +static int i40e_dev_allmulticast_enable(struct rte_eth_dev *dev); +static int i40e_dev_allmulticast_disable(struct rte_eth_dev *dev); +static int i40e_dev_set_link_up(struct rte_eth_dev *dev); +static int i40e_dev_set_link_down(struct rte_eth_dev *dev); +static int i40e_dev_stats_get(struct rte_eth_dev *dev, + struct rte_eth_stats *stats); +static int i40e_dev_xstats_get(struct rte_eth_dev *dev, + struct rte_eth_xstat *xstats, unsigned n); +static int i40e_dev_xstats_get_names(struct rte_eth_dev *dev, + struct 
rte_eth_xstat_name *xstats_names, + unsigned limit); +static int i40e_dev_stats_reset(struct rte_eth_dev *dev); +static int i40e_fw_version_get(struct rte_eth_dev *dev, + char *fw_version, size_t fw_size); +static int i40e_dev_info_get(struct rte_eth_dev *dev, + struct rte_eth_dev_info *dev_info); +static int i40e_vlan_filter_set(struct rte_eth_dev *dev, + uint16_t vlan_id, + int on); +static int i40e_vlan_tpid_set(struct rte_eth_dev *dev, + enum rte_vlan_type vlan_type, + uint16_t tpid); +static int i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask); +static void i40e_vlan_strip_queue_set(struct rte_eth_dev *dev, + uint16_t queue, + int on); +static int i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on); +static int i40e_dev_led_on(struct rte_eth_dev *dev); +static int i40e_dev_led_off(struct rte_eth_dev *dev); +static int i40e_flow_ctrl_get(struct rte_eth_dev *dev, + struct rte_eth_fc_conf *fc_conf); +static int i40e_flow_ctrl_set(struct rte_eth_dev *dev, + struct rte_eth_fc_conf *fc_conf); +static int i40e_priority_flow_ctrl_set(struct rte_eth_dev *dev, + struct rte_eth_pfc_conf *pfc_conf); +static int i40e_macaddr_add(struct rte_eth_dev *dev, + struct rte_ether_addr *mac_addr, + uint32_t index, + uint32_t pool); +static void i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index); +static int i40e_dev_rss_reta_update(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size); +static int i40e_dev_rss_reta_query(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size); + +static int i40e_get_cap(struct i40e_hw *hw); +static int i40e_pf_parameter_init(struct rte_eth_dev *dev); +static int i40e_pf_setup(struct i40e_pf *pf); +static int i40e_dev_rxtx_init(struct i40e_pf *pf); +static int i40e_vmdq_setup(struct rte_eth_dev *dev); +static int i40e_dcb_setup(struct rte_eth_dev *dev); +static void i40e_stat_update_32(struct i40e_hw *hw, uint32_t reg, + bool offset_loaded, uint64_t *offset, uint64_t *stat); +static void i40e_stat_update_48(struct i40e_hw *hw, + uint32_t hireg, + uint32_t loreg, + bool offset_loaded, + uint64_t *offset, + uint64_t *stat); +static void i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue); +static void i40e_dev_interrupt_handler(void *param); +static void i40e_dev_alarm_handler(void *param); +static int i40e_res_pool_init(struct i40e_res_pool_info *pool, + uint32_t base, uint32_t num); +static void i40e_res_pool_destroy(struct i40e_res_pool_info *pool); +static int i40e_res_pool_free(struct i40e_res_pool_info *pool, + uint32_t base); +static int i40e_res_pool_alloc(struct i40e_res_pool_info *pool, + uint16_t num); +static int i40e_dev_init_vlan(struct rte_eth_dev *dev); +static int i40e_veb_release(struct i40e_veb *veb); +static struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, + struct i40e_vsi *vsi); +static int i40e_pf_config_mq_rx(struct i40e_pf *pf); +static int i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on); +static inline int i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi, + struct i40e_macvlan_filter *mv_f, + int num, + uint16_t vlan); +static int i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi); +static int i40e_dev_rss_hash_update(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf); +static int i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf); +static int i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev, + struct rte_eth_udp_tunnel *udp_tunnel); +static int 
i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev, + struct rte_eth_udp_tunnel *udp_tunnel); +static void i40e_filter_input_set_init(struct i40e_pf *pf); +static int i40e_ethertype_filter_handle(struct rte_eth_dev *dev, + enum rte_filter_op filter_op, + void *arg); +static int i40e_dev_filter_ctrl(struct rte_eth_dev *dev, + enum rte_filter_type filter_type, + enum rte_filter_op filter_op, + void *arg); +static int i40e_dev_get_dcb_info(struct rte_eth_dev *dev, + struct rte_eth_dcb_info *dcb_info); +static int i40e_dev_sync_phy_type(struct i40e_hw *hw); +static void i40e_configure_registers(struct i40e_hw *hw); +static void i40e_hw_init(struct rte_eth_dev *dev); +static int i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi); +static enum i40e_status_code i40e_aq_del_mirror_rule(struct i40e_hw *hw, + uint16_t seid, + uint16_t rule_type, + uint16_t *entries, + uint16_t count, + uint16_t rule_id); +static int i40e_mirror_rule_set(struct rte_eth_dev *dev, + struct rte_eth_mirror_conf *mirror_conf, + uint8_t sw_id, uint8_t on); +static int i40e_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t sw_id); + +static int i40e_timesync_enable(struct rte_eth_dev *dev); +static int i40e_timesync_disable(struct rte_eth_dev *dev); +static int i40e_timesync_read_rx_timestamp(struct rte_eth_dev *dev, + struct timespec *timestamp, + uint32_t flags); +static int i40e_timesync_read_tx_timestamp(struct rte_eth_dev *dev, + struct timespec *timestamp); +static void i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw); + +static int i40e_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta); + +static int i40e_timesync_read_time(struct rte_eth_dev *dev, + struct timespec *timestamp); +static int i40e_timesync_write_time(struct rte_eth_dev *dev, + const struct timespec *timestamp); + +static int i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, + uint16_t queue_id); +static int i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, + uint16_t queue_id); + +static int i40e_get_regs(struct rte_eth_dev *dev, + struct rte_dev_reg_info *regs); + +static int i40e_get_eeprom_length(struct rte_eth_dev *dev); + +static int i40e_get_eeprom(struct rte_eth_dev *dev, + struct rte_dev_eeprom_info *eeprom); + +static int i40e_get_module_info(struct rte_eth_dev *dev, + struct rte_eth_dev_module_info *modinfo); +static int i40e_get_module_eeprom(struct rte_eth_dev *dev, + struct rte_dev_eeprom_info *info); + +static int i40e_set_default_mac_addr(struct rte_eth_dev *dev, + struct rte_ether_addr *mac_addr); + +static int i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu); + +static int i40e_ethertype_filter_convert( + const struct rte_eth_ethertype_filter *input, + struct i40e_ethertype_filter *filter); +static int i40e_sw_ethertype_filter_insert(struct i40e_pf *pf, + struct i40e_ethertype_filter *filter); + +static int i40e_tunnel_filter_convert( + struct i40e_aqc_cloud_filters_element_bb *cld_filter, + struct i40e_tunnel_filter *tunnel_filter); +static int i40e_sw_tunnel_filter_insert(struct i40e_pf *pf, + struct i40e_tunnel_filter *tunnel_filter); +static int i40e_cloud_filter_qinq_create(struct i40e_pf *pf); + +static void i40e_ethertype_filter_restore(struct i40e_pf *pf); +static void i40e_tunnel_filter_restore(struct i40e_pf *pf); +static void i40e_filter_restore(struct i40e_pf *pf); +static void i40e_notify_all_vfs_link_status(struct rte_eth_dev *dev); + +int i40e_logtype_init; +int i40e_logtype_driver; +#ifdef RTE_LIBRTE_I40E_DEBUG_RX +int i40e_logtype_rx; +#endif +#ifdef 
RTE_LIBRTE_I40E_DEBUG_TX +int i40e_logtype_tx; +#endif +#ifdef RTE_LIBRTE_I40E_DEBUG_TX_FREE +int i40e_logtype_tx_free; +#endif + +static const char *const valid_keys[] = { + ETH_I40E_FLOATING_VEB_ARG, + ETH_I40E_FLOATING_VEB_LIST_ARG, + ETH_I40E_SUPPORT_MULTI_DRIVER, + ETH_I40E_QUEUE_NUM_PER_VF_ARG, + ETH_I40E_USE_LATEST_VEC, + ETH_I40E_VF_MSG_CFG, + NULL}; + +static const struct rte_pci_id pci_id_i40e_map[] = { + { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710) }, + { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QEMU) }, + { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B) }, + { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C) }, + { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A) }, + { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B) }, + { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C) }, + { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T) }, + { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2) }, + { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2_A) }, + { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T4) }, + { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_B) }, + { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_SFP28) }, + { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_A0) }, + { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_X722) }, + { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_X722) }, + { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_X722) }, + { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_X722) }, + { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_X722) }, + { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_I_X722) }, + { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X710_N3000) }, + { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_XXV710_N3000) }, + { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_BC) }, + { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_5G_BASE_T_BC) }, + { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_B) }, + { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_SFP) }, + { .vendor_id = 0, /* sentinel */ }, +}; + +static const struct eth_dev_ops i40e_eth_dev_ops = { + .dev_configure = i40e_dev_configure, + .dev_start = i40e_dev_start, + .dev_stop = i40e_dev_stop, + .dev_close = i40e_dev_close, + .dev_reset = i40e_dev_reset, + .promiscuous_enable = i40e_dev_promiscuous_enable, + .promiscuous_disable = i40e_dev_promiscuous_disable, + .allmulticast_enable = i40e_dev_allmulticast_enable, + .allmulticast_disable = i40e_dev_allmulticast_disable, + .dev_set_link_up = i40e_dev_set_link_up, + .dev_set_link_down = i40e_dev_set_link_down, + .link_update = i40e_dev_link_update, + .stats_get = i40e_dev_stats_get, + .xstats_get = i40e_dev_xstats_get, + .xstats_get_names = i40e_dev_xstats_get_names, + .stats_reset = i40e_dev_stats_reset, + .xstats_reset = i40e_dev_stats_reset, + .fw_version_get = i40e_fw_version_get, + .dev_infos_get = i40e_dev_info_get, + .dev_supported_ptypes_get = i40e_dev_supported_ptypes_get, + .vlan_filter_set = i40e_vlan_filter_set, + .vlan_tpid_set = i40e_vlan_tpid_set, + .vlan_offload_set = i40e_vlan_offload_set, + .vlan_strip_queue_set = i40e_vlan_strip_queue_set, + .vlan_pvid_set = i40e_vlan_pvid_set, + .rx_queue_start = i40e_dev_rx_queue_start, + .rx_queue_stop = i40e_dev_rx_queue_stop, + .tx_queue_start = i40e_dev_tx_queue_start, + .tx_queue_stop = i40e_dev_tx_queue_stop, + .rx_queue_setup = i40e_dev_rx_queue_setup, + 
.rx_queue_intr_enable = i40e_dev_rx_queue_intr_enable, + .rx_queue_intr_disable = i40e_dev_rx_queue_intr_disable, + .rx_queue_release = i40e_dev_rx_queue_release, + .rx_queue_count = i40e_dev_rx_queue_count, + .rx_descriptor_done = i40e_dev_rx_descriptor_done, + .rx_descriptor_status = i40e_dev_rx_descriptor_status, + .tx_descriptor_status = i40e_dev_tx_descriptor_status, + .tx_queue_setup = i40e_dev_tx_queue_setup, + .tx_queue_release = i40e_dev_tx_queue_release, + .dev_led_on = i40e_dev_led_on, + .dev_led_off = i40e_dev_led_off, + .flow_ctrl_get = i40e_flow_ctrl_get, + .flow_ctrl_set = i40e_flow_ctrl_set, + .priority_flow_ctrl_set = i40e_priority_flow_ctrl_set, + .mac_addr_add = i40e_macaddr_add, + .mac_addr_remove = i40e_macaddr_remove, + .reta_update = i40e_dev_rss_reta_update, + .reta_query = i40e_dev_rss_reta_query, + .rss_hash_update = i40e_dev_rss_hash_update, + .rss_hash_conf_get = i40e_dev_rss_hash_conf_get, + .udp_tunnel_port_add = i40e_dev_udp_tunnel_port_add, + .udp_tunnel_port_del = i40e_dev_udp_tunnel_port_del, + .filter_ctrl = i40e_dev_filter_ctrl, + .rxq_info_get = i40e_rxq_info_get, + .txq_info_get = i40e_txq_info_get, + .rx_burst_mode_get = i40e_rx_burst_mode_get, + .tx_burst_mode_get = i40e_tx_burst_mode_get, + .mirror_rule_set = i40e_mirror_rule_set, + .mirror_rule_reset = i40e_mirror_rule_reset, + .timesync_enable = i40e_timesync_enable, + .timesync_disable = i40e_timesync_disable, + .timesync_read_rx_timestamp = i40e_timesync_read_rx_timestamp, + .timesync_read_tx_timestamp = i40e_timesync_read_tx_timestamp, + .get_dcb_info = i40e_dev_get_dcb_info, + .timesync_adjust_time = i40e_timesync_adjust_time, + .timesync_read_time = i40e_timesync_read_time, + .timesync_write_time = i40e_timesync_write_time, + .get_reg = i40e_get_regs, + .get_eeprom_length = i40e_get_eeprom_length, + .get_eeprom = i40e_get_eeprom, + .get_module_info = i40e_get_module_info, + .get_module_eeprom = i40e_get_module_eeprom, + .mac_addr_set = i40e_set_default_mac_addr, + .mtu_set = i40e_dev_mtu_set, + .tm_ops_get = i40e_tm_ops_get, + .tx_done_cleanup = i40e_tx_done_cleanup, +}; + +/* store statistics names and its offset in stats structure */ +struct rte_i40e_xstats_name_off { + char name[RTE_ETH_XSTATS_NAME_SIZE]; + unsigned offset; +}; + +static const struct rte_i40e_xstats_name_off rte_i40e_stats_strings[] = { + {"rx_unicast_packets", offsetof(struct i40e_eth_stats, rx_unicast)}, + {"rx_multicast_packets", offsetof(struct i40e_eth_stats, rx_multicast)}, + {"rx_broadcast_packets", offsetof(struct i40e_eth_stats, rx_broadcast)}, + {"rx_dropped_packets", offsetof(struct i40e_eth_stats, rx_discards)}, + {"rx_unknown_protocol_packets", offsetof(struct i40e_eth_stats, + rx_unknown_protocol)}, + {"tx_unicast_packets", offsetof(struct i40e_eth_stats, tx_unicast)}, + {"tx_multicast_packets", offsetof(struct i40e_eth_stats, tx_multicast)}, + {"tx_broadcast_packets", offsetof(struct i40e_eth_stats, tx_broadcast)}, + {"tx_dropped_packets", offsetof(struct i40e_eth_stats, tx_discards)}, +}; + +#define I40E_NB_ETH_XSTATS (sizeof(rte_i40e_stats_strings) / \ + sizeof(rte_i40e_stats_strings[0])) + +static const struct rte_i40e_xstats_name_off rte_i40e_hw_port_strings[] = { + {"tx_link_down_dropped", offsetof(struct i40e_hw_port_stats, + tx_dropped_link_down)}, + {"rx_crc_errors", offsetof(struct i40e_hw_port_stats, crc_errors)}, + {"rx_illegal_byte_errors", offsetof(struct i40e_hw_port_stats, + illegal_bytes)}, + {"rx_error_bytes", offsetof(struct i40e_hw_port_stats, error_bytes)}, + {"mac_local_errors", 
offsetof(struct i40e_hw_port_stats, + mac_local_faults)}, + {"mac_remote_errors", offsetof(struct i40e_hw_port_stats, + mac_remote_faults)}, + {"rx_length_errors", offsetof(struct i40e_hw_port_stats, + rx_length_errors)}, + {"tx_xon_packets", offsetof(struct i40e_hw_port_stats, link_xon_tx)}, + {"rx_xon_packets", offsetof(struct i40e_hw_port_stats, link_xon_rx)}, + {"tx_xoff_packets", offsetof(struct i40e_hw_port_stats, link_xoff_tx)}, + {"rx_xoff_packets", offsetof(struct i40e_hw_port_stats, link_xoff_rx)}, + {"rx_size_64_packets", offsetof(struct i40e_hw_port_stats, rx_size_64)}, + {"rx_size_65_to_127_packets", offsetof(struct i40e_hw_port_stats, + rx_size_127)}, + {"rx_size_128_to_255_packets", offsetof(struct i40e_hw_port_stats, + rx_size_255)}, + {"rx_size_256_to_511_packets", offsetof(struct i40e_hw_port_stats, + rx_size_511)}, + {"rx_size_512_to_1023_packets", offsetof(struct i40e_hw_port_stats, + rx_size_1023)}, + {"rx_size_1024_to_1522_packets", offsetof(struct i40e_hw_port_stats, + rx_size_1522)}, + {"rx_size_1523_to_max_packets", offsetof(struct i40e_hw_port_stats, + rx_size_big)}, + {"rx_undersized_errors", offsetof(struct i40e_hw_port_stats, + rx_undersize)}, + {"rx_oversize_errors", offsetof(struct i40e_hw_port_stats, + rx_oversize)}, + {"rx_mac_short_dropped", offsetof(struct i40e_hw_port_stats, + mac_short_packet_dropped)}, + {"rx_fragmented_errors", offsetof(struct i40e_hw_port_stats, + rx_fragments)}, + {"rx_jabber_errors", offsetof(struct i40e_hw_port_stats, rx_jabber)}, + {"tx_size_64_packets", offsetof(struct i40e_hw_port_stats, tx_size_64)}, + {"tx_size_65_to_127_packets", offsetof(struct i40e_hw_port_stats, + tx_size_127)}, + {"tx_size_128_to_255_packets", offsetof(struct i40e_hw_port_stats, + tx_size_255)}, + {"tx_size_256_to_511_packets", offsetof(struct i40e_hw_port_stats, + tx_size_511)}, + {"tx_size_512_to_1023_packets", offsetof(struct i40e_hw_port_stats, + tx_size_1023)}, + {"tx_size_1024_to_1522_packets", offsetof(struct i40e_hw_port_stats, + tx_size_1522)}, + {"tx_size_1523_to_max_packets", offsetof(struct i40e_hw_port_stats, + tx_size_big)}, + {"rx_flow_director_atr_match_packets", + offsetof(struct i40e_hw_port_stats, fd_atr_match)}, + {"rx_flow_director_sb_match_packets", + offsetof(struct i40e_hw_port_stats, fd_sb_match)}, + {"tx_low_power_idle_status", offsetof(struct i40e_hw_port_stats, + tx_lpi_status)}, + {"rx_low_power_idle_status", offsetof(struct i40e_hw_port_stats, + rx_lpi_status)}, + {"tx_low_power_idle_count", offsetof(struct i40e_hw_port_stats, + tx_lpi_count)}, + {"rx_low_power_idle_count", offsetof(struct i40e_hw_port_stats, + rx_lpi_count)}, +}; + +#define I40E_NB_HW_PORT_XSTATS (sizeof(rte_i40e_hw_port_strings) / \ + sizeof(rte_i40e_hw_port_strings[0])) + +static const struct rte_i40e_xstats_name_off rte_i40e_rxq_prio_strings[] = { + {"xon_packets", offsetof(struct i40e_hw_port_stats, + priority_xon_rx)}, + {"xoff_packets", offsetof(struct i40e_hw_port_stats, + priority_xoff_rx)}, +}; + +#define I40E_NB_RXQ_PRIO_XSTATS (sizeof(rte_i40e_rxq_prio_strings) / \ + sizeof(rte_i40e_rxq_prio_strings[0])) + +static const struct rte_i40e_xstats_name_off rte_i40e_txq_prio_strings[] = { + {"xon_packets", offsetof(struct i40e_hw_port_stats, + priority_xon_tx)}, + {"xoff_packets", offsetof(struct i40e_hw_port_stats, + priority_xoff_tx)}, + {"xon_to_xoff_packets", offsetof(struct i40e_hw_port_stats, + priority_xon_2_xoff)}, +}; + +#define I40E_NB_TXQ_PRIO_XSTATS (sizeof(rte_i40e_txq_prio_strings) / \ + sizeof(rte_i40e_txq_prio_strings[0])) + +static int 
+eth_i40e_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, + struct rte_pci_device *pci_dev) +{ + char name[RTE_ETH_NAME_MAX_LEN]; + struct rte_eth_devargs eth_da = { .nb_representor_ports = 0 }; + int i, retval; + + if (pci_dev->device.devargs) { + retval = rte_eth_devargs_parse(pci_dev->device.devargs->args, + ð_da); + if (retval) + return retval; + } + + retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name, + sizeof(struct i40e_adapter), + eth_dev_pci_specific_init, pci_dev, + eth_i40e_dev_init, NULL); + + if (retval || eth_da.nb_representor_ports < 1) + return retval; + + /* probe VF representor ports */ + struct rte_eth_dev *pf_ethdev = rte_eth_dev_allocated( + pci_dev->device.name); + + if (pf_ethdev == NULL) + return -ENODEV; + + for (i = 0; i < eth_da.nb_representor_ports; i++) { + struct i40e_vf_representor representor = { + .vf_id = eth_da.representor_ports[i], + .switch_domain_id = I40E_DEV_PRIVATE_TO_PF( + pf_ethdev->data->dev_private)->switch_domain_id, + .adapter = I40E_DEV_PRIVATE_TO_ADAPTER( + pf_ethdev->data->dev_private) + }; + + /* representor port net_bdf_port */ + snprintf(name, sizeof(name), "net_%s_representor_%d", + pci_dev->device.name, eth_da.representor_ports[i]); + + retval = rte_eth_dev_create(&pci_dev->device, name, + sizeof(struct i40e_vf_representor), NULL, NULL, + i40e_vf_representor_init, &representor); + + if (retval) + PMD_DRV_LOG(ERR, "failed to create i40e vf " + "representor %s.", name); + } + + return 0; +} + +static int eth_i40e_pci_remove(struct rte_pci_device *pci_dev) +{ + struct rte_eth_dev *ethdev; + + ethdev = rte_eth_dev_allocated(pci_dev->device.name); + if (!ethdev) + return 0; + + if (ethdev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR) + return rte_eth_dev_pci_generic_remove(pci_dev, + i40e_vf_representor_uninit); + else + return rte_eth_dev_pci_generic_remove(pci_dev, + eth_i40e_dev_uninit); +} + +static struct rte_pci_driver rte_i40e_pmd = { + .id_table = pci_id_i40e_map, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, + .probe = eth_i40e_pci_probe, + .remove = eth_i40e_pci_remove, +}; + +static inline void +i40e_write_global_rx_ctl(struct i40e_hw *hw, uint32_t reg_addr, + uint32_t reg_val) +{ + uint32_t ori_reg_val; + struct rte_eth_dev *dev; + + ori_reg_val = i40e_read_rx_ctl(hw, reg_addr); + dev = ((struct i40e_adapter *)hw->back)->eth_dev; + i40e_write_rx_ctl(hw, reg_addr, reg_val); + if (ori_reg_val != reg_val) + PMD_DRV_LOG(WARNING, + "i40e device %s changed global register [0x%08x]." + " original: 0x%08x, new: 0x%08x", + dev->device->name, reg_addr, ori_reg_val, reg_val); +} + +RTE_PMD_REGISTER_PCI(net_i40e, rte_i40e_pmd); +RTE_PMD_REGISTER_PCI_TABLE(net_i40e, pci_id_i40e_map); +RTE_PMD_REGISTER_KMOD_DEP(net_i40e, "* igb_uio | uio_pci_generic | vfio-pci"); + +#ifndef I40E_GLQF_ORT +#define I40E_GLQF_ORT(_i) (0x00268900 + ((_i) * 4)) +#endif +#ifndef I40E_GLQF_PIT +#define I40E_GLQF_PIT(_i) (0x00268C80 + ((_i) * 4)) +#endif +#ifndef I40E_GLQF_L3_MAP +#define I40E_GLQF_L3_MAP(_i) (0x0026C700 + ((_i) * 4)) +#endif + +static inline void i40e_GLQF_reg_init(struct i40e_hw *hw) +{ + /* + * Initialize registers for parsing packet type of QinQ + * This should be removed from code once proper + * configuration API is added to avoid configuration conflicts + * between ports of the same device. 
+ */ + I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(40), 0x00000029); + I40E_WRITE_GLB_REG(hw, I40E_GLQF_PIT(9), 0x00009420); +} + +static inline void i40e_config_automask(struct i40e_pf *pf) +{ + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + uint32_t val; + + /* INTENA flag is not auto-cleared for interrupt */ + val = I40E_READ_REG(hw, I40E_GLINT_CTL); + val |= I40E_GLINT_CTL_DIS_AUTOMASK_PF0_MASK | + I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK; + + /* If support multi-driver, PF will use INT0. */ + if (!pf->support_multi_driver) + val |= I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK; + + I40E_WRITE_REG(hw, I40E_GLINT_CTL, val); +} + +#define I40E_FLOW_CONTROL_ETHERTYPE 0x8808 + +/* + * Add a ethertype filter to drop all flow control frames transmitted + * from VSIs. +*/ +static void +i40e_add_tx_flow_control_drop_filter(struct i40e_pf *pf) +{ + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + uint16_t flags = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC | + I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP | + I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX; + int ret; + + ret = i40e_aq_add_rem_control_packet_filter(hw, NULL, + I40E_FLOW_CONTROL_ETHERTYPE, flags, + pf->main_vsi_seid, 0, + TRUE, NULL, NULL); + if (ret) + PMD_INIT_LOG(ERR, + "Failed to add filter to drop flow control frames from VSIs."); +} + +static int +floating_veb_list_handler(__rte_unused const char *key, + const char *floating_veb_value, + void *opaque) +{ + int idx = 0; + unsigned int count = 0; + char *end = NULL; + int min, max; + bool *vf_floating_veb = opaque; + + while (isblank(*floating_veb_value)) + floating_veb_value++; + + /* Reset floating VEB configuration for VFs */ + for (idx = 0; idx < I40E_MAX_VF; idx++) + vf_floating_veb[idx] = false; + + min = I40E_MAX_VF; + do { + while (isblank(*floating_veb_value)) + floating_veb_value++; + if (*floating_veb_value == '\0') + return -1; + errno = 0; + idx = strtoul(floating_veb_value, &end, 10); + if (errno || end == NULL) + return -1; + while (isblank(*end)) + end++; + if (*end == '-') { + min = idx; + } else if ((*end == ';') || (*end == '\0')) { + max = idx; + if (min == I40E_MAX_VF) + min = idx; + if (max >= I40E_MAX_VF) + max = I40E_MAX_VF - 1; + for (idx = min; idx <= max; idx++) { + vf_floating_veb[idx] = true; + count++; + } + min = I40E_MAX_VF; + } else { + return -1; + } + floating_veb_value = end + 1; + } while (*end != '\0'); + + if (count == 0) + return -1; + + return 0; +} + +static void +config_vf_floating_veb(struct rte_devargs *devargs, + uint16_t floating_veb, + bool *vf_floating_veb) +{ + struct rte_kvargs *kvlist; + int i; + const char *floating_veb_list = ETH_I40E_FLOATING_VEB_LIST_ARG; + + if (!floating_veb) + return; + /* All the VFs attach to the floating VEB by default + * when the floating VEB is enabled. + */ + for (i = 0; i < I40E_MAX_VF; i++) + vf_floating_veb[i] = true; + + if (devargs == NULL) + return; + + kvlist = rte_kvargs_parse(devargs->args, valid_keys); + if (kvlist == NULL) + return; + + if (!rte_kvargs_count(kvlist, floating_veb_list)) { + rte_kvargs_free(kvlist); + return; + } + /* When the floating_veb_list parameter exists, all the VFs + * will attach to the legacy VEB firstly, then configure VFs + * to the floating VEB according to the floating_veb_list. 
+ */ + if (rte_kvargs_process(kvlist, floating_veb_list, + floating_veb_list_handler, + vf_floating_veb) < 0) { + rte_kvargs_free(kvlist); + return; + } + rte_kvargs_free(kvlist); +} + +static int +i40e_check_floating_handler(__rte_unused const char *key, + const char *value, + __rte_unused void *opaque) +{ + if (strcmp(value, "1")) + return -1; + + return 0; +} + +static int +is_floating_veb_supported(struct rte_devargs *devargs) +{ + struct rte_kvargs *kvlist; + const char *floating_veb_key = ETH_I40E_FLOATING_VEB_ARG; + + if (devargs == NULL) + return 0; + + kvlist = rte_kvargs_parse(devargs->args, valid_keys); + if (kvlist == NULL) + return 0; + + if (!rte_kvargs_count(kvlist, floating_veb_key)) { + rte_kvargs_free(kvlist); + return 0; + } + /* Floating VEB is enabled when there's key-value: + * enable_floating_veb=1 + */ + if (rte_kvargs_process(kvlist, floating_veb_key, + i40e_check_floating_handler, NULL) < 0) { + rte_kvargs_free(kvlist); + return 0; + } + rte_kvargs_free(kvlist); + + return 1; +} + +static void +config_floating_veb(struct rte_eth_dev *dev) +{ + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + memset(pf->floating_veb_list, 0, sizeof(pf->floating_veb_list)); + + if (hw->aq.fw_maj_ver >= FLOATING_VEB_SUPPORTED_FW_MAJ) { + pf->floating_veb = + is_floating_veb_supported(pci_dev->device.devargs); + config_vf_floating_veb(pci_dev->device.devargs, + pf->floating_veb, + pf->floating_veb_list); + } else { + pf->floating_veb = false; + } +} + +#define I40E_L2_TAGS_S_TAG_SHIFT 1 +#define I40E_L2_TAGS_S_TAG_MASK I40E_MASK(0x1, I40E_L2_TAGS_S_TAG_SHIFT) + +static int +i40e_init_ethtype_filter_list(struct rte_eth_dev *dev) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype; + char ethertype_hash_name[RTE_HASH_NAMESIZE]; + int ret; + + struct rte_hash_parameters ethertype_hash_params = { + .name = ethertype_hash_name, + .entries = I40E_MAX_ETHERTYPE_FILTER_NUM, + .key_len = sizeof(struct i40e_ethertype_filter_input), + .hash_func = rte_hash_crc, + .hash_func_init_val = 0, + .socket_id = rte_socket_id(), + }; + + /* Initialize ethertype filter rule list and hash */ + TAILQ_INIT(ðertype_rule->ethertype_list); + snprintf(ethertype_hash_name, RTE_HASH_NAMESIZE, + "ethertype_%s", dev->device->name); + ethertype_rule->hash_table = rte_hash_create(ðertype_hash_params); + if (!ethertype_rule->hash_table) { + PMD_INIT_LOG(ERR, "Failed to create ethertype hash table!"); + return -EINVAL; + } + ethertype_rule->hash_map = rte_zmalloc("i40e_ethertype_hash_map", + sizeof(struct i40e_ethertype_filter *) * + I40E_MAX_ETHERTYPE_FILTER_NUM, + 0); + if (!ethertype_rule->hash_map) { + PMD_INIT_LOG(ERR, + "Failed to allocate memory for ethertype hash map!"); + ret = -ENOMEM; + goto err_ethertype_hash_map_alloc; + } + + return 0; + +err_ethertype_hash_map_alloc: + rte_hash_free(ethertype_rule->hash_table); + + return ret; +} + +static int +i40e_init_tunnel_filter_list(struct rte_eth_dev *dev) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel; + char tunnel_hash_name[RTE_HASH_NAMESIZE]; + int ret; + + struct rte_hash_parameters tunnel_hash_params = { + .name = tunnel_hash_name, + .entries = I40E_MAX_TUNNEL_FILTER_NUM, + .key_len = sizeof(struct i40e_tunnel_filter_input), + .hash_func = 
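+/*
+ * Worked example for the floating VEB devargs handled above, assuming the
+ * usual key spellings "enable_floating_veb" and "floating_veb_list" (the
+ * exact strings are given by ETH_I40E_FLOATING_VEB_ARG and
+ * ETH_I40E_FLOATING_VEB_LIST_ARG defined earlier in this file):
+ *
+ *   -w 84:00.0,enable_floating_veb=1,floating_veb_list="0;3-5;8"
+ *
+ * floating_veb_list_handler() walks the ';'-separated entries, expanding a
+ * "min-max" range and treating a lone index as a one-element range, so this
+ * example marks vf_floating_veb[0], [3], [4], [5] and [8] true while the
+ * other VFs stay on the legacy VEB.  With enable_floating_veb=1 and no list,
+ * config_vf_floating_veb() attaches every VF to the floating VEB instead.
+ */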
rte_hash_crc, + .hash_func_init_val = 0, + .socket_id = rte_socket_id(), + }; + + /* Initialize tunnel filter rule list and hash */ + TAILQ_INIT(&tunnel_rule->tunnel_list); + snprintf(tunnel_hash_name, RTE_HASH_NAMESIZE, + "tunnel_%s", dev->device->name); + tunnel_rule->hash_table = rte_hash_create(&tunnel_hash_params); + if (!tunnel_rule->hash_table) { + PMD_INIT_LOG(ERR, "Failed to create tunnel hash table!"); + return -EINVAL; + } + tunnel_rule->hash_map = rte_zmalloc("i40e_tunnel_hash_map", + sizeof(struct i40e_tunnel_filter *) * + I40E_MAX_TUNNEL_FILTER_NUM, + 0); + if (!tunnel_rule->hash_map) { + PMD_INIT_LOG(ERR, + "Failed to allocate memory for tunnel hash map!"); + ret = -ENOMEM; + goto err_tunnel_hash_map_alloc; + } + + return 0; + +err_tunnel_hash_map_alloc: + rte_hash_free(tunnel_rule->hash_table); + + return ret; +} + +static int +i40e_init_fdir_filter_list(struct rte_eth_dev *dev) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_fdir_info *fdir_info = &pf->fdir; + char fdir_hash_name[RTE_HASH_NAMESIZE]; + int ret; + + struct rte_hash_parameters fdir_hash_params = { + .name = fdir_hash_name, + .entries = I40E_MAX_FDIR_FILTER_NUM, + .key_len = sizeof(struct i40e_fdir_input), + .hash_func = rte_hash_crc, + .hash_func_init_val = 0, + .socket_id = rte_socket_id(), + }; + + /* Initialize flow director filter rule list and hash */ + TAILQ_INIT(&fdir_info->fdir_list); + snprintf(fdir_hash_name, RTE_HASH_NAMESIZE, + "fdir_%s", dev->device->name); + fdir_info->hash_table = rte_hash_create(&fdir_hash_params); + if (!fdir_info->hash_table) { + PMD_INIT_LOG(ERR, "Failed to create fdir hash table!"); + return -EINVAL; + } + fdir_info->hash_map = rte_zmalloc("i40e_fdir_hash_map", + sizeof(struct i40e_fdir_filter *) * + I40E_MAX_FDIR_FILTER_NUM, + 0); + if (!fdir_info->hash_map) { + PMD_INIT_LOG(ERR, + "Failed to allocate memory for fdir hash map!"); + ret = -ENOMEM; + goto err_fdir_hash_map_alloc; + } + return 0; + +err_fdir_hash_map_alloc: + rte_hash_free(fdir_info->hash_table); + + return ret; +} + +static void +i40e_init_customized_info(struct i40e_pf *pf) +{ + int i; + + /* Initialize customized pctype */ + for (i = I40E_CUSTOMIZED_GTPC; i < I40E_CUSTOMIZED_MAX; i++) { + pf->customized_pctype[i].index = i; + pf->customized_pctype[i].pctype = I40E_FILTER_PCTYPE_INVALID; + pf->customized_pctype[i].valid = false; + } + + pf->gtp_support = false; + pf->esp_support = false; +} + +void +i40e_init_queue_region_conf(struct rte_eth_dev *dev) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_queue_regions *info = &pf->queue_region; + uint16_t i; + + for (i = 0; i < I40E_PFQF_HREGION_MAX_INDEX; i++) + i40e_write_rx_ctl(hw, I40E_PFQF_HREGION(i), 0); + + memset(info, 0, sizeof(struct i40e_queue_regions)); +} + +static int +i40e_parse_multi_drv_handler(__rte_unused const char *key, + const char *value, + void *opaque) +{ + struct i40e_pf *pf; + unsigned long support_multi_driver; + char *end; + + pf = (struct i40e_pf *)opaque; + + errno = 0; + support_multi_driver = strtoul(value, &end, 10); + if (errno != 0 || end == value || *end != 0) { + PMD_DRV_LOG(WARNING, "Wrong global configuration"); + return -(EINVAL); + } + + if (support_multi_driver == 1 || support_multi_driver == 0) + pf->support_multi_driver = (bool)support_multi_driver; + else + PMD_DRV_LOG(WARNING, "%s must be 1 or 0,", + "enable global configuration by default." 
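+/*
+ * Each filter family initialized above (ethertype, tunnel, flow director)
+ * uses the same bookkeeping: an rte_hash keyed by the filter input, a
+ * hash_map array translating the returned slot into a filter pointer, and a
+ * TAILQ holding the rules in insertion order.  A minimal sketch of recording
+ * one flow director rule with that scheme; fdir_filter_record() is an
+ * illustrative name only, the driver's real insert/delete helpers live in
+ * other parts of the PMD:
+ *
+ *   static int
+ *   fdir_filter_record(struct i40e_fdir_info *fdir_info,
+ *                      struct i40e_fdir_filter *filter,
+ *                      const struct i40e_fdir_input *input)
+ *   {
+ *       int pos = rte_hash_add_key(fdir_info->hash_table, input);
+ *
+ *       if (pos < 0)
+ *           return pos;                 // table full or invalid parameters
+ *       fdir_info->hash_map[pos] = filter;
+ *       TAILQ_INSERT_TAIL(&fdir_info->fdir_list, filter, rules);
+ *       return 0;
+ *   }
+ */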
+ ETH_I40E_SUPPORT_MULTI_DRIVER); + return 0; +} + +static int +i40e_support_multi_driver(struct rte_eth_dev *dev) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct rte_kvargs *kvlist; + int kvargs_count; + + /* Enable global configuration by default */ + pf->support_multi_driver = false; + + if (!dev->device->devargs) + return 0; + + kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys); + if (!kvlist) + return -EINVAL; + + kvargs_count = rte_kvargs_count(kvlist, ETH_I40E_SUPPORT_MULTI_DRIVER); + if (!kvargs_count) { + rte_kvargs_free(kvlist); + return 0; + } + + if (kvargs_count > 1) + PMD_DRV_LOG(WARNING, "More than one argument \"%s\" and only " + "the first invalid or last valid one is used !", + ETH_I40E_SUPPORT_MULTI_DRIVER); + + if (rte_kvargs_process(kvlist, ETH_I40E_SUPPORT_MULTI_DRIVER, + i40e_parse_multi_drv_handler, pf) < 0) { + rte_kvargs_free(kvlist); + return -EINVAL; + } + + rte_kvargs_free(kvlist); + return 0; +} + +static int +i40e_aq_debug_write_global_register(struct i40e_hw *hw, + uint32_t reg_addr, uint64_t reg_val, + struct i40e_asq_cmd_details *cmd_details) +{ + uint64_t ori_reg_val; + struct rte_eth_dev *dev; + int ret; + + ret = i40e_aq_debug_read_register(hw, reg_addr, &ori_reg_val, NULL); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, + "Fail to debug read from 0x%08x", + reg_addr); + return -EIO; + } + dev = ((struct i40e_adapter *)hw->back)->eth_dev; + + if (ori_reg_val != reg_val) + PMD_DRV_LOG(WARNING, + "i40e device %s changed global register [0x%08x]." + " original: 0x%"PRIx64", after: 0x%"PRIx64, + dev->device->name, reg_addr, ori_reg_val, reg_val); + + return i40e_aq_debug_write_register(hw, reg_addr, reg_val, cmd_details); +} + +static int +i40e_parse_latest_vec_handler(__rte_unused const char *key, + const char *value, + void *opaque) +{ + struct i40e_adapter *ad = opaque; + int use_latest_vec; + + use_latest_vec = atoi(value); + + if (use_latest_vec != 0 && use_latest_vec != 1) + PMD_DRV_LOG(WARNING, "Value should be 0 or 1, set it as 1!"); + + ad->use_latest_vec = (uint8_t)use_latest_vec; + + return 0; +} + +static int +i40e_use_latest_vec(struct rte_eth_dev *dev) +{ + struct i40e_adapter *ad = + I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + struct rte_kvargs *kvlist; + int kvargs_count; + + ad->use_latest_vec = false; + + if (!dev->device->devargs) + return 0; + + kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys); + if (!kvlist) + return -EINVAL; + + kvargs_count = rte_kvargs_count(kvlist, ETH_I40E_USE_LATEST_VEC); + if (!kvargs_count) { + rte_kvargs_free(kvlist); + return 0; + } + + if (kvargs_count > 1) + PMD_DRV_LOG(WARNING, "More than one argument \"%s\" and only " + "the first invalid or last valid one is used !", + ETH_I40E_USE_LATEST_VEC); + + if (rte_kvargs_process(kvlist, ETH_I40E_USE_LATEST_VEC, + i40e_parse_latest_vec_handler, ad) < 0) { + rte_kvargs_free(kvlist); + return -EINVAL; + } + + rte_kvargs_free(kvlist); + return 0; +} + +static int +read_vf_msg_config(__rte_unused const char *key, + const char *value, + void *opaque) +{ + struct i40e_vf_msg_cfg *cfg = opaque; + + if (sscanf(value, "%u@%u:%u", &cfg->max_msg, &cfg->period, + &cfg->ignore_second) != 3) { + memset(cfg, 0, sizeof(*cfg)); + PMD_DRV_LOG(ERR, "format error! example: " + "%s=60@120:180", ETH_I40E_VF_MSG_CFG); + return -EINVAL; + } + + /* + * If the message validation function been enabled, the 'period' + * and 'ignore_second' must greater than 0. 
+ */ + if (cfg->max_msg && (!cfg->period || !cfg->ignore_second)) { + memset(cfg, 0, sizeof(*cfg)); + PMD_DRV_LOG(ERR, "%s error! the second and third" + " number must be greater than 0!", + ETH_I40E_VF_MSG_CFG); + return -EINVAL; + } + + return 0; +} + +static int +i40e_parse_vf_msg_config(struct rte_eth_dev *dev, + struct i40e_vf_msg_cfg *msg_cfg) +{ + struct rte_kvargs *kvlist; + int kvargs_count; + int ret = 0; + + memset(msg_cfg, 0, sizeof(*msg_cfg)); + + if (!dev->device->devargs) + return ret; + + kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys); + if (!kvlist) + return -EINVAL; + + kvargs_count = rte_kvargs_count(kvlist, ETH_I40E_VF_MSG_CFG); + if (!kvargs_count) + goto free_end; + + if (kvargs_count > 1) { + PMD_DRV_LOG(ERR, "More than one argument \"%s\"!", + ETH_I40E_VF_MSG_CFG); + ret = -EINVAL; + goto free_end; + } + + if (rte_kvargs_process(kvlist, ETH_I40E_VF_MSG_CFG, + read_vf_msg_config, msg_cfg) < 0) + ret = -EINVAL; + +free_end: + rte_kvargs_free(kvlist); + return ret; +} + +#define I40E_ALARM_INTERVAL 50000 /* us */ + +static int +eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused) +{ + struct rte_pci_device *pci_dev; + struct rte_intr_handle *intr_handle; + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct i40e_vsi *vsi; + int ret; + uint32_t len, val; + uint8_t aq_fail = 0; + + PMD_INIT_FUNC_TRACE(); + + dev->dev_ops = &i40e_eth_dev_ops; + dev->rx_pkt_burst = i40e_recv_pkts; + dev->tx_pkt_burst = i40e_xmit_pkts; + dev->tx_pkt_prepare = i40e_prep_pkts; + + /* for secondary processes, we don't initialise any further as primary + * has already done this work. Only check we don't need a different + * RX function */ + if (rte_eal_process_type() != RTE_PROC_PRIMARY){ + i40e_set_rx_function(dev); + i40e_set_tx_function(dev); + return 0; + } + i40e_set_default_ptype_table(dev); + pci_dev = RTE_ETH_DEV_TO_PCI(dev); + intr_handle = &pci_dev->intr_handle; + + rte_eth_copy_pci_info(dev, pci_dev); + + pf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + pf->adapter->eth_dev = dev; + pf->dev_data = dev->data; + + hw->back = I40E_PF_TO_ADAPTER(pf); + hw->hw_addr = (uint8_t *)(pci_dev->mem_resource[0].addr); + if (!hw->hw_addr) { + PMD_INIT_LOG(ERR, + "Hardware is not available, as address is NULL"); + return -ENODEV; + } + + hw->vendor_id = pci_dev->id.vendor_id; + hw->device_id = pci_dev->id.device_id; + hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id; + hw->subsystem_device_id = pci_dev->id.subsystem_device_id; + hw->bus.device = pci_dev->addr.devid; + hw->bus.func = pci_dev->addr.function; + hw->adapter_stopped = 0; + hw->adapter_closed = 0; + + /* Init switch device pointer */ + hw->switch_dev = NULL; + + /* + * Switch Tag value should not be identical to either the First Tag + * or Second Tag values. So set something other than common Ethertype + * for internal switching. + */ + hw->switch_tag = 0xffff; + + val = I40E_READ_REG(hw, I40E_GL_FWSTS); + if (val & I40E_GL_FWSTS_FWS1B_MASK) { + PMD_INIT_LOG(ERR, "\nERROR: " + "Firmware recovery mode detected. 
Limiting functionality.\n" + "Refer to the Intel(R) Ethernet Adapters and Devices " + "User Guide for details on firmware recovery mode."); + return -EIO; + } + + i40e_parse_vf_msg_config(dev, &pf->vf_msg_cfg); + /* Check if need to support multi-driver */ + i40e_support_multi_driver(dev); + /* Check if users want the latest supported vec path */ + i40e_use_latest_vec(dev); + + /* Make sure all is clean before doing PF reset */ + i40e_clear_hw(hw); + + /* Reset here to make sure all is clean for each PF */ + ret = i40e_pf_reset(hw); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to reset pf: %d", ret); + return ret; + } + + /* Initialize the shared code (base driver) */ + ret = i40e_init_shared_code(hw); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to init shared code (base driver): %d", ret); + return ret; + } + + /* Initialize the parameters for adminq */ + i40e_init_adminq_parameter(hw); + ret = i40e_init_adminq(hw); + if (ret != I40E_SUCCESS) { + PMD_INIT_LOG(ERR, "Failed to init adminq: %d", ret); + return -EIO; + } + /* Firmware of SFP x722 does not support adminq option */ + if (hw->device_id == I40E_DEV_ID_SFP_X722) + hw->flags &= ~I40E_HW_FLAG_802_1AD_CAPABLE; + + PMD_INIT_LOG(INFO, "FW %d.%d API %d.%d NVM %02d.%02d.%02d eetrack %04x", + hw->aq.fw_maj_ver, hw->aq.fw_min_ver, + hw->aq.api_maj_ver, hw->aq.api_min_ver, + ((hw->nvm.version >> 12) & 0xf), + ((hw->nvm.version >> 4) & 0xff), + (hw->nvm.version & 0xf), hw->nvm.eetrack); + + /* Initialize the hardware */ + i40e_hw_init(dev); + + i40e_config_automask(pf); + + i40e_set_default_pctype_table(dev); + + /* + * To work around the NVM issue, initialize registers + * for packet type of QinQ by software. + * It should be removed once issues are fixed in NVM. + */ + if (!pf->support_multi_driver) + i40e_GLQF_reg_init(hw); + + /* Initialize the input set for filters (hash and fd) to default value */ + i40e_filter_input_set_init(pf); + + /* initialise the L3_MAP register */ + if (!pf->support_multi_driver) { + ret = i40e_aq_debug_write_global_register(hw, + I40E_GLQF_L3_MAP(40), + 0x00000028, NULL); + if (ret) + PMD_INIT_LOG(ERR, "Failed to write L3 MAP register %d", + ret); + PMD_INIT_LOG(DEBUG, + "Global register 0x%08x is changed with 0x28", + I40E_GLQF_L3_MAP(40)); + } + + /* Need the special FW version to support floating VEB */ + config_floating_veb(dev); + /* Clear PXE mode */ + i40e_clear_pxe_mode(hw); + i40e_dev_sync_phy_type(hw); + + /* + * On X710, performance number is far from the expectation on recent + * firmware versions. The fix for this issue may not be integrated in + * the following firmware version. So the workaround in software driver + * is needed. It needs to modify the initial values of 3 internal only + * registers. Note that the workaround can be removed when it is fixed + * in firmware in the future. 
+ */ + i40e_configure_registers(hw); + + /* Get hw capabilities */ + ret = i40e_get_cap(hw); + if (ret != I40E_SUCCESS) { + PMD_INIT_LOG(ERR, "Failed to get capabilities: %d", ret); + goto err_get_capabilities; + } + + /* Initialize parameters for PF */ + ret = i40e_pf_parameter_init(dev); + if (ret != 0) { + PMD_INIT_LOG(ERR, "Failed to do parameter init: %d", ret); + goto err_parameter_init; + } + + /* Initialize the queue management */ + ret = i40e_res_pool_init(&pf->qp_pool, 0, hw->func_caps.num_tx_qp); + if (ret < 0) { + PMD_INIT_LOG(ERR, "Failed to init queue pool"); + goto err_qp_pool_init; + } + ret = i40e_res_pool_init(&pf->msix_pool, 1, + hw->func_caps.num_msix_vectors - 1); + if (ret < 0) { + PMD_INIT_LOG(ERR, "Failed to init MSIX pool"); + goto err_msix_pool_init; + } + + /* Initialize lan hmc */ + ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp, + hw->func_caps.num_rx_qp, 0, 0); + if (ret != I40E_SUCCESS) { + PMD_INIT_LOG(ERR, "Failed to init lan hmc: %d", ret); + goto err_init_lan_hmc; + } + + /* Configure lan hmc */ + ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY); + if (ret != I40E_SUCCESS) { + PMD_INIT_LOG(ERR, "Failed to configure lan hmc: %d", ret); + goto err_configure_lan_hmc; + } + + /* Get and check the mac address */ + i40e_get_mac_addr(hw, hw->mac.addr); + if (i40e_validate_mac_addr(hw->mac.addr) != I40E_SUCCESS) { + PMD_INIT_LOG(ERR, "mac address is not valid"); + ret = -EIO; + goto err_get_mac_addr; + } + /* Copy the permanent MAC address */ + rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr, + (struct rte_ether_addr *)hw->mac.perm_addr); + + /* Disable flow control */ + hw->fc.requested_mode = I40E_FC_NONE; + i40e_set_fc(hw, &aq_fail, TRUE); + + /* Set the global registers with default ether type value */ + if (!pf->support_multi_driver) { + ret = i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER, + RTE_ETHER_TYPE_VLAN); + if (ret != I40E_SUCCESS) { + PMD_INIT_LOG(ERR, + "Failed to set the default outer " + "VLAN ether type"); + goto err_setup_pf_switch; + } + } + + /* PF setup, which includes VSI setup */ + ret = i40e_pf_setup(pf); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to setup pf switch: %d", ret); + goto err_setup_pf_switch; + } + + vsi = pf->main_vsi; + + /* Disable double vlan by default */ + i40e_vsi_config_double_vlan(vsi, FALSE); + + /* Disable S-TAG identification when floating_veb is disabled */ + if (!pf->floating_veb) { + ret = I40E_READ_REG(hw, I40E_PRT_L2TAGSEN); + if (ret & I40E_L2_TAGS_S_TAG_MASK) { + ret &= ~I40E_L2_TAGS_S_TAG_MASK; + I40E_WRITE_REG(hw, I40E_PRT_L2TAGSEN, ret); + } + } + + if (!vsi->max_macaddrs) + len = RTE_ETHER_ADDR_LEN; + else + len = RTE_ETHER_ADDR_LEN * vsi->max_macaddrs; + + /* Should be after VSI initialized */ + dev->data->mac_addrs = rte_zmalloc("i40e", len, 0); + if (!dev->data->mac_addrs) { + PMD_INIT_LOG(ERR, + "Failed to allocated memory for storing mac address"); + goto err_mac_alloc; + } + rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr, + &dev->data->mac_addrs[0]); + + /* Pass the information to the rte_eth_dev_close() that it should also + * release the private port resources. 
+ */ + dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE; + + /* Init dcb to sw mode by default */ + ret = i40e_dcb_init_configure(dev, TRUE); + if (ret != I40E_SUCCESS) { + PMD_INIT_LOG(INFO, "Failed to init dcb."); + pf->flags &= ~I40E_FLAG_DCB; + } + /* Update HW struct after DCB configuration */ + i40e_get_cap(hw); + + /* initialize pf host driver to setup SRIOV resource if applicable */ + i40e_pf_host_init(dev); + + /* register callback func to eal lib */ + rte_intr_callback_register(intr_handle, + i40e_dev_interrupt_handler, dev); + + /* configure and enable device interrupt */ + i40e_pf_config_irq0(hw, TRUE); + i40e_pf_enable_irq0(hw); + + /* enable uio intr after callback register */ + rte_intr_enable(intr_handle); + + /* By default disable flexible payload in global configuration */ + if (!pf->support_multi_driver) + i40e_flex_payload_reg_set_default(hw); + + /* + * Add an ethertype filter to drop all flow control frames transmitted + * from VSIs. By doing so, we stop VF from sending out PAUSE or PFC + * frames to wire. + */ + i40e_add_tx_flow_control_drop_filter(pf); + + /* Set the max frame size to 0x2600 by default, + * in case other drivers changed the default value. + */ + i40e_aq_set_mac_config(hw, I40E_FRAME_SIZE_MAX, TRUE, false, 0, NULL); + + /* initialize mirror rule list */ + TAILQ_INIT(&pf->mirror_list); + + /* initialize RSS rule list */ + TAILQ_INIT(&pf->rss_config_list); + + /* initialize Traffic Manager configuration */ + i40e_tm_conf_init(dev); + + /* Initialize customized information */ + i40e_init_customized_info(pf); + + ret = i40e_init_ethtype_filter_list(dev); + if (ret < 0) + goto err_init_ethtype_filter_list; + ret = i40e_init_tunnel_filter_list(dev); + if (ret < 0) + goto err_init_tunnel_filter_list; + ret = i40e_init_fdir_filter_list(dev); + if (ret < 0) + goto err_init_fdir_filter_list; + + /* initialize queue region configuration */ + i40e_init_queue_region_conf(dev); + + /* initialize RSS configuration from rte_flow */ + memset(&pf->rss_info, 0, + sizeof(struct i40e_rte_flow_rss_conf)); + + /* reset all stats of the device, including pf and main vsi */ + i40e_dev_stats_reset(dev); + + return 0; + +err_init_fdir_filter_list: + rte_free(pf->tunnel.hash_table); + rte_free(pf->tunnel.hash_map); +err_init_tunnel_filter_list: + rte_free(pf->ethertype.hash_table); + rte_free(pf->ethertype.hash_map); +err_init_ethtype_filter_list: + rte_free(dev->data->mac_addrs); + dev->data->mac_addrs = NULL; +err_mac_alloc: + i40e_vsi_release(pf->main_vsi); +err_setup_pf_switch: +err_get_mac_addr: +err_configure_lan_hmc: + (void)i40e_shutdown_lan_hmc(hw); +err_init_lan_hmc: + i40e_res_pool_destroy(&pf->msix_pool); +err_msix_pool_init: + i40e_res_pool_destroy(&pf->qp_pool); +err_qp_pool_init: +err_parameter_init: +err_get_capabilities: + (void)i40e_shutdown_adminq(hw); + + return ret; +} + +static void +i40e_rm_ethtype_filter_list(struct i40e_pf *pf) +{ + struct i40e_ethertype_filter *p_ethertype; + struct i40e_ethertype_rule *ethertype_rule; + + ethertype_rule = &pf->ethertype; + /* Remove all ethertype filter rules and hash */ + if (ethertype_rule->hash_map) + rte_free(ethertype_rule->hash_map); + if (ethertype_rule->hash_table) + rte_hash_free(ethertype_rule->hash_table); + + while ((p_ethertype = TAILQ_FIRST(ðertype_rule->ethertype_list))) { + TAILQ_REMOVE(ðertype_rule->ethertype_list, + p_ethertype, rules); + rte_free(p_ethertype); + } +} + +static void +i40e_rm_tunnel_filter_list(struct i40e_pf *pf) +{ + struct i40e_tunnel_filter *p_tunnel; + struct i40e_tunnel_rule 
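+/*
+ * The error ladder that ends eth_i40e_dev_init() above follows the file's
+ * usual convention: resources are released strictly in the reverse order of
+ * their acquisition, with one label per acquisition step.  A stripped-down
+ * sketch of the same shape (all names are illustrative, not driver symbols):
+ *
+ *   static int
+ *   init_three_resources(void)
+ *   {
+ *       int ret;
+ *
+ *       ret = acquire_a();
+ *       if (ret)
+ *           return ret;
+ *       ret = acquire_b();
+ *       if (ret)
+ *           goto err_b;
+ *       ret = acquire_c();
+ *       if (ret)
+ *           goto err_c;
+ *       return 0;
+ *   err_c:
+ *       release_b();
+ *   err_b:
+ *       release_a();
+ *       return ret;
+ *   }
+ */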
*tunnel_rule; + + tunnel_rule = &pf->tunnel; + /* Remove all tunnel director rules and hash */ + if (tunnel_rule->hash_map) + rte_free(tunnel_rule->hash_map); + if (tunnel_rule->hash_table) + rte_hash_free(tunnel_rule->hash_table); + + while ((p_tunnel = TAILQ_FIRST(&tunnel_rule->tunnel_list))) { + TAILQ_REMOVE(&tunnel_rule->tunnel_list, p_tunnel, rules); + rte_free(p_tunnel); + } +} + +static void +i40e_rm_fdir_filter_list(struct i40e_pf *pf) +{ + struct i40e_fdir_filter *p_fdir; + struct i40e_fdir_info *fdir_info; + + fdir_info = &pf->fdir; + /* Remove all flow director rules and hash */ + if (fdir_info->hash_map) + rte_free(fdir_info->hash_map); + if (fdir_info->hash_table) + rte_hash_free(fdir_info->hash_table); + + while ((p_fdir = TAILQ_FIRST(&fdir_info->fdir_list))) { + TAILQ_REMOVE(&fdir_info->fdir_list, p_fdir, rules); + rte_free(p_fdir); + } +} + +void i40e_flex_payload_reg_set_default(struct i40e_hw *hw) +{ + /* + * Disable by default flexible payload + * for corresponding L2/L3/L4 layers. + */ + I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(33), 0x00000000); + I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(34), 0x00000000); + I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(35), 0x00000000); +} + +static int +eth_i40e_dev_uninit(struct rte_eth_dev *dev) +{ + struct i40e_hw *hw; + + PMD_INIT_FUNC_TRACE(); + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; + + hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (hw->adapter_closed == 0) + i40e_dev_close(dev); + + return 0; +} + +static int +i40e_dev_configure(struct rte_eth_dev *dev) +{ + struct i40e_adapter *ad = + I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + enum rte_eth_rx_mq_mode mq_mode = dev->data->dev_conf.rxmode.mq_mode; + int i, ret; + + ret = i40e_dev_sync_phy_type(hw); + if (ret) + return ret; + + /* Initialize to TRUE. If any of Rx queues doesn't meet the + * bulk allocation or vector Rx preconditions we will reset it. + */ + ad->rx_bulk_alloc_allowed = true; + ad->rx_vec_allowed = true; + ad->tx_simple_allowed = true; + ad->tx_vec_allowed = true; + + if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) + dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH; + + /* Only legacy filter API needs the following fdir config. So when the + * legacy filter API is deprecated, the following codes should also be + * removed. + */ + if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT) { + ret = i40e_fdir_setup(pf); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "Failed to setup flow director."); + return -ENOTSUP; + } + ret = i40e_fdir_configure(dev); + if (ret < 0) { + PMD_DRV_LOG(ERR, "failed to configure fdir."); + goto err; + } + } else + i40e_fdir_teardown(pf); + + ret = i40e_dev_init_vlan(dev); + if (ret < 0) + goto err; + + /* VMDQ setup. + * Needs to move VMDQ setting out of i40e_pf_config_mq_rx() as VMDQ and + * RSS setting have different requirements. + * General PMD driver call sequence are NIC init, configure, + * rx/tx_queue_setup and dev_start. In rx/tx_queue_setup() function, it + * will try to lookup the VSI that specific queue belongs to if VMDQ + * applicable. So, VMDQ setting has to be done before + * rx/tx_queue_setup(). This function is good to place vmdq_setup. + * For RSS setting, it will try to calculate actual configured RX queue + * number, which will be available after rx_queue_setup(). 
dev_start() + * function is good to place RSS setup. + */ + if (mq_mode & ETH_MQ_RX_VMDQ_FLAG) { + ret = i40e_vmdq_setup(dev); + if (ret) + goto err; + } + + if (mq_mode & ETH_MQ_RX_DCB_FLAG) { + ret = i40e_dcb_setup(dev); + if (ret) { + PMD_DRV_LOG(ERR, "failed to configure DCB."); + goto err_dcb; + } + } + + TAILQ_INIT(&pf->flow_list); + + return 0; + +err_dcb: + /* need to release vmdq resource if exists */ + for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) { + i40e_vsi_release(pf->vmdq[i].vsi); + pf->vmdq[i].vsi = NULL; + } + rte_free(pf->vmdq); + pf->vmdq = NULL; +err: + /* Need to release fdir resource if exists. + * Only legacy filter API needs the following fdir config. So when the + * legacy filter API is deprecated, the following code should also be + * removed. + */ + i40e_fdir_teardown(pf); + return ret; +} + +void +i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi) +{ + struct rte_eth_dev *dev = vsi->adapter->eth_dev; + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); + uint16_t msix_vect = vsi->msix_intr; + uint16_t i; + + for (i = 0; i < vsi->nb_qps; i++) { + I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0); + I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0); + rte_wmb(); + } + + if (vsi->type != I40E_VSI_SRIOV) { + if (!rte_intr_allow_others(intr_handle)) { + I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0, + I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK); + I40E_WRITE_REG(hw, + I40E_PFINT_ITR0(I40E_ITR_INDEX_DEFAULT), + 0); + } else { + I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1), + I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK); + I40E_WRITE_REG(hw, + I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT, + msix_vect - 1), 0); + } + } else { + uint32_t reg; + reg = (hw->func_caps.num_msix_vectors_vf - 1) * + vsi->user_param + (msix_vect - 1); + + I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg), + I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK); + } + I40E_WRITE_FLUSH(hw); +} + +static void +__vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t msix_vect, + int base_queue, int nb_queue, + uint16_t itr_idx) +{ + int i; + uint32_t val; + struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); + struct i40e_pf *pf = I40E_VSI_TO_PF(vsi); + + /* Bind all RX queues to allocated MSIX interrupt */ + for (i = 0; i < nb_queue; i++) { + val = (msix_vect << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) | + itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT | + ((base_queue + i + 1) << + I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) | + (0 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) | + I40E_QINT_RQCTL_CAUSE_ENA_MASK; + + if (i == nb_queue - 1) + val |= I40E_QINT_RQCTL_NEXTQ_INDX_MASK; + I40E_WRITE_REG(hw, I40E_QINT_RQCTL(base_queue + i), val); + } + + /* Write first RX queue to Link list register as the head element */ + if (vsi->type != I40E_VSI_SRIOV) { + uint16_t interval = + i40e_calc_itr_interval(1, pf->support_multi_driver); + + if (msix_vect == I40E_MISC_VEC_ID) { + I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0, + (base_queue << + I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT) | + (0x0 << + I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT)); + I40E_WRITE_REG(hw, + I40E_PFINT_ITR0(I40E_ITR_INDEX_DEFAULT), + interval); + } else { + I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1), + (base_queue << + I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) | + (0x0 << + I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)); + I40E_WRITE_REG(hw, + I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT, + msix_vect - 1), + interval); + } + } else { + uint32_t reg; + + if (msix_vect == I40E_MISC_VEC_ID) { + I40E_WRITE_REG(hw, + 
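+/*
+ * Worked example of the chaining done by __vsi_queues_bind_intr() above:
+ * binding four RX queues 4..7 of a PF VSI to MSI-X vector 3 writes the head
+ * of the chain (first queue 4, type RX) into I40E_PFINT_LNKLSTN(2), then
+ * programs QINT_RQCTL(4).NEXTQ = 5, QINT_RQCTL(5).NEXTQ = 6 and
+ * QINT_RQCTL(6).NEXTQ = 7, and terminates the list by setting the NEXTQ_INDX
+ * field of QINT_RQCTL(7) to I40E_QINT_RQCTL_NEXTQ_INDX_MASK.  Every RQCTL
+ * entry also carries the vector number, the ITR index and the CAUSE_ENA bit,
+ * exactly as built in the loop above.
+ */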
I40E_VPINT_LNKLST0(vsi->user_param), + (base_queue << + I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) | + (0x0 << + I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT)); + } else { + /* num_msix_vectors_vf needs to minus irq0 */ + reg = (hw->func_caps.num_msix_vectors_vf - 1) * + vsi->user_param + (msix_vect - 1); + + I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg), + (base_queue << + I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT) | + (0x0 << + I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)); + } + } + + I40E_WRITE_FLUSH(hw); +} + +void +i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t itr_idx) +{ + struct rte_eth_dev *dev = vsi->adapter->eth_dev; + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); + uint16_t msix_vect = vsi->msix_intr; + uint16_t nb_msix = RTE_MIN(vsi->nb_msix, intr_handle->nb_efd); + uint16_t queue_idx = 0; + int record = 0; + int i; + + for (i = 0; i < vsi->nb_qps; i++) { + I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0); + I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0); + } + + /* VF bind interrupt */ + if (vsi->type == I40E_VSI_SRIOV) { + __vsi_queues_bind_intr(vsi, msix_vect, + vsi->base_queue, vsi->nb_qps, + itr_idx); + return; + } + + /* PF & VMDq bind interrupt */ + if (rte_intr_dp_is_en(intr_handle)) { + if (vsi->type == I40E_VSI_MAIN) { + queue_idx = 0; + record = 1; + } else if (vsi->type == I40E_VSI_VMDQ2) { + struct i40e_vsi *main_vsi = + I40E_DEV_PRIVATE_TO_MAIN_VSI(vsi->adapter); + queue_idx = vsi->base_queue - main_vsi->nb_qps; + record = 1; + } + } + + for (i = 0; i < vsi->nb_used_qps; i++) { + if (nb_msix <= 1) { + if (!rte_intr_allow_others(intr_handle)) + /* allow to share MISC_VEC_ID */ + msix_vect = I40E_MISC_VEC_ID; + + /* no enough msix_vect, map all to one */ + __vsi_queues_bind_intr(vsi, msix_vect, + vsi->base_queue + i, + vsi->nb_used_qps - i, + itr_idx); + for (; !!record && i < vsi->nb_used_qps; i++) + intr_handle->intr_vec[queue_idx + i] = + msix_vect; + break; + } + /* 1:1 queue/msix_vect mapping */ + __vsi_queues_bind_intr(vsi, msix_vect, + vsi->base_queue + i, 1, + itr_idx); + if (!!record) + intr_handle->intr_vec[queue_idx + i] = msix_vect; + + msix_vect++; + nb_msix--; + } +} + +static void +i40e_vsi_enable_queues_intr(struct i40e_vsi *vsi) +{ + struct rte_eth_dev *dev = vsi->adapter->eth_dev; + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); + struct i40e_pf *pf = I40E_VSI_TO_PF(vsi); + uint16_t msix_intr, i; + + if (rte_intr_allow_others(intr_handle) && !pf->support_multi_driver) + for (i = 0; i < vsi->nb_msix; i++) { + msix_intr = vsi->msix_intr + i; + I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1), + I40E_PFINT_DYN_CTLN_INTENA_MASK | + I40E_PFINT_DYN_CTLN_CLEARPBA_MASK | + I40E_PFINT_DYN_CTLN_ITR_INDX_MASK); + } + else + I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, + I40E_PFINT_DYN_CTL0_INTENA_MASK | + I40E_PFINT_DYN_CTL0_CLEARPBA_MASK | + I40E_PFINT_DYN_CTL0_ITR_INDX_MASK); + + I40E_WRITE_FLUSH(hw); +} + +static void +i40e_vsi_disable_queues_intr(struct i40e_vsi *vsi) +{ + struct rte_eth_dev *dev = vsi->adapter->eth_dev; + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); + struct i40e_pf *pf = I40E_VSI_TO_PF(vsi); + uint16_t msix_intr, i; + + if (rte_intr_allow_others(intr_handle) && 
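+/*
+ * Distribution example for i40e_vsi_queues_bind_intr() above: with
+ * nb_used_qps = 8 and nb_msix = 4 starting at vector v, queues 0, 1 and 2
+ * each get their own vector (v, v+1, v+2); once a single vector remains, the
+ * leftover queues 3..7 are chained together onto v+3 by one
+ * __vsi_queues_bind_intr() call.  When rte_intr_allow_others() is false the
+ * whole VSI falls back to sharing I40E_MISC_VEC_ID.
+ */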
!pf->support_multi_driver) + for (i = 0; i < vsi->nb_msix; i++) { + msix_intr = vsi->msix_intr + i; + I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1), + I40E_PFINT_DYN_CTLN_ITR_INDX_MASK); + } + else + I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, + I40E_PFINT_DYN_CTL0_ITR_INDX_MASK); + + I40E_WRITE_FLUSH(hw); +} + +static inline uint8_t +i40e_parse_link_speeds(uint16_t link_speeds) +{ + uint8_t link_speed = I40E_LINK_SPEED_UNKNOWN; + + if (link_speeds & ETH_LINK_SPEED_40G) + link_speed |= I40E_LINK_SPEED_40GB; + if (link_speeds & ETH_LINK_SPEED_25G) + link_speed |= I40E_LINK_SPEED_25GB; + if (link_speeds & ETH_LINK_SPEED_20G) + link_speed |= I40E_LINK_SPEED_20GB; + if (link_speeds & ETH_LINK_SPEED_10G) + link_speed |= I40E_LINK_SPEED_10GB; + if (link_speeds & ETH_LINK_SPEED_1G) + link_speed |= I40E_LINK_SPEED_1GB; + if (link_speeds & ETH_LINK_SPEED_100M) + link_speed |= I40E_LINK_SPEED_100MB; + + return link_speed; +} + +static int +i40e_phy_conf_link(struct i40e_hw *hw, + uint8_t abilities, + uint8_t force_speed, + bool is_up) +{ + enum i40e_status_code status; + struct i40e_aq_get_phy_abilities_resp phy_ab; + struct i40e_aq_set_phy_config phy_conf; + enum i40e_aq_phy_type cnt; + uint8_t avail_speed; + uint32_t phy_type_mask = 0; + + const uint8_t mask = I40E_AQ_PHY_FLAG_PAUSE_TX | + I40E_AQ_PHY_FLAG_PAUSE_RX | + I40E_AQ_PHY_FLAG_PAUSE_RX | + I40E_AQ_PHY_FLAG_LOW_POWER; + int ret = -ENOTSUP; + + /* To get phy capabilities of available speeds. */ + status = i40e_aq_get_phy_capabilities(hw, false, true, &phy_ab, + NULL); + if (status) { + PMD_DRV_LOG(ERR, "Failed to get PHY capabilities: %d\n", + status); + return ret; + } + avail_speed = phy_ab.link_speed; + + /* To get the current phy config. */ + status = i40e_aq_get_phy_capabilities(hw, false, false, &phy_ab, + NULL); + if (status) { + PMD_DRV_LOG(ERR, "Failed to get the current PHY config: %d\n", + status); + return ret; + } + + /* If link needs to go up and it is in autoneg mode the speed is OK, + * no need to set up again. + */ + if (is_up && phy_ab.phy_type != 0 && + abilities & I40E_AQ_PHY_AN_ENABLED && + phy_ab.link_speed != 0) + return I40E_SUCCESS; + + memset(&phy_conf, 0, sizeof(phy_conf)); + + /* bits 0-2 use the values from get_phy_abilities_resp */ + abilities &= ~mask; + abilities |= phy_ab.abilities & mask; + + phy_conf.abilities = abilities; + + /* If link needs to go up, but the force speed is not supported, + * Warn users and config the default available speeds. + */ + if (is_up && !(force_speed & avail_speed)) { + PMD_DRV_LOG(WARNING, "Invalid speed setting, set to default!\n"); + phy_conf.link_speed = avail_speed; + } else { + phy_conf.link_speed = is_up ? force_speed : avail_speed; + } + + /* PHY type mask needs to include each type except PHY type extension */ + for (cnt = I40E_PHY_TYPE_SGMII; cnt < I40E_PHY_TYPE_25GBASE_KR; cnt++) + phy_type_mask |= 1 << cnt; + + /* use get_phy_abilities_resp value for the rest */ + phy_conf.phy_type = is_up ? cpu_to_le32(phy_type_mask) : 0; + phy_conf.phy_type_ext = is_up ? 
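+/*
+ * Minimal usage sketch for i40e_parse_link_speeds() above, which folds the
+ * ethdev ETH_LINK_SPEED_* bitmap into the firmware's I40E_LINK_SPEED_* bits
+ * (port_conf is assumed application state, not a driver symbol):
+ *
+ *   struct rte_eth_conf port_conf = {
+ *       .link_speeds = ETH_LINK_SPEED_10G | ETH_LINK_SPEED_1G,
+ *   };
+ *   uint8_t speed = i40e_parse_link_speeds(port_conf.link_speeds);
+ *   // speed == (I40E_LINK_SPEED_10GB | I40E_LINK_SPEED_1GB)
+ *
+ * i40e_apply_link_speed() below only forces such a speed mask when the
+ * application did not request ETH_LINK_SPEED_AUTONEG.
+ */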
(I40E_AQ_PHY_TYPE_EXT_25G_KR | + I40E_AQ_PHY_TYPE_EXT_25G_CR | I40E_AQ_PHY_TYPE_EXT_25G_SR | + I40E_AQ_PHY_TYPE_EXT_25G_LR) : 0; + phy_conf.fec_config = phy_ab.fec_cfg_curr_mod_ext_info; + phy_conf.eee_capability = phy_ab.eee_capability; + phy_conf.eeer = phy_ab.eeer_val; + phy_conf.low_power_ctrl = phy_ab.d3_lpan; + + PMD_DRV_LOG(DEBUG, "\tCurrent: abilities %x, link_speed %x", + phy_ab.abilities, phy_ab.link_speed); + PMD_DRV_LOG(DEBUG, "\tConfig: abilities %x, link_speed %x", + phy_conf.abilities, phy_conf.link_speed); + + status = i40e_aq_set_phy_config(hw, &phy_conf, NULL); + if (status) + return ret; + + return I40E_SUCCESS; +} + +static int +i40e_apply_link_speed(struct rte_eth_dev *dev) +{ + uint8_t speed; + uint8_t abilities = 0; + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_eth_conf *conf = &dev->data->dev_conf; + + abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK | + I40E_AQ_PHY_LINK_ENABLED; + + if (conf->link_speeds == ETH_LINK_SPEED_AUTONEG) { + conf->link_speeds = ETH_LINK_SPEED_40G | + ETH_LINK_SPEED_25G | + ETH_LINK_SPEED_20G | + ETH_LINK_SPEED_10G | + ETH_LINK_SPEED_1G | + ETH_LINK_SPEED_100M; + + abilities |= I40E_AQ_PHY_AN_ENABLED; + } else { + abilities &= ~I40E_AQ_PHY_AN_ENABLED; + } + speed = i40e_parse_link_speeds(conf->link_speeds); + + return i40e_phy_conf_link(hw, abilities, speed, true); +} + +static int +i40e_dev_start(struct rte_eth_dev *dev) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct i40e_vsi *main_vsi = pf->main_vsi; + int ret, i; + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + uint32_t intr_vector = 0; + struct i40e_vsi *vsi; + uint16_t nb_rxq, nb_txq; + + hw->adapter_stopped = 0; + + rte_intr_disable(intr_handle); + + if ((rte_intr_cap_multiple(intr_handle) || + !RTE_ETH_DEV_SRIOV(dev).active) && + dev->data->dev_conf.intr_conf.rxq != 0) { + intr_vector = dev->data->nb_rx_queues; + ret = rte_intr_efd_enable(intr_handle, intr_vector); + if (ret) + return ret; + } + + if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) { + intr_handle->intr_vec = + rte_zmalloc("intr_vec", + dev->data->nb_rx_queues * sizeof(int), + 0); + if (!intr_handle->intr_vec) { + PMD_INIT_LOG(ERR, + "Failed to allocate %d rx_queues intr_vec", + dev->data->nb_rx_queues); + return -ENOMEM; + } + } + + /* Initialize VSI */ + ret = i40e_dev_rxtx_init(pf); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "Failed to init rx/tx queues"); + return ret; + } + + /* Map queues with MSIX interrupt */ + main_vsi->nb_used_qps = dev->data->nb_rx_queues - + pf->nb_cfg_vmdq_vsi * RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM; + i40e_vsi_queues_bind_intr(main_vsi, I40E_ITR_INDEX_DEFAULT); + i40e_vsi_enable_queues_intr(main_vsi); + + /* Map VMDQ VSI queues with MSIX interrupt */ + for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) { + pf->vmdq[i].vsi->nb_used_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM; + i40e_vsi_queues_bind_intr(pf->vmdq[i].vsi, + I40E_ITR_INDEX_DEFAULT); + i40e_vsi_enable_queues_intr(pf->vmdq[i].vsi); + } + + /* enable FDIR MSIX interrupt */ + if (pf->fdir.fdir_vsi) { + i40e_vsi_queues_bind_intr(pf->fdir.fdir_vsi, + I40E_ITR_INDEX_NONE); + i40e_vsi_enable_queues_intr(pf->fdir.fdir_vsi); + } + + /* Enable all queues which have been configured */ + for (nb_rxq = 0; nb_rxq < dev->data->nb_rx_queues; nb_rxq++) { + ret = i40e_dev_rx_queue_start(dev, nb_rxq); + if (ret) + goto rx_err; + } + + 
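+/*
+ * Application-side sketch of how the callbacks above are reached: the
+ * standard ethdev bring-up sequence maps onto i40e_dev_configure(), the
+ * PMD's RX/TX queue setup callbacks and i40e_dev_start().  port_id, mb_pool
+ * and further error handling are assumed to exist in the application; this
+ * is not driver code:
+ *
+ *   struct rte_eth_conf port_conf = {
+ *       .rxmode = { .mq_mode = ETH_MQ_RX_NONE },
+ *   };
+ *   int ret;
+ *
+ *   ret = rte_eth_dev_configure(port_id, 1, 1, &port_conf);
+ *   if (ret == 0)
+ *       ret = rte_eth_rx_queue_setup(port_id, 0, 1024,
+ *                   rte_eth_dev_socket_id(port_id), NULL, mb_pool);
+ *   if (ret == 0)
+ *       ret = rte_eth_tx_queue_setup(port_id, 0, 1024,
+ *                   rte_eth_dev_socket_id(port_id), NULL);
+ *   if (ret == 0)
+ *       ret = rte_eth_dev_start(port_id);
+ */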
for (nb_txq = 0; nb_txq < dev->data->nb_tx_queues; nb_txq++) { + ret = i40e_dev_tx_queue_start(dev, nb_txq); + if (ret) + goto tx_err; + } + + /* Enable receiving broadcast packets */ + ret = i40e_aq_set_vsi_broadcast(hw, main_vsi->seid, true, NULL); + if (ret != I40E_SUCCESS) + PMD_DRV_LOG(INFO, "fail to set vsi broadcast"); + + for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) { + ret = i40e_aq_set_vsi_broadcast(hw, pf->vmdq[i].vsi->seid, + true, NULL); + if (ret != I40E_SUCCESS) + PMD_DRV_LOG(INFO, "fail to set vsi broadcast"); + } + + /* Enable the VLAN promiscuous mode. */ + if (pf->vfs) { + for (i = 0; i < pf->vf_num; i++) { + vsi = pf->vfs[i].vsi; + i40e_aq_set_vsi_vlan_promisc(hw, vsi->seid, + true, NULL); + } + } + + /* Enable mac loopback mode */ + if (dev->data->dev_conf.lpbk_mode == I40E_AQ_LB_MODE_NONE || + dev->data->dev_conf.lpbk_mode == I40E_AQ_LB_PHY_LOCAL) { + ret = i40e_aq_set_lb_modes(hw, dev->data->dev_conf.lpbk_mode, NULL); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "fail to set loopback link"); + goto tx_err; + } + } + + /* Apply link configure */ + ret = i40e_apply_link_speed(dev); + if (I40E_SUCCESS != ret) { + PMD_DRV_LOG(ERR, "Fail to apply link setting"); + goto tx_err; + } + + if (!rte_intr_allow_others(intr_handle)) { + rte_intr_callback_unregister(intr_handle, + i40e_dev_interrupt_handler, + (void *)dev); + /* configure and enable device interrupt */ + i40e_pf_config_irq0(hw, FALSE); + i40e_pf_enable_irq0(hw); + + if (dev->data->dev_conf.intr_conf.lsc != 0) + PMD_INIT_LOG(INFO, + "lsc won't enable because of no intr multiplex"); + } else { + ret = i40e_aq_set_phy_int_mask(hw, + ~(I40E_AQ_EVENT_LINK_UPDOWN | + I40E_AQ_EVENT_MODULE_QUAL_FAIL | + I40E_AQ_EVENT_MEDIA_NA), NULL); + if (ret != I40E_SUCCESS) + PMD_DRV_LOG(WARNING, "Fail to set phy mask"); + + /* Call get_link_info aq commond to enable/disable LSE */ + i40e_dev_link_update(dev, 0); + } + + if (dev->data->dev_conf.intr_conf.rxq == 0) { + rte_eal_alarm_set(I40E_ALARM_INTERVAL, + i40e_dev_alarm_handler, dev); + } else { + /* enable uio intr after callback register */ + rte_intr_enable(intr_handle); + } + + i40e_filter_restore(pf); + + if (pf->tm_conf.root && !pf->tm_conf.committed) + PMD_DRV_LOG(WARNING, + "please call hierarchy_commit() " + "before starting the port"); + + return I40E_SUCCESS; + +tx_err: + for (i = 0; i < nb_txq; i++) + i40e_dev_tx_queue_stop(dev, i); +rx_err: + for (i = 0; i < nb_rxq; i++) + i40e_dev_rx_queue_stop(dev, i); + + return ret; +} + +static void +i40e_dev_stop(struct rte_eth_dev *dev) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct i40e_vsi *main_vsi = pf->main_vsi; + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + int i; + + if (hw->adapter_stopped == 1) + return; + + if (dev->data->dev_conf.intr_conf.rxq == 0) { + rte_eal_alarm_cancel(i40e_dev_alarm_handler, dev); + rte_intr_enable(intr_handle); + } + + /* Disable all queues */ + for (i = 0; i < dev->data->nb_tx_queues; i++) + i40e_dev_tx_queue_stop(dev, i); + + for (i = 0; i < dev->data->nb_rx_queues; i++) + i40e_dev_rx_queue_stop(dev, i); + + /* un-map queues with interrupt registers */ + i40e_vsi_disable_queues_intr(main_vsi); + i40e_vsi_queues_unbind_intr(main_vsi); + + for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) { + i40e_vsi_disable_queues_intr(pf->vmdq[i].vsi); + i40e_vsi_queues_unbind_intr(pf->vmdq[i].vsi); + } + + if (pf->fdir.fdir_vsi) { + 
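+/*
+ * When rxq interrupts are off, i40e_dev_start() above arms a periodic alarm
+ * (every I40E_ALARM_INTERVAL microseconds) instead of enabling the UIO/VFIO
+ * interrupt, and i40e_dev_stop() cancels it with rte_eal_alarm_cancel().
+ * The real i40e_dev_alarm_handler() is defined elsewhere in this file; the
+ * sketch below (illustrative name example_alarm_handler) only shows the
+ * self-rearming pattern such a handler follows:
+ *
+ *   static void
+ *   example_alarm_handler(void *param)
+ *   {
+ *       struct rte_eth_dev *dev = param;
+ *
+ *       // service pending admin queue events for dev here
+ *       rte_eal_alarm_set(I40E_ALARM_INTERVAL, example_alarm_handler, dev);
+ *   }
+ */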
i40e_vsi_queues_unbind_intr(pf->fdir.fdir_vsi); + i40e_vsi_disable_queues_intr(pf->fdir.fdir_vsi); + } + /* Clear all queues and release memory */ + i40e_dev_clear_queues(dev); + + /* Set link down */ + i40e_dev_set_link_down(dev); + + if (!rte_intr_allow_others(intr_handle)) + /* resume to the default handler */ + rte_intr_callback_register(intr_handle, + i40e_dev_interrupt_handler, + (void *)dev); + + /* Clean datapath event and queue/vec mapping */ + rte_intr_efd_disable(intr_handle); + if (intr_handle->intr_vec) { + rte_free(intr_handle->intr_vec); + intr_handle->intr_vec = NULL; + } + + /* reset hierarchy commit */ + pf->tm_conf.committed = false; + + hw->adapter_stopped = 1; + + pf->adapter->rss_reta_updated = 0; +} + +static void +i40e_dev_close(struct rte_eth_dev *dev) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + struct i40e_mirror_rule *p_mirror; + struct i40e_filter_control_settings settings; + struct rte_flow *p_flow; + uint32_t reg; + int i; + int ret; + uint8_t aq_fail = 0; + int retries = 0; + + PMD_INIT_FUNC_TRACE(); + + ret = rte_eth_switch_domain_free(pf->switch_domain_id); + if (ret) + PMD_INIT_LOG(WARNING, "failed to free switch domain: %d", ret); + + + i40e_dev_stop(dev); + + /* Remove all mirror rules */ + while ((p_mirror = TAILQ_FIRST(&pf->mirror_list))) { + ret = i40e_aq_del_mirror_rule(hw, + pf->main_vsi->veb->seid, + p_mirror->rule_type, + p_mirror->entries, + p_mirror->num_entries, + p_mirror->id); + if (ret < 0) + PMD_DRV_LOG(ERR, "failed to remove mirror rule: " + "status = %d, aq_err = %d.", ret, + hw->aq.asq_last_status); + + /* remove mirror software resource anyway */ + TAILQ_REMOVE(&pf->mirror_list, p_mirror, rules); + rte_free(p_mirror); + pf->nb_mirror_rule--; + } + + i40e_dev_free_queues(dev); + + /* Disable interrupt */ + i40e_pf_disable_irq0(hw); + rte_intr_disable(intr_handle); + + /* + * Only legacy filter API needs the following fdir config. So when the + * legacy filter API is deprecated, the following code should also be + * removed. 
+ */ + i40e_fdir_teardown(pf); + + /* shutdown and destroy the HMC */ + i40e_shutdown_lan_hmc(hw); + + for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) { + i40e_vsi_release(pf->vmdq[i].vsi); + pf->vmdq[i].vsi = NULL; + } + rte_free(pf->vmdq); + pf->vmdq = NULL; + + /* release all the existing VSIs and VEBs */ + i40e_vsi_release(pf->main_vsi); + + /* shutdown the adminq */ + i40e_aq_queue_shutdown(hw, true); + i40e_shutdown_adminq(hw); + + i40e_res_pool_destroy(&pf->qp_pool); + i40e_res_pool_destroy(&pf->msix_pool); + + /* Disable flexible payload in global configuration */ + if (!pf->support_multi_driver) + i40e_flex_payload_reg_set_default(hw); + + /* force a PF reset to clean anything leftover */ + reg = I40E_READ_REG(hw, I40E_PFGEN_CTRL); + I40E_WRITE_REG(hw, I40E_PFGEN_CTRL, + (reg | I40E_PFGEN_CTRL_PFSWR_MASK)); + I40E_WRITE_FLUSH(hw); + + dev->dev_ops = NULL; + dev->rx_pkt_burst = NULL; + dev->tx_pkt_burst = NULL; + + /* Clear PXE mode */ + i40e_clear_pxe_mode(hw); + + /* Unconfigure filter control */ + memset(&settings, 0, sizeof(settings)); + ret = i40e_set_filter_control(hw, &settings); + if (ret) + PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d", + ret); + + /* Disable flow control */ + hw->fc.requested_mode = I40E_FC_NONE; + i40e_set_fc(hw, &aq_fail, TRUE); + + /* uninitialize pf host driver */ + i40e_pf_host_uninit(dev); + + do { + ret = rte_intr_callback_unregister(intr_handle, + i40e_dev_interrupt_handler, dev); + if (ret >= 0 || ret == -ENOENT) { + break; + } else if (ret != -EAGAIN) { + PMD_INIT_LOG(ERR, + "intr callback unregister failed: %d", + ret); + } + i40e_msec_delay(500); + } while (retries++ < 5); + + i40e_rm_ethtype_filter_list(pf); + i40e_rm_tunnel_filter_list(pf); + i40e_rm_fdir_filter_list(pf); + + /* Remove all flows */ + while ((p_flow = TAILQ_FIRST(&pf->flow_list))) { + TAILQ_REMOVE(&pf->flow_list, p_flow, node); + rte_free(p_flow); + } + + /* Remove all Traffic Manager configuration */ + i40e_tm_conf_uninit(dev); + + hw->adapter_closed = 1; +} + +/* + * Reset PF device only to re-initialize resources in PMD layer + */ +static int +i40e_dev_reset(struct rte_eth_dev *dev) +{ + int ret; + + /* When a DPDK PMD PF begin to reset PF port, it should notify all + * its VF to make them align with it. The detailed notification + * mechanism is PMD specific. As to i40e PF, it is rather complex. + * To avoid unexpected behavior in VF, currently reset of PF with + * SR-IOV activation is not supported. It might be supported later. 
+ */ + if (dev->data->sriov.active) + return -ENOTSUP; + + ret = eth_i40e_dev_uninit(dev); + if (ret) + return ret; + + ret = eth_i40e_dev_init(dev, NULL); + + return ret; +} + +static int +i40e_dev_promiscuous_enable(struct rte_eth_dev *dev) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct i40e_vsi *vsi = pf->main_vsi; + int status; + + status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid, + true, NULL, true); + if (status != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "Failed to enable unicast promiscuous"); + return -EAGAIN; + } + + status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, + TRUE, NULL); + if (status != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous"); + /* Rollback unicast promiscuous mode */ + i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid, + false, NULL, true); + return -EAGAIN; + } + + return 0; +} + +static int +i40e_dev_promiscuous_disable(struct rte_eth_dev *dev) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct i40e_vsi *vsi = pf->main_vsi; + int status; + + status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid, + false, NULL, true); + if (status != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "Failed to disable unicast promiscuous"); + return -EAGAIN; + } + + /* must remain in all_multicast mode */ + if (dev->data->all_multicast == 1) + return 0; + + status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, + false, NULL); + if (status != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous"); + /* Rollback unicast promiscuous mode */ + i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid, + true, NULL, true); + return -EAGAIN; + } + + return 0; +} + +static int +i40e_dev_allmulticast_enable(struct rte_eth_dev *dev) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct i40e_vsi *vsi = pf->main_vsi; + int ret; + + ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, TRUE, NULL); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous"); + return -EAGAIN; + } + + return 0; +} + +static int +i40e_dev_allmulticast_disable(struct rte_eth_dev *dev) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct i40e_vsi *vsi = pf->main_vsi; + int ret; + + if (dev->data->promiscuous == 1) + return 0; /* must remain in all_multicast mode */ + + ret = i40e_aq_set_vsi_multicast_promiscuous(hw, + vsi->seid, FALSE, NULL); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous"); + return -EAGAIN; + } + + return 0; +} + +/* + * Set device link up. + */ +static int +i40e_dev_set_link_up(struct rte_eth_dev *dev) +{ + /* re-apply link speed setting */ + return i40e_apply_link_speed(dev); +} + +/* + * Set device link down. 
+ */ +static int +i40e_dev_set_link_down(struct rte_eth_dev *dev) +{ + uint8_t speed = I40E_LINK_SPEED_UNKNOWN; + uint8_t abilities = 0; + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + abilities = I40E_AQ_PHY_ENABLE_ATOMIC_LINK; + return i40e_phy_conf_link(hw, abilities, speed, false); +} + +static __rte_always_inline void +update_link_reg(struct i40e_hw *hw, struct rte_eth_link *link) +{ +/* Link status registers and values*/ +#define I40E_PRTMAC_LINKSTA 0x001E2420 +#define I40E_REG_LINK_UP 0x40000080 +#define I40E_PRTMAC_MACC 0x001E24E0 +#define I40E_REG_MACC_25GB 0x00020000 +#define I40E_REG_SPEED_MASK 0x38000000 +#define I40E_REG_SPEED_0 0x00000000 +#define I40E_REG_SPEED_1 0x08000000 +#define I40E_REG_SPEED_2 0x10000000 +#define I40E_REG_SPEED_3 0x18000000 +#define I40E_REG_SPEED_4 0x20000000 + uint32_t link_speed; + uint32_t reg_val; + + reg_val = I40E_READ_REG(hw, I40E_PRTMAC_LINKSTA); + link_speed = reg_val & I40E_REG_SPEED_MASK; + reg_val &= I40E_REG_LINK_UP; + link->link_status = (reg_val == I40E_REG_LINK_UP) ? 1 : 0; + + if (unlikely(link->link_status == 0)) + return; + + /* Parse the link status */ + switch (link_speed) { + case I40E_REG_SPEED_0: + link->link_speed = ETH_SPEED_NUM_100M; + break; + case I40E_REG_SPEED_1: + link->link_speed = ETH_SPEED_NUM_1G; + break; + case I40E_REG_SPEED_2: + if (hw->mac.type == I40E_MAC_X722) + link->link_speed = ETH_SPEED_NUM_2_5G; + else + link->link_speed = ETH_SPEED_NUM_10G; + break; + case I40E_REG_SPEED_3: + if (hw->mac.type == I40E_MAC_X722) { + link->link_speed = ETH_SPEED_NUM_5G; + } else { + reg_val = I40E_READ_REG(hw, I40E_PRTMAC_MACC); + + if (reg_val & I40E_REG_MACC_25GB) + link->link_speed = ETH_SPEED_NUM_25G; + else + link->link_speed = ETH_SPEED_NUM_40G; + } + break; + case I40E_REG_SPEED_4: + if (hw->mac.type == I40E_MAC_X722) + link->link_speed = ETH_SPEED_NUM_10G; + else + link->link_speed = ETH_SPEED_NUM_20G; + break; + default: + PMD_DRV_LOG(ERR, "Unknown link speed info %u", link_speed); + break; + } +} + +static __rte_always_inline void +update_link_aq(struct i40e_hw *hw, struct rte_eth_link *link, + bool enable_lse, int wait_to_complete) +{ +#define CHECK_INTERVAL 100 /* 100ms */ +#define MAX_REPEAT_TIME 10 /* 1s (10 * 100ms) in total */ + uint32_t rep_cnt = MAX_REPEAT_TIME; + struct i40e_link_status link_status; + int status; + + memset(&link_status, 0, sizeof(link_status)); + + do { + memset(&link_status, 0, sizeof(link_status)); + + /* Get link status information from hardware */ + status = i40e_aq_get_link_info(hw, enable_lse, + &link_status, NULL); + if (unlikely(status != I40E_SUCCESS)) { + link->link_speed = ETH_SPEED_NUM_NONE; + link->link_duplex = ETH_LINK_FULL_DUPLEX; + PMD_DRV_LOG(ERR, "Failed to get link info"); + return; + } + + link->link_status = link_status.link_info & I40E_AQ_LINK_UP; + if (!wait_to_complete || link->link_status) + break; + + rte_delay_ms(CHECK_INTERVAL); + } while (--rep_cnt); + + /* Parse the link status */ + switch (link_status.link_speed) { + case I40E_LINK_SPEED_100MB: + link->link_speed = ETH_SPEED_NUM_100M; + break; + case I40E_LINK_SPEED_1GB: + link->link_speed = ETH_SPEED_NUM_1G; + break; + case I40E_LINK_SPEED_10GB: + link->link_speed = ETH_SPEED_NUM_10G; + break; + case I40E_LINK_SPEED_20GB: + link->link_speed = ETH_SPEED_NUM_20G; + break; + case I40E_LINK_SPEED_25GB: + link->link_speed = ETH_SPEED_NUM_25G; + break; + case I40E_LINK_SPEED_40GB: + link->link_speed = ETH_SPEED_NUM_40G; + break; + default: + link->link_speed = ETH_SPEED_NUM_NONE; + 
break; + } +} + +int +i40e_dev_link_update(struct rte_eth_dev *dev, + int wait_to_complete) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_eth_link link; + bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false; + int ret; + + memset(&link, 0, sizeof(link)); + + /* i40e uses full duplex only */ + link.link_duplex = ETH_LINK_FULL_DUPLEX; + link.link_autoneg = !(dev->data->dev_conf.link_speeds & + ETH_LINK_SPEED_FIXED); + + if (!wait_to_complete && !enable_lse) + update_link_reg(hw, &link); + else + update_link_aq(hw, &link, enable_lse, wait_to_complete); + + if (hw->switch_dev) + rte_eth_linkstatus_get(hw->switch_dev, &link); + + ret = rte_eth_linkstatus_set(dev, &link); + i40e_notify_all_vfs_link_status(dev); + + return ret; +} + +/* Get all the statistics of a VSI */ +void +i40e_update_vsi_stats(struct i40e_vsi *vsi) +{ + struct i40e_eth_stats *oes = &vsi->eth_stats_offset; + struct i40e_eth_stats *nes = &vsi->eth_stats; + struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); + int idx = rte_le_to_cpu_16(vsi->info.stat_counter_idx); + + i40e_stat_update_48(hw, I40E_GLV_GORCH(idx), I40E_GLV_GORCL(idx), + vsi->offset_loaded, &oes->rx_bytes, + &nes->rx_bytes); + i40e_stat_update_48(hw, I40E_GLV_UPRCH(idx), I40E_GLV_UPRCL(idx), + vsi->offset_loaded, &oes->rx_unicast, + &nes->rx_unicast); + i40e_stat_update_48(hw, I40E_GLV_MPRCH(idx), I40E_GLV_MPRCL(idx), + vsi->offset_loaded, &oes->rx_multicast, + &nes->rx_multicast); + i40e_stat_update_48(hw, I40E_GLV_BPRCH(idx), I40E_GLV_BPRCL(idx), + vsi->offset_loaded, &oes->rx_broadcast, + &nes->rx_broadcast); + /* exclude CRC bytes */ + nes->rx_bytes -= (nes->rx_unicast + nes->rx_multicast + + nes->rx_broadcast) * RTE_ETHER_CRC_LEN; + + i40e_stat_update_32(hw, I40E_GLV_RDPC(idx), vsi->offset_loaded, + &oes->rx_discards, &nes->rx_discards); + /* GLV_REPC not supported */ + /* GLV_RMPC not supported */ + i40e_stat_update_32(hw, I40E_GLV_RUPP(idx), vsi->offset_loaded, + &oes->rx_unknown_protocol, + &nes->rx_unknown_protocol); + i40e_stat_update_48(hw, I40E_GLV_GOTCH(idx), I40E_GLV_GOTCL(idx), + vsi->offset_loaded, &oes->tx_bytes, + &nes->tx_bytes); + i40e_stat_update_48(hw, I40E_GLV_UPTCH(idx), I40E_GLV_UPTCL(idx), + vsi->offset_loaded, &oes->tx_unicast, + &nes->tx_unicast); + i40e_stat_update_48(hw, I40E_GLV_MPTCH(idx), I40E_GLV_MPTCL(idx), + vsi->offset_loaded, &oes->tx_multicast, + &nes->tx_multicast); + i40e_stat_update_48(hw, I40E_GLV_BPTCH(idx), I40E_GLV_BPTCL(idx), + vsi->offset_loaded, &oes->tx_broadcast, + &nes->tx_broadcast); + /* GLV_TDPC not supported */ + i40e_stat_update_32(hw, I40E_GLV_TEPC(idx), vsi->offset_loaded, + &oes->tx_errors, &nes->tx_errors); + vsi->offset_loaded = true; + + PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats start *******************", + vsi->vsi_id); + PMD_DRV_LOG(DEBUG, "rx_bytes: %"PRIu64"", nes->rx_bytes); + PMD_DRV_LOG(DEBUG, "rx_unicast: %"PRIu64"", nes->rx_unicast); + PMD_DRV_LOG(DEBUG, "rx_multicast: %"PRIu64"", nes->rx_multicast); + PMD_DRV_LOG(DEBUG, "rx_broadcast: %"PRIu64"", nes->rx_broadcast); + PMD_DRV_LOG(DEBUG, "rx_discards: %"PRIu64"", nes->rx_discards); + PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"", + nes->rx_unknown_protocol); + PMD_DRV_LOG(DEBUG, "tx_bytes: %"PRIu64"", nes->tx_bytes); + PMD_DRV_LOG(DEBUG, "tx_unicast: %"PRIu64"", nes->tx_unicast); + PMD_DRV_LOG(DEBUG, "tx_multicast: %"PRIu64"", nes->tx_multicast); + PMD_DRV_LOG(DEBUG, "tx_broadcast: %"PRIu64"", nes->tx_broadcast); + PMD_DRV_LOG(DEBUG, "tx_discards: %"PRIu64"", nes->tx_discards); + 
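/*
 * Editor's sketch (not part of the patch): the i40e_stat_update_48()
 * calls above all follow one pattern -- read a free-running 48-bit
 * hardware counter, remember a baseline ("offset") on the first read
 * after reset, and report the distance from that baseline while
 * compensating for wrap-around. The helper below is a hypothetical,
 * self-contained approximation of that bookkeeping; the name and exact
 * wrap handling are assumptions, not the driver's literal code.
 */
#include <stdbool.h>
#include <stdint.h>

#define COUNTER_48_MASK (((uint64_t)1 << 48) - 1)

static void
stat_update_48_sketch(uint64_t hw_reading, bool offset_loaded,
		      uint64_t *offset, uint64_t *stat)
{
	hw_reading &= COUNTER_48_MASK;

	/* First read since reset: record the baseline only. */
	if (!offset_loaded)
		*offset = hw_reading;

	/* Report the delta from the baseline, handling 48-bit wrap. */
	if (hw_reading >= *offset)
		*stat = hw_reading - *offset;
	else
		*stat = (hw_reading + ((uint64_t)1 << 48)) - *offset;

	*stat &= COUNTER_48_MASK;
}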
PMD_DRV_LOG(DEBUG, "tx_errors: %"PRIu64"", nes->tx_errors); + PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats end *******************", + vsi->vsi_id); +} + +static void +i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw) +{ + unsigned int i; + struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */ + struct i40e_hw_port_stats *os = &pf->stats_offset; /* old stats */ + + /* Get rx/tx bytes of internal transfer packets */ + i40e_stat_update_48(hw, I40E_GLV_GORCH(hw->port), + I40E_GLV_GORCL(hw->port), + pf->offset_loaded, + &pf->internal_stats_offset.rx_bytes, + &pf->internal_stats.rx_bytes); + + i40e_stat_update_48(hw, I40E_GLV_GOTCH(hw->port), + I40E_GLV_GOTCL(hw->port), + pf->offset_loaded, + &pf->internal_stats_offset.tx_bytes, + &pf->internal_stats.tx_bytes); + /* Get total internal rx packet count */ + i40e_stat_update_48(hw, I40E_GLV_UPRCH(hw->port), + I40E_GLV_UPRCL(hw->port), + pf->offset_loaded, + &pf->internal_stats_offset.rx_unicast, + &pf->internal_stats.rx_unicast); + i40e_stat_update_48(hw, I40E_GLV_MPRCH(hw->port), + I40E_GLV_MPRCL(hw->port), + pf->offset_loaded, + &pf->internal_stats_offset.rx_multicast, + &pf->internal_stats.rx_multicast); + i40e_stat_update_48(hw, I40E_GLV_BPRCH(hw->port), + I40E_GLV_BPRCL(hw->port), + pf->offset_loaded, + &pf->internal_stats_offset.rx_broadcast, + &pf->internal_stats.rx_broadcast); + /* Get total internal tx packet count */ + i40e_stat_update_48(hw, I40E_GLV_UPTCH(hw->port), + I40E_GLV_UPTCL(hw->port), + pf->offset_loaded, + &pf->internal_stats_offset.tx_unicast, + &pf->internal_stats.tx_unicast); + i40e_stat_update_48(hw, I40E_GLV_MPTCH(hw->port), + I40E_GLV_MPTCL(hw->port), + pf->offset_loaded, + &pf->internal_stats_offset.tx_multicast, + &pf->internal_stats.tx_multicast); + i40e_stat_update_48(hw, I40E_GLV_BPTCH(hw->port), + I40E_GLV_BPTCL(hw->port), + pf->offset_loaded, + &pf->internal_stats_offset.tx_broadcast, + &pf->internal_stats.tx_broadcast); + + /* exclude CRC size */ + pf->internal_stats.rx_bytes -= (pf->internal_stats.rx_unicast + + pf->internal_stats.rx_multicast + + pf->internal_stats.rx_broadcast) * RTE_ETHER_CRC_LEN; + + /* Get statistics of struct i40e_eth_stats */ + i40e_stat_update_48(hw, I40E_GLPRT_GORCH(hw->port), + I40E_GLPRT_GORCL(hw->port), + pf->offset_loaded, &os->eth.rx_bytes, + &ns->eth.rx_bytes); + i40e_stat_update_48(hw, I40E_GLPRT_UPRCH(hw->port), + I40E_GLPRT_UPRCL(hw->port), + pf->offset_loaded, &os->eth.rx_unicast, + &ns->eth.rx_unicast); + i40e_stat_update_48(hw, I40E_GLPRT_MPRCH(hw->port), + I40E_GLPRT_MPRCL(hw->port), + pf->offset_loaded, &os->eth.rx_multicast, + &ns->eth.rx_multicast); + i40e_stat_update_48(hw, I40E_GLPRT_BPRCH(hw->port), + I40E_GLPRT_BPRCL(hw->port), + pf->offset_loaded, &os->eth.rx_broadcast, + &ns->eth.rx_broadcast); + /* Workaround: CRC size should not be included in byte statistics, + * so subtract RTE_ETHER_CRC_LEN from the byte counter for each rx + * packet. + */ + ns->eth.rx_bytes -= (ns->eth.rx_unicast + ns->eth.rx_multicast + + ns->eth.rx_broadcast) * RTE_ETHER_CRC_LEN; + + /* exclude internal rx bytes + * Workaround: it is possible I40E_GLV_GORCH[H/L] is updated before + * I40E_GLPRT_GORCH[H/L], so there is a small window that cause negative + * value. + * same to I40E_GLV_UPRC[H/L], I40E_GLV_MPRC[H/L], I40E_GLV_BPRC[H/L]. 
+ */ + if (ns->eth.rx_bytes < pf->internal_stats.rx_bytes) + ns->eth.rx_bytes = 0; + else + ns->eth.rx_bytes -= pf->internal_stats.rx_bytes; + + if (ns->eth.rx_unicast < pf->internal_stats.rx_unicast) + ns->eth.rx_unicast = 0; + else + ns->eth.rx_unicast -= pf->internal_stats.rx_unicast; + + if (ns->eth.rx_multicast < pf->internal_stats.rx_multicast) + ns->eth.rx_multicast = 0; + else + ns->eth.rx_multicast -= pf->internal_stats.rx_multicast; + + if (ns->eth.rx_broadcast < pf->internal_stats.rx_broadcast) + ns->eth.rx_broadcast = 0; + else + ns->eth.rx_broadcast -= pf->internal_stats.rx_broadcast; + + i40e_stat_update_32(hw, I40E_GLPRT_RDPC(hw->port), + pf->offset_loaded, &os->eth.rx_discards, + &ns->eth.rx_discards); + /* GLPRT_REPC not supported */ + /* GLPRT_RMPC not supported */ + i40e_stat_update_32(hw, I40E_GLPRT_RUPP(hw->port), + pf->offset_loaded, + &os->eth.rx_unknown_protocol, + &ns->eth.rx_unknown_protocol); + i40e_stat_update_48(hw, I40E_GLPRT_GOTCH(hw->port), + I40E_GLPRT_GOTCL(hw->port), + pf->offset_loaded, &os->eth.tx_bytes, + &ns->eth.tx_bytes); + i40e_stat_update_48(hw, I40E_GLPRT_UPTCH(hw->port), + I40E_GLPRT_UPTCL(hw->port), + pf->offset_loaded, &os->eth.tx_unicast, + &ns->eth.tx_unicast); + i40e_stat_update_48(hw, I40E_GLPRT_MPTCH(hw->port), + I40E_GLPRT_MPTCL(hw->port), + pf->offset_loaded, &os->eth.tx_multicast, + &ns->eth.tx_multicast); + i40e_stat_update_48(hw, I40E_GLPRT_BPTCH(hw->port), + I40E_GLPRT_BPTCL(hw->port), + pf->offset_loaded, &os->eth.tx_broadcast, + &ns->eth.tx_broadcast); + ns->eth.tx_bytes -= (ns->eth.tx_unicast + ns->eth.tx_multicast + + ns->eth.tx_broadcast) * RTE_ETHER_CRC_LEN; + + /* exclude internal tx bytes + * Workaround: it is possible I40E_GLV_GOTCH[H/L] is updated before + * I40E_GLPRT_GOTCH[H/L], so there is a small window that cause negative + * value. + * same to I40E_GLV_UPTC[H/L], I40E_GLV_MPTC[H/L], I40E_GLV_BPTC[H/L]. 
+ */ + if (ns->eth.tx_bytes < pf->internal_stats.tx_bytes) + ns->eth.tx_bytes = 0; + else + ns->eth.tx_bytes -= pf->internal_stats.tx_bytes; + + if (ns->eth.tx_unicast < pf->internal_stats.tx_unicast) + ns->eth.tx_unicast = 0; + else + ns->eth.tx_unicast -= pf->internal_stats.tx_unicast; + + if (ns->eth.tx_multicast < pf->internal_stats.tx_multicast) + ns->eth.tx_multicast = 0; + else + ns->eth.tx_multicast -= pf->internal_stats.tx_multicast; + + if (ns->eth.tx_broadcast < pf->internal_stats.tx_broadcast) + ns->eth.tx_broadcast = 0; + else + ns->eth.tx_broadcast -= pf->internal_stats.tx_broadcast; + + /* GLPRT_TEPC not supported */ + + /* additional port specific stats */ + i40e_stat_update_32(hw, I40E_GLPRT_TDOLD(hw->port), + pf->offset_loaded, &os->tx_dropped_link_down, + &ns->tx_dropped_link_down); + i40e_stat_update_32(hw, I40E_GLPRT_CRCERRS(hw->port), + pf->offset_loaded, &os->crc_errors, + &ns->crc_errors); + i40e_stat_update_32(hw, I40E_GLPRT_ILLERRC(hw->port), + pf->offset_loaded, &os->illegal_bytes, + &ns->illegal_bytes); + /* GLPRT_ERRBC not supported */ + i40e_stat_update_32(hw, I40E_GLPRT_MLFC(hw->port), + pf->offset_loaded, &os->mac_local_faults, + &ns->mac_local_faults); + i40e_stat_update_32(hw, I40E_GLPRT_MRFC(hw->port), + pf->offset_loaded, &os->mac_remote_faults, + &ns->mac_remote_faults); + i40e_stat_update_32(hw, I40E_GLPRT_RLEC(hw->port), + pf->offset_loaded, &os->rx_length_errors, + &ns->rx_length_errors); + i40e_stat_update_32(hw, I40E_GLPRT_LXONRXC(hw->port), + pf->offset_loaded, &os->link_xon_rx, + &ns->link_xon_rx); + i40e_stat_update_32(hw, I40E_GLPRT_LXOFFRXC(hw->port), + pf->offset_loaded, &os->link_xoff_rx, + &ns->link_xoff_rx); + for (i = 0; i < 8; i++) { + i40e_stat_update_32(hw, I40E_GLPRT_PXONRXC(hw->port, i), + pf->offset_loaded, + &os->priority_xon_rx[i], + &ns->priority_xon_rx[i]); + i40e_stat_update_32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i), + pf->offset_loaded, + &os->priority_xoff_rx[i], + &ns->priority_xoff_rx[i]); + } + i40e_stat_update_32(hw, I40E_GLPRT_LXONTXC(hw->port), + pf->offset_loaded, &os->link_xon_tx, + &ns->link_xon_tx); + i40e_stat_update_32(hw, I40E_GLPRT_LXOFFTXC(hw->port), + pf->offset_loaded, &os->link_xoff_tx, + &ns->link_xoff_tx); + for (i = 0; i < 8; i++) { + i40e_stat_update_32(hw, I40E_GLPRT_PXONTXC(hw->port, i), + pf->offset_loaded, + &os->priority_xon_tx[i], + &ns->priority_xon_tx[i]); + i40e_stat_update_32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i), + pf->offset_loaded, + &os->priority_xoff_tx[i], + &ns->priority_xoff_tx[i]); + i40e_stat_update_32(hw, I40E_GLPRT_RXON2OFFCNT(hw->port, i), + pf->offset_loaded, + &os->priority_xon_2_xoff[i], + &ns->priority_xon_2_xoff[i]); + } + i40e_stat_update_48(hw, I40E_GLPRT_PRC64H(hw->port), + I40E_GLPRT_PRC64L(hw->port), + pf->offset_loaded, &os->rx_size_64, + &ns->rx_size_64); + i40e_stat_update_48(hw, I40E_GLPRT_PRC127H(hw->port), + I40E_GLPRT_PRC127L(hw->port), + pf->offset_loaded, &os->rx_size_127, + &ns->rx_size_127); + i40e_stat_update_48(hw, I40E_GLPRT_PRC255H(hw->port), + I40E_GLPRT_PRC255L(hw->port), + pf->offset_loaded, &os->rx_size_255, + &ns->rx_size_255); + i40e_stat_update_48(hw, I40E_GLPRT_PRC511H(hw->port), + I40E_GLPRT_PRC511L(hw->port), + pf->offset_loaded, &os->rx_size_511, + &ns->rx_size_511); + i40e_stat_update_48(hw, I40E_GLPRT_PRC1023H(hw->port), + I40E_GLPRT_PRC1023L(hw->port), + pf->offset_loaded, &os->rx_size_1023, + &ns->rx_size_1023); + i40e_stat_update_48(hw, I40E_GLPRT_PRC1522H(hw->port), + I40E_GLPRT_PRC1522L(hw->port), + pf->offset_loaded, &os->rx_size_1522, + 
&ns->rx_size_1522); + i40e_stat_update_48(hw, I40E_GLPRT_PRC9522H(hw->port), + I40E_GLPRT_PRC9522L(hw->port), + pf->offset_loaded, &os->rx_size_big, + &ns->rx_size_big); + i40e_stat_update_32(hw, I40E_GLPRT_RUC(hw->port), + pf->offset_loaded, &os->rx_undersize, + &ns->rx_undersize); + i40e_stat_update_32(hw, I40E_GLPRT_RFC(hw->port), + pf->offset_loaded, &os->rx_fragments, + &ns->rx_fragments); + i40e_stat_update_32(hw, I40E_GLPRT_ROC(hw->port), + pf->offset_loaded, &os->rx_oversize, + &ns->rx_oversize); + i40e_stat_update_32(hw, I40E_GLPRT_RJC(hw->port), + pf->offset_loaded, &os->rx_jabber, + &ns->rx_jabber); + i40e_stat_update_48(hw, I40E_GLPRT_PTC64H(hw->port), + I40E_GLPRT_PTC64L(hw->port), + pf->offset_loaded, &os->tx_size_64, + &ns->tx_size_64); + i40e_stat_update_48(hw, I40E_GLPRT_PTC127H(hw->port), + I40E_GLPRT_PTC127L(hw->port), + pf->offset_loaded, &os->tx_size_127, + &ns->tx_size_127); + i40e_stat_update_48(hw, I40E_GLPRT_PTC255H(hw->port), + I40E_GLPRT_PTC255L(hw->port), + pf->offset_loaded, &os->tx_size_255, + &ns->tx_size_255); + i40e_stat_update_48(hw, I40E_GLPRT_PTC511H(hw->port), + I40E_GLPRT_PTC511L(hw->port), + pf->offset_loaded, &os->tx_size_511, + &ns->tx_size_511); + i40e_stat_update_48(hw, I40E_GLPRT_PTC1023H(hw->port), + I40E_GLPRT_PTC1023L(hw->port), + pf->offset_loaded, &os->tx_size_1023, + &ns->tx_size_1023); + i40e_stat_update_48(hw, I40E_GLPRT_PTC1522H(hw->port), + I40E_GLPRT_PTC1522L(hw->port), + pf->offset_loaded, &os->tx_size_1522, + &ns->tx_size_1522); + i40e_stat_update_48(hw, I40E_GLPRT_PTC9522H(hw->port), + I40E_GLPRT_PTC9522L(hw->port), + pf->offset_loaded, &os->tx_size_big, + &ns->tx_size_big); + i40e_stat_update_32(hw, I40E_GLQF_PCNT(pf->fdir.match_counter_index), + pf->offset_loaded, + &os->fd_sb_match, &ns->fd_sb_match); + /* GLPRT_MSPDC not supported */ + /* GLPRT_XEC not supported */ + + pf->offset_loaded = true; + + if (pf->main_vsi) + i40e_update_vsi_stats(pf->main_vsi); +} + +/* Get all statistics of a port */ +static int +i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */ + struct i40e_vsi *vsi; + unsigned i; + + /* call read registers - updates values, now write them to struct */ + i40e_read_stats_registers(pf, hw); + + stats->ipackets = pf->main_vsi->eth_stats.rx_unicast + + pf->main_vsi->eth_stats.rx_multicast + + pf->main_vsi->eth_stats.rx_broadcast - + pf->main_vsi->eth_stats.rx_discards; + stats->opackets = ns->eth.tx_unicast + + ns->eth.tx_multicast + + ns->eth.tx_broadcast; + stats->ibytes = pf->main_vsi->eth_stats.rx_bytes; + stats->obytes = ns->eth.tx_bytes; + stats->oerrors = ns->eth.tx_errors + + pf->main_vsi->eth_stats.tx_errors; + + /* Rx Errors */ + stats->imissed = ns->eth.rx_discards + + pf->main_vsi->eth_stats.rx_discards; + stats->ierrors = ns->crc_errors + + ns->rx_length_errors + ns->rx_undersize + + ns->rx_oversize + ns->rx_fragments + ns->rx_jabber; + + if (pf->vfs) { + for (i = 0; i < pf->vf_num; i++) { + vsi = pf->vfs[i].vsi; + i40e_update_vsi_stats(vsi); + + stats->ipackets += (vsi->eth_stats.rx_unicast + + vsi->eth_stats.rx_multicast + + vsi->eth_stats.rx_broadcast - + vsi->eth_stats.rx_discards); + stats->ibytes += vsi->eth_stats.rx_bytes; + stats->oerrors += vsi->eth_stats.tx_errors; + stats->imissed += vsi->eth_stats.rx_discards; + } + } + + PMD_DRV_LOG(DEBUG, "***************** PF stats 
start *******************"); + PMD_DRV_LOG(DEBUG, "rx_bytes: %"PRIu64"", ns->eth.rx_bytes); + PMD_DRV_LOG(DEBUG, "rx_unicast: %"PRIu64"", ns->eth.rx_unicast); + PMD_DRV_LOG(DEBUG, "rx_multicast: %"PRIu64"", ns->eth.rx_multicast); + PMD_DRV_LOG(DEBUG, "rx_broadcast: %"PRIu64"", ns->eth.rx_broadcast); + PMD_DRV_LOG(DEBUG, "rx_discards: %"PRIu64"", ns->eth.rx_discards); + PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"", + ns->eth.rx_unknown_protocol); + PMD_DRV_LOG(DEBUG, "tx_bytes: %"PRIu64"", ns->eth.tx_bytes); + PMD_DRV_LOG(DEBUG, "tx_unicast: %"PRIu64"", ns->eth.tx_unicast); + PMD_DRV_LOG(DEBUG, "tx_multicast: %"PRIu64"", ns->eth.tx_multicast); + PMD_DRV_LOG(DEBUG, "tx_broadcast: %"PRIu64"", ns->eth.tx_broadcast); + PMD_DRV_LOG(DEBUG, "tx_discards: %"PRIu64"", ns->eth.tx_discards); + PMD_DRV_LOG(DEBUG, "tx_errors: %"PRIu64"", ns->eth.tx_errors); + + PMD_DRV_LOG(DEBUG, "tx_dropped_link_down: %"PRIu64"", + ns->tx_dropped_link_down); + PMD_DRV_LOG(DEBUG, "crc_errors: %"PRIu64"", ns->crc_errors); + PMD_DRV_LOG(DEBUG, "illegal_bytes: %"PRIu64"", + ns->illegal_bytes); + PMD_DRV_LOG(DEBUG, "error_bytes: %"PRIu64"", ns->error_bytes); + PMD_DRV_LOG(DEBUG, "mac_local_faults: %"PRIu64"", + ns->mac_local_faults); + PMD_DRV_LOG(DEBUG, "mac_remote_faults: %"PRIu64"", + ns->mac_remote_faults); + PMD_DRV_LOG(DEBUG, "rx_length_errors: %"PRIu64"", + ns->rx_length_errors); + PMD_DRV_LOG(DEBUG, "link_xon_rx: %"PRIu64"", ns->link_xon_rx); + PMD_DRV_LOG(DEBUG, "link_xoff_rx: %"PRIu64"", ns->link_xoff_rx); + for (i = 0; i < 8; i++) { + PMD_DRV_LOG(DEBUG, "priority_xon_rx[%d]: %"PRIu64"", + i, ns->priority_xon_rx[i]); + PMD_DRV_LOG(DEBUG, "priority_xoff_rx[%d]: %"PRIu64"", + i, ns->priority_xoff_rx[i]); + } + PMD_DRV_LOG(DEBUG, "link_xon_tx: %"PRIu64"", ns->link_xon_tx); + PMD_DRV_LOG(DEBUG, "link_xoff_tx: %"PRIu64"", ns->link_xoff_tx); + for (i = 0; i < 8; i++) { + PMD_DRV_LOG(DEBUG, "priority_xon_tx[%d]: %"PRIu64"", + i, ns->priority_xon_tx[i]); + PMD_DRV_LOG(DEBUG, "priority_xoff_tx[%d]: %"PRIu64"", + i, ns->priority_xoff_tx[i]); + PMD_DRV_LOG(DEBUG, "priority_xon_2_xoff[%d]: %"PRIu64"", + i, ns->priority_xon_2_xoff[i]); + } + PMD_DRV_LOG(DEBUG, "rx_size_64: %"PRIu64"", ns->rx_size_64); + PMD_DRV_LOG(DEBUG, "rx_size_127: %"PRIu64"", ns->rx_size_127); + PMD_DRV_LOG(DEBUG, "rx_size_255: %"PRIu64"", ns->rx_size_255); + PMD_DRV_LOG(DEBUG, "rx_size_511: %"PRIu64"", ns->rx_size_511); + PMD_DRV_LOG(DEBUG, "rx_size_1023: %"PRIu64"", ns->rx_size_1023); + PMD_DRV_LOG(DEBUG, "rx_size_1522: %"PRIu64"", ns->rx_size_1522); + PMD_DRV_LOG(DEBUG, "rx_size_big: %"PRIu64"", ns->rx_size_big); + PMD_DRV_LOG(DEBUG, "rx_undersize: %"PRIu64"", ns->rx_undersize); + PMD_DRV_LOG(DEBUG, "rx_fragments: %"PRIu64"", ns->rx_fragments); + PMD_DRV_LOG(DEBUG, "rx_oversize: %"PRIu64"", ns->rx_oversize); + PMD_DRV_LOG(DEBUG, "rx_jabber: %"PRIu64"", ns->rx_jabber); + PMD_DRV_LOG(DEBUG, "tx_size_64: %"PRIu64"", ns->tx_size_64); + PMD_DRV_LOG(DEBUG, "tx_size_127: %"PRIu64"", ns->tx_size_127); + PMD_DRV_LOG(DEBUG, "tx_size_255: %"PRIu64"", ns->tx_size_255); + PMD_DRV_LOG(DEBUG, "tx_size_511: %"PRIu64"", ns->tx_size_511); + PMD_DRV_LOG(DEBUG, "tx_size_1023: %"PRIu64"", ns->tx_size_1023); + PMD_DRV_LOG(DEBUG, "tx_size_1522: %"PRIu64"", ns->tx_size_1522); + PMD_DRV_LOG(DEBUG, "tx_size_big: %"PRIu64"", ns->tx_size_big); + PMD_DRV_LOG(DEBUG, "mac_short_packet_dropped: %"PRIu64"", + ns->mac_short_packet_dropped); + PMD_DRV_LOG(DEBUG, "checksum_error: %"PRIu64"", + ns->checksum_error); + PMD_DRV_LOG(DEBUG, "fdir_match: %"PRIu64"", ns->fd_sb_match); + 
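/*
 * Editor's sketch (not part of the patch): i40e_dev_stats_get() above
 * derives the generic ethdev counters from per-type hardware counters,
 * e.g. ipackets = unicast + multicast + broadcast - discards, and
 * ierrors as the sum of the individual receive error counters. The
 * structure and helper below are hypothetical stand-ins showing only
 * that composition.
 */
#include <stdint.h>

struct basic_rx_stats_sketch {
	uint64_t rx_unicast;
	uint64_t rx_multicast;
	uint64_t rx_broadcast;
	uint64_t rx_discards;
	uint64_t crc_errors;
	uint64_t rx_length_errors;
	uint64_t rx_undersize;
	uint64_t rx_oversize;
	uint64_t rx_fragments;
	uint64_t rx_jabber;
};

static void
fill_basic_stats_sketch(const struct basic_rx_stats_sketch *s,
			uint64_t *ipackets, uint64_t *ierrors)
{
	/* Good packets: every delivered frame class, minus drops. */
	*ipackets = s->rx_unicast + s->rx_multicast + s->rx_broadcast -
		    s->rx_discards;

	/* Receive errors: the individual error counters summed up. */
	*ierrors = s->crc_errors + s->rx_length_errors + s->rx_undersize +
		   s->rx_oversize + s->rx_fragments + s->rx_jabber;
}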
PMD_DRV_LOG(DEBUG, "***************** PF stats end ********************"); + return 0; +} + +/* Reset the statistics */ +static int +i40e_dev_stats_reset(struct rte_eth_dev *dev) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* Mark PF and VSI stats to update the offset, aka "reset" */ + pf->offset_loaded = false; + if (pf->main_vsi) + pf->main_vsi->offset_loaded = false; + + /* read the stats, reading current register values into offset */ + i40e_read_stats_registers(pf, hw); + + return 0; +} + +static uint32_t +i40e_xstats_calc_num(void) +{ + return I40E_NB_ETH_XSTATS + I40E_NB_HW_PORT_XSTATS + + (I40E_NB_RXQ_PRIO_XSTATS * 8) + + (I40E_NB_TXQ_PRIO_XSTATS * 8); +} + +static int i40e_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, + __rte_unused unsigned limit) +{ + unsigned count = 0; + unsigned i, prio; + + if (xstats_names == NULL) + return i40e_xstats_calc_num(); + + /* Note: limit checked in rte_eth_xstats_names() */ + + /* Get stats from i40e_eth_stats struct */ + for (i = 0; i < I40E_NB_ETH_XSTATS; i++) { + strlcpy(xstats_names[count].name, + rte_i40e_stats_strings[i].name, + sizeof(xstats_names[count].name)); + count++; + } + + /* Get individiual stats from i40e_hw_port struct */ + for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) { + strlcpy(xstats_names[count].name, + rte_i40e_hw_port_strings[i].name, + sizeof(xstats_names[count].name)); + count++; + } + + for (i = 0; i < I40E_NB_RXQ_PRIO_XSTATS; i++) { + for (prio = 0; prio < 8; prio++) { + snprintf(xstats_names[count].name, + sizeof(xstats_names[count].name), + "rx_priority%u_%s", prio, + rte_i40e_rxq_prio_strings[i].name); + count++; + } + } + + for (i = 0; i < I40E_NB_TXQ_PRIO_XSTATS; i++) { + for (prio = 0; prio < 8; prio++) { + snprintf(xstats_names[count].name, + sizeof(xstats_names[count].name), + "tx_priority%u_%s", prio, + rte_i40e_txq_prio_strings[i].name); + count++; + } + } + return count; +} + +static int +i40e_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, + unsigned n) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + unsigned i, count, prio; + struct i40e_hw_port_stats *hw_stats = &pf->stats; + + count = i40e_xstats_calc_num(); + if (n < count) + return count; + + i40e_read_stats_registers(pf, hw); + + if (xstats == NULL) + return 0; + + count = 0; + + /* Get stats from i40e_eth_stats struct */ + for (i = 0; i < I40E_NB_ETH_XSTATS; i++) { + xstats[count].value = *(uint64_t *)(((char *)&hw_stats->eth) + + rte_i40e_stats_strings[i].offset); + xstats[count].id = count; + count++; + } + + /* Get individiual stats from i40e_hw_port struct */ + for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) { + xstats[count].value = *(uint64_t *)(((char *)hw_stats) + + rte_i40e_hw_port_strings[i].offset); + xstats[count].id = count; + count++; + } + + for (i = 0; i < I40E_NB_RXQ_PRIO_XSTATS; i++) { + for (prio = 0; prio < 8; prio++) { + xstats[count].value = + *(uint64_t *)(((char *)hw_stats) + + rte_i40e_rxq_prio_strings[i].offset + + (sizeof(uint64_t) * prio)); + xstats[count].id = count; + count++; + } + } + + for (i = 0; i < I40E_NB_TXQ_PRIO_XSTATS; i++) { + for (prio = 0; prio < 8; prio++) { + xstats[count].value = + *(uint64_t *)(((char *)hw_stats) + + rte_i40e_txq_prio_strings[i].offset + + (sizeof(uint64_t) * prio)); + xstats[count].id = count; + count++; + } + } 
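/*
 * Editor's sketch (not part of the patch): the xstats loops above walk
 * name/offset tables and, for the per-priority groups, add
 * prio * sizeof(uint64_t) to the table offset so each of the 8
 * traffic-class slots in the hardware stats structure becomes its own
 * flat xstats entry. The miniature table and structure below are
 * hypothetical; they only demonstrate that offset arithmetic.
 */
#include <inttypes.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct port_stats_sketch {
	uint64_t rx_bytes;
	uint64_t priority_xon_rx[8];
};

struct xstat_desc_sketch {
	const char *name;
	size_t offset;
};

static const struct xstat_desc_sketch prio_descs[] = {
	{ "xon_rx", offsetof(struct port_stats_sketch, priority_xon_rx) },
};

int
main(void)
{
	struct port_stats_sketch hw_stats = { .priority_xon_rx = { 1, 2, 3 } };
	size_t i;
	unsigned int prio;

	for (i = 0; i < sizeof(prio_descs) / sizeof(prio_descs[0]); i++) {
		for (prio = 0; prio < 8; prio++) {
			/* Base offset of the array plus one slot per TC. */
			uint64_t v = *(const uint64_t *)
				((const char *)&hw_stats +
				 prio_descs[i].offset +
				 sizeof(uint64_t) * prio);

			printf("rx_priority%u_%s: %" PRIu64 "\n",
			       prio, prio_descs[i].name, v);
		}
	}
	return 0;
}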
+ + return count; +} + +static int +i40e_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + u32 full_ver; + u8 ver, patch; + u16 build; + int ret; + + full_ver = hw->nvm.oem_ver; + ver = (u8)(full_ver >> 24); + build = (u16)((full_ver >> 8) & 0xffff); + patch = (u8)(full_ver & 0xff); + + ret = snprintf(fw_version, fw_size, + "%d.%d%d 0x%08x %d.%d.%d", + ((hw->nvm.version >> 12) & 0xf), + ((hw->nvm.version >> 4) & 0xff), + (hw->nvm.version & 0xf), hw->nvm.eetrack, + ver, build, patch); + + ret += 1; /* add the size of '\0' */ + if (fw_size < (u32)ret) + return ret; + else + return 0; +} + +/* + * When using NVM 6.01(for X710 XL710 XXV710)/3.33(for X722) or later, + * the Rx data path does not hang if the FW LLDP is stopped. + * return true if lldp need to stop + * return false if we cannot disable the LLDP to avoid Rx data path blocking. + */ +static bool +i40e_need_stop_lldp(struct rte_eth_dev *dev) +{ + double nvm_ver; + char ver_str[64] = {0}; + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + i40e_fw_version_get(dev, ver_str, 64); + nvm_ver = atof(ver_str); + if ((hw->mac.type == I40E_MAC_X722 || + hw->mac.type == I40E_MAC_X722_VF) && + ((uint32_t)(nvm_ver * 1000) >= (uint32_t)(3.33 * 1000))) + return true; + else if ((uint32_t)(nvm_ver * 1000) >= (uint32_t)(6.01 * 1000)) + return true; + + return false; +} + +static int +i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct i40e_vsi *vsi = pf->main_vsi; + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + + dev_info->max_rx_queues = vsi->nb_qps; + dev_info->max_tx_queues = vsi->nb_qps; + dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN; + dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX; + dev_info->max_mac_addrs = vsi->max_macaddrs; + dev_info->max_vfs = pci_dev->max_vfs; + dev_info->max_mtu = dev_info->max_rx_pktlen - I40E_ETH_OVERHEAD; + dev_info->min_mtu = RTE_ETHER_MIN_MTU; + dev_info->rx_queue_offload_capa = 0; + dev_info->rx_offload_capa = + DEV_RX_OFFLOAD_VLAN_STRIP | + DEV_RX_OFFLOAD_QINQ_STRIP | + DEV_RX_OFFLOAD_IPV4_CKSUM | + DEV_RX_OFFLOAD_UDP_CKSUM | + DEV_RX_OFFLOAD_TCP_CKSUM | + DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | + DEV_RX_OFFLOAD_KEEP_CRC | + DEV_RX_OFFLOAD_SCATTER | + DEV_RX_OFFLOAD_VLAN_EXTEND | + DEV_RX_OFFLOAD_VLAN_FILTER | + DEV_RX_OFFLOAD_JUMBO_FRAME | + DEV_RX_OFFLOAD_RSS_HASH; + + dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE; + dev_info->tx_offload_capa = + DEV_TX_OFFLOAD_VLAN_INSERT | + DEV_TX_OFFLOAD_QINQ_INSERT | + DEV_TX_OFFLOAD_IPV4_CKSUM | + DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_TCP_CKSUM | + DEV_TX_OFFLOAD_SCTP_CKSUM | + DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | + DEV_TX_OFFLOAD_TCP_TSO | + DEV_TX_OFFLOAD_VXLAN_TNL_TSO | + DEV_TX_OFFLOAD_GRE_TNL_TSO | + DEV_TX_OFFLOAD_IPIP_TNL_TSO | + DEV_TX_OFFLOAD_GENEVE_TNL_TSO | + DEV_TX_OFFLOAD_MULTI_SEGS | + dev_info->tx_queue_offload_capa; + dev_info->dev_capa = + RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP | + RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP; + + dev_info->hash_key_size = (I40E_PFQF_HKEY_MAX_INDEX + 1) * + sizeof(uint32_t); + dev_info->reta_size = pf->hash_lut_size; + dev_info->flow_type_rss_offloads = pf->adapter->flow_types_mask; + + dev_info->default_rxconf = (struct rte_eth_rxconf) { + .rx_thresh = { + .pthresh = I40E_DEFAULT_RX_PTHRESH, + 
.hthresh = I40E_DEFAULT_RX_HTHRESH, + .wthresh = I40E_DEFAULT_RX_WTHRESH, + }, + .rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH, + .rx_drop_en = 0, + .offloads = 0, + }; + + dev_info->default_txconf = (struct rte_eth_txconf) { + .tx_thresh = { + .pthresh = I40E_DEFAULT_TX_PTHRESH, + .hthresh = I40E_DEFAULT_TX_HTHRESH, + .wthresh = I40E_DEFAULT_TX_WTHRESH, + }, + .tx_free_thresh = I40E_DEFAULT_TX_FREE_THRESH, + .tx_rs_thresh = I40E_DEFAULT_TX_RSBIT_THRESH, + .offloads = 0, + }; + + dev_info->rx_desc_lim = (struct rte_eth_desc_lim) { + .nb_max = I40E_MAX_RING_DESC, + .nb_min = I40E_MIN_RING_DESC, + .nb_align = I40E_ALIGN_RING_DESC, + }; + + dev_info->tx_desc_lim = (struct rte_eth_desc_lim) { + .nb_max = I40E_MAX_RING_DESC, + .nb_min = I40E_MIN_RING_DESC, + .nb_align = I40E_ALIGN_RING_DESC, + .nb_seg_max = I40E_TX_MAX_SEG, + .nb_mtu_seg_max = I40E_TX_MAX_MTU_SEG, + }; + + if (pf->flags & I40E_FLAG_VMDQ) { + dev_info->max_vmdq_pools = pf->max_nb_vmdq_vsi; + dev_info->vmdq_queue_base = dev_info->max_rx_queues; + dev_info->vmdq_queue_num = pf->vmdq_nb_qps * + pf->max_nb_vmdq_vsi; + dev_info->vmdq_pool_base = I40E_VMDQ_POOL_BASE; + dev_info->max_rx_queues += dev_info->vmdq_queue_num; + dev_info->max_tx_queues += dev_info->vmdq_queue_num; + } + + if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) { + /* For XL710 */ + dev_info->speed_capa = ETH_LINK_SPEED_40G; + dev_info->default_rxportconf.nb_queues = 2; + dev_info->default_txportconf.nb_queues = 2; + if (dev->data->nb_rx_queues == 1) + dev_info->default_rxportconf.ring_size = 2048; + else + dev_info->default_rxportconf.ring_size = 1024; + if (dev->data->nb_tx_queues == 1) + dev_info->default_txportconf.ring_size = 1024; + else + dev_info->default_txportconf.ring_size = 512; + + } else if (I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types)) { + /* For XXV710 */ + dev_info->speed_capa = ETH_LINK_SPEED_25G; + dev_info->default_rxportconf.nb_queues = 1; + dev_info->default_txportconf.nb_queues = 1; + dev_info->default_rxportconf.ring_size = 256; + dev_info->default_txportconf.ring_size = 256; + } else { + /* For X710 */ + dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G; + dev_info->default_rxportconf.nb_queues = 1; + dev_info->default_txportconf.nb_queues = 1; + if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_10G) { + dev_info->default_rxportconf.ring_size = 512; + dev_info->default_txportconf.ring_size = 256; + } else { + dev_info->default_rxportconf.ring_size = 256; + dev_info->default_txportconf.ring_size = 256; + } + } + dev_info->default_rxportconf.burst_size = 32; + dev_info->default_txportconf.burst_size = 32; + + return 0; +} + +static int +i40e_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_vsi *vsi = pf->main_vsi; + PMD_INIT_FUNC_TRACE(); + + if (on) + return i40e_vsi_add_vlan(vsi, vlan_id); + else + return i40e_vsi_delete_vlan(vsi, vlan_id); +} + +static int +i40e_vlan_tpid_set_by_registers(struct rte_eth_dev *dev, + enum rte_vlan_type vlan_type, + uint16_t tpid, int qinq) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint64_t reg_r = 0; + uint64_t reg_w = 0; + uint16_t reg_id = 3; + int ret; + + if (qinq) { + if (vlan_type == ETH_VLAN_TYPE_OUTER) + reg_id = 2; + } + + ret = i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id), + ®_r, NULL); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, + "Fail to debug read from I40E_GL_SWT_L2TAGCTRL[%d]", + reg_id); + return -EIO; + } + 
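/*
 * Editor's sketch (not part of the patch): the code that follows
 * performs a read-modify-write of the L2TAGCTRL register -- clear the
 * ethertype field, shift the new TPID into place, and skip the write
 * when the value is unchanged. The constants and helper below are
 * hypothetical placeholders illustrating that field update, not the
 * hardware's actual bit layout.
 */
#include <stdbool.h>
#include <stdint.h>

#define TAGCTRL_ETHERTYPE_SHIFT 16U
#define TAGCTRL_ETHERTYPE_MASK  (0xFFFFULL << TAGCTRL_ETHERTYPE_SHIFT)

/* Returns true when the register value actually needs to be rewritten. */
static bool
tagctrl_update_tpid_sketch(uint64_t reg_old, uint16_t tpid, uint64_t *reg_new)
{
	*reg_new = reg_old & ~TAGCTRL_ETHERTYPE_MASK;
	*reg_new |= (uint64_t)tpid << TAGCTRL_ETHERTYPE_SHIFT;

	return *reg_new != reg_old;
}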
PMD_DRV_LOG(DEBUG, + "Debug read from I40E_GL_SWT_L2TAGCTRL[%d]: 0x%08"PRIx64, + reg_id, reg_r); + + reg_w = reg_r & (~(I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_MASK)); + reg_w |= ((uint64_t)tpid << I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT); + if (reg_r == reg_w) { + PMD_DRV_LOG(DEBUG, "No need to write"); + return 0; + } + + ret = i40e_aq_debug_write_global_register(hw, + I40E_GL_SWT_L2TAGCTRL(reg_id), + reg_w, NULL); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, + "Fail to debug write to I40E_GL_SWT_L2TAGCTRL[%d]", + reg_id); + return -EIO; + } + PMD_DRV_LOG(DEBUG, + "Global register 0x%08x is changed with value 0x%08x", + I40E_GL_SWT_L2TAGCTRL(reg_id), (uint32_t)reg_w); + + return 0; +} + +static int +i40e_vlan_tpid_set(struct rte_eth_dev *dev, + enum rte_vlan_type vlan_type, + uint16_t tpid) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + int qinq = dev->data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_VLAN_EXTEND; + int ret = 0; + + if ((vlan_type != ETH_VLAN_TYPE_INNER && + vlan_type != ETH_VLAN_TYPE_OUTER) || + (!qinq && vlan_type == ETH_VLAN_TYPE_INNER)) { + PMD_DRV_LOG(ERR, + "Unsupported vlan type."); + return -EINVAL; + } + + if (pf->support_multi_driver) { + PMD_DRV_LOG(ERR, "Setting TPID is not supported."); + return -ENOTSUP; + } + + /* 802.1ad frames ability is added in NVM API 1.7*/ + if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) { + if (qinq) { + if (vlan_type == ETH_VLAN_TYPE_OUTER) + hw->first_tag = rte_cpu_to_le_16(tpid); + else if (vlan_type == ETH_VLAN_TYPE_INNER) + hw->second_tag = rte_cpu_to_le_16(tpid); + } else { + if (vlan_type == ETH_VLAN_TYPE_OUTER) + hw->second_tag = rte_cpu_to_le_16(tpid); + } + ret = i40e_aq_set_switch_config(hw, 0, 0, 0, NULL); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, + "Set switch config failed aq_err: %d", + hw->aq.asq_last_status); + ret = -EIO; + } + } else + /* If NVM API < 1.7, keep the register setting */ + ret = i40e_vlan_tpid_set_by_registers(dev, vlan_type, + tpid, qinq); + + return ret; +} + +static int +i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_vsi *vsi = pf->main_vsi; + struct rte_eth_rxmode *rxmode; + + if (mask & ETH_QINQ_STRIP_MASK) { + PMD_DRV_LOG(ERR, "Strip qinq is not supported."); + return -ENOTSUP; + } + + rxmode = &dev->data->dev_conf.rxmode; + if (mask & ETH_VLAN_FILTER_MASK) { + if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) + i40e_vsi_config_vlan_filter(vsi, TRUE); + else + i40e_vsi_config_vlan_filter(vsi, FALSE); + } + + if (mask & ETH_VLAN_STRIP_MASK) { + /* Enable or disable VLAN stripping */ + if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) + i40e_vsi_config_vlan_stripping(vsi, TRUE); + else + i40e_vsi_config_vlan_stripping(vsi, FALSE); + } + + if (mask & ETH_VLAN_EXTEND_MASK) { + if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) { + i40e_vsi_config_double_vlan(vsi, TRUE); + /* Set global registers with default ethertype. 
*/ + i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER, + RTE_ETHER_TYPE_VLAN); + i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_INNER, + RTE_ETHER_TYPE_VLAN); + } + else + i40e_vsi_config_double_vlan(vsi, FALSE); + } + + return 0; +} + +static void +i40e_vlan_strip_queue_set(__rte_unused struct rte_eth_dev *dev, + __rte_unused uint16_t queue, + __rte_unused int on) +{ + PMD_INIT_FUNC_TRACE(); +} + +static int +i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_vsi *vsi = pf->main_vsi; + struct rte_eth_dev_data *data = I40E_VSI_TO_DEV_DATA(vsi); + struct i40e_vsi_vlan_pvid_info info; + + memset(&info, 0, sizeof(info)); + info.on = on; + if (info.on) + info.config.pvid = pvid; + else { + info.config.reject.tagged = + data->dev_conf.txmode.hw_vlan_reject_tagged; + info.config.reject.untagged = + data->dev_conf.txmode.hw_vlan_reject_untagged; + } + + return i40e_vsi_vlan_pvid_set(vsi, &info); +} + +static int +i40e_dev_led_on(struct rte_eth_dev *dev) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t mode = i40e_led_get(hw); + + if (mode == 0) + i40e_led_set(hw, 0xf, true); /* 0xf means led always true */ + + return 0; +} + +static int +i40e_dev_led_off(struct rte_eth_dev *dev) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t mode = i40e_led_get(hw); + + if (mode != 0) + i40e_led_set(hw, 0, false); + + return 0; +} + +static int +i40e_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + + fc_conf->pause_time = pf->fc_conf.pause_time; + + /* read out from register, in case they are modified by other port */ + pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] = + I40E_READ_REG(hw, I40E_GLRPB_GHW) >> I40E_KILOSHIFT; + pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] = + I40E_READ_REG(hw, I40E_GLRPB_GLW) >> I40E_KILOSHIFT; + + fc_conf->high_water = pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]; + fc_conf->low_water = pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]; + + /* Return current mode according to actual setting*/ + switch (hw->fc.current_mode) { + case I40E_FC_FULL: + fc_conf->mode = RTE_FC_FULL; + break; + case I40E_FC_TX_PAUSE: + fc_conf->mode = RTE_FC_TX_PAUSE; + break; + case I40E_FC_RX_PAUSE: + fc_conf->mode = RTE_FC_RX_PAUSE; + break; + case I40E_FC_NONE: + default: + fc_conf->mode = RTE_FC_NONE; + }; + + return 0; +} + +static int +i40e_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) +{ + uint32_t mflcn_reg, fctrl_reg, reg; + uint32_t max_high_water; + uint8_t i, aq_failure; + int err; + struct i40e_hw *hw; + struct i40e_pf *pf; + enum i40e_fc_mode rte_fcmode_2_i40e_fcmode[] = { + [RTE_FC_NONE] = I40E_FC_NONE, + [RTE_FC_RX_PAUSE] = I40E_FC_RX_PAUSE, + [RTE_FC_TX_PAUSE] = I40E_FC_TX_PAUSE, + [RTE_FC_FULL] = I40E_FC_FULL + }; + + /* high_water field in the rte_eth_fc_conf using the kilobytes unit */ + + max_high_water = I40E_RXPBSIZE >> I40E_KILOSHIFT; + if ((fc_conf->high_water > max_high_water) || + (fc_conf->high_water < fc_conf->low_water)) { + PMD_INIT_LOG(ERR, + "Invalid high/low water setup value in KB, High_water must be <= %d.", + max_high_water); + return -EINVAL; + } + + hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + hw->fc.requested_mode = 
rte_fcmode_2_i40e_fcmode[fc_conf->mode]; + + pf->fc_conf.pause_time = fc_conf->pause_time; + pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] = fc_conf->high_water; + pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] = fc_conf->low_water; + + PMD_INIT_FUNC_TRACE(); + + /* All the link flow control related enable/disable register + * configuration is handle by the F/W + */ + err = i40e_set_fc(hw, &aq_failure, true); + if (err < 0) + return -ENOSYS; + + if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) { + /* Configure flow control refresh threshold, + * the value for stat_tx_pause_refresh_timer[8] + * is used for global pause operation. + */ + + I40E_WRITE_REG(hw, + I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(8), + pf->fc_conf.pause_time); + + /* configure the timer value included in transmitted pause + * frame, + * the value for stat_tx_pause_quanta[8] is used for global + * pause operation + */ + I40E_WRITE_REG(hw, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(8), + pf->fc_conf.pause_time); + + fctrl_reg = I40E_READ_REG(hw, + I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL); + + if (fc_conf->mac_ctrl_frame_fwd != 0) + fctrl_reg |= I40E_PRTMAC_FWD_CTRL; + else + fctrl_reg &= ~I40E_PRTMAC_FWD_CTRL; + + I40E_WRITE_REG(hw, I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL, + fctrl_reg); + } else { + /* Configure pause time (2 TCs per register) */ + reg = (uint32_t)pf->fc_conf.pause_time * (uint32_t)0x00010001; + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS / 2; i++) + I40E_WRITE_REG(hw, I40E_PRTDCB_FCTTVN(i), reg); + + /* Configure flow control refresh threshold value */ + I40E_WRITE_REG(hw, I40E_PRTDCB_FCRTV, + pf->fc_conf.pause_time / 2); + + mflcn_reg = I40E_READ_REG(hw, I40E_PRTDCB_MFLCN); + + /* set or clear MFLCN.PMCF & MFLCN.DPF bits + *depending on configuration + */ + if (fc_conf->mac_ctrl_frame_fwd != 0) { + mflcn_reg |= I40E_PRTDCB_MFLCN_PMCF_MASK; + mflcn_reg &= ~I40E_PRTDCB_MFLCN_DPF_MASK; + } else { + mflcn_reg &= ~I40E_PRTDCB_MFLCN_PMCF_MASK; + mflcn_reg |= I40E_PRTDCB_MFLCN_DPF_MASK; + } + + I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, mflcn_reg); + } + + if (!pf->support_multi_driver) { + /* config water marker both based on the packets and bytes */ + I40E_WRITE_GLB_REG(hw, I40E_GLRPB_PHW, + (pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] + << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE); + I40E_WRITE_GLB_REG(hw, I40E_GLRPB_PLW, + (pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] + << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE); + I40E_WRITE_GLB_REG(hw, I40E_GLRPB_GHW, + pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] + << I40E_KILOSHIFT); + I40E_WRITE_GLB_REG(hw, I40E_GLRPB_GLW, + pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] + << I40E_KILOSHIFT); + } else { + PMD_DRV_LOG(ERR, + "Water marker configuration is not supported."); + } + + I40E_WRITE_FLUSH(hw); + + return 0; +} + +static int +i40e_priority_flow_ctrl_set(__rte_unused struct rte_eth_dev *dev, + __rte_unused struct rte_eth_pfc_conf *pfc_conf) +{ + PMD_INIT_FUNC_TRACE(); + + return -ENOSYS; +} + +/* Add a MAC address, and update filters */ +static int +i40e_macaddr_add(struct rte_eth_dev *dev, + struct rte_ether_addr *mac_addr, + __rte_unused uint32_t index, + uint32_t pool) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_mac_filter_info mac_filter; + struct i40e_vsi *vsi; + struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode; + int ret; + + /* If VMDQ not enabled or configured, return */ + if (pool != 0 && (!(pf->flags & I40E_FLAG_VMDQ) || + !pf->nb_cfg_vmdq_vsi)) { + PMD_DRV_LOG(ERR, "VMDQ not %s, can't set mac to 
pool %u", + pf->flags & I40E_FLAG_VMDQ ? "configured" : "enabled", + pool); + return -ENOTSUP; + } + + if (pool > pf->nb_cfg_vmdq_vsi) { + PMD_DRV_LOG(ERR, "Pool number %u invalid. Max pool is %u", + pool, pf->nb_cfg_vmdq_vsi); + return -EINVAL; + } + + rte_memcpy(&mac_filter.mac_addr, mac_addr, RTE_ETHER_ADDR_LEN); + if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) + mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH; + else + mac_filter.filter_type = RTE_MAC_PERFECT_MATCH; + + if (pool == 0) + vsi = pf->main_vsi; + else + vsi = pf->vmdq[pool - 1].vsi; + + ret = i40e_vsi_add_mac(vsi, &mac_filter); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter"); + return -ENODEV; + } + return 0; +} + +/* Remove a MAC address, and update filters */ +static void +i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_vsi *vsi; + struct rte_eth_dev_data *data = dev->data; + struct rte_ether_addr *macaddr; + int ret; + uint32_t i; + uint64_t pool_sel; + + macaddr = &(data->mac_addrs[index]); + + pool_sel = dev->data->mac_pool_sel[index]; + + for (i = 0; i < sizeof(pool_sel) * CHAR_BIT; i++) { + if (pool_sel & (1ULL << i)) { + if (i == 0) + vsi = pf->main_vsi; + else { + /* No VMDQ pool enabled or configured */ + if (!(pf->flags & I40E_FLAG_VMDQ) || + (i > pf->nb_cfg_vmdq_vsi)) { + PMD_DRV_LOG(ERR, + "No VMDQ pool enabled/configured"); + return; + } + vsi = pf->vmdq[i - 1].vsi; + } + ret = i40e_vsi_delete_mac(vsi, macaddr); + + if (ret) { + PMD_DRV_LOG(ERR, "Failed to remove MACVLAN filter"); + return; + } + } + } +} + +/* Set perfect match or hash match of MAC and VLAN for a VF */ +static int +i40e_vf_mac_filter_set(struct i40e_pf *pf, + struct rte_eth_mac_filter *filter, + bool add) +{ + struct i40e_hw *hw; + struct i40e_mac_filter_info mac_filter; + struct rte_ether_addr old_mac; + struct rte_ether_addr *new_mac; + struct i40e_pf_vf *vf = NULL; + uint16_t vf_id; + int ret; + + if (pf == NULL) { + PMD_DRV_LOG(ERR, "Invalid PF argument."); + return -EINVAL; + } + hw = I40E_PF_TO_HW(pf); + + if (filter == NULL) { + PMD_DRV_LOG(ERR, "Invalid mac filter argument."); + return -EINVAL; + } + + new_mac = &filter->mac_addr; + + if (rte_is_zero_ether_addr(new_mac)) { + PMD_DRV_LOG(ERR, "Invalid ethernet address."); + return -EINVAL; + } + + vf_id = filter->dst_id; + + if (vf_id > pf->vf_num - 1 || !pf->vfs) { + PMD_DRV_LOG(ERR, "Invalid argument."); + return -EINVAL; + } + vf = &pf->vfs[vf_id]; + + if (add && rte_is_same_ether_addr(new_mac, &pf->dev_addr)) { + PMD_DRV_LOG(INFO, "Ignore adding permanent MAC address."); + return -EINVAL; + } + + if (add) { + rte_memcpy(&old_mac, hw->mac.addr, RTE_ETHER_ADDR_LEN); + rte_memcpy(hw->mac.addr, new_mac->addr_bytes, + RTE_ETHER_ADDR_LEN); + rte_memcpy(&mac_filter.mac_addr, &filter->mac_addr, + RTE_ETHER_ADDR_LEN); + + mac_filter.filter_type = filter->filter_type; + ret = i40e_vsi_add_mac(vf->vsi, &mac_filter); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "Failed to add MAC filter."); + return -1; + } + rte_ether_addr_copy(new_mac, &pf->dev_addr); + } else { + rte_memcpy(hw->mac.addr, hw->mac.perm_addr, + RTE_ETHER_ADDR_LEN); + ret = i40e_vsi_delete_mac(vf->vsi, &filter->mac_addr); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "Failed to delete MAC filter."); + return -1; + } + + /* Clear device address as it has been removed */ + if (rte_is_same_ether_addr(&pf->dev_addr, new_mac)) + memset(&pf->dev_addr, 0, sizeof(struct rte_ether_addr)); + } + 
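/*
 * Editor's sketch (not part of the patch): i40e_macaddr_remove() above
 * treats mac_pool_sel[index] as a bitmask -- bit 0 selects the main
 * VSI and bit N (N > 0) selects VMDq pool N - 1 -- and removes the
 * address from every VSI whose bit is set. The loop below is a
 * hypothetical, stripped-down version of that bit walk.
 */
#include <limits.h>
#include <stdint.h>
#include <stdio.h>

static void
walk_pool_sel_sketch(uint64_t pool_sel)
{
	uint32_t i;

	for (i = 0; i < sizeof(pool_sel) * CHAR_BIT; i++) {
		if (!(pool_sel & (1ULL << i)))
			continue;
		if (i == 0)
			printf("remove MAC from main VSI\n");
		else
			printf("remove MAC from VMDq pool %u (vmdq[%u])\n",
			       i, i - 1);
	}
}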
+ return 0; +} + +/* MAC filter handle */ +static int +i40e_mac_filter_handle(struct rte_eth_dev *dev, enum rte_filter_op filter_op, + void *arg) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct rte_eth_mac_filter *filter; + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + int ret = I40E_NOT_SUPPORTED; + + filter = (struct rte_eth_mac_filter *)(arg); + + switch (filter_op) { + case RTE_ETH_FILTER_NOP: + ret = I40E_SUCCESS; + break; + case RTE_ETH_FILTER_ADD: + i40e_pf_disable_irq0(hw); + if (filter->is_vf) + ret = i40e_vf_mac_filter_set(pf, filter, 1); + i40e_pf_enable_irq0(hw); + break; + case RTE_ETH_FILTER_DELETE: + i40e_pf_disable_irq0(hw); + if (filter->is_vf) + ret = i40e_vf_mac_filter_set(pf, filter, 0); + i40e_pf_enable_irq0(hw); + break; + default: + PMD_DRV_LOG(ERR, "unknown operation %u", filter_op); + ret = I40E_ERR_PARAM; + break; + } + + return ret; +} + +static int +i40e_get_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size) +{ + struct i40e_pf *pf = I40E_VSI_TO_PF(vsi); + struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); + uint32_t reg; + int ret; + + if (!lut) + return -EINVAL; + + if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) { + ret = i40e_aq_get_rss_lut(hw, vsi->vsi_id, + vsi->type != I40E_VSI_SRIOV, + lut, lut_size); + if (ret) { + PMD_DRV_LOG(ERR, "Failed to get RSS lookup table"); + return ret; + } + } else { + uint32_t *lut_dw = (uint32_t *)lut; + uint16_t i, lut_size_dw = lut_size / 4; + + if (vsi->type == I40E_VSI_SRIOV) { + for (i = 0; i <= lut_size_dw; i++) { + reg = I40E_VFQF_HLUT1(i, vsi->user_param); + lut_dw[i] = i40e_read_rx_ctl(hw, reg); + } + } else { + for (i = 0; i < lut_size_dw; i++) + lut_dw[i] = I40E_READ_REG(hw, + I40E_PFQF_HLUT(i)); + } + } + + return 0; +} + +int +i40e_set_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size) +{ + struct i40e_pf *pf; + struct i40e_hw *hw; + int ret; + + if (!vsi || !lut) + return -EINVAL; + + pf = I40E_VSI_TO_PF(vsi); + hw = I40E_VSI_TO_HW(vsi); + + if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) { + ret = i40e_aq_set_rss_lut(hw, vsi->vsi_id, + vsi->type != I40E_VSI_SRIOV, + lut, lut_size); + if (ret) { + PMD_DRV_LOG(ERR, "Failed to set RSS lookup table"); + return ret; + } + } else { + uint32_t *lut_dw = (uint32_t *)lut; + uint16_t i, lut_size_dw = lut_size / 4; + + if (vsi->type == I40E_VSI_SRIOV) { + for (i = 0; i < lut_size_dw; i++) + I40E_WRITE_REG( + hw, + I40E_VFQF_HLUT1(i, vsi->user_param), + lut_dw[i]); + } else { + for (i = 0; i < lut_size_dw; i++) + I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i), + lut_dw[i]); + } + I40E_WRITE_FLUSH(hw); + } + + return 0; +} + +static int +i40e_dev_rss_reta_update(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + uint16_t i, lut_size = pf->hash_lut_size; + uint16_t idx, shift; + uint8_t *lut; + int ret; + + if (reta_size != lut_size || + reta_size > ETH_RSS_RETA_SIZE_512) { + PMD_DRV_LOG(ERR, + "The size of hash lookup table configured (%d) doesn't match the number hardware can supported (%d)", + reta_size, lut_size); + return -EINVAL; + } + + lut = rte_zmalloc("i40e_rss_lut", reta_size, 0); + if (!lut) { + PMD_DRV_LOG(ERR, "No memory can be allocated"); + return -ENOMEM; + } + ret = i40e_get_rss_lut(pf->main_vsi, lut, reta_size); + if (ret) + goto out; + for (i = 0; i < reta_size; i++) { + idx = i / RTE_RETA_GROUP_SIZE; + shift = i % RTE_RETA_GROUP_SIZE; + if (reta_conf[idx].mask & (1ULL << shift)) + lut[i] = 
reta_conf[idx].reta[shift]; + } + ret = i40e_set_rss_lut(pf->main_vsi, lut, reta_size); + + pf->adapter->rss_reta_updated = 1; + +out: + rte_free(lut); + + return ret; +} + +static int +i40e_dev_rss_reta_query(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + uint16_t i, lut_size = pf->hash_lut_size; + uint16_t idx, shift; + uint8_t *lut; + int ret; + + if (reta_size != lut_size || + reta_size > ETH_RSS_RETA_SIZE_512) { + PMD_DRV_LOG(ERR, + "The size of hash lookup table configured (%d) doesn't match the number hardware can supported (%d)", + reta_size, lut_size); + return -EINVAL; + } + + lut = rte_zmalloc("i40e_rss_lut", reta_size, 0); + if (!lut) { + PMD_DRV_LOG(ERR, "No memory can be allocated"); + return -ENOMEM; + } + + ret = i40e_get_rss_lut(pf->main_vsi, lut, reta_size); + if (ret) + goto out; + for (i = 0; i < reta_size; i++) { + idx = i / RTE_RETA_GROUP_SIZE; + shift = i % RTE_RETA_GROUP_SIZE; + if (reta_conf[idx].mask & (1ULL << shift)) + reta_conf[idx].reta[shift] = lut[i]; + } + +out: + rte_free(lut); + + return ret; +} + +/** + * i40e_allocate_dma_mem_d - specific memory alloc for shared code (base driver) + * @hw: pointer to the HW structure + * @mem: pointer to mem struct to fill out + * @size: size of memory requested + * @alignment: what to align the allocation to + **/ +enum i40e_status_code +i40e_allocate_dma_mem_d(__rte_unused struct i40e_hw *hw, + struct i40e_dma_mem *mem, + u64 size, + u32 alignment) +{ + const struct rte_memzone *mz = NULL; + char z_name[RTE_MEMZONE_NAMESIZE]; + + if (!mem) + return I40E_ERR_PARAM; + + snprintf(z_name, sizeof(z_name), "i40e_dma_%"PRIu64, rte_rand()); + mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY, + RTE_MEMZONE_IOVA_CONTIG, alignment, RTE_PGSIZE_2M); + if (!mz) + return I40E_ERR_NO_MEMORY; + + mem->size = size; + mem->va = mz->addr; + mem->pa = mz->iova; + mem->zone = (const void *)mz; + PMD_DRV_LOG(DEBUG, + "memzone %s allocated with physical address: %"PRIu64, + mz->name, mem->pa); + + return I40E_SUCCESS; +} + +/** + * i40e_free_dma_mem_d - specific memory free for shared code (base driver) + * @hw: pointer to the HW structure + * @mem: ptr to mem struct to free + **/ +enum i40e_status_code +i40e_free_dma_mem_d(__rte_unused struct i40e_hw *hw, + struct i40e_dma_mem *mem) +{ + if (!mem) + return I40E_ERR_PARAM; + + PMD_DRV_LOG(DEBUG, + "memzone %s to be freed with physical address: %"PRIu64, + ((const struct rte_memzone *)mem->zone)->name, mem->pa); + rte_memzone_free((const struct rte_memzone *)mem->zone); + mem->zone = NULL; + mem->va = NULL; + mem->pa = (u64)0; + + return I40E_SUCCESS; +} + +/** + * i40e_allocate_virt_mem_d - specific memory alloc for shared code (base driver) + * @hw: pointer to the HW structure + * @mem: pointer to mem struct to fill out + * @size: size of memory requested + **/ +enum i40e_status_code +i40e_allocate_virt_mem_d(__rte_unused struct i40e_hw *hw, + struct i40e_virt_mem *mem, + u32 size) +{ + if (!mem) + return I40E_ERR_PARAM; + + mem->size = size; + mem->va = rte_zmalloc("i40e", size, 0); + + if (mem->va) + return I40E_SUCCESS; + else + return I40E_ERR_NO_MEMORY; +} + +/** + * i40e_free_virt_mem_d - specific memory free for shared code (base driver) + * @hw: pointer to the HW structure + * @mem: pointer to mem struct to free + **/ +enum i40e_status_code +i40e_free_virt_mem_d(__rte_unused struct i40e_hw *hw, + struct i40e_virt_mem *mem) +{ + if (!mem) + return 
I40E_ERR_PARAM; + + rte_free(mem->va); + mem->va = NULL; + + return I40E_SUCCESS; +} + +void +i40e_init_spinlock_d(struct i40e_spinlock *sp) +{ + rte_spinlock_init(&sp->spinlock); +} + +void +i40e_acquire_spinlock_d(struct i40e_spinlock *sp) +{ + rte_spinlock_lock(&sp->spinlock); +} + +void +i40e_release_spinlock_d(struct i40e_spinlock *sp) +{ + rte_spinlock_unlock(&sp->spinlock); +} + +void +i40e_destroy_spinlock_d(__rte_unused struct i40e_spinlock *sp) +{ + return; +} + +/** + * Get the hardware capabilities, which will be parsed + * and saved into struct i40e_hw. + */ +static int +i40e_get_cap(struct i40e_hw *hw) +{ + struct i40e_aqc_list_capabilities_element_resp *buf; + uint16_t len, size = 0; + int ret; + + /* Calculate a huge enough buff for saving response data temporarily */ + len = sizeof(struct i40e_aqc_list_capabilities_element_resp) * + I40E_MAX_CAP_ELE_NUM; + buf = rte_zmalloc("i40e", len, 0); + if (!buf) { + PMD_DRV_LOG(ERR, "Failed to allocate memory"); + return I40E_ERR_NO_MEMORY; + } + + /* Get, parse the capabilities and save it to hw */ + ret = i40e_aq_discover_capabilities(hw, buf, len, &size, + i40e_aqc_opc_list_func_capabilities, NULL); + if (ret != I40E_SUCCESS) + PMD_DRV_LOG(ERR, "Failed to discover capabilities"); + + /* Free the temporary buffer after being used */ + rte_free(buf); + + return ret; +} + +#define RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF 4 + +static int i40e_pf_parse_vf_queue_number_handler(const char *key, + const char *value, + void *opaque) +{ + struct i40e_pf *pf; + unsigned long num; + char *end; + + pf = (struct i40e_pf *)opaque; + RTE_SET_USED(key); + + errno = 0; + num = strtoul(value, &end, 0); + if (errno != 0 || end == value || *end != 0) { + PMD_DRV_LOG(WARNING, "Wrong VF queue number = %s, Now it is " + "kept the value = %hu", value, pf->vf_nb_qp_max); + return -(EINVAL); + } + + if (num <= I40E_MAX_QP_NUM_PER_VF && rte_is_power_of_2(num)) + pf->vf_nb_qp_max = (uint16_t)num; + else + /* here return 0 to make next valid same argument work */ + PMD_DRV_LOG(WARNING, "Wrong VF queue number = %lu, it must be " + "power of 2 and equal or less than 16 !, Now it is " + "kept the value = %hu", num, pf->vf_nb_qp_max); + + return 0; +} + +static int i40e_pf_config_vf_rxq_number(struct rte_eth_dev *dev) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct rte_kvargs *kvlist; + int kvargs_count; + + /* set default queue number per VF as 4 */ + pf->vf_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF; + + if (dev->device->devargs == NULL) + return 0; + + kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys); + if (kvlist == NULL) + return -(EINVAL); + + kvargs_count = rte_kvargs_count(kvlist, ETH_I40E_QUEUE_NUM_PER_VF_ARG); + if (!kvargs_count) { + rte_kvargs_free(kvlist); + return 0; + } + + if (kvargs_count > 1) + PMD_DRV_LOG(WARNING, "More than one argument \"%s\" and only " + "the first invalid or last valid one is used !", + ETH_I40E_QUEUE_NUM_PER_VF_ARG); + + rte_kvargs_process(kvlist, ETH_I40E_QUEUE_NUM_PER_VF_ARG, + i40e_pf_parse_vf_queue_number_handler, pf); + + rte_kvargs_free(kvlist); + + return 0; +} + +static int +i40e_pf_parameter_init(struct rte_eth_dev *dev) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + uint16_t qp_count = 0, vsi_count = 0; + + if (pci_dev->max_vfs && !hw->func_caps.sr_iov_1_1) { + PMD_INIT_LOG(ERR, "HW configuration doesn't support SRIOV"); + return -EINVAL; 
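/*
 * Editor's sketch (not part of the patch): the devargs handler above
 * accepts a per-VF queue count only when the string parses cleanly and
 * the value is a power of two no larger than the per-VF maximum. The
 * standalone validator below mirrors that check; the limit of 16 is
 * taken from the handler's log message and the helper name is an
 * assumption.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdlib.h>

#define MAX_QP_PER_VF_SKETCH 16UL

static bool
parse_vf_queue_num_sketch(const char *value, unsigned long *out)
{
	char *end;
	unsigned long num;

	errno = 0;
	num = strtoul(value, &end, 0);
	if (errno != 0 || end == value || *end != '\0')
		return false;	/* not a clean number */

	/* Must be a power of two and within the per-VF limit. */
	if (num == 0 || num > MAX_QP_PER_VF_SKETCH || (num & (num - 1)) != 0)
		return false;

	*out = num;
	return true;
}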
+ } + + i40e_pf_config_vf_rxq_number(dev); + + /* Add the parameter init for LFC */ + pf->fc_conf.pause_time = I40E_DEFAULT_PAUSE_TIME; + pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] = I40E_DEFAULT_HIGH_WATER; + pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] = I40E_DEFAULT_LOW_WATER; + + pf->flags = I40E_FLAG_HEADER_SPLIT_DISABLED; + pf->max_num_vsi = hw->func_caps.num_vsis; + pf->lan_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_PF; + pf->vmdq_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM; + + /* FDir queue/VSI allocation */ + pf->fdir_qp_offset = 0; + if (hw->func_caps.fd) { + pf->flags |= I40E_FLAG_FDIR; + pf->fdir_nb_qps = I40E_DEFAULT_QP_NUM_FDIR; + } else { + pf->fdir_nb_qps = 0; + } + qp_count += pf->fdir_nb_qps; + vsi_count += 1; + + /* LAN queue/VSI allocation */ + pf->lan_qp_offset = pf->fdir_qp_offset + pf->fdir_nb_qps; + if (!hw->func_caps.rss) { + pf->lan_nb_qps = 1; + } else { + pf->flags |= I40E_FLAG_RSS; + if (hw->mac.type == I40E_MAC_X722) + pf->flags |= I40E_FLAG_RSS_AQ_CAPABLE; + pf->lan_nb_qps = pf->lan_nb_qp_max; + } + qp_count += pf->lan_nb_qps; + vsi_count += 1; + + /* VF queue/VSI allocation */ + pf->vf_qp_offset = pf->lan_qp_offset + pf->lan_nb_qps; + if (hw->func_caps.sr_iov_1_1 && pci_dev->max_vfs) { + pf->flags |= I40E_FLAG_SRIOV; + pf->vf_nb_qps = pf->vf_nb_qp_max; + pf->vf_num = pci_dev->max_vfs; + PMD_DRV_LOG(DEBUG, + "%u VF VSIs, %u queues per VF VSI, in total %u queues", + pf->vf_num, pf->vf_nb_qps, pf->vf_nb_qps * pf->vf_num); + } else { + pf->vf_nb_qps = 0; + pf->vf_num = 0; + } + qp_count += pf->vf_nb_qps * pf->vf_num; + vsi_count += pf->vf_num; + + /* VMDq queue/VSI allocation */ + pf->vmdq_qp_offset = pf->vf_qp_offset + pf->vf_nb_qps * pf->vf_num; + pf->vmdq_nb_qps = 0; + pf->max_nb_vmdq_vsi = 0; + if (hw->func_caps.vmdq) { + if (qp_count < hw->func_caps.num_tx_qp && + vsi_count < hw->func_caps.num_vsis) { + pf->max_nb_vmdq_vsi = (hw->func_caps.num_tx_qp - + qp_count) / pf->vmdq_nb_qp_max; + + /* Limit the maximum number of VMDq vsi to the maximum + * ethdev can support + */ + pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi, + hw->func_caps.num_vsis - vsi_count); + pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi, + ETH_64_POOLS); + if (pf->max_nb_vmdq_vsi) { + pf->flags |= I40E_FLAG_VMDQ; + pf->vmdq_nb_qps = pf->vmdq_nb_qp_max; + PMD_DRV_LOG(DEBUG, + "%u VMDQ VSIs, %u queues per VMDQ VSI, in total %u queues", + pf->max_nb_vmdq_vsi, pf->vmdq_nb_qps, + pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi); + } else { + PMD_DRV_LOG(INFO, + "No enough queues left for VMDq"); + } + } else { + PMD_DRV_LOG(INFO, "No queue or VSI left for VMDq"); + } + } + qp_count += pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi; + vsi_count += pf->max_nb_vmdq_vsi; + + if (hw->func_caps.dcb) + pf->flags |= I40E_FLAG_DCB; + + if (qp_count > hw->func_caps.num_tx_qp) { + PMD_DRV_LOG(ERR, + "Failed to allocate %u queues, which exceeds the hardware maximum %u", + qp_count, hw->func_caps.num_tx_qp); + return -EINVAL; + } + if (vsi_count > hw->func_caps.num_vsis) { + PMD_DRV_LOG(ERR, + "Failed to allocate %u VSIs, which exceeds the hardware maximum %u", + vsi_count, hw->func_caps.num_vsis); + return -EINVAL; + } + + return 0; +} + +static int +i40e_pf_get_switch_config(struct i40e_pf *pf) +{ + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + struct i40e_aqc_get_switch_config_resp *switch_config; + struct i40e_aqc_switch_config_element_resp *element; + uint16_t start_seid = 0, num_reported; + int ret; + + switch_config = (struct i40e_aqc_get_switch_config_resp *)\ + rte_zmalloc("i40e", I40E_AQ_LARGE_BUF, 0); + if 
(!switch_config) { + PMD_DRV_LOG(ERR, "Failed to allocated memory"); + return -ENOMEM; + } + + /* Get the switch configurations */ + ret = i40e_aq_get_switch_config(hw, switch_config, + I40E_AQ_LARGE_BUF, &start_seid, NULL); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "Failed to get switch configurations"); + goto fail; + } + num_reported = rte_le_to_cpu_16(switch_config->header.num_reported); + if (num_reported != 1) { /* The number should be 1 */ + PMD_DRV_LOG(ERR, "Wrong number of switch config reported"); + goto fail; + } + + /* Parse the switch configuration elements */ + element = &(switch_config->element[0]); + if (element->element_type == I40E_SWITCH_ELEMENT_TYPE_VSI) { + pf->mac_seid = rte_le_to_cpu_16(element->uplink_seid); + pf->main_vsi_seid = rte_le_to_cpu_16(element->seid); + } else + PMD_DRV_LOG(INFO, "Unknown element type"); + +fail: + rte_free(switch_config); + + return ret; +} + +static int +i40e_res_pool_init (struct i40e_res_pool_info *pool, uint32_t base, + uint32_t num) +{ + struct pool_entry *entry; + + if (pool == NULL || num == 0) + return -EINVAL; + + entry = rte_zmalloc("i40e", sizeof(*entry), 0); + if (entry == NULL) { + PMD_DRV_LOG(ERR, "Failed to allocate memory for resource pool"); + return -ENOMEM; + } + + /* queue heap initialize */ + pool->num_free = num; + pool->num_alloc = 0; + pool->base = base; + LIST_INIT(&pool->alloc_list); + LIST_INIT(&pool->free_list); + + /* Initialize element */ + entry->base = 0; + entry->len = num; + + LIST_INSERT_HEAD(&pool->free_list, entry, next); + return 0; +} + +static void +i40e_res_pool_destroy(struct i40e_res_pool_info *pool) +{ + struct pool_entry *entry, *next_entry; + + if (pool == NULL) + return; + + for (entry = LIST_FIRST(&pool->alloc_list); + entry && (next_entry = LIST_NEXT(entry, next), 1); + entry = next_entry) { + LIST_REMOVE(entry, next); + rte_free(entry); + } + + for (entry = LIST_FIRST(&pool->free_list); + entry && (next_entry = LIST_NEXT(entry, next), 1); + entry = next_entry) { + LIST_REMOVE(entry, next); + rte_free(entry); + } + + pool->num_free = 0; + pool->num_alloc = 0; + pool->base = 0; + LIST_INIT(&pool->alloc_list); + LIST_INIT(&pool->free_list); +} + +static int +i40e_res_pool_free(struct i40e_res_pool_info *pool, + uint32_t base) +{ + struct pool_entry *entry, *next, *prev, *valid_entry = NULL; + uint32_t pool_offset; + uint16_t len; + int insert; + + if (pool == NULL) { + PMD_DRV_LOG(ERR, "Invalid parameter"); + return -EINVAL; + } + + pool_offset = base - pool->base; + /* Lookup in alloc list */ + LIST_FOREACH(entry, &pool->alloc_list, next) { + if (entry->base == pool_offset) { + valid_entry = entry; + LIST_REMOVE(entry, next); + break; + } + } + + /* Not find, return */ + if (valid_entry == NULL) { + PMD_DRV_LOG(ERR, "Failed to find entry"); + return -EINVAL; + } + + /** + * Found it, move it to free list and try to merge. + * In order to make merge easier, always sort it by qbase. + * Find adjacent prev and last entries. 
+ */ + prev = next = NULL; + LIST_FOREACH(entry, &pool->free_list, next) { + if (entry->base > valid_entry->base) { + next = entry; + break; + } + prev = entry; + } + + insert = 0; + len = valid_entry->len; + /* Try to merge with next one*/ + if (next != NULL) { + /* Merge with next one */ + if (valid_entry->base + len == next->base) { + next->base = valid_entry->base; + next->len += len; + rte_free(valid_entry); + valid_entry = next; + insert = 1; + } + } + + if (prev != NULL) { + /* Merge with previous one */ + if (prev->base + prev->len == valid_entry->base) { + prev->len += len; + /* If it merge with next one, remove next node */ + if (insert == 1) { + LIST_REMOVE(valid_entry, next); + rte_free(valid_entry); + valid_entry = NULL; + } else { + rte_free(valid_entry); + valid_entry = NULL; + insert = 1; + } + } + } + + /* Not find any entry to merge, insert */ + if (insert == 0) { + if (prev != NULL) + LIST_INSERT_AFTER(prev, valid_entry, next); + else if (next != NULL) + LIST_INSERT_BEFORE(next, valid_entry, next); + else /* It's empty list, insert to head */ + LIST_INSERT_HEAD(&pool->free_list, valid_entry, next); + } + + pool->num_free += len; + pool->num_alloc -= len; + + return 0; +} + +static int +i40e_res_pool_alloc(struct i40e_res_pool_info *pool, + uint16_t num) +{ + struct pool_entry *entry, *valid_entry; + + if (pool == NULL || num == 0) { + PMD_DRV_LOG(ERR, "Invalid parameter"); + return -EINVAL; + } + + if (pool->num_free < num) { + PMD_DRV_LOG(ERR, "No resource. ask:%u, available:%u", + num, pool->num_free); + return -ENOMEM; + } + + valid_entry = NULL; + /* Lookup in free list and find most fit one */ + LIST_FOREACH(entry, &pool->free_list, next) { + if (entry->len >= num) { + /* Find best one */ + if (entry->len == num) { + valid_entry = entry; + break; + } + if (valid_entry == NULL || valid_entry->len > entry->len) + valid_entry = entry; + } + } + + /* Not find one to satisfy the request, return */ + if (valid_entry == NULL) { + PMD_DRV_LOG(ERR, "No valid entry found"); + return -ENOMEM; + } + /** + * The entry have equal queue number as requested, + * remove it from alloc_list. + */ + if (valid_entry->len == num) { + LIST_REMOVE(valid_entry, next); + } else { + /** + * The entry have more numbers than requested, + * create a new entry for alloc_list and minus its + * queue base and number in free_list. 
+ */ + entry = rte_zmalloc("res_pool", sizeof(*entry), 0); + if (entry == NULL) { + PMD_DRV_LOG(ERR, + "Failed to allocate memory for resource pool"); + return -ENOMEM; + } + entry->base = valid_entry->base; + entry->len = num; + valid_entry->base += num; + valid_entry->len -= num; + valid_entry = entry; + } + + /* Insert it into alloc list, not sorted */ + LIST_INSERT_HEAD(&pool->alloc_list, valid_entry, next); + + pool->num_free -= valid_entry->len; + pool->num_alloc += valid_entry->len; + + return valid_entry->base + pool->base; +} + +/** + * bitmap_is_subset - Check whether src2 is subset of src1 + **/ +static inline int +bitmap_is_subset(uint8_t src1, uint8_t src2) +{ + return !((src1 ^ src2) & src2); +} + +static enum i40e_status_code +validate_tcmap_parameter(struct i40e_vsi *vsi, uint8_t enabled_tcmap) +{ + struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); + + /* If DCB is not supported, only default TC is supported */ + if (!hw->func_caps.dcb && enabled_tcmap != I40E_DEFAULT_TCMAP) { + PMD_DRV_LOG(ERR, "DCB is not enabled, only TC0 is supported"); + return I40E_NOT_SUPPORTED; + } + + if (!bitmap_is_subset(hw->func_caps.enabled_tcmap, enabled_tcmap)) { + PMD_DRV_LOG(ERR, + "Enabled TC map 0x%x not applicable to HW support 0x%x", + hw->func_caps.enabled_tcmap, enabled_tcmap); + return I40E_NOT_SUPPORTED; + } + return I40E_SUCCESS; +} + +int +i40e_vsi_vlan_pvid_set(struct i40e_vsi *vsi, + struct i40e_vsi_vlan_pvid_info *info) +{ + struct i40e_hw *hw; + struct i40e_vsi_context ctxt; + uint8_t vlan_flags = 0; + int ret; + + if (vsi == NULL || info == NULL) { + PMD_DRV_LOG(ERR, "invalid parameters"); + return I40E_ERR_PARAM; + } + + if (info->on) { + vsi->info.pvid = info->config.pvid; + /** + * If insert pvid is enabled, only tagged pkts are + * allowed to be sent out. + */ + vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID | + I40E_AQ_VSI_PVLAN_MODE_TAGGED; + } else { + vsi->info.pvid = 0; + if (info->config.reject.tagged == 0) + vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_TAGGED; + + if (info->config.reject.untagged == 0) + vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED; + } + vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_INSERT_PVID | + I40E_AQ_VSI_PVLAN_MODE_MASK); + vsi->info.port_vlan_flags |= vlan_flags; + vsi->info.valid_sections = + rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID); + memset(&ctxt, 0, sizeof(ctxt)); + rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info)); + ctxt.seid = vsi->seid; + + hw = I40E_VSI_TO_HW(vsi); + ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); + if (ret != I40E_SUCCESS) + PMD_DRV_LOG(ERR, "Failed to update VSI params"); + + return ret; +} + +static int +i40e_vsi_update_tc_bandwidth(struct i40e_vsi *vsi, uint8_t enabled_tcmap) +{ + struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); + int i, ret; + struct i40e_aqc_configure_vsi_tc_bw_data tc_bw_data; + + ret = validate_tcmap_parameter(vsi, enabled_tcmap); + if (ret != I40E_SUCCESS) + return ret; + + if (!vsi->seid) { + PMD_DRV_LOG(ERR, "seid not valid"); + return -EINVAL; + } + + memset(&tc_bw_data, 0, sizeof(tc_bw_data)); + tc_bw_data.tc_valid_bits = enabled_tcmap; + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) + tc_bw_data.tc_bw_credits[i] = + (enabled_tcmap & (1 << i)) ? 
1 : 0; + + ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &tc_bw_data, NULL); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "Failed to configure TC BW"); + return ret; + } + + rte_memcpy(vsi->info.qs_handle, tc_bw_data.qs_handles, + sizeof(vsi->info.qs_handle)); + return I40E_SUCCESS; +} + +static enum i40e_status_code +i40e_vsi_config_tc_queue_mapping(struct i40e_vsi *vsi, + struct i40e_aqc_vsi_properties_data *info, + uint8_t enabled_tcmap) +{ + enum i40e_status_code ret; + int i, total_tc = 0; + uint16_t qpnum_per_tc, bsf, qp_idx; + + ret = validate_tcmap_parameter(vsi, enabled_tcmap); + if (ret != I40E_SUCCESS) + return ret; + + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) + if (enabled_tcmap & (1 << i)) + total_tc++; + if (total_tc == 0) + total_tc = 1; + vsi->enabled_tc = enabled_tcmap; + + /* Number of queues per enabled TC */ + qpnum_per_tc = i40e_align_floor(vsi->nb_qps / total_tc); + qpnum_per_tc = RTE_MIN(qpnum_per_tc, I40E_MAX_Q_PER_TC); + bsf = rte_bsf32(qpnum_per_tc); + + /* Adjust the queue number to actual queues that can be applied */ + if (!(vsi->type == I40E_VSI_MAIN && total_tc == 1)) + vsi->nb_qps = qpnum_per_tc * total_tc; + + /** + * Configure TC and queue mapping parameters, for enabled TC, + * allocate qpnum_per_tc queues to this traffic. For disabled TC, + * default queue will serve it. + */ + qp_idx = 0; + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + if (vsi->enabled_tc & (1 << i)) { + info->tc_mapping[i] = rte_cpu_to_le_16((qp_idx << + I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) | + (bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)); + qp_idx += qpnum_per_tc; + } else + info->tc_mapping[i] = 0; + } + + /* Associate queue number with VSI */ + if (vsi->type == I40E_VSI_SRIOV) { + info->mapping_flags |= + rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_NONCONTIG); + for (i = 0; i < vsi->nb_qps; i++) + info->queue_mapping[i] = + rte_cpu_to_le_16(vsi->base_queue + i); + } else { + info->mapping_flags |= + rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG); + info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue); + } + info->valid_sections |= + rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID); + + return I40E_SUCCESS; +} + +static int +i40e_veb_release(struct i40e_veb *veb) +{ + struct i40e_vsi *vsi; + struct i40e_hw *hw; + + if (veb == NULL) + return -EINVAL; + + if (!TAILQ_EMPTY(&veb->head)) { + PMD_DRV_LOG(ERR, "VEB still has VSI attached, can't remove"); + return -EACCES; + } + /* associate_vsi field is NULL for floating VEB */ + if (veb->associate_vsi != NULL) { + vsi = veb->associate_vsi; + hw = I40E_VSI_TO_HW(vsi); + + vsi->uplink_seid = veb->uplink_seid; + vsi->veb = NULL; + } else { + veb->associate_pf->main_vsi->floating_veb = NULL; + hw = I40E_VSI_TO_HW(veb->associate_pf->main_vsi); + } + + i40e_aq_delete_element(hw, veb->seid, NULL); + rte_free(veb); + return I40E_SUCCESS; +} + +/* Setup a veb */ +static struct i40e_veb * +i40e_veb_setup(struct i40e_pf *pf, struct i40e_vsi *vsi) +{ + struct i40e_veb *veb; + int ret; + struct i40e_hw *hw; + + if (pf == NULL) { + PMD_DRV_LOG(ERR, + "veb setup failed, associated PF shouldn't null"); + return NULL; + } + hw = I40E_PF_TO_HW(pf); + + veb = rte_zmalloc("i40e_veb", sizeof(struct i40e_veb), 0); + if (!veb) { + PMD_DRV_LOG(ERR, "Failed to allocate memory for veb"); + goto fail; + } + + veb->associate_vsi = vsi; + veb->associate_pf = pf; + TAILQ_INIT(&veb->head); + veb->uplink_seid = vsi ? 
vsi->uplink_seid : 0; + + /* create floating veb if vsi is NULL */ + if (vsi != NULL) { + ret = i40e_aq_add_veb(hw, veb->uplink_seid, vsi->seid, + I40E_DEFAULT_TCMAP, false, + &veb->seid, false, NULL); + } else { + ret = i40e_aq_add_veb(hw, 0, 0, I40E_DEFAULT_TCMAP, + true, &veb->seid, false, NULL); + } + + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "Add veb failed, aq_err: %d", + hw->aq.asq_last_status); + goto fail; + } + veb->enabled_tc = I40E_DEFAULT_TCMAP; + + /* get statistics index */ + ret = i40e_aq_get_veb_parameters(hw, veb->seid, NULL, NULL, + &veb->stats_idx, NULL, NULL, NULL); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "Get veb statistics index failed, aq_err: %d", + hw->aq.asq_last_status); + goto fail; + } + /* Get VEB bandwidth, to be implemented */ + /* Now associated vsi binding to the VEB, set uplink to this VEB */ + if (vsi) + vsi->uplink_seid = veb->seid; + + return veb; +fail: + rte_free(veb); + return NULL; +} + +int +i40e_vsi_release(struct i40e_vsi *vsi) +{ + struct i40e_pf *pf; + struct i40e_hw *hw; + struct i40e_vsi_list *vsi_list; + void *temp; + int ret; + struct i40e_mac_filter *f; + uint16_t user_param; + + if (!vsi) + return I40E_SUCCESS; + + if (!vsi->adapter) + return -EFAULT; + + user_param = vsi->user_param; + + pf = I40E_VSI_TO_PF(vsi); + hw = I40E_VSI_TO_HW(vsi); + + /* VSI has child to attach, release child first */ + if (vsi->veb) { + TAILQ_FOREACH_SAFE(vsi_list, &vsi->veb->head, list, temp) { + if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS) + return -1; + } + i40e_veb_release(vsi->veb); + } + + if (vsi->floating_veb) { + TAILQ_FOREACH_SAFE(vsi_list, &vsi->floating_veb->head, list, temp) { + if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS) + return -1; + } + } + + /* Remove all macvlan filters of the VSI */ + i40e_vsi_remove_all_macvlan_filter(vsi); + TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) + rte_free(f); + + if (vsi->type != I40E_VSI_MAIN && + ((vsi->type != I40E_VSI_SRIOV) || + !pf->floating_veb_list[user_param])) { + /* Remove vsi from parent's sibling list */ + if (vsi->parent_vsi == NULL || vsi->parent_vsi->veb == NULL) { + PMD_DRV_LOG(ERR, "VSI's parent VSI is NULL"); + return I40E_ERR_PARAM; + } + TAILQ_REMOVE(&vsi->parent_vsi->veb->head, + &vsi->sib_vsi_list, list); + + /* Remove all switch element of the VSI */ + ret = i40e_aq_delete_element(hw, vsi->seid, NULL); + if (ret != I40E_SUCCESS) + PMD_DRV_LOG(ERR, "Failed to delete element"); + } + + if ((vsi->type == I40E_VSI_SRIOV) && + pf->floating_veb_list[user_param]) { + /* Remove vsi from parent's sibling list */ + if (vsi->parent_vsi == NULL || + vsi->parent_vsi->floating_veb == NULL) { + PMD_DRV_LOG(ERR, "VSI's parent VSI is NULL"); + return I40E_ERR_PARAM; + } + TAILQ_REMOVE(&vsi->parent_vsi->floating_veb->head, + &vsi->sib_vsi_list, list); + + /* Remove all switch element of the VSI */ + ret = i40e_aq_delete_element(hw, vsi->seid, NULL); + if (ret != I40E_SUCCESS) + PMD_DRV_LOG(ERR, "Failed to delete element"); + } + + i40e_res_pool_free(&pf->qp_pool, vsi->base_queue); + + if (vsi->type != I40E_VSI_SRIOV) + i40e_res_pool_free(&pf->msix_pool, vsi->msix_intr); + rte_free(vsi); + + return I40E_SUCCESS; +} + +static int +i40e_update_default_filter_setting(struct i40e_vsi *vsi) +{ + struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); + struct i40e_aqc_remove_macvlan_element_data def_filter; + struct i40e_mac_filter_info filter; + int ret; + + if (vsi->type != I40E_VSI_MAIN) + return I40E_ERR_CONFIG; + memset(&def_filter, 0, sizeof(def_filter)); + 
rte_memcpy(def_filter.mac_addr, hw->mac.perm_addr, + ETH_ADDR_LEN); + def_filter.vlan_tag = 0; + def_filter.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH | + I40E_AQC_MACVLAN_DEL_IGNORE_VLAN; + ret = i40e_aq_remove_macvlan(hw, vsi->seid, &def_filter, 1, NULL); + if (ret != I40E_SUCCESS) { + struct i40e_mac_filter *f; + struct rte_ether_addr *mac; + + PMD_DRV_LOG(DEBUG, + "Cannot remove the default macvlan filter"); + /* It needs to add the permanent mac into mac list */ + f = rte_zmalloc("macv_filter", sizeof(*f), 0); + if (f == NULL) { + PMD_DRV_LOG(ERR, "failed to allocate memory"); + return I40E_ERR_NO_MEMORY; + } + mac = &f->mac_info.mac_addr; + rte_memcpy(&mac->addr_bytes, hw->mac.perm_addr, + ETH_ADDR_LEN); + f->mac_info.filter_type = RTE_MACVLAN_PERFECT_MATCH; + TAILQ_INSERT_TAIL(&vsi->mac_list, f, next); + vsi->mac_num++; + + return ret; + } + rte_memcpy(&filter.mac_addr, + (struct rte_ether_addr *)(hw->mac.perm_addr), ETH_ADDR_LEN); + filter.filter_type = RTE_MACVLAN_PERFECT_MATCH; + return i40e_vsi_add_mac(vsi, &filter); +} + +/* + * i40e_vsi_get_bw_config - Query VSI BW Information + * @vsi: the VSI to be queried + * + * Returns 0 on success, negative value on failure + */ +static enum i40e_status_code +i40e_vsi_get_bw_config(struct i40e_vsi *vsi) +{ + struct i40e_aqc_query_vsi_bw_config_resp bw_config; + struct i40e_aqc_query_vsi_ets_sla_config_resp ets_sla_config; + struct i40e_hw *hw = &vsi->adapter->hw; + i40e_status ret; + int i; + uint32_t bw_max; + + memset(&bw_config, 0, sizeof(bw_config)); + ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "VSI failed to get bandwidth configuration %u", + hw->aq.asq_last_status); + return ret; + } + + memset(&ets_sla_config, 0, sizeof(ets_sla_config)); + ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, + &ets_sla_config, NULL); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, + "VSI failed to get TC bandwdith configuration %u", + hw->aq.asq_last_status); + return ret; + } + + /* store and print out BW info */ + vsi->bw_info.bw_limit = rte_le_to_cpu_16(bw_config.port_bw_limit); + vsi->bw_info.bw_max = bw_config.max_bw; + PMD_DRV_LOG(DEBUG, "VSI bw limit:%u", vsi->bw_info.bw_limit); + PMD_DRV_LOG(DEBUG, "VSI max_bw:%u", vsi->bw_info.bw_max); + bw_max = rte_le_to_cpu_16(ets_sla_config.tc_bw_max[0]) | + (rte_le_to_cpu_16(ets_sla_config.tc_bw_max[1]) << + I40E_16_BIT_WIDTH); + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + vsi->bw_info.bw_ets_share_credits[i] = + ets_sla_config.share_credits[i]; + vsi->bw_info.bw_ets_credits[i] = + rte_le_to_cpu_16(ets_sla_config.credits[i]); + /* 4 bits per TC, 4th bit is reserved */ + vsi->bw_info.bw_ets_max[i] = + (uint8_t)((bw_max >> (i * I40E_4_BIT_WIDTH)) & + RTE_LEN2MASK(3, uint8_t)); + PMD_DRV_LOG(DEBUG, "\tVSI TC%u:share credits %u", i, + vsi->bw_info.bw_ets_share_credits[i]); + PMD_DRV_LOG(DEBUG, "\tVSI TC%u:credits %u", i, + vsi->bw_info.bw_ets_credits[i]); + PMD_DRV_LOG(DEBUG, "\tVSI TC%u: max credits: %u", i, + vsi->bw_info.bw_ets_max[i]); + } + + return I40E_SUCCESS; +} + +/* i40e_enable_pf_lb + * @pf: pointer to the pf structure + * + * allow loopback on pf + */ +static inline void +i40e_enable_pf_lb(struct i40e_pf *pf) +{ + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + struct i40e_vsi_context ctxt; + int ret; + + /* Use the FW API if FW >= v5.0 */ + if (hw->aq.fw_maj_ver < 5 && hw->mac.type != I40E_MAC_X722) { + PMD_INIT_LOG(ERR, "FW < v5.0, cannot enable loopback"); + return; + } + + memset(&ctxt, 0, sizeof(ctxt)); + ctxt.seid 
= pf->main_vsi_seid; + ctxt.pf_num = hw->pf_id; + ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL); + if (ret) { + PMD_DRV_LOG(ERR, "cannot get pf vsi config, err %d, aq_err %d", + ret, hw->aq.asq_last_status); + return; + } + ctxt.flags = I40E_AQ_VSI_TYPE_PF; + ctxt.info.valid_sections = + rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID); + ctxt.info.switch_id |= + rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); + + ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); + if (ret) + PMD_DRV_LOG(ERR, "update vsi switch failed, aq_err=%d", + hw->aq.asq_last_status); +} + +/* Setup a VSI */ +struct i40e_vsi * +i40e_vsi_setup(struct i40e_pf *pf, + enum i40e_vsi_type type, + struct i40e_vsi *uplink_vsi, + uint16_t user_param) +{ + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + struct i40e_vsi *vsi; + struct i40e_mac_filter_info filter; + int ret; + struct i40e_vsi_context ctxt; + struct rte_ether_addr broadcast = + {.addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}}; + + if (type != I40E_VSI_MAIN && type != I40E_VSI_SRIOV && + uplink_vsi == NULL) { + PMD_DRV_LOG(ERR, + "VSI setup failed, VSI link shouldn't be NULL"); + return NULL; + } + + if (type == I40E_VSI_MAIN && uplink_vsi != NULL) { + PMD_DRV_LOG(ERR, + "VSI setup failed, MAIN VSI uplink VSI should be NULL"); + return NULL; + } + + /* two situations + * 1.type is not MAIN and uplink vsi is not NULL + * If uplink vsi didn't setup VEB, create one first under veb field + * 2.type is SRIOV and the uplink is NULL + * If floating VEB is NULL, create one veb under floating veb field + */ + + if (type != I40E_VSI_MAIN && uplink_vsi != NULL && + uplink_vsi->veb == NULL) { + uplink_vsi->veb = i40e_veb_setup(pf, uplink_vsi); + + if (uplink_vsi->veb == NULL) { + PMD_DRV_LOG(ERR, "VEB setup failed"); + return NULL; + } + /* set ALLOWLOOPBACk on pf, when veb is created */ + i40e_enable_pf_lb(pf); + } + + if (type == I40E_VSI_SRIOV && uplink_vsi == NULL && + pf->main_vsi->floating_veb == NULL) { + pf->main_vsi->floating_veb = i40e_veb_setup(pf, uplink_vsi); + + if (pf->main_vsi->floating_veb == NULL) { + PMD_DRV_LOG(ERR, "VEB setup failed"); + return NULL; + } + } + + vsi = rte_zmalloc("i40e_vsi", sizeof(struct i40e_vsi), 0); + if (!vsi) { + PMD_DRV_LOG(ERR, "Failed to allocate memory for vsi"); + return NULL; + } + TAILQ_INIT(&vsi->mac_list); + vsi->type = type; + vsi->adapter = I40E_PF_TO_ADAPTER(pf); + vsi->max_macaddrs = I40E_NUM_MACADDR_MAX; + vsi->parent_vsi = uplink_vsi ? uplink_vsi : pf->main_vsi; + vsi->user_param = user_param; + vsi->vlan_anti_spoof_on = 0; + vsi->vlan_filter_on = 0; + /* Allocate queues */ + switch (vsi->type) { + case I40E_VSI_MAIN : + vsi->nb_qps = pf->lan_nb_qps; + break; + case I40E_VSI_SRIOV : + vsi->nb_qps = pf->vf_nb_qps; + break; + case I40E_VSI_VMDQ2: + vsi->nb_qps = pf->vmdq_nb_qps; + break; + case I40E_VSI_FDIR: + vsi->nb_qps = pf->fdir_nb_qps; + break; + default: + goto fail_mem; + } + /* + * The filter status descriptor is reported in rx queue 0, + * while the tx queue for fdir filter programming has no + * such constraints, can be non-zero queues. + * To simplify it, choose FDIR vsi use queue 0 pair. 
+ * To make sure it will use queue 0 pair, queue allocation + * need be done before this function is called + */ + if (type != I40E_VSI_FDIR) { + ret = i40e_res_pool_alloc(&pf->qp_pool, vsi->nb_qps); + if (ret < 0) { + PMD_DRV_LOG(ERR, "VSI %d allocate queue failed %d", + vsi->seid, ret); + goto fail_mem; + } + vsi->base_queue = ret; + } else + vsi->base_queue = I40E_FDIR_QUEUE_ID; + + /* VF has MSIX interrupt in VF range, don't allocate here */ + if (type == I40E_VSI_MAIN) { + if (pf->support_multi_driver) { + /* If support multi-driver, need to use INT0 instead of + * allocating from msix pool. The Msix pool is init from + * INT1, so it's OK just set msix_intr to 0 and nb_msix + * to 1 without calling i40e_res_pool_alloc. + */ + vsi->msix_intr = 0; + vsi->nb_msix = 1; + } else { + ret = i40e_res_pool_alloc(&pf->msix_pool, + RTE_MIN(vsi->nb_qps, + RTE_MAX_RXTX_INTR_VEC_ID)); + if (ret < 0) { + PMD_DRV_LOG(ERR, + "VSI MAIN %d get heap failed %d", + vsi->seid, ret); + goto fail_queue_alloc; + } + vsi->msix_intr = ret; + vsi->nb_msix = RTE_MIN(vsi->nb_qps, + RTE_MAX_RXTX_INTR_VEC_ID); + } + } else if (type != I40E_VSI_SRIOV) { + ret = i40e_res_pool_alloc(&pf->msix_pool, 1); + if (ret < 0) { + PMD_DRV_LOG(ERR, "VSI %d get heap failed %d", vsi->seid, ret); + goto fail_queue_alloc; + } + vsi->msix_intr = ret; + vsi->nb_msix = 1; + } else { + vsi->msix_intr = 0; + vsi->nb_msix = 0; + } + + /* Add VSI */ + if (type == I40E_VSI_MAIN) { + /* For main VSI, no need to add since it's default one */ + vsi->uplink_seid = pf->mac_seid; + vsi->seid = pf->main_vsi_seid; + /* Bind queues with specific MSIX interrupt */ + /** + * Needs 2 interrupt at least, one for misc cause which will + * enabled from OS side, Another for queues binding the + * interrupt from device side only. 
+ */ + + /* Get default VSI parameters from hardware */ + memset(&ctxt, 0, sizeof(ctxt)); + ctxt.seid = vsi->seid; + ctxt.pf_num = hw->pf_id; + ctxt.uplink_seid = vsi->uplink_seid; + ctxt.vf_num = 0; + ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "Failed to get VSI params"); + goto fail_msix_alloc; + } + rte_memcpy(&vsi->info, &ctxt.info, + sizeof(struct i40e_aqc_vsi_properties_data)); + vsi->vsi_id = ctxt.vsi_number; + vsi->info.valid_sections = 0; + + /* Configure tc, enabled TC0 only */ + if (i40e_vsi_update_tc_bandwidth(vsi, I40E_DEFAULT_TCMAP) != + I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "Failed to update TC bandwidth"); + goto fail_msix_alloc; + } + + /* TC, queue mapping */ + memset(&ctxt, 0, sizeof(ctxt)); + vsi->info.valid_sections |= + rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID); + vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL | + I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH; + rte_memcpy(&ctxt.info, &vsi->info, + sizeof(struct i40e_aqc_vsi_properties_data)); + ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info, + I40E_DEFAULT_TCMAP); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, + "Failed to configure TC queue mapping"); + goto fail_msix_alloc; + } + ctxt.seid = vsi->seid; + ctxt.pf_num = hw->pf_id; + ctxt.uplink_seid = vsi->uplink_seid; + ctxt.vf_num = 0; + + /* Update VSI parameters */ + ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "Failed to update VSI params"); + goto fail_msix_alloc; + } + + rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping, + sizeof(vsi->info.tc_mapping)); + rte_memcpy(&vsi->info.queue_mapping, + &ctxt.info.queue_mapping, + sizeof(vsi->info.queue_mapping)); + vsi->info.mapping_flags = ctxt.info.mapping_flags; + vsi->info.valid_sections = 0; + + rte_memcpy(pf->dev_addr.addr_bytes, hw->mac.perm_addr, + ETH_ADDR_LEN); + + /** + * Updating default filter settings are necessary to prevent + * reception of tagged packets. + * Some old firmware configurations load a default macvlan + * filter which accepts both tagged and untagged packets. + * The updating is to use a normal filter instead if needed. + * For NVM 4.2.2 or after, the updating is not needed anymore. + * The firmware with correct configurations load the default + * macvlan filter which is expected and cannot be removed. 
+ */ + i40e_update_default_filter_setting(vsi); + i40e_config_qinq(hw, vsi); + } else if (type == I40E_VSI_SRIOV) { + memset(&ctxt, 0, sizeof(ctxt)); + /** + * For other VSI, the uplink_seid equals to uplink VSI's + * uplink_seid since they share same VEB + */ + if (uplink_vsi == NULL) + vsi->uplink_seid = pf->main_vsi->floating_veb->seid; + else + vsi->uplink_seid = uplink_vsi->uplink_seid; + ctxt.pf_num = hw->pf_id; + ctxt.vf_num = hw->func_caps.vf_base_id + user_param; + ctxt.uplink_seid = vsi->uplink_seid; + ctxt.connection_type = 0x1; + ctxt.flags = I40E_AQ_VSI_TYPE_VF; + + /* Use the VEB configuration if FW >= v5.0 */ + if (hw->aq.fw_maj_ver >= 5 || hw->mac.type == I40E_MAC_X722) { + /* Configure switch ID */ + ctxt.info.valid_sections |= + rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID); + ctxt.info.switch_id = + rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); + } + + /* Configure port/vlan */ + ctxt.info.valid_sections |= + rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID); + ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL; + ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info, + hw->func_caps.enabled_tcmap); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, + "Failed to configure TC queue mapping"); + goto fail_msix_alloc; + } + + ctxt.info.up_enable_bits = hw->func_caps.enabled_tcmap; + ctxt.info.valid_sections |= + rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID); + /** + * Since VSI is not created yet, only configure parameter, + * will add vsi below. + */ + + i40e_config_qinq(hw, vsi); + } else if (type == I40E_VSI_VMDQ2) { + memset(&ctxt, 0, sizeof(ctxt)); + /* + * For other VSI, the uplink_seid equals to uplink VSI's + * uplink_seid since they share same VEB + */ + vsi->uplink_seid = uplink_vsi->uplink_seid; + ctxt.pf_num = hw->pf_id; + ctxt.vf_num = 0; + ctxt.uplink_seid = vsi->uplink_seid; + ctxt.connection_type = 0x1; + ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2; + + ctxt.info.valid_sections |= + rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID); + /* user_param carries flag to enable loop back */ + if (user_param) { + ctxt.info.switch_id = + rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB); + ctxt.info.switch_id |= + rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); + } + + /* Configure port/vlan */ + ctxt.info.valid_sections |= + rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID); + ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL; + ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info, + I40E_DEFAULT_TCMAP); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, + "Failed to configure TC queue mapping"); + goto fail_msix_alloc; + } + ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP; + ctxt.info.valid_sections |= + rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID); + } else if (type == I40E_VSI_FDIR) { + memset(&ctxt, 0, sizeof(ctxt)); + vsi->uplink_seid = uplink_vsi->uplink_seid; + ctxt.pf_num = hw->pf_id; + ctxt.vf_num = 0; + ctxt.uplink_seid = vsi->uplink_seid; + ctxt.connection_type = 0x1; /* regular data port */ + ctxt.flags = I40E_AQ_VSI_TYPE_PF; + ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info, + I40E_DEFAULT_TCMAP); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, + "Failed to configure TC queue mapping."); + goto fail_msix_alloc; + } + ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP; + ctxt.info.valid_sections |= + rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID); + } else { + PMD_DRV_LOG(ERR, "VSI: Not support other type VSI yet"); + goto fail_msix_alloc; + } + + if (vsi->type != I40E_VSI_MAIN) { + ret = i40e_aq_add_vsi(hw, &ctxt, NULL); + if (ret != I40E_SUCCESS) { 
+ PMD_DRV_LOG(ERR, "add vsi failed, aq_err=%d", + hw->aq.asq_last_status); + goto fail_msix_alloc; + } + memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info)); + vsi->info.valid_sections = 0; + vsi->seid = ctxt.seid; + vsi->vsi_id = ctxt.vsi_number; + vsi->sib_vsi_list.vsi = vsi; + if (vsi->type == I40E_VSI_SRIOV && uplink_vsi == NULL) { + TAILQ_INSERT_TAIL(&pf->main_vsi->floating_veb->head, + &vsi->sib_vsi_list, list); + } else { + TAILQ_INSERT_TAIL(&uplink_vsi->veb->head, + &vsi->sib_vsi_list, list); + } + } + + /* MAC/VLAN configuration */ + rte_memcpy(&filter.mac_addr, &broadcast, RTE_ETHER_ADDR_LEN); + filter.filter_type = RTE_MACVLAN_PERFECT_MATCH; + + ret = i40e_vsi_add_mac(vsi, &filter); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter"); + goto fail_msix_alloc; + } + + /* Get VSI BW information */ + i40e_vsi_get_bw_config(vsi); + return vsi; +fail_msix_alloc: + i40e_res_pool_free(&pf->msix_pool,vsi->msix_intr); +fail_queue_alloc: + i40e_res_pool_free(&pf->qp_pool,vsi->base_queue); +fail_mem: + rte_free(vsi); + return NULL; +} + +/* Configure vlan filter on or off */ +int +i40e_vsi_config_vlan_filter(struct i40e_vsi *vsi, bool on) +{ + int i, num; + struct i40e_mac_filter *f; + void *temp; + struct i40e_mac_filter_info *mac_filter; + enum rte_mac_filter_type desired_filter; + int ret = I40E_SUCCESS; + + if (on) { + /* Filter to match MAC and VLAN */ + desired_filter = RTE_MACVLAN_PERFECT_MATCH; + } else { + /* Filter to match only MAC */ + desired_filter = RTE_MAC_PERFECT_MATCH; + } + + num = vsi->mac_num; + + mac_filter = rte_zmalloc("mac_filter_info_data", + num * sizeof(*mac_filter), 0); + if (mac_filter == NULL) { + PMD_DRV_LOG(ERR, "failed to allocate memory"); + return I40E_ERR_NO_MEMORY; + } + + i = 0; + + /* Remove all existing mac */ + TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) { + mac_filter[i] = f->mac_info; + ret = i40e_vsi_delete_mac(vsi, &f->mac_info.mac_addr); + if (ret) { + PMD_DRV_LOG(ERR, "Update VSI failed to %s vlan filter", + on ? "enable" : "disable"); + goto DONE; + } + i++; + } + + /* Override with new filter */ + for (i = 0; i < num; i++) { + mac_filter[i].filter_type = desired_filter; + ret = i40e_vsi_add_mac(vsi, &mac_filter[i]); + if (ret) { + PMD_DRV_LOG(ERR, "Update VSI failed to %s vlan filter", + on ? "enable" : "disable"); + goto DONE; + } + } + +DONE: + rte_free(mac_filter); + return ret; +} + +/* Configure vlan stripping on or off */ +int +i40e_vsi_config_vlan_stripping(struct i40e_vsi *vsi, bool on) +{ + struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); + struct i40e_vsi_context ctxt; + uint8_t vlan_flags; + int ret = I40E_SUCCESS; + + /* Check if it has been already on or off */ + if (vsi->info.valid_sections & + rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID)) { + if (on) { + if ((vsi->info.port_vlan_flags & + I40E_AQ_VSI_PVLAN_EMOD_MASK) == 0) + return 0; /* already on */ + } else { + if ((vsi->info.port_vlan_flags & + I40E_AQ_VSI_PVLAN_EMOD_MASK) == + I40E_AQ_VSI_PVLAN_EMOD_MASK) + return 0; /* already off */ + } + } + + if (on) + vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH; + else + vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_NOTHING; + vsi->info.valid_sections = + rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID); + vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_EMOD_MASK); + vsi->info.port_vlan_flags |= vlan_flags; + ctxt.seid = vsi->seid; + rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info)); + ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); + if (ret) + PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan stripping", + on ? 
"enable" : "disable"); + + return ret; +} + +static int +i40e_dev_init_vlan(struct rte_eth_dev *dev) +{ + struct rte_eth_dev_data *data = dev->data; + int ret; + int mask = 0; + + /* Apply vlan offload setting */ + mask = ETH_VLAN_STRIP_MASK | + ETH_VLAN_FILTER_MASK | + ETH_VLAN_EXTEND_MASK; + ret = i40e_vlan_offload_set(dev, mask); + if (ret) { + PMD_DRV_LOG(INFO, "Failed to update vlan offload"); + return ret; + } + + /* Apply pvid setting */ + ret = i40e_vlan_pvid_set(dev, data->dev_conf.txmode.pvid, + data->dev_conf.txmode.hw_vlan_insert_pvid); + if (ret) + PMD_DRV_LOG(INFO, "Failed to update VSI params"); + + return ret; +} + +static int +i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on) +{ + struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); + + return i40e_aq_set_port_parameters(hw, vsi->seid, 0, 1, on, NULL); +} + +static int +i40e_update_flow_control(struct i40e_hw *hw) +{ +#define I40E_LINK_PAUSE_RXTX (I40E_AQ_LINK_PAUSE_RX | I40E_AQ_LINK_PAUSE_TX) + struct i40e_link_status link_status; + uint32_t rxfc = 0, txfc = 0, reg; + uint8_t an_info; + int ret; + + memset(&link_status, 0, sizeof(link_status)); + ret = i40e_aq_get_link_info(hw, FALSE, &link_status, NULL); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "Failed to get link status information"); + goto write_reg; /* Disable flow control */ + } + + an_info = hw->phy.link_info.an_info; + if (!(an_info & I40E_AQ_AN_COMPLETED)) { + PMD_DRV_LOG(INFO, "Link auto negotiation not completed"); + ret = I40E_ERR_NOT_READY; + goto write_reg; /* Disable flow control */ + } + /** + * If link auto negotiation is enabled, flow control needs to + * be configured according to it + */ + switch (an_info & I40E_LINK_PAUSE_RXTX) { + case I40E_LINK_PAUSE_RXTX: + rxfc = 1; + txfc = 1; + hw->fc.current_mode = I40E_FC_FULL; + break; + case I40E_AQ_LINK_PAUSE_RX: + rxfc = 1; + hw->fc.current_mode = I40E_FC_RX_PAUSE; + break; + case I40E_AQ_LINK_PAUSE_TX: + txfc = 1; + hw->fc.current_mode = I40E_FC_TX_PAUSE; + break; + default: + hw->fc.current_mode = I40E_FC_NONE; + break; + } + +write_reg: + I40E_WRITE_REG(hw, I40E_PRTDCB_FCCFG, + txfc << I40E_PRTDCB_FCCFG_TFCE_SHIFT); + reg = I40E_READ_REG(hw, I40E_PRTDCB_MFLCN); + reg &= ~I40E_PRTDCB_MFLCN_RFCE_MASK; + reg |= rxfc << I40E_PRTDCB_MFLCN_RFCE_SHIFT; + I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, reg); + + return ret; +} + +/* PF setup */ +static int +i40e_pf_setup(struct i40e_pf *pf) +{ + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + struct i40e_filter_control_settings settings; + struct i40e_vsi *vsi; + int ret; + + /* Clear all stats counters */ + pf->offset_loaded = FALSE; + memset(&pf->stats, 0, sizeof(struct i40e_hw_port_stats)); + memset(&pf->stats_offset, 0, sizeof(struct i40e_hw_port_stats)); + memset(&pf->internal_stats, 0, sizeof(struct i40e_eth_stats)); + memset(&pf->internal_stats_offset, 0, sizeof(struct i40e_eth_stats)); + + ret = i40e_pf_get_switch_config(pf); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "Could not get switch config, err %d", ret); + return ret; + } + + ret = rte_eth_switch_domain_alloc(&pf->switch_domain_id); + if (ret) + PMD_INIT_LOG(WARNING, + "failed to allocate switch domain for device %d", ret); + + if (pf->flags & I40E_FLAG_FDIR) { + /* make queue allocated first, let FDIR use queue pair 0*/ + ret = i40e_res_pool_alloc(&pf->qp_pool, I40E_DEFAULT_QP_NUM_FDIR); + if (ret != I40E_FDIR_QUEUE_ID) { + PMD_DRV_LOG(ERR, + "queue allocation fails for FDIR: ret =%d", + ret); + pf->flags &= ~I40E_FLAG_FDIR; + } + } + /* main VSI setup */ + vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, 
NULL, 0); + if (!vsi) { + PMD_DRV_LOG(ERR, "Setup of main vsi failed"); + return I40E_ERR_NOT_READY; + } + pf->main_vsi = vsi; + + /* Configure filter control */ + memset(&settings, 0, sizeof(settings)); + if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_128) + settings.hash_lut_size = I40E_HASH_LUT_SIZE_128; + else if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_512) + settings.hash_lut_size = I40E_HASH_LUT_SIZE_512; + else { + PMD_DRV_LOG(ERR, "Hash lookup table size (%u) not supported", + hw->func_caps.rss_table_size); + return I40E_ERR_PARAM; + } + PMD_DRV_LOG(INFO, "Hardware capability of hash lookup table size: %u", + hw->func_caps.rss_table_size); + pf->hash_lut_size = hw->func_caps.rss_table_size; + + /* Enable ethtype and macvlan filters */ + settings.enable_ethtype = TRUE; + settings.enable_macvlan = TRUE; + ret = i40e_set_filter_control(hw, &settings); + if (ret) + PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d", + ret); + + /* Update flow control according to the auto negotiation */ + i40e_update_flow_control(hw); + + return I40E_SUCCESS; +} + +int +i40e_switch_tx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on) +{ + uint32_t reg; + uint16_t j; + + /** + * Set or clear TX Queue Disable flags, + * which is required by hardware. + */ + i40e_pre_tx_queue_cfg(hw, q_idx, on); + rte_delay_us(I40E_PRE_TX_Q_CFG_WAIT_US); + + /* Wait until the request is finished */ + for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) { + rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US); + reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx)); + if (!(((reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 0x1) ^ + ((reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) + & 0x1))) { + break; + } + } + if (on) { + if (reg & I40E_QTX_ENA_QENA_STAT_MASK) + return I40E_SUCCESS; /* already on, skip next steps */ + + I40E_WRITE_REG(hw, I40E_QTX_HEAD(q_idx), 0); + reg |= I40E_QTX_ENA_QENA_REQ_MASK; + } else { + if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK)) + return I40E_SUCCESS; /* already off, skip next steps */ + reg &= ~I40E_QTX_ENA_QENA_REQ_MASK; + } + /* Write the register */ + I40E_WRITE_REG(hw, I40E_QTX_ENA(q_idx), reg); + /* Check the result */ + for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) { + rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US); + reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx)); + if (on) { + if ((reg & I40E_QTX_ENA_QENA_REQ_MASK) && + (reg & I40E_QTX_ENA_QENA_STAT_MASK)) + break; + } else { + if (!(reg & I40E_QTX_ENA_QENA_REQ_MASK) && + !(reg & I40E_QTX_ENA_QENA_STAT_MASK)) + break; + } + } + /* Check if it is timeout */ + if (j >= I40E_CHK_Q_ENA_COUNT) { + PMD_DRV_LOG(ERR, "Failed to %s tx queue[%u]", + (on ? 
"enable" : "disable"), q_idx); + return I40E_ERR_TIMEOUT; + } + + return I40E_SUCCESS; +} + +int +i40e_switch_rx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on) +{ + uint32_t reg; + uint16_t j; + + /* Wait until the request is finished */ + for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) { + rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US); + reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx)); + if (!((reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 0x1) ^ + ((reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 0x1)) + break; + } + + if (on) { + if (reg & I40E_QRX_ENA_QENA_STAT_MASK) + return I40E_SUCCESS; /* Already on, skip next steps */ + reg |= I40E_QRX_ENA_QENA_REQ_MASK; + } else { + if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK)) + return I40E_SUCCESS; /* Already off, skip next steps */ + reg &= ~I40E_QRX_ENA_QENA_REQ_MASK; + } + + /* Write the register */ + I40E_WRITE_REG(hw, I40E_QRX_ENA(q_idx), reg); + /* Check the result */ + for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) { + rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US); + reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx)); + if (on) { + if ((reg & I40E_QRX_ENA_QENA_REQ_MASK) && + (reg & I40E_QRX_ENA_QENA_STAT_MASK)) + break; + } else { + if (!(reg & I40E_QRX_ENA_QENA_REQ_MASK) && + !(reg & I40E_QRX_ENA_QENA_STAT_MASK)) + break; + } + } + + /* Check if it is timeout */ + if (j >= I40E_CHK_Q_ENA_COUNT) { + PMD_DRV_LOG(ERR, "Failed to %s rx queue[%u]", + (on ? "enable" : "disable"), q_idx); + return I40E_ERR_TIMEOUT; + } + + return I40E_SUCCESS; +} + +/* Initialize VSI for TX */ +static int +i40e_dev_tx_init(struct i40e_pf *pf) +{ + struct rte_eth_dev_data *data = pf->dev_data; + uint16_t i; + uint32_t ret = I40E_SUCCESS; + struct i40e_tx_queue *txq; + + for (i = 0; i < data->nb_tx_queues; i++) { + txq = data->tx_queues[i]; + if (!txq || !txq->q_set) + continue; + ret = i40e_tx_queue_init(txq); + if (ret != I40E_SUCCESS) + break; + } + if (ret == I40E_SUCCESS) + i40e_set_tx_function(container_of(pf, struct i40e_adapter, pf) + ->eth_dev); + + return ret; +} + +/* Initialize VSI for RX */ +static int +i40e_dev_rx_init(struct i40e_pf *pf) +{ + struct rte_eth_dev_data *data = pf->dev_data; + int ret = I40E_SUCCESS; + uint16_t i; + struct i40e_rx_queue *rxq; + + i40e_pf_config_mq_rx(pf); + for (i = 0; i < data->nb_rx_queues; i++) { + rxq = data->rx_queues[i]; + if (!rxq || !rxq->q_set) + continue; + + ret = i40e_rx_queue_init(rxq); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, + "Failed to do RX queue initialization"); + break; + } + } + if (ret == I40E_SUCCESS) + i40e_set_rx_function(container_of(pf, struct i40e_adapter, pf) + ->eth_dev); + + return ret; +} + +static int +i40e_dev_rxtx_init(struct i40e_pf *pf) +{ + int err; + + err = i40e_dev_tx_init(pf); + if (err) { + PMD_DRV_LOG(ERR, "Failed to do TX initialization"); + return err; + } + err = i40e_dev_rx_init(pf); + if (err) { + PMD_DRV_LOG(ERR, "Failed to do RX initialization"); + return err; + } + + return err; +} + +static int +i40e_vmdq_setup(struct rte_eth_dev *dev) +{ + struct rte_eth_conf *conf = &dev->data->dev_conf; + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + int i, err, conf_vsis, j, loop; + struct i40e_vsi *vsi; + struct i40e_vmdq_info *vmdq_info; + struct rte_eth_vmdq_rx_conf *vmdq_conf; + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + + /* + * Disable interrupt to avoid message from VF. Furthermore, it will + * avoid race condition in VSI creation/destroy. 
+ */ + i40e_pf_disable_irq0(hw); + + if ((pf->flags & I40E_FLAG_VMDQ) == 0) { + PMD_INIT_LOG(ERR, "FW doesn't support VMDQ"); + return -ENOTSUP; + } + + conf_vsis = conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools; + if (conf_vsis > pf->max_nb_vmdq_vsi) { + PMD_INIT_LOG(ERR, "VMDQ config: %u, max support:%u", + conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools, + pf->max_nb_vmdq_vsi); + return -ENOTSUP; + } + + if (pf->vmdq != NULL) { + PMD_INIT_LOG(INFO, "VMDQ already configured"); + return 0; + } + + pf->vmdq = rte_zmalloc("vmdq_info_struct", + sizeof(*vmdq_info) * conf_vsis, 0); + + if (pf->vmdq == NULL) { + PMD_INIT_LOG(ERR, "Failed to allocate memory"); + return -ENOMEM; + } + + vmdq_conf = &conf->rx_adv_conf.vmdq_rx_conf; + + /* Create VMDQ VSI */ + for (i = 0; i < conf_vsis; i++) { + vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, pf->main_vsi, + vmdq_conf->enable_loop_back); + if (vsi == NULL) { + PMD_INIT_LOG(ERR, "Failed to create VMDQ VSI"); + err = -1; + goto err_vsi_setup; + } + vmdq_info = &pf->vmdq[i]; + vmdq_info->pf = pf; + vmdq_info->vsi = vsi; + } + pf->nb_cfg_vmdq_vsi = conf_vsis; + + /* Configure Vlan */ + loop = sizeof(vmdq_conf->pool_map[0].pools) * CHAR_BIT; + for (i = 0; i < vmdq_conf->nb_pool_maps; i++) { + for (j = 0; j < loop && j < pf->nb_cfg_vmdq_vsi; j++) { + if (vmdq_conf->pool_map[i].pools & (1UL << j)) { + PMD_INIT_LOG(INFO, "Add vlan %u to vmdq pool %u", + vmdq_conf->pool_map[i].vlan_id, j); + + err = i40e_vsi_add_vlan(pf->vmdq[j].vsi, + vmdq_conf->pool_map[i].vlan_id); + if (err) { + PMD_INIT_LOG(ERR, "Failed to add vlan"); + err = -1; + goto err_vsi_setup; + } + } + } + } + + i40e_pf_enable_irq0(hw); + + return 0; + +err_vsi_setup: + for (i = 0; i < conf_vsis; i++) + if (pf->vmdq[i].vsi == NULL) + break; + else + i40e_vsi_release(pf->vmdq[i].vsi); + + rte_free(pf->vmdq); + pf->vmdq = NULL; + i40e_pf_enable_irq0(hw); + return err; +} + +static void +i40e_stat_update_32(struct i40e_hw *hw, + uint32_t reg, + bool offset_loaded, + uint64_t *offset, + uint64_t *stat) +{ + uint64_t new_data; + + new_data = (uint64_t)I40E_READ_REG(hw, reg); + if (!offset_loaded) + *offset = new_data; + + if (new_data >= *offset) + *stat = (uint64_t)(new_data - *offset); + else + *stat = (uint64_t)((new_data + + ((uint64_t)1 << I40E_32_BIT_WIDTH)) - *offset); +} + +static void +i40e_stat_update_48(struct i40e_hw *hw, + uint32_t hireg, + uint32_t loreg, + bool offset_loaded, + uint64_t *offset, + uint64_t *stat) +{ + uint64_t new_data; + + new_data = (uint64_t)I40E_READ_REG(hw, loreg); + new_data |= ((uint64_t)(I40E_READ_REG(hw, hireg) & + I40E_16_BIT_MASK)) << I40E_32_BIT_WIDTH; + + if (!offset_loaded) + *offset = new_data; + + if (new_data >= *offset) + *stat = new_data - *offset; + else + *stat = (uint64_t)((new_data + + ((uint64_t)1 << I40E_48_BIT_WIDTH)) - *offset); + + *stat &= I40E_48_BIT_MASK; +} + +/* Disable IRQ0 */ +void +i40e_pf_disable_irq0(struct i40e_hw *hw) +{ + /* Disable all interrupt types */ + I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, + I40E_PFINT_DYN_CTL0_ITR_INDX_MASK); + I40E_WRITE_FLUSH(hw); +} + +/* Enable IRQ0 */ +void +i40e_pf_enable_irq0(struct i40e_hw *hw) +{ + I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, + I40E_PFINT_DYN_CTL0_INTENA_MASK | + I40E_PFINT_DYN_CTL0_CLEARPBA_MASK | + I40E_PFINT_DYN_CTL0_ITR_INDX_MASK); + I40E_WRITE_FLUSH(hw); +} + +static void +i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue) +{ + /* read pending request and disable first */ + i40e_pf_disable_irq0(hw); + I40E_WRITE_REG(hw, I40E_PFINT_ICR0_ENA, I40E_PFINT_ICR0_ENA_MASK); + 
I40E_WRITE_REG(hw, I40E_PFINT_STAT_CTL0, + I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK); + + if (no_queue) + /* Link no queues with irq0 */ + I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0, + I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK); +} + +static void +i40e_dev_handle_vfr_event(struct rte_eth_dev *dev) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + int i; + uint16_t abs_vf_id; + uint32_t index, offset, val; + + if (!pf->vfs) + return; + /** + * Try to find which VF trigger a reset, use absolute VF id to access + * since the reg is global register. + */ + for (i = 0; i < pf->vf_num; i++) { + abs_vf_id = hw->func_caps.vf_base_id + i; + index = abs_vf_id / I40E_UINT32_BIT_SIZE; + offset = abs_vf_id % I40E_UINT32_BIT_SIZE; + val = I40E_READ_REG(hw, I40E_GLGEN_VFLRSTAT(index)); + /* VFR event occurred */ + if (val & (0x1 << offset)) { + int ret; + + /* Clear the event first */ + I40E_WRITE_REG(hw, I40E_GLGEN_VFLRSTAT(index), + (0x1 << offset)); + PMD_DRV_LOG(INFO, "VF %u reset occurred", abs_vf_id); + /** + * Only notify a VF reset event occurred, + * don't trigger another SW reset + */ + ret = i40e_pf_host_vf_reset(&pf->vfs[i], 0); + if (ret != I40E_SUCCESS) + PMD_DRV_LOG(ERR, "Failed to do VF reset"); + } + } +} + +static void +i40e_notify_all_vfs_link_status(struct rte_eth_dev *dev) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + int i; + + for (i = 0; i < pf->vf_num; i++) + i40e_notify_vf_link_status(dev, &pf->vfs[i]); +} + +static void +i40e_dev_handle_aq_msg(struct rte_eth_dev *dev) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct i40e_arq_event_info info; + uint16_t pending, opcode; + int ret; + + info.buf_len = I40E_AQ_BUF_SZ; + info.msg_buf = rte_zmalloc("msg_buffer", info.buf_len, 0); + if (!info.msg_buf) { + PMD_DRV_LOG(ERR, "Failed to allocate mem"); + return; + } + + pending = 1; + while (pending) { + ret = i40e_clean_arq_element(hw, &info, &pending); + + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(INFO, + "Failed to read msg from AdminQ, aq_err: %u", + hw->aq.asq_last_status); + break; + } + opcode = rte_le_to_cpu_16(info.desc.opcode); + + switch (opcode) { + case i40e_aqc_opc_send_msg_to_pf: + /* Refer to i40e_aq_send_msg_to_pf() for argument layout*/ + i40e_pf_host_handle_vf_msg(dev, + rte_le_to_cpu_16(info.desc.retval), + rte_le_to_cpu_32(info.desc.cookie_high), + rte_le_to_cpu_32(info.desc.cookie_low), + info.msg_buf, + info.msg_len); + break; + case i40e_aqc_opc_get_link_status: + ret = i40e_dev_link_update(dev, 0); + if (!ret) + _rte_eth_dev_callback_process(dev, + RTE_ETH_EVENT_INTR_LSC, NULL); + break; + default: + PMD_DRV_LOG(DEBUG, "Request %u is not supported yet", + opcode); + break; + } + } + rte_free(info.msg_buf); +} + +static void +i40e_handle_mdd_event(struct rte_eth_dev *dev) +{ +#define I40E_MDD_CLEAR32 0xFFFFFFFF +#define I40E_MDD_CLEAR16 0xFFFF + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + bool mdd_detected = false; + struct i40e_pf_vf *vf; + uint32_t reg; + int i; + + /* find what triggered the MDD event */ + reg = I40E_READ_REG(hw, I40E_GL_MDET_TX); + if (reg & I40E_GL_MDET_TX_VALID_MASK) { + uint8_t pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >> + I40E_GL_MDET_TX_PF_NUM_SHIFT; + uint16_t vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >> + I40E_GL_MDET_TX_VF_NUM_SHIFT; + uint8_t event = (reg & I40E_GL_MDET_TX_EVENT_MASK) 
>> + I40E_GL_MDET_TX_EVENT_SHIFT; + uint16_t queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >> + I40E_GL_MDET_TX_QUEUE_SHIFT) - + hw->func_caps.base_queue; + PMD_DRV_LOG(WARNING, "Malicious Driver Detection event 0x%02x on TX " + "queue %d PF number 0x%02x VF number 0x%02x device %s\n", + event, queue, pf_num, vf_num, dev->data->name); + I40E_WRITE_REG(hw, I40E_GL_MDET_TX, I40E_MDD_CLEAR32); + mdd_detected = true; + } + reg = I40E_READ_REG(hw, I40E_GL_MDET_RX); + if (reg & I40E_GL_MDET_RX_VALID_MASK) { + uint8_t func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >> + I40E_GL_MDET_RX_FUNCTION_SHIFT; + uint8_t event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >> + I40E_GL_MDET_RX_EVENT_SHIFT; + uint16_t queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >> + I40E_GL_MDET_RX_QUEUE_SHIFT) - + hw->func_caps.base_queue; + + PMD_DRV_LOG(WARNING, "Malicious Driver Detection event 0x%02x on RX " + "queue %d of function 0x%02x device %s\n", + event, queue, func, dev->data->name); + I40E_WRITE_REG(hw, I40E_GL_MDET_RX, I40E_MDD_CLEAR32); + mdd_detected = true; + } + + if (mdd_detected) { + reg = I40E_READ_REG(hw, I40E_PF_MDET_TX); + if (reg & I40E_PF_MDET_TX_VALID_MASK) { + I40E_WRITE_REG(hw, I40E_PF_MDET_TX, I40E_MDD_CLEAR16); + PMD_DRV_LOG(WARNING, "TX driver issue detected on PF\n"); + } + reg = I40E_READ_REG(hw, I40E_PF_MDET_RX); + if (reg & I40E_PF_MDET_RX_VALID_MASK) { + I40E_WRITE_REG(hw, I40E_PF_MDET_RX, + I40E_MDD_CLEAR16); + PMD_DRV_LOG(WARNING, "RX driver issue detected on PF\n"); + } + } + + /* see if one of the VFs needs its hand slapped */ + for (i = 0; i < pf->vf_num && mdd_detected; i++) { + vf = &pf->vfs[i]; + reg = I40E_READ_REG(hw, I40E_VP_MDET_TX(i)); + if (reg & I40E_VP_MDET_TX_VALID_MASK) { + I40E_WRITE_REG(hw, I40E_VP_MDET_TX(i), + I40E_MDD_CLEAR16); + vf->num_mdd_events++; + PMD_DRV_LOG(WARNING, "TX driver issue detected on VF %d %-" + PRIu64 "times\n", + i, vf->num_mdd_events); + } + + reg = I40E_READ_REG(hw, I40E_VP_MDET_RX(i)); + if (reg & I40E_VP_MDET_RX_VALID_MASK) { + I40E_WRITE_REG(hw, I40E_VP_MDET_RX(i), + I40E_MDD_CLEAR16); + vf->num_mdd_events++; + PMD_DRV_LOG(WARNING, "RX driver issue detected on VF %d %-" + PRIu64 "times\n", + i, vf->num_mdd_events); + } + } +} + +/** + * Interrupt handler triggered by NIC for handling + * specific interrupt. + * + * @param handle + * Pointer to interrupt handle. + * @param param + * The address of parameter (struct rte_eth_dev *) regsitered before. 
+ * + * @return + * void + */ +static void +i40e_dev_interrupt_handler(void *param) +{ + struct rte_eth_dev *dev = (struct rte_eth_dev *)param; + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t icr0; + + /* Disable interrupt */ + i40e_pf_disable_irq0(hw); + + /* read out interrupt causes */ + icr0 = I40E_READ_REG(hw, I40E_PFINT_ICR0); + + /* No interrupt event indicated */ + if (!(icr0 & I40E_PFINT_ICR0_INTEVENT_MASK)) { + PMD_DRV_LOG(INFO, "No interrupt event"); + goto done; + } + if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK) + PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error"); + if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) { + PMD_DRV_LOG(ERR, "ICR0: malicious programming detected"); + i40e_handle_mdd_event(dev); + } + if (icr0 & I40E_PFINT_ICR0_GRST_MASK) + PMD_DRV_LOG(INFO, "ICR0: global reset requested"); + if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) + PMD_DRV_LOG(INFO, "ICR0: PCI exception activated"); + if (icr0 & I40E_PFINT_ICR0_STORM_DETECT_MASK) + PMD_DRV_LOG(INFO, "ICR0: a change in the storm control state"); + if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) + PMD_DRV_LOG(ERR, "ICR0: HMC error"); + if (icr0 & I40E_PFINT_ICR0_PE_CRITERR_MASK) + PMD_DRV_LOG(ERR, "ICR0: protocol engine critical error"); + + if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) { + PMD_DRV_LOG(INFO, "ICR0: VF reset detected"); + i40e_dev_handle_vfr_event(dev); + } + if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) { + PMD_DRV_LOG(INFO, "ICR0: adminq event"); + i40e_dev_handle_aq_msg(dev); + } + +done: + /* Enable interrupt */ + i40e_pf_enable_irq0(hw); +} + +static void +i40e_dev_alarm_handler(void *param) +{ + struct rte_eth_dev *dev = (struct rte_eth_dev *)param; + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t icr0; + + /* Disable interrupt */ + i40e_pf_disable_irq0(hw); + + /* read out interrupt causes */ + icr0 = I40E_READ_REG(hw, I40E_PFINT_ICR0); + + /* No interrupt event indicated */ + if (!(icr0 & I40E_PFINT_ICR0_INTEVENT_MASK)) + goto done; + if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK) + PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error"); + if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) { + PMD_DRV_LOG(ERR, "ICR0: malicious programming detected"); + i40e_handle_mdd_event(dev); + } + if (icr0 & I40E_PFINT_ICR0_GRST_MASK) + PMD_DRV_LOG(INFO, "ICR0: global reset requested"); + if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) + PMD_DRV_LOG(INFO, "ICR0: PCI exception activated"); + if (icr0 & I40E_PFINT_ICR0_STORM_DETECT_MASK) + PMD_DRV_LOG(INFO, "ICR0: a change in the storm control state"); + if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) + PMD_DRV_LOG(ERR, "ICR0: HMC error"); + if (icr0 & I40E_PFINT_ICR0_PE_CRITERR_MASK) + PMD_DRV_LOG(ERR, "ICR0: protocol engine critical error"); + + if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) { + PMD_DRV_LOG(INFO, "ICR0: VF reset detected"); + i40e_dev_handle_vfr_event(dev); + } + if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) { + PMD_DRV_LOG(INFO, "ICR0: adminq event"); + i40e_dev_handle_aq_msg(dev); + } + +done: + /* Enable interrupt */ + i40e_pf_enable_irq0(hw); + rte_eal_alarm_set(I40E_ALARM_INTERVAL, + i40e_dev_alarm_handler, dev); +} + +int +i40e_add_macvlan_filters(struct i40e_vsi *vsi, + struct i40e_macvlan_filter *filter, + int total) +{ + int ele_num, ele_buff_size; + int num, actual_num, i; + uint16_t flags; + int ret = I40E_SUCCESS; + struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); + struct i40e_aqc_add_macvlan_element_data *req_list; + + if (filter == NULL || total == 0) + return I40E_ERR_PARAM; + ele_num = hw->aq.asq_buf_size / sizeof(*req_list); + 
ele_buff_size = hw->aq.asq_buf_size; + + req_list = rte_zmalloc("macvlan_add", ele_buff_size, 0); + if (req_list == NULL) { + PMD_DRV_LOG(ERR, "Fail to allocate memory"); + return I40E_ERR_NO_MEMORY; + } + + num = 0; + do { + actual_num = (num + ele_num > total) ? (total - num) : ele_num; + memset(req_list, 0, ele_buff_size); + + for (i = 0; i < actual_num; i++) { + rte_memcpy(req_list[i].mac_addr, + &filter[num + i].macaddr, ETH_ADDR_LEN); + req_list[i].vlan_tag = + rte_cpu_to_le_16(filter[num + i].vlan_id); + + switch (filter[num + i].filter_type) { + case RTE_MAC_PERFECT_MATCH: + flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH | + I40E_AQC_MACVLAN_ADD_IGNORE_VLAN; + break; + case RTE_MACVLAN_PERFECT_MATCH: + flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH; + break; + case RTE_MAC_HASH_MATCH: + flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH | + I40E_AQC_MACVLAN_ADD_IGNORE_VLAN; + break; + case RTE_MACVLAN_HASH_MATCH: + flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH; + break; + default: + PMD_DRV_LOG(ERR, "Invalid MAC match type"); + ret = I40E_ERR_PARAM; + goto DONE; + } + + req_list[i].queue_number = 0; + + req_list[i].flags = rte_cpu_to_le_16(flags); + } + + ret = i40e_aq_add_macvlan(hw, vsi->seid, req_list, + actual_num, NULL); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "Failed to add macvlan filter"); + goto DONE; + } + num += actual_num; + } while (num < total); + +DONE: + rte_free(req_list); + return ret; +} + +int +i40e_remove_macvlan_filters(struct i40e_vsi *vsi, + struct i40e_macvlan_filter *filter, + int total) +{ + int ele_num, ele_buff_size; + int num, actual_num, i; + uint16_t flags; + int ret = I40E_SUCCESS; + struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); + struct i40e_aqc_remove_macvlan_element_data *req_list; + + if (filter == NULL || total == 0) + return I40E_ERR_PARAM; + + ele_num = hw->aq.asq_buf_size / sizeof(*req_list); + ele_buff_size = hw->aq.asq_buf_size; + + req_list = rte_zmalloc("macvlan_remove", ele_buff_size, 0); + if (req_list == NULL) { + PMD_DRV_LOG(ERR, "Fail to allocate memory"); + return I40E_ERR_NO_MEMORY; + } + + num = 0; + do { + actual_num = (num + ele_num > total) ? 
(total - num) : ele_num; + memset(req_list, 0, ele_buff_size); + + for (i = 0; i < actual_num; i++) { + rte_memcpy(req_list[i].mac_addr, + &filter[num + i].macaddr, ETH_ADDR_LEN); + req_list[i].vlan_tag = + rte_cpu_to_le_16(filter[num + i].vlan_id); + + switch (filter[num + i].filter_type) { + case RTE_MAC_PERFECT_MATCH: + flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH | + I40E_AQC_MACVLAN_DEL_IGNORE_VLAN; + break; + case RTE_MACVLAN_PERFECT_MATCH: + flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH; + break; + case RTE_MAC_HASH_MATCH: + flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH | + I40E_AQC_MACVLAN_DEL_IGNORE_VLAN; + break; + case RTE_MACVLAN_HASH_MATCH: + flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH; + break; + default: + PMD_DRV_LOG(ERR, "Invalid MAC filter type"); + ret = I40E_ERR_PARAM; + goto DONE; + } + req_list[i].flags = rte_cpu_to_le_16(flags); + } + + ret = i40e_aq_remove_macvlan(hw, vsi->seid, req_list, + actual_num, NULL); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "Failed to remove macvlan filter"); + goto DONE; + } + num += actual_num; + } while (num < total); + +DONE: + rte_free(req_list); + return ret; +} + +/* Find out specific MAC filter */ +static struct i40e_mac_filter * +i40e_find_mac_filter(struct i40e_vsi *vsi, + struct rte_ether_addr *macaddr) +{ + struct i40e_mac_filter *f; + + TAILQ_FOREACH(f, &vsi->mac_list, next) { + if (rte_is_same_ether_addr(macaddr, &f->mac_info.mac_addr)) + return f; + } + + return NULL; +} + +static bool +i40e_find_vlan_filter(struct i40e_vsi *vsi, + uint16_t vlan_id) +{ + uint32_t vid_idx, vid_bit; + + if (vlan_id > ETH_VLAN_ID_MAX) + return 0; + + vid_idx = I40E_VFTA_IDX(vlan_id); + vid_bit = I40E_VFTA_BIT(vlan_id); + + if (vsi->vfta[vid_idx] & vid_bit) + return 1; + else + return 0; +} + +static void +i40e_store_vlan_filter(struct i40e_vsi *vsi, + uint16_t vlan_id, bool on) +{ + uint32_t vid_idx, vid_bit; + + vid_idx = I40E_VFTA_IDX(vlan_id); + vid_bit = I40E_VFTA_BIT(vlan_id); + + if (on) + vsi->vfta[vid_idx] |= vid_bit; + else + vsi->vfta[vid_idx] &= ~vid_bit; +} + +void +i40e_set_vlan_filter(struct i40e_vsi *vsi, + uint16_t vlan_id, bool on) +{ + struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); + struct i40e_aqc_add_remove_vlan_element_data vlan_data = {0}; + int ret; + + if (vlan_id > ETH_VLAN_ID_MAX) + return; + + i40e_store_vlan_filter(vsi, vlan_id, on); + + if ((!vsi->vlan_anti_spoof_on && !vsi->vlan_filter_on) || !vlan_id) + return; + + vlan_data.vlan_tag = rte_cpu_to_le_16(vlan_id); + + if (on) { + ret = i40e_aq_add_vlan(hw, vsi->seid, + &vlan_data, 1, NULL); + if (ret != I40E_SUCCESS) + PMD_DRV_LOG(ERR, "Failed to add vlan filter"); + } else { + ret = i40e_aq_remove_vlan(hw, vsi->seid, + &vlan_data, 1, NULL); + if (ret != I40E_SUCCESS) + PMD_DRV_LOG(ERR, + "Failed to remove vlan filter"); + } +} + +/** + * Find all vlan options for specific mac addr, + * return with actual vlan found. + */ +int +i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi, + struct i40e_macvlan_filter *mv_f, + int num, struct rte_ether_addr *addr) +{ + int i; + uint32_t j, k; + + /** + * Not to use i40e_find_vlan_filter to decrease the loop time, + * although the code looks complex. 
+ */ + if (num < vsi->vlan_num) + return I40E_ERR_PARAM; + + i = 0; + for (j = 0; j < I40E_VFTA_SIZE; j++) { + if (vsi->vfta[j]) { + for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) { + if (vsi->vfta[j] & (1 << k)) { + if (i > num - 1) { + PMD_DRV_LOG(ERR, + "vlan number doesn't match"); + return I40E_ERR_PARAM; + } + rte_memcpy(&mv_f[i].macaddr, + addr, ETH_ADDR_LEN); + mv_f[i].vlan_id = + j * I40E_UINT32_BIT_SIZE + k; + i++; + } + } + } + } + return I40E_SUCCESS; +} + +static inline int +i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi, + struct i40e_macvlan_filter *mv_f, + int num, + uint16_t vlan) +{ + int i = 0; + struct i40e_mac_filter *f; + + if (num < vsi->mac_num) + return I40E_ERR_PARAM; + + TAILQ_FOREACH(f, &vsi->mac_list, next) { + if (i > num - 1) { + PMD_DRV_LOG(ERR, "buffer number not match"); + return I40E_ERR_PARAM; + } + rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr, + ETH_ADDR_LEN); + mv_f[i].vlan_id = vlan; + mv_f[i].filter_type = f->mac_info.filter_type; + i++; + } + + return I40E_SUCCESS; +} + +static int +i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi) +{ + int i, j, num; + struct i40e_mac_filter *f; + struct i40e_macvlan_filter *mv_f; + int ret = I40E_SUCCESS; + + if (vsi == NULL || vsi->mac_num == 0) + return I40E_ERR_PARAM; + + /* Case that no vlan is set */ + if (vsi->vlan_num == 0) + num = vsi->mac_num; + else + num = vsi->mac_num * vsi->vlan_num; + + mv_f = rte_zmalloc("macvlan_data", num * sizeof(*mv_f), 0); + if (mv_f == NULL) { + PMD_DRV_LOG(ERR, "failed to allocate memory"); + return I40E_ERR_NO_MEMORY; + } + + i = 0; + if (vsi->vlan_num == 0) { + TAILQ_FOREACH(f, &vsi->mac_list, next) { + rte_memcpy(&mv_f[i].macaddr, + &f->mac_info.mac_addr, ETH_ADDR_LEN); + mv_f[i].filter_type = f->mac_info.filter_type; + mv_f[i].vlan_id = 0; + i++; + } + } else { + TAILQ_FOREACH(f, &vsi->mac_list, next) { + ret = i40e_find_all_vlan_for_mac(vsi,&mv_f[i], + vsi->vlan_num, &f->mac_info.mac_addr); + if (ret != I40E_SUCCESS) + goto DONE; + for (j = i; j < i + vsi->vlan_num; j++) + mv_f[j].filter_type = f->mac_info.filter_type; + i += vsi->vlan_num; + } + } + + ret = i40e_remove_macvlan_filters(vsi, mv_f, num); +DONE: + rte_free(mv_f); + + return ret; +} + +int +i40e_vsi_add_vlan(struct i40e_vsi *vsi, uint16_t vlan) +{ + struct i40e_macvlan_filter *mv_f; + int mac_num; + int ret = I40E_SUCCESS; + + if (!vsi || vlan > RTE_ETHER_MAX_VLAN_ID) + return I40E_ERR_PARAM; + + /* If it's already set, just return */ + if (i40e_find_vlan_filter(vsi,vlan)) + return I40E_SUCCESS; + + mac_num = vsi->mac_num; + + if (mac_num == 0) { + PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr"); + return I40E_ERR_PARAM; + } + + mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0); + + if (mv_f == NULL) { + PMD_DRV_LOG(ERR, "failed to allocate memory"); + return I40E_ERR_NO_MEMORY; + } + + ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan); + + if (ret != I40E_SUCCESS) + goto DONE; + + ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num); + + if (ret != I40E_SUCCESS) + goto DONE; + + i40e_set_vlan_filter(vsi, vlan, 1); + + vsi->vlan_num++; + ret = I40E_SUCCESS; +DONE: + rte_free(mv_f); + return ret; +} + +int +i40e_vsi_delete_vlan(struct i40e_vsi *vsi, uint16_t vlan) +{ + struct i40e_macvlan_filter *mv_f; + int mac_num; + int ret = I40E_SUCCESS; + + /** + * Vlan 0 is the generic filter for untagged packets + * and can't be removed. 
+ */ + if (!vsi || vlan == 0 || vlan > RTE_ETHER_MAX_VLAN_ID) + return I40E_ERR_PARAM; + + /* If can't find it, just return */ + if (!i40e_find_vlan_filter(vsi, vlan)) + return I40E_ERR_PARAM; + + mac_num = vsi->mac_num; + + if (mac_num == 0) { + PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr"); + return I40E_ERR_PARAM; + } + + mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0); + + if (mv_f == NULL) { + PMD_DRV_LOG(ERR, "failed to allocate memory"); + return I40E_ERR_NO_MEMORY; + } + + ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan); + + if (ret != I40E_SUCCESS) + goto DONE; + + ret = i40e_remove_macvlan_filters(vsi, mv_f, mac_num); + + if (ret != I40E_SUCCESS) + goto DONE; + + /* This is last vlan to remove, replace all mac filter with vlan 0 */ + if (vsi->vlan_num == 1) { + ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, 0); + if (ret != I40E_SUCCESS) + goto DONE; + + ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num); + if (ret != I40E_SUCCESS) + goto DONE; + } + + i40e_set_vlan_filter(vsi, vlan, 0); + + vsi->vlan_num--; + ret = I40E_SUCCESS; +DONE: + rte_free(mv_f); + return ret; +} + +int +i40e_vsi_add_mac(struct i40e_vsi *vsi, struct i40e_mac_filter_info *mac_filter) +{ + struct i40e_mac_filter *f; + struct i40e_macvlan_filter *mv_f; + int i, vlan_num = 0; + int ret = I40E_SUCCESS; + + /* If it's add and we've config it, return */ + f = i40e_find_mac_filter(vsi, &mac_filter->mac_addr); + if (f != NULL) + return I40E_SUCCESS; + if ((mac_filter->filter_type == RTE_MACVLAN_PERFECT_MATCH) || + (mac_filter->filter_type == RTE_MACVLAN_HASH_MATCH)) { + + /** + * If vlan_num is 0, that's the first time to add mac, + * set mask for vlan_id 0. + */ + if (vsi->vlan_num == 0) { + i40e_set_vlan_filter(vsi, 0, 1); + vsi->vlan_num = 1; + } + vlan_num = vsi->vlan_num; + } else if ((mac_filter->filter_type == RTE_MAC_PERFECT_MATCH) || + (mac_filter->filter_type == RTE_MAC_HASH_MATCH)) + vlan_num = 1; + + mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0); + if (mv_f == NULL) { + PMD_DRV_LOG(ERR, "failed to allocate memory"); + return I40E_ERR_NO_MEMORY; + } + + for (i = 0; i < vlan_num; i++) { + mv_f[i].filter_type = mac_filter->filter_type; + rte_memcpy(&mv_f[i].macaddr, &mac_filter->mac_addr, + ETH_ADDR_LEN); + } + + if (mac_filter->filter_type == RTE_MACVLAN_PERFECT_MATCH || + mac_filter->filter_type == RTE_MACVLAN_HASH_MATCH) { + ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num, + &mac_filter->mac_addr); + if (ret != I40E_SUCCESS) + goto DONE; + } + + ret = i40e_add_macvlan_filters(vsi, mv_f, vlan_num); + if (ret != I40E_SUCCESS) + goto DONE; + + /* Add the mac addr into mac list */ + f = rte_zmalloc("macv_filter", sizeof(*f), 0); + if (f == NULL) { + PMD_DRV_LOG(ERR, "failed to allocate memory"); + ret = I40E_ERR_NO_MEMORY; + goto DONE; + } + rte_memcpy(&f->mac_info.mac_addr, &mac_filter->mac_addr, + ETH_ADDR_LEN); + f->mac_info.filter_type = mac_filter->filter_type; + TAILQ_INSERT_TAIL(&vsi->mac_list, f, next); + vsi->mac_num++; + + ret = I40E_SUCCESS; +DONE: + rte_free(mv_f); + + return ret; +} + +int +i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct rte_ether_addr *addr) +{ + struct i40e_mac_filter *f; + struct i40e_macvlan_filter *mv_f; + int i, vlan_num; + enum rte_mac_filter_type filter_type; + int ret = I40E_SUCCESS; + + /* Can't find it, return an error */ + f = i40e_find_mac_filter(vsi, addr); + if (f == NULL) + return I40E_ERR_PARAM; + + vlan_num = vsi->vlan_num; + filter_type = f->mac_info.filter_type; + if (filter_type == 
RTE_MACVLAN_PERFECT_MATCH || + filter_type == RTE_MACVLAN_HASH_MATCH) { + if (vlan_num == 0) { + PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0"); + return I40E_ERR_PARAM; + } + } else if (filter_type == RTE_MAC_PERFECT_MATCH || + filter_type == RTE_MAC_HASH_MATCH) + vlan_num = 1; + + mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0); + if (mv_f == NULL) { + PMD_DRV_LOG(ERR, "failed to allocate memory"); + return I40E_ERR_NO_MEMORY; + } + + for (i = 0; i < vlan_num; i++) { + mv_f[i].filter_type = filter_type; + rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr, + ETH_ADDR_LEN); + } + if (filter_type == RTE_MACVLAN_PERFECT_MATCH || + filter_type == RTE_MACVLAN_HASH_MATCH) { + ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num, addr); + if (ret != I40E_SUCCESS) + goto DONE; + } + + ret = i40e_remove_macvlan_filters(vsi, mv_f, vlan_num); + if (ret != I40E_SUCCESS) + goto DONE; + + /* Remove the mac addr into mac list */ + TAILQ_REMOVE(&vsi->mac_list, f, next); + rte_free(f); + vsi->mac_num--; + + ret = I40E_SUCCESS; +DONE: + rte_free(mv_f); + return ret; +} + +/* Configure hash enable flags for RSS */ +uint64_t +i40e_config_hena(const struct i40e_adapter *adapter, uint64_t flags) +{ + uint64_t hena = 0; + int i; + + if (!flags) + return hena; + + for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < I40E_FLOW_TYPE_MAX; i++) { + if (flags & (1ULL << i)) + hena |= adapter->pctypes_tbl[i]; + } + + return hena; +} + +/* Parse the hash enable flags */ +uint64_t +i40e_parse_hena(const struct i40e_adapter *adapter, uint64_t flags) +{ + uint64_t rss_hf = 0; + + if (!flags) + return rss_hf; + int i; + + for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < I40E_FLOW_TYPE_MAX; i++) { + if (flags & adapter->pctypes_tbl[i]) + rss_hf |= (1ULL << i); + } + return rss_hf; +} + +/* Disable RSS */ +static void +i40e_pf_disable_rss(struct i40e_pf *pf) +{ + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + + i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0); + i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0); + I40E_WRITE_FLUSH(hw); +} + +int +i40e_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len) +{ + struct i40e_pf *pf = I40E_VSI_TO_PF(vsi); + struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); + uint16_t key_idx = (vsi->type == I40E_VSI_SRIOV) ? 
+ I40E_VFQF_HKEY_MAX_INDEX : + I40E_PFQF_HKEY_MAX_INDEX; + int ret = 0; + + if (!key || key_len == 0) { + PMD_DRV_LOG(DEBUG, "No key to be configured"); + return 0; + } else if (key_len != (key_idx + 1) * + sizeof(uint32_t)) { + PMD_DRV_LOG(ERR, "Invalid key length %u", key_len); + return -EINVAL; + } + + if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) { + struct i40e_aqc_get_set_rss_key_data *key_dw = + (struct i40e_aqc_get_set_rss_key_data *)key; + + ret = i40e_aq_set_rss_key(hw, vsi->vsi_id, key_dw); + if (ret) + PMD_INIT_LOG(ERR, "Failed to configure RSS key via AQ"); + } else { + uint32_t *hash_key = (uint32_t *)key; + uint16_t i; + + if (vsi->type == I40E_VSI_SRIOV) { + for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++) + I40E_WRITE_REG( + hw, + I40E_VFQF_HKEY1(i, vsi->user_param), + hash_key[i]); + + } else { + for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) + I40E_WRITE_REG(hw, I40E_PFQF_HKEY(i), + hash_key[i]); + } + I40E_WRITE_FLUSH(hw); + } + + return ret; +} + +static int +i40e_get_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t *key_len) +{ + struct i40e_pf *pf = I40E_VSI_TO_PF(vsi); + struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); + uint32_t reg; + int ret; + + if (!key || !key_len) + return 0; + + if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) { + ret = i40e_aq_get_rss_key(hw, vsi->vsi_id, + (struct i40e_aqc_get_set_rss_key_data *)key); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to get RSS key via AQ"); + return ret; + } + } else { + uint32_t *key_dw = (uint32_t *)key; + uint16_t i; + + if (vsi->type == I40E_VSI_SRIOV) { + for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++) { + reg = I40E_VFQF_HKEY1(i, vsi->user_param); + key_dw[i] = i40e_read_rx_ctl(hw, reg); + } + *key_len = (I40E_VFQF_HKEY_MAX_INDEX + 1) * + sizeof(uint32_t); + } else { + for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) { + reg = I40E_PFQF_HKEY(i); + key_dw[i] = i40e_read_rx_ctl(hw, reg); + } + *key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) * + sizeof(uint32_t); + } + } + return 0; +} + +static int +i40e_hw_rss_hash_set(struct i40e_pf *pf, struct rte_eth_rss_conf *rss_conf) +{ + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + uint64_t hena; + int ret; + + ret = i40e_set_rss_key(pf->main_vsi, rss_conf->rss_key, + rss_conf->rss_key_len); + if (ret) + return ret; + + hena = i40e_config_hena(pf->adapter, rss_conf->rss_hf); + i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena); + i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32)); + I40E_WRITE_FLUSH(hw); + + return 0; +} + +static int +i40e_dev_rss_hash_update(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint64_t rss_hf = rss_conf->rss_hf & pf->adapter->flow_types_mask; + uint64_t hena; + + hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)); + hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32; + + if (!(hena & pf->adapter->pctypes_mask)) { /* RSS disabled */ + if (rss_hf != 0) /* Enable RSS */ + return -EINVAL; + return 0; /* Nothing to do */ + } + /* RSS enabled */ + if (rss_hf == 0) /* Disable RSS */ + return -EINVAL; + + return i40e_hw_rss_hash_set(pf, rss_conf); +} + +static int +i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint64_t hena; + int ret; + + if (!rss_conf) + return -EINVAL; + + ret = 
i40e_get_rss_key(pf->main_vsi, rss_conf->rss_key, + &rss_conf->rss_key_len); + if (ret) + return ret; + + hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)); + hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32; + rss_conf->rss_hf = i40e_parse_hena(pf->adapter, hena); + + return 0; +} + +static int +i40e_dev_get_filter_type(uint16_t filter_type, uint16_t *flag) +{ + switch (filter_type) { + case RTE_TUNNEL_FILTER_IMAC_IVLAN: + *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN; + break; + case RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID: + *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID; + break; + case RTE_TUNNEL_FILTER_IMAC_TENID: + *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID; + break; + case RTE_TUNNEL_FILTER_OMAC_TENID_IMAC: + *flag = I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC; + break; + case ETH_TUNNEL_FILTER_IMAC: + *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC; + break; + case ETH_TUNNEL_FILTER_OIP: + *flag = I40E_AQC_ADD_CLOUD_FILTER_OIP; + break; + case ETH_TUNNEL_FILTER_IIP: + *flag = I40E_AQC_ADD_CLOUD_FILTER_IIP; + break; + default: + PMD_DRV_LOG(ERR, "invalid tunnel filter type"); + return -EINVAL; + } + + return 0; +} + +/* Convert tunnel filter structure */ +static int +i40e_tunnel_filter_convert( + struct i40e_aqc_cloud_filters_element_bb *cld_filter, + struct i40e_tunnel_filter *tunnel_filter) +{ + rte_ether_addr_copy((struct rte_ether_addr *) + &cld_filter->element.outer_mac, + (struct rte_ether_addr *)&tunnel_filter->input.outer_mac); + rte_ether_addr_copy((struct rte_ether_addr *) + &cld_filter->element.inner_mac, + (struct rte_ether_addr *)&tunnel_filter->input.inner_mac); + tunnel_filter->input.inner_vlan = cld_filter->element.inner_vlan; + if ((rte_le_to_cpu_16(cld_filter->element.flags) & + I40E_AQC_ADD_CLOUD_FLAGS_IPV6) == + I40E_AQC_ADD_CLOUD_FLAGS_IPV6) + tunnel_filter->input.ip_type = I40E_TUNNEL_IPTYPE_IPV6; + else + tunnel_filter->input.ip_type = I40E_TUNNEL_IPTYPE_IPV4; + tunnel_filter->input.flags = cld_filter->element.flags; + tunnel_filter->input.tenant_id = cld_filter->element.tenant_id; + tunnel_filter->queue = cld_filter->element.queue_number; + rte_memcpy(tunnel_filter->input.general_fields, + cld_filter->general_fields, + sizeof(cld_filter->general_fields)); + + return 0; +} + +/* Check if there exists the tunnel filter */ +struct i40e_tunnel_filter * +i40e_sw_tunnel_filter_lookup(struct i40e_tunnel_rule *tunnel_rule, + const struct i40e_tunnel_filter_input *input) +{ + int ret; + + ret = rte_hash_lookup(tunnel_rule->hash_table, (const void *)input); + if (ret < 0) + return NULL; + + return tunnel_rule->hash_map[ret]; +} + +/* Add a tunnel filter into the SW list */ +static int +i40e_sw_tunnel_filter_insert(struct i40e_pf *pf, + struct i40e_tunnel_filter *tunnel_filter) +{ + struct i40e_tunnel_rule *rule = &pf->tunnel; + int ret; + + ret = rte_hash_add_key(rule->hash_table, &tunnel_filter->input); + if (ret < 0) { + PMD_DRV_LOG(ERR, + "Failed to insert tunnel filter to hash table %d!", + ret); + return ret; + } + rule->hash_map[ret] = tunnel_filter; + + TAILQ_INSERT_TAIL(&rule->tunnel_list, tunnel_filter, rules); + + return 0; +} + +/* Delete a tunnel filter from the SW list */ +int +i40e_sw_tunnel_filter_del(struct i40e_pf *pf, + struct i40e_tunnel_filter_input *input) +{ + struct i40e_tunnel_rule *rule = &pf->tunnel; + struct i40e_tunnel_filter *tunnel_filter; + int ret; + + ret = rte_hash_del_key(rule->hash_table, input); + if (ret < 0) { + PMD_DRV_LOG(ERR, + "Failed to delete tunnel filter to hash table %d!", + ret); + return ret; + } + 
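+	/*
+	 * On success rte_hash_del_key() returns the position the key
+	 * occupied; use it to fetch the stored filter, unlink it from
+	 * the tunnel list and free it.
+	 */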
tunnel_filter = rule->hash_map[ret]; + rule->hash_map[ret] = NULL; + + TAILQ_REMOVE(&rule->tunnel_list, tunnel_filter, rules); + rte_free(tunnel_filter); + + return 0; +} + +int +i40e_dev_tunnel_filter_set(struct i40e_pf *pf, + struct rte_eth_tunnel_filter_conf *tunnel_filter, + uint8_t add) +{ + uint16_t ip_type; + uint32_t ipv4_addr, ipv4_addr_le; + uint8_t i, tun_type = 0; + /* internal varialbe to convert ipv6 byte order */ + uint32_t convert_ipv6[4]; + int val, ret = 0; + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + struct i40e_vsi *vsi = pf->main_vsi; + struct i40e_aqc_cloud_filters_element_bb *cld_filter; + struct i40e_aqc_cloud_filters_element_bb *pfilter; + struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel; + struct i40e_tunnel_filter *tunnel, *node; + struct i40e_tunnel_filter check_filter; /* Check if filter exists */ + + cld_filter = rte_zmalloc("tunnel_filter", + sizeof(struct i40e_aqc_add_rm_cloud_filt_elem_ext), + 0); + + if (NULL == cld_filter) { + PMD_DRV_LOG(ERR, "Failed to alloc memory."); + return -ENOMEM; + } + pfilter = cld_filter; + + rte_ether_addr_copy(&tunnel_filter->outer_mac, + (struct rte_ether_addr *)&pfilter->element.outer_mac); + rte_ether_addr_copy(&tunnel_filter->inner_mac, + (struct rte_ether_addr *)&pfilter->element.inner_mac); + + pfilter->element.inner_vlan = + rte_cpu_to_le_16(tunnel_filter->inner_vlan); + if (tunnel_filter->ip_type == RTE_TUNNEL_IPTYPE_IPV4) { + ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4; + ipv4_addr = rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv4_addr); + ipv4_addr_le = rte_cpu_to_le_32(ipv4_addr); + rte_memcpy(&pfilter->element.ipaddr.v4.data, + &ipv4_addr_le, + sizeof(pfilter->element.ipaddr.v4.data)); + } else { + ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6; + for (i = 0; i < 4; i++) { + convert_ipv6[i] = + rte_cpu_to_le_32(rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv6_addr[i])); + } + rte_memcpy(&pfilter->element.ipaddr.v6.data, + &convert_ipv6, + sizeof(pfilter->element.ipaddr.v6.data)); + } + + /* check tunneled type */ + switch (tunnel_filter->tunnel_type) { + case RTE_TUNNEL_TYPE_VXLAN: + tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN; + break; + case RTE_TUNNEL_TYPE_NVGRE: + tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC; + break; + case RTE_TUNNEL_TYPE_IP_IN_GRE: + tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_IP; + break; + case RTE_TUNNEL_TYPE_VXLAN_GPE: + tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN_GPE; + break; + default: + /* Other tunnel types is not supported. 
*/ + PMD_DRV_LOG(ERR, "tunnel type is not supported."); + rte_free(cld_filter); + return -EINVAL; + } + + val = i40e_dev_get_filter_type(tunnel_filter->filter_type, + &pfilter->element.flags); + if (val < 0) { + rte_free(cld_filter); + return -EINVAL; + } + + pfilter->element.flags |= rte_cpu_to_le_16( + I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE | + ip_type | (tun_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT)); + pfilter->element.tenant_id = rte_cpu_to_le_32(tunnel_filter->tenant_id); + pfilter->element.queue_number = + rte_cpu_to_le_16(tunnel_filter->queue_id); + + /* Check if there is the filter in SW list */ + memset(&check_filter, 0, sizeof(check_filter)); + i40e_tunnel_filter_convert(cld_filter, &check_filter); + node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &check_filter.input); + if (add && node) { + PMD_DRV_LOG(ERR, "Conflict with existing tunnel rules!"); + rte_free(cld_filter); + return -EINVAL; + } + + if (!add && !node) { + PMD_DRV_LOG(ERR, "There's no corresponding tunnel filter!"); + rte_free(cld_filter); + return -EINVAL; + } + + if (add) { + ret = i40e_aq_add_cloud_filters(hw, + vsi->seid, &cld_filter->element, 1); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Failed to add a tunnel filter."); + rte_free(cld_filter); + return -ENOTSUP; + } + tunnel = rte_zmalloc("tunnel_filter", sizeof(*tunnel), 0); + if (tunnel == NULL) { + PMD_DRV_LOG(ERR, "Failed to alloc memory."); + rte_free(cld_filter); + return -ENOMEM; + } + + rte_memcpy(tunnel, &check_filter, sizeof(check_filter)); + ret = i40e_sw_tunnel_filter_insert(pf, tunnel); + if (ret < 0) + rte_free(tunnel); + } else { + ret = i40e_aq_rem_cloud_filters(hw, vsi->seid, + &cld_filter->element, 1); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Failed to delete a tunnel filter."); + rte_free(cld_filter); + return -ENOTSUP; + } + ret = i40e_sw_tunnel_filter_del(pf, &node->input); + } + + rte_free(cld_filter); + return ret; +} + +#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_TR_WORD0 0x48 +#define I40E_TR_VXLAN_GRE_KEY_MASK 0x4 +#define I40E_TR_GENEVE_KEY_MASK 0x8 +#define I40E_TR_GENERIC_UDP_TUNNEL_MASK 0x40 +#define I40E_TR_GRE_KEY_MASK 0x400 +#define I40E_TR_GRE_KEY_WITH_XSUM_MASK 0x800 +#define I40E_TR_GRE_NO_KEY_MASK 0x8000 + +static enum +i40e_status_code i40e_replace_mpls_l1_filter(struct i40e_pf *pf) +{ + struct i40e_aqc_replace_cloud_filters_cmd filter_replace; + struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf; + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev; + enum i40e_status_code status = I40E_SUCCESS; + + if (pf->support_multi_driver) { + PMD_DRV_LOG(ERR, "Replace l1 filter is not supported."); + return I40E_NOT_SUPPORTED; + } + + memset(&filter_replace, 0, + sizeof(struct i40e_aqc_replace_cloud_filters_cmd)); + memset(&filter_replace_buf, 0, + sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf)); + + /* create L1 filter */ + filter_replace.old_filter_type = + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC; + filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X11; + filter_replace.tr_bit = 0; + + /* Prepare the buffer, 3 entries */ + filter_replace_buf.data[0] = + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0; + filter_replace_buf.data[0] |= + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED; + filter_replace_buf.data[2] = 0xFF; + filter_replace_buf.data[3] = 0xFF; + filter_replace_buf.data[4] = + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1; + filter_replace_buf.data[4] |= + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED; + filter_replace_buf.data[7] = 0xF0; + 
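+	/*
+	 * Third entry of the replace buffer: TR word 0 together with the
+	 * VXLAN/GRE/GENEVE and generic UDP tunnel key masks defined above.
+	 */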
filter_replace_buf.data[8] + = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_TR_WORD0; + filter_replace_buf.data[8] |= + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED; + filter_replace_buf.data[10] = I40E_TR_VXLAN_GRE_KEY_MASK | + I40E_TR_GENEVE_KEY_MASK | + I40E_TR_GENERIC_UDP_TUNNEL_MASK; + filter_replace_buf.data[11] = (I40E_TR_GRE_KEY_MASK | + I40E_TR_GRE_KEY_WITH_XSUM_MASK | + I40E_TR_GRE_NO_KEY_MASK) >> 8; + + status = i40e_aq_replace_cloud_filters(hw, &filter_replace, + &filter_replace_buf); + if (!status && (filter_replace.old_filter_type != + filter_replace.new_filter_type)) + PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type." + " original: 0x%x, new: 0x%x", + dev->device->name, + filter_replace.old_filter_type, + filter_replace.new_filter_type); + + return status; +} + +static enum +i40e_status_code i40e_replace_mpls_cloud_filter(struct i40e_pf *pf) +{ + struct i40e_aqc_replace_cloud_filters_cmd filter_replace; + struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf; + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev; + enum i40e_status_code status = I40E_SUCCESS; + + if (pf->support_multi_driver) { + PMD_DRV_LOG(ERR, "Replace cloud filter is not supported."); + return I40E_NOT_SUPPORTED; + } + + /* For MPLSoUDP */ + memset(&filter_replace, 0, + sizeof(struct i40e_aqc_replace_cloud_filters_cmd)); + memset(&filter_replace_buf, 0, + sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf)); + filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER | + I40E_AQC_MIRROR_CLOUD_FILTER; + filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IIP; + filter_replace.new_filter_type = + I40E_AQC_ADD_CLOUD_FILTER_0X11; + /* Prepare the buffer, 2 entries */ + filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG; + filter_replace_buf.data[0] |= + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED; + filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_0X11; + filter_replace_buf.data[4] |= + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED; + status = i40e_aq_replace_cloud_filters(hw, &filter_replace, + &filter_replace_buf); + if (status < 0) + return status; + if (filter_replace.old_filter_type != + filter_replace.new_filter_type) + PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type." + " original: 0x%x, new: 0x%x", + dev->device->name, + filter_replace.old_filter_type, + filter_replace.new_filter_type); + + /* For MPLSoGRE */ + memset(&filter_replace, 0, + sizeof(struct i40e_aqc_replace_cloud_filters_cmd)); + memset(&filter_replace_buf, 0, + sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf)); + + filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER | + I40E_AQC_MIRROR_CLOUD_FILTER; + filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IMAC; + filter_replace.new_filter_type = + I40E_AQC_ADD_CLOUD_FILTER_0X12; + /* Prepare the buffer, 2 entries */ + filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG; + filter_replace_buf.data[0] |= + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED; + filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_0X11; + filter_replace_buf.data[4] |= + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED; + + status = i40e_aq_replace_cloud_filters(hw, &filter_replace, + &filter_replace_buf); + if (!status && (filter_replace.old_filter_type != + filter_replace.new_filter_type)) + PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type." 
+ " original: 0x%x, new: 0x%x", + dev->device->name, + filter_replace.old_filter_type, + filter_replace.new_filter_type); + + return status; +} + +static enum i40e_status_code +i40e_replace_gtp_l1_filter(struct i40e_pf *pf) +{ + struct i40e_aqc_replace_cloud_filters_cmd filter_replace; + struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf; + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev; + enum i40e_status_code status = I40E_SUCCESS; + + if (pf->support_multi_driver) { + PMD_DRV_LOG(ERR, "Replace l1 filter is not supported."); + return I40E_NOT_SUPPORTED; + } + + /* For GTP-C */ + memset(&filter_replace, 0, + sizeof(struct i40e_aqc_replace_cloud_filters_cmd)); + memset(&filter_replace_buf, 0, + sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf)); + /* create L1 filter */ + filter_replace.old_filter_type = + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC; + filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X12; + filter_replace.tr_bit = I40E_AQC_NEW_TR_22 | + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED; + /* Prepare the buffer, 2 entries */ + filter_replace_buf.data[0] = + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0; + filter_replace_buf.data[0] |= + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED; + filter_replace_buf.data[2] = 0xFF; + filter_replace_buf.data[3] = 0xFF; + filter_replace_buf.data[4] = + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1; + filter_replace_buf.data[4] |= + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED; + filter_replace_buf.data[6] = 0xFF; + filter_replace_buf.data[7] = 0xFF; + status = i40e_aq_replace_cloud_filters(hw, &filter_replace, + &filter_replace_buf); + if (status < 0) + return status; + if (filter_replace.old_filter_type != + filter_replace.new_filter_type) + PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type." + " original: 0x%x, new: 0x%x", + dev->device->name, + filter_replace.old_filter_type, + filter_replace.new_filter_type); + + /* for GTP-U */ + memset(&filter_replace, 0, + sizeof(struct i40e_aqc_replace_cloud_filters_cmd)); + memset(&filter_replace_buf, 0, + sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf)); + /* create L1 filter */ + filter_replace.old_filter_type = + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TUNNLE_KEY; + filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X13; + filter_replace.tr_bit = I40E_AQC_NEW_TR_21 | + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED; + /* Prepare the buffer, 2 entries */ + filter_replace_buf.data[0] = + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0; + filter_replace_buf.data[0] |= + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED; + filter_replace_buf.data[2] = 0xFF; + filter_replace_buf.data[3] = 0xFF; + filter_replace_buf.data[4] = + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1; + filter_replace_buf.data[4] |= + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED; + filter_replace_buf.data[6] = 0xFF; + filter_replace_buf.data[7] = 0xFF; + + status = i40e_aq_replace_cloud_filters(hw, &filter_replace, + &filter_replace_buf); + if (!status && (filter_replace.old_filter_type != + filter_replace.new_filter_type)) + PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type." 
+ " original: 0x%x, new: 0x%x", + dev->device->name, + filter_replace.old_filter_type, + filter_replace.new_filter_type); + + return status; +} + +static enum +i40e_status_code i40e_replace_gtp_cloud_filter(struct i40e_pf *pf) +{ + struct i40e_aqc_replace_cloud_filters_cmd filter_replace; + struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf; + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev; + enum i40e_status_code status = I40E_SUCCESS; + + if (pf->support_multi_driver) { + PMD_DRV_LOG(ERR, "Replace cloud filter is not supported."); + return I40E_NOT_SUPPORTED; + } + + /* for GTP-C */ + memset(&filter_replace, 0, + sizeof(struct i40e_aqc_replace_cloud_filters_cmd)); + memset(&filter_replace_buf, 0, + sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf)); + filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER; + filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN; + filter_replace.new_filter_type = + I40E_AQC_ADD_CLOUD_FILTER_0X11; + /* Prepare the buffer, 2 entries */ + filter_replace_buf.data[0] = I40E_AQC_ADD_L1_FILTER_0X12; + filter_replace_buf.data[0] |= + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED; + filter_replace_buf.data[4] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG; + filter_replace_buf.data[4] |= + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED; + status = i40e_aq_replace_cloud_filters(hw, &filter_replace, + &filter_replace_buf); + if (status < 0) + return status; + if (filter_replace.old_filter_type != + filter_replace.new_filter_type) + PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type." + " original: 0x%x, new: 0x%x", + dev->device->name, + filter_replace.old_filter_type, + filter_replace.new_filter_type); + + /* for GTP-U */ + memset(&filter_replace, 0, + sizeof(struct i40e_aqc_replace_cloud_filters_cmd)); + memset(&filter_replace_buf, 0, + sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf)); + filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER; + filter_replace.old_filter_type = + I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID; + filter_replace.new_filter_type = + I40E_AQC_ADD_CLOUD_FILTER_0X12; + /* Prepare the buffer, 2 entries */ + filter_replace_buf.data[0] = I40E_AQC_ADD_L1_FILTER_0X13; + filter_replace_buf.data[0] |= + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED; + filter_replace_buf.data[4] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG; + filter_replace_buf.data[4] |= + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED; + + status = i40e_aq_replace_cloud_filters(hw, &filter_replace, + &filter_replace_buf); + if (!status && (filter_replace.old_filter_type != + filter_replace.new_filter_type)) + PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type." 
+ " original: 0x%x, new: 0x%x", + dev->device->name, + filter_replace.old_filter_type, + filter_replace.new_filter_type); + + return status; +} + +int +i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf, + struct i40e_tunnel_filter_conf *tunnel_filter, + uint8_t add) +{ + uint16_t ip_type; + uint32_t ipv4_addr, ipv4_addr_le; + uint8_t i, tun_type = 0; + /* internal variable to convert ipv6 byte order */ + uint32_t convert_ipv6[4]; + int val, ret = 0; + struct i40e_pf_vf *vf = NULL; + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + struct i40e_vsi *vsi; + struct i40e_aqc_cloud_filters_element_bb *cld_filter; + struct i40e_aqc_cloud_filters_element_bb *pfilter; + struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel; + struct i40e_tunnel_filter *tunnel, *node; + struct i40e_tunnel_filter check_filter; /* Check if filter exists */ + uint32_t teid_le; + bool big_buffer = 0; + + cld_filter = rte_zmalloc("tunnel_filter", + sizeof(struct i40e_aqc_add_rm_cloud_filt_elem_ext), + 0); + + if (cld_filter == NULL) { + PMD_DRV_LOG(ERR, "Failed to alloc memory."); + return -ENOMEM; + } + pfilter = cld_filter; + + rte_ether_addr_copy(&tunnel_filter->outer_mac, + (struct rte_ether_addr *)&pfilter->element.outer_mac); + rte_ether_addr_copy(&tunnel_filter->inner_mac, + (struct rte_ether_addr *)&pfilter->element.inner_mac); + + pfilter->element.inner_vlan = + rte_cpu_to_le_16(tunnel_filter->inner_vlan); + if (tunnel_filter->ip_type == I40E_TUNNEL_IPTYPE_IPV4) { + ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4; + ipv4_addr = rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv4_addr); + ipv4_addr_le = rte_cpu_to_le_32(ipv4_addr); + rte_memcpy(&pfilter->element.ipaddr.v4.data, + &ipv4_addr_le, + sizeof(pfilter->element.ipaddr.v4.data)); + } else { + ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6; + for (i = 0; i < 4; i++) { + convert_ipv6[i] = + rte_cpu_to_le_32(rte_be_to_cpu_32( + tunnel_filter->ip_addr.ipv6_addr[i])); + } + rte_memcpy(&pfilter->element.ipaddr.v6.data, + &convert_ipv6, + sizeof(pfilter->element.ipaddr.v6.data)); + } + + /* check tunneled type */ + switch (tunnel_filter->tunnel_type) { + case I40E_TUNNEL_TYPE_VXLAN: + tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN; + break; + case I40E_TUNNEL_TYPE_NVGRE: + tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC; + break; + case I40E_TUNNEL_TYPE_IP_IN_GRE: + tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_IP; + break; + case I40E_TUNNEL_TYPE_MPLSoUDP: + if (!pf->mpls_replace_flag) { + i40e_replace_mpls_l1_filter(pf); + i40e_replace_mpls_cloud_filter(pf); + pf->mpls_replace_flag = 1; + } + teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id); + pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0] = + teid_le >> 4; + pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] = + (teid_le & 0xF) << 12; + pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] = + 0x40; + big_buffer = 1; + tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSOUDP; + break; + case I40E_TUNNEL_TYPE_MPLSoGRE: + if (!pf->mpls_replace_flag) { + i40e_replace_mpls_l1_filter(pf); + i40e_replace_mpls_cloud_filter(pf); + pf->mpls_replace_flag = 1; + } + teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id); + pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0] = + teid_le >> 4; + pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] = + (teid_le & 0xF) << 12; + pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] = + 0x0; + big_buffer = 1; + tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSOGRE; + break; + case I40E_TUNNEL_TYPE_GTPC: + if (!pf->gtp_replace_flag) { + 
i40e_replace_gtp_l1_filter(pf); + i40e_replace_gtp_cloud_filter(pf); + pf->gtp_replace_flag = 1; + } + teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id); + pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD0] = + (teid_le >> 16) & 0xFFFF; + pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD1] = + teid_le & 0xFFFF; + pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD2] = + 0x0; + big_buffer = 1; + break; + case I40E_TUNNEL_TYPE_GTPU: + if (!pf->gtp_replace_flag) { + i40e_replace_gtp_l1_filter(pf); + i40e_replace_gtp_cloud_filter(pf); + pf->gtp_replace_flag = 1; + } + teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id); + pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD0] = + (teid_le >> 16) & 0xFFFF; + pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD1] = + teid_le & 0xFFFF; + pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD2] = + 0x0; + big_buffer = 1; + break; + case I40E_TUNNEL_TYPE_QINQ: + if (!pf->qinq_replace_flag) { + ret = i40e_cloud_filter_qinq_create(pf); + if (ret < 0) + PMD_DRV_LOG(DEBUG, + "QinQ tunnel filter already created."); + pf->qinq_replace_flag = 1; + } + /* Add in the General fields the values of + * the Outer and Inner VLAN + * Big Buffer should be set, see changes in + * i40e_aq_add_cloud_filters + */ + pfilter->general_fields[0] = tunnel_filter->inner_vlan; + pfilter->general_fields[1] = tunnel_filter->outer_vlan; + big_buffer = 1; + break; + default: + /* Other tunnel types is not supported. */ + PMD_DRV_LOG(ERR, "tunnel type is not supported."); + rte_free(cld_filter); + return -EINVAL; + } + + if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoUDP) + pfilter->element.flags = + I40E_AQC_ADD_CLOUD_FILTER_0X11; + else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoGRE) + pfilter->element.flags = + I40E_AQC_ADD_CLOUD_FILTER_0X12; + else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_GTPC) + pfilter->element.flags = + I40E_AQC_ADD_CLOUD_FILTER_0X11; + else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_GTPU) + pfilter->element.flags = + I40E_AQC_ADD_CLOUD_FILTER_0X12; + else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_QINQ) + pfilter->element.flags |= + I40E_AQC_ADD_CLOUD_FILTER_0X10; + else { + val = i40e_dev_get_filter_type(tunnel_filter->filter_type, + &pfilter->element.flags); + if (val < 0) { + rte_free(cld_filter); + return -EINVAL; + } + } + + pfilter->element.flags |= rte_cpu_to_le_16( + I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE | + ip_type | (tun_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT)); + pfilter->element.tenant_id = rte_cpu_to_le_32(tunnel_filter->tenant_id); + pfilter->element.queue_number = + rte_cpu_to_le_16(tunnel_filter->queue_id); + + if (!tunnel_filter->is_to_vf) + vsi = pf->main_vsi; + else { + if (tunnel_filter->vf_id >= pf->vf_num) { + PMD_DRV_LOG(ERR, "Invalid argument."); + rte_free(cld_filter); + return -EINVAL; + } + vf = &pf->vfs[tunnel_filter->vf_id]; + vsi = vf->vsi; + } + + /* Check if there is the filter in SW list */ + memset(&check_filter, 0, sizeof(check_filter)); + i40e_tunnel_filter_convert(cld_filter, &check_filter); + check_filter.is_to_vf = tunnel_filter->is_to_vf; + check_filter.vf_id = tunnel_filter->vf_id; + node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &check_filter.input); + if (add && node) { + PMD_DRV_LOG(ERR, "Conflict with existing tunnel rules!"); + rte_free(cld_filter); + return -EINVAL; + } + + if (!add && !node) { + PMD_DRV_LOG(ERR, "There's no corresponding tunnel filter!"); + rte_free(cld_filter); + return -EINVAL; + } + + if 
(add) { + if (big_buffer) + ret = i40e_aq_add_cloud_filters_bb(hw, + vsi->seid, cld_filter, 1); + else + ret = i40e_aq_add_cloud_filters(hw, + vsi->seid, &cld_filter->element, 1); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Failed to add a tunnel filter."); + rte_free(cld_filter); + return -ENOTSUP; + } + tunnel = rte_zmalloc("tunnel_filter", sizeof(*tunnel), 0); + if (tunnel == NULL) { + PMD_DRV_LOG(ERR, "Failed to alloc memory."); + rte_free(cld_filter); + return -ENOMEM; + } + + rte_memcpy(tunnel, &check_filter, sizeof(check_filter)); + ret = i40e_sw_tunnel_filter_insert(pf, tunnel); + if (ret < 0) + rte_free(tunnel); + } else { + if (big_buffer) + ret = i40e_aq_rem_cloud_filters_bb( + hw, vsi->seid, cld_filter, 1); + else + ret = i40e_aq_rem_cloud_filters(hw, vsi->seid, + &cld_filter->element, 1); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Failed to delete a tunnel filter."); + rte_free(cld_filter); + return -ENOTSUP; + } + ret = i40e_sw_tunnel_filter_del(pf, &node->input); + } + + rte_free(cld_filter); + return ret; +} + +static int +i40e_get_vxlan_port_idx(struct i40e_pf *pf, uint16_t port) +{ + uint8_t i; + + for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) { + if (pf->vxlan_ports[i] == port) + return i; + } + + return -1; +} + +static int +i40e_add_vxlan_port(struct i40e_pf *pf, uint16_t port, int udp_type) +{ + int idx, ret; + uint8_t filter_idx = 0; + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + + idx = i40e_get_vxlan_port_idx(pf, port); + + /* Check if port already exists */ + if (idx >= 0) { + PMD_DRV_LOG(ERR, "Port %d already offloaded", port); + return -EINVAL; + } + + /* Now check if there is space to add the new port */ + idx = i40e_get_vxlan_port_idx(pf, 0); + if (idx < 0) { + PMD_DRV_LOG(ERR, + "Maximum number of UDP ports reached, not adding port %d", + port); + return -ENOSPC; + } + + ret = i40e_aq_add_udp_tunnel(hw, port, udp_type, + &filter_idx, NULL); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Failed to add VXLAN UDP port %d", port); + return -1; + } + + PMD_DRV_LOG(INFO, "Added port %d with AQ command with index %d", + port, filter_idx); + + /* New port: add it and mark its index in the bitmap */ + pf->vxlan_ports[idx] = port; + pf->vxlan_bitmap |= (1 << idx); + + if (!(pf->flags & I40E_FLAG_VXLAN)) + pf->flags |= I40E_FLAG_VXLAN; + + return 0; +} + +static int +i40e_del_vxlan_port(struct i40e_pf *pf, uint16_t port) +{ + int idx; + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + + if (!(pf->flags & I40E_FLAG_VXLAN)) { + PMD_DRV_LOG(ERR, "VXLAN UDP port was not configured."); + return -EINVAL; + } + + idx = i40e_get_vxlan_port_idx(pf, port); + + if (idx < 0) { + PMD_DRV_LOG(ERR, "Port %d doesn't exist", port); + return -EINVAL; + } + + if (i40e_aq_del_udp_tunnel(hw, idx, NULL) < 0) { + PMD_DRV_LOG(ERR, "Failed to delete VXLAN UDP port %d", port); + return -1; + } + + PMD_DRV_LOG(INFO, "Deleted port %d with AQ command with index %d", + port, idx); + + pf->vxlan_ports[idx] = 0; + pf->vxlan_bitmap &= ~(1 << idx); + + if (!pf->vxlan_bitmap) + pf->flags &= ~I40E_FLAG_VXLAN; + + return 0; +} + +/* Add UDP tunneling port */ +static int +i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev, + struct rte_eth_udp_tunnel *udp_tunnel) +{ + int ret = 0; + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + + if (udp_tunnel == NULL) + return -EINVAL; + + switch (udp_tunnel->prot_type) { + case RTE_TUNNEL_TYPE_VXLAN: + ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port, + I40E_AQC_TUNNEL_TYPE_VXLAN); + break; + case RTE_TUNNEL_TYPE_VXLAN_GPE: + ret = i40e_add_vxlan_port(pf, 
udp_tunnel->udp_port, + I40E_AQC_TUNNEL_TYPE_VXLAN_GPE); + break; + case RTE_TUNNEL_TYPE_GENEVE: + case RTE_TUNNEL_TYPE_TEREDO: + PMD_DRV_LOG(ERR, "Tunnel type is not supported now."); + ret = -1; + break; + + default: + PMD_DRV_LOG(ERR, "Invalid tunnel type"); + ret = -1; + break; + } + + return ret; +} + +/* Remove UDP tunneling port */ +static int +i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev, + struct rte_eth_udp_tunnel *udp_tunnel) +{ + int ret = 0; + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + + if (udp_tunnel == NULL) + return -EINVAL; + + switch (udp_tunnel->prot_type) { + case RTE_TUNNEL_TYPE_VXLAN: + case RTE_TUNNEL_TYPE_VXLAN_GPE: + ret = i40e_del_vxlan_port(pf, udp_tunnel->udp_port); + break; + case RTE_TUNNEL_TYPE_GENEVE: + case RTE_TUNNEL_TYPE_TEREDO: + PMD_DRV_LOG(ERR, "Tunnel type is not supported now."); + ret = -1; + break; + default: + PMD_DRV_LOG(ERR, "Invalid tunnel type"); + ret = -1; + break; + } + + return ret; +} + +/* Calculate the maximum number of contiguous PF queues that are configured */ +static int +i40e_pf_calc_configured_queues_num(struct i40e_pf *pf) +{ + struct rte_eth_dev_data *data = pf->dev_data; + int i, num; + struct i40e_rx_queue *rxq; + + num = 0; + for (i = 0; i < pf->lan_nb_qps; i++) { + rxq = data->rx_queues[i]; + if (rxq && rxq->q_set) + num++; + else + break; + } + + return num; +} + +/* Configure RSS */ +static int +i40e_pf_config_rss(struct i40e_pf *pf) +{ + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + struct rte_eth_rss_conf rss_conf; + uint32_t i, lut = 0; + uint16_t j, num; + + /* + * If both VMDQ and RSS enabled, not all of PF queues are configured. + * It's necessary to calculate the actual PF queues that are configured. + */ + if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) + num = i40e_pf_calc_configured_queues_num(pf); + else + num = pf->dev_data->nb_rx_queues; + + num = RTE_MIN(num, I40E_MAX_Q_PER_TC); + PMD_INIT_LOG(INFO, "Max of contiguous %u PF queues are configured", + num); + + if (num == 0) { + PMD_INIT_LOG(ERR, + "No PF queues are configured to enable RSS for port %u", + pf->dev_data->port_id); + return -ENOTSUP; + } + + if (pf->adapter->rss_reta_updated == 0) { + for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) { + if (j == num) + j = 0; + lut = (lut << 8) | (j & ((0x1 << + hw->func_caps.rss_table_entry_width) - 1)); + if ((i & 3) == 3) + I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), + rte_bswap32(lut)); + } + } + + rss_conf = pf->dev_data->dev_conf.rx_adv_conf.rss_conf; + if ((rss_conf.rss_hf & pf->adapter->flow_types_mask) == 0) { + i40e_pf_disable_rss(pf); + return 0; + } + if (rss_conf.rss_key == NULL || rss_conf.rss_key_len < + (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) { + /* Random default keys */ + static uint32_t rss_key_default[] = {0x6b793944, + 0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8, + 0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605, + 0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581}; + + rss_conf.rss_key = (uint8_t *)rss_key_default; + rss_conf.rss_key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) * + sizeof(uint32_t); + } + + return i40e_hw_rss_hash_set(pf, &rss_conf); +} + +static int +i40e_tunnel_filter_param_check(struct i40e_pf *pf, + struct rte_eth_tunnel_filter_conf *filter) +{ + if (pf == NULL || filter == NULL) { + PMD_DRV_LOG(ERR, "Invalid parameter"); + return -EINVAL; + } + + if (filter->queue_id >= pf->dev_data->nb_rx_queues) { + PMD_DRV_LOG(ERR, "Invalid queue ID"); + return -EINVAL; + } + + if (filter->inner_vlan > 
RTE_ETHER_MAX_VLAN_ID) { + PMD_DRV_LOG(ERR, "Invalid inner VLAN ID"); + return -EINVAL; + } + + if ((filter->filter_type & ETH_TUNNEL_FILTER_OMAC) && + (rte_is_zero_ether_addr(&filter->outer_mac))) { + PMD_DRV_LOG(ERR, "Cannot add NULL outer MAC address"); + return -EINVAL; + } + + if ((filter->filter_type & ETH_TUNNEL_FILTER_IMAC) && + (rte_is_zero_ether_addr(&filter->inner_mac))) { + PMD_DRV_LOG(ERR, "Cannot add NULL inner MAC address"); + return -EINVAL; + } + + return 0; +} + +#define I40E_GL_PRS_FVBM_MSK_ENA 0x80000000 +#define I40E_GL_PRS_FVBM(_i) (0x00269760 + ((_i) * 4)) +static int +i40e_dev_set_gre_key_len(struct i40e_hw *hw, uint8_t len) +{ + struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf; + uint32_t val, reg; + int ret = -EINVAL; + + if (pf->support_multi_driver) { + PMD_DRV_LOG(ERR, "GRE key length configuration is unsupported"); + return -ENOTSUP; + } + + val = I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2)); + PMD_DRV_LOG(DEBUG, "Read original GL_PRS_FVBM with 0x%08x", val); + + if (len == 3) { + reg = val | I40E_GL_PRS_FVBM_MSK_ENA; + } else if (len == 4) { + reg = val & ~I40E_GL_PRS_FVBM_MSK_ENA; + } else { + PMD_DRV_LOG(ERR, "Unsupported GRE key length of %u", len); + return ret; + } + + if (reg != val) { + ret = i40e_aq_debug_write_global_register(hw, + I40E_GL_PRS_FVBM(2), + reg, NULL); + if (ret != 0) + return ret; + PMD_DRV_LOG(DEBUG, "Global register 0x%08x is changed " + "with value 0x%08x", + I40E_GL_PRS_FVBM(2), reg); + } else { + ret = 0; + } + PMD_DRV_LOG(DEBUG, "Read modified GL_PRS_FVBM with 0x%08x", + I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2))); + + return ret; +} + +static int +i40e_dev_global_config_set(struct i40e_hw *hw, struct rte_eth_global_cfg *cfg) +{ + int ret = -EINVAL; + + if (!hw || !cfg) + return -EINVAL; + + switch (cfg->cfg_type) { + case RTE_ETH_GLOBAL_CFG_TYPE_GRE_KEY_LEN: + ret = i40e_dev_set_gre_key_len(hw, cfg->cfg.gre_key_len); + break; + default: + PMD_DRV_LOG(ERR, "Unknown config type %u", cfg->cfg_type); + break; + } + + return ret; +} + +static int +i40e_filter_ctrl_global_config(struct rte_eth_dev *dev, + enum rte_filter_op filter_op, + void *arg) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int ret = I40E_ERR_PARAM; + + switch (filter_op) { + case RTE_ETH_FILTER_SET: + ret = i40e_dev_global_config_set(hw, + (struct rte_eth_global_cfg *)arg); + break; + default: + PMD_DRV_LOG(ERR, "unknown operation %u", filter_op); + break; + } + + return ret; +} + +static int +i40e_tunnel_filter_handle(struct rte_eth_dev *dev, + enum rte_filter_op filter_op, + void *arg) +{ + struct rte_eth_tunnel_filter_conf *filter; + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + int ret = I40E_SUCCESS; + + filter = (struct rte_eth_tunnel_filter_conf *)(arg); + + if (i40e_tunnel_filter_param_check(pf, filter) < 0) + return I40E_ERR_PARAM; + + switch (filter_op) { + case RTE_ETH_FILTER_NOP: + if (!(pf->flags & I40E_FLAG_VXLAN)) + ret = I40E_NOT_SUPPORTED; + break; + case RTE_ETH_FILTER_ADD: + ret = i40e_dev_tunnel_filter_set(pf, filter, 1); + break; + case RTE_ETH_FILTER_DELETE: + ret = i40e_dev_tunnel_filter_set(pf, filter, 0); + break; + default: + PMD_DRV_LOG(ERR, "unknown operation %u", filter_op); + ret = I40E_ERR_PARAM; + break; + } + + return ret; +} + +static int +i40e_pf_config_mq_rx(struct i40e_pf *pf) +{ + int ret = 0; + enum rte_eth_rx_mq_mode mq_mode = pf->dev_data->dev_conf.rxmode.mq_mode; + + /* RSS setup */ + if (mq_mode & ETH_MQ_RX_RSS_FLAG) + ret = i40e_pf_config_rss(pf); + else + 
i40e_pf_disable_rss(pf); + + return ret; +} + +/* Get the symmetric hash enable configurations per port */ +static void +i40e_get_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t *enable) +{ + uint32_t reg = i40e_read_rx_ctl(hw, I40E_PRTQF_CTL_0); + + *enable = reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK ? 1 : 0; +} + +/* Set the symmetric hash enable configurations per port */ +static void +i40e_set_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t enable) +{ + uint32_t reg = i40e_read_rx_ctl(hw, I40E_PRTQF_CTL_0); + + if (enable > 0) { + if (reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK) { + PMD_DRV_LOG(INFO, + "Symmetric hash has already been enabled"); + return; + } + reg |= I40E_PRTQF_CTL_0_HSYM_ENA_MASK; + } else { + if (!(reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK)) { + PMD_DRV_LOG(INFO, + "Symmetric hash has already been disabled"); + return; + } + reg &= ~I40E_PRTQF_CTL_0_HSYM_ENA_MASK; + } + i40e_write_rx_ctl(hw, I40E_PRTQF_CTL_0, reg); + I40E_WRITE_FLUSH(hw); +} + +/* + * Get global configurations of hash function type and symmetric hash enable + * per flow type (pctype). Note that global configuration means it affects all + * the ports on the same NIC. + */ +static int +i40e_get_hash_filter_global_config(struct i40e_hw *hw, + struct rte_eth_hash_global_conf *g_cfg) +{ + struct i40e_adapter *adapter = (struct i40e_adapter *)hw->back; + uint32_t reg; + uint16_t i, j; + + memset(g_cfg, 0, sizeof(*g_cfg)); + reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL); + if (reg & I40E_GLQF_CTL_HTOEP_MASK) + g_cfg->hash_func = RTE_ETH_HASH_FUNCTION_TOEPLITZ; + else + g_cfg->hash_func = RTE_ETH_HASH_FUNCTION_SIMPLE_XOR; + PMD_DRV_LOG(DEBUG, "Hash function is %s", + (reg & I40E_GLQF_CTL_HTOEP_MASK) ? "Toeplitz" : "Simple XOR"); + + /* + * As i40e supports less than 64 flow types, only first 64 bits need to + * be checked. + */ + for (i = 1; i < RTE_SYM_HASH_MASK_ARRAY_SIZE; i++) { + g_cfg->valid_bit_mask[i] = 0ULL; + g_cfg->sym_hash_enable_mask[i] = 0ULL; + } + + g_cfg->valid_bit_mask[0] = adapter->flow_types_mask; + + for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < UINT64_BIT; i++) { + if (!adapter->pctypes_tbl[i]) + continue; + for (j = I40E_FILTER_PCTYPE_INVALID + 1; + j < I40E_FILTER_PCTYPE_MAX; j++) { + if (adapter->pctypes_tbl[i] & (1ULL << j)) { + reg = i40e_read_rx_ctl(hw, I40E_GLQF_HSYM(j)); + if (reg & I40E_GLQF_HSYM_SYMH_ENA_MASK) { + g_cfg->sym_hash_enable_mask[0] |= + (1ULL << i); + } + } + } + } + + return 0; +} + +static int +i40e_hash_global_config_check(const struct i40e_adapter *adapter, + const struct rte_eth_hash_global_conf *g_cfg) +{ + uint32_t i; + uint64_t mask0, i40e_mask = adapter->flow_types_mask; + + if (g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_TOEPLITZ && + g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_SIMPLE_XOR && + g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_DEFAULT) { + PMD_DRV_LOG(ERR, "Unsupported hash function type %d", + g_cfg->hash_func); + return -EINVAL; + } + + /* + * As i40e supports less than 64 flow types, only first 64 bits need to + * be checked. + */ + mask0 = g_cfg->valid_bit_mask[0]; + for (i = 0; i < RTE_SYM_HASH_MASK_ARRAY_SIZE; i++) { + if (i == 0) { + /* Check if any unsupported flow type configured */ + if ((mask0 | i40e_mask) ^ i40e_mask) + goto mask_err; + } else { + if (g_cfg->valid_bit_mask[i]) + goto mask_err; + } + } + + return 0; + +mask_err: + PMD_DRV_LOG(ERR, "i40e unsupported flow type bit(s) configured"); + + return -EINVAL; +} + +/* + * Set global configurations of hash function type and symmetric hash enable + * per flow type (pctype). 
Note any modifying global configuration will affect + * all the ports on the same NIC. + */ +static int +i40e_set_hash_filter_global_config(struct i40e_hw *hw, + struct rte_eth_hash_global_conf *g_cfg) +{ + struct i40e_adapter *adapter = (struct i40e_adapter *)hw->back; + struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf; + int ret; + uint16_t i, j; + uint32_t reg; + uint64_t mask0 = g_cfg->valid_bit_mask[0] & adapter->flow_types_mask; + + if (pf->support_multi_driver) { + PMD_DRV_LOG(ERR, "Hash global configuration is not supported."); + return -ENOTSUP; + } + + /* Check the input parameters */ + ret = i40e_hash_global_config_check(adapter, g_cfg); + if (ret < 0) + return ret; + + /* + * As i40e supports less than 64 flow types, only first 64 bits need to + * be configured. + */ + for (i = RTE_ETH_FLOW_UNKNOWN + 1; mask0 && i < UINT64_BIT; i++) { + if (mask0 & (1UL << i)) { + reg = (g_cfg->sym_hash_enable_mask[0] & (1ULL << i)) ? + I40E_GLQF_HSYM_SYMH_ENA_MASK : 0; + + for (j = I40E_FILTER_PCTYPE_INVALID + 1; + j < I40E_FILTER_PCTYPE_MAX; j++) { + if (adapter->pctypes_tbl[i] & (1ULL << j)) + i40e_write_global_rx_ctl(hw, + I40E_GLQF_HSYM(j), + reg); + } + } + } + + reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL); + if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_TOEPLITZ) { + /* Toeplitz */ + if (reg & I40E_GLQF_CTL_HTOEP_MASK) { + PMD_DRV_LOG(DEBUG, + "Hash function already set to Toeplitz"); + goto out; + } + reg |= I40E_GLQF_CTL_HTOEP_MASK; + } else if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) { + /* Simple XOR */ + if (!(reg & I40E_GLQF_CTL_HTOEP_MASK)) { + PMD_DRV_LOG(DEBUG, + "Hash function already set to Simple XOR"); + goto out; + } + reg &= ~I40E_GLQF_CTL_HTOEP_MASK; + } else + /* Use the default, and keep it as it is */ + goto out; + + i40e_write_global_rx_ctl(hw, I40E_GLQF_CTL, reg); + +out: + I40E_WRITE_FLUSH(hw); + + return 0; +} + +/** + * Valid input sets for hash and flow director filters per PCTYPE + */ +static uint64_t +i40e_get_valid_input_set(enum i40e_filter_pctype pctype, + enum rte_filter_type filter) +{ + uint64_t valid; + + static const uint64_t valid_hash_inset_table[] = { + [I40E_FILTER_PCTYPE_FRAG_IPV4] = + I40E_INSET_DMAC | I40E_INSET_SMAC | + I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER | + I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_SRC | + I40E_INSET_IPV4_DST | I40E_INSET_IPV4_TOS | + I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL | + I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID | + I40E_INSET_FLEX_PAYLOAD, + [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] = + I40E_INSET_DMAC | I40E_INSET_SMAC | + I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER | + I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS | + I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL | + I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID | + I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST | + I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT | + I40E_INSET_FLEX_PAYLOAD, + [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] = + I40E_INSET_DMAC | I40E_INSET_SMAC | + I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER | + I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS | + I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL | + I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID | + I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST | + I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT | + I40E_INSET_FLEX_PAYLOAD, + [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] = + I40E_INSET_DMAC | I40E_INSET_SMAC | + I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER | + I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS | + I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL | + I40E_INSET_TUNNEL_DMAC | 
I40E_INSET_TUNNEL_ID | + I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST | + I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT | + I40E_INSET_FLEX_PAYLOAD, + [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] = + I40E_INSET_DMAC | I40E_INSET_SMAC | + I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER | + I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS | + I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL | + I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID | + I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST | + I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT | + I40E_INSET_TCP_FLAGS | I40E_INSET_FLEX_PAYLOAD, + [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] = + I40E_INSET_DMAC | I40E_INSET_SMAC | + I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER | + I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS | + I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL | + I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID | + I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST | + I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT | + I40E_INSET_TCP_FLAGS | I40E_INSET_FLEX_PAYLOAD, + [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] = + I40E_INSET_DMAC | I40E_INSET_SMAC | + I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER | + I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS | + I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL | + I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID | + I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST | + I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT | + I40E_INSET_SCTP_VT | I40E_INSET_FLEX_PAYLOAD, + [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] = + I40E_INSET_DMAC | I40E_INSET_SMAC | + I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER | + I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS | + I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL | + I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID | + I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST | + I40E_INSET_FLEX_PAYLOAD, + [I40E_FILTER_PCTYPE_FRAG_IPV6] = + I40E_INSET_DMAC | I40E_INSET_SMAC | + I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER | + I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC | + I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR | + I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_TUNNEL_DMAC | + I40E_INSET_TUNNEL_ID | I40E_INSET_IPV6_SRC | + I40E_INSET_IPV6_DST | I40E_INSET_FLEX_PAYLOAD, + [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] = + I40E_INSET_DMAC | I40E_INSET_SMAC | + I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER | + I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC | + I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR | + I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC | + I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT | + I40E_INSET_DST_PORT | I40E_INSET_FLEX_PAYLOAD, + [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] = + I40E_INSET_DMAC | I40E_INSET_SMAC | + I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER | + I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC | + I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR | + I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC | + I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT | + I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS | + I40E_INSET_FLEX_PAYLOAD, + [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] = + I40E_INSET_DMAC | I40E_INSET_SMAC | + I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER | + I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC | + I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR | + I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC | + I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT | + I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS | + I40E_INSET_FLEX_PAYLOAD, + [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] = + I40E_INSET_DMAC | I40E_INSET_SMAC | + I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER | + I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC | + I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR | + 
I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC | + I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT | + I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS | + I40E_INSET_FLEX_PAYLOAD, + [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] = + I40E_INSET_DMAC | I40E_INSET_SMAC | + I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER | + I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC | + I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR | + I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC | + I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT | + I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS | + I40E_INSET_FLEX_PAYLOAD, + [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] = + I40E_INSET_DMAC | I40E_INSET_SMAC | + I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER | + I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC | + I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR | + I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC | + I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT | + I40E_INSET_DST_PORT | I40E_INSET_SCTP_VT | + I40E_INSET_FLEX_PAYLOAD, + [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] = + I40E_INSET_DMAC | I40E_INSET_SMAC | + I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER | + I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC | + I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR | + I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC | + I40E_INSET_IPV6_DST | I40E_INSET_TUNNEL_ID | + I40E_INSET_FLEX_PAYLOAD, + [I40E_FILTER_PCTYPE_L2_PAYLOAD] = + I40E_INSET_DMAC | I40E_INSET_SMAC | + I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER | + I40E_INSET_VLAN_TUNNEL | I40E_INSET_LAST_ETHER_TYPE | + I40E_INSET_FLEX_PAYLOAD, + }; + + /** + * Flow director supports only fields defined in + * union rte_eth_fdir_flow. + */ + static const uint64_t valid_fdir_inset_table[] = { + [I40E_FILTER_PCTYPE_FRAG_IPV4] = + I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER | + I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST | + I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_PROTO | + I40E_INSET_IPV4_TTL, + [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] = + I40E_INSET_DMAC | I40E_INSET_SMAC | + I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER | + I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST | + I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL | + I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT, + [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] = + I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER | + I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST | + I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL | + I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT, + [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] = + I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER | + I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST | + I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL | + I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT, + [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] = + I40E_INSET_DMAC | I40E_INSET_SMAC | + I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER | + I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST | + I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL | + I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT, + [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] = + I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER | + I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST | + I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL | + I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT, + [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] = + I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER | + I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST | + I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL | + I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT | + I40E_INSET_SCTP_VT, + [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] = + I40E_INSET_DMAC | I40E_INSET_SMAC | + I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER | + I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST | + 
I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_PROTO | + I40E_INSET_IPV4_TTL, + [I40E_FILTER_PCTYPE_FRAG_IPV6] = + I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER | + I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST | + I40E_INSET_IPV6_TC | I40E_INSET_IPV6_NEXT_HDR | + I40E_INSET_IPV6_HOP_LIMIT, + [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] = + I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER | + I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST | + I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT | + I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT, + [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] = + I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER | + I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST | + I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT | + I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT, + [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] = + I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER | + I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST | + I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT | + I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT, + [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] = + I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER | + I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST | + I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT | + I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT, + [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] = + I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER | + I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST | + I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT | + I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT, + [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] = + I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER | + I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST | + I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT | + I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT | + I40E_INSET_SCTP_VT, + [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] = + I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER | + I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST | + I40E_INSET_IPV6_TC | I40E_INSET_IPV6_NEXT_HDR | + I40E_INSET_IPV6_HOP_LIMIT, + [I40E_FILTER_PCTYPE_L2_PAYLOAD] = + I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER | + I40E_INSET_LAST_ETHER_TYPE, + }; + + if (pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD) + return 0; + if (filter == RTE_ETH_FILTER_HASH) + valid = valid_hash_inset_table[pctype]; + else + valid = valid_fdir_inset_table[pctype]; + + return valid; +} + +/** + * Validate if the input set is allowed for a specific PCTYPE + */ +int +i40e_validate_input_set(enum i40e_filter_pctype pctype, + enum rte_filter_type filter, uint64_t inset) +{ + uint64_t valid; + + valid = i40e_get_valid_input_set(pctype, filter); + if (inset & (~valid)) + return -EINVAL; + + return 0; +} + +/* default input set fields combination per pctype */ +uint64_t +i40e_get_default_input_set(uint16_t pctype) +{ + static const uint64_t default_inset_table[] = { + [I40E_FILTER_PCTYPE_FRAG_IPV4] = + I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST, + [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] = + I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST | + I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT, + [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] = + I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST | + I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT, + [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] = + I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST | + I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT, + [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] = + I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST | + I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT, + [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] = + I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST | + I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT, + [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] = + 
I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST | + I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT | + I40E_INSET_SCTP_VT, + [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] = + I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST, + [I40E_FILTER_PCTYPE_FRAG_IPV6] = + I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST, + [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] = + I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST | + I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT, + [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] = + I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST | + I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT, + [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] = + I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST | + I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT, + [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] = + I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST | + I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT, + [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] = + I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST | + I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT, + [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] = + I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST | + I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT | + I40E_INSET_SCTP_VT, + [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] = + I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST, + [I40E_FILTER_PCTYPE_L2_PAYLOAD] = + I40E_INSET_LAST_ETHER_TYPE, + }; + + if (pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD) + return 0; + + return default_inset_table[pctype]; +} + +/** + * Parse the input set from index to logical bit masks + */ +static int +i40e_parse_input_set(uint64_t *inset, + enum i40e_filter_pctype pctype, + enum rte_eth_input_set_field *field, + uint16_t size) +{ + uint16_t i, j; + int ret = -EINVAL; + + static const struct { + enum rte_eth_input_set_field field; + uint64_t inset; + } inset_convert_table[] = { + {RTE_ETH_INPUT_SET_NONE, I40E_INSET_NONE}, + {RTE_ETH_INPUT_SET_L2_SRC_MAC, I40E_INSET_SMAC}, + {RTE_ETH_INPUT_SET_L2_DST_MAC, I40E_INSET_DMAC}, + {RTE_ETH_INPUT_SET_L2_OUTER_VLAN, I40E_INSET_VLAN_OUTER}, + {RTE_ETH_INPUT_SET_L2_INNER_VLAN, I40E_INSET_VLAN_INNER}, + {RTE_ETH_INPUT_SET_L2_ETHERTYPE, I40E_INSET_LAST_ETHER_TYPE}, + {RTE_ETH_INPUT_SET_L3_SRC_IP4, I40E_INSET_IPV4_SRC}, + {RTE_ETH_INPUT_SET_L3_DST_IP4, I40E_INSET_IPV4_DST}, + {RTE_ETH_INPUT_SET_L3_IP4_TOS, I40E_INSET_IPV4_TOS}, + {RTE_ETH_INPUT_SET_L3_IP4_PROTO, I40E_INSET_IPV4_PROTO}, + {RTE_ETH_INPUT_SET_L3_IP4_TTL, I40E_INSET_IPV4_TTL}, + {RTE_ETH_INPUT_SET_L3_SRC_IP6, I40E_INSET_IPV6_SRC}, + {RTE_ETH_INPUT_SET_L3_DST_IP6, I40E_INSET_IPV6_DST}, + {RTE_ETH_INPUT_SET_L3_IP6_TC, I40E_INSET_IPV6_TC}, + {RTE_ETH_INPUT_SET_L3_IP6_NEXT_HEADER, + I40E_INSET_IPV6_NEXT_HDR}, + {RTE_ETH_INPUT_SET_L3_IP6_HOP_LIMITS, + I40E_INSET_IPV6_HOP_LIMIT}, + {RTE_ETH_INPUT_SET_L4_UDP_SRC_PORT, I40E_INSET_SRC_PORT}, + {RTE_ETH_INPUT_SET_L4_TCP_SRC_PORT, I40E_INSET_SRC_PORT}, + {RTE_ETH_INPUT_SET_L4_SCTP_SRC_PORT, I40E_INSET_SRC_PORT}, + {RTE_ETH_INPUT_SET_L4_UDP_DST_PORT, I40E_INSET_DST_PORT}, + {RTE_ETH_INPUT_SET_L4_TCP_DST_PORT, I40E_INSET_DST_PORT}, + {RTE_ETH_INPUT_SET_L4_SCTP_DST_PORT, I40E_INSET_DST_PORT}, + {RTE_ETH_INPUT_SET_L4_SCTP_VERIFICATION_TAG, + I40E_INSET_SCTP_VT}, + {RTE_ETH_INPUT_SET_TUNNEL_L2_INNER_DST_MAC, + I40E_INSET_TUNNEL_DMAC}, + {RTE_ETH_INPUT_SET_TUNNEL_L2_INNER_VLAN, + I40E_INSET_VLAN_TUNNEL}, + {RTE_ETH_INPUT_SET_TUNNEL_L4_UDP_KEY, + I40E_INSET_TUNNEL_ID}, + {RTE_ETH_INPUT_SET_TUNNEL_GRE_KEY, I40E_INSET_TUNNEL_ID}, + {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_1ST_WORD, + I40E_INSET_FLEX_PAYLOAD_W1}, + {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_2ND_WORD, + I40E_INSET_FLEX_PAYLOAD_W2}, + {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_3RD_WORD, + 
I40E_INSET_FLEX_PAYLOAD_W3}, + {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_4TH_WORD, + I40E_INSET_FLEX_PAYLOAD_W4}, + {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_5TH_WORD, + I40E_INSET_FLEX_PAYLOAD_W5}, + {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_6TH_WORD, + I40E_INSET_FLEX_PAYLOAD_W6}, + {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_7TH_WORD, + I40E_INSET_FLEX_PAYLOAD_W7}, + {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_8TH_WORD, + I40E_INSET_FLEX_PAYLOAD_W8}, + }; + + if (!inset || !field || size > RTE_ETH_INSET_SIZE_MAX) + return ret; + + /* Only one item allowed for default or all */ + if (size == 1) { + if (field[0] == RTE_ETH_INPUT_SET_DEFAULT) { + *inset = i40e_get_default_input_set(pctype); + return 0; + } else if (field[0] == RTE_ETH_INPUT_SET_NONE) { + *inset = I40E_INSET_NONE; + return 0; + } + } + + for (i = 0, *inset = 0; i < size; i++) { + for (j = 0; j < RTE_DIM(inset_convert_table); j++) { + if (field[i] == inset_convert_table[j].field) { + *inset |= inset_convert_table[j].inset; + break; + } + } + + /* It contains unsupported input set, return immediately */ + if (j == RTE_DIM(inset_convert_table)) + return ret; + } + + return 0; +} + +/** + * Translate the input set from bit masks to register aware bit masks + * and vice versa + */ +uint64_t +i40e_translate_input_set_reg(enum i40e_mac_type type, uint64_t input) +{ + uint64_t val = 0; + uint16_t i; + + struct inset_map { + uint64_t inset; + uint64_t inset_reg; + }; + + static const struct inset_map inset_map_common[] = { + {I40E_INSET_DMAC, I40E_REG_INSET_L2_DMAC}, + {I40E_INSET_SMAC, I40E_REG_INSET_L2_SMAC}, + {I40E_INSET_VLAN_OUTER, I40E_REG_INSET_L2_OUTER_VLAN}, + {I40E_INSET_VLAN_INNER, I40E_REG_INSET_L2_INNER_VLAN}, + {I40E_INSET_LAST_ETHER_TYPE, I40E_REG_INSET_LAST_ETHER_TYPE}, + {I40E_INSET_IPV4_TOS, I40E_REG_INSET_L3_IP4_TOS}, + {I40E_INSET_IPV6_SRC, I40E_REG_INSET_L3_SRC_IP6}, + {I40E_INSET_IPV6_DST, I40E_REG_INSET_L3_DST_IP6}, + {I40E_INSET_IPV6_TC, I40E_REG_INSET_L3_IP6_TC}, + {I40E_INSET_IPV6_NEXT_HDR, I40E_REG_INSET_L3_IP6_NEXT_HDR}, + {I40E_INSET_IPV6_HOP_LIMIT, I40E_REG_INSET_L3_IP6_HOP_LIMIT}, + {I40E_INSET_SRC_PORT, I40E_REG_INSET_L4_SRC_PORT}, + {I40E_INSET_DST_PORT, I40E_REG_INSET_L4_DST_PORT}, + {I40E_INSET_SCTP_VT, I40E_REG_INSET_L4_SCTP_VERIFICATION_TAG}, + {I40E_INSET_TUNNEL_ID, I40E_REG_INSET_TUNNEL_ID}, + {I40E_INSET_TUNNEL_DMAC, + I40E_REG_INSET_TUNNEL_L2_INNER_DST_MAC}, + {I40E_INSET_TUNNEL_IPV4_DST, I40E_REG_INSET_TUNNEL_L3_DST_IP4}, + {I40E_INSET_TUNNEL_IPV6_DST, I40E_REG_INSET_TUNNEL_L3_DST_IP6}, + {I40E_INSET_TUNNEL_SRC_PORT, + I40E_REG_INSET_TUNNEL_L4_UDP_SRC_PORT}, + {I40E_INSET_TUNNEL_DST_PORT, + I40E_REG_INSET_TUNNEL_L4_UDP_DST_PORT}, + {I40E_INSET_VLAN_TUNNEL, I40E_REG_INSET_TUNNEL_VLAN}, + {I40E_INSET_FLEX_PAYLOAD_W1, I40E_REG_INSET_FLEX_PAYLOAD_WORD1}, + {I40E_INSET_FLEX_PAYLOAD_W2, I40E_REG_INSET_FLEX_PAYLOAD_WORD2}, + {I40E_INSET_FLEX_PAYLOAD_W3, I40E_REG_INSET_FLEX_PAYLOAD_WORD3}, + {I40E_INSET_FLEX_PAYLOAD_W4, I40E_REG_INSET_FLEX_PAYLOAD_WORD4}, + {I40E_INSET_FLEX_PAYLOAD_W5, I40E_REG_INSET_FLEX_PAYLOAD_WORD5}, + {I40E_INSET_FLEX_PAYLOAD_W6, I40E_REG_INSET_FLEX_PAYLOAD_WORD6}, + {I40E_INSET_FLEX_PAYLOAD_W7, I40E_REG_INSET_FLEX_PAYLOAD_WORD7}, + {I40E_INSET_FLEX_PAYLOAD_W8, I40E_REG_INSET_FLEX_PAYLOAD_WORD8}, + }; + + /* some different registers map in x722*/ + static const struct inset_map inset_map_diff_x722[] = { + {I40E_INSET_IPV4_SRC, I40E_X722_REG_INSET_L3_SRC_IP4}, + {I40E_INSET_IPV4_DST, I40E_X722_REG_INSET_L3_DST_IP4}, + {I40E_INSET_IPV4_PROTO, I40E_X722_REG_INSET_L3_IP4_PROTO}, + {I40E_INSET_IPV4_TTL, I40E_X722_REG_INSET_L3_IP4_TTL}, 
+ }; + + static const struct inset_map inset_map_diff_not_x722[] = { + {I40E_INSET_IPV4_SRC, I40E_REG_INSET_L3_SRC_IP4}, + {I40E_INSET_IPV4_DST, I40E_REG_INSET_L3_DST_IP4}, + {I40E_INSET_IPV4_PROTO, I40E_REG_INSET_L3_IP4_PROTO}, + {I40E_INSET_IPV4_TTL, I40E_REG_INSET_L3_IP4_TTL}, + }; + + if (input == 0) + return val; + + /* Translate input set to register aware inset */ + if (type == I40E_MAC_X722) { + for (i = 0; i < RTE_DIM(inset_map_diff_x722); i++) { + if (input & inset_map_diff_x722[i].inset) + val |= inset_map_diff_x722[i].inset_reg; + } + } else { + for (i = 0; i < RTE_DIM(inset_map_diff_not_x722); i++) { + if (input & inset_map_diff_not_x722[i].inset) + val |= inset_map_diff_not_x722[i].inset_reg; + } + } + + for (i = 0; i < RTE_DIM(inset_map_common); i++) { + if (input & inset_map_common[i].inset) + val |= inset_map_common[i].inset_reg; + } + + return val; +} + +int +i40e_generate_inset_mask_reg(uint64_t inset, uint32_t *mask, uint8_t nb_elem) +{ + uint8_t i, idx = 0; + uint64_t inset_need_mask = inset; + + static const struct { + uint64_t inset; + uint32_t mask; + } inset_mask_map[] = { + {I40E_INSET_IPV4_TOS, I40E_INSET_IPV4_TOS_MASK}, + {I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL, 0}, + {I40E_INSET_IPV4_PROTO, I40E_INSET_IPV4_PROTO_MASK}, + {I40E_INSET_IPV4_TTL, I40E_INSET_IPv4_TTL_MASK}, + {I40E_INSET_IPV6_TC, I40E_INSET_IPV6_TC_MASK}, + {I40E_INSET_IPV6_NEXT_HDR | I40E_INSET_IPV6_HOP_LIMIT, 0}, + {I40E_INSET_IPV6_NEXT_HDR, I40E_INSET_IPV6_NEXT_HDR_MASK}, + {I40E_INSET_IPV6_HOP_LIMIT, I40E_INSET_IPV6_HOP_LIMIT_MASK}, + }; + + if (!inset || !mask || !nb_elem) + return 0; + + for (i = 0, idx = 0; i < RTE_DIM(inset_mask_map); i++) { + /* Clear the inset bit, if no MASK is required, + * for example proto + ttl + */ + if ((inset & inset_mask_map[i].inset) == + inset_mask_map[i].inset && inset_mask_map[i].mask == 0) + inset_need_mask &= ~inset_mask_map[i].inset; + if (!inset_need_mask) + return 0; + } + for (i = 0, idx = 0; i < RTE_DIM(inset_mask_map); i++) { + if ((inset_need_mask & inset_mask_map[i].inset) == + inset_mask_map[i].inset) { + if (idx >= nb_elem) { + PMD_DRV_LOG(ERR, "exceed maximal number of bitmasks"); + return -EINVAL; + } + mask[idx] = inset_mask_map[i].mask; + idx++; + } + } + + return idx; +} + +void +i40e_check_write_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val) +{ + uint32_t reg = i40e_read_rx_ctl(hw, addr); + + PMD_DRV_LOG(DEBUG, "[0x%08x] original: 0x%08x", addr, reg); + if (reg != val) + i40e_write_rx_ctl(hw, addr, val); + PMD_DRV_LOG(DEBUG, "[0x%08x] after: 0x%08x", addr, + (uint32_t)i40e_read_rx_ctl(hw, addr)); +} + +void +i40e_check_write_global_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val) +{ + uint32_t reg = i40e_read_rx_ctl(hw, addr); + struct rte_eth_dev *dev; + + dev = ((struct i40e_adapter *)hw->back)->eth_dev; + if (reg != val) { + i40e_write_rx_ctl(hw, addr, val); + PMD_DRV_LOG(WARNING, + "i40e device %s changed global register [0x%08x]." 
+ " original: 0x%08x, new: 0x%08x", + dev->device->name, addr, reg, + (uint32_t)i40e_read_rx_ctl(hw, addr)); + } +} + +static void +i40e_filter_input_set_init(struct i40e_pf *pf) +{ + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + enum i40e_filter_pctype pctype; + uint64_t input_set, inset_reg; + uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0}; + int num, i; + uint16_t flow_type; + + for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP; + pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++) { + flow_type = i40e_pctype_to_flowtype(pf->adapter, pctype); + + if (flow_type == RTE_ETH_FLOW_UNKNOWN) + continue; + + input_set = i40e_get_default_input_set(pctype); + + num = i40e_generate_inset_mask_reg(input_set, mask_reg, + I40E_INSET_MASK_NUM_REG); + if (num < 0) + return; + if (pf->support_multi_driver && num > 0) { + PMD_DRV_LOG(ERR, "Input set setting is not supported."); + return; + } + inset_reg = i40e_translate_input_set_reg(hw->mac.type, + input_set); + + i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0), + (uint32_t)(inset_reg & UINT32_MAX)); + i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1), + (uint32_t)((inset_reg >> + I40E_32_BIT_WIDTH) & UINT32_MAX)); + if (!pf->support_multi_driver) { + i40e_check_write_global_reg(hw, + I40E_GLQF_HASH_INSET(0, pctype), + (uint32_t)(inset_reg & UINT32_MAX)); + i40e_check_write_global_reg(hw, + I40E_GLQF_HASH_INSET(1, pctype), + (uint32_t)((inset_reg >> + I40E_32_BIT_WIDTH) & UINT32_MAX)); + + for (i = 0; i < num; i++) { + i40e_check_write_global_reg(hw, + I40E_GLQF_FD_MSK(i, pctype), + mask_reg[i]); + i40e_check_write_global_reg(hw, + I40E_GLQF_HASH_MSK(i, pctype), + mask_reg[i]); + } + /*clear unused mask registers of the pctype */ + for (i = num; i < I40E_INSET_MASK_NUM_REG; i++) { + i40e_check_write_global_reg(hw, + I40E_GLQF_FD_MSK(i, pctype), + 0); + i40e_check_write_global_reg(hw, + I40E_GLQF_HASH_MSK(i, pctype), + 0); + } + } else { + PMD_DRV_LOG(ERR, "Input set setting is not supported."); + } + I40E_WRITE_FLUSH(hw); + + /* store the default input set */ + if (!pf->support_multi_driver) + pf->hash_input_set[pctype] = input_set; + pf->fdir.input_set[pctype] = input_set; + } +} + +int +i40e_hash_filter_inset_select(struct i40e_hw *hw, + struct rte_eth_input_set_conf *conf) +{ + struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf; + enum i40e_filter_pctype pctype; + uint64_t input_set, inset_reg = 0; + uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0}; + int ret, i, num; + + if (!conf) { + PMD_DRV_LOG(ERR, "Invalid pointer"); + return -EFAULT; + } + if (conf->op != RTE_ETH_INPUT_SET_SELECT && + conf->op != RTE_ETH_INPUT_SET_ADD) { + PMD_DRV_LOG(ERR, "Unsupported input set operation"); + return -EINVAL; + } + + if (pf->support_multi_driver) { + PMD_DRV_LOG(ERR, "Hash input set setting is not supported."); + return -ENOTSUP; + } + + pctype = i40e_flowtype_to_pctype(pf->adapter, conf->flow_type); + if (pctype == I40E_FILTER_PCTYPE_INVALID) { + PMD_DRV_LOG(ERR, "invalid flow_type input."); + return -EINVAL; + } + + if (hw->mac.type == I40E_MAC_X722) { + /* get translated pctype value in fd pctype register */ + pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl(hw, + I40E_GLQF_FD_PCTYPES((int)pctype)); + } + + ret = i40e_parse_input_set(&input_set, pctype, conf->field, + conf->inset_size); + if (ret) { + PMD_DRV_LOG(ERR, "Failed to parse input set"); + return -EINVAL; + } + + if (conf->op == RTE_ETH_INPUT_SET_ADD) { + /* get inset value in register */ + inset_reg = i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, pctype)); + inset_reg <<= 
I40E_32_BIT_WIDTH; + inset_reg |= i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, pctype)); + input_set |= pf->hash_input_set[pctype]; + } + num = i40e_generate_inset_mask_reg(input_set, mask_reg, + I40E_INSET_MASK_NUM_REG); + if (num < 0) + return -EINVAL; + + inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set); + + i40e_check_write_global_reg(hw, I40E_GLQF_HASH_INSET(0, pctype), + (uint32_t)(inset_reg & UINT32_MAX)); + i40e_check_write_global_reg(hw, I40E_GLQF_HASH_INSET(1, pctype), + (uint32_t)((inset_reg >> + I40E_32_BIT_WIDTH) & UINT32_MAX)); + + for (i = 0; i < num; i++) + i40e_check_write_global_reg(hw, I40E_GLQF_HASH_MSK(i, pctype), + mask_reg[i]); + /*clear unused mask registers of the pctype */ + for (i = num; i < I40E_INSET_MASK_NUM_REG; i++) + i40e_check_write_global_reg(hw, I40E_GLQF_HASH_MSK(i, pctype), + 0); + I40E_WRITE_FLUSH(hw); + + pf->hash_input_set[pctype] = input_set; + return 0; +} + +int +i40e_fdir_filter_inset_select(struct i40e_pf *pf, + struct rte_eth_input_set_conf *conf) +{ + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + enum i40e_filter_pctype pctype; + uint64_t input_set, inset_reg = 0; + uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0}; + int ret, i, num; + + if (!hw || !conf) { + PMD_DRV_LOG(ERR, "Invalid pointer"); + return -EFAULT; + } + if (conf->op != RTE_ETH_INPUT_SET_SELECT && + conf->op != RTE_ETH_INPUT_SET_ADD) { + PMD_DRV_LOG(ERR, "Unsupported input set operation"); + return -EINVAL; + } + + pctype = i40e_flowtype_to_pctype(pf->adapter, conf->flow_type); + + if (pctype == I40E_FILTER_PCTYPE_INVALID) { + PMD_DRV_LOG(ERR, "invalid flow_type input."); + return -EINVAL; + } + + ret = i40e_parse_input_set(&input_set, pctype, conf->field, + conf->inset_size); + if (ret) { + PMD_DRV_LOG(ERR, "Failed to parse input set"); + return -EINVAL; + } + + /* get inset value in register */ + inset_reg = i40e_read_rx_ctl(hw, I40E_PRTQF_FD_INSET(pctype, 1)); + inset_reg <<= I40E_32_BIT_WIDTH; + inset_reg |= i40e_read_rx_ctl(hw, I40E_PRTQF_FD_INSET(pctype, 0)); + + /* Can not change the inset reg for flex payload for fdir, + * it is done by writing I40E_PRTQF_FD_FLXINSET + * in i40e_set_flex_mask_on_pctype. 
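+ *
+ * In other words: on RTE_ETH_INPUT_SET_SELECT only the flex-payload
+ * word bits of the previous register value are kept and the rest is
+ * rebuilt from the new input set, while on RTE_ETH_INPUT_SET_ADD the
+ * new fields are merged with the input set already stored for this
+ * pctype.  An illustrative conf for this function (field values are
+ * examples only) could look like:
+ *
+ *	struct rte_eth_input_set_conf conf = {
+ *		.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP,
+ *		.inset_size = 2,
+ *		.field = { RTE_ETH_INPUT_SET_L3_SRC_IP4,
+ *			   RTE_ETH_INPUT_SET_L4_UDP_DST_PORT },
+ *		.op = RTE_ETH_INPUT_SET_SELECT,
+ *	};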
+ */ + if (conf->op == RTE_ETH_INPUT_SET_SELECT) + inset_reg &= I40E_REG_INSET_FLEX_PAYLOAD_WORDS; + else + input_set |= pf->fdir.input_set[pctype]; + num = i40e_generate_inset_mask_reg(input_set, mask_reg, + I40E_INSET_MASK_NUM_REG); + if (num < 0) + return -EINVAL; + if (pf->support_multi_driver && num > 0) { + PMD_DRV_LOG(ERR, "FDIR bit mask is not supported."); + return -ENOTSUP; + } + + inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set); + + i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0), + (uint32_t)(inset_reg & UINT32_MAX)); + i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1), + (uint32_t)((inset_reg >> + I40E_32_BIT_WIDTH) & UINT32_MAX)); + + if (!pf->support_multi_driver) { + for (i = 0; i < num; i++) + i40e_check_write_global_reg(hw, + I40E_GLQF_FD_MSK(i, pctype), + mask_reg[i]); + /*clear unused mask registers of the pctype */ + for (i = num; i < I40E_INSET_MASK_NUM_REG; i++) + i40e_check_write_global_reg(hw, + I40E_GLQF_FD_MSK(i, pctype), + 0); + } else { + PMD_DRV_LOG(ERR, "FDIR bit mask is not supported."); + } + I40E_WRITE_FLUSH(hw); + + pf->fdir.input_set[pctype] = input_set; + return 0; +} + +static int +i40e_hash_filter_get(struct i40e_hw *hw, struct rte_eth_hash_filter_info *info) +{ + int ret = 0; + + if (!hw || !info) { + PMD_DRV_LOG(ERR, "Invalid pointer"); + return -EFAULT; + } + + switch (info->info_type) { + case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT: + i40e_get_symmetric_hash_enable_per_port(hw, + &(info->info.enable)); + break; + case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG: + ret = i40e_get_hash_filter_global_config(hw, + &(info->info.global_conf)); + break; + default: + PMD_DRV_LOG(ERR, "Hash filter info type (%d) not supported", + info->info_type); + ret = -EINVAL; + break; + } + + return ret; +} + +static int +i40e_hash_filter_set(struct i40e_hw *hw, struct rte_eth_hash_filter_info *info) +{ + int ret = 0; + + if (!hw || !info) { + PMD_DRV_LOG(ERR, "Invalid pointer"); + return -EFAULT; + } + + switch (info->info_type) { + case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT: + i40e_set_symmetric_hash_enable_per_port(hw, info->info.enable); + break; + case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG: + ret = i40e_set_hash_filter_global_config(hw, + &(info->info.global_conf)); + break; + case RTE_ETH_HASH_FILTER_INPUT_SET_SELECT: + ret = i40e_hash_filter_inset_select(hw, + &(info->info.input_set_conf)); + break; + + default: + PMD_DRV_LOG(ERR, "Hash filter info type (%d) not supported", + info->info_type); + ret = -EINVAL; + break; + } + + return ret; +} + +/* Operations for hash function */ +static int +i40e_hash_filter_ctrl(struct rte_eth_dev *dev, + enum rte_filter_op filter_op, + void *arg) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int ret = 0; + + switch (filter_op) { + case RTE_ETH_FILTER_NOP: + break; + case RTE_ETH_FILTER_GET: + ret = i40e_hash_filter_get(hw, + (struct rte_eth_hash_filter_info *)arg); + break; + case RTE_ETH_FILTER_SET: + ret = i40e_hash_filter_set(hw, + (struct rte_eth_hash_filter_info *)arg); + break; + default: + PMD_DRV_LOG(WARNING, "Filter operation (%d) not supported", + filter_op); + ret = -ENOTSUP; + break; + } + + return ret; +} + +/* Convert ethertype filter structure */ +static int +i40e_ethertype_filter_convert(const struct rte_eth_ethertype_filter *input, + struct i40e_ethertype_filter *filter) +{ + rte_memcpy(&filter->input.mac_addr, &input->mac_addr, + RTE_ETHER_ADDR_LEN); + filter->input.ether_type = input->ether_type; + filter->flags = input->flags; + filter->queue = 
input->queue; + + return 0; +} + +/* Check if there exists the ehtertype filter */ +struct i40e_ethertype_filter * +i40e_sw_ethertype_filter_lookup(struct i40e_ethertype_rule *ethertype_rule, + const struct i40e_ethertype_filter_input *input) +{ + int ret; + + ret = rte_hash_lookup(ethertype_rule->hash_table, (const void *)input); + if (ret < 0) + return NULL; + + return ethertype_rule->hash_map[ret]; +} + +/* Add ethertype filter in SW list */ +static int +i40e_sw_ethertype_filter_insert(struct i40e_pf *pf, + struct i40e_ethertype_filter *filter) +{ + struct i40e_ethertype_rule *rule = &pf->ethertype; + int ret; + + ret = rte_hash_add_key(rule->hash_table, &filter->input); + if (ret < 0) { + PMD_DRV_LOG(ERR, + "Failed to insert ethertype filter" + " to hash table %d!", + ret); + return ret; + } + rule->hash_map[ret] = filter; + + TAILQ_INSERT_TAIL(&rule->ethertype_list, filter, rules); + + return 0; +} + +/* Delete ethertype filter in SW list */ +int +i40e_sw_ethertype_filter_del(struct i40e_pf *pf, + struct i40e_ethertype_filter_input *input) +{ + struct i40e_ethertype_rule *rule = &pf->ethertype; + struct i40e_ethertype_filter *filter; + int ret; + + ret = rte_hash_del_key(rule->hash_table, input); + if (ret < 0) { + PMD_DRV_LOG(ERR, + "Failed to delete ethertype filter" + " to hash table %d!", + ret); + return ret; + } + filter = rule->hash_map[ret]; + rule->hash_map[ret] = NULL; + + TAILQ_REMOVE(&rule->ethertype_list, filter, rules); + rte_free(filter); + + return 0; +} + +/* + * Configure ethertype filter, which can director packet by filtering + * with mac address and ether_type or only ether_type + */ +int +i40e_ethertype_filter_set(struct i40e_pf *pf, + struct rte_eth_ethertype_filter *filter, + bool add) +{ + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype; + struct i40e_ethertype_filter *ethertype_filter, *node; + struct i40e_ethertype_filter check_filter; + struct i40e_control_filter_stats stats; + uint16_t flags = 0; + int ret; + + if (filter->queue >= pf->dev_data->nb_rx_queues) { + PMD_DRV_LOG(ERR, "Invalid queue ID"); + return -EINVAL; + } + if (filter->ether_type == RTE_ETHER_TYPE_IPV4 || + filter->ether_type == RTE_ETHER_TYPE_IPV6) { + PMD_DRV_LOG(ERR, + "unsupported ether_type(0x%04x) in control packet filter.", + filter->ether_type); + return -EINVAL; + } + if (filter->ether_type == RTE_ETHER_TYPE_VLAN) + PMD_DRV_LOG(WARNING, + "filter vlan ether_type in first tag is not supported."); + + /* Check if there is the filter in SW list */ + memset(&check_filter, 0, sizeof(check_filter)); + i40e_ethertype_filter_convert(filter, &check_filter); + node = i40e_sw_ethertype_filter_lookup(ethertype_rule, + &check_filter.input); + if (add && node) { + PMD_DRV_LOG(ERR, "Conflict with existing ethertype rules!"); + return -EINVAL; + } + + if (!add && !node) { + PMD_DRV_LOG(ERR, "There's no corresponding ethertype filter!"); + return -EINVAL; + } + + if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC)) + flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC; + if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) + flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP; + flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE; + + memset(&stats, 0, sizeof(stats)); + ret = i40e_aq_add_rem_control_packet_filter(hw, + filter->mac_addr.addr_bytes, + filter->ether_type, flags, + pf->main_vsi->seid, + filter->queue, add, &stats, NULL); + + PMD_DRV_LOG(INFO, + "add/rem control packet filter, return %d, mac_etype_used = %u, etype_used = %u, mac_etype_free = %u, 
etype_free = %u", + ret, stats.mac_etype_used, stats.etype_used, + stats.mac_etype_free, stats.etype_free); + if (ret < 0) + return -ENOSYS; + + /* Add or delete a filter in SW list */ + if (add) { + ethertype_filter = rte_zmalloc("ethertype_filter", + sizeof(*ethertype_filter), 0); + if (ethertype_filter == NULL) { + PMD_DRV_LOG(ERR, "Failed to alloc memory."); + return -ENOMEM; + } + + rte_memcpy(ethertype_filter, &check_filter, + sizeof(check_filter)); + ret = i40e_sw_ethertype_filter_insert(pf, ethertype_filter); + if (ret < 0) + rte_free(ethertype_filter); + } else { + ret = i40e_sw_ethertype_filter_del(pf, &node->input); + } + + return ret; +} + +/* + * Handle operations for ethertype filter. + */ +static int +i40e_ethertype_filter_handle(struct rte_eth_dev *dev, + enum rte_filter_op filter_op, + void *arg) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + int ret = 0; + + if (filter_op == RTE_ETH_FILTER_NOP) + return ret; + + if (arg == NULL) { + PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u", + filter_op); + return -EINVAL; + } + + switch (filter_op) { + case RTE_ETH_FILTER_ADD: + ret = i40e_ethertype_filter_set(pf, + (struct rte_eth_ethertype_filter *)arg, + TRUE); + break; + case RTE_ETH_FILTER_DELETE: + ret = i40e_ethertype_filter_set(pf, + (struct rte_eth_ethertype_filter *)arg, + FALSE); + break; + default: + PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op); + ret = -ENOSYS; + break; + } + return ret; +} + +static int +i40e_dev_filter_ctrl(struct rte_eth_dev *dev, + enum rte_filter_type filter_type, + enum rte_filter_op filter_op, + void *arg) +{ + int ret = 0; + + if (dev == NULL) + return -EINVAL; + + switch (filter_type) { + case RTE_ETH_FILTER_NONE: + /* For global configuration */ + ret = i40e_filter_ctrl_global_config(dev, filter_op, arg); + break; + case RTE_ETH_FILTER_HASH: + ret = i40e_hash_filter_ctrl(dev, filter_op, arg); + break; + case RTE_ETH_FILTER_MACVLAN: + ret = i40e_mac_filter_handle(dev, filter_op, arg); + break; + case RTE_ETH_FILTER_ETHERTYPE: + ret = i40e_ethertype_filter_handle(dev, filter_op, arg); + break; + case RTE_ETH_FILTER_TUNNEL: + ret = i40e_tunnel_filter_handle(dev, filter_op, arg); + break; + case RTE_ETH_FILTER_FDIR: + ret = i40e_fdir_ctrl_func(dev, filter_op, arg); + break; + case RTE_ETH_FILTER_GENERIC: + if (filter_op != RTE_ETH_FILTER_GET) + return -EINVAL; + *(const void **)arg = &i40e_flow_ops; + break; + default: + PMD_DRV_LOG(WARNING, "Filter type (%d) not supported", + filter_type); + ret = -EINVAL; + break; + } + + return ret; +} + +/* + * Check and enable Extended Tag. + * Enabling Extended Tag is important for 40G performance. 
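+ *
+ * Background (informational): the PCIe "Extended Tag" capability lets
+ * the device use 8-bit transaction tags (up to 256 outstanding
+ * requests) instead of the default 5-bit tags, which helps sustain
+ * 40G line rate.  The helper below checks the Device Capabilities
+ * register and, when the feature is supported but not yet enabled,
+ * sets the Extended Tag bit in the Device Control register through a
+ * read-modify-write of PCI config space.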
+ */ +static void +i40e_enable_extended_tag(struct rte_eth_dev *dev) +{ + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + uint32_t buf = 0; + int ret; + + ret = rte_pci_read_config(pci_dev, &buf, sizeof(buf), + PCI_DEV_CAP_REG); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x", + PCI_DEV_CAP_REG); + return; + } + if (!(buf & PCI_DEV_CAP_EXT_TAG_MASK)) { + PMD_DRV_LOG(ERR, "Does not support Extended Tag"); + return; + } + + buf = 0; + ret = rte_pci_read_config(pci_dev, &buf, sizeof(buf), + PCI_DEV_CTRL_REG); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x", + PCI_DEV_CTRL_REG); + return; + } + if (buf & PCI_DEV_CTRL_EXT_TAG_MASK) { + PMD_DRV_LOG(DEBUG, "Extended Tag has already been enabled"); + return; + } + buf |= PCI_DEV_CTRL_EXT_TAG_MASK; + ret = rte_pci_write_config(pci_dev, &buf, sizeof(buf), + PCI_DEV_CTRL_REG); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Failed to write PCI offset 0x%x", + PCI_DEV_CTRL_REG); + return; + } +} + +/* + * As some registers wouldn't be reset unless a global hardware reset, + * hardware initialization is needed to put those registers into an + * expected initial state. + */ +static void +i40e_hw_init(struct rte_eth_dev *dev) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + i40e_enable_extended_tag(dev); + + /* clear the PF Queue Filter control register */ + i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, 0); + + /* Disable symmetric hash per port */ + i40e_set_symmetric_hash_enable_per_port(hw, 0); +} + +/* + * For X722 it is possible to have multiple pctypes mapped to the same flowtype + * however this function will return only one highest pctype index, + * which is not quite correct. This is known problem of i40e driver + * and needs to be fixed later. + */ +enum i40e_filter_pctype +i40e_flowtype_to_pctype(const struct i40e_adapter *adapter, uint16_t flow_type) +{ + int i; + uint64_t pctype_mask; + + if (flow_type < I40E_FLOW_TYPE_MAX) { + pctype_mask = adapter->pctypes_tbl[flow_type]; + for (i = I40E_FILTER_PCTYPE_MAX - 1; i > 0; i--) { + if (pctype_mask & (1ULL << i)) + return (enum i40e_filter_pctype)i; + } + } + return I40E_FILTER_PCTYPE_INVALID; +} + +uint16_t +i40e_pctype_to_flowtype(const struct i40e_adapter *adapter, + enum i40e_filter_pctype pctype) +{ + uint16_t flowtype; + uint64_t pctype_mask = 1ULL << pctype; + + for (flowtype = RTE_ETH_FLOW_UNKNOWN + 1; flowtype < I40E_FLOW_TYPE_MAX; + flowtype++) { + if (adapter->pctypes_tbl[flowtype] & pctype_mask) + return flowtype; + } + + return RTE_ETH_FLOW_UNKNOWN; +} + +/* + * On X710, performance number is far from the expectation on recent firmware + * versions; on XL710, performance number is also far from the expectation on + * recent firmware versions, if promiscuous mode is disabled, or promiscuous + * mode is enabled and port MAC address is equal to the packet destination MAC + * address. The fix for this issue may not be integrated in the following + * firmware version. So the workaround in software driver is needed. It needs + * to modify the initial values of 3 internal only registers for both X710 and + * XL710. Note that the values for X710 or XL710 could be different, and the + * workaround can be removed when it is fixed in firmware in the future. 
+ */ + +/* For both X710 and XL710 */ +#define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_1 0x10000200 +#define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_2 0x203F0200 +#define I40E_GL_SWR_PRI_JOIN_MAP_0 0x26CE00 + +#define I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x011f0200 +#define I40E_GL_SWR_PRI_JOIN_MAP_2 0x26CE08 + +/* For X722 */ +#define I40E_X722_GL_SWR_PRI_JOIN_MAP_0_VALUE 0x20000200 +#define I40E_X722_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x013F0200 + +/* For X710 */ +#define I40E_GL_SWR_PM_UP_THR_EF_VALUE 0x03030303 +/* For XL710 */ +#define I40E_GL_SWR_PM_UP_THR_SF_VALUE 0x06060606 +#define I40E_GL_SWR_PM_UP_THR 0x269FBC + +/* + * GL_SWR_PM_UP_THR: + * The value is not impacted from the link speed, its value is set according + * to the total number of ports for a better pipe-monitor configuration. + */ +static bool +i40e_get_swr_pm_cfg(struct i40e_hw *hw, uint32_t *value) +{ +#define I40E_GL_SWR_PM_EF_DEVICE(dev) \ + .device_id = (dev), \ + .val = I40E_GL_SWR_PM_UP_THR_EF_VALUE + +#define I40E_GL_SWR_PM_SF_DEVICE(dev) \ + .device_id = (dev), \ + .val = I40E_GL_SWR_PM_UP_THR_SF_VALUE + + static const struct { + uint16_t device_id; + uint32_t val; + } swr_pm_table[] = { + { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_SFP_XL710) }, + { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_KX_C) }, + { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_10G_BASE_T) }, + { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_10G_BASE_T4) }, + { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_SFP_X722) }, + + { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_KX_B) }, + { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_QSFP_A) }, + { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_QSFP_B) }, + { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_20G_KR2) }, + { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_20G_KR2_A) }, + { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_25G_B) }, + { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_25G_SFP28) }, + }; + uint32_t i; + + if (value == NULL) { + PMD_DRV_LOG(ERR, "value is NULL"); + return false; + } + + for (i = 0; i < RTE_DIM(swr_pm_table); i++) { + if (hw->device_id == swr_pm_table[i].device_id) { + *value = swr_pm_table[i].val; + + PMD_DRV_LOG(DEBUG, "Device 0x%x with GL_SWR_PM_UP_THR " + "value - 0x%08x", + hw->device_id, *value); + return true; + } + } + + return false; +} + +static int +i40e_dev_sync_phy_type(struct i40e_hw *hw) +{ + enum i40e_status_code status; + struct i40e_aq_get_phy_abilities_resp phy_ab; + int ret = -ENOTSUP; + int retries = 0; + + status = i40e_aq_get_phy_capabilities(hw, false, true, &phy_ab, + NULL); + + while (status) { + PMD_INIT_LOG(WARNING, "Failed to sync phy type: status=%d", + status); + retries++; + rte_delay_us(100000); + if (retries < 5) + status = i40e_aq_get_phy_capabilities(hw, false, + true, &phy_ab, NULL); + else + return ret; + } + return 0; +} + +static void +i40e_configure_registers(struct i40e_hw *hw) +{ + static struct { + uint32_t addr; + uint64_t val; + } reg_table[] = { + {I40E_GL_SWR_PRI_JOIN_MAP_0, 0}, + {I40E_GL_SWR_PRI_JOIN_MAP_2, 0}, + {I40E_GL_SWR_PM_UP_THR, 0}, /* Compute value dynamically */ + }; + uint64_t reg; + uint32_t i; + int ret; + + for (i = 0; i < RTE_DIM(reg_table); i++) { + if (reg_table[i].addr == I40E_GL_SWR_PRI_JOIN_MAP_0) { + if (hw->mac.type == I40E_MAC_X722) /* For X722 */ + reg_table[i].val = + I40E_X722_GL_SWR_PRI_JOIN_MAP_0_VALUE; + else /* For X710/XL710/XXV710 */ + if (hw->aq.fw_maj_ver < 6) + reg_table[i].val = + I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_1; + else + reg_table[i].val = + I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_2; + } + + if (reg_table[i].addr == I40E_GL_SWR_PRI_JOIN_MAP_2) { + if (hw->mac.type == I40E_MAC_X722) /* For X722 */ + 
reg_table[i].val = + I40E_X722_GL_SWR_PRI_JOIN_MAP_2_VALUE; + else /* For X710/XL710/XXV710 */ + reg_table[i].val = + I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE; + } + + if (reg_table[i].addr == I40E_GL_SWR_PM_UP_THR) { + uint32_t cfg_val; + + if (!i40e_get_swr_pm_cfg(hw, &cfg_val)) { + PMD_DRV_LOG(DEBUG, "Device 0x%x skips " + "GL_SWR_PM_UP_THR value fixup", + hw->device_id); + continue; + } + + reg_table[i].val = cfg_val; + } + + ret = i40e_aq_debug_read_register(hw, reg_table[i].addr, + ®, NULL); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Failed to read from 0x%"PRIx32, + reg_table[i].addr); + break; + } + PMD_DRV_LOG(DEBUG, "Read from 0x%"PRIx32": 0x%"PRIx64, + reg_table[i].addr, reg); + if (reg == reg_table[i].val) + continue; + + ret = i40e_aq_debug_write_register(hw, reg_table[i].addr, + reg_table[i].val, NULL); + if (ret < 0) { + PMD_DRV_LOG(ERR, + "Failed to write 0x%"PRIx64" to the address of 0x%"PRIx32, + reg_table[i].val, reg_table[i].addr); + break; + } + PMD_DRV_LOG(DEBUG, "Write 0x%"PRIx64" to the address of " + "0x%"PRIx32, reg_table[i].val, reg_table[i].addr); + } +} + +#define I40E_VSI_TSR(_i) (0x00050800 + ((_i) * 4)) +#define I40E_VSI_TSR_QINQ_CONFIG 0xc030 +#define I40E_VSI_L2TAGSTXVALID(_i) (0x00042800 + ((_i) * 4)) +#define I40E_VSI_L2TAGSTXVALID_QINQ 0xab +static int +i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi) +{ + uint32_t reg; + int ret; + + if (vsi->vsi_id >= I40E_MAX_NUM_VSIS) { + PMD_DRV_LOG(ERR, "VSI ID exceeds the maximum"); + return -EINVAL; + } + + /* Configure for double VLAN RX stripping */ + reg = I40E_READ_REG(hw, I40E_VSI_TSR(vsi->vsi_id)); + if ((reg & I40E_VSI_TSR_QINQ_CONFIG) != I40E_VSI_TSR_QINQ_CONFIG) { + reg |= I40E_VSI_TSR_QINQ_CONFIG; + ret = i40e_aq_debug_write_register(hw, + I40E_VSI_TSR(vsi->vsi_id), + reg, NULL); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Failed to update VSI_TSR[%d]", + vsi->vsi_id); + return I40E_ERR_CONFIG; + } + } + + /* Configure for double VLAN TX insertion */ + reg = I40E_READ_REG(hw, I40E_VSI_L2TAGSTXVALID(vsi->vsi_id)); + if ((reg & 0xff) != I40E_VSI_L2TAGSTXVALID_QINQ) { + reg = I40E_VSI_L2TAGSTXVALID_QINQ; + ret = i40e_aq_debug_write_register(hw, + I40E_VSI_L2TAGSTXVALID( + vsi->vsi_id), reg, NULL); + if (ret < 0) { + PMD_DRV_LOG(ERR, + "Failed to update VSI_L2TAGSTXVALID[%d]", + vsi->vsi_id); + return I40E_ERR_CONFIG; + } + } + + return 0; +} + +/** + * i40e_aq_add_mirror_rule + * @hw: pointer to the hardware structure + * @seid: VEB seid to add mirror rule to + * @dst_id: destination vsi seid + * @entries: Buffer which contains the entities to be mirrored + * @count: number of entities contained in the buffer + * @rule_id:the rule_id of the rule to be added + * + * Add a mirror rule for a given veb. 
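+ *
+ * The contents of @entries depend on @rule_type: for a VLAN rule each
+ * element is a VLAN ID, for a VPORT rule each element is a VSI SEID.
+ * Illustrative sketch for a VLAN rule mirroring VLANs 10 and 20
+ * (veb_seid and dst_vsi_seid are placeholders):
+ *
+ *	uint16_t entries[] = { 10, 20 };
+ *	uint16_t rule_id;
+ *
+ *	i40e_aq_add_mirror_rule(hw, veb_seid, dst_vsi_seid,
+ *				I40E_AQC_MIRROR_RULE_TYPE_VLAN,
+ *				entries, RTE_DIM(entries), &rule_id);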
+ * + **/ +static enum i40e_status_code +i40e_aq_add_mirror_rule(struct i40e_hw *hw, + uint16_t seid, uint16_t dst_id, + uint16_t rule_type, uint16_t *entries, + uint16_t count, uint16_t *rule_id) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_add_delete_mirror_rule cmd; + struct i40e_aqc_add_delete_mirror_rule_completion *resp = + (struct i40e_aqc_add_delete_mirror_rule_completion *) + &desc.params.raw; + uint16_t buff_len; + enum i40e_status_code status; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_add_mirror_rule); + memset(&cmd, 0, sizeof(cmd)); + + buff_len = sizeof(uint16_t) * count; + desc.datalen = rte_cpu_to_le_16(buff_len); + if (buff_len > 0) + desc.flags |= rte_cpu_to_le_16( + (uint16_t)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); + cmd.rule_type = rte_cpu_to_le_16(rule_type << + I40E_AQC_MIRROR_RULE_TYPE_SHIFT); + cmd.num_entries = rte_cpu_to_le_16(count); + cmd.seid = rte_cpu_to_le_16(seid); + cmd.destination = rte_cpu_to_le_16(dst_id); + + rte_memcpy(&desc.params.raw, &cmd, sizeof(cmd)); + status = i40e_asq_send_command(hw, &desc, entries, buff_len, NULL); + PMD_DRV_LOG(INFO, + "i40e_aq_add_mirror_rule, aq_status %d, rule_id = %u mirror_rules_used = %u, mirror_rules_free = %u,", + hw->aq.asq_last_status, resp->rule_id, + resp->mirror_rules_used, resp->mirror_rules_free); + *rule_id = rte_le_to_cpu_16(resp->rule_id); + + return status; +} + +/** + * i40e_aq_del_mirror_rule + * @hw: pointer to the hardware structure + * @seid: VEB seid to add mirror rule to + * @entries: Buffer which contains the entities to be mirrored + * @count: number of entities contained in the buffer + * @rule_id:the rule_id of the rule to be delete + * + * Delete a mirror rule for a given veb. + * + **/ +static enum i40e_status_code +i40e_aq_del_mirror_rule(struct i40e_hw *hw, + uint16_t seid, uint16_t rule_type, uint16_t *entries, + uint16_t count, uint16_t rule_id) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_add_delete_mirror_rule cmd; + uint16_t buff_len = 0; + enum i40e_status_code status; + void *buff = NULL; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_delete_mirror_rule); + memset(&cmd, 0, sizeof(cmd)); + if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) { + desc.flags |= rte_cpu_to_le_16((uint16_t)(I40E_AQ_FLAG_BUF | + I40E_AQ_FLAG_RD)); + cmd.num_entries = count; + buff_len = sizeof(uint16_t) * count; + desc.datalen = rte_cpu_to_le_16(buff_len); + buff = (void *)entries; + } else + /* rule id is filled in destination field for deleting mirror rule */ + cmd.destination = rte_cpu_to_le_16(rule_id); + + cmd.rule_type = rte_cpu_to_le_16(rule_type << + I40E_AQC_MIRROR_RULE_TYPE_SHIFT); + cmd.seid = rte_cpu_to_le_16(seid); + + rte_memcpy(&desc.params.raw, &cmd, sizeof(cmd)); + status = i40e_asq_send_command(hw, &desc, buff, buff_len, NULL); + + return status; +} + +/** + * i40e_mirror_rule_set + * @dev: pointer to the hardware structure + * @mirror_conf: mirror rule info + * @sw_id: mirror rule's sw_id + * @on: enable/disable + * + * set a mirror rule. 
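+ *
+ * Illustrative application-level sketch (all values are examples;
+ * port_id is a placeholder) using the legacy
+ * rte_eth_mirror_rule_set() API, which is dispatched to this handler:
+ *
+ *	struct rte_eth_mirror_conf conf = {
+ *		.rule_type = ETH_MIRROR_VLAN,
+ *		.dst_pool = 0,                // mirror into VF 0's pool
+ *		.vlan = {
+ *			.vlan_mask = 0x3,     // vlan_id[0] and [1] valid
+ *			.vlan_id = { 100, 200 },
+ *		},
+ *	};
+ *	rte_eth_mirror_rule_set(port_id, &conf, 0, 1);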
+ * + **/ +static int +i40e_mirror_rule_set(struct rte_eth_dev *dev, + struct rte_eth_mirror_conf *mirror_conf, + uint8_t sw_id, uint8_t on) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct i40e_mirror_rule *it, *mirr_rule = NULL; + struct i40e_mirror_rule *parent = NULL; + uint16_t seid, dst_seid, rule_id; + uint16_t i, j = 0; + int ret; + + PMD_DRV_LOG(DEBUG, "i40e_mirror_rule_set: sw_id = %d.", sw_id); + + if (pf->main_vsi->veb == NULL || pf->vfs == NULL) { + PMD_DRV_LOG(ERR, + "mirror rule can not be configured without veb or vfs."); + return -ENOSYS; + } + if (pf->nb_mirror_rule > I40E_MAX_MIRROR_RULES) { + PMD_DRV_LOG(ERR, "mirror table is full."); + return -ENOSPC; + } + if (mirror_conf->dst_pool > pf->vf_num) { + PMD_DRV_LOG(ERR, "invalid destination pool %u.", + mirror_conf->dst_pool); + return -EINVAL; + } + + seid = pf->main_vsi->veb->seid; + + TAILQ_FOREACH(it, &pf->mirror_list, rules) { + if (sw_id <= it->index) { + mirr_rule = it; + break; + } + parent = it; + } + if (mirr_rule && sw_id == mirr_rule->index) { + if (on) { + PMD_DRV_LOG(ERR, "mirror rule exists."); + return -EEXIST; + } else { + ret = i40e_aq_del_mirror_rule(hw, seid, + mirr_rule->rule_type, + mirr_rule->entries, + mirr_rule->num_entries, mirr_rule->id); + if (ret < 0) { + PMD_DRV_LOG(ERR, + "failed to remove mirror rule: ret = %d, aq_err = %d.", + ret, hw->aq.asq_last_status); + return -ENOSYS; + } + TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules); + rte_free(mirr_rule); + pf->nb_mirror_rule--; + return 0; + } + } else if (!on) { + PMD_DRV_LOG(ERR, "mirror rule doesn't exist."); + return -ENOENT; + } + + mirr_rule = rte_zmalloc("i40e_mirror_rule", + sizeof(struct i40e_mirror_rule) , 0); + if (!mirr_rule) { + PMD_DRV_LOG(ERR, "failed to allocate memory"); + return I40E_ERR_NO_MEMORY; + } + switch (mirror_conf->rule_type) { + case ETH_MIRROR_VLAN: + for (i = 0, j = 0; i < ETH_MIRROR_MAX_VLANS; i++) { + if (mirror_conf->vlan.vlan_mask & (1ULL << i)) { + mirr_rule->entries[j] = + mirror_conf->vlan.vlan_id[i]; + j++; + } + } + if (j == 0) { + PMD_DRV_LOG(ERR, "vlan is not specified."); + rte_free(mirr_rule); + return -EINVAL; + } + mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_VLAN; + break; + case ETH_MIRROR_VIRTUAL_POOL_UP: + case ETH_MIRROR_VIRTUAL_POOL_DOWN: + /* check if the specified pool bit is out of range */ + if (mirror_conf->pool_mask > (uint64_t)(1ULL << (pf->vf_num + 1))) { + PMD_DRV_LOG(ERR, "pool mask is out of range."); + rte_free(mirr_rule); + return -EINVAL; + } + for (i = 0, j = 0; i < pf->vf_num; i++) { + if (mirror_conf->pool_mask & (1ULL << i)) { + mirr_rule->entries[j] = pf->vfs[i].vsi->seid; + j++; + } + } + if (mirror_conf->pool_mask & (1ULL << pf->vf_num)) { + /* add pf vsi to entries */ + mirr_rule->entries[j] = pf->main_vsi_seid; + j++; + } + if (j == 0) { + PMD_DRV_LOG(ERR, "pool is not specified."); + rte_free(mirr_rule); + return -EINVAL; + } + /* egress and ingress in aq commands means from switch but not port */ + mirr_rule->rule_type = + (mirror_conf->rule_type == ETH_MIRROR_VIRTUAL_POOL_UP) ? 
+ I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS : + I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS; + break; + case ETH_MIRROR_UPLINK_PORT: + /* egress and ingress in aq commands means from switch but not port*/ + mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS; + break; + case ETH_MIRROR_DOWNLINK_PORT: + mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS; + break; + default: + PMD_DRV_LOG(ERR, "unsupported mirror type %d.", + mirror_conf->rule_type); + rte_free(mirr_rule); + return -EINVAL; + } + + /* If the dst_pool is equal to vf_num, consider it as PF */ + if (mirror_conf->dst_pool == pf->vf_num) + dst_seid = pf->main_vsi_seid; + else + dst_seid = pf->vfs[mirror_conf->dst_pool].vsi->seid; + + ret = i40e_aq_add_mirror_rule(hw, seid, dst_seid, + mirr_rule->rule_type, mirr_rule->entries, + j, &rule_id); + if (ret < 0) { + PMD_DRV_LOG(ERR, + "failed to add mirror rule: ret = %d, aq_err = %d.", + ret, hw->aq.asq_last_status); + rte_free(mirr_rule); + return -ENOSYS; + } + + mirr_rule->index = sw_id; + mirr_rule->num_entries = j; + mirr_rule->id = rule_id; + mirr_rule->dst_vsi_seid = dst_seid; + + if (parent) + TAILQ_INSERT_AFTER(&pf->mirror_list, parent, mirr_rule, rules); + else + TAILQ_INSERT_HEAD(&pf->mirror_list, mirr_rule, rules); + + pf->nb_mirror_rule++; + return 0; +} + +/** + * i40e_mirror_rule_reset + * @dev: pointer to the device + * @sw_id: mirror rule's sw_id + * + * reset a mirror rule. + * + **/ +static int +i40e_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t sw_id) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct i40e_mirror_rule *it, *mirr_rule = NULL; + uint16_t seid; + int ret; + + PMD_DRV_LOG(DEBUG, "i40e_mirror_rule_reset: sw_id = %d.", sw_id); + + seid = pf->main_vsi->veb->seid; + + TAILQ_FOREACH(it, &pf->mirror_list, rules) { + if (sw_id == it->index) { + mirr_rule = it; + break; + } + } + if (mirr_rule) { + ret = i40e_aq_del_mirror_rule(hw, seid, + mirr_rule->rule_type, + mirr_rule->entries, + mirr_rule->num_entries, mirr_rule->id); + if (ret < 0) { + PMD_DRV_LOG(ERR, + "failed to remove mirror rule: status = %d, aq_err = %d.", + ret, hw->aq.asq_last_status); + return -ENOSYS; + } + TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules); + rte_free(mirr_rule); + pf->nb_mirror_rule--; + } else { + PMD_DRV_LOG(ERR, "mirror rule doesn't exist."); + return -ENOENT; + } + return 0; +} + +static uint64_t +i40e_read_systime_cyclecounter(struct rte_eth_dev *dev) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint64_t systim_cycles; + + systim_cycles = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TIME_L); + systim_cycles |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TIME_H) + << 32; + + return systim_cycles; +} + +static uint64_t +i40e_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev, uint8_t index) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint64_t rx_tstamp; + + rx_tstamp = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_L(index)); + rx_tstamp |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(index)) + << 32; + + return rx_tstamp; +} + +static uint64_t +i40e_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint64_t tx_tstamp; + + tx_tstamp = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_L); + tx_tstamp |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_H) + << 32; + + return tx_tstamp; +} + +static void 
+i40e_start_timecounters(struct rte_eth_dev *dev) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct i40e_adapter *adapter = dev->data->dev_private; + struct rte_eth_link link; + uint32_t tsync_inc_l; + uint32_t tsync_inc_h; + + /* Get current link speed. */ + i40e_dev_link_update(dev, 1); + rte_eth_linkstatus_get(dev, &link); + + switch (link.link_speed) { + case ETH_SPEED_NUM_40G: + case ETH_SPEED_NUM_25G: + tsync_inc_l = I40E_PTP_40GB_INCVAL & 0xFFFFFFFF; + tsync_inc_h = I40E_PTP_40GB_INCVAL >> 32; + break; + case ETH_SPEED_NUM_10G: + tsync_inc_l = I40E_PTP_10GB_INCVAL & 0xFFFFFFFF; + tsync_inc_h = I40E_PTP_10GB_INCVAL >> 32; + break; + case ETH_SPEED_NUM_1G: + tsync_inc_l = I40E_PTP_1GB_INCVAL & 0xFFFFFFFF; + tsync_inc_h = I40E_PTP_1GB_INCVAL >> 32; + break; + default: + tsync_inc_l = 0x0; + tsync_inc_h = 0x0; + } + + /* Set the timesync increment value. */ + I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, tsync_inc_l); + I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, tsync_inc_h); + + memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter)); + memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter)); + memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter)); + + adapter->systime_tc.cc_mask = I40E_CYCLECOUNTER_MASK; + adapter->systime_tc.cc_shift = 0; + adapter->systime_tc.nsec_mask = 0; + + adapter->rx_tstamp_tc.cc_mask = I40E_CYCLECOUNTER_MASK; + adapter->rx_tstamp_tc.cc_shift = 0; + adapter->rx_tstamp_tc.nsec_mask = 0; + + adapter->tx_tstamp_tc.cc_mask = I40E_CYCLECOUNTER_MASK; + adapter->tx_tstamp_tc.cc_shift = 0; + adapter->tx_tstamp_tc.nsec_mask = 0; +} + +static int +i40e_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta) +{ + struct i40e_adapter *adapter = dev->data->dev_private; + + adapter->systime_tc.nsec += delta; + adapter->rx_tstamp_tc.nsec += delta; + adapter->tx_tstamp_tc.nsec += delta; + + return 0; +} + +static int +i40e_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts) +{ + uint64_t ns; + struct i40e_adapter *adapter = dev->data->dev_private; + + ns = rte_timespec_to_ns(ts); + + /* Set the timecounters to a new value. */ + adapter->systime_tc.nsec = ns; + adapter->rx_tstamp_tc.nsec = ns; + adapter->tx_tstamp_tc.nsec = ns; + + return 0; +} + +static int +i40e_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts) +{ + uint64_t ns, systime_cycles; + struct i40e_adapter *adapter = dev->data->dev_private; + + systime_cycles = i40e_read_systime_cyclecounter(dev); + ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles); + *ts = rte_ns_to_timespec(ns); + + return 0; +} + +static int +i40e_timesync_enable(struct rte_eth_dev *dev) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t tsync_ctl_l; + uint32_t tsync_ctl_h; + + /* Stop the timesync system time. */ + I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, 0x0); + I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, 0x0); + /* Reset the timesync system time value. */ + I40E_WRITE_REG(hw, I40E_PRTTSYN_TIME_L, 0x0); + I40E_WRITE_REG(hw, I40E_PRTTSYN_TIME_H, 0x0); + + i40e_start_timecounters(dev); + + /* Clear timesync registers. */ + I40E_READ_REG(hw, I40E_PRTTSYN_STAT_0); + I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_H); + I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(0)); + I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(1)); + I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(2)); + I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(3)); + + /* Enable timestamping of PTP packets. 
*/ + tsync_ctl_l = I40E_READ_REG(hw, I40E_PRTTSYN_CTL0); + tsync_ctl_l |= I40E_PRTTSYN_TSYNENA; + + tsync_ctl_h = I40E_READ_REG(hw, I40E_PRTTSYN_CTL1); + tsync_ctl_h |= I40E_PRTTSYN_TSYNENA; + tsync_ctl_h |= I40E_PRTTSYN_TSYNTYPE; + + I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL0, tsync_ctl_l); + I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL1, tsync_ctl_h); + + return 0; +} + +static int +i40e_timesync_disable(struct rte_eth_dev *dev) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t tsync_ctl_l; + uint32_t tsync_ctl_h; + + /* Disable timestamping of transmitted PTP packets. */ + tsync_ctl_l = I40E_READ_REG(hw, I40E_PRTTSYN_CTL0); + tsync_ctl_l &= ~I40E_PRTTSYN_TSYNENA; + + tsync_ctl_h = I40E_READ_REG(hw, I40E_PRTTSYN_CTL1); + tsync_ctl_h &= ~I40E_PRTTSYN_TSYNENA; + + I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL0, tsync_ctl_l); + I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL1, tsync_ctl_h); + + /* Reset the timesync increment value. */ + I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, 0x0); + I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, 0x0); + + return 0; +} + +static int +i40e_timesync_read_rx_timestamp(struct rte_eth_dev *dev, + struct timespec *timestamp, uint32_t flags) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct i40e_adapter *adapter = dev->data->dev_private; + uint32_t sync_status; + uint32_t index = flags & 0x03; + uint64_t rx_tstamp_cycles; + uint64_t ns; + + sync_status = I40E_READ_REG(hw, I40E_PRTTSYN_STAT_1); + if ((sync_status & (1 << index)) == 0) + return -EINVAL; + + rx_tstamp_cycles = i40e_read_rx_tstamp_cyclecounter(dev, index); + ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles); + *timestamp = rte_ns_to_timespec(ns); + + return 0; +} + +static int +i40e_timesync_read_tx_timestamp(struct rte_eth_dev *dev, + struct timespec *timestamp) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct i40e_adapter *adapter = dev->data->dev_private; + uint32_t sync_status; + uint64_t tx_tstamp_cycles; + uint64_t ns; + + sync_status = I40E_READ_REG(hw, I40E_PRTTSYN_STAT_0); + if ((sync_status & I40E_PRTTSYN_STAT_0_TXTIME_MASK) == 0) + return -EINVAL; + + tx_tstamp_cycles = i40e_read_tx_tstamp_cyclecounter(dev); + ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles); + *timestamp = rte_ns_to_timespec(ns); + + return 0; +} + +/* + * i40e_parse_dcb_configure - parse dcb configure from user + * @dev: the device being configured + * @dcb_cfg: pointer of the result of parse + * @*tc_map: bit map of enabled traffic classes + * + * Returns 0 on success, negative value on failure + */ +static int +i40e_parse_dcb_configure(struct rte_eth_dev *dev, + struct i40e_dcbx_config *dcb_cfg, + uint8_t *tc_map) +{ + struct rte_eth_dcb_rx_conf *dcb_rx_conf; + uint8_t i, tc_bw, bw_lf; + + memset(dcb_cfg, 0, sizeof(struct i40e_dcbx_config)); + + dcb_rx_conf = &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf; + if (dcb_rx_conf->nb_tcs > I40E_MAX_TRAFFIC_CLASS) { + PMD_INIT_LOG(ERR, "number of tc exceeds max."); + return -EINVAL; + } + + /* assume each tc has the same bw */ + tc_bw = I40E_MAX_PERCENT / dcb_rx_conf->nb_tcs; + for (i = 0; i < dcb_rx_conf->nb_tcs; i++) + dcb_cfg->etscfg.tcbwtable[i] = tc_bw; + /* to ensure the sum of tcbw is equal to 100 */ + bw_lf = I40E_MAX_PERCENT % dcb_rx_conf->nb_tcs; + for (i = 0; i < bw_lf; i++) + dcb_cfg->etscfg.tcbwtable[i]++; + + /* assume each tc has the same Transmission Selection Algorithm */ + for (i = 0; i < dcb_rx_conf->nb_tcs; i++) + dcb_cfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS; 
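+ /*
+ * Illustrative example (editor's note, not in the upstream source; it
+ * assumes I40E_MAX_PERCENT is 100, as the comments above imply): with
+ * nb_tcs == 3 the split above gives tc_bw = 100 / 3 = 33 and
+ * bw_lf = 100 % 3 = 1, so TC0 is bumped to 34% while TC1 and TC2 stay
+ * at 33%, keeping the ETS bandwidth table summing to exactly 100.
+ */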
+ + for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) + dcb_cfg->etscfg.prioritytable[i] = + dcb_rx_conf->dcb_tc[i]; + + /* FW needs one App to configure HW */ + dcb_cfg->numapps = I40E_DEFAULT_DCB_APP_NUM; + dcb_cfg->app[0].selector = I40E_APP_SEL_ETHTYPE; + dcb_cfg->app[0].priority = I40E_DEFAULT_DCB_APP_PRIO; + dcb_cfg->app[0].protocolid = I40E_APP_PROTOID_FCOE; + + if (dcb_rx_conf->nb_tcs == 0) + *tc_map = 1; /* tc0 only */ + else + *tc_map = RTE_LEN2MASK(dcb_rx_conf->nb_tcs, uint8_t); + + if (dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) { + dcb_cfg->pfc.willing = 0; + dcb_cfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS; + dcb_cfg->pfc.pfcenable = *tc_map; + } + return 0; +} + + +static enum i40e_status_code +i40e_vsi_update_queue_mapping(struct i40e_vsi *vsi, + struct i40e_aqc_vsi_properties_data *info, + uint8_t enabled_tcmap) +{ + enum i40e_status_code ret; + int i, total_tc = 0; + uint16_t qpnum_per_tc, bsf, qp_idx; + struct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(vsi); + struct i40e_pf *pf = I40E_VSI_TO_PF(vsi); + uint16_t used_queues; + + ret = validate_tcmap_parameter(vsi, enabled_tcmap); + if (ret != I40E_SUCCESS) + return ret; + + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + if (enabled_tcmap & (1 << i)) + total_tc++; + } + if (total_tc == 0) + total_tc = 1; + vsi->enabled_tc = enabled_tcmap; + + /* different VSI has different queues assigned */ + if (vsi->type == I40E_VSI_MAIN) + used_queues = dev_data->nb_rx_queues - + pf->nb_cfg_vmdq_vsi * RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM; + else if (vsi->type == I40E_VSI_VMDQ2) + used_queues = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM; + else { + PMD_INIT_LOG(ERR, "unsupported VSI type."); + return I40E_ERR_NO_AVAILABLE_VSI; + } + + qpnum_per_tc = used_queues / total_tc; + /* Number of queues per enabled TC */ + if (qpnum_per_tc == 0) { + PMD_INIT_LOG(ERR, " number of queues is less that tcs."); + return I40E_ERR_INVALID_QP_ID; + } + qpnum_per_tc = RTE_MIN(i40e_align_floor(qpnum_per_tc), + I40E_MAX_Q_PER_TC); + bsf = rte_bsf32(qpnum_per_tc); + + /** + * Configure TC and queue mapping parameters, for enabled TC, + * allocate qpnum_per_tc queues to this traffic. For disabled TC, + * default queue will serve it. 
+ */ + qp_idx = 0; + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + if (vsi->enabled_tc & (1 << i)) { + info->tc_mapping[i] = rte_cpu_to_le_16((qp_idx << + I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) | + (bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)); + qp_idx += qpnum_per_tc; + } else + info->tc_mapping[i] = 0; + } + + /* Associate queue number with VSI, Keep vsi->nb_qps unchanged */ + if (vsi->type == I40E_VSI_SRIOV) { + info->mapping_flags |= + rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_NONCONTIG); + for (i = 0; i < vsi->nb_qps; i++) + info->queue_mapping[i] = + rte_cpu_to_le_16(vsi->base_queue + i); + } else { + info->mapping_flags |= + rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG); + info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue); + } + info->valid_sections |= + rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID); + + return I40E_SUCCESS; +} + +/* + * i40e_config_switch_comp_tc - Configure VEB tc setting for given TC map + * @veb: VEB to be configured + * @tc_map: enabled TC bitmap + * + * Returns 0 on success, negative value on failure + */ +static enum i40e_status_code +i40e_config_switch_comp_tc(struct i40e_veb *veb, uint8_t tc_map) +{ + struct i40e_aqc_configure_switching_comp_bw_config_data veb_bw; + struct i40e_aqc_query_switching_comp_bw_config_resp bw_query; + struct i40e_aqc_query_switching_comp_ets_config_resp ets_query; + struct i40e_hw *hw = I40E_VSI_TO_HW(veb->associate_vsi); + enum i40e_status_code ret = I40E_SUCCESS; + int i; + uint32_t bw_max; + + /* Check if enabled_tc is same as existing or new TCs */ + if (veb->enabled_tc == tc_map) + return ret; + + /* configure tc bandwidth */ + memset(&veb_bw, 0, sizeof(veb_bw)); + veb_bw.tc_valid_bits = tc_map; + /* Enable ETS TCs with equal BW Share for now across all VSIs */ + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + if (tc_map & BIT_ULL(i)) + veb_bw.tc_bw_share_credits[i] = 1; + } + ret = i40e_aq_config_switch_comp_bw_config(hw, veb->seid, + &veb_bw, NULL); + if (ret) { + PMD_INIT_LOG(ERR, + "AQ command Config switch_comp BW allocation per TC failed = %d", + hw->aq.asq_last_status); + return ret; + } + + memset(&ets_query, 0, sizeof(ets_query)); + ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid, + &ets_query, NULL); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, + "Failed to get switch_comp ETS configuration %u", + hw->aq.asq_last_status); + return ret; + } + memset(&bw_query, 0, sizeof(bw_query)); + ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid, + &bw_query, NULL); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, + "Failed to get switch_comp bandwidth configuration %u", + hw->aq.asq_last_status); + return ret; + } + + /* store and print out BW info */ + veb->bw_info.bw_limit = rte_le_to_cpu_16(ets_query.port_bw_limit); + veb->bw_info.bw_max = ets_query.tc_bw_max; + PMD_DRV_LOG(DEBUG, "switch_comp bw limit:%u", veb->bw_info.bw_limit); + PMD_DRV_LOG(DEBUG, "switch_comp max_bw:%u", veb->bw_info.bw_max); + bw_max = rte_le_to_cpu_16(bw_query.tc_bw_max[0]) | + (rte_le_to_cpu_16(bw_query.tc_bw_max[1]) << + I40E_16_BIT_WIDTH); + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + veb->bw_info.bw_ets_share_credits[i] = + bw_query.tc_bw_share_credits[i]; + veb->bw_info.bw_ets_credits[i] = + rte_le_to_cpu_16(bw_query.tc_bw_limits[i]); + /* 4 bits per TC, 4th bit is reserved */ + veb->bw_info.bw_ets_max[i] = + (uint8_t)((bw_max >> (i * I40E_4_BIT_WIDTH)) & + RTE_LEN2MASK(3, uint8_t)); + PMD_DRV_LOG(DEBUG, "\tVEB TC%u:share credits %u", i, + veb->bw_info.bw_ets_share_credits[i]); + PMD_DRV_LOG(DEBUG, "\tVEB TC%u:credits 
%u", i, + veb->bw_info.bw_ets_credits[i]); + PMD_DRV_LOG(DEBUG, "\tVEB TC%u: max credits: %u", i, + veb->bw_info.bw_ets_max[i]); + } + + veb->enabled_tc = tc_map; + + return ret; +} + + +/* + * i40e_vsi_config_tc - Configure VSI tc setting for given TC map + * @vsi: VSI to be configured + * @tc_map: enabled TC bitmap + * + * Returns 0 on success, negative value on failure + */ +static enum i40e_status_code +i40e_vsi_config_tc(struct i40e_vsi *vsi, uint8_t tc_map) +{ + struct i40e_aqc_configure_vsi_tc_bw_data bw_data; + struct i40e_vsi_context ctxt; + struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); + enum i40e_status_code ret = I40E_SUCCESS; + int i; + + /* Check if enabled_tc is same as existing or new TCs */ + if (vsi->enabled_tc == tc_map) + return ret; + + /* configure tc bandwidth */ + memset(&bw_data, 0, sizeof(bw_data)); + bw_data.tc_valid_bits = tc_map; + /* Enable ETS TCs with equal BW Share for now across all VSIs */ + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + if (tc_map & BIT_ULL(i)) + bw_data.tc_bw_credits[i] = 1; + } + ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &bw_data, NULL); + if (ret) { + PMD_INIT_LOG(ERR, + "AQ command Config VSI BW allocation per TC failed = %d", + hw->aq.asq_last_status); + goto out; + } + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) + vsi->info.qs_handle[i] = bw_data.qs_handles[i]; + + /* Update Queue Pairs Mapping for currently enabled UPs */ + ctxt.seid = vsi->seid; + ctxt.pf_num = hw->pf_id; + ctxt.vf_num = 0; + ctxt.uplink_seid = vsi->uplink_seid; + ctxt.info = vsi->info; + i40e_get_cap(hw); + ret = i40e_vsi_update_queue_mapping(vsi, &ctxt.info, tc_map); + if (ret) + goto out; + + /* Update the VSI after updating the VSI queue-mapping information */ + ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to configure TC queue mapping = %d", + hw->aq.asq_last_status); + goto out; + } + /* update the local VSI info with updated queue map */ + rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping, + sizeof(vsi->info.tc_mapping)); + rte_memcpy(&vsi->info.queue_mapping, + &ctxt.info.queue_mapping, + sizeof(vsi->info.queue_mapping)); + vsi->info.mapping_flags = ctxt.info.mapping_flags; + vsi->info.valid_sections = 0; + + /* query and update current VSI BW information */ + ret = i40e_vsi_get_bw_config(vsi); + if (ret) { + PMD_INIT_LOG(ERR, + "Failed updating vsi bw info, err %s aq_err %s", + i40e_stat_str(hw, ret), + i40e_aq_str(hw, hw->aq.asq_last_status)); + goto out; + } + + vsi->enabled_tc = tc_map; + +out: + return ret; +} + +/* + * i40e_dcb_hw_configure - program the dcb setting to hw + * @pf: pf the configuration is taken on + * @new_cfg: new configuration + * @tc_map: enabled TC bitmap + * + * Returns 0 on success, negative value on failure + */ +static enum i40e_status_code +i40e_dcb_hw_configure(struct i40e_pf *pf, + struct i40e_dcbx_config *new_cfg, + uint8_t tc_map) +{ + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + struct i40e_dcbx_config *old_cfg = &hw->local_dcbx_config; + struct i40e_vsi *main_vsi = pf->main_vsi; + struct i40e_vsi_list *vsi_list; + enum i40e_status_code ret; + int i; + uint32_t val; + + /* Use the FW API if FW > v4.4*/ + if (!(((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver >= 4)) || + (hw->aq.fw_maj_ver >= 5))) { + PMD_INIT_LOG(ERR, + "FW < v4.4, can not use FW LLDP API to configure DCB"); + return I40E_ERR_FIRMWARE_API_VERSION; + } + + /* Check if need reconfiguration */ + if (!memcmp(new_cfg, old_cfg, sizeof(struct i40e_dcbx_config))) { + PMD_INIT_LOG(ERR, "No Change in DCB Config 
required."); + return I40E_SUCCESS; + } + + /* Copy the new config to the current config */ + *old_cfg = *new_cfg; + old_cfg->etsrec = old_cfg->etscfg; + ret = i40e_set_dcb_config(hw); + if (ret) { + PMD_INIT_LOG(ERR, "Set DCB Config failed, err %s aq_err %s", + i40e_stat_str(hw, ret), + i40e_aq_str(hw, hw->aq.asq_last_status)); + return ret; + } + /* set receive Arbiter to RR mode and ETS scheme by default */ + for (i = 0; i <= I40E_PRTDCB_RETSTCC_MAX_INDEX; i++) { + val = I40E_READ_REG(hw, I40E_PRTDCB_RETSTCC(i)); + val &= ~(I40E_PRTDCB_RETSTCC_BWSHARE_MASK | + I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK | + I40E_PRTDCB_RETSTCC_ETSTC_SHIFT); + val |= ((uint32_t)old_cfg->etscfg.tcbwtable[i] << + I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT) & + I40E_PRTDCB_RETSTCC_BWSHARE_MASK; + val |= ((uint32_t)1 << I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT) & + I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK; + val |= ((uint32_t)1 << I40E_PRTDCB_RETSTCC_ETSTC_SHIFT) & + I40E_PRTDCB_RETSTCC_ETSTC_MASK; + I40E_WRITE_REG(hw, I40E_PRTDCB_RETSTCC(i), val); + } + /* get local mib to check whether it is configured correctly */ + /* IEEE mode */ + hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_IEEE; + /* Get Local DCB Config */ + i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0, + &hw->local_dcbx_config); + + /* if Veb is created, need to update TC of it at first */ + if (main_vsi->veb) { + ret = i40e_config_switch_comp_tc(main_vsi->veb, tc_map); + if (ret) + PMD_INIT_LOG(WARNING, + "Failed configuring TC for VEB seid=%d", + main_vsi->veb->seid); + } + /* Update each VSI */ + i40e_vsi_config_tc(main_vsi, tc_map); + if (main_vsi->veb) { + TAILQ_FOREACH(vsi_list, &main_vsi->veb->head, list) { + /* Beside main VSI and VMDQ VSIs, only enable default + * TC for other VSIs + */ + if (vsi_list->vsi->type == I40E_VSI_VMDQ2) + ret = i40e_vsi_config_tc(vsi_list->vsi, + tc_map); + else + ret = i40e_vsi_config_tc(vsi_list->vsi, + I40E_DEFAULT_TCMAP); + if (ret) + PMD_INIT_LOG(WARNING, + "Failed configuring TC for VSI seid=%d", + vsi_list->vsi->seid); + /* continue */ + } + } + return I40E_SUCCESS; +} + +/* + * i40e_dcb_init_configure - initial dcb config + * @dev: device being configured + * @sw_dcb: indicate whether dcb is sw configured or hw offload + * + * Returns 0 on success, negative value on failure + */ +int +i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int i, ret = 0; + + if ((pf->flags & I40E_FLAG_DCB) == 0) { + PMD_INIT_LOG(ERR, "HW doesn't support DCB"); + return -ENOTSUP; + } + + /* DCB initialization: + * Update DCB configuration from the Firmware and configure + * LLDP MIB change event. + */ + if (sw_dcb == TRUE) { + /* Stopping lldp is necessary for DPDK, but it will cause + * DCB init failed. For i40e_init_dcb(), the prerequisite + * for successful initialization of DCB is that LLDP is + * enabled. So it is needed to start lldp before DCB init + * and stop it after initialization. + */ + ret = i40e_aq_start_lldp(hw, true, NULL); + if (ret != I40E_SUCCESS) + PMD_INIT_LOG(DEBUG, "Failed to start lldp"); + + ret = i40e_init_dcb(hw, true); + /* If lldp agent is stopped, the return value from + * i40e_init_dcb we expect is failure with I40E_AQ_RC_EPERM + * adminq status. Otherwise, it should return success. 
+ */ + if ((ret == I40E_SUCCESS) || (ret != I40E_SUCCESS && + hw->aq.asq_last_status == I40E_AQ_RC_EPERM)) { + memset(&hw->local_dcbx_config, 0, + sizeof(struct i40e_dcbx_config)); + /* set dcb default configuration */ + hw->local_dcbx_config.etscfg.willing = 0; + hw->local_dcbx_config.etscfg.maxtcs = 0; + hw->local_dcbx_config.etscfg.tcbwtable[0] = 100; + hw->local_dcbx_config.etscfg.tsatable[0] = + I40E_IEEE_TSA_ETS; + /* all UPs mapping to TC0 */ + for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) + hw->local_dcbx_config.etscfg.prioritytable[i] = 0; + hw->local_dcbx_config.etsrec = + hw->local_dcbx_config.etscfg; + hw->local_dcbx_config.pfc.willing = 0; + hw->local_dcbx_config.pfc.pfccap = + I40E_MAX_TRAFFIC_CLASS; + /* FW needs one App to configure HW */ + hw->local_dcbx_config.numapps = 1; + hw->local_dcbx_config.app[0].selector = + I40E_APP_SEL_ETHTYPE; + hw->local_dcbx_config.app[0].priority = 3; + hw->local_dcbx_config.app[0].protocolid = + I40E_APP_PROTOID_FCOE; + ret = i40e_set_dcb_config(hw); + if (ret) { + PMD_INIT_LOG(ERR, + "default dcb config fails. err = %d, aq_err = %d.", + ret, hw->aq.asq_last_status); + return -ENOSYS; + } + } else { + PMD_INIT_LOG(ERR, + "DCB initialization in FW fails, err = %d, aq_err = %d.", + ret, hw->aq.asq_last_status); + return -ENOTSUP; + } + + if (i40e_need_stop_lldp(dev)) { + ret = i40e_aq_stop_lldp(hw, true, true, NULL); + if (ret != I40E_SUCCESS) + PMD_INIT_LOG(DEBUG, "Failed to stop lldp"); + } + } else { + ret = i40e_aq_start_lldp(hw, true, NULL); + if (ret != I40E_SUCCESS) + PMD_INIT_LOG(DEBUG, "Failed to start lldp"); + + ret = i40e_init_dcb(hw, true); + if (!ret) { + if (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED) { + PMD_INIT_LOG(ERR, + "HW doesn't support DCBX offload."); + return -ENOTSUP; + } + } else { + PMD_INIT_LOG(ERR, + "DCBX configuration failed, err = %d, aq_err = %d.", + ret, hw->aq.asq_last_status); + return -ENOTSUP; + } + } + return 0; +} + +/* + * i40e_dcb_setup - setup dcb related config + * @dev: device being configured + * + * Returns 0 on success, negative value on failure + */ +static int +i40e_dcb_setup(struct rte_eth_dev *dev) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_dcbx_config dcb_cfg; + uint8_t tc_map = 0; + int ret = 0; + + if ((pf->flags & I40E_FLAG_DCB) == 0) { + PMD_INIT_LOG(ERR, "HW doesn't support DCB"); + return -ENOTSUP; + } + + if (pf->vf_num != 0) + PMD_INIT_LOG(DEBUG, " DCB only works on pf and vmdq vsis."); + + ret = i40e_parse_dcb_configure(dev, &dcb_cfg, &tc_map); + if (ret) { + PMD_INIT_LOG(ERR, "invalid dcb config"); + return -EINVAL; + } + ret = i40e_dcb_hw_configure(pf, &dcb_cfg, tc_map); + if (ret) { + PMD_INIT_LOG(ERR, "dcb sw configure fails"); + return -ENOSYS; + } + + return 0; +} + +static int +i40e_dev_get_dcb_info(struct rte_eth_dev *dev, + struct rte_eth_dcb_info *dcb_info) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct i40e_vsi *vsi = pf->main_vsi; + struct i40e_dcbx_config *dcb_cfg = &hw->local_dcbx_config; + uint16_t bsf, tc_mapping; + int i, j = 0; + + if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG) + dcb_info->nb_tcs = rte_bsf32(vsi->enabled_tc + 1); + else + dcb_info->nb_tcs = 1; + for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) + dcb_info->prio_tc[i] = dcb_cfg->etscfg.prioritytable[i]; + for (i = 0; i < dcb_info->nb_tcs; i++) + dcb_info->tc_bws[i] = dcb_cfg->etscfg.tcbwtable[i]; + + /* get queue mapping if vmdq is 
disabled */ + if (!pf->nb_cfg_vmdq_vsi) { + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + if (!(vsi->enabled_tc & (1 << i))) + continue; + tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]); + dcb_info->tc_queue.tc_rxq[j][i].base = + (tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >> + I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT; + dcb_info->tc_queue.tc_txq[j][i].base = + dcb_info->tc_queue.tc_rxq[j][i].base; + bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >> + I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT; + dcb_info->tc_queue.tc_rxq[j][i].nb_queue = 1 << bsf; + dcb_info->tc_queue.tc_txq[j][i].nb_queue = + dcb_info->tc_queue.tc_rxq[j][i].nb_queue; + } + return 0; + } + + /* get queue mapping if vmdq is enabled */ + do { + vsi = pf->vmdq[j].vsi; + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + if (!(vsi->enabled_tc & (1 << i))) + continue; + tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]); + dcb_info->tc_queue.tc_rxq[j][i].base = + (tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >> + I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT; + dcb_info->tc_queue.tc_txq[j][i].base = + dcb_info->tc_queue.tc_rxq[j][i].base; + bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >> + I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT; + dcb_info->tc_queue.tc_rxq[j][i].nb_queue = 1 << bsf; + dcb_info->tc_queue.tc_txq[j][i].nb_queue = + dcb_info->tc_queue.tc_rxq[j][i].nb_queue; + } + j++; + } while (j < RTE_MIN(pf->nb_cfg_vmdq_vsi, ETH_MAX_VMDQ_POOL)); + return 0; +} + +static int +i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) +{ + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint16_t msix_intr; + + msix_intr = intr_handle->intr_vec[queue_id]; + if (msix_intr == I40E_MISC_VEC_ID) + I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, + I40E_PFINT_DYN_CTL0_INTENA_MASK | + I40E_PFINT_DYN_CTL0_CLEARPBA_MASK | + I40E_PFINT_DYN_CTL0_ITR_INDX_MASK); + else + I40E_WRITE_REG(hw, + I40E_PFINT_DYN_CTLN(msix_intr - + I40E_RX_VEC_START), + I40E_PFINT_DYN_CTLN_INTENA_MASK | + I40E_PFINT_DYN_CTLN_CLEARPBA_MASK | + I40E_PFINT_DYN_CTLN_ITR_INDX_MASK); + + I40E_WRITE_FLUSH(hw); + rte_intr_ack(&pci_dev->intr_handle); + + return 0; +} + +static int +i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) +{ + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint16_t msix_intr; + + msix_intr = intr_handle->intr_vec[queue_id]; + if (msix_intr == I40E_MISC_VEC_ID) + I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, + I40E_PFINT_DYN_CTL0_ITR_INDX_MASK); + else + I40E_WRITE_REG(hw, + I40E_PFINT_DYN_CTLN(msix_intr - + I40E_RX_VEC_START), + I40E_PFINT_DYN_CTLN_ITR_INDX_MASK); + I40E_WRITE_FLUSH(hw); + + return 0; +} + +/** + * This function is used to check if the register is valid. 
+ * Below is the valid registers list for X722 only: + * 0x2b800--0x2bb00 + * 0x38700--0x38a00 + * 0x3d800--0x3db00 + * 0x208e00--0x209000 + * 0x20be00--0x20c000 + * 0x263c00--0x264000 + * 0x265c00--0x266000 + */ +static inline int i40e_valid_regs(enum i40e_mac_type type, uint32_t reg_offset) +{ + if ((type != I40E_MAC_X722) && + ((reg_offset >= 0x2b800 && reg_offset <= 0x2bb00) || + (reg_offset >= 0x38700 && reg_offset <= 0x38a00) || + (reg_offset >= 0x3d800 && reg_offset <= 0x3db00) || + (reg_offset >= 0x208e00 && reg_offset <= 0x209000) || + (reg_offset >= 0x20be00 && reg_offset <= 0x20c000) || + (reg_offset >= 0x263c00 && reg_offset <= 0x264000) || + (reg_offset >= 0x265c00 && reg_offset <= 0x266000))) + return 0; + else + return 1; +} + +static int i40e_get_regs(struct rte_eth_dev *dev, + struct rte_dev_reg_info *regs) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t *ptr_data = regs->data; + uint32_t reg_idx, arr_idx, arr_idx2, reg_offset; + const struct i40e_reg_info *reg_info; + + if (ptr_data == NULL) { + regs->length = I40E_GLGEN_STAT_CLEAR + 4; + regs->width = sizeof(uint32_t); + return 0; + } + + /* The first few registers have to be read using AQ operations */ + reg_idx = 0; + while (i40e_regs_adminq[reg_idx].name) { + reg_info = &i40e_regs_adminq[reg_idx++]; + for (arr_idx = 0; arr_idx <= reg_info->count1; arr_idx++) + for (arr_idx2 = 0; + arr_idx2 <= reg_info->count2; + arr_idx2++) { + reg_offset = arr_idx * reg_info->stride1 + + arr_idx2 * reg_info->stride2; + reg_offset += reg_info->base_addr; + ptr_data[reg_offset >> 2] = + i40e_read_rx_ctl(hw, reg_offset); + } + } + + /* The remaining registers can be read using primitives */ + reg_idx = 0; + while (i40e_regs_others[reg_idx].name) { + reg_info = &i40e_regs_others[reg_idx++]; + for (arr_idx = 0; arr_idx <= reg_info->count1; arr_idx++) + for (arr_idx2 = 0; + arr_idx2 <= reg_info->count2; + arr_idx2++) { + reg_offset = arr_idx * reg_info->stride1 + + arr_idx2 * reg_info->stride2; + reg_offset += reg_info->base_addr; + if (!i40e_valid_regs(hw->mac.type, reg_offset)) + ptr_data[reg_offset >> 2] = 0; + else + ptr_data[reg_offset >> 2] = + I40E_READ_REG(hw, reg_offset); + } + } + + return 0; +} + +static int i40e_get_eeprom_length(struct rte_eth_dev *dev) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* Convert word count to byte count */ + return hw->nvm.sr_size << 1; +} + +static int i40e_get_eeprom(struct rte_eth_dev *dev, + struct rte_dev_eeprom_info *eeprom) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint16_t *data = eeprom->data; + uint16_t offset, length, cnt_words; + int ret_code; + + offset = eeprom->offset >> 1; + length = eeprom->length >> 1; + cnt_words = length; + + if (offset > hw->nvm.sr_size || + offset + length > hw->nvm.sr_size) { + PMD_DRV_LOG(ERR, "Requested EEPROM bytes out of range."); + return -EINVAL; + } + + eeprom->magic = hw->vendor_id | (hw->device_id << 16); + + ret_code = i40e_read_nvm_buffer(hw, offset, &cnt_words, data); + if (ret_code != I40E_SUCCESS || cnt_words != length) { + PMD_DRV_LOG(ERR, "EEPROM read failed."); + return -EIO; + } + + return 0; +} + +static int i40e_get_module_info(struct rte_eth_dev *dev, + struct rte_eth_dev_module_info *modinfo) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t sff8472_comp = 0; + uint32_t sff8472_swap = 0; + uint32_t sff8636_rev = 0; + i40e_status status; + uint32_t type = 0; + + /* Check if firmware supports reading 
module EEPROM. */ + if (!(hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE)) { + PMD_DRV_LOG(ERR, + "Module EEPROM memory read not supported. " + "Please update the NVM image.\n"); + return -EINVAL; + } + + status = i40e_update_link_info(hw); + if (status) + return -EIO; + + if (hw->phy.link_info.phy_type == I40E_PHY_TYPE_EMPTY) { + PMD_DRV_LOG(ERR, + "Cannot read module EEPROM memory. " + "No module connected.\n"); + return -EINVAL; + } + + type = hw->phy.link_info.module_type[0]; + + switch (type) { + case I40E_MODULE_TYPE_SFP: + status = i40e_aq_get_phy_register(hw, + I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE, + I40E_I2C_EEPROM_DEV_ADDR, 1, + I40E_MODULE_SFF_8472_COMP, + &sff8472_comp, NULL); + if (status) + return -EIO; + + status = i40e_aq_get_phy_register(hw, + I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE, + I40E_I2C_EEPROM_DEV_ADDR, 1, + I40E_MODULE_SFF_8472_SWAP, + &sff8472_swap, NULL); + if (status) + return -EIO; + + /* Check if the module requires address swap to access + * the other EEPROM memory page. + */ + if (sff8472_swap & I40E_MODULE_SFF_ADDR_MODE) { + PMD_DRV_LOG(WARNING, + "Module address swap to access " + "page 0xA2 is not supported.\n"); + modinfo->type = RTE_ETH_MODULE_SFF_8079; + modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN; + } else if (sff8472_comp == 0x00) { + /* Module is not SFF-8472 compliant */ + modinfo->type = RTE_ETH_MODULE_SFF_8079; + modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN; + } else { + modinfo->type = RTE_ETH_MODULE_SFF_8472; + modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN; + } + break; + case I40E_MODULE_TYPE_QSFP_PLUS: + /* Read from memory page 0. */ + status = i40e_aq_get_phy_register(hw, + I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE, + 0, 1, + I40E_MODULE_REVISION_ADDR, + &sff8636_rev, NULL); + if (status) + return -EIO; + /* Determine revision compliance byte */ + if (sff8636_rev > 0x02) { + /* Module is SFF-8636 compliant */ + modinfo->type = RTE_ETH_MODULE_SFF_8636; + modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN; + } else { + modinfo->type = RTE_ETH_MODULE_SFF_8436; + modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN; + } + break; + case I40E_MODULE_TYPE_QSFP28: + modinfo->type = RTE_ETH_MODULE_SFF_8636; + modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN; + break; + default: + PMD_DRV_LOG(ERR, "Module type unrecognized\n"); + return -EINVAL; + } + return 0; +} + +static int i40e_get_module_eeprom(struct rte_eth_dev *dev, + struct rte_dev_eeprom_info *info) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + bool is_sfp = false; + i40e_status status; + uint8_t *data; + uint32_t value = 0; + uint32_t i; + + if (!info || !info->length || !info->data) + return -EINVAL; + + if (hw->phy.link_info.module_type[0] == I40E_MODULE_TYPE_SFP) + is_sfp = true; + + data = info->data; + for (i = 0; i < info->length; i++) { + u32 offset = i + info->offset; + u32 addr = is_sfp ? I40E_I2C_EEPROM_DEV_ADDR : 0; + + /* Check if we need to access the other memory page */ + if (is_sfp) { + if (offset >= RTE_ETH_MODULE_SFF_8079_LEN) { + offset -= RTE_ETH_MODULE_SFF_8079_LEN; + addr = I40E_I2C_EEPROM_DEV_ADDR2; + } + } else { + while (offset >= RTE_ETH_MODULE_SFF_8436_LEN) { + /* Compute memory page number and offset. 
*/ + offset -= RTE_ETH_MODULE_SFF_8436_LEN / 2; + addr++; + } + } + status = i40e_aq_get_phy_register(hw, + I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE, + addr, offset, 1, &value, NULL); + if (status) + return -EIO; + data[i] = (uint8_t)value; + } + return 0; +} + +static int i40e_set_default_mac_addr(struct rte_eth_dev *dev, + struct rte_ether_addr *mac_addr) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_vsi *vsi = pf->main_vsi; + struct i40e_mac_filter_info mac_filter; + struct i40e_mac_filter *f; + int ret; + + if (!rte_is_valid_assigned_ether_addr(mac_addr)) { + PMD_DRV_LOG(ERR, "Tried to set invalid MAC address."); + return -EINVAL; + } + + TAILQ_FOREACH(f, &vsi->mac_list, next) { + if (rte_is_same_ether_addr(&pf->dev_addr, + &f->mac_info.mac_addr)) + break; + } + + if (f == NULL) { + PMD_DRV_LOG(ERR, "Failed to find filter for default mac"); + return -EIO; + } + + mac_filter = f->mac_info; + ret = i40e_vsi_delete_mac(vsi, &mac_filter.mac_addr); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "Failed to delete mac filter"); + return -EIO; + } + memcpy(&mac_filter.mac_addr, mac_addr, ETH_ADDR_LEN); + ret = i40e_vsi_add_mac(vsi, &mac_filter); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "Failed to add mac filter"); + return -EIO; + } + memcpy(&pf->dev_addr, mac_addr, ETH_ADDR_LEN); + + ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL, + mac_addr->addr_bytes, NULL); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "Failed to change mac"); + return -EIO; + } + + return 0; +} + +static int +i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct rte_eth_dev_data *dev_data = pf->dev_data; + uint32_t frame_size = mtu + I40E_ETH_OVERHEAD; + int ret = 0; + + /* check if mtu is within the allowed range */ + if (mtu < RTE_ETHER_MIN_MTU || frame_size > I40E_FRAME_SIZE_MAX) + return -EINVAL; + + /* mtu setting is forbidden if port is start */ + if (dev_data->dev_started) { + PMD_DRV_LOG(ERR, "port %d must be stopped before configuration", + dev_data->port_id); + return -EBUSY; + } + + if (frame_size > RTE_ETHER_MAX_LEN) + dev_data->dev_conf.rxmode.offloads |= + DEV_RX_OFFLOAD_JUMBO_FRAME; + else + dev_data->dev_conf.rxmode.offloads &= + ~DEV_RX_OFFLOAD_JUMBO_FRAME; + + dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size; + + return ret; +} + +/* Restore ethertype filter */ +static void +i40e_ethertype_filter_restore(struct i40e_pf *pf) +{ + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + struct i40e_ethertype_filter_list + *ethertype_list = &pf->ethertype.ethertype_list; + struct i40e_ethertype_filter *f; + struct i40e_control_filter_stats stats; + uint16_t flags; + + TAILQ_FOREACH(f, ethertype_list, rules) { + flags = 0; + if (!(f->flags & RTE_ETHTYPE_FLAGS_MAC)) + flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC; + if (f->flags & RTE_ETHTYPE_FLAGS_DROP) + flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP; + flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE; + + memset(&stats, 0, sizeof(stats)); + i40e_aq_add_rem_control_packet_filter(hw, + f->input.mac_addr.addr_bytes, + f->input.ether_type, + flags, pf->main_vsi->seid, + f->queue, 1, &stats, NULL); + } + PMD_DRV_LOG(INFO, "Ethertype filter:" + " mac_etype_used = %u, etype_used = %u," + " mac_etype_free = %u, etype_free = %u", + stats.mac_etype_used, stats.etype_used, + stats.mac_etype_free, stats.etype_free); +} + +/* Restore tunnel filter 
*/ +static void +i40e_tunnel_filter_restore(struct i40e_pf *pf) +{ + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + struct i40e_vsi *vsi; + struct i40e_pf_vf *vf; + struct i40e_tunnel_filter_list + *tunnel_list = &pf->tunnel.tunnel_list; + struct i40e_tunnel_filter *f; + struct i40e_aqc_cloud_filters_element_bb cld_filter; + bool big_buffer = 0; + + TAILQ_FOREACH(f, tunnel_list, rules) { + if (!f->is_to_vf) + vsi = pf->main_vsi; + else { + vf = &pf->vfs[f->vf_id]; + vsi = vf->vsi; + } + memset(&cld_filter, 0, sizeof(cld_filter)); + rte_ether_addr_copy((struct rte_ether_addr *) + &f->input.outer_mac, + (struct rte_ether_addr *)&cld_filter.element.outer_mac); + rte_ether_addr_copy((struct rte_ether_addr *) + &f->input.inner_mac, + (struct rte_ether_addr *)&cld_filter.element.inner_mac); + cld_filter.element.inner_vlan = f->input.inner_vlan; + cld_filter.element.flags = f->input.flags; + cld_filter.element.tenant_id = f->input.tenant_id; + cld_filter.element.queue_number = f->queue; + rte_memcpy(cld_filter.general_fields, + f->input.general_fields, + sizeof(f->input.general_fields)); + + if (((f->input.flags & + I40E_AQC_ADD_CLOUD_FILTER_0X11) == + I40E_AQC_ADD_CLOUD_FILTER_0X11) || + ((f->input.flags & + I40E_AQC_ADD_CLOUD_FILTER_0X12) == + I40E_AQC_ADD_CLOUD_FILTER_0X12) || + ((f->input.flags & + I40E_AQC_ADD_CLOUD_FILTER_0X10) == + I40E_AQC_ADD_CLOUD_FILTER_0X10)) + big_buffer = 1; + + if (big_buffer) + i40e_aq_add_cloud_filters_bb(hw, + vsi->seid, &cld_filter, 1); + else + i40e_aq_add_cloud_filters(hw, vsi->seid, + &cld_filter.element, 1); + } +} + +/* Restore RSS filter */ +static inline void +i40e_rss_filter_restore(struct i40e_pf *pf) +{ + struct i40e_rss_conf_list *list = &pf->rss_config_list; + struct i40e_rss_filter *filter; + + TAILQ_FOREACH(filter, list, next) { + i40e_config_rss_filter(pf, &filter->rss_filter_info, TRUE); + } +} + +static void +i40e_filter_restore(struct i40e_pf *pf) +{ + i40e_ethertype_filter_restore(pf); + i40e_tunnel_filter_restore(pf); + i40e_fdir_filter_restore(pf); + i40e_rss_filter_restore(pf); +} + +bool +is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv) +{ + if (strcmp(dev->device->driver->name, drv->driver.name)) + return false; + + return true; +} + +bool +is_i40e_supported(struct rte_eth_dev *dev) +{ + return is_device_supported(dev, &rte_i40e_pmd); +} + +struct i40e_customized_pctype* +i40e_find_customized_pctype(struct i40e_pf *pf, uint8_t index) +{ + int i; + + for (i = 0; i < I40E_CUSTOMIZED_MAX; i++) { + if (pf->customized_pctype[i].index == index) + return &pf->customized_pctype[i]; + } + return NULL; +} + +static int +i40e_update_customized_pctype(struct rte_eth_dev *dev, uint8_t *pkg, + uint32_t pkg_size, uint32_t proto_num, + struct rte_pmd_i40e_proto_info *proto, + enum rte_pmd_i40e_package_op op) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + uint32_t pctype_num; + struct rte_pmd_i40e_ptype_info *pctype; + uint32_t buff_size; + struct i40e_customized_pctype *new_pctype = NULL; + uint8_t proto_id; + uint8_t pctype_value; + char name[64]; + uint32_t i, j, n; + int ret; + + if (op != RTE_PMD_I40E_PKG_OP_WR_ADD && + op != RTE_PMD_I40E_PKG_OP_WR_DEL) { + PMD_DRV_LOG(ERR, "Unsupported operation."); + return -1; + } + + ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size, + (uint8_t *)&pctype_num, sizeof(pctype_num), + RTE_PMD_I40E_PKG_INFO_PCTYPE_NUM); + if (ret) { + PMD_DRV_LOG(ERR, "Failed to get pctype number"); + return -1; + } + if (!pctype_num) { + PMD_DRV_LOG(INFO, "No new pctype added"); + return 
-1; + } + + buff_size = pctype_num * sizeof(struct rte_pmd_i40e_proto_info); + pctype = rte_zmalloc("new_pctype", buff_size, 0); + if (!pctype) { + PMD_DRV_LOG(ERR, "Failed to allocate memory"); + return -1; + } + /* get information about new pctype list */ + ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size, + (uint8_t *)pctype, buff_size, + RTE_PMD_I40E_PKG_INFO_PCTYPE_LIST); + if (ret) { + PMD_DRV_LOG(ERR, "Failed to get pctype list"); + rte_free(pctype); + return -1; + } + + /* Update customized pctype. */ + for (i = 0; i < pctype_num; i++) { + pctype_value = pctype[i].ptype_id; + memset(name, 0, sizeof(name)); + for (j = 0; j < RTE_PMD_I40E_PROTO_NUM; j++) { + proto_id = pctype[i].protocols[j]; + if (proto_id == RTE_PMD_I40E_PROTO_UNUSED) + continue; + for (n = 0; n < proto_num; n++) { + if (proto[n].proto_id != proto_id) + continue; + strlcat(name, proto[n].name, sizeof(name)); + strlcat(name, "_", sizeof(name)); + break; + } + } + name[strlen(name) - 1] = '\0'; + PMD_DRV_LOG(INFO, "name = %s\n", name); + if (!strcmp(name, "GTPC")) + new_pctype = + i40e_find_customized_pctype(pf, + I40E_CUSTOMIZED_GTPC); + else if (!strcmp(name, "GTPU_IPV4")) + new_pctype = + i40e_find_customized_pctype(pf, + I40E_CUSTOMIZED_GTPU_IPV4); + else if (!strcmp(name, "GTPU_IPV6")) + new_pctype = + i40e_find_customized_pctype(pf, + I40E_CUSTOMIZED_GTPU_IPV6); + else if (!strcmp(name, "GTPU")) + new_pctype = + i40e_find_customized_pctype(pf, + I40E_CUSTOMIZED_GTPU); + else if (!strcmp(name, "IPV4_L2TPV3")) + new_pctype = + i40e_find_customized_pctype(pf, + I40E_CUSTOMIZED_IPV4_L2TPV3); + else if (!strcmp(name, "IPV6_L2TPV3")) + new_pctype = + i40e_find_customized_pctype(pf, + I40E_CUSTOMIZED_IPV6_L2TPV3); + else if (!strcmp(name, "IPV4_ESP")) + new_pctype = + i40e_find_customized_pctype(pf, + I40E_CUSTOMIZED_ESP_IPV4); + else if (!strcmp(name, "IPV6_ESP")) + new_pctype = + i40e_find_customized_pctype(pf, + I40E_CUSTOMIZED_ESP_IPV6); + else if (!strcmp(name, "IPV4_UDP_ESP")) + new_pctype = + i40e_find_customized_pctype(pf, + I40E_CUSTOMIZED_ESP_IPV4_UDP); + else if (!strcmp(name, "IPV6_UDP_ESP")) + new_pctype = + i40e_find_customized_pctype(pf, + I40E_CUSTOMIZED_ESP_IPV6_UDP); + else if (!strcmp(name, "IPV4_AH")) + new_pctype = + i40e_find_customized_pctype(pf, + I40E_CUSTOMIZED_AH_IPV4); + else if (!strcmp(name, "IPV6_AH")) + new_pctype = + i40e_find_customized_pctype(pf, + I40E_CUSTOMIZED_AH_IPV6); + if (new_pctype) { + if (op == RTE_PMD_I40E_PKG_OP_WR_ADD) { + new_pctype->pctype = pctype_value; + new_pctype->valid = true; + } else { + new_pctype->pctype = I40E_FILTER_PCTYPE_INVALID; + new_pctype->valid = false; + } + } + } + + rte_free(pctype); + return 0; +} + +static int +i40e_update_customized_ptype(struct rte_eth_dev *dev, uint8_t *pkg, + uint32_t pkg_size, uint32_t proto_num, + struct rte_pmd_i40e_proto_info *proto, + enum rte_pmd_i40e_package_op op) +{ + struct rte_pmd_i40e_ptype_mapping *ptype_mapping; + uint16_t port_id = dev->data->port_id; + uint32_t ptype_num; + struct rte_pmd_i40e_ptype_info *ptype; + uint32_t buff_size; + uint8_t proto_id; + char name[RTE_PMD_I40E_DDP_NAME_SIZE]; + uint32_t i, j, n; + bool in_tunnel; + int ret; + + if (op != RTE_PMD_I40E_PKG_OP_WR_ADD && + op != RTE_PMD_I40E_PKG_OP_WR_DEL) { + PMD_DRV_LOG(ERR, "Unsupported operation."); + return -1; + } + + if (op == RTE_PMD_I40E_PKG_OP_WR_DEL) { + rte_pmd_i40e_ptype_mapping_reset(port_id); + return 0; + } + + /* get information about new ptype num */ + ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size, + (uint8_t *)&ptype_num, 
sizeof(ptype_num), + RTE_PMD_I40E_PKG_INFO_PTYPE_NUM); + if (ret) { + PMD_DRV_LOG(ERR, "Failed to get ptype number"); + return ret; + } + if (!ptype_num) { + PMD_DRV_LOG(INFO, "No new ptype added"); + return -1; + } + + buff_size = ptype_num * sizeof(struct rte_pmd_i40e_ptype_info); + ptype = rte_zmalloc("new_ptype", buff_size, 0); + if (!ptype) { + PMD_DRV_LOG(ERR, "Failed to allocate memory"); + return -1; + } + + /* get information about new ptype list */ + ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size, + (uint8_t *)ptype, buff_size, + RTE_PMD_I40E_PKG_INFO_PTYPE_LIST); + if (ret) { + PMD_DRV_LOG(ERR, "Failed to get ptype list"); + rte_free(ptype); + return ret; + } + + buff_size = ptype_num * sizeof(struct rte_pmd_i40e_ptype_mapping); + ptype_mapping = rte_zmalloc("ptype_mapping", buff_size, 0); + if (!ptype_mapping) { + PMD_DRV_LOG(ERR, "Failed to allocate memory"); + rte_free(ptype); + return -1; + } + + /* Update ptype mapping table. */ + for (i = 0; i < ptype_num; i++) { + ptype_mapping[i].hw_ptype = ptype[i].ptype_id; + ptype_mapping[i].sw_ptype = 0; + in_tunnel = false; + for (j = 0; j < RTE_PMD_I40E_PROTO_NUM; j++) { + proto_id = ptype[i].protocols[j]; + if (proto_id == RTE_PMD_I40E_PROTO_UNUSED) + continue; + for (n = 0; n < proto_num; n++) { + if (proto[n].proto_id != proto_id) + continue; + memset(name, 0, sizeof(name)); + strcpy(name, proto[n].name); + PMD_DRV_LOG(INFO, "name = %s\n", name); + if (!strncasecmp(name, "PPPOE", 5)) + ptype_mapping[i].sw_ptype |= + RTE_PTYPE_L2_ETHER_PPPOE; + else if (!strncasecmp(name, "IPV4FRAG", 8) && + !in_tunnel) { + ptype_mapping[i].sw_ptype |= + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN; + ptype_mapping[i].sw_ptype |= + RTE_PTYPE_L4_FRAG; + } else if (!strncasecmp(name, "IPV4FRAG", 8) && + in_tunnel) { + ptype_mapping[i].sw_ptype |= + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN; + ptype_mapping[i].sw_ptype |= + RTE_PTYPE_INNER_L4_FRAG; + } else if (!strncasecmp(name, "OIPV4", 5)) { + ptype_mapping[i].sw_ptype |= + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN; + in_tunnel = true; + } else if (!strncasecmp(name, "IPV4", 4) && + !in_tunnel) + ptype_mapping[i].sw_ptype |= + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN; + else if (!strncasecmp(name, "IPV4", 4) && + in_tunnel) + ptype_mapping[i].sw_ptype |= + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN; + else if (!strncasecmp(name, "IPV6FRAG", 8) && + !in_tunnel) { + ptype_mapping[i].sw_ptype |= + RTE_PTYPE_L3_IPV6_EXT_UNKNOWN; + ptype_mapping[i].sw_ptype |= + RTE_PTYPE_L4_FRAG; + } else if (!strncasecmp(name, "IPV6FRAG", 8) && + in_tunnel) { + ptype_mapping[i].sw_ptype |= + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN; + ptype_mapping[i].sw_ptype |= + RTE_PTYPE_INNER_L4_FRAG; + } else if (!strncasecmp(name, "OIPV6", 5)) { + ptype_mapping[i].sw_ptype |= + RTE_PTYPE_L3_IPV6_EXT_UNKNOWN; + in_tunnel = true; + } else if (!strncasecmp(name, "IPV6", 4) && + !in_tunnel) + ptype_mapping[i].sw_ptype |= + RTE_PTYPE_L3_IPV6_EXT_UNKNOWN; + else if (!strncasecmp(name, "IPV6", 4) && + in_tunnel) + ptype_mapping[i].sw_ptype |= + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN; + else if (!strncasecmp(name, "UDP", 3) && + !in_tunnel) + ptype_mapping[i].sw_ptype |= + RTE_PTYPE_L4_UDP; + else if (!strncasecmp(name, "UDP", 3) && + in_tunnel) + ptype_mapping[i].sw_ptype |= + RTE_PTYPE_INNER_L4_UDP; + else if (!strncasecmp(name, "TCP", 3) && + !in_tunnel) + ptype_mapping[i].sw_ptype |= + RTE_PTYPE_L4_TCP; + else if (!strncasecmp(name, "TCP", 3) && + in_tunnel) + ptype_mapping[i].sw_ptype |= + RTE_PTYPE_INNER_L4_TCP; + else if (!strncasecmp(name, "SCTP", 4) && + !in_tunnel) + 
ptype_mapping[i].sw_ptype |= + RTE_PTYPE_L4_SCTP; + else if (!strncasecmp(name, "SCTP", 4) && + in_tunnel) + ptype_mapping[i].sw_ptype |= + RTE_PTYPE_INNER_L4_SCTP; + else if ((!strncasecmp(name, "ICMP", 4) || + !strncasecmp(name, "ICMPV6", 6)) && + !in_tunnel) + ptype_mapping[i].sw_ptype |= + RTE_PTYPE_L4_ICMP; + else if ((!strncasecmp(name, "ICMP", 4) || + !strncasecmp(name, "ICMPV6", 6)) && + in_tunnel) + ptype_mapping[i].sw_ptype |= + RTE_PTYPE_INNER_L4_ICMP; + else if (!strncasecmp(name, "GTPC", 4)) { + ptype_mapping[i].sw_ptype |= + RTE_PTYPE_TUNNEL_GTPC; + in_tunnel = true; + } else if (!strncasecmp(name, "GTPU", 4)) { + ptype_mapping[i].sw_ptype |= + RTE_PTYPE_TUNNEL_GTPU; + in_tunnel = true; + } else if (!strncasecmp(name, "ESP", 3)) { + ptype_mapping[i].sw_ptype |= + RTE_PTYPE_TUNNEL_ESP; + in_tunnel = true; + } else if (!strncasecmp(name, "GRENAT", 6)) { + ptype_mapping[i].sw_ptype |= + RTE_PTYPE_TUNNEL_GRENAT; + in_tunnel = true; + } else if (!strncasecmp(name, "L2TPV2CTL", 9) || + !strncasecmp(name, "L2TPV2", 6) || + !strncasecmp(name, "L2TPV3", 6)) { + ptype_mapping[i].sw_ptype |= + RTE_PTYPE_TUNNEL_L2TP; + in_tunnel = true; + } + + break; + } + } + } + + ret = rte_pmd_i40e_ptype_mapping_update(port_id, ptype_mapping, + ptype_num, 0); + if (ret) + PMD_DRV_LOG(ERR, "Failed to update ptype mapping table."); + + rte_free(ptype_mapping); + rte_free(ptype); + return ret; +} + +void +i40e_update_customized_info(struct rte_eth_dev *dev, uint8_t *pkg, + uint32_t pkg_size, enum rte_pmd_i40e_package_op op) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + uint32_t proto_num; + struct rte_pmd_i40e_proto_info *proto; + uint32_t buff_size; + uint32_t i; + int ret; + + if (op != RTE_PMD_I40E_PKG_OP_WR_ADD && + op != RTE_PMD_I40E_PKG_OP_WR_DEL) { + PMD_DRV_LOG(ERR, "Unsupported operation."); + return; + } + + /* get information about protocol number */ + ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size, + (uint8_t *)&proto_num, sizeof(proto_num), + RTE_PMD_I40E_PKG_INFO_PROTOCOL_NUM); + if (ret) { + PMD_DRV_LOG(ERR, "Failed to get protocol number"); + return; + } + if (!proto_num) { + PMD_DRV_LOG(INFO, "No new protocol added"); + return; + } + + buff_size = proto_num * sizeof(struct rte_pmd_i40e_proto_info); + proto = rte_zmalloc("new_proto", buff_size, 0); + if (!proto) { + PMD_DRV_LOG(ERR, "Failed to allocate memory"); + return; + } + + /* get information about protocol list */ + ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size, + (uint8_t *)proto, buff_size, + RTE_PMD_I40E_PKG_INFO_PROTOCOL_LIST); + if (ret) { + PMD_DRV_LOG(ERR, "Failed to get protocol list"); + rte_free(proto); + return; + } + + /* Check if GTP is supported. */ + for (i = 0; i < proto_num; i++) { + if (!strncmp(proto[i].name, "GTP", 3)) { + if (op == RTE_PMD_I40E_PKG_OP_WR_ADD) + pf->gtp_support = true; + else + pf->gtp_support = false; + break; + } + } + + /* Check if ESP is supported. 
*/ + for (i = 0; i < proto_num; i++) { + if (!strncmp(proto[i].name, "ESP", 3)) { + if (op == RTE_PMD_I40E_PKG_OP_WR_ADD) + pf->esp_support = true; + else + pf->esp_support = false; + break; + } + } + + /* Update customized pctype info */ + ret = i40e_update_customized_pctype(dev, pkg, pkg_size, + proto_num, proto, op); + if (ret) + PMD_DRV_LOG(INFO, "No pctype is updated."); + + /* Update customized ptype info */ + ret = i40e_update_customized_ptype(dev, pkg, pkg_size, + proto_num, proto, op); + if (ret) + PMD_DRV_LOG(INFO, "No ptype is updated."); + + rte_free(proto); +} + +/* Create a QinQ cloud filter + * + * The Fortville NIC has limited resources for tunnel filters, + * so we can only reuse existing filters. + * + * In step 1 we define which Field Vector fields can be used for + * filter types. + * As we do not have the inner tag defined as a field, + * we have to define it first, by reusing one of L1 entries. + * + * In step 2 we are replacing one of existing filter types with + * a new one for QinQ. + * As we reusing L1 and replacing L2, some of the default filter + * types will disappear,which depends on L1 and L2 entries we reuse. + * + * Step 1: Create L1 filter of outer vlan (12b) + inner vlan (12b) + * + * 1. Create L1 filter of outer vlan (12b) which will be in use + * later when we define the cloud filter. + * a. Valid_flags.replace_cloud = 0 + * b. Old_filter = 10 (Stag_Inner_Vlan) + * c. New_filter = 0x10 + * d. TR bit = 0xff (optional, not used here) + * e. Buffer – 2 entries: + * i. Byte 0 = 8 (outer vlan FV index). + * Byte 1 = 0 (rsv) + * Byte 2-3 = 0x0fff + * ii. Byte 0 = 37 (inner vlan FV index). + * Byte 1 =0 (rsv) + * Byte 2-3 = 0x0fff + * + * Step 2: + * 2. Create cloud filter using two L1 filters entries: stag and + * new filter(outer vlan+ inner vlan) + * a. Valid_flags.replace_cloud = 1 + * b. Old_filter = 1 (instead of outer IP) + * c. New_filter = 0x10 + * d. Buffer – 2 entries: + * i. Byte 0 = 0x80 | 7 (valid | Stag). + * Byte 1-3 = 0 (rsv) + * ii. 
Byte 8 = 0x80 | 0x10 (valid | new l1 filter step1) + * Byte 9-11 = 0 (rsv) + */ +static int +i40e_cloud_filter_qinq_create(struct i40e_pf *pf) +{ + int ret = -ENOTSUP; + struct i40e_aqc_replace_cloud_filters_cmd filter_replace; + struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf; + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev; + + if (pf->support_multi_driver) { + PMD_DRV_LOG(ERR, "Replace cloud filter is not supported."); + return ret; + } + + /* Init */ + memset(&filter_replace, 0, + sizeof(struct i40e_aqc_replace_cloud_filters_cmd)); + memset(&filter_replace_buf, 0, + sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf)); + + /* create L1 filter */ + filter_replace.old_filter_type = + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN; + filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_0X10; + filter_replace.tr_bit = 0; + + /* Prepare the buffer, 2 entries */ + filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_VLAN; + filter_replace_buf.data[0] |= + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED; + /* Field Vector 12b mask */ + filter_replace_buf.data[2] = 0xff; + filter_replace_buf.data[3] = 0x0f; + filter_replace_buf.data[4] = + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_INNER_VLAN; + filter_replace_buf.data[4] |= + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED; + /* Field Vector 12b mask */ + filter_replace_buf.data[6] = 0xff; + filter_replace_buf.data[7] = 0x0f; + ret = i40e_aq_replace_cloud_filters(hw, &filter_replace, + &filter_replace_buf); + if (ret != I40E_SUCCESS) + return ret; + + if (filter_replace.old_filter_type != + filter_replace.new_filter_type) + PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type." + " original: 0x%x, new: 0x%x", + dev->device->name, + filter_replace.old_filter_type, + filter_replace.new_filter_type); + + /* Apply the second L2 cloud filter */ + memset(&filter_replace, 0, + sizeof(struct i40e_aqc_replace_cloud_filters_cmd)); + memset(&filter_replace_buf, 0, + sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf)); + + /* create L2 filter, input for L2 filter will be L1 filter */ + filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER; + filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_OIP; + filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_0X10; + + /* Prepare the buffer, 2 entries */ + filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG; + filter_replace_buf.data[0] |= + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED; + filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_0X10; + filter_replace_buf.data[4] |= + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED; + ret = i40e_aq_replace_cloud_filters(hw, &filter_replace, + &filter_replace_buf); + if (!ret && (filter_replace.old_filter_type != + filter_replace.new_filter_type)) + PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type." 
+ " original: 0x%x, new: 0x%x", + dev->device->name, + filter_replace.old_filter_type, + filter_replace.new_filter_type); + + return ret; +} + +int +i40e_rss_conf_init(struct i40e_rte_flow_rss_conf *out, + const struct rte_flow_action_rss *in) +{ + if (in->key_len > RTE_DIM(out->key) || + in->queue_num > RTE_DIM(out->queue)) + return -EINVAL; + if (!in->key && in->key_len) + return -EINVAL; + out->conf = (struct rte_flow_action_rss){ + .func = in->func, + .level = in->level, + .types = in->types, + .key_len = in->key_len, + .queue_num = in->queue_num, + .queue = memcpy(out->queue, in->queue, + sizeof(*in->queue) * in->queue_num), + }; + if (in->key) + out->conf.key = memcpy(out->key, in->key, in->key_len); + return 0; +} + +/* Write HENA register to enable hash */ +static int +i40e_rss_hash_set(struct i40e_pf *pf, struct i40e_rte_flow_rss_conf *rss_conf) +{ + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + uint8_t *key = (void *)(uintptr_t)rss_conf->conf.key; + uint64_t hena; + int ret; + + ret = i40e_set_rss_key(pf->main_vsi, key, + rss_conf->conf.key_len); + if (ret) + return ret; + + hena = i40e_config_hena(pf->adapter, rss_conf->conf.types); + i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena); + i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32)); + I40E_WRITE_FLUSH(hw); + + return 0; +} + +/* Configure hash input set */ +static int +i40e_rss_conf_hash_inset(struct i40e_pf *pf, uint64_t types) +{ + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + struct rte_eth_input_set_conf conf; + uint64_t mask0; + int ret = 0; + uint32_t j; + int i; + static const struct { + uint64_t type; + enum rte_eth_input_set_field field; + } inset_match_table[] = { + {ETH_RSS_FRAG_IPV4 | ETH_RSS_L3_SRC_ONLY, + RTE_ETH_INPUT_SET_L3_SRC_IP4}, + {ETH_RSS_FRAG_IPV4 | ETH_RSS_L3_DST_ONLY, + RTE_ETH_INPUT_SET_L3_DST_IP4}, + {ETH_RSS_FRAG_IPV4 | ETH_RSS_L4_SRC_ONLY, + RTE_ETH_INPUT_SET_UNKNOWN}, + {ETH_RSS_FRAG_IPV4 | ETH_RSS_L4_DST_ONLY, + RTE_ETH_INPUT_SET_UNKNOWN}, + + {ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L3_SRC_ONLY, + RTE_ETH_INPUT_SET_L3_SRC_IP4}, + {ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L3_DST_ONLY, + RTE_ETH_INPUT_SET_L3_DST_IP4}, + {ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L4_SRC_ONLY, + RTE_ETH_INPUT_SET_L4_TCP_SRC_PORT}, + {ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L4_DST_ONLY, + RTE_ETH_INPUT_SET_L4_TCP_DST_PORT}, + + {ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L3_SRC_ONLY, + RTE_ETH_INPUT_SET_L3_SRC_IP4}, + {ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L3_DST_ONLY, + RTE_ETH_INPUT_SET_L3_DST_IP4}, + {ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L4_SRC_ONLY, + RTE_ETH_INPUT_SET_L4_UDP_SRC_PORT}, + {ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L4_DST_ONLY, + RTE_ETH_INPUT_SET_L4_UDP_DST_PORT}, + + {ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L3_SRC_ONLY, + RTE_ETH_INPUT_SET_L3_SRC_IP4}, + {ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L3_DST_ONLY, + RTE_ETH_INPUT_SET_L3_DST_IP4}, + {ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L4_SRC_ONLY, + RTE_ETH_INPUT_SET_L4_SCTP_SRC_PORT}, + {ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L4_DST_ONLY, + RTE_ETH_INPUT_SET_L4_SCTP_DST_PORT}, + + {ETH_RSS_NONFRAG_IPV4_OTHER | ETH_RSS_L3_SRC_ONLY, + RTE_ETH_INPUT_SET_L3_SRC_IP4}, + {ETH_RSS_NONFRAG_IPV4_OTHER | ETH_RSS_L3_DST_ONLY, + RTE_ETH_INPUT_SET_L3_DST_IP4}, + {ETH_RSS_NONFRAG_IPV4_OTHER | ETH_RSS_L4_SRC_ONLY, + RTE_ETH_INPUT_SET_UNKNOWN}, + {ETH_RSS_NONFRAG_IPV4_OTHER | ETH_RSS_L4_DST_ONLY, + RTE_ETH_INPUT_SET_UNKNOWN}, + + {ETH_RSS_FRAG_IPV6 | ETH_RSS_L3_SRC_ONLY, + RTE_ETH_INPUT_SET_L3_SRC_IP6}, + {ETH_RSS_FRAG_IPV6 | ETH_RSS_L3_DST_ONLY, + RTE_ETH_INPUT_SET_L3_DST_IP6}, + {ETH_RSS_FRAG_IPV6 | 
ETH_RSS_L4_SRC_ONLY, + RTE_ETH_INPUT_SET_UNKNOWN}, + {ETH_RSS_FRAG_IPV6 | ETH_RSS_L4_DST_ONLY, + RTE_ETH_INPUT_SET_UNKNOWN}, + + {ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L3_SRC_ONLY, + RTE_ETH_INPUT_SET_L3_SRC_IP6}, + {ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L3_DST_ONLY, + RTE_ETH_INPUT_SET_L3_DST_IP6}, + {ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L4_SRC_ONLY, + RTE_ETH_INPUT_SET_L4_TCP_SRC_PORT}, + {ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L4_DST_ONLY, + RTE_ETH_INPUT_SET_L4_TCP_DST_PORT}, + + {ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L3_SRC_ONLY, + RTE_ETH_INPUT_SET_L3_SRC_IP6}, + {ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L3_DST_ONLY, + RTE_ETH_INPUT_SET_L3_DST_IP6}, + {ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L4_SRC_ONLY, + RTE_ETH_INPUT_SET_L4_UDP_SRC_PORT}, + {ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L4_DST_ONLY, + RTE_ETH_INPUT_SET_L4_UDP_DST_PORT}, + + {ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L3_SRC_ONLY, + RTE_ETH_INPUT_SET_L3_SRC_IP6}, + {ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L3_DST_ONLY, + RTE_ETH_INPUT_SET_L3_DST_IP6}, + {ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L4_SRC_ONLY, + RTE_ETH_INPUT_SET_L4_SCTP_SRC_PORT}, + {ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L4_DST_ONLY, + RTE_ETH_INPUT_SET_L4_SCTP_DST_PORT}, + + {ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_L3_SRC_ONLY, + RTE_ETH_INPUT_SET_L3_SRC_IP6}, + {ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_L3_DST_ONLY, + RTE_ETH_INPUT_SET_L3_DST_IP6}, + {ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_L4_SRC_ONLY, + RTE_ETH_INPUT_SET_UNKNOWN}, + {ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_L4_DST_ONLY, + RTE_ETH_INPUT_SET_UNKNOWN}, + }; + + mask0 = types & pf->adapter->flow_types_mask; + conf.op = RTE_ETH_INPUT_SET_SELECT; + conf.inset_size = 0; + for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < RTE_ETH_FLOW_MAX; i++) { + if (mask0 & (1ULL << i)) { + conf.flow_type = i; + break; + } + } + + for (j = 0; j < RTE_DIM(inset_match_table); j++) { + if ((types & inset_match_table[j].type) == + inset_match_table[j].type) { + if (inset_match_table[j].field == + RTE_ETH_INPUT_SET_UNKNOWN) + return -EINVAL; + + conf.field[conf.inset_size] = + inset_match_table[j].field; + conf.inset_size++; + } + } + + if (conf.inset_size) { + ret = i40e_hash_filter_inset_select(hw, &conf); + if (ret) + return ret; + } + + return ret; +} + +/* Look up the conflicted rule then mark it as invalid */ +static void +i40e_rss_mark_invalid_rule(struct i40e_pf *pf, + struct i40e_rte_flow_rss_conf *conf) +{ + struct i40e_rss_filter *rss_item; + uint64_t rss_inset; + + /* Clear input set bits before comparing the pctype */ + rss_inset = ~(ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY | + ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY); + + /* Look up the conflicted rule then mark it as invalid */ + TAILQ_FOREACH(rss_item, &pf->rss_config_list, next) { + if (!rss_item->rss_filter_info.valid) + continue; + + if (conf->conf.queue_num && + rss_item->rss_filter_info.conf.queue_num) + rss_item->rss_filter_info.valid = false; + + if (conf->conf.types && + (rss_item->rss_filter_info.conf.types & + rss_inset) == + (conf->conf.types & rss_inset)) + rss_item->rss_filter_info.valid = false; + + if (conf->conf.func == + RTE_ETH_HASH_FUNCTION_SIMPLE_XOR && + rss_item->rss_filter_info.conf.func == + RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) + rss_item->rss_filter_info.valid = false; + } +} + +/* Configure RSS hash function */ +static int +i40e_rss_config_hash_function(struct i40e_pf *pf, + struct i40e_rte_flow_rss_conf *conf) +{ + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + uint32_t reg, i; + uint64_t mask0; + uint16_t j; + + if (conf->conf.func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) { + reg = i40e_read_rx_ctl(hw, 
I40E_GLQF_CTL); + if (!(reg & I40E_GLQF_CTL_HTOEP_MASK)) { + PMD_DRV_LOG(DEBUG, "Hash function already set to Simple XOR"); + I40E_WRITE_FLUSH(hw); + i40e_rss_mark_invalid_rule(pf, conf); + + return 0; + } + reg &= ~I40E_GLQF_CTL_HTOEP_MASK; + + i40e_write_global_rx_ctl(hw, I40E_GLQF_CTL, reg); + I40E_WRITE_FLUSH(hw); + i40e_rss_mark_invalid_rule(pf, conf); + } else if (conf->conf.func == + RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ) { + mask0 = conf->conf.types & pf->adapter->flow_types_mask; + + i40e_set_symmetric_hash_enable_per_port(hw, 1); + for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < UINT64_BIT; i++) { + if (mask0 & (1UL << i)) + break; + } + + if (i == UINT64_BIT) + return -EINVAL; + + for (j = I40E_FILTER_PCTYPE_INVALID + 1; + j < I40E_FILTER_PCTYPE_MAX; j++) { + if (pf->adapter->pctypes_tbl[i] & (1ULL << j)) + i40e_write_global_rx_ctl(hw, + I40E_GLQF_HSYM(j), + I40E_GLQF_HSYM_SYMH_ENA_MASK); + } + } + + return 0; +} + +/* Enable RSS according to the configuration */ +static int +i40e_rss_enable_hash(struct i40e_pf *pf, + struct i40e_rte_flow_rss_conf *conf) +{ + struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info; + struct i40e_rte_flow_rss_conf rss_conf; + + if (!(conf->conf.types & pf->adapter->flow_types_mask)) + return -ENOTSUP; + + memset(&rss_conf, 0, sizeof(rss_conf)); + rte_memcpy(&rss_conf, conf, sizeof(rss_conf)); + + /* Configure hash input set */ + if (i40e_rss_conf_hash_inset(pf, conf->conf.types)) + return -EINVAL; + + if (rss_conf.conf.key == NULL || rss_conf.conf.key_len < + (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) { + /* Random default keys */ + static uint32_t rss_key_default[] = {0x6b793944, + 0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8, + 0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605, + 0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581}; + + rss_conf.conf.key = (uint8_t *)rss_key_default; + rss_conf.conf.key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) * + sizeof(uint32_t); + PMD_DRV_LOG(INFO, + "No valid RSS key config for i40e, using default\n"); + } + + rss_conf.conf.types |= rss_info->conf.types; + i40e_rss_hash_set(pf, &rss_conf); + + if (conf->conf.func == RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ) + i40e_rss_config_hash_function(pf, conf); + + i40e_rss_mark_invalid_rule(pf, conf); + + return 0; +} + +/* Configure RSS queue region */ +static int +i40e_rss_config_queue_region(struct i40e_pf *pf, + struct i40e_rte_flow_rss_conf *conf) +{ + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + uint32_t lut = 0; + uint16_t j, num; + uint32_t i; + + /* If both VMDQ and RSS enabled, not all of PF queues are configured. + * It's necessary to calculate the actual PF queues that are configured. 
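+ *
+ * A minimal worked example (values assumed for illustration): the
+ * redirection-table loop below packs four 8-bit queue indices into each
+ * 32-bit HLUT register, first index in the most significant byte.
+ * With conf->conf.queue[] = {0, 1, 2, 3} and num = 4:
+ *
+ *   i = j = 0..3:  lut = (lut << 8) | queue[j]  ->  lut == 0x00010203
+ *   (i & 3) == 3                                ->  I40E_PFQF_HLUT(0) = 0x00010203
+ *   i = 4..7 wraps j back to 0                  ->  I40E_PFQF_HLUT(1) = 0x00010203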
+ */ + if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) + num = i40e_pf_calc_configured_queues_num(pf); + else + num = pf->dev_data->nb_rx_queues; + + num = RTE_MIN(num, conf->conf.queue_num); + PMD_DRV_LOG(INFO, "Max of contiguous %u PF queues are configured", + num); + + if (num == 0) { + PMD_DRV_LOG(ERR, + "No PF queues are configured to enable RSS for port %u", + pf->dev_data->port_id); + return -ENOTSUP; + } + + /* Fill in redirection table */ + for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) { + if (j == num) + j = 0; + lut = (lut << 8) | (conf->conf.queue[j] & ((0x1 << + hw->func_caps.rss_table_entry_width) - 1)); + if ((i & 3) == 3) + I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut); + } + + i40e_rss_mark_invalid_rule(pf, conf); + + return 0; +} + +/* Configure RSS hash function to default */ +static int +i40e_rss_clear_hash_function(struct i40e_pf *pf, + struct i40e_rte_flow_rss_conf *conf) +{ + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + uint32_t i, reg; + uint64_t mask0; + uint16_t j; + + if (conf->conf.func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) { + reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL); + if (reg & I40E_GLQF_CTL_HTOEP_MASK) { + PMD_DRV_LOG(DEBUG, + "Hash function already set to Toeplitz"); + I40E_WRITE_FLUSH(hw); + + return 0; + } + reg |= I40E_GLQF_CTL_HTOEP_MASK; + + i40e_write_global_rx_ctl(hw, I40E_GLQF_CTL, reg); + I40E_WRITE_FLUSH(hw); + } else if (conf->conf.func == + RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ) { + mask0 = conf->conf.types & pf->adapter->flow_types_mask; + + for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < UINT64_BIT; i++) { + if (mask0 & (1UL << i)) + break; + } + + if (i == UINT64_BIT) + return -EINVAL; + + for (j = I40E_FILTER_PCTYPE_INVALID + 1; + j < I40E_FILTER_PCTYPE_MAX; j++) { + if (pf->adapter->pctypes_tbl[i] & (1ULL << j)) + i40e_write_global_rx_ctl(hw, + I40E_GLQF_HSYM(j), + 0); + } + } + + return 0; +} + +/* Disable RSS hash and configure default input set */ +static int +i40e_rss_disable_hash(struct i40e_pf *pf, + struct i40e_rte_flow_rss_conf *conf) +{ + struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info; + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + struct i40e_rte_flow_rss_conf rss_conf; + uint32_t i; + + memset(&rss_conf, 0, sizeof(rss_conf)); + rte_memcpy(&rss_conf, conf, sizeof(rss_conf)); + + /* Disable RSS hash */ + rss_conf.conf.types = rss_info->conf.types & ~(conf->conf.types); + i40e_rss_hash_set(pf, &rss_conf); + + for (i = RTE_ETH_FLOW_IPV4; i <= RTE_ETH_FLOW_L2_PAYLOAD; i++) { + if (!(pf->adapter->flow_types_mask & (1ULL << i)) || + !(conf->conf.types & (1ULL << i))) + continue; + + /* Configure default input set */ + struct rte_eth_input_set_conf input_conf = { + .op = RTE_ETH_INPUT_SET_SELECT, + .flow_type = i, + .inset_size = 1, + }; + input_conf.field[0] = RTE_ETH_INPUT_SET_DEFAULT; + i40e_hash_filter_inset_select(hw, &input_conf); + } + + rss_info->conf.types = rss_conf.conf.types; + + i40e_rss_clear_hash_function(pf, conf); + + return 0; +} + +/* Configure RSS queue region to default */ +static int +i40e_rss_clear_queue_region(struct i40e_pf *pf) +{ + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info; + uint16_t queue[I40E_MAX_Q_PER_TC]; + uint32_t num_rxq, i; + uint32_t lut = 0; + uint16_t j, num; + + num_rxq = RTE_MIN(pf->dev_data->nb_rx_queues, I40E_MAX_Q_PER_TC); + + for (j = 0; j < num_rxq; j++) + queue[j] = j; + + /* If both VMDQ and RSS enabled, not all of PF queues are configured. 
+ * It's necessary to calculate the actual PF queues that are configured. + */ + if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) + num = i40e_pf_calc_configured_queues_num(pf); + else + num = pf->dev_data->nb_rx_queues; + + num = RTE_MIN(num, num_rxq); + PMD_DRV_LOG(INFO, "Max of contiguous %u PF queues are configured", + num); + + if (num == 0) { + PMD_DRV_LOG(ERR, + "No PF queues are configured to enable RSS for port %u", + pf->dev_data->port_id); + return -ENOTSUP; + } + + /* Fill in redirection table */ + for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) { + if (j == num) + j = 0; + lut = (lut << 8) | (queue[j] & ((0x1 << + hw->func_caps.rss_table_entry_width) - 1)); + if ((i & 3) == 3) + I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut); + } + + rss_info->conf.queue_num = 0; + memset(&rss_info->conf.queue, 0, sizeof(uint16_t)); + + return 0; +} + +int +i40e_config_rss_filter(struct i40e_pf *pf, + struct i40e_rte_flow_rss_conf *conf, bool add) +{ + struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info; + struct rte_flow_action_rss update_conf = rss_info->conf; + int ret = 0; + + if (add) { + if (conf->conf.queue_num) { + /* Configure RSS queue region */ + ret = i40e_rss_config_queue_region(pf, conf); + if (ret) + return ret; + + update_conf.queue_num = conf->conf.queue_num; + update_conf.queue = conf->conf.queue; + } else if (conf->conf.func == + RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) { + /* Configure hash function */ + ret = i40e_rss_config_hash_function(pf, conf); + if (ret) + return ret; + + update_conf.func = conf->conf.func; + } else { + /* Configure hash enable and input set */ + ret = i40e_rss_enable_hash(pf, conf); + if (ret) + return ret; + + update_conf.types |= conf->conf.types; + update_conf.key = conf->conf.key; + update_conf.key_len = conf->conf.key_len; + } + + /* Update RSS info in pf */ + if (i40e_rss_conf_init(rss_info, &update_conf)) + return -EINVAL; + } else { + if (!conf->valid) + return 0; + + if (conf->conf.queue_num) + i40e_rss_clear_queue_region(pf); + else if (conf->conf.func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) + i40e_rss_clear_hash_function(pf, conf); + else + i40e_rss_disable_hash(pf, conf); + } + + return 0; +} + +RTE_INIT(i40e_init_log) +{ + i40e_logtype_init = rte_log_register("pmd.net.i40e.init"); + if (i40e_logtype_init >= 0) + rte_log_set_level(i40e_logtype_init, RTE_LOG_NOTICE); + i40e_logtype_driver = rte_log_register("pmd.net.i40e.driver"); + if (i40e_logtype_driver >= 0) + rte_log_set_level(i40e_logtype_driver, RTE_LOG_NOTICE); + +#ifdef RTE_LIBRTE_I40E_DEBUG_RX + i40e_logtype_rx = rte_log_register("pmd.net.i40e.rx"); + if (i40e_logtype_rx >= 0) + rte_log_set_level(i40e_logtype_rx, RTE_LOG_DEBUG); +#endif + +#ifdef RTE_LIBRTE_I40E_DEBUG_TX + i40e_logtype_tx = rte_log_register("pmd.net.i40e.tx"); + if (i40e_logtype_tx >= 0) + rte_log_set_level(i40e_logtype_tx, RTE_LOG_DEBUG); +#endif + +#ifdef RTE_LIBRTE_I40E_DEBUG_TX_FREE + i40e_logtype_tx_free = rte_log_register("pmd.net.i40e.tx_free"); + if (i40e_logtype_tx_free >= 0) + rte_log_set_level(i40e_logtype_tx_free, RTE_LOG_DEBUG); +#endif +} + +RTE_PMD_REGISTER_PARAM_STRING(net_i40e, + ETH_I40E_FLOATING_VEB_ARG "=1" + ETH_I40E_FLOATING_VEB_LIST_ARG "=" + ETH_I40E_QUEUE_NUM_PER_VF_ARG "=1|2|4|8|16" + ETH_I40E_SUPPORT_MULTI_DRIVER "=1" + ETH_I40E_USE_LATEST_VEC "=0|1"); diff --git a/src/spdk/dpdk/drivers/net/i40e/i40e_ethdev.h b/src/spdk/dpdk/drivers/net/i40e/i40e_ethdev.h new file mode 100644 index 000000000..e5d0ce53f --- /dev/null +++ 
b/src/spdk/dpdk/drivers/net/i40e/i40e_ethdev.h @@ -0,0 +1,1522 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2017 Intel Corporation + */ + +#ifndef _I40E_ETHDEV_H_ +#define _I40E_ETHDEV_H_ + +#include + +#include +#include +#include +#include +#include +#include +#include "rte_pmd_i40e.h" + +#include "base/i40e_register.h" + +#define I40E_VLAN_TAG_SIZE 4 + +#define I40E_AQ_LEN 32 +#define I40E_AQ_BUF_SZ 4096 +/* Number of queues per TC should be one of 1, 2, 4, 8, 16, 32, 64 */ +#define I40E_MAX_Q_PER_TC 64 +#define I40E_NUM_DESC_DEFAULT 512 +#define I40E_NUM_DESC_ALIGN 32 +#define I40E_BUF_SIZE_MIN 1024 +#define I40E_FRAME_SIZE_MAX 9728 +#define I40E_TSO_FRAME_SIZE_MAX 262144 +#define I40E_QUEUE_BASE_ADDR_UNIT 128 +/* number of VSIs and queue default setting */ +#define I40E_MAX_QP_NUM_PER_VF 16 +#define I40E_DEFAULT_QP_NUM_FDIR 1 +#define I40E_UINT32_BIT_SIZE (CHAR_BIT * sizeof(uint32_t)) +#define I40E_VFTA_SIZE (4096 / I40E_UINT32_BIT_SIZE) +/* Maximun number of MAC addresses */ +#define I40E_NUM_MACADDR_MAX 64 +/* Maximum number of VFs */ +#define I40E_MAX_VF 128 +/*flag of no loopback*/ +#define I40E_AQ_LB_MODE_NONE 0x0 +/* + * vlan_id is a 12 bit number. + * The VFTA array is actually a 4096 bit array, 128 of 32bit elements. + * 2^5 = 32. The val of lower 5 bits specifies the bit in the 32bit element. + * The higher 7 bit val specifies VFTA array index. + */ +#define I40E_VFTA_BIT(vlan_id) (1 << ((vlan_id) & 0x1F)) +#define I40E_VFTA_IDX(vlan_id) ((vlan_id) >> 5) + +/* Default TC traffic in case DCB is not enabled */ +#define I40E_DEFAULT_TCMAP 0x1 +#define I40E_FDIR_QUEUE_ID 0 + +/* Always assign pool 0 to main VSI, VMDQ will start from 1 */ +#define I40E_VMDQ_POOL_BASE 1 + +#define I40E_DEFAULT_RX_FREE_THRESH 32 +#define I40E_DEFAULT_RX_PTHRESH 8 +#define I40E_DEFAULT_RX_HTHRESH 8 +#define I40E_DEFAULT_RX_WTHRESH 0 + +#define I40E_DEFAULT_TX_FREE_THRESH 32 +#define I40E_DEFAULT_TX_PTHRESH 32 +#define I40E_DEFAULT_TX_HTHRESH 0 +#define I40E_DEFAULT_TX_WTHRESH 0 +#define I40E_DEFAULT_TX_RSBIT_THRESH 32 + +/* Bit shift and mask */ +#define I40E_4_BIT_WIDTH (CHAR_BIT / 2) +#define I40E_4_BIT_MASK RTE_LEN2MASK(I40E_4_BIT_WIDTH, uint8_t) +#define I40E_8_BIT_WIDTH CHAR_BIT +#define I40E_8_BIT_MASK UINT8_MAX +#define I40E_16_BIT_WIDTH (CHAR_BIT * 2) +#define I40E_16_BIT_MASK UINT16_MAX +#define I40E_32_BIT_WIDTH (CHAR_BIT * 4) +#define I40E_32_BIT_MASK UINT32_MAX +#define I40E_48_BIT_WIDTH (CHAR_BIT * 6) +#define I40E_48_BIT_MASK RTE_LEN2MASK(I40E_48_BIT_WIDTH, uint64_t) + +/* Linux PF host with virtchnl version 1.1 */ +#define PF_IS_V11(vf) \ + (((vf)->version_major == VIRTCHNL_VERSION_MAJOR) && \ + ((vf)->version_minor == 1)) + +#define I40E_WRITE_GLB_REG(hw, reg, value) \ + do { \ + uint32_t ori_val; \ + struct rte_eth_dev *dev; \ + ori_val = I40E_READ_REG((hw), (reg)); \ + dev = ((struct i40e_adapter *)hw->back)->eth_dev; \ + I40E_PCI_REG_WRITE(I40E_PCI_REG_ADDR((hw), \ + (reg)), (value)); \ + if (ori_val != value) \ + PMD_DRV_LOG(WARNING, \ + "i40e device %s changed global " \ + "register [0x%08x]. 
original: 0x%08x, " \ + "new: 0x%08x ", \ + (dev->device->name), (reg), \ + (ori_val), (value)); \ + } while (0) + +/* index flex payload per layer */ +enum i40e_flxpld_layer_idx { + I40E_FLXPLD_L2_IDX = 0, + I40E_FLXPLD_L3_IDX = 1, + I40E_FLXPLD_L4_IDX = 2, + I40E_MAX_FLXPLD_LAYER = 3, +}; +#define I40E_MAX_FLXPLD_FIED 3 /* max number of flex payload fields */ +#define I40E_FDIR_BITMASK_NUM_WORD 2 /* max number of bitmask words */ +#define I40E_FDIR_MAX_FLEXWORD_NUM 8 /* max number of flexpayload words */ +#define I40E_FDIR_MAX_FLEX_LEN 16 /* len in bytes of flex payload */ +#define I40E_INSET_MASK_NUM_REG 2 /* number of input set mask registers */ + +/* i40e flags */ +#define I40E_FLAG_RSS (1ULL << 0) +#define I40E_FLAG_DCB (1ULL << 1) +#define I40E_FLAG_VMDQ (1ULL << 2) +#define I40E_FLAG_SRIOV (1ULL << 3) +#define I40E_FLAG_HEADER_SPLIT_DISABLED (1ULL << 4) +#define I40E_FLAG_HEADER_SPLIT_ENABLED (1ULL << 5) +#define I40E_FLAG_FDIR (1ULL << 6) +#define I40E_FLAG_VXLAN (1ULL << 7) +#define I40E_FLAG_RSS_AQ_CAPABLE (1ULL << 8) +#define I40E_FLAG_ALL (I40E_FLAG_RSS | \ + I40E_FLAG_DCB | \ + I40E_FLAG_VMDQ | \ + I40E_FLAG_SRIOV | \ + I40E_FLAG_HEADER_SPLIT_DISABLED | \ + I40E_FLAG_HEADER_SPLIT_ENABLED | \ + I40E_FLAG_FDIR | \ + I40E_FLAG_VXLAN | \ + I40E_FLAG_RSS_AQ_CAPABLE) + +#define I40E_RSS_OFFLOAD_ALL ( \ + ETH_RSS_FRAG_IPV4 | \ + ETH_RSS_NONFRAG_IPV4_TCP | \ + ETH_RSS_NONFRAG_IPV4_UDP | \ + ETH_RSS_NONFRAG_IPV4_SCTP | \ + ETH_RSS_NONFRAG_IPV4_OTHER | \ + ETH_RSS_FRAG_IPV6 | \ + ETH_RSS_NONFRAG_IPV6_TCP | \ + ETH_RSS_NONFRAG_IPV6_UDP | \ + ETH_RSS_NONFRAG_IPV6_SCTP | \ + ETH_RSS_NONFRAG_IPV6_OTHER | \ + ETH_RSS_L2_PAYLOAD) + +/* All bits of RSS hash enable for X722*/ +#define I40E_RSS_HENA_ALL_X722 ( \ + (1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \ + (1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \ + (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \ + (1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \ + (1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) | \ + (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \ + I40E_RSS_HENA_ALL) + +/* All bits of RSS hash enable */ +#define I40E_RSS_HENA_ALL ( \ + (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \ + (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \ + (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \ + (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \ + (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4) | \ + (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \ + (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \ + (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \ + (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \ + (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6) | \ + (1ULL << I40E_FILTER_PCTYPE_FCOE_OX) | \ + (1ULL << I40E_FILTER_PCTYPE_FCOE_RX) | \ + (1ULL << I40E_FILTER_PCTYPE_FCOE_OTHER) | \ + (1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD)) + +#define I40E_MISC_VEC_ID RTE_INTR_VEC_ZERO_OFFSET +#define I40E_RX_VEC_START RTE_INTR_VEC_RXTX_OFFSET + +/* Default queue interrupt throttling time in microseconds */ +#define I40E_ITR_INDEX_DEFAULT 0 +#define I40E_ITR_INDEX_NONE 3 +#define I40E_QUEUE_ITR_INTERVAL_DEFAULT 32 /* 32 us */ +#define I40E_QUEUE_ITR_INTERVAL_MAX 8160 /* 8160 us */ +#define I40E_VF_QUEUE_ITR_INTERVAL_DEFAULT 32 /* 32 us */ +/* Special FW support this floating VEB feature */ +#define FLOATING_VEB_SUPPORTED_FW_MAJ 5 +#define FLOATING_VEB_SUPPORTED_FW_MIN 0 + +#define I40E_GL_SWT_L2TAGCTRL(_i) (0x001C0A70 + ((_i) * 4)) +#define I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT 16 +#define I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_MASK \ + 
I40E_MASK(0xFFFF, I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT) + +#define I40E_RSS_TYPE_NONE 0ULL +#define I40E_RSS_TYPE_INVALID 1ULL + +#define I40E_INSET_NONE 0x00000000000000000ULL + +/* bit0 ~ bit 7 */ +#define I40E_INSET_DMAC 0x0000000000000001ULL +#define I40E_INSET_SMAC 0x0000000000000002ULL +#define I40E_INSET_VLAN_OUTER 0x0000000000000004ULL +#define I40E_INSET_VLAN_INNER 0x0000000000000008ULL +#define I40E_INSET_VLAN_TUNNEL 0x0000000000000010ULL + +/* bit 8 ~ bit 15 */ +#define I40E_INSET_IPV4_SRC 0x0000000000000100ULL +#define I40E_INSET_IPV4_DST 0x0000000000000200ULL +#define I40E_INSET_IPV6_SRC 0x0000000000000400ULL +#define I40E_INSET_IPV6_DST 0x0000000000000800ULL +#define I40E_INSET_SRC_PORT 0x0000000000001000ULL +#define I40E_INSET_DST_PORT 0x0000000000002000ULL +#define I40E_INSET_SCTP_VT 0x0000000000004000ULL + +/* bit 16 ~ bit 31 */ +#define I40E_INSET_IPV4_TOS 0x0000000000010000ULL +#define I40E_INSET_IPV4_PROTO 0x0000000000020000ULL +#define I40E_INSET_IPV4_TTL 0x0000000000040000ULL +#define I40E_INSET_IPV6_TC 0x0000000000080000ULL +#define I40E_INSET_IPV6_FLOW 0x0000000000100000ULL +#define I40E_INSET_IPV6_NEXT_HDR 0x0000000000200000ULL +#define I40E_INSET_IPV6_HOP_LIMIT 0x0000000000400000ULL +#define I40E_INSET_TCP_FLAGS 0x0000000000800000ULL + +/* bit 32 ~ bit 47, tunnel fields */ +#define I40E_INSET_TUNNEL_IPV4_DST 0x0000000100000000ULL +#define I40E_INSET_TUNNEL_IPV6_DST 0x0000000200000000ULL +#define I40E_INSET_TUNNEL_DMAC 0x0000000400000000ULL +#define I40E_INSET_TUNNEL_SRC_PORT 0x0000000800000000ULL +#define I40E_INSET_TUNNEL_DST_PORT 0x0000001000000000ULL +#define I40E_INSET_TUNNEL_ID 0x0000002000000000ULL + +/* bit 48 ~ bit 55 */ +#define I40E_INSET_LAST_ETHER_TYPE 0x0001000000000000ULL + +/* bit 56 ~ bit 63, Flex Payload */ +#define I40E_INSET_FLEX_PAYLOAD_W1 0x0100000000000000ULL +#define I40E_INSET_FLEX_PAYLOAD_W2 0x0200000000000000ULL +#define I40E_INSET_FLEX_PAYLOAD_W3 0x0400000000000000ULL +#define I40E_INSET_FLEX_PAYLOAD_W4 0x0800000000000000ULL +#define I40E_INSET_FLEX_PAYLOAD_W5 0x1000000000000000ULL +#define I40E_INSET_FLEX_PAYLOAD_W6 0x2000000000000000ULL +#define I40E_INSET_FLEX_PAYLOAD_W7 0x4000000000000000ULL +#define I40E_INSET_FLEX_PAYLOAD_W8 0x8000000000000000ULL +#define I40E_INSET_FLEX_PAYLOAD \ + (I40E_INSET_FLEX_PAYLOAD_W1 | I40E_INSET_FLEX_PAYLOAD_W2 | \ + I40E_INSET_FLEX_PAYLOAD_W3 | I40E_INSET_FLEX_PAYLOAD_W4 | \ + I40E_INSET_FLEX_PAYLOAD_W5 | I40E_INSET_FLEX_PAYLOAD_W6 | \ + I40E_INSET_FLEX_PAYLOAD_W7 | I40E_INSET_FLEX_PAYLOAD_W8) + +/* The max bandwidth of i40e is 40Gbps. */ +#define I40E_QOS_BW_MAX 40000 +/* The bandwidth should be the multiple of 50Mbps. */ +#define I40E_QOS_BW_GRANULARITY 50 +/* The min bandwidth weight is 1. */ +#define I40E_QOS_BW_WEIGHT_MIN 1 +/* The max bandwidth weight is 127. */ +#define I40E_QOS_BW_WEIGHT_MAX 127 +/* The max queue region index is 7. */ +#define I40E_REGION_MAX_INDEX 7 + +#define I40E_MAX_PERCENT 100 +#define I40E_DEFAULT_DCB_APP_NUM 1 +#define I40E_DEFAULT_DCB_APP_PRIO 3 + +/** + * The overhead from MTU to max frame size. + * Considering QinQ packet, the VLAN tag needs to be counted twice. 
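+ *
+ * Worked check (no new code, just the arithmetic): with the definition
+ * below this is RTE_ETHER_HDR_LEN (14) + RTE_ETHER_CRC_LEN (4) +
+ * 2 * I40E_VLAN_TAG_SIZE (2 * 4) = 26 bytes, so an MTU of 1500
+ * corresponds to a maximum frame size of 1526, and I40E_FRAME_SIZE_MAX
+ * (9728) corresponds to an MTU of 9702.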
+ */ +#define I40E_ETH_OVERHEAD \ + (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + I40E_VLAN_TAG_SIZE * 2) + +struct i40e_adapter; +struct rte_pci_driver; + +/** + * MAC filter structure + */ +struct i40e_mac_filter_info { + enum rte_mac_filter_type filter_type; + struct rte_ether_addr mac_addr; +}; + +TAILQ_HEAD(i40e_mac_filter_list, i40e_mac_filter); + +/* MAC filter list structure */ +struct i40e_mac_filter { + TAILQ_ENTRY(i40e_mac_filter) next; + struct i40e_mac_filter_info mac_info; +}; + +TAILQ_HEAD(i40e_vsi_list_head, i40e_vsi_list); + +struct i40e_vsi; + +/* VSI list structure */ +struct i40e_vsi_list { + TAILQ_ENTRY(i40e_vsi_list) list; + struct i40e_vsi *vsi; +}; + +struct i40e_rx_queue; +struct i40e_tx_queue; + +/* Bandwidth limit information */ +struct i40e_bw_info { + uint16_t bw_limit; /* BW Limit (0 = disabled) */ + uint8_t bw_max; /* Max BW limit if enabled */ + + /* Relative credits within same TC with respect to other VSIs or Comps */ + uint8_t bw_ets_share_credits[I40E_MAX_TRAFFIC_CLASS]; + /* Bandwidth limit per TC */ + uint16_t bw_ets_credits[I40E_MAX_TRAFFIC_CLASS]; + /* Max bandwidth limit per TC */ + uint8_t bw_ets_max[I40E_MAX_TRAFFIC_CLASS]; +}; + +/* Structure that defines a VEB */ +struct i40e_veb { + struct i40e_vsi_list_head head; + struct i40e_vsi *associate_vsi; /* Associate VSI who owns the VEB */ + struct i40e_pf *associate_pf; /* Associate PF who owns the VEB */ + uint16_t seid; /* The seid of VEB itself */ + uint16_t uplink_seid; /* The uplink seid of this VEB */ + uint16_t stats_idx; + struct i40e_eth_stats stats; + uint8_t enabled_tc; /* The traffic class enabled */ + uint8_t strict_prio_tc; /* bit map of TCs set to strict priority mode */ + struct i40e_bw_info bw_info; /* VEB bandwidth information */ +}; + +/* i40e MACVLAN filter structure */ +struct i40e_macvlan_filter { + struct rte_ether_addr macaddr; + enum rte_mac_filter_type filter_type; + uint16_t vlan_id; +}; + +/* + * Structure that defines a VSI, associated with a adapter. + */ +struct i40e_vsi { + struct i40e_adapter *adapter; /* Backreference to associated adapter */ + struct i40e_aqc_vsi_properties_data info; /* VSI properties */ + + struct i40e_eth_stats eth_stats_offset; + struct i40e_eth_stats eth_stats; + /* + * When drivers loaded, only a default main VSI exists. In case new VSI + * needs to add, HW needs to know the layout that VSIs are organized. + * Besides that, VSI isan element and can't switch packets, which needs + * to add new component VEB to perform switching. So, a new VSI needs + * to specify the uplink VSI (Parent VSI) before created. The + * uplink VSI will check whether it had a VEB to switch packets. If no, + * it will try to create one. Then, uplink VSI will move the new VSI + * into its' sib_vsi_list to manage all the downlink VSI. + * sib_vsi_list: the VSI list that shared the same uplink VSI. + * parent_vsi : the uplink VSI. It's NULL for main VSI. + * veb : the VEB associates with the VSI. 
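+ *
+ * A minimal traversal sketch (assumed usage based on the list types in
+ * this header; uplink_vsi and handle_vsi() are placeholders): the
+ * downlink VSIs sharing an uplink can be walked through the uplink's
+ * VEB head list:
+ *
+ *   struct i40e_vsi_list *node;
+ *   TAILQ_FOREACH(node, &uplink_vsi->veb->head, list)
+ *           handle_vsi(node->vsi);   // one downlink VSI per node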
+ */ + struct i40e_vsi_list sib_vsi_list; /* sibling vsi list */ + struct i40e_vsi *parent_vsi; + struct i40e_veb *veb; /* Associated veb, could be null */ + struct i40e_veb *floating_veb; /* Associated floating veb */ + bool offset_loaded; + enum i40e_vsi_type type; /* VSI types */ + uint16_t vlan_num; /* Total VLAN number */ + uint16_t mac_num; /* Total mac number */ + uint32_t vfta[I40E_VFTA_SIZE]; /* VLAN bitmap */ + struct i40e_mac_filter_list mac_list; /* macvlan filter list */ + /* specific VSI-defined parameters, SRIOV stored the vf_id */ + uint32_t user_param; + uint16_t seid; /* The seid of VSI itself */ + uint16_t uplink_seid; /* The uplink seid of this VSI */ + uint16_t nb_qps; /* Number of queue pairs VSI can occupy */ + uint16_t nb_used_qps; /* Number of queue pairs VSI uses */ + uint16_t max_macaddrs; /* Maximum number of MAC addresses */ + uint16_t base_queue; /* The first queue index of this VSI */ + /* + * The offset to visit VSI related register, assigned by HW when + * creating VSI + */ + uint16_t vsi_id; + uint16_t msix_intr; /* The MSIX interrupt binds to VSI */ + uint16_t nb_msix; /* The max number of msix vector */ + uint8_t enabled_tc; /* The traffic class enabled */ + uint8_t vlan_anti_spoof_on; /* The VLAN anti-spoofing enabled */ + uint8_t vlan_filter_on; /* The VLAN filter enabled */ + struct i40e_bw_info bw_info; /* VSI bandwidth information */ +}; + +struct pool_entry { + LIST_ENTRY(pool_entry) next; + uint16_t base; + uint16_t len; +}; + +LIST_HEAD(res_list, pool_entry); + +struct i40e_res_pool_info { + uint32_t base; /* Resource start index */ + uint32_t num_alloc; /* Allocated resource number */ + uint32_t num_free; /* Total available resource number */ + struct res_list alloc_list; /* Allocated resource list */ + struct res_list free_list; /* Available resource list */ +}; + +enum I40E_VF_STATE { + I40E_VF_INACTIVE = 0, + I40E_VF_INRESET, + I40E_VF_ININIT, + I40E_VF_ACTIVE, +}; + +/* + * Structure to store private data for PF host. + */ +struct i40e_pf_vf { + struct i40e_pf *pf; + struct i40e_vsi *vsi; + enum I40E_VF_STATE state; /* The number of queue pairs available */ + uint16_t vf_idx; /* VF index in pf->vfs */ + uint16_t lan_nb_qps; /* Actual queues allocated */ + uint16_t reset_cnt; /* Total vf reset times */ + struct rte_ether_addr mac_addr; /* Default MAC address */ + /* version of the virtchnl from VF */ + struct virtchnl_version_info version; + uint32_t request_caps; /* offload caps requested from VF */ + uint64_t num_mdd_events; /* num of mdd events detected */ + + /* + * Variables for store the arrival timestamp of VF messages. + * If the timestamp of latest message stored at + * `msg_timestamps[index % max]` then the timestamp of + * earliest message stored at `msg_time[(index + 1) % max]`. + * When a new message come, the timestamp of this message + * will be stored at `msg_timestamps[(index + 1) % max]` and the + * earliest message timestamp is at + * `msg_timestamps[(index + 2) % max]` now... + */ + uint32_t msg_index; + uint64_t *msg_timestamps; + + /* cycle of stop ignoring VF message */ + uint64_t ignore_end_cycle; +}; + +/* + * Structure to store private data for flow control. 
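+ *
+ * A minimal sketch of the watermark layout (example values are
+ * assumptions, not driver defaults): slots 0-7 hold the per-TC PFC
+ * thresholds and the last slot holds the link-level (LFC) thresholds:
+ *
+ *   pf->fc_conf.pause_time = 0xFFFF;
+ *   pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] = 0x7C; // LFC, kilobytes
+ *   pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]  = 0x40; // LFC, kilobytes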
+ */ +struct i40e_fc_conf { + uint16_t pause_time; /* Flow control pause timer */ + /* FC high water 0-7 for pfc and 8 for lfc unit:kilobytes */ + uint32_t high_water[I40E_MAX_TRAFFIC_CLASS + 1]; + /* FC low water 0-7 for pfc and 8 for lfc unit:kilobytes */ + uint32_t low_water[I40E_MAX_TRAFFIC_CLASS + 1]; +}; + +/* + * Structure to store private data for VMDQ instance + */ +struct i40e_vmdq_info { + struct i40e_pf *pf; + struct i40e_vsi *vsi; +}; + +#define I40E_FDIR_MAX_FLEXLEN 16 /**< Max length of flexbytes. */ +#define I40E_MAX_FLX_SOURCE_OFF 480 +#define NONUSE_FLX_PIT_DEST_OFF 63 +#define NONUSE_FLX_PIT_FSIZE 1 +#define I40E_FLX_OFFSET_IN_FIELD_VECTOR 50 +#define MK_FLX_PIT(src_offset, fsize, dst_offset) ( \ + (((src_offset) << I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT) & \ + I40E_PRTQF_FLX_PIT_SOURCE_OFF_MASK) | \ + (((fsize) << I40E_PRTQF_FLX_PIT_FSIZE_SHIFT) & \ + I40E_PRTQF_FLX_PIT_FSIZE_MASK) | \ + ((((dst_offset) == NONUSE_FLX_PIT_DEST_OFF ? \ + NONUSE_FLX_PIT_DEST_OFF : \ + ((dst_offset) + I40E_FLX_OFFSET_IN_FIELD_VECTOR)) << \ + I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT) & \ + I40E_PRTQF_FLX_PIT_DEST_OFF_MASK)) +#define I40E_WORD(hi, lo) (uint16_t)((((hi) << 8) & 0xFF00) | ((lo) & 0xFF)) +#define I40E_FLEX_WORD_MASK(off) (0x80 >> (off)) +#define I40E_FDIR_IPv6_TC_OFFSET 20 + +/* A structure used to define the input for GTP flow */ +struct i40e_gtp_flow { + struct rte_eth_udpv4_flow udp; /* IPv4 UDP fields to match. */ + uint8_t msg_type; /* Message type. */ + uint32_t teid; /* TEID in big endian. */ +}; + +/* A structure used to define the input for GTP IPV4 flow */ +struct i40e_gtp_ipv4_flow { + struct i40e_gtp_flow gtp; + struct rte_eth_ipv4_flow ip4; +}; + +/* A structure used to define the input for GTP IPV6 flow */ +struct i40e_gtp_ipv6_flow { + struct i40e_gtp_flow gtp; + struct rte_eth_ipv6_flow ip6; +}; + +/* A structure used to define the input for ESP IPV4 flow */ +struct i40e_esp_ipv4_flow { + struct rte_eth_ipv4_flow ipv4; + uint32_t spi; /* SPI in big endian. */ +}; + +/* A structure used to define the input for ESP IPV6 flow */ +struct i40e_esp_ipv6_flow { + struct rte_eth_ipv6_flow ipv6; + uint32_t spi; /* SPI in big endian. */ +}; +/* A structure used to define the input for ESP IPV4 UDP flow */ +struct i40e_esp_ipv4_udp_flow { + struct rte_eth_udpv4_flow udp; + uint32_t spi; /* SPI in big endian. */ +}; + +/* A structure used to define the input for ESP IPV6 UDP flow */ +struct i40e_esp_ipv6_udp_flow { + struct rte_eth_udpv6_flow udp; + uint32_t spi; /* SPI in big endian. */ +}; + +/* A structure used to define the input for raw type flow */ +struct i40e_raw_flow { + uint16_t pctype; + void *packet; + uint32_t length; +}; + +/* A structure used to define the input for L2TPv3 over IPv4 flow */ +struct i40e_ipv4_l2tpv3oip_flow { + struct rte_eth_ipv4_flow ip4; + uint32_t session_id; /* Session ID in big endian. */ +}; + +/* A structure used to define the input for L2TPv3 over IPv6 flow */ +struct i40e_ipv6_l2tpv3oip_flow { + struct rte_eth_ipv6_flow ip6; + uint32_t session_id; /* Session ID in big endian. 
*/ +}; + +/* A structure used to define the input for l2 dst type flow */ +struct i40e_l2_flow { + struct rte_ether_addr dst; + struct rte_ether_addr src; + uint16_t ether_type; /**< Ether type in big endian */ +}; + +/* + * A union contains the inputs for all types of flow + * items in flows need to be in big endian + */ +union i40e_fdir_flow { + struct i40e_l2_flow l2_flow; + struct rte_eth_udpv4_flow udp4_flow; + struct rte_eth_tcpv4_flow tcp4_flow; + struct rte_eth_sctpv4_flow sctp4_flow; + struct rte_eth_ipv4_flow ip4_flow; + struct rte_eth_udpv6_flow udp6_flow; + struct rte_eth_tcpv6_flow tcp6_flow; + struct rte_eth_sctpv6_flow sctp6_flow; + struct rte_eth_ipv6_flow ipv6_flow; + struct i40e_gtp_flow gtp_flow; + struct i40e_gtp_ipv4_flow gtp_ipv4_flow; + struct i40e_gtp_ipv6_flow gtp_ipv6_flow; + struct i40e_raw_flow raw_flow; + struct i40e_ipv4_l2tpv3oip_flow ip4_l2tpv3oip_flow; + struct i40e_ipv6_l2tpv3oip_flow ip6_l2tpv3oip_flow; + struct i40e_esp_ipv4_flow esp_ipv4_flow; + struct i40e_esp_ipv6_flow esp_ipv6_flow; + struct i40e_esp_ipv4_udp_flow esp_ipv4_udp_flow; + struct i40e_esp_ipv6_udp_flow esp_ipv6_udp_flow; +}; + +enum i40e_fdir_ip_type { + I40E_FDIR_IPTYPE_IPV4, + I40E_FDIR_IPTYPE_IPV6, +}; + +/* A structure used to contain extend input of flow */ +struct i40e_fdir_flow_ext { + uint16_t vlan_tci; + uint8_t flexbytes[RTE_ETH_FDIR_MAX_FLEXLEN]; + /* It is filled by the flexible payload to match. */ + uint8_t is_vf; /* 1 for VF, 0 for port dev */ + uint16_t dst_id; /* VF ID, available when is_vf is 1*/ + bool inner_ip; /* If there is inner ip */ + enum i40e_fdir_ip_type iip_type; /* ip type for inner ip */ + enum i40e_fdir_ip_type oip_type; /* ip type for outer ip */ + bool customized_pctype; /* If customized pctype is used */ + bool pkt_template; /* If raw packet template is used */ + bool is_udp; /* ipv4|ipv6 udp flow */ +}; + +/* A structure used to define the input for a flow director filter entry */ +struct i40e_fdir_input { + enum i40e_filter_pctype pctype; + union i40e_fdir_flow flow; + /* Flow fields to match, dependent on flow_type */ + struct i40e_fdir_flow_ext flow_ext; + /* Additional fields to match */ +}; + +/* Behavior will be taken if FDIR match */ +enum i40e_fdir_behavior { + I40E_FDIR_ACCEPT = 0, + I40E_FDIR_REJECT, + I40E_FDIR_PASSTHRU, +}; + +/* Flow director report status + * It defines what will be reported if FDIR entry is matched. + */ +enum i40e_fdir_status { + I40E_FDIR_NO_REPORT_STATUS = 0, /* Report nothing. */ + I40E_FDIR_REPORT_ID, /* Only report FD ID. */ + I40E_FDIR_REPORT_ID_FLEX_4, /* Report FD ID and 4 flex bytes. */ + I40E_FDIR_REPORT_FLEX_8, /* Report 8 flex bytes. */ +}; + +/* A structure used to define an action when match FDIR packet filter. */ +struct i40e_fdir_action { + uint16_t rx_queue; /* Queue assigned to if FDIR match. */ + enum i40e_fdir_behavior behavior; /* Behavior will be taken */ + enum i40e_fdir_status report_status; /* Status report option */ + /* If report_status is I40E_FDIR_REPORT_ID_FLEX_4 or + * I40E_FDIR_REPORT_FLEX_8, flex_off specifies where the reported + * flex bytes start from in flexible payload. + */ + uint8_t flex_off; +}; + +/* A structure used to define the flow director filter entry by filter_ctrl API + * It supports RTE_ETH_FILTER_FDIR with RTE_ETH_FILTER_ADD and + * RTE_ETH_FILTER_DELETE operations. 
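+ *
+ * A minimal usage sketch (field values and the dev handle are assumptions
+ * for illustration; the 5-tuple itself would go in input.flow.udp4_flow):
+ *
+ *   struct i40e_fdir_filter_conf conf = {
+ *           .soft_id = 1,
+ *           .input.pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
+ *           .action.rx_queue = 4,
+ *           .action.behavior = I40E_FDIR_ACCEPT,
+ *           .action.report_status = I40E_FDIR_REPORT_ID,
+ *   };
+ *   i40e_flow_add_del_fdir_filter(dev, &conf, true);   // true == add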
+ */ +struct i40e_fdir_filter_conf { + uint32_t soft_id; + /* ID, an unique value is required when deal with FDIR entry */ + struct i40e_fdir_input input; /* Input set */ + struct i40e_fdir_action action; /* Action taken when match */ +}; + +/* + * Structure to store flex pit for flow diretor. + */ +struct i40e_fdir_flex_pit { + uint8_t src_offset; /* offset in words from the beginning of payload */ + uint8_t size; /* size in words */ + uint8_t dst_offset; /* offset in words of flexible payload */ +}; + +struct i40e_fdir_flex_mask { + uint8_t word_mask; /**< Bit i enables word i of flexible payload */ + uint8_t nb_bitmask; + struct { + uint8_t offset; + uint16_t mask; + } bitmask[I40E_FDIR_BITMASK_NUM_WORD]; +}; + +#define I40E_FILTER_PCTYPE_INVALID 0 +#define I40E_FILTER_PCTYPE_MAX 64 +#define I40E_MAX_FDIR_FILTER_NUM (1024 * 8) + +struct i40e_fdir_filter { + TAILQ_ENTRY(i40e_fdir_filter) rules; + struct i40e_fdir_filter_conf fdir; +}; + +TAILQ_HEAD(i40e_fdir_filter_list, i40e_fdir_filter); +/* + * A structure used to define fields of a FDIR related info. + */ +struct i40e_fdir_info { + struct i40e_vsi *fdir_vsi; /* pointer to fdir VSI structure */ + uint16_t match_counter_index; /* Statistic counter index used for fdir*/ + struct i40e_tx_queue *txq; + struct i40e_rx_queue *rxq; + void *prg_pkt; /* memory for fdir program packet */ + uint64_t dma_addr; /* physic address of packet memory*/ + /* input set bits for each pctype */ + uint64_t input_set[I40E_FILTER_PCTYPE_MAX]; + /* + * the rule how bytes stream is extracted as flexible payload + * for each payload layer, the setting can up to three elements + */ + struct i40e_fdir_flex_pit flex_set[I40E_MAX_FLXPLD_LAYER * I40E_MAX_FLXPLD_FIED]; + struct i40e_fdir_flex_mask flex_mask[I40E_FILTER_PCTYPE_MAX]; + + struct i40e_fdir_filter_list fdir_list; + struct i40e_fdir_filter **hash_map; + struct rte_hash *hash_table; + + /* Mark if flex pit and mask is set */ + bool flex_pit_flag[I40E_MAX_FLXPLD_LAYER]; + bool flex_mask_flag[I40E_FILTER_PCTYPE_MAX]; + + bool inset_flag[I40E_FILTER_PCTYPE_MAX]; /* Mark if input set is set */ +}; + +/* Ethertype filter number HW supports */ +#define I40E_MAX_ETHERTYPE_FILTER_NUM 768 + +/* Ethertype filter struct */ +struct i40e_ethertype_filter_input { + struct rte_ether_addr mac_addr; /* Mac address to match */ + uint16_t ether_type; /* Ether type to match */ +}; + +struct i40e_ethertype_filter { + TAILQ_ENTRY(i40e_ethertype_filter) rules; + struct i40e_ethertype_filter_input input; + uint16_t flags; /* Flags from RTE_ETHTYPE_FLAGS_* */ + uint16_t queue; /* Queue assigned to when match */ +}; + +TAILQ_HEAD(i40e_ethertype_filter_list, i40e_ethertype_filter); + +struct i40e_ethertype_rule { + struct i40e_ethertype_filter_list ethertype_list; + struct i40e_ethertype_filter **hash_map; + struct rte_hash *hash_table; +}; + +/* queue region info */ +struct i40e_queue_region_info { + /* the region id for this configuration */ + uint8_t region_id; + /* the start queue index for this region */ + uint8_t queue_start_index; + /* the total queue number of this queue region */ + uint8_t queue_num; + /* the total number of user priority for this region */ + uint8_t user_priority_num; + /* the packet's user priority for this region */ + uint8_t user_priority[I40E_MAX_USER_PRIORITY]; + /* the total number of flowtype for this region */ + uint8_t flowtype_num; + /** + * the pctype or hardware flowtype of packet, + * the specific index for each type has been defined + * in file i40e_type.h as enum i40e_filter_pctype. 
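+ *
+ * For example (values assumed for illustration), a region spanning
+ * queues 0-3 that catches IPv4/UDP traffic at user priority 0 would use:
+ *   region_id = 0, queue_start_index = 0, queue_num = 4,
+ *   user_priority_num = 1, user_priority[0] = 0,
+ *   flowtype_num = 1, hw_flowtype[0] = I40E_FILTER_PCTYPE_NONF_IPV4_UDP.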
+ */ + uint8_t hw_flowtype[I40E_FILTER_PCTYPE_MAX]; +}; + +struct i40e_queue_regions { + /* the total number of queue region for this port */ + uint16_t queue_region_number; + struct i40e_queue_region_info region[I40E_REGION_MAX_INDEX + 1]; +}; + +struct i40e_rss_pattern_info { + uint8_t action_flag; + uint64_t types; +}; + +/* Tunnel filter number HW supports */ +#define I40E_MAX_TUNNEL_FILTER_NUM 400 + +#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0 44 +#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1 45 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSOUDP 8 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSOGRE 9 +#define I40E_AQC_ADD_CLOUD_FILTER_0X10 0x10 +#define I40E_AQC_ADD_CLOUD_FILTER_0X11 0x11 +#define I40E_AQC_ADD_CLOUD_FILTER_0X12 0x12 +#define I40E_AQC_ADD_L1_FILTER_0X11 0x11 +#define I40E_AQC_ADD_L1_FILTER_0X12 0x12 +#define I40E_AQC_ADD_L1_FILTER_0X13 0x13 +#define I40E_AQC_NEW_TR_21 21 +#define I40E_AQC_NEW_TR_22 22 + +enum i40e_tunnel_iptype { + I40E_TUNNEL_IPTYPE_IPV4, + I40E_TUNNEL_IPTYPE_IPV6, +}; + +/* Tunnel filter struct */ +struct i40e_tunnel_filter_input { + uint8_t outer_mac[6]; /* Outer mac address to match */ + uint8_t inner_mac[6]; /* Inner mac address to match */ + uint16_t inner_vlan; /* Inner vlan address to match */ + enum i40e_tunnel_iptype ip_type; + uint16_t flags; /* Filter type flag */ + uint32_t tenant_id; /* Tenant id to match */ + uint16_t general_fields[32]; /* Big buffer */ +}; + +struct i40e_tunnel_filter { + TAILQ_ENTRY(i40e_tunnel_filter) rules; + struct i40e_tunnel_filter_input input; + uint8_t is_to_vf; /* 0 - to PF, 1 - to VF */ + uint16_t vf_id; /* VF id, avaiblable when is_to_vf is 1. */ + uint16_t queue; /* Queue assigned to when match */ +}; + +TAILQ_HEAD(i40e_tunnel_filter_list, i40e_tunnel_filter); + +struct i40e_tunnel_rule { + struct i40e_tunnel_filter_list tunnel_list; + struct i40e_tunnel_filter **hash_map; + struct rte_hash *hash_table; +}; + +/** + * Tunnel type. + */ +enum i40e_tunnel_type { + I40E_TUNNEL_TYPE_NONE = 0, + I40E_TUNNEL_TYPE_VXLAN, + I40E_TUNNEL_TYPE_GENEVE, + I40E_TUNNEL_TYPE_TEREDO, + I40E_TUNNEL_TYPE_NVGRE, + I40E_TUNNEL_TYPE_IP_IN_GRE, + I40E_L2_TUNNEL_TYPE_E_TAG, + I40E_TUNNEL_TYPE_MPLSoUDP, + I40E_TUNNEL_TYPE_MPLSoGRE, + I40E_TUNNEL_TYPE_QINQ, + I40E_TUNNEL_TYPE_GTPC, + I40E_TUNNEL_TYPE_GTPU, + I40E_TUNNEL_TYPE_ESPoUDP, + I40E_TUNNEL_TYPE_ESPoIP, + I40E_TUNNEL_TYPE_MAX, +}; + +/** + * Tunneling Packet filter configuration. + */ +struct i40e_tunnel_filter_conf { + struct rte_ether_addr outer_mac; /**< Outer MAC address to match. */ + struct rte_ether_addr inner_mac; /**< Inner MAC address to match. */ + uint16_t inner_vlan; /**< Inner VLAN to match. */ + uint32_t outer_vlan; /**< Outer VLAN to match */ + enum i40e_tunnel_iptype ip_type; /**< IP address type. */ + /** + * Outer destination IP address to match if ETH_TUNNEL_FILTER_OIP + * is set in filter_type, or inner destination IP address to match + * if ETH_TUNNEL_FILTER_IIP is set in filter_type. + */ + union { + uint32_t ipv4_addr; /**< IPv4 address in big endian. */ + uint32_t ipv6_addr[4]; /**< IPv6 address in big endian. */ + } ip_addr; + /** Flags from ETH_TUNNEL_FILTER_XX - see above. */ + uint16_t filter_type; + enum i40e_tunnel_type tunnel_type; /**< Tunnel Type. */ + uint32_t tenant_id; /**< Tenant ID to match. VNI, GRE key... */ + uint16_t queue_id; /**< Queue assigned to if match. */ + uint8_t is_to_vf; /**< 0 - to PF, 1 - to VF */ + uint16_t vf_id; /**< VF id, avaiblable when is_to_vf is 1. 
*/ +}; + +#define I40E_MIRROR_MAX_ENTRIES_PER_RULE 64 +#define I40E_MAX_MIRROR_RULES 64 +/* + * Mirror rule structure + */ +struct i40e_mirror_rule { + TAILQ_ENTRY(i40e_mirror_rule) rules; + uint8_t rule_type; + uint16_t index; /* the sw index of mirror rule */ + uint16_t id; /* the rule id assigned by firmware */ + uint16_t dst_vsi_seid; /* destination vsi for this mirror rule. */ + uint16_t num_entries; + /* the info stores depend on the rule type. + If type is I40E_MIRROR_TYPE_VLAN, vlan ids are stored here. + If type is I40E_MIRROR_TYPE_VPORT_*, vsi's seid are stored. + */ + uint16_t entries[I40E_MIRROR_MAX_ENTRIES_PER_RULE]; +}; + +TAILQ_HEAD(i40e_mirror_rule_list, i40e_mirror_rule); + +/* + * Struct to store flow created. + */ +struct rte_flow { + TAILQ_ENTRY(rte_flow) node; + enum rte_filter_type filter_type; + void *rule; +}; + +TAILQ_HEAD(i40e_flow_list, rte_flow); + +/* Struct to store Traffic Manager shaper profile. */ +struct i40e_tm_shaper_profile { + TAILQ_ENTRY(i40e_tm_shaper_profile) node; + uint32_t shaper_profile_id; + uint32_t reference_count; + struct rte_tm_shaper_params profile; +}; + +TAILQ_HEAD(i40e_shaper_profile_list, i40e_tm_shaper_profile); + +/* node type of Traffic Manager */ +enum i40e_tm_node_type { + I40E_TM_NODE_TYPE_PORT, + I40E_TM_NODE_TYPE_TC, + I40E_TM_NODE_TYPE_QUEUE, + I40E_TM_NODE_TYPE_MAX, +}; + +/* Struct to store Traffic Manager node configuration. */ +struct i40e_tm_node { + TAILQ_ENTRY(i40e_tm_node) node; + uint32_t id; + uint32_t priority; + uint32_t weight; + uint32_t reference_count; + struct i40e_tm_node *parent; + struct i40e_tm_shaper_profile *shaper_profile; + struct rte_tm_node_params params; +}; + +TAILQ_HEAD(i40e_tm_node_list, i40e_tm_node); + +/* Struct to store all the Traffic Manager configuration. */ +struct i40e_tm_conf { + struct i40e_shaper_profile_list shaper_profile_list; + struct i40e_tm_node *root; /* root node - port */ + struct i40e_tm_node_list tc_list; /* node list for all the TCs */ + struct i40e_tm_node_list queue_list; /* node list for all the queues */ + /** + * The number of added TC nodes. + * It should be no more than the TC number of this port. + */ + uint32_t nb_tc_node; + /** + * The number of added queue nodes. + * It should be no more than the queue number of this port. + */ + uint32_t nb_queue_node; + /** + * This flag is used to check if APP can change the TM node + * configuration. + * When it's true, means the configuration is applied to HW, + * APP should not change the configuration. + * As we don't support on-the-fly configuration, when starting + * the port, APP should call the hierarchy_commit API to set this + * flag to true. When stopping the port, this flag should be set + * to false. + */ + bool committed; +}; + +enum i40e_new_pctype { + I40E_CUSTOMIZED_GTPC = 0, + I40E_CUSTOMIZED_GTPU_IPV4, + I40E_CUSTOMIZED_GTPU_IPV6, + I40E_CUSTOMIZED_GTPU, + I40E_CUSTOMIZED_IPV4_L2TPV3, + I40E_CUSTOMIZED_IPV6_L2TPV3, + I40E_CUSTOMIZED_ESP_IPV4, + I40E_CUSTOMIZED_ESP_IPV6, + I40E_CUSTOMIZED_ESP_IPV4_UDP, + I40E_CUSTOMIZED_ESP_IPV6_UDP, + I40E_CUSTOMIZED_AH_IPV4, + I40E_CUSTOMIZED_AH_IPV6, + I40E_CUSTOMIZED_MAX, +}; + +#define I40E_FILTER_PCTYPE_INVALID 0 +struct i40e_customized_pctype { + enum i40e_new_pctype index; /* Indicate which customized pctype */ + uint8_t pctype; /* New pctype value */ + bool valid; /* Check if it's valid */ +}; + +struct i40e_rte_flow_rss_conf { + struct rte_flow_action_rss conf; /**< RSS parameters. 
*/ + uint16_t queue_region_conf; /**< Queue region config flag */ + uint8_t key[(I40E_VFQF_HKEY_MAX_INDEX > I40E_PFQF_HKEY_MAX_INDEX ? + I40E_VFQF_HKEY_MAX_INDEX : I40E_PFQF_HKEY_MAX_INDEX + 1) * + sizeof(uint32_t)]; /* Hash key. */ + uint16_t queue[I40E_MAX_Q_PER_TC]; /**< Queues indices to use. */ + bool valid; /* Check if it's valid */ +}; + +TAILQ_HEAD(i40e_rss_conf_list, i40e_rss_filter); + +/* RSS filter list structure */ +struct i40e_rss_filter { + TAILQ_ENTRY(i40e_rss_filter) next; + struct i40e_rte_flow_rss_conf rss_filter_info; +}; + +struct i40e_vf_msg_cfg { + /* maximal VF message during a statistic period */ + uint32_t max_msg; + + /* statistic period, in second */ + uint32_t period; + /* + * If message statistics from a VF exceed the maximal limitation, + * the PF will ignore any new message from that VF for + * 'ignor_second' time. + */ + uint32_t ignore_second; +}; + +/* + * Structure to store private data specific for PF instance. + */ +struct i40e_pf { + struct i40e_adapter *adapter; /* The adapter this PF associate to */ + struct i40e_vsi *main_vsi; /* pointer to main VSI structure */ + uint16_t mac_seid; /* The seid of the MAC of this PF */ + uint16_t main_vsi_seid; /* The seid of the main VSI */ + uint16_t max_num_vsi; + struct i40e_res_pool_info qp_pool; /*Queue pair pool */ + struct i40e_res_pool_info msix_pool; /* MSIX interrupt pool */ + + struct i40e_hw_port_stats stats_offset; + struct i40e_hw_port_stats stats; + /* internal packet statistics, it should be excluded from the total */ + struct i40e_eth_stats internal_stats_offset; + struct i40e_eth_stats internal_stats; + bool offset_loaded; + + struct rte_eth_dev_data *dev_data; /* Pointer to the device data */ + struct rte_ether_addr dev_addr; /* PF device mac address */ + uint64_t flags; /* PF feature flags */ + /* All kinds of queue pair setting for different VSIs */ + struct i40e_pf_vf *vfs; + uint16_t vf_num; + /* Each of below queue pairs should be power of 2 since it's the + precondition after TC configuration applied */ + uint16_t lan_nb_qp_max; + uint16_t lan_nb_qps; /* The number of queue pairs of LAN */ + uint16_t lan_qp_offset; + uint16_t vmdq_nb_qp_max; + uint16_t vmdq_nb_qps; /* The number of queue pairs of VMDq */ + uint16_t vmdq_qp_offset; + uint16_t vf_nb_qp_max; + uint16_t vf_nb_qps; /* The number of queue pairs of VF */ + uint16_t vf_qp_offset; + uint16_t fdir_nb_qps; /* The number of queue pairs of Flow Director */ + uint16_t fdir_qp_offset; + + uint16_t hash_lut_size; /* The size of hash lookup table */ + /* input set bits for each pctype */ + uint64_t hash_input_set[I40E_FILTER_PCTYPE_MAX]; + /* store VXLAN UDP ports */ + uint16_t vxlan_ports[I40E_MAX_PF_UDP_OFFLOAD_PORTS]; + uint16_t vxlan_bitmap; /* Vxlan bit mask */ + + /* VMDQ related info */ + uint16_t max_nb_vmdq_vsi; /* Max number of VMDQ VSIs supported */ + uint16_t nb_cfg_vmdq_vsi; /* number of VMDQ VSIs configured */ + struct i40e_vmdq_info *vmdq; + + struct i40e_fdir_info fdir; /* flow director info */ + struct i40e_ethertype_rule ethertype; /* Ethertype filter rule */ + struct i40e_tunnel_rule tunnel; /* Tunnel filter rule */ + struct i40e_rte_flow_rss_conf rss_info; /* RSS info */ + struct i40e_rss_conf_list rss_config_list; /* RSS rule list */ + struct i40e_queue_regions queue_region; /* queue region info */ + struct i40e_fc_conf fc_conf; /* Flow control conf */ + struct i40e_mirror_rule_list mirror_list; + uint16_t nb_mirror_rule; /* The number of mirror rules */ + bool floating_veb; /* The flag to use the floating VEB */ + /* 
The floating enable flag for the specific VF */ + bool floating_veb_list[I40E_MAX_VF]; + struct i40e_flow_list flow_list; + bool mpls_replace_flag; /* 1 - MPLS filter replace is done */ + bool gtp_replace_flag; /* 1 - GTP-C/U filter replace is done */ + bool qinq_replace_flag; /* QINQ filter replace is done */ + struct i40e_tm_conf tm_conf; + bool support_multi_driver; /* 1 - support multiple driver */ + + /* Dynamic Device Personalization */ + bool gtp_support; /* 1 - support GTP-C and GTP-U */ + bool esp_support; /* 1 - support ESP SPI */ + /* customer customized pctype */ + struct i40e_customized_pctype customized_pctype[I40E_CUSTOMIZED_MAX]; + /* Switch Domain Id */ + uint16_t switch_domain_id; + + struct i40e_vf_msg_cfg vf_msg_cfg; +}; + +enum pending_msg { + PFMSG_LINK_CHANGE = 0x1, + PFMSG_RESET_IMPENDING = 0x2, + PFMSG_DRIVER_CLOSE = 0x4, +}; + +struct i40e_vsi_vlan_pvid_info { + uint16_t on; /* Enable or disable pvid */ + union { + uint16_t pvid; /* Valid in case 'on' is set to set pvid */ + struct { + /* Valid in case 'on' is cleared. 'tagged' will reject tagged packets, + * while 'untagged' will reject untagged packets. + */ + uint8_t tagged; + uint8_t untagged; + } reject; + } config; +}; + +struct i40e_vf_rx_queues { + uint64_t rx_dma_addr; + uint32_t rx_ring_len; + uint32_t buff_size; +}; + +struct i40e_vf_tx_queues { + uint64_t tx_dma_addr; + uint32_t tx_ring_len; +}; + +/* + * Structure to store private data specific for VF instance. + */ +struct i40e_vf { + struct i40e_adapter *adapter; /* The adapter this VF associate to */ + struct rte_eth_dev_data *dev_data; /* Pointer to the device data */ + uint16_t num_queue_pairs; + uint16_t max_pkt_len; /* Maximum packet length */ + bool promisc_unicast_enabled; + bool promisc_multicast_enabled; + + uint32_t version_major; /* Major version number */ + uint32_t version_minor; /* Minor version number */ + uint16_t promisc_flags; /* Promiscuous setting */ + uint32_t vlan[I40E_VFTA_SIZE]; /* VLAN bit map */ + + /* Multicast addrs */ + struct rte_ether_addr mc_addrs[I40E_NUM_MACADDR_MAX]; + uint16_t mc_addrs_num; /* Multicast mac addresses number */ + + /* Event from pf */ + bool dev_closed; + bool link_up; + enum virtchnl_link_speed link_speed; + bool vf_reset; + volatile uint32_t pend_cmd; /* pending command not finished yet */ + int32_t cmd_retval; /* return value of the cmd response from PF */ + u16 pend_msg; /* flags indicates events from pf not handled yet */ + uint8_t *aq_resp; /* buffer to store the adminq response from PF */ + + /* VSI info */ + struct virtchnl_vf_resource *vf_res; /* All VSIs */ + struct virtchnl_vsi_resource *vsi_res; /* LAN VSI */ + struct i40e_vsi vsi; + uint64_t flags; +}; + +#define I40E_MAX_PKT_TYPE 256 +#define I40E_FLOW_TYPE_MAX 64 + +/* + * Structure to store private data for each PF/VF instance. 
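+ *
+ * A minimal access sketch (assuming, as the I40E_DEV_PRIVATE_TO_* macros
+ * further down expect, that the ethdev's dev_private points at this
+ * structure; dev is a placeholder rte_eth_dev pointer):
+ *
+ *   struct i40e_adapter *ad = dev->data->dev_private;
+ *   struct i40e_hw *hw   = I40E_DEV_PRIVATE_TO_HW(ad);
+ *   struct i40e_pf *pf   = I40E_DEV_PRIVATE_TO_PF(ad);        // PF side of the union
+ *   struct i40e_vsi *vsi = I40E_DEV_PRIVATE_TO_MAIN_VSI(ad);  // PF or VF main VSI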
+ */ +struct i40e_adapter { + /* Common for both PF and VF */ + struct i40e_hw hw; + struct rte_eth_dev *eth_dev; + + /* Specific for PF or VF */ + union { + struct i40e_pf pf; + struct i40e_vf vf; + }; + + /* For vector PMD */ + bool rx_bulk_alloc_allowed; + bool rx_vec_allowed; + bool tx_simple_allowed; + bool tx_vec_allowed; + + /* For PTP */ + struct rte_timecounter systime_tc; + struct rte_timecounter rx_tstamp_tc; + struct rte_timecounter tx_tstamp_tc; + + /* ptype mapping table */ + uint32_t ptype_tbl[I40E_MAX_PKT_TYPE] __rte_cache_min_aligned; + /* flow type to pctype mapping table */ + uint64_t pctypes_tbl[I40E_FLOW_TYPE_MAX] __rte_cache_min_aligned; + uint64_t flow_types_mask; + uint64_t pctypes_mask; + + /* For devargs */ + uint8_t use_latest_vec; + + /* For RSS reta table update */ + uint8_t rss_reta_updated; +}; + +/** + * Strucute to store private data for each VF representor instance + */ +struct i40e_vf_representor { + uint16_t switch_domain_id; + /**< Virtual Function ID */ + uint16_t vf_id; + /**< Virtual Function ID */ + struct i40e_adapter *adapter; + /**< Private data store of assocaiated physical function */ + struct i40e_eth_stats stats_offset; + /**< Zero-point of VF statistics*/ +}; + +extern const struct rte_flow_ops i40e_flow_ops; + +union i40e_filter_t { + struct rte_eth_ethertype_filter ethertype_filter; + struct i40e_fdir_filter_conf fdir_filter; + struct rte_eth_tunnel_filter_conf tunnel_filter; + struct i40e_tunnel_filter_conf consistent_tunnel_filter; + struct i40e_rte_flow_rss_conf rss_conf; +}; + +typedef int (*parse_filter_t)(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error, + union i40e_filter_t *filter); +struct i40e_valid_pattern { + enum rte_flow_item_type *items; + parse_filter_t parse_filter; +}; + +int i40e_dev_switch_queues(struct i40e_pf *pf, bool on); +int i40e_vsi_release(struct i40e_vsi *vsi); +struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, + enum i40e_vsi_type type, + struct i40e_vsi *uplink_vsi, + uint16_t user_param); +int i40e_switch_rx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on); +int i40e_switch_tx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on); +int i40e_vsi_add_vlan(struct i40e_vsi *vsi, uint16_t vlan); +int i40e_vsi_delete_vlan(struct i40e_vsi *vsi, uint16_t vlan); +int i40e_vsi_add_mac(struct i40e_vsi *vsi, struct i40e_mac_filter_info *filter); +int i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct rte_ether_addr *addr); +void i40e_update_vsi_stats(struct i40e_vsi *vsi); +void i40e_pf_disable_irq0(struct i40e_hw *hw); +void i40e_pf_enable_irq0(struct i40e_hw *hw); +int i40e_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete); +void i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t itr_idx); +void i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi); +int i40e_vsi_vlan_pvid_set(struct i40e_vsi *vsi, + struct i40e_vsi_vlan_pvid_info *info); +int i40e_vsi_config_vlan_stripping(struct i40e_vsi *vsi, bool on); +int i40e_vsi_config_vlan_filter(struct i40e_vsi *vsi, bool on); +uint64_t i40e_config_hena(const struct i40e_adapter *adapter, uint64_t flags); +uint64_t i40e_parse_hena(const struct i40e_adapter *adapter, uint64_t flags); +enum i40e_status_code i40e_fdir_setup_tx_resources(struct i40e_pf *pf); +enum i40e_status_code i40e_fdir_setup_rx_resources(struct i40e_pf *pf); +int i40e_fdir_setup(struct i40e_pf *pf); +const struct rte_memzone *i40e_memzone_reserve(const char *name, + 
uint32_t len, + int socket_id); +int i40e_fdir_configure(struct rte_eth_dev *dev); +void i40e_fdir_rx_proc_enable(struct rte_eth_dev *dev, bool on); +void i40e_fdir_teardown(struct i40e_pf *pf); +enum i40e_filter_pctype + i40e_flowtype_to_pctype(const struct i40e_adapter *adapter, + uint16_t flow_type); +uint16_t i40e_pctype_to_flowtype(const struct i40e_adapter *adapter, + enum i40e_filter_pctype pctype); +int i40e_fdir_ctrl_func(struct rte_eth_dev *dev, + enum rte_filter_op filter_op, + void *arg); +int i40e_select_filter_input_set(struct i40e_hw *hw, + struct rte_eth_input_set_conf *conf, + enum rte_filter_type filter); +void i40e_fdir_filter_restore(struct i40e_pf *pf); +int i40e_hash_filter_inset_select(struct i40e_hw *hw, + struct rte_eth_input_set_conf *conf); +int i40e_fdir_filter_inset_select(struct i40e_pf *pf, + struct rte_eth_input_set_conf *conf); +int i40e_pf_host_send_msg_to_vf(struct i40e_pf_vf *vf, uint32_t opcode, + uint32_t retval, uint8_t *msg, + uint16_t msglen); +void i40e_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, + struct rte_eth_rxq_info *qinfo); +void i40e_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, + struct rte_eth_txq_info *qinfo); +int i40e_rx_burst_mode_get(struct rte_eth_dev *dev, uint16_t queue_id, + struct rte_eth_burst_mode *mode); +int i40e_tx_burst_mode_get(struct rte_eth_dev *dev, uint16_t queue_id, + struct rte_eth_burst_mode *mode); +struct i40e_ethertype_filter * +i40e_sw_ethertype_filter_lookup(struct i40e_ethertype_rule *ethertype_rule, + const struct i40e_ethertype_filter_input *input); +int i40e_sw_ethertype_filter_del(struct i40e_pf *pf, + struct i40e_ethertype_filter_input *input); +int i40e_sw_fdir_filter_del(struct i40e_pf *pf, + struct i40e_fdir_input *input); +struct i40e_tunnel_filter * +i40e_sw_tunnel_filter_lookup(struct i40e_tunnel_rule *tunnel_rule, + const struct i40e_tunnel_filter_input *input); +int i40e_sw_tunnel_filter_del(struct i40e_pf *pf, + struct i40e_tunnel_filter_input *input); +uint64_t i40e_get_default_input_set(uint16_t pctype); +int i40e_ethertype_filter_set(struct i40e_pf *pf, + struct rte_eth_ethertype_filter *filter, + bool add); +int i40e_add_del_fdir_filter(struct rte_eth_dev *dev, + const struct rte_eth_fdir_filter *filter, + bool add); +int i40e_flow_add_del_fdir_filter(struct rte_eth_dev *dev, + const struct i40e_fdir_filter_conf *filter, + bool add); +int i40e_dev_tunnel_filter_set(struct i40e_pf *pf, + struct rte_eth_tunnel_filter_conf *tunnel_filter, + uint8_t add); +int i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf, + struct i40e_tunnel_filter_conf *tunnel_filter, + uint8_t add); +int i40e_fdir_flush(struct rte_eth_dev *dev); +int i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi, + struct i40e_macvlan_filter *mv_f, + int num, struct rte_ether_addr *addr); +int i40e_remove_macvlan_filters(struct i40e_vsi *vsi, + struct i40e_macvlan_filter *filter, + int total); +void i40e_set_vlan_filter(struct i40e_vsi *vsi, uint16_t vlan_id, bool on); +int i40e_add_macvlan_filters(struct i40e_vsi *vsi, + struct i40e_macvlan_filter *filter, + int total); +bool is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv); +bool is_i40e_supported(struct rte_eth_dev *dev); +bool is_i40evf_supported(struct rte_eth_dev *dev); + +int i40e_validate_input_set(enum i40e_filter_pctype pctype, + enum rte_filter_type filter, uint64_t inset); +int i40e_generate_inset_mask_reg(uint64_t inset, uint32_t *mask, + uint8_t nb_elem); +uint64_t i40e_translate_input_set_reg(enum i40e_mac_type type, 
uint64_t input); +void i40e_check_write_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val); +void i40e_check_write_global_reg(struct i40e_hw *hw, + uint32_t addr, uint32_t val); + +int i40e_tm_ops_get(struct rte_eth_dev *dev, void *ops); +void i40e_tm_conf_init(struct rte_eth_dev *dev); +void i40e_tm_conf_uninit(struct rte_eth_dev *dev); +struct i40e_customized_pctype* +i40e_find_customized_pctype(struct i40e_pf *pf, uint8_t index); +void i40e_update_customized_info(struct rte_eth_dev *dev, uint8_t *pkg, + uint32_t pkg_size, + enum rte_pmd_i40e_package_op op); +int i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb); +int i40e_flush_queue_region_all_conf(struct rte_eth_dev *dev, + struct i40e_hw *hw, struct i40e_pf *pf, uint16_t on); +void i40e_init_queue_region_conf(struct rte_eth_dev *dev); +void i40e_flex_payload_reg_set_default(struct i40e_hw *hw); +int i40e_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len); +int i40e_set_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size); +int i40e_rss_conf_init(struct i40e_rte_flow_rss_conf *out, + const struct rte_flow_action_rss *in); +int i40e_config_rss_filter(struct i40e_pf *pf, + struct i40e_rte_flow_rss_conf *conf, bool add); +int i40e_vf_representor_init(struct rte_eth_dev *ethdev, void *init_params); +int i40e_vf_representor_uninit(struct rte_eth_dev *ethdev); + +#define I40E_DEV_TO_PCI(eth_dev) \ + RTE_DEV_TO_PCI((eth_dev)->device) + +/* I40E_DEV_PRIVATE_TO */ +#define I40E_DEV_PRIVATE_TO_PF(adapter) \ + (&((struct i40e_adapter *)adapter)->pf) +#define I40E_DEV_PRIVATE_TO_HW(adapter) \ + (&((struct i40e_adapter *)adapter)->hw) +#define I40E_DEV_PRIVATE_TO_ADAPTER(adapter) \ + ((struct i40e_adapter *)adapter) + +/* I40EVF_DEV_PRIVATE_TO */ +#define I40EVF_DEV_PRIVATE_TO_VF(adapter) \ + (&((struct i40e_adapter *)adapter)->vf) + +static inline struct i40e_vsi * +i40e_get_vsi_from_adapter(struct i40e_adapter *adapter) +{ + struct i40e_hw *hw; + + if (!adapter) + return NULL; + + hw = I40E_DEV_PRIVATE_TO_HW(adapter); + if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) { + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(adapter); + return &vf->vsi; + } else { + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(adapter); + return pf->main_vsi; + } +} +#define I40E_DEV_PRIVATE_TO_MAIN_VSI(adapter) \ + i40e_get_vsi_from_adapter((struct i40e_adapter *)adapter) + +/* I40E_VSI_TO */ +#define I40E_VSI_TO_HW(vsi) \ + (&(((struct i40e_vsi *)vsi)->adapter->hw)) +#define I40E_VSI_TO_PF(vsi) \ + (&(((struct i40e_vsi *)vsi)->adapter->pf)) +#define I40E_VSI_TO_VF(vsi) \ + (&(((struct i40e_vsi *)vsi)->adapter->vf)) +#define I40E_VSI_TO_DEV_DATA(vsi) \ + (((struct i40e_vsi *)vsi)->adapter->pf.dev_data) +#define I40E_VSI_TO_ETH_DEV(vsi) \ + (((struct i40e_vsi *)vsi)->adapter->eth_dev) + +/* I40E_PF_TO */ +#define I40E_PF_TO_HW(pf) \ + (&(((struct i40e_pf *)pf)->adapter->hw)) +#define I40E_PF_TO_ADAPTER(pf) \ + ((struct i40e_adapter *)pf->adapter) + +/* I40E_VF_TO */ +#define I40E_VF_TO_HW(vf) \ + (&(((struct i40e_vf *)vf)->adapter->hw)) + +static inline void +i40e_init_adminq_parameter(struct i40e_hw *hw) +{ + hw->aq.num_arq_entries = I40E_AQ_LEN; + hw->aq.num_asq_entries = I40E_AQ_LEN; + hw->aq.arq_buf_size = I40E_AQ_BUF_SZ; + hw->aq.asq_buf_size = I40E_AQ_BUF_SZ; +} + +static inline int +i40e_align_floor(int n) +{ + if (n == 0) + return 0; + return 1 << (sizeof(n) * CHAR_BIT - 1 - __builtin_clz(n)); +} + +static inline uint16_t +i40e_calc_itr_interval(bool is_pf, bool is_multi_drv) +{ + uint16_t interval = 0; + 
+ if (is_multi_drv) { + interval = I40E_QUEUE_ITR_INTERVAL_MAX; + } else { + if (is_pf) + interval = I40E_QUEUE_ITR_INTERVAL_DEFAULT; + else + interval = I40E_VF_QUEUE_ITR_INTERVAL_DEFAULT; + } + + /* Convert to hardware count, as writing each 1 represents 2 us */ + return interval / 2; +} + +#define I40E_VALID_FLOW(flow_type) \ + ((flow_type) == RTE_ETH_FLOW_FRAG_IPV4 || \ + (flow_type) == RTE_ETH_FLOW_NONFRAG_IPV4_TCP || \ + (flow_type) == RTE_ETH_FLOW_NONFRAG_IPV4_UDP || \ + (flow_type) == RTE_ETH_FLOW_NONFRAG_IPV4_SCTP || \ + (flow_type) == RTE_ETH_FLOW_NONFRAG_IPV4_OTHER || \ + (flow_type) == RTE_ETH_FLOW_FRAG_IPV6 || \ + (flow_type) == RTE_ETH_FLOW_NONFRAG_IPV6_TCP || \ + (flow_type) == RTE_ETH_FLOW_NONFRAG_IPV6_UDP || \ + (flow_type) == RTE_ETH_FLOW_NONFRAG_IPV6_SCTP || \ + (flow_type) == RTE_ETH_FLOW_NONFRAG_IPV6_OTHER || \ + (flow_type) == RTE_ETH_FLOW_L2_PAYLOAD) + +#define I40E_VALID_PCTYPE_X722(pctype) \ + ((pctype) == I40E_FILTER_PCTYPE_FRAG_IPV4 || \ + (pctype) == I40E_FILTER_PCTYPE_NONF_IPV4_TCP || \ + (pctype) == I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK || \ + (pctype) == I40E_FILTER_PCTYPE_NONF_IPV4_UDP || \ + (pctype) == I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP || \ + (pctype) == I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP || \ + (pctype) == I40E_FILTER_PCTYPE_NONF_IPV4_SCTP || \ + (pctype) == I40E_FILTER_PCTYPE_NONF_IPV4_OTHER || \ + (pctype) == I40E_FILTER_PCTYPE_FRAG_IPV6 || \ + (pctype) == I40E_FILTER_PCTYPE_NONF_IPV6_UDP || \ + (pctype) == I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP || \ + (pctype) == I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP || \ + (pctype) == I40E_FILTER_PCTYPE_NONF_IPV6_TCP || \ + (pctype) == I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK || \ + (pctype) == I40E_FILTER_PCTYPE_NONF_IPV6_SCTP || \ + (pctype) == I40E_FILTER_PCTYPE_NONF_IPV6_OTHER || \ + (pctype) == I40E_FILTER_PCTYPE_L2_PAYLOAD) + +#define I40E_VALID_PCTYPE(pctype) \ + ((pctype) == I40E_FILTER_PCTYPE_FRAG_IPV4 || \ + (pctype) == I40E_FILTER_PCTYPE_NONF_IPV4_TCP || \ + (pctype) == I40E_FILTER_PCTYPE_NONF_IPV4_UDP || \ + (pctype) == I40E_FILTER_PCTYPE_NONF_IPV4_SCTP || \ + (pctype) == I40E_FILTER_PCTYPE_NONF_IPV4_OTHER || \ + (pctype) == I40E_FILTER_PCTYPE_FRAG_IPV6 || \ + (pctype) == I40E_FILTER_PCTYPE_NONF_IPV6_UDP || \ + (pctype) == I40E_FILTER_PCTYPE_NONF_IPV6_TCP || \ + (pctype) == I40E_FILTER_PCTYPE_NONF_IPV6_SCTP || \ + (pctype) == I40E_FILTER_PCTYPE_NONF_IPV6_OTHER || \ + (pctype) == I40E_FILTER_PCTYPE_L2_PAYLOAD) + +#define I40E_PHY_TYPE_SUPPORT_40G(phy_type) \ + (((phy_type) & I40E_CAP_PHY_TYPE_40GBASE_KR4) || \ + ((phy_type) & I40E_CAP_PHY_TYPE_40GBASE_CR4_CU) || \ + ((phy_type) & I40E_CAP_PHY_TYPE_40GBASE_AOC) || \ + ((phy_type) & I40E_CAP_PHY_TYPE_40GBASE_CR4) || \ + ((phy_type) & I40E_CAP_PHY_TYPE_40GBASE_SR4) || \ + ((phy_type) & I40E_CAP_PHY_TYPE_40GBASE_LR4)) + +#define I40E_PHY_TYPE_SUPPORT_25G(phy_type) \ + (((phy_type) & I40E_CAP_PHY_TYPE_25GBASE_KR) || \ + ((phy_type) & I40E_CAP_PHY_TYPE_25GBASE_CR) || \ + ((phy_type) & I40E_CAP_PHY_TYPE_25GBASE_SR) || \ + ((phy_type) & I40E_CAP_PHY_TYPE_25GBASE_LR) || \ + ((phy_type) & I40E_CAP_PHY_TYPE_25GBASE_AOC) || \ + ((phy_type) & I40E_CAP_PHY_TYPE_25GBASE_ACC)) + +#endif /* _I40E_ETHDEV_H_ */ diff --git a/src/spdk/dpdk/drivers/net/i40e/i40e_ethdev_vf.c b/src/spdk/dpdk/drivers/net/i40e/i40e_ethdev_vf.c new file mode 100644 index 000000000..eca716a6a --- /dev/null +++ b/src/spdk/dpdk/drivers/net/i40e/i40e_ethdev_vf.c @@ -0,0 +1,2882 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2016 Intel Corporation + */ + 
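The I40E_DEV_PRIVATE_TO_* conversion macros and i40e_get_vsi_from_adapter() in the header hunk above rest on one layout decision: a single struct i40e_adapter private area carries both the PF and the VF state, and the MAC type recorded in the shared struct i40e_hw decides which side owns the "main" VSI. The minimal sketch below models only that selection logic; the types and names (mock_adapter, mock_vsi, get_main_vsi) are hypothetical stand-ins, not the driver's real structures, and the snippet compiles on its own purely as an illustration.

#include <stdio.h>

/* Hypothetical stand-ins: one adapter area holds both the PF and the
 * VF view, plus the MAC type that picks which view is active. */
enum mock_mac_type { MOCK_MAC_PF, MOCK_MAC_VF };

struct mock_vsi { const char *name; };

struct mock_adapter {
	enum mock_mac_type mac_type;
	struct { struct mock_vsi *main_vsi; } pf;  /* PF view */
	struct { struct mock_vsi vsi; } vf;        /* VF view */
};

/* Same shape as i40e_get_vsi_from_adapter(): a VF device returns the
 * VSI embedded in its VF state, a PF device returns the PF's main VSI. */
static struct mock_vsi *
get_main_vsi(struct mock_adapter *adapter)
{
	if (!adapter)
		return NULL;
	if (adapter->mac_type == MOCK_MAC_VF)
		return &adapter->vf.vsi;
	return adapter->pf.main_vsi;
}

int main(void)
{
	struct mock_vsi pf_main = { "pf-main-vsi" };
	struct mock_adapter pf_dev = { MOCK_MAC_PF, { &pf_main }, { { "unused" } } };
	struct mock_adapter vf_dev = { MOCK_MAC_VF, { NULL }, { { "vf-vsi" } } };

	printf("PF adapter -> %s\n", get_main_vsi(&pf_dev)->name);
	printf("VF adapter -> %s\n", get_main_vsi(&vf_dev)->name);
	return 0;
}

The real helper additionally treats I40E_MAC_X722_VF as a VF type; the sketch collapses that detail into a single VF case to keep the dispatch visible.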
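Two inline helpers near the end of the header are self-contained arithmetic: i40e_align_floor() rounds an integer down to the nearest power of two using __builtin_clz(), and i40e_calc_itr_interval() converts a microsecond interval into the hardware ITR count, which is programmed in units of 2 microseconds. The standalone sketch below re-implements the same two steps under assumed names (align_floor, itr_count_from_usec) so the arithmetic can be checked in isolation; it is an illustration, not driver code.

#include <limits.h>
#include <stdio.h>

/* Round a positive n down to the nearest power of two (0 stays 0),
 * mirroring i40e_align_floor() above: the highest set bit index is
 * (bit width - 1 - clz), and 1 shifted by that index is the floor. */
static int align_floor(int n)
{
	if (n == 0)
		return 0;
	return 1 << (sizeof(n) * CHAR_BIT - 1 - __builtin_clz(n));
}

/* Convert a microsecond interval to the ITR register count; each count
 * represents 2 us, hence the division by 2 in i40e_calc_itr_interval(). */
static unsigned int itr_count_from_usec(unsigned int usec)
{
	return usec / 2;
}

int main(void)
{
	printf("align_floor(24) = %d\n", align_floor(24));              /* 16 */
	printf("align_floor(64) = %d\n", align_floor(64));              /* 64 */
	printf("ITR count for 32 us = %u\n", itr_count_from_usec(32));  /* 16 */
	return 0;
}

As in the original, __builtin_clz() is only defined for non-zero input, which is why the zero case is handled before the shift.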
+#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "i40e_logs.h" +#include "base/i40e_prototype.h" +#include "base/i40e_adminq_cmd.h" +#include "base/i40e_type.h" + +#include "i40e_rxtx.h" +#include "i40e_ethdev.h" +#include "i40e_pf.h" + +/* busy wait delay in msec */ +#define I40EVF_BUSY_WAIT_DELAY 10 +#define I40EVF_BUSY_WAIT_COUNT 50 +#define MAX_RESET_WAIT_CNT 20 + +#define I40EVF_ALARM_INTERVAL 50000 /* us */ + +struct i40evf_arq_msg_info { + enum virtchnl_ops ops; + enum i40e_status_code result; + uint16_t buf_len; + uint16_t msg_len; + uint8_t *msg; +}; + +struct vf_cmd_info { + enum virtchnl_ops ops; + uint8_t *in_args; + uint32_t in_args_size; + uint8_t *out_buffer; + /* Input & output type. pass in buffer size and pass out + * actual return result + */ + uint32_t out_size; +}; + +enum i40evf_aq_result { + I40EVF_MSG_ERR = -1, /* Meet error when accessing admin queue */ + I40EVF_MSG_NON, /* Read nothing from admin queue */ + I40EVF_MSG_SYS, /* Read system msg from admin queue */ + I40EVF_MSG_CMD, /* Read async command result */ +}; + +static int i40evf_dev_configure(struct rte_eth_dev *dev); +static int i40evf_dev_start(struct rte_eth_dev *dev); +static void i40evf_dev_stop(struct rte_eth_dev *dev); +static int i40evf_dev_info_get(struct rte_eth_dev *dev, + struct rte_eth_dev_info *dev_info); +static int i40evf_dev_link_update(struct rte_eth_dev *dev, + int wait_to_complete); +static int i40evf_dev_stats_get(struct rte_eth_dev *dev, + struct rte_eth_stats *stats); +static int i40evf_dev_xstats_get(struct rte_eth_dev *dev, + struct rte_eth_xstat *xstats, unsigned n); +static int i40evf_dev_xstats_get_names(struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, + unsigned limit); +static int i40evf_dev_xstats_reset(struct rte_eth_dev *dev); +static int i40evf_vlan_filter_set(struct rte_eth_dev *dev, + uint16_t vlan_id, int on); +static int i40evf_vlan_offload_set(struct rte_eth_dev *dev, int mask); +static void i40evf_dev_close(struct rte_eth_dev *dev); +static int i40evf_dev_reset(struct rte_eth_dev *dev); +static int i40evf_dev_promiscuous_enable(struct rte_eth_dev *dev); +static int i40evf_dev_promiscuous_disable(struct rte_eth_dev *dev); +static int i40evf_dev_allmulticast_enable(struct rte_eth_dev *dev); +static int i40evf_dev_allmulticast_disable(struct rte_eth_dev *dev); +static int i40evf_init_vlan(struct rte_eth_dev *dev); +static int i40evf_dev_rx_queue_start(struct rte_eth_dev *dev, + uint16_t rx_queue_id); +static int i40evf_dev_rx_queue_stop(struct rte_eth_dev *dev, + uint16_t rx_queue_id); +static int i40evf_dev_tx_queue_start(struct rte_eth_dev *dev, + uint16_t tx_queue_id); +static int i40evf_dev_tx_queue_stop(struct rte_eth_dev *dev, + uint16_t tx_queue_id); +static int i40evf_add_mac_addr(struct rte_eth_dev *dev, + struct rte_ether_addr *addr, + uint32_t index, + uint32_t pool); +static void i40evf_del_mac_addr(struct rte_eth_dev *dev, uint32_t index); +static int i40evf_dev_rss_reta_update(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size); +static int i40evf_dev_rss_reta_query(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size); +static int i40evf_config_rss(struct i40e_vf *vf); +static int i40evf_dev_rss_hash_update(struct rte_eth_dev *dev, + 
struct rte_eth_rss_conf *rss_conf); +static int i40evf_dev_rss_hash_conf_get(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf); +static int i40evf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu); +static int i40evf_set_default_mac_addr(struct rte_eth_dev *dev, + struct rte_ether_addr *mac_addr); +static int +i40evf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id); +static int +i40evf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id); +static void i40evf_handle_pf_event(struct rte_eth_dev *dev, + uint8_t *msg, + uint16_t msglen); + +static int +i40evf_add_del_mc_addr_list(struct rte_eth_dev *dev, + struct rte_ether_addr *mc_addr_set, + uint32_t nb_mc_addr, bool add); +static int +i40evf_set_mc_addr_list(struct rte_eth_dev *dev, + struct rte_ether_addr *mc_addr_set, + uint32_t nb_mc_addr); +static void +i40evf_dev_alarm_handler(void *param); + +/* Default hash key buffer for RSS */ +static uint32_t rss_key_default[I40E_VFQF_HKEY_MAX_INDEX + 1]; + +struct rte_i40evf_xstats_name_off { + char name[RTE_ETH_XSTATS_NAME_SIZE]; + unsigned offset; +}; + +static const struct rte_i40evf_xstats_name_off rte_i40evf_stats_strings[] = { + {"rx_bytes", offsetof(struct i40e_eth_stats, rx_bytes)}, + {"rx_unicast_packets", offsetof(struct i40e_eth_stats, rx_unicast)}, + {"rx_multicast_packets", offsetof(struct i40e_eth_stats, rx_multicast)}, + {"rx_broadcast_packets", offsetof(struct i40e_eth_stats, rx_broadcast)}, + {"rx_dropped_packets", offsetof(struct i40e_eth_stats, rx_discards)}, + {"rx_unknown_protocol_packets", offsetof(struct i40e_eth_stats, + rx_unknown_protocol)}, + {"tx_bytes", offsetof(struct i40e_eth_stats, tx_bytes)}, + {"tx_unicast_packets", offsetof(struct i40e_eth_stats, tx_unicast)}, + {"tx_multicast_packets", offsetof(struct i40e_eth_stats, tx_multicast)}, + {"tx_broadcast_packets", offsetof(struct i40e_eth_stats, tx_broadcast)}, + {"tx_dropped_packets", offsetof(struct i40e_eth_stats, tx_discards)}, + {"tx_error_packets", offsetof(struct i40e_eth_stats, tx_errors)}, +}; + +#define I40EVF_NB_XSTATS (sizeof(rte_i40evf_stats_strings) / \ + sizeof(rte_i40evf_stats_strings[0])) + +static const struct eth_dev_ops i40evf_eth_dev_ops = { + .dev_configure = i40evf_dev_configure, + .dev_start = i40evf_dev_start, + .dev_stop = i40evf_dev_stop, + .promiscuous_enable = i40evf_dev_promiscuous_enable, + .promiscuous_disable = i40evf_dev_promiscuous_disable, + .allmulticast_enable = i40evf_dev_allmulticast_enable, + .allmulticast_disable = i40evf_dev_allmulticast_disable, + .link_update = i40evf_dev_link_update, + .stats_get = i40evf_dev_stats_get, + .stats_reset = i40evf_dev_xstats_reset, + .xstats_get = i40evf_dev_xstats_get, + .xstats_get_names = i40evf_dev_xstats_get_names, + .xstats_reset = i40evf_dev_xstats_reset, + .dev_close = i40evf_dev_close, + .dev_reset = i40evf_dev_reset, + .dev_infos_get = i40evf_dev_info_get, + .dev_supported_ptypes_get = i40e_dev_supported_ptypes_get, + .vlan_filter_set = i40evf_vlan_filter_set, + .vlan_offload_set = i40evf_vlan_offload_set, + .rx_queue_start = i40evf_dev_rx_queue_start, + .rx_queue_stop = i40evf_dev_rx_queue_stop, + .tx_queue_start = i40evf_dev_tx_queue_start, + .tx_queue_stop = i40evf_dev_tx_queue_stop, + .rx_queue_setup = i40e_dev_rx_queue_setup, + .rx_queue_release = i40e_dev_rx_queue_release, + .rx_queue_intr_enable = i40evf_dev_rx_queue_intr_enable, + .rx_queue_intr_disable = i40evf_dev_rx_queue_intr_disable, + .rx_descriptor_done = i40e_dev_rx_descriptor_done, + .rx_descriptor_status = 
i40e_dev_rx_descriptor_status, + .tx_descriptor_status = i40e_dev_tx_descriptor_status, + .tx_queue_setup = i40e_dev_tx_queue_setup, + .tx_queue_release = i40e_dev_tx_queue_release, + .rx_queue_count = i40e_dev_rx_queue_count, + .rxq_info_get = i40e_rxq_info_get, + .txq_info_get = i40e_txq_info_get, + .mac_addr_add = i40evf_add_mac_addr, + .mac_addr_remove = i40evf_del_mac_addr, + .set_mc_addr_list = i40evf_set_mc_addr_list, + .reta_update = i40evf_dev_rss_reta_update, + .reta_query = i40evf_dev_rss_reta_query, + .rss_hash_update = i40evf_dev_rss_hash_update, + .rss_hash_conf_get = i40evf_dev_rss_hash_conf_get, + .mtu_set = i40evf_dev_mtu_set, + .mac_addr_set = i40evf_set_default_mac_addr, + .tx_done_cleanup = i40e_tx_done_cleanup, +}; + +/* + * Read data in admin queue to get msg from pf driver + */ +static enum i40evf_aq_result +i40evf_read_pfmsg(struct rte_eth_dev *dev, struct i40evf_arq_msg_info *data) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + struct i40e_arq_event_info event; + enum virtchnl_ops opcode; + enum i40e_status_code retval; + int ret; + enum i40evf_aq_result result = I40EVF_MSG_NON; + + event.buf_len = data->buf_len; + event.msg_buf = data->msg; + ret = i40e_clean_arq_element(hw, &event, NULL); + /* Can't read any msg from adminQ */ + if (ret) { + if (ret != I40E_ERR_ADMIN_QUEUE_NO_WORK) + result = I40EVF_MSG_ERR; + return result; + } + + opcode = (enum virtchnl_ops)rte_le_to_cpu_32(event.desc.cookie_high); + retval = (enum i40e_status_code)rte_le_to_cpu_32(event.desc.cookie_low); + /* pf sys event */ + if (opcode == VIRTCHNL_OP_EVENT) { + struct virtchnl_pf_event *vpe = + (struct virtchnl_pf_event *)event.msg_buf; + + result = I40EVF_MSG_SYS; + switch (vpe->event) { + case VIRTCHNL_EVENT_LINK_CHANGE: + vf->link_up = + vpe->event_data.link_event.link_status; + vf->link_speed = + vpe->event_data.link_event.link_speed; + vf->pend_msg |= PFMSG_LINK_CHANGE; + PMD_DRV_LOG(INFO, "Link status update:%s", + vf->link_up ? "up" : "down"); + break; + case VIRTCHNL_EVENT_RESET_IMPENDING: + vf->vf_reset = true; + vf->pend_msg |= PFMSG_RESET_IMPENDING; + PMD_DRV_LOG(INFO, "vf is reseting"); + break; + case VIRTCHNL_EVENT_PF_DRIVER_CLOSE: + vf->dev_closed = true; + vf->pend_msg |= PFMSG_DRIVER_CLOSE; + PMD_DRV_LOG(INFO, "PF driver closed"); + break; + default: + PMD_DRV_LOG(ERR, "%s: Unknown event %d from pf", + __func__, vpe->event); + } + } else { + /* async reply msg on command issued by vf previously */ + result = I40EVF_MSG_CMD; + /* Actual data length read from PF */ + data->msg_len = event.msg_len; + } + + data->result = retval; + data->ops = opcode; + + return result; +} + +/** + * clear current command. Only call in case execute + * _atomic_set_cmd successfully. + */ +static inline void +_clear_cmd(struct i40e_vf *vf) +{ + rte_wmb(); + vf->pend_cmd = VIRTCHNL_OP_UNKNOWN; +} + +/* + * Check there is pending cmd in execution. If none, set new command. 
+ */ +static inline int +_atomic_set_cmd(struct i40e_vf *vf, enum virtchnl_ops ops) +{ + int ret = rte_atomic32_cmpset(&vf->pend_cmd, + VIRTCHNL_OP_UNKNOWN, ops); + + if (!ret) + PMD_DRV_LOG(ERR, "There is incomplete cmd %d", vf->pend_cmd); + + return !ret; +} + +#define MAX_TRY_TIMES 200 +#define ASQ_DELAY_MS 10 + +static int +i40evf_execute_vf_cmd(struct rte_eth_dev *dev, struct vf_cmd_info *args) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + struct i40evf_arq_msg_info info; + enum i40evf_aq_result ret; + int err, i = 0; + + if (_atomic_set_cmd(vf, args->ops)) + return -1; + + info.msg = args->out_buffer; + info.buf_len = args->out_size; + info.ops = VIRTCHNL_OP_UNKNOWN; + info.result = I40E_SUCCESS; + + err = i40e_aq_send_msg_to_pf(hw, args->ops, I40E_SUCCESS, + args->in_args, args->in_args_size, NULL); + if (err) { + PMD_DRV_LOG(ERR, "fail to send cmd %d", args->ops); + _clear_cmd(vf); + return err; + } + + switch (args->ops) { + case VIRTCHNL_OP_RESET_VF: + /*no need to process in this function */ + err = 0; + break; + case VIRTCHNL_OP_VERSION: + case VIRTCHNL_OP_GET_VF_RESOURCES: + /* for init adminq commands, need to poll the response */ + err = -1; + do { + ret = i40evf_read_pfmsg(dev, &info); + vf->cmd_retval = info.result; + if (ret == I40EVF_MSG_CMD) { + err = 0; + break; + } else if (ret == I40EVF_MSG_ERR) + break; + rte_delay_ms(ASQ_DELAY_MS); + /* If don't read msg or read sys event, continue */ + } while (i++ < MAX_TRY_TIMES); + _clear_cmd(vf); + break; + case VIRTCHNL_OP_REQUEST_QUEUES: + /** + * ignore async reply, only wait for system message, + * vf_reset = true if get VIRTCHNL_EVENT_RESET_IMPENDING, + * if not, means request queues failed. 
+ */ + err = -1; + do { + ret = i40evf_read_pfmsg(dev, &info); + vf->cmd_retval = info.result; + if (ret == I40EVF_MSG_SYS && vf->vf_reset) { + err = 0; + break; + } else if (ret == I40EVF_MSG_ERR || + ret == I40EVF_MSG_CMD) { + break; + } + rte_delay_ms(ASQ_DELAY_MS); + /* If don't read msg or read sys event, continue */ + } while (i++ < MAX_TRY_TIMES); + _clear_cmd(vf); + break; + + default: + /* for other adminq in running time, waiting the cmd done flag */ + err = -1; + do { + if (vf->pend_cmd == VIRTCHNL_OP_UNKNOWN) { + err = 0; + break; + } + rte_delay_ms(ASQ_DELAY_MS); + /* If don't read msg or read sys event, continue */ + } while (i++ < MAX_TRY_TIMES); + /* If there's no response is received, clear command */ + if (i >= MAX_TRY_TIMES) { + PMD_DRV_LOG(WARNING, "No response for %d", args->ops); + _clear_cmd(vf); + } + break; + } + + return err | vf->cmd_retval; +} + +/* + * Check API version with sync wait until version read or fail from admin queue + */ +static int +i40evf_check_api_version(struct rte_eth_dev *dev) +{ + struct virtchnl_version_info version, *pver; + int err; + struct vf_cmd_info args; + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + + version.major = VIRTCHNL_VERSION_MAJOR; + version.minor = VIRTCHNL_VERSION_MINOR; + + args.ops = VIRTCHNL_OP_VERSION; + args.in_args = (uint8_t *)&version; + args.in_args_size = sizeof(version); + args.out_buffer = vf->aq_resp; + args.out_size = I40E_AQ_BUF_SZ; + + err = i40evf_execute_vf_cmd(dev, &args); + if (err) { + PMD_INIT_LOG(ERR, "fail to execute command OP_VERSION"); + return err; + } + + pver = (struct virtchnl_version_info *)args.out_buffer; + vf->version_major = pver->major; + vf->version_minor = pver->minor; + if ((vf->version_major == VIRTCHNL_VERSION_MAJOR) && + (vf->version_minor <= VIRTCHNL_VERSION_MINOR)) + PMD_DRV_LOG(INFO, "Peer is Linux PF host"); + else { + PMD_INIT_LOG(ERR, "PF/VF API version mismatch:(%u.%u)-(%u.%u)", + vf->version_major, vf->version_minor, + VIRTCHNL_VERSION_MAJOR, + VIRTCHNL_VERSION_MINOR); + return -1; + } + + return 0; +} + +static int +i40evf_get_vf_resource(struct rte_eth_dev *dev) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + int err; + struct vf_cmd_info args; + uint32_t caps, len; + + args.ops = VIRTCHNL_OP_GET_VF_RESOURCES; + args.out_buffer = vf->aq_resp; + args.out_size = I40E_AQ_BUF_SZ; + if (PF_IS_V11(vf)) { + caps = VIRTCHNL_VF_OFFLOAD_L2 | + VIRTCHNL_VF_OFFLOAD_RSS_AQ | + VIRTCHNL_VF_OFFLOAD_RSS_REG | + VIRTCHNL_VF_OFFLOAD_VLAN | + VIRTCHNL_VF_OFFLOAD_RX_POLLING; + args.in_args = (uint8_t *)∩︀ + args.in_args_size = sizeof(caps); + } else { + args.in_args = NULL; + args.in_args_size = 0; + } + err = i40evf_execute_vf_cmd(dev, &args); + + if (err) { + PMD_DRV_LOG(ERR, "fail to execute command OP_GET_VF_RESOURCE"); + return err; + } + + len = sizeof(struct virtchnl_vf_resource) + + I40E_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource); + + rte_memcpy(vf->vf_res, args.out_buffer, + RTE_MIN(args.out_size, len)); + i40e_vf_parse_hw_config(hw, vf->vf_res); + + return 0; +} + +static int +i40evf_config_promisc(struct rte_eth_dev *dev, + bool enable_unicast, + bool enable_multicast) +{ + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + int err; + struct vf_cmd_info args; + struct virtchnl_promisc_info promisc; + + promisc.flags = 0; + promisc.vsi_id = vf->vsi_res->vsi_id; + + if (enable_unicast) + promisc.flags |= 
FLAG_VF_UNICAST_PROMISC; + + if (enable_multicast) + promisc.flags |= FLAG_VF_MULTICAST_PROMISC; + + args.ops = VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE; + args.in_args = (uint8_t *)&promisc; + args.in_args_size = sizeof(promisc); + args.out_buffer = vf->aq_resp; + args.out_size = I40E_AQ_BUF_SZ; + + err = i40evf_execute_vf_cmd(dev, &args); + + if (err) + PMD_DRV_LOG(ERR, "fail to execute command " + "CONFIG_PROMISCUOUS_MODE"); + return err; +} + +static int +i40evf_enable_vlan_strip(struct rte_eth_dev *dev) +{ + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + struct vf_cmd_info args; + int ret; + + memset(&args, 0, sizeof(args)); + args.ops = VIRTCHNL_OP_ENABLE_VLAN_STRIPPING; + args.in_args = NULL; + args.in_args_size = 0; + args.out_buffer = vf->aq_resp; + args.out_size = I40E_AQ_BUF_SZ; + ret = i40evf_execute_vf_cmd(dev, &args); + if (ret) + PMD_DRV_LOG(ERR, "Failed to execute command of " + "VIRTCHNL_OP_ENABLE_VLAN_STRIPPING"); + + return ret; +} + +static int +i40evf_disable_vlan_strip(struct rte_eth_dev *dev) +{ + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + struct vf_cmd_info args; + int ret; + + memset(&args, 0, sizeof(args)); + args.ops = VIRTCHNL_OP_DISABLE_VLAN_STRIPPING; + args.in_args = NULL; + args.in_args_size = 0; + args.out_buffer = vf->aq_resp; + args.out_size = I40E_AQ_BUF_SZ; + ret = i40evf_execute_vf_cmd(dev, &args); + if (ret) + PMD_DRV_LOG(ERR, "Failed to execute command of " + "VIRTCHNL_OP_DISABLE_VLAN_STRIPPING"); + + return ret; +} + +static void +i40evf_fill_virtchnl_vsi_txq_info(struct virtchnl_txq_info *txq_info, + uint16_t vsi_id, + uint16_t queue_id, + uint16_t nb_txq, + struct i40e_tx_queue *txq) +{ + txq_info->vsi_id = vsi_id; + txq_info->queue_id = queue_id; + if (queue_id < nb_txq && txq) { + txq_info->ring_len = txq->nb_tx_desc; + txq_info->dma_ring_addr = txq->tx_ring_phys_addr; + } +} + +static void +i40evf_fill_virtchnl_vsi_rxq_info(struct virtchnl_rxq_info *rxq_info, + uint16_t vsi_id, + uint16_t queue_id, + uint16_t nb_rxq, + uint32_t max_pkt_size, + struct i40e_rx_queue *rxq) +{ + rxq_info->vsi_id = vsi_id; + rxq_info->queue_id = queue_id; + rxq_info->max_pkt_size = max_pkt_size; + if (queue_id < nb_rxq && rxq) { + rxq_info->ring_len = rxq->nb_rx_desc; + rxq_info->dma_ring_addr = rxq->rx_ring_phys_addr; + rxq_info->databuffer_size = + (rte_pktmbuf_data_room_size(rxq->mp) - + RTE_PKTMBUF_HEADROOM); + } +} + +static int +i40evf_configure_vsi_queues(struct rte_eth_dev *dev) +{ + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + struct i40e_rx_queue **rxq = + (struct i40e_rx_queue **)dev->data->rx_queues; + struct i40e_tx_queue **txq = + (struct i40e_tx_queue **)dev->data->tx_queues; + struct virtchnl_vsi_queue_config_info *vc_vqci; + struct virtchnl_queue_pair_info *vc_qpi; + struct vf_cmd_info args; + uint16_t i, nb_qp = vf->num_queue_pairs; + const uint32_t size = + I40E_VIRTCHNL_CONFIG_VSI_QUEUES_SIZE(vc_vqci, nb_qp); + uint8_t buff[size]; + int ret; + + memset(buff, 0, sizeof(buff)); + vc_vqci = (struct virtchnl_vsi_queue_config_info *)buff; + vc_vqci->vsi_id = vf->vsi_res->vsi_id; + vc_vqci->num_queue_pairs = nb_qp; + + for (i = 0, vc_qpi = vc_vqci->qpair; i < nb_qp; i++, vc_qpi++) { + i40evf_fill_virtchnl_vsi_txq_info(&vc_qpi->txq, + vc_vqci->vsi_id, i, dev->data->nb_tx_queues, + txq ? txq[i] : NULL); + i40evf_fill_virtchnl_vsi_rxq_info(&vc_qpi->rxq, + vc_vqci->vsi_id, i, dev->data->nb_rx_queues, + vf->max_pkt_len, rxq ? 
rxq[i] : NULL); + } + memset(&args, 0, sizeof(args)); + args.ops = VIRTCHNL_OP_CONFIG_VSI_QUEUES; + args.in_args = (uint8_t *)vc_vqci; + args.in_args_size = size; + args.out_buffer = vf->aq_resp; + args.out_size = I40E_AQ_BUF_SZ; + ret = i40evf_execute_vf_cmd(dev, &args); + if (ret) + PMD_DRV_LOG(ERR, "Failed to execute command of " + "VIRTCHNL_OP_CONFIG_VSI_QUEUES"); + + return ret; +} + +static int +i40evf_config_irq_map(struct rte_eth_dev *dev) +{ + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + struct vf_cmd_info args; + uint8_t *cmd_buffer = NULL; + struct virtchnl_irq_map_info *map_info; + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + uint32_t vec, cmd_buffer_size, max_vectors, nb_msix, msix_base, i; + uint16_t rxq_map[vf->vf_res->max_vectors]; + int err; + + memset(rxq_map, 0, sizeof(rxq_map)); + if (dev->data->dev_conf.intr_conf.rxq != 0 && + rte_intr_allow_others(intr_handle)) { + msix_base = I40E_RX_VEC_START; + /* For interrupt mode, available vector id is from 1. */ + max_vectors = vf->vf_res->max_vectors - 1; + nb_msix = RTE_MIN(max_vectors, intr_handle->nb_efd); + + vec = msix_base; + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq_map[vec] |= 1 << i; + intr_handle->intr_vec[i] = vec++; + if (vec >= vf->vf_res->max_vectors) + vec = msix_base; + } + } else { + msix_base = I40E_MISC_VEC_ID; + nb_msix = 1; + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq_map[msix_base] |= 1 << i; + if (rte_intr_dp_is_en(intr_handle)) + intr_handle->intr_vec[i] = msix_base; + } + } + + cmd_buffer_size = sizeof(struct virtchnl_irq_map_info) + + sizeof(struct virtchnl_vector_map) * nb_msix; + cmd_buffer = rte_zmalloc("i40e", cmd_buffer_size, 0); + if (!cmd_buffer) { + PMD_DRV_LOG(ERR, "Failed to allocate memory"); + return I40E_ERR_NO_MEMORY; + } + + map_info = (struct virtchnl_irq_map_info *)cmd_buffer; + map_info->num_vectors = nb_msix; + for (i = 0; i < nb_msix; i++) { + map_info->vecmap[i].rxitr_idx = I40E_ITR_INDEX_DEFAULT; + map_info->vecmap[i].vsi_id = vf->vsi_res->vsi_id; + map_info->vecmap[i].vector_id = msix_base + i; + map_info->vecmap[i].txq_map = 0; + map_info->vecmap[i].rxq_map = rxq_map[msix_base + i]; + } + + args.ops = VIRTCHNL_OP_CONFIG_IRQ_MAP; + args.in_args = (u8 *)cmd_buffer; + args.in_args_size = cmd_buffer_size; + args.out_buffer = vf->aq_resp; + args.out_size = I40E_AQ_BUF_SZ; + err = i40evf_execute_vf_cmd(dev, &args); + if (err) + PMD_DRV_LOG(ERR, "fail to execute command OP_ENABLE_QUEUES"); + + rte_free(cmd_buffer); + + return err; +} + +static int +i40evf_switch_queue(struct rte_eth_dev *dev, bool isrx, uint16_t qid, + bool on) +{ + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + struct virtchnl_queue_select queue_select; + int err; + struct vf_cmd_info args; + memset(&queue_select, 0, sizeof(queue_select)); + queue_select.vsi_id = vf->vsi_res->vsi_id; + + if (isrx) + queue_select.rx_queues |= 1 << qid; + else + queue_select.tx_queues |= 1 << qid; + + if (on) + args.ops = VIRTCHNL_OP_ENABLE_QUEUES; + else + args.ops = VIRTCHNL_OP_DISABLE_QUEUES; + args.in_args = (u8 *)&queue_select; + args.in_args_size = sizeof(queue_select); + args.out_buffer = vf->aq_resp; + args.out_size = I40E_AQ_BUF_SZ; + err = i40evf_execute_vf_cmd(dev, &args); + if (err) + PMD_DRV_LOG(ERR, "fail to switch %s %u %s", + isrx ? "RX" : "TX", qid, on ? 
"on" : "off"); + + return err; +} + +static int +i40evf_start_queues(struct rte_eth_dev *dev) +{ + struct rte_eth_dev_data *dev_data = dev->data; + int i; + struct i40e_rx_queue *rxq; + struct i40e_tx_queue *txq; + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = dev_data->rx_queues[i]; + if (rxq->rx_deferred_start) + continue; + if (i40evf_dev_rx_queue_start(dev, i) != 0) { + PMD_DRV_LOG(ERR, "Fail to start queue %u", i); + return -1; + } + } + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + txq = dev_data->tx_queues[i]; + if (txq->tx_deferred_start) + continue; + if (i40evf_dev_tx_queue_start(dev, i) != 0) { + PMD_DRV_LOG(ERR, "Fail to start queue %u", i); + return -1; + } + } + + return 0; +} + +static int +i40evf_stop_queues(struct rte_eth_dev *dev) +{ + int i; + + /* Stop TX queues first */ + for (i = 0; i < dev->data->nb_tx_queues; i++) { + if (i40evf_dev_tx_queue_stop(dev, i) != 0) { + PMD_DRV_LOG(ERR, "Fail to stop queue %u", i); + } + } + + /* Then stop RX queues */ + for (i = 0; i < dev->data->nb_rx_queues; i++) { + if (i40evf_dev_rx_queue_stop(dev, i) != 0) { + PMD_DRV_LOG(ERR, "Fail to stop queue %u", i); + } + } + + return 0; +} + +static int +i40evf_add_mac_addr(struct rte_eth_dev *dev, + struct rte_ether_addr *addr, + __rte_unused uint32_t index, + __rte_unused uint32_t pool) +{ + struct virtchnl_ether_addr_list *list; + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + uint8_t cmd_buffer[sizeof(struct virtchnl_ether_addr_list) + \ + sizeof(struct virtchnl_ether_addr)]; + int err; + struct vf_cmd_info args; + + if (rte_is_zero_ether_addr(addr)) { + PMD_DRV_LOG(ERR, "Invalid mac:%x:%x:%x:%x:%x:%x", + addr->addr_bytes[0], addr->addr_bytes[1], + addr->addr_bytes[2], addr->addr_bytes[3], + addr->addr_bytes[4], addr->addr_bytes[5]); + return I40E_ERR_INVALID_MAC_ADDR; + } + + list = (struct virtchnl_ether_addr_list *)cmd_buffer; + list->vsi_id = vf->vsi_res->vsi_id; + list->num_elements = 1; + rte_memcpy(list->list[0].addr, addr->addr_bytes, + sizeof(addr->addr_bytes)); + + args.ops = VIRTCHNL_OP_ADD_ETH_ADDR; + args.in_args = cmd_buffer; + args.in_args_size = sizeof(cmd_buffer); + args.out_buffer = vf->aq_resp; + args.out_size = I40E_AQ_BUF_SZ; + err = i40evf_execute_vf_cmd(dev, &args); + if (err) + PMD_DRV_LOG(ERR, "fail to execute command " + "OP_ADD_ETHER_ADDRESS"); + else + vf->vsi.mac_num++; + + return err; +} + +static void +i40evf_del_mac_addr_by_addr(struct rte_eth_dev *dev, + struct rte_ether_addr *addr) +{ + struct virtchnl_ether_addr_list *list; + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + uint8_t cmd_buffer[sizeof(struct virtchnl_ether_addr_list) + \ + sizeof(struct virtchnl_ether_addr)]; + int err; + struct vf_cmd_info args; + + if (i40e_validate_mac_addr(addr->addr_bytes) != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "Invalid mac:%x-%x-%x-%x-%x-%x", + addr->addr_bytes[0], addr->addr_bytes[1], + addr->addr_bytes[2], addr->addr_bytes[3], + addr->addr_bytes[4], addr->addr_bytes[5]); + return; + } + + list = (struct virtchnl_ether_addr_list *)cmd_buffer; + list->vsi_id = vf->vsi_res->vsi_id; + list->num_elements = 1; + rte_memcpy(list->list[0].addr, addr->addr_bytes, + sizeof(addr->addr_bytes)); + + args.ops = VIRTCHNL_OP_DEL_ETH_ADDR; + args.in_args = cmd_buffer; + args.in_args_size = sizeof(cmd_buffer); + args.out_buffer = vf->aq_resp; + args.out_size = I40E_AQ_BUF_SZ; + err = i40evf_execute_vf_cmd(dev, &args); + if (err) + PMD_DRV_LOG(ERR, "fail to execute command " + "OP_DEL_ETHER_ADDRESS"); + else + 
vf->vsi.mac_num--; + return; +} + +static void +i40evf_del_mac_addr(struct rte_eth_dev *dev, uint32_t index) +{ + struct rte_eth_dev_data *data = dev->data; + struct rte_ether_addr *addr; + + addr = &data->mac_addrs[index]; + + i40evf_del_mac_addr_by_addr(dev, addr); +} + +static int +i40evf_query_stats(struct rte_eth_dev *dev, struct i40e_eth_stats **pstats) +{ + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + struct virtchnl_queue_select q_stats; + int err; + struct vf_cmd_info args; + + memset(&q_stats, 0, sizeof(q_stats)); + q_stats.vsi_id = vf->vsi_res->vsi_id; + args.ops = VIRTCHNL_OP_GET_STATS; + args.in_args = (u8 *)&q_stats; + args.in_args_size = sizeof(q_stats); + args.out_buffer = vf->aq_resp; + args.out_size = I40E_AQ_BUF_SZ; + + err = i40evf_execute_vf_cmd(dev, &args); + if (err) { + PMD_DRV_LOG(ERR, "fail to execute command OP_GET_STATS"); + *pstats = NULL; + return err; + } + *pstats = (struct i40e_eth_stats *)args.out_buffer; + return 0; +} + +static void +i40evf_stat_update_48(uint64_t *offset, + uint64_t *stat) +{ + if (*stat >= *offset) + *stat = *stat - *offset; + else + *stat = (uint64_t)((*stat + + ((uint64_t)1 << I40E_48_BIT_WIDTH)) - *offset); + + *stat &= I40E_48_BIT_MASK; +} + +static void +i40evf_stat_update_32(uint64_t *offset, + uint64_t *stat) +{ + if (*stat >= *offset) + *stat = (uint64_t)(*stat - *offset); + else + *stat = (uint64_t)((*stat + + ((uint64_t)1 << I40E_32_BIT_WIDTH)) - *offset); +} + +static void +i40evf_update_stats(struct i40e_vsi *vsi, + struct i40e_eth_stats *nes) +{ + struct i40e_eth_stats *oes = &vsi->eth_stats_offset; + + i40evf_stat_update_48(&oes->rx_bytes, + &nes->rx_bytes); + i40evf_stat_update_48(&oes->rx_unicast, + &nes->rx_unicast); + i40evf_stat_update_48(&oes->rx_multicast, + &nes->rx_multicast); + i40evf_stat_update_48(&oes->rx_broadcast, + &nes->rx_broadcast); + i40evf_stat_update_32(&oes->rx_discards, + &nes->rx_discards); + i40evf_stat_update_32(&oes->rx_unknown_protocol, + &nes->rx_unknown_protocol); + i40evf_stat_update_48(&oes->tx_bytes, + &nes->tx_bytes); + i40evf_stat_update_48(&oes->tx_unicast, + &nes->tx_unicast); + i40evf_stat_update_48(&oes->tx_multicast, + &nes->tx_multicast); + i40evf_stat_update_48(&oes->tx_broadcast, + &nes->tx_broadcast); + i40evf_stat_update_32(&oes->tx_errors, &nes->tx_errors); + i40evf_stat_update_32(&oes->tx_discards, &nes->tx_discards); +} + +static int +i40evf_dev_xstats_reset(struct rte_eth_dev *dev) +{ + int ret; + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + struct i40e_eth_stats *pstats = NULL; + + /* read stat values to clear hardware registers */ + ret = i40evf_query_stats(dev, &pstats); + + /* set stats offset base on current values */ + if (ret == 0) + vf->vsi.eth_stats_offset = *pstats; + + return ret; +} + +static int i40evf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, + __rte_unused unsigned limit) +{ + unsigned i; + + if (xstats_names != NULL) + for (i = 0; i < I40EVF_NB_XSTATS; i++) { + snprintf(xstats_names[i].name, + sizeof(xstats_names[i].name), + "%s", rte_i40evf_stats_strings[i].name); + } + return I40EVF_NB_XSTATS; +} + +static int i40evf_dev_xstats_get(struct rte_eth_dev *dev, + struct rte_eth_xstat *xstats, unsigned n) +{ + int ret; + unsigned i; + struct i40e_eth_stats *pstats = NULL; + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + struct i40e_vsi *vsi = &vf->vsi; + + if (n < I40EVF_NB_XSTATS) + return I40EVF_NB_XSTATS; + + ret = 
i40evf_query_stats(dev, &pstats); + if (ret != 0) + return 0; + + if (!xstats) + return 0; + + i40evf_update_stats(vsi, pstats); + + /* loop over xstats array and values from pstats */ + for (i = 0; i < I40EVF_NB_XSTATS; i++) { + xstats[i].id = i; + xstats[i].value = *(uint64_t *)(((char *)pstats) + + rte_i40evf_stats_strings[i].offset); + } + + return I40EVF_NB_XSTATS; +} + +static int +i40evf_add_vlan(struct rte_eth_dev *dev, uint16_t vlanid) +{ + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + struct virtchnl_vlan_filter_list *vlan_list; + uint8_t cmd_buffer[sizeof(struct virtchnl_vlan_filter_list) + + sizeof(uint16_t)]; + int err; + struct vf_cmd_info args; + + vlan_list = (struct virtchnl_vlan_filter_list *)cmd_buffer; + vlan_list->vsi_id = vf->vsi_res->vsi_id; + vlan_list->num_elements = 1; + vlan_list->vlan_id[0] = vlanid; + + args.ops = VIRTCHNL_OP_ADD_VLAN; + args.in_args = (u8 *)&cmd_buffer; + args.in_args_size = sizeof(cmd_buffer); + args.out_buffer = vf->aq_resp; + args.out_size = I40E_AQ_BUF_SZ; + err = i40evf_execute_vf_cmd(dev, &args); + if (err) + PMD_DRV_LOG(ERR, "fail to execute command OP_ADD_VLAN"); + + return err; +} + +static int +i40evf_request_queues(struct rte_eth_dev *dev, uint16_t num) +{ + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + struct virtchnl_vf_res_request vfres; + struct vf_cmd_info args; + int err; + + vfres.num_queue_pairs = num; + + args.ops = VIRTCHNL_OP_REQUEST_QUEUES; + args.in_args = (u8 *)&vfres; + args.in_args_size = sizeof(vfres); + args.out_buffer = vf->aq_resp; + args.out_size = I40E_AQ_BUF_SZ; + + rte_eal_alarm_cancel(i40evf_dev_alarm_handler, dev); + err = i40evf_execute_vf_cmd(dev, &args); + if (err) + PMD_DRV_LOG(ERR, "fail to execute command OP_REQUEST_QUEUES"); + + rte_eal_alarm_set(I40EVF_ALARM_INTERVAL, + i40evf_dev_alarm_handler, dev); + return err; +} + +static int +i40evf_del_vlan(struct rte_eth_dev *dev, uint16_t vlanid) +{ + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + struct virtchnl_vlan_filter_list *vlan_list; + uint8_t cmd_buffer[sizeof(struct virtchnl_vlan_filter_list) + + sizeof(uint16_t)]; + int err; + struct vf_cmd_info args; + + vlan_list = (struct virtchnl_vlan_filter_list *)cmd_buffer; + vlan_list->vsi_id = vf->vsi_res->vsi_id; + vlan_list->num_elements = 1; + vlan_list->vlan_id[0] = vlanid; + + args.ops = VIRTCHNL_OP_DEL_VLAN; + args.in_args = (u8 *)&cmd_buffer; + args.in_args_size = sizeof(cmd_buffer); + args.out_buffer = vf->aq_resp; + args.out_size = I40E_AQ_BUF_SZ; + err = i40evf_execute_vf_cmd(dev, &args); + if (err) + PMD_DRV_LOG(ERR, "fail to execute command OP_DEL_VLAN"); + + return err; +} + +static const struct rte_pci_id pci_id_i40evf_map[] = { + { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF) }, + { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF_HV) }, + { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_A0_VF) }, + { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_VF) }, + { .vendor_id = 0, /* sentinel */ }, +}; + +/* Disable IRQ0 */ +static inline void +i40evf_disable_irq0(struct i40e_hw *hw) +{ + /* Disable all interrupt types */ + I40E_WRITE_REG(hw, I40E_VFINT_ICR0_ENA1, 0); + I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01, + I40E_VFINT_DYN_CTL01_ITR_INDX_MASK); + I40EVF_WRITE_FLUSH(hw); +} + +/* Enable IRQ0 */ +static inline void +i40evf_enable_irq0(struct i40e_hw *hw) +{ + /* Enable admin queue interrupt trigger */ + uint32_t val; + + i40evf_disable_irq0(hw); + val = I40E_READ_REG(hw, 
I40E_VFINT_ICR0_ENA1); + val |= I40E_VFINT_ICR0_ENA1_ADMINQ_MASK | + I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_MASK; + I40E_WRITE_REG(hw, I40E_VFINT_ICR0_ENA1, val); + + I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01, + I40E_VFINT_DYN_CTL01_INTENA_MASK | + I40E_VFINT_DYN_CTL01_CLEARPBA_MASK | + I40E_VFINT_DYN_CTL01_ITR_INDX_MASK); + + I40EVF_WRITE_FLUSH(hw); +} + +static int +i40evf_check_vf_reset_done(struct rte_eth_dev *dev) +{ + int i, reset; + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + + for (i = 0; i < MAX_RESET_WAIT_CNT; i++) { + reset = I40E_READ_REG(hw, I40E_VFGEN_RSTAT) & + I40E_VFGEN_RSTAT_VFR_STATE_MASK; + reset = reset >> I40E_VFGEN_RSTAT_VFR_STATE_SHIFT; + if (reset == VIRTCHNL_VFR_VFACTIVE || + reset == VIRTCHNL_VFR_COMPLETED) + break; + rte_delay_ms(50); + } + + if (i >= MAX_RESET_WAIT_CNT) + return -1; + + vf->vf_reset = false; + vf->pend_msg &= ~PFMSG_RESET_IMPENDING; + + return 0; +} +static int +i40evf_reset_vf(struct rte_eth_dev *dev) +{ + int ret; + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (i40e_vf_reset(hw) != I40E_SUCCESS) { + PMD_INIT_LOG(ERR, "Reset VF NIC failed"); + return -1; + } + /** + * After issuing vf reset command to pf, pf won't necessarily + * reset vf, it depends on what state it exactly is. If it's not + * initialized yet, it won't have vf reset since it's in a certain + * state. If not, it will try to reset. Even vf is reset, pf will + * set I40E_VFGEN_RSTAT to COMPLETE first, then wait 10ms and set + * it to ACTIVE. In this duration, vf may not catch the moment that + * COMPLETE is set. So, for vf, we'll try to wait a long time. + */ + rte_delay_ms(200); + + ret = i40evf_check_vf_reset_done(dev); + if (ret) { + PMD_INIT_LOG(ERR, "VF is still resetting"); + return ret; + } + + return 0; +} + +static int +i40evf_init_vf(struct rte_eth_dev *dev) +{ + int i, err, bufsz; + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + uint16_t interval = + i40e_calc_itr_interval(0, 0); + + vf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + vf->dev_data = dev->data; + err = i40e_set_mac_type(hw); + if (err) { + PMD_INIT_LOG(ERR, "set_mac_type failed: %d", err); + goto err; + } + + err = i40evf_check_vf_reset_done(dev); + if (err) + goto err; + + i40e_init_adminq_parameter(hw); + err = i40e_init_adminq(hw); + if (err) { + PMD_INIT_LOG(ERR, "init_adminq failed: %d", err); + goto err; + } + + /* Reset VF and wait until it's complete */ + if (i40evf_reset_vf(dev)) { + PMD_INIT_LOG(ERR, "reset NIC failed"); + goto err_aq; + } + + /* VF reset, shutdown admin queue and initialize again */ + if (i40e_shutdown_adminq(hw) != I40E_SUCCESS) { + PMD_INIT_LOG(ERR, "i40e_shutdown_adminq failed"); + goto err; + } + + i40e_init_adminq_parameter(hw); + if (i40e_init_adminq(hw) != I40E_SUCCESS) { + PMD_INIT_LOG(ERR, "init_adminq failed"); + goto err; + } + + vf->aq_resp = rte_zmalloc("vf_aq_resp", I40E_AQ_BUF_SZ, 0); + if (!vf->aq_resp) { + PMD_INIT_LOG(ERR, "unable to allocate vf_aq_resp memory"); + goto err_aq; + } + if (i40evf_check_api_version(dev) != 0) { + PMD_INIT_LOG(ERR, "check_api version failed"); + goto err_api; + } + bufsz = sizeof(struct virtchnl_vf_resource) + + (I40E_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource)); + vf->vf_res = rte_zmalloc("vf_res", bufsz, 0); + if (!vf->vf_res) { + PMD_INIT_LOG(ERR, "unable to allocate vf_res 
memory"); + goto err_api; + } + + if (i40evf_get_vf_resource(dev) != 0) { + PMD_INIT_LOG(ERR, "i40evf_get_vf_config failed"); + goto err_alloc; + } + + /* got VF config message back from PF, now we can parse it */ + for (i = 0; i < vf->vf_res->num_vsis; i++) { + if (vf->vf_res->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV) + vf->vsi_res = &vf->vf_res->vsi_res[i]; + } + + if (!vf->vsi_res) { + PMD_INIT_LOG(ERR, "no LAN VSI found"); + goto err_alloc; + } + + if (hw->mac.type == I40E_MAC_X722_VF) + vf->flags = I40E_FLAG_RSS_AQ_CAPABLE; + vf->vsi.vsi_id = vf->vsi_res->vsi_id; + + switch (vf->vsi_res->vsi_type) { + case VIRTCHNL_VSI_SRIOV: + vf->vsi.type = I40E_VSI_SRIOV; + break; + default: + vf->vsi.type = I40E_VSI_TYPE_UNKNOWN; + break; + } + vf->vsi.nb_qps = vf->vsi_res->num_queue_pairs; + vf->vsi.adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + + /* Store the MAC address configured by host, or generate random one */ + if (!rte_is_valid_assigned_ether_addr( + (struct rte_ether_addr *)hw->mac.addr)) + rte_eth_random_addr(hw->mac.addr); /* Generate a random one */ + + I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01, + (I40E_ITR_INDEX_DEFAULT << + I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT) | + (interval << + I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT)); + I40EVF_WRITE_FLUSH(hw); + + return 0; + +err_alloc: + rte_free(vf->vf_res); + vf->vsi_res = NULL; +err_api: + rte_free(vf->aq_resp); +err_aq: + i40e_shutdown_adminq(hw); /* ignore error */ +err: + return -1; +} + +static int +i40evf_uninit_vf(struct rte_eth_dev *dev) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + PMD_INIT_FUNC_TRACE(); + + if (hw->adapter_closed == 0) + i40evf_dev_close(dev); + + return 0; +} + +static void +i40evf_handle_pf_event(struct rte_eth_dev *dev, uint8_t *msg, + __rte_unused uint16_t msglen) +{ + struct virtchnl_pf_event *pf_msg = + (struct virtchnl_pf_event *)msg; + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + + switch (pf_msg->event) { + case VIRTCHNL_EVENT_RESET_IMPENDING: + PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_RESET_IMPENDING event"); + _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET, + NULL); + break; + case VIRTCHNL_EVENT_LINK_CHANGE: + PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_LINK_CHANGE event"); + vf->link_up = pf_msg->event_data.link_event.link_status; + vf->link_speed = pf_msg->event_data.link_event.link_speed; + break; + case VIRTCHNL_EVENT_PF_DRIVER_CLOSE: + PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_PF_DRIVER_CLOSE event"); + break; + default: + PMD_DRV_LOG(ERR, " unknown event received %u", pf_msg->event); + break; + } +} + +static void +i40evf_handle_aq_msg(struct rte_eth_dev *dev) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + struct i40e_arq_event_info info; + uint16_t pending, aq_opc; + enum virtchnl_ops msg_opc; + enum i40e_status_code msg_ret; + int ret; + + info.buf_len = I40E_AQ_BUF_SZ; + if (!vf->aq_resp) { + PMD_DRV_LOG(ERR, "Buffer for adminq resp should not be NULL"); + return; + } + info.msg_buf = vf->aq_resp; + + pending = 1; + while (pending) { + ret = i40e_clean_arq_element(hw, &info, &pending); + + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(INFO, "Failed to read msg from AdminQ," + "ret: %d", ret); + break; + } + aq_opc = rte_le_to_cpu_16(info.desc.opcode); + /* For the message sent from pf to vf, opcode is stored in + * cookie_high of struct i40e_aq_desc, while return error code + * are stored in cookie_low, Which is done by + * 
i40e_aq_send_msg_to_vf in PF driver.*/ + msg_opc = (enum virtchnl_ops)rte_le_to_cpu_32( + info.desc.cookie_high); + msg_ret = (enum i40e_status_code)rte_le_to_cpu_32( + info.desc.cookie_low); + switch (aq_opc) { + case i40e_aqc_opc_send_msg_to_vf: + if (msg_opc == VIRTCHNL_OP_EVENT) + /* process event*/ + i40evf_handle_pf_event(dev, info.msg_buf, + info.msg_len); + else { + /* read message and it's expected one */ + if (msg_opc == vf->pend_cmd) { + vf->cmd_retval = msg_ret; + /* prevent compiler reordering */ + rte_compiler_barrier(); + _clear_cmd(vf); + } else + PMD_DRV_LOG(ERR, "command mismatch," + "expect %u, get %u", + vf->pend_cmd, msg_opc); + PMD_DRV_LOG(DEBUG, "adminq response is received," + " opcode = %d", msg_opc); + } + break; + default: + PMD_DRV_LOG(DEBUG, "Request %u is not supported yet", + aq_opc); + break; + } + } +} + +/** + * Interrupt handler triggered by NIC for handling + * specific interrupt. Only adminq interrupt is processed in VF. + * + * @param handle + * Pointer to interrupt handle. + * @param param + * The address of parameter (struct rte_eth_dev *) regsitered before. + * + * @return + * void + */ +static void +i40evf_dev_alarm_handler(void *param) +{ + struct rte_eth_dev *dev = (struct rte_eth_dev *)param; + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t icr0; + + i40evf_disable_irq0(hw); + + /* read out interrupt causes */ + icr0 = I40E_READ_REG(hw, I40E_VFINT_ICR01); + + /* No interrupt event indicated */ + if (!(icr0 & I40E_VFINT_ICR01_INTEVENT_MASK)) + goto done; + + if (icr0 & I40E_VFINT_ICR01_ADMINQ_MASK) { + PMD_DRV_LOG(DEBUG, "ICR01_ADMINQ is reported"); + i40evf_handle_aq_msg(dev); + } + + /* Link Status Change interrupt */ + if (icr0 & I40E_VFINT_ICR01_LINK_STAT_CHANGE_MASK) + PMD_DRV_LOG(DEBUG, "LINK_STAT_CHANGE is reported," + " do nothing"); + +done: + i40evf_enable_irq0(hw); + rte_eal_alarm_set(I40EVF_ALARM_INTERVAL, + i40evf_dev_alarm_handler, dev); +} + +static int +i40evf_dev_init(struct rte_eth_dev *eth_dev) +{ + struct i40e_hw *hw + = I40E_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); + + PMD_INIT_FUNC_TRACE(); + + /* assign ops func pointer */ + eth_dev->dev_ops = &i40evf_eth_dev_ops; + eth_dev->rx_pkt_burst = &i40e_recv_pkts; + eth_dev->tx_pkt_burst = &i40e_xmit_pkts; + + /* + * For secondary processes, we don't initialise any further as primary + * has already done this work. + */ + if (rte_eal_process_type() != RTE_PROC_PRIMARY){ + i40e_set_rx_function(eth_dev); + i40e_set_tx_function(eth_dev); + return 0; + } + i40e_set_default_ptype_table(eth_dev); + rte_eth_copy_pci_info(eth_dev, pci_dev); + + hw->vendor_id = pci_dev->id.vendor_id; + hw->device_id = pci_dev->id.device_id; + hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id; + hw->subsystem_device_id = pci_dev->id.subsystem_device_id; + hw->bus.device = pci_dev->addr.devid; + hw->bus.func = pci_dev->addr.function; + hw->hw_addr = (void *)pci_dev->mem_resource[0].addr; + hw->adapter_stopped = 0; + hw->adapter_closed = 0; + + /* Pass the information to the rte_eth_dev_close() that it should also + * release the private port resources. 
+ */ + eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE; + + if(i40evf_init_vf(eth_dev) != 0) { + PMD_INIT_LOG(ERR, "Init vf failed"); + return -1; + } + + i40e_set_default_pctype_table(eth_dev); + rte_eal_alarm_set(I40EVF_ALARM_INTERVAL, + i40evf_dev_alarm_handler, eth_dev); + + /* configure and enable device interrupt */ + i40evf_enable_irq0(hw); + + /* copy mac addr */ + eth_dev->data->mac_addrs = rte_zmalloc("i40evf_mac", + RTE_ETHER_ADDR_LEN * I40E_NUM_MACADDR_MAX, + 0); + if (eth_dev->data->mac_addrs == NULL) { + PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to" + " store MAC addresses", + RTE_ETHER_ADDR_LEN * I40E_NUM_MACADDR_MAX); + return -ENOMEM; + } + rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr, + ð_dev->data->mac_addrs[0]); + + return 0; +} + +static int +i40evf_dev_uninit(struct rte_eth_dev *eth_dev) +{ + PMD_INIT_FUNC_TRACE(); + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return -EPERM; + + if (i40evf_uninit_vf(eth_dev) != 0) { + PMD_INIT_LOG(ERR, "i40evf_uninit_vf failed"); + return -1; + } + + return 0; +} + +static int eth_i40evf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, + struct rte_pci_device *pci_dev) +{ + return rte_eth_dev_pci_generic_probe(pci_dev, + sizeof(struct i40e_adapter), i40evf_dev_init); +} + +static int eth_i40evf_pci_remove(struct rte_pci_device *pci_dev) +{ + return rte_eth_dev_pci_generic_remove(pci_dev, i40evf_dev_uninit); +} + +/* + * virtual function driver struct + */ +static struct rte_pci_driver rte_i40evf_pmd = { + .id_table = pci_id_i40evf_map, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING, + .probe = eth_i40evf_pci_probe, + .remove = eth_i40evf_pci_remove, +}; + +RTE_PMD_REGISTER_PCI(net_i40e_vf, rte_i40evf_pmd); +RTE_PMD_REGISTER_PCI_TABLE(net_i40e_vf, pci_id_i40evf_map); +RTE_PMD_REGISTER_KMOD_DEP(net_i40e_vf, "* igb_uio | vfio-pci"); + +static int +i40evf_dev_configure(struct rte_eth_dev *dev) +{ + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + struct i40e_adapter *ad = + I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + uint16_t num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues, + dev->data->nb_tx_queues); + + /* Initialize to TRUE. If any of Rx queues doesn't meet the bulk + * allocation or vector Rx preconditions we will reset it. 
+ */ + ad->rx_bulk_alloc_allowed = true; + ad->rx_vec_allowed = true; + ad->tx_simple_allowed = true; + ad->tx_vec_allowed = true; + + if (num_queue_pairs > vf->vsi_res->num_queue_pairs) { + int ret = 0; + + PMD_DRV_LOG(INFO, "change queue pairs from %u to %u", + vf->vsi_res->num_queue_pairs, num_queue_pairs); + ret = i40evf_request_queues(dev, num_queue_pairs); + if (ret != 0) + return ret; + + ret = i40evf_dev_reset(dev); + if (ret != 0) + return ret; + } + + return i40evf_init_vlan(dev); +} + +static int +i40evf_init_vlan(struct rte_eth_dev *dev) +{ + /* Apply vlan offload setting */ + i40evf_vlan_offload_set(dev, ETH_VLAN_STRIP_MASK); + + return 0; +} + +static int +i40evf_vlan_offload_set(struct rte_eth_dev *dev, int mask) +{ + struct rte_eth_conf *dev_conf = &dev->data->dev_conf; + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + + if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)) + return -ENOTSUP; + + /* Vlan stripping setting */ + if (mask & ETH_VLAN_STRIP_MASK) { + /* Enable or disable VLAN stripping */ + if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP) + i40evf_enable_vlan_strip(dev); + else + i40evf_disable_vlan_strip(dev); + } + + return 0; +} + +static int +i40evf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) +{ + struct i40e_rx_queue *rxq; + int err; + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + PMD_INIT_FUNC_TRACE(); + + rxq = dev->data->rx_queues[rx_queue_id]; + + err = i40e_alloc_rx_queue_mbufs(rxq); + if (err) { + PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf"); + return err; + } + + rte_wmb(); + + /* Init the RX tail register. */ + I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1); + I40EVF_WRITE_FLUSH(hw); + + /* Ready to switch the queue on */ + err = i40evf_switch_queue(dev, TRUE, rx_queue_id, TRUE); + if (err) { + PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on", + rx_queue_id); + return err; + } + dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; + + return 0; +} + +static int +i40evf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id) +{ + struct i40e_rx_queue *rxq; + int err; + + rxq = dev->data->rx_queues[rx_queue_id]; + + err = i40evf_switch_queue(dev, TRUE, rx_queue_id, FALSE); + if (err) { + PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off", + rx_queue_id); + return err; + } + + i40e_rx_queue_release_mbufs(rxq); + i40e_reset_rx_queue(rxq); + dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; + + return 0; +} + +static int +i40evf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id) +{ + int err; + + PMD_INIT_FUNC_TRACE(); + + /* Ready to switch the queue on */ + err = i40evf_switch_queue(dev, FALSE, tx_queue_id, TRUE); + if (err) { + PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on", + tx_queue_id); + return err; + } + dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; + + return 0; +} + +static int +i40evf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id) +{ + struct i40e_tx_queue *txq; + int err; + + txq = dev->data->tx_queues[tx_queue_id]; + + err = i40evf_switch_queue(dev, FALSE, tx_queue_id, FALSE); + if (err) { + PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off", + tx_queue_id); + return err; + } + + i40e_tx_queue_release_mbufs(txq); + i40e_reset_tx_queue(txq); + dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; + + return 0; +} + +static int +i40evf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) +{ + int 
ret; + + if (on) + ret = i40evf_add_vlan(dev, vlan_id); + else + ret = i40evf_del_vlan(dev,vlan_id); + + return ret; +} + +static int +i40evf_rxq_init(struct rte_eth_dev *dev, struct i40e_rx_queue *rxq) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_eth_dev_data *dev_data = dev->data; + struct rte_pktmbuf_pool_private *mbp_priv; + uint16_t buf_size, len; + + rxq->qrx_tail = hw->hw_addr + I40E_QRX_TAIL1(rxq->queue_id); + I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1); + I40EVF_WRITE_FLUSH(hw); + + /* Calculate the maximum packet length allowed */ + mbp_priv = rte_mempool_get_priv(rxq->mp); + buf_size = (uint16_t)(mbp_priv->mbuf_data_room_size - + RTE_PKTMBUF_HEADROOM); + rxq->hs_mode = i40e_header_split_none; + rxq->rx_hdr_len = 0; + rxq->rx_buf_len = RTE_ALIGN(buf_size, (1 << I40E_RXQ_CTX_DBUFF_SHIFT)); + len = rxq->rx_buf_len * I40E_MAX_CHAINED_RX_BUFFERS; + rxq->max_pkt_len = RTE_MIN(len, + dev_data->dev_conf.rxmode.max_rx_pkt_len); + + /** + * Check if the jumbo frame and maximum packet length are set correctly + */ + if (dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) { + if (rxq->max_pkt_len <= RTE_ETHER_MAX_LEN || + rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) { + PMD_DRV_LOG(ERR, "maximum packet length must be " + "larger than %u and smaller than %u, as jumbo " + "frame is enabled", (uint32_t)RTE_ETHER_MAX_LEN, + (uint32_t)I40E_FRAME_SIZE_MAX); + return I40E_ERR_CONFIG; + } + } else { + if (rxq->max_pkt_len < RTE_ETHER_MIN_LEN || + rxq->max_pkt_len > RTE_ETHER_MAX_LEN) { + PMD_DRV_LOG(ERR, "maximum packet length must be " + "larger than %u and smaller than %u, as jumbo " + "frame is disabled", + (uint32_t)RTE_ETHER_MIN_LEN, + (uint32_t)RTE_ETHER_MAX_LEN); + return I40E_ERR_CONFIG; + } + } + + if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) || + rxq->max_pkt_len > buf_size) + dev_data->scattered_rx = 1; + + return 0; +} + +static int +i40evf_rx_init(struct rte_eth_dev *dev) +{ + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + uint16_t i; + int ret = I40E_SUCCESS; + struct i40e_rx_queue **rxq = + (struct i40e_rx_queue **)dev->data->rx_queues; + + i40evf_config_rss(vf); + for (i = 0; i < dev->data->nb_rx_queues; i++) { + if (!rxq[i] || !rxq[i]->q_set) + continue; + ret = i40evf_rxq_init(dev, rxq[i]); + if (ret != I40E_SUCCESS) + break; + } + if (ret == I40E_SUCCESS) + i40e_set_rx_function(dev); + + return ret; +} + +static void +i40evf_tx_init(struct rte_eth_dev *dev) +{ + uint16_t i; + struct i40e_tx_queue **txq = + (struct i40e_tx_queue **)dev->data->tx_queues; + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + for (i = 0; i < dev->data->nb_tx_queues; i++) + txq[i]->qtx_tail = hw->hw_addr + I40E_QTX_TAIL1(i); + + i40e_set_tx_function(dev); +} + +static inline void +i40evf_enable_queues_intr(struct rte_eth_dev *dev) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + + if (!rte_intr_allow_others(intr_handle)) { + I40E_WRITE_REG(hw, + I40E_VFINT_DYN_CTL01, + I40E_VFINT_DYN_CTL01_INTENA_MASK | + I40E_VFINT_DYN_CTL01_CLEARPBA_MASK | + I40E_VFINT_DYN_CTL01_ITR_INDX_MASK); + I40EVF_WRITE_FLUSH(hw); + return; + } + + I40EVF_WRITE_FLUSH(hw); +} + +static inline void +i40evf_disable_queues_intr(struct rte_eth_dev *dev) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_pci_device *pci_dev 
= RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + + if (!rte_intr_allow_others(intr_handle)) { + I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01, + I40E_VFINT_DYN_CTL01_ITR_INDX_MASK); + I40EVF_WRITE_FLUSH(hw); + return; + } + + I40EVF_WRITE_FLUSH(hw); +} + +static int +i40evf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) +{ + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint16_t interval = + i40e_calc_itr_interval(0, 0); + uint16_t msix_intr; + + msix_intr = intr_handle->intr_vec[queue_id]; + if (msix_intr == I40E_MISC_VEC_ID) + I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01, + I40E_VFINT_DYN_CTL01_INTENA_MASK | + I40E_VFINT_DYN_CTL01_CLEARPBA_MASK | + (0 << I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT) | + (interval << + I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT)); + else + I40E_WRITE_REG(hw, + I40E_VFINT_DYN_CTLN1(msix_intr - + I40E_RX_VEC_START), + I40E_VFINT_DYN_CTLN1_INTENA_MASK | + I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK | + (0 << I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) | + (interval << + I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT)); + + I40EVF_WRITE_FLUSH(hw); + + return 0; +} + +static int +i40evf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) +{ + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint16_t msix_intr; + + msix_intr = intr_handle->intr_vec[queue_id]; + if (msix_intr == I40E_MISC_VEC_ID) + I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01, 0); + else + I40E_WRITE_REG(hw, + I40E_VFINT_DYN_CTLN1(msix_intr - + I40E_RX_VEC_START), + 0); + + I40EVF_WRITE_FLUSH(hw); + + return 0; +} + +static void +i40evf_add_del_all_mac_addr(struct rte_eth_dev *dev, bool add) +{ + struct virtchnl_ether_addr_list *list; + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + int err, i, j; + int next_begin = 0; + int begin = 0; + uint32_t len; + struct rte_ether_addr *addr; + struct vf_cmd_info args; + + do { + j = 0; + len = sizeof(struct virtchnl_ether_addr_list); + for (i = begin; i < I40E_NUM_MACADDR_MAX; i++, next_begin++) { + if (rte_is_zero_ether_addr(&dev->data->mac_addrs[i])) + continue; + len += sizeof(struct virtchnl_ether_addr); + if (len >= I40E_AQ_BUF_SZ) { + next_begin = i + 1; + break; + } + } + + list = rte_zmalloc("i40evf_del_mac_buffer", len, 0); + if (!list) { + PMD_DRV_LOG(ERR, "fail to allocate memory"); + return; + } + + for (i = begin; i < next_begin; i++) { + addr = &dev->data->mac_addrs[i]; + if (rte_is_zero_ether_addr(addr)) + continue; + rte_memcpy(list->list[j].addr, addr->addr_bytes, + sizeof(addr->addr_bytes)); + PMD_DRV_LOG(DEBUG, "add/rm mac:%x:%x:%x:%x:%x:%x", + addr->addr_bytes[0], addr->addr_bytes[1], + addr->addr_bytes[2], addr->addr_bytes[3], + addr->addr_bytes[4], addr->addr_bytes[5]); + j++; + } + list->vsi_id = vf->vsi_res->vsi_id; + list->num_elements = j; + args.ops = add ? VIRTCHNL_OP_ADD_ETH_ADDR : + VIRTCHNL_OP_DEL_ETH_ADDR; + args.in_args = (uint8_t *)list; + args.in_args_size = len; + args.out_buffer = vf->aq_resp; + args.out_size = I40E_AQ_BUF_SZ; + err = i40evf_execute_vf_cmd(dev, &args); + if (err) { + PMD_DRV_LOG(ERR, "fail to execute command %s", + add ? 
"OP_ADD_ETHER_ADDRESS" : + "OP_DEL_ETHER_ADDRESS"); + } else { + if (add) + vf->vsi.mac_num++; + else + vf->vsi.mac_num--; + } + rte_free(list); + begin = next_begin; + } while (begin < I40E_NUM_MACADDR_MAX); +} + +static int +i40evf_dev_start(struct rte_eth_dev *dev) +{ + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + uint32_t intr_vector = 0; + + PMD_INIT_FUNC_TRACE(); + + hw->adapter_stopped = 0; + + vf->max_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len; + vf->num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues, + dev->data->nb_tx_queues); + + /* check and configure queue intr-vector mapping */ + if (rte_intr_cap_multiple(intr_handle) && + dev->data->dev_conf.intr_conf.rxq) { + intr_vector = dev->data->nb_rx_queues; + if (rte_intr_efd_enable(intr_handle, intr_vector)) + return -1; + } + + if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) { + intr_handle->intr_vec = + rte_zmalloc("intr_vec", + dev->data->nb_rx_queues * sizeof(int), 0); + if (!intr_handle->intr_vec) { + PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues" + " intr_vec", dev->data->nb_rx_queues); + return -ENOMEM; + } + } + + if (i40evf_rx_init(dev) != 0){ + PMD_DRV_LOG(ERR, "failed to do RX init"); + return -1; + } + + i40evf_tx_init(dev); + + if (i40evf_configure_vsi_queues(dev) != 0) { + PMD_DRV_LOG(ERR, "configure queues failed"); + goto err_queue; + } + if (i40evf_config_irq_map(dev)) { + PMD_DRV_LOG(ERR, "config_irq_map failed"); + goto err_queue; + } + + /* Set all mac addrs */ + i40evf_add_del_all_mac_addr(dev, TRUE); + /* Set all multicast addresses */ + i40evf_add_del_mc_addr_list(dev, vf->mc_addrs, vf->mc_addrs_num, + TRUE); + + if (i40evf_start_queues(dev) != 0) { + PMD_DRV_LOG(ERR, "enable queues failed"); + goto err_mac; + } + + /* only enable interrupt in rx interrupt mode */ + if (dev->data->dev_conf.intr_conf.rxq != 0) + rte_intr_enable(intr_handle); + + i40evf_enable_queues_intr(dev); + + return 0; + +err_mac: + i40evf_add_del_all_mac_addr(dev, FALSE); + i40evf_add_del_mc_addr_list(dev, vf->mc_addrs, vf->mc_addrs_num, + FALSE); +err_queue: + return -1; +} + +static void +i40evf_dev_stop(struct rte_eth_dev *dev) +{ + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + + PMD_INIT_FUNC_TRACE(); + + if (dev->data->dev_conf.intr_conf.rxq != 0) + rte_intr_disable(intr_handle); + + if (hw->adapter_stopped == 1) + return; + i40evf_stop_queues(dev); + i40evf_disable_queues_intr(dev); + i40e_dev_clear_queues(dev); + + /* Clean datapath event and queue/vec mapping */ + rte_intr_efd_disable(intr_handle); + if (intr_handle->intr_vec) { + rte_free(intr_handle->intr_vec); + intr_handle->intr_vec = NULL; + } + /* remove all mac addrs */ + i40evf_add_del_all_mac_addr(dev, FALSE); + /* remove all multicast addresses */ + i40evf_add_del_mc_addr_list(dev, vf->mc_addrs, vf->mc_addrs_num, + FALSE); + hw->adapter_stopped = 1; + +} + +static int +i40evf_dev_link_update(struct rte_eth_dev *dev, + __rte_unused int wait_to_complete) +{ + struct rte_eth_link new_link; + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + /* + * DPDK pf host provide interfacet to acquire 
link status + * while Linux driver does not + */ + + memset(&new_link, 0, sizeof(new_link)); + /* Linux driver PF host */ + switch (vf->link_speed) { + case I40E_LINK_SPEED_100MB: + new_link.link_speed = ETH_SPEED_NUM_100M; + break; + case I40E_LINK_SPEED_1GB: + new_link.link_speed = ETH_SPEED_NUM_1G; + break; + case I40E_LINK_SPEED_10GB: + new_link.link_speed = ETH_SPEED_NUM_10G; + break; + case I40E_LINK_SPEED_20GB: + new_link.link_speed = ETH_SPEED_NUM_20G; + break; + case I40E_LINK_SPEED_25GB: + new_link.link_speed = ETH_SPEED_NUM_25G; + break; + case I40E_LINK_SPEED_40GB: + new_link.link_speed = ETH_SPEED_NUM_40G; + break; + default: + new_link.link_speed = ETH_SPEED_NUM_NONE; + break; + } + /* full duplex only */ + new_link.link_duplex = ETH_LINK_FULL_DUPLEX; + new_link.link_status = vf->link_up && + new_link.link_speed != ETH_SPEED_NUM_NONE + ? ETH_LINK_UP + : ETH_LINK_DOWN; + new_link.link_autoneg = + !(dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED); + + return rte_eth_linkstatus_set(dev, &new_link); +} + +static int +i40evf_dev_promiscuous_enable(struct rte_eth_dev *dev) +{ + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + int ret; + + ret = i40evf_config_promisc(dev, 1, vf->promisc_multicast_enabled); + if (ret == 0) + vf->promisc_unicast_enabled = TRUE; + else if (ret == I40E_NOT_SUPPORTED) + ret = -ENOTSUP; + else + ret = -EAGAIN; + + return ret; +} + +static int +i40evf_dev_promiscuous_disable(struct rte_eth_dev *dev) +{ + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + int ret; + + ret = i40evf_config_promisc(dev, 0, vf->promisc_multicast_enabled); + if (ret == 0) + vf->promisc_unicast_enabled = FALSE; + else if (ret == I40E_NOT_SUPPORTED) + ret = -ENOTSUP; + else + ret = -EAGAIN; + + return ret; +} + +static int +i40evf_dev_allmulticast_enable(struct rte_eth_dev *dev) +{ + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + int ret; + + ret = i40evf_config_promisc(dev, vf->promisc_unicast_enabled, 1); + if (ret == 0) + vf->promisc_multicast_enabled = TRUE; + else if (ret == I40E_NOT_SUPPORTED) + ret = -ENOTSUP; + else + ret = -EAGAIN; + + return ret; +} + +static int +i40evf_dev_allmulticast_disable(struct rte_eth_dev *dev) +{ + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + int ret; + + ret = i40evf_config_promisc(dev, vf->promisc_unicast_enabled, 0); + if (ret == 0) + vf->promisc_multicast_enabled = FALSE; + else if (ret == I40E_NOT_SUPPORTED) + ret = -ENOTSUP; + else + ret = -EAGAIN; + + return ret; +} + +static int +i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) +{ + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + + dev_info->max_rx_queues = I40E_MAX_QP_NUM_PER_VF; + dev_info->max_tx_queues = I40E_MAX_QP_NUM_PER_VF; + dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN; + dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX; + dev_info->max_mtu = dev_info->max_rx_pktlen - I40E_ETH_OVERHEAD; + dev_info->min_mtu = RTE_ETHER_MIN_MTU; + dev_info->hash_key_size = (I40E_VFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t); + dev_info->reta_size = ETH_RSS_RETA_SIZE_64; + dev_info->flow_type_rss_offloads = vf->adapter->flow_types_mask; + dev_info->max_mac_addrs = I40E_NUM_MACADDR_MAX; + dev_info->rx_queue_offload_capa = 0; + dev_info->rx_offload_capa = + DEV_RX_OFFLOAD_VLAN_STRIP | + DEV_RX_OFFLOAD_QINQ_STRIP | + DEV_RX_OFFLOAD_IPV4_CKSUM | + DEV_RX_OFFLOAD_UDP_CKSUM | + DEV_RX_OFFLOAD_TCP_CKSUM | + DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | + 
DEV_RX_OFFLOAD_SCATTER | + DEV_RX_OFFLOAD_JUMBO_FRAME | + DEV_RX_OFFLOAD_VLAN_FILTER; + + dev_info->tx_queue_offload_capa = 0; + dev_info->tx_offload_capa = + DEV_TX_OFFLOAD_VLAN_INSERT | + DEV_TX_OFFLOAD_QINQ_INSERT | + DEV_TX_OFFLOAD_IPV4_CKSUM | + DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_TCP_CKSUM | + DEV_TX_OFFLOAD_SCTP_CKSUM | + DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | + DEV_TX_OFFLOAD_TCP_TSO | + DEV_TX_OFFLOAD_VXLAN_TNL_TSO | + DEV_TX_OFFLOAD_GRE_TNL_TSO | + DEV_TX_OFFLOAD_IPIP_TNL_TSO | + DEV_TX_OFFLOAD_GENEVE_TNL_TSO | + DEV_TX_OFFLOAD_MULTI_SEGS; + + dev_info->default_rxconf = (struct rte_eth_rxconf) { + .rx_thresh = { + .pthresh = I40E_DEFAULT_RX_PTHRESH, + .hthresh = I40E_DEFAULT_RX_HTHRESH, + .wthresh = I40E_DEFAULT_RX_WTHRESH, + }, + .rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH, + .rx_drop_en = 0, + .offloads = 0, + }; + + dev_info->default_txconf = (struct rte_eth_txconf) { + .tx_thresh = { + .pthresh = I40E_DEFAULT_TX_PTHRESH, + .hthresh = I40E_DEFAULT_TX_HTHRESH, + .wthresh = I40E_DEFAULT_TX_WTHRESH, + }, + .tx_free_thresh = I40E_DEFAULT_TX_FREE_THRESH, + .tx_rs_thresh = I40E_DEFAULT_TX_RSBIT_THRESH, + .offloads = 0, + }; + + dev_info->rx_desc_lim = (struct rte_eth_desc_lim) { + .nb_max = I40E_MAX_RING_DESC, + .nb_min = I40E_MIN_RING_DESC, + .nb_align = I40E_ALIGN_RING_DESC, + }; + + dev_info->tx_desc_lim = (struct rte_eth_desc_lim) { + .nb_max = I40E_MAX_RING_DESC, + .nb_min = I40E_MIN_RING_DESC, + .nb_align = I40E_ALIGN_RING_DESC, + }; + + return 0; +} + +static int +i40evf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) +{ + int ret; + struct i40e_eth_stats *pstats = NULL; + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + struct i40e_vsi *vsi = &vf->vsi; + + ret = i40evf_query_stats(dev, &pstats); + if (ret == 0) { + i40evf_update_stats(vsi, pstats); + + stats->ipackets = pstats->rx_unicast + pstats->rx_multicast + + pstats->rx_broadcast; + stats->opackets = pstats->tx_broadcast + pstats->tx_multicast + + pstats->tx_unicast; + stats->imissed = pstats->rx_discards; + stats->oerrors = pstats->tx_errors + pstats->tx_discards; + stats->ibytes = pstats->rx_bytes; + stats->obytes = pstats->tx_bytes; + } else { + PMD_DRV_LOG(ERR, "Get statistics failed"); + } + return ret; +} + +static void +i40evf_dev_close(struct rte_eth_dev *dev) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + + i40evf_dev_stop(dev); + i40e_dev_free_queues(dev); + /* + * disable promiscuous mode before reset vf + * it is a workaround solution when work with kernel driver + * and it is not the normal way + */ + i40evf_dev_promiscuous_disable(dev); + i40evf_dev_allmulticast_disable(dev); + rte_eal_alarm_cancel(i40evf_dev_alarm_handler, dev); + + i40evf_reset_vf(dev); + i40e_shutdown_adminq(hw); + i40evf_disable_irq0(hw); + + dev->dev_ops = NULL; + dev->rx_pkt_burst = NULL; + dev->tx_pkt_burst = NULL; + + rte_free(vf->vf_res); + vf->vf_res = NULL; + rte_free(vf->aq_resp); + vf->aq_resp = NULL; + + hw->adapter_closed = 1; +} + +/* + * Reset VF device only to re-initialize resources in PMD layer + */ +static int +i40evf_dev_reset(struct rte_eth_dev *dev) +{ + int ret; + + ret = i40evf_dev_uninit(dev); + if (ret) + return ret; + + ret = i40evf_dev_init(dev); + + return ret; +} + +static int +i40evf_get_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size) +{ + struct i40e_vf *vf = I40E_VSI_TO_VF(vsi); + struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); + int ret; + + if 
(!lut) + return -EINVAL; + + if (vf->flags & I40E_FLAG_RSS_AQ_CAPABLE) { + ret = i40e_aq_get_rss_lut(hw, vsi->vsi_id, FALSE, + lut, lut_size); + if (ret) { + PMD_DRV_LOG(ERR, "Failed to get RSS lookup table"); + return ret; + } + } else { + uint32_t *lut_dw = (uint32_t *)lut; + uint16_t i, lut_size_dw = lut_size / 4; + + for (i = 0; i < lut_size_dw; i++) + lut_dw[i] = I40E_READ_REG(hw, I40E_VFQF_HLUT(i)); + } + + return 0; +} + +static int +i40evf_set_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size) +{ + struct i40e_vf *vf; + struct i40e_hw *hw; + int ret; + + if (!vsi || !lut) + return -EINVAL; + + vf = I40E_VSI_TO_VF(vsi); + hw = I40E_VSI_TO_HW(vsi); + + if (vf->flags & I40E_FLAG_RSS_AQ_CAPABLE) { + ret = i40e_aq_set_rss_lut(hw, vsi->vsi_id, FALSE, + lut, lut_size); + if (ret) { + PMD_DRV_LOG(ERR, "Failed to set RSS lookup table"); + return ret; + } + } else { + uint32_t *lut_dw = (uint32_t *)lut; + uint16_t i, lut_size_dw = lut_size / 4; + + for (i = 0; i < lut_size_dw; i++) + I40E_WRITE_REG(hw, I40E_VFQF_HLUT(i), lut_dw[i]); + I40EVF_WRITE_FLUSH(hw); + } + + return 0; +} + +static int +i40evf_dev_rss_reta_update(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size) +{ + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + uint8_t *lut; + uint16_t i, idx, shift; + int ret; + + if (reta_size != ETH_RSS_RETA_SIZE_64) { + PMD_DRV_LOG(ERR, "The size of hash lookup table configured " + "(%d) doesn't match the number of hardware can " + "support (%d)", reta_size, ETH_RSS_RETA_SIZE_64); + return -EINVAL; + } + + lut = rte_zmalloc("i40e_rss_lut", reta_size, 0); + if (!lut) { + PMD_DRV_LOG(ERR, "No memory can be allocated"); + return -ENOMEM; + } + ret = i40evf_get_rss_lut(&vf->vsi, lut, reta_size); + if (ret) + goto out; + for (i = 0; i < reta_size; i++) { + idx = i / RTE_RETA_GROUP_SIZE; + shift = i % RTE_RETA_GROUP_SIZE; + if (reta_conf[idx].mask & (1ULL << shift)) + lut[i] = reta_conf[idx].reta[shift]; + } + ret = i40evf_set_rss_lut(&vf->vsi, lut, reta_size); + +out: + rte_free(lut); + + return ret; +} + +static int +i40evf_dev_rss_reta_query(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size) +{ + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + uint16_t i, idx, shift; + uint8_t *lut; + int ret; + + if (reta_size != ETH_RSS_RETA_SIZE_64) { + PMD_DRV_LOG(ERR, "The size of hash lookup table configured " + "(%d) doesn't match the number of hardware can " + "support (%d)", reta_size, ETH_RSS_RETA_SIZE_64); + return -EINVAL; + } + + lut = rte_zmalloc("i40e_rss_lut", reta_size, 0); + if (!lut) { + PMD_DRV_LOG(ERR, "No memory can be allocated"); + return -ENOMEM; + } + + ret = i40evf_get_rss_lut(&vf->vsi, lut, reta_size); + if (ret) + goto out; + for (i = 0; i < reta_size; i++) { + idx = i / RTE_RETA_GROUP_SIZE; + shift = i % RTE_RETA_GROUP_SIZE; + if (reta_conf[idx].mask & (1ULL << shift)) + reta_conf[idx].reta[shift] = lut[i]; + } + +out: + rte_free(lut); + + return ret; +} + +static int +i40evf_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len) +{ + struct i40e_vf *vf = I40E_VSI_TO_VF(vsi); + struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); + int ret = 0; + + if (!key || key_len == 0) { + PMD_DRV_LOG(DEBUG, "No key to be configured"); + return 0; + } else if (key_len != (I40E_VFQF_HKEY_MAX_INDEX + 1) * + sizeof(uint32_t)) { + PMD_DRV_LOG(ERR, "Invalid key length %u", key_len); + return -EINVAL; + } + + if (vf->flags & I40E_FLAG_RSS_AQ_CAPABLE) { + 
struct i40e_aqc_get_set_rss_key_data *key_dw = + (struct i40e_aqc_get_set_rss_key_data *)key; + + ret = i40e_aq_set_rss_key(hw, vsi->vsi_id, key_dw); + if (ret) + PMD_INIT_LOG(ERR, "Failed to configure RSS key " + "via AQ"); + } else { + uint32_t *hash_key = (uint32_t *)key; + uint16_t i; + + for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++) + i40e_write_rx_ctl(hw, I40E_VFQF_HKEY(i), hash_key[i]); + I40EVF_WRITE_FLUSH(hw); + } + + return ret; +} + +static int +i40evf_get_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t *key_len) +{ + struct i40e_vf *vf = I40E_VSI_TO_VF(vsi); + struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); + int ret; + + if (!key || !key_len) + return -EINVAL; + + if (vf->flags & I40E_FLAG_RSS_AQ_CAPABLE) { + ret = i40e_aq_get_rss_key(hw, vsi->vsi_id, + (struct i40e_aqc_get_set_rss_key_data *)key); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to get RSS key via AQ"); + return ret; + } + } else { + uint32_t *key_dw = (uint32_t *)key; + uint16_t i; + + for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++) + key_dw[i] = i40e_read_rx_ctl(hw, I40E_VFQF_HKEY(i)); + } + *key_len = (I40E_VFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t); + + return 0; +} + +static int +i40evf_hw_rss_hash_set(struct i40e_vf *vf, struct rte_eth_rss_conf *rss_conf) +{ + struct i40e_hw *hw = I40E_VF_TO_HW(vf); + uint64_t hena; + int ret; + + ret = i40evf_set_rss_key(&vf->vsi, rss_conf->rss_key, + rss_conf->rss_key_len); + if (ret) + return ret; + + hena = i40e_config_hena(vf->adapter, rss_conf->rss_hf); + i40e_write_rx_ctl(hw, I40E_VFQF_HENA(0), (uint32_t)hena); + i40e_write_rx_ctl(hw, I40E_VFQF_HENA(1), (uint32_t)(hena >> 32)); + I40EVF_WRITE_FLUSH(hw); + + return 0; +} + +static void +i40evf_disable_rss(struct i40e_vf *vf) +{ + struct i40e_hw *hw = I40E_VF_TO_HW(vf); + + i40e_write_rx_ctl(hw, I40E_VFQF_HENA(0), 0); + i40e_write_rx_ctl(hw, I40E_VFQF_HENA(1), 0); + I40EVF_WRITE_FLUSH(hw); +} + +static int +i40evf_config_rss(struct i40e_vf *vf) +{ + struct i40e_hw *hw = I40E_VF_TO_HW(vf); + struct rte_eth_rss_conf rss_conf; + uint32_t i, j, lut = 0, nb_q = (I40E_VFQF_HLUT_MAX_INDEX + 1) * 4; + uint32_t rss_lut_size = (I40E_VFQF_HLUT1_MAX_INDEX + 1) * 4; + uint16_t num; + uint8_t *lut_info; + int ret; + + if (vf->dev_data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) { + i40evf_disable_rss(vf); + PMD_DRV_LOG(DEBUG, "RSS not configured"); + return 0; + } + + num = RTE_MIN(vf->dev_data->nb_rx_queues, I40E_MAX_QP_NUM_PER_VF); + /* Fill out the look up table */ + if (!(vf->flags & I40E_FLAG_RSS_AQ_CAPABLE)) { + for (i = 0, j = 0; i < nb_q; i++, j++) { + if (j >= num) + j = 0; + lut = (lut << 8) | j; + if ((i & 3) == 3) + I40E_WRITE_REG(hw, I40E_VFQF_HLUT(i >> 2), lut); + } + } else { + lut_info = rte_zmalloc("i40e_rss_lut", rss_lut_size, 0); + if (!lut_info) { + PMD_DRV_LOG(ERR, "No memory can be allocated"); + return -ENOMEM; + } + + for (i = 0; i < rss_lut_size; i++) + lut_info[i] = i % vf->num_queue_pairs; + + ret = i40evf_set_rss_lut(&vf->vsi, lut_info, + rss_lut_size); + rte_free(lut_info); + if (ret) + return ret; + } + + rss_conf = vf->dev_data->dev_conf.rx_adv_conf.rss_conf; + if ((rss_conf.rss_hf & vf->adapter->flow_types_mask) == 0) { + i40evf_disable_rss(vf); + PMD_DRV_LOG(DEBUG, "No hash flag is set"); + return 0; + } + + if (rss_conf.rss_key == NULL || rss_conf.rss_key_len < + (I40E_VFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) { + /* Calculate the default hash key */ + for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++) + rss_key_default[i] = (uint32_t)rte_rand(); + rss_conf.rss_key = (uint8_t *)rss_key_default; + 
rss_conf.rss_key_len = (I40E_VFQF_HKEY_MAX_INDEX + 1) * + sizeof(uint32_t); + } + + return i40evf_hw_rss_hash_set(vf, &rss_conf); +} + +static int +i40evf_dev_rss_hash_update(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf) +{ + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint64_t rss_hf = rss_conf->rss_hf & vf->adapter->flow_types_mask; + uint64_t hena; + + hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(0)); + hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(1))) << 32; + + if (!(hena & vf->adapter->pctypes_mask)) { /* RSS disabled */ + if (rss_hf != 0) /* Enable RSS */ + return -EINVAL; + return 0; + } + + /* RSS enabled */ + if (rss_hf == 0) /* Disable RSS */ + return -EINVAL; + + return i40evf_hw_rss_hash_set(vf, rss_conf); +} + +static int +i40evf_dev_rss_hash_conf_get(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf) +{ + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint64_t hena; + + i40evf_get_rss_key(&vf->vsi, rss_conf->rss_key, + &rss_conf->rss_key_len); + + hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(0)); + hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(1))) << 32; + rss_conf->rss_hf = i40e_parse_hena(vf->adapter, hena); + + return 0; +} + +static int +i40evf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) +{ + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + struct rte_eth_dev_data *dev_data = vf->dev_data; + uint32_t frame_size = mtu + I40E_ETH_OVERHEAD; + int ret = 0; + + /* check if mtu is within the allowed range */ + if (mtu < RTE_ETHER_MIN_MTU || frame_size > I40E_FRAME_SIZE_MAX) + return -EINVAL; + + /* mtu setting is forbidden if port is start */ + if (dev_data->dev_started) { + PMD_DRV_LOG(ERR, "port %d must be stopped before configuration", + dev_data->port_id); + return -EBUSY; + } + + if (frame_size > RTE_ETHER_MAX_LEN) + dev_data->dev_conf.rxmode.offloads |= + DEV_RX_OFFLOAD_JUMBO_FRAME; + else + dev_data->dev_conf.rxmode.offloads &= + ~DEV_RX_OFFLOAD_JUMBO_FRAME; + dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size; + + return ret; +} + +static int +i40evf_set_default_mac_addr(struct rte_eth_dev *dev, + struct rte_ether_addr *mac_addr) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (!rte_is_valid_assigned_ether_addr(mac_addr)) { + PMD_DRV_LOG(ERR, "Tried to set invalid MAC address."); + return -EINVAL; + } + + i40evf_del_mac_addr_by_addr(dev, (struct rte_ether_addr *)hw->mac.addr); + + if (i40evf_add_mac_addr(dev, mac_addr, 0, 0) != 0) + return -EIO; + + rte_ether_addr_copy(mac_addr, (struct rte_ether_addr *)hw->mac.addr); + return 0; +} + +static int +i40evf_add_del_mc_addr_list(struct rte_eth_dev *dev, + struct rte_ether_addr *mc_addrs, + uint32_t mc_addrs_num, bool add) +{ + struct virtchnl_ether_addr_list *list; + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + uint8_t cmd_buffer[sizeof(struct virtchnl_ether_addr_list) + + (I40E_NUM_MACADDR_MAX * sizeof(struct virtchnl_ether_addr))]; + uint32_t i; + int err; + struct vf_cmd_info args; + + if (mc_addrs == NULL || mc_addrs_num == 0) + return 0; + + if (mc_addrs_num > I40E_NUM_MACADDR_MAX) + return -EINVAL; + + list = (struct virtchnl_ether_addr_list *)cmd_buffer; + list->vsi_id = vf->vsi_res->vsi_id; + list->num_elements = mc_addrs_num; + + for (i = 0; i < mc_addrs_num; i++) { 
+ if (!I40E_IS_MULTICAST(mc_addrs[i].addr_bytes)) { + PMD_DRV_LOG(ERR, "Invalid mac:%x:%x:%x:%x:%x:%x", + mc_addrs[i].addr_bytes[0], + mc_addrs[i].addr_bytes[1], + mc_addrs[i].addr_bytes[2], + mc_addrs[i].addr_bytes[3], + mc_addrs[i].addr_bytes[4], + mc_addrs[i].addr_bytes[5]); + return -EINVAL; + } + + memcpy(list->list[i].addr, mc_addrs[i].addr_bytes, + sizeof(list->list[i].addr)); + } + + args.ops = add ? VIRTCHNL_OP_ADD_ETH_ADDR : VIRTCHNL_OP_DEL_ETH_ADDR; + args.in_args = cmd_buffer; + args.in_args_size = sizeof(struct virtchnl_ether_addr_list) + + i * sizeof(struct virtchnl_ether_addr); + args.out_buffer = vf->aq_resp; + args.out_size = I40E_AQ_BUF_SZ; + err = i40evf_execute_vf_cmd(dev, &args); + if (err) { + PMD_DRV_LOG(ERR, "fail to execute command %s", + add ? "OP_ADD_ETH_ADDR" : "OP_DEL_ETH_ADDR"); + return err; + } + + return 0; +} + +static int +i40evf_set_mc_addr_list(struct rte_eth_dev *dev, + struct rte_ether_addr *mc_addrs, + uint32_t mc_addrs_num) +{ + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + int err; + + /* flush previous addresses */ + err = i40evf_add_del_mc_addr_list(dev, vf->mc_addrs, vf->mc_addrs_num, + FALSE); + if (err) + return err; + + vf->mc_addrs_num = 0; + + /* add new ones */ + err = i40evf_add_del_mc_addr_list(dev, mc_addrs, mc_addrs_num, + TRUE); + if (err) + return err; + + vf->mc_addrs_num = mc_addrs_num; + memcpy(vf->mc_addrs, mc_addrs, mc_addrs_num * sizeof(*mc_addrs)); + + return 0; +} + +bool +is_i40evf_supported(struct rte_eth_dev *dev) +{ + return is_device_supported(dev, &rte_i40evf_pmd); +} diff --git a/src/spdk/dpdk/drivers/net/i40e/i40e_fdir.c b/src/spdk/dpdk/drivers/net/i40e/i40e_fdir.c new file mode 100644 index 000000000..d59399afe --- /dev/null +++ b/src/spdk/dpdk/drivers/net/i40e/i40e_fdir.c @@ -0,0 +1,2339 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2015 Intel Corporation + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "i40e_logs.h" +#include "base/i40e_type.h" +#include "base/i40e_prototype.h" +#include "i40e_ethdev.h" +#include "i40e_rxtx.h" + +#define I40E_FDIR_MZ_NAME "FDIR_MEMZONE" +#ifndef IPV6_ADDR_LEN +#define IPV6_ADDR_LEN 16 +#endif + +#ifndef IPPROTO_L2TP +#define IPPROTO_L2TP 115 +#endif + +#define I40E_FDIR_PKT_LEN 512 +#define I40E_FDIR_IP_DEFAULT_LEN 420 +#define I40E_FDIR_IP_DEFAULT_TTL 0x40 +#define I40E_FDIR_IP_DEFAULT_VERSION_IHL 0x45 +#define I40E_FDIR_TCP_DEFAULT_DATAOFF 0x50 +#define I40E_FDIR_IPv6_DEFAULT_VTC_FLOW 0x60000000 + +#define I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS 0xFF +#define I40E_FDIR_IPv6_PAYLOAD_LEN 380 +#define I40E_FDIR_UDP_DEFAULT_LEN 400 +#define I40E_FDIR_GTP_DEFAULT_LEN 384 +#define I40E_FDIR_INNER_IP_DEFAULT_LEN 384 +#define I40E_FDIR_INNER_IPV6_DEFAULT_LEN 344 + +#define I40E_FDIR_GTPC_DST_PORT 2123 +#define I40E_FDIR_GTPU_DST_PORT 2152 +#define I40E_FDIR_GTP_VER_FLAG_0X30 0x30 +#define I40E_FDIR_GTP_VER_FLAG_0X32 0x32 +#define I40E_FDIR_GTP_MSG_TYPE_0X01 0x01 +#define I40E_FDIR_GTP_MSG_TYPE_0XFF 0xFF + +#define I40E_FDIR_ESP_DST_PORT 4500 + +/* Wait time for fdir filter programming */ +#define I40E_FDIR_MAX_WAIT_US 10000 + +/* Wait count and interval for fdir filter flush */ +#define I40E_FDIR_FLUSH_RETRY 50 +#define I40E_FDIR_FLUSH_INTERVAL_MS 5 + +#define I40E_COUNTER_PF 2 +/* Statistic counter index for one pf */ +#define I40E_COUNTER_INDEX_FDIR(pf_id) (0 + (pf_id) * I40E_COUNTER_PF) + 
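+/*
+ * Editor's note: the helper below is an illustrative sketch only and is not
+ * part of the upstream driver sources. It shows how a mask such as
+ * I40E_FDIR_FLOWS (defined next) is meant to be queried: each RTE_ETH_FLOW_*
+ * id is used as a bit position, so testing whether the flow director handles
+ * a given flow type is a single shift-and-test. The function name
+ * i40e_fdir_flow_is_supported() is hypothetical and unused by the driver.
+ *
+ * Example: i40e_fdir_flow_is_supported(I40E_FDIR_FLOWS,
+ * RTE_ETH_FLOW_NONFRAG_IPV4_UDP) evaluates to true, while
+ * RTE_ETH_FLOW_UNKNOWN (bit 0) is not set in the mask.
+ */
+static inline bool
+i40e_fdir_flow_is_supported(uint64_t flow_mask, uint16_t flow_type)
+{
+	/* flow_type is an RTE_ETH_FLOW_* id, used directly as a bit index */
+	return (flow_mask & (1ULL << flow_type)) != 0;
+}
+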
+#define I40E_FDIR_FLOWS ( \ + (1ULL << RTE_ETH_FLOW_FRAG_IPV4) | \ + (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \ + (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \ + (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \ + (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \ + (1ULL << RTE_ETH_FLOW_FRAG_IPV6) | \ + (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \ + (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \ + (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \ + (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) | \ + (1ULL << RTE_ETH_FLOW_L2_PAYLOAD)) + +static int i40e_fdir_filter_programming(struct i40e_pf *pf, + enum i40e_filter_pctype pctype, + const struct rte_eth_fdir_filter *filter, + bool add); +static int i40e_fdir_filter_convert(const struct i40e_fdir_filter_conf *input, + struct i40e_fdir_filter *filter); +static struct i40e_fdir_filter * +i40e_sw_fdir_filter_lookup(struct i40e_fdir_info *fdir_info, + const struct i40e_fdir_input *input); +static int i40e_sw_fdir_filter_insert(struct i40e_pf *pf, + struct i40e_fdir_filter *filter); +static int +i40e_flow_fdir_filter_programming(struct i40e_pf *pf, + enum i40e_filter_pctype pctype, + const struct i40e_fdir_filter_conf *filter, + bool add); + +static int +i40e_fdir_rx_queue_init(struct i40e_rx_queue *rxq) +{ + struct i40e_hw *hw = I40E_VSI_TO_HW(rxq->vsi); + struct i40e_hmc_obj_rxq rx_ctx; + int err = I40E_SUCCESS; + + memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq)); + /* Init the RX queue in hardware */ + rx_ctx.dbuff = I40E_RXBUF_SZ_1024 >> I40E_RXQ_CTX_DBUFF_SHIFT; + rx_ctx.hbuff = 0; + rx_ctx.base = rxq->rx_ring_phys_addr / I40E_QUEUE_BASE_ADDR_UNIT; + rx_ctx.qlen = rxq->nb_rx_desc; +#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC + rx_ctx.dsize = 1; +#endif + rx_ctx.dtype = i40e_header_split_none; + rx_ctx.hsplit_0 = I40E_HEADER_SPLIT_NONE; + rx_ctx.rxmax = RTE_ETHER_MAX_LEN; + rx_ctx.tphrdesc_ena = 1; + rx_ctx.tphwdesc_ena = 1; + rx_ctx.tphdata_ena = 1; + rx_ctx.tphhead_ena = 1; + rx_ctx.lrxqthresh = 2; + rx_ctx.crcstrip = 0; + rx_ctx.l2tsel = 1; + rx_ctx.showiv = 0; + rx_ctx.prefena = 1; + + err = i40e_clear_lan_rx_queue_context(hw, rxq->reg_idx); + if (err != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "Failed to clear FDIR RX queue context."); + return err; + } + err = i40e_set_lan_rx_queue_context(hw, rxq->reg_idx, &rx_ctx); + if (err != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "Failed to set FDIR RX queue context."); + return err; + } + rxq->qrx_tail = hw->hw_addr + + I40E_QRX_TAIL(rxq->vsi->base_queue); + + rte_wmb(); + /* Init the RX tail regieter. 
*/ + I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1); + + return err; +} + +/* + * i40e_fdir_setup - reserve and initialize the Flow Director resources + * @pf: board private structure + */ +int +i40e_fdir_setup(struct i40e_pf *pf) +{ + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + struct i40e_vsi *vsi; + int err = I40E_SUCCESS; + char z_name[RTE_MEMZONE_NAMESIZE]; + const struct rte_memzone *mz = NULL; + struct rte_eth_dev *eth_dev = pf->adapter->eth_dev; + + if ((pf->flags & I40E_FLAG_FDIR) == 0) { + PMD_INIT_LOG(ERR, "HW doesn't support FDIR"); + return I40E_NOT_SUPPORTED; + } + + PMD_DRV_LOG(INFO, "FDIR HW Capabilities: num_filters_guaranteed = %u," + " num_filters_best_effort = %u.", + hw->func_caps.fd_filters_guaranteed, + hw->func_caps.fd_filters_best_effort); + + vsi = pf->fdir.fdir_vsi; + if (vsi) { + PMD_DRV_LOG(INFO, "FDIR initialization has been done."); + return I40E_SUCCESS; + } + /* make new FDIR VSI */ + vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR, pf->main_vsi, 0); + if (!vsi) { + PMD_DRV_LOG(ERR, "Couldn't create FDIR VSI."); + return I40E_ERR_NO_AVAILABLE_VSI; + } + pf->fdir.fdir_vsi = vsi; + + /*Fdir tx queue setup*/ + err = i40e_fdir_setup_tx_resources(pf); + if (err) { + PMD_DRV_LOG(ERR, "Failed to setup FDIR TX resources."); + goto fail_setup_tx; + } + + /*Fdir rx queue setup*/ + err = i40e_fdir_setup_rx_resources(pf); + if (err) { + PMD_DRV_LOG(ERR, "Failed to setup FDIR RX resources."); + goto fail_setup_rx; + } + + err = i40e_tx_queue_init(pf->fdir.txq); + if (err) { + PMD_DRV_LOG(ERR, "Failed to do FDIR TX initialization."); + goto fail_mem; + } + + /* need switch on before dev start*/ + err = i40e_switch_tx_queue(hw, vsi->base_queue, TRUE); + if (err) { + PMD_DRV_LOG(ERR, "Failed to do fdir TX switch on."); + goto fail_mem; + } + + /* Init the rx queue in hardware */ + err = i40e_fdir_rx_queue_init(pf->fdir.rxq); + if (err) { + PMD_DRV_LOG(ERR, "Failed to do FDIR RX initialization."); + goto fail_mem; + } + + /* switch on rx queue */ + err = i40e_switch_rx_queue(hw, vsi->base_queue, TRUE); + if (err) { + PMD_DRV_LOG(ERR, "Failed to do FDIR RX switch on."); + goto fail_mem; + } + + /* reserve memory for the fdir programming packet */ + snprintf(z_name, sizeof(z_name), "%s_%s_%d", + eth_dev->device->driver->name, + I40E_FDIR_MZ_NAME, + eth_dev->data->port_id); + mz = i40e_memzone_reserve(z_name, I40E_FDIR_PKT_LEN, SOCKET_ID_ANY); + if (!mz) { + PMD_DRV_LOG(ERR, "Cannot init memzone for " + "flow director program packet."); + err = I40E_ERR_NO_MEMORY; + goto fail_mem; + } + pf->fdir.prg_pkt = mz->addr; + pf->fdir.dma_addr = mz->iova; + + pf->fdir.match_counter_index = I40E_COUNTER_INDEX_FDIR(hw->pf_id); + PMD_DRV_LOG(INFO, "FDIR setup successfully, with programming queue %u.", + vsi->base_queue); + return I40E_SUCCESS; + +fail_mem: + i40e_dev_rx_queue_release(pf->fdir.rxq); + pf->fdir.rxq = NULL; +fail_setup_rx: + i40e_dev_tx_queue_release(pf->fdir.txq); + pf->fdir.txq = NULL; +fail_setup_tx: + i40e_vsi_release(vsi); + pf->fdir.fdir_vsi = NULL; + return err; +} + +/* + * i40e_fdir_teardown - release the Flow Director resources + * @pf: board private structure + */ +void +i40e_fdir_teardown(struct i40e_pf *pf) +{ + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + struct i40e_vsi *vsi; + + vsi = pf->fdir.fdir_vsi; + if (!vsi) + return; + int err = i40e_switch_tx_queue(hw, vsi->base_queue, FALSE); + if (err) + PMD_DRV_LOG(DEBUG, "Failed to do FDIR TX switch off"); + err = i40e_switch_rx_queue(hw, vsi->base_queue, FALSE); + if (err) + PMD_DRV_LOG(DEBUG, "Failed to do FDIR RX 
switch off"); + i40e_dev_rx_queue_release(pf->fdir.rxq); + pf->fdir.rxq = NULL; + i40e_dev_tx_queue_release(pf->fdir.txq); + pf->fdir.txq = NULL; + i40e_vsi_release(vsi); + pf->fdir.fdir_vsi = NULL; +} + +/* check whether the flow director table in empty */ +static inline int +i40e_fdir_empty(struct i40e_hw *hw) +{ + uint32_t guarant_cnt, best_cnt; + + guarant_cnt = (uint32_t)((I40E_READ_REG(hw, I40E_PFQF_FDSTAT) & + I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) >> + I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT); + best_cnt = (uint32_t)((I40E_READ_REG(hw, I40E_PFQF_FDSTAT) & + I40E_PFQF_FDSTAT_BEST_CNT_MASK) >> + I40E_PFQF_FDSTAT_BEST_CNT_SHIFT); + if (best_cnt + guarant_cnt > 0) + return -1; + + return 0; +} + +/* + * Initialize the configuration about bytes stream extracted as flexible payload + * and mask setting + */ +static inline void +i40e_init_flx_pld(struct i40e_pf *pf) +{ + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + uint8_t pctype; + int i, index; + uint16_t flow_type; + + /* + * Define the bytes stream extracted as flexible payload in + * field vector. By default, select 8 words from the beginning + * of payload as flexible payload. + */ + for (i = I40E_FLXPLD_L2_IDX; i < I40E_MAX_FLXPLD_LAYER; i++) { + index = i * I40E_MAX_FLXPLD_FIED; + pf->fdir.flex_set[index].src_offset = 0; + pf->fdir.flex_set[index].size = I40E_FDIR_MAX_FLEXWORD_NUM; + pf->fdir.flex_set[index].dst_offset = 0; + I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(index), 0x0000C900); + I40E_WRITE_REG(hw, + I40E_PRTQF_FLX_PIT(index + 1), 0x0000FC29);/*non-used*/ + I40E_WRITE_REG(hw, + I40E_PRTQF_FLX_PIT(index + 2), 0x0000FC2A);/*non-used*/ + } + + /* initialize the masks */ + for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP; + pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++) { + flow_type = i40e_pctype_to_flowtype(pf->adapter, pctype); + + if (flow_type == RTE_ETH_FLOW_UNKNOWN) + continue; + pf->fdir.flex_mask[pctype].word_mask = 0; + i40e_write_rx_ctl(hw, I40E_PRTQF_FD_FLXINSET(pctype), 0); + for (i = 0; i < I40E_FDIR_BITMASK_NUM_WORD; i++) { + pf->fdir.flex_mask[pctype].bitmask[i].offset = 0; + pf->fdir.flex_mask[pctype].bitmask[i].mask = 0; + i40e_write_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, i), 0); + } + } +} + +#define I40E_VALIDATE_FLEX_PIT(flex_pit1, flex_pit2) do { \ + if ((flex_pit2).src_offset < \ + (flex_pit1).src_offset + (flex_pit1).size) { \ + PMD_DRV_LOG(ERR, "src_offset should be not" \ + " less than than previous offset" \ + " + previous FSIZE."); \ + return -EINVAL; \ + } \ +} while (0) + +/* + * i40e_srcoff_to_flx_pit - transform the src_offset into flex_pit structure, + * and the flex_pit will be sorted by it's src_offset value + */ +static inline uint16_t +i40e_srcoff_to_flx_pit(const uint16_t *src_offset, + struct i40e_fdir_flex_pit *flex_pit) +{ + uint16_t src_tmp, size, num = 0; + uint16_t i, k, j = 0; + + while (j < I40E_FDIR_MAX_FLEX_LEN) { + size = 1; + for (; j < I40E_FDIR_MAX_FLEX_LEN - 1; j++) { + if (src_offset[j + 1] == src_offset[j] + 1) + size++; + else + break; + } + src_tmp = src_offset[j] + 1 - size; + /* the flex_pit need to be sort by src_offset */ + for (i = 0; i < num; i++) { + if (src_tmp < flex_pit[i].src_offset) + break; + } + /* if insert required, move backward */ + for (k = num; k > i; k--) + flex_pit[k] = flex_pit[k - 1]; + /* insert */ + flex_pit[i].dst_offset = j + 1 - size; + flex_pit[i].src_offset = src_tmp; + flex_pit[i].size = size; + j++; + num++; + } + return num; +} + +/* i40e_check_fdir_flex_payload -check flex payload configuration arguments */ +static inline int 
+i40e_check_fdir_flex_payload(const struct rte_eth_flex_payload_cfg *flex_cfg) +{ + struct i40e_fdir_flex_pit flex_pit[I40E_FDIR_MAX_FLEX_LEN]; + uint16_t num, i; + + for (i = 0; i < I40E_FDIR_MAX_FLEX_LEN; i++) { + if (flex_cfg->src_offset[i] >= I40E_MAX_FLX_SOURCE_OFF) { + PMD_DRV_LOG(ERR, "exceeds maxmial payload limit."); + return -EINVAL; + } + } + + memset(flex_pit, 0, sizeof(flex_pit)); + num = i40e_srcoff_to_flx_pit(flex_cfg->src_offset, flex_pit); + if (num > I40E_MAX_FLXPLD_FIED) { + PMD_DRV_LOG(ERR, "exceeds maxmial number of flex fields."); + return -EINVAL; + } + for (i = 0; i < num; i++) { + if (flex_pit[i].size & 0x01 || flex_pit[i].dst_offset & 0x01 || + flex_pit[i].src_offset & 0x01) { + PMD_DRV_LOG(ERR, "flexpayload should be measured" + " in word"); + return -EINVAL; + } + if (i != num - 1) + I40E_VALIDATE_FLEX_PIT(flex_pit[i], flex_pit[i + 1]); + } + return 0; +} + +/* + * i40e_check_fdir_flex_conf -check if the flex payload and mask configuration + * arguments are valid + */ +static int +i40e_check_fdir_flex_conf(const struct i40e_adapter *adapter, + const struct rte_eth_fdir_flex_conf *conf) +{ + const struct rte_eth_flex_payload_cfg *flex_cfg; + const struct rte_eth_fdir_flex_mask *flex_mask; + uint16_t mask_tmp; + uint8_t nb_bitmask; + uint16_t i, j; + int ret = 0; + enum i40e_filter_pctype pctype; + + if (conf == NULL) { + PMD_DRV_LOG(INFO, "NULL pointer."); + return -EINVAL; + } + /* check flexible payload setting configuration */ + if (conf->nb_payloads > RTE_ETH_L4_PAYLOAD) { + PMD_DRV_LOG(ERR, "invalid number of payload setting."); + return -EINVAL; + } + for (i = 0; i < conf->nb_payloads; i++) { + flex_cfg = &conf->flex_set[i]; + if (flex_cfg->type > RTE_ETH_L4_PAYLOAD) { + PMD_DRV_LOG(ERR, "invalid payload type."); + return -EINVAL; + } + ret = i40e_check_fdir_flex_payload(flex_cfg); + if (ret < 0) { + PMD_DRV_LOG(ERR, "invalid flex payload arguments."); + return -EINVAL; + } + } + + /* check flex mask setting configuration */ + if (conf->nb_flexmasks >= RTE_ETH_FLOW_MAX) { + PMD_DRV_LOG(ERR, "invalid number of flex masks."); + return -EINVAL; + } + for (i = 0; i < conf->nb_flexmasks; i++) { + flex_mask = &conf->flex_mask[i]; + pctype = i40e_flowtype_to_pctype(adapter, flex_mask->flow_type); + if (pctype == I40E_FILTER_PCTYPE_INVALID) { + PMD_DRV_LOG(WARNING, "invalid flow type."); + return -EINVAL; + } + nb_bitmask = 0; + for (j = 0; j < I40E_FDIR_MAX_FLEX_LEN; j += sizeof(uint16_t)) { + mask_tmp = I40E_WORD(flex_mask->mask[j], + flex_mask->mask[j + 1]); + if (mask_tmp != 0x0 && mask_tmp != UINT16_MAX) { + nb_bitmask++; + if (nb_bitmask > I40E_FDIR_BITMASK_NUM_WORD) { + PMD_DRV_LOG(ERR, " exceed maximal" + " number of bitmasks."); + return -EINVAL; + } + } + } + } + return 0; +} + +/* + * i40e_set_flx_pld_cfg -configure the rule how bytes stream is extracted as flexible payload + * @pf: board private structure + * @cfg: the rule how bytes stream is extracted as flexible payload + */ +static void +i40e_set_flx_pld_cfg(struct i40e_pf *pf, + const struct rte_eth_flex_payload_cfg *cfg) +{ + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + struct i40e_fdir_flex_pit flex_pit[I40E_MAX_FLXPLD_FIED]; + uint32_t flx_pit, flx_ort; + uint16_t num, min_next_off; /* in words */ + uint8_t field_idx = 0; + uint8_t layer_idx = 0; + uint16_t i; + + if (cfg->type == RTE_ETH_L2_PAYLOAD) + layer_idx = I40E_FLXPLD_L2_IDX; + else if (cfg->type == RTE_ETH_L3_PAYLOAD) + layer_idx = I40E_FLXPLD_L3_IDX; + else if (cfg->type == RTE_ETH_L4_PAYLOAD) + layer_idx = I40E_FLXPLD_L4_IDX; + + 
memset(flex_pit, 0, sizeof(flex_pit)); + num = RTE_MIN(i40e_srcoff_to_flx_pit(cfg->src_offset, flex_pit), + RTE_DIM(flex_pit)); + + if (num) { + flx_ort = (1 << I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT) | + (num << I40E_GLQF_ORT_FIELD_CNT_SHIFT) | + (layer_idx * I40E_MAX_FLXPLD_FIED); + I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(33 + layer_idx), flx_ort); + } + + for (i = 0; i < num; i++) { + field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i; + /* record the info in fdir structure */ + pf->fdir.flex_set[field_idx].src_offset = + flex_pit[i].src_offset / sizeof(uint16_t); + pf->fdir.flex_set[field_idx].size = + flex_pit[i].size / sizeof(uint16_t); + pf->fdir.flex_set[field_idx].dst_offset = + flex_pit[i].dst_offset / sizeof(uint16_t); + flx_pit = MK_FLX_PIT(pf->fdir.flex_set[field_idx].src_offset, + pf->fdir.flex_set[field_idx].size, + pf->fdir.flex_set[field_idx].dst_offset); + + I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(field_idx), flx_pit); + } + min_next_off = pf->fdir.flex_set[field_idx].src_offset + + pf->fdir.flex_set[field_idx].size; + + for (; i < I40E_MAX_FLXPLD_FIED; i++) { + /* set the non-used register obeying register's constrain */ + flx_pit = MK_FLX_PIT(min_next_off, NONUSE_FLX_PIT_FSIZE, + NONUSE_FLX_PIT_DEST_OFF); + I40E_WRITE_REG(hw, + I40E_PRTQF_FLX_PIT(layer_idx * I40E_MAX_FLXPLD_FIED + i), + flx_pit); + min_next_off++; + } +} + +/* + * i40e_set_flex_mask_on_pctype - configure the mask on flexible payload + * @pf: board private structure + * @pctype: packet classify type + * @flex_masks: mask for flexible payload + */ +static void +i40e_set_flex_mask_on_pctype(struct i40e_pf *pf, + enum i40e_filter_pctype pctype, + const struct rte_eth_fdir_flex_mask *mask_cfg) +{ + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + struct i40e_fdir_flex_mask *flex_mask; + uint32_t flxinset, fd_mask; + uint16_t mask_tmp; + uint8_t i, nb_bitmask = 0; + + flex_mask = &pf->fdir.flex_mask[pctype]; + memset(flex_mask, 0, sizeof(struct i40e_fdir_flex_mask)); + for (i = 0; i < I40E_FDIR_MAX_FLEX_LEN; i += sizeof(uint16_t)) { + mask_tmp = I40E_WORD(mask_cfg->mask[i], mask_cfg->mask[i + 1]); + if (mask_tmp != 0x0) { + flex_mask->word_mask |= + I40E_FLEX_WORD_MASK(i / sizeof(uint16_t)); + if (mask_tmp != UINT16_MAX) { + /* set bit mask */ + flex_mask->bitmask[nb_bitmask].mask = ~mask_tmp; + flex_mask->bitmask[nb_bitmask].offset = + i / sizeof(uint16_t); + nb_bitmask++; + } + } + } + /* write mask to hw */ + flxinset = (flex_mask->word_mask << + I40E_PRTQF_FD_FLXINSET_INSET_SHIFT) & + I40E_PRTQF_FD_FLXINSET_INSET_MASK; + i40e_write_rx_ctl(hw, I40E_PRTQF_FD_FLXINSET(pctype), flxinset); + + for (i = 0; i < nb_bitmask; i++) { + fd_mask = (flex_mask->bitmask[i].mask << + I40E_PRTQF_FD_MSK_MASK_SHIFT) & + I40E_PRTQF_FD_MSK_MASK_MASK; + fd_mask |= ((flex_mask->bitmask[i].offset + + I40E_FLX_OFFSET_IN_FIELD_VECTOR) << + I40E_PRTQF_FD_MSK_OFFSET_SHIFT) & + I40E_PRTQF_FD_MSK_OFFSET_MASK; + i40e_write_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, i), fd_mask); + } +} + +/* + * Enable/disable flow director RX processing in vector routines. 
+ */ +void +i40e_fdir_rx_proc_enable(struct rte_eth_dev *dev, bool on) +{ + int32_t i; + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + struct i40e_rx_queue *rxq = dev->data->rx_queues[i]; + if (!rxq) + continue; + rxq->fdir_enabled = on; + } + PMD_DRV_LOG(DEBUG, "Flow Director processing on RX set to %d", on); +} + +/* + * Configure flow director related setting + */ +int +i40e_fdir_configure(struct rte_eth_dev *dev) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_eth_fdir_flex_conf *conf; + enum i40e_filter_pctype pctype; + uint32_t val; + uint8_t i; + int ret = 0; + + /* + * configuration need to be done before + * flow director filters are added + * If filters exist, flush them. + */ + if (i40e_fdir_empty(hw) < 0) { + ret = i40e_fdir_flush(dev); + if (ret) { + PMD_DRV_LOG(ERR, "failed to flush fdir table."); + return ret; + } + } + + /* enable FDIR filter */ + val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0); + val |= I40E_PFQF_CTL_0_FD_ENA_MASK; + i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, val); + + i40e_init_flx_pld(pf); /* set flex config to default value */ + + conf = &dev->data->dev_conf.fdir_conf.flex_conf; + ret = i40e_check_fdir_flex_conf(pf->adapter, conf); + if (ret < 0) { + PMD_DRV_LOG(ERR, " invalid configuration arguments."); + return -EINVAL; + } + + if (!pf->support_multi_driver) { + /* configure flex payload */ + for (i = 0; i < conf->nb_payloads; i++) + i40e_set_flx_pld_cfg(pf, &conf->flex_set[i]); + /* configure flex mask*/ + for (i = 0; i < conf->nb_flexmasks; i++) { + if (hw->mac.type == I40E_MAC_X722) { + /* get pctype value in fd pctype register */ + pctype = (enum i40e_filter_pctype) + i40e_read_rx_ctl(hw, + I40E_GLQF_FD_PCTYPES( + (int)i40e_flowtype_to_pctype( + pf->adapter, + conf->flex_mask[i].flow_type))); + } else { + pctype = i40e_flowtype_to_pctype(pf->adapter, + conf->flex_mask[i].flow_type); + } + + i40e_set_flex_mask_on_pctype(pf, pctype, + &conf->flex_mask[i]); + } + } else { + PMD_DRV_LOG(ERR, "Not support flexible payload."); + } + + /* Enable FDIR processing in RX routines */ + i40e_fdir_rx_proc_enable(dev, 1); + + return ret; +} + +static inline int +i40e_fdir_fill_eth_ip_head(const struct rte_eth_fdir_input *fdir_input, + unsigned char *raw_pkt, + bool vlan) +{ + static uint8_t vlan_frame[] = {0x81, 0, 0, 0}; + uint16_t *ether_type; + uint8_t len = 2 * sizeof(struct rte_ether_addr); + struct rte_ipv4_hdr *ip; + struct rte_ipv6_hdr *ip6; + static const uint8_t next_proto[] = { + [RTE_ETH_FLOW_FRAG_IPV4] = IPPROTO_IP, + [RTE_ETH_FLOW_NONFRAG_IPV4_TCP] = IPPROTO_TCP, + [RTE_ETH_FLOW_NONFRAG_IPV4_UDP] = IPPROTO_UDP, + [RTE_ETH_FLOW_NONFRAG_IPV4_SCTP] = IPPROTO_SCTP, + [RTE_ETH_FLOW_NONFRAG_IPV4_OTHER] = IPPROTO_IP, + [RTE_ETH_FLOW_FRAG_IPV6] = IPPROTO_NONE, + [RTE_ETH_FLOW_NONFRAG_IPV6_TCP] = IPPROTO_TCP, + [RTE_ETH_FLOW_NONFRAG_IPV6_UDP] = IPPROTO_UDP, + [RTE_ETH_FLOW_NONFRAG_IPV6_SCTP] = IPPROTO_SCTP, + [RTE_ETH_FLOW_NONFRAG_IPV6_OTHER] = IPPROTO_NONE, + }; + + raw_pkt += 2 * sizeof(struct rte_ether_addr); + if (vlan && fdir_input->flow_ext.vlan_tci) { + rte_memcpy(raw_pkt, vlan_frame, sizeof(vlan_frame)); + rte_memcpy(raw_pkt + sizeof(uint16_t), + &fdir_input->flow_ext.vlan_tci, + sizeof(uint16_t)); + raw_pkt += sizeof(vlan_frame); + len += sizeof(vlan_frame); + } + ether_type = (uint16_t *)raw_pkt; + raw_pkt += sizeof(uint16_t); + len += sizeof(uint16_t); + + switch (fdir_input->flow_type) { + case RTE_ETH_FLOW_L2_PAYLOAD: + *ether_type = 
fdir_input->flow.l2_flow.ether_type; + break; + case RTE_ETH_FLOW_NONFRAG_IPV4_TCP: + case RTE_ETH_FLOW_NONFRAG_IPV4_UDP: + case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP: + case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER: + case RTE_ETH_FLOW_FRAG_IPV4: + ip = (struct rte_ipv4_hdr *)raw_pkt; + + *ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4); + ip->version_ihl = I40E_FDIR_IP_DEFAULT_VERSION_IHL; + /* set len to by default */ + ip->total_length = rte_cpu_to_be_16(I40E_FDIR_IP_DEFAULT_LEN); + ip->next_proto_id = fdir_input->flow.ip4_flow.proto ? + fdir_input->flow.ip4_flow.proto : + next_proto[fdir_input->flow_type]; + ip->time_to_live = fdir_input->flow.ip4_flow.ttl ? + fdir_input->flow.ip4_flow.ttl : + I40E_FDIR_IP_DEFAULT_TTL; + ip->type_of_service = fdir_input->flow.ip4_flow.tos; + /* + * The source and destination fields in the transmitted packet + * need to be presented in a reversed order with respect + * to the expected received packets. + */ + ip->src_addr = fdir_input->flow.ip4_flow.dst_ip; + ip->dst_addr = fdir_input->flow.ip4_flow.src_ip; + len += sizeof(struct rte_ipv4_hdr); + break; + case RTE_ETH_FLOW_NONFRAG_IPV6_TCP: + case RTE_ETH_FLOW_NONFRAG_IPV6_UDP: + case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP: + case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER: + case RTE_ETH_FLOW_FRAG_IPV6: + ip6 = (struct rte_ipv6_hdr *)raw_pkt; + + *ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6); + ip6->vtc_flow = + rte_cpu_to_be_32(I40E_FDIR_IPv6_DEFAULT_VTC_FLOW | + (fdir_input->flow.ipv6_flow.tc << + I40E_FDIR_IPv6_TC_OFFSET)); + ip6->payload_len = + rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN); + ip6->proto = fdir_input->flow.ipv6_flow.proto ? + fdir_input->flow.ipv6_flow.proto : + next_proto[fdir_input->flow_type]; + ip6->hop_limits = fdir_input->flow.ipv6_flow.hop_limits ? + fdir_input->flow.ipv6_flow.hop_limits : + I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS; + /* + * The source and destination fields in the transmitted packet + * need to be presented in a reversed order with respect + * to the expected received packets. + */ + rte_memcpy(&(ip6->src_addr), + &(fdir_input->flow.ipv6_flow.dst_ip), + IPV6_ADDR_LEN); + rte_memcpy(&(ip6->dst_addr), + &(fdir_input->flow.ipv6_flow.src_ip), + IPV6_ADDR_LEN); + len += sizeof(struct rte_ipv6_hdr); + break; + default: + PMD_DRV_LOG(ERR, "unknown flow type %u.", + fdir_input->flow_type); + return -1; + } + return len; +} + + +/* + * i40e_fdir_construct_pkt - construct packet based on fields in input + * @pf: board private structure + * @fdir_input: input set of the flow director entry + * @raw_pkt: a packet to be constructed + */ +static int +i40e_fdir_construct_pkt(struct i40e_pf *pf, + const struct rte_eth_fdir_input *fdir_input, + unsigned char *raw_pkt) +{ + unsigned char *payload, *ptr; + struct rte_udp_hdr *udp; + struct rte_tcp_hdr *tcp; + struct rte_sctp_hdr *sctp; + uint8_t size, dst = 0; + uint8_t i, pit_idx, set_idx = I40E_FLXPLD_L4_IDX; /* use l4 by default*/ + int len; + + /* fill the ethernet and IP head */ + len = i40e_fdir_fill_eth_ip_head(fdir_input, raw_pkt, + !!fdir_input->flow_ext.vlan_tci); + if (len < 0) + return -EINVAL; + + /* fill the L4 head */ + switch (fdir_input->flow_type) { + case RTE_ETH_FLOW_NONFRAG_IPV4_UDP: + udp = (struct rte_udp_hdr *)(raw_pkt + len); + payload = (unsigned char *)udp + sizeof(struct rte_udp_hdr); + /* + * The source and destination fields in the transmitted packet + * need to be presented in a reversed order with respect + * to the expected received packets. 
+ */ + udp->src_port = fdir_input->flow.udp4_flow.dst_port; + udp->dst_port = fdir_input->flow.udp4_flow.src_port; + udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN); + break; + + case RTE_ETH_FLOW_NONFRAG_IPV4_TCP: + tcp = (struct rte_tcp_hdr *)(raw_pkt + len); + payload = (unsigned char *)tcp + sizeof(struct rte_tcp_hdr); + /* + * The source and destination fields in the transmitted packet + * need to be presented in a reversed order with respect + * to the expected received packets. + */ + tcp->src_port = fdir_input->flow.tcp4_flow.dst_port; + tcp->dst_port = fdir_input->flow.tcp4_flow.src_port; + tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF; + break; + + case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP: + sctp = (struct rte_sctp_hdr *)(raw_pkt + len); + payload = (unsigned char *)sctp + sizeof(struct rte_sctp_hdr); + /* + * The source and destination fields in the transmitted packet + * need to be presented in a reversed order with respect + * to the expected received packets. + */ + sctp->src_port = fdir_input->flow.sctp4_flow.dst_port; + sctp->dst_port = fdir_input->flow.sctp4_flow.src_port; + sctp->tag = fdir_input->flow.sctp4_flow.verify_tag; + break; + + case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER: + case RTE_ETH_FLOW_FRAG_IPV4: + payload = raw_pkt + len; + set_idx = I40E_FLXPLD_L3_IDX; + break; + + case RTE_ETH_FLOW_NONFRAG_IPV6_UDP: + udp = (struct rte_udp_hdr *)(raw_pkt + len); + payload = (unsigned char *)udp + sizeof(struct rte_udp_hdr); + /* + * The source and destination fields in the transmitted packet + * need to be presented in a reversed order with respect + * to the expected received packets. + */ + udp->src_port = fdir_input->flow.udp6_flow.dst_port; + udp->dst_port = fdir_input->flow.udp6_flow.src_port; + udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN); + break; + + case RTE_ETH_FLOW_NONFRAG_IPV6_TCP: + tcp = (struct rte_tcp_hdr *)(raw_pkt + len); + payload = (unsigned char *)tcp + sizeof(struct rte_tcp_hdr); + /* + * The source and destination fields in the transmitted packet + * need to be presented in a reversed order with respect + * to the expected received packets. + */ + tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF; + tcp->src_port = fdir_input->flow.udp6_flow.dst_port; + tcp->dst_port = fdir_input->flow.udp6_flow.src_port; + break; + + case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP: + sctp = (struct rte_sctp_hdr *)(raw_pkt + len); + payload = (unsigned char *)sctp + sizeof(struct rte_sctp_hdr); + /* + * The source and destination fields in the transmitted packet + * need to be presented in a reversed order with respect + * to the expected received packets. 
+ */ + sctp->src_port = fdir_input->flow.sctp6_flow.dst_port; + sctp->dst_port = fdir_input->flow.sctp6_flow.src_port; + sctp->tag = fdir_input->flow.sctp6_flow.verify_tag; + break; + + case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER: + case RTE_ETH_FLOW_FRAG_IPV6: + payload = raw_pkt + len; + set_idx = I40E_FLXPLD_L3_IDX; + break; + case RTE_ETH_FLOW_L2_PAYLOAD: + payload = raw_pkt + len; + /* + * ARP packet is a special case on which the payload + * starts after the whole ARP header + */ + if (fdir_input->flow.l2_flow.ether_type == + rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP)) + payload += sizeof(struct rte_arp_hdr); + set_idx = I40E_FLXPLD_L2_IDX; + break; + default: + PMD_DRV_LOG(ERR, "unknown flow type %u.", fdir_input->flow_type); + return -EINVAL; + } + + /* fill the flexbytes to payload */ + for (i = 0; i < I40E_MAX_FLXPLD_FIED; i++) { + pit_idx = set_idx * I40E_MAX_FLXPLD_FIED + i; + size = pf->fdir.flex_set[pit_idx].size; + if (size == 0) + continue; + dst = pf->fdir.flex_set[pit_idx].dst_offset * sizeof(uint16_t); + ptr = payload + + pf->fdir.flex_set[pit_idx].src_offset * sizeof(uint16_t); + rte_memcpy(ptr, + &fdir_input->flow_ext.flexbytes[dst], + size * sizeof(uint16_t)); + } + + return 0; +} + +static struct i40e_customized_pctype * +i40e_flow_fdir_find_customized_pctype(struct i40e_pf *pf, uint8_t pctype) +{ + struct i40e_customized_pctype *cus_pctype; + enum i40e_new_pctype i = I40E_CUSTOMIZED_GTPC; + + for (; i < I40E_CUSTOMIZED_MAX; i++) { + cus_pctype = &pf->customized_pctype[i]; + if (pctype == cus_pctype->pctype) + return cus_pctype; + } + return NULL; +} + +static inline int +fill_ip6_head(const struct i40e_fdir_input *fdir_input, unsigned char *raw_pkt, + uint8_t next_proto, uint8_t len, uint16_t *ether_type) +{ + struct rte_ipv6_hdr *ip6; + + ip6 = (struct rte_ipv6_hdr *)raw_pkt; + + *ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6); + ip6->vtc_flow = rte_cpu_to_be_32(I40E_FDIR_IPv6_DEFAULT_VTC_FLOW | + (fdir_input->flow.ipv6_flow.tc << I40E_FDIR_IPv6_TC_OFFSET)); + ip6->payload_len = rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN); + ip6->proto = fdir_input->flow.ipv6_flow.proto ? + fdir_input->flow.ipv6_flow.proto : next_proto; + ip6->hop_limits = fdir_input->flow.ipv6_flow.hop_limits ? + fdir_input->flow.ipv6_flow.hop_limits : + I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS; + /** + * The source and destination fields in the transmitted packet + * need to be presented in a reversed order with respect + * to the expected received packets. + */ + rte_memcpy(&ip6->src_addr, &fdir_input->flow.ipv6_flow.dst_ip, + IPV6_ADDR_LEN); + rte_memcpy(&ip6->dst_addr, &fdir_input->flow.ipv6_flow.src_ip, + IPV6_ADDR_LEN); + len += sizeof(struct rte_ipv6_hdr); + + return len; +} + +static inline int +fill_ip4_head(const struct i40e_fdir_input *fdir_input, unsigned char *raw_pkt, + uint8_t next_proto, uint8_t len, uint16_t *ether_type) +{ + struct rte_ipv4_hdr *ip4; + + ip4 = (struct rte_ipv4_hdr *)raw_pkt; + + *ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4); + ip4->version_ihl = I40E_FDIR_IP_DEFAULT_VERSION_IHL; + /* set len to by default */ + ip4->total_length = rte_cpu_to_be_16(I40E_FDIR_IP_DEFAULT_LEN); + ip4->time_to_live = fdir_input->flow.ip4_flow.ttl ? + fdir_input->flow.ip4_flow.ttl : + I40E_FDIR_IP_DEFAULT_TTL; + ip4->type_of_service = fdir_input->flow.ip4_flow.tos; + ip4->next_proto_id = fdir_input->flow.ip4_flow.proto ? 
+ fdir_input->flow.ip4_flow.proto : next_proto; + /** + * The source and destination fields in the transmitted packet + * need to be presented in a reversed order with respect + * to the expected received packets. + */ + ip4->src_addr = fdir_input->flow.ip4_flow.dst_ip; + ip4->dst_addr = fdir_input->flow.ip4_flow.src_ip; + len += sizeof(struct rte_ipv4_hdr); + + return len; +} + +static inline int +i40e_flow_fdir_fill_eth_ip_head(struct i40e_pf *pf, + const struct i40e_fdir_input *fdir_input, + unsigned char *raw_pkt, + bool vlan) +{ + struct i40e_customized_pctype *cus_pctype = NULL; + static uint8_t vlan_frame[] = {0x81, 0, 0, 0}; + uint16_t *ether_type; + uint8_t len = 2 * sizeof(struct rte_ether_addr); + uint8_t pctype = fdir_input->pctype; + bool is_customized_pctype = fdir_input->flow_ext.customized_pctype; + static const uint8_t next_proto[] = { + [I40E_FILTER_PCTYPE_FRAG_IPV4] = IPPROTO_IP, + [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] = IPPROTO_TCP, + [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] = IPPROTO_UDP, + [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] = IPPROTO_SCTP, + [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] = IPPROTO_IP, + [I40E_FILTER_PCTYPE_FRAG_IPV6] = IPPROTO_NONE, + [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] = IPPROTO_TCP, + [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] = IPPROTO_UDP, + [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] = IPPROTO_SCTP, + [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] = IPPROTO_NONE, + }; + + rte_memcpy(raw_pkt, &fdir_input->flow.l2_flow.dst, + sizeof(struct rte_ether_addr)); + rte_memcpy(raw_pkt + sizeof(struct rte_ether_addr), + &fdir_input->flow.l2_flow.src, + sizeof(struct rte_ether_addr)); + raw_pkt += 2 * sizeof(struct rte_ether_addr); + + if (vlan && fdir_input->flow_ext.vlan_tci) { + rte_memcpy(raw_pkt, vlan_frame, sizeof(vlan_frame)); + rte_memcpy(raw_pkt + sizeof(uint16_t), + &fdir_input->flow_ext.vlan_tci, + sizeof(uint16_t)); + raw_pkt += sizeof(vlan_frame); + len += sizeof(vlan_frame); + } + ether_type = (uint16_t *)raw_pkt; + raw_pkt += sizeof(uint16_t); + len += sizeof(uint16_t); + + if (is_customized_pctype) { + cus_pctype = i40e_flow_fdir_find_customized_pctype(pf, pctype); + if (!cus_pctype) { + PMD_DRV_LOG(ERR, "unknown pctype %u.", + fdir_input->pctype); + return -1; + } + } + + if (pctype == I40E_FILTER_PCTYPE_L2_PAYLOAD) + *ether_type = fdir_input->flow.l2_flow.ether_type; + else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_TCP || + pctype == I40E_FILTER_PCTYPE_NONF_IPV4_UDP || + pctype == I40E_FILTER_PCTYPE_NONF_IPV4_SCTP || + pctype == I40E_FILTER_PCTYPE_NONF_IPV4_OTHER || + pctype == I40E_FILTER_PCTYPE_FRAG_IPV4 || + pctype == I40E_FILTER_PCTYPE_NONF_IPV6_TCP || + pctype == I40E_FILTER_PCTYPE_NONF_IPV6_UDP || + pctype == I40E_FILTER_PCTYPE_NONF_IPV6_SCTP || + pctype == I40E_FILTER_PCTYPE_NONF_IPV6_OTHER || + pctype == I40E_FILTER_PCTYPE_FRAG_IPV6 || + is_customized_pctype) { + if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_TCP || + pctype == I40E_FILTER_PCTYPE_NONF_IPV4_UDP || + pctype == I40E_FILTER_PCTYPE_NONF_IPV4_SCTP || + pctype == I40E_FILTER_PCTYPE_NONF_IPV4_OTHER || + pctype == I40E_FILTER_PCTYPE_FRAG_IPV4) { + len = fill_ip4_head(fdir_input, raw_pkt, + next_proto[pctype], len, ether_type); + } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_TCP || + pctype == I40E_FILTER_PCTYPE_NONF_IPV6_UDP || + pctype == I40E_FILTER_PCTYPE_NONF_IPV6_SCTP || + pctype == I40E_FILTER_PCTYPE_NONF_IPV6_OTHER || + pctype == I40E_FILTER_PCTYPE_FRAG_IPV6) { + len = fill_ip6_head(fdir_input, raw_pkt, + next_proto[pctype], len, + ether_type); + } else if (cus_pctype->index == I40E_CUSTOMIZED_GTPC || + 
cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV4 ||
+			   cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV6 ||
+			   cus_pctype->index == I40E_CUSTOMIZED_GTPU) {
+			len = fill_ip4_head(fdir_input, raw_pkt, IPPROTO_UDP,
+					len, ether_type);
+		} else if (cus_pctype->index == I40E_CUSTOMIZED_IPV4_L2TPV3) {
+			len = fill_ip4_head(fdir_input, raw_pkt, IPPROTO_L2TP,
+					len, ether_type);
+		} else if (cus_pctype->index == I40E_CUSTOMIZED_ESP_IPV4) {
+			len = fill_ip4_head(fdir_input, raw_pkt, IPPROTO_ESP,
+					len, ether_type);
+		} else if (cus_pctype->index == I40E_CUSTOMIZED_ESP_IPV4_UDP) {
+			len = fill_ip4_head(fdir_input, raw_pkt, IPPROTO_UDP,
+					len, ether_type);
+		} else if (cus_pctype->index == I40E_CUSTOMIZED_ESP_IPV6)
+			len = fill_ip6_head(fdir_input, raw_pkt, IPPROTO_ESP,
+					len, ether_type);
+		else if (cus_pctype->index == I40E_CUSTOMIZED_ESP_IPV6_UDP)
+			len = fill_ip6_head(fdir_input, raw_pkt, IPPROTO_UDP,
+					len, ether_type);
+		else if (cus_pctype->index == I40E_CUSTOMIZED_IPV6_L2TPV3)
+			len = fill_ip6_head(fdir_input, raw_pkt, IPPROTO_L2TP,
+					len, ether_type);
+	} else {
+		PMD_DRV_LOG(ERR, "unknown pctype %u.", fdir_input->pctype);
+		return -1;
+	}
+
+	return len;
+}
+
+/**
+ * i40e_flow_fdir_construct_pkt - construct packet based on fields in input
+ * @pf: board private structure
+ * @fdir_input: input set of the flow director entry
+ * @raw_pkt: a packet to be constructed
+ */
+static int
+i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
+			     const struct i40e_fdir_input *fdir_input,
+			     unsigned char *raw_pkt)
+{
+	unsigned char *payload = NULL;
+	unsigned char *ptr;
+	struct rte_udp_hdr *udp;
+	struct rte_tcp_hdr *tcp;
+	struct rte_sctp_hdr *sctp;
+	struct rte_flow_item_gtp *gtp;
+	struct rte_ipv4_hdr *gtp_ipv4;
+	struct rte_ipv6_hdr *gtp_ipv6;
+	struct rte_flow_item_l2tpv3oip *l2tpv3oip;
+	struct rte_flow_item_esp *esp;
+	struct rte_ipv4_hdr *esp_ipv4;
+	struct rte_ipv6_hdr *esp_ipv6;
+
+	uint8_t size, dst = 0;
+	uint8_t i, pit_idx, set_idx = I40E_FLXPLD_L4_IDX; /* use l4 by default */
+	int len;
+	uint8_t pctype = fdir_input->pctype;
+	struct i40e_customized_pctype *cus_pctype;
+
+	/* raw packet template - just copy contents of the raw packet */
+	if (fdir_input->flow_ext.pkt_template) {
+		memcpy(raw_pkt, fdir_input->flow.raw_flow.packet,
+		       fdir_input->flow.raw_flow.length);
+		return 0;
+	}
+
+	/* fill the ethernet and IP head */
+	len = i40e_flow_fdir_fill_eth_ip_head(pf, fdir_input, raw_pkt,
+					      !!fdir_input->flow_ext.vlan_tci);
+	if (len < 0)
+		return -EINVAL;
+
+	/* fill the L4 head */
+	if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_UDP) {
+		udp = (struct rte_udp_hdr *)(raw_pkt + len);
+		payload = (unsigned char *)udp + sizeof(struct rte_udp_hdr);
+		/**
+		 * The source and destination fields in the transmitted packet
+		 * need to be presented in a reversed order with respect
+		 * to the expected received packets.
+		 */
+		udp->src_port = fdir_input->flow.udp4_flow.dst_port;
+		udp->dst_port = fdir_input->flow.udp4_flow.src_port;
+		udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN);
+	} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_TCP) {
+		tcp = (struct rte_tcp_hdr *)(raw_pkt + len);
+		payload = (unsigned char *)tcp + sizeof(struct rte_tcp_hdr);
+		/**
+		 * The source and destination fields in the transmitted packet
+		 * need to be presented in a reversed order with respect
+		 * to the expected received packets.
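+		 * For example, a filter meant to match received packets with
+		 * TCP destination port N is programmed with N in tcp->src_port
+		 * of this constructed packet.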
+ */ + tcp->src_port = fdir_input->flow.tcp4_flow.dst_port; + tcp->dst_port = fdir_input->flow.tcp4_flow.src_port; + tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF; + } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) { + sctp = (struct rte_sctp_hdr *)(raw_pkt + len); + payload = (unsigned char *)sctp + sizeof(struct rte_sctp_hdr); + /** + * The source and destination fields in the transmitted packet + * need to be presented in a reversed order with respect + * to the expected received packets. + */ + sctp->src_port = fdir_input->flow.sctp4_flow.dst_port; + sctp->dst_port = fdir_input->flow.sctp4_flow.src_port; + sctp->tag = fdir_input->flow.sctp4_flow.verify_tag; + } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_OTHER || + pctype == I40E_FILTER_PCTYPE_FRAG_IPV4) { + payload = raw_pkt + len; + set_idx = I40E_FLXPLD_L3_IDX; + } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_UDP) { + udp = (struct rte_udp_hdr *)(raw_pkt + len); + payload = (unsigned char *)udp + sizeof(struct rte_udp_hdr); + /** + * The source and destination fields in the transmitted packet + * need to be presented in a reversed order with respect + * to the expected received packets. + */ + udp->src_port = fdir_input->flow.udp6_flow.dst_port; + udp->dst_port = fdir_input->flow.udp6_flow.src_port; + udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN); + } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_TCP) { + tcp = (struct rte_tcp_hdr *)(raw_pkt + len); + payload = (unsigned char *)tcp + sizeof(struct rte_tcp_hdr); + /** + * The source and destination fields in the transmitted packet + * need to be presented in a reversed order with respect + * to the expected received packets. + */ + tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF; + tcp->src_port = fdir_input->flow.udp6_flow.dst_port; + tcp->dst_port = fdir_input->flow.udp6_flow.src_port; + } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) { + sctp = (struct rte_sctp_hdr *)(raw_pkt + len); + payload = (unsigned char *)sctp + sizeof(struct rte_sctp_hdr); + /** + * The source and destination fields in the transmitted packet + * need to be presented in a reversed order with respect + * to the expected received packets. 
+ */ + sctp->src_port = fdir_input->flow.sctp6_flow.dst_port; + sctp->dst_port = fdir_input->flow.sctp6_flow.src_port; + sctp->tag = fdir_input->flow.sctp6_flow.verify_tag; + } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_OTHER || + pctype == I40E_FILTER_PCTYPE_FRAG_IPV6) { + payload = raw_pkt + len; + set_idx = I40E_FLXPLD_L3_IDX; + } else if (pctype == I40E_FILTER_PCTYPE_L2_PAYLOAD) { + payload = raw_pkt + len; + /** + * ARP packet is a special case on which the payload + * starts after the whole ARP header + */ + if (fdir_input->flow.l2_flow.ether_type == + rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP)) + payload += sizeof(struct rte_arp_hdr); + set_idx = I40E_FLXPLD_L2_IDX; + } else if (fdir_input->flow_ext.customized_pctype) { + /* If customized pctype is used */ + cus_pctype = i40e_flow_fdir_find_customized_pctype(pf, pctype); + if (cus_pctype->index == I40E_CUSTOMIZED_GTPC || + cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV4 || + cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV6 || + cus_pctype->index == I40E_CUSTOMIZED_GTPU) { + udp = (struct rte_udp_hdr *)(raw_pkt + len); + udp->dgram_len = + rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN); + + gtp = (struct rte_flow_item_gtp *) + ((unsigned char *)udp + + sizeof(struct rte_udp_hdr)); + gtp->msg_len = + rte_cpu_to_be_16(I40E_FDIR_GTP_DEFAULT_LEN); + gtp->teid = fdir_input->flow.gtp_flow.teid; + gtp->msg_type = I40E_FDIR_GTP_MSG_TYPE_0X01; + + /* GTP-C message type is not supported. */ + if (cus_pctype->index == I40E_CUSTOMIZED_GTPC) { + udp->dst_port = + rte_cpu_to_be_16(I40E_FDIR_GTPC_DST_PORT); + gtp->v_pt_rsv_flags = + I40E_FDIR_GTP_VER_FLAG_0X32; + } else { + udp->dst_port = + rte_cpu_to_be_16(I40E_FDIR_GTPU_DST_PORT); + gtp->v_pt_rsv_flags = + I40E_FDIR_GTP_VER_FLAG_0X30; + } + + if (cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV4) { + gtp->msg_type = I40E_FDIR_GTP_MSG_TYPE_0XFF; + gtp_ipv4 = (struct rte_ipv4_hdr *) + ((unsigned char *)gtp + + sizeof(struct rte_flow_item_gtp)); + gtp_ipv4->version_ihl = + I40E_FDIR_IP_DEFAULT_VERSION_IHL; + gtp_ipv4->next_proto_id = IPPROTO_IP; + gtp_ipv4->total_length = + rte_cpu_to_be_16( + I40E_FDIR_INNER_IP_DEFAULT_LEN); + payload = (unsigned char *)gtp_ipv4 + + sizeof(struct rte_ipv4_hdr); + } else if (cus_pctype->index == + I40E_CUSTOMIZED_GTPU_IPV6) { + gtp->msg_type = I40E_FDIR_GTP_MSG_TYPE_0XFF; + gtp_ipv6 = (struct rte_ipv6_hdr *) + ((unsigned char *)gtp + + sizeof(struct rte_flow_item_gtp)); + gtp_ipv6->vtc_flow = + rte_cpu_to_be_32( + I40E_FDIR_IPv6_DEFAULT_VTC_FLOW | + (0 << I40E_FDIR_IPv6_TC_OFFSET)); + gtp_ipv6->proto = IPPROTO_NONE; + gtp_ipv6->payload_len = + rte_cpu_to_be_16( + I40E_FDIR_INNER_IPV6_DEFAULT_LEN); + gtp_ipv6->hop_limits = + I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS; + payload = (unsigned char *)gtp_ipv6 + + sizeof(struct rte_ipv6_hdr); + } else + payload = (unsigned char *)gtp + + sizeof(struct rte_flow_item_gtp); + } else if (cus_pctype->index == I40E_CUSTOMIZED_IPV4_L2TPV3 || + cus_pctype->index == I40E_CUSTOMIZED_IPV6_L2TPV3) { + l2tpv3oip = (struct rte_flow_item_l2tpv3oip *)(raw_pkt + + len); + + if (cus_pctype->index == I40E_CUSTOMIZED_IPV4_L2TPV3) + l2tpv3oip->session_id = + fdir_input->flow.ip4_l2tpv3oip_flow.session_id; + else + l2tpv3oip->session_id = + fdir_input->flow.ip6_l2tpv3oip_flow.session_id; + payload = (unsigned char *)l2tpv3oip + + sizeof(struct rte_flow_item_l2tpv3oip); + } else if (cus_pctype->index == I40E_CUSTOMIZED_ESP_IPV4 || + cus_pctype->index == I40E_CUSTOMIZED_ESP_IPV6 || + cus_pctype->index == I40E_CUSTOMIZED_ESP_IPV4_UDP || + cus_pctype->index == 
I40E_CUSTOMIZED_ESP_IPV6_UDP) { + if (cus_pctype->index == I40E_CUSTOMIZED_ESP_IPV4) { + esp_ipv4 = (struct rte_ipv4_hdr *) + (raw_pkt + len); + esp = (struct rte_flow_item_esp *)esp_ipv4; + esp->hdr.spi = + fdir_input->flow.esp_ipv4_flow.spi; + payload = (unsigned char *)esp + + sizeof(struct rte_esp_hdr); + len += sizeof(struct rte_esp_hdr); + } else if (cus_pctype->index == + I40E_CUSTOMIZED_ESP_IPV4_UDP) { + esp_ipv4 = (struct rte_ipv4_hdr *) + (raw_pkt + len); + udp = (struct rte_udp_hdr *)esp_ipv4; + udp->dst_port = rte_cpu_to_be_16 + (I40E_FDIR_ESP_DST_PORT); + + udp->dgram_len = rte_cpu_to_be_16 + (I40E_FDIR_UDP_DEFAULT_LEN); + esp = (struct rte_flow_item_esp *) + ((unsigned char *)esp_ipv4 + + sizeof(struct rte_udp_hdr)); + esp->hdr.spi = + fdir_input->flow.esp_ipv4_udp_flow.spi; + payload = (unsigned char *)esp + + sizeof(struct rte_esp_hdr); + len += sizeof(struct rte_udp_hdr) + + sizeof(struct rte_esp_hdr); + } else if (cus_pctype->index == + I40E_CUSTOMIZED_ESP_IPV6) { + esp_ipv6 = (struct rte_ipv6_hdr *) + (raw_pkt + len); + esp = (struct rte_flow_item_esp *)esp_ipv6; + esp->hdr.spi = + fdir_input->flow.esp_ipv6_flow.spi; + payload = (unsigned char *)esp + + sizeof(struct rte_esp_hdr); + len += sizeof(struct rte_esp_hdr); + } else if (cus_pctype->index == + I40E_CUSTOMIZED_ESP_IPV6_UDP) { + esp_ipv6 = (struct rte_ipv6_hdr *) + (raw_pkt + len); + udp = (struct rte_udp_hdr *)esp_ipv6; + udp->dst_port = rte_cpu_to_be_16 + (I40E_FDIR_ESP_DST_PORT); + + udp->dgram_len = rte_cpu_to_be_16 + (I40E_FDIR_UDP_DEFAULT_LEN); + esp = (struct rte_flow_item_esp *) + ((unsigned char *)esp_ipv6 + + sizeof(struct rte_udp_hdr)); + esp->hdr.spi = + fdir_input->flow.esp_ipv6_udp_flow.spi; + payload = (unsigned char *)esp + + sizeof(struct rte_esp_hdr); + len += sizeof(struct rte_udp_hdr) + + sizeof(struct rte_esp_hdr); + } + } + } else { + PMD_DRV_LOG(ERR, "unknown pctype %u.", fdir_input->pctype); + return -1; + } + + /* fill the flexbytes to payload */ + for (i = 0; i < I40E_MAX_FLXPLD_FIED; i++) { + pit_idx = set_idx * I40E_MAX_FLXPLD_FIED + i; + size = pf->fdir.flex_set[pit_idx].size; + if (size == 0) + continue; + dst = pf->fdir.flex_set[pit_idx].dst_offset * sizeof(uint16_t); + ptr = payload + + pf->fdir.flex_set[pit_idx].src_offset * sizeof(uint16_t); + (void)rte_memcpy(ptr, + &fdir_input->flow_ext.flexbytes[dst], + size * sizeof(uint16_t)); + } + + return 0; +} + +/* Construct the tx flags */ +static inline uint64_t +i40e_build_ctob(uint32_t td_cmd, + uint32_t td_offset, + unsigned int size, + uint32_t td_tag) +{ + return rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DATA | + ((uint64_t)td_cmd << I40E_TXD_QW1_CMD_SHIFT) | + ((uint64_t)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) | + ((uint64_t)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) | + ((uint64_t)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT)); +} + +/* + * check the programming status descriptor in rx queue. 
+ * done after Programming Flow Director is programmed on + * tx queue + */ +static inline int +i40e_check_fdir_programming_status(struct i40e_rx_queue *rxq) +{ + volatile union i40e_rx_desc *rxdp; + uint64_t qword1; + uint32_t rx_status; + uint32_t len, id; + uint32_t error; + int ret = 0; + + rxdp = &rxq->rx_ring[rxq->rx_tail]; + qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len); + rx_status = (qword1 & I40E_RXD_QW1_STATUS_MASK) + >> I40E_RXD_QW1_STATUS_SHIFT; + + if (rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) { + len = qword1 >> I40E_RX_PROG_STATUS_DESC_LENGTH_SHIFT; + id = (qword1 & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >> + I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT; + + if (len == I40E_RX_PROG_STATUS_DESC_LENGTH && + id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS) { + error = (qword1 & + I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >> + I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT; + if (error == (0x1 << + I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) { + PMD_DRV_LOG(ERR, "Failed to add FDIR filter" + " (FD_ID %u): programming status" + " reported.", + rxdp->wb.qword0.hi_dword.fd_id); + ret = -1; + } else if (error == (0x1 << + I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) { + PMD_DRV_LOG(ERR, "Failed to delete FDIR filter" + " (FD_ID %u): programming status" + " reported.", + rxdp->wb.qword0.hi_dword.fd_id); + ret = -1; + } else + PMD_DRV_LOG(ERR, "invalid programming status" + " reported, error = %u.", error); + } else + PMD_DRV_LOG(INFO, "unknown programming status" + " reported, len = %d, id = %u.", len, id); + rxdp->wb.qword1.status_error_len = 0; + rxq->rx_tail++; + if (unlikely(rxq->rx_tail == rxq->nb_rx_desc)) + rxq->rx_tail = 0; + if (rxq->rx_tail == 0) + I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1); + else + I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_tail - 1); + } + + return ret; +} + +static int +i40e_fdir_filter_convert(const struct i40e_fdir_filter_conf *input, + struct i40e_fdir_filter *filter) +{ + rte_memcpy(&filter->fdir, input, sizeof(struct i40e_fdir_filter_conf)); + if (input->input.flow_ext.pkt_template) { + filter->fdir.input.flow.raw_flow.packet = NULL; + filter->fdir.input.flow.raw_flow.length = + rte_hash_crc(input->input.flow.raw_flow.packet, + input->input.flow.raw_flow.length, + input->input.flow.raw_flow.pctype); + } + return 0; +} + +/* Check if there exists the flow director filter */ +static struct i40e_fdir_filter * +i40e_sw_fdir_filter_lookup(struct i40e_fdir_info *fdir_info, + const struct i40e_fdir_input *input) +{ + int ret; + + if (input->flow_ext.pkt_template) + ret = rte_hash_lookup_with_hash(fdir_info->hash_table, + (const void *)input, + input->flow.raw_flow.length); + else + ret = rte_hash_lookup(fdir_info->hash_table, + (const void *)input); + if (ret < 0) + return NULL; + + return fdir_info->hash_map[ret]; +} + +/* Add a flow director filter into the SW list */ +static int +i40e_sw_fdir_filter_insert(struct i40e_pf *pf, struct i40e_fdir_filter *filter) +{ + struct i40e_fdir_info *fdir_info = &pf->fdir; + int ret; + + if (filter->fdir.input.flow_ext.pkt_template) + ret = rte_hash_add_key_with_hash(fdir_info->hash_table, + &filter->fdir.input, + filter->fdir.input.flow.raw_flow.length); + else + ret = rte_hash_add_key(fdir_info->hash_table, + &filter->fdir.input); + if (ret < 0) { + PMD_DRV_LOG(ERR, + "Failed to insert fdir filter to hash table %d!", + ret); + return ret; + } + fdir_info->hash_map[ret] = filter; + + TAILQ_INSERT_TAIL(&fdir_info->fdir_list, filter, rules); + + return 0; +} + +/* Delete a flow director 
filter from the SW list */ +int +i40e_sw_fdir_filter_del(struct i40e_pf *pf, struct i40e_fdir_input *input) +{ + struct i40e_fdir_info *fdir_info = &pf->fdir; + struct i40e_fdir_filter *filter; + int ret; + + if (input->flow_ext.pkt_template) + ret = rte_hash_del_key_with_hash(fdir_info->hash_table, + input, + input->flow.raw_flow.length); + else + ret = rte_hash_del_key(fdir_info->hash_table, input); + if (ret < 0) { + PMD_DRV_LOG(ERR, + "Failed to delete fdir filter to hash table %d!", + ret); + return ret; + } + filter = fdir_info->hash_map[ret]; + fdir_info->hash_map[ret] = NULL; + + TAILQ_REMOVE(&fdir_info->fdir_list, filter, rules); + rte_free(filter); + + return 0; +} + +/* + * i40e_add_del_fdir_filter - add or remove a flow director filter. + * @pf: board private structure + * @filter: fdir filter entry + * @add: 0 - delete, 1 - add + */ +int +i40e_add_del_fdir_filter(struct rte_eth_dev *dev, + const struct rte_eth_fdir_filter *filter, + bool add) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt; + enum i40e_filter_pctype pctype; + int ret = 0; + + if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) { + PMD_DRV_LOG(ERR, "FDIR is not enabled, please" + " check the mode in fdir_conf."); + return -ENOTSUP; + } + + pctype = i40e_flowtype_to_pctype(pf->adapter, filter->input.flow_type); + if (pctype == I40E_FILTER_PCTYPE_INVALID) { + PMD_DRV_LOG(ERR, "invalid flow_type input."); + return -EINVAL; + } + if (filter->action.rx_queue >= pf->dev_data->nb_rx_queues) { + PMD_DRV_LOG(ERR, "Invalid queue ID"); + return -EINVAL; + } + if (filter->input.flow_ext.is_vf && + filter->input.flow_ext.dst_id >= pf->vf_num) { + PMD_DRV_LOG(ERR, "Invalid VF ID"); + return -EINVAL; + } + + memset(pkt, 0, I40E_FDIR_PKT_LEN); + + ret = i40e_fdir_construct_pkt(pf, &filter->input, pkt); + if (ret < 0) { + PMD_DRV_LOG(ERR, "construct packet for fdir fails."); + return ret; + } + + if (hw->mac.type == I40E_MAC_X722) { + /* get translated pctype value in fd pctype register */ + pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl( + hw, I40E_GLQF_FD_PCTYPES((int)pctype)); + } + + ret = i40e_fdir_filter_programming(pf, pctype, filter, add); + if (ret < 0) { + PMD_DRV_LOG(ERR, "fdir programming fails for PCTYPE(%u).", + pctype); + return ret; + } + + return ret; +} + +/** + * i40e_flow_add_del_fdir_filter - add or remove a flow director filter. 
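+ * On success the driver's software list (hash table plus TAILQ) is updated
+ * to mirror the hardware state.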
+ * @pf: board private structure
+ * @filter: fdir filter entry
+ * @add: 0 - delete, 1 - add
+ */
+int
+i40e_flow_add_del_fdir_filter(struct rte_eth_dev *dev,
+			      const struct i40e_fdir_filter_conf *filter,
+			      bool add)
+{
+	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
+	enum i40e_filter_pctype pctype;
+	struct i40e_fdir_info *fdir_info = &pf->fdir;
+	struct i40e_fdir_filter *fdir_filter, *node;
+	struct i40e_fdir_filter check_filter; /* Check if the filter exists */
+	int ret = 0;
+
+	if (pf->fdir.fdir_vsi == NULL) {
+		PMD_DRV_LOG(ERR, "FDIR is not enabled");
+		return -ENOTSUP;
+	}
+
+	if (filter->action.rx_queue >= pf->dev_data->nb_rx_queues) {
+		PMD_DRV_LOG(ERR, "Invalid queue ID");
+		return -EINVAL;
+	}
+	if (filter->input.flow_ext.is_vf &&
+	    filter->input.flow_ext.dst_id >= pf->vf_num) {
+		PMD_DRV_LOG(ERR, "Invalid VF ID");
+		return -EINVAL;
+	}
+	if (filter->input.flow_ext.pkt_template) {
+		if (filter->input.flow.raw_flow.length > I40E_FDIR_PKT_LEN ||
+		    !filter->input.flow.raw_flow.packet) {
+			PMD_DRV_LOG(ERR, "Invalid raw packet template"
+				" flow filter parameters!");
+			return -EINVAL;
+		}
+		pctype = filter->input.flow.raw_flow.pctype;
+	} else {
+		pctype = filter->input.pctype;
+	}
+
+	/* Check if there is the filter in SW list */
+	memset(&check_filter, 0, sizeof(check_filter));
+	i40e_fdir_filter_convert(filter, &check_filter);
+	node = i40e_sw_fdir_filter_lookup(fdir_info, &check_filter.fdir.input);
+	if (add && node) {
+		PMD_DRV_LOG(ERR,
+			    "Conflict with existing flow director rules!");
+		return -EINVAL;
+	}
+
+	if (!add && !node) {
+		PMD_DRV_LOG(ERR,
+			    "There's no corresponding flow director filter!");
+		return -EINVAL;
+	}
+
+	memset(pkt, 0, I40E_FDIR_PKT_LEN);
+
+	ret = i40e_flow_fdir_construct_pkt(pf, &filter->input, pkt);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "construct packet for fdir fails.");
+		return ret;
+	}
+
+	if (hw->mac.type == I40E_MAC_X722) {
+		/* get translated pctype value in fd pctype register */
+		pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl(
+			hw, I40E_GLQF_FD_PCTYPES((int)pctype));
+	}
+
+	ret = i40e_flow_fdir_filter_programming(pf, pctype, filter, add);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "fdir programming fails for PCTYPE(%u).",
+			    pctype);
+		return ret;
+	}
+
+	if (add) {
+		fdir_filter = rte_zmalloc("fdir_filter",
+					  sizeof(*fdir_filter), 0);
+		if (fdir_filter == NULL) {
+			PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+			return -ENOMEM;
+		}
+
+		rte_memcpy(fdir_filter, &check_filter, sizeof(check_filter));
+		ret = i40e_sw_fdir_filter_insert(pf, fdir_filter);
+		if (ret < 0)
+			rte_free(fdir_filter);
+	} else {
+		ret = i40e_sw_fdir_filter_del(pf, &node->fdir.input);
+	}
+
+	return ret;
+}
+
+/*
+ * i40e_fdir_filter_programming - Program a flow director filter rule.
+ * Is done by Flow Director Programming Descriptor followed by packet
+ * structure that contains the filter fields need to match.
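+ * Two transmit descriptors are used: the programming descriptor itself and
+ * a data descriptor pointing at the pre-built dummy packet.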
+ * @pf: board private structure + * @pctype: pctype + * @filter: fdir filter entry + * @add: 0 - delete, 1 - add + */ +static int +i40e_fdir_filter_programming(struct i40e_pf *pf, + enum i40e_filter_pctype pctype, + const struct rte_eth_fdir_filter *filter, + bool add) +{ + struct i40e_tx_queue *txq = pf->fdir.txq; + struct i40e_rx_queue *rxq = pf->fdir.rxq; + const struct rte_eth_fdir_action *fdir_action = &filter->action; + volatile struct i40e_tx_desc *txdp; + volatile struct i40e_filter_program_desc *fdirdp; + uint32_t td_cmd; + uint16_t vsi_id, i; + uint8_t dest; + + PMD_DRV_LOG(INFO, "filling filter programming descriptor."); + fdirdp = (volatile struct i40e_filter_program_desc *) + (&(txq->tx_ring[txq->tx_tail])); + + fdirdp->qindex_flex_ptype_vsi = + rte_cpu_to_le_32((fdir_action->rx_queue << + I40E_TXD_FLTR_QW0_QINDEX_SHIFT) & + I40E_TXD_FLTR_QW0_QINDEX_MASK); + + fdirdp->qindex_flex_ptype_vsi |= + rte_cpu_to_le_32((fdir_action->flex_off << + I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) & + I40E_TXD_FLTR_QW0_FLEXOFF_MASK); + + fdirdp->qindex_flex_ptype_vsi |= + rte_cpu_to_le_32((pctype << + I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) & + I40E_TXD_FLTR_QW0_PCTYPE_MASK); + + if (filter->input.flow_ext.is_vf) + vsi_id = pf->vfs[filter->input.flow_ext.dst_id].vsi->vsi_id; + else + /* Use LAN VSI Id by default */ + vsi_id = pf->main_vsi->vsi_id; + fdirdp->qindex_flex_ptype_vsi |= + rte_cpu_to_le_32(((uint32_t)vsi_id << + I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) & + I40E_TXD_FLTR_QW0_DEST_VSI_MASK); + + fdirdp->dtype_cmd_cntindex = + rte_cpu_to_le_32(I40E_TX_DESC_DTYPE_FILTER_PROG); + + if (add) + fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32( + I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE << + I40E_TXD_FLTR_QW1_PCMD_SHIFT); + else + fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32( + I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE << + I40E_TXD_FLTR_QW1_PCMD_SHIFT); + + if (fdir_action->behavior == RTE_ETH_FDIR_REJECT) + dest = I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET; + else if (fdir_action->behavior == RTE_ETH_FDIR_ACCEPT) + dest = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX; + else if (fdir_action->behavior == RTE_ETH_FDIR_PASSTHRU) + dest = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_OTHER; + else { + PMD_DRV_LOG(ERR, "Failed to program FDIR filter:" + " unsupported fdir behavior."); + return -EINVAL; + } + + fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32((dest << + I40E_TXD_FLTR_QW1_DEST_SHIFT) & + I40E_TXD_FLTR_QW1_DEST_MASK); + + fdirdp->dtype_cmd_cntindex |= + rte_cpu_to_le_32((fdir_action->report_status<< + I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) & + I40E_TXD_FLTR_QW1_FD_STATUS_MASK); + + fdirdp->dtype_cmd_cntindex |= + rte_cpu_to_le_32(I40E_TXD_FLTR_QW1_CNT_ENA_MASK); + fdirdp->dtype_cmd_cntindex |= + rte_cpu_to_le_32( + ((uint32_t)pf->fdir.match_counter_index << + I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) & + I40E_TXD_FLTR_QW1_CNTINDEX_MASK); + + fdirdp->fd_id = rte_cpu_to_le_32(filter->soft_id); + + PMD_DRV_LOG(INFO, "filling transmit descriptor."); + txdp = &(txq->tx_ring[txq->tx_tail + 1]); + txdp->buffer_addr = rte_cpu_to_le_64(pf->fdir.dma_addr); + td_cmd = I40E_TX_DESC_CMD_EOP | + I40E_TX_DESC_CMD_RS | + I40E_TX_DESC_CMD_DUMMY; + + txdp->cmd_type_offset_bsz = + i40e_build_ctob(td_cmd, 0, I40E_FDIR_PKT_LEN, 0); + + txq->tx_tail += 2; /* set 2 descriptors above, fdirdp and txdp */ + if (txq->tx_tail >= txq->nb_tx_desc) + txq->tx_tail = 0; + /* Update the tx tail register */ + rte_wmb(); + I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail); + for (i = 0; i < I40E_FDIR_MAX_WAIT_US; i++) { + if ((txdp->cmd_type_offset_bsz & + 
rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) == + rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE)) + break; + rte_delay_us(1); + } + if (i >= I40E_FDIR_MAX_WAIT_US) { + PMD_DRV_LOG(ERR, "Failed to program FDIR filter:" + " time out to get DD on tx queue."); + return -ETIMEDOUT; + } + /* totally delay 10 ms to check programming status*/ + for (; i < I40E_FDIR_MAX_WAIT_US; i++) { + if (i40e_check_fdir_programming_status(rxq) >= 0) + return 0; + rte_delay_us(1); + } + PMD_DRV_LOG(ERR, + "Failed to program FDIR filter: programming status reported."); + return -ETIMEDOUT; +} + +/* + * i40e_flow_fdir_filter_programming - Program a flow director filter rule. + * Is done by Flow Director Programming Descriptor followed by packet + * structure that contains the filter fields need to match. + * @pf: board private structure + * @pctype: pctype + * @filter: fdir filter entry + * @add: 0 - delete, 1 - add + */ +static int +i40e_flow_fdir_filter_programming(struct i40e_pf *pf, + enum i40e_filter_pctype pctype, + const struct i40e_fdir_filter_conf *filter, + bool add) +{ + struct i40e_tx_queue *txq = pf->fdir.txq; + struct i40e_rx_queue *rxq = pf->fdir.rxq; + const struct i40e_fdir_action *fdir_action = &filter->action; + volatile struct i40e_tx_desc *txdp; + volatile struct i40e_filter_program_desc *fdirdp; + uint32_t td_cmd; + uint16_t vsi_id, i; + uint8_t dest; + + PMD_DRV_LOG(INFO, "filling filter programming descriptor."); + fdirdp = (volatile struct i40e_filter_program_desc *) + (&txq->tx_ring[txq->tx_tail]); + + fdirdp->qindex_flex_ptype_vsi = + rte_cpu_to_le_32((fdir_action->rx_queue << + I40E_TXD_FLTR_QW0_QINDEX_SHIFT) & + I40E_TXD_FLTR_QW0_QINDEX_MASK); + + fdirdp->qindex_flex_ptype_vsi |= + rte_cpu_to_le_32((fdir_action->flex_off << + I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) & + I40E_TXD_FLTR_QW0_FLEXOFF_MASK); + + fdirdp->qindex_flex_ptype_vsi |= + rte_cpu_to_le_32((pctype << + I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) & + I40E_TXD_FLTR_QW0_PCTYPE_MASK); + + if (filter->input.flow_ext.is_vf) + vsi_id = pf->vfs[filter->input.flow_ext.dst_id].vsi->vsi_id; + else + /* Use LAN VSI Id by default */ + vsi_id = pf->main_vsi->vsi_id; + fdirdp->qindex_flex_ptype_vsi |= + rte_cpu_to_le_32(((uint32_t)vsi_id << + I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) & + I40E_TXD_FLTR_QW0_DEST_VSI_MASK); + + fdirdp->dtype_cmd_cntindex = + rte_cpu_to_le_32(I40E_TX_DESC_DTYPE_FILTER_PROG); + + if (add) + fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32( + I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE << + I40E_TXD_FLTR_QW1_PCMD_SHIFT); + else + fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32( + I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE << + I40E_TXD_FLTR_QW1_PCMD_SHIFT); + + if (fdir_action->behavior == I40E_FDIR_REJECT) + dest = I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET; + else if (fdir_action->behavior == I40E_FDIR_ACCEPT) + dest = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX; + else if (fdir_action->behavior == I40E_FDIR_PASSTHRU) + dest = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_OTHER; + else { + PMD_DRV_LOG(ERR, "Failed to program FDIR filter: unsupported fdir behavior."); + return -EINVAL; + } + + fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32((dest << + I40E_TXD_FLTR_QW1_DEST_SHIFT) & + I40E_TXD_FLTR_QW1_DEST_MASK); + + fdirdp->dtype_cmd_cntindex |= + rte_cpu_to_le_32((fdir_action->report_status << + I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) & + I40E_TXD_FLTR_QW1_FD_STATUS_MASK); + + fdirdp->dtype_cmd_cntindex |= + rte_cpu_to_le_32(I40E_TXD_FLTR_QW1_CNT_ENA_MASK); + fdirdp->dtype_cmd_cntindex |= + rte_cpu_to_le_32( + 
((uint32_t)pf->fdir.match_counter_index << + I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) & + I40E_TXD_FLTR_QW1_CNTINDEX_MASK); + + fdirdp->fd_id = rte_cpu_to_le_32(filter->soft_id); + + PMD_DRV_LOG(INFO, "filling transmit descriptor."); + txdp = &txq->tx_ring[txq->tx_tail + 1]; + txdp->buffer_addr = rte_cpu_to_le_64(pf->fdir.dma_addr); + td_cmd = I40E_TX_DESC_CMD_EOP | + I40E_TX_DESC_CMD_RS | + I40E_TX_DESC_CMD_DUMMY; + + txdp->cmd_type_offset_bsz = + i40e_build_ctob(td_cmd, 0, I40E_FDIR_PKT_LEN, 0); + + txq->tx_tail += 2; /* set 2 descriptors above, fdirdp and txdp */ + if (txq->tx_tail >= txq->nb_tx_desc) + txq->tx_tail = 0; + /* Update the tx tail register */ + rte_wmb(); + I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail); + for (i = 0; i < I40E_FDIR_MAX_WAIT_US; i++) { + if ((txdp->cmd_type_offset_bsz & + rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) == + rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE)) + break; + rte_delay_us(1); + } + if (i >= I40E_FDIR_MAX_WAIT_US) { + PMD_DRV_LOG(ERR, + "Failed to program FDIR filter: time out to get DD on tx queue."); + return -ETIMEDOUT; + } + /* totally delay 10 ms to check programming status*/ + rte_delay_us(I40E_FDIR_MAX_WAIT_US); + if (i40e_check_fdir_programming_status(rxq) < 0) { + PMD_DRV_LOG(ERR, + "Failed to program FDIR filter: programming status reported."); + return -ETIMEDOUT; + } + + return 0; +} + +/* + * i40e_fdir_flush - clear all filters of Flow Director table + * @pf: board private structure + */ +int +i40e_fdir_flush(struct rte_eth_dev *dev) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + uint32_t reg; + uint16_t guarant_cnt, best_cnt; + uint16_t i; + + I40E_WRITE_REG(hw, I40E_PFQF_CTL_1, I40E_PFQF_CTL_1_CLEARFDTABLE_MASK); + I40E_WRITE_FLUSH(hw); + + for (i = 0; i < I40E_FDIR_FLUSH_RETRY; i++) { + rte_delay_ms(I40E_FDIR_FLUSH_INTERVAL_MS); + reg = I40E_READ_REG(hw, I40E_PFQF_CTL_1); + if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK)) + break; + } + if (i >= I40E_FDIR_FLUSH_RETRY) { + PMD_DRV_LOG(ERR, "FD table did not flush, may need more time."); + return -ETIMEDOUT; + } + guarant_cnt = (uint16_t)((I40E_READ_REG(hw, I40E_PFQF_FDSTAT) & + I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) >> + I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT); + best_cnt = (uint16_t)((I40E_READ_REG(hw, I40E_PFQF_FDSTAT) & + I40E_PFQF_FDSTAT_BEST_CNT_MASK) >> + I40E_PFQF_FDSTAT_BEST_CNT_SHIFT); + if (guarant_cnt != 0 || best_cnt != 0) { + PMD_DRV_LOG(ERR, "Failed to flush FD table."); + return -ENOSYS; + } else + PMD_DRV_LOG(INFO, "FD table Flush success."); + return 0; +} + +static inline void +i40e_fdir_info_get_flex_set(struct i40e_pf *pf, + struct rte_eth_flex_payload_cfg *flex_set, + uint16_t *num) +{ + struct i40e_fdir_flex_pit *flex_pit; + struct rte_eth_flex_payload_cfg *ptr = flex_set; + uint16_t src, dst, size, j, k; + uint8_t i, layer_idx; + + for (layer_idx = I40E_FLXPLD_L2_IDX; + layer_idx <= I40E_FLXPLD_L4_IDX; + layer_idx++) { + if (layer_idx == I40E_FLXPLD_L2_IDX) + ptr->type = RTE_ETH_L2_PAYLOAD; + else if (layer_idx == I40E_FLXPLD_L3_IDX) + ptr->type = RTE_ETH_L3_PAYLOAD; + else if (layer_idx == I40E_FLXPLD_L4_IDX) + ptr->type = RTE_ETH_L4_PAYLOAD; + + for (i = 0; i < I40E_MAX_FLXPLD_FIED; i++) { + flex_pit = &pf->fdir.flex_set[layer_idx * + I40E_MAX_FLXPLD_FIED + i]; + if (flex_pit->size == 0) + continue; + src = flex_pit->src_offset * sizeof(uint16_t); + dst = flex_pit->dst_offset * sizeof(uint16_t); + size = flex_pit->size * sizeof(uint16_t); + for (j = src, k = dst; j < src + size; j++, k++) + 
ptr->src_offset[k] = j; + } + (*num)++; + ptr++; + } +} + +static inline void +i40e_fdir_info_get_flex_mask(struct i40e_pf *pf, + struct rte_eth_fdir_flex_mask *flex_mask, + uint16_t *num) +{ + struct i40e_fdir_flex_mask *mask; + struct rte_eth_fdir_flex_mask *ptr = flex_mask; + uint16_t flow_type; + uint8_t i, j; + uint16_t off_bytes, mask_tmp; + + for (i = I40E_FILTER_PCTYPE_NONF_IPV4_UDP; + i <= I40E_FILTER_PCTYPE_L2_PAYLOAD; + i++) { + mask = &pf->fdir.flex_mask[i]; + flow_type = i40e_pctype_to_flowtype(pf->adapter, + (enum i40e_filter_pctype)i); + if (flow_type == RTE_ETH_FLOW_UNKNOWN) + continue; + + for (j = 0; j < I40E_FDIR_MAX_FLEXWORD_NUM; j++) { + if (mask->word_mask & I40E_FLEX_WORD_MASK(j)) { + ptr->mask[j * sizeof(uint16_t)] = UINT8_MAX; + ptr->mask[j * sizeof(uint16_t) + 1] = UINT8_MAX; + } else { + ptr->mask[j * sizeof(uint16_t)] = 0x0; + ptr->mask[j * sizeof(uint16_t) + 1] = 0x0; + } + } + for (j = 0; j < I40E_FDIR_BITMASK_NUM_WORD; j++) { + off_bytes = mask->bitmask[j].offset * sizeof(uint16_t); + mask_tmp = ~mask->bitmask[j].mask; + ptr->mask[off_bytes] &= I40E_HI_BYTE(mask_tmp); + ptr->mask[off_bytes + 1] &= I40E_LO_BYTE(mask_tmp); + } + ptr->flow_type = flow_type; + ptr++; + (*num)++; + } +} + +/* + * i40e_fdir_info_get - get information of Flow Director + * @pf: ethernet device to get info from + * @fdir: a pointer to a structure of type *rte_eth_fdir_info* to be filled with + * the flow director information. + */ +static void +i40e_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + uint16_t num_flex_set = 0; + uint16_t num_flex_mask = 0; + uint16_t i; + + if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT) + fdir->mode = RTE_FDIR_MODE_PERFECT; + else + fdir->mode = RTE_FDIR_MODE_NONE; + + fdir->guarant_spc = + (uint32_t)hw->func_caps.fd_filters_guaranteed; + fdir->best_spc = + (uint32_t)hw->func_caps.fd_filters_best_effort; + fdir->max_flexpayload = I40E_FDIR_MAX_FLEX_LEN; + fdir->flow_types_mask[0] = I40E_FDIR_FLOWS; + for (i = 1; i < RTE_FLOW_MASK_ARRAY_SIZE; i++) + fdir->flow_types_mask[i] = 0ULL; + fdir->flex_payload_unit = sizeof(uint16_t); + fdir->flex_bitmask_unit = sizeof(uint16_t); + fdir->max_flex_payload_segment_num = I40E_MAX_FLXPLD_FIED; + fdir->flex_payload_limit = I40E_MAX_FLX_SOURCE_OFF; + fdir->max_flex_bitmask_num = I40E_FDIR_BITMASK_NUM_WORD; + + i40e_fdir_info_get_flex_set(pf, + fdir->flex_conf.flex_set, + &num_flex_set); + i40e_fdir_info_get_flex_mask(pf, + fdir->flex_conf.flex_mask, + &num_flex_mask); + + fdir->flex_conf.nb_payloads = num_flex_set; + fdir->flex_conf.nb_flexmasks = num_flex_mask; +} + +/* + * i40e_fdir_stat_get - get statistics of Flow Director + * @pf: ethernet device to get info from + * @stat: a pointer to a structure of type *rte_eth_fdir_stats* to be filled with + * the flow director statistics. 
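+ * Both counters are read from the I40E_PFQF_FDSTAT register (guaranteed and
+ * best effort filter space usage).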
+ */ +static void +i40e_fdir_stats_get(struct rte_eth_dev *dev, struct rte_eth_fdir_stats *stat) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + uint32_t fdstat; + + fdstat = I40E_READ_REG(hw, I40E_PFQF_FDSTAT); + stat->guarant_cnt = + (uint32_t)((fdstat & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) >> + I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT); + stat->best_cnt = + (uint32_t)((fdstat & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >> + I40E_PFQF_FDSTAT_BEST_CNT_SHIFT); +} + +static int +i40e_fdir_filter_set(struct rte_eth_dev *dev, + struct rte_eth_fdir_filter_info *info) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + int ret = 0; + + if (!info) { + PMD_DRV_LOG(ERR, "Invalid pointer"); + return -EFAULT; + } + + switch (info->info_type) { + case RTE_ETH_FDIR_FILTER_INPUT_SET_SELECT: + ret = i40e_fdir_filter_inset_select(pf, + &(info->info.input_set_conf)); + break; + default: + PMD_DRV_LOG(ERR, "FD filter info type (%d) not supported", + info->info_type); + return -EINVAL; + } + + return ret; +} + +/* + * i40e_fdir_ctrl_func - deal with all operations on flow director. + * @pf: board private structure + * @filter_op:operation will be taken. + * @arg: a pointer to specific structure corresponding to the filter_op + */ +int +i40e_fdir_ctrl_func(struct rte_eth_dev *dev, + enum rte_filter_op filter_op, + void *arg) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + int ret = 0; + + if ((pf->flags & I40E_FLAG_FDIR) == 0) + return -ENOTSUP; + + if (filter_op == RTE_ETH_FILTER_NOP) + return 0; + + if (arg == NULL && filter_op != RTE_ETH_FILTER_FLUSH) + return -EINVAL; + + switch (filter_op) { + case RTE_ETH_FILTER_ADD: + ret = i40e_add_del_fdir_filter(dev, + (struct rte_eth_fdir_filter *)arg, + TRUE); + break; + case RTE_ETH_FILTER_DELETE: + ret = i40e_add_del_fdir_filter(dev, + (struct rte_eth_fdir_filter *)arg, + FALSE); + break; + case RTE_ETH_FILTER_FLUSH: + ret = i40e_fdir_flush(dev); + break; + case RTE_ETH_FILTER_INFO: + i40e_fdir_info_get(dev, (struct rte_eth_fdir_info *)arg); + break; + case RTE_ETH_FILTER_SET: + ret = i40e_fdir_filter_set(dev, + (struct rte_eth_fdir_filter_info *)arg); + break; + case RTE_ETH_FILTER_STATS: + i40e_fdir_stats_get(dev, (struct rte_eth_fdir_stats *)arg); + break; + default: + PMD_DRV_LOG(ERR, "unknown operation %u.", filter_op); + ret = -EINVAL; + break; + } + return ret; +} + +/* Restore flow director filter */ +void +i40e_fdir_filter_restore(struct i40e_pf *pf) +{ + struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(pf->main_vsi); + struct i40e_fdir_filter_list *fdir_list = &pf->fdir.fdir_list; + struct i40e_fdir_filter *f; + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + uint32_t fdstat; + uint32_t guarant_cnt; /**< Number of filters in guaranteed spaces. */ + uint32_t best_cnt; /**< Number of filters in best effort spaces. 
*/ + + TAILQ_FOREACH(f, fdir_list, rules) + i40e_flow_add_del_fdir_filter(dev, &f->fdir, TRUE); + + fdstat = I40E_READ_REG(hw, I40E_PFQF_FDSTAT); + guarant_cnt = + (uint32_t)((fdstat & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) >> + I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT); + best_cnt = + (uint32_t)((fdstat & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >> + I40E_PFQF_FDSTAT_BEST_CNT_SHIFT); + + PMD_DRV_LOG(INFO, "FDIR: Guarant count: %d, Best count: %d", + guarant_cnt, best_cnt); +} diff --git a/src/spdk/dpdk/drivers/net/i40e/i40e_flow.c b/src/spdk/dpdk/drivers/net/i40e/i40e_flow.c new file mode 100644 index 000000000..8f8df6fae --- /dev/null +++ b/src/spdk/dpdk/drivers/net/i40e/i40e_flow.c @@ -0,0 +1,5464 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2016-2017 Intel Corporation + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "i40e_logs.h" +#include "base/i40e_type.h" +#include "base/i40e_prototype.h" +#include "i40e_ethdev.h" + +#define I40E_IPV6_TC_MASK (0xFF << I40E_FDIR_IPv6_TC_OFFSET) +#define I40E_IPV6_FRAG_HEADER 44 +#define I40E_TENANT_ARRAY_NUM 3 +#define I40E_TCI_MASK 0xFFFF + +static int i40e_flow_validate(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error); +static struct rte_flow *i40e_flow_create(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error); +static int i40e_flow_destroy(struct rte_eth_dev *dev, + struct rte_flow *flow, + struct rte_flow_error *error); +static int i40e_flow_flush(struct rte_eth_dev *dev, + struct rte_flow_error *error); +static int +i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev, + const struct rte_flow_item *pattern, + struct rte_flow_error *error, + struct rte_eth_ethertype_filter *filter); +static int i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev, + const struct rte_flow_action *actions, + struct rte_flow_error *error, + struct rte_eth_ethertype_filter *filter); +static int i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item *pattern, + struct rte_flow_error *error, + struct i40e_fdir_filter_conf *filter); +static int i40e_flow_parse_fdir_action(struct rte_eth_dev *dev, + const struct rte_flow_action *actions, + struct rte_flow_error *error, + struct i40e_fdir_filter_conf *filter); +static int i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev, + const struct rte_flow_action *actions, + struct rte_flow_error *error, + struct i40e_tunnel_filter_conf *filter); +static int i40e_flow_parse_attr(const struct rte_flow_attr *attr, + struct rte_flow_error *error); +static int i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error, + union i40e_filter_t *filter); +static int i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error, + union i40e_filter_t *filter); +static int i40e_flow_parse_vxlan_filter(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action 
actions[], + struct rte_flow_error *error, + union i40e_filter_t *filter); +static int i40e_flow_parse_nvgre_filter(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error, + union i40e_filter_t *filter); +static int i40e_flow_parse_mpls_filter(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error, + union i40e_filter_t *filter); +static int i40e_flow_parse_gtp_filter(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error, + union i40e_filter_t *filter); +static int i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf, + struct i40e_ethertype_filter *filter); +static int i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf, + struct i40e_tunnel_filter *filter); +static int i40e_flow_flush_fdir_filter(struct i40e_pf *pf); +static int i40e_flow_flush_ethertype_filter(struct i40e_pf *pf); +static int i40e_flow_flush_tunnel_filter(struct i40e_pf *pf); +static int i40e_flow_flush_rss_filter(struct rte_eth_dev *dev); +static int +i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error, + union i40e_filter_t *filter); +static int +i40e_flow_parse_qinq_pattern(struct rte_eth_dev *dev, + const struct rte_flow_item *pattern, + struct rte_flow_error *error, + struct i40e_tunnel_filter_conf *filter); + +const struct rte_flow_ops i40e_flow_ops = { + .validate = i40e_flow_validate, + .create = i40e_flow_create, + .destroy = i40e_flow_destroy, + .flush = i40e_flow_flush, +}; + +static union i40e_filter_t cons_filter; +static enum rte_filter_type cons_filter_type = RTE_ETH_FILTER_NONE; + +/* Pattern matched ethertype filter */ +static enum rte_flow_item_type pattern_ethertype[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_END, +}; + +/* Pattern matched flow director filter */ +static enum rte_flow_item_type pattern_fdir_ipv4[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ipv4_udp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ipv4_tcp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ipv4_sctp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ipv4_gtpc[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_GTPC, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ipv4_gtpu[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_GTPU, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ipv4_gtpu_ipv4[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_GTPU, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ipv4_gtpu_ipv6[] = { + 
RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_GTPU, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ipv6[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ipv6_udp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ipv6_tcp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ipv6_sctp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ipv6_gtpc[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_GTPC, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ipv6_gtpu[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_GTPU, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ipv6_gtpu_ipv4[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_GTPU, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ipv6_gtpu_ipv6[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_GTPU, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ethertype_raw_1[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ethertype_raw_2[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ethertype_raw_3[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ipv4_raw_1[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ipv4_raw_2[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ipv4_raw_3[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_1[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_2[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_3[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type 
pattern_fdir_ipv4_tcp_raw_1[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_2[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_3[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_1[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_2[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_3[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ipv6_raw_1[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ipv6_raw_2[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ipv6_raw_3[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_1[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_2[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_3[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_1[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_2[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_3[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_1[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum 
rte_flow_item_type pattern_fdir_ipv6_sctp_raw_2[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_3[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ethertype_vlan[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_vlan_ipv4[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_vlan_ipv6[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_1[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_2[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_3[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_1[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_2[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_3[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type 
pattern_fdir_vlan_ipv4_udp_raw_1[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_2[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_3[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_1[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_2[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_3[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_1[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_2[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_3[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_1[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_2[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_3[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_1[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_2[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type 
pattern_fdir_vlan_ipv6_udp_raw_3[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_1[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_2[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_3[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_1[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_2[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_3[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ipv4_vf[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_VF, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ipv4_udp_vf[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_VF, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ipv4_tcp_vf[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_VF, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ipv4_sctp_vf[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_VF, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ipv6_vf[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_VF, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ipv6_udp_vf[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_VF, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ipv6_tcp_vf[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_VF, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ipv6_sctp_vf[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_VF, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ethertype_raw_1_vf[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_VF, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum 
rte_flow_item_type pattern_fdir_ethertype_raw_2_vf[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_VF, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ethertype_raw_3_vf[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_VF, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ipv4_raw_1_vf[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_VF, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ipv4_raw_2_vf[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_VF, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ipv4_raw_3_vf[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_VF, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_1_vf[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_VF, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_2_vf[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_VF, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_3_vf[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_VF, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_1_vf[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_VF, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_2_vf[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_VF, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_3_vf[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_VF, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_1_vf[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_VF, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_2_vf[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_VF, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_3_vf[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_VF, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ipv6_raw_1_vf[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + 
RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_VF, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ipv6_raw_2_vf[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_VF, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ipv6_raw_3_vf[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_VF, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_1_vf[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_VF, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_2_vf[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_VF, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_3_vf[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_VF, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_1_vf[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_VF, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_2_vf[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_VF, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_3_vf[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_VF, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_1_vf[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_VF, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_2_vf[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_VF, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_3_vf[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_VF, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ethertype_vlan_vf[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_VF, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_vlan_ipv4_vf[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_VF, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_vf[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_VF, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_vf[] 
= { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_VF, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_vf[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_VF, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_vlan_ipv6_vf[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_VF, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_vf[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_VF, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_vf[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_VF, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_vf[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_VF, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_1_vf[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_VF, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_2_vf[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_VF, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_3_vf[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_VF, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_1_vf[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_VF, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_2_vf[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_VF, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_3_vf[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_VF, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_1_vf[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_VF, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_2_vf[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_VF, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_3_vf[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_RAW, + 
RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_VF, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_1_vf[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_VF, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_2_vf[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_VF, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_3_vf[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_VF, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_1_vf[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_VF, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_2_vf[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_VF, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_3_vf[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_VF, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_1_vf[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_VF, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_2_vf[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_VF, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_3_vf[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_VF, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_1_vf[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_VF, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_2_vf[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_VF, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_3_vf[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_VF, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type 
pattern_fdir_vlan_ipv6_tcp_raw_1_vf[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_VF, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_2_vf[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_VF, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_3_vf[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_VF, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_1_vf[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_VF, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_2_vf[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_VF, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_3_vf[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_VF, + RTE_FLOW_ITEM_TYPE_END, +}; + +/* Pattern matched tunnel filter */ +static enum rte_flow_item_type pattern_vxlan_1[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_vxlan_2[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_vxlan_3[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_vxlan_4[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_nvgre_1[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_NVGRE, + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_nvgre_2[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_NVGRE, + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_nvgre_3[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_NVGRE, + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_nvgre_4[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_NVGRE, + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_mpls_1[] = { 
+ RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_MPLS, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_mpls_2[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_MPLS, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_mpls_3[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_GRE, + RTE_FLOW_ITEM_TYPE_MPLS, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_mpls_4[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_GRE, + RTE_FLOW_ITEM_TYPE_MPLS, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_qinq_1[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ipv4_l2tpv3oip[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_L2TPV3OIP, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ipv6_l2tpv3oip[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_L2TPV3OIP, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ipv4_esp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_ESP, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ipv6_esp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_ESP, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ipv4_udp_esp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_ESP, + RTE_FLOW_ITEM_TYPE_END, +}; + +static enum rte_flow_item_type pattern_fdir_ipv6_udp_esp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_ESP, + RTE_FLOW_ITEM_TYPE_END, +}; + +static struct i40e_valid_pattern i40e_supported_patterns[] = { + /* Ethertype */ + { pattern_ethertype, i40e_flow_parse_ethertype_filter }, + /* FDIR - support default flow type without flexible payload*/ + { pattern_ethertype, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ipv4, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ipv4_udp, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ipv4_tcp, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ipv4_sctp, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ipv4_gtpc, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ipv4_gtpu, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ipv4_gtpu_ipv4, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ipv4_gtpu_ipv6, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ipv4_esp, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ipv4_udp_esp, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ipv6, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ipv6_udp, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ipv6_tcp, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ipv6_sctp, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ipv6_gtpc, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ipv6_gtpu, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ipv6_gtpu_ipv4, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ipv6_gtpu_ipv6, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ipv6_esp, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ipv6_udp_esp, i40e_flow_parse_fdir_filter }, + /* FDIR - support default flow type with flexible payload */ + { pattern_fdir_ethertype_raw_1, 
i40e_flow_parse_fdir_filter }, + { pattern_fdir_ethertype_raw_2, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ethertype_raw_3, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ipv4_raw_1, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ipv4_raw_2, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ipv4_raw_3, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ipv4_udp_raw_1, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ipv4_udp_raw_2, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ipv4_udp_raw_3, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ipv4_tcp_raw_1, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ipv4_tcp_raw_2, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ipv4_tcp_raw_3, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ipv4_sctp_raw_1, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ipv4_sctp_raw_2, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ipv4_sctp_raw_3, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ipv6_raw_1, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ipv6_raw_2, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ipv6_raw_3, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ipv6_udp_raw_1, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ipv6_udp_raw_2, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ipv6_udp_raw_3, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ipv6_tcp_raw_1, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ipv6_tcp_raw_2, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ipv6_tcp_raw_3, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ipv6_sctp_raw_1, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ipv6_sctp_raw_2, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ipv6_sctp_raw_3, i40e_flow_parse_fdir_filter }, + /* FDIR - support single vlan input set */ + { pattern_fdir_ethertype_vlan, i40e_flow_parse_fdir_filter }, + { pattern_fdir_vlan_ipv4, i40e_flow_parse_fdir_filter }, + { pattern_fdir_vlan_ipv4_udp, i40e_flow_parse_fdir_filter }, + { pattern_fdir_vlan_ipv4_tcp, i40e_flow_parse_fdir_filter }, + { pattern_fdir_vlan_ipv4_sctp, i40e_flow_parse_fdir_filter }, + { pattern_fdir_vlan_ipv6, i40e_flow_parse_fdir_filter }, + { pattern_fdir_vlan_ipv6_udp, i40e_flow_parse_fdir_filter }, + { pattern_fdir_vlan_ipv6_tcp, i40e_flow_parse_fdir_filter }, + { pattern_fdir_vlan_ipv6_sctp, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ethertype_vlan_raw_1, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ethertype_vlan_raw_2, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ethertype_vlan_raw_3, i40e_flow_parse_fdir_filter }, + { pattern_fdir_vlan_ipv4_raw_1, i40e_flow_parse_fdir_filter }, + { pattern_fdir_vlan_ipv4_raw_2, i40e_flow_parse_fdir_filter }, + { pattern_fdir_vlan_ipv4_raw_3, i40e_flow_parse_fdir_filter }, + { pattern_fdir_vlan_ipv4_udp_raw_1, i40e_flow_parse_fdir_filter }, + { pattern_fdir_vlan_ipv4_udp_raw_2, i40e_flow_parse_fdir_filter }, + { pattern_fdir_vlan_ipv4_udp_raw_3, i40e_flow_parse_fdir_filter }, + { pattern_fdir_vlan_ipv4_tcp_raw_1, i40e_flow_parse_fdir_filter }, + { pattern_fdir_vlan_ipv4_tcp_raw_2, i40e_flow_parse_fdir_filter }, + { pattern_fdir_vlan_ipv4_tcp_raw_3, i40e_flow_parse_fdir_filter }, + { pattern_fdir_vlan_ipv4_sctp_raw_1, i40e_flow_parse_fdir_filter }, + { pattern_fdir_vlan_ipv4_sctp_raw_2, i40e_flow_parse_fdir_filter }, + { pattern_fdir_vlan_ipv4_sctp_raw_3, i40e_flow_parse_fdir_filter }, + { pattern_fdir_vlan_ipv6_raw_1, i40e_flow_parse_fdir_filter }, + { pattern_fdir_vlan_ipv6_raw_2, i40e_flow_parse_fdir_filter }, + { pattern_fdir_vlan_ipv6_raw_3, i40e_flow_parse_fdir_filter }, + { pattern_fdir_vlan_ipv6_udp_raw_1, 
i40e_flow_parse_fdir_filter }, + { pattern_fdir_vlan_ipv6_udp_raw_2, i40e_flow_parse_fdir_filter }, + { pattern_fdir_vlan_ipv6_udp_raw_3, i40e_flow_parse_fdir_filter }, + { pattern_fdir_vlan_ipv6_tcp_raw_1, i40e_flow_parse_fdir_filter }, + { pattern_fdir_vlan_ipv6_tcp_raw_2, i40e_flow_parse_fdir_filter }, + { pattern_fdir_vlan_ipv6_tcp_raw_3, i40e_flow_parse_fdir_filter }, + { pattern_fdir_vlan_ipv6_sctp_raw_1, i40e_flow_parse_fdir_filter }, + { pattern_fdir_vlan_ipv6_sctp_raw_2, i40e_flow_parse_fdir_filter }, + { pattern_fdir_vlan_ipv6_sctp_raw_3, i40e_flow_parse_fdir_filter }, + /* FDIR - support VF item */ + { pattern_fdir_ipv4_vf, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ipv4_udp_vf, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ipv4_tcp_vf, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ipv4_sctp_vf, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ipv6_vf, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ipv6_udp_vf, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ipv6_tcp_vf, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ipv6_sctp_vf, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ethertype_raw_1_vf, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ethertype_raw_2_vf, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ethertype_raw_3_vf, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ipv4_raw_1_vf, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ipv4_raw_2_vf, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ipv4_raw_3_vf, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ipv4_udp_raw_1_vf, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ipv4_udp_raw_2_vf, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ipv4_udp_raw_3_vf, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ipv4_tcp_raw_1_vf, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ipv4_tcp_raw_2_vf, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ipv4_tcp_raw_3_vf, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ipv4_sctp_raw_1_vf, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ipv4_sctp_raw_2_vf, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ipv4_sctp_raw_3_vf, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ipv6_raw_1_vf, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ipv6_raw_2_vf, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ipv6_raw_3_vf, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ipv6_udp_raw_1_vf, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ipv6_udp_raw_2_vf, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ipv6_udp_raw_3_vf, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ipv6_tcp_raw_1_vf, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ipv6_tcp_raw_2_vf, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ipv6_tcp_raw_3_vf, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ipv6_sctp_raw_1_vf, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ipv6_sctp_raw_2_vf, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ipv6_sctp_raw_3_vf, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ethertype_vlan_vf, i40e_flow_parse_fdir_filter }, + { pattern_fdir_vlan_ipv4_vf, i40e_flow_parse_fdir_filter }, + { pattern_fdir_vlan_ipv4_udp_vf, i40e_flow_parse_fdir_filter }, + { pattern_fdir_vlan_ipv4_tcp_vf, i40e_flow_parse_fdir_filter }, + { pattern_fdir_vlan_ipv4_sctp_vf, i40e_flow_parse_fdir_filter }, + { pattern_fdir_vlan_ipv6_vf, i40e_flow_parse_fdir_filter }, + { pattern_fdir_vlan_ipv6_udp_vf, i40e_flow_parse_fdir_filter }, + { pattern_fdir_vlan_ipv6_tcp_vf, i40e_flow_parse_fdir_filter }, + { pattern_fdir_vlan_ipv6_sctp_vf, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ethertype_vlan_raw_1_vf, i40e_flow_parse_fdir_filter }, 
+ { pattern_fdir_ethertype_vlan_raw_2_vf, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ethertype_vlan_raw_3_vf, i40e_flow_parse_fdir_filter }, + { pattern_fdir_vlan_ipv4_raw_1_vf, i40e_flow_parse_fdir_filter }, + { pattern_fdir_vlan_ipv4_raw_2_vf, i40e_flow_parse_fdir_filter }, + { pattern_fdir_vlan_ipv4_raw_3_vf, i40e_flow_parse_fdir_filter }, + { pattern_fdir_vlan_ipv4_udp_raw_1_vf, i40e_flow_parse_fdir_filter }, + { pattern_fdir_vlan_ipv4_udp_raw_2_vf, i40e_flow_parse_fdir_filter }, + { pattern_fdir_vlan_ipv4_udp_raw_3_vf, i40e_flow_parse_fdir_filter }, + { pattern_fdir_vlan_ipv4_tcp_raw_1_vf, i40e_flow_parse_fdir_filter }, + { pattern_fdir_vlan_ipv4_tcp_raw_2_vf, i40e_flow_parse_fdir_filter }, + { pattern_fdir_vlan_ipv4_tcp_raw_3_vf, i40e_flow_parse_fdir_filter }, + { pattern_fdir_vlan_ipv4_sctp_raw_1_vf, i40e_flow_parse_fdir_filter }, + { pattern_fdir_vlan_ipv4_sctp_raw_2_vf, i40e_flow_parse_fdir_filter }, + { pattern_fdir_vlan_ipv4_sctp_raw_3_vf, i40e_flow_parse_fdir_filter }, + { pattern_fdir_vlan_ipv6_raw_1_vf, i40e_flow_parse_fdir_filter }, + { pattern_fdir_vlan_ipv6_raw_2_vf, i40e_flow_parse_fdir_filter }, + { pattern_fdir_vlan_ipv6_raw_3_vf, i40e_flow_parse_fdir_filter }, + { pattern_fdir_vlan_ipv6_udp_raw_1_vf, i40e_flow_parse_fdir_filter }, + { pattern_fdir_vlan_ipv6_udp_raw_2_vf, i40e_flow_parse_fdir_filter }, + { pattern_fdir_vlan_ipv6_udp_raw_3_vf, i40e_flow_parse_fdir_filter }, + { pattern_fdir_vlan_ipv6_tcp_raw_1_vf, i40e_flow_parse_fdir_filter }, + { pattern_fdir_vlan_ipv6_tcp_raw_2_vf, i40e_flow_parse_fdir_filter }, + { pattern_fdir_vlan_ipv6_tcp_raw_3_vf, i40e_flow_parse_fdir_filter }, + { pattern_fdir_vlan_ipv6_sctp_raw_1_vf, i40e_flow_parse_fdir_filter }, + { pattern_fdir_vlan_ipv6_sctp_raw_2_vf, i40e_flow_parse_fdir_filter }, + { pattern_fdir_vlan_ipv6_sctp_raw_3_vf, i40e_flow_parse_fdir_filter }, + /* VXLAN */ + { pattern_vxlan_1, i40e_flow_parse_vxlan_filter }, + { pattern_vxlan_2, i40e_flow_parse_vxlan_filter }, + { pattern_vxlan_3, i40e_flow_parse_vxlan_filter }, + { pattern_vxlan_4, i40e_flow_parse_vxlan_filter }, + /* NVGRE */ + { pattern_nvgre_1, i40e_flow_parse_nvgre_filter }, + { pattern_nvgre_2, i40e_flow_parse_nvgre_filter }, + { pattern_nvgre_3, i40e_flow_parse_nvgre_filter }, + { pattern_nvgre_4, i40e_flow_parse_nvgre_filter }, + /* MPLSoUDP & MPLSoGRE */ + { pattern_mpls_1, i40e_flow_parse_mpls_filter }, + { pattern_mpls_2, i40e_flow_parse_mpls_filter }, + { pattern_mpls_3, i40e_flow_parse_mpls_filter }, + { pattern_mpls_4, i40e_flow_parse_mpls_filter }, + /* GTP-C & GTP-U */ + { pattern_fdir_ipv4_gtpc, i40e_flow_parse_gtp_filter }, + { pattern_fdir_ipv4_gtpu, i40e_flow_parse_gtp_filter }, + { pattern_fdir_ipv6_gtpc, i40e_flow_parse_gtp_filter }, + { pattern_fdir_ipv6_gtpu, i40e_flow_parse_gtp_filter }, + /* QINQ */ + { pattern_qinq_1, i40e_flow_parse_qinq_filter }, + /* L2TPv3 over IP */ + { pattern_fdir_ipv4_l2tpv3oip, i40e_flow_parse_fdir_filter }, + { pattern_fdir_ipv6_l2tpv3oip, i40e_flow_parse_fdir_filter }, +}; + +#define NEXT_ITEM_OF_ACTION(act, actions, index) \ + do { \ + act = actions + index; \ + while (act->type == RTE_FLOW_ACTION_TYPE_VOID) { \ + index++; \ + act = actions + index; \ + } \ + } while (0) + +/* Find the first VOID or non-VOID item pointer */ +static const struct rte_flow_item * +i40e_find_first_item(const struct rte_flow_item *item, bool is_void) +{ + bool is_find; + + while (item->type != RTE_FLOW_ITEM_TYPE_END) { + if (is_void) + is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID; + else + is_find = item->type != 
RTE_FLOW_ITEM_TYPE_VOID; + if (is_find) + break; + item++; + } + return item; +} + +/* Skip all VOID items of the pattern */ +static void +i40e_pattern_skip_void_item(struct rte_flow_item *items, + const struct rte_flow_item *pattern) +{ + uint32_t cpy_count = 0; + const struct rte_flow_item *pb = pattern, *pe = pattern; + + for (;;) { + /* Find a non-void item first */ + pb = i40e_find_first_item(pb, false); + if (pb->type == RTE_FLOW_ITEM_TYPE_END) { + pe = pb; + break; + } + + /* Find a void item */ + pe = i40e_find_first_item(pb + 1, true); + + cpy_count = pe - pb; + rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count); + + items += cpy_count; + + if (pe->type == RTE_FLOW_ITEM_TYPE_END) { + pb = pe; + break; + } + + pb = pe + 1; + } + /* Copy the END item. */ + rte_memcpy(items, pe, sizeof(struct rte_flow_item)); +} + +/* Check if the pattern matches a supported item type array */ +static bool +i40e_match_pattern(enum rte_flow_item_type *item_array, + struct rte_flow_item *pattern) +{ + struct rte_flow_item *item = pattern; + + while ((*item_array == item->type) && + (*item_array != RTE_FLOW_ITEM_TYPE_END)) { + item_array++; + item++; + } + + return (*item_array == RTE_FLOW_ITEM_TYPE_END && + item->type == RTE_FLOW_ITEM_TYPE_END); +} + +/* Find if there's parse filter function matched */ +static parse_filter_t +i40e_find_parse_filter_func(struct rte_flow_item *pattern, uint32_t *idx) +{ + parse_filter_t parse_filter = NULL; + uint8_t i = *idx; + + for (; i < RTE_DIM(i40e_supported_patterns); i++) { + if (i40e_match_pattern(i40e_supported_patterns[i].items, + pattern)) { + parse_filter = i40e_supported_patterns[i].parse_filter; + break; + } + } + + *idx = ++i; + + return parse_filter; +} + +/* Parse attributes */ +static int +i40e_flow_parse_attr(const struct rte_flow_attr *attr, + struct rte_flow_error *error) +{ + /* Must be input direction */ + if (!attr->ingress) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, + attr, "Only support ingress."); + return -rte_errno; + } + + /* Not supported */ + if (attr->egress) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, + attr, "Not support egress."); + return -rte_errno; + } + + /* Not supported */ + if (attr->priority) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, + attr, "Not support priority."); + return -rte_errno; + } + + /* Not supported */ + if (attr->group) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_GROUP, + attr, "Not support group."); + return -rte_errno; + } + + return 0; +} + +static uint16_t +i40e_get_outer_vlan(struct rte_eth_dev *dev) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int qinq = dev->data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_VLAN_EXTEND; + uint64_t reg_r = 0; + uint16_t reg_id; + uint16_t tpid; + + if (qinq) + reg_id = 2; + else + reg_id = 3; + + i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id), + &reg_r, NULL); + + tpid = (reg_r >> I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT) & 0xFFFF; + + return tpid; +} + +/* 1. Last in item should be NULL as range is not supported. + * 2. Supported filter types: MAC_ETHTYPE and ETHTYPE. + * 3. SRC mac_addr mask should be 00:00:00:00:00:00. + * 4. DST mac_addr mask should be 00:00:00:00:00:00 or + * FF:FF:FF:FF:FF:FF + * 5. Ether_type mask should be 0xFFFF. 
+ */ +static int +i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev, + const struct rte_flow_item *pattern, + struct rte_flow_error *error, + struct rte_eth_ethertype_filter *filter) +{ + const struct rte_flow_item *item = pattern; + const struct rte_flow_item_eth *eth_spec; + const struct rte_flow_item_eth *eth_mask; + enum rte_flow_item_type item_type; + uint16_t outer_tpid; + + outer_tpid = i40e_get_outer_vlan(dev); + + for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { + if (item->last) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Not support range"); + return -rte_errno; + } + item_type = item->type; + switch (item_type) { + case RTE_FLOW_ITEM_TYPE_ETH: + eth_spec = item->spec; + eth_mask = item->mask; + /* Get the MAC info. */ + if (!eth_spec || !eth_mask) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "NULL ETH spec/mask"); + return -rte_errno; + } + + /* Mask bits of source MAC address must be full of 0. + * Mask bits of destination MAC address must be full + * of 1 or full of 0. + */ + if (!rte_is_zero_ether_addr(&eth_mask->src) || + (!rte_is_zero_ether_addr(&eth_mask->dst) && + !rte_is_broadcast_ether_addr(&eth_mask->dst))) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid MAC_addr mask"); + return -rte_errno; + } + + if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid ethertype mask"); + return -rte_errno; + } + + /* If mask bits of destination MAC address + * are full of 1, set RTE_ETHTYPE_FLAGS_MAC. + */ + if (rte_is_broadcast_ether_addr(&eth_mask->dst)) { + filter->mac_addr = eth_spec->dst; + filter->flags |= RTE_ETHTYPE_FLAGS_MAC; + } else { + filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC; + } + filter->ether_type = rte_be_to_cpu_16(eth_spec->type); + + if (filter->ether_type == RTE_ETHER_TYPE_IPV4 || + filter->ether_type == RTE_ETHER_TYPE_IPV6 || + filter->ether_type == RTE_ETHER_TYPE_LLDP || + filter->ether_type == outer_tpid) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Unsupported ether_type in" + " control packet filter."); + return -rte_errno; + } + break; + default: + break; + } + } + + return 0; +} + +/* Ethertype action only supports QUEUE or DROP. */ +static int +i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev, + const struct rte_flow_action *actions, + struct rte_flow_error *error, + struct rte_eth_ethertype_filter *filter) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + const struct rte_flow_action *act; + const struct rte_flow_action_queue *act_q; + uint32_t index = 0; + + /* Check if the first non-void action is QUEUE or DROP. 
*/ + NEXT_ITEM_OF_ACTION(act, actions, index); + if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE && + act->type != RTE_FLOW_ACTION_TYPE_DROP) { + rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, + act, "Not supported action."); + return -rte_errno; + } + + if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) { + act_q = act->conf; + filter->queue = act_q->index; + if (filter->queue >= pf->dev_data->nb_rx_queues) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, "Invalid queue ID for" + " ethertype_filter."); + return -rte_errno; + } + } else { + filter->flags |= RTE_ETHTYPE_FLAGS_DROP; + } + + /* Check if the next non-void item is END */ + index++; + NEXT_ITEM_OF_ACTION(act, actions, index); + if (act->type != RTE_FLOW_ACTION_TYPE_END) { + rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, + act, "Not supported action."); + return -rte_errno; + } + + return 0; +} + +static int +i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error, + union i40e_filter_t *filter) +{ + struct rte_eth_ethertype_filter *ethertype_filter = + &filter->ethertype_filter; + int ret; + + ret = i40e_flow_parse_ethertype_pattern(dev, pattern, error, + ethertype_filter); + if (ret) + return ret; + + ret = i40e_flow_parse_ethertype_action(dev, actions, error, + ethertype_filter); + if (ret) + return ret; + + ret = i40e_flow_parse_attr(attr, error); + if (ret) + return ret; + + cons_filter_type = RTE_ETH_FILTER_ETHERTYPE; + + return ret; +} + +static int +i40e_flow_check_raw_item(const struct rte_flow_item *item, + const struct rte_flow_item_raw *raw_spec, + struct rte_flow_error *error) +{ + if (!raw_spec->relative) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Relative should be 1."); + return -rte_errno; + } + + if (raw_spec->offset % sizeof(uint16_t)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Offset should be even."); + return -rte_errno; + } + + if (raw_spec->search || raw_spec->limit) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "search or limit is not supported."); + return -rte_errno; + } + + if (raw_spec->offset < 0) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Offset should be non-negative."); + return -rte_errno; + } + return 0; +} + +static int +i40e_flow_store_flex_pit(struct i40e_pf *pf, + struct i40e_fdir_flex_pit *flex_pit, + enum i40e_flxpld_layer_idx layer_idx, + uint8_t raw_id) +{ + uint8_t field_idx; + + field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + raw_id; + /* Check if the configuration is conflicted */ + if (pf->fdir.flex_pit_flag[layer_idx] && + (pf->fdir.flex_set[field_idx].src_offset != flex_pit->src_offset || + pf->fdir.flex_set[field_idx].size != flex_pit->size || + pf->fdir.flex_set[field_idx].dst_offset != flex_pit->dst_offset)) + return -1; + + /* Check if the configuration exists. 
*/ + if (pf->fdir.flex_pit_flag[layer_idx] && + (pf->fdir.flex_set[field_idx].src_offset == flex_pit->src_offset && + pf->fdir.flex_set[field_idx].size == flex_pit->size && + pf->fdir.flex_set[field_idx].dst_offset == flex_pit->dst_offset)) + return 1; + + pf->fdir.flex_set[field_idx].src_offset = + flex_pit->src_offset; + pf->fdir.flex_set[field_idx].size = + flex_pit->size; + pf->fdir.flex_set[field_idx].dst_offset = + flex_pit->dst_offset; + + return 0; +} + +static int +i40e_flow_store_flex_mask(struct i40e_pf *pf, + enum i40e_filter_pctype pctype, + uint8_t *mask) +{ + struct i40e_fdir_flex_mask flex_mask; + uint16_t mask_tmp; + uint8_t i, nb_bitmask = 0; + + memset(&flex_mask, 0, sizeof(struct i40e_fdir_flex_mask)); + for (i = 0; i < I40E_FDIR_MAX_FLEX_LEN; i += sizeof(uint16_t)) { + mask_tmp = I40E_WORD(mask[i], mask[i + 1]); + if (mask_tmp) { + flex_mask.word_mask |= + I40E_FLEX_WORD_MASK(i / sizeof(uint16_t)); + if (mask_tmp != UINT16_MAX) { + flex_mask.bitmask[nb_bitmask].mask = ~mask_tmp; + flex_mask.bitmask[nb_bitmask].offset = + i / sizeof(uint16_t); + nb_bitmask++; + if (nb_bitmask > I40E_FDIR_BITMASK_NUM_WORD) + return -1; + } + } + } + flex_mask.nb_bitmask = nb_bitmask; + + if (pf->fdir.flex_mask_flag[pctype] && + (memcmp(&flex_mask, &pf->fdir.flex_mask[pctype], + sizeof(struct i40e_fdir_flex_mask)))) + return -2; + else if (pf->fdir.flex_mask_flag[pctype] && + !(memcmp(&flex_mask, &pf->fdir.flex_mask[pctype], + sizeof(struct i40e_fdir_flex_mask)))) + return 1; + + memcpy(&pf->fdir.flex_mask[pctype], &flex_mask, + sizeof(struct i40e_fdir_flex_mask)); + return 0; +} + +static void +i40e_flow_set_fdir_flex_pit(struct i40e_pf *pf, + enum i40e_flxpld_layer_idx layer_idx, + uint8_t raw_id) +{ + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + uint32_t flx_pit, flx_ort; + uint8_t field_idx; + uint16_t min_next_off = 0; /* in words */ + uint8_t i; + + if (raw_id) { + flx_ort = (1 << I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT) | + (raw_id << I40E_GLQF_ORT_FIELD_CNT_SHIFT) | + (layer_idx * I40E_MAX_FLXPLD_FIED); + I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(33 + layer_idx), flx_ort); + } + + /* Set flex pit */ + for (i = 0; i < raw_id; i++) { + field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i; + flx_pit = MK_FLX_PIT(pf->fdir.flex_set[field_idx].src_offset, + pf->fdir.flex_set[field_idx].size, + pf->fdir.flex_set[field_idx].dst_offset); + + I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(field_idx), flx_pit); + min_next_off = pf->fdir.flex_set[field_idx].src_offset + + pf->fdir.flex_set[field_idx].size; + } + + for (; i < I40E_MAX_FLXPLD_FIED; i++) { + /* set the non-used register obeying register's constrain */ + field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i; + flx_pit = MK_FLX_PIT(min_next_off, NONUSE_FLX_PIT_FSIZE, + NONUSE_FLX_PIT_DEST_OFF); + I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(field_idx), flx_pit); + min_next_off++; + } + + pf->fdir.flex_pit_flag[layer_idx] = 1; +} + +static void +i40e_flow_set_fdir_flex_msk(struct i40e_pf *pf, + enum i40e_filter_pctype pctype) +{ + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + struct i40e_fdir_flex_mask *flex_mask; + uint32_t flxinset, fd_mask; + uint8_t i; + + /* Set flex mask */ + flex_mask = &pf->fdir.flex_mask[pctype]; + flxinset = (flex_mask->word_mask << + I40E_PRTQF_FD_FLXINSET_INSET_SHIFT) & + I40E_PRTQF_FD_FLXINSET_INSET_MASK; + i40e_write_rx_ctl(hw, I40E_PRTQF_FD_FLXINSET(pctype), flxinset); + + for (i = 0; i < flex_mask->nb_bitmask; i++) { + fd_mask = (flex_mask->bitmask[i].mask << + I40E_PRTQF_FD_MSK_MASK_SHIFT) & + I40E_PRTQF_FD_MSK_MASK_MASK; + fd_mask |= 
((flex_mask->bitmask[i].offset + + I40E_FLX_OFFSET_IN_FIELD_VECTOR) << + I40E_PRTQF_FD_MSK_OFFSET_SHIFT) & + I40E_PRTQF_FD_MSK_OFFSET_MASK; + i40e_write_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, i), fd_mask); + } + + pf->fdir.flex_mask_flag[pctype] = 1; +} + +static int +i40e_flow_set_fdir_inset(struct i40e_pf *pf, + enum i40e_filter_pctype pctype, + uint64_t input_set) +{ + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + uint64_t inset_reg = 0; + uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0}; + int i, num; + + /* Check if the input set is valid */ + if (i40e_validate_input_set(pctype, RTE_ETH_FILTER_FDIR, + input_set) != 0) { + PMD_DRV_LOG(ERR, "Invalid input set"); + return -EINVAL; + } + + /* Check if the configuration is conflicted */ + if (pf->fdir.inset_flag[pctype] && + memcmp(&pf->fdir.input_set[pctype], &input_set, sizeof(uint64_t))) + return -1; + + if (pf->fdir.inset_flag[pctype] && + !memcmp(&pf->fdir.input_set[pctype], &input_set, sizeof(uint64_t))) + return 0; + + num = i40e_generate_inset_mask_reg(input_set, mask_reg, + I40E_INSET_MASK_NUM_REG); + if (num < 0) + return -EINVAL; + + if (pf->support_multi_driver) { + for (i = 0; i < num; i++) + if (i40e_read_rx_ctl(hw, + I40E_GLQF_FD_MSK(i, pctype)) != + mask_reg[i]) { + PMD_DRV_LOG(ERR, "Input set setting is not" + " supported with" + " `support-multi-driver`" + " enabled!"); + return -EPERM; + } + for (i = num; i < I40E_INSET_MASK_NUM_REG; i++) + if (i40e_read_rx_ctl(hw, + I40E_GLQF_FD_MSK(i, pctype)) != 0) { + PMD_DRV_LOG(ERR, "Input set setting is not" + " supported with" + " `support-multi-driver`" + " enabled!"); + return -EPERM; + } + + } else { + for (i = 0; i < num; i++) + i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype), + mask_reg[i]); + /*clear unused mask registers of the pctype */ + for (i = num; i < I40E_INSET_MASK_NUM_REG; i++) + i40e_check_write_reg(hw, + I40E_GLQF_FD_MSK(i, pctype), 0); + } + + inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set); + + i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0), + (uint32_t)(inset_reg & UINT32_MAX)); + i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1), + (uint32_t)((inset_reg >> + I40E_32_BIT_WIDTH) & UINT32_MAX)); + + I40E_WRITE_FLUSH(hw); + + pf->fdir.input_set[pctype] = input_set; + pf->fdir.inset_flag[pctype] = 1; + return 0; +} + +static uint8_t +i40e_flow_fdir_get_pctype_value(struct i40e_pf *pf, + enum rte_flow_item_type item_type, + struct i40e_fdir_filter_conf *filter) +{ + struct i40e_customized_pctype *cus_pctype = NULL; + + switch (item_type) { + case RTE_FLOW_ITEM_TYPE_GTPC: + cus_pctype = i40e_find_customized_pctype(pf, + I40E_CUSTOMIZED_GTPC); + break; + case RTE_FLOW_ITEM_TYPE_GTPU: + if (!filter->input.flow_ext.inner_ip) + cus_pctype = i40e_find_customized_pctype(pf, + I40E_CUSTOMIZED_GTPU); + else if (filter->input.flow_ext.iip_type == + I40E_FDIR_IPTYPE_IPV4) + cus_pctype = i40e_find_customized_pctype(pf, + I40E_CUSTOMIZED_GTPU_IPV4); + else if (filter->input.flow_ext.iip_type == + I40E_FDIR_IPTYPE_IPV6) + cus_pctype = i40e_find_customized_pctype(pf, + I40E_CUSTOMIZED_GTPU_IPV6); + break; + case RTE_FLOW_ITEM_TYPE_L2TPV3OIP: + if (filter->input.flow_ext.oip_type == I40E_FDIR_IPTYPE_IPV4) + cus_pctype = i40e_find_customized_pctype(pf, + I40E_CUSTOMIZED_IPV4_L2TPV3); + else if (filter->input.flow_ext.oip_type == + I40E_FDIR_IPTYPE_IPV6) + cus_pctype = i40e_find_customized_pctype(pf, + I40E_CUSTOMIZED_IPV6_L2TPV3); + break; + case RTE_FLOW_ITEM_TYPE_ESP: + if (!filter->input.flow_ext.is_udp) { + if (filter->input.flow_ext.oip_type 
== + I40E_FDIR_IPTYPE_IPV4) + cus_pctype = i40e_find_customized_pctype(pf, + I40E_CUSTOMIZED_ESP_IPV4); + else if (filter->input.flow_ext.oip_type == + I40E_FDIR_IPTYPE_IPV6) + cus_pctype = i40e_find_customized_pctype(pf, + I40E_CUSTOMIZED_ESP_IPV6); + } else { + if (filter->input.flow_ext.oip_type == + I40E_FDIR_IPTYPE_IPV4) + cus_pctype = i40e_find_customized_pctype(pf, + I40E_CUSTOMIZED_ESP_IPV4_UDP); + else if (filter->input.flow_ext.oip_type == + I40E_FDIR_IPTYPE_IPV6) + cus_pctype = i40e_find_customized_pctype(pf, + I40E_CUSTOMIZED_ESP_IPV6_UDP); + filter->input.flow_ext.is_udp = false; + } + break; + default: + PMD_DRV_LOG(ERR, "Unsupported item type"); + break; + } + + if (cus_pctype && cus_pctype->valid) + return cus_pctype->pctype; + + return I40E_FILTER_PCTYPE_INVALID; +} + +static void +i40e_flow_set_filter_spi(struct i40e_fdir_filter_conf *filter, + const struct rte_flow_item_esp *esp_spec) +{ + if (filter->input.flow_ext.oip_type == + I40E_FDIR_IPTYPE_IPV4) { + if (filter->input.flow_ext.is_udp) + filter->input.flow.esp_ipv4_udp_flow.spi = + esp_spec->hdr.spi; + else + filter->input.flow.esp_ipv4_flow.spi = + esp_spec->hdr.spi; + } + if (filter->input.flow_ext.oip_type == + I40E_FDIR_IPTYPE_IPV6) { + if (filter->input.flow_ext.is_udp) + filter->input.flow.esp_ipv6_udp_flow.spi = + esp_spec->hdr.spi; + else + filter->input.flow.esp_ipv6_flow.spi = + esp_spec->hdr.spi; + } +} + +/* 1. Last in item should be NULL as range is not supported. + * 2. Supported patterns: refer to array i40e_supported_patterns. + * 3. Default supported flow type and input set: refer to array + * valid_fdir_inset_table in i40e_ethdev.c. + * 4. Mask of fields which need to be matched should be + * filled with 1. + * 5. Mask of fields which needn't to be matched should be + * filled with 0. + * 6. GTP profile supports GTPv1 only. + * 7. GTP-C response message ('source_port' = 2123) is not supported. 
+ */ +static int +i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item *pattern, + struct rte_flow_error *error, + struct i40e_fdir_filter_conf *filter) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + const struct rte_flow_item *item = pattern; + const struct rte_flow_item_eth *eth_spec, *eth_mask; + const struct rte_flow_item_vlan *vlan_spec, *vlan_mask; + const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask; + const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask; + const struct rte_flow_item_tcp *tcp_spec, *tcp_mask; + const struct rte_flow_item_udp *udp_spec, *udp_mask; + const struct rte_flow_item_sctp *sctp_spec, *sctp_mask; + const struct rte_flow_item_gtp *gtp_spec, *gtp_mask; + const struct rte_flow_item_esp *esp_spec, *esp_mask; + const struct rte_flow_item_raw *raw_spec, *raw_mask; + const struct rte_flow_item_vf *vf_spec; + const struct rte_flow_item_l2tpv3oip *l2tpv3oip_spec, *l2tpv3oip_mask; + + uint8_t pctype = 0; + uint64_t input_set = I40E_INSET_NONE; + uint16_t frag_off; + enum rte_flow_item_type item_type; + enum rte_flow_item_type next_type; + enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END; + enum rte_flow_item_type cus_proto = RTE_FLOW_ITEM_TYPE_END; + uint32_t i, j; + uint8_t ipv6_addr_mask[16] = { + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; + enum i40e_flxpld_layer_idx layer_idx = I40E_FLXPLD_L2_IDX; + uint8_t raw_id = 0; + int32_t off_arr[I40E_MAX_FLXPLD_FIED]; + uint16_t len_arr[I40E_MAX_FLXPLD_FIED]; + struct i40e_fdir_flex_pit flex_pit; + uint8_t next_dst_off = 0; + uint8_t flex_mask[I40E_FDIR_MAX_FLEX_LEN]; + uint16_t flex_size; + bool cfg_flex_pit = true; + bool cfg_flex_msk = true; + uint16_t outer_tpid; + uint16_t ether_type; + uint32_t vtc_flow_cpu; + bool outer_ip = true; + int ret; + + memset(off_arr, 0, sizeof(off_arr)); + memset(len_arr, 0, sizeof(len_arr)); + memset(flex_mask, 0, I40E_FDIR_MAX_FLEX_LEN); + outer_tpid = i40e_get_outer_vlan(dev); + filter->input.flow_ext.customized_pctype = false; + for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { + if (item->last) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Not support range"); + return -rte_errno; + } + item_type = item->type; + switch (item_type) { + case RTE_FLOW_ITEM_TYPE_ETH: + eth_spec = item->spec; + eth_mask = item->mask; + next_type = (item + 1)->type; + + if (next_type == RTE_FLOW_ITEM_TYPE_END && + (!eth_spec || !eth_mask)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "NULL eth spec/mask."); + return -rte_errno; + } + + if (eth_spec && eth_mask) { + if (rte_is_broadcast_ether_addr(ð_mask->dst) && + rte_is_zero_ether_addr(ð_mask->src)) { + filter->input.flow.l2_flow.dst = + eth_spec->dst; + input_set |= I40E_INSET_DMAC; + } else if (rte_is_zero_ether_addr(ð_mask->dst) && + rte_is_broadcast_ether_addr(ð_mask->src)) { + filter->input.flow.l2_flow.src = + eth_spec->src; + input_set |= I40E_INSET_SMAC; + } else if (rte_is_broadcast_ether_addr(ð_mask->dst) && + rte_is_broadcast_ether_addr(ð_mask->src)) { + filter->input.flow.l2_flow.dst = + eth_spec->dst; + filter->input.flow.l2_flow.src = + eth_spec->src; + input_set |= (I40E_INSET_DMAC | I40E_INSET_SMAC); + } else if (!rte_is_zero_ether_addr(ð_mask->src) || + !rte_is_zero_ether_addr(ð_mask->dst)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid MAC_addr mask."); + return -rte_errno; + } 
+ } + if (eth_spec && eth_mask && + next_type == RTE_FLOW_ITEM_TYPE_END) { + if (eth_mask->type != RTE_BE16(0xffff)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid type mask."); + return -rte_errno; + } + + ether_type = rte_be_to_cpu_16(eth_spec->type); + + if (next_type == RTE_FLOW_ITEM_TYPE_VLAN || + ether_type == RTE_ETHER_TYPE_IPV4 || + ether_type == RTE_ETHER_TYPE_IPV6 || + ether_type == outer_tpid) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Unsupported ether_type."); + return -rte_errno; + } + input_set |= I40E_INSET_LAST_ETHER_TYPE; + filter->input.flow.l2_flow.ether_type = + eth_spec->type; + } + + pctype = I40E_FILTER_PCTYPE_L2_PAYLOAD; + layer_idx = I40E_FLXPLD_L2_IDX; + + break; + case RTE_FLOW_ITEM_TYPE_VLAN: + vlan_spec = item->spec; + vlan_mask = item->mask; + + RTE_ASSERT(!(input_set & I40E_INSET_LAST_ETHER_TYPE)); + if (vlan_spec && vlan_mask) { + if (vlan_mask->tci == + rte_cpu_to_be_16(I40E_TCI_MASK)) { + input_set |= I40E_INSET_VLAN_INNER; + filter->input.flow_ext.vlan_tci = + vlan_spec->tci; + } + } + if (vlan_spec && vlan_mask && vlan_mask->inner_type) { + if (vlan_mask->inner_type != RTE_BE16(0xffff)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid inner_type" + " mask."); + return -rte_errno; + } + + ether_type = + rte_be_to_cpu_16(vlan_spec->inner_type); + + if (ether_type == RTE_ETHER_TYPE_IPV4 || + ether_type == RTE_ETHER_TYPE_IPV6 || + ether_type == outer_tpid) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Unsupported inner_type."); + return -rte_errno; + } + input_set |= I40E_INSET_LAST_ETHER_TYPE; + filter->input.flow.l2_flow.ether_type = + vlan_spec->inner_type; + } + + pctype = I40E_FILTER_PCTYPE_L2_PAYLOAD; + layer_idx = I40E_FLXPLD_L2_IDX; + + break; + case RTE_FLOW_ITEM_TYPE_IPV4: + l3 = RTE_FLOW_ITEM_TYPE_IPV4; + ipv4_spec = item->spec; + ipv4_mask = item->mask; + pctype = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER; + layer_idx = I40E_FLXPLD_L3_IDX; + + if (ipv4_spec && ipv4_mask && outer_ip) { + /* Check IPv4 mask and update input set */ + if (ipv4_mask->hdr.version_ihl || + ipv4_mask->hdr.total_length || + ipv4_mask->hdr.packet_id || + ipv4_mask->hdr.fragment_offset || + ipv4_mask->hdr.hdr_checksum) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid IPv4 mask."); + return -rte_errno; + } + + if (ipv4_mask->hdr.src_addr == UINT32_MAX) + input_set |= I40E_INSET_IPV4_SRC; + if (ipv4_mask->hdr.dst_addr == UINT32_MAX) + input_set |= I40E_INSET_IPV4_DST; + if (ipv4_mask->hdr.type_of_service == UINT8_MAX) + input_set |= I40E_INSET_IPV4_TOS; + if (ipv4_mask->hdr.time_to_live == UINT8_MAX) + input_set |= I40E_INSET_IPV4_TTL; + if (ipv4_mask->hdr.next_proto_id == UINT8_MAX) + input_set |= I40E_INSET_IPV4_PROTO; + + /* Check if it is fragment. 
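+ * For example (illustrative), a non-zero fragment offset or the
+ * MF bit in the spec's fragment_offset selects the FRAG_IPV4
+ * pctype instead of NONF_IPV4_OTHER.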
*/ + frag_off = ipv4_spec->hdr.fragment_offset; + frag_off = rte_be_to_cpu_16(frag_off); + if (frag_off & RTE_IPV4_HDR_OFFSET_MASK || + frag_off & RTE_IPV4_HDR_MF_FLAG) + pctype = I40E_FILTER_PCTYPE_FRAG_IPV4; + + if (input_set & (I40E_INSET_DMAC | I40E_INSET_SMAC)) { + if (input_set & (I40E_INSET_IPV4_SRC | + I40E_INSET_IPV4_DST | I40E_INSET_IPV4_TOS | + I40E_INSET_IPV4_TTL | I40E_INSET_IPV4_PROTO)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "L2 and L3 input set are exclusive."); + return -rte_errno; + } + } else { + /* Get the filter info */ + filter->input.flow.ip4_flow.proto = + ipv4_spec->hdr.next_proto_id; + filter->input.flow.ip4_flow.tos = + ipv4_spec->hdr.type_of_service; + filter->input.flow.ip4_flow.ttl = + ipv4_spec->hdr.time_to_live; + filter->input.flow.ip4_flow.src_ip = + ipv4_spec->hdr.src_addr; + filter->input.flow.ip4_flow.dst_ip = + ipv4_spec->hdr.dst_addr; + + filter->input.flow_ext.inner_ip = false; + filter->input.flow_ext.oip_type = + I40E_FDIR_IPTYPE_IPV4; + } + } else if (!ipv4_spec && !ipv4_mask && !outer_ip) { + filter->input.flow_ext.inner_ip = true; + filter->input.flow_ext.iip_type = + I40E_FDIR_IPTYPE_IPV4; + } else if (!ipv4_spec && !ipv4_mask && outer_ip) { + filter->input.flow_ext.inner_ip = false; + filter->input.flow_ext.oip_type = + I40E_FDIR_IPTYPE_IPV4; + } else if ((ipv4_spec || ipv4_mask) && !outer_ip) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid inner IPv4 mask."); + return -rte_errno; + } + + if (outer_ip) + outer_ip = false; + + break; + case RTE_FLOW_ITEM_TYPE_IPV6: + l3 = RTE_FLOW_ITEM_TYPE_IPV6; + ipv6_spec = item->spec; + ipv6_mask = item->mask; + pctype = I40E_FILTER_PCTYPE_NONF_IPV6_OTHER; + layer_idx = I40E_FLXPLD_L3_IDX; + + if (ipv6_spec && ipv6_mask && outer_ip) { + /* Check IPv6 mask and update input set */ + if (ipv6_mask->hdr.payload_len) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid IPv6 mask"); + return -rte_errno; + } + + if (!memcmp(ipv6_mask->hdr.src_addr, + ipv6_addr_mask, + RTE_DIM(ipv6_mask->hdr.src_addr))) + input_set |= I40E_INSET_IPV6_SRC; + if (!memcmp(ipv6_mask->hdr.dst_addr, + ipv6_addr_mask, + RTE_DIM(ipv6_mask->hdr.dst_addr))) + input_set |= I40E_INSET_IPV6_DST; + + if ((ipv6_mask->hdr.vtc_flow & + rte_cpu_to_be_32(I40E_IPV6_TC_MASK)) + == rte_cpu_to_be_32(I40E_IPV6_TC_MASK)) + input_set |= I40E_INSET_IPV6_TC; + if (ipv6_mask->hdr.proto == UINT8_MAX) + input_set |= I40E_INSET_IPV6_NEXT_HDR; + if (ipv6_mask->hdr.hop_limits == UINT8_MAX) + input_set |= I40E_INSET_IPV6_HOP_LIMIT; + + /* Get filter info */ + vtc_flow_cpu = + rte_be_to_cpu_32(ipv6_spec->hdr.vtc_flow); + filter->input.flow.ipv6_flow.tc = + (uint8_t)(vtc_flow_cpu >> + I40E_FDIR_IPv6_TC_OFFSET); + filter->input.flow.ipv6_flow.proto = + ipv6_spec->hdr.proto; + filter->input.flow.ipv6_flow.hop_limits = + ipv6_spec->hdr.hop_limits; + + filter->input.flow_ext.inner_ip = false; + filter->input.flow_ext.oip_type = + I40E_FDIR_IPTYPE_IPV6; + + rte_memcpy(filter->input.flow.ipv6_flow.src_ip, + ipv6_spec->hdr.src_addr, 16); + rte_memcpy(filter->input.flow.ipv6_flow.dst_ip, + ipv6_spec->hdr.dst_addr, 16); + + /* Check if it is fragment. 
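+ * i.e. a next header (proto) equal to I40E_IPV6_FRAG_HEADER,
+ * the IPv6 fragment extension header, selects the FRAG_IPV6
+ * pctype.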
*/ + if (ipv6_spec->hdr.proto == + I40E_IPV6_FRAG_HEADER) + pctype = I40E_FILTER_PCTYPE_FRAG_IPV6; + } else if (!ipv6_spec && !ipv6_mask && !outer_ip) { + filter->input.flow_ext.inner_ip = true; + filter->input.flow_ext.iip_type = + I40E_FDIR_IPTYPE_IPV6; + } else if (!ipv6_spec && !ipv6_mask && outer_ip) { + filter->input.flow_ext.inner_ip = false; + filter->input.flow_ext.oip_type = + I40E_FDIR_IPTYPE_IPV6; + } else if ((ipv6_spec || ipv6_mask) && !outer_ip) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid inner IPv6 mask"); + return -rte_errno; + } + + if (outer_ip) + outer_ip = false; + break; + case RTE_FLOW_ITEM_TYPE_TCP: + tcp_spec = item->spec; + tcp_mask = item->mask; + + if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) + pctype = + I40E_FILTER_PCTYPE_NONF_IPV4_TCP; + else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) + pctype = + I40E_FILTER_PCTYPE_NONF_IPV6_TCP; + if (tcp_spec && tcp_mask) { + /* Check TCP mask and update input set */ + if (tcp_mask->hdr.sent_seq || + tcp_mask->hdr.recv_ack || + tcp_mask->hdr.data_off || + tcp_mask->hdr.tcp_flags || + tcp_mask->hdr.rx_win || + tcp_mask->hdr.cksum || + tcp_mask->hdr.tcp_urp) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid TCP mask"); + return -rte_errno; + } + + if (tcp_mask->hdr.src_port == UINT16_MAX) + input_set |= I40E_INSET_SRC_PORT; + if (tcp_mask->hdr.dst_port == UINT16_MAX) + input_set |= I40E_INSET_DST_PORT; + + if (input_set & (I40E_INSET_DMAC | I40E_INSET_SMAC)) { + if (input_set & + (I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "L2 and L4 input set are exclusive."); + return -rte_errno; + } + } else { + /* Get filter info */ + if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) { + filter->input.flow.tcp4_flow.src_port = + tcp_spec->hdr.src_port; + filter->input.flow.tcp4_flow.dst_port = + tcp_spec->hdr.dst_port; + } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) { + filter->input.flow.tcp6_flow.src_port = + tcp_spec->hdr.src_port; + filter->input.flow.tcp6_flow.dst_port = + tcp_spec->hdr.dst_port; + } + } + } + + layer_idx = I40E_FLXPLD_L4_IDX; + + break; + case RTE_FLOW_ITEM_TYPE_UDP: + udp_spec = item->spec; + udp_mask = item->mask; + + if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) + pctype = + I40E_FILTER_PCTYPE_NONF_IPV4_UDP; + else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) + pctype = + I40E_FILTER_PCTYPE_NONF_IPV6_UDP; + + if (udp_spec && udp_mask) { + /* Check UDP mask and update input set*/ + if (udp_mask->hdr.dgram_len || + udp_mask->hdr.dgram_cksum) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid UDP mask"); + return -rte_errno; + } + + if (udp_mask->hdr.src_port == UINT16_MAX) + input_set |= I40E_INSET_SRC_PORT; + if (udp_mask->hdr.dst_port == UINT16_MAX) + input_set |= I40E_INSET_DST_PORT; + + if (input_set & (I40E_INSET_DMAC | I40E_INSET_SMAC)) { + if (input_set & + (I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "L2 and L4 input set are exclusive."); + return -rte_errno; + } + } else { + /* Get filter info */ + if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) { + filter->input.flow.udp4_flow.src_port = + udp_spec->hdr.src_port; + filter->input.flow.udp4_flow.dst_port = + udp_spec->hdr.dst_port; + } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) { + filter->input.flow.udp6_flow.src_port = + udp_spec->hdr.src_port; + filter->input.flow.udp6_flow.dst_port = + udp_spec->hdr.dst_port; + } + } + } + filter->input.flow_ext.is_udp = true; + 
layer_idx = I40E_FLXPLD_L4_IDX; + + break; + case RTE_FLOW_ITEM_TYPE_GTPC: + case RTE_FLOW_ITEM_TYPE_GTPU: + if (!pf->gtp_support) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Unsupported protocol"); + return -rte_errno; + } + + gtp_spec = item->spec; + gtp_mask = item->mask; + + if (gtp_spec && gtp_mask) { + if (gtp_mask->v_pt_rsv_flags || + gtp_mask->msg_type || + gtp_mask->msg_len || + gtp_mask->teid != UINT32_MAX) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid GTP mask"); + return -rte_errno; + } + + filter->input.flow.gtp_flow.teid = + gtp_spec->teid; + filter->input.flow_ext.customized_pctype = true; + cus_proto = item_type; + } + break; + case RTE_FLOW_ITEM_TYPE_ESP: + if (!pf->esp_support) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Unsupported ESP protocol"); + return -rte_errno; + } + + esp_spec = item->spec; + esp_mask = item->mask; + + if (!esp_spec || !esp_mask) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid ESP item"); + return -rte_errno; + } + + if (esp_spec && esp_mask) { + if (esp_mask->hdr.spi != UINT32_MAX) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid ESP mask"); + return -rte_errno; + } + i40e_flow_set_filter_spi(filter, esp_spec); + filter->input.flow_ext.customized_pctype = true; + cus_proto = item_type; + } + break; + case RTE_FLOW_ITEM_TYPE_SCTP: + sctp_spec = item->spec; + sctp_mask = item->mask; + + if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) + pctype = + I40E_FILTER_PCTYPE_NONF_IPV4_SCTP; + else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) + pctype = + I40E_FILTER_PCTYPE_NONF_IPV6_SCTP; + + if (sctp_spec && sctp_mask) { + /* Check SCTP mask and update input set */ + if (sctp_mask->hdr.cksum) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid UDP mask"); + return -rte_errno; + } + + if (sctp_mask->hdr.src_port == UINT16_MAX) + input_set |= I40E_INSET_SRC_PORT; + if (sctp_mask->hdr.dst_port == UINT16_MAX) + input_set |= I40E_INSET_DST_PORT; + if (sctp_mask->hdr.tag == UINT32_MAX) + input_set |= I40E_INSET_SCTP_VT; + + /* Get filter info */ + if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) { + filter->input.flow.sctp4_flow.src_port = + sctp_spec->hdr.src_port; + filter->input.flow.sctp4_flow.dst_port = + sctp_spec->hdr.dst_port; + filter->input.flow.sctp4_flow.verify_tag + = sctp_spec->hdr.tag; + } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) { + filter->input.flow.sctp6_flow.src_port = + sctp_spec->hdr.src_port; + filter->input.flow.sctp6_flow.dst_port = + sctp_spec->hdr.dst_port; + filter->input.flow.sctp6_flow.verify_tag + = sctp_spec->hdr.tag; + } + } + + layer_idx = I40E_FLXPLD_L4_IDX; + + break; + case RTE_FLOW_ITEM_TYPE_RAW: + raw_spec = item->spec; + raw_mask = item->mask; + + if (!raw_spec || !raw_mask) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "NULL RAW spec/mask"); + return -rte_errno; + } + + if (pf->support_multi_driver) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Unsupported flexible payload."); + return -rte_errno; + } + + ret = i40e_flow_check_raw_item(item, raw_spec, error); + if (ret < 0) + return ret; + + off_arr[raw_id] = raw_spec->offset; + len_arr[raw_id] = raw_spec->length; + + flex_size = 0; + memset(&flex_pit, 0, sizeof(struct i40e_fdir_flex_pit)); + flex_pit.size = + raw_spec->length / sizeof(uint16_t); + flex_pit.dst_offset = + next_dst_off / sizeof(uint16_t); + + for (i = 0; i <= raw_id; i++) { + 
if (i == raw_id) + flex_pit.src_offset += + raw_spec->offset / + sizeof(uint16_t); + else + flex_pit.src_offset += + (off_arr[i] + len_arr[i]) / + sizeof(uint16_t); + flex_size += len_arr[i]; + } + if (((flex_pit.src_offset + flex_pit.size) >= + I40E_MAX_FLX_SOURCE_OFF / sizeof(uint16_t)) || + flex_size > I40E_FDIR_MAX_FLEXLEN) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Exceeds maxmial payload limit."); + return -rte_errno; + } + + /* Store flex pit to SW */ + ret = i40e_flow_store_flex_pit(pf, &flex_pit, + layer_idx, raw_id); + if (ret < 0) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Conflict with the first flexible rule."); + return -rte_errno; + } else if (ret > 0) + cfg_flex_pit = false; + + for (i = 0; i < raw_spec->length; i++) { + j = i + next_dst_off; + filter->input.flow_ext.flexbytes[j] = + raw_spec->pattern[i]; + flex_mask[j] = raw_mask->pattern[i]; + } + + next_dst_off += raw_spec->length; + raw_id++; + break; + case RTE_FLOW_ITEM_TYPE_VF: + vf_spec = item->spec; + if (!attr->transfer) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Matching VF traffic" + " without affecting it" + " (transfer attribute)" + " is unsupported"); + return -rte_errno; + } + filter->input.flow_ext.is_vf = 1; + filter->input.flow_ext.dst_id = vf_spec->id; + if (filter->input.flow_ext.is_vf && + filter->input.flow_ext.dst_id >= pf->vf_num) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid VF ID for FDIR."); + return -rte_errno; + } + break; + case RTE_FLOW_ITEM_TYPE_L2TPV3OIP: + l2tpv3oip_spec = item->spec; + l2tpv3oip_mask = item->mask; + + if (!l2tpv3oip_spec || !l2tpv3oip_mask) + break; + + if (l2tpv3oip_mask->session_id != UINT32_MAX) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid L2TPv3 mask"); + return -rte_errno; + } + + if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) { + filter->input.flow.ip4_l2tpv3oip_flow.session_id = + l2tpv3oip_spec->session_id; + filter->input.flow_ext.oip_type = + I40E_FDIR_IPTYPE_IPV4; + } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) { + filter->input.flow.ip6_l2tpv3oip_flow.session_id = + l2tpv3oip_spec->session_id; + filter->input.flow_ext.oip_type = + I40E_FDIR_IPTYPE_IPV6; + } + + filter->input.flow_ext.customized_pctype = true; + cus_proto = item_type; + break; + default: + break; + } + } + + /* Get customized pctype value */ + if (filter->input.flow_ext.customized_pctype) { + pctype = i40e_flow_fdir_get_pctype_value(pf, cus_proto, filter); + if (pctype == I40E_FILTER_PCTYPE_INVALID) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Unsupported pctype"); + return -rte_errno; + } + } + + /* If customized pctype is not used, set fdir configuration.*/ + if (!filter->input.flow_ext.customized_pctype) { + ret = i40e_flow_set_fdir_inset(pf, pctype, input_set); + if (ret == -1) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Conflict with the first rule's input set."); + return -rte_errno; + } else if (ret == -EINVAL) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Invalid pattern mask."); + return -rte_errno; + } + + /* Store flex mask to SW */ + ret = i40e_flow_store_flex_mask(pf, pctype, flex_mask); + if (ret == -1) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Exceed maximal number of bitmasks"); + return -rte_errno; + } else if (ret == -2) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + 
item, + "Conflict with the first flexible rule"); + return -rte_errno; + } else if (ret > 0) + cfg_flex_msk = false; + + if (cfg_flex_pit) + i40e_flow_set_fdir_flex_pit(pf, layer_idx, raw_id); + + if (cfg_flex_msk) + i40e_flow_set_fdir_flex_msk(pf, pctype); + } + + filter->input.pctype = pctype; + + return 0; +} + +/* Parse to get the action info of a FDIR filter. + * FDIR action supports QUEUE or (QUEUE + MARK). + */ +static int +i40e_flow_parse_fdir_action(struct rte_eth_dev *dev, + const struct rte_flow_action *actions, + struct rte_flow_error *error, + struct i40e_fdir_filter_conf *filter) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + const struct rte_flow_action *act; + const struct rte_flow_action_queue *act_q; + const struct rte_flow_action_mark *mark_spec = NULL; + uint32_t index = 0; + + /* Check if the first non-void action is QUEUE or DROP or PASSTHRU. */ + NEXT_ITEM_OF_ACTION(act, actions, index); + switch (act->type) { + case RTE_FLOW_ACTION_TYPE_QUEUE: + act_q = act->conf; + filter->action.rx_queue = act_q->index; + if ((!filter->input.flow_ext.is_vf && + filter->action.rx_queue >= pf->dev_data->nb_rx_queues) || + (filter->input.flow_ext.is_vf && + filter->action.rx_queue >= pf->vf_nb_qps)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, act, + "Invalid queue ID for FDIR."); + return -rte_errno; + } + filter->action.behavior = I40E_FDIR_ACCEPT; + break; + case RTE_FLOW_ACTION_TYPE_DROP: + filter->action.behavior = I40E_FDIR_REJECT; + break; + case RTE_FLOW_ACTION_TYPE_PASSTHRU: + filter->action.behavior = I40E_FDIR_PASSTHRU; + break; + case RTE_FLOW_ACTION_TYPE_MARK: + filter->action.behavior = I40E_FDIR_PASSTHRU; + mark_spec = act->conf; + filter->action.report_status = I40E_FDIR_REPORT_ID; + filter->soft_id = mark_spec->id; + break; + default: + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, act, + "Invalid action."); + return -rte_errno; + } + + /* Check if the next non-void item is MARK or FLAG or END. 
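+ * For example (illustrative), actions QUEUE / MARK / END are accepted
+ * and the MARK id is reported as the flow director soft id, while a
+ * second MARK or a MARK + FLAG combination is rejected below.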
*/ + index++; + NEXT_ITEM_OF_ACTION(act, actions, index); + switch (act->type) { + case RTE_FLOW_ACTION_TYPE_MARK: + if (mark_spec) { + /* Double MARK actions requested */ + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, act, + "Invalid action."); + return -rte_errno; + } + mark_spec = act->conf; + filter->action.report_status = I40E_FDIR_REPORT_ID; + filter->soft_id = mark_spec->id; + break; + case RTE_FLOW_ACTION_TYPE_FLAG: + if (mark_spec) { + /* MARK + FLAG not supported */ + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, act, + "Invalid action."); + return -rte_errno; + } + filter->action.report_status = I40E_FDIR_NO_REPORT_STATUS; + break; + case RTE_FLOW_ACTION_TYPE_RSS: + if (filter->action.behavior != I40E_FDIR_PASSTHRU) { + /* RSS filter won't be next if FDIR did not pass thru */ + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, act, + "Invalid action."); + return -rte_errno; + } + break; + case RTE_FLOW_ACTION_TYPE_END: + return 0; + default: + rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, + act, "Invalid action."); + return -rte_errno; + } + + /* Check if the next non-void item is END */ + index++; + NEXT_ITEM_OF_ACTION(act, actions, index); + if (act->type != RTE_FLOW_ACTION_TYPE_END) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, "Invalid action."); + return -rte_errno; + } + + return 0; +} + +static int +i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error, + union i40e_filter_t *filter) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_fdir_filter_conf *fdir_filter = + &filter->fdir_filter; + int ret; + + ret = i40e_flow_parse_fdir_pattern(dev, attr, pattern, error, + fdir_filter); + if (ret) + return ret; + + ret = i40e_flow_parse_fdir_action(dev, actions, error, fdir_filter); + if (ret) + return ret; + + ret = i40e_flow_parse_attr(attr, error); + if (ret) + return ret; + + cons_filter_type = RTE_ETH_FILTER_FDIR; + + if (pf->fdir.fdir_vsi == NULL) { + /* Enable fdir when fdir flow is added at first time. */ + ret = i40e_fdir_setup(pf); + if (ret != I40E_SUCCESS) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_HANDLE, + NULL, "Failed to setup fdir."); + return -rte_errno; + } + ret = i40e_fdir_configure(dev); + if (ret < 0) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_HANDLE, + NULL, "Failed to configure fdir."); + goto err; + } + } + + /* If create the first fdir rule, enable fdir check for rx queues */ + if (TAILQ_EMPTY(&pf->fdir.fdir_list)) + i40e_fdir_rx_proc_enable(dev, 1); + + return 0; +err: + i40e_fdir_teardown(pf); + return -rte_errno; +} + +/* Parse to get the action info of a tunnel filter + * Tunnel action only supports PF, VF and QUEUE. + */ +static int +i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev, + const struct rte_flow_action *actions, + struct rte_flow_error *error, + struct i40e_tunnel_filter_conf *filter) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + const struct rte_flow_action *act; + const struct rte_flow_action_queue *act_q; + const struct rte_flow_action_vf *act_vf; + uint32_t index = 0; + + /* Check if the first non-void action is PF or VF. 
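+ * For example (illustrative), actions VF / QUEUE / END redirect matched
+ * tunnel traffic to the given queue of that VF, while PF / QUEUE / END
+ * keeps it on a PF queue.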
*/ + NEXT_ITEM_OF_ACTION(act, actions, index); + if (act->type != RTE_FLOW_ACTION_TYPE_PF && + act->type != RTE_FLOW_ACTION_TYPE_VF) { + rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, + act, "Not supported action."); + return -rte_errno; + } + + if (act->type == RTE_FLOW_ACTION_TYPE_VF) { + act_vf = act->conf; + filter->vf_id = act_vf->id; + filter->is_to_vf = 1; + if (filter->vf_id >= pf->vf_num) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, "Invalid VF ID for tunnel filter"); + return -rte_errno; + } + } + + /* Check if the next non-void item is QUEUE */ + index++; + NEXT_ITEM_OF_ACTION(act, actions, index); + if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) { + act_q = act->conf; + filter->queue_id = act_q->index; + if ((!filter->is_to_vf) && + (filter->queue_id >= pf->dev_data->nb_rx_queues)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, "Invalid queue ID for tunnel filter"); + return -rte_errno; + } else if (filter->is_to_vf && + (filter->queue_id >= pf->vf_nb_qps)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, "Invalid queue ID for tunnel filter"); + return -rte_errno; + } + } + + /* Check if the next non-void item is END */ + index++; + NEXT_ITEM_OF_ACTION(act, actions, index); + if (act->type != RTE_FLOW_ACTION_TYPE_END) { + rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, + act, "Not supported action."); + return -rte_errno; + } + + return 0; +} + +static uint16_t i40e_supported_tunnel_filter_types[] = { + ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_TENID | + ETH_TUNNEL_FILTER_IVLAN, + ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN, + ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_TENID, + ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_TENID | + ETH_TUNNEL_FILTER_IMAC, + ETH_TUNNEL_FILTER_IMAC, +}; + +static int +i40e_check_tunnel_filter_type(uint8_t filter_type) +{ + uint8_t i; + + for (i = 0; i < RTE_DIM(i40e_supported_tunnel_filter_types); i++) { + if (filter_type == i40e_supported_tunnel_filter_types[i]) + return 0; + } + + return -1; +} + +/* 1. Last in item should be NULL as range is not supported. + * 2. Supported filter types: IMAC_IVLAN_TENID, IMAC_IVLAN, + * IMAC_TENID, OMAC_TENID_IMAC and IMAC. + * 3. Mask of fields which need to be matched should be + * filled with 1. + * 4. Mask of fields which needn't to be matched should be + * filled with 0. + */ +static int +i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev, + const struct rte_flow_item *pattern, + struct rte_flow_error *error, + struct i40e_tunnel_filter_conf *filter) +{ + const struct rte_flow_item *item = pattern; + const struct rte_flow_item_eth *eth_spec; + const struct rte_flow_item_eth *eth_mask; + const struct rte_flow_item_vxlan *vxlan_spec; + const struct rte_flow_item_vxlan *vxlan_mask; + const struct rte_flow_item_vlan *vlan_spec; + const struct rte_flow_item_vlan *vlan_mask; + uint8_t filter_type = 0; + bool is_vni_masked = 0; + uint8_t vni_mask[] = {0xFF, 0xFF, 0xFF}; + enum rte_flow_item_type item_type; + bool vxlan_flag = 0; + uint32_t tenant_id_be = 0; + int ret; + + for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { + if (item->last) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Not support range"); + return -rte_errno; + } + item_type = item->type; + switch (item_type) { + case RTE_FLOW_ITEM_TYPE_ETH: + eth_spec = item->spec; + eth_mask = item->mask; + + /* Check if ETH item is used for place holder. 
+ * If yes, both spec and mask should be NULL. + * If no, both spec and mask shouldn't be NULL. + */ + if ((!eth_spec && eth_mask) || + (eth_spec && !eth_mask)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid ether spec/mask"); + return -rte_errno; + } + + if (eth_spec && eth_mask) { + /* DST address of inner MAC shouldn't be masked. + * SRC address of Inner MAC should be masked. + */ + if (!rte_is_broadcast_ether_addr(ð_mask->dst) || + !rte_is_zero_ether_addr(ð_mask->src) || + eth_mask->type) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid ether spec/mask"); + return -rte_errno; + } + + if (!vxlan_flag) { + rte_memcpy(&filter->outer_mac, + ð_spec->dst, + RTE_ETHER_ADDR_LEN); + filter_type |= ETH_TUNNEL_FILTER_OMAC; + } else { + rte_memcpy(&filter->inner_mac, + ð_spec->dst, + RTE_ETHER_ADDR_LEN); + filter_type |= ETH_TUNNEL_FILTER_IMAC; + } + } + break; + case RTE_FLOW_ITEM_TYPE_VLAN: + vlan_spec = item->spec; + vlan_mask = item->mask; + if (!(vlan_spec && vlan_mask) || + vlan_mask->inner_type) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid vlan item"); + return -rte_errno; + } + + if (vlan_spec && vlan_mask) { + if (vlan_mask->tci == + rte_cpu_to_be_16(I40E_TCI_MASK)) + filter->inner_vlan = + rte_be_to_cpu_16(vlan_spec->tci) & + I40E_TCI_MASK; + filter_type |= ETH_TUNNEL_FILTER_IVLAN; + } + break; + case RTE_FLOW_ITEM_TYPE_IPV4: + filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4; + /* IPv4 is used to describe protocol, + * spec and mask should be NULL. + */ + if (item->spec || item->mask) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid IPv4 item"); + return -rte_errno; + } + break; + case RTE_FLOW_ITEM_TYPE_IPV6: + filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6; + /* IPv6 is used to describe protocol, + * spec and mask should be NULL. + */ + if (item->spec || item->mask) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid IPv6 item"); + return -rte_errno; + } + break; + case RTE_FLOW_ITEM_TYPE_UDP: + /* UDP is used to describe protocol, + * spec and mask should be NULL. + */ + if (item->spec || item->mask) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid UDP item"); + return -rte_errno; + } + break; + case RTE_FLOW_ITEM_TYPE_VXLAN: + vxlan_spec = item->spec; + vxlan_mask = item->mask; + /* Check if VXLAN item is used to describe protocol. + * If yes, both spec and mask should be NULL. + * If no, both spec and mask shouldn't be NULL. + */ + if ((!vxlan_spec && vxlan_mask) || + (vxlan_spec && !vxlan_mask)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid VXLAN item"); + return -rte_errno; + } + + /* Check if VNI is masked. 
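+ * For example (illustrative), vni = { 0x12, 0x34, 0x56 } with
+ * mask { 0xff, 0xff, 0xff } is accepted and yields tenant_id
+ * 0x123456 after the byte copy and rte_be_to_cpu_32() below.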
*/ + if (vxlan_spec && vxlan_mask) { + is_vni_masked = + !!memcmp(vxlan_mask->vni, vni_mask, + RTE_DIM(vni_mask)); + if (is_vni_masked) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid VNI mask"); + return -rte_errno; + } + + rte_memcpy(((uint8_t *)&tenant_id_be + 1), + vxlan_spec->vni, 3); + filter->tenant_id = + rte_be_to_cpu_32(tenant_id_be); + filter_type |= ETH_TUNNEL_FILTER_TENID; + } + + vxlan_flag = 1; + break; + default: + break; + } + } + + ret = i40e_check_tunnel_filter_type(filter_type); + if (ret < 0) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + NULL, + "Invalid filter type"); + return -rte_errno; + } + filter->filter_type = filter_type; + + filter->tunnel_type = I40E_TUNNEL_TYPE_VXLAN; + + return 0; +} + +static int +i40e_flow_parse_vxlan_filter(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error, + union i40e_filter_t *filter) +{ + struct i40e_tunnel_filter_conf *tunnel_filter = + &filter->consistent_tunnel_filter; + int ret; + + ret = i40e_flow_parse_vxlan_pattern(dev, pattern, + error, tunnel_filter); + if (ret) + return ret; + + ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter); + if (ret) + return ret; + + ret = i40e_flow_parse_attr(attr, error); + if (ret) + return ret; + + cons_filter_type = RTE_ETH_FILTER_TUNNEL; + + return ret; +} + +/* 1. Last in item should be NULL as range is not supported. + * 2. Supported filter types: IMAC_IVLAN_TENID, IMAC_IVLAN, + * IMAC_TENID, OMAC_TENID_IMAC and IMAC. + * 3. Mask of fields which need to be matched should be + * filled with 1. + * 4. Mask of fields which needn't to be matched should be + * filled with 0. + */ +static int +i40e_flow_parse_nvgre_pattern(__rte_unused struct rte_eth_dev *dev, + const struct rte_flow_item *pattern, + struct rte_flow_error *error, + struct i40e_tunnel_filter_conf *filter) +{ + const struct rte_flow_item *item = pattern; + const struct rte_flow_item_eth *eth_spec; + const struct rte_flow_item_eth *eth_mask; + const struct rte_flow_item_nvgre *nvgre_spec; + const struct rte_flow_item_nvgre *nvgre_mask; + const struct rte_flow_item_vlan *vlan_spec; + const struct rte_flow_item_vlan *vlan_mask; + enum rte_flow_item_type item_type; + uint8_t filter_type = 0; + bool is_tni_masked = 0; + uint8_t tni_mask[] = {0xFF, 0xFF, 0xFF}; + bool nvgre_flag = 0; + uint32_t tenant_id_be = 0; + int ret; + + for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { + if (item->last) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Not support range"); + return -rte_errno; + } + item_type = item->type; + switch (item_type) { + case RTE_FLOW_ITEM_TYPE_ETH: + eth_spec = item->spec; + eth_mask = item->mask; + + /* Check if ETH item is used for place holder. + * If yes, both spec and mask should be NULL. + * If no, both spec and mask shouldn't be NULL. + */ + if ((!eth_spec && eth_mask) || + (eth_spec && !eth_mask)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid ether spec/mask"); + return -rte_errno; + } + + if (eth_spec && eth_mask) { + /* DST address of inner MAC shouldn't be masked. + * SRC address of Inner MAC should be masked. 
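+ * i.e. eth_mask->dst is expected to be ff:ff:ff:ff:ff:ff and
+ * eth_mask->src all zeroes (illustrative of the checks below).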
+ */ + if (!rte_is_broadcast_ether_addr(ð_mask->dst) || + !rte_is_zero_ether_addr(ð_mask->src) || + eth_mask->type) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid ether spec/mask"); + return -rte_errno; + } + + if (!nvgre_flag) { + rte_memcpy(&filter->outer_mac, + ð_spec->dst, + RTE_ETHER_ADDR_LEN); + filter_type |= ETH_TUNNEL_FILTER_OMAC; + } else { + rte_memcpy(&filter->inner_mac, + ð_spec->dst, + RTE_ETHER_ADDR_LEN); + filter_type |= ETH_TUNNEL_FILTER_IMAC; + } + } + + break; + case RTE_FLOW_ITEM_TYPE_VLAN: + vlan_spec = item->spec; + vlan_mask = item->mask; + if (!(vlan_spec && vlan_mask) || + vlan_mask->inner_type) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid vlan item"); + return -rte_errno; + } + + if (vlan_spec && vlan_mask) { + if (vlan_mask->tci == + rte_cpu_to_be_16(I40E_TCI_MASK)) + filter->inner_vlan = + rte_be_to_cpu_16(vlan_spec->tci) & + I40E_TCI_MASK; + filter_type |= ETH_TUNNEL_FILTER_IVLAN; + } + break; + case RTE_FLOW_ITEM_TYPE_IPV4: + filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4; + /* IPv4 is used to describe protocol, + * spec and mask should be NULL. + */ + if (item->spec || item->mask) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid IPv4 item"); + return -rte_errno; + } + break; + case RTE_FLOW_ITEM_TYPE_IPV6: + filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6; + /* IPv6 is used to describe protocol, + * spec and mask should be NULL. + */ + if (item->spec || item->mask) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid IPv6 item"); + return -rte_errno; + } + break; + case RTE_FLOW_ITEM_TYPE_NVGRE: + nvgre_spec = item->spec; + nvgre_mask = item->mask; + /* Check if NVGRE item is used to describe protocol. + * If yes, both spec and mask should be NULL. + * If no, both spec and mask shouldn't be NULL. 
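+ * When spec/mask are supplied, the checks below expect a fully
+ * masked TNI ({ 0xff, 0xff, 0xff }), a c_k_s_rsvd0_ver spec of
+ * 0x2000 (key bit set) when its mask is non-zero, and protocol
+ * 0x6558 (transparent Ethernet bridging) when the protocol is
+ * masked.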
+ */ + if ((!nvgre_spec && nvgre_mask) || + (nvgre_spec && !nvgre_mask)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid NVGRE item"); + return -rte_errno; + } + + if (nvgre_spec && nvgre_mask) { + is_tni_masked = + !!memcmp(nvgre_mask->tni, tni_mask, + RTE_DIM(tni_mask)); + if (is_tni_masked) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid TNI mask"); + return -rte_errno; + } + if (nvgre_mask->protocol && + nvgre_mask->protocol != 0xFFFF) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid NVGRE item"); + return -rte_errno; + } + if (nvgre_mask->c_k_s_rsvd0_ver && + nvgre_mask->c_k_s_rsvd0_ver != + rte_cpu_to_be_16(0xFFFF)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid NVGRE item"); + return -rte_errno; + } + if (nvgre_spec->c_k_s_rsvd0_ver != + rte_cpu_to_be_16(0x2000) && + nvgre_mask->c_k_s_rsvd0_ver) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid NVGRE item"); + return -rte_errno; + } + if (nvgre_mask->protocol && + nvgre_spec->protocol != + rte_cpu_to_be_16(0x6558)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid NVGRE item"); + return -rte_errno; + } + rte_memcpy(((uint8_t *)&tenant_id_be + 1), + nvgre_spec->tni, 3); + filter->tenant_id = + rte_be_to_cpu_32(tenant_id_be); + filter_type |= ETH_TUNNEL_FILTER_TENID; + } + + nvgre_flag = 1; + break; + default: + break; + } + } + + ret = i40e_check_tunnel_filter_type(filter_type); + if (ret < 0) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + NULL, + "Invalid filter type"); + return -rte_errno; + } + filter->filter_type = filter_type; + + filter->tunnel_type = I40E_TUNNEL_TYPE_NVGRE; + + return 0; +} + +static int +i40e_flow_parse_nvgre_filter(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error, + union i40e_filter_t *filter) +{ + struct i40e_tunnel_filter_conf *tunnel_filter = + &filter->consistent_tunnel_filter; + int ret; + + ret = i40e_flow_parse_nvgre_pattern(dev, pattern, + error, tunnel_filter); + if (ret) + return ret; + + ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter); + if (ret) + return ret; + + ret = i40e_flow_parse_attr(attr, error); + if (ret) + return ret; + + cons_filter_type = RTE_ETH_FILTER_TUNNEL; + + return ret; +} + +/* 1. Last in item should be NULL as range is not supported. + * 2. Supported filter types: MPLS label. + * 3. Mask of fields which need to be matched should be + * filled with 1. + * 4. Mask of fields which needn't to be matched should be + * filled with 0. 
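+ *
+ * For example (values illustrative), the 20-bit MPLS label sits in the
+ * upper bits of label_tc_s, so a fully matched label uses the mask
+ * { 0xFF, 0xFF, 0xF0 }; the parsed label becomes the tenant_id of the
+ * tunnel filter, and the presence of a UDP item selects MPLSoUDP
+ * instead of MPLSoGRE.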
+ */ +static int +i40e_flow_parse_mpls_pattern(__rte_unused struct rte_eth_dev *dev, + const struct rte_flow_item *pattern, + struct rte_flow_error *error, + struct i40e_tunnel_filter_conf *filter) +{ + const struct rte_flow_item *item = pattern; + const struct rte_flow_item_mpls *mpls_spec; + const struct rte_flow_item_mpls *mpls_mask; + enum rte_flow_item_type item_type; + bool is_mplsoudp = 0; /* 1 - MPLSoUDP, 0 - MPLSoGRE */ + const uint8_t label_mask[3] = {0xFF, 0xFF, 0xF0}; + uint32_t label_be = 0; + + for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { + if (item->last) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Not support range"); + return -rte_errno; + } + item_type = item->type; + switch (item_type) { + case RTE_FLOW_ITEM_TYPE_ETH: + if (item->spec || item->mask) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid ETH item"); + return -rte_errno; + } + break; + case RTE_FLOW_ITEM_TYPE_IPV4: + filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4; + /* IPv4 is used to describe protocol, + * spec and mask should be NULL. + */ + if (item->spec || item->mask) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid IPv4 item"); + return -rte_errno; + } + break; + case RTE_FLOW_ITEM_TYPE_IPV6: + filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6; + /* IPv6 is used to describe protocol, + * spec and mask should be NULL. + */ + if (item->spec || item->mask) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid IPv6 item"); + return -rte_errno; + } + break; + case RTE_FLOW_ITEM_TYPE_UDP: + /* UDP is used to describe protocol, + * spec and mask should be NULL. + */ + if (item->spec || item->mask) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid UDP item"); + return -rte_errno; + } + is_mplsoudp = 1; + break; + case RTE_FLOW_ITEM_TYPE_GRE: + /* GRE is used to describe protocol, + * spec and mask should be NULL. 
+ */ + if (item->spec || item->mask) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid GRE item"); + return -rte_errno; + } + break; + case RTE_FLOW_ITEM_TYPE_MPLS: + mpls_spec = item->spec; + mpls_mask = item->mask; + + if (!mpls_spec || !mpls_mask) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid MPLS item"); + return -rte_errno; + } + + if (memcmp(mpls_mask->label_tc_s, label_mask, 3)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid MPLS label mask"); + return -rte_errno; + } + rte_memcpy(((uint8_t *)&label_be + 1), + mpls_spec->label_tc_s, 3); + filter->tenant_id = rte_be_to_cpu_32(label_be) >> 4; + break; + default: + break; + } + } + + if (is_mplsoudp) + filter->tunnel_type = I40E_TUNNEL_TYPE_MPLSoUDP; + else + filter->tunnel_type = I40E_TUNNEL_TYPE_MPLSoGRE; + + return 0; +} + +static int +i40e_flow_parse_mpls_filter(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error, + union i40e_filter_t *filter) +{ + struct i40e_tunnel_filter_conf *tunnel_filter = + &filter->consistent_tunnel_filter; + int ret; + + ret = i40e_flow_parse_mpls_pattern(dev, pattern, + error, tunnel_filter); + if (ret) + return ret; + + ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter); + if (ret) + return ret; + + ret = i40e_flow_parse_attr(attr, error); + if (ret) + return ret; + + cons_filter_type = RTE_ETH_FILTER_TUNNEL; + + return ret; +} + +/* 1. Last in item should be NULL as range is not supported. + * 2. Supported filter types: GTP TEID. + * 3. Mask of fields which need to be matched should be + * filled with 1. + * 4. Mask of fields which needn't to be matched should be + * filled with 0. + * 5. GTP profile supports GTPv1 only. + * 6. GTP-C response message ('source_port' = 2123) is not supported. + */ +static int +i40e_flow_parse_gtp_pattern(struct rte_eth_dev *dev, + const struct rte_flow_item *pattern, + struct rte_flow_error *error, + struct i40e_tunnel_filter_conf *filter) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + const struct rte_flow_item *item = pattern; + const struct rte_flow_item_gtp *gtp_spec; + const struct rte_flow_item_gtp *gtp_mask; + enum rte_flow_item_type item_type; + + if (!pf->gtp_support) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "GTP is not supported by default."); + return -rte_errno; + } + + for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { + if (item->last) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Not support range"); + return -rte_errno; + } + item_type = item->type; + switch (item_type) { + case RTE_FLOW_ITEM_TYPE_ETH: + if (item->spec || item->mask) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid ETH item"); + return -rte_errno; + } + break; + case RTE_FLOW_ITEM_TYPE_IPV4: + filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4; + /* IPv4 is used to describe protocol, + * spec and mask should be NULL. 
+ */ + if (item->spec || item->mask) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid IPv4 item"); + return -rte_errno; + } + break; + case RTE_FLOW_ITEM_TYPE_UDP: + if (item->spec || item->mask) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid UDP item"); + return -rte_errno; + } + break; + case RTE_FLOW_ITEM_TYPE_GTPC: + case RTE_FLOW_ITEM_TYPE_GTPU: + gtp_spec = item->spec; + gtp_mask = item->mask; + + if (!gtp_spec || !gtp_mask) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid GTP item"); + return -rte_errno; + } + + if (gtp_mask->v_pt_rsv_flags || + gtp_mask->msg_type || + gtp_mask->msg_len || + gtp_mask->teid != UINT32_MAX) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid GTP mask"); + return -rte_errno; + } + + if (item_type == RTE_FLOW_ITEM_TYPE_GTPC) + filter->tunnel_type = I40E_TUNNEL_TYPE_GTPC; + else if (item_type == RTE_FLOW_ITEM_TYPE_GTPU) + filter->tunnel_type = I40E_TUNNEL_TYPE_GTPU; + + filter->tenant_id = rte_be_to_cpu_32(gtp_spec->teid); + + break; + default: + break; + } + } + + return 0; +} + +static int +i40e_flow_parse_gtp_filter(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error, + union i40e_filter_t *filter) +{ + struct i40e_tunnel_filter_conf *tunnel_filter = + &filter->consistent_tunnel_filter; + int ret; + + ret = i40e_flow_parse_gtp_pattern(dev, pattern, + error, tunnel_filter); + if (ret) + return ret; + + ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter); + if (ret) + return ret; + + ret = i40e_flow_parse_attr(attr, error); + if (ret) + return ret; + + cons_filter_type = RTE_ETH_FILTER_TUNNEL; + + return ret; +} + +/* 1. Last in item should be NULL as range is not supported. + * 2. Supported filter types: QINQ. + * 3. Mask of fields which need to be matched should be + * filled with 1. + * 4. Mask of fields which needn't to be matched should be + * filled with 0. 
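+ *
+ * For example (values illustrative), pattern ETH / VLAN / VLAN / END
+ * with both TCI fields fully masked (I40E_TCI_MASK) matches one
+ * specific outer/inner VLAN pair; the first VLAN item is taken as the
+ * outer tag and the second as the inner tag.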
+ */ +static int +i40e_flow_parse_qinq_pattern(__rte_unused struct rte_eth_dev *dev, + const struct rte_flow_item *pattern, + struct rte_flow_error *error, + struct i40e_tunnel_filter_conf *filter) +{ + const struct rte_flow_item *item = pattern; + const struct rte_flow_item_vlan *vlan_spec = NULL; + const struct rte_flow_item_vlan *vlan_mask = NULL; + const struct rte_flow_item_vlan *i_vlan_spec = NULL; + const struct rte_flow_item_vlan *i_vlan_mask = NULL; + const struct rte_flow_item_vlan *o_vlan_spec = NULL; + const struct rte_flow_item_vlan *o_vlan_mask = NULL; + + enum rte_flow_item_type item_type; + bool vlan_flag = 0; + + for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { + if (item->last) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Not support range"); + return -rte_errno; + } + item_type = item->type; + switch (item_type) { + case RTE_FLOW_ITEM_TYPE_ETH: + if (item->spec || item->mask) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid ETH item"); + return -rte_errno; + } + break; + case RTE_FLOW_ITEM_TYPE_VLAN: + vlan_spec = item->spec; + vlan_mask = item->mask; + + if (!(vlan_spec && vlan_mask) || + vlan_mask->inner_type) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid vlan item"); + return -rte_errno; + } + + if (!vlan_flag) { + o_vlan_spec = vlan_spec; + o_vlan_mask = vlan_mask; + vlan_flag = 1; + } else { + i_vlan_spec = vlan_spec; + i_vlan_mask = vlan_mask; + vlan_flag = 0; + } + break; + + default: + break; + } + } + + /* Get filter specification */ + if ((o_vlan_mask != NULL) && (o_vlan_mask->tci == + rte_cpu_to_be_16(I40E_TCI_MASK)) && + (i_vlan_mask != NULL) && + (i_vlan_mask->tci == rte_cpu_to_be_16(I40E_TCI_MASK))) { + filter->outer_vlan = rte_be_to_cpu_16(o_vlan_spec->tci) + & I40E_TCI_MASK; + filter->inner_vlan = rte_be_to_cpu_16(i_vlan_spec->tci) + & I40E_TCI_MASK; + } else { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + NULL, + "Invalid filter type"); + return -rte_errno; + } + + filter->tunnel_type = I40E_TUNNEL_TYPE_QINQ; + return 0; +} + +static int +i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error, + union i40e_filter_t *filter) +{ + struct i40e_tunnel_filter_conf *tunnel_filter = + &filter->consistent_tunnel_filter; + int ret; + + ret = i40e_flow_parse_qinq_pattern(dev, pattern, + error, tunnel_filter); + if (ret) + return ret; + + ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter); + if (ret) + return ret; + + ret = i40e_flow_parse_attr(attr, error); + if (ret) + return ret; + + cons_filter_type = RTE_ETH_FILTER_TUNNEL; + + return ret; +} + +/** + * This function is used to do configuration i40e existing RSS with rte_flow. + * It also enable queue region configuration using flow API for i40e. + * pattern can be used indicate what parameters will be include in flow, + * like user_priority or flowtype for queue region or HASH function for RSS. + * Action is used to transmit parameter like queue index and HASH + * function for RSS, or flowtype for queue region configuration. + * For example: + * pattern: + * Case 1: try to transform patterns to pctype. valid pctype will be + * used in parse action. + * Case 2: only ETH, indicate flowtype for queue region will be parsed. + * Case 3: only VLAN, indicate user_priority for queue region will be parsed. 
+ * So, pattern choice is depened on the purpose of configuration of + * that flow. + * action: + * action RSS will be used to transmit valid parameter with + * struct rte_flow_action_rss for all the 3 case. + */ +static int +i40e_flow_parse_rss_pattern(__rte_unused struct rte_eth_dev *dev, + const struct rte_flow_item *pattern, + struct rte_flow_error *error, + struct i40e_rss_pattern_info *p_info, + struct i40e_queue_regions *info) +{ + const struct rte_flow_item_vlan *vlan_spec, *vlan_mask; + const struct rte_flow_item *item = pattern; + enum rte_flow_item_type item_type; + struct rte_flow_item *items; + uint32_t item_num = 0; /* non-void item number of pattern*/ + uint32_t i = 0; + static const struct { + enum rte_flow_item_type *item_array; + uint64_t type; + } i40e_rss_pctype_patterns[] = { + { pattern_fdir_ipv4, + ETH_RSS_FRAG_IPV4 | ETH_RSS_NONFRAG_IPV4_OTHER }, + { pattern_fdir_ipv4_tcp, ETH_RSS_NONFRAG_IPV4_TCP }, + { pattern_fdir_ipv4_udp, ETH_RSS_NONFRAG_IPV4_UDP }, + { pattern_fdir_ipv4_sctp, ETH_RSS_NONFRAG_IPV4_SCTP }, + { pattern_fdir_ipv4_esp, ETH_RSS_ESP }, + { pattern_fdir_ipv4_udp_esp, ETH_RSS_ESP }, + { pattern_fdir_ipv6, + ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_OTHER }, + { pattern_fdir_ipv6_tcp, ETH_RSS_NONFRAG_IPV6_TCP }, + { pattern_fdir_ipv6_udp, ETH_RSS_NONFRAG_IPV6_UDP }, + { pattern_fdir_ipv6_sctp, ETH_RSS_NONFRAG_IPV6_SCTP }, + { pattern_ethertype, ETH_RSS_L2_PAYLOAD }, + { pattern_fdir_ipv6_esp, ETH_RSS_ESP }, + { pattern_fdir_ipv6_udp_esp, ETH_RSS_ESP }, + }; + + p_info->types = I40E_RSS_TYPE_INVALID; + + if (item->type == RTE_FLOW_ITEM_TYPE_END) { + p_info->types = I40E_RSS_TYPE_NONE; + return 0; + } + + /* Convert pattern to RSS offload types */ + while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) { + if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID) + item_num++; + i++; + } + item_num++; + + items = rte_zmalloc("i40e_pattern", + item_num * sizeof(struct rte_flow_item), 0); + if (!items) { + rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM, + NULL, "No memory for PMD internal items."); + return -ENOMEM; + } + + i40e_pattern_skip_void_item(items, pattern); + + for (i = 0; i < RTE_DIM(i40e_rss_pctype_patterns); i++) { + if (i40e_match_pattern(i40e_rss_pctype_patterns[i].item_array, + items)) { + p_info->types = i40e_rss_pctype_patterns[i].type; + break; + } + } + + rte_free(items); + + for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { + if (item->last) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Not support range"); + return -rte_errno; + } + item_type = item->type; + switch (item_type) { + case RTE_FLOW_ITEM_TYPE_ETH: + p_info->action_flag = 1; + break; + case RTE_FLOW_ITEM_TYPE_VLAN: + vlan_spec = item->spec; + vlan_mask = item->mask; + if (vlan_spec && vlan_mask) { + if (vlan_mask->tci == + rte_cpu_to_be_16(I40E_TCI_MASK)) { + info->region[0].user_priority[0] = + (rte_be_to_cpu_16( + vlan_spec->tci) >> 13) & 0x7; + info->region[0].user_priority_num = 1; + info->queue_region_number = 1; + p_info->action_flag = 0; + } + } + break; + default: + p_info->action_flag = 0; + memset(info, 0, sizeof(struct i40e_queue_regions)); + return 0; + } + } + + return 0; +} + +/** + * This function is used to parse RSS queue index, total queue number and + * hash functions, If the purpose of this configuration is for queue region + * configuration, it will set queue_region_conf flag to TRUE, else to FALSE. 
+ * In queue region configuration, it also need to parse hardware flowtype + * and user_priority from configuration, it will also cheeck the validity + * of these parameters. For example, The queue region sizes should + * be any of the following values: 1, 2, 4, 8, 16, 32, 64, the + * hw_flowtype or PCTYPE max index should be 63, the user priority + * max index should be 7, and so on. And also, queue index should be + * continuous sequence and queue region index should be part of RSS + * queue index for this port. + * For hash params, the pctype in action and pattern must be same. + * Set queue index must be with non-types. + */ +static int +i40e_flow_parse_rss_action(struct rte_eth_dev *dev, + const struct rte_flow_action *actions, + struct rte_flow_error *error, + struct i40e_rss_pattern_info p_info, + struct i40e_queue_regions *conf_info, + union i40e_filter_t *filter) +{ + const struct rte_flow_action *act; + const struct rte_flow_action_rss *rss; + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_queue_regions *info = &pf->queue_region; + struct i40e_rte_flow_rss_conf *rss_config = + &filter->rss_conf; + struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info; + uint16_t i, j, n, tmp, nb_types; + uint32_t index = 0; + uint64_t hf_bit = 1; + + static const struct { + uint64_t rss_type; + enum i40e_filter_pctype pctype; + } pctype_match_table[] = { + {ETH_RSS_FRAG_IPV4, + I40E_FILTER_PCTYPE_FRAG_IPV4}, + {ETH_RSS_NONFRAG_IPV4_TCP, + I40E_FILTER_PCTYPE_NONF_IPV4_TCP}, + {ETH_RSS_NONFRAG_IPV4_UDP, + I40E_FILTER_PCTYPE_NONF_IPV4_UDP}, + {ETH_RSS_NONFRAG_IPV4_SCTP, + I40E_FILTER_PCTYPE_NONF_IPV4_SCTP}, + {ETH_RSS_NONFRAG_IPV4_OTHER, + I40E_FILTER_PCTYPE_NONF_IPV4_OTHER}, + {ETH_RSS_FRAG_IPV6, + I40E_FILTER_PCTYPE_FRAG_IPV6}, + {ETH_RSS_NONFRAG_IPV6_TCP, + I40E_FILTER_PCTYPE_NONF_IPV6_TCP}, + {ETH_RSS_NONFRAG_IPV6_UDP, + I40E_FILTER_PCTYPE_NONF_IPV6_UDP}, + {ETH_RSS_NONFRAG_IPV6_SCTP, + I40E_FILTER_PCTYPE_NONF_IPV6_SCTP}, + {ETH_RSS_NONFRAG_IPV6_OTHER, + I40E_FILTER_PCTYPE_NONF_IPV6_OTHER}, + {ETH_RSS_L2_PAYLOAD, + I40E_FILTER_PCTYPE_L2_PAYLOAD}, + }; + + NEXT_ITEM_OF_ACTION(act, actions, index); + rss = act->conf; + + /** + * RSS only supports forwarding, + * check if the first not void action is RSS. + */ + if (act->type != RTE_FLOW_ACTION_TYPE_RSS) { + memset(rss_config, 0, sizeof(struct i40e_rte_flow_rss_conf)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, "Not supported action."); + return -rte_errno; + } + + if (p_info.action_flag && rss->queue_num) { + for (j = 0; j < RTE_DIM(pctype_match_table); j++) { + if (rss->types & pctype_match_table[j].rss_type) { + conf_info->region[0].hw_flowtype[0] = + (uint8_t)pctype_match_table[j].pctype; + conf_info->region[0].flowtype_num = 1; + conf_info->queue_region_number = 1; + break; + } + } + } + + /** + * Do some queue region related parameters check + * in order to keep queue index for queue region to be + * continuous sequence and also to be part of RSS + * queue index for this port. 
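+ * For example (illustrative), queues 8..15 form a valid region of
+ * size 8, whereas queues 8, 9 and 11 are rejected because the indexes
+ * are not consecutive, as is any queue that is not part of the RSS
+ * queue set of this port.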
+ */ + if (conf_info->queue_region_number) { + for (i = 0; i < rss->queue_num; i++) { + for (j = 0; j < rss_info->conf.queue_num; j++) { + if (rss->queue[i] == rss_info->conf.queue[j]) + break; + } + if (j == rss_info->conf.queue_num) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, + "no valid queues"); + return -rte_errno; + } + } + + for (i = 0; i < rss->queue_num - 1; i++) { + if (rss->queue[i + 1] != rss->queue[i] + 1) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, + "no valid queues"); + return -rte_errno; + } + } + } + + /* Parse queue region related parameters from configuration */ + for (n = 0; n < conf_info->queue_region_number; n++) { + if (conf_info->region[n].user_priority_num || + conf_info->region[n].flowtype_num) { + if (!((rte_is_power_of_2(rss->queue_num)) && + rss->queue_num <= 64)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, + "The region sizes should be any of the following values: 1, 2, 4, 8, 16, 32, 64 as long as the " + "total number of queues do not exceed the VSI allocation"); + return -rte_errno; + } + + if (conf_info->region[n].user_priority[n] >= + I40E_MAX_USER_PRIORITY) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, + "the user priority max index is 7"); + return -rte_errno; + } + + if (conf_info->region[n].hw_flowtype[n] >= + I40E_FILTER_PCTYPE_MAX) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, + "the hw_flowtype or PCTYPE max index is 63"); + return -rte_errno; + } + + for (i = 0; i < info->queue_region_number; i++) { + if (info->region[i].queue_num == + rss->queue_num && + info->region[i].queue_start_index == + rss->queue[0]) + break; + } + + if (i == info->queue_region_number) { + if (i > I40E_REGION_MAX_INDEX) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, + "the queue region max index is 7"); + return -rte_errno; + } + + info->region[i].queue_num = + rss->queue_num; + info->region[i].queue_start_index = + rss->queue[0]; + info->region[i].region_id = + info->queue_region_number; + + j = info->region[i].user_priority_num; + tmp = conf_info->region[n].user_priority[0]; + if (conf_info->region[n].user_priority_num) { + info->region[i].user_priority[j] = tmp; + info->region[i].user_priority_num++; + } + + j = info->region[i].flowtype_num; + tmp = conf_info->region[n].hw_flowtype[0]; + if (conf_info->region[n].flowtype_num) { + info->region[i].hw_flowtype[j] = tmp; + info->region[i].flowtype_num++; + } + info->queue_region_number++; + } else { + j = info->region[i].user_priority_num; + tmp = conf_info->region[n].user_priority[0]; + if (conf_info->region[n].user_priority_num) { + info->region[i].user_priority[j] = tmp; + info->region[i].user_priority_num++; + } + + j = info->region[i].flowtype_num; + tmp = conf_info->region[n].hw_flowtype[0]; + if (conf_info->region[n].flowtype_num) { + info->region[i].hw_flowtype[j] = tmp; + info->region[i].flowtype_num++; + } + } + } + + rss_config->queue_region_conf = TRUE; + } + + /** + * Return function if this flow is used for queue region configuration + */ + if (rss_config->queue_region_conf) + return 0; + + if (!rss) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, + "invalid rule"); + return -rte_errno; + } + + for (n = 0; n < rss->queue_num; n++) { + if (rss->queue[n] >= dev->data->nb_rx_queues) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, + "queue id > max number of queues"); + return 
-rte_errno; + } + } + + if (rss->queue_num && (p_info.types || rss->types)) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act, + "RSS types must be empty while configuring queue region"); + + /* validate pattern and pctype */ + if (!(rss->types & p_info.types) && + (rss->types || p_info.types) && !rss->queue_num) + return rte_flow_error_set + (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, + act, "invalid pctype"); + + nb_types = 0; + for (n = 0; n < RTE_ETH_FLOW_MAX; n++) { + if (rss->types & (hf_bit << n)) + nb_types++; + if (nb_types > 1) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, + act, "multi pctype is not supported"); + } + + if (rss->func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR && + (p_info.types || rss->types || rss->queue_num)) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act, + "pattern, type and queues must be empty while" + " setting hash function as simple_xor"); + + if (rss->func == RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ && + !(p_info.types && rss->types)) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act, + "pctype and queues can not be empty while" + " setting hash function as symmetric toeplitz"); + + /* Parse RSS related parameters from configuration */ + if (rss->func >= RTE_ETH_HASH_FUNCTION_MAX || + rss->func == RTE_ETH_HASH_FUNCTION_TOEPLITZ) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act, + "RSS hash functions are not supported"); + if (rss->level) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act, + "a nonzero RSS encapsulation level is not supported"); + if (rss->key_len && rss->key_len > RTE_DIM(rss_config->key)) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act, + "RSS hash key too large"); + if (rss->queue_num > RTE_DIM(rss_config->queue)) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act, + "too many queues for RSS context"); + if (i40e_rss_conf_init(rss_config, rss)) + return rte_flow_error_set + (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act, + "RSS context initialization failure"); + + index++; + + /* check if the next not void action is END */ + NEXT_ITEM_OF_ACTION(act, actions, index); + if (act->type != RTE_FLOW_ACTION_TYPE_END) { + memset(rss_config, 0, sizeof(struct i40e_rte_flow_rss_conf)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, "Not supported action."); + return -rte_errno; + } + rss_config->queue_region_conf = FALSE; + + return 0; +} + +static int +i40e_parse_rss_filter(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + union i40e_filter_t *filter, + struct rte_flow_error *error) +{ + struct i40e_rss_pattern_info p_info; + struct i40e_queue_regions info; + int ret; + + memset(&info, 0, sizeof(struct i40e_queue_regions)); + memset(&p_info, 0, sizeof(struct i40e_rss_pattern_info)); + + ret = i40e_flow_parse_rss_pattern(dev, pattern, + error, &p_info, &info); + if (ret) + return ret; + + ret = i40e_flow_parse_rss_action(dev, actions, error, + p_info, &info, filter); + if (ret) + return ret; + + ret = i40e_flow_parse_attr(attr, error); + if (ret) + return ret; + + cons_filter_type = RTE_ETH_FILTER_HASH; + + return 0; +} + +static int +i40e_config_rss_filter_set(struct rte_eth_dev *dev, + struct i40e_rte_flow_rss_conf *conf) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + 
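+	/*
+	 * Either apply the accumulated queue region configuration to the
+	 * hardware (queue_region_conf set) or program an ordinary RSS
+	 * filter; in both cases the configuration is then recorded in
+	 * pf->rss_config_list so it can be looked up and removed when the
+	 * flow is destroyed.
+	 */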
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct i40e_rss_filter *rss_filter; + int ret; + + if (conf->queue_region_conf) { + ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 1); + } else { + ret = i40e_config_rss_filter(pf, conf, 1); + } + + if (ret) + return ret; + + rss_filter = rte_zmalloc("i40e_rss_filter", + sizeof(*rss_filter), 0); + if (rss_filter == NULL) { + PMD_DRV_LOG(ERR, "Failed to alloc memory."); + return -ENOMEM; + } + rss_filter->rss_filter_info = *conf; + /* the rule new created is always valid + * the existing rule covered by new rule will be set invalid + */ + rss_filter->rss_filter_info.valid = true; + + TAILQ_INSERT_TAIL(&pf->rss_config_list, rss_filter, next); + + return 0; +} + +static int +i40e_config_rss_filter_del(struct rte_eth_dev *dev, + struct i40e_rte_flow_rss_conf *conf) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct i40e_rss_filter *rss_filter; + void *temp; + + if (conf->queue_region_conf) + i40e_flush_queue_region_all_conf(dev, hw, pf, 0); + else + i40e_config_rss_filter(pf, conf, 0); + + TAILQ_FOREACH_SAFE(rss_filter, &pf->rss_config_list, next, temp) { + if (!memcmp(&rss_filter->rss_filter_info, conf, + sizeof(struct rte_flow_action_rss))) { + TAILQ_REMOVE(&pf->rss_config_list, rss_filter, next); + rte_free(rss_filter); + } + } + return 0; +} + +static int +i40e_flow_validate(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + struct rte_flow_item *items; /* internal pattern w/o VOID items */ + parse_filter_t parse_filter; + uint32_t item_num = 0; /* non-void item number of pattern*/ + uint32_t i = 0; + bool flag = false; + int ret = I40E_NOT_SUPPORTED; + + if (!pattern) { + rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM, + NULL, "NULL pattern."); + return -rte_errno; + } + + if (!actions) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_NUM, + NULL, "NULL action."); + return -rte_errno; + } + + if (!attr) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR, + NULL, "NULL attribute."); + return -rte_errno; + } + + memset(&cons_filter, 0, sizeof(cons_filter)); + + /* Get the non-void item of action */ + while ((actions + i)->type == RTE_FLOW_ACTION_TYPE_VOID) + i++; + + if ((actions + i)->type == RTE_FLOW_ACTION_TYPE_RSS) { + ret = i40e_parse_rss_filter(dev, attr, pattern, + actions, &cons_filter, error); + return ret; + } + + i = 0; + /* Get the non-void item number of pattern */ + while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) { + if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID) + item_num++; + i++; + } + item_num++; + + items = rte_zmalloc("i40e_pattern", + item_num * sizeof(struct rte_flow_item), 0); + if (!items) { + rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM, + NULL, "No memory for PMD internal items."); + return -ENOMEM; + } + + i40e_pattern_skip_void_item(items, pattern); + + i = 0; + do { + parse_filter = i40e_find_parse_filter_func(items, &i); + if (!parse_filter && !flag) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + pattern, "Unsupported pattern"); + rte_free(items); + return -rte_errno; + } + if (parse_filter) + ret = parse_filter(dev, attr, items, actions, + error, &cons_filter); + flag = true; + } while ((ret < 0) && (i < RTE_DIM(i40e_supported_patterns))); + + rte_free(items); 
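+
+	/*
+	 * At this point ret is 0 if one of the parsers accepted the rule,
+	 * in which case cons_filter and cons_filter_type hold the parsed
+	 * filter for a subsequent i40e_flow_create(); otherwise ret holds
+	 * the error returned by the last parser that was tried.
+	 */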
+ + return ret; +} + +static struct rte_flow * +i40e_flow_create(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct rte_flow *flow; + int ret; + + flow = rte_zmalloc("i40e_flow", sizeof(struct rte_flow), 0); + if (!flow) { + rte_flow_error_set(error, ENOMEM, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Failed to allocate memory"); + return flow; + } + + ret = i40e_flow_validate(dev, attr, pattern, actions, error); + if (ret < 0) + return NULL; + + switch (cons_filter_type) { + case RTE_ETH_FILTER_ETHERTYPE: + ret = i40e_ethertype_filter_set(pf, + &cons_filter.ethertype_filter, 1); + if (ret) + goto free_flow; + flow->rule = TAILQ_LAST(&pf->ethertype.ethertype_list, + i40e_ethertype_filter_list); + break; + case RTE_ETH_FILTER_FDIR: + ret = i40e_flow_add_del_fdir_filter(dev, + &cons_filter.fdir_filter, 1); + if (ret) + goto free_flow; + flow->rule = TAILQ_LAST(&pf->fdir.fdir_list, + i40e_fdir_filter_list); + break; + case RTE_ETH_FILTER_TUNNEL: + ret = i40e_dev_consistent_tunnel_filter_set(pf, + &cons_filter.consistent_tunnel_filter, 1); + if (ret) + goto free_flow; + flow->rule = TAILQ_LAST(&pf->tunnel.tunnel_list, + i40e_tunnel_filter_list); + break; + case RTE_ETH_FILTER_HASH: + ret = i40e_config_rss_filter_set(dev, + &cons_filter.rss_conf); + if (ret) + goto free_flow; + flow->rule = TAILQ_LAST(&pf->rss_config_list, + i40e_rss_conf_list); + break; + default: + goto free_flow; + } + + flow->filter_type = cons_filter_type; + TAILQ_INSERT_TAIL(&pf->flow_list, flow, node); + return flow; + +free_flow: + rte_flow_error_set(error, -ret, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Failed to create flow."); + rte_free(flow); + return NULL; +} + +static int +i40e_flow_destroy(struct rte_eth_dev *dev, + struct rte_flow *flow, + struct rte_flow_error *error) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + enum rte_filter_type filter_type = flow->filter_type; + int ret = 0; + + switch (filter_type) { + case RTE_ETH_FILTER_ETHERTYPE: + ret = i40e_flow_destroy_ethertype_filter(pf, + (struct i40e_ethertype_filter *)flow->rule); + break; + case RTE_ETH_FILTER_TUNNEL: + ret = i40e_flow_destroy_tunnel_filter(pf, + (struct i40e_tunnel_filter *)flow->rule); + break; + case RTE_ETH_FILTER_FDIR: + ret = i40e_flow_add_del_fdir_filter(dev, + &((struct i40e_fdir_filter *)flow->rule)->fdir, 0); + + /* If the last flow is destroyed, disable fdir. 
*/ + if (!ret && TAILQ_EMPTY(&pf->fdir.fdir_list)) { + i40e_fdir_rx_proc_enable(dev, 0); + } + break; + case RTE_ETH_FILTER_HASH: + ret = i40e_config_rss_filter_del(dev, + &((struct i40e_rss_filter *)flow->rule)->rss_filter_info); + break; + default: + PMD_DRV_LOG(WARNING, "Filter type (%d) not supported", + filter_type); + ret = -EINVAL; + break; + } + + if (!ret) { + TAILQ_REMOVE(&pf->flow_list, flow, node); + rte_free(flow); + } else + rte_flow_error_set(error, -ret, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Failed to destroy flow."); + + return ret; +} + +static int +i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf, + struct i40e_ethertype_filter *filter) +{ + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype; + struct i40e_ethertype_filter *node; + struct i40e_control_filter_stats stats; + uint16_t flags = 0; + int ret = 0; + + if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC)) + flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC; + if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) + flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP; + flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE; + + memset(&stats, 0, sizeof(stats)); + ret = i40e_aq_add_rem_control_packet_filter(hw, + filter->input.mac_addr.addr_bytes, + filter->input.ether_type, + flags, pf->main_vsi->seid, + filter->queue, 0, &stats, NULL); + if (ret < 0) + return ret; + + node = i40e_sw_ethertype_filter_lookup(ethertype_rule, &filter->input); + if (!node) + return -EINVAL; + + ret = i40e_sw_ethertype_filter_del(pf, &node->input); + + return ret; +} + +static int +i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf, + struct i40e_tunnel_filter *filter) +{ + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + struct i40e_vsi *vsi; + struct i40e_pf_vf *vf; + struct i40e_aqc_cloud_filters_element_bb cld_filter; + struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel; + struct i40e_tunnel_filter *node; + bool big_buffer = 0; + int ret = 0; + + memset(&cld_filter, 0, sizeof(cld_filter)); + rte_ether_addr_copy((struct rte_ether_addr *)&filter->input.outer_mac, + (struct rte_ether_addr *)&cld_filter.element.outer_mac); + rte_ether_addr_copy((struct rte_ether_addr *)&filter->input.inner_mac, + (struct rte_ether_addr *)&cld_filter.element.inner_mac); + cld_filter.element.inner_vlan = filter->input.inner_vlan; + cld_filter.element.flags = filter->input.flags; + cld_filter.element.tenant_id = filter->input.tenant_id; + cld_filter.element.queue_number = filter->queue; + rte_memcpy(cld_filter.general_fields, + filter->input.general_fields, + sizeof(cld_filter.general_fields)); + + if (!filter->is_to_vf) + vsi = pf->main_vsi; + else { + vf = &pf->vfs[filter->vf_id]; + vsi = vf->vsi; + } + + if (((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_0X11) == + I40E_AQC_ADD_CLOUD_FILTER_0X11) || + ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_0X12) == + I40E_AQC_ADD_CLOUD_FILTER_0X12) || + ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_0X10) == + I40E_AQC_ADD_CLOUD_FILTER_0X10)) + big_buffer = 1; + + if (big_buffer) + ret = i40e_aq_rem_cloud_filters_bb(hw, vsi->seid, + &cld_filter, 1); + else + ret = i40e_aq_rem_cloud_filters(hw, vsi->seid, + &cld_filter.element, 1); + if (ret < 0) + return -ENOTSUP; + + node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &filter->input); + if (!node) + return -EINVAL; + + ret = i40e_sw_tunnel_filter_del(pf, &node->input); + + return ret; +} + +static int +i40e_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error) +{ + struct i40e_pf *pf = 
I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + int ret; + + ret = i40e_flow_flush_fdir_filter(pf); + if (ret) { + rte_flow_error_set(error, -ret, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Failed to flush FDIR flows."); + return -rte_errno; + } + + ret = i40e_flow_flush_ethertype_filter(pf); + if (ret) { + rte_flow_error_set(error, -ret, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Failed to ethertype flush flows."); + return -rte_errno; + } + + ret = i40e_flow_flush_tunnel_filter(pf); + if (ret) { + rte_flow_error_set(error, -ret, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Failed to flush tunnel flows."); + return -rte_errno; + } + + ret = i40e_flow_flush_rss_filter(dev); + if (ret) { + rte_flow_error_set(error, -ret, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Failed to flush RSS flows."); + return -rte_errno; + } + + return ret; +} + +static int +i40e_flow_flush_fdir_filter(struct i40e_pf *pf) +{ + struct rte_eth_dev *dev = pf->adapter->eth_dev; + struct i40e_fdir_info *fdir_info = &pf->fdir; + struct i40e_fdir_filter *fdir_filter; + enum i40e_filter_pctype pctype; + struct rte_flow *flow; + void *temp; + int ret; + + ret = i40e_fdir_flush(dev); + if (!ret) { + /* Delete FDIR filters in FDIR list. */ + while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) { + ret = i40e_sw_fdir_filter_del(pf, + &fdir_filter->fdir.input); + if (ret < 0) + return ret; + } + + /* Delete FDIR flows in flow list. */ + TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) { + if (flow->filter_type == RTE_ETH_FILTER_FDIR) { + TAILQ_REMOVE(&pf->flow_list, flow, node); + rte_free(flow); + } + } + + for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP; + pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++) + pf->fdir.inset_flag[pctype] = 0; + + /* Disable FDIR processing as all FDIR rules are now flushed */ + i40e_fdir_rx_proc_enable(dev, 0); + } + + return ret; +} + +/* Flush all ethertype filters */ +static int +i40e_flow_flush_ethertype_filter(struct i40e_pf *pf) +{ + struct i40e_ethertype_filter_list + *ethertype_list = &pf->ethertype.ethertype_list; + struct i40e_ethertype_filter *filter; + struct rte_flow *flow; + void *temp; + int ret = 0; + + while ((filter = TAILQ_FIRST(ethertype_list))) { + ret = i40e_flow_destroy_ethertype_filter(pf, filter); + if (ret) + return ret; + } + + /* Delete ethertype flows in flow list. */ + TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) { + if (flow->filter_type == RTE_ETH_FILTER_ETHERTYPE) { + TAILQ_REMOVE(&pf->flow_list, flow, node); + rte_free(flow); + } + } + + return ret; +} + +/* Flush all tunnel filters */ +static int +i40e_flow_flush_tunnel_filter(struct i40e_pf *pf) +{ + struct i40e_tunnel_filter_list + *tunnel_list = &pf->tunnel.tunnel_list; + struct i40e_tunnel_filter *filter; + struct rte_flow *flow; + void *temp; + int ret = 0; + + while ((filter = TAILQ_FIRST(tunnel_list))) { + ret = i40e_flow_destroy_tunnel_filter(pf, filter); + if (ret) + return ret; + } + + /* Delete tunnel flows in flow list. 
*/ + TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) { + if (flow->filter_type == RTE_ETH_FILTER_TUNNEL) { + TAILQ_REMOVE(&pf->flow_list, flow, node); + rte_free(flow); + } + } + + return ret; +} + +/* remove the RSS filter */ +static int +i40e_flow_flush_rss_filter(struct rte_eth_dev *dev) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_flow *flow; + void *temp; + int32_t ret = -EINVAL; + + ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 0); + + /* Delete RSS flows in flow list. */ + TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) { + if (flow->filter_type != RTE_ETH_FILTER_HASH) + continue; + + if (flow->rule) { + ret = i40e_config_rss_filter_del(dev, + &((struct i40e_rss_filter *)flow->rule)->rss_filter_info); + if (ret) + return ret; + } + TAILQ_REMOVE(&pf->flow_list, flow, node); + rte_free(flow); + } + + return ret; +} diff --git a/src/spdk/dpdk/drivers/net/i40e/i40e_logs.h b/src/spdk/dpdk/drivers/net/i40e/i40e_logs.h new file mode 100644 index 000000000..dac3267eb --- /dev/null +++ b/src/spdk/dpdk/drivers/net/i40e/i40e_logs.h @@ -0,0 +1,49 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2015 Intel Corporation + */ + +#ifndef _I40E_LOGS_H_ +#define _I40E_LOGS_H_ + +extern int i40e_logtype_init; +#define PMD_INIT_LOG(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, i40e_logtype_init, "%s(): " fmt "\n", \ + __func__, ##args) +#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>") + +#ifdef RTE_LIBRTE_I40E_DEBUG_RX +extern int i40e_logtype_rx; +#define PMD_RX_LOG(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, i40e_logtype_rx, \ + "%s(): " fmt "\n", __func__, ## args) +#else +#define PMD_RX_LOG(level, fmt, args...) do { } while (0) +#endif + +#ifdef RTE_LIBRTE_I40E_DEBUG_TX +extern int i40e_logtype_tx; +#define PMD_TX_LOG(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, i40e_logtype_tx, \ + "%s(): " fmt "\n", __func__, ## args) +#else +#define PMD_TX_LOG(level, fmt, args...) do { } while (0) +#endif + +#ifdef RTE_LIBRTE_I40E_DEBUG_TX_FREE +extern int i40e_logtype_tx_free; +#define PMD_TX_FREE_LOG(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, i40e_logtype_tx_free, \ + "%s(): " fmt "\n", __func__, ## args) +#else +#define PMD_TX_FREE_LOG(level, fmt, args...) do { } while (0) +#endif + +extern int i40e_logtype_driver; +#define PMD_DRV_LOG_RAW(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, i40e_logtype_driver, "%s(): " fmt, \ + __func__, ## args) + +#define PMD_DRV_LOG(level, fmt, args...) \ + PMD_DRV_LOG_RAW(level, fmt "\n", ## args) + +#endif /* _I40E_LOGS_H_ */ diff --git a/src/spdk/dpdk/drivers/net/i40e/i40e_pf.c b/src/spdk/dpdk/drivers/net/i40e/i40e_pf.c new file mode 100644 index 000000000..7bf1e7941 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/i40e/i40e_pf.c @@ -0,0 +1,1607 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2017 Intel Corporation + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "i40e_logs.h" +#include "base/i40e_prototype.h" +#include "base/i40e_adminq_cmd.h" +#include "base/i40e_type.h" +#include "i40e_ethdev.h" +#include "i40e_rxtx.h" +#include "i40e_pf.h" +#include "rte_pmd_i40e.h" + +#define I40E_CFG_CRCSTRIP_DEFAULT 1 + +static int +i40e_pf_host_switch_queues(struct i40e_pf_vf *vf, + struct virtchnl_queue_select *qsel, + bool on); + +/** + * Bind PF queues with VSI and VF. 
+ **/ +static int +i40e_pf_vf_queues_mapping(struct i40e_pf_vf *vf) +{ + int i; + struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf); + uint16_t vsi_id = vf->vsi->vsi_id; + uint16_t vf_id = vf->vf_idx; + uint16_t nb_qps = vf->vsi->nb_qps; + uint16_t qbase = vf->vsi->base_queue; + uint16_t q1, q2; + uint32_t val; + + /* + * VF should use scatter range queues. So, it needn't + * to set QBASE in this register. + */ + i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vsi_id), + I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK); + + /* Set to enable VFLAN_QTABLE[] registers valid */ + I40E_WRITE_REG(hw, I40E_VPLAN_MAPENA(vf_id), + I40E_VPLAN_MAPENA_TXRX_ENA_MASK); + + /* map PF queues to VF */ + for (i = 0; i < nb_qps; i++) { + val = ((qbase + i) & I40E_VPLAN_QTABLE_QINDEX_MASK); + I40E_WRITE_REG(hw, I40E_VPLAN_QTABLE(i, vf_id), val); + } + + /* map PF queues to VSI */ + for (i = 0; i < I40E_MAX_QP_NUM_PER_VF / 2; i++) { + if (2 * i > nb_qps - 1) + q1 = I40E_VSILAN_QTABLE_QINDEX_0_MASK; + else + q1 = qbase + 2 * i; + + if (2 * i + 1 > nb_qps - 1) + q2 = I40E_VSILAN_QTABLE_QINDEX_0_MASK; + else + q2 = qbase + 2 * i + 1; + + val = (q2 << I40E_VSILAN_QTABLE_QINDEX_1_SHIFT) + q1; + i40e_write_rx_ctl(hw, I40E_VSILAN_QTABLE(i, vsi_id), val); + } + I40E_WRITE_FLUSH(hw); + + return I40E_SUCCESS; +} + + +/** + * Proceed VF reset operation. + */ +int +i40e_pf_host_vf_reset(struct i40e_pf_vf *vf, bool do_hw_reset) +{ + uint32_t val, i; + struct i40e_hw *hw; + struct i40e_pf *pf; + uint16_t vf_id, abs_vf_id, vf_msix_num; + int ret; + struct virtchnl_queue_select qsel; + + if (vf == NULL) + return -EINVAL; + + pf = vf->pf; + hw = I40E_PF_TO_HW(vf->pf); + vf_id = vf->vf_idx; + abs_vf_id = vf_id + hw->func_caps.vf_base_id; + + /* Notify VF that we are in VFR progress */ + I40E_WRITE_REG(hw, I40E_VFGEN_RSTAT1(vf_id), VIRTCHNL_VFR_INPROGRESS); + + /* + * If require a SW VF reset, a VFLR interrupt will be generated, + * this function will be called again. To avoid it, + * disable interrupt first. 
+ */ + if (do_hw_reset) { + vf->state = I40E_VF_INRESET; + val = I40E_READ_REG(hw, I40E_VPGEN_VFRTRIG(vf_id)); + val |= I40E_VPGEN_VFRTRIG_VFSWR_MASK; + I40E_WRITE_REG(hw, I40E_VPGEN_VFRTRIG(vf_id), val); + I40E_WRITE_FLUSH(hw); + } + +#define VFRESET_MAX_WAIT_CNT 100 + /* Wait until VF reset is done */ + for (i = 0; i < VFRESET_MAX_WAIT_CNT; i++) { + rte_delay_us(10); + val = I40E_READ_REG(hw, I40E_VPGEN_VFRSTAT(vf_id)); + if (val & I40E_VPGEN_VFRSTAT_VFRD_MASK) + break; + } + + if (i >= VFRESET_MAX_WAIT_CNT) { + PMD_DRV_LOG(ERR, "VF reset timeout"); + return -ETIMEDOUT; + } + /* This is not first time to do reset, do cleanup job first */ + if (vf->vsi) { + /* Disable queues */ + memset(&qsel, 0, sizeof(qsel)); + for (i = 0; i < vf->vsi->nb_qps; i++) + qsel.rx_queues |= 1 << i; + qsel.tx_queues = qsel.rx_queues; + ret = i40e_pf_host_switch_queues(vf, &qsel, false); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "Disable VF queues failed"); + return -EFAULT; + } + + /* Disable VF interrupt setting */ + vf_msix_num = hw->func_caps.num_msix_vectors_vf; + for (i = 0; i < vf_msix_num; i++) { + if (!i) + val = I40E_VFINT_DYN_CTL0(vf_id); + else + val = I40E_VFINT_DYN_CTLN(((vf_msix_num - 1) * + (vf_id)) + (i - 1)); + I40E_WRITE_REG(hw, val, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK); + } + I40E_WRITE_FLUSH(hw); + + /* remove VSI */ + ret = i40e_vsi_release(vf->vsi); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "Release VSI failed"); + return -EFAULT; + } + } + +#define I40E_VF_PCI_ADDR 0xAA +#define I40E_VF_PEND_MASK 0x20 + /* Check the pending transactions of this VF */ + /* Use absolute VF id, refer to datasheet for details */ + I40E_WRITE_REG(hw, I40E_PF_PCI_CIAA, I40E_VF_PCI_ADDR | + (abs_vf_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT)); + for (i = 0; i < VFRESET_MAX_WAIT_CNT; i++) { + rte_delay_us(1); + val = I40E_READ_REG(hw, I40E_PF_PCI_CIAD); + if ((val & I40E_VF_PEND_MASK) == 0) + break; + } + + if (i >= VFRESET_MAX_WAIT_CNT) { + PMD_DRV_LOG(ERR, "Wait VF PCI transaction end timeout"); + return -ETIMEDOUT; + } + + /* Reset done, Set COMPLETE flag and clear reset bit */ + I40E_WRITE_REG(hw, I40E_VFGEN_RSTAT1(vf_id), VIRTCHNL_VFR_COMPLETED); + val = I40E_READ_REG(hw, I40E_VPGEN_VFRTRIG(vf_id)); + val &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK; + I40E_WRITE_REG(hw, I40E_VPGEN_VFRTRIG(vf_id), val); + vf->reset_cnt++; + I40E_WRITE_FLUSH(hw); + + /* Allocate resource again */ + if (pf->floating_veb && pf->floating_veb_list[vf_id]) { + vf->vsi = i40e_vsi_setup(vf->pf, I40E_VSI_SRIOV, + NULL, vf->vf_idx); + } else { + vf->vsi = i40e_vsi_setup(vf->pf, I40E_VSI_SRIOV, + vf->pf->main_vsi, vf->vf_idx); + } + + if (vf->vsi == NULL) { + PMD_DRV_LOG(ERR, "Add vsi failed"); + return -EFAULT; + } + + ret = i40e_pf_vf_queues_mapping(vf); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "queue mapping error"); + i40e_vsi_release(vf->vsi); + return -EFAULT; + } + + I40E_WRITE_REG(hw, I40E_VFGEN_RSTAT1(vf_id), VIRTCHNL_VFR_VFACTIVE); + + return ret; +} + +int +i40e_pf_host_send_msg_to_vf(struct i40e_pf_vf *vf, + uint32_t opcode, + uint32_t retval, + uint8_t *msg, + uint16_t msglen) +{ + struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf); + uint16_t abs_vf_id = hw->func_caps.vf_base_id + vf->vf_idx; + int ret; + + ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, opcode, retval, + msg, msglen, NULL); + if (ret) { + PMD_INIT_LOG(ERR, "Fail to send message to VF, err %u", + hw->aq.asq_last_status); + } + + return ret; +} + +static void +i40e_pf_host_process_cmd_version(struct i40e_pf_vf *vf, uint8_t *msg, + bool b_op) +{ + struct 
virtchnl_version_info info; + + /* VF and PF drivers need to follow the Virtchnl definition, No matter + * it's DPDK or other kernel drivers. + * The original DPDK host specific feature + * like CFG_VLAN_PVID and CONFIG_VSI_QUEUES_EXT will not available. + */ + + info.major = VIRTCHNL_VERSION_MAJOR; + vf->version = *(struct virtchnl_version_info *)msg; + if (VF_IS_V10(&vf->version)) + info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS; + else + info.minor = VIRTCHNL_VERSION_MINOR; + + if (b_op) + i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION, + I40E_SUCCESS, + (uint8_t *)&info, + sizeof(info)); + else + i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION, + I40E_NOT_SUPPORTED, + (uint8_t *)&info, + sizeof(info)); +} + +static int +i40e_pf_host_process_cmd_reset_vf(struct i40e_pf_vf *vf) +{ + i40e_pf_host_vf_reset(vf, 1); + + /* No feedback will be sent to VF for VFLR */ + return I40E_SUCCESS; +} + +static int +i40e_pf_host_process_cmd_get_vf_resource(struct i40e_pf_vf *vf, uint8_t *msg, + bool b_op) +{ + struct virtchnl_vf_resource *vf_res = NULL; + struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf); + uint32_t len = 0; + uint64_t default_hena = I40E_RSS_HENA_ALL; + int ret = I40E_SUCCESS; + + if (!b_op) { + i40e_pf_host_send_msg_to_vf(vf, + VIRTCHNL_OP_GET_VF_RESOURCES, + I40E_NOT_SUPPORTED, NULL, 0); + return ret; + } + + /* only have 1 VSI by default */ + len = sizeof(struct virtchnl_vf_resource) + + I40E_DEFAULT_VF_VSI_NUM * + sizeof(struct virtchnl_vsi_resource); + + vf_res = rte_zmalloc("i40e_vf_res", len, 0); + if (vf_res == NULL) { + PMD_DRV_LOG(ERR, "failed to allocate mem"); + ret = I40E_ERR_NO_MEMORY; + vf_res = NULL; + len = 0; + goto send_msg; + } + + if (VF_IS_V10(&vf->version)) /* doesn't support offload negotiate */ + vf->request_caps = VIRTCHNL_VF_OFFLOAD_L2 | + VIRTCHNL_VF_OFFLOAD_VLAN; + else + vf->request_caps = *(uint32_t *)msg; + + /* enable all RSS by default, + * doesn't support hena setting by virtchnnl yet. + */ + if (vf->request_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) { + I40E_WRITE_REG(hw, I40E_VFQF_HENA1(0, vf->vf_idx), + (uint32_t)default_hena); + I40E_WRITE_REG(hw, I40E_VFQF_HENA1(1, vf->vf_idx), + (uint32_t)(default_hena >> 32)); + I40E_WRITE_FLUSH(hw); + } + + vf_res->vf_cap_flags = vf->request_caps & + I40E_VIRTCHNL_OFFLOAD_CAPS; + /* For X722, it supports write back on ITR + * without binding queue to interrupt vector. 
+ */ + if (hw->mac.type == I40E_MAC_X722) + vf_res->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_WB_ON_ITR; + vf_res->max_vectors = hw->func_caps.num_msix_vectors_vf; + vf_res->num_queue_pairs = vf->vsi->nb_qps; + vf_res->num_vsis = I40E_DEFAULT_VF_VSI_NUM; + vf_res->rss_key_size = (I40E_PFQF_HKEY_MAX_INDEX + 1) * 4; + vf_res->rss_lut_size = (I40E_VFQF_HLUT1_MAX_INDEX + 1) * 4; + + /* Change below setting if PF host can support more VSIs for VF */ + vf_res->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV; + vf_res->vsi_res[0].vsi_id = vf->vsi->vsi_id; + vf_res->vsi_res[0].num_queue_pairs = vf->vsi->nb_qps; + rte_ether_addr_copy(&vf->mac_addr, + (struct rte_ether_addr *)vf_res->vsi_res[0].default_mac_addr); + +send_msg: + i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES, + ret, (uint8_t *)vf_res, len); + rte_free(vf_res); + + return ret; +} + +static int +i40e_pf_host_hmc_config_rxq(struct i40e_hw *hw, + struct i40e_pf_vf *vf, + struct virtchnl_rxq_info *rxq, + uint8_t crcstrip) +{ + int err = I40E_SUCCESS; + struct i40e_hmc_obj_rxq rx_ctx; + uint16_t abs_queue_id = vf->vsi->base_queue + rxq->queue_id; + + /* Clear the context structure first */ + memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq)); + rx_ctx.dbuff = rxq->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT; + rx_ctx.hbuff = rxq->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT; + rx_ctx.base = rxq->dma_ring_addr / I40E_QUEUE_BASE_ADDR_UNIT; + rx_ctx.qlen = rxq->ring_len; +#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC + rx_ctx.dsize = 1; +#endif + + if (rxq->splithdr_enabled) { + rx_ctx.hsplit_0 = I40E_HEADER_SPLIT_ALL; + rx_ctx.dtype = i40e_header_split_enabled; + } else { + rx_ctx.hsplit_0 = I40E_HEADER_SPLIT_NONE; + rx_ctx.dtype = i40e_header_split_none; + } + rx_ctx.rxmax = rxq->max_pkt_size; + rx_ctx.tphrdesc_ena = 1; + rx_ctx.tphwdesc_ena = 1; + rx_ctx.tphdata_ena = 1; + rx_ctx.tphhead_ena = 1; + rx_ctx.lrxqthresh = 2; + rx_ctx.crcstrip = crcstrip; + rx_ctx.l2tsel = 1; + rx_ctx.prefena = 1; + + err = i40e_clear_lan_rx_queue_context(hw, abs_queue_id); + if (err != I40E_SUCCESS) + return err; + err = i40e_set_lan_rx_queue_context(hw, abs_queue_id, &rx_ctx); + + return err; +} + +static inline uint8_t +i40e_vsi_get_tc_of_queue(struct i40e_vsi *vsi, + uint16_t queue_id) +{ + struct i40e_aqc_vsi_properties_data *info = &vsi->info; + uint16_t bsf, qp_idx; + uint8_t i; + + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + if (vsi->enabled_tc & (1 << i)) { + qp_idx = rte_le_to_cpu_16((info->tc_mapping[i] & + I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >> + I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT); + bsf = rte_le_to_cpu_16((info->tc_mapping[i] & + I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >> + I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT); + if (queue_id >= qp_idx && queue_id < qp_idx + (1 << bsf)) + return i; + } + } + return 0; +} + +static int +i40e_pf_host_hmc_config_txq(struct i40e_hw *hw, + struct i40e_pf_vf *vf, + struct virtchnl_txq_info *txq) +{ + int err = I40E_SUCCESS; + struct i40e_hmc_obj_txq tx_ctx; + struct i40e_vsi *vsi = vf->vsi; + uint32_t qtx_ctl; + uint16_t abs_queue_id = vsi->base_queue + txq->queue_id; + uint8_t dcb_tc; + + /* clear the context structure first */ + memset(&tx_ctx, 0, sizeof(tx_ctx)); + tx_ctx.base = txq->dma_ring_addr / I40E_QUEUE_BASE_ADDR_UNIT; + tx_ctx.qlen = txq->ring_len; + dcb_tc = i40e_vsi_get_tc_of_queue(vsi, txq->queue_id); + tx_ctx.rdylist = rte_le_to_cpu_16(vsi->info.qs_handle[dcb_tc]); + tx_ctx.head_wb_ena = txq->headwb_enabled; + tx_ctx.head_wb_addr = txq->dma_headwb_addr; + + err = i40e_clear_lan_tx_queue_context(hw, abs_queue_id); + if (err != 
I40E_SUCCESS) + return err; + + err = i40e_set_lan_tx_queue_context(hw, abs_queue_id, &tx_ctx); + if (err != I40E_SUCCESS) + return err; + + /* bind queue with VF function, since TX/QX will appear in pair, + * so only has QTX_CTL to set. + */ + qtx_ctl = (I40E_QTX_CTL_VF_QUEUE << I40E_QTX_CTL_PFVF_Q_SHIFT) | + ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) & + I40E_QTX_CTL_PF_INDX_MASK) | + (((vf->vf_idx + hw->func_caps.vf_base_id) << + I40E_QTX_CTL_VFVM_INDX_SHIFT) & + I40E_QTX_CTL_VFVM_INDX_MASK); + I40E_WRITE_REG(hw, I40E_QTX_CTL(abs_queue_id), qtx_ctl); + I40E_WRITE_FLUSH(hw); + + return I40E_SUCCESS; +} + +static int +i40e_pf_host_process_cmd_config_vsi_queues(struct i40e_pf_vf *vf, + uint8_t *msg, + uint16_t msglen, + bool b_op) +{ + struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf); + struct i40e_vsi *vsi = vf->vsi; + struct virtchnl_vsi_queue_config_info *vc_vqci = + (struct virtchnl_vsi_queue_config_info *)msg; + struct virtchnl_queue_pair_info *vc_qpi; + int i, ret = I40E_SUCCESS; + + if (!b_op) { + i40e_pf_host_send_msg_to_vf(vf, + VIRTCHNL_OP_CONFIG_VSI_QUEUES, + I40E_NOT_SUPPORTED, NULL, 0); + return ret; + } + + if (!msg || vc_vqci->num_queue_pairs > vsi->nb_qps || + vc_vqci->num_queue_pairs > I40E_MAX_VSI_QP || + msglen < I40E_VIRTCHNL_CONFIG_VSI_QUEUES_SIZE(vc_vqci, + vc_vqci->num_queue_pairs)) { + PMD_DRV_LOG(ERR, "vsi_queue_config_info argument wrong"); + ret = I40E_ERR_PARAM; + goto send_msg; + } + + vc_qpi = vc_vqci->qpair; + for (i = 0; i < vc_vqci->num_queue_pairs; i++) { + if (vc_qpi[i].rxq.queue_id > vsi->nb_qps - 1 || + vc_qpi[i].txq.queue_id > vsi->nb_qps - 1) { + ret = I40E_ERR_PARAM; + goto send_msg; + } + + /* + * Apply VF RX queue setting to HMC. + * If the opcode is VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT, + * then the extra information of + * 'struct virtchnl_queue_pair_extra_info' is needed, + * otherwise set the last parameter to NULL. 
+ */ + if (i40e_pf_host_hmc_config_rxq(hw, vf, &vc_qpi[i].rxq, + I40E_CFG_CRCSTRIP_DEFAULT) != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "Configure RX queue HMC failed"); + ret = I40E_ERR_PARAM; + goto send_msg; + } + + /* Apply VF TX queue setting to HMC */ + if (i40e_pf_host_hmc_config_txq(hw, vf, + &vc_qpi[i].txq) != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "Configure TX queue HMC failed"); + ret = I40E_ERR_PARAM; + goto send_msg; + } + } + +send_msg: + i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, + ret, NULL, 0); + + return ret; +} + +static void +i40e_pf_config_irq_link_list(struct i40e_pf_vf *vf, + struct virtchnl_vector_map *vvm) +{ +#define BITS_PER_CHAR 8 + uint64_t linklistmap = 0, tempmap; + struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf); + uint16_t qid; + bool b_first_q = true; + enum i40e_queue_type qtype; + uint16_t vector_id; + uint32_t reg, reg_idx; + uint16_t itr_idx = 0, i; + + vector_id = vvm->vector_id; + /* setup the head */ + if (!vector_id) + reg_idx = I40E_VPINT_LNKLST0(vf->vf_idx); + else + reg_idx = I40E_VPINT_LNKLSTN( + ((hw->func_caps.num_msix_vectors_vf - 1) * vf->vf_idx) + + (vector_id - 1)); + + if (vvm->rxq_map == 0 && vvm->txq_map == 0) { + I40E_WRITE_REG(hw, reg_idx, + I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK); + goto cfg_irq_done; + } + + /* sort all rx and tx queues */ + tempmap = vvm->rxq_map; + for (i = 0; i < sizeof(vvm->rxq_map) * BITS_PER_CHAR; i++) { + if (tempmap & 0x1) + linklistmap |= (1 << (2 * i)); + tempmap >>= 1; + } + + tempmap = vvm->txq_map; + for (i = 0; i < sizeof(vvm->txq_map) * BITS_PER_CHAR; i++) { + if (tempmap & 0x1) + linklistmap |= (1 << (2 * i + 1)); + tempmap >>= 1; + } + + /* Link all rx and tx queues into a chained list */ + tempmap = linklistmap; + i = 0; + b_first_q = true; + do { + if (tempmap & 0x1) { + qtype = (enum i40e_queue_type)(i % 2); + qid = vf->vsi->base_queue + i / 2; + if (b_first_q) { + /* This is header */ + b_first_q = false; + reg = ((qtype << + I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) + | qid); + } else { + /* element in the link list */ + reg = (vector_id) | + (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) | + (qid << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) | + BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) | + (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT); + } + I40E_WRITE_REG(hw, reg_idx, reg); + /* find next register to program */ + switch (qtype) { + case I40E_QUEUE_TYPE_RX: + reg_idx = I40E_QINT_RQCTL(qid); + itr_idx = vvm->rxitr_idx; + break; + case I40E_QUEUE_TYPE_TX: + reg_idx = I40E_QINT_TQCTL(qid); + itr_idx = vvm->txitr_idx; + break; + default: + break; + } + } + i++; + tempmap >>= 1; + } while (tempmap); + + /* Terminate the link list */ + reg = (vector_id) | + (0 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) | + (0x7FF << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) | + BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) | + (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT); + I40E_WRITE_REG(hw, reg_idx, reg); + +cfg_irq_done: + I40E_WRITE_FLUSH(hw); +} + +static int +i40e_pf_host_process_cmd_config_irq_map(struct i40e_pf_vf *vf, + uint8_t *msg, uint16_t msglen, + bool b_op) +{ + int ret = I40E_SUCCESS; + struct i40e_pf *pf = vf->pf; + struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf); + struct virtchnl_irq_map_info *irqmap = + (struct virtchnl_irq_map_info *)msg; + struct virtchnl_vector_map *map; + int i; + uint16_t vector_id, itr_idx; + unsigned long qbit_max; + + if (!b_op) { + i40e_pf_host_send_msg_to_vf( + vf, + VIRTCHNL_OP_CONFIG_IRQ_MAP, + I40E_NOT_SUPPORTED, NULL, 0); + return ret; + } + + if (msg == NULL || msglen < sizeof(struct virtchnl_irq_map_info)) { + 
PMD_DRV_LOG(ERR, "buffer too short"); + ret = I40E_ERR_PARAM; + goto send_msg; + } + + /* PF host will support both DPDK VF or Linux VF driver, identify by + * number of vectors requested. + */ + + /* DPDK VF only requires single vector */ + if (irqmap->num_vectors == 1) { + /* This MSIX intr store the intr in VF range */ + vf->vsi->msix_intr = irqmap->vecmap[0].vector_id; + vf->vsi->nb_msix = irqmap->num_vectors; + vf->vsi->nb_used_qps = vf->vsi->nb_qps; + itr_idx = irqmap->vecmap[0].rxitr_idx; + + /* Don't care how the TX/RX queue mapping with this vector. + * Link all VF RX queues together. Only did mapping work. + * VF can disable/enable the intr by itself. + */ + i40e_vsi_queues_bind_intr(vf->vsi, itr_idx); + goto send_msg; + } + + /* Then, it's Linux VF driver */ + qbit_max = 1 << pf->vf_nb_qp_max; + for (i = 0; i < irqmap->num_vectors; i++) { + map = &irqmap->vecmap[i]; + + vector_id = map->vector_id; + /* validate msg params */ + if (vector_id >= hw->func_caps.num_msix_vectors_vf) { + ret = I40E_ERR_PARAM; + goto send_msg; + } + + if ((map->rxq_map < qbit_max) && (map->txq_map < qbit_max)) { + i40e_pf_config_irq_link_list(vf, map); + } else { + /* configured queue size excceed limit */ + ret = I40E_ERR_PARAM; + goto send_msg; + } + } + +send_msg: + i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, + ret, NULL, 0); + + return ret; +} + +static int +i40e_pf_host_switch_queues(struct i40e_pf_vf *vf, + struct virtchnl_queue_select *qsel, + bool on) +{ + int ret = I40E_SUCCESS; + int i; + struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf); + uint16_t baseq = vf->vsi->base_queue; + + if (qsel->rx_queues + qsel->tx_queues == 0) + return I40E_ERR_PARAM; + + /* always enable RX first and disable last */ + /* Enable RX if it's enable */ + if (on) { + for (i = 0; i < I40E_MAX_QP_NUM_PER_VF; i++) + if (qsel->rx_queues & (1 << i)) { + ret = i40e_switch_rx_queue(hw, baseq + i, on); + if (ret != I40E_SUCCESS) + return ret; + } + } + + /* Enable/Disable TX */ + for (i = 0; i < I40E_MAX_QP_NUM_PER_VF; i++) + if (qsel->tx_queues & (1 << i)) { + ret = i40e_switch_tx_queue(hw, baseq + i, on); + if (ret != I40E_SUCCESS) + return ret; + } + + /* disable RX last if it's disable */ + if (!on) { + /* disable RX */ + for (i = 0; i < I40E_MAX_QP_NUM_PER_VF; i++) + if (qsel->rx_queues & (1 << i)) { + ret = i40e_switch_rx_queue(hw, baseq + i, on); + if (ret != I40E_SUCCESS) + return ret; + } + } + + return ret; +} + +static int +i40e_pf_host_process_cmd_enable_queues(struct i40e_pf_vf *vf, + uint8_t *msg, + uint16_t msglen) +{ + int ret = I40E_SUCCESS; + struct virtchnl_queue_select *q_sel = + (struct virtchnl_queue_select *)msg; + + if (msg == NULL || msglen != sizeof(*q_sel)) { + ret = I40E_ERR_PARAM; + goto send_msg; + } + ret = i40e_pf_host_switch_queues(vf, q_sel, true); + +send_msg: + i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, + ret, NULL, 0); + + return ret; +} + +static int +i40e_pf_host_process_cmd_disable_queues(struct i40e_pf_vf *vf, + uint8_t *msg, + uint16_t msglen, + bool b_op) +{ + int ret = I40E_SUCCESS; + struct virtchnl_queue_select *q_sel = + (struct virtchnl_queue_select *)msg; + + if (!b_op) { + i40e_pf_host_send_msg_to_vf( + vf, + VIRTCHNL_OP_DISABLE_QUEUES, + I40E_NOT_SUPPORTED, NULL, 0); + return ret; + } + + if (msg == NULL || msglen != sizeof(*q_sel)) { + ret = I40E_ERR_PARAM; + goto send_msg; + } + ret = i40e_pf_host_switch_queues(vf, q_sel, false); + +send_msg: + i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, + ret, NULL, 0); + + return ret; +} + + 
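The queue enable/disable opcodes handled above work on per-VF queue bitmaps: bit i of rx_queues/tx_queues selects VF-relative queue i, and the PF adds vsi->base_queue to obtain the absolute queue to switch. The stand-alone sketch below (not driver code) illustrates that translation with a simplified stand-in for struct virtchnl_queue_select; the reduced field layout and the base queue value of 64 are assumptions for the example only.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for struct virtchnl_queue_select (bitmaps only). */
struct queue_select_sketch {
	uint32_t rx_queues;	/* bit i selects VF-relative RX queue i */
	uint32_t tx_queues;	/* bit i selects VF-relative TX queue i */
};

int main(void)
{
	struct queue_select_sketch qsel = { 0, 0 };
	unsigned int nb_qps = 4;	/* queue pairs owned by the VF */
	unsigned int base_queue = 64;	/* example vsi->base_queue */
	unsigned int i;

	/* Select every queue pair, as the VF reset path does. */
	for (i = 0; i < nb_qps; i++)
		qsel.rx_queues |= 1u << i;
	qsel.tx_queues = qsel.rx_queues;

	/* The PF turns VF-relative bits into absolute queue indices. */
	for (i = 0; i < 32; i++)
		if (qsel.rx_queues & (1u << i))
			printf("VF queue %u -> PF queue %u\n", i,
			       base_queue + i);
	return 0;
}
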
+static int +i40e_pf_host_process_cmd_add_ether_address(struct i40e_pf_vf *vf, + uint8_t *msg, + uint16_t msglen, + bool b_op) +{ + int ret = I40E_SUCCESS; + struct virtchnl_ether_addr_list *addr_list = + (struct virtchnl_ether_addr_list *)msg; + struct i40e_mac_filter_info filter; + int i; + struct rte_ether_addr *mac; + + if (!b_op) { + i40e_pf_host_send_msg_to_vf( + vf, + VIRTCHNL_OP_ADD_ETH_ADDR, + I40E_NOT_SUPPORTED, NULL, 0); + return ret; + } + + memset(&filter, 0 , sizeof(struct i40e_mac_filter_info)); + + if (msg == NULL || msglen <= sizeof(*addr_list)) { + PMD_DRV_LOG(ERR, "add_ether_address argument too short"); + ret = I40E_ERR_PARAM; + goto send_msg; + } + + for (i = 0; i < addr_list->num_elements; i++) { + mac = (struct rte_ether_addr *)(addr_list->list[i].addr); + rte_memcpy(&filter.mac_addr, mac, RTE_ETHER_ADDR_LEN); + filter.filter_type = RTE_MACVLAN_PERFECT_MATCH; + if (rte_is_zero_ether_addr(mac) || + i40e_vsi_add_mac(vf->vsi, &filter)) { + ret = I40E_ERR_INVALID_MAC_ADDR; + goto send_msg; + } + } + +send_msg: + i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR, + ret, NULL, 0); + + return ret; +} + +static int +i40e_pf_host_process_cmd_del_ether_address(struct i40e_pf_vf *vf, + uint8_t *msg, + uint16_t msglen, + bool b_op) +{ + int ret = I40E_SUCCESS; + struct virtchnl_ether_addr_list *addr_list = + (struct virtchnl_ether_addr_list *)msg; + int i; + struct rte_ether_addr *mac; + + if (!b_op) { + i40e_pf_host_send_msg_to_vf( + vf, + VIRTCHNL_OP_DEL_ETH_ADDR, + I40E_NOT_SUPPORTED, NULL, 0); + return ret; + } + + if (msg == NULL || msglen <= sizeof(*addr_list)) { + PMD_DRV_LOG(ERR, "delete_ether_address argument too short"); + ret = I40E_ERR_PARAM; + goto send_msg; + } + + for (i = 0; i < addr_list->num_elements; i++) { + mac = (struct rte_ether_addr *)(addr_list->list[i].addr); + if (rte_is_zero_ether_addr(mac) || + i40e_vsi_delete_mac(vf->vsi, mac)) { + ret = I40E_ERR_INVALID_MAC_ADDR; + goto send_msg; + } + } + +send_msg: + i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR, + ret, NULL, 0); + + return ret; +} + +static int +i40e_pf_host_process_cmd_add_vlan(struct i40e_pf_vf *vf, + uint8_t *msg, uint16_t msglen, + bool b_op) +{ + int ret = I40E_SUCCESS; + struct virtchnl_vlan_filter_list *vlan_filter_list = + (struct virtchnl_vlan_filter_list *)msg; + int i; + uint16_t *vid; + + if (!b_op) { + i40e_pf_host_send_msg_to_vf( + vf, + VIRTCHNL_OP_ADD_VLAN, + I40E_NOT_SUPPORTED, NULL, 0); + return ret; + } + + if (msg == NULL || msglen <= sizeof(*vlan_filter_list)) { + PMD_DRV_LOG(ERR, "add_vlan argument too short"); + ret = I40E_ERR_PARAM; + goto send_msg; + } + + vid = vlan_filter_list->vlan_id; + + for (i = 0; i < vlan_filter_list->num_elements; i++) { + ret = i40e_vsi_add_vlan(vf->vsi, vid[i]); + if(ret != I40E_SUCCESS) + goto send_msg; + } + +send_msg: + i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, + ret, NULL, 0); + + return ret; +} + +static int +i40e_pf_host_process_cmd_del_vlan(struct i40e_pf_vf *vf, + uint8_t *msg, + uint16_t msglen, + bool b_op) +{ + int ret = I40E_SUCCESS; + struct virtchnl_vlan_filter_list *vlan_filter_list = + (struct virtchnl_vlan_filter_list *)msg; + int i; + uint16_t *vid; + + if (!b_op) { + i40e_pf_host_send_msg_to_vf( + vf, + VIRTCHNL_OP_DEL_VLAN, + I40E_NOT_SUPPORTED, NULL, 0); + return ret; + } + + if (msg == NULL || msglen <= sizeof(*vlan_filter_list)) { + PMD_DRV_LOG(ERR, "delete_vlan argument too short"); + ret = I40E_ERR_PARAM; + goto send_msg; + } + + vid = vlan_filter_list->vlan_id; + for (i = 0; i < 
vlan_filter_list->num_elements; i++) { + ret = i40e_vsi_delete_vlan(vf->vsi, vid[i]); + if(ret != I40E_SUCCESS) + goto send_msg; + } + +send_msg: + i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, + ret, NULL, 0); + + return ret; +} + +static int +i40e_pf_host_process_cmd_config_promisc_mode( + struct i40e_pf_vf *vf, + uint8_t *msg, + uint16_t msglen, + bool b_op) +{ + int ret = I40E_SUCCESS; + struct virtchnl_promisc_info *promisc = + (struct virtchnl_promisc_info *)msg; + struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf); + bool unicast = FALSE, multicast = FALSE; + + if (!b_op) { + i40e_pf_host_send_msg_to_vf( + vf, + VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, + I40E_NOT_SUPPORTED, NULL, 0); + return ret; + } + + if (msg == NULL || msglen != sizeof(*promisc)) { + ret = I40E_ERR_PARAM; + goto send_msg; + } + + if (promisc->flags & FLAG_VF_UNICAST_PROMISC) + unicast = TRUE; + ret = i40e_aq_set_vsi_unicast_promiscuous(hw, + vf->vsi->seid, unicast, NULL, true); + if (ret != I40E_SUCCESS) + goto send_msg; + + if (promisc->flags & FLAG_VF_MULTICAST_PROMISC) + multicast = TRUE; + ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vf->vsi->seid, + multicast, NULL); + +send_msg: + i40e_pf_host_send_msg_to_vf(vf, + VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, ret, NULL, 0); + + return ret; +} + +static int +i40e_pf_host_process_cmd_get_stats(struct i40e_pf_vf *vf, bool b_op) +{ + i40e_update_vsi_stats(vf->vsi); + + if (b_op) + i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, + I40E_SUCCESS, + (uint8_t *)&vf->vsi->eth_stats, + sizeof(vf->vsi->eth_stats)); + else + i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, + I40E_NOT_SUPPORTED, + (uint8_t *)&vf->vsi->eth_stats, + sizeof(vf->vsi->eth_stats)); + + return I40E_SUCCESS; +} + +static int +i40e_pf_host_process_cmd_enable_vlan_strip(struct i40e_pf_vf *vf, bool b_op) +{ + int ret = I40E_SUCCESS; + + if (!b_op) { + i40e_pf_host_send_msg_to_vf( + vf, + VIRTCHNL_OP_ENABLE_VLAN_STRIPPING, + I40E_NOT_SUPPORTED, NULL, 0); + return ret; + } + + ret = i40e_vsi_config_vlan_stripping(vf->vsi, TRUE); + if (ret != 0) + PMD_DRV_LOG(ERR, "Failed to enable vlan stripping"); + + i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING, + ret, NULL, 0); + + return ret; +} + +static int +i40e_pf_host_process_cmd_disable_vlan_strip(struct i40e_pf_vf *vf, bool b_op) +{ + int ret = I40E_SUCCESS; + + if (!b_op) { + i40e_pf_host_send_msg_to_vf( + vf, + VIRTCHNL_OP_DISABLE_VLAN_STRIPPING, + I40E_NOT_SUPPORTED, NULL, 0); + return ret; + } + + ret = i40e_vsi_config_vlan_stripping(vf->vsi, FALSE); + if (ret != 0) + PMD_DRV_LOG(ERR, "Failed to disable vlan stripping"); + + i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING, + ret, NULL, 0); + + return ret; +} + +static int +i40e_pf_host_process_cmd_set_rss_lut(struct i40e_pf_vf *vf, + uint8_t *msg, + uint16_t msglen, + bool b_op) +{ + struct virtchnl_rss_lut *rss_lut = (struct virtchnl_rss_lut *)msg; + uint16_t valid_len; + int ret = I40E_SUCCESS; + + if (!b_op) { + i40e_pf_host_send_msg_to_vf( + vf, + VIRTCHNL_OP_CONFIG_RSS_LUT, + I40E_NOT_SUPPORTED, NULL, 0); + return ret; + } + + if (!msg || msglen <= sizeof(struct virtchnl_rss_lut)) { + PMD_DRV_LOG(ERR, "set_rss_lut argument too short"); + ret = I40E_ERR_PARAM; + goto send_msg; + } + valid_len = sizeof(struct virtchnl_rss_lut) + rss_lut->lut_entries - 1; + if (msglen < valid_len) { + PMD_DRV_LOG(ERR, "set_rss_lut length mismatch"); + ret = I40E_ERR_PARAM; + goto send_msg; + } + + ret = i40e_set_rss_lut(vf->vsi, rss_lut->lut, rss_lut->lut_entries); + 
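+	/*
+	 * Note: struct virtchnl_rss_lut declares a one-byte table (lut[1]),
+	 * which is why the expected message length checked above is
+	 * sizeof(struct virtchnl_rss_lut) + lut_entries - 1; a 64-entry LUT,
+	 * for instance, needs sizeof(struct virtchnl_rss_lut) + 63 bytes.
+	 */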
+send_msg: + i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, + ret, NULL, 0); + + return ret; +} + +static int +i40e_pf_host_process_cmd_set_rss_key(struct i40e_pf_vf *vf, + uint8_t *msg, + uint16_t msglen, + bool b_op) +{ + struct virtchnl_rss_key *rss_key = (struct virtchnl_rss_key *)msg; + uint16_t valid_len; + int ret = I40E_SUCCESS; + + if (!b_op) { + i40e_pf_host_send_msg_to_vf( + vf, + VIRTCHNL_OP_DEL_VLAN, + VIRTCHNL_OP_CONFIG_RSS_KEY, NULL, 0); + return ret; + } + + if (!msg || msglen <= sizeof(struct virtchnl_rss_key)) { + PMD_DRV_LOG(ERR, "set_rss_key argument too short"); + ret = I40E_ERR_PARAM; + goto send_msg; + } + valid_len = sizeof(struct virtchnl_rss_key) + rss_key->key_len - 1; + if (msglen < valid_len) { + PMD_DRV_LOG(ERR, "set_rss_key length mismatch"); + ret = I40E_ERR_PARAM; + goto send_msg; + } + + ret = i40e_set_rss_key(vf->vsi, rss_key->key, rss_key->key_len); + +send_msg: + i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY, + ret, NULL, 0); + + return ret; +} + +void +i40e_notify_vf_link_status(struct rte_eth_dev *dev, struct i40e_pf_vf *vf) +{ + struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf); + struct virtchnl_pf_event event; + uint16_t vf_id = vf->vf_idx; + uint32_t tval, rval; + + event.event = VIRTCHNL_EVENT_LINK_CHANGE; + event.event_data.link_event.link_status = + dev->data->dev_link.link_status; + + /* need to convert the ETH_SPEED_xxx into VIRTCHNL_LINK_SPEED_xxx */ + switch (dev->data->dev_link.link_speed) { + case ETH_SPEED_NUM_100M: + event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_100MB; + break; + case ETH_SPEED_NUM_1G: + event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_1GB; + break; + case ETH_SPEED_NUM_10G: + event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_10GB; + break; + case ETH_SPEED_NUM_20G: + event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_20GB; + break; + case ETH_SPEED_NUM_25G: + event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_25GB; + break; + case ETH_SPEED_NUM_40G: + event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_40GB; + break; + default: + event.event_data.link_event.link_speed = + VIRTCHNL_LINK_SPEED_UNKNOWN; + break; + } + + tval = I40E_READ_REG(hw, I40E_VF_ATQLEN(vf_id)); + rval = I40E_READ_REG(hw, I40E_VF_ARQLEN(vf_id)); + + if (tval & I40E_VF_ATQLEN_ATQLEN_MASK || + tval & I40E_VF_ATQLEN_ATQENABLE_MASK || + rval & I40E_VF_ARQLEN_ARQLEN_MASK || + rval & I40E_VF_ARQLEN_ARQENABLE_MASK) + i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_EVENT, + I40E_SUCCESS, (uint8_t *)&event, sizeof(event)); +} + +/** + * i40e_vc_notify_vf_reset + * @vf: pointer to the VF structure + * + * indicate a pending reset to the given VF + **/ +static void +i40e_vc_notify_vf_reset(struct i40e_pf_vf *vf) +{ + struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf); + struct virtchnl_pf_event pfe; + int abs_vf_id; + uint16_t vf_id = vf->vf_idx; + + abs_vf_id = vf_id + hw->func_caps.vf_base_id; + pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING; + pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM; + i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT, 0, (u8 *)&pfe, + sizeof(struct virtchnl_pf_event), NULL); +} + +static int +i40e_pf_host_process_cmd_request_queues(struct i40e_pf_vf *vf, uint8_t *msg) +{ + struct virtchnl_vf_res_request *vfres = + (struct virtchnl_vf_res_request *)msg; + struct i40e_pf *pf; + uint32_t req_pairs = vfres->num_queue_pairs; + uint32_t cur_pairs = vf->vsi->nb_used_qps; + + pf = vf->pf; + + if (!rte_is_power_of_2(req_pairs)) + req_pairs = 
i40e_align_floor(req_pairs) << 1; + + if (req_pairs == 0) { + PMD_DRV_LOG(ERR, "VF %d tried to request 0 queues. Ignoring.\n", + vf->vf_idx); + } else if (req_pairs > I40E_MAX_QP_NUM_PER_VF) { + PMD_DRV_LOG(ERR, + "VF %d tried to request more than %d queues.\n", + vf->vf_idx, + I40E_MAX_QP_NUM_PER_VF); + vfres->num_queue_pairs = I40E_MAX_QP_NUM_PER_VF; + } else if (req_pairs > cur_pairs + pf->qp_pool.num_free) { + PMD_DRV_LOG(ERR, "VF %d requested %d queues (rounded to %d) " + "but only %d available\n", + vf->vf_idx, + vfres->num_queue_pairs, + req_pairs, + cur_pairs + pf->qp_pool.num_free); + vfres->num_queue_pairs = i40e_align_floor(pf->qp_pool.num_free + + cur_pairs); + } else { + i40e_vc_notify_vf_reset(vf); + vf->vsi->nb_qps = req_pairs; + pf->vf_nb_qps = req_pairs; + i40e_pf_host_process_cmd_reset_vf(vf); + + return 0; + } + + return i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES, 0, + (u8 *)vfres, sizeof(*vfres)); +} + +void +i40e_pf_host_handle_vf_msg(struct rte_eth_dev *dev, + uint16_t abs_vf_id, uint32_t opcode, + __rte_unused uint32_t retval, + uint8_t *msg, + uint16_t msglen) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct i40e_pf_vf *vf; + /* AdminQ will pass absolute VF id, transfer to internal vf id */ + uint16_t vf_id = abs_vf_id - hw->func_caps.vf_base_id; + struct rte_pmd_i40e_mb_event_param ret_param; + uint64_t first_cycle, cur_cycle; + bool b_op = TRUE; + int ret; + + if (vf_id > pf->vf_num - 1 || !pf->vfs) { + PMD_DRV_LOG(ERR, "invalid argument"); + return; + } + + vf = &pf->vfs[vf_id]; + + cur_cycle = rte_get_timer_cycles(); + + /* if the VF being blocked, ignore the message and return */ + if (cur_cycle < vf->ignore_end_cycle) + return; + + if (!vf->vsi) { + PMD_DRV_LOG(ERR, "NO VSI associated with VF found"); + i40e_pf_host_send_msg_to_vf(vf, opcode, + I40E_ERR_NO_AVAILABLE_VSI, NULL, 0); + goto check; + } + + /* perform basic checks on the msg */ + ret = virtchnl_vc_validate_vf_msg(&vf->version, opcode, msg, msglen); + + /* perform additional checks specific to this driver */ + if (opcode == VIRTCHNL_OP_CONFIG_RSS_KEY) { + struct virtchnl_rss_key *vrk = (struct virtchnl_rss_key *)msg; + + if (vrk->key_len != ((I40E_PFQF_HKEY_MAX_INDEX + 1) * 4)) + ret = VIRTCHNL_ERR_PARAM; + } else if (opcode == VIRTCHNL_OP_CONFIG_RSS_LUT) { + struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg; + + if (vrl->lut_entries != ((I40E_VFQF_HLUT1_MAX_INDEX + 1) * 4)) + ret = VIRTCHNL_ERR_PARAM; + } + + if (ret) { + PMD_DRV_LOG(ERR, "Invalid message from VF %u, opcode %u, len %u", + vf_id, opcode, msglen); + i40e_pf_host_send_msg_to_vf(vf, opcode, + I40E_ERR_PARAM, NULL, 0); + goto check; + } + + /** + * initialise structure to send to user application + * will return response from user in retval field + */ + ret_param.retval = RTE_PMD_I40E_MB_EVENT_PROCEED; + ret_param.vfid = vf_id; + ret_param.msg_type = opcode; + ret_param.msg = (void *)msg; + ret_param.msglen = msglen; + + /** + * Ask user application if we're allowed to perform those functions. + * If we get ret_param.retval == RTE_PMD_I40E_MB_EVENT_PROCEED, + * then business as usual. + * If RTE_PMD_I40E_MB_EVENT_NOOP_ACK or RTE_PMD_I40E_MB_EVENT_NOOP_NACK, + * do nothing and send not_supported to VF. As PF must send a response + * to VF and ACK/NACK is not defined. 
+ */ + _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_VF_MBOX, &ret_param); + if (ret_param.retval != RTE_PMD_I40E_MB_EVENT_PROCEED) { + PMD_DRV_LOG(WARNING, "VF to PF message(%d) is not permitted!", + opcode); + b_op = FALSE; + } + + switch (opcode) { + case VIRTCHNL_OP_VERSION: + PMD_DRV_LOG(INFO, "OP_VERSION received"); + i40e_pf_host_process_cmd_version(vf, msg, b_op); + break; + case VIRTCHNL_OP_RESET_VF: + PMD_DRV_LOG(INFO, "OP_RESET_VF received"); + i40e_pf_host_process_cmd_reset_vf(vf); + break; + case VIRTCHNL_OP_GET_VF_RESOURCES: + PMD_DRV_LOG(INFO, "OP_GET_VF_RESOURCES received"); + i40e_pf_host_process_cmd_get_vf_resource(vf, msg, b_op); + break; + case VIRTCHNL_OP_CONFIG_VSI_QUEUES: + PMD_DRV_LOG(INFO, "OP_CONFIG_VSI_QUEUES received"); + i40e_pf_host_process_cmd_config_vsi_queues(vf, msg, + msglen, b_op); + break; + case VIRTCHNL_OP_CONFIG_IRQ_MAP: + PMD_DRV_LOG(INFO, "OP_CONFIG_IRQ_MAP received"); + i40e_pf_host_process_cmd_config_irq_map(vf, msg, msglen, b_op); + break; + case VIRTCHNL_OP_ENABLE_QUEUES: + PMD_DRV_LOG(INFO, "OP_ENABLE_QUEUES received"); + if (b_op) { + i40e_pf_host_process_cmd_enable_queues(vf, msg, msglen); + i40e_notify_vf_link_status(dev, vf); + } else { + i40e_pf_host_send_msg_to_vf( + vf, VIRTCHNL_OP_ENABLE_QUEUES, + I40E_NOT_SUPPORTED, NULL, 0); + } + break; + case VIRTCHNL_OP_DISABLE_QUEUES: + PMD_DRV_LOG(INFO, "OP_DISABLE_QUEUE received"); + i40e_pf_host_process_cmd_disable_queues(vf, msg, msglen, b_op); + break; + case VIRTCHNL_OP_ADD_ETH_ADDR: + PMD_DRV_LOG(INFO, "OP_ADD_ETHER_ADDRESS received"); + i40e_pf_host_process_cmd_add_ether_address(vf, msg, + msglen, b_op); + break; + case VIRTCHNL_OP_DEL_ETH_ADDR: + PMD_DRV_LOG(INFO, "OP_DEL_ETHER_ADDRESS received"); + i40e_pf_host_process_cmd_del_ether_address(vf, msg, + msglen, b_op); + break; + case VIRTCHNL_OP_ADD_VLAN: + PMD_DRV_LOG(INFO, "OP_ADD_VLAN received"); + i40e_pf_host_process_cmd_add_vlan(vf, msg, msglen, b_op); + break; + case VIRTCHNL_OP_DEL_VLAN: + PMD_DRV_LOG(INFO, "OP_DEL_VLAN received"); + i40e_pf_host_process_cmd_del_vlan(vf, msg, msglen, b_op); + break; + case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: + PMD_DRV_LOG(INFO, "OP_CONFIG_PROMISCUOUS_MODE received"); + i40e_pf_host_process_cmd_config_promisc_mode(vf, msg, + msglen, b_op); + break; + case VIRTCHNL_OP_GET_STATS: + PMD_DRV_LOG(INFO, "OP_GET_STATS received"); + i40e_pf_host_process_cmd_get_stats(vf, b_op); + break; + case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING: + PMD_DRV_LOG(INFO, "OP_ENABLE_VLAN_STRIPPING received"); + i40e_pf_host_process_cmd_enable_vlan_strip(vf, b_op); + break; + case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING: + PMD_DRV_LOG(INFO, "OP_DISABLE_VLAN_STRIPPING received"); + i40e_pf_host_process_cmd_disable_vlan_strip(vf, b_op); + break; + case VIRTCHNL_OP_CONFIG_RSS_LUT: + PMD_DRV_LOG(INFO, "OP_CONFIG_RSS_LUT received"); + i40e_pf_host_process_cmd_set_rss_lut(vf, msg, msglen, b_op); + break; + case VIRTCHNL_OP_CONFIG_RSS_KEY: + PMD_DRV_LOG(INFO, "OP_CONFIG_RSS_KEY received"); + i40e_pf_host_process_cmd_set_rss_key(vf, msg, msglen, b_op); + break; + case VIRTCHNL_OP_REQUEST_QUEUES: + PMD_DRV_LOG(INFO, "OP_REQUEST_QUEUES received"); + i40e_pf_host_process_cmd_request_queues(vf, msg); + break; + + /* Don't add command supported below, which will + * return an error code. 
+ */ + default: + PMD_DRV_LOG(ERR, "%u received, not supported", opcode); + i40e_pf_host_send_msg_to_vf(vf, opcode, I40E_ERR_PARAM, + NULL, 0); + break; + } + +check: + /* if message validation not enabled */ + if (!pf->vf_msg_cfg.max_msg) + return; + + /* store current cycle */ + vf->msg_timestamps[vf->msg_index++] = cur_cycle; + vf->msg_index %= pf->vf_msg_cfg.max_msg; + + /* read the timestamp of earliest message */ + first_cycle = vf->msg_timestamps[vf->msg_index]; + + /* + * If the time span from the arrival time of first message to + * the arrival time of current message smaller than `period`, + * that mean too much message in this statistic period. + */ + if (first_cycle && cur_cycle < first_cycle + + (uint64_t)pf->vf_msg_cfg.period * rte_get_timer_hz()) { + PMD_DRV_LOG(WARNING, "VF %u too much messages(%u in %u" + " seconds),\n\tany new message from which" + " will be ignored during next %u seconds!", + vf_id, pf->vf_msg_cfg.max_msg, + (uint32_t)((cur_cycle - first_cycle + + rte_get_timer_hz() - 1) / rte_get_timer_hz()), + pf->vf_msg_cfg.ignore_second); + vf->ignore_end_cycle = rte_get_timer_cycles() + + pf->vf_msg_cfg.ignore_second * + rte_get_timer_hz(); + } +} + +int +i40e_pf_host_init(struct rte_eth_dev *dev) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + size_t size; + int ret, i; + uint32_t val; + + PMD_INIT_FUNC_TRACE(); + + /** + * return if SRIOV not enabled, VF number not configured or + * no queue assigned. + */ + if(!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 || pf->vf_nb_qps == 0) + return I40E_SUCCESS; + + /* Allocate memory to store VF structure */ + pf->vfs = rte_zmalloc("i40e_pf_vf",sizeof(*pf->vfs) * pf->vf_num, 0); + if(pf->vfs == NULL) + return -ENOMEM; + + /* Disable irq0 for VFR event */ + i40e_pf_disable_irq0(hw); + + /* Disable VF link status interrupt */ + val = I40E_READ_REG(hw, I40E_PFGEN_PORTMDIO_NUM); + val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK; + I40E_WRITE_REG(hw, I40E_PFGEN_PORTMDIO_NUM, val); + I40E_WRITE_FLUSH(hw); + + /* calculate the memory size for storing timestamp of messages */ + size = pf->vf_msg_cfg.max_msg * sizeof(uint64_t); + + for (i = 0; i < pf->vf_num; i++) { + pf->vfs[i].pf = pf; + pf->vfs[i].state = I40E_VF_INACTIVE; + pf->vfs[i].vf_idx = i; + + if (size) { + /* allocate memory for store timestamp of messages */ + pf->vfs[i].msg_timestamps = + rte_zmalloc("i40e_pf_vf", size, 0); + if (pf->vfs[i].msg_timestamps == NULL) { + ret = -ENOMEM; + goto fail; + } + } + + ret = i40e_pf_host_vf_reset(&pf->vfs[i], 0); + if (ret != I40E_SUCCESS) + goto fail; + } + + RTE_ETH_DEV_SRIOV(dev).active = pf->vf_num; + /* restore irq0 */ + i40e_pf_enable_irq0(hw); + + return I40E_SUCCESS; + +fail: + for (; i >= 0; i--) + rte_free(pf->vfs[i].msg_timestamps); + rte_free(pf->vfs); + i40e_pf_enable_irq0(hw); + + return ret; +} + +int +i40e_pf_host_uninit(struct rte_eth_dev *dev) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + uint32_t val; + int i; + + PMD_INIT_FUNC_TRACE(); + + /** + * return if SRIOV not enabled, VF number not configured or + * no queue assigned. 
+ */ + if ((!hw->func_caps.sr_iov_1_1) || + (pf->vf_num == 0) || + (pf->vf_nb_qps == 0)) + return I40E_SUCCESS; + + /* free memory for store timestamp of messages */ + for (i = 0; i < pf->vf_num; i++) + rte_free(pf->vfs[i].msg_timestamps); + + /* free memory to store VF structure */ + rte_free(pf->vfs); + pf->vfs = NULL; + + /* Disable irq0 for VFR event */ + i40e_pf_disable_irq0(hw); + + /* Disable VF link status interrupt */ + val = I40E_READ_REG(hw, I40E_PFGEN_PORTMDIO_NUM); + val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK; + I40E_WRITE_REG(hw, I40E_PFGEN_PORTMDIO_NUM, val); + I40E_WRITE_FLUSH(hw); + + return I40E_SUCCESS; +} diff --git a/src/spdk/dpdk/drivers/net/i40e/i40e_pf.h b/src/spdk/dpdk/drivers/net/i40e/i40e_pf.h new file mode 100644 index 000000000..1809ba4d2 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/i40e/i40e_pf.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2017 Intel Corporation + */ + +#ifndef _I40E_PF_H_ +#define _I40E_PF_H_ + +/* Default setting on number of VSIs that VF can contain */ +#define I40E_DEFAULT_VF_VSI_NUM 1 + +#define I40E_VIRTCHNL_OFFLOAD_CAPS ( \ + VIRTCHNL_VF_OFFLOAD_L2 | \ + VIRTCHNL_VF_OFFLOAD_VLAN | \ + VIRTCHNL_VF_OFFLOAD_RSS_PF | \ + VIRTCHNL_VF_OFFLOAD_RX_POLLING) + +struct virtchnl_vlan_offload_info { + uint16_t vsi_id; + uint8_t enable_vlan_strip; + uint8_t reserved; +}; + +/* + * Macro to calculate the memory size for configuring VSI queues + * via virtual channel. + */ +#define I40E_VIRTCHNL_CONFIG_VSI_QUEUES_SIZE(x, n) \ + (sizeof(*(x)) + sizeof((x)->qpair[0]) * (n)) + +int i40e_pf_host_vf_reset(struct i40e_pf_vf *vf, bool do_hw_reset); +void i40e_pf_host_handle_vf_msg(struct rte_eth_dev *dev, + uint16_t abs_vf_id, uint32_t opcode, + uint32_t retval, + uint8_t *msg, uint16_t msglen); +int i40e_pf_host_init(struct rte_eth_dev *dev); +int i40e_pf_host_uninit(struct rte_eth_dev *dev); +void i40e_notify_vf_link_status(struct rte_eth_dev *dev, + struct i40e_pf_vf *vf); + +#endif /* _I40E_PF_H_ */ diff --git a/src/spdk/dpdk/drivers/net/i40e/i40e_regs.h b/src/spdk/dpdk/drivers/net/i40e/i40e_regs.h new file mode 100644 index 000000000..b19bb1d5a --- /dev/null +++ b/src/spdk/dpdk/drivers/net/i40e/i40e_regs.h @@ -0,0 +1,968 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2016 Intel Corporation + */ + +struct i40e_reg_info { + uint32_t base_addr; + uint32_t count1; + uint32_t stride1; + uint32_t count2; + uint32_t stride2; + const char *name; +}; + +static const struct i40e_reg_info i40e_regs_adminq[] = { + {I40E_VFQF_HENA(0), 1, 4, 0, 0, "VFQF_HENA"}, + {I40E_VFQF_HKEY(0), 12, 4, 0, 0, "VFQF_HKEY"}, + {I40E_VFQF_HREGION(0), 7, 4, 0, 0, "VFQF_HREGION"}, + {I40E_VPQF_CTL(0), 127, 4, 0, 0, "VPQF_CTL"}, + {I40E_PFLAN_QALLOC, 0, 0, 0, 0, "PFLAN_QALLOC"}, + {I40E_PFQF_CTL_0, 0, 0, 0, 0, "PFQF_CTL_0"}, + {I40E_VSILAN_QTABLE(0, 0), 7, 2048, 383, 4, "VSILAN_QTABLE"}, + {I40E_VSIQF_TCREGION(0, 0), 3, 2048, 383, 4, "VSIQF_TCREGION"}, + {I40E_VSILAN_QBASE(0), 383, 4, 0, 0, "VSILAN_QBASE"}, + {I40E_VSIQF_CTL(0), 383, 4, 0, 0, "VSIQF_CTL"}, + {I40E_PFQF_HKEY(0), 12, 128, 0, 0, "PFQF_HKEY"}, + {I40E_PFQF_HREGION(0), 7, 128, 0, 0, "PFQF_HREGION"}, + {I40E_PFQF_HENA(0), 1, 128, 0, 0, "PFQF_HENA"}, + {I40E_PFQF_FDALLOC, 0, 0, 0, 0, "PFQF_FDALLOC"}, + {I40E_PRTQF_FD_INSET(0, 0), 63, 64, 1, 32, "PRTQF_FD_INSET"}, + {I40E_PRTQF_FD_INSET(0, 0), 63, 64, 1, 32, "PRTQF_FD_INSET"}, + {I40E_PRTQF_FD_MSK(0, 0), 63, 64, 1, 32, "PRTQF_FD_MSK"}, + {I40E_PRTQF_FD_FLXINSET(0), 63, 32, 0, 0, "PRTQF_FD_FLXINSET"}, + 
{I40E_PRTQF_CTL_0, 0, 0, 0, 0, "PRTQF_CTL_0"}, + {I40E_GLQF_FD_MSK(0, 0), 1, 4, 63, 8, "GLQF_FD_MSK"}, + {I40E_GLQF_HASH_INSET(0, 0), 1, 4, 63, 8, "GLQF_HASH_INSET"}, + {I40E_GLQF_HASH_MSK(0, 0), 1, 4, 63, 8, "GLQF_HASH_MSK"}, + {I40E_GLQF_SWAP(0, 0), 1, 4, 63, 8, "GLQF_SWAP"}, + {I40E_GLFCOE_RCTL, 0, 0, 0, 0, "GLFCOE_RCTL"}, + {I40E_GLQF_CTL, 0, 0, 0, 0, "GLQF_CTL"}, + {I40E_GLQF_HSYM(0), 63, 4, 0, 0, "GLQF_HSYM"}, + {0, 0, 0, 0, 0, NULL} +}; + +static const struct i40e_reg_info i40e_regs_others[] = { + {I40E_QTX_TAIL1(0), 15, 4, 0, 0, "QTX_TAIL1"}, + {I40E_VFPE_CQPDB(0), 127, 4, 0, 0, "VFPE_CQPDB"}, + {I40E_VFPE_CQPTAIL(0), 127, 4, 0, 0, "VFPE_CQPTAIL"}, + {I40E_VFPE_CCQPSTATUS(0), 127, 4, 0, 0, "VFPE_CCQPSTATUS"}, + {I40E_VFPE_CCQPLOW(0), 127, 4, 0, 0, "VFPE_CCQPLOW"}, + {I40E_VFPE_CCQPHIGH(0), 127, 4, 0, 0, "VFPE_CCQPHIGH"}, + {I40E_VFPE_IPCONFIG0(0), 127, 4, 0, 0, "VFPE_IPCONFIG0"}, + {I40E_VFPE_CQPERRCODES(0), 127, 4, 0, 0, "VFPE_CQPERRCODES"}, + {I40E_QRX_TAIL1(0), 15, 4, 0, 0, "QRX_TAIL1"}, + {I40E_VFINT_ITRN1(0, 0), 2, 64, 15, 4, "VFINT_ITRN1"}, + {I40E_VFPE_TCPNOWTIMER(0), 127, 4, 0, 0, "VFPE_TCPNOWTIMER"}, + {I40E_VFPE_MRTEIDXMASK(0), 127, 4, 0, 0, "VFPE_MRTEIDXMASK"}, + {I40E_VFPE_RCVUNEXPECTEDERROR(0), 127, 4, 0, 0, + "VFPE_RCVUNEXPECTEDERROR"}, + {I40E_VFINT_DYN_CTLN1(0), 15, 4, 0, 0, "VFINT_DYN_CTLN1"}, + {I40E_VFINT_ICR01, 0, 0, 0, 0, "VFINT_ICR01"}, + {I40E_VFINT_ITR01(0), 2, 4, 0, 0, "VFINT_ITR01"}, + {I40E_VFINT_ICR0_ENA1, 0, 0, 0, 0, "VFINT_ICR0_ENA1"}, + {I40E_VFINT_STAT_CTL01, 0, 0, 0, 0, "VFINT_STAT_CTL01"}, + {I40E_VFINT_DYN_CTL01, 0, 0, 0, 0, "VFINT_DYN_CTL01"}, + {I40E_VF_ARQBAH1, 0, 0, 0, 0, "VF_ARQBAH1"}, + {I40E_VF_ATQH1, 0, 0, 0, 0, "VF_ATQH1"}, + {I40E_VF_ATQLEN1, 0, 0, 0, 0, "VF_ATQLEN1"}, + {I40E_VF_ARQBAL1, 0, 0, 0, 0, "VF_ARQBAL1"}, + {I40E_VF_ARQT1, 0, 0, 0, 0, "VF_ARQT1"}, + {I40E_VF_ARQH1, 0, 0, 0, 0, "VF_ARQH1"}, + {I40E_VF_ATQBAH1, 0, 0, 0, 0, "VF_ATQBAH1"}, + {I40E_VF_ATQBAL1, 0, 0, 0, 0, "VF_ATQBAL1"}, + {I40E_VF_ARQLEN1, 0, 0, 0, 0, "VF_ARQLEN1"}, + {I40E_PFPE_CQPDB, 0, 0, 0, 0, "PFPE_CQPDB"}, + {I40E_PFPE_CQPTAIL, 0, 0, 0, 0, "PFPE_CQPTAIL"}, + {I40E_PFPE_CCQPSTATUS, 0, 0, 0, 0, "PFPE_CCQPSTATUS"}, + {I40E_PFPE_CCQPLOW, 0, 0, 0, 0, "PFPE_CCQPLOW"}, + {I40E_PFPE_CCQPHIGH, 0, 0, 0, 0, "PFPE_CCQPHIGH"}, + {I40E_PFPE_IPCONFIG0, 0, 0, 0, 0, "PFPE_IPCONFIG0"}, + {I40E_VF_ATQT1, 0, 0, 0, 0, "VF_ATQT1"}, + {I40E_PFPE_TCPNOWTIMER, 0, 0, 0, 0, "PFPE_TCPNOWTIMER"}, + {I40E_PFPE_MRTEIDXMASK, 0, 0, 0, 0, "PFPE_MRTEIDXMASK"}, + {I40E_PFPE_RCVUNEXPECTEDERROR, 0, 0, 0, 0, "PFPE_RCVUNEXPECTEDERROR"}, + {I40E_PFPE_UDACTRL, 0, 0, 0, 0, "PFPE_UDACTRL"}, + {I40E_PFPE_UDAUCFBQPN, 0, 0, 0, 0, "PFPE_UDAUCFBQPN"}, + {I40E_VFGEN_RSTAT, 0, 0, 0, 0, "VFGEN_RSTAT"}, + {I40E_PFPE_CQPERRCODES, 0, 0, 0, 0, "PFPE_CQPERRCODES"}, + {I40E_PFPE_FLMXMITALLOCERR, 0, 0, 0, 0, "PFPE_FLMXMITALLOCERR"}, + {I40E_PFPE_FLMQ1ALLOCERR, 0, 0, 0, 0, "PFPE_FLMQ1ALLOCERR"}, + {I40E_VFPE_IPCONFIG01, 0, 0, 0, 0, "VFPE_IPCONFIG01"}, + {I40E_VFPE_MRTEIDXMASK1, 0, 0, 0, 0, "VFPE_MRTEIDXMASK1"}, + {I40E_VFPE_RCVUNEXPECTEDERROR1, 0, 0, 0, 0, "VFPE_RCVUNEXPECTEDERROR1"}, + {I40E_VFPE_CCQPHIGH1, 0, 0, 0, 0, "VFPE_CCQPHIGH1"}, + {I40E_VFPE_CQPERRCODES1, 0, 0, 0, 0, "VFPE_CQPERRCODES1"}, + {I40E_VFPE_CQPTAIL1, 0, 0, 0, 0, "VFPE_CQPTAIL1"}, + {I40E_VFPE_AEQALLOC1, 0, 0, 0, 0, "VFPE_AEQALLOC1"}, + {I40E_VFPE_TCPNOWTIMER1, 0, 0, 0, 0, "VFPE_TCPNOWTIMER1"}, + {I40E_VFPE_CCQPLOW1, 0, 0, 0, 0, "VFPE_CCQPLOW1"}, + {I40E_VFPE_CQACK1, 0, 0, 0, 0, "VFPE_CQACK1"}, + {I40E_VFPE_CQARM1, 0, 0, 0, 0, "VFPE_CQARM1"}, + 
{I40E_VFPE_CCQPSTATUS1, 0, 0, 0, 0, "VFPE_CCQPSTATUS1"}, + {I40E_VFPE_CQPDB1, 0, 0, 0, 0, "VFPE_CQPDB1"}, + {I40E_GLPE_VFUDACTRL(0), 31, 4, 0, 0, "GLPE_VFUDACTRL"}, + {I40E_VFPE_WQEALLOC1, 0, 0, 0, 0, "VFPE_WQEALLOC1"}, + {I40E_GLPE_VFUDAUCFBQPN(0), 31, 4, 0, 0, "GLPE_VFUDAUCFBQPN"}, + {I40E_GLPE_VFFLMXMITALLOCERR(0), 31, 4, 0, 0, "GLPE_VFFLMXMITALLOCERR"}, + {I40E_GLPE_VFFLMQ1ALLOCERR(0), 31, 4, 0, 0, "GLPE_VFFLMQ1ALLOCERR"}, + {I40E_VFQF_HLUT(0), 15, 4, 0, 0, "VFQF_HLUT"}, + {I40E_GLPE_CPUSTATUS0, 0, 0, 0, 0, "GLPE_CPUSTATUS0"}, + {I40E_GLPE_CPUSTATUS1, 0, 0, 0, 0, "GLPE_CPUSTATUS1"}, + {I40E_GLPE_CPUSTATUS2, 0, 0, 0, 0, "GLPE_CPUSTATUS2"}, + {I40E_GLPE_CPUTRIG0, 0, 0, 0, 0, "GLPE_CPUTRIG0"}, + {I40E_GLPE_VFFLMOBJCTRL(0), 31, 4, 0, 0, "GLPE_VFFLMOBJCTRL"}, + {I40E_VFCM_PE_ERRINFO, 0, 0, 0, 0, "VFCM_PE_ERRINFO"}, + {I40E_GLPE_RUPM_GCTL, 0, 0, 0, 0, "GLPE_RUPM_GCTL"}, + {I40E_GLPE_DUAL40_RUPM, 0, 0, 0, 0, "GLPE_DUAL40_RUPM"}, + {I40E_GLPE_RUPM_TXHOST_EN, 0, 0, 0, 0, "GLPE_RUPM_TXHOST_EN"}, + {I40E_PRTPE_RUPM_THRES, 0, 0, 0, 0, "PRTPE_RUPM_THRES"}, + {I40E_PRTPE_RUPM_CTL, 0, 0, 0, 0, "PRTPE_RUPM_CTL"}, + {I40E_PRTPE_RUPM_PFCCTL, 0, 0, 0, 0, "PRTPE_RUPM_PFCCTL"}, + {I40E_PRTPE_RUPM_PFCPC, 0, 0, 0, 0, "PRTPE_RUPM_PFCPC"}, + {I40E_PRTPE_RUPM_PFCTCC, 0, 0, 0, 0, "PRTPE_RUPM_PFCTCC"}, + {I40E_GLPE_RUPM_PUSHPOOL, 0, 0, 0, 0, "GLPE_RUPM_PUSHPOOL"}, + {I40E_GLPE_RUPM_FLRPOOL, 0, 0, 0, 0, "GLPE_RUPM_FLRPOOL"}, + {I40E_GLPE_RUPM_PTXPOOL, 0, 0, 0, 0, "GLPE_RUPM_PTXPOOL"}, + {I40E_GLPE_RUPM_CQPPOOL, 0, 0, 0, 0, "GLPE_RUPM_CQPPOOL"}, + {I40E_PRTE_RUPM_TCCNTR03, 0, 0, 0, 0, "PRTE_RUPM_TCCNTR03"}, + {I40E_PRTPE_RUPM_TCCNTR47, 0, 0, 0, 0, "PRTPE_RUPM_TCCNTR47"}, + {I40E_PRTPE_RUPM_CNTR, 0, 0, 0, 0, "PRTPE_RUPM_CNTR"}, + {I40E_PRTPE_RUPM_PTXTCCNTR03, 0, 0, 0, 0, "PRTPE_RUPM_PTXTCCNTR03"}, + {I40E_PRTPE_RUPM_PTCTCCNTR47, 0, 0, 0, 0, "PRTPE_RUPM_PTCTCCNTR47"}, + {I40E_VFCM_PE_ERRDATA, 0, 0, 0, 0, "VFCM_PE_ERRDATA"}, + {I40E_PFPCI_VF_FLUSH_DONE, 0, 0, 0, 0, "PFPCI_VF_FLUSH_DONE"}, + {I40E_GLPES_PFRXVLANERR(0), 15, 4, 0, 0, "GLPES_PFRXVLANERR"}, + {I40E_GLPES_PFIP4RXOCTSLO(0), 15, 8, 0, 0, "GLPES_PFIP4RXOCTSLO"}, + {I40E_GLPES_PFIP4RXOCTSHI(0), 15, 8, 0, 0, "GLPES_PFIP4RXOCTSHI"}, + {I40E_GLPES_PFIP4RXPKTSLO(0), 15, 8, 0, 0, "GLPES_PFIP4RXPKTSLO"}, + {I40E_GLPES_PFIP4RXPKTSHI(0), 15, 8, 0, 0, "GLPES_PFIP4RXPKTSHI"}, + {I40E_GLPES_PFIP4RXDISCARD(0), 15, 4, 0, 0, "GLPES_PFIP4RXDISCARD"}, + {I40E_GLPES_PFIP4RXTRUNC(0), 15, 4, 0, 0, "GLPES_PFIP4RXTRUNC"}, + {I40E_GLPES_PFIP4RXFRAGSLO(0), 15, 8, 0, 0, "GLPES_PFIP4RXFRAGSLO"}, + {I40E_GLPES_PFIP4RXFRAGSHI(0), 15, 8, 0, 0, "GLPES_PFIP4RXFRAGSHI"}, + {I40E_GLPES_PFIP4RXMCOCTSLO(0), 15, 8, 0, 0, "GLPES_PFIP4RXMCOCTSLO"}, + {I40E_GLPES_PFIP4RXMCOCTSHI(0), 15, 8, 0, 0, "GLPES_PFIP4RXMCOCTSHI"}, + {I40E_GLPES_PFIP4RXMCPKTSLO(0), 15, 8, 0, 0, "GLPES_PFIP4RXMCPKTSLO"}, + {I40E_GLPES_PFIP4RXMCPKTSHI(0), 15, 8, 0, 0, "GLPES_PFIP4RXMCPKTSHI"}, + {I40E_GLPES_PFIP6RXOCTSLO(0), 15, 8, 0, 0, "GLPES_PFIP6RXOCTSLO"}, + {I40E_GLPES_PFIP6RXOCTSHI(0), 15, 8, 0, 0, "GLPES_PFIP6RXOCTSHI"}, + {I40E_GLPES_PFIP6RXPKTSLO(0), 15, 8, 0, 0, "GLPES_PFIP6RXPKTSLO"}, + {I40E_GLPES_PFIP6RXPKTSHI(0), 15, 8, 0, 0, "GLPES_PFIP6RXPKTSHI"}, + {I40E_GLPES_PFIP6RXDISCARD(0), 15, 4, 0, 0, "GLPES_PFIP6RXDISCARD"}, + {I40E_GLPES_PFIP6RXTRUNC(0), 15, 4, 0, 0, "GLPES_PFIP6RXTRUNC"}, + {I40E_GLPES_PFIP6RXFRAGSLO(0), 15, 8, 0, 0, "GLPES_PFIP6RXFRAGSLO"}, + {I40E_GLPES_PFIP6RXFRAGSHI(0), 15, 8, 0, 0, "GLPES_PFIP6RXFRAGSHI"}, + {I40E_GLPES_PFIP6RXMCOCTSLO(0), 15, 8, 0, 0, "GLPES_PFIP6RXMCOCTSLO"}, + 
{I40E_GLPES_PFIP6RXMCOCTSHI(0), 15, 8, 0, 0, "GLPES_PFIP6RXMCOCTSHI"}, + {I40E_GLPES_PFIP6RXMCPKTSLO(0), 15, 8, 0, 0, "GLPES_PFIP6RXMCPKTSLO"}, + {I40E_GLPES_PFIP6RXMCPKTSHI(0), 15, 8, 0, 0, "GLPES_PFIP6RXMCPKTSHI"}, + {I40E_GLPES_PFIP4TXOCTSLO(0), 15, 8, 0, 0, "GLPES_PFIP4TXOCTSLO"}, + {I40E_GLPES_PFIP4TXOCTSHI(0), 15, 8, 0, 0, "GLPES_PFIP4TXOCTSHI"}, + {I40E_GLPES_PFIP4TXPKTSLO(0), 15, 8, 0, 0, "GLPES_PFIP4TXPKTSLO"}, + {I40E_GLPES_PFIP4TXPKTSHI(0), 15, 8, 0, 0, "GLPES_PFIP4TXPKTSHI"}, + {I40E_GLPES_PFIP4TXFRAGSLO(0), 15, 8, 0, 0, "GLPES_PFIP4TXFRAGSLO"}, + {I40E_GLPES_PFIP4TXFRAGSHI(0), 15, 8, 0, 0, "GLPES_PFIP4TXFRAGSHI"}, + {I40E_GLPES_PFIP4TXMCOCTSLO(0), 15, 8, 0, 0, "GLPES_PFIP4TXMCOCTSLO"}, + {I40E_GLPES_PFIP4TXMCOCTSHI(0), 15, 8, 0, 0, "GLPES_PFIP4TXMCOCTSHI"}, + {I40E_GLPES_PFIP4TXMCPKTSLO(0), 15, 8, 0, 0, "GLPES_PFIP4TXMCPKTSLO"}, + {I40E_GLPES_PFIP4TXMCPKTSHI(0), 15, 8, 0, 0, "GLPES_PFIP4TXMCPKTSHI"}, + {I40E_GLPES_PFIP6TXOCTSLO(0), 15, 8, 0, 0, "GLPES_PFIP6TXOCTSLO"}, + {I40E_GLPES_PFIP6TXOCTSHI(0), 15, 8, 0, 0, "GLPES_PFIP6TXOCTSHI"}, + {I40E_GLPES_PFIP6TXPKTSLO(0), 15, 8, 0, 0, "GLPES_PFIP6TXPKTSLO"}, + {I40E_GLPES_PFIP6TXPKTSHI(0), 15, 8, 0, 0, "GLPES_PFIP6TXPKTSHI"}, + {I40E_GLPES_PFIP6TXFRAGSLO(0), 15, 8, 0, 0, "GLPES_PFIP6TXFRAGSLO"}, + {I40E_GLPES_PFIP6TXFRAGSHI(0), 15, 8, 0, 0, "GLPES_PFIP6TXFRAGSHI"}, + {I40E_GLPES_PFIP6TXMCOCTSLO(0), 15, 8, 0, 0, "GLPES_PFIP6TXMCOCTSLO"}, + {I40E_GLPES_PFIP6TXMCOCTSHI(0), 15, 8, 0, 0, "GLPES_PFIP6TXMCOCTSHI"}, + {I40E_GLPES_PFIP6TXMCPKTSLO(0), 15, 8, 0, 0, "GLPES_PFIP6TXMCPKTSLO"}, + {I40E_GLPES_PFIP6TXMCPKTSHI(0), 15, 8, 0, 0, "GLPES_PFIP6TXMCPKTSHI"}, + {I40E_GLPES_PFIP4TXNOROUTE(0), 15, 4, 0, 0, "GLPES_PFIP4TXNOROUTE"}, + {I40E_GLPES_PFIP6TXNOROUTE(0), 15, 4, 0, 0, "GLPES_PFIP6TXNOROUTE"}, + {I40E_GLPES_PFTCPRXSEGSLO(0), 15, 8, 0, 0, "GLPES_PFTCPRXSEGSLO"}, + {I40E_GLPES_PFTCPRXSEGSHI(0), 15, 8, 0, 0, "GLPES_PFTCPRXSEGSHI"}, + {I40E_GLPES_PFTCPRXOPTERR(0), 15, 4, 0, 0, "GLPES_PFTCPRXOPTERR"}, + {I40E_GLPES_PFTCPRXPROTOERR(0), 15, 4, 0, 0, "GLPES_PFTCPRXPROTOERR"}, + {I40E_GLPES_PFTCPTXSEGLO(0), 15, 8, 0, 0, "GLPES_PFTCPTXSEGLO"}, + {I40E_GLPES_PFTCPTXSEGHI(0), 15, 8, 0, 0, "GLPES_PFTCPTXSEGHI"}, + {I40E_GLPES_PFTCPRTXSEG(0), 15, 4, 0, 0, "GLPES_PFTCPRTXSEG"}, + {I40E_GLPES_PFUDPRXPKTSLO(0), 15, 8, 0, 0, "GLPES_PFUDPRXPKTSLO"}, + {I40E_GLPES_PFUDPRXPKTSHI(0), 15, 8, 0, 0, "GLPES_PFUDPRXPKTSHI"}, + {I40E_GLPES_PFUDPTXPKTSLO(0), 15, 8, 0, 0, "GLPES_PFUDPTXPKTSLO"}, + {I40E_GLPES_PFUDPTXPKTSHI(0), 15, 8, 0, 0, "GLPES_PFUDPTXPKTSHI"}, + {I40E_GLPES_PFRDMARXWRSLO(0), 15, 8, 0, 0, "GLPES_PFRDMARXWRSLO"}, + {I40E_GLPES_PFRDMARXWRSHI(0), 15, 8, 0, 0, "GLPES_PFRDMARXWRSHI"}, + {I40E_GLPES_PFRDMARXRDSLO(0), 15, 8, 0, 0, "GLPES_PFRDMARXRDSLO"}, + {I40E_GLPES_PFRDMARXRDSHI(0), 15, 8, 0, 0, "GLPES_PFRDMARXRDSHI"}, + {I40E_GLPES_PFRDMARXSNDSLO(0), 15, 8, 0, 0, "GLPES_PFRDMARXSNDSLO"}, + {I40E_GLPES_PFRDMARXSNDSHI(0), 15, 8, 0, 0, "GLPES_PFRDMARXSNDSHI"}, + {I40E_GLPES_PFRDMATXWRSLO(0), 15, 8, 0, 0, "GLPES_PFRDMATXWRSLO"}, + {I40E_GLPES_PFRDMATXWRSHI(0), 15, 8, 0, 0, "GLPES_PFRDMATXWRSHI"}, + {I40E_GLPES_PFRDMATXRDSLO(0), 15, 8, 0, 0, "GLPES_PFRDMATXRDSLO"}, + {I40E_GLPES_PFRDMATXRDSHI(0), 15, 8, 0, 0, "GLPES_PFRDMATXRDSHI"}, + {I40E_GLPES_PFRDMATXSNDSLO(0), 15, 8, 0, 0, "GLPES_PFRDMATXSNDSLO"}, + {I40E_GLPES_PFRDMATXSNDSHI(0), 15, 8, 0, 0, "GLPES_PFRDMATXSNDSHI"}, + {I40E_GLPES_PFRDMAVBNDLO(0), 15, 8, 0, 0, "GLPES_PFRDMAVBNDLO"}, + {I40E_GLPES_PFRDMAVBNDHI(0), 15, 8, 0, 0, "GLPES_PFRDMAVBNDHI"}, + {I40E_GLPES_PFRDMAVINVLO(0), 15, 8, 0, 0, 
"GLPES_PFRDMAVINVLO"}, + {I40E_GLPES_PFRDMAVINVHI(0), 15, 8, 0, 0, "GLPES_PFRDMAVINVHI"}, + {I40E_GLPES_VFRXVLANERR(0), 31, 4, 0, 0, "GLPES_VFRXVLANERR"}, + {I40E_GLPES_VFIP4RXOCTSLO(0), 31, 8, 0, 0, "GLPES_VFIP4RXOCTSLO"}, + {I40E_GLPES_VFIP4RXOCTSHI(0), 31, 8, 0, 0, "GLPES_VFIP4RXOCTSHI"}, + {I40E_GLPES_VFIP4RXPKTSLO(0), 31, 8, 0, 0, "GLPES_VFIP4RXPKTSLO"}, + {I40E_GLPES_VFIP4RXPKTSHI(0), 31, 8, 0, 0, "GLPES_VFIP4RXPKTSHI"}, + {I40E_GLPES_VFIP4RXDISCARD(0), 31, 4, 0, 0, "GLPES_VFIP4RXDISCARD"}, + {I40E_GLPES_VFIP4RXTRUNC(0), 31, 4, 0, 0, "GLPES_VFIP4RXTRUNC"}, + {I40E_GLPES_VFIP4RXFRAGSLO(0), 31, 8, 0, 0, "GLPES_VFIP4RXFRAGSLO"}, + {I40E_GLPES_VFIP4RXFRAGSHI(0), 31, 8, 0, 0, "GLPES_VFIP4RXFRAGSHI"}, + {I40E_GLPES_VFIP4RXMCOCTSLO(0), 31, 8, 0, 0, "GLPES_VFIP4RXMCOCTSLO"}, + {I40E_GLPES_VFIP4RXMCOCTSHI(0), 31, 8, 0, 0, "GLPES_VFIP4RXMCOCTSHI"}, + {I40E_GLPES_VFIP4RXMCPKTSLO(0), 31, 8, 0, 0, "GLPES_VFIP4RXMCPKTSLO"}, + {I40E_GLPES_VFIP4RXMCPKTSHI(0), 31, 8, 0, 0, "GLPES_VFIP4RXMCPKTSHI"}, + {I40E_GLPES_VFIP6RXOCTSLO(0), 31, 8, 0, 0, "GLPES_VFIP6RXOCTSLO"}, + {I40E_GLPES_VFIP6RXOCTSHI(0), 31, 8, 0, 0, "GLPES_VFIP6RXOCTSHI"}, + {I40E_GLPES_VFIP6RXPKTSLO(0), 31, 8, 0, 0, "GLPES_VFIP6RXPKTSLO"}, + {I40E_GLPES_VFIP6RXPKTSHI(0), 31, 8, 0, 0, "GLPES_VFIP6RXPKTSHI"}, + {I40E_GLPES_VFIP6RXDISCARD(0), 31, 4, 0, 0, "GLPES_VFIP6RXDISCARD"}, + {I40E_GLPES_VFIP6RXTRUNC(0), 31, 4, 0, 0, "GLPES_VFIP6RXTRUNC"}, + {I40E_GLPES_VFIP6RXFRAGSLO(0), 31, 8, 0, 0, "GLPES_VFIP6RXFRAGSLO"}, + {I40E_GLPES_VFIP6RXFRAGSHI(0), 31, 8, 0, 0, "GLPES_VFIP6RXFRAGSHI"}, + {I40E_GLPES_VFIP6RXMCOCTSLO(0), 31, 8, 0, 0, "GLPES_VFIP6RXMCOCTSLO"}, + {I40E_GLPES_VFIP6RXMCOCTSHI(0), 31, 8, 0, 0, "GLPES_VFIP6RXMCOCTSHI"}, + {I40E_GLPES_VFIP6RXMCPKTSLO(0), 31, 8, 0, 0, "GLPES_VFIP6RXMCPKTSLO"}, + {I40E_GLPES_VFIP6RXMCPKTSHI(0), 31, 8, 0, 0, "GLPES_VFIP6RXMCPKTSHI"}, + {I40E_GLPES_VFIP4TXOCTSLO(0), 31, 8, 0, 0, "GLPES_VFIP4TXOCTSLO"}, + {I40E_GLPES_VFIP4TXOCTSHI(0), 31, 8, 0, 0, "GLPES_VFIP4TXOCTSHI"}, + {I40E_GLPES_VFIP4TXPKTSLO(0), 31, 8, 0, 0, "GLPES_VFIP4TXPKTSLO"}, + {I40E_GLPES_VFIP4TXPKTSHI(0), 31, 8, 0, 0, "GLPES_VFIP4TXPKTSHI"}, + {I40E_GLPES_VFIP4TXFRAGSLO(0), 31, 8, 0, 0, "GLPES_VFIP4TXFRAGSLO"}, + {I40E_GLPES_VFIP4TXFRAGSHI(0), 31, 8, 0, 0, "GLPES_VFIP4TXFRAGSHI"}, + {I40E_GLPES_VFIP4TXMCOCTSLO(0), 31, 8, 0, 0, "GLPES_VFIP4TXMCOCTSLO"}, + {I40E_GLPES_VFIP4TXMCOCTSHI(0), 31, 8, 0, 0, "GLPES_VFIP4TXMCOCTSHI"}, + {I40E_GLPES_VFIP4TXMCPKTSLO(0), 31, 8, 0, 0, "GLPES_VFIP4TXMCPKTSLO"}, + {I40E_GLPES_VFIP4TXMCPKTSHI(0), 31, 8, 0, 0, "GLPES_VFIP4TXMCPKTSHI"}, + {I40E_GLPES_VFIP6TXOCTSLO(0), 31, 8, 0, 0, "GLPES_VFIP6TXOCTSLO"}, + {I40E_GLPES_VFIP6TXOCTSHI(0), 31, 8, 0, 0, "GLPES_VFIP6TXOCTSHI"}, + {I40E_GLPES_VFIP6TXPKTSLO(0), 31, 8, 0, 0, "GLPES_VFIP6TXPKTSLO"}, + {I40E_GLPES_VFIP6TXPKTSHI(0), 31, 8, 0, 0, "GLPES_VFIP6TXPKTSHI"}, + {I40E_GLPES_VFIP6TXFRAGSLO(0), 31, 8, 0, 0, "GLPES_VFIP6TXFRAGSLO"}, + {I40E_GLPES_VFIP6TXFRAGSHI(0), 31, 8, 0, 0, "GLPES_VFIP6TXFRAGSHI"}, + {I40E_GLPES_VFIP6TXMCOCTSLO(0), 31, 8, 0, 0, "GLPES_VFIP6TXMCOCTSLO"}, + {I40E_GLPES_VFIP6TXMCOCTSHI(0), 31, 8, 0, 0, "GLPES_VFIP6TXMCOCTSHI"}, + {I40E_GLPES_VFIP6TXMCPKTSLO(0), 31, 8, 0, 0, "GLPES_VFIP6TXMCPKTSLO"}, + {I40E_GLPES_VFIP6TXMCPKTSHI(0), 31, 8, 0, 0, "GLPES_VFIP6TXMCPKTSHI"}, + {I40E_GLPES_VFIP4TXNOROUTE(0), 31, 4, 0, 0, "GLPES_VFIP4TXNOROUTE"}, + {I40E_GLPES_VFIP6TXNOROUTE(0), 31, 4, 0, 0, "GLPES_VFIP6TXNOROUTE"}, + {I40E_GLPES_VFTCPRXSEGSLO(0), 31, 8, 0, 0, "GLPES_VFTCPRXSEGSLO"}, + {I40E_GLPES_VFTCPRXSEGSHI(0), 31, 8, 0, 0, "GLPES_VFTCPRXSEGSHI"}, + 
{I40E_GLPES_VFTCPRXOPTERR(0), 31, 4, 0, 0, "GLPES_VFTCPRXOPTERR"}, + {I40E_GLPES_VFTCPRXPROTOERR(0), 31, 4, 0, 0, "GLPES_VFTCPRXPROTOERR"}, + {I40E_GLPES_VFTCPTXSEGLO(0), 31, 8, 0, 0, "GLPES_VFTCPTXSEGLO"}, + {I40E_GLPES_VFTCPTXSEGHI(0), 31, 8, 0, 0, "GLPES_VFTCPTXSEGHI"}, + {I40E_GLPES_VFTCPRTXSEG(0), 31, 4, 0, 0, "GLPES_VFTCPRTXSEG"}, + {I40E_GLPES_VFUDPRXPKTSLO(0), 31, 8, 0, 0, "GLPES_VFUDPRXPKTSLO"}, + {I40E_GLPES_VFUDPRXPKTSHI(0), 31, 8, 0, 0, "GLPES_VFUDPRXPKTSHI"}, + {I40E_GLPES_VFUDPTXPKTSLO(0), 31, 8, 0, 0, "GLPES_VFUDPTXPKTSLO"}, + {I40E_GLPES_VFUDPTXPKTSHI(0), 31, 8, 0, 0, "GLPES_VFUDPTXPKTSHI"}, + {I40E_GLPES_VFRDMARXWRSLO(0), 31, 8, 0, 0, "GLPES_VFRDMARXWRSLO"}, + {I40E_GLPES_VFRDMARXWRSHI(0), 31, 8, 0, 0, "GLPES_VFRDMARXWRSHI"}, + {I40E_GLPES_VFRDMARXRDSLO(0), 31, 8, 0, 0, "GLPES_VFRDMARXRDSLO"}, + {I40E_GLPES_VFRDMARXRDSHI(0), 31, 8, 0, 0, "GLPES_VFRDMARXRDSHI"}, + {I40E_GLPES_VFRDMARXSNDSLO(0), 31, 8, 0, 0, "GLPES_VFRDMARXSNDSLO"}, + {I40E_GLPES_VFRDMARXSNDSHI(0), 31, 8, 0, 0, "GLPES_VFRDMARXSNDSHI"}, + {I40E_GLPES_VFRDMATXWRSLO(0), 31, 8, 0, 0, "GLPES_VFRDMATXWRSLO"}, + {I40E_GLPES_VFRDMATXWRSHI(0), 31, 8, 0, 0, "GLPES_VFRDMATXWRSHI"}, + {I40E_GLPES_VFRDMATXRDSLO(0), 31, 8, 0, 0, "GLPES_VFRDMATXRDSLO"}, + {I40E_GLPES_VFRDMATXRDSHI(0), 31, 8, 0, 0, "GLPES_VFRDMATXRDSHI"}, + {I40E_GLPES_VFRDMATXSNDSLO(0), 31, 8, 0, 0, "GLPES_VFRDMATXSNDSLO"}, + {I40E_GLPES_VFRDMATXSNDSHI(0), 31, 8, 0, 0, "GLPES_VFRDMATXSNDSHI"}, + {I40E_GLPES_VFRDMAVBNDLO(0), 31, 8, 0, 0, "GLPES_VFRDMAVBNDLO"}, + {I40E_GLPES_VFRDMAVBNDHI(0), 31, 8, 0, 0, "GLPES_VFRDMAVBNDHI"}, + {I40E_GLPES_VFRDMAVINVLO(0), 31, 8, 0, 0, "GLPES_VFRDMAVINVLO"}, + {I40E_GLPES_VFRDMAVINVHI(0), 31, 8, 0, 0, "GLPES_VFRDMAVINVHI"}, + {I40E_GLPES_RDMARXUNALIGN, 0, 0, 0, 0, "GLPES_RDMARXUNALIGN"}, + {I40E_GLPES_RDMARXOOONOMARK, 0, 0, 0, 0, "GLPES_RDMARXOOONOMARK"}, + {I40E_GLPES_RDMARXMULTFPDUSLO, 0, 0, 0, 0, "GLPES_RDMARXMULTFPDUSLO"}, + {I40E_GLPES_RDMARXMULTFPDUSHI, 0, 0, 0, 0, "GLPES_RDMARXMULTFPDUSHI"}, + {I40E_GLPES_RDMARXOOODDPLO, 0, 0, 0, 0, "GLPES_RDMARXOOODDPLO"}, + {I40E_GLPES_RDMARXOOODDPHI, 0, 0, 0, 0, "GLPES_RDMARXOOODDPHI"}, + {I40E_GLPES_TCPRXPUREACKSLO, 0, 0, 0, 0, "GLPES_TCPRXPUREACKSLO"}, + {I40E_GLPES_TCPRXPUREACKHI, 0, 0, 0, 0, "GLPES_TCPRXPUREACKHI"}, + {I40E_GLPES_TCPRXONEHOLELO, 0, 0, 0, 0, "GLPES_TCPRXONEHOLELO"}, + {I40E_GLPES_TCPRXONEHOLEHI, 0, 0, 0, 0, "GLPES_TCPRXONEHOLEHI"}, + {I40E_GLPES_TCPRXTWOHOLELO, 0, 0, 0, 0, "GLPES_TCPRXTWOHOLELO"}, + {I40E_GLPES_TCPRXTWOHOLEHI, 0, 0, 0, 0, "GLPES_TCPRXTWOHOLEHI"}, + {I40E_GLPES_TCPRXTHREEHOLELO, 0, 0, 0, 0, "GLPES_TCPRXTHREEHOLELO"}, + {I40E_GLPES_TCPRXTHREEHOLEHI, 0, 0, 0, 0, "GLPES_TCPRXTHREEHOLEHI"}, + {I40E_GLPES_TCPRXFOURHOLELO, 0, 0, 0, 0, "GLPES_TCPRXFOURHOLELO"}, + {I40E_GLPES_TCPRXFOURHOLEHI, 0, 0, 0, 0, "GLPES_TCPRXFOURHOLEHI"}, + {I40E_GLPES_TCPTXRETRANSFASTLO, 0, 0, 0, 0, "GLPES_TCPTXRETRANSFASTLO"}, + {I40E_GLPES_TCPTXRETRANSFASTHI, 0, 0, 0, 0, "GLPES_TCPTXRETRANSFASTHI"}, + {I40E_GLPES_TCPTXTOUTSFASTLO, 0, 0, 0, 0, "GLPES_TCPTXTOUTSFASTLO"}, + {I40E_GLPES_TCPTXTOUTSFASTHI, 0, 0, 0, 0, "GLPES_TCPTXTOUTSFASTHI"}, + {I40E_GLPES_TCPTXTOUTSLO, 0, 0, 0, 0, "GLPES_TCPTXTOUTSLO"}, + {I40E_GLPES_TCPTXTOUTSHI, 0, 0, 0, 0, "GLPES_TCPTXTOUTSHI"}, + {I40E_PRTDCB_TCMSTC_RLPM(0), 7, 32, 0, 0, "PRTDCB_TCMSTC_RLPM"}, + {I40E_PRTDCB_RLPMC, 0, 0, 0, 0, "PRTDCB_RLPMC"}, + {I40E_PRTDCB_TCPMC_RLPM, 0, 0, 0, 0, "PRTDCB_TCPMC_RLPM"}, + {I40E_VFINT_ITRN(0, 0), 2, 2048, 511, 4, "VFINT_ITRN"}, + {I40E_VFINT_DYN_CTLN(0), 511, 4, 0, 0, "VFINT_DYN_CTLN"}, + 
{I40E_VPINT_LNKLSTN(0), 511, 4, 0, 0, "VPINT_LNKLSTN"}, + {I40E_VPINT_RATEN(0), 511, 4, 0, 0, "VPINT_RATEN"}, + {I40E_VPINT_CEQCTL(0), 511, 4, 0, 0, "VPINT_CEQCTL"}, + {I40E_VFINT_ITR0(0, 0), 2, 1024, 127, 4, "VFINT_ITR0"}, + {I40E_VFINT_STAT_CTL0(0), 127, 4, 0, 0, "VFINT_STAT_CTL0"}, + {I40E_VFINT_DYN_CTL0(0), 127, 4, 0, 0, "VFINT_DYN_CTL0"}, + {I40E_VPINT_LNKLST0(0), 127, 4, 0, 0, "VPINT_LNKLST0"}, + {I40E_VPINT_RATE0(0), 127, 4, 0, 0, "VPINT_RATE0"}, + {I40E_VPINT_AEQCTL(0), 127, 4, 0, 0, "VPINT_AEQCTL"}, + {I40E_VFINT_ICR0(0), 127, 4, 0, 0, "VFINT_ICR0"}, + {I40E_VFINT_ICR0_ENA(0), 127, 4, 0, 0, "VFINT_ICR0_ENA"}, + {I40E_PFINT_ITRN(0, 0), 2, 2048, 511, 4, "PFINT_ITRN"}, + {I40E_PFINT_DYN_CTLN(0), 511, 4, 0, 0, "PFINT_DYN_CTLN"}, + {I40E_PFINT_LNKLSTN(0), 511, 4, 0, 0, "PFINT_LNKLSTN"}, + {I40E_PFINT_RATEN(0), 511, 4, 0, 0, "PFINT_RATEN"}, + {I40E_PFINT_CEQCTL(0), 511, 4, 0, 0, "PFINT_CEQCTL"}, + {I40E_PFINT_ITR0(0), 2, 128, 0, 0, "PFINT_ITR0"}, + {I40E_PFINT_STAT_CTL0, 0, 0, 0, 0, "PFINT_STAT_CTL0"}, + {I40E_PFINT_DYN_CTL0, 0, 0, 0, 0, "PFINT_DYN_CTL0"}, + {I40E_PFINT_LNKLST0, 0, 0, 0, 0, "PFINT_LNKLST0"}, + {I40E_PFINT_RATE0, 0, 0, 0, 0, "PFINT_RATE0"}, + {I40E_PFINT_AEQCTL, 0, 0, 0, 0, "PFINT_AEQCTL"}, + {I40E_PFINT_ICR0, 0, 0, 0, 0, "PFINT_ICR0"}, + {I40E_PFINT_ICR0_ENA, 0, 0, 0, 0, "PFINT_ICR0_ENA"}, + {I40E_QINT_RQCTL(0), 1535, 4, 0, 0, "QINT_RQCTL"}, + {I40E_QINT_TQCTL(0), 1535, 4, 0, 0, "QINT_TQCTL"}, + {I40E_PFGEN_PORTMDIO_NUM, 0, 0, 0, 0, "PFGEN_PORTMDIO_NUM"}, + {I40E_GLINT_CTL, 0, 0, 0, 0, "GLINT_CTL"}, + {I40E_GLLAN_TSOMSK_F, 0, 0, 0, 0, "GLLAN_TSOMSK_F"}, + {I40E_GLLAN_TSOMSK_M, 0, 0, 0, 0, "GLLAN_TSOMSK_M"}, + {I40E_GLLAN_TSOMSK_L, 0, 0, 0, 0, "GLLAN_TSOMSK_L"}, + {I40E_GL_RDPU_CNTRL, 0, 0, 0, 0, "GL_RDPU_CNTRL"}, + {I40E_PFPM_FHFT_LENGTH(0), 7, 128, 0, 0, "PFPM_FHFT_LENGTH"}, + {I40E_PFPM_WUC, 0, 0, 0, 0, "PFPM_WUC"}, + {I40E_PFPM_WUFC, 0, 0, 0, 0, "PFPM_WUFC"}, + {I40E_PFPM_WUS, 0, 0, 0, 0, "PFPM_WUS"}, + {I40E_PRTPM_FHFHR, 0, 0, 0, 0, "PRTPM_FHFHR"}, + {I40E_GLPM_WUMC, 0, 0, 0, 0, "GLPM_WUMC"}, + {I40E_VPLAN_QTABLE(0, 0), 15, 1024, 127, 4, "VPLAN_QTABLE"}, + {I40E_VPLAN_MAPENA(0), 127, 4, 0, 0, "VPLAN_MAPENA"}, + {I40E_VFGEN_RSTAT1(0), 127, 4, 0, 0, "VFGEN_RSTAT1"}, + {I40E_VPLAN_QBASE(0), 127, 4, 0, 0, "VPLAN_QBASE"}, + {I40E_PF_ATQBAL, 0, 0, 0, 0, "PF_ATQBAL"}, + {I40E_GL_ATQBAL, 0, 0, 0, 0, "GL_ATQBAL"}, + {I40E_PF_ARQBAL, 0, 0, 0, 0, "PF_ARQBAL"}, + {I40E_GL_ARQBAL, 0, 0, 0, 0, "GL_ARQBAL"}, + {I40E_PF_ATQBAH, 0, 0, 0, 0, "PF_ATQBAH"}, + {I40E_GL_ATQBAH, 0, 0, 0, 0, "GL_ATQBAH"}, + {I40E_PF_ARQBAH, 0, 0, 0, 0, "PF_ARQBAH"}, + {I40E_GL_ARQBAH, 0, 0, 0, 0, "GL_ARQBAH"}, + {I40E_PF_ATQLEN, 0, 0, 0, 0, "PF_ATQLEN"}, + {I40E_GL_ATQLEN, 0, 0, 0, 0, "GL_ATQLEN"}, + {I40E_PF_ARQLEN, 0, 0, 0, 0, "PF_ARQLEN"}, + {I40E_PF_ATQH, 0, 0, 0, 0, "PF_ATQH"}, + {I40E_GL_ATQH, 0, 0, 0, 0, "GL_ATQH"}, + {I40E_PF_ARQH, 0, 0, 0, 0, "PF_ARQH"}, + {I40E_GL_ARQH, 0, 0, 0, 0, "GL_ARQH"}, + {I40E_PF_ATQT, 0, 0, 0, 0, "PF_ATQT"}, + {I40E_GL_ATQT, 0, 0, 0, 0, "GL_ATQT"}, + {I40E_PF_ARQT, 0, 0, 0, 0, "PF_ARQT"}, + {I40E_GL_ARQT, 0, 0, 0, 0, "GL_ARQT"}, + {I40E_VF_ATQBAL(0), 127, 4, 0, 0, "VF_ATQBAL"}, + {I40E_VF_ARQBAL(0), 127, 4, 0, 0, "VF_ARQBAL"}, + {I40E_VF_ATQBAH(0), 127, 4, 0, 0, "VF_ATQBAH"}, + {I40E_VF_ARQBAH(0), 127, 4, 0, 0, "VF_ARQBAH"}, + {I40E_VF_ATQLEN(0), 127, 4, 0, 0, "VF_ATQLEN"}, + {I40E_VF_ARQLEN(0), 127, 4, 0, 0, "VF_ARQLEN"}, + {I40E_VF_ATQH(0), 127, 4, 0, 0, "VF_ATQH"}, + {I40E_VF_ARQH(0), 127, 4, 0, 0, "VF_ARQH"}, + {I40E_VF_ATQT(0), 127, 4, 0, 0, "VF_ATQT"}, + 
{I40E_VF_ARQT(0), 127, 4, 0, 0, "VF_ARQT"}, + {I40E_PRTDCB_GENC, 0, 0, 0, 0, "PRTDCB_GENC"}, + {I40E_PRTDCB_GENS, 0, 0, 0, 0, "PRTDCB_GENS"}, + {I40E_GLDCB_GENC, 0, 0, 0, 0, "GLDCB_GENC"}, + {I40E_GL_FWSTS, 0, 0, 0, 0, "GL_FWSTS"}, + {I40E_GL_FWRESETCNT, 0, 0, 0, 0, "GL_FWRESETCNT"}, + {I40E_GL_VF_CTRL_TX(0), 127, 4, 0, 0, "GL_VF_CTRL_TX"}, + {I40E_GL_VF_CTRL_RX(0), 127, 4, 0, 0, "GL_VF_CTRL_RX"}, + {I40E_PRTTSYN_CTL1, 0, 0, 0, 0, "PRTTSYN_CTL1"}, + {I40E_PRTTSYN_RXTIME_H(0), 3, 32, 0, 0, "PRTTSYN_RXTIME_H"}, + {I40E_PRTTSYN_RXTIME_L(0), 3, 32, 0, 0, "PRTTSYN_RXTIME_L"}, + {I40E_PRTTSYN_STAT_1, 0, 0, 0, 0, "PRTTSYN_STAT_1"}, + {I40E_PRT_MNG_FTFT_MASK(0), 7, 32, 0, 0, "PRT_MNG_FTFT_MASK"}, + {I40E_PRT_MNG_FTFT_LENGTH, 0, 0, 0, 0, "PRT_MNG_FTFT_LENGTH"}, + {I40E_PRT_MNG_FTFT_DATA(0), 31, 32, 0, 0, "PRT_MNG_FTFT_DATA"}, + {I40E_GL_PPRS_SPARE, 0, 0, 0, 0, "GL_PPRS_SPARE"}, + {I40E_PFGEN_STATE, 0, 0, 0, 0, "PFGEN_STATE"}, + {I40E_PFINT_GPIO_ENA, 0, 0, 0, 0, "PFINT_GPIO_ENA"}, + {I40E_GLGEN_MISC_SPARE, 0, 0, 0, 0, "GLGEN_MISC_SPARE"}, + {I40E_GLGEN_GPIO_CTL(0), 29, 4, 0, 0, "GLGEN_GPIO_CTL"}, + {I40E_GLGEN_LED_CTL, 0, 0, 0, 0, "GLGEN_LED_CTL"}, + {I40E_GLGEN_GPIO_STAT, 0, 0, 0, 0, "GLGEN_GPIO_STAT"}, + {I40E_GLGEN_GPIO_TRANSIT, 0, 0, 0, 0, "GLGEN_GPIO_TRANSIT"}, + {I40E_GLGEN_GPIO_SET, 0, 0, 0, 0, "GLGEN_GPIO_SET"}, + {I40E_EMPINT_GPIO_ENA, 0, 0, 0, 0, "EMPINT_GPIO_ENA"}, + {I40E_GLGEN_MSCA(0), 3, 4, 0, 0, "GLGEN_MSCA"}, + {I40E_GLGEN_MSRWD(0), 3, 4, 0, 0, "GLGEN_MSRWD"}, + {I40E_GLGEN_I2CPARAMS(0), 3, 4, 0, 0, "GLGEN_I2CPARAMS"}, + {I40E_GLVFGEN_TIMER, 0, 0, 0, 0, "GLVFGEN_TIMER"}, + {I40E_GLGEN_MDIO_I2C_SEL(0), 3, 4, 0, 0, "GLGEN_MDIO_I2C_SEL"}, + {I40E_GLGEN_MDIO_CTRL(0), 3, 4, 0, 0, "GLGEN_MDIO_CTRL"}, + {I40E_GLGEN_I2CCMD(0), 3, 4, 0, 0, "GLGEN_I2CCMD"}, + {I40E_PRTMAC_PCS_XAUI_SWAP_A, 0, 0, 0, 0, "PRTMAC_PCS_XAUI_SWAP_A"}, + {I40E_PRTMAC_PCS_XAUI_SWAP_B, 0, 0, 0, 0, "PRTMAC_PCS_XAUI_SWAP_B"}, + {I40E_VSIGEN_RTRIG(0), 383, 4, 0, 0, "VSIGEN_RTRIG"}, + {I40E_VSIGEN_RSTAT(0), 383, 4, 0, 0, "VSIGEN_RSTAT"}, + {I40E_VPGEN_VFRTRIG(0), 127, 4, 0, 0, "VPGEN_VFRTRIG"}, + {I40E_VPGEN_VFRSTAT(0), 127, 4, 0, 0, "VPGEN_VFRSTAT"}, + {I40E_PFGEN_CTRL, 0, 0, 0, 0, "PFGEN_CTRL"}, + {I40E_PFGEN_DRUN, 0, 0, 0, 0, "PFGEN_DRUN"}, + {I40E_GLGEN_VFLRSTAT(0), 3, 4, 0, 0, "GLGEN_VFLRSTAT"}, + {I40E_GL_UFUSE, 0, 0, 0, 0, "GL_UFUSE"}, + {I40E_GL_GP_FUSE(0), 28, 4, 0, 0, "GL_GP_FUSE"}, + {I40E_PRTDCB_TETSC_TPB, 0, 0, 0, 0, "PRTDCB_TETSC_TPB"}, + {I40E_PF_FUNC_RID, 0, 0, 0, 0, "PF_FUNC_RID"}, + {I40E_PF_PCI_CIAA, 0, 0, 0, 0, "PF_PCI_CIAA"}, + {I40E_PF_PCI_CIAD, 0, 0, 0, 0, "PF_PCI_CIAD"}, + {I40E_PFPCI_FACTPS, 0, 0, 0, 0, "PFPCI_FACTPS"}, + {I40E_PFPCI_ICAUSE, 0, 0, 0, 0, "PFPCI_ICAUSE"}, + {I40E_PFPCI_IENA, 0, 0, 0, 0, "PFPCI_IENA"}, + {I40E_PFPCI_VMINDEX, 0, 0, 0, 0, "PFPCI_VMINDEX"}, + {I40E_PFPCI_VMPEND, 0, 0, 0, 0, "PFPCI_VMPEND"}, + {I40E_GLPCI_DREVID, 0, 0, 0, 0, "GLPCI_DREVID"}, + {I40E_GLPCI_BYTCTH, 0, 0, 0, 0, "GLPCI_BYTCTH"}, + {I40E_GLPCI_BYTCTL, 0, 0, 0, 0, "GLPCI_BYTCTL"}, + {I40E_GLPCI_GSCL_1, 0, 0, 0, 0, "GLPCI_GSCL_1"}, + {I40E_GLPCI_GSCL_2, 0, 0, 0, 0, "GLPCI_GSCL_2"}, + {I40E_GLPCI_GSCL_5_8(0), 3, 4, 0, 0, "GLPCI_GSCL_5_8"}, + {I40E_GLPCI_GSCN_0_3(0), 3, 4, 0, 0, "GLPCI_GSCN_0_3"}, + {I40E_GLPCI_PKTCT, 0, 0, 0, 0, "GLPCI_PKTCT"}, + {I40E_GLPCI_PQ_MAX_USED_SPC, 0, 0, 0, 0, "GLPCI_PQ_MAX_USED_SPC"}, + {I40E_GLPCI_PM_MUX_PFB, 0, 0, 0, 0, "GLPCI_PM_MUX_PFB"}, + {I40E_GLPCI_PM_MUX_NPQ, 0, 0, 0, 0, "GLPCI_PM_MUX_NPQ"}, + {I40E_GLPCI_SPARE_BITS_0, 0, 0, 0, 0, "GLPCI_SPARE_BITS_0"}, + 
{I40E_GLPCI_SPARE_BITS_1, 0, 0, 0, 0, "GLPCI_SPARE_BITS_1"}, + {I40E_GLPCI_CUR_RLAN_ALWD, 0, 0, 0, 0, "GLPCI_CUR_RLAN_ALWD"}, + {I40E_GLPCI_CUR_TLAN_ALWD, 0, 0, 0, 0, "GLPCI_CUR_TLAN_ALWD"}, + {I40E_GLPCI_CUR_RXPE_ALWD, 0, 0, 0, 0, "GLPCI_CUR_RXPE_ALWD"}, + {I40E_GLPCI_CUR_TXPE_ALWD, 0, 0, 0, 0, "GLPCI_CUR_TXPE_ALWD"}, + {I40E_GLPCI_CUR_PMAT_ALWD, 0, 0, 0, 0, "GLPCI_CUR_PMAT_ALWD"}, + {I40E_GLPCI_CUR_MNG_ALWD, 0, 0, 0, 0, "GLPCI_CUR_MNG_ALWD"}, + {I40E_GLPCI_CUR_TDPU_ALWD, 0, 0, 0, 0, "GLPCI_CUR_TDPU_ALWD"}, + {I40E_GLPCI_CUR_RLAN_RSVD, 0, 0, 0, 0, "GLPCI_CUR_RLAN_RSVD"}, + {I40E_GLPCI_CUR_TLAN_RSVD, 0, 0, 0, 0, "GLPCI_CUR_TLAN_RSVD"}, + {I40E_GLPCI_CUR_RXPE_RSVD, 0, 0, 0, 0, "GLPCI_CUR_RXPE_RSVD"}, + {I40E_GLPCI_CUR_TXPE_RSVD, 0, 0, 0, 0, "GLPCI_CUR_TXPE_RSVD"}, + {I40E_GLPCI_CUR_PMAT_RSVD, 0, 0, 0, 0, "GLPCI_CUR_PMAT_RSVD"}, + {I40E_GLPCI_CUR_MNG_RSVD, 0, 0, 0, 0, "GLPCI_CUR_MNG_RSVD"}, + {I40E_GLPCI_CUR_TDPU_RSVD, 0, 0, 0, 0, "GLPCI_CUR_TDPU_RSVD"}, + {I40E_PFPCI_VF_FLUSH_DONE1(0), 127, 4, 0, 0, "PFPCI_VF_FLUSH_DONE1"}, + {I40E_PFPCI_PF_FLUSH_DONE, 0, 0, 0, 0, "PFPCI_PF_FLUSH_DONE"}, + {I40E_PFPCI_VM_FLUSH_DONE, 0, 0, 0, 0, "PFPCI_VM_FLUSH_DONE"}, + {I40E_GLPCI_NPQ_CFG, 0, 0, 0, 0, "GLPCI_NPQ_CFG"}, + {I40E_GLPCI_CUR_CLNT_COMMON, 0, 0, 0, 0, "GLPCI_CUR_CLNT_COMMON"}, + {I40E_GLPCI_CUR_CLNT_PIPEMON, 0, 0, 0, 0, "GLPCI_CUR_CLNT_PIPEMON"}, + {I40E_GLPCI_CUR_WATMK_CLNT_COMMON, 0, 0, 0, 0, + "GLPCI_CUR_WATMK_CLNT_COMMON"}, + {I40E_GLPCI_WATMK_CLNT_PIPEMON, 0, 0, 0, 0, + "GLPCI_WATMK_CLNT_PIPEMON"}, + {I40E_GLPCI_WATMK_RLAN_ALWD, 0, 0, 0, 0, "GLPCI_WATMK_RLAN_ALWD"}, + {I40E_GLPCI_WATMK_TLAN_ALWD, 0, 0, 0, 0, "GLPCI_WATMK_TLAN_ALWD"}, + {I40E_GLPCI_WATMK_RXPE_ALWD, 0, 0, 0, 0, "GLPCI_WATMK_RXPE_ALWD"}, + {I40E_GLPCI_WATMK_TXPE_ALWD, 0, 0, 0, 0, "GLPCI_WATMK_TXPE_ALWD"}, + {I40E_GLPCI_WATMK_PMAT_ALWD, 0, 0, 0, 0, "GLPCI_WATMK_PMAT_ALWD"}, + {I40E_GLPCI_WATMK_MNG_ALWD, 0, 0, 0, 0, "GLPCI_WATMK_MNG_ALWD"}, + {I40E_GLPCI_WATMK_TPDU_ALWD, 0, 0, 0, 0, "GLPCI_WATMK_TPDU_ALWD"}, + {I40E_PRTDCB_TCMSTC(0), 7, 32, 0, 0, "PRTDCB_TCMSTC"}, + {I40E_PRTDCB_TFMSTC(0), 7, 32, 0, 0, "PRTDCB_TFMSTC"}, + {I40E_PRTDCB_TDPMC, 0, 0, 0, 0, "PRTDCB_TDPMC"}, + {I40E_PRTDCB_TCWSTC(0), 7, 32, 0, 0, "PRTDCB_TCWSTC"}, + {I40E_PRTDCB_TCPMC, 0, 0, 0, 0, "PRTDCB_TCPMC"}, + {I40E_GL_TUPM_SPARE, 0, 0, 0, 0, "GL_TUPM_SPARE"}, + {I40E_GLPEOC_CACHESIZE, 0, 0, 0, 0, "GLPEOC_CACHESIZE"}, + {I40E_GLPBLOC_CACHESIZE, 0, 0, 0, 0, "GLPBLOC_CACHESIZE"}, + {I40E_GLFOC_CACHESIZE, 0, 0, 0, 0, "GLFOC_CACHESIZE"}, + {I40E_PRTRPB_DHW(0), 7, 32, 0, 0, "PRTRPB_DHW"}, + {I40E_PRTRPB_DLW(0), 7, 32, 0, 0, "PRTRPB_DLW"}, + {I40E_PRTRPB_DPS(0), 7, 32, 0, 0, "PRTRPB_DPS"}, + {I40E_PRTRPB_SHT(0), 7, 32, 0, 0, "PRTRPB_SHT"}, + {I40E_PRTRPB_SHW, 0, 0, 0, 0, "PRTRPB_SHW"}, + {I40E_PRTRPB_SLT(0), 7, 32, 0, 0, "PRTRPB_SLT"}, + {I40E_PRTRPB_SLW, 0, 0, 0, 0, "PRTRPB_SLW"}, + {I40E_PRTRPB_SPS, 0, 0, 0, 0, "PRTRPB_SPS"}, + {I40E_GLRPB_DPSS, 0, 0, 0, 0, "GLRPB_DPSS"}, + {I40E_GLRPB_GHW, 0, 0, 0, 0, "GLRPB_GHW"}, + {I40E_GLRPB_GLW, 0, 0, 0, 0, "GLRPB_GLW"}, + {I40E_GLRPB_PHW, 0, 0, 0, 0, "GLRPB_PHW"}, + {I40E_GLRPB_PLW, 0, 0, 0, 0, "GLRPB_PLW"}, + {I40E_PRTDCB_TETSC_TCB, 0, 0, 0, 0, "PRTDCB_TETSC_TCB"}, + {I40E_GLNVM_ULD, 0, 0, 0, 0, "GLNVM_ULD"}, + {I40E_GLNVM_ULD, 0, 0, 0, 0, "GLNVM_ULD"}, + {I40E_GLNVM_PROTCSR(0), 59, 4, 0, 0, "GLNVM_PROTCSR"}, + {I40E_GLNVM_GENS, 0, 0, 0, 0, "GLNVM_GENS"}, + {I40E_GLNVM_FLASHID, 0, 0, 0, 0, "GLNVM_FLASHID"}, + {I40E_GLNVM_FLA, 0, 0, 0, 0, "GLNVM_FLA"}, + {I40E_GLNVM_FLA, 0, 0, 0, 0, "GLNVM_FLA"}, + {I40E_GLNVM_SRCTL, 0, 0, 0, 
0, "GLNVM_SRCTL"}, + {I40E_GLNVM_SRDATA, 0, 0, 0, 0, "GLNVM_SRDATA"}, + {I40E_GLGEN_STAT, 0, 0, 0, 0, "GLGEN_STAT"}, + {I40E_GL_MNG_HWARB_CTRL, 0, 0, 0, 0, "GL_MNG_HWARB_CTRL"}, + {I40E_GL_MNG_FWSM, 0, 0, 0, 0, "GL_MNG_FWSM"}, + {I40E_GLNVM_ALTIMERS, 0, 0, 0, 0, "GLNVM_ALTIMERS"}, + {I40E_GLNVM_ULT, 0, 0, 0, 0, "GLNVM_ULT"}, + {I40E_MEM_INIT_DONE_STAT, 0, 0, 0, 0, "MEM_INIT_DONE_STAT"}, + {I40E_GLNVM_AL_REQ, 0, 0, 0, 0, "GLNVM_AL_REQ"}, + {I40E_MNGSB_MSGCTL, 0, 0, 0, 0, "MNGSB_MSGCTL"}, + {I40E_MNGSB_RSPCTL, 0, 0, 0, 0, "MNGSB_RSPCTL"}, + {I40E_MNGSB_DADD, 0, 0, 0, 0, "MNGSB_DADD"}, + {I40E_MNGSB_DCNT, 0, 0, 0, 0, "MNGSB_DCNT"}, + {I40E_MNGSB_FDCS, 0, 0, 0, 0, "MNGSB_FDCS"}, + {I40E_MNGSB_FDS, 0, 0, 0, 0, "MNGSB_FDS"}, + {I40E_MNGSB_FDCRC, 0, 0, 0, 0, "MNGSB_FDCRC"}, + {I40E_MNGSB_WHDR0, 0, 0, 0, 0, "MNGSB_WHDR0"}, + {I40E_MNGSB_WHDR1, 0, 0, 0, 0, "MNGSB_WHDR1"}, + {I40E_MNGSB_WHDR2, 0, 0, 0, 0, "MNGSB_WHDR2"}, + {I40E_MNGSB_WDATA, 0, 0, 0, 0, "MNGSB_WDATA"}, + {I40E_MNGSB_RHDR0, 0, 0, 0, 0, "MNGSB_RHDR0"}, + {I40E_MNGSB_RDATA, 0, 0, 0, 0, "MNGSB_RDATA"}, + {I40E_PFPM_APM, 0, 0, 0, 0, "PFPM_APM"}, + {I40E_PRTGEN_STATUS, 0, 0, 0, 0, "PRTGEN_STATUS"}, + {I40E_PRTGEN_CNF, 0, 0, 0, 0, "PRTGEN_CNF"}, + {I40E_PRTPM_GC, 0, 0, 0, 0, "PRTPM_GC"}, + {I40E_PRTGEN_CNF2, 0, 0, 0, 0, "PRTGEN_CNF2"}, + {I40E_GLGEN_RSTCTL, 0, 0, 0, 0, "GLGEN_RSTCTL"}, + {I40E_GLGEN_CLKSTAT, 0, 0, 0, 0, "GLGEN_CLKSTAT"}, + {I40E_GLGEN_RSTAT, 0, 0, 0, 0, "GLGEN_RSTAT"}, + {I40E_GLGEN_RTRIG, 0, 0, 0, 0, "GLGEN_RTRIG"}, + {I40E_GLGEN_PME_TO, 0, 0, 0, 0, "GLGEN_PME_TO"}, + {I40E_GLGEN_CAR_DEBUG, 0, 0, 0, 0, "GLGEN_CAR_DEBUG"}, + {I40E_PFPCI_CNF, 0, 0, 0, 0, "PFPCI_CNF"}, + {I40E_PFPCI_DEVID, 0, 0, 0, 0, "PFPCI_DEVID"}, + {I40E_PFPCI_SUBSYSID, 0, 0, 0, 0, "PFPCI_SUBSYSID"}, + {I40E_PFPCI_FUNC2, 0, 0, 0, 0, "PFPCI_FUNC2"}, + {I40E_PFPCI_FUNC, 0, 0, 0, 0, "PFPCI_FUNC"}, + {I40E_PFPCI_STATUS1, 0, 0, 0, 0, "PFPCI_STATUS1"}, + {I40E_PFPCI_PM, 0, 0, 0, 0, "PFPCI_PM"}, + {I40E_PFPCI_CLASS, 0, 0, 0, 0, "PFPCI_CLASS"}, + {I40E_GLTPH_CTRL, 0, 0, 0, 0, "GLTPH_CTRL"}, + {I40E_GLPCI_LBARCTRL, 0, 0, 0, 0, "GLPCI_LBARCTRL"}, + {I40E_GLPCI_SUBVENID, 0, 0, 0, 0, "GLPCI_SUBVENID"}, + {I40E_GLPCI_PWRDATA, 0, 0, 0, 0, "GLPCI_PWRDATA"}, + {I40E_GLPCI_CNF2, 0, 0, 0, 0, "GLPCI_CNF2"}, + {I40E_GLPCI_SERL, 0, 0, 0, 0, "GLPCI_SERL"}, + {I40E_GLPCI_SERH, 0, 0, 0, 0, "GLPCI_SERH"}, + {I40E_GLPCI_CAPCTRL, 0, 0, 0, 0, "GLPCI_CAPCTRL"}, + {I40E_GLPCI_CAPSUP, 0, 0, 0, 0, "GLPCI_CAPSUP"}, + {I40E_GLPCI_LINKCAP, 0, 0, 0, 0, "GLPCI_LINKCAP"}, + {I40E_GLPCI_PMSUP, 0, 0, 0, 0, "GLPCI_PMSUP"}, + {I40E_GLPCI_REVID, 0, 0, 0, 0, "GLPCI_REVID"}, + {I40E_GLPCI_VFSUP, 0, 0, 0, 0, "GLPCI_VFSUP"}, + {I40E_GLPCI_CNF, 0, 0, 0, 0, "GLPCI_CNF"}, + {I40E_GLPCI_UPADD, 0, 0, 0, 0, "GLPCI_UPADD"}, + {I40E_GLPCI_PCIERR, 0, 0, 0, 0, "GLPCI_PCIERR"}, + {I40E_GLPCI_VENDORID, 0, 0, 0, 0, "GLPCI_VENDORID"}, + {I40E_GL_UFUSE_SOC, 0, 0, 0, 0, "GL_UFUSE_SOC"}, + {I40E_PFHMC_SDCMD, 0, 0, 0, 0, "PFHMC_SDCMD"}, + {I40E_PFHMC_SDDATALOW, 0, 0, 0, 0, "PFHMC_SDDATALOW"}, + {I40E_PFHMC_SDDATAHIGH, 0, 0, 0, 0, "PFHMC_SDDATAHIGH"}, + {I40E_PFHMC_PDINV, 0, 0, 0, 0, "PFHMC_PDINV"}, + {I40E_PFHMC_ERRORINFO, 0, 0, 0, 0, "PFHMC_ERRORINFO"}, + {I40E_PFHMC_ERRORDATA, 0, 0, 0, 0, "PFHMC_ERRORDATA"}, + {I40E_GLHMC_SDPART(0), 15, 4, 0, 0, "GLHMC_SDPART"}, + {I40E_GLHMC_PFPESDPART(0), 15, 4, 0, 0, "GLHMC_PFPESDPART"}, + {I40E_GLHMC_PFASSIGN(0), 15, 4, 0, 0, "GLHMC_PFASSIGN"}, + {I40E_GLHMC_LANTXOBJSZ, 0, 0, 0, 0, "GLHMC_LANTXOBJSZ"}, + {I40E_GLHMC_LANQMAX, 0, 0, 0, 0, "GLHMC_LANQMAX"}, + 
{I40E_GLHMC_LANRXOBJSZ, 0, 0, 0, 0, "GLHMC_LANRXOBJSZ"}, + {I40E_GLHMC_FCOEDDPOBJSZ, 0, 0, 0, 0, "GLHMC_FCOEDDPOBJSZ"}, + {I40E_GLHMC_FCOEMAX, 0, 0, 0, 0, "GLHMC_FCOEMAX"}, + {I40E_GLHMC_FCOEFOBJSZ, 0, 0, 0, 0, "GLHMC_FCOEFOBJSZ"}, + {I40E_GLHMC_PEQPOBJSZ, 0, 0, 0, 0, "GLHMC_PEQPOBJSZ"}, + {I40E_GLHMC_PECQOBJSZ, 0, 0, 0, 0, "GLHMC_PECQOBJSZ"}, + {I40E_GLHMC_PESRQOBJSZ, 0, 0, 0, 0, "GLHMC_PESRQOBJSZ"}, + {I40E_GLHMC_PESRQMAX, 0, 0, 0, 0, "GLHMC_PESRQMAX"}, + {I40E_GLHMC_PEHTEOBJSZ, 0, 0, 0, 0, "GLHMC_PEHTEOBJSZ"}, + {I40E_GLHMC_PEHTMAX, 0, 0, 0, 0, "GLHMC_PEHTMAX"}, + {I40E_GLHMC_PEARPOBJSZ, 0, 0, 0, 0, "GLHMC_PEARPOBJSZ"}, + {I40E_GLHMC_PEARPMAX, 0, 0, 0, 0, "GLHMC_PEARPMAX"}, + {I40E_GLHMC_PEMROBJSZ, 0, 0, 0, 0, "GLHMC_PEMROBJSZ"}, + {I40E_GLHMC_PEMRMAX, 0, 0, 0, 0, "GLHMC_PEMRMAX"}, + {I40E_GLHMC_PEXFOBJSZ, 0, 0, 0, 0, "GLHMC_PEXFOBJSZ"}, + {I40E_GLHMC_PEXFMAX, 0, 0, 0, 0, "GLHMC_PEXFMAX"}, + {I40E_GLHMC_PEXFFLMAX, 0, 0, 0, 0, "GLHMC_PEXFFLMAX"}, + {I40E_GLHMC_PEQ1OBJSZ, 0, 0, 0, 0, "GLHMC_PEQ1OBJSZ"}, + {I40E_GLHMC_PEQ1MAX, 0, 0, 0, 0, "GLHMC_PEQ1MAX"}, + {I40E_GLHMC_PEQ1FLMAX, 0, 0, 0, 0, "GLHMC_PEQ1FLMAX"}, + {I40E_GLHMC_FSIMCOBJSZ, 0, 0, 0, 0, "GLHMC_FSIMCOBJSZ"}, + {I40E_GLHMC_FSIMCMAX, 0, 0, 0, 0, "GLHMC_FSIMCMAX"}, + {I40E_GLHMC_FSIAVOBJSZ, 0, 0, 0, 0, "GLHMC_FSIAVOBJSZ"}, + {I40E_GLHMC_FSIAVMAX, 0, 0, 0, 0, "GLHMC_FSIAVMAX"}, + {I40E_GLHMC_PEPBLMAX, 0, 0, 0, 0, "GLHMC_PEPBLMAX"}, + {I40E_GLHMC_PETIMEROBJSZ, 0, 0, 0, 0, "GLHMC_PETIMEROBJSZ"}, + {I40E_GLHMC_PETIMERMAX, 0, 0, 0, 0, "GLHMC_PETIMERMAX"}, + {I40E_GLHMC_FCOEFMAX, 0, 0, 0, 0, "GLHMC_FCOEFMAX"}, + {I40E_GLHMC_PEPFFIRSTSD, 0, 0, 0, 0, "GLHMC_PEPFFIRSTSD"}, + {I40E_GLHMC_DBQPMAX, 0, 0, 0, 0, "GLHMC_DBQPMAX"}, + {I40E_GLHMC_DBCQMAX, 0, 0, 0, 0, "GLHMC_DBCQMAX"}, + {I40E_GLHMC_PEQPBASE(0), 15, 4, 0, 0, "GLHMC_PEQPBASE"}, + {I40E_GLHMC_PEQPCNT(0), 15, 4, 0, 0, "GLHMC_PEQPCNT"}, + {I40E_GLHMC_PECQBASE(0), 15, 4, 0, 0, "GLHMC_PECQBASE"}, + {I40E_GLHMC_PECQCNT(0), 15, 4, 0, 0, "GLHMC_PECQCNT"}, + {I40E_GLHMC_PESRQBASE(0), 15, 4, 0, 0, "GLHMC_PESRQBASE"}, + {I40E_GLHMC_PESRQCNT(0), 15, 4, 0, 0, "GLHMC_PESRQCNT"}, + {I40E_GLHMC_PEHTEBASE(0), 15, 4, 0, 0, "GLHMC_PEHTEBASE"}, + {I40E_GLHMC_PEHTCNT(0), 15, 4, 0, 0, "GLHMC_PEHTCNT"}, + {I40E_GLHMC_PEARPBASE(0), 15, 4, 0, 0, "GLHMC_PEARPBASE"}, + {I40E_GLHMC_PEARPCNT(0), 15, 4, 0, 0, "GLHMC_PEARPCNT"}, + {I40E_GLHMC_APBVTINUSEBASE(0), 15, 4, 0, 0, "GLHMC_APBVTINUSEBASE"}, + {I40E_GLHMC_PEMRBASE(0), 15, 4, 0, 0, "GLHMC_PEMRBASE"}, + {I40E_GLHMC_PEMRCNT(0), 15, 4, 0, 0, "GLHMC_PEMRCNT"}, + {I40E_GLHMC_PEXFBASE(0), 15, 4, 0, 0, "GLHMC_PEXFBASE"}, + {I40E_GLHMC_PEXFCNT(0), 15, 4, 0, 0, "GLHMC_PEXFCNT"}, + {I40E_GLHMC_PEXFFLBASE(0), 15, 4, 0, 0, "GLHMC_PEXFFLBASE"}, + {I40E_GLHMC_PEQ1BASE(0), 15, 4, 0, 0, "GLHMC_PEQ1BASE"}, + {I40E_GLHMC_PEQ1CNT(0), 15, 4, 0, 0, "GLHMC_PEQ1CNT"}, + {I40E_GLHMC_PEQ1FLBASE(0), 15, 4, 0, 0, "GLHMC_PEQ1FLBASE"}, + {I40E_GLHMC_FSIAVBASE(0), 15, 4, 0, 0, "GLHMC_FSIAVBASE"}, + {I40E_GLHMC_FSIAVCNT(0), 15, 4, 0, 0, "GLHMC_FSIAVCNT"}, + {I40E_GLHMC_PEPBLBASE(0), 15, 4, 0, 0, "GLHMC_PEPBLBASE"}, + {I40E_GLHMC_PEPBLCNT(0), 15, 4, 0, 0, "GLHMC_PEPBLCNT"}, + {I40E_GLHMC_PETIMERBASE(0), 15, 4, 0, 0, "GLHMC_PETIMERBASE"}, + {I40E_GLHMC_PETIMERCNT(0), 15, 4, 0, 0, "GLHMC_PETIMERCNT"}, + {I40E_GLHMC_FSIMCBASE(0), 15, 4, 0, 0, "GLHMC_FSIMCBASE"}, + {I40E_GLHMC_FSIMCCNT(0), 15, 4, 0, 0, "GLHMC_FSIMCCNT"}, + {I40E_GLHMC_LANTXBASE(0), 15, 4, 0, 0, "GLHMC_LANTXBASE"}, + {I40E_GLHMC_LANTXCNT(0), 15, 4, 0, 0, "GLHMC_LANTXCNT"}, + {I40E_GLHMC_LANRXBASE(0), 15, 4, 0, 0, 
"GLHMC_LANRXBASE"}, + {I40E_GLHMC_LANRXCNT(0), 15, 4, 0, 0, "GLHMC_LANRXCNT"}, + {I40E_GLHMC_FCOEDDPBASE(0), 15, 4, 0, 0, "GLHMC_FCOEDDPBASE"}, + {I40E_GLHMC_FCOEDDPCNT(0), 15, 4, 0, 0, "GLHMC_FCOEDDPCNT"}, + {I40E_GLHMC_FCOEFBASE(0), 15, 4, 0, 0, "GLHMC_FCOEFBASE"}, + {I40E_GLHMC_FCOEFCNT(0), 15, 4, 0, 0, "GLHMC_FCOEFCNT"}, + {I40E_GLHMC_VFPDINV(0), 31, 4, 0, 0, "GLHMC_VFPDINV"}, + {I40E_GLHMC_VFSDPART(0), 31, 4, 0, 0, "GLHMC_VFSDPART"}, + {I40E_GLHMC_VFPEQPBASE(0), 31, 4, 0, 0, "GLHMC_VFPEQPBASE"}, + {I40E_GLHMC_VFPEQPCNT(0), 31, 4, 0, 0, "GLHMC_VFPEQPCNT"}, + {I40E_GLHMC_VFPECQBASE(0), 31, 4, 0, 0, "GLHMC_VFPECQBASE"}, + {I40E_GLHMC_VFPECQCNT(0), 31, 4, 0, 0, "GLHMC_VFPECQCNT"}, + {I40E_GLHMC_VFPESRQBASE(0), 31, 4, 0, 0, "GLHMC_VFPESRQBASE"}, + {I40E_GLHMC_VFPESRQCNT(0), 31, 4, 0, 0, "GLHMC_VFPESRQCNT"}, + {I40E_GLHMC_VFPEHTEBASE(0), 31, 4, 0, 0, "GLHMC_VFPEHTEBASE"}, + {I40E_GLHMC_VFPEHTCNT(0), 31, 4, 0, 0, "GLHMC_VFPEHTCNT"}, + {I40E_GLHMC_VFPEARPBASE(0), 31, 4, 0, 0, "GLHMC_VFPEARPBASE"}, + {I40E_GLHMC_VFPEARPCNT(0), 31, 4, 0, 0, "GLHMC_VFPEARPCNT"}, + {I40E_GLHMC_VFAPBVTINUSEBASE(0), 31, 4, 0, 0, "GLHMC_VFAPBVTINUSEBASE"}, + {I40E_GLHMC_VFPEMRBASE(0), 31, 4, 0, 0, "GLHMC_VFPEMRBASE"}, + {I40E_GLHMC_VFPEMRCNT(0), 31, 4, 0, 0, "GLHMC_VFPEMRCNT"}, + {I40E_GLHMC_VFPEXFBASE(0), 31, 4, 0, 0, "GLHMC_VFPEXFBASE"}, + {I40E_GLHMC_VFPEXFCNT(0), 31, 4, 0, 0, "GLHMC_VFPEXFCNT"}, + {I40E_GLHMC_VFPEXFFLBASE(0), 31, 4, 0, 0, "GLHMC_VFPEXFFLBASE"}, + {I40E_GLHMC_VFPEQ1BASE(0), 31, 4, 0, 0, "GLHMC_VFPEQ1BASE"}, + {I40E_GLHMC_VFPEQ1CNT(0), 31, 4, 0, 0, "GLHMC_VFPEQ1CNT"}, + {I40E_GLHMC_VFPEQ1FLBASE(0), 31, 4, 0, 0, "GLHMC_VFPEQ1FLBASE"}, + {I40E_GLHMC_VFFSIAVBASE(0), 31, 4, 0, 0, "GLHMC_VFFSIAVBASE"}, + {I40E_GLHMC_VFFSIAVCNT(0), 31, 4, 0, 0, "GLHMC_VFFSIAVCNT"}, + {I40E_GLHMC_VFPEPBLBASE(0), 31, 4, 0, 0, "GLHMC_VFPEPBLBASE"}, + {I40E_GLHMC_VFPEPBLCNT(0), 31, 4, 0, 0, "GLHMC_VFPEPBLCNT"}, + {I40E_GLHMC_VFPETIMERBASE(0), 31, 4, 0, 0, "GLHMC_VFPETIMERBASE"}, + {I40E_GLHMC_VFPETIMERCNT(0), 31, 4, 0, 0, "GLHMC_VFPETIMERCNT"}, + {I40E_GLPDOC_CACHESIZE, 0, 0, 0, 0, "GLPDOC_CACHESIZE"}, + {I40E_QTX_HEAD(0), 1535, 4, 0, 0, "QTX_HEAD"}, + {I40E_VP_MDET_TX(0), 127, 4, 0, 0, "VP_MDET_TX"}, + {I40E_PF_MDET_TX, 0, 0, 0, 0, "PF_MDET_TX"}, + {I40E_GL_MDET_TX, 0, 0, 0, 0, "GL_MDET_TX"}, + {I40E_GL_TLAN_SPARE, 0, 0, 0, 0, "GL_TLAN_SPARE"}, + {I40E_GLLAN_TXPRE_QDIS(0), 11, 4, 0, 0, "GLLAN_TXPRE_QDIS"}, + {I40E_QTX_ENA(0), 1535, 4, 0, 0, "QTX_ENA"}, + {I40E_QTX_CTL(0), 1535, 4, 0, 0, "QTX_CTL"}, + {I40E_QTX_TAIL(0), 1535, 4, 0, 0, "QTX_TAIL"}, + {I40E_PFCM_LAN_ERRINFO, 0, 0, 0, 0, "PFCM_LAN_ERRINFO"}, + {I40E_PFCM_LAN_ERRDATA, 0, 0, 0, 0, "PFCM_LAN_ERRDATA"}, + {I40E_PFCM_LANCTXDATA(0), 3, 128, 0, 0, "PFCM_LANCTXDATA"}, + {I40E_PFCM_LANCTXCTL, 0, 0, 0, 0, "PFCM_LANCTXCTL"}, + {I40E_PFCM_LANCTXSTAT, 0, 0, 0, 0, "PFCM_LANCTXSTAT"}, + {I40E_GLCM_LAN_CACHESIZE, 0, 0, 0, 0, "GLCM_LAN_CACHESIZE"}, + {I40E_QRX_ENA(0), 1535, 4, 0, 0, "QRX_ENA"}, + {I40E_PRTDCB_RETSTCC(0), 7, 32, 0, 0, "PRTDCB_RETSTCC"}, + {I40E_PRTDCB_RPPMC, 0, 0, 0, 0, "PRTDCB_RPPMC"}, + {I40E_PRTDCB_RETSC, 0, 0, 0, 0, "PRTDCB_RETSC"}, + {I40E_PRTDCB_RUPTQ(0), 7, 32, 0, 0, "PRTDCB_RUPTQ"}, + {I40E_GLDCB_RUPTI, 0, 0, 0, 0, "GLDCB_RUPTI"}, + {I40E_QRX_TAIL(0), 1535, 4, 0, 0, "QRX_TAIL"}, + {I40E_VP_MDET_RX(0), 127, 4, 0, 0, "VP_MDET_RX"}, + {I40E_PF_MDET_RX, 0, 0, 0, 0, "PF_MDET_RX"}, + {I40E_GLLAN_RCTL_0, 0, 0, 0, 0, "GLLAN_RCTL_0"}, + {I40E_GL_MDET_RX, 0, 0, 0, 0, "GL_MDET_RX"}, + {I40E_VFPE_CQARM(0), 127, 4, 0, 0, "VFPE_CQARM"}, + {I40E_VFPE_CQACK(0), 127, 4, 
0, 0, "VFPE_CQACK"}, + {I40E_VFPE_AEQALLOC(0), 127, 4, 0, 0, "VFPE_AEQALLOC"}, + {I40E_PFPE_CQARM, 0, 0, 0, 0, "PFPE_CQARM"}, + {I40E_PFPE_CQACK, 0, 0, 0, 0, "PFPE_CQACK"}, + {I40E_PFPE_AEQALLOC, 0, 0, 0, 0, "PFPE_AEQALLOC"}, + {I40E_GLHMC_DBCQPART(0), 15, 4, 0, 0, "GLHMC_DBCQPART"}, + {I40E_GLHMC_CEQPART(0), 15, 4, 0, 0, "GLHMC_CEQPART"}, + {I40E_GLPE_PFCQEDROPCNT(0), 15, 4, 0, 0, "GLPE_PFCQEDROPCNT"}, + {I40E_GLPE_PFCEQEDROPCNT(0), 15, 4, 0, 0, "GLPE_PFCEQEDROPCNT"}, + {I40E_GLPE_PFAEQEDROPCNT(0), 15, 4, 0, 0, "GLPE_PFAEQEDROPCNT"}, + {I40E_GLHMC_VFDBCQPART(0), 31, 4, 0, 0, "GLHMC_VFDBCQPART"}, + {I40E_GLHMC_VFCEQPART(0), 31, 4, 0, 0, "GLHMC_VFCEQPART"}, + {I40E_GLPE_VFCQEDROPCNT(0), 31, 4, 0, 0, "GLPE_VFCQEDROPCNT"}, + {I40E_GLPE_VFCEQEDROPCNT(0), 31, 4, 0, 0, "GLPE_VFCEQEDROPCNT"}, + {I40E_GLPE_VFAEQEDROPCNT(0), 31, 4, 0, 0, "GLPE_VFAEQEDROPCNT"}, + {I40E_VFPE_WQEALLOC(0), 127, 4, 0, 0, "VFPE_WQEALLOC"}, + {I40E_VFCM_PE_ERRINFO1(0), 127, 4, 0, 0, "VFCM_PE_ERRINFO1"}, + {I40E_VFCM_PE_ERRDATA1(0), 127, 4, 0, 0, "VFCM_PE_ERRDATA1"}, + {I40E_PFPE_WQEALLOC, 0, 0, 0, 0, "PFPE_WQEALLOC"}, + {I40E_PFCM_PE_ERRINFO, 0, 0, 0, 0, "PFCM_PE_ERRINFO"}, + {I40E_PFCM_PE_ERRDATA, 0, 0, 0, 0, "PFCM_PE_ERRDATA"}, + {I40E_GLHMC_DBQPPART(0), 15, 4, 0, 0, "GLHMC_DBQPPART"}, + {I40E_GLHMC_VFDBQPPART(0), 31, 4, 0, 0, "GLHMC_VFDBQPPART"}, + {I40E_GLCM_PE_CACHESIZE, 0, 0, 0, 0, "GLCM_PE_CACHESIZE"}, + {I40E_PFGEN_PORTNUM, 0, 0, 0, 0, "PFGEN_PORTNUM"}, + {I40E_PF_VT_PFALLOC, 0, 0, 0, 0, "PF_VT_PFALLOC"}, + {I40E_PRTDCB_TC2PFC, 0, 0, 0, 0, "PRTDCB_TC2PFC"}, + {I40E_PRTDCB_RUP2TC, 0, 0, 0, 0, "PRTDCB_RUP2TC"}, + {I40E_GLGEN_PCIFCNCNT, 0, 0, 0, 0, "GLGEN_PCIFCNCNT"}, + {I40E_PRTDCB_RUP, 0, 0, 0, 0, "PRTDCB_RUP"}, + {I40E_PRT_L2TAGSEN, 0, 0, 0, 0, "PRT_L2TAGSEN"}, + {I40E_PRTGL_SAL, 0, 0, 0, 0, "PRTGL_SAL"}, + {I40E_PRTGL_SAH, 0, 0, 0, 0, "PRTGL_SAH"}, + {I40E_PRTDCB_MFLCN, 0, 0, 0, 0, "PRTDCB_MFLCN"}, + {I40E_PRTMAC_LINK_DOWN_COUNTER, 0, 0, 0, 0, + "PRTMAC_LINK_DOWN_COUNTER"}, + {I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE, 0, 0, 0, 0, + "PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE"}, + {I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE, 0, 0, 0, 0, + "PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE"}, + {I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP, 0, 0, 0, 0, + "PRTMAC_HSEC_CTL_RX_ENABLE_GCP"}, + {I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1, 0, 0, 0, 0, + "PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1"}, + {I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2, 0, 0, 0, 0, + "PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2"}, + {I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1, 0, 0, 0, 0, + "PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1"}, + {I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2, 0, 0, 0, 0, + "PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2"}, + {I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP, 0, 0, 0, 0, + "PRTMAC_HSEC_CTL_RX_ENABLE_GPP"}, + {I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP, 0, 0, 0, 0, + "PRTMAC_HSEC_CTL_RX_ENABLE_PPP"}, + {I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL, 0, 0, 0, 0, + "PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL"}, + {I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(0), 8, 16, 0, 0, + "PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA"}, + {I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(0), 8, 16, 0, 0, + "PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER"}, + {I40E_PRTMAC_HSEC_CTL_TX_SA_PART1, 0, 0, 0, 0, + "PRTMAC_HSEC_CTL_TX_SA_PART1"}, + {I40E_PRTMAC_HSEC_CTL_TX_SA_PART2, 0, 0, 0, 0, + "PRTMAC_HSEC_CTL_TX_SA_PART2"}, + {I40E_PRTTSYN_INC_L, 0, 0, 0, 0, "PRTTSYN_INC_L"}, + {I40E_PRTTSYN_INC_H, 0, 0, 0, 0, "PRTTSYN_INC_H"}, + {I40E_PRTTSYN_EVNT_L(0), 1, 32, 0, 0, "PRTTSYN_EVNT_L"}, + {I40E_PRTTSYN_EVNT_H(0), 1, 32, 0, 0, "PRTTSYN_EVNT_H"}, + {I40E_PRTTSYN_TIME_L, 0, 0, 
0, 0, "PRTTSYN_TIME_L"}, + {I40E_PRTTSYN_TIME_H, 0, 0, 0, 0, "PRTTSYN_TIME_H"}, + {I40E_PRTTSYN_TGT_L(0), 1, 32, 0, 0, "PRTTSYN_TGT_L"}, + {I40E_PRTTSYN_TGT_H(0), 1, 32, 0, 0, "PRTTSYN_TGT_H"}, + {I40E_PRTTSYN_TXTIME_L, 0, 0, 0, 0, "PRTTSYN_TXTIME_L"}, + {I40E_PRTTSYN_TXTIME_H, 0, 0, 0, 0, "PRTTSYN_TXTIME_H"}, + {I40E_PRTTSYN_CTL0, 0, 0, 0, 0, "PRTTSYN_CTL0"}, + {I40E_PRTTSYN_STAT_0, 0, 0, 0, 0, "PRTTSYN_STAT_0"}, + {I40E_PRTTSYN_CLKO(0), 1, 32, 0, 0, "PRTTSYN_CLKO"}, + {I40E_PRTTSYN_ADJ, 0, 0, 0, 0, "PRTTSYN_ADJ"}, + {I40E_PRTTSYN_AUX_0(0), 1, 32, 0, 0, "PRTTSYN_AUX_0"}, + {I40E_PRTTSYN_AUX_1(0), 1, 32, 0, 0, "PRTTSYN_AUX_1"}, + {I40E_PRTPM_EEE_STAT, 0, 0, 0, 0, "PRTPM_EEE_STAT"}, + {I40E_PRTPM_EEER, 0, 0, 0, 0, "PRTPM_EEER"}, + {I40E_PRTPM_EEEC, 0, 0, 0, 0, "PRTPM_EEEC"}, + {I40E_PRTPM_RLPIC, 0, 0, 0, 0, "PRTPM_RLPIC"}, + {I40E_PRTPM_TLPIC, 0, 0, 0, 0, "PRTPM_TLPIC"}, + {I40E_PRTPM_EEETXC, 0, 0, 0, 0, "PRTPM_EEETXC"}, + {I40E_PRTPM_EEEFWD, 0, 0, 0, 0, "PRTPM_EEEFWD"}, + {I40E_PRTPM_SAL(0), 3, 32, 0, 0, "PRTPM_SAL"}, + {I40E_PRTPM_SAH(0), 3, 32, 0, 0, "PRTPM_SAH"}, + {I40E_PRTDCB_TFCS, 0, 0, 0, 0, "PRTDCB_TFCS"}, + {I40E_PRTDCB_FCTTVN(0), 3, 32, 0, 0, "PRTDCB_FCTTVN"}, + {I40E_PRTDCB_FCRTV, 0, 0, 0, 0, "PRTDCB_FCRTV"}, + {I40E_PRTDCB_FCCFG, 0, 0, 0, 0, "PRTDCB_FCCFG"}, + {I40E_PRTDCB_TPFCTS(0), 7, 32, 0, 0, "PRTDCB_TPFCTS"}, + {I40E_VFQF_HLUT1(0, 0), 15, 1024, 127, 4, "VFQF_HLUT1"}, + {I40E_VSIQF_HLUT(0, 0), 15, 2048, 383, 4, "VSIQF_HLUT"}, + {I40E_VFQF_HKEY1(0, 0), 12, 1024, 127, 4, "VFQF_HKEY1"}, + {I40E_VFQF_HREGION1(0, 0), 7, 1024, 127, 4, "VFQF_HREGION1"}, + {I40E_VFQF_HENA1(0, 0), 1, 1024, 127, 4, "VFQF_HENA1"}, + {I40E_PFQF_HLUT(0), 127, 128, 0, 0, "PFQF_HLUT"}, + {I40E_X722_PFQF_HLUT(0), 127, 128, 0, 0, "X722_PFQF_HLUT"}, + {I40E_PFQF_CTL_1, 0, 0, 0, 0, "PFQF_CTL_1"}, + {I40E_PFQF_FDSTAT, 0, 0, 0, 0, "PFQF_FDSTAT"}, + {I40E_PRT_MNG_MIPAF6(0), 15, 32, 0, 0, "PRT_MNG_MIPAF6"}, + {I40E_PRT_MNG_MFUTP(0), 15, 32, 0, 0, "PRT_MNG_MFUTP"}, + {I40E_PRTQF_FLX_PIT(0), 8, 32, 0, 0, "PRTQF_FLX_PIT"}, + {I40E_PRT_MNG_MAVTV(0), 7, 32, 0, 0, "PRT_MNG_MAVTV"}, + {I40E_PRT_MNG_MDEF(0), 7, 32, 0, 0, "PRT_MNG_MDEF"}, + {I40E_PRT_MNG_MDEF_EXT(0), 7, 32, 0, 0, "PRT_MNG_MDEF_EXT"}, + {I40E_PRT_MNG_MIPAF4(0), 3, 32, 0, 0, "PRT_MNG_MIPAF4"}, + {I40E_PRT_MNG_MMAH(0), 3, 32, 0, 0, "PRT_MNG_MMAH"}, + {I40E_PRT_MNG_MMAL(0), 3, 32, 0, 0, "PRT_MNG_MMAL"}, + {I40E_PRT_MNG_MDEFVSI(0), 3, 32, 0, 0, "PRT_MNG_MDEFVSI"}, + {I40E_PRT_MNG_METF(0), 3, 32, 0, 0, "PRT_MNG_METF"}, + {I40E_PRT_MNG_MANC, 0, 0, 0, 0, "PRT_MNG_MANC"}, + {I40E_PRT_MNG_MNGONLY, 0, 0, 0, 0, "PRT_MNG_MNGONLY"}, + {I40E_PRT_MNG_MSFM, 0, 0, 0, 0, "PRT_MNG_MSFM"}, + {I40E_GLQF_APBVT(0), 2047, 4, 0, 0, "GLQF_APBVT"}, + {I40E_GLQF_PCNT(0), 511, 4, 0, 0, "GLQF_PCNT"}, + {I40E_GLQF_FD_PCTYPES(0), 63, 4, 0, 0, "GLQF_FD_PCTYPES"}, + {I40E_GLQF_ORT(0), 63, 4, 0, 0, "GLQF_ORT"}, + {I40E_GLQF_PIT(0), 23, 4, 0, 0, "GLQF_PIT"}, + {I40E_GL_PRS_FVBM(0), 3, 4, 0, 0, "GL_PRS_FVBM"}, + {I40E_GLQF_FDCNT_0, 0, 0, 0, 0, "GLQF_FDCNT_0"}, + {I40E_GL_MTG_FLU_MSK_H, 0, 0, 0, 0, "GL_MTG_FLU_MSK_H"}, + {I40E_GL_SWR_DEF_ACT_EN(0), 1, 4, 0, 0, "GL_SWR_DEF_ACT_EN"}, + {I40E_GLQF_HKEY(0), 12, 4, 0, 0, "GLQF_HKEY"}, + {I40E_GL_SWR_DEF_ACT(0), 35, 4, 0, 0, "GL_SWR_DEF_ACT"}, + {I40E_GLQF_FDEVICTFLAG, 0, 0, 0, 0, "GLQF_FDEVICTFLAG"}, + {I40E_PFQF_CTL_2, 0, 0, 0, 0, "PFQF_CTL_2"}, + {I40E_GLQF_FDEVICTENA(0), 1, 4, 0, 0, "GLQF_FDEVICTENA"}, + {I40E_VSIQF_HKEY(0, 0), 12, 2048, 383, 4, "VSIQF_HKEY"}, + {I40E_GLPRT_GORCL(0), 3, 8, 0, 0, "GLPRT_GORCL"}, + {I40E_GLPRT_GORCH(0), 3, 8, 0, 
0, "GLPRT_GORCH"}, + {I40E_GLPRT_MLFC(0), 3, 8, 0, 0, "GLPRT_MLFC"}, + {I40E_GLPRT_MRFC(0), 3, 8, 0, 0, "GLPRT_MRFC"}, + {I40E_GLPRT_CRCERRS(0), 3, 8, 0, 0, "GLPRT_CRCERRS"}, + {I40E_GLPRT_RLEC(0), 3, 8, 0, 0, "GLPRT_RLEC"}, + {I40E_GLPRT_ILLERRC(0), 3, 8, 0, 0, "GLPRT_ILLERRC"}, + {I40E_GLPRT_RUC(0), 3, 8, 0, 0, "GLPRT_RUC"}, + {I40E_GLPRT_ROC(0), 3, 8, 0, 0, "GLPRT_ROC"}, + {I40E_GLPRT_LXONRXC(0), 3, 8, 0, 0, "GLPRT_LXONRXC"}, + {I40E_GLPRT_LXOFFRXC(0), 3, 8, 0, 0, "GLPRT_LXOFFRXC"}, + {I40E_GLPRT_PXONRXC(0, 0), 3, 8, 7, 32, "GLPRT_PXONRXC"}, + {I40E_GLPRT_PXOFFRXC(0, 0), 3, 8, 7, 32, "GLPRT_PXOFFRXC"}, + {I40E_GLPRT_RXON2OFFCNT(0, 0), 3, 8, 7, 32, "GLPRT_RXON2OFFCNT"}, + {I40E_GLPRT_PRC64L(0), 3, 8, 0, 0, "GLPRT_PRC64L"}, + {I40E_GLPRT_PRC64H(0), 3, 8, 0, 0, "GLPRT_PRC64H"}, + {I40E_GLPRT_PRC127L(0), 3, 8, 0, 0, "GLPRT_PRC127L"}, + {I40E_GLPRT_PRC127H(0), 3, 8, 0, 0, "GLPRT_PRC127H"}, + {I40E_GLPRT_PRC255L(0), 3, 8, 0, 0, "GLPRT_PRC255L"}, + {I40E_GLPRT_PRC255H(0), 3, 8, 0, 0, "GLPRT_PRC255H"}, + {I40E_GLPRT_PRC511L(0), 3, 8, 0, 0, "GLPRT_PRC511L"}, + {I40E_GLPRT_PRC511H(0), 3, 8, 0, 0, "GLPRT_PRC511H"}, + {I40E_GLPRT_PRC1023L(0), 3, 8, 0, 0, "GLPRT_PRC1023L"}, + {I40E_GLPRT_PRC1023H(0), 3, 8, 0, 0, "GLPRT_PRC1023H"}, + {I40E_GLPRT_PRC1522L(0), 3, 8, 0, 0, "GLPRT_PRC1522L"}, + {I40E_GLPRT_PRC1522H(0), 3, 8, 0, 0, "GLPRT_PRC1522H"}, + {I40E_GLPRT_PRC9522L(0), 3, 8, 0, 0, "GLPRT_PRC9522L"}, + {I40E_GLPRT_PRC9522H(0), 3, 8, 0, 0, "GLPRT_PRC9522H"}, + {I40E_GLPRT_RFC(0), 3, 8, 0, 0, "GLPRT_RFC"}, + {I40E_GLPRT_RJC(0), 3, 8, 0, 0, "GLPRT_RJC"}, + {I40E_GLPRT_UPRCL(0), 3, 8, 0, 0, "GLPRT_UPRCL"}, + {I40E_GLPRT_UPRCH(0), 3, 8, 0, 0, "GLPRT_UPRCH"}, + {I40E_GLPRT_MPRCL(0), 3, 8, 0, 0, "GLPRT_MPRCL"}, + {I40E_GLPRT_MPRCH(0), 3, 8, 0, 0, "GLPRT_MPRCH"}, + {I40E_GLPRT_BPRCL(0), 3, 8, 0, 0, "GLPRT_BPRCL"}, + {I40E_GLPRT_BPRCH(0), 3, 8, 0, 0, "GLPRT_BPRCH"}, + {I40E_GLPRT_RDPC(0), 3, 8, 0, 0, "GLPRT_RDPC"}, + {I40E_GLPRT_LDPC(0), 3, 8, 0, 0, "GLPRT_LDPC"}, + {I40E_GLPRT_RUPP(0), 3, 8, 0, 0, "GLPRT_RUPP"}, + {I40E_GLPRT_GOTCL(0), 3, 8, 0, 0, "GLPRT_GOTCL"}, + {I40E_GLPRT_GOTCH(0), 3, 8, 0, 0, "GLPRT_GOTCH"}, + {I40E_GLPRT_PTC64L(0), 3, 8, 0, 0, "GLPRT_PTC64L"}, + {I40E_GLPRT_PTC64H(0), 3, 8, 0, 0, "GLPRT_PTC64H"}, + {I40E_GLPRT_PTC127L(0), 3, 8, 0, 0, "GLPRT_PTC127L"}, + {I40E_GLPRT_PTC127H(0), 3, 8, 0, 0, "GLPRT_PTC127H"}, + {I40E_GLPRT_PTC255L(0), 3, 8, 0, 0, "GLPRT_PTC255L"}, + {I40E_GLPRT_PTC255H(0), 3, 8, 0, 0, "GLPRT_PTC255H"}, + {I40E_GLPRT_PTC511L(0), 3, 8, 0, 0, "GLPRT_PTC511L"}, + {I40E_GLPRT_PTC511H(0), 3, 8, 0, 0, "GLPRT_PTC511H"}, + {I40E_GLPRT_PTC1023L(0), 3, 8, 0, 0, "GLPRT_PTC1023L"}, + {I40E_GLPRT_PTC1023H(0), 3, 8, 0, 0, "GLPRT_PTC1023H"}, + {I40E_GLPRT_PTC1522L(0), 3, 8, 0, 0, "GLPRT_PTC1522L"}, + {I40E_GLPRT_PTC1522H(0), 3, 8, 0, 0, "GLPRT_PTC1522H"}, + {I40E_GLPRT_PTC9522L(0), 3, 8, 0, 0, "GLPRT_PTC9522L"}, + {I40E_GLPRT_PTC9522H(0), 3, 8, 0, 0, "GLPRT_PTC9522H"}, + {I40E_GLPRT_PXONTXC(0, 0), 3, 8, 7, 32, "GLPRT_PXONTXC"}, + {I40E_GLPRT_PXOFFTXC(0, 0), 3, 8, 7, 32, "GLPRT_PXOFFTXC"}, + {I40E_GLPRT_LXONTXC(0), 3, 8, 0, 0, "GLPRT_LXONTXC"}, + {I40E_GLPRT_LXOFFTXC(0), 3, 8, 0, 0, "GLPRT_LXOFFTXC"}, + {I40E_GLPRT_UPTCL(0), 3, 8, 0, 0, "GLPRT_UPTCL"}, + {I40E_GLPRT_UPTCH(0), 3, 8, 0, 0, "GLPRT_UPTCH"}, + {I40E_GLPRT_MPTCL(0), 3, 8, 0, 0, "GLPRT_MPTCL"}, + {I40E_GLPRT_MPTCH(0), 3, 8, 0, 0, "GLPRT_MPTCH"}, + {I40E_GLPRT_BPTCL(0), 3, 8, 0, 0, "GLPRT_BPTCL"}, + {I40E_GLPRT_BPTCH(0), 3, 8, 0, 0, "GLPRT_BPTCH"}, + {I40E_GLPRT_TDOLD(0), 3, 8, 0, 0, "GLPRT_TDOLD"}, + {I40E_GLV_RDPC(0), 383, 
8, 0, 0, "GLV_RDPC"}, + {I40E_GL_FCOELAST(0), 143, 8, 0, 0, "GL_FCOELAST"}, + {I40E_GL_FCOEDDPC(0), 143, 8, 0, 0, "GL_FCOEDDPC"}, + {I40E_GL_FCOECRC(0), 143, 8, 0, 0, "GL_FCOECRC"}, + {I40E_GL_FCOEPRC(0), 143, 8, 0, 0, "GL_FCOEPRC"}, + {I40E_GL_RXERR1_L(0), 143, 8, 0, 0, "GL_RXERR1_L"}, + {I40E_GL_FCOEDIFEC(0), 143, 8, 0, 0, "GL_FCOEDIFEC"}, + {I40E_GL_RXERR2_L(0), 143, 8, 0, 0, "GL_RXERR2_L"}, + {I40E_GL_FCOEDWRCL(0), 143, 8, 0, 0, "GL_FCOEDWRCL"}, + {I40E_GL_FCOEDWRCH(0), 143, 8, 0, 0, "GL_FCOEDWRCH"}, + {I40E_GL_FCOERPDC(0), 143, 8, 0, 0, "GL_FCOERPDC"}, + {I40E_GLV_GOTCL(0), 383, 8, 0, 0, "GLV_GOTCL"}, + {I40E_GLV_GOTCH(0), 383, 8, 0, 0, "GLV_GOTCH"}, + {I40E_GLSW_GOTCL(0), 15, 8, 0, 0, "GLSW_GOTCL"}, + {I40E_GLSW_GOTCH(0), 15, 8, 0, 0, "GLSW_GOTCH"}, + {I40E_GLVEBVL_GOTCL(0), 127, 8, 0, 0, "GLVEBVL_GOTCL"}, + {I40E_GLVEBVL_GOTCH(0), 127, 8, 0, 0, "GLVEBVL_GOTCH"}, + {I40E_GLVEBTC_TBCL(0, 0), 7, 8, 15, 64, "GLVEBTC_TBCL"}, + {I40E_GLVEBTC_TBCH(0, 0), 7, 8, 15, 64, "GLVEBTC_TBCH"}, + {I40E_GLVEBTC_TPCL(0, 0), 7, 8, 15, 64, "GLVEBTC_TPCL"}, + {I40E_GLVEBTC_TPCH(0, 0), 7, 8, 15, 64, "GLVEBTC_TPCH"}, + {I40E_GLV_UPTCL(0), 383, 8, 0, 0, "GLV_UPTCL"}, + {I40E_GLV_UPTCH(0), 383, 8, 0, 0, "GLV_UPTCH"}, + {I40E_GLV_MPTCL(0), 383, 8, 0, 0, "GLV_MPTCL"}, + {I40E_GLV_MPTCH(0), 383, 8, 0, 0, "GLV_MPTCH"}, + {I40E_GLV_BPTCL(0), 383, 8, 0, 0, "GLV_BPTCL"}, + {I40E_GLV_BPTCH(0), 383, 8, 0, 0, "GLV_BPTCH"}, + {I40E_GLSW_UPTCL(0), 15, 8, 0, 0, "GLSW_UPTCL"}, + {I40E_GLSW_UPTCH(0), 15, 8, 0, 0, "GLSW_UPTCH"}, + {I40E_GLSW_MPTCL(0), 15, 8, 0, 0, "GLSW_MPTCL"}, + {I40E_GLSW_MPTCH(0), 15, 8, 0, 0, "GLSW_MPTCH"}, + {I40E_GLSW_BPTCL(0), 15, 8, 0, 0, "GLSW_BPTCL"}, + {I40E_GLSW_BPTCH(0), 15, 8, 0, 0, "GLSW_BPTCH"}, + {I40E_GLV_TEPC(0), 383, 4, 0, 0, "GLV_TEPC"}, + {I40E_GL_FCOEPTC(0), 143, 8, 0, 0, "GL_FCOEPTC"}, + {I40E_GLSW_TDPC(0), 15, 8, 0, 0, "GLSW_TDPC"}, + {I40E_GL_FCOEDWTCL(0), 143, 8, 0, 0, "GL_FCOEDWTCL"}, + {I40E_GL_FCOEDWTCH(0), 143, 8, 0, 0, "GL_FCOEDWTCH"}, + {I40E_GL_FCOEDIXEC(0), 143, 8, 0, 0, "GL_FCOEDIXEC"}, + {I40E_GL_FCOEDIXVC(0), 143, 8, 0, 0, "GL_FCOEDIXVC"}, + {I40E_GL_FCOEDIFTCL(0), 143, 8, 0, 0, "GL_FCOEDIFTCL"}, + {I40E_GLV_GORCL(0), 383, 8, 0, 0, "GLV_GORCL"}, + {I40E_GLV_GORCH(0), 383, 8, 0, 0, "GLV_GORCH"}, + {I40E_GLSW_GORCL(0), 15, 8, 0, 0, "GLSW_GORCL"}, + {I40E_GLSW_GORCH(0), 15, 8, 0, 0, "GLSW_GORCH"}, + {I40E_GLVEBVL_GORCL(0), 127, 8, 0, 0, "GLVEBVL_GORCL"}, + {I40E_GLVEBVL_GORCH(0), 127, 8, 0, 0, "GLVEBVL_GORCH"}, + {I40E_GLVEBTC_RBCL(0, 0), 7, 8, 15, 64, "GLVEBTC_RBCL"}, + {I40E_GLVEBTC_RBCH(0, 0), 7, 8, 15, 64, "GLVEBTC_RBCH"}, + {I40E_GLVEBTC_RPCL(0, 0), 7, 8, 15, 64, "GLVEBTC_RPCL"}, + {I40E_GLVEBTC_RPCH(0, 0), 7, 8, 15, 64, "GLVEBTC_RPCH"}, + {I40E_GLV_UPRCL(0), 383, 8, 0, 0, "GLV_UPRCL"}, + {I40E_GLV_UPRCH(0), 383, 8, 0, 0, "GLV_UPRCH"}, + {I40E_GLV_MPRCL(0), 383, 8, 0, 0, "GLV_MPRCL"}, + {I40E_GLV_MPRCH(0), 383, 8, 0, 0, "GLV_MPRCH"}, + {I40E_GLV_BPRCL(0), 383, 8, 0, 0, "GLV_BPRCL"}, + {I40E_GLV_BPRCH(0), 383, 8, 0, 0, "GLV_BPRCH"}, + {I40E_GLV_RUPP(0), 383, 8, 0, 0, "GLV_RUPP"}, + {I40E_GLSW_UPRCL(0), 15, 8, 0, 0, "GLSW_UPRCL"}, + {I40E_GLSW_UPRCH(0), 15, 8, 0, 0, "GLSW_UPRCH"}, + {I40E_GLSW_MPRCL(0), 15, 8, 0, 0, "GLSW_MPRCL"}, + {I40E_GLSW_MPRCH(0), 15, 8, 0, 0, "GLSW_MPRCH"}, + {I40E_GLSW_BPRCL(0), 15, 8, 0, 0, "GLSW_BPRCL"}, + {I40E_GLSW_BPRCH(0), 15, 8, 0, 0, "GLSW_BPRCH"}, + {I40E_GLSW_RUPP(0), 15, 8, 0, 0, "GLSW_RUPP"}, + {I40E_GLVEBVL_UPCL(0), 127, 8, 0, 0, "GLVEBVL_UPCL"}, + {I40E_GLVEBVL_UPCH(0), 127, 8, 0, 0, "GLVEBVL_UPCH"}, + {I40E_GLVEBVL_MPCL(0), 127, 
8, 0, 0, "GLVEBVL_MPCL"}, + {I40E_GLVEBVL_MPCH(0), 127, 8, 0, 0, "GLVEBVL_MPCH"}, + {I40E_GLVEBVL_BPCL(0), 127, 8, 0, 0, "GLVEBVL_BPCL"}, + {I40E_GLVEBVL_BPCH(0), 127, 8, 0, 0, "GLVEBVL_BPCH"}, + {I40E_GLGEN_STAT_HALT, 0, 0, 0, 0, "GLGEN_STAT_HALT"}, + {I40E_GLGEN_STAT_CLEAR, 0, 0, 0, 0, "GLGEN_STAT_CLEAR"}, + {0, 0, 0, 0, 0, NULL} +}; diff --git a/src/spdk/dpdk/drivers/net/i40e/i40e_rxtx.c b/src/spdk/dpdk/drivers/net/i40e/i40e_rxtx.c new file mode 100644 index 000000000..840b6f387 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/i40e/i40e_rxtx.c @@ -0,0 +1,3516 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2016 Intel Corporation + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "i40e_logs.h" +#include "base/i40e_prototype.h" +#include "base/i40e_type.h" +#include "i40e_ethdev.h" +#include "i40e_rxtx.h" + +#define DEFAULT_TX_RS_THRESH 32 +#define DEFAULT_TX_FREE_THRESH 32 + +#define I40E_TX_MAX_BURST 32 + +#define I40E_DMA_MEM_ALIGN 4096 + +/* Base address of the HW descriptor ring should be 128B aligned. */ +#define I40E_RING_BASE_ALIGN 128 + +#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS) + +#ifdef RTE_LIBRTE_IEEE1588 +#define I40E_TX_IEEE1588_TMST PKT_TX_IEEE1588_TMST +#else +#define I40E_TX_IEEE1588_TMST 0 +#endif + +#define I40E_TX_CKSUM_OFFLOAD_MASK ( \ + PKT_TX_IP_CKSUM | \ + PKT_TX_L4_MASK | \ + PKT_TX_TCP_SEG | \ + PKT_TX_OUTER_IP_CKSUM) + +#define I40E_TX_OFFLOAD_MASK ( \ + PKT_TX_OUTER_IPV4 | \ + PKT_TX_OUTER_IPV6 | \ + PKT_TX_IPV4 | \ + PKT_TX_IPV6 | \ + PKT_TX_IP_CKSUM | \ + PKT_TX_L4_MASK | \ + PKT_TX_OUTER_IP_CKSUM | \ + PKT_TX_TCP_SEG | \ + PKT_TX_QINQ_PKT | \ + PKT_TX_VLAN_PKT | \ + PKT_TX_TUNNEL_MASK | \ + I40E_TX_IEEE1588_TMST) + +#define I40E_TX_OFFLOAD_NOTSUP_MASK \ + (PKT_TX_OFFLOAD_MASK ^ I40E_TX_OFFLOAD_MASK) + +static inline void +i40e_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union i40e_rx_desc *rxdp) +{ + if (rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) & + (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) { + mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED; + mb->vlan_tci = + rte_le_to_cpu_16(rxdp->wb.qword0.lo_dword.l2tag1); + PMD_RX_LOG(DEBUG, "Descriptor l2tag1: %u", + rte_le_to_cpu_16(rxdp->wb.qword0.lo_dword.l2tag1)); + } else { + mb->vlan_tci = 0; + } +#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC + if (rte_le_to_cpu_16(rxdp->wb.qword2.ext_status) & + (1 << I40E_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT)) { + mb->ol_flags |= PKT_RX_QINQ_STRIPPED | PKT_RX_QINQ | + PKT_RX_VLAN_STRIPPED | PKT_RX_VLAN; + mb->vlan_tci_outer = mb->vlan_tci; + mb->vlan_tci = rte_le_to_cpu_16(rxdp->wb.qword2.l2tag2_2); + PMD_RX_LOG(DEBUG, "Descriptor l2tag2_1: %u, l2tag2_2: %u", + rte_le_to_cpu_16(rxdp->wb.qword2.l2tag2_1), + rte_le_to_cpu_16(rxdp->wb.qword2.l2tag2_2)); + } else { + mb->vlan_tci_outer = 0; + } +#endif + PMD_RX_LOG(DEBUG, "Mbuf vlan_tci: %u, vlan_tci_outer: %u", + mb->vlan_tci, mb->vlan_tci_outer); +} + +/* Translate the rx descriptor status to pkt flags */ +static inline uint64_t +i40e_rxd_status_to_pkt_flags(uint64_t qword) +{ + uint64_t flags; + + /* Check if RSS_HASH */ + flags = (((qword >> I40E_RX_DESC_STATUS_FLTSTAT_SHIFT) & + I40E_RX_DESC_FLTSTAT_RSS_HASH) == + I40E_RX_DESC_FLTSTAT_RSS_HASH) ? PKT_RX_RSS_HASH : 0; + + /* Check if FDIR Match */ + flags |= (qword & (1 << I40E_RX_DESC_STATUS_FLM_SHIFT) ? 
+ PKT_RX_FDIR : 0); + + return flags; +} + +static inline uint64_t +i40e_rxd_error_to_pkt_flags(uint64_t qword) +{ + uint64_t flags = 0; + uint64_t error_bits = (qword >> I40E_RXD_QW1_ERROR_SHIFT); + +#define I40E_RX_ERR_BITS 0x3f + if (likely((error_bits & I40E_RX_ERR_BITS) == 0)) { + flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD); + return flags; + } + + if (unlikely(error_bits & (1 << I40E_RX_DESC_ERROR_IPE_SHIFT))) + flags |= PKT_RX_IP_CKSUM_BAD; + else + flags |= PKT_RX_IP_CKSUM_GOOD; + + if (unlikely(error_bits & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT))) + flags |= PKT_RX_L4_CKSUM_BAD; + else + flags |= PKT_RX_L4_CKSUM_GOOD; + + if (unlikely(error_bits & (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT))) + flags |= PKT_RX_EIP_CKSUM_BAD; + + return flags; +} + +/* Function to check and set the ieee1588 timesync index and get the + * appropriate flags. + */ +#ifdef RTE_LIBRTE_IEEE1588 +static inline uint64_t +i40e_get_iee15888_flags(struct rte_mbuf *mb, uint64_t qword) +{ + uint64_t pkt_flags = 0; + uint16_t tsyn = (qword & (I40E_RXD_QW1_STATUS_TSYNVALID_MASK + | I40E_RXD_QW1_STATUS_TSYNINDX_MASK)) + >> I40E_RX_DESC_STATUS_TSYNINDX_SHIFT; + + if ((mb->packet_type & RTE_PTYPE_L2_MASK) + == RTE_PTYPE_L2_ETHER_TIMESYNC) + pkt_flags = PKT_RX_IEEE1588_PTP; + if (tsyn & 0x04) { + pkt_flags |= PKT_RX_IEEE1588_TMST; + mb->timesync = tsyn & 0x03; + } + + return pkt_flags; +} +#endif + +static inline uint64_t +i40e_rxd_build_fdir(volatile union i40e_rx_desc *rxdp, struct rte_mbuf *mb) +{ + uint64_t flags = 0; +#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC + uint16_t flexbh, flexbl; + + flexbh = (rte_le_to_cpu_32(rxdp->wb.qword2.ext_status) >> + I40E_RX_DESC_EXT_STATUS_FLEXBH_SHIFT) & + I40E_RX_DESC_EXT_STATUS_FLEXBH_MASK; + flexbl = (rte_le_to_cpu_32(rxdp->wb.qword2.ext_status) >> + I40E_RX_DESC_EXT_STATUS_FLEXBL_SHIFT) & + I40E_RX_DESC_EXT_STATUS_FLEXBL_MASK; + + + if (flexbh == I40E_RX_DESC_EXT_STATUS_FLEXBH_FD_ID) { + mb->hash.fdir.hi = + rte_le_to_cpu_32(rxdp->wb.qword3.hi_dword.fd_id); + flags |= PKT_RX_FDIR_ID; + } else if (flexbh == I40E_RX_DESC_EXT_STATUS_FLEXBH_FLEX) { + mb->hash.fdir.hi = + rte_le_to_cpu_32(rxdp->wb.qword3.hi_dword.flex_bytes_hi); + flags |= PKT_RX_FDIR_FLX; + } + if (flexbl == I40E_RX_DESC_EXT_STATUS_FLEXBL_FLEX) { + mb->hash.fdir.lo = + rte_le_to_cpu_32(rxdp->wb.qword3.lo_dword.flex_bytes_lo); + flags |= PKT_RX_FDIR_FLX; + } +#else + mb->hash.fdir.hi = + rte_le_to_cpu_32(rxdp->wb.qword0.hi_dword.fd_id); + flags |= PKT_RX_FDIR_ID; +#endif + return flags; +} + +static inline void +i40e_parse_tunneling_params(uint64_t ol_flags, + union i40e_tx_offload tx_offload, + uint32_t *cd_tunneling) +{ + /* EIPT: External (outer) IP header type */ + if (ol_flags & PKT_TX_OUTER_IP_CKSUM) + *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4; + else if (ol_flags & PKT_TX_OUTER_IPV4) + *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM; + else if (ol_flags & PKT_TX_OUTER_IPV6) + *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6; + + /* EIPLEN: External (outer) IP header length, in DWords */ + *cd_tunneling |= (tx_offload.outer_l3_len >> 2) << + I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT; + + /* L4TUNT: L4 Tunneling Type */ + switch (ol_flags & PKT_TX_TUNNEL_MASK) { + case PKT_TX_TUNNEL_IPIP: + /* for non UDP / GRE tunneling, set to 00b */ + break; + case PKT_TX_TUNNEL_VXLAN: + case PKT_TX_TUNNEL_GENEVE: + *cd_tunneling |= I40E_TXD_CTX_UDP_TUNNELING; + break; + case PKT_TX_TUNNEL_GRE: + *cd_tunneling |= I40E_TXD_CTX_GRE_TUNNELING; + break; + default: + PMD_TX_LOG(ERR, "Tunnel type not supported"); + return; + } + + /* L4TUNLEN: L4 
Tunneling Length, in Words + * + * We depend on app to set rte_mbuf.l2_len correctly. + * For IP in GRE it should be set to the length of the GRE + * header; + * for MAC in GRE or MAC in UDP it should be set to the length + * of the GRE or UDP headers plus the inner MAC up to including + * its last Ethertype. + */ + *cd_tunneling |= (tx_offload.l2_len >> 1) << + I40E_TXD_CTX_QW0_NATLEN_SHIFT; +} + +static inline void +i40e_txd_enable_checksum(uint64_t ol_flags, + uint32_t *td_cmd, + uint32_t *td_offset, + union i40e_tx_offload tx_offload) +{ + /* Set MACLEN */ + if (ol_flags & PKT_TX_TUNNEL_MASK) + *td_offset |= (tx_offload.outer_l2_len >> 1) + << I40E_TX_DESC_LENGTH_MACLEN_SHIFT; + else + *td_offset |= (tx_offload.l2_len >> 1) + << I40E_TX_DESC_LENGTH_MACLEN_SHIFT; + + /* Enable L3 checksum offloads */ + if (ol_flags & PKT_TX_IP_CKSUM) { + *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM; + *td_offset |= (tx_offload.l3_len >> 2) + << I40E_TX_DESC_LENGTH_IPLEN_SHIFT; + } else if (ol_flags & PKT_TX_IPV4) { + *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4; + *td_offset |= (tx_offload.l3_len >> 2) + << I40E_TX_DESC_LENGTH_IPLEN_SHIFT; + } else if (ol_flags & PKT_TX_IPV6) { + *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6; + *td_offset |= (tx_offload.l3_len >> 2) + << I40E_TX_DESC_LENGTH_IPLEN_SHIFT; + } + + if (ol_flags & PKT_TX_TCP_SEG) { + *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP; + *td_offset |= (tx_offload.l4_len >> 2) + << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; + return; + } + + /* Enable L4 checksum offloads */ + switch (ol_flags & PKT_TX_L4_MASK) { + case PKT_TX_TCP_CKSUM: + *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP; + *td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) << + I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; + break; + case PKT_TX_SCTP_CKSUM: + *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP; + *td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) << + I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; + break; + case PKT_TX_UDP_CKSUM: + *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP; + *td_offset |= (sizeof(struct rte_udp_hdr) >> 2) << + I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; + break; + default: + break; + } +} + +/* Construct the tx flags */ +static inline uint64_t +i40e_build_ctob(uint32_t td_cmd, + uint32_t td_offset, + unsigned int size, + uint32_t td_tag) +{ + return rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DATA | + ((uint64_t)td_cmd << I40E_TXD_QW1_CMD_SHIFT) | + ((uint64_t)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) | + ((uint64_t)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) | + ((uint64_t)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT)); +} + +static inline int +i40e_xmit_cleanup(struct i40e_tx_queue *txq) +{ + struct i40e_tx_entry *sw_ring = txq->sw_ring; + volatile struct i40e_tx_desc *txd = txq->tx_ring; + uint16_t last_desc_cleaned = txq->last_desc_cleaned; + uint16_t nb_tx_desc = txq->nb_tx_desc; + uint16_t desc_to_clean_to; + uint16_t nb_tx_to_clean; + + desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh); + if (desc_to_clean_to >= nb_tx_desc) + desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc); + + desc_to_clean_to = sw_ring[desc_to_clean_to].last_id; + if ((txd[desc_to_clean_to].cmd_type_offset_bsz & + rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) != + rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE)) { + PMD_TX_FREE_LOG(DEBUG, "TX descriptor %4u is not done " + "(port=%d queue=%d)", desc_to_clean_to, + txq->port_id, txq->queue_id); + return -1; + } + + if (last_desc_cleaned > desc_to_clean_to) + nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) + + desc_to_clean_to); + else + nb_tx_to_clean = (uint16_t)(desc_to_clean_to 
- + last_desc_cleaned); + + txd[desc_to_clean_to].cmd_type_offset_bsz = 0; + + txq->last_desc_cleaned = desc_to_clean_to; + txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean); + + return 0; +} + +static inline int +#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC +check_rx_burst_bulk_alloc_preconditions(struct i40e_rx_queue *rxq) +#else +check_rx_burst_bulk_alloc_preconditions(__rte_unused struct i40e_rx_queue *rxq) +#endif +{ + int ret = 0; + +#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC + if (!(rxq->rx_free_thresh >= RTE_PMD_I40E_RX_MAX_BURST)) { + PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: " + "rxq->rx_free_thresh=%d, " + "RTE_PMD_I40E_RX_MAX_BURST=%d", + rxq->rx_free_thresh, RTE_PMD_I40E_RX_MAX_BURST); + ret = -EINVAL; + } else if (!(rxq->rx_free_thresh < rxq->nb_rx_desc)) { + PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: " + "rxq->rx_free_thresh=%d, " + "rxq->nb_rx_desc=%d", + rxq->rx_free_thresh, rxq->nb_rx_desc); + ret = -EINVAL; + } else if (rxq->nb_rx_desc % rxq->rx_free_thresh != 0) { + PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: " + "rxq->nb_rx_desc=%d, " + "rxq->rx_free_thresh=%d", + rxq->nb_rx_desc, rxq->rx_free_thresh); + ret = -EINVAL; + } +#else + ret = -EINVAL; +#endif + + return ret; +} + +#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC +#define I40E_LOOK_AHEAD 8 +#if (I40E_LOOK_AHEAD != 8) +#error "PMD I40E: I40E_LOOK_AHEAD must be 8\n" +#endif +static inline int +i40e_rx_scan_hw_ring(struct i40e_rx_queue *rxq) +{ + volatile union i40e_rx_desc *rxdp; + struct i40e_rx_entry *rxep; + struct rte_mbuf *mb; + uint16_t pkt_len; + uint64_t qword1; + uint32_t rx_status; + int32_t s[I40E_LOOK_AHEAD], nb_dd; + int32_t i, j, nb_rx = 0; + uint64_t pkt_flags; + uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl; + + rxdp = &rxq->rx_ring[rxq->rx_tail]; + rxep = &rxq->sw_ring[rxq->rx_tail]; + + qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len); + rx_status = (qword1 & I40E_RXD_QW1_STATUS_MASK) >> + I40E_RXD_QW1_STATUS_SHIFT; + + /* Make sure there is at least 1 packet to receive */ + if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT))) + return 0; + + /** + * Scan LOOK_AHEAD descriptors at a time to determine which + * descriptors reference packets that are ready to be received. 
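The three bulk-allocation preconditions tested just above condense into a single predicate. The sketch below is illustrative only; rx_bulk_alloc_allowed and EXAMPLE_RX_MAX_BURST are invented names, with the burst constant standing in for RTE_PMD_I40E_RX_MAX_BURST.

#include <stdbool.h>
#include <stdint.h>

#define EXAMPLE_RX_MAX_BURST 32 /* stands in for RTE_PMD_I40E_RX_MAX_BURST */

/* The bulk-allocation Rx path is usable only when the refill threshold
 * covers a full burst, fits inside the ring, and divides the ring size
 * evenly so a refill block never wraps around the ring.
 */
static bool
rx_bulk_alloc_allowed(uint16_t nb_rx_desc, uint16_t rx_free_thresh)
{
        return rx_free_thresh >= EXAMPLE_RX_MAX_BURST &&
               rx_free_thresh < nb_rx_desc &&
               nb_rx_desc % rx_free_thresh == 0;
}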
+ */ + for (i = 0; i < RTE_PMD_I40E_RX_MAX_BURST; i+=I40E_LOOK_AHEAD, + rxdp += I40E_LOOK_AHEAD, rxep += I40E_LOOK_AHEAD) { + /* Read desc statuses backwards to avoid race condition */ + for (j = I40E_LOOK_AHEAD - 1; j >= 0; j--) { + qword1 = rte_le_to_cpu_64(\ + rxdp[j].wb.qword1.status_error_len); + s[j] = (qword1 & I40E_RXD_QW1_STATUS_MASK) >> + I40E_RXD_QW1_STATUS_SHIFT; + } + + rte_smp_rmb(); + + /* Compute how many status bits were set */ + for (j = 0, nb_dd = 0; j < I40E_LOOK_AHEAD; j++) + nb_dd += s[j] & (1 << I40E_RX_DESC_STATUS_DD_SHIFT); + + nb_rx += nb_dd; + + /* Translate descriptor info to mbuf parameters */ + for (j = 0; j < nb_dd; j++) { + mb = rxep[j].mbuf; + qword1 = rte_le_to_cpu_64(\ + rxdp[j].wb.qword1.status_error_len); + pkt_len = ((qword1 & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> + I40E_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len; + mb->data_len = pkt_len; + mb->pkt_len = pkt_len; + mb->ol_flags = 0; + i40e_rxd_to_vlan_tci(mb, &rxdp[j]); + pkt_flags = i40e_rxd_status_to_pkt_flags(qword1); + pkt_flags |= i40e_rxd_error_to_pkt_flags(qword1); + mb->packet_type = + ptype_tbl[(uint8_t)((qword1 & + I40E_RXD_QW1_PTYPE_MASK) >> + I40E_RXD_QW1_PTYPE_SHIFT)]; + if (pkt_flags & PKT_RX_RSS_HASH) + mb->hash.rss = rte_le_to_cpu_32(\ + rxdp[j].wb.qword0.hi_dword.rss); + if (pkt_flags & PKT_RX_FDIR) + pkt_flags |= i40e_rxd_build_fdir(&rxdp[j], mb); + +#ifdef RTE_LIBRTE_IEEE1588 + pkt_flags |= i40e_get_iee15888_flags(mb, qword1); +#endif + mb->ol_flags |= pkt_flags; + + } + + for (j = 0; j < I40E_LOOK_AHEAD; j++) + rxq->rx_stage[i + j] = rxep[j].mbuf; + + if (nb_dd != I40E_LOOK_AHEAD) + break; + } + + /* Clear software ring entries */ + for (i = 0; i < nb_rx; i++) + rxq->sw_ring[rxq->rx_tail + i].mbuf = NULL; + + return nb_rx; +} + +static inline uint16_t +i40e_rx_fill_from_stage(struct i40e_rx_queue *rxq, + struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + uint16_t i; + struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail]; + + nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail); + + for (i = 0; i < nb_pkts; i++) + rx_pkts[i] = stage[i]; + + rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts); + rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts); + + return nb_pkts; +} + +static inline int +i40e_rx_alloc_bufs(struct i40e_rx_queue *rxq) +{ + volatile union i40e_rx_desc *rxdp; + struct i40e_rx_entry *rxep; + struct rte_mbuf *mb; + uint16_t alloc_idx, i; + uint64_t dma_addr; + int diag; + + /* Allocate buffers in bulk */ + alloc_idx = (uint16_t)(rxq->rx_free_trigger - + (rxq->rx_free_thresh - 1)); + rxep = &(rxq->sw_ring[alloc_idx]); + diag = rte_mempool_get_bulk(rxq->mp, (void *)rxep, + rxq->rx_free_thresh); + if (unlikely(diag != 0)) { + PMD_DRV_LOG(ERR, "Failed to get mbufs in bulk"); + return -ENOMEM; + } + + rxdp = &rxq->rx_ring[alloc_idx]; + for (i = 0; i < rxq->rx_free_thresh; i++) { + if (likely(i < (rxq->rx_free_thresh - 1))) + /* Prefetch next mbuf */ + rte_prefetch0(rxep[i + 1].mbuf); + + mb = rxep[i].mbuf; + rte_mbuf_refcnt_set(mb, 1); + mb->next = NULL; + mb->data_off = RTE_PKTMBUF_HEADROOM; + mb->nb_segs = 1; + mb->port = rxq->port_id; + dma_addr = rte_cpu_to_le_64(\ + rte_mbuf_data_iova_default(mb)); + rxdp[i].read.hdr_addr = 0; + rxdp[i].read.pkt_addr = dma_addr; + } + + /* Update rx tail regsiter */ + I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_free_trigger); + + rxq->rx_free_trigger = + (uint16_t)(rxq->rx_free_trigger + rxq->rx_free_thresh); + if (rxq->rx_free_trigger >= rxq->nb_rx_desc) + rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1); 
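The refill bookkeeping that ends here can be read in isolation: buffers are replenished in blocks of rx_free_thresh descriptors ending at rx_free_trigger, and after each refill the trigger advances one block, wrapping back to rx_free_thresh - 1 once it passes the end of the ring. A minimal sketch; refill_state, refill_block_start and refill_advance are illustrative names.

#include <stdint.h>

struct refill_state {
        uint16_t rx_free_trigger; /* last descriptor of the next refill block */
        uint16_t rx_free_thresh;  /* descriptors replenished per refill */
        uint16_t nb_rx_desc;      /* ring size */
};

/* First descriptor index of the block that ends at rx_free_trigger. */
static uint16_t
refill_block_start(const struct refill_state *s)
{
        return (uint16_t)(s->rx_free_trigger - (s->rx_free_thresh - 1));
}

/* Advance the trigger by one block and wrap at the end of the ring. */
static void
refill_advance(struct refill_state *s)
{
        s->rx_free_trigger = (uint16_t)(s->rx_free_trigger + s->rx_free_thresh);
        if (s->rx_free_trigger >= s->nb_rx_desc)
                s->rx_free_trigger = (uint16_t)(s->rx_free_thresh - 1);
}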
+ + return 0; +} + +static inline uint16_t +rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) +{ + struct i40e_rx_queue *rxq = (struct i40e_rx_queue *)rx_queue; + struct rte_eth_dev *dev; + uint16_t nb_rx = 0; + + if (!nb_pkts) + return 0; + + if (rxq->rx_nb_avail) + return i40e_rx_fill_from_stage(rxq, rx_pkts, nb_pkts); + + nb_rx = (uint16_t)i40e_rx_scan_hw_ring(rxq); + rxq->rx_next_avail = 0; + rxq->rx_nb_avail = nb_rx; + rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx); + + if (rxq->rx_tail > rxq->rx_free_trigger) { + if (i40e_rx_alloc_bufs(rxq) != 0) { + uint16_t i, j; + + dev = I40E_VSI_TO_ETH_DEV(rxq->vsi); + dev->data->rx_mbuf_alloc_failed += + rxq->rx_free_thresh; + + rxq->rx_nb_avail = 0; + rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx); + for (i = 0, j = rxq->rx_tail; i < nb_rx; i++, j++) + rxq->sw_ring[j].mbuf = rxq->rx_stage[i]; + + return 0; + } + } + + if (rxq->rx_tail >= rxq->nb_rx_desc) + rxq->rx_tail = 0; + + if (rxq->rx_nb_avail) + return i40e_rx_fill_from_stage(rxq, rx_pkts, nb_pkts); + + return 0; +} + +static uint16_t +i40e_recv_pkts_bulk_alloc(void *rx_queue, + struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + uint16_t nb_rx = 0, n, count; + + if (unlikely(nb_pkts == 0)) + return 0; + + if (likely(nb_pkts <= RTE_PMD_I40E_RX_MAX_BURST)) + return rx_recv_pkts(rx_queue, rx_pkts, nb_pkts); + + while (nb_pkts) { + n = RTE_MIN(nb_pkts, RTE_PMD_I40E_RX_MAX_BURST); + count = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n); + nb_rx = (uint16_t)(nb_rx + count); + nb_pkts = (uint16_t)(nb_pkts - count); + if (count < n) + break; + } + + return nb_rx; +} +#else +static uint16_t +i40e_recv_pkts_bulk_alloc(void __rte_unused *rx_queue, + struct rte_mbuf __rte_unused **rx_pkts, + uint16_t __rte_unused nb_pkts) +{ + return 0; +} +#endif /* RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC */ + +uint16_t +i40e_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) +{ + struct i40e_rx_queue *rxq; + volatile union i40e_rx_desc *rx_ring; + volatile union i40e_rx_desc *rxdp; + union i40e_rx_desc rxd; + struct i40e_rx_entry *sw_ring; + struct i40e_rx_entry *rxe; + struct rte_eth_dev *dev; + struct rte_mbuf *rxm; + struct rte_mbuf *nmb; + uint16_t nb_rx; + uint32_t rx_status; + uint64_t qword1; + uint16_t rx_packet_len; + uint16_t rx_id, nb_hold; + uint64_t dma_addr; + uint64_t pkt_flags; + uint32_t *ptype_tbl; + + nb_rx = 0; + nb_hold = 0; + rxq = rx_queue; + rx_id = rxq->rx_tail; + rx_ring = rxq->rx_ring; + sw_ring = rxq->sw_ring; + ptype_tbl = rxq->vsi->adapter->ptype_tbl; + + while (nb_rx < nb_pkts) { + rxdp = &rx_ring[rx_id]; + qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len); + rx_status = (qword1 & I40E_RXD_QW1_STATUS_MASK) + >> I40E_RXD_QW1_STATUS_SHIFT; + + /* Check the DD bit first */ + if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT))) + break; + + nmb = rte_mbuf_raw_alloc(rxq->mp); + if (unlikely(!nmb)) { + dev = I40E_VSI_TO_ETH_DEV(rxq->vsi); + dev->data->rx_mbuf_alloc_failed++; + break; + } + + rxd = *rxdp; + nb_hold++; + rxe = &sw_ring[rx_id]; + rx_id++; + if (unlikely(rx_id == rxq->nb_rx_desc)) + rx_id = 0; + + /* Prefetch next mbuf */ + rte_prefetch0(sw_ring[rx_id].mbuf); + + /** + * When next RX descriptor is on a cache line boundary, + * prefetch the next 4 RX descriptors and next 8 pointers + * to mbufs. 
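Every receive path starts from the same descriptor-done test on the status_error_len quad word. The helper below is a condensed sketch of that test, reusing the I40E_RXD_QW1_STATUS_* and I40E_RX_DESC_STATUS_DD_SHIFT macros the file already pulls in from base/i40e_type.h; rx_desc_done itself is an invented name.

#include <stdbool.h>
#include <stdint.h>
#include "base/i40e_type.h" /* I40E_RXD_QW1_* field macros, as included above */

/* qword1 is the status_error_len quad word already converted to CPU byte
 * order; bit DD of the status field reports that the hardware has written
 * the descriptor back and the packet data is valid.
 */
static bool
rx_desc_done(uint64_t qword1)
{
        uint32_t rx_status = (qword1 & I40E_RXD_QW1_STATUS_MASK) >>
                             I40E_RXD_QW1_STATUS_SHIFT;

        return (rx_status & (1u << I40E_RX_DESC_STATUS_DD_SHIFT)) != 0;
}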
+ */ + if ((rx_id & 0x3) == 0) { + rte_prefetch0(&rx_ring[rx_id]); + rte_prefetch0(&sw_ring[rx_id]); + } + rxm = rxe->mbuf; + rxe->mbuf = nmb; + dma_addr = + rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb)); + rxdp->read.hdr_addr = 0; + rxdp->read.pkt_addr = dma_addr; + + rx_packet_len = ((qword1 & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> + I40E_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len; + + rxm->data_off = RTE_PKTMBUF_HEADROOM; + rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM)); + rxm->nb_segs = 1; + rxm->next = NULL; + rxm->pkt_len = rx_packet_len; + rxm->data_len = rx_packet_len; + rxm->port = rxq->port_id; + rxm->ol_flags = 0; + i40e_rxd_to_vlan_tci(rxm, &rxd); + pkt_flags = i40e_rxd_status_to_pkt_flags(qword1); + pkt_flags |= i40e_rxd_error_to_pkt_flags(qword1); + rxm->packet_type = + ptype_tbl[(uint8_t)((qword1 & + I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT)]; + if (pkt_flags & PKT_RX_RSS_HASH) + rxm->hash.rss = + rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss); + if (pkt_flags & PKT_RX_FDIR) + pkt_flags |= i40e_rxd_build_fdir(&rxd, rxm); + +#ifdef RTE_LIBRTE_IEEE1588 + pkt_flags |= i40e_get_iee15888_flags(rxm, qword1); +#endif + rxm->ol_flags |= pkt_flags; + + rx_pkts[nb_rx++] = rxm; + } + rxq->rx_tail = rx_id; + + /** + * If the number of free RX descriptors is greater than the RX free + * threshold of the queue, advance the receive tail register of queue. + * Update that register with the value of the last processed RX + * descriptor minus 1. + */ + nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold); + if (nb_hold > rxq->rx_free_thresh) { + rx_id = (uint16_t) ((rx_id == 0) ? + (rxq->nb_rx_desc - 1) : (rx_id - 1)); + I40E_PCI_REG_WRITE(rxq->qrx_tail, rx_id); + nb_hold = 0; + } + rxq->nb_rx_hold = nb_hold; + + return nb_rx; +} + +uint16_t +i40e_recv_scattered_pkts(void *rx_queue, + struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + struct i40e_rx_queue *rxq = rx_queue; + volatile union i40e_rx_desc *rx_ring = rxq->rx_ring; + volatile union i40e_rx_desc *rxdp; + union i40e_rx_desc rxd; + struct i40e_rx_entry *sw_ring = rxq->sw_ring; + struct i40e_rx_entry *rxe; + struct rte_mbuf *first_seg = rxq->pkt_first_seg; + struct rte_mbuf *last_seg = rxq->pkt_last_seg; + struct rte_mbuf *nmb, *rxm; + uint16_t rx_id = rxq->rx_tail; + uint16_t nb_rx = 0, nb_hold = 0, rx_packet_len; + struct rte_eth_dev *dev; + uint32_t rx_status; + uint64_t qword1; + uint64_t dma_addr; + uint64_t pkt_flags; + uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl; + + while (nb_rx < nb_pkts) { + rxdp = &rx_ring[rx_id]; + qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len); + rx_status = (qword1 & I40E_RXD_QW1_STATUS_MASK) >> + I40E_RXD_QW1_STATUS_SHIFT; + + /* Check the DD bit */ + if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT))) + break; + + nmb = rte_mbuf_raw_alloc(rxq->mp); + if (unlikely(!nmb)) { + dev = I40E_VSI_TO_ETH_DEV(rxq->vsi); + dev->data->rx_mbuf_alloc_failed++; + break; + } + + rxd = *rxdp; + nb_hold++; + rxe = &sw_ring[rx_id]; + rx_id++; + if (rx_id == rxq->nb_rx_desc) + rx_id = 0; + + /* Prefetch next mbuf */ + rte_prefetch0(sw_ring[rx_id].mbuf); + + /** + * When next RX descriptor is on a cache line boundary, + * prefetch the next 4 RX descriptors and next 8 pointers + * to mbufs. 
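The tail-register handling at the end of the burst loop follows one rule: hold back doorbell writes until more than rx_free_thresh descriptors have been recycled, then advertise "last processed minus one" so the tail can never catch up with the head. A sketch, where rx_flush_tail and the write_tail callback are invented stand-ins for the I40E_PCI_REG_WRITE of the queue tail.

#include <stdint.h>

/* nb_hold counts descriptors returned to the ring since the last doorbell. */
static void
rx_flush_tail(uint16_t *nb_hold, uint16_t rx_free_thresh,
              uint16_t rx_id, uint16_t nb_rx_desc,
              void (*write_tail)(uint16_t tail))
{
        if (*nb_hold > rx_free_thresh) {
                uint16_t tail = (uint16_t)(rx_id == 0 ?
                                           nb_rx_desc - 1 : rx_id - 1);

                write_tail(tail);
                *nb_hold = 0;
        }
}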
+ */ + if ((rx_id & 0x3) == 0) { + rte_prefetch0(&rx_ring[rx_id]); + rte_prefetch0(&sw_ring[rx_id]); + } + + rxm = rxe->mbuf; + rxe->mbuf = nmb; + dma_addr = + rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb)); + + /* Set data buffer address and data length of the mbuf */ + rxdp->read.hdr_addr = 0; + rxdp->read.pkt_addr = dma_addr; + rx_packet_len = (qword1 & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> + I40E_RXD_QW1_LENGTH_PBUF_SHIFT; + rxm->data_len = rx_packet_len; + rxm->data_off = RTE_PKTMBUF_HEADROOM; + + /** + * If this is the first buffer of the received packet, set the + * pointer to the first mbuf of the packet and initialize its + * context. Otherwise, update the total length and the number + * of segments of the current scattered packet, and update the + * pointer to the last mbuf of the current packet. + */ + if (!first_seg) { + first_seg = rxm; + first_seg->nb_segs = 1; + first_seg->pkt_len = rx_packet_len; + } else { + first_seg->pkt_len = + (uint16_t)(first_seg->pkt_len + + rx_packet_len); + first_seg->nb_segs++; + last_seg->next = rxm; + } + + /** + * If this is not the last buffer of the received packet, + * update the pointer to the last mbuf of the current scattered + * packet and continue to parse the RX ring. + */ + if (!(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT))) { + last_seg = rxm; + continue; + } + + /** + * This is the last buffer of the received packet. If the CRC + * is not stripped by the hardware: + * - Subtract the CRC length from the total packet length. + * - If the last buffer only contains the whole CRC or a part + * of it, free the mbuf associated to the last buffer. If part + * of the CRC is also contained in the previous mbuf, subtract + * the length of that CRC part from the data length of the + * previous mbuf. + */ + rxm->next = NULL; + if (unlikely(rxq->crc_len > 0)) { + first_seg->pkt_len -= RTE_ETHER_CRC_LEN; + if (rx_packet_len <= RTE_ETHER_CRC_LEN) { + rte_pktmbuf_free_seg(rxm); + first_seg->nb_segs--; + last_seg->data_len = + (uint16_t)(last_seg->data_len - + (RTE_ETHER_CRC_LEN - rx_packet_len)); + last_seg->next = NULL; + } else + rxm->data_len = (uint16_t)(rx_packet_len - + RTE_ETHER_CRC_LEN); + } + + first_seg->port = rxq->port_id; + first_seg->ol_flags = 0; + i40e_rxd_to_vlan_tci(first_seg, &rxd); + pkt_flags = i40e_rxd_status_to_pkt_flags(qword1); + pkt_flags |= i40e_rxd_error_to_pkt_flags(qword1); + first_seg->packet_type = + ptype_tbl[(uint8_t)((qword1 & + I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT)]; + if (pkt_flags & PKT_RX_RSS_HASH) + first_seg->hash.rss = + rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss); + if (pkt_flags & PKT_RX_FDIR) + pkt_flags |= i40e_rxd_build_fdir(&rxd, first_seg); + +#ifdef RTE_LIBRTE_IEEE1588 + pkt_flags |= i40e_get_iee15888_flags(first_seg, qword1); +#endif + first_seg->ol_flags |= pkt_flags; + + /* Prefetch data of first segment, if configured to do so. */ + rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr, + first_seg->data_off)); + rx_pkts[nb_rx++] = first_seg; + first_seg = NULL; + } + + /* Record index of the next RX descriptor to probe. */ + rxq->rx_tail = rx_id; + rxq->pkt_first_seg = first_seg; + rxq->pkt_last_seg = last_seg; + + /** + * If the number of free RX descriptors is greater than the RX free + * threshold of the queue, advance the Receive Descriptor Tail (RDT) + * register. 
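The software CRC strip is the subtle part of the scattered loop: the 4-byte CRC always comes off the reported packet length, and when the final segment carries nothing but (part of) the CRC, that segment is freed and the leftover CRC bytes are trimmed from the previous segment instead. A minimal sketch of just the length arithmetic; trim_crc, struct seg_trim and EXAMPLE_CRC_LEN are illustrative names.

#include <stdbool.h>
#include <stdint.h>

#define EXAMPLE_CRC_LEN 4 /* stands in for RTE_ETHER_CRC_LEN */

struct seg_trim {
        bool drop_last;    /* last segment held only CRC bytes */
        uint16_t last_len; /* resulting data_len of the last segment */
        uint16_t prev_len; /* resulting data_len of the previous segment */
};

/* last_len/prev_len are the data lengths of the last and second-to-last
 * segments before the CRC is stripped in software.
 */
static struct seg_trim
trim_crc(uint16_t last_len, uint16_t prev_len)
{
        struct seg_trim t = { false, last_len, prev_len };

        if (last_len <= EXAMPLE_CRC_LEN) {
                t.drop_last = true;
                t.last_len = 0;
                t.prev_len = (uint16_t)(prev_len -
                                        (EXAMPLE_CRC_LEN - last_len));
        } else {
                t.last_len = (uint16_t)(last_len - EXAMPLE_CRC_LEN);
        }
        return t;
}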
Update the RDT with the value of the last processed RX + * descriptor minus 1, to guarantee that the RDT register is never + * equal to the RDH register, which creates a "full" ring situtation + * from the hardware point of view. + */ + nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold); + if (nb_hold > rxq->rx_free_thresh) { + rx_id = (uint16_t)(rx_id == 0 ? + (rxq->nb_rx_desc - 1) : (rx_id - 1)); + I40E_PCI_REG_WRITE(rxq->qrx_tail, rx_id); + nb_hold = 0; + } + rxq->nb_rx_hold = nb_hold; + + return nb_rx; +} + +/* Check if the context descriptor is needed for TX offloading */ +static inline uint16_t +i40e_calc_context_desc(uint64_t flags) +{ + static uint64_t mask = PKT_TX_OUTER_IP_CKSUM | + PKT_TX_TCP_SEG | + PKT_TX_QINQ_PKT | + PKT_TX_TUNNEL_MASK; + +#ifdef RTE_LIBRTE_IEEE1588 + mask |= PKT_TX_IEEE1588_TMST; +#endif + + return (flags & mask) ? 1 : 0; +} + +/* set i40e TSO context descriptor */ +static inline uint64_t +i40e_set_tso_ctx(struct rte_mbuf *mbuf, union i40e_tx_offload tx_offload) +{ + uint64_t ctx_desc = 0; + uint32_t cd_cmd, hdr_len, cd_tso_len; + + if (!tx_offload.l4_len) { + PMD_DRV_LOG(DEBUG, "L4 length set to 0"); + return ctx_desc; + } + + hdr_len = tx_offload.l2_len + tx_offload.l3_len + tx_offload.l4_len; + hdr_len += (mbuf->ol_flags & PKT_TX_TUNNEL_MASK) ? + tx_offload.outer_l2_len + tx_offload.outer_l3_len : 0; + + cd_cmd = I40E_TX_CTX_DESC_TSO; + cd_tso_len = mbuf->pkt_len - hdr_len; + ctx_desc |= ((uint64_t)cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) | + ((uint64_t)cd_tso_len << + I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) | + ((uint64_t)mbuf->tso_segsz << + I40E_TXD_CTX_QW1_MSS_SHIFT); + + return ctx_desc; +} + +/* HW requires that Tx buffer size ranges from 1B up to (16K-1)B. */ +#define I40E_MAX_DATA_PER_TXD \ + (I40E_TXD_QW1_TX_BUF_SZ_MASK >> I40E_TXD_QW1_TX_BUF_SZ_SHIFT) +/* Calculate the number of TX descriptors needed for each pkt */ +static inline uint16_t +i40e_calc_pkt_desc(struct rte_mbuf *tx_pkt) +{ + struct rte_mbuf *txd = tx_pkt; + uint16_t count = 0; + + while (txd != NULL) { + count += DIV_ROUND_UP(txd->data_len, I40E_MAX_DATA_PER_TXD); + txd = txd->next; + } + + return count; +} + +uint16_t +i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) +{ + struct i40e_tx_queue *txq; + struct i40e_tx_entry *sw_ring; + struct i40e_tx_entry *txe, *txn; + volatile struct i40e_tx_desc *txd; + volatile struct i40e_tx_desc *txr; + struct rte_mbuf *tx_pkt; + struct rte_mbuf *m_seg; + uint32_t cd_tunneling_params; + uint16_t tx_id; + uint16_t nb_tx; + uint32_t td_cmd; + uint32_t td_offset; + uint32_t td_tag; + uint64_t ol_flags; + uint16_t nb_used; + uint16_t nb_ctx; + uint16_t tx_last; + uint16_t slen; + uint64_t buf_dma_addr; + union i40e_tx_offload tx_offload = {0}; + + txq = tx_queue; + sw_ring = txq->sw_ring; + txr = txq->tx_ring; + tx_id = txq->tx_tail; + txe = &sw_ring[tx_id]; + + /* Check if the descriptor ring needs to be cleaned. */ + if (txq->nb_tx_free < txq->tx_free_thresh) + (void)i40e_xmit_cleanup(txq); + + for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) { + td_cmd = 0; + td_tag = 0; + td_offset = 0; + + tx_pkt = *tx_pkts++; + RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf); + + ol_flags = tx_pkt->ol_flags; + tx_offload.l2_len = tx_pkt->l2_len; + tx_offload.l3_len = tx_pkt->l3_len; + tx_offload.outer_l2_len = tx_pkt->outer_l2_len; + tx_offload.outer_l3_len = tx_pkt->outer_l3_len; + tx_offload.l4_len = tx_pkt->l4_len; + tx_offload.tso_segsz = tx_pkt->tso_segsz; + + /* Calculate the number of context descriptors needed. 
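The descriptor budgeting above deserves a worked example: one data descriptor can carry at most 16K - 1 bytes, so on the TSO path each mbuf segment contributes a ceiling division rather than a flat count of one. With segments of 20000, 9000 and 100 bytes a packet needs 2 + 1 + 1 = 4 data descriptors, plus the context descriptor counted separately. Sketch only; descs_for_segment and EXAMPLE_MAX_DATA_PER_TXD are invented names.

#include <stdint.h>

#define EXAMPLE_MAX_DATA_PER_TXD ((16u * 1024u) - 1u) /* per-descriptor limit */

/* Ceiling division of one segment's data_len by the per-descriptor limit. */
static uint16_t
descs_for_segment(uint16_t data_len)
{
        return (uint16_t)((data_len + EXAMPLE_MAX_DATA_PER_TXD - 1) /
                          EXAMPLE_MAX_DATA_PER_TXD);
}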
*/ + nb_ctx = i40e_calc_context_desc(ol_flags); + + /** + * The number of descriptors that must be allocated for + * a packet equals to the number of the segments of that + * packet plus 1 context descriptor if needed. + * Recalculate the needed tx descs when TSO enabled in case + * the mbuf data size exceeds max data size that hw allows + * per tx desc. + */ + if (ol_flags & PKT_TX_TCP_SEG) + nb_used = (uint16_t)(i40e_calc_pkt_desc(tx_pkt) + + nb_ctx); + else + nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx); + tx_last = (uint16_t)(tx_id + nb_used - 1); + + /* Circular ring */ + if (tx_last >= txq->nb_tx_desc) + tx_last = (uint16_t)(tx_last - txq->nb_tx_desc); + + if (nb_used > txq->nb_tx_free) { + if (i40e_xmit_cleanup(txq) != 0) { + if (nb_tx == 0) + return 0; + goto end_of_tx; + } + if (unlikely(nb_used > txq->tx_rs_thresh)) { + while (nb_used > txq->nb_tx_free) { + if (i40e_xmit_cleanup(txq) != 0) { + if (nb_tx == 0) + return 0; + goto end_of_tx; + } + } + } + } + + /* Descriptor based VLAN insertion */ + if (ol_flags & (PKT_TX_VLAN_PKT | PKT_TX_QINQ_PKT)) { + td_cmd |= I40E_TX_DESC_CMD_IL2TAG1; + td_tag = tx_pkt->vlan_tci; + } + + /* Always enable CRC offload insertion */ + td_cmd |= I40E_TX_DESC_CMD_ICRC; + + /* Fill in tunneling parameters if necessary */ + cd_tunneling_params = 0; + if (ol_flags & PKT_TX_TUNNEL_MASK) + i40e_parse_tunneling_params(ol_flags, tx_offload, + &cd_tunneling_params); + /* Enable checksum offloading */ + if (ol_flags & I40E_TX_CKSUM_OFFLOAD_MASK) + i40e_txd_enable_checksum(ol_flags, &td_cmd, + &td_offset, tx_offload); + + if (nb_ctx) { + /* Setup TX context descriptor if required */ + volatile struct i40e_tx_context_desc *ctx_txd = + (volatile struct i40e_tx_context_desc *)\ + &txr[tx_id]; + uint16_t cd_l2tag2 = 0; + uint64_t cd_type_cmd_tso_mss = + I40E_TX_DESC_DTYPE_CONTEXT; + + txn = &sw_ring[txe->next_id]; + RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf); + if (txe->mbuf != NULL) { + rte_pktmbuf_free_seg(txe->mbuf); + txe->mbuf = NULL; + } + + /* TSO enabled means no timestamp */ + if (ol_flags & PKT_TX_TCP_SEG) + cd_type_cmd_tso_mss |= + i40e_set_tso_ctx(tx_pkt, tx_offload); + else { +#ifdef RTE_LIBRTE_IEEE1588 + if (ol_flags & PKT_TX_IEEE1588_TMST) + cd_type_cmd_tso_mss |= + ((uint64_t)I40E_TX_CTX_DESC_TSYN << + I40E_TXD_CTX_QW1_CMD_SHIFT); +#endif + } + + ctx_txd->tunneling_params = + rte_cpu_to_le_32(cd_tunneling_params); + if (ol_flags & PKT_TX_QINQ_PKT) { + cd_l2tag2 = tx_pkt->vlan_tci_outer; + cd_type_cmd_tso_mss |= + ((uint64_t)I40E_TX_CTX_DESC_IL2TAG2 << + I40E_TXD_CTX_QW1_CMD_SHIFT); + } + ctx_txd->l2tag2 = rte_cpu_to_le_16(cd_l2tag2); + ctx_txd->type_cmd_tso_mss = + rte_cpu_to_le_64(cd_type_cmd_tso_mss); + + PMD_TX_LOG(DEBUG, "mbuf: %p, TCD[%u]:\n" + "tunneling_params: %#x;\n" + "l2tag2: %#hx;\n" + "rsvd: %#hx;\n" + "type_cmd_tso_mss: %#"PRIx64";\n", + tx_pkt, tx_id, + ctx_txd->tunneling_params, + ctx_txd->l2tag2, + ctx_txd->rsvd, + ctx_txd->type_cmd_tso_mss); + + txe->last_id = tx_last; + tx_id = txe->next_id; + txe = txn; + } + + m_seg = tx_pkt; + do { + txd = &txr[tx_id]; + txn = &sw_ring[txe->next_id]; + + if (txe->mbuf) + rte_pktmbuf_free_seg(txe->mbuf); + txe->mbuf = m_seg; + + /* Setup TX Descriptor */ + slen = m_seg->data_len; + buf_dma_addr = rte_mbuf_data_iova(m_seg); + + while ((ol_flags & PKT_TX_TCP_SEG) && + unlikely(slen > I40E_MAX_DATA_PER_TXD)) { + txd->buffer_addr = + rte_cpu_to_le_64(buf_dma_addr); + txd->cmd_type_offset_bsz = + i40e_build_ctob(td_cmd, + td_offset, I40E_MAX_DATA_PER_TXD, + td_tag); + + buf_dma_addr += 
I40E_MAX_DATA_PER_TXD; + slen -= I40E_MAX_DATA_PER_TXD; + + txe->last_id = tx_last; + tx_id = txe->next_id; + txe = txn; + txd = &txr[tx_id]; + txn = &sw_ring[txe->next_id]; + } + PMD_TX_LOG(DEBUG, "mbuf: %p, TDD[%u]:\n" + "buf_dma_addr: %#"PRIx64";\n" + "td_cmd: %#x;\n" + "td_offset: %#x;\n" + "td_len: %u;\n" + "td_tag: %#x;\n", + tx_pkt, tx_id, buf_dma_addr, + td_cmd, td_offset, slen, td_tag); + + txd->buffer_addr = rte_cpu_to_le_64(buf_dma_addr); + txd->cmd_type_offset_bsz = i40e_build_ctob(td_cmd, + td_offset, slen, td_tag); + txe->last_id = tx_last; + tx_id = txe->next_id; + txe = txn; + m_seg = m_seg->next; + } while (m_seg != NULL); + + /* The last packet data descriptor needs End Of Packet (EOP) */ + td_cmd |= I40E_TX_DESC_CMD_EOP; + txq->nb_tx_used = (uint16_t)(txq->nb_tx_used + nb_used); + txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used); + + if (txq->nb_tx_used >= txq->tx_rs_thresh) { + PMD_TX_FREE_LOG(DEBUG, + "Setting RS bit on TXD id=" + "%4u (port=%d queue=%d)", + tx_last, txq->port_id, txq->queue_id); + + td_cmd |= I40E_TX_DESC_CMD_RS; + + /* Update txq RS bit counters */ + txq->nb_tx_used = 0; + } + + txd->cmd_type_offset_bsz |= + rte_cpu_to_le_64(((uint64_t)td_cmd) << + I40E_TXD_QW1_CMD_SHIFT); + } + +end_of_tx: + PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u", + (unsigned) txq->port_id, (unsigned) txq->queue_id, + (unsigned) tx_id, (unsigned) nb_tx); + + rte_cio_wmb(); + I40E_PCI_REG_WRITE_RELAXED(txq->qtx_tail, tx_id); + txq->tx_tail = tx_id; + + return nb_tx; +} + +static __rte_always_inline int +i40e_tx_free_bufs(struct i40e_tx_queue *txq) +{ + struct i40e_tx_entry *txep; + uint16_t i; + + if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz & + rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) != + rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE)) + return 0; + + txep = &(txq->sw_ring[txq->tx_next_dd - (txq->tx_rs_thresh - 1)]); + + for (i = 0; i < txq->tx_rs_thresh; i++) + rte_prefetch0((txep + i)->mbuf); + + if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) { + for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) { + rte_mempool_put(txep->mbuf->pool, txep->mbuf); + txep->mbuf = NULL; + } + } else { + for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) { + rte_pktmbuf_free_seg(txep->mbuf); + txep->mbuf = NULL; + } + } + + txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh); + txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh); + if (txq->tx_next_dd >= txq->nb_tx_desc) + txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1); + + return txq->tx_rs_thresh; +} + +/* Populate 4 descriptors with data from 4 mbufs */ +static inline void +tx4(volatile struct i40e_tx_desc *txdp, struct rte_mbuf **pkts) +{ + uint64_t dma_addr; + uint32_t i; + + for (i = 0; i < 4; i++, txdp++, pkts++) { + dma_addr = rte_mbuf_data_iova(*pkts); + txdp->buffer_addr = rte_cpu_to_le_64(dma_addr); + txdp->cmd_type_offset_bsz = + i40e_build_ctob((uint32_t)I40E_TD_CMD, 0, + (*pkts)->data_len, 0); + } +} + +/* Populate 1 descriptor with data from 1 mbuf */ +static inline void +tx1(volatile struct i40e_tx_desc *txdp, struct rte_mbuf **pkts) +{ + uint64_t dma_addr; + + dma_addr = rte_mbuf_data_iova(*pkts); + txdp->buffer_addr = rte_cpu_to_le_64(dma_addr); + txdp->cmd_type_offset_bsz = + i40e_build_ctob((uint32_t)I40E_TD_CMD, 0, + (*pkts)->data_len, 0); +} + +/* Fill hardware descriptor ring with mbuf data */ +static inline void +i40e_tx_fill_hw_ring(struct i40e_tx_queue *txq, + struct rte_mbuf **pkts, + uint16_t nb_pkts) +{ + volatile struct i40e_tx_desc *txdp = 
&(txq->tx_ring[txq->tx_tail]); + struct i40e_tx_entry *txep = &(txq->sw_ring[txq->tx_tail]); + const int N_PER_LOOP = 4; + const int N_PER_LOOP_MASK = N_PER_LOOP - 1; + int mainpart, leftover; + int i, j; + + mainpart = (nb_pkts & ((uint32_t) ~N_PER_LOOP_MASK)); + leftover = (nb_pkts & ((uint32_t) N_PER_LOOP_MASK)); + for (i = 0; i < mainpart; i += N_PER_LOOP) { + for (j = 0; j < N_PER_LOOP; ++j) { + (txep + i + j)->mbuf = *(pkts + i + j); + } + tx4(txdp + i, pkts + i); + } + if (unlikely(leftover > 0)) { + for (i = 0; i < leftover; ++i) { + (txep + mainpart + i)->mbuf = *(pkts + mainpart + i); + tx1(txdp + mainpart + i, pkts + mainpart + i); + } + } +} + +static inline uint16_t +tx_xmit_pkts(struct i40e_tx_queue *txq, + struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + volatile struct i40e_tx_desc *txr = txq->tx_ring; + uint16_t n = 0; + + /** + * Begin scanning the H/W ring for done descriptors when the number + * of available descriptors drops below tx_free_thresh. For each done + * descriptor, free the associated buffer. + */ + if (txq->nb_tx_free < txq->tx_free_thresh) + i40e_tx_free_bufs(txq); + + /* Use available descriptor only */ + nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts); + if (unlikely(!nb_pkts)) + return 0; + + txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts); + if ((txq->tx_tail + nb_pkts) > txq->nb_tx_desc) { + n = (uint16_t)(txq->nb_tx_desc - txq->tx_tail); + i40e_tx_fill_hw_ring(txq, tx_pkts, n); + txr[txq->tx_next_rs].cmd_type_offset_bsz |= + rte_cpu_to_le_64(((uint64_t)I40E_TX_DESC_CMD_RS) << + I40E_TXD_QW1_CMD_SHIFT); + txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1); + txq->tx_tail = 0; + } + + /* Fill hardware descriptor ring with mbuf data */ + i40e_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n)); + txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n)); + + /* Determin if RS bit needs to be set */ + if (txq->tx_tail > txq->tx_next_rs) { + txr[txq->tx_next_rs].cmd_type_offset_bsz |= + rte_cpu_to_le_64(((uint64_t)I40E_TX_DESC_CMD_RS) << + I40E_TXD_QW1_CMD_SHIFT); + txq->tx_next_rs = + (uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh); + if (txq->tx_next_rs >= txq->nb_tx_desc) + txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1); + } + + if (txq->tx_tail >= txq->nb_tx_desc) + txq->tx_tail = 0; + + /* Update the tx tail register */ + I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail); + + return nb_pkts; +} + +static uint16_t +i40e_xmit_pkts_simple(void *tx_queue, + struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + uint16_t nb_tx = 0; + + if (likely(nb_pkts <= I40E_TX_MAX_BURST)) + return tx_xmit_pkts((struct i40e_tx_queue *)tx_queue, + tx_pkts, nb_pkts); + + while (nb_pkts) { + uint16_t ret, num = (uint16_t)RTE_MIN(nb_pkts, + I40E_TX_MAX_BURST); + + ret = tx_xmit_pkts((struct i40e_tx_queue *)tx_queue, + &tx_pkts[nb_tx], num); + nb_tx = (uint16_t)(nb_tx + ret); + nb_pkts = (uint16_t)(nb_pkts - ret); + if (ret < num) + break; + } + + return nb_tx; +} + +static uint16_t +i40e_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + uint16_t nb_tx = 0; + struct i40e_tx_queue *txq = (struct i40e_tx_queue *)tx_queue; + + while (nb_pkts) { + uint16_t ret, num; + + num = (uint16_t)RTE_MIN(nb_pkts, txq->tx_rs_thresh); + ret = i40e_xmit_fixed_burst_vec(tx_queue, &tx_pkts[nb_tx], + num); + nb_tx += ret; + nb_pkts -= ret; + if (ret < num) + break; + } + + return nb_tx; +} + +/********************************************************************* + * + * TX prep functions + * + 
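The simple transmit fill routine splits every burst into a multiple-of-four main part handled four descriptors at a time and a remainder of at most three packets handled one at a time; for a burst of 11 packets that is 8 plus 3. A sketch of just the split; split_burst is an invented name.

#include <stdint.h>

/* Mirrors the mainpart/leftover computation: mainpart is rounded down to a
 * multiple of four, leftover is the remaining zero to three packets.
 */
static void
split_burst(uint16_t nb_pkts, uint16_t *mainpart, uint16_t *leftover)
{
        *mainpart = (uint16_t)(nb_pkts & ~3u);
        *leftover = (uint16_t)(nb_pkts & 3u);
}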
**********************************************************************/ +uint16_t +i40e_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + int i, ret; + uint64_t ol_flags; + struct rte_mbuf *m; + + for (i = 0; i < nb_pkts; i++) { + m = tx_pkts[i]; + ol_flags = m->ol_flags; + + /* Check for m->nb_segs to not exceed the limits. */ + if (!(ol_flags & PKT_TX_TCP_SEG)) { + if (m->nb_segs > I40E_TX_MAX_MTU_SEG || + m->pkt_len > I40E_FRAME_SIZE_MAX) { + rte_errno = EINVAL; + return i; + } + } else if (m->nb_segs > I40E_TX_MAX_SEG || + m->tso_segsz < I40E_MIN_TSO_MSS || + m->tso_segsz > I40E_MAX_TSO_MSS || + m->pkt_len > I40E_TSO_FRAME_SIZE_MAX) { + /* MSS outside the range (256B - 9674B) are considered + * malicious + */ + rte_errno = EINVAL; + return i; + } + + if (ol_flags & I40E_TX_OFFLOAD_NOTSUP_MASK) { + rte_errno = ENOTSUP; + return i; + } + + /* check the size of packet */ + if (m->pkt_len < I40E_TX_MIN_PKT_LEN) { + rte_errno = EINVAL; + return i; + } + +#ifdef RTE_LIBRTE_ETHDEV_DEBUG + ret = rte_validate_tx_offload(m); + if (ret != 0) { + rte_errno = -ret; + return i; + } +#endif + ret = rte_net_intel_cksum_prepare(m); + if (ret != 0) { + rte_errno = -ret; + return i; + } + } + return i; +} + +/* + * Find the VSI the queue belongs to. 'queue_idx' is the queue index + * application used, which assume having sequential ones. But from driver's + * perspective, it's different. For example, q0 belongs to FDIR VSI, q1-q64 + * to MAIN VSI, , q65-96 to SRIOV VSIs, q97-128 to VMDQ VSIs. For application + * running on host, q1-64 and q97-128 can be used, total 96 queues. They can + * use queue_idx from 0 to 95 to access queues, while real queue would be + * different. This function will do a queue mapping to find VSI the queue + * belongs to. + */ +static struct i40e_vsi* +i40e_pf_get_vsi_by_qindex(struct i40e_pf *pf, uint16_t queue_idx) +{ + /* the queue in MAIN VSI range */ + if (queue_idx < pf->main_vsi->nb_qps) + return pf->main_vsi; + + queue_idx -= pf->main_vsi->nb_qps; + + /* queue_idx is greater than VMDQ VSIs range */ + if (queue_idx > pf->nb_cfg_vmdq_vsi * pf->vmdq_nb_qps - 1) { + PMD_INIT_LOG(ERR, "queue_idx out of range. VMDQ configured?"); + return NULL; + } + + return pf->vmdq[queue_idx / pf->vmdq_nb_qps].vsi; +} + +static uint16_t +i40e_get_queue_offset_by_qindex(struct i40e_pf *pf, uint16_t queue_idx) +{ + /* the queue in MAIN VSI range */ + if (queue_idx < pf->main_vsi->nb_qps) + return queue_idx; + + /* It's VMDQ queues */ + queue_idx -= pf->main_vsi->nb_qps; + + if (pf->nb_cfg_vmdq_vsi) + return queue_idx % pf->vmdq_nb_qps; + else { + PMD_INIT_LOG(ERR, "Fail to get queue offset"); + return (uint16_t)(-1); + } +} + +int +i40e_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) +{ + struct i40e_rx_queue *rxq; + int err; + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + PMD_INIT_FUNC_TRACE(); + + rxq = dev->data->rx_queues[rx_queue_id]; + if (!rxq || !rxq->q_set) { + PMD_DRV_LOG(ERR, "RX queue %u not available or setup", + rx_queue_id); + return -EINVAL; + } + + if (rxq->rx_deferred_start) + PMD_DRV_LOG(WARNING, "RX queue %u is deferrd start", + rx_queue_id); + + err = i40e_alloc_rx_queue_mbufs(rxq); + if (err) { + PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf"); + return err; + } + + /* Init the RX tail regieter. 
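The queue-index remapping described in the comment above boils down to subtracting the MAIN VSI range and then dividing by the per-VMDQ queue count. The sketch below uses invented names (map_app_queue, struct qmap) and illustrative sizes: with 64 MAIN queues and 4 queues per VMDQ VSI, application queue 70 lands on VMDQ VSI (70 - 64) / 4 = 1 at offset (70 - 64) % 4 = 2.

#include <stdint.h>

struct qmap {
        int is_vmdq;      /* 0: MAIN VSI, 1: a VMDQ VSI */
        uint16_t vsi_idx; /* index among the VMDQ VSIs */
        uint16_t offset;  /* queue offset inside the selected VSI */
};

static struct qmap
map_app_queue(uint16_t queue_idx, uint16_t main_nb_qps, uint16_t vmdq_nb_qps)
{
        struct qmap m = { 0, 0, queue_idx };

        if (queue_idx < main_nb_qps)
                return m; /* stays on the MAIN VSI, same offset */

        queue_idx = (uint16_t)(queue_idx - main_nb_qps);
        m.is_vmdq = 1;
        m.vsi_idx = (uint16_t)(queue_idx / vmdq_nb_qps);
        m.offset = (uint16_t)(queue_idx % vmdq_nb_qps);
        return m;
}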
*/ + I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1); + + err = i40e_switch_rx_queue(hw, rxq->reg_idx, TRUE); + if (err) { + PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on", + rx_queue_id); + + i40e_rx_queue_release_mbufs(rxq); + i40e_reset_rx_queue(rxq); + return err; + } + dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; + + return 0; +} + +int +i40e_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id) +{ + struct i40e_rx_queue *rxq; + int err; + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + rxq = dev->data->rx_queues[rx_queue_id]; + if (!rxq || !rxq->q_set) { + PMD_DRV_LOG(ERR, "RX queue %u not available or setup", + rx_queue_id); + return -EINVAL; + } + + /* + * rx_queue_id is queue id application refers to, while + * rxq->reg_idx is the real queue index. + */ + err = i40e_switch_rx_queue(hw, rxq->reg_idx, FALSE); + if (err) { + PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off", + rx_queue_id); + return err; + } + i40e_rx_queue_release_mbufs(rxq); + i40e_reset_rx_queue(rxq); + dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; + + return 0; +} + +int +i40e_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id) +{ + int err; + struct i40e_tx_queue *txq; + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + PMD_INIT_FUNC_TRACE(); + + txq = dev->data->tx_queues[tx_queue_id]; + if (!txq || !txq->q_set) { + PMD_DRV_LOG(ERR, "TX queue %u is not available or setup", + tx_queue_id); + return -EINVAL; + } + + if (txq->tx_deferred_start) + PMD_DRV_LOG(WARNING, "TX queue %u is deferrd start", + tx_queue_id); + + /* + * tx_queue_id is queue id application refers to, while + * rxq->reg_idx is the real queue index. + */ + err = i40e_switch_tx_queue(hw, txq->reg_idx, TRUE); + if (err) { + PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on", + tx_queue_id); + return err; + } + dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; + + return 0; +} + +int +i40e_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id) +{ + struct i40e_tx_queue *txq; + int err; + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + txq = dev->data->tx_queues[tx_queue_id]; + if (!txq || !txq->q_set) { + PMD_DRV_LOG(ERR, "TX queue %u is not available or setup", + tx_queue_id); + return -EINVAL; + } + + /* + * tx_queue_id is queue id application refers to, while + * txq->reg_idx is the real queue index. 
+ */ + err = i40e_switch_tx_queue(hw, txq->reg_idx, FALSE); + if (err) { + PMD_DRV_LOG(ERR, "Failed to switch TX queue %u of", + tx_queue_id); + return err; + } + + i40e_tx_queue_release_mbufs(txq); + i40e_reset_tx_queue(txq); + dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; + + return 0; +} + +const uint32_t * +i40e_dev_supported_ptypes_get(struct rte_eth_dev *dev) +{ + static const uint32_t ptypes[] = { + /* refers to i40e_rxd_pkt_type_mapping() */ + RTE_PTYPE_L2_ETHER, + RTE_PTYPE_L2_ETHER_TIMESYNC, + RTE_PTYPE_L2_ETHER_LLDP, + RTE_PTYPE_L2_ETHER_ARP, + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN, + RTE_PTYPE_L3_IPV6_EXT_UNKNOWN, + RTE_PTYPE_L4_FRAG, + RTE_PTYPE_L4_ICMP, + RTE_PTYPE_L4_NONFRAG, + RTE_PTYPE_L4_SCTP, + RTE_PTYPE_L4_TCP, + RTE_PTYPE_L4_UDP, + RTE_PTYPE_TUNNEL_GRENAT, + RTE_PTYPE_TUNNEL_IP, + RTE_PTYPE_INNER_L2_ETHER, + RTE_PTYPE_INNER_L2_ETHER_VLAN, + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN, + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN, + RTE_PTYPE_INNER_L4_FRAG, + RTE_PTYPE_INNER_L4_ICMP, + RTE_PTYPE_INNER_L4_NONFRAG, + RTE_PTYPE_INNER_L4_SCTP, + RTE_PTYPE_INNER_L4_TCP, + RTE_PTYPE_INNER_L4_UDP, + RTE_PTYPE_UNKNOWN + }; + + if (dev->rx_pkt_burst == i40e_recv_pkts || +#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC + dev->rx_pkt_burst == i40e_recv_pkts_bulk_alloc || +#endif + dev->rx_pkt_burst == i40e_recv_scattered_pkts || + dev->rx_pkt_burst == i40e_recv_scattered_pkts_vec || + dev->rx_pkt_burst == i40e_recv_pkts_vec || + dev->rx_pkt_burst == i40e_recv_scattered_pkts_vec_avx2 || + dev->rx_pkt_burst == i40e_recv_pkts_vec_avx2) + return ptypes; + return NULL; +} + +static int +i40e_dev_first_queue(uint16_t idx, void **queues, int num) +{ + uint16_t i; + + for (i = 0; i < num; i++) { + if (i != idx && queues[i]) + return 0; + } + + return 1; +} + +static int +i40e_dev_rx_queue_setup_runtime(struct rte_eth_dev *dev, + struct i40e_rx_queue *rxq) +{ + struct i40e_adapter *ad = + I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + int use_def_burst_func = + check_rx_burst_bulk_alloc_preconditions(rxq); + uint16_t buf_size = + (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) - + RTE_PKTMBUF_HEADROOM); + int use_scattered_rx = + (rxq->max_pkt_len > buf_size); + + if (i40e_rx_queue_init(rxq) != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, + "Failed to do RX queue initialization"); + return -EINVAL; + } + + if (i40e_dev_first_queue(rxq->queue_id, + dev->data->rx_queues, + dev->data->nb_rx_queues)) { + /** + * If it is the first queue to setup, + * set all flags to default and call + * i40e_set_rx_function. 
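The use_scattered_rx decision computed in the runtime setup path is simply a comparison between the largest configurable packet and what one mbuf can hold after its headroom. A sketch with invented names (needs_scattered_rx, EXAMPLE_PKTMBUF_HEADROOM); the 128-byte headroom is the usual RTE_PKTMBUF_HEADROOM default and is used here only for the example: 2048-byte data rooms leave 1920 bytes, so a 9000-byte maximum packet length requires the scattered receive function.

#include <stdbool.h>
#include <stdint.h>

#define EXAMPLE_PKTMBUF_HEADROOM 128 /* illustrative RTE_PKTMBUF_HEADROOM */

static bool
needs_scattered_rx(uint32_t max_pkt_len, uint16_t mbuf_data_room_size)
{
        uint16_t buf_size = (uint16_t)(mbuf_data_room_size -
                                       EXAMPLE_PKTMBUF_HEADROOM);

        return max_pkt_len > buf_size;
}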
+ */ + ad->rx_bulk_alloc_allowed = true; + ad->rx_vec_allowed = true; + dev->data->scattered_rx = use_scattered_rx; + if (use_def_burst_func) + ad->rx_bulk_alloc_allowed = false; + i40e_set_rx_function(dev); + return 0; + } else if (ad->rx_vec_allowed && !rte_is_power_of_2(rxq->nb_rx_desc)) { + PMD_DRV_LOG(ERR, "Vector mode is allowed, but descriptor" + " number %d of queue %d isn't power of 2", + rxq->nb_rx_desc, rxq->queue_id); + return -EINVAL; + } + + /* check bulk alloc conflict */ + if (ad->rx_bulk_alloc_allowed && use_def_burst_func) { + PMD_DRV_LOG(ERR, "Can't use default burst."); + return -EINVAL; + } + /* check scatterred conflict */ + if (!dev->data->scattered_rx && use_scattered_rx) { + PMD_DRV_LOG(ERR, "Scattered rx is required."); + return -EINVAL; + } + /* check vector conflict */ + if (ad->rx_vec_allowed && i40e_rxq_vec_setup(rxq)) { + PMD_DRV_LOG(ERR, "Failed vector rx setup."); + return -EINVAL; + } + + return 0; +} + +int +i40e_dev_rx_queue_setup(struct rte_eth_dev *dev, + uint16_t queue_idx, + uint16_t nb_desc, + unsigned int socket_id, + const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *mp) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct i40e_adapter *ad = + I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + struct i40e_vsi *vsi; + struct i40e_pf *pf = NULL; + struct i40e_vf *vf = NULL; + struct i40e_rx_queue *rxq; + const struct rte_memzone *rz; + uint32_t ring_size; + uint16_t len, i; + uint16_t reg_idx, base, bsf, tc_mapping; + int q_offset, use_def_burst_func = 1; + uint64_t offloads; + + offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads; + + if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) { + vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + vsi = &vf->vsi; + if (!vsi) + return -EINVAL; + reg_idx = queue_idx; + } else { + pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + vsi = i40e_pf_get_vsi_by_qindex(pf, queue_idx); + if (!vsi) + return -EINVAL; + q_offset = i40e_get_queue_offset_by_qindex(pf, queue_idx); + if (q_offset < 0) + return -EINVAL; + reg_idx = vsi->base_queue + q_offset; + } + + if (nb_desc % I40E_ALIGN_RING_DESC != 0 || + (nb_desc > I40E_MAX_RING_DESC) || + (nb_desc < I40E_MIN_RING_DESC)) { + PMD_DRV_LOG(ERR, "Number (%u) of receive descriptors is " + "invalid", nb_desc); + return -EINVAL; + } + + /* Free memory if needed */ + if (dev->data->rx_queues[queue_idx]) { + i40e_dev_rx_queue_release(dev->data->rx_queues[queue_idx]); + dev->data->rx_queues[queue_idx] = NULL; + } + + /* Allocate the rx queue data structure */ + rxq = rte_zmalloc_socket("i40e rx queue", + sizeof(struct i40e_rx_queue), + RTE_CACHE_LINE_SIZE, + socket_id); + if (!rxq) { + PMD_DRV_LOG(ERR, "Failed to allocate memory for " + "rx queue data structure"); + return -ENOMEM; + } + rxq->mp = mp; + rxq->nb_rx_desc = nb_desc; + rxq->rx_free_thresh = rx_conf->rx_free_thresh; + rxq->queue_id = queue_idx; + rxq->reg_idx = reg_idx; + rxq->port_id = dev->data->port_id; + if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) + rxq->crc_len = RTE_ETHER_CRC_LEN; + else + rxq->crc_len = 0; + rxq->drop_en = rx_conf->rx_drop_en; + rxq->vsi = vsi; + rxq->rx_deferred_start = rx_conf->rx_deferred_start; + rxq->offloads = offloads; + + /* Allocate the maximun number of RX ring hardware descriptor. */ + len = I40E_MAX_RING_DESC; + + /** + * Allocating a little more memory because vectorized/bulk_alloc Rx + * functions doesn't check boundaries each time. 
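Both queue setup paths begin with the same ring-size sanity check: the descriptor count must be a multiple of the ring alignment and fall between the minimum and maximum ring sizes. The sketch below uses invented names and spells the limits out as constants believed to match I40E_ALIGN_RING_DESC, I40E_MIN_RING_DESC and I40E_MAX_RING_DESC from the driver headers; treat the numbers as illustrative.

#include <stdbool.h>
#include <stdint.h>

#define EXAMPLE_ALIGN_RING_DESC 32   /* assumed I40E_ALIGN_RING_DESC */
#define EXAMPLE_MIN_RING_DESC   64   /* assumed I40E_MIN_RING_DESC */
#define EXAMPLE_MAX_RING_DESC   4096 /* assumed I40E_MAX_RING_DESC */

static bool
ring_size_valid(uint16_t nb_desc)
{
        return nb_desc % EXAMPLE_ALIGN_RING_DESC == 0 &&
               nb_desc >= EXAMPLE_MIN_RING_DESC &&
               nb_desc <= EXAMPLE_MAX_RING_DESC;
}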
+ */ + len += RTE_PMD_I40E_RX_MAX_BURST; + + ring_size = RTE_ALIGN(len * sizeof(union i40e_rx_desc), + I40E_DMA_MEM_ALIGN); + + rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx, + ring_size, I40E_RING_BASE_ALIGN, socket_id); + if (!rz) { + i40e_dev_rx_queue_release(rxq); + PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX"); + return -ENOMEM; + } + + /* Zero all the descriptors in the ring. */ + memset(rz->addr, 0, ring_size); + + rxq->rx_ring_phys_addr = rz->iova; + rxq->rx_ring = (union i40e_rx_desc *)rz->addr; + + len = (uint16_t)(nb_desc + RTE_PMD_I40E_RX_MAX_BURST); + + /* Allocate the software ring. */ + rxq->sw_ring = + rte_zmalloc_socket("i40e rx sw ring", + sizeof(struct i40e_rx_entry) * len, + RTE_CACHE_LINE_SIZE, + socket_id); + if (!rxq->sw_ring) { + i40e_dev_rx_queue_release(rxq); + PMD_DRV_LOG(ERR, "Failed to allocate memory for SW ring"); + return -ENOMEM; + } + + i40e_reset_rx_queue(rxq); + rxq->q_set = TRUE; + + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + if (!(vsi->enabled_tc & (1 << i))) + continue; + tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]); + base = (tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >> + I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT; + bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >> + I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT; + + if (queue_idx >= base && queue_idx < (base + BIT(bsf))) + rxq->dcb_tc = i; + } + + if (dev->data->dev_started) { + if (i40e_dev_rx_queue_setup_runtime(dev, rxq)) { + i40e_dev_rx_queue_release(rxq); + return -EINVAL; + } + } else { + use_def_burst_func = + check_rx_burst_bulk_alloc_preconditions(rxq); + if (!use_def_burst_func) { +#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC + PMD_INIT_LOG(DEBUG, + "Rx Burst Bulk Alloc Preconditions are " + "satisfied. Rx Burst Bulk Alloc function will be " + "used on port=%d, queue=%d.", + rxq->port_id, rxq->queue_id); +#endif /* RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC */ + } else { + PMD_INIT_LOG(DEBUG, + "Rx Burst Bulk Alloc Preconditions are " + "not satisfied, Scattered Rx is requested, " + "or RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC is " + "not enabled on port=%d, queue=%d.", + rxq->port_id, rxq->queue_id); + ad->rx_bulk_alloc_allowed = false; + } + } + + dev->data->rx_queues[queue_idx] = rxq; + return 0; +} + +void +i40e_dev_rx_queue_release(void *rxq) +{ + struct i40e_rx_queue *q = (struct i40e_rx_queue *)rxq; + + if (!q) { + PMD_DRV_LOG(DEBUG, "Pointer to rxq is NULL"); + return; + } + + i40e_rx_queue_release_mbufs(q); + rte_free(q->sw_ring); + rte_free(q); +} + +uint32_t +i40e_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id) +{ +#define I40E_RXQ_SCAN_INTERVAL 4 + volatile union i40e_rx_desc *rxdp; + struct i40e_rx_queue *rxq; + uint16_t desc = 0; + + rxq = dev->data->rx_queues[rx_queue_id]; + rxdp = &(rxq->rx_ring[rxq->rx_tail]); + while ((desc < rxq->nb_rx_desc) && + ((rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) & + I40E_RXD_QW1_STATUS_MASK) >> I40E_RXD_QW1_STATUS_SHIFT) & + (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) { + /** + * Check the DD bit of a rx descriptor of each 4 in a group, + * to avoid checking too frequently and downgrading performance + * too much. 
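The queue-count helper trades accuracy for speed by sampling the DD bit only on every fourth descriptor, so the value it returns is a multiple of the scan interval. A sketch of that sampling loop; count_used_descs is an invented name and dd_bits is a stand-in array for reading the DD bit out of each hardware descriptor.

#include <stdint.h>

#define EXAMPLE_SCAN_INTERVAL 4 /* stands in for I40E_RXQ_SCAN_INTERVAL */

/* Walks from the software tail in steps of the scan interval while the
 * sampled descriptors report DD, returning the approximate used count.
 */
static uint16_t
count_used_descs(const uint8_t *dd_bits, uint16_t nb_rx_desc, uint16_t rx_tail)
{
        uint16_t desc = 0;

        while (desc < nb_rx_desc &&
               dd_bits[(uint16_t)((rx_tail + desc) % nb_rx_desc)])
                desc = (uint16_t)(desc + EXAMPLE_SCAN_INTERVAL);

        return desc;
}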
+ */ + desc += I40E_RXQ_SCAN_INTERVAL; + rxdp += I40E_RXQ_SCAN_INTERVAL; + if (rxq->rx_tail + desc >= rxq->nb_rx_desc) + rxdp = &(rxq->rx_ring[rxq->rx_tail + + desc - rxq->nb_rx_desc]); + } + + return desc; +} + +int +i40e_dev_rx_descriptor_done(void *rx_queue, uint16_t offset) +{ + volatile union i40e_rx_desc *rxdp; + struct i40e_rx_queue *rxq = rx_queue; + uint16_t desc; + int ret; + + if (unlikely(offset >= rxq->nb_rx_desc)) { + PMD_DRV_LOG(ERR, "Invalid RX descriptor id %u", offset); + return 0; + } + + desc = rxq->rx_tail + offset; + if (desc >= rxq->nb_rx_desc) + desc -= rxq->nb_rx_desc; + + rxdp = &(rxq->rx_ring[desc]); + + ret = !!(((rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) & + I40E_RXD_QW1_STATUS_MASK) >> I40E_RXD_QW1_STATUS_SHIFT) & + (1 << I40E_RX_DESC_STATUS_DD_SHIFT)); + + return ret; +} + +int +i40e_dev_rx_descriptor_status(void *rx_queue, uint16_t offset) +{ + struct i40e_rx_queue *rxq = rx_queue; + volatile uint64_t *status; + uint64_t mask; + uint32_t desc; + + if (unlikely(offset >= rxq->nb_rx_desc)) + return -EINVAL; + + if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold) + return RTE_ETH_RX_DESC_UNAVAIL; + + desc = rxq->rx_tail + offset; + if (desc >= rxq->nb_rx_desc) + desc -= rxq->nb_rx_desc; + + status = &rxq->rx_ring[desc].wb.qword1.status_error_len; + mask = rte_le_to_cpu_64((1ULL << I40E_RX_DESC_STATUS_DD_SHIFT) + << I40E_RXD_QW1_STATUS_SHIFT); + if (*status & mask) + return RTE_ETH_RX_DESC_DONE; + + return RTE_ETH_RX_DESC_AVAIL; +} + +int +i40e_dev_tx_descriptor_status(void *tx_queue, uint16_t offset) +{ + struct i40e_tx_queue *txq = tx_queue; + volatile uint64_t *status; + uint64_t mask, expect; + uint32_t desc; + + if (unlikely(offset >= txq->nb_tx_desc)) + return -EINVAL; + + desc = txq->tx_tail + offset; + /* go to next desc that has the RS bit */ + desc = ((desc + txq->tx_rs_thresh - 1) / txq->tx_rs_thresh) * + txq->tx_rs_thresh; + if (desc >= txq->nb_tx_desc) { + desc -= txq->nb_tx_desc; + if (desc >= txq->nb_tx_desc) + desc -= txq->nb_tx_desc; + } + + status = &txq->tx_ring[desc].cmd_type_offset_bsz; + mask = rte_le_to_cpu_64(I40E_TXD_QW1_DTYPE_MASK); + expect = rte_cpu_to_le_64( + I40E_TX_DESC_DTYPE_DESC_DONE << I40E_TXD_QW1_DTYPE_SHIFT); + if ((*status & mask) == expect) + return RTE_ETH_TX_DESC_DONE; + + return RTE_ETH_TX_DESC_FULL; +} + +static int +i40e_dev_tx_queue_setup_runtime(struct rte_eth_dev *dev, + struct i40e_tx_queue *txq) +{ + struct i40e_adapter *ad = + I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + + if (i40e_tx_queue_init(txq) != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, + "Failed to do TX queue initialization"); + return -EINVAL; + } + + if (i40e_dev_first_queue(txq->queue_id, + dev->data->tx_queues, + dev->data->nb_tx_queues)) { + /** + * If it is the first queue to setup, + * set all flags and call + * i40e_set_tx_function. 
+ */ + i40e_set_tx_function_flag(dev, txq); + i40e_set_tx_function(dev); + return 0; + } + + /* check vector conflict */ + if (ad->tx_vec_allowed) { + if (txq->tx_rs_thresh > RTE_I40E_TX_MAX_FREE_BUF_SZ || + i40e_txq_vec_setup(txq)) { + PMD_DRV_LOG(ERR, "Failed vector tx setup."); + return -EINVAL; + } + } + /* check simple tx conflict */ + if (ad->tx_simple_allowed) { + if ((txq->offloads & ~DEV_TX_OFFLOAD_MBUF_FAST_FREE) != 0 || + txq->tx_rs_thresh < RTE_PMD_I40E_TX_MAX_BURST) { + PMD_DRV_LOG(ERR, "No-simple tx is required."); + return -EINVAL; + } + } + + return 0; +} + +int +i40e_dev_tx_queue_setup(struct rte_eth_dev *dev, + uint16_t queue_idx, + uint16_t nb_desc, + unsigned int socket_id, + const struct rte_eth_txconf *tx_conf) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct i40e_vsi *vsi; + struct i40e_pf *pf = NULL; + struct i40e_vf *vf = NULL; + struct i40e_tx_queue *txq; + const struct rte_memzone *tz; + uint32_t ring_size; + uint16_t tx_rs_thresh, tx_free_thresh; + uint16_t reg_idx, i, base, bsf, tc_mapping; + int q_offset; + uint64_t offloads; + + offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads; + + if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) { + vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + vsi = &vf->vsi; + if (!vsi) + return -EINVAL; + reg_idx = queue_idx; + } else { + pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + vsi = i40e_pf_get_vsi_by_qindex(pf, queue_idx); + if (!vsi) + return -EINVAL; + q_offset = i40e_get_queue_offset_by_qindex(pf, queue_idx); + if (q_offset < 0) + return -EINVAL; + reg_idx = vsi->base_queue + q_offset; + } + + if (nb_desc % I40E_ALIGN_RING_DESC != 0 || + (nb_desc > I40E_MAX_RING_DESC) || + (nb_desc < I40E_MIN_RING_DESC)) { + PMD_DRV_LOG(ERR, "Number (%u) of transmit descriptors is " + "invalid", nb_desc); + return -EINVAL; + } + + /** + * The following two parameters control the setting of the RS bit on + * transmit descriptors. TX descriptors will have their RS bit set + * after txq->tx_rs_thresh descriptors have been used. The TX + * descriptor ring will be cleaned after txq->tx_free_thresh + * descriptors are used or if the number of descriptors required to + * transmit a packet is greater than the number of free TX descriptors. + * + * The following constraints must be satisfied: + * - tx_rs_thresh must be greater than 0. + * - tx_rs_thresh must be less than the size of the ring minus 2. + * - tx_rs_thresh must be less than or equal to tx_free_thresh. + * - tx_rs_thresh must be a divisor of the ring size. + * - tx_free_thresh must be greater than 0. + * - tx_free_thresh must be less than the size of the ring minus 3. + * - tx_free_thresh + tx_rs_thresh must not exceed nb_desc. + * + * One descriptor in the TX ring is used as a sentinel to avoid a H/W + * race condition, hence the maximum threshold constraints. When set + * to zero use default values. + */ + tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ? + tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH); + /* force tx_rs_thresh to adapt an aggresive tx_free_thresh */ + tx_rs_thresh = (DEFAULT_TX_RS_THRESH + tx_free_thresh > nb_desc) ? + nb_desc - tx_free_thresh : DEFAULT_TX_RS_THRESH; + if (tx_conf->tx_rs_thresh > 0) + tx_rs_thresh = tx_conf->tx_rs_thresh; + if (tx_rs_thresh + tx_free_thresh > nb_desc) { + PMD_INIT_LOG(ERR, "tx_rs_thresh + tx_free_thresh must not " + "exceed nb_desc. 
(tx_rs_thresh=%u " + "tx_free_thresh=%u nb_desc=%u port=%d queue=%d)", + (unsigned int)tx_rs_thresh, + (unsigned int)tx_free_thresh, + (unsigned int)nb_desc, + (int)dev->data->port_id, + (int)queue_idx); + return I40E_ERR_PARAM; + } + if (tx_rs_thresh >= (nb_desc - 2)) { + PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the " + "number of TX descriptors minus 2. " + "(tx_rs_thresh=%u port=%d queue=%d)", + (unsigned int)tx_rs_thresh, + (int)dev->data->port_id, + (int)queue_idx); + return I40E_ERR_PARAM; + } + if (tx_free_thresh >= (nb_desc - 3)) { + PMD_INIT_LOG(ERR, "tx_free_thresh must be less than the " + "number of TX descriptors minus 3. " + "(tx_free_thresh=%u port=%d queue=%d)", + (unsigned int)tx_free_thresh, + (int)dev->data->port_id, + (int)queue_idx); + return I40E_ERR_PARAM; + } + if (tx_rs_thresh > tx_free_thresh) { + PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than or " + "equal to tx_free_thresh. (tx_free_thresh=%u" + " tx_rs_thresh=%u port=%d queue=%d)", + (unsigned int)tx_free_thresh, + (unsigned int)tx_rs_thresh, + (int)dev->data->port_id, + (int)queue_idx); + return I40E_ERR_PARAM; + } + if ((nb_desc % tx_rs_thresh) != 0) { + PMD_INIT_LOG(ERR, "tx_rs_thresh must be a divisor of the " + "number of TX descriptors. (tx_rs_thresh=%u" + " port=%d queue=%d)", + (unsigned int)tx_rs_thresh, + (int)dev->data->port_id, + (int)queue_idx); + return I40E_ERR_PARAM; + } + if ((tx_rs_thresh > 1) && (tx_conf->tx_thresh.wthresh != 0)) { + PMD_INIT_LOG(ERR, "TX WTHRESH must be set to 0 if " + "tx_rs_thresh is greater than 1. " + "(tx_rs_thresh=%u port=%d queue=%d)", + (unsigned int)tx_rs_thresh, + (int)dev->data->port_id, + (int)queue_idx); + return I40E_ERR_PARAM; + } + + /* Free memory if needed. */ + if (dev->data->tx_queues[queue_idx]) { + i40e_dev_tx_queue_release(dev->data->tx_queues[queue_idx]); + dev->data->tx_queues[queue_idx] = NULL; + } + + /* Allocate the TX queue data structure. */ + txq = rte_zmalloc_socket("i40e tx queue", + sizeof(struct i40e_tx_queue), + RTE_CACHE_LINE_SIZE, + socket_id); + if (!txq) { + PMD_DRV_LOG(ERR, "Failed to allocate memory for " + "tx queue structure"); + return -ENOMEM; + } + + /* Allocate TX hardware ring descriptors. 
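+ *
+ * Editor's note on the threshold checks above (illustrative only; the
+ * default threshold values are an assumption about this driver): with
+ * nb_desc = 1024 and both thresholds left at 0 in tx_conf, the code
+ * falls back to DEFAULT_TX_FREE_THRESH and DEFAULT_TX_RS_THRESH
+ * (commonly 32 each), which satisfies every constraint listed earlier:
+ * 32 divides 1024, 32 <= 32, 32 < 1022, 32 < 1021 and 32 + 32 <= 1024.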
*/ + ring_size = sizeof(struct i40e_tx_desc) * I40E_MAX_RING_DESC; + ring_size = RTE_ALIGN(ring_size, I40E_DMA_MEM_ALIGN); + tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx, + ring_size, I40E_RING_BASE_ALIGN, socket_id); + if (!tz) { + i40e_dev_tx_queue_release(txq); + PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for TX"); + return -ENOMEM; + } + + txq->nb_tx_desc = nb_desc; + txq->tx_rs_thresh = tx_rs_thresh; + txq->tx_free_thresh = tx_free_thresh; + txq->pthresh = tx_conf->tx_thresh.pthresh; + txq->hthresh = tx_conf->tx_thresh.hthresh; + txq->wthresh = tx_conf->tx_thresh.wthresh; + txq->queue_id = queue_idx; + txq->reg_idx = reg_idx; + txq->port_id = dev->data->port_id; + txq->offloads = offloads; + txq->vsi = vsi; + txq->tx_deferred_start = tx_conf->tx_deferred_start; + + txq->tx_ring_phys_addr = tz->iova; + txq->tx_ring = (struct i40e_tx_desc *)tz->addr; + + /* Allocate software ring */ + txq->sw_ring = + rte_zmalloc_socket("i40e tx sw ring", + sizeof(struct i40e_tx_entry) * nb_desc, + RTE_CACHE_LINE_SIZE, + socket_id); + if (!txq->sw_ring) { + i40e_dev_tx_queue_release(txq); + PMD_DRV_LOG(ERR, "Failed to allocate memory for SW TX ring"); + return -ENOMEM; + } + + i40e_reset_tx_queue(txq); + txq->q_set = TRUE; + + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + if (!(vsi->enabled_tc & (1 << i))) + continue; + tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]); + base = (tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >> + I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT; + bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >> + I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT; + + if (queue_idx >= base && queue_idx < (base + BIT(bsf))) + txq->dcb_tc = i; + } + + if (dev->data->dev_started) { + if (i40e_dev_tx_queue_setup_runtime(dev, txq)) { + i40e_dev_tx_queue_release(txq); + return -EINVAL; + } + } else { + /** + * Use a simple TX queue without offloads or + * multi segs if possible + */ + i40e_set_tx_function_flag(dev, txq); + } + dev->data->tx_queues[queue_idx] = txq; + + return 0; +} + +void +i40e_dev_tx_queue_release(void *txq) +{ + struct i40e_tx_queue *q = (struct i40e_tx_queue *)txq; + + if (!q) { + PMD_DRV_LOG(DEBUG, "Pointer to TX queue is NULL"); + return; + } + + i40e_tx_queue_release_mbufs(q); + rte_free(q->sw_ring); + rte_free(q); +} + +const struct rte_memzone * +i40e_memzone_reserve(const char *name, uint32_t len, int socket_id) +{ + const struct rte_memzone *mz; + + mz = rte_memzone_lookup(name); + if (mz) + return mz; + + mz = rte_memzone_reserve_aligned(name, len, socket_id, + RTE_MEMZONE_IOVA_CONTIG, I40E_RING_BASE_ALIGN); + return mz; +} + +void +i40e_rx_queue_release_mbufs(struct i40e_rx_queue *rxq) +{ + uint16_t i; + + /* SSE Vector driver has a different way of releasing mbufs. 
*/ + if (rxq->rx_using_sse) { + i40e_rx_queue_release_mbufs_vec(rxq); + return; + } + + if (!rxq->sw_ring) { + PMD_DRV_LOG(DEBUG, "Pointer to sw_ring is NULL"); + return; + } + + for (i = 0; i < rxq->nb_rx_desc; i++) { + if (rxq->sw_ring[i].mbuf) { + rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf); + rxq->sw_ring[i].mbuf = NULL; + } + } +#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC + if (rxq->rx_nb_avail == 0) + return; + for (i = 0; i < rxq->rx_nb_avail; i++) { + struct rte_mbuf *mbuf; + + mbuf = rxq->rx_stage[rxq->rx_next_avail + i]; + rte_pktmbuf_free_seg(mbuf); + } + rxq->rx_nb_avail = 0; +#endif /* RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC */ +} + +void +i40e_reset_rx_queue(struct i40e_rx_queue *rxq) +{ + unsigned i; + uint16_t len; + + if (!rxq) { + PMD_DRV_LOG(DEBUG, "Pointer to rxq is NULL"); + return; + } + +#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC + if (check_rx_burst_bulk_alloc_preconditions(rxq) == 0) + len = (uint16_t)(rxq->nb_rx_desc + RTE_PMD_I40E_RX_MAX_BURST); + else +#endif /* RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC */ + len = rxq->nb_rx_desc; + + for (i = 0; i < len * sizeof(union i40e_rx_desc); i++) + ((volatile char *)rxq->rx_ring)[i] = 0; + + memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf)); + for (i = 0; i < RTE_PMD_I40E_RX_MAX_BURST; ++i) + rxq->sw_ring[rxq->nb_rx_desc + i].mbuf = &rxq->fake_mbuf; + +#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC + rxq->rx_nb_avail = 0; + rxq->rx_next_avail = 0; + rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1); +#endif /* RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC */ + rxq->rx_tail = 0; + rxq->nb_rx_hold = 0; + rxq->pkt_first_seg = NULL; + rxq->pkt_last_seg = NULL; + + rxq->rxrearm_start = 0; + rxq->rxrearm_nb = 0; +} + +void +i40e_tx_queue_release_mbufs(struct i40e_tx_queue *txq) +{ + struct rte_eth_dev *dev; + uint16_t i; + + if (!txq || !txq->sw_ring) { + PMD_DRV_LOG(DEBUG, "Pointer to txq or sw_ring is NULL"); + return; + } + + dev = &rte_eth_devices[txq->port_id]; + + /** + * vPMD tx will not set sw_ring's mbuf to NULL after free, + * so need to free remains more carefully. + */ + if (dev->tx_pkt_burst == i40e_xmit_pkts_vec_avx2 || + dev->tx_pkt_burst == i40e_xmit_pkts_vec) { + i = txq->tx_next_dd - txq->tx_rs_thresh + 1; + if (txq->tx_tail < i) { + for (; i < txq->nb_tx_desc; i++) { + rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf); + txq->sw_ring[i].mbuf = NULL; + } + i = 0; + } + for (; i < txq->tx_tail; i++) { + rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf); + txq->sw_ring[i].mbuf = NULL; + } + } else { + for (i = 0; i < txq->nb_tx_desc; i++) { + if (txq->sw_ring[i].mbuf) { + rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf); + txq->sw_ring[i].mbuf = NULL; + } + } + } +} + +static int +i40e_tx_done_cleanup_full(struct i40e_tx_queue *txq, + uint32_t free_cnt) +{ + struct i40e_tx_entry *swr_ring = txq->sw_ring; + uint16_t i, tx_last, tx_id; + uint16_t nb_tx_free_last; + uint16_t nb_tx_to_clean; + uint32_t pkt_cnt; + + /* Start free mbuf from the next of tx_tail */ + tx_last = txq->tx_tail; + tx_id = swr_ring[tx_last].next_id; + + if (txq->nb_tx_free == 0 && i40e_xmit_cleanup(txq)) + return 0; + + nb_tx_to_clean = txq->nb_tx_free; + nb_tx_free_last = txq->nb_tx_free; + if (!free_cnt) + free_cnt = txq->nb_tx_desc; + + /* Loop through swr_ring to count the amount of + * freeable mubfs and packets. 
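+ *
+ * Illustrative example (editor's note): for a packet that was spread
+ * over three TX entries, all three mbuf segments are freed as tx_id
+ * advances, but pkt_cnt is incremented only for the entry whose
+ * last_id equals its own index, so the return value counts packets
+ * rather than segments.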
+ */ + for (pkt_cnt = 0; pkt_cnt < free_cnt; ) { + for (i = 0; i < nb_tx_to_clean && + pkt_cnt < free_cnt && + tx_id != tx_last; i++) { + if (swr_ring[tx_id].mbuf != NULL) { + rte_pktmbuf_free_seg(swr_ring[tx_id].mbuf); + swr_ring[tx_id].mbuf = NULL; + + /* + * last segment in the packet, + * increment packet count + */ + pkt_cnt += (swr_ring[tx_id].last_id == tx_id); + } + + tx_id = swr_ring[tx_id].next_id; + } + + if (txq->tx_rs_thresh > txq->nb_tx_desc - + txq->nb_tx_free || tx_id == tx_last) + break; + + if (pkt_cnt < free_cnt) { + if (i40e_xmit_cleanup(txq)) + break; + + nb_tx_to_clean = txq->nb_tx_free - nb_tx_free_last; + nb_tx_free_last = txq->nb_tx_free; + } + } + + return (int)pkt_cnt; +} + +static int +i40e_tx_done_cleanup_simple(struct i40e_tx_queue *txq, + uint32_t free_cnt) +{ + int i, n, cnt; + + if (free_cnt == 0 || free_cnt > txq->nb_tx_desc) + free_cnt = txq->nb_tx_desc; + + cnt = free_cnt - free_cnt % txq->tx_rs_thresh; + + for (i = 0; i < cnt; i += n) { + if (txq->nb_tx_desc - txq->nb_tx_free < txq->tx_rs_thresh) + break; + + n = i40e_tx_free_bufs(txq); + + if (n == 0) + break; + } + + return i; +} + +static int +i40e_tx_done_cleanup_vec(struct i40e_tx_queue *txq __rte_unused, + uint32_t free_cnt __rte_unused) +{ + return -ENOTSUP; +} +int +i40e_tx_done_cleanup(void *txq, uint32_t free_cnt) +{ + struct i40e_tx_queue *q = (struct i40e_tx_queue *)txq; + struct rte_eth_dev *dev = &rte_eth_devices[q->port_id]; + struct i40e_adapter *ad = + I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + + if (ad->tx_simple_allowed) { + if (ad->tx_vec_allowed) + return i40e_tx_done_cleanup_vec(q, free_cnt); + else + return i40e_tx_done_cleanup_simple(q, free_cnt); + } else { + return i40e_tx_done_cleanup_full(q, free_cnt); + } +} + +void +i40e_reset_tx_queue(struct i40e_tx_queue *txq) +{ + struct i40e_tx_entry *txe; + uint16_t i, prev, size; + + if (!txq) { + PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL"); + return; + } + + txe = txq->sw_ring; + size = sizeof(struct i40e_tx_desc) * txq->nb_tx_desc; + for (i = 0; i < size; i++) + ((volatile char *)txq->tx_ring)[i] = 0; + + prev = (uint16_t)(txq->nb_tx_desc - 1); + for (i = 0; i < txq->nb_tx_desc; i++) { + volatile struct i40e_tx_desc *txd = &txq->tx_ring[i]; + + txd->cmd_type_offset_bsz = + rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE); + txe[i].mbuf = NULL; + txe[i].last_id = i; + txe[prev].next_id = i; + prev = i; + } + + txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1); + txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1); + + txq->tx_tail = 0; + txq->nb_tx_used = 0; + + txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1); + txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1); +} + +/* Init the TX queue in hardware */ +int +i40e_tx_queue_init(struct i40e_tx_queue *txq) +{ + enum i40e_status_code err = I40E_SUCCESS; + struct i40e_vsi *vsi = txq->vsi; + struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); + uint16_t pf_q = txq->reg_idx; + struct i40e_hmc_obj_txq tx_ctx; + uint32_t qtx_ctl; + + /* clear the context structure first */ + memset(&tx_ctx, 0, sizeof(tx_ctx)); + tx_ctx.new_context = 1; + tx_ctx.base = txq->tx_ring_phys_addr / I40E_QUEUE_BASE_ADDR_UNIT; + tx_ctx.qlen = txq->nb_tx_desc; + +#ifdef RTE_LIBRTE_IEEE1588 + tx_ctx.timesync_ena = 1; +#endif + tx_ctx.rdylist = rte_le_to_cpu_16(vsi->info.qs_handle[txq->dcb_tc]); + if (vsi->type == I40E_VSI_FDIR) + tx_ctx.fd_ena = TRUE; + + err = i40e_clear_lan_tx_queue_context(hw, pf_q); + if (err != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "Failure of clean lan tx queue context"); + return err; 
+ } + + err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx); + if (err != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "Failure of set lan tx queue context"); + return err; + } + + /* Now associate this queue with this PCI function */ + qtx_ctl = I40E_QTX_CTL_PF_QUEUE; + qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) & + I40E_QTX_CTL_PF_INDX_MASK); + I40E_WRITE_REG(hw, I40E_QTX_CTL(pf_q), qtx_ctl); + I40E_WRITE_FLUSH(hw); + + txq->qtx_tail = hw->hw_addr + I40E_QTX_TAIL(pf_q); + + return err; +} + +int +i40e_alloc_rx_queue_mbufs(struct i40e_rx_queue *rxq) +{ + struct i40e_rx_entry *rxe = rxq->sw_ring; + uint64_t dma_addr; + uint16_t i; + + for (i = 0; i < rxq->nb_rx_desc; i++) { + volatile union i40e_rx_desc *rxd; + struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mp); + + if (unlikely(!mbuf)) { + PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX"); + return -ENOMEM; + } + + rte_mbuf_refcnt_set(mbuf, 1); + mbuf->next = NULL; + mbuf->data_off = RTE_PKTMBUF_HEADROOM; + mbuf->nb_segs = 1; + mbuf->port = rxq->port_id; + + dma_addr = + rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf)); + + rxd = &rxq->rx_ring[i]; + rxd->read.pkt_addr = dma_addr; + rxd->read.hdr_addr = 0; +#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC + rxd->read.rsvd1 = 0; + rxd->read.rsvd2 = 0; +#endif /* RTE_LIBRTE_I40E_16BYTE_RX_DESC */ + + rxe[i].mbuf = mbuf; + } + + return 0; +} + +/* + * Calculate the buffer length, and check the jumbo frame + * and maximum packet length. + */ +static int +i40e_rx_queue_config(struct i40e_rx_queue *rxq) +{ + struct i40e_pf *pf = I40E_VSI_TO_PF(rxq->vsi); + struct i40e_hw *hw = I40E_VSI_TO_HW(rxq->vsi); + struct rte_eth_dev_data *data = pf->dev_data; + uint16_t buf_size; + + buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) - + RTE_PKTMBUF_HEADROOM); + + switch (pf->flags & (I40E_FLAG_HEADER_SPLIT_DISABLED | + I40E_FLAG_HEADER_SPLIT_ENABLED)) { + case I40E_FLAG_HEADER_SPLIT_ENABLED: /* Not supported */ + rxq->rx_hdr_len = RTE_ALIGN(I40E_RXBUF_SZ_1024, + (1 << I40E_RXQ_CTX_HBUFF_SHIFT)); + rxq->rx_buf_len = RTE_ALIGN(I40E_RXBUF_SZ_2048, + (1 << I40E_RXQ_CTX_DBUFF_SHIFT)); + rxq->hs_mode = i40e_header_split_enabled; + break; + case I40E_FLAG_HEADER_SPLIT_DISABLED: + default: + rxq->rx_hdr_len = 0; + rxq->rx_buf_len = RTE_ALIGN_FLOOR(buf_size, + (1 << I40E_RXQ_CTX_DBUFF_SHIFT)); + rxq->hs_mode = i40e_header_split_none; + break; + } + + rxq->max_pkt_len = + RTE_MIN((uint32_t)(hw->func_caps.rx_buf_chain_len * + rxq->rx_buf_len), data->dev_conf.rxmode.max_rx_pkt_len); + if (data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) { + if (rxq->max_pkt_len <= RTE_ETHER_MAX_LEN || + rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) { + PMD_DRV_LOG(ERR, "maximum packet length must " + "be larger than %u and smaller than %u," + "as jumbo frame is enabled", + (uint32_t)RTE_ETHER_MAX_LEN, + (uint32_t)I40E_FRAME_SIZE_MAX); + return I40E_ERR_CONFIG; + } + } else { + if (rxq->max_pkt_len < RTE_ETHER_MIN_LEN || + rxq->max_pkt_len > RTE_ETHER_MAX_LEN) { + PMD_DRV_LOG(ERR, "maximum packet length must be " + "larger than %u and smaller than %u, " + "as jumbo frame is disabled", + (uint32_t)RTE_ETHER_MIN_LEN, + (uint32_t)RTE_ETHER_MAX_LEN); + return I40E_ERR_CONFIG; + } + } + + return 0; +} + +/* Init the RX queue in hardware */ +int +i40e_rx_queue_init(struct i40e_rx_queue *rxq) +{ + int err = I40E_SUCCESS; + struct i40e_hw *hw = I40E_VSI_TO_HW(rxq->vsi); + struct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(rxq->vsi); + uint16_t pf_q = rxq->reg_idx; + uint16_t buf_size; + struct i40e_hmc_obj_rxq rx_ctx; + + 
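+ /*
+ * Editor's illustrative note on the i40e_rx_queue_config() call below
+ * (hypothetical mempool sizing, not taken from this code): a mempool
+ * whose data room is 2048 + RTE_PKTMBUF_HEADROOM bytes yields
+ * buf_size = 2048, which is already a multiple of the hardware buffer
+ * granularity, so rx_buf_len = 2048; max_pkt_len is then clamped to
+ * min(rx_buf_chain_len * 2048, the configured max_rx_pkt_len), and if
+ * it exceeds a single buffer, scattered RX is enabled later in this
+ * function.
+ */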
err = i40e_rx_queue_config(rxq); + if (err < 0) { + PMD_DRV_LOG(ERR, "Failed to config RX queue"); + return err; + } + + /* Clear the context structure first */ + memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq)); + rx_ctx.dbuff = rxq->rx_buf_len >> I40E_RXQ_CTX_DBUFF_SHIFT; + rx_ctx.hbuff = rxq->rx_hdr_len >> I40E_RXQ_CTX_HBUFF_SHIFT; + + rx_ctx.base = rxq->rx_ring_phys_addr / I40E_QUEUE_BASE_ADDR_UNIT; + rx_ctx.qlen = rxq->nb_rx_desc; +#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC + rx_ctx.dsize = 1; +#endif + rx_ctx.dtype = rxq->hs_mode; + if (rxq->hs_mode) + rx_ctx.hsplit_0 = I40E_HEADER_SPLIT_ALL; + else + rx_ctx.hsplit_0 = I40E_HEADER_SPLIT_NONE; + rx_ctx.rxmax = rxq->max_pkt_len; + rx_ctx.tphrdesc_ena = 1; + rx_ctx.tphwdesc_ena = 1; + rx_ctx.tphdata_ena = 1; + rx_ctx.tphhead_ena = 1; + rx_ctx.lrxqthresh = 2; + rx_ctx.crcstrip = (rxq->crc_len == 0) ? 1 : 0; + rx_ctx.l2tsel = 1; + /* showiv indicates if inner VLAN is stripped inside of tunnel + * packet. When set it to 1, vlan information is stripped from + * the inner header, but the hardware does not put it in the + * descriptor. So set it zero by default. + */ + rx_ctx.showiv = 0; + rx_ctx.prefena = 1; + + err = i40e_clear_lan_rx_queue_context(hw, pf_q); + if (err != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "Failed to clear LAN RX queue context"); + return err; + } + err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx); + if (err != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "Failed to set LAN RX queue context"); + return err; + } + + rxq->qrx_tail = hw->hw_addr + I40E_QRX_TAIL(pf_q); + + buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) - + RTE_PKTMBUF_HEADROOM); + + /* Check if scattered RX needs to be used. */ + if (rxq->max_pkt_len > buf_size) + dev_data->scattered_rx = 1; + + /* Init the RX tail regieter. */ + I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1); + + return 0; +} + +void +i40e_dev_clear_queues(struct rte_eth_dev *dev) +{ + uint16_t i; + + PMD_INIT_FUNC_TRACE(); + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + if (!dev->data->tx_queues[i]) + continue; + i40e_tx_queue_release_mbufs(dev->data->tx_queues[i]); + i40e_reset_tx_queue(dev->data->tx_queues[i]); + } + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + if (!dev->data->rx_queues[i]) + continue; + i40e_rx_queue_release_mbufs(dev->data->rx_queues[i]); + i40e_reset_rx_queue(dev->data->rx_queues[i]); + } +} + +void +i40e_dev_free_queues(struct rte_eth_dev *dev) +{ + uint16_t i; + + PMD_INIT_FUNC_TRACE(); + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + if (!dev->data->rx_queues[i]) + continue; + i40e_dev_rx_queue_release(dev->data->rx_queues[i]); + dev->data->rx_queues[i] = NULL; + } + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + if (!dev->data->tx_queues[i]) + continue; + i40e_dev_tx_queue_release(dev->data->tx_queues[i]); + dev->data->tx_queues[i] = NULL; + } +} + +#define I40E_FDIR_NUM_TX_DESC I40E_MIN_RING_DESC +#define I40E_FDIR_NUM_RX_DESC I40E_MIN_RING_DESC + +enum i40e_status_code +i40e_fdir_setup_tx_resources(struct i40e_pf *pf) +{ + struct i40e_tx_queue *txq; + const struct rte_memzone *tz = NULL; + uint32_t ring_size; + struct rte_eth_dev *dev; + + if (!pf) { + PMD_DRV_LOG(ERR, "PF is not available"); + return I40E_ERR_BAD_PTR; + } + + dev = pf->adapter->eth_dev; + + /* Allocate the TX queue data structure. 
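+ *
+ * Editor's sizing sketch (assuming the usual 16-byte i40e TX
+ * descriptor): the flow director queue uses I40E_FDIR_NUM_TX_DESC =
+ * I40E_MIN_RING_DESC descriptors, i.e. 64 * 16 = 1 KiB of ring space
+ * before alignment, far smaller than a regular data queue.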
*/ + txq = rte_zmalloc_socket("i40e fdir tx queue", + sizeof(struct i40e_tx_queue), + RTE_CACHE_LINE_SIZE, + SOCKET_ID_ANY); + if (!txq) { + PMD_DRV_LOG(ERR, "Failed to allocate memory for " + "tx queue structure."); + return I40E_ERR_NO_MEMORY; + } + + /* Allocate TX hardware ring descriptors. */ + ring_size = sizeof(struct i40e_tx_desc) * I40E_FDIR_NUM_TX_DESC; + ring_size = RTE_ALIGN(ring_size, I40E_DMA_MEM_ALIGN); + + tz = rte_eth_dma_zone_reserve(dev, "fdir_tx_ring", + I40E_FDIR_QUEUE_ID, ring_size, + I40E_RING_BASE_ALIGN, SOCKET_ID_ANY); + if (!tz) { + i40e_dev_tx_queue_release(txq); + PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for TX."); + return I40E_ERR_NO_MEMORY; + } + + txq->nb_tx_desc = I40E_FDIR_NUM_TX_DESC; + txq->queue_id = I40E_FDIR_QUEUE_ID; + txq->reg_idx = pf->fdir.fdir_vsi->base_queue; + txq->vsi = pf->fdir.fdir_vsi; + + txq->tx_ring_phys_addr = tz->iova; + txq->tx_ring = (struct i40e_tx_desc *)tz->addr; + /* + * don't need to allocate software ring and reset for the fdir + * program queue just set the queue has been configured. + */ + txq->q_set = TRUE; + pf->fdir.txq = txq; + + return I40E_SUCCESS; +} + +enum i40e_status_code +i40e_fdir_setup_rx_resources(struct i40e_pf *pf) +{ + struct i40e_rx_queue *rxq; + const struct rte_memzone *rz = NULL; + uint32_t ring_size; + struct rte_eth_dev *dev; + + if (!pf) { + PMD_DRV_LOG(ERR, "PF is not available"); + return I40E_ERR_BAD_PTR; + } + + dev = pf->adapter->eth_dev; + + /* Allocate the RX queue data structure. */ + rxq = rte_zmalloc_socket("i40e fdir rx queue", + sizeof(struct i40e_rx_queue), + RTE_CACHE_LINE_SIZE, + SOCKET_ID_ANY); + if (!rxq) { + PMD_DRV_LOG(ERR, "Failed to allocate memory for " + "rx queue structure."); + return I40E_ERR_NO_MEMORY; + } + + /* Allocate RX hardware ring descriptors. */ + ring_size = sizeof(union i40e_rx_desc) * I40E_FDIR_NUM_RX_DESC; + ring_size = RTE_ALIGN(ring_size, I40E_DMA_MEM_ALIGN); + + rz = rte_eth_dma_zone_reserve(dev, "fdir_rx_ring", + I40E_FDIR_QUEUE_ID, ring_size, + I40E_RING_BASE_ALIGN, SOCKET_ID_ANY); + if (!rz) { + i40e_dev_rx_queue_release(rxq); + PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX."); + return I40E_ERR_NO_MEMORY; + } + + rxq->nb_rx_desc = I40E_FDIR_NUM_RX_DESC; + rxq->queue_id = I40E_FDIR_QUEUE_ID; + rxq->reg_idx = pf->fdir.fdir_vsi->base_queue; + rxq->vsi = pf->fdir.fdir_vsi; + + rxq->rx_ring_phys_addr = rz->iova; + memset(rz->addr, 0, I40E_FDIR_NUM_RX_DESC * sizeof(union i40e_rx_desc)); + rxq->rx_ring = (union i40e_rx_desc *)rz->addr; + + /* + * Don't need to allocate software ring and reset for the fdir + * rx queue, just set the queue has been configured. 
+ */ + rxq->q_set = TRUE; + pf->fdir.rxq = rxq; + + return I40E_SUCCESS; +} + +void +i40e_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, + struct rte_eth_rxq_info *qinfo) +{ + struct i40e_rx_queue *rxq; + + rxq = dev->data->rx_queues[queue_id]; + + qinfo->mp = rxq->mp; + qinfo->scattered_rx = dev->data->scattered_rx; + qinfo->nb_desc = rxq->nb_rx_desc; + + qinfo->conf.rx_free_thresh = rxq->rx_free_thresh; + qinfo->conf.rx_drop_en = rxq->drop_en; + qinfo->conf.rx_deferred_start = rxq->rx_deferred_start; + qinfo->conf.offloads = rxq->offloads; +} + +void +i40e_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, + struct rte_eth_txq_info *qinfo) +{ + struct i40e_tx_queue *txq; + + txq = dev->data->tx_queues[queue_id]; + + qinfo->nb_desc = txq->nb_tx_desc; + + qinfo->conf.tx_thresh.pthresh = txq->pthresh; + qinfo->conf.tx_thresh.hthresh = txq->hthresh; + qinfo->conf.tx_thresh.wthresh = txq->wthresh; + + qinfo->conf.tx_free_thresh = txq->tx_free_thresh; + qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh; + qinfo->conf.tx_deferred_start = txq->tx_deferred_start; + qinfo->conf.offloads = txq->offloads; +} + +static eth_rx_burst_t +i40e_get_latest_rx_vec(bool scatter) +{ +#if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT) + if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2)) + return scatter ? i40e_recv_scattered_pkts_vec_avx2 : + i40e_recv_pkts_vec_avx2; +#endif + return scatter ? i40e_recv_scattered_pkts_vec : + i40e_recv_pkts_vec; +} + +static eth_rx_burst_t +i40e_get_recommend_rx_vec(bool scatter) +{ +#if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT) + /* + * since AVX frequency can be different to base frequency, limit + * use of AVX2 version to later plaforms, not all those that could + * theoretically run it. + */ + if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F)) + return scatter ? i40e_recv_scattered_pkts_vec_avx2 : + i40e_recv_pkts_vec_avx2; +#endif + return scatter ? i40e_recv_scattered_pkts_vec : + i40e_recv_pkts_vec; +} + +void __rte_cold +i40e_set_rx_function(struct rte_eth_dev *dev) +{ + struct i40e_adapter *ad = + I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + uint16_t rx_using_sse, i; + /* In order to allow Vector Rx there are a few configuration + * conditions to be met and Rx Bulk Allocation should be allowed. + */ + if (rte_eal_process_type() == RTE_PROC_PRIMARY) { + if (i40e_rx_vec_dev_conf_condition_check(dev) || + !ad->rx_bulk_alloc_allowed) { + PMD_INIT_LOG(DEBUG, "Port[%d] doesn't meet" + " Vector Rx preconditions", + dev->data->port_id); + + ad->rx_vec_allowed = false; + } + if (ad->rx_vec_allowed) { + for (i = 0; i < dev->data->nb_rx_queues; i++) { + struct i40e_rx_queue *rxq = + dev->data->rx_queues[i]; + + if (rxq && i40e_rxq_vec_setup(rxq)) { + ad->rx_vec_allowed = false; + break; + } + } + } + } + + if (ad->rx_vec_allowed) { + /* Vec Rx path */ + PMD_INIT_LOG(DEBUG, "Vector Rx path will be used on port=%d.", + dev->data->port_id); + if (ad->use_latest_vec) + dev->rx_pkt_burst = + i40e_get_latest_rx_vec(dev->data->scattered_rx); + else + dev->rx_pkt_burst = + i40e_get_recommend_rx_vec(dev->data->scattered_rx); + } else if (!dev->data->scattered_rx && ad->rx_bulk_alloc_allowed) { + PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are " + "satisfied. Rx Burst Bulk Alloc function " + "will be used on port=%d.", + dev->data->port_id); + + dev->rx_pkt_burst = i40e_recv_pkts_bulk_alloc; + } else { + /* Simple Rx Path. 
*/ + PMD_INIT_LOG(DEBUG, "Simple Rx path will be used on port=%d.", + dev->data->port_id); + dev->rx_pkt_burst = dev->data->scattered_rx ? + i40e_recv_scattered_pkts : + i40e_recv_pkts; + } + + /* Propagate information about RX function choice through all queues. */ + if (rte_eal_process_type() == RTE_PROC_PRIMARY) { + rx_using_sse = + (dev->rx_pkt_burst == i40e_recv_scattered_pkts_vec || + dev->rx_pkt_burst == i40e_recv_pkts_vec || + dev->rx_pkt_burst == i40e_recv_scattered_pkts_vec_avx2 || + dev->rx_pkt_burst == i40e_recv_pkts_vec_avx2); + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + struct i40e_rx_queue *rxq = dev->data->rx_queues[i]; + + if (rxq) + rxq->rx_using_sse = rx_using_sse; + } + } +} + +static const struct { + eth_rx_burst_t pkt_burst; + const char *info; +} i40e_rx_burst_infos[] = { + { i40e_recv_scattered_pkts, "Scalar Scattered" }, + { i40e_recv_pkts_bulk_alloc, "Scalar Bulk Alloc" }, + { i40e_recv_pkts, "Scalar" }, +#ifdef RTE_ARCH_X86 + { i40e_recv_scattered_pkts_vec_avx2, "Vector AVX2 Scattered" }, + { i40e_recv_pkts_vec_avx2, "Vector AVX2" }, + { i40e_recv_scattered_pkts_vec, "Vector SSE Scattered" }, + { i40e_recv_pkts_vec, "Vector SSE" }, +#elif defined(RTE_ARCH_ARM64) + { i40e_recv_scattered_pkts_vec, "Vector Neon Scattered" }, + { i40e_recv_pkts_vec, "Vector Neon" }, +#elif defined(RTE_ARCH_PPC_64) + { i40e_recv_scattered_pkts_vec, "Vector AltiVec Scattered" }, + { i40e_recv_pkts_vec, "Vector AltiVec" }, +#endif +}; + +int +i40e_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id, + struct rte_eth_burst_mode *mode) +{ + eth_rx_burst_t pkt_burst = dev->rx_pkt_burst; + int ret = -EINVAL; + unsigned int i; + + for (i = 0; i < RTE_DIM(i40e_rx_burst_infos); ++i) { + if (pkt_burst == i40e_rx_burst_infos[i].pkt_burst) { + snprintf(mode->info, sizeof(mode->info), "%s", + i40e_rx_burst_infos[i].info); + ret = 0; + break; + } + } + + return ret; +} + +void __rte_cold +i40e_set_tx_function_flag(struct rte_eth_dev *dev, struct i40e_tx_queue *txq) +{ + struct i40e_adapter *ad = + I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + + /* Use a simple Tx queue if possible (only fast free is allowed) */ + ad->tx_simple_allowed = + (txq->offloads == + (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) && + txq->tx_rs_thresh >= RTE_PMD_I40E_TX_MAX_BURST); + ad->tx_vec_allowed = (ad->tx_simple_allowed && + txq->tx_rs_thresh <= RTE_I40E_TX_MAX_FREE_BUF_SZ); + + if (ad->tx_vec_allowed) + PMD_INIT_LOG(DEBUG, "Vector Tx can be enabled on Tx queue %u.", + txq->queue_id); + else if (ad->tx_simple_allowed) + PMD_INIT_LOG(DEBUG, "Simple Tx can be enabled on Tx queue %u.", + txq->queue_id); + else + PMD_INIT_LOG(DEBUG, + "Neither simple nor vector Tx enabled on Tx queue %u\n", + txq->queue_id); +} + +static eth_tx_burst_t +i40e_get_latest_tx_vec(void) +{ +#if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT) + if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2)) + return i40e_xmit_pkts_vec_avx2; +#endif + return i40e_xmit_pkts_vec; +} + +static eth_tx_burst_t +i40e_get_recommend_tx_vec(void) +{ +#if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT) + /* + * since AVX frequency can be different to base frequency, limit + * use of AVX2 version to later plaforms, not all those that could + * theoretically run it. 
+ */ + if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F)) + return i40e_xmit_pkts_vec_avx2; +#endif + return i40e_xmit_pkts_vec; +} + +void __rte_cold +i40e_set_tx_function(struct rte_eth_dev *dev) +{ + struct i40e_adapter *ad = + I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + int i; + + if (rte_eal_process_type() == RTE_PROC_PRIMARY) { + if (ad->tx_vec_allowed) { + for (i = 0; i < dev->data->nb_tx_queues; i++) { + struct i40e_tx_queue *txq = + dev->data->tx_queues[i]; + + if (txq && i40e_txq_vec_setup(txq)) { + ad->tx_vec_allowed = false; + break; + } + } + } + } + + if (ad->tx_simple_allowed) { + if (ad->tx_vec_allowed) { + PMD_INIT_LOG(DEBUG, "Vector tx finally be used."); + if (ad->use_latest_vec) + dev->tx_pkt_burst = + i40e_get_latest_tx_vec(); + else + dev->tx_pkt_burst = + i40e_get_recommend_tx_vec(); + } else { + PMD_INIT_LOG(DEBUG, "Simple tx finally be used."); + dev->tx_pkt_burst = i40e_xmit_pkts_simple; + } + dev->tx_pkt_prepare = NULL; + } else { + PMD_INIT_LOG(DEBUG, "Xmit tx finally be used."); + dev->tx_pkt_burst = i40e_xmit_pkts; + dev->tx_pkt_prepare = i40e_prep_pkts; + } +} + +static const struct { + eth_tx_burst_t pkt_burst; + const char *info; +} i40e_tx_burst_infos[] = { + { i40e_xmit_pkts_simple, "Scalar Simple" }, + { i40e_xmit_pkts, "Scalar" }, +#ifdef RTE_ARCH_X86 + { i40e_xmit_pkts_vec_avx2, "Vector AVX2" }, + { i40e_xmit_pkts_vec, "Vector SSE" }, +#elif defined(RTE_ARCH_ARM64) + { i40e_xmit_pkts_vec, "Vector Neon" }, +#elif defined(RTE_ARCH_PPC_64) + { i40e_xmit_pkts_vec, "Vector AltiVec" }, +#endif +}; + +int +i40e_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id, + struct rte_eth_burst_mode *mode) +{ + eth_tx_burst_t pkt_burst = dev->tx_pkt_burst; + int ret = -EINVAL; + unsigned int i; + + for (i = 0; i < RTE_DIM(i40e_tx_burst_infos); ++i) { + if (pkt_burst == i40e_tx_burst_infos[i].pkt_burst) { + snprintf(mode->info, sizeof(mode->info), "%s", + i40e_tx_burst_infos[i].info); + ret = 0; + break; + } + } + + return ret; +} + +void __rte_cold +i40e_set_default_ptype_table(struct rte_eth_dev *dev) +{ + struct i40e_adapter *ad = + I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + int i; + + for (i = 0; i < I40E_MAX_PKT_TYPE; i++) + ad->ptype_tbl[i] = i40e_get_default_pkt_type(i); +} + +void __rte_cold +i40e_set_default_pctype_table(struct rte_eth_dev *dev) +{ + struct i40e_adapter *ad = + I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int i; + + for (i = 0; i < I40E_FLOW_TYPE_MAX; i++) + ad->pctypes_tbl[i] = 0ULL; + ad->flow_types_mask = 0ULL; + ad->pctypes_mask = 0ULL; + + ad->pctypes_tbl[RTE_ETH_FLOW_FRAG_IPV4] = + (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4); + ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV4_UDP] = + (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP); + ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV4_TCP] = + (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP); + ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV4_SCTP] = + (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP); + ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV4_OTHER] = + (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER); + ad->pctypes_tbl[RTE_ETH_FLOW_FRAG_IPV6] = + (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6); + ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV6_UDP] = + (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP); + ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV6_TCP] = + (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP); + ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV6_SCTP] = + (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP); + 
ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV6_OTHER] = + (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER); + ad->pctypes_tbl[RTE_ETH_FLOW_L2_PAYLOAD] = + (1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD); + + if (hw->mac.type == I40E_MAC_X722 || + hw->mac.type == I40E_MAC_X722_VF) { + ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV4_UDP] |= + (1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP); + ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV4_UDP] |= + (1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP); + ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV4_TCP] |= + (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK); + ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV6_UDP] |= + (1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP); + ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV6_UDP] |= + (1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP); + ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV6_TCP] |= + (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK); + } + + for (i = 0; i < I40E_FLOW_TYPE_MAX; i++) { + if (ad->pctypes_tbl[i]) + ad->flow_types_mask |= (1ULL << i); + ad->pctypes_mask |= ad->pctypes_tbl[i]; + } +} + +#ifndef RTE_LIBRTE_I40E_INC_VECTOR +/* Stubs needed for linkage when CONFIG_RTE_LIBRTE_I40E_INC_VECTOR is set to 'n' */ +int +i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev __rte_unused *dev) +{ + return -1; +} + +uint16_t +i40e_recv_pkts_vec( + void __rte_unused *rx_queue, + struct rte_mbuf __rte_unused **rx_pkts, + uint16_t __rte_unused nb_pkts) +{ + return 0; +} + +uint16_t +i40e_recv_scattered_pkts_vec( + void __rte_unused *rx_queue, + struct rte_mbuf __rte_unused **rx_pkts, + uint16_t __rte_unused nb_pkts) +{ + return 0; +} + +int +i40e_rxq_vec_setup(struct i40e_rx_queue __rte_unused *rxq) +{ + return -1; +} + +int +i40e_txq_vec_setup(struct i40e_tx_queue __rte_unused *txq) +{ + return -1; +} + +void +i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue __rte_unused*rxq) +{ + return; +} + +uint16_t +i40e_xmit_fixed_burst_vec(void __rte_unused * tx_queue, + struct rte_mbuf __rte_unused **tx_pkts, + uint16_t __rte_unused nb_pkts) +{ + return 0; +} +#endif /* ifndef RTE_LIBRTE_I40E_INC_VECTOR */ + +#ifndef CC_AVX2_SUPPORT +uint16_t +i40e_recv_pkts_vec_avx2(void __rte_unused *rx_queue, + struct rte_mbuf __rte_unused **rx_pkts, + uint16_t __rte_unused nb_pkts) +{ + return 0; +} + +uint16_t +i40e_recv_scattered_pkts_vec_avx2(void __rte_unused *rx_queue, + struct rte_mbuf __rte_unused **rx_pkts, + uint16_t __rte_unused nb_pkts) +{ + return 0; +} + +uint16_t +i40e_xmit_pkts_vec_avx2(void __rte_unused * tx_queue, + struct rte_mbuf __rte_unused **tx_pkts, + uint16_t __rte_unused nb_pkts) +{ + return 0; +} +#endif /* ifndef CC_AVX2_SUPPORT */ diff --git a/src/spdk/dpdk/drivers/net/i40e/i40e_rxtx.h b/src/spdk/dpdk/drivers/net/i40e/i40e_rxtx.h new file mode 100644 index 000000000..8f11f011a --- /dev/null +++ b/src/spdk/dpdk/drivers/net/i40e/i40e_rxtx.h @@ -0,0 +1,812 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2015 Intel Corporation + */ + +#ifndef _I40E_RXTX_H_ +#define _I40E_RXTX_H_ + +#define RTE_PMD_I40E_RX_MAX_BURST 32 +#define RTE_PMD_I40E_TX_MAX_BURST 32 + +#define RTE_I40E_VPMD_RX_BURST 32 +#define RTE_I40E_VPMD_TX_BURST 32 +#define RTE_I40E_RXQ_REARM_THRESH 32 +#define RTE_I40E_MAX_RX_BURST RTE_I40E_RXQ_REARM_THRESH +#define RTE_I40E_TX_MAX_FREE_BUF_SZ 64 +#define RTE_I40E_DESCS_PER_LOOP 4 + +#define I40E_RXBUF_SZ_1024 1024 +#define I40E_RXBUF_SZ_2048 2048 + +/* In none-PXE mode QLEN must be whole number of 32 descriptors. 
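+ *
+ * Editor's note: combined with the limits below, queue setup therefore
+ * only accepts ring sizes that are multiples of 32 in the range
+ * [64, 4096] -- e.g. 512 or 1024 -- and i40e_dev_rx_queue_setup() /
+ * i40e_dev_tx_queue_setup() reject anything else with -EINVAL.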
*/ +#define I40E_ALIGN_RING_DESC 32 + +#define I40E_MIN_RING_DESC 64 +#define I40E_MAX_RING_DESC 4096 + +#define I40E_MIN_TSO_MSS 256 +#define I40E_MAX_TSO_MSS 9674 + +#define I40E_TX_MAX_SEG UINT8_MAX +#define I40E_TX_MAX_MTU_SEG 8 + +#define I40E_TX_MIN_PKT_LEN 17 + +/* Shared FDIR masks between scalar / vector drivers */ +#define I40E_RX_DESC_EXT_STATUS_FLEXBH_MASK 0x03 +#define I40E_RX_DESC_EXT_STATUS_FLEXBH_FD_ID 0x01 +#define I40E_RX_DESC_EXT_STATUS_FLEXBH_FLEX 0x02 +#define I40E_RX_DESC_EXT_STATUS_FLEXBL_MASK 0x03 +#define I40E_RX_DESC_EXT_STATUS_FLEXBL_FLEX 0x01 + +#undef container_of +#define container_of(ptr, type, member) ({ \ + typeof(((type *)0)->member)(*__mptr) = (ptr); \ + (type *)((char *)__mptr - offsetof(type, member)); }) + +#define I40E_TD_CMD (I40E_TX_DESC_CMD_ICRC |\ + I40E_TX_DESC_CMD_EOP) + +enum i40e_header_split_mode { + i40e_header_split_none = 0, + i40e_header_split_enabled = 1, + i40e_header_split_always = 2, + i40e_header_split_reserved +}; + +#define I40E_HEADER_SPLIT_NONE ((uint8_t)0) +#define I40E_HEADER_SPLIT_L2 ((uint8_t)(1 << 0)) +#define I40E_HEADER_SPLIT_IP ((uint8_t)(1 << 1)) +#define I40E_HEADER_SPLIT_UDP_TCP ((uint8_t)(1 << 2)) +#define I40E_HEADER_SPLIT_SCTP ((uint8_t)(1 << 3)) +#define I40E_HEADER_SPLIT_ALL (I40E_HEADER_SPLIT_L2 | \ + I40E_HEADER_SPLIT_IP | \ + I40E_HEADER_SPLIT_UDP_TCP | \ + I40E_HEADER_SPLIT_SCTP) + +/* HW desc structure, both 16-byte and 32-byte types are supported */ +#ifdef RTE_LIBRTE_I40E_16BYTE_RX_DESC +#define i40e_rx_desc i40e_16byte_rx_desc +#else +#define i40e_rx_desc i40e_32byte_rx_desc +#endif + +struct i40e_rx_entry { + struct rte_mbuf *mbuf; +}; + +/* + * Structure associated with each RX queue. + */ +struct i40e_rx_queue { + struct rte_mempool *mp; /**< mbuf pool to populate RX ring */ + volatile union i40e_rx_desc *rx_ring;/**< RX ring virtual address */ + uint64_t rx_ring_phys_addr; /**< RX ring DMA address */ + struct i40e_rx_entry *sw_ring; /**< address of RX soft ring */ + uint16_t nb_rx_desc; /**< number of RX descriptors */ + uint16_t rx_free_thresh; /**< max free RX desc to hold */ + uint16_t rx_tail; /**< current value of tail */ + uint16_t nb_rx_hold; /**< number of held free RX desc */ + struct rte_mbuf *pkt_first_seg; /**< first segment of current packet */ + struct rte_mbuf *pkt_last_seg; /**< last segment of current packet */ + struct rte_mbuf fake_mbuf; /**< dummy mbuf */ +#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC + uint16_t rx_nb_avail; /**< number of staged packets ready */ + uint16_t rx_next_avail; /**< index of next staged packets */ + uint16_t rx_free_trigger; /**< triggers rx buffer allocation */ + struct rte_mbuf *rx_stage[RTE_PMD_I40E_RX_MAX_BURST * 2]; +#endif + + uint16_t rxrearm_nb; /**< number of remaining to be re-armed */ + uint16_t rxrearm_start; /**< the idx we start the re-arming from */ + uint64_t mbuf_initializer; /**< value to init mbufs */ + + uint16_t port_id; /**< device port ID */ + uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise */ + uint8_t fdir_enabled; /**< 0 if FDIR disabled, 1 when enabled */ + uint16_t queue_id; /**< RX queue index */ + uint16_t reg_idx; /**< RX queue register index */ + uint8_t drop_en; /**< if not 0, set register bit */ + volatile uint8_t *qrx_tail; /**< register address of tail */ + struct i40e_vsi *vsi; /**< the VSI this queue belongs to */ + uint16_t rx_buf_len; /* The packet buffer size */ + uint16_t rx_hdr_len; /* The header buffer size */ + uint16_t max_pkt_len; /* Maximum packet length */ + uint8_t hs_mode; /* Header Split mode */ + bool 
q_set; /**< indicate if rx queue has been configured */ + bool rx_deferred_start; /**< don't start this queue in dev start */ + uint16_t rx_using_sse; /** IPv4 */ + [29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [30] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [31] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + /* [32] reserved */ + [33] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_SCTP, + [35] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + + /* IPv4 --> IPv6 */ + [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [37] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [38] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + /* [39] reserved */ + [40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_SCTP, + [42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + + /* IPv4 --> GRE/Teredo/VXLAN */ + [43] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT, + + /* IPv4 --> GRE/Teredo/VXLAN --> IPv4 */ + [44] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + /* [47] reserved */ + [48] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_SCTP, + [50] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + + /* IPv4 --> GRE/Teredo/VXLAN --> IPv6 */ + [51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + 
RTE_PTYPE_INNER_L4_NONFRAG, + [53] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + /* [54] reserved */ + [55] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [56] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_SCTP, + [57] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + + /* IPv4 --> GRE/Teredo/VXLAN --> MAC */ + [58] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER, + + /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */ + [59] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [60] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + /* [62] reserved */ + [63] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [64] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_SCTP, + [65] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + + /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */ + [66] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [67] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [68] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + /* [69] reserved */ + [70] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [71] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_SCTP, + [72] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + + /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN */ + [73] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN, + + /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv4 */ + [74] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + 
RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [75] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [76] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + /* [77] reserved */ + [78] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [79] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_SCTP, + [80] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + + /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv6 */ + [81] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [82] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [83] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + /* [84] reserved */ + [85] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [86] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_SCTP, + [87] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + + /* Non tunneled IPv6 */ + [88] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_FRAG, + [89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_NONFRAG, + [90] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_UDP, + /* [91] reserved */ + [92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_TCP, + [93] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_SCTP, + [94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_ICMP, + + /* IPv6 --> IPv4 */ + [95] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [97] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + /* [98] reserved */ + [99] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [100] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + 
RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_SCTP, + [101] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + + /* IPv6 --> IPv6 */ + [102] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [103] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [104] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + /* [105] reserved */ + [106] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [107] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_SCTP, + [108] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + + /* IPv6 --> GRE/Teredo/VXLAN */ + [109] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT, + + /* IPv6 --> GRE/Teredo/VXLAN --> IPv4 */ + [110] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [111] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [112] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + /* [113] reserved */ + [114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_SCTP, + [116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + + /* IPv6 --> GRE/Teredo/VXLAN --> IPv6 */ + [117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [118] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [119] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + /* [120] reserved */ + [121] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [122] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_SCTP, + [123] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + + /* IPv6 --> GRE/Teredo/VXLAN --> MAC */ + [124] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER, + + /* IPv6 --> 
GRE/Teredo/VXLAN --> MAC --> IPv4 */ + [125] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [126] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [127] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + /* [128] reserved */ + [129] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [130] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_SCTP, + [131] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + + /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */ + [132] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [133] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [134] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + /* [135] reserved */ + [136] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [137] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_SCTP, + [138] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + + /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN */ + [139] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN, + + /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv4 */ + [140] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [141] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [142] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + /* [143] reserved */ + [144] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [145] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + 
RTE_PTYPE_INNER_L4_SCTP, + [146] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + + /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv6 */ + [147] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [148] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [149] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + /* [150] reserved */ + [151] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [152] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_SCTP, + [153] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + + /* L2 NSH packet type */ + [154] = RTE_PTYPE_L2_ETHER_NSH, + [155] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_FRAG, + [156] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_NONFRAG, + [157] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_UDP, + [158] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_TCP, + [159] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_SCTP, + [160] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_ICMP, + [161] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_FRAG, + [162] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_NONFRAG, + [163] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_UDP, + [164] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_TCP, + [165] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_SCTP, + [166] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_ICMP, + + /* All others reserved */ + }; + + return type_table[ptype]; +} + +#endif /* _I40E_RXTX_H_ */ diff --git a/src/spdk/dpdk/drivers/net/i40e/i40e_rxtx_vec_altivec.c b/src/spdk/dpdk/drivers/net/i40e/i40e_rxtx_vec_altivec.c new file mode 100644 index 000000000..6862a017e --- /dev/null +++ b/src/spdk/dpdk/drivers/net/i40e/i40e_rxtx_vec_altivec.c @@ -0,0 +1,616 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010 - 2015 Intel Corporation + * Copyright(c) 2017 IBM Corporation. 
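For reference, the packet-type table that ends above maps the 8-bit ptype index reported in the Rx descriptor to a combined RTE_PTYPE_* value, with unlisted indexes left at zero (unknown). A minimal standalone sketch of the same lookup idea, using hypothetical EX_PTYPE_* values rather than the real DPDK constants:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for RTE_PTYPE_* bits -- illustrative values only. */
#define EX_PTYPE_L2_ETHER  0x01u
#define EX_PTYPE_L3_IPV4   0x10u
#define EX_PTYPE_L4_UDP    0x200u

/* 256-entry table indexed by the descriptor's 8-bit ptype field;
 * unlisted indexes remain 0 (unknown), as in the driver table above. */
static const uint32_t ex_ptype_tbl[256] = {
	[1]  = EX_PTYPE_L2_ETHER,
	[24] = EX_PTYPE_L2_ETHER | EX_PTYPE_L3_IPV4 | EX_PTYPE_L4_UDP,
};

int main(void)
{
	uint8_t hw_ptype = 24;	/* as read from the descriptor */

	printf("0x%x\n", (unsigned)ex_ptype_tbl[hw_ptype]);
	return 0;
}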
+ */ + +#include +#include +#include + +#include "base/i40e_prototype.h" +#include "base/i40e_type.h" +#include "i40e_ethdev.h" +#include "i40e_rxtx.h" +#include "i40e_rxtx_vec_common.h" + +#include + +#pragma GCC diagnostic ignored "-Wcast-qual" + +static inline void +i40e_rxq_rearm(struct i40e_rx_queue *rxq) +{ + int i; + uint16_t rx_id; + volatile union i40e_rx_desc *rxdp; + + struct i40e_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start]; + struct rte_mbuf *mb0, *mb1; + + vector unsigned long hdr_room = (vector unsigned long){ + RTE_PKTMBUF_HEADROOM, + RTE_PKTMBUF_HEADROOM}; + vector unsigned long dma_addr0, dma_addr1; + + rxdp = rxq->rx_ring + rxq->rxrearm_start; + + /* Pull 'n' more MBUFs into the software ring */ + if (rte_mempool_get_bulk(rxq->mp, + (void *)rxep, + RTE_I40E_RXQ_REARM_THRESH) < 0) { + if (rxq->rxrearm_nb + RTE_I40E_RXQ_REARM_THRESH >= + rxq->nb_rx_desc) { + dma_addr0 = (vector unsigned long){}; + for (i = 0; i < RTE_I40E_DESCS_PER_LOOP; i++) { + rxep[i].mbuf = &rxq->fake_mbuf; + vec_st(dma_addr0, 0, + (vector unsigned long *)&rxdp[i].read); + } + } + rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed += + RTE_I40E_RXQ_REARM_THRESH; + return; + } + + /* Initialize the mbufs in vector, process 2 mbufs in one loop */ + for (i = 0; i < RTE_I40E_RXQ_REARM_THRESH; i += 2, rxep += 2) { + vector unsigned long vaddr0, vaddr1; + uintptr_t p0, p1; + + mb0 = rxep[0].mbuf; + mb1 = rxep[1].mbuf; + + /* Flush mbuf with pkt template. + * Data to be rearmed is 6 bytes long. + * Though, RX will overwrite ol_flags that are coming next + * anyway. So overwrite whole 8 bytes with one load: + * 6 bytes of rearm_data plus first 2 bytes of ol_flags. + */ + p0 = (uintptr_t)&mb0->rearm_data; + *(uint64_t *)p0 = rxq->mbuf_initializer; + p1 = (uintptr_t)&mb1->rearm_data; + *(uint64_t *)p1 = rxq->mbuf_initializer; + + /* load buf_addr(lo 64bit) and buf_iova(hi 64bit) */ + vaddr0 = vec_ld(0, (vector unsigned long *)&mb0->buf_addr); + vaddr1 = vec_ld(0, (vector unsigned long *)&mb1->buf_addr); + + /* convert pa to dma_addr hdr/data */ + dma_addr0 = vec_mergel(vaddr0, vaddr0); + dma_addr1 = vec_mergel(vaddr1, vaddr1); + + /* add headroom to pa values */ + dma_addr0 = vec_add(dma_addr0, hdr_room); + dma_addr1 = vec_add(dma_addr1, hdr_room); + + /* flush desc with pa dma_addr */ + vec_st(dma_addr0, 0, (vector unsigned long *)&rxdp++->read); + vec_st(dma_addr1, 0, (vector unsigned long *)&rxdp++->read); + } + + rxq->rxrearm_start += RTE_I40E_RXQ_REARM_THRESH; + if (rxq->rxrearm_start >= rxq->nb_rx_desc) + rxq->rxrearm_start = 0; + + rxq->rxrearm_nb -= RTE_I40E_RXQ_REARM_THRESH; + + rx_id = (uint16_t)((rxq->rxrearm_start == 0) ? + (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1)); + + /* Update the tail pointer on the NIC */ + I40E_PCI_REG_WRITE(rxq->qrx_tail, rx_id); +} + +static inline void +desc_to_olflags_v(vector unsigned long descs[4], struct rte_mbuf **rx_pkts) +{ + vector unsigned int vlan0, vlan1, rss, l3_l4e; + + /* mask everything except RSS, flow director and VLAN flags + * bit2 is for VLAN tag, bit11 for flow director indication + * bit13:12 for RSS indication. 
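The AltiVec rearm routine above refills the software ring in blocks of RTE_I40E_RXQ_REARM_THRESH mbufs, wraps rxrearm_start at the end of the ring, and programs the hardware tail one slot behind the next rearm position. A scalar sketch of just that index arithmetic, with a hypothetical ring size and threshold:

#include <stdint.h>
#include <stdio.h>

#define EX_NB_DESC      512u	/* ring size (power of two), assumed */
#define EX_REARM_THRESH 32u	/* refill block size, assumed */

int main(void)
{
	uint16_t rearm_start = 480, rearm_nb = 40, rx_id;

	/* advance by one refill block, wrapping at the end of the ring */
	rearm_start += EX_REARM_THRESH;
	if (rearm_start >= EX_NB_DESC)
		rearm_start = 0;
	rearm_nb -= EX_REARM_THRESH;

	/* hardware tail points at the last descriptor just rearmed */
	rx_id = (rearm_start == 0) ? (EX_NB_DESC - 1) : (rearm_start - 1);
	printf("start=%u nb=%u tail=%u\n",
	       (unsigned)rearm_start, (unsigned)rearm_nb, (unsigned)rx_id);
	return 0;
}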
+ */ + const vector unsigned int rss_vlan_msk = (vector unsigned int){ + (int32_t)0x1c03804, (int32_t)0x1c03804, + (int32_t)0x1c03804, (int32_t)0x1c03804}; + + /* map rss and vlan type to rss hash and vlan flag */ + const vector unsigned char vlan_flags = (vector unsigned char){ + 0, 0, 0, 0, + PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0}; + + const vector unsigned char rss_flags = (vector unsigned char){ + 0, PKT_RX_FDIR, 0, 0, + 0, 0, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH | PKT_RX_FDIR, + 0, 0, 0, 0, + 0, 0, 0, 0}; + + const vector unsigned char l3_l4e_flags = (vector unsigned char){ + 0, + PKT_RX_IP_CKSUM_BAD, + PKT_RX_L4_CKSUM_BAD, + PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD, + PKT_RX_EIP_CKSUM_BAD, + PKT_RX_EIP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD, + PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD, + PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD + | PKT_RX_IP_CKSUM_BAD, + 0, 0, 0, 0, 0, 0, 0, 0}; + + vlan0 = (vector unsigned int)vec_mergel(descs[0], descs[1]); + vlan1 = (vector unsigned int)vec_mergel(descs[2], descs[3]); + vlan0 = (vector unsigned int)vec_mergeh(vlan0, vlan1); + + vlan1 = vec_and(vlan0, rss_vlan_msk); + vlan0 = (vector unsigned int)vec_perm(vlan_flags, + (vector unsigned char){}, + *(vector unsigned char *)&vlan1); + + rss = vec_sr(vlan1, (vector unsigned int){11, 11, 11, 11}); + rss = (vector unsigned int)vec_perm(rss_flags, (vector unsigned char){}, + *(vector unsigned char *)&rss); + + l3_l4e = vec_sr(vlan1, (vector unsigned int){22, 22, 22, 22}); + l3_l4e = (vector unsigned int)vec_perm(l3_l4e_flags, + (vector unsigned char){}, + *(vector unsigned char *)&l3_l4e); + + vlan0 = vec_or(vlan0, rss); + vlan0 = vec_or(vlan0, l3_l4e); + + rx_pkts[0]->ol_flags = (uint64_t)vlan0[2]; + rx_pkts[1]->ol_flags = (uint64_t)vlan0[3]; + rx_pkts[2]->ol_flags = (uint64_t)vlan0[0]; + rx_pkts[3]->ol_flags = (uint64_t)vlan0[1]; +} + +#define PKTLEN_SHIFT 10 + +static inline void +desc_to_ptype_v(vector unsigned long descs[4], struct rte_mbuf **rx_pkts, + uint32_t *ptype_tbl) +{ + vector unsigned long ptype0 = vec_mergel(descs[0], descs[1]); + vector unsigned long ptype1 = vec_mergel(descs[2], descs[3]); + + ptype0 = vec_sr(ptype0, (vector unsigned long){30, 30}); + ptype1 = vec_sr(ptype1, (vector unsigned long){30, 30}); + + rx_pkts[0]->packet_type = + ptype_tbl[(*(vector unsigned char *)&ptype0)[0]]; + rx_pkts[1]->packet_type = + ptype_tbl[(*(vector unsigned char *)&ptype0)[8]]; + rx_pkts[2]->packet_type = + ptype_tbl[(*(vector unsigned char *)&ptype1)[0]]; + rx_pkts[3]->packet_type = + ptype_tbl[(*(vector unsigned char *)&ptype1)[8]]; +} + + /* Notice: + * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet + * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_I40E_VPMD_RX_BURST + * numbers of DD bits + */ +static inline uint16_t +_recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts, uint8_t *split_packet) +{ + volatile union i40e_rx_desc *rxdp; + struct i40e_rx_entry *sw_ring; + uint16_t nb_pkts_recd; + int pos; + uint64_t var; + vector unsigned char shuf_msk; + uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl; + + vector unsigned short crc_adjust = (vector unsigned short){ + 0, 0, /* ignore pkt_type field */ + rxq->crc_len, /* sub crc on pkt_len */ + 0, /* ignore high-16bits of pkt_len */ + rxq->crc_len, /* sub crc on data_len */ + 0, 0, 0 /* ignore non-length fields */ + }; + vector unsigned long dd_check, eop_check; + + /* nb_pkts shall be less equal than RTE_I40E_MAX_RX_BURST */ + nb_pkts = RTE_MIN(nb_pkts, 
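desc_to_ptype_v() above shifts each descriptor qword right by 30 bits so the 8-bit packet-type index lands in the low byte, then feeds it through the adapter's ptype table. The same extraction in scalar form; the field position is taken from the code above and should be checked against the descriptor layout:

#include <stdint.h>
#include <stdio.h>

/* Field position mirrored from the vector code above (shift by 30, low 8 bits). */
#define EX_PTYPE_SHIFT 30
#define EX_PTYPE_MASK  0xFFull

static uint8_t ex_desc_ptype(uint64_t qword1)
{
	return (uint8_t)((qword1 >> EX_PTYPE_SHIFT) & EX_PTYPE_MASK);
}

int main(void)
{
	uint64_t qword1 = (uint64_t)24 << EX_PTYPE_SHIFT;	/* synthetic descriptor */

	printf("ptype index = %u\n", (unsigned)ex_desc_ptype(qword1));
	return 0;
}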
RTE_I40E_MAX_RX_BURST); + + /* nb_pkts has to be floor-aligned to RTE_I40E_DESCS_PER_LOOP */ + nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_I40E_DESCS_PER_LOOP); + + /* Just the act of getting into the function from the application is + * going to cost about 7 cycles + */ + rxdp = rxq->rx_ring + rxq->rx_tail; + + rte_prefetch0(rxdp); + + /* See if we need to rearm the RX queue - gives the prefetch a bit + * of time to act + */ + if (rxq->rxrearm_nb > RTE_I40E_RXQ_REARM_THRESH) + i40e_rxq_rearm(rxq); + + /* Before we start moving massive data around, check to see if + * there is actually a packet available + */ + if (!(rxdp->wb.qword1.status_error_len & + rte_cpu_to_le_32(1 << I40E_RX_DESC_STATUS_DD_SHIFT))) + return 0; + + /* 4 packets DD mask */ + dd_check = (vector unsigned long){0x0000000100000001ULL, + 0x0000000100000001ULL}; + + /* 4 packets EOP mask */ + eop_check = (vector unsigned long){0x0000000200000002ULL, + 0x0000000200000002ULL}; + + /* mask to shuffle from desc. to mbuf */ + shuf_msk = (vector unsigned char){ + 0xFF, 0xFF, /* pkt_type set as unknown */ + 0xFF, 0xFF, /* pkt_type set as unknown */ + 14, 15, /* octet 15~14, low 16 bits pkt_len */ + 0xFF, 0xFF, /* skip high 16 bits pkt_len, zero out */ + 14, 15, /* octet 15~14, 16 bits data_len */ + 2, 3, /* octet 2~3, low 16 bits vlan_macip */ + 4, 5, 6, 7 /* octet 4~7, 32bits rss */ + }; + + /* Cache is empty -> need to scan the buffer rings, but first move + * the next 'n' mbufs into the cache + */ + sw_ring = &rxq->sw_ring[rxq->rx_tail]; + + /* A. load 4 packet in one loop + * [A*. mask out 4 unused dirty field in desc] + * B. copy 4 mbuf point from swring to rx_pkts + * C. calc the number of DD bits among the 4 packets + * [C*. extract the end-of-packet bit, if requested] + * D. fill info. from desc to mbuf + */ + + for (pos = 0, nb_pkts_recd = 0; pos < nb_pkts; + pos += RTE_I40E_DESCS_PER_LOOP, + rxdp += RTE_I40E_DESCS_PER_LOOP) { + vector unsigned long descs[RTE_I40E_DESCS_PER_LOOP]; + vector unsigned char pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4; + vector unsigned short staterr, sterr_tmp1, sterr_tmp2; + vector unsigned long mbp1, mbp2; /* two mbuf pointer + * in one XMM reg. 
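Before entering the burst loop, the code above peeks at the first descriptor's DD (descriptor done) bit and returns immediately if nothing has completed yet. A scalar sketch of that early-out gate, assuming the DD bit sits at bit 0 of the status word as the shift above suggests:

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define EX_DD_SHIFT 0	/* DD status bit position, assumed for illustration */

static bool ex_desc_done(uint64_t status_error_len)
{
	return (status_error_len & (1ull << EX_DD_SHIFT)) != 0;
}

int main(void)
{
	uint64_t qword1 = 0x1;	/* synthetic: DD set, nothing else */

	if (!ex_desc_done(qword1))
		return 0;	/* no completed packet -> bail out early */
	printf("at least one descriptor is done\n");
	return 0;
}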
+ */ + + /* B.1 load 1 mbuf point */ + mbp1 = *(vector unsigned long *)&sw_ring[pos]; + /* Read desc statuses backwards to avoid race condition */ + /* A.1 load 4 pkts desc */ + descs[3] = *(vector unsigned long *)(rxdp + 3); + rte_compiler_barrier(); + + /* B.2 copy 2 mbuf point into rx_pkts */ + *(vector unsigned long *)&rx_pkts[pos] = mbp1; + + /* B.1 load 1 mbuf point */ + mbp2 = *(vector unsigned long *)&sw_ring[pos + 2]; + + descs[2] = *(vector unsigned long *)(rxdp + 2); + rte_compiler_barrier(); + /* B.1 load 2 mbuf point */ + descs[1] = *(vector unsigned long *)(rxdp + 1); + rte_compiler_barrier(); + descs[0] = *(vector unsigned long *)(rxdp); + + /* B.2 copy 2 mbuf point into rx_pkts */ + *(vector unsigned long *)&rx_pkts[pos + 2] = mbp2; + + if (split_packet) { + rte_mbuf_prefetch_part2(rx_pkts[pos]); + rte_mbuf_prefetch_part2(rx_pkts[pos + 1]); + rte_mbuf_prefetch_part2(rx_pkts[pos + 2]); + rte_mbuf_prefetch_part2(rx_pkts[pos + 3]); + } + + /* avoid compiler reorder optimization */ + rte_compiler_barrier(); + + /* pkt 3,4 shift the pktlen field to be 16-bit aligned*/ + const vector unsigned int len3 = vec_sl( + vec_ld(0, (vector unsigned int *)&descs[3]), + (vector unsigned int){0, 0, 0, PKTLEN_SHIFT}); + + const vector unsigned int len2 = vec_sl( + vec_ld(0, (vector unsigned int *)&descs[2]), + (vector unsigned int){0, 0, 0, PKTLEN_SHIFT}); + + /* merge the now-aligned packet length fields back in */ + descs[3] = (vector unsigned long)len3; + descs[2] = (vector unsigned long)len2; + + /* D.1 pkt 3,4 convert format from desc to pktmbuf */ + pkt_mb4 = vec_perm((vector unsigned char)descs[3], + (vector unsigned char){}, shuf_msk); + pkt_mb3 = vec_perm((vector unsigned char)descs[2], + (vector unsigned char){}, shuf_msk); + + /* C.1 4=>2 filter staterr info only */ + sterr_tmp2 = vec_mergel((vector unsigned short)descs[3], + (vector unsigned short)descs[2]); + /* C.1 4=>2 filter staterr info only */ + sterr_tmp1 = vec_mergel((vector unsigned short)descs[1], + (vector unsigned short)descs[0]); + /* D.2 pkt 3,4 set in_port/nb_seg and remove crc */ + pkt_mb4 = (vector unsigned char)vec_sub( + (vector unsigned short)pkt_mb4, crc_adjust); + pkt_mb3 = (vector unsigned char)vec_sub( + (vector unsigned short)pkt_mb3, crc_adjust); + + /* pkt 1,2 shift the pktlen field to be 16-bit aligned*/ + const vector unsigned int len1 = vec_sl( + vec_ld(0, (vector unsigned int *)&descs[1]), + (vector unsigned int){0, 0, 0, PKTLEN_SHIFT}); + const vector unsigned int len0 = vec_sl( + vec_ld(0, (vector unsigned int *)&descs[0]), + (vector unsigned int){0, 0, 0, PKTLEN_SHIFT}); + + /* merge the now-aligned packet length fields back in */ + descs[1] = (vector unsigned long)len1; + descs[0] = (vector unsigned long)len0; + + /* D.1 pkt 1,2 convert format from desc to pktmbuf */ + pkt_mb2 = vec_perm((vector unsigned char)descs[1], + (vector unsigned char){}, shuf_msk); + pkt_mb1 = vec_perm((vector unsigned char)descs[0], + (vector unsigned char){}, shuf_msk); + + /* C.2 get 4 pkts staterr value */ + staterr = (vector unsigned short)vec_mergeh( + sterr_tmp1, sterr_tmp2); + + /* D.3 copy final 3,4 data to rx_pkts */ + vec_st(pkt_mb4, 0, + (vector unsigned char *)&rx_pkts[pos + 3] + ->rx_descriptor_fields1 + ); + vec_st(pkt_mb3, 0, + (vector unsigned char *)&rx_pkts[pos + 2] + ->rx_descriptor_fields1 + ); + + /* D.2 pkt 1,2 set in_port/nb_seg and remove crc */ + pkt_mb2 = (vector unsigned char)vec_sub( + (vector unsigned short)pkt_mb2, crc_adjust); + pkt_mb1 = (vector unsigned char)vec_sub( + (vector unsigned 
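The loop above left-shifts the high lane of each descriptor by PKTLEN_SHIFT so the packet-length field becomes 16-bit aligned before the shuffle into the mbuf. A scalar equivalent simply extracts the field with a shift and mask; the position and width below are assumptions consistent with that layout, not authoritative values:

#include <stdint.h>
#include <stdio.h>

/* Assumed position of the packet-buffer length inside qword1. */
#define EX_LEN_SHIFT 38
#define EX_LEN_MASK  0x3FFFull

int main(void)
{
	uint64_t qword1 = ((uint64_t)1514 << EX_LEN_SHIFT) | 0x1ull; /* len + DD */
	uint16_t pkt_len = (uint16_t)((qword1 >> EX_LEN_SHIFT) & EX_LEN_MASK);

	printf("pkt_len=%u\n", (unsigned)pkt_len);	/* 1514 */
	return 0;
}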
short)pkt_mb1, crc_adjust); + + /* C* extract and record EOP bit */ + if (split_packet) { + vector unsigned char eop_shuf_mask = + (vector unsigned char){ + 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, + 0x04, 0x0C, 0x00, 0x08 + }; + + /* and with mask to extract bits, flipping 1-0 */ + vector unsigned char eop_bits = vec_and( + (vector unsigned char)vec_nor(staterr, staterr), + (vector unsigned char)eop_check); + /* the staterr values are not in order, as the count + * count of dd bits doesn't care. However, for end of + * packet tracking, we do care, so shuffle. This also + * compresses the 32-bit values to 8-bit + */ + eop_bits = vec_perm(eop_bits, (vector unsigned char){}, + eop_shuf_mask); + /* store the resulting 32-bit value */ + *split_packet = (vec_ld(0, + (vector unsigned int *)&eop_bits))[0]; + split_packet += RTE_I40E_DESCS_PER_LOOP; + + /* zero-out next pointers */ + rx_pkts[pos]->next = NULL; + rx_pkts[pos + 1]->next = NULL; + rx_pkts[pos + 2]->next = NULL; + rx_pkts[pos + 3]->next = NULL; + } + + /* C.3 calc available number of desc */ + staterr = vec_and(staterr, (vector unsigned short)dd_check); + + /* D.3 copy final 1,2 data to rx_pkts */ + vec_st(pkt_mb2, 0, + (vector unsigned char *)&rx_pkts[pos + 1] + ->rx_descriptor_fields1 + ); + vec_st(pkt_mb1, 0, + (vector unsigned char *)&rx_pkts[pos]->rx_descriptor_fields1 + ); + desc_to_ptype_v(descs, &rx_pkts[pos], ptype_tbl); + desc_to_olflags_v(descs, &rx_pkts[pos]); + + /* C.4 calc avaialbe number of desc */ + var = __builtin_popcountll((vec_ld(0, + (vector unsigned long *)&staterr)[0])); + nb_pkts_recd += var; + if (likely(var != RTE_I40E_DESCS_PER_LOOP)) + break; + } + + /* Update our internal tail pointer */ + rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_pkts_recd); + rxq->rx_tail = (uint16_t)(rxq->rx_tail & (rxq->nb_rx_desc - 1)); + rxq->rxrearm_nb = (uint16_t)(rxq->rxrearm_nb + nb_pkts_recd); + + return nb_pkts_recd; +} + + /* Notice: + * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet + * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_I40E_VPMD_RX_BURST + * numbers of DD bits + */ +uint16_t +i40e_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL); +} + + /* vPMD receive routine that reassembles scattered packets + * Notice: + * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet + * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_I40E_VPMD_RX_BURST + * numbers of DD bits + */ +uint16_t +i40e_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + struct i40e_rx_queue *rxq = rx_queue; + uint8_t split_flags[RTE_I40E_VPMD_RX_BURST] = {0}; + + /* get some new buffers */ + uint16_t nb_bufs = _recv_raw_pkts_vec(rxq, rx_pkts, nb_pkts, + split_flags); + if (nb_bufs == 0) + return 0; + + /* happy day case, full burst + no packets to be joined */ + const uint64_t *split_fl64 = (uint64_t *)split_flags; + + if (rxq->pkt_first_seg == NULL && + split_fl64[0] == 0 && split_fl64[1] == 0 && + split_fl64[2] == 0 && split_fl64[3] == 0) + return nb_bufs; + + /* reassemble any packets that need reassembly*/ + unsigned int i = 0; + + if (!rxq->pkt_first_seg) { + /* find the first split flag, and only reassemble then*/ + while (i < nb_bufs && !split_flags[i]) + i++; + if (i == nb_bufs) + return nb_bufs; + } + return i + reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i, + &split_flags[i]); +} + +static inline void +vtx1(volatile struct i40e_tx_desc *txdp, + struct 
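Each iteration above gathers the four DD bits into one word, counts them with popcount, and stops at the first group that is not fully complete. The same control flow, reduced to scalars with synthetic status words:

#include <stdint.h>
#include <stdio.h>

#define EX_DESCS_PER_LOOP 4

int main(void)
{
	/* one bit per descriptor, LSB-packed: 0xF = all 4 done, 0x3 = only 2 */
	uint64_t dd_words[] = { 0xF, 0xF, 0x3 };
	unsigned int received = 0;

	for (unsigned int i = 0; i < 3; i++) {
		unsigned int burst =
			(unsigned int)__builtin_popcountll(dd_words[i]);

		received += burst;
		if (burst != EX_DESCS_PER_LOOP)
			break;	/* partial group: stop scanning */
	}
	printf("received=%u\n", received);	/* 10 */
	return 0;
}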
rte_mbuf *pkt, uint64_t flags) +{ + uint64_t high_qw = (I40E_TX_DESC_DTYPE_DATA | + ((uint64_t)flags << I40E_TXD_QW1_CMD_SHIFT) | + ((uint64_t)pkt->data_len << I40E_TXD_QW1_TX_BUF_SZ_SHIFT)); + + vector unsigned long descriptor = (vector unsigned long){ + pkt->buf_iova + pkt->data_off, high_qw}; + *(vector unsigned long *)txdp = descriptor; +} + +static inline void +vtx(volatile struct i40e_tx_desc *txdp, + struct rte_mbuf **pkt, uint16_t nb_pkts, uint64_t flags) +{ + int i; + + for (i = 0; i < nb_pkts; ++i, ++txdp, ++pkt) + vtx1(txdp, *pkt, flags); +} + +uint16_t +i40e_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + struct i40e_tx_queue *txq = (struct i40e_tx_queue *)tx_queue; + volatile struct i40e_tx_desc *txdp; + struct i40e_tx_entry *txep; + uint16_t n, nb_commit, tx_id; + uint64_t flags = I40E_TD_CMD; + uint64_t rs = I40E_TX_DESC_CMD_RS | I40E_TD_CMD; + int i; + + /* cross rx_thresh boundary is not allowed */ + nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh); + + if (txq->nb_tx_free < txq->tx_free_thresh) + i40e_tx_free_bufs(txq); + + nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts); + nb_commit = nb_pkts; + if (unlikely(nb_pkts == 0)) + return 0; + + tx_id = txq->tx_tail; + txdp = &txq->tx_ring[tx_id]; + txep = &txq->sw_ring[tx_id]; + + txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts); + + n = (uint16_t)(txq->nb_tx_desc - tx_id); + if (nb_commit >= n) { + tx_backlog_entry(txep, tx_pkts, n); + + for (i = 0; i < n - 1; ++i, ++tx_pkts, ++txdp) + vtx1(txdp, *tx_pkts, flags); + + vtx1(txdp, *tx_pkts++, rs); + + nb_commit = (uint16_t)(nb_commit - n); + + tx_id = 0; + txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1); + + /* avoid reach the end of ring */ + txdp = &txq->tx_ring[tx_id]; + txep = &txq->sw_ring[tx_id]; + } + + tx_backlog_entry(txep, tx_pkts, nb_commit); + + vtx(txdp, tx_pkts, nb_commit, flags); + + tx_id = (uint16_t)(tx_id + nb_commit); + if (tx_id > txq->tx_next_rs) { + txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |= + rte_cpu_to_le_64(((uint64_t)I40E_TX_DESC_CMD_RS) << + I40E_TXD_QW1_CMD_SHIFT); + txq->tx_next_rs = + (uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh); + } + + txq->tx_tail = tx_id; + + I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail); + + return nb_pkts; +} + +void __rte_cold +i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue *rxq) +{ + _i40e_rx_queue_release_mbufs_vec(rxq); +} + +int __rte_cold +i40e_rxq_vec_setup(struct i40e_rx_queue *rxq) +{ + return i40e_rxq_vec_setup_default(rxq); +} + +int __rte_cold +i40e_txq_vec_setup(struct i40e_tx_queue __rte_unused * txq) +{ + return 0; +} + +int __rte_cold +i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev) +{ + return i40e_rx_vec_dev_conf_condition_check_default(dev); +} diff --git a/src/spdk/dpdk/drivers/net/i40e/i40e_rxtx_vec_avx2.c b/src/spdk/dpdk/drivers/net/i40e/i40e_rxtx_vec_avx2.c new file mode 100644 index 000000000..3bcef1363 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/i40e/i40e_rxtx_vec_avx2.c @@ -0,0 +1,949 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Intel Corporation + */ + +#include +#include +#include + +#include "base/i40e_prototype.h" +#include "base/i40e_type.h" +#include "i40e_ethdev.h" +#include "i40e_rxtx.h" +#include "i40e_rxtx_vec_common.h" + +#include + +#ifndef __INTEL_COMPILER +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif + +static inline void +i40e_rxq_rearm(struct i40e_rx_queue *rxq) +{ + int i; + uint16_t rx_id; + volatile union i40e_rx_desc *rxdp; + struct i40e_rx_entry *rxep = 
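vtx1() above packs the descriptor type, command flags and buffer length into the high 64-bit word of a transmit descriptor and pairs it with the buffer DMA address. A scalar sketch of that packing; the EX_TXD_* shift values are placeholders, not the real I40E_TXD_QW1_* constants:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical field positions for illustration only. */
#define EX_TXD_DTYPE_DATA   0x0ull
#define EX_TXD_CMD_SHIFT    4
#define EX_TXD_BUF_SZ_SHIFT 34

int main(void)
{
	uint64_t flags = 0x3;	/* e.g. EOP|RS style command bits, assumed */
	uint16_t data_len = 60;
	uint64_t buf_iova = 0x1000, data_off = 128;

	uint64_t high_qw = EX_TXD_DTYPE_DATA |
			   (flags << EX_TXD_CMD_SHIFT) |
			   ((uint64_t)data_len << EX_TXD_BUF_SZ_SHIFT);
	uint64_t low_qw = buf_iova + data_off;	/* where the NIC reads the data */

	printf("desc = %016llx %016llx\n",
	       (unsigned long long)high_qw, (unsigned long long)low_qw);
	return 0;
}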
&rxq->sw_ring[rxq->rxrearm_start]; + + rxdp = rxq->rx_ring + rxq->rxrearm_start; + + /* Pull 'n' more MBUFs into the software ring */ + if (rte_mempool_get_bulk(rxq->mp, + (void *)rxep, + RTE_I40E_RXQ_REARM_THRESH) < 0) { + if (rxq->rxrearm_nb + RTE_I40E_RXQ_REARM_THRESH >= + rxq->nb_rx_desc) { + __m128i dma_addr0; + dma_addr0 = _mm_setzero_si128(); + for (i = 0; i < RTE_I40E_DESCS_PER_LOOP; i++) { + rxep[i].mbuf = &rxq->fake_mbuf; + _mm_store_si128((__m128i *)&rxdp[i].read, + dma_addr0); + } + } + rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed += + RTE_I40E_RXQ_REARM_THRESH; + return; + } + +#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC + struct rte_mbuf *mb0, *mb1; + __m128i dma_addr0, dma_addr1; + __m128i hdr_room = _mm_set_epi64x(RTE_PKTMBUF_HEADROOM, + RTE_PKTMBUF_HEADROOM); + /* Initialize the mbufs in vector, process 2 mbufs in one loop */ + for (i = 0; i < RTE_I40E_RXQ_REARM_THRESH; i += 2, rxep += 2) { + __m128i vaddr0, vaddr1; + + mb0 = rxep[0].mbuf; + mb1 = rxep[1].mbuf; + + /* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */ + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_physaddr) != + offsetof(struct rte_mbuf, buf_addr) + 8); + vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr); + vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr); + + /* convert pa to dma_addr hdr/data */ + dma_addr0 = _mm_unpackhi_epi64(vaddr0, vaddr0); + dma_addr1 = _mm_unpackhi_epi64(vaddr1, vaddr1); + + /* add headroom to pa values */ + dma_addr0 = _mm_add_epi64(dma_addr0, hdr_room); + dma_addr1 = _mm_add_epi64(dma_addr1, hdr_room); + + /* flush desc with pa dma_addr */ + _mm_store_si128((__m128i *)&rxdp++->read, dma_addr0); + _mm_store_si128((__m128i *)&rxdp++->read, dma_addr1); + } +#else + struct rte_mbuf *mb0, *mb1, *mb2, *mb3; + __m256i dma_addr0_1, dma_addr2_3; + __m256i hdr_room = _mm256_set1_epi64x(RTE_PKTMBUF_HEADROOM); + /* Initialize the mbufs in vector, process 4 mbufs in one loop */ + for (i = 0; i < RTE_I40E_RXQ_REARM_THRESH; + i += 4, rxep += 4, rxdp += 4) { + __m128i vaddr0, vaddr1, vaddr2, vaddr3; + __m256i vaddr0_1, vaddr2_3; + + mb0 = rxep[0].mbuf; + mb1 = rxep[1].mbuf; + mb2 = rxep[2].mbuf; + mb3 = rxep[3].mbuf; + + /* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */ + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_physaddr) != + offsetof(struct rte_mbuf, buf_addr) + 8); + vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr); + vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr); + vaddr2 = _mm_loadu_si128((__m128i *)&mb2->buf_addr); + vaddr3 = _mm_loadu_si128((__m128i *)&mb3->buf_addr); + + /* + * merge 0 & 1, by casting 0 to 256-bit and inserting 1 + * into the high lanes. Similarly for 2 & 3 + */ + vaddr0_1 = _mm256_inserti128_si256( + _mm256_castsi128_si256(vaddr0), vaddr1, 1); + vaddr2_3 = _mm256_inserti128_si256( + _mm256_castsi128_si256(vaddr2), vaddr3, 1); + + /* convert pa to dma_addr hdr/data */ + dma_addr0_1 = _mm256_unpackhi_epi64(vaddr0_1, vaddr0_1); + dma_addr2_3 = _mm256_unpackhi_epi64(vaddr2_3, vaddr2_3); + + /* add headroom to pa values */ + dma_addr0_1 = _mm256_add_epi64(dma_addr0_1, hdr_room); + dma_addr2_3 = _mm256_add_epi64(dma_addr2_3, hdr_room); + + /* flush desc with pa dma_addr */ + _mm256_store_si256((__m256i *)&rxdp->read, dma_addr0_1); + _mm256_store_si256((__m256i *)&(rxdp + 2)->read, dma_addr2_3); + } + +#endif + + rxq->rxrearm_start += RTE_I40E_RXQ_REARM_THRESH; + if (rxq->rxrearm_start >= rxq->nb_rx_desc) + rxq->rxrearm_start = 0; + + rxq->rxrearm_nb -= RTE_I40E_RXQ_REARM_THRESH; + + rx_id = (uint16_t)((rxq->rxrearm_start == 0) ? 
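The AVX2 rearm loop above derives the DMA address written into each read descriptor from the mbuf's IOVA plus the packet headroom. The address math on its own, with an assumed headroom value standing in for RTE_PKTMBUF_HEADROOM:

#include <stdint.h>
#include <stdio.h>

#define EX_PKTMBUF_HEADROOM 128u	/* assumed; RTE_PKTMBUF_HEADROOM in DPDK */

int main(void)
{
	uint64_t buf_iova = 0x7f0000001000ull;	/* synthetic bus address */
	uint64_t dma_addr = buf_iova + EX_PKTMBUF_HEADROOM;

	/* the vector code stores this value into the read descriptor */
	printf("dma_addr = 0x%llx\n", (unsigned long long)dma_addr);
	return 0;
}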
+ (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1)); + + /* Update the tail pointer on the NIC */ + I40E_PCI_REG_WRITE(rxq->qrx_tail, rx_id); +} + +#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC +/* Handles 32B descriptor FDIR ID processing: + * rxdp: receive descriptor ring, required to load 2nd 16B half of each desc + * rx_pkts: required to store metadata back to mbufs + * pkt_idx: offset into the burst, increments in vector widths + * desc_idx: required to select the correct shift at compile time + */ +static inline __m256i +desc_fdir_processing_32b(volatile union i40e_rx_desc *rxdp, + struct rte_mbuf **rx_pkts, + const uint32_t pkt_idx, + const uint32_t desc_idx) +{ + /* 32B desc path: load rxdp.wb.qword2 for EXT_STATUS and FLEXBH_STAT */ + __m128i *rxdp_desc_0 = (void *)(&rxdp[desc_idx + 0].wb.qword2); + __m128i *rxdp_desc_1 = (void *)(&rxdp[desc_idx + 1].wb.qword2); + const __m128i desc_qw2_0 = _mm_load_si128(rxdp_desc_0); + const __m128i desc_qw2_1 = _mm_load_si128(rxdp_desc_1); + + /* Mask for FLEXBH_STAT, and the FDIR_ID value to compare against. The + * remaining data is set to all 1's to pass through data. + */ + const __m256i flexbh_mask = _mm256_set_epi32(-1, -1, -1, 3 << 4, + -1, -1, -1, 3 << 4); + const __m256i flexbh_id = _mm256_set_epi32(-1, -1, -1, 1 << 4, + -1, -1, -1, 1 << 4); + + /* Load descriptor, check for FLEXBH bits, generate a mask for both + * packets in the register. + */ + __m256i desc_qw2_0_1 = + _mm256_inserti128_si256(_mm256_castsi128_si256(desc_qw2_0), + desc_qw2_1, 1); + __m256i desc_tmp_msk = _mm256_and_si256(flexbh_mask, desc_qw2_0_1); + __m256i fdir_mask = _mm256_cmpeq_epi32(flexbh_id, desc_tmp_msk); + __m256i fdir_data = _mm256_alignr_epi8(desc_qw2_0_1, desc_qw2_0_1, 12); + __m256i desc_fdir_data = _mm256_and_si256(fdir_mask, fdir_data); + + /* Write data out to the mbuf. There is no store to this area of the + * mbuf today, so we cannot combine it with another store. + */ + const uint32_t idx_0 = pkt_idx + desc_idx; + const uint32_t idx_1 = pkt_idx + desc_idx + 1; + rx_pkts[idx_0]->hash.fdir.hi = _mm256_extract_epi32(desc_fdir_data, 0); + rx_pkts[idx_1]->hash.fdir.hi = _mm256_extract_epi32(desc_fdir_data, 4); + + /* Create mbuf flags as required for mbuf_flags layout + * (That's high lane [1,3,5,7, 0,2,4,6] as u32 lanes). + * Approach: + * - Mask away bits not required from the fdir_mask + * - Leave the PKT_FDIR_ID bit (1 << 13) + * - Position that bit correctly based on packet number + * - OR in the resulting bit to mbuf_flags + */ + RTE_BUILD_BUG_ON(PKT_RX_FDIR_ID != (1 << 13)); + __m256i mbuf_flag_mask = _mm256_set_epi32(0, 0, 0, 1 << 13, + 0, 0, 0, 1 << 13); + __m256i desc_flag_bit = _mm256_and_si256(mbuf_flag_mask, fdir_mask); + + /* For static-inline function, this will be stripped out + * as the desc_idx is a hard-coded constant. + */ + switch (desc_idx) { + case 0: + return _mm256_alignr_epi8(desc_flag_bit, desc_flag_bit, 4); + case 2: + return _mm256_alignr_epi8(desc_flag_bit, desc_flag_bit, 8); + case 4: + return _mm256_alignr_epi8(desc_flag_bit, desc_flag_bit, 12); + case 6: + return desc_flag_bit; + default: + break; + } + + /* NOT REACHED, see above switch returns */ + return _mm256_setzero_si256(); +} +#endif /* RTE_LIBRTE_I40E_16BYTE_RX_DESC */ + +#define PKTLEN_SHIFT 10 + +/* Force inline as some compilers will not inline by default. 
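desc_fdir_processing_32b() above tests the FLEXBH_STAT field in qword2 of the 32-byte descriptor and, on a flow-director ID hit, reports the ID through hash.fdir.hi and raises PKT_RX_FDIR_ID. The same decision in scalar form, reusing the mask and match value from the code above as assumptions:

#include <stdint.h>
#include <stdio.h>

#define EX_FLEXBH_MASK    (3u << 4)	/* FLEXBH_STAT field, as masked above */
#define EX_FLEXBH_FDID    (1u << 4)	/* value meaning "FDIR ID present" */
#define EX_PKT_RX_FDIR_ID (1u << 13)

int main(void)
{
	uint32_t ext_status = EX_FLEXBH_FDID;	/* low 32 bits of qword2, synthetic */
	uint32_t fd_id = 0xabcd;		/* flexible payload word, synthetic */
	uint32_t fdir_hi = 0, ol_flags = 0;

	if ((ext_status & EX_FLEXBH_MASK) == EX_FLEXBH_FDID) {
		fdir_hi = fd_id;		/* report the filter ID to the app */
		ol_flags |= EX_PKT_RX_FDIR_ID;
	}
	printf("fdir.hi=0x%x ol_flags=0x%x\n",
	       (unsigned)fdir_hi, (unsigned)ol_flags);
	return 0;
}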
*/ +static __rte_always_inline uint16_t +_recv_raw_pkts_vec_avx2(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts, uint8_t *split_packet) +{ +#define RTE_I40E_DESCS_PER_LOOP_AVX 8 + + const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl; + const __m256i mbuf_init = _mm256_set_epi64x(0, 0, + 0, rxq->mbuf_initializer); + struct i40e_rx_entry *sw_ring = &rxq->sw_ring[rxq->rx_tail]; + volatile union i40e_rx_desc *rxdp = rxq->rx_ring + rxq->rx_tail; + const int avx_aligned = ((rxq->rx_tail & 1) == 0); + rte_prefetch0(rxdp); + + /* nb_pkts has to be floor-aligned to RTE_I40E_DESCS_PER_LOOP_AVX */ + nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_I40E_DESCS_PER_LOOP_AVX); + + /* See if we need to rearm the RX queue - gives the prefetch a bit + * of time to act + */ + if (rxq->rxrearm_nb > RTE_I40E_RXQ_REARM_THRESH) + i40e_rxq_rearm(rxq); + + /* Before we start moving massive data around, check to see if + * there is actually a packet available + */ + if (!(rxdp->wb.qword1.status_error_len & + rte_cpu_to_le_32(1 << I40E_RX_DESC_STATUS_DD_SHIFT))) + return 0; + + /* constants used in processing loop */ + const __m256i crc_adjust = _mm256_set_epi16( + /* first descriptor */ + 0, 0, 0, /* ignore non-length fields */ + -rxq->crc_len, /* sub crc on data_len */ + 0, /* ignore high-16bits of pkt_len */ + -rxq->crc_len, /* sub crc on pkt_len */ + 0, 0, /* ignore pkt_type field */ + /* second descriptor */ + 0, 0, 0, /* ignore non-length fields */ + -rxq->crc_len, /* sub crc on data_len */ + 0, /* ignore high-16bits of pkt_len */ + -rxq->crc_len, /* sub crc on pkt_len */ + 0, 0 /* ignore pkt_type field */ + ); + + /* 8 packets DD mask, LSB in each 32-bit value */ + const __m256i dd_check = _mm256_set1_epi32(1); + + /* 8 packets EOP mask, second-LSB in each 32-bit value */ + const __m256i eop_check = _mm256_slli_epi32(dd_check, + I40E_RX_DESC_STATUS_EOF_SHIFT); + + /* mask to shuffle from desc. to mbuf (2 descriptors)*/ + const __m256i shuf_msk = _mm256_set_epi8( + /* first descriptor */ + 7, 6, 5, 4, /* octet 4~7, 32bits rss */ + 3, 2, /* octet 2~3, low 16 bits vlan_macip */ + 15, 14, /* octet 15~14, 16 bits data_len */ + 0xFF, 0xFF, /* skip high 16 bits pkt_len, zero out */ + 15, 14, /* octet 15~14, low 16 bits pkt_len */ + 0xFF, 0xFF, /* pkt_type set as unknown */ + 0xFF, 0xFF, /*pkt_type set as unknown */ + /* second descriptor */ + 7, 6, 5, 4, /* octet 4~7, 32bits rss */ + 3, 2, /* octet 2~3, low 16 bits vlan_macip */ + 15, 14, /* octet 15~14, 16 bits data_len */ + 0xFF, 0xFF, /* skip high 16 bits pkt_len, zero out */ + 15, 14, /* octet 15~14, low 16 bits pkt_len */ + 0xFF, 0xFF, /* pkt_type set as unknown */ + 0xFF, 0xFF /*pkt_type set as unknown */ + ); + /* + * compile-time check the above crc and shuffle layout is correct. + * NOTE: the first field (lowest address) is given last in set_epi + * calls above. + */ + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) != + offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4); + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) != + offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8); + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, vlan_tci) != + offsetof(struct rte_mbuf, rx_descriptor_fields1) + 10); + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) != + offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12); + + /* Status/Error flag masks */ + /* + * mask everything except RSS, flow director and VLAN flags + * bit2 is for VLAN tag, bit11 for flow director indication + * bit13:12 for RSS indication. 
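The RTE_BUILD_BUG_ON checks above pin down the mbuf field layout that the 16-byte shuffle writes through rx_descriptor_fields1. The same style of compile-time guard, shown with C11 static_assert on a hypothetical mbuf-like struct (offsets here are illustrative, not the real rte_mbuf layout):

#include <stdint.h>
#include <stddef.h>
#include <assert.h>

/* Hypothetical layout used only to demonstrate the guard. */
struct ex_mbuf {
	uint64_t rx_descriptor_fields1;	/* marker the shuffled store targets */
	uint32_t packet_type;
	uint32_t pkt_len;
	uint16_t data_len;
	uint16_t vlan_tci;
	uint32_t hash;
};

/* If someone reorders these fields, the build fails instead of the Rx path
 * silently writing length/VLAN/RSS data into the wrong places. */
static_assert(offsetof(struct ex_mbuf, data_len) ==
	      offsetof(struct ex_mbuf, pkt_len) + 4,
	      "data_len must directly follow pkt_len");
static_assert(offsetof(struct ex_mbuf, vlan_tci) ==
	      offsetof(struct ex_mbuf, data_len) + 2,
	      "vlan_tci must directly follow data_len");

int main(void) { return 0; }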
Bits 3-5 of error + * field (bits 22-24) are for IP/L4 checksum errors + */ + const __m256i flags_mask = _mm256_set1_epi32( + (1 << 2) | (1 << 11) | (3 << 12) | (7 << 22)); + /* + * data to be shuffled by result of flag mask. If VLAN bit is set, + * (bit 2), then position 4 in this array will be used in the + * destination + */ + const __m256i vlan_flags_shuf = _mm256_set_epi32( + 0, 0, PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, 0, + 0, 0, PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, 0); + /* + * data to be shuffled by result of flag mask, shifted down 11. + * If RSS/FDIR bits are set, shuffle moves appropriate flags in + * place. + */ + const __m256i rss_flags_shuf = _mm256_set_epi8( + 0, 0, 0, 0, 0, 0, 0, 0, + PKT_RX_RSS_HASH | PKT_RX_FDIR, PKT_RX_RSS_HASH, 0, 0, + 0, 0, PKT_RX_FDIR, 0, /* end up 128-bits */ + 0, 0, 0, 0, 0, 0, 0, 0, + PKT_RX_RSS_HASH | PKT_RX_FDIR, PKT_RX_RSS_HASH, 0, 0, + 0, 0, PKT_RX_FDIR, 0); + + /* + * data to be shuffled by the result of the flags mask shifted by 22 + * bits. This gives use the l3_l4 flags. + */ + const __m256i l3_l4_flags_shuf = _mm256_set_epi8(0, 0, 0, 0, 0, 0, 0, 0, + /* shift right 1 bit to make sure it not exceed 255 */ + (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD) >> 1, + (PKT_RX_EIP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD) >> 1, + (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD) >> 1, + PKT_RX_IP_CKSUM_BAD >> 1, + (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> 1, + /* second 128-bits */ + 0, 0, 0, 0, 0, 0, 0, 0, + (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD) >> 1, + (PKT_RX_EIP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD) >> 1, + (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD) >> 1, + PKT_RX_IP_CKSUM_BAD >> 1, + (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> 1); + + const __m256i cksum_mask = _mm256_set1_epi32( + PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD | + PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD | + PKT_RX_EIP_CKSUM_BAD); + + RTE_SET_USED(avx_aligned); /* for 32B descriptors we don't use this */ + + uint16_t i, received; + for (i = 0, received = 0; i < nb_pkts; + i += RTE_I40E_DESCS_PER_LOOP_AVX, + rxdp += RTE_I40E_DESCS_PER_LOOP_AVX) { + /* step 1, copy over 8 mbuf pointers to rx_pkts array */ + _mm256_storeu_si256((void *)&rx_pkts[i], + _mm256_loadu_si256((void *)&sw_ring[i])); +#ifdef RTE_ARCH_X86_64 + _mm256_storeu_si256((void *)&rx_pkts[i + 4], + _mm256_loadu_si256((void *)&sw_ring[i + 4])); +#endif + + __m256i raw_desc0_1, raw_desc2_3, raw_desc4_5, raw_desc6_7; +#ifdef RTE_LIBRTE_I40E_16BYTE_RX_DESC + /* for AVX we need alignment otherwise loads are not atomic */ + if (avx_aligned) { + /* load in descriptors, 2 at a time, in reverse order */ + raw_desc6_7 = _mm256_load_si256((void *)(rxdp + 6)); + rte_compiler_barrier(); + raw_desc4_5 = _mm256_load_si256((void *)(rxdp + 4)); + rte_compiler_barrier(); + raw_desc2_3 = _mm256_load_si256((void *)(rxdp + 2)); + rte_compiler_barrier(); + raw_desc0_1 = _mm256_load_si256((void *)(rxdp + 0)); + } else +#endif + do { + const __m128i raw_desc7 = _mm_load_si128((void *)(rxdp + 7)); + rte_compiler_barrier(); + const __m128i raw_desc6 = _mm_load_si128((void *)(rxdp + 6)); + rte_compiler_barrier(); + const __m128i raw_desc5 = 
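The l3_l4_flags_shuf constant above acts as an eight-entry lookup table keyed by the descriptor's IP, L4 and outer-IP checksum error bits. A scalar version of that translation, with hypothetical flag values and the same index ordering as the table above:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the PKT_RX_*_CKSUM_* flags. */
#define EX_IP_CKSUM_BAD  0x1u
#define EX_L4_CKSUM_BAD  0x2u
#define EX_EIP_CKSUM_BAD 0x4u

/* Indexed by the 3 error bits packed as bit0=IP, bit1=L4, bit2=outer IP. */
static const uint32_t ex_l3_l4_flags[8] = {
	0,
	EX_IP_CKSUM_BAD,
	EX_L4_CKSUM_BAD,
	EX_L4_CKSUM_BAD | EX_IP_CKSUM_BAD,
	EX_EIP_CKSUM_BAD,
	EX_EIP_CKSUM_BAD | EX_IP_CKSUM_BAD,
	EX_EIP_CKSUM_BAD | EX_L4_CKSUM_BAD,
	EX_EIP_CKSUM_BAD | EX_L4_CKSUM_BAD | EX_IP_CKSUM_BAD,
};

int main(void)
{
	uint32_t err_bits = 0x3;	/* synthetic: IP and L4 checksum errors */

	printf("ol_flags |= 0x%x\n", (unsigned)ex_l3_l4_flags[err_bits & 0x7]);
	return 0;
}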
_mm_load_si128((void *)(rxdp + 5)); + rte_compiler_barrier(); + const __m128i raw_desc4 = _mm_load_si128((void *)(rxdp + 4)); + rte_compiler_barrier(); + const __m128i raw_desc3 = _mm_load_si128((void *)(rxdp + 3)); + rte_compiler_barrier(); + const __m128i raw_desc2 = _mm_load_si128((void *)(rxdp + 2)); + rte_compiler_barrier(); + const __m128i raw_desc1 = _mm_load_si128((void *)(rxdp + 1)); + rte_compiler_barrier(); + const __m128i raw_desc0 = _mm_load_si128((void *)(rxdp + 0)); + + raw_desc6_7 = _mm256_inserti128_si256( + _mm256_castsi128_si256(raw_desc6), raw_desc7, 1); + raw_desc4_5 = _mm256_inserti128_si256( + _mm256_castsi128_si256(raw_desc4), raw_desc5, 1); + raw_desc2_3 = _mm256_inserti128_si256( + _mm256_castsi128_si256(raw_desc2), raw_desc3, 1); + raw_desc0_1 = _mm256_inserti128_si256( + _mm256_castsi128_si256(raw_desc0), raw_desc1, 1); + } while (0); + + if (split_packet) { + int j; + for (j = 0; j < RTE_I40E_DESCS_PER_LOOP_AVX; j++) + rte_mbuf_prefetch_part2(rx_pkts[i + j]); + } + + /* + * convert descriptors 4-7 into mbufs, adjusting length and + * re-arranging fields. Then write into the mbuf + */ + const __m256i len6_7 = _mm256_slli_epi32(raw_desc6_7, PKTLEN_SHIFT); + const __m256i len4_5 = _mm256_slli_epi32(raw_desc4_5, PKTLEN_SHIFT); + const __m256i desc6_7 = _mm256_blend_epi16(raw_desc6_7, len6_7, 0x80); + const __m256i desc4_5 = _mm256_blend_epi16(raw_desc4_5, len4_5, 0x80); + __m256i mb6_7 = _mm256_shuffle_epi8(desc6_7, shuf_msk); + __m256i mb4_5 = _mm256_shuffle_epi8(desc4_5, shuf_msk); + mb6_7 = _mm256_add_epi16(mb6_7, crc_adjust); + mb4_5 = _mm256_add_epi16(mb4_5, crc_adjust); + /* + * to get packet types, shift 64-bit values down 30 bits + * and so ptype is in lower 8-bits in each + */ + const __m256i ptypes6_7 = _mm256_srli_epi64(desc6_7, 30); + const __m256i ptypes4_5 = _mm256_srli_epi64(desc4_5, 30); + const uint8_t ptype7 = _mm256_extract_epi8(ptypes6_7, 24); + const uint8_t ptype6 = _mm256_extract_epi8(ptypes6_7, 8); + const uint8_t ptype5 = _mm256_extract_epi8(ptypes4_5, 24); + const uint8_t ptype4 = _mm256_extract_epi8(ptypes4_5, 8); + mb6_7 = _mm256_insert_epi32(mb6_7, ptype_tbl[ptype7], 4); + mb6_7 = _mm256_insert_epi32(mb6_7, ptype_tbl[ptype6], 0); + mb4_5 = _mm256_insert_epi32(mb4_5, ptype_tbl[ptype5], 4); + mb4_5 = _mm256_insert_epi32(mb4_5, ptype_tbl[ptype4], 0); + /* merge the status bits into one register */ + const __m256i status4_7 = _mm256_unpackhi_epi32(desc6_7, + desc4_5); + + /* + * convert descriptors 0-3 into mbufs, adjusting length and + * re-arranging fields. 
Then write into the mbuf + */ + const __m256i len2_3 = _mm256_slli_epi32(raw_desc2_3, PKTLEN_SHIFT); + const __m256i len0_1 = _mm256_slli_epi32(raw_desc0_1, PKTLEN_SHIFT); + const __m256i desc2_3 = _mm256_blend_epi16(raw_desc2_3, len2_3, 0x80); + const __m256i desc0_1 = _mm256_blend_epi16(raw_desc0_1, len0_1, 0x80); + __m256i mb2_3 = _mm256_shuffle_epi8(desc2_3, shuf_msk); + __m256i mb0_1 = _mm256_shuffle_epi8(desc0_1, shuf_msk); + mb2_3 = _mm256_add_epi16(mb2_3, crc_adjust); + mb0_1 = _mm256_add_epi16(mb0_1, crc_adjust); + /* get the packet types */ + const __m256i ptypes2_3 = _mm256_srli_epi64(desc2_3, 30); + const __m256i ptypes0_1 = _mm256_srli_epi64(desc0_1, 30); + const uint8_t ptype3 = _mm256_extract_epi8(ptypes2_3, 24); + const uint8_t ptype2 = _mm256_extract_epi8(ptypes2_3, 8); + const uint8_t ptype1 = _mm256_extract_epi8(ptypes0_1, 24); + const uint8_t ptype0 = _mm256_extract_epi8(ptypes0_1, 8); + mb2_3 = _mm256_insert_epi32(mb2_3, ptype_tbl[ptype3], 4); + mb2_3 = _mm256_insert_epi32(mb2_3, ptype_tbl[ptype2], 0); + mb0_1 = _mm256_insert_epi32(mb0_1, ptype_tbl[ptype1], 4); + mb0_1 = _mm256_insert_epi32(mb0_1, ptype_tbl[ptype0], 0); + /* merge the status bits into one register */ + const __m256i status0_3 = _mm256_unpackhi_epi32(desc2_3, + desc0_1); + + /* + * take the two sets of status bits and merge to one + * After merge, the packets status flags are in the + * order (hi->lo): [1, 3, 5, 7, 0, 2, 4, 6] + */ + __m256i status0_7 = _mm256_unpacklo_epi64(status4_7, + status0_3); + + /* now do flag manipulation */ + + /* get only flag/error bits we want */ + const __m256i flag_bits = _mm256_and_si256( + status0_7, flags_mask); + /* set vlan and rss flags */ + const __m256i vlan_flags = _mm256_shuffle_epi8( + vlan_flags_shuf, flag_bits); + const __m256i rss_fdir_bits = _mm256_srli_epi32(flag_bits, 11); + const __m256i rss_flags = _mm256_shuffle_epi8(rss_flags_shuf, + rss_fdir_bits); + + /* + * l3_l4_error flags, shuffle, then shift to correct adjustment + * of flags in flags_shuf, and finally mask out extra bits + */ + __m256i l3_l4_flags = _mm256_shuffle_epi8(l3_l4_flags_shuf, + _mm256_srli_epi32(flag_bits, 22)); + l3_l4_flags = _mm256_slli_epi32(l3_l4_flags, 1); + l3_l4_flags = _mm256_and_si256(l3_l4_flags, cksum_mask); + + /* merge flags */ + __m256i mbuf_flags = _mm256_or_si256(l3_l4_flags, + _mm256_or_si256(rss_flags, vlan_flags)); + + /* If the rxq has FDIR enabled, read and process the FDIR info + * from the descriptor. This can cause more loads/stores, so is + * not always performed. Branch over the code when not enabled. + */ + if (rxq->fdir_enabled) { +#ifdef RTE_LIBRTE_I40E_16BYTE_RX_DESC + /* 16B descriptor code path: + * RSS and FDIR ID use the same offset in the desc, so + * only one can be present at a time. The code below + * identifies an FDIR ID match, and zeros the RSS value + * in the mbuf on FDIR match to keep mbuf data clean. + */ +#define FDIR_BLEND_MASK ((1 << 3) | (1 << 7)) + + /* Flags: + * - Take flags, shift bits to null out + * - CMPEQ with known FDIR ID, to get 0xFFFF or 0 mask + * - Strip bits from mask, leaving 0 or 1 for FDIR ID + * - Merge with mbuf_flags + */ + /* FLM = 1, FLTSTAT = 0b01, (FLM | FLTSTAT) == 3. + * Shift left by 28 to avoid having to mask. + */ + const __m256i fdir = _mm256_slli_epi32(rss_fdir_bits, 28); + const __m256i fdir_id = _mm256_set1_epi32(3 << 28); + + /* As above, the fdir_mask to packet mapping is this: + * order (hi->lo): [1, 3, 5, 7, 0, 2, 4, 6] + * Then OR FDIR flags to mbuf_flags on FDIR ID hit. 
+ */ + RTE_BUILD_BUG_ON(PKT_RX_FDIR_ID != (1 << 13)); + const __m256i pkt_fdir_bit = _mm256_set1_epi32(1 << 13); + const __m256i fdir_mask = _mm256_cmpeq_epi32(fdir, fdir_id); + __m256i fdir_bits = _mm256_and_si256(fdir_mask, pkt_fdir_bit); + mbuf_flags = _mm256_or_si256(mbuf_flags, fdir_bits); + + /* Based on FDIR_MASK, clear the RSS or FDIR value. + * The FDIR ID value is masked to zero if not a hit, + * otherwise the mb0_1 register RSS field is zeroed. + */ + const __m256i fdir_zero_mask = _mm256_setzero_si256(); + __m256i tmp0_1 = _mm256_blend_epi32(fdir_zero_mask, + fdir_mask, FDIR_BLEND_MASK); + __m256i fdir_mb0_1 = _mm256_and_si256(mb0_1, fdir_mask); + mb0_1 = _mm256_andnot_si256(tmp0_1, mb0_1); + + /* Write to mbuf: no stores to combine with, so just a + * scalar store to push data here. + */ + rx_pkts[i + 0]->hash.fdir.hi = _mm256_extract_epi32(fdir_mb0_1, 3); + rx_pkts[i + 1]->hash.fdir.hi = _mm256_extract_epi32(fdir_mb0_1, 7); + + /* Same as above, only shift the fdir_mask to align + * the packet FDIR mask with the FDIR_ID desc lane. + */ + __m256i tmp2_3 = _mm256_alignr_epi8(fdir_mask, fdir_mask, 12); + __m256i fdir_mb2_3 = _mm256_and_si256(mb2_3, tmp2_3); + tmp2_3 = _mm256_blend_epi32(fdir_zero_mask, tmp2_3, + FDIR_BLEND_MASK); + mb2_3 = _mm256_andnot_si256(tmp2_3, mb2_3); + rx_pkts[i + 2]->hash.fdir.hi = _mm256_extract_epi32(fdir_mb2_3, 3); + rx_pkts[i + 3]->hash.fdir.hi = _mm256_extract_epi32(fdir_mb2_3, 7); + + __m256i tmp4_5 = _mm256_alignr_epi8(fdir_mask, fdir_mask, 8); + __m256i fdir_mb4_5 = _mm256_and_si256(mb4_5, tmp4_5); + tmp4_5 = _mm256_blend_epi32(fdir_zero_mask, tmp4_5, + FDIR_BLEND_MASK); + mb4_5 = _mm256_andnot_si256(tmp4_5, mb4_5); + rx_pkts[i + 4]->hash.fdir.hi = _mm256_extract_epi32(fdir_mb4_5, 3); + rx_pkts[i + 5]->hash.fdir.hi = _mm256_extract_epi32(fdir_mb4_5, 7); + + __m256i tmp6_7 = _mm256_alignr_epi8(fdir_mask, fdir_mask, 4); + __m256i fdir_mb6_7 = _mm256_and_si256(mb6_7, tmp6_7); + tmp6_7 = _mm256_blend_epi32(fdir_zero_mask, tmp6_7, + FDIR_BLEND_MASK); + mb6_7 = _mm256_andnot_si256(tmp6_7, mb6_7); + rx_pkts[i + 6]->hash.fdir.hi = _mm256_extract_epi32(fdir_mb6_7, 3); + rx_pkts[i + 7]->hash.fdir.hi = _mm256_extract_epi32(fdir_mb6_7, 7); + + /* End of 16B descriptor handling */ +#else + /* 32B descriptor FDIR ID mark handling. Returns bits + * to be OR-ed into the mbuf olflags. + */ + __m256i fdir_add_flags; + fdir_add_flags = desc_fdir_processing_32b(rxdp, rx_pkts, i, 0); + mbuf_flags = _mm256_or_si256(mbuf_flags, fdir_add_flags); + + fdir_add_flags = desc_fdir_processing_32b(rxdp, rx_pkts, i, 2); + mbuf_flags = _mm256_or_si256(mbuf_flags, fdir_add_flags); + + fdir_add_flags = desc_fdir_processing_32b(rxdp, rx_pkts, i, 4); + mbuf_flags = _mm256_or_si256(mbuf_flags, fdir_add_flags); + + fdir_add_flags = desc_fdir_processing_32b(rxdp, rx_pkts, i, 6); + mbuf_flags = _mm256_or_si256(mbuf_flags, fdir_add_flags); + /* End 32B desc handling */ +#endif /* RTE_LIBRTE_I40E_16BYTE_RX_DESC */ + + } /* if() on FDIR enabled */ + + /* + * At this point, we have the 8 sets of flags in the low 16-bits + * of each 32-bit value in vlan0. + * We want to extract these, and merge them with the mbuf init data + * so we can do a single write to the mbuf to set the flags + * and all the other initialization fields. Extracting the + * appropriate flags means that we have to do a shift and blend for + * each mbuf before we do the write. 
However, we can also + * add in the previously computed rx_descriptor fields to + * make a single 256-bit write per mbuf + */ + /* check the structure matches expectations */ + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) != + offsetof(struct rte_mbuf, rearm_data) + 8); + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, rearm_data) != + RTE_ALIGN(offsetof(struct rte_mbuf, rearm_data), 16)); + /* build up data and do writes */ + __m256i rearm0, rearm1, rearm2, rearm3, rearm4, rearm5, + rearm6, rearm7; + rearm6 = _mm256_blend_epi32(mbuf_init, _mm256_slli_si256(mbuf_flags, 8), 0x04); + rearm4 = _mm256_blend_epi32(mbuf_init, _mm256_slli_si256(mbuf_flags, 4), 0x04); + rearm2 = _mm256_blend_epi32(mbuf_init, mbuf_flags, 0x04); + rearm0 = _mm256_blend_epi32(mbuf_init, _mm256_srli_si256(mbuf_flags, 4), 0x04); + /* permute to add in the rx_descriptor e.g. rss fields */ + rearm6 = _mm256_permute2f128_si256(rearm6, mb6_7, 0x20); + rearm4 = _mm256_permute2f128_si256(rearm4, mb4_5, 0x20); + rearm2 = _mm256_permute2f128_si256(rearm2, mb2_3, 0x20); + rearm0 = _mm256_permute2f128_si256(rearm0, mb0_1, 0x20); + /* write to mbuf */ + _mm256_storeu_si256((__m256i *)&rx_pkts[i + 6]->rearm_data, rearm6); + _mm256_storeu_si256((__m256i *)&rx_pkts[i + 4]->rearm_data, rearm4); + _mm256_storeu_si256((__m256i *)&rx_pkts[i + 2]->rearm_data, rearm2); + _mm256_storeu_si256((__m256i *)&rx_pkts[i + 0]->rearm_data, rearm0); + + /* repeat for the odd mbufs */ + const __m256i odd_flags = _mm256_castsi128_si256( + _mm256_extracti128_si256(mbuf_flags, 1)); + rearm7 = _mm256_blend_epi32(mbuf_init, _mm256_slli_si256(odd_flags, 8), 0x04); + rearm5 = _mm256_blend_epi32(mbuf_init, _mm256_slli_si256(odd_flags, 4), 0x04); + rearm3 = _mm256_blend_epi32(mbuf_init, odd_flags, 0x04); + rearm1 = _mm256_blend_epi32(mbuf_init, _mm256_srli_si256(odd_flags, 4), 0x04); + /* since odd mbufs are already in hi 128-bits use blend */ + rearm7 = _mm256_blend_epi32(rearm7, mb6_7, 0xF0); + rearm5 = _mm256_blend_epi32(rearm5, mb4_5, 0xF0); + rearm3 = _mm256_blend_epi32(rearm3, mb2_3, 0xF0); + rearm1 = _mm256_blend_epi32(rearm1, mb0_1, 0xF0); + /* again write to mbufs */ + _mm256_storeu_si256((__m256i *)&rx_pkts[i + 7]->rearm_data, rearm7); + _mm256_storeu_si256((__m256i *)&rx_pkts[i + 5]->rearm_data, rearm5); + _mm256_storeu_si256((__m256i *)&rx_pkts[i + 3]->rearm_data, rearm3); + _mm256_storeu_si256((__m256i *)&rx_pkts[i + 1]->rearm_data, rearm1); + + /* extract and record EOP bit */ + if (split_packet) { + const __m128i eop_mask = _mm_set1_epi16( + 1 << I40E_RX_DESC_STATUS_EOF_SHIFT); + const __m256i eop_bits256 = _mm256_and_si256(status0_7, + eop_check); + /* pack status bits into a single 128-bit register */ + const __m128i eop_bits = _mm_packus_epi32( + _mm256_castsi256_si128(eop_bits256), + _mm256_extractf128_si256(eop_bits256, 1)); + /* + * flip bits, and mask out the EOP bit, which is now + * a split-packet bit i.e. !EOP, rather than EOP one. + */ + __m128i split_bits = _mm_andnot_si128(eop_bits, + eop_mask); + /* + * eop bits are out of order, so we need to shuffle them + * back into order again. In doing so, only use low 8 + * bits, which acts like another pack instruction + * The original order is (hi->lo): 1,3,5,7,0,2,4,6 + * [Since we use epi8, the 16-bit positions are + * multiplied by 2 in the eop_shuffle value.] 
+ */ + __m128i eop_shuffle = _mm_set_epi8( + 0xFF, 0xFF, 0xFF, 0xFF, /* zero hi 64b */ + 0xFF, 0xFF, 0xFF, 0xFF, + 8, 0, 10, 2, /* move values to lo 64b */ + 12, 4, 14, 6); + split_bits = _mm_shuffle_epi8(split_bits, eop_shuffle); + *(uint64_t *)split_packet = _mm_cvtsi128_si64(split_bits); + split_packet += RTE_I40E_DESCS_PER_LOOP_AVX; + } + + /* perform dd_check */ + status0_7 = _mm256_and_si256(status0_7, dd_check); + status0_7 = _mm256_packs_epi32(status0_7, + _mm256_setzero_si256()); + + uint64_t burst = __builtin_popcountll(_mm_cvtsi128_si64( + _mm256_extracti128_si256(status0_7, 1))); + burst += __builtin_popcountll(_mm_cvtsi128_si64( + _mm256_castsi256_si128(status0_7))); + received += burst; + if (burst != RTE_I40E_DESCS_PER_LOOP_AVX) + break; + } + + /* update tail pointers */ + rxq->rx_tail += received; + rxq->rx_tail &= (rxq->nb_rx_desc - 1); + if ((rxq->rx_tail & 1) == 1 && received > 1) { /* keep avx2 aligned */ + rxq->rx_tail--; + received--; + } + rxq->rxrearm_nb += received; + return received; +} + +/* + * Notice: + * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet + */ +uint16_t +i40e_recv_pkts_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + return _recv_raw_pkts_vec_avx2(rx_queue, rx_pkts, nb_pkts, NULL); +} + +/* + * vPMD receive routine that reassembles single burst of 32 scattered packets + * Notice: + * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet + */ +static uint16_t +i40e_recv_scattered_burst_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + struct i40e_rx_queue *rxq = rx_queue; + uint8_t split_flags[RTE_I40E_VPMD_RX_BURST] = {0}; + + /* get some new buffers */ + uint16_t nb_bufs = _recv_raw_pkts_vec_avx2(rxq, rx_pkts, nb_pkts, + split_flags); + if (nb_bufs == 0) + return 0; + + /* happy day case, full burst + no packets to be joined */ + const uint64_t *split_fl64 = (uint64_t *)split_flags; + + if (rxq->pkt_first_seg == NULL && + split_fl64[0] == 0 && split_fl64[1] == 0 && + split_fl64[2] == 0 && split_fl64[3] == 0) + return nb_bufs; + + /* reassemble any packets that need reassembly*/ + unsigned int i = 0; + + if (rxq->pkt_first_seg == NULL) { + /* find the first split flag, and only reassemble then*/ + while (i < nb_bufs && !split_flags[i]) + i++; + if (i == nb_bufs) + return nb_bufs; + rxq->pkt_first_seg = rx_pkts[i]; + } + return i + reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i, + &split_flags[i]); +} + +/* + * vPMD receive routine that reassembles scattered packets. 
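After a raw burst, the scattered-Rx helper above decides whether any reassembly is needed by reading the 32 per-packet split flags as four 64-bit words; if all are zero and no packet is pending, the burst is returned as-is. A scalar sketch of that check:

#include <stdint.h>
#include <string.h>
#include <stdbool.h>
#include <stdio.h>

#define EX_VPMD_RX_BURST 32

static bool ex_any_split(const uint8_t *split_flags)
{
	uint64_t w[4];

	/* same trick as above: scan 32 flag bytes as four 64-bit words */
	memcpy(w, split_flags, sizeof(w));
	return (w[0] | w[1] | w[2] | w[3]) != 0;
}

int main(void)
{
	uint8_t flags[EX_VPMD_RX_BURST] = {0};

	printf("%d\n", ex_any_split(flags));	/* 0: happy path, no reassembly */
	flags[17] = 1;
	printf("%d\n", ex_any_split(flags));	/* 1: at least one split packet */
	return 0;
}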
+ * Main receive routine that can handle arbitrary burst sizes + * Notice: + * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet + */ +uint16_t +i40e_recv_scattered_pkts_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + uint16_t retval = 0; + while (nb_pkts > RTE_I40E_VPMD_RX_BURST) { + uint16_t burst = i40e_recv_scattered_burst_vec_avx2(rx_queue, + rx_pkts + retval, RTE_I40E_VPMD_RX_BURST); + retval += burst; + nb_pkts -= burst; + if (burst < RTE_I40E_VPMD_RX_BURST) + return retval; + } + return retval + i40e_recv_scattered_burst_vec_avx2(rx_queue, + rx_pkts + retval, nb_pkts); +} + + +static inline void +vtx1(volatile struct i40e_tx_desc *txdp, + struct rte_mbuf *pkt, uint64_t flags) +{ + uint64_t high_qw = (I40E_TX_DESC_DTYPE_DATA | + ((uint64_t)flags << I40E_TXD_QW1_CMD_SHIFT) | + ((uint64_t)pkt->data_len << I40E_TXD_QW1_TX_BUF_SZ_SHIFT)); + + __m128i descriptor = _mm_set_epi64x(high_qw, + pkt->buf_physaddr + pkt->data_off); + _mm_store_si128((__m128i *)txdp, descriptor); +} + +static inline void +vtx(volatile struct i40e_tx_desc *txdp, + struct rte_mbuf **pkt, uint16_t nb_pkts, uint64_t flags) +{ + const uint64_t hi_qw_tmpl = (I40E_TX_DESC_DTYPE_DATA | + ((uint64_t)flags << I40E_TXD_QW1_CMD_SHIFT)); + + /* if unaligned on 32-bit boundary, do one to align */ + if (((uintptr_t)txdp & 0x1F) != 0 && nb_pkts != 0) { + vtx1(txdp, *pkt, flags); + nb_pkts--, txdp++, pkt++; + } + + /* do two at a time while possible, in bursts */ + for (; nb_pkts > 3; txdp += 4, pkt += 4, nb_pkts -= 4) { + uint64_t hi_qw3 = hi_qw_tmpl | + ((uint64_t)pkt[3]->data_len << I40E_TXD_QW1_TX_BUF_SZ_SHIFT); + uint64_t hi_qw2 = hi_qw_tmpl | + ((uint64_t)pkt[2]->data_len << I40E_TXD_QW1_TX_BUF_SZ_SHIFT); + uint64_t hi_qw1 = hi_qw_tmpl | + ((uint64_t)pkt[1]->data_len << I40E_TXD_QW1_TX_BUF_SZ_SHIFT); + uint64_t hi_qw0 = hi_qw_tmpl | + ((uint64_t)pkt[0]->data_len << I40E_TXD_QW1_TX_BUF_SZ_SHIFT); + + __m256i desc2_3 = _mm256_set_epi64x( + hi_qw3, pkt[3]->buf_physaddr + pkt[3]->data_off, + hi_qw2, pkt[2]->buf_physaddr + pkt[2]->data_off); + __m256i desc0_1 = _mm256_set_epi64x( + hi_qw1, pkt[1]->buf_physaddr + pkt[1]->data_off, + hi_qw0, pkt[0]->buf_physaddr + pkt[0]->data_off); + _mm256_store_si256((void *)(txdp + 2), desc2_3); + _mm256_store_si256((void *)txdp, desc0_1); + } + + /* do any last ones */ + while (nb_pkts) { + vtx1(txdp, *pkt, flags); + txdp++, pkt++, nb_pkts--; + } +} + +static inline uint16_t +i40e_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + struct i40e_tx_queue *txq = (struct i40e_tx_queue *)tx_queue; + volatile struct i40e_tx_desc *txdp; + struct i40e_tx_entry *txep; + uint16_t n, nb_commit, tx_id; + uint64_t flags = I40E_TD_CMD; + uint64_t rs = I40E_TX_DESC_CMD_RS | I40E_TD_CMD; + + /* cross rx_thresh boundary is not allowed */ + nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh); + + if (txq->nb_tx_free < txq->tx_free_thresh) + i40e_tx_free_bufs(txq); + + nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts); + if (unlikely(nb_pkts == 0)) + return 0; + + tx_id = txq->tx_tail; + txdp = &txq->tx_ring[tx_id]; + txep = &txq->sw_ring[tx_id]; + + txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts); + + n = (uint16_t)(txq->nb_tx_desc - tx_id); + if (nb_commit >= n) { + tx_backlog_entry(txep, tx_pkts, n); + + vtx(txdp, tx_pkts, n - 1, flags); + tx_pkts += (n - 1); + txdp += (n - 1); + + vtx1(txdp, *tx_pkts++, rs); + + nb_commit = (uint16_t)(nb_commit - n); + + tx_id = 0; + txq->tx_next_rs = 
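i40e_recv_scattered_pkts_vec_avx2() above serves arbitrary burst sizes by calling the fixed-size burst routine in RTE_I40E_VPMD_RX_BURST chunks and stopping at the first short return. The wrapper pattern on its own, around a stub standing in for the vector receive:

#include <stdint.h>
#include <stdio.h>

#define EX_BURST 32u

/* Stub standing in for the fixed-size vector receive; returns a short
 * count on the third call to emulate the ring running dry. */
static uint16_t ex_recv_burst(uint16_t want)
{
	static int calls;

	return (uint16_t)(calls++ < 2 ? want : want / 2);
}

static uint16_t ex_recv_pkts(uint16_t nb_pkts)
{
	uint16_t retval = 0;

	while (nb_pkts > EX_BURST) {
		uint16_t burst = ex_recv_burst(EX_BURST);

		retval += burst;
		nb_pkts -= burst;
		if (burst < EX_BURST)
			return retval;	/* ring exhausted: stop early */
	}
	return retval + ex_recv_burst(nb_pkts);
}

int main(void)
{
	printf("%u\n", (unsigned)ex_recv_pkts(100));	/* 32 + 32 + 16 = 80 */
	return 0;
}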
(uint16_t)(txq->tx_rs_thresh - 1); + + /* avoid reach the end of ring */ + txdp = &txq->tx_ring[tx_id]; + txep = &txq->sw_ring[tx_id]; + } + + tx_backlog_entry(txep, tx_pkts, nb_commit); + + vtx(txdp, tx_pkts, nb_commit, flags); + + tx_id = (uint16_t)(tx_id + nb_commit); + if (tx_id > txq->tx_next_rs) { + txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |= + rte_cpu_to_le_64(((uint64_t)I40E_TX_DESC_CMD_RS) << + I40E_TXD_QW1_CMD_SHIFT); + txq->tx_next_rs = + (uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh); + } + + txq->tx_tail = tx_id; + + I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail); + + return nb_pkts; +} + +uint16_t +i40e_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + uint16_t nb_tx = 0; + struct i40e_tx_queue *txq = (struct i40e_tx_queue *)tx_queue; + + while (nb_pkts) { + uint16_t ret, num; + + num = (uint16_t)RTE_MIN(nb_pkts, txq->tx_rs_thresh); + ret = i40e_xmit_fixed_burst_vec_avx2(tx_queue, &tx_pkts[nb_tx], + num); + nb_tx += ret; + nb_pkts -= ret; + if (ret < num) + break; + } + + return nb_tx; +} diff --git a/src/spdk/dpdk/drivers/net/i40e/i40e_rxtx_vec_common.h b/src/spdk/dpdk/drivers/net/i40e/i40e_rxtx_vec_common.h new file mode 100644 index 000000000..31f73f605 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/i40e/i40e_rxtx_vec_common.h @@ -0,0 +1,255 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2015 Intel Corporation + */ + +#ifndef _I40E_RXTX_VEC_COMMON_H_ +#define _I40E_RXTX_VEC_COMMON_H_ +#include +#include +#include + +#include "i40e_ethdev.h" +#include "i40e_rxtx.h" + +static inline uint16_t +reassemble_packets(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_bufs, + uint16_t nb_bufs, uint8_t *split_flags) +{ + struct rte_mbuf *pkts[RTE_I40E_VPMD_RX_BURST]; /*finished pkts*/ + struct rte_mbuf *start = rxq->pkt_first_seg; + struct rte_mbuf *end = rxq->pkt_last_seg; + unsigned pkt_idx, buf_idx; + + for (buf_idx = 0, pkt_idx = 0; buf_idx < nb_bufs; buf_idx++) { + if (end != NULL) { + /* processing a split packet */ + end->next = rx_bufs[buf_idx]; + rx_bufs[buf_idx]->data_len += rxq->crc_len; + + start->nb_segs++; + start->pkt_len += rx_bufs[buf_idx]->data_len; + end = end->next; + + if (!split_flags[buf_idx]) { + /* it's the last packet of the set */ + start->hash = end->hash; + start->vlan_tci = end->vlan_tci; + start->ol_flags = end->ol_flags; + /* we need to strip crc for the whole packet */ + start->pkt_len -= rxq->crc_len; + if (end->data_len > rxq->crc_len) + end->data_len -= rxq->crc_len; + else { + /* free up last mbuf */ + struct rte_mbuf *secondlast = start; + + start->nb_segs--; + while (secondlast->next != end) + secondlast = secondlast->next; + secondlast->data_len -= (rxq->crc_len - + end->data_len); + secondlast->next = NULL; + rte_pktmbuf_free_seg(end); + } + pkts[pkt_idx++] = start; + start = end = NULL; + } + } else { + /* not processing a split packet */ + if (!split_flags[buf_idx]) { + /* not a split packet, save and skip */ + pkts[pkt_idx++] = rx_bufs[buf_idx]; + continue; + } + end = start = rx_bufs[buf_idx]; + rx_bufs[buf_idx]->data_len += rxq->crc_len; + rx_bufs[buf_idx]->pkt_len += rxq->crc_len; + } + } + + /* save the partial packet for next time */ + rxq->pkt_first_seg = start; + rxq->pkt_last_seg = end; + memcpy(rx_bufs, pkts, pkt_idx * (sizeof(*pkts))); + return pkt_idx; +} + +static __rte_always_inline int +i40e_tx_free_bufs(struct i40e_tx_queue *txq) +{ + struct i40e_tx_entry *txep; + uint32_t n; + uint32_t i; + int nb_free = 0; + struct rte_mbuf *m, 
*free[RTE_I40E_TX_MAX_FREE_BUF_SZ]; + + /* check DD bits on threshold descriptor */ + if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz & + rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) != + rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE)) + return 0; + + n = txq->tx_rs_thresh; + + /* first buffer to free from S/W ring is at index + * tx_next_dd - (tx_rs_thresh-1) + */ + txep = &txq->sw_ring[txq->tx_next_dd - (n - 1)]; + m = rte_pktmbuf_prefree_seg(txep[0].mbuf); + if (likely(m != NULL)) { + free[0] = m; + nb_free = 1; + for (i = 1; i < n; i++) { + m = rte_pktmbuf_prefree_seg(txep[i].mbuf); + if (likely(m != NULL)) { + if (likely(m->pool == free[0]->pool)) { + free[nb_free++] = m; + } else { + rte_mempool_put_bulk(free[0]->pool, + (void *)free, + nb_free); + free[0] = m; + nb_free = 1; + } + } + } + rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free); + } else { + for (i = 1; i < n; i++) { + m = rte_pktmbuf_prefree_seg(txep[i].mbuf); + if (m != NULL) + rte_mempool_put(m->pool, m); + } + } + + /* buffers were freed, update counters */ + txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh); + txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh); + if (txq->tx_next_dd >= txq->nb_tx_desc) + txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1); + + return txq->tx_rs_thresh; +} + +static __rte_always_inline void +tx_backlog_entry(struct i40e_tx_entry *txep, + struct rte_mbuf **tx_pkts, uint16_t nb_pkts) +{ + int i; + + for (i = 0; i < (int)nb_pkts; ++i) + txep[i].mbuf = tx_pkts[i]; +} + +static inline void +_i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue *rxq) +{ + const unsigned mask = rxq->nb_rx_desc - 1; + unsigned i; + + if (rxq->sw_ring == NULL || rxq->rxrearm_nb >= rxq->nb_rx_desc) + return; + + /* free all mbufs that are valid in the ring */ + if (rxq->rxrearm_nb == 0) { + for (i = 0; i < rxq->nb_rx_desc; i++) { + if (rxq->sw_ring[i].mbuf != NULL) + rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf); + } + } else { + for (i = rxq->rx_tail; + i != rxq->rxrearm_start; + i = (i + 1) & mask) { + if (rxq->sw_ring[i].mbuf != NULL) + rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf); + } + } + + rxq->rxrearm_nb = rxq->nb_rx_desc; + + /* set all entries to NULL */ + memset(rxq->sw_ring, 0, sizeof(rxq->sw_ring[0]) * rxq->nb_rx_desc); +} + +static inline int +i40e_rxq_vec_setup_default(struct i40e_rx_queue *rxq) +{ + uintptr_t p; + struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */ + + mb_def.nb_segs = 1; + mb_def.data_off = RTE_PKTMBUF_HEADROOM; + mb_def.port = rxq->port_id; + rte_mbuf_refcnt_set(&mb_def, 1); + + /* prevent compiler reordering: rearm_data covers previous fields */ + rte_compiler_barrier(); + p = (uintptr_t)&mb_def.rearm_data; + rxq->mbuf_initializer = *(uint64_t *)p; + return 0; +} + +static inline int +i40e_rx_vec_dev_conf_condition_check_default(struct rte_eth_dev *dev) +{ +#ifndef RTE_LIBRTE_IEEE1588 + struct i40e_adapter *ad = + I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode; + struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf; + struct i40e_rx_queue *rxq; + uint16_t desc, i; + bool first_queue; + + /* no fdir support */ + if (fconf->mode != RTE_FDIR_MODE_NONE) + return -1; + + /* no header split support */ + if (rxmode->offloads & DEV_RX_OFFLOAD_HEADER_SPLIT) + return -1; + + /* no QinQ support */ + if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) + return -1; + + /** + * Vector mode is allowed only when number of Rx queue + * descriptor is power of 2. 
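+ * (the vector Rx paths wrap rx_tail with a mask of nb_rx_desc - 1, which is only correct when the ring size is a power of 2)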
+ */ + if (!dev->data->dev_started) { + first_queue = true; + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = dev->data->rx_queues[i]; + if (!rxq) + continue; + desc = rxq->nb_rx_desc; + if (first_queue) + ad->rx_vec_allowed = + rte_is_power_of_2(desc); + else + ad->rx_vec_allowed = + ad->rx_vec_allowed ? + rte_is_power_of_2(desc) : + ad->rx_vec_allowed; + first_queue = false; + } + } else { + /* Only check the first queue's descriptor number */ + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = dev->data->rx_queues[i]; + if (!rxq) + continue; + desc = rxq->nb_rx_desc; + ad->rx_vec_allowed = rte_is_power_of_2(desc); + break; + } + } + + return 0; +#else + RTE_SET_USED(dev); + return -1; +#endif +} +#endif diff --git a/src/spdk/dpdk/drivers/net/i40e/i40e_rxtx_vec_neon.c b/src/spdk/dpdk/drivers/net/i40e/i40e_rxtx_vec_neon.c new file mode 100644 index 000000000..1dfd0478b --- /dev/null +++ b/src/spdk/dpdk/drivers/net/i40e/i40e_rxtx_vec_neon.c @@ -0,0 +1,596 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2015 Intel Corporation. + * Copyright(c) 2016-2018, Linaro Limited. + */ + +#include +#include +#include + +#include "base/i40e_prototype.h" +#include "base/i40e_type.h" +#include "i40e_ethdev.h" +#include "i40e_rxtx.h" +#include "i40e_rxtx_vec_common.h" + +#include + +#pragma GCC diagnostic ignored "-Wcast-qual" + +static inline void +i40e_rxq_rearm(struct i40e_rx_queue *rxq) +{ + int i; + uint16_t rx_id; + volatile union i40e_rx_desc *rxdp; + struct i40e_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start]; + struct rte_mbuf *mb0, *mb1; + uint64x2_t dma_addr0, dma_addr1; + uint64x2_t zero = vdupq_n_u64(0); + uint64_t paddr; + + rxdp = rxq->rx_ring + rxq->rxrearm_start; + + /* Pull 'n' more MBUFs into the software ring */ + if (unlikely(rte_mempool_get_bulk(rxq->mp, + (void *)rxep, + RTE_I40E_RXQ_REARM_THRESH) < 0)) { + if (rxq->rxrearm_nb + RTE_I40E_RXQ_REARM_THRESH >= + rxq->nb_rx_desc) { + for (i = 0; i < RTE_I40E_DESCS_PER_LOOP; i++) { + rxep[i].mbuf = &rxq->fake_mbuf; + vst1q_u64((uint64_t *)&rxdp[i].read, zero); + } + } + rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed += + RTE_I40E_RXQ_REARM_THRESH; + return; + } + + /* Initialize the mbufs in vector, process 2 mbufs in one loop */ + for (i = 0; i < RTE_I40E_RXQ_REARM_THRESH; i += 2, rxep += 2) { + mb0 = rxep[0].mbuf; + mb1 = rxep[1].mbuf; + + paddr = mb0->buf_iova + RTE_PKTMBUF_HEADROOM; + dma_addr0 = vdupq_n_u64(paddr); + + /* flush desc with pa dma_addr */ + vst1q_u64((uint64_t *)&rxdp++->read, dma_addr0); + + paddr = mb1->buf_iova + RTE_PKTMBUF_HEADROOM; + dma_addr1 = vdupq_n_u64(paddr); + vst1q_u64((uint64_t *)&rxdp++->read, dma_addr1); + } + + rxq->rxrearm_start += RTE_I40E_RXQ_REARM_THRESH; + if (rxq->rxrearm_start >= rxq->nb_rx_desc) + rxq->rxrearm_start = 0; + + rxq->rxrearm_nb -= RTE_I40E_RXQ_REARM_THRESH; + + rx_id = (uint16_t)((rxq->rxrearm_start == 0) ? + (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1)); + + rte_cio_wmb(); + /* Update the tail pointer on the NIC */ + I40E_PCI_REG_WRITE_RELAXED(rxq->qrx_tail, rx_id); +} + +static inline void +desc_to_olflags_v(struct i40e_rx_queue *rxq, uint64x2_t descs[4], + struct rte_mbuf **rx_pkts) +{ + uint32x4_t vlan0, vlan1, rss, l3_l4e; + const uint64x2_t mbuf_init = {rxq->mbuf_initializer, 0}; + uint64x2_t rearm0, rearm1, rearm2, rearm3; + + /* mask everything except RSS, flow director and VLAN flags + * bit2 is for VLAN tag, bit11 for flow director indication + * bit13:12 for RSS indication. 
+ */ + const uint32x4_t rss_vlan_msk = { + 0x1c03804, 0x1c03804, 0x1c03804, 0x1c03804}; + + const uint32x4_t cksum_mask = { + PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD | + PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD | + PKT_RX_EIP_CKSUM_BAD, + PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD | + PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD | + PKT_RX_EIP_CKSUM_BAD, + PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD | + PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD | + PKT_RX_EIP_CKSUM_BAD, + PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD | + PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD | + PKT_RX_EIP_CKSUM_BAD}; + + /* map rss and vlan type to rss hash and vlan flag */ + const uint8x16_t vlan_flags = { + 0, 0, 0, 0, + PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0}; + + const uint8x16_t rss_flags = { + 0, PKT_RX_FDIR, 0, 0, + 0, 0, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH | PKT_RX_FDIR, + 0, 0, 0, 0, + 0, 0, 0, 0}; + + const uint8x16_t l3_l4e_flags = { + (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> 1, + PKT_RX_IP_CKSUM_BAD >> 1, + (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD) >> 1, + (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD) >> 1, + (PKT_RX_EIP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD | + PKT_RX_L4_CKSUM_BAD) >> 1, + (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | + PKT_RX_IP_CKSUM_BAD) >> 1, + 0, 0, 0, 0, 0, 0, 0, 0}; + + vlan0 = vzipq_u32(vreinterpretq_u32_u64(descs[0]), + vreinterpretq_u32_u64(descs[2])).val[1]; + vlan1 = vzipq_u32(vreinterpretq_u32_u64(descs[1]), + vreinterpretq_u32_u64(descs[3])).val[1]; + vlan0 = vzipq_u32(vlan0, vlan1).val[0]; + + vlan1 = vandq_u32(vlan0, rss_vlan_msk); + vlan0 = vreinterpretq_u32_u8(vqtbl1q_u8(vlan_flags, + vreinterpretq_u8_u32(vlan1))); + + rss = vshrq_n_u32(vlan1, 11); + rss = vreinterpretq_u32_u8(vqtbl1q_u8(rss_flags, + vreinterpretq_u8_u32(rss))); + + l3_l4e = vshrq_n_u32(vlan1, 22); + l3_l4e = vreinterpretq_u32_u8(vqtbl1q_u8(l3_l4e_flags, + vreinterpretq_u8_u32(l3_l4e))); + /* then we shift left 1 bit */ + l3_l4e = vshlq_n_u32(l3_l4e, 1); + /* we need to mask out the reduntant bits */ + l3_l4e = vandq_u32(l3_l4e, cksum_mask); + + vlan0 = vorrq_u32(vlan0, rss); + vlan0 = vorrq_u32(vlan0, l3_l4e); + + rearm0 = vsetq_lane_u64(vgetq_lane_u32(vlan0, 0), mbuf_init, 1); + rearm1 = vsetq_lane_u64(vgetq_lane_u32(vlan0, 1), mbuf_init, 1); + rearm2 = vsetq_lane_u64(vgetq_lane_u32(vlan0, 2), mbuf_init, 1); + rearm3 = vsetq_lane_u64(vgetq_lane_u32(vlan0, 3), mbuf_init, 1); + + vst1q_u64((uint64_t *)&rx_pkts[0]->rearm_data, rearm0); + vst1q_u64((uint64_t *)&rx_pkts[1]->rearm_data, rearm1); + vst1q_u64((uint64_t *)&rx_pkts[2]->rearm_data, rearm2); + vst1q_u64((uint64_t *)&rx_pkts[3]->rearm_data, rearm3); +} + +#define PKTLEN_SHIFT 10 +#define I40E_UINT16_BIT (CHAR_BIT * sizeof(uint16_t)) + +static inline void +desc_to_ptype_v(uint64x2_t descs[4], struct rte_mbuf **__restrict rx_pkts, + uint32_t *__restrict ptype_tbl) +{ + int i; + uint8_t ptype; + uint8x16_t tmp; + + for (i = 0; i < 4; i++) { + tmp = vreinterpretq_u8_u64(vshrq_n_u64(descs[i], 30)); + ptype = vgetq_lane_u8(tmp, 8); + rx_pkts[i]->packet_type = ptype_tbl[ptype]; + } + +} + + /* + * Notice: + * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet + * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_I40E_VPMD_RX_BURST + * numbers of DD bits + */ +static inline uint16_t +_recv_raw_pkts_vec(struct i40e_rx_queue *__restrict rxq, struct rte_mbuf + **__restrict rx_pkts, uint16_t nb_pkts, uint8_t 
*split_packet) +{ + volatile union i40e_rx_desc *rxdp; + struct i40e_rx_entry *sw_ring; + uint16_t nb_pkts_recd; + int pos; + uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl; + + /* mask to shuffle from desc. to mbuf */ + uint8x16_t shuf_msk = { + 0xFF, 0xFF, /* pkt_type set as unknown */ + 0xFF, 0xFF, /* pkt_type set as unknown */ + 14, 15, /* octet 15~14, low 16 bits pkt_len */ + 0xFF, 0xFF, /* skip high 16 bits pkt_len, zero out */ + 14, 15, /* octet 15~14, 16 bits data_len */ + 2, 3, /* octet 2~3, low 16 bits vlan_macip */ + 4, 5, 6, 7 /* octet 4~7, 32bits rss */ + }; + + uint8x16_t eop_check = { + 0x02, 0x00, 0x02, 0x00, + 0x02, 0x00, 0x02, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00 + }; + + uint16x8_t crc_adjust = { + 0, 0, /* ignore pkt_type field */ + rxq->crc_len, /* sub crc on pkt_len */ + 0, /* ignore high-16bits of pkt_len */ + rxq->crc_len, /* sub crc on data_len */ + 0, 0, 0 /* ignore non-length fields */ + }; + + /* nb_pkts shall be less equal than RTE_I40E_MAX_RX_BURST */ + nb_pkts = RTE_MIN(nb_pkts, RTE_I40E_MAX_RX_BURST); + + /* nb_pkts has to be floor-aligned to RTE_I40E_DESCS_PER_LOOP */ + nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_I40E_DESCS_PER_LOOP); + + /* Just the act of getting into the function from the application is + * going to cost about 7 cycles + */ + rxdp = rxq->rx_ring + rxq->rx_tail; + + rte_prefetch_non_temporal(rxdp); + + /* See if we need to rearm the RX queue - gives the prefetch a bit + * of time to act + */ + if (rxq->rxrearm_nb > RTE_I40E_RXQ_REARM_THRESH) + i40e_rxq_rearm(rxq); + + /* Before we start moving massive data around, check to see if + * there is actually a packet available + */ + if (!(rxdp->wb.qword1.status_error_len & + rte_cpu_to_le_32(1 << I40E_RX_DESC_STATUS_DD_SHIFT))) + return 0; + + /* Cache is empty -> need to scan the buffer rings, but first move + * the next 'n' mbufs into the cache + */ + sw_ring = &rxq->sw_ring[rxq->rx_tail]; + + /* A. load 4 packet in one loop + * [A*. mask out 4 unused dirty field in desc] + * B. copy 4 mbuf point from swring to rx_pkts + * C. calc the number of DD bits among the 4 packets + * [C*. extract the end-of-packet bit, if requested] + * D. fill info. 
from desc to mbuf + */ + + for (pos = 0, nb_pkts_recd = 0; pos < nb_pkts; + pos += RTE_I40E_DESCS_PER_LOOP, + rxdp += RTE_I40E_DESCS_PER_LOOP) { + uint64x2_t descs[RTE_I40E_DESCS_PER_LOOP]; + uint8x16_t pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4; + uint16x8x2_t sterr_tmp1, sterr_tmp2; + uint64x2_t mbp1, mbp2; + uint16x8_t staterr; + uint16x8_t tmp; + uint64_t stat; + + int32x4_t len_shl = {0, 0, 0, PKTLEN_SHIFT}; + + /* B.1 load 1 mbuf point */ + mbp1 = vld1q_u64((uint64_t *)&sw_ring[pos]); + /* Read desc statuses backwards to avoid race condition */ + /* A.1 load 4 pkts desc */ + descs[3] = vld1q_u64((uint64_t *)(rxdp + 3)); + + /* B.2 copy 2 mbuf point into rx_pkts */ + vst1q_u64((uint64_t *)&rx_pkts[pos], mbp1); + + /* B.1 load 1 mbuf point */ + mbp2 = vld1q_u64((uint64_t *)&sw_ring[pos + 2]); + + descs[2] = vld1q_u64((uint64_t *)(rxdp + 2)); + /* B.1 load 2 mbuf point */ + descs[1] = vld1q_u64((uint64_t *)(rxdp + 1)); + descs[0] = vld1q_u64((uint64_t *)(rxdp)); + + /* B.2 copy 2 mbuf point into rx_pkts */ + vst1q_u64((uint64_t *)&rx_pkts[pos + 2], mbp2); + + if (split_packet) { + rte_mbuf_prefetch_part2(rx_pkts[pos]); + rte_mbuf_prefetch_part2(rx_pkts[pos + 1]); + rte_mbuf_prefetch_part2(rx_pkts[pos + 2]); + rte_mbuf_prefetch_part2(rx_pkts[pos + 3]); + } + + /* pkt 3,4 shift the pktlen field to be 16-bit aligned*/ + uint32x4_t len3 = vshlq_u32(vreinterpretq_u32_u64(descs[3]), + len_shl); + descs[3] = vreinterpretq_u64_u32(len3); + uint32x4_t len2 = vshlq_u32(vreinterpretq_u32_u64(descs[2]), + len_shl); + descs[2] = vreinterpretq_u64_u32(len2); + + /* D.1 pkt 3,4 convert format from desc to pktmbuf */ + pkt_mb4 = vqtbl1q_u8(vreinterpretq_u8_u64(descs[3]), shuf_msk); + pkt_mb3 = vqtbl1q_u8(vreinterpretq_u8_u64(descs[2]), shuf_msk); + + /* C.1 4=>2 filter staterr info only */ + sterr_tmp2 = vzipq_u16(vreinterpretq_u16_u64(descs[1]), + vreinterpretq_u16_u64(descs[3])); + /* C.1 4=>2 filter staterr info only */ + sterr_tmp1 = vzipq_u16(vreinterpretq_u16_u64(descs[0]), + vreinterpretq_u16_u64(descs[2])); + + /* C.2 get 4 pkts staterr value */ + staterr = vzipq_u16(sterr_tmp1.val[1], + sterr_tmp2.val[1]).val[0]; + + desc_to_olflags_v(rxq, descs, &rx_pkts[pos]); + + /* D.2 pkt 3,4 set in_port/nb_seg and remove crc */ + tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb4), crc_adjust); + pkt_mb4 = vreinterpretq_u8_u16(tmp); + tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb3), crc_adjust); + pkt_mb3 = vreinterpretq_u8_u16(tmp); + + /* pkt 1,2 shift the pktlen field to be 16-bit aligned*/ + uint32x4_t len1 = vshlq_u32(vreinterpretq_u32_u64(descs[1]), + len_shl); + descs[1] = vreinterpretq_u64_u32(len1); + uint32x4_t len0 = vshlq_u32(vreinterpretq_u32_u64(descs[0]), + len_shl); + descs[0] = vreinterpretq_u64_u32(len0); + + /* D.1 pkt 1,2 convert format from desc to pktmbuf */ + pkt_mb2 = vqtbl1q_u8(vreinterpretq_u8_u64(descs[1]), shuf_msk); + pkt_mb1 = vqtbl1q_u8(vreinterpretq_u8_u64(descs[0]), shuf_msk); + + /* D.3 copy final 3,4 data to rx_pkts */ + vst1q_u8((void *)&rx_pkts[pos + 3]->rx_descriptor_fields1, + pkt_mb4); + vst1q_u8((void *)&rx_pkts[pos + 2]->rx_descriptor_fields1, + pkt_mb3); + + /* D.2 pkt 1,2 set in_port/nb_seg and remove crc */ + tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb2), crc_adjust); + pkt_mb2 = vreinterpretq_u8_u16(tmp); + tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb1), crc_adjust); + pkt_mb1 = vreinterpretq_u8_u16(tmp); + + /* C* extract and record EOP bit */ + if (split_packet) { + uint8x16_t eop_shuf_mask = { + 0x00, 0x02, 0x04, 0x06, + 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, + 
0xFF, 0xFF, 0xFF, 0xFF}; + uint8x16_t eop_bits; + + /* and with mask to extract bits, flipping 1-0 */ + eop_bits = vmvnq_u8(vreinterpretq_u8_u16(staterr)); + eop_bits = vandq_u8(eop_bits, eop_check); + /* the staterr values are not in order, as the count + * count of dd bits doesn't care. However, for end of + * packet tracking, we do care, so shuffle. This also + * compresses the 32-bit values to 8-bit + */ + eop_bits = vqtbl1q_u8(eop_bits, eop_shuf_mask); + + /* store the resulting 32-bit value */ + vst1q_lane_u32((uint32_t *)split_packet, + vreinterpretq_u32_u8(eop_bits), 0); + split_packet += RTE_I40E_DESCS_PER_LOOP; + + /* zero-out next pointers */ + rx_pkts[pos]->next = NULL; + rx_pkts[pos + 1]->next = NULL; + rx_pkts[pos + 2]->next = NULL; + rx_pkts[pos + 3]->next = NULL; + } + + staterr = vshlq_n_u16(staterr, I40E_UINT16_BIT - 1); + staterr = vreinterpretq_u16_s16( + vshrq_n_s16(vreinterpretq_s16_u16(staterr), + I40E_UINT16_BIT - 1)); + stat = ~vgetq_lane_u64(vreinterpretq_u64_u16(staterr), 0); + + rte_prefetch_non_temporal(rxdp + RTE_I40E_DESCS_PER_LOOP); + + /* D.3 copy final 1,2 data to rx_pkts */ + vst1q_u8((void *)&rx_pkts[pos + 1]->rx_descriptor_fields1, + pkt_mb2); + vst1q_u8((void *)&rx_pkts[pos]->rx_descriptor_fields1, + pkt_mb1); + desc_to_ptype_v(descs, &rx_pkts[pos], ptype_tbl); + /* C.4 calc avaialbe number of desc */ + if (unlikely(stat == 0)) { + nb_pkts_recd += RTE_I40E_DESCS_PER_LOOP; + } else { + nb_pkts_recd += __builtin_ctzl(stat) / I40E_UINT16_BIT; + break; + } + } + + /* Update our internal tail pointer */ + rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_pkts_recd); + rxq->rx_tail = (uint16_t)(rxq->rx_tail & (rxq->nb_rx_desc - 1)); + rxq->rxrearm_nb = (uint16_t)(rxq->rxrearm_nb + nb_pkts_recd); + + return nb_pkts_recd; +} + + /* + * Notice: + * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet + * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_I40E_VPMD_RX_BURST + * numbers of DD bits + */ +uint16_t +i40e_recv_pkts_vec(void *__restrict rx_queue, + struct rte_mbuf **__restrict rx_pkts, uint16_t nb_pkts) +{ + return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL); +} + + /* vPMD receive routine that reassembles scattered packets + * Notice: + * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet + * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_I40E_VPMD_RX_BURST + * numbers of DD bits + */ +uint16_t +i40e_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + + struct i40e_rx_queue *rxq = rx_queue; + uint8_t split_flags[RTE_I40E_VPMD_RX_BURST] = {0}; + + /* get some new buffers */ + uint16_t nb_bufs = _recv_raw_pkts_vec(rxq, rx_pkts, nb_pkts, + split_flags); + if (nb_bufs == 0) + return 0; + + /* happy day case, full burst + no packets to be joined */ + const uint64_t *split_fl64 = (uint64_t *)split_flags; + + if (rxq->pkt_first_seg == NULL && + split_fl64[0] == 0 && split_fl64[1] == 0 && + split_fl64[2] == 0 && split_fl64[3] == 0) + return nb_bufs; + + /* reassemble any packets that need reassembly*/ + unsigned i = 0; + + if (rxq->pkt_first_seg == NULL) { + /* find the first split flag, and only reassemble then*/ + while (i < nb_bufs && !split_flags[i]) + i++; + if (i == nb_bufs) + return nb_bufs; + rxq->pkt_first_seg = rx_pkts[i]; + } + return i + reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i, + &split_flags[i]); +} + +static inline void +vtx1(volatile struct i40e_tx_desc *txdp, + struct rte_mbuf *pkt, uint64_t flags) +{ + uint64_t high_qw = (I40E_TX_DESC_DTYPE_DATA | + ((uint64_t)flags << 
I40E_TXD_QW1_CMD_SHIFT) | + ((uint64_t)pkt->data_len << I40E_TXD_QW1_TX_BUF_SZ_SHIFT)); + + uint64x2_t descriptor = {pkt->buf_iova + pkt->data_off, high_qw}; + vst1q_u64((uint64_t *)txdp, descriptor); +} + +static inline void +vtx(volatile struct i40e_tx_desc *txdp, struct rte_mbuf **pkt, + uint16_t nb_pkts, uint64_t flags) +{ + int i; + + for (i = 0; i < nb_pkts; ++i, ++txdp, ++pkt) + vtx1(txdp, *pkt, flags); +} + +uint16_t +i40e_xmit_fixed_burst_vec(void *__restrict tx_queue, + struct rte_mbuf **__restrict tx_pkts, uint16_t nb_pkts) +{ + struct i40e_tx_queue *txq = (struct i40e_tx_queue *)tx_queue; + volatile struct i40e_tx_desc *txdp; + struct i40e_tx_entry *txep; + uint16_t n, nb_commit, tx_id; + uint64_t flags = I40E_TD_CMD; + uint64_t rs = I40E_TX_DESC_CMD_RS | I40E_TD_CMD; + int i; + + /* cross rx_thresh boundary is not allowed */ + nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh); + + if (txq->nb_tx_free < txq->tx_free_thresh) + i40e_tx_free_bufs(txq); + + nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts); + if (unlikely(nb_pkts == 0)) + return 0; + + tx_id = txq->tx_tail; + txdp = &txq->tx_ring[tx_id]; + txep = &txq->sw_ring[tx_id]; + + txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts); + + n = (uint16_t)(txq->nb_tx_desc - tx_id); + if (nb_commit >= n) { + tx_backlog_entry(txep, tx_pkts, n); + + for (i = 0; i < n - 1; ++i, ++tx_pkts, ++txdp) + vtx1(txdp, *tx_pkts, flags); + + vtx1(txdp, *tx_pkts++, rs); + + nb_commit = (uint16_t)(nb_commit - n); + + tx_id = 0; + txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1); + + /* avoid reach the end of ring */ + txdp = &txq->tx_ring[tx_id]; + txep = &txq->sw_ring[tx_id]; + } + + tx_backlog_entry(txep, tx_pkts, nb_commit); + + vtx(txdp, tx_pkts, nb_commit, flags); + + tx_id = (uint16_t)(tx_id + nb_commit); + if (tx_id > txq->tx_next_rs) { + txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |= + rte_cpu_to_le_64(((uint64_t)I40E_TX_DESC_CMD_RS) << + I40E_TXD_QW1_CMD_SHIFT); + txq->tx_next_rs = + (uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh); + } + + txq->tx_tail = tx_id; + + rte_cio_wmb(); + I40E_PCI_REG_WRITE_RELAXED(txq->qtx_tail, tx_id); + + return nb_pkts; +} + +void __rte_cold +i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue *rxq) +{ + _i40e_rx_queue_release_mbufs_vec(rxq); +} + +int __rte_cold +i40e_rxq_vec_setup(struct i40e_rx_queue *rxq) +{ + return i40e_rxq_vec_setup_default(rxq); +} + +int __rte_cold +i40e_txq_vec_setup(struct i40e_tx_queue __rte_unused *txq) +{ + return 0; +} + +int __rte_cold +i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev) +{ + return i40e_rx_vec_dev_conf_condition_check_default(dev); +} diff --git a/src/spdk/dpdk/drivers/net/i40e/i40e_rxtx_vec_sse.c b/src/spdk/dpdk/drivers/net/i40e/i40e_rxtx_vec_sse.c new file mode 100644 index 000000000..698518349 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/i40e/i40e_rxtx_vec_sse.c @@ -0,0 +1,763 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2015 Intel Corporation + */ + +#include +#include +#include + +#include "base/i40e_prototype.h" +#include "base/i40e_type.h" +#include "i40e_ethdev.h" +#include "i40e_rxtx.h" +#include "i40e_rxtx_vec_common.h" + +#include + +#ifndef __INTEL_COMPILER +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif + +static inline void +i40e_rxq_rearm(struct i40e_rx_queue *rxq) +{ + int i; + uint16_t rx_id; + volatile union i40e_rx_desc *rxdp; + struct i40e_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start]; + struct rte_mbuf *mb0, *mb1; + __m128i hdr_room = 
_mm_set_epi64x(RTE_PKTMBUF_HEADROOM, + RTE_PKTMBUF_HEADROOM); + __m128i dma_addr0, dma_addr1; + + rxdp = rxq->rx_ring + rxq->rxrearm_start; + + /* Pull 'n' more MBUFs into the software ring */ + if (rte_mempool_get_bulk(rxq->mp, + (void *)rxep, + RTE_I40E_RXQ_REARM_THRESH) < 0) { + if (rxq->rxrearm_nb + RTE_I40E_RXQ_REARM_THRESH >= + rxq->nb_rx_desc) { + dma_addr0 = _mm_setzero_si128(); + for (i = 0; i < RTE_I40E_DESCS_PER_LOOP; i++) { + rxep[i].mbuf = &rxq->fake_mbuf; + _mm_store_si128((__m128i *)&rxdp[i].read, + dma_addr0); + } + } + rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed += + RTE_I40E_RXQ_REARM_THRESH; + return; + } + + /* Initialize the mbufs in vector, process 2 mbufs in one loop */ + for (i = 0; i < RTE_I40E_RXQ_REARM_THRESH; i += 2, rxep += 2) { + __m128i vaddr0, vaddr1; + + mb0 = rxep[0].mbuf; + mb1 = rxep[1].mbuf; + + /* load buf_addr(lo 64bit) and buf_iova(hi 64bit) */ + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_iova) != + offsetof(struct rte_mbuf, buf_addr) + 8); + vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr); + vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr); + + /* convert pa to dma_addr hdr/data */ + dma_addr0 = _mm_unpackhi_epi64(vaddr0, vaddr0); + dma_addr1 = _mm_unpackhi_epi64(vaddr1, vaddr1); + + /* add headroom to pa values */ + dma_addr0 = _mm_add_epi64(dma_addr0, hdr_room); + dma_addr1 = _mm_add_epi64(dma_addr1, hdr_room); + + /* flush desc with pa dma_addr */ + _mm_store_si128((__m128i *)&rxdp++->read, dma_addr0); + _mm_store_si128((__m128i *)&rxdp++->read, dma_addr1); + } + + rxq->rxrearm_start += RTE_I40E_RXQ_REARM_THRESH; + if (rxq->rxrearm_start >= rxq->nb_rx_desc) + rxq->rxrearm_start = 0; + + rxq->rxrearm_nb -= RTE_I40E_RXQ_REARM_THRESH; + + rx_id = (uint16_t)((rxq->rxrearm_start == 0) ? + (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1)); + + /* Update the tail pointer on the NIC */ + I40E_PCI_REG_WRITE(rxq->qrx_tail, rx_id); +} + +#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC +/* SSE version of FDIR mark extraction for 4 32B descriptors at a time */ +static inline __m128i +descs_to_fdir_32b(volatile union i40e_rx_desc *rxdp, struct rte_mbuf **rx_pkt) +{ + /* 32B descriptors: Load 2nd half of descriptors for FDIR ID data */ + __m128i desc0_qw23, desc1_qw23, desc2_qw23, desc3_qw23; + desc0_qw23 = _mm_loadu_si128((__m128i *)&(rxdp + 0)->wb.qword2); + desc1_qw23 = _mm_loadu_si128((__m128i *)&(rxdp + 1)->wb.qword2); + desc2_qw23 = _mm_loadu_si128((__m128i *)&(rxdp + 2)->wb.qword2); + desc3_qw23 = _mm_loadu_si128((__m128i *)&(rxdp + 3)->wb.qword2); + + /* FDIR ID data: move last u32 of each desc to 4 u32 lanes */ + __m128i v_unpack_01, v_unpack_23; + v_unpack_01 = _mm_unpackhi_epi32(desc0_qw23, desc1_qw23); + v_unpack_23 = _mm_unpackhi_epi32(desc2_qw23, desc3_qw23); + __m128i v_fdir_ids = _mm_unpackhi_epi64(v_unpack_01, v_unpack_23); + + /* Extended Status: extract from each lower 32 bits, to u32 lanes */ + v_unpack_01 = _mm_unpacklo_epi32(desc0_qw23, desc1_qw23); + v_unpack_23 = _mm_unpacklo_epi32(desc2_qw23, desc3_qw23); + __m128i v_flt_status = _mm_unpacklo_epi64(v_unpack_01, v_unpack_23); + + /* Shift u32 left and right to "mask away" bits not required. + * Data required is 4:5 (zero based), so left shift by 26 (32-6) + * and then right shift by 30 (32 - 2 bits required). 
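+ * What remains is the two-bit FLEXBH field; a value of 1 (FLEXBH_FD_ID) marks the FDIR ID in the descriptor as valid.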
+ */ + v_flt_status = _mm_slli_epi32(v_flt_status, 26); + v_flt_status = _mm_srli_epi32(v_flt_status, 30); + + /* Generate constant 1 in all u32 lanes and compare */ + RTE_BUILD_BUG_ON(I40E_RX_DESC_EXT_STATUS_FLEXBH_FD_ID != 1); + __m128i v_zeros = _mm_setzero_si128(); + __m128i v_ffff = _mm_cmpeq_epi32(v_zeros, v_zeros); + __m128i v_u32_one = _mm_srli_epi32(v_ffff, 31); + + /* per desc mask, bits set if FDIR ID is valid */ + __m128i v_fd_id_mask = _mm_cmpeq_epi32(v_flt_status, v_u32_one); + + /* Mask ID data to zero if the FD_ID bit not set in desc */ + v_fdir_ids = _mm_and_si128(v_fdir_ids, v_fd_id_mask); + + /* Extract and store as u32. No advantage to combining into SSE + * stores, there are no surrounding stores to around fdir.hi + */ + rx_pkt[0]->hash.fdir.hi = _mm_extract_epi32(v_fdir_ids, 0); + rx_pkt[1]->hash.fdir.hi = _mm_extract_epi32(v_fdir_ids, 1); + rx_pkt[2]->hash.fdir.hi = _mm_extract_epi32(v_fdir_ids, 2); + rx_pkt[3]->hash.fdir.hi = _mm_extract_epi32(v_fdir_ids, 3); + + /* convert fdir_id_mask into a single bit, then shift as required for + * correct location in the mbuf->olflags + */ + const uint32_t FDIR_ID_BIT_SHIFT = 13; + RTE_BUILD_BUG_ON(PKT_RX_FDIR_ID != (1 << FDIR_ID_BIT_SHIFT)); + v_fd_id_mask = _mm_srli_epi32(v_fd_id_mask, 31); + v_fd_id_mask = _mm_slli_epi32(v_fd_id_mask, FDIR_ID_BIT_SHIFT); + + /* The returned value must be combined into each mbuf. This is already + * being done for RSS and VLAN mbuf olflags, so return bits to OR in. + */ + return v_fd_id_mask; +} + +#else /* 32 or 16B FDIR ID handling */ + +/* Handle 16B descriptor FDIR ID flag setting based on FLM. See scalar driver + * for scalar implementation of the same functionality. + */ +static inline __m128i +descs_to_fdir_16b(__m128i fltstat, __m128i descs[4], struct rte_mbuf **rx_pkt) +{ + /* unpack filter-status data from descriptors */ + __m128i v_tmp_01 = _mm_unpacklo_epi32(descs[0], descs[1]); + __m128i v_tmp_23 = _mm_unpacklo_epi32(descs[2], descs[3]); + __m128i v_fdir_ids = _mm_unpackhi_epi64(v_tmp_01, v_tmp_23); + + /* Generate one bit in each u32 lane */ + __m128i v_zeros = _mm_setzero_si128(); + __m128i v_ffff = _mm_cmpeq_epi32(v_zeros, v_zeros); + __m128i v_111_mask = _mm_srli_epi32(v_ffff, 29); + __m128i v_11_mask = _mm_srli_epi32(v_ffff, 30); + + /* Top lane ones mask for FDIR isolation */ + __m128i v_desc_fdir_mask = _mm_insert_epi32(v_zeros, UINT32_MAX, 1); + + /* Compare and mask away FDIR ID data if bit not set */ + __m128i v_u32_bits = _mm_and_si128(v_111_mask, fltstat); + __m128i v_fdir_id_mask = _mm_cmpeq_epi32(v_u32_bits, v_11_mask); + v_fdir_ids = _mm_and_si128(v_fdir_id_mask, v_fdir_ids); + + /* Store data to fdir.hi in mbuf */ + rx_pkt[0]->hash.fdir.hi = _mm_extract_epi32(v_fdir_ids, 0); + rx_pkt[1]->hash.fdir.hi = _mm_extract_epi32(v_fdir_ids, 1); + rx_pkt[2]->hash.fdir.hi = _mm_extract_epi32(v_fdir_ids, 2); + rx_pkt[3]->hash.fdir.hi = _mm_extract_epi32(v_fdir_ids, 3); + + /* Move fdir_id_mask to correct lane, blend RSS to zero on hits */ + __m128i v_desc3_shift = _mm_alignr_epi8(v_zeros, v_fdir_id_mask, 8); + __m128i v_desc3_mask = _mm_and_si128(v_desc_fdir_mask, v_desc3_shift); + descs[3] = _mm_blendv_epi8(descs[3], _mm_setzero_si128(), v_desc3_mask); + + __m128i v_desc2_shift = _mm_alignr_epi8(v_zeros, v_fdir_id_mask, 4); + __m128i v_desc2_mask = _mm_and_si128(v_desc_fdir_mask, v_desc2_shift); + descs[2] = _mm_blendv_epi8(descs[2], _mm_setzero_si128(), v_desc2_mask); + + __m128i v_desc1_shift = v_fdir_id_mask; + __m128i v_desc1_mask = _mm_and_si128(v_desc_fdir_mask, 
v_desc1_shift); + descs[1] = _mm_blendv_epi8(descs[1], _mm_setzero_si128(), v_desc1_mask); + + __m128i v_desc0_shift = _mm_alignr_epi8(v_fdir_id_mask, v_zeros, 12); + __m128i v_desc0_mask = _mm_and_si128(v_desc_fdir_mask, v_desc0_shift); + descs[0] = _mm_blendv_epi8(descs[0], _mm_setzero_si128(), v_desc0_mask); + + /* Shift to 1 or 0 bit per u32 lane, then to PKT_RX_FDIR_ID offset */ + const uint32_t FDIR_ID_BIT_SHIFT = 13; + RTE_BUILD_BUG_ON(PKT_RX_FDIR_ID != (1 << FDIR_ID_BIT_SHIFT)); + __m128i v_mask_one_bit = _mm_srli_epi32(v_fdir_id_mask, 31); + return _mm_slli_epi32(v_mask_one_bit, FDIR_ID_BIT_SHIFT); +} +#endif + +static inline void +desc_to_olflags_v(struct i40e_rx_queue *rxq, volatile union i40e_rx_desc *rxdp, + __m128i descs[4], struct rte_mbuf **rx_pkts) +{ + const __m128i mbuf_init = _mm_set_epi64x(0, rxq->mbuf_initializer); + __m128i rearm0, rearm1, rearm2, rearm3; + + __m128i vlan0, vlan1, rss, l3_l4e; + + /* mask everything except RSS, flow director and VLAN flags + * bit2 is for VLAN tag, bit11 for flow director indication + * bit13:12 for RSS indication. + */ + const __m128i rss_vlan_msk = _mm_set_epi32( + 0x1c03804, 0x1c03804, 0x1c03804, 0x1c03804); + + const __m128i cksum_mask = _mm_set_epi32( + PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD | + PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD | + PKT_RX_EIP_CKSUM_BAD, + PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD | + PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD | + PKT_RX_EIP_CKSUM_BAD, + PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD | + PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD | + PKT_RX_EIP_CKSUM_BAD, + PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD | + PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD | + PKT_RX_EIP_CKSUM_BAD); + + /* map rss and vlan type to rss hash and vlan flag */ + const __m128i vlan_flags = _mm_set_epi8(0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, + 0, 0, 0, 0); + + const __m128i rss_flags = _mm_set_epi8(0, 0, 0, 0, + 0, 0, 0, 0, + PKT_RX_RSS_HASH | PKT_RX_FDIR, PKT_RX_RSS_HASH, 0, 0, + 0, 0, PKT_RX_FDIR, 0); + + const __m128i l3_l4e_flags = _mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0, + /* shift right 1 bit to make sure it not exceed 255 */ + (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | + PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD | + PKT_RX_L4_CKSUM_BAD) >> 1, + (PKT_RX_EIP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD) >> 1, + (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD) >> 1, + PKT_RX_IP_CKSUM_BAD >> 1, + (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> 1); + + /* Unpack "status" from quadword 1, bits 0:32 */ + vlan0 = _mm_unpackhi_epi32(descs[0], descs[1]); + vlan1 = _mm_unpackhi_epi32(descs[2], descs[3]); + vlan0 = _mm_unpacklo_epi64(vlan0, vlan1); + + vlan1 = _mm_and_si128(vlan0, rss_vlan_msk); + vlan0 = _mm_shuffle_epi8(vlan_flags, vlan1); + + const __m128i desc_fltstat = _mm_srli_epi32(vlan1, 11); + rss = _mm_shuffle_epi8(rss_flags, desc_fltstat); + + l3_l4e = _mm_srli_epi32(vlan1, 22); + l3_l4e = _mm_shuffle_epi8(l3_l4e_flags, l3_l4e); + /* then we shift left 1 bit */ + l3_l4e = _mm_slli_epi32(l3_l4e, 1); + /* we need to mask out the reduntant bits */ + l3_l4e = _mm_and_si128(l3_l4e, cksum_mask); + + vlan0 = _mm_or_si128(vlan0, rss); + vlan0 = _mm_or_si128(vlan0, l3_l4e); + + /* Extract FDIR ID only if FDIR is enabled to avoid useless work */ + if (rxq->fdir_enabled) { +#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC + __m128i v_fdir_ol_flags = descs_to_fdir_32b(rxdp, 
rx_pkts); +#else + (void)rxdp; /* rxdp not required for 16B desc mode */ + __m128i v_fdir_ol_flags = descs_to_fdir_16b(desc_fltstat, + descs, rx_pkts); +#endif + /* OR in ol_flag bits after descriptor speicific extraction */ + vlan0 = _mm_or_si128(vlan0, v_fdir_ol_flags); + } + + /* + * At this point, we have the 4 sets of flags in the low 16-bits + * of each 32-bit value in vlan0. + * We want to extract these, and merge them with the mbuf init data + * so we can do a single 16-byte write to the mbuf to set the flags + * and all the other initialization fields. Extracting the + * appropriate flags means that we have to do a shift and blend for + * each mbuf before we do the write. + */ + rearm0 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vlan0, 8), 0x10); + rearm1 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vlan0, 4), 0x10); + rearm2 = _mm_blend_epi16(mbuf_init, vlan0, 0x10); + rearm3 = _mm_blend_epi16(mbuf_init, _mm_srli_si128(vlan0, 4), 0x10); + + /* write the rearm data and the olflags in one write */ + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) != + offsetof(struct rte_mbuf, rearm_data) + 8); + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, rearm_data) != + RTE_ALIGN(offsetof(struct rte_mbuf, rearm_data), 16)); + _mm_store_si128((__m128i *)&rx_pkts[0]->rearm_data, rearm0); + _mm_store_si128((__m128i *)&rx_pkts[1]->rearm_data, rearm1); + _mm_store_si128((__m128i *)&rx_pkts[2]->rearm_data, rearm2); + _mm_store_si128((__m128i *)&rx_pkts[3]->rearm_data, rearm3); +} + +#define PKTLEN_SHIFT 10 + +static inline void +desc_to_ptype_v(__m128i descs[4], struct rte_mbuf **rx_pkts, + uint32_t *ptype_tbl) +{ + __m128i ptype0 = _mm_unpackhi_epi64(descs[0], descs[1]); + __m128i ptype1 = _mm_unpackhi_epi64(descs[2], descs[3]); + + ptype0 = _mm_srli_epi64(ptype0, 30); + ptype1 = _mm_srli_epi64(ptype1, 30); + + rx_pkts[0]->packet_type = ptype_tbl[_mm_extract_epi8(ptype0, 0)]; + rx_pkts[1]->packet_type = ptype_tbl[_mm_extract_epi8(ptype0, 8)]; + rx_pkts[2]->packet_type = ptype_tbl[_mm_extract_epi8(ptype1, 0)]; + rx_pkts[3]->packet_type = ptype_tbl[_mm_extract_epi8(ptype1, 8)]; +} + + /* + * Notice: + * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet + * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_I40E_VPMD_RX_BURST + * numbers of DD bits + */ +static inline uint16_t +_recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts, uint8_t *split_packet) +{ + volatile union i40e_rx_desc *rxdp; + struct i40e_rx_entry *sw_ring; + uint16_t nb_pkts_recd; + int pos; + uint64_t var; + __m128i shuf_msk; + uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl; + + __m128i crc_adjust = _mm_set_epi16( + 0, 0, 0, /* ignore non-length fields */ + -rxq->crc_len, /* sub crc on data_len */ + 0, /* ignore high-16bits of pkt_len */ + -rxq->crc_len, /* sub crc on pkt_len */ + 0, 0 /* ignore pkt_type field */ + ); + /* + * compile-time check the above crc_adjust layout is correct. + * NOTE: the first field (lowest address) is given last in set_epi16 + * call above. 
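+ * The build-time checks below pin pkt_len and data_len at offsets 4 and 8 of rx_descriptor_fields1, which is what the lane positions above assume.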
+ */ + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) != + offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4); + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) != + offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8); + __m128i dd_check, eop_check; + + /* nb_pkts shall be less equal than RTE_I40E_MAX_RX_BURST */ + nb_pkts = RTE_MIN(nb_pkts, RTE_I40E_MAX_RX_BURST); + + /* nb_pkts has to be floor-aligned to RTE_I40E_DESCS_PER_LOOP */ + nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_I40E_DESCS_PER_LOOP); + + /* Just the act of getting into the function from the application is + * going to cost about 7 cycles + */ + rxdp = rxq->rx_ring + rxq->rx_tail; + + rte_prefetch0(rxdp); + + /* See if we need to rearm the RX queue - gives the prefetch a bit + * of time to act + */ + if (rxq->rxrearm_nb > RTE_I40E_RXQ_REARM_THRESH) + i40e_rxq_rearm(rxq); + + /* Before we start moving massive data around, check to see if + * there is actually a packet available + */ + if (!(rxdp->wb.qword1.status_error_len & + rte_cpu_to_le_32(1 << I40E_RX_DESC_STATUS_DD_SHIFT))) + return 0; + + /* 4 packets DD mask */ + dd_check = _mm_set_epi64x(0x0000000100000001LL, 0x0000000100000001LL); + + /* 4 packets EOP mask */ + eop_check = _mm_set_epi64x(0x0000000200000002LL, 0x0000000200000002LL); + + /* mask to shuffle from desc. to mbuf */ + shuf_msk = _mm_set_epi8( + 7, 6, 5, 4, /* octet 4~7, 32bits rss */ + 3, 2, /* octet 2~3, low 16 bits vlan_macip */ + 15, 14, /* octet 15~14, 16 bits data_len */ + 0xFF, 0xFF, /* skip high 16 bits pkt_len, zero out */ + 15, 14, /* octet 15~14, low 16 bits pkt_len */ + 0xFF, 0xFF, /* pkt_type set as unknown */ + 0xFF, 0xFF /*pkt_type set as unknown */ + ); + /* + * Compile-time verify the shuffle mask + * NOTE: some field positions already verified above, but duplicated + * here for completeness in case of future modifications. + */ + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) != + offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4); + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) != + offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8); + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, vlan_tci) != + offsetof(struct rte_mbuf, rx_descriptor_fields1) + 10); + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) != + offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12); + + /* Cache is empty -> need to scan the buffer rings, but first move + * the next 'n' mbufs into the cache + */ + sw_ring = &rxq->sw_ring[rxq->rx_tail]; + + /* A. load 4 packet in one loop + * [A*. mask out 4 unused dirty field in desc] + * B. copy 4 mbuf point from swring to rx_pkts + * C. calc the number of DD bits among the 4 packets + * [C*. extract the end-of-packet bit, if requested] + * D. fill info. from desc to mbuf + */ + + for (pos = 0, nb_pkts_recd = 0; pos < nb_pkts; + pos += RTE_I40E_DESCS_PER_LOOP, + rxdp += RTE_I40E_DESCS_PER_LOOP) { + __m128i descs[RTE_I40E_DESCS_PER_LOOP]; + __m128i pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4; + __m128i zero, staterr, sterr_tmp1, sterr_tmp2; + /* 2 64 bit or 4 32 bit mbuf pointers in one XMM reg. 
*/ + __m128i mbp1; +#if defined(RTE_ARCH_X86_64) + __m128i mbp2; +#endif + + /* B.1 load 2 (64 bit) or 4 (32 bit) mbuf points */ + mbp1 = _mm_loadu_si128((__m128i *)&sw_ring[pos]); + /* Read desc statuses backwards to avoid race condition */ + /* A.1 load 4 pkts desc */ + descs[3] = _mm_loadu_si128((__m128i *)(rxdp + 3)); + rte_compiler_barrier(); + + /* B.2 copy 2 64 bit or 4 32 bit mbuf point into rx_pkts */ + _mm_storeu_si128((__m128i *)&rx_pkts[pos], mbp1); + +#if defined(RTE_ARCH_X86_64) + /* B.1 load 2 64 bit mbuf points */ + mbp2 = _mm_loadu_si128((__m128i *)&sw_ring[pos+2]); +#endif + + descs[2] = _mm_loadu_si128((__m128i *)(rxdp + 2)); + rte_compiler_barrier(); + /* B.1 load 2 mbuf point */ + descs[1] = _mm_loadu_si128((__m128i *)(rxdp + 1)); + rte_compiler_barrier(); + descs[0] = _mm_loadu_si128((__m128i *)(rxdp)); + +#if defined(RTE_ARCH_X86_64) + /* B.2 copy 2 mbuf point into rx_pkts */ + _mm_storeu_si128((__m128i *)&rx_pkts[pos+2], mbp2); +#endif + + if (split_packet) { + rte_mbuf_prefetch_part2(rx_pkts[pos]); + rte_mbuf_prefetch_part2(rx_pkts[pos + 1]); + rte_mbuf_prefetch_part2(rx_pkts[pos + 2]); + rte_mbuf_prefetch_part2(rx_pkts[pos + 3]); + } + + /* avoid compiler reorder optimization */ + rte_compiler_barrier(); + + /* pkt 3,4 shift the pktlen field to be 16-bit aligned*/ + const __m128i len3 = _mm_slli_epi32(descs[3], PKTLEN_SHIFT); + const __m128i len2 = _mm_slli_epi32(descs[2], PKTLEN_SHIFT); + + /* merge the now-aligned packet length fields back in */ + descs[3] = _mm_blend_epi16(descs[3], len3, 0x80); + descs[2] = _mm_blend_epi16(descs[2], len2, 0x80); + + /* C.1 4=>2 filter staterr info only */ + sterr_tmp2 = _mm_unpackhi_epi32(descs[3], descs[2]); + /* C.1 4=>2 filter staterr info only */ + sterr_tmp1 = _mm_unpackhi_epi32(descs[1], descs[0]); + + desc_to_olflags_v(rxq, rxdp, descs, &rx_pkts[pos]); + + /* D.1 pkt 3,4 convert format from desc to pktmbuf */ + pkt_mb4 = _mm_shuffle_epi8(descs[3], shuf_msk); + pkt_mb3 = _mm_shuffle_epi8(descs[2], shuf_msk); + + /* D.2 pkt 3,4 set in_port/nb_seg and remove crc */ + pkt_mb4 = _mm_add_epi16(pkt_mb4, crc_adjust); + pkt_mb3 = _mm_add_epi16(pkt_mb3, crc_adjust); + + /* pkt 1,2 shift the pktlen field to be 16-bit aligned*/ + const __m128i len1 = _mm_slli_epi32(descs[1], PKTLEN_SHIFT); + const __m128i len0 = _mm_slli_epi32(descs[0], PKTLEN_SHIFT); + + /* merge the now-aligned packet length fields back in */ + descs[1] = _mm_blend_epi16(descs[1], len1, 0x80); + descs[0] = _mm_blend_epi16(descs[0], len0, 0x80); + + /* D.1 pkt 1,2 convert format from desc to pktmbuf */ + pkt_mb2 = _mm_shuffle_epi8(descs[1], shuf_msk); + pkt_mb1 = _mm_shuffle_epi8(descs[0], shuf_msk); + + /* C.2 get 4 pkts staterr value */ + zero = _mm_xor_si128(dd_check, dd_check); + staterr = _mm_unpacklo_epi32(sterr_tmp1, sterr_tmp2); + + /* D.3 copy final 3,4 data to rx_pkts */ + _mm_storeu_si128((void *)&rx_pkts[pos+3]->rx_descriptor_fields1, + pkt_mb4); + _mm_storeu_si128((void *)&rx_pkts[pos+2]->rx_descriptor_fields1, + pkt_mb3); + + /* D.2 pkt 1,2 set in_port/nb_seg and remove crc */ + pkt_mb2 = _mm_add_epi16(pkt_mb2, crc_adjust); + pkt_mb1 = _mm_add_epi16(pkt_mb1, crc_adjust); + + /* C* extract and record EOP bit */ + if (split_packet) { + __m128i eop_shuf_mask = _mm_set_epi8( + 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, + 0x04, 0x0C, 0x00, 0x08 + ); + + /* and with mask to extract bits, flipping 1-0 */ + __m128i eop_bits = _mm_andnot_si128(staterr, eop_check); + /* the staterr values are not in order, as the count + * 
count of dd bits doesn't care. However, for end of + * packet tracking, we do care, so shuffle. This also + * compresses the 32-bit values to 8-bit + */ + eop_bits = _mm_shuffle_epi8(eop_bits, eop_shuf_mask); + /* store the resulting 32-bit value */ + *(int *)split_packet = _mm_cvtsi128_si32(eop_bits); + split_packet += RTE_I40E_DESCS_PER_LOOP; + } + + /* C.3 calc available number of desc */ + staterr = _mm_and_si128(staterr, dd_check); + staterr = _mm_packs_epi32(staterr, zero); + + /* D.3 copy final 1,2 data to rx_pkts */ + _mm_storeu_si128((void *)&rx_pkts[pos+1]->rx_descriptor_fields1, + pkt_mb2); + _mm_storeu_si128((void *)&rx_pkts[pos]->rx_descriptor_fields1, + pkt_mb1); + desc_to_ptype_v(descs, &rx_pkts[pos], ptype_tbl); + /* C.4 calc avaialbe number of desc */ + var = __builtin_popcountll(_mm_cvtsi128_si64(staterr)); + nb_pkts_recd += var; + if (likely(var != RTE_I40E_DESCS_PER_LOOP)) + break; + } + + /* Update our internal tail pointer */ + rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_pkts_recd); + rxq->rx_tail = (uint16_t)(rxq->rx_tail & (rxq->nb_rx_desc - 1)); + rxq->rxrearm_nb = (uint16_t)(rxq->rxrearm_nb + nb_pkts_recd); + + return nb_pkts_recd; +} + + /* + * Notice: + * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet + * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_I40E_VPMD_RX_BURST + * numbers of DD bits + */ +uint16_t +i40e_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL); +} + + /* vPMD receive routine that reassembles scattered packets + * Notice: + * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet + * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_I40E_VPMD_RX_BURST + * numbers of DD bits + */ +uint16_t +i40e_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + + struct i40e_rx_queue *rxq = rx_queue; + uint8_t split_flags[RTE_I40E_VPMD_RX_BURST] = {0}; + + /* get some new buffers */ + uint16_t nb_bufs = _recv_raw_pkts_vec(rxq, rx_pkts, nb_pkts, + split_flags); + if (nb_bufs == 0) + return 0; + + /* happy day case, full burst + no packets to be joined */ + const uint64_t *split_fl64 = (uint64_t *)split_flags; + + if (rxq->pkt_first_seg == NULL && + split_fl64[0] == 0 && split_fl64[1] == 0 && + split_fl64[2] == 0 && split_fl64[3] == 0) + return nb_bufs; + + /* reassemble any packets that need reassembly*/ + unsigned i = 0; + + if (rxq->pkt_first_seg == NULL) { + /* find the first split flag, and only reassemble then*/ + while (i < nb_bufs && !split_flags[i]) + i++; + if (i == nb_bufs) + return nb_bufs; + rxq->pkt_first_seg = rx_pkts[i]; + } + return i + reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i, + &split_flags[i]); +} + +static inline void +vtx1(volatile struct i40e_tx_desc *txdp, + struct rte_mbuf *pkt, uint64_t flags) +{ + uint64_t high_qw = (I40E_TX_DESC_DTYPE_DATA | + ((uint64_t)flags << I40E_TXD_QW1_CMD_SHIFT) | + ((uint64_t)pkt->data_len << I40E_TXD_QW1_TX_BUF_SZ_SHIFT)); + + __m128i descriptor = _mm_set_epi64x(high_qw, + pkt->buf_iova + pkt->data_off); + _mm_store_si128((__m128i *)txdp, descriptor); +} + +static inline void +vtx(volatile struct i40e_tx_desc *txdp, + struct rte_mbuf **pkt, uint16_t nb_pkts, uint64_t flags) +{ + int i; + + for (i = 0; i < nb_pkts; ++i, ++txdp, ++pkt) + vtx1(txdp, *pkt, flags); +} + +uint16_t +i40e_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + struct i40e_tx_queue *txq = (struct i40e_tx_queue *)tx_queue; + volatile struct 
i40e_tx_desc *txdp; + struct i40e_tx_entry *txep; + uint16_t n, nb_commit, tx_id; + uint64_t flags = I40E_TD_CMD; + uint64_t rs = I40E_TX_DESC_CMD_RS | I40E_TD_CMD; + int i; + + /* cross rx_thresh boundary is not allowed */ + nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh); + + if (txq->nb_tx_free < txq->tx_free_thresh) + i40e_tx_free_bufs(txq); + + nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts); + if (unlikely(nb_pkts == 0)) + return 0; + + tx_id = txq->tx_tail; + txdp = &txq->tx_ring[tx_id]; + txep = &txq->sw_ring[tx_id]; + + txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts); + + n = (uint16_t)(txq->nb_tx_desc - tx_id); + if (nb_commit >= n) { + tx_backlog_entry(txep, tx_pkts, n); + + for (i = 0; i < n - 1; ++i, ++tx_pkts, ++txdp) + vtx1(txdp, *tx_pkts, flags); + + vtx1(txdp, *tx_pkts++, rs); + + nb_commit = (uint16_t)(nb_commit - n); + + tx_id = 0; + txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1); + + /* avoid reach the end of ring */ + txdp = &txq->tx_ring[tx_id]; + txep = &txq->sw_ring[tx_id]; + } + + tx_backlog_entry(txep, tx_pkts, nb_commit); + + vtx(txdp, tx_pkts, nb_commit, flags); + + tx_id = (uint16_t)(tx_id + nb_commit); + if (tx_id > txq->tx_next_rs) { + txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |= + rte_cpu_to_le_64(((uint64_t)I40E_TX_DESC_CMD_RS) << + I40E_TXD_QW1_CMD_SHIFT); + txq->tx_next_rs = + (uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh); + } + + txq->tx_tail = tx_id; + + I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail); + + return nb_pkts; +} + +void __rte_cold +i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue *rxq) +{ + _i40e_rx_queue_release_mbufs_vec(rxq); +} + +int __rte_cold +i40e_rxq_vec_setup(struct i40e_rx_queue *rxq) +{ + return i40e_rxq_vec_setup_default(rxq); +} + +int __rte_cold +i40e_txq_vec_setup(struct i40e_tx_queue __rte_unused *txq) +{ + return 0; +} + +int __rte_cold +i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev) +{ + return i40e_rx_vec_dev_conf_condition_check_default(dev); +} diff --git a/src/spdk/dpdk/drivers/net/i40e/i40e_tm.c b/src/spdk/dpdk/drivers/net/i40e/i40e_tm.c new file mode 100644 index 000000000..c76760c97 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/i40e/i40e_tm.c @@ -0,0 +1,971 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2017 Intel Corporation + */ + +#include + +#include "base/i40e_prototype.h" +#include "i40e_ethdev.h" + +static int i40e_tm_capabilities_get(struct rte_eth_dev *dev, + struct rte_tm_capabilities *cap, + struct rte_tm_error *error); +static int i40e_shaper_profile_add(struct rte_eth_dev *dev, + uint32_t shaper_profile_id, + struct rte_tm_shaper_params *profile, + struct rte_tm_error *error); +static int i40e_shaper_profile_del(struct rte_eth_dev *dev, + uint32_t shaper_profile_id, + struct rte_tm_error *error); +static int i40e_node_add(struct rte_eth_dev *dev, uint32_t node_id, + uint32_t parent_node_id, uint32_t priority, + uint32_t weight, uint32_t level_id, + struct rte_tm_node_params *params, + struct rte_tm_error *error); +static int i40e_node_delete(struct rte_eth_dev *dev, uint32_t node_id, + struct rte_tm_error *error); +static int i40e_node_type_get(struct rte_eth_dev *dev, uint32_t node_id, + int *is_leaf, struct rte_tm_error *error); +static int i40e_level_capabilities_get(struct rte_eth_dev *dev, + uint32_t level_id, + struct rte_tm_level_capabilities *cap, + struct rte_tm_error *error); +static int i40e_node_capabilities_get(struct rte_eth_dev *dev, + uint32_t node_id, + struct rte_tm_node_capabilities *cap, + struct 
rte_tm_error *error); +static int i40e_hierarchy_commit(struct rte_eth_dev *dev, + int clear_on_fail, + struct rte_tm_error *error); + +const struct rte_tm_ops i40e_tm_ops = { + .capabilities_get = i40e_tm_capabilities_get, + .shaper_profile_add = i40e_shaper_profile_add, + .shaper_profile_delete = i40e_shaper_profile_del, + .node_add = i40e_node_add, + .node_delete = i40e_node_delete, + .node_type_get = i40e_node_type_get, + .level_capabilities_get = i40e_level_capabilities_get, + .node_capabilities_get = i40e_node_capabilities_get, + .hierarchy_commit = i40e_hierarchy_commit, +}; + +int +i40e_tm_ops_get(struct rte_eth_dev *dev __rte_unused, + void *arg) +{ + if (!arg) + return -EINVAL; + + *(const void **)arg = &i40e_tm_ops; + + return 0; +} + +void +i40e_tm_conf_init(struct rte_eth_dev *dev) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + + /* initialize shaper profile list */ + TAILQ_INIT(&pf->tm_conf.shaper_profile_list); + + /* initialize node configuration */ + pf->tm_conf.root = NULL; + TAILQ_INIT(&pf->tm_conf.tc_list); + TAILQ_INIT(&pf->tm_conf.queue_list); + pf->tm_conf.nb_tc_node = 0; + pf->tm_conf.nb_queue_node = 0; + pf->tm_conf.committed = false; +} + +void +i40e_tm_conf_uninit(struct rte_eth_dev *dev) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_tm_shaper_profile *shaper_profile; + struct i40e_tm_node *tm_node; + + /* clear node configuration */ + while ((tm_node = TAILQ_FIRST(&pf->tm_conf.queue_list))) { + TAILQ_REMOVE(&pf->tm_conf.queue_list, tm_node, node); + rte_free(tm_node); + } + pf->tm_conf.nb_queue_node = 0; + while ((tm_node = TAILQ_FIRST(&pf->tm_conf.tc_list))) { + TAILQ_REMOVE(&pf->tm_conf.tc_list, tm_node, node); + rte_free(tm_node); + } + pf->tm_conf.nb_tc_node = 0; + if (pf->tm_conf.root) { + rte_free(pf->tm_conf.root); + pf->tm_conf.root = NULL; + } + + /* Remove all shaper profiles */ + while ((shaper_profile = + TAILQ_FIRST(&pf->tm_conf.shaper_profile_list))) { + TAILQ_REMOVE(&pf->tm_conf.shaper_profile_list, + shaper_profile, node); + rte_free(shaper_profile); + } +} + +static inline uint16_t +i40e_tc_nb_get(struct rte_eth_dev *dev) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_vsi *main_vsi = pf->main_vsi; + uint16_t sum = 0; + int i; + + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + if (main_vsi->enabled_tc & BIT_ULL(i)) + sum++; + } + + return sum; +} + +static int +i40e_tm_capabilities_get(struct rte_eth_dev *dev, + struct rte_tm_capabilities *cap, + struct rte_tm_error *error) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint16_t tc_nb = i40e_tc_nb_get(dev); + + if (!cap || !error) + return -EINVAL; + + if (tc_nb > hw->func_caps.num_tx_qp) + return -EINVAL; + + error->type = RTE_TM_ERROR_TYPE_NONE; + + /* set all the parameters to 0 first. */ + memset(cap, 0, sizeof(struct rte_tm_capabilities)); + + /** + * support port + TCs + queues + * here shows the max capability not the current configuration. 
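+ * n_nodes_max below is therefore 1 port node plus I40E_MAX_TRAFFIC_CLASS TC nodes plus num_tx_qp queue nodes.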
+ */ + cap->n_nodes_max = 1 + I40E_MAX_TRAFFIC_CLASS + hw->func_caps.num_tx_qp; + cap->n_levels_max = 3; /* port, TC, queue */ + cap->non_leaf_nodes_identical = 1; + cap->leaf_nodes_identical = 1; + cap->shaper_n_max = cap->n_nodes_max; + cap->shaper_private_n_max = cap->n_nodes_max; + cap->shaper_private_dual_rate_n_max = 0; + cap->shaper_private_rate_min = 0; + /* 40Gbps -> 5GBps */ + cap->shaper_private_rate_max = 5000000000ull; + cap->shaper_shared_n_max = 0; + cap->shaper_shared_n_nodes_per_shaper_max = 0; + cap->shaper_shared_n_shapers_per_node_max = 0; + cap->shaper_shared_dual_rate_n_max = 0; + cap->shaper_shared_rate_min = 0; + cap->shaper_shared_rate_max = 0; + cap->sched_n_children_max = hw->func_caps.num_tx_qp; + /** + * HW supports SP. But no plan to support it now. + * So, all the nodes should have the same priority. + */ + cap->sched_sp_n_priorities_max = 1; + cap->sched_wfq_n_children_per_group_max = 0; + cap->sched_wfq_n_groups_max = 0; + /** + * SW only supports fair round robin now. + * So, all the nodes should have the same weight. + */ + cap->sched_wfq_weight_max = 1; + cap->cman_head_drop_supported = 0; + cap->dynamic_update_mask = 0; + cap->shaper_pkt_length_adjust_min = RTE_TM_ETH_FRAMING_OVERHEAD; + cap->shaper_pkt_length_adjust_max = RTE_TM_ETH_FRAMING_OVERHEAD_FCS; + cap->cman_wred_context_n_max = 0; + cap->cman_wred_context_private_n_max = 0; + cap->cman_wred_context_shared_n_max = 0; + cap->cman_wred_context_shared_n_nodes_per_context_max = 0; + cap->cman_wred_context_shared_n_contexts_per_node_max = 0; + cap->stats_mask = 0; + + return 0; +} + +static inline struct i40e_tm_shaper_profile * +i40e_shaper_profile_search(struct rte_eth_dev *dev, + uint32_t shaper_profile_id) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_shaper_profile_list *shaper_profile_list = + &pf->tm_conf.shaper_profile_list; + struct i40e_tm_shaper_profile *shaper_profile; + + TAILQ_FOREACH(shaper_profile, shaper_profile_list, node) { + if (shaper_profile_id == shaper_profile->shaper_profile_id) + return shaper_profile; + } + + return NULL; +} + +static int +i40e_shaper_profile_param_check(struct rte_tm_shaper_params *profile, + struct rte_tm_error *error) +{ + /* min rate not supported */ + if (profile->committed.rate) { + error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE; + error->message = "committed rate not supported"; + return -EINVAL; + } + /* min bucket size not supported */ + if (profile->committed.size) { + error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_SIZE; + error->message = "committed bucket size not supported"; + return -EINVAL; + } + /* max bucket size not supported */ + if (profile->peak.size) { + error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE; + error->message = "peak bucket size not supported"; + return -EINVAL; + } + /* length adjustment not supported */ + if (profile->pkt_length_adjust) { + error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN; + error->message = "packet length adjustment not supported"; + return -EINVAL; + } + + return 0; +} + +static int +i40e_shaper_profile_add(struct rte_eth_dev *dev, + uint32_t shaper_profile_id, + struct rte_tm_shaper_params *profile, + struct rte_tm_error *error) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_tm_shaper_profile *shaper_profile; + int ret; + + if (!profile || !error) + return -EINVAL; + + ret = i40e_shaper_profile_param_check(profile, error); + if (ret) + return ret; + + shaper_profile = 
i40e_shaper_profile_search(dev, shaper_profile_id); + + if (shaper_profile) { + error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID; + error->message = "profile ID exist"; + return -EINVAL; + } + + shaper_profile = rte_zmalloc("i40e_tm_shaper_profile", + sizeof(struct i40e_tm_shaper_profile), + 0); + if (!shaper_profile) + return -ENOMEM; + shaper_profile->shaper_profile_id = shaper_profile_id; + rte_memcpy(&shaper_profile->profile, profile, + sizeof(struct rte_tm_shaper_params)); + TAILQ_INSERT_TAIL(&pf->tm_conf.shaper_profile_list, + shaper_profile, node); + + return 0; +} + +static int +i40e_shaper_profile_del(struct rte_eth_dev *dev, + uint32_t shaper_profile_id, + struct rte_tm_error *error) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_tm_shaper_profile *shaper_profile; + + if (!error) + return -EINVAL; + + shaper_profile = i40e_shaper_profile_search(dev, shaper_profile_id); + + if (!shaper_profile) { + error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID; + error->message = "profile ID not exist"; + return -EINVAL; + } + + /* don't delete a profile if it's used by one or several nodes */ + if (shaper_profile->reference_count) { + error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE; + error->message = "profile in use"; + return -EINVAL; + } + + TAILQ_REMOVE(&pf->tm_conf.shaper_profile_list, shaper_profile, node); + rte_free(shaper_profile); + + return 0; +} + +static inline struct i40e_tm_node * +i40e_tm_node_search(struct rte_eth_dev *dev, + uint32_t node_id, enum i40e_tm_node_type *node_type) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_tm_node_list *queue_list = &pf->tm_conf.queue_list; + struct i40e_tm_node_list *tc_list = &pf->tm_conf.tc_list; + struct i40e_tm_node *tm_node; + + if (pf->tm_conf.root && pf->tm_conf.root->id == node_id) { + *node_type = I40E_TM_NODE_TYPE_PORT; + return pf->tm_conf.root; + } + + TAILQ_FOREACH(tm_node, tc_list, node) { + if (tm_node->id == node_id) { + *node_type = I40E_TM_NODE_TYPE_TC; + return tm_node; + } + } + + TAILQ_FOREACH(tm_node, queue_list, node) { + if (tm_node->id == node_id) { + *node_type = I40E_TM_NODE_TYPE_QUEUE; + return tm_node; + } + } + + return NULL; +} + +static int +i40e_node_param_check(struct rte_eth_dev *dev, uint32_t node_id, + uint32_t priority, uint32_t weight, + struct rte_tm_node_params *params, + struct rte_tm_error *error) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (node_id == RTE_TM_NODE_ID_NULL) { + error->type = RTE_TM_ERROR_TYPE_NODE_ID; + error->message = "invalid node id"; + return -EINVAL; + } + + if (priority) { + error->type = RTE_TM_ERROR_TYPE_NODE_PRIORITY; + error->message = "priority should be 0"; + return -EINVAL; + } + + if (weight != 1) { + error->type = RTE_TM_ERROR_TYPE_NODE_WEIGHT; + error->message = "weight must be 1"; + return -EINVAL; + } + + /* not support shared shaper */ + if (params->shared_shaper_id) { + error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID; + error->message = "shared shaper not supported"; + return -EINVAL; + } + if (params->n_shared_shapers) { + error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS; + error->message = "shared shaper not supported"; + return -EINVAL; + } + + /* for non-leaf node */ + if (node_id >= hw->func_caps.num_tx_qp) { + if (params->nonleaf.wfq_weight_mode) { + error->type = + RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE; + error->message = "WFQ not supported"; + return -EINVAL; + } + if (params->nonleaf.n_sp_priorities != 
1) { + error->type = + RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES; + error->message = "SP priority not supported"; + return -EINVAL; + } else if (params->nonleaf.wfq_weight_mode && + !(*params->nonleaf.wfq_weight_mode)) { + error->type = + RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE; + error->message = "WFP should be byte mode"; + return -EINVAL; + } + + return 0; + } + + /* for leaf node */ + if (params->leaf.cman) { + error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN; + error->message = "Congestion management not supported"; + return -EINVAL; + } + if (params->leaf.wred.wred_profile_id != + RTE_TM_WRED_PROFILE_ID_NONE) { + error->type = + RTE_TM_ERROR_TYPE_NODE_PARAMS_WRED_PROFILE_ID; + error->message = "WRED not supported"; + return -EINVAL; + } + if (params->leaf.wred.shared_wred_context_id) { + error->type = + RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_WRED_CONTEXT_ID; + error->message = "WRED not supported"; + return -EINVAL; + } + if (params->leaf.wred.n_shared_wred_contexts) { + error->type = + RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_WRED_CONTEXTS; + error->message = "WRED not supported"; + return -EINVAL; + } + + return 0; +} + +/** + * Now the TC and queue configuration is controlled by DCB. + * We need check if the node configuration follows the DCB configuration. + * In the future, we may use TM to cover DCB. + */ +static int +i40e_node_add(struct rte_eth_dev *dev, uint32_t node_id, + uint32_t parent_node_id, uint32_t priority, + uint32_t weight, uint32_t level_id, + struct rte_tm_node_params *params, + struct rte_tm_error *error) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + enum i40e_tm_node_type node_type = I40E_TM_NODE_TYPE_MAX; + enum i40e_tm_node_type parent_node_type = I40E_TM_NODE_TYPE_MAX; + struct i40e_tm_shaper_profile *shaper_profile = NULL; + struct i40e_tm_node *tm_node; + struct i40e_tm_node *parent_node; + uint16_t tc_nb = 0; + int ret; + + if (!params || !error) + return -EINVAL; + + /* if already committed */ + if (pf->tm_conf.committed) { + error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED; + error->message = "already committed"; + return -EINVAL; + } + + ret = i40e_node_param_check(dev, node_id, priority, weight, + params, error); + if (ret) + return ret; + + /* check if the node ID is already used */ + if (i40e_tm_node_search(dev, node_id, &node_type)) { + error->type = RTE_TM_ERROR_TYPE_NODE_ID; + error->message = "node id already used"; + return -EINVAL; + } + + /* check the shaper profile id */ + if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) { + shaper_profile = i40e_shaper_profile_search( + dev, params->shaper_profile_id); + if (!shaper_profile) { + error->type = + RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID; + error->message = "shaper profile not exist"; + return -EINVAL; + } + } + + /* root node if not have a parent */ + if (parent_node_id == RTE_TM_NODE_ID_NULL) { + /* check level */ + if (level_id != RTE_TM_NODE_LEVEL_ID_ANY && + level_id > I40E_TM_NODE_TYPE_PORT) { + error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS; + error->message = "Wrong level"; + return -EINVAL; + } + + /* obviously no more than one root */ + if (pf->tm_conf.root) { + error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID; + error->message = "already have a root"; + return -EINVAL; + } + + /* add the root node */ + tm_node = rte_zmalloc("i40e_tm_node", + sizeof(struct i40e_tm_node), + 0); + if (!tm_node) + return -ENOMEM; + tm_node->id = node_id; + tm_node->priority = 
priority; + tm_node->weight = weight; + tm_node->reference_count = 0; + tm_node->parent = NULL; + tm_node->shaper_profile = shaper_profile; + rte_memcpy(&tm_node->params, params, + sizeof(struct rte_tm_node_params)); + pf->tm_conf.root = tm_node; + + /* increase the reference counter of the shaper profile */ + if (shaper_profile) + shaper_profile->reference_count++; + + return 0; + } + + /* TC or queue node */ + /* check the parent node */ + parent_node = i40e_tm_node_search(dev, parent_node_id, + &parent_node_type); + if (!parent_node) { + error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID; + error->message = "parent not exist"; + return -EINVAL; + } + if (parent_node_type != I40E_TM_NODE_TYPE_PORT && + parent_node_type != I40E_TM_NODE_TYPE_TC) { + error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID; + error->message = "parent is not port or TC"; + return -EINVAL; + } + /* check level */ + if (level_id != RTE_TM_NODE_LEVEL_ID_ANY && + level_id != parent_node_type + 1) { + error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS; + error->message = "Wrong level"; + return -EINVAL; + } + + /* check the node number */ + if (parent_node_type == I40E_TM_NODE_TYPE_PORT) { + /* check the TC number */ + tc_nb = i40e_tc_nb_get(dev); + if (pf->tm_conf.nb_tc_node >= tc_nb) { + error->type = RTE_TM_ERROR_TYPE_NODE_ID; + error->message = "too many TCs"; + return -EINVAL; + } + } else { + /* check the queue number */ + if (pf->tm_conf.nb_queue_node >= hw->func_caps.num_tx_qp) { + error->type = RTE_TM_ERROR_TYPE_NODE_ID; + error->message = "too many queues"; + return -EINVAL; + } + + /** + * check the node id. + * For queue, the node id means queue id. + */ + if (node_id >= hw->func_caps.num_tx_qp) { + error->type = RTE_TM_ERROR_TYPE_NODE_ID; + error->message = "too large queue id"; + return -EINVAL; + } + } + + /* add the TC or queue node */ + tm_node = rte_zmalloc("i40e_tm_node", + sizeof(struct i40e_tm_node), + 0); + if (!tm_node) + return -ENOMEM; + tm_node->id = node_id; + tm_node->priority = priority; + tm_node->weight = weight; + tm_node->reference_count = 0; + tm_node->parent = parent_node; + tm_node->shaper_profile = shaper_profile; + rte_memcpy(&tm_node->params, params, + sizeof(struct rte_tm_node_params)); + if (parent_node_type == I40E_TM_NODE_TYPE_PORT) { + TAILQ_INSERT_TAIL(&pf->tm_conf.tc_list, + tm_node, node); + pf->tm_conf.nb_tc_node++; + } else { + TAILQ_INSERT_TAIL(&pf->tm_conf.queue_list, + tm_node, node); + pf->tm_conf.nb_queue_node++; + } + tm_node->parent->reference_count++; + + /* increase the reference counter of the shaper profile */ + if (shaper_profile) + shaper_profile->reference_count++; + + return 0; +} + +static int +i40e_node_delete(struct rte_eth_dev *dev, uint32_t node_id, + struct rte_tm_error *error) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + enum i40e_tm_node_type node_type = I40E_TM_NODE_TYPE_MAX; + struct i40e_tm_node *tm_node; + + if (!error) + return -EINVAL; + + /* if already committed */ + if (pf->tm_conf.committed) { + error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED; + error->message = "already committed"; + return -EINVAL; + } + + if (node_id == RTE_TM_NODE_ID_NULL) { + error->type = RTE_TM_ERROR_TYPE_NODE_ID; + error->message = "invalid node id"; + return -EINVAL; + } + + /* check if the node id exists */ + tm_node = i40e_tm_node_search(dev, node_id, &node_type); + if (!tm_node) { + error->type = RTE_TM_ERROR_TYPE_NODE_ID; + error->message = "no such node"; + return -EINVAL; + } + + /* the node should have no child */ + if 
(tm_node->reference_count) { + error->type = RTE_TM_ERROR_TYPE_NODE_ID; + error->message = + "cannot delete a node which has children"; + return -EINVAL; + } + + /* root node */ + if (node_type == I40E_TM_NODE_TYPE_PORT) { + if (tm_node->shaper_profile) + tm_node->shaper_profile->reference_count--; + rte_free(tm_node); + pf->tm_conf.root = NULL; + return 0; + } + + /* TC or queue node */ + if (tm_node->shaper_profile) + tm_node->shaper_profile->reference_count--; + tm_node->parent->reference_count--; + if (node_type == I40E_TM_NODE_TYPE_TC) { + TAILQ_REMOVE(&pf->tm_conf.tc_list, tm_node, node); + pf->tm_conf.nb_tc_node--; + } else { + TAILQ_REMOVE(&pf->tm_conf.queue_list, tm_node, node); + pf->tm_conf.nb_queue_node--; + } + rte_free(tm_node); + + return 0; +} + +static int +i40e_node_type_get(struct rte_eth_dev *dev, uint32_t node_id, + int *is_leaf, struct rte_tm_error *error) +{ + enum i40e_tm_node_type node_type = I40E_TM_NODE_TYPE_MAX; + struct i40e_tm_node *tm_node; + + if (!is_leaf || !error) + return -EINVAL; + + if (node_id == RTE_TM_NODE_ID_NULL) { + error->type = RTE_TM_ERROR_TYPE_NODE_ID; + error->message = "invalid node id"; + return -EINVAL; + } + + /* check if the node id exists */ + tm_node = i40e_tm_node_search(dev, node_id, &node_type); + if (!tm_node) { + error->type = RTE_TM_ERROR_TYPE_NODE_ID; + error->message = "no such node"; + return -EINVAL; + } + + if (node_type == I40E_TM_NODE_TYPE_QUEUE) + *is_leaf = true; + else + *is_leaf = false; + + return 0; +} + +static int +i40e_level_capabilities_get(struct rte_eth_dev *dev, + uint32_t level_id, + struct rte_tm_level_capabilities *cap, + struct rte_tm_error *error) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (!cap || !error) + return -EINVAL; + + if (level_id >= I40E_TM_NODE_TYPE_MAX) { + error->type = RTE_TM_ERROR_TYPE_LEVEL_ID; + error->message = "too deep level"; + return -EINVAL; + } + + /* root node */ + if (level_id == I40E_TM_NODE_TYPE_PORT) { + cap->n_nodes_max = 1; + cap->n_nodes_nonleaf_max = 1; + cap->n_nodes_leaf_max = 0; + } else if (level_id == I40E_TM_NODE_TYPE_TC) { + /* TC */ + cap->n_nodes_max = I40E_MAX_TRAFFIC_CLASS; + cap->n_nodes_nonleaf_max = I40E_MAX_TRAFFIC_CLASS; + cap->n_nodes_leaf_max = 0; + } else { + /* queue */ + cap->n_nodes_max = hw->func_caps.num_tx_qp; + cap->n_nodes_nonleaf_max = 0; + cap->n_nodes_leaf_max = hw->func_caps.num_tx_qp; + } + + cap->non_leaf_nodes_identical = true; + cap->leaf_nodes_identical = true; + + if (level_id != I40E_TM_NODE_TYPE_QUEUE) { + cap->nonleaf.shaper_private_supported = true; + cap->nonleaf.shaper_private_dual_rate_supported = false; + cap->nonleaf.shaper_private_rate_min = 0; + /* 40Gbps -> 5GBps */ + cap->nonleaf.shaper_private_rate_max = 5000000000ull; + cap->nonleaf.shaper_shared_n_max = 0; + if (level_id == I40E_TM_NODE_TYPE_PORT) + cap->nonleaf.sched_n_children_max = + I40E_MAX_TRAFFIC_CLASS; + else + cap->nonleaf.sched_n_children_max = + hw->func_caps.num_tx_qp; + cap->nonleaf.sched_sp_n_priorities_max = 1; + cap->nonleaf.sched_wfq_n_children_per_group_max = 0; + cap->nonleaf.sched_wfq_n_groups_max = 0; + cap->nonleaf.sched_wfq_weight_max = 1; + cap->nonleaf.stats_mask = 0; + + return 0; + } + + /* queue node */ + cap->leaf.shaper_private_supported = true; + cap->leaf.shaper_private_dual_rate_supported = false; + cap->leaf.shaper_private_rate_min = 0; + /* 40Gbps -> 5GBps */ + cap->leaf.shaper_private_rate_max = 5000000000ull; + cap->leaf.shaper_shared_n_max = 0; + cap->leaf.cman_head_drop_supported = false; + 
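	/*
	 * Editorial note, not part of the upstream file: rte_tm shaper rates
	 * are expressed in bytes per second, which is why the "40Gbps -> 5GBps"
	 * comments resolve to 5000000000ull (40e9 bits/s divided by 8 bits per
	 * byte). The same ceiling is therefore advertised as
	 * shaper_private_rate_max at the port, TC and queue levels.
	 */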
cap->leaf.cman_wred_context_private_supported = true; + cap->leaf.cman_wred_context_shared_n_max = 0; + cap->leaf.stats_mask = 0; + + return 0; +} + +static int +i40e_node_capabilities_get(struct rte_eth_dev *dev, + uint32_t node_id, + struct rte_tm_node_capabilities *cap, + struct rte_tm_error *error) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + enum i40e_tm_node_type node_type; + struct i40e_tm_node *tm_node; + + if (!cap || !error) + return -EINVAL; + + if (node_id == RTE_TM_NODE_ID_NULL) { + error->type = RTE_TM_ERROR_TYPE_NODE_ID; + error->message = "invalid node id"; + return -EINVAL; + } + + /* check if the node id exists */ + tm_node = i40e_tm_node_search(dev, node_id, &node_type); + if (!tm_node) { + error->type = RTE_TM_ERROR_TYPE_NODE_ID; + error->message = "no such node"; + return -EINVAL; + } + + cap->shaper_private_supported = true; + cap->shaper_private_dual_rate_supported = false; + cap->shaper_private_rate_min = 0; + /* 40Gbps -> 5GBps */ + cap->shaper_private_rate_max = 5000000000ull; + cap->shaper_shared_n_max = 0; + + if (node_type == I40E_TM_NODE_TYPE_QUEUE) { + cap->leaf.cman_head_drop_supported = false; + cap->leaf.cman_wred_context_private_supported = true; + cap->leaf.cman_wred_context_shared_n_max = 0; + } else { + if (node_type == I40E_TM_NODE_TYPE_PORT) + cap->nonleaf.sched_n_children_max = + I40E_MAX_TRAFFIC_CLASS; + else + cap->nonleaf.sched_n_children_max = + hw->func_caps.num_tx_qp; + cap->nonleaf.sched_sp_n_priorities_max = 1; + cap->nonleaf.sched_wfq_n_children_per_group_max = 0; + cap->nonleaf.sched_wfq_n_groups_max = 0; + cap->nonleaf.sched_wfq_weight_max = 1; + } + + cap->stats_mask = 0; + + return 0; +} + +static int +i40e_hierarchy_commit(struct rte_eth_dev *dev, + int clear_on_fail, + struct rte_tm_error *error) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_tm_node_list *tc_list = &pf->tm_conf.tc_list; + struct i40e_tm_node_list *queue_list = &pf->tm_conf.queue_list; + struct i40e_tm_node *tm_node; + struct i40e_vsi *vsi; + struct i40e_hw *hw; + struct i40e_aqc_configure_vsi_ets_sla_bw_data tc_bw; + uint64_t bw; + uint8_t tc_map; + int ret; + int i; + + if (!error) + return -EINVAL; + + /* check the setting */ + if (!pf->tm_conf.root) + goto done; + + vsi = pf->main_vsi; + hw = I40E_VSI_TO_HW(vsi); + + /** + * Don't support bandwidth control for port and TCs in parallel. + * If the port has a max bandwidth, the TCs should have none. 
+ */ + /* port */ + if (pf->tm_conf.root->shaper_profile) + bw = pf->tm_conf.root->shaper_profile->profile.peak.rate; + else + bw = 0; + if (bw) { + /* check if any TC has a max bandwidth */ + TAILQ_FOREACH(tm_node, tc_list, node) { + if (tm_node->shaper_profile && + tm_node->shaper_profile->profile.peak.rate) { + error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE; + error->message = "no port and TC max bandwidth" + " in parallel"; + goto fail_clear; + } + } + + /* interpret Bps to 50Mbps */ + bw = bw * 8 / 1000 / 1000 / I40E_QOS_BW_GRANULARITY; + + /* set the max bandwidth */ + ret = i40e_aq_config_vsi_bw_limit(hw, vsi->seid, + (uint16_t)bw, 0, NULL); + if (ret) { + error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE; + error->message = "fail to set port max bandwidth"; + goto fail_clear; + } + + goto done; + } + + /* TC */ + memset(&tc_bw, 0, sizeof(tc_bw)); + tc_bw.tc_valid_bits = vsi->enabled_tc; + tc_map = vsi->enabled_tc; + TAILQ_FOREACH(tm_node, tc_list, node) { + if (!tm_node->reference_count) { + error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS; + error->message = "TC without queue assigned"; + goto fail_clear; + } + + i = 0; + while (i < I40E_MAX_TRAFFIC_CLASS && !(tc_map & BIT_ULL(i))) + i++; + if (i >= I40E_MAX_TRAFFIC_CLASS) { + error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS; + error->message = "cannot find the TC"; + goto fail_clear; + } + tc_map &= ~BIT_ULL(i); + + if (tm_node->shaper_profile) + bw = tm_node->shaper_profile->profile.peak.rate; + else + bw = 0; + if (!bw) + continue; + + /* interpret Bps to 50Mbps */ + bw = bw * 8 / 1000 / 1000 / I40E_QOS_BW_GRANULARITY; + + tc_bw.tc_bw_credits[i] = rte_cpu_to_le_16((uint16_t)bw); + } + + TAILQ_FOREACH(tm_node, queue_list, node) { + if (tm_node->shaper_profile) + bw = tm_node->shaper_profile->profile.peak.rate; + else + bw = 0; + if (bw) { + error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS; + error->message = "not support queue QoS"; + goto fail_clear; + } + } + + ret = i40e_aq_config_vsi_ets_sla_bw_limit(hw, vsi->seid, &tc_bw, NULL); + if (ret) { + error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE; + error->message = "fail to set TC max bandwidth"; + goto fail_clear; + } + +done: + pf->tm_conf.committed = true; + return 0; + +fail_clear: + /* clear all the traffic manager configuration */ + if (clear_on_fail) { + i40e_tm_conf_uninit(dev); + i40e_tm_conf_init(dev); + } + return -EINVAL; +} diff --git a/src/spdk/dpdk/drivers/net/i40e/i40e_vf_representor.c b/src/spdk/dpdk/drivers/net/i40e/i40e_vf_representor.c new file mode 100644 index 000000000..b07b35c03 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/i40e/i40e_vf_representor.c @@ -0,0 +1,535 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Intel Corporation. 
+ */ + +#include +#include +#include +#include + +#include "base/i40e_type.h" +#include "base/virtchnl.h" +#include "i40e_ethdev.h" +#include "i40e_rxtx.h" +#include "rte_pmd_i40e.h" + +static int +i40e_vf_representor_link_update(struct rte_eth_dev *ethdev, + int wait_to_complete) +{ + struct i40e_vf_representor *representor = ethdev->data->dev_private; + + return i40e_dev_link_update(representor->adapter->eth_dev, + wait_to_complete); +} +static int +i40e_vf_representor_dev_infos_get(struct rte_eth_dev *ethdev, + struct rte_eth_dev_info *dev_info) +{ + struct i40e_vf_representor *representor = ethdev->data->dev_private; + + /* get dev info for the vdev */ + dev_info->device = ethdev->device; + + dev_info->max_rx_queues = ethdev->data->nb_rx_queues; + dev_info->max_tx_queues = ethdev->data->nb_tx_queues; + + dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN; + dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX; + dev_info->hash_key_size = (I40E_VFQF_HKEY_MAX_INDEX + 1) * + sizeof(uint32_t); + dev_info->reta_size = ETH_RSS_RETA_SIZE_64; + dev_info->flow_type_rss_offloads = I40E_RSS_OFFLOAD_ALL; + dev_info->max_mac_addrs = I40E_NUM_MACADDR_MAX; + dev_info->rx_offload_capa = + DEV_RX_OFFLOAD_VLAN_STRIP | + DEV_RX_OFFLOAD_QINQ_STRIP | + DEV_RX_OFFLOAD_IPV4_CKSUM | + DEV_RX_OFFLOAD_UDP_CKSUM | + DEV_RX_OFFLOAD_TCP_CKSUM; + dev_info->tx_offload_capa = + DEV_TX_OFFLOAD_MULTI_SEGS | + DEV_TX_OFFLOAD_VLAN_INSERT | + DEV_TX_OFFLOAD_QINQ_INSERT | + DEV_TX_OFFLOAD_IPV4_CKSUM | + DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_TCP_CKSUM | + DEV_TX_OFFLOAD_SCTP_CKSUM | + DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | + DEV_TX_OFFLOAD_TCP_TSO | + DEV_TX_OFFLOAD_VXLAN_TNL_TSO | + DEV_TX_OFFLOAD_GRE_TNL_TSO | + DEV_TX_OFFLOAD_IPIP_TNL_TSO | + DEV_TX_OFFLOAD_GENEVE_TNL_TSO; + + dev_info->default_rxconf = (struct rte_eth_rxconf) { + .rx_thresh = { + .pthresh = I40E_DEFAULT_RX_PTHRESH, + .hthresh = I40E_DEFAULT_RX_HTHRESH, + .wthresh = I40E_DEFAULT_RX_WTHRESH, + }, + .rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH, + .rx_drop_en = 0, + .offloads = 0, + }; + + dev_info->default_txconf = (struct rte_eth_txconf) { + .tx_thresh = { + .pthresh = I40E_DEFAULT_TX_PTHRESH, + .hthresh = I40E_DEFAULT_TX_HTHRESH, + .wthresh = I40E_DEFAULT_TX_WTHRESH, + }, + .tx_free_thresh = I40E_DEFAULT_TX_FREE_THRESH, + .tx_rs_thresh = I40E_DEFAULT_TX_RSBIT_THRESH, + .offloads = 0, + }; + + dev_info->rx_desc_lim = (struct rte_eth_desc_lim) { + .nb_max = I40E_MAX_RING_DESC, + .nb_min = I40E_MIN_RING_DESC, + .nb_align = I40E_ALIGN_RING_DESC, + }; + + dev_info->tx_desc_lim = (struct rte_eth_desc_lim) { + .nb_max = I40E_MAX_RING_DESC, + .nb_min = I40E_MIN_RING_DESC, + .nb_align = I40E_ALIGN_RING_DESC, + }; + + dev_info->switch_info.name = + representor->adapter->eth_dev->device->name; + dev_info->switch_info.domain_id = representor->switch_domain_id; + dev_info->switch_info.port_id = representor->vf_id; + + return 0; +} + +static int +i40e_vf_representor_dev_configure(__rte_unused struct rte_eth_dev *dev) +{ + return 0; +} + +static int +i40e_vf_representor_dev_start(__rte_unused struct rte_eth_dev *dev) +{ + return 0; +} + +static void +i40e_vf_representor_dev_stop(__rte_unused struct rte_eth_dev *dev) +{ +} + +static int +i40e_vf_representor_rx_queue_setup(__rte_unused struct rte_eth_dev *dev, + __rte_unused uint16_t rx_queue_id, + __rte_unused uint16_t nb_rx_desc, + __rte_unused unsigned int socket_id, + __rte_unused const struct rte_eth_rxconf *rx_conf, + __rte_unused struct rte_mempool *mb_pool) +{ + return 0; +} + +static int 
+i40e_vf_representor_tx_queue_setup(__rte_unused struct rte_eth_dev *dev, + __rte_unused uint16_t rx_queue_id, + __rte_unused uint16_t nb_rx_desc, + __rte_unused unsigned int socket_id, + __rte_unused const struct rte_eth_txconf *tx_conf) +{ + return 0; +} + +static void +i40evf_stat_update_48(uint64_t *offset, + uint64_t *stat) +{ + if (*stat >= *offset) + *stat = *stat - *offset; + else + *stat = (uint64_t)((*stat + + ((uint64_t)1 << I40E_48_BIT_WIDTH)) - *offset); + + *stat &= I40E_48_BIT_MASK; +} + +static void +i40evf_stat_update_32(uint64_t *offset, + uint64_t *stat) +{ + if (*stat >= *offset) + *stat = (uint64_t)(*stat - *offset); + else + *stat = (uint64_t)((*stat + + ((uint64_t)1 << I40E_32_BIT_WIDTH)) - *offset); +} + +static int +rte_pmd_i40e_get_vf_native_stats(uint16_t port, + uint16_t vf_id, + struct i40e_eth_stats *stats) +{ + struct rte_eth_dev *dev; + struct i40e_pf *pf; + struct i40e_vsi *vsi; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + + if (!is_i40e_supported(dev)) + return -ENOTSUP; + + pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + + if (vf_id >= pf->vf_num || !pf->vfs) { + PMD_DRV_LOG(ERR, "Invalid VF ID."); + return -EINVAL; + } + + vsi = pf->vfs[vf_id].vsi; + if (!vsi) { + PMD_DRV_LOG(ERR, "Invalid VSI."); + return -EINVAL; + } + + i40e_update_vsi_stats(vsi); + memcpy(stats, &vsi->eth_stats, sizeof(vsi->eth_stats)); + + return 0; +} + +static int +i40e_vf_representor_stats_get(struct rte_eth_dev *ethdev, + struct rte_eth_stats *stats) +{ + struct i40e_vf_representor *representor = ethdev->data->dev_private; + struct i40e_eth_stats native_stats; + int ret; + + ret = rte_pmd_i40e_get_vf_native_stats( + representor->adapter->eth_dev->data->port_id, + representor->vf_id, &native_stats); + if (ret == 0) { + i40evf_stat_update_48( + &representor->stats_offset.rx_bytes, + &native_stats.rx_bytes); + i40evf_stat_update_48( + &representor->stats_offset.rx_unicast, + &native_stats.rx_unicast); + i40evf_stat_update_48( + &representor->stats_offset.rx_multicast, + &native_stats.rx_multicast); + i40evf_stat_update_48( + &representor->stats_offset.rx_broadcast, + &native_stats.rx_broadcast); + i40evf_stat_update_32( + &representor->stats_offset.rx_discards, + &native_stats.rx_discards); + i40evf_stat_update_32( + &representor->stats_offset.rx_unknown_protocol, + &native_stats.rx_unknown_protocol); + i40evf_stat_update_48( + &representor->stats_offset.tx_bytes, + &native_stats.tx_bytes); + i40evf_stat_update_48( + &representor->stats_offset.tx_unicast, + &native_stats.tx_unicast); + i40evf_stat_update_48( + &representor->stats_offset.tx_multicast, + &native_stats.tx_multicast); + i40evf_stat_update_48( + &representor->stats_offset.tx_broadcast, + &native_stats.tx_broadcast); + i40evf_stat_update_32( + &representor->stats_offset.tx_errors, + &native_stats.tx_errors); + i40evf_stat_update_32( + &representor->stats_offset.tx_discards, + &native_stats.tx_discards); + + stats->ipackets = native_stats.rx_unicast + + native_stats.rx_multicast + + native_stats.rx_broadcast; + stats->opackets = native_stats.tx_unicast + + native_stats.tx_multicast + + native_stats.tx_broadcast; + stats->ibytes = native_stats.rx_bytes; + stats->obytes = native_stats.tx_bytes; + stats->ierrors = native_stats.rx_discards; + stats->oerrors = native_stats.tx_errors + native_stats.tx_discards; + } + return ret; +} + +static int +i40e_vf_representor_stats_reset(struct rte_eth_dev *ethdev) +{ + struct i40e_vf_representor *representor = ethdev->data->dev_private; 
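	/*
	 * Editorial note, not part of the upstream file: resetting stats here
	 * only re-snapshots the VF's native counters into stats_offset;
	 * stats_get above then reports deltas through i40evf_stat_update_48()
	 * and i40evf_stat_update_32(), which also cope with hardware counter
	 * wrap-around. For a 48-bit counter, offset = 0xFFFFFFFFFFF0 and
	 * stat = 0x10 give (0x10 + 2^48) - 0xFFFFFFFFFFF0 = 0x20, and the
	 * final "&= I40E_48_BIT_MASK" keeps the result within 48 bits.
	 */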
+ + return rte_pmd_i40e_get_vf_native_stats( + representor->adapter->eth_dev->data->port_id, + representor->vf_id, &representor->stats_offset); +} + +static int +i40e_vf_representor_promiscuous_enable(struct rte_eth_dev *ethdev) +{ + struct i40e_vf_representor *representor = ethdev->data->dev_private; + + return rte_pmd_i40e_set_vf_unicast_promisc( + representor->adapter->eth_dev->data->port_id, + representor->vf_id, 1); +} + +static int +i40e_vf_representor_promiscuous_disable(struct rte_eth_dev *ethdev) +{ + struct i40e_vf_representor *representor = ethdev->data->dev_private; + + return rte_pmd_i40e_set_vf_unicast_promisc( + representor->adapter->eth_dev->data->port_id, + representor->vf_id, 0); +} + +static int +i40e_vf_representor_allmulticast_enable(struct rte_eth_dev *ethdev) +{ + struct i40e_vf_representor *representor = ethdev->data->dev_private; + + return rte_pmd_i40e_set_vf_multicast_promisc( + representor->adapter->eth_dev->data->port_id, + representor->vf_id, 1); +} + +static int +i40e_vf_representor_allmulticast_disable(struct rte_eth_dev *ethdev) +{ + struct i40e_vf_representor *representor = ethdev->data->dev_private; + + return rte_pmd_i40e_set_vf_multicast_promisc( + representor->adapter->eth_dev->data->port_id, + representor->vf_id, 0); +} + +static void +i40e_vf_representor_mac_addr_remove(struct rte_eth_dev *ethdev, uint32_t index) +{ + struct i40e_vf_representor *representor = ethdev->data->dev_private; + + rte_pmd_i40e_remove_vf_mac_addr( + representor->adapter->eth_dev->data->port_id, + representor->vf_id, ðdev->data->mac_addrs[index]); +} + +static int +i40e_vf_representor_mac_addr_set(struct rte_eth_dev *ethdev, + struct rte_ether_addr *mac_addr) +{ + struct i40e_vf_representor *representor = ethdev->data->dev_private; + + return rte_pmd_i40e_set_vf_mac_addr( + representor->adapter->eth_dev->data->port_id, + representor->vf_id, mac_addr); +} + +static int +i40e_vf_representor_vlan_filter_set(struct rte_eth_dev *ethdev, + uint16_t vlan_id, int on) +{ + struct i40e_vf_representor *representor = ethdev->data->dev_private; + uint64_t vf_mask = 1ULL << representor->vf_id; + + return rte_pmd_i40e_set_vf_vlan_filter( + representor->adapter->eth_dev->data->port_id, + vlan_id, vf_mask, on); +} + +static int +i40e_vf_representor_vlan_offload_set(struct rte_eth_dev *ethdev, int mask) +{ + struct i40e_vf_representor *representor = ethdev->data->dev_private; + struct rte_eth_dev *pdev; + struct i40e_pf_vf *vf; + struct i40e_vsi *vsi; + struct i40e_pf *pf; + uint32_t vfid; + + pdev = representor->adapter->eth_dev; + vfid = representor->vf_id; + + if (!is_i40e_supported(pdev)) { + PMD_DRV_LOG(ERR, "Invalid PF dev."); + return -EINVAL; + } + + pf = I40E_DEV_PRIVATE_TO_PF(pdev->data->dev_private); + + if (vfid >= pf->vf_num || !pf->vfs) { + PMD_DRV_LOG(ERR, "Invalid VF ID."); + return -EINVAL; + } + + vf = &pf->vfs[vfid]; + vsi = vf->vsi; + if (!vsi) { + PMD_DRV_LOG(ERR, "Invalid VSI."); + return -EINVAL; + } + + if (mask & ETH_VLAN_FILTER_MASK) { + /* Enable or disable VLAN filtering offload */ + if (ethdev->data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_VLAN_FILTER) + return i40e_vsi_config_vlan_filter(vsi, TRUE); + else + return i40e_vsi_config_vlan_filter(vsi, FALSE); + } + + if (mask & ETH_VLAN_STRIP_MASK) { + /* Enable or disable VLAN stripping offload */ + if (ethdev->data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_VLAN_STRIP) + return i40e_vsi_config_vlan_stripping(vsi, TRUE); + else + return i40e_vsi_config_vlan_stripping(vsi, FALSE); + } + + return -EINVAL; +} + 
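/*
 * Editorial sketch, not part of the upstream patch: the vlan_offload_set
 * callback above is normally reached through the generic ethdev call
 * rte_eth_dev_set_vlan_offload(), which records the requested state in
 * dev_conf.rxmode.offloads and hands the mask of changed offloads to the PMD.
 * Below is a minimal, hedged example of enabling VLAN stripping on a
 * representor port from an application; the helper name is illustrative only.
 */
#include <rte_ethdev.h>

static int
repr_enable_vlan_strip(uint16_t repr_port_id)
{
	/* Current VLAN offload state as a bitmask, or a negative errno. */
	int offload = rte_eth_dev_get_vlan_offload(repr_port_id);

	if (offload < 0)
		return offload;

	/* If the strip state actually changes, this ends up calling
	 * i40e_vf_representor_vlan_offload_set() with ETH_VLAN_STRIP_MASK set.
	 */
	return rte_eth_dev_set_vlan_offload(repr_port_id,
					    offload | ETH_VLAN_STRIP_OFFLOAD);
}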
+static void +i40e_vf_representor_vlan_strip_queue_set(struct rte_eth_dev *ethdev, + __rte_unused uint16_t rx_queue_id, int on) +{ + struct i40e_vf_representor *representor = ethdev->data->dev_private; + + rte_pmd_i40e_set_vf_vlan_stripq( + representor->adapter->eth_dev->data->port_id, + representor->vf_id, on); +} + +static int +i40e_vf_representor_vlan_pvid_set(struct rte_eth_dev *ethdev, uint16_t vlan_id, + __rte_unused int on) +{ + struct i40e_vf_representor *representor = ethdev->data->dev_private; + + return rte_pmd_i40e_set_vf_vlan_insert( + representor->adapter->eth_dev->data->port_id, + representor->vf_id, vlan_id); +} + +static const struct eth_dev_ops i40e_representor_dev_ops = { + .dev_infos_get = i40e_vf_representor_dev_infos_get, + + .dev_start = i40e_vf_representor_dev_start, + .dev_configure = i40e_vf_representor_dev_configure, + .dev_stop = i40e_vf_representor_dev_stop, + + .rx_queue_setup = i40e_vf_representor_rx_queue_setup, + .tx_queue_setup = i40e_vf_representor_tx_queue_setup, + + .link_update = i40e_vf_representor_link_update, + + .stats_get = i40e_vf_representor_stats_get, + .stats_reset = i40e_vf_representor_stats_reset, + + .promiscuous_enable = i40e_vf_representor_promiscuous_enable, + .promiscuous_disable = i40e_vf_representor_promiscuous_disable, + + .allmulticast_enable = i40e_vf_representor_allmulticast_enable, + .allmulticast_disable = i40e_vf_representor_allmulticast_disable, + + .mac_addr_remove = i40e_vf_representor_mac_addr_remove, + .mac_addr_set = i40e_vf_representor_mac_addr_set, + + .vlan_filter_set = i40e_vf_representor_vlan_filter_set, + .vlan_offload_set = i40e_vf_representor_vlan_offload_set, + .vlan_strip_queue_set = i40e_vf_representor_vlan_strip_queue_set, + .vlan_pvid_set = i40e_vf_representor_vlan_pvid_set + +}; + +static uint16_t +i40e_vf_representor_rx_burst(__rte_unused void *rx_queue, + __rte_unused struct rte_mbuf **rx_pkts, __rte_unused uint16_t nb_pkts) +{ + return 0; +} + +static uint16_t +i40e_vf_representor_tx_burst(__rte_unused void *tx_queue, + __rte_unused struct rte_mbuf **tx_pkts, __rte_unused uint16_t nb_pkts) +{ + return 0; +} + +int +i40e_vf_representor_init(struct rte_eth_dev *ethdev, void *init_params) +{ + struct i40e_vf_representor *representor = ethdev->data->dev_private; + + struct i40e_pf *pf; + struct i40e_pf_vf *vf; + struct rte_eth_link *link; + + representor->vf_id = + ((struct i40e_vf_representor *)init_params)->vf_id; + representor->switch_domain_id = + ((struct i40e_vf_representor *)init_params)->switch_domain_id; + representor->adapter = + ((struct i40e_vf_representor *)init_params)->adapter; + + pf = I40E_DEV_PRIVATE_TO_PF( + representor->adapter->eth_dev->data->dev_private); + + if (representor->vf_id >= pf->vf_num) + return -ENODEV; + + /* Set representor device ops */ + ethdev->dev_ops = &i40e_representor_dev_ops; + + /* No data-path, but need stub Rx/Tx functions to avoid crash + * when testing with the likes of testpmd. + */ + ethdev->rx_pkt_burst = i40e_vf_representor_rx_burst; + ethdev->tx_pkt_burst = i40e_vf_representor_tx_burst; + + vf = &pf->vfs[representor->vf_id]; + + if (!vf->vsi) { + PMD_DRV_LOG(ERR, "Invalid VSI."); + return -ENODEV; + } + + ethdev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR; + ethdev->data->representor_id = representor->vf_id; + + /* Setting the number queues allocated to the VF */ + ethdev->data->nb_rx_queues = vf->vsi->nb_qps; + ethdev->data->nb_tx_queues = vf->vsi->nb_qps; + + ethdev->data->mac_addrs = &vf->mac_addr; + + /* Link state. 
Inherited from PF */ + link = &representor->adapter->eth_dev->data->dev_link; + + ethdev->data->dev_link.link_speed = link->link_speed; + ethdev->data->dev_link.link_duplex = link->link_duplex; + ethdev->data->dev_link.link_status = link->link_status; + ethdev->data->dev_link.link_autoneg = link->link_autoneg; + + return 0; +} + +int +i40e_vf_representor_uninit(struct rte_eth_dev *ethdev) +{ + /* mac_addrs must not be freed because part of i40e_pf_vf */ + ethdev->data->mac_addrs = NULL; + + return 0; +} diff --git a/src/spdk/dpdk/drivers/net/i40e/meson.build b/src/spdk/dpdk/drivers/net/i40e/meson.build new file mode 100644 index 000000000..c452420ee --- /dev/null +++ b/src/spdk/dpdk/drivers/net/i40e/meson.build @@ -0,0 +1,52 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2017 Intel Corporation + +cflags += ['-DPF_DRIVER', + '-DVF_DRIVER', + '-DINTEGRATED_VF', + '-DX722_A0_SUPPORT'] + +subdir('base') +objs = [base_objs] + +sources = files( + 'i40e_ethdev.c', + 'i40e_rxtx.c', + 'i40e_ethdev_vf.c', + 'i40e_pf.c', + 'i40e_fdir.c', + 'i40e_flow.c', + 'i40e_tm.c', + 'i40e_vf_representor.c', + 'rte_pmd_i40e.c' + ) + +deps += ['hash'] +includes += include_directories('base') + +if arch_subdir == 'x86' + dpdk_conf.set('RTE_LIBRTE_I40E_INC_VECTOR', 1) + sources += files('i40e_rxtx_vec_sse.c') + + # compile AVX2 version if either: + # a. we have AVX supported in minimum instruction set baseline + # b. it's not minimum instruction set, but supported by compiler + if dpdk_conf.has('RTE_MACHINE_CPUFLAG_AVX2') + cflags += ['-DCC_AVX2_SUPPORT'] + sources += files('i40e_rxtx_vec_avx2.c') + elif cc.has_argument('-mavx2') + cflags += ['-DCC_AVX2_SUPPORT'] + i40e_avx2_lib = static_library('i40e_avx2_lib', + 'i40e_rxtx_vec_avx2.c', + dependencies: [static_rte_ethdev, + static_rte_kvargs, static_rte_hash], + include_directories: includes, + c_args: [cflags, '-mavx2']) + objs += i40e_avx2_lib.extract_objects('i40e_rxtx_vec_avx2.c') + endif +elif arch_subdir == 'ppc' + dpdk_conf.set('RTE_LIBRTE_I40E_INC_VECTOR', 1) + sources += files('i40e_rxtx_vec_altivec.c') +endif + +install_headers('rte_pmd_i40e.h') diff --git a/src/spdk/dpdk/drivers/net/i40e/rte_pmd_i40e.c b/src/spdk/dpdk/drivers/net/i40e/rte_pmd_i40e.c new file mode 100644 index 000000000..446e31710 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/i40e/rte_pmd_i40e.c @@ -0,0 +1,3231 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2017 Intel Corporation + */ + +#include +#include +#include + +#include "base/i40e_prototype.h" +#include "base/i40e_dcb.h" +#include "i40e_ethdev.h" +#include "i40e_pf.h" +#include "i40e_rxtx.h" +#include "rte_pmd_i40e.h" + +int +rte_pmd_i40e_ping_vfs(uint16_t port, uint16_t vf) +{ + struct rte_eth_dev *dev; + struct i40e_pf *pf; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + + if (!is_i40e_supported(dev)) + return -ENOTSUP; + + pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + + if (vf >= pf->vf_num || !pf->vfs) { + PMD_DRV_LOG(ERR, "Invalid argument."); + return -EINVAL; + } + + i40e_notify_vf_link_status(dev, &pf->vfs[vf]); + + return 0; +} + +int +rte_pmd_i40e_set_vf_mac_anti_spoof(uint16_t port, uint16_t vf_id, uint8_t on) +{ + struct rte_eth_dev *dev; + struct i40e_pf *pf; + struct i40e_vsi *vsi; + struct i40e_hw *hw; + struct i40e_vsi_context ctxt; + int ret; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + + if (!is_i40e_supported(dev)) + return -ENOTSUP; + + pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); 
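	/*
	 * Editorial note, not part of the upstream file: this is the common
	 * entry pattern used throughout rte_pmd_i40e.c -- the port and driver
	 * were validated above, the checks below ensure vf_id is under
	 * pf->vf_num with pf->vfs allocated and that the VF's VSI exists, and
	 * only then is I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK toggled in the VSI
	 * security section and pushed to firmware via
	 * i40e_aq_update_vsi_params().
	 */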
+ + if (vf_id >= pf->vf_num || !pf->vfs) { + PMD_DRV_LOG(ERR, "Invalid argument."); + return -EINVAL; + } + + vsi = pf->vfs[vf_id].vsi; + if (!vsi) { + PMD_DRV_LOG(ERR, "Invalid VSI."); + return -EINVAL; + } + + /* Check if it has been already on or off */ + if (vsi->info.valid_sections & + rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SECURITY_VALID)) { + if (on) { + if ((vsi->info.sec_flags & + I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK) == + I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK) + return 0; /* already on */ + } else { + if ((vsi->info.sec_flags & + I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK) == 0) + return 0; /* already off */ + } + } + + vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID); + if (on) + vsi->info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK; + else + vsi->info.sec_flags &= ~I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK; + + memset(&ctxt, 0, sizeof(ctxt)); + rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info)); + ctxt.seid = vsi->seid; + + hw = I40E_VSI_TO_HW(vsi); + ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); + if (ret != I40E_SUCCESS) { + ret = -ENOTSUP; + PMD_DRV_LOG(ERR, "Failed to update VSI params"); + } + + return ret; +} + +static int +i40e_add_rm_all_vlan_filter(struct i40e_vsi *vsi, uint8_t add) +{ + uint32_t j, k; + uint16_t vlan_id; + struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); + struct i40e_aqc_add_remove_vlan_element_data vlan_data = {0}; + int ret; + + for (j = 0; j < I40E_VFTA_SIZE; j++) { + if (!vsi->vfta[j]) + continue; + + for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) { + if (!(vsi->vfta[j] & (1 << k))) + continue; + + vlan_id = j * I40E_UINT32_BIT_SIZE + k; + if (!vlan_id) + continue; + + vlan_data.vlan_tag = rte_cpu_to_le_16(vlan_id); + if (add) + ret = i40e_aq_add_vlan(hw, vsi->seid, + &vlan_data, 1, NULL); + else + ret = i40e_aq_remove_vlan(hw, vsi->seid, + &vlan_data, 1, NULL); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, + "Failed to add/rm vlan filter"); + return ret; + } + } + } + + return I40E_SUCCESS; +} + +int +rte_pmd_i40e_set_vf_vlan_anti_spoof(uint16_t port, uint16_t vf_id, uint8_t on) +{ + struct rte_eth_dev *dev; + struct i40e_pf *pf; + struct i40e_vsi *vsi; + struct i40e_hw *hw; + struct i40e_vsi_context ctxt; + int ret; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + + if (!is_i40e_supported(dev)) + return -ENOTSUP; + + pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + + if (vf_id >= pf->vf_num || !pf->vfs) { + PMD_DRV_LOG(ERR, "Invalid argument."); + return -EINVAL; + } + + vsi = pf->vfs[vf_id].vsi; + if (!vsi) { + PMD_DRV_LOG(ERR, "Invalid VSI."); + return -EINVAL; + } + + /* Check if it has been already on or off */ + if (vsi->vlan_anti_spoof_on == on) + return 0; /* already on or off */ + + vsi->vlan_anti_spoof_on = on; + if (!vsi->vlan_filter_on) { + ret = i40e_add_rm_all_vlan_filter(vsi, on); + if (ret) { + PMD_DRV_LOG(ERR, "Failed to add/remove VLAN filters."); + return -ENOTSUP; + } + } + + vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID); + if (on) + vsi->info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK; + else + vsi->info.sec_flags &= ~I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK; + + memset(&ctxt, 0, sizeof(ctxt)); + rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info)); + ctxt.seid = vsi->seid; + + hw = I40E_VSI_TO_HW(vsi); + ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); + if (ret != I40E_SUCCESS) { + ret = -ENOTSUP; + PMD_DRV_LOG(ERR, "Failed to update VSI params"); + } + + return ret; +} + +static int +i40e_vsi_rm_mac_filter(struct i40e_vsi *vsi) +{ + struct 
i40e_mac_filter *f; + struct i40e_macvlan_filter *mv_f; + int i, vlan_num; + enum rte_mac_filter_type filter_type; + int ret = I40E_SUCCESS; + void *temp; + + /* remove all the MACs */ + TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) { + vlan_num = vsi->vlan_num; + filter_type = f->mac_info.filter_type; + if (filter_type == RTE_MACVLAN_PERFECT_MATCH || + filter_type == RTE_MACVLAN_HASH_MATCH) { + if (vlan_num == 0) { + PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0"); + return I40E_ERR_PARAM; + } + } else if (filter_type == RTE_MAC_PERFECT_MATCH || + filter_type == RTE_MAC_HASH_MATCH) + vlan_num = 1; + + mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0); + if (!mv_f) { + PMD_DRV_LOG(ERR, "failed to allocate memory"); + return I40E_ERR_NO_MEMORY; + } + + for (i = 0; i < vlan_num; i++) { + mv_f[i].filter_type = filter_type; + rte_memcpy(&mv_f[i].macaddr, + &f->mac_info.mac_addr, + ETH_ADDR_LEN); + } + if (filter_type == RTE_MACVLAN_PERFECT_MATCH || + filter_type == RTE_MACVLAN_HASH_MATCH) { + ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num, + &f->mac_info.mac_addr); + if (ret != I40E_SUCCESS) { + rte_free(mv_f); + return ret; + } + } + + ret = i40e_remove_macvlan_filters(vsi, mv_f, vlan_num); + if (ret != I40E_SUCCESS) { + rte_free(mv_f); + return ret; + } + + rte_free(mv_f); + ret = I40E_SUCCESS; + } + + return ret; +} + +static int +i40e_vsi_restore_mac_filter(struct i40e_vsi *vsi) +{ + struct i40e_mac_filter *f; + struct i40e_macvlan_filter *mv_f; + int i, vlan_num = 0; + int ret = I40E_SUCCESS; + void *temp; + + /* restore all the MACs */ + TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) { + if ((f->mac_info.filter_type == RTE_MACVLAN_PERFECT_MATCH) || + (f->mac_info.filter_type == RTE_MACVLAN_HASH_MATCH)) { + /** + * If vlan_num is 0, that's the first time to add mac, + * set mask for vlan_id 0. 
+ */ + if (vsi->vlan_num == 0) { + i40e_set_vlan_filter(vsi, 0, 1); + vsi->vlan_num = 1; + } + vlan_num = vsi->vlan_num; + } else if ((f->mac_info.filter_type == RTE_MAC_PERFECT_MATCH) || + (f->mac_info.filter_type == RTE_MAC_HASH_MATCH)) + vlan_num = 1; + + mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0); + if (!mv_f) { + PMD_DRV_LOG(ERR, "failed to allocate memory"); + return I40E_ERR_NO_MEMORY; + } + + for (i = 0; i < vlan_num; i++) { + mv_f[i].filter_type = f->mac_info.filter_type; + rte_memcpy(&mv_f[i].macaddr, + &f->mac_info.mac_addr, + ETH_ADDR_LEN); + } + + if (f->mac_info.filter_type == RTE_MACVLAN_PERFECT_MATCH || + f->mac_info.filter_type == RTE_MACVLAN_HASH_MATCH) { + ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num, + &f->mac_info.mac_addr); + if (ret != I40E_SUCCESS) { + rte_free(mv_f); + return ret; + } + } + + ret = i40e_add_macvlan_filters(vsi, mv_f, vlan_num); + if (ret != I40E_SUCCESS) { + rte_free(mv_f); + return ret; + } + + rte_free(mv_f); + ret = I40E_SUCCESS; + } + + return ret; +} + +static int +i40e_vsi_set_tx_loopback(struct i40e_vsi *vsi, uint8_t on) +{ + struct i40e_vsi_context ctxt; + struct i40e_hw *hw; + int ret; + + if (!vsi) + return -EINVAL; + + hw = I40E_VSI_TO_HW(vsi); + + /* Use the FW API if FW >= v5.0 */ + if (hw->aq.fw_maj_ver < 5 && hw->mac.type != I40E_MAC_X722) { + PMD_INIT_LOG(ERR, "FW < v5.0, cannot enable loopback"); + return -ENOTSUP; + } + + /* Check if it has been already on or off */ + if (vsi->info.valid_sections & + rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID)) { + if (on) { + if ((vsi->info.switch_id & + I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB) == + I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB) + return 0; /* already on */ + } else { + if ((vsi->info.switch_id & + I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB) == 0) + return 0; /* already off */ + } + } + + /* remove all the MAC and VLAN first */ + ret = i40e_vsi_rm_mac_filter(vsi); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to remove MAC filters."); + return ret; + } + if (vsi->vlan_anti_spoof_on || vsi->vlan_filter_on) { + ret = i40e_add_rm_all_vlan_filter(vsi, 0); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to remove VLAN filters."); + return ret; + } + } + + vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); + if (on) + vsi->info.switch_id |= I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB; + else + vsi->info.switch_id &= ~I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB; + + memset(&ctxt, 0, sizeof(ctxt)); + rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info)); + ctxt.seid = vsi->seid; + + ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "Failed to update VSI params"); + return ret; + } + + /* add all the MAC and VLAN back */ + ret = i40e_vsi_restore_mac_filter(vsi); + if (ret) + return ret; + if (vsi->vlan_anti_spoof_on || vsi->vlan_filter_on) { + ret = i40e_add_rm_all_vlan_filter(vsi, 1); + if (ret) + return ret; + } + + return ret; +} + +int +rte_pmd_i40e_set_tx_loopback(uint16_t port, uint8_t on) +{ + struct rte_eth_dev *dev; + struct i40e_pf *pf; + struct i40e_pf_vf *vf; + struct i40e_vsi *vsi; + uint16_t vf_id; + int ret; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + + if (!is_i40e_supported(dev)) + return -ENOTSUP; + + pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + + /* setup PF TX loopback */ + vsi = pf->main_vsi; + ret = i40e_vsi_set_tx_loopback(vsi, on); + if (ret) + return -ENOTSUP; + + /* setup TX loopback for all the VFs */ + if (!pf->vfs) { + /* if no VF, do nothing. 
*/ + return 0; + } + + for (vf_id = 0; vf_id < pf->vf_num; vf_id++) { + vf = &pf->vfs[vf_id]; + vsi = vf->vsi; + + ret = i40e_vsi_set_tx_loopback(vsi, on); + if (ret) + return -ENOTSUP; + } + + return ret; +} + +int +rte_pmd_i40e_set_vf_unicast_promisc(uint16_t port, uint16_t vf_id, uint8_t on) +{ + struct rte_eth_dev *dev; + struct i40e_pf *pf; + struct i40e_vsi *vsi; + struct i40e_hw *hw; + int ret; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + + if (!is_i40e_supported(dev)) + return -ENOTSUP; + + pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + + if (vf_id >= pf->vf_num || !pf->vfs) { + PMD_DRV_LOG(ERR, "Invalid argument."); + return -EINVAL; + } + + vsi = pf->vfs[vf_id].vsi; + if (!vsi) { + PMD_DRV_LOG(ERR, "Invalid VSI."); + return -EINVAL; + } + + hw = I40E_VSI_TO_HW(vsi); + + ret = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid, + on, NULL, true); + if (ret != I40E_SUCCESS) { + ret = -ENOTSUP; + PMD_DRV_LOG(ERR, "Failed to set unicast promiscuous mode"); + } + + return ret; +} + +int +rte_pmd_i40e_set_vf_multicast_promisc(uint16_t port, uint16_t vf_id, uint8_t on) +{ + struct rte_eth_dev *dev; + struct i40e_pf *pf; + struct i40e_vsi *vsi; + struct i40e_hw *hw; + int ret; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + + if (!is_i40e_supported(dev)) + return -ENOTSUP; + + pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + + if (vf_id >= pf->vf_num || !pf->vfs) { + PMD_DRV_LOG(ERR, "Invalid argument."); + return -EINVAL; + } + + vsi = pf->vfs[vf_id].vsi; + if (!vsi) { + PMD_DRV_LOG(ERR, "Invalid VSI."); + return -EINVAL; + } + + hw = I40E_VSI_TO_HW(vsi); + + ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, + on, NULL); + if (ret != I40E_SUCCESS) { + ret = -ENOTSUP; + PMD_DRV_LOG(ERR, "Failed to set multicast promiscuous mode"); + } + + return ret; +} + +int +rte_pmd_i40e_set_vf_mac_addr(uint16_t port, uint16_t vf_id, + struct rte_ether_addr *mac_addr) +{ + struct i40e_mac_filter *f; + struct rte_eth_dev *dev; + struct i40e_pf_vf *vf; + struct i40e_vsi *vsi; + struct i40e_pf *pf; + void *temp; + + if (i40e_validate_mac_addr((u8 *)mac_addr) != I40E_SUCCESS) + return -EINVAL; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + + if (!is_i40e_supported(dev)) + return -ENOTSUP; + + pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + + if (vf_id >= pf->vf_num || !pf->vfs) + return -EINVAL; + + vf = &pf->vfs[vf_id]; + vsi = vf->vsi; + if (!vsi) { + PMD_DRV_LOG(ERR, "Invalid VSI."); + return -EINVAL; + } + + rte_ether_addr_copy(mac_addr, &vf->mac_addr); + + /* Remove all existing mac */ + TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) + if (i40e_vsi_delete_mac(vsi, &f->mac_info.mac_addr) + != I40E_SUCCESS) + PMD_DRV_LOG(WARNING, "Delete MAC failed"); + + return 0; +} + +static const struct rte_ether_addr null_mac_addr; + +int +rte_pmd_i40e_remove_vf_mac_addr(uint16_t port, uint16_t vf_id, + struct rte_ether_addr *mac_addr) +{ + struct rte_eth_dev *dev; + struct i40e_pf_vf *vf; + struct i40e_vsi *vsi; + struct i40e_pf *pf; + int ret; + + if (i40e_validate_mac_addr((u8 *)mac_addr) != I40E_SUCCESS) + return -EINVAL; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + + if (!is_i40e_supported(dev)) + return -ENOTSUP; + + pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + + if (vf_id >= pf->vf_num || !pf->vfs) + return -EINVAL; + + vf = &pf->vfs[vf_id]; + vsi = vf->vsi; + if (!vsi) { + PMD_DRV_LOG(ERR, "Invalid 
VSI."); + return -EINVAL; + } + + if (rte_is_same_ether_addr(mac_addr, &vf->mac_addr)) + /* Reset the mac with NULL address */ + rte_ether_addr_copy(&null_mac_addr, &vf->mac_addr); + + /* Remove the mac */ + ret = i40e_vsi_delete_mac(vsi, mac_addr); + if (ret != I40E_SUCCESS) + return ret; + return 0; +} + +/* Set vlan strip on/off for specific VF from host */ +int +rte_pmd_i40e_set_vf_vlan_stripq(uint16_t port, uint16_t vf_id, uint8_t on) +{ + struct rte_eth_dev *dev; + struct i40e_pf *pf; + struct i40e_vsi *vsi; + int ret; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + + if (!is_i40e_supported(dev)) + return -ENOTSUP; + + pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + + if (vf_id >= pf->vf_num || !pf->vfs) { + PMD_DRV_LOG(ERR, "Invalid argument."); + return -EINVAL; + } + + vsi = pf->vfs[vf_id].vsi; + + if (!vsi) + return -EINVAL; + + ret = i40e_vsi_config_vlan_stripping(vsi, !!on); + if (ret != I40E_SUCCESS) { + ret = -ENOTSUP; + PMD_DRV_LOG(ERR, "Failed to set VLAN stripping!"); + } + + return ret; +} + +int rte_pmd_i40e_set_vf_vlan_insert(uint16_t port, uint16_t vf_id, + uint16_t vlan_id) +{ + struct rte_eth_dev *dev; + struct i40e_pf *pf; + struct i40e_hw *hw; + struct i40e_vsi *vsi; + struct i40e_vsi_context ctxt; + int ret; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + if (vlan_id > RTE_ETHER_MAX_VLAN_ID) { + PMD_DRV_LOG(ERR, "Invalid VLAN ID."); + return -EINVAL; + } + + dev = &rte_eth_devices[port]; + + if (!is_i40e_supported(dev)) + return -ENOTSUP; + + pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + hw = I40E_PF_TO_HW(pf); + + /** + * return -ENODEV if SRIOV not enabled, VF number not configured + * or no queue assigned. + */ + if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 || + pf->vf_nb_qps == 0) + return -ENODEV; + + if (vf_id >= pf->vf_num || !pf->vfs) { + PMD_DRV_LOG(ERR, "Invalid VF ID."); + return -EINVAL; + } + + vsi = pf->vfs[vf_id].vsi; + if (!vsi) { + PMD_DRV_LOG(ERR, "Invalid VSI."); + return -EINVAL; + } + + vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); + vsi->info.pvid = vlan_id; + if (vlan_id > 0) + vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID; + else + vsi->info.port_vlan_flags &= ~I40E_AQ_VSI_PVLAN_INSERT_PVID; + + memset(&ctxt, 0, sizeof(ctxt)); + rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info)); + ctxt.seid = vsi->seid; + + hw = I40E_VSI_TO_HW(vsi); + ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); + if (ret != I40E_SUCCESS) { + ret = -ENOTSUP; + PMD_DRV_LOG(ERR, "Failed to update VSI params"); + } + + return ret; +} + +int rte_pmd_i40e_set_vf_broadcast(uint16_t port, uint16_t vf_id, + uint8_t on) +{ + struct rte_eth_dev *dev; + struct i40e_pf *pf; + struct i40e_vsi *vsi; + struct i40e_hw *hw; + struct i40e_mac_filter_info filter; + struct rte_ether_addr broadcast = { + .addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff} }; + int ret; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + if (on > 1) { + PMD_DRV_LOG(ERR, "on should be 0 or 1."); + return -EINVAL; + } + + dev = &rte_eth_devices[port]; + + if (!is_i40e_supported(dev)) + return -ENOTSUP; + + pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + hw = I40E_PF_TO_HW(pf); + + if (vf_id >= pf->vf_num || !pf->vfs) { + PMD_DRV_LOG(ERR, "Invalid VF ID."); + return -EINVAL; + } + + /** + * return -ENODEV if SRIOV not enabled, VF number not configured + * or no queue assigned. 
+ */ + if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 || + pf->vf_nb_qps == 0) { + PMD_DRV_LOG(ERR, "SRIOV is not enabled or no queue."); + return -ENODEV; + } + + vsi = pf->vfs[vf_id].vsi; + if (!vsi) { + PMD_DRV_LOG(ERR, "Invalid VSI."); + return -EINVAL; + } + + if (on) { + rte_memcpy(&filter.mac_addr, &broadcast, RTE_ETHER_ADDR_LEN); + filter.filter_type = RTE_MACVLAN_PERFECT_MATCH; + ret = i40e_vsi_add_mac(vsi, &filter); + } else { + ret = i40e_vsi_delete_mac(vsi, &broadcast); + } + + if (ret != I40E_SUCCESS && ret != I40E_ERR_PARAM) { + ret = -ENOTSUP; + PMD_DRV_LOG(ERR, "Failed to set VSI broadcast"); + } else { + ret = 0; + } + + return ret; +} + +int rte_pmd_i40e_set_vf_vlan_tag(uint16_t port, uint16_t vf_id, uint8_t on) +{ + struct rte_eth_dev *dev; + struct i40e_pf *pf; + struct i40e_hw *hw; + struct i40e_vsi *vsi; + struct i40e_vsi_context ctxt; + int ret; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + if (on > 1) { + PMD_DRV_LOG(ERR, "on should be 0 or 1."); + return -EINVAL; + } + + dev = &rte_eth_devices[port]; + + if (!is_i40e_supported(dev)) + return -ENOTSUP; + + pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + hw = I40E_PF_TO_HW(pf); + + /** + * return -ENODEV if SRIOV not enabled, VF number not configured + * or no queue assigned. + */ + if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 || + pf->vf_nb_qps == 0) { + PMD_DRV_LOG(ERR, "SRIOV is not enabled or no queue."); + return -ENODEV; + } + + if (vf_id >= pf->vf_num || !pf->vfs) { + PMD_DRV_LOG(ERR, "Invalid VF ID."); + return -EINVAL; + } + + vsi = pf->vfs[vf_id].vsi; + if (!vsi) { + PMD_DRV_LOG(ERR, "Invalid VSI."); + return -EINVAL; + } + + vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); + if (on) { + vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_TAGGED; + vsi->info.port_vlan_flags &= ~I40E_AQ_VSI_PVLAN_MODE_UNTAGGED; + } else { + vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED; + vsi->info.port_vlan_flags &= ~I40E_AQ_VSI_PVLAN_MODE_TAGGED; + } + + memset(&ctxt, 0, sizeof(ctxt)); + rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info)); + ctxt.seid = vsi->seid; + + hw = I40E_VSI_TO_HW(vsi); + ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); + if (ret != I40E_SUCCESS) { + ret = -ENOTSUP; + PMD_DRV_LOG(ERR, "Failed to update VSI params"); + } + + return ret; +} + +static int +i40e_vlan_filter_count(struct i40e_vsi *vsi) +{ + uint32_t j, k; + uint16_t vlan_id; + int count = 0; + + for (j = 0; j < I40E_VFTA_SIZE; j++) { + if (!vsi->vfta[j]) + continue; + + for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) { + if (!(vsi->vfta[j] & (1 << k))) + continue; + + vlan_id = j * I40E_UINT32_BIT_SIZE + k; + if (!vlan_id) + continue; + + count++; + } + } + + return count; +} + +int rte_pmd_i40e_set_vf_vlan_filter(uint16_t port, uint16_t vlan_id, + uint64_t vf_mask, uint8_t on) +{ + struct rte_eth_dev *dev; + struct i40e_pf *pf; + struct i40e_hw *hw; + struct i40e_vsi *vsi; + uint16_t vf_idx; + int ret = I40E_SUCCESS; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + + if (!is_i40e_supported(dev)) + return -ENOTSUP; + + if (vlan_id > RTE_ETHER_MAX_VLAN_ID || !vlan_id) { + PMD_DRV_LOG(ERR, "Invalid VLAN ID."); + return -EINVAL; + } + + if (vf_mask == 0) { + PMD_DRV_LOG(ERR, "No VF."); + return -EINVAL; + } + + if (on > 1) { + PMD_DRV_LOG(ERR, "on is should be 0 or 1."); + return -EINVAL; + } + + pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + hw = I40E_PF_TO_HW(pf); + + /** + * return -ENODEV if SRIOV not enabled, VF number not configured 
+ * or no queue assigned. + */ + if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 || + pf->vf_nb_qps == 0) { + PMD_DRV_LOG(ERR, "SRIOV is not enabled or no queue."); + return -ENODEV; + } + + for (vf_idx = 0; vf_idx < pf->vf_num && ret == I40E_SUCCESS; vf_idx++) { + if (vf_mask & ((uint64_t)(1ULL << vf_idx))) { + vsi = pf->vfs[vf_idx].vsi; + if (on) { + if (!vsi->vlan_filter_on) { + vsi->vlan_filter_on = true; + i40e_aq_set_vsi_vlan_promisc(hw, + vsi->seid, + false, + NULL); + if (!vsi->vlan_anti_spoof_on) + i40e_add_rm_all_vlan_filter( + vsi, true); + } + ret = i40e_vsi_add_vlan(vsi, vlan_id); + } else { + ret = i40e_vsi_delete_vlan(vsi, vlan_id); + + if (!i40e_vlan_filter_count(vsi)) { + vsi->vlan_filter_on = false; + i40e_aq_set_vsi_vlan_promisc(hw, + vsi->seid, + true, + NULL); + } + } + } + } + + if (ret != I40E_SUCCESS) { + ret = -ENOTSUP; + PMD_DRV_LOG(ERR, "Failed to set VF VLAN filter, on = %d", on); + } + + return ret; +} + +int +rte_pmd_i40e_get_vf_stats(uint16_t port, + uint16_t vf_id, + struct rte_eth_stats *stats) +{ + struct rte_eth_dev *dev; + struct i40e_pf *pf; + struct i40e_vsi *vsi; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + + if (!is_i40e_supported(dev)) + return -ENOTSUP; + + pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + + if (vf_id >= pf->vf_num || !pf->vfs) { + PMD_DRV_LOG(ERR, "Invalid VF ID."); + return -EINVAL; + } + + vsi = pf->vfs[vf_id].vsi; + if (!vsi) { + PMD_DRV_LOG(ERR, "Invalid VSI."); + return -EINVAL; + } + + i40e_update_vsi_stats(vsi); + + stats->ipackets = vsi->eth_stats.rx_unicast + + vsi->eth_stats.rx_multicast + + vsi->eth_stats.rx_broadcast; + stats->opackets = vsi->eth_stats.tx_unicast + + vsi->eth_stats.tx_multicast + + vsi->eth_stats.tx_broadcast; + stats->ibytes = vsi->eth_stats.rx_bytes; + stats->obytes = vsi->eth_stats.tx_bytes; + stats->ierrors = vsi->eth_stats.rx_discards; + stats->oerrors = vsi->eth_stats.tx_errors + vsi->eth_stats.tx_discards; + + return 0; +} + +int +rte_pmd_i40e_reset_vf_stats(uint16_t port, + uint16_t vf_id) +{ + struct rte_eth_dev *dev; + struct i40e_pf *pf; + struct i40e_vsi *vsi; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + + if (!is_i40e_supported(dev)) + return -ENOTSUP; + + pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + + if (vf_id >= pf->vf_num || !pf->vfs) { + PMD_DRV_LOG(ERR, "Invalid VF ID."); + return -EINVAL; + } + + vsi = pf->vfs[vf_id].vsi; + if (!vsi) { + PMD_DRV_LOG(ERR, "Invalid VSI."); + return -EINVAL; + } + + vsi->offset_loaded = false; + i40e_update_vsi_stats(vsi); + + return 0; +} + +int +rte_pmd_i40e_set_vf_max_bw(uint16_t port, uint16_t vf_id, uint32_t bw) +{ + struct rte_eth_dev *dev; + struct i40e_pf *pf; + struct i40e_vsi *vsi; + struct i40e_hw *hw; + int ret = 0; + int i; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + + if (!is_i40e_supported(dev)) + return -ENOTSUP; + + pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + + if (vf_id >= pf->vf_num || !pf->vfs) { + PMD_DRV_LOG(ERR, "Invalid VF ID."); + return -EINVAL; + } + + vsi = pf->vfs[vf_id].vsi; + if (!vsi) { + PMD_DRV_LOG(ERR, "Invalid VSI."); + return -EINVAL; + } + + if (bw > I40E_QOS_BW_MAX) { + PMD_DRV_LOG(ERR, "Bandwidth should not be larger than %dMbps.", + I40E_QOS_BW_MAX); + return -EINVAL; + } + + if (bw % I40E_QOS_BW_GRANULARITY) { + PMD_DRV_LOG(ERR, "Bandwidth should be the multiple of %dMbps.", + I40E_QOS_BW_GRANULARITY); + return -EINVAL; + } + + bw /= 
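The per-VF VLAN-filter and statistics helpers above lend themselves to a short usage sketch. The fragment below is illustrative only and assumes an already initialised i40e PF with at least one VF; the port, VF and VLAN numbers are hypothetical.

#include <stdio.h>
#include <inttypes.h>
#include <rte_ethdev.h>
#include <rte_pmd_i40e.h>

/* Hypothetical sketch: allow VLAN 100 on VF 0, then dump and clear its counters. */
static int
example_vf_vlan_and_stats(uint16_t port)
{
	struct rte_eth_stats st;
	int ret;

	/* vf_mask selects VF 0; "on" = 1 adds the VLAN filter. */
	ret = rte_pmd_i40e_set_vf_vlan_filter(port, 100, 1ULL << 0, 1);
	if (ret != 0)
		return ret;

	ret = rte_pmd_i40e_get_vf_stats(port, 0, &st);
	if (ret != 0)
		return ret;

	printf("VF0: rx=%" PRIu64 " pkts, tx=%" PRIu64 " pkts, rx_drop=%" PRIu64 "\n",
	       st.ipackets, st.opackets, st.ierrors);

	/* Start counting from zero again. */
	return rte_pmd_i40e_reset_vf_stats(port, 0);
}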
I40E_QOS_BW_GRANULARITY; + + hw = I40E_VSI_TO_HW(vsi); + + /* No change. */ + if (bw == vsi->bw_info.bw_limit) { + PMD_DRV_LOG(INFO, + "No change for VF max bandwidth. Nothing to do."); + return 0; + } + + /** + * VF bandwidth limitation and TC bandwidth limitation cannot be + * enabled in parallel, quit if TC bandwidth limitation is enabled. + * + * If bw is 0, means disable bandwidth limitation. Then no need to + * check TC bandwidth limitation. + */ + if (bw) { + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + if ((vsi->enabled_tc & BIT_ULL(i)) && + vsi->bw_info.bw_ets_credits[i]) + break; + } + if (i != I40E_MAX_TRAFFIC_CLASS) { + PMD_DRV_LOG(ERR, + "TC max bandwidth has been set on this VF," + " please disable it first."); + return -EINVAL; + } + } + + ret = i40e_aq_config_vsi_bw_limit(hw, vsi->seid, (uint16_t)bw, 0, NULL); + if (ret) { + PMD_DRV_LOG(ERR, + "Failed to set VF %d bandwidth, err(%d).", + vf_id, ret); + return -EINVAL; + } + + /* Store the configuration. */ + vsi->bw_info.bw_limit = (uint16_t)bw; + vsi->bw_info.bw_max = 0; + + return 0; +} + +int +rte_pmd_i40e_set_vf_tc_bw_alloc(uint16_t port, uint16_t vf_id, + uint8_t tc_num, uint8_t *bw_weight) +{ + struct rte_eth_dev *dev; + struct i40e_pf *pf; + struct i40e_vsi *vsi; + struct i40e_hw *hw; + struct i40e_aqc_configure_vsi_tc_bw_data tc_bw; + int ret = 0; + int i, j; + uint16_t sum; + bool b_change = false; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + + if (!is_i40e_supported(dev)) + return -ENOTSUP; + + pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + + if (vf_id >= pf->vf_num || !pf->vfs) { + PMD_DRV_LOG(ERR, "Invalid VF ID."); + return -EINVAL; + } + + vsi = pf->vfs[vf_id].vsi; + if (!vsi) { + PMD_DRV_LOG(ERR, "Invalid VSI."); + return -EINVAL; + } + + if (tc_num > I40E_MAX_TRAFFIC_CLASS) { + PMD_DRV_LOG(ERR, "TCs should be no more than %d.", + I40E_MAX_TRAFFIC_CLASS); + return -EINVAL; + } + + sum = 0; + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + if (vsi->enabled_tc & BIT_ULL(i)) + sum++; + } + if (sum != tc_num) { + PMD_DRV_LOG(ERR, + "Weight should be set for all %d enabled TCs.", + sum); + return -EINVAL; + } + + sum = 0; + for (i = 0; i < tc_num; i++) { + if (!bw_weight[i]) { + PMD_DRV_LOG(ERR, + "The weight should be 1 at least."); + return -EINVAL; + } + sum += bw_weight[i]; + } + if (sum != 100) { + PMD_DRV_LOG(ERR, + "The summary of the TC weight should be 100."); + return -EINVAL; + } + + /** + * Create the configuration for all the TCs. + */ + memset(&tc_bw, 0, sizeof(tc_bw)); + tc_bw.tc_valid_bits = vsi->enabled_tc; + j = 0; + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + if (vsi->enabled_tc & BIT_ULL(i)) { + if (bw_weight[j] != + vsi->bw_info.bw_ets_share_credits[i]) + b_change = true; + + tc_bw.tc_bw_credits[i] = bw_weight[j]; + j++; + } + } + + /* No change. */ + if (!b_change) { + PMD_DRV_LOG(INFO, + "No change for TC allocated bandwidth." + " Nothing to do."); + return 0; + } + + hw = I40E_VSI_TO_HW(vsi); + + ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &tc_bw, NULL); + if (ret) { + PMD_DRV_LOG(ERR, + "Failed to set VF %d TC bandwidth weight, err(%d).", + vf_id, ret); + return -EINVAL; + } + + /* Store the configuration. 
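As a reading aid for the per-VF bandwidth-limit path above, here is a minimal, hypothetical call sequence. It assumes the rate (in Mbps) respects I40E_QOS_BW_MAX and is a multiple of I40E_QOS_BW_GRANULARITY, exactly as the function itself enforces; 500 Mbps is a placeholder value.

#include <stdio.h>
#include <stdint.h>
#include <rte_pmd_i40e.h>

/* Illustrative only: cap VF 1 at 500 Mbps, then remove the cap again. */
static int
example_vf_max_bw(uint16_t port)
{
	int ret;

	ret = rte_pmd_i40e_set_vf_max_bw(port, 1, 500);
	if (ret != 0) {
		printf("set_vf_max_bw failed: %d\n", ret);
		return ret;
	}

	/* A bandwidth of 0 disables the per-VF limit. */
	return rte_pmd_i40e_set_vf_max_bw(port, 1, 0);
}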
*/ + j = 0; + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + if (vsi->enabled_tc & BIT_ULL(i)) { + vsi->bw_info.bw_ets_share_credits[i] = bw_weight[j]; + j++; + } + } + + return 0; +} + +int +rte_pmd_i40e_set_vf_tc_max_bw(uint16_t port, uint16_t vf_id, + uint8_t tc_no, uint32_t bw) +{ + struct rte_eth_dev *dev; + struct i40e_pf *pf; + struct i40e_vsi *vsi; + struct i40e_hw *hw; + struct i40e_aqc_configure_vsi_ets_sla_bw_data tc_bw; + int ret = 0; + int i; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + + if (!is_i40e_supported(dev)) + return -ENOTSUP; + + pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + + if (vf_id >= pf->vf_num || !pf->vfs) { + PMD_DRV_LOG(ERR, "Invalid VF ID."); + return -EINVAL; + } + + vsi = pf->vfs[vf_id].vsi; + if (!vsi) { + PMD_DRV_LOG(ERR, "Invalid VSI."); + return -EINVAL; + } + + if (bw > I40E_QOS_BW_MAX) { + PMD_DRV_LOG(ERR, "Bandwidth should not be larger than %dMbps.", + I40E_QOS_BW_MAX); + return -EINVAL; + } + + if (bw % I40E_QOS_BW_GRANULARITY) { + PMD_DRV_LOG(ERR, "Bandwidth should be the multiple of %dMbps.", + I40E_QOS_BW_GRANULARITY); + return -EINVAL; + } + + bw /= I40E_QOS_BW_GRANULARITY; + + if (tc_no >= I40E_MAX_TRAFFIC_CLASS) { + PMD_DRV_LOG(ERR, "TC No. should be less than %d.", + I40E_MAX_TRAFFIC_CLASS); + return -EINVAL; + } + + hw = I40E_VSI_TO_HW(vsi); + + if (!(vsi->enabled_tc & BIT_ULL(tc_no))) { + PMD_DRV_LOG(ERR, "VF %d TC %d isn't enabled.", + vf_id, tc_no); + return -EINVAL; + } + + /* No change. */ + if (bw == vsi->bw_info.bw_ets_credits[tc_no]) { + PMD_DRV_LOG(INFO, + "No change for TC max bandwidth. Nothing to do."); + return 0; + } + + /** + * VF bandwidth limitation and TC bandwidth limitation cannot be + * enabled in parallel, disable VF bandwidth limitation if it's + * enabled. + * If bw is 0, means disable bandwidth limitation. Then no need to + * care about VF bandwidth limitation configuration. + */ + if (bw && vsi->bw_info.bw_limit) { + ret = i40e_aq_config_vsi_bw_limit(hw, vsi->seid, 0, 0, NULL); + if (ret) { + PMD_DRV_LOG(ERR, + "Failed to disable VF(%d)" + " bandwidth limitation, err(%d).", + vf_id, ret); + return -EINVAL; + } + + PMD_DRV_LOG(INFO, + "VF max bandwidth is disabled according" + " to TC max bandwidth setting."); + } + + /** + * Get all the TCs' info to create a whole picture. + * Because the incremental change isn't permitted. + */ + memset(&tc_bw, 0, sizeof(tc_bw)); + tc_bw.tc_valid_bits = vsi->enabled_tc; + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + if (vsi->enabled_tc & BIT_ULL(i)) { + tc_bw.tc_bw_credits[i] = + rte_cpu_to_le_16( + vsi->bw_info.bw_ets_credits[i]); + } + } + tc_bw.tc_bw_credits[tc_no] = rte_cpu_to_le_16((uint16_t)bw); + + ret = i40e_aq_config_vsi_ets_sla_bw_limit(hw, vsi->seid, &tc_bw, NULL); + if (ret) { + PMD_DRV_LOG(ERR, + "Failed to set VF %d TC %d max bandwidth, err(%d).", + vf_id, tc_no, ret); + return -EINVAL; + } + + /* Store the configuration. 
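The TC weight allocation above requires one weight per enabled TC, each at least 1 and summing to 100. A hedged sketch, assuming the VF's VSI has exactly two TCs enabled (a hypothetical configuration):

#include <stdint.h>
#include <rte_pmd_i40e.h>

/* Illustrative only: split VF 0's bandwidth 60/40 between its two enabled TCs.
 * tc_num must equal the number of TCs enabled on the VF's VSI and the
 * weights must sum to 100, as validated by the function above. */
static int
example_vf_tc_weights(uint16_t port)
{
	uint8_t weights[2] = { 60, 40 };

	return rte_pmd_i40e_set_vf_tc_bw_alloc(port, 0, 2, weights);
}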
*/ + vsi->bw_info.bw_ets_credits[tc_no] = (uint16_t)bw; + + return 0; +} + +int +rte_pmd_i40e_set_tc_strict_prio(uint16_t port, uint8_t tc_map) +{ + struct rte_eth_dev *dev; + struct i40e_pf *pf; + struct i40e_vsi *vsi; + struct i40e_veb *veb; + struct i40e_hw *hw; + struct i40e_aqc_configure_switching_comp_ets_data ets_data; + int i; + int ret; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + + if (!is_i40e_supported(dev)) + return -ENOTSUP; + + pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + + vsi = pf->main_vsi; + if (!vsi) { + PMD_DRV_LOG(ERR, "Invalid VSI."); + return -EINVAL; + } + + veb = vsi->veb; + if (!veb) { + PMD_DRV_LOG(ERR, "Invalid VEB."); + return -EINVAL; + } + + if ((tc_map & veb->enabled_tc) != tc_map) { + PMD_DRV_LOG(ERR, + "TC bitmap isn't the subset of enabled TCs 0x%x.", + veb->enabled_tc); + return -EINVAL; + } + + if (tc_map == veb->strict_prio_tc) { + PMD_DRV_LOG(INFO, "No change for TC bitmap. Nothing to do."); + return 0; + } + + hw = I40E_VSI_TO_HW(vsi); + + /* Disable DCBx if it's the first time to set strict priority. */ + if (!veb->strict_prio_tc) { + ret = i40e_aq_stop_lldp(hw, true, true, NULL); + if (ret) + PMD_DRV_LOG(INFO, + "Failed to disable DCBx as it's already" + " disabled."); + else + PMD_DRV_LOG(INFO, + "DCBx is disabled according to strict" + " priority setting."); + } + + memset(&ets_data, 0, sizeof(ets_data)); + ets_data.tc_valid_bits = veb->enabled_tc; + ets_data.seepage = I40E_AQ_ETS_SEEPAGE_EN_MASK; + ets_data.tc_strict_priority_flags = tc_map; + /* Get all TCs' bandwidth. */ + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + if (veb->enabled_tc & BIT_ULL(i)) { + /* For rubust, if bandwidth is 0, use 1 instead. */ + if (veb->bw_info.bw_ets_share_credits[i]) + ets_data.tc_bw_share_credits[i] = + veb->bw_info.bw_ets_share_credits[i]; + else + ets_data.tc_bw_share_credits[i] = + I40E_QOS_BW_WEIGHT_MIN; + } + } + + if (!veb->strict_prio_tc) + ret = i40e_aq_config_switch_comp_ets( + hw, veb->uplink_seid, + &ets_data, i40e_aqc_opc_enable_switching_comp_ets, + NULL); + else if (tc_map) + ret = i40e_aq_config_switch_comp_ets( + hw, veb->uplink_seid, + &ets_data, i40e_aqc_opc_modify_switching_comp_ets, + NULL); + else + ret = i40e_aq_config_switch_comp_ets( + hw, veb->uplink_seid, + &ets_data, i40e_aqc_opc_disable_switching_comp_ets, + NULL); + + if (ret) { + PMD_DRV_LOG(ERR, + "Failed to set TCs' strict priority mode." + " err (%d)", ret); + return -EINVAL; + } + + veb->strict_prio_tc = tc_map; + + /* Enable DCBx again, if all the TCs' strict priority disabled. 
*/ + if (!tc_map) { + ret = i40e_aq_start_lldp(hw, true, NULL); + if (ret) { + PMD_DRV_LOG(ERR, + "Failed to enable DCBx, err(%d).", ret); + return -EINVAL; + } + + PMD_DRV_LOG(INFO, + "DCBx is enabled again according to strict" + " priority setting."); + } + + return ret; +} + +#define I40E_PROFILE_INFO_SIZE sizeof(struct rte_pmd_i40e_profile_info) +#define I40E_MAX_PROFILE_NUM 16 + +static void +i40e_generate_profile_info_sec(char *name, struct i40e_ddp_version *version, + uint32_t track_id, uint8_t *profile_info_sec, + bool add) +{ + struct i40e_profile_section_header *sec = NULL; + struct i40e_profile_info *pinfo; + + sec = (struct i40e_profile_section_header *)profile_info_sec; + sec->tbl_size = 1; + sec->data_end = sizeof(struct i40e_profile_section_header) + + sizeof(struct i40e_profile_info); + sec->section.type = SECTION_TYPE_INFO; + sec->section.offset = sizeof(struct i40e_profile_section_header); + sec->section.size = sizeof(struct i40e_profile_info); + pinfo = (struct i40e_profile_info *)(profile_info_sec + + sec->section.offset); + pinfo->track_id = track_id; + memcpy(pinfo->name, name, I40E_DDP_NAME_SIZE); + memcpy(&pinfo->version, version, sizeof(struct i40e_ddp_version)); + if (add) + pinfo->op = I40E_DDP_ADD_TRACKID; + else + pinfo->op = I40E_DDP_REMOVE_TRACKID; +} + +static enum i40e_status_code +i40e_add_rm_profile_info(struct i40e_hw *hw, uint8_t *profile_info_sec) +{ + enum i40e_status_code status = I40E_SUCCESS; + struct i40e_profile_section_header *sec; + uint32_t track_id; + uint32_t offset = 0; + uint32_t info = 0; + + sec = (struct i40e_profile_section_header *)profile_info_sec; + track_id = ((struct i40e_profile_info *)(profile_info_sec + + sec->section.offset))->track_id; + + status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end, + track_id, &offset, &info, NULL); + if (status) + PMD_DRV_LOG(ERR, "Failed to add/remove profile info: " + "offset %d, info %d", + offset, info); + + return status; +} + +/* Check if the profile info exists */ +static int +i40e_check_profile_info(uint16_t port, uint8_t *profile_info_sec) +{ + struct rte_eth_dev *dev = &rte_eth_devices[port]; + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint8_t *buff; + struct rte_pmd_i40e_profile_list *p_list; + struct rte_pmd_i40e_profile_info *pinfo, *p; + uint32_t i; + int ret; + static const uint32_t group_mask = 0x00ff0000; + + pinfo = (struct rte_pmd_i40e_profile_info *)(profile_info_sec + + sizeof(struct i40e_profile_section_header)); + if (pinfo->track_id == 0) { + PMD_DRV_LOG(INFO, "Read-only profile."); + return 0; + } + buff = rte_zmalloc("pinfo_list", + (I40E_PROFILE_INFO_SIZE * I40E_MAX_PROFILE_NUM + 4), + 0); + if (!buff) { + PMD_DRV_LOG(ERR, "failed to allocate memory"); + return -1; + } + + ret = i40e_aq_get_ddp_list( + hw, (void *)buff, + (I40E_PROFILE_INFO_SIZE * I40E_MAX_PROFILE_NUM + 4), + 0, NULL); + if (ret) { + PMD_DRV_LOG(ERR, "Failed to get profile info list."); + rte_free(buff); + return -1; + } + p_list = (struct rte_pmd_i40e_profile_list *)buff; + for (i = 0; i < p_list->p_count; i++) { + p = &p_list->p_info[i]; + if (pinfo->track_id == p->track_id) { + PMD_DRV_LOG(INFO, "Profile exists."); + rte_free(buff); + return 1; + } + } + /* profile with group id 0xff is compatible with any other profile */ + if ((pinfo->track_id & group_mask) == group_mask) { + rte_free(buff); + return 0; + } + for (i = 0; i < p_list->p_count; i++) { + p = &p_list->p_info[i]; + if ((p->track_id & group_mask) == 0) { + PMD_DRV_LOG(INFO, "Profile of the group 0 
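rte_pmd_i40e_set_tc_strict_prio() takes a bitmap that must be a subset of the TCs enabled on the main VSI's VEB; a non-zero map enables strict priority (stopping LLDP/DCBx the first time it is used) and 0 restores the default behaviour. A minimal sketch, assuming TC0 is enabled on the port:

#include <stdint.h>
#include <rte_pmd_i40e.h>

/* Illustrative only: put TC0 into strict-priority mode, then revert. */
static int
example_tc_strict_prio(uint16_t port)
{
	int ret;

	ret = rte_pmd_i40e_set_tc_strict_prio(port, 0x1);	/* bit 0 = TC0 */
	if (ret != 0)
		return ret;

	/* A zero bitmap disables strict priority and re-enables DCBx. */
	return rte_pmd_i40e_set_tc_strict_prio(port, 0);
}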
exists."); + rte_free(buff); + return 2; + } + } + for (i = 0; i < p_list->p_count; i++) { + p = &p_list->p_info[i]; + if ((p->track_id & group_mask) == group_mask) + continue; + if ((pinfo->track_id & group_mask) != + (p->track_id & group_mask)) { + PMD_DRV_LOG(INFO, "Profile of different group exists."); + rte_free(buff); + return 3; + } + } + + rte_free(buff); + return 0; +} + +int +rte_pmd_i40e_process_ddp_package(uint16_t port, uint8_t *buff, + uint32_t size, + enum rte_pmd_i40e_package_op op) +{ + struct rte_eth_dev *dev; + struct i40e_hw *hw; + struct i40e_package_header *pkg_hdr; + struct i40e_generic_seg_header *profile_seg_hdr; + struct i40e_generic_seg_header *metadata_seg_hdr; + uint32_t track_id; + uint8_t *profile_info_sec; + int is_exist; + enum i40e_status_code status = I40E_SUCCESS; + static const uint32_t type_mask = 0xff000000; + + if (op != RTE_PMD_I40E_PKG_OP_WR_ADD && + op != RTE_PMD_I40E_PKG_OP_WR_ONLY && + op != RTE_PMD_I40E_PKG_OP_WR_DEL) { + PMD_DRV_LOG(ERR, "Operation not supported."); + return -ENOTSUP; + } + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + + if (!is_i40e_supported(dev)) + return -ENOTSUP; + + hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (size < (sizeof(struct i40e_package_header) + + sizeof(struct i40e_metadata_segment) + + sizeof(uint32_t) * 2)) { + PMD_DRV_LOG(ERR, "Buff is invalid."); + return -EINVAL; + } + + pkg_hdr = (struct i40e_package_header *)buff; + + if (!pkg_hdr) { + PMD_DRV_LOG(ERR, "Failed to fill the package structure"); + return -EINVAL; + } + + if (pkg_hdr->segment_count < 2) { + PMD_DRV_LOG(ERR, "Segment_count should be 2 at least."); + return -EINVAL; + } + + /* Find metadata segment */ + metadata_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_METADATA, + pkg_hdr); + if (!metadata_seg_hdr) { + PMD_DRV_LOG(ERR, "Failed to find metadata segment header"); + return -EINVAL; + } + track_id = ((struct i40e_metadata_segment *)metadata_seg_hdr)->track_id; + if (track_id == I40E_DDP_TRACKID_INVALID) { + PMD_DRV_LOG(ERR, "Invalid track_id"); + return -EINVAL; + } + + /* force read-only track_id for type 0 */ + if ((track_id & type_mask) == 0) + track_id = 0; + + /* Find profile segment */ + profile_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_I40E, + pkg_hdr); + if (!profile_seg_hdr) { + PMD_DRV_LOG(ERR, "Failed to find profile segment header"); + return -EINVAL; + } + + profile_info_sec = rte_zmalloc( + "i40e_profile_info", + sizeof(struct i40e_profile_section_header) + + sizeof(struct i40e_profile_info), + 0); + if (!profile_info_sec) { + PMD_DRV_LOG(ERR, "Failed to allocate memory"); + return -EINVAL; + } + + /* Check if the profile already loaded */ + i40e_generate_profile_info_sec( + ((struct i40e_profile_segment *)profile_seg_hdr)->name, + &((struct i40e_profile_segment *)profile_seg_hdr)->version, + track_id, profile_info_sec, + op == RTE_PMD_I40E_PKG_OP_WR_ADD); + is_exist = i40e_check_profile_info(port, profile_info_sec); + if (is_exist < 0) { + PMD_DRV_LOG(ERR, "Failed to check profile."); + rte_free(profile_info_sec); + return -EINVAL; + } + + if (op == RTE_PMD_I40E_PKG_OP_WR_ADD) { + if (is_exist) { + if (is_exist == 1) + PMD_DRV_LOG(ERR, "Profile already exists."); + else if (is_exist == 2) + PMD_DRV_LOG(ERR, "Profile of group 0 already exists."); + else if (is_exist == 3) + PMD_DRV_LOG(ERR, "Profile of different group already exists"); + i40e_update_customized_info(dev, buff, size, op); + rte_free(profile_info_sec); + return -EEXIST; + } + } else if (op == 
RTE_PMD_I40E_PKG_OP_WR_DEL) { + if (is_exist != 1) { + PMD_DRV_LOG(ERR, "Profile does not exist."); + rte_free(profile_info_sec); + return -EACCES; + } + } + + if (op == RTE_PMD_I40E_PKG_OP_WR_DEL) { + status = i40e_rollback_profile( + hw, + (struct i40e_profile_segment *)profile_seg_hdr, + track_id); + if (status) { + PMD_DRV_LOG(ERR, "Failed to write profile for delete."); + rte_free(profile_info_sec); + return status; + } + } else { + status = i40e_write_profile( + hw, + (struct i40e_profile_segment *)profile_seg_hdr, + track_id); + if (status) { + if (op == RTE_PMD_I40E_PKG_OP_WR_ADD) + PMD_DRV_LOG(ERR, "Failed to write profile for add."); + else + PMD_DRV_LOG(ERR, "Failed to write profile."); + rte_free(profile_info_sec); + return status; + } + } + + if (track_id && (op != RTE_PMD_I40E_PKG_OP_WR_ONLY)) { + /* Modify loaded profiles info list */ + status = i40e_add_rm_profile_info(hw, profile_info_sec); + if (status) { + if (op == RTE_PMD_I40E_PKG_OP_WR_ADD) + PMD_DRV_LOG(ERR, "Failed to add profile to info list."); + else + PMD_DRV_LOG(ERR, "Failed to delete profile from info list."); + } + } + + if (op == RTE_PMD_I40E_PKG_OP_WR_ADD || + op == RTE_PMD_I40E_PKG_OP_WR_DEL) + i40e_update_customized_info(dev, buff, size, op); + + rte_free(profile_info_sec); + return status; +} + +/* Get number of tvl records in the section */ +static unsigned int +i40e_get_tlv_section_size(struct i40e_profile_section_header *sec) +{ + unsigned int i, nb_rec, nb_tlv = 0; + struct i40e_profile_tlv_section_record *tlv; + + if (!sec) + return nb_tlv; + + /* get number of records in the section */ + nb_rec = sec->section.size / + sizeof(struct i40e_profile_tlv_section_record); + for (i = 0; i < nb_rec; ) { + tlv = (struct i40e_profile_tlv_section_record *)&sec[1 + i]; + i += tlv->len; + nb_tlv++; + } + return nb_tlv; +} + +int rte_pmd_i40e_get_ddp_info(uint8_t *pkg_buff, uint32_t pkg_size, + uint8_t *info_buff, uint32_t info_size, + enum rte_pmd_i40e_package_info type) +{ + uint32_t ret_size; + struct i40e_package_header *pkg_hdr; + struct i40e_generic_seg_header *i40e_seg_hdr; + struct i40e_generic_seg_header *note_seg_hdr; + struct i40e_generic_seg_header *metadata_seg_hdr; + + if (!info_buff) { + PMD_DRV_LOG(ERR, "Output info buff is invalid."); + return -EINVAL; + } + + if (!pkg_buff || pkg_size < (sizeof(struct i40e_package_header) + + sizeof(struct i40e_metadata_segment) + + sizeof(uint32_t) * 2)) { + PMD_DRV_LOG(ERR, "Package buff is invalid."); + return -EINVAL; + } + + pkg_hdr = (struct i40e_package_header *)pkg_buff; + if (pkg_hdr->segment_count < 2) { + PMD_DRV_LOG(ERR, "Segment_count should be 2 at least."); + return -EINVAL; + } + + /* Find metadata segment */ + metadata_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_METADATA, + pkg_hdr); + + /* Find global notes segment */ + note_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_NOTES, + pkg_hdr); + + /* Find i40e profile segment */ + i40e_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_I40E, pkg_hdr); + + /* get global header info */ + if (type == RTE_PMD_I40E_PKG_INFO_GLOBAL_HEADER) { + struct rte_pmd_i40e_profile_info *info = + (struct rte_pmd_i40e_profile_info *)info_buff; + + if (info_size < sizeof(struct rte_pmd_i40e_profile_info)) { + PMD_DRV_LOG(ERR, "Output info buff size is invalid."); + return -EINVAL; + } + + if (!metadata_seg_hdr) { + PMD_DRV_LOG(ERR, "Failed to find metadata segment header"); + return -EINVAL; + } + + memset(info, 0, sizeof(struct rte_pmd_i40e_profile_info)); + info->owner = 
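To exercise the DDP path above, an application loads the whole package file into memory and hands it to rte_pmd_i40e_process_ddp_package(). The sketch below is illustrative only; the file path comes from the caller and error handling is kept minimal.

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <rte_pmd_i40e.h>

/* Illustrative only: add the profile contained in a DDP package file. */
static int
example_load_ddp(uint16_t port, const char *path)
{
	FILE *f = fopen(path, "rb");
	long sz;
	uint8_t *buf;
	int ret;

	if (f == NULL)
		return -1;
	fseek(f, 0, SEEK_END);
	sz = ftell(f);
	rewind(f);

	buf = malloc(sz);
	if (buf == NULL || fread(buf, 1, sz, f) != (size_t)sz) {
		fclose(f);
		free(buf);
		return -1;
	}
	fclose(f);

	ret = rte_pmd_i40e_process_ddp_package(port, buf, (uint32_t)sz,
					       RTE_PMD_I40E_PKG_OP_WR_ADD);
	free(buf);
	return ret;
}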
RTE_PMD_I40E_DDP_OWNER_UNKNOWN; + info->track_id = + ((struct i40e_metadata_segment *)metadata_seg_hdr)->track_id; + + memcpy(info->name, + ((struct i40e_metadata_segment *)metadata_seg_hdr)->name, + I40E_DDP_NAME_SIZE); + memcpy(&info->version, + &((struct i40e_metadata_segment *)metadata_seg_hdr)->version, + sizeof(struct i40e_ddp_version)); + return I40E_SUCCESS; + } + + /* get global note size */ + if (type == RTE_PMD_I40E_PKG_INFO_GLOBAL_NOTES_SIZE) { + if (info_size < sizeof(uint32_t)) { + PMD_DRV_LOG(ERR, "Invalid information buffer size"); + return -EINVAL; + } + if (note_seg_hdr == NULL) + ret_size = 0; + else + ret_size = note_seg_hdr->size; + *(uint32_t *)info_buff = ret_size; + return I40E_SUCCESS; + } + + /* get global note */ + if (type == RTE_PMD_I40E_PKG_INFO_GLOBAL_NOTES) { + if (note_seg_hdr == NULL) + return -ENOTSUP; + if (info_size < note_seg_hdr->size) { + PMD_DRV_LOG(ERR, "Information buffer size is too small"); + return -EINVAL; + } + memcpy(info_buff, ¬e_seg_hdr[1], note_seg_hdr->size); + return I40E_SUCCESS; + } + + /* get i40e segment header info */ + if (type == RTE_PMD_I40E_PKG_INFO_HEADER) { + struct rte_pmd_i40e_profile_info *info = + (struct rte_pmd_i40e_profile_info *)info_buff; + + if (info_size < sizeof(struct rte_pmd_i40e_profile_info)) { + PMD_DRV_LOG(ERR, "Output info buff size is invalid."); + return -EINVAL; + } + + if (!metadata_seg_hdr) { + PMD_DRV_LOG(ERR, "Failed to find metadata segment header"); + return -EINVAL; + } + + if (!i40e_seg_hdr) { + PMD_DRV_LOG(ERR, "Failed to find i40e segment header"); + return -EINVAL; + } + + memset(info, 0, sizeof(struct rte_pmd_i40e_profile_info)); + info->owner = RTE_PMD_I40E_DDP_OWNER_UNKNOWN; + info->track_id = + ((struct i40e_metadata_segment *)metadata_seg_hdr)->track_id; + + memcpy(info->name, + ((struct i40e_profile_segment *)i40e_seg_hdr)->name, + I40E_DDP_NAME_SIZE); + memcpy(&info->version, + &((struct i40e_profile_segment *)i40e_seg_hdr)->version, + sizeof(struct i40e_ddp_version)); + return I40E_SUCCESS; + } + + /* get number of devices */ + if (type == RTE_PMD_I40E_PKG_INFO_DEVID_NUM) { + if (info_size < sizeof(uint32_t)) { + PMD_DRV_LOG(ERR, "Invalid information buffer size"); + return -EINVAL; + } + *(uint32_t *)info_buff = + ((struct i40e_profile_segment *)i40e_seg_hdr)->device_table_count; + return I40E_SUCCESS; + } + + /* get list of devices */ + if (type == RTE_PMD_I40E_PKG_INFO_DEVID_LIST) { + uint32_t dev_num; + dev_num = + ((struct i40e_profile_segment *)i40e_seg_hdr)->device_table_count; + if (info_size < sizeof(struct rte_pmd_i40e_ddp_device_id) * dev_num) { + PMD_DRV_LOG(ERR, "Invalid information buffer size"); + return -EINVAL; + } + memcpy(info_buff, + ((struct i40e_profile_segment *)i40e_seg_hdr)->device_table, + sizeof(struct rte_pmd_i40e_ddp_device_id) * dev_num); + return I40E_SUCCESS; + } + + /* get number of protocols */ + if (type == RTE_PMD_I40E_PKG_INFO_PROTOCOL_NUM) { + struct i40e_profile_section_header *proto; + + if (info_size < sizeof(uint32_t)) { + PMD_DRV_LOG(ERR, "Invalid information buffer size"); + return -EINVAL; + } + proto = i40e_find_section_in_profile(SECTION_TYPE_PROTO, + (struct i40e_profile_segment *)i40e_seg_hdr); + *(uint32_t *)info_buff = i40e_get_tlv_section_size(proto); + return I40E_SUCCESS; + } + + /* get list of protocols */ + if (type == RTE_PMD_I40E_PKG_INFO_PROTOCOL_LIST) { + uint32_t i, j, nb_tlv, nb_rec, nb_proto_info; + struct rte_pmd_i40e_proto_info *pinfo; + struct i40e_profile_section_header *proto; + struct i40e_profile_tlv_section_record 
*tlv; + + pinfo = (struct rte_pmd_i40e_proto_info *)info_buff; + nb_proto_info = info_size / + sizeof(struct rte_pmd_i40e_proto_info); + for (i = 0; i < nb_proto_info; i++) { + pinfo[i].proto_id = RTE_PMD_I40E_PROTO_UNUSED; + memset(pinfo[i].name, 0, RTE_PMD_I40E_DDP_NAME_SIZE); + } + proto = i40e_find_section_in_profile(SECTION_TYPE_PROTO, + (struct i40e_profile_segment *)i40e_seg_hdr); + nb_tlv = i40e_get_tlv_section_size(proto); + if (nb_tlv == 0) + return I40E_SUCCESS; + if (nb_proto_info < nb_tlv) { + PMD_DRV_LOG(ERR, "Invalid information buffer size"); + return -EINVAL; + } + /* get number of records in the section */ + nb_rec = proto->section.size / + sizeof(struct i40e_profile_tlv_section_record); + tlv = (struct i40e_profile_tlv_section_record *)&proto[1]; + for (i = j = 0; i < nb_rec; j++) { + pinfo[j].proto_id = tlv->data[0]; + strlcpy(pinfo[j].name, (const char *)&tlv->data[1], + I40E_DDP_NAME_SIZE); + i += tlv->len; + tlv = &tlv[tlv->len]; + } + return I40E_SUCCESS; + } + + /* get number of packet classification types */ + if (type == RTE_PMD_I40E_PKG_INFO_PCTYPE_NUM) { + struct i40e_profile_section_header *pctype; + + if (info_size < sizeof(uint32_t)) { + PMD_DRV_LOG(ERR, "Invalid information buffer size"); + return -EINVAL; + } + pctype = i40e_find_section_in_profile(SECTION_TYPE_PCTYPE, + (struct i40e_profile_segment *)i40e_seg_hdr); + *(uint32_t *)info_buff = i40e_get_tlv_section_size(pctype); + return I40E_SUCCESS; + } + + /* get list of packet classification types */ + if (type == RTE_PMD_I40E_PKG_INFO_PCTYPE_LIST) { + uint32_t i, j, nb_tlv, nb_rec, nb_proto_info; + struct rte_pmd_i40e_ptype_info *pinfo; + struct i40e_profile_section_header *pctype; + struct i40e_profile_tlv_section_record *tlv; + + pinfo = (struct rte_pmd_i40e_ptype_info *)info_buff; + nb_proto_info = info_size / + sizeof(struct rte_pmd_i40e_ptype_info); + for (i = 0; i < nb_proto_info; i++) + memset(&pinfo[i], RTE_PMD_I40E_PROTO_UNUSED, + sizeof(struct rte_pmd_i40e_ptype_info)); + pctype = i40e_find_section_in_profile(SECTION_TYPE_PCTYPE, + (struct i40e_profile_segment *)i40e_seg_hdr); + nb_tlv = i40e_get_tlv_section_size(pctype); + if (nb_tlv == 0) + return I40E_SUCCESS; + if (nb_proto_info < nb_tlv) { + PMD_DRV_LOG(ERR, "Invalid information buffer size"); + return -EINVAL; + } + + /* get number of records in the section */ + nb_rec = pctype->section.size / + sizeof(struct i40e_profile_tlv_section_record); + tlv = (struct i40e_profile_tlv_section_record *)&pctype[1]; + for (i = j = 0; i < nb_rec; j++) { + memcpy(&pinfo[j], tlv->data, + sizeof(struct rte_pmd_i40e_ptype_info)); + i += tlv->len; + tlv = &tlv[tlv->len]; + } + return I40E_SUCCESS; + } + + /* get number of packet types */ + if (type == RTE_PMD_I40E_PKG_INFO_PTYPE_NUM) { + struct i40e_profile_section_header *ptype; + + if (info_size < sizeof(uint32_t)) { + PMD_DRV_LOG(ERR, "Invalid information buffer size"); + return -EINVAL; + } + ptype = i40e_find_section_in_profile(SECTION_TYPE_PTYPE, + (struct i40e_profile_segment *)i40e_seg_hdr); + *(uint32_t *)info_buff = i40e_get_tlv_section_size(ptype); + return I40E_SUCCESS; + } + + /* get list of packet types */ + if (type == RTE_PMD_I40E_PKG_INFO_PTYPE_LIST) { + uint32_t i, j, nb_tlv, nb_rec, nb_proto_info; + struct rte_pmd_i40e_ptype_info *pinfo; + struct i40e_profile_section_header *ptype; + struct i40e_profile_tlv_section_record *tlv; + + pinfo = (struct rte_pmd_i40e_ptype_info *)info_buff; + nb_proto_info = info_size / + sizeof(struct rte_pmd_i40e_ptype_info); + for (i = 0; i < nb_proto_info; 
i++) + memset(&pinfo[i], RTE_PMD_I40E_PROTO_UNUSED, + sizeof(struct rte_pmd_i40e_ptype_info)); + ptype = i40e_find_section_in_profile(SECTION_TYPE_PTYPE, + (struct i40e_profile_segment *)i40e_seg_hdr); + nb_tlv = i40e_get_tlv_section_size(ptype); + if (nb_tlv == 0) + return I40E_SUCCESS; + if (nb_proto_info < nb_tlv) { + PMD_DRV_LOG(ERR, "Invalid information buffer size"); + return -EINVAL; + } + /* get number of records in the section */ + nb_rec = ptype->section.size / + sizeof(struct i40e_profile_tlv_section_record); + for (i = j = 0; i < nb_rec; j++) { + tlv = (struct i40e_profile_tlv_section_record *) + &ptype[1 + i]; + memcpy(&pinfo[j], tlv->data, + sizeof(struct rte_pmd_i40e_ptype_info)); + i += tlv->len; + } + return I40E_SUCCESS; + } + + PMD_DRV_LOG(ERR, "Info type %u is invalid.", type); + return -EINVAL; +} + +int +rte_pmd_i40e_get_ddp_list(uint16_t port, uint8_t *buff, uint32_t size) +{ + struct rte_eth_dev *dev; + struct i40e_hw *hw; + enum i40e_status_code status = I40E_SUCCESS; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + + if (!is_i40e_supported(dev)) + return -ENOTSUP; + + if (size < (I40E_PROFILE_INFO_SIZE * I40E_MAX_PROFILE_NUM + 4)) + return -EINVAL; + + hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + status = i40e_aq_get_ddp_list(hw, (void *)buff, + size, 0, NULL); + + return status; +} + +static int check_invalid_pkt_type(uint32_t pkt_type) +{ + uint32_t l2, l3, l4, tnl, il2, il3, il4; + + l2 = pkt_type & RTE_PTYPE_L2_MASK; + l3 = pkt_type & RTE_PTYPE_L3_MASK; + l4 = pkt_type & RTE_PTYPE_L4_MASK; + tnl = pkt_type & RTE_PTYPE_TUNNEL_MASK; + il2 = pkt_type & RTE_PTYPE_INNER_L2_MASK; + il3 = pkt_type & RTE_PTYPE_INNER_L3_MASK; + il4 = pkt_type & RTE_PTYPE_INNER_L4_MASK; + + if (l2 && + l2 != RTE_PTYPE_L2_ETHER && + l2 != RTE_PTYPE_L2_ETHER_TIMESYNC && + l2 != RTE_PTYPE_L2_ETHER_ARP && + l2 != RTE_PTYPE_L2_ETHER_LLDP && + l2 != RTE_PTYPE_L2_ETHER_NSH && + l2 != RTE_PTYPE_L2_ETHER_VLAN && + l2 != RTE_PTYPE_L2_ETHER_QINQ && + l2 != RTE_PTYPE_L2_ETHER_PPPOE) + return -1; + + if (l3 && + l3 != RTE_PTYPE_L3_IPV4 && + l3 != RTE_PTYPE_L3_IPV4_EXT && + l3 != RTE_PTYPE_L3_IPV6 && + l3 != RTE_PTYPE_L3_IPV4_EXT_UNKNOWN && + l3 != RTE_PTYPE_L3_IPV6_EXT && + l3 != RTE_PTYPE_L3_IPV6_EXT_UNKNOWN) + return -1; + + if (l4 && + l4 != RTE_PTYPE_L4_TCP && + l4 != RTE_PTYPE_L4_UDP && + l4 != RTE_PTYPE_L4_FRAG && + l4 != RTE_PTYPE_L4_SCTP && + l4 != RTE_PTYPE_L4_ICMP && + l4 != RTE_PTYPE_L4_NONFRAG) + return -1; + + if (tnl && + tnl != RTE_PTYPE_TUNNEL_IP && + tnl != RTE_PTYPE_TUNNEL_GRENAT && + tnl != RTE_PTYPE_TUNNEL_VXLAN && + tnl != RTE_PTYPE_TUNNEL_NVGRE && + tnl != RTE_PTYPE_TUNNEL_GENEVE && + tnl != RTE_PTYPE_TUNNEL_GRENAT && + tnl != RTE_PTYPE_TUNNEL_GTPC && + tnl != RTE_PTYPE_TUNNEL_GTPU && + tnl != RTE_PTYPE_TUNNEL_L2TP && + tnl != RTE_PTYPE_TUNNEL_ESP) + return -1; + + if (il2 && + il2 != RTE_PTYPE_INNER_L2_ETHER && + il2 != RTE_PTYPE_INNER_L2_ETHER_VLAN && + il2 != RTE_PTYPE_INNER_L2_ETHER_QINQ) + return -1; + + if (il3 && + il3 != RTE_PTYPE_INNER_L3_IPV4 && + il3 != RTE_PTYPE_INNER_L3_IPV4_EXT && + il3 != RTE_PTYPE_INNER_L3_IPV6 && + il3 != RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN && + il3 != RTE_PTYPE_INNER_L3_IPV6_EXT && + il3 != RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN) + return -1; + + if (il4 && + il4 != RTE_PTYPE_INNER_L4_TCP && + il4 != RTE_PTYPE_INNER_L4_UDP && + il4 != RTE_PTYPE_INNER_L4_FRAG && + il4 != RTE_PTYPE_INNER_L4_SCTP && + il4 != RTE_PTYPE_INNER_L4_ICMP && + il4 != RTE_PTYPE_INNER_L4_NONFRAG) + return -1; + + return 0; 
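rte_pmd_i40e_get_ddp_info() operates purely on a package image in memory, so it can be used to inspect a profile before deciding whether to load it. A minimal sketch, assuming pkg holds a complete package (for instance read from disk as in the loading example earlier); the struct fields follow the names used by the driver code above.

#include <stdio.h>
#include <inttypes.h>
#include <rte_pmd_i40e.h>

/* Illustrative only: print the global header of an in-memory DDP package. */
static int
example_ddp_header(uint8_t *pkg, uint32_t pkg_size)
{
	struct rte_pmd_i40e_profile_info pinfo;
	int ret;

	ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
					(uint8_t *)&pinfo, sizeof(pinfo),
					RTE_PMD_I40E_PKG_INFO_GLOBAL_HEADER);
	if (ret != 0)
		return ret;

	printf("track_id 0x%" PRIx32 ", version %d.%d.%d.%d\n",
	       pinfo.track_id,
	       pinfo.version.major, pinfo.version.minor,
	       pinfo.version.update, pinfo.version.draft);
	return 0;
}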
+} + +static int check_invalid_ptype_mapping( + struct rte_pmd_i40e_ptype_mapping *mapping_table, + uint16_t count) +{ + int i; + + for (i = 0; i < count; i++) { + uint16_t ptype = mapping_table[i].hw_ptype; + uint32_t pkt_type = mapping_table[i].sw_ptype; + + if (ptype >= I40E_MAX_PKT_TYPE) + return -1; + + if (pkt_type == RTE_PTYPE_UNKNOWN) + continue; + + if (pkt_type & RTE_PMD_I40E_PTYPE_USER_DEFINE_MASK) + continue; + + if (check_invalid_pkt_type(pkt_type)) + return -1; + } + + return 0; +} + +int +rte_pmd_i40e_ptype_mapping_update( + uint16_t port, + struct rte_pmd_i40e_ptype_mapping *mapping_items, + uint16_t count, + uint8_t exclusive) +{ + struct rte_eth_dev *dev; + struct i40e_adapter *ad; + int i; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + + if (!is_i40e_supported(dev)) + return -ENOTSUP; + + if (count > I40E_MAX_PKT_TYPE) + return -EINVAL; + + if (check_invalid_ptype_mapping(mapping_items, count)) + return -EINVAL; + + ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + + if (exclusive) { + for (i = 0; i < I40E_MAX_PKT_TYPE; i++) + ad->ptype_tbl[i] = RTE_PTYPE_UNKNOWN; + } + + for (i = 0; i < count; i++) + ad->ptype_tbl[mapping_items[i].hw_ptype] + = mapping_items[i].sw_ptype; + + return 0; +} + +int rte_pmd_i40e_ptype_mapping_reset(uint16_t port) +{ + struct rte_eth_dev *dev; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + + if (!is_i40e_supported(dev)) + return -ENOTSUP; + + i40e_set_default_ptype_table(dev); + + return 0; +} + +int rte_pmd_i40e_ptype_mapping_get( + uint16_t port, + struct rte_pmd_i40e_ptype_mapping *mapping_items, + uint16_t size, + uint16_t *count, + uint8_t valid_only) +{ + struct rte_eth_dev *dev; + struct i40e_adapter *ad; + int n = 0; + uint16_t i; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + + if (!is_i40e_supported(dev)) + return -ENOTSUP; + + ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + + for (i = 0; i < I40E_MAX_PKT_TYPE; i++) { + if (n >= size) + break; + if (valid_only && ad->ptype_tbl[i] == RTE_PTYPE_UNKNOWN) + continue; + mapping_items[n].hw_ptype = i; + mapping_items[n].sw_ptype = ad->ptype_tbl[i]; + n++; + } + + *count = n; + return 0; +} + +int rte_pmd_i40e_ptype_mapping_replace(uint16_t port, + uint32_t target, + uint8_t mask, + uint32_t pkt_type) +{ + struct rte_eth_dev *dev; + struct i40e_adapter *ad; + uint16_t i; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + + if (!is_i40e_supported(dev)) + return -ENOTSUP; + + if (!mask && check_invalid_pkt_type(target)) + return -EINVAL; + + if (check_invalid_pkt_type(pkt_type)) + return -EINVAL; + + ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + + for (i = 0; i < I40E_MAX_PKT_TYPE; i++) { + if (mask) { + if ((target | ad->ptype_tbl[i]) == target && + (target & ad->ptype_tbl[i])) + ad->ptype_tbl[i] = pkt_type; + } else { + if (ad->ptype_tbl[i] == target) + ad->ptype_tbl[i] = pkt_type; + } + } + + return 0; +} + +int +rte_pmd_i40e_add_vf_mac_addr(uint16_t port, uint16_t vf_id, + struct rte_ether_addr *mac_addr) +{ + struct rte_eth_dev *dev; + struct i40e_pf_vf *vf; + struct i40e_vsi *vsi; + struct i40e_pf *pf; + struct i40e_mac_filter_info mac_filter; + int ret; + + if (i40e_validate_mac_addr((u8 *)mac_addr) != I40E_SUCCESS) + return -EINVAL; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + + if (!is_i40e_supported(dev)) + return -ENOTSUP; + + pf = 
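A short, hypothetical use of the ptype-mapping hooks above: redirect one hardware ptype index to a software packet type without clearing the rest of the table (exclusive = 0). The hardware index 38 is a placeholder, not a documented value.

#include <rte_mbuf_ptype.h>
#include <rte_pmd_i40e.h>

/* Illustrative only: map a single (hypothetical) HW ptype to ETHER/IPv4/UDP. */
static int
example_ptype_mapping(uint16_t port)
{
	struct rte_pmd_i40e_ptype_mapping map = {
		.hw_ptype = 38,	/* placeholder hardware index */
		.sw_ptype = RTE_PTYPE_L2_ETHER |
			    RTE_PTYPE_L3_IPV4 |
			    RTE_PTYPE_L4_UDP,
	};

	return rte_pmd_i40e_ptype_mapping_update(port, &map, 1, 0);
}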
I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + + if (vf_id >= pf->vf_num || !pf->vfs) + return -EINVAL; + + vf = &pf->vfs[vf_id]; + vsi = vf->vsi; + if (!vsi) { + PMD_DRV_LOG(ERR, "Invalid VSI."); + return -EINVAL; + } + + mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH; + rte_ether_addr_copy(mac_addr, &mac_filter.mac_addr); + ret = i40e_vsi_add_mac(vsi, &mac_filter); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "Failed to add MAC filter."); + return -1; + } + + return 0; +} + +int rte_pmd_i40e_flow_type_mapping_reset(uint16_t port) +{ + struct rte_eth_dev *dev; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + + if (!is_i40e_supported(dev) && + !is_i40evf_supported(dev)) + return -ENOTSUP; + + i40e_set_default_pctype_table(dev); + + return 0; +} + +int rte_pmd_i40e_flow_type_mapping_get( + uint16_t port, + struct rte_pmd_i40e_flow_type_mapping *mapping_items) +{ + struct rte_eth_dev *dev; + struct i40e_adapter *ad; + uint16_t i; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + + if (!is_i40e_supported(dev) && + !is_i40evf_supported(dev)) + return -ENOTSUP; + + ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + + for (i = 0; i < I40E_FLOW_TYPE_MAX; i++) { + mapping_items[i].flow_type = i; + mapping_items[i].pctype = ad->pctypes_tbl[i]; + } + + return 0; +} + +int +rte_pmd_i40e_flow_type_mapping_update( + uint16_t port, + struct rte_pmd_i40e_flow_type_mapping *mapping_items, + uint16_t count, + uint8_t exclusive) +{ + struct rte_eth_dev *dev; + struct i40e_adapter *ad; + int i; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + + if (!is_i40e_supported(dev) && + !is_i40evf_supported(dev)) + return -ENOTSUP; + + if (count > I40E_FLOW_TYPE_MAX) + return -EINVAL; + + for (i = 0; i < count; i++) + if (mapping_items[i].flow_type >= I40E_FLOW_TYPE_MAX || + mapping_items[i].flow_type == RTE_ETH_FLOW_UNKNOWN || + (mapping_items[i].pctype & + (1ULL << I40E_FILTER_PCTYPE_INVALID))) + return -EINVAL; + + ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + + if (exclusive) { + for (i = 0; i < I40E_FLOW_TYPE_MAX; i++) + ad->pctypes_tbl[i] = 0ULL; + ad->flow_types_mask = 0ULL; + } + + for (i = 0; i < count; i++) { + ad->pctypes_tbl[mapping_items[i].flow_type] = + mapping_items[i].pctype; + if (mapping_items[i].pctype) + ad->flow_types_mask |= + (1ULL << mapping_items[i].flow_type); + else + ad->flow_types_mask &= + ~(1ULL << mapping_items[i].flow_type); + } + + for (i = 0, ad->pctypes_mask = 0ULL; i < I40E_FLOW_TYPE_MAX; i++) + ad->pctypes_mask |= ad->pctypes_tbl[i]; + + return 0; +} + +int +rte_pmd_i40e_query_vfid_by_mac(uint16_t port, + const struct rte_ether_addr *vf_mac) +{ + struct rte_eth_dev *dev; + struct rte_ether_addr *mac; + struct i40e_pf *pf; + int vf_id; + struct i40e_pf_vf *vf; + uint16_t vf_num; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + dev = &rte_eth_devices[port]; + + if (!is_i40e_supported(dev)) + return -ENOTSUP; + + pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + vf_num = pf->vf_num; + + for (vf_id = 0; vf_id < vf_num; vf_id++) { + vf = &pf->vfs[vf_id]; + mac = &vf->mac_addr; + + if (rte_is_same_ether_addr(mac, vf_mac)) + return vf_id; + } + + return -EINVAL; +} + +static int +i40e_vsi_update_queue_region_mapping(struct i40e_hw *hw, + struct i40e_pf *pf) +{ + uint16_t i; + struct i40e_vsi *vsi = pf->main_vsi; + uint16_t queue_offset, bsf, tc_index; + struct i40e_vsi_context ctxt; + struct i40e_aqc_vsi_properties_data *vsi_info; 
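The MAC helpers above can be combined: resolve a VF from its default MAC address, then attach an additional unicast filter to that VF's VSI. Hypothetical sketch; the extra address is a made-up locally administered MAC.

#include <stdio.h>
#include <stdint.h>
#include <rte_ether.h>
#include <rte_pmd_i40e.h>

/* Illustrative only: look up a VF by its default MAC, then add one more filter. */
static int
example_vf_mac(uint16_t port, const struct rte_ether_addr *vf_default_mac)
{
	struct rte_ether_addr extra = {
		.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
	};
	int vf;

	/* query_vfid_by_mac() matches a VF's *default* MAC address. */
	vf = rte_pmd_i40e_query_vfid_by_mac(port, vf_default_mac);
	if (vf < 0)
		return vf;
	printf("MAC belongs to VF %d\n", vf);

	/* Add an extra unicast filter on that VF's VSI. */
	return rte_pmd_i40e_add_vf_mac_addr(port, (uint16_t)vf, &extra);
}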
+ struct i40e_queue_regions *region_info = + &pf->queue_region; + int32_t ret = -EINVAL; + + if (!region_info->queue_region_number) { + PMD_INIT_LOG(ERR, "there is no that region id been set before"); + return ret; + } + + memset(&ctxt, 0, sizeof(struct i40e_vsi_context)); + + /* Update Queue Pairs Mapping for currently enabled UPs */ + ctxt.seid = vsi->seid; + ctxt.pf_num = hw->pf_id; + ctxt.vf_num = 0; + ctxt.uplink_seid = vsi->uplink_seid; + ctxt.info = vsi->info; + vsi_info = &ctxt.info; + + memset(vsi_info->tc_mapping, 0, sizeof(uint16_t) * 8); + memset(vsi_info->queue_mapping, 0, sizeof(uint16_t) * 16); + + /* Configure queue region and queue mapping parameters, + * for enabled queue region, allocate queues to this region. + */ + + for (i = 0; i < region_info->queue_region_number; i++) { + tc_index = region_info->region[i].region_id; + bsf = rte_bsf32(region_info->region[i].queue_num); + queue_offset = region_info->region[i].queue_start_index; + vsi_info->tc_mapping[tc_index] = rte_cpu_to_le_16( + (queue_offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) | + (bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)); + } + + /* Associate queue number with VSI, Keep vsi->nb_qps unchanged */ + vsi_info->mapping_flags |= + rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG); + vsi_info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue); + vsi_info->valid_sections |= + rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID); + + /* Update the VSI after updating the VSI queue-mapping information */ + ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); + if (ret) { + PMD_DRV_LOG(ERR, "Failed to configure queue region mapping = %d ", + hw->aq.asq_last_status); + return ret; + } + /* update the local VSI info with updated queue map */ + rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping, + sizeof(vsi->info.tc_mapping)); + rte_memcpy(&vsi->info.queue_mapping, + &ctxt.info.queue_mapping, + sizeof(vsi->info.queue_mapping)); + vsi->info.mapping_flags = ctxt.info.mapping_flags; + vsi->info.valid_sections = 0; + + return 0; +} + + +static int +i40e_queue_region_set_region(struct i40e_pf *pf, + struct rte_pmd_i40e_queue_region_conf *conf_ptr) +{ + uint16_t i; + struct i40e_vsi *main_vsi = pf->main_vsi; + struct i40e_queue_regions *info = &pf->queue_region; + int32_t ret = -EINVAL; + + if (!((rte_is_power_of_2(conf_ptr->queue_num)) && + conf_ptr->queue_num <= 64)) { + PMD_DRV_LOG(ERR, "The region sizes should be any of the following values: 1, 2, 4, 8, 16, 32, 64 as long as the " + "total number of queues do not exceed the VSI allocation"); + return ret; + } + + if (conf_ptr->region_id > I40E_REGION_MAX_INDEX) { + PMD_DRV_LOG(ERR, "the queue region max index is 7"); + return ret; + } + + if ((conf_ptr->queue_start_index + conf_ptr->queue_num) + > main_vsi->nb_used_qps) { + PMD_DRV_LOG(ERR, "the queue index exceeds the VSI range"); + return ret; + } + + for (i = 0; i < info->queue_region_number; i++) + if (conf_ptr->region_id == info->region[i].region_id) + break; + + if (i == info->queue_region_number && + i <= I40E_REGION_MAX_INDEX) { + info->region[i].region_id = conf_ptr->region_id; + info->region[i].queue_num = conf_ptr->queue_num; + info->region[i].queue_start_index = + conf_ptr->queue_start_index; + info->queue_region_number++; + } else { + PMD_DRV_LOG(ERR, "queue region number exceeds maxnum 8 or the queue region id has been set before"); + return ret; + } + + return 0; +} + +static int +i40e_queue_region_set_flowtype(struct i40e_pf *pf, + struct rte_pmd_i40e_queue_region_conf *rss_region_conf) +{ + int32_t ret = -EINVAL; 
+ struct i40e_queue_regions *info = &pf->queue_region; + uint16_t i, j; + uint16_t region_index, flowtype_index; + + /* For the pctype or hardware flowtype of packet, + * the specific index for each type has been defined + * in file i40e_type.h as enum i40e_filter_pctype. + */ + + if (rss_region_conf->region_id > I40E_PFQF_HREGION_MAX_INDEX) { + PMD_DRV_LOG(ERR, "the queue region max index is 7"); + return ret; + } + + if (rss_region_conf->hw_flowtype >= I40E_FILTER_PCTYPE_MAX) { + PMD_DRV_LOG(ERR, "the hw_flowtype or PCTYPE max index is 63"); + return ret; + } + + + for (i = 0; i < info->queue_region_number; i++) + if (rss_region_conf->region_id == info->region[i].region_id) + break; + + if (i == info->queue_region_number) { + PMD_DRV_LOG(ERR, "that region id has not been set before"); + ret = -EINVAL; + return ret; + } + region_index = i; + + for (i = 0; i < info->queue_region_number; i++) { + for (j = 0; j < info->region[i].flowtype_num; j++) { + if (rss_region_conf->hw_flowtype == + info->region[i].hw_flowtype[j]) { + PMD_DRV_LOG(ERR, "that hw_flowtype has been set before"); + return 0; + } + } + } + + flowtype_index = info->region[region_index].flowtype_num; + info->region[region_index].hw_flowtype[flowtype_index] = + rss_region_conf->hw_flowtype; + info->region[region_index].flowtype_num++; + + return 0; +} + +static void +i40e_queue_region_pf_flowtype_conf(struct i40e_hw *hw, + struct i40e_pf *pf) +{ + uint8_t hw_flowtype; + uint32_t pfqf_hregion; + uint16_t i, j, index; + struct i40e_queue_regions *info = &pf->queue_region; + + /* For the pctype or hardware flowtype of packet, + * the specific index for each type has been defined + * in file i40e_type.h as enum i40e_filter_pctype. + */ + + for (i = 0; i < info->queue_region_number; i++) { + for (j = 0; j < info->region[i].flowtype_num; j++) { + hw_flowtype = info->region[i].hw_flowtype[j]; + index = hw_flowtype >> 3; + pfqf_hregion = + i40e_read_rx_ctl(hw, I40E_PFQF_HREGION(index)); + + if ((hw_flowtype & 0x7) == 0) { + pfqf_hregion |= info->region[i].region_id << + I40E_PFQF_HREGION_REGION_0_SHIFT; + pfqf_hregion |= 1 << + I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT; + } else if ((hw_flowtype & 0x7) == 1) { + pfqf_hregion |= info->region[i].region_id << + I40E_PFQF_HREGION_REGION_1_SHIFT; + pfqf_hregion |= 1 << + I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT; + } else if ((hw_flowtype & 0x7) == 2) { + pfqf_hregion |= info->region[i].region_id << + I40E_PFQF_HREGION_REGION_2_SHIFT; + pfqf_hregion |= 1 << + I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT; + } else if ((hw_flowtype & 0x7) == 3) { + pfqf_hregion |= info->region[i].region_id << + I40E_PFQF_HREGION_REGION_3_SHIFT; + pfqf_hregion |= 1 << + I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT; + } else if ((hw_flowtype & 0x7) == 4) { + pfqf_hregion |= info->region[i].region_id << + I40E_PFQF_HREGION_REGION_4_SHIFT; + pfqf_hregion |= 1 << + I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT; + } else if ((hw_flowtype & 0x7) == 5) { + pfqf_hregion |= info->region[i].region_id << + I40E_PFQF_HREGION_REGION_5_SHIFT; + pfqf_hregion |= 1 << + I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT; + } else if ((hw_flowtype & 0x7) == 6) { + pfqf_hregion |= info->region[i].region_id << + I40E_PFQF_HREGION_REGION_6_SHIFT; + pfqf_hregion |= 1 << + I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT; + } else { + pfqf_hregion |= info->region[i].region_id << + I40E_PFQF_HREGION_REGION_7_SHIFT; + pfqf_hregion |= 1 << + I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT; + } + + i40e_write_rx_ctl(hw, I40E_PFQF_HREGION(index), + pfqf_hregion); + } + } +} + +static int 
+i40e_queue_region_set_user_priority(struct i40e_pf *pf, + struct rte_pmd_i40e_queue_region_conf *rss_region_conf) +{ + struct i40e_queue_regions *info = &pf->queue_region; + int32_t ret = -EINVAL; + uint16_t i, j, region_index; + + if (rss_region_conf->user_priority >= I40E_MAX_USER_PRIORITY) { + PMD_DRV_LOG(ERR, "the queue region max index is 7"); + return ret; + } + + if (rss_region_conf->region_id > I40E_REGION_MAX_INDEX) { + PMD_DRV_LOG(ERR, "the region_id max index is 7"); + return ret; + } + + for (i = 0; i < info->queue_region_number; i++) + if (rss_region_conf->region_id == info->region[i].region_id) + break; + + if (i == info->queue_region_number) { + PMD_DRV_LOG(ERR, "that region id has not been set before"); + ret = -EINVAL; + return ret; + } + + region_index = i; + + for (i = 0; i < info->queue_region_number; i++) { + for (j = 0; j < info->region[i].user_priority_num; j++) { + if (info->region[i].user_priority[j] == + rss_region_conf->user_priority) { + PMD_DRV_LOG(ERR, "that user priority has been set before"); + return 0; + } + } + } + + j = info->region[region_index].user_priority_num; + info->region[region_index].user_priority[j] = + rss_region_conf->user_priority; + info->region[region_index].user_priority_num++; + + return 0; +} + +static int +i40e_queue_region_dcb_configure(struct i40e_hw *hw, + struct i40e_pf *pf) +{ + struct i40e_dcbx_config dcb_cfg_local; + struct i40e_dcbx_config *dcb_cfg; + struct i40e_queue_regions *info = &pf->queue_region; + struct i40e_dcbx_config *old_cfg = &hw->local_dcbx_config; + int32_t ret = -EINVAL; + uint16_t i, j, prio_index, region_index; + uint8_t tc_map, tc_bw, bw_lf, dcb_flag = 0; + + if (!info->queue_region_number) { + PMD_DRV_LOG(ERR, "No queue region been set before"); + return ret; + } + + for (i = 0; i < info->queue_region_number; i++) { + if (info->region[i].user_priority_num) { + dcb_flag = 1; + break; + } + } + + if (dcb_flag == 0) + return 0; + + dcb_cfg = &dcb_cfg_local; + memset(dcb_cfg, 0, sizeof(struct i40e_dcbx_config)); + + /* assume each tc has the same bw */ + tc_bw = I40E_MAX_PERCENT / info->queue_region_number; + for (i = 0; i < info->queue_region_number; i++) + dcb_cfg->etscfg.tcbwtable[i] = tc_bw; + /* to ensure the sum of tcbw is equal to 100 */ + bw_lf = I40E_MAX_PERCENT % info->queue_region_number; + for (i = 0; i < bw_lf; i++) + dcb_cfg->etscfg.tcbwtable[i]++; + + /* assume each tc has the same Transmission Selection Algorithm */ + for (i = 0; i < info->queue_region_number; i++) + dcb_cfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS; + + for (i = 0; i < info->queue_region_number; i++) { + for (j = 0; j < info->region[i].user_priority_num; j++) { + prio_index = info->region[i].user_priority[j]; + region_index = info->region[i].region_id; + dcb_cfg->etscfg.prioritytable[prio_index] = + region_index; + } + } + + /* FW needs one App to configure HW */ + dcb_cfg->numapps = I40E_DEFAULT_DCB_APP_NUM; + dcb_cfg->app[0].selector = I40E_APP_SEL_ETHTYPE; + dcb_cfg->app[0].priority = I40E_DEFAULT_DCB_APP_PRIO; + dcb_cfg->app[0].protocolid = I40E_APP_PROTOID_FCOE; + + tc_map = RTE_LEN2MASK(info->queue_region_number, uint8_t); + + dcb_cfg->pfc.willing = 0; + dcb_cfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS; + dcb_cfg->pfc.pfcenable = tc_map; + + /* Copy the new config to the current config */ + *old_cfg = *dcb_cfg; + old_cfg->etsrec = old_cfg->etscfg; + ret = i40e_set_dcb_config(hw); + + if (ret) { + PMD_DRV_LOG(ERR, "Set queue region DCB Config failed, err %s aq_err %s", + i40e_stat_str(hw, ret), + i40e_aq_str(hw, 
hw->aq.asq_last_status)); + return ret; + } + + return 0; +} + +int +i40e_flush_queue_region_all_conf(struct rte_eth_dev *dev, + struct i40e_hw *hw, struct i40e_pf *pf, uint16_t on) +{ + int32_t ret = -EINVAL; + struct i40e_queue_regions *info = &pf->queue_region; + struct i40e_vsi *main_vsi = pf->main_vsi; + + if (on) { + i40e_queue_region_pf_flowtype_conf(hw, pf); + + ret = i40e_vsi_update_queue_region_mapping(hw, pf); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(INFO, "Failed to flush queue region mapping."); + return ret; + } + + ret = i40e_queue_region_dcb_configure(hw, pf); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(INFO, "Failed to flush dcb."); + return ret; + } + + return 0; + } + + if (info->queue_region_number) { + info->queue_region_number = 1; + info->region[0].queue_num = main_vsi->nb_used_qps; + info->region[0].queue_start_index = 0; + + ret = i40e_vsi_update_queue_region_mapping(hw, pf); + if (ret != I40E_SUCCESS) + PMD_DRV_LOG(INFO, "Failed to flush queue region mapping."); + + ret = i40e_dcb_init_configure(dev, TRUE); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(INFO, "Failed to flush dcb."); + pf->flags &= ~I40E_FLAG_DCB; + } + + i40e_init_queue_region_conf(dev); + } + return 0; +} + +static int +i40e_queue_region_pf_check_rss(struct i40e_pf *pf) +{ + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + uint64_t hena; + + hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)); + hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32; + + if (!hena) + return -ENOTSUP; + + return 0; +} + +static int +i40e_queue_region_get_all_info(struct i40e_pf *pf, + struct i40e_queue_regions *regions_ptr) +{ + struct i40e_queue_regions *info = &pf->queue_region; + + rte_memcpy(regions_ptr, info, + sizeof(struct i40e_queue_regions)); + + return 0; +} + +int rte_pmd_i40e_rss_queue_region_conf(uint16_t port_id, + enum rte_pmd_i40e_queue_region_op op_type, void *arg) +{ + struct rte_eth_dev *dev = &rte_eth_devices[port_id]; + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int32_t ret; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); + + if (!is_i40e_supported(dev)) + return -ENOTSUP; + + if (!(!i40e_queue_region_pf_check_rss(pf))) + return -ENOTSUP; + + /* This queue region feature only support pf by now. It should + * be called after dev_start, and will be clear after dev_stop. + * "RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_ON" + * is just an enable function which server for other configuration, + * it is for all configuration about queue region from up layer, + * at first will only keep in DPDK softwarestored in driver, + * only after "FLUSH_ON", it commit all configuration to HW. + * Because PMD had to set hardware configuration at a time, so + * it will record all up layer command at first. + * "RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_OFF" is + * just clean all configuration about queue region just now, + * and restore all to DPDK i40e driver default + * config when start up. 
+ */ + + switch (op_type) { + case RTE_PMD_I40E_RSS_QUEUE_REGION_SET: + ret = i40e_queue_region_set_region(pf, + (struct rte_pmd_i40e_queue_region_conf *)arg); + break; + case RTE_PMD_I40E_RSS_QUEUE_REGION_FLOWTYPE_SET: + ret = i40e_queue_region_set_flowtype(pf, + (struct rte_pmd_i40e_queue_region_conf *)arg); + break; + case RTE_PMD_I40E_RSS_QUEUE_REGION_USER_PRIORITY_SET: + ret = i40e_queue_region_set_user_priority(pf, + (struct rte_pmd_i40e_queue_region_conf *)arg); + break; + case RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_ON: + ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 1); + break; + case RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_OFF: + ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 0); + break; + case RTE_PMD_I40E_RSS_QUEUE_REGION_INFO_GET: + ret = i40e_queue_region_get_all_info(pf, + (struct i40e_queue_regions *)arg); + break; + default: + PMD_DRV_LOG(WARNING, "op type (%d) not supported", + op_type); + ret = -EINVAL; + } + + I40E_WRITE_FLUSH(hw); + + return ret; +} + +int rte_pmd_i40e_flow_add_del_packet_template( + uint16_t port, + const struct rte_pmd_i40e_pkt_template_conf *conf, + uint8_t add) +{ + struct rte_eth_dev *dev = &rte_eth_devices[port]; + struct i40e_fdir_filter_conf filter_conf; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + if (!is_i40e_supported(dev)) + return -ENOTSUP; + + memset(&filter_conf, 0, sizeof(filter_conf)); + filter_conf.soft_id = conf->soft_id; + filter_conf.input.flow.raw_flow.pctype = conf->input.pctype; + filter_conf.input.flow.raw_flow.packet = conf->input.packet; + filter_conf.input.flow.raw_flow.length = conf->input.length; + filter_conf.input.flow_ext.pkt_template = true; + + filter_conf.action.rx_queue = conf->action.rx_queue; + filter_conf.action.behavior = + (enum i40e_fdir_behavior)conf->action.behavior; + filter_conf.action.report_status = + (enum i40e_fdir_status)conf->action.report_status; + filter_conf.action.flex_off = conf->action.flex_off; + + return i40e_flow_add_del_fdir_filter(dev, &filter_conf, add); +} + +int +rte_pmd_i40e_inset_get(uint16_t port, uint8_t pctype, + struct rte_pmd_i40e_inset *inset, + enum rte_pmd_i40e_inset_type inset_type) +{ + struct rte_eth_dev *dev; + struct i40e_hw *hw; + uint64_t inset_reg; + uint32_t mask_reg[2]; + int i; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + + if (!is_i40e_supported(dev)) + return -ENOTSUP; + + if (pctype > 63) + return -EINVAL; + + hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + memset(inset, 0, sizeof(struct rte_pmd_i40e_inset)); + + switch (inset_type) { + case INSET_HASH: + /* Get input set */ + inset_reg = + i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, pctype)); + inset_reg <<= I40E_32_BIT_WIDTH; + inset_reg |= + i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, pctype)); + /* Get field mask */ + mask_reg[0] = + i40e_read_rx_ctl(hw, I40E_GLQF_HASH_MSK(0, pctype)); + mask_reg[1] = + i40e_read_rx_ctl(hw, I40E_GLQF_HASH_MSK(1, pctype)); + break; + case INSET_FDIR: + inset_reg = + i40e_read_rx_ctl(hw, I40E_PRTQF_FD_INSET(pctype, 1)); + inset_reg <<= I40E_32_BIT_WIDTH; + inset_reg |= + i40e_read_rx_ctl(hw, I40E_PRTQF_FD_INSET(pctype, 0)); + mask_reg[0] = + i40e_read_rx_ctl(hw, I40E_GLQF_FD_MSK(0, pctype)); + mask_reg[1] = + i40e_read_rx_ctl(hw, I40E_GLQF_FD_MSK(1, pctype)); + break; + case INSET_FDIR_FLX: + inset_reg = + i40e_read_rx_ctl(hw, I40E_PRTQF_FD_FLXINSET(pctype)); + mask_reg[0] = + i40e_read_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, 0)); + mask_reg[1] = + i40e_read_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, 1)); + 
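The queue-region API above is staged: SET, FLOWTYPE_SET and USER_PRIORITY_SET only record the request in the driver, and nothing reaches the hardware until ALL_FLUSH_ON. A hypothetical sketch (the region id, queue range, PCTYPE index and user priority are placeholders):

#include <string.h>
#include <stdint.h>
#include <rte_pmd_i40e.h>

/* Illustrative only: carve queues 0-7 into region 0, steer one hardware
 * flow type (PCTYPE index 31, a placeholder) and user priority 0 to it,
 * then commit everything to the hardware in a single flush. */
static int
example_queue_region(uint16_t port)
{
	struct rte_pmd_i40e_queue_region_conf conf;
	int ret;

	memset(&conf, 0, sizeof(conf));
	conf.region_id = 0;
	conf.queue_start_index = 0;
	conf.queue_num = 8;
	ret = rte_pmd_i40e_rss_queue_region_conf(port,
			RTE_PMD_I40E_RSS_QUEUE_REGION_SET, &conf);
	if (ret != 0)
		return ret;

	conf.hw_flowtype = 31;	/* placeholder PCTYPE index */
	ret = rte_pmd_i40e_rss_queue_region_conf(port,
			RTE_PMD_I40E_RSS_QUEUE_REGION_FLOWTYPE_SET, &conf);
	if (ret != 0)
		return ret;

	conf.user_priority = 0;
	ret = rte_pmd_i40e_rss_queue_region_conf(port,
			RTE_PMD_I40E_RSS_QUEUE_REGION_USER_PRIORITY_SET, &conf);
	if (ret != 0)
		return ret;

	return rte_pmd_i40e_rss_queue_region_conf(port,
			RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_ON, &conf);
}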
break; + default: + PMD_DRV_LOG(ERR, "Unsupported input set type."); + return -EINVAL; + } + + inset->inset = inset_reg; + + for (i = 0; i < 2; i++) { + inset->mask[i].field_idx = ((mask_reg[i] >> 16) & 0x3F); + inset->mask[i].mask = mask_reg[i] & 0xFFFF; + } + + return 0; +} + +int +rte_pmd_i40e_inset_set(uint16_t port, uint8_t pctype, + struct rte_pmd_i40e_inset *inset, + enum rte_pmd_i40e_inset_type inset_type) +{ + struct rte_eth_dev *dev; + struct i40e_hw *hw; + struct i40e_pf *pf; + uint64_t inset_reg; + uint32_t mask_reg[2]; + int i; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + + if (!is_i40e_supported(dev)) + return -ENOTSUP; + + if (pctype > 63) + return -EINVAL; + + hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + + if (pf->support_multi_driver) { + PMD_DRV_LOG(ERR, "Input set configuration is not supported."); + return -ENOTSUP; + } + + inset_reg = inset->inset; + for (i = 0; i < 2; i++) + mask_reg[i] = (inset->mask[i].field_idx << 16) | + inset->mask[i].mask; + + switch (inset_type) { + case INSET_HASH: + i40e_check_write_global_reg(hw, I40E_GLQF_HASH_INSET(0, pctype), + (uint32_t)(inset_reg & UINT32_MAX)); + i40e_check_write_global_reg(hw, I40E_GLQF_HASH_INSET(1, pctype), + (uint32_t)((inset_reg >> + I40E_32_BIT_WIDTH) & UINT32_MAX)); + for (i = 0; i < 2; i++) + i40e_check_write_global_reg(hw, + I40E_GLQF_HASH_MSK(i, pctype), + mask_reg[i]); + break; + case INSET_FDIR: + i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0), + (uint32_t)(inset_reg & UINT32_MAX)); + i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1), + (uint32_t)((inset_reg >> + I40E_32_BIT_WIDTH) & UINT32_MAX)); + for (i = 0; i < 2; i++) + i40e_check_write_global_reg(hw, + I40E_GLQF_FD_MSK(i, pctype), + mask_reg[i]); + break; + case INSET_FDIR_FLX: + i40e_check_write_reg(hw, I40E_PRTQF_FD_FLXINSET(pctype), + (uint32_t)(inset_reg & UINT32_MAX)); + for (i = 0; i < 2; i++) + i40e_check_write_reg(hw, I40E_PRTQF_FD_MSK(pctype, i), + mask_reg[i]); + break; + default: + PMD_DRV_LOG(ERR, "Unsupported input set type."); + return -EINVAL; + } + + I40E_WRITE_FLUSH(hw); + return 0; +} + +int +rte_pmd_i40e_set_switch_dev(uint16_t port_id, struct rte_eth_dev *switch_dev) +{ + struct rte_eth_dev *i40e_dev; + struct i40e_hw *hw; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); + + i40e_dev = &rte_eth_devices[port_id]; + if (!is_i40e_supported(i40e_dev)) + return -ENOTSUP; + + hw = I40E_DEV_PRIVATE_TO_HW(i40e_dev->data->dev_private); + if (!hw) + return -1; + + hw->switch_dev = switch_dev; + + return 0; +} diff --git a/src/spdk/dpdk/drivers/net/i40e/rte_pmd_i40e.h b/src/spdk/dpdk/drivers/net/i40e/rte_pmd_i40e.h new file mode 100644 index 000000000..915cdf076 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/i40e/rte_pmd_i40e.h @@ -0,0 +1,1082 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Intel Corporation + */ + +#ifndef _PMD_I40E_H_ +#define _PMD_I40E_H_ + +/** + * @file rte_pmd_i40e.h + * + * i40e PMD specific functions. 
+ * + * @b EXPERIMENTAL: this API may change, or be removed, without prior notice + * + */ + +#include + +/** + * Response sent back to i40e driver from user app after callback + */ +enum rte_pmd_i40e_mb_event_rsp { + RTE_PMD_I40E_MB_EVENT_NOOP_ACK, /**< skip mbox request and ACK */ + RTE_PMD_I40E_MB_EVENT_NOOP_NACK, /**< skip mbox request and NACK */ + RTE_PMD_I40E_MB_EVENT_PROCEED, /**< proceed with mbox request */ + RTE_PMD_I40E_MB_EVENT_MAX /**< max value of this enum */ +}; + +/** + * Data sent to the user application when the callback is executed. + */ +struct rte_pmd_i40e_mb_event_param { + uint16_t vfid; /**< Virtual Function number */ + uint16_t msg_type; /**< VF to PF message type, see virtchnl_ops */ + uint16_t retval; /**< return value */ + void *msg; /**< pointer to message */ + uint16_t msglen; /**< length of the message */ +}; + +/** + * Option of package processing. + */ +enum rte_pmd_i40e_package_op { + RTE_PMD_I40E_PKG_OP_UNDEFINED = 0, + RTE_PMD_I40E_PKG_OP_WR_ADD, /**< load package and add to info list */ + RTE_PMD_I40E_PKG_OP_WR_DEL, /**< load package and delete from info list */ + RTE_PMD_I40E_PKG_OP_WR_ONLY, /**< load package without modifying info list */ + RTE_PMD_I40E_PKG_OP_MAX = 32 +}; + +/** + * Types of package information. + */ +enum rte_pmd_i40e_package_info { + RTE_PMD_I40E_PKG_INFO_UNDEFINED = 0, + RTE_PMD_I40E_PKG_INFO_GLOBAL_HEADER, + RTE_PMD_I40E_PKG_INFO_GLOBAL_NOTES_SIZE, + RTE_PMD_I40E_PKG_INFO_GLOBAL_NOTES, + RTE_PMD_I40E_PKG_INFO_GLOBAL_MAX = 1024, + RTE_PMD_I40E_PKG_INFO_HEADER, + RTE_PMD_I40E_PKG_INFO_DEVID_NUM, + RTE_PMD_I40E_PKG_INFO_DEVID_LIST, + RTE_PMD_I40E_PKG_INFO_PROTOCOL_NUM, + RTE_PMD_I40E_PKG_INFO_PROTOCOL_LIST, + RTE_PMD_I40E_PKG_INFO_PCTYPE_NUM, + RTE_PMD_I40E_PKG_INFO_PCTYPE_LIST, + RTE_PMD_I40E_PKG_INFO_PTYPE_NUM, + RTE_PMD_I40E_PKG_INFO_PTYPE_LIST, + RTE_PMD_I40E_PKG_INFO_MAX = (int)0xFFFFFFFF +}; + +/** + * Option types of queue region. + */ +enum rte_pmd_i40e_queue_region_op { + RTE_PMD_I40E_RSS_QUEUE_REGION_UNDEFINED, + /** add queue region set */ + RTE_PMD_I40E_RSS_QUEUE_REGION_SET, + /** add PF region pctype set */ + RTE_PMD_I40E_RSS_QUEUE_REGION_FLOWTYPE_SET, + /** add queue region user priority set */ + RTE_PMD_I40E_RSS_QUEUE_REGION_USER_PRIORITY_SET, + /** + * ALL configuration about queue region from up layer + * at first will only keep in DPDK software stored in driver, + * only after " FLUSH_ON ", it commit all configuration to HW. + * Because PMD had to set hardware configuration at a time, so + * it will record all up layer command at first. + */ + RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_ON, + /** + * "FLUSH_OFF " is just clean all configuration about queue + * region just now, and restore all to DPDK i40e driver default + * config when start up. + */ + RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_OFF, + RTE_PMD_I40E_RSS_QUEUE_REGION_INFO_GET, + RTE_PMD_I40E_RSS_QUEUE_REGION_OP_MAX +}; + +#define RTE_PMD_I40E_DDP_NAME_SIZE 32 +#define RTE_PMD_I40E_PCTYPE_MAX 64 +#define RTE_PMD_I40E_REGION_MAX_NUM 8 +#define RTE_PMD_I40E_MAX_USER_PRIORITY 8 + +/** + * Version for dynamic device personalization. + * Version in "major.minor.update.draft" format. + */ +struct rte_pmd_i40e_ddp_version { + uint8_t major; + uint8_t minor; + uint8_t update; + uint8_t draft; +}; + +/** + * Device ID for dynamic device personalization. + */ +struct rte_pmd_i40e_ddp_device_id { + uint32_t vendor_dev_id; + uint32_t sub_vendor_dev_id; +}; + +/** + * Profile information in profile info list. 
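 *
 * [Editor's illustrative sketch, not part of the upstream patch] Entries of
 * this type are read back with rte_pmd_i40e_get_ddp_list() (declared below)
 * into a caller-supplied buffer and walked through struct
 * rte_pmd_i40e_profile_list. The buffer size and port_id are placeholder
 * values.
 *
 * @code
 * uint8_t buf[4096];
 * struct rte_pmd_i40e_profile_list *lst =
 *         (struct rte_pmd_i40e_profile_list *)buf;
 * uint32_t i;
 *
 * if (rte_pmd_i40e_get_ddp_list(port_id, buf, sizeof(buf)) == 0) {
 *         for (i = 0; i < lst->p_count; i++)
 *                 printf("profile %u: %s (track id 0x%08x)\n", i,
 *                        (const char *)lst->p_info[i].name,
 *                        lst->p_info[i].track_id);
 * }
 * @endcode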
+ */ +struct rte_pmd_i40e_profile_info { + uint32_t track_id; + struct rte_pmd_i40e_ddp_version version; + uint8_t owner; + uint8_t reserved[7]; + uint8_t name[RTE_PMD_I40E_DDP_NAME_SIZE]; +}; + +#define RTE_PMD_I40E_DDP_OWNER_UNKNOWN 0xFF + +/** + * Profile information list returned from HW. + */ +struct rte_pmd_i40e_profile_list { + uint32_t p_count; + struct rte_pmd_i40e_profile_info p_info[1]; +}; + +#define RTE_PMD_I40E_PROTO_NUM 6 +#define RTE_PMD_I40E_PROTO_UNUSED 0xFF + +/** + * Protocols information stored in profile + */ +struct rte_pmd_i40e_proto_info { + uint8_t proto_id; + char name[RTE_PMD_I40E_DDP_NAME_SIZE]; +}; + +/** + * Packet classification/ packet type information stored in profile + */ +struct rte_pmd_i40e_ptype_info { + uint8_t ptype_id; + uint8_t protocols[RTE_PMD_I40E_PROTO_NUM]; +}; + +/** + * ptype mapping table only accept RTE_PTYPE_XXX or "user defined" ptype. + * A ptype with MSB set will be regarded as a user defined ptype. + * Below macro help to create a user defined ptype. + */ +#define RTE_PMD_I40E_PTYPE_USER_DEFINE_MASK 0x80000000 + +struct rte_pmd_i40e_ptype_mapping { + uint16_t hw_ptype; /**< hardware defined packet type*/ + uint32_t sw_ptype; /**< software defined packet type */ +}; + +/** + * Queue region related information. + */ +struct rte_pmd_i40e_queue_region_conf { + /** the region id for this configuration */ + uint8_t region_id; + /** the pctype or hardware flowtype of packet, + * the specific index for each type has been defined + * in file i40e_type.h as enum i40e_filter_pctype. + */ + uint8_t hw_flowtype; + /** the start queue index for this region */ + uint8_t queue_start_index; + /** the total queue number of this queue region */ + uint8_t queue_num; + /** the packet's user priority for this region */ + uint8_t user_priority; +}; + +/* queue region info */ +struct rte_pmd_i40e_queue_region_info { + /** the region id for this configuration */ + uint8_t region_id; + /** the start queue index for this region */ + uint8_t queue_start_index; + /** the total queue number of this queue region */ + uint8_t queue_num; + /** the total number of user priority for this region */ + uint8_t user_priority_num; + /** the packet's user priority for this region */ + uint8_t user_priority[RTE_PMD_I40E_MAX_USER_PRIORITY]; + /** the total number of flowtype for this region */ + uint8_t flowtype_num; + /** + * the pctype or hardware flowtype of packet, + * the specific index for each type has been defined + * in file i40e_type.h as enum i40e_filter_pctype. + */ + uint8_t hw_flowtype[RTE_PMD_I40E_PCTYPE_MAX]; +}; + +struct rte_pmd_i40e_queue_regions { + /** the total number of queue region for this port */ + uint16_t queue_region_number; + struct rte_pmd_i40e_queue_region_info + region[RTE_PMD_I40E_REGION_MAX_NUM]; +}; + +/** + * Behavior will be taken if raw packet template is matched. + */ +enum rte_pmd_i40e_pkt_template_behavior { + RTE_PMD_I40E_PKT_TEMPLATE_ACCEPT, + RTE_PMD_I40E_PKT_TEMPLATE_REJECT, + RTE_PMD_I40E_PKT_TEMPLATE_PASSTHRU, +}; + +/** + * Flow director report status + * It defines what will be reported if raw packet template is matched. 
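 *
 * [Editor's illustrative sketch, not part of the upstream patch] The chosen
 * report status is applied through struct rte_pmd_i40e_pkt_template_conf
 * (declared below) when a raw packet template filter is programmed; packet
 * contents, pctype, queue and soft_id here are placeholder values.
 *
 * @code
 * uint8_t tmpl[64] = { 0 };
 * struct rte_pmd_i40e_pkt_template_conf conf = {
 *         .input = {
 *                 .pctype = 31,
 *                 .packet = tmpl,
 *                 .length = sizeof(tmpl),
 *         },
 *         .action = {
 *                 .rx_queue = 0,
 *                 .behavior = RTE_PMD_I40E_PKT_TEMPLATE_ACCEPT,
 *                 .report_status = RTE_PMD_I40E_PKT_TEMPLATE_REPORT_ID,
 *         },
 *         .soft_id = 1,
 * };
 *
 * rte_pmd_i40e_flow_add_del_packet_template(port_id, &conf, 1);
 * @endcode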
+ */ +enum rte_pmd_i40e_pkt_template_status { + /** report nothing */ + RTE_PMD_I40E_PKT_TEMPLATE_NO_REPORT_STATUS, + /** only report FD ID */ + RTE_PMD_I40E_PKT_TEMPLATE_REPORT_ID, + /** report FD ID and 4 flex bytes */ + RTE_PMD_I40E_PKT_TEMPLATE_REPORT_ID_FLEX_4, + /** report 8 flex bytes */ + RTE_PMD_I40E_PKT_TEMPLATE_REPORT_FLEX_8, +}; + +/** + * A structure used to define an action when raw packet template is matched. + */ +struct rte_pmd_i40e_pkt_template_action { + /** queue assigned to if raw packet template match */ + uint16_t rx_queue; + /** behavior will be taken */ + enum rte_pmd_i40e_pkt_template_behavior behavior; + /** status report option */ + enum rte_pmd_i40e_pkt_template_status report_status; + /** + * If report_status is RTE_PMD_I40E_PKT_TEMPLATE_REPORT_ID_FLEX_4 or + * RTE_PMD_I40E_PKT_TEMPLATE_REPORT_FLEX_8, flex_off specifies + * where the reported flex bytes start from in flexible payload. + */ + uint8_t flex_off; +}; + +/** + * A structure used to define the input for raw packet template. + */ +struct rte_pmd_i40e_pkt_template_input { + /** the pctype used for raw packet template */ + uint16_t pctype; + /** the buffer containing raw packet template */ + void *packet; + /** the length of buffer with raw packet template */ + uint32_t length; +}; + +/** + * A structure used to define the configuration parameters + * for raw packet template. + */ +struct rte_pmd_i40e_pkt_template_conf { + /** the input for raw packet template. */ + struct rte_pmd_i40e_pkt_template_input input; + /** the action to be taken when raw packet template is matched */ + struct rte_pmd_i40e_pkt_template_action action; + /** ID, an unique software index for the raw packet template filter */ + uint32_t soft_id; +}; + +enum rte_pmd_i40e_inset_type { + INSET_NONE = 0, + INSET_HASH, + INSET_FDIR, + INSET_FDIR_FLX, +}; + +struct rte_pmd_i40e_inset_mask { + uint8_t field_idx; + uint16_t mask; +}; + +struct rte_pmd_i40e_inset { + uint64_t inset; + struct rte_pmd_i40e_inset_mask mask[2]; +}; + +/** + * Add or remove raw packet template filter to Flow Director. + * + * @param port + * The port identifier of the Ethernet device. + * @param conf + * Specifies configuration parameters of raw packet template filter. + * @param add + * Specifies an action to be taken - add or remove raw packet template filter. + * @return + * - (0) if successful. + * - (-ENODEV) if *port* invalid. + * - (-EINVAL) if *conf* invalid. + * - (-ENOTSUP) not supported by firmware. + */ +int rte_pmd_i40e_flow_add_del_packet_template( + uint16_t port, + const struct rte_pmd_i40e_pkt_template_conf *conf, + uint8_t add); + +/** + * Notify VF when PF link status changes. + * + * @param port + * The port identifier of the Ethernet device. + * @param vf + * VF id. + * @return + * - (0) if successful. + * - (-ENODEV) if *port* invalid. + * - (-EINVAL) if *vf* invalid. + */ +int rte_pmd_i40e_ping_vfs(uint16_t port, uint16_t vf); + +/** + * Enable/Disable VF MAC anti spoofing. + * + * @param port + * The port identifier of the Ethernet device. + * @param vf_id + * VF on which to set MAC anti spoofing. + * @param on + * 1 - Enable VFs MAC anti spoofing. + * 0 - Disable VFs MAC anti spoofing. + * @return + * - (0) if successful. + * - (-ENODEV) if *port* invalid. + * - (-EINVAL) if bad parameter. + */ +int rte_pmd_i40e_set_vf_mac_anti_spoof(uint16_t port, + uint16_t vf_id, + uint8_t on); + +/** + * Enable/Disable VF VLAN anti spoofing. + * + * @param port + * The port identifier of the Ethernet device. 
+ * @param vf_id + * VF on which to set VLAN anti spoofing. + * @param on + * 1 - Enable VFs VLAN anti spoofing. + * 0 - Disable VFs VLAN anti spoofing. + * @return + * - (0) if successful. + * - (-ENODEV) if *port* invalid. + * - (-EINVAL) if bad parameter. + */ +int rte_pmd_i40e_set_vf_vlan_anti_spoof(uint16_t port, + uint16_t vf_id, + uint8_t on); + +/** + * Enable/Disable TX loopback on all the PF and VFs. + * + * @param port + * The port identifier of the Ethernet device. + * @param on + * 1 - Enable TX loopback. + * 0 - Disable TX loopback. + * @return + * - (0) if successful. + * - (-ENODEV) if *port* invalid. + * - (-EINVAL) if bad parameter. + */ +int rte_pmd_i40e_set_tx_loopback(uint16_t port, + uint8_t on); + +/** + * Enable/Disable VF unicast promiscuous mode. + * + * @param port + * The port identifier of the Ethernet device. + * @param vf_id + * VF on which to set. + * @param on + * 1 - Enable. + * 0 - Disable. + * @return + * - (0) if successful. + * - (-ENODEV) if *port* invalid. + * - (-EINVAL) if bad parameter. + */ +int rte_pmd_i40e_set_vf_unicast_promisc(uint16_t port, + uint16_t vf_id, + uint8_t on); + +/** + * Enable/Disable VF multicast promiscuous mode. + * + * @param port + * The port identifier of the Ethernet device. + * @param vf_id + * VF on which to set. + * @param on + * 1 - Enable. + * 0 - Disable. + * @return + * - (0) if successful. + * - (-ENODEV) if *port* invalid. + * - (-EINVAL) if bad parameter. + */ +int rte_pmd_i40e_set_vf_multicast_promisc(uint16_t port, + uint16_t vf_id, + uint8_t on); + +/** + * Set the VF MAC address. + * + * PF should set MAC address before VF initialized, if PF sets the MAC + * address after VF initialized, new MAC address won't be effective until + * VF reinitialize. + * + * This will remove all existing MAC filters. + * + * @param port + * The port identifier of the Ethernet device. + * @param vf_id + * VF id. + * @param mac_addr + * VF MAC address. + * @return + * - (0) if successful. + * - (-ENODEV) if *port* invalid. + * - (-EINVAL) if *vf* or *mac_addr* is invalid. + */ +int rte_pmd_i40e_set_vf_mac_addr(uint16_t port, uint16_t vf_id, + struct rte_ether_addr *mac_addr); + +/** + * Remove the VF MAC address. + * + * @param port + * The port identifier of the Ethernet device. + * @param vf_id + * VF id. + * @param mac_addr + * VF MAC address. + * @return + * - (0) if successful. + * - (-ENODEV) if *port* invalid. + * - (-EINVAL) if *vf* or *mac_addr* is invalid. + */ +int +rte_pmd_i40e_remove_vf_mac_addr(uint16_t port, uint16_t vf_id, + struct rte_ether_addr *mac_addr); + +/** + * Enable/Disable vf vlan strip for all queues in a pool + * + * @param port + * The port identifier of the Ethernet device. + * @param vf + * ID specifying VF. + * @param on + * 1 - Enable VF's vlan strip on RX queues. + * 0 - Disable VF's vlan strip on RX queues. + * + * @return + * - (0) if successful. + * - (-ENODEV) if *port* invalid. + * - (-EINVAL) if bad parameter. + */ +int +rte_pmd_i40e_set_vf_vlan_stripq(uint16_t port, uint16_t vf, uint8_t on); + +/** + * Enable/Disable vf vlan insert + * + * @param port + * The port identifier of the Ethernet device. + * @param vf_id + * ID specifying VF. + * @param vlan_id + * 0 - Disable VF's vlan insert. + * n - Enable; n is inserted as the vlan id. + * + * @return + * - (0) if successful. + * - (-ENODEV) if *port* invalid. + * - (-EINVAL) if bad parameter. 
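 *
 * [Editor's illustrative sketch, not part of the upstream patch] These per-VF
 * helpers are typically combined when a PF application provisions a VF, e.g.
 * enforcing VLAN 100 insertion together with anti-spoofing; port_id and vf_id
 * are placeholder values.
 *
 * @code
 * uint16_t vf_id = 0;
 *
 * rte_pmd_i40e_set_vf_mac_anti_spoof(port_id, vf_id, 1);
 * rte_pmd_i40e_set_vf_vlan_anti_spoof(port_id, vf_id, 1);
 * rte_pmd_i40e_set_vf_vlan_stripq(port_id, vf_id, 1);
 * rte_pmd_i40e_set_vf_vlan_insert(port_id, vf_id, 100);
 * @endcode
 *
 * Each call returns 0 on success and a negative errno-style value otherwise,
 * as documented above.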
+ */ +int rte_pmd_i40e_set_vf_vlan_insert(uint16_t port, uint16_t vf_id, + uint16_t vlan_id); + +/** + * Enable/Disable vf broadcast mode + * + * @param port + * The port identifier of the Ethernet device. + * @param vf_id + * ID specifying VF. + * @param on + * 0 - Disable broadcast. + * 1 - Enable broadcast. + * + * @return + * - (0) if successful. + * - (-ENODEV) if *port* invalid. + * - (-EINVAL) if bad parameter. + */ +int rte_pmd_i40e_set_vf_broadcast(uint16_t port, uint16_t vf_id, + uint8_t on); + +/** + * Enable/Disable vf vlan tag + * + * @param port + * The port identifier of the Ethernet device. + * @param vf_id + * ID specifying VF. + * @param on + * 0 - Disable VF's vlan tag. + * n - Enable VF's vlan tag. + * + * @return + * - (0) if successful. + * - (-ENODEV) if *port* invalid. + * - (-EINVAL) if bad parameter. + */ +int rte_pmd_i40e_set_vf_vlan_tag(uint16_t port, uint16_t vf_id, uint8_t on); + +/** + * Enable/Disable VF VLAN filter + * + * @param port + * The port identifier of the Ethernet device. + * @param vlan_id + * ID specifying VLAN + * @param vf_mask + * Mask to filter VF's + * @param on + * 0 - Disable VF's VLAN filter. + * 1 - Enable VF's VLAN filter. + * + * @return + * - (0) if successful. + * - (-ENODEV) if *port* invalid. + * - (-EINVAL) if bad parameter. + * - (-ENOTSUP) not supported by firmware. + */ +int rte_pmd_i40e_set_vf_vlan_filter(uint16_t port, uint16_t vlan_id, + uint64_t vf_mask, uint8_t on); + +/** + * Get VF's statistics + * + * @param port + * The port identifier of the Ethernet device. + * @param vf_id + * VF on which to get. + * @param stats + * A pointer to a structure of type *rte_eth_stats* to be filled with + * the values of device counters for the following set of statistics: + * - *ipackets* with the total of successfully received packets. + * - *opackets* with the total of successfully transmitted packets. + * - *ibytes* with the total of successfully received bytes. + * - *obytes* with the total of successfully transmitted bytes. + * - *ierrors* with the total of erroneous received packets. + * - *oerrors* with the total of failed transmitted packets. + * @return + * - (0) if successful. + * - (-ENODEV) if *port* invalid. + * - (-EINVAL) if bad parameter. + */ + +int rte_pmd_i40e_get_vf_stats(uint16_t port, + uint16_t vf_id, + struct rte_eth_stats *stats); + +/** + * Clear VF's statistics + * + * @param port + * The port identifier of the Ethernet device. + * @param vf_id + * VF on which to get. + * @return + * - (0) if successful. + * - (-ENODEV) if *port* invalid. + * - (-EINVAL) if bad parameter. + */ +int rte_pmd_i40e_reset_vf_stats(uint16_t port, + uint16_t vf_id); + +/** + * Set VF's max bandwidth. + * + * Per VF bandwidth limitation and per TC bandwidth limitation cannot + * be enabled in parallel. If per TC bandwidth is enabled, this function + * will disable it. + * + * @param port + * The port identifier of the Ethernet device. + * @param vf_id + * ID specifying VF. + * @param bw + * Bandwidth for this VF. + * The value should be an absolute bandwidth in Mbps. + * The bandwidth is a L2 bandwidth counting the bytes of ethernet packets. + * Not count the bytes added by physical layer. + * @return + * - (0) if successful. + * - (-ENODEV) if *port* invalid. + * - (-EINVAL) if bad parameter. + * - (-ENOTSUP) not supported by firmware. + */ +int rte_pmd_i40e_set_vf_max_bw(uint16_t port, + uint16_t vf_id, + uint32_t bw); + +/** + * Set all the TCs' bandwidth weight on a specific VF. 
+ * + * The bw_weight means the percentage occupied by the TC. + * It can be taken as the relative min bandwidth setting. + * + * @param port + * The port identifier of the Ethernet device. + * @param vf_id + * ID specifying VF. + * @param tc_num + * Number of TCs. + * @param bw_weight + * An array of relative bandwidth weight for all the TCs. + * The summary of the bw_weight should be 100. + * @return + * - (0) if successful. + * - (-ENODEV) if *port* invalid. + * - (-EINVAL) if bad parameter. + * - (-ENOTSUP) not supported by firmware. + */ +int rte_pmd_i40e_set_vf_tc_bw_alloc(uint16_t port, + uint16_t vf_id, + uint8_t tc_num, + uint8_t *bw_weight); + +/** + * Set a specific TC's max bandwidth on a specific VF. + * + * @param port + * The port identifier of the Ethernet device. + * @param vf_id + * ID specifying VF. + * @param tc_no + * Number specifying TC. + * @param bw + * Max bandwidth for this TC. + * The value should be an absolute bandwidth in Mbps. + * The bandwidth is a L2 bandwidth counting the bytes of ethernet packets. + * Not count the bytes added by physical layer. + * @return + * - (0) if successful. + * - (-ENODEV) if *port* invalid. + * - (-EINVAL) if bad parameter. + * - (-ENOTSUP) not supported by firmware. + */ +int rte_pmd_i40e_set_vf_tc_max_bw(uint16_t port, + uint16_t vf_id, + uint8_t tc_no, + uint32_t bw); + +/** + * Set some TCs to strict priority mode on a physical port. + * + * @param port + * The port identifier of the Ethernet device. + * @param tc_map + * A bit map for the TCs. + * @return + * - (0) if successful. + * - (-ENODEV) if *port* invalid. + * - (-EINVAL) if bad parameter. + * - (-ENOTSUP) not supported by firmware. + */ +int rte_pmd_i40e_set_tc_strict_prio(uint16_t port, uint8_t tc_map); + +/** + * Load/Unload a ddp package + * + * @param port + * The port identifier of the Ethernet device. + * @param buff + * buffer of package. + * @param size + * size of buffer. + * @param op + * Operation of package processing + * @return + * - (0) if successful. + * - (-ENODEV) if *port* invalid. + * - (-EINVAL) if bad parameter. + * - (-EEXIST) if profile exists. + * - (-EACCES) if profile does not exist. + * - (-ENOTSUP) if operation not supported. + */ +int rte_pmd_i40e_process_ddp_package(uint16_t port, uint8_t *buff, + uint32_t size, + enum rte_pmd_i40e_package_op op); + +/** + * rte_pmd_i40e_get_ddp_info - Get profile's info + * @param pkg + * buffer of package. + * @param pkg_size + * package buffer size + * @param info + * buffer for response + * @param size + * response buffer size + * @param type + * type of information requested + * @return + * - (0) if successful. + * - (-ENOTSUP) if information type not supported by the profile. + * - (-EINVAL) if bad parameter. + */ +int rte_pmd_i40e_get_ddp_info(uint8_t *pkg, uint32_t pkg_size, + uint8_t *info, uint32_t size, + enum rte_pmd_i40e_package_info type); + +/** + * rte_pmd_i40e_get_ddp_list - Get loaded profile list + * @param port + * port id + * @param buff + * buffer for response + * @param size + * buffer size + * @return + * - (0) if successful. + * - (-ENODEV) if *port* invalid. + * - (-EINVAL) if bad parameter. + */ +int rte_pmd_i40e_get_ddp_list(uint16_t port, uint8_t *buff, uint32_t size); + +/** + * Update hardware defined ptype to software defined packet type + * mapping table. + * + * @param port + * pointer to port identifier of the device. + * @param mapping_items + * the base address of the mapping items array. + * @param count + * number of mapping items. 
+ * @param exclusive + * the flag indicate different ptype mapping update method. + * -(0) only overwrite referred PTYPE mapping, + * keep other PTYPEs mapping unchanged. + * -(!0) overwrite referred PTYPE mapping, + * set other PTYPEs maps to PTYPE_UNKNOWN. + */ +int rte_pmd_i40e_ptype_mapping_update( + uint16_t port, + struct rte_pmd_i40e_ptype_mapping *mapping_items, + uint16_t count, + uint8_t exclusive); + +/** + * Reset hardware defined ptype to software defined ptype + * mapping table to default. + * + * @param port + * pointer to port identifier of the device + */ +int rte_pmd_i40e_ptype_mapping_reset(uint16_t port); + +/** + * Get hardware defined ptype to software defined ptype + * mapping items. + * + * @param port + * pointer to port identifier of the device. + * @param mapping_items + * the base address of the array to store returned items. + * @param size + * the size of the input array. + * @param count + * the place to store the number of returned items. + * @param valid_only + * -(0) return full mapping table. + * -(!0) only return mapping items which packet_type != RTE_PTYPE_UNKNOWN. + */ +int rte_pmd_i40e_ptype_mapping_get( + uint16_t port, + struct rte_pmd_i40e_ptype_mapping *mapping_items, + uint16_t size, + uint16_t *count, + uint8_t valid_only); + +/** + * Replace a specific or a group of software defined ptypes + * with a new one + * + * @param port + * pointer to port identifier of the device + * @param target + * the packet type to be replaced + * @param mask + * -(0) target represent a specific software defined ptype. + * -(!0) target is a mask to represent a group of software defined ptypes. + * @param pkt_type + * the new packet type to overwrite + */ +int rte_pmd_i40e_ptype_mapping_replace(uint16_t port, + uint32_t target, + uint8_t mask, + uint32_t pkt_type); + +/** + * Add a VF MAC address. + * + * Add more MAC address for VF. The existing MAC addresses + * are still effective. + * + * @param port + * The port identifier of the Ethernet device. + * @param vf_id + * VF id. + * @param mac_addr + * VF MAC address. + * @return + * - (0) if successful. + * - (-ENODEV) if *port* invalid. + * - (-EINVAL) if *vf* or *mac_addr* is invalid. + */ +int rte_pmd_i40e_add_vf_mac_addr(uint16_t port, uint16_t vf_id, + struct rte_ether_addr *mac_addr); + +#define RTE_PMD_I40E_PCTYPE_MAX 64 +#define RTE_PMD_I40E_FLOW_TYPE_MAX 64 + +struct rte_pmd_i40e_flow_type_mapping { + uint16_t flow_type; /**< software defined flow type*/ + uint64_t pctype; /**< hardware defined pctype */ +}; + +/** + * Update hardware defined pctype to software defined flow type + * mapping table. + * + * @param port + * pointer to port identifier of the device. + * @param mapping_items + * the base address of the mapping items array. + * @param count + * number of mapping items. + * @param exclusive + * the flag indicate different pctype mapping update method. + * -(0) only overwrite referred PCTYPE mapping, + * keep other PCTYPEs mapping unchanged. + * -(!0) overwrite referred PCTYPE mapping, + * set other PCTYPEs maps to PCTYPE_INVALID. + */ +int rte_pmd_i40e_flow_type_mapping_update( + uint16_t port, + struct rte_pmd_i40e_flow_type_mapping *mapping_items, + uint16_t count, + uint8_t exclusive); + +/** + * Get software defined flow type to hardware defined pctype + * mapping items. + * + * @param port + * pointer to port identifier of the device. + * @param mapping_items + * the base address of the array to store returned items. 
+ * array should be allocated by caller with minimum size of + * RTE_PMD_I40E_FLOW_TYPE_MAX items + */ +int rte_pmd_i40e_flow_type_mapping_get( + uint16_t port, + struct rte_pmd_i40e_flow_type_mapping *mapping_items); + +/** + * Reset hardware defined pctype to software defined flow type + * mapping table to default. + * + * @param port + * pointer to port identifier of the device + */ +int rte_pmd_i40e_flow_type_mapping_reset(uint16_t port); + +/** + * On the PF, find VF index based on VF MAC address + * + * @param port + * pointer to port identifier of the device + * @param vf_mac + * the mac address of the vf to determine index of + * @return + * The index of vfid If successful. + * -EINVAL: vf mac address does not exist for this port + * -ENOTSUP: i40e not supported for this port. + */ +int rte_pmd_i40e_query_vfid_by_mac(uint16_t port, + const struct rte_ether_addr *vf_mac); + +/** + * Do RSS queue region configuration for that port as + * the command option type + * + * @param port_id + * The port identifier of the Ethernet device. + * @param op_type + * Queue region operation type + * @param arg + * Queue region operation type specific data + */ +int rte_pmd_i40e_rss_queue_region_conf(uint16_t port_id, + enum rte_pmd_i40e_queue_region_op op_type, void *arg); + +int rte_pmd_i40e_cfg_hash_inset(uint16_t port, + uint64_t pctype, uint64_t inset); + +/** + * Get input set + * + * @param port + * The port identifier of the Ethernet device. + * @param pctype + * HW pctype. + * @param inset + * Buffer for input set info. + * @param inset_type + * Type of input set. + * @return + * - (0) if successful. + * - (-ENODEV) if *port* invalid. + * - (-EINVAL) if bad parameter. + * - (-ENOTSUP) if operation not supported. + */ +int rte_pmd_i40e_inset_get(uint16_t port, uint8_t pctype, + struct rte_pmd_i40e_inset *inset, + enum rte_pmd_i40e_inset_type inset_type); + +/** + * Set input set + * + * @param port + * The port identifier of the Ethernet device. + * @param pctype + * HW pctype. + * @param inset + * Input set info. + * @param inset_type + * Type of input set. + * @return + * - (0) if successful. + * - (-ENODEV) if *port* invalid. + * - (-EINVAL) if bad parameter. + * - (-ENOTSUP) if operation not supported. + */ +int rte_pmd_i40e_inset_set(uint16_t port, uint8_t pctype, + struct rte_pmd_i40e_inset *inset, + enum rte_pmd_i40e_inset_type inset_type); + +/** + * Get bit value for some field index + * + * @param inset + * Input set value. + * @param field_idx + * Field index for input set. + * @return + * - (1) if set. + * - (0) if cleared. + */ +static inline int +rte_pmd_i40e_inset_field_get(uint64_t inset, uint8_t field_idx) +{ + uint8_t bit_idx; + + if (field_idx > 63) + return 0; + + bit_idx = 63 - field_idx; + if (inset & (1ULL << bit_idx)) + return 1; + + return 0; +} + +/** + * Set bit value for some field index + * + * @param inset + * Input set value. + * @param field_idx + * Field index for input set. + * @return + * - (-1) if failed. + * - (0) if success. + */ +static inline int +rte_pmd_i40e_inset_field_set(uint64_t *inset, uint8_t field_idx) +{ + uint8_t bit_idx; + + if (field_idx > 63) + return -1; + + bit_idx = 63 - field_idx; + *inset = *inset | (1ULL << bit_idx); + + return 0; +} + +/** + * Clear bit value for some field index + * + * @param inset + * Input set value. + * @param field_idx + * Field index for input set. + * @return + * - (-1) if failed. + * - (0) if success. 
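 *
 * [Editor's illustrative sketch, not part of the upstream patch] These field
 * helpers are normally used between rte_pmd_i40e_inset_get() and
 * rte_pmd_i40e_inset_set(), i.e. a read-modify-write of one pctype's input
 * set; the pctype and field index below are placeholder values.
 *
 * @code
 * struct rte_pmd_i40e_inset inset;
 * uint8_t pctype = 31;
 *
 * if (rte_pmd_i40e_inset_get(port_id, pctype, &inset, INSET_HASH) == 0) {
 *         rte_pmd_i40e_inset_field_set(&inset.inset, 15);
 *         rte_pmd_i40e_inset_set(port_id, pctype, &inset, INSET_HASH);
 * }
 * @endcode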
+ */ +static inline int +rte_pmd_i40e_inset_field_clear(uint64_t *inset, uint8_t field_idx) +{ + uint8_t bit_idx; + + if (field_idx > 63) + return -1; + + bit_idx = 63 - field_idx; + *inset = *inset & ~(1ULL << bit_idx); + + return 0; +} + +/** + * For ipn3ke, i40e works with FPGA. + * In this situation, i40e get link status from fpga, + * fpga works as switch_dev for i40e. + * This function set switch_dev for i40e. + * + * @param port_id + * port_id of i40e device to be set switch device. + * @param switch_dev + * target switch device from which i40e device to get link status from. + * @return + * - (less than 0) if failed. + * - (0) if success. + */ +__rte_experimental +int +rte_pmd_i40e_set_switch_dev(uint16_t port_id, struct rte_eth_dev *switch_dev); + +#endif /* _PMD_I40E_H_ */ diff --git a/src/spdk/dpdk/drivers/net/i40e/rte_pmd_i40e_version.map b/src/spdk/dpdk/drivers/net/i40e/rte_pmd_i40e_version.map new file mode 100644 index 000000000..c92c0cf46 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/i40e/rte_pmd_i40e_version.map @@ -0,0 +1,46 @@ +DPDK_20.0 { + global: + + rte_pmd_i40e_add_vf_mac_addr; + rte_pmd_i40e_flow_add_del_packet_template; + rte_pmd_i40e_flow_type_mapping_get; + rte_pmd_i40e_flow_type_mapping_reset; + rte_pmd_i40e_flow_type_mapping_update; + rte_pmd_i40e_get_ddp_info; + rte_pmd_i40e_get_ddp_list; + rte_pmd_i40e_get_vf_stats; + rte_pmd_i40e_inset_get; + rte_pmd_i40e_inset_set; + rte_pmd_i40e_ping_vfs; + rte_pmd_i40e_process_ddp_package; + rte_pmd_i40e_ptype_mapping_get; + rte_pmd_i40e_ptype_mapping_replace; + rte_pmd_i40e_ptype_mapping_reset; + rte_pmd_i40e_ptype_mapping_update; + rte_pmd_i40e_query_vfid_by_mac; + rte_pmd_i40e_reset_vf_stats; + rte_pmd_i40e_rss_queue_region_conf; + rte_pmd_i40e_set_tc_strict_prio; + rte_pmd_i40e_set_tx_loopback; + rte_pmd_i40e_set_vf_broadcast; + rte_pmd_i40e_set_vf_mac_addr; + rte_pmd_i40e_set_vf_mac_anti_spoof; + rte_pmd_i40e_set_vf_max_bw; + rte_pmd_i40e_set_vf_multicast_promisc; + rte_pmd_i40e_set_vf_tc_bw_alloc; + rte_pmd_i40e_set_vf_tc_max_bw; + rte_pmd_i40e_set_vf_unicast_promisc; + rte_pmd_i40e_set_vf_vlan_anti_spoof; + rte_pmd_i40e_set_vf_vlan_filter; + rte_pmd_i40e_set_vf_vlan_insert; + rte_pmd_i40e_set_vf_vlan_stripq; + rte_pmd_i40e_set_vf_vlan_tag; + + local: *; +}; + +EXPERIMENTAL { + global: + + rte_pmd_i40e_set_switch_dev; +}; diff --git a/src/spdk/dpdk/drivers/net/iavf/Makefile b/src/spdk/dpdk/drivers/net/iavf/Makefile new file mode 100644 index 000000000..792cbb7f7 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/iavf/Makefile @@ -0,0 +1,54 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2017 Intel Corporation + +include $(RTE_SDK)/mk/rte.vars.mk + +# +# library name +# +LIB = librte_pmd_iavf.a + +CFLAGS += -I$(RTE_SDK)/drivers/common/iavf +CFLAGS += -O3 $(WERROR_FLAGS) -Wno-strict-aliasing +LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring +LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs -lrte_hash +LDLIBS += -lrte_bus_pci +LDLIBS += -lrte_common_iavf + +EXPORT_MAP := rte_pmd_iavf_version.map + +# +# all source are stored in SRCS-y +# +SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_ethdev.c +SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_vchnl.c +SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_rxtx.c +SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_generic_flow.c +SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_fdir.c +SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_hash.c +ifeq ($(CONFIG_RTE_ARCH_X86), y) +SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_rxtx_vec_sse.c +endif + +ifeq ($(CONFIG_RTE_LIBRTE_IAVF_PMD), y) + ifeq 
($(findstring RTE_MACHINE_CPUFLAG_AVX2,$(CFLAGS)),RTE_MACHINE_CPUFLAG_AVX2) + CC_AVX2_SUPPORT=1 + else + CC_AVX2_SUPPORT=\ + $(shell $(CC) -march=core-avx2 -dM -E - &1 | \ + grep -q AVX2 && echo 1) + ifeq ($(CC_AVX2_SUPPORT), 1) + ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y) + CFLAGS_iavf_rxtx_vec_avx2.o += -march=core-avx2 + else + CFLAGS_iavf_rxtx_vec_avx2.o += -mavx2 + endif + endif + endif +endif + +ifeq ($(CC_AVX2_SUPPORT), 1) + SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_rxtx_vec_avx2.c +endif + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/src/spdk/dpdk/drivers/net/iavf/iavf.h b/src/spdk/dpdk/drivers/net/iavf/iavf.h new file mode 100644 index 000000000..9be8a2381 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/iavf/iavf.h @@ -0,0 +1,275 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Intel Corporation + */ + +#ifndef _IAVF_ETHDEV_H_ +#define _IAVF_ETHDEV_H_ + +#include +#include +#include +#include + +#include "iavf_log.h" + +#define IAVF_AQ_LEN 32 +#define IAVF_AQ_BUF_SZ 4096 +#define IAVF_RESET_WAIT_CNT 50 +#define IAVF_BUF_SIZE_MIN 1024 +#define IAVF_FRAME_SIZE_MAX 9728 +#define IAVF_QUEUE_BASE_ADDR_UNIT 128 + +#define IAVF_MAX_NUM_QUEUES 16 + +#define IAVF_NUM_MACADDR_MAX 64 + +#define IAVF_DEFAULT_RX_PTHRESH 8 +#define IAVF_DEFAULT_RX_HTHRESH 8 +#define IAVF_DEFAULT_RX_WTHRESH 0 + +#define IAVF_DEFAULT_RX_FREE_THRESH 32 + +#define IAVF_DEFAULT_TX_PTHRESH 32 +#define IAVF_DEFAULT_TX_HTHRESH 0 +#define IAVF_DEFAULT_TX_WTHRESH 0 + +#define IAVF_DEFAULT_TX_FREE_THRESH 32 +#define IAVF_DEFAULT_TX_RS_THRESH 32 + +#define IAVF_BASIC_OFFLOAD_CAPS ( \ + VF_BASE_MODE_OFFLOADS | \ + VIRTCHNL_VF_OFFLOAD_WB_ON_ITR | \ + VIRTCHNL_VF_OFFLOAD_RX_POLLING) + +#define IAVF_RSS_OFFLOAD_ALL ( \ + ETH_RSS_FRAG_IPV4 | \ + ETH_RSS_NONFRAG_IPV4_TCP | \ + ETH_RSS_NONFRAG_IPV4_UDP | \ + ETH_RSS_NONFRAG_IPV4_SCTP | \ + ETH_RSS_NONFRAG_IPV4_OTHER) + +#define IAVF_MISC_VEC_ID RTE_INTR_VEC_ZERO_OFFSET +#define IAVF_RX_VEC_START RTE_INTR_VEC_RXTX_OFFSET + +/* Default queue interrupt throttling time in microseconds */ +#define IAVF_ITR_INDEX_DEFAULT 0 +#define IAVF_QUEUE_ITR_INTERVAL_DEFAULT 32 /* 32 us */ +#define IAVF_QUEUE_ITR_INTERVAL_MAX 8160 /* 8160 us */ + +/* The overhead from MTU to max frame size. + * Considering QinQ packet, the VLAN tag needs to be counted twice. + */ +#define IAVF_VLAN_TAG_SIZE 4 +#define IAVF_ETH_OVERHEAD \ + (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + IAVF_VLAN_TAG_SIZE * 2) + +#define IAVF_32_BIT_WIDTH (CHAR_BIT * 4) +#define IAVF_48_BIT_WIDTH (CHAR_BIT * 6) +#define IAVF_48_BIT_MASK RTE_LEN2MASK(IAVF_48_BIT_WIDTH, uint64_t) + +#define IAVF_RX_DESC_EXT_STATUS_FLEXBH_MASK 0x03 +#define IAVF_RX_DESC_EXT_STATUS_FLEXBH_FD_ID 0x01 + +struct iavf_adapter; +struct iavf_rx_queue; +struct iavf_tx_queue; + +/* Structure that defines a VSI, associated with a adapter. 
*/ +struct iavf_vsi { + struct iavf_adapter *adapter; /* Backreference to associated adapter */ + uint16_t vsi_id; + uint16_t nb_qps; /* Number of queue pairs VSI can occupy */ + uint16_t nb_used_qps; /* Number of queue pairs VSI uses */ + uint16_t max_macaddrs; /* Maximum number of MAC addresses */ + uint16_t base_vector; + uint16_t msix_intr; /* The MSIX interrupt binds to VSI */ + struct virtchnl_eth_stats eth_stats_offset; +}; + +struct rte_flow; +TAILQ_HEAD(iavf_flow_list, rte_flow); + +struct iavf_flow_parser_node; +TAILQ_HEAD(iavf_parser_list, iavf_flow_parser_node); + +struct iavf_fdir_conf { + struct virtchnl_fdir_add add_fltr; + struct virtchnl_fdir_del del_fltr; + uint64_t input_set; + uint32_t flow_id; + uint32_t mark_flag; +}; + +struct iavf_fdir_info { + struct iavf_fdir_conf conf; +}; + +/* TODO: is that correct to assume the max number to be 16 ?*/ +#define IAVF_MAX_MSIX_VECTORS 16 + +/* Structure to store private data specific for VF instance. */ +struct iavf_info { + uint16_t num_queue_pairs; + uint16_t max_pkt_len; /* Maximum packet length */ + uint16_t mac_num; /* Number of MAC addresses */ + bool promisc_unicast_enabled; + bool promisc_multicast_enabled; + + struct virtchnl_version_info virtchnl_version; + struct virtchnl_vf_resource *vf_res; /* VF resource */ + struct virtchnl_vsi_resource *vsi_res; /* LAN VSI */ + uint64_t supported_rxdid; + + volatile enum virtchnl_ops pend_cmd; /* pending command not finished */ + uint32_t cmd_retval; /* return value of the cmd response from PF */ + uint8_t *aq_resp; /* buffer to store the adminq response from PF */ + + /* Event from pf */ + bool dev_closed; + bool link_up; + uint32_t link_speed; + + struct iavf_vsi vsi; + bool vf_reset; + uint64_t flags; + + uint8_t *rss_lut; + uint8_t *rss_key; + uint16_t nb_msix; /* number of MSI-X interrupts on Rx */ + uint16_t msix_base; /* msix vector base from */ + /* queue bitmask for each vector */ + uint16_t rxq_map[IAVF_MAX_MSIX_VECTORS]; + struct iavf_flow_list flow_list; + rte_spinlock_t flow_ops_lock; + struct iavf_parser_list rss_parser_list; + struct iavf_parser_list dist_parser_list; + + struct iavf_fdir_info fdir; /* flow director info */ +}; + +#define IAVF_MAX_PKT_TYPE 1024 + +/* Structure to store private data for each VF instance. 
*/ +struct iavf_adapter { + struct iavf_hw hw; + struct rte_eth_dev *eth_dev; + struct iavf_info vf; + + bool rx_bulk_alloc_allowed; + /* For vector PMD */ + bool rx_vec_allowed; + bool tx_vec_allowed; + const uint32_t *ptype_tbl; + bool stopped; + uint16_t fdir_ref_cnt; +}; + +/* IAVF_DEV_PRIVATE_TO */ +#define IAVF_DEV_PRIVATE_TO_ADAPTER(adapter) \ + ((struct iavf_adapter *)adapter) +#define IAVF_DEV_PRIVATE_TO_VF(adapter) \ + (&((struct iavf_adapter *)adapter)->vf) +#define IAVF_DEV_PRIVATE_TO_HW(adapter) \ + (&((struct iavf_adapter *)adapter)->hw) + +/* IAVF_VSI_TO */ +#define IAVF_VSI_TO_HW(vsi) \ + (&(((struct iavf_vsi *)vsi)->adapter->hw)) +#define IAVF_VSI_TO_VF(vsi) \ + (&(((struct iavf_vsi *)vsi)->adapter->vf)) +#define IAVF_VSI_TO_ETH_DEV(vsi) \ + (((struct iavf_vsi *)vsi)->adapter->eth_dev) + +static inline void +iavf_init_adminq_parameter(struct iavf_hw *hw) +{ + hw->aq.num_arq_entries = IAVF_AQ_LEN; + hw->aq.num_asq_entries = IAVF_AQ_LEN; + hw->aq.arq_buf_size = IAVF_AQ_BUF_SZ; + hw->aq.asq_buf_size = IAVF_AQ_BUF_SZ; +} + +static inline uint16_t +iavf_calc_itr_interval(int16_t interval) +{ + if (interval < 0 || interval > IAVF_QUEUE_ITR_INTERVAL_MAX) + interval = IAVF_QUEUE_ITR_INTERVAL_DEFAULT; + + /* Convert to hardware count, as writing each 1 represents 2 us */ + return interval / 2; +} + +/* structure used for sending and checking response of virtchnl ops */ +struct iavf_cmd_info { + enum virtchnl_ops ops; + uint8_t *in_args; /* buffer for sending */ + uint32_t in_args_size; /* buffer size for sending */ + uint8_t *out_buffer; /* buffer for response */ + uint32_t out_size; /* buffer size for response */ +}; + +/* notify current command done. Only call in case execute + * _atomic_set_cmd successfully. + */ +static inline void +_notify_cmd(struct iavf_info *vf, uint32_t msg_ret) +{ + vf->cmd_retval = msg_ret; + rte_wmb(); + vf->pend_cmd = VIRTCHNL_OP_UNKNOWN; +} + +/* clear current command. Only call in case execute + * _atomic_set_cmd successfully. + */ +static inline void +_clear_cmd(struct iavf_info *vf) +{ + rte_wmb(); + vf->pend_cmd = VIRTCHNL_OP_UNKNOWN; + vf->cmd_retval = VIRTCHNL_STATUS_SUCCESS; +} + +/* Check there is pending cmd in execution. If none, set new command. 
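 *
 * [Editor's note, not part of the upstream patch] Sketch of the intended
 * calling sequence in the virtchnl send path, using the struct iavf_cmd_info
 * defined above and a hypothetical send helper standing in for the real
 * admin-queue transmit code:
 *
 *   if (_atomic_set_cmd(vf, args->ops))
 *           return -1;                    // another command still pending
 *   send_args_to_pf(hw, args);            // hypothetical AQ send step
 *   // poll until the event handler calls _notify_cmd() with the PF's
 *   // return value, or time out and call _clear_cmd() to release the slot.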
*/ +static inline int +_atomic_set_cmd(struct iavf_info *vf, enum virtchnl_ops ops) +{ + int ret = rte_atomic32_cmpset(&vf->pend_cmd, VIRTCHNL_OP_UNKNOWN, ops); + + if (!ret) + PMD_DRV_LOG(ERR, "There is incomplete cmd %d", vf->pend_cmd); + + return !ret; +} + +int iavf_check_api_version(struct iavf_adapter *adapter); +int iavf_get_vf_resource(struct iavf_adapter *adapter); +void iavf_handle_virtchnl_msg(struct rte_eth_dev *dev); +int iavf_enable_vlan_strip(struct iavf_adapter *adapter); +int iavf_disable_vlan_strip(struct iavf_adapter *adapter); +int iavf_switch_queue(struct iavf_adapter *adapter, uint16_t qid, + bool rx, bool on); +int iavf_enable_queues(struct iavf_adapter *adapter); +int iavf_disable_queues(struct iavf_adapter *adapter); +int iavf_configure_rss_lut(struct iavf_adapter *adapter); +int iavf_configure_rss_key(struct iavf_adapter *adapter); +int iavf_configure_queues(struct iavf_adapter *adapter); +int iavf_get_supported_rxdid(struct iavf_adapter *adapter); +int iavf_config_irq_map(struct iavf_adapter *adapter); +void iavf_add_del_all_mac_addr(struct iavf_adapter *adapter, bool add); +int iavf_dev_link_update(struct rte_eth_dev *dev, + __rte_unused int wait_to_complete); +int iavf_query_stats(struct iavf_adapter *adapter, + struct virtchnl_eth_stats **pstats); +int iavf_config_promisc(struct iavf_adapter *adapter, bool enable_unicast, + bool enable_multicast); +int iavf_add_del_eth_addr(struct iavf_adapter *adapter, + struct rte_ether_addr *addr, bool add); +int iavf_add_del_vlan(struct iavf_adapter *adapter, uint16_t vlanid, bool add); +int iavf_fdir_add(struct iavf_adapter *adapter, struct iavf_fdir_conf *filter); +int iavf_fdir_del(struct iavf_adapter *adapter, struct iavf_fdir_conf *filter); +int iavf_fdir_check(struct iavf_adapter *adapter, + struct iavf_fdir_conf *filter); +int iavf_add_del_rss_cfg(struct iavf_adapter *adapter, + struct virtchnl_rss_cfg *rss_cfg, bool add); +#endif /* _IAVF_ETHDEV_H_ */ diff --git a/src/spdk/dpdk/drivers/net/iavf/iavf_ethdev.c b/src/spdk/dpdk/drivers/net/iavf/iavf_ethdev.c new file mode 100644 index 000000000..e09efffd1 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/iavf/iavf_ethdev.c @@ -0,0 +1,1586 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Intel Corporation + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "iavf.h" +#include "iavf_rxtx.h" +#include "iavf_generic_flow.h" + +static int iavf_dev_configure(struct rte_eth_dev *dev); +static int iavf_dev_start(struct rte_eth_dev *dev); +static void iavf_dev_stop(struct rte_eth_dev *dev); +static void iavf_dev_close(struct rte_eth_dev *dev); +static int iavf_dev_reset(struct rte_eth_dev *dev); +static int iavf_dev_info_get(struct rte_eth_dev *dev, + struct rte_eth_dev_info *dev_info); +static const uint32_t *iavf_dev_supported_ptypes_get(struct rte_eth_dev *dev); +static int iavf_dev_stats_get(struct rte_eth_dev *dev, + struct rte_eth_stats *stats); +static int iavf_dev_stats_reset(struct rte_eth_dev *dev); +static int iavf_dev_promiscuous_enable(struct rte_eth_dev *dev); +static int iavf_dev_promiscuous_disable(struct rte_eth_dev *dev); +static int iavf_dev_allmulticast_enable(struct rte_eth_dev *dev); +static int iavf_dev_allmulticast_disable(struct rte_eth_dev *dev); +static int iavf_dev_add_mac_addr(struct rte_eth_dev *dev, + struct rte_ether_addr *addr, + uint32_t index, + uint32_t 
pool); +static void iavf_dev_del_mac_addr(struct rte_eth_dev *dev, uint32_t index); +static int iavf_dev_vlan_filter_set(struct rte_eth_dev *dev, + uint16_t vlan_id, int on); +static int iavf_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask); +static int iavf_dev_rss_reta_update(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size); +static int iavf_dev_rss_reta_query(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size); +static int iavf_dev_rss_hash_update(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf); +static int iavf_dev_rss_hash_conf_get(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf); +static int iavf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu); +static int iavf_dev_set_default_mac_addr(struct rte_eth_dev *dev, + struct rte_ether_addr *mac_addr); +static int iavf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, + uint16_t queue_id); +static int iavf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, + uint16_t queue_id); +static int iavf_dev_filter_ctrl(struct rte_eth_dev *dev, + enum rte_filter_type filter_type, + enum rte_filter_op filter_op, + void *arg); + + +int iavf_logtype_init; +int iavf_logtype_driver; + +#ifdef RTE_LIBRTE_IAVF_DEBUG_RX +int iavf_logtype_rx; +#endif +#ifdef RTE_LIBRTE_IAVF_DEBUG_TX +int iavf_logtype_tx; +#endif +#ifdef RTE_LIBRTE_IAVF_DEBUG_TX_FREE +int iavf_logtype_tx_free; +#endif + +static const struct rte_pci_id pci_id_iavf_map[] = { + { RTE_PCI_DEVICE(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_ADAPTIVE_VF) }, + { .vendor_id = 0, /* sentinel */ }, +}; + +static const struct eth_dev_ops iavf_eth_dev_ops = { + .dev_configure = iavf_dev_configure, + .dev_start = iavf_dev_start, + .dev_stop = iavf_dev_stop, + .dev_close = iavf_dev_close, + .dev_reset = iavf_dev_reset, + .dev_infos_get = iavf_dev_info_get, + .dev_supported_ptypes_get = iavf_dev_supported_ptypes_get, + .link_update = iavf_dev_link_update, + .stats_get = iavf_dev_stats_get, + .stats_reset = iavf_dev_stats_reset, + .promiscuous_enable = iavf_dev_promiscuous_enable, + .promiscuous_disable = iavf_dev_promiscuous_disable, + .allmulticast_enable = iavf_dev_allmulticast_enable, + .allmulticast_disable = iavf_dev_allmulticast_disable, + .mac_addr_add = iavf_dev_add_mac_addr, + .mac_addr_remove = iavf_dev_del_mac_addr, + .vlan_filter_set = iavf_dev_vlan_filter_set, + .vlan_offload_set = iavf_dev_vlan_offload_set, + .rx_queue_start = iavf_dev_rx_queue_start, + .rx_queue_stop = iavf_dev_rx_queue_stop, + .tx_queue_start = iavf_dev_tx_queue_start, + .tx_queue_stop = iavf_dev_tx_queue_stop, + .rx_queue_setup = iavf_dev_rx_queue_setup, + .rx_queue_release = iavf_dev_rx_queue_release, + .tx_queue_setup = iavf_dev_tx_queue_setup, + .tx_queue_release = iavf_dev_tx_queue_release, + .mac_addr_set = iavf_dev_set_default_mac_addr, + .reta_update = iavf_dev_rss_reta_update, + .reta_query = iavf_dev_rss_reta_query, + .rss_hash_update = iavf_dev_rss_hash_update, + .rss_hash_conf_get = iavf_dev_rss_hash_conf_get, + .rxq_info_get = iavf_dev_rxq_info_get, + .txq_info_get = iavf_dev_txq_info_get, + .rx_queue_count = iavf_dev_rxq_count, + .rx_descriptor_status = iavf_dev_rx_desc_status, + .tx_descriptor_status = iavf_dev_tx_desc_status, + .mtu_set = iavf_dev_mtu_set, + .rx_queue_intr_enable = iavf_dev_rx_queue_intr_enable, + .rx_queue_intr_disable = iavf_dev_rx_queue_intr_disable, + .filter_ctrl = iavf_dev_filter_ctrl, +}; + +static int +iavf_dev_configure(struct rte_eth_dev *dev) +{ + struct iavf_adapter *ad = 
+ IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad); + struct rte_eth_conf *dev_conf = &dev->data->dev_conf; + + ad->rx_bulk_alloc_allowed = true; + /* Initialize to TRUE. If any of Rx queues doesn't meet the + * vector Rx/Tx preconditions, it will be reset. + */ + ad->rx_vec_allowed = true; + ad->tx_vec_allowed = true; + + if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) + dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH; + + /* Vlan stripping setting */ + if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN) { + if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP) + iavf_enable_vlan_strip(ad); + else + iavf_disable_vlan_strip(ad); + } + return 0; +} + +static int +iavf_init_rss(struct iavf_adapter *adapter) +{ + struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter); + struct rte_eth_rss_conf *rss_conf; + uint8_t i, j, nb_q; + int ret; + + rss_conf = &adapter->eth_dev->data->dev_conf.rx_adv_conf.rss_conf; + nb_q = RTE_MIN(adapter->eth_dev->data->nb_rx_queues, + IAVF_MAX_NUM_QUEUES); + + if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF)) { + PMD_DRV_LOG(DEBUG, "RSS is not supported"); + return -ENOTSUP; + } + if (adapter->eth_dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) { + PMD_DRV_LOG(WARNING, "RSS is enabled by PF by default"); + /* set all lut items to default queue */ + for (i = 0; i < vf->vf_res->rss_lut_size; i++) + vf->rss_lut[i] = 0; + ret = iavf_configure_rss_lut(adapter); + return ret; + } + + /* In IAVF, RSS enablement is set by PF driver. It is not supported + * to set based on rss_conf->rss_hf. + */ + + /* configure RSS key */ + if (!rss_conf->rss_key) { + /* Calculate the default hash key */ + for (i = 0; i <= vf->vf_res->rss_key_size; i++) + vf->rss_key[i] = (uint8_t)rte_rand(); + } else + rte_memcpy(vf->rss_key, rss_conf->rss_key, + RTE_MIN(rss_conf->rss_key_len, + vf->vf_res->rss_key_size)); + + /* init RSS LUT table */ + for (i = 0, j = 0; i < vf->vf_res->rss_lut_size; i++, j++) { + if (j >= nb_q) + j = 0; + vf->rss_lut[i] = j; + } + /* send virtchnnl ops to configure rss*/ + ret = iavf_configure_rss_lut(adapter); + if (ret) + return ret; + ret = iavf_configure_rss_key(adapter); + if (ret) + return ret; + + return 0; +} + +static int +iavf_init_rxq(struct rte_eth_dev *dev, struct iavf_rx_queue *rxq) +{ + struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_eth_dev_data *dev_data = dev->data; + uint16_t buf_size, max_pkt_len, len; + + buf_size = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM; + + /* Calculate the maximum packet length allowed */ + len = rxq->rx_buf_len * IAVF_MAX_CHAINED_RX_BUFFERS; + max_pkt_len = RTE_MIN(len, dev->data->dev_conf.rxmode.max_rx_pkt_len); + + /* Check if the jumbo frame and maximum packet length are set + * correctly. 
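 * That is, with DEV_RX_OFFLOAD_JUMBO_FRAME enabled the length must lie in
 * (RTE_ETHER_MAX_LEN, IAVF_FRAME_SIZE_MAX]; without it, in
 * [RTE_ETHER_MIN_LEN, RTE_ETHER_MAX_LEN]. (Editor's note summarizing the
 * checks below; not part of the upstream patch.)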
+ */ + if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) { + if (max_pkt_len <= RTE_ETHER_MAX_LEN || + max_pkt_len > IAVF_FRAME_SIZE_MAX) { + PMD_DRV_LOG(ERR, "maximum packet length must be " + "larger than %u and smaller than %u, " + "as jumbo frame is enabled", + (uint32_t)RTE_ETHER_MAX_LEN, + (uint32_t)IAVF_FRAME_SIZE_MAX); + return -EINVAL; + } + } else { + if (max_pkt_len < RTE_ETHER_MIN_LEN || + max_pkt_len > RTE_ETHER_MAX_LEN) { + PMD_DRV_LOG(ERR, "maximum packet length must be " + "larger than %u and smaller than %u, " + "as jumbo frame is disabled", + (uint32_t)RTE_ETHER_MIN_LEN, + (uint32_t)RTE_ETHER_MAX_LEN); + return -EINVAL; + } + } + + rxq->max_pkt_len = max_pkt_len; + if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) || + (rxq->max_pkt_len + 2 * IAVF_VLAN_TAG_SIZE) > buf_size) { + dev_data->scattered_rx = 1; + } + IAVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1); + IAVF_WRITE_FLUSH(hw); + + return 0; +} + +static int +iavf_init_queues(struct rte_eth_dev *dev) +{ + struct iavf_rx_queue **rxq = + (struct iavf_rx_queue **)dev->data->rx_queues; + int i, ret = IAVF_SUCCESS; + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + if (!rxq[i] || !rxq[i]->q_set) + continue; + ret = iavf_init_rxq(dev, rxq[i]); + if (ret != IAVF_SUCCESS) + break; + } + /* set rx/tx function to vector/scatter/single-segment + * according to parameters + */ + iavf_set_rx_function(dev); + iavf_set_tx_function(dev); + + return ret; +} + +static int iavf_config_rx_queues_irqs(struct rte_eth_dev *dev, + struct rte_intr_handle *intr_handle) +{ + struct iavf_adapter *adapter = + IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter); + struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter); + uint16_t interval, i; + int vec; + + if (rte_intr_cap_multiple(intr_handle) && + dev->data->dev_conf.intr_conf.rxq) { + if (rte_intr_efd_enable(intr_handle, dev->data->nb_rx_queues)) + return -1; + } + + if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) { + intr_handle->intr_vec = + rte_zmalloc("intr_vec", + dev->data->nb_rx_queues * sizeof(int), 0); + if (!intr_handle->intr_vec) { + PMD_DRV_LOG(ERR, "Failed to allocate %d rx intr_vec", + dev->data->nb_rx_queues); + return -1; + } + } + + if (!dev->data->dev_conf.intr_conf.rxq || + !rte_intr_dp_is_en(intr_handle)) { + /* Rx interrupt disabled, Map interrupt only for writeback */ + vf->nb_msix = 1; + if (vf->vf_res->vf_cap_flags & + VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) { + /* If WB_ON_ITR supports, enable it */ + vf->msix_base = IAVF_RX_VEC_START; + IAVF_WRITE_REG(hw, + IAVF_VFINT_DYN_CTLN1(vf->msix_base - 1), + IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK | + IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_MASK); + } else { + /* If no WB_ON_ITR offload flags, need to set + * interrupt for descriptor write back. 
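 * The ITR is then set to its maximum so that, presumably, this fallback
 * write-back interrupt fires as rarely as possible while still flushing
 * completed descriptors. (Editor's note; not part of the upstream patch.)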
+ */ + vf->msix_base = IAVF_MISC_VEC_ID; + + /* set ITR to max */ + interval = iavf_calc_itr_interval( + IAVF_QUEUE_ITR_INTERVAL_MAX); + IAVF_WRITE_REG(hw, IAVF_VFINT_DYN_CTL01, + IAVF_VFINT_DYN_CTL01_INTENA_MASK | + (IAVF_ITR_INDEX_DEFAULT << + IAVF_VFINT_DYN_CTL01_ITR_INDX_SHIFT) | + (interval << + IAVF_VFINT_DYN_CTL01_INTERVAL_SHIFT)); + } + IAVF_WRITE_FLUSH(hw); + /* map all queues to the same interrupt */ + for (i = 0; i < dev->data->nb_rx_queues; i++) + vf->rxq_map[vf->msix_base] |= 1 << i; + } else { + if (!rte_intr_allow_others(intr_handle)) { + vf->nb_msix = 1; + vf->msix_base = IAVF_MISC_VEC_ID; + for (i = 0; i < dev->data->nb_rx_queues; i++) { + vf->rxq_map[vf->msix_base] |= 1 << i; + intr_handle->intr_vec[i] = IAVF_MISC_VEC_ID; + } + PMD_DRV_LOG(DEBUG, + "vector %u are mapping to all Rx queues", + vf->msix_base); + } else { + /* If Rx interrupt is reuquired, and we can use + * multi interrupts, then the vec is from 1 + */ + vf->nb_msix = RTE_MIN(vf->vf_res->max_vectors, + intr_handle->nb_efd); + vf->msix_base = IAVF_RX_VEC_START; + vec = IAVF_RX_VEC_START; + for (i = 0; i < dev->data->nb_rx_queues; i++) { + vf->rxq_map[vec] |= 1 << i; + intr_handle->intr_vec[i] = vec++; + if (vec >= vf->nb_msix) + vec = IAVF_RX_VEC_START; + } + PMD_DRV_LOG(DEBUG, + "%u vectors are mapping to %u Rx queues", + vf->nb_msix, dev->data->nb_rx_queues); + } + } + + if (iavf_config_irq_map(adapter)) { + PMD_DRV_LOG(ERR, "config interrupt mapping failed"); + return -1; + } + return 0; +} + +static int +iavf_start_queues(struct rte_eth_dev *dev) +{ + struct iavf_rx_queue *rxq; + struct iavf_tx_queue *txq; + int i; + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + txq = dev->data->tx_queues[i]; + if (txq->tx_deferred_start) + continue; + if (iavf_dev_tx_queue_start(dev, i) != 0) { + PMD_DRV_LOG(ERR, "Fail to start queue %u", i); + return -1; + } + } + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = dev->data->rx_queues[i]; + if (rxq->rx_deferred_start) + continue; + if (iavf_dev_rx_queue_start(dev, i) != 0) { + PMD_DRV_LOG(ERR, "Fail to start queue %u", i); + return -1; + } + } + + return 0; +} + +static int +iavf_dev_start(struct rte_eth_dev *dev) +{ + struct iavf_adapter *adapter = + IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + struct rte_intr_handle *intr_handle = dev->intr_handle; + + PMD_INIT_FUNC_TRACE(); + + adapter->stopped = 0; + + vf->max_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len; + vf->num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues, + dev->data->nb_tx_queues); + + if (iavf_init_queues(dev) != 0) { + PMD_DRV_LOG(ERR, "failed to do Queue init"); + return -1; + } + + if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) { + if (iavf_init_rss(adapter) != 0) { + PMD_DRV_LOG(ERR, "configure rss failed"); + goto err_rss; + } + } + + if (iavf_configure_queues(adapter) != 0) { + PMD_DRV_LOG(ERR, "configure queues failed"); + goto err_queue; + } + + if (iavf_config_rx_queues_irqs(dev, intr_handle) != 0) { + PMD_DRV_LOG(ERR, "configure irq failed"); + goto err_queue; + } + /* re-enable intr again, because efd assign may change */ + if (dev->data->dev_conf.intr_conf.rxq != 0) { + rte_intr_disable(intr_handle); + rte_intr_enable(intr_handle); + } + + /* Set all mac addrs */ + iavf_add_del_all_mac_addr(adapter, true); + + if (iavf_start_queues(dev) != 0) { + PMD_DRV_LOG(ERR, "enable queues failed"); + goto err_mac; + } + + return 0; + +err_mac: + iavf_add_del_all_mac_addr(adapter, false); 
+err_queue: +err_rss: + return -1; +} + +static void +iavf_dev_stop(struct rte_eth_dev *dev) +{ + struct iavf_adapter *adapter = + IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + struct rte_intr_handle *intr_handle = dev->intr_handle; + + PMD_INIT_FUNC_TRACE(); + + if (adapter->stopped == 1) + return; + + iavf_stop_queues(dev); + + /* Disable the interrupt for Rx */ + rte_intr_efd_disable(intr_handle); + /* Rx interrupt vector mapping free */ + if (intr_handle->intr_vec) { + rte_free(intr_handle->intr_vec); + intr_handle->intr_vec = NULL; + } + + /* remove all mac addrs */ + iavf_add_del_all_mac_addr(adapter, false); + adapter->stopped = 1; +} + +static int +iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) +{ + struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + + dev_info->max_rx_queues = vf->vsi_res->num_queue_pairs; + dev_info->max_tx_queues = vf->vsi_res->num_queue_pairs; + dev_info->min_rx_bufsize = IAVF_BUF_SIZE_MIN; + dev_info->max_rx_pktlen = IAVF_FRAME_SIZE_MAX; + dev_info->hash_key_size = vf->vf_res->rss_key_size; + dev_info->reta_size = vf->vf_res->rss_lut_size; + dev_info->flow_type_rss_offloads = IAVF_RSS_OFFLOAD_ALL; + dev_info->max_mac_addrs = IAVF_NUM_MACADDR_MAX; + dev_info->rx_offload_capa = + DEV_RX_OFFLOAD_VLAN_STRIP | + DEV_RX_OFFLOAD_QINQ_STRIP | + DEV_RX_OFFLOAD_IPV4_CKSUM | + DEV_RX_OFFLOAD_UDP_CKSUM | + DEV_RX_OFFLOAD_TCP_CKSUM | + DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | + DEV_RX_OFFLOAD_SCATTER | + DEV_RX_OFFLOAD_JUMBO_FRAME | + DEV_RX_OFFLOAD_VLAN_FILTER | + DEV_RX_OFFLOAD_RSS_HASH; + dev_info->tx_offload_capa = + DEV_TX_OFFLOAD_VLAN_INSERT | + DEV_TX_OFFLOAD_QINQ_INSERT | + DEV_TX_OFFLOAD_IPV4_CKSUM | + DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_TCP_CKSUM | + DEV_TX_OFFLOAD_SCTP_CKSUM | + DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | + DEV_TX_OFFLOAD_TCP_TSO | + DEV_TX_OFFLOAD_VXLAN_TNL_TSO | + DEV_TX_OFFLOAD_GRE_TNL_TSO | + DEV_TX_OFFLOAD_IPIP_TNL_TSO | + DEV_TX_OFFLOAD_GENEVE_TNL_TSO | + DEV_TX_OFFLOAD_MULTI_SEGS; + + dev_info->default_rxconf = (struct rte_eth_rxconf) { + .rx_free_thresh = IAVF_DEFAULT_RX_FREE_THRESH, + .rx_drop_en = 0, + .offloads = 0, + }; + + dev_info->default_txconf = (struct rte_eth_txconf) { + .tx_free_thresh = IAVF_DEFAULT_TX_FREE_THRESH, + .tx_rs_thresh = IAVF_DEFAULT_TX_RS_THRESH, + .offloads = 0, + }; + + dev_info->rx_desc_lim = (struct rte_eth_desc_lim) { + .nb_max = IAVF_MAX_RING_DESC, + .nb_min = IAVF_MIN_RING_DESC, + .nb_align = IAVF_ALIGN_RING_DESC, + }; + + dev_info->tx_desc_lim = (struct rte_eth_desc_lim) { + .nb_max = IAVF_MAX_RING_DESC, + .nb_min = IAVF_MIN_RING_DESC, + .nb_align = IAVF_ALIGN_RING_DESC, + }; + + return 0; +} + +static const uint32_t * +iavf_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused) +{ + static const uint32_t ptypes[] = { + RTE_PTYPE_L2_ETHER, + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN, + RTE_PTYPE_L4_FRAG, + RTE_PTYPE_L4_ICMP, + RTE_PTYPE_L4_NONFRAG, + RTE_PTYPE_L4_SCTP, + RTE_PTYPE_L4_TCP, + RTE_PTYPE_L4_UDP, + RTE_PTYPE_UNKNOWN + }; + return ptypes; +} + +int +iavf_dev_link_update(struct rte_eth_dev *dev, + __rte_unused int wait_to_complete) +{ + struct rte_eth_link new_link; + struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + + /* Only read status info stored in VF, and the info is updated + * when receive LINK_CHANGE evnet from PF by Virtchnnl. 
+ */ + switch (vf->link_speed) { + case 10: + new_link.link_speed = ETH_SPEED_NUM_10M; + break; + case 100: + new_link.link_speed = ETH_SPEED_NUM_100M; + break; + case 1000: + new_link.link_speed = ETH_SPEED_NUM_1G; + break; + case 10000: + new_link.link_speed = ETH_SPEED_NUM_10G; + break; + case 20000: + new_link.link_speed = ETH_SPEED_NUM_20G; + break; + case 25000: + new_link.link_speed = ETH_SPEED_NUM_25G; + break; + case 40000: + new_link.link_speed = ETH_SPEED_NUM_40G; + break; + case 50000: + new_link.link_speed = ETH_SPEED_NUM_50G; + break; + case 100000: + new_link.link_speed = ETH_SPEED_NUM_100G; + break; + default: + new_link.link_speed = ETH_SPEED_NUM_NONE; + break; + } + + new_link.link_duplex = ETH_LINK_FULL_DUPLEX; + new_link.link_status = vf->link_up ? ETH_LINK_UP : + ETH_LINK_DOWN; + new_link.link_autoneg = !(dev->data->dev_conf.link_speeds & + ETH_LINK_SPEED_FIXED); + + if (rte_atomic64_cmpset((uint64_t *)&dev->data->dev_link, + *(uint64_t *)&dev->data->dev_link, + *(uint64_t *)&new_link) == 0) + return -1; + + return 0; +} + +static int +iavf_dev_promiscuous_enable(struct rte_eth_dev *dev) +{ + struct iavf_adapter *adapter = + IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter); + int ret; + + if (vf->promisc_unicast_enabled) + return 0; + + ret = iavf_config_promisc(adapter, true, vf->promisc_multicast_enabled); + if (!ret) + vf->promisc_unicast_enabled = true; + else + ret = -EAGAIN; + + return ret; +} + +static int +iavf_dev_promiscuous_disable(struct rte_eth_dev *dev) +{ + struct iavf_adapter *adapter = + IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter); + int ret; + + if (!vf->promisc_unicast_enabled) + return 0; + + ret = iavf_config_promisc(adapter, false, + vf->promisc_multicast_enabled); + if (!ret) + vf->promisc_unicast_enabled = false; + else + ret = -EAGAIN; + + return ret; +} + +static int +iavf_dev_allmulticast_enable(struct rte_eth_dev *dev) +{ + struct iavf_adapter *adapter = + IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter); + int ret; + + if (vf->promisc_multicast_enabled) + return 0; + + ret = iavf_config_promisc(adapter, vf->promisc_unicast_enabled, true); + if (!ret) + vf->promisc_multicast_enabled = true; + else + ret = -EAGAIN; + + return ret; +} + +static int +iavf_dev_allmulticast_disable(struct rte_eth_dev *dev) +{ + struct iavf_adapter *adapter = + IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter); + int ret; + + if (!vf->promisc_multicast_enabled) + return 0; + + ret = iavf_config_promisc(adapter, vf->promisc_unicast_enabled, false); + if (!ret) + vf->promisc_multicast_enabled = false; + else + ret = -EAGAIN; + + return ret; +} + +static int +iavf_dev_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr, + __rte_unused uint32_t index, + __rte_unused uint32_t pool) +{ + struct iavf_adapter *adapter = + IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter); + int err; + + if (rte_is_zero_ether_addr(addr)) { + PMD_DRV_LOG(ERR, "Invalid Ethernet Address"); + return -EINVAL; + } + + err = iavf_add_del_eth_addr(adapter, addr, true); + if (err) { + PMD_DRV_LOG(ERR, "fail to add MAC address"); + return -EIO; + } + + vf->mac_num++; + + return 0; +} + +static void +iavf_dev_del_mac_addr(struct rte_eth_dev *dev, uint32_t index) +{ + struct 
iavf_adapter *adapter = + IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter); + struct rte_ether_addr *addr; + int err; + + addr = &dev->data->mac_addrs[index]; + + err = iavf_add_del_eth_addr(adapter, addr, false); + if (err) + PMD_DRV_LOG(ERR, "fail to delete MAC address"); + + vf->mac_num--; +} + +static int +iavf_dev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) +{ + struct iavf_adapter *adapter = + IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter); + int err; + + if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)) + return -ENOTSUP; + + err = iavf_add_del_vlan(adapter, vlan_id, on); + if (err) + return -EIO; + return 0; +} + +static int +iavf_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask) +{ + struct iavf_adapter *adapter = + IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter); + struct rte_eth_conf *dev_conf = &dev->data->dev_conf; + int err; + + if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)) + return -ENOTSUP; + + /* Vlan stripping setting */ + if (mask & ETH_VLAN_STRIP_MASK) { + /* Enable or disable VLAN stripping */ + if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP) + err = iavf_enable_vlan_strip(adapter); + else + err = iavf_disable_vlan_strip(adapter); + + if (err) + return -EIO; + } + return 0; +} + +static int +iavf_dev_rss_reta_update(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size) +{ + struct iavf_adapter *adapter = + IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter); + uint8_t *lut; + uint16_t i, idx, shift; + int ret; + + if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF)) + return -ENOTSUP; + + if (reta_size != vf->vf_res->rss_lut_size) { + PMD_DRV_LOG(ERR, "The size of hash lookup table configured " + "(%d) doesn't match the number of hardware can " + "support (%d)", reta_size, vf->vf_res->rss_lut_size); + return -EINVAL; + } + + lut = rte_zmalloc("rss_lut", reta_size, 0); + if (!lut) { + PMD_DRV_LOG(ERR, "No memory can be allocated"); + return -ENOMEM; + } + /* store the old lut table temporarily */ + rte_memcpy(lut, vf->rss_lut, reta_size); + + for (i = 0; i < reta_size; i++) { + idx = i / RTE_RETA_GROUP_SIZE; + shift = i % RTE_RETA_GROUP_SIZE; + if (reta_conf[idx].mask & (1ULL << shift)) + lut[i] = reta_conf[idx].reta[shift]; + } + + rte_memcpy(vf->rss_lut, lut, reta_size); + /* send virtchnnl ops to configure rss*/ + ret = iavf_configure_rss_lut(adapter); + if (ret) /* revert back */ + rte_memcpy(vf->rss_lut, lut, reta_size); + rte_free(lut); + + return ret; +} + +static int +iavf_dev_rss_reta_query(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size) +{ + struct iavf_adapter *adapter = + IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter); + uint16_t i, idx, shift; + + if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF)) + return -ENOTSUP; + + if (reta_size != vf->vf_res->rss_lut_size) { + PMD_DRV_LOG(ERR, "The size of hash lookup table configured " + "(%d) doesn't match the number of hardware can " + "support (%d)", reta_size, vf->vf_res->rss_lut_size); + return -EINVAL; + } + + for (i = 0; i < reta_size; i++) { + idx = i / RTE_RETA_GROUP_SIZE; + shift = i % RTE_RETA_GROUP_SIZE; + if (reta_conf[idx].mask & (1ULL 
<< shift)) + reta_conf[idx].reta[shift] = vf->rss_lut[i]; + } + + return 0; +} + +static int +iavf_dev_rss_hash_update(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf) +{ + struct iavf_adapter *adapter = + IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter); + + if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF)) + return -ENOTSUP; + + /* HENA setting, it is enabled by default, no change */ + if (!rss_conf->rss_key || rss_conf->rss_key_len == 0) { + PMD_DRV_LOG(DEBUG, "No key to be configured"); + return 0; + } else if (rss_conf->rss_key_len != vf->vf_res->rss_key_size) { + PMD_DRV_LOG(ERR, "The size of hash key configured " + "(%d) doesn't match the size of hardware can " + "support (%d)", rss_conf->rss_key_len, + vf->vf_res->rss_key_size); + return -EINVAL; + } + + rte_memcpy(vf->rss_key, rss_conf->rss_key, rss_conf->rss_key_len); + + return iavf_configure_rss_key(adapter); +} + +static int +iavf_dev_rss_hash_conf_get(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf) +{ + struct iavf_adapter *adapter = + IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter); + + if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF)) + return -ENOTSUP; + + /* Just set it to default value now. */ + rss_conf->rss_hf = IAVF_RSS_OFFLOAD_ALL; + + if (!rss_conf->rss_key) + return 0; + + rss_conf->rss_key_len = vf->vf_res->rss_key_size; + rte_memcpy(rss_conf->rss_key, vf->rss_key, rss_conf->rss_key_len); + + return 0; +} + +static int +iavf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) +{ + uint32_t frame_size = mtu + IAVF_ETH_OVERHEAD; + int ret = 0; + + if (mtu < RTE_ETHER_MIN_MTU || frame_size > IAVF_FRAME_SIZE_MAX) + return -EINVAL; + + /* mtu setting is forbidden if port is start */ + if (dev->data->dev_started) { + PMD_DRV_LOG(ERR, "port must be stopped before configuration"); + return -EBUSY; + } + + if (frame_size > RTE_ETHER_MAX_LEN) + dev->data->dev_conf.rxmode.offloads |= + DEV_RX_OFFLOAD_JUMBO_FRAME; + else + dev->data->dev_conf.rxmode.offloads &= + ~DEV_RX_OFFLOAD_JUMBO_FRAME; + + dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size; + + return ret; +} + +static int +iavf_dev_set_default_mac_addr(struct rte_eth_dev *dev, + struct rte_ether_addr *mac_addr) +{ + struct iavf_adapter *adapter = + IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter); + struct rte_ether_addr *perm_addr, *old_addr; + int ret; + + old_addr = (struct rte_ether_addr *)hw->mac.addr; + perm_addr = (struct rte_ether_addr *)hw->mac.perm_addr; + + if (rte_is_same_ether_addr(mac_addr, old_addr)) + return 0; + + /* If the MAC address is configured by host, skip the setting */ + if (rte_is_valid_assigned_ether_addr(perm_addr)) + return -EPERM; + + ret = iavf_add_del_eth_addr(adapter, old_addr, false); + if (ret) + PMD_DRV_LOG(ERR, "Fail to delete old MAC:" + " %02X:%02X:%02X:%02X:%02X:%02X", + old_addr->addr_bytes[0], + old_addr->addr_bytes[1], + old_addr->addr_bytes[2], + old_addr->addr_bytes[3], + old_addr->addr_bytes[4], + old_addr->addr_bytes[5]); + + ret = iavf_add_del_eth_addr(adapter, mac_addr, true); + if (ret) + PMD_DRV_LOG(ERR, "Fail to add new MAC:" + " %02X:%02X:%02X:%02X:%02X:%02X", + mac_addr->addr_bytes[0], + mac_addr->addr_bytes[1], + mac_addr->addr_bytes[2], + mac_addr->addr_bytes[3], + mac_addr->addr_bytes[4], + mac_addr->addr_bytes[5]); + + if (ret) + return -EIO; + + rte_ether_addr_copy(mac_addr, 
(struct rte_ether_addr *)hw->mac.addr); + return 0; +} + +static void +iavf_stat_update_48(uint64_t *offset, uint64_t *stat) +{ + if (*stat >= *offset) + *stat = *stat - *offset; + else + *stat = (uint64_t)((*stat + + ((uint64_t)1 << IAVF_48_BIT_WIDTH)) - *offset); + + *stat &= IAVF_48_BIT_MASK; +} + +static void +iavf_stat_update_32(uint64_t *offset, uint64_t *stat) +{ + if (*stat >= *offset) + *stat = (uint64_t)(*stat - *offset); + else + *stat = (uint64_t)((*stat + + ((uint64_t)1 << IAVF_32_BIT_WIDTH)) - *offset); +} + +static void +iavf_update_stats(struct iavf_vsi *vsi, struct virtchnl_eth_stats *nes) +{ + struct virtchnl_eth_stats *oes = &vsi->eth_stats_offset; + + iavf_stat_update_48(&oes->rx_bytes, &nes->rx_bytes); + iavf_stat_update_48(&oes->rx_unicast, &nes->rx_unicast); + iavf_stat_update_48(&oes->rx_multicast, &nes->rx_multicast); + iavf_stat_update_48(&oes->rx_broadcast, &nes->rx_broadcast); + iavf_stat_update_32(&oes->rx_discards, &nes->rx_discards); + iavf_stat_update_48(&oes->tx_bytes, &nes->tx_bytes); + iavf_stat_update_48(&oes->tx_unicast, &nes->tx_unicast); + iavf_stat_update_48(&oes->tx_multicast, &nes->tx_multicast); + iavf_stat_update_48(&oes->tx_broadcast, &nes->tx_broadcast); + iavf_stat_update_32(&oes->tx_errors, &nes->tx_errors); + iavf_stat_update_32(&oes->tx_discards, &nes->tx_discards); +} + +static int +iavf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) +{ + struct iavf_adapter *adapter = + IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + struct iavf_vsi *vsi = &vf->vsi; + struct virtchnl_eth_stats *pstats = NULL; + int ret; + + ret = iavf_query_stats(adapter, &pstats); + if (ret == 0) { + iavf_update_stats(vsi, pstats); + stats->ipackets = pstats->rx_unicast + pstats->rx_multicast + + pstats->rx_broadcast - pstats->rx_discards; + stats->opackets = pstats->tx_broadcast + pstats->tx_multicast + + pstats->tx_unicast; + stats->imissed = pstats->rx_discards; + stats->oerrors = pstats->tx_errors + pstats->tx_discards; + stats->ibytes = pstats->rx_bytes; + stats->ibytes -= stats->ipackets * RTE_ETHER_CRC_LEN; + stats->obytes = pstats->tx_bytes; + } else { + PMD_DRV_LOG(ERR, "Get statistics failed"); + } + return ret; +} + +static int +iavf_dev_stats_reset(struct rte_eth_dev *dev) +{ + int ret; + struct iavf_adapter *adapter = + IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + struct iavf_vsi *vsi = &vf->vsi; + struct virtchnl_eth_stats *pstats = NULL; + + /* read stat values to clear hardware registers */ + ret = iavf_query_stats(adapter, &pstats); + if (ret != 0) + return ret; + + /* set stats offset base on current values */ + vsi->eth_stats_offset = *pstats; + + return 0; +} + +static int +iavf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) +{ + struct iavf_adapter *adapter = + IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter); + uint16_t msix_intr; + + msix_intr = pci_dev->intr_handle.intr_vec[queue_id]; + if (msix_intr == IAVF_MISC_VEC_ID) { + PMD_DRV_LOG(INFO, "MISC is also enabled for control"); + IAVF_WRITE_REG(hw, IAVF_VFINT_DYN_CTL01, + IAVF_VFINT_DYN_CTL01_INTENA_MASK | + IAVF_VFINT_DYN_CTL01_CLEARPBA_MASK | + IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK); + } else { + IAVF_WRITE_REG(hw, + IAVF_VFINT_DYN_CTLN1 + (msix_intr - IAVF_RX_VEC_START), + 
IAVF_VFINT_DYN_CTLN1_INTENA_MASK | + IAVF_VFINT_DYN_CTL01_CLEARPBA_MASK | + IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK); + } + + IAVF_WRITE_FLUSH(hw); + + rte_intr_ack(&pci_dev->intr_handle); + + return 0; +} + +static int +iavf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) +{ + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint16_t msix_intr; + + msix_intr = pci_dev->intr_handle.intr_vec[queue_id]; + if (msix_intr == IAVF_MISC_VEC_ID) { + PMD_DRV_LOG(ERR, "MISC is used for control, cannot disable it"); + return -EIO; + } + + IAVF_WRITE_REG(hw, + IAVF_VFINT_DYN_CTLN1(msix_intr - IAVF_RX_VEC_START), + 0); + + IAVF_WRITE_FLUSH(hw); + return 0; +} + +static int +iavf_check_vf_reset_done(struct iavf_hw *hw) +{ + int i, reset; + + for (i = 0; i < IAVF_RESET_WAIT_CNT; i++) { + reset = IAVF_READ_REG(hw, IAVF_VFGEN_RSTAT) & + IAVF_VFGEN_RSTAT_VFR_STATE_MASK; + reset = reset >> IAVF_VFGEN_RSTAT_VFR_STATE_SHIFT; + if (reset == VIRTCHNL_VFR_VFACTIVE || + reset == VIRTCHNL_VFR_COMPLETED) + break; + rte_delay_ms(20); + } + + if (i >= IAVF_RESET_WAIT_CNT) + return -1; + + return 0; +} + +static int +iavf_init_vf(struct rte_eth_dev *dev) +{ + int err, bufsz; + struct iavf_adapter *adapter = + IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + + err = iavf_set_mac_type(hw); + if (err) { + PMD_INIT_LOG(ERR, "set_mac_type failed: %d", err); + goto err; + } + + err = iavf_check_vf_reset_done(hw); + if (err) { + PMD_INIT_LOG(ERR, "VF is still resetting"); + goto err; + } + + iavf_init_adminq_parameter(hw); + err = iavf_init_adminq(hw); + if (err) { + PMD_INIT_LOG(ERR, "init_adminq failed: %d", err); + goto err; + } + + vf->aq_resp = rte_zmalloc("vf_aq_resp", IAVF_AQ_BUF_SZ, 0); + if (!vf->aq_resp) { + PMD_INIT_LOG(ERR, "unable to allocate vf_aq_resp memory"); + goto err_aq; + } + if (iavf_check_api_version(adapter) != 0) { + PMD_INIT_LOG(ERR, "check_api version failed"); + goto err_api; + } + + bufsz = sizeof(struct virtchnl_vf_resource) + + (IAVF_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource)); + vf->vf_res = rte_zmalloc("vf_res", bufsz, 0); + if (!vf->vf_res) { + PMD_INIT_LOG(ERR, "unable to allocate vf_res memory"); + goto err_api; + } + if (iavf_get_vf_resource(adapter) != 0) { + PMD_INIT_LOG(ERR, "iavf_get_vf_config failed"); + goto err_alloc; + } + /* Allocate memort for RSS info */ + if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) { + vf->rss_key = rte_zmalloc("rss_key", + vf->vf_res->rss_key_size, 0); + if (!vf->rss_key) { + PMD_INIT_LOG(ERR, "unable to allocate rss_key memory"); + goto err_rss; + } + vf->rss_lut = rte_zmalloc("rss_lut", + vf->vf_res->rss_lut_size, 0); + if (!vf->rss_lut) { + PMD_INIT_LOG(ERR, "unable to allocate rss_lut memory"); + goto err_rss; + } + } + + if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) { + if (iavf_get_supported_rxdid(adapter) != 0) { + PMD_INIT_LOG(ERR, "failed to do get supported rxdid"); + goto err_rss; + } + } + + return 0; +err_rss: + rte_free(vf->rss_key); + rte_free(vf->rss_lut); +err_alloc: + rte_free(vf->vf_res); + vf->vsi_res = NULL; +err_api: + rte_free(vf->aq_resp); +err_aq: + iavf_shutdown_adminq(hw); +err: + return -1; +} + +/* Enable default admin queue interrupt setting */ +static inline void +iavf_enable_irq0(struct iavf_hw *hw) +{ + /* Enable admin queue interrupt trigger 
*/ + IAVF_WRITE_REG(hw, IAVF_VFINT_ICR0_ENA1, + IAVF_VFINT_ICR0_ENA1_ADMINQ_MASK); + + IAVF_WRITE_REG(hw, IAVF_VFINT_DYN_CTL01, + IAVF_VFINT_DYN_CTL01_INTENA_MASK | + IAVF_VFINT_DYN_CTL01_CLEARPBA_MASK | + IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK); + + IAVF_WRITE_FLUSH(hw); +} + +static inline void +iavf_disable_irq0(struct iavf_hw *hw) +{ + /* Disable all interrupt types */ + IAVF_WRITE_REG(hw, IAVF_VFINT_ICR0_ENA1, 0); + IAVF_WRITE_REG(hw, IAVF_VFINT_DYN_CTL01, + IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK); + IAVF_WRITE_FLUSH(hw); +} + +static void +iavf_dev_interrupt_handler(void *param) +{ + struct rte_eth_dev *dev = (struct rte_eth_dev *)param; + struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + iavf_disable_irq0(hw); + + iavf_handle_virtchnl_msg(dev); + + iavf_enable_irq0(hw); +} + +static int +iavf_dev_filter_ctrl(struct rte_eth_dev *dev, + enum rte_filter_type filter_type, + enum rte_filter_op filter_op, + void *arg) +{ + int ret = 0; + + if (!dev) + return -EINVAL; + + switch (filter_type) { + case RTE_ETH_FILTER_GENERIC: + if (filter_op != RTE_ETH_FILTER_GET) + return -EINVAL; + *(const void **)arg = &iavf_flow_ops; + break; + default: + PMD_DRV_LOG(WARNING, "Filter type (%d) not supported", + filter_type); + ret = -EINVAL; + break; + } + + return ret; +} + + +static int +iavf_dev_init(struct rte_eth_dev *eth_dev) +{ + struct iavf_adapter *adapter = + IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private); + struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); + int ret = 0; + + PMD_INIT_FUNC_TRACE(); + + /* assign ops func pointer */ + eth_dev->dev_ops = &iavf_eth_dev_ops; + eth_dev->rx_pkt_burst = &iavf_recv_pkts; + eth_dev->tx_pkt_burst = &iavf_xmit_pkts; + eth_dev->tx_pkt_prepare = &iavf_prep_pkts; + + /* For secondary processes, we don't initialise any further as primary + * has already done this work. Only check if we need a different RX + * and TX function. + */ + if (rte_eal_process_type() != RTE_PROC_PRIMARY) { + iavf_set_rx_function(eth_dev); + iavf_set_tx_function(eth_dev); + return 0; + } + rte_eth_copy_pci_info(eth_dev, pci_dev); + + hw->vendor_id = pci_dev->id.vendor_id; + hw->device_id = pci_dev->id.device_id; + hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id; + hw->subsystem_device_id = pci_dev->id.subsystem_device_id; + hw->bus.bus_id = pci_dev->addr.bus; + hw->bus.device = pci_dev->addr.devid; + hw->bus.func = pci_dev->addr.function; + hw->hw_addr = (void *)pci_dev->mem_resource[0].addr; + hw->back = IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private); + adapter->eth_dev = eth_dev; + adapter->stopped = 1; + + if (iavf_init_vf(eth_dev) != 0) { + PMD_INIT_LOG(ERR, "Init vf failed"); + return -1; + } + + /* set default ptype table */ + adapter->ptype_tbl = iavf_get_default_ptype_table(); + + /* copy mac addr */ + eth_dev->data->mac_addrs = rte_zmalloc( + "iavf_mac", RTE_ETHER_ADDR_LEN * IAVF_NUM_MACADDR_MAX, 0); + if (!eth_dev->data->mac_addrs) { + PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to" + " store MAC addresses", + RTE_ETHER_ADDR_LEN * IAVF_NUM_MACADDR_MAX); + return -ENOMEM; + } + /* If the MAC address is not configured by host, + * generate a random one. 
+ */ + if (!rte_is_valid_assigned_ether_addr( + (struct rte_ether_addr *)hw->mac.addr)) + rte_eth_random_addr(hw->mac.addr); + rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr, + ð_dev->data->mac_addrs[0]); + + /* register callback func to eal lib */ + rte_intr_callback_register(&pci_dev->intr_handle, + iavf_dev_interrupt_handler, + (void *)eth_dev); + + /* enable uio intr after callback register */ + rte_intr_enable(&pci_dev->intr_handle); + + /* configure and enable device interrupt */ + iavf_enable_irq0(hw); + + ret = iavf_flow_init(adapter); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to initialize flow"); + return ret; + } + + return 0; +} + +static void +iavf_dev_close(struct rte_eth_dev *dev) +{ + struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + struct iavf_adapter *adapter = + IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + + iavf_dev_stop(dev); + iavf_flow_flush(dev, NULL); + iavf_shutdown_adminq(hw); + /* disable uio intr before callback unregister */ + rte_intr_disable(intr_handle); + + /* unregister callback func from eal lib */ + rte_intr_callback_unregister(intr_handle, + iavf_dev_interrupt_handler, dev); + iavf_disable_irq0(hw); + + iavf_flow_uninit(adapter); +} + +static int +iavf_dev_uninit(struct rte_eth_dev *dev) +{ + struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return -EPERM; + + dev->dev_ops = NULL; + dev->rx_pkt_burst = NULL; + dev->tx_pkt_burst = NULL; + iavf_dev_close(dev); + + rte_free(vf->vf_res); + vf->vsi_res = NULL; + vf->vf_res = NULL; + + rte_free(vf->aq_resp); + vf->aq_resp = NULL; + + if (vf->rss_lut) { + rte_free(vf->rss_lut); + vf->rss_lut = NULL; + } + if (vf->rss_key) { + rte_free(vf->rss_key); + vf->rss_key = NULL; + } + + return 0; +} + +/* + * Reset VF device only to re-initialize resources in PMD layer + */ +static int +iavf_dev_reset(struct rte_eth_dev *dev) +{ + int ret; + + ret = iavf_dev_uninit(dev); + if (ret) + return ret; + + return iavf_dev_init(dev); +} + +static int +iavf_dcf_cap_check_handler(__rte_unused const char *key, + const char *value, __rte_unused void *opaque) +{ + if (strcmp(value, "dcf")) + return -1; + + return 0; +} + +static int +iavf_dcf_cap_selected(struct rte_devargs *devargs) +{ + struct rte_kvargs *kvlist; + const char *key = "cap"; + int ret = 0; + + if (devargs == NULL) + return 0; + + kvlist = rte_kvargs_parse(devargs->args, NULL); + if (kvlist == NULL) + return 0; + + if (!rte_kvargs_count(kvlist, key)) + goto exit; + + /* dcf capability selected when there's a key-value pair: cap=dcf */ + if (rte_kvargs_process(kvlist, key, + iavf_dcf_cap_check_handler, NULL) < 0) + goto exit; + + ret = 1; + +exit: + rte_kvargs_free(kvlist); + return ret; +} + +static int eth_iavf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, + struct rte_pci_device *pci_dev) +{ + if (iavf_dcf_cap_selected(pci_dev->device.devargs)) + return 1; + + return rte_eth_dev_pci_generic_probe(pci_dev, + sizeof(struct iavf_adapter), iavf_dev_init); +} + +static int eth_iavf_pci_remove(struct rte_pci_device *pci_dev) +{ + return rte_eth_dev_pci_generic_remove(pci_dev, iavf_dev_uninit); +} + +/* Adaptive virtual function driver struct */ +static struct rte_pci_driver rte_iavf_pmd = { + .id_table = pci_id_iavf_map, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, + .probe = eth_iavf_pci_probe, + .remove 
= eth_iavf_pci_remove, +}; + +RTE_PMD_REGISTER_PCI(net_iavf, rte_iavf_pmd); +RTE_PMD_REGISTER_PCI_TABLE(net_iavf, pci_id_iavf_map); +RTE_PMD_REGISTER_KMOD_DEP(net_iavf, "* igb_uio | vfio-pci"); +RTE_PMD_REGISTER_PARAM_STRING(net_iavf, "cap=dcf"); +RTE_INIT(iavf_init_log) +{ + iavf_logtype_init = rte_log_register("pmd.net.iavf.init"); + if (iavf_logtype_init >= 0) + rte_log_set_level(iavf_logtype_init, RTE_LOG_NOTICE); + iavf_logtype_driver = rte_log_register("pmd.net.iavf.driver"); + if (iavf_logtype_driver >= 0) + rte_log_set_level(iavf_logtype_driver, RTE_LOG_NOTICE); + +#ifdef RTE_LIBRTE_IAVF_DEBUG_RX + iavf_logtype_rx = rte_log_register("pmd.net.iavf.rx"); + if (iavf_logtype_rx >= 0) + rte_log_set_level(iavf_logtype_rx, RTE_LOG_DEBUG); +#endif + +#ifdef RTE_LIBRTE_IAVF_DEBUG_TX + iavf_logtype_tx = rte_log_register("pmd.net.iavf.tx"); + if (iavf_logtype_tx >= 0) + rte_log_set_level(iavf_logtype_tx, RTE_LOG_DEBUG); +#endif + +#ifdef RTE_LIBRTE_IAVF_DEBUG_TX_FREE + iavf_logtype_tx_free = rte_log_register("pmd.net.iavf.tx_free"); + if (iavf_logtype_tx_free >= 0) + rte_log_set_level(iavf_logtype_tx_free, RTE_LOG_DEBUG); +#endif +} diff --git a/src/spdk/dpdk/drivers/net/iavf/iavf_fdir.c b/src/spdk/dpdk/drivers/net/iavf/iavf_fdir.c new file mode 100644 index 000000000..264c47d83 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/iavf/iavf_fdir.c @@ -0,0 +1,971 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2020 Intel Corporation + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "iavf.h" +#include "iavf_generic_flow.h" +#include "virtchnl.h" +#include "iavf_rxtx.h" + +#define IAVF_FDIR_MAX_QREGION_SIZE 128 + +#define IAVF_FDIR_IPV6_TC_OFFSET 20 +#define IAVF_IPV6_TC_MASK (0xFF << IAVF_FDIR_IPV6_TC_OFFSET) + +#define IAVF_FDIR_INSET_ETH (\ + IAVF_INSET_ETHERTYPE) + +#define IAVF_FDIR_INSET_ETH_IPV4 (\ + IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \ + IAVF_INSET_IPV4_PROTO | IAVF_INSET_IPV4_TOS | \ + IAVF_INSET_IPV4_TTL) + +#define IAVF_FDIR_INSET_ETH_IPV4_UDP (\ + IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \ + IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \ + IAVF_INSET_UDP_SRC_PORT | IAVF_INSET_UDP_DST_PORT) + +#define IAVF_FDIR_INSET_ETH_IPV4_TCP (\ + IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \ + IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \ + IAVF_INSET_TCP_SRC_PORT | IAVF_INSET_TCP_DST_PORT) + +#define IAVF_FDIR_INSET_ETH_IPV4_SCTP (\ + IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \ + IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \ + IAVF_INSET_SCTP_SRC_PORT | IAVF_INSET_SCTP_DST_PORT) + +#define IAVF_FDIR_INSET_ETH_IPV6 (\ + IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \ + IAVF_INSET_IPV6_NEXT_HDR | IAVF_INSET_IPV6_TC | \ + IAVF_INSET_IPV6_HOP_LIMIT) + +#define IAVF_FDIR_INSET_ETH_IPV6_UDP (\ + IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \ + IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \ + IAVF_INSET_UDP_SRC_PORT | IAVF_INSET_UDP_DST_PORT) + +#define IAVF_FDIR_INSET_ETH_IPV6_TCP (\ + IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \ + IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \ + IAVF_INSET_TCP_SRC_PORT | IAVF_INSET_TCP_DST_PORT) + +#define IAVF_FDIR_INSET_ETH_IPV6_SCTP (\ + IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \ + IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \ + IAVF_INSET_SCTP_SRC_PORT | IAVF_INSET_SCTP_DST_PORT) + +#define IAVF_FDIR_INSET_GTPU (\ + IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \ + IAVF_INSET_GTPU_TEID) + +#define IAVF_FDIR_INSET_GTPU_EH (\ + IAVF_INSET_IPV4_SRC | 
IAVF_INSET_IPV4_DST | \ + IAVF_INSET_GTPU_TEID | IAVF_INSET_GTPU_QFI) + +#define IAVF_FDIR_INSET_L2TPV3OIP (\ + IAVF_L2TPV3OIP_SESSION_ID) + +#define IAVF_FDIR_INSET_ESP (\ + IAVF_INSET_ESP_SPI) + +#define IAVF_FDIR_INSET_AH (\ + IAVF_INSET_AH_SPI) + +#define IAVF_FDIR_INSET_IPV4_NATT_ESP (\ + IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \ + IAVF_INSET_ESP_SPI) + +#define IAVF_FDIR_INSET_IPV6_NATT_ESP (\ + IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \ + IAVF_INSET_ESP_SPI) + +#define IAVF_FDIR_INSET_PFCP (\ + IAVF_INSET_PFCP_S_FIELD) + +static struct iavf_pattern_match_item iavf_fdir_pattern[] = { + {iavf_pattern_ethertype, IAVF_FDIR_INSET_ETH, IAVF_INSET_NONE}, + {iavf_pattern_eth_ipv4, IAVF_FDIR_INSET_ETH_IPV4, IAVF_INSET_NONE}, + {iavf_pattern_eth_ipv4_udp, IAVF_FDIR_INSET_ETH_IPV4_UDP, IAVF_INSET_NONE}, + {iavf_pattern_eth_ipv4_tcp, IAVF_FDIR_INSET_ETH_IPV4_TCP, IAVF_INSET_NONE}, + {iavf_pattern_eth_ipv4_sctp, IAVF_FDIR_INSET_ETH_IPV4_SCTP, IAVF_INSET_NONE}, + {iavf_pattern_eth_ipv6, IAVF_FDIR_INSET_ETH_IPV6, IAVF_INSET_NONE}, + {iavf_pattern_eth_ipv6_udp, IAVF_FDIR_INSET_ETH_IPV6_UDP, IAVF_INSET_NONE}, + {iavf_pattern_eth_ipv6_tcp, IAVF_FDIR_INSET_ETH_IPV6_TCP, IAVF_INSET_NONE}, + {iavf_pattern_eth_ipv6_sctp, IAVF_FDIR_INSET_ETH_IPV6_SCTP, IAVF_INSET_NONE}, + {iavf_pattern_eth_ipv4_gtpu, IAVF_FDIR_INSET_GTPU, IAVF_INSET_NONE}, + {iavf_pattern_eth_ipv4_gtpu_eh, IAVF_FDIR_INSET_GTPU_EH, IAVF_INSET_NONE}, + {iavf_pattern_eth_ipv4_l2tpv3, IAVF_FDIR_INSET_L2TPV3OIP, IAVF_INSET_NONE}, + {iavf_pattern_eth_ipv6_l2tpv3, IAVF_FDIR_INSET_L2TPV3OIP, IAVF_INSET_NONE}, + {iavf_pattern_eth_ipv4_esp, IAVF_FDIR_INSET_ESP, IAVF_INSET_NONE}, + {iavf_pattern_eth_ipv6_esp, IAVF_FDIR_INSET_ESP, IAVF_INSET_NONE}, + {iavf_pattern_eth_ipv4_ah, IAVF_FDIR_INSET_AH, IAVF_INSET_NONE}, + {iavf_pattern_eth_ipv6_ah, IAVF_FDIR_INSET_AH, IAVF_INSET_NONE}, + {iavf_pattern_eth_ipv4_udp_esp, IAVF_FDIR_INSET_IPV4_NATT_ESP, IAVF_INSET_NONE}, + {iavf_pattern_eth_ipv6_udp_esp, IAVF_FDIR_INSET_IPV6_NATT_ESP, IAVF_INSET_NONE}, + {iavf_pattern_eth_ipv4_pfcp, IAVF_FDIR_INSET_PFCP, IAVF_INSET_NONE}, + {iavf_pattern_eth_ipv6_pfcp, IAVF_FDIR_INSET_PFCP, IAVF_INSET_NONE}, +}; + +static struct iavf_flow_parser iavf_fdir_parser; + +static int +iavf_fdir_init(struct iavf_adapter *ad) +{ + struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad); + struct iavf_flow_parser *parser; + + if (!vf->vf_res) + return -EINVAL; + + if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_FDIR_PF) + parser = &iavf_fdir_parser; + else + return -ENOTSUP; + + return iavf_register_parser(parser, ad); +} + +static void +iavf_fdir_uninit(struct iavf_adapter *ad) +{ + iavf_unregister_parser(&iavf_fdir_parser, ad); +} + +static int +iavf_fdir_create(struct iavf_adapter *ad, + struct rte_flow *flow, + void *meta, + struct rte_flow_error *error) +{ + struct iavf_fdir_conf *filter = meta; + struct iavf_fdir_conf *rule; + int ret; + + rule = rte_zmalloc("fdir_entry", sizeof(*rule), 0); + if (!rule) { + rte_flow_error_set(error, ENOMEM, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Failed to allocate memory for fdir rule"); + return -rte_errno; + } + + ret = iavf_fdir_add(ad, filter); + if (ret) { + rte_flow_error_set(error, -ret, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Failed to add filter rule."); + goto free_entry; + } + + if (filter->mark_flag == 1) + iavf_fdir_rx_proc_enable(ad, 1); + + rte_memcpy(rule, filter, sizeof(*rule)); + flow->rule = rule; + + return 0; + +free_entry: + rte_free(rule); + return -rte_errno; +} + +static int +iavf_fdir_destroy(struct iavf_adapter *ad, + 
struct rte_flow *flow, + struct rte_flow_error *error) +{ + struct iavf_fdir_conf *filter; + int ret; + + filter = (struct iavf_fdir_conf *)flow->rule; + + ret = iavf_fdir_del(ad, filter); + if (ret) { + rte_flow_error_set(error, -ret, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Failed to delete filter rule."); + return -rte_errno; + } + + if (filter->mark_flag == 1) + iavf_fdir_rx_proc_enable(ad, 0); + + flow->rule = NULL; + rte_free(filter); + + return 0; +} + +static int +iavf_fdir_validation(struct iavf_adapter *ad, + __rte_unused struct rte_flow *flow, + void *meta, + struct rte_flow_error *error) +{ + struct iavf_fdir_conf *filter = meta; + int ret; + + ret = iavf_fdir_check(ad, filter); + if (ret) { + rte_flow_error_set(error, -ret, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Failed to validate filter rule."); + return -rte_errno; + } + + return 0; +}; + +static struct iavf_flow_engine iavf_fdir_engine = { + .init = iavf_fdir_init, + .uninit = iavf_fdir_uninit, + .create = iavf_fdir_create, + .destroy = iavf_fdir_destroy, + .validation = iavf_fdir_validation, + .type = IAVF_FLOW_ENGINE_FDIR, +}; + +static int +iavf_fdir_parse_action_qregion(struct iavf_adapter *ad, + struct rte_flow_error *error, + const struct rte_flow_action *act, + struct virtchnl_filter_action *filter_action) +{ + const struct rte_flow_action_rss *rss = act->conf; + uint32_t i; + + if (act->type != RTE_FLOW_ACTION_TYPE_RSS) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, act, + "Invalid action."); + return -rte_errno; + } + + if (rss->queue_num <= 1) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, act, + "Queue region size can't be 0 or 1."); + return -rte_errno; + } + + /* check if queue index for queue region is continuous */ + for (i = 0; i < rss->queue_num - 1; i++) { + if (rss->queue[i + 1] != rss->queue[i] + 1) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, act, + "Discontinuous queue region"); + return -rte_errno; + } + } + + if (rss->queue[rss->queue_num - 1] >= ad->eth_dev->data->nb_rx_queues) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, act, + "Invalid queue region indexes."); + return -rte_errno; + } + + if (!(rte_is_power_of_2(rss->queue_num) && + rss->queue_num <= IAVF_FDIR_MAX_QREGION_SIZE)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, act, + "The region size should be any of the following values:" + "1, 2, 4, 8, 16, 32, 64, 128 as long as the total number " + "of queues do not exceed the VSI allocation."); + return -rte_errno; + } + + filter_action->act_conf.queue.index = rss->queue[0]; + filter_action->act_conf.queue.region = rte_fls_u32(rss->queue_num) - 1; + + return 0; +} + +static int +iavf_fdir_parse_action(struct iavf_adapter *ad, + const struct rte_flow_action actions[], + struct rte_flow_error *error, + struct iavf_fdir_conf *filter) +{ + const struct rte_flow_action_queue *act_q; + const struct rte_flow_action_mark *mark_spec = NULL; + uint32_t dest_num = 0; + uint32_t mark_num = 0; + int ret; + + int number = 0; + struct virtchnl_filter_action *filter_action; + + for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { + switch (actions->type) { + case RTE_FLOW_ACTION_TYPE_VOID: + break; + + case RTE_FLOW_ACTION_TYPE_PASSTHRU: + dest_num++; + + filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number]; + + filter_action->type = VIRTCHNL_ACTION_PASSTHRU; + + filter->add_fltr.rule_cfg.action_set.count = ++number; + break; + + case RTE_FLOW_ACTION_TYPE_DROP: + dest_num++; + + 
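+			/* Descriptive note: each destination action
+			 * (PASSTHRU/DROP/QUEUE/RSS) is encoded the same way --
+			 * it is written into
+			 * add_fltr.rule_cfg.action_set.actions[number] and
+			 * "number" (mirrored into action_set.count) advances.
+			 * dest_num and mark_num only feed the checks after the
+			 * loop: at most one destination, at most one mark, and
+			 * an implicit PASSTHRU when only a mark is given.
+			 */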
filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number]; + + filter_action->type = VIRTCHNL_ACTION_DROP; + + filter->add_fltr.rule_cfg.action_set.count = ++number; + break; + + case RTE_FLOW_ACTION_TYPE_QUEUE: + dest_num++; + + act_q = actions->conf; + filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number]; + + filter_action->type = VIRTCHNL_ACTION_QUEUE; + filter_action->act_conf.queue.index = act_q->index; + + if (filter_action->act_conf.queue.index >= + ad->eth_dev->data->nb_rx_queues) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + actions, "Invalid queue for FDIR."); + return -rte_errno; + } + + filter->add_fltr.rule_cfg.action_set.count = ++number; + break; + + case RTE_FLOW_ACTION_TYPE_RSS: + dest_num++; + + filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number]; + + filter_action->type = VIRTCHNL_ACTION_Q_REGION; + + ret = iavf_fdir_parse_action_qregion(ad, + error, actions, filter_action); + if (ret) + return ret; + + filter->add_fltr.rule_cfg.action_set.count = ++number; + break; + + case RTE_FLOW_ACTION_TYPE_MARK: + mark_num++; + + filter->mark_flag = 1; + mark_spec = actions->conf; + filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number]; + + filter_action->type = VIRTCHNL_ACTION_MARK; + filter_action->act_conf.mark_id = mark_spec->id; + + filter->add_fltr.rule_cfg.action_set.count = ++number; + break; + + default: + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, actions, + "Invalid action."); + return -rte_errno; + } + } + + if (number > VIRTCHNL_MAX_NUM_ACTIONS) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, actions, + "Action numbers exceed the maximum value"); + return -rte_errno; + } + + if (dest_num >= 2) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, actions, + "Unsupported action combination"); + return -rte_errno; + } + + if (mark_num >= 2) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, actions, + "Too many mark actions"); + return -rte_errno; + } + + if (dest_num + mark_num == 0) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, actions, + "Empty action"); + return -rte_errno; + } + + /* Mark only is equal to mark + passthru. 
*/ + if (dest_num == 0) { + filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number]; + filter_action->type = VIRTCHNL_ACTION_PASSTHRU; + filter->add_fltr.rule_cfg.action_set.count = ++number; + } + + return 0; +} + +static int +iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad, + const struct rte_flow_item pattern[], + struct rte_flow_error *error, + struct iavf_fdir_conf *filter) +{ + const struct rte_flow_item *item = pattern; + enum rte_flow_item_type item_type; + enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END; + const struct rte_flow_item_eth *eth_spec, *eth_mask; + const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask; + const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask; + const struct rte_flow_item_udp *udp_spec, *udp_mask; + const struct rte_flow_item_tcp *tcp_spec, *tcp_mask; + const struct rte_flow_item_sctp *sctp_spec, *sctp_mask; + const struct rte_flow_item_gtp *gtp_spec, *gtp_mask; + const struct rte_flow_item_gtp_psc *gtp_psc_spec, *gtp_psc_mask; + const struct rte_flow_item_l2tpv3oip *l2tpv3oip_spec, *l2tpv3oip_mask; + const struct rte_flow_item_esp *esp_spec, *esp_mask; + const struct rte_flow_item_ah *ah_spec, *ah_mask; + const struct rte_flow_item_pfcp *pfcp_spec, *pfcp_mask; + uint64_t input_set = IAVF_INSET_NONE; + + enum rte_flow_item_type next_type; + uint16_t ether_type; + + int layer = 0; + struct virtchnl_proto_hdr *hdr; + + uint8_t ipv6_addr_mask[16] = { + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF + }; + + for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { + if (item->last) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Not support range"); + } + + item_type = item->type; + + switch (item_type) { + case RTE_FLOW_ITEM_TYPE_ETH: + eth_spec = item->spec; + eth_mask = item->mask; + next_type = (item + 1)->type; + + hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer]; + + VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ETH); + + if (next_type == RTE_FLOW_ITEM_TYPE_END && + (!eth_spec || !eth_mask)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "NULL eth spec/mask."); + return -rte_errno; + } + + if (eth_spec && eth_mask) { + if (!rte_is_zero_ether_addr(ð_mask->src) || + !rte_is_zero_ether_addr(ð_mask->dst)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Invalid MAC_addr mask."); + return -rte_errno; + } + } + + if (eth_spec && eth_mask && eth_mask->type) { + if (eth_mask->type != RTE_BE16(0xffff)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Invalid type mask."); + return -rte_errno; + } + + ether_type = rte_be_to_cpu_16(eth_spec->type); + if (ether_type == RTE_ETHER_TYPE_IPV4 || + ether_type == RTE_ETHER_TYPE_IPV6) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Unsupported ether_type."); + return -rte_errno; + } + + input_set |= IAVF_INSET_ETHERTYPE; + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ETH, ETHERTYPE); + + rte_memcpy(hdr->buffer, + eth_spec, sizeof(*eth_spec)); + } + + filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer; + break; + + case RTE_FLOW_ITEM_TYPE_IPV4: + l3 = RTE_FLOW_ITEM_TYPE_IPV4; + ipv4_spec = item->spec; + ipv4_mask = item->mask; + + hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer]; + + VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4); + + if (ipv4_spec && ipv4_mask) { + if (ipv4_mask->hdr.version_ihl || + ipv4_mask->hdr.total_length || + ipv4_mask->hdr.packet_id || + ipv4_mask->hdr.fragment_offset || + 
ipv4_mask->hdr.hdr_checksum) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Invalid IPv4 mask."); + return -rte_errno; + } + + if (ipv4_mask->hdr.type_of_service == + UINT8_MAX) { + input_set |= IAVF_INSET_IPV4_TOS; + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DSCP); + } + if (ipv4_mask->hdr.next_proto_id == UINT8_MAX) { + input_set |= IAVF_INSET_IPV4_PROTO; + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, PROT); + } + if (ipv4_mask->hdr.time_to_live == UINT8_MAX) { + input_set |= IAVF_INSET_IPV4_TTL; + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, TTL); + } + if (ipv4_mask->hdr.src_addr == UINT32_MAX) { + input_set |= IAVF_INSET_IPV4_SRC; + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, SRC); + } + if (ipv4_mask->hdr.dst_addr == UINT32_MAX) { + input_set |= IAVF_INSET_IPV4_DST; + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DST); + } + + rte_memcpy(hdr->buffer, + &ipv4_spec->hdr, + sizeof(ipv4_spec->hdr)); + } + + filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer; + break; + + case RTE_FLOW_ITEM_TYPE_IPV6: + l3 = RTE_FLOW_ITEM_TYPE_IPV6; + ipv6_spec = item->spec; + ipv6_mask = item->mask; + + hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer]; + + VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV6); + + if (ipv6_spec && ipv6_mask) { + if (ipv6_mask->hdr.payload_len) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Invalid IPv6 mask"); + return -rte_errno; + } + + if ((ipv6_mask->hdr.vtc_flow & + rte_cpu_to_be_32(IAVF_IPV6_TC_MASK)) + == rte_cpu_to_be_32(IAVF_IPV6_TC_MASK)) { + input_set |= IAVF_INSET_IPV6_TC; + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, TC); + } + if (ipv6_mask->hdr.proto == UINT8_MAX) { + input_set |= IAVF_INSET_IPV6_NEXT_HDR; + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, PROT); + } + if (ipv6_mask->hdr.hop_limits == UINT8_MAX) { + input_set |= IAVF_INSET_IPV6_HOP_LIMIT; + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, HOP_LIMIT); + } + if (!memcmp(ipv6_mask->hdr.src_addr, + ipv6_addr_mask, + RTE_DIM(ipv6_mask->hdr.src_addr))) { + input_set |= IAVF_INSET_IPV6_SRC; + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, SRC); + } + if (!memcmp(ipv6_mask->hdr.dst_addr, + ipv6_addr_mask, + RTE_DIM(ipv6_mask->hdr.dst_addr))) { + input_set |= IAVF_INSET_IPV6_DST; + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, DST); + } + + rte_memcpy(hdr->buffer, + &ipv6_spec->hdr, + sizeof(ipv6_spec->hdr)); + } + + filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer; + break; + + case RTE_FLOW_ITEM_TYPE_UDP: + udp_spec = item->spec; + udp_mask = item->mask; + + hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer]; + + VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, UDP); + + if (udp_spec && udp_mask) { + if (udp_mask->hdr.dgram_len || + udp_mask->hdr.dgram_cksum) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Invalid UDP mask"); + return -rte_errno; + } + + if (udp_mask->hdr.src_port == UINT16_MAX) { + input_set |= IAVF_INSET_UDP_SRC_PORT; + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, SRC_PORT); + } + if (udp_mask->hdr.dst_port == UINT16_MAX) { + input_set |= IAVF_INSET_UDP_DST_PORT; + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, DST_PORT); + } + + if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) + rte_memcpy(hdr->buffer, + &udp_spec->hdr, + sizeof(udp_spec->hdr)); + else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) + rte_memcpy(hdr->buffer, + &udp_spec->hdr, + sizeof(udp_spec->hdr)); + } + + filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer; + break; + + case RTE_FLOW_ITEM_TYPE_TCP: + tcp_spec = item->spec; + tcp_mask = item->mask; + + hdr = 
&filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer]; + + VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, TCP); + + if (tcp_spec && tcp_mask) { + if (tcp_mask->hdr.sent_seq || + tcp_mask->hdr.recv_ack || + tcp_mask->hdr.data_off || + tcp_mask->hdr.tcp_flags || + tcp_mask->hdr.rx_win || + tcp_mask->hdr.cksum || + tcp_mask->hdr.tcp_urp) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Invalid TCP mask"); + return -rte_errno; + } + + if (tcp_mask->hdr.src_port == UINT16_MAX) { + input_set |= IAVF_INSET_TCP_SRC_PORT; + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, SRC_PORT); + } + if (tcp_mask->hdr.dst_port == UINT16_MAX) { + input_set |= IAVF_INSET_TCP_DST_PORT; + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, DST_PORT); + } + + if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) + rte_memcpy(hdr->buffer, + &tcp_spec->hdr, + sizeof(tcp_spec->hdr)); + else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) + rte_memcpy(hdr->buffer, + &tcp_spec->hdr, + sizeof(tcp_spec->hdr)); + } + + filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer; + break; + + case RTE_FLOW_ITEM_TYPE_SCTP: + sctp_spec = item->spec; + sctp_mask = item->mask; + + hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer]; + + VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, SCTP); + + if (sctp_spec && sctp_mask) { + if (sctp_mask->hdr.cksum) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Invalid UDP mask"); + return -rte_errno; + } + + if (sctp_mask->hdr.src_port == UINT16_MAX) { + input_set |= IAVF_INSET_SCTP_SRC_PORT; + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, SRC_PORT); + } + if (sctp_mask->hdr.dst_port == UINT16_MAX) { + input_set |= IAVF_INSET_SCTP_DST_PORT; + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, DST_PORT); + } + + if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) + rte_memcpy(hdr->buffer, + &sctp_spec->hdr, + sizeof(sctp_spec->hdr)); + else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) + rte_memcpy(hdr->buffer, + &sctp_spec->hdr, + sizeof(sctp_spec->hdr)); + } + + filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer; + break; + + case RTE_FLOW_ITEM_TYPE_GTPU: + gtp_spec = item->spec; + gtp_mask = item->mask; + + hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer]; + + VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_IP); + + if (gtp_spec && gtp_mask) { + if (gtp_mask->v_pt_rsv_flags || + gtp_mask->msg_type || + gtp_mask->msg_len) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Invalid GTP mask"); + return -rte_errno; + } + + if (gtp_mask->teid == UINT32_MAX) { + input_set |= IAVF_INSET_GTPU_TEID; + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, GTPU_IP, TEID); + } + + rte_memcpy(hdr->buffer, + gtp_spec, sizeof(*gtp_spec)); + } + + filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer; + break; + + case RTE_FLOW_ITEM_TYPE_GTP_PSC: + gtp_psc_spec = item->spec; + gtp_psc_mask = item->mask; + + hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer]; + + VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_EH); + + if (gtp_psc_spec && gtp_psc_mask) { + if (gtp_psc_mask->qfi == UINT8_MAX) { + input_set |= IAVF_INSET_GTPU_QFI; + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, GTPU_EH, QFI); + } + + rte_memcpy(hdr->buffer, gtp_psc_spec, + sizeof(*gtp_psc_spec)); + } + + filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer; + break; + + case RTE_FLOW_ITEM_TYPE_L2TPV3OIP: + l2tpv3oip_spec = item->spec; + l2tpv3oip_mask = item->mask; + + hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer]; + + VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, L2TPV3); + + if (l2tpv3oip_spec && l2tpv3oip_mask) { + if (l2tpv3oip_mask->session_id == UINT32_MAX) { + input_set |= 
IAVF_L2TPV3OIP_SESSION_ID; + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, L2TPV3, SESS_ID); + } + + rte_memcpy(hdr->buffer, l2tpv3oip_spec, + sizeof(*l2tpv3oip_spec)); + } + + filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer; + break; + + case RTE_FLOW_ITEM_TYPE_ESP: + esp_spec = item->spec; + esp_mask = item->mask; + + hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer]; + + VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ESP); + + if (esp_spec && esp_mask) { + if (esp_mask->hdr.spi == UINT32_MAX) { + input_set |= IAVF_INSET_ESP_SPI; + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ESP, SPI); + } + + rte_memcpy(hdr->buffer, &esp_spec->hdr, + sizeof(esp_spec->hdr)); + } + + filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer; + break; + + case RTE_FLOW_ITEM_TYPE_AH: + ah_spec = item->spec; + ah_mask = item->mask; + + hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer]; + + VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, AH); + + if (ah_spec && ah_mask) { + if (ah_mask->spi == UINT32_MAX) { + input_set |= IAVF_INSET_AH_SPI; + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, AH, SPI); + } + + rte_memcpy(hdr->buffer, ah_spec, + sizeof(*ah_spec)); + } + + filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer; + break; + + case RTE_FLOW_ITEM_TYPE_PFCP: + pfcp_spec = item->spec; + pfcp_mask = item->mask; + + hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer]; + + VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, PFCP); + + if (pfcp_spec && pfcp_mask) { + if (pfcp_mask->s_field == UINT8_MAX) { + input_set |= IAVF_INSET_PFCP_S_FIELD; + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, PFCP, S_FIELD); + } + + rte_memcpy(hdr->buffer, pfcp_spec, + sizeof(*pfcp_spec)); + } + + filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer; + break; + + case RTE_FLOW_ITEM_TYPE_VOID: + break; + + default: + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Invalid pattern item."); + return -rte_errno; + } + } + + if (layer > VIRTCHNL_MAX_NUM_PROTO_HDRS) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Protocol header layers exceed the maximum value"); + return -rte_errno; + } + + filter->input_set = input_set; + + return 0; +} + +static int +iavf_fdir_parse(struct iavf_adapter *ad, + struct iavf_pattern_match_item *array, + uint32_t array_len, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + void **meta, + struct rte_flow_error *error) +{ + struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad); + struct iavf_fdir_conf *filter = &vf->fdir.conf; + struct iavf_pattern_match_item *item = NULL; + uint64_t input_set; + int ret; + + memset(filter, 0, sizeof(*filter)); + + item = iavf_search_pattern_match_item(pattern, array, array_len, error); + if (!item) + return -rte_errno; + + ret = iavf_fdir_parse_pattern(ad, pattern, error, filter); + if (ret) + goto error; + + input_set = filter->input_set; + if (!input_set || input_set & ~item->input_set_mask) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM_SPEC, pattern, + "Invalid input set"); + ret = -rte_errno; + goto error; + } + + ret = iavf_fdir_parse_action(ad, actions, error, filter); + if (ret) + goto error; + + if (meta) + *meta = filter; + +error: + rte_free(item); + return ret; +} + +static struct iavf_flow_parser iavf_fdir_parser = { + .engine = &iavf_fdir_engine, + .array = iavf_fdir_pattern, + .array_len = RTE_DIM(iavf_fdir_pattern), + .parse_pattern_action = iavf_fdir_parse, + .stage = IAVF_FLOW_STAGE_DISTRIBUTOR, +}; + +RTE_INIT(iavf_fdir_engine_register) +{ + iavf_register_flow_engine(&iavf_fdir_engine); +} diff --git 
a/src/spdk/dpdk/drivers/net/iavf/iavf_generic_flow.c b/src/spdk/dpdk/drivers/net/iavf/iavf_generic_flow.c new file mode 100644 index 000000000..b6c26c4fd --- /dev/null +++ b/src/spdk/dpdk/drivers/net/iavf/iavf_generic_flow.c @@ -0,0 +1,1044 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019 Intel Corporation + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "iavf.h" +#include "iavf_generic_flow.h" + +static struct iavf_engine_list engine_list = + TAILQ_HEAD_INITIALIZER(engine_list); + +static int iavf_flow_validate(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error); +static struct rte_flow *iavf_flow_create(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error); +static int iavf_flow_destroy(struct rte_eth_dev *dev, + struct rte_flow *flow, + struct rte_flow_error *error); +static int iavf_flow_query(struct rte_eth_dev *dev, + struct rte_flow *flow, + const struct rte_flow_action *actions, + void *data, + struct rte_flow_error *error); + +const struct rte_flow_ops iavf_flow_ops = { + .validate = iavf_flow_validate, + .create = iavf_flow_create, + .destroy = iavf_flow_destroy, + .flush = iavf_flow_flush, + .query = iavf_flow_query, +}; + +/* empty */ +enum rte_flow_item_type iavf_pattern_empty[] = { + RTE_FLOW_ITEM_TYPE_END, +}; + +/* L2 */ +enum rte_flow_item_type iavf_pattern_ethertype[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_END, +}; + +enum rte_flow_item_type iavf_pattern_ethertype_vlan[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_END, +}; + +enum rte_flow_item_type iavf_pattern_ethertype_qinq[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_END, +}; + +/* ARP */ +enum rte_flow_item_type iavf_pattern_eth_arp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_ARP_ETH_IPV4, + RTE_FLOW_ITEM_TYPE_END, +}; + +/* non-tunnel IPv4 */ +enum rte_flow_item_type iavf_pattern_eth_ipv4[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_END, +}; + +enum rte_flow_item_type iavf_pattern_eth_vlan_ipv4[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_END, +}; + +enum rte_flow_item_type iavf_pattern_eth_qinq_ipv4[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_END, +}; + +enum rte_flow_item_type iavf_pattern_eth_ipv4_udp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_END, +}; + +enum rte_flow_item_type iavf_pattern_eth_vlan_ipv4_udp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_END, +}; + +enum rte_flow_item_type iavf_pattern_eth_qinq_ipv4_udp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_END, +}; + +enum rte_flow_item_type iavf_pattern_eth_ipv4_tcp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_END, +}; + +enum rte_flow_item_type iavf_pattern_eth_vlan_ipv4_tcp[] = { + 
RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_END, +}; + +enum rte_flow_item_type iavf_pattern_eth_qinq_ipv4_tcp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_END, +}; + +enum rte_flow_item_type iavf_pattern_eth_ipv4_sctp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_END, +}; + +enum rte_flow_item_type iavf_pattern_eth_vlan_ipv4_sctp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_END, +}; + +enum rte_flow_item_type iavf_pattern_eth_qinq_ipv4_sctp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_END, +}; + +enum rte_flow_item_type iavf_pattern_eth_ipv4_icmp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_ICMP, + RTE_FLOW_ITEM_TYPE_END, +}; + +enum rte_flow_item_type iavf_pattern_eth_vlan_ipv4_icmp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_ICMP, + RTE_FLOW_ITEM_TYPE_END, +}; + +enum rte_flow_item_type iavf_pattern_eth_qinq_ipv4_icmp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_ICMP, + RTE_FLOW_ITEM_TYPE_END, +}; + +/* non-tunnel IPv6 */ +enum rte_flow_item_type iavf_pattern_eth_ipv6[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_END, +}; + +enum rte_flow_item_type iavf_pattern_eth_vlan_ipv6[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_END, +}; + +enum rte_flow_item_type iavf_pattern_eth_qinq_ipv6[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_END, +}; + +enum rte_flow_item_type iavf_pattern_eth_ipv6_udp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_END, +}; + +enum rte_flow_item_type iavf_pattern_eth_vlan_ipv6_udp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_END, +}; + +enum rte_flow_item_type iavf_pattern_eth_qinq_ipv6_udp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_END, +}; + +enum rte_flow_item_type iavf_pattern_eth_ipv6_tcp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_END, +}; + +enum rte_flow_item_type iavf_pattern_eth_vlan_ipv6_tcp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_END, +}; + +enum rte_flow_item_type iavf_pattern_eth_qinq_ipv6_tcp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_END, +}; + +enum rte_flow_item_type iavf_pattern_eth_ipv6_sctp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_END, +}; + +enum rte_flow_item_type iavf_pattern_eth_vlan_ipv6_sctp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + 
RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_END, +}; + +enum rte_flow_item_type iavf_pattern_eth_qinq_ipv6_sctp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_END, +}; + +enum rte_flow_item_type iavf_pattern_eth_ipv6_icmp6[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_ICMP6, + RTE_FLOW_ITEM_TYPE_END, +}; + +enum rte_flow_item_type iavf_pattern_eth_vlan_ipv6_icmp6[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_ICMP6, + RTE_FLOW_ITEM_TYPE_END, +}; + +enum rte_flow_item_type iavf_pattern_eth_qinq_ipv6_icmp6[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_ICMP6, + RTE_FLOW_ITEM_TYPE_END, +}; + +/* GTPU */ +enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_GTPU, + RTE_FLOW_ITEM_TYPE_END, +}; + +enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_eh[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_GTPU, + RTE_FLOW_ITEM_TYPE_GTP_PSC, + RTE_FLOW_ITEM_TYPE_END, +}; + +enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_ipv4[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_GTPU, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_END, +}; + +enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_eh_ipv4[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_GTPU, + RTE_FLOW_ITEM_TYPE_GTP_PSC, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_END, +}; + +enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_eh_ipv4_udp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_GTPU, + RTE_FLOW_ITEM_TYPE_GTP_PSC, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_END, +}; + +enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_eh_ipv4_tcp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_GTPU, + RTE_FLOW_ITEM_TYPE_GTP_PSC, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_END, + +}; + +enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_eh_ipv4_icmp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_GTPU, + RTE_FLOW_ITEM_TYPE_GTP_PSC, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_ICMP, + RTE_FLOW_ITEM_TYPE_END, +}; + +/* ESP */ +enum rte_flow_item_type iavf_pattern_eth_ipv4_esp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_ESP, + RTE_FLOW_ITEM_TYPE_END, +}; + +enum rte_flow_item_type iavf_pattern_eth_ipv4_udp_esp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_ESP, + RTE_FLOW_ITEM_TYPE_END, +}; + +enum rte_flow_item_type iavf_pattern_eth_ipv6_esp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_ESP, + RTE_FLOW_ITEM_TYPE_END, +}; + +enum rte_flow_item_type iavf_pattern_eth_ipv6_udp_esp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_ESP, + RTE_FLOW_ITEM_TYPE_END, +}; + +/* AH */ +enum rte_flow_item_type iavf_pattern_eth_ipv4_ah[] 
= { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_AH, + RTE_FLOW_ITEM_TYPE_END, +}; + +enum rte_flow_item_type iavf_pattern_eth_ipv6_ah[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_AH, + RTE_FLOW_ITEM_TYPE_END, +}; + +/* L2TPV3 */ +enum rte_flow_item_type iavf_pattern_eth_ipv4_l2tpv3[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_L2TPV3OIP, + RTE_FLOW_ITEM_TYPE_END, +}; + +enum rte_flow_item_type iavf_pattern_eth_ipv6_l2tpv3[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_L2TPV3OIP, + RTE_FLOW_ITEM_TYPE_END, +}; + +/* PFCP */ +enum rte_flow_item_type iavf_pattern_eth_ipv4_pfcp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_PFCP, + RTE_FLOW_ITEM_TYPE_END, +}; + +enum rte_flow_item_type iavf_pattern_eth_ipv6_pfcp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_PFCP, + RTE_FLOW_ITEM_TYPE_END, +}; + +typedef struct iavf_flow_engine * (*parse_engine_t)(struct iavf_adapter *ad, + struct rte_flow *flow, + struct iavf_parser_list *parser_list, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error); + +void +iavf_register_flow_engine(struct iavf_flow_engine *engine) +{ + TAILQ_INSERT_TAIL(&engine_list, engine, node); +} + +int +iavf_flow_init(struct iavf_adapter *ad) +{ + int ret; + struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad); + void *temp; + struct iavf_flow_engine *engine; + + TAILQ_INIT(&vf->flow_list); + TAILQ_INIT(&vf->rss_parser_list); + TAILQ_INIT(&vf->dist_parser_list); + rte_spinlock_init(&vf->flow_ops_lock); + + TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) { + if (engine->init == NULL) { + PMD_INIT_LOG(ERR, "Invalid engine type (%d)", + engine->type); + return -ENOTSUP; + } + + ret = engine->init(ad); + if (ret && ret != -ENOTSUP) { + PMD_INIT_LOG(ERR, "Failed to initialize engine %d", + engine->type); + return ret; + } + } + return 0; +} + +void +iavf_flow_uninit(struct iavf_adapter *ad) +{ + struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad); + struct iavf_flow_engine *engine; + struct rte_flow *p_flow; + struct iavf_flow_parser_node *p_parser; + void *temp; + + TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) { + if (engine->uninit) + engine->uninit(ad); + } + + /* Remove all flows */ + while ((p_flow = TAILQ_FIRST(&vf->flow_list))) { + TAILQ_REMOVE(&vf->flow_list, p_flow, node); + if (p_flow->engine->free) + p_flow->engine->free(p_flow); + rte_free(p_flow); + } + + /* Cleanup parser list */ + while ((p_parser = TAILQ_FIRST(&vf->rss_parser_list))) { + TAILQ_REMOVE(&vf->rss_parser_list, p_parser, node); + rte_free(p_parser); + } + + while ((p_parser = TAILQ_FIRST(&vf->dist_parser_list))) { + TAILQ_REMOVE(&vf->dist_parser_list, p_parser, node); + rte_free(p_parser); + } +} + +int +iavf_register_parser(struct iavf_flow_parser *parser, + struct iavf_adapter *ad) +{ + struct iavf_parser_list *list = NULL; + struct iavf_flow_parser_node *parser_node; + struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad); + + parser_node = rte_zmalloc("iavf_parser", sizeof(*parser_node), 0); + if (parser_node == NULL) { + PMD_DRV_LOG(ERR, "Failed to allocate memory."); + return -ENOMEM; + } + parser_node->parser = parser; + + if (parser->engine->type == IAVF_FLOW_ENGINE_HASH) { + list = &vf->rss_parser_list; + TAILQ_INSERT_TAIL(list, parser_node, node); + } else if (parser->engine->type == 
IAVF_FLOW_ENGINE_FDIR) { + list = &vf->dist_parser_list; + TAILQ_INSERT_HEAD(list, parser_node, node); + } else { + return -EINVAL; + } + + return 0; +} + +void +iavf_unregister_parser(struct iavf_flow_parser *parser, + struct iavf_adapter *ad) +{ + struct iavf_parser_list *list = NULL; + struct iavf_flow_parser_node *p_parser; + struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad); + void *temp; + + if (parser->engine->type == IAVF_FLOW_ENGINE_HASH) + list = &vf->rss_parser_list; + else if (parser->engine->type == IAVF_FLOW_ENGINE_FDIR) + list = &vf->dist_parser_list; + + if (list == NULL) + return; + + TAILQ_FOREACH_SAFE(p_parser, list, node, temp) { + if (p_parser->parser->engine->type == parser->engine->type) { + TAILQ_REMOVE(list, p_parser, node); + rte_free(p_parser); + } + } +} + +static int +iavf_flow_valid_attr(const struct rte_flow_attr *attr, + struct rte_flow_error *error) +{ + /* Must be input direction */ + if (!attr->ingress) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, + attr, "Only support ingress."); + return -rte_errno; + } + + /* Not supported */ + if (attr->egress) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, + attr, "Not support egress."); + return -rte_errno; + } + + /* Not supported */ + if (attr->priority) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, + attr, "Not support priority."); + return -rte_errno; + } + + /* Not supported */ + if (attr->group) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_GROUP, + attr, "Not support group."); + return -rte_errno; + } + + return 0; +} + +/* Find the first VOID or non-VOID item pointer */ +static const struct rte_flow_item * +iavf_find_first_item(const struct rte_flow_item *item, bool is_void) +{ + bool is_find; + + while (item->type != RTE_FLOW_ITEM_TYPE_END) { + if (is_void) + is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID; + else + is_find = item->type != RTE_FLOW_ITEM_TYPE_VOID; + if (is_find) + break; + item++; + } + return item; +} + +/* Skip all VOID items of the pattern */ +static void +iavf_pattern_skip_void_item(struct rte_flow_item *items, + const struct rte_flow_item *pattern) +{ + uint32_t cpy_count = 0; + const struct rte_flow_item *pb = pattern, *pe = pattern; + + for (;;) { + /* Find a non-void item first */ + pb = iavf_find_first_item(pb, false); + if (pb->type == RTE_FLOW_ITEM_TYPE_END) { + pe = pb; + break; + } + + /* Find a void item */ + pe = iavf_find_first_item(pb + 1, true); + + cpy_count = pe - pb; + rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count); + + items += cpy_count; + + if (pe->type == RTE_FLOW_ITEM_TYPE_END) + break; + + pb = pe + 1; + } + /* Copy the END item. 
*/ + rte_memcpy(items, pe, sizeof(struct rte_flow_item)); +} + +/* Check if the pattern matches a supported item type array */ +static bool +iavf_match_pattern(enum rte_flow_item_type *item_array, + const struct rte_flow_item *pattern) +{ + const struct rte_flow_item *item = pattern; + + while ((*item_array == item->type) && + (*item_array != RTE_FLOW_ITEM_TYPE_END)) { + item_array++; + item++; + } + + return (*item_array == RTE_FLOW_ITEM_TYPE_END && + item->type == RTE_FLOW_ITEM_TYPE_END); +} + +struct iavf_pattern_match_item * +iavf_search_pattern_match_item(const struct rte_flow_item pattern[], + struct iavf_pattern_match_item *array, + uint32_t array_len, + struct rte_flow_error *error) +{ + uint16_t i = 0; + struct iavf_pattern_match_item *pattern_match_item; + /* need free by each filter */ + struct rte_flow_item *items; /* used for pattern without VOID items */ + uint32_t item_num = 0; /* non-void item number */ + + /* Get the non-void item number of pattern */ + while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) { + if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID) + item_num++; + i++; + } + item_num++; + + items = rte_zmalloc("iavf_pattern", + item_num * sizeof(struct rte_flow_item), 0); + if (!items) { + rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM, + NULL, "No memory for PMD internal items."); + return NULL; + } + pattern_match_item = rte_zmalloc("iavf_pattern_match_item", + sizeof(struct iavf_pattern_match_item), 0); + if (!pattern_match_item) { + rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE, + NULL, "Failed to allocate memory."); + return NULL; + } + + iavf_pattern_skip_void_item(items, pattern); + + for (i = 0; i < array_len; i++) + if (iavf_match_pattern(array[i].pattern_list, + items)) { + pattern_match_item->input_set_mask = + array[i].input_set_mask; + pattern_match_item->pattern_list = + array[i].pattern_list; + pattern_match_item->meta = array[i].meta; + rte_free(items); + return pattern_match_item; + } + rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, + pattern, "Unsupported pattern"); + + rte_free(items); + rte_free(pattern_match_item); + return NULL; +} + +static struct iavf_flow_engine * +iavf_parse_engine_create(struct iavf_adapter *ad, + struct rte_flow *flow, + struct iavf_parser_list *parser_list, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + struct iavf_flow_engine *engine = NULL; + struct iavf_flow_parser_node *parser_node; + void *temp; + void *meta = NULL; + + TAILQ_FOREACH_SAFE(parser_node, parser_list, node, temp) { + if (parser_node->parser->parse_pattern_action(ad, + parser_node->parser->array, + parser_node->parser->array_len, + pattern, actions, &meta, error) < 0) + continue; + + engine = parser_node->parser->engine; + + RTE_ASSERT(engine->create != NULL); + if (!(engine->create(ad, flow, meta, error))) + return engine; + } + return NULL; +} + +static struct iavf_flow_engine * +iavf_parse_engine_validate(struct iavf_adapter *ad, + struct rte_flow *flow, + struct iavf_parser_list *parser_list, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + struct iavf_flow_engine *engine = NULL; + struct iavf_flow_parser_node *parser_node; + void *temp; + void *meta = NULL; + + TAILQ_FOREACH_SAFE(parser_node, parser_list, node, temp) { + if (parser_node->parser->parse_pattern_action(ad, + parser_node->parser->array, + parser_node->parser->array_len, + pattern, actions, &meta, 
error) < 0) + continue; + + engine = parser_node->parser->engine; + if (engine->validation == NULL) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_HANDLE, + NULL, "Validation not support"); + continue; + } + + if (engine->validation(ad, flow, meta, error)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_HANDLE, + NULL, "Validation failed"); + break; + } + } + return engine; +} + + +static int +iavf_flow_process_filter(struct rte_eth_dev *dev, + struct rte_flow *flow, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct iavf_flow_engine **engine, + parse_engine_t iavf_parse_engine, + struct rte_flow_error *error) +{ + int ret = IAVF_ERR_CONFIG; + struct iavf_adapter *ad = + IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad); + + if (!pattern) { + rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM, + NULL, "NULL pattern."); + return -rte_errno; + } + + if (!actions) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_NUM, + NULL, "NULL action."); + return -rte_errno; + } + + if (!attr) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR, + NULL, "NULL attribute."); + return -rte_errno; + } + + ret = iavf_flow_valid_attr(attr, error); + if (ret) + return ret; + + *engine = iavf_parse_engine(ad, flow, &vf->rss_parser_list, pattern, + actions, error); + if (*engine) + return 0; + + *engine = iavf_parse_engine(ad, flow, &vf->dist_parser_list, pattern, + actions, error); + + if (!*engine) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Failed to create parser engine."); + return -rte_errno; + } + + return 0; +} + +static int +iavf_flow_validate(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + struct iavf_flow_engine *engine; + + return iavf_flow_process_filter(dev, NULL, attr, pattern, actions, + &engine, iavf_parse_engine_validate, error); +} + +static struct rte_flow * +iavf_flow_create(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + struct iavf_adapter *ad = + IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad); + struct iavf_flow_engine *engine = NULL; + struct rte_flow *flow = NULL; + int ret; + + flow = rte_zmalloc("iavf_flow", sizeof(struct rte_flow), 0); + if (!flow) { + rte_flow_error_set(error, ENOMEM, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Failed to allocate memory"); + return flow; + } + + ret = iavf_flow_process_filter(dev, flow, attr, pattern, actions, + &engine, iavf_parse_engine_create, error); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Failed to create flow"); + rte_free(flow); + flow = NULL; + goto free_flow; + } + + flow->engine = engine; + TAILQ_INSERT_TAIL(&vf->flow_list, flow, node); + PMD_DRV_LOG(INFO, "Succeeded to create (%d) flow", engine->type); + +free_flow: + rte_spinlock_unlock(&vf->flow_ops_lock); + return flow; +} + +static bool +iavf_flow_is_valid(struct rte_flow *flow) +{ + struct iavf_flow_engine *engine; + void *temp; + + if (flow && flow->engine) { + TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) { + if (engine == flow->engine) + return true; + } + } + + return false; +} + +static int +iavf_flow_destroy(struct rte_eth_dev *dev, + 
struct rte_flow *flow, + struct rte_flow_error *error) +{ + struct iavf_adapter *ad = + IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad); + int ret = 0; + + if (!iavf_flow_is_valid(flow) || !flow->engine->destroy) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_HANDLE, + NULL, "Invalid flow destroy"); + return -rte_errno; + } + + rte_spinlock_lock(&vf->flow_ops_lock); + + ret = flow->engine->destroy(ad, flow, error); + + if (!ret) { + TAILQ_REMOVE(&vf->flow_list, flow, node); + rte_free(flow); + } else { + PMD_DRV_LOG(ERR, "Failed to destroy flow"); + } + + rte_spinlock_unlock(&vf->flow_ops_lock); + + return ret; +} + +int +iavf_flow_flush(struct rte_eth_dev *dev, + struct rte_flow_error *error) +{ + struct iavf_adapter *ad = + IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad); + struct rte_flow *p_flow; + void *temp; + int ret = 0; + + TAILQ_FOREACH_SAFE(p_flow, &vf->flow_list, node, temp) { + ret = iavf_flow_destroy(dev, p_flow, error); + if (ret) { + PMD_DRV_LOG(ERR, "Failed to flush flows"); + return -EINVAL; + } + } + + return ret; +} + +static int +iavf_flow_query(struct rte_eth_dev *dev, + struct rte_flow *flow, + const struct rte_flow_action *actions, + void *data, + struct rte_flow_error *error) +{ + int ret = -EINVAL; + struct iavf_adapter *ad = + IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + struct rte_flow_query_count *count = data; + + if (!iavf_flow_is_valid(flow) || !flow->engine->query_count) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_HANDLE, + NULL, "Invalid flow query"); + return -rte_errno; + } + + for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { + switch (actions->type) { + case RTE_FLOW_ACTION_TYPE_VOID: + break; + case RTE_FLOW_ACTION_TYPE_COUNT: + ret = flow->engine->query_count(ad, flow, count, error); + break; + default: + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + actions, + "action not supported"); + } + } + return ret; +} + diff --git a/src/spdk/dpdk/drivers/net/iavf/iavf_generic_flow.h b/src/spdk/dpdk/drivers/net/iavf/iavf_generic_flow.h new file mode 100644 index 000000000..978d0716b --- /dev/null +++ b/src/spdk/dpdk/drivers/net/iavf/iavf_generic_flow.h @@ -0,0 +1,320 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019 Intel Corporation + */ + +#ifndef _IAVF_GENERIC_FLOW_H_ +#define _IAVF_GENERIC_FLOW_H_ + +#include + +/* protocol */ + +#define IAVF_PROT_MAC_INNER (1ULL << 1) +#define IAVF_PROT_MAC_OUTER (1ULL << 2) +#define IAVF_PROT_VLAN_INNER (1ULL << 3) +#define IAVF_PROT_VLAN_OUTER (1ULL << 4) +#define IAVF_PROT_IPV4_INNER (1ULL << 5) +#define IAVF_PROT_IPV4_OUTER (1ULL << 6) +#define IAVF_PROT_IPV6_INNER (1ULL << 7) +#define IAVF_PROT_IPV6_OUTER (1ULL << 8) +#define IAVF_PROT_TCP_INNER (1ULL << 9) +#define IAVF_PROT_TCP_OUTER (1ULL << 10) +#define IAVF_PROT_UDP_INNER (1ULL << 11) +#define IAVF_PROT_UDP_OUTER (1ULL << 12) +#define IAVF_PROT_SCTP_INNER (1ULL << 13) +#define IAVF_PROT_SCTP_OUTER (1ULL << 14) +#define IAVF_PROT_ICMP4_INNER (1ULL << 15) +#define IAVF_PROT_ICMP4_OUTER (1ULL << 16) +#define IAVF_PROT_ICMP6_INNER (1ULL << 17) +#define IAVF_PROT_ICMP6_OUTER (1ULL << 18) +#define IAVF_PROT_VXLAN (1ULL << 19) +#define IAVF_PROT_NVGRE (1ULL << 20) +#define IAVF_PROT_GTPU (1ULL << 21) +#define IAVF_PROT_ESP (1ULL << 22) +#define IAVF_PROT_AH (1ULL << 23) +#define IAVF_PROT_L2TPV3OIP (1ULL << 24) +#define IAVF_PROT_PFCP (1ULL << 25) + + +/* field 
*/ + +#define IAVF_SMAC (1ULL << 63) +#define IAVF_DMAC (1ULL << 62) +#define IAVF_ETHERTYPE (1ULL << 61) +#define IAVF_IP_SRC (1ULL << 60) +#define IAVF_IP_DST (1ULL << 59) +#define IAVF_IP_PROTO (1ULL << 58) +#define IAVF_IP_TTL (1ULL << 57) +#define IAVF_IP_TOS (1ULL << 56) +#define IAVF_SPORT (1ULL << 55) +#define IAVF_DPORT (1ULL << 54) +#define IAVF_ICMP_TYPE (1ULL << 53) +#define IAVF_ICMP_CODE (1ULL << 52) +#define IAVF_VXLAN_VNI (1ULL << 51) +#define IAVF_NVGRE_TNI (1ULL << 50) +#define IAVF_GTPU_TEID (1ULL << 49) +#define IAVF_GTPU_QFI (1ULL << 48) +#define IAVF_ESP_SPI (1ULL << 47) +#define IAVF_AH_SPI (1ULL << 46) +#define IAVF_L2TPV3OIP_SESSION_ID (1ULL << 45) +#define IAVF_PFCP_S_FIELD (1ULL << 44) +#define IAVF_PFCP_SEID (1ULL << 43) + +/* input set */ + +#define IAVF_INSET_NONE 0ULL + +/* non-tunnel */ + +#define IAVF_INSET_SMAC (IAVF_PROT_MAC_OUTER | IAVF_SMAC) +#define IAVF_INSET_DMAC (IAVF_PROT_MAC_OUTER | IAVF_DMAC) +#define IAVF_INSET_VLAN_INNER (IAVF_PROT_VLAN_INNER) +#define IAVF_INSET_VLAN_OUTER (IAVF_PROT_VLAN_OUTER) +#define IAVF_INSET_ETHERTYPE (IAVF_ETHERTYPE) + +#define IAVF_INSET_IPV4_SRC \ + (IAVF_PROT_IPV4_OUTER | IAVF_IP_SRC) +#define IAVF_INSET_IPV4_DST \ + (IAVF_PROT_IPV4_OUTER | IAVF_IP_DST) +#define IAVF_INSET_IPV4_TOS \ + (IAVF_PROT_IPV4_OUTER | IAVF_IP_TOS) +#define IAVF_INSET_IPV4_PROTO \ + (IAVF_PROT_IPV4_OUTER | IAVF_IP_PROTO) +#define IAVF_INSET_IPV4_TTL \ + (IAVF_PROT_IPV4_OUTER | IAVF_IP_TTL) +#define IAVF_INSET_IPV6_SRC \ + (IAVF_PROT_IPV6_OUTER | IAVF_IP_SRC) +#define IAVF_INSET_IPV6_DST \ + (IAVF_PROT_IPV6_OUTER | IAVF_IP_DST) +#define IAVF_INSET_IPV6_NEXT_HDR \ + (IAVF_PROT_IPV6_OUTER | IAVF_IP_PROTO) +#define IAVF_INSET_IPV6_HOP_LIMIT \ + (IAVF_PROT_IPV6_OUTER | IAVF_IP_TTL) +#define IAVF_INSET_IPV6_TC \ + (IAVF_PROT_IPV6_OUTER | IAVF_IP_TOS) + +#define IAVF_INSET_TCP_SRC_PORT \ + (IAVF_PROT_TCP_OUTER | IAVF_SPORT) +#define IAVF_INSET_TCP_DST_PORT \ + (IAVF_PROT_TCP_OUTER | IAVF_DPORT) +#define IAVF_INSET_UDP_SRC_PORT \ + (IAVF_PROT_UDP_OUTER | IAVF_SPORT) +#define IAVF_INSET_UDP_DST_PORT \ + (IAVF_PROT_UDP_OUTER | IAVF_DPORT) +#define IAVF_INSET_SCTP_SRC_PORT \ + (IAVF_PROT_SCTP_OUTER | IAVF_SPORT) +#define IAVF_INSET_SCTP_DST_PORT \ + (IAVF_PROT_SCTP_OUTER | IAVF_DPORT) +#define IAVF_INSET_ICMP4_SRC_PORT \ + (IAVF_PROT_ICMP4_OUTER | IAVF_SPORT) +#define IAVF_INSET_ICMP4_DST_PORT \ + (IAVF_PROT_ICMP4_OUTER | IAVF_DPORT) +#define IAVF_INSET_ICMP6_SRC_PORT \ + (IAVF_PROT_ICMP6_OUTER | IAVF_SPORT) +#define IAVF_INSET_ICMP6_DST_PORT \ + (IAVF_PROT_ICMP6_OUTER | IAVF_DPORT) +#define IAVF_INSET_ICMP4_TYPE \ + (IAVF_PROT_ICMP4_OUTER | IAVF_ICMP_TYPE) +#define IAVF_INSET_ICMP4_CODE \ + (IAVF_PROT_ICMP4_OUTER | IAVF_ICMP_CODE) +#define IAVF_INSET_ICMP6_TYPE \ + (IAVF_PROT_ICMP6_OUTER | IAVF_ICMP_TYPE) +#define IAVF_INSET_ICMP6_CODE \ + (IAVF_PROT_ICMP6_OUTER | IAVF_ICMP_CODE) +#define IAVF_INSET_GTPU_TEID \ + (IAVF_PROT_GTPU | IAVF_GTPU_TEID) +#define IAVF_INSET_GTPU_QFI \ + (IAVF_PROT_GTPU | IAVF_GTPU_QFI) +#define IAVF_INSET_ESP_SPI \ + (IAVF_PROT_ESP | IAVF_ESP_SPI) +#define IAVF_INSET_AH_SPI \ + (IAVF_PROT_AH | IAVF_AH_SPI) +#define IAVF_INSET_L2TPV3OIP_SESSION_ID \ + (IAVF_PROT_L2TPV3OIP | IAVF_L2TPV3OIP_SESSION_ID) +#define IAVF_INSET_PFCP_S_FIELD \ + (IAVF_PROT_PFCP | IAVF_PFCP_S_FIELD) +#define IAVF_INSET_PFCP_SEID \ + (IAVF_PROT_PFCP | IAVF_PFCP_S_FIELD | IAVF_PFCP_SEID) + + +/* empty pattern */ +extern enum rte_flow_item_type iavf_pattern_empty[]; + +/* L2 */ +extern enum rte_flow_item_type iavf_pattern_ethertype[]; +extern enum 
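Each IAVF_INSET_* value above is the OR of one protocol bit and one field bit; iavf_fdir_parse() earlier in this patch collects these bits while walking the pattern and rejects the rule when the result is empty or contains a bit outside the matched pattern's input_set_mask. A small stand-alone restatement of that containment test, with illustrative names and values only:

/*
 * Stand-alone restatement of the check used by iavf_fdir_parse():
 * "!input_set || input_set & ~item->input_set_mask". The helper name
 * and the example below are illustrative, not taken from the driver.
 */
#include <stdbool.h>
#include <stdint.h>

static bool
input_set_is_supported(uint64_t input_set, uint64_t input_set_mask)
{
	/* something must be matched, and nothing outside the mask */
	return input_set != 0 && (input_set & ~input_set_mask) == 0;
}

/* e.g. IAVF_INSET_IPV4_SRC | IAVF_INSET_ESP_SPI is accepted only when the
 * pattern's input_set_mask covers both of those bits. */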
rte_flow_item_type iavf_pattern_ethertype_vlan[]; +extern enum rte_flow_item_type iavf_pattern_ethertype_qinq[]; + +/* ARP */ +extern enum rte_flow_item_type iavf_pattern_eth_arp[]; + +/* non-tunnel IPv4 */ +extern enum rte_flow_item_type iavf_pattern_eth_ipv4[]; +extern enum rte_flow_item_type iavf_pattern_eth_vlan_ipv4[]; +extern enum rte_flow_item_type iavf_pattern_eth_qinq_ipv4[]; +extern enum rte_flow_item_type iavf_pattern_eth_ipv4_udp[]; +extern enum rte_flow_item_type iavf_pattern_eth_vlan_ipv4_udp[]; +extern enum rte_flow_item_type iavf_pattern_eth_qinq_ipv4_udp[]; +extern enum rte_flow_item_type iavf_pattern_eth_ipv4_tcp[]; +extern enum rte_flow_item_type iavf_pattern_eth_vlan_ipv4_tcp[]; +extern enum rte_flow_item_type iavf_pattern_eth_qinq_ipv4_tcp[]; +extern enum rte_flow_item_type iavf_pattern_eth_ipv4_sctp[]; +extern enum rte_flow_item_type iavf_pattern_eth_vlan_ipv4_sctp[]; +extern enum rte_flow_item_type iavf_pattern_eth_qinq_ipv4_sctp[]; +extern enum rte_flow_item_type iavf_pattern_eth_ipv4_icmp[]; +extern enum rte_flow_item_type iavf_pattern_eth_vlan_ipv4_icmp[]; +extern enum rte_flow_item_type iavf_pattern_eth_qinq_ipv4_icmp[]; + +/* non-tunnel IPv6 */ +extern enum rte_flow_item_type iavf_pattern_eth_ipv6[]; +extern enum rte_flow_item_type iavf_pattern_eth_vlan_ipv6[]; +extern enum rte_flow_item_type iavf_pattern_eth_qinq_ipv6[]; +extern enum rte_flow_item_type iavf_pattern_eth_ipv6_udp[]; +extern enum rte_flow_item_type iavf_pattern_eth_vlan_ipv6_udp[]; +extern enum rte_flow_item_type iavf_pattern_eth_qinq_ipv6_udp[]; +extern enum rte_flow_item_type iavf_pattern_eth_ipv6_tcp[]; +extern enum rte_flow_item_type iavf_pattern_eth_vlan_ipv6_tcp[]; +extern enum rte_flow_item_type iavf_pattern_eth_qinq_ipv6_tcp[]; +extern enum rte_flow_item_type iavf_pattern_eth_ipv6_sctp[]; +extern enum rte_flow_item_type iavf_pattern_eth_vlan_ipv6_sctp[]; +extern enum rte_flow_item_type iavf_pattern_eth_qinq_ipv6_sctp[]; +extern enum rte_flow_item_type iavf_pattern_eth_ipv6_icmp6[]; +extern enum rte_flow_item_type iavf_pattern_eth_vlan_ipv6_icmp6[]; +extern enum rte_flow_item_type iavf_pattern_eth_qinq_ipv6_icmp6[]; + +/* GTPU */ +extern enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu[]; +extern enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_ipv4[]; +extern enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_eh[]; +extern enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_eh_ipv4[]; +extern enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_eh_ipv4_udp[]; +extern enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_eh_ipv4_tcp[]; +extern enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_eh_ipv4_icmp[]; + +/* ESP */ +extern enum rte_flow_item_type iavf_pattern_eth_ipv4_esp[]; +extern enum rte_flow_item_type iavf_pattern_eth_ipv4_udp_esp[]; +extern enum rte_flow_item_type iavf_pattern_eth_ipv6_esp[]; +extern enum rte_flow_item_type iavf_pattern_eth_ipv6_udp_esp[]; + +/* AH */ +extern enum rte_flow_item_type iavf_pattern_eth_ipv4_ah[]; +extern enum rte_flow_item_type iavf_pattern_eth_ipv6_ah[]; + +/* L2TPV3 */ +extern enum rte_flow_item_type iavf_pattern_eth_ipv4_l2tpv3[]; +extern enum rte_flow_item_type iavf_pattern_eth_ipv6_l2tpv3[]; + +/* PFCP */ +extern enum rte_flow_item_type iavf_pattern_eth_ipv4_pfcp[]; +extern enum rte_flow_item_type iavf_pattern_eth_ipv6_pfcp[]; + + +extern const struct rte_flow_ops iavf_flow_ops; + +/* pattern structure */ +struct iavf_pattern_match_item { + enum rte_flow_item_type *pattern_list; + /* pattern_list must end with RTE_FLOW_ITEM_TYPE_END */ + 
uint64_t input_set_mask; + void *meta; +}; + +typedef int (*engine_init_t)(struct iavf_adapter *ad); +typedef void (*engine_uninit_t)(struct iavf_adapter *ad); +typedef int (*engine_validation_t)(struct iavf_adapter *ad, + struct rte_flow *flow, + void *meta, + struct rte_flow_error *error); +typedef int (*engine_create_t)(struct iavf_adapter *ad, + struct rte_flow *flow, + void *meta, + struct rte_flow_error *error); +typedef int (*engine_destroy_t)(struct iavf_adapter *ad, + struct rte_flow *flow, + struct rte_flow_error *error); +typedef int (*engine_query_t)(struct iavf_adapter *ad, + struct rte_flow *flow, + struct rte_flow_query_count *count, + struct rte_flow_error *error); +typedef void (*engine_free_t) (struct rte_flow *flow); +typedef int (*parse_pattern_action_t)(struct iavf_adapter *ad, + struct iavf_pattern_match_item *array, + uint32_t array_len, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + void **meta, + struct rte_flow_error *error); + +/* engine types. */ +enum iavf_flow_engine_type { + IAVF_FLOW_ENGINE_NONE = 0, + IAVF_FLOW_ENGINE_FDIR, + IAVF_FLOW_ENGINE_HASH, + IAVF_FLOW_ENGINE_MAX, +}; + +/** + * classification stages. + * for non-pipeline mode, we have two classification stages: Distributor/RSS + * for pipeline-mode we have three classification stages: + * Permission/Distributor/RSS + */ +enum iavf_flow_classification_stage { + IAVF_FLOW_STAGE_NONE = 0, + IAVF_FLOW_STAGE_RSS, + IAVF_FLOW_STAGE_DISTRIBUTOR, + IAVF_FLOW_STAGE_MAX, +}; + +/* Struct to store engine created. */ +struct iavf_flow_engine { + TAILQ_ENTRY(iavf_flow_engine) node; + engine_init_t init; + engine_uninit_t uninit; + engine_validation_t validation; + engine_create_t create; + engine_destroy_t destroy; + engine_query_t query_count; + engine_free_t free; + enum iavf_flow_engine_type type; +}; + +TAILQ_HEAD(iavf_engine_list, iavf_flow_engine); + +/* Struct to store flow created. */ +struct rte_flow { + TAILQ_ENTRY(rte_flow) node; + struct iavf_flow_engine *engine; + void *rule; +}; + +struct iavf_flow_parser { + struct iavf_flow_engine *engine; + struct iavf_pattern_match_item *array; + uint32_t array_len; + parse_pattern_action_t parse_pattern_action; + enum iavf_flow_classification_stage stage; +}; + +/* Struct to store parser created. 
*/ +struct iavf_flow_parser_node { + TAILQ_ENTRY(iavf_flow_parser_node) node; + struct iavf_flow_parser *parser; +}; + +void iavf_register_flow_engine(struct iavf_flow_engine *engine); +int iavf_flow_init(struct iavf_adapter *ad); +void iavf_flow_uninit(struct iavf_adapter *ad); +int iavf_flow_flush(struct rte_eth_dev *dev, + struct rte_flow_error *error); +int iavf_register_parser(struct iavf_flow_parser *parser, + struct iavf_adapter *ad); +void iavf_unregister_parser(struct iavf_flow_parser *parser, + struct iavf_adapter *ad); +struct iavf_pattern_match_item * +iavf_search_pattern_match_item(const struct rte_flow_item pattern[], + struct iavf_pattern_match_item *array, + uint32_t array_len, + struct rte_flow_error *error); +#endif diff --git a/src/spdk/dpdk/drivers/net/iavf/iavf_hash.c b/src/spdk/dpdk/drivers/net/iavf/iavf_hash.c new file mode 100644 index 000000000..af528863b --- /dev/null +++ b/src/spdk/dpdk/drivers/net/iavf/iavf_hash.c @@ -0,0 +1,1236 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2020 Intel Corporation + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "iavf_log.h" +#include "iavf.h" +#include "iavf_generic_flow.h" + +enum iavf_pattern_hint_type { + IAVF_PATTERN_HINT_NONE, + IAVF_PATTERN_HINT_IPV4, + IAVF_PATTERN_HINT_IPV4_UDP, + IAVF_PATTERN_HINT_IPV4_TCP, + IAVF_PATTERN_HINT_IPV4_SCTP, + IAVF_PATTERN_HINT_IPV6, + IAVF_PATTERN_HINT_IPV6_UDP, + IAVF_PATTERN_HINT_IPV6_TCP, + IAVF_PATTERN_HINT_IPV6_SCTP, +}; + +struct iavf_pattern_match_type { + enum iavf_pattern_hint_type phint_type; +}; + +struct iavf_hash_match_type { + enum iavf_pattern_hint_type phint_type; + uint64_t hash_type; + struct virtchnl_proto_hdrs *proto_hdrs; +}; + +struct iavf_rss_meta { + struct virtchnl_proto_hdrs *proto_hdrs; + enum virtchnl_rss_algorithm rss_algorithm; +}; + +struct iavf_hash_flow_cfg { + struct virtchnl_rss_cfg *rss_cfg; + bool simple_xor; +}; + +static int +iavf_hash_init(struct iavf_adapter *ad); +static int +iavf_hash_create(struct iavf_adapter *ad, struct rte_flow *flow, void *meta, + struct rte_flow_error *error); +static int +iavf_hash_destroy(struct iavf_adapter *ad, struct rte_flow *flow, + struct rte_flow_error *error); +static void +iavf_hash_uninit(struct iavf_adapter *ad); +static void +iavf_hash_free(struct rte_flow *flow); +static int +iavf_hash_parse_pattern_action(struct iavf_adapter *ad, + struct iavf_pattern_match_item *array, + uint32_t array_len, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + void **meta, + struct rte_flow_error *error); + +struct iavf_pattern_match_type phint_empty = { + IAVF_PATTERN_HINT_NONE}; +struct iavf_pattern_match_type phint_eth_ipv4 = { + IAVF_PATTERN_HINT_IPV4}; +struct iavf_pattern_match_type phint_eth_ipv4_udp = { + IAVF_PATTERN_HINT_IPV4_UDP}; +struct iavf_pattern_match_type phint_eth_ipv4_tcp = { + IAVF_PATTERN_HINT_IPV4_TCP}; +struct iavf_pattern_match_type phint_eth_ipv4_sctp = { + IAVF_PATTERN_HINT_IPV4_SCTP}; +struct iavf_pattern_match_type phint_eth_ipv4_gtpu_eh = { + IAVF_PATTERN_HINT_IPV4_UDP}; +struct iavf_pattern_match_type phint_eth_ipv4_esp = { + IAVF_PATTERN_HINT_IPV4}; +struct iavf_pattern_match_type phint_eth_ipv4_ah = { + IAVF_PATTERN_HINT_IPV4}; +struct iavf_pattern_match_type phint_eth_ipv4_l2tpv3 = { + IAVF_PATTERN_HINT_IPV4}; +struct iavf_pattern_match_type phint_eth_ipv4_pfcp = { + IAVF_PATTERN_HINT_IPV4_UDP}; +struct 
iavf_pattern_match_type phint_eth_ipv6 = { + IAVF_PATTERN_HINT_IPV6}; +struct iavf_pattern_match_type phint_eth_ipv6_udp = { + IAVF_PATTERN_HINT_IPV6_UDP}; +struct iavf_pattern_match_type phint_eth_ipv6_tcp = { + IAVF_PATTERN_HINT_IPV6_TCP}; +struct iavf_pattern_match_type phint_eth_ipv6_sctp = { + IAVF_PATTERN_HINT_IPV6_SCTP}; +struct iavf_pattern_match_type phint_eth_ipv6_esp = { + IAVF_PATTERN_HINT_IPV6}; +struct iavf_pattern_match_type phint_eth_ipv6_ah = { + IAVF_PATTERN_HINT_IPV6}; +struct iavf_pattern_match_type phint_eth_ipv6_l2tpv3 = { + IAVF_PATTERN_HINT_IPV6}; +struct iavf_pattern_match_type phint_eth_ipv6_pfcp = { + IAVF_PATTERN_HINT_IPV6_UDP}; + +/** + * Supported pattern for hash. + * The first member is pattern item type, + * the second member is input set mask, + * the third member is pattern hint for hash. + */ +static struct iavf_pattern_match_item iavf_hash_pattern_list[] = { + {iavf_pattern_eth_ipv4, IAVF_INSET_NONE, &phint_eth_ipv4}, + {iavf_pattern_eth_ipv4_udp, IAVF_INSET_NONE, &phint_eth_ipv4_udp}, + {iavf_pattern_eth_ipv4_tcp, IAVF_INSET_NONE, &phint_eth_ipv4_tcp}, + {iavf_pattern_eth_ipv4_sctp, IAVF_INSET_NONE, &phint_eth_ipv4_sctp}, + {iavf_pattern_eth_ipv6, IAVF_INSET_NONE, &phint_eth_ipv6}, + {iavf_pattern_eth_ipv4_gtpu_eh_ipv4, IAVF_INSET_NONE, + &phint_eth_ipv4_gtpu_eh}, + {iavf_pattern_eth_ipv4_gtpu_eh_ipv4_udp, IAVF_INSET_NONE, + &phint_eth_ipv4_gtpu_eh}, + {iavf_pattern_eth_ipv4_gtpu_eh_ipv4_tcp, IAVF_INSET_NONE, + &phint_eth_ipv4_gtpu_eh}, + {iavf_pattern_eth_ipv4_esp, IAVF_INSET_NONE, &phint_eth_ipv4_esp}, + {iavf_pattern_eth_ipv4_ah, IAVF_INSET_NONE, &phint_eth_ipv4_ah}, + {iavf_pattern_eth_ipv4_l2tpv3, IAVF_INSET_NONE, + &phint_eth_ipv4_l2tpv3}, + {iavf_pattern_eth_ipv4_pfcp, IAVF_INSET_NONE, &phint_eth_ipv4_pfcp}, + {iavf_pattern_eth_ipv6_udp, IAVF_INSET_NONE, &phint_eth_ipv6_udp}, + {iavf_pattern_eth_ipv6_tcp, IAVF_INSET_NONE, &phint_eth_ipv6_tcp}, + {iavf_pattern_eth_ipv6_sctp, IAVF_INSET_NONE, &phint_eth_ipv6_sctp}, + {iavf_pattern_eth_ipv6_esp, IAVF_INSET_NONE, &phint_eth_ipv6_esp}, + {iavf_pattern_eth_ipv6_ah, IAVF_INSET_NONE, &phint_eth_ipv6_ah}, + {iavf_pattern_eth_ipv6_l2tpv3, IAVF_INSET_NONE, + &phint_eth_ipv6_l2tpv3}, + {iavf_pattern_eth_ipv6_pfcp, IAVF_INSET_NONE, &phint_eth_ipv6_pfcp}, + {iavf_pattern_empty, IAVF_INSET_NONE, &phint_empty}, +}; + +#define GTP_EH_PDU_LINK_UP 1 +#define GTP_EH_PDU_LINK_DWN 0 + +#define TUNNEL_LEVEL_OUTER 0 +#define TUNNEL_LEVEL_FIRST_INNER 1 + +#define PROTO_COUNT_ONE 1 +#define PROTO_COUNT_TWO 2 +#define PROTO_COUNT_THREE 3 + +#define BUFF_NOUSED 0 +#define FIELD_FOR_PROTO_ONLY 0 + +#define proto_hint_eth_src { \ + VIRTCHNL_PROTO_HDR_ETH, VIRTCHNL_PROTO_HDR_ETH_SRC, {BUFF_NOUSED } } + +#define proto_hint_eth_dst { \ + VIRTCHNL_PROTO_HDR_ETH, VIRTCHNL_PROTO_HDR_ETH_DST, {BUFF_NOUSED } } + +#define proto_hint_eth_only { \ + VIRTCHNL_PROTO_HDR_ETH, FIELD_FOR_PROTO_ONLY, {BUFF_NOUSED } } + +#define proto_hint_eth { \ + VIRTCHNL_PROTO_HDR_ETH, \ + VIRTCHNL_PROTO_HDR_ETH_SRC | VIRTCHNL_PROTO_HDR_ETH_DST, \ + {BUFF_NOUSED } } + +#define proto_hint_svlan { \ + VIRTCHNL_PROTO_HDR_S_VLAN, VIRTCHNL_PROTO_HDR_S_VLAN_ID, \ + {BUFF_NOUSED } } + +#define proto_hint_cvlan { \ + VIRTCHNL_PROTO_HDR_C_VLAN, VIRTCHNL_PROTO_HDR_C_VLAN_ID, \ + {BUFF_NOUSED } } + +#define proto_hint_ipv4_src { \ + VIRTCHNL_PROTO_HDR_IPV4, VIRTCHNL_PROTO_HDR_IPV4_SRC, {BUFF_NOUSED } } + +#define proto_hint_ipv4_dst { \ + VIRTCHNL_PROTO_HDR_IPV4, VIRTCHNL_PROTO_HDR_IPV4_DST, {BUFF_NOUSED } } + +#define proto_hint_ipv4_only { \ + 
VIRTCHNL_PROTO_HDR_IPV4, FIELD_FOR_PROTO_ONLY, {BUFF_NOUSED } } + +#define proto_hint_ipv4 { \ + VIRTCHNL_PROTO_HDR_IPV4, \ + VIRTCHNL_PROTO_HDR_IPV4_SRC | VIRTCHNL_PROTO_HDR_IPV4_DST, \ + {BUFF_NOUSED } } + +#define proto_hint_udp_src_port { \ + VIRTCHNL_PROTO_HDR_UDP, VIRTCHNL_PROTO_HDR_UDP_SRC_PORT, \ + {BUFF_NOUSED } } + +#define proto_hint_udp_dst_port { \ + VIRTCHNL_PROTO_HDR_UDP, VIRTCHNL_PROTO_HDR_UDP_DST_PORT, \ + {BUFF_NOUSED } } + +#define proto_hint_udp_only { \ + VIRTCHNL_PROTO_HDR_UDP, FIELD_FOR_PROTO_ONLY, {BUFF_NOUSED } } + +#define proto_hint_udp { \ + VIRTCHNL_PROTO_HDR_UDP, \ + VIRTCHNL_PROTO_HDR_UDP_SRC_PORT | VIRTCHNL_PROTO_HDR_UDP_DST_PORT, \ + {BUFF_NOUSED } } + +#define proto_hint_tcp_src_port { \ + VIRTCHNL_PROTO_HDR_TCP, VIRTCHNL_PROTO_HDR_TCP_SRC_PORT, \ + {BUFF_NOUSED } } + +#define proto_hint_tcp_dst_port { \ + VIRTCHNL_PROTO_HDR_TCP, VIRTCHNL_PROTO_HDR_TCP_DST_PORT, \ + {BUFF_NOUSED } } + +#define proto_hint_tcp_only { \ + VIRTCHNL_PROTO_HDR_TCP, FIELD_FOR_PROTO_ONLY, {BUFF_NOUSED } } + +#define proto_hint_tcp { \ + VIRTCHNL_PROTO_HDR_TCP, \ + VIRTCHNL_PROTO_HDR_TCP_SRC_PORT | VIRTCHNL_PROTO_HDR_TCP_DST_PORT, \ + {BUFF_NOUSED } } + +#define proto_hint_sctp_src_port { \ + VIRTCHNL_PROTO_HDR_SCTP, VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT, \ + {BUFF_NOUSED } } + +#define proto_hint_sctp_dst_port { \ + VIRTCHNL_PROTO_HDR_SCTP, VIRTCHNL_PROTO_HDR_SCTP_DST_PORT, \ + {BUFF_NOUSED } } + +#define proto_hint_sctp_only { \ + VIRTCHNL_PROTO_HDR_SCTP, FIELD_FOR_PROTO_ONLY, {BUFF_NOUSED } } + +#define proto_hint_sctp { \ + VIRTCHNL_PROTO_HDR_SCTP, \ + VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT | VIRTCHNL_PROTO_HDR_SCTP_DST_PORT, \ + {BUFF_NOUSED } } + +#define proto_hint_ipv6_src { \ + VIRTCHNL_PROTO_HDR_IPV6, VIRTCHNL_PROTO_HDR_IPV6_SRC, {BUFF_NOUSED } } + +#define proto_hint_ipv6_dst { \ + VIRTCHNL_PROTO_HDR_IPV6, VIRTCHNL_PROTO_HDR_IPV6_DST, {BUFF_NOUSED } } + +#define proto_hint_ipv6_only { \ + VIRTCHNL_PROTO_HDR_IPV6, FIELD_FOR_PROTO_ONLY, {BUFF_NOUSED } } + +#define proto_hint_ipv6 { \ + VIRTCHNL_PROTO_HDR_IPV6, \ + VIRTCHNL_PROTO_HDR_IPV6_SRC | VIRTCHNL_PROTO_HDR_IPV6_DST, \ + {BUFF_NOUSED } } + +#define proto_hint_gtpu_up_only { \ + VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_UP, \ + FIELD_FOR_PROTO_ONLY, {BUFF_NOUSED } } + +#define proto_hint_gtpu_dwn_only { \ + VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_DWN, \ + FIELD_FOR_PROTO_ONLY, {BUFF_NOUSED } } + +#define proto_hint_esp { \ + VIRTCHNL_PROTO_HDR_ESP, \ + VIRTCHNL_PROTO_HDR_ESP_SPI, {BUFF_NOUSED } } + +#define proto_hint_ah { \ + VIRTCHNL_PROTO_HDR_AH, \ + VIRTCHNL_PROTO_HDR_AH_SPI, {BUFF_NOUSED } } + +#define proto_hint_l2tpv3 { \ + VIRTCHNL_PROTO_HDR_L2TPV3, \ + VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID, {BUFF_NOUSED } } + +#define proto_hint_pfcp { \ + VIRTCHNL_PROTO_HDR_PFCP, VIRTCHNL_PROTO_HDR_PFCP_SEID, {BUFF_NOUSED } } + +struct virtchnl_proto_hdrs hdrs_hint_eth_src = { + TUNNEL_LEVEL_OUTER, PROTO_COUNT_ONE, {proto_hint_eth_src } +}; + +struct virtchnl_proto_hdrs hdrs_hint_eth_dst = { + TUNNEL_LEVEL_OUTER, PROTO_COUNT_ONE, {proto_hint_eth_dst } +}; + +struct virtchnl_proto_hdrs hdrs_hint_eth = { + TUNNEL_LEVEL_OUTER, PROTO_COUNT_ONE, {proto_hint_eth } +}; + +struct virtchnl_proto_hdrs hdrs_hint_svlan = { + TUNNEL_LEVEL_OUTER, PROTO_COUNT_ONE, {proto_hint_svlan } +}; + +struct virtchnl_proto_hdrs hdrs_hint_cvlan = { + TUNNEL_LEVEL_OUTER, PROTO_COUNT_ONE, {proto_hint_cvlan } +}; + +struct virtchnl_proto_hdrs hdrs_hint_ipv4_src = { + TUNNEL_LEVEL_OUTER, PROTO_COUNT_ONE, {proto_hint_ipv4_src } +}; + +struct virtchnl_proto_hdrs hdrs_hint_ipv4_dst = { + 
TUNNEL_LEVEL_OUTER, PROTO_COUNT_ONE, {proto_hint_ipv4_dst } +}; + +struct virtchnl_proto_hdrs hdrs_hint_ipv4_src_gtpu_up = { + TUNNEL_LEVEL_FIRST_INNER, PROTO_COUNT_TWO, {proto_hint_gtpu_up_only, + proto_hint_ipv4_src } +}; + +struct virtchnl_proto_hdrs hdrs_hint_ipv4_dst_gtpu_dwn = { + TUNNEL_LEVEL_FIRST_INNER, PROTO_COUNT_TWO, {proto_hint_gtpu_dwn_only, + proto_hint_ipv4_dst } +}; + +struct virtchnl_proto_hdrs hdrs_hint_ipv4_esp = { + TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv4_only, + proto_hint_esp } +}; + +struct virtchnl_proto_hdrs hdrs_hint_ipv4_ah = { + TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv4_only, + proto_hint_ah } +}; + +struct virtchnl_proto_hdrs hdrs_hint_ipv4_l2tpv3 = { + TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv4_only, + proto_hint_l2tpv3 } +}; + +struct virtchnl_proto_hdrs hdrs_hint_ipv4_pfcp = { + TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv4_only, + proto_hint_pfcp } +}; + +struct virtchnl_proto_hdrs hdrs_hint_ipv4 = { + TUNNEL_LEVEL_OUTER, PROTO_COUNT_ONE, {proto_hint_ipv4 } +}; + +struct virtchnl_proto_hdrs hdrs_hint_ipv4_src_udp_src_port = { + TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv4_src, + proto_hint_udp_src_port } +}; + +struct virtchnl_proto_hdrs hdrs_hint_ipv4_src_udp_dst_port = { + TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv4_src, + proto_hint_udp_dst_port } +}; + +struct virtchnl_proto_hdrs hdrs_hint_ipv4_dst_udp_src_port = { + TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv4_dst, + proto_hint_udp_src_port } +}; + +struct virtchnl_proto_hdrs hdrs_hint_ipv4_dst_udp_dst_port = { + TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv4_dst, + proto_hint_udp_dst_port } +}; + +struct virtchnl_proto_hdrs hdrs_hint_ipv4_udp_src_port = { + TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv4_only, + proto_hint_udp_src_port } +}; + +struct virtchnl_proto_hdrs hdrs_hint_ipv4_udp_dst_port = { + TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv4_only, + proto_hint_udp_dst_port } +}; + +struct virtchnl_proto_hdrs hdrs_hint_ipv4_udp = { + TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv4, + proto_hint_udp } +}; + +struct virtchnl_proto_hdrs hdrs_hint_ipv4_src_tcp_src_port = { + TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv4_src, + proto_hint_tcp_src_port } +}; + +struct virtchnl_proto_hdrs hdrs_hint_ipv4_src_tcp_dst_port = { + TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv4_src, + proto_hint_tcp_dst_port } +}; + +struct virtchnl_proto_hdrs hdrs_hint_ipv4_dst_tcp_src_port = { + TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv4_dst, + proto_hint_tcp_src_port } +}; + +struct virtchnl_proto_hdrs hdrs_hint_ipv4_dst_tcp_dst_port = { + TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv4_dst, + proto_hint_tcp_dst_port } +}; + +struct virtchnl_proto_hdrs hdrs_hint_ipv4_tcp_src_port = { + TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv4_only, + proto_hint_tcp_src_port } +}; + +struct virtchnl_proto_hdrs hdrs_hint_ipv4_tcp_dst_port = { + TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv4_only, + proto_hint_tcp_dst_port } +}; + +struct virtchnl_proto_hdrs hdrs_hint_ipv4_tcp = { + TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv4, + proto_hint_tcp } +}; + +struct virtchnl_proto_hdrs hdrs_hint_ipv4_src_sctp_src_port = { + TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv4_src, + proto_hint_sctp_src_port } +}; + +struct virtchnl_proto_hdrs hdrs_hint_ipv4_src_sctp_dst_port = { + TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv4_src, + proto_hint_sctp_dst_port } +}; + +struct 
virtchnl_proto_hdrs hdrs_hint_ipv4_dst_sctp_src_port = { + TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv4_dst, + proto_hint_sctp_src_port } +}; + +struct virtchnl_proto_hdrs hdrs_hint_ipv4_dst_sctp_dst_port = { + TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv4_dst, + proto_hint_sctp_dst_port } +}; + +struct virtchnl_proto_hdrs hdrs_hint_ipv4_sctp_src_port = { + TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv4_only, + proto_hint_sctp_src_port } +}; + +struct virtchnl_proto_hdrs hdrs_hint_ipv4_sctp_dst_port = { + TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv4_only, + proto_hint_sctp_dst_port } +}; + +struct virtchnl_proto_hdrs hdrs_hint_ipv4_sctp = { + TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv4, + proto_hint_sctp } +}; + +struct virtchnl_proto_hdrs hdrs_hint_ipv6_src = { + TUNNEL_LEVEL_OUTER, PROTO_COUNT_ONE, {proto_hint_ipv6_src } +}; + +struct virtchnl_proto_hdrs hdrs_hint_ipv6_dst = { + TUNNEL_LEVEL_OUTER, PROTO_COUNT_ONE, {proto_hint_ipv6_dst } +}; + +struct virtchnl_proto_hdrs hdrs_hint_ipv6_esp = { + TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv6_only, + proto_hint_esp } +}; + +struct virtchnl_proto_hdrs hdrs_hint_ipv6_ah = { + TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv6_only, + proto_hint_ah } +}; + +struct virtchnl_proto_hdrs hdrs_hint_ipv6_l2tpv3 = { + TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv6_only, + proto_hint_l2tpv3 } +}; + +struct virtchnl_proto_hdrs hdrs_hint_ipv6_pfcp = { + TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv6_only, + proto_hint_pfcp } +}; + +struct virtchnl_proto_hdrs hdrs_hint_ipv6 = { + TUNNEL_LEVEL_OUTER, PROTO_COUNT_ONE, {proto_hint_ipv6 } +}; + +struct virtchnl_proto_hdrs hdrs_hint_ipv6_src_udp_src_port = { + TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv6_src, + proto_hint_udp_src_port } +}; + +struct virtchnl_proto_hdrs hdrs_hint_ipv6_src_udp_dst_port = { + TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv6_src, + proto_hint_udp_dst_port } +}; + +struct virtchnl_proto_hdrs hdrs_hint_ipv6_dst_udp_src_port = { + TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv6_dst, + proto_hint_udp_src_port } +}; + +struct virtchnl_proto_hdrs hdrs_hint_ipv6_dst_udp_dst_port = { + TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv6_dst, + proto_hint_udp_dst_port } +}; + +struct virtchnl_proto_hdrs hdrs_hint_ipv6_udp_src_port = { + TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_udp_only, + proto_hint_udp_src_port } +}; + +struct virtchnl_proto_hdrs hdrs_hint_ipv6_udp_dst_port = { + TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_udp_only, + proto_hint_udp_dst_port } +}; + +struct virtchnl_proto_hdrs hdrs_hint_ipv6_udp = { + TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv6, + proto_hint_udp } +}; + +struct virtchnl_proto_hdrs hdrs_hint_ipv6_src_tcp_src_port = { + TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv6_src, + proto_hint_tcp_src_port } +}; + +struct virtchnl_proto_hdrs hdrs_hint_ipv6_src_tcp_dst_port = { + TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv6_src, + proto_hint_tcp_dst_port } +}; + +struct virtchnl_proto_hdrs hdrs_hint_ipv6_dst_tcp_src_port = { + TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv6_dst, + proto_hint_tcp_src_port } +}; + +struct virtchnl_proto_hdrs hdrs_hint_ipv6_dst_tcp_dst_port = { + TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv6_dst, + proto_hint_tcp_dst_port } +}; + +struct virtchnl_proto_hdrs hdrs_hint_ipv6_tcp_src_port = { + TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_udp_only, + proto_hint_tcp_src_port } +}; + +struct 
virtchnl_proto_hdrs hdrs_hint_ipv6_tcp_dst_port = { + TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_udp_only, + proto_hint_tcp_dst_port } +}; + +struct virtchnl_proto_hdrs hdrs_hint_ipv6_tcp = { + TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv6, + proto_hint_tcp } +}; + +struct virtchnl_proto_hdrs hdrs_hint_ipv6_src_sctp_src_port = { + TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv6_src, + proto_hint_sctp_src_port } +}; + +struct virtchnl_proto_hdrs hdrs_hint_ipv6_src_sctp_dst_port = { + TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv6_src, + proto_hint_sctp_dst_port } +}; + +struct virtchnl_proto_hdrs hdrs_hint_ipv6_dst_sctp_src_port = { + TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv6_dst, + proto_hint_sctp_src_port } +}; + +struct virtchnl_proto_hdrs hdrs_hint_ipv6_dst_sctp_dst_port = { + TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv6_dst, + proto_hint_sctp_dst_port } +}; + +struct virtchnl_proto_hdrs hdrs_hint_ipv6_sctp_src_port = { + TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_udp_only, + proto_hint_sctp_src_port } +}; + +struct virtchnl_proto_hdrs hdrs_hint_ipv6_sctp_dst_port = { + TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_udp_only, + proto_hint_sctp_dst_port } +}; + +struct virtchnl_proto_hdrs hdrs_hint_ipv6_sctp = { + TUNNEL_LEVEL_OUTER, PROTO_COUNT_TWO, {proto_hint_ipv6, + proto_hint_sctp } +}; + +/** + * The first member is pattern hint type, + * the second member is hash type, + * the third member is virtchnl protocol hdrs. + */ +struct iavf_hash_match_type iavf_hash_type_list[] = { + /* IPV4 */ + {IAVF_PATTERN_HINT_IPV4, ETH_RSS_L2_SRC_ONLY, &hdrs_hint_eth_src}, + {IAVF_PATTERN_HINT_IPV4, ETH_RSS_L2_DST_ONLY, &hdrs_hint_eth_dst}, + {IAVF_PATTERN_HINT_IPV4, ETH_RSS_ETH | ETH_RSS_L2_SRC_ONLY, + &hdrs_hint_eth_src}, + {IAVF_PATTERN_HINT_IPV4, ETH_RSS_ETH | ETH_RSS_L2_DST_ONLY, + &hdrs_hint_eth_dst}, + {IAVF_PATTERN_HINT_IPV4, ETH_RSS_ETH, &hdrs_hint_eth}, + {IAVF_PATTERN_HINT_IPV4, ETH_RSS_S_VLAN, &hdrs_hint_svlan}, + {IAVF_PATTERN_HINT_IPV4, ETH_RSS_C_VLAN, &hdrs_hint_cvlan}, + {IAVF_PATTERN_HINT_IPV4, ETH_RSS_L3_SRC_ONLY, &hdrs_hint_ipv4_src}, + {IAVF_PATTERN_HINT_IPV4, ETH_RSS_L3_DST_ONLY, &hdrs_hint_ipv4_dst}, + {IAVF_PATTERN_HINT_IPV4, ETH_RSS_IPV4 | ETH_RSS_L3_SRC_ONLY, + &hdrs_hint_ipv4_src}, + {IAVF_PATTERN_HINT_IPV4, ETH_RSS_IPV4 | ETH_RSS_L3_DST_ONLY, + &hdrs_hint_ipv4_dst}, + {IAVF_PATTERN_HINT_IPV4, ETH_RSS_ESP, &hdrs_hint_ipv4_esp}, + {IAVF_PATTERN_HINT_IPV4, ETH_RSS_AH, &hdrs_hint_ipv4_ah}, + {IAVF_PATTERN_HINT_IPV4, ETH_RSS_L2TPV3, &hdrs_hint_ipv4_l2tpv3}, + {IAVF_PATTERN_HINT_IPV4, ETH_RSS_IPV4, &hdrs_hint_ipv4}, + /* IPV4 UDP */ + {IAVF_PATTERN_HINT_IPV4_UDP, ETH_RSS_L3_SRC_ONLY | ETH_RSS_L4_SRC_ONLY, + &hdrs_hint_ipv4_src_udp_src_port}, + {IAVF_PATTERN_HINT_IPV4_UDP, ETH_RSS_L3_SRC_ONLY | ETH_RSS_L4_DST_ONLY, + &hdrs_hint_ipv4_src_udp_dst_port}, + {IAVF_PATTERN_HINT_IPV4_UDP, ETH_RSS_L3_SRC_ONLY | ETH_RSS_GTPU, + &hdrs_hint_ipv4_src_gtpu_up}, + {IAVF_PATTERN_HINT_IPV4_UDP, ETH_RSS_L3_SRC_ONLY, + &hdrs_hint_ipv4_src}, + {IAVF_PATTERN_HINT_IPV4_UDP, ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_SRC_ONLY, + &hdrs_hint_ipv4_dst_udp_src_port}, + {IAVF_PATTERN_HINT_IPV4_UDP, ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_DST_ONLY, + &hdrs_hint_ipv4_dst_udp_dst_port}, + {IAVF_PATTERN_HINT_IPV4_UDP, ETH_RSS_L3_DST_ONLY | ETH_RSS_GTPU, + &hdrs_hint_ipv4_dst_gtpu_dwn}, + {IAVF_PATTERN_HINT_IPV4_UDP, ETH_RSS_L3_DST_ONLY, + &hdrs_hint_ipv4_dst}, + {IAVF_PATTERN_HINT_IPV4_UDP, ETH_RSS_NONFRAG_IPV4_UDP | + ETH_RSS_L3_SRC_ONLY | ETH_RSS_L4_SRC_ONLY, + 
&hdrs_hint_ipv4_src_udp_src_port}, + {IAVF_PATTERN_HINT_IPV4_UDP, ETH_RSS_NONFRAG_IPV4_UDP | + ETH_RSS_L3_SRC_ONLY | ETH_RSS_L4_DST_ONLY, + &hdrs_hint_ipv4_src_udp_dst_port}, + {IAVF_PATTERN_HINT_IPV4_UDP, ETH_RSS_NONFRAG_IPV4_UDP | + ETH_RSS_L3_SRC_ONLY | ETH_RSS_GTPU, + &hdrs_hint_ipv4_src_gtpu_up}, + {IAVF_PATTERN_HINT_IPV4_UDP, ETH_RSS_NONFRAG_IPV4_UDP | + ETH_RSS_L3_SRC_ONLY, &hdrs_hint_ipv4_src}, + {IAVF_PATTERN_HINT_IPV4_UDP, ETH_RSS_NONFRAG_IPV4_UDP | + ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_SRC_ONLY, + &hdrs_hint_ipv4_dst_udp_src_port}, + {IAVF_PATTERN_HINT_IPV4_UDP, ETH_RSS_NONFRAG_IPV4_UDP | + ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_DST_ONLY, + &hdrs_hint_ipv4_dst_udp_dst_port}, + {IAVF_PATTERN_HINT_IPV4_UDP, ETH_RSS_NONFRAG_IPV4_UDP | + ETH_RSS_L3_DST_ONLY | ETH_RSS_GTPU, + &hdrs_hint_ipv4_dst_gtpu_dwn}, + {IAVF_PATTERN_HINT_IPV4_UDP, ETH_RSS_NONFRAG_IPV4_UDP | + ETH_RSS_L3_DST_ONLY, &hdrs_hint_ipv4_dst}, + {IAVF_PATTERN_HINT_IPV4_UDP, ETH_RSS_L4_SRC_ONLY, + &hdrs_hint_ipv4_udp_src_port}, + {IAVF_PATTERN_HINT_IPV4_UDP, ETH_RSS_L4_DST_ONLY, + &hdrs_hint_ipv4_udp_dst_port}, + {IAVF_PATTERN_HINT_IPV4_UDP, ETH_RSS_PFCP, + &hdrs_hint_ipv4_pfcp}, + {IAVF_PATTERN_HINT_IPV4_UDP, ETH_RSS_NONFRAG_IPV4_UDP, + &hdrs_hint_ipv4_udp}, + /* IPV4 TCP */ + {IAVF_PATTERN_HINT_IPV4_TCP, ETH_RSS_L3_SRC_ONLY | ETH_RSS_L4_SRC_ONLY, + &hdrs_hint_ipv4_src_tcp_src_port}, + {IAVF_PATTERN_HINT_IPV4_TCP, ETH_RSS_L3_SRC_ONLY | ETH_RSS_L4_DST_ONLY, + &hdrs_hint_ipv4_src_tcp_dst_port}, + {IAVF_PATTERN_HINT_IPV4_TCP, ETH_RSS_L3_SRC_ONLY, + &hdrs_hint_ipv4_src}, + {IAVF_PATTERN_HINT_IPV4_TCP, ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_SRC_ONLY, + &hdrs_hint_ipv4_dst_tcp_src_port}, + {IAVF_PATTERN_HINT_IPV4_TCP, ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_DST_ONLY, + &hdrs_hint_ipv4_dst_tcp_dst_port}, + {IAVF_PATTERN_HINT_IPV4_TCP, ETH_RSS_L3_DST_ONLY, + &hdrs_hint_ipv4_dst}, + {IAVF_PATTERN_HINT_IPV4_TCP, ETH_RSS_NONFRAG_IPV4_TCP | + ETH_RSS_L3_SRC_ONLY | ETH_RSS_L4_SRC_ONLY, + &hdrs_hint_ipv4_src_tcp_src_port}, + {IAVF_PATTERN_HINT_IPV4_TCP, ETH_RSS_NONFRAG_IPV4_TCP | + ETH_RSS_L3_SRC_ONLY | ETH_RSS_L4_DST_ONLY, + &hdrs_hint_ipv4_src_tcp_dst_port}, + {IAVF_PATTERN_HINT_IPV4_TCP, ETH_RSS_NONFRAG_IPV4_TCP | + ETH_RSS_L3_SRC_ONLY, &hdrs_hint_ipv4_src}, + {IAVF_PATTERN_HINT_IPV4_TCP, ETH_RSS_NONFRAG_IPV4_TCP | + ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_SRC_ONLY, + &hdrs_hint_ipv4_dst_tcp_src_port}, + {IAVF_PATTERN_HINT_IPV4_TCP, ETH_RSS_NONFRAG_IPV4_TCP | + ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_DST_ONLY, + &hdrs_hint_ipv4_dst_tcp_dst_port}, + {IAVF_PATTERN_HINT_IPV4_TCP, ETH_RSS_NONFRAG_IPV4_TCP | + ETH_RSS_L3_DST_ONLY, &hdrs_hint_ipv4_dst}, + {IAVF_PATTERN_HINT_IPV4_TCP, ETH_RSS_L4_SRC_ONLY, + &hdrs_hint_ipv4_tcp_src_port}, + {IAVF_PATTERN_HINT_IPV4_TCP, ETH_RSS_L4_DST_ONLY, + &hdrs_hint_ipv4_tcp_dst_port}, + {IAVF_PATTERN_HINT_IPV4_TCP, ETH_RSS_NONFRAG_IPV4_TCP, + &hdrs_hint_ipv4_tcp}, + /* IPV4 SCTP */ + {IAVF_PATTERN_HINT_IPV4_SCTP, ETH_RSS_L3_SRC_ONLY | ETH_RSS_L4_SRC_ONLY, + &hdrs_hint_ipv4_src_sctp_src_port}, + {IAVF_PATTERN_HINT_IPV4_SCTP, ETH_RSS_L3_SRC_ONLY | ETH_RSS_L4_DST_ONLY, + &hdrs_hint_ipv4_src_sctp_dst_port}, + {IAVF_PATTERN_HINT_IPV4_SCTP, ETH_RSS_L3_SRC_ONLY, + &hdrs_hint_ipv4_src}, + {IAVF_PATTERN_HINT_IPV4_SCTP, ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_SRC_ONLY, + &hdrs_hint_ipv4_dst_sctp_src_port}, + {IAVF_PATTERN_HINT_IPV4_SCTP, ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_DST_ONLY, + &hdrs_hint_ipv4_dst_sctp_dst_port}, + {IAVF_PATTERN_HINT_IPV4_SCTP, ETH_RSS_L3_DST_ONLY, + &hdrs_hint_ipv4_dst}, + {IAVF_PATTERN_HINT_IPV4_SCTP, ETH_RSS_NONFRAG_IPV4_SCTP | + 
ETH_RSS_L3_SRC_ONLY | ETH_RSS_L4_SRC_ONLY, + &hdrs_hint_ipv4_src_sctp_src_port}, + {IAVF_PATTERN_HINT_IPV4_SCTP, ETH_RSS_NONFRAG_IPV4_SCTP | + ETH_RSS_L3_SRC_ONLY | ETH_RSS_L4_DST_ONLY, + &hdrs_hint_ipv4_src_sctp_dst_port}, + {IAVF_PATTERN_HINT_IPV4_SCTP, ETH_RSS_NONFRAG_IPV4_SCTP | + ETH_RSS_L3_SRC_ONLY, &hdrs_hint_ipv4_src}, + {IAVF_PATTERN_HINT_IPV4_SCTP, ETH_RSS_NONFRAG_IPV4_SCTP | + ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_SRC_ONLY, + &hdrs_hint_ipv4_dst_sctp_src_port}, + {IAVF_PATTERN_HINT_IPV4_SCTP, ETH_RSS_NONFRAG_IPV4_SCTP | + ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_DST_ONLY, + &hdrs_hint_ipv4_dst_sctp_dst_port}, + {IAVF_PATTERN_HINT_IPV4_SCTP, ETH_RSS_NONFRAG_IPV4_SCTP | + ETH_RSS_L3_DST_ONLY, &hdrs_hint_ipv4_dst}, + {IAVF_PATTERN_HINT_IPV4_SCTP, ETH_RSS_L4_SRC_ONLY, + &hdrs_hint_ipv4_sctp_src_port}, + {IAVF_PATTERN_HINT_IPV4_SCTP, ETH_RSS_L4_DST_ONLY, + &hdrs_hint_ipv4_sctp_dst_port}, + {IAVF_PATTERN_HINT_IPV4_SCTP, ETH_RSS_NONFRAG_IPV4_SCTP, + &hdrs_hint_ipv4_sctp}, + /* IPV6 */ + {IAVF_PATTERN_HINT_IPV6, ETH_RSS_L2_SRC_ONLY, &hdrs_hint_eth_src}, + {IAVF_PATTERN_HINT_IPV6, ETH_RSS_L2_DST_ONLY, &hdrs_hint_eth_dst}, + {IAVF_PATTERN_HINT_IPV6, ETH_RSS_IPV6 | ETH_RSS_L2_SRC_ONLY, + &hdrs_hint_eth_src}, + {IAVF_PATTERN_HINT_IPV6, ETH_RSS_IPV6 | ETH_RSS_L2_DST_ONLY, + &hdrs_hint_eth_dst}, + {IAVF_PATTERN_HINT_IPV6, ETH_RSS_ETH, &hdrs_hint_eth}, + {IAVF_PATTERN_HINT_IPV6, ETH_RSS_S_VLAN, &hdrs_hint_svlan}, + {IAVF_PATTERN_HINT_IPV4, ETH_RSS_C_VLAN, &hdrs_hint_cvlan}, + {IAVF_PATTERN_HINT_IPV6, ETH_RSS_L3_SRC_ONLY, &hdrs_hint_ipv6_src}, + {IAVF_PATTERN_HINT_IPV6, ETH_RSS_L3_DST_ONLY, &hdrs_hint_ipv6_dst}, + {IAVF_PATTERN_HINT_IPV6, ETH_RSS_IPV6 | ETH_RSS_L3_SRC_ONLY, + &hdrs_hint_ipv6_src}, + {IAVF_PATTERN_HINT_IPV6, ETH_RSS_IPV6 | ETH_RSS_L3_DST_ONLY, + &hdrs_hint_ipv6_dst}, + {IAVF_PATTERN_HINT_IPV6, ETH_RSS_ESP, &hdrs_hint_ipv6_esp}, + {IAVF_PATTERN_HINT_IPV6, ETH_RSS_AH, &hdrs_hint_ipv6_ah}, + {IAVF_PATTERN_HINT_IPV6, ETH_RSS_L2TPV3, &hdrs_hint_ipv6_l2tpv3}, + {IAVF_PATTERN_HINT_IPV6, ETH_RSS_IPV6, &hdrs_hint_ipv6}, + /* IPV6 UDP */ + {IAVF_PATTERN_HINT_IPV6_UDP, ETH_RSS_L3_SRC_ONLY | ETH_RSS_L4_SRC_ONLY, + &hdrs_hint_ipv6_src_udp_src_port}, + {IAVF_PATTERN_HINT_IPV6_UDP, ETH_RSS_L3_SRC_ONLY | ETH_RSS_L4_DST_ONLY, + &hdrs_hint_ipv6_src_udp_dst_port}, + {IAVF_PATTERN_HINT_IPV6_UDP, ETH_RSS_L3_SRC_ONLY, + &hdrs_hint_ipv6_src}, + {IAVF_PATTERN_HINT_IPV6_UDP, ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_SRC_ONLY, + &hdrs_hint_ipv6_dst_udp_src_port}, + {IAVF_PATTERN_HINT_IPV6_UDP, ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_DST_ONLY, + &hdrs_hint_ipv6_dst_udp_dst_port}, + {IAVF_PATTERN_HINT_IPV6_UDP, ETH_RSS_L3_DST_ONLY, + &hdrs_hint_ipv6_dst}, + {IAVF_PATTERN_HINT_IPV6_UDP, ETH_RSS_NONFRAG_IPV6_UDP | + ETH_RSS_L3_SRC_ONLY | ETH_RSS_L4_SRC_ONLY, + &hdrs_hint_ipv6_src_udp_src_port}, + {IAVF_PATTERN_HINT_IPV6_UDP, ETH_RSS_NONFRAG_IPV6_UDP | + ETH_RSS_L3_SRC_ONLY | ETH_RSS_L4_DST_ONLY, + &hdrs_hint_ipv6_src_udp_dst_port}, + {IAVF_PATTERN_HINT_IPV6_UDP, ETH_RSS_NONFRAG_IPV6_UDP | + ETH_RSS_L3_SRC_ONLY, &hdrs_hint_ipv6_src}, + {IAVF_PATTERN_HINT_IPV6_UDP, ETH_RSS_NONFRAG_IPV6_UDP | + ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_SRC_ONLY, + &hdrs_hint_ipv6_dst_udp_src_port}, + {IAVF_PATTERN_HINT_IPV6_UDP, ETH_RSS_NONFRAG_IPV6_UDP | + ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_DST_ONLY, + &hdrs_hint_ipv6_dst_udp_dst_port}, + {IAVF_PATTERN_HINT_IPV6_UDP, ETH_RSS_NONFRAG_IPV6_UDP | + ETH_RSS_L3_DST_ONLY, &hdrs_hint_ipv6_dst}, + {IAVF_PATTERN_HINT_IPV6_UDP, ETH_RSS_L4_SRC_ONLY, + &hdrs_hint_ipv6_udp_src_port}, + {IAVF_PATTERN_HINT_IPV6_UDP, 
ETH_RSS_L4_DST_ONLY, + &hdrs_hint_ipv6_udp_dst_port}, + {IAVF_PATTERN_HINT_IPV6_UDP, ETH_RSS_PFCP, + &hdrs_hint_ipv6_pfcp}, + {IAVF_PATTERN_HINT_IPV6_UDP, ETH_RSS_NONFRAG_IPV6_UDP, + &hdrs_hint_ipv6_udp}, + /* IPV6 TCP */ + {IAVF_PATTERN_HINT_IPV6_TCP, ETH_RSS_L3_SRC_ONLY | ETH_RSS_L4_SRC_ONLY, + &hdrs_hint_ipv6_src_tcp_src_port}, + {IAVF_PATTERN_HINT_IPV6_TCP, ETH_RSS_L3_SRC_ONLY | ETH_RSS_L4_DST_ONLY, + &hdrs_hint_ipv6_src_tcp_dst_port}, + {IAVF_PATTERN_HINT_IPV6_TCP, ETH_RSS_L3_SRC_ONLY, + &hdrs_hint_ipv6_src}, + {IAVF_PATTERN_HINT_IPV6_TCP, ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_SRC_ONLY, + &hdrs_hint_ipv6_dst_tcp_src_port}, + {IAVF_PATTERN_HINT_IPV6_TCP, ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_DST_ONLY, + &hdrs_hint_ipv6_dst_tcp_dst_port}, + {IAVF_PATTERN_HINT_IPV6_TCP, ETH_RSS_L3_DST_ONLY, + &hdrs_hint_ipv6_dst}, + {IAVF_PATTERN_HINT_IPV6_TCP, ETH_RSS_NONFRAG_IPV6_TCP | + ETH_RSS_L3_SRC_ONLY | ETH_RSS_L4_SRC_ONLY, + &hdrs_hint_ipv6_src_tcp_src_port}, + {IAVF_PATTERN_HINT_IPV6_TCP, ETH_RSS_NONFRAG_IPV6_TCP | + ETH_RSS_L3_SRC_ONLY | ETH_RSS_L4_DST_ONLY, + &hdrs_hint_ipv6_src_tcp_dst_port}, + {IAVF_PATTERN_HINT_IPV6_TCP, ETH_RSS_NONFRAG_IPV6_TCP | + ETH_RSS_L3_SRC_ONLY, &hdrs_hint_ipv6_src}, + {IAVF_PATTERN_HINT_IPV6_TCP, ETH_RSS_NONFRAG_IPV6_TCP | + ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_SRC_ONLY, + &hdrs_hint_ipv6_dst_tcp_src_port}, + {IAVF_PATTERN_HINT_IPV6_TCP, ETH_RSS_NONFRAG_IPV6_TCP | + ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_DST_ONLY, + &hdrs_hint_ipv6_dst_tcp_dst_port}, + {IAVF_PATTERN_HINT_IPV6_TCP, ETH_RSS_NONFRAG_IPV6_TCP | + ETH_RSS_L3_DST_ONLY, &hdrs_hint_ipv6_dst}, + {IAVF_PATTERN_HINT_IPV6_TCP, ETH_RSS_L4_SRC_ONLY, + &hdrs_hint_ipv6_tcp_src_port}, + {IAVF_PATTERN_HINT_IPV6_TCP, ETH_RSS_L4_DST_ONLY, + &hdrs_hint_ipv6_tcp_dst_port}, + {IAVF_PATTERN_HINT_IPV6_TCP, ETH_RSS_NONFRAG_IPV6_TCP, + &hdrs_hint_ipv6_tcp}, + /* IPV6 SCTP */ + {IAVF_PATTERN_HINT_IPV6_SCTP, ETH_RSS_L3_SRC_ONLY | ETH_RSS_L4_SRC_ONLY, + &hdrs_hint_ipv6_src_sctp_src_port}, + {IAVF_PATTERN_HINT_IPV6_SCTP, ETH_RSS_L3_SRC_ONLY | ETH_RSS_L4_DST_ONLY, + &hdrs_hint_ipv6_src_sctp_dst_port}, + {IAVF_PATTERN_HINT_IPV6_SCTP, ETH_RSS_L3_SRC_ONLY, + &hdrs_hint_ipv6_src}, + {IAVF_PATTERN_HINT_IPV6_SCTP, ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_SRC_ONLY, + &hdrs_hint_ipv6_dst_sctp_src_port}, + {IAVF_PATTERN_HINT_IPV6_SCTP, ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_DST_ONLY, + &hdrs_hint_ipv6_dst_sctp_dst_port}, + {IAVF_PATTERN_HINT_IPV6_SCTP, ETH_RSS_L3_DST_ONLY, + &hdrs_hint_ipv6_dst}, + {IAVF_PATTERN_HINT_IPV6_SCTP, ETH_RSS_NONFRAG_IPV6_SCTP | + ETH_RSS_L3_SRC_ONLY | ETH_RSS_L4_SRC_ONLY, + &hdrs_hint_ipv6_src_sctp_src_port}, + {IAVF_PATTERN_HINT_IPV6_SCTP, ETH_RSS_NONFRAG_IPV6_SCTP | + ETH_RSS_L3_SRC_ONLY | ETH_RSS_L4_DST_ONLY, + &hdrs_hint_ipv6_src_sctp_dst_port}, + {IAVF_PATTERN_HINT_IPV6_SCTP, ETH_RSS_NONFRAG_IPV6_SCTP | + ETH_RSS_L3_SRC_ONLY, &hdrs_hint_ipv6_src}, + {IAVF_PATTERN_HINT_IPV6_SCTP, ETH_RSS_NONFRAG_IPV6_SCTP | + ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_SRC_ONLY, + &hdrs_hint_ipv6_dst_sctp_src_port}, + {IAVF_PATTERN_HINT_IPV6_SCTP, ETH_RSS_NONFRAG_IPV6_SCTP | + ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_DST_ONLY, + &hdrs_hint_ipv6_dst_sctp_dst_port}, + {IAVF_PATTERN_HINT_IPV6_SCTP, ETH_RSS_NONFRAG_IPV6_SCTP | + ETH_RSS_L3_DST_ONLY, &hdrs_hint_ipv6_dst}, + {IAVF_PATTERN_HINT_IPV6_SCTP, ETH_RSS_L4_SRC_ONLY, + &hdrs_hint_ipv6_sctp_src_port}, + {IAVF_PATTERN_HINT_IPV6_SCTP, ETH_RSS_L4_DST_ONLY, + &hdrs_hint_ipv6_sctp_dst_port}, + {IAVF_PATTERN_HINT_IPV6_SCTP, ETH_RSS_NONFRAG_IPV6_SCTP, + &hdrs_hint_ipv6_sctp}, +}; + +struct virtchnl_proto_hdrs 
*iavf_hash_default_hdrs[] = { + &hdrs_hint_ipv4, + &hdrs_hint_ipv4_udp, + &hdrs_hint_ipv4_tcp, + &hdrs_hint_ipv4_sctp, + &hdrs_hint_ipv6, + &hdrs_hint_ipv6_udp, + &hdrs_hint_ipv6_tcp, + &hdrs_hint_ipv6_sctp, +}; + +static struct iavf_flow_engine iavf_hash_engine = { + .init = iavf_hash_init, + .create = iavf_hash_create, + .destroy = iavf_hash_destroy, + .uninit = iavf_hash_uninit, + .free = iavf_hash_free, + .type = IAVF_FLOW_ENGINE_HASH, +}; + +/* Register parser for comms package. */ +static struct iavf_flow_parser iavf_hash_parser = { + .engine = &iavf_hash_engine, + .array = iavf_hash_pattern_list, + .array_len = RTE_DIM(iavf_hash_pattern_list), + .parse_pattern_action = iavf_hash_parse_pattern_action, + .stage = IAVF_FLOW_STAGE_RSS, +}; + +static int +iavf_hash_default_set(struct iavf_adapter *ad, bool add) +{ + struct virtchnl_rss_cfg *rss_cfg; + uint16_t i; + int ret; + + rss_cfg = rte_zmalloc("iavf rss rule", + sizeof(struct virtchnl_rss_cfg), 0); + if (!rss_cfg) + return -ENOMEM; + + for (i = 0; i < RTE_DIM(iavf_hash_default_hdrs); i++) { + rss_cfg->proto_hdrs = *iavf_hash_default_hdrs[i]; + rss_cfg->rss_algorithm = VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC; + + ret = iavf_add_del_rss_cfg(ad, rss_cfg, add); + if (ret) { + PMD_DRV_LOG(ERR, "fail to %s RSS configure", + add ? "add" : "delete"); + rte_free(rss_cfg); + return ret; + } + } + + return ret; +} + +RTE_INIT(iavf_hash_engine_init) +{ + struct iavf_flow_engine *engine = &iavf_hash_engine; + + iavf_register_flow_engine(engine); +} + +static int +iavf_hash_init(struct iavf_adapter *ad) +{ + struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad); + struct iavf_flow_parser *parser; + int ret; + + if (!vf->vf_res) + return -EINVAL; + + if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF)) + return -ENOTSUP; + + parser = &iavf_hash_parser; + + ret = iavf_register_parser(parser, ad); + if (ret) { + PMD_DRV_LOG(ERR, "fail to register hash parser"); + return ret; + } + + ret = iavf_hash_default_set(ad, true); + if (ret) { + PMD_DRV_LOG(ERR, "fail to set default RSS"); + iavf_unregister_parser(parser, ad); + } + + return ret; +} + +static int +iavf_hash_check_inset(const struct rte_flow_item pattern[], + struct rte_flow_error *error) +{ + const struct rte_flow_item *item = pattern; + + for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { + if (item->last) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Not support range"); + return -rte_errno; + } + } + + return 0; +} + +static uint64_t +iavf_hash_refine_type(uint64_t rss_type, const struct rte_flow_item pattern[]) +{ + const struct rte_flow_item *item; + + for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { + if (item->type == RTE_FLOW_ITEM_TYPE_GTP_PSC) { + const struct rte_flow_item_gtp_psc *psc = item->spec; + + if (psc && (psc->pdu_type == GTP_EH_PDU_LINK_UP || + psc->pdu_type == GTP_EH_PDU_LINK_DWN)) { + rss_type |= ETH_RSS_GTPU; + } + } + } + + return rss_type; +} + +static int +iavf_hash_parse_action(struct iavf_pattern_match_item *pattern_match_item, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + void **meta, struct rte_flow_error *error) +{ + struct iavf_rss_meta *rss_meta = (struct iavf_rss_meta *)*meta; + uint32_t type_list_len = RTE_DIM(iavf_hash_type_list); + struct iavf_hash_match_type *type_match_item; + enum rte_flow_action_type action_type; + const struct rte_flow_action_rss *rss; + const struct rte_flow_action *action; + bool item_found = false; + uint64_t rss_type; + 
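	/* The pattern hint carried in pattern_match_item->meta (tt below) is
	 * matched, together with the refined rss_type, against
	 * iavf_hash_type_list to pick the virtchnl proto_hdrs template that
	 * will be programmed for this RSS rule.
	 */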
uint16_t i; + + struct iavf_pattern_match_type *tt = (struct iavf_pattern_match_type *) + (pattern_match_item->meta); + + /* Supported action is RSS. */ + for (action = actions; action->type != + RTE_FLOW_ACTION_TYPE_END; action++) { + action_type = action->type; + switch (action_type) { + case RTE_FLOW_ACTION_TYPE_RSS: + rss = action->conf; + rss_type = rss->types; + + /** + * Check simultaneous use of SRC_ONLY and DST_ONLY + * of the same level. + */ + rss_type = rte_eth_rss_hf_refine(rss_type); + + /** + * Refine the hash type base on some specific item of + * the pattern, such as identify the gtpu hash. + */ + rss_type = iavf_hash_refine_type(rss_type, pattern); + + /* Check if pattern is empty. */ + if (pattern_match_item->pattern_list != + iavf_pattern_empty && rss->func == + RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, action, + "Not supported flow"); + + if (rss->level) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, action, + "a nonzero RSS encapsulation level is not supported"); + + if (rss->key_len) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, action, + "a nonzero RSS key_len is not supported"); + + if (rss->queue_num) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, action, + "a non-NULL RSS queue is not supported"); + + /* Check hash function and save it to rss_meta. */ + if (rss->func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) + rss_meta->rss_algorithm = + VIRTCHNL_RSS_ALG_XOR_ASYMMETRIC; + else if (rss->func == + RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ) + rss_meta->rss_algorithm = + VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC; + else + rss_meta->rss_algorithm = + VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC; + + type_match_item = + rte_zmalloc("iavf_type_match_item", + sizeof(struct iavf_hash_match_type), 0); + if (!type_match_item) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_HANDLE, + NULL, + "No memory for type_match_item"); + return -ENOMEM; + } + + /* Find matched proto hdrs according to hash type. */ + for (i = 0; i < type_list_len; i++) { + struct iavf_hash_match_type *ht_map = + &iavf_hash_type_list[i]; + if (rss_type == ht_map->hash_type && + tt->phint_type == ht_map->phint_type) { + type_match_item->hash_type = + ht_map->hash_type; + type_match_item->proto_hdrs = + ht_map->proto_hdrs; + rss_meta->proto_hdrs = + type_match_item->proto_hdrs; + item_found = true; + } + } + + rte_free(type_match_item); + + if (!item_found) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, action, + "Not supported flow"); + break; + + case RTE_FLOW_ACTION_TYPE_END: + break; + + default: + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, action, + "Invalid action."); + return -rte_errno; + } + } + + return 0; +} + +static int +iavf_hash_parse_pattern_action(__rte_unused struct iavf_adapter *ad, + struct iavf_pattern_match_item *array, + uint32_t array_len, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + void **meta, + struct rte_flow_error *error) +{ + struct iavf_pattern_match_item *pattern_match_item; + struct iavf_rss_meta *rss_meta_ptr; + int ret = 0; + + rss_meta_ptr = rte_zmalloc(NULL, sizeof(*rss_meta_ptr), 0); + if (!rss_meta_ptr) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "No memory for rss_meta_ptr"); + return -ENOMEM; + } + + /* Check rss supported pattern and find matched pattern. 
*/ + pattern_match_item = + iavf_search_pattern_match_item(pattern, array, array_len, + error); + if (!pattern_match_item) { + ret = -rte_errno; + goto error; + } + + ret = iavf_hash_check_inset(pattern, error); + if (ret) + goto error; + + /* Check rss action. */ + ret = iavf_hash_parse_action(pattern_match_item, pattern, actions, + (void **)&rss_meta_ptr, error); + +error: + if (!ret && meta) + *meta = rss_meta_ptr; + else + rte_free(rss_meta_ptr); + + rte_free(pattern_match_item); + + return ret; +} + +static int +iavf_hash_create(__rte_unused struct iavf_adapter *ad, + __rte_unused struct rte_flow *flow, void *meta, + __rte_unused struct rte_flow_error *error) +{ + struct iavf_rss_meta *rss_meta = (struct iavf_rss_meta *)meta; + struct virtchnl_rss_cfg *rss_cfg; + int ret = 0; + + rss_cfg = rte_zmalloc("iavf rss rule", + sizeof(struct virtchnl_rss_cfg), 0); + if (!rss_cfg) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "No memory for rss rule"); + return -ENOMEM; + } + + rss_cfg->proto_hdrs = *rss_meta->proto_hdrs; + rss_cfg->rss_algorithm = rss_meta->rss_algorithm; + + ret = iavf_add_del_rss_cfg(ad, rss_cfg, true); + if (!ret) { + flow->rule = rss_cfg; + } else { + PMD_DRV_LOG(ERR, "fail to add RSS configure"); + rte_flow_error_set(error, -ret, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Failed to add rss rule."); + rte_free(rss_cfg); + return -rte_errno; + } + + rte_free(meta); + + return ret; +} + +static int +iavf_hash_destroy(__rte_unused struct iavf_adapter *ad, + struct rte_flow *flow, + __rte_unused struct rte_flow_error *error) +{ + struct virtchnl_rss_cfg *rss_cfg; + int ret = 0; + + rss_cfg = (struct virtchnl_rss_cfg *)flow->rule; + + ret = iavf_add_del_rss_cfg(ad, rss_cfg, false); + if (ret) { + PMD_DRV_LOG(ERR, "fail to del RSS configure"); + rte_flow_error_set(error, -ret, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Failed to delete rss rule."); + return -rte_errno; + } + return ret; +} + +static void +iavf_hash_uninit(struct iavf_adapter *ad) +{ + if (iavf_hash_default_set(ad, false)) + PMD_DRV_LOG(ERR, "fail to delete default RSS"); + + iavf_unregister_parser(&iavf_hash_parser, ad); +} + +static void +iavf_hash_free(struct rte_flow *flow) +{ + rte_free(flow->rule); +} diff --git a/src/spdk/dpdk/drivers/net/iavf/iavf_log.h b/src/spdk/dpdk/drivers/net/iavf/iavf_log.h new file mode 100644 index 000000000..1088ec75f --- /dev/null +++ b/src/spdk/dpdk/drivers/net/iavf/iavf_log.h @@ -0,0 +1,51 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Intel Corporation + */ + +#ifndef _IAVF_LOG_H_ +#define _IAVF_LOG_H_ + +extern int iavf_logtype_init; +#define PMD_INIT_LOG(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, iavf_logtype_init, "%s(): " fmt "\n", \ + __func__, ## args) +#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>") + +extern int iavf_logtype_driver; +#define PMD_DRV_LOG_RAW(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, iavf_logtype_driver, "%s(): " fmt, \ + __func__, ## args) + +#define PMD_DRV_LOG(level, fmt, args...) \ + PMD_DRV_LOG_RAW(level, fmt "\n", ## args) +#define PMD_DRV_FUNC_TRACE() PMD_DRV_LOG(DEBUG, " >>") + + +#ifdef RTE_LIBRTE_IAVF_DEBUG_RX +extern int iavf_logtype_rx; +#define PMD_RX_LOG(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, iavf_logtype_rx, \ + "%s(): " fmt "\n", __func__, ## args) +#else +#define PMD_RX_LOG(level, fmt, args...) do { } while (0) +#endif + +#ifdef RTE_LIBRTE_IAVF_DEBUG_TX +extern int iavf_logtype_tx; +#define PMD_TX_LOG(level, fmt, args...) 
\ + rte_log(RTE_LOG_ ## level, iavf_logtype_tx, \ + "%s(): " fmt "\n", __func__, ## args) +#else +#define PMD_TX_LOG(level, fmt, args...) do { } while (0) +#endif + +#ifdef RTE_LIBRTE_IAVF_DEBUG_TX_FREE +extern int iavf_logtype_tx_free; +#define PMD_TX_FREE_LOG(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, iavf_logtype_tx_free, \ + "%s(): " fmt "\n", __func__, ## args) +#else +#define PMD_TX_FREE_LOG(level, fmt, args...) do { } while (0) +#endif + +#endif /* _IAVF_LOG_H_ */ diff --git a/src/spdk/dpdk/drivers/net/iavf/iavf_rxtx.c b/src/spdk/dpdk/drivers/net/iavf/iavf_rxtx.c new file mode 100644 index 000000000..05a7dd898 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/iavf/iavf_rxtx.c @@ -0,0 +1,2869 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Intel Corporation + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "iavf.h" +#include "iavf_rxtx.h" + +static inline int +check_rx_thresh(uint16_t nb_desc, uint16_t thresh) +{ + /* The following constraints must be satisfied: + * thresh < rxq->nb_rx_desc + */ + if (thresh >= nb_desc) { + PMD_INIT_LOG(ERR, "rx_free_thresh (%u) must be less than %u", + thresh, nb_desc); + return -EINVAL; + } + return 0; +} + +static inline int +check_tx_thresh(uint16_t nb_desc, uint16_t tx_rs_thresh, + uint16_t tx_free_thresh) +{ + /* TX descriptors will have their RS bit set after tx_rs_thresh + * descriptors have been used. The TX descriptor ring will be cleaned + * after tx_free_thresh descriptors are used or if the number of + * descriptors required to transmit a packet is greater than the + * number of free TX descriptors. + * + * The following constraints must be satisfied: + * - tx_rs_thresh must be less than the size of the ring minus 2. + * - tx_free_thresh must be less than the size of the ring minus 3. + * - tx_rs_thresh must be less than or equal to tx_free_thresh. + * - tx_rs_thresh must be a divisor of the ring size. + * + * One descriptor in the TX ring is used as a sentinel to avoid a H/W + * race condition, hence the maximum threshold constraints. When set + * to zero use default values. 
+ */ + if (tx_rs_thresh >= (nb_desc - 2)) { + PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be less than the " + "number of TX descriptors (%u) minus 2", + tx_rs_thresh, nb_desc); + return -EINVAL; + } + if (tx_free_thresh >= (nb_desc - 3)) { + PMD_INIT_LOG(ERR, "tx_free_thresh (%u) must be less than the " + "number of TX descriptors (%u) minus 3.", + tx_free_thresh, nb_desc); + return -EINVAL; + } + if (tx_rs_thresh > tx_free_thresh) { + PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be less than or " + "equal to tx_free_thresh (%u).", + tx_rs_thresh, tx_free_thresh); + return -EINVAL; + } + if ((nb_desc % tx_rs_thresh) != 0) { + PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be a divisor of the " + "number of TX descriptors (%u).", + tx_rs_thresh, nb_desc); + return -EINVAL; + } + + return 0; +} + +static inline bool +check_rx_vec_allow(struct iavf_rx_queue *rxq) +{ + if (rxq->rx_free_thresh >= IAVF_VPMD_RX_MAX_BURST && + rxq->nb_rx_desc % rxq->rx_free_thresh == 0) { + PMD_INIT_LOG(DEBUG, "Vector Rx can be enabled on this rxq."); + return true; + } + + PMD_INIT_LOG(DEBUG, "Vector Rx cannot be enabled on this rxq."); + return false; +} + +static inline bool +check_tx_vec_allow(struct iavf_tx_queue *txq) +{ + if (!(txq->offloads & IAVF_NO_VECTOR_FLAGS) && + txq->rs_thresh >= IAVF_VPMD_TX_MAX_BURST && + txq->rs_thresh <= IAVF_VPMD_TX_MAX_FREE_BUF) { + PMD_INIT_LOG(DEBUG, "Vector tx can be enabled on this txq."); + return true; + } + PMD_INIT_LOG(DEBUG, "Vector Tx cannot be enabled on this txq."); + return false; +} + +static inline bool +check_rx_bulk_allow(struct iavf_rx_queue *rxq) +{ + int ret = true; + + if (!(rxq->rx_free_thresh >= IAVF_RX_MAX_BURST)) { + PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: " + "rxq->rx_free_thresh=%d, " + "IAVF_RX_MAX_BURST=%d", + rxq->rx_free_thresh, IAVF_RX_MAX_BURST); + ret = false; + } else if (rxq->nb_rx_desc % rxq->rx_free_thresh != 0) { + PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: " + "rxq->nb_rx_desc=%d, " + "rxq->rx_free_thresh=%d", + rxq->nb_rx_desc, rxq->rx_free_thresh); + ret = false; + } + return ret; +} + +static inline void +reset_rx_queue(struct iavf_rx_queue *rxq) +{ + uint16_t len; + uint32_t i; + + if (!rxq) + return; + + len = rxq->nb_rx_desc + IAVF_RX_MAX_BURST; + + for (i = 0; i < len * sizeof(union iavf_rx_desc); i++) + ((volatile char *)rxq->rx_ring)[i] = 0; + + memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf)); + + for (i = 0; i < IAVF_RX_MAX_BURST; i++) + rxq->sw_ring[rxq->nb_rx_desc + i] = &rxq->fake_mbuf; + + /* for rx bulk */ + rxq->rx_nb_avail = 0; + rxq->rx_next_avail = 0; + rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1); + + rxq->rx_tail = 0; + rxq->nb_rx_hold = 0; + rxq->pkt_first_seg = NULL; + rxq->pkt_last_seg = NULL; +} + +static inline void +reset_tx_queue(struct iavf_tx_queue *txq) +{ + struct iavf_tx_entry *txe; + uint32_t i, size; + uint16_t prev; + + if (!txq) { + PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL"); + return; + } + + txe = txq->sw_ring; + size = sizeof(struct iavf_tx_desc) * txq->nb_tx_desc; + for (i = 0; i < size; i++) + ((volatile char *)txq->tx_ring)[i] = 0; + + prev = (uint16_t)(txq->nb_tx_desc - 1); + for (i = 0; i < txq->nb_tx_desc; i++) { + txq->tx_ring[i].cmd_type_offset_bsz = + rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE); + txe[i].mbuf = NULL; + txe[i].last_id = i; + txe[prev].next_id = i; + prev = i; + } + + txq->tx_tail = 0; + txq->nb_used = 0; + + txq->last_desc_cleaned = txq->nb_tx_desc - 1; + txq->nb_free = txq->nb_tx_desc - 1; + + txq->next_dd = txq->rs_thresh - 1; + 
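	/* next_dd and next_rs both restart at the end of the first rs_thresh
	 * window (rs_thresh - 1): next_dd is where transmit cleanup next polls
	 * for a completed (DD) descriptor, and next_rs is where the RS bit is
	 * next requested, so completions are reported in rs_thresh-sized steps.
	 */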
txq->next_rs = txq->rs_thresh - 1; +} + +static int +alloc_rxq_mbufs(struct iavf_rx_queue *rxq) +{ + volatile union iavf_rx_desc *rxd; + struct rte_mbuf *mbuf = NULL; + uint64_t dma_addr; + uint16_t i; + + for (i = 0; i < rxq->nb_rx_desc; i++) { + mbuf = rte_mbuf_raw_alloc(rxq->mp); + if (unlikely(!mbuf)) { + PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX"); + return -ENOMEM; + } + + rte_mbuf_refcnt_set(mbuf, 1); + mbuf->next = NULL; + mbuf->data_off = RTE_PKTMBUF_HEADROOM; + mbuf->nb_segs = 1; + mbuf->port = rxq->port_id; + + dma_addr = + rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf)); + + rxd = &rxq->rx_ring[i]; + rxd->read.pkt_addr = dma_addr; + rxd->read.hdr_addr = 0; +#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC + rxd->read.rsvd1 = 0; + rxd->read.rsvd2 = 0; +#endif + + rxq->sw_ring[i] = mbuf; + } + + return 0; +} + +static inline void +release_rxq_mbufs(struct iavf_rx_queue *rxq) +{ + uint16_t i; + + if (!rxq->sw_ring) + return; + + for (i = 0; i < rxq->nb_rx_desc; i++) { + if (rxq->sw_ring[i]) { + rte_pktmbuf_free_seg(rxq->sw_ring[i]); + rxq->sw_ring[i] = NULL; + } + } + + /* for rx bulk */ + if (rxq->rx_nb_avail == 0) + return; + for (i = 0; i < rxq->rx_nb_avail; i++) { + struct rte_mbuf *mbuf; + + mbuf = rxq->rx_stage[rxq->rx_next_avail + i]; + rte_pktmbuf_free_seg(mbuf); + } + rxq->rx_nb_avail = 0; +} + +static inline void +release_txq_mbufs(struct iavf_tx_queue *txq) +{ + uint16_t i; + + if (!txq || !txq->sw_ring) { + PMD_DRV_LOG(DEBUG, "Pointer to rxq or sw_ring is NULL"); + return; + } + + for (i = 0; i < txq->nb_tx_desc; i++) { + if (txq->sw_ring[i].mbuf) { + rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf); + txq->sw_ring[i].mbuf = NULL; + } + } +} + +static const struct iavf_rxq_ops def_rxq_ops = { + .release_mbufs = release_rxq_mbufs, +}; + +static const struct iavf_txq_ops def_txq_ops = { + .release_mbufs = release_txq_mbufs, +}; + +int +iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, + uint16_t nb_desc, unsigned int socket_id, + const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *mp) +{ + struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct iavf_adapter *ad = + IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + struct iavf_info *vf = + IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + struct iavf_vsi *vsi = &vf->vsi; + struct iavf_rx_queue *rxq; + const struct rte_memzone *mz; + uint32_t ring_size; + uint16_t len; + uint16_t rx_free_thresh; + + PMD_INIT_FUNC_TRACE(); + + if (nb_desc % IAVF_ALIGN_RING_DESC != 0 || + nb_desc > IAVF_MAX_RING_DESC || + nb_desc < IAVF_MIN_RING_DESC) { + PMD_INIT_LOG(ERR, "Number (%u) of receive descriptors is " + "invalid", nb_desc); + return -EINVAL; + } + + /* Check free threshold */ + rx_free_thresh = (rx_conf->rx_free_thresh == 0) ? 
+ IAVF_DEFAULT_RX_FREE_THRESH : + rx_conf->rx_free_thresh; + if (check_rx_thresh(nb_desc, rx_free_thresh) != 0) + return -EINVAL; + + /* Free memory if needed */ + if (dev->data->rx_queues[queue_idx]) { + iavf_dev_rx_queue_release(dev->data->rx_queues[queue_idx]); + dev->data->rx_queues[queue_idx] = NULL; + } + + /* Allocate the rx queue data structure */ + rxq = rte_zmalloc_socket("iavf rxq", + sizeof(struct iavf_rx_queue), + RTE_CACHE_LINE_SIZE, + socket_id); + if (!rxq) { + PMD_INIT_LOG(ERR, "Failed to allocate memory for " + "rx queue data structure"); + return -ENOMEM; + } + + if (vf->vf_res->vf_cap_flags & + VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC && + vf->supported_rxdid & BIT(IAVF_RXDID_COMMS_OVS_1)) { + rxq->rxdid = IAVF_RXDID_COMMS_OVS_1; + } else { + rxq->rxdid = IAVF_RXDID_LEGACY_1; + } + + rxq->mp = mp; + rxq->nb_rx_desc = nb_desc; + rxq->rx_free_thresh = rx_free_thresh; + rxq->queue_id = queue_idx; + rxq->port_id = dev->data->port_id; + rxq->crc_len = 0; /* crc stripping by default */ + rxq->rx_deferred_start = rx_conf->rx_deferred_start; + rxq->rx_hdr_len = 0; + rxq->vsi = vsi; + + len = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM; + rxq->rx_buf_len = RTE_ALIGN(len, (1 << IAVF_RXQ_CTX_DBUFF_SHIFT)); + + /* Allocate the software ring. */ + len = nb_desc + IAVF_RX_MAX_BURST; + rxq->sw_ring = + rte_zmalloc_socket("iavf rx sw ring", + sizeof(struct rte_mbuf *) * len, + RTE_CACHE_LINE_SIZE, + socket_id); + if (!rxq->sw_ring) { + PMD_INIT_LOG(ERR, "Failed to allocate memory for SW ring"); + rte_free(rxq); + return -ENOMEM; + } + + /* Allocate the maximun number of RX ring hardware descriptor with + * a liitle more to support bulk allocate. + */ + len = IAVF_MAX_RING_DESC + IAVF_RX_MAX_BURST; + ring_size = RTE_ALIGN(len * sizeof(union iavf_rx_desc), + IAVF_DMA_MEM_ALIGN); + mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx, + ring_size, IAVF_RING_BASE_ALIGN, + socket_id); + if (!mz) { + PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for RX"); + rte_free(rxq->sw_ring); + rte_free(rxq); + return -ENOMEM; + } + /* Zero all the descriptors in the ring. */ + memset(mz->addr, 0, ring_size); + rxq->rx_ring_phys_addr = mz->iova; + rxq->rx_ring = (union iavf_rx_desc *)mz->addr; + + rxq->mz = mz; + reset_rx_queue(rxq); + rxq->q_set = true; + dev->data->rx_queues[queue_idx] = rxq; + rxq->qrx_tail = hw->hw_addr + IAVF_QRX_TAIL1(rxq->queue_id); + rxq->ops = &def_rxq_ops; + + if (check_rx_bulk_allow(rxq) == true) { + PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are " + "satisfied. 
Rx Burst Bulk Alloc function will be " + "used on port=%d, queue=%d.", + rxq->port_id, rxq->queue_id); + } else { + PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are " + "not satisfied, Scattered Rx is requested " + "on port=%d, queue=%d.", + rxq->port_id, rxq->queue_id); + ad->rx_bulk_alloc_allowed = false; + } + + if (check_rx_vec_allow(rxq) == false) + ad->rx_vec_allowed = false; + + return 0; +} + +int +iavf_dev_tx_queue_setup(struct rte_eth_dev *dev, + uint16_t queue_idx, + uint16_t nb_desc, + unsigned int socket_id, + const struct rte_eth_txconf *tx_conf) +{ + struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct iavf_tx_queue *txq; + const struct rte_memzone *mz; + uint32_t ring_size; + uint16_t tx_rs_thresh, tx_free_thresh; + uint64_t offloads; + + PMD_INIT_FUNC_TRACE(); + + offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads; + + if (nb_desc % IAVF_ALIGN_RING_DESC != 0 || + nb_desc > IAVF_MAX_RING_DESC || + nb_desc < IAVF_MIN_RING_DESC) { + PMD_INIT_LOG(ERR, "Number (%u) of transmit descriptors is " + "invalid", nb_desc); + return -EINVAL; + } + + tx_rs_thresh = (uint16_t)((tx_conf->tx_rs_thresh) ? + tx_conf->tx_rs_thresh : DEFAULT_TX_RS_THRESH); + tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ? + tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH); + check_tx_thresh(nb_desc, tx_rs_thresh, tx_rs_thresh); + + /* Free memory if needed. */ + if (dev->data->tx_queues[queue_idx]) { + iavf_dev_tx_queue_release(dev->data->tx_queues[queue_idx]); + dev->data->tx_queues[queue_idx] = NULL; + } + + /* Allocate the TX queue data structure. */ + txq = rte_zmalloc_socket("iavf txq", + sizeof(struct iavf_tx_queue), + RTE_CACHE_LINE_SIZE, + socket_id); + if (!txq) { + PMD_INIT_LOG(ERR, "Failed to allocate memory for " + "tx queue structure"); + return -ENOMEM; + } + + txq->nb_tx_desc = nb_desc; + txq->rs_thresh = tx_rs_thresh; + txq->free_thresh = tx_free_thresh; + txq->queue_id = queue_idx; + txq->port_id = dev->data->port_id; + txq->offloads = offloads; + txq->tx_deferred_start = tx_conf->tx_deferred_start; + + /* Allocate software ring */ + txq->sw_ring = + rte_zmalloc_socket("iavf tx sw ring", + sizeof(struct iavf_tx_entry) * nb_desc, + RTE_CACHE_LINE_SIZE, + socket_id); + if (!txq->sw_ring) { + PMD_INIT_LOG(ERR, "Failed to allocate memory for SW TX ring"); + rte_free(txq); + return -ENOMEM; + } + + /* Allocate TX hardware ring descriptors. 
*/ + ring_size = sizeof(struct iavf_tx_desc) * IAVF_MAX_RING_DESC; + ring_size = RTE_ALIGN(ring_size, IAVF_DMA_MEM_ALIGN); + mz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx, + ring_size, IAVF_RING_BASE_ALIGN, + socket_id); + if (!mz) { + PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX"); + rte_free(txq->sw_ring); + rte_free(txq); + return -ENOMEM; + } + txq->tx_ring_phys_addr = mz->iova; + txq->tx_ring = (struct iavf_tx_desc *)mz->addr; + + txq->mz = mz; + reset_tx_queue(txq); + txq->q_set = true; + dev->data->tx_queues[queue_idx] = txq; + txq->qtx_tail = hw->hw_addr + IAVF_QTX_TAIL1(queue_idx); + txq->ops = &def_txq_ops; + + if (check_tx_vec_allow(txq) == false) { + struct iavf_adapter *ad = + IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + ad->tx_vec_allowed = false; + } + + return 0; +} + +int +iavf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) +{ + struct iavf_adapter *adapter = + IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct iavf_rx_queue *rxq; + int err = 0; + + PMD_DRV_FUNC_TRACE(); + + if (rx_queue_id >= dev->data->nb_rx_queues) + return -EINVAL; + + rxq = dev->data->rx_queues[rx_queue_id]; + + err = alloc_rxq_mbufs(rxq); + if (err) { + PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf"); + return err; + } + + rte_wmb(); + + /* Init the RX tail register. */ + IAVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1); + IAVF_WRITE_FLUSH(hw); + + /* Ready to switch the queue on */ + err = iavf_switch_queue(adapter, rx_queue_id, true, true); + if (err) + PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on", + rx_queue_id); + else + dev->data->rx_queue_state[rx_queue_id] = + RTE_ETH_QUEUE_STATE_STARTED; + + return err; +} + +int +iavf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id) +{ + struct iavf_adapter *adapter = + IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct iavf_tx_queue *txq; + int err = 0; + + PMD_DRV_FUNC_TRACE(); + + if (tx_queue_id >= dev->data->nb_tx_queues) + return -EINVAL; + + txq = dev->data->tx_queues[tx_queue_id]; + + /* Init the RX tail register. 
*/ + IAVF_PCI_REG_WRITE(txq->qtx_tail, 0); + IAVF_WRITE_FLUSH(hw); + + /* Ready to switch the queue on */ + err = iavf_switch_queue(adapter, tx_queue_id, false, true); + + if (err) + PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on", + tx_queue_id); + else + dev->data->tx_queue_state[tx_queue_id] = + RTE_ETH_QUEUE_STATE_STARTED; + + return err; +} + +int +iavf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id) +{ + struct iavf_adapter *adapter = + IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + struct iavf_rx_queue *rxq; + int err; + + PMD_DRV_FUNC_TRACE(); + + if (rx_queue_id >= dev->data->nb_rx_queues) + return -EINVAL; + + err = iavf_switch_queue(adapter, rx_queue_id, true, false); + if (err) { + PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off", + rx_queue_id); + return err; + } + + rxq = dev->data->rx_queues[rx_queue_id]; + rxq->ops->release_mbufs(rxq); + reset_rx_queue(rxq); + dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; + + return 0; +} + +int +iavf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id) +{ + struct iavf_adapter *adapter = + IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + struct iavf_tx_queue *txq; + int err; + + PMD_DRV_FUNC_TRACE(); + + if (tx_queue_id >= dev->data->nb_tx_queues) + return -EINVAL; + + err = iavf_switch_queue(adapter, tx_queue_id, false, false); + if (err) { + PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off", + tx_queue_id); + return err; + } + + txq = dev->data->tx_queues[tx_queue_id]; + txq->ops->release_mbufs(txq); + reset_tx_queue(txq); + dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; + + return 0; +} + +void +iavf_dev_rx_queue_release(void *rxq) +{ + struct iavf_rx_queue *q = (struct iavf_rx_queue *)rxq; + + if (!q) + return; + + q->ops->release_mbufs(q); + rte_free(q->sw_ring); + rte_memzone_free(q->mz); + rte_free(q); +} + +void +iavf_dev_tx_queue_release(void *txq) +{ + struct iavf_tx_queue *q = (struct iavf_tx_queue *)txq; + + if (!q) + return; + + q->ops->release_mbufs(q); + rte_free(q->sw_ring); + rte_memzone_free(q->mz); + rte_free(q); +} + +void +iavf_stop_queues(struct rte_eth_dev *dev) +{ + struct iavf_adapter *adapter = + IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + struct iavf_rx_queue *rxq; + struct iavf_tx_queue *txq; + int ret, i; + + /* Stop All queues */ + ret = iavf_disable_queues(adapter); + if (ret) + PMD_DRV_LOG(WARNING, "Fail to stop queues"); + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + txq = dev->data->tx_queues[i]; + if (!txq) + continue; + txq->ops->release_mbufs(txq); + reset_tx_queue(txq); + dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + } + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = dev->data->rx_queues[i]; + if (!rxq) + continue; + rxq->ops->release_mbufs(rxq); + reset_rx_queue(rxq); + dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + } +} + +static inline void +iavf_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union iavf_rx_desc *rxdp) +{ + if (rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) & + (1 << IAVF_RX_DESC_STATUS_L2TAG1P_SHIFT)) { + mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED; + mb->vlan_tci = + rte_le_to_cpu_16(rxdp->wb.qword0.lo_dword.l2tag1); + } else { + mb->vlan_tci = 0; + } +} + +static inline void +iavf_flex_rxd_to_vlan_tci(struct rte_mbuf *mb, + volatile union iavf_rx_flex_desc *rxdp) +{ + if (rte_le_to_cpu_64(rxdp->wb.status_error0) & + (1 << IAVF_RX_FLEX_DESC_STATUS0_L2TAG1P_S)) { + mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED; + 
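		/* L2TAG1P set means the hardware stripped the VLAN tag into the
		 * descriptor's l2tag1 field; copy it into the mbuf so applications
		 * can read it from vlan_tci alongside the STRIPPED flag.
		 */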
mb->vlan_tci = + rte_le_to_cpu_16(rxdp->wb.l2tag1); + } else { + mb->vlan_tci = 0; + } +} + +/* Translate the rx descriptor status and error fields to pkt flags */ +static inline uint64_t +iavf_rxd_to_pkt_flags(uint64_t qword) +{ + uint64_t flags; + uint64_t error_bits = (qword >> IAVF_RXD_QW1_ERROR_SHIFT); + +#define IAVF_RX_ERR_BITS 0x3f + + /* Check if RSS_HASH */ + flags = (((qword >> IAVF_RX_DESC_STATUS_FLTSTAT_SHIFT) & + IAVF_RX_DESC_FLTSTAT_RSS_HASH) == + IAVF_RX_DESC_FLTSTAT_RSS_HASH) ? PKT_RX_RSS_HASH : 0; + + /* Check if FDIR Match */ + flags |= (qword & (1 << IAVF_RX_DESC_STATUS_FLM_SHIFT) ? + PKT_RX_FDIR : 0); + + if (likely((error_bits & IAVF_RX_ERR_BITS) == 0)) { + flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD); + return flags; + } + + if (unlikely(error_bits & (1 << IAVF_RX_DESC_ERROR_IPE_SHIFT))) + flags |= PKT_RX_IP_CKSUM_BAD; + else + flags |= PKT_RX_IP_CKSUM_GOOD; + + if (unlikely(error_bits & (1 << IAVF_RX_DESC_ERROR_L4E_SHIFT))) + flags |= PKT_RX_L4_CKSUM_BAD; + else + flags |= PKT_RX_L4_CKSUM_GOOD; + + /* TODO: Oversize error bit is not processed here */ + + return flags; +} + +static inline uint64_t +iavf_rxd_build_fdir(volatile union iavf_rx_desc *rxdp, struct rte_mbuf *mb) +{ + uint64_t flags = 0; +#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC + uint16_t flexbh; + + flexbh = (rte_le_to_cpu_32(rxdp->wb.qword2.ext_status) >> + IAVF_RX_DESC_EXT_STATUS_FLEXBH_SHIFT) & + IAVF_RX_DESC_EXT_STATUS_FLEXBH_MASK; + + if (flexbh == IAVF_RX_DESC_EXT_STATUS_FLEXBH_FD_ID) { + mb->hash.fdir.hi = + rte_le_to_cpu_32(rxdp->wb.qword3.hi_dword.fd_id); + flags |= PKT_RX_FDIR_ID; + } +#else + mb->hash.fdir.hi = + rte_le_to_cpu_32(rxdp->wb.qword0.hi_dword.fd_id); + flags |= PKT_RX_FDIR_ID; +#endif + return flags; +} + + +/* Translate the rx flex descriptor status to pkt flags */ +static inline void +iavf_rxd_to_pkt_fields(struct rte_mbuf *mb, + volatile union iavf_rx_flex_desc *rxdp) +{ + volatile struct iavf_32b_rx_flex_desc_comms_ovs *desc = + (volatile struct iavf_32b_rx_flex_desc_comms_ovs *)rxdp; +#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC + uint16_t stat_err; + + stat_err = rte_le_to_cpu_16(desc->status_error0); + if (likely(stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) { + mb->ol_flags |= PKT_RX_RSS_HASH; + mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash); + } +#endif + + if (desc->flow_id != 0xFFFFFFFF) { + mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID; + mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id); + } +} + +#define IAVF_RX_FLEX_ERR0_BITS \ + ((1 << IAVF_RX_FLEX_DESC_STATUS0_HBO_S) | \ + (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) | \ + (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_L4E_S) | \ + (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S) | \ + (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S) | \ + (1 << IAVF_RX_FLEX_DESC_STATUS0_RXE_S)) + +/* Rx L3/L4 checksum */ +static inline uint64_t +iavf_flex_rxd_error_to_pkt_flags(uint16_t stat_err0) +{ + uint64_t flags = 0; + + /* check if HW has decoded the packet and checksum */ + if (unlikely(!(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_L3L4P_S)))) + return 0; + + if (likely(!(stat_err0 & IAVF_RX_FLEX_ERR0_BITS))) { + flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD); + return flags; + } + + if (unlikely(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_IPE_S))) + flags |= PKT_RX_IP_CKSUM_BAD; + else + flags |= PKT_RX_IP_CKSUM_GOOD; + + if (unlikely(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_L4E_S))) + flags |= PKT_RX_L4_CKSUM_BAD; + else + flags |= PKT_RX_L4_CKSUM_GOOD; + + if (unlikely(stat_err0 & (1 << 
IAVF_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S))) + flags |= PKT_RX_EIP_CKSUM_BAD; + + return flags; +} + +/* If the number of free RX descriptors is greater than the RX free + * threshold of the queue, advance the Receive Descriptor Tail (RDT) + * register. Update the RDT with the value of the last processed RX + * descriptor minus 1, to guarantee that the RDT register is never + * equal to the RDH register, which creates a "full" ring situation + * from the hardware point of view. + */ +static inline void +iavf_update_rx_tail(struct iavf_rx_queue *rxq, uint16_t nb_hold, uint16_t rx_id) +{ + nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold); + + if (nb_hold > rxq->rx_free_thresh) { + PMD_RX_LOG(DEBUG, + "port_id=%u queue_id=%u rx_tail=%u nb_hold=%u", + rxq->port_id, rxq->queue_id, rx_id, nb_hold); + rx_id = (uint16_t)((rx_id == 0) ? + (rxq->nb_rx_desc - 1) : (rx_id - 1)); + IAVF_PCI_REG_WRITE(rxq->qrx_tail, rx_id); + nb_hold = 0; + } + rxq->nb_rx_hold = nb_hold; +} + +/* implement recv_pkts */ +uint16_t +iavf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) +{ + volatile union iavf_rx_desc *rx_ring; + volatile union iavf_rx_desc *rxdp; + struct iavf_rx_queue *rxq; + union iavf_rx_desc rxd; + struct rte_mbuf *rxe; + struct rte_eth_dev *dev; + struct rte_mbuf *rxm; + struct rte_mbuf *nmb; + uint16_t nb_rx; + uint32_t rx_status; + uint64_t qword1; + uint16_t rx_packet_len; + uint16_t rx_id, nb_hold; + uint64_t dma_addr; + uint64_t pkt_flags; + const uint32_t *ptype_tbl; + + nb_rx = 0; + nb_hold = 0; + rxq = rx_queue; + rx_id = rxq->rx_tail; + rx_ring = rxq->rx_ring; + ptype_tbl = rxq->vsi->adapter->ptype_tbl; + + while (nb_rx < nb_pkts) { + rxdp = &rx_ring[rx_id]; + qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len); + rx_status = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >> + IAVF_RXD_QW1_STATUS_SHIFT; + + /* Check the DD bit first */ + if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT))) + break; + IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id); + + nmb = rte_mbuf_raw_alloc(rxq->mp); + if (unlikely(!nmb)) { + dev = &rte_eth_devices[rxq->port_id]; + dev->data->rx_mbuf_alloc_failed++; + PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u " + "queue_id=%u", rxq->port_id, rxq->queue_id); + break; + } + + rxd = *rxdp; + nb_hold++; + rxe = rxq->sw_ring[rx_id]; + rx_id++; + if (unlikely(rx_id == rxq->nb_rx_desc)) + rx_id = 0; + + /* Prefetch next mbuf */ + rte_prefetch0(rxq->sw_ring[rx_id]); + + /* When next RX descriptor is on a cache line boundary, + * prefetch the next 4 RX descriptors and next 8 pointers + * to mbufs. 
+ */ + if ((rx_id & 0x3) == 0) { + rte_prefetch0(&rx_ring[rx_id]); + rte_prefetch0(rxq->sw_ring[rx_id]); + } + rxm = rxe; + dma_addr = + rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb)); + rxdp->read.hdr_addr = 0; + rxdp->read.pkt_addr = dma_addr; + + rx_packet_len = ((qword1 & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >> + IAVF_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len; + + rxm->data_off = RTE_PKTMBUF_HEADROOM; + rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM)); + rxm->nb_segs = 1; + rxm->next = NULL; + rxm->pkt_len = rx_packet_len; + rxm->data_len = rx_packet_len; + rxm->port = rxq->port_id; + rxm->ol_flags = 0; + iavf_rxd_to_vlan_tci(rxm, &rxd); + pkt_flags = iavf_rxd_to_pkt_flags(qword1); + rxm->packet_type = + ptype_tbl[(uint8_t)((qword1 & + IAVF_RXD_QW1_PTYPE_MASK) >> IAVF_RXD_QW1_PTYPE_SHIFT)]; + + if (pkt_flags & PKT_RX_RSS_HASH) + rxm->hash.rss = + rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss); + + if (pkt_flags & PKT_RX_FDIR) + pkt_flags |= iavf_rxd_build_fdir(&rxd, rxm); + + rxm->ol_flags |= pkt_flags; + + rx_pkts[nb_rx++] = rxm; + } + rxq->rx_tail = rx_id; + + iavf_update_rx_tail(rxq, nb_hold, rx_id); + + return nb_rx; +} + +/* implement recv_pkts for flexible Rx descriptor */ +uint16_t +iavf_recv_pkts_flex_rxd(void *rx_queue, + struct rte_mbuf **rx_pkts, uint16_t nb_pkts) +{ + volatile union iavf_rx_desc *rx_ring; + volatile union iavf_rx_flex_desc *rxdp; + struct iavf_rx_queue *rxq; + union iavf_rx_flex_desc rxd; + struct rte_mbuf *rxe; + struct rte_eth_dev *dev; + struct rte_mbuf *rxm; + struct rte_mbuf *nmb; + uint16_t nb_rx; + uint16_t rx_stat_err0; + uint16_t rx_packet_len; + uint16_t rx_id, nb_hold; + uint64_t dma_addr; + uint64_t pkt_flags; + const uint32_t *ptype_tbl; + + nb_rx = 0; + nb_hold = 0; + rxq = rx_queue; + rx_id = rxq->rx_tail; + rx_ring = rxq->rx_ring; + ptype_tbl = rxq->vsi->adapter->ptype_tbl; + + while (nb_rx < nb_pkts) { + rxdp = (volatile union iavf_rx_flex_desc *)&rx_ring[rx_id]; + rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0); + + /* Check the DD bit first */ + if (!(rx_stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S))) + break; + IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id); + + nmb = rte_mbuf_raw_alloc(rxq->mp); + if (unlikely(!nmb)) { + dev = &rte_eth_devices[rxq->port_id]; + dev->data->rx_mbuf_alloc_failed++; + PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u " + "queue_id=%u", rxq->port_id, rxq->queue_id); + break; + } + + rxd = *rxdp; + nb_hold++; + rxe = rxq->sw_ring[rx_id]; + rx_id++; + if (unlikely(rx_id == rxq->nb_rx_desc)) + rx_id = 0; + + /* Prefetch next mbuf */ + rte_prefetch0(rxq->sw_ring[rx_id]); + + /* When next RX descriptor is on a cache line boundary, + * prefetch the next 4 RX descriptors and next 8 pointers + * to mbufs. 
+ */ + if ((rx_id & 0x3) == 0) { + rte_prefetch0(&rx_ring[rx_id]); + rte_prefetch0(rxq->sw_ring[rx_id]); + } + rxm = rxe; + dma_addr = + rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb)); + rxdp->read.hdr_addr = 0; + rxdp->read.pkt_addr = dma_addr; + + rx_packet_len = (rte_le_to_cpu_16(rxd.wb.pkt_len) & + IAVF_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len; + + rxm->data_off = RTE_PKTMBUF_HEADROOM; + rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM)); + rxm->nb_segs = 1; + rxm->next = NULL; + rxm->pkt_len = rx_packet_len; + rxm->data_len = rx_packet_len; + rxm->port = rxq->port_id; + rxm->ol_flags = 0; + rxm->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M & + rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)]; + iavf_flex_rxd_to_vlan_tci(rxm, &rxd); + iavf_rxd_to_pkt_fields(rxm, &rxd); + pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0); + rxm->ol_flags |= pkt_flags; + + rx_pkts[nb_rx++] = rxm; + } + rxq->rx_tail = rx_id; + + iavf_update_rx_tail(rxq, nb_hold, rx_id); + + return nb_rx; +} + +/* implement recv_scattered_pkts for flexible Rx descriptor */ +uint16_t +iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + struct iavf_rx_queue *rxq = rx_queue; + union iavf_rx_flex_desc rxd; + struct rte_mbuf *rxe; + struct rte_mbuf *first_seg = rxq->pkt_first_seg; + struct rte_mbuf *last_seg = rxq->pkt_last_seg; + struct rte_mbuf *nmb, *rxm; + uint16_t rx_id = rxq->rx_tail; + uint16_t nb_rx = 0, nb_hold = 0, rx_packet_len; + struct rte_eth_dev *dev; + uint16_t rx_stat_err0; + uint64_t dma_addr; + uint64_t pkt_flags; + + volatile union iavf_rx_desc *rx_ring = rxq->rx_ring; + volatile union iavf_rx_flex_desc *rxdp; + const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl; + + while (nb_rx < nb_pkts) { + rxdp = (volatile union iavf_rx_flex_desc *)&rx_ring[rx_id]; + rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0); + + /* Check the DD bit */ + if (!(rx_stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S))) + break; + IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id); + + nmb = rte_mbuf_raw_alloc(rxq->mp); + if (unlikely(!nmb)) { + PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u " + "queue_id=%u", rxq->port_id, rxq->queue_id); + dev = &rte_eth_devices[rxq->port_id]; + dev->data->rx_mbuf_alloc_failed++; + break; + } + + rxd = *rxdp; + nb_hold++; + rxe = rxq->sw_ring[rx_id]; + rx_id++; + if (rx_id == rxq->nb_rx_desc) + rx_id = 0; + + /* Prefetch next mbuf */ + rte_prefetch0(rxq->sw_ring[rx_id]); + + /* When next RX descriptor is on a cache line boundary, + * prefetch the next 4 RX descriptors and next 8 pointers + * to mbufs. + */ + if ((rx_id & 0x3) == 0) { + rte_prefetch0(&rx_ring[rx_id]); + rte_prefetch0(rxq->sw_ring[rx_id]); + } + + rxm = rxe; + dma_addr = + rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb)); + + /* Set data buffer address and data length of the mbuf */ + rxdp->read.hdr_addr = 0; + rxdp->read.pkt_addr = dma_addr; + rx_packet_len = rte_le_to_cpu_16(rxd.wb.pkt_len) & + IAVF_RX_FLX_DESC_PKT_LEN_M; + rxm->data_len = rx_packet_len; + rxm->data_off = RTE_PKTMBUF_HEADROOM; + + /* If this is the first buffer of the received packet, set the + * pointer to the first mbuf of the packet and initialize its + * context. Otherwise, update the total length and the number + * of segments of the current scattered packet, and update the + * pointer to the last mbuf of the current packet. 
+ */ + if (!first_seg) { + first_seg = rxm; + first_seg->nb_segs = 1; + first_seg->pkt_len = rx_packet_len; + } else { + first_seg->pkt_len = + (uint16_t)(first_seg->pkt_len + + rx_packet_len); + first_seg->nb_segs++; + last_seg->next = rxm; + } + + /* If this is not the last buffer of the received packet, + * update the pointer to the last mbuf of the current scattered + * packet and continue to parse the RX ring. + */ + if (!(rx_stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_EOF_S))) { + last_seg = rxm; + continue; + } + + /* This is the last buffer of the received packet. If the CRC + * is not stripped by the hardware: + * - Subtract the CRC length from the total packet length. + * - If the last buffer only contains the whole CRC or a part + * of it, free the mbuf associated to the last buffer. If part + * of the CRC is also contained in the previous mbuf, subtract + * the length of that CRC part from the data length of the + * previous mbuf. + */ + rxm->next = NULL; + if (unlikely(rxq->crc_len > 0)) { + first_seg->pkt_len -= RTE_ETHER_CRC_LEN; + if (rx_packet_len <= RTE_ETHER_CRC_LEN) { + rte_pktmbuf_free_seg(rxm); + first_seg->nb_segs--; + last_seg->data_len = + (uint16_t)(last_seg->data_len - + (RTE_ETHER_CRC_LEN - rx_packet_len)); + last_seg->next = NULL; + } else { + rxm->data_len = (uint16_t)(rx_packet_len - + RTE_ETHER_CRC_LEN); + } + } + + first_seg->port = rxq->port_id; + first_seg->ol_flags = 0; + first_seg->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M & + rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)]; + iavf_flex_rxd_to_vlan_tci(first_seg, &rxd); + iavf_rxd_to_pkt_fields(first_seg, &rxd); + pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0); + + first_seg->ol_flags |= pkt_flags; + + /* Prefetch data of first segment, if configured to do so. */ + rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr, + first_seg->data_off)); + rx_pkts[nb_rx++] = first_seg; + first_seg = NULL; + } + + /* Record index of the next RX descriptor to probe. 
*/ + rxq->rx_tail = rx_id; + rxq->pkt_first_seg = first_seg; + rxq->pkt_last_seg = last_seg; + + iavf_update_rx_tail(rxq, nb_hold, rx_id); + + return nb_rx; +} + +/* implement recv_scattered_pkts */ +uint16_t +iavf_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + struct iavf_rx_queue *rxq = rx_queue; + union iavf_rx_desc rxd; + struct rte_mbuf *rxe; + struct rte_mbuf *first_seg = rxq->pkt_first_seg; + struct rte_mbuf *last_seg = rxq->pkt_last_seg; + struct rte_mbuf *nmb, *rxm; + uint16_t rx_id = rxq->rx_tail; + uint16_t nb_rx = 0, nb_hold = 0, rx_packet_len; + struct rte_eth_dev *dev; + uint32_t rx_status; + uint64_t qword1; + uint64_t dma_addr; + uint64_t pkt_flags; + + volatile union iavf_rx_desc *rx_ring = rxq->rx_ring; + volatile union iavf_rx_desc *rxdp; + const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl; + + while (nb_rx < nb_pkts) { + rxdp = &rx_ring[rx_id]; + qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len); + rx_status = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >> + IAVF_RXD_QW1_STATUS_SHIFT; + + /* Check the DD bit */ + if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT))) + break; + IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id); + + nmb = rte_mbuf_raw_alloc(rxq->mp); + if (unlikely(!nmb)) { + PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u " + "queue_id=%u", rxq->port_id, rxq->queue_id); + dev = &rte_eth_devices[rxq->port_id]; + dev->data->rx_mbuf_alloc_failed++; + break; + } + + rxd = *rxdp; + nb_hold++; + rxe = rxq->sw_ring[rx_id]; + rx_id++; + if (rx_id == rxq->nb_rx_desc) + rx_id = 0; + + /* Prefetch next mbuf */ + rte_prefetch0(rxq->sw_ring[rx_id]); + + /* When next RX descriptor is on a cache line boundary, + * prefetch the next 4 RX descriptors and next 8 pointers + * to mbufs. + */ + if ((rx_id & 0x3) == 0) { + rte_prefetch0(&rx_ring[rx_id]); + rte_prefetch0(rxq->sw_ring[rx_id]); + } + + rxm = rxe; + dma_addr = + rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb)); + + /* Set data buffer address and data length of the mbuf */ + rxdp->read.hdr_addr = 0; + rxdp->read.pkt_addr = dma_addr; + rx_packet_len = (qword1 & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >> + IAVF_RXD_QW1_LENGTH_PBUF_SHIFT; + rxm->data_len = rx_packet_len; + rxm->data_off = RTE_PKTMBUF_HEADROOM; + + /* If this is the first buffer of the received packet, set the + * pointer to the first mbuf of the packet and initialize its + * context. Otherwise, update the total length and the number + * of segments of the current scattered packet, and update the + * pointer to the last mbuf of the current packet. + */ + if (!first_seg) { + first_seg = rxm; + first_seg->nb_segs = 1; + first_seg->pkt_len = rx_packet_len; + } else { + first_seg->pkt_len = + (uint16_t)(first_seg->pkt_len + + rx_packet_len); + first_seg->nb_segs++; + last_seg->next = rxm; + } + + /* If this is not the last buffer of the received packet, + * update the pointer to the last mbuf of the current scattered + * packet and continue to parse the RX ring. + */ + if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_EOF_SHIFT))) { + last_seg = rxm; + continue; + } + + /* This is the last buffer of the received packet. If the CRC + * is not stripped by the hardware: + * - Subtract the CRC length from the total packet length. + * - If the last buffer only contains the whole CRC or a part + * of it, free the mbuf associated to the last buffer. If part + * of the CRC is also contained in the previous mbuf, subtract + * the length of that CRC part from the data length of the + * previous mbuf. 
+ */ + rxm->next = NULL; + if (unlikely(rxq->crc_len > 0)) { + first_seg->pkt_len -= RTE_ETHER_CRC_LEN; + if (rx_packet_len <= RTE_ETHER_CRC_LEN) { + rte_pktmbuf_free_seg(rxm); + first_seg->nb_segs--; + last_seg->data_len = + (uint16_t)(last_seg->data_len - + (RTE_ETHER_CRC_LEN - rx_packet_len)); + last_seg->next = NULL; + } else + rxm->data_len = (uint16_t)(rx_packet_len - + RTE_ETHER_CRC_LEN); + } + + first_seg->port = rxq->port_id; + first_seg->ol_flags = 0; + iavf_rxd_to_vlan_tci(first_seg, &rxd); + pkt_flags = iavf_rxd_to_pkt_flags(qword1); + first_seg->packet_type = + ptype_tbl[(uint8_t)((qword1 & + IAVF_RXD_QW1_PTYPE_MASK) >> IAVF_RXD_QW1_PTYPE_SHIFT)]; + + if (pkt_flags & PKT_RX_RSS_HASH) + first_seg->hash.rss = + rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss); + + if (pkt_flags & PKT_RX_FDIR) + pkt_flags |= iavf_rxd_build_fdir(&rxd, first_seg); + + first_seg->ol_flags |= pkt_flags; + + /* Prefetch data of first segment, if configured to do so. */ + rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr, + first_seg->data_off)); + rx_pkts[nb_rx++] = first_seg; + first_seg = NULL; + } + + /* Record index of the next RX descriptor to probe. */ + rxq->rx_tail = rx_id; + rxq->pkt_first_seg = first_seg; + rxq->pkt_last_seg = last_seg; + + iavf_update_rx_tail(rxq, nb_hold, rx_id); + + return nb_rx; +} + +#define IAVF_LOOK_AHEAD 8 +static inline int +iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq) +{ + volatile union iavf_rx_flex_desc *rxdp; + struct rte_mbuf **rxep; + struct rte_mbuf *mb; + uint16_t stat_err0; + uint16_t pkt_len; + int32_t s[IAVF_LOOK_AHEAD], nb_dd; + int32_t i, j, nb_rx = 0; + uint64_t pkt_flags; + const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl; + + rxdp = (volatile union iavf_rx_flex_desc *)&rxq->rx_ring[rxq->rx_tail]; + rxep = &rxq->sw_ring[rxq->rx_tail]; + + stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0); + + /* Make sure there is at least 1 packet to receive */ + if (!(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S))) + return 0; + + /* Scan LOOK_AHEAD descriptors at a time to determine which + * descriptors reference packets that are ready to be received. 
+ */ + for (i = 0; i < IAVF_RX_MAX_BURST; i += IAVF_LOOK_AHEAD, + rxdp += IAVF_LOOK_AHEAD, rxep += IAVF_LOOK_AHEAD) { + /* Read desc statuses backwards to avoid race condition */ + for (j = IAVF_LOOK_AHEAD - 1; j >= 0; j--) + s[j] = rte_le_to_cpu_16(rxdp[j].wb.status_error0); + + rte_smp_rmb(); + + /* Compute how many status bits were set */ + for (j = 0, nb_dd = 0; j < IAVF_LOOK_AHEAD; j++) + nb_dd += s[j] & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S); + + nb_rx += nb_dd; + + /* Translate descriptor info to mbuf parameters */ + for (j = 0; j < nb_dd; j++) { + IAVF_DUMP_RX_DESC(rxq, &rxdp[j], + rxq->rx_tail + + i * IAVF_LOOK_AHEAD + j); + + mb = rxep[j]; + pkt_len = (rte_le_to_cpu_16(rxdp[j].wb.pkt_len) & + IAVF_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len; + mb->data_len = pkt_len; + mb->pkt_len = pkt_len; + mb->ol_flags = 0; + + mb->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M & + rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)]; + iavf_flex_rxd_to_vlan_tci(mb, &rxdp[j]); + iavf_rxd_to_pkt_fields(mb, &rxdp[j]); + stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0); + pkt_flags = iavf_flex_rxd_error_to_pkt_flags(stat_err0); + + mb->ol_flags |= pkt_flags; + } + + for (j = 0; j < IAVF_LOOK_AHEAD; j++) + rxq->rx_stage[i + j] = rxep[j]; + + if (nb_dd != IAVF_LOOK_AHEAD) + break; + } + + /* Clear software ring entries */ + for (i = 0; i < nb_rx; i++) + rxq->sw_ring[rxq->rx_tail + i] = NULL; + + return nb_rx; +} + +static inline int +iavf_rx_scan_hw_ring(struct iavf_rx_queue *rxq) +{ + volatile union iavf_rx_desc *rxdp; + struct rte_mbuf **rxep; + struct rte_mbuf *mb; + uint16_t pkt_len; + uint64_t qword1; + uint32_t rx_status; + int32_t s[IAVF_LOOK_AHEAD], nb_dd; + int32_t i, j, nb_rx = 0; + uint64_t pkt_flags; + const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl; + + rxdp = &rxq->rx_ring[rxq->rx_tail]; + rxep = &rxq->sw_ring[rxq->rx_tail]; + + qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len); + rx_status = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >> + IAVF_RXD_QW1_STATUS_SHIFT; + + /* Make sure there is at least 1 packet to receive */ + if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT))) + return 0; + + /* Scan LOOK_AHEAD descriptors at a time to determine which + * descriptors reference packets that are ready to be received. 
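/*
 * Illustrative sketch, not driver code: the IAVF_LOOK_AHEAD scan pattern used
 * by the two scan functions here. Statuses are examined 8 descriptors at a
 * time; the number of set DD bits in a group says how many packets can be
 * staged, and scanning stops at the first group that is not fully done.
 * The helper and constant names below are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

#define LOOK_AHEAD 8
#define MAX_BURST  32

static int scan_ring(const uint8_t *dd_bits, int ring_len)
{
	int i, j, nb_rx = 0;

	for (i = 0; i + LOOK_AHEAD <= ring_len && i < MAX_BURST;
	     i += LOOK_AHEAD) {
		int nb_dd = 0;

		/* In the driver the 8 statuses are loaded in reverse order
		 * and followed by a read barrier; plain loads suffice here.
		 */
		for (j = 0; j < LOOK_AHEAD; j++)
			nb_dd += dd_bits[i + j] & 1;

		nb_rx += nb_dd;
		if (nb_dd != LOOK_AHEAD)
			break;          /* partial group: stop scanning */
	}
	return nb_rx;
}

int main(void)
{
	/* 11 descriptors done, then the ring goes cold */
	uint8_t dd[32] = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0 };

	printf("ready packets: %d\n", scan_ring(dd, 32));   /* prints 11 */
	return 0;
}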
+ */ + for (i = 0; i < IAVF_RX_MAX_BURST; i += IAVF_LOOK_AHEAD, + rxdp += IAVF_LOOK_AHEAD, rxep += IAVF_LOOK_AHEAD) { + /* Read desc statuses backwards to avoid race condition */ + for (j = IAVF_LOOK_AHEAD - 1; j >= 0; j--) { + qword1 = rte_le_to_cpu_64( + rxdp[j].wb.qword1.status_error_len); + s[j] = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >> + IAVF_RXD_QW1_STATUS_SHIFT; + } + + rte_smp_rmb(); + + /* Compute how many status bits were set */ + for (j = 0, nb_dd = 0; j < IAVF_LOOK_AHEAD; j++) + nb_dd += s[j] & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT); + + nb_rx += nb_dd; + + /* Translate descriptor info to mbuf parameters */ + for (j = 0; j < nb_dd; j++) { + IAVF_DUMP_RX_DESC(rxq, &rxdp[j], + rxq->rx_tail + i * IAVF_LOOK_AHEAD + j); + + mb = rxep[j]; + qword1 = rte_le_to_cpu_64 + (rxdp[j].wb.qword1.status_error_len); + pkt_len = ((qword1 & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >> + IAVF_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len; + mb->data_len = pkt_len; + mb->pkt_len = pkt_len; + mb->ol_flags = 0; + iavf_rxd_to_vlan_tci(mb, &rxdp[j]); + pkt_flags = iavf_rxd_to_pkt_flags(qword1); + mb->packet_type = + ptype_tbl[(uint8_t)((qword1 & + IAVF_RXD_QW1_PTYPE_MASK) >> + IAVF_RXD_QW1_PTYPE_SHIFT)]; + + if (pkt_flags & PKT_RX_RSS_HASH) + mb->hash.rss = rte_le_to_cpu_32( + rxdp[j].wb.qword0.hi_dword.rss); + + if (pkt_flags & PKT_RX_FDIR) + pkt_flags |= iavf_rxd_build_fdir(&rxdp[j], mb); + + mb->ol_flags |= pkt_flags; + } + + for (j = 0; j < IAVF_LOOK_AHEAD; j++) + rxq->rx_stage[i + j] = rxep[j]; + + if (nb_dd != IAVF_LOOK_AHEAD) + break; + } + + /* Clear software ring entries */ + for (i = 0; i < nb_rx; i++) + rxq->sw_ring[rxq->rx_tail + i] = NULL; + + return nb_rx; +} + +static inline uint16_t +iavf_rx_fill_from_stage(struct iavf_rx_queue *rxq, + struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + uint16_t i; + struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail]; + + nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail); + + for (i = 0; i < nb_pkts; i++) + rx_pkts[i] = stage[i]; + + rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts); + rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts); + + return nb_pkts; +} + +static inline int +iavf_rx_alloc_bufs(struct iavf_rx_queue *rxq) +{ + volatile union iavf_rx_desc *rxdp; + struct rte_mbuf **rxep; + struct rte_mbuf *mb; + uint16_t alloc_idx, i; + uint64_t dma_addr; + int diag; + + /* Allocate buffers in bulk */ + alloc_idx = (uint16_t)(rxq->rx_free_trigger - + (rxq->rx_free_thresh - 1)); + rxep = &rxq->sw_ring[alloc_idx]; + diag = rte_mempool_get_bulk(rxq->mp, (void *)rxep, + rxq->rx_free_thresh); + if (unlikely(diag != 0)) { + PMD_RX_LOG(ERR, "Failed to get mbufs in bulk"); + return -ENOMEM; + } + + rxdp = &rxq->rx_ring[alloc_idx]; + for (i = 0; i < rxq->rx_free_thresh; i++) { + if (likely(i < (rxq->rx_free_thresh - 1))) + /* Prefetch next mbuf */ + rte_prefetch0(rxep[i + 1]); + + mb = rxep[i]; + rte_mbuf_refcnt_set(mb, 1); + mb->next = NULL; + mb->data_off = RTE_PKTMBUF_HEADROOM; + mb->nb_segs = 1; + mb->port = rxq->port_id; + dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb)); + rxdp[i].read.hdr_addr = 0; + rxdp[i].read.pkt_addr = dma_addr; + } + + /* Update rx tail register */ + rte_wmb(); + IAVF_PCI_REG_WRITE_RELAXED(rxq->qrx_tail, rxq->rx_free_trigger); + + rxq->rx_free_trigger = + (uint16_t)(rxq->rx_free_trigger + rxq->rx_free_thresh); + if (rxq->rx_free_trigger >= rxq->nb_rx_desc) + rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1); + + return 0; +} + +static inline uint16_t +rx_recv_pkts(void *rx_queue, struct rte_mbuf 
**rx_pkts, uint16_t nb_pkts) +{ + struct iavf_rx_queue *rxq = (struct iavf_rx_queue *)rx_queue; + uint16_t nb_rx = 0; + + if (!nb_pkts) + return 0; + + if (rxq->rx_nb_avail) + return iavf_rx_fill_from_stage(rxq, rx_pkts, nb_pkts); + + if (rxq->rxdid == IAVF_RXDID_COMMS_OVS_1) + nb_rx = (uint16_t)iavf_rx_scan_hw_ring_flex_rxd(rxq); + else + nb_rx = (uint16_t)iavf_rx_scan_hw_ring(rxq); + rxq->rx_next_avail = 0; + rxq->rx_nb_avail = nb_rx; + rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx); + + if (rxq->rx_tail > rxq->rx_free_trigger) { + if (iavf_rx_alloc_bufs(rxq) != 0) { + uint16_t i, j; + + /* TODO: count rx_mbuf_alloc_failed here */ + + rxq->rx_nb_avail = 0; + rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx); + for (i = 0, j = rxq->rx_tail; i < nb_rx; i++, j++) + rxq->sw_ring[j] = rxq->rx_stage[i]; + + return 0; + } + } + + if (rxq->rx_tail >= rxq->nb_rx_desc) + rxq->rx_tail = 0; + + PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u, nb_rx=%u", + rxq->port_id, rxq->queue_id, + rxq->rx_tail, nb_rx); + + if (rxq->rx_nb_avail) + return iavf_rx_fill_from_stage(rxq, rx_pkts, nb_pkts); + + return 0; +} + +static uint16_t +iavf_recv_pkts_bulk_alloc(void *rx_queue, + struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + uint16_t nb_rx = 0, n, count; + + if (unlikely(nb_pkts == 0)) + return 0; + + if (likely(nb_pkts <= IAVF_RX_MAX_BURST)) + return rx_recv_pkts(rx_queue, rx_pkts, nb_pkts); + + while (nb_pkts) { + n = RTE_MIN(nb_pkts, IAVF_RX_MAX_BURST); + count = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n); + nb_rx = (uint16_t)(nb_rx + count); + nb_pkts = (uint16_t)(nb_pkts - count); + if (count < n) + break; + } + + return nb_rx; +} + +static inline int +iavf_xmit_cleanup(struct iavf_tx_queue *txq) +{ + struct iavf_tx_entry *sw_ring = txq->sw_ring; + uint16_t last_desc_cleaned = txq->last_desc_cleaned; + uint16_t nb_tx_desc = txq->nb_tx_desc; + uint16_t desc_to_clean_to; + uint16_t nb_tx_to_clean; + + volatile struct iavf_tx_desc *txd = txq->tx_ring; + + desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->rs_thresh); + if (desc_to_clean_to >= nb_tx_desc) + desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc); + + desc_to_clean_to = sw_ring[desc_to_clean_to].last_id; + if ((txd[desc_to_clean_to].cmd_type_offset_bsz & + rte_cpu_to_le_64(IAVF_TXD_QW1_DTYPE_MASK)) != + rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE)) { + PMD_TX_FREE_LOG(DEBUG, "TX descriptor %4u is not done " + "(port=%d queue=%d)", desc_to_clean_to, + txq->port_id, txq->queue_id); + return -1; + } + + if (last_desc_cleaned > desc_to_clean_to) + nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) + + desc_to_clean_to); + else + nb_tx_to_clean = (uint16_t)(desc_to_clean_to - + last_desc_cleaned); + + txd[desc_to_clean_to].cmd_type_offset_bsz = 0; + + txq->last_desc_cleaned = desc_to_clean_to; + txq->nb_free = (uint16_t)(txq->nb_free + nb_tx_to_clean); + + return 0; +} + +/* Check if the context descriptor is needed for TX offloading */ +static inline uint16_t +iavf_calc_context_desc(uint64_t flags) +{ + static uint64_t mask = PKT_TX_TCP_SEG; + + return (flags & mask) ? 
1 : 0; +} + +static inline void +iavf_txd_enable_checksum(uint64_t ol_flags, + uint32_t *td_cmd, + uint32_t *td_offset, + union iavf_tx_offload tx_offload) +{ + /* Set MACLEN */ + *td_offset |= (tx_offload.l2_len >> 1) << + IAVF_TX_DESC_LENGTH_MACLEN_SHIFT; + + /* Enable L3 checksum offloads */ + if (ol_flags & PKT_TX_IP_CKSUM) { + *td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM; + *td_offset |= (tx_offload.l3_len >> 2) << + IAVF_TX_DESC_LENGTH_IPLEN_SHIFT; + } else if (ol_flags & PKT_TX_IPV4) { + *td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4; + *td_offset |= (tx_offload.l3_len >> 2) << + IAVF_TX_DESC_LENGTH_IPLEN_SHIFT; + } else if (ol_flags & PKT_TX_IPV6) { + *td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV6; + *td_offset |= (tx_offload.l3_len >> 2) << + IAVF_TX_DESC_LENGTH_IPLEN_SHIFT; + } + + if (ol_flags & PKT_TX_TCP_SEG) { + *td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP; + *td_offset |= (tx_offload.l4_len >> 2) << + IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; + return; + } + + /* Enable L4 checksum offloads */ + switch (ol_flags & PKT_TX_L4_MASK) { + case PKT_TX_TCP_CKSUM: + *td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP; + *td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) << + IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; + break; + case PKT_TX_SCTP_CKSUM: + *td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_SCTP; + *td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) << + IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; + break; + case PKT_TX_UDP_CKSUM: + *td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_UDP; + *td_offset |= (sizeof(struct rte_udp_hdr) >> 2) << + IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; + break; + default: + break; + } +} + +/* set TSO context descriptor + * support IP -> L4 and IP -> IP -> L4 + */ +static inline uint64_t +iavf_set_tso_ctx(struct rte_mbuf *mbuf, union iavf_tx_offload tx_offload) +{ + uint64_t ctx_desc = 0; + uint32_t cd_cmd, hdr_len, cd_tso_len; + + if (!tx_offload.l4_len) { + PMD_TX_LOG(DEBUG, "L4 length set to 0"); + return ctx_desc; + } + + hdr_len = tx_offload.l2_len + + tx_offload.l3_len + + tx_offload.l4_len; + + cd_cmd = IAVF_TX_CTX_DESC_TSO; + cd_tso_len = mbuf->pkt_len - hdr_len; + ctx_desc |= ((uint64_t)cd_cmd << IAVF_TXD_CTX_QW1_CMD_SHIFT) | + ((uint64_t)cd_tso_len << IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT) | + ((uint64_t)mbuf->tso_segsz << IAVF_TXD_CTX_QW1_MSS_SHIFT); + + return ctx_desc; +} + +/* Construct the tx flags */ +static inline uint64_t +iavf_build_ctob(uint32_t td_cmd, uint32_t td_offset, unsigned int size, + uint32_t td_tag) +{ + return rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DATA | + ((uint64_t)td_cmd << IAVF_TXD_QW1_CMD_SHIFT) | + ((uint64_t)td_offset << + IAVF_TXD_QW1_OFFSET_SHIFT) | + ((uint64_t)size << + IAVF_TXD_QW1_TX_BUF_SZ_SHIFT) | + ((uint64_t)td_tag << + IAVF_TXD_QW1_L2TAG1_SHIFT)); +} + +/* TX function */ +uint16_t +iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) +{ + volatile struct iavf_tx_desc *txd; + volatile struct iavf_tx_desc *txr; + struct iavf_tx_queue *txq; + struct iavf_tx_entry *sw_ring; + struct iavf_tx_entry *txe, *txn; + struct rte_mbuf *tx_pkt; + struct rte_mbuf *m_seg; + uint16_t tx_id; + uint16_t nb_tx; + uint32_t td_cmd; + uint32_t td_offset; + uint32_t td_tag; + uint64_t ol_flags; + uint16_t nb_used; + uint16_t nb_ctx; + uint16_t tx_last; + uint16_t slen; + uint64_t buf_dma_addr; + union iavf_tx_offload tx_offload = {0}; + + txq = tx_queue; + sw_ring = txq->sw_ring; + txr = txq->tx_ring; + tx_id = txq->tx_tail; + txe = &sw_ring[tx_id]; + + /* Check if the descriptor ring needs to be cleaned. 
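/*
 * Illustrative sketch, not driver code: how the second qword of a Tx data
 * descriptor is assembled from command bits, the header-length offsets and
 * the buffer size, mirroring iavf_build_ctob() above. Header lengths are
 * encoded in word units (MACLEN in 2-byte words, IPLEN and L4LEN in 4-byte
 * words), which is why the checksum helper shifts them right by 1 or 2.
 * All shift values and field positions below are assumptions standing in
 * for the IAVF_TXD_QW1_* and IAVF_TX_DESC_LENGTH_* definitions.
 */
#include <stdint.h>
#include <stdio.h>

#define QW1_DTYPE_DATA    0x0ULL   /* assumed */
#define QW1_CMD_SHIFT     4        /* assumed */
#define QW1_OFFSET_SHIFT  16       /* assumed */
#define QW1_BUF_SZ_SHIFT  34       /* assumed */
#define QW1_L2TAG1_SHIFT  48       /* assumed */

static uint64_t build_qword1(uint32_t cmd, uint32_t offset,
			     uint32_t size, uint32_t tag)
{
	return QW1_DTYPE_DATA |
	       ((uint64_t)cmd << QW1_CMD_SHIFT) |
	       ((uint64_t)offset << QW1_OFFSET_SHIFT) |
	       ((uint64_t)size << QW1_BUF_SZ_SHIFT) |
	       ((uint64_t)tag << QW1_L2TAG1_SHIFT);
}

int main(void)
{
	/* Ethernet + IPv4 + TCP headers, packed into the offset field.
	 * The per-field positions (0, 7, 14) are assumptions as well.
	 */
	uint32_t l2_len = 14, l3_len = 20, l4_len = 20;
	uint32_t offset = (l2_len >> 1) | ((l3_len >> 2) << 7) |
			  ((l4_len >> 2) << 14);

	printf("qword1 = 0x%016llx\n",
	       (unsigned long long)build_qword1(0x11, offset, 1500, 0));
	return 0;
}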
*/ + if (txq->nb_free < txq->free_thresh) + iavf_xmit_cleanup(txq); + + for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) { + td_cmd = 0; + td_tag = 0; + td_offset = 0; + + tx_pkt = *tx_pkts++; + RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf); + + ol_flags = tx_pkt->ol_flags; + tx_offload.l2_len = tx_pkt->l2_len; + tx_offload.l3_len = tx_pkt->l3_len; + tx_offload.l4_len = tx_pkt->l4_len; + tx_offload.tso_segsz = tx_pkt->tso_segsz; + + /* Calculate the number of context descriptors needed. */ + nb_ctx = iavf_calc_context_desc(ol_flags); + + /* The number of descriptors that must be allocated for + * a packet equals to the number of the segments of that + * packet plus 1 context descriptor if needed. + */ + nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx); + tx_last = (uint16_t)(tx_id + nb_used - 1); + + /* Circular ring */ + if (tx_last >= txq->nb_tx_desc) + tx_last = (uint16_t)(tx_last - txq->nb_tx_desc); + + PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u" + " tx_first=%u tx_last=%u", + txq->port_id, txq->queue_id, tx_id, tx_last); + + if (nb_used > txq->nb_free) { + if (iavf_xmit_cleanup(txq)) { + if (nb_tx == 0) + return 0; + goto end_of_tx; + } + if (unlikely(nb_used > txq->rs_thresh)) { + while (nb_used > txq->nb_free) { + if (iavf_xmit_cleanup(txq)) { + if (nb_tx == 0) + return 0; + goto end_of_tx; + } + } + } + } + + /* Descriptor based VLAN insertion */ + if (ol_flags & PKT_TX_VLAN_PKT) { + td_cmd |= IAVF_TX_DESC_CMD_IL2TAG1; + td_tag = tx_pkt->vlan_tci; + } + + /* According to datasheet, the bit2 is reserved and must be + * set to 1. + */ + td_cmd |= 0x04; + + /* Enable checksum offloading */ + if (ol_flags & IAVF_TX_CKSUM_OFFLOAD_MASK) + iavf_txd_enable_checksum(ol_flags, &td_cmd, + &td_offset, tx_offload); + + if (nb_ctx) { + /* Setup TX context descriptor if required */ + uint64_t cd_type_cmd_tso_mss = + IAVF_TX_DESC_DTYPE_CONTEXT; + volatile struct iavf_tx_context_desc *ctx_txd = + (volatile struct iavf_tx_context_desc *) + &txr[tx_id]; + + txn = &sw_ring[txe->next_id]; + RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf); + if (txe->mbuf) { + rte_pktmbuf_free_seg(txe->mbuf); + txe->mbuf = NULL; + } + + /* TSO enabled */ + if (ol_flags & PKT_TX_TCP_SEG) + cd_type_cmd_tso_mss |= + iavf_set_tso_ctx(tx_pkt, tx_offload); + + ctx_txd->type_cmd_tso_mss = + rte_cpu_to_le_64(cd_type_cmd_tso_mss); + + IAVF_DUMP_TX_DESC(txq, &txr[tx_id], tx_id); + txe->last_id = tx_last; + tx_id = txe->next_id; + txe = txn; + } + + m_seg = tx_pkt; + do { + txd = &txr[tx_id]; + txn = &sw_ring[txe->next_id]; + + if (txe->mbuf) + rte_pktmbuf_free_seg(txe->mbuf); + txe->mbuf = m_seg; + + /* Setup TX Descriptor */ + slen = m_seg->data_len; + buf_dma_addr = rte_mbuf_data_iova(m_seg); + txd->buffer_addr = rte_cpu_to_le_64(buf_dma_addr); + txd->cmd_type_offset_bsz = iavf_build_ctob(td_cmd, + td_offset, + slen, + td_tag); + + IAVF_DUMP_TX_DESC(txq, txd, tx_id); + txe->last_id = tx_last; + tx_id = txe->next_id; + txe = txn; + m_seg = m_seg->next; + } while (m_seg); + + /* The last packet data descriptor needs End Of Packet (EOP) */ + td_cmd |= IAVF_TX_DESC_CMD_EOP; + txq->nb_used = (uint16_t)(txq->nb_used + nb_used); + txq->nb_free = (uint16_t)(txq->nb_free - nb_used); + + if (txq->nb_used >= txq->rs_thresh) { + PMD_TX_LOG(DEBUG, "Setting RS bit on TXD id=" + "%4u (port=%d queue=%d)", + tx_last, txq->port_id, txq->queue_id); + + td_cmd |= IAVF_TX_DESC_CMD_RS; + + /* Update txq RS bit counters */ + txq->nb_used = 0; + } + + txd->cmd_type_offset_bsz |= + rte_cpu_to_le_64(((uint64_t)td_cmd) << + IAVF_TXD_QW1_CMD_SHIFT); + IAVF_DUMP_TX_DESC(txq, txd, 
tx_id); + } + +end_of_tx: + rte_wmb(); + + PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u", + txq->port_id, txq->queue_id, tx_id, nb_tx); + + IAVF_PCI_REG_WRITE_RELAXED(txq->qtx_tail, tx_id); + txq->tx_tail = tx_id; + + return nb_tx; +} + +/* TX prep functions */ +uint16_t +iavf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + int i, ret; + uint64_t ol_flags; + struct rte_mbuf *m; + + for (i = 0; i < nb_pkts; i++) { + m = tx_pkts[i]; + ol_flags = m->ol_flags; + + /* Check condition for nb_segs > IAVF_TX_MAX_MTU_SEG. */ + if (!(ol_flags & PKT_TX_TCP_SEG)) { + if (m->nb_segs > IAVF_TX_MAX_MTU_SEG) { + rte_errno = EINVAL; + return i; + } + } else if ((m->tso_segsz < IAVF_MIN_TSO_MSS) || + (m->tso_segsz > IAVF_MAX_TSO_MSS)) { + /* MSS outside the range are considered malicious */ + rte_errno = EINVAL; + return i; + } + + if (ol_flags & IAVF_TX_OFFLOAD_NOTSUP_MASK) { + rte_errno = ENOTSUP; + return i; + } + +#ifdef RTE_LIBRTE_ETHDEV_DEBUG + ret = rte_validate_tx_offload(m); + if (ret != 0) { + rte_errno = -ret; + return i; + } +#endif + ret = rte_net_intel_cksum_prepare(m); + if (ret != 0) { + rte_errno = -ret; + return i; + } + } + + return i; +} + +/* choose rx function*/ +void +iavf_set_rx_function(struct rte_eth_dev *dev) +{ + struct iavf_adapter *adapter = + IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); +#ifdef RTE_ARCH_X86 + struct iavf_rx_queue *rxq; + int i; + bool use_avx2 = false; + + if (!iavf_rx_vec_dev_check(dev)) { + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = dev->data->rx_queues[i]; + (void)iavf_rxq_vec_setup(rxq); + } + + if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 || + rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) + use_avx2 = true; + + if (dev->data->scattered_rx) { + PMD_DRV_LOG(DEBUG, + "Using %sVector Scattered Rx (port %d).", + use_avx2 ? "avx2 " : "", + dev->data->port_id); + if (vf->vf_res->vf_cap_flags & + VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) + dev->rx_pkt_burst = use_avx2 ? + iavf_recv_scattered_pkts_vec_avx2_flex_rxd : + iavf_recv_scattered_pkts_vec_flex_rxd; + else + dev->rx_pkt_burst = use_avx2 ? + iavf_recv_scattered_pkts_vec_avx2 : + iavf_recv_scattered_pkts_vec; + } else { + PMD_DRV_LOG(DEBUG, "Using %sVector Rx (port %d).", + use_avx2 ? "avx2 " : "", + dev->data->port_id); + if (vf->vf_res->vf_cap_flags & + VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) + dev->rx_pkt_burst = use_avx2 ? + iavf_recv_pkts_vec_avx2_flex_rxd : + iavf_recv_pkts_vec_flex_rxd; + else + dev->rx_pkt_burst = use_avx2 ? 
+ iavf_recv_pkts_vec_avx2 : + iavf_recv_pkts_vec; + } + + return; + } +#endif + + if (dev->data->scattered_rx) { + PMD_DRV_LOG(DEBUG, "Using a Scattered Rx callback (port=%d).", + dev->data->port_id); + if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) + dev->rx_pkt_burst = iavf_recv_scattered_pkts_flex_rxd; + else + dev->rx_pkt_burst = iavf_recv_scattered_pkts; + } else if (adapter->rx_bulk_alloc_allowed) { + PMD_DRV_LOG(DEBUG, "Using bulk Rx callback (port=%d).", + dev->data->port_id); + dev->rx_pkt_burst = iavf_recv_pkts_bulk_alloc; + } else { + PMD_DRV_LOG(DEBUG, "Using Basic Rx callback (port=%d).", + dev->data->port_id); + if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) + dev->rx_pkt_burst = iavf_recv_pkts_flex_rxd; + else + dev->rx_pkt_burst = iavf_recv_pkts; + } +} + +/* choose tx function*/ +void +iavf_set_tx_function(struct rte_eth_dev *dev) +{ +#ifdef RTE_ARCH_X86 + struct iavf_tx_queue *txq; + int i; + bool use_avx2 = false; + + if (!iavf_tx_vec_dev_check(dev)) { + for (i = 0; i < dev->data->nb_tx_queues; i++) { + txq = dev->data->tx_queues[i]; + if (!txq) + continue; + iavf_txq_vec_setup(txq); + } + + if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 || + rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) + use_avx2 = true; + + PMD_DRV_LOG(DEBUG, "Using %sVector Tx (port %d).", + use_avx2 ? "avx2 " : "", + dev->data->port_id); + dev->tx_pkt_burst = use_avx2 ? + iavf_xmit_pkts_vec_avx2 : + iavf_xmit_pkts_vec; + dev->tx_pkt_prepare = NULL; + + return; + } +#endif + + PMD_DRV_LOG(DEBUG, "Using Basic Tx callback (port=%d).", + dev->data->port_id); + dev->tx_pkt_burst = iavf_xmit_pkts; + dev->tx_pkt_prepare = iavf_prep_pkts; +} + +void +iavf_dev_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, + struct rte_eth_rxq_info *qinfo) +{ + struct iavf_rx_queue *rxq; + + rxq = dev->data->rx_queues[queue_id]; + + qinfo->mp = rxq->mp; + qinfo->scattered_rx = dev->data->scattered_rx; + qinfo->nb_desc = rxq->nb_rx_desc; + + qinfo->conf.rx_free_thresh = rxq->rx_free_thresh; + qinfo->conf.rx_drop_en = true; + qinfo->conf.rx_deferred_start = rxq->rx_deferred_start; +} + +void +iavf_dev_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, + struct rte_eth_txq_info *qinfo) +{ + struct iavf_tx_queue *txq; + + txq = dev->data->tx_queues[queue_id]; + + qinfo->nb_desc = txq->nb_tx_desc; + + qinfo->conf.tx_free_thresh = txq->free_thresh; + qinfo->conf.tx_rs_thresh = txq->rs_thresh; + qinfo->conf.offloads = txq->offloads; + qinfo->conf.tx_deferred_start = txq->tx_deferred_start; +} + +/* Get the number of used descriptors of a rx queue */ +uint32_t +iavf_dev_rxq_count(struct rte_eth_dev *dev, uint16_t queue_id) +{ +#define IAVF_RXQ_SCAN_INTERVAL 4 + volatile union iavf_rx_desc *rxdp; + struct iavf_rx_queue *rxq; + uint16_t desc = 0; + + rxq = dev->data->rx_queues[queue_id]; + rxdp = &rxq->rx_ring[rxq->rx_tail]; + + while ((desc < rxq->nb_rx_desc) && + ((rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) & + IAVF_RXD_QW1_STATUS_MASK) >> IAVF_RXD_QW1_STATUS_SHIFT) & + (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)) { + /* Check the DD bit of a rx descriptor of each 4 in a group, + * to avoid checking too frequently and downgrading performance + * too much. 
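/*
 * Illustrative sketch, not driver code: iavf_dev_rxq_count() here probes the
 * DD bit only on every 4th descriptor (IAVF_RXQ_SCAN_INTERVAL) to bound the
 * cost of the estimate, wrapping the walk when it passes the ring end. The
 * result is therefore a multiple of 4, an estimate rather than an exact
 * count. Names below are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

#define SCAN_INTERVAL 4

static uint16_t count_used(const uint8_t *dd, uint16_t nb_desc, uint16_t tail)
{
	uint16_t desc = 0;
	uint16_t idx = tail;

	while (desc < nb_desc && dd[idx]) {
		desc += SCAN_INTERVAL;
		idx = (uint16_t)(idx + SCAN_INTERVAL);
		if (idx >= nb_desc)
			idx = (uint16_t)(idx - nb_desc);  /* wrap around */
	}
	return desc;
}

int main(void)
{
	uint8_t dd[16] = { 1, 1, 1, 1, 1, 1, 0 };  /* 6 descriptors done */

	/* Prints 8: the probe at index 4 was still done, index 8 was not. */
	printf("estimated used: %u\n", (unsigned)count_used(dd, 16, 0));
	return 0;
}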
+ */ + desc += IAVF_RXQ_SCAN_INTERVAL; + rxdp += IAVF_RXQ_SCAN_INTERVAL; + if (rxq->rx_tail + desc >= rxq->nb_rx_desc) + rxdp = &(rxq->rx_ring[rxq->rx_tail + + desc - rxq->nb_rx_desc]); + } + + return desc; +} + +int +iavf_dev_rx_desc_status(void *rx_queue, uint16_t offset) +{ + struct iavf_rx_queue *rxq = rx_queue; + volatile uint64_t *status; + uint64_t mask; + uint32_t desc; + + if (unlikely(offset >= rxq->nb_rx_desc)) + return -EINVAL; + + if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold) + return RTE_ETH_RX_DESC_UNAVAIL; + + desc = rxq->rx_tail + offset; + if (desc >= rxq->nb_rx_desc) + desc -= rxq->nb_rx_desc; + + status = &rxq->rx_ring[desc].wb.qword1.status_error_len; + mask = rte_le_to_cpu_64((1ULL << IAVF_RX_DESC_STATUS_DD_SHIFT) + << IAVF_RXD_QW1_STATUS_SHIFT); + if (*status & mask) + return RTE_ETH_RX_DESC_DONE; + + return RTE_ETH_RX_DESC_AVAIL; +} + +int +iavf_dev_tx_desc_status(void *tx_queue, uint16_t offset) +{ + struct iavf_tx_queue *txq = tx_queue; + volatile uint64_t *status; + uint64_t mask, expect; + uint32_t desc; + + if (unlikely(offset >= txq->nb_tx_desc)) + return -EINVAL; + + desc = txq->tx_tail + offset; + /* go to next desc that has the RS bit */ + desc = ((desc + txq->rs_thresh - 1) / txq->rs_thresh) * + txq->rs_thresh; + if (desc >= txq->nb_tx_desc) { + desc -= txq->nb_tx_desc; + if (desc >= txq->nb_tx_desc) + desc -= txq->nb_tx_desc; + } + + status = &txq->tx_ring[desc].cmd_type_offset_bsz; + mask = rte_le_to_cpu_64(IAVF_TXD_QW1_DTYPE_MASK); + expect = rte_cpu_to_le_64( + IAVF_TX_DESC_DTYPE_DESC_DONE << IAVF_TXD_QW1_DTYPE_SHIFT); + if ((*status & mask) == expect) + return RTE_ETH_TX_DESC_DONE; + + return RTE_ETH_TX_DESC_FULL; +} + +const uint32_t * +iavf_get_default_ptype_table(void) +{ + static const uint32_t ptype_tbl[IAVF_MAX_PKT_TYPE] + __rte_cache_aligned = { + /* L2 types */ + /* [0] reserved */ + [1] = RTE_PTYPE_L2_ETHER, + [2] = RTE_PTYPE_L2_ETHER_TIMESYNC, + /* [3] - [5] reserved */ + [6] = RTE_PTYPE_L2_ETHER_LLDP, + /* [7] - [10] reserved */ + [11] = RTE_PTYPE_L2_ETHER_ARP, + /* [12] - [21] reserved */ + + /* Non tunneled IPv4 */ + [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_FRAG, + [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_NONFRAG, + [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_UDP, + /* [25] reserved */ + [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_TCP, + [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_SCTP, + [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_ICMP, + + /* IPv4 --> IPv4 */ + [29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [30] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [31] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + /* [32] reserved */ + [33] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_SCTP, + [35] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + 
RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + + /* IPv4 --> IPv6 */ + [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [37] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [38] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + /* [39] reserved */ + [40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_SCTP, + [42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + + /* IPv4 --> GRE/Teredo/VXLAN */ + [43] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT, + + /* IPv4 --> GRE/Teredo/VXLAN --> IPv4 */ + [44] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + /* [47] reserved */ + [48] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_SCTP, + [50] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + + /* IPv4 --> GRE/Teredo/VXLAN --> IPv6 */ + [51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [53] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + /* [54] reserved */ + [55] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [56] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_SCTP, + [57] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + + /* IPv4 --> GRE/Teredo/VXLAN --> MAC */ + [58] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER, + + /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */ + [59] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + 
RTE_PTYPE_INNER_L4_FRAG, + [60] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + /* [62] reserved */ + [63] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [64] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_SCTP, + [65] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + + /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */ + [66] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [67] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [68] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + /* [69] reserved */ + [70] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [71] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_SCTP, + [72] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + /* [73] - [87] reserved */ + + /* Non tunneled IPv6 */ + [88] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_FRAG, + [89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_NONFRAG, + [90] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_UDP, + /* [91] reserved */ + [92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_TCP, + [93] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_SCTP, + [94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_ICMP, + + /* IPv6 --> IPv4 */ + [95] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [97] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + /* [98] reserved */ + [99] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [100] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_SCTP, + [101] = 
RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + + /* IPv6 --> IPv6 */ + [102] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [103] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [104] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + /* [105] reserved */ + [106] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [107] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_SCTP, + [108] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + + /* IPv6 --> GRE/Teredo/VXLAN */ + [109] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT, + + /* IPv6 --> GRE/Teredo/VXLAN --> IPv4 */ + [110] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [111] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [112] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + /* [113] reserved */ + [114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_SCTP, + [116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + + /* IPv6 --> GRE/Teredo/VXLAN --> IPv6 */ + [117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [118] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [119] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + /* [120] reserved */ + [121] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [122] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_SCTP, + [123] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + + /* IPv6 --> GRE/Teredo/VXLAN --> MAC */ + [124] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER, + + /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */ + [125] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + 
RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [126] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [127] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + /* [128] reserved */ + [129] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [130] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_SCTP, + [131] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + + /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */ + [132] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [133] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [134] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + /* [135] reserved */ + [136] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [137] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_SCTP, + [138] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + /* [139] - [299] reserved */ + + /* PPPoE */ + [300] = RTE_PTYPE_L2_ETHER_PPPOE, + [301] = RTE_PTYPE_L2_ETHER_PPPOE, + + /* PPPoE --> IPv4 */ + [302] = RTE_PTYPE_L2_ETHER_PPPOE | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_FRAG, + [303] = RTE_PTYPE_L2_ETHER_PPPOE | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_NONFRAG, + [304] = RTE_PTYPE_L2_ETHER_PPPOE | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_UDP, + [305] = RTE_PTYPE_L2_ETHER_PPPOE | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_TCP, + [306] = RTE_PTYPE_L2_ETHER_PPPOE | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_SCTP, + [307] = RTE_PTYPE_L2_ETHER_PPPOE | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_ICMP, + + /* PPPoE --> IPv6 */ + [308] = RTE_PTYPE_L2_ETHER_PPPOE | + RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_FRAG, + [309] = RTE_PTYPE_L2_ETHER_PPPOE | + RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_NONFRAG, + [310] = RTE_PTYPE_L2_ETHER_PPPOE | + RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_UDP, + [311] = RTE_PTYPE_L2_ETHER_PPPOE | + RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_TCP, + [312] = RTE_PTYPE_L2_ETHER_PPPOE | + RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_SCTP, + [313] = RTE_PTYPE_L2_ETHER_PPPOE | + RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_ICMP, + /* [314] - [324] 
reserved */ + + /* IPv4/IPv6 --> GTPC/GTPU */ + [325] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GTPC, + [326] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GTPC, + [327] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GTPC, + [328] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GTPC, + [329] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GTPU, + [330] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GTPU, + + /* IPv4 --> GTPU --> IPv4 */ + [331] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GTPU | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [332] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GTPU | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [333] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GTPU | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + [334] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GTPU | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [335] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GTPU | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + + /* IPv6 --> GTPU --> IPv4 */ + [336] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GTPU | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [337] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GTPU | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [338] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GTPU | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + [339] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GTPU | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [340] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GTPU | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + + /* IPv4 --> GTPU --> IPv6 */ + [341] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GTPU | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [342] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GTPU | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [343] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GTPU | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + [344] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GTPU | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [345] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GTPU | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + + /* IPv6 --> GTPU --> IPv6 */ + [346] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GTPU | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [347] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GTPU | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [348] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GTPU | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + [349] = RTE_PTYPE_L2_ETHER | 
RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GTPU | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [350] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GTPU | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + /* All others reserved */ + }; + + return ptype_tbl; +} diff --git a/src/spdk/dpdk/drivers/net/iavf/iavf_rxtx.h b/src/spdk/dpdk/drivers/net/iavf/iavf_rxtx.h new file mode 100644 index 000000000..59625a979 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/iavf/iavf_rxtx.h @@ -0,0 +1,534 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Intel Corporation + */ + +#ifndef _IAVF_RXTX_H_ +#define _IAVF_RXTX_H_ + +/* In QLEN must be whole number of 32 descriptors. */ +#define IAVF_ALIGN_RING_DESC 32 +#define IAVF_MIN_RING_DESC 64 +#define IAVF_MAX_RING_DESC 4096 +#define IAVF_DMA_MEM_ALIGN 4096 +/* Base address of the HW descriptor ring should be 128B aligned. */ +#define IAVF_RING_BASE_ALIGN 128 + +/* used for Rx Bulk Allocate */ +#define IAVF_RX_MAX_BURST 32 + +/* used for Vector PMD */ +#define IAVF_VPMD_RX_MAX_BURST 32 +#define IAVF_VPMD_TX_MAX_BURST 32 +#define IAVF_RXQ_REARM_THRESH 32 +#define IAVF_VPMD_DESCS_PER_LOOP 4 +#define IAVF_VPMD_TX_MAX_FREE_BUF 64 + +#define IAVF_NO_VECTOR_FLAGS ( \ + DEV_TX_OFFLOAD_MULTI_SEGS | \ + DEV_TX_OFFLOAD_VLAN_INSERT | \ + DEV_TX_OFFLOAD_SCTP_CKSUM | \ + DEV_TX_OFFLOAD_UDP_CKSUM | \ + DEV_TX_OFFLOAD_TCP_TSO | \ + DEV_TX_OFFLOAD_TCP_CKSUM) + +#define DEFAULT_TX_RS_THRESH 32 +#define DEFAULT_TX_FREE_THRESH 32 + +#define IAVF_MIN_TSO_MSS 256 +#define IAVF_MAX_TSO_MSS 9668 +#define IAVF_TSO_MAX_SEG UINT8_MAX +#define IAVF_TX_MAX_MTU_SEG 8 + +#define IAVF_TX_CKSUM_OFFLOAD_MASK ( \ + PKT_TX_IP_CKSUM | \ + PKT_TX_L4_MASK | \ + PKT_TX_TCP_SEG) + +#define IAVF_TX_OFFLOAD_MASK ( \ + PKT_TX_OUTER_IPV6 | \ + PKT_TX_OUTER_IPV4 | \ + PKT_TX_IPV6 | \ + PKT_TX_IPV4 | \ + PKT_TX_VLAN_PKT | \ + PKT_TX_IP_CKSUM | \ + PKT_TX_L4_MASK | \ + PKT_TX_TCP_SEG) + +#define IAVF_TX_OFFLOAD_NOTSUP_MASK \ + (PKT_TX_OFFLOAD_MASK ^ IAVF_TX_OFFLOAD_MASK) + +/* HW desc structure, both 16-byte and 32-byte types are supported */ +#ifdef RTE_LIBRTE_IAVF_16BYTE_RX_DESC +#define iavf_rx_desc iavf_16byte_rx_desc +#define iavf_rx_flex_desc iavf_16b_rx_flex_desc +#else +#define iavf_rx_desc iavf_32byte_rx_desc +#define iavf_rx_flex_desc iavf_32b_rx_flex_desc +#endif + +struct iavf_rxq_ops { + void (*release_mbufs)(struct iavf_rx_queue *rxq); +}; + +struct iavf_txq_ops { + void (*release_mbufs)(struct iavf_tx_queue *txq); +}; + +/* Structure associated with each Rx queue. 
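/*
 * Illustrative sketch, not driver code: the ring-sizing rules implied by the
 * constants above - the descriptor count must be a whole multiple of 32
 * (IAVF_ALIGN_RING_DESC) and lie within [64, 4096]. The helper below rounds
 * a requested count up and clamps it for illustration; the driver's queue
 * setup may simply reject values that violate these constraints instead.
 */
#include <stdint.h>
#include <stdio.h>

#define ALIGN_RING_DESC 32
#define MIN_RING_DESC   64
#define MAX_RING_DESC   4096

static uint16_t fixup_nb_desc(uint16_t requested)
{
	uint32_t n = ((uint32_t)requested + ALIGN_RING_DESC - 1) /
		     ALIGN_RING_DESC * ALIGN_RING_DESC;   /* round up to 32 */

	if (n < MIN_RING_DESC)
		n = MIN_RING_DESC;
	if (n > MAX_RING_DESC)
		n = MAX_RING_DESC;
	return (uint16_t)n;
}

int main(void)
{
	printf("%u %u %u\n",
	       (unsigned)fixup_nb_desc(100),    /* -> 128  */
	       (unsigned)fixup_nb_desc(10),     /* -> 64   */
	       (unsigned)fixup_nb_desc(5000));  /* -> 4096 */
	return 0;
}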
*/ +struct iavf_rx_queue { + struct rte_mempool *mp; /* mbuf pool to populate Rx ring */ + const struct rte_memzone *mz; /* memzone for Rx ring */ + volatile union iavf_rx_desc *rx_ring; /* Rx ring virtual address */ + uint64_t rx_ring_phys_addr; /* Rx ring DMA address */ + struct rte_mbuf **sw_ring; /* address of SW ring */ + uint16_t nb_rx_desc; /* ring length */ + uint16_t rx_tail; /* current value of tail */ + volatile uint8_t *qrx_tail; /* register address of tail */ + uint16_t rx_free_thresh; /* max free RX desc to hold */ + uint16_t nb_rx_hold; /* number of held free RX desc */ + struct rte_mbuf *pkt_first_seg; /* first segment of current packet */ + struct rte_mbuf *pkt_last_seg; /* last segment of current packet */ + struct rte_mbuf fake_mbuf; /* dummy mbuf */ + uint8_t rxdid; + + /* used for VPMD */ + uint16_t rxrearm_nb; /* number of remaining to be re-armed */ + uint16_t rxrearm_start; /* the idx we start the re-arming from */ + uint64_t mbuf_initializer; /* value to init mbufs */ + + /* for rx bulk */ + uint16_t rx_nb_avail; /* number of staged packets ready */ + uint16_t rx_next_avail; /* index of next staged packets */ + uint16_t rx_free_trigger; /* triggers rx buffer allocation */ + struct rte_mbuf *rx_stage[IAVF_RX_MAX_BURST * 2]; /* store mbuf */ + + uint16_t port_id; /* device port ID */ + uint8_t crc_len; /* 0 if CRC stripped, 4 otherwise */ + uint8_t fdir_enabled; /* 0 if FDIR disabled, 1 when enabled */ + uint16_t queue_id; /* Rx queue index */ + uint16_t rx_buf_len; /* The packet buffer size */ + uint16_t rx_hdr_len; /* The header buffer size */ + uint16_t max_pkt_len; /* Maximum packet length */ + struct iavf_vsi *vsi; /**< the VSI this queue belongs to */ + + bool q_set; /* if rx queue has been configured */ + bool rx_deferred_start; /* don't start this queue in dev start */ + const struct iavf_rxq_ops *ops; +}; + +struct iavf_tx_entry { + struct rte_mbuf *mbuf; + uint16_t next_id; + uint16_t last_id; +}; + +/* Structure associated with each TX queue. */ +struct iavf_tx_queue { + const struct rte_memzone *mz; /* memzone for Tx ring */ + volatile struct iavf_tx_desc *tx_ring; /* Tx ring virtual address */ + uint64_t tx_ring_phys_addr; /* Tx ring DMA address */ + struct iavf_tx_entry *sw_ring; /* address array of SW ring */ + uint16_t nb_tx_desc; /* ring length */ + uint16_t tx_tail; /* current value of tail */ + volatile uint8_t *qtx_tail; /* register address of tail */ + /* number of used desc since RS bit set */ + uint16_t nb_used; + uint16_t nb_free; + uint16_t last_desc_cleaned; /* last desc have been cleaned*/ + uint16_t free_thresh; + uint16_t rs_thresh; + + uint16_t port_id; + uint16_t queue_id; + uint64_t offloads; + uint16_t next_dd; /* next to set RS, for VPMD */ + uint16_t next_rs; /* next to check DD, for VPMD */ + + bool q_set; /* if rx queue has been configured */ + bool tx_deferred_start; /* don't start this queue in dev start */ + const struct iavf_txq_ops *ops; +}; + +/* Offload features */ +union iavf_tx_offload { + uint64_t data; + struct { + uint64_t l2_len:7; /* L2 (MAC) Header Length. */ + uint64_t l3_len:9; /* L3 (IP) Header Length. */ + uint64_t l4_len:8; /* L4 Header Length. 
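/*
 * Illustrative sketch, not driver code: the same bit-field packing idea as
 * union iavf_tx_offload here - the per-packet header lengths and the TSO
 * segment size share a single 64-bit word, so the Tx path can copy or test
 * the whole set of offload parameters as one integer. The union below is a
 * hypothetical stand-in with the same field widths.
 */
#include <stdint.h>
#include <stdio.h>

union tx_offload {
	uint64_t data;
	struct {
		uint64_t l2_len:7;     /* L2 (MAC) header length */
		uint64_t l3_len:9;     /* L3 (IP) header length  */
		uint64_t l4_len:8;     /* L4 header length       */
		uint64_t tso_segsz:16; /* TCP TSO segment size   */
		/* remaining 24 bits unused */
	};
};

int main(void)
{
	union tx_offload off = { .data = 0 };

	off.l2_len = 14;
	off.l3_len = 20;
	off.l4_len = 32;
	off.tso_segsz = 1460;

	/* All fields can be inspected at once through the raw word. */
	printf("raw=0x%016llx any_offload=%d\n",
	       (unsigned long long)off.data, off.data != 0);
	return 0;
}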
*/ + uint64_t tso_segsz:16; /* TCP TSO segment size */ + /* uint64_t unused : 24; */ + }; +}; + +/* Rx Flex Descriptors + * These descriptors are used instead of the legacy version descriptors + */ +union iavf_16b_rx_flex_desc { + struct { + __le64 pkt_addr; /* Packet buffer address */ + __le64 hdr_addr; /* Header buffer address */ + /* bit 0 of hdr_addr is DD bit */ + } read; + struct { + /* Qword 0 */ + u8 rxdid; /* descriptor builder profile ID */ + u8 mir_id_umb_cast; /* mirror=[5:0], umb=[7:6] */ + __le16 ptype_flex_flags0; /* ptype=[9:0], ff0=[15:10] */ + __le16 pkt_len; /* [15:14] are reserved */ + __le16 hdr_len_sph_flex_flags1; /* header=[10:0] */ + /* sph=[11:11] */ + /* ff1/ext=[15:12] */ + + /* Qword 1 */ + __le16 status_error0; + __le16 l2tag1; + __le16 flex_meta0; + __le16 flex_meta1; + } wb; /* writeback */ +}; + +union iavf_32b_rx_flex_desc { + struct { + __le64 pkt_addr; /* Packet buffer address */ + __le64 hdr_addr; /* Header buffer address */ + /* bit 0 of hdr_addr is DD bit */ + __le64 rsvd1; + __le64 rsvd2; + } read; + struct { + /* Qword 0 */ + u8 rxdid; /* descriptor builder profile ID */ + u8 mir_id_umb_cast; /* mirror=[5:0], umb=[7:6] */ + __le16 ptype_flex_flags0; /* ptype=[9:0], ff0=[15:10] */ + __le16 pkt_len; /* [15:14] are reserved */ + __le16 hdr_len_sph_flex_flags1; /* header=[10:0] */ + /* sph=[11:11] */ + /* ff1/ext=[15:12] */ + + /* Qword 1 */ + __le16 status_error0; + __le16 l2tag1; + __le16 flex_meta0; + __le16 flex_meta1; + + /* Qword 2 */ + __le16 status_error1; + u8 flex_flags2; + u8 time_stamp_low; + __le16 l2tag2_1st; + __le16 l2tag2_2nd; + + /* Qword 3 */ + __le16 flex_meta2; + __le16 flex_meta3; + union { + struct { + __le16 flex_meta4; + __le16 flex_meta5; + } flex; + __le32 ts_high; + } flex_ts; + } wb; /* writeback */ +}; + +/* Rx Flex Descriptor + * RxDID Profile ID 16-21 + * Flex-field 0: RSS hash lower 16-bits + * Flex-field 1: RSS hash upper 16-bits + * Flex-field 2: Flow ID lower 16-bits + * Flex-field 3: Flow ID upper 16-bits + * Flex-field 4: AUX0 + * Flex-field 5: AUX1 + */ +struct iavf_32b_rx_flex_desc_comms { + /* Qword 0 */ + u8 rxdid; + u8 mir_id_umb_cast; + __le16 ptype_flexi_flags0; + __le16 pkt_len; + __le16 hdr_len_sph_flex_flags1; + + /* Qword 1 */ + __le16 status_error0; + __le16 l2tag1; + __le32 rss_hash; + + /* Qword 2 */ + __le16 status_error1; + u8 flexi_flags2; + u8 ts_low; + __le16 l2tag2_1st; + __le16 l2tag2_2nd; + + /* Qword 3 */ + __le32 flow_id; + union { + struct { + __le16 aux0; + __le16 aux1; + } flex; + __le32 ts_high; + } flex_ts; +}; + +/* Rx Flex Descriptor + * RxDID Profile ID 22-23 (swap Hash and FlowID) + * Flex-field 0: Flow ID lower 16-bits + * Flex-field 1: Flow ID upper 16-bits + * Flex-field 2: RSS hash lower 16-bits + * Flex-field 3: RSS hash upper 16-bits + * Flex-field 4: AUX0 + * Flex-field 5: AUX1 + */ +struct iavf_32b_rx_flex_desc_comms_ovs { + /* Qword 0 */ + u8 rxdid; + u8 mir_id_umb_cast; + __le16 ptype_flexi_flags0; + __le16 pkt_len; + __le16 hdr_len_sph_flex_flags1; + + /* Qword 1 */ + __le16 status_error0; + __le16 l2tag1; + __le32 flow_id; + + /* Qword 2 */ + __le16 status_error1; + u8 flexi_flags2; + u8 ts_low; + __le16 l2tag2_1st; + __le16 l2tag2_2nd; + + /* Qword 3 */ + __le32 rss_hash; + union { + struct { + __le16 aux0; + __le16 aux1; + } flex; + __le32 ts_high; + } flex_ts; +}; + +/* Receive Flex Descriptor profile IDs: There are a total + * of 64 profiles where profile IDs 0/1 are for legacy; and + * profiles 2-63 are flex profiles that can be programmed + * with a specific 
metadata (profile 7 reserved for HW) + */ +enum iavf_rxdid { + IAVF_RXDID_LEGACY_0 = 0, + IAVF_RXDID_LEGACY_1 = 1, + IAVF_RXDID_FLEX_NIC = 2, + IAVF_RXDID_FLEX_NIC_2 = 6, + IAVF_RXDID_HW = 7, + IAVF_RXDID_COMMS_GENERIC = 16, + IAVF_RXDID_COMMS_AUX_VLAN = 17, + IAVF_RXDID_COMMS_AUX_IPV4 = 18, + IAVF_RXDID_COMMS_AUX_IPV6 = 19, + IAVF_RXDID_COMMS_AUX_IPV6_FLOW = 20, + IAVF_RXDID_COMMS_AUX_TCP = 21, + IAVF_RXDID_COMMS_OVS_1 = 22, + IAVF_RXDID_COMMS_OVS_2 = 23, + IAVF_RXDID_LAST = 63, +}; + +enum iavf_rx_flex_desc_status_error_0_bits { + /* Note: These are predefined bit offsets */ + IAVF_RX_FLEX_DESC_STATUS0_DD_S = 0, + IAVF_RX_FLEX_DESC_STATUS0_EOF_S, + IAVF_RX_FLEX_DESC_STATUS0_HBO_S, + IAVF_RX_FLEX_DESC_STATUS0_L3L4P_S, + IAVF_RX_FLEX_DESC_STATUS0_XSUM_IPE_S, + IAVF_RX_FLEX_DESC_STATUS0_XSUM_L4E_S, + IAVF_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S, + IAVF_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S, + IAVF_RX_FLEX_DESC_STATUS0_LPBK_S, + IAVF_RX_FLEX_DESC_STATUS0_IPV6EXADD_S, + IAVF_RX_FLEX_DESC_STATUS0_RXE_S, + IAVF_RX_FLEX_DESC_STATUS0_CRCP_S, + IAVF_RX_FLEX_DESC_STATUS0_RSS_VALID_S, + IAVF_RX_FLEX_DESC_STATUS0_L2TAG1P_S, + IAVF_RX_FLEX_DESC_STATUS0_XTRMD0_VALID_S, + IAVF_RX_FLEX_DESC_STATUS0_XTRMD1_VALID_S, + IAVF_RX_FLEX_DESC_STATUS0_LAST /* this entry must be last!!! */ +}; + +/* for iavf_32b_rx_flex_desc.ptype_flex_flags0 member */ +#define IAVF_RX_FLEX_DESC_PTYPE_M (0x3FF) /* 10-bits */ + +/* for iavf_32b_rx_flex_desc.pkt_len member */ +#define IAVF_RX_FLX_DESC_PKT_LEN_M (0x3FFF) /* 14-bits */ + +int iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, + uint16_t queue_idx, + uint16_t nb_desc, + unsigned int socket_id, + const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *mp); + +int iavf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id); +int iavf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id); +void iavf_dev_rx_queue_release(void *rxq); + +int iavf_dev_tx_queue_setup(struct rte_eth_dev *dev, + uint16_t queue_idx, + uint16_t nb_desc, + unsigned int socket_id, + const struct rte_eth_txconf *tx_conf); +int iavf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id); +int iavf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id); +void iavf_dev_tx_queue_release(void *txq); +void iavf_stop_queues(struct rte_eth_dev *dev); +uint16_t iavf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); +uint16_t iavf_recv_pkts_flex_rxd(void *rx_queue, + struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); +uint16_t iavf_recv_scattered_pkts(void *rx_queue, + struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); +uint16_t iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, + struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); +uint16_t iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); +uint16_t iavf_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); +void iavf_set_rx_function(struct rte_eth_dev *dev); +void iavf_set_tx_function(struct rte_eth_dev *dev); +void iavf_dev_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, + struct rte_eth_rxq_info *qinfo); +void iavf_dev_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, + struct rte_eth_txq_info *qinfo); +uint32_t iavf_dev_rxq_count(struct rte_eth_dev *dev, uint16_t queue_id); +int iavf_dev_rx_desc_status(void *rx_queue, uint16_t offset); +int iavf_dev_tx_desc_status(void *tx_queue, uint16_t offset); + +uint16_t iavf_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); +uint16_t iavf_recv_pkts_vec_flex_rxd(void *rx_queue, 
struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); +uint16_t iavf_recv_scattered_pkts_vec(void *rx_queue, + struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); +uint16_t iavf_recv_scattered_pkts_vec_flex_rxd(void *rx_queue, + struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); +uint16_t iavf_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); +uint16_t iavf_recv_pkts_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); +uint16_t iavf_recv_pkts_vec_avx2_flex_rxd(void *rx_queue, + struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); +uint16_t iavf_recv_scattered_pkts_vec_avx2(void *rx_queue, + struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); +uint16_t iavf_recv_scattered_pkts_vec_avx2_flex_rxd(void *rx_queue, + struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); +uint16_t iavf_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); +uint16_t iavf_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); +int iavf_rx_vec_dev_check(struct rte_eth_dev *dev); +int iavf_tx_vec_dev_check(struct rte_eth_dev *dev); +int iavf_rxq_vec_setup(struct iavf_rx_queue *rxq); +int iavf_txq_vec_setup(struct iavf_tx_queue *txq); + +const uint32_t *iavf_get_default_ptype_table(void); + +static inline +void iavf_dump_rx_descriptor(struct iavf_rx_queue *rxq, + const volatile void *desc, + uint16_t rx_id) +{ +#ifdef RTE_LIBRTE_IAVF_16BYTE_RX_DESC + const volatile union iavf_16byte_rx_desc *rx_desc = desc; + + printf("Queue %d Rx_desc %d: QW0: 0x%016"PRIx64" QW1: 0x%016"PRIx64"\n", + rxq->queue_id, rx_id, rx_desc->read.pkt_addr, + rx_desc->read.hdr_addr); +#else + const volatile union iavf_32byte_rx_desc *rx_desc = desc; + + printf("Queue %d Rx_desc %d: QW0: 0x%016"PRIx64" QW1: 0x%016"PRIx64 + " QW2: 0x%016"PRIx64" QW3: 0x%016"PRIx64"\n", rxq->queue_id, + rx_id, rx_desc->read.pkt_addr, rx_desc->read.hdr_addr, + rx_desc->read.rsvd1, rx_desc->read.rsvd2); +#endif +} + +/* All the descriptors are 16 bytes, so just use one of them + * to print the qwords + */ +static inline +void iavf_dump_tx_descriptor(const struct iavf_tx_queue *txq, + const volatile void *desc, uint16_t tx_id) +{ + const char *name; + const volatile struct iavf_tx_desc *tx_desc = desc; + enum iavf_tx_desc_dtype_value type; + + type = (enum iavf_tx_desc_dtype_value)rte_le_to_cpu_64( + tx_desc->cmd_type_offset_bsz & + rte_cpu_to_le_64(IAVF_TXD_QW1_DTYPE_MASK)); + switch (type) { + case IAVF_TX_DESC_DTYPE_DATA: + name = "Tx_data_desc"; + break; + case IAVF_TX_DESC_DTYPE_CONTEXT: + name = "Tx_context_desc"; + break; + default: + name = "unknown_desc"; + break; + } + + printf("Queue %d %s %d: QW0: 0x%016"PRIx64" QW1: 0x%016"PRIx64"\n", + txq->queue_id, name, tx_id, tx_desc->buffer_addr, + tx_desc->cmd_type_offset_bsz); +} + +#define FDIR_PROC_ENABLE_PER_QUEUE(ad, on) do { \ + int i; \ + for (i = 0; i < (ad)->eth_dev->data->nb_rx_queues; i++) { \ + struct iavf_rx_queue *rxq = (ad)->eth_dev->data->rx_queues[i]; \ + if (!rxq) \ + continue; \ + rxq->fdir_enabled = on; \ + } \ + PMD_DRV_LOG(DEBUG, "FDIR processing on RX set to %d", on); \ +} while (0) + +/* Enable/disable flow director Rx processing in data path. 
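/*
 * Illustrative sketch, not driver code: the reference-counting pattern of
 * the FDIR enable/disable helper here - every enable request turns per-queue
 * FDIR processing on and bumps a counter, but processing is only turned off
 * again when the last outstanding enable has been released. The state struct
 * below is a hypothetical stand-in for the adapter and its queues.
 */
#include <stdio.h>

struct fdir_state {
	int ref_cnt;
	int rx_proc_on;   /* stands in for the per-queue fdir_enabled flags */
};

static void fdir_rx_proc_enable(struct fdir_state *st, int on)
{
	if (on) {
		st->rx_proc_on = 1;
		st->ref_cnt++;
	} else if (st->ref_cnt >= 1) {
		st->ref_cnt--;
		if (st->ref_cnt == 0)
			st->rx_proc_on = 0;
	}
}

int main(void)
{
	struct fdir_state st = { 0, 0 };

	fdir_rx_proc_enable(&st, 1);
	fdir_rx_proc_enable(&st, 1);
	fdir_rx_proc_enable(&st, 0);
	printf("on=%d refs=%d\n", st.rx_proc_on, st.ref_cnt);  /* on=1 refs=1 */
	fdir_rx_proc_enable(&st, 0);
	printf("on=%d refs=%d\n", st.rx_proc_on, st.ref_cnt);  /* on=0 refs=0 */
	return 0;
}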
*/ +static inline +void iavf_fdir_rx_proc_enable(struct iavf_adapter *ad, bool on) +{ + if (on) { + /* enable flow director processing */ + FDIR_PROC_ENABLE_PER_QUEUE(ad, on); + ad->fdir_ref_cnt++; + } else { + if (ad->fdir_ref_cnt >= 1) { + ad->fdir_ref_cnt--; + + if (ad->fdir_ref_cnt == 0) + FDIR_PROC_ENABLE_PER_QUEUE(ad, on); + } + } +} + +#ifdef RTE_LIBRTE_IAVF_DEBUG_DUMP_DESC +#define IAVF_DUMP_RX_DESC(rxq, desc, rx_id) \ + iavf_dump_rx_descriptor(rxq, desc, rx_id) +#define IAVF_DUMP_TX_DESC(txq, desc, tx_id) \ + iavf_dump_tx_descriptor(txq, desc, tx_id) +#else +#define IAVF_DUMP_RX_DESC(rxq, desc, rx_id) do { } while (0) +#define IAVF_DUMP_TX_DESC(txq, desc, tx_id) do { } while (0) +#endif + +#endif /* _IAVF_RXTX_H_ */ diff --git a/src/spdk/dpdk/drivers/net/iavf/iavf_rxtx_vec_avx2.c b/src/spdk/dpdk/drivers/net/iavf/iavf_rxtx_vec_avx2.c new file mode 100644 index 000000000..e5e0fd309 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/iavf/iavf_rxtx_vec_avx2.c @@ -0,0 +1,1541 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019 Intel Corporation + */ + +#include "iavf_rxtx_vec_common.h" + +#include + +#ifndef __INTEL_COMPILER +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif + +static inline void +iavf_rxq_rearm(struct iavf_rx_queue *rxq) +{ + int i; + uint16_t rx_id; + volatile union iavf_rx_desc *rxdp; + struct rte_mbuf **rxp = &rxq->sw_ring[rxq->rxrearm_start]; + + rxdp = rxq->rx_ring + rxq->rxrearm_start; + + /* Pull 'n' more MBUFs into the software ring */ + if (rte_mempool_get_bulk(rxq->mp, + (void *)rxp, + IAVF_RXQ_REARM_THRESH) < 0) { + if (rxq->rxrearm_nb + IAVF_RXQ_REARM_THRESH >= + rxq->nb_rx_desc) { + __m128i dma_addr0; + + dma_addr0 = _mm_setzero_si128(); + for (i = 0; i < IAVF_VPMD_DESCS_PER_LOOP; i++) { + rxp[i] = &rxq->fake_mbuf; + _mm_store_si128((__m128i *)&rxdp[i].read, + dma_addr0); + } + } + rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed += + IAVF_RXQ_REARM_THRESH; + return; + } + +#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC + struct rte_mbuf *mb0, *mb1; + __m128i dma_addr0, dma_addr1; + __m128i hdr_room = _mm_set_epi64x(RTE_PKTMBUF_HEADROOM, + RTE_PKTMBUF_HEADROOM); + /* Initialize the mbufs in vector, process 2 mbufs in one loop */ + for (i = 0; i < IAVF_RXQ_REARM_THRESH; i += 2, rxp += 2) { + __m128i vaddr0, vaddr1; + + mb0 = rxp[0]; + mb1 = rxp[1]; + + /* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */ + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_physaddr) != + offsetof(struct rte_mbuf, buf_addr) + 8); + vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr); + vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr); + + /* convert pa to dma_addr hdr/data */ + dma_addr0 = _mm_unpackhi_epi64(vaddr0, vaddr0); + dma_addr1 = _mm_unpackhi_epi64(vaddr1, vaddr1); + + /* add headroom to pa values */ + dma_addr0 = _mm_add_epi64(dma_addr0, hdr_room); + dma_addr1 = _mm_add_epi64(dma_addr1, hdr_room); + + /* flush desc with pa dma_addr */ + _mm_store_si128((__m128i *)&rxdp++->read, dma_addr0); + _mm_store_si128((__m128i *)&rxdp++->read, dma_addr1); + } +#else + struct rte_mbuf *mb0, *mb1, *mb2, *mb3; + __m256i dma_addr0_1, dma_addr2_3; + __m256i hdr_room = _mm256_set1_epi64x(RTE_PKTMBUF_HEADROOM); + /* Initialize the mbufs in vector, process 4 mbufs in one loop */ + for (i = 0; i < IAVF_RXQ_REARM_THRESH; + i += 4, rxp += 4, rxdp += 4) { + __m128i vaddr0, vaddr1, vaddr2, vaddr3; + __m256i vaddr0_1, vaddr2_3; + + mb0 = rxp[0]; + mb1 = rxp[1]; + mb2 = rxp[2]; + mb3 = rxp[3]; + + /* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */ + 
RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_physaddr) != + offsetof(struct rte_mbuf, buf_addr) + 8); + vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr); + vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr); + vaddr2 = _mm_loadu_si128((__m128i *)&mb2->buf_addr); + vaddr3 = _mm_loadu_si128((__m128i *)&mb3->buf_addr); + + /** + * merge 0 & 1, by casting 0 to 256-bit and inserting 1 + * into the high lanes. Similarly for 2 & 3 + */ + vaddr0_1 = + _mm256_inserti128_si256(_mm256_castsi128_si256(vaddr0), + vaddr1, 1); + vaddr2_3 = + _mm256_inserti128_si256(_mm256_castsi128_si256(vaddr2), + vaddr3, 1); + + /* convert pa to dma_addr hdr/data */ + dma_addr0_1 = _mm256_unpackhi_epi64(vaddr0_1, vaddr0_1); + dma_addr2_3 = _mm256_unpackhi_epi64(vaddr2_3, vaddr2_3); + + /* add headroom to pa values */ + dma_addr0_1 = _mm256_add_epi64(dma_addr0_1, hdr_room); + dma_addr2_3 = _mm256_add_epi64(dma_addr2_3, hdr_room); + + /* flush desc with pa dma_addr */ + _mm256_store_si256((__m256i *)&rxdp->read, dma_addr0_1); + _mm256_store_si256((__m256i *)&(rxdp + 2)->read, dma_addr2_3); + } + +#endif + + rxq->rxrearm_start += IAVF_RXQ_REARM_THRESH; + if (rxq->rxrearm_start >= rxq->nb_rx_desc) + rxq->rxrearm_start = 0; + + rxq->rxrearm_nb -= IAVF_RXQ_REARM_THRESH; + + rx_id = (uint16_t)((rxq->rxrearm_start == 0) ? + (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1)); + + /* Update the tail pointer on the NIC */ + IAVF_PCI_REG_WRITE(rxq->qrx_tail, rx_id); +} + +#define PKTLEN_SHIFT 10 + +static inline uint16_t +_iavf_recv_raw_pkts_vec_avx2(struct iavf_rx_queue *rxq, + struct rte_mbuf **rx_pkts, + uint16_t nb_pkts, uint8_t *split_packet) +{ +#define IAVF_DESCS_PER_LOOP_AVX 8 + + /* const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl; */ + const uint32_t *type_table = rxq->vsi->adapter->ptype_tbl; + + const __m256i mbuf_init = _mm256_set_epi64x(0, 0, + 0, rxq->mbuf_initializer); + /* struct iavf_rx_entry *sw_ring = &rxq->sw_ring[rxq->rx_tail]; */ + struct rte_mbuf **sw_ring = &rxq->sw_ring[rxq->rx_tail]; + volatile union iavf_rx_desc *rxdp = rxq->rx_ring + rxq->rx_tail; + const int avx_aligned = ((rxq->rx_tail & 1) == 0); + + rte_prefetch0(rxdp); + + /* nb_pkts has to be floor-aligned to IAVF_DESCS_PER_LOOP_AVX */ + nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, IAVF_DESCS_PER_LOOP_AVX); + + /* See if we need to rearm the RX queue - gives the prefetch a bit + * of time to act + */ + if (rxq->rxrearm_nb > IAVF_RXQ_REARM_THRESH) + iavf_rxq_rearm(rxq); + + /* Before we start moving massive data around, check to see if + * there is actually a packet available + */ + if (!(rxdp->wb.qword1.status_error_len & + rte_cpu_to_le_32(1 << IAVF_RX_DESC_STATUS_DD_SHIFT))) + return 0; + + /* constants used in processing loop */ + const __m256i crc_adjust = + _mm256_set_epi16 + (/* first descriptor */ + 0, 0, 0, /* ignore non-length fields */ + -rxq->crc_len, /* sub crc on data_len */ + 0, /* ignore high-16bits of pkt_len */ + -rxq->crc_len, /* sub crc on pkt_len */ + 0, 0, /* ignore pkt_type field */ + /* second descriptor */ + 0, 0, 0, /* ignore non-length fields */ + -rxq->crc_len, /* sub crc on data_len */ + 0, /* ignore high-16bits of pkt_len */ + -rxq->crc_len, /* sub crc on pkt_len */ + 0, 0 /* ignore pkt_type field */ + ); + + /* 8 packets DD mask, LSB in each 32-bit value */ + const __m256i dd_check = _mm256_set1_epi32(1); + + /* 8 packets EOP mask, second-LSB in each 32-bit value */ + const __m256i eop_check = _mm256_slli_epi32(dd_check, + IAVF_RX_DESC_STATUS_EOF_SHIFT); + + /* mask to shuffle from desc. 
to mbuf (2 descriptors)*/ + const __m256i shuf_msk = + _mm256_set_epi8 + (/* first descriptor */ + 7, 6, 5, 4, /* octet 4~7, 32bits rss */ + 3, 2, /* octet 2~3, low 16 bits vlan_macip */ + 15, 14, /* octet 15~14, 16 bits data_len */ + 0xFF, 0xFF, /* skip high 16 bits pkt_len, zero out */ + 15, 14, /* octet 15~14, low 16 bits pkt_len */ + 0xFF, 0xFF, /* pkt_type set as unknown */ + 0xFF, 0xFF, /*pkt_type set as unknown */ + /* second descriptor */ + 7, 6, 5, 4, /* octet 4~7, 32bits rss */ + 3, 2, /* octet 2~3, low 16 bits vlan_macip */ + 15, 14, /* octet 15~14, 16 bits data_len */ + 0xFF, 0xFF, /* skip high 16 bits pkt_len, zero out */ + 15, 14, /* octet 15~14, low 16 bits pkt_len */ + 0xFF, 0xFF, /* pkt_type set as unknown */ + 0xFF, 0xFF /*pkt_type set as unknown */ + ); + /** + * compile-time check the above crc and shuffle layout is correct. + * NOTE: the first field (lowest address) is given last in set_epi + * calls above. + */ + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) != + offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4); + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) != + offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8); + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, vlan_tci) != + offsetof(struct rte_mbuf, rx_descriptor_fields1) + 10); + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) != + offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12); + + /* Status/Error flag masks */ + /** + * mask everything except RSS, flow director and VLAN flags + * bit2 is for VLAN tag, bit11 for flow director indication + * bit13:12 for RSS indication. Bits 3-5 of error + * field (bits 22-24) are for IP/L4 checksum errors + */ + const __m256i flags_mask = + _mm256_set1_epi32((1 << 2) | (1 << 11) | + (3 << 12) | (7 << 22)); + /** + * data to be shuffled by result of flag mask. If VLAN bit is set, + * (bit 2), then position 4 in this array will be used in the + * destination + */ + const __m256i vlan_flags_shuf = + _mm256_set_epi32(0, 0, PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, 0, + 0, 0, PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, 0); + /** + * data to be shuffled by result of flag mask, shifted down 11. + * If RSS/FDIR bits are set, shuffle moves appropriate flags in + * place. + */ + const __m256i rss_flags_shuf = + _mm256_set_epi8(0, 0, 0, 0, 0, 0, 0, 0, + PKT_RX_RSS_HASH | PKT_RX_FDIR, PKT_RX_RSS_HASH, + 0, 0, 0, 0, PKT_RX_FDIR, 0,/* end up 128-bits */ + 0, 0, 0, 0, 0, 0, 0, 0, + PKT_RX_RSS_HASH | PKT_RX_FDIR, PKT_RX_RSS_HASH, + 0, 0, 0, 0, PKT_RX_FDIR, 0); + + /** + * data to be shuffled by the result of the flags mask shifted by 22 + * bits. This gives use the l3_l4 flags. 
+ */ + const __m256i l3_l4_flags_shuf = _mm256_set_epi8(0, 0, 0, 0, 0, 0, 0, 0, + /* shift right 1 bit to make sure it not exceed 255 */ + (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | + PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD | + PKT_RX_L4_CKSUM_BAD) >> 1, + (PKT_RX_EIP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD) >> 1, + (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD) >> 1, + PKT_RX_IP_CKSUM_BAD >> 1, + (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> 1, + /* second 128-bits */ + 0, 0, 0, 0, 0, 0, 0, 0, + (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | + PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD | + PKT_RX_L4_CKSUM_BAD) >> 1, + (PKT_RX_EIP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD) >> 1, + (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD) >> 1, + PKT_RX_IP_CKSUM_BAD >> 1, + (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> 1); + + const __m256i cksum_mask = + _mm256_set1_epi32(PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD | + PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD | + PKT_RX_EIP_CKSUM_BAD); + + RTE_SET_USED(avx_aligned); /* for 32B descriptors we don't use this */ + + uint16_t i, received; + + for (i = 0, received = 0; i < nb_pkts; + i += IAVF_DESCS_PER_LOOP_AVX, + rxdp += IAVF_DESCS_PER_LOOP_AVX) { + /* step 1, copy over 8 mbuf pointers to rx_pkts array */ + _mm256_storeu_si256((void *)&rx_pkts[i], + _mm256_loadu_si256((void *)&sw_ring[i])); +#ifdef RTE_ARCH_X86_64 + _mm256_storeu_si256 + ((void *)&rx_pkts[i + 4], + _mm256_loadu_si256((void *)&sw_ring[i + 4])); +#endif + + __m256i raw_desc0_1, raw_desc2_3, raw_desc4_5, raw_desc6_7; +#ifdef RTE_LIBRTE_IAVF_16BYTE_RX_DESC + /* for AVX we need alignment otherwise loads are not atomic */ + if (avx_aligned) { + /* load in descriptors, 2 at a time, in reverse order */ + raw_desc6_7 = _mm256_load_si256((void *)(rxdp + 6)); + rte_compiler_barrier(); + raw_desc4_5 = _mm256_load_si256((void *)(rxdp + 4)); + rte_compiler_barrier(); + raw_desc2_3 = _mm256_load_si256((void *)(rxdp + 2)); + rte_compiler_barrier(); + raw_desc0_1 = _mm256_load_si256((void *)(rxdp + 0)); + } else +#endif + { + const __m128i raw_desc7 = + _mm_load_si128((void *)(rxdp + 7)); + rte_compiler_barrier(); + const __m128i raw_desc6 = + _mm_load_si128((void *)(rxdp + 6)); + rte_compiler_barrier(); + const __m128i raw_desc5 = + _mm_load_si128((void *)(rxdp + 5)); + rte_compiler_barrier(); + const __m128i raw_desc4 = + _mm_load_si128((void *)(rxdp + 4)); + rte_compiler_barrier(); + const __m128i raw_desc3 = + _mm_load_si128((void *)(rxdp + 3)); + rte_compiler_barrier(); + const __m128i raw_desc2 = + _mm_load_si128((void *)(rxdp + 2)); + rte_compiler_barrier(); + const __m128i raw_desc1 = + _mm_load_si128((void *)(rxdp + 1)); + rte_compiler_barrier(); + const __m128i raw_desc0 = + _mm_load_si128((void *)(rxdp + 0)); + + raw_desc6_7 = + _mm256_inserti128_si256 + (_mm256_castsi128_si256(raw_desc6), + raw_desc7, 1); + raw_desc4_5 = + _mm256_inserti128_si256 + (_mm256_castsi128_si256(raw_desc4), + raw_desc5, 1); + raw_desc2_3 = + _mm256_inserti128_si256 + (_mm256_castsi128_si256(raw_desc2), + raw_desc3, 1); + raw_desc0_1 = + _mm256_inserti128_si256 + (_mm256_castsi128_si256(raw_desc0), + raw_desc1, 1); + } + + if (split_packet) { + int j; + + for (j = 0; j < IAVF_DESCS_PER_LOOP_AVX; j++) + rte_mbuf_prefetch_part2(rx_pkts[i + j]); + } + + /** + 
* convert descriptors 4-7 into mbufs, adjusting length and + * re-arranging fields. Then write into the mbuf + */ + const __m256i len6_7 = _mm256_slli_epi32(raw_desc6_7, + PKTLEN_SHIFT); + const __m256i len4_5 = _mm256_slli_epi32(raw_desc4_5, + PKTLEN_SHIFT); + const __m256i desc6_7 = _mm256_blend_epi16(raw_desc6_7, + len6_7, 0x80); + const __m256i desc4_5 = _mm256_blend_epi16(raw_desc4_5, + len4_5, 0x80); + __m256i mb6_7 = _mm256_shuffle_epi8(desc6_7, shuf_msk); + __m256i mb4_5 = _mm256_shuffle_epi8(desc4_5, shuf_msk); + + mb6_7 = _mm256_add_epi16(mb6_7, crc_adjust); + mb4_5 = _mm256_add_epi16(mb4_5, crc_adjust); + /** + * to get packet types, shift 64-bit values down 30 bits + * and so ptype is in lower 8-bits in each + */ + const __m256i ptypes6_7 = _mm256_srli_epi64(desc6_7, 30); + const __m256i ptypes4_5 = _mm256_srli_epi64(desc4_5, 30); + const uint8_t ptype7 = _mm256_extract_epi8(ptypes6_7, 24); + const uint8_t ptype6 = _mm256_extract_epi8(ptypes6_7, 8); + const uint8_t ptype5 = _mm256_extract_epi8(ptypes4_5, 24); + const uint8_t ptype4 = _mm256_extract_epi8(ptypes4_5, 8); + + mb6_7 = _mm256_insert_epi32(mb6_7, type_table[ptype7], 4); + mb6_7 = _mm256_insert_epi32(mb6_7, type_table[ptype6], 0); + mb4_5 = _mm256_insert_epi32(mb4_5, type_table[ptype5], 4); + mb4_5 = _mm256_insert_epi32(mb4_5, type_table[ptype4], 0); + /* merge the status bits into one register */ + const __m256i status4_7 = _mm256_unpackhi_epi32(desc6_7, + desc4_5); + + /** + * convert descriptors 0-3 into mbufs, adjusting length and + * re-arranging fields. Then write into the mbuf + */ + const __m256i len2_3 = _mm256_slli_epi32(raw_desc2_3, + PKTLEN_SHIFT); + const __m256i len0_1 = _mm256_slli_epi32(raw_desc0_1, + PKTLEN_SHIFT); + const __m256i desc2_3 = _mm256_blend_epi16(raw_desc2_3, + len2_3, 0x80); + const __m256i desc0_1 = _mm256_blend_epi16(raw_desc0_1, + len0_1, 0x80); + __m256i mb2_3 = _mm256_shuffle_epi8(desc2_3, shuf_msk); + __m256i mb0_1 = _mm256_shuffle_epi8(desc0_1, shuf_msk); + + mb2_3 = _mm256_add_epi16(mb2_3, crc_adjust); + mb0_1 = _mm256_add_epi16(mb0_1, crc_adjust); + /* get the packet types */ + const __m256i ptypes2_3 = _mm256_srli_epi64(desc2_3, 30); + const __m256i ptypes0_1 = _mm256_srli_epi64(desc0_1, 30); + const uint8_t ptype3 = _mm256_extract_epi8(ptypes2_3, 24); + const uint8_t ptype2 = _mm256_extract_epi8(ptypes2_3, 8); + const uint8_t ptype1 = _mm256_extract_epi8(ptypes0_1, 24); + const uint8_t ptype0 = _mm256_extract_epi8(ptypes0_1, 8); + + mb2_3 = _mm256_insert_epi32(mb2_3, type_table[ptype3], 4); + mb2_3 = _mm256_insert_epi32(mb2_3, type_table[ptype2], 0); + mb0_1 = _mm256_insert_epi32(mb0_1, type_table[ptype1], 4); + mb0_1 = _mm256_insert_epi32(mb0_1, type_table[ptype0], 0); + /* merge the status bits into one register */ + const __m256i status0_3 = _mm256_unpackhi_epi32(desc2_3, + desc0_1); + + /** + * take the two sets of status bits and merge to one + * After merge, the packets status flags are in the + * order (hi->lo): [1, 3, 5, 7, 0, 2, 4, 6] + */ + __m256i status0_7 = _mm256_unpacklo_epi64(status4_7, + status0_3); + + /* now do flag manipulation */ + + /* get only flag/error bits we want */ + const __m256i flag_bits = + _mm256_and_si256(status0_7, flags_mask); + /* set vlan and rss flags */ + const __m256i vlan_flags = + _mm256_shuffle_epi8(vlan_flags_shuf, flag_bits); + const __m256i rss_flags = + _mm256_shuffle_epi8(rss_flags_shuf, + _mm256_srli_epi32(flag_bits, 11)); + /** + * l3_l4_error flags, shuffle, then shift to correct adjustment + * of flags in flags_shuf, and 
finally mask out extra bits + */ + __m256i l3_l4_flags = _mm256_shuffle_epi8(l3_l4_flags_shuf, + _mm256_srli_epi32(flag_bits, 22)); + l3_l4_flags = _mm256_slli_epi32(l3_l4_flags, 1); + l3_l4_flags = _mm256_and_si256(l3_l4_flags, cksum_mask); + + /* merge flags */ + const __m256i mbuf_flags = _mm256_or_si256(l3_l4_flags, + _mm256_or_si256(rss_flags, vlan_flags)); + /** + * At this point, we have the 8 sets of flags in the low 16-bits + * of each 32-bit value in vlan0. + * We want to extract these, and merge them with the mbuf init + * data so we can do a single write to the mbuf to set the flags + * and all the other initialization fields. Extracting the + * appropriate flags means that we have to do a shift and blend + * for each mbuf before we do the write. However, we can also + * add in the previously computed rx_descriptor fields to + * make a single 256-bit write per mbuf + */ + /* check the structure matches expectations */ + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) != + offsetof(struct rte_mbuf, rearm_data) + 8); + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, rearm_data) != + RTE_ALIGN(offsetof(struct rte_mbuf, + rearm_data), + 16)); + /* build up data and do writes */ + __m256i rearm0, rearm1, rearm2, rearm3, rearm4, rearm5, + rearm6, rearm7; + rearm6 = _mm256_blend_epi32(mbuf_init, + _mm256_slli_si256(mbuf_flags, 8), + 0x04); + rearm4 = _mm256_blend_epi32(mbuf_init, + _mm256_slli_si256(mbuf_flags, 4), + 0x04); + rearm2 = _mm256_blend_epi32(mbuf_init, mbuf_flags, 0x04); + rearm0 = _mm256_blend_epi32(mbuf_init, + _mm256_srli_si256(mbuf_flags, 4), + 0x04); + /* permute to add in the rx_descriptor e.g. rss fields */ + rearm6 = _mm256_permute2f128_si256(rearm6, mb6_7, 0x20); + rearm4 = _mm256_permute2f128_si256(rearm4, mb4_5, 0x20); + rearm2 = _mm256_permute2f128_si256(rearm2, mb2_3, 0x20); + rearm0 = _mm256_permute2f128_si256(rearm0, mb0_1, 0x20); + /* write to mbuf */ + _mm256_storeu_si256((__m256i *)&rx_pkts[i + 6]->rearm_data, + rearm6); + _mm256_storeu_si256((__m256i *)&rx_pkts[i + 4]->rearm_data, + rearm4); + _mm256_storeu_si256((__m256i *)&rx_pkts[i + 2]->rearm_data, + rearm2); + _mm256_storeu_si256((__m256i *)&rx_pkts[i + 0]->rearm_data, + rearm0); + + /* repeat for the odd mbufs */ + const __m256i odd_flags = + _mm256_castsi128_si256 + (_mm256_extracti128_si256(mbuf_flags, 1)); + rearm7 = _mm256_blend_epi32(mbuf_init, + _mm256_slli_si256(odd_flags, 8), + 0x04); + rearm5 = _mm256_blend_epi32(mbuf_init, + _mm256_slli_si256(odd_flags, 4), + 0x04); + rearm3 = _mm256_blend_epi32(mbuf_init, odd_flags, 0x04); + rearm1 = _mm256_blend_epi32(mbuf_init, + _mm256_srli_si256(odd_flags, 4), + 0x04); + /* since odd mbufs are already in hi 128-bits use blend */ + rearm7 = _mm256_blend_epi32(rearm7, mb6_7, 0xF0); + rearm5 = _mm256_blend_epi32(rearm5, mb4_5, 0xF0); + rearm3 = _mm256_blend_epi32(rearm3, mb2_3, 0xF0); + rearm1 = _mm256_blend_epi32(rearm1, mb0_1, 0xF0); + /* again write to mbufs */ + _mm256_storeu_si256((__m256i *)&rx_pkts[i + 7]->rearm_data, + rearm7); + _mm256_storeu_si256((__m256i *)&rx_pkts[i + 5]->rearm_data, + rearm5); + _mm256_storeu_si256((__m256i *)&rx_pkts[i + 3]->rearm_data, + rearm3); + _mm256_storeu_si256((__m256i *)&rx_pkts[i + 1]->rearm_data, + rearm1); + + /* extract and record EOP bit */ + if (split_packet) { + const __m128i eop_mask = + _mm_set1_epi16(1 << IAVF_RX_DESC_STATUS_EOF_SHIFT); + const __m256i eop_bits256 = _mm256_and_si256(status0_7, + eop_check); + /* pack status bits into a single 128-bit register */ + const __m128i eop_bits = + 
_mm_packus_epi32 + (_mm256_castsi256_si128(eop_bits256), + _mm256_extractf128_si256(eop_bits256, + 1)); + /** + * flip bits, and mask out the EOP bit, which is now + * a split-packet bit i.e. !EOP, rather than EOP one. + */ + __m128i split_bits = _mm_andnot_si128(eop_bits, + eop_mask); + /** + * eop bits are out of order, so we need to shuffle them + * back into order again. In doing so, only use low 8 + * bits, which acts like another pack instruction + * The original order is (hi->lo): 1,3,5,7,0,2,4,6 + * [Since we use epi8, the 16-bit positions are + * multiplied by 2 in the eop_shuffle value.] + */ + __m128i eop_shuffle = + _mm_set_epi8(/* zero hi 64b */ + 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, + /* move values to lo 64b */ + 8, 0, 10, 2, + 12, 4, 14, 6); + split_bits = _mm_shuffle_epi8(split_bits, eop_shuffle); + *(uint64_t *)split_packet = + _mm_cvtsi128_si64(split_bits); + split_packet += IAVF_DESCS_PER_LOOP_AVX; + } + + /* perform dd_check */ + status0_7 = _mm256_and_si256(status0_7, dd_check); + status0_7 = _mm256_packs_epi32(status0_7, + _mm256_setzero_si256()); + + uint64_t burst = __builtin_popcountll + (_mm_cvtsi128_si64 + (_mm256_extracti128_si256 + (status0_7, 1))); + burst += __builtin_popcountll + (_mm_cvtsi128_si64 + (_mm256_castsi256_si128(status0_7))); + received += burst; + if (burst != IAVF_DESCS_PER_LOOP_AVX) + break; + } + + /* update tail pointers */ + rxq->rx_tail += received; + rxq->rx_tail &= (rxq->nb_rx_desc - 1); + if ((rxq->rx_tail & 1) == 1 && received > 1) { /* keep avx2 aligned */ + rxq->rx_tail--; + received--; + } + rxq->rxrearm_nb += received; + return received; +} + +static inline __m256i +flex_rxd_to_fdir_flags_vec_avx2(const __m256i fdir_id0_7) +{ +#define FDID_MIS_MAGIC 0xFFFFFFFF + RTE_BUILD_BUG_ON(PKT_RX_FDIR != (1 << 2)); + RTE_BUILD_BUG_ON(PKT_RX_FDIR_ID != (1 << 13)); + const __m256i pkt_fdir_bit = _mm256_set1_epi32(PKT_RX_FDIR | + PKT_RX_FDIR_ID); + /* desc->flow_id field == 0xFFFFFFFF means fdir mismatch */ + const __m256i fdir_mis_mask = _mm256_set1_epi32(FDID_MIS_MAGIC); + __m256i fdir_mask = _mm256_cmpeq_epi32(fdir_id0_7, + fdir_mis_mask); + /* this XOR op results to bit-reverse the fdir_mask */ + fdir_mask = _mm256_xor_si256(fdir_mask, fdir_mis_mask); + const __m256i fdir_flags = _mm256_and_si256(fdir_mask, pkt_fdir_bit); + + return fdir_flags; +} + +static inline uint16_t +_iavf_recv_raw_pkts_vec_avx2_flex_rxd(struct iavf_rx_queue *rxq, + struct rte_mbuf **rx_pkts, + uint16_t nb_pkts, uint8_t *split_packet) +{ +#define IAVF_DESCS_PER_LOOP_AVX 8 + + const uint32_t *type_table = rxq->vsi->adapter->ptype_tbl; + + const __m256i mbuf_init = _mm256_set_epi64x(0, 0, + 0, rxq->mbuf_initializer); + struct rte_mbuf **sw_ring = &rxq->sw_ring[rxq->rx_tail]; + volatile union iavf_rx_flex_desc *rxdp = + (union iavf_rx_flex_desc *)rxq->rx_ring + rxq->rx_tail; + + rte_prefetch0(rxdp); + + /* nb_pkts has to be floor-aligned to IAVF_DESCS_PER_LOOP_AVX */ + nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, IAVF_DESCS_PER_LOOP_AVX); + + /* See if we need to rearm the RX queue - gives the prefetch a bit + * of time to act + */ + if (rxq->rxrearm_nb > IAVF_RXQ_REARM_THRESH) + iavf_rxq_rearm(rxq); + + /* Before we start moving massive data around, check to see if + * there is actually a packet available + */ + if (!(rxdp->wb.status_error0 & + rte_cpu_to_le_32(1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S))) + return 0; + + /* constants used in processing loop */ + const __m256i crc_adjust = + _mm256_set_epi16 + (/* first descriptor */ + 0, 0, 0, /* ignore non-length fields 
*/ + -rxq->crc_len, /* sub crc on data_len */ + 0, /* ignore high-16bits of pkt_len */ + -rxq->crc_len, /* sub crc on pkt_len */ + 0, 0, /* ignore pkt_type field */ + /* second descriptor */ + 0, 0, 0, /* ignore non-length fields */ + -rxq->crc_len, /* sub crc on data_len */ + 0, /* ignore high-16bits of pkt_len */ + -rxq->crc_len, /* sub crc on pkt_len */ + 0, 0 /* ignore pkt_type field */ + ); + + /* 8 packets DD mask, LSB in each 32-bit value */ + const __m256i dd_check = _mm256_set1_epi32(1); + + /* 8 packets EOP mask, second-LSB in each 32-bit value */ + const __m256i eop_check = _mm256_slli_epi32(dd_check, + IAVF_RX_FLEX_DESC_STATUS0_EOF_S); + + /* mask to shuffle from desc. to mbuf (2 descriptors)*/ + const __m256i shuf_msk = + _mm256_set_epi8 + (/* first descriptor */ + 0xFF, 0xFF, + 0xFF, 0xFF, /* rss hash parsed separately */ + 11, 10, /* octet 10~11, 16 bits vlan_macip */ + 5, 4, /* octet 4~5, 16 bits data_len */ + 0xFF, 0xFF, /* skip hi 16 bits pkt_len, zero out */ + 5, 4, /* octet 4~5, 16 bits pkt_len */ + 0xFF, 0xFF, /* pkt_type set as unknown */ + 0xFF, 0xFF, /*pkt_type set as unknown */ + /* second descriptor */ + 0xFF, 0xFF, + 0xFF, 0xFF, /* rss hash parsed separately */ + 11, 10, /* octet 10~11, 16 bits vlan_macip */ + 5, 4, /* octet 4~5, 16 bits data_len */ + 0xFF, 0xFF, /* skip hi 16 bits pkt_len, zero out */ + 5, 4, /* octet 4~5, 16 bits pkt_len */ + 0xFF, 0xFF, /* pkt_type set as unknown */ + 0xFF, 0xFF /*pkt_type set as unknown */ + ); + /** + * compile-time check the above crc and shuffle layout is correct. + * NOTE: the first field (lowest address) is given last in set_epi + * calls above. + */ + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) != + offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4); + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) != + offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8); + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, vlan_tci) != + offsetof(struct rte_mbuf, rx_descriptor_fields1) + 10); + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) != + offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12); + + /* Status/Error flag masks */ + /** + * mask everything except Checksum Reports, RSS indication + * and VLAN indication. + * bit6:4 for IP/L4 checksum errors. + * bit12 is for RSS indication. + * bit13 is for VLAN indication. + */ + const __m256i flags_mask = + _mm256_set1_epi32((7 << 4) | (1 << 12) | (1 << 13)); + /** + * data to be shuffled by the result of the flags mask shifted by 4 + * bits. This gives use the l3_l4 flags. 
+ */ + const __m256i l3_l4_flags_shuf = _mm256_set_epi8(0, 0, 0, 0, 0, 0, 0, 0, + /* shift right 1 bit to make sure it not exceed 255 */ + (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | + PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | + PKT_RX_IP_CKSUM_GOOD) >> 1, + (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD | + PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD | + PKT_RX_IP_CKSUM_GOOD) >> 1, + (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1, + (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1, + /* second 128-bits */ + 0, 0, 0, 0, 0, 0, 0, 0, + (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | + PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | + PKT_RX_IP_CKSUM_GOOD) >> 1, + (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD | + PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD | + PKT_RX_IP_CKSUM_GOOD) >> 1, + (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1, + (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1); + const __m256i cksum_mask = + _mm256_set1_epi32(PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD | + PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD | + PKT_RX_EIP_CKSUM_BAD); + /** + * data to be shuffled by result of flag mask, shifted down 12. + * If RSS(bit12)/VLAN(bit13) are set, + * shuffle moves appropriate flags in place. + */ + const __m256i rss_vlan_flags_shuf = _mm256_set_epi8(0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + PKT_RX_RSS_HASH | PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, + PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, + PKT_RX_RSS_HASH, 0, + /* end up 128-bits */ + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + PKT_RX_RSS_HASH | PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, + PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, + PKT_RX_RSS_HASH, 0); + + uint16_t i, received; + + for (i = 0, received = 0; i < nb_pkts; + i += IAVF_DESCS_PER_LOOP_AVX, + rxdp += IAVF_DESCS_PER_LOOP_AVX) { + /* step 1, copy over 8 mbuf pointers to rx_pkts array */ + _mm256_storeu_si256((void *)&rx_pkts[i], + _mm256_loadu_si256((void *)&sw_ring[i])); +#ifdef RTE_ARCH_X86_64 + _mm256_storeu_si256 + ((void *)&rx_pkts[i + 4], + _mm256_loadu_si256((void *)&sw_ring[i + 4])); +#endif + + __m256i raw_desc0_1, raw_desc2_3, raw_desc4_5, raw_desc6_7; + + const __m128i raw_desc7 = + _mm_load_si128((void *)(rxdp + 7)); + rte_compiler_barrier(); + const __m128i raw_desc6 = + _mm_load_si128((void *)(rxdp + 6)); + rte_compiler_barrier(); + const __m128i raw_desc5 = + _mm_load_si128((void *)(rxdp + 5)); + rte_compiler_barrier(); + const __m128i raw_desc4 = + _mm_load_si128((void *)(rxdp + 4)); + rte_compiler_barrier(); + const __m128i raw_desc3 = + _mm_load_si128((void *)(rxdp + 3)); + rte_compiler_barrier(); + const __m128i raw_desc2 = + _mm_load_si128((void *)(rxdp + 2)); + rte_compiler_barrier(); + const __m128i raw_desc1 = + _mm_load_si128((void *)(rxdp + 1)); + rte_compiler_barrier(); + const __m128i raw_desc0 = + _mm_load_si128((void *)(rxdp + 0)); + + raw_desc6_7 = + _mm256_inserti128_si256 + (_mm256_castsi128_si256(raw_desc6), + raw_desc7, 1); + raw_desc4_5 = + _mm256_inserti128_si256 + (_mm256_castsi128_si256(raw_desc4), + raw_desc5, 1); + raw_desc2_3 = + _mm256_inserti128_si256 + (_mm256_castsi128_si256(raw_desc2), + raw_desc3, 1); + raw_desc0_1 = + _mm256_inserti128_si256 + (_mm256_castsi128_si256(raw_desc0), + raw_desc1, 1); + + if 
(split_packet) { + int j; + + for (j = 0; j < IAVF_DESCS_PER_LOOP_AVX; j++) + rte_mbuf_prefetch_part2(rx_pkts[i + j]); + } + + /** + * convert descriptors 4-7 into mbufs, re-arrange fields. + * Then write into the mbuf. + */ + __m256i mb6_7 = _mm256_shuffle_epi8(raw_desc6_7, shuf_msk); + __m256i mb4_5 = _mm256_shuffle_epi8(raw_desc4_5, shuf_msk); + + mb6_7 = _mm256_add_epi16(mb6_7, crc_adjust); + mb4_5 = _mm256_add_epi16(mb4_5, crc_adjust); + /** + * to get packet types, ptype is located in bit16-25 + * of each 128bits + */ + const __m256i ptype_mask = + _mm256_set1_epi16(IAVF_RX_FLEX_DESC_PTYPE_M); + const __m256i ptypes6_7 = + _mm256_and_si256(raw_desc6_7, ptype_mask); + const __m256i ptypes4_5 = + _mm256_and_si256(raw_desc4_5, ptype_mask); + const uint16_t ptype7 = _mm256_extract_epi16(ptypes6_7, 9); + const uint16_t ptype6 = _mm256_extract_epi16(ptypes6_7, 1); + const uint16_t ptype5 = _mm256_extract_epi16(ptypes4_5, 9); + const uint16_t ptype4 = _mm256_extract_epi16(ptypes4_5, 1); + + mb6_7 = _mm256_insert_epi32(mb6_7, type_table[ptype7], 4); + mb6_7 = _mm256_insert_epi32(mb6_7, type_table[ptype6], 0); + mb4_5 = _mm256_insert_epi32(mb4_5, type_table[ptype5], 4); + mb4_5 = _mm256_insert_epi32(mb4_5, type_table[ptype4], 0); + /* merge the status bits into one register */ + const __m256i status4_7 = _mm256_unpackhi_epi32(raw_desc6_7, + raw_desc4_5); + + /** + * convert descriptors 0-3 into mbufs, re-arrange fields. + * Then write into the mbuf. + */ + __m256i mb2_3 = _mm256_shuffle_epi8(raw_desc2_3, shuf_msk); + __m256i mb0_1 = _mm256_shuffle_epi8(raw_desc0_1, shuf_msk); + + mb2_3 = _mm256_add_epi16(mb2_3, crc_adjust); + mb0_1 = _mm256_add_epi16(mb0_1, crc_adjust); + /** + * to get packet types, ptype is located in bit16-25 + * of each 128bits + */ + const __m256i ptypes2_3 = + _mm256_and_si256(raw_desc2_3, ptype_mask); + const __m256i ptypes0_1 = + _mm256_and_si256(raw_desc0_1, ptype_mask); + const uint16_t ptype3 = _mm256_extract_epi16(ptypes2_3, 9); + const uint16_t ptype2 = _mm256_extract_epi16(ptypes2_3, 1); + const uint16_t ptype1 = _mm256_extract_epi16(ptypes0_1, 9); + const uint16_t ptype0 = _mm256_extract_epi16(ptypes0_1, 1); + + mb2_3 = _mm256_insert_epi32(mb2_3, type_table[ptype3], 4); + mb2_3 = _mm256_insert_epi32(mb2_3, type_table[ptype2], 0); + mb0_1 = _mm256_insert_epi32(mb0_1, type_table[ptype1], 4); + mb0_1 = _mm256_insert_epi32(mb0_1, type_table[ptype0], 0); + /* merge the status bits into one register */ + const __m256i status0_3 = _mm256_unpackhi_epi32(raw_desc2_3, + raw_desc0_1); + + /** + * take the two sets of status bits and merge to one + * After merge, the packets status flags are in the + * order (hi->lo): [1, 3, 5, 7, 0, 2, 4, 6] + */ + __m256i status0_7 = _mm256_unpacklo_epi64(status4_7, + status0_3); + + /* now do flag manipulation */ + + /* get only flag/error bits we want */ + const __m256i flag_bits = + _mm256_and_si256(status0_7, flags_mask); + /** + * l3_l4_error flags, shuffle, then shift to correct adjustment + * of flags in flags_shuf, and finally mask out extra bits + */ + __m256i l3_l4_flags = _mm256_shuffle_epi8(l3_l4_flags_shuf, + _mm256_srli_epi32(flag_bits, 4)); + l3_l4_flags = _mm256_slli_epi32(l3_l4_flags, 1); + l3_l4_flags = _mm256_and_si256(l3_l4_flags, cksum_mask); + /* set rss and vlan flags */ + const __m256i rss_vlan_flag_bits = + _mm256_srli_epi32(flag_bits, 12); + const __m256i rss_vlan_flags = + _mm256_shuffle_epi8(rss_vlan_flags_shuf, + rss_vlan_flag_bits); + + /* merge flags */ + __m256i mbuf_flags = _mm256_or_si256(l3_l4_flags, + 
rss_vlan_flags); + + if (rxq->fdir_enabled) { + const __m256i fdir_id4_7 = + _mm256_unpackhi_epi32(raw_desc6_7, raw_desc4_5); + + const __m256i fdir_id0_3 = + _mm256_unpackhi_epi32(raw_desc2_3, raw_desc0_1); + + const __m256i fdir_id0_7 = + _mm256_unpackhi_epi64(fdir_id4_7, fdir_id0_3); + + const __m256i fdir_flags = + flex_rxd_to_fdir_flags_vec_avx2(fdir_id0_7); + + /* merge with fdir_flags */ + mbuf_flags = _mm256_or_si256(mbuf_flags, fdir_flags); + + /* write to mbuf: have to use scalar store here */ + rx_pkts[i + 0]->hash.fdir.hi = + _mm256_extract_epi32(fdir_id0_7, 3); + + rx_pkts[i + 1]->hash.fdir.hi = + _mm256_extract_epi32(fdir_id0_7, 7); + + rx_pkts[i + 2]->hash.fdir.hi = + _mm256_extract_epi32(fdir_id0_7, 2); + + rx_pkts[i + 3]->hash.fdir.hi = + _mm256_extract_epi32(fdir_id0_7, 6); + + rx_pkts[i + 4]->hash.fdir.hi = + _mm256_extract_epi32(fdir_id0_7, 1); + + rx_pkts[i + 5]->hash.fdir.hi = + _mm256_extract_epi32(fdir_id0_7, 5); + + rx_pkts[i + 6]->hash.fdir.hi = + _mm256_extract_epi32(fdir_id0_7, 0); + + rx_pkts[i + 7]->hash.fdir.hi = + _mm256_extract_epi32(fdir_id0_7, 4); + } /* if() on fdir_enabled */ + +#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC + /** + * needs to load 2nd 16B of each desc for RSS hash parsing, + * will cause performance drop to get into this context. + */ + if (rxq->vsi->adapter->eth_dev->data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_RSS_HASH) { + /* load bottom half of every 32B desc */ + const __m128i raw_desc_bh7 = + _mm_load_si128 + ((void *)(&rxdp[7].wb.status_error1)); + rte_compiler_barrier(); + const __m128i raw_desc_bh6 = + _mm_load_si128 + ((void *)(&rxdp[6].wb.status_error1)); + rte_compiler_barrier(); + const __m128i raw_desc_bh5 = + _mm_load_si128 + ((void *)(&rxdp[5].wb.status_error1)); + rte_compiler_barrier(); + const __m128i raw_desc_bh4 = + _mm_load_si128 + ((void *)(&rxdp[4].wb.status_error1)); + rte_compiler_barrier(); + const __m128i raw_desc_bh3 = + _mm_load_si128 + ((void *)(&rxdp[3].wb.status_error1)); + rte_compiler_barrier(); + const __m128i raw_desc_bh2 = + _mm_load_si128 + ((void *)(&rxdp[2].wb.status_error1)); + rte_compiler_barrier(); + const __m128i raw_desc_bh1 = + _mm_load_si128 + ((void *)(&rxdp[1].wb.status_error1)); + rte_compiler_barrier(); + const __m128i raw_desc_bh0 = + _mm_load_si128 + ((void *)(&rxdp[0].wb.status_error1)); + + __m256i raw_desc_bh6_7 = + _mm256_inserti128_si256 + (_mm256_castsi128_si256(raw_desc_bh6), + raw_desc_bh7, 1); + __m256i raw_desc_bh4_5 = + _mm256_inserti128_si256 + (_mm256_castsi128_si256(raw_desc_bh4), + raw_desc_bh5, 1); + __m256i raw_desc_bh2_3 = + _mm256_inserti128_si256 + (_mm256_castsi128_si256(raw_desc_bh2), + raw_desc_bh3, 1); + __m256i raw_desc_bh0_1 = + _mm256_inserti128_si256 + (_mm256_castsi128_si256(raw_desc_bh0), + raw_desc_bh1, 1); + + /** + * to shift the 32b RSS hash value to the + * highest 32b of each 128b before mask + */ + __m256i rss_hash6_7 = + _mm256_slli_epi64(raw_desc_bh6_7, 32); + __m256i rss_hash4_5 = + _mm256_slli_epi64(raw_desc_bh4_5, 32); + __m256i rss_hash2_3 = + _mm256_slli_epi64(raw_desc_bh2_3, 32); + __m256i rss_hash0_1 = + _mm256_slli_epi64(raw_desc_bh0_1, 32); + + __m256i rss_hash_msk = + _mm256_set_epi32(0xFFFFFFFF, 0, 0, 0, + 0xFFFFFFFF, 0, 0, 0); + + rss_hash6_7 = _mm256_and_si256 + (rss_hash6_7, rss_hash_msk); + rss_hash4_5 = _mm256_and_si256 + (rss_hash4_5, rss_hash_msk); + rss_hash2_3 = _mm256_and_si256 + (rss_hash2_3, rss_hash_msk); + rss_hash0_1 = _mm256_and_si256 + (rss_hash0_1, rss_hash_msk); + + mb6_7 = _mm256_or_si256(mb6_7, rss_hash6_7); + 
mb4_5 = _mm256_or_si256(mb4_5, rss_hash4_5); + mb2_3 = _mm256_or_si256(mb2_3, rss_hash2_3); + mb0_1 = _mm256_or_si256(mb0_1, rss_hash0_1); + } /* if() on RSS hash parsing */ +#endif + + /** + * At this point, we have the 8 sets of flags in the low 16-bits + * of each 32-bit value in vlan0. + * We want to extract these, and merge them with the mbuf init + * data so we can do a single write to the mbuf to set the flags + * and all the other initialization fields. Extracting the + * appropriate flags means that we have to do a shift and blend + * for each mbuf before we do the write. However, we can also + * add in the previously computed rx_descriptor fields to + * make a single 256-bit write per mbuf + */ + /* check the structure matches expectations */ + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) != + offsetof(struct rte_mbuf, rearm_data) + 8); + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, rearm_data) != + RTE_ALIGN(offsetof(struct rte_mbuf, + rearm_data), + 16)); + /* build up data and do writes */ + __m256i rearm0, rearm1, rearm2, rearm3, rearm4, rearm5, + rearm6, rearm7; + rearm6 = _mm256_blend_epi32(mbuf_init, + _mm256_slli_si256(mbuf_flags, 8), + 0x04); + rearm4 = _mm256_blend_epi32(mbuf_init, + _mm256_slli_si256(mbuf_flags, 4), + 0x04); + rearm2 = _mm256_blend_epi32(mbuf_init, mbuf_flags, 0x04); + rearm0 = _mm256_blend_epi32(mbuf_init, + _mm256_srli_si256(mbuf_flags, 4), + 0x04); + /* permute to add in the rx_descriptor e.g. rss fields */ + rearm6 = _mm256_permute2f128_si256(rearm6, mb6_7, 0x20); + rearm4 = _mm256_permute2f128_si256(rearm4, mb4_5, 0x20); + rearm2 = _mm256_permute2f128_si256(rearm2, mb2_3, 0x20); + rearm0 = _mm256_permute2f128_si256(rearm0, mb0_1, 0x20); + /* write to mbuf */ + _mm256_storeu_si256((__m256i *)&rx_pkts[i + 6]->rearm_data, + rearm6); + _mm256_storeu_si256((__m256i *)&rx_pkts[i + 4]->rearm_data, + rearm4); + _mm256_storeu_si256((__m256i *)&rx_pkts[i + 2]->rearm_data, + rearm2); + _mm256_storeu_si256((__m256i *)&rx_pkts[i + 0]->rearm_data, + rearm0); + + /* repeat for the odd mbufs */ + const __m256i odd_flags = + _mm256_castsi128_si256 + (_mm256_extracti128_si256(mbuf_flags, 1)); + rearm7 = _mm256_blend_epi32(mbuf_init, + _mm256_slli_si256(odd_flags, 8), + 0x04); + rearm5 = _mm256_blend_epi32(mbuf_init, + _mm256_slli_si256(odd_flags, 4), + 0x04); + rearm3 = _mm256_blend_epi32(mbuf_init, odd_flags, 0x04); + rearm1 = _mm256_blend_epi32(mbuf_init, + _mm256_srli_si256(odd_flags, 4), + 0x04); + /* since odd mbufs are already in hi 128-bits use blend */ + rearm7 = _mm256_blend_epi32(rearm7, mb6_7, 0xF0); + rearm5 = _mm256_blend_epi32(rearm5, mb4_5, 0xF0); + rearm3 = _mm256_blend_epi32(rearm3, mb2_3, 0xF0); + rearm1 = _mm256_blend_epi32(rearm1, mb0_1, 0xF0); + /* again write to mbufs */ + _mm256_storeu_si256((__m256i *)&rx_pkts[i + 7]->rearm_data, + rearm7); + _mm256_storeu_si256((__m256i *)&rx_pkts[i + 5]->rearm_data, + rearm5); + _mm256_storeu_si256((__m256i *)&rx_pkts[i + 3]->rearm_data, + rearm3); + _mm256_storeu_si256((__m256i *)&rx_pkts[i + 1]->rearm_data, + rearm1); + + /* extract and record EOP bit */ + if (split_packet) { + const __m128i eop_mask = + _mm_set1_epi16(1 << + IAVF_RX_FLEX_DESC_STATUS0_EOF_S); + const __m256i eop_bits256 = _mm256_and_si256(status0_7, + eop_check); + /* pack status bits into a single 128-bit register */ + const __m128i eop_bits = + _mm_packus_epi32 + (_mm256_castsi256_si128(eop_bits256), + _mm256_extractf128_si256(eop_bits256, + 1)); + /** + * flip bits, and mask out the EOP bit, which is now + * a split-packet bit 
i.e. !EOP, rather than EOP one. + */ + __m128i split_bits = _mm_andnot_si128(eop_bits, + eop_mask); + /** + * eop bits are out of order, so we need to shuffle them + * back into order again. In doing so, only use low 8 + * bits, which acts like another pack instruction + * The original order is (hi->lo): 1,3,5,7,0,2,4,6 + * [Since we use epi8, the 16-bit positions are + * multiplied by 2 in the eop_shuffle value.] + */ + __m128i eop_shuffle = + _mm_set_epi8(/* zero hi 64b */ + 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, + /* move values to lo 64b */ + 8, 0, 10, 2, + 12, 4, 14, 6); + split_bits = _mm_shuffle_epi8(split_bits, eop_shuffle); + *(uint64_t *)split_packet = + _mm_cvtsi128_si64(split_bits); + split_packet += IAVF_DESCS_PER_LOOP_AVX; + } + + /* perform dd_check */ + status0_7 = _mm256_and_si256(status0_7, dd_check); + status0_7 = _mm256_packs_epi32(status0_7, + _mm256_setzero_si256()); + + uint64_t burst = __builtin_popcountll + (_mm_cvtsi128_si64 + (_mm256_extracti128_si256 + (status0_7, 1))); + burst += __builtin_popcountll + (_mm_cvtsi128_si64 + (_mm256_castsi256_si128(status0_7))); + received += burst; + if (burst != IAVF_DESCS_PER_LOOP_AVX) + break; + } + + /* update tail pointers */ + rxq->rx_tail += received; + rxq->rx_tail &= (rxq->nb_rx_desc - 1); + if ((rxq->rx_tail & 1) == 1 && received > 1) { /* keep avx2 aligned */ + rxq->rx_tail--; + received--; + } + rxq->rxrearm_nb += received; + return received; +} + +/** + * Notice: + * - nb_pkts < IAVF_DESCS_PER_LOOP, just return no packet + */ +uint16_t +iavf_recv_pkts_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + return _iavf_recv_raw_pkts_vec_avx2(rx_queue, rx_pkts, nb_pkts, NULL); +} + +/** + * Notice: + * - nb_pkts < IAVF_DESCS_PER_LOOP, just return no packet + */ +uint16_t +iavf_recv_pkts_vec_avx2_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + return _iavf_recv_raw_pkts_vec_avx2_flex_rxd(rx_queue, rx_pkts, + nb_pkts, NULL); +} + +/** + * vPMD receive routine that reassembles single burst of 32 scattered packets + * Notice: + * - nb_pkts < IAVF_DESCS_PER_LOOP, just return no packet + */ +static uint16_t +iavf_recv_scattered_burst_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + struct iavf_rx_queue *rxq = rx_queue; + uint8_t split_flags[IAVF_VPMD_RX_MAX_BURST] = {0}; + + /* get some new buffers */ + uint16_t nb_bufs = _iavf_recv_raw_pkts_vec_avx2(rxq, rx_pkts, nb_pkts, + split_flags); + if (nb_bufs == 0) + return 0; + + /* happy day case, full burst + no packets to be joined */ + const uint64_t *split_fl64 = (uint64_t *)split_flags; + + if (!rxq->pkt_first_seg && + split_fl64[0] == 0 && split_fl64[1] == 0 && + split_fl64[2] == 0 && split_fl64[3] == 0) + return nb_bufs; + + /* reassemble any packets that need reassembly*/ + unsigned int i = 0; + + if (!rxq->pkt_first_seg) { + /* find the first split flag, and only reassemble then*/ + while (i < nb_bufs && !split_flags[i]) + i++; + if (i == nb_bufs) + return nb_bufs; + rxq->pkt_first_seg = rx_pkts[i]; + } + return i + reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i, + &split_flags[i]); +} + +/** + * vPMD receive routine that reassembles scattered packets. 
+ * Main receive routine that can handle arbitrary burst sizes + * Notice: + * - nb_pkts < IAVF_DESCS_PER_LOOP, just return no packet + */ +uint16_t +iavf_recv_scattered_pkts_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + uint16_t retval = 0; + + while (nb_pkts > IAVF_VPMD_RX_MAX_BURST) { + uint16_t burst = iavf_recv_scattered_burst_vec_avx2(rx_queue, + rx_pkts + retval, IAVF_VPMD_RX_MAX_BURST); + retval += burst; + nb_pkts -= burst; + if (burst < IAVF_VPMD_RX_MAX_BURST) + return retval; + } + return retval + iavf_recv_scattered_burst_vec_avx2(rx_queue, + rx_pkts + retval, nb_pkts); +} + +/** + * vPMD receive routine that reassembles single burst of + * 32 scattered packets for flex RxD + * Notice: + * - nb_pkts < IAVF_DESCS_PER_LOOP, just return no packet + */ +static uint16_t +iavf_recv_scattered_burst_vec_avx2_flex_rxd(void *rx_queue, + struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + struct iavf_rx_queue *rxq = rx_queue; + uint8_t split_flags[IAVF_VPMD_RX_MAX_BURST] = {0}; + + /* get some new buffers */ + uint16_t nb_bufs = _iavf_recv_raw_pkts_vec_avx2_flex_rxd(rxq, + rx_pkts, nb_pkts, split_flags); + if (nb_bufs == 0) + return 0; + + /* happy day case, full burst + no packets to be joined */ + const uint64_t *split_fl64 = (uint64_t *)split_flags; + + if (!rxq->pkt_first_seg && + split_fl64[0] == 0 && split_fl64[1] == 0 && + split_fl64[2] == 0 && split_fl64[3] == 0) + return nb_bufs; + + /* reassemble any packets that need reassembly*/ + unsigned int i = 0; + + if (!rxq->pkt_first_seg) { + /* find the first split flag, and only reassemble then*/ + while (i < nb_bufs && !split_flags[i]) + i++; + if (i == nb_bufs) + return nb_bufs; + rxq->pkt_first_seg = rx_pkts[i]; + } + return i + reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i, + &split_flags[i]); +} + +/** + * vPMD receive routine that reassembles scattered packets for flex RxD. 
+ * Main receive routine that can handle arbitrary burst sizes + * Notice: + * - nb_pkts < IAVF_DESCS_PER_LOOP, just return no packet + */ +uint16_t +iavf_recv_scattered_pkts_vec_avx2_flex_rxd(void *rx_queue, + struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + uint16_t retval = 0; + + while (nb_pkts > IAVF_VPMD_RX_MAX_BURST) { + uint16_t burst = + iavf_recv_scattered_burst_vec_avx2_flex_rxd + (rx_queue, rx_pkts + retval, IAVF_VPMD_RX_MAX_BURST); + retval += burst; + nb_pkts -= burst; + if (burst < IAVF_VPMD_RX_MAX_BURST) + return retval; + } + return retval + iavf_recv_scattered_burst_vec_avx2_flex_rxd(rx_queue, + rx_pkts + retval, nb_pkts); +} + +static inline void +iavf_vtx1(volatile struct iavf_tx_desc *txdp, + struct rte_mbuf *pkt, uint64_t flags) +{ + uint64_t high_qw = + (IAVF_TX_DESC_DTYPE_DATA | + ((uint64_t)flags << IAVF_TXD_QW1_CMD_SHIFT) | + ((uint64_t)pkt->data_len << IAVF_TXD_QW1_TX_BUF_SZ_SHIFT)); + + __m128i descriptor = _mm_set_epi64x(high_qw, + pkt->buf_physaddr + pkt->data_off); + _mm_store_si128((__m128i *)txdp, descriptor); +} + +static inline void +iavf_vtx(volatile struct iavf_tx_desc *txdp, + struct rte_mbuf **pkt, uint16_t nb_pkts, uint64_t flags) +{ + const uint64_t hi_qw_tmpl = (IAVF_TX_DESC_DTYPE_DATA | + ((uint64_t)flags << IAVF_TXD_QW1_CMD_SHIFT)); + + /* if unaligned on 32-bit boundary, do one to align */ + if (((uintptr_t)txdp & 0x1F) != 0 && nb_pkts != 0) { + iavf_vtx1(txdp, *pkt, flags); + nb_pkts--, txdp++, pkt++; + } + + /* do two at a time while possible, in bursts */ + for (; nb_pkts > 3; txdp += 4, pkt += 4, nb_pkts -= 4) { + uint64_t hi_qw3 = + hi_qw_tmpl | + ((uint64_t)pkt[3]->data_len << + IAVF_TXD_QW1_TX_BUF_SZ_SHIFT); + uint64_t hi_qw2 = + hi_qw_tmpl | + ((uint64_t)pkt[2]->data_len << + IAVF_TXD_QW1_TX_BUF_SZ_SHIFT); + uint64_t hi_qw1 = + hi_qw_tmpl | + ((uint64_t)pkt[1]->data_len << + IAVF_TXD_QW1_TX_BUF_SZ_SHIFT); + uint64_t hi_qw0 = + hi_qw_tmpl | + ((uint64_t)pkt[0]->data_len << + IAVF_TXD_QW1_TX_BUF_SZ_SHIFT); + + __m256i desc2_3 = + _mm256_set_epi64x + (hi_qw3, + pkt[3]->buf_physaddr + pkt[3]->data_off, + hi_qw2, + pkt[2]->buf_physaddr + pkt[2]->data_off); + __m256i desc0_1 = + _mm256_set_epi64x + (hi_qw1, + pkt[1]->buf_physaddr + pkt[1]->data_off, + hi_qw0, + pkt[0]->buf_physaddr + pkt[0]->data_off); + _mm256_store_si256((void *)(txdp + 2), desc2_3); + _mm256_store_si256((void *)txdp, desc0_1); + } + + /* do any last ones */ + while (nb_pkts) { + iavf_vtx1(txdp, *pkt, flags); + txdp++, pkt++, nb_pkts--; + } +} + +static inline uint16_t +iavf_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + struct iavf_tx_queue *txq = (struct iavf_tx_queue *)tx_queue; + volatile struct iavf_tx_desc *txdp; + struct iavf_tx_entry *txep; + uint16_t n, nb_commit, tx_id; + /* bit2 is reserved and must be set to 1 according to Spec */ + uint64_t flags = IAVF_TX_DESC_CMD_EOP | IAVF_TX_DESC_CMD_ICRC; + uint64_t rs = IAVF_TX_DESC_CMD_RS | flags; + + /* cross rx_thresh boundary is not allowed */ + nb_pkts = RTE_MIN(nb_pkts, txq->rs_thresh); + + if (txq->nb_free < txq->free_thresh) + iavf_tx_free_bufs(txq); + + nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_free, nb_pkts); + if (unlikely(nb_pkts == 0)) + return 0; + + tx_id = txq->tx_tail; + txdp = &txq->tx_ring[tx_id]; + txep = &txq->sw_ring[tx_id]; + + txq->nb_free = (uint16_t)(txq->nb_free - nb_pkts); + + n = (uint16_t)(txq->nb_tx_desc - tx_id); + if (nb_commit >= n) { + tx_backlog_entry(txep, tx_pkts, n); + + iavf_vtx(txdp, tx_pkts, n - 1, flags); + tx_pkts += (n - 
1); + txdp += (n - 1); + + iavf_vtx1(txdp, *tx_pkts++, rs); + + nb_commit = (uint16_t)(nb_commit - n); + + tx_id = 0; + txq->next_rs = (uint16_t)(txq->rs_thresh - 1); + + /* avoid reach the end of ring */ + txdp = &txq->tx_ring[tx_id]; + txep = &txq->sw_ring[tx_id]; + } + + tx_backlog_entry(txep, tx_pkts, nb_commit); + + iavf_vtx(txdp, tx_pkts, nb_commit, flags); + + tx_id = (uint16_t)(tx_id + nb_commit); + if (tx_id > txq->next_rs) { + txq->tx_ring[txq->next_rs].cmd_type_offset_bsz |= + rte_cpu_to_le_64(((uint64_t)IAVF_TX_DESC_CMD_RS) << + IAVF_TXD_QW1_CMD_SHIFT); + txq->next_rs = + (uint16_t)(txq->next_rs + txq->rs_thresh); + } + + txq->tx_tail = tx_id; + + IAVF_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail); + + return nb_pkts; +} + +uint16_t +iavf_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + uint16_t nb_tx = 0; + struct iavf_tx_queue *txq = (struct iavf_tx_queue *)tx_queue; + + while (nb_pkts) { + uint16_t ret, num; + + num = (uint16_t)RTE_MIN(nb_pkts, txq->rs_thresh); + ret = iavf_xmit_fixed_burst_vec_avx2(tx_queue, &tx_pkts[nb_tx], + num); + nb_tx += ret; + nb_pkts -= ret; + if (ret < num) + break; + } + + return nb_tx; +} diff --git a/src/spdk/dpdk/drivers/net/iavf/iavf_rxtx_vec_common.h b/src/spdk/dpdk/drivers/net/iavf/iavf_rxtx_vec_common.h new file mode 100644 index 000000000..25bb502de --- /dev/null +++ b/src/spdk/dpdk/drivers/net/iavf/iavf_rxtx_vec_common.h @@ -0,0 +1,276 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Intel Corporation + */ + +#ifndef _IAVF_RXTX_VEC_COMMON_H_ +#define _IAVF_RXTX_VEC_COMMON_H_ +#include +#include +#include + +#include "iavf.h" +#include "iavf_rxtx.h" + +static inline uint16_t +reassemble_packets(struct iavf_rx_queue *rxq, struct rte_mbuf **rx_bufs, + uint16_t nb_bufs, uint8_t *split_flags) +{ + struct rte_mbuf *pkts[IAVF_VPMD_RX_MAX_BURST]; + struct rte_mbuf *start = rxq->pkt_first_seg; + struct rte_mbuf *end = rxq->pkt_last_seg; + unsigned int pkt_idx, buf_idx; + + for (buf_idx = 0, pkt_idx = 0; buf_idx < nb_bufs; buf_idx++) { + if (end) { + /* processing a split packet */ + end->next = rx_bufs[buf_idx]; + rx_bufs[buf_idx]->data_len += rxq->crc_len; + + start->nb_segs++; + start->pkt_len += rx_bufs[buf_idx]->data_len; + end = end->next; + + if (!split_flags[buf_idx]) { + /* it's the last packet of the set */ + start->hash = end->hash; + start->vlan_tci = end->vlan_tci; + start->ol_flags = end->ol_flags; + /* we need to strip crc for the whole packet */ + start->pkt_len -= rxq->crc_len; + if (end->data_len > rxq->crc_len) { + end->data_len -= rxq->crc_len; + } else { + /* free up last mbuf */ + struct rte_mbuf *secondlast = start; + + start->nb_segs--; + while (secondlast->next != end) + secondlast = secondlast->next; + secondlast->data_len -= (rxq->crc_len - + end->data_len); + secondlast->next = NULL; + rte_pktmbuf_free_seg(end); + } + pkts[pkt_idx++] = start; + start = NULL; + end = NULL; + } + } else { + /* not processing a split packet */ + if (!split_flags[buf_idx]) { + /* not a split packet, save and skip */ + pkts[pkt_idx++] = rx_bufs[buf_idx]; + continue; + } + end = start = rx_bufs[buf_idx]; + rx_bufs[buf_idx]->data_len += rxq->crc_len; + rx_bufs[buf_idx]->pkt_len += rxq->crc_len; + } + } + + /* save the partial packet for next time */ + rxq->pkt_first_seg = start; + rxq->pkt_last_seg = end; + memcpy(rx_bufs, pkts, pkt_idx * (sizeof(*pkts))); + return pkt_idx; +} + +static __rte_always_inline int +iavf_tx_free_bufs(struct iavf_tx_queue *txq) +{ + struct iavf_tx_entry *txep; + 
uint32_t n; + uint32_t i; + int nb_free = 0; + struct rte_mbuf *m, *free[IAVF_VPMD_TX_MAX_FREE_BUF]; + + /* check DD bits on threshold descriptor */ + if ((txq->tx_ring[txq->next_dd].cmd_type_offset_bsz & + rte_cpu_to_le_64(IAVF_TXD_QW1_DTYPE_MASK)) != + rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE)) + return 0; + + n = txq->rs_thresh; + + /* first buffer to free from S/W ring is at index + * tx_next_dd - (tx_rs_thresh-1) + */ + txep = &txq->sw_ring[txq->next_dd - (n - 1)]; + m = rte_pktmbuf_prefree_seg(txep[0].mbuf); + if (likely(m != NULL)) { + free[0] = m; + nb_free = 1; + for (i = 1; i < n; i++) { + m = rte_pktmbuf_prefree_seg(txep[i].mbuf); + if (likely(m != NULL)) { + if (likely(m->pool == free[0]->pool)) { + free[nb_free++] = m; + } else { + rte_mempool_put_bulk(free[0]->pool, + (void *)free, + nb_free); + free[0] = m; + nb_free = 1; + } + } + } + rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free); + } else { + for (i = 1; i < n; i++) { + m = rte_pktmbuf_prefree_seg(txep[i].mbuf); + if (m) + rte_mempool_put(m->pool, m); + } + } + + /* buffers were freed, update counters */ + txq->nb_free = (uint16_t)(txq->nb_free + txq->rs_thresh); + txq->next_dd = (uint16_t)(txq->next_dd + txq->rs_thresh); + if (txq->next_dd >= txq->nb_tx_desc) + txq->next_dd = (uint16_t)(txq->rs_thresh - 1); + + return txq->rs_thresh; +} + +static __rte_always_inline void +tx_backlog_entry(struct iavf_tx_entry *txep, + struct rte_mbuf **tx_pkts, uint16_t nb_pkts) +{ + int i; + + for (i = 0; i < (int)nb_pkts; ++i) + txep[i].mbuf = tx_pkts[i]; +} + +static inline void +_iavf_rx_queue_release_mbufs_vec(struct iavf_rx_queue *rxq) +{ + const unsigned int mask = rxq->nb_rx_desc - 1; + unsigned int i; + + if (!rxq->sw_ring || rxq->rxrearm_nb >= rxq->nb_rx_desc) + return; + + /* free all mbufs that are valid in the ring */ + if (rxq->rxrearm_nb == 0) { + for (i = 0; i < rxq->nb_rx_desc; i++) { + if (rxq->sw_ring[i]) + rte_pktmbuf_free_seg(rxq->sw_ring[i]); + } + } else { + for (i = rxq->rx_tail; + i != rxq->rxrearm_start; + i = (i + 1) & mask) { + if (rxq->sw_ring[i]) + rte_pktmbuf_free_seg(rxq->sw_ring[i]); + } + } + + rxq->rxrearm_nb = rxq->nb_rx_desc; + + /* set all entries to NULL */ + memset(rxq->sw_ring, 0, sizeof(rxq->sw_ring[0]) * rxq->nb_rx_desc); +} + +static inline void +_iavf_tx_queue_release_mbufs_vec(struct iavf_tx_queue *txq) +{ + unsigned i; + const uint16_t max_desc = (uint16_t)(txq->nb_tx_desc - 1); + + if (!txq->sw_ring || txq->nb_free == max_desc) + return; + + i = txq->next_dd - txq->rs_thresh + 1; + if (txq->tx_tail < i) { + for (; i < txq->nb_tx_desc; i++) { + rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf); + txq->sw_ring[i].mbuf = NULL; + } + i = 0; + } +} + +static inline int +iavf_rxq_vec_setup_default(struct iavf_rx_queue *rxq) +{ + uintptr_t p; + struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */ + + mb_def.nb_segs = 1; + mb_def.data_off = RTE_PKTMBUF_HEADROOM; + mb_def.port = rxq->port_id; + rte_mbuf_refcnt_set(&mb_def, 1); + + /* prevent compiler reordering: rearm_data covers previous fields */ + rte_compiler_barrier(); + p = (uintptr_t)&mb_def.rearm_data; + rxq->mbuf_initializer = *(uint64_t *)p; + return 0; +} + +static inline int +iavf_rx_vec_queue_default(struct iavf_rx_queue *rxq) +{ + if (!rxq) + return -1; + + if (!rte_is_power_of_2(rxq->nb_rx_desc)) + return -1; + + if (rxq->rx_free_thresh < IAVF_VPMD_RX_MAX_BURST) + return -1; + + if (rxq->nb_rx_desc % rxq->rx_free_thresh) + return -1; + + return 0; +} + +static inline int +iavf_tx_vec_queue_default(struct 
iavf_tx_queue *txq) +{ + if (!txq) + return -1; + + if (txq->offloads & IAVF_NO_VECTOR_FLAGS) + return -1; + + if (txq->rs_thresh < IAVF_VPMD_TX_MAX_BURST || + txq->rs_thresh > IAVF_VPMD_TX_MAX_FREE_BUF) + return -1; + + return 0; +} + +static inline int +iavf_rx_vec_dev_check_default(struct rte_eth_dev *dev) +{ + int i; + struct iavf_rx_queue *rxq; + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = dev->data->rx_queues[i]; + if (iavf_rx_vec_queue_default(rxq)) + return -1; + } + + return 0; +} + +static inline int +iavf_tx_vec_dev_check_default(struct rte_eth_dev *dev) +{ + int i; + struct iavf_tx_queue *txq; + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + txq = dev->data->tx_queues[i]; + if (iavf_tx_vec_queue_default(txq)) + return -1; + } + + return 0; +} + +#endif diff --git a/src/spdk/dpdk/drivers/net/iavf/iavf_rxtx_vec_sse.c b/src/spdk/dpdk/drivers/net/iavf/iavf_rxtx_vec_sse.c new file mode 100644 index 000000000..85c5bd4af --- /dev/null +++ b/src/spdk/dpdk/drivers/net/iavf/iavf_rxtx_vec_sse.c @@ -0,0 +1,1191 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Intel Corporation + */ + +#include +#include +#include + +#include "iavf.h" +#include "iavf_rxtx.h" +#include "iavf_rxtx_vec_common.h" + +#include + +#ifndef __INTEL_COMPILER +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif + +static inline void +iavf_rxq_rearm(struct iavf_rx_queue *rxq) +{ + int i; + uint16_t rx_id; + + volatile union iavf_rx_desc *rxdp; + struct rte_mbuf **rxp = &rxq->sw_ring[rxq->rxrearm_start]; + struct rte_mbuf *mb0, *mb1; + __m128i hdr_room = _mm_set_epi64x(RTE_PKTMBUF_HEADROOM, + RTE_PKTMBUF_HEADROOM); + __m128i dma_addr0, dma_addr1; + + rxdp = rxq->rx_ring + rxq->rxrearm_start; + + /* Pull 'n' more MBUFs into the software ring */ + if (rte_mempool_get_bulk(rxq->mp, (void *)rxp, + rxq->rx_free_thresh) < 0) { + if (rxq->rxrearm_nb + rxq->rx_free_thresh >= rxq->nb_rx_desc) { + dma_addr0 = _mm_setzero_si128(); + for (i = 0; i < IAVF_VPMD_DESCS_PER_LOOP; i++) { + rxp[i] = &rxq->fake_mbuf; + _mm_store_si128((__m128i *)&rxdp[i].read, + dma_addr0); + } + } + rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed += + rxq->rx_free_thresh; + return; + } + + /* Initialize the mbufs in vector, process 2 mbufs in one loop */ + for (i = 0; i < rxq->rx_free_thresh; i += 2, rxp += 2) { + __m128i vaddr0, vaddr1; + + mb0 = rxp[0]; + mb1 = rxp[1]; + + /* load buf_addr(lo 64bit) and buf_iova(hi 64bit) */ + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_iova) != + offsetof(struct rte_mbuf, buf_addr) + 8); + vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr); + vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr); + + /* convert pa to dma_addr hdr/data */ + dma_addr0 = _mm_unpackhi_epi64(vaddr0, vaddr0); + dma_addr1 = _mm_unpackhi_epi64(vaddr1, vaddr1); + + /* add headroom to pa values */ + dma_addr0 = _mm_add_epi64(dma_addr0, hdr_room); + dma_addr1 = _mm_add_epi64(dma_addr1, hdr_room); + + /* flush desc with pa dma_addr */ + _mm_store_si128((__m128i *)&rxdp++->read, dma_addr0); + _mm_store_si128((__m128i *)&rxdp++->read, dma_addr1); + } + + rxq->rxrearm_start += rxq->rx_free_thresh; + if (rxq->rxrearm_start >= rxq->nb_rx_desc) + rxq->rxrearm_start = 0; + + rxq->rxrearm_nb -= rxq->rx_free_thresh; + + rx_id = (uint16_t)((rxq->rxrearm_start == 0) ? 
+ (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1)); + + PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u " + "rearm_start=%u rearm_nb=%u", + rxq->port_id, rxq->queue_id, + rx_id, rxq->rxrearm_start, rxq->rxrearm_nb); + + /* Update the tail pointer on the NIC */ + IAVF_PCI_REG_WRITE(rxq->qrx_tail, rx_id); +} + +static inline void +desc_to_olflags_v(struct iavf_rx_queue *rxq, __m128i descs[4], + struct rte_mbuf **rx_pkts) +{ + const __m128i mbuf_init = _mm_set_epi64x(0, rxq->mbuf_initializer); + __m128i rearm0, rearm1, rearm2, rearm3; + + __m128i vlan0, vlan1, rss, l3_l4e; + + /* mask everything except RSS, flow director and VLAN flags + * bit2 is for VLAN tag, bit11 for flow director indication + * bit13:12 for RSS indication. + */ + const __m128i rss_vlan_msk = _mm_set_epi32( + 0x1c03804, 0x1c03804, 0x1c03804, 0x1c03804); + + const __m128i cksum_mask = _mm_set_epi32( + PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD | + PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD | + PKT_RX_EIP_CKSUM_BAD, + PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD | + PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD | + PKT_RX_EIP_CKSUM_BAD, + PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD | + PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD | + PKT_RX_EIP_CKSUM_BAD, + PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD | + PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD | + PKT_RX_EIP_CKSUM_BAD); + + /* map rss and vlan type to rss hash and vlan flag */ + const __m128i vlan_flags = _mm_set_epi8(0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, + 0, 0, 0, 0); + + const __m128i rss_flags = _mm_set_epi8(0, 0, 0, 0, + 0, 0, 0, 0, + PKT_RX_RSS_HASH | PKT_RX_FDIR, PKT_RX_RSS_HASH, 0, 0, + 0, 0, PKT_RX_FDIR, 0); + + const __m128i l3_l4e_flags = _mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0, + /* shift right 1 bit to make sure it not exceed 255 */ + (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | + PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD | + PKT_RX_L4_CKSUM_BAD) >> 1, + (PKT_RX_EIP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD) >> 1, + (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD) >> 1, + PKT_RX_IP_CKSUM_BAD >> 1, + (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> 1); + + vlan0 = _mm_unpackhi_epi32(descs[0], descs[1]); + vlan1 = _mm_unpackhi_epi32(descs[2], descs[3]); + vlan0 = _mm_unpacklo_epi64(vlan0, vlan1); + + vlan1 = _mm_and_si128(vlan0, rss_vlan_msk); + vlan0 = _mm_shuffle_epi8(vlan_flags, vlan1); + + rss = _mm_srli_epi32(vlan1, 11); + rss = _mm_shuffle_epi8(rss_flags, rss); + + l3_l4e = _mm_srli_epi32(vlan1, 22); + l3_l4e = _mm_shuffle_epi8(l3_l4e_flags, l3_l4e); + /* then we shift left 1 bit */ + l3_l4e = _mm_slli_epi32(l3_l4e, 1); + /* we need to mask out the reduntant bits */ + l3_l4e = _mm_and_si128(l3_l4e, cksum_mask); + + vlan0 = _mm_or_si128(vlan0, rss); + vlan0 = _mm_or_si128(vlan0, l3_l4e); + + /* At this point, we have the 4 sets of flags in the low 16-bits + * of each 32-bit value in vlan0. + * We want to extract these, and merge them with the mbuf init data + * so we can do a single 16-byte write to the mbuf to set the flags + * and all the other initialization fields. Extracting the + * appropriate flags means that we have to do a shift and blend for + * each mbuf before we do the write. 
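The vlan/rss/l3_l4e flag tables above are applied with _mm_shuffle_epi8, which behaves as sixteen parallel byte-table lookups: each descriptor contributes a small status code, and a single pshufb resolves all four lookups at once. The following is a minimal standalone sketch of that lookup idea only; the table contents and status codes are invented for illustration and are not the driver's real flag values.

#include <stdint.h>
#include <stdio.h>
#include <tmmintrin.h>   /* SSSE3: _mm_shuffle_epi8 */

int main(void)
{
	/* 16-entry byte table: entry i holds an invented flag byte 0x40|i. */
	const __m128i flag_lut = _mm_set_epi8(0x4F, 0x4E, 0x4D, 0x4C,
					      0x4B, 0x4A, 0x49, 0x48,
					      0x47, 0x46, 0x45, 0x44,
					      0x43, 0x42, 0x41, 0x40);

	/* One 4-bit status code per packet, isolated in the low byte of
	 * each 32-bit lane (the driver gets here via shifts and masks).
	 */
	__m128i codes = _mm_set_epi32(0x0B, 0x03, 0x0E, 0x01);

	/* A single pshufb performs all four table lookups in parallel. */
	__m128i flags = _mm_shuffle_epi8(flag_lut, codes);

	uint32_t out[4];
	_mm_storeu_si128((__m128i *)out, flags);
	for (int i = 0; i < 4; i++)
		printf("packet %d -> flag byte 0x%02x\n", i,
		       (unsigned int)(out[i] & 0xff));
	return 0;
}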
+ */ + rearm0 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vlan0, 8), 0x10); + rearm1 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vlan0, 4), 0x10); + rearm2 = _mm_blend_epi16(mbuf_init, vlan0, 0x10); + rearm3 = _mm_blend_epi16(mbuf_init, _mm_srli_si128(vlan0, 4), 0x10); + + /* write the rearm data and the olflags in one write */ + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) != + offsetof(struct rte_mbuf, rearm_data) + 8); + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, rearm_data) != + RTE_ALIGN(offsetof(struct rte_mbuf, rearm_data), 16)); + _mm_store_si128((__m128i *)&rx_pkts[0]->rearm_data, rearm0); + _mm_store_si128((__m128i *)&rx_pkts[1]->rearm_data, rearm1); + _mm_store_si128((__m128i *)&rx_pkts[2]->rearm_data, rearm2); + _mm_store_si128((__m128i *)&rx_pkts[3]->rearm_data, rearm3); +} + +static inline __m128i +flex_rxd_to_fdir_flags_vec(const __m128i fdir_id0_3) +{ +#define FDID_MIS_MAGIC 0xFFFFFFFF + RTE_BUILD_BUG_ON(PKT_RX_FDIR != (1 << 2)); + RTE_BUILD_BUG_ON(PKT_RX_FDIR_ID != (1 << 13)); + const __m128i pkt_fdir_bit = _mm_set1_epi32(PKT_RX_FDIR | + PKT_RX_FDIR_ID); + /* desc->flow_id field == 0xFFFFFFFF means fdir mismatch */ + const __m128i fdir_mis_mask = _mm_set1_epi32(FDID_MIS_MAGIC); + __m128i fdir_mask = _mm_cmpeq_epi32(fdir_id0_3, + fdir_mis_mask); + /* this XOR op results to bit-reverse the fdir_mask */ + fdir_mask = _mm_xor_si128(fdir_mask, fdir_mis_mask); + const __m128i fdir_flags = _mm_and_si128(fdir_mask, pkt_fdir_bit); + + return fdir_flags; +} + +static inline void +flex_desc_to_olflags_v(struct iavf_rx_queue *rxq, __m128i descs[4], + struct rte_mbuf **rx_pkts) +{ + const __m128i mbuf_init = _mm_set_epi64x(0, rxq->mbuf_initializer); + __m128i rearm0, rearm1, rearm2, rearm3; + + __m128i tmp_desc, flags, rss_vlan; + + /* mask everything except checksum, RSS and VLAN flags. + * bit6:4 for checksum. + * bit12 for RSS indication. + * bit13 for VLAN indication. 
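flex_rxd_to_fdir_flags_vec() marks a packet as a flow-director hit only when the reported flow ID differs from the all-ones miss magic: it compares against the magic and then XORs with all-ones to invert the mask. A reduced sketch of that compare-and-invert pattern follows; the flag bit positions mirror the RTE_BUILD_BUG_ON checks in that helper, and the flow IDs are made up.

#include <stdint.h>
#include <stdio.h>
#include <emmintrin.h>   /* SSE2 */

#define FDIR_MISS_MAGIC 0xFFFFFFFFu
#define FLAG_FDIR_HIT   ((1u << 2) | (1u << 13))

int main(void)
{
	/* Four reported flow IDs: lanes 0 and 2 are hits, 1 and 3 misses. */
	__m128i flow_id = _mm_set_epi32(FDIR_MISS_MAGIC, 42, FDIR_MISS_MAGIC, 7);
	const __m128i miss = _mm_set1_epi32(FDIR_MISS_MAGIC);
	const __m128i hit_bits = _mm_set1_epi32(FLAG_FDIR_HIT);

	/* all-ones where the ID equals the miss magic ... */
	__m128i mask = _mm_cmpeq_epi32(flow_id, miss);
	/* ... XOR with all-ones inverts it, so hits keep the flag bits. */
	mask = _mm_xor_si128(mask, miss);

	__m128i flags = _mm_and_si128(mask, hit_bits);

	uint32_t out[4];
	_mm_storeu_si128((__m128i *)out, flags);
	for (int i = 0; i < 4; i++)
		printf("lane %d: flags 0x%08x\n", i, out[i]);
	return 0;
}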
+ */ + const __m128i desc_mask = _mm_set_epi32(0x3070, 0x3070, + 0x3070, 0x3070); + + const __m128i cksum_mask = _mm_set_epi32(PKT_RX_IP_CKSUM_MASK | + PKT_RX_L4_CKSUM_MASK | + PKT_RX_EIP_CKSUM_BAD, + PKT_RX_IP_CKSUM_MASK | + PKT_RX_L4_CKSUM_MASK | + PKT_RX_EIP_CKSUM_BAD, + PKT_RX_IP_CKSUM_MASK | + PKT_RX_L4_CKSUM_MASK | + PKT_RX_EIP_CKSUM_BAD, + PKT_RX_IP_CKSUM_MASK | + PKT_RX_L4_CKSUM_MASK | + PKT_RX_EIP_CKSUM_BAD); + + /* map the checksum, rss and vlan fields to the checksum, rss + * and vlan flag + */ + const __m128i cksum_flags = _mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0, + /* shift right 1 bit to make sure it not exceed 255 */ + (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | + PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | + PKT_RX_IP_CKSUM_GOOD) >> 1, + (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD | + PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD | + PKT_RX_IP_CKSUM_GOOD) >> 1, + (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1, + (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1); + + const __m128i rss_vlan_flags = _mm_set_epi8(0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + PKT_RX_RSS_HASH | PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, + PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, + PKT_RX_RSS_HASH, 0); + + /* merge 4 descriptors */ + flags = _mm_unpackhi_epi32(descs[0], descs[1]); + tmp_desc = _mm_unpackhi_epi32(descs[2], descs[3]); + tmp_desc = _mm_unpacklo_epi64(flags, tmp_desc); + tmp_desc = _mm_and_si128(flags, desc_mask); + + /* checksum flags */ + tmp_desc = _mm_srli_epi32(tmp_desc, 4); + flags = _mm_shuffle_epi8(cksum_flags, tmp_desc); + /* then we shift left 1 bit */ + flags = _mm_slli_epi32(flags, 1); + /* we need to mask out the redundant bits introduced by RSS or + * VLAN fields. + */ + flags = _mm_and_si128(flags, cksum_mask); + + /* RSS, VLAN flag */ + tmp_desc = _mm_srli_epi32(tmp_desc, 8); + rss_vlan = _mm_shuffle_epi8(rss_vlan_flags, tmp_desc); + + /* merge the flags */ + flags = _mm_or_si128(flags, rss_vlan); + + if (rxq->fdir_enabled) { + const __m128i fdir_id0_1 = + _mm_unpackhi_epi32(descs[0], descs[1]); + + const __m128i fdir_id2_3 = + _mm_unpackhi_epi32(descs[2], descs[3]); + + const __m128i fdir_id0_3 = + _mm_unpackhi_epi64(fdir_id0_1, fdir_id2_3); + + const __m128i fdir_flags = + flex_rxd_to_fdir_flags_vec(fdir_id0_3); + + /* merge with fdir_flags */ + flags = _mm_or_si128(flags, fdir_flags); + + /* write fdir_id to mbuf */ + rx_pkts[0]->hash.fdir.hi = + _mm_extract_epi32(fdir_id0_3, 0); + + rx_pkts[1]->hash.fdir.hi = + _mm_extract_epi32(fdir_id0_3, 1); + + rx_pkts[2]->hash.fdir.hi = + _mm_extract_epi32(fdir_id0_3, 2); + + rx_pkts[3]->hash.fdir.hi = + _mm_extract_epi32(fdir_id0_3, 3); + } /* if() on fdir_enabled */ + + /** + * At this point, we have the 4 sets of flags in the low 16-bits + * of each 32-bit value in flags. + * We want to extract these, and merge them with the mbuf init data + * so we can do a single 16-byte write to the mbuf to set the flags + * and all the other initialization fields. Extracting the + * appropriate flags means that we have to do a shift and blend for + * each mbuf before we do the write. 
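The checksum table above stores every entry pre-shifted right by one bit so that combined flag values still fit in a byte-wide pshufb table, then shifts the lookup result back left by one. Below is a self-contained sketch of that trick using invented flag values rather than the real PKT_RX_* constants; the table is built from a byte array purely to keep the example warning-free.

#include <stdint.h>
#include <stdio.h>
#include <tmmintrin.h>   /* SSSE3 */

/* Invented flag values; their OR exceeds 255, so entries are pre-shifted. */
#define F_IP_CKSUM_GOOD (1u << 7)
#define F_L4_CKSUM_GOOD (1u << 8)

int main(void)
{
	/* Entries pre-shifted right by one so each fits in one byte. */
	const uint8_t lut_bytes[16] = {
		0,
		F_IP_CKSUM_GOOD >> 1,
		F_L4_CKSUM_GOOD >> 1,
		(F_IP_CKSUM_GOOD | F_L4_CKSUM_GOOD) >> 1,
		/* remaining entries stay 0 */
	};
	const __m128i lut = _mm_loadu_si128((const __m128i *)lut_bytes);

	/* One 2-bit checksum code per packet. */
	__m128i codes = _mm_set_epi32(3, 2, 1, 0);
	__m128i flags = _mm_shuffle_epi8(lut, codes);

	/* Undo the pre-shift once the lookup is done. */
	flags = _mm_slli_epi32(flags, 1);

	uint32_t out[4];
	_mm_storeu_si128((__m128i *)out, flags);
	for (int i = 0; i < 4; i++)
		printf("packet %d: checksum flags 0x%03x\n", i, out[i]);
	return 0;
}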
+ */ + rearm0 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(flags, 8), 0x10); + rearm1 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(flags, 4), 0x10); + rearm2 = _mm_blend_epi16(mbuf_init, flags, 0x10); + rearm3 = _mm_blend_epi16(mbuf_init, _mm_srli_si128(flags, 4), 0x10); + + /* write the rearm data and the olflags in one write */ + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) != + offsetof(struct rte_mbuf, rearm_data) + 8); + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, rearm_data) != + RTE_ALIGN(offsetof(struct rte_mbuf, rearm_data), 16)); + _mm_store_si128((__m128i *)&rx_pkts[0]->rearm_data, rearm0); + _mm_store_si128((__m128i *)&rx_pkts[1]->rearm_data, rearm1); + _mm_store_si128((__m128i *)&rx_pkts[2]->rearm_data, rearm2); + _mm_store_si128((__m128i *)&rx_pkts[3]->rearm_data, rearm3); +} + +#define PKTLEN_SHIFT 10 + +static inline void +desc_to_ptype_v(__m128i descs[4], struct rte_mbuf **rx_pkts, + const uint32_t *type_table) +{ + __m128i ptype0 = _mm_unpackhi_epi64(descs[0], descs[1]); + __m128i ptype1 = _mm_unpackhi_epi64(descs[2], descs[3]); + + ptype0 = _mm_srli_epi64(ptype0, 30); + ptype1 = _mm_srli_epi64(ptype1, 30); + + rx_pkts[0]->packet_type = type_table[_mm_extract_epi8(ptype0, 0)]; + rx_pkts[1]->packet_type = type_table[_mm_extract_epi8(ptype0, 8)]; + rx_pkts[2]->packet_type = type_table[_mm_extract_epi8(ptype1, 0)]; + rx_pkts[3]->packet_type = type_table[_mm_extract_epi8(ptype1, 8)]; +} + +static inline void +flex_desc_to_ptype_v(__m128i descs[4], struct rte_mbuf **rx_pkts, + const uint32_t *type_table) +{ + const __m128i ptype_mask = _mm_set_epi16(0, IAVF_RX_FLEX_DESC_PTYPE_M, + 0, IAVF_RX_FLEX_DESC_PTYPE_M, + 0, IAVF_RX_FLEX_DESC_PTYPE_M, + 0, IAVF_RX_FLEX_DESC_PTYPE_M); + __m128i ptype_01 = _mm_unpacklo_epi32(descs[0], descs[1]); + __m128i ptype_23 = _mm_unpacklo_epi32(descs[2], descs[3]); + __m128i ptype_all = _mm_unpacklo_epi64(ptype_01, ptype_23); + + ptype_all = _mm_and_si128(ptype_all, ptype_mask); + + rx_pkts[0]->packet_type = type_table[_mm_extract_epi16(ptype_all, 1)]; + rx_pkts[1]->packet_type = type_table[_mm_extract_epi16(ptype_all, 3)]; + rx_pkts[2]->packet_type = type_table[_mm_extract_epi16(ptype_all, 5)]; + rx_pkts[3]->packet_type = type_table[_mm_extract_epi16(ptype_all, 7)]; +} + +/* Notice: + * - nb_pkts < IAVF_VPMD_DESCS_PER_LOOP, just return no packet + * - nb_pkts > IAVF_VPMD_RX_MAX_BURST, only scan IAVF_VPMD_RX_MAX_BURST + * numbers of DD bits + */ +static inline uint16_t +_recv_raw_pkts_vec(struct iavf_rx_queue *rxq, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts, uint8_t *split_packet) +{ + volatile union iavf_rx_desc *rxdp; + struct rte_mbuf **sw_ring; + uint16_t nb_pkts_recd; + int pos; + uint64_t var; + __m128i shuf_msk; + const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl; + + __m128i crc_adjust = _mm_set_epi16( + 0, 0, 0, /* ignore non-length fields */ + -rxq->crc_len, /* sub crc on data_len */ + 0, /* ignore high-16bits of pkt_len */ + -rxq->crc_len, /* sub crc on pkt_len */ + 0, 0 /* ignore pkt_type field */ + ); + /* compile-time check the above crc_adjust layout is correct. + * NOTE: the first field (lowest address) is given last in set_epi16 + * call above. 
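The RTE_BUILD_BUG_ON checks above pin the mbuf field offsets that the 16-byte vector stores rely on, so a layout change fails the build instead of silently corrupting metadata. A minimal C11 sketch of the same idea, using a hypothetical struct and _Static_assert in place of the DPDK macro:

#include <stddef.h>
#include <stdint.h>

/* Hypothetical descriptor-shaped block used only for illustration. */
struct pkt_fields {
	uint32_t packet_type;
	uint32_t pkt_len;
	uint16_t data_len;
	uint16_t vlan_tci;
	uint32_t rss_hash;
};

/* Fail the build, not the fast path, if the assumed layout ever changes. */
_Static_assert(offsetof(struct pkt_fields, pkt_len) == 4,
	       "pkt_len must sit 4 bytes into the block");
_Static_assert(offsetof(struct pkt_fields, data_len) == 8,
	       "data_len must sit 8 bytes into the block");
_Static_assert(offsetof(struct pkt_fields, vlan_tci) == 10,
	       "vlan_tci must sit 10 bytes into the block");
_Static_assert(offsetof(struct pkt_fields, rss_hash) == 12,
	       "rss_hash must sit 12 bytes into the block");

int main(void)
{
	return 0;
}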
+ */ + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) != + offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4); + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) != + offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8); + __m128i dd_check, eop_check; + + /* nb_pkts shall be less equal than IAVF_VPMD_RX_MAX_BURST */ + nb_pkts = RTE_MIN(nb_pkts, IAVF_VPMD_RX_MAX_BURST); + + /* nb_pkts has to be floor-aligned to IAVF_VPMD_DESCS_PER_LOOP */ + nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, IAVF_VPMD_DESCS_PER_LOOP); + + /* Just the act of getting into the function from the application is + * going to cost about 7 cycles + */ + rxdp = rxq->rx_ring + rxq->rx_tail; + + rte_prefetch0(rxdp); + + /* See if we need to rearm the RX queue - gives the prefetch a bit + * of time to act + */ + if (rxq->rxrearm_nb > rxq->rx_free_thresh) + iavf_rxq_rearm(rxq); + + /* Before we start moving massive data around, check to see if + * there is actually a packet available + */ + if (!(rxdp->wb.qword1.status_error_len & + rte_cpu_to_le_32(1 << IAVF_RX_DESC_STATUS_DD_SHIFT))) + return 0; + + /* 4 packets DD mask */ + dd_check = _mm_set_epi64x(0x0000000100000001LL, 0x0000000100000001LL); + + /* 4 packets EOP mask */ + eop_check = _mm_set_epi64x(0x0000000200000002LL, 0x0000000200000002LL); + + /* mask to shuffle from desc. to mbuf */ + shuf_msk = _mm_set_epi8( + 7, 6, 5, 4, /* octet 4~7, 32bits rss */ + 3, 2, /* octet 2~3, low 16 bits vlan_macip */ + 15, 14, /* octet 15~14, 16 bits data_len */ + 0xFF, 0xFF, /* skip high 16 bits pkt_len, zero out */ + 15, 14, /* octet 15~14, low 16 bits pkt_len */ + 0xFF, 0xFF, 0xFF, 0xFF /* pkt_type set as unknown */ + ); + /* Compile-time verify the shuffle mask + * NOTE: some field positions already verified above, but duplicated + * here for completeness in case of future modifications. + */ + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) != + offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4); + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) != + offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8); + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, vlan_tci) != + offsetof(struct rte_mbuf, rx_descriptor_fields1) + 10); + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) != + offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12); + + /* Cache is empty -> need to scan the buffer rings, but first move + * the next 'n' mbufs into the cache + */ + sw_ring = &rxq->sw_ring[rxq->rx_tail]; + + /* A. load 4 packet in one loop + * [A*. mask out 4 unused dirty field in desc] + * B. copy 4 mbuf point from swring to rx_pkts + * C. calc the number of DD bits among the 4 packets + * [C*. extract the end-of-packet bit, if requested] + * D. fill info. from desc to mbuf + */ + + for (pos = 0, nb_pkts_recd = 0; pos < nb_pkts; + pos += IAVF_VPMD_DESCS_PER_LOOP, + rxdp += IAVF_VPMD_DESCS_PER_LOOP) { + __m128i descs[IAVF_VPMD_DESCS_PER_LOOP]; + __m128i pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4; + __m128i zero, staterr, sterr_tmp1, sterr_tmp2; + /* 2 64 bit or 4 32 bit mbuf pointers in one XMM reg. 
*/ + __m128i mbp1; +#if defined(RTE_ARCH_X86_64) + __m128i mbp2; +#endif + + /* B.1 load 2 (64 bit) or 4 (32 bit) mbuf points */ + mbp1 = _mm_loadu_si128((__m128i *)&sw_ring[pos]); + /* Read desc statuses backwards to avoid race condition */ + /* A.1 load 4 pkts desc */ + descs[3] = _mm_loadu_si128((__m128i *)(rxdp + 3)); + rte_compiler_barrier(); + + /* B.2 copy 2 64 bit or 4 32 bit mbuf point into rx_pkts */ + _mm_storeu_si128((__m128i *)&rx_pkts[pos], mbp1); + +#if defined(RTE_ARCH_X86_64) + /* B.1 load 2 64 bit mbuf points */ + mbp2 = _mm_loadu_si128((__m128i *)&sw_ring[pos + 2]); +#endif + + descs[2] = _mm_loadu_si128((__m128i *)(rxdp + 2)); + rte_compiler_barrier(); + /* B.1 load 2 mbuf point */ + descs[1] = _mm_loadu_si128((__m128i *)(rxdp + 1)); + rte_compiler_barrier(); + descs[0] = _mm_loadu_si128((__m128i *)(rxdp)); + +#if defined(RTE_ARCH_X86_64) + /* B.2 copy 2 mbuf point into rx_pkts */ + _mm_storeu_si128((__m128i *)&rx_pkts[pos + 2], mbp2); +#endif + + if (split_packet) { + rte_mbuf_prefetch_part2(rx_pkts[pos]); + rte_mbuf_prefetch_part2(rx_pkts[pos + 1]); + rte_mbuf_prefetch_part2(rx_pkts[pos + 2]); + rte_mbuf_prefetch_part2(rx_pkts[pos + 3]); + } + + /* avoid compiler reorder optimization */ + rte_compiler_barrier(); + + /* pkt 3,4 shift the pktlen field to be 16-bit aligned*/ + const __m128i len3 = _mm_slli_epi32(descs[3], PKTLEN_SHIFT); + const __m128i len2 = _mm_slli_epi32(descs[2], PKTLEN_SHIFT); + + /* merge the now-aligned packet length fields back in */ + descs[3] = _mm_blend_epi16(descs[3], len3, 0x80); + descs[2] = _mm_blend_epi16(descs[2], len2, 0x80); + + /* D.1 pkt 3,4 convert format from desc to pktmbuf */ + pkt_mb4 = _mm_shuffle_epi8(descs[3], shuf_msk); + pkt_mb3 = _mm_shuffle_epi8(descs[2], shuf_msk); + + /* C.1 4=>2 status err info only */ + sterr_tmp2 = _mm_unpackhi_epi32(descs[3], descs[2]); + sterr_tmp1 = _mm_unpackhi_epi32(descs[1], descs[0]); + + desc_to_olflags_v(rxq, descs, &rx_pkts[pos]); + + /* D.2 pkt 3,4 set in_port/nb_seg and remove crc */ + pkt_mb4 = _mm_add_epi16(pkt_mb4, crc_adjust); + pkt_mb3 = _mm_add_epi16(pkt_mb3, crc_adjust); + + /* pkt 1,2 shift the pktlen field to be 16-bit aligned*/ + const __m128i len1 = _mm_slli_epi32(descs[1], PKTLEN_SHIFT); + const __m128i len0 = _mm_slli_epi32(descs[0], PKTLEN_SHIFT); + + /* merge the now-aligned packet length fields back in */ + descs[1] = _mm_blend_epi16(descs[1], len1, 0x80); + descs[0] = _mm_blend_epi16(descs[0], len0, 0x80); + + /* D.1 pkt 1,2 convert format from desc to pktmbuf */ + pkt_mb2 = _mm_shuffle_epi8(descs[1], shuf_msk); + pkt_mb1 = _mm_shuffle_epi8(descs[0], shuf_msk); + + /* C.2 get 4 pkts status err value */ + zero = _mm_xor_si128(dd_check, dd_check); + staterr = _mm_unpacklo_epi32(sterr_tmp1, sterr_tmp2); + + /* D.3 copy final 3,4 data to rx_pkts */ + _mm_storeu_si128( + (void *)&rx_pkts[pos + 3]->rx_descriptor_fields1, + pkt_mb4); + _mm_storeu_si128( + (void *)&rx_pkts[pos + 2]->rx_descriptor_fields1, + pkt_mb3); + + /* D.2 pkt 1,2 remove crc */ + pkt_mb2 = _mm_add_epi16(pkt_mb2, crc_adjust); + pkt_mb1 = _mm_add_epi16(pkt_mb1, crc_adjust); + + /* C* extract and record EOP bit */ + if (split_packet) { + __m128i eop_shuf_mask = _mm_set_epi8( + 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, + 0x04, 0x0C, 0x00, 0x08 + ); + + /* and with mask to extract bits, flipping 1-0 */ + __m128i eop_bits = _mm_andnot_si128(staterr, eop_check); + /* the staterr values are not in order, as the count + * count of dd bits doesn't care. 
However, for end of + * packet tracking, we do care, so shuffle. This also + * compresses the 32-bit values to 8-bit + */ + eop_bits = _mm_shuffle_epi8(eop_bits, eop_shuf_mask); + /* store the resulting 32-bit value */ + *(int *)split_packet = _mm_cvtsi128_si32(eop_bits); + split_packet += IAVF_VPMD_DESCS_PER_LOOP; + } + + /* C.3 calc available number of desc */ + staterr = _mm_and_si128(staterr, dd_check); + staterr = _mm_packs_epi32(staterr, zero); + + /* D.3 copy final 1,2 data to rx_pkts */ + _mm_storeu_si128( + (void *)&rx_pkts[pos + 1]->rx_descriptor_fields1, + pkt_mb2); + _mm_storeu_si128((void *)&rx_pkts[pos]->rx_descriptor_fields1, + pkt_mb1); + desc_to_ptype_v(descs, &rx_pkts[pos], ptype_tbl); + /* C.4 calc avaialbe number of desc */ + var = __builtin_popcountll(_mm_cvtsi128_si64(staterr)); + nb_pkts_recd += var; + if (likely(var != IAVF_VPMD_DESCS_PER_LOOP)) + break; + } + + /* Update our internal tail pointer */ + rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_pkts_recd); + rxq->rx_tail = (uint16_t)(rxq->rx_tail & (rxq->nb_rx_desc - 1)); + rxq->rxrearm_nb = (uint16_t)(rxq->rxrearm_nb + nb_pkts_recd); + + return nb_pkts_recd; +} + +/* Notice: + * - nb_pkts < IAVF_VPMD_DESCS_PER_LOOP, just return no packet + * - nb_pkts > IAVF_VPMD_RX_MAX_BURST, only scan IAVF_VPMD_RX_MAX_BURST + * numbers of DD bits + */ +static inline uint16_t +_recv_raw_pkts_vec_flex_rxd(struct iavf_rx_queue *rxq, + struct rte_mbuf **rx_pkts, + uint16_t nb_pkts, uint8_t *split_packet) +{ + volatile union iavf_rx_flex_desc *rxdp; + struct rte_mbuf **sw_ring; + uint16_t nb_pkts_recd; + int pos; + uint64_t var; + const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl; + __m128i crc_adjust = _mm_set_epi16 + (0, 0, 0, /* ignore non-length fields */ + -rxq->crc_len, /* sub crc on data_len */ + 0, /* ignore high-16bits of pkt_len */ + -rxq->crc_len, /* sub crc on pkt_len */ + 0, 0 /* ignore pkt_type field */ + ); + const __m128i zero = _mm_setzero_si128(); + /* mask to shuffle from desc. to mbuf */ + const __m128i shuf_msk = _mm_set_epi8 + (0xFF, 0xFF, + 0xFF, 0xFF, /* rss hash parsed separately */ + 11, 10, /* octet 10~11, 16 bits vlan_macip */ + 5, 4, /* octet 4~5, 16 bits data_len */ + 0xFF, 0xFF, /* skip high 16 bits pkt_len, zero out */ + 5, 4, /* octet 4~5, low 16 bits pkt_len */ + 0xFF, 0xFF, /* pkt_type set as unknown */ + 0xFF, 0xFF /* pkt_type set as unknown */ + ); + const __m128i eop_shuf_mask = _mm_set_epi8(0xFF, 0xFF, + 0xFF, 0xFF, + 0xFF, 0xFF, + 0xFF, 0xFF, + 0xFF, 0xFF, + 0xFF, 0xFF, + 0x04, 0x0C, + 0x00, 0x08); + + /** + * compile-time check the above crc_adjust layout is correct. + * NOTE: the first field (lowest address) is given last in set_epi16 + * call above. 
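Each iteration of the receive loops ends by masking the DD bits, packing them into the low 64 bits with _mm_packs_epi32, and popcounting the result; fewer set bits than IAVF_VPMD_DESCS_PER_LOOP means the hardware has not written back a full group, so the burst stops early. A scalar sketch of that count-and-early-exit logic, with hand-packed status words standing in for real descriptors (uses the same GCC/Clang __builtin_popcountll the driver does):

#include <stdint.h>
#include <stdio.h>

/* Count how many of the packed 16-bit DD flags are set. */
static inline unsigned int count_done(uint64_t packed_dd)
{
	return (unsigned int)__builtin_popcountll(packed_dd);
}

int main(void)
{
	/* First group has all 4 DD bits set, the second only 3, so the
	 * burst stops after the second group, as in the driver's break.
	 */
	uint64_t groups[2] = { 0x0001000100010001ULL, 0x0000000100010001ULL };
	unsigned int total = 0;

	for (int i = 0; i < 2; i++) {
		unsigned int done = count_done(groups[i]);

		total += done;
		if (done != 4)
			break;
	}
	printf("descriptors received this burst: %u\n", total);
	return 0;
}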
+ */ + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) != + offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4); + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) != + offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8); + + /* 4 packets DD mask */ + const __m128i dd_check = _mm_set_epi64x(0x0000000100000001LL, + 0x0000000100000001LL); + /* 4 packets EOP mask */ + const __m128i eop_check = _mm_set_epi64x(0x0000000200000002LL, + 0x0000000200000002LL); + + /* nb_pkts shall be less equal than IAVF_VPMD_RX_MAX_BURST */ + nb_pkts = RTE_MIN(nb_pkts, IAVF_VPMD_RX_MAX_BURST); + + /* nb_pkts has to be floor-aligned to IAVF_VPMD_DESCS_PER_LOOP */ + nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, IAVF_VPMD_DESCS_PER_LOOP); + + /* Just the act of getting into the function from the application is + * going to cost about 7 cycles + */ + rxdp = (union iavf_rx_flex_desc *)rxq->rx_ring + rxq->rx_tail; + + rte_prefetch0(rxdp); + + /* See if we need to rearm the RX queue - gives the prefetch a bit + * of time to act + */ + if (rxq->rxrearm_nb > rxq->rx_free_thresh) + iavf_rxq_rearm(rxq); + + /* Before we start moving massive data around, check to see if + * there is actually a packet available + */ + if (!(rxdp->wb.status_error0 & + rte_cpu_to_le_32(1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S))) + return 0; + + /** + * Compile-time verify the shuffle mask + * NOTE: some field positions already verified above, but duplicated + * here for completeness in case of future modifications. + */ + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) != + offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4); + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) != + offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8); + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, vlan_tci) != + offsetof(struct rte_mbuf, rx_descriptor_fields1) + 10); + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) != + offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12); + + /* Cache is empty -> need to scan the buffer rings, but first move + * the next 'n' mbufs into the cache + */ + sw_ring = &rxq->sw_ring[rxq->rx_tail]; + + /* A. load 4 packet in one loop + * [A*. mask out 4 unused dirty field in desc] + * B. copy 4 mbuf point from swring to rx_pkts + * C. calc the number of DD bits among the 4 packets + * [C*. extract the end-of-packet bit, if requested] + * D. fill info. from desc to mbuf + */ + + for (pos = 0, nb_pkts_recd = 0; pos < nb_pkts; + pos += IAVF_VPMD_DESCS_PER_LOOP, + rxdp += IAVF_VPMD_DESCS_PER_LOOP) { + __m128i descs[IAVF_VPMD_DESCS_PER_LOOP]; + __m128i pkt_mb0, pkt_mb1, pkt_mb2, pkt_mb3; + __m128i staterr, sterr_tmp1, sterr_tmp2; + /* 2 64 bit or 4 32 bit mbuf pointers in one XMM reg. 
*/ + __m128i mbp1; +#if defined(RTE_ARCH_X86_64) + __m128i mbp2; +#endif + + /* B.1 load 2 (64 bit) or 4 (32 bit) mbuf points */ + mbp1 = _mm_loadu_si128((__m128i *)&sw_ring[pos]); + /* Read desc statuses backwards to avoid race condition */ + /* A.1 load 4 pkts desc */ + descs[3] = _mm_loadu_si128((__m128i *)(rxdp + 3)); + rte_compiler_barrier(); + + /* B.2 copy 2 64 bit or 4 32 bit mbuf point into rx_pkts */ + _mm_storeu_si128((__m128i *)&rx_pkts[pos], mbp1); + +#if defined(RTE_ARCH_X86_64) + /* B.1 load 2 64 bit mbuf points */ + mbp2 = _mm_loadu_si128((__m128i *)&sw_ring[pos + 2]); +#endif + + descs[2] = _mm_loadu_si128((__m128i *)(rxdp + 2)); + rte_compiler_barrier(); + /* B.1 load 2 mbuf point */ + descs[1] = _mm_loadu_si128((__m128i *)(rxdp + 1)); + rte_compiler_barrier(); + descs[0] = _mm_loadu_si128((__m128i *)(rxdp)); + +#if defined(RTE_ARCH_X86_64) + /* B.2 copy 2 mbuf point into rx_pkts */ + _mm_storeu_si128((__m128i *)&rx_pkts[pos + 2], mbp2); +#endif + + if (split_packet) { + rte_mbuf_prefetch_part2(rx_pkts[pos]); + rte_mbuf_prefetch_part2(rx_pkts[pos + 1]); + rte_mbuf_prefetch_part2(rx_pkts[pos + 2]); + rte_mbuf_prefetch_part2(rx_pkts[pos + 3]); + } + + /* avoid compiler reorder optimization */ + rte_compiler_barrier(); + + /* D.1 pkt 3,4 convert format from desc to pktmbuf */ + pkt_mb3 = _mm_shuffle_epi8(descs[3], shuf_msk); + pkt_mb2 = _mm_shuffle_epi8(descs[2], shuf_msk); + + /* D.1 pkt 1,2 convert format from desc to pktmbuf */ + pkt_mb1 = _mm_shuffle_epi8(descs[1], shuf_msk); + pkt_mb0 = _mm_shuffle_epi8(descs[0], shuf_msk); + + /* C.1 4=>2 filter staterr info only */ + sterr_tmp2 = _mm_unpackhi_epi32(descs[3], descs[2]); + /* C.1 4=>2 filter staterr info only */ + sterr_tmp1 = _mm_unpackhi_epi32(descs[1], descs[0]); + + flex_desc_to_olflags_v(rxq, descs, &rx_pkts[pos]); + + /* D.2 pkt 3,4 set in_port/nb_seg and remove crc */ + pkt_mb3 = _mm_add_epi16(pkt_mb3, crc_adjust); + pkt_mb2 = _mm_add_epi16(pkt_mb2, crc_adjust); + + /* D.2 pkt 1,2 set in_port/nb_seg and remove crc */ + pkt_mb1 = _mm_add_epi16(pkt_mb1, crc_adjust); + pkt_mb0 = _mm_add_epi16(pkt_mb0, crc_adjust); + +#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC + /** + * needs to load 2nd 16B of each desc for RSS hash parsing, + * will cause performance drop to get into this context. 
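When the RSS hash offload is enabled, the block that follows loads the second 16 bytes of each flex descriptor, shifts the 32-bit hash into the lane where the mbuf's hash field sits, masks everything else, and ORs it into the already-shuffled metadata. The sketch below reproduces just that lane move; the source lane chosen here is an assumption made for the example, not a statement about the device's writeback layout.

#include <stdint.h>
#include <stdio.h>
#include <emmintrin.h>   /* SSE2 */

int main(void)
{
	/* Assume the 32-bit RSS hash arrives in lane 2 and must end up in
	 * lane 3, where the mbuf hash field lives in rx_descriptor_fields1.
	 */
	__m128i desc_bh = _mm_set_epi32(0, 0x1234abcd, 0, 0);

	/* shift within each 64-bit half: lane 2 -> lane 3 */
	__m128i hash = _mm_slli_epi64(desc_bh, 32);
	/* keep only the destination lane */
	hash = _mm_and_si128(hash, _mm_set_epi32(-1, 0, 0, 0));

	/* OR into the already-shuffled mbuf metadata (zeros here) */
	__m128i pkt_mb = _mm_setzero_si128();
	pkt_mb = _mm_or_si128(pkt_mb, hash);

	uint32_t out[4];
	_mm_storeu_si128((__m128i *)out, pkt_mb);
	printf("lane 3 (rss hash) = 0x%08x\n", out[3]);
	return 0;
}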
+ */ + if (rxq->vsi->adapter->eth_dev->data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_RSS_HASH) { + /* load bottom half of every 32B desc */ + const __m128i raw_desc_bh3 = + _mm_load_si128 + ((void *)(&rxdp[3].wb.status_error1)); + rte_compiler_barrier(); + const __m128i raw_desc_bh2 = + _mm_load_si128 + ((void *)(&rxdp[2].wb.status_error1)); + rte_compiler_barrier(); + const __m128i raw_desc_bh1 = + _mm_load_si128 + ((void *)(&rxdp[1].wb.status_error1)); + rte_compiler_barrier(); + const __m128i raw_desc_bh0 = + _mm_load_si128 + ((void *)(&rxdp[0].wb.status_error1)); + + /** + * to shift the 32b RSS hash value to the + * highest 32b of each 128b before mask + */ + __m128i rss_hash3 = + _mm_slli_epi64(raw_desc_bh3, 32); + __m128i rss_hash2 = + _mm_slli_epi64(raw_desc_bh2, 32); + __m128i rss_hash1 = + _mm_slli_epi64(raw_desc_bh1, 32); + __m128i rss_hash0 = + _mm_slli_epi64(raw_desc_bh0, 32); + + __m128i rss_hash_msk = + _mm_set_epi32(0xFFFFFFFF, 0, 0, 0); + + rss_hash3 = _mm_and_si128 + (rss_hash3, rss_hash_msk); + rss_hash2 = _mm_and_si128 + (rss_hash2, rss_hash_msk); + rss_hash1 = _mm_and_si128 + (rss_hash1, rss_hash_msk); + rss_hash0 = _mm_and_si128 + (rss_hash0, rss_hash_msk); + + pkt_mb3 = _mm_or_si128(pkt_mb3, rss_hash3); + pkt_mb2 = _mm_or_si128(pkt_mb2, rss_hash2); + pkt_mb1 = _mm_or_si128(pkt_mb1, rss_hash1); + pkt_mb0 = _mm_or_si128(pkt_mb0, rss_hash0); + } /* if() on RSS hash parsing */ +#endif + + /* C.2 get 4 pkts staterr value */ + staterr = _mm_unpacklo_epi32(sterr_tmp1, sterr_tmp2); + + /* D.3 copy final 3,4 data to rx_pkts */ + _mm_storeu_si128 + ((void *)&rx_pkts[pos + 3]->rx_descriptor_fields1, + pkt_mb3); + _mm_storeu_si128 + ((void *)&rx_pkts[pos + 2]->rx_descriptor_fields1, + pkt_mb2); + + /* C* extract and record EOP bit */ + if (split_packet) { + /* and with mask to extract bits, flipping 1-0 */ + __m128i eop_bits = _mm_andnot_si128(staterr, eop_check); + /* the staterr values are not in order, as the count + * count of dd bits doesn't care. However, for end of + * packet tracking, we do care, so shuffle. 
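For scattered RX, the end-of-packet status is inverted with _mm_andnot_si128 (a set byte then means "more segments follow") and compressed to one byte per descriptor with a shuffle before being stored to split_packet. The sketch below shows the same andnot-plus-shuffle compression with an in-order shuffle mask; it is deliberately simpler than the driver's mask, which also compensates for the staterr unpack ordering.

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <tmmintrin.h>   /* SSSE3 */

int main(void)
{
	/* Bit 1 of each 32-bit lane set when the descriptor is EOP;
	 * descriptor 2 here still has segments pending.
	 */
	__m128i staterr = _mm_set_epi32(0x2, 0x0, 0x2, 0x2);
	const __m128i eop_check = _mm_set1_epi32(0x2);

	/* andnot flips the sense: non-zero now means "not end of packet" */
	__m128i eop_bits = _mm_andnot_si128(staterr, eop_check);

	/* gather one byte per descriptor; 0x80 entries zero the rest */
	const uint8_t shuf[16] = {
		0x00, 0x04, 0x08, 0x0C,
		0x80, 0x80, 0x80, 0x80,
		0x80, 0x80, 0x80, 0x80,
		0x80, 0x80, 0x80, 0x80
	};
	eop_bits = _mm_shuffle_epi8(eop_bits,
				    _mm_loadu_si128((const __m128i *)shuf));

	int packed = _mm_cvtsi128_si32(eop_bits);
	uint8_t split_flags[4];
	memcpy(split_flags, &packed, sizeof(packed));

	for (int i = 0; i < 4; i++)
		printf("split_flags[%d] = %u\n", i, split_flags[i]);
	return 0;
}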
This also + * compresses the 32-bit values to 8-bit + */ + eop_bits = _mm_shuffle_epi8(eop_bits, eop_shuf_mask); + /* store the resulting 32-bit value */ + *(int *)split_packet = _mm_cvtsi128_si32(eop_bits); + split_packet += IAVF_VPMD_DESCS_PER_LOOP; + } + + /* C.3 calc available number of desc */ + staterr = _mm_and_si128(staterr, dd_check); + staterr = _mm_packs_epi32(staterr, zero); + + /* D.3 copy final 1,2 data to rx_pkts */ + _mm_storeu_si128 + ((void *)&rx_pkts[pos + 1]->rx_descriptor_fields1, + pkt_mb1); + _mm_storeu_si128((void *)&rx_pkts[pos]->rx_descriptor_fields1, + pkt_mb0); + flex_desc_to_ptype_v(descs, &rx_pkts[pos], ptype_tbl); + /* C.4 calc available number of desc */ + var = __builtin_popcountll(_mm_cvtsi128_si64(staterr)); + nb_pkts_recd += var; + if (likely(var != IAVF_VPMD_DESCS_PER_LOOP)) + break; + } + + /* Update our internal tail pointer */ + rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_pkts_recd); + rxq->rx_tail = (uint16_t)(rxq->rx_tail & (rxq->nb_rx_desc - 1)); + rxq->rxrearm_nb = (uint16_t)(rxq->rxrearm_nb + nb_pkts_recd); + + return nb_pkts_recd; +} + +/* Notice: + * - nb_pkts < IAVF_DESCS_PER_LOOP, just return no packet + * - nb_pkts > IAVF_VPMD_RX_MAX_BURST, only scan IAVF_VPMD_RX_MAX_BURST + * numbers of DD bits + */ +uint16_t +iavf_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL); +} + +/* Notice: + * - nb_pkts < IAVF_DESCS_PER_LOOP, just return no packet + * - nb_pkts > IAVF_VPMD_RX_MAX_BURST, only scan IAVF_VPMD_RX_MAX_BURST + * numbers of DD bits + */ +uint16_t +iavf_recv_pkts_vec_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + return _recv_raw_pkts_vec_flex_rxd(rx_queue, rx_pkts, nb_pkts, NULL); +} + +/* vPMD receive routine that reassembles scattered packets + * Notice: + * - nb_pkts < IAVF_VPMD_DESCS_PER_LOOP, just return no packet + * - nb_pkts > VPMD_RX_MAX_BURST, only scan IAVF_VPMD_RX_MAX_BURST + * numbers of DD bits + */ +uint16_t +iavf_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + struct iavf_rx_queue *rxq = rx_queue; + uint8_t split_flags[IAVF_VPMD_RX_MAX_BURST] = {0}; + unsigned int i = 0; + + /* get some new buffers */ + uint16_t nb_bufs = _recv_raw_pkts_vec(rxq, rx_pkts, nb_pkts, + split_flags); + if (nb_bufs == 0) + return 0; + + /* happy day case, full burst + no packets to be joined */ + const uint64_t *split_fl64 = (uint64_t *)split_flags; + + if (!rxq->pkt_first_seg && + split_fl64[0] == 0 && split_fl64[1] == 0 && + split_fl64[2] == 0 && split_fl64[3] == 0) + return nb_bufs; + + /* reassemble any packets that need reassembly*/ + if (!rxq->pkt_first_seg) { + /* find the first split flag, and only reassemble then*/ + while (i < nb_bufs && !split_flags[i]) + i++; + if (i == nb_bufs) + return nb_bufs; + rxq->pkt_first_seg = rx_pkts[i]; + } + return i + reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i, + &split_flags[i]); +} + +/* vPMD receive routine that reassembles scattered packets for flex RxD + * Notice: + * - nb_pkts < IAVF_VPMD_DESCS_PER_LOOP, just return no packet + * - nb_pkts > VPMD_RX_MAX_BURST, only scan IAVF_VPMD_RX_MAX_BURST + * numbers of DD bits + */ +uint16_t +iavf_recv_scattered_pkts_vec_flex_rxd(void *rx_queue, + struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + struct iavf_rx_queue *rxq = rx_queue; + uint8_t split_flags[IAVF_VPMD_RX_MAX_BURST] = {0}; + unsigned int i = 0; + + /* get some new buffers */ + uint16_t nb_bufs = 
_recv_raw_pkts_vec_flex_rxd(rxq, rx_pkts, nb_pkts, + split_flags); + if (nb_bufs == 0) + return 0; + + /* happy day case, full burst + no packets to be joined */ + const uint64_t *split_fl64 = (uint64_t *)split_flags; + + if (!rxq->pkt_first_seg && + split_fl64[0] == 0 && split_fl64[1] == 0 && + split_fl64[2] == 0 && split_fl64[3] == 0) + return nb_bufs; + + /* reassemble any packets that need reassembly*/ + if (!rxq->pkt_first_seg) { + /* find the first split flag, and only reassemble then*/ + while (i < nb_bufs && !split_flags[i]) + i++; + if (i == nb_bufs) + return nb_bufs; + rxq->pkt_first_seg = rx_pkts[i]; + } + return i + reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i, + &split_flags[i]); +} + +static inline void +vtx1(volatile struct iavf_tx_desc *txdp, struct rte_mbuf *pkt, uint64_t flags) +{ + uint64_t high_qw = + (IAVF_TX_DESC_DTYPE_DATA | + ((uint64_t)flags << IAVF_TXD_QW1_CMD_SHIFT) | + ((uint64_t)pkt->data_len << + IAVF_TXD_QW1_TX_BUF_SZ_SHIFT)); + + __m128i descriptor = _mm_set_epi64x(high_qw, + pkt->buf_iova + pkt->data_off); + _mm_store_si128((__m128i *)txdp, descriptor); +} + +static inline void +iavf_vtx(volatile struct iavf_tx_desc *txdp, struct rte_mbuf **pkt, + uint16_t nb_pkts, uint64_t flags) +{ + int i; + + for (i = 0; i < nb_pkts; ++i, ++txdp, ++pkt) + vtx1(txdp, *pkt, flags); +} + +uint16_t +iavf_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + struct iavf_tx_queue *txq = (struct iavf_tx_queue *)tx_queue; + volatile struct iavf_tx_desc *txdp; + struct iavf_tx_entry *txep; + uint16_t n, nb_commit, tx_id; + uint64_t flags = IAVF_TX_DESC_CMD_EOP | 0x04; /* bit 2 must be set */ + uint64_t rs = IAVF_TX_DESC_CMD_RS | flags; + int i; + + /* cross rx_thresh boundary is not allowed */ + nb_pkts = RTE_MIN(nb_pkts, txq->rs_thresh); + + if (txq->nb_free < txq->free_thresh) + iavf_tx_free_bufs(txq); + + nb_pkts = (uint16_t)RTE_MIN(txq->nb_free, nb_pkts); + if (unlikely(nb_pkts == 0)) + return 0; + nb_commit = nb_pkts; + + tx_id = txq->tx_tail; + txdp = &txq->tx_ring[tx_id]; + txep = &txq->sw_ring[tx_id]; + + txq->nb_free = (uint16_t)(txq->nb_free - nb_pkts); + + n = (uint16_t)(txq->nb_tx_desc - tx_id); + if (nb_commit >= n) { + tx_backlog_entry(txep, tx_pkts, n); + + for (i = 0; i < n - 1; ++i, ++tx_pkts, ++txdp) + vtx1(txdp, *tx_pkts, flags); + + vtx1(txdp, *tx_pkts++, rs); + + nb_commit = (uint16_t)(nb_commit - n); + + tx_id = 0; + txq->next_rs = (uint16_t)(txq->rs_thresh - 1); + + /* avoid reach the end of ring */ + txdp = &txq->tx_ring[tx_id]; + txep = &txq->sw_ring[tx_id]; + } + + tx_backlog_entry(txep, tx_pkts, nb_commit); + + iavf_vtx(txdp, tx_pkts, nb_commit, flags); + + tx_id = (uint16_t)(tx_id + nb_commit); + if (tx_id > txq->next_rs) { + txq->tx_ring[txq->next_rs].cmd_type_offset_bsz |= + rte_cpu_to_le_64(((uint64_t)IAVF_TX_DESC_CMD_RS) << + IAVF_TXD_QW1_CMD_SHIFT); + txq->next_rs = + (uint16_t)(txq->next_rs + txq->rs_thresh); + } + + txq->tx_tail = tx_id; + + PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_pkts=%u", + txq->port_id, txq->queue_id, tx_id, nb_pkts); + + IAVF_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail); + + return nb_pkts; +} + +uint16_t +iavf_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + uint16_t nb_tx = 0; + struct iavf_tx_queue *txq = (struct iavf_tx_queue *)tx_queue; + + while (nb_pkts) { + uint16_t ret, num; + + num = (uint16_t)RTE_MIN(nb_pkts, txq->rs_thresh); + ret = iavf_xmit_fixed_burst_vec(tx_queue, &tx_pkts[nb_tx], num); + nb_tx += ret; + nb_pkts -= ret; + 
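vtx1() above builds a descriptor's high quadword by ORing the descriptor type, the command flags shifted into their field, and the buffer length shifted into its field. A scalar sketch of that packing follows; the shift amounts and flag values here are illustrative placeholders, not the device's documented register layout.

#include <stdint.h>
#include <stdio.h>

/* Illustrative field positions for the high quadword of a 16-byte data
 * descriptor: dtype | command flags | buffer size.
 */
#define DTYPE_DATA      0x0ULL
#define CMD_SHIFT       4
#define BUF_SZ_SHIFT    34
#define CMD_EOP         0x1ULL
#define CMD_RS          0x2ULL

static uint64_t build_high_qword(uint64_t cmd, uint16_t data_len)
{
	return DTYPE_DATA |
	       (cmd << CMD_SHIFT) |
	       ((uint64_t)data_len << BUF_SZ_SHIFT);
}

int main(void)
{
	/* Last packet of a burst: end-of-packet plus report-status. */
	uint64_t hi = build_high_qword(CMD_EOP | CMD_RS, 1500);

	printf("high qword = 0x%016llx\n", (unsigned long long)hi);
	return 0;
}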
if (ret < num) + break; + } + + return nb_tx; +} + +static void __rte_cold +iavf_rx_queue_release_mbufs_sse(struct iavf_rx_queue *rxq) +{ + _iavf_rx_queue_release_mbufs_vec(rxq); +} + +static void __rte_cold +iavf_tx_queue_release_mbufs_sse(struct iavf_tx_queue *txq) +{ + _iavf_tx_queue_release_mbufs_vec(txq); +} + +static const struct iavf_rxq_ops sse_vec_rxq_ops = { + .release_mbufs = iavf_rx_queue_release_mbufs_sse, +}; + +static const struct iavf_txq_ops sse_vec_txq_ops = { + .release_mbufs = iavf_tx_queue_release_mbufs_sse, +}; + +int __rte_cold +iavf_txq_vec_setup(struct iavf_tx_queue *txq) +{ + txq->ops = &sse_vec_txq_ops; + return 0; +} + +int __rte_cold +iavf_rxq_vec_setup(struct iavf_rx_queue *rxq) +{ + rxq->ops = &sse_vec_rxq_ops; + return iavf_rxq_vec_setup_default(rxq); +} + +int __rte_cold +iavf_rx_vec_dev_check(struct rte_eth_dev *dev) +{ + return iavf_rx_vec_dev_check_default(dev); +} + +int __rte_cold +iavf_tx_vec_dev_check(struct rte_eth_dev *dev) +{ + return iavf_tx_vec_dev_check_default(dev); +} diff --git a/src/spdk/dpdk/drivers/net/iavf/iavf_vchnl.c b/src/spdk/dpdk/drivers/net/iavf/iavf_vchnl.c new file mode 100644 index 000000000..33acea54a --- /dev/null +++ b/src/spdk/dpdk/drivers/net/iavf/iavf_vchnl.c @@ -0,0 +1,1077 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Intel Corporation + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "iavf.h" +#include "iavf_rxtx.h" + +#define MAX_TRY_TIMES 200 +#define ASQ_DELAY_MS 10 + +/* Read data in admin queue to get msg from pf driver */ +static enum iavf_status +iavf_read_msg_from_pf(struct iavf_adapter *adapter, uint16_t buf_len, + uint8_t *buf) +{ + struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter); + struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter); + struct iavf_arq_event_info event; + enum virtchnl_ops opcode; + int ret; + + event.buf_len = buf_len; + event.msg_buf = buf; + ret = iavf_clean_arq_element(hw, &event, NULL); + /* Can't read any msg from adminQ */ + if (ret) { + PMD_DRV_LOG(DEBUG, "Can't read msg from AQ"); + return ret; + } + + opcode = (enum virtchnl_ops)rte_le_to_cpu_32(event.desc.cookie_high); + vf->cmd_retval = (enum virtchnl_status_code)rte_le_to_cpu_32( + event.desc.cookie_low); + + PMD_DRV_LOG(DEBUG, "AQ from pf carries opcode %u, retval %d", + opcode, vf->cmd_retval); + + if (opcode != vf->pend_cmd) { + PMD_DRV_LOG(WARNING, "command mismatch, expect %u, get %u", + vf->pend_cmd, opcode); + return IAVF_ERR_OPCODE_MISMATCH; + } + + return IAVF_SUCCESS; +} + +static int +iavf_execute_vf_cmd(struct iavf_adapter *adapter, struct iavf_cmd_info *args) +{ + struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter); + struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter); + enum iavf_status ret; + int err = 0; + int i = 0; + + if (_atomic_set_cmd(vf, args->ops)) + return -1; + + ret = iavf_aq_send_msg_to_pf(hw, args->ops, IAVF_SUCCESS, + args->in_args, args->in_args_size, NULL); + if (ret) { + PMD_DRV_LOG(ERR, "fail to send cmd %d", args->ops); + _clear_cmd(vf); + return err; + } + + switch (args->ops) { + case VIRTCHNL_OP_RESET_VF: + /*no need to wait for response */ + _clear_cmd(vf); + break; + case VIRTCHNL_OP_VERSION: + case VIRTCHNL_OP_GET_VF_RESOURCES: + case VIRTCHNL_OP_GET_SUPPORTED_RXDIDS: + /* for init virtchnl ops, need to poll the response */ + do { + ret = iavf_read_msg_from_pf(adapter, args->out_size, + args->out_buffer); + if (ret == IAVF_SUCCESS) + 
break; + rte_delay_ms(ASQ_DELAY_MS); + } while (i++ < MAX_TRY_TIMES); + if (i >= MAX_TRY_TIMES || + vf->cmd_retval != VIRTCHNL_STATUS_SUCCESS) { + err = -1; + PMD_DRV_LOG(ERR, "No response or return failure (%d)" + " for cmd %d", vf->cmd_retval, args->ops); + } + _clear_cmd(vf); + break; + + default: + /* For other virtchnl ops in running time, + * wait for the cmd done flag. + */ + do { + if (vf->pend_cmd == VIRTCHNL_OP_UNKNOWN) + break; + rte_delay_ms(ASQ_DELAY_MS); + /* If don't read msg or read sys event, continue */ + } while (i++ < MAX_TRY_TIMES); + /* If there's no response is received, clear command */ + if (i >= MAX_TRY_TIMES || + vf->cmd_retval != VIRTCHNL_STATUS_SUCCESS) { + err = -1; + PMD_DRV_LOG(ERR, "No response or return failure (%d)" + " for cmd %d", vf->cmd_retval, args->ops); + _clear_cmd(vf); + } + break; + } + + return err; +} + +static uint32_t +iavf_convert_link_speed(enum virtchnl_link_speed virt_link_speed) +{ + uint32_t speed; + + switch (virt_link_speed) { + case VIRTCHNL_LINK_SPEED_100MB: + speed = 100; + break; + case VIRTCHNL_LINK_SPEED_1GB: + speed = 1000; + break; + case VIRTCHNL_LINK_SPEED_10GB: + speed = 10000; + break; + case VIRTCHNL_LINK_SPEED_40GB: + speed = 40000; + break; + case VIRTCHNL_LINK_SPEED_20GB: + speed = 20000; + break; + case VIRTCHNL_LINK_SPEED_25GB: + speed = 25000; + break; + case VIRTCHNL_LINK_SPEED_2_5GB: + speed = 2500; + break; + case VIRTCHNL_LINK_SPEED_5GB: + speed = 5000; + break; + default: + speed = 0; + break; + } + + return speed; +} + +static void +iavf_handle_pf_event_msg(struct rte_eth_dev *dev, uint8_t *msg, + uint16_t msglen) +{ + struct virtchnl_pf_event *pf_msg = + (struct virtchnl_pf_event *)msg; + struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + + if (msglen < sizeof(struct virtchnl_pf_event)) { + PMD_DRV_LOG(DEBUG, "Error event"); + return; + } + switch (pf_msg->event) { + case VIRTCHNL_EVENT_RESET_IMPENDING: + PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_RESET_IMPENDING event"); + _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET, + NULL); + break; + case VIRTCHNL_EVENT_LINK_CHANGE: + PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_LINK_CHANGE event"); + vf->link_up = pf_msg->event_data.link_event.link_status; + if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) { + vf->link_speed = + pf_msg->event_data.link_event_adv.link_speed; + } else { + enum virtchnl_link_speed speed; + speed = pf_msg->event_data.link_event.link_speed; + vf->link_speed = iavf_convert_link_speed(speed); + } + iavf_dev_link_update(dev, 0); + _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, + NULL); + break; + case VIRTCHNL_EVENT_PF_DRIVER_CLOSE: + PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_PF_DRIVER_CLOSE event"); + break; + default: + PMD_DRV_LOG(ERR, " unknown event received %u", pf_msg->event); + break; + } +} + +void +iavf_handle_virtchnl_msg(struct rte_eth_dev *dev) +{ + struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + struct iavf_arq_event_info info; + uint16_t pending, aq_opc; + enum virtchnl_ops msg_opc; + enum iavf_status msg_ret; + int ret; + + info.buf_len = IAVF_AQ_BUF_SZ; + if (!vf->aq_resp) { + PMD_DRV_LOG(ERR, "Buffer for adminq resp should not be NULL"); + return; + } + info.msg_buf = vf->aq_resp; + + pending = 1; + while (pending) { + ret = iavf_clean_arq_element(hw, &info, &pending); + + if (ret != IAVF_SUCCESS) { + PMD_DRV_LOG(INFO, "Failed to read msg from AdminQ," + "ret: %d", ret); + break; + } + aq_opc = 
rte_le_to_cpu_16(info.desc.opcode); + /* For the message sent from pf to vf, opcode is stored in + * cookie_high of struct iavf_aq_desc, while return error code + * are stored in cookie_low, Which is done by PF driver. + */ + msg_opc = (enum virtchnl_ops)rte_le_to_cpu_32( + info.desc.cookie_high); + msg_ret = (enum iavf_status)rte_le_to_cpu_32( + info.desc.cookie_low); + switch (aq_opc) { + case iavf_aqc_opc_send_msg_to_vf: + if (msg_opc == VIRTCHNL_OP_EVENT) { + iavf_handle_pf_event_msg(dev, info.msg_buf, + info.msg_len); + } else { + /* read message and it's expected one */ + if (msg_opc == vf->pend_cmd) + _notify_cmd(vf, msg_ret); + else + PMD_DRV_LOG(ERR, "command mismatch," + "expect %u, get %u", + vf->pend_cmd, msg_opc); + PMD_DRV_LOG(DEBUG, + "adminq response is received," + " opcode = %d", msg_opc); + } + break; + default: + PMD_DRV_LOG(ERR, "Request %u is not supported yet", + aq_opc); + break; + } + } +} + +int +iavf_enable_vlan_strip(struct iavf_adapter *adapter) +{ + struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter); + struct iavf_cmd_info args; + int ret; + + memset(&args, 0, sizeof(args)); + args.ops = VIRTCHNL_OP_ENABLE_VLAN_STRIPPING; + args.in_args = NULL; + args.in_args_size = 0; + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; + ret = iavf_execute_vf_cmd(adapter, &args); + if (ret) + PMD_DRV_LOG(ERR, "Failed to execute command of" + " OP_ENABLE_VLAN_STRIPPING"); + + return ret; +} + +int +iavf_disable_vlan_strip(struct iavf_adapter *adapter) +{ + struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter); + struct iavf_cmd_info args; + int ret; + + memset(&args, 0, sizeof(args)); + args.ops = VIRTCHNL_OP_DISABLE_VLAN_STRIPPING; + args.in_args = NULL; + args.in_args_size = 0; + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; + ret = iavf_execute_vf_cmd(adapter, &args); + if (ret) + PMD_DRV_LOG(ERR, "Failed to execute command of" + " OP_DISABLE_VLAN_STRIPPING"); + + return ret; +} + +#define VIRTCHNL_VERSION_MAJOR_START 1 +#define VIRTCHNL_VERSION_MINOR_START 1 + +/* Check API version with sync wait until version read from admin queue */ +int +iavf_check_api_version(struct iavf_adapter *adapter) +{ + struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter); + struct virtchnl_version_info version, *pver; + struct iavf_cmd_info args; + int err; + + version.major = VIRTCHNL_VERSION_MAJOR; + version.minor = VIRTCHNL_VERSION_MINOR; + + args.ops = VIRTCHNL_OP_VERSION; + args.in_args = (uint8_t *)&version; + args.in_args_size = sizeof(version); + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; + + err = iavf_execute_vf_cmd(adapter, &args); + if (err) { + PMD_INIT_LOG(ERR, "Fail to execute command of OP_VERSION"); + return err; + } + + pver = (struct virtchnl_version_info *)args.out_buffer; + vf->virtchnl_version = *pver; + + if (vf->virtchnl_version.major < VIRTCHNL_VERSION_MAJOR_START || + (vf->virtchnl_version.major == VIRTCHNL_VERSION_MAJOR_START && + vf->virtchnl_version.minor < VIRTCHNL_VERSION_MINOR_START)) { + PMD_INIT_LOG(ERR, "VIRTCHNL API version should not be lower" + " than (%u.%u) to support Adapative VF", + VIRTCHNL_VERSION_MAJOR_START, + VIRTCHNL_VERSION_MAJOR_START); + return -1; + } else if (vf->virtchnl_version.major > VIRTCHNL_VERSION_MAJOR || + (vf->virtchnl_version.major == VIRTCHNL_VERSION_MAJOR && + vf->virtchnl_version.minor > VIRTCHNL_VERSION_MINOR)) { + PMD_INIT_LOG(ERR, "PF/VF API version mismatch:(%u.%u)-(%u.%u)", + vf->virtchnl_version.major, + vf->virtchnl_version.minor, + VIRTCHNL_VERSION_MAJOR, + 
VIRTCHNL_VERSION_MINOR); + return -1; + } + + PMD_DRV_LOG(DEBUG, "Peer is supported PF host"); + return 0; +} + +int +iavf_get_vf_resource(struct iavf_adapter *adapter) +{ + struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter); + struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter); + struct iavf_cmd_info args; + uint32_t caps, len; + int err, i; + + args.ops = VIRTCHNL_OP_GET_VF_RESOURCES; + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; + + caps = IAVF_BASIC_OFFLOAD_CAPS | VIRTCHNL_VF_CAP_ADV_LINK_SPEED | + VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC | + VIRTCHNL_VF_OFFLOAD_FDIR_PF | + VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF; + + args.in_args = (uint8_t *)∩︀ + args.in_args_size = sizeof(caps); + + err = iavf_execute_vf_cmd(adapter, &args); + + if (err) { + PMD_DRV_LOG(ERR, + "Failed to execute command of OP_GET_VF_RESOURCE"); + return -1; + } + + len = sizeof(struct virtchnl_vf_resource) + + IAVF_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource); + + rte_memcpy(vf->vf_res, args.out_buffer, + RTE_MIN(args.out_size, len)); + /* parse VF config message back from PF*/ + iavf_vf_parse_hw_config(hw, vf->vf_res); + for (i = 0; i < vf->vf_res->num_vsis; i++) { + if (vf->vf_res->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV) + vf->vsi_res = &vf->vf_res->vsi_res[i]; + } + + if (!vf->vsi_res) { + PMD_INIT_LOG(ERR, "no LAN VSI found"); + return -1; + } + + vf->vsi.vsi_id = vf->vsi_res->vsi_id; + vf->vsi.nb_qps = vf->vsi_res->num_queue_pairs; + vf->vsi.adapter = adapter; + + return 0; +} + +int +iavf_get_supported_rxdid(struct iavf_adapter *adapter) +{ + struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter); + struct iavf_cmd_info args; + int ret; + + args.ops = VIRTCHNL_OP_GET_SUPPORTED_RXDIDS; + args.in_args = NULL; + args.in_args_size = 0; + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; + + ret = iavf_execute_vf_cmd(adapter, &args); + if (ret) { + PMD_DRV_LOG(ERR, + "Failed to execute command of OP_GET_SUPPORTED_RXDIDS"); + return ret; + } + + vf->supported_rxdid = + ((struct virtchnl_supported_rxdids *)args.out_buffer)->supported_rxdids; + + return 0; +} + +int +iavf_enable_queues(struct iavf_adapter *adapter) +{ + struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter); + struct virtchnl_queue_select queue_select; + struct iavf_cmd_info args; + int err; + + memset(&queue_select, 0, sizeof(queue_select)); + queue_select.vsi_id = vf->vsi_res->vsi_id; + + queue_select.rx_queues = BIT(adapter->eth_dev->data->nb_rx_queues) - 1; + queue_select.tx_queues = BIT(adapter->eth_dev->data->nb_tx_queues) - 1; + + args.ops = VIRTCHNL_OP_ENABLE_QUEUES; + args.in_args = (u8 *)&queue_select; + args.in_args_size = sizeof(queue_select); + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; + err = iavf_execute_vf_cmd(adapter, &args); + if (err) { + PMD_DRV_LOG(ERR, + "Failed to execute command of OP_ENABLE_QUEUES"); + return err; + } + return 0; +} + +int +iavf_disable_queues(struct iavf_adapter *adapter) +{ + struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter); + struct virtchnl_queue_select queue_select; + struct iavf_cmd_info args; + int err; + + memset(&queue_select, 0, sizeof(queue_select)); + queue_select.vsi_id = vf->vsi_res->vsi_id; + + queue_select.rx_queues = BIT(adapter->eth_dev->data->nb_rx_queues) - 1; + queue_select.tx_queues = BIT(adapter->eth_dev->data->nb_tx_queues) - 1; + + args.ops = VIRTCHNL_OP_DISABLE_QUEUES; + args.in_args = (u8 *)&queue_select; + args.in_args_size = sizeof(queue_select); + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; + err = 
iavf_execute_vf_cmd(adapter, &args); + if (err) { + PMD_DRV_LOG(ERR, + "Failed to execute command of OP_DISABLE_QUEUES"); + return err; + } + return 0; +} + +int +iavf_switch_queue(struct iavf_adapter *adapter, uint16_t qid, + bool rx, bool on) +{ + struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter); + struct virtchnl_queue_select queue_select; + struct iavf_cmd_info args; + int err; + + memset(&queue_select, 0, sizeof(queue_select)); + queue_select.vsi_id = vf->vsi_res->vsi_id; + if (rx) + queue_select.rx_queues |= 1 << qid; + else + queue_select.tx_queues |= 1 << qid; + + if (on) + args.ops = VIRTCHNL_OP_ENABLE_QUEUES; + else + args.ops = VIRTCHNL_OP_DISABLE_QUEUES; + args.in_args = (u8 *)&queue_select; + args.in_args_size = sizeof(queue_select); + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; + err = iavf_execute_vf_cmd(adapter, &args); + if (err) + PMD_DRV_LOG(ERR, "Failed to execute command of %s", + on ? "OP_ENABLE_QUEUES" : "OP_DISABLE_QUEUES"); + return err; +} + +int +iavf_configure_rss_lut(struct iavf_adapter *adapter) +{ + struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter); + struct virtchnl_rss_lut *rss_lut; + struct iavf_cmd_info args; + int len, err = 0; + + len = sizeof(*rss_lut) + vf->vf_res->rss_lut_size - 1; + rss_lut = rte_zmalloc("rss_lut", len, 0); + if (!rss_lut) + return -ENOMEM; + + rss_lut->vsi_id = vf->vsi_res->vsi_id; + rss_lut->lut_entries = vf->vf_res->rss_lut_size; + rte_memcpy(rss_lut->lut, vf->rss_lut, vf->vf_res->rss_lut_size); + + args.ops = VIRTCHNL_OP_CONFIG_RSS_LUT; + args.in_args = (u8 *)rss_lut; + args.in_args_size = len; + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; + + err = iavf_execute_vf_cmd(adapter, &args); + if (err) + PMD_DRV_LOG(ERR, + "Failed to execute command of OP_CONFIG_RSS_LUT"); + + rte_free(rss_lut); + return err; +} + +int +iavf_configure_rss_key(struct iavf_adapter *adapter) +{ + struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter); + struct virtchnl_rss_key *rss_key; + struct iavf_cmd_info args; + int len, err = 0; + + len = sizeof(*rss_key) + vf->vf_res->rss_key_size - 1; + rss_key = rte_zmalloc("rss_key", len, 0); + if (!rss_key) + return -ENOMEM; + + rss_key->vsi_id = vf->vsi_res->vsi_id; + rss_key->key_len = vf->vf_res->rss_key_size; + rte_memcpy(rss_key->key, vf->rss_key, vf->vf_res->rss_key_size); + + args.ops = VIRTCHNL_OP_CONFIG_RSS_KEY; + args.in_args = (u8 *)rss_key; + args.in_args_size = len; + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; + + err = iavf_execute_vf_cmd(adapter, &args); + if (err) + PMD_DRV_LOG(ERR, + "Failed to execute command of OP_CONFIG_RSS_KEY"); + + rte_free(rss_key); + return err; +} + +int +iavf_configure_queues(struct iavf_adapter *adapter) +{ + struct iavf_rx_queue **rxq = + (struct iavf_rx_queue **)adapter->eth_dev->data->rx_queues; + struct iavf_tx_queue **txq = + (struct iavf_tx_queue **)adapter->eth_dev->data->tx_queues; + struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter); + struct virtchnl_vsi_queue_config_info *vc_config; + struct virtchnl_queue_pair_info *vc_qp; + struct iavf_cmd_info args; + uint16_t i, size; + int err; + + size = sizeof(*vc_config) + + sizeof(vc_config->qpair[0]) * vf->num_queue_pairs; + vc_config = rte_zmalloc("cfg_queue", size, 0); + if (!vc_config) + return -ENOMEM; + + vc_config->vsi_id = vf->vsi_res->vsi_id; + vc_config->num_queue_pairs = vf->num_queue_pairs; + + for (i = 0, vc_qp = vc_config->qpair; + i < vf->num_queue_pairs; + i++, vc_qp++) { + vc_qp->txq.vsi_id = vf->vsi_res->vsi_id; + 
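The RSS LUT and key messages end in a one-element placeholder array, so the allocation size is the struct size plus the real element count minus that placeholder. A small sketch of the sizing pattern with an invented message layout and standard allocators standing in for rte_zmalloc:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative message with a 1-byte placeholder array at the end. */
struct lut_msg {
	uint16_t vsi_id;
	uint16_t lut_entries;
	uint8_t  lut[1];   /* really lut_entries bytes long */
};

int main(void)
{
	uint16_t lut_size = 64;
	/* -1 compensates for the placeholder element already in the struct */
	size_t len = sizeof(struct lut_msg) + lut_size - 1;

	struct lut_msg *msg = calloc(1, len);
	if (!msg)
		return 1;
	msg->vsi_id = 3;
	msg->lut_entries = lut_size;
	memset(msg->lut, 0xab, lut_size);

	printf("message length = %zu bytes\n", len);
	free(msg);
	return 0;
}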
vc_qp->txq.queue_id = i; + /* Virtchnnl configure queues by pairs */ + if (i < adapter->eth_dev->data->nb_tx_queues) { + vc_qp->txq.ring_len = txq[i]->nb_tx_desc; + vc_qp->txq.dma_ring_addr = txq[i]->tx_ring_phys_addr; + } + vc_qp->rxq.vsi_id = vf->vsi_res->vsi_id; + vc_qp->rxq.queue_id = i; + vc_qp->rxq.max_pkt_size = vf->max_pkt_len; + /* Virtchnnl configure queues by pairs */ + if (i < adapter->eth_dev->data->nb_rx_queues) { + vc_qp->rxq.ring_len = rxq[i]->nb_rx_desc; + vc_qp->rxq.dma_ring_addr = rxq[i]->rx_ring_phys_addr; + vc_qp->rxq.databuffer_size = rxq[i]->rx_buf_len; + } + +#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC + if (vf->vf_res->vf_cap_flags & + VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC && + vf->supported_rxdid & BIT(IAVF_RXDID_COMMS_OVS_1)) { + vc_qp->rxq.rxdid = IAVF_RXDID_COMMS_OVS_1; + PMD_DRV_LOG(NOTICE, "request RXDID == %d in " + "Queue[%d]", vc_qp->rxq.rxdid, i); + } else { + vc_qp->rxq.rxdid = IAVF_RXDID_LEGACY_1; + PMD_DRV_LOG(NOTICE, "request RXDID == %d in " + "Queue[%d]", vc_qp->rxq.rxdid, i); + } +#else + if (vf->vf_res->vf_cap_flags & + VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC && + vf->supported_rxdid & BIT(IAVF_RXDID_LEGACY_0)) { + vc_qp->rxq.rxdid = IAVF_RXDID_LEGACY_0; + PMD_DRV_LOG(NOTICE, "request RXDID == %d in " + "Queue[%d]", vc_qp->rxq.rxdid, i); + } else { + PMD_DRV_LOG(ERR, "RXDID == 0 is not supported"); + return -1; + } +#endif + } + + memset(&args, 0, sizeof(args)); + args.ops = VIRTCHNL_OP_CONFIG_VSI_QUEUES; + args.in_args = (uint8_t *)vc_config; + args.in_args_size = size; + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; + + err = iavf_execute_vf_cmd(adapter, &args); + if (err) + PMD_DRV_LOG(ERR, "Failed to execute command of" + " VIRTCHNL_OP_CONFIG_VSI_QUEUES"); + + rte_free(vc_config); + return err; +} + +int +iavf_config_irq_map(struct iavf_adapter *adapter) +{ + struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter); + struct virtchnl_irq_map_info *map_info; + struct virtchnl_vector_map *vecmap; + struct iavf_cmd_info args; + int len, i, err; + + len = sizeof(struct virtchnl_irq_map_info) + + sizeof(struct virtchnl_vector_map) * vf->nb_msix; + + map_info = rte_zmalloc("map_info", len, 0); + if (!map_info) + return -ENOMEM; + + map_info->num_vectors = vf->nb_msix; + for (i = 0; i < vf->nb_msix; i++) { + vecmap = &map_info->vecmap[i]; + vecmap->vsi_id = vf->vsi_res->vsi_id; + vecmap->rxitr_idx = IAVF_ITR_INDEX_DEFAULT; + vecmap->vector_id = vf->msix_base + i; + vecmap->txq_map = 0; + vecmap->rxq_map = vf->rxq_map[vf->msix_base + i]; + } + + args.ops = VIRTCHNL_OP_CONFIG_IRQ_MAP; + args.in_args = (u8 *)map_info; + args.in_args_size = len; + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; + err = iavf_execute_vf_cmd(adapter, &args); + if (err) + PMD_DRV_LOG(ERR, "fail to execute command OP_CONFIG_IRQ_MAP"); + + rte_free(map_info); + return err; +} + +void +iavf_add_del_all_mac_addr(struct iavf_adapter *adapter, bool add) +{ + struct virtchnl_ether_addr_list *list; + struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter); + struct rte_ether_addr *addr; + struct iavf_cmd_info args; + int len, err, i, j; + int next_begin = 0; + int begin = 0; + + do { + j = 0; + len = sizeof(struct virtchnl_ether_addr_list); + for (i = begin; i < IAVF_NUM_MACADDR_MAX; i++, next_begin++) { + addr = &adapter->eth_dev->data->mac_addrs[i]; + if (rte_is_zero_ether_addr(addr)) + continue; + len += sizeof(struct virtchnl_ether_addr); + if (len >= IAVF_AQ_BUF_SZ) { + next_begin = i + 1; + break; + } + } + + list = rte_zmalloc("iavf_del_mac_buffer", len, 0); + 
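iavf_add_del_all_mac_addr() below walks the MAC table in chunks sized to stay under the admin-queue buffer limit, sending one message per chunk and resuming where the previous chunk stopped. The scalar sketch here reproduces only that re-batching arithmetic, with made-up header, entry, and buffer sizes.

#include <stdio.h>

#define MAX_ENTRIES 128
#define HDR_LEN     8     /* illustrative list header size   */
#define ENTRY_LEN   10    /* illustrative per-address size   */
#define BUF_LIMIT   100   /* illustrative admin-queue buffer */

int main(void)
{
	int begin = 0;

	/* One message per chunk; each message stays under BUF_LIMIT. */
	while (begin < MAX_ENTRIES) {
		int len = HDR_LEN;
		int next_begin = begin;

		while (next_begin < MAX_ENTRIES &&
		       len + ENTRY_LEN < BUF_LIMIT) {
			len += ENTRY_LEN;
			next_begin++;
		}

		printf("send entries [%d, %d) in a %d-byte message\n",
		       begin, next_begin, len);
		begin = next_begin;
	}
	return 0;
}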
if (!list) { + PMD_DRV_LOG(ERR, "fail to allocate memory"); + return; + } + + for (i = begin; i < next_begin; i++) { + addr = &adapter->eth_dev->data->mac_addrs[i]; + if (rte_is_zero_ether_addr(addr)) + continue; + rte_memcpy(list->list[j].addr, addr->addr_bytes, + sizeof(addr->addr_bytes)); + PMD_DRV_LOG(DEBUG, "add/rm mac:%x:%x:%x:%x:%x:%x", + addr->addr_bytes[0], addr->addr_bytes[1], + addr->addr_bytes[2], addr->addr_bytes[3], + addr->addr_bytes[4], addr->addr_bytes[5]); + j++; + } + list->vsi_id = vf->vsi_res->vsi_id; + list->num_elements = j; + args.ops = add ? VIRTCHNL_OP_ADD_ETH_ADDR : + VIRTCHNL_OP_DEL_ETH_ADDR; + args.in_args = (uint8_t *)list; + args.in_args_size = len; + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; + err = iavf_execute_vf_cmd(adapter, &args); + if (err) + PMD_DRV_LOG(ERR, "fail to execute command %s", + add ? "OP_ADD_ETHER_ADDRESS" : + "OP_DEL_ETHER_ADDRESS"); + rte_free(list); + begin = next_begin; + } while (begin < IAVF_NUM_MACADDR_MAX); +} + +int +iavf_query_stats(struct iavf_adapter *adapter, + struct virtchnl_eth_stats **pstats) +{ + struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter); + struct virtchnl_queue_select q_stats; + struct iavf_cmd_info args; + int err; + + memset(&q_stats, 0, sizeof(q_stats)); + q_stats.vsi_id = vf->vsi_res->vsi_id; + args.ops = VIRTCHNL_OP_GET_STATS; + args.in_args = (uint8_t *)&q_stats; + args.in_args_size = sizeof(q_stats); + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; + + err = iavf_execute_vf_cmd(adapter, &args); + if (err) { + PMD_DRV_LOG(ERR, "fail to execute command OP_GET_STATS"); + *pstats = NULL; + return err; + } + *pstats = (struct virtchnl_eth_stats *)args.out_buffer; + return 0; +} + +int +iavf_config_promisc(struct iavf_adapter *adapter, + bool enable_unicast, + bool enable_multicast) +{ + struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter); + struct virtchnl_promisc_info promisc; + struct iavf_cmd_info args; + int err; + + promisc.flags = 0; + promisc.vsi_id = vf->vsi_res->vsi_id; + + if (enable_unicast) + promisc.flags |= FLAG_VF_UNICAST_PROMISC; + + if (enable_multicast) + promisc.flags |= FLAG_VF_MULTICAST_PROMISC; + + args.ops = VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE; + args.in_args = (uint8_t *)&promisc; + args.in_args_size = sizeof(promisc); + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; + + err = iavf_execute_vf_cmd(adapter, &args); + + if (err) + PMD_DRV_LOG(ERR, + "fail to execute command CONFIG_PROMISCUOUS_MODE"); + return err; +} + +int +iavf_add_del_eth_addr(struct iavf_adapter *adapter, struct rte_ether_addr *addr, + bool add) +{ + struct virtchnl_ether_addr_list *list; + struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter); + uint8_t cmd_buffer[sizeof(struct virtchnl_ether_addr_list) + + sizeof(struct virtchnl_ether_addr)]; + struct iavf_cmd_info args; + int err; + + list = (struct virtchnl_ether_addr_list *)cmd_buffer; + list->vsi_id = vf->vsi_res->vsi_id; + list->num_elements = 1; + rte_memcpy(list->list[0].addr, addr->addr_bytes, + sizeof(addr->addr_bytes)); + + args.ops = add ? VIRTCHNL_OP_ADD_ETH_ADDR : VIRTCHNL_OP_DEL_ETH_ADDR; + args.in_args = cmd_buffer; + args.in_args_size = sizeof(cmd_buffer); + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; + err = iavf_execute_vf_cmd(adapter, &args); + if (err) + PMD_DRV_LOG(ERR, "fail to execute command %s", + add ? 
"OP_ADD_ETH_ADDR" : "OP_DEL_ETH_ADDR"); + return err; +} + +int +iavf_add_del_vlan(struct iavf_adapter *adapter, uint16_t vlanid, bool add) +{ + struct virtchnl_vlan_filter_list *vlan_list; + struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter); + uint8_t cmd_buffer[sizeof(struct virtchnl_vlan_filter_list) + + sizeof(uint16_t)]; + struct iavf_cmd_info args; + int err; + + vlan_list = (struct virtchnl_vlan_filter_list *)cmd_buffer; + vlan_list->vsi_id = vf->vsi_res->vsi_id; + vlan_list->num_elements = 1; + vlan_list->vlan_id[0] = vlanid; + + args.ops = add ? VIRTCHNL_OP_ADD_VLAN : VIRTCHNL_OP_DEL_VLAN; + args.in_args = cmd_buffer; + args.in_args_size = sizeof(cmd_buffer); + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; + err = iavf_execute_vf_cmd(adapter, &args); + if (err) + PMD_DRV_LOG(ERR, "fail to execute command %s", + add ? "OP_ADD_VLAN" : "OP_DEL_VLAN"); + + return err; +} + +int +iavf_fdir_add(struct iavf_adapter *adapter, + struct iavf_fdir_conf *filter) +{ + struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter); + struct virtchnl_fdir_add *fdir_ret; + + struct iavf_cmd_info args; + int err; + + filter->add_fltr.vsi_id = vf->vsi_res->vsi_id; + filter->add_fltr.validate_only = 0; + + args.ops = VIRTCHNL_OP_ADD_FDIR_FILTER; + args.in_args = (uint8_t *)(&filter->add_fltr); + args.in_args_size = sizeof(*(&filter->add_fltr)); + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; + + err = iavf_execute_vf_cmd(adapter, &args); + if (err) { + PMD_DRV_LOG(ERR, "fail to execute command OP_ADD_FDIR_FILTER"); + return err; + } + + fdir_ret = (struct virtchnl_fdir_add *)args.out_buffer; + filter->flow_id = fdir_ret->flow_id; + + if (fdir_ret->status == VIRTCHNL_FDIR_SUCCESS) { + PMD_DRV_LOG(INFO, + "Succeed in adding rule request by PF"); + } else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE) { + PMD_DRV_LOG(ERR, + "Failed to add rule request due to no hw resource"); + return -1; + } else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_EXIST) { + PMD_DRV_LOG(ERR, + "Failed to add rule request due to the rule is already existed"); + return -1; + } else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT) { + PMD_DRV_LOG(ERR, + "Failed to add rule request due to the rule is conflict with existing rule"); + return -1; + } else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_INVALID) { + PMD_DRV_LOG(ERR, + "Failed to add rule request due to the hw doesn't support"); + return -1; + } else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT) { + PMD_DRV_LOG(ERR, + "Failed to add rule request due to time out for programming"); + return -1; + } else { + PMD_DRV_LOG(ERR, + "Failed to add rule request due to other reasons"); + return -1; + } + + return 0; +}; + +int +iavf_fdir_del(struct iavf_adapter *adapter, + struct iavf_fdir_conf *filter) +{ + struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter); + struct virtchnl_fdir_del *fdir_ret; + + struct iavf_cmd_info args; + int err; + + filter->del_fltr.vsi_id = vf->vsi_res->vsi_id; + filter->del_fltr.flow_id = filter->flow_id; + + args.ops = VIRTCHNL_OP_DEL_FDIR_FILTER; + args.in_args = (uint8_t *)(&filter->del_fltr); + args.in_args_size = sizeof(filter->del_fltr); + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; + + err = iavf_execute_vf_cmd(adapter, &args); + if (err) { + PMD_DRV_LOG(ERR, "fail to execute command OP_DEL_FDIR_FILTER"); + return err; + } + + fdir_ret = (struct virtchnl_fdir_del *)args.out_buffer; + + if (fdir_ret->status == VIRTCHNL_FDIR_SUCCESS) { 
+ PMD_DRV_LOG(INFO, + "Succeed in deleting rule request by PF"); + } else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST) { + PMD_DRV_LOG(ERR, + "Failed to delete rule request due to this rule doesn't exist"); + return -1; + } else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT) { + PMD_DRV_LOG(ERR, + "Failed to delete rule request due to time out for programming"); + return -1; + } else { + PMD_DRV_LOG(ERR, + "Failed to delete rule request due to other reasons"); + return -1; + } + + return 0; +}; + +int +iavf_fdir_check(struct iavf_adapter *adapter, + struct iavf_fdir_conf *filter) +{ + struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter); + struct virtchnl_fdir_add *fdir_ret; + + struct iavf_cmd_info args; + int err; + + filter->add_fltr.vsi_id = vf->vsi_res->vsi_id; + filter->add_fltr.validate_only = 1; + + args.ops = VIRTCHNL_OP_ADD_FDIR_FILTER; + args.in_args = (uint8_t *)(&filter->add_fltr); + args.in_args_size = sizeof(*(&filter->add_fltr)); + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; + + err = iavf_execute_vf_cmd(adapter, &args); + if (err) { + PMD_DRV_LOG(ERR, "fail to check flow direcotor rule"); + return err; + } + + fdir_ret = (struct virtchnl_fdir_add *)args.out_buffer; + + if (fdir_ret->status == VIRTCHNL_FDIR_SUCCESS) { + PMD_DRV_LOG(INFO, + "Succeed in checking rule request by PF"); + } else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_INVALID) { + PMD_DRV_LOG(ERR, + "Failed to check rule request due to parameters validation" + " or HW doesn't support"); + return -1; + } else { + PMD_DRV_LOG(ERR, + "Failed to check rule request due to other reasons"); + return -1; + } + + return 0; +} + +int +iavf_add_del_rss_cfg(struct iavf_adapter *adapter, + struct virtchnl_rss_cfg *rss_cfg, bool add) +{ + struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter); + struct iavf_cmd_info args; + int err; + + memset(&args, 0, sizeof(args)); + args.ops = add ? VIRTCHNL_OP_ADD_RSS_CFG : + VIRTCHNL_OP_DEL_RSS_CFG; + args.in_args = (u8 *)rss_cfg; + args.in_args_size = sizeof(*rss_cfg); + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; + + err = iavf_execute_vf_cmd(adapter, &args); + if (err) + PMD_DRV_LOG(ERR, + "Failed to execute command of %s", + add ? "OP_ADD_RSS_CFG" : + "OP_DEL_RSS_INPUT_CFG"); + + return err; +} diff --git a/src/spdk/dpdk/drivers/net/iavf/meson.build b/src/spdk/dpdk/drivers/net/iavf/meson.build new file mode 100644 index 000000000..a3fad363d --- /dev/null +++ b/src/spdk/dpdk/drivers/net/iavf/meson.build @@ -0,0 +1,37 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2018 Luca Boccassi + +cflags += ['-Wno-strict-aliasing'] + +includes += include_directories('../../common/iavf') +deps += ['common_iavf'] + +sources = files( + 'iavf_ethdev.c', + 'iavf_rxtx.c', + 'iavf_vchnl.c', + 'iavf_generic_flow.c', + 'iavf_fdir.c', + 'iavf_hash.c', +) + +if arch_subdir == 'x86' + sources += files('iavf_rxtx_vec_sse.c') + + # compile AVX2 version if either: + # a. we have AVX supported in minimum instruction set baseline + # b. 
it's not minimum instruction set, but supported by compiler + if dpdk_conf.has('RTE_MACHINE_CPUFLAG_AVX2') + cflags += ['-DCC_AVX2_SUPPORT'] + sources += files('iavf_rxtx_vec_avx2.c') + elif cc.has_argument('-mavx2') + cflags += ['-DCC_AVX2_SUPPORT'] + iavf_avx2_lib = static_library('iavf_avx2_lib', + 'iavf_rxtx_vec_avx2.c', + dependencies: [static_rte_ethdev, + static_rte_kvargs, static_rte_hash], + include_directories: includes, + c_args: [cflags, '-mavx2']) + objs += iavf_avx2_lib.extract_objects('iavf_rxtx_vec_avx2.c') + endif +endif diff --git a/src/spdk/dpdk/drivers/net/iavf/rte_pmd_iavf_version.map b/src/spdk/dpdk/drivers/net/iavf/rte_pmd_iavf_version.map new file mode 100644 index 000000000..f9f17e4f6 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/iavf/rte_pmd_iavf_version.map @@ -0,0 +1,3 @@ +DPDK_20.0 { + local: *; +}; diff --git a/src/spdk/dpdk/drivers/net/ice/Makefile b/src/spdk/dpdk/drivers/net/ice/Makefile new file mode 100644 index 000000000..34cd4024b --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ice/Makefile @@ -0,0 +1,96 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2018 Intel Corporation + +include $(RTE_SDK)/mk/rte.vars.mk + +# +# library name +# +LIB = librte_pmd_ice.a + +CFLAGS += -O3 +CFLAGS += $(WERROR_FLAGS) +CFLAGS += -I$(RTE_SDK)/drivers/common/iavf + +LDLIBS += -lrte_eal -lrte_mbuf -lrte_ethdev -lrte_kvargs +LDLIBS += -lrte_bus_pci -lrte_mempool -lrte_hash +LDLIBS += -lrte_net -lrte_common_iavf +LDLIBS += -lpthread + +EXPORT_MAP := rte_pmd_ice_version.map + +# +# Add extra flags for base driver files (also known as shared code) +# to disable warnings +# +ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y) +CFLAGS_BASE_DRIVER += +else ifeq ($(CONFIG_RTE_TOOLCHAIN_CLANG),y) +CFLAGS_BASE_DRIVER += -Wno-unused-parameter +CFLAGS_BASE_DRIVER += -Wno-unused-variable +else +CFLAGS_BASE_DRIVER += -Wno-unused-parameter +CFLAGS_BASE_DRIVER += -Wno-unused-variable + +ifeq ($(shell test $(GCC_VERSION) -ge 44 && echo 1), 1) +CFLAGS_BASE_DRIVER += -Wno-unused-but-set-variable +endif + +endif +OBJS_BASE_DRIVER=$(patsubst %.c,%.o,$(notdir $(wildcard $(SRCDIR)/base/*.c))) +$(foreach obj, $(OBJS_BASE_DRIVER), $(eval CFLAGS_$(obj)+=$(CFLAGS_BASE_DRIVER))) + +VPATH += $(SRCDIR)/base + +# +# all source are stored in SRCS-y +# +SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_controlq.c +SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_common.c +SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_sched.c +SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_switch.c +SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_nvm.c +SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_flex_pipe.c +SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_flow.c +SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_dcb.c +SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_fdir.c +SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_acl.c +SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_acl_ctrl.c + +SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_ethdev.c +SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_rxtx.c +ifeq ($(CONFIG_RTE_ARCH_X86), y) +SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_rxtx_vec_sse.c +endif + +SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_switch_filter.c +SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_fdir_filter.c +SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_hash.c +ifeq ($(findstring RTE_MACHINE_CPUFLAG_AVX2,$(CFLAGS)),RTE_MACHINE_CPUFLAG_AVX2) + CC_AVX2_SUPPORT=1 +else + CC_AVX2_SUPPORT=\ + $(shell $(CC) -march=core-avx2 -dM -E - &1 | \ + grep -q AVX2 && echo 1) + ifeq ($(CC_AVX2_SUPPORT), 1) + ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y) + CFLAGS_ice_rxtx_vec_avx2.o += -march=core-avx2 + else + CFLAGS_ice_rxtx_vec_avx2.o += 
-mavx2 + endif + endif +endif + +ifeq ($(CC_AVX2_SUPPORT), 1) + SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_rxtx_vec_avx2.c +endif +SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_generic_flow.c + +SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_dcf.c +SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_dcf_ethdev.c +SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_dcf_parent.c + +# install this header file +SYMLINK-$(CONFIG_RTE_LIBRTE_ICE_PMD)-include := rte_pmd_ice.h + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/src/spdk/dpdk/drivers/net/ice/base/README b/src/spdk/dpdk/drivers/net/ice/base/README new file mode 100644 index 000000000..726593971 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ice/base/README @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2020 Intel Corporation + */ + +Intel® ICE driver +================== + +This directory contains source code of FreeBSD ice driver of version +2020.03.26 released by the team which develops +basic drivers for any ice NIC. The directory of base/ contains the +original source package. +This driver is valid for the product(s) listed below + +* Intel® Ethernet Network Adapters E810 + +Updating the driver +=================== + +NOTE: The source code in this directory should not be modified apart from +the following file(s): + + ice_osdep.h diff --git a/src/spdk/dpdk/drivers/net/ice/base/ice_acl.c b/src/spdk/dpdk/drivers/net/ice/base/ice_acl.c new file mode 100644 index 000000000..31244cb4b --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ice/base/ice_acl.c @@ -0,0 +1,629 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + +#include "ice_acl.h" +#include "ice_adminq_cmd.h" + +/** + * ice_aq_alloc_acl_tbl - allocate ACL table + * @hw: pointer to the HW struct + * @tbl: pointer to ice_acl_alloc_tbl struct + * @cd: pointer to command details structure or NULL + * + * Allocate ACL table (indirect 0x0C10) + */ +enum ice_status +ice_aq_alloc_acl_tbl(struct ice_hw *hw, struct ice_acl_alloc_tbl *tbl, + struct ice_sq_cd *cd) +{ + struct ice_aqc_acl_alloc_table *cmd; + struct ice_aq_desc desc; + + if (!tbl->act_pairs_per_entry) + return ICE_ERR_PARAM; + + if (tbl->act_pairs_per_entry > ICE_AQC_MAX_ACTION_MEMORIES) + return ICE_ERR_MAX_LIMIT; + + /* If this is concurrent table, then buffer shall be valid and + * contain DependentAllocIDs, 'num_dependent_alloc_ids' should be valid + * and within limit + */ + if (tbl->concurr) { + if (!tbl->num_dependent_alloc_ids) + return ICE_ERR_PARAM; + if (tbl->num_dependent_alloc_ids > + ICE_AQC_MAX_CONCURRENT_ACL_TBL) + return ICE_ERR_INVAL_SIZE; + } + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_alloc_acl_tbl); + desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); + + cmd = &desc.params.alloc_table; + cmd->table_width = CPU_TO_LE16(tbl->width * BITS_PER_BYTE); + cmd->table_depth = CPU_TO_LE16(tbl->depth); + cmd->act_pairs_per_entry = tbl->act_pairs_per_entry; + if (tbl->concurr) + cmd->table_type = tbl->num_dependent_alloc_ids; + + return ice_aq_send_cmd(hw, &desc, &tbl->buf, sizeof(tbl->buf), cd); +} + +/** + * ice_aq_dealloc_acl_tbl - deallocate ACL table + * @hw: pointer to the HW struct + * @alloc_id: allocation ID of the table being released + * @buf: address of indirect data buffer + * @cd: pointer to command details structure or NULL + * + * Deallocate ACL table (indirect 0x0C11) + * + * NOTE: This command has no buffer format for command itself but response + * format is 'struct ice_aqc_acl_generic', pass ptr to that struct + * as 'buf' and its size as 'buf_size' + */ +enum 
ice_status +ice_aq_dealloc_acl_tbl(struct ice_hw *hw, u16 alloc_id, + struct ice_aqc_acl_generic *buf, struct ice_sq_cd *cd) +{ + struct ice_aqc_acl_tbl_actpair *cmd; + struct ice_aq_desc desc; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dealloc_acl_tbl); + cmd = &desc.params.tbl_actpair; + cmd->alloc_id = CPU_TO_LE16(alloc_id); + + return ice_aq_send_cmd(hw, &desc, buf, sizeof(*buf), cd); +} + +static enum ice_status +ice_aq_acl_entry(struct ice_hw *hw, u16 opcode, u8 tcam_idx, u16 entry_idx, + struct ice_aqc_acl_data *buf, struct ice_sq_cd *cd) +{ + struct ice_aqc_acl_entry *cmd; + struct ice_aq_desc desc; + + ice_fill_dflt_direct_cmd_desc(&desc, opcode); + + if (opcode == ice_aqc_opc_program_acl_entry) + desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); + + cmd = &desc.params.program_query_entry; + cmd->tcam_index = tcam_idx; + cmd->entry_index = CPU_TO_LE16(entry_idx); + + return ice_aq_send_cmd(hw, &desc, buf, sizeof(*buf), cd); +} + +/** + * ice_aq_program_acl_entry - program ACL entry + * @hw: pointer to the HW struct + * @tcam_idx: Updated TCAM block index + * @entry_idx: updated entry index + * @buf: address of indirect data buffer + * @cd: pointer to command details structure or NULL + * + * Program ACL entry (direct 0x0C20) + */ +enum ice_status +ice_aq_program_acl_entry(struct ice_hw *hw, u8 tcam_idx, u16 entry_idx, + struct ice_aqc_acl_data *buf, struct ice_sq_cd *cd) +{ + return ice_aq_acl_entry(hw, ice_aqc_opc_program_acl_entry, tcam_idx, + entry_idx, buf, cd); +} + +/** + * ice_aq_query_acl_entry - query ACL entry + * @hw: pointer to the HW struct + * @tcam_idx: Updated TCAM block index + * @entry_idx: updated entry index + * @buf: address of indirect data buffer + * @cd: pointer to command details structure or NULL + * + * Query ACL entry (direct 0x0C24) + * + * NOTE: Caller of this API to parse 'buf' appropriately since it contains + * response (key and key invert) + */ +enum ice_status +ice_aq_query_acl_entry(struct ice_hw *hw, u8 tcam_idx, u16 entry_idx, + struct ice_aqc_acl_data *buf, struct ice_sq_cd *cd) +{ + return ice_aq_acl_entry(hw, ice_aqc_opc_query_acl_entry, tcam_idx, + entry_idx, buf, cd); +} + +/* Helper function to alloc/dealloc ACL action pair */ +static enum ice_status +ice_aq_actpair_a_d(struct ice_hw *hw, u16 opcode, u16 alloc_id, + struct ice_aqc_acl_generic *buf, struct ice_sq_cd *cd) +{ + struct ice_aqc_acl_tbl_actpair *cmd; + struct ice_aq_desc desc; + + ice_fill_dflt_direct_cmd_desc(&desc, opcode); + cmd = &desc.params.tbl_actpair; + cmd->alloc_id = CPU_TO_LE16(alloc_id); + + return ice_aq_send_cmd(hw, &desc, buf, sizeof(*buf), cd); +} + +/** + * ice_aq_alloc_actpair - allocate actionpair for specified ACL table + * @hw: pointer to the HW struct + * @alloc_id: allocation ID of the table being associated with the actionpair + * @buf: address of indirect data buffer + * @cd: pointer to command details structure or NULL + * + * Allocate ACL actionpair (direct 0x0C12) + * + * This command doesn't need and doesn't have its own command buffer + * but for response format is as specified in 'struct ice_aqc_acl_generic' + */ +enum ice_status +ice_aq_alloc_actpair(struct ice_hw *hw, u16 alloc_id, + struct ice_aqc_acl_generic *buf, struct ice_sq_cd *cd) +{ + return ice_aq_actpair_a_d(hw, ice_aqc_opc_alloc_acl_actpair, alloc_id, + buf, cd); +} + +/** + * ice_aq_dealloc_actpair - dealloc actionpair for specified ACL table + * @hw: pointer to the HW struct + * @alloc_id: allocation ID of the table being associated with the actionpair + * @buf: address of 
indirect data buffer + * @cd: pointer to command details structure or NULL + * + * Deallocate ACL actionpair (direct 0x0C13) + */ +enum ice_status +ice_aq_dealloc_actpair(struct ice_hw *hw, u16 alloc_id, + struct ice_aqc_acl_generic *buf, struct ice_sq_cd *cd) +{ + return ice_aq_actpair_a_d(hw, ice_aqc_opc_dealloc_acl_actpair, alloc_id, + buf, cd); +} + +/* Helper function to program/query ACL action pair */ +static enum ice_status +ice_aq_actpair_p_q(struct ice_hw *hw, u16 opcode, u8 act_mem_idx, + u16 act_entry_idx, struct ice_aqc_actpair *buf, + struct ice_sq_cd *cd) +{ + struct ice_aqc_acl_actpair *cmd; + struct ice_aq_desc desc; + + ice_fill_dflt_direct_cmd_desc(&desc, opcode); + + if (opcode == ice_aqc_opc_program_acl_actpair) + desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); + + cmd = &desc.params.program_query_actpair; + cmd->act_mem_index = act_mem_idx; + cmd->act_entry_index = CPU_TO_LE16(act_entry_idx); + + return ice_aq_send_cmd(hw, &desc, buf, sizeof(*buf), cd); +} + +/** + * ice_aq_program_actpair - program ACL actionpair + * @hw: pointer to the HW struct + * @act_mem_idx: action memory index to program/update/query + * @act_entry_idx: the entry index in action memory to be programmed/updated + * @buf: address of indirect data buffer + * @cd: pointer to command details structure or NULL + * + * Program action entries (indirect 0x0C1C) + */ +enum ice_status +ice_aq_program_actpair(struct ice_hw *hw, u8 act_mem_idx, u16 act_entry_idx, + struct ice_aqc_actpair *buf, struct ice_sq_cd *cd) +{ + return ice_aq_actpair_p_q(hw, ice_aqc_opc_program_acl_actpair, + act_mem_idx, act_entry_idx, buf, cd); +} + +/** + * ice_aq_query_actpair - query ACL actionpair + * @hw: pointer to the HW struct + * @act_mem_idx: action memory index to program/update/query + * @act_entry_idx: the entry index in action memory to be programmed/updated + * @buf: address of indirect data buffer + * @cd: pointer to command details structure or NULL + * + * Query ACL actionpair (indirect 0x0C25) + */ +enum ice_status +ice_aq_query_actpair(struct ice_hw *hw, u8 act_mem_idx, u16 act_entry_idx, + struct ice_aqc_actpair *buf, struct ice_sq_cd *cd) +{ + return ice_aq_actpair_p_q(hw, ice_aqc_opc_query_acl_actpair, + act_mem_idx, act_entry_idx, buf, cd); +} + +/** + * ice_aq_dealloc_acl_res - deallocate ACL resources + * @hw: pointer to the HW struct + * @cd: pointer to command details structure or NULL + * + * ACL - de-allocate (direct 0x0C1A) resources. 
Used by SW to release all the + * resources allocated for it using a single command + */ +enum ice_status ice_aq_dealloc_acl_res(struct ice_hw *hw, struct ice_sq_cd *cd) +{ + struct ice_aq_desc desc; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dealloc_acl_res); + + return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); +} + +/** + * ice_acl_prof_aq_send - sending acl profile aq commands + * @hw: pointer to the HW struct + * @opc: command opcode + * @prof_id: profile ID + * @buf: ptr to buffer + * @cd: pointer to command details structure or NULL + * + * This function sends ACL profile commands + */ +static enum ice_status +ice_acl_prof_aq_send(struct ice_hw *hw, u16 opc, u8 prof_id, + struct ice_aqc_acl_prof_generic_frmt *buf, + struct ice_sq_cd *cd) +{ + struct ice_aq_desc desc; + + ice_fill_dflt_direct_cmd_desc(&desc, opc); + desc.params.profile.profile_id = prof_id; + if (opc == ice_aqc_opc_program_acl_prof_extraction || + opc == ice_aqc_opc_program_acl_prof_ranges) + desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); + return ice_aq_send_cmd(hw, &desc, buf, sizeof(*buf), cd); +} + +/** + * ice_prgm_acl_prof_extrt - program ACL profile extraction sequence + * @hw: pointer to the HW struct + * @prof_id: profile ID + * @buf: ptr to buffer + * @cd: pointer to command details structure or NULL + * + * ACL - program ACL profile extraction (indirect 0x0C1D) + */ +enum ice_status +ice_prgm_acl_prof_extrt(struct ice_hw *hw, u8 prof_id, + struct ice_aqc_acl_prof_generic_frmt *buf, + struct ice_sq_cd *cd) +{ + return ice_acl_prof_aq_send(hw, ice_aqc_opc_program_acl_prof_extraction, + prof_id, buf, cd); +} + +/** + * ice_query_acl_prof - query ACL profile + * @hw: pointer to the HW struct + * @prof_id: profile ID + * @buf: ptr to buffer (which will contain response of this command) + * @cd: pointer to command details structure or NULL + * + * ACL - query ACL profile (indirect 0x0C21) + */ +enum ice_status +ice_query_acl_prof(struct ice_hw *hw, u8 prof_id, + struct ice_aqc_acl_prof_generic_frmt *buf, + struct ice_sq_cd *cd) +{ + return ice_acl_prof_aq_send(hw, ice_aqc_opc_query_acl_prof, prof_id, + buf, cd); +} + +/** + * ice_aq_acl_cntrs_chk_params - Checks ACL counter parameters + * @cntrs: ptr to buffer describing input and output params + * + * This function checks the counter bank range for counter type and returns + * success or failure. + */ +static enum ice_status ice_aq_acl_cntrs_chk_params(struct ice_acl_cntrs *cntrs) +{ + enum ice_status status = ICE_SUCCESS; + + if (!cntrs || !cntrs->amount) + return ICE_ERR_PARAM; + + switch (cntrs->type) { + case ICE_AQC_ACL_CNT_TYPE_SINGLE: + /* Single counter type - configured to count either bytes + * or packets, the valid values for byte or packet counters + * shall be 0-3. + */ + if (cntrs->bank > ICE_AQC_ACL_MAX_CNT_SINGLE) + status = ICE_ERR_OUT_OF_RANGE; + break; + case ICE_AQC_ACL_CNT_TYPE_DUAL: + /* Pair counter type - counts number of bytes and packets + * The valid values for byte/packet counter duals shall be 0-1 + */ + if (cntrs->bank > ICE_AQC_ACL_MAX_CNT_DUAL) + status = ICE_ERR_OUT_OF_RANGE; + break; + default: + /* Unspecified counter type - Invalid or error*/ + status = ICE_ERR_PARAM; + } + + return status; +} + +/** + * ice_aq_alloc_acl_cntrs - allocate ACL counters + * @hw: pointer to the HW struct + * @cntrs: ptr to buffer describing input and output params + * @cd: pointer to command details structure or NULL + * + * ACL - allocate (indirect 0x0C16) counters. This function attempts to + * allocate a contiguous block of counters. 
In case of failures, caller can + * attempt to allocate a smaller chunk. The allocation is considered + * unsuccessful if returned counter value is invalid. In this case it returns + * an error otherwise success. + */ +enum ice_status +ice_aq_alloc_acl_cntrs(struct ice_hw *hw, struct ice_acl_cntrs *cntrs, + struct ice_sq_cd *cd) +{ + struct ice_aqc_acl_alloc_counters *cmd; + u16 first_cntr, last_cntr; + struct ice_aq_desc desc; + enum ice_status status; + + /* check for invalid params */ + status = ice_aq_acl_cntrs_chk_params(cntrs); + if (status) + return status; + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_alloc_acl_counters); + cmd = &desc.params.alloc_counters; + cmd->counter_amount = cntrs->amount; + cmd->counters_type = cntrs->type; + cmd->bank_alloc = cntrs->bank; + status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd); + if (!status) { + first_cntr = LE16_TO_CPU(cmd->ops.resp.first_counter); + last_cntr = LE16_TO_CPU(cmd->ops.resp.last_counter); + if (first_cntr == ICE_AQC_ACL_ALLOC_CNT_INVAL || + last_cntr == ICE_AQC_ACL_ALLOC_CNT_INVAL) + return ICE_ERR_OUT_OF_RANGE; + cntrs->first_cntr = first_cntr; + cntrs->last_cntr = last_cntr; + } + return status; +} + +/** + * ice_aq_dealloc_acl_cntrs - deallocate ACL counters + * @hw: pointer to the HW struct + * @cntrs: ptr to buffer describing input and output params + * @cd: pointer to command details structure or NULL + * + * ACL - de-allocate (direct 0x0C17) counters. + * This function deallocate ACL counters. + */ +enum ice_status +ice_aq_dealloc_acl_cntrs(struct ice_hw *hw, struct ice_acl_cntrs *cntrs, + struct ice_sq_cd *cd) +{ + struct ice_aqc_acl_dealloc_counters *cmd; + struct ice_aq_desc desc; + enum ice_status status; + + /* check for invalid params */ + status = ice_aq_acl_cntrs_chk_params(cntrs); + if (status) + return status; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dealloc_acl_counters); + cmd = &desc.params.dealloc_counters; + cmd->first_counter = CPU_TO_LE16(cntrs->first_cntr); + cmd->last_counter = CPU_TO_LE16(cntrs->last_cntr); + cmd->counters_type = cntrs->type; + cmd->bank_alloc = cntrs->bank; + return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); +} + +/** + * ice_aq_query_acl_cntrs - query ACL counter + * @hw: pointer to the HW struct + * @bank: queries counter bank + * @index: queried counter index + * @cntr_val: pointer to counter or packet counter value + * @cd: pointer to command details structure or NULL + * + * ACL - query ACL counter (direct 0x0C27) + */ +enum ice_status +ice_aq_query_acl_cntrs(struct ice_hw *hw, u8 bank, u16 index, u64 *cntr_val, + struct ice_sq_cd *cd) +{ + struct ice_aqc_acl_query_counter *cmd; + struct ice_aq_desc desc; + enum ice_status status; + + if (!cntr_val) + return ICE_ERR_PARAM; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_query_acl_counter); + cmd = &desc.params.query_counter; + cmd->counter_index = CPU_TO_LE16(index); + cmd->counter_bank = bank; + status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd); + if (!status) { + __le64 resp_val = 0; + + ice_memcpy(&resp_val, cmd->ops.resp.val, + sizeof(cmd->ops.resp.val), ICE_NONDMA_TO_NONDMA); + *cntr_val = LE64_TO_CPU(resp_val); + } + return status; +} + +/** + * ice_prog_acl_prof_ranges - program ACL profile ranges + * @hw: pointer to the HW struct + * @prof_id: programmed or updated profile ID + * @buf: pointer to input buffer + * @cd: pointer to command details structure or NULL + * + * ACL - program ACL profile ranges (indirect 0x0C1E) + */ +enum ice_status +ice_prog_acl_prof_ranges(struct ice_hw *hw, u8 prof_id, + 
struct ice_aqc_acl_profile_ranges *buf, + struct ice_sq_cd *cd) +{ + struct ice_aq_desc desc; + + ice_fill_dflt_direct_cmd_desc(&desc, + ice_aqc_opc_program_acl_prof_ranges); + desc.params.profile.profile_id = prof_id; + desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); + return ice_aq_send_cmd(hw, &desc, buf, sizeof(*buf), cd); +} + +/** + * ice_query_acl_prof_ranges - query ACL profile ranges + * @hw: pointer to the HW struct + * @prof_id: programmed or updated profile ID + * @buf: pointer to response buffer + * @cd: pointer to command details structure or NULL + * + * ACL - query ACL profile ranges (indirect 0x0C22) + */ +enum ice_status +ice_query_acl_prof_ranges(struct ice_hw *hw, u8 prof_id, + struct ice_aqc_acl_profile_ranges *buf, + struct ice_sq_cd *cd) +{ + struct ice_aq_desc desc; + + ice_fill_dflt_direct_cmd_desc(&desc, + ice_aqc_opc_query_acl_prof_ranges); + desc.params.profile.profile_id = prof_id; + return ice_aq_send_cmd(hw, &desc, buf, sizeof(*buf), cd); +} + +/** + * ice_aq_alloc_acl_scen - allocate ACL scenario + * @hw: pointer to the HW struct + * @scen_id: memory location to receive allocated scenario ID + * @buf: address of indirect data buffer + * @cd: pointer to command details structure or NULL + * + * Allocate ACL scenario (indirect 0x0C14) + */ +enum ice_status +ice_aq_alloc_acl_scen(struct ice_hw *hw, u16 *scen_id, + struct ice_aqc_acl_scen *buf, struct ice_sq_cd *cd) +{ + struct ice_aqc_acl_alloc_scen *cmd; + struct ice_aq_desc desc; + enum ice_status status; + + if (!scen_id) + return ICE_ERR_PARAM; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_alloc_acl_scen); + desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); + cmd = &desc.params.alloc_scen; + + status = ice_aq_send_cmd(hw, &desc, buf, sizeof(*buf), cd); + if (!status) + *scen_id = LE16_TO_CPU(cmd->ops.resp.scen_id); + + return status; +} + +/** + * ice_aq_dealloc_acl_scen - deallocate ACL scenario + * @hw: pointer to the HW struct + * @scen_id: scen_id to be deallocated (input and output field) + * @cd: pointer to command details structure or NULL + * + * Deallocate ACL scenario (direct 0x0C15) + */ +enum ice_status +ice_aq_dealloc_acl_scen(struct ice_hw *hw, u16 scen_id, struct ice_sq_cd *cd) +{ + struct ice_aqc_acl_dealloc_scen *cmd; + struct ice_aq_desc desc; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dealloc_acl_scen); + cmd = &desc.params.dealloc_scen; + cmd->scen_id = CPU_TO_LE16(scen_id); + + return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); +} + +/** + * ice_aq_update_query_scen - update or query ACL scenario + * @hw: pointer to the HW struct + * @opcode: aq command opcode for either query or update scenario + * @scen_id: scen_id to be updated or queried + * @buf: address of indirect data buffer + * @cd: pointer to command details structure or NULL + * + * Calls update or query ACL scenario + */ +static enum ice_status +ice_aq_update_query_scen(struct ice_hw *hw, u16 opcode, u16 scen_id, + struct ice_aqc_acl_scen *buf, struct ice_sq_cd *cd) +{ + struct ice_aqc_acl_update_query_scen *cmd; + struct ice_aq_desc desc; + + ice_fill_dflt_direct_cmd_desc(&desc, opcode); + if (opcode == ice_aqc_opc_update_acl_scen) + desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); + cmd = &desc.params.update_query_scen; + cmd->scen_id = CPU_TO_LE16(scen_id); + + return ice_aq_send_cmd(hw, &desc, buf, sizeof(*buf), cd); +} + +/** + * ice_aq_update_acl_scen - update ACL scenario + * @hw: pointer to the HW struct + * @scen_id: scen_id to be updated + * @buf: address of indirect data buffer + * @cd: pointer to command details 
structure or NULL + * + * Update ACL scenario (indirect 0x0C1B) + */ +enum ice_status +ice_aq_update_acl_scen(struct ice_hw *hw, u16 scen_id, + struct ice_aqc_acl_scen *buf, struct ice_sq_cd *cd) +{ + return ice_aq_update_query_scen(hw, ice_aqc_opc_update_acl_scen, + scen_id, buf, cd); +} + +/** + * ice_aq_query_acl_scen - query ACL scenario + * @hw: pointer to the HW struct + * @scen_id: scen_id to be queried + * @buf: address of indirect data buffer + * @cd: pointer to command details structure or NULL + * + * Query ACL scenario (indirect 0x0C23) + */ +enum ice_status +ice_aq_query_acl_scen(struct ice_hw *hw, u16 scen_id, + struct ice_aqc_acl_scen *buf, struct ice_sq_cd *cd) +{ + return ice_aq_update_query_scen(hw, ice_aqc_opc_query_acl_scen, + scen_id, buf, cd); +} diff --git a/src/spdk/dpdk/drivers/net/ice/base/ice_acl.h b/src/spdk/dpdk/drivers/net/ice/base/ice_acl.h new file mode 100644 index 000000000..500db0c35 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ice/base/ice_acl.h @@ -0,0 +1,206 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + +#ifndef _ICE_ACL_H_ +#define _ICE_ACL_H_ + +#include "ice_common.h" +#include "ice_adminq_cmd.h" + +struct ice_acl_tbl_params { + u16 width; /* Select/match bytes */ + u16 depth; /* Number of entries */ + +#define ICE_ACL_TBL_MAX_DEP_TBLS 15 + u16 dep_tbls[ICE_ACL_TBL_MAX_DEP_TBLS]; + + u8 entry_act_pairs; /* Action pairs per entry */ + u8 concurr; /* Concurrent table lookup enable */ +}; + +struct ice_acl_act_mem { + u8 act_mem; +#define ICE_ACL_ACT_PAIR_MEM_INVAL 0xff + u8 member_of_tcam; +}; + +struct ice_acl_tbl { + /* TCAM configuration */ + u8 first_tcam; /* Index of the first TCAM block */ + u8 last_tcam; /* Index of the last TCAM block */ + /* Index of the first entry in the first TCAM */ + u16 first_entry; + /* Index of the last entry in the last TCAM */ + u16 last_entry; + + /* List of active scenarios */ + struct LIST_HEAD_TYPE scens; + + struct ice_acl_tbl_params info; + struct ice_acl_act_mem act_mems[ICE_AQC_MAX_ACTION_MEMORIES]; + + /* Keep track of available 64-entry chunks in TCAMs */ + ice_declare_bitmap(avail, ICE_AQC_ACL_ALLOC_UNITS); + + u16 id; +}; + +#define ICE_MAX_ACL_TCAM_ENTRY (ICE_AQC_ACL_TCAM_DEPTH * ICE_AQC_ACL_SLICES) +enum ice_acl_entry_prior { + ICE_LOW = 0, + ICE_NORMAL, + ICE_HIGH, + ICE_MAX_PRIOR +}; + +/* Scenario structure + * A scenario is a logical partition within an ACL table. It can span more + * than one TCAM in cascade mode to support select/mask key widths larger. + * than the width of a TCAM. It can also span more than one TCAM in stacked + * mode to support larger number of entries than what a TCAM can hold. It is + * used to select values from selection bases (field vectors holding extract + * protocol header fields) to form lookup keys, and to associate action memory + * banks to the TCAMs used. 
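The scenario description above separates two sizing dimensions: cascading TCAMs side by side to cover a wider select/mask key, and stacking rows of TCAMs to hold more entries. A minimal standalone sketch of that sizing arithmetic follows; the per-TCAM key width and depth used here are illustrative assumptions (the 512-entry depth matches the worked example later in this file), not the driver's ICE_AQC_ACL_KEY_WIDTH_BYTES / ICE_AQC_ACL_TCAM_DEPTH values.

/* Standalone sketch: how many TCAMs a scenario would occupy, assuming
 * hypothetical per-TCAM limits (key width and depth are example values,
 * not the driver's real ICE_AQC_* constants).
 */
#include <stdio.h>

static unsigned int div_round_up(unsigned int n, unsigned int d)
{
        return (n + d - 1) / d;
}

int main(void)
{
        unsigned int tcam_key_width = 40;   /* assumed bytes matched per TCAM */
        unsigned int tcam_depth = 512;      /* assumed entries per TCAM */

        unsigned int match_width = 64;      /* example scenario key width */
        unsigned int num_entries = 1000;    /* example scenario entry count */

        /* Cascade dimension: TCAMs side by side to cover the key width */
        unsigned int cascade_cnt = div_round_up(match_width, tcam_key_width);
        /* Stack dimension: rows of TCAMs to cover the number of entries */
        unsigned int stack_cnt = div_round_up(num_entries, tcam_depth);

        printf("scenario uses a %ux%u block of TCAMs (%u total)\n",
               stack_cnt, cascade_cnt, stack_cnt * cascade_cnt);
        return 0;
}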
+ */ +struct ice_acl_scen { + struct LIST_ENTRY_TYPE list_entry; + /* If nth bit of act_mem_bitmap is set, then nth action memory will + * participate in this scenario + */ + ice_declare_bitmap(act_mem_bitmap, ICE_AQC_MAX_ACTION_MEMORIES); + + /* If nth bit of entry_bitmap is set, then nth entry will + * be available in this scenario + */ + ice_declare_bitmap(entry_bitmap, ICE_MAX_ACL_TCAM_ENTRY); + u16 first_idx[ICE_MAX_PRIOR]; + u16 last_idx[ICE_MAX_PRIOR]; + + u16 id; + u16 start; /* Number of entry from the start of the parent table */ +#define ICE_ACL_SCEN_MIN_WIDTH 0x3 + u16 width; /* Number of select/mask bytes */ + u16 num_entry; /* Number of scenario entry */ + u16 end; /* Last addressable entry from start of table */ + u8 eff_width; /* Available width in bytes to match */ +#define ICE_ACL_SCEN_PKT_DIR_IDX_IN_TCAM 0x2 +#define ICE_ACL_SCEN_PID_IDX_IN_TCAM 0x3 +#define ICE_ACL_SCEN_RNG_CHK_IDX_IN_TCAM 0x4 + u8 pid_idx; /* Byte index used to match profile ID */ + u8 rng_chk_idx; /* Byte index used to match range checkers result */ + u8 pkt_dir_idx; /* Byte index used to match packet direction */ +}; + +/* This structure represents input fields needed to allocate ACL table */ +struct ice_acl_alloc_tbl { + /* Table's width in number of bytes matched */ + u16 width; + /* Table's depth in number of entries. */ + u16 depth; + u8 num_dependent_alloc_ids; /* number of depdendent alloc IDs */ + u8 concurr; /* true for concurrent table type */ + + /* Amount of action pairs per table entry. Minimal valid + * value for this field is 1 (e.g. single pair of actions) + */ + u8 act_pairs_per_entry; + union { + struct ice_aqc_acl_alloc_table_data data_buf; + struct ice_aqc_acl_generic resp_buf; + } buf; +}; + +/* This structure is used to communicate input and output params for + * [de]allocate_acl_counters + */ +struct ice_acl_cntrs { + u8 amount; + u8 type; + u8 bank; + + /* Next 2 variables are used for output in case of alloc_acl_counters + * and input in case of deallocate_acl_counters + */ + u16 first_cntr; + u16 last_cntr; +}; + +enum ice_status +ice_acl_create_tbl(struct ice_hw *hw, struct ice_acl_tbl_params *params); +enum ice_status ice_acl_destroy_tbl(struct ice_hw *hw); +enum ice_status +ice_acl_create_scen(struct ice_hw *hw, u16 match_width, u16 num_entries, + u16 *scen_id); +enum ice_status ice_acl_destroy_scen(struct ice_hw *hw, u16 scen_id); +enum ice_status +ice_aq_alloc_acl_tbl(struct ice_hw *hw, struct ice_acl_alloc_tbl *tbl, + struct ice_sq_cd *cd); +enum ice_status +ice_aq_dealloc_acl_tbl(struct ice_hw *hw, u16 alloc_id, + struct ice_aqc_acl_generic *buf, struct ice_sq_cd *cd); +enum ice_status +ice_aq_program_acl_entry(struct ice_hw *hw, u8 tcam_idx, u16 entry_idx, + struct ice_aqc_acl_data *buf, struct ice_sq_cd *cd); +enum ice_status +ice_aq_query_acl_entry(struct ice_hw *hw, u8 tcam_idx, u16 entry_idx, + struct ice_aqc_acl_data *buf, struct ice_sq_cd *cd); +enum ice_status +ice_aq_alloc_actpair(struct ice_hw *hw, u16 alloc_id, + struct ice_aqc_acl_generic *buf, struct ice_sq_cd *cd); +enum ice_status +ice_aq_dealloc_actpair(struct ice_hw *hw, u16 alloc_id, + struct ice_aqc_acl_generic *buf, struct ice_sq_cd *cd); +enum ice_status +ice_aq_program_actpair(struct ice_hw *hw, u8 act_mem_idx, u16 act_entry_idx, + struct ice_aqc_actpair *buf, struct ice_sq_cd *cd); +enum ice_status +ice_aq_query_actpair(struct ice_hw *hw, u8 act_mem_idx, u16 act_entry_idx, + struct ice_aqc_actpair *buf, struct ice_sq_cd *cd); +enum ice_status ice_aq_dealloc_acl_res(struct ice_hw *hw, struct 
ice_sq_cd *cd); +enum ice_status +ice_prgm_acl_prof_extrt(struct ice_hw *hw, u8 prof_id, + struct ice_aqc_acl_prof_generic_frmt *buf, + struct ice_sq_cd *cd); +enum ice_status +ice_query_acl_prof(struct ice_hw *hw, u8 prof_id, + struct ice_aqc_acl_prof_generic_frmt *buf, + struct ice_sq_cd *cd); +enum ice_status +ice_aq_alloc_acl_cntrs(struct ice_hw *hw, struct ice_acl_cntrs *cntrs, + struct ice_sq_cd *cd); +enum ice_status +ice_aq_dealloc_acl_cntrs(struct ice_hw *hw, struct ice_acl_cntrs *cntrs, + struct ice_sq_cd *cd); +enum ice_status +ice_aq_query_acl_cntrs(struct ice_hw *hw, u8 bank, u16 index, u64 *cntr_val, + struct ice_sq_cd *cd); +enum ice_status +ice_prog_acl_prof_ranges(struct ice_hw *hw, u8 prof_id, + struct ice_aqc_acl_profile_ranges *buf, + struct ice_sq_cd *cd); +enum ice_status +ice_query_acl_prof_ranges(struct ice_hw *hw, u8 prof_id, + struct ice_aqc_acl_profile_ranges *buf, + struct ice_sq_cd *cd); +enum ice_status +ice_aq_alloc_acl_scen(struct ice_hw *hw, u16 *scen_id, + struct ice_aqc_acl_scen *buf, struct ice_sq_cd *cd); +enum ice_status +ice_aq_dealloc_acl_scen(struct ice_hw *hw, u16 scen_id, struct ice_sq_cd *cd); +enum ice_status +ice_aq_update_acl_scen(struct ice_hw *hw, u16 scen_id, + struct ice_aqc_acl_scen *buf, struct ice_sq_cd *cd); +enum ice_status +ice_aq_query_acl_scen(struct ice_hw *hw, u16 scen_id, + struct ice_aqc_acl_scen *buf, struct ice_sq_cd *cd); +enum ice_status +ice_acl_add_entry(struct ice_hw *hw, struct ice_acl_scen *scen, + enum ice_acl_entry_prior prior, u8 *keys, u8 *inverts, + struct ice_acl_act_entry *acts, u8 acts_cnt, u16 *entry_idx); +enum ice_status +ice_acl_prog_act(struct ice_hw *hw, struct ice_acl_scen *scen, + struct ice_acl_act_entry *acts, u8 acts_cnt, u16 entry_idx); +enum ice_status +ice_acl_rem_entry(struct ice_hw *hw, struct ice_acl_scen *scen, u16 entry_idx); +#endif /* _ICE_ACL_H_ */ diff --git a/src/spdk/dpdk/drivers/net/ice/base/ice_acl_ctrl.c b/src/spdk/dpdk/drivers/net/ice/base/ice_acl_ctrl.c new file mode 100644 index 000000000..e67605141 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ice/base/ice_acl_ctrl.c @@ -0,0 +1,1185 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + +#include "ice_acl.h" +#include "ice_flow.h" + +/* Determine the TCAM index of entry 'e' within the ACL table */ +#define ICE_ACL_TBL_TCAM_IDX(e) ((e) / ICE_AQC_ACL_TCAM_DEPTH) + +/* Determine the entry index within the TCAM */ +#define ICE_ACL_TBL_TCAM_ENTRY_IDX(e) ((e) % ICE_AQC_ACL_TCAM_DEPTH) + +#define ICE_ACL_SCEN_ENTRY_INVAL 0xFFFF +/** + * ice_acl_init_entry + * @scen: pointer to the scenario struct + * + * Initialize the scenario control structure. 
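The two macros defined at the top of this file decompose an absolute entry index into a TCAM block index and an offset within that TCAM; allocation then proceeds in 64-entry chunks (8 per TCAM at the illustrative 512-entry depth used in the comments that follow). A small standalone sketch of that decomposition, with the depth and chunk size treated as assumed example values:

/* Standalone sketch: decompose an absolute ACL entry index the way the
 * macros above do. 512 and 64 are the illustrative depth and allocation
 * unit referenced in this file's comments, not authoritative values.
 */
#include <stdio.h>

#define SKETCH_TCAM_DEPTH 512u
#define SKETCH_ENTRY_ALLOC_UNIT 64u

int main(void)
{
        unsigned int entry = 700;  /* example absolute entry index */

        unsigned int tcam_idx = entry / SKETCH_TCAM_DEPTH;            /* which TCAM */
        unsigned int entry_in_tcam = entry % SKETCH_TCAM_DEPTH;       /* offset inside it */
        unsigned int chunk = entry_in_tcam / SKETCH_ENTRY_ALLOC_UNIT; /* 64-entry chunk */

        printf("entry %u -> TCAM %u, offset %u, chunk %u of %u\n",
               entry, tcam_idx, entry_in_tcam, chunk,
               SKETCH_TCAM_DEPTH / SKETCH_ENTRY_ALLOC_UNIT);
        return 0;
}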
+ */ +static void ice_acl_init_entry(struct ice_acl_scen *scen) +{ + /** + * low priority: start from the highest index, 25% of total entries + * normal priority: start from the highest index, 50% of total entries + * high priority: start from the lowest index, 25% of total entries + */ + scen->first_idx[ICE_LOW] = scen->num_entry - 1; + scen->first_idx[ICE_NORMAL] = scen->num_entry - scen->num_entry / 4 - 1; + scen->first_idx[ICE_HIGH] = 0; + + scen->last_idx[ICE_LOW] = scen->num_entry - scen->num_entry / 4; + scen->last_idx[ICE_NORMAL] = scen->num_entry / 4; + scen->last_idx[ICE_HIGH] = scen->num_entry / 4 - 1; +} + +/** + * ice_acl_scen_assign_entry_idx + * @scen: pointer to the scenario struct + * @prior: the priority of the flow entry being allocated + * + * To find the index of an available entry in scenario + * + * Returns ICE_ACL_SCEN_ENTRY_INVAL if fails + * Returns index on success + */ +static u16 ice_acl_scen_assign_entry_idx(struct ice_acl_scen *scen, + enum ice_acl_entry_prior prior) +{ + u16 first_idx, last_idx, i; + s8 step; + + if (prior >= ICE_MAX_PRIOR) + return ICE_ACL_SCEN_ENTRY_INVAL; + + first_idx = scen->first_idx[prior]; + last_idx = scen->last_idx[prior]; + step = first_idx <= last_idx ? 1 : -1; + + for (i = first_idx; i != last_idx + step; i += step) + if (!ice_test_and_set_bit(i, scen->entry_bitmap)) + return i; + + return ICE_ACL_SCEN_ENTRY_INVAL; +} + +/** + * ice_acl_scen_free_entry_idx + * @scen: pointer to the scenario struct + * @idx: the index of the flow entry being de-allocated + * + * To mark an entry available in scenario + */ +static enum ice_status +ice_acl_scen_free_entry_idx(struct ice_acl_scen *scen, u16 idx) +{ + if (idx >= scen->num_entry) + return ICE_ERR_MAX_LIMIT; + + if (!ice_test_and_clear_bit(idx, scen->entry_bitmap)) + return ICE_ERR_DOES_NOT_EXIST; + + return ICE_SUCCESS; +} + +/** + * ice_acl_tbl_calc_end_idx + * @start: start index of the TCAM entry of this partition + * @num_entries: number of entries in this partition + * @width: width of a partition in number of TCAMs + * + * Calculate the end entry index for a partition with starting entry index + * 'start', entries 'num_entries', and width 'width'. + */ +static u16 ice_acl_tbl_calc_end_idx(u16 start, u16 num_entries, u16 width) +{ + u16 end_idx, add_entries = 0; + + end_idx = start + (num_entries - 1); + + /* In case that our ACL partition requires cascading TCAMs */ + if (width > 1) { + u16 num_stack_level; + + /* Figure out the TCAM stacked level in this ACL scenario */ + num_stack_level = (start % ICE_AQC_ACL_TCAM_DEPTH) + + num_entries; + num_stack_level = DIVIDE_AND_ROUND_UP(num_stack_level, + ICE_AQC_ACL_TCAM_DEPTH); + + /* In this case, each entries in our ACL partition span + * multiple TCAMs. Thus, we will need to add + * ((width - 1) * num_stack_level) TCAM's entries to + * end_idx. + * + * For example : In our case, our scenario is 2x2: + * [TCAM 0] [TCAM 1] + * [TCAM 2] [TCAM 3] + * Assuming that a TCAM will have 512 entries. If "start" + * is 500, "num_entries" is 3 and "width" = 2, then end_idx + * should be 1024 (belongs to TCAM 2). + * Before going to this if statement, end_idx will have the + * value of 512. If "width" is 1, then the final value of + * end_idx is 512. However, in our case, width is 2, then we + * will need add (2 - 1) * 1 * 512. As result, end_idx will + * have the value of 1024. 
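The end-index calculation described above can be exercised in isolation. The sketch below follows the same formula implemented just after this comment (last entry index, plus (width - 1) rows of extra TCAM entries when the partition cascades), using the 512-entry TCAM depth that the comment itself uses as an illustration.

/* Standalone sketch of the partition end-index formula used below,
 * with the comment's illustrative 512-entry TCAM depth.
 */
#include <stdio.h>

#define SKETCH_TCAM_DEPTH 512u

static unsigned int div_round_up(unsigned int n, unsigned int d)
{
        return (n + d - 1) / d;
}

static unsigned int sketch_calc_end_idx(unsigned int start,
                                        unsigned int num_entries,
                                        unsigned int width)
{
        unsigned int end_idx = start + num_entries - 1;

        if (width > 1) {
                /* stacked TCAM rows this partition touches */
                unsigned int rows = div_round_up(start % SKETCH_TCAM_DEPTH +
                                                 num_entries,
                                                 SKETCH_TCAM_DEPTH);

                end_idx += (width - 1) * rows * SKETCH_TCAM_DEPTH;
        }
        return end_idx;
}

int main(void)
{
        /* A 2-TCAM-wide partition filling one full row: indices 0..1023 */
        printf("end_idx = %u\n", sketch_calc_end_idx(0, 512, 2));
        return 0;
}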
+ */ + add_entries = (width - 1) * num_stack_level * + ICE_AQC_ACL_TCAM_DEPTH; + } + + return end_idx + add_entries; +} + +/** + * ice_acl_init_tbl + * @hw: pointer to the hardware structure + * + * Initialize the ACL table by invalidating TCAM entries and action pairs. + */ +static enum ice_status ice_acl_init_tbl(struct ice_hw *hw) +{ + struct ice_aqc_actpair act_buf; + struct ice_aqc_acl_data buf; + enum ice_status status = ICE_SUCCESS; + struct ice_acl_tbl *tbl; + u8 tcam_idx, i; + u16 idx; + + tbl = hw->acl_tbl; + if (!tbl) { + status = ICE_ERR_CFG; + return status; + } + + ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM); + ice_memset(&act_buf, 0, sizeof(act_buf), ICE_NONDMA_MEM); + + tcam_idx = tbl->first_tcam; + idx = tbl->first_entry; + while (tcam_idx < tbl->last_tcam || + (tcam_idx == tbl->last_tcam && idx <= tbl->last_entry)) { + /* Use the same value for entry_key and entry_key_inv since + * we are initializing the fields to 0 + */ + status = ice_aq_program_acl_entry(hw, tcam_idx, idx, &buf, + NULL); + if (status) + return status; + + if (++idx > tbl->last_entry) { + tcam_idx++; + idx = tbl->first_entry; + } + } + + for (i = 0; i < ICE_AQC_MAX_ACTION_MEMORIES; i++) { + u16 act_entry_idx, start, end; + + if (tbl->act_mems[i].act_mem == ICE_ACL_ACT_PAIR_MEM_INVAL) + continue; + + start = tbl->first_entry; + end = tbl->last_entry; + + for (act_entry_idx = start; act_entry_idx <= end; + act_entry_idx++) { + /* Invalidate all allocated action pairs */ + status = ice_aq_program_actpair(hw, i, act_entry_idx, + &act_buf, NULL); + if (status) + return status; + } + } + + return status; +} + +/** + * ice_acl_assign_act_mems_to_tcam + * @tbl: pointer to acl table structure + * @cur_tcam: Index of current TCAM. Value = 0 to (ICE_AQC_ACL_SLICES - 1) + * @cur_mem_idx: Index of current action memory bank. Value = 0 to + * (ICE_AQC_MAX_ACTION_MEMORIES - 1) + * @num_mem: Number of action memory banks for this TCAM + * + * Assign "num_mem" valid action memory banks from "curr_mem_idx" to + * "curr_tcam" TCAM. + */ +static void +ice_acl_assign_act_mems_to_tcam(struct ice_acl_tbl *tbl, u8 cur_tcam, + u8 *cur_mem_idx, u8 num_mem) +{ + u8 mem_cnt; + + for (mem_cnt = 0; + *cur_mem_idx < ICE_AQC_MAX_ACTION_MEMORIES && mem_cnt < num_mem; + (*cur_mem_idx)++) { + struct ice_acl_act_mem *p_mem = &tbl->act_mems[*cur_mem_idx]; + + if (p_mem->act_mem == ICE_ACL_ACT_PAIR_MEM_INVAL) + continue; + + p_mem->member_of_tcam = cur_tcam; + + mem_cnt++; + } +} + +/** + * ice_acl_divide_act_mems_to_tcams + * @tbl: pointer to acl table structure + * + * Figure out how to divide given action memory banks to given TCAMs. This + * division is for SW book keeping. In the time when scenario is created, + * an action memory bank can be used for different TCAM. + * + * For example, given that we have 2x2 ACL table with each table entry has + * 2 action memory pairs. As the result, we will have 4 TCAMs (T1,T2,T3,T4) + * and 4 action memory banks (A1,A2,A3,A4) + * [T1 - T2] { A1 - A2 } + * [T3 - T4] { A3 - A4 } + * In the time when we need to create a scenario, for example, 2x1 scenario, + * we will use [T3,T4] in a cascaded layout. As it is a requirement that all + * action memory banks in a cascaded TCAM's row will need to associate with + * the last TCAM. Thus, we will associate action memory banks [A3] and [A4] + * for TCAM [T4]. + * For SW book-keeping purpose, we will keep theoretical maps between TCAM + * [Tn] to action memory bank [An]. 
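The function below turns this book-keeping idea into a fair split of the per-entry action pairs across the TCAMs of one cascaded row, as worked out in its inline comment (7 pairs across 3 TCAMs give 3, 2 and 2). A minimal standalone sketch of that split, assuming nothing beyond integer division:

/* Standalone sketch: fair split of N action-memory banks across one
 * cascaded row of TCAMs, mirroring the 3/2/2 example in the comment below.
 */
#include <stdio.h>

int main(void)
{
        unsigned int act_pairs = 7;  /* action pairs per table entry (example) */
        unsigned int num_cscd = 3;   /* TCAMs cascaded in one row (example) */

        unsigned int min_per_tcam = act_pairs / num_cscd;
        unsigned int tcams_with_extra = act_pairs % num_cscd; /* leftmost TCAMs get one extra */

        for (unsigned int i = 0; i < num_cscd; i++) {
                unsigned int banks = min_per_tcam + (i < tcams_with_extra ? 1 : 0);

                printf("TCAM %u in the row gets %u action memory bank(s)\n",
                       i, banks);
        }
        return 0;
}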
+ */ +static void ice_acl_divide_act_mems_to_tcams(struct ice_acl_tbl *tbl) +{ + u16 num_cscd, stack_level, stack_idx, min_act_mem; + u8 tcam_idx = tbl->first_tcam; + u16 max_idx_to_get_extra; + u8 mem_idx = 0; + + /* Determine number of stacked TCAMs */ + stack_level = DIVIDE_AND_ROUND_UP(tbl->info.depth, + ICE_AQC_ACL_TCAM_DEPTH); + + /* Determine number of cascaded TCAMs */ + num_cscd = DIVIDE_AND_ROUND_UP(tbl->info.width, + ICE_AQC_ACL_KEY_WIDTH_BYTES); + + /* In a line of cascaded TCAM, given the number of action memory + * banks per ACL table entry, we want to fairly divide these action + * memory banks between these TCAMs. + * + * For example, there are 3 TCAMs (TCAM 3,4,5) in a line of + * cascaded TCAM, and there are 7 act_mems for each ACL table entry. + * The result is: + * [TCAM_3 will have 3 act_mems] + * [TCAM_4 will have 2 act_mems] + * [TCAM_5 will have 2 act_mems] + */ + min_act_mem = tbl->info.entry_act_pairs / num_cscd; + max_idx_to_get_extra = tbl->info.entry_act_pairs % num_cscd; + + for (stack_idx = 0; stack_idx < stack_level; stack_idx++) { + u16 i; + + for (i = 0; i < num_cscd; i++) { + u8 total_act_mem = min_act_mem; + + if (i < max_idx_to_get_extra) + total_act_mem++; + + ice_acl_assign_act_mems_to_tcam(tbl, tcam_idx, + &mem_idx, + total_act_mem); + + tcam_idx++; + } + } +} + +/** + * ice_acl_create_tbl + * @hw: pointer to the HW struct + * @params: parameters for the table to be created + * + * Create a LEM table for ACL usage. We are currently starting with some fixed + * values for the size of the table, but this will need to grow as more flow + * entries are added by the user level. + */ +enum ice_status +ice_acl_create_tbl(struct ice_hw *hw, struct ice_acl_tbl_params *params) +{ + u16 width, depth, first_e, last_e, i; + struct ice_aqc_acl_generic *resp_buf; + struct ice_acl_alloc_tbl tbl_alloc; + struct ice_acl_tbl *tbl; + enum ice_status status; + + if (hw->acl_tbl) + return ICE_ERR_ALREADY_EXISTS; + + if (!params) + return ICE_ERR_PARAM; + + /* round up the width to the next TCAM width boundary. */ + width = ROUND_UP(params->width, (u16)ICE_AQC_ACL_KEY_WIDTH_BYTES); + /* depth should be provided in chunk (64 entry) increments */ + depth = ICE_ALIGN(params->depth, ICE_ACL_ENTRY_ALLOC_UNIT); + + if (params->entry_act_pairs < width / ICE_AQC_ACL_KEY_WIDTH_BYTES) { + params->entry_act_pairs = width / ICE_AQC_ACL_KEY_WIDTH_BYTES; + + if (params->entry_act_pairs > ICE_AQC_TBL_MAX_ACTION_PAIRS) + params->entry_act_pairs = ICE_AQC_TBL_MAX_ACTION_PAIRS; + } + + /* Validate that width*depth will not exceed the TCAM limit */ + if ((DIVIDE_AND_ROUND_UP(depth, ICE_AQC_ACL_TCAM_DEPTH) * + (width / ICE_AQC_ACL_KEY_WIDTH_BYTES)) > ICE_AQC_ACL_SLICES) + return ICE_ERR_MAX_LIMIT; + + ice_memset(&tbl_alloc, 0, sizeof(tbl_alloc), ICE_NONDMA_MEM); + tbl_alloc.width = width; + tbl_alloc.depth = depth; + tbl_alloc.act_pairs_per_entry = params->entry_act_pairs; + tbl_alloc.concurr = params->concurr; + /* Set dependent_alloc_id only for concurrent table type */ + if (params->concurr) { + tbl_alloc.num_dependent_alloc_ids = + ICE_AQC_MAX_CONCURRENT_ACL_TBL; + + for (i = 0; i < ICE_AQC_MAX_CONCURRENT_ACL_TBL; i++) + tbl_alloc.buf.data_buf.alloc_ids[i] = + CPU_TO_LE16(params->dep_tbls[i]); + } + + /* call the aq command to create the ACL table with these values */ + status = ice_aq_alloc_acl_tbl(hw, &tbl_alloc, NULL); + + if (status) { + if (LE16_TO_CPU(tbl_alloc.buf.resp_buf.alloc_id) < + ICE_AQC_ALLOC_ID_LESS_THAN_4K) + ice_debug(hw, ICE_DBG_ACL, + "Alloc ACL table failed. 
Unavailable resource.\n"); + else + ice_debug(hw, ICE_DBG_ACL, + "AQ allocation of ACL failed with error. status: %d\n", + status); + return status; + } + + tbl = (struct ice_acl_tbl *)ice_malloc(hw, sizeof(*tbl)); + if (!tbl) { + status = ICE_ERR_NO_MEMORY; + + goto out; + } + + resp_buf = &tbl_alloc.buf.resp_buf; + + /* Retrieve information of the allocated table */ + tbl->id = LE16_TO_CPU(resp_buf->alloc_id); + tbl->first_tcam = resp_buf->ops.table.first_tcam; + tbl->last_tcam = resp_buf->ops.table.last_tcam; + tbl->first_entry = LE16_TO_CPU(resp_buf->first_entry); + tbl->last_entry = LE16_TO_CPU(resp_buf->last_entry); + + tbl->info = *params; + tbl->info.width = width; + tbl->info.depth = depth; + hw->acl_tbl = tbl; + + for (i = 0; i < ICE_AQC_MAX_ACTION_MEMORIES; i++) + tbl->act_mems[i].act_mem = resp_buf->act_mem[i]; + + /* Figure out which TCAMs that these newly allocated action memories + * belong to. + */ + ice_acl_divide_act_mems_to_tcams(tbl); + + /* Initialize the resources allocated by invalidating all TCAM entries + * and all the action pairs + */ + status = ice_acl_init_tbl(hw); + if (status) { + ice_free(hw, tbl); + hw->acl_tbl = NULL; + ice_debug(hw, ICE_DBG_ACL, + "Initialization of TCAM entries failed. status: %d\n", + status); + goto out; + } + + first_e = (tbl->first_tcam * ICE_AQC_MAX_TCAM_ALLOC_UNITS) + + (tbl->first_entry / ICE_ACL_ENTRY_ALLOC_UNIT); + last_e = (tbl->last_tcam * ICE_AQC_MAX_TCAM_ALLOC_UNITS) + + (tbl->last_entry / ICE_ACL_ENTRY_ALLOC_UNIT); + + /* Indicate available entries in the table */ + for (i = first_e; i <= last_e; i++) + ice_set_bit(i, tbl->avail); + + INIT_LIST_HEAD(&tbl->scens); +out: + + return status; +} + +/** + * ice_acl_alloc_partition - Allocate a partition from the ACL table + * @hw: pointer to the hardware structure + * @req: info of partition being allocated + */ +static enum ice_status +ice_acl_alloc_partition(struct ice_hw *hw, struct ice_acl_scen *req) +{ + u16 start = 0, cnt = 0, off = 0; + u16 width, r_entries, row; + bool done = false; + int dir; + + /* Determine the number of TCAMs each entry overlaps */ + width = DIVIDE_AND_ROUND_UP(req->width, ICE_AQC_ACL_KEY_WIDTH_BYTES); + + /* Check if we have enough TCAMs to accommodate the width */ + if (width > hw->acl_tbl->last_tcam - hw->acl_tbl->first_tcam + 1) + return ICE_ERR_MAX_LIMIT; + + /* Number of entries must be multiple of ICE_ACL_ENTRY_ALLOC_UNIT's */ + r_entries = ICE_ALIGN(req->num_entry, ICE_ACL_ENTRY_ALLOC_UNIT); + + /* To look for an available partition that can accommodate the request, + * the process first logically arranges available TCAMs in rows such + * that each row produces entries with the requested width. It then + * scans the TCAMs' available bitmap, one bit at a time, and + * accumulates contiguous available 64-entry chunks until there are + * enough of them or when all TCAM configurations have been checked. + * + * For width of 1 TCAM, the scanning process starts from the top most + * TCAM, and goes downward. Available bitmaps are examined from LSB + * to MSB. + * + * For width of multiple TCAMs, the process starts from the bottom-most + * row of TCAMs, and goes upward. Available bitmaps are examined from + * the MSB to the LSB. + * + * To make sure that adjacent TCAMs can be logically arranged in the + * same row, the scanning process may have multiple passes. In each + * pass, the first TCAM of the bottom-most row is displaced by one + * additional TCAM. The width of the row and the number of the TCAMs + * available determine the number of passes. 
When the displacement is + * more than the size of width, the TCAM row configurations will + * repeat. The process will terminate when the configurations repeat. + * + * Available partitions can span more than one row of TCAMs. + */ + if (width == 1) { + row = hw->acl_tbl->first_tcam; + dir = 1; + } else { + /* Start with the bottom-most row, and scan for available + * entries upward + */ + row = hw->acl_tbl->last_tcam + 1 - width; + dir = -1; + } + + do { + u16 i; + + /* Scan all 64-entry chunks, one chunk at a time, in the + * current TCAM row + */ + for (i = 0; + i < ICE_AQC_MAX_TCAM_ALLOC_UNITS && cnt < r_entries; + i++) { + bool avail = true; + u16 w, p; + + /* Compute the cumulative available mask across the + * TCAM row to determine if the current 64-entry chunk + * is available. + */ + p = dir > 0 ? i : ICE_AQC_MAX_TCAM_ALLOC_UNITS - i - 1; + for (w = row; w < row + width && avail; w++) { + u16 b; + + b = (w * ICE_AQC_MAX_TCAM_ALLOC_UNITS) + p; + avail &= ice_is_bit_set(hw->acl_tbl->avail, b); + } + + if (!avail) { + cnt = 0; + } else { + /* Compute the starting index of the newly + * found partition. When 'dir' is negative, the + * scan processes is going upward. If so, the + * starting index needs to be updated for every + * available 64-entry chunk found. + */ + if (!cnt || dir < 0) + start = (row * ICE_AQC_ACL_TCAM_DEPTH) + + (p * ICE_ACL_ENTRY_ALLOC_UNIT); + cnt += ICE_ACL_ENTRY_ALLOC_UNIT; + } + } + + if (cnt >= r_entries) { + req->start = start; + req->num_entry = r_entries; + req->end = ice_acl_tbl_calc_end_idx(start, r_entries, + width); + break; + } + + row = (dir > 0) ? (row + width) : (row - width); + if (row > hw->acl_tbl->last_tcam || + row < hw->acl_tbl->first_tcam) { + /* All rows have been checked. Increment 'off' that + * will help yield a different TCAM configuration in + * which adjacent TCAMs can be alternatively in the + * same row. + */ + off++; + + /* However, if the new 'off' value yields previously + * checked configurations, then exit. + */ + if (off >= width) + done = true; + else + row = dir > 0 ? off : + hw->acl_tbl->last_tcam + 1 - off - + width; + } + } while (!done); + + return cnt >= r_entries ? ICE_SUCCESS : ICE_ERR_MAX_LIMIT; +} + +/** + * ice_acl_fill_tcam_select + * @scen_buf: Pointer to the scenario buffer that needs to be populated + * @scen: Pointer to the available space for the scenario + * @tcam_idx: Index of the TCAM used for this scenario + * @tcam_idx_in_cascade : Local index of the TCAM in the cascade scenario + * + * For all TCAM that participate in this scenario, fill out the tcam_select + * value. + */ +static void +ice_acl_fill_tcam_select(struct ice_aqc_acl_scen *scen_buf, + struct ice_acl_scen *scen, u16 tcam_idx, + u16 tcam_idx_in_cascade) +{ + u16 cascade_cnt, idx; + u8 j; + + idx = tcam_idx_in_cascade * ICE_AQC_ACL_KEY_WIDTH_BYTES; + cascade_cnt = DIVIDE_AND_ROUND_UP(scen->width, + ICE_AQC_ACL_KEY_WIDTH_BYTES); + + /* For each scenario, we reserved last three bytes of scenario width for + * profile ID, range checker, and packet direction. Thus, the last three + * bytes of the last cascaded TCAMs will have value of 1st, 31st and + * 32nd byte location of BYTE selection base. 
+ * + * For other bytes in the TCAMs: + * For non-cascade mode (1 TCAM wide) scenario, TCAM[x]'s Select {0-1} + * select indices 0-1 of the Byte Selection Base + * For cascade mode, the leftmost TCAM of the first cascade row selects + * indices 0-4 of the Byte Selection Base; the second TCAM in the + * cascade row selects indices starting with 5-n + */ + for (j = 0; j < ICE_AQC_ACL_KEY_WIDTH_BYTES; j++) { + /* PKT DIR uses the 1st location of Byte Selection Base: + 1 */ + u8 val = ICE_AQC_ACL_BYTE_SEL_BASE + 1 + idx; + + if (tcam_idx_in_cascade == cascade_cnt - 1) { + if (j == ICE_ACL_SCEN_RNG_CHK_IDX_IN_TCAM) + val = ICE_AQC_ACL_BYTE_SEL_BASE_RNG_CHK; + else if (j == ICE_ACL_SCEN_PID_IDX_IN_TCAM) + val = ICE_AQC_ACL_BYTE_SEL_BASE_PID; + else if (j == ICE_ACL_SCEN_PKT_DIR_IDX_IN_TCAM) + val = ICE_AQC_ACL_BYTE_SEL_BASE_PKT_DIR; + } + + /* In case that scenario's width is greater than the width of + * the Byte selection base, we will not assign a value to the + * tcam_select[j]. As a result, the tcam_select[j] will have + * default value which is zero. + */ + if (val > ICE_AQC_ACL_BYTE_SEL_BASE_RNG_CHK) + continue; + + scen_buf->tcam_cfg[tcam_idx].tcam_select[j] = val; + + idx++; + } +} + +/** + * ice_acl_set_scen_chnk_msk + * @scen_buf: Pointer to the scenario buffer that needs to be populated + * @scen: pointer to the available space for the scenario + * + * Set the chunk mask for the entries that will be used by this scenario + */ +static void +ice_acl_set_scen_chnk_msk(struct ice_aqc_acl_scen *scen_buf, + struct ice_acl_scen *scen) +{ + u16 tcam_idx, num_cscd, units, cnt; + u8 chnk_offst; + + /* Determine the starting TCAM index and offset of the start entry */ + tcam_idx = ICE_ACL_TBL_TCAM_IDX(scen->start); + chnk_offst = (u8)((scen->start % ICE_AQC_ACL_TCAM_DEPTH) / + ICE_ACL_ENTRY_ALLOC_UNIT); + + /* Entries are allocated and tracked in multiple of 64's */ + units = scen->num_entry / ICE_ACL_ENTRY_ALLOC_UNIT; + + /* Determine number of cascaded TCAMs */ + num_cscd = scen->width / ICE_AQC_ACL_KEY_WIDTH_BYTES; + + for (cnt = 0; cnt < units; cnt++) { + u16 i; + + /* Set the corresponding bitmap of individual 64-entry + * chunk spans across a cascade of 1 or more TCAMs + * For each TCAM, there will be (ICE_AQC_ACL_TCAM_DEPTH + * / ICE_ACL_ENTRY_ALLOC_UNIT) or 8 chunks. + */ + for (i = tcam_idx; i < tcam_idx + num_cscd; i++) + scen_buf->tcam_cfg[i].chnk_msk |= BIT(chnk_offst); + + chnk_offst = (chnk_offst + 1) % ICE_AQC_MAX_TCAM_ALLOC_UNITS; + if (!chnk_offst) + tcam_idx += num_cscd; + } +} + +/** + * ice_acl_assign_act_mem_for_scen + * @tbl: pointer to acl table structure + * @scen: pointer to the scenario struct + * @scen_buf: pointer to the available space for the scenario + * @current_tcam_idx: theoretical index of the TCAM that we associated those + * action memory banks with, at the table creation time. + * @target_tcam_idx: index of the TCAM that we want to associate those action + * memory banks with. 
+ */ +static void +ice_acl_assign_act_mem_for_scen(struct ice_acl_tbl *tbl, + struct ice_acl_scen *scen, + struct ice_aqc_acl_scen *scen_buf, + u8 current_tcam_idx, + u8 target_tcam_idx) +{ + u8 i; + + for (i = 0; i < ICE_AQC_MAX_ACTION_MEMORIES; i++) { + struct ice_acl_act_mem *p_mem = &tbl->act_mems[i]; + + if (p_mem->act_mem == ICE_ACL_ACT_PAIR_MEM_INVAL || + p_mem->member_of_tcam != current_tcam_idx) + continue; + + scen_buf->act_mem_cfg[i] = target_tcam_idx; + scen_buf->act_mem_cfg[i] |= ICE_AQC_ACL_SCE_ACT_MEM_EN; + ice_set_bit(i, scen->act_mem_bitmap); + } +} + +/** + * ice_acl_commit_partition - Indicate if the specified partition is active + * @hw: pointer to the hardware structure + * @scen: pointer to the scenario struct + * @commit: true if the partition is being commit + */ +static void +ice_acl_commit_partition(struct ice_hw *hw, struct ice_acl_scen *scen, + bool commit) +{ + u16 tcam_idx, off, num_cscd, units, cnt; + + /* Determine the starting TCAM index and offset of the start entry */ + tcam_idx = ICE_ACL_TBL_TCAM_IDX(scen->start); + off = (scen->start % ICE_AQC_ACL_TCAM_DEPTH) / + ICE_ACL_ENTRY_ALLOC_UNIT; + + /* Entries are allocated and tracked in multiple of 64's */ + units = scen->num_entry / ICE_ACL_ENTRY_ALLOC_UNIT; + + /* Determine number of cascaded TCAM */ + num_cscd = scen->width / ICE_AQC_ACL_KEY_WIDTH_BYTES; + + for (cnt = 0; cnt < units; cnt++) { + u16 w; + + /* Set/clear the corresponding bitmap of individual 64-entry + * chunk spans across a row of 1 or more TCAMs + */ + for (w = 0; w < num_cscd; w++) { + u16 b; + + b = ((tcam_idx + w) * ICE_AQC_MAX_TCAM_ALLOC_UNITS) + + off; + if (commit) + ice_set_bit(b, hw->acl_tbl->avail); + else + ice_clear_bit(b, hw->acl_tbl->avail); + } + + off = (off + 1) % ICE_AQC_MAX_TCAM_ALLOC_UNITS; + if (!off) + tcam_idx += num_cscd; + } +} + +/** + * ice_acl_create_scen + * @hw: pointer to the hardware structure + * @match_width: number of bytes to be matched in this scenario + * @num_entries: number of entries to be allocated for the scenario + * @scen_id: holds returned scenario ID if successful + */ +enum ice_status +ice_acl_create_scen(struct ice_hw *hw, u16 match_width, u16 num_entries, + u16 *scen_id) +{ + u8 cascade_cnt, first_tcam, last_tcam, i, k; + struct ice_aqc_acl_scen scen_buf; + struct ice_acl_scen *scen; + enum ice_status status; + + if (!hw->acl_tbl) + return ICE_ERR_DOES_NOT_EXIST; + + scen = (struct ice_acl_scen *)ice_malloc(hw, sizeof(*scen)); + if (!scen) + return ICE_ERR_NO_MEMORY; + + scen->start = hw->acl_tbl->first_entry; + scen->width = ICE_AQC_ACL_KEY_WIDTH_BYTES * + DIVIDE_AND_ROUND_UP(match_width, ICE_AQC_ACL_KEY_WIDTH_BYTES); + scen->num_entry = num_entries; + + status = ice_acl_alloc_partition(hw, scen); + if (status) { + ice_free(hw, scen); + return status; + } + + ice_memset(&scen_buf, 0, sizeof(scen_buf), ICE_NONDMA_MEM); + + /* Determine the number of cascade TCAMs, given the scenario's width */ + cascade_cnt = DIVIDE_AND_ROUND_UP(scen->width, + ICE_AQC_ACL_KEY_WIDTH_BYTES); + first_tcam = ICE_ACL_TBL_TCAM_IDX(scen->start); + last_tcam = ICE_ACL_TBL_TCAM_IDX(scen->end); + + /* For each scenario, we reserved last three bytes of scenario width for + * packet direction flag, profile ID and range checker. Thus, we want to + * return back to the caller the eff_width, pkt_dir_idx, rng_chk_idx and + * pid_idx. 
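+ * eff_width is therefore the scenario width less those reserved bytes, and + * the three index fields give the byte offsets of the reserved fields + * within the scenario key.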
+ */ + scen->eff_width = cascade_cnt * ICE_AQC_ACL_KEY_WIDTH_BYTES - + ICE_ACL_SCEN_MIN_WIDTH; + scen->rng_chk_idx = (cascade_cnt - 1) * ICE_AQC_ACL_KEY_WIDTH_BYTES + + ICE_ACL_SCEN_RNG_CHK_IDX_IN_TCAM; + scen->pid_idx = (cascade_cnt - 1) * ICE_AQC_ACL_KEY_WIDTH_BYTES + + ICE_ACL_SCEN_PID_IDX_IN_TCAM; + scen->pkt_dir_idx = (cascade_cnt - 1) * ICE_AQC_ACL_KEY_WIDTH_BYTES + + ICE_ACL_SCEN_PKT_DIR_IDX_IN_TCAM; + + /* set the chunk mask for the tcams */ + ice_acl_set_scen_chnk_msk(&scen_buf, scen); + + /* set the TCAM select and start_cmp and start_set bits */ + k = first_tcam; + /* set the START_SET bit at the beginning of the stack */ + scen_buf.tcam_cfg[k].start_cmp_set |= ICE_AQC_ACL_ALLOC_SCE_START_SET; + while (k <= last_tcam) { + u8 last_tcam_idx_cascade = cascade_cnt + k - 1; + + /* set start_cmp for the first cascaded TCAM */ + scen_buf.tcam_cfg[k].start_cmp_set |= + ICE_AQC_ACL_ALLOC_SCE_START_CMP; + + /* cascade TCAMs up to the width of the scenario */ + for (i = k; i < cascade_cnt + k; i++) { + ice_acl_fill_tcam_select(&scen_buf, scen, i, i - k); + ice_acl_assign_act_mem_for_scen(hw->acl_tbl, scen, + &scen_buf, + i, + last_tcam_idx_cascade); + } + + k = i; + } + + /* We need to set the start_cmp bit for the unused TCAMs. */ + i = 0; + while (i < first_tcam) + scen_buf.tcam_cfg[i++].start_cmp_set = + ICE_AQC_ACL_ALLOC_SCE_START_CMP; + + i = last_tcam + 1; + while (i < ICE_AQC_ACL_SLICES) + scen_buf.tcam_cfg[i++].start_cmp_set = + ICE_AQC_ACL_ALLOC_SCE_START_CMP; + + status = ice_aq_alloc_acl_scen(hw, scen_id, &scen_buf, NULL); + if (status) { + ice_debug(hw, ICE_DBG_ACL, + "AQ allocation of ACL scenario failed. status: %d\n", + status); + ice_free(hw, scen); + return status; + } + + scen->id = *scen_id; + ice_acl_commit_partition(hw, scen, false); + ice_acl_init_entry(scen); + LIST_ADD(&scen->list_entry, &hw->acl_tbl->scens); + + return status; +} + +/** + * ice_acl_destroy_tbl - Destroy a previously created LEM table for ACL + * @hw: pointer to the HW struct + */ +enum ice_status ice_acl_destroy_tbl(struct ice_hw *hw) +{ + struct ice_acl_scen *pos_scen, *tmp_scen; + struct ice_aqc_acl_generic resp_buf; + struct ice_aqc_acl_scen buf; + enum ice_status status; + u8 i; + + if (!hw->acl_tbl) + return ICE_ERR_DOES_NOT_EXIST; + + /* Mark all the created scenario's TCAM to stop the packet lookup and + * delete them afterward + */ + LIST_FOR_EACH_ENTRY_SAFE(pos_scen, tmp_scen, &hw->acl_tbl->scens, + ice_acl_scen, list_entry) { + status = ice_aq_query_acl_scen(hw, pos_scen->id, &buf, NULL); + if (status) { + ice_debug(hw, ICE_DBG_ACL, "ice_aq_query_acl_scen() failed. status: %d\n", + status); + return status; + } + + for (i = 0; i < ICE_AQC_ACL_SLICES; i++) { + buf.tcam_cfg[i].chnk_msk = 0; + buf.tcam_cfg[i].start_cmp_set = + ICE_AQC_ACL_ALLOC_SCE_START_CMP; + } + + for (i = 0; i < ICE_AQC_MAX_ACTION_MEMORIES; i++) + buf.act_mem_cfg[i] = 0; + + status = ice_aq_update_acl_scen(hw, pos_scen->id, &buf, NULL); + if (status) { + ice_debug(hw, ICE_DBG_ACL, "ice_aq_update_acl_scen() failed. status: %d\n", + status); + return status; + } + + status = ice_acl_destroy_scen(hw, pos_scen->id); + if (status) { + ice_debug(hw, ICE_DBG_ACL, "deletion of scenario failed. status: %d\n", + status); + return status; + } + } + + /* call the aq command to destroy the ACL table */ + status = ice_aq_dealloc_acl_tbl(hw, hw->acl_tbl->id, &resp_buf, NULL); + + if (status) { + ice_debug(hw, ICE_DBG_ACL, + "AQ de-allocation of ACL failed. 
status: %d\n", + status); + return status; + } + + ice_free(hw, hw->acl_tbl); + hw->acl_tbl = NULL; + + return ICE_SUCCESS; +} + +/** + * ice_acl_add_entry - Add a flow entry to an ACL scenario + * @hw: pointer to the HW struct + * @scen: scenario to add the entry to + * @prior: priority level of the entry being added + * @keys: buffer of the value of the key to be programmed to the ACL entry + * @inverts: buffer of the value of the key inverts to be programmed + * @acts: pointer to a buffer containing formatted actions + * @acts_cnt: indicates the number of actions stored in "acts" + * @entry_idx: returned scenario relative index of the added flow entry + * + * Given an ACL table and a scenario, to add the specified key and key invert + * to an available entry in the specified scenario. + * The "keys" and "inverts" buffers must be of the size which is the same as + * the scenario's width + */ +enum ice_status +ice_acl_add_entry(struct ice_hw *hw, struct ice_acl_scen *scen, + enum ice_acl_entry_prior prior, u8 *keys, u8 *inverts, + struct ice_acl_act_entry *acts, u8 acts_cnt, u16 *entry_idx) +{ + u8 i, entry_tcam, num_cscd, idx, offset; + struct ice_aqc_acl_data buf; + enum ice_status status = ICE_SUCCESS; + + if (!scen) + return ICE_ERR_DOES_NOT_EXIST; + + *entry_idx = ice_acl_scen_assign_entry_idx(scen, prior); + if (*entry_idx >= scen->num_entry) { + *entry_idx = 0; + return ICE_ERR_MAX_LIMIT; + } + + /* Determine number of cascaded TCAMs */ + num_cscd = DIVIDE_AND_ROUND_UP(scen->width, + ICE_AQC_ACL_KEY_WIDTH_BYTES); + + entry_tcam = ICE_ACL_TBL_TCAM_IDX(scen->start); + idx = ICE_ACL_TBL_TCAM_ENTRY_IDX(scen->start + *entry_idx); + + ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM); + for (i = 0; i < num_cscd; i++) { + /* If the key spans more than one TCAM in the case of cascaded + * TCAMs, the key and key inverts need to be properly split + * among TCAMs.E.g.bytes 0 - 4 go to an index in the first TCAM + * and bytes 5 - 9 go to the same index in the next TCAM, etc. + * If the entry spans more than one TCAM in a cascaded TCAM + * mode, the programming of the entries in the TCAMs must be in + * reversed order - the TCAM entry of the rightmost TCAM should + * be programmed first; the TCAM entry of the leftmost TCAM + * should be programmed last. 
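+ * + * Hence 'offset' below starts at the rightmost TCAM of the cascade + * (num_cscd - 1) and moves toward the leftmost one as 'i' increases.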
+ */ + offset = num_cscd - i - 1; + ice_memcpy(&buf.entry_key.val, + &keys[offset * sizeof(buf.entry_key.val)], + sizeof(buf.entry_key.val), ICE_NONDMA_TO_NONDMA); + ice_memcpy(&buf.entry_key_invert.val, + &inverts[offset * sizeof(buf.entry_key_invert.val)], + sizeof(buf.entry_key_invert.val), + ICE_NONDMA_TO_NONDMA); + status = ice_aq_program_acl_entry(hw, entry_tcam + offset, idx, + &buf, NULL); + if (status) { + ice_debug(hw, ICE_DBG_ACL, + "aq program acl entry failed status: %d\n", + status); + goto out; + } + } + + /* Program the action memory */ + status = ice_acl_prog_act(hw, scen, acts, acts_cnt, *entry_idx); + +out: + if (status) { + ice_acl_rem_entry(hw, scen, *entry_idx); + *entry_idx = 0; + } + + return status; +} + +/** + * ice_acl_prog_act - Program a scenario's action memory + * @hw: pointer to the HW struct + * @scen: scenario to add the entry to + * @acts: pointer to a buffer containing formatted actions + * @acts_cnt: indicates the number of actions stored in "acts" + * @entry_idx: scenario relative index of the added flow entry + * + * Program a scenario's action memory + */ +enum ice_status +ice_acl_prog_act(struct ice_hw *hw, struct ice_acl_scen *scen, + struct ice_acl_act_entry *acts, u8 acts_cnt, + u16 entry_idx) +{ + u8 entry_tcam, num_cscd, i, actx_idx = 0; + struct ice_aqc_actpair act_buf; + enum ice_status status = ICE_SUCCESS; + u16 idx; + + if (entry_idx >= scen->num_entry) + return ICE_ERR_MAX_LIMIT; + + ice_memset(&act_buf, 0, sizeof(act_buf), ICE_NONDMA_MEM); + + /* Determine number of cascaded TCAMs */ + num_cscd = DIVIDE_AND_ROUND_UP(scen->width, + ICE_AQC_ACL_KEY_WIDTH_BYTES); + + entry_tcam = ICE_ACL_TBL_TCAM_IDX(scen->start); + idx = ICE_ACL_TBL_TCAM_ENTRY_IDX(scen->start + entry_idx); + + i = ice_find_first_bit(scen->act_mem_bitmap, + ICE_AQC_MAX_ACTION_MEMORIES); + while (i < ICE_AQC_MAX_ACTION_MEMORIES) { + struct ice_acl_act_mem *mem = &hw->acl_tbl->act_mems[i]; + + if (actx_idx >= acts_cnt) + break; + if (mem->member_of_tcam >= entry_tcam && + mem->member_of_tcam < entry_tcam + num_cscd) { + ice_memcpy(&act_buf.act[0], &acts[actx_idx], + sizeof(struct ice_acl_act_entry), + ICE_NONDMA_TO_NONDMA); + + if (++actx_idx < acts_cnt) { + ice_memcpy(&act_buf.act[1], &acts[actx_idx], + sizeof(struct ice_acl_act_entry), + ICE_NONDMA_TO_NONDMA); + } + + status = ice_aq_program_actpair(hw, i, idx, &act_buf, + NULL); + if (status) { + ice_debug(hw, ICE_DBG_ACL, + "program actpair failed status: %d\n", + status); + break; + } + actx_idx++; + } + + i = ice_find_next_bit(scen->act_mem_bitmap, + ICE_AQC_MAX_ACTION_MEMORIES, i + 1); + } + + if (!status && actx_idx < acts_cnt) + status = ICE_ERR_MAX_LIMIT; + + return status; +} + +/** + * ice_acl_rem_entry - Remove a flow entry from an ACL scenario + * @hw: pointer to the HW struct + * @scen: scenario to remove the entry from + * @entry_idx: the scenario-relative index of the flow entry being removed + */ +enum ice_status +ice_acl_rem_entry(struct ice_hw *hw, struct ice_acl_scen *scen, u16 entry_idx) +{ + struct ice_aqc_actpair act_buf; + struct ice_aqc_acl_data buf; + u8 entry_tcam, num_cscd, i; + enum ice_status status = ICE_SUCCESS; + u16 idx; + + if (!scen) + return ICE_ERR_DOES_NOT_EXIST; + + if (entry_idx >= scen->num_entry) + return ICE_ERR_MAX_LIMIT; + + if (!ice_is_bit_set(scen->entry_bitmap, entry_idx)) + return ICE_ERR_DOES_NOT_EXIST; + + /* Determine number of cascaded TCAMs */ + num_cscd = DIVIDE_AND_ROUND_UP(scen->width, + ICE_AQC_ACL_KEY_WIDTH_BYTES); + + entry_tcam = ICE_ACL_TBL_TCAM_IDX(scen->start); 
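+ /* Row within the scenario's TCAM slices that holds this flow entry */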
+ idx = ICE_ACL_TBL_TCAM_ENTRY_IDX(scen->start + entry_idx); + + /* invalidate the flow entry */ + ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM); + for (i = 0; i < num_cscd; i++) { + status = ice_aq_program_acl_entry(hw, entry_tcam + i, idx, &buf, + NULL); + if (status) + ice_debug(hw, ICE_DBG_ACL, + "aq program acl entry failed status: %d\n", + status); + } + + ice_memset(&act_buf, 0, sizeof(act_buf), ICE_NONDMA_MEM); + i = ice_find_first_bit(scen->act_mem_bitmap, + ICE_AQC_MAX_ACTION_MEMORIES); + while (i < ICE_AQC_MAX_ACTION_MEMORIES) { + struct ice_acl_act_mem *mem = &hw->acl_tbl->act_mems[i]; + + if (mem->member_of_tcam >= entry_tcam && + mem->member_of_tcam < entry_tcam + num_cscd) { + /* Invalidate allocated action pairs */ + status = ice_aq_program_actpair(hw, i, idx, &act_buf, + NULL); + if (status) + ice_debug(hw, ICE_DBG_ACL, + "program actpair failed.status: %d\n", + status); + } + + i = ice_find_next_bit(scen->act_mem_bitmap, + ICE_AQC_MAX_ACTION_MEMORIES, i + 1); + } + + ice_acl_scen_free_entry_idx(scen, entry_idx); + + return status; +} + +/** + * ice_acl_destroy_scen - Destroy an ACL scenario + * @hw: pointer to the HW struct + * @scen_id: ID of the remove scenario + */ +enum ice_status ice_acl_destroy_scen(struct ice_hw *hw, u16 scen_id) +{ + struct ice_acl_scen *scen, *tmp_scen; + struct ice_flow_prof *p, *tmp; + enum ice_status status; + + if (!hw->acl_tbl) + return ICE_ERR_DOES_NOT_EXIST; + + /* Remove profiles that use "scen_id" scenario */ + LIST_FOR_EACH_ENTRY_SAFE(p, tmp, &hw->fl_profs[ICE_BLK_ACL], + ice_flow_prof, l_entry) + if (p->cfg.scen && p->cfg.scen->id == scen_id) { + status = ice_flow_rem_prof(hw, ICE_BLK_ACL, p->id); + if (status) { + ice_debug(hw, ICE_DBG_ACL, + "ice_flow_rem_prof failed. status: %d\n", + status); + goto exit; + } + } + + /* Call the aq command to destroy the targeted scenario */ + status = ice_aq_dealloc_acl_scen(hw, scen_id, NULL); + + if (status) { + ice_debug(hw, ICE_DBG_ACL, + "AQ de-allocation of scenario failed. status: %d\n", + status); + goto exit; + } + + /* Remove scenario from hw->acl_tbl->scens */ + LIST_FOR_EACH_ENTRY_SAFE(scen, tmp_scen, &hw->acl_tbl->scens, + ice_acl_scen, list_entry) + if (scen->id == scen_id) { + LIST_DEL(&scen->list_entry); + ice_free(hw, scen); + } +exit: + return status; +} diff --git a/src/spdk/dpdk/drivers/net/ice/base/ice_adminq_cmd.h b/src/spdk/dpdk/drivers/net/ice/base/ice_adminq_cmd.h new file mode 100644 index 000000000..57a785508 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ice/base/ice_adminq_cmd.h @@ -0,0 +1,2975 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + +#ifndef _ICE_ADMINQ_CMD_H_ +#define _ICE_ADMINQ_CMD_H_ + +/* This header file defines the Admin Queue commands, error codes and + * descriptor format. It is shared between Firmware and Software. 
+ */ + +#define ICE_MAX_VSI 768 +#define ICE_AQC_TOPO_MAX_LEVEL_NUM 0x9 +#define ICE_AQ_SET_MAC_FRAME_SIZE_MAX 9728 + +struct ice_aqc_generic { + __le32 param0; + __le32 param1; + __le32 addr_high; + __le32 addr_low; +}; + +/* Get version (direct 0x0001) */ +struct ice_aqc_get_ver { + __le32 rom_ver; + __le32 fw_build; + u8 fw_branch; + u8 fw_major; + u8 fw_minor; + u8 fw_patch; + u8 api_branch; + u8 api_major; + u8 api_minor; + u8 api_patch; +}; + +/* Send driver version (indirect 0x0002) */ +struct ice_aqc_driver_ver { + u8 major_ver; + u8 minor_ver; + u8 build_ver; + u8 subbuild_ver; + u8 reserved[4]; + __le32 addr_high; + __le32 addr_low; +}; + +/* Queue Shutdown (direct 0x0003) */ +struct ice_aqc_q_shutdown { + u8 driver_unloading; +#define ICE_AQC_DRIVER_UNLOADING BIT(0) + u8 reserved[15]; +}; + +/* Request resource ownership (direct 0x0008) + * Release resource ownership (direct 0x0009) + */ +struct ice_aqc_req_res { + __le16 res_id; +#define ICE_AQC_RES_ID_NVM 1 +#define ICE_AQC_RES_ID_SDP 2 +#define ICE_AQC_RES_ID_CHNG_LOCK 3 +#define ICE_AQC_RES_ID_GLBL_LOCK 4 + __le16 access_type; +#define ICE_AQC_RES_ACCESS_READ 1 +#define ICE_AQC_RES_ACCESS_WRITE 2 + + /* Upon successful completion, FW writes this value and driver is + * expected to release resource before timeout. This value is provided + * in milliseconds. + */ + __le32 timeout; +#define ICE_AQ_RES_NVM_READ_DFLT_TIMEOUT_MS 3000 +#define ICE_AQ_RES_NVM_WRITE_DFLT_TIMEOUT_MS 180000 +#define ICE_AQ_RES_CHNG_LOCK_DFLT_TIMEOUT_MS 1000 +#define ICE_AQ_RES_GLBL_LOCK_DFLT_TIMEOUT_MS 3000 + /* For SDP: pin ID of the SDP */ + __le32 res_number; + /* Status is only used for ICE_AQC_RES_ID_GLBL_LOCK */ + __le16 status; +#define ICE_AQ_RES_GLBL_SUCCESS 0 +#define ICE_AQ_RES_GLBL_IN_PROG 1 +#define ICE_AQ_RES_GLBL_DONE 2 + u8 reserved[2]; +}; + +/* Get function capabilities (indirect 0x000A) + * Get device capabilities (indirect 0x000B) + */ +struct ice_aqc_list_caps { + u8 cmd_flags; + u8 pf_index; + u8 reserved[2]; + __le32 count; + __le32 addr_high; + __le32 addr_low; +}; + +/* Device/Function buffer entry, repeated per reported capability */ +struct ice_aqc_list_caps_elem { + __le16 cap; +#define ICE_AQC_CAPS_VALID_FUNCTIONS 0x0005 +#define ICE_AQC_MAX_VALID_FUNCTIONS 0x8 +#define ICE_AQC_CAPS_VSI 0x0017 +#define ICE_AQC_CAPS_DCB 0x0018 +#define ICE_AQC_CAPS_RSS 0x0040 +#define ICE_AQC_CAPS_RXQS 0x0041 +#define ICE_AQC_CAPS_TXQS 0x0042 +#define ICE_AQC_CAPS_MSIX 0x0043 +#define ICE_AQC_CAPS_FD 0x0045 +#define ICE_AQC_CAPS_MAX_MTU 0x0047 + + u8 major_ver; + u8 minor_ver; + /* Number of resources described by this capability */ + __le32 number; + /* Only meaningful for some types of resources */ + __le32 logical_id; + /* Only meaningful for some types of resources */ + __le32 phys_id; + __le64 rsvd1; + __le64 rsvd2; +}; + +/* Manage MAC address, read command - indirect (0x0107) + * This struct is also used for the response + */ +struct ice_aqc_manage_mac_read { + __le16 flags; /* Zeroed by device driver */ +#define ICE_AQC_MAN_MAC_LAN_ADDR_VALID BIT(4) +#define ICE_AQC_MAN_MAC_SAN_ADDR_VALID BIT(5) +#define ICE_AQC_MAN_MAC_PORT_ADDR_VALID BIT(6) +#define ICE_AQC_MAN_MAC_WOL_ADDR_VALID BIT(7) +#define ICE_AQC_MAN_MAC_MC_MAG_EN BIT(8) +#define ICE_AQC_MAN_MAC_WOL_PRESERVE_ON_PFR BIT(9) +#define ICE_AQC_MAN_MAC_READ_S 4 +#define ICE_AQC_MAN_MAC_READ_M (0xF << ICE_AQC_MAN_MAC_READ_S) + u8 rsvd[2]; + u8 num_addr; /* Used in response */ + u8 rsvd1[3]; + __le32 addr_high; + __le32 addr_low; +}; + +/* Response buffer format for manage MAC read 
command */ +struct ice_aqc_manage_mac_read_resp { + u8 lport_num; + u8 addr_type; +#define ICE_AQC_MAN_MAC_ADDR_TYPE_LAN 0 +#define ICE_AQC_MAN_MAC_ADDR_TYPE_WOL 1 + u8 mac_addr[ETH_ALEN]; +}; + +/* Manage MAC address, write command - direct (0x0108) */ +struct ice_aqc_manage_mac_write { + u8 rsvd; + u8 flags; +#define ICE_AQC_MAN_MAC_WR_MC_MAG_EN BIT(0) +#define ICE_AQC_MAN_MAC_WR_WOL_LAA_PFR_KEEP BIT(1) +#define ICE_AQC_MAN_MAC_WR_S 6 +#define ICE_AQC_MAN_MAC_WR_M MAKEMASK(3, ICE_AQC_MAN_MAC_WR_S) +#define ICE_AQC_MAN_MAC_UPDATE_LAA 0 +#define ICE_AQC_MAN_MAC_UPDATE_LAA_WOL BIT(ICE_AQC_MAN_MAC_WR_S) + /* byte stream in network order */ + u8 mac_addr[ETH_ALEN]; + __le32 addr_high; + __le32 addr_low; +}; + +/* Clear PXE Command and response (direct 0x0110) */ +struct ice_aqc_clear_pxe { + u8 rx_cnt; +#define ICE_AQC_CLEAR_PXE_RX_CNT 0x2 + u8 reserved[15]; +}; + +/* Configure No-Drop Policy Command (direct 0x0112) */ +struct ice_aqc_config_no_drop_policy { + u8 opts; +#define ICE_AQC_FORCE_NO_DROP BIT(0) + u8 rsvd[15]; +}; + +/* Get switch configuration (0x0200) */ +struct ice_aqc_get_sw_cfg { + /* Reserved for command and copy of request flags for response */ + __le16 flags; + /* First desc in case of command and next_elem in case of response + * In case of response, if it is not zero, means all the configuration + * was not returned and new command shall be sent with this value in + * the 'first desc' field + */ + __le16 element; + /* Reserved for command, only used for response */ + __le16 num_elems; + __le16 rsvd; + __le32 addr_high; + __le32 addr_low; +}; + +/* Each entry in the response buffer is of the following type: */ +struct ice_aqc_get_sw_cfg_resp_elem { + /* VSI/Port Number */ + __le16 vsi_port_num; +#define ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_S 0 +#define ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M \ + (0x3FF << ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_S) +#define ICE_AQC_GET_SW_CONF_RESP_TYPE_S 14 +#define ICE_AQC_GET_SW_CONF_RESP_TYPE_M (0x3 << ICE_AQC_GET_SW_CONF_RESP_TYPE_S) +#define ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT 0 +#define ICE_AQC_GET_SW_CONF_RESP_VIRT_PORT 1 +#define ICE_AQC_GET_SW_CONF_RESP_VSI 2 + + /* SWID VSI/Port belongs to */ + __le16 swid; + + /* Bit 14..0 : PF/VF number VSI belongs to + * Bit 15 : VF indication bit + */ + __le16 pf_vf_num; +#define ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_S 0 +#define ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M \ + (0x7FFF << ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_S) +#define ICE_AQC_GET_SW_CONF_RESP_IS_VF BIT(15) +}; + +/* The response buffer is as follows. Note that the length of the + * elements array varies with the length of the command response. 
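+ * The elements[1] member is used as a variable-length array; the number of + * valid entries is reported in num_elems of the descriptor.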
+ */ +struct ice_aqc_get_sw_cfg_resp { + struct ice_aqc_get_sw_cfg_resp_elem elements[1]; +}; + +/* These resource type defines are used for all switch resource + * commands where a resource type is required, such as: + * Get Resource Allocation command (indirect 0x0204) + * Allocate Resources command (indirect 0x0208) + * Free Resources command (indirect 0x0209) + * Get Allocated Resource Descriptors Command (indirect 0x020A) + */ +#define ICE_AQC_RES_TYPE_VEB_COUNTER 0x00 +#define ICE_AQC_RES_TYPE_VLAN_COUNTER 0x01 +#define ICE_AQC_RES_TYPE_MIRROR_RULE 0x02 +#define ICE_AQC_RES_TYPE_VSI_LIST_REP 0x03 +#define ICE_AQC_RES_TYPE_VSI_LIST_PRUNE 0x04 +#define ICE_AQC_RES_TYPE_RECIPE 0x05 +#define ICE_AQC_RES_TYPE_PROFILE 0x06 +#define ICE_AQC_RES_TYPE_SWID 0x07 +#define ICE_AQC_RES_TYPE_VSI 0x08 +#define ICE_AQC_RES_TYPE_FLU 0x09 +#define ICE_AQC_RES_TYPE_WIDE_TABLE_1 0x0A +#define ICE_AQC_RES_TYPE_WIDE_TABLE_2 0x0B +#define ICE_AQC_RES_TYPE_WIDE_TABLE_4 0x0C +#define ICE_AQC_RES_TYPE_GLOBAL_RSS_HASH 0x20 +#define ICE_AQC_RES_TYPE_FDIR_COUNTER_BLOCK 0x21 +#define ICE_AQC_RES_TYPE_FDIR_GUARANTEED_ENTRIES 0x22 +#define ICE_AQC_RES_TYPE_FDIR_SHARED_ENTRIES 0x23 +#define ICE_AQC_RES_TYPE_FLEX_DESC_PROG 0x30 +#define ICE_AQC_RES_TYPE_SWITCH_PROF_BLDR_PROFID 0x48 +#define ICE_AQC_RES_TYPE_SWITCH_PROF_BLDR_TCAM 0x49 +#define ICE_AQC_RES_TYPE_ACL_PROF_BLDR_PROFID 0x50 +#define ICE_AQC_RES_TYPE_ACL_PROF_BLDR_TCAM 0x51 +#define ICE_AQC_RES_TYPE_FD_PROF_BLDR_PROFID 0x58 +#define ICE_AQC_RES_TYPE_FD_PROF_BLDR_TCAM 0x59 +#define ICE_AQC_RES_TYPE_HASH_PROF_BLDR_PROFID 0x60 +#define ICE_AQC_RES_TYPE_HASH_PROF_BLDR_TCAM 0x61 +/* Resource types 0x62-67 are reserved for Hash profile builder */ +#define ICE_AQC_RES_TYPE_QHASH_PROF_BLDR_PROFID 0x68 +#define ICE_AQC_RES_TYPE_QHASH_PROF_BLDR_TCAM 0x69 + +#define ICE_AQC_RES_TYPE_FLAG_SHARED BIT(7) +#define ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM BIT(12) +#define ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX BIT(13) + +#define ICE_AQC_RES_TYPE_FLAG_DEDICATED 0x00 + +#define ICE_AQC_RES_TYPE_S 0 +#define ICE_AQC_RES_TYPE_M (0x07F << ICE_AQC_RES_TYPE_S) + +/* Get Resource Allocation command (indirect 0x0204) */ +struct ice_aqc_get_res_alloc { + __le16 resp_elem_num; /* Used in response, reserved in command */ + u8 reserved[6]; + __le32 addr_high; + __le32 addr_low; +}; + +/* Get Resource Allocation Response Buffer per response */ +struct ice_aqc_get_res_resp_elem { + __le16 res_type; /* Types defined above cmd 0x0204 */ + __le16 total_capacity; /* Resources available to all PF's */ + __le16 total_function; /* Resources allocated for a PF */ + __le16 total_shared; /* Resources allocated as shared */ + __le16 total_free; /* Resources un-allocated/not reserved by any PF */ +}; + +/* Buffer for Get Resource command */ +struct ice_aqc_get_res_resp { + /* Number of resource entries to be calculated using + * datalen/sizeof(struct ice_aqc_cmd_resp)). + * Value of 'datalen' gets updated as part of response. 
+ */ + struct ice_aqc_get_res_resp_elem elem[1]; +}; + +/* Allocate Resources command (indirect 0x0208) + * Free Resources command (indirect 0x0209) + */ +struct ice_aqc_alloc_free_res_cmd { + __le16 num_entries; /* Number of Resource entries */ + u8 reserved[6]; + __le32 addr_high; + __le32 addr_low; +}; + +/* Resource descriptor */ +struct ice_aqc_res_elem { + union { + __le16 sw_resp; + __le16 flu_resp; + } e; +}; + +/* Buffer for Allocate/Free Resources commands */ +struct ice_aqc_alloc_free_res_elem { + __le16 res_type; /* Types defined above cmd 0x0204 */ +#define ICE_AQC_RES_TYPE_VSI_PRUNE_LIST_S 8 +#define ICE_AQC_RES_TYPE_VSI_PRUNE_LIST_M \ + (0xF << ICE_AQC_RES_TYPE_VSI_PRUNE_LIST_S) + __le16 num_elems; + struct ice_aqc_res_elem elem[1]; +}; + +/* Get Allocated Resource Descriptors Command (indirect 0x020A) */ +struct ice_aqc_get_allocd_res_desc { + union { + struct { + __le16 res; /* Types defined above cmd 0x0204 */ + __le16 first_desc; + __le32 reserved; + } cmd; + struct { + __le16 res; + __le16 next_desc; + __le16 num_desc; + __le16 reserved; + } resp; + } ops; + __le32 addr_high; + __le32 addr_low; +}; + +struct ice_aqc_get_allocd_res_desc_resp { + struct ice_aqc_res_elem elem[1]; +}; + +/* Add VSI (indirect 0x0210) + * Update VSI (indirect 0x0211) + * Get VSI (indirect 0x0212) + * Free VSI (indirect 0x0213) + */ +struct ice_aqc_add_get_update_free_vsi { + __le16 vsi_num; +#define ICE_AQ_VSI_NUM_S 0 +#define ICE_AQ_VSI_NUM_M (0x03FF << ICE_AQ_VSI_NUM_S) +#define ICE_AQ_VSI_IS_VALID BIT(15) + __le16 cmd_flags; +#define ICE_AQ_VSI_KEEP_ALLOC 0x1 + u8 vf_id; + u8 reserved; + __le16 vsi_flags; +#define ICE_AQ_VSI_TYPE_S 0 +#define ICE_AQ_VSI_TYPE_M (0x3 << ICE_AQ_VSI_TYPE_S) +#define ICE_AQ_VSI_TYPE_VF 0x0 +#define ICE_AQ_VSI_TYPE_VMDQ2 0x1 +#define ICE_AQ_VSI_TYPE_PF 0x2 +#define ICE_AQ_VSI_TYPE_EMP_MNG 0x3 + __le32 addr_high; + __le32 addr_low; +}; + +/* Response descriptor for: + * Add VSI (indirect 0x0210) + * Update VSI (indirect 0x0211) + * Free VSI (indirect 0x0213) + */ +struct ice_aqc_add_update_free_vsi_resp { + __le16 vsi_num; + __le16 ext_status; + __le16 vsi_used; + __le16 vsi_free; + __le32 addr_high; + __le32 addr_low; +}; + +struct ice_aqc_get_vsi_resp { + __le16 vsi_num; + u8 vf_id; + /* The vsi_flags field uses the ICE_AQ_VSI_TYPE_* defines for values. + * These are found above in struct ice_aqc_add_get_update_free_vsi. 
+ */ + u8 vsi_flags; + __le16 vsi_used; + __le16 vsi_free; + __le32 addr_high; + __le32 addr_low; +}; + +struct ice_aqc_vsi_props { + __le16 valid_sections; +#define ICE_AQ_VSI_PROP_SW_VALID BIT(0) +#define ICE_AQ_VSI_PROP_SECURITY_VALID BIT(1) +#define ICE_AQ_VSI_PROP_VLAN_VALID BIT(2) +#define ICE_AQ_VSI_PROP_OUTER_TAG_VALID BIT(3) +#define ICE_AQ_VSI_PROP_INGRESS_UP_VALID BIT(4) +#define ICE_AQ_VSI_PROP_EGRESS_UP_VALID BIT(5) +#define ICE_AQ_VSI_PROP_RXQ_MAP_VALID BIT(6) +#define ICE_AQ_VSI_PROP_Q_OPT_VALID BIT(7) +#define ICE_AQ_VSI_PROP_OUTER_UP_VALID BIT(8) +#define ICE_AQ_VSI_PROP_ACL_VALID BIT(10) +#define ICE_AQ_VSI_PROP_FLOW_DIR_VALID BIT(11) +#define ICE_AQ_VSI_PROP_PASID_VALID BIT(12) + /* switch section */ + u8 sw_id; + u8 sw_flags; +#define ICE_AQ_VSI_SW_FLAG_ALLOW_LB BIT(5) +#define ICE_AQ_VSI_SW_FLAG_LOCAL_LB BIT(6) +#define ICE_AQ_VSI_SW_FLAG_SRC_PRUNE BIT(7) + u8 sw_flags2; +#define ICE_AQ_VSI_SW_FLAG_RX_PRUNE_EN_S 0 +#define ICE_AQ_VSI_SW_FLAG_RX_PRUNE_EN_M \ + (0xF << ICE_AQ_VSI_SW_FLAG_RX_PRUNE_EN_S) +#define ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA BIT(0) +#define ICE_AQ_VSI_SW_FLAG_LAN_ENA BIT(4) + u8 veb_stat_id; +#define ICE_AQ_VSI_SW_VEB_STAT_ID_S 0 +#define ICE_AQ_VSI_SW_VEB_STAT_ID_M (0x1F << ICE_AQ_VSI_SW_VEB_STAT_ID_S) +#define ICE_AQ_VSI_SW_VEB_STAT_ID_VALID BIT(5) + /* security section */ + u8 sec_flags; +#define ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD BIT(0) +#define ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF BIT(2) +#define ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S 4 +#define ICE_AQ_VSI_SEC_TX_PRUNE_ENA_M (0xF << ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S) +#define ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA BIT(0) + u8 sec_reserved; + /* VLAN section */ + __le16 pvid; /* VLANS include priority bits */ + u8 pvlan_reserved[2]; + u8 vlan_flags; +#define ICE_AQ_VSI_VLAN_MODE_S 0 +#define ICE_AQ_VSI_VLAN_MODE_M (0x3 << ICE_AQ_VSI_VLAN_MODE_S) +#define ICE_AQ_VSI_VLAN_MODE_UNTAGGED 0x1 +#define ICE_AQ_VSI_VLAN_MODE_TAGGED 0x2 +#define ICE_AQ_VSI_VLAN_MODE_ALL 0x3 +#define ICE_AQ_VSI_PVLAN_INSERT_PVID BIT(2) +#define ICE_AQ_VSI_VLAN_EMOD_S 3 +#define ICE_AQ_VSI_VLAN_EMOD_M (0x3 << ICE_AQ_VSI_VLAN_EMOD_S) +#define ICE_AQ_VSI_VLAN_EMOD_STR_BOTH (0x0 << ICE_AQ_VSI_VLAN_EMOD_S) +#define ICE_AQ_VSI_VLAN_EMOD_STR_UP (0x1 << ICE_AQ_VSI_VLAN_EMOD_S) +#define ICE_AQ_VSI_VLAN_EMOD_STR (0x2 << ICE_AQ_VSI_VLAN_EMOD_S) +#define ICE_AQ_VSI_VLAN_EMOD_NOTHING (0x3 << ICE_AQ_VSI_VLAN_EMOD_S) + u8 pvlan_reserved2[3]; + /* ingress egress up sections */ + __le32 ingress_table; /* bitmap, 3 bits per up */ +#define ICE_AQ_VSI_UP_TABLE_UP0_S 0 +#define ICE_AQ_VSI_UP_TABLE_UP0_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP0_S) +#define ICE_AQ_VSI_UP_TABLE_UP1_S 3 +#define ICE_AQ_VSI_UP_TABLE_UP1_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP1_S) +#define ICE_AQ_VSI_UP_TABLE_UP2_S 6 +#define ICE_AQ_VSI_UP_TABLE_UP2_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP2_S) +#define ICE_AQ_VSI_UP_TABLE_UP3_S 9 +#define ICE_AQ_VSI_UP_TABLE_UP3_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP3_S) +#define ICE_AQ_VSI_UP_TABLE_UP4_S 12 +#define ICE_AQ_VSI_UP_TABLE_UP4_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP4_S) +#define ICE_AQ_VSI_UP_TABLE_UP5_S 15 +#define ICE_AQ_VSI_UP_TABLE_UP5_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP5_S) +#define ICE_AQ_VSI_UP_TABLE_UP6_S 18 +#define ICE_AQ_VSI_UP_TABLE_UP6_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP6_S) +#define ICE_AQ_VSI_UP_TABLE_UP7_S 21 +#define ICE_AQ_VSI_UP_TABLE_UP7_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP7_S) + __le32 egress_table; /* same defines as for ingress table */ + /* outer tags section */ + __le16 outer_tag; + u8 outer_tag_flags; +#define ICE_AQ_VSI_OUTER_TAG_MODE_S 0 +#define 
ICE_AQ_VSI_OUTER_TAG_MODE_M (0x3 << ICE_AQ_VSI_OUTER_TAG_MODE_S) +#define ICE_AQ_VSI_OUTER_TAG_NOTHING 0x0 +#define ICE_AQ_VSI_OUTER_TAG_REMOVE 0x1 +#define ICE_AQ_VSI_OUTER_TAG_COPY 0x2 +#define ICE_AQ_VSI_OUTER_TAG_TYPE_S 2 +#define ICE_AQ_VSI_OUTER_TAG_TYPE_M (0x3 << ICE_AQ_VSI_OUTER_TAG_TYPE_S) +#define ICE_AQ_VSI_OUTER_TAG_NONE 0x0 +#define ICE_AQ_VSI_OUTER_TAG_STAG 0x1 +#define ICE_AQ_VSI_OUTER_TAG_VLAN_8100 0x2 +#define ICE_AQ_VSI_OUTER_TAG_VLAN_9100 0x3 +#define ICE_AQ_VSI_OUTER_TAG_INSERT BIT(4) +#define ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST BIT(6) + u8 outer_tag_reserved; + /* queue mapping section */ + __le16 mapping_flags; +#define ICE_AQ_VSI_Q_MAP_CONTIG 0x0 +#define ICE_AQ_VSI_Q_MAP_NONCONTIG BIT(0) + __le16 q_mapping[16]; +#define ICE_AQ_VSI_Q_S 0 +#define ICE_AQ_VSI_Q_M (0x7FF << ICE_AQ_VSI_Q_S) + __le16 tc_mapping[8]; +#define ICE_AQ_VSI_TC_Q_OFFSET_S 0 +#define ICE_AQ_VSI_TC_Q_OFFSET_M (0x7FF << ICE_AQ_VSI_TC_Q_OFFSET_S) +#define ICE_AQ_VSI_TC_Q_NUM_S 11 +#define ICE_AQ_VSI_TC_Q_NUM_M (0xF << ICE_AQ_VSI_TC_Q_NUM_S) + /* queueing option section */ + u8 q_opt_rss; +#define ICE_AQ_VSI_Q_OPT_RSS_LUT_S 0 +#define ICE_AQ_VSI_Q_OPT_RSS_LUT_M (0x3 << ICE_AQ_VSI_Q_OPT_RSS_LUT_S) +#define ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI 0x0 +#define ICE_AQ_VSI_Q_OPT_RSS_LUT_PF 0x2 +#define ICE_AQ_VSI_Q_OPT_RSS_LUT_GBL 0x3 +#define ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_S 2 +#define ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_M (0xF << ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_S) +#define ICE_AQ_VSI_Q_OPT_RSS_HASH_S 6 +#define ICE_AQ_VSI_Q_OPT_RSS_HASH_M (0x3 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) +#define ICE_AQ_VSI_Q_OPT_RSS_TPLZ (0x0 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) +#define ICE_AQ_VSI_Q_OPT_RSS_SYM_TPLZ (0x1 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) +#define ICE_AQ_VSI_Q_OPT_RSS_XOR (0x2 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) +#define ICE_AQ_VSI_Q_OPT_RSS_JHASH (0x3 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) + u8 q_opt_tc; +#define ICE_AQ_VSI_Q_OPT_TC_OVR_S 0 +#define ICE_AQ_VSI_Q_OPT_TC_OVR_M (0x1F << ICE_AQ_VSI_Q_OPT_TC_OVR_S) +#define ICE_AQ_VSI_Q_OPT_PROF_TC_OVR BIT(7) + u8 q_opt_flags; +#define ICE_AQ_VSI_Q_OPT_PE_FLTR_EN BIT(0) + u8 q_opt_reserved[3]; + /* outer up section */ + __le32 outer_up_table; /* same structure and defines as ingress tbl */ + /* acl section */ + __le16 acl_def_act; +#define ICE_AQ_VSI_ACL_DEF_RX_PROF_S 0 +#define ICE_AQ_VSI_ACL_DEF_RX_PROF_M (0xF << ICE_AQ_VSI_ACL_DEF_RX_PROF_S) +#define ICE_AQ_VSI_ACL_DEF_RX_TABLE_S 4 +#define ICE_AQ_VSI_ACL_DEF_RX_TABLE_M (0xF << ICE_AQ_VSI_ACL_DEF_RX_TABLE_S) +#define ICE_AQ_VSI_ACL_DEF_TX_PROF_S 8 +#define ICE_AQ_VSI_ACL_DEF_TX_PROF_M (0xF << ICE_AQ_VSI_ACL_DEF_TX_PROF_S) +#define ICE_AQ_VSI_ACL_DEF_TX_TABLE_S 12 +#define ICE_AQ_VSI_ACL_DEF_TX_TABLE_M (0xF << ICE_AQ_VSI_ACL_DEF_TX_TABLE_S) + /* flow director section */ + __le16 fd_options; +#define ICE_AQ_VSI_FD_ENABLE BIT(0) +#define ICE_AQ_VSI_FD_TX_AUTO_ENABLE BIT(1) +#define ICE_AQ_VSI_FD_PROG_ENABLE BIT(3) + __le16 max_fd_fltr_dedicated; + __le16 max_fd_fltr_shared; + __le16 fd_def_q; +#define ICE_AQ_VSI_FD_DEF_Q_S 0 +#define ICE_AQ_VSI_FD_DEF_Q_M (0x7FF << ICE_AQ_VSI_FD_DEF_Q_S) +#define ICE_AQ_VSI_FD_DEF_GRP_S 12 +#define ICE_AQ_VSI_FD_DEF_GRP_M (0x7 << ICE_AQ_VSI_FD_DEF_GRP_S) + __le16 fd_report_opt; +#define ICE_AQ_VSI_FD_REPORT_Q_S 0 +#define ICE_AQ_VSI_FD_REPORT_Q_M (0x7FF << ICE_AQ_VSI_FD_REPORT_Q_S) +#define ICE_AQ_VSI_FD_DEF_PRIORITY_S 12 +#define ICE_AQ_VSI_FD_DEF_PRIORITY_M (0x7 << ICE_AQ_VSI_FD_DEF_PRIORITY_S) +#define ICE_AQ_VSI_FD_DEF_DROP BIT(15) + /* PASID section */ + __le32 pasid_id; +#define ICE_AQ_VSI_PASID_ID_S 0 +#define 
ICE_AQ_VSI_PASID_ID_M (0xFFFFF << ICE_AQ_VSI_PASID_ID_S) +#define ICE_AQ_VSI_PASID_ID_VALID BIT(31) + u8 reserved[24]; +}; + +/* Add/update mirror rule - direct (0x0260) */ +#define ICE_AQC_RULE_ID_VALID_S 7 +#define ICE_AQC_RULE_ID_VALID_M (0x1 << ICE_AQC_RULE_ID_VALID_S) +#define ICE_AQC_RULE_ID_S 0 +#define ICE_AQC_RULE_ID_M (0x3F << ICE_AQC_RULE_ID_S) + +/* Following defines to be used while processing caller specified mirror list + * of VSI indexes. + */ +/* Action: Byte.bit (1.7) + * 0 = Remove VSI from mirror rule + * 1 = Add VSI to mirror rule + */ +#define ICE_AQC_RULE_ACT_S 15 +#define ICE_AQC_RULE_ACT_M (0x1 << ICE_AQC_RULE_ACT_S) +/* Action: 1.2:0.0 = Mirrored VSI */ +#define ICE_AQC_RULE_MIRRORED_VSI_S 0 +#define ICE_AQC_RULE_MIRRORED_VSI_M (0x7FF << ICE_AQC_RULE_MIRRORED_VSI_S) + +/* This is to be used by add/update mirror rule Admin Queue command. + * In case of add mirror rule - if rule ID is specified as + * INVAL_MIRROR_RULE_ID, new rule ID is allocated from shared pool. + * If specified rule_id is valid, then it is used. If specified rule_id + * is in use then new mirroring rule is added. + */ +#define ICE_INVAL_MIRROR_RULE_ID 0xFFFF + +struct ice_aqc_add_update_mir_rule { + __le16 rule_id; + + __le16 rule_type; +#define ICE_AQC_RULE_TYPE_S 0 +#define ICE_AQC_RULE_TYPE_M (0x7 << ICE_AQC_RULE_TYPE_S) + /* VPORT ingress/egress */ +#define ICE_AQC_RULE_TYPE_VPORT_INGRESS 0x1 +#define ICE_AQC_RULE_TYPE_VPORT_EGRESS 0x2 + /* Physical port ingress mirroring. + * All traffic received by this port + */ +#define ICE_AQC_RULE_TYPE_PPORT_INGRESS 0x6 + /* Physical port egress mirroring. All traffic sent by this port */ +#define ICE_AQC_RULE_TYPE_PPORT_EGRESS 0x7 + + /* Number of mirrored entries. + * The values are in the command buffer + */ + __le16 num_entries; + + /* Destination VSI */ + __le16 dest; + __le32 addr_high; + __le32 addr_low; +}; + +/* Delete mirror rule - direct(0x0261) */ +struct ice_aqc_delete_mir_rule { + __le16 rule_id; + __le16 rsvd; + + /* Byte.bit: 20.0 = Keep allocation. If set VSI stays part of + * the PF allocated resources, otherwise it is returned to the + * shared pool + */ +#define ICE_AQC_FLAG_KEEP_ALLOCD_S 0 +#define ICE_AQC_FLAG_KEEP_ALLOCD_M (0x1 << ICE_AQC_FLAG_KEEP_ALLOCD_S) + __le16 flags; + + u8 reserved[10]; +}; + +/* Set/Get storm config - (direct 0x0280, 0x0281) */ +/* This structure holds get storm configuration response and same structure + * is used to perform set_storm_cfg + */ +struct ice_aqc_storm_cfg { + __le32 bcast_thresh_size; + __le32 mcast_thresh_size; + /* Bit 18:0 - Traffic upper threshold size + * Bit 31:19 - Reserved + */ +#define ICE_AQ_THRESHOLD_S 0 +#define ICE_AQ_THRESHOLD_M (0x7FFFF << ICE_AQ_THRESHOLD_S) + + __le32 storm_ctrl_ctrl; + /* Bit 0: MDIPW - Drop Multicast packets in previous window + * Bit 1: MDICW - Drop multicast packets in current window + * Bit 2: BDIPW - Drop broadcast packets in previous window + * Bit 3: BDICW - Drop broadcast packets in current window + */ +#define ICE_AQ_STORM_CTRL_MDIPW_DROP_MULTICAST BIT(0) +#define ICE_AQ_STORM_CTRL_MDICW_DROP_MULTICAST BIT(1) +#define ICE_AQ_STORM_CTRL_BDIPW_DROP_MULTICAST BIT(2) +#define ICE_AQ_STORM_CTRL_BDICW_DROP_MULTICAST BIT(3) + /* Bit 7:5 : Reserved */ + /* Bit 27:8 : Interval - BSC/MSC Time-interval specification: The + * interval size for applying ingress broadcast or multicast storm + * control. 
+ */ +#define ICE_AQ_STORM_BSC_MSC_TIME_INTERVAL_S 8 +#define ICE_AQ_STORM_BSC_MSC_TIME_INTERVAL_M \ + (0xFFFFF << ICE_AQ_STORM_BSC_MSC_TIME_INTERVAL_S) + __le32 reserved; +}; + +#define ICE_MAX_NUM_RECIPES 64 + +/* Add/Get Recipe (indirect 0x0290/0x0292)*/ +struct ice_aqc_add_get_recipe { + __le16 num_sub_recipes; /* Input in Add cmd, Output in Get cmd */ + __le16 return_index; /* Input, used for Get cmd only */ + u8 reserved[4]; + __le32 addr_high; + __le32 addr_low; +}; + +struct ice_aqc_recipe_content { + u8 rid; +#define ICE_AQ_RECIPE_ID_S 0 +#define ICE_AQ_RECIPE_ID_M (0x3F << ICE_AQ_RECIPE_ID_S) +#define ICE_AQ_RECIPE_ID_IS_ROOT BIT(7) +#define ICE_AQ_SW_ID_LKUP_IDX 0 + u8 lkup_indx[5]; +#define ICE_AQ_RECIPE_LKUP_DATA_S 0 +#define ICE_AQ_RECIPE_LKUP_DATA_M (0x3F << ICE_AQ_RECIPE_LKUP_DATA_S) +#define ICE_AQ_RECIPE_LKUP_IGNORE BIT(7) +#define ICE_AQ_SW_ID_LKUP_MASK 0x00FF + __le16 mask[5]; + u8 result_indx; +#define ICE_AQ_RECIPE_RESULT_DATA_S 0 +#define ICE_AQ_RECIPE_RESULT_DATA_M (0x3F << ICE_AQ_RECIPE_RESULT_DATA_S) +#define ICE_AQ_RECIPE_RESULT_EN BIT(7) + u8 rsvd0[3]; + u8 act_ctrl_join_priority; + u8 act_ctrl_fwd_priority; +#define ICE_AQ_RECIPE_FWD_PRIORITY_S 0 +#define ICE_AQ_RECIPE_FWD_PRIORITY_M (0xF << ICE_AQ_RECIPE_FWD_PRIORITY_S) + u8 act_ctrl; +#define ICE_AQ_RECIPE_ACT_NEED_PASS_L2 BIT(0) +#define ICE_AQ_RECIPE_ACT_ALLOW_PASS_L2 BIT(1) +#define ICE_AQ_RECIPE_ACT_INV_ACT BIT(2) +#define ICE_AQ_RECIPE_ACT_PRUNE_INDX_S 4 +#define ICE_AQ_RECIPE_ACT_PRUNE_INDX_M (0x3 << ICE_AQ_RECIPE_ACT_PRUNE_INDX_S) + u8 rsvd1; + __le32 dflt_act; +#define ICE_AQ_RECIPE_DFLT_ACT_S 0 +#define ICE_AQ_RECIPE_DFLT_ACT_M (0x7FFFF << ICE_AQ_RECIPE_DFLT_ACT_S) +#define ICE_AQ_RECIPE_DFLT_ACT_VALID BIT(31) +}; + +struct ice_aqc_recipe_data_elem { + u8 recipe_indx; + u8 resp_bits; +#define ICE_AQ_RECIPE_WAS_UPDATED BIT(0) + u8 rsvd0[2]; + u8 recipe_bitmap[8]; + u8 rsvd1[4]; + struct ice_aqc_recipe_content content; + u8 rsvd2[20]; +}; + +/* This struct contains a number of entries as per the + * num_sub_recipes in the command + */ +struct ice_aqc_add_get_recipe_data { + struct ice_aqc_recipe_data_elem recipe[1]; +}; + +/* Set/Get Recipes to Profile Association (direct 0x0291/0x0293) */ +struct ice_aqc_recipe_to_profile { + __le16 profile_id; + u8 rsvd[6]; + ice_declare_bitmap(recipe_assoc, ICE_MAX_NUM_RECIPES); +}; + +/* Add/Update/Remove/Get switch rules (indirect 0x02A0, 0x02A1, 0x02A2, 0x02A3) + */ +struct ice_aqc_sw_rules { + /* ops: add switch rules, referring the number of rules. + * ops: update switch rules, referring the number of filters + * ops: remove switch rules, referring the entry index. + * ops: get switch rules, referring to the number of filters. + */ + __le16 num_rules_fltr_entry_index; + u8 reserved[6]; + __le32 addr_high; + __le32 addr_low; +}; + +#pragma pack(1) +/* Add/Update/Get/Remove lookup Rx/Tx command/response entry + * This structures describes the lookup rules and associated actions. "index" + * is returned as part of a response to a successful Add command, and can be + * used to identify the rule for Update/Get/Remove commands. 
+ */ +struct ice_sw_rule_lkup_rx_tx { + __le16 recipe_id; +#define ICE_SW_RECIPE_LOGICAL_PORT_FWD 10 + /* Source port for LOOKUP_RX and source VSI in case of LOOKUP_TX */ + __le16 src; + __le32 act; + + /* Bit 0:1 - Action type */ +#define ICE_SINGLE_ACT_TYPE_S 0x00 +#define ICE_SINGLE_ACT_TYPE_M (0x3 << ICE_SINGLE_ACT_TYPE_S) + + /* Bit 2 - Loop back enable + * Bit 3 - LAN enable + */ +#define ICE_SINGLE_ACT_LB_ENABLE BIT(2) +#define ICE_SINGLE_ACT_LAN_ENABLE BIT(3) + + /* Action type = 0 - Forward to VSI or VSI list */ +#define ICE_SINGLE_ACT_VSI_FORWARDING 0x0 + +#define ICE_SINGLE_ACT_VSI_ID_S 4 +#define ICE_SINGLE_ACT_VSI_ID_M (0x3FF << ICE_SINGLE_ACT_VSI_ID_S) +#define ICE_SINGLE_ACT_VSI_LIST_ID_S 4 +#define ICE_SINGLE_ACT_VSI_LIST_ID_M (0x3FF << ICE_SINGLE_ACT_VSI_LIST_ID_S) + /* This bit needs to be set if action is forward to VSI list */ +#define ICE_SINGLE_ACT_VSI_LIST BIT(14) +#define ICE_SINGLE_ACT_VALID_BIT BIT(17) +#define ICE_SINGLE_ACT_DROP BIT(18) + + /* Action type = 1 - Forward to Queue of Queue group */ +#define ICE_SINGLE_ACT_TO_Q 0x1 +#define ICE_SINGLE_ACT_Q_INDEX_S 4 +#define ICE_SINGLE_ACT_Q_INDEX_M (0x7FF << ICE_SINGLE_ACT_Q_INDEX_S) +#define ICE_SINGLE_ACT_Q_REGION_S 15 +#define ICE_SINGLE_ACT_Q_REGION_M (0x7 << ICE_SINGLE_ACT_Q_REGION_S) +#define ICE_SINGLE_ACT_Q_PRIORITY BIT(18) + + /* Action type = 2 - Prune */ +#define ICE_SINGLE_ACT_PRUNE 0x2 +#define ICE_SINGLE_ACT_EGRESS BIT(15) +#define ICE_SINGLE_ACT_INGRESS BIT(16) +#define ICE_SINGLE_ACT_PRUNET BIT(17) + /* Bit 18 should be set to 0 for this action */ + + /* Action type = 2 - Pointer */ +#define ICE_SINGLE_ACT_PTR 0x2 +#define ICE_SINGLE_ACT_PTR_VAL_S 4 +#define ICE_SINGLE_ACT_PTR_VAL_M (0x1FFF << ICE_SINGLE_ACT_PTR_VAL_S) + /* Bit 18 should be set to 1 */ +#define ICE_SINGLE_ACT_PTR_BIT BIT(18) + + /* Action type = 3 - Other actions. Last two bits + * are other action identifier + */ +#define ICE_SINGLE_ACT_OTHER_ACTS 0x3 +#define ICE_SINGLE_OTHER_ACT_IDENTIFIER_S 17 +#define ICE_SINGLE_OTHER_ACT_IDENTIFIER_M \ + (0x3 << ICE_SINGLE_OTHER_ACT_IDENTIFIER_S) + + /* Bit 17:18 - Defines other actions */ + /* Other action = 0 - Mirror VSI */ +#define ICE_SINGLE_OTHER_ACT_MIRROR 0 +#define ICE_SINGLE_ACT_MIRROR_VSI_ID_S 4 +#define ICE_SINGLE_ACT_MIRROR_VSI_ID_M \ + (0x3FF << ICE_SINGLE_ACT_MIRROR_VSI_ID_S) + + /* Other action = 3 - Set Stat count */ +#define ICE_SINGLE_OTHER_ACT_STAT_COUNT 3 +#define ICE_SINGLE_ACT_STAT_COUNT_INDEX_S 4 +#define ICE_SINGLE_ACT_STAT_COUNT_INDEX_M \ + (0x7F << ICE_SINGLE_ACT_STAT_COUNT_INDEX_S) + + __le16 index; /* The index of the rule in the lookup table */ + /* Length and values of the header to be matched per recipe or + * lookup-type + */ + __le16 hdr_len; + u8 hdr[1]; +}; +#pragma pack() + +/* Add/Update/Remove large action command/response entry + * "index" is returned as part of a response to a successful Add command, and + * can be used to identify the action for Update/Get/Remove commands. 
+ */ +struct ice_sw_rule_lg_act { + __le16 index; /* Index in large action table */ + __le16 size; + __le32 act[1]; /* array of size for actions */ + /* Max number of large actions */ +#define ICE_MAX_LG_ACT 4 + /* Bit 0:1 - Action type */ +#define ICE_LG_ACT_TYPE_S 0 +#define ICE_LG_ACT_TYPE_M (0x7 << ICE_LG_ACT_TYPE_S) + + /* Action type = 0 - Forward to VSI or VSI list */ +#define ICE_LG_ACT_VSI_FORWARDING 0 +#define ICE_LG_ACT_VSI_ID_S 3 +#define ICE_LG_ACT_VSI_ID_M (0x3FF << ICE_LG_ACT_VSI_ID_S) +#define ICE_LG_ACT_VSI_LIST_ID_S 3 +#define ICE_LG_ACT_VSI_LIST_ID_M (0x3FF << ICE_LG_ACT_VSI_LIST_ID_S) + /* This bit needs to be set if action is forward to VSI list */ +#define ICE_LG_ACT_VSI_LIST BIT(13) + +#define ICE_LG_ACT_VALID_BIT BIT(16) + + /* Action type = 1 - Forward to Queue of Queue group */ +#define ICE_LG_ACT_TO_Q 0x1 +#define ICE_LG_ACT_Q_INDEX_S 3 +#define ICE_LG_ACT_Q_INDEX_M (0x7FF << ICE_LG_ACT_Q_INDEX_S) +#define ICE_LG_ACT_Q_REGION_S 14 +#define ICE_LG_ACT_Q_REGION_M (0x7 << ICE_LG_ACT_Q_REGION_S) +#define ICE_LG_ACT_Q_PRIORITY_SET BIT(17) + + /* Action type = 2 - Prune */ +#define ICE_LG_ACT_PRUNE 0x2 +#define ICE_LG_ACT_EGRESS BIT(14) +#define ICE_LG_ACT_INGRESS BIT(15) +#define ICE_LG_ACT_PRUNET BIT(16) + + /* Action type = 3 - Mirror VSI */ +#define ICE_LG_OTHER_ACT_MIRROR 0x3 +#define ICE_LG_ACT_MIRROR_VSI_ID_S 3 +#define ICE_LG_ACT_MIRROR_VSI_ID_M (0x3FF << ICE_LG_ACT_MIRROR_VSI_ID_S) + + /* Action type = 5 - Generic Value */ +#define ICE_LG_ACT_GENERIC 0x5 +#define ICE_LG_ACT_GENERIC_VALUE_S 3 +#define ICE_LG_ACT_GENERIC_VALUE_M (0xFFFF << ICE_LG_ACT_GENERIC_VALUE_S) +#define ICE_LG_ACT_GENERIC_OFFSET_S 19 +#define ICE_LG_ACT_GENERIC_OFFSET_M (0x7 << ICE_LG_ACT_GENERIC_OFFSET_S) +#define ICE_LG_ACT_GENERIC_PRIORITY_S 22 +#define ICE_LG_ACT_GENERIC_PRIORITY_M (0x7 << ICE_LG_ACT_GENERIC_PRIORITY_S) +#define ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX 7 + + /* Action = 7 - Set Stat count */ +#define ICE_LG_ACT_STAT_COUNT 0x7 +#define ICE_LG_ACT_STAT_COUNT_S 3 +#define ICE_LG_ACT_STAT_COUNT_M (0x7F << ICE_LG_ACT_STAT_COUNT_S) +}; + +/* Add/Update/Remove VSI list command/response entry + * "index" is returned as part of a response to a successful Add command, and + * can be used to identify the VSI list for Update/Get/Remove commands. + */ +struct ice_sw_rule_vsi_list { + __le16 index; /* Index of VSI/Prune list */ + __le16 number_vsi; + __le16 vsi[1]; /* Array of number_vsi VSI numbers */ +}; + +#pragma pack(1) +/* Query VSI list command/response entry */ +struct ice_sw_rule_vsi_list_query { + __le16 index; + ice_declare_bitmap(vsi_list, ICE_MAX_VSI); +}; +#pragma pack() + +#pragma pack(1) +/* Add switch rule response: + * Content of return buffer is same as the input buffer. The status field and + * LUT index are updated as part of the response + */ +struct ice_aqc_sw_rules_elem { + __le16 type; /* Switch rule type, one of T_... 
*/ +#define ICE_AQC_SW_RULES_T_LKUP_RX 0x0 +#define ICE_AQC_SW_RULES_T_LKUP_TX 0x1 +#define ICE_AQC_SW_RULES_T_LG_ACT 0x2 +#define ICE_AQC_SW_RULES_T_VSI_LIST_SET 0x3 +#define ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR 0x4 +#define ICE_AQC_SW_RULES_T_PRUNE_LIST_SET 0x5 +#define ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR 0x6 + __le16 status; + union { + struct ice_sw_rule_lkup_rx_tx lkup_tx_rx; + struct ice_sw_rule_lg_act lg_act; + struct ice_sw_rule_vsi_list vsi_list; + struct ice_sw_rule_vsi_list_query vsi_list_query; + } pdata; +}; + +#pragma pack() + +/* PFC Ignore (direct 0x0301) + * The command and response use the same descriptor structure + */ +struct ice_aqc_pfc_ignore { + u8 tc_bitmap; + u8 cmd_flags; /* unused in response */ +#define ICE_AQC_PFC_IGNORE_SET BIT(7) +#define ICE_AQC_PFC_IGNORE_CLEAR 0 + u8 reserved[14]; +}; + +/* Set PFC Mode (direct 0x0303) + * Query PFC Mode (direct 0x0302) + */ +struct ice_aqc_set_query_pfc_mode { + u8 pfc_mode; +/* For Set Command response, reserved in all other cases */ +#define ICE_AQC_PFC_NOT_CONFIGURED 0 +/* For Query Command response, reserved in all other cases */ +#define ICE_AQC_DCB_DIS 0 +#define ICE_AQC_PFC_VLAN_BASED_PFC 1 +#define ICE_AQC_PFC_DSCP_BASED_PFC 2 + u8 rsvd[15]; +}; + +/* Set DCB Parameters (direct 0x0306) */ +struct ice_aqc_set_dcb_params { + u8 cmd_flags; /* unused in response */ +#define ICE_AQC_LINK_UP_DCB_CFG BIT(0) +#define ICE_AQC_PERSIST_DCB_CFG BIT(1) + u8 valid_flags; /* unused in response */ +#define ICE_AQC_LINK_UP_DCB_CFG_VALID BIT(0) +#define ICE_AQC_PERSIST_DCB_CFG_VALID BIT(1) + u8 rsvd[14]; +}; + +/* Get Default Topology (indirect 0x0400) */ +struct ice_aqc_get_topo { + u8 port_num; + u8 num_branches; + __le16 reserved1; + __le32 reserved2; + __le32 addr_high; + __le32 addr_low; +}; + +/* Update TSE (indirect 0x0403) + * Get TSE (indirect 0x0404) + * Add TSE (indirect 0x0401) + * Delete TSE (indirect 0x040F) + * Move TSE (indirect 0x0408) + * Suspend Nodes (indirect 0x0409) + * Resume Nodes (indirect 0x040A) + */ +struct ice_aqc_sched_elem_cmd { + __le16 num_elem_req; /* Used by commands */ + __le16 num_elem_resp; /* Used by responses */ + __le32 reserved; + __le32 addr_high; + __le32 addr_low; +}; + +/* This is the buffer for: + * Suspend Nodes (indirect 0x0409) + * Resume Nodes (indirect 0x040A) + */ +struct ice_aqc_suspend_resume_elem { + __le32 teid[1]; +}; + +struct ice_aqc_txsched_move_grp_info_hdr { + __le32 src_parent_teid; + __le32 dest_parent_teid; + __le16 num_elems; + __le16 reserved; +}; + +struct ice_aqc_move_elem { + struct ice_aqc_txsched_move_grp_info_hdr hdr; + __le32 teid[1]; +}; + +struct ice_aqc_elem_info_bw { + __le16 bw_profile_idx; + __le16 bw_alloc; +}; + +struct ice_aqc_txsched_elem { + u8 elem_type; /* Special field, reserved for some aq calls */ +#define ICE_AQC_ELEM_TYPE_UNDEFINED 0x0 +#define ICE_AQC_ELEM_TYPE_ROOT_PORT 0x1 +#define ICE_AQC_ELEM_TYPE_TC 0x2 +#define ICE_AQC_ELEM_TYPE_SE_GENERIC 0x3 +#define ICE_AQC_ELEM_TYPE_ENTRY_POINT 0x4 +#define ICE_AQC_ELEM_TYPE_LEAF 0x5 +#define ICE_AQC_ELEM_TYPE_SE_PADDED 0x6 + u8 valid_sections; +#define ICE_AQC_ELEM_VALID_GENERIC BIT(0) +#define ICE_AQC_ELEM_VALID_CIR BIT(1) +#define ICE_AQC_ELEM_VALID_EIR BIT(2) +#define ICE_AQC_ELEM_VALID_SHARED BIT(3) + u8 generic; +#define ICE_AQC_ELEM_GENERIC_MODE_M 0x1 +#define ICE_AQC_ELEM_GENERIC_PRIO_S 0x1 +#define ICE_AQC_ELEM_GENERIC_PRIO_M (0x7 << ICE_AQC_ELEM_GENERIC_PRIO_S) +#define ICE_AQC_ELEM_GENERIC_SP_S 0x4 +#define ICE_AQC_ELEM_GENERIC_SP_M (0x1 << ICE_AQC_ELEM_GENERIC_SP_S) +#define 
ICE_AQC_ELEM_GENERIC_ADJUST_VAL_S 0x5 +#define ICE_AQC_ELEM_GENERIC_ADJUST_VAL_M \ + (0x3 << ICE_AQC_ELEM_GENERIC_ADJUST_VAL_S) + u8 flags; /* Special field, reserved for some aq calls */ +#define ICE_AQC_ELEM_FLAG_SUSPEND_M 0x1 + struct ice_aqc_elem_info_bw cir_bw; + struct ice_aqc_elem_info_bw eir_bw; + __le16 srl_id; + __le16 reserved2; +}; + +struct ice_aqc_txsched_elem_data { + __le32 parent_teid; + __le32 node_teid; + struct ice_aqc_txsched_elem data; +}; + +struct ice_aqc_txsched_topo_grp_info_hdr { + __le32 parent_teid; + __le16 num_elems; + __le16 reserved2; +}; + +struct ice_aqc_add_elem { + struct ice_aqc_txsched_topo_grp_info_hdr hdr; + struct ice_aqc_txsched_elem_data generic[1]; +}; + +struct ice_aqc_conf_elem { + struct ice_aqc_txsched_elem_data generic[1]; +}; + +struct ice_aqc_get_elem { + struct ice_aqc_txsched_elem_data generic[1]; +}; + +struct ice_aqc_get_topo_elem { + struct ice_aqc_txsched_topo_grp_info_hdr hdr; + struct ice_aqc_txsched_elem_data + generic[ICE_AQC_TOPO_MAX_LEVEL_NUM]; +}; + +struct ice_aqc_delete_elem { + struct ice_aqc_txsched_topo_grp_info_hdr hdr; + __le32 teid[1]; +}; + +/* Query Port ETS (indirect 0x040E) + * + * This indirect command is used to query port TC node configuration. + */ +struct ice_aqc_query_port_ets { + __le32 port_teid; + __le32 reserved; + __le32 addr_high; + __le32 addr_low; +}; + +struct ice_aqc_port_ets_elem { + u8 tc_valid_bits; + u8 reserved[3]; + /* 3 bits for UP per TC 0-7, 4th byte reserved */ + __le32 up2tc; + u8 tc_bw_share[8]; + __le32 port_eir_prof_id; + __le32 port_cir_prof_id; + /* 3 bits per Node priority to TC 0-7, 4th byte reserved */ + __le32 tc_node_prio; +#define ICE_TC_NODE_PRIO_S 0x4 + u8 reserved1[4]; + __le32 tc_node_teid[8]; /* Used for response, reserved in command */ +}; + +/* Rate limiting profile for + * Add RL profile (indirect 0x0410) + * Query RL profile (indirect 0x0411) + * Remove RL profile (indirect 0x0415) + * These indirect commands acts on single or multiple + * RL profiles with specified data. + */ +struct ice_aqc_rl_profile { + __le16 num_profiles; + __le16 num_processed; /* Only for response. Reserved in Command. */ + u8 reserved[4]; + __le32 addr_high; + __le32 addr_low; +}; + +struct ice_aqc_rl_profile_elem { + u8 level; + u8 flags; +#define ICE_AQC_RL_PROFILE_TYPE_S 0x0 +#define ICE_AQC_RL_PROFILE_TYPE_M (0x3 << ICE_AQC_RL_PROFILE_TYPE_S) +#define ICE_AQC_RL_PROFILE_TYPE_CIR 0 +#define ICE_AQC_RL_PROFILE_TYPE_EIR 1 +#define ICE_AQC_RL_PROFILE_TYPE_SRL 2 +/* The following flag is used for Query RL Profile Data */ +#define ICE_AQC_RL_PROFILE_INVAL_S 0x7 +#define ICE_AQC_RL_PROFILE_INVAL_M (0x1 << ICE_AQC_RL_PROFILE_INVAL_S) + + __le16 profile_id; + __le16 max_burst_size; + __le16 rl_multiply; + __le16 wake_up_calc; + __le16 rl_encode; +}; + +struct ice_aqc_rl_profile_generic_elem { + struct ice_aqc_rl_profile_elem generic[1]; +}; + +/* Configure L2 Node CGD (indirect 0x0414) + * This indirect command allows configuring a congestion domain for given L2 + * node TEIDs in the scheduler topology. + */ +struct ice_aqc_cfg_l2_node_cgd { + __le16 num_l2_nodes; + u8 reserved[6]; + __le32 addr_high; + __le32 addr_low; +}; + +struct ice_aqc_cfg_l2_node_cgd_elem { + __le32 node_teid; + u8 cgd; + u8 reserved[3]; +}; + +struct ice_aqc_cfg_l2_node_cgd_data { + struct ice_aqc_cfg_l2_node_cgd_elem elem[1]; +}; + +/* Query Scheduler Resource Allocation (indirect 0x0412) + * This indirect command retrieves the scheduler resources allocated by + * EMP Firmware to the given PF. 
+ */ +struct ice_aqc_query_txsched_res { + u8 reserved[8]; + __le32 addr_high; + __le32 addr_low; +}; + +struct ice_aqc_generic_sched_props { + __le16 phys_levels; + __le16 logical_levels; + u8 flattening_bitmap; + u8 max_device_cgds; + u8 max_pf_cgds; + u8 rsvd0; + __le16 rdma_qsets; + u8 rsvd1[22]; +}; + +struct ice_aqc_layer_props { + u8 logical_layer; + u8 chunk_size; + __le16 max_device_nodes; + __le16 max_pf_nodes; + u8 rsvd0[4]; + __le16 max_sibl_grp_sz; + __le16 max_cir_rl_profiles; + __le16 max_eir_rl_profiles; + __le16 max_srl_profiles; + u8 rsvd1[14]; +}; + +struct ice_aqc_query_txsched_res_resp { + struct ice_aqc_generic_sched_props sched_props; + struct ice_aqc_layer_props layer_props[ICE_AQC_TOPO_MAX_LEVEL_NUM]; +}; + +/* Query Node to Root Topology (indirect 0x0413) + * This command uses ice_aqc_get_elem as its data buffer. + */ +struct ice_aqc_query_node_to_root { + __le32 teid; + __le32 num_nodes; /* Response only */ + __le32 addr_high; + __le32 addr_low; +}; + +/* Get PHY capabilities (indirect 0x0600) */ +struct ice_aqc_get_phy_caps { + u8 lport_num; + u8 reserved; + __le16 param0; + /* 18.0 - Report qualified modules */ +#define ICE_AQC_GET_PHY_RQM BIT(0) + /* 18.1 - 18.2 : Report mode + * 00b - Report NVM capabilities + * 01b - Report topology capabilities + * 10b - Report SW configured + */ +#define ICE_AQC_REPORT_MODE_S 1 +#define ICE_AQC_REPORT_MODE_M (3 << ICE_AQC_REPORT_MODE_S) +#define ICE_AQC_REPORT_NVM_CAP 0 +#define ICE_AQC_REPORT_TOPO_CAP BIT(1) +#define ICE_AQC_REPORT_SW_CFG BIT(2) + __le32 reserved1; + __le32 addr_high; + __le32 addr_low; +}; + +/* This is #define of PHY type (Extended): + * The first set of defines is for phy_type_low. + */ +#define ICE_PHY_TYPE_LOW_100BASE_TX BIT_ULL(0) +#define ICE_PHY_TYPE_LOW_100M_SGMII BIT_ULL(1) +#define ICE_PHY_TYPE_LOW_1000BASE_T BIT_ULL(2) +#define ICE_PHY_TYPE_LOW_1000BASE_SX BIT_ULL(3) +#define ICE_PHY_TYPE_LOW_1000BASE_LX BIT_ULL(4) +#define ICE_PHY_TYPE_LOW_1000BASE_KX BIT_ULL(5) +#define ICE_PHY_TYPE_LOW_1G_SGMII BIT_ULL(6) +#define ICE_PHY_TYPE_LOW_2500BASE_T BIT_ULL(7) +#define ICE_PHY_TYPE_LOW_2500BASE_X BIT_ULL(8) +#define ICE_PHY_TYPE_LOW_2500BASE_KX BIT_ULL(9) +#define ICE_PHY_TYPE_LOW_5GBASE_T BIT_ULL(10) +#define ICE_PHY_TYPE_LOW_5GBASE_KR BIT_ULL(11) +#define ICE_PHY_TYPE_LOW_10GBASE_T BIT_ULL(12) +#define ICE_PHY_TYPE_LOW_10G_SFI_DA BIT_ULL(13) +#define ICE_PHY_TYPE_LOW_10GBASE_SR BIT_ULL(14) +#define ICE_PHY_TYPE_LOW_10GBASE_LR BIT_ULL(15) +#define ICE_PHY_TYPE_LOW_10GBASE_KR_CR1 BIT_ULL(16) +#define ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC BIT_ULL(17) +#define ICE_PHY_TYPE_LOW_10G_SFI_C2C BIT_ULL(18) +#define ICE_PHY_TYPE_LOW_25GBASE_T BIT_ULL(19) +#define ICE_PHY_TYPE_LOW_25GBASE_CR BIT_ULL(20) +#define ICE_PHY_TYPE_LOW_25GBASE_CR_S BIT_ULL(21) +#define ICE_PHY_TYPE_LOW_25GBASE_CR1 BIT_ULL(22) +#define ICE_PHY_TYPE_LOW_25GBASE_SR BIT_ULL(23) +#define ICE_PHY_TYPE_LOW_25GBASE_LR BIT_ULL(24) +#define ICE_PHY_TYPE_LOW_25GBASE_KR BIT_ULL(25) +#define ICE_PHY_TYPE_LOW_25GBASE_KR_S BIT_ULL(26) +#define ICE_PHY_TYPE_LOW_25GBASE_KR1 BIT_ULL(27) +#define ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC BIT_ULL(28) +#define ICE_PHY_TYPE_LOW_25G_AUI_C2C BIT_ULL(29) +#define ICE_PHY_TYPE_LOW_40GBASE_CR4 BIT_ULL(30) +#define ICE_PHY_TYPE_LOW_40GBASE_SR4 BIT_ULL(31) +#define ICE_PHY_TYPE_LOW_40GBASE_LR4 BIT_ULL(32) +#define ICE_PHY_TYPE_LOW_40GBASE_KR4 BIT_ULL(33) +#define ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC BIT_ULL(34) +#define ICE_PHY_TYPE_LOW_40G_XLAUI BIT_ULL(35) +#define ICE_PHY_TYPE_LOW_50GBASE_CR2 BIT_ULL(36) +#define 
ICE_PHY_TYPE_LOW_50GBASE_SR2 BIT_ULL(37) +#define ICE_PHY_TYPE_LOW_50GBASE_LR2 BIT_ULL(38) +#define ICE_PHY_TYPE_LOW_50GBASE_KR2 BIT_ULL(39) +#define ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC BIT_ULL(40) +#define ICE_PHY_TYPE_LOW_50G_LAUI2 BIT_ULL(41) +#define ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC BIT_ULL(42) +#define ICE_PHY_TYPE_LOW_50G_AUI2 BIT_ULL(43) +#define ICE_PHY_TYPE_LOW_50GBASE_CP BIT_ULL(44) +#define ICE_PHY_TYPE_LOW_50GBASE_SR BIT_ULL(45) +#define ICE_PHY_TYPE_LOW_50GBASE_FR BIT_ULL(46) +#define ICE_PHY_TYPE_LOW_50GBASE_LR BIT_ULL(47) +#define ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4 BIT_ULL(48) +#define ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC BIT_ULL(49) +#define ICE_PHY_TYPE_LOW_50G_AUI1 BIT_ULL(50) +#define ICE_PHY_TYPE_LOW_100GBASE_CR4 BIT_ULL(51) +#define ICE_PHY_TYPE_LOW_100GBASE_SR4 BIT_ULL(52) +#define ICE_PHY_TYPE_LOW_100GBASE_LR4 BIT_ULL(53) +#define ICE_PHY_TYPE_LOW_100GBASE_KR4 BIT_ULL(54) +#define ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC BIT_ULL(55) +#define ICE_PHY_TYPE_LOW_100G_CAUI4 BIT_ULL(56) +#define ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC BIT_ULL(57) +#define ICE_PHY_TYPE_LOW_100G_AUI4 BIT_ULL(58) +#define ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4 BIT_ULL(59) +#define ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4 BIT_ULL(60) +#define ICE_PHY_TYPE_LOW_100GBASE_CP2 BIT_ULL(61) +#define ICE_PHY_TYPE_LOW_100GBASE_SR2 BIT_ULL(62) +#define ICE_PHY_TYPE_LOW_100GBASE_DR BIT_ULL(63) +#define ICE_PHY_TYPE_LOW_MAX_INDEX 63 +/* The second set of defines is for phy_type_high. */ +#define ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4 BIT_ULL(0) +#define ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC BIT_ULL(1) +#define ICE_PHY_TYPE_HIGH_100G_CAUI2 BIT_ULL(2) +#define ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC BIT_ULL(3) +#define ICE_PHY_TYPE_HIGH_100G_AUI2 BIT_ULL(4) +#define ICE_PHY_TYPE_HIGH_MAX_INDEX 19 + +struct ice_aqc_get_phy_caps_data { + __le64 phy_type_low; /* Use values from ICE_PHY_TYPE_LOW_* */ + __le64 phy_type_high; /* Use values from ICE_PHY_TYPE_HIGH_* */ + u8 caps; +#define ICE_AQC_PHY_EN_TX_LINK_PAUSE BIT(0) +#define ICE_AQC_PHY_EN_RX_LINK_PAUSE BIT(1) +#define ICE_AQC_PHY_LOW_POWER_MODE BIT(2) +#define ICE_AQC_PHY_EN_LINK BIT(3) +#define ICE_AQC_PHY_AN_MODE BIT(4) +#define ICE_AQC_PHY_EN_MOD_QUAL BIT(5) +#define ICE_AQC_PHY_EN_LESM BIT(6) +#define ICE_AQC_PHY_EN_AUTO_FEC BIT(7) +#define ICE_AQC_PHY_CAPS_MASK MAKEMASK(0xff, 0) + u8 low_power_ctrl_an; +#define ICE_AQC_PHY_EN_D3COLD_LOW_POWER_AUTONEG BIT(0) +#define ICE_AQC_PHY_AN_EN_CLAUSE28 BIT(1) +#define ICE_AQC_PHY_AN_EN_CLAUSE73 BIT(2) +#define ICE_AQC_PHY_AN_EN_CLAUSE37 BIT(3) + __le16 eee_cap; +#define ICE_AQC_PHY_EEE_EN_100BASE_TX BIT(0) +#define ICE_AQC_PHY_EEE_EN_1000BASE_T BIT(1) +#define ICE_AQC_PHY_EEE_EN_10GBASE_T BIT(2) +#define ICE_AQC_PHY_EEE_EN_1000BASE_KX BIT(3) +#define ICE_AQC_PHY_EEE_EN_10GBASE_KR BIT(4) +#define ICE_AQC_PHY_EEE_EN_25GBASE_KR BIT(5) +#define ICE_AQC_PHY_EEE_EN_40GBASE_KR4 BIT(6) +#define ICE_AQC_PHY_EEE_EN_50GBASE_KR2 BIT(7) +#define ICE_AQC_PHY_EEE_EN_50GBASE_KR_PAM4 BIT(8) +#define ICE_AQC_PHY_EEE_EN_100GBASE_KR4 BIT(9) +#define ICE_AQC_PHY_EEE_EN_100GBASE_KR2_PAM4 BIT(10) + __le16 eeer_value; + u8 phy_id_oui[4]; /* PHY/Module ID connected on the port */ + u8 phy_fw_ver[8]; + u8 link_fec_options; +#define ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN BIT(0) +#define ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ BIT(1) +#define ICE_AQC_PHY_FEC_25G_RS_528_REQ BIT(2) +#define ICE_AQC_PHY_FEC_25G_KR_REQ BIT(3) +#define ICE_AQC_PHY_FEC_25G_RS_544_REQ BIT(4) +#define ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN BIT(6) +#define ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN BIT(7) +#define 
ICE_AQC_PHY_FEC_MASK MAKEMASK(0xdf, 0) + u8 module_compliance_enforcement; +#define ICE_AQC_MOD_ENFORCE_STRICT_MODE BIT(0) + u8 extended_compliance_code; +#define ICE_MODULE_TYPE_TOTAL_BYTE 3 + u8 module_type[ICE_MODULE_TYPE_TOTAL_BYTE]; +#define ICE_AQC_MOD_TYPE_BYTE0_SFP_PLUS 0xA0 +#define ICE_AQC_MOD_TYPE_BYTE0_QSFP_PLUS 0x80 +#define ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE BIT(0) +#define ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE BIT(1) +#define ICE_AQC_MOD_TYPE_BYTE1_10G_BASE_SR BIT(4) +#define ICE_AQC_MOD_TYPE_BYTE1_10G_BASE_LR BIT(5) +#define ICE_AQC_MOD_TYPE_BYTE1_10G_BASE_LRM BIT(6) +#define ICE_AQC_MOD_TYPE_BYTE1_10G_BASE_ER BIT(7) +#define ICE_AQC_MOD_TYPE_BYTE2_SFP_PLUS 0xA0 +#define ICE_AQC_MOD_TYPE_BYTE2_QSFP_PLUS 0x86 + u8 qualified_module_count; + u8 rsvd2[7]; /* Bytes 47:41 reserved */ +#define ICE_AQC_QUAL_MOD_COUNT_MAX 16 + struct { + u8 v_oui[3]; + u8 rsvd3; + u8 v_part[16]; + __le32 v_rev; + __le64 rsvd4; + } qual_modules[ICE_AQC_QUAL_MOD_COUNT_MAX]; +}; + +/* Set PHY capabilities (direct 0x0601) + * NOTE: This command must be followed by setup link and restart auto-neg + */ +struct ice_aqc_set_phy_cfg { + u8 lport_num; + u8 reserved[7]; + __le32 addr_high; + __le32 addr_low; +}; + +/* Set PHY config command data structure */ +struct ice_aqc_set_phy_cfg_data { + __le64 phy_type_low; /* Use values from ICE_PHY_TYPE_LOW_* */ + __le64 phy_type_high; /* Use values from ICE_PHY_TYPE_HIGH_* */ + u8 caps; +#define ICE_AQ_PHY_ENA_VALID_MASK MAKEMASK(0xef, 0) +#define ICE_AQ_PHY_ENA_TX_PAUSE_ABILITY BIT(0) +#define ICE_AQ_PHY_ENA_RX_PAUSE_ABILITY BIT(1) +#define ICE_AQ_PHY_ENA_LOW_POWER BIT(2) +#define ICE_AQ_PHY_ENA_LINK BIT(3) +#define ICE_AQ_PHY_ENA_AUTO_LINK_UPDT BIT(5) +#define ICE_AQ_PHY_ENA_LESM BIT(6) +#define ICE_AQ_PHY_ENA_AUTO_FEC BIT(7) + u8 low_power_ctrl_an; + __le16 eee_cap; /* Value from ice_aqc_get_phy_caps */ + __le16 eeer_value; + u8 link_fec_opt; /* Use defines from ice_aqc_get_phy_caps */ + u8 module_compliance_enforcement; +}; + +/* Set MAC Config command data structure (direct 0x0603) */ +struct ice_aqc_set_mac_cfg { + __le16 max_frame_size; + u8 params; +#define ICE_AQ_SET_MAC_PACE_S 3 +#define ICE_AQ_SET_MAC_PACE_M (0xF << ICE_AQ_SET_MAC_PACE_S) +#define ICE_AQ_SET_MAC_PACE_TYPE_M BIT(7) +#define ICE_AQ_SET_MAC_PACE_TYPE_RATE 0 +#define ICE_AQ_SET_MAC_PACE_TYPE_FIXED ICE_AQ_SET_MAC_PACE_TYPE_M + u8 tx_tmr_priority; + __le16 tx_tmr_value; + __le16 fc_refresh_threshold; + u8 drop_opts; +#define ICE_AQ_SET_MAC_AUTO_DROP_MASK BIT(0) +#define ICE_AQ_SET_MAC_AUTO_DROP_NONE 0 +#define ICE_AQ_SET_MAC_AUTO_DROP_BLOCKING_PKTS BIT(0) + u8 reserved[7]; +}; + +/* Restart AN command data structure (direct 0x0605) + * Also used for response, with only the lport_num field present. 
+ */ +struct ice_aqc_restart_an { + u8 lport_num; + u8 reserved; + u8 cmd_flags; +#define ICE_AQC_RESTART_AN_LINK_RESTART BIT(1) +#define ICE_AQC_RESTART_AN_LINK_ENABLE BIT(2) + u8 reserved2[13]; +}; + +/* Get link status (indirect 0x0607), also used for Link Status Event */ +struct ice_aqc_get_link_status { + u8 lport_num; + u8 reserved; + __le16 cmd_flags; +#define ICE_AQ_LSE_M 0x3 +#define ICE_AQ_LSE_NOP 0x0 +#define ICE_AQ_LSE_DIS 0x2 +#define ICE_AQ_LSE_ENA 0x3 + /* only response uses this flag */ +#define ICE_AQ_LSE_IS_ENABLED 0x1 + __le32 reserved2; + __le32 addr_high; + __le32 addr_low; +}; + +/* Get link status response data structure, also used for Link Status Event */ +struct ice_aqc_get_link_status_data { + u8 topo_media_conflict; +#define ICE_AQ_LINK_TOPO_CONFLICT BIT(0) +#define ICE_AQ_LINK_MEDIA_CONFLICT BIT(1) +#define ICE_AQ_LINK_TOPO_CORRUPT BIT(2) +#define ICE_AQ_LINK_TOPO_UNREACH_PRT BIT(4) +#define ICE_AQ_LINK_TOPO_UNDRUTIL_PRT BIT(5) +#define ICE_AQ_LINK_TOPO_UNDRUTIL_MEDIA BIT(6) +#define ICE_AQ_LINK_TOPO_UNSUPP_MEDIA BIT(7) + u8 link_cfg_err; +#define ICE_AQ_LINK_CFG_ERR BIT(0) + u8 link_info; +#define ICE_AQ_LINK_UP BIT(0) /* Link Status */ +#define ICE_AQ_LINK_FAULT BIT(1) +#define ICE_AQ_LINK_FAULT_TX BIT(2) +#define ICE_AQ_LINK_FAULT_RX BIT(3) +#define ICE_AQ_LINK_FAULT_REMOTE BIT(4) +#define ICE_AQ_LINK_UP_PORT BIT(5) /* External Port Link Status */ +#define ICE_AQ_MEDIA_AVAILABLE BIT(6) +#define ICE_AQ_SIGNAL_DETECT BIT(7) + u8 an_info; +#define ICE_AQ_AN_COMPLETED BIT(0) +#define ICE_AQ_LP_AN_ABILITY BIT(1) +#define ICE_AQ_PD_FAULT BIT(2) /* Parallel Detection Fault */ +#define ICE_AQ_FEC_EN BIT(3) +#define ICE_AQ_PHY_LOW_POWER BIT(4) /* Low Power State */ +#define ICE_AQ_LINK_PAUSE_TX BIT(5) +#define ICE_AQ_LINK_PAUSE_RX BIT(6) +#define ICE_AQ_QUALIFIED_MODULE BIT(7) + u8 ext_info; +#define ICE_AQ_LINK_PHY_TEMP_ALARM BIT(0) +#define ICE_AQ_LINK_EXCESSIVE_ERRORS BIT(1) /* Excessive Link Errors */ + /* Port Tx Suspended */ +#define ICE_AQ_LINK_TX_S 2 +#define ICE_AQ_LINK_TX_M (0x03 << ICE_AQ_LINK_TX_S) +#define ICE_AQ_LINK_TX_ACTIVE 0 +#define ICE_AQ_LINK_TX_DRAINED 1 +#define ICE_AQ_LINK_TX_FLUSHED 3 + u8 lb_status; +#define ICE_AQ_LINK_LB_PHY_LCL BIT(0) +#define ICE_AQ_LINK_LB_PHY_RMT BIT(1) +#define ICE_AQ_LINK_LB_MAC_LCL BIT(2) +#define ICE_AQ_LINK_LB_PHY_IDX_S 3 +#define ICE_AQ_LINK_LB_PHY_IDX_M (0x7 << ICE_AQ_LB_PHY_IDX_S) + __le16 max_frame_size; + u8 cfg; +#define ICE_AQ_LINK_25G_KR_FEC_EN BIT(0) +#define ICE_AQ_LINK_25G_RS_528_FEC_EN BIT(1) +#define ICE_AQ_LINK_25G_RS_544_FEC_EN BIT(2) +#define ICE_AQ_FEC_MASK MAKEMASK(0x7, 0) + /* Pacing Config */ +#define ICE_AQ_CFG_PACING_S 3 +#define ICE_AQ_CFG_PACING_M (0xF << ICE_AQ_CFG_PACING_S) +#define ICE_AQ_CFG_PACING_TYPE_M BIT(7) +#define ICE_AQ_CFG_PACING_TYPE_AVG 0 +#define ICE_AQ_CFG_PACING_TYPE_FIXED ICE_AQ_CFG_PACING_TYPE_M + /* External Device Power Ability */ + u8 power_desc; +#define ICE_AQ_PWR_CLASS_M 0x3 +#define ICE_AQ_LINK_PWR_BASET_LOW_HIGH 0 +#define ICE_AQ_LINK_PWR_BASET_HIGH 1 +#define ICE_AQ_LINK_PWR_QSFP_CLASS_1 0 +#define ICE_AQ_LINK_PWR_QSFP_CLASS_2 1 +#define ICE_AQ_LINK_PWR_QSFP_CLASS_3 2 +#define ICE_AQ_LINK_PWR_QSFP_CLASS_4 3 + __le16 link_speed; +#define ICE_AQ_LINK_SPEED_M 0x7FF +#define ICE_AQ_LINK_SPEED_10MB BIT(0) +#define ICE_AQ_LINK_SPEED_100MB BIT(1) +#define ICE_AQ_LINK_SPEED_1000MB BIT(2) +#define ICE_AQ_LINK_SPEED_2500MB BIT(3) +#define ICE_AQ_LINK_SPEED_5GB BIT(4) +#define ICE_AQ_LINK_SPEED_10GB BIT(5) +#define ICE_AQ_LINK_SPEED_20GB BIT(6) +#define 
ICE_AQ_LINK_SPEED_25GB BIT(7) +#define ICE_AQ_LINK_SPEED_40GB BIT(8) +#define ICE_AQ_LINK_SPEED_50GB BIT(9) +#define ICE_AQ_LINK_SPEED_100GB BIT(10) +#define ICE_AQ_LINK_SPEED_UNKNOWN BIT(15) + __le32 reserved3; /* Aligns next field to 8-byte boundary */ + __le64 phy_type_low; /* Use values from ICE_PHY_TYPE_LOW_* */ + __le64 phy_type_high; /* Use values from ICE_PHY_TYPE_HIGH_* */ +}; + +/* Set event mask command (direct 0x0613) */ +struct ice_aqc_set_event_mask { + u8 lport_num; + u8 reserved[7]; + __le16 event_mask; +#define ICE_AQ_LINK_EVENT_UPDOWN BIT(1) +#define ICE_AQ_LINK_EVENT_MEDIA_NA BIT(2) +#define ICE_AQ_LINK_EVENT_LINK_FAULT BIT(3) +#define ICE_AQ_LINK_EVENT_PHY_TEMP_ALARM BIT(4) +#define ICE_AQ_LINK_EVENT_EXCESSIVE_ERRORS BIT(5) +#define ICE_AQ_LINK_EVENT_SIGNAL_DETECT BIT(6) +#define ICE_AQ_LINK_EVENT_AN_COMPLETED BIT(7) +#define ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL BIT(8) +#define ICE_AQ_LINK_EVENT_PORT_TX_SUSPENDED BIT(9) +#define ICE_AQ_LINK_EVENT_TOPO_CONFLICT BIT(10) +#define ICE_AQ_LINK_EVENT_MEDIA_CONFLICT BIT(11) + u8 reserved1[6]; +}; + +/* Set MAC Loopback command (direct 0x0620) */ +struct ice_aqc_set_mac_lb { + u8 lb_mode; +#define ICE_AQ_MAC_LB_EN BIT(0) +#define ICE_AQ_MAC_LB_OSC_CLK BIT(1) + u8 reserved[15]; +}; + +struct ice_aqc_link_topo_addr { + u8 lport_num; + u8 lport_num_valid; +#define ICE_AQC_LINK_TOPO_PORT_NUM_VALID BIT(0) + u8 node_type_ctx; +#define ICE_AQC_LINK_TOPO_NODE_TYPE_S 0 +#define ICE_AQC_LINK_TOPO_NODE_TYPE_M (0xF << ICE_AQC_LINK_TOPO_NODE_TYPE_S) +#define ICE_AQC_LINK_TOPO_NODE_TYPE_PHY 0 +#define ICE_AQC_LINK_TOPO_NODE_TYPE_GPIO_CTRL 1 +#define ICE_AQC_LINK_TOPO_NODE_TYPE_MUX_CTRL 2 +#define ICE_AQC_LINK_TOPO_NODE_TYPE_LED_CTRL 3 +#define ICE_AQC_LINK_TOPO_NODE_TYPE_LED 4 +#define ICE_AQC_LINK_TOPO_NODE_TYPE_THERMAL 5 +#define ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE 6 +#define ICE_AQC_LINK_TOPO_NODE_TYPE_MEZZ 7 +#define ICE_AQC_LINK_TOPO_NODE_TYPE_ID_EEPROM 8 +#define ICE_AQC_LINK_TOPO_NODE_CTX_S 4 +#define ICE_AQC_LINK_TOPO_NODE_CTX_M \ + (0xF << ICE_AQC_LINK_TOPO_NODE_CTX_S) +#define ICE_AQC_LINK_TOPO_NODE_CTX_GLOBAL 0 +#define ICE_AQC_LINK_TOPO_NODE_CTX_BOARD 1 +#define ICE_AQC_LINK_TOPO_NODE_CTX_PORT 2 +#define ICE_AQC_LINK_TOPO_NODE_CTX_NODE 3 +#define ICE_AQC_LINK_TOPO_NODE_CTX_PROVIDED 4 +#define ICE_AQC_LINK_TOPO_NODE_CTX_OVERRIDE 5 + u8 index; + __le16 handle; +#define ICE_AQC_LINK_TOPO_HANDLE_S 0 +#define ICE_AQC_LINK_TOPO_HANDLE_M (0x3FF << ICE_AQC_LINK_TOPO_HANDLE_S) +/* Used to decode the handle field */ +#define ICE_AQC_LINK_TOPO_HANDLE_BRD_TYPE_M BIT(9) +#define ICE_AQC_LINK_TOPO_HANDLE_BRD_TYPE_LOM BIT(9) +#define ICE_AQC_LINK_TOPO_HANDLE_BRD_TYPE_MEZZ 0 +#define ICE_AQC_LINK_TOPO_HANDLE_NODE_S 0 +/* In case of a Mezzanine type */ +#define ICE_AQC_LINK_TOPO_HANDLE_MEZZ_NODE_M \ + (0x3F << ICE_AQC_LINK_TOPO_HANDLE_NODE_S) +#define ICE_AQC_LINK_TOPO_HANDLE_MEZZ_S 6 +#define ICE_AQC_LINK_TOPO_HANDLE_MEZZ_M (0x7 << ICE_AQC_LINK_TOPO_HANDLE_MEZZ_S) +/* In case of a LOM type */ +#define ICE_AQC_LINK_TOPO_HANDLE_LOM_NODE_M \ + (0x1FF << ICE_AQC_LINK_TOPO_HANDLE_NODE_S) +}; + +/* Get Link Topology Handle (direct, 0x06E0) */ +struct ice_aqc_get_link_topo { + struct ice_aqc_link_topo_addr addr; + u8 node_part_num; + u8 rsvd[9]; +}; + +/* Set Port Identification LED (direct, 0x06E9) */ +struct ice_aqc_set_port_id_led { + u8 lport_num; + u8 lport_num_valid; +#define ICE_AQC_PORT_ID_PORT_NUM_VALID BIT(0) + u8 ident_mode; +#define ICE_AQC_PORT_IDENT_LED_BLINK BIT(0) +#define ICE_AQC_PORT_IDENT_LED_ORIG 0 + u8 rsvd[13]; +}; + +/* Read/Write SFF 
EEPROM command (indirect 0x06EE) */ +struct ice_aqc_sff_eeprom { + u8 lport_num; + u8 lport_num_valid; +#define ICE_AQC_SFF_PORT_NUM_VALID BIT(0) + __le16 i2c_bus_addr; +#define ICE_AQC_SFF_I2CBUS_7BIT_M 0x7F +#define ICE_AQC_SFF_I2CBUS_10BIT_M 0x3FF +#define ICE_AQC_SFF_I2CBUS_TYPE_M BIT(10) +#define ICE_AQC_SFF_I2CBUS_TYPE_7BIT 0 +#define ICE_AQC_SFF_I2CBUS_TYPE_10BIT ICE_AQC_SFF_I2CBUS_TYPE_M +#define ICE_AQC_SFF_SET_EEPROM_PAGE_S 11 +#define ICE_AQC_SFF_SET_EEPROM_PAGE_M (0x3 << ICE_AQC_SFF_SET_EEPROM_PAGE_S) +#define ICE_AQC_SFF_NO_PAGE_CHANGE 0 +#define ICE_AQC_SFF_SET_23_ON_MISMATCH 1 +#define ICE_AQC_SFF_SET_22_ON_MISMATCH 2 +#define ICE_AQC_SFF_IS_WRITE BIT(15) + __le16 i2c_mem_addr; + __le16 eeprom_page; +#define ICE_AQC_SFF_EEPROM_BANK_S 0 +#define ICE_AQC_SFF_EEPROM_BANK_M (0xFF << ICE_AQC_SFF_EEPROM_BANK_S) +#define ICE_AQC_SFF_EEPROM_PAGE_S 8 +#define ICE_AQC_SFF_EEPROM_PAGE_M (0xFF << ICE_AQC_SFF_EEPROM_PAGE_S) + __le32 addr_high; + __le32 addr_low; +}; + +/* NVM Read command (indirect 0x0701) + * NVM Erase commands (direct 0x0702) + * NVM Write commands (indirect 0x0703) + * NVM Write Activate commands (direct 0x0707) + * NVM Shadow RAM Dump commands (direct 0x0707) + */ +struct ice_aqc_nvm { +#define ICE_AQC_NVM_MAX_OFFSET 0xFFFFFF + __le16 offset_low; + u8 offset_high; /* For Write Activate offset_high is used as flags2 */ + u8 cmd_flags; +#define ICE_AQC_NVM_LAST_CMD BIT(0) +#define ICE_AQC_NVM_PCIR_REQ BIT(0) /* Used by NVM Write reply */ +#define ICE_AQC_NVM_PRESERVATION_S 1 /* Used by NVM Write Activate only */ +#define ICE_AQC_NVM_PRESERVATION_M (3 << ICE_AQC_NVM_PRESERVATION_S) +#define ICE_AQC_NVM_NO_PRESERVATION (0 << ICE_AQC_NVM_PRESERVATION_S) +#define ICE_AQC_NVM_PRESERVE_ALL BIT(1) +#define ICE_AQC_NVM_FACTORY_DEFAULT (2 << ICE_AQC_NVM_PRESERVATION_S) +#define ICE_AQC_NVM_PRESERVE_SELECTED (3 << ICE_AQC_NVM_PRESERVATION_S) +#define ICE_AQC_NVM_ACTIV_SEL_NVM BIT(3) /* Write Activate/SR Dump only */ +#define ICE_AQC_NVM_ACTIV_SEL_OROM BIT(4) +#define ICE_AQC_NVM_ACTIV_SEL_NETLIST BIT(5) +#define ICE_AQC_NVM_SPECIAL_UPDATE BIT(6) +#define ICE_AQC_NVM_REVERT_LAST_ACTIV BIT(6) /* Write Activate only */ +#define ICE_AQC_NVM_ACTIV_SEL_MASK MAKEMASK(0x7, 3) +#define ICE_AQC_NVM_FLASH_ONLY BIT(7) + __le16 module_typeid; + __le16 length; +#define ICE_AQC_NVM_ERASE_LEN 0xFFFF + __le32 addr_high; + __le32 addr_low; +}; + +/* NVM Module_Type ID, needed offset and read_len for struct ice_aqc_nvm. */ +#define ICE_AQC_NVM_SECTOR_UNIT 4096 /* In Bytes */ +#define ICE_AQC_NVM_WORD_UNIT 2 /* In Bytes */ + +#define ICE_AQC_NVM_START_POINT 0 +#define ICE_AQC_NVM_EMP_SR_PTR_OFFSET 0x90 +#define ICE_AQC_NVM_EMP_SR_PTR_RD_LEN 2 /* In Bytes */ +#define ICE_AQC_NVM_EMP_SR_PTR_M MAKEMASK(0x7FFF, 0) +#define ICE_AQC_NVM_EMP_SR_PTR_TYPE_S 15 +#define ICE_AQC_NVM_EMP_SR_PTR_TYPE_M BIT(15) +#define ICE_AQC_NVM_EMP_SR_PTR_TYPE_SECTOR 1 + +#define ICE_AQC_NVM_LLDP_CFG_PTR_OFFSET 0x46 +#define ICE_AQC_NVM_LLDP_CFG_HEADER_LEN 2 /* In Bytes */ +#define ICE_AQC_NVM_LLDP_CFG_PTR_RD_LEN 2 /* In Bytes */ + +#define ICE_AQC_NVM_LLDP_PRESERVED_MOD_ID 0x129 +#define ICE_AQC_NVM_CUR_LLDP_PERSIST_RD_OFFSET 2 /* In Bytes */ +#define ICE_AQC_NVM_LLDP_STATUS_M MAKEMASK(0xF, 0) +#define ICE_AQC_NVM_LLDP_STATUS_M_LEN 4 /* In Bits */ +#define ICE_AQC_NVM_LLDP_STATUS_RD_LEN 4 /* In Bytes */ + +/* The result of netlist NVM read comes in a TLV format. The actual data + * (netlist header) starts from word offset 1 (byte 2). 
The FW strips + * out the type field from the TLV header so all the netlist fields + * should adjust their offset value by 1 word (2 bytes) in order to map + * their correct location. + */ +#define ICE_AQC_NVM_LINK_TOPO_NETLIST_MOD_ID 0x11B +#define ICE_AQC_NVM_LINK_TOPO_NETLIST_LEN_OFFSET 1 +#define ICE_AQC_NVM_LINK_TOPO_NETLIST_LEN 2 /* In bytes */ +#define ICE_AQC_NVM_NETLIST_NODE_COUNT_OFFSET 2 +#define ICE_AQC_NVM_NETLIST_NODE_COUNT_LEN 2 /* In bytes */ +#define ICE_AQC_NVM_NETLIST_ID_BLK_START_OFFSET 5 +#define ICE_AQC_NVM_NETLIST_ID_BLK_LEN 0x30 /* In words */ + +/* netlist ID block field offsets (word offsets) */ +#define ICE_AQC_NVM_NETLIST_ID_BLK_MAJOR_VER_LOW 2 +#define ICE_AQC_NVM_NETLIST_ID_BLK_MAJOR_VER_HIGH 3 +#define ICE_AQC_NVM_NETLIST_ID_BLK_MINOR_VER_LOW 4 +#define ICE_AQC_NVM_NETLIST_ID_BLK_MINOR_VER_HIGH 5 +#define ICE_AQC_NVM_NETLIST_ID_BLK_TYPE_LOW 6 +#define ICE_AQC_NVM_NETLIST_ID_BLK_TYPE_HIGH 7 +#define ICE_AQC_NVM_NETLIST_ID_BLK_REV_LOW 8 +#define ICE_AQC_NVM_NETLIST_ID_BLK_REV_HIGH 9 +#define ICE_AQC_NVM_NETLIST_ID_BLK_SHA_HASH 0xA +#define ICE_AQC_NVM_NETLIST_ID_BLK_CUST_VER 0x2F + +/* Used for 0x0704 as well as for 0x0705 commands */ +struct ice_aqc_nvm_cfg { + u8 cmd_flags; +#define ICE_AQC_ANVM_MULTIPLE_ELEMS BIT(0) +#define ICE_AQC_ANVM_IMMEDIATE_FIELD BIT(1) +#define ICE_AQC_ANVM_NEW_CFG BIT(2) + u8 reserved; + __le16 count; + __le16 id; + u8 reserved1[2]; + __le32 addr_high; + __le32 addr_low; +}; + +struct ice_aqc_nvm_cfg_data { + __le16 field_id; + __le16 field_options; + __le16 field_value; +}; + +/* NVM Checksum Command (direct, 0x0706) */ +struct ice_aqc_nvm_checksum { + u8 flags; +#define ICE_AQC_NVM_CHECKSUM_VERIFY BIT(0) +#define ICE_AQC_NVM_CHECKSUM_RECALC BIT(1) + u8 rsvd; + __le16 checksum; /* Used only by response */ +#define ICE_AQC_NVM_CHECKSUM_CORRECT 0xBABA + u8 rsvd2[12]; +}; + +/* Get LLDP MIB (indirect 0x0A00) + * Note: This is also used by the LLDP MIB Change Event (0x0A01) + * as the format is the same. + */ +struct ice_aqc_lldp_get_mib { + u8 type; +#define ICE_AQ_LLDP_MIB_TYPE_S 0 +#define ICE_AQ_LLDP_MIB_TYPE_M (0x3 << ICE_AQ_LLDP_MIB_TYPE_S) +#define ICE_AQ_LLDP_MIB_LOCAL 0 +#define ICE_AQ_LLDP_MIB_REMOTE 1 +#define ICE_AQ_LLDP_MIB_LOCAL_AND_REMOTE 2 +#define ICE_AQ_LLDP_BRID_TYPE_S 2 +#define ICE_AQ_LLDP_BRID_TYPE_M (0x3 << ICE_AQ_LLDP_BRID_TYPE_S) +#define ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID 0 +#define ICE_AQ_LLDP_BRID_TYPE_NON_TPMR 1 +/* Tx pause flags in the 0xA01 event use ICE_AQ_LLDP_TX_* */ +#define ICE_AQ_LLDP_TX_S 0x4 +#define ICE_AQ_LLDP_TX_M (0x03 << ICE_AQ_LLDP_TX_S) +#define ICE_AQ_LLDP_TX_ACTIVE 0 +#define ICE_AQ_LLDP_TX_SUSPENDED 1 +#define ICE_AQ_LLDP_TX_FLUSHED 3 +/* The following bytes are reserved for the Get LLDP MIB command (0x0A00) + * and in the LLDP MIB Change Event (0x0A01). They are valid for the + * Get LLDP MIB (0x0A00) response only. 
+ */ + u8 reserved1; + __le16 local_len; + __le16 remote_len; + u8 reserved2[2]; + __le32 addr_high; + __le32 addr_low; +}; + +/* Configure LLDP MIB Change Event (direct 0x0A01) */ +/* For MIB Change Event use ice_aqc_lldp_get_mib structure above */ +struct ice_aqc_lldp_set_mib_change { + u8 command; +#define ICE_AQ_LLDP_MIB_UPDATE_ENABLE 0x0 +#define ICE_AQ_LLDP_MIB_UPDATE_DIS 0x1 + u8 reserved[15]; +}; + +/* Add LLDP TLV (indirect 0x0A02) + * Delete LLDP TLV (indirect 0x0A04) + */ +struct ice_aqc_lldp_add_delete_tlv { + u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */ + u8 reserved1[1]; + __le16 len; + u8 reserved2[4]; + __le32 addr_high; + __le32 addr_low; +}; + +/* Update LLDP TLV (indirect 0x0A03) */ +struct ice_aqc_lldp_update_tlv { + u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */ + u8 reserved; + __le16 old_len; + __le16 new_offset; + __le16 new_len; + __le32 addr_high; + __le32 addr_low; +}; + +/* Stop LLDP (direct 0x0A05) */ +struct ice_aqc_lldp_stop { + u8 command; +#define ICE_AQ_LLDP_AGENT_STATE_MASK BIT(0) +#define ICE_AQ_LLDP_AGENT_STOP 0x0 +#define ICE_AQ_LLDP_AGENT_SHUTDOWN ICE_AQ_LLDP_AGENT_STATE_MASK +#define ICE_AQ_LLDP_AGENT_PERSIST_DIS BIT(1) + u8 reserved[15]; +}; + +/* Start LLDP (direct 0x0A06) */ +struct ice_aqc_lldp_start { + u8 command; +#define ICE_AQ_LLDP_AGENT_START BIT(0) +#define ICE_AQ_LLDP_AGENT_PERSIST_ENA BIT(1) + u8 reserved[15]; +}; + +/* Get CEE DCBX Oper Config (0x0A07) + * The command uses the generic descriptor struct and + * returns the struct below as an indirect response. + */ +struct ice_aqc_get_cee_dcb_cfg_resp { + u8 oper_num_tc; + u8 oper_prio_tc[4]; + u8 oper_tc_bw[8]; + u8 oper_pfc_en; + __le16 oper_app_prio; +#define ICE_AQC_CEE_APP_FCOE_S 0 +#define ICE_AQC_CEE_APP_FCOE_M (0x7 << ICE_AQC_CEE_APP_FCOE_S) +#define ICE_AQC_CEE_APP_ISCSI_S 3 +#define ICE_AQC_CEE_APP_ISCSI_M (0x7 << ICE_AQC_CEE_APP_ISCSI_S) +#define ICE_AQC_CEE_APP_FIP_S 8 +#define ICE_AQC_CEE_APP_FIP_M (0x7 << ICE_AQC_CEE_APP_FIP_S) + __le32 tlv_status; +#define ICE_AQC_CEE_PG_STATUS_S 0 +#define ICE_AQC_CEE_PG_STATUS_M (0x7 << ICE_AQC_CEE_PG_STATUS_S) +#define ICE_AQC_CEE_PFC_STATUS_S 3 +#define ICE_AQC_CEE_PFC_STATUS_M (0x7 << ICE_AQC_CEE_PFC_STATUS_S) +#define ICE_AQC_CEE_FCOE_STATUS_S 8 +#define ICE_AQC_CEE_FCOE_STATUS_M (0x7 << ICE_AQC_CEE_FCOE_STATUS_S) +#define ICE_AQC_CEE_ISCSI_STATUS_S 11 +#define ICE_AQC_CEE_ISCSI_STATUS_M (0x7 << ICE_AQC_CEE_ISCSI_STATUS_S) +#define ICE_AQC_CEE_FIP_STATUS_S 16 +#define ICE_AQC_CEE_FIP_STATUS_M (0x7 << ICE_AQC_CEE_FIP_STATUS_S) + u8 reserved[12]; +}; + +/* Set Local LLDP MIB (indirect 0x0A08) + * Used to replace the local MIB of a given LLDP agent. e.g. DCBX + */ +struct ice_aqc_lldp_set_local_mib { + u8 type; +#define SET_LOCAL_MIB_TYPE_DCBX_M BIT(0) +#define SET_LOCAL_MIB_TYPE_LOCAL_MIB 0 +#define SET_LOCAL_MIB_TYPE_CEE_M BIT(1) +#define SET_LOCAL_MIB_TYPE_CEE_WILLING 0 +#define SET_LOCAL_MIB_TYPE_CEE_NON_WILLING SET_LOCAL_MIB_TYPE_CEE_M + u8 reserved0; + __le16 length; + u8 reserved1[4]; + __le32 addr_high; + __le32 addr_low; +}; + +struct ice_aqc_lldp_set_local_mib_resp { + u8 status; +#define SET_LOCAL_MIB_RESP_EVENT_M BIT(0) +#define SET_LOCAL_MIB_RESP_MIB_CHANGE_SILENT 0 +#define SET_LOCAL_MIB_RESP_MIB_CHANGE_EVENT SET_LOCAL_MIB_RESP_EVENT_M + u8 reserved[15]; +}; + +/* Stop/Start LLDP Agent (direct 0x0A09) + * Used for stopping/starting specific LLDP agent. e.g. DCBX. + * The same structure is used for the response, with the command field + * being used as the status field. 
+ */ +struct ice_aqc_lldp_stop_start_specific_agent { + u8 command; +#define ICE_AQC_START_STOP_AGENT_M BIT(0) +#define ICE_AQC_START_STOP_AGENT_STOP_DCBX 0 +#define ICE_AQC_START_STOP_AGENT_START_DCBX ICE_AQC_START_STOP_AGENT_M + u8 reserved[15]; +}; + +/* Get/Set RSS key (indirect 0x0B04/0x0B02) */ +struct ice_aqc_get_set_rss_key { +#define ICE_AQC_GSET_RSS_KEY_VSI_VALID BIT(15) +#define ICE_AQC_GSET_RSS_KEY_VSI_ID_S 0 +#define ICE_AQC_GSET_RSS_KEY_VSI_ID_M (0x3FF << ICE_AQC_GSET_RSS_KEY_VSI_ID_S) + __le16 vsi_id; + u8 reserved[6]; + __le32 addr_high; + __le32 addr_low; +}; + +#define ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE 0x28 +#define ICE_AQC_GET_SET_RSS_KEY_DATA_HASH_KEY_SIZE 0xC +#define ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE \ + (ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE + \ + ICE_AQC_GET_SET_RSS_KEY_DATA_HASH_KEY_SIZE) + +/** + * struct ice_aqc_get_set_rss_keys - Get/Set RSS hash key command buffer + * @standard_rss_key: 40 most significant bytes of hash key + * @extended_hash_key: 12 least significant bytes of hash key + * + * Set/Get 40 byte hash key using standard_rss_key field, and set + * extended_hash_key field to zero. Set/Get 52 byte hash key using + * standard_rss_key field for 40 most significant bytes and the + * extended_hash_key field for the 12 least significant bytes of hash key. + */ +struct ice_aqc_get_set_rss_keys { + u8 standard_rss_key[ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE]; + u8 extended_hash_key[ICE_AQC_GET_SET_RSS_KEY_DATA_HASH_KEY_SIZE]; +}; + +/* Get/Set RSS LUT (indirect 0x0B05/0x0B03) */ +struct ice_aqc_get_set_rss_lut { +#define ICE_AQC_GSET_RSS_LUT_VSI_VALID BIT(15) +#define ICE_AQC_GSET_RSS_LUT_VSI_ID_S 0 +#define ICE_AQC_GSET_RSS_LUT_VSI_ID_M (0x1FF << ICE_AQC_GSET_RSS_LUT_VSI_ID_S) + __le16 vsi_id; +#define ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S 0 +#define ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M \ + (0x3 << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) + +#define ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI 0 +#define ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF 1 +#define ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL 2 + +#define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S 2 +#define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M \ + (0x3 << ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) + +#define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128 128 +#define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128_FLAG 0 +#define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512 512 +#define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG 1 +#define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K 2048 +#define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG 2 + +#define ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S 4 +#define ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M \ + (0xF << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) + + __le16 flags; + __le32 reserved; + __le32 addr_high; + __le32 addr_low; +}; + +/* Clear FD Table Command (direct, 0x0B06) */ +struct ice_aqc_clear_fd_table { + u8 clear_type; +#define CL_FD_VM_VF_TYPE_VSI_IDX 1 +#define CL_FD_VM_VF_TYPE_PF_IDX 2 + u8 rsvd; + __le16 vsi_index; + u8 reserved[12]; +}; + +/* ACL - allocate (indirect 0x0C10) table */ +#define ICE_AQC_ACL_KEY_WIDTH 40 +#define ICE_AQC_ACL_KEY_WIDTH_BYTES 5 +#define ICE_AQC_ACL_TCAM_DEPTH 512 +#define ICE_ACL_ENTRY_ALLOC_UNIT 64 +#define ICE_AQC_MAX_CONCURRENT_ACL_TBL 15 +#define ICE_AQC_MAX_ACTION_MEMORIES 20 +#define ICE_AQC_MAX_ACTION_ENTRIES 512 +#define ICE_AQC_ACL_SLICES 16 +#define ICE_AQC_ALLOC_ID_LESS_THAN_4K 0x1000 +/* The ACL block supports up to 8 actions per a single output. 
*/ +#define ICE_AQC_TBL_MAX_ACTION_PAIRS 4 + +#define ICE_AQC_MAX_TCAM_ALLOC_UNITS (ICE_AQC_ACL_TCAM_DEPTH / \ + ICE_ACL_ENTRY_ALLOC_UNIT) +#define ICE_AQC_ACL_ALLOC_UNITS (ICE_AQC_ACL_SLICES * \ + ICE_AQC_MAX_TCAM_ALLOC_UNITS) + +struct ice_aqc_acl_alloc_table { + __le16 table_width; + __le16 table_depth; + u8 act_pairs_per_entry; + /* For non-concurrent table allocation, this field needs + * to be set to zero(0) otherwise it shall specify the + * amount of concurrent tables whose AllocIDs are + * specified in buffer. Thus the newly allocated table + * is concurrent with table IDs specified in AllocIDs. + */ +#define ICE_AQC_ACL_ALLOC_TABLE_TYPE_NONCONCURR 0 + u8 table_type; + __le16 reserved; + __le32 addr_high; + __le32 addr_low; +}; + +/* Allocate ACL table command buffer format */ +struct ice_aqc_acl_alloc_table_data { + /* Dependent table AllocIDs. Each word in this 15 word array specifies + * a dependent table AllocID according to the amount specified in the + * "table_type" field. All unused words shall be set to 0xFFFF + */ +#define ICE_AQC_CONCURR_ID_INVALID 0xffff + __le16 alloc_ids[ICE_AQC_MAX_CONCURRENT_ACL_TBL]; +}; + +/* ACL - deallocate (indirect 0x0C11) table + * ACL - allocate (indirect 0x0C12) action-pair + * ACL - deallocate (indirect 0x0C13) action-pair + */ + +/* Following structure is common and used in case of deallocation + * of ACL table and action-pair + */ +struct ice_aqc_acl_tbl_actpair { + /* Alloc ID of the table being released */ + __le16 alloc_id; + u8 reserved[6]; + __le32 addr_high; + __le32 addr_low; +}; + +/* This response structure is same in case of alloc/dealloc table, + * alloc/dealloc action-pair + */ +struct ice_aqc_acl_generic { + /* if alloc_id is below 0x1000 then alllocation failed due to + * unavailable resources, else this is set by FW to identify + * table allocation + */ + __le16 alloc_id; + + union { + /* to be used only in case of alloc/dealloc table */ + struct { + /* Index of the first TCAM block, otherwise set to 0xFF + * for a failed allocation + */ + u8 first_tcam; + /* Index of the last TCAM block. This index shall be + * set to the value of first_tcam for single TCAM block + * allocation, otherwise set to 0xFF for a failed + * allocation + */ + u8 last_tcam; + } table; + /* reserved in case of alloc/dealloc action-pair */ + struct { + __le16 reserved; + } act_pair; + } ops; + + /* index of first entry (in both TCAM and action memories), + * otherwise set to 0xFF for a failed allocation + */ + __le16 first_entry; + /* index of last entry (in both TCAM and action memories), + * otherwise set to 0xFF for a failed allocation + */ + __le16 last_entry; + + /* Each act_mem element specifies the order of the memory + * otherwise 0xFF + */ + u8 act_mem[ICE_AQC_MAX_ACTION_MEMORIES]; +}; + +/* ACL - allocate (indirect 0x0C14) scenario. This command doesn't have separate + * response buffer since original command buffer gets updated with + * 'scen_id' in case of success + */ +struct ice_aqc_acl_alloc_scen { + union { + struct { + u8 reserved[8]; + } cmd; + struct { + __le16 scen_id; + u8 reserved[6]; + } resp; + } ops; + __le32 addr_high; + __le32 addr_low; +}; + +/* ACL - de-allocate (direct 0x0C15) scenario. This command doesn't need + * separate response buffer since nothing to be returned as a response + * except status. 
+ */ +struct ice_aqc_acl_dealloc_scen { + __le16 scen_id; + u8 reserved[14]; +}; + +/* ACL - update (direct 0x0C1B) scenario */ +/* ACL - query (direct 0x0C23) scenario */ +struct ice_aqc_acl_update_query_scen { + __le16 scen_id; + u8 reserved[6]; + __le32 addr_high; + __le32 addr_low; +}; + +/* Input buffer format in case allocate/update ACL scenario and same format + * is used for response buffer in case of query ACL scenario. + * NOTE: de-allocate ACL scenario is direct command and doesn't require + * "buffer", hence no buffer format. + */ +struct ice_aqc_acl_scen { + struct { + /* Byte [x] selection for the TCAM key. This value must be set + * set to 0x0 for unusued TCAM. + * Only Bit 6..0 is used in each byte and MSB is reserved + */ +#define ICE_AQC_ACL_ALLOC_SCE_SELECT_M 0x7F +#define ICE_AQC_ACL_BYTE_SEL_BASE 0x20 +#define ICE_AQC_ACL_BYTE_SEL_BASE_PID 0x3E +#define ICE_AQC_ACL_BYTE_SEL_BASE_PKT_DIR ICE_AQC_ACL_BYTE_SEL_BASE +#define ICE_AQC_ACL_BYTE_SEL_BASE_RNG_CHK 0x3F + u8 tcam_select[5]; + /* TCAM Block entry masking. This value should be set to 0x0 for + * unused TCAM + */ + u8 chnk_msk; + /* Bit 0 : masks TCAM entries 0-63 + * Bit 1 : masks TCAM entries 64-127 + * Bit 2 to 7 : follow the pattern of bit 0 and 1 + */ +#define ICE_AQC_ACL_ALLOC_SCE_START_CMP BIT(0) +#define ICE_AQC_ACL_ALLOC_SCE_START_SET BIT(1) + u8 start_cmp_set; + + } tcam_cfg[ICE_AQC_ACL_SLICES]; + + /* Each byte, Bit 6..0: Action memory association to a TCAM block, + * otherwise it shall be set to 0x0 for disabled memory action. + * Bit 7 : Action memory enable for this scenario + */ +#define ICE_AQC_ACL_SCE_ACT_MEM_TCAM_ASSOC_M 0x7F +#define ICE_AQC_ACL_SCE_ACT_MEM_EN BIT(7) + u8 act_mem_cfg[ICE_AQC_MAX_ACTION_MEMORIES]; +}; + +/* ACL - allocate (indirect 0x0C16) counters */ +struct ice_aqc_acl_alloc_counters { + /* Amount of contiguous counters requested. Min value is 1 and + * max value is 255 + */ +#define ICE_AQC_ACL_ALLOC_CNT_MIN_AMT 0x1 +#define ICE_AQC_ACL_ALLOC_CNT_MAX_AMT 0xFF + u8 counter_amount; + + /* Counter type: 'single counter' which can be configured to count + * either bytes or packets + */ +#define ICE_AQC_ACL_CNT_TYPE_SINGLE 0x0 + + /* Counter type: 'counter pair' which counts number of bytes and number + * of packets. + */ +#define ICE_AQC_ACL_CNT_TYPE_DUAL 0x1 + /* requested counter type, single/dual */ + u8 counters_type; + + /* counter bank allocation shall be 0-3 for 'byte or packet counter' */ +#define ICE_AQC_ACL_MAX_CNT_SINGLE 0x3 +/* counter bank allocation shall be 0-1 for 'byte and packet counter dual' */ +#define ICE_AQC_ACL_MAX_CNT_DUAL 0x1 + /* requested counter bank allocation */ + u8 bank_alloc; + + u8 reserved; + + union { + /* Applicable only in case of command */ + struct { + u8 reserved[12]; + } cmd; + /* Applicable only in case of response */ +#define ICE_AQC_ACL_ALLOC_CNT_INVAL 0xFFFF + struct { + /* Index of first allocated counter. 0xFFFF in case + * of unsuccessful allocation + */ + __le16 first_counter; + /* Index of last allocated counter. 0xFFFF in case + * of unsuccessful allocation + */ + __le16 last_counter; + u8 rsvd[8]; + } resp; + } ops; +}; + +/* ACL - de-allocate (direct 0x0C17) counters */ +struct ice_aqc_acl_dealloc_counters { + /* first counter being released */ + __le16 first_counter; + /* last counter being released */ + __le16 last_counter; + /* requested counter type, single/dual */ + u8 counters_type; + /* requested counter bank allocation */ + u8 bank_alloc; + u8 reserved[10]; +}; + +/* ACL - de-allocate (direct 0x0C1A) resources. 
Used by SW to release all the + * resources allocated for it using a single command + */ +struct ice_aqc_acl_dealloc_res { + u8 reserved[16]; +}; + +/* ACL - program actionpair (indirect 0x0C1C) */ +/* ACL - query actionpair (indirect 0x0C25) */ +struct ice_aqc_acl_actpair { + /* action mem index to program/update */ + u8 act_mem_index; + u8 reserved; + /* The entry index in action memory to be programmed/updated */ + __le16 act_entry_index; + __le32 reserved2; + __le32 addr_high; + __le32 addr_low; +}; + +/* Input buffer format for program/query action-pair admin command */ +struct ice_acl_act_entry { + /* Action priority, values must be between 0..7 */ +#define ICE_AQC_ACT_PRIO_VALID_MAX 7 +#define ICE_AQC_ACT_PRIO_MSK MAKEMASK(0xff, 0) + u8 prio; + /* Action meta-data identifier. This field should be set to 0x0 + * for a NOP action + */ +#define ICE_AQC_ACT_MDID_S 8 +#define ICE_AQC_ACT_MDID_MSK MAKEMASK(0xff00, ICE_AQC_ACT_MDID_S) + u8 mdid; + /* Action value */ +#define ICE_AQC_ACT_VALUE_S 16 +#define ICE_AQC_ACT_VALUE_MSK MAKEMASK(0xffff0000, 16) + __le16 value; +}; + +#define ICE_ACL_NUM_ACT_PER_ACT_PAIR 2 +struct ice_aqc_actpair { + struct ice_acl_act_entry act[ICE_ACL_NUM_ACT_PER_ACT_PAIR]; +}; + +/* Generic format used to describe either input or response buffer + * for admin commands related to ACL profile + */ +struct ice_aqc_acl_prof_generic_frmt { + /* The first byte of the byte selection base is reserved to keep the + * first byte of the field vector where the packet direction info is + * available. Thus we should start at index 1 of the field vector to + * map its entries to the byte selection base. + */ +#define ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX 1 + /* In each byte: + * Bit 0..5 = Byte selection for the byte selection base from the + * extracted fields (expressed as byte offset in extracted fields). + * Applicable values are 0..63 + * Bit 6..7 = Reserved + */ +#define ICE_AQC_ACL_PROF_BYTE_SEL_ELEMS 30 + u8 byte_selection[ICE_AQC_ACL_PROF_BYTE_SEL_ELEMS]; + /* In each byte: + * Bit 0..4 = Word selection for the word selection base from the + * extracted fields (expressed as word offset in extracted fields). + * Applicable values are 0..31 + * Bit 5..7 = Reserved + */ +#define ICE_AQC_ACL_PROF_WORD_SEL_ELEMS 32 + u8 word_selection[ICE_AQC_ACL_PROF_WORD_SEL_ELEMS]; + /* In each byte: + * Bit 0..3 = Double word selection for the double-word selection base + * from the extracted fields (expressed as double-word offset in + * extracted fields). + * Applicable values are 0..15 + * Bit 4..7 = Reserved + */ +#define ICE_AQC_ACL_PROF_DWORD_SEL_ELEMS 15 + u8 dword_selection[ICE_AQC_ACL_PROF_DWORD_SEL_ELEMS]; + /* Scenario numbers for individual Physical Function's */ +#define ICE_AQC_ACL_PROF_PF_SCEN_NUM_ELEMS 8 + u8 pf_scenario_num[ICE_AQC_ACL_PROF_PF_SCEN_NUM_ELEMS]; +}; + +/* ACL - program ACL profile extraction (indirect 0x0C1D) */ +/* ACL - program ACL profile ranges (indirect 0x0C1E) */ +/* ACL - query ACL profile (indirect 0x0C21) */ +/* ACL - query ACL profile ranges (indirect 0x0C22) */ +struct ice_aqc_acl_profile { + u8 profile_id; /* Programmed/Updated profile ID */ + u8 reserved[7]; + __le32 addr_high; + __le32 addr_low; +}; + +/* Input buffer format for program profile extraction admin command and + * response buffer format for query profile admin command is as defined + * in struct ice_aqc_acl_prof_generic_frmt + */ + +/* Input buffer format for program profile ranges and query profile ranges + * admin commands. 
Same format is used for response buffer in case of query + * profile ranges command + */ +struct ice_acl_rng_data { + /* The range checker output shall be sent when the value + * related to this range checker is lower than low boundary + */ + __be16 low_boundary; + /* The range checker output shall be sent when the value + * related to this range checker is higher than high boundary + */ + __be16 high_boundary; + /* A value of '0' in bit shall clear the relevant bit input + * to the range checker + */ + __be16 mask; +}; + +struct ice_aqc_acl_profile_ranges { +#define ICE_AQC_ACL_PROF_RANGES_NUM_CFG 8 + struct ice_acl_rng_data checker_cfg[ICE_AQC_ACL_PROF_RANGES_NUM_CFG]; +}; + +/* ACL - program ACL entry (indirect 0x0C20) */ +/* ACL - query ACL entry (indirect 0x0C24) */ +struct ice_aqc_acl_entry { + u8 tcam_index; /* Updated TCAM block index */ + u8 reserved; + __le16 entry_index; /* Updated entry index */ + __le32 reserved2; + __le32 addr_high; + __le32 addr_low; +}; + +/* Input buffer format in case of program ACL entry and response buffer format + * in case of query ACL entry + */ +struct ice_aqc_acl_data { + /* Entry key and entry key invert are 40 bits wide. + * Byte 0..4 : entry key and Byte 5..7 are reserved + * Byte 8..12: entry key invert and Byte 13..15 are reserved + */ + struct { + u8 val[5]; + u8 reserved[3]; + } entry_key, entry_key_invert; +}; + +/* ACL - query ACL counter (direct 0x0C27) */ +struct ice_aqc_acl_query_counter { + /* Queried counter index */ + __le16 counter_index; + /* Queried counter bank */ + u8 counter_bank; + union { + struct { + u8 reserved[13]; + } cmd; + struct { + /* Holds counter value/packet counter value */ + u8 val[5]; + u8 reserved[8]; + } resp; + } ops; +}; + +/* Add Tx LAN Queues (indirect 0x0C30) */ +struct ice_aqc_add_txqs { + u8 num_qgrps; + u8 reserved[3]; + __le32 reserved1; + __le32 addr_high; + __le32 addr_low; +}; + +/* This is the descriptor of each queue entry for the Add Tx LAN Queues + * command (0x0C30). Only used within struct ice_aqc_add_tx_qgrp. + */ +struct ice_aqc_add_txqs_perq { + __le16 txq_id; + u8 rsvd[2]; + __le32 q_teid; + u8 txq_ctx[22]; + u8 rsvd2[2]; + struct ice_aqc_txsched_elem info; +}; + +/* The format of the command buffer for Add Tx LAN Queues (0x0C30) + * is an array of the following structs. Please note that the length of + * each struct ice_aqc_add_tx_qgrp is variable due + * to the variable number of queues in each group! 
+ */ +struct ice_aqc_add_tx_qgrp { + __le32 parent_teid; + u8 num_txqs; + u8 rsvd[3]; + struct ice_aqc_add_txqs_perq txqs[1]; +}; + +/* Disable Tx LAN Queues (indirect 0x0C31) */ +struct ice_aqc_dis_txqs { + u8 cmd_type; +#define ICE_AQC_Q_DIS_CMD_S 0 +#define ICE_AQC_Q_DIS_CMD_M (0x3 << ICE_AQC_Q_DIS_CMD_S) +#define ICE_AQC_Q_DIS_CMD_NO_FUNC_RESET (0 << ICE_AQC_Q_DIS_CMD_S) +#define ICE_AQC_Q_DIS_CMD_VM_RESET BIT(ICE_AQC_Q_DIS_CMD_S) +#define ICE_AQC_Q_DIS_CMD_VF_RESET (2 << ICE_AQC_Q_DIS_CMD_S) +#define ICE_AQC_Q_DIS_CMD_PF_RESET (3 << ICE_AQC_Q_DIS_CMD_S) +#define ICE_AQC_Q_DIS_CMD_SUBSEQ_CALL BIT(2) +#define ICE_AQC_Q_DIS_CMD_FLUSH_PIPE BIT(3) + u8 num_entries; + __le16 vmvf_and_timeout; +#define ICE_AQC_Q_DIS_VMVF_NUM_S 0 +#define ICE_AQC_Q_DIS_VMVF_NUM_M (0x3FF << ICE_AQC_Q_DIS_VMVF_NUM_S) +#define ICE_AQC_Q_DIS_TIMEOUT_S 10 +#define ICE_AQC_Q_DIS_TIMEOUT_M (0x3F << ICE_AQC_Q_DIS_TIMEOUT_S) + __le32 blocked_cgds; + __le32 addr_high; + __le32 addr_low; +}; + +/* The buffer for Disable Tx LAN Queues (indirect 0x0C31) + * contains the following structures, arrayed one after the + * other. + * Note: Since the q_id is 16 bits wide, if the + * number of queues is even, then 2 bytes of alignment MUST be + * added before the start of the next group, to allow correct + * alignment of the parent_teid field. + */ +struct ice_aqc_dis_txq_item { + __le32 parent_teid; + u8 num_qs; + u8 rsvd; + /* The length of the q_id array varies according to num_qs */ + __le16 q_id[1]; + /* This only applies from F8 onward */ +#define ICE_AQC_Q_DIS_BUF_ELEM_TYPE_S 15 +#define ICE_AQC_Q_DIS_BUF_ELEM_TYPE_LAN_Q \ + (0 << ICE_AQC_Q_DIS_BUF_ELEM_TYPE_S) +#define ICE_AQC_Q_DIS_BUF_ELEM_TYPE_RDMA_QSET \ + (1 << ICE_AQC_Q_DIS_BUF_ELEM_TYPE_S) +}; + +struct ice_aqc_dis_txq { + struct ice_aqc_dis_txq_item qgrps[1]; +}; + +/* Tx LAN Queues Cleanup Event (0x0C31) */ +struct ice_aqc_txqs_cleanup { + __le16 caller_opc; + __le16 cmd_tag; + u8 reserved[12]; +}; + +/* Move / Reconfigure Tx Queues (indirect 0x0C32) */ +struct ice_aqc_move_txqs { + u8 cmd_type; +#define ICE_AQC_Q_CMD_TYPE_S 0 +#define ICE_AQC_Q_CMD_TYPE_M (0x3 << ICE_AQC_Q_CMD_TYPE_S) +#define ICE_AQC_Q_CMD_TYPE_MOVE 1 +#define ICE_AQC_Q_CMD_TYPE_TC_CHANGE 2 +#define ICE_AQC_Q_CMD_TYPE_MOVE_AND_TC 3 +#define ICE_AQC_Q_CMD_SUBSEQ_CALL BIT(2) +#define ICE_AQC_Q_CMD_FLUSH_PIPE BIT(3) + u8 num_qs; + u8 rsvd; + u8 timeout; +#define ICE_AQC_Q_CMD_TIMEOUT_S 2 +#define ICE_AQC_Q_CMD_TIMEOUT_M (0x3F << ICE_AQC_Q_CMD_TIMEOUT_S) + __le32 blocked_cgds; + __le32 addr_high; + __le32 addr_low; +}; + +/* Per-queue data buffer for the Move Tx LAN Queues command/response */ +struct ice_aqc_move_txqs_elem { + __le16 txq_id; + u8 q_cgd; + u8 rsvd; + __le32 q_teid; +}; + +/* Indirect data buffer for the Move Tx LAN Queues command/response */ +struct ice_aqc_move_txqs_data { + __le32 src_teid; + __le32 dest_teid; + struct ice_aqc_move_txqs_elem txqs[1]; +}; + +/* Download Package (indirect 0x0C40) */ +/* Also used for Update Package (indirect 0x0C42) */ +struct ice_aqc_download_pkg { + u8 flags; +#define ICE_AQC_DOWNLOAD_PKG_LAST_BUF 0x01 + u8 reserved[3]; + __le32 reserved1; + __le32 addr_high; + __le32 addr_low; +}; + +struct ice_aqc_download_pkg_resp { + __le32 error_offset; + __le32 error_info; + __le32 addr_high; + __le32 addr_low; +}; + +/* Get Package Info List (indirect 0x0C43) */ +struct ice_aqc_get_pkg_info_list { + __le32 reserved1; + __le32 reserved2; + __le32 addr_high; + __le32 addr_low; +}; + +/* Version format for packages */ +struct ice_pkg_ver { + u8 major; + u8 
minor; + u8 update; + u8 draft; +}; + +#define ICE_PKG_NAME_SIZE 32 +#define ICE_SEG_NAME_SIZE 28 + +struct ice_aqc_get_pkg_info { + struct ice_pkg_ver ver; + char name[ICE_SEG_NAME_SIZE]; + __le32 track_id; + u8 is_in_nvm; + u8 is_active; + u8 is_active_at_boot; + u8 is_modified; +}; + +/* Get Package Info List response buffer format (0x0C43) */ +struct ice_aqc_get_pkg_info_resp { + __le32 count; + struct ice_aqc_get_pkg_info pkg_info[1]; +}; + +/* Driver Shared Parameters (direct, 0x0C90) */ +struct ice_aqc_driver_shared_params { + u8 set_or_get_op; +#define ICE_AQC_DRIVER_PARAM_OP_MASK BIT(0) +#define ICE_AQC_DRIVER_PARAM_SET 0 +#define ICE_AQC_DRIVER_PARAM_GET 1 + u8 param_indx; +#define ICE_AQC_DRIVER_PARAM_MAX_IDX 15 + u8 rsvd[2]; + __le32 param_val; + __le32 addr_high; + __le32 addr_low; +}; + +/* Lan Queue Overflow Event (direct, 0x1001) */ +struct ice_aqc_event_lan_overflow { + __le32 prtdcb_ruptq; + __le32 qtx_ctl; + u8 reserved[8]; +}; + +/** + * struct ice_aq_desc - Admin Queue (AQ) descriptor + * @flags: ICE_AQ_FLAG_* flags + * @opcode: AQ command opcode + * @datalen: length in bytes of indirect/external data buffer + * @retval: return value from firmware + * @cookie_h: opaque data high-half + * @cookie_l: opaque data low-half + * @params: command-specific parameters + * + * Descriptor format for commands the driver posts on the Admin Transmit Queue + * (ATQ). The firmware writes back onto the command descriptor and returns + * the result of the command. Asynchronous events that are not an immediate + * result of the command are written to the Admin Receive Queue (ARQ) using + * the same descriptor format. Descriptors are in little-endian notation with + * 32-bit words. + */ +struct ice_aq_desc { + __le16 flags; + __le16 opcode; + __le16 datalen; + __le16 retval; + __le32 cookie_high; + __le32 cookie_low; + union { + u8 raw[16]; + struct ice_aqc_generic generic; + struct ice_aqc_get_ver get_ver; + struct ice_aqc_driver_ver driver_ver; + struct ice_aqc_q_shutdown q_shutdown; + struct ice_aqc_req_res res_owner; + struct ice_aqc_manage_mac_read mac_read; + struct ice_aqc_manage_mac_write mac_write; + struct ice_aqc_clear_pxe clear_pxe; + struct ice_aqc_config_no_drop_policy no_drop; + struct ice_aqc_add_update_mir_rule add_update_rule; + struct ice_aqc_delete_mir_rule del_rule; + struct ice_aqc_list_caps get_cap; + struct ice_aqc_get_phy_caps get_phy; + struct ice_aqc_set_phy_cfg set_phy; + struct ice_aqc_restart_an restart_an; + struct ice_aqc_sff_eeprom read_write_sff_param; + struct ice_aqc_set_port_id_led set_port_id_led; + struct ice_aqc_get_sw_cfg get_sw_conf; + struct ice_aqc_sw_rules sw_rules; + struct ice_aqc_storm_cfg storm_conf; + struct ice_aqc_add_get_recipe add_get_recipe; + struct ice_aqc_recipe_to_profile recipe_to_profile; + struct ice_aqc_get_topo get_topo; + struct ice_aqc_sched_elem_cmd sched_elem_cmd; + struct ice_aqc_query_txsched_res query_sched_res; + struct ice_aqc_query_node_to_root query_node_to_root; + struct ice_aqc_cfg_l2_node_cgd cfg_l2_node_cgd; + struct ice_aqc_query_port_ets port_ets; + struct ice_aqc_rl_profile rl_profile; + struct ice_aqc_nvm nvm; + struct ice_aqc_nvm_cfg nvm_cfg; + struct ice_aqc_nvm_checksum nvm_checksum; + struct ice_aqc_pfc_ignore pfc_ignore; + struct ice_aqc_set_query_pfc_mode set_query_pfc_mode; + struct ice_aqc_set_dcb_params set_dcb_params; + struct ice_aqc_lldp_get_mib lldp_get_mib; + struct ice_aqc_lldp_set_mib_change lldp_set_event; + struct ice_aqc_lldp_add_delete_tlv lldp_add_delete_tlv; + struct 
ice_aqc_lldp_update_tlv lldp_update_tlv; + struct ice_aqc_lldp_stop lldp_stop; + struct ice_aqc_lldp_start lldp_start; + struct ice_aqc_lldp_set_local_mib lldp_set_mib; + struct ice_aqc_lldp_stop_start_specific_agent lldp_agent_ctrl; + struct ice_aqc_get_set_rss_lut get_set_rss_lut; + struct ice_aqc_get_set_rss_key get_set_rss_key; + struct ice_aqc_clear_fd_table clear_fd_table; + struct ice_aqc_acl_alloc_table alloc_table; + struct ice_aqc_acl_tbl_actpair tbl_actpair; + struct ice_aqc_acl_alloc_scen alloc_scen; + struct ice_aqc_acl_dealloc_scen dealloc_scen; + struct ice_aqc_acl_update_query_scen update_query_scen; + struct ice_aqc_acl_alloc_counters alloc_counters; + struct ice_aqc_acl_dealloc_counters dealloc_counters; + struct ice_aqc_acl_dealloc_res dealloc_res; + struct ice_aqc_acl_entry program_query_entry; + struct ice_aqc_acl_actpair program_query_actpair; + struct ice_aqc_acl_profile profile; + struct ice_aqc_acl_query_counter query_counter; + struct ice_aqc_add_txqs add_txqs; + struct ice_aqc_dis_txqs dis_txqs; + struct ice_aqc_move_txqs move_txqs; + struct ice_aqc_txqs_cleanup txqs_cleanup; + struct ice_aqc_add_get_update_free_vsi vsi_cmd; + struct ice_aqc_add_update_free_vsi_resp add_update_free_vsi_res; + struct ice_aqc_get_vsi_resp get_vsi_resp; + struct ice_aqc_download_pkg download_pkg; + struct ice_aqc_get_pkg_info_list get_pkg_info_list; + struct ice_aqc_driver_shared_params drv_shared_params; + struct ice_aqc_set_mac_lb set_mac_lb; + struct ice_aqc_alloc_free_res_cmd sw_res_ctrl; + struct ice_aqc_get_res_alloc get_res; + struct ice_aqc_get_allocd_res_desc get_res_desc; + struct ice_aqc_set_mac_cfg set_mac_cfg; + struct ice_aqc_set_event_mask set_event_mask; + struct ice_aqc_get_link_status get_link_status; + struct ice_aqc_event_lan_overflow lan_overflow; + struct ice_aqc_get_link_topo get_link_topo; + } params; +}; + +/* FW defined boundary for a large buffer, 4k >= Large buffer > 512 bytes */ +#define ICE_AQ_LG_BUF 512 + +/* Flags sub-structure + * |0 |1 |2 |3 |4 |5 |6 |7 |8 |9 |10 |11 |12 |13 |14 |15 | + * |DD |CMP|ERR|VFE| * * RESERVED * * |LB |RD |VFC|BUF|SI |EI |FE | + */ + +/* command flags and offsets */ +#define ICE_AQ_FLAG_DD_S 0 +#define ICE_AQ_FLAG_CMP_S 1 +#define ICE_AQ_FLAG_ERR_S 2 +#define ICE_AQ_FLAG_VFE_S 3 +#define ICE_AQ_FLAG_LB_S 9 +#define ICE_AQ_FLAG_RD_S 10 +#define ICE_AQ_FLAG_VFC_S 11 +#define ICE_AQ_FLAG_BUF_S 12 +#define ICE_AQ_FLAG_SI_S 13 +#define ICE_AQ_FLAG_EI_S 14 +#define ICE_AQ_FLAG_FE_S 15 + +#define ICE_AQ_FLAG_DD BIT(ICE_AQ_FLAG_DD_S) /* 0x1 */ +#define ICE_AQ_FLAG_CMP BIT(ICE_AQ_FLAG_CMP_S) /* 0x2 */ +#define ICE_AQ_FLAG_ERR BIT(ICE_AQ_FLAG_ERR_S) /* 0x4 */ +#define ICE_AQ_FLAG_VFE BIT(ICE_AQ_FLAG_VFE_S) /* 0x8 */ +#define ICE_AQ_FLAG_LB BIT(ICE_AQ_FLAG_LB_S) /* 0x200 */ +#define ICE_AQ_FLAG_RD BIT(ICE_AQ_FLAG_RD_S) /* 0x400 */ +#define ICE_AQ_FLAG_VFC BIT(ICE_AQ_FLAG_VFC_S) /* 0x800 */ +#define ICE_AQ_FLAG_BUF BIT(ICE_AQ_FLAG_BUF_S) /* 0x1000 */ +#define ICE_AQ_FLAG_SI BIT(ICE_AQ_FLAG_SI_S) /* 0x2000 */ +#define ICE_AQ_FLAG_EI BIT(ICE_AQ_FLAG_EI_S) /* 0x4000 */ +#define ICE_AQ_FLAG_FE BIT(ICE_AQ_FLAG_FE_S) /* 0x8000 */ + +/* error codes */ +enum ice_aq_err { + ICE_AQ_RC_OK = 0, /* Success */ + ICE_AQ_RC_EPERM = 1, /* Operation not permitted */ + ICE_AQ_RC_ENOENT = 2, /* No such element */ + ICE_AQ_RC_ESRCH = 3, /* Bad opcode */ + ICE_AQ_RC_EINTR = 4, /* Operation interrupted */ + ICE_AQ_RC_EIO = 5, /* I/O error */ + ICE_AQ_RC_ENXIO = 6, /* No such resource */ + ICE_AQ_RC_E2BIG = 7, /* Arg too long */ + ICE_AQ_RC_EAGAIN = 8, /* Try 
again */ + ICE_AQ_RC_ENOMEM = 9, /* Out of memory */ + ICE_AQ_RC_EACCES = 10, /* Permission denied */ + ICE_AQ_RC_EFAULT = 11, /* Bad address */ + ICE_AQ_RC_EBUSY = 12, /* Device or resource busy */ + ICE_AQ_RC_EEXIST = 13, /* Object already exists */ + ICE_AQ_RC_EINVAL = 14, /* Invalid argument */ + ICE_AQ_RC_ENOTTY = 15, /* Not a typewriter */ + ICE_AQ_RC_ENOSPC = 16, /* No space left or allocation failure */ + ICE_AQ_RC_ENOSYS = 17, /* Function not implemented */ + ICE_AQ_RC_ERANGE = 18, /* Parameter out of range */ + ICE_AQ_RC_EFLUSHED = 19, /* Cmd flushed due to prev cmd error */ + ICE_AQ_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */ + ICE_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */ + ICE_AQ_RC_EFBIG = 22, /* File too big */ + ICE_AQ_RC_ESBCOMP = 23, /* SB-IOSF completion unsuccessful */ + ICE_AQ_RC_ENOSEC = 24, /* Missing security manifest */ + ICE_AQ_RC_EBADSIG = 25, /* Bad RSA signature */ + ICE_AQ_RC_ESVN = 26, /* SVN number prohibits this package */ + ICE_AQ_RC_EBADMAN = 27, /* Manifest hash mismatch */ + ICE_AQ_RC_EBADBUF = 28, /* Buffer hash mismatches manifest */ + ICE_AQ_RC_EACCES_BMCU = 29, /* BMC Update in progress */ +}; + +/* Admin Queue command opcodes */ +enum ice_adminq_opc { + /* AQ commands */ + ice_aqc_opc_get_ver = 0x0001, + ice_aqc_opc_driver_ver = 0x0002, + ice_aqc_opc_q_shutdown = 0x0003, + ice_aqc_opc_get_exp_err = 0x0005, + + /* resource ownership */ + ice_aqc_opc_req_res = 0x0008, + ice_aqc_opc_release_res = 0x0009, + + /* device/function capabilities */ + ice_aqc_opc_list_func_caps = 0x000A, + ice_aqc_opc_list_dev_caps = 0x000B, + + /* manage MAC address */ + ice_aqc_opc_manage_mac_read = 0x0107, + ice_aqc_opc_manage_mac_write = 0x0108, + + /* PXE */ + ice_aqc_opc_clear_pxe_mode = 0x0110, + + ice_aqc_opc_config_no_drop_policy = 0x0112, + + /* internal switch commands */ + ice_aqc_opc_get_sw_cfg = 0x0200, + + /* Alloc/Free/Get Resources */ + ice_aqc_opc_get_res_alloc = 0x0204, + ice_aqc_opc_alloc_res = 0x0208, + ice_aqc_opc_free_res = 0x0209, + ice_aqc_opc_get_allocd_res_desc = 0x020A, + + /* VSI commands */ + ice_aqc_opc_add_vsi = 0x0210, + ice_aqc_opc_update_vsi = 0x0211, + ice_aqc_opc_get_vsi_params = 0x0212, + ice_aqc_opc_free_vsi = 0x0213, + + /* Mirroring rules - add/update, delete */ + ice_aqc_opc_add_update_mir_rule = 0x0260, + ice_aqc_opc_del_mir_rule = 0x0261, + + /* storm configuration */ + ice_aqc_opc_set_storm_cfg = 0x0280, + ice_aqc_opc_get_storm_cfg = 0x0281, + + /* recipe commands */ + ice_aqc_opc_add_recipe = 0x0290, + ice_aqc_opc_recipe_to_profile = 0x0291, + ice_aqc_opc_get_recipe = 0x0292, + ice_aqc_opc_get_recipe_to_profile = 0x0293, + + /* switch rules population commands */ + ice_aqc_opc_add_sw_rules = 0x02A0, + ice_aqc_opc_update_sw_rules = 0x02A1, + ice_aqc_opc_remove_sw_rules = 0x02A2, + ice_aqc_opc_get_sw_rules = 0x02A3, + ice_aqc_opc_clear_pf_cfg = 0x02A4, + + /* DCB commands */ + ice_aqc_opc_pfc_ignore = 0x0301, + ice_aqc_opc_query_pfc_mode = 0x0302, + ice_aqc_opc_set_pfc_mode = 0x0303, + ice_aqc_opc_set_dcb_params = 0x0306, + + /* transmit scheduler commands */ + ice_aqc_opc_get_dflt_topo = 0x0400, + ice_aqc_opc_add_sched_elems = 0x0401, + ice_aqc_opc_cfg_sched_elems = 0x0403, + ice_aqc_opc_get_sched_elems = 0x0404, + ice_aqc_opc_move_sched_elems = 0x0408, + ice_aqc_opc_suspend_sched_elems = 0x0409, + ice_aqc_opc_resume_sched_elems = 0x040A, + ice_aqc_opc_query_port_ets = 0x040E, + ice_aqc_opc_delete_sched_elems = 0x040F, + ice_aqc_opc_add_rl_profiles = 0x0410, + ice_aqc_opc_query_rl_profiles = 0x0411, 
+ ice_aqc_opc_query_sched_res = 0x0412, + ice_aqc_opc_query_node_to_root = 0x0413, + ice_aqc_opc_cfg_l2_node_cgd = 0x0414, + ice_aqc_opc_remove_rl_profiles = 0x0415, + + /* PHY commands */ + ice_aqc_opc_get_phy_caps = 0x0600, + ice_aqc_opc_set_phy_cfg = 0x0601, + ice_aqc_opc_set_mac_cfg = 0x0603, + ice_aqc_opc_restart_an = 0x0605, + ice_aqc_opc_get_link_status = 0x0607, + ice_aqc_opc_set_event_mask = 0x0613, + ice_aqc_opc_set_mac_lb = 0x0620, + ice_aqc_opc_get_link_topo = 0x06E0, + ice_aqc_opc_set_port_id_led = 0x06E9, + ice_aqc_opc_get_port_options = 0x06EA, + ice_aqc_opc_set_port_option = 0x06EB, + ice_aqc_opc_set_gpio = 0x06EC, + ice_aqc_opc_get_gpio = 0x06ED, + ice_aqc_opc_sff_eeprom = 0x06EE, + + /* NVM commands */ + ice_aqc_opc_nvm_read = 0x0701, + ice_aqc_opc_nvm_erase = 0x0702, + ice_aqc_opc_nvm_write = 0x0703, + ice_aqc_opc_nvm_cfg_read = 0x0704, + ice_aqc_opc_nvm_cfg_write = 0x0705, + ice_aqc_opc_nvm_checksum = 0x0706, + ice_aqc_opc_nvm_write_activate = 0x0707, + ice_aqc_opc_nvm_sr_dump = 0x0707, + ice_aqc_opc_nvm_save_factory_settings = 0x0708, + ice_aqc_opc_nvm_update_empr = 0x0709, + + /* LLDP commands */ + ice_aqc_opc_lldp_get_mib = 0x0A00, + ice_aqc_opc_lldp_set_mib_change = 0x0A01, + ice_aqc_opc_lldp_add_tlv = 0x0A02, + ice_aqc_opc_lldp_update_tlv = 0x0A03, + ice_aqc_opc_lldp_delete_tlv = 0x0A04, + ice_aqc_opc_lldp_stop = 0x0A05, + ice_aqc_opc_lldp_start = 0x0A06, + ice_aqc_opc_get_cee_dcb_cfg = 0x0A07, + ice_aqc_opc_lldp_set_local_mib = 0x0A08, + ice_aqc_opc_lldp_stop_start_specific_agent = 0x0A09, + + /* RSS commands */ + ice_aqc_opc_set_rss_key = 0x0B02, + ice_aqc_opc_set_rss_lut = 0x0B03, + ice_aqc_opc_get_rss_key = 0x0B04, + ice_aqc_opc_get_rss_lut = 0x0B05, + ice_aqc_opc_clear_fd_table = 0x0B06, + /* ACL commands */ + ice_aqc_opc_alloc_acl_tbl = 0x0C10, + ice_aqc_opc_dealloc_acl_tbl = 0x0C11, + ice_aqc_opc_alloc_acl_actpair = 0x0C12, + ice_aqc_opc_dealloc_acl_actpair = 0x0C13, + ice_aqc_opc_alloc_acl_scen = 0x0C14, + ice_aqc_opc_dealloc_acl_scen = 0x0C15, + ice_aqc_opc_alloc_acl_counters = 0x0C16, + ice_aqc_opc_dealloc_acl_counters = 0x0C17, + ice_aqc_opc_dealloc_acl_res = 0x0C1A, + ice_aqc_opc_update_acl_scen = 0x0C1B, + ice_aqc_opc_program_acl_actpair = 0x0C1C, + ice_aqc_opc_program_acl_prof_extraction = 0x0C1D, + ice_aqc_opc_program_acl_prof_ranges = 0x0C1E, + ice_aqc_opc_program_acl_entry = 0x0C20, + ice_aqc_opc_query_acl_prof = 0x0C21, + ice_aqc_opc_query_acl_prof_ranges = 0x0C22, + ice_aqc_opc_query_acl_scen = 0x0C23, + ice_aqc_opc_query_acl_entry = 0x0C24, + ice_aqc_opc_query_acl_actpair = 0x0C25, + ice_aqc_opc_query_acl_counter = 0x0C27, + + /* Tx queue handling commands/events */ + ice_aqc_opc_add_txqs = 0x0C30, + ice_aqc_opc_dis_txqs = 0x0C31, + ice_aqc_opc_txqs_cleanup = 0x0C31, + ice_aqc_opc_move_recfg_txqs = 0x0C32, + + /* package commands */ + ice_aqc_opc_download_pkg = 0x0C40, + ice_aqc_opc_upload_section = 0x0C41, + ice_aqc_opc_update_pkg = 0x0C42, + ice_aqc_opc_get_pkg_info_list = 0x0C43, + + ice_aqc_opc_driver_shared_params = 0x0C90, + + /* Standalone Commands/Events */ + ice_aqc_opc_event_lan_overflow = 0x1001, +}; + +#endif /* _ICE_ADMINQ_CMD_H_ */ diff --git a/src/spdk/dpdk/drivers/net/ice/base/ice_alloc.h b/src/spdk/dpdk/drivers/net/ice/base/ice_alloc.h new file mode 100644 index 000000000..cfe919940 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ice/base/ice_alloc.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + +#ifndef _ICE_ALLOC_H_ +#define _ICE_ALLOC_H_ + +/* Memory types */ 
+enum ice_memset_type { + ICE_NONDMA_MEM = 0, + ICE_DMA_MEM +}; + +/* Memcpy types */ +enum ice_memcpy_type { + ICE_NONDMA_TO_NONDMA = 0, + ICE_NONDMA_TO_DMA, + ICE_DMA_TO_DMA, + ICE_DMA_TO_NONDMA +}; + +#endif /* _ICE_ALLOC_H_ */ diff --git a/src/spdk/dpdk/drivers/net/ice/base/ice_bitops.h b/src/spdk/dpdk/drivers/net/ice/base/ice_bitops.h new file mode 100644 index 000000000..87f47b238 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ice/base/ice_bitops.h @@ -0,0 +1,405 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + +#ifndef _ICE_BITOPS_H_ +#define _ICE_BITOPS_H_ + +/* Define the size of the bitmap chunk */ +typedef u32 ice_bitmap_t; + +/* Number of bits per bitmap chunk */ +#define BITS_PER_CHUNK (BITS_PER_BYTE * sizeof(ice_bitmap_t)) +/* Determine which chunk a bit belongs in */ +#define BIT_CHUNK(nr) ((nr) / BITS_PER_CHUNK) +/* How many chunks are required to store this many bits */ +#define BITS_TO_CHUNKS(sz) DIVIDE_AND_ROUND_UP((sz), BITS_PER_CHUNK) +/* Which bit inside a chunk this bit corresponds to */ +#define BIT_IN_CHUNK(nr) ((nr) % BITS_PER_CHUNK) +/* How many bits are valid in the last chunk, assumes nr > 0 */ +#define LAST_CHUNK_BITS(nr) ((((nr) - 1) % BITS_PER_CHUNK) + 1) +/* Generate a bitmask of valid bits in the last chunk, assumes nr > 0 */ +#define LAST_CHUNK_MASK(nr) (((ice_bitmap_t)~0) >> \ + (BITS_PER_CHUNK - LAST_CHUNK_BITS(nr))) + +#define ice_declare_bitmap(A, sz) \ + ice_bitmap_t A[BITS_TO_CHUNKS(sz)] + +static inline bool ice_is_bit_set_internal(u16 nr, const ice_bitmap_t *bitmap) +{ + return !!(*bitmap & BIT(nr)); +} + +/* + * If atomic version of the bitops are required, each specific OS + * implementation will need to implement OS/platform specific atomic + * version of the functions below: + * + * ice_clear_bit_internal + * ice_set_bit_internal + * ice_test_and_clear_bit_internal + * ice_test_and_set_bit_internal + * + * and define macro ICE_ATOMIC_BITOPS to overwrite the default non-atomic + * implementation. + */ +static inline void ice_clear_bit_internal(u16 nr, ice_bitmap_t *bitmap) +{ + *bitmap &= ~BIT(nr); +} + +static inline void ice_set_bit_internal(u16 nr, ice_bitmap_t *bitmap) +{ + *bitmap |= BIT(nr); +} + +static inline bool ice_test_and_clear_bit_internal(u16 nr, + ice_bitmap_t *bitmap) +{ + if (ice_is_bit_set_internal(nr, bitmap)) { + ice_clear_bit_internal(nr, bitmap); + return true; + } + return false; +} + +static inline bool ice_test_and_set_bit_internal(u16 nr, ice_bitmap_t *bitmap) +{ + if (ice_is_bit_set_internal(nr, bitmap)) + return true; + + ice_set_bit_internal(nr, bitmap); + return false; +} + +/** + * ice_is_bit_set - Check state of a bit in a bitmap + * @bitmap: the bitmap to check + * @nr: the bit to check + * + * Returns true if bit nr of bitmap is set. False otherwise. Assumes that nr + * is less than the size of the bitmap. + */ +static inline bool ice_is_bit_set(const ice_bitmap_t *bitmap, u16 nr) +{ + return ice_is_bit_set_internal(BIT_IN_CHUNK(nr), + &bitmap[BIT_CHUNK(nr)]); +} + +/** + * ice_clear_bit - Clear a bit in a bitmap + * @bitmap: the bitmap to change + * @nr: the bit to change + * + * Clears the bit nr in bitmap. Assumes that nr is less than the size of the + * bitmap. + */ +static inline void ice_clear_bit(u16 nr, ice_bitmap_t *bitmap) +{ + ice_clear_bit_internal(BIT_IN_CHUNK(nr), &bitmap[BIT_CHUNK(nr)]); +} + +/** + * ice_set_bit - Set a bit in a bitmap + * @bitmap: the bitmap to change + * @nr: the bit to change + * + * Sets the bit nr in bitmap. 
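Editor's note on the chunk macros in ice_bitops.h above: they are easiest to read with a concrete size plugged in. The sketch below is illustrative only and is not part of the imported sources; it assumes the osdep layer defines BITS_PER_BYTE as 8, DIVIDE_AND_ROUND_UP as a ceiling division, and BIT() as the usual single-bit mask, so BITS_PER_CHUNK evaluates to 32 for the u32 ice_bitmap_t chunks.

/* Hypothetical illustration (not part of the patch) of the chunk math for a
 * 90-bit bitmap: BITS_TO_CHUNKS(90) == 3, so ice_declare_bitmap() emits three
 * u32 chunks.
 */
static inline ice_bitmap_t example_chunk_math(void)
{
	ice_declare_bitmap(bmp, 90) = { 0 };

	/* Bit 70 lands in chunk BIT_CHUNK(70) == 2 at offset BIT_IN_CHUNK(70) == 6 */
	bmp[BIT_CHUNK(70)] |= BIT(BIT_IN_CHUNK(70));

	/* Only LAST_CHUNK_BITS(90) == 26 bits of the last chunk are valid;
	 * LAST_CHUNK_MASK(90) == 0x03FFFFFF is the mask the helpers below use
	 * to avoid touching the six unused bits.
	 */
	return bmp[BIT_CHUNK(70)] & LAST_CHUNK_MASK(90);
}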
Assumes that nr is less than the size of the + * bitmap. + */ +static inline void ice_set_bit(u16 nr, ice_bitmap_t *bitmap) +{ + ice_set_bit_internal(BIT_IN_CHUNK(nr), &bitmap[BIT_CHUNK(nr)]); +} + +/** + * ice_test_and_clear_bit - Atomically clear a bit and return the old bit value + * @nr: the bit to change + * @bitmap: the bitmap to change + * + * Check and clear the bit nr in bitmap. Assumes that nr is less than the size + * of the bitmap. + */ +static inline bool +ice_test_and_clear_bit(u16 nr, ice_bitmap_t *bitmap) +{ + return ice_test_and_clear_bit_internal(BIT_IN_CHUNK(nr), + &bitmap[BIT_CHUNK(nr)]); +} + +/** + * ice_test_and_set_bit - Atomically set a bit and return the old bit value + * @nr: the bit to change + * @bitmap: the bitmap to change + * + * Check and set the bit nr in bitmap. Assumes that nr is less than the size of + * the bitmap. + */ +static inline bool +ice_test_and_set_bit(u16 nr, ice_bitmap_t *bitmap) +{ + return ice_test_and_set_bit_internal(BIT_IN_CHUNK(nr), + &bitmap[BIT_CHUNK(nr)]); +} + +/* ice_zero_bitmap - set bits of bitmap to zero. + * @bmp: bitmap to set zeros + * @size: Size of the bitmaps in bits + * + * Set all of the bits in a bitmap to zero. Note that this function assumes it + * operates on an ice_bitmap_t which was declared using ice_declare_bitmap. It + * will zero every bit in the last chunk, even if those bits are beyond the + * size. + */ +static inline void ice_zero_bitmap(ice_bitmap_t *bmp, u16 size) +{ + ice_memset(bmp, 0, BITS_TO_CHUNKS(size) * sizeof(ice_bitmap_t), + ICE_NONDMA_MEM); +} + +/** + * ice_and_bitmap - bitwise AND 2 bitmaps and store result in dst bitmap + * @dst: Destination bitmap that receives the result of the operation + * @bmp1: The first bitmap to intersect + * @bmp2: The second bitmap to intersect with the first + * @size: Size of the bitmaps in bits + * + * This function performs a bitwise AND on two "source" bitmaps of the same size + * and stores the result to "dst" bitmap. The "dst" bitmap must be of the same + * size as the "source" bitmaps to avoid buffer overflows. This function returns + * a non-zero value if at least one bit location from both "source" bitmaps is + * non-zero. + */ +static inline int +ice_and_bitmap(ice_bitmap_t *dst, const ice_bitmap_t *bmp1, + const ice_bitmap_t *bmp2, u16 size) +{ + ice_bitmap_t res = 0, mask; + u16 i; + + /* Handle all but the last chunk */ + for (i = 0; i < BITS_TO_CHUNKS(size) - 1; i++) { + dst[i] = bmp1[i] & bmp2[i]; + res |= dst[i]; + } + + /* We want to take care not to modify any bits outside of the bitmap + * size, even in the destination bitmap. Thus, we won't directly + * assign the last bitmap, but instead use a bitmask to ensure we only + * modify bits which are within the size, and leave any bits above the + * size value alone. + */ + mask = LAST_CHUNK_MASK(size); + dst[i] = (dst[i] & ~mask) | ((bmp1[i] & bmp2[i]) & mask); + res |= dst[i] & mask; + + return res != 0; +} + +/** + * ice_or_bitmap - bitwise OR 2 bitmaps and store result in dst bitmap + * @dst: Destination bitmap that receives the result of the operation + * @bmp1: The first bitmap to OR + * @bmp2: The second bitmap to OR with the first + * @size: Size of the bitmaps in bits + * + * This function performs a bitwise OR on two "source" bitmaps of the same size + * and stores the result to "dst" bitmap. The "dst" bitmap must be of the same + * size as the "source" bitmaps to avoid buffer overflows.
+ */ +static inline void +ice_or_bitmap(ice_bitmap_t *dst, const ice_bitmap_t *bmp1, + const ice_bitmap_t *bmp2, u16 size) +{ + ice_bitmap_t mask; + u16 i; + + /* Handle all but last chunk*/ + for (i = 0; i < BITS_TO_CHUNKS(size) - 1; i++) + dst[i] = bmp1[i] | bmp2[i]; + + /* We want to only OR bits within the size. Furthermore, we also do + * not want to modify destination bits which are beyond the specified + * size. Use a bitmask to ensure that we only modify the bits that are + * within the specified size. + */ + mask = LAST_CHUNK_MASK(size); + dst[i] = (dst[i] & ~mask) | ((bmp1[i] | bmp2[i]) & mask); +} + +/** + * ice_xor_bitmap - bitwise XOR 2 bitmaps and store result in dst bitmap + * @dst: Destination bitmap that receives the result of the operation + * @bmp1: The first bitmap of XOR operation + * @bmp2: The second bitmap to XOR with the first + * @size: Size of the bitmaps in bits + * + * This function performs a bitwise XOR on two "source" bitmaps of the same size + * and stores the result to "dst" bitmap. The "dst" bitmap must be of the same + * size as the "source" bitmaps to avoid buffer overflows. + */ +static inline void +ice_xor_bitmap(ice_bitmap_t *dst, const ice_bitmap_t *bmp1, + const ice_bitmap_t *bmp2, u16 size) +{ + ice_bitmap_t mask; + u16 i; + + /* Handle all but last chunk*/ + for (i = 0; i < BITS_TO_CHUNKS(size) - 1; i++) + dst[i] = bmp1[i] ^ bmp2[i]; + + /* We want to only XOR bits within the size. Furthermore, we also do + * not want to modify destination bits which are beyond the specified + * size. Use a bitmask to ensure that we only modify the bits that are + * within the specified size. + */ + mask = LAST_CHUNK_MASK(size); + dst[i] = (dst[i] & ~mask) | ((bmp1[i] ^ bmp2[i]) & mask); +} + +/** + * ice_andnot_bitmap - bitwise ANDNOT 2 bitmaps and store result in dst bitmap + * @dst: Destination bitmap that receives the result of the operation + * @bmp1: The first bitmap of ANDNOT operation + * @bmp2: The second bitmap to ANDNOT with the first + * @size: Size of the bitmaps in bits + * + * This function performs a bitwise ANDNOT on two "source" bitmaps of the same + * size, and stores the result to "dst" bitmap. The "dst" bitmap must be of the + * same size as the "source" bitmaps to avoid buffer overflows. + */ +static inline void +ice_andnot_bitmap(ice_bitmap_t *dst, const ice_bitmap_t *bmp1, + const ice_bitmap_t *bmp2, u16 size) +{ + ice_bitmap_t mask; + u16 i; + + /* Handle all but last chunk*/ + for (i = 0; i < BITS_TO_CHUNKS(size) - 1; i++) + dst[i] = bmp1[i] & ~bmp2[i]; + + /* We want to only clear bits within the size. Furthermore, we also do + * not want to modify destination bits which are beyond the specified + * size. Use a bitmask to ensure that we only modify the bits that are + * within the specified size. + */ + mask = LAST_CHUNK_MASK(size); + dst[i] = (dst[i] & ~mask) | ((bmp1[i] & ~bmp2[i]) & mask); +} + +/** + * ice_find_next_bit - Find the index of the next set bit of a bitmap + * @bitmap: the bitmap to scan + * @size: the size in bits of the bitmap + * @offset: the offset to start at + * + * Scans the bitmap and returns the index of the first set bit which is equal + * to or after the specified offset. Will return size if no bits are set.
+ */ +static inline u16 +ice_find_next_bit(const ice_bitmap_t *bitmap, u16 size, u16 offset) +{ + u16 i, j; + + if (offset >= size) + return size; + + /* Since the starting position may not be directly on a chunk + * boundary, we need to be careful to handle the first chunk specially + */ + i = BIT_CHUNK(offset); + if (bitmap[i] != 0) { + u16 off = i * BITS_PER_CHUNK; + + for (j = offset % BITS_PER_CHUNK; j < BITS_PER_CHUNK; j++) { + if (ice_is_bit_set(bitmap, off + j)) + return min(size, (u16)(off + j)); + } + } + + /* Now we handle the remaining chunks, if any */ + for (i++; i < BITS_TO_CHUNKS(size); i++) { + if (bitmap[i] != 0) { + u16 off = i * BITS_PER_CHUNK; + + for (j = 0; j < BITS_PER_CHUNK; j++) { + if (ice_is_bit_set(bitmap, off + j)) + return min(size, (u16)(off + j)); + } + } + } + return size; +} + +/** + * ice_find_first_bit - Find the index of the first set bit of a bitmap + * @bitmap: the bitmap to scan + * @size: the size in bits of the bitmap + * + * Scans the bitmap and returns the index of the first set bit. Will return + * size if no bits are set. + */ +static inline u16 ice_find_first_bit(const ice_bitmap_t *bitmap, u16 size) +{ + return ice_find_next_bit(bitmap, size, 0); +} + +/** + * ice_is_any_bit_set - Return true if any bit in the bitmap is set + * @bitmap: the bitmap to check + * @size: the size of the bitmap + * + * Equivalent to checking if ice_find_first_bit returns a value less than the + * bitmap size. + */ +static inline bool ice_is_any_bit_set(ice_bitmap_t *bitmap, u16 size) +{ + return ice_find_first_bit(bitmap, size) < size; +} + +/** + * ice_cp_bitmap - copy bitmaps. + * @dst: bitmap destination + * @src: bitmap to copy from + * @size: Size of the bitmaps in bits + * + * This function copies the bitmap from src to dst. Note that this function assumes + * it is operating on a bitmap declared using ice_declare_bitmap. It will copy + * the entire last chunk even if it contains bits beyond the size. + */ +static inline void ice_cp_bitmap(ice_bitmap_t *dst, ice_bitmap_t *src, u16 size) +{ + ice_memcpy(dst, src, BITS_TO_CHUNKS(size) * sizeof(ice_bitmap_t), + ICE_NONDMA_TO_NONDMA); +} + +/** + * ice_cmp_bitmap - compares two bitmaps. + * @bmp1: the bitmap to compare + * @bmp2: the bitmap to compare with bmp1 + * @size: Size of the bitmaps in bits + * + * This function compares two bitmaps and returns the result as true or false.
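Editor's note: taken together, the helpers in this header form a small fixed-size bitmap API. A minimal, hypothetical usage sketch follows; it is not part of the imported sources, and the name example_bitmap_usage is illustrative only. All calls use the signatures declared above.

static inline u16 example_bitmap_usage(void)
{
	ice_declare_bitmap(rxq, 128);
	ice_declare_bitmap(txq, 128);
	ice_declare_bitmap(shared, 128);

	ice_zero_bitmap(rxq, 128);
	ice_zero_bitmap(txq, 128);
	ice_zero_bitmap(shared, 128);

	ice_set_bit(5, rxq);
	ice_set_bit(5, txq);
	ice_set_bit(70, txq);

	/* shared = rxq & txq; ice_and_bitmap() returns non-zero because bit 5
	 * is set in both source bitmaps
	 */
	if (!ice_and_bitmap(shared, rxq, txq, 128))
		return 128;

	/* Returns 5 here; a return equal to the size (128) would mean no bit set */
	return ice_find_first_bit(shared, 128);
}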
+ */ +static inline bool +ice_cmp_bitmap(ice_bitmap_t *bmp1, ice_bitmap_t *bmp2, u16 size) +{ + ice_bitmap_t mask; + u16 i; + + /* Handle all but last chunk*/ + for (i = 0; i < BITS_TO_CHUNKS(size) - 1; i++) + if (bmp1[i] != bmp2[i]) + return false; + + /* We want to only compare bits within the size.*/ + mask = LAST_CHUNK_MASK(size); + if ((bmp1[i] & mask) != (bmp2[i] & mask)) + return false; + + return true; +} + +#endif /* _ICE_BITOPS_H_ */ diff --git a/src/spdk/dpdk/drivers/net/ice/base/ice_common.c b/src/spdk/dpdk/drivers/net/ice/base/ice_common.c new file mode 100644 index 000000000..17ffdee00 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ice/base/ice_common.c @@ -0,0 +1,4409 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + +#include "ice_common.h" +#include "ice_sched.h" +#include "ice_adminq_cmd.h" + +#include "ice_flow.h" +#include "ice_switch.h" + +#define ICE_PF_RESET_WAIT_COUNT 300 + +/** + * ice_set_mac_type - Sets MAC type + * @hw: pointer to the HW structure + * + * This function sets the MAC type of the adapter based on the + * vendor ID and device ID stored in the HW structure. + */ +static enum ice_status ice_set_mac_type(struct ice_hw *hw) +{ + ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); + + if (hw->vendor_id != ICE_INTEL_VENDOR_ID) + return ICE_ERR_DEVICE_NOT_SUPPORTED; + + switch (hw->device_id) { + case ICE_DEV_ID_E810C_BACKPLANE: + case ICE_DEV_ID_E810C_QSFP: + case ICE_DEV_ID_E810C_SFP: + case ICE_DEV_ID_E810_XXV_BACKPLANE: + case ICE_DEV_ID_E810_XXV_QSFP: + case ICE_DEV_ID_E810_XXV_SFP: + hw->mac_type = ICE_MAC_E810; + break; + case ICE_DEV_ID_E822C_10G_BASE_T: + case ICE_DEV_ID_E822C_BACKPLANE: + case ICE_DEV_ID_E822C_QSFP: + case ICE_DEV_ID_E822C_SFP: + case ICE_DEV_ID_E822C_SGMII: + case ICE_DEV_ID_E822L_10G_BASE_T: + case ICE_DEV_ID_E822L_BACKPLANE: + case ICE_DEV_ID_E822L_SFP: + case ICE_DEV_ID_E822L_SGMII: + hw->mac_type = ICE_MAC_GENERIC; + break; + default: + hw->mac_type = ICE_MAC_UNKNOWN; + break; + } + + ice_debug(hw, ICE_DBG_INIT, "mac_type: %d\n", hw->mac_type); + return ICE_SUCCESS; +} + +/** + * ice_clear_pf_cfg - Clear PF configuration + * @hw: pointer to the hardware structure + * + * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port + * configuration, flow director filters, etc.). + */ +enum ice_status ice_clear_pf_cfg(struct ice_hw *hw) +{ + struct ice_aq_desc desc; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg); + + return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); +} + +/** + * ice_aq_manage_mac_read - manage MAC address read command + * @hw: pointer to the HW struct + * @buf: a virtual buffer to hold the manage MAC read response + * @buf_size: Size of the virtual buffer + * @cd: pointer to command details structure or NULL + * + * This function is used to return per PF station MAC address (0x0107). + * NOTE: Upon successful completion of this command, MAC address information + * is returned in user specified buffer. Please interpret user specified + * buffer as "manage_mac_read" response. + * Response such as various MAC addresses are stored in HW struct (port.mac) + * ice_aq_discover_caps is expected to be called before this function is called. 
+ */ +static enum ice_status +ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size, + struct ice_sq_cd *cd) +{ + struct ice_aqc_manage_mac_read_resp *resp; + struct ice_aqc_manage_mac_read *cmd; + struct ice_aq_desc desc; + enum ice_status status; + u16 flags; + u8 i; + + cmd = &desc.params.mac_read; + + if (buf_size < sizeof(*resp)) + return ICE_ERR_BUF_TOO_SHORT; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read); + + status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); + if (status) + return status; + + resp = (struct ice_aqc_manage_mac_read_resp *)buf; + flags = LE16_TO_CPU(cmd->flags) & ICE_AQC_MAN_MAC_READ_M; + + if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) { + ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n"); + return ICE_ERR_CFG; + } + + /* A single port can report up to two (LAN and WoL) addresses */ + for (i = 0; i < cmd->num_addr; i++) + if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) { + ice_memcpy(hw->port_info->mac.lan_addr, + resp[i].mac_addr, ETH_ALEN, + ICE_DMA_TO_NONDMA); + ice_memcpy(hw->port_info->mac.perm_addr, + resp[i].mac_addr, + ETH_ALEN, ICE_DMA_TO_NONDMA); + break; + } + return ICE_SUCCESS; +} + +/** + * ice_aq_get_phy_caps - returns PHY capabilities + * @pi: port information structure + * @qual_mods: report qualified modules + * @report_mode: report mode capabilities + * @pcaps: structure for PHY capabilities to be filled + * @cd: pointer to command details structure or NULL + * + * Returns the various PHY capabilities supported on the Port (0x0600) + */ +enum ice_status +ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode, + struct ice_aqc_get_phy_caps_data *pcaps, + struct ice_sq_cd *cd) +{ + struct ice_aqc_get_phy_caps *cmd; + u16 pcaps_size = sizeof(*pcaps); + struct ice_aq_desc desc; + enum ice_status status; + + cmd = &desc.params.get_phy; + + if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi) + return ICE_ERR_PARAM; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps); + + if (qual_mods) + cmd->param0 |= CPU_TO_LE16(ICE_AQC_GET_PHY_RQM); + + cmd->param0 |= CPU_TO_LE16(report_mode); + status = ice_aq_send_cmd(pi->hw, &desc, pcaps, pcaps_size, cd); + + if (status == ICE_SUCCESS && report_mode == ICE_AQC_REPORT_TOPO_CAP) { + pi->phy.phy_type_low = LE64_TO_CPU(pcaps->phy_type_low); + pi->phy.phy_type_high = LE64_TO_CPU(pcaps->phy_type_high); + } + + return status; +} + +/** + * ice_aq_get_link_topo_handle - get link topology node return status + * @pi: port information structure + * @node_type: requested node type + * @cd: pointer to command details structure or NULL + * + * Get link topology node return status for specified node type (0x06E0) + * + * Node type cage can be used to determine if cage is present. If AQC + * returns error (ENOENT), then no cage present. If no cage present, then + * connection type is backplane or BASE-T. 
+ */ +static enum ice_status +ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type, + struct ice_sq_cd *cd) +{ + struct ice_aqc_get_link_topo *cmd; + struct ice_aq_desc desc; + + cmd = &desc.params.get_link_topo; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo); + + cmd->addr.node_type_ctx = (ICE_AQC_LINK_TOPO_NODE_CTX_PORT << + ICE_AQC_LINK_TOPO_NODE_CTX_S); + + /* set node type */ + cmd->addr.node_type_ctx |= (ICE_AQC_LINK_TOPO_NODE_TYPE_M & node_type); + + return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd); +} + +/** + * ice_is_media_cage_present + * @pi: port information structure + * + * Returns true if media cage is present, else false. If no cage, then + * media type is backplane or BASE-T. + */ +static bool ice_is_media_cage_present(struct ice_port_info *pi) +{ + /* Node type cage can be used to determine if cage is present. If AQC + * returns error (ENOENT), then no cage present. If no cage present then + * connection type is backplane or BASE-T. + */ + return !ice_aq_get_link_topo_handle(pi, + ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE, + NULL); +} + +/** + * ice_get_media_type - Gets media type + * @pi: port information structure + */ +static enum ice_media_type ice_get_media_type(struct ice_port_info *pi) +{ + struct ice_link_status *hw_link_info; + + if (!pi) + return ICE_MEDIA_UNKNOWN; + + hw_link_info = &pi->phy.link_info; + if (hw_link_info->phy_type_low && hw_link_info->phy_type_high) + /* If more than one media type is selected, report unknown */ + return ICE_MEDIA_UNKNOWN; + + if (hw_link_info->phy_type_low) { + switch (hw_link_info->phy_type_low) { + case ICE_PHY_TYPE_LOW_1000BASE_SX: + case ICE_PHY_TYPE_LOW_1000BASE_LX: + case ICE_PHY_TYPE_LOW_10GBASE_SR: + case ICE_PHY_TYPE_LOW_10GBASE_LR: + case ICE_PHY_TYPE_LOW_10G_SFI_C2C: + case ICE_PHY_TYPE_LOW_25GBASE_SR: + case ICE_PHY_TYPE_LOW_25GBASE_LR: + case ICE_PHY_TYPE_LOW_40GBASE_SR4: + case ICE_PHY_TYPE_LOW_40GBASE_LR4: + case ICE_PHY_TYPE_LOW_50GBASE_SR2: + case ICE_PHY_TYPE_LOW_50GBASE_LR2: + case ICE_PHY_TYPE_LOW_50GBASE_SR: + case ICE_PHY_TYPE_LOW_50GBASE_FR: + case ICE_PHY_TYPE_LOW_50GBASE_LR: + case ICE_PHY_TYPE_LOW_100GBASE_SR4: + case ICE_PHY_TYPE_LOW_100GBASE_LR4: + case ICE_PHY_TYPE_LOW_100GBASE_SR2: + case ICE_PHY_TYPE_LOW_100GBASE_DR: + return ICE_MEDIA_FIBER; + case ICE_PHY_TYPE_LOW_100BASE_TX: + case ICE_PHY_TYPE_LOW_1000BASE_T: + case ICE_PHY_TYPE_LOW_2500BASE_T: + case ICE_PHY_TYPE_LOW_5GBASE_T: + case ICE_PHY_TYPE_LOW_10GBASE_T: + case ICE_PHY_TYPE_LOW_25GBASE_T: + return ICE_MEDIA_BASET; + case ICE_PHY_TYPE_LOW_10G_SFI_DA: + case ICE_PHY_TYPE_LOW_25GBASE_CR: + case ICE_PHY_TYPE_LOW_25GBASE_CR_S: + case ICE_PHY_TYPE_LOW_25GBASE_CR1: + case ICE_PHY_TYPE_LOW_40GBASE_CR4: + case ICE_PHY_TYPE_LOW_50GBASE_CR2: + case ICE_PHY_TYPE_LOW_50GBASE_CP: + case ICE_PHY_TYPE_LOW_100GBASE_CR4: + case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4: + case ICE_PHY_TYPE_LOW_100GBASE_CP2: + return ICE_MEDIA_DA; + case ICE_PHY_TYPE_LOW_25G_AUI_C2C: + case ICE_PHY_TYPE_LOW_40G_XLAUI: + case ICE_PHY_TYPE_LOW_50G_LAUI2: + case ICE_PHY_TYPE_LOW_50G_AUI2: + case ICE_PHY_TYPE_LOW_50G_AUI1: + case ICE_PHY_TYPE_LOW_100G_AUI4: + case ICE_PHY_TYPE_LOW_100G_CAUI4: + if (ice_is_media_cage_present(pi)) + return ICE_MEDIA_DA; + /* fall-through */ + case ICE_PHY_TYPE_LOW_1000BASE_KX: + case ICE_PHY_TYPE_LOW_2500BASE_KX: + case ICE_PHY_TYPE_LOW_2500BASE_X: + case ICE_PHY_TYPE_LOW_5GBASE_KR: + case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1: + case ICE_PHY_TYPE_LOW_25GBASE_KR: + case ICE_PHY_TYPE_LOW_25GBASE_KR1: + case 
ICE_PHY_TYPE_LOW_25GBASE_KR_S: + case ICE_PHY_TYPE_LOW_40GBASE_KR4: + case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4: + case ICE_PHY_TYPE_LOW_50GBASE_KR2: + case ICE_PHY_TYPE_LOW_100GBASE_KR4: + case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4: + return ICE_MEDIA_BACKPLANE; + } + } else { + switch (hw_link_info->phy_type_high) { + case ICE_PHY_TYPE_HIGH_100G_AUI2: + if (ice_is_media_cage_present(pi)) + return ICE_MEDIA_DA; + /* fall-through */ + case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4: + return ICE_MEDIA_BACKPLANE; + } + } + return ICE_MEDIA_UNKNOWN; +} + +/** + * ice_aq_get_link_info + * @pi: port information structure + * @ena_lse: enable/disable LinkStatusEvent reporting + * @link: pointer to link status structure - optional + * @cd: pointer to command details structure or NULL + * + * Get Link Status (0x607). Returns the link status of the adapter. + */ +enum ice_status +ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse, + struct ice_link_status *link, struct ice_sq_cd *cd) +{ + struct ice_aqc_get_link_status_data link_data = { 0 }; + struct ice_aqc_get_link_status *resp; + struct ice_link_status *li_old, *li; + enum ice_media_type *hw_media_type; + struct ice_fc_info *hw_fc_info; + bool tx_pause, rx_pause; + struct ice_aq_desc desc; + enum ice_status status; + struct ice_hw *hw; + u16 cmd_flags; + + if (!pi) + return ICE_ERR_PARAM; + hw = pi->hw; + li_old = &pi->phy.link_info_old; + hw_media_type = &pi->phy.media_type; + li = &pi->phy.link_info; + hw_fc_info = &pi->fc; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status); + cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS; + resp = &desc.params.get_link_status; + resp->cmd_flags = CPU_TO_LE16(cmd_flags); + resp->lport_num = pi->lport; + + status = ice_aq_send_cmd(hw, &desc, &link_data, sizeof(link_data), cd); + + if (status != ICE_SUCCESS) + return status; + + /* save off old link status information */ + *li_old = *li; + + /* update current link status information */ + li->link_speed = LE16_TO_CPU(link_data.link_speed); + li->phy_type_low = LE64_TO_CPU(link_data.phy_type_low); + li->phy_type_high = LE64_TO_CPU(link_data.phy_type_high); + *hw_media_type = ice_get_media_type(pi); + li->link_info = link_data.link_info; + li->an_info = link_data.an_info; + li->ext_info = link_data.ext_info; + li->max_frame_size = LE16_TO_CPU(link_data.max_frame_size); + li->fec_info = link_data.cfg & ICE_AQ_FEC_MASK; + li->topo_media_conflict = link_data.topo_media_conflict; + li->pacing = link_data.cfg & (ICE_AQ_CFG_PACING_M | + ICE_AQ_CFG_PACING_TYPE_M); + + /* update fc info */ + tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX); + rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX); + if (tx_pause && rx_pause) + hw_fc_info->current_mode = ICE_FC_FULL; + else if (tx_pause) + hw_fc_info->current_mode = ICE_FC_TX_PAUSE; + else if (rx_pause) + hw_fc_info->current_mode = ICE_FC_RX_PAUSE; + else + hw_fc_info->current_mode = ICE_FC_NONE; + + li->lse_ena = !!(resp->cmd_flags & CPU_TO_LE16(ICE_AQ_LSE_IS_ENABLED)); + + ice_debug(hw, ICE_DBG_LINK, "link_speed = 0x%x\n", li->link_speed); + ice_debug(hw, ICE_DBG_LINK, "phy_type_low = 0x%llx\n", + (unsigned long long)li->phy_type_low); + ice_debug(hw, ICE_DBG_LINK, "phy_type_high = 0x%llx\n", + (unsigned long long)li->phy_type_high); + ice_debug(hw, ICE_DBG_LINK, "media_type = 0x%x\n", *hw_media_type); + ice_debug(hw, ICE_DBG_LINK, "link_info = 0x%x\n", li->link_info); + ice_debug(hw, ICE_DBG_LINK, "an_info = 0x%x\n", li->an_info); + ice_debug(hw, ICE_DBG_LINK, "ext_info = 0x%x\n", 
li->ext_info); + ice_debug(hw, ICE_DBG_LINK, "lse_ena = 0x%x\n", li->lse_ena); + ice_debug(hw, ICE_DBG_LINK, "max_frame = 0x%x\n", li->max_frame_size); + ice_debug(hw, ICE_DBG_LINK, "pacing = 0x%x\n", li->pacing); + + /* save link status information */ + if (link) + *link = *li; + + /* flag cleared so calling functions don't call AQ again */ + pi->phy.get_link_info = false; + + return ICE_SUCCESS; +} + +/** + * ice_aq_set_mac_cfg + * @hw: pointer to the HW struct + * @max_frame_size: Maximum Frame Size to be supported + * @cd: pointer to command details structure or NULL + * + * Set MAC configuration (0x0603) + */ +enum ice_status +ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd) +{ + u16 fc_threshold_val, tx_timer_val; + struct ice_aqc_set_mac_cfg *cmd; + struct ice_aq_desc desc; + u32 reg_val; + + cmd = &desc.params.set_mac_cfg; + + if (max_frame_size == 0) + return ICE_ERR_PARAM; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_cfg); + + cmd->max_frame_size = CPU_TO_LE16(max_frame_size); + + /* We read back the transmit timer and fc threshold value of + * LFC. Thus, we will use index = + * PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX. + * + * Also, because we are opearating on transmit timer and fc + * threshold of LFC, we don't turn on any bit in tx_tmr_priority + */ +#define IDX_OF_LFC PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX + + /* Retrieve the transmit timer */ + reg_val = rd32(hw, + PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(IDX_OF_LFC)); + tx_timer_val = reg_val & + PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M; + cmd->tx_tmr_value = CPU_TO_LE16(tx_timer_val); + + /* Retrieve the fc threshold */ + reg_val = rd32(hw, + PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(IDX_OF_LFC)); + fc_threshold_val = reg_val & MAKEMASK(0xFFFF, 0); + cmd->fc_refresh_threshold = CPU_TO_LE16(fc_threshold_val); + + return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); +} + +/** + * ice_init_fltr_mgmt_struct - initializes filter management list and locks + * @hw: pointer to the HW struct + */ +enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw) +{ + struct ice_switch_info *sw; + + hw->switch_info = (struct ice_switch_info *) + ice_malloc(hw, sizeof(*hw->switch_info)); + + sw = hw->switch_info; + + if (!sw) + return ICE_ERR_NO_MEMORY; + + INIT_LIST_HEAD(&sw->vsi_list_map_head); + + return ice_init_def_sw_recp(hw, &hw->switch_info->recp_list); +} + +/** + * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks + * @hw: pointer to the HW struct + */ +void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw) +{ + struct ice_switch_info *sw = hw->switch_info; + struct ice_vsi_list_map_info *v_pos_map; + struct ice_vsi_list_map_info *v_tmp_map; + struct ice_sw_recipe *recps; + u8 i; + + LIST_FOR_EACH_ENTRY_SAFE(v_pos_map, v_tmp_map, &sw->vsi_list_map_head, + ice_vsi_list_map_info, list_entry) { + LIST_DEL(&v_pos_map->list_entry); + ice_free(hw, v_pos_map); + } + recps = hw->switch_info->recp_list; + for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) { + struct ice_recp_grp_entry *rg_entry, *tmprg_entry; + + recps[i].root_rid = i; + LIST_FOR_EACH_ENTRY_SAFE(rg_entry, tmprg_entry, + &recps[i].rg_list, ice_recp_grp_entry, + l_entry) { + LIST_DEL(&rg_entry->l_entry); + ice_free(hw, rg_entry); + } + + if (recps[i].adv_rule) { + struct ice_adv_fltr_mgmt_list_entry *tmp_entry; + struct ice_adv_fltr_mgmt_list_entry *lst_itr; + + ice_destroy_lock(&recps[i].filt_rule_lock); + LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry, + &recps[i].filt_rules, + ice_adv_fltr_mgmt_list_entry, + 
list_entry) { + LIST_DEL(&lst_itr->list_entry); + ice_free(hw, lst_itr->lkups); + ice_free(hw, lst_itr); + } + } else { + struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry; + + ice_destroy_lock(&recps[i].filt_rule_lock); + LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry, + &recps[i].filt_rules, + ice_fltr_mgmt_list_entry, + list_entry) { + LIST_DEL(&lst_itr->list_entry); + ice_free(hw, lst_itr); + } + } + if (recps[i].root_buf) + ice_free(hw, recps[i].root_buf); + } + ice_rm_all_sw_replay_rule_info(hw); + ice_free(hw, sw->recp_list); + ice_free(hw, sw); +} + +/** + * ice_get_itr_intrl_gran + * @hw: pointer to the HW struct + * + * Determines the ITR/INTRL granularities based on the maximum aggregate + * bandwidth according to the device's configuration during power-on. + */ +static void ice_get_itr_intrl_gran(struct ice_hw *hw) +{ + u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) & + GL_PWR_MODE_CTL_CAR_MAX_BW_M) >> + GL_PWR_MODE_CTL_CAR_MAX_BW_S; + + switch (max_agg_bw) { + case ICE_MAX_AGG_BW_200G: + case ICE_MAX_AGG_BW_100G: + case ICE_MAX_AGG_BW_50G: + hw->itr_gran = ICE_ITR_GRAN_ABOVE_25; + hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25; + break; + case ICE_MAX_AGG_BW_25G: + hw->itr_gran = ICE_ITR_GRAN_MAX_25; + hw->intrl_gran = ICE_INTRL_GRAN_MAX_25; + break; + } +} + +/** + * ice_print_rollback_msg - print FW rollback message + * @hw: pointer to the hardware structure + */ +void ice_print_rollback_msg(struct ice_hw *hw) +{ + char nvm_str[ICE_NVM_VER_LEN] = { 0 }; + struct ice_nvm_info *nvm = &hw->nvm; + struct ice_orom_info *orom; + + orom = &nvm->orom; + + SNPRINTF(nvm_str, sizeof(nvm_str), "%x.%02x 0x%x %d.%d.%d", + nvm->major_ver, nvm->minor_ver, nvm->eetrack, orom->major, + orom->build, orom->patch); + ice_warn(hw, + "Firmware rollback mode detected. Current version is NVM: %s, FW: %d.%d. Device may exhibit limited functionality. 
Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware rollback mode\n", + nvm_str, hw->fw_maj_ver, hw->fw_min_ver); +} + +/** + * ice_init_hw - main hardware initialization routine + * @hw: pointer to the hardware structure + */ +enum ice_status ice_init_hw(struct ice_hw *hw) +{ + struct ice_aqc_get_phy_caps_data *pcaps; + enum ice_status status; + u16 mac_buf_len; + void *mac_buf; + + ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); + + /* Set MAC type based on DeviceID */ + status = ice_set_mac_type(hw); + if (status) + return status; + + hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) & + PF_FUNC_RID_FUNCTION_NUMBER_M) >> + PF_FUNC_RID_FUNCTION_NUMBER_S; + + status = ice_reset(hw, ICE_RESET_PFR); + if (status) + return status; + + ice_get_itr_intrl_gran(hw); + + status = ice_create_all_ctrlq(hw); + if (status) + goto err_unroll_cqinit; + + status = ice_init_nvm(hw); + if (status) + goto err_unroll_cqinit; + + if (ice_get_fw_mode(hw) == ICE_FW_MODE_ROLLBACK) + ice_print_rollback_msg(hw); + + status = ice_clear_pf_cfg(hw); + if (status) + goto err_unroll_cqinit; + + /* Set bit to enable Flow Director filters */ + wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M); + INIT_LIST_HEAD(&hw->fdir_list_head); + + ice_clear_pxe_mode(hw); + + status = ice_get_caps(hw); + if (status) + goto err_unroll_cqinit; + + hw->port_info = (struct ice_port_info *) + ice_malloc(hw, sizeof(*hw->port_info)); + if (!hw->port_info) { + status = ICE_ERR_NO_MEMORY; + goto err_unroll_cqinit; + } + + /* set the back pointer to HW */ + hw->port_info->hw = hw; + + /* Initialize port_info struct with switch configuration data */ + status = ice_get_initial_sw_cfg(hw); + if (status) + goto err_unroll_alloc; + + hw->evb_veb = true; + /* Query the allocated resources for Tx scheduler */ + status = ice_sched_query_res_alloc(hw); + if (status) { + ice_debug(hw, ICE_DBG_SCHED, + "Failed to get scheduler allocated resources\n"); + goto err_unroll_alloc; + } + ice_sched_get_psm_clk_freq(hw); + + /* Initialize port_info struct with scheduler data */ + status = ice_sched_init_port(hw->port_info); + if (status) + goto err_unroll_sched; + + pcaps = (struct ice_aqc_get_phy_caps_data *) + ice_malloc(hw, sizeof(*pcaps)); + if (!pcaps) { + status = ICE_ERR_NO_MEMORY; + goto err_unroll_sched; + } + + /* Initialize port_info struct with PHY capabilities */ + status = ice_aq_get_phy_caps(hw->port_info, false, + ICE_AQC_REPORT_TOPO_CAP, pcaps, NULL); + ice_free(hw, pcaps); + if (status) + goto err_unroll_sched; + + /* Initialize port_info struct with link information */ + status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL); + if (status) + goto err_unroll_sched; + /* need a valid SW entry point to build a Tx tree */ + if (!hw->sw_entry_point_layer) { + ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n"); + status = ICE_ERR_CFG; + goto err_unroll_sched; + } + INIT_LIST_HEAD(&hw->agg_list); + /* Initialize max burst size */ + if (!hw->max_burst_size) + ice_cfg_rl_burst_size(hw, ICE_SCHED_DFLT_BURST_SIZE); + + status = ice_init_fltr_mgmt_struct(hw); + if (status) + goto err_unroll_sched; + + /* Get MAC information */ + /* A single port can report up to two (LAN and WoL) addresses */ + mac_buf = ice_calloc(hw, 2, + sizeof(struct ice_aqc_manage_mac_read_resp)); + mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp); + + if (!mac_buf) { + status = ICE_ERR_NO_MEMORY; + goto err_unroll_fltr_mgmt_struct; + } + + status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL); + ice_free(hw, mac_buf); + + if 
(status) + goto err_unroll_fltr_mgmt_struct; + /* Obtain counter base index which would be used by flow director */ + status = ice_alloc_fd_res_cntr(hw, &hw->fd_ctr_base); + if (status) + goto err_unroll_fltr_mgmt_struct; + status = ice_init_hw_tbls(hw); + if (status) + goto err_unroll_fltr_mgmt_struct; + ice_init_lock(&hw->tnl_lock); + return ICE_SUCCESS; + +err_unroll_fltr_mgmt_struct: + ice_cleanup_fltr_mgmt_struct(hw); +err_unroll_sched: + ice_sched_cleanup_all(hw); +err_unroll_alloc: + ice_free(hw, hw->port_info); + hw->port_info = NULL; +err_unroll_cqinit: + ice_destroy_all_ctrlq(hw); + return status; +} + +/** + * ice_deinit_hw - unroll initialization operations done by ice_init_hw + * @hw: pointer to the hardware structure + * + * This should be called only during nominal operation, not as a result of + * ice_init_hw() failing since ice_init_hw() will take care of unrolling + * applicable initializations if it fails for any reason. + */ +void ice_deinit_hw(struct ice_hw *hw) +{ + ice_free_fd_res_cntr(hw, hw->fd_ctr_base); + ice_cleanup_fltr_mgmt_struct(hw); + + ice_sched_cleanup_all(hw); + ice_sched_clear_agg(hw); + ice_free_seg(hw); + ice_free_hw_tbls(hw); + ice_destroy_lock(&hw->tnl_lock); + + if (hw->port_info) { + ice_free(hw, hw->port_info); + hw->port_info = NULL; + } + + ice_destroy_all_ctrlq(hw); + + /* Clear VSI contexts if not already cleared */ + ice_clear_all_vsi_ctx(hw); +} + +/** + * ice_check_reset - Check to see if a global reset is complete + * @hw: pointer to the hardware structure + */ +enum ice_status ice_check_reset(struct ice_hw *hw) +{ + u32 cnt, reg = 0, grst_delay, uld_mask; + + /* Poll for Device Active state in case a recent CORER, GLOBR, + * or EMPR has occurred. The grst delay value is in 100ms units. + * Add 1sec for outstanding AQ commands that can take a long time. + */ + grst_delay = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >> + GLGEN_RSTCTL_GRSTDEL_S) + 10; + + for (cnt = 0; cnt < grst_delay; cnt++) { + ice_msec_delay(100, true); + reg = rd32(hw, GLGEN_RSTAT); + if (!(reg & GLGEN_RSTAT_DEVSTATE_M)) + break; + } + + if (cnt == grst_delay) { + ice_debug(hw, ICE_DBG_INIT, + "Global reset polling failed to complete.\n"); + return ICE_ERR_RESET_FAILED; + } + +#define ICE_RESET_DONE_MASK (GLNVM_ULD_PCIER_DONE_M |\ + GLNVM_ULD_PCIER_DONE_1_M |\ + GLNVM_ULD_CORER_DONE_M |\ + GLNVM_ULD_GLOBR_DONE_M |\ + GLNVM_ULD_POR_DONE_M |\ + GLNVM_ULD_POR_DONE_1_M |\ + GLNVM_ULD_PCIER_DONE_2_M) + + uld_mask = ICE_RESET_DONE_MASK; + + /* Device is Active; check Global Reset processes are done */ + for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) { + reg = rd32(hw, GLNVM_ULD) & uld_mask; + if (reg == uld_mask) { + ice_debug(hw, ICE_DBG_INIT, + "Global reset processes done. %d\n", cnt); + break; + } + ice_msec_delay(10, true); + } + + if (cnt == ICE_PF_RESET_WAIT_COUNT) { + ice_debug(hw, ICE_DBG_INIT, + "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n", + reg); + return ICE_ERR_RESET_FAILED; + } + + return ICE_SUCCESS; +} + +/** + * ice_pf_reset - Reset the PF + * @hw: pointer to the hardware structure + * + * If a global reset has been triggered, this function checks + * for its completion and then issues the PF reset + */ +static enum ice_status ice_pf_reset(struct ice_hw *hw) +{ + u32 cnt, reg; + + /* If at function entry a global reset was already in progress, i.e. + * state is not 'device active' or any of the reset done bits are not + * set in GLNVM_ULD, there is no need for a PF Reset; poll until the + * global reset is done. 
+ */ + if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) || + (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) { + /* poll on global reset currently in progress until done */ + if (ice_check_reset(hw)) + return ICE_ERR_RESET_FAILED; + + return ICE_SUCCESS; + } + + /* Reset the PF */ + reg = rd32(hw, PFGEN_CTRL); + + wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M)); + + for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) { + reg = rd32(hw, PFGEN_CTRL); + if (!(reg & PFGEN_CTRL_PFSWR_M)) + break; + + ice_msec_delay(1, true); + } + + if (cnt == ICE_PF_RESET_WAIT_COUNT) { + ice_debug(hw, ICE_DBG_INIT, + "PF reset polling failed to complete.\n"); + return ICE_ERR_RESET_FAILED; + } + + return ICE_SUCCESS; +} + +/** + * ice_reset - Perform different types of reset + * @hw: pointer to the hardware structure + * @req: reset request + * + * This function triggers a reset as specified by the req parameter. + * + * Note: + * If anything other than a PF reset is triggered, PXE mode is restored. + * This has to be cleared using ice_clear_pxe_mode again, once the AQ + * interface has been restored in the rebuild flow. + */ +enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req) +{ + u32 val = 0; + + switch (req) { + case ICE_RESET_PFR: + return ice_pf_reset(hw); + case ICE_RESET_CORER: + ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n"); + val = GLGEN_RTRIG_CORER_M; + break; + case ICE_RESET_GLOBR: + ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n"); + val = GLGEN_RTRIG_GLOBR_M; + break; + default: + return ICE_ERR_PARAM; + } + + val |= rd32(hw, GLGEN_RTRIG); + wr32(hw, GLGEN_RTRIG, val); + ice_flush(hw); + + /* wait for the FW to be ready */ + return ice_check_reset(hw); +} + +/** + * ice_copy_rxq_ctx_to_hw + * @hw: pointer to the hardware structure + * @ice_rxq_ctx: pointer to the rxq context + * @rxq_index: the index of the Rx queue + * + * Copies rxq context from dense structure to HW register space + */ +static enum ice_status +ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index) +{ + u8 i; + + if (!ice_rxq_ctx) + return ICE_ERR_BAD_PTR; + + if (rxq_index > QRX_CTRL_MAX_INDEX) + return ICE_ERR_PARAM; + + /* Copy each dword separately to HW */ + for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) { + wr32(hw, QRX_CONTEXT(i, rxq_index), + *((u32 *)(ice_rxq_ctx + (i * sizeof(u32))))); + + ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i, + *((u32 *)(ice_rxq_ctx + (i * sizeof(u32))))); + } + + return ICE_SUCCESS; +} + +/* LAN Rx Queue Context */ +static const struct ice_ctx_ele ice_rlan_ctx_info[] = { + /* Field Width LSB */ + ICE_CTX_STORE(ice_rlan_ctx, head, 13, 0), + ICE_CTX_STORE(ice_rlan_ctx, cpuid, 8, 13), + ICE_CTX_STORE(ice_rlan_ctx, base, 57, 32), + ICE_CTX_STORE(ice_rlan_ctx, qlen, 13, 89), + ICE_CTX_STORE(ice_rlan_ctx, dbuf, 7, 102), + ICE_CTX_STORE(ice_rlan_ctx, hbuf, 5, 109), + ICE_CTX_STORE(ice_rlan_ctx, dtype, 2, 114), + ICE_CTX_STORE(ice_rlan_ctx, dsize, 1, 116), + ICE_CTX_STORE(ice_rlan_ctx, crcstrip, 1, 117), + ICE_CTX_STORE(ice_rlan_ctx, l2tsel, 1, 119), + ICE_CTX_STORE(ice_rlan_ctx, hsplit_0, 4, 120), + ICE_CTX_STORE(ice_rlan_ctx, hsplit_1, 2, 124), + ICE_CTX_STORE(ice_rlan_ctx, showiv, 1, 127), + ICE_CTX_STORE(ice_rlan_ctx, rxmax, 14, 174), + ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena, 1, 193), + ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena, 1, 194), + ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena, 1, 195), + ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena, 1, 196), + ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh, 3, 198), + ICE_CTX_STORE(ice_rlan_ctx, 
prefena, 1, 201), + { 0 } +}; + +/** + * ice_write_rxq_ctx + * @hw: pointer to the hardware structure + * @rlan_ctx: pointer to the rxq context + * @rxq_index: the index of the Rx queue + * + * Converts rxq context from sparse to dense structure and then writes + * it to HW register space and enables the hardware to prefetch descriptors + * instead of only fetching them on demand + */ +enum ice_status +ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx, + u32 rxq_index) +{ + u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 }; + + if (!rlan_ctx) + return ICE_ERR_BAD_PTR; + + rlan_ctx->prefena = 1; + + ice_set_ctx((u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info); + return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index); +} + +/** + * ice_clear_rxq_ctx + * @hw: pointer to the hardware structure + * @rxq_index: the index of the Rx queue to clear + * + * Clears rxq context in HW register space + */ +enum ice_status ice_clear_rxq_ctx(struct ice_hw *hw, u32 rxq_index) +{ + u8 i; + + if (rxq_index > QRX_CTRL_MAX_INDEX) + return ICE_ERR_PARAM; + + /* Clear each dword register separately */ + for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) + wr32(hw, QRX_CONTEXT(i, rxq_index), 0); + + return ICE_SUCCESS; +} + +/* LAN Tx Queue Context */ +const struct ice_ctx_ele ice_tlan_ctx_info[] = { + /* Field Width LSB */ + ICE_CTX_STORE(ice_tlan_ctx, base, 57, 0), + ICE_CTX_STORE(ice_tlan_ctx, port_num, 3, 57), + ICE_CTX_STORE(ice_tlan_ctx, cgd_num, 5, 60), + ICE_CTX_STORE(ice_tlan_ctx, pf_num, 3, 65), + ICE_CTX_STORE(ice_tlan_ctx, vmvf_num, 10, 68), + ICE_CTX_STORE(ice_tlan_ctx, vmvf_type, 2, 78), + ICE_CTX_STORE(ice_tlan_ctx, src_vsi, 10, 80), + ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena, 1, 90), + ICE_CTX_STORE(ice_tlan_ctx, internal_usage_flag, 1, 91), + ICE_CTX_STORE(ice_tlan_ctx, alt_vlan, 1, 92), + ICE_CTX_STORE(ice_tlan_ctx, cpuid, 8, 93), + ICE_CTX_STORE(ice_tlan_ctx, wb_mode, 1, 101), + ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc, 1, 102), + ICE_CTX_STORE(ice_tlan_ctx, tphrd, 1, 103), + ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc, 1, 104), + ICE_CTX_STORE(ice_tlan_ctx, cmpq_id, 9, 105), + ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func, 14, 114), + ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode, 1, 128), + ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id, 6, 129), + ICE_CTX_STORE(ice_tlan_ctx, qlen, 13, 135), + ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx, 4, 148), + ICE_CTX_STORE(ice_tlan_ctx, tso_ena, 1, 152), + ICE_CTX_STORE(ice_tlan_ctx, tso_qnum, 11, 153), + ICE_CTX_STORE(ice_tlan_ctx, legacy_int, 1, 164), + ICE_CTX_STORE(ice_tlan_ctx, drop_ena, 1, 165), + ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx, 2, 166), + ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx, 3, 168), + ICE_CTX_STORE(ice_tlan_ctx, int_q_state, 122, 171), + { 0 } +}; + +/** + * ice_copy_tx_cmpltnq_ctx_to_hw + * @hw: pointer to the hardware structure + * @ice_tx_cmpltnq_ctx: pointer to the Tx completion queue context + * @tx_cmpltnq_index: the index of the completion queue + * + * Copies Tx completion queue context from dense structure to HW register space + */ +static enum ice_status +ice_copy_tx_cmpltnq_ctx_to_hw(struct ice_hw *hw, u8 *ice_tx_cmpltnq_ctx, + u32 tx_cmpltnq_index) +{ + u8 i; + + if (!ice_tx_cmpltnq_ctx) + return ICE_ERR_BAD_PTR; + + if (tx_cmpltnq_index > GLTCLAN_CQ_CNTX0_MAX_INDEX) + return ICE_ERR_PARAM; + + /* Copy each dword separately to HW */ + for (i = 0; i < ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS; i++) { + wr32(hw, GLTCLAN_CQ_CNTX(i, tx_cmpltnq_index), + *((u32 *)(ice_tx_cmpltnq_ctx + (i * sizeof(u32))))); + + ice_debug(hw, ICE_DBG_QCTX, 
"cmpltnqdata[%d]: %08X\n", i, + *((u32 *)(ice_tx_cmpltnq_ctx + (i * sizeof(u32))))); + } + + return ICE_SUCCESS; +} + +/* LAN Tx Completion Queue Context */ +static const struct ice_ctx_ele ice_tx_cmpltnq_ctx_info[] = { + /* Field Width LSB */ + ICE_CTX_STORE(ice_tx_cmpltnq_ctx, base, 57, 0), + ICE_CTX_STORE(ice_tx_cmpltnq_ctx, q_len, 18, 64), + ICE_CTX_STORE(ice_tx_cmpltnq_ctx, generation, 1, 96), + ICE_CTX_STORE(ice_tx_cmpltnq_ctx, wrt_ptr, 22, 97), + ICE_CTX_STORE(ice_tx_cmpltnq_ctx, pf_num, 3, 128), + ICE_CTX_STORE(ice_tx_cmpltnq_ctx, vmvf_num, 10, 131), + ICE_CTX_STORE(ice_tx_cmpltnq_ctx, vmvf_type, 2, 141), + ICE_CTX_STORE(ice_tx_cmpltnq_ctx, tph_desc_wr, 1, 160), + ICE_CTX_STORE(ice_tx_cmpltnq_ctx, cpuid, 8, 161), + ICE_CTX_STORE(ice_tx_cmpltnq_ctx, cmpltn_cache, 512, 192), + { 0 } +}; + +/** + * ice_write_tx_cmpltnq_ctx + * @hw: pointer to the hardware structure + * @tx_cmpltnq_ctx: pointer to the completion queue context + * @tx_cmpltnq_index: the index of the completion queue + * + * Converts completion queue context from sparse to dense structure and then + * writes it to HW register space + */ +enum ice_status +ice_write_tx_cmpltnq_ctx(struct ice_hw *hw, + struct ice_tx_cmpltnq_ctx *tx_cmpltnq_ctx, + u32 tx_cmpltnq_index) +{ + u8 ctx_buf[ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS * sizeof(u32)] = { 0 }; + + ice_set_ctx((u8 *)tx_cmpltnq_ctx, ctx_buf, ice_tx_cmpltnq_ctx_info); + return ice_copy_tx_cmpltnq_ctx_to_hw(hw, ctx_buf, tx_cmpltnq_index); +} + +/** + * ice_clear_tx_cmpltnq_ctx + * @hw: pointer to the hardware structure + * @tx_cmpltnq_index: the index of the completion queue to clear + * + * Clears Tx completion queue context in HW register space + */ +enum ice_status +ice_clear_tx_cmpltnq_ctx(struct ice_hw *hw, u32 tx_cmpltnq_index) +{ + u8 i; + + if (tx_cmpltnq_index > GLTCLAN_CQ_CNTX0_MAX_INDEX) + return ICE_ERR_PARAM; + + /* Clear each dword register separately */ + for (i = 0; i < ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS; i++) + wr32(hw, GLTCLAN_CQ_CNTX(i, tx_cmpltnq_index), 0); + + return ICE_SUCCESS; +} + +/** + * ice_copy_tx_drbell_q_ctx_to_hw + * @hw: pointer to the hardware structure + * @ice_tx_drbell_q_ctx: pointer to the doorbell queue context + * @tx_drbell_q_index: the index of the doorbell queue + * + * Copies doorbell queue context from dense structure to HW register space + */ +static enum ice_status +ice_copy_tx_drbell_q_ctx_to_hw(struct ice_hw *hw, u8 *ice_tx_drbell_q_ctx, + u32 tx_drbell_q_index) +{ + u8 i; + + if (!ice_tx_drbell_q_ctx) + return ICE_ERR_BAD_PTR; + + if (tx_drbell_q_index > QTX_COMM_DBLQ_DBELL_MAX_INDEX) + return ICE_ERR_PARAM; + + /* Copy each dword separately to HW */ + for (i = 0; i < ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS; i++) { + wr32(hw, QTX_COMM_DBLQ_CNTX(i, tx_drbell_q_index), + *((u32 *)(ice_tx_drbell_q_ctx + (i * sizeof(u32))))); + + ice_debug(hw, ICE_DBG_QCTX, "tx_drbell_qdata[%d]: %08X\n", i, + *((u32 *)(ice_tx_drbell_q_ctx + (i * sizeof(u32))))); + } + + return ICE_SUCCESS; +} + +/* LAN Tx Doorbell Queue Context info */ +static const struct ice_ctx_ele ice_tx_drbell_q_ctx_info[] = { + /* Field Width LSB */ + ICE_CTX_STORE(ice_tx_drbell_q_ctx, base, 57, 0), + ICE_CTX_STORE(ice_tx_drbell_q_ctx, ring_len, 13, 64), + ICE_CTX_STORE(ice_tx_drbell_q_ctx, pf_num, 3, 80), + ICE_CTX_STORE(ice_tx_drbell_q_ctx, vf_num, 8, 84), + ICE_CTX_STORE(ice_tx_drbell_q_ctx, vmvf_type, 2, 94), + ICE_CTX_STORE(ice_tx_drbell_q_ctx, cpuid, 8, 96), + ICE_CTX_STORE(ice_tx_drbell_q_ctx, tph_desc_rd, 1, 104), + ICE_CTX_STORE(ice_tx_drbell_q_ctx, tph_desc_wr, 1, 108), + 
ICE_CTX_STORE(ice_tx_drbell_q_ctx, db_q_en, 1, 112), + ICE_CTX_STORE(ice_tx_drbell_q_ctx, rd_head, 13, 128), + ICE_CTX_STORE(ice_tx_drbell_q_ctx, rd_tail, 13, 144), + { 0 } +}; + +/** + * ice_write_tx_drbell_q_ctx + * @hw: pointer to the hardware structure + * @tx_drbell_q_ctx: pointer to the doorbell queue context + * @tx_drbell_q_index: the index of the doorbell queue + * + * Converts doorbell queue context from sparse to dense structure and then + * writes it to HW register space + */ +enum ice_status +ice_write_tx_drbell_q_ctx(struct ice_hw *hw, + struct ice_tx_drbell_q_ctx *tx_drbell_q_ctx, + u32 tx_drbell_q_index) +{ + u8 ctx_buf[ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS * sizeof(u32)] = { 0 }; + + ice_set_ctx((u8 *)tx_drbell_q_ctx, ctx_buf, ice_tx_drbell_q_ctx_info); + return ice_copy_tx_drbell_q_ctx_to_hw(hw, ctx_buf, tx_drbell_q_index); +} + +/** + * ice_clear_tx_drbell_q_ctx + * @hw: pointer to the hardware structure + * @tx_drbell_q_index: the index of the doorbell queue to clear + * + * Clears doorbell queue context in HW register space + */ +enum ice_status +ice_clear_tx_drbell_q_ctx(struct ice_hw *hw, u32 tx_drbell_q_index) +{ + u8 i; + + if (tx_drbell_q_index > QTX_COMM_DBLQ_DBELL_MAX_INDEX) + return ICE_ERR_PARAM; + + /* Clear each dword register separately */ + for (i = 0; i < ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS; i++) + wr32(hw, QTX_COMM_DBLQ_CNTX(i, tx_drbell_q_index), 0); + + return ICE_SUCCESS; +} + +/* FW Admin Queue command wrappers */ + +/** + * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue + * @hw: pointer to the HW struct + * @desc: descriptor describing the command + * @buf: buffer to use for indirect commands (NULL for direct commands) + * @buf_size: size of buffer for indirect commands (0 for direct commands) + * @cd: pointer to command details structure + * + * Helper function to send FW Admin Queue commands to the FW Admin Queue. 
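Editor's note: most wrappers in this file follow the same shape as ice_clear_pf_cfg() earlier in ice_common.c: fill a default direct descriptor, then hand it to ice_aq_send_cmd(). The sketch below is a hypothetical illustration of that pattern, not part of the imported sources; it reuses the ice_aqc_opc_get_ver opcode and ignores the response for brevity (the real wrapper, ice_aq_get_fw_ver() just below, also parses the reply).

/* Hypothetical sketch of a direct (buffer-less) admin queue command. */
static enum ice_status example_send_direct_cmd(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	/* Prepare a descriptor for the chosen opcode with default flags */
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);

	/* NULL buffer / zero length: everything travels in the descriptor */
	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}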
+ */ +enum ice_status +ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf, + u16 buf_size, struct ice_sq_cd *cd) +{ + if (hw->aq_send_cmd_fn) { + enum ice_status status = ICE_ERR_NOT_READY; + u16 retval = ICE_AQ_RC_OK; + + ice_acquire_lock(&hw->adminq.sq_lock); + if (!hw->aq_send_cmd_fn(hw->aq_send_cmd_param, desc, + buf, buf_size)) { + retval = LE16_TO_CPU(desc->retval); + /* strip off FW internal code */ + if (retval) + retval &= 0xff; + if (retval == ICE_AQ_RC_OK) + status = ICE_SUCCESS; + else + status = ICE_ERR_AQ_ERROR; + } + + hw->adminq.sq_last_status = (enum ice_aq_err)retval; + ice_release_lock(&hw->adminq.sq_lock); + + return status; + } + return ice_sq_send_cmd(hw, &hw->adminq, desc, buf, buf_size, cd); +} + +/** + * ice_aq_get_fw_ver + * @hw: pointer to the HW struct + * @cd: pointer to command details structure or NULL + * + * Get the firmware version (0x0001) from the admin queue commands + */ +enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd) +{ + struct ice_aqc_get_ver *resp; + struct ice_aq_desc desc; + enum ice_status status; + + resp = &desc.params.get_ver; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver); + + status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd); + + if (!status) { + hw->fw_branch = resp->fw_branch; + hw->fw_maj_ver = resp->fw_major; + hw->fw_min_ver = resp->fw_minor; + hw->fw_patch = resp->fw_patch; + hw->fw_build = LE32_TO_CPU(resp->fw_build); + hw->api_branch = resp->api_branch; + hw->api_maj_ver = resp->api_major; + hw->api_min_ver = resp->api_minor; + hw->api_patch = resp->api_patch; + } + + return status; +} + +/** + * ice_aq_send_driver_ver + * @hw: pointer to the HW struct + * @dv: driver's major, minor version + * @cd: pointer to command details structure or NULL + * + * Send the driver version (0x0002) to the firmware + */ +enum ice_status +ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv, + struct ice_sq_cd *cd) +{ + struct ice_aqc_driver_ver *cmd; + struct ice_aq_desc desc; + u16 len; + + cmd = &desc.params.driver_ver; + + if (!dv) + return ICE_ERR_PARAM; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver); + + desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); + cmd->major_ver = dv->major_ver; + cmd->minor_ver = dv->minor_ver; + cmd->build_ver = dv->build_ver; + cmd->subbuild_ver = dv->subbuild_ver; + + len = 0; + while (len < sizeof(dv->driver_string) && + IS_ASCII(dv->driver_string[len]) && dv->driver_string[len]) + len++; + + return ice_aq_send_cmd(hw, &desc, dv->driver_string, len, cd); +} + +/** + * ice_aq_q_shutdown + * @hw: pointer to the HW struct + * @unloading: is the driver unloading itself + * + * Tell the Firmware that we're shutting down the AdminQ and whether + * or not the driver is unloading as well (0x0003). + */ +enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading) +{ + struct ice_aqc_q_shutdown *cmd; + struct ice_aq_desc desc; + + cmd = &desc.params.q_shutdown; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown); + + if (unloading) + cmd->driver_unloading = ICE_AQC_DRIVER_UNLOADING; + + return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); +} + +/** + * ice_aq_req_res + * @hw: pointer to the HW struct + * @res: resource ID + * @access: access type + * @sdp_number: resource number + * @timeout: the maximum time in ms that the driver may hold the resource + * @cd: pointer to command details structure or NULL + * + * Requests common resource using the admin queue commands (0x0008). 
+ * When attempting to acquire the Global Config Lock, the driver can + * learn of three states: + * 1) ICE_SUCCESS - acquired lock, and can perform download package + * 2) ICE_ERR_AQ_ERROR - did not get lock, driver should fail to load + * 3) ICE_ERR_AQ_NO_WORK - did not get lock, but another driver has + * successfully downloaded the package; the driver does + * not have to download the package and can continue + * loading + * + * Note that if the caller is in an acquire lock, perform action, release lock + * phase of operation, it is possible that the FW may detect a timeout and issue + * a CORER. In this case, the driver will receive a CORER interrupt and will + * have to determine its cause. The calling thread that is handling this flow + * will likely get an error propagated back to it indicating the Download + * Package, Update Package or the Release Resource AQ commands timed out. + */ +static enum ice_status +ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res, + enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout, + struct ice_sq_cd *cd) +{ + struct ice_aqc_req_res *cmd_resp; + struct ice_aq_desc desc; + enum ice_status status; + + ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); + + cmd_resp = &desc.params.res_owner; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res); + + cmd_resp->res_id = CPU_TO_LE16(res); + cmd_resp->access_type = CPU_TO_LE16(access); + cmd_resp->res_number = CPU_TO_LE32(sdp_number); + cmd_resp->timeout = CPU_TO_LE32(*timeout); + *timeout = 0; + + status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd); + + /* The completion specifies the maximum time in ms that the driver + * may hold the resource in the Timeout field. + */ + + /* Global config lock response utilizes an additional status field. + * + * If the Global config lock resource is held by some other driver, the + * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field + * and the timeout field indicates the maximum time the current owner + * of the resource has to free it. + */ + if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) { + if (LE16_TO_CPU(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) { + *timeout = LE32_TO_CPU(cmd_resp->timeout); + return ICE_SUCCESS; + } else if (LE16_TO_CPU(cmd_resp->status) == + ICE_AQ_RES_GLBL_IN_PROG) { + *timeout = LE32_TO_CPU(cmd_resp->timeout); + return ICE_ERR_AQ_ERROR; + } else if (LE16_TO_CPU(cmd_resp->status) == + ICE_AQ_RES_GLBL_DONE) { + return ICE_ERR_AQ_NO_WORK; + } + + /* invalid FW response, force a timeout immediately */ + *timeout = 0; + return ICE_ERR_AQ_ERROR; + } + + /* If the resource is held by some other driver, the command completes + * with a busy return value and the timeout field indicates the maximum + * time the current owner of the resource has to free it. 
+ */ + if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY) + *timeout = LE32_TO_CPU(cmd_resp->timeout); + + return status; +} + +/** + * ice_aq_release_res + * @hw: pointer to the HW struct + * @res: resource ID + * @sdp_number: resource number + * @cd: pointer to command details structure or NULL + * + * release common resource using the admin queue commands (0x0009) + */ +static enum ice_status +ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number, + struct ice_sq_cd *cd) +{ + struct ice_aqc_req_res *cmd; + struct ice_aq_desc desc; + + ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); + + cmd = &desc.params.res_owner; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res); + + cmd->res_id = CPU_TO_LE16(res); + cmd->res_number = CPU_TO_LE32(sdp_number); + + return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); +} + +/** + * ice_acquire_res + * @hw: pointer to the HW structure + * @res: resource ID + * @access: access type (read or write) + * @timeout: timeout in milliseconds + * + * This function will attempt to acquire the ownership of a resource. + */ +enum ice_status +ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res, + enum ice_aq_res_access_type access, u32 timeout) +{ +#define ICE_RES_POLLING_DELAY_MS 10 + u32 delay = ICE_RES_POLLING_DELAY_MS; + u32 time_left = timeout; + enum ice_status status; + + ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); + + status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL); + + /* A return code of ICE_ERR_AQ_NO_WORK means that another driver has + * previously acquired the resource and performed any necessary updates; + * in this case the caller does not obtain the resource and has no + * further work to do. + */ + if (status == ICE_ERR_AQ_NO_WORK) + goto ice_acquire_res_exit; + + if (status) + ice_debug(hw, ICE_DBG_RES, + "resource %d acquire type %d failed.\n", res, access); + + /* If necessary, poll until the current lock owner timeouts */ + timeout = time_left; + while (status && timeout && time_left) { + ice_msec_delay(delay, true); + timeout = (timeout > delay) ? timeout - delay : 0; + status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL); + + if (status == ICE_ERR_AQ_NO_WORK) + /* lock free, but no work to do */ + break; + + if (!status) + /* lock acquired */ + break; + } + if (status && status != ICE_ERR_AQ_NO_WORK) + ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n"); + +ice_acquire_res_exit: + if (status == ICE_ERR_AQ_NO_WORK) { + if (access == ICE_RES_WRITE) + ice_debug(hw, ICE_DBG_RES, + "resource indicates no work to do.\n"); + else + ice_debug(hw, ICE_DBG_RES, + "Warning: ICE_ERR_AQ_NO_WORK not expected\n"); + } + return status; +} + +/** + * ice_release_res + * @hw: pointer to the HW structure + * @res: resource ID + * + * This function will release a resource using the proper Admin Command. 
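+ *
+ * A minimal pairing sketch (hypothetical caller; ICE_NVM_RES_ID and
+ * ICE_NVM_TIMEOUT are assumed to come from the NVM/resource definitions
+ * outside this file): every successful ice_acquire_res() is expected to
+ * be matched by an ice_release_res() on the same resource ID:
+ *
+ *     if (!ice_acquire_res(hw, ICE_NVM_RES_ID, ICE_RES_READ,
+ *                          ICE_NVM_TIMEOUT)) {
+ *             ... access the resource ...
+ *             ice_release_res(hw, ICE_NVM_RES_ID);
+ *     }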
+ */ +void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res) +{ + enum ice_status status; + u32 total_delay = 0; + + ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); + + status = ice_aq_release_res(hw, res, 0, NULL); + + /* there are some rare cases when trying to release the resource + * results in an admin queue timeout, so handle them correctly + */ + while ((status == ICE_ERR_AQ_TIMEOUT) && + (total_delay < hw->adminq.sq_cmd_timeout)) { + ice_msec_delay(1, true); + status = ice_aq_release_res(hw, res, 0, NULL); + total_delay++; + } +} + +/** + * ice_aq_alloc_free_res - command to allocate/free resources + * @hw: pointer to the HW struct + * @num_entries: number of resource entries in buffer + * @buf: Indirect buffer to hold data parameters and response + * @buf_size: size of buffer for indirect commands + * @opc: pass in the command opcode + * @cd: pointer to command details structure or NULL + * + * Helper function to allocate/free resources using the admin queue commands + */ +enum ice_status +ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries, + struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size, + enum ice_adminq_opc opc, struct ice_sq_cd *cd) +{ + struct ice_aqc_alloc_free_res_cmd *cmd; + struct ice_aq_desc desc; + + ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); + + cmd = &desc.params.sw_res_ctrl; + + if (!buf) + return ICE_ERR_PARAM; + + if (buf_size < (num_entries * sizeof(buf->elem[0]))) + return ICE_ERR_PARAM; + + ice_fill_dflt_direct_cmd_desc(&desc, opc); + + desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); + + cmd->num_entries = CPU_TO_LE16(num_entries); + + return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); +} + +/** + * ice_alloc_hw_res - allocate resource + * @hw: pointer to the HW struct + * @type: type of resource + * @num: number of resources to allocate + * @btm: allocate from bottom + * @res: pointer to array that will receive the resources + */ +enum ice_status +ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res) +{ + struct ice_aqc_alloc_free_res_elem *buf; + enum ice_status status; + u16 buf_len; + + buf_len = ice_struct_size(buf, elem, num - 1); + buf = (struct ice_aqc_alloc_free_res_elem *) + ice_malloc(hw, buf_len); + if (!buf) + return ICE_ERR_NO_MEMORY; + + /* Prepare buffer to allocate resource. */ + buf->num_elems = CPU_TO_LE16(num); + buf->res_type = CPU_TO_LE16(type | ICE_AQC_RES_TYPE_FLAG_DEDICATED | + ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX); + if (btm) + buf->res_type |= CPU_TO_LE16(ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM); + + status = ice_aq_alloc_free_res(hw, 1, buf, buf_len, + ice_aqc_opc_alloc_res, NULL); + if (status) + goto ice_alloc_res_exit; + + ice_memcpy(res, buf->elem, sizeof(buf->elem) * num, + ICE_NONDMA_TO_NONDMA); + +ice_alloc_res_exit: + ice_free(hw, buf); + return status; +} + +/** + * ice_free_hw_res - free allocated HW resource + * @hw: pointer to the HW struct + * @type: type of resource to free + * @num: number of resources + * @res: pointer to array that contains the resources to free + */ +enum ice_status +ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res) +{ + struct ice_aqc_alloc_free_res_elem *buf; + enum ice_status status; + u16 buf_len; + + buf_len = ice_struct_size(buf, elem, num - 1); + buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len); + if (!buf) + return ICE_ERR_NO_MEMORY; + + /* Prepare buffer to free resource. 
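+ * The buffer is the same ice_aqc_alloc_free_res_elem layout used by
+ * ice_alloc_hw_res() above: num_elems and res_type form the header,
+ * followed by the num resource IDs copied in from @res.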
*/ + buf->num_elems = CPU_TO_LE16(num); + buf->res_type = CPU_TO_LE16(type); + ice_memcpy(buf->elem, res, sizeof(buf->elem) * num, + ICE_NONDMA_TO_NONDMA); + + status = ice_aq_alloc_free_res(hw, num, buf, buf_len, + ice_aqc_opc_free_res, NULL); + if (status) + ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n"); + + ice_free(hw, buf); + return status; +} + +/** + * ice_get_num_per_func - determine number of resources per PF + * @hw: pointer to the HW structure + * @max: value to be evenly split between each PF + * + * Determine the number of valid functions by going through the bitmap returned + * from parsing capabilities and use this to calculate the number of resources + * per PF based on the max value passed in. + */ +static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max) +{ + u8 funcs; + +#define ICE_CAPS_VALID_FUNCS_M 0xFF + funcs = ice_hweight8(hw->dev_caps.common_cap.valid_functions & + ICE_CAPS_VALID_FUNCS_M); + + if (!funcs) + return 0; + + return max / funcs; +} + +/** + * ice_parse_caps - parse function/device capabilities + * @hw: pointer to the HW struct + * @buf: pointer to a buffer containing function/device capability records + * @cap_count: number of capability records in the list + * @opc: type of capabilities list to parse + * + * Helper function to parse function(0x000a)/device(0x000b) capabilities list. + */ +static void +ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count, + enum ice_adminq_opc opc) +{ + struct ice_aqc_list_caps_elem *cap_resp; + struct ice_hw_func_caps *func_p = NULL; + struct ice_hw_dev_caps *dev_p = NULL; + struct ice_hw_common_caps *caps; + char const *prefix; + u32 i; + + if (!buf) + return; + + cap_resp = (struct ice_aqc_list_caps_elem *)buf; + + if (opc == ice_aqc_opc_list_dev_caps) { + dev_p = &hw->dev_caps; + caps = &dev_p->common_cap; + prefix = "dev cap"; + } else if (opc == ice_aqc_opc_list_func_caps) { + func_p = &hw->func_caps; + caps = &func_p->common_cap; + prefix = "func cap"; + } else { + ice_debug(hw, ICE_DBG_INIT, "wrong opcode\n"); + return; + } + + for (i = 0; caps && i < cap_count; i++, cap_resp++) { + u32 logical_id = LE32_TO_CPU(cap_resp->logical_id); + u32 phys_id = LE32_TO_CPU(cap_resp->phys_id); + u32 number = LE32_TO_CPU(cap_resp->number); + u16 cap = LE16_TO_CPU(cap_resp->cap); + + switch (cap) { + case ICE_AQC_CAPS_VALID_FUNCTIONS: + caps->valid_functions = number; + ice_debug(hw, ICE_DBG_INIT, + "%s: valid_functions (bitmap) = %d\n", prefix, + caps->valid_functions); + + /* store func count for resource management purposes */ + if (dev_p) + dev_p->num_funcs = ice_hweight32(number); + break; + case ICE_AQC_CAPS_VSI: + if (dev_p) { + dev_p->num_vsi_allocd_to_host = number; + ice_debug(hw, ICE_DBG_INIT, + "%s: num_vsi_allocd_to_host = %d\n", + prefix, + dev_p->num_vsi_allocd_to_host); + } else if (func_p) { + func_p->guar_num_vsi = + ice_get_num_per_func(hw, ICE_MAX_VSI); + ice_debug(hw, ICE_DBG_INIT, + "%s: guar_num_vsi (fw) = %d\n", + prefix, number); + ice_debug(hw, ICE_DBG_INIT, + "%s: guar_num_vsi = %d\n", + prefix, func_p->guar_num_vsi); + } + break; + case ICE_AQC_CAPS_DCB: + caps->dcb = (number == 1); + caps->active_tc_bitmap = logical_id; + caps->maxtc = phys_id; + ice_debug(hw, ICE_DBG_INIT, + "%s: dcb = %d\n", prefix, caps->dcb); + ice_debug(hw, ICE_DBG_INIT, + "%s: active_tc_bitmap = %d\n", prefix, + caps->active_tc_bitmap); + ice_debug(hw, ICE_DBG_INIT, + "%s: maxtc = %d\n", prefix, caps->maxtc); + break; + case ICE_AQC_CAPS_RSS: + caps->rss_table_size = number; + caps->rss_table_entry_width = 
logical_id; + ice_debug(hw, ICE_DBG_INIT, + "%s: rss_table_size = %d\n", prefix, + caps->rss_table_size); + ice_debug(hw, ICE_DBG_INIT, + "%s: rss_table_entry_width = %d\n", prefix, + caps->rss_table_entry_width); + break; + case ICE_AQC_CAPS_RXQS: + caps->num_rxq = number; + caps->rxq_first_id = phys_id; + ice_debug(hw, ICE_DBG_INIT, + "%s: num_rxq = %d\n", prefix, + caps->num_rxq); + ice_debug(hw, ICE_DBG_INIT, + "%s: rxq_first_id = %d\n", prefix, + caps->rxq_first_id); + break; + case ICE_AQC_CAPS_TXQS: + caps->num_txq = number; + caps->txq_first_id = phys_id; + ice_debug(hw, ICE_DBG_INIT, + "%s: num_txq = %d\n", prefix, + caps->num_txq); + ice_debug(hw, ICE_DBG_INIT, + "%s: txq_first_id = %d\n", prefix, + caps->txq_first_id); + break; + case ICE_AQC_CAPS_MSIX: + caps->num_msix_vectors = number; + caps->msix_vector_first_id = phys_id; + ice_debug(hw, ICE_DBG_INIT, + "%s: num_msix_vectors = %d\n", prefix, + caps->num_msix_vectors); + ice_debug(hw, ICE_DBG_INIT, + "%s: msix_vector_first_id = %d\n", prefix, + caps->msix_vector_first_id); + break; + case ICE_AQC_CAPS_FD: + if (dev_p) { + dev_p->num_flow_director_fltr = number; + ice_debug(hw, ICE_DBG_INIT, + "%s: num_flow_director_fltr = %d\n", + prefix, + dev_p->num_flow_director_fltr); + } + if (func_p) { + u32 reg_val, val; + if (hw->dcf_enabled) + break; + reg_val = rd32(hw, GLQF_FD_SIZE); + val = (reg_val & GLQF_FD_SIZE_FD_GSIZE_M) >> + GLQF_FD_SIZE_FD_GSIZE_S; + func_p->fd_fltr_guar = + ice_get_num_per_func(hw, val); + val = (reg_val & GLQF_FD_SIZE_FD_BSIZE_M) >> + GLQF_FD_SIZE_FD_BSIZE_S; + func_p->fd_fltr_best_effort = val; + ice_debug(hw, ICE_DBG_INIT, + "%s: fd_fltr_guar = %d\n", + prefix, func_p->fd_fltr_guar); + ice_debug(hw, ICE_DBG_INIT, + "%s: fd_fltr_best_effort = %d\n", + prefix, func_p->fd_fltr_best_effort); + } + break; + case ICE_AQC_CAPS_MAX_MTU: + caps->max_mtu = number; + ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n", + prefix, caps->max_mtu); + break; + default: + ice_debug(hw, ICE_DBG_INIT, + "%s: unknown capability[%d]: 0x%x\n", prefix, + i, cap); + break; + } + } + + /* Re-calculate capabilities that are dependent on the number of + * physical ports; i.e. some features are not supported or function + * differently on devices with more than 4 ports. + */ + if (hw->dev_caps.num_funcs > 4) { + /* Max 4 TCs per port */ + caps->maxtc = 4; + ice_debug(hw, ICE_DBG_INIT, + "%s: maxtc = %d (based on #ports)\n", prefix, + caps->maxtc); + } +} + +/** + * ice_aq_discover_caps - query function/device capabilities + * @hw: pointer to the HW struct + * @buf: a virtual buffer to hold the capabilities + * @buf_size: Size of the virtual buffer + * @cap_count: cap count needed if AQ err==ENOMEM + * @opc: capabilities type to discover - pass in the command opcode + * @cd: pointer to command details structure or NULL + * + * Get the function(0x000a)/device(0x000b) capabilities description from + * the firmware. 
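+ *
+ * If the supplied buffer is too small, firmware responds with
+ * ICE_AQ_RC_ENOMEM and reports the number of capability elements it
+ * needs through @cap_count, so the caller can reallocate and retry;
+ * ice_discover_caps() below implements exactly that retry loop.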
+ */ +static enum ice_status +ice_aq_discover_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count, + enum ice_adminq_opc opc, struct ice_sq_cd *cd) +{ + struct ice_aqc_list_caps *cmd; + struct ice_aq_desc desc; + enum ice_status status; + + cmd = &desc.params.get_cap; + + if (opc != ice_aqc_opc_list_func_caps && + opc != ice_aqc_opc_list_dev_caps) + return ICE_ERR_PARAM; + + ice_fill_dflt_direct_cmd_desc(&desc, opc); + + status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); + if (!status) + ice_parse_caps(hw, buf, LE32_TO_CPU(cmd->count), opc); + else if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOMEM) + *cap_count = LE32_TO_CPU(cmd->count); + return status; +} + +/** + * ice_discover_caps - get info about the HW + * @hw: pointer to the hardware structure + * @opc: capabilities type to discover - pass in the command opcode + */ +static enum ice_status +ice_discover_caps(struct ice_hw *hw, enum ice_adminq_opc opc) +{ + enum ice_status status; + u32 cap_count; + u16 cbuf_len; + u8 retries; + + /* The driver doesn't know how many capabilities the device will return + * so the buffer size required isn't known ahead of time. The driver + * starts with cbuf_len and if this turns out to be insufficient, the + * device returns ICE_AQ_RC_ENOMEM and also the cap_count it needs. + * The driver then allocates the buffer based on the count and retries + * the operation. So it follows that the retry count is 2. + */ +#define ICE_GET_CAP_BUF_COUNT 40 +#define ICE_GET_CAP_RETRY_COUNT 2 + + cap_count = ICE_GET_CAP_BUF_COUNT; + retries = ICE_GET_CAP_RETRY_COUNT; + + do { + void *cbuf; + + cbuf_len = (u16)(cap_count * + sizeof(struct ice_aqc_list_caps_elem)); + cbuf = ice_malloc(hw, cbuf_len); + if (!cbuf) + return ICE_ERR_NO_MEMORY; + + status = ice_aq_discover_caps(hw, cbuf, cbuf_len, &cap_count, + opc, NULL); + ice_free(hw, cbuf); + + if (!status || hw->adminq.sq_last_status != ICE_AQ_RC_ENOMEM) + break; + + /* If ENOMEM is returned, try again with bigger buffer */ + } while (--retries); + + return status; +} + +/** + * ice_set_safe_mode_caps - Override dev/func capabilities when in safe mode + * @hw: pointer to the hardware structure + */ +void ice_set_safe_mode_caps(struct ice_hw *hw) +{ + struct ice_hw_func_caps *func_caps = &hw->func_caps; + struct ice_hw_dev_caps *dev_caps = &hw->dev_caps; + u32 valid_func, rxq_first_id, txq_first_id; + u32 msix_vector_first_id, max_mtu; + u32 num_funcs; + + /* cache some func_caps values that should be restored after memset */ + valid_func = func_caps->common_cap.valid_functions; + txq_first_id = func_caps->common_cap.txq_first_id; + rxq_first_id = func_caps->common_cap.rxq_first_id; + msix_vector_first_id = func_caps->common_cap.msix_vector_first_id; + max_mtu = func_caps->common_cap.max_mtu; + + /* unset func capabilities */ + memset(func_caps, 0, sizeof(*func_caps)); + + /* restore cached values */ + func_caps->common_cap.valid_functions = valid_func; + func_caps->common_cap.txq_first_id = txq_first_id; + func_caps->common_cap.rxq_first_id = rxq_first_id; + func_caps->common_cap.msix_vector_first_id = msix_vector_first_id; + func_caps->common_cap.max_mtu = max_mtu; + + /* one Tx and one Rx queue in safe mode */ + func_caps->common_cap.num_rxq = 1; + func_caps->common_cap.num_txq = 1; + + /* two MSIX vectors, one for traffic and one for misc causes */ + func_caps->common_cap.num_msix_vectors = 2; + func_caps->guar_num_vsi = 1; + + /* cache some dev_caps values that should be restored after memset */ + valid_func = dev_caps->common_cap.valid_functions; 
+ txq_first_id = dev_caps->common_cap.txq_first_id; + rxq_first_id = dev_caps->common_cap.rxq_first_id; + msix_vector_first_id = dev_caps->common_cap.msix_vector_first_id; + max_mtu = dev_caps->common_cap.max_mtu; + num_funcs = dev_caps->num_funcs; + + /* unset dev capabilities */ + memset(dev_caps, 0, sizeof(*dev_caps)); + + /* restore cached values */ + dev_caps->common_cap.valid_functions = valid_func; + dev_caps->common_cap.txq_first_id = txq_first_id; + dev_caps->common_cap.rxq_first_id = rxq_first_id; + dev_caps->common_cap.msix_vector_first_id = msix_vector_first_id; + dev_caps->common_cap.max_mtu = max_mtu; + dev_caps->num_funcs = num_funcs; + + /* one Tx and one Rx queue per function in safe mode */ + dev_caps->common_cap.num_rxq = num_funcs; + dev_caps->common_cap.num_txq = num_funcs; + + /* two MSIX vectors per function */ + dev_caps->common_cap.num_msix_vectors = 2 * num_funcs; +} + +/** + * ice_get_caps - get info about the HW + * @hw: pointer to the hardware structure + */ +enum ice_status ice_get_caps(struct ice_hw *hw) +{ + enum ice_status status; + + status = ice_discover_caps(hw, ice_aqc_opc_list_dev_caps); + if (!status) + status = ice_discover_caps(hw, ice_aqc_opc_list_func_caps); + + return status; +} + +/** + * ice_aq_manage_mac_write - manage MAC address write command + * @hw: pointer to the HW struct + * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address + * @flags: flags to control write behavior + * @cd: pointer to command details structure or NULL + * + * This function is used to write MAC address to the NVM (0x0108). + */ +enum ice_status +ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags, + struct ice_sq_cd *cd) +{ + struct ice_aqc_manage_mac_write *cmd; + struct ice_aq_desc desc; + + cmd = &desc.params.mac_write; + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write); + + cmd->flags = flags; + ice_memcpy(cmd->mac_addr, mac_addr, ETH_ALEN, ICE_NONDMA_TO_DMA); + + return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); +} + +/** + * ice_aq_clear_pxe_mode + * @hw: pointer to the HW struct + * + * Tell the firmware that the driver is taking over from PXE (0x0110). + */ +static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw) +{ + struct ice_aq_desc desc; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode); + desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT; + + return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); +} + +/** + * ice_clear_pxe_mode - clear pxe operations mode + * @hw: pointer to the HW struct + * + * Make sure all PXE mode settings are cleared, including things + * like descriptor fetch/write-back mode. + */ +void ice_clear_pxe_mode(struct ice_hw *hw) +{ + if (ice_check_sq_alive(hw, &hw->adminq)) + ice_aq_clear_pxe_mode(hw); +} + +/** + * ice_get_link_speed_based_on_phy_type - returns link speed + * @phy_type_low: lower part of phy_type + * @phy_type_high: higher part of phy_type + * + * This helper function will convert an entry in PHY type structure + * [phy_type_low, phy_type_high] to its corresponding link speed. + * Note: In the structure of [phy_type_low, phy_type_high], there should + * be one bit set, as this function will convert one PHY type to its + * speed. 
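+ * For example, a phy_type_low value with only ICE_PHY_TYPE_LOW_25GBASE_CR
+ * set maps to ICE_AQ_LINK_SPEED_25GB.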
+ * If no bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned + * If more than one bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned + */ +static u16 +ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high) +{ + u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN; + u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN; + + switch (phy_type_low) { + case ICE_PHY_TYPE_LOW_100BASE_TX: + case ICE_PHY_TYPE_LOW_100M_SGMII: + speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB; + break; + case ICE_PHY_TYPE_LOW_1000BASE_T: + case ICE_PHY_TYPE_LOW_1000BASE_SX: + case ICE_PHY_TYPE_LOW_1000BASE_LX: + case ICE_PHY_TYPE_LOW_1000BASE_KX: + case ICE_PHY_TYPE_LOW_1G_SGMII: + speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB; + break; + case ICE_PHY_TYPE_LOW_2500BASE_T: + case ICE_PHY_TYPE_LOW_2500BASE_X: + case ICE_PHY_TYPE_LOW_2500BASE_KX: + speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB; + break; + case ICE_PHY_TYPE_LOW_5GBASE_T: + case ICE_PHY_TYPE_LOW_5GBASE_KR: + speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB; + break; + case ICE_PHY_TYPE_LOW_10GBASE_T: + case ICE_PHY_TYPE_LOW_10G_SFI_DA: + case ICE_PHY_TYPE_LOW_10GBASE_SR: + case ICE_PHY_TYPE_LOW_10GBASE_LR: + case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1: + case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC: + case ICE_PHY_TYPE_LOW_10G_SFI_C2C: + speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB; + break; + case ICE_PHY_TYPE_LOW_25GBASE_T: + case ICE_PHY_TYPE_LOW_25GBASE_CR: + case ICE_PHY_TYPE_LOW_25GBASE_CR_S: + case ICE_PHY_TYPE_LOW_25GBASE_CR1: + case ICE_PHY_TYPE_LOW_25GBASE_SR: + case ICE_PHY_TYPE_LOW_25GBASE_LR: + case ICE_PHY_TYPE_LOW_25GBASE_KR: + case ICE_PHY_TYPE_LOW_25GBASE_KR_S: + case ICE_PHY_TYPE_LOW_25GBASE_KR1: + case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC: + case ICE_PHY_TYPE_LOW_25G_AUI_C2C: + speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB; + break; + case ICE_PHY_TYPE_LOW_40GBASE_CR4: + case ICE_PHY_TYPE_LOW_40GBASE_SR4: + case ICE_PHY_TYPE_LOW_40GBASE_LR4: + case ICE_PHY_TYPE_LOW_40GBASE_KR4: + case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC: + case ICE_PHY_TYPE_LOW_40G_XLAUI: + speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB; + break; + case ICE_PHY_TYPE_LOW_50GBASE_CR2: + case ICE_PHY_TYPE_LOW_50GBASE_SR2: + case ICE_PHY_TYPE_LOW_50GBASE_LR2: + case ICE_PHY_TYPE_LOW_50GBASE_KR2: + case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC: + case ICE_PHY_TYPE_LOW_50G_LAUI2: + case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC: + case ICE_PHY_TYPE_LOW_50G_AUI2: + case ICE_PHY_TYPE_LOW_50GBASE_CP: + case ICE_PHY_TYPE_LOW_50GBASE_SR: + case ICE_PHY_TYPE_LOW_50GBASE_FR: + case ICE_PHY_TYPE_LOW_50GBASE_LR: + case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4: + case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC: + case ICE_PHY_TYPE_LOW_50G_AUI1: + speed_phy_type_low = ICE_AQ_LINK_SPEED_50GB; + break; + case ICE_PHY_TYPE_LOW_100GBASE_CR4: + case ICE_PHY_TYPE_LOW_100GBASE_SR4: + case ICE_PHY_TYPE_LOW_100GBASE_LR4: + case ICE_PHY_TYPE_LOW_100GBASE_KR4: + case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC: + case ICE_PHY_TYPE_LOW_100G_CAUI4: + case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC: + case ICE_PHY_TYPE_LOW_100G_AUI4: + case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4: + case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4: + case ICE_PHY_TYPE_LOW_100GBASE_CP2: + case ICE_PHY_TYPE_LOW_100GBASE_SR2: + case ICE_PHY_TYPE_LOW_100GBASE_DR: + speed_phy_type_low = ICE_AQ_LINK_SPEED_100GB; + break; + default: + speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN; + break; + } + + switch (phy_type_high) { + case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4: + case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC: + case ICE_PHY_TYPE_HIGH_100G_CAUI2: + case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC: + case 
ICE_PHY_TYPE_HIGH_100G_AUI2: + speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB; + break; + default: + speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN; + break; + } + + if (speed_phy_type_low == ICE_AQ_LINK_SPEED_UNKNOWN && + speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN) + return ICE_AQ_LINK_SPEED_UNKNOWN; + else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN && + speed_phy_type_high != ICE_AQ_LINK_SPEED_UNKNOWN) + return ICE_AQ_LINK_SPEED_UNKNOWN; + else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN && + speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN) + return speed_phy_type_low; + else + return speed_phy_type_high; +} + +/** + * ice_update_phy_type + * @phy_type_low: pointer to the lower part of phy_type + * @phy_type_high: pointer to the higher part of phy_type + * @link_speeds_bitmap: targeted link speeds bitmap + * + * Note: For the link_speeds_bitmap structure, you can check it at + * [ice_aqc_get_link_status->link_speed]. Caller can pass in + * link_speeds_bitmap include multiple speeds. + * + * Each entry in this [phy_type_low, phy_type_high] structure will + * present a certain link speed. This helper function will turn on bits + * in [phy_type_low, phy_type_high] structure based on the value of + * link_speeds_bitmap input parameter. + */ +void +ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high, + u16 link_speeds_bitmap) +{ + u64 pt_high; + u64 pt_low; + int index; + u16 speed; + + /* We first check with low part of phy_type */ + for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) { + pt_low = BIT_ULL(index); + speed = ice_get_link_speed_based_on_phy_type(pt_low, 0); + + if (link_speeds_bitmap & speed) + *phy_type_low |= BIT_ULL(index); + } + + /* We then check with high part of phy_type */ + for (index = 0; index <= ICE_PHY_TYPE_HIGH_MAX_INDEX; index++) { + pt_high = BIT_ULL(index); + speed = ice_get_link_speed_based_on_phy_type(0, pt_high); + + if (link_speeds_bitmap & speed) + *phy_type_high |= BIT_ULL(index); + } +} + +/** + * ice_aq_set_phy_cfg + * @hw: pointer to the HW struct + * @pi: port info structure of the interested logical port + * @cfg: structure with PHY configuration data to be set + * @cd: pointer to command details structure or NULL + * + * Set the various PHY configuration parameters supported on the Port. + * One or more of the Set PHY config parameters may be ignored in an MFP + * mode as the PF may not have the privilege to set some of the PHY Config + * parameters. This status will be indicated by the command response (0x0601). + */ +enum ice_status +ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi, + struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd) +{ + struct ice_aq_desc desc; + enum ice_status status; + + if (!cfg) + return ICE_ERR_PARAM; + + /* Ensure that only valid bits of cfg->caps can be turned on. 
*/ + if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) { + ice_debug(hw, ICE_DBG_PHY, + "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n", + cfg->caps); + + cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK; + } + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg); + desc.params.set_phy.lport_num = pi->lport; + desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); + + ice_debug(hw, ICE_DBG_LINK, "phy_type_low = 0x%llx\n", + (unsigned long long)LE64_TO_CPU(cfg->phy_type_low)); + ice_debug(hw, ICE_DBG_LINK, "phy_type_high = 0x%llx\n", + (unsigned long long)LE64_TO_CPU(cfg->phy_type_high)); + ice_debug(hw, ICE_DBG_LINK, "caps = 0x%x\n", cfg->caps); + ice_debug(hw, ICE_DBG_LINK, "low_power_ctrl_an = 0x%x\n", + cfg->low_power_ctrl_an); + ice_debug(hw, ICE_DBG_LINK, "eee_cap = 0x%x\n", cfg->eee_cap); + ice_debug(hw, ICE_DBG_LINK, "eeer_value = 0x%x\n", cfg->eeer_value); + ice_debug(hw, ICE_DBG_LINK, "link_fec_opt = 0x%x\n", cfg->link_fec_opt); + + status = ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd); + + if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE) + status = ICE_SUCCESS; + + if (!status) + pi->phy.curr_user_phy_cfg = *cfg; + + return status; +} + +/** + * ice_update_link_info - update status of the HW network link + * @pi: port info structure of the interested logical port + */ +enum ice_status ice_update_link_info(struct ice_port_info *pi) +{ + struct ice_link_status *li; + enum ice_status status; + + if (!pi) + return ICE_ERR_PARAM; + + li = &pi->phy.link_info; + + status = ice_aq_get_link_info(pi, true, NULL, NULL); + if (status) + return status; + + if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) { + struct ice_aqc_get_phy_caps_data *pcaps; + struct ice_hw *hw; + + hw = pi->hw; + pcaps = (struct ice_aqc_get_phy_caps_data *) + ice_malloc(hw, sizeof(*pcaps)); + if (!pcaps) + return ICE_ERR_NO_MEMORY; + + status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, + pcaps, NULL); + if (status == ICE_SUCCESS) + ice_memcpy(li->module_type, &pcaps->module_type, + sizeof(li->module_type), + ICE_NONDMA_TO_NONDMA); + + ice_free(hw, pcaps); + } + + return status; +} + +/** + * ice_cache_phy_user_req + * @pi: port information structure + * @cache_data: PHY logging data + * @cache_mode: PHY logging mode + * + * Log the user request on (FC, FEC, SPEED) for later user. 
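+ *
+ * A minimal sketch of caching a user speed request (hypothetical value,
+ * mirroring the ICE_FC_MODE usage in ice_cfg_phy_fc() below):
+ *
+ *     struct ice_phy_cache_mode_data cache_data;
+ *
+ *     cache_data.data.curr_user_speed_req = ICE_AQ_LINK_SPEED_25GB;
+ *     ice_cache_phy_user_req(pi, cache_data, ICE_SPEED_MODE);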
+ */ +static void +ice_cache_phy_user_req(struct ice_port_info *pi, + struct ice_phy_cache_mode_data cache_data, + enum ice_phy_cache_mode cache_mode) +{ + if (!pi) + return; + + switch (cache_mode) { + case ICE_FC_MODE: + pi->phy.curr_user_fc_req = cache_data.data.curr_user_fc_req; + break; + case ICE_SPEED_MODE: + pi->phy.curr_user_speed_req = + cache_data.data.curr_user_speed_req; + break; + case ICE_FEC_MODE: + pi->phy.curr_user_fec_req = cache_data.data.curr_user_fec_req; + break; + default: + break; + } +} + +/** + * ice_caps_to_fc_mode + * @caps: PHY capabilities + * + * Convert PHY FC capabilities to ice FC mode + */ +enum ice_fc_mode ice_caps_to_fc_mode(u8 caps) +{ + if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE && + caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE) + return ICE_FC_FULL; + + if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE) + return ICE_FC_TX_PAUSE; + + if (caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE) + return ICE_FC_RX_PAUSE; + + return ICE_FC_NONE; +} + +/** + * ice_caps_to_fec_mode + * @caps: PHY capabilities + * @fec_options: Link FEC options + * + * Convert PHY FEC capabilities to ice FEC mode + */ +enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options) +{ + if (caps & ICE_AQC_PHY_EN_AUTO_FEC) + return ICE_FEC_AUTO; + + if (fec_options & (ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN | + ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ | + ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN | + ICE_AQC_PHY_FEC_25G_KR_REQ)) + return ICE_FEC_BASER; + + if (fec_options & (ICE_AQC_PHY_FEC_25G_RS_528_REQ | + ICE_AQC_PHY_FEC_25G_RS_544_REQ | + ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN)) + return ICE_FEC_RS; + + return ICE_FEC_NONE; +} + +static enum ice_status +ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, + enum ice_fc_mode req_mode) +{ + struct ice_aqc_get_phy_caps_data *pcaps = NULL; + struct ice_phy_cache_mode_data cache_data; + enum ice_status status = ICE_SUCCESS; + u8 pause_mask = 0x0; + + if (!pi || !cfg) + return ICE_ERR_BAD_PTR; + + pcaps = (struct ice_aqc_get_phy_caps_data *) + ice_malloc(pi->hw, sizeof(*pcaps)); + if (!pcaps) + return ICE_ERR_NO_MEMORY; + + /* Cache user FC request */ + cache_data.data.curr_user_fc_req = req_mode; + ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE); + + switch (req_mode) { + case ICE_FC_AUTO: + /* Query the value of FC that both the NIC and attached media + * can do. + */ + status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, + pcaps, NULL); + if (status) + goto out; + + pause_mask |= pcaps->caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE; + pause_mask |= pcaps->caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE; + break; + case ICE_FC_FULL: + pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE; + pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE; + break; + case ICE_FC_RX_PAUSE: + pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE; + break; + case ICE_FC_TX_PAUSE: + pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE; + break; + default: + break; + } + + /* clear the old pause settings */ + cfg->caps &= ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE | + ICE_AQC_PHY_EN_RX_LINK_PAUSE); + + /* set the new capabilities */ + cfg->caps |= pause_mask; + +out: + ice_free(pi->hw, pcaps); + return status; +} + +/** + * ice_set_fc + * @pi: port information structure + * @aq_failures: pointer to status code, specific to ice_set_fc routine + * @ena_auto_link_update: enable automatic link update + * + * Set the requested flow control mode. 
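+ *
+ * A minimal caller sketch (hypothetical, not taken from this file): the
+ * requested mode is read from pi->fc.req_mode, so it must be set before
+ * the call:
+ *
+ *     enum ice_status status;
+ *     u8 aq_failures = 0;
+ *
+ *     pi->fc.req_mode = ICE_FC_FULL;
+ *     status = ice_set_fc(pi, &aq_failures, true);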
+ */ +enum ice_status +ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update) +{ + struct ice_aqc_set_phy_cfg_data cfg = { 0 }; + struct ice_aqc_get_phy_caps_data *pcaps; + enum ice_status status; + struct ice_hw *hw; + + if (!pi || !aq_failures) + return ICE_ERR_BAD_PTR; + + hw = pi->hw; + + pcaps = (struct ice_aqc_get_phy_caps_data *) + ice_malloc(hw, sizeof(*pcaps)); + if (!pcaps) + return ICE_ERR_NO_MEMORY; + + /* Get the current PHY config */ + status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps, + NULL); + if (status) { + *aq_failures = ICE_SET_FC_AQ_FAIL_GET; + goto out; + } + + ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg); + + /* Configure the set phy data */ + status = ice_cfg_phy_fc(pi, &cfg, pi->fc.req_mode); + if (status) { + if (status != ICE_ERR_BAD_PTR) + *aq_failures = ICE_SET_FC_AQ_FAIL_GET; + + goto out; + } + + /* If the capabilities have changed, then set the new config */ + if (cfg.caps != pcaps->caps) { + int retry_count, retry_max = 10; + + /* Auto restart link so settings take effect */ + if (ena_auto_link_update) + cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; + + status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL); + if (status) { + *aq_failures = ICE_SET_FC_AQ_FAIL_SET; + goto out; + } + + /* Update the link info + * It sometimes takes a really long time for link to + * come back from the atomic reset. Thus, we wait a + * little bit. + */ + for (retry_count = 0; retry_count < retry_max; retry_count++) { + status = ice_update_link_info(pi); + + if (status == ICE_SUCCESS) + break; + + ice_msec_delay(100, true); + } + + if (status) + *aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE; + } + +out: + ice_free(hw, pcaps); + return status; +} + +/** + * ice_phy_caps_equals_cfg + * @phy_caps: PHY capabilities + * @phy_cfg: PHY configuration + * + * Helper function to determine if PHY capabilities matches PHY + * configuration + */ +bool +ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *phy_caps, + struct ice_aqc_set_phy_cfg_data *phy_cfg) +{ + u8 caps_mask, cfg_mask; + + if (!phy_caps || !phy_cfg) + return false; + + /* These bits are not common between capabilities and configuration. + * Do not use them to determine equality. 
+ */ + caps_mask = ICE_AQC_PHY_CAPS_MASK & ~(ICE_AQC_PHY_AN_MODE | + ICE_AQC_PHY_EN_MOD_QUAL); + cfg_mask = ICE_AQ_PHY_ENA_VALID_MASK & ~ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; + + if (phy_caps->phy_type_low != phy_cfg->phy_type_low || + phy_caps->phy_type_high != phy_cfg->phy_type_high || + ((phy_caps->caps & caps_mask) != (phy_cfg->caps & cfg_mask)) || + phy_caps->low_power_ctrl_an != phy_cfg->low_power_ctrl_an || + phy_caps->eee_cap != phy_cfg->eee_cap || + phy_caps->eeer_value != phy_cfg->eeer_value || + phy_caps->link_fec_options != phy_cfg->link_fec_opt) + return false; + + return true; +} + +/** + * ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data + * @pi: port information structure + * @caps: PHY ability structure to copy date from + * @cfg: PHY configuration structure to copy data to + * + * Helper function to copy AQC PHY get ability data to PHY set configuration + * data structure + */ +void +ice_copy_phy_caps_to_cfg(struct ice_port_info *pi, + struct ice_aqc_get_phy_caps_data *caps, + struct ice_aqc_set_phy_cfg_data *cfg) +{ + if (!pi || !caps || !cfg) + return; + + ice_memset(cfg, 0, sizeof(*cfg), ICE_NONDMA_MEM); + cfg->phy_type_low = caps->phy_type_low; + cfg->phy_type_high = caps->phy_type_high; + cfg->caps = caps->caps; + cfg->low_power_ctrl_an = caps->low_power_ctrl_an; + cfg->eee_cap = caps->eee_cap; + cfg->eeer_value = caps->eeer_value; + cfg->link_fec_opt = caps->link_fec_options; + cfg->module_compliance_enforcement = + caps->module_compliance_enforcement; + + if (ice_fw_supports_link_override(pi->hw)) { + struct ice_link_default_override_tlv tlv; + + if (ice_get_link_default_override(&tlv, pi)) + return; + + if (tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) + cfg->module_compliance_enforcement |= + ICE_LINK_OVERRIDE_STRICT_MODE; + } +} + +/** + * ice_cfg_phy_fec - Configure PHY FEC data based on FEC mode + * @pi: port information structure + * @cfg: PHY configuration data to set FEC mode + * @fec: FEC mode to configure + */ +enum ice_status +ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, + enum ice_fec_mode fec) +{ + struct ice_aqc_get_phy_caps_data *pcaps; + enum ice_status status = ICE_SUCCESS; + struct ice_hw *hw; + + if (!pi || !cfg) + return ICE_ERR_BAD_PTR; + + hw = pi->hw; + + pcaps = (struct ice_aqc_get_phy_caps_data *) + ice_malloc(hw, sizeof(*pcaps)); + if (!pcaps) + return ICE_ERR_NO_MEMORY; + + status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, pcaps, + NULL); + if (status) + goto out; + + switch (fec) { + case ICE_FEC_BASER: + /* Clear RS bits, and AND BASE-R ability + * bits and OR request bits. + */ + cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN | + ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN; + cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ | + ICE_AQC_PHY_FEC_25G_KR_REQ; + break; + case ICE_FEC_RS: + /* Clear BASE-R bits, and AND RS ability + * bits and OR request bits. + */ + cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN; + cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ | + ICE_AQC_PHY_FEC_25G_RS_544_REQ; + break; + case ICE_FEC_NONE: + /* Clear all FEC option bits. */ + cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK; + break; + case ICE_FEC_AUTO: + /* AND auto FEC bit, and all caps bits. 
*/ + cfg->caps &= ICE_AQC_PHY_CAPS_MASK; + cfg->link_fec_opt |= pcaps->link_fec_options; + break; + default: + status = ICE_ERR_PARAM; + break; + } + + if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(pi->hw)) { + struct ice_link_default_override_tlv tlv; + + if (ice_get_link_default_override(&tlv, pi)) + goto out; + + if (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) && + (tlv.options & ICE_LINK_OVERRIDE_EN)) + cfg->link_fec_opt = tlv.fec_options; + } + +out: + ice_free(hw, pcaps); + + return status; +} + +/** + * ice_get_link_status - get status of the HW network link + * @pi: port information structure + * @link_up: pointer to bool (true/false = linkup/linkdown) + * + * Variable link_up is true if link is up, false if link is down. + * The variable link_up is invalid if status is non zero. As a + * result of this call, link status reporting becomes enabled + */ +enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up) +{ + struct ice_phy_info *phy_info; + enum ice_status status = ICE_SUCCESS; + + if (!pi || !link_up) + return ICE_ERR_PARAM; + + phy_info = &pi->phy; + + if (phy_info->get_link_info) { + status = ice_update_link_info(pi); + + if (status) + ice_debug(pi->hw, ICE_DBG_LINK, + "get link status error, status = %d\n", + status); + } + + *link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP; + + return status; +} + +/** + * ice_aq_set_link_restart_an + * @pi: pointer to the port information structure + * @ena_link: if true: enable link, if false: disable link + * @cd: pointer to command details structure or NULL + * + * Sets up the link and restarts the Auto-Negotiation over the link. + */ +enum ice_status +ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link, + struct ice_sq_cd *cd) +{ + struct ice_aqc_restart_an *cmd; + struct ice_aq_desc desc; + + cmd = &desc.params.restart_an; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an); + + cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART; + cmd->lport_num = pi->lport; + if (ena_link) + cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE; + else + cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE; + + return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd); +} + +/** + * ice_aq_set_event_mask + * @hw: pointer to the HW struct + * @port_num: port number of the physical function + * @mask: event mask to be set + * @cd: pointer to command details structure or NULL + * + * Set event mask (0x0613) + */ +enum ice_status +ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask, + struct ice_sq_cd *cd) +{ + struct ice_aqc_set_event_mask *cmd; + struct ice_aq_desc desc; + + cmd = &desc.params.set_event_mask; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask); + + cmd->lport_num = port_num; + + cmd->event_mask = CPU_TO_LE16(mask); + return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); +} + +/** + * ice_aq_set_mac_loopback + * @hw: pointer to the HW struct + * @ena_lpbk: Enable or Disable loopback + * @cd: pointer to command details structure or NULL + * + * Enable/disable loopback on a given port + */ +enum ice_status +ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd) +{ + struct ice_aqc_set_mac_lb *cmd; + struct ice_aq_desc desc; + + cmd = &desc.params.set_mac_lb; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_lb); + if (ena_lpbk) + cmd->lb_mode = ICE_AQ_MAC_LB_EN; + + return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); +} + +/** + * ice_aq_set_port_id_led + * @pi: pointer to the port information + * @is_orig_mode: is this LED set 
to original mode (by the net-list) + * @cd: pointer to command details structure or NULL + * + * Set LED value for the given port (0x06e9) + */ +enum ice_status +ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode, + struct ice_sq_cd *cd) +{ + struct ice_aqc_set_port_id_led *cmd; + struct ice_hw *hw = pi->hw; + struct ice_aq_desc desc; + + cmd = &desc.params.set_port_id_led; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led); + + if (is_orig_mode) + cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_ORIG; + else + cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_BLINK; + + return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); +} + +/** + * ice_aq_sff_eeprom + * @hw: pointer to the HW struct + * @lport: bits [7:0] = logical port, bit [8] = logical port valid + * @bus_addr: I2C bus address of the eeprom (typically 0xA0, 0=topo default) + * @mem_addr: I2C offset. lower 8 bits for address, 8 upper bits zero padding. + * @page: QSFP page + * @set_page: set or ignore the page + * @data: pointer to data buffer to be read/written to the I2C device. + * @length: 1-16 for read, 1 for write. + * @write: 0 read, 1 for write. + * @cd: pointer to command details structure or NULL + * + * Read/Write SFF EEPROM (0x06EE) + */ +enum ice_status +ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr, + u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length, + bool write, struct ice_sq_cd *cd) +{ + struct ice_aqc_sff_eeprom *cmd; + struct ice_aq_desc desc; + enum ice_status status; + + if (!data || (mem_addr & 0xff00)) + return ICE_ERR_PARAM; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom); + cmd = &desc.params.read_write_sff_param; + desc.flags = CPU_TO_LE16(ICE_AQ_FLAG_RD | ICE_AQ_FLAG_BUF); + cmd->lport_num = (u8)(lport & 0xff); + cmd->lport_num_valid = (u8)((lport >> 8) & 0x01); + cmd->i2c_bus_addr = CPU_TO_LE16(((bus_addr >> 1) & + ICE_AQC_SFF_I2CBUS_7BIT_M) | + ((set_page << + ICE_AQC_SFF_SET_EEPROM_PAGE_S) & + ICE_AQC_SFF_SET_EEPROM_PAGE_M)); + cmd->i2c_mem_addr = CPU_TO_LE16(mem_addr & 0xff); + cmd->eeprom_page = CPU_TO_LE16((u16)page << ICE_AQC_SFF_EEPROM_PAGE_S); + if (write) + cmd->i2c_bus_addr |= CPU_TO_LE16(ICE_AQC_SFF_IS_WRITE); + + status = ice_aq_send_cmd(hw, &desc, data, length, cd); + return status; +} + +/** + * __ice_aq_get_set_rss_lut + * @hw: pointer to the hardware structure + * @vsi_id: VSI FW index + * @lut_type: LUT table type + * @lut: pointer to the LUT buffer provided by the caller + * @lut_size: size of the LUT buffer + * @glob_lut_idx: global LUT index + * @set: set true to set the table, false to get the table + * + * Internal function to get (0x0B05) or set (0x0B03) RSS look up table + */ +static enum ice_status +__ice_aq_get_set_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut, + u16 lut_size, u8 glob_lut_idx, bool set) +{ + struct ice_aqc_get_set_rss_lut *cmd_resp; + struct ice_aq_desc desc; + enum ice_status status; + u16 flags = 0; + + cmd_resp = &desc.params.get_set_rss_lut; + + if (set) { + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_lut); + desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); + } else { + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_lut); + } + + cmd_resp->vsi_id = CPU_TO_LE16(((vsi_id << + ICE_AQC_GSET_RSS_LUT_VSI_ID_S) & + ICE_AQC_GSET_RSS_LUT_VSI_ID_M) | + ICE_AQC_GSET_RSS_LUT_VSI_VALID); + + switch (lut_type) { + case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI: + case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF: + case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL: + flags |= ((lut_type << 
ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) & + ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M); + break; + default: + status = ICE_ERR_PARAM; + goto ice_aq_get_set_rss_lut_exit; + } + + if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL) { + flags |= ((glob_lut_idx << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) & + ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M); + + if (!set) + goto ice_aq_get_set_rss_lut_send; + } else if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) { + if (!set) + goto ice_aq_get_set_rss_lut_send; + } else { + goto ice_aq_get_set_rss_lut_send; + } + + /* LUT size is only valid for Global and PF table types */ + switch (lut_size) { + case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128: + flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128_FLAG << + ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) & + ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M; + break; + case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512: + flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG << + ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) & + ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M; + break; + case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K: + if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) { + flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG << + ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) & + ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M; + break; + } + /* fall-through */ + default: + status = ICE_ERR_PARAM; + goto ice_aq_get_set_rss_lut_exit; + } + +ice_aq_get_set_rss_lut_send: + cmd_resp->flags = CPU_TO_LE16(flags); + status = ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL); + +ice_aq_get_set_rss_lut_exit: + return status; +} + +/** + * ice_aq_get_rss_lut + * @hw: pointer to the hardware structure + * @vsi_handle: software VSI handle + * @lut_type: LUT table type + * @lut: pointer to the LUT buffer provided by the caller + * @lut_size: size of the LUT buffer + * + * get the RSS lookup table, PF or VSI type + */ +enum ice_status +ice_aq_get_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type, + u8 *lut, u16 lut_size) +{ + if (!ice_is_vsi_valid(hw, vsi_handle) || !lut) + return ICE_ERR_PARAM; + + return __ice_aq_get_set_rss_lut(hw, ice_get_hw_vsi_num(hw, vsi_handle), + lut_type, lut, lut_size, 0, false); +} + +/** + * ice_aq_set_rss_lut + * @hw: pointer to the hardware structure + * @vsi_handle: software VSI handle + * @lut_type: LUT table type + * @lut: pointer to the LUT buffer provided by the caller + * @lut_size: size of the LUT buffer + * + * set the RSS lookup table, PF or VSI type + */ +enum ice_status +ice_aq_set_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type, + u8 *lut, u16 lut_size) +{ + if (!ice_is_vsi_valid(hw, vsi_handle) || !lut) + return ICE_ERR_PARAM; + + return __ice_aq_get_set_rss_lut(hw, ice_get_hw_vsi_num(hw, vsi_handle), + lut_type, lut, lut_size, 0, true); +} + +/** + * __ice_aq_get_set_rss_key + * @hw: pointer to the HW struct + * @vsi_id: VSI FW index + * @key: pointer to key info struct + * @set: set true to set the key, false to get the key + * + * get (0x0B04) or set (0x0B02) the RSS key per VSI + */ +static enum +ice_status __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id, + struct ice_aqc_get_set_rss_keys *key, + bool set) +{ + struct ice_aqc_get_set_rss_key *cmd_resp; + u16 key_size = sizeof(*key); + struct ice_aq_desc desc; + + cmd_resp = &desc.params.get_set_rss_key; + + if (set) { + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key); + desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); + } else { + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key); + } + + cmd_resp->vsi_id = CPU_TO_LE16(((vsi_id << + ICE_AQC_GSET_RSS_KEY_VSI_ID_S) & + 
ICE_AQC_GSET_RSS_KEY_VSI_ID_M) | + ICE_AQC_GSET_RSS_KEY_VSI_VALID); + + return ice_aq_send_cmd(hw, &desc, key, key_size, NULL); +} + +/** + * ice_aq_get_rss_key + * @hw: pointer to the HW struct + * @vsi_handle: software VSI handle + * @key: pointer to key info struct + * + * get the RSS key per VSI + */ +enum ice_status +ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle, + struct ice_aqc_get_set_rss_keys *key) +{ + if (!ice_is_vsi_valid(hw, vsi_handle) || !key) + return ICE_ERR_PARAM; + + return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle), + key, false); +} + +/** + * ice_aq_set_rss_key + * @hw: pointer to the HW struct + * @vsi_handle: software VSI handle + * @keys: pointer to key info struct + * + * set the RSS key per VSI + */ +enum ice_status +ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle, + struct ice_aqc_get_set_rss_keys *keys) +{ + if (!ice_is_vsi_valid(hw, vsi_handle) || !keys) + return ICE_ERR_PARAM; + + return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle), + keys, true); +} + +/** + * ice_aq_add_lan_txq + * @hw: pointer to the hardware structure + * @num_qgrps: Number of added queue groups + * @qg_list: list of queue groups to be added + * @buf_size: size of buffer for indirect command + * @cd: pointer to command details structure or NULL + * + * Add Tx LAN queue (0x0C30) + * + * NOTE: + * Prior to calling add Tx LAN queue: + * Initialize the following as part of the Tx queue context: + * Completion queue ID if the queue uses Completion queue, Quanta profile, + * Cache profile and Packet shaper profile. + * + * After add Tx LAN queue AQ command is completed: + * Interrupts should be associated with specific queues, + * Association of Tx queue to Doorbell queue is not part of Add LAN Tx queue + * flow. 
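+ *
+ * The expected @buf_size follows directly from the size check in the
+ * function body: per group, one group header plus num_txqs entries of
+ * struct ice_aqc_add_txqs_perq. Assuming txqs[] is a one-element array
+ * in struct ice_aqc_add_tx_qgrp, a single group with N queues needs:
+ *
+ *     buf_size = sizeof(struct ice_aqc_add_tx_qgrp) +
+ *                (N - 1) * sizeof(struct ice_aqc_add_txqs_perq);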
+ */ +enum ice_status +ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps, + struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size, + struct ice_sq_cd *cd) +{ + u16 i, sum_header_size, sum_q_size = 0; + struct ice_aqc_add_tx_qgrp *list; + struct ice_aqc_add_txqs *cmd; + struct ice_aq_desc desc; + + ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); + + cmd = &desc.params.add_txqs; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs); + + if (!qg_list) + return ICE_ERR_PARAM; + + if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS) + return ICE_ERR_PARAM; + + sum_header_size = num_qgrps * + (sizeof(*qg_list) - sizeof(*qg_list->txqs)); + + list = qg_list; + for (i = 0; i < num_qgrps; i++) { + struct ice_aqc_add_txqs_perq *q = list->txqs; + + sum_q_size += list->num_txqs * sizeof(*q); + list = (struct ice_aqc_add_tx_qgrp *)(q + list->num_txqs); + } + + if (buf_size != (sum_header_size + sum_q_size)) + return ICE_ERR_PARAM; + + desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); + + cmd->num_qgrps = num_qgrps; + + return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd); +} + +/** + * ice_aq_dis_lan_txq + * @hw: pointer to the hardware structure + * @num_qgrps: number of groups in the list + * @qg_list: the list of groups to disable + * @buf_size: the total size of the qg_list buffer in bytes + * @rst_src: if called due to reset, specifies the reset source + * @vmvf_num: the relative VM or VF number that is undergoing the reset + * @cd: pointer to command details structure or NULL + * + * Disable LAN Tx queue (0x0C31) + */ +static enum ice_status +ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps, + struct ice_aqc_dis_txq_item *qg_list, u16 buf_size, + enum ice_disq_rst_src rst_src, u16 vmvf_num, + struct ice_sq_cd *cd) +{ + struct ice_aqc_dis_txqs *cmd; + struct ice_aq_desc desc; + enum ice_status status; + u16 i, sz = 0; + + ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); + cmd = &desc.params.dis_txqs; + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs); + + /* qg_list can be NULL only in VM/VF reset flow */ + if (!qg_list && !rst_src) + return ICE_ERR_PARAM; + + if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS) + return ICE_ERR_PARAM; + + cmd->num_entries = num_qgrps; + + cmd->vmvf_and_timeout = CPU_TO_LE16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) & + ICE_AQC_Q_DIS_TIMEOUT_M); + + switch (rst_src) { + case ICE_VM_RESET: + cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET; + cmd->vmvf_and_timeout |= + CPU_TO_LE16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M); + break; + case ICE_NO_RESET: + default: + break; + } + + /* flush pipe on time out */ + cmd->cmd_type |= ICE_AQC_Q_DIS_CMD_FLUSH_PIPE; + /* If no queue group info, we are in a reset flow. 
Issue the AQ */ + if (!qg_list) + goto do_aq; + + /* set RD bit to indicate that command buffer is provided by the driver + * and it needs to be read by the firmware + */ + desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); + + for (i = 0; i < num_qgrps; ++i) { + /* Calculate the size taken up by the queue IDs in this group */ + sz += qg_list[i].num_qs * sizeof(qg_list[i].q_id); + + /* Add the size of the group header */ + sz += sizeof(qg_list[i]) - sizeof(qg_list[i].q_id); + + /* If the num of queues is even, add 2 bytes of padding */ + if ((qg_list[i].num_qs % 2) == 0) + sz += 2; + } + + if (buf_size != sz) + return ICE_ERR_PARAM; + +do_aq: + status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd); + if (status) { + if (!qg_list) + ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n", + vmvf_num, hw->adminq.sq_last_status); + else + ice_debug(hw, ICE_DBG_SCHED, "disable queue %d failed %d\n", + LE16_TO_CPU(qg_list[0].q_id[0]), + hw->adminq.sq_last_status); + } + return status; +} + +/** + * ice_aq_move_recfg_lan_txq + * @hw: pointer to the hardware structure + * @num_qs: number of queues to move/reconfigure + * @is_move: true if this operation involves node movement + * @is_tc_change: true if this operation involves a TC change + * @subseq_call: true if this operation is a subsequent call + * @flush_pipe: on timeout, true to flush pipe, false to return EAGAIN + * @timeout: timeout in units of 100 usec (valid values 0-50) + * @blocked_cgds: out param, bitmap of CGDs that timed out if returning EAGAIN + * @buf: struct containing src/dest TEID and per-queue info + * @buf_size: size of buffer for indirect command + * @txqs_moved: out param, number of queues successfully moved + * @cd: pointer to command details structure or NULL + * + * Move / Reconfigure Tx LAN queues (0x0C32) + */ +enum ice_status +ice_aq_move_recfg_lan_txq(struct ice_hw *hw, u8 num_qs, bool is_move, + bool is_tc_change, bool subseq_call, bool flush_pipe, + u8 timeout, u32 *blocked_cgds, + struct ice_aqc_move_txqs_data *buf, u16 buf_size, + u8 *txqs_moved, struct ice_sq_cd *cd) +{ + struct ice_aqc_move_txqs *cmd; + struct ice_aq_desc desc; + enum ice_status status; + + cmd = &desc.params.move_txqs; + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_move_recfg_txqs); + +#define ICE_LAN_TXQ_MOVE_TIMEOUT_MAX 50 + if (timeout > ICE_LAN_TXQ_MOVE_TIMEOUT_MAX) + return ICE_ERR_PARAM; + + if (is_tc_change && !flush_pipe && !blocked_cgds) + return ICE_ERR_PARAM; + + if (!is_move && !is_tc_change) + return ICE_ERR_PARAM; + + desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); + + if (is_move) + cmd->cmd_type |= ICE_AQC_Q_CMD_TYPE_MOVE; + + if (is_tc_change) + cmd->cmd_type |= ICE_AQC_Q_CMD_TYPE_TC_CHANGE; + + if (subseq_call) + cmd->cmd_type |= ICE_AQC_Q_CMD_SUBSEQ_CALL; + + if (flush_pipe) + cmd->cmd_type |= ICE_AQC_Q_CMD_FLUSH_PIPE; + + cmd->num_qs = num_qs; + cmd->timeout = ((timeout << ICE_AQC_Q_CMD_TIMEOUT_S) & + ICE_AQC_Q_CMD_TIMEOUT_M); + + status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); + + if (!status && txqs_moved) + *txqs_moved = cmd->num_qs; + + if (hw->adminq.sq_last_status == ICE_AQ_RC_EAGAIN && + is_tc_change && !flush_pipe) + *blocked_cgds = LE32_TO_CPU(cmd->blocked_cgds); + + return status; +} + +/* End of FW Admin Queue command wrappers */ + +/** + * ice_write_byte - write a byte to a packed context structure + * @src_ctx: the context structure to read from + * @dest_ctx: the context to be written to + * @ce_info: a description of the struct to be filled + */ +static void +ice_write_byte(u8 *src_ctx, u8 *dest_ctx, 
const struct ice_ctx_ele *ce_info) +{ + u8 src_byte, dest_byte, mask; + u8 *from, *dest; + u16 shift_width; + + /* copy from the next struct field */ + from = src_ctx + ce_info->offset; + + /* prepare the bits and mask */ + shift_width = ce_info->lsb % 8; + mask = (u8)(BIT(ce_info->width) - 1); + + src_byte = *from; + src_byte &= mask; + + /* shift to correct alignment */ + mask <<= shift_width; + src_byte <<= shift_width; + + /* get the current bits from the target bit string */ + dest = dest_ctx + (ce_info->lsb / 8); + + ice_memcpy(&dest_byte, dest, sizeof(dest_byte), ICE_DMA_TO_NONDMA); + + dest_byte &= ~mask; /* get the bits not changing */ + dest_byte |= src_byte; /* add in the new bits */ + + /* put it all back */ + ice_memcpy(dest, &dest_byte, sizeof(dest_byte), ICE_NONDMA_TO_DMA); +} + +/** + * ice_write_word - write a word to a packed context structure + * @src_ctx: the context structure to read from + * @dest_ctx: the context to be written to + * @ce_info: a description of the struct to be filled + */ +static void +ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) +{ + u16 src_word, mask; + __le16 dest_word; + u8 *from, *dest; + u16 shift_width; + + /* copy from the next struct field */ + from = src_ctx + ce_info->offset; + + /* prepare the bits and mask */ + shift_width = ce_info->lsb % 8; + mask = BIT(ce_info->width) - 1; + + /* don't swizzle the bits until after the mask because the mask bits + * will be in a different bit position on big endian machines + */ + src_word = *(u16 *)from; + src_word &= mask; + + /* shift to correct alignment */ + mask <<= shift_width; + src_word <<= shift_width; + + /* get the current bits from the target bit string */ + dest = dest_ctx + (ce_info->lsb / 8); + + ice_memcpy(&dest_word, dest, sizeof(dest_word), ICE_DMA_TO_NONDMA); + + dest_word &= ~(CPU_TO_LE16(mask)); /* get the bits not changing */ + dest_word |= CPU_TO_LE16(src_word); /* add in the new bits */ + + /* put it all back */ + ice_memcpy(dest, &dest_word, sizeof(dest_word), ICE_NONDMA_TO_DMA); +} + +/** + * ice_write_dword - write a dword to a packed context structure + * @src_ctx: the context structure to read from + * @dest_ctx: the context to be written to + * @ce_info: a description of the struct to be filled + */ +static void +ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) +{ + u32 src_dword, mask; + __le32 dest_dword; + u8 *from, *dest; + u16 shift_width; + + /* copy from the next struct field */ + from = src_ctx + ce_info->offset; + + /* prepare the bits and mask */ + shift_width = ce_info->lsb % 8; + + /* if the field width is exactly 32 on an x86 machine, then the shift + * operation will not work because the SHL instructions count is masked + * to 5 bits so the shift will do nothing + */ + if (ce_info->width < 32) + mask = BIT(ce_info->width) - 1; + else + mask = (u32)~0; + + /* don't swizzle the bits until after the mask because the mask bits + * will be in a different bit position on big endian machines + */ + src_dword = *(u32 *)from; + src_dword &= mask; + + /* shift to correct alignment */ + mask <<= shift_width; + src_dword <<= shift_width; + + /* get the current bits from the target bit string */ + dest = dest_ctx + (ce_info->lsb / 8); + + ice_memcpy(&dest_dword, dest, sizeof(dest_dword), ICE_DMA_TO_NONDMA); + + dest_dword &= ~(CPU_TO_LE32(mask)); /* get the bits not changing */ + dest_dword |= CPU_TO_LE32(src_dword); /* add in the new bits */ + + /* put it all back */ + ice_memcpy(dest, &dest_dword, 
sizeof(dest_dword), ICE_NONDMA_TO_DMA); +} + +/** + * ice_write_qword - write a qword to a packed context structure + * @src_ctx: the context structure to read from + * @dest_ctx: the context to be written to + * @ce_info: a description of the struct to be filled + */ +static void +ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) +{ + u64 src_qword, mask; + __le64 dest_qword; + u8 *from, *dest; + u16 shift_width; + + /* copy from the next struct field */ + from = src_ctx + ce_info->offset; + + /* prepare the bits and mask */ + shift_width = ce_info->lsb % 8; + + /* if the field width is exactly 64 on an x86 machine, then the shift + * operation will not work because the SHL instructions count is masked + * to 6 bits so the shift will do nothing + */ + if (ce_info->width < 64) + mask = BIT_ULL(ce_info->width) - 1; + else + mask = (u64)~0; + + /* don't swizzle the bits until after the mask because the mask bits + * will be in a different bit position on big endian machines + */ + src_qword = *(u64 *)from; + src_qword &= mask; + + /* shift to correct alignment */ + mask <<= shift_width; + src_qword <<= shift_width; + + /* get the current bits from the target bit string */ + dest = dest_ctx + (ce_info->lsb / 8); + + ice_memcpy(&dest_qword, dest, sizeof(dest_qword), ICE_DMA_TO_NONDMA); + + dest_qword &= ~(CPU_TO_LE64(mask)); /* get the bits not changing */ + dest_qword |= CPU_TO_LE64(src_qword); /* add in the new bits */ + + /* put it all back */ + ice_memcpy(dest, &dest_qword, sizeof(dest_qword), ICE_NONDMA_TO_DMA); +} + +/** + * ice_set_ctx - set context bits in packed structure + * @src_ctx: pointer to a generic non-packed context structure + * @dest_ctx: pointer to memory for the packed structure + * @ce_info: a description of the structure to be transformed + */ +enum ice_status +ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) +{ + int f; + + for (f = 0; ce_info[f].width; f++) { + /* We have to deal with each element of the FW response + * using the correct size so that we are correct regardless + * of the endianness of the machine. 
+ */ + switch (ce_info[f].size_of) { + case sizeof(u8): + ice_write_byte(src_ctx, dest_ctx, &ce_info[f]); + break; + case sizeof(u16): + ice_write_word(src_ctx, dest_ctx, &ce_info[f]); + break; + case sizeof(u32): + ice_write_dword(src_ctx, dest_ctx, &ce_info[f]); + break; + case sizeof(u64): + ice_write_qword(src_ctx, dest_ctx, &ce_info[f]); + break; + default: + return ICE_ERR_INVAL_SIZE; + } + } + + return ICE_SUCCESS; +} + +/** + * ice_read_byte - read context byte into struct + * @src_ctx: the context structure to read from + * @dest_ctx: the context to be written to + * @ce_info: a description of the struct to be filled + */ +static void +ice_read_byte(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info) +{ + u8 dest_byte, mask; + u8 *src, *target; + u16 shift_width; + + /* prepare the bits and mask */ + shift_width = ce_info->lsb % 8; + mask = (u8)(BIT(ce_info->width) - 1); + + /* shift to correct alignment */ + mask <<= shift_width; + + /* get the current bits from the src bit string */ + src = src_ctx + (ce_info->lsb / 8); + + ice_memcpy(&dest_byte, src, sizeof(dest_byte), ICE_DMA_TO_NONDMA); + + dest_byte &= ~(mask); + + dest_byte >>= shift_width; + + /* get the address from the struct field */ + target = dest_ctx + ce_info->offset; + + /* put it back in the struct */ + ice_memcpy(target, &dest_byte, sizeof(dest_byte), ICE_NONDMA_TO_DMA); +} + +/** + * ice_read_word - read context word into struct + * @src_ctx: the context structure to read from + * @dest_ctx: the context to be written to + * @ce_info: a description of the struct to be filled + */ +static void +ice_read_word(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info) +{ + u16 dest_word, mask; + u8 *src, *target; + __le16 src_word; + u16 shift_width; + + /* prepare the bits and mask */ + shift_width = ce_info->lsb % 8; + mask = BIT(ce_info->width) - 1; + + /* shift to correct alignment */ + mask <<= shift_width; + + /* get the current bits from the src bit string */ + src = src_ctx + (ce_info->lsb / 8); + + ice_memcpy(&src_word, src, sizeof(src_word), ICE_DMA_TO_NONDMA); + + /* the data in the memory is stored as little endian so mask it + * correctly + */ + src_word &= ~(CPU_TO_LE16(mask)); + + /* get the data back into host order before shifting */ + dest_word = LE16_TO_CPU(src_word); + + dest_word >>= shift_width; + + /* get the address from the struct field */ + target = dest_ctx + ce_info->offset; + + /* put it back in the struct */ + ice_memcpy(target, &dest_word, sizeof(dest_word), ICE_NONDMA_TO_DMA); +} + +/** + * ice_read_dword - read context dword into struct + * @src_ctx: the context structure to read from + * @dest_ctx: the context to be written to + * @ce_info: a description of the struct to be filled + */ +static void +ice_read_dword(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info) +{ + u32 dest_dword, mask; + __le32 src_dword; + u8 *src, *target; + u16 shift_width; + + /* prepare the bits and mask */ + shift_width = ce_info->lsb % 8; + + /* if the field width is exactly 32 on an x86 machine, then the shift + * operation will not work because the SHL instructions count is masked + * to 5 bits so the shift will do nothing + */ + if (ce_info->width < 32) + mask = BIT(ce_info->width) - 1; + else + mask = (u32)~0; + + /* shift to correct alignment */ + mask <<= shift_width; + + /* get the current bits from the src bit string */ + src = src_ctx + (ce_info->lsb / 8); + + ice_memcpy(&src_dword, src, sizeof(src_dword), ICE_DMA_TO_NONDMA); + + /* the data in the memory is stored as little endian so mask 
it + * correctly + */ + src_dword &= ~(CPU_TO_LE32(mask)); + + /* get the data back into host order before shifting */ + dest_dword = LE32_TO_CPU(src_dword); + + dest_dword >>= shift_width; + + /* get the address from the struct field */ + target = dest_ctx + ce_info->offset; + + /* put it back in the struct */ + ice_memcpy(target, &dest_dword, sizeof(dest_dword), ICE_NONDMA_TO_DMA); +} + +/** + * ice_read_qword - read context qword into struct + * @src_ctx: the context structure to read from + * @dest_ctx: the context to be written to + * @ce_info: a description of the struct to be filled + */ +static void +ice_read_qword(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info) +{ + u64 dest_qword, mask; + __le64 src_qword; + u8 *src, *target; + u16 shift_width; + + /* prepare the bits and mask */ + shift_width = ce_info->lsb % 8; + + /* if the field width is exactly 64 on an x86 machine, then the shift + * operation will not work because the SHL instructions count is masked + * to 6 bits so the shift will do nothing + */ + if (ce_info->width < 64) + mask = BIT_ULL(ce_info->width) - 1; + else + mask = (u64)~0; + + /* shift to correct alignment */ + mask <<= shift_width; + + /* get the current bits from the src bit string */ + src = src_ctx + (ce_info->lsb / 8); + + ice_memcpy(&src_qword, src, sizeof(src_qword), ICE_DMA_TO_NONDMA); + + /* the data in the memory is stored as little endian so mask it + * correctly + */ + src_qword &= ~(CPU_TO_LE64(mask)); + + /* get the data back into host order before shifting */ + dest_qword = LE64_TO_CPU(src_qword); + + dest_qword >>= shift_width; + + /* get the address from the struct field */ + target = dest_ctx + ce_info->offset; + + /* put it back in the struct */ + ice_memcpy(target, &dest_qword, sizeof(dest_qword), ICE_NONDMA_TO_DMA); +} + +/** + * ice_get_ctx - extract context bits from a packed structure + * @src_ctx: pointer to a generic packed context structure + * @dest_ctx: pointer to a generic non-packed context structure + * @ce_info: a description of the structure to be read from + */ +enum ice_status +ice_get_ctx(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info) +{ + int f; + + for (f = 0; ce_info[f].width; f++) { + switch (ce_info[f].size_of) { + case 1: + ice_read_byte(src_ctx, dest_ctx, &ce_info[f]); + break; + case 2: + ice_read_word(src_ctx, dest_ctx, &ce_info[f]); + break; + case 4: + ice_read_dword(src_ctx, dest_ctx, &ce_info[f]); + break; + case 8: + ice_read_qword(src_ctx, dest_ctx, &ce_info[f]); + break; + default: + /* nothing to do, just keep going */ + break; + } + } + + return ICE_SUCCESS; +} + +/** + * ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC + * @hw: pointer to the HW struct + * @vsi_handle: software VSI handle + * @tc: TC number + * @q_handle: software queue handle + */ +struct ice_q_ctx * +ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle) +{ + struct ice_vsi_ctx *vsi; + struct ice_q_ctx *q_ctx; + + vsi = ice_get_vsi_ctx(hw, vsi_handle); + if (!vsi) + return NULL; + if (q_handle >= vsi->num_lan_q_entries[tc]) + return NULL; + if (!vsi->lan_q_ctx[tc]) + return NULL; + q_ctx = vsi->lan_q_ctx[tc]; + return &q_ctx[q_handle]; +} + +/** + * ice_ena_vsi_txq + * @pi: port information structure + * @vsi_handle: software VSI handle + * @tc: TC number + * @q_handle: software queue handle + * @num_qgrps: Number of added queue groups + * @buf: list of queue groups to be added + * @buf_size: size of buffer for indirect command + * @cd: pointer to command details structure or 
NULL + * + * This function adds one LAN queue + */ +enum ice_status +ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle, + u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size, + struct ice_sq_cd *cd) +{ + struct ice_aqc_txsched_elem_data node = { 0 }; + struct ice_sched_node *parent; + struct ice_q_ctx *q_ctx; + enum ice_status status; + struct ice_hw *hw; + + if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) + return ICE_ERR_CFG; + + if (num_qgrps > 1 || buf->num_txqs > 1) + return ICE_ERR_MAX_LIMIT; + + hw = pi->hw; + + if (!ice_is_vsi_valid(hw, vsi_handle)) + return ICE_ERR_PARAM; + + ice_acquire_lock(&pi->sched_lock); + + q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handle); + if (!q_ctx) { + ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n", + q_handle); + status = ICE_ERR_PARAM; + goto ena_txq_exit; + } + + /* find a parent node */ + parent = ice_sched_get_free_qparent(pi, vsi_handle, tc, + ICE_SCHED_NODE_OWNER_LAN); + if (!parent) { + status = ICE_ERR_PARAM; + goto ena_txq_exit; + } + + buf->parent_teid = parent->info.node_teid; + node.parent_teid = parent->info.node_teid; + /* Mark that the values in the "generic" section as valid. The default + * value in the "generic" section is zero. This means that : + * - Scheduling mode is Bytes Per Second (BPS), indicated by Bit 0. + * - 0 priority among siblings, indicated by Bit 1-3. + * - WFQ, indicated by Bit 4. + * - 0 Adjustment value is used in PSM credit update flow, indicated by + * Bit 5-6. + * - Bit 7 is reserved. + * Without setting the generic section as valid in valid_sections, the + * Admin queue command will fail with error code ICE_AQ_RC_EINVAL. + */ + buf->txqs[0].info.valid_sections = ICE_AQC_ELEM_VALID_GENERIC; + + /* add the LAN queue */ + status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd); + if (status != ICE_SUCCESS) { + ice_debug(hw, ICE_DBG_SCHED, "enable queue %d failed %d\n", + LE16_TO_CPU(buf->txqs[0].txq_id), + hw->adminq.sq_last_status); + goto ena_txq_exit; + } + + node.node_teid = buf->txqs[0].q_teid; + node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF; + q_ctx->q_handle = q_handle; + q_ctx->q_teid = LE32_TO_CPU(node.node_teid); + + /* add a leaf node into scheduler tree queue layer */ + status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node); + if (!status) + status = ice_sched_replay_q_bw(pi, q_ctx); + +ena_txq_exit: + ice_release_lock(&pi->sched_lock); + return status; +} + +/** + * ice_dis_vsi_txq + * @pi: port information structure + * @vsi_handle: software VSI handle + * @tc: TC number + * @num_queues: number of queues + * @q_handles: pointer to software queue handle array + * @q_ids: pointer to the q_id array + * @q_teids: pointer to queue node teids + * @rst_src: if called due to reset, specifies the reset source + * @vmvf_num: the relative VM or VF number that is undergoing the reset + * @cd: pointer to command details structure or NULL + * + * This function removes queues and their corresponding nodes in SW DB + */ +enum ice_status +ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues, + u16 *q_handles, u16 *q_ids, u32 *q_teids, + enum ice_disq_rst_src rst_src, u16 vmvf_num, + struct ice_sq_cd *cd) +{ + enum ice_status status = ICE_ERR_DOES_NOT_EXIST; + struct ice_aqc_dis_txq_item qg_list; + struct ice_q_ctx *q_ctx; + u16 i; + + if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) + return ICE_ERR_CFG; + + if (!num_queues) { + /* if queue is disabled already yet the disable queue command + * has 
to be sent to complete the VF reset, then call + * ice_aq_dis_lan_txq without any queue information + */ + if (rst_src) + return ice_aq_dis_lan_txq(pi->hw, 0, NULL, 0, rst_src, + vmvf_num, NULL); + return ICE_ERR_CFG; + } + + ice_acquire_lock(&pi->sched_lock); + + for (i = 0; i < num_queues; i++) { + struct ice_sched_node *node; + + node = ice_sched_find_node_by_teid(pi->root, q_teids[i]); + if (!node) + continue; + q_ctx = ice_get_lan_q_ctx(pi->hw, vsi_handle, tc, q_handles[i]); + if (!q_ctx) { + ice_debug(pi->hw, ICE_DBG_SCHED, "invalid queue handle%d\n", + q_handles[i]); + continue; + } + if (q_ctx->q_handle != q_handles[i]) { + ice_debug(pi->hw, ICE_DBG_SCHED, "Err:handles %d %d\n", + q_ctx->q_handle, q_handles[i]); + continue; + } + qg_list.parent_teid = node->info.parent_teid; + qg_list.num_qs = 1; + qg_list.q_id[0] = CPU_TO_LE16(q_ids[i]); + status = ice_aq_dis_lan_txq(pi->hw, 1, &qg_list, + sizeof(qg_list), rst_src, vmvf_num, + cd); + + if (status != ICE_SUCCESS) + break; + ice_free_sched_node(pi, node); + q_ctx->q_handle = ICE_INVAL_Q_HANDLE; + } + ice_release_lock(&pi->sched_lock); + return status; +} + +/** + * ice_cfg_vsi_qs - configure the new/existing VSI queues + * @pi: port information structure + * @vsi_handle: software VSI handle + * @tc_bitmap: TC bitmap + * @maxqs: max queues array per TC + * @owner: LAN or RDMA + * + * This function adds/updates the VSI queues per TC. + */ +static enum ice_status +ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap, + u16 *maxqs, u8 owner) +{ + enum ice_status status = ICE_SUCCESS; + u8 i; + + if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) + return ICE_ERR_CFG; + + if (!ice_is_vsi_valid(pi->hw, vsi_handle)) + return ICE_ERR_PARAM; + + ice_acquire_lock(&pi->sched_lock); + + ice_for_each_traffic_class(i) { + /* configuration is possible only if TC node is present */ + if (!ice_sched_get_tc_node(pi, i)) + continue; + + status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner, + ice_is_tc_ena(tc_bitmap, i)); + if (status) + break; + } + + ice_release_lock(&pi->sched_lock); + return status; +} + +/** + * ice_cfg_vsi_lan - configure VSI LAN queues + * @pi: port information structure + * @vsi_handle: software VSI handle + * @tc_bitmap: TC bitmap + * @max_lanqs: max LAN queues array per TC + * + * This function adds/updates the VSI LAN queues per TC. + */ +enum ice_status +ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap, + u16 *max_lanqs) +{ + return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs, + ICE_SCHED_NODE_OWNER_LAN); +} + +/** + * ice_replay_pre_init - replay pre initialization + * @hw: pointer to the HW struct + * + * Initializes required config data for VSI, FD, ACL, and RSS before replay. + */ +static enum ice_status ice_replay_pre_init(struct ice_hw *hw) +{ + struct ice_switch_info *sw = hw->switch_info; + u8 i; + + /* Delete old entries from replay filter list head if there is any */ + ice_rm_all_sw_replay_rule_info(hw); + /* In start of replay, move entries into replay_rules list, it + * will allow adding rules entries back to filt_rules list, + * which is operational list. 
+ */ + for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) + LIST_REPLACE_INIT(&sw->recp_list[i].filt_rules, + &sw->recp_list[i].filt_replay_rules); + ice_sched_replay_agg_vsi_preinit(hw); + + return ice_sched_replay_tc_node_bw(hw->port_info); +} + +/** + * ice_replay_vsi - replay VSI configuration + * @hw: pointer to the HW struct + * @vsi_handle: driver VSI handle + * + * Restore all VSI configuration after reset. It is required to call this + * function with main VSI first. + */ +enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle) +{ + enum ice_status status; + + if (!ice_is_vsi_valid(hw, vsi_handle)) + return ICE_ERR_PARAM; + + /* Replay pre-initialization if there is any */ + if (vsi_handle == ICE_MAIN_VSI_HANDLE) { + status = ice_replay_pre_init(hw); + if (status) + return status; + } + /* Replay per VSI all RSS configurations */ + status = ice_replay_rss_cfg(hw, vsi_handle); + if (status) + return status; + /* Replay per VSI all filters */ + status = ice_replay_vsi_all_fltr(hw, vsi_handle); + if (!status) + status = ice_replay_vsi_agg(hw, vsi_handle); + return status; +} + +/** + * ice_replay_post - post replay configuration cleanup + * @hw: pointer to the HW struct + * + * Post replay cleanup. + */ +void ice_replay_post(struct ice_hw *hw) +{ + /* Delete old entries from replay filter list head */ + ice_rm_all_sw_replay_rule_info(hw); + ice_sched_replay_agg(hw); +} + +/** + * ice_stat_update40 - read 40 bit stat from the chip and update stat values + * @hw: ptr to the hardware info + * @reg: offset of 64 bit HW register to read from + * @prev_stat_loaded: bool to specify if previous stats are loaded + * @prev_stat: ptr to previous loaded stat value + * @cur_stat: ptr to current stat value + */ +void +ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, + u64 *prev_stat, u64 *cur_stat) +{ + u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1); + + /* device stats are not reset at PFR, they likely will not be zeroed + * when the driver starts. Thus, save the value from the first read + * without adding to the statistic value so that we report stats which + * count up from zero. + */ + if (!prev_stat_loaded) { + *prev_stat = new_data; + return; + } + + /* Calculate the difference between the new and old values, and then + * add it to the software stat value. + */ + if (new_data >= *prev_stat) + *cur_stat += new_data - *prev_stat; + else + /* to manage the potential roll-over */ + *cur_stat += (new_data + BIT_ULL(40)) - *prev_stat; + + /* Update the previously stored value to prepare for next read */ + *prev_stat = new_data; +} + +/** + * ice_stat_update32 - read 32 bit stat from the chip and update stat values + * @hw: ptr to the hardware info + * @reg: offset of HW register to read from + * @prev_stat_loaded: bool to specify if previous stats are loaded + * @prev_stat: ptr to previous loaded stat value + * @cur_stat: ptr to current stat value + */ +void +ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, + u64 *prev_stat, u64 *cur_stat) +{ + u32 new_data; + + new_data = rd32(hw, reg); + + /* device stats are not reset at PFR, they likely will not be zeroed + * when the driver starts. Thus, save the value from the first read + * without adding to the statistic value so that we report stats which + * count up from zero. + */ + if (!prev_stat_loaded) { + *prev_stat = new_data; + return; + } + + /* Calculate the difference between the new and old values, and then + * add it to the software stat value. 
+ */ + if (new_data >= *prev_stat) + *cur_stat += new_data - *prev_stat; + else + /* to manage the potential roll-over */ + *cur_stat += (new_data + BIT_ULL(32)) - *prev_stat; + + /* Update the previously stored value to prepare for next read */ + *prev_stat = new_data; +} + +/** + * ice_stat_update_repc - read GLV_REPC stats from chip and update stat values + * @hw: ptr to the hardware info + * @vsi_handle: VSI handle + * @prev_stat_loaded: bool to specify if the previous stat values are loaded + * @cur_stats: ptr to current stats structure + * + * The GLV_REPC statistic register actually tracks two 16bit statistics, and + * thus cannot be read using the normal ice_stat_update32 function. + * + * Read the GLV_REPC register associated with the given VSI, and update the + * rx_no_desc and rx_error values in the ice_eth_stats structure. + * + * Because the statistics in GLV_REPC stick at 0xFFFF, the register must be + * cleared each time it's read. + * + * Note that the GLV_RDPC register also counts the causes that would trigger + * GLV_REPC. However, it does not give the finer grained detail about why the + * packets are being dropped. The GLV_REPC values can be used to distinguish + * whether Rx packets are dropped due to errors or due to no available + * descriptors. + */ +void +ice_stat_update_repc(struct ice_hw *hw, u16 vsi_handle, bool prev_stat_loaded, + struct ice_eth_stats *cur_stats) +{ + u16 vsi_num, no_desc, error_cnt; + u32 repc; + + if (!ice_is_vsi_valid(hw, vsi_handle)) + return; + + vsi_num = ice_get_hw_vsi_num(hw, vsi_handle); + + /* If we haven't loaded stats yet, just clear the current value */ + if (!prev_stat_loaded) { + wr32(hw, GLV_REPC(vsi_num), 0); + return; + } + + repc = rd32(hw, GLV_REPC(vsi_num)); + no_desc = (repc & GLV_REPC_NO_DESC_CNT_M) >> GLV_REPC_NO_DESC_CNT_S; + error_cnt = (repc & GLV_REPC_ERROR_CNT_M) >> GLV_REPC_ERROR_CNT_S; + + /* Clear the count by writing to the stats register */ + wr32(hw, GLV_REPC(vsi_num), 0); + + cur_stats->rx_no_desc += no_desc; + cur_stats->rx_errors += error_cnt; +} + +/** + * ice_sched_query_elem - query element information from HW + * @hw: pointer to the HW struct + * @node_teid: node TEID to be queried + * @buf: buffer to element information + * + * This function queries HW element information + */ +enum ice_status +ice_sched_query_elem(struct ice_hw *hw, u32 node_teid, + struct ice_aqc_get_elem *buf) +{ + u16 buf_size, num_elem_ret = 0; + enum ice_status status; + + buf_size = sizeof(*buf); + ice_memset(buf, 0, buf_size, ICE_NONDMA_MEM); + buf->generic[0].node_teid = CPU_TO_LE32(node_teid); + status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret, + NULL); + if (status != ICE_SUCCESS || num_elem_ret != 1) + ice_debug(hw, ICE_DBG_SCHED, "query element failed\n"); + return status; +} + +/** + * ice_get_fw_mode - returns FW mode + * @hw: pointer to the HW struct + */ +enum ice_fw_modes ice_get_fw_mode(struct ice_hw *hw) +{ +#define ICE_FW_MODE_DBG_M BIT(0) +#define ICE_FW_MODE_REC_M BIT(1) +#define ICE_FW_MODE_ROLLBACK_M BIT(2) + u32 fw_mode; + + /* check the current FW mode */ + fw_mode = rd32(hw, GL_MNG_FWSM) & GL_MNG_FWSM_FW_MODES_M; + + if (fw_mode & ICE_FW_MODE_DBG_M) + return ICE_FW_MODE_DBG; + else if (fw_mode & ICE_FW_MODE_REC_M) + return ICE_FW_MODE_REC; + else if (fw_mode & ICE_FW_MODE_ROLLBACK_M) + return ICE_FW_MODE_ROLLBACK; + else + return ICE_FW_MODE_NORMAL; +} + +/** + * ice_fw_supports_link_override + * @hw: pointer to the hardware structure + * + * Checks if the firmware supports link override 
+ */ +bool ice_fw_supports_link_override(struct ice_hw *hw) +{ + if (hw->api_maj_ver == ICE_FW_API_LINK_OVERRIDE_MAJ) { + if (hw->api_min_ver > ICE_FW_API_LINK_OVERRIDE_MIN) + return true; + if (hw->api_min_ver == ICE_FW_API_LINK_OVERRIDE_MIN && + hw->api_patch >= ICE_FW_API_LINK_OVERRIDE_PATCH) + return true; + } else if (hw->api_maj_ver > ICE_FW_API_LINK_OVERRIDE_MAJ) { + return true; + } + + return false; +} + +/** + * ice_get_link_default_override + * @ldo: pointer to the link default override struct + * @pi: pointer to the port info struct + * + * Gets the link default override for a port + */ +enum ice_status +ice_get_link_default_override(struct ice_link_default_override_tlv *ldo, + struct ice_port_info *pi) +{ + u16 i, tlv, tlv_len, tlv_start, buf, offset; + struct ice_hw *hw = pi->hw; + enum ice_status status; + + status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len, + ICE_SR_LINK_DEFAULT_OVERRIDE_PTR); + if (status) { + ice_debug(hw, ICE_DBG_INIT, + "Failed to read link override TLV.\n"); + return status; + } + + /* Each port has its own config; calculate for our port */ + tlv_start = tlv + pi->lport * ICE_SR_PFA_LINK_OVERRIDE_WORDS + + ICE_SR_PFA_LINK_OVERRIDE_OFFSET; + + /* link options first */ + status = ice_read_sr_word(hw, tlv_start, &buf); + if (status) { + ice_debug(hw, ICE_DBG_INIT, + "Failed to read override link options.\n"); + return status; + } + ldo->options = buf & ICE_LINK_OVERRIDE_OPT_M; + ldo->phy_config = (buf & ICE_LINK_OVERRIDE_PHY_CFG_M) >> + ICE_LINK_OVERRIDE_PHY_CFG_S; + + /* link PHY config */ + offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_FEC_OFFSET; + status = ice_read_sr_word(hw, offset, &buf); + if (status) { + ice_debug(hw, ICE_DBG_INIT, + "Failed to read override phy config.\n"); + return status; + } + ldo->fec_options = buf & ICE_LINK_OVERRIDE_FEC_OPT_M; + + /* PHY types low */ + offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET; + for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) { + status = ice_read_sr_word(hw, (offset + i), &buf); + if (status) { + ice_debug(hw, ICE_DBG_INIT, + "Failed to read override link options.\n"); + return status; + } + /* shift 16 bits at a time to fill 64 bits */ + ldo->phy_type_low |= ((u64)buf << (i * 16)); + } + + /* PHY types high */ + offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET + + ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; + for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) { + status = ice_read_sr_word(hw, (offset + i), &buf); + if (status) { + ice_debug(hw, ICE_DBG_INIT, + "Failed to read override link options.\n"); + return status; + } + /* shift 16 bits at a time to fill 64 bits */ + ldo->phy_type_high |= ((u64)buf << (i * 16)); + } + + return status; +} diff --git a/src/spdk/dpdk/drivers/net/ice/base/ice_common.h b/src/spdk/dpdk/drivers/net/ice/base/ice_common.h new file mode 100644 index 000000000..2a1077b90 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ice/base/ice_common.h @@ -0,0 +1,221 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + +#ifndef _ICE_COMMON_H_ +#define _ICE_COMMON_H_ + +#include "ice_type.h" +#include "ice_nvm.h" +#include "ice_flex_pipe.h" +#include "ice_switch.h" +#include "ice_fdir.h" + +enum ice_fw_modes { + ICE_FW_MODE_NORMAL, + ICE_FW_MODE_DBG, + ICE_FW_MODE_REC, + ICE_FW_MODE_ROLLBACK +}; + +enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw); +enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw); +void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw); +enum ice_status ice_init_hw(struct ice_hw 
*hw); +void ice_deinit_hw(struct ice_hw *hw); +enum ice_status ice_check_reset(struct ice_hw *hw); +enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req); + +enum ice_status ice_create_all_ctrlq(struct ice_hw *hw); +enum ice_status ice_init_all_ctrlq(struct ice_hw *hw); +void ice_shutdown_all_ctrlq(struct ice_hw *hw); +void ice_destroy_all_ctrlq(struct ice_hw *hw); +enum ice_status +ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq, + struct ice_rq_event_info *e, u16 *pending); +enum ice_status +ice_get_link_status(struct ice_port_info *pi, bool *link_up); +enum ice_status ice_update_link_info(struct ice_port_info *pi); +enum ice_status +ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res, + enum ice_aq_res_access_type access, u32 timeout); +void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res); +enum ice_status +ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res); +enum ice_status +ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res); +enum ice_status +ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries, + struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size, + enum ice_adminq_opc opc, struct ice_sq_cd *cd); +enum ice_status +ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq, + struct ice_aq_desc *desc, void *buf, u16 buf_size, + struct ice_sq_cd *cd); +void ice_clear_pxe_mode(struct ice_hw *hw); + +enum ice_status ice_get_caps(struct ice_hw *hw); + +void ice_set_safe_mode_caps(struct ice_hw *hw); + +/* Define a macro that will align a pointer to point to the next memory address + * that falls on the given power of 2 (i.e., 2, 4, 8, 16, 32, 64...). For + * example, given the variable pointer = 0x1006, then after the following call: + * + * pointer = ICE_ALIGN(pointer, 4) + * + * ... the value of pointer would equal 0x1008, since 0x1008 is the next + * address after 0x1006 which is divisible by 4. 
+ */ +#define ICE_ALIGN(ptr, align) (((ptr) + ((align) - 1)) & ~((align) - 1)) + +enum ice_status +ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx, + u32 rxq_index); +enum ice_status ice_clear_rxq_ctx(struct ice_hw *hw, u32 rxq_index); +enum ice_status +ice_clear_tx_cmpltnq_ctx(struct ice_hw *hw, u32 tx_cmpltnq_index); +enum ice_status +ice_write_tx_cmpltnq_ctx(struct ice_hw *hw, + struct ice_tx_cmpltnq_ctx *tx_cmpltnq_ctx, + u32 tx_cmpltnq_index); +enum ice_status +ice_clear_tx_drbell_q_ctx(struct ice_hw *hw, u32 tx_drbell_q_index); +enum ice_status +ice_write_tx_drbell_q_ctx(struct ice_hw *hw, + struct ice_tx_drbell_q_ctx *tx_drbell_q_ctx, + u32 tx_drbell_q_index); + +enum ice_status +ice_aq_get_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type, u8 *lut, + u16 lut_size); +enum ice_status +ice_aq_set_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type, u8 *lut, + u16 lut_size); +enum ice_status +ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle, + struct ice_aqc_get_set_rss_keys *keys); +enum ice_status +ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle, + struct ice_aqc_get_set_rss_keys *keys); +enum ice_status +ice_aq_add_lan_txq(struct ice_hw *hw, u8 count, + struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size, + struct ice_sq_cd *cd); +enum ice_status +ice_aq_move_recfg_lan_txq(struct ice_hw *hw, u8 num_qs, bool is_move, + bool is_tc_change, bool subseq_call, bool flush_pipe, + u8 timeout, u32 *blocked_cgds, + struct ice_aqc_move_txqs_data *buf, u16 buf_size, + u8 *txqs_moved, struct ice_sq_cd *cd); + +bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq); +enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading); +void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode); +extern const struct ice_ctx_ele ice_tlan_ctx_info[]; +enum ice_status +ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info); +enum ice_status +ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, + void *buf, u16 buf_size, struct ice_sq_cd *cd); +enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd); + +enum ice_status +ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv, + struct ice_sq_cd *cd); +enum ice_status +ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode, + struct ice_aqc_get_phy_caps_data *caps, + struct ice_sq_cd *cd); +void +ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high, + u16 link_speeds_bitmap); +enum ice_status +ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags, + struct ice_sq_cd *cd); + +enum ice_status ice_clear_pf_cfg(struct ice_hw *hw); +enum ice_status +ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi, + struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd); +bool ice_fw_supports_link_override(struct ice_hw *hw); +enum ice_status +ice_get_link_default_override(struct ice_link_default_override_tlv *ldo, + struct ice_port_info *pi); + +enum ice_fc_mode ice_caps_to_fc_mode(u8 caps); +enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options); +enum ice_status +ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, + bool ena_auto_link_update); +bool +ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *caps, + struct ice_aqc_set_phy_cfg_data *cfg); +void +ice_copy_phy_caps_to_cfg(struct ice_port_info *pi, + struct ice_aqc_get_phy_caps_data *caps, + struct ice_aqc_set_phy_cfg_data *cfg); +enum ice_status +ice_cfg_phy_fec(struct ice_port_info *pi, struct 
ice_aqc_set_phy_cfg_data *cfg, + enum ice_fec_mode fec); +enum ice_status +ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link, + struct ice_sq_cd *cd); +enum ice_status +ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd); +enum ice_status +ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse, + struct ice_link_status *link, struct ice_sq_cd *cd); +enum ice_status +ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask, + struct ice_sq_cd *cd); +enum ice_status +ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd); + +enum ice_status +ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode, + struct ice_sq_cd *cd); +enum ice_status +ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr, + u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length, + bool write, struct ice_sq_cd *cd); + +enum ice_status +ice_get_ctx(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info); +enum ice_status +ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues, + u16 *q_handle, u16 *q_ids, u32 *q_teids, + enum ice_disq_rst_src rst_src, u16 vmvf_num, + struct ice_sq_cd *cd); +enum ice_status +ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap, + u16 *max_lanqs); +enum ice_status +ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle, + u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size, + struct ice_sq_cd *cd); +enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle); +void ice_replay_post(struct ice_hw *hw); +void ice_sched_replay_agg_vsi_preinit(struct ice_hw *hw); +void ice_sched_replay_agg(struct ice_hw *hw); +enum ice_status ice_sched_replay_tc_node_bw(struct ice_port_info *pi); +enum ice_status ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle); +enum ice_status +ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx); +struct ice_q_ctx * +ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle); +void +ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, + u64 *prev_stat, u64 *cur_stat); +void +ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, + u64 *prev_stat, u64 *cur_stat); +void +ice_stat_update_repc(struct ice_hw *hw, u16 vsi_handle, bool prev_stat_loaded, + struct ice_eth_stats *cur_stats); +enum ice_fw_modes ice_get_fw_mode(struct ice_hw *hw); +void ice_print_rollback_msg(struct ice_hw *hw); +enum ice_status +ice_sched_query_elem(struct ice_hw *hw, u32 node_teid, + struct ice_aqc_get_elem *buf); +#endif /* _ICE_COMMON_H_ */ diff --git a/src/spdk/dpdk/drivers/net/ice/base/ice_controlq.c b/src/spdk/dpdk/drivers/net/ice/base/ice_controlq.c new file mode 100644 index 000000000..47dde2f7c --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ice/base/ice_controlq.c @@ -0,0 +1,1206 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + +#include "ice_common.h" + +#define ICE_CQ_INIT_REGS(qinfo, prefix) \ +do { \ + (qinfo)->sq.head = prefix##_ATQH; \ + (qinfo)->sq.tail = prefix##_ATQT; \ + (qinfo)->sq.len = prefix##_ATQLEN; \ + (qinfo)->sq.bah = prefix##_ATQBAH; \ + (qinfo)->sq.bal = prefix##_ATQBAL; \ + (qinfo)->sq.len_mask = prefix##_ATQLEN_ATQLEN_M; \ + (qinfo)->sq.len_ena_mask = prefix##_ATQLEN_ATQENABLE_M; \ + (qinfo)->sq.len_crit_mask = prefix##_ATQLEN_ATQCRIT_M; \ + (qinfo)->sq.head_mask = prefix##_ATQH_ATQH_M; \ + (qinfo)->rq.head = prefix##_ARQH; \ + (qinfo)->rq.tail = prefix##_ARQT; \ + (qinfo)->rq.len 
= prefix##_ARQLEN; \ + (qinfo)->rq.bah = prefix##_ARQBAH; \ + (qinfo)->rq.bal = prefix##_ARQBAL; \ + (qinfo)->rq.len_mask = prefix##_ARQLEN_ARQLEN_M; \ + (qinfo)->rq.len_ena_mask = prefix##_ARQLEN_ARQENABLE_M; \ + (qinfo)->rq.len_crit_mask = prefix##_ARQLEN_ARQCRIT_M; \ + (qinfo)->rq.head_mask = prefix##_ARQH_ARQH_M; \ +} while (0) + +/** + * ice_adminq_init_regs - Initialize AdminQ registers + * @hw: pointer to the hardware structure + * + * This assumes the alloc_sq and alloc_rq functions have already been called + */ +static void ice_adminq_init_regs(struct ice_hw *hw) +{ + struct ice_ctl_q_info *cq = &hw->adminq; + + ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); + + ICE_CQ_INIT_REGS(cq, PF_FW); +} + +/** + * ice_mailbox_init_regs - Initialize Mailbox registers + * @hw: pointer to the hardware structure + * + * This assumes the alloc_sq and alloc_rq functions have already been called + */ +static void ice_mailbox_init_regs(struct ice_hw *hw) +{ + struct ice_ctl_q_info *cq = &hw->mailboxq; + + ICE_CQ_INIT_REGS(cq, PF_MBX); +} + +/** + * ice_check_sq_alive + * @hw: pointer to the HW struct + * @cq: pointer to the specific Control queue + * + * Returns true if Queue is enabled else false. + */ +bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq) +{ + /* check both queue-length and queue-enable fields */ + if (cq->sq.len && cq->sq.len_mask && cq->sq.len_ena_mask) + return (rd32(hw, cq->sq.len) & (cq->sq.len_mask | + cq->sq.len_ena_mask)) == + (cq->num_sq_entries | cq->sq.len_ena_mask); + + return false; +} + +/** + * ice_alloc_ctrlq_sq_ring - Allocate Control Transmit Queue (ATQ) rings + * @hw: pointer to the hardware structure + * @cq: pointer to the specific Control queue + */ +static enum ice_status +ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq) +{ + size_t size = cq->num_sq_entries * sizeof(struct ice_aq_desc); + + cq->sq.desc_buf.va = ice_alloc_dma_mem(hw, &cq->sq.desc_buf, size); + if (!cq->sq.desc_buf.va) + return ICE_ERR_NO_MEMORY; + + cq->sq.cmd_buf = ice_calloc(hw, cq->num_sq_entries, + sizeof(struct ice_sq_cd)); + if (!cq->sq.cmd_buf) { + ice_free_dma_mem(hw, &cq->sq.desc_buf); + return ICE_ERR_NO_MEMORY; + } + + return ICE_SUCCESS; +} + +/** + * ice_alloc_ctrlq_rq_ring - Allocate Control Receive Queue (ARQ) rings + * @hw: pointer to the hardware structure + * @cq: pointer to the specific Control queue + */ +static enum ice_status +ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq) +{ + size_t size = cq->num_rq_entries * sizeof(struct ice_aq_desc); + + cq->rq.desc_buf.va = ice_alloc_dma_mem(hw, &cq->rq.desc_buf, size); + if (!cq->rq.desc_buf.va) + return ICE_ERR_NO_MEMORY; + return ICE_SUCCESS; +} + +/** + * ice_free_cq_ring - Free control queue ring + * @hw: pointer to the hardware structure + * @ring: pointer to the specific control queue ring + * + * This assumes the posted buffers have already been cleaned + * and de-allocated + */ +static void ice_free_cq_ring(struct ice_hw *hw, struct ice_ctl_q_ring *ring) +{ + ice_free_dma_mem(hw, &ring->desc_buf); +} + +/** + * ice_alloc_rq_bufs - Allocate pre-posted buffers for the ARQ + * @hw: pointer to the hardware structure + * @cq: pointer to the specific Control queue + */ +static enum ice_status +ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq) +{ + int i; + + /* We'll be allocating the buffer info memory first, then we can + * allocate the mapped buffers for the event processing + */ + cq->rq.dma_head = ice_calloc(hw, cq->num_rq_entries, + 
sizeof(cq->rq.desc_buf)); + if (!cq->rq.dma_head) + return ICE_ERR_NO_MEMORY; + cq->rq.r.rq_bi = (struct ice_dma_mem *)cq->rq.dma_head; + + /* allocate the mapped buffers */ + for (i = 0; i < cq->num_rq_entries; i++) { + struct ice_aq_desc *desc; + struct ice_dma_mem *bi; + + bi = &cq->rq.r.rq_bi[i]; + bi->va = ice_alloc_dma_mem(hw, bi, cq->rq_buf_size); + if (!bi->va) + goto unwind_alloc_rq_bufs; + + /* now configure the descriptors for use */ + desc = ICE_CTL_Q_DESC(cq->rq, i); + + desc->flags = CPU_TO_LE16(ICE_AQ_FLAG_BUF); + if (cq->rq_buf_size > ICE_AQ_LG_BUF) + desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_LB); + desc->opcode = 0; + /* This is in accordance with Admin queue design, there is no + * register for buffer size configuration + */ + desc->datalen = CPU_TO_LE16(bi->size); + desc->retval = 0; + desc->cookie_high = 0; + desc->cookie_low = 0; + desc->params.generic.addr_high = + CPU_TO_LE32(ICE_HI_DWORD(bi->pa)); + desc->params.generic.addr_low = + CPU_TO_LE32(ICE_LO_DWORD(bi->pa)); + desc->params.generic.param0 = 0; + desc->params.generic.param1 = 0; + } + return ICE_SUCCESS; + +unwind_alloc_rq_bufs: + /* don't try to free the one that failed... */ + i--; + for (; i >= 0; i--) + ice_free_dma_mem(hw, &cq->rq.r.rq_bi[i]); + ice_free(hw, cq->rq.dma_head); + + return ICE_ERR_NO_MEMORY; +} + +/** + * ice_alloc_sq_bufs - Allocate empty buffer structs for the ATQ + * @hw: pointer to the hardware structure + * @cq: pointer to the specific Control queue + */ +static enum ice_status +ice_alloc_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq) +{ + int i; + + /* No mapped memory needed yet, just the buffer info structures */ + cq->sq.dma_head = ice_calloc(hw, cq->num_sq_entries, + sizeof(cq->sq.desc_buf)); + if (!cq->sq.dma_head) + return ICE_ERR_NO_MEMORY; + cq->sq.r.sq_bi = (struct ice_dma_mem *)cq->sq.dma_head; + + /* allocate the mapped buffers */ + for (i = 0; i < cq->num_sq_entries; i++) { + struct ice_dma_mem *bi; + + bi = &cq->sq.r.sq_bi[i]; + bi->va = ice_alloc_dma_mem(hw, bi, cq->sq_buf_size); + if (!bi->va) + goto unwind_alloc_sq_bufs; + } + return ICE_SUCCESS; + +unwind_alloc_sq_bufs: + /* don't try to free the one that failed... 
*/ + i--; + for (; i >= 0; i--) + ice_free_dma_mem(hw, &cq->sq.r.sq_bi[i]); + ice_free(hw, cq->sq.dma_head); + + return ICE_ERR_NO_MEMORY; +} + +static enum ice_status +ice_cfg_cq_regs(struct ice_hw *hw, struct ice_ctl_q_ring *ring, u16 num_entries) +{ + /* Clear Head and Tail */ + wr32(hw, ring->head, 0); + wr32(hw, ring->tail, 0); + + /* set starting point */ + wr32(hw, ring->len, (num_entries | ring->len_ena_mask)); + wr32(hw, ring->bal, ICE_LO_DWORD(ring->desc_buf.pa)); + wr32(hw, ring->bah, ICE_HI_DWORD(ring->desc_buf.pa)); + + /* Check one register to verify that config was applied */ + if (rd32(hw, ring->bal) != ICE_LO_DWORD(ring->desc_buf.pa)) + return ICE_ERR_AQ_ERROR; + + return ICE_SUCCESS; +} + +/** + * ice_cfg_sq_regs - configure Control ATQ registers + * @hw: pointer to the hardware structure + * @cq: pointer to the specific Control queue + * + * Configure base address and length registers for the transmit queue + */ +static enum ice_status +ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq) +{ + return ice_cfg_cq_regs(hw, &cq->sq, cq->num_sq_entries); +} + +/** + * ice_cfg_rq_regs - configure Control ARQ register + * @hw: pointer to the hardware structure + * @cq: pointer to the specific Control queue + * + * Configure base address and length registers for the receive (event queue) + */ +static enum ice_status +ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq) +{ + enum ice_status status; + + status = ice_cfg_cq_regs(hw, &cq->rq, cq->num_rq_entries); + if (status) + return status; + + /* Update tail in the HW to post pre-allocated buffers */ + wr32(hw, cq->rq.tail, (u32)(cq->num_rq_entries - 1)); + + return ICE_SUCCESS; +} + +/** + * ice_init_sq - main initialization routine for Control ATQ + * @hw: pointer to the hardware structure + * @cq: pointer to the specific Control queue + * + * This is the main initialization routine for the Control Send Queue + * Prior to calling this function, the driver *MUST* set the following fields + * in the cq->structure: + * - cq->num_sq_entries + * - cq->sq_buf_size + * + * Do *NOT* hold the lock when calling this as the memory allocation routines + * called are not going to be atomic context safe + */ +static enum ice_status ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq) +{ + enum ice_status ret_code; + + ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); + + if (cq->sq.count > 0) { + /* queue already initialized */ + ret_code = ICE_ERR_NOT_READY; + goto init_ctrlq_exit; + } + + /* verify input for valid configuration */ + if (!cq->num_sq_entries || !cq->sq_buf_size) { + ret_code = ICE_ERR_CFG; + goto init_ctrlq_exit; + } + + cq->sq.next_to_use = 0; + cq->sq.next_to_clean = 0; + + /* allocate the ring memory */ + ret_code = ice_alloc_ctrlq_sq_ring(hw, cq); + if (ret_code) + goto init_ctrlq_exit; + + /* allocate buffers in the rings */ + ret_code = ice_alloc_sq_bufs(hw, cq); + if (ret_code) + goto init_ctrlq_free_rings; + + /* initialize base registers */ + ret_code = ice_cfg_sq_regs(hw, cq); + if (ret_code) + goto init_ctrlq_free_rings; + + /* success! */ + cq->sq.count = cq->num_sq_entries; + goto init_ctrlq_exit; + +init_ctrlq_free_rings: + ice_free_cq_ring(hw, &cq->sq); + +init_ctrlq_exit: + return ret_code; +} + +/** + * ice_init_rq - initialize ARQ + * @hw: pointer to the hardware structure + * @cq: pointer to the specific Control queue + * + * The main initialization routine for the Admin Receive (Event) Queue. 
+ * Prior to calling this function, the driver *MUST* set the following fields + * in the cq->structure: + * - cq->num_rq_entries + * - cq->rq_buf_size + * + * Do *NOT* hold the lock when calling this as the memory allocation routines + * called are not going to be atomic context safe + */ +static enum ice_status ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq) +{ + enum ice_status ret_code; + + ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); + + if (cq->rq.count > 0) { + /* queue already initialized */ + ret_code = ICE_ERR_NOT_READY; + goto init_ctrlq_exit; + } + + /* verify input for valid configuration */ + if (!cq->num_rq_entries || !cq->rq_buf_size) { + ret_code = ICE_ERR_CFG; + goto init_ctrlq_exit; + } + + cq->rq.next_to_use = 0; + cq->rq.next_to_clean = 0; + + /* allocate the ring memory */ + ret_code = ice_alloc_ctrlq_rq_ring(hw, cq); + if (ret_code) + goto init_ctrlq_exit; + + /* allocate buffers in the rings */ + ret_code = ice_alloc_rq_bufs(hw, cq); + if (ret_code) + goto init_ctrlq_free_rings; + + /* initialize base registers */ + ret_code = ice_cfg_rq_regs(hw, cq); + if (ret_code) + goto init_ctrlq_free_rings; + + /* success! */ + cq->rq.count = cq->num_rq_entries; + goto init_ctrlq_exit; + +init_ctrlq_free_rings: + ice_free_cq_ring(hw, &cq->rq); + +init_ctrlq_exit: + return ret_code; +} + +#define ICE_FREE_CQ_BUFS(hw, qi, ring) \ +do { \ + int i; \ + /* free descriptors */ \ + for (i = 0; i < (qi)->num_##ring##_entries; i++) \ + if ((qi)->ring.r.ring##_bi[i].pa) \ + ice_free_dma_mem((hw), \ + &(qi)->ring.r.ring##_bi[i]); \ + /* free the buffer info list */ \ + if ((qi)->ring.cmd_buf) \ + ice_free(hw, (qi)->ring.cmd_buf); \ + /* free DMA head */ \ + ice_free(hw, (qi)->ring.dma_head); \ +} while (0) + +/** + * ice_shutdown_sq - shutdown the Control ATQ + * @hw: pointer to the hardware structure + * @cq: pointer to the specific Control queue + * + * The main shutdown routine for the Control Transmit Queue + */ +static enum ice_status +ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq) +{ + enum ice_status ret_code = ICE_SUCCESS; + + ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); + + ice_acquire_lock(&cq->sq_lock); + + if (!cq->sq.count) { + ret_code = ICE_ERR_NOT_READY; + goto shutdown_sq_out; + } + + /* Stop firmware AdminQ processing */ + wr32(hw, cq->sq.head, 0); + wr32(hw, cq->sq.tail, 0); + wr32(hw, cq->sq.len, 0); + wr32(hw, cq->sq.bal, 0); + wr32(hw, cq->sq.bah, 0); + + cq->sq.count = 0; /* to indicate uninitialized queue */ + + /* free ring buffers and the ring itself */ + ICE_FREE_CQ_BUFS(hw, cq, sq); + ice_free_cq_ring(hw, &cq->sq); + +shutdown_sq_out: + ice_release_lock(&cq->sq_lock); + return ret_code; +} + +/** + * ice_aq_ver_check - Check the reported AQ API version. + * @hw: pointer to the hardware structure + * + * Checks if the driver should load on a given AQ API version. + * + * Return: 'true' iff the driver should attempt to load. 'false' otherwise. + */ +static bool ice_aq_ver_check(struct ice_hw *hw) +{ + if (hw->api_maj_ver > EXP_FW_API_VER_MAJOR) { + /* Major API version is newer than expected, don't load */ + ice_warn(hw, "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n"); + return false; + } else if (hw->api_maj_ver == EXP_FW_API_VER_MAJOR) { + if (hw->api_min_ver > (EXP_FW_API_VER_MINOR + 2)) + ice_info(hw, "The driver for the device detected a newer version of the NVM image than expected. 
Please install the most recent version of the network driver.\n"); + else if ((hw->api_min_ver + 2) < EXP_FW_API_VER_MINOR) + ice_info(hw, "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n"); + } else { + /* Major API version is older than expected, log a warning */ + ice_info(hw, "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n"); + } + return true; +} + +/** + * ice_shutdown_rq - shutdown Control ARQ + * @hw: pointer to the hardware structure + * @cq: pointer to the specific Control queue + * + * The main shutdown routine for the Control Receive Queue + */ +static enum ice_status +ice_shutdown_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq) +{ + enum ice_status ret_code = ICE_SUCCESS; + + ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); + + ice_acquire_lock(&cq->rq_lock); + + if (!cq->rq.count) { + ret_code = ICE_ERR_NOT_READY; + goto shutdown_rq_out; + } + + /* Stop Control Queue processing */ + wr32(hw, cq->rq.head, 0); + wr32(hw, cq->rq.tail, 0); + wr32(hw, cq->rq.len, 0); + wr32(hw, cq->rq.bal, 0); + wr32(hw, cq->rq.bah, 0); + + /* set rq.count to 0 to indicate uninitialized queue */ + cq->rq.count = 0; + + /* free ring buffers and the ring itself */ + ICE_FREE_CQ_BUFS(hw, cq, rq); + ice_free_cq_ring(hw, &cq->rq); + +shutdown_rq_out: + ice_release_lock(&cq->rq_lock); + return ret_code; +} + +/** + * ice_init_check_adminq - Check version for Admin Queue to know if its alive + * @hw: pointer to the hardware structure + */ +static enum ice_status ice_init_check_adminq(struct ice_hw *hw) +{ + struct ice_ctl_q_info *cq = &hw->adminq; + enum ice_status status; + + ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); + + status = ice_aq_get_fw_ver(hw, NULL); + if (status) + goto init_ctrlq_free_rq; + + if (!ice_aq_ver_check(hw)) { + status = ICE_ERR_FW_API_VER; + goto init_ctrlq_free_rq; + } + + return ICE_SUCCESS; + +init_ctrlq_free_rq: + ice_shutdown_rq(hw, cq); + ice_shutdown_sq(hw, cq); + return status; +} + +/** + * ice_init_ctrlq - main initialization routine for any control Queue + * @hw: pointer to the hardware structure + * @q_type: specific Control queue type + * + * Prior to calling this function, the driver *MUST* set the following fields + * in the cq->structure: + * - cq->num_sq_entries + * - cq->num_rq_entries + * - cq->rq_buf_size + * - cq->sq_buf_size + * + * NOTE: this function does not initialize the controlq locks + */ +static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type) +{ + struct ice_ctl_q_info *cq; + enum ice_status ret_code; + + ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); + + switch (q_type) { + case ICE_CTL_Q_ADMIN: + ice_adminq_init_regs(hw); + cq = &hw->adminq; + break; + case ICE_CTL_Q_MAILBOX: + ice_mailbox_init_regs(hw); + cq = &hw->mailboxq; + break; + default: + return ICE_ERR_PARAM; + } + cq->qtype = q_type; + + /* verify input for valid configuration */ + if (!cq->num_rq_entries || !cq->num_sq_entries || + !cq->rq_buf_size || !cq->sq_buf_size) { + return ICE_ERR_CFG; + } + + /* setup SQ command write back timeout */ + cq->sq_cmd_timeout = ICE_CTL_Q_SQ_CMD_TIMEOUT; + + /* allocate the ATQ */ + ret_code = ice_init_sq(hw, cq); + if (ret_code) + return ret_code; + + /* allocate the ARQ */ + ret_code = ice_init_rq(hw, cq); + if (ret_code) + goto init_ctrlq_free_sq; + + /* success! 
*/ + return ICE_SUCCESS; + +init_ctrlq_free_sq: + ice_shutdown_sq(hw, cq); + return ret_code; +} + +/** + * ice_shutdown_ctrlq - shutdown routine for any control queue + * @hw: pointer to the hardware structure + * @q_type: specific Control queue type + * + * NOTE: this function does not destroy the control queue locks. + */ +static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type) +{ + struct ice_ctl_q_info *cq; + + ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); + + switch (q_type) { + case ICE_CTL_Q_ADMIN: + cq = &hw->adminq; + if (ice_check_sq_alive(hw, cq)) + ice_aq_q_shutdown(hw, true); + break; + case ICE_CTL_Q_MAILBOX: + cq = &hw->mailboxq; + break; + default: + return; + } + + ice_shutdown_sq(hw, cq); + ice_shutdown_rq(hw, cq); +} + +/** + * ice_shutdown_all_ctrlq - shutdown routine for all control queues + * @hw: pointer to the hardware structure + * + * NOTE: this function does not destroy the control queue locks. The driver + * may call this at runtime to shutdown and later restart control queues, such + * as in response to a reset event. + */ +void ice_shutdown_all_ctrlq(struct ice_hw *hw) +{ + ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); + /* Shutdown FW admin queue */ + ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN); + /* Shutdown PF-VF Mailbox */ + ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX); +} + +/** + * ice_init_all_ctrlq - main initialization routine for all control queues + * @hw: pointer to the hardware structure + * + * Prior to calling this function, the driver MUST* set the following fields + * in the cq->structure for all control queues: + * - cq->num_sq_entries + * - cq->num_rq_entries + * - cq->rq_buf_size + * - cq->sq_buf_size + * + * NOTE: this function does not initialize the controlq locks. + */ +enum ice_status ice_init_all_ctrlq(struct ice_hw *hw) +{ + enum ice_status status; + u32 retry = 0; + + ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); + + /* Init FW admin queue */ + do { + status = ice_init_ctrlq(hw, ICE_CTL_Q_ADMIN); + if (status) + return status; + + status = ice_init_check_adminq(hw); + if (status != ICE_ERR_AQ_FW_CRITICAL) + break; + + ice_debug(hw, ICE_DBG_AQ_MSG, + "Retry Admin Queue init due to FW critical error\n"); + ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN); + ice_msec_delay(ICE_CTL_Q_ADMIN_INIT_MSEC, true); + } while (retry++ < ICE_CTL_Q_ADMIN_INIT_TIMEOUT); + + if (status) + return status; + /* Init Mailbox queue */ + return ice_init_ctrlq(hw, ICE_CTL_Q_MAILBOX); +} + +/** + * ice_init_ctrlq_locks - Initialize locks for a control queue + * @cq: pointer to the control queue + * + * Initializes the send and receive queue locks for a given control queue. + */ +static void ice_init_ctrlq_locks(struct ice_ctl_q_info *cq) +{ + ice_init_lock(&cq->sq_lock); + ice_init_lock(&cq->rq_lock); +} + +/** + * ice_create_all_ctrlq - main initialization routine for all control queues + * @hw: pointer to the hardware structure + * + * Prior to calling this function, the driver *MUST* set the following fields + * in the cq->structure for all control queues: + * - cq->num_sq_entries + * - cq->num_rq_entries + * - cq->rq_buf_size + * - cq->sq_buf_size + * + * This function creates all the control queue locks and then calls + * ice_init_all_ctrlq. It should be called once during driver load. If the + * driver needs to re-initialize control queues at run time it should call + * ice_init_all_ctrlq instead. 
+ */ +enum ice_status ice_create_all_ctrlq(struct ice_hw *hw) +{ + ice_init_ctrlq_locks(&hw->adminq); + ice_init_ctrlq_locks(&hw->mailboxq); + + return ice_init_all_ctrlq(hw); +} + +/** + * ice_destroy_ctrlq_locks - Destroy locks for a control queue + * @cq: pointer to the control queue + * + * Destroys the send and receive queue locks for a given control queue. + */ +static void +ice_destroy_ctrlq_locks(struct ice_ctl_q_info *cq) +{ + ice_destroy_lock(&cq->sq_lock); + ice_destroy_lock(&cq->rq_lock); +} + +/** + * ice_destroy_all_ctrlq - exit routine for all control queues + * @hw: pointer to the hardware structure + * + * This function shuts down all the control queues and then destroys the + * control queue locks. It should be called once during driver unload. The + * driver should call ice_shutdown_all_ctrlq if it needs to shut down and + * reinitialize control queues, such as in response to a reset event. + */ +void ice_destroy_all_ctrlq(struct ice_hw *hw) +{ + /* shut down all the control queues first */ + ice_shutdown_all_ctrlq(hw); + + ice_destroy_ctrlq_locks(&hw->adminq); + ice_destroy_ctrlq_locks(&hw->mailboxq); +} + +/** + * ice_clean_sq - cleans Admin send queue (ATQ) + * @hw: pointer to the hardware structure + * @cq: pointer to the specific Control queue + * + * returns the number of free desc + */ +static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq) +{ + struct ice_ctl_q_ring *sq = &cq->sq; + u16 ntc = sq->next_to_clean; + struct ice_sq_cd *details; + struct ice_aq_desc *desc; + + desc = ICE_CTL_Q_DESC(*sq, ntc); + details = ICE_CTL_Q_DETAILS(*sq, ntc); + + while (rd32(hw, cq->sq.head) != ntc) { + ice_debug(hw, ICE_DBG_AQ_MSG, + "ntc %d head %d.\n", ntc, rd32(hw, cq->sq.head)); + ice_memset(desc, 0, sizeof(*desc), ICE_DMA_MEM); + ice_memset(details, 0, sizeof(*details), ICE_NONDMA_MEM); + ntc++; + if (ntc == sq->count) + ntc = 0; + desc = ICE_CTL_Q_DESC(*sq, ntc); + details = ICE_CTL_Q_DETAILS(*sq, ntc); + } + + sq->next_to_clean = ntc; + + return ICE_CTL_Q_DESC_UNUSED(sq); +} + +/** + * ice_debug_cq + * @hw: pointer to the hardware structure + * @desc: pointer to control queue descriptor + * @buf: pointer to command buffer + * @buf_len: max length of buf + * + * Dumps debug log about control command with descriptor contents. + */ +static void ice_debug_cq(struct ice_hw *hw, void *desc, void *buf, u16 buf_len) +{ + struct ice_aq_desc *cq_desc = (struct ice_aq_desc *)desc; + u16 datalen, flags; + + if (!((ICE_DBG_AQ_DESC | ICE_DBG_AQ_DESC_BUF) & hw->debug_mask)) + return; + + if (!desc) + return; + + datalen = LE16_TO_CPU(cq_desc->datalen); + flags = LE16_TO_CPU(cq_desc->flags); + + ice_debug(hw, ICE_DBG_AQ_DESC, + "CQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n", + LE16_TO_CPU(cq_desc->opcode), flags, datalen, + LE16_TO_CPU(cq_desc->retval)); + ice_debug(hw, ICE_DBG_AQ_DESC, "\tcookie (h,l) 0x%08X 0x%08X\n", + LE32_TO_CPU(cq_desc->cookie_high), + LE32_TO_CPU(cq_desc->cookie_low)); + ice_debug(hw, ICE_DBG_AQ_DESC, "\tparam (0,1) 0x%08X 0x%08X\n", + LE32_TO_CPU(cq_desc->params.generic.param0), + LE32_TO_CPU(cq_desc->params.generic.param1)); + ice_debug(hw, ICE_DBG_AQ_DESC, "\taddr (h,l) 0x%08X 0x%08X\n", + LE32_TO_CPU(cq_desc->params.generic.addr_high), + LE32_TO_CPU(cq_desc->params.generic.addr_low)); + /* Dump buffer iff 1) one exists and 2) is either a response indicated + * by the DD and/or CMP flag set or a command with the RD flag set. 
+ */ + if (buf && cq_desc->datalen != 0 && + (flags & (ICE_AQ_FLAG_DD | ICE_AQ_FLAG_CMP) || + flags & ICE_AQ_FLAG_RD)) { + ice_debug(hw, ICE_DBG_AQ_DESC_BUF, "Buffer:\n"); + ice_debug_array(hw, ICE_DBG_AQ_DESC_BUF, 16, 1, (u8 *)buf, + MIN_T(u16, buf_len, datalen)); + } +} + +/** + * ice_sq_done - check if FW has processed the Admin Send Queue (ATQ) + * @hw: pointer to the HW struct + * @cq: pointer to the specific Control queue + * + * Returns true if the firmware has processed all descriptors on the + * admin send queue. Returns false if there are still requests pending. + */ +static bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq) +{ + /* AQ designers suggest use of head for better + * timing reliability than DD bit + */ + return rd32(hw, cq->sq.head) == cq->sq.next_to_use; +} + +/** + * ice_sq_send_cmd_nolock - send command to Control Queue (ATQ) + * @hw: pointer to the HW struct + * @cq: pointer to the specific Control queue + * @desc: prefilled descriptor describing the command (non DMA mem) + * @buf: buffer to use for indirect commands (or NULL for direct commands) + * @buf_size: size of buffer for indirect commands (or 0 for direct commands) + * @cd: pointer to command details structure + * + * This is the main send command routine for the ATQ. It runs the queue, + * cleans the queue, etc. + */ +static enum ice_status +ice_sq_send_cmd_nolock(struct ice_hw *hw, struct ice_ctl_q_info *cq, + struct ice_aq_desc *desc, void *buf, u16 buf_size, + struct ice_sq_cd *cd) +{ + struct ice_dma_mem *dma_buf = NULL; + struct ice_aq_desc *desc_on_ring; + bool cmd_completed = false; + enum ice_status status = ICE_SUCCESS; + struct ice_sq_cd *details; + u32 total_delay = 0; + u16 retval = 0; + u32 val = 0; + + /* if reset is in progress return a soft error */ + if (hw->reset_ongoing) + return ICE_ERR_RESET_ONGOING; + + cq->sq_last_status = ICE_AQ_RC_OK; + + if (!cq->sq.count) { + ice_debug(hw, ICE_DBG_AQ_MSG, + "Control Send queue not initialized.\n"); + status = ICE_ERR_AQ_EMPTY; + goto sq_send_command_error; + } + + if ((buf && !buf_size) || (!buf && buf_size)) { + status = ICE_ERR_PARAM; + goto sq_send_command_error; + } + + if (buf) { + if (buf_size > cq->sq_buf_size) { + ice_debug(hw, ICE_DBG_AQ_MSG, + "Invalid buffer size for Control Send queue: %d.\n", + buf_size); + status = ICE_ERR_INVAL_SIZE; + goto sq_send_command_error; + } + + desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_BUF); + if (buf_size > ICE_AQ_LG_BUF) + desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_LB); + } + + val = rd32(hw, cq->sq.head); + if (val >= cq->num_sq_entries) { + ice_debug(hw, ICE_DBG_AQ_MSG, + "head overrun at %d in the Control Send Queue ring\n", + val); + status = ICE_ERR_AQ_EMPTY; + goto sq_send_command_error; + } + + details = ICE_CTL_Q_DETAILS(cq->sq, cq->sq.next_to_use); + if (cd) + *details = *cd; + else + ice_memset(details, 0, sizeof(*details), ICE_NONDMA_MEM); + + /* Call clean and check queue available function to reclaim the + * descriptors that were processed by FW/MBX; the function returns the + * number of desc available. The clean function called here could be + * called in a separate thread in case of asynchronous completions. 
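+ *
+ * As a concrete example, with num_sq_entries == 32 an idle ring reports
+ * 31 free descriptors (ICE_CTL_Q_DESC_UNUSED always keeps one slot unused
+ * so a full ring can be told apart from an empty one); a return value of 0
+ * therefore means every usable slot is still owned by firmware and the
+ * command is rejected below with ICE_ERR_AQ_FULL.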
+ */ + if (ice_clean_sq(hw, cq) == 0) { + ice_debug(hw, ICE_DBG_AQ_MSG, + "Error: Control Send Queue is full.\n"); + status = ICE_ERR_AQ_FULL; + goto sq_send_command_error; + } + + /* initialize the temp desc pointer with the right desc */ + desc_on_ring = ICE_CTL_Q_DESC(cq->sq, cq->sq.next_to_use); + + /* if the desc is available copy the temp desc to the right place */ + ice_memcpy(desc_on_ring, desc, sizeof(*desc_on_ring), + ICE_NONDMA_TO_DMA); + + /* if buf is not NULL assume indirect command */ + if (buf) { + dma_buf = &cq->sq.r.sq_bi[cq->sq.next_to_use]; + /* copy the user buf into the respective DMA buf */ + ice_memcpy(dma_buf->va, buf, buf_size, ICE_NONDMA_TO_DMA); + desc_on_ring->datalen = CPU_TO_LE16(buf_size); + + /* Update the address values in the desc with the pa value + * for respective buffer + */ + desc_on_ring->params.generic.addr_high = + CPU_TO_LE32(ICE_HI_DWORD(dma_buf->pa)); + desc_on_ring->params.generic.addr_low = + CPU_TO_LE32(ICE_LO_DWORD(dma_buf->pa)); + } + + /* Debug desc and buffer */ + ice_debug(hw, ICE_DBG_AQ_DESC, + "ATQ: Control Send queue desc and buffer:\n"); + + ice_debug_cq(hw, (void *)desc_on_ring, buf, buf_size); + + (cq->sq.next_to_use)++; + if (cq->sq.next_to_use == cq->sq.count) + cq->sq.next_to_use = 0; + wr32(hw, cq->sq.tail, cq->sq.next_to_use); + + do { + if (ice_sq_done(hw, cq)) + break; + + ice_usec_delay(ICE_CTL_Q_SQ_CMD_USEC, false); + total_delay++; + } while (total_delay < cq->sq_cmd_timeout); + + /* if ready, copy the desc back to temp */ + if (ice_sq_done(hw, cq)) { + ice_memcpy(desc, desc_on_ring, sizeof(*desc), + ICE_DMA_TO_NONDMA); + if (buf) { + /* get returned length to copy */ + u16 copy_size = LE16_TO_CPU(desc->datalen); + + if (copy_size > buf_size) { + ice_debug(hw, ICE_DBG_AQ_MSG, + "Return len %d > than buf len %d\n", + copy_size, buf_size); + status = ICE_ERR_AQ_ERROR; + } else { + ice_memcpy(buf, dma_buf->va, copy_size, + ICE_DMA_TO_NONDMA); + } + } + retval = LE16_TO_CPU(desc->retval); + if (retval) { + ice_debug(hw, ICE_DBG_AQ_MSG, + "Control Send Queue command 0x%04X completed with error 0x%X\n", + LE16_TO_CPU(desc->opcode), + retval); + + /* strip off FW internal code */ + retval &= 0xff; + } + cmd_completed = true; + if (!status && retval != ICE_AQ_RC_OK) + status = ICE_ERR_AQ_ERROR; + cq->sq_last_status = (enum ice_aq_err)retval; + } + + ice_debug(hw, ICE_DBG_AQ_MSG, + "ATQ: desc and buffer writeback:\n"); + + ice_debug_cq(hw, (void *)desc, buf, buf_size); + + /* save writeback AQ if requested */ + if (details->wb_desc) + ice_memcpy(details->wb_desc, desc_on_ring, + sizeof(*details->wb_desc), ICE_DMA_TO_NONDMA); + + /* update the error if time out occurred */ + if (!cmd_completed) { + if (rd32(hw, cq->rq.len) & cq->rq.len_crit_mask || + rd32(hw, cq->sq.len) & cq->sq.len_crit_mask) { + ice_debug(hw, ICE_DBG_AQ_MSG, + "Critical FW error.\n"); + status = ICE_ERR_AQ_FW_CRITICAL; + } else { + ice_debug(hw, ICE_DBG_AQ_MSG, + "Control Send Queue Writeback timeout.\n"); + status = ICE_ERR_AQ_TIMEOUT; + } + } + +sq_send_command_error: + return status; +} + +/** + * ice_sq_send_cmd - send command to Control Queue (ATQ) + * @hw: pointer to the HW struct + * @cq: pointer to the specific Control queue + * @desc: prefilled descriptor describing the command (non DMA mem) + * @buf: buffer to use for indirect commands (or NULL for direct commands) + * @buf_size: size of buffer for indirect commands (or 0 for direct commands) + * @cd: pointer to command details structure + * + * This is the main send command routine for the ATQ. 
It runs the queue, + * cleans the queue, etc. + */ +enum ice_status +ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq, + struct ice_aq_desc *desc, void *buf, u16 buf_size, + struct ice_sq_cd *cd) +{ + enum ice_status status = ICE_SUCCESS; + + /* if reset is in progress return a soft error */ + if (hw->reset_ongoing) + return ICE_ERR_RESET_ONGOING; + + ice_acquire_lock(&cq->sq_lock); + status = ice_sq_send_cmd_nolock(hw, cq, desc, buf, buf_size, cd); + ice_release_lock(&cq->sq_lock); + + return status; +} + +/** + * ice_fill_dflt_direct_cmd_desc - AQ descriptor helper function + * @desc: pointer to the temp descriptor (non DMA mem) + * @opcode: the opcode can be used to decide which flags to turn off or on + * + * Fill the desc with default values + */ +void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode) +{ + /* zero out the desc */ + ice_memset(desc, 0, sizeof(*desc), ICE_NONDMA_MEM); + desc->opcode = CPU_TO_LE16(opcode); + desc->flags = CPU_TO_LE16(ICE_AQ_FLAG_SI); +} + +/** + * ice_clean_rq_elem + * @hw: pointer to the HW struct + * @cq: pointer to the specific Control queue + * @e: event info from the receive descriptor, includes any buffers + * @pending: number of events that could be left to process + * + * This function cleans one Admin Receive Queue element and returns + * the contents through e. It can also return how many events are + * left to process through 'pending'. + */ +enum ice_status +ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq, + struct ice_rq_event_info *e, u16 *pending) +{ + u16 ntc = cq->rq.next_to_clean; + enum ice_status ret_code = ICE_SUCCESS; + struct ice_aq_desc *desc; + struct ice_dma_mem *bi; + u16 desc_idx; + u16 datalen; + u16 flags; + u16 ntu; + + /* pre-clean the event info */ + ice_memset(&e->desc, 0, sizeof(e->desc), ICE_NONDMA_MEM); + + /* take the lock before we start messing with the ring */ + ice_acquire_lock(&cq->rq_lock); + + if (!cq->rq.count) { + ice_debug(hw, ICE_DBG_AQ_MSG, + "Control Receive queue not initialized.\n"); + ret_code = ICE_ERR_AQ_EMPTY; + goto clean_rq_elem_err; + } + + /* set next_to_use to head */ + ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask); + + if (ntu == ntc) { + /* nothing to do - shouldn't need to update ring's values */ + ret_code = ICE_ERR_AQ_NO_WORK; + goto clean_rq_elem_out; + } + + /* now clean the next descriptor */ + desc = ICE_CTL_Q_DESC(cq->rq, ntc); + desc_idx = ntc; + + cq->rq_last_status = (enum ice_aq_err)LE16_TO_CPU(desc->retval); + flags = LE16_TO_CPU(desc->flags); + if (flags & ICE_AQ_FLAG_ERR) { + ret_code = ICE_ERR_AQ_ERROR; + ice_debug(hw, ICE_DBG_AQ_MSG, + "Control Receive Queue Event 0x%04X received with error 0x%X\n", + LE16_TO_CPU(desc->opcode), + cq->rq_last_status); + } + ice_memcpy(&e->desc, desc, sizeof(e->desc), ICE_DMA_TO_NONDMA); + datalen = LE16_TO_CPU(desc->datalen); + e->msg_len = MIN_T(u16, datalen, e->buf_len); + if (e->msg_buf && e->msg_len) + ice_memcpy(e->msg_buf, cq->rq.r.rq_bi[desc_idx].va, + e->msg_len, ICE_DMA_TO_NONDMA); + + ice_debug(hw, ICE_DBG_AQ_DESC, "ARQ: desc and buffer:\n"); + + ice_debug_cq(hw, (void *)desc, e->msg_buf, + cq->rq_buf_size); + + /* Restore the original datalen and buffer address in the desc, + * FW updates datalen to indicate the event message size + */ + bi = &cq->rq.r.rq_bi[ntc]; + ice_memset(desc, 0, sizeof(*desc), ICE_DMA_MEM); + + desc->flags = CPU_TO_LE16(ICE_AQ_FLAG_BUF); + if (cq->rq_buf_size > ICE_AQ_LG_BUF) + desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_LB); + desc->datalen = 
CPU_TO_LE16(bi->size); + desc->params.generic.addr_high = CPU_TO_LE32(ICE_HI_DWORD(bi->pa)); + desc->params.generic.addr_low = CPU_TO_LE32(ICE_LO_DWORD(bi->pa)); + + /* set tail = the last cleaned desc index. */ + wr32(hw, cq->rq.tail, ntc); + /* ntc is updated to tail + 1 */ + ntc++; + if (ntc == cq->num_rq_entries) + ntc = 0; + cq->rq.next_to_clean = ntc; + cq->rq.next_to_use = ntu; + +clean_rq_elem_out: + /* Set pending if needed, unlock and return */ + if (pending) { + /* re-read HW head to calculate actual pending messages */ + ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask); + *pending = (u16)((ntc > ntu ? cq->rq.count : 0) + (ntu - ntc)); + } +clean_rq_elem_err: + ice_release_lock(&cq->rq_lock); + + return ret_code; +} diff --git a/src/spdk/dpdk/drivers/net/ice/base/ice_controlq.h b/src/spdk/dpdk/drivers/net/ice/base/ice_controlq.h new file mode 100644 index 000000000..e5e000178 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ice/base/ice_controlq.h @@ -0,0 +1,100 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + +#ifndef _ICE_CONTROLQ_H_ +#define _ICE_CONTROLQ_H_ + +#include "ice_adminq_cmd.h" + +/* Maximum buffer lengths for all control queue types */ +#define ICE_AQ_MAX_BUF_LEN 4096 +#define ICE_MBXQ_MAX_BUF_LEN 4096 + +#define ICE_CTL_Q_DESC(R, i) \ + (&(((struct ice_aq_desc *)((R).desc_buf.va))[i])) + +#define ICE_CTL_Q_DESC_UNUSED(R) \ + (u16)((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \ + (R)->next_to_clean - (R)->next_to_use - 1) + +/* Defines that help manage the driver vs FW API checks. + * Take a look at ice_aq_ver_check in ice_controlq.c for actual usage. + */ +#define EXP_FW_API_VER_BRANCH 0x00 +#define EXP_FW_API_VER_MAJOR 0x01 +#define EXP_FW_API_VER_MINOR 0x05 + +/* Different control queue types: These are mainly for SW consumption. 
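+ * ICE_CTL_Q_ADMIN selects hw->adminq (the firmware admin send/receive queue
+ * pair) and ICE_CTL_Q_MAILBOX selects hw->mailboxq (the PF-VF mailbox); see
+ * the dispatch switch in ice_shutdown_ctrlq() in ice_controlq.c.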
*/ +enum ice_ctl_q { + ICE_CTL_Q_UNKNOWN = 0, + ICE_CTL_Q_ADMIN, + ICE_CTL_Q_MAILBOX, +}; + +/* Control Queue timeout settings - max delay 250ms */ +#define ICE_CTL_Q_SQ_CMD_TIMEOUT 2500 /* Count 2500 times */ +#define ICE_CTL_Q_SQ_CMD_USEC 100 /* Check every 100usec */ +#define ICE_CTL_Q_ADMIN_INIT_TIMEOUT 10 /* Count 10 times */ +#define ICE_CTL_Q_ADMIN_INIT_MSEC 100 /* Check every 100msec */ + +struct ice_ctl_q_ring { + void *dma_head; /* Virtual address to DMA head */ + struct ice_dma_mem desc_buf; /* descriptor ring memory */ + void *cmd_buf; /* command buffer memory */ + + union { + struct ice_dma_mem *sq_bi; + struct ice_dma_mem *rq_bi; + } r; + + u16 count; /* Number of descriptors */ + + /* used for interrupt processing */ + u16 next_to_use; + u16 next_to_clean; + + /* used for queue tracking */ + u32 head; + u32 tail; + u32 len; + u32 bah; + u32 bal; + u32 len_mask; + u32 len_ena_mask; + u32 len_crit_mask; + u32 head_mask; +}; + +/* sq transaction details */ +struct ice_sq_cd { + struct ice_aq_desc *wb_desc; +}; + +#define ICE_CTL_Q_DETAILS(R, i) (&(((struct ice_sq_cd *)((R).cmd_buf))[i])) + +/* rq event information */ +struct ice_rq_event_info { + struct ice_aq_desc desc; + u16 msg_len; + u16 buf_len; + u8 *msg_buf; +}; + +/* Control Queue information */ +struct ice_ctl_q_info { + enum ice_ctl_q qtype; + enum ice_aq_err rq_last_status; /* last status on receive queue */ + struct ice_ctl_q_ring rq; /* receive queue */ + struct ice_ctl_q_ring sq; /* send queue */ + u32 sq_cmd_timeout; /* send queue cmd write back timeout */ + u16 num_rq_entries; /* receive queue depth */ + u16 num_sq_entries; /* send queue depth */ + u16 rq_buf_size; /* receive queue buffer size */ + u16 sq_buf_size; /* send queue buffer size */ + enum ice_aq_err sq_last_status; /* last status on send queue */ + struct ice_lock sq_lock; /* Send queue lock */ + struct ice_lock rq_lock; /* Receive queue lock */ +}; + +#endif /* _ICE_CONTROLQ_H_ */ diff --git a/src/spdk/dpdk/drivers/net/ice/base/ice_dcb.c b/src/spdk/dpdk/drivers/net/ice/base/ice_dcb.c new file mode 100644 index 000000000..50634a145 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ice/base/ice_dcb.c @@ -0,0 +1,1441 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + +#include "ice_common.h" +#include "ice_sched.h" +#include "ice_dcb.h" + +/** + * ice_aq_get_lldp_mib + * @hw: pointer to the HW struct + * @bridge_type: type of bridge requested + * @mib_type: Local, Remote or both Local and Remote MIBs + * @buf: pointer to the caller-supplied buffer to store the MIB block + * @buf_size: size of the buffer (in bytes) + * @local_len: length of the returned Local LLDP MIB + * @remote_len: length of the returned Remote LLDP MIB + * @cd: pointer to command details structure or NULL + * + * Requests the complete LLDP MIB (entire packet). 
(0x0A00) + */ +enum ice_status +ice_aq_get_lldp_mib(struct ice_hw *hw, u8 bridge_type, u8 mib_type, void *buf, + u16 buf_size, u16 *local_len, u16 *remote_len, + struct ice_sq_cd *cd) +{ + struct ice_aqc_lldp_get_mib *cmd; + struct ice_aq_desc desc; + enum ice_status status; + + cmd = &desc.params.lldp_get_mib; + + if (buf_size == 0 || !buf) + return ICE_ERR_PARAM; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_get_mib); + + cmd->type = mib_type & ICE_AQ_LLDP_MIB_TYPE_M; + cmd->type |= (bridge_type << ICE_AQ_LLDP_BRID_TYPE_S) & + ICE_AQ_LLDP_BRID_TYPE_M; + + desc.datalen = CPU_TO_LE16(buf_size); + + status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); + if (!status) { + if (local_len) + *local_len = LE16_TO_CPU(cmd->local_len); + if (remote_len) + *remote_len = LE16_TO_CPU(cmd->remote_len); + } + + return status; +} + +/** + * ice_aq_cfg_lldp_mib_change + * @hw: pointer to the HW struct + * @ena_update: Enable or Disable event posting + * @cd: pointer to command details structure or NULL + * + * Enable or Disable posting of an event on ARQ when LLDP MIB + * associated with the interface changes (0x0A01) + */ +enum ice_status +ice_aq_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_update, + struct ice_sq_cd *cd) +{ + struct ice_aqc_lldp_set_mib_change *cmd; + struct ice_aq_desc desc; + + cmd = &desc.params.lldp_set_event; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_mib_change); + + if (!ena_update) + cmd->command |= ICE_AQ_LLDP_MIB_UPDATE_DIS; + + return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); +} + +/** + * ice_aq_stop_lldp + * @hw: pointer to the HW struct + * @shutdown_lldp_agent: True if LLDP Agent needs to be Shutdown + * False if LLDP Agent needs to be Stopped + * @persist: True if Stop/Shutdown of LLDP Agent needs to be persistent across + * reboots + * @cd: pointer to command details structure or NULL + * + * Stop or Shutdown the embedded LLDP Agent (0x0A05) + */ +enum ice_status +ice_aq_stop_lldp(struct ice_hw *hw, bool shutdown_lldp_agent, bool persist, + struct ice_sq_cd *cd) +{ + struct ice_aqc_lldp_stop *cmd; + struct ice_aq_desc desc; + + cmd = &desc.params.lldp_stop; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_stop); + + if (shutdown_lldp_agent) + cmd->command |= ICE_AQ_LLDP_AGENT_SHUTDOWN; + + if (persist) + cmd->command |= ICE_AQ_LLDP_AGENT_PERSIST_DIS; + + return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); +} + +/** + * ice_aq_start_lldp + * @hw: pointer to the HW struct + * @persist: True if Start of LLDP Agent needs to be persistent across reboots + * @cd: pointer to command details structure or NULL + * + * Start the embedded LLDP Agent on all ports. (0x0A06) + */ +enum ice_status +ice_aq_start_lldp(struct ice_hw *hw, bool persist, struct ice_sq_cd *cd) +{ + struct ice_aqc_lldp_start *cmd; + struct ice_aq_desc desc; + + cmd = &desc.params.lldp_start; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_start); + + cmd->command = ICE_AQ_LLDP_AGENT_START; + + if (persist) + cmd->command |= ICE_AQ_LLDP_AGENT_PERSIST_ENA; + + return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); +} + +/** + * ice_aq_set_lldp_mib - Set the LLDP MIB + * @hw: pointer to the HW struct + * @mib_type: Local, Remote or both Local and Remote MIBs + * @buf: pointer to the caller-supplied buffer to store the MIB block + * @buf_size: size of the buffer (in bytes) + * @cd: pointer to command details structure or NULL + * + * Set the LLDP MIB. 
(0x0A08) + */ +enum ice_status +ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size, + struct ice_sq_cd *cd) +{ + struct ice_aqc_lldp_set_local_mib *cmd; + struct ice_aq_desc desc; + + cmd = &desc.params.lldp_set_mib; + + if (buf_size == 0 || !buf) + return ICE_ERR_PARAM; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib); + + desc.flags |= CPU_TO_LE16((u16)ICE_AQ_FLAG_RD); + desc.datalen = CPU_TO_LE16(buf_size); + + cmd->type = mib_type; + cmd->length = CPU_TO_LE16(buf_size); + + return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); +} + +/** + * ice_get_dcbx_status + * @hw: pointer to the HW struct + * + * Get the DCBX status from the Firmware + */ +u8 ice_get_dcbx_status(struct ice_hw *hw) +{ + u32 reg; + + reg = rd32(hw, PRTDCB_GENS); + return (u8)((reg & PRTDCB_GENS_DCBX_STATUS_M) >> + PRTDCB_GENS_DCBX_STATUS_S); +} + +/** + * ice_parse_ieee_ets_common_tlv + * @buf: Data buffer to be parsed for ETS CFG/REC data + * @ets_cfg: Container to store parsed data + * + * Parses the common data of IEEE 802.1Qaz ETS CFG/REC TLV + */ +static void +ice_parse_ieee_ets_common_tlv(u8 *buf, struct ice_dcb_ets_cfg *ets_cfg) +{ + u8 offset = 0; + int i; + + /* Priority Assignment Table (4 octets) + * Octets:| 1 | 2 | 3 | 4 | + * ----------------------------------------- + * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7| + * ----------------------------------------- + * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0| + * ----------------------------------------- + */ + for (i = 0; i < 4; i++) { + ets_cfg->prio_table[i * 2] = + ((buf[offset] & ICE_IEEE_ETS_PRIO_1_M) >> + ICE_IEEE_ETS_PRIO_1_S); + ets_cfg->prio_table[i * 2 + 1] = + ((buf[offset] & ICE_IEEE_ETS_PRIO_0_M) >> + ICE_IEEE_ETS_PRIO_0_S); + offset++; + } + + /* TC Bandwidth Table (8 octets) + * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | + * --------------------------------- + * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7| + * --------------------------------- + * + * TSA Assignment Table (8 octets) + * Octets:| 9 | 10| 11| 12| 13| 14| 15| 16| + * --------------------------------- + * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7| + * --------------------------------- + */ + ice_for_each_traffic_class(i) { + ets_cfg->tcbwtable[i] = buf[offset]; + ets_cfg->tsatable[i] = buf[ICE_MAX_TRAFFIC_CLASS + offset++]; + } +} + +/** + * ice_parse_ieee_etscfg_tlv + * @tlv: IEEE 802.1Qaz ETS CFG TLV + * @dcbcfg: Local store to update ETS CFG data + * + * Parses IEEE 802.1Qaz ETS CFG TLV + */ +static void +ice_parse_ieee_etscfg_tlv(struct ice_lldp_org_tlv *tlv, + struct ice_dcbx_cfg *dcbcfg) +{ + struct ice_dcb_ets_cfg *etscfg; + u8 *buf = tlv->tlvinfo; + + /* First Octet post subtype + * -------------------------- + * |will-|CBS | Re- | Max | + * |ing | |served| TCs | + * -------------------------- + * |1bit | 1bit|3 bits|3bits| + */ + etscfg = &dcbcfg->etscfg; + etscfg->willing = ((buf[0] & ICE_IEEE_ETS_WILLING_M) >> + ICE_IEEE_ETS_WILLING_S); + etscfg->cbs = ((buf[0] & ICE_IEEE_ETS_CBS_M) >> ICE_IEEE_ETS_CBS_S); + etscfg->maxtcs = ((buf[0] & ICE_IEEE_ETS_MAXTC_M) >> + ICE_IEEE_ETS_MAXTC_S); + + /* Begin parsing at Priority Assignment Table (offset 1 in buf) */ + ice_parse_ieee_ets_common_tlv(&buf[1], etscfg); +} + +/** + * ice_parse_ieee_etsrec_tlv + * @tlv: IEEE 802.1Qaz ETS REC TLV + * @dcbcfg: Local store to update ETS REC data + * + * Parses IEEE 802.1Qaz ETS REC TLV + */ +static void +ice_parse_ieee_etsrec_tlv(struct ice_lldp_org_tlv *tlv, + struct ice_dcbx_cfg *dcbcfg) +{ + u8 *buf = tlv->tlvinfo; + + /* Begin parsing at Priority Assignment Table 
(offset 1 in buf) */ + ice_parse_ieee_ets_common_tlv(&buf[1], &dcbcfg->etsrec); +} + +/** + * ice_parse_ieee_pfccfg_tlv + * @tlv: IEEE 802.1Qaz PFC CFG TLV + * @dcbcfg: Local store to update PFC CFG data + * + * Parses IEEE 802.1Qaz PFC CFG TLV + */ +static void +ice_parse_ieee_pfccfg_tlv(struct ice_lldp_org_tlv *tlv, + struct ice_dcbx_cfg *dcbcfg) +{ + u8 *buf = tlv->tlvinfo; + + /* ---------------------------------------- + * |will-|MBC | Re- | PFC | PFC Enable | + * |ing | |served| cap | | + * ----------------------------------------- + * |1bit | 1bit|2 bits|4bits| 1 octet | + */ + dcbcfg->pfc.willing = ((buf[0] & ICE_IEEE_PFC_WILLING_M) >> + ICE_IEEE_PFC_WILLING_S); + dcbcfg->pfc.mbc = ((buf[0] & ICE_IEEE_PFC_MBC_M) >> ICE_IEEE_PFC_MBC_S); + dcbcfg->pfc.pfccap = ((buf[0] & ICE_IEEE_PFC_CAP_M) >> + ICE_IEEE_PFC_CAP_S); + dcbcfg->pfc.pfcena = buf[1]; +} + +/** + * ice_parse_ieee_app_tlv + * @tlv: IEEE 802.1Qaz APP TLV + * @dcbcfg: Local store to update APP PRIO data + * + * Parses IEEE 802.1Qaz APP PRIO TLV + */ +static void +ice_parse_ieee_app_tlv(struct ice_lldp_org_tlv *tlv, + struct ice_dcbx_cfg *dcbcfg) +{ + u16 offset = 0; + u16 typelen; + int i = 0; + u16 len; + u8 *buf; + + typelen = NTOHS(tlv->typelen); + len = ((typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S); + buf = tlv->tlvinfo; + + /* Removing sizeof(ouisubtype) and reserved byte from len. + * Remaining len div 3 is number of APP TLVs. + */ + len -= (sizeof(tlv->ouisubtype) + 1); + + /* Move offset to App Priority Table */ + offset++; + + /* Application Priority Table (3 octets) + * Octets:| 1 | 2 | 3 | + * ----------------------------------------- + * |Priority|Rsrvd| Sel | Protocol ID | + * ----------------------------------------- + * Bits:|23 21|20 19|18 16|15 0| + * ----------------------------------------- + */ + while (offset < len) { + dcbcfg->app[i].priority = ((buf[offset] & + ICE_IEEE_APP_PRIO_M) >> + ICE_IEEE_APP_PRIO_S); + dcbcfg->app[i].selector = ((buf[offset] & + ICE_IEEE_APP_SEL_M) >> + ICE_IEEE_APP_SEL_S); + dcbcfg->app[i].prot_id = (buf[offset + 1] << 0x8) | + buf[offset + 2]; + /* Move to next app */ + offset += 3; + i++; + if (i >= ICE_DCBX_MAX_APPS) + break; + } + + dcbcfg->numapps = i; +} + +/** + * ice_parse_ieee_tlv + * @tlv: IEEE 802.1Qaz TLV + * @dcbcfg: Local store to update ETS REC data + * + * Get the TLV subtype and send it to parsing function + * based on the subtype value + */ +static void +ice_parse_ieee_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg) +{ + u32 ouisubtype; + u8 subtype; + + ouisubtype = NTOHL(tlv->ouisubtype); + subtype = (u8)((ouisubtype & ICE_LLDP_TLV_SUBTYPE_M) >> + ICE_LLDP_TLV_SUBTYPE_S); + switch (subtype) { + case ICE_IEEE_SUBTYPE_ETS_CFG: + ice_parse_ieee_etscfg_tlv(tlv, dcbcfg); + break; + case ICE_IEEE_SUBTYPE_ETS_REC: + ice_parse_ieee_etsrec_tlv(tlv, dcbcfg); + break; + case ICE_IEEE_SUBTYPE_PFC_CFG: + ice_parse_ieee_pfccfg_tlv(tlv, dcbcfg); + break; + case ICE_IEEE_SUBTYPE_APP_PRI: + ice_parse_ieee_app_tlv(tlv, dcbcfg); + break; + default: + break; + } +} + +/** + * ice_parse_cee_pgcfg_tlv + * @tlv: CEE DCBX PG CFG TLV + * @dcbcfg: Local store to update ETS CFG data + * + * Parses CEE DCBX PG CFG TLV + */ +static void +ice_parse_cee_pgcfg_tlv(struct ice_cee_feat_tlv *tlv, + struct ice_dcbx_cfg *dcbcfg) +{ + struct ice_dcb_ets_cfg *etscfg; + u8 *buf = tlv->tlvinfo; + u16 offset = 0; + int i; + + etscfg = &dcbcfg->etscfg; + + if (tlv->en_will_err & ICE_CEE_FEAT_TLV_WILLING_M) + etscfg->willing = 1; + + etscfg->cbs = 0; + /* Priority Group Table 
(4 octets) + * Octets:| 1 | 2 | 3 | 4 | + * ----------------------------------------- + * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7| + * ----------------------------------------- + * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0| + * ----------------------------------------- + */ + for (i = 0; i < 4; i++) { + etscfg->prio_table[i * 2] = + ((buf[offset] & ICE_CEE_PGID_PRIO_1_M) >> + ICE_CEE_PGID_PRIO_1_S); + etscfg->prio_table[i * 2 + 1] = + ((buf[offset] & ICE_CEE_PGID_PRIO_0_M) >> + ICE_CEE_PGID_PRIO_0_S); + offset++; + } + + /* PG Percentage Table (8 octets) + * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | + * --------------------------------- + * |pg0|pg1|pg2|pg3|pg4|pg5|pg6|pg7| + * --------------------------------- + */ + ice_for_each_traffic_class(i) { + etscfg->tcbwtable[i] = buf[offset++]; + + if (etscfg->prio_table[i] == ICE_CEE_PGID_STRICT) + dcbcfg->etscfg.tsatable[i] = ICE_IEEE_TSA_STRICT; + else + dcbcfg->etscfg.tsatable[i] = ICE_IEEE_TSA_ETS; + } + + /* Number of TCs supported (1 octet) */ + etscfg->maxtcs = buf[offset]; +} + +/** + * ice_parse_cee_pfccfg_tlv + * @tlv: CEE DCBX PFC CFG TLV + * @dcbcfg: Local store to update PFC CFG data + * + * Parses CEE DCBX PFC CFG TLV + */ +static void +ice_parse_cee_pfccfg_tlv(struct ice_cee_feat_tlv *tlv, + struct ice_dcbx_cfg *dcbcfg) +{ + u8 *buf = tlv->tlvinfo; + + if (tlv->en_will_err & ICE_CEE_FEAT_TLV_WILLING_M) + dcbcfg->pfc.willing = 1; + + /* ------------------------ + * | PFC Enable | PFC TCs | + * ------------------------ + * | 1 octet | 1 octet | + */ + dcbcfg->pfc.pfcena = buf[0]; + dcbcfg->pfc.pfccap = buf[1]; +} + +/** + * ice_parse_cee_app_tlv + * @tlv: CEE DCBX APP TLV + * @dcbcfg: Local store to update APP PRIO data + * + * Parses CEE DCBX APP PRIO TLV + */ +static void +ice_parse_cee_app_tlv(struct ice_cee_feat_tlv *tlv, struct ice_dcbx_cfg *dcbcfg) +{ + u16 len, typelen, offset = 0; + struct ice_cee_app_prio *app; + u8 i; + + typelen = NTOHS(tlv->hdr.typelen); + len = ((typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S); + + dcbcfg->numapps = len / sizeof(*app); + if (!dcbcfg->numapps) + return; + if (dcbcfg->numapps > ICE_DCBX_MAX_APPS) + dcbcfg->numapps = ICE_DCBX_MAX_APPS; + + for (i = 0; i < dcbcfg->numapps; i++) { + u8 up, selector; + + app = (struct ice_cee_app_prio *)(tlv->tlvinfo + offset); + for (up = 0; up < ICE_MAX_USER_PRIORITY; up++) + if (app->prio_map & BIT(up)) + break; + + dcbcfg->app[i].priority = up; + + /* Get Selector from lower 2 bits, and convert to IEEE */ + selector = (app->upper_oui_sel & ICE_CEE_APP_SELECTOR_M); + switch (selector) { + case ICE_CEE_APP_SEL_ETHTYPE: + dcbcfg->app[i].selector = ICE_APP_SEL_ETHTYPE; + break; + case ICE_CEE_APP_SEL_TCPIP: + dcbcfg->app[i].selector = ICE_APP_SEL_TCPIP; + break; + default: + /* Keep selector as it is for unknown types */ + dcbcfg->app[i].selector = selector; + } + + dcbcfg->app[i].prot_id = NTOHS(app->protocol); + /* Move to next app */ + offset += sizeof(*app); + } +} + +/** + * ice_parse_cee_tlv + * @tlv: CEE DCBX TLV + * @dcbcfg: Local store to update DCBX config data + * + * Get the TLV subtype and send it to parsing function + * based on the subtype value + */ +static void +ice_parse_cee_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg) +{ + struct ice_cee_feat_tlv *sub_tlv; + u8 subtype, feat_tlv_count = 0; + u16 len, tlvlen, typelen; + u32 ouisubtype; + + ouisubtype = NTOHL(tlv->ouisubtype); + subtype = (u8)((ouisubtype & ICE_LLDP_TLV_SUBTYPE_M) >> + ICE_LLDP_TLV_SUBTYPE_S); + /* Return if not CEE DCBX */ + if (subtype != 
ICE_CEE_DCBX_TYPE) + return; + + typelen = NTOHS(tlv->typelen); + tlvlen = ((typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S); + len = sizeof(tlv->typelen) + sizeof(ouisubtype) + + sizeof(struct ice_cee_ctrl_tlv); + /* Return if no CEE DCBX Feature TLVs */ + if (tlvlen <= len) + return; + + sub_tlv = (struct ice_cee_feat_tlv *)((char *)tlv + len); + while (feat_tlv_count < ICE_CEE_MAX_FEAT_TYPE) { + u16 sublen; + + typelen = NTOHS(sub_tlv->hdr.typelen); + sublen = ((typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S); + subtype = (u8)((typelen & ICE_LLDP_TLV_TYPE_M) >> + ICE_LLDP_TLV_TYPE_S); + switch (subtype) { + case ICE_CEE_SUBTYPE_PG_CFG: + ice_parse_cee_pgcfg_tlv(sub_tlv, dcbcfg); + break; + case ICE_CEE_SUBTYPE_PFC_CFG: + ice_parse_cee_pfccfg_tlv(sub_tlv, dcbcfg); + break; + case ICE_CEE_SUBTYPE_APP_PRI: + ice_parse_cee_app_tlv(sub_tlv, dcbcfg); + break; + default: + return; /* Invalid Sub-type return */ + } + feat_tlv_count++; + /* Move to next sub TLV */ + sub_tlv = (struct ice_cee_feat_tlv *) + ((char *)sub_tlv + sizeof(sub_tlv->hdr.typelen) + + sublen); + } +} + +/** + * ice_parse_org_tlv + * @tlv: Organization specific TLV + * @dcbcfg: Local store to update ETS REC data + * + * Currently only IEEE 802.1Qaz TLV is supported, all others + * will be returned + */ +static void +ice_parse_org_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg) +{ + u32 ouisubtype; + u32 oui; + + ouisubtype = NTOHL(tlv->ouisubtype); + oui = ((ouisubtype & ICE_LLDP_TLV_OUI_M) >> ICE_LLDP_TLV_OUI_S); + switch (oui) { + case ICE_IEEE_8021QAZ_OUI: + ice_parse_ieee_tlv(tlv, dcbcfg); + break; + case ICE_CEE_DCBX_OUI: + ice_parse_cee_tlv(tlv, dcbcfg); + break; + default: + break; + } +} + +/** + * ice_lldp_to_dcb_cfg + * @lldpmib: LLDPDU to be parsed + * @dcbcfg: store for LLDPDU data + * + * Parse DCB configuration from the LLDPDU + */ +enum ice_status +ice_lldp_to_dcb_cfg(u8 *lldpmib, struct ice_dcbx_cfg *dcbcfg) +{ + struct ice_lldp_org_tlv *tlv; + enum ice_status ret = ICE_SUCCESS; + u16 offset = 0; + u16 typelen; + u16 type; + u16 len; + + if (!lldpmib || !dcbcfg) + return ICE_ERR_PARAM; + + /* set to the start of LLDPDU */ + lldpmib += ETH_HEADER_LEN; + tlv = (struct ice_lldp_org_tlv *)lldpmib; + while (1) { + typelen = NTOHS(tlv->typelen); + type = ((typelen & ICE_LLDP_TLV_TYPE_M) >> ICE_LLDP_TLV_TYPE_S); + len = ((typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S); + offset += sizeof(typelen) + len; + + /* END TLV or beyond LLDPDU size */ + if (type == ICE_TLV_TYPE_END || offset > ICE_LLDPDU_SIZE) + break; + + switch (type) { + case ICE_TLV_TYPE_ORG: + ice_parse_org_tlv(tlv, dcbcfg); + break; + default: + break; + } + + /* Move to next TLV */ + tlv = (struct ice_lldp_org_tlv *) + ((char *)tlv + sizeof(tlv->typelen) + len); + } + + return ret; +} + +/** + * ice_aq_get_dcb_cfg + * @hw: pointer to the HW struct + * @mib_type: MIB type for the query + * @bridgetype: bridge type for the query (remote) + * @dcbcfg: store for LLDPDU data + * + * Query DCB configuration from the firmware + */ +enum ice_status +ice_aq_get_dcb_cfg(struct ice_hw *hw, u8 mib_type, u8 bridgetype, + struct ice_dcbx_cfg *dcbcfg) +{ + enum ice_status ret; + u8 *lldpmib; + + /* Allocate the LLDPDU */ + lldpmib = (u8 *)ice_malloc(hw, ICE_LLDPDU_SIZE); + if (!lldpmib) + return ICE_ERR_NO_MEMORY; + + ret = ice_aq_get_lldp_mib(hw, bridgetype, mib_type, (void *)lldpmib, + ICE_LLDPDU_SIZE, NULL, NULL, NULL); + + if (ret == ICE_SUCCESS) + /* Parse LLDP MIB to get DCB configuration */ + ret = ice_lldp_to_dcb_cfg(lldpmib, 
dcbcfg); + + ice_free(hw, lldpmib); + + return ret; +} + +/** + * ice_aq_start_stop_dcbx - Start/Stop DCBX service in FW + * @hw: pointer to the HW struct + * @start_dcbx_agent: True if DCBX Agent needs to be started + * False if DCBX Agent needs to be stopped + * @dcbx_agent_status: FW indicates back the DCBX agent status + * True if DCBX Agent is active + * False if DCBX Agent is stopped + * @cd: pointer to command details structure or NULL + * + * Start/Stop the embedded dcbx Agent. In case that this wrapper function + * returns ICE_SUCCESS, caller will need to check if FW returns back the same + * value as stated in dcbx_agent_status, and react accordingly. (0x0A09) + */ +enum ice_status +ice_aq_start_stop_dcbx(struct ice_hw *hw, bool start_dcbx_agent, + bool *dcbx_agent_status, struct ice_sq_cd *cd) +{ + struct ice_aqc_lldp_stop_start_specific_agent *cmd; + enum ice_status status; + struct ice_aq_desc desc; + u16 opcode; + + cmd = &desc.params.lldp_agent_ctrl; + + opcode = ice_aqc_opc_lldp_stop_start_specific_agent; + + ice_fill_dflt_direct_cmd_desc(&desc, opcode); + + if (start_dcbx_agent) + cmd->command = ICE_AQC_START_STOP_AGENT_START_DCBX; + + status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd); + + *dcbx_agent_status = false; + + if (status == ICE_SUCCESS && + cmd->command == ICE_AQC_START_STOP_AGENT_START_DCBX) + *dcbx_agent_status = true; + + return status; +} + +/** + * ice_aq_get_cee_dcb_cfg + * @hw: pointer to the HW struct + * @buff: response buffer that stores CEE operational configuration + * @cd: pointer to command details structure or NULL + * + * Get CEE DCBX mode operational configuration from firmware (0x0A07) + */ +enum ice_status +ice_aq_get_cee_dcb_cfg(struct ice_hw *hw, + struct ice_aqc_get_cee_dcb_cfg_resp *buff, + struct ice_sq_cd *cd) +{ + struct ice_aq_desc desc; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cee_dcb_cfg); + + return ice_aq_send_cmd(hw, &desc, (void *)buff, sizeof(*buff), cd); +} + +/** + * ice_cee_to_dcb_cfg + * @cee_cfg: pointer to CEE configuration struct + * @dcbcfg: DCB configuration struct + * + * Convert CEE configuration from firmware to DCB configuration + */ +static void +ice_cee_to_dcb_cfg(struct ice_aqc_get_cee_dcb_cfg_resp *cee_cfg, + struct ice_dcbx_cfg *dcbcfg) +{ + u32 status, tlv_status = LE32_TO_CPU(cee_cfg->tlv_status); + u32 ice_aqc_cee_status_mask, ice_aqc_cee_status_shift; + u16 app_prio = LE16_TO_CPU(cee_cfg->oper_app_prio); + u8 i, err, sync, oper, app_index, ice_app_sel_type; + u16 ice_aqc_cee_app_mask, ice_aqc_cee_app_shift; + u16 ice_app_prot_id_type; + + /* CEE PG data to ETS config */ + dcbcfg->etscfg.maxtcs = cee_cfg->oper_num_tc; + + /* Note that the FW creates the oper_prio_tc nibbles reversed + * from those in the CEE Priority Group sub-TLV. 
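+ *
+ * For example, an oper_prio_tc byte of 0x21 yields prio_table[i * 2] = 1
+ * (low nibble) and prio_table[i * 2 + 1] = 2 (high nibble), the opposite
+ * nibble order from the on-wire parse in ice_parse_cee_pgcfg_tlv().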
+ */ + for (i = 0; i < ICE_MAX_TRAFFIC_CLASS / 2; i++) { + dcbcfg->etscfg.prio_table[i * 2] = + ((cee_cfg->oper_prio_tc[i] & ICE_CEE_PGID_PRIO_0_M) >> + ICE_CEE_PGID_PRIO_0_S); + dcbcfg->etscfg.prio_table[i * 2 + 1] = + ((cee_cfg->oper_prio_tc[i] & ICE_CEE_PGID_PRIO_1_M) >> + ICE_CEE_PGID_PRIO_1_S); + } + + ice_for_each_traffic_class(i) { + dcbcfg->etscfg.tcbwtable[i] = cee_cfg->oper_tc_bw[i]; + + if (dcbcfg->etscfg.prio_table[i] == ICE_CEE_PGID_STRICT) { + /* Map it to next empty TC */ + dcbcfg->etscfg.prio_table[i] = cee_cfg->oper_num_tc - 1; + dcbcfg->etscfg.tsatable[i] = ICE_IEEE_TSA_STRICT; + } else { + dcbcfg->etscfg.tsatable[i] = ICE_IEEE_TSA_ETS; + } + } + + /* CEE PFC data to ETS config */ + dcbcfg->pfc.pfcena = cee_cfg->oper_pfc_en; + dcbcfg->pfc.pfccap = ICE_MAX_TRAFFIC_CLASS; + + app_index = 0; + for (i = 0; i < 3; i++) { + if (i == 0) { + /* FCoE APP */ + ice_aqc_cee_status_mask = ICE_AQC_CEE_FCOE_STATUS_M; + ice_aqc_cee_status_shift = ICE_AQC_CEE_FCOE_STATUS_S; + ice_aqc_cee_app_mask = ICE_AQC_CEE_APP_FCOE_M; + ice_aqc_cee_app_shift = ICE_AQC_CEE_APP_FCOE_S; + ice_app_sel_type = ICE_APP_SEL_ETHTYPE; + ice_app_prot_id_type = ICE_APP_PROT_ID_FCOE; + } else if (i == 1) { + /* iSCSI APP */ + ice_aqc_cee_status_mask = ICE_AQC_CEE_ISCSI_STATUS_M; + ice_aqc_cee_status_shift = ICE_AQC_CEE_ISCSI_STATUS_S; + ice_aqc_cee_app_mask = ICE_AQC_CEE_APP_ISCSI_M; + ice_aqc_cee_app_shift = ICE_AQC_CEE_APP_ISCSI_S; + ice_app_sel_type = ICE_APP_SEL_TCPIP; + ice_app_prot_id_type = ICE_APP_PROT_ID_ISCSI; + } else { + /* FIP APP */ + ice_aqc_cee_status_mask = ICE_AQC_CEE_FIP_STATUS_M; + ice_aqc_cee_status_shift = ICE_AQC_CEE_FIP_STATUS_S; + ice_aqc_cee_app_mask = ICE_AQC_CEE_APP_FIP_M; + ice_aqc_cee_app_shift = ICE_AQC_CEE_APP_FIP_S; + ice_app_sel_type = ICE_APP_SEL_ETHTYPE; + ice_app_prot_id_type = ICE_APP_PROT_ID_FIP; + } + + status = (tlv_status & ice_aqc_cee_status_mask) >> + ice_aqc_cee_status_shift; + err = (status & ICE_TLV_STATUS_ERR) ? 1 : 0; + sync = (status & ICE_TLV_STATUS_SYNC) ? 1 : 0; + oper = (status & ICE_TLV_STATUS_OPER) ? 
1 : 0; + /* Add FCoE/iSCSI/FIP APP if Error is False and + * Oper/Sync is True + */ + if (!err && sync && oper) { + dcbcfg->app[app_index].priority = + (app_prio & ice_aqc_cee_app_mask) >> + ice_aqc_cee_app_shift; + dcbcfg->app[app_index].selector = ice_app_sel_type; + dcbcfg->app[app_index].prot_id = ice_app_prot_id_type; + app_index++; + } + } + + dcbcfg->numapps = app_index; +} + +/** + * ice_get_ieee_dcb_cfg + * @pi: port information structure + * @dcbx_mode: mode of DCBX (IEEE or CEE) + * + * Get IEEE or CEE mode DCB configuration from the Firmware + */ +STATIC enum ice_status +ice_get_ieee_or_cee_dcb_cfg(struct ice_port_info *pi, u8 dcbx_mode) +{ + struct ice_dcbx_cfg *dcbx_cfg = NULL; + enum ice_status ret; + + if (!pi) + return ICE_ERR_PARAM; + + if (dcbx_mode == ICE_DCBX_MODE_IEEE) + dcbx_cfg = &pi->local_dcbx_cfg; + else if (dcbx_mode == ICE_DCBX_MODE_CEE) + dcbx_cfg = &pi->desired_dcbx_cfg; + + /* Get Local DCB Config in case of ICE_DCBX_MODE_IEEE + * or get CEE DCB Desired Config in case of ICE_DCBX_MODE_CEE + */ + ret = ice_aq_get_dcb_cfg(pi->hw, ICE_AQ_LLDP_MIB_LOCAL, + ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID, dcbx_cfg); + if (ret) + goto out; + + /* Get Remote DCB Config */ + dcbx_cfg = &pi->remote_dcbx_cfg; + ret = ice_aq_get_dcb_cfg(pi->hw, ICE_AQ_LLDP_MIB_REMOTE, + ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID, dcbx_cfg); + /* Don't treat ENOENT as an error for Remote MIBs */ + if (pi->hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT) + ret = ICE_SUCCESS; + +out: + return ret; +} + +/** + * ice_get_dcb_cfg + * @pi: port information structure + * + * Get DCB configuration from the Firmware + */ +enum ice_status ice_get_dcb_cfg(struct ice_port_info *pi) +{ + struct ice_aqc_get_cee_dcb_cfg_resp cee_cfg; + struct ice_dcbx_cfg *dcbx_cfg; + enum ice_status ret; + + if (!pi) + return ICE_ERR_PARAM; + + ret = ice_aq_get_cee_dcb_cfg(pi->hw, &cee_cfg, NULL); + if (ret == ICE_SUCCESS) { + /* CEE mode */ + dcbx_cfg = &pi->local_dcbx_cfg; + dcbx_cfg->dcbx_mode = ICE_DCBX_MODE_CEE; + dcbx_cfg->tlv_status = LE32_TO_CPU(cee_cfg.tlv_status); + ice_cee_to_dcb_cfg(&cee_cfg, dcbx_cfg); + ret = ice_get_ieee_or_cee_dcb_cfg(pi, ICE_DCBX_MODE_CEE); + } else if (pi->hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT) { + /* CEE mode not enabled try querying IEEE data */ + dcbx_cfg = &pi->local_dcbx_cfg; + dcbx_cfg->dcbx_mode = ICE_DCBX_MODE_IEEE; + ret = ice_get_ieee_or_cee_dcb_cfg(pi, ICE_DCBX_MODE_IEEE); + } + + return ret; +} + +/** + * ice_init_dcb + * @hw: pointer to the HW struct + * @enable_mib_change: enable MIB change event + * + * Update DCB configuration from the Firmware + */ +enum ice_status ice_init_dcb(struct ice_hw *hw, bool enable_mib_change) +{ + struct ice_port_info *pi = hw->port_info; + enum ice_status ret = ICE_SUCCESS; + + if (!hw->func_caps.common_cap.dcb) + return ICE_ERR_NOT_SUPPORTED; + + pi->is_sw_lldp = true; + + /* Get DCBX status */ + pi->dcbx_status = ice_get_dcbx_status(hw); + + if (pi->dcbx_status == ICE_DCBX_STATUS_DONE || + pi->dcbx_status == ICE_DCBX_STATUS_IN_PROGRESS || + pi->dcbx_status == ICE_DCBX_STATUS_NOT_STARTED) { + /* Get current DCBX configuration */ + ret = ice_get_dcb_cfg(pi); + if (ret) + return ret; + pi->is_sw_lldp = false; + } else if (pi->dcbx_status == ICE_DCBX_STATUS_DIS) { + return ICE_ERR_NOT_READY; + } + + /* Configure the LLDP MIB change event */ + if (enable_mib_change) { + ret = ice_aq_cfg_lldp_mib_change(hw, true, NULL); + if (ret) + pi->is_sw_lldp = true; + } + + return ret; +} + +/** + * ice_cfg_lldp_mib_change + * @hw: pointer to the HW struct + * 
@ena_mib: enable/disable MIB change event + * + * Configure (disable/enable) MIB + */ +enum ice_status ice_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_mib) +{ + struct ice_port_info *pi = hw->port_info; + enum ice_status ret; + + if (!hw->func_caps.common_cap.dcb) + return ICE_ERR_NOT_SUPPORTED; + + /* Get DCBX status */ + pi->dcbx_status = ice_get_dcbx_status(hw); + + if (pi->dcbx_status == ICE_DCBX_STATUS_DIS) + return ICE_ERR_NOT_READY; + + ret = ice_aq_cfg_lldp_mib_change(hw, ena_mib, NULL); + if (!ret) + pi->is_sw_lldp = !ena_mib; + + return ret; +} + +/** + * ice_add_ieee_ets_common_tlv + * @buf: Data buffer to be populated with ice_dcb_ets_cfg data + * @ets_cfg: Container for ice_dcb_ets_cfg data + * + * Populate the TLV buffer with ice_dcb_ets_cfg data + */ +static void +ice_add_ieee_ets_common_tlv(u8 *buf, struct ice_dcb_ets_cfg *ets_cfg) +{ + u8 priority0, priority1; + u8 offset = 0; + int i; + + /* Priority Assignment Table (4 octets) + * Octets:| 1 | 2 | 3 | 4 | + * ----------------------------------------- + * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7| + * ----------------------------------------- + * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0| + * ----------------------------------------- + */ + for (i = 0; i < ICE_MAX_TRAFFIC_CLASS / 2; i++) { + priority0 = ets_cfg->prio_table[i * 2] & 0xF; + priority1 = ets_cfg->prio_table[i * 2 + 1] & 0xF; + buf[offset] = (priority0 << ICE_IEEE_ETS_PRIO_1_S) | priority1; + offset++; + } + + /* TC Bandwidth Table (8 octets) + * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | + * --------------------------------- + * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7| + * --------------------------------- + * + * TSA Assignment Table (8 octets) + * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | + * --------------------------------- + * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7| + * --------------------------------- + */ + ice_for_each_traffic_class(i) { + buf[offset] = ets_cfg->tcbwtable[i]; + buf[ICE_MAX_TRAFFIC_CLASS + offset] = ets_cfg->tsatable[i]; + offset++; + } +} + +/** + * ice_add_ieee_ets_tlv - Prepare ETS TLV in IEEE format + * @tlv: Fill the ETS config data in IEEE format + * @dcbcfg: Local store which holds the DCB Config + * + * Prepare IEEE 802.1Qaz ETS CFG TLV + */ +static void +ice_add_ieee_ets_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg) +{ + struct ice_dcb_ets_cfg *etscfg; + u8 *buf = tlv->tlvinfo; + u8 maxtcwilling = 0; + u32 ouisubtype; + u16 typelen; + + typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) | + ICE_IEEE_ETS_TLV_LEN); + tlv->typelen = HTONS(typelen); + + ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) | + ICE_IEEE_SUBTYPE_ETS_CFG); + tlv->ouisubtype = HTONL(ouisubtype); + + /* First Octet post subtype + * -------------------------- + * |will-|CBS | Re- | Max | + * |ing | |served| TCs | + * -------------------------- + * |1bit | 1bit|3 bits|3bits| + */ + etscfg = &dcbcfg->etscfg; + if (etscfg->willing) + maxtcwilling = BIT(ICE_IEEE_ETS_WILLING_S); + maxtcwilling |= etscfg->maxtcs & ICE_IEEE_ETS_MAXTC_M; + buf[0] = maxtcwilling; + + /* Begin adding at Priority Assignment Table (offset 1 in buf) */ + ice_add_ieee_ets_common_tlv(&buf[1], etscfg); +} + +/** + * ice_add_ieee_etsrec_tlv - Prepare ETS Recommended TLV in IEEE format + * @tlv: Fill ETS Recommended TLV in IEEE format + * @dcbcfg: Local store which holds the DCB Config + * + * Prepare IEEE 802.1Qaz ETS REC TLV + */ +static void +ice_add_ieee_etsrec_tlv(struct ice_lldp_org_tlv *tlv, + struct ice_dcbx_cfg *dcbcfg) +{ + struct ice_dcb_ets_cfg *etsrec; + u8 *buf = 
tlv->tlvinfo; + u32 ouisubtype; + u16 typelen; + + typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) | + ICE_IEEE_ETS_TLV_LEN); + tlv->typelen = HTONS(typelen); + + ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) | + ICE_IEEE_SUBTYPE_ETS_REC); + tlv->ouisubtype = HTONL(ouisubtype); + + etsrec = &dcbcfg->etsrec; + + /* First Octet is reserved */ + /* Begin adding at Priority Assignment Table (offset 1 in buf) */ + ice_add_ieee_ets_common_tlv(&buf[1], etsrec); +} + +/** + * ice_add_ieee_pfc_tlv - Prepare PFC TLV in IEEE format + * @tlv: Fill PFC TLV in IEEE format + * @dcbcfg: Local store which holds the PFC CFG data + * + * Prepare IEEE 802.1Qaz PFC CFG TLV + */ +static void +ice_add_ieee_pfc_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg) +{ + u8 *buf = tlv->tlvinfo; + u32 ouisubtype; + u16 typelen; + + typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) | + ICE_IEEE_PFC_TLV_LEN); + tlv->typelen = HTONS(typelen); + + ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) | + ICE_IEEE_SUBTYPE_PFC_CFG); + tlv->ouisubtype = HTONL(ouisubtype); + + /* ---------------------------------------- + * |will-|MBC | Re- | PFC | PFC Enable | + * |ing | |served| cap | | + * ----------------------------------------- + * |1bit | 1bit|2 bits|4bits| 1 octet | + */ + if (dcbcfg->pfc.willing) + buf[0] = BIT(ICE_IEEE_PFC_WILLING_S); + + if (dcbcfg->pfc.mbc) + buf[0] |= BIT(ICE_IEEE_PFC_MBC_S); + + buf[0] |= dcbcfg->pfc.pfccap & 0xF; + buf[1] = dcbcfg->pfc.pfcena; +} + +/** + * ice_add_ieee_app_pri_tlv - Prepare APP TLV in IEEE format + * @tlv: Fill APP TLV in IEEE format + * @dcbcfg: Local store which holds the APP CFG data + * + * Prepare IEEE 802.1Qaz APP CFG TLV + */ +static void +ice_add_ieee_app_pri_tlv(struct ice_lldp_org_tlv *tlv, + struct ice_dcbx_cfg *dcbcfg) +{ + u16 typelen, len, offset = 0; + u8 priority, selector, i = 0; + u8 *buf = tlv->tlvinfo; + u32 ouisubtype; + + /* No APP TLVs then just return */ + if (dcbcfg->numapps == 0) + return; + ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) | + ICE_IEEE_SUBTYPE_APP_PRI); + tlv->ouisubtype = HTONL(ouisubtype); + + /* Move offset to App Priority Table */ + offset++; + /* Application Priority Table (3 octets) + * Octets:| 1 | 2 | 3 | + * ----------------------------------------- + * |Priority|Rsrvd| Sel | Protocol ID | + * ----------------------------------------- + * Bits:|23 21|20 19|18 16|15 0| + * ----------------------------------------- + */ + while (i < dcbcfg->numapps) { + priority = dcbcfg->app[i].priority & 0x7; + selector = dcbcfg->app[i].selector & 0x7; + buf[offset] = (priority << ICE_IEEE_APP_PRIO_S) | selector; + buf[offset + 1] = (dcbcfg->app[i].prot_id >> 0x8) & 0xFF; + buf[offset + 2] = dcbcfg->app[i].prot_id & 0xFF; + /* Move to next app */ + offset += 3; + i++; + if (i >= ICE_DCBX_MAX_APPS) + break; + } + /* len includes size of ouisubtype + 1 reserved + 3*numapps */ + len = sizeof(tlv->ouisubtype) + 1 + (i * 3); + typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) | (len & 0x1FF)); + tlv->typelen = HTONS(typelen); +} + +/** + * ice_add_dcb_tlv - Add all IEEE TLVs + * @tlv: Fill TLV data in IEEE format + * @dcbcfg: Local store which holds the DCB Config + * @tlvid: Type of IEEE TLV + * + * Add tlv information + */ +static void +ice_add_dcb_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg, + u16 tlvid) +{ + switch (tlvid) { + case ICE_IEEE_TLV_ID_ETS_CFG: + ice_add_ieee_ets_tlv(tlv, dcbcfg); + break; + case ICE_IEEE_TLV_ID_ETS_REC: + ice_add_ieee_etsrec_tlv(tlv, dcbcfg); + 
break; + case ICE_IEEE_TLV_ID_PFC_CFG: + ice_add_ieee_pfc_tlv(tlv, dcbcfg); + break; + case ICE_IEEE_TLV_ID_APP_PRI: + ice_add_ieee_app_pri_tlv(tlv, dcbcfg); + break; + default: + break; + } +} + +/** + * ice_dcb_cfg_to_lldp - Convert DCB configuration to MIB format + * @lldpmib: pointer to the HW struct + * @miblen: length of LLDP MIB + * @dcbcfg: Local store which holds the DCB Config + * + * Convert the DCB configuration to MIB format + */ +void ice_dcb_cfg_to_lldp(u8 *lldpmib, u16 *miblen, struct ice_dcbx_cfg *dcbcfg) +{ + u16 len, offset = 0, tlvid = ICE_TLV_ID_START; + struct ice_lldp_org_tlv *tlv; + u16 typelen; + + tlv = (struct ice_lldp_org_tlv *)lldpmib; + while (1) { + ice_add_dcb_tlv(tlv, dcbcfg, tlvid++); + typelen = NTOHS(tlv->typelen); + len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S; + if (len) + offset += len + 2; + /* END TLV or beyond LLDPDU size */ + if (tlvid >= ICE_TLV_ID_END_OF_LLDPPDU || + offset > ICE_LLDPDU_SIZE) + break; + /* Move to next TLV */ + if (len) + tlv = (struct ice_lldp_org_tlv *) + ((char *)tlv + sizeof(tlv->typelen) + len); + } + *miblen = offset; +} + +/** + * ice_set_dcb_cfg - Set the local LLDP MIB to FW + * @pi: port information structure + * + * Set DCB configuration to the Firmware + */ +enum ice_status ice_set_dcb_cfg(struct ice_port_info *pi) +{ + u8 mib_type, *lldpmib = NULL; + struct ice_dcbx_cfg *dcbcfg; + enum ice_status ret; + struct ice_hw *hw; + u16 miblen; + + if (!pi) + return ICE_ERR_PARAM; + + hw = pi->hw; + + /* update the HW local config */ + dcbcfg = &pi->local_dcbx_cfg; + /* Allocate the LLDPDU */ + lldpmib = (u8 *)ice_malloc(hw, ICE_LLDPDU_SIZE); + if (!lldpmib) + return ICE_ERR_NO_MEMORY; + + mib_type = SET_LOCAL_MIB_TYPE_LOCAL_MIB; + if (dcbcfg->app_mode == ICE_DCBX_APPS_NON_WILLING) + mib_type |= SET_LOCAL_MIB_TYPE_CEE_NON_WILLING; + + ice_dcb_cfg_to_lldp(lldpmib, &miblen, dcbcfg); + ret = ice_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, miblen, + NULL); + + ice_free(hw, lldpmib); + + return ret; +} + +/** + * ice_aq_query_port_ets - query port ETS configuration + * @pi: port information structure + * @buf: pointer to buffer + * @buf_size: buffer size in bytes + * @cd: pointer to command details structure or NULL + * + * query current port ETS configuration + */ +enum ice_status +ice_aq_query_port_ets(struct ice_port_info *pi, + struct ice_aqc_port_ets_elem *buf, u16 buf_size, + struct ice_sq_cd *cd) +{ + struct ice_aqc_query_port_ets *cmd; + struct ice_aq_desc desc; + enum ice_status status; + + if (!pi) + return ICE_ERR_PARAM; + cmd = &desc.params.port_ets; + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_query_port_ets); + cmd->port_teid = pi->root->info.node_teid; + + status = ice_aq_send_cmd(pi->hw, &desc, buf, buf_size, cd); + return status; +} + +/** + * ice_update_port_tc_tree_cfg - update TC tree configuration + * @pi: port information structure + * @buf: pointer to buffer + * + * update the SW DB with the new TC changes + */ +enum ice_status +ice_update_port_tc_tree_cfg(struct ice_port_info *pi, + struct ice_aqc_port_ets_elem *buf) +{ + struct ice_sched_node *node, *tc_node; + struct ice_aqc_get_elem elem; + enum ice_status status = ICE_SUCCESS; + u32 teid1, teid2; + u8 i, j; + + if (!pi) + return ICE_ERR_PARAM; + /* suspend the missing TC nodes */ + for (i = 0; i < pi->root->num_children; i++) { + teid1 = LE32_TO_CPU(pi->root->children[i]->info.node_teid); + ice_for_each_traffic_class(j) { + teid2 = LE32_TO_CPU(buf->tc_node_teid[j]); + if (teid1 == teid2) + break; + } + if (j < 
ICE_MAX_TRAFFIC_CLASS) + continue; + /* TC is missing */ + pi->root->children[i]->in_use = false; + } + /* add the new TC nodes */ + ice_for_each_traffic_class(j) { + teid2 = LE32_TO_CPU(buf->tc_node_teid[j]); + if (teid2 == ICE_INVAL_TEID) + continue; + /* Is it already present in the tree ? */ + for (i = 0; i < pi->root->num_children; i++) { + tc_node = pi->root->children[i]; + if (!tc_node) + continue; + teid1 = LE32_TO_CPU(tc_node->info.node_teid); + if (teid1 == teid2) { + tc_node->tc_num = j; + tc_node->in_use = true; + break; + } + } + if (i < pi->root->num_children) + continue; + /* new TC */ + status = ice_sched_query_elem(pi->hw, teid2, &elem); + if (!status) + status = ice_sched_add_node(pi, 1, &elem.generic[0]); + if (status) + break; + /* update the TC number */ + node = ice_sched_find_node_by_teid(pi->root, teid2); + if (node) + node->tc_num = j; + } + return status; +} + +/** + * ice_query_port_ets - query port ETS configuration + * @pi: port information structure + * @buf: pointer to buffer + * @buf_size: buffer size in bytes + * @cd: pointer to command details structure or NULL + * + * query current port ETS configuration and update the + * SW DB with the TC changes + */ +enum ice_status +ice_query_port_ets(struct ice_port_info *pi, + struct ice_aqc_port_ets_elem *buf, u16 buf_size, + struct ice_sq_cd *cd) +{ + enum ice_status status; + + ice_acquire_lock(&pi->sched_lock); + status = ice_aq_query_port_ets(pi, buf, buf_size, cd); + if (!status) + status = ice_update_port_tc_tree_cfg(pi, buf); + ice_release_lock(&pi->sched_lock); + return status; +} diff --git a/src/spdk/dpdk/drivers/net/ice/base/ice_dcb.h b/src/spdk/dpdk/drivers/net/ice/base/ice_dcb.h new file mode 100644 index 000000000..3ffeb864c --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ice/base/ice_dcb.h @@ -0,0 +1,224 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + +#ifndef _ICE_DCB_H_ +#define _ICE_DCB_H_ + +#include "ice_type.h" +#include "ice_common.h" + +#define ICE_DCBX_OFFLOAD_DIS 0 +#define ICE_DCBX_OFFLOAD_ENABLED 1 + +#define ICE_DCBX_STATUS_NOT_STARTED 0 +#define ICE_DCBX_STATUS_IN_PROGRESS 1 +#define ICE_DCBX_STATUS_DONE 2 +#define ICE_DCBX_STATUS_MULTIPLE_PEERS 3 +#define ICE_DCBX_STATUS_DIS 7 + +#define ICE_TLV_TYPE_END 0 +#define ICE_TLV_TYPE_ORG 127 + +#define ICE_IEEE_8021QAZ_OUI 0x0080C2 +#define ICE_IEEE_SUBTYPE_ETS_CFG 9 +#define ICE_IEEE_SUBTYPE_ETS_REC 10 +#define ICE_IEEE_SUBTYPE_PFC_CFG 11 +#define ICE_IEEE_SUBTYPE_APP_PRI 12 + +#define ICE_CEE_DCBX_OUI 0x001B21 +#define ICE_CEE_DCBX_TYPE 2 + +#define ICE_CEE_SUBTYPE_CTRL 1 +#define ICE_CEE_SUBTYPE_PG_CFG 2 +#define ICE_CEE_SUBTYPE_PFC_CFG 3 +#define ICE_CEE_SUBTYPE_APP_PRI 4 + +#define ICE_CEE_MAX_FEAT_TYPE 3 +#define ICE_LLDP_ADMINSTATUS_DIS 0 +#define ICE_LLDP_ADMINSTATUS_ENA_RX 1 +#define ICE_LLDP_ADMINSTATUS_ENA_TX 2 +#define ICE_LLDP_ADMINSTATUS_ENA_RXTX 3 + +/* Defines for LLDP TLV header */ +#define ICE_LLDP_TLV_LEN_S 0 +#define ICE_LLDP_TLV_LEN_M (0x01FF << ICE_LLDP_TLV_LEN_S) +#define ICE_LLDP_TLV_TYPE_S 9 +#define ICE_LLDP_TLV_TYPE_M (0x7F << ICE_LLDP_TLV_TYPE_S) +#define ICE_LLDP_TLV_SUBTYPE_S 0 +#define ICE_LLDP_TLV_SUBTYPE_M (0xFF << ICE_LLDP_TLV_SUBTYPE_S) +#define ICE_LLDP_TLV_OUI_S 8 +#define ICE_LLDP_TLV_OUI_M (0xFFFFFFUL << ICE_LLDP_TLV_OUI_S) + +/* Defines for IEEE ETS TLV */ +#define ICE_IEEE_ETS_MAXTC_S 0 +#define ICE_IEEE_ETS_MAXTC_M (0x7 << ICE_IEEE_ETS_MAXTC_S) +#define ICE_IEEE_ETS_CBS_S 6 +#define ICE_IEEE_ETS_CBS_M BIT(ICE_IEEE_ETS_CBS_S) +#define 
ICE_IEEE_ETS_WILLING_S 7 +#define ICE_IEEE_ETS_WILLING_M BIT(ICE_IEEE_ETS_WILLING_S) +#define ICE_IEEE_ETS_PRIO_0_S 0 +#define ICE_IEEE_ETS_PRIO_0_M (0x7 << ICE_IEEE_ETS_PRIO_0_S) +#define ICE_IEEE_ETS_PRIO_1_S 4 +#define ICE_IEEE_ETS_PRIO_1_M (0x7 << ICE_IEEE_ETS_PRIO_1_S) +#define ICE_CEE_PGID_PRIO_0_S 0 +#define ICE_CEE_PGID_PRIO_0_M (0xF << ICE_CEE_PGID_PRIO_0_S) +#define ICE_CEE_PGID_PRIO_1_S 4 +#define ICE_CEE_PGID_PRIO_1_M (0xF << ICE_CEE_PGID_PRIO_1_S) +#define ICE_CEE_PGID_STRICT 15 + +/* Defines for IEEE TSA types */ +#define ICE_IEEE_TSA_STRICT 0 +#define ICE_IEEE_TSA_CBS 1 +#define ICE_IEEE_TSA_ETS 2 +#define ICE_IEEE_TSA_VENDOR 255 + +/* Defines for IEEE PFC TLV */ +#define ICE_IEEE_PFC_CAP_S 0 +#define ICE_IEEE_PFC_CAP_M (0xF << ICE_IEEE_PFC_CAP_S) +#define ICE_IEEE_PFC_MBC_S 6 +#define ICE_IEEE_PFC_MBC_M BIT(ICE_IEEE_PFC_MBC_S) +#define ICE_IEEE_PFC_WILLING_S 7 +#define ICE_IEEE_PFC_WILLING_M BIT(ICE_IEEE_PFC_WILLING_S) + +/* Defines for IEEE APP TLV */ +#define ICE_IEEE_APP_SEL_S 0 +#define ICE_IEEE_APP_SEL_M (0x7 << ICE_IEEE_APP_SEL_S) +#define ICE_IEEE_APP_PRIO_S 5 +#define ICE_IEEE_APP_PRIO_M (0x7 << ICE_IEEE_APP_PRIO_S) + +/* TLV definitions for preparing MIB */ +#define ICE_TLV_ID_CHASSIS_ID 0 +#define ICE_TLV_ID_PORT_ID 1 +#define ICE_TLV_ID_TIME_TO_LIVE 2 +#define ICE_IEEE_TLV_ID_ETS_CFG 3 +#define ICE_IEEE_TLV_ID_ETS_REC 4 +#define ICE_IEEE_TLV_ID_PFC_CFG 5 +#define ICE_IEEE_TLV_ID_APP_PRI 6 +#define ICE_TLV_ID_END_OF_LLDPPDU 7 +#define ICE_TLV_ID_START ICE_IEEE_TLV_ID_ETS_CFG + +#define ICE_IEEE_ETS_TLV_LEN 25 +#define ICE_IEEE_PFC_TLV_LEN 6 +#define ICE_IEEE_APP_TLV_LEN 11 + +#pragma pack(1) +/* IEEE 802.1AB LLDP TLV structure */ +struct ice_lldp_generic_tlv { + __be16 typelen; + u8 tlvinfo[1]; +}; + +/* IEEE 802.1AB LLDP Organization specific TLV */ +struct ice_lldp_org_tlv { + __be16 typelen; + __be32 ouisubtype; + u8 tlvinfo[1]; +}; +#pragma pack() + +struct ice_cee_tlv_hdr { + __be16 typelen; + u8 operver; + u8 maxver; +}; + +struct ice_cee_ctrl_tlv { + struct ice_cee_tlv_hdr hdr; + __be32 seqno; + __be32 ackno; +}; + +struct ice_cee_feat_tlv { + struct ice_cee_tlv_hdr hdr; + u8 en_will_err; /* Bits: |En|Will|Err|Reserved(5)| */ +#define ICE_CEE_FEAT_TLV_ENA_M 0x80 +#define ICE_CEE_FEAT_TLV_WILLING_M 0x40 +#define ICE_CEE_FEAT_TLV_ERR_M 0x20 + u8 subtype; + u8 tlvinfo[1]; +}; + +#pragma pack(1) +struct ice_cee_app_prio { + __be16 protocol; + u8 upper_oui_sel; /* Bits: |Upper OUI(6)|Selector(2)| */ +#define ICE_CEE_APP_SELECTOR_M 0x03 + __be16 lower_oui; + u8 prio_map; +}; +#pragma pack() + +/* TODO: The below structures related LLDP/DCBX variables + * and statistics are defined but need to find how to get + * the required information from the Firmware to use them + */ + +/* IEEE 802.1AB LLDP Agent Statistics */ +struct ice_lldp_stats { + u64 remtablelastchangetime; + u64 remtableinserts; + u64 remtabledeletes; + u64 remtabledrops; + u64 remtableageouts; + u64 txframestotal; + u64 rxframesdiscarded; + u64 rxportframeerrors; + u64 rxportframestotal; + u64 rxporttlvsdiscardedtotal; + u64 rxporttlvsunrecognizedtotal; + u64 remtoomanyneighbors; +}; + +/* IEEE 802.1Qaz DCBX variables */ +struct ice_dcbx_variables { + u32 defmaxtrafficclasses; + u32 defprioritytcmapping; + u32 deftcbandwidth; + u32 deftsaassignment; +}; + +enum ice_status +ice_aq_get_lldp_mib(struct ice_hw *hw, u8 bridge_type, u8 mib_type, void *buf, + u16 buf_size, u16 *local_len, u16 *remote_len, + struct ice_sq_cd *cd); +enum ice_status +ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void 
*buf, u16 buf_size, + struct ice_sq_cd *cd); +enum ice_status +ice_aq_get_cee_dcb_cfg(struct ice_hw *hw, + struct ice_aqc_get_cee_dcb_cfg_resp *buff, + struct ice_sq_cd *cd); +enum ice_status ice_lldp_to_dcb_cfg(u8 *lldpmib, struct ice_dcbx_cfg *dcbcfg); +u8 ice_get_dcbx_status(struct ice_hw *hw); +enum ice_status +ice_aq_get_dcb_cfg(struct ice_hw *hw, u8 mib_type, u8 bridgetype, + struct ice_dcbx_cfg *dcbcfg); +enum ice_status ice_get_dcb_cfg(struct ice_port_info *pi); +enum ice_status ice_set_dcb_cfg(struct ice_port_info *pi); +enum ice_status ice_init_dcb(struct ice_hw *hw, bool enable_mib_change); +void ice_dcb_cfg_to_lldp(u8 *lldpmib, u16 *miblen, struct ice_dcbx_cfg *dcbcfg); +enum ice_status +ice_query_port_ets(struct ice_port_info *pi, + struct ice_aqc_port_ets_elem *buf, u16 buf_size, + struct ice_sq_cd *cmd_details); +enum ice_status +ice_aq_query_port_ets(struct ice_port_info *pi, + struct ice_aqc_port_ets_elem *buf, u16 buf_size, + struct ice_sq_cd *cd); +enum ice_status +ice_update_port_tc_tree_cfg(struct ice_port_info *pi, + struct ice_aqc_port_ets_elem *buf); +enum ice_status +ice_aq_stop_lldp(struct ice_hw *hw, bool shutdown_lldp_agent, bool persist, + struct ice_sq_cd *cd); +enum ice_status +ice_aq_start_lldp(struct ice_hw *hw, bool persist, struct ice_sq_cd *cd); +enum ice_status +ice_aq_start_stop_dcbx(struct ice_hw *hw, bool start_dcbx_agent, + bool *dcbx_agent_status, struct ice_sq_cd *cd); +enum ice_status ice_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_mib); +enum ice_status +ice_aq_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_update, + struct ice_sq_cd *cd); +#endif /* _ICE_DCB_H_ */ diff --git a/src/spdk/dpdk/drivers/net/ice/base/ice_devids.h b/src/spdk/dpdk/drivers/net/ice/base/ice_devids.h new file mode 100644 index 000000000..e396f445a --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ice/base/ice_devids.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + +#ifndef _ICE_DEVIDS_H_ +#define _ICE_DEVIDS_H_ + +/* Device IDs */ +/* Intel(R) Ethernet Controller E810-C for backplane */ +#define ICE_DEV_ID_E810C_BACKPLANE 0x1591 +/* Intel(R) Ethernet Controller E810-C for QSFP */ +#define ICE_DEV_ID_E810C_QSFP 0x1592 +/* Intel(R) Ethernet Controller E810-C for SFP */ +#define ICE_DEV_ID_E810C_SFP 0x1593 +/* Intel(R) Ethernet Controller E810-XXV for backplane */ +#define ICE_DEV_ID_E810_XXV_BACKPLANE 0x1599 +/* Intel(R) Ethernet Controller E810-XXV for QSFP */ +#define ICE_DEV_ID_E810_XXV_QSFP 0x159A +/* Intel(R) Ethernet Controller E810-XXV for SFP */ +#define ICE_DEV_ID_E810_XXV_SFP 0x159B +/* Intel(R) Ethernet Connection E822-C for backplane */ +#define ICE_DEV_ID_E822C_BACKPLANE 0x1890 +/* Intel(R) Ethernet Connection E822-C for QSFP */ +#define ICE_DEV_ID_E822C_QSFP 0x1891 +/* Intel(R) Ethernet Connection E822-C for SFP */ +#define ICE_DEV_ID_E822C_SFP 0x1892 +/* Intel(R) Ethernet Connection E822-C/X557-AT 10GBASE-T */ +#define ICE_DEV_ID_E822C_10G_BASE_T 0x1893 +/* Intel(R) Ethernet Connection E822-C 1GbE */ +#define ICE_DEV_ID_E822C_SGMII 0x1894 +/* Intel(R) Ethernet Connection E822-L for backplane */ +#define ICE_DEV_ID_E822L_BACKPLANE 0x1897 +/* Intel(R) Ethernet Connection E822-L for SFP */ +#define ICE_DEV_ID_E822L_SFP 0x1898 +/* Intel(R) Ethernet Connection E822-L/X557-AT 10GBASE-T */ +#define ICE_DEV_ID_E822L_10G_BASE_T 0x1899 +/* Intel(R) Ethernet Connection E822-L 1GbE */ +#define ICE_DEV_ID_E822L_SGMII 0x189A + +#endif /* _ICE_DEVIDS_H_ */ diff --git 
a/src/spdk/dpdk/drivers/net/ice/base/ice_fdir.c b/src/spdk/dpdk/drivers/net/ice/base/ice_fdir.c new file mode 100644 index 000000000..4e9aafc39 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ice/base/ice_fdir.c @@ -0,0 +1,1124 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + +#include "ice_common.h" +#include "ice_fdir.h" + +/* These are training packet headers used to program flow director filters. */ +static const u8 ice_fdir_tcpv4_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x28, 0x00, 0x01, 0x00, 0x00, 0x40, 0x06, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, 0x00, + 0x20, 0x00, 0x00, 0x00, 0x00, 0x00 +}; + +static const u8 ice_fdir_udpv4_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x1C, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, +}; + +static const u8 ice_fdir_sctpv4_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x20, 0x00, 0x00, 0x40, 0x00, 0x40, 0x84, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +}; + +static const u8 ice_fdir_ipv4_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x14, 0x00, 0x00, 0x40, 0x00, 0x40, 0x10, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00 +}; + +static const u8 ice_fdir_udp4_gtpu4_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x4c, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x08, 0x68, 0x08, 0x68, 0x00, 0x00, + 0x00, 0x00, 0x34, 0xff, 0x00, 0x28, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x85, 0x02, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x45, 0x00, + 0x00, 0x1c, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, +}; + +static const u8 ice_fdir_tcp4_gtpu4_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x58, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x08, 0x68, 0x08, 0x68, 0x00, 0x00, + 0x00, 0x00, 0x34, 0xff, 0x00, 0x28, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x85, 0x02, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x45, 0x00, + 0x00, 0x28, 0x00, 0x00, 0x40, 0x00, 0x40, 0x06, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +}; + +static const u8 ice_fdir_icmp4_gtpu4_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x4c, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x08, 0x68, 0x08, 0x68, 0x00, 0x00, + 0x00, 0x00, 0x34, 0xff, 0x00, 0x28, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x85, 0x02, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x45, 0x00, + 0x00, 0x1c, 0x00, 0x00, 0x40, 0x00, 0x40, 0x01, + 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, +}; + +static const u8 ice_fdir_ipv4_gtpu4_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x44, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x08, 0x68, 0x08, 0x68, 0x00, 0x00, + 0x00, 0x00, 0x34, 0xff, 0x00, 0x28, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x85, 0x02, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x45, 0x00, + 0x00, 0x14, 0x00, 0x00, 0x40, 0x00, 0x40, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, +}; + +static const u8 ice_fdir_tcpv6_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x86, 0xDD, 0x60, 0x00, + 0x00, 0x00, 0x00, 0x14, 0x06, 0x40, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x50, 0x00, 0x20, 0x00, 0x00, 0x00, + 0x00, 0x00, +}; + +static const u8 ice_fdir_udpv6_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x86, 0xDD, 0x60, 0x00, + 0x00, 0x00, 0x00, 0x08, 0x11, 0x40, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, +}; + +static const u8 ice_fdir_sctpv6_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x86, 0xDD, 0x60, 0x00, + 0x00, 0x00, 0x00, 0x0C, 0x84, 0x40, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, +}; + +static const u8 ice_fdir_ipv6_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x86, 0xDD, 0x60, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x3B, 0x40, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +}; + +static const u8 ice_fdir_tcp4_tun_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x5a, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x04, 0x00, 0x00, 0x03, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, + 0x45, 0x00, 0x00, 0x28, 0x00, 0x00, 0x40, 0x00, + 0x40, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x50, 0x00, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, +}; + +static const u8 ice_fdir_udp4_tun_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x4e, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x04, 0x00, 0x00, 0x03, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, + 0x45, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x40, 0x00, + 0x40, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, +}; + +static const u8 ice_fdir_sctp4_tun_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x52, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x04, 0x00, 0x00, 0x03, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, + 0x45, 0x00, 0x00, 0x20, 0x00, 0x01, 0x00, 0x00, + 0x40, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +}; + +static const u8 ice_fdir_ip4_tun_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x46, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x04, 0x00, 0x00, 0x03, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, + 0x45, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, + 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, +}; + +static const u8 ice_fdir_tcp6_tun_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x6e, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x04, 0x00, 0x00, 0x03, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x86, 0xdd, + 0x60, 0x00, 0x00, 0x00, 0x00, 0x14, 0x06, 0x40, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x50, 0x00, 0x20, 0x00, + 0x00, 0x00, 0x00, 0x00, +}; + +static const u8 ice_fdir_udp6_tun_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x62, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x04, 0x00, 0x00, 0x03, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x86, 0xdd, + 0x60, 0x00, 0x00, 0x00, 0x00, 0x08, 0x11, 0x40, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +}; + +static const u8 ice_fdir_sctp6_tun_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x66, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x04, 0x00, 0x00, 0x03, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x86, 0xdd, + 0x60, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x84, 
0x40, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, +}; + +static const u8 ice_fdir_ip6_tun_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x5a, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x04, 0x00, 0x00, 0x03, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x86, 0xdd, + 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3b, 0x40, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +}; + +/* Flow Director no-op training packet table */ +static const struct ice_fdir_base_pkt ice_fdir_pkt[] = { + { + ICE_FLTR_PTYPE_NONF_IPV4_TCP, + sizeof(ice_fdir_tcpv4_pkt), ice_fdir_tcpv4_pkt, + sizeof(ice_fdir_tcp4_tun_pkt), ice_fdir_tcp4_tun_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV4_UDP, + sizeof(ice_fdir_udpv4_pkt), ice_fdir_udpv4_pkt, + sizeof(ice_fdir_udp4_tun_pkt), ice_fdir_udp4_tun_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV4_SCTP, + sizeof(ice_fdir_sctpv4_pkt), ice_fdir_sctpv4_pkt, + sizeof(ice_fdir_sctp4_tun_pkt), ice_fdir_sctp4_tun_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV4_OTHER, + sizeof(ice_fdir_ipv4_pkt), ice_fdir_ipv4_pkt, + sizeof(ice_fdir_ip4_tun_pkt), ice_fdir_ip4_tun_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP, + sizeof(ice_fdir_udp4_gtpu4_pkt), + ice_fdir_udp4_gtpu4_pkt, + sizeof(ice_fdir_udp4_gtpu4_pkt), + ice_fdir_udp4_gtpu4_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP, + sizeof(ice_fdir_tcp4_gtpu4_pkt), + ice_fdir_tcp4_gtpu4_pkt, + sizeof(ice_fdir_tcp4_gtpu4_pkt), + ice_fdir_tcp4_gtpu4_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP, + sizeof(ice_fdir_icmp4_gtpu4_pkt), + ice_fdir_icmp4_gtpu4_pkt, + sizeof(ice_fdir_icmp4_gtpu4_pkt), + ice_fdir_icmp4_gtpu4_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER, + sizeof(ice_fdir_ipv4_gtpu4_pkt), + ice_fdir_ipv4_gtpu4_pkt, + sizeof(ice_fdir_ipv4_gtpu4_pkt), + ice_fdir_ipv4_gtpu4_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV6_TCP, + sizeof(ice_fdir_tcpv6_pkt), ice_fdir_tcpv6_pkt, + sizeof(ice_fdir_tcp6_tun_pkt), ice_fdir_tcp6_tun_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV6_UDP, + sizeof(ice_fdir_udpv6_pkt), ice_fdir_udpv6_pkt, + sizeof(ice_fdir_udp6_tun_pkt), ice_fdir_udp6_tun_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV6_SCTP, + sizeof(ice_fdir_sctpv6_pkt), ice_fdir_sctpv6_pkt, + sizeof(ice_fdir_sctp6_tun_pkt), ice_fdir_sctp6_tun_pkt, + }, + { + ICE_FLTR_PTYPE_NONF_IPV6_OTHER, + sizeof(ice_fdir_ipv6_pkt), ice_fdir_ipv6_pkt, + sizeof(ice_fdir_ip6_tun_pkt), ice_fdir_ip6_tun_pkt, + }, +}; + +#define ICE_FDIR_NUM_PKT ARRAY_SIZE(ice_fdir_pkt) + +/** + * ice_set_dflt_val_fd_desc + * @fd_fltr_ctx: pointer to fd filter descriptor + */ +void +ice_set_dflt_val_fd_desc(struct ice_fd_fltr_desc_ctx *fd_fltr_ctx) +{ + fd_fltr_ctx->comp_q = ICE_FXD_FLTR_QW0_COMP_Q_ZERO; + fd_fltr_ctx->comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW_FAIL; + fd_fltr_ctx->fd_space = ICE_FXD_FLTR_QW0_FD_SPACE_GUAR_BEST; + fd_fltr_ctx->cnt_ena = ICE_FXD_FLTR_QW0_STAT_ENA_PKTS; + fd_fltr_ctx->evict_ena = ICE_FXD_FLTR_QW0_EVICT_ENA_TRUE; + fd_fltr_ctx->toq = 
ICE_FXD_FLTR_QW0_TO_Q_EQUALS_QINDEX; + fd_fltr_ctx->toq_prio = ICE_FXD_FLTR_QW0_TO_Q_PRIO1; + fd_fltr_ctx->dpu_recipe = ICE_FXD_FLTR_QW0_DPU_RECIPE_DFLT; + fd_fltr_ctx->drop = ICE_FXD_FLTR_QW0_DROP_NO; + fd_fltr_ctx->flex_prio = ICE_FXD_FLTR_QW0_FLEX_PRI_NONE; + fd_fltr_ctx->flex_mdid = ICE_FXD_FLTR_QW0_FLEX_MDID0; + fd_fltr_ctx->flex_val = ICE_FXD_FLTR_QW0_FLEX_VAL0; + fd_fltr_ctx->dtype = ICE_TX_DESC_DTYPE_FLTR_PROG; + fd_fltr_ctx->desc_prof_prio = ICE_FXD_FLTR_QW1_PROF_PRIO_ZERO; + fd_fltr_ctx->desc_prof = ICE_FXD_FLTR_QW1_PROF_ZERO; + fd_fltr_ctx->swap = ICE_FXD_FLTR_QW1_SWAP_SET; + fd_fltr_ctx->fdid_prio = ICE_FXD_FLTR_QW1_FDID_PRI_ONE; + fd_fltr_ctx->fdid_mdid = ICE_FXD_FLTR_QW1_FDID_MDID_FD; + fd_fltr_ctx->fdid = ICE_FXD_FLTR_QW1_FDID_ZERO; +} + +/** + * ice_set_fd_desc_val + * @ctx: pointer to fd filter descriptor context + * @fdir_desc: populated with fd filter descriptor values + */ +static void +ice_set_fd_desc_val(struct ice_fd_fltr_desc_ctx *ctx, + struct ice_fltr_desc *fdir_desc) +{ + u64 qword; + + /* prep QW0 of FD filter programming desc */ + qword = ((u64)ctx->qindex << ICE_FXD_FLTR_QW0_QINDEX_S) & + ICE_FXD_FLTR_QW0_QINDEX_M; + qword |= ((u64)ctx->comp_q << ICE_FXD_FLTR_QW0_COMP_Q_S) & + ICE_FXD_FLTR_QW0_COMP_Q_M; + qword |= ((u64)ctx->comp_report << ICE_FXD_FLTR_QW0_COMP_REPORT_S) & + ICE_FXD_FLTR_QW0_COMP_REPORT_M; + qword |= ((u64)ctx->fd_space << ICE_FXD_FLTR_QW0_FD_SPACE_S) & + ICE_FXD_FLTR_QW0_FD_SPACE_M; + qword |= ((u64)ctx->cnt_index << ICE_FXD_FLTR_QW0_STAT_CNT_S) & + ICE_FXD_FLTR_QW0_STAT_CNT_M; + qword |= ((u64)ctx->cnt_ena << ICE_FXD_FLTR_QW0_STAT_ENA_S) & + ICE_FXD_FLTR_QW0_STAT_ENA_M; + qword |= ((u64)ctx->evict_ena << ICE_FXD_FLTR_QW0_EVICT_ENA_S) & + ICE_FXD_FLTR_QW0_EVICT_ENA_M; + qword |= ((u64)ctx->toq << ICE_FXD_FLTR_QW0_TO_Q_S) & + ICE_FXD_FLTR_QW0_TO_Q_M; + qword |= ((u64)ctx->toq_prio << ICE_FXD_FLTR_QW0_TO_Q_PRI_S) & + ICE_FXD_FLTR_QW0_TO_Q_PRI_M; + qword |= ((u64)ctx->dpu_recipe << ICE_FXD_FLTR_QW0_DPU_RECIPE_S) & + ICE_FXD_FLTR_QW0_DPU_RECIPE_M; + qword |= ((u64)ctx->drop << ICE_FXD_FLTR_QW0_DROP_S) & + ICE_FXD_FLTR_QW0_DROP_M; + qword |= ((u64)ctx->flex_prio << ICE_FXD_FLTR_QW0_FLEX_PRI_S) & + ICE_FXD_FLTR_QW0_FLEX_PRI_M; + qword |= ((u64)ctx->flex_mdid << ICE_FXD_FLTR_QW0_FLEX_MDID_S) & + ICE_FXD_FLTR_QW0_FLEX_MDID_M; + qword |= ((u64)ctx->flex_val << ICE_FXD_FLTR_QW0_FLEX_VAL_S) & + ICE_FXD_FLTR_QW0_FLEX_VAL_M; + fdir_desc->qidx_compq_space_stat = CPU_TO_LE64(qword); + + /* prep QW1 of FD filter programming desc */ + qword = ((u64)ctx->dtype << ICE_FXD_FLTR_QW1_DTYPE_S) & + ICE_FXD_FLTR_QW1_DTYPE_M; + qword |= ((u64)ctx->pcmd << ICE_FXD_FLTR_QW1_PCMD_S) & + ICE_FXD_FLTR_QW1_PCMD_M; + qword |= ((u64)ctx->desc_prof_prio << ICE_FXD_FLTR_QW1_PROF_PRI_S) & + ICE_FXD_FLTR_QW1_PROF_PRI_M; + qword |= ((u64)ctx->desc_prof << ICE_FXD_FLTR_QW1_PROF_S) & + ICE_FXD_FLTR_QW1_PROF_M; + qword |= ((u64)ctx->fd_vsi << ICE_FXD_FLTR_QW1_FD_VSI_S) & + ICE_FXD_FLTR_QW1_FD_VSI_M; + qword |= ((u64)ctx->swap << ICE_FXD_FLTR_QW1_SWAP_S) & + ICE_FXD_FLTR_QW1_SWAP_M; + qword |= ((u64)ctx->fdid_prio << ICE_FXD_FLTR_QW1_FDID_PRI_S) & + ICE_FXD_FLTR_QW1_FDID_PRI_M; + qword |= ((u64)ctx->fdid_mdid << ICE_FXD_FLTR_QW1_FDID_MDID_S) & + ICE_FXD_FLTR_QW1_FDID_MDID_M; + qword |= ((u64)ctx->fdid << ICE_FXD_FLTR_QW1_FDID_S) & + ICE_FXD_FLTR_QW1_FDID_M; + fdir_desc->dtype_cmd_vsi_fdid = CPU_TO_LE64(qword); +} + +/** + * ice_fdir_get_prgm_desc - set a fdir descriptor from a fdir filter struct + * @hw: pointer to the hardware structure + * @input: filter + * @fdesc: filter 
descriptor + * @add: if add is true, this is an add operation, false implies delete + */ +void +ice_fdir_get_prgm_desc(struct ice_hw *hw, struct ice_fdir_fltr *input, + struct ice_fltr_desc *fdesc, bool add) +{ + struct ice_fd_fltr_desc_ctx fdir_fltr_ctx = { 0 }; + + /* set default context info */ + ice_set_dflt_val_fd_desc(&fdir_fltr_ctx); + + /* change sideband filtering values */ + fdir_fltr_ctx.fdid = input->fltr_id; + if (input->dest_ctl == ICE_FLTR_PRGM_DESC_DEST_DROP_PKT) { + fdir_fltr_ctx.drop = ICE_FXD_FLTR_QW0_DROP_YES; + fdir_fltr_ctx.qindex = 0; + } else if (input->dest_ctl == + ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_OTHER) { + fdir_fltr_ctx.drop = ICE_FXD_FLTR_QW0_DROP_NO; + fdir_fltr_ctx.qindex = 0; + } else { + if (input->dest_ctl == + ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QGROUP) + fdir_fltr_ctx.toq = input->q_region; + fdir_fltr_ctx.drop = ICE_FXD_FLTR_QW0_DROP_NO; + fdir_fltr_ctx.qindex = input->q_index; + } + fdir_fltr_ctx.cnt_ena = input->cnt_ena; + fdir_fltr_ctx.cnt_index = input->cnt_index; + fdir_fltr_ctx.fd_vsi = ice_get_hw_vsi_num(hw, input->dest_vsi); + fdir_fltr_ctx.evict_ena = ICE_FXD_FLTR_QW0_EVICT_ENA_FALSE; + if (input->dest_ctl == ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_OTHER) + fdir_fltr_ctx.toq_prio = 0; + else + fdir_fltr_ctx.toq_prio = 3; + fdir_fltr_ctx.pcmd = (add) ? ICE_FXD_FLTR_QW1_PCMD_ADD : + ICE_FXD_FLTR_QW1_PCMD_REMOVE; + fdir_fltr_ctx.swap = ICE_FXD_FLTR_QW1_SWAP_NOT_SET; + fdir_fltr_ctx.comp_q = ICE_FXD_FLTR_QW0_COMP_Q_ZERO; + fdir_fltr_ctx.comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW; + fdir_fltr_ctx.fdid_prio = input->fdid_prio; + fdir_fltr_ctx.desc_prof = 1; + fdir_fltr_ctx.desc_prof_prio = 3; + ice_set_fd_desc_val(&fdir_fltr_ctx, fdesc); +} + +/** + * ice_alloc_fd_res_cntr - obtain counter resource for FD type + * @hw: pointer to the hardware structure + * @cntr_id: returns counter index + */ +enum ice_status ice_alloc_fd_res_cntr(struct ice_hw *hw, u16 *cntr_id) +{ + return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_FDIR_COUNTER_BLOCK, + ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1, cntr_id); +} + +/** + * ice_free_fd_res_cntr - Free counter resource for FD type + * @hw: pointer to the hardware structure + * @cntr_id: counter index to be freed + */ +enum ice_status ice_free_fd_res_cntr(struct ice_hw *hw, u16 cntr_id) +{ + return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_FDIR_COUNTER_BLOCK, + ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1, cntr_id); +} + +/** + * ice_alloc_fd_guar_item - allocate resource for FD guaranteed entries + * @hw: pointer to the hardware structure + * @cntr_id: returns counter index + * @num_fltr: number of filter entries to be allocated + */ +enum ice_status +ice_alloc_fd_guar_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr) +{ + return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_FDIR_GUARANTEED_ENTRIES, + ICE_AQC_RES_TYPE_FLAG_DEDICATED, num_fltr, + cntr_id); +} + +/** + * ice_free_fd_guar_item - Free flow director guaranteed entries + * @hw: pointer to the hardware structure + * @cntr_id: counter index that needs to be freed + * @num_fltr: number of filters to be freed + */ +enum ice_status +ice_free_fd_guar_item(struct ice_hw *hw, u16 cntr_id, u16 num_fltr) +{ + return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_FDIR_GUARANTEED_ENTRIES, + ICE_AQC_RES_TYPE_FLAG_DEDICATED, num_fltr, + cntr_id); +} + +/** + * ice_alloc_fd_shrd_item - allocate resource for flow director shared entries + * @hw: pointer to the hardware structure + * @cntr_id: returns counter index + * @num_fltr: number of filter entries to be allocated + */ +enum ice_status 
+ice_alloc_fd_shrd_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr) +{ + return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_FDIR_SHARED_ENTRIES, + ICE_AQC_RES_TYPE_FLAG_DEDICATED, num_fltr, + cntr_id); +} + +/** + * ice_free_fd_shrd_item - Free flow director shared entries + * @hw: pointer to the hardware structure + * @cntr_id: counter index that needs to be freed + * @num_fltr: number of filters to be freed + */ +enum ice_status +ice_free_fd_shrd_item(struct ice_hw *hw, u16 cntr_id, u16 num_fltr) +{ + return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_FDIR_SHARED_ENTRIES, + ICE_AQC_RES_TYPE_FLAG_DEDICATED, num_fltr, + cntr_id); +} + +/** + * ice_get_fdir_cnt_all - get the number of Flow Director filters + * @hw: hardware data structure + * + * Returns the number of filters available on device + */ +int ice_get_fdir_cnt_all(struct ice_hw *hw) +{ + return hw->func_caps.fd_fltr_guar + hw->func_caps.fd_fltr_best_effort; +} + +/** + * ice_pkt_insert_ipv6_addr - insert a be32 IPv6 address into a memory buffer. + * @pkt: packet buffer + * @offset: offset into buffer + * @addr: IPv6 address to convert and insert into pkt at offset + */ +static void ice_pkt_insert_ipv6_addr(u8 *pkt, int offset, __be32 *addr) +{ + int idx; + + for (idx = 0; idx < ICE_IPV6_ADDR_LEN_AS_U32; idx++) + ice_memcpy(pkt + offset + idx * sizeof(*addr), &addr[idx], + sizeof(*addr), ICE_NONDMA_TO_NONDMA); +} + +/** + * ice_pkt_insert_u6_qfi - insert a u6 value qfi into a memory buffer for gtpu + * @pkt: packet buffer + * @offset: offset into buffer + * @data: 8 bit value to convert and insert into pkt at offset + * + * This function is designed for inserting qfi (6 bits) for gtpu. + */ +static void ice_pkt_insert_u6_qfi(u8 *pkt, int offset, u8 data) +{ + u8 ret; + + ret = (data & 0x3F) + (*(pkt + offset) & 0xC0); + ice_memcpy(pkt + offset, &ret, sizeof(ret), ICE_NONDMA_TO_NONDMA); +} + +/** + * ice_pkt_insert_u8 - insert a u8 value into a memory buffer. + * @pkt: packet buffer + * @offset: offset into buffer + * @data: 8 bit value to convert and insert into pkt at offset + */ +static void ice_pkt_insert_u8(u8 *pkt, int offset, u8 data) +{ + ice_memcpy(pkt + offset, &data, sizeof(data), ICE_NONDMA_TO_NONDMA); +} + +/** + * ice_pkt_insert_u8_tc - insert a u8 value into a memory buffer for TC ipv6. + * @pkt: packet buffer + * @offset: offset into buffer + * @data: 8 bit value to convert and insert into pkt at offset + * + * This function is designed for inserting Traffic Class (TC) for IPv6, + * since that TC is not aligned in number of bytes. Here we split it out + * into two part and fill each byte with data copy from pkt, then insert + * the two bytes data one by one. + */ +static void ice_pkt_insert_u8_tc(u8 *pkt, int offset, u8 data) +{ + u8 high, low; + + high = (data >> 4) + (*(pkt + offset) & 0xF0); + ice_memcpy(pkt + offset, &high, sizeof(high), ICE_NONDMA_TO_NONDMA); + + low = (*(pkt + offset + 1) & 0x0F) + ((data & 0x0F) << 4); + ice_memcpy(pkt + offset + 1, &low, sizeof(low), ICE_NONDMA_TO_NONDMA); +} + +/** + * ice_pkt_insert_u16 - insert a be16 value into a memory buffer. + * @pkt: packet buffer + * @offset: offset into buffer + * @data: 16 bit value to convert and insert into pkt at offset + */ +static void ice_pkt_insert_u16(u8 *pkt, int offset, __be16 data) +{ + ice_memcpy(pkt + offset, &data, sizeof(data), ICE_NONDMA_TO_NONDMA); +} + +/** + * ice_pkt_insert_u32 - insert a be32 value into a memory buffer. 
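As an aside on ice_pkt_insert_u8_tc() just above: the IPv6 Traffic Class straddles two header bytes, sharing one byte with the IP version nibble and the other with the top of the flow label, so only the two TC nibbles may be overwritten. A minimal, self-contained restatement of that split (plain stdint types and example values, not the driver's helpers):

#include <stdint.h>
#include <stdio.h>

static void insert_u8_tc(uint8_t *pkt, int offset, uint8_t tc)
{
	/* pkt[offset]     = version(4)  | TC[7:4]
	 * pkt[offset + 1] = TC[3:0]     | flow label[19:16]
	 */
	pkt[offset]     = (uint8_t)((pkt[offset] & 0xF0) | (tc >> 4));
	pkt[offset + 1] = (uint8_t)((tc << 4) | (pkt[offset + 1] & 0x0F));
}

int main(void)
{
	uint8_t v6_hdr[2] = { 0x60, 0x00 };	/* version 6, TC 0, flow label 0 */

	insert_u8_tc(v6_hdr, 0, 0xB8);		/* e.g. DSCP EF, ECN 0 */
	printf("%02x %02x\n", v6_hdr[0], v6_hdr[1]);	/* prints: 6b 80 */
	return 0;
}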
+ * @pkt: packet buffer + * @offset: offset into buffer + * @data: 32 bit value to convert and insert into pkt at offset + */ +static void ice_pkt_insert_u32(u8 *pkt, int offset, __be32 data) +{ + ice_memcpy(pkt + offset, &data, sizeof(data), ICE_NONDMA_TO_NONDMA); +} + +/** + * ice_pkt_insert_mac_addr - insert a MAC addr into a memory buffer. + * @pkt: packet buffer + * @offset: offset into buffer + * @addr: MAC address to convert and insert into pkt at offset + */ +static void ice_pkt_insert_mac_addr(u8 *pkt, u8 *addr) +{ + ice_memcpy(pkt, addr, ETH_ALEN, ICE_NONDMA_TO_NONDMA); +} + +/** + * ice_fdir_get_gen_prgm_pkt - generate a training packet + * @hw: pointer to the hardware structure + * @input: flow director filter data structure + * @pkt: pointer to return filter packet + * @frag: generate a fragment packet + * @tun: true implies generate a tunnel packet + */ +enum ice_status +ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input, + u8 *pkt, bool frag, bool tun) +{ + enum ice_fltr_ptype flow; + u16 tnl_port; + u8 *loc; + u16 idx; + + if (input->flow_type == ICE_FLTR_PTYPE_NONF_IPV4_OTHER) { + switch (input->ip.v4.proto) { + case ICE_IP_PROTO_TCP: + flow = ICE_FLTR_PTYPE_NONF_IPV4_TCP; + break; + case ICE_IP_PROTO_UDP: + flow = ICE_FLTR_PTYPE_NONF_IPV4_UDP; + break; + case ICE_IP_PROTO_SCTP: + flow = ICE_FLTR_PTYPE_NONF_IPV4_SCTP; + break; + default: + flow = ICE_FLTR_PTYPE_NONF_IPV4_OTHER; + break; + } + } else if (input->flow_type == ICE_FLTR_PTYPE_NONF_IPV6_OTHER) { + switch (input->ip.v6.proto) { + case ICE_IP_PROTO_TCP: + flow = ICE_FLTR_PTYPE_NONF_IPV6_TCP; + break; + case ICE_IP_PROTO_UDP: + flow = ICE_FLTR_PTYPE_NONF_IPV6_UDP; + break; + case ICE_IP_PROTO_SCTP: + flow = ICE_FLTR_PTYPE_NONF_IPV6_SCTP; + break; + default: + flow = ICE_FLTR_PTYPE_NONF_IPV6_OTHER; + break; + } + } else { + flow = input->flow_type; + } + + for (idx = 0; idx < ICE_FDIR_NUM_PKT; idx++) + if (ice_fdir_pkt[idx].flow == flow) + break; + if (idx == ICE_FDIR_NUM_PKT) + return ICE_ERR_PARAM; + if (!tun) { + ice_memcpy(pkt, ice_fdir_pkt[idx].pkt, + ice_fdir_pkt[idx].pkt_len, ICE_NONDMA_TO_NONDMA); + loc = pkt; + } else { + if (!ice_get_open_tunnel_port(hw, TNL_ALL, &tnl_port)) + return ICE_ERR_DOES_NOT_EXIST; + if (!ice_fdir_pkt[idx].tun_pkt) + return ICE_ERR_PARAM; + ice_memcpy(pkt, ice_fdir_pkt[idx].tun_pkt, + ice_fdir_pkt[idx].tun_pkt_len, ICE_NONDMA_TO_NONDMA); + ice_pkt_insert_u16(pkt, ICE_IPV4_UDP_DST_PORT_OFFSET, + HTONS(tnl_port)); + loc = &pkt[ICE_FDIR_TUN_PKT_OFF]; + } + + switch (flow) { + case ICE_FLTR_PTYPE_NONF_IPV4_TCP: + ice_pkt_insert_u32(loc, ICE_IPV4_DST_ADDR_OFFSET, + input->ip.v4.dst_ip); + ice_pkt_insert_u16(loc, ICE_IPV4_TCP_DST_PORT_OFFSET, + input->ip.v4.dst_port); + ice_pkt_insert_u32(loc, ICE_IPV4_SRC_ADDR_OFFSET, + input->ip.v4.src_ip); + ice_pkt_insert_u16(loc, ICE_IPV4_TCP_SRC_PORT_OFFSET, + input->ip.v4.src_port); + ice_pkt_insert_u8(loc, ICE_IPV4_TOS_OFFSET, input->ip.v4.tos); + ice_pkt_insert_u8(loc, ICE_IPV4_TTL_OFFSET, input->ip.v4.ttl); + ice_pkt_insert_mac_addr(loc, input->ext_data.dst_mac); + if (frag) + loc[20] = ICE_FDIR_IPV4_PKT_FLAG_DF; + break; + case ICE_FLTR_PTYPE_NONF_IPV4_UDP: + ice_pkt_insert_u32(loc, ICE_IPV4_DST_ADDR_OFFSET, + input->ip.v4.dst_ip); + ice_pkt_insert_u16(loc, ICE_IPV4_UDP_DST_PORT_OFFSET, + input->ip.v4.dst_port); + ice_pkt_insert_u32(loc, ICE_IPV4_SRC_ADDR_OFFSET, + input->ip.v4.src_ip); + ice_pkt_insert_u16(loc, ICE_IPV4_UDP_SRC_PORT_OFFSET, + input->ip.v4.src_port); + ice_pkt_insert_u8(loc, ICE_IPV4_TOS_OFFSET, 
input->ip.v4.tos); + ice_pkt_insert_u8(loc, ICE_IPV4_TTL_OFFSET, input->ip.v4.ttl); + ice_pkt_insert_mac_addr(loc, input->ext_data.dst_mac); + break; + case ICE_FLTR_PTYPE_NONF_IPV4_SCTP: + ice_pkt_insert_u32(loc, ICE_IPV4_DST_ADDR_OFFSET, + input->ip.v4.dst_ip); + ice_pkt_insert_u16(loc, ICE_IPV4_SCTP_DST_PORT_OFFSET, + input->ip.v4.dst_port); + ice_pkt_insert_u32(loc, ICE_IPV4_SRC_ADDR_OFFSET, + input->ip.v4.src_ip); + ice_pkt_insert_u16(loc, ICE_IPV4_SCTP_SRC_PORT_OFFSET, + input->ip.v4.src_port); + ice_pkt_insert_u8(loc, ICE_IPV4_TOS_OFFSET, input->ip.v4.tos); + ice_pkt_insert_u8(loc, ICE_IPV4_TTL_OFFSET, input->ip.v4.ttl); + ice_pkt_insert_mac_addr(loc, input->ext_data.dst_mac); + break; + case ICE_FLTR_PTYPE_NONF_IPV4_OTHER: + ice_pkt_insert_u32(loc, ICE_IPV4_DST_ADDR_OFFSET, + input->ip.v4.dst_ip); + ice_pkt_insert_u32(loc, ICE_IPV4_SRC_ADDR_OFFSET, + input->ip.v4.src_ip); + ice_pkt_insert_u8(loc, ICE_IPV4_TOS_OFFSET, input->ip.v4.tos); + ice_pkt_insert_u8(loc, ICE_IPV4_TTL_OFFSET, input->ip.v4.ttl); + ice_pkt_insert_u8(loc, ICE_IPV4_PROTO_OFFSET, + input->ip.v4.proto); + ice_pkt_insert_mac_addr(loc, input->ext_data.dst_mac); + break; + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP: + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP: + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP: + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER: + ice_pkt_insert_u32(loc, ICE_IPV4_SRC_ADDR_OFFSET, + input->ip.v4.src_ip); + ice_pkt_insert_u32(loc, ICE_IPV4_DST_ADDR_OFFSET, + input->ip.v4.dst_ip); + ice_pkt_insert_u32(loc, ICE_IPV4_GTPU_TEID_OFFSET, + input->gtpu_data.teid); + ice_pkt_insert_u6_qfi(loc, ICE_IPV4_GTPU_QFI_OFFSET, + input->gtpu_data.qfi); + break; + case ICE_FLTR_PTYPE_NONF_IPV6_TCP: + ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_DST_ADDR_OFFSET, + input->ip.v6.dst_ip); + ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_SRC_ADDR_OFFSET, + input->ip.v6.src_ip); + ice_pkt_insert_u16(loc, ICE_IPV6_TCP_DST_PORT_OFFSET, + input->ip.v6.dst_port); + ice_pkt_insert_u16(loc, ICE_IPV6_TCP_SRC_PORT_OFFSET, + input->ip.v6.src_port); + ice_pkt_insert_u8_tc(loc, ICE_IPV6_TC_OFFSET, input->ip.v6.tc); + ice_pkt_insert_u8(loc, ICE_IPV6_HLIM_OFFSET, input->ip.v6.hlim); + ice_pkt_insert_mac_addr(loc, input->ext_data.dst_mac); + break; + case ICE_FLTR_PTYPE_NONF_IPV6_UDP: + ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_DST_ADDR_OFFSET, + input->ip.v6.dst_ip); + ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_SRC_ADDR_OFFSET, + input->ip.v6.src_ip); + ice_pkt_insert_u16(loc, ICE_IPV6_UDP_DST_PORT_OFFSET, + input->ip.v6.dst_port); + ice_pkt_insert_u16(loc, ICE_IPV6_UDP_SRC_PORT_OFFSET, + input->ip.v6.src_port); + ice_pkt_insert_u8_tc(loc, ICE_IPV6_TC_OFFSET, input->ip.v6.tc); + ice_pkt_insert_u8(loc, ICE_IPV6_HLIM_OFFSET, input->ip.v6.hlim); + ice_pkt_insert_mac_addr(loc, input->ext_data.dst_mac); + break; + case ICE_FLTR_PTYPE_NONF_IPV6_SCTP: + ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_DST_ADDR_OFFSET, + input->ip.v6.dst_ip); + ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_SRC_ADDR_OFFSET, + input->ip.v6.src_ip); + ice_pkt_insert_u16(loc, ICE_IPV6_SCTP_DST_PORT_OFFSET, + input->ip.v6.dst_port); + ice_pkt_insert_u16(loc, ICE_IPV6_SCTP_SRC_PORT_OFFSET, + input->ip.v6.src_port); + ice_pkt_insert_u8_tc(loc, ICE_IPV6_TC_OFFSET, input->ip.v6.tc); + ice_pkt_insert_u8(loc, ICE_IPV6_HLIM_OFFSET, input->ip.v6.hlim); + ice_pkt_insert_mac_addr(loc, input->ext_data.dst_mac); + break; + case ICE_FLTR_PTYPE_NONF_IPV6_OTHER: + ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_DST_ADDR_OFFSET, + input->ip.v6.dst_ip); + ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_SRC_ADDR_OFFSET, + 
input->ip.v6.src_ip); + ice_pkt_insert_u8_tc(loc, ICE_IPV6_TC_OFFSET, input->ip.v6.tc); + ice_pkt_insert_u8(loc, ICE_IPV6_HLIM_OFFSET, input->ip.v6.hlim); + ice_pkt_insert_u8(loc, ICE_IPV6_PROTO_OFFSET, + input->ip.v6.proto); + ice_pkt_insert_mac_addr(loc, input->ext_data.dst_mac); + break; + default: + return ICE_ERR_PARAM; + } + + if (input->flex_fltr) + ice_pkt_insert_u16(loc, input->flex_offset, input->flex_word); + + return ICE_SUCCESS; +} + +/** + * ice_fdir_get_prgm_pkt - generate a training packet + * @input: flow director filter data structure + * @pkt: pointer to return filter packet + * @frag: generate a fragment packet + */ +enum ice_status +ice_fdir_get_prgm_pkt(struct ice_fdir_fltr *input, u8 *pkt, bool frag) +{ + return ice_fdir_get_gen_prgm_pkt(NULL, input, pkt, frag, false); +} + +/** + * ice_fdir_has_frag - does flow type have 2 ptypes + * @flow: flow ptype + * + * returns true is there is a fragment packet for this ptype + */ +bool ice_fdir_has_frag(enum ice_fltr_ptype flow) +{ + if (flow == ICE_FLTR_PTYPE_NONF_IPV4_OTHER) + return true; + else + return false; +} + +/** + * ice_fdir_find_by_idx - find filter with idx + * @hw: pointer to hardware structure + * @fltr_idx: index to find. + * + * Returns pointer to filter if found or null + */ +struct ice_fdir_fltr * +ice_fdir_find_fltr_by_idx(struct ice_hw *hw, u32 fltr_idx) +{ + struct ice_fdir_fltr *rule; + + LIST_FOR_EACH_ENTRY(rule, &hw->fdir_list_head, ice_fdir_fltr, + fltr_node) { + /* rule ID found in the list */ + if (fltr_idx == rule->fltr_id) + return rule; + if (fltr_idx < rule->fltr_id) + break; + } + return NULL; +} + +/** + * ice_fdir_list_add_fltr - add a new node to the flow director filter list + * @hw: hardware structure + * @fltr: filter node to add to structure + */ +void ice_fdir_list_add_fltr(struct ice_hw *hw, struct ice_fdir_fltr *fltr) +{ + struct ice_fdir_fltr *rule, *parent = NULL; + + LIST_FOR_EACH_ENTRY(rule, &hw->fdir_list_head, ice_fdir_fltr, + fltr_node) { + /* rule ID found or pass its spot in the list */ + if (rule->fltr_id >= fltr->fltr_id) + break; + parent = rule; + } + + if (parent) + LIST_ADD_AFTER(&fltr->fltr_node, &parent->fltr_node); + else + LIST_ADD(&fltr->fltr_node, &hw->fdir_list_head); +} + +/** + * ice_fdir_update_cntrs - increment / decrement filter counter + * @hw: pointer to hardware structure + * @flow: filter flow type + * @acl_fltr: true indicates an ACL filter + * @add: true implies filters added + */ +void +ice_fdir_update_cntrs(struct ice_hw *hw, enum ice_fltr_ptype flow, + bool acl_fltr, bool add) +{ + int incr; + + incr = add ? 
1 : -1; + hw->fdir_active_fltr += incr; + if (flow == ICE_FLTR_PTYPE_NONF_NONE || flow >= ICE_FLTR_PTYPE_MAX) { + ice_debug(hw, ICE_DBG_SW, "Unknown filter type %d\n", flow); + } else { + if (acl_fltr) + hw->acl_fltr_cnt[flow] += incr; + else + hw->fdir_fltr_cnt[flow] += incr; + } +} + +/** + * ice_cmp_ipv6_addr - compare 2 IP v6 addresses + * @a: IP v6 address + * @b: IP v6 address + * + * Returns 0 on equal, returns non-0 if different + */ +static int ice_cmp_ipv6_addr(__be32 *a, __be32 *b) +{ + return memcmp(a, b, 4 * sizeof(__be32)); +} + +/** + * ice_fdir_comp_rules - compare 2 filters + * @a: a Flow Director filter data structure + * @b: a Flow Director filter data structure + * @v6: bool true if v6 filter + * + * Returns true if the filters match + */ +static bool +ice_fdir_comp_rules(struct ice_fdir_fltr *a, struct ice_fdir_fltr *b, bool v6) +{ + enum ice_fltr_ptype flow_type = a->flow_type; + + /* The calling function already checks that the two filters have the + * same flow_type. + */ + if (!v6) { + if (flow_type == ICE_FLTR_PTYPE_NONF_IPV4_TCP || + flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP || + flow_type == ICE_FLTR_PTYPE_NONF_IPV4_SCTP) { + if (a->ip.v4.dst_ip == b->ip.v4.dst_ip && + a->ip.v4.src_ip == b->ip.v4.src_ip && + a->ip.v4.dst_port == b->ip.v4.dst_port && + a->ip.v4.src_port == b->ip.v4.src_port) + return true; + } else if (flow_type == ICE_FLTR_PTYPE_NONF_IPV4_OTHER) { + if (a->ip.v4.dst_ip == b->ip.v4.dst_ip && + a->ip.v4.src_ip == b->ip.v4.src_ip && + a->ip.v4.l4_header == b->ip.v4.l4_header && + a->ip.v4.proto == b->ip.v4.proto && + a->ip.v4.ip_ver == b->ip.v4.ip_ver && + a->ip.v4.tos == b->ip.v4.tos) + return true; + } + } else { + if (flow_type == ICE_FLTR_PTYPE_NONF_IPV6_UDP || + flow_type == ICE_FLTR_PTYPE_NONF_IPV6_TCP || + flow_type == ICE_FLTR_PTYPE_NONF_IPV6_SCTP) { + if (a->ip.v6.dst_port == b->ip.v6.dst_port && + a->ip.v6.src_port == b->ip.v6.src_port && + !ice_cmp_ipv6_addr(a->ip.v6.dst_ip, + b->ip.v6.dst_ip) && + !ice_cmp_ipv6_addr(a->ip.v6.src_ip, + b->ip.v6.src_ip)) + return true; + } else if (flow_type == ICE_FLTR_PTYPE_NONF_IPV6_OTHER) { + if (a->ip.v6.dst_port == b->ip.v6.dst_port && + a->ip.v6.src_port == b->ip.v6.src_port) + return true; + } + } + + return false; +} + +/** + * ice_fdir_is_dup_fltr - test if filter is already in list for PF + * @hw: hardware data structure + * @input: Flow Director filter data structure + * + * Returns true if the filter is found in the list + */ +bool ice_fdir_is_dup_fltr(struct ice_hw *hw, struct ice_fdir_fltr *input) +{ + struct ice_fdir_fltr *rule; + bool ret = false; + + LIST_FOR_EACH_ENTRY(rule, &hw->fdir_list_head, ice_fdir_fltr, + fltr_node) { + enum ice_fltr_ptype flow_type; + + if (rule->flow_type != input->flow_type) + continue; + + flow_type = input->flow_type; + if (flow_type == ICE_FLTR_PTYPE_NONF_IPV4_TCP || + flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP || + flow_type == ICE_FLTR_PTYPE_NONF_IPV4_SCTP || + flow_type == ICE_FLTR_PTYPE_NONF_IPV4_OTHER) + ret = ice_fdir_comp_rules(rule, input, false); + else + ret = ice_fdir_comp_rules(rule, input, true); + if (ret) { + if (rule->fltr_id == input->fltr_id && + rule->q_index != input->q_index) + ret = false; + else + break; + } + } + + return ret; +} + +/** + * ice_clear_vsi_fd_table - admin command to clear FD table for a VSI + * @hw: hardware data structure + * @vsi_num: vsi_num (HW VSI num) + * + * Clears FD table entries by issuing admin command (direct, 0x0B06) + * Must to pass valid vsi_num as returned by "AddVSI". 
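ice_set_fd_desc_val() earlier in this file builds both descriptor quad-words with a single idiom: shift each context field to its *_S bit position, mask it with *_M, OR it into the 64-bit word, and store the result little-endian via CPU_TO_LE64(). A generic pack/unpack sketch of that idiom with a hypothetical two-field layout (the QW_* names are illustrative, not the hardware's register map):

#include <stdint.h>
#include <stdio.h>

/* hypothetical 64-bit descriptor layout: qindex in bits 0..10, drop in bit 20 */
#define QW_QINDEX_S	0
#define QW_QINDEX_M	(0x7FFULL << QW_QINDEX_S)
#define QW_DROP_S	20
#define QW_DROP_M	(0x1ULL << QW_DROP_S)

static uint64_t pack(uint16_t qindex, uint8_t drop)
{
	uint64_t qw = 0;

	qw |= ((uint64_t)qindex << QW_QINDEX_S) & QW_QINDEX_M;
	qw |= ((uint64_t)drop << QW_DROP_S) & QW_DROP_M;
	return qw;	/* the driver would then convert this with CPU_TO_LE64() */
}

static uint16_t unpack_qindex(uint64_t qw)
{
	return (uint16_t)((qw & QW_QINDEX_M) >> QW_QINDEX_S);
}

int main(void)
{
	uint64_t qw = pack(42, 1);

	printf("qw=0x%llx qindex=%u\n", (unsigned long long)qw,
	       (unsigned)unpack_qindex(qw));	/* qw=0x10002a qindex=42 */
	return 0;
}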
+ */ +enum ice_status ice_clear_vsi_fd_table(struct ice_hw *hw, u16 vsi_num) +{ + struct ice_aqc_clear_fd_table *cmd; + struct ice_aq_desc desc; + + cmd = &desc.params.clear_fd_table; + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_fd_table); + cmd->clear_type = CL_FD_VM_VF_TYPE_VSI_IDX; + + cmd->vsi_index = CPU_TO_LE16(vsi_num); + return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); +} + +/** + * ice_clear_pf_fd_table - admin command to clear FD table for PF + * @hw: hardware data structure + * + * Clears FD table entries for a PF by issuing admin command (direct, 0x0B06) + */ +enum ice_status ice_clear_pf_fd_table(struct ice_hw *hw) +{ + struct ice_aqc_clear_fd_table *cmd; + struct ice_aq_desc desc; + + cmd = &desc.params.clear_fd_table; + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_fd_table); + cmd->clear_type = CL_FD_VM_VF_TYPE_PF_IDX; + /* vsi_index must be 0 to clear FD table for a PF */ + cmd->vsi_index = CPU_TO_LE16(0); + + return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); +} diff --git a/src/spdk/dpdk/drivers/net/ice/base/ice_fdir.h b/src/spdk/dpdk/drivers/net/ice/base/ice_fdir.h new file mode 100644 index 000000000..18656c55f --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ice/base/ice_fdir.h @@ -0,0 +1,239 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + +#ifndef _ICE_FDIR_H_ +#define _ICE_FDIR_H_ + +#include "ice_common.h" + +/* Flow Director (FD) Filter Programming descriptor */ +struct ice_fd_fltr_desc_ctx { + u32 fdid; + u16 qindex; + u16 cnt_index; + u16 fd_vsi; + u16 flex_val; + u8 comp_q; + u8 comp_report; + u8 fd_space; + u8 cnt_ena; + u8 evict_ena; + u8 toq; + u8 toq_prio; + u8 dpu_recipe; + u8 drop; + u8 flex_prio; + u8 flex_mdid; + u8 dtype; + u8 pcmd; + u8 desc_prof_prio; + u8 desc_prof; + u8 swap; + u8 fdid_prio; + u8 fdid_mdid; +}; + +enum ice_status ice_alloc_fd_res_cntr(struct ice_hw *hw, u16 *cntr_id); +enum ice_status ice_free_fd_res_cntr(struct ice_hw *hw, u16 cntr_id); +void ice_set_dflt_val_fd_desc(struct ice_fd_fltr_desc_ctx *fd_fltr_ctx); +enum ice_status +ice_alloc_fd_guar_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr); +enum ice_status +ice_free_fd_guar_item(struct ice_hw *hw, u16 cntr_id, u16 num_fltr); +enum ice_status +ice_alloc_fd_shrd_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr); +enum ice_status +ice_free_fd_shrd_item(struct ice_hw *hw, u16 cntr_id, u16 num_fltr); +enum ice_status ice_clear_vsi_fd_table(struct ice_hw *hw, u16 vsi_num); +enum ice_status ice_clear_pf_fd_table(struct ice_hw *hw); + +#define ICE_FDIR_IP_PROTOCOLS +#define ICE_IP_PROTO_TCP 6 +#define ICE_IP_PROTO_UDP 17 +#define ICE_IP_PROTO_SCTP 132 +#define ICE_IP_PROTO_IP 0 +#define ICE_IP_PROTO_ESP 50 + +#define ICE_FDIR_TUN_PKT_OFF 50 +#define ICE_FDIR_MAX_RAW_PKT_SIZE (512 + ICE_FDIR_TUN_PKT_OFF) +#define ICE_FDIR_BUF_FULL_MARGIN 10 +#define ICE_FDIR_BUF_HEAD_ROOM 32 + +/* macros for offsets into packets for flow director programming */ +#define ICE_IPV4_SRC_ADDR_OFFSET 26 +#define ICE_IPV4_DST_ADDR_OFFSET 30 +#define ICE_IPV4_TCP_SRC_PORT_OFFSET 34 +#define ICE_IPV4_TCP_DST_PORT_OFFSET 36 +#define ICE_IPV4_UDP_SRC_PORT_OFFSET 34 +#define ICE_IPV4_UDP_DST_PORT_OFFSET 36 +#define ICE_IPV4_SCTP_SRC_PORT_OFFSET 34 +#define ICE_IPV4_SCTP_DST_PORT_OFFSET 36 +#define ICE_IPV4_PROTO_OFFSET 23 +#define ICE_IPV6_SRC_ADDR_OFFSET 22 +#define ICE_IPV6_DST_ADDR_OFFSET 38 +#define ICE_IPV6_TCP_SRC_PORT_OFFSET 54 +#define ICE_IPV6_TCP_DST_PORT_OFFSET 56 +#define ICE_IPV6_UDP_SRC_PORT_OFFSET 54 +#define 
ICE_IPV6_UDP_DST_PORT_OFFSET 56 +#define ICE_IPV6_SCTP_SRC_PORT_OFFSET 54 +#define ICE_IPV6_SCTP_DST_PORT_OFFSET 56 + +#define ICE_IPV4_TOS_OFFSET 15 +#define ICE_IPV4_TTL_OFFSET 22 +#define ICE_IPV6_TC_OFFSET 14 +#define ICE_IPV6_HLIM_OFFSET 21 +#define ICE_IPV6_PROTO_OFFSET 20 +#define ICE_IPV4_GTPU_TEID_OFFSET 46 +#define ICE_IPV4_GTPU_QFI_OFFSET 56 + +#define ICE_FDIR_MAX_FLTRS 16384 + +/* IP v4 has 2 flag bits that enable fragment processing: DF and MF. DF + * requests that the packet not be fragmented. MF indicates that a packet has + * been fragmented. + */ +#define ICE_FDIR_IPV4_PKT_FLAG_DF 0x20 +#define ICE_FDIR_IPV4_PKT_FLAG_MF 0x40 + +enum ice_fltr_prgm_desc_dest { + ICE_FLTR_PRGM_DESC_DEST_DROP_PKT, + ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX, + ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QGROUP, + ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_OTHER, +}; + +enum ice_fltr_prgm_desc_fd_status { + ICE_FLTR_PRGM_DESC_FD_STATUS_NONE, + ICE_FLTR_PRGM_DESC_FD_STATUS_FD_ID, + ICE_FLTR_PRGM_DESC_FD_STATUS_FD_ID_4FLEX_BYTES, + ICE_FLTR_PRGM_DESC_FD_STATUS_8FLEX_BYTES, +}; + +#define ICE_FLTR_PRGM_FLEX_WORD_SIZE sizeof(__be16) + +struct ice_rx_flow_userdef { + u16 flex_word; + u16 flex_offset; + u16 flex_fltr; +}; + +struct ice_fdir_v4 { + __be32 dst_ip; + __be32 src_ip; + __be16 dst_port; + __be16 src_port; + __be32 l4_header; + __be32 sec_parm_idx; /* security parameter index */ + u8 tos; + u8 ip_ver; + u8 proto; + u8 ttl; +}; + +#define ICE_IPV6_ADDR_LEN_AS_U32 4 + +struct ice_fdir_v6 { + __be32 dst_ip[ICE_IPV6_ADDR_LEN_AS_U32]; + __be32 src_ip[ICE_IPV6_ADDR_LEN_AS_U32]; + __be16 dst_port; + __be16 src_port; + __be32 l4_header; /* next header */ + __be32 sec_parm_idx; /* security parameter index */ + u8 tc; + u8 proto; + u8 hlim; +}; + +struct ice_fdir_udp_gtp { + u8 flags; + u8 msg_type; + __be16 rsrvd_len; + __be32 teid; + __be16 rsrvd_seq_nbr; + u8 rsrvd_n_pdu_nbr; + u8 rsrvd_next_ext_type; + u8 rsvrd_ext_len; + u8 pdu_type:4, + spare:4; + u8 ppp:1, + rqi:1, + qfi:6; + u32 rsvrd; + u8 next_ext; +}; + +struct ice_fdir_extra { + u8 dst_mac[ETH_ALEN]; /* dest MAC address */ + u32 usr_def[2]; /* user data */ + __be16 vlan_type; /* VLAN ethertype */ + __be16 vlan_tag; /* VLAN tag info */ +}; + +struct ice_fdir_fltr { + struct LIST_ENTRY_TYPE fltr_node; + enum ice_fltr_ptype flow_type; + + union { + struct ice_fdir_v4 v4; + struct ice_fdir_v6 v6; + } ip, mask; + + struct ice_fdir_udp_gtp gtpu_data; + struct ice_fdir_udp_gtp gtpu_mask; + + struct ice_fdir_extra ext_data; + struct ice_fdir_extra ext_mask; + + /* flex byte filter data */ + __be16 flex_word; + /* queue region size (=2^q_region) */ + u8 q_region; + u16 flex_offset; + u16 flex_fltr; + + /* filter control */ + u16 q_index; + u16 dest_vsi; + u8 dest_ctl; + u8 cnt_ena; + u8 fltr_status; + u16 cnt_index; + u32 fltr_id; + u8 fdid_prio; + /* Set to true for an ACL filter */ + bool acl_fltr; +}; + +/* Dummy packet filter definition structure. 
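The *_OFFSET macros above are plain byte positions into the dummy training packets; ice_fdir_get_gen_prgm_pkt() in ice_fdir.c simply copies the template for the flow type and then overwrites the tuple fields at those offsets in network byte order. A reduced, self-contained sketch of that copy-then-patch pattern (toy template and offset only, not the real packet layouts):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* hypothetical 8-byte "template" with the destination port at offset 4 */
#define TOY_DST_PORT_OFFSET 4

static const uint8_t toy_tmpl[8] = { 0x45, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00 };

static void toy_insert_be16(uint8_t *pkt, int offset, uint16_t host_val)
{
	pkt[offset]     = (uint8_t)(host_val >> 8);	/* network byte order */
	pkt[offset + 1] = (uint8_t)(host_val & 0xFF);
}

int main(void)
{
	uint8_t pkt[sizeof(toy_tmpl)];
	size_t i;

	memcpy(pkt, toy_tmpl, sizeof(toy_tmpl));	 /* 1) copy the base template  */
	toy_insert_be16(pkt, TOY_DST_PORT_OFFSET, 4789); /* 2) patch the field in place */

	for (i = 0; i < sizeof(pkt); i++)
		printf("%02x ", pkt[i]);
	printf("\n");	/* prints: 45 00 00 08 12 b5 00 00 */
	return 0;
}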
*/ +struct ice_fdir_base_pkt { + enum ice_fltr_ptype flow; + u16 pkt_len; + const u8 *pkt; + u16 tun_pkt_len; + const u8 *tun_pkt; +}; + +void +ice_fdir_get_prgm_desc(struct ice_hw *hw, struct ice_fdir_fltr *input, + struct ice_fltr_desc *fdesc, bool add); +enum ice_status +ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input, + u8 *pkt, bool frag, bool tun); +enum ice_status +ice_fdir_get_prgm_pkt(struct ice_fdir_fltr *input, u8 *pkt, bool frag); +enum ice_status +ice_add_del_fdir(struct ice_hw *hw, struct ice_fdir_fltr *input, bool add); +int ice_get_fdir_cnt_all(struct ice_hw *hw); +bool ice_fdir_is_dup_fltr(struct ice_hw *hw, struct ice_fdir_fltr *input); +enum ice_status +ice_update_fdir_list_entry(struct ice_hw *hw, struct ice_fdir_fltr *input, + u16 sw_idx); +bool ice_fdir_has_frag(enum ice_fltr_ptype flow); +struct ice_fdir_fltr * +ice_fdir_find_fltr_by_idx(struct ice_hw *hw, u32 fltr_idx); +void +ice_fdir_update_cntrs(struct ice_hw *hw, enum ice_fltr_ptype flow, + bool acl_fltr, bool add); +void ice_fdir_list_add_fltr(struct ice_hw *hw, struct ice_fdir_fltr *input); +#endif /* _ICE_FDIR_H_ */ diff --git a/src/spdk/dpdk/drivers/net/ice/base/ice_flex_pipe.c b/src/spdk/dpdk/drivers/net/ice/base/ice_flex_pipe.c new file mode 100644 index 000000000..512ced6b8 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ice/base/ice_flex_pipe.c @@ -0,0 +1,5955 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + +#include "ice_common.h" +#include "ice_flex_pipe.h" +#include "ice_protocol_type.h" +#include "ice_flow.h" + +/* To support tunneling entries by PF, the package will append the PF number to + * the label; for example TNL_VXLAN_PF0, TNL_VXLAN_PF1, TNL_VXLAN_PF2, etc. + */ +static const struct ice_tunnel_type_scan tnls[] = { + { TNL_VXLAN, "TNL_VXLAN_PF" }, + { TNL_GENEVE, "TNL_GENEVE_PF" }, + { TNL_LAST, "" } +}; + +static const u32 ice_sect_lkup[ICE_BLK_COUNT][ICE_SECT_COUNT] = { + /* SWITCH */ + { + ICE_SID_XLT0_SW, + ICE_SID_XLT_KEY_BUILDER_SW, + ICE_SID_XLT1_SW, + ICE_SID_XLT2_SW, + ICE_SID_PROFID_TCAM_SW, + ICE_SID_PROFID_REDIR_SW, + ICE_SID_FLD_VEC_SW, + ICE_SID_CDID_KEY_BUILDER_SW, + ICE_SID_CDID_REDIR_SW + }, + + /* ACL */ + { + ICE_SID_XLT0_ACL, + ICE_SID_XLT_KEY_BUILDER_ACL, + ICE_SID_XLT1_ACL, + ICE_SID_XLT2_ACL, + ICE_SID_PROFID_TCAM_ACL, + ICE_SID_PROFID_REDIR_ACL, + ICE_SID_FLD_VEC_ACL, + ICE_SID_CDID_KEY_BUILDER_ACL, + ICE_SID_CDID_REDIR_ACL + }, + + /* FD */ + { + ICE_SID_XLT0_FD, + ICE_SID_XLT_KEY_BUILDER_FD, + ICE_SID_XLT1_FD, + ICE_SID_XLT2_FD, + ICE_SID_PROFID_TCAM_FD, + ICE_SID_PROFID_REDIR_FD, + ICE_SID_FLD_VEC_FD, + ICE_SID_CDID_KEY_BUILDER_FD, + ICE_SID_CDID_REDIR_FD + }, + + /* RSS */ + { + ICE_SID_XLT0_RSS, + ICE_SID_XLT_KEY_BUILDER_RSS, + ICE_SID_XLT1_RSS, + ICE_SID_XLT2_RSS, + ICE_SID_PROFID_TCAM_RSS, + ICE_SID_PROFID_REDIR_RSS, + ICE_SID_FLD_VEC_RSS, + ICE_SID_CDID_KEY_BUILDER_RSS, + ICE_SID_CDID_REDIR_RSS + }, + + /* PE */ + { + ICE_SID_XLT0_PE, + ICE_SID_XLT_KEY_BUILDER_PE, + ICE_SID_XLT1_PE, + ICE_SID_XLT2_PE, + ICE_SID_PROFID_TCAM_PE, + ICE_SID_PROFID_REDIR_PE, + ICE_SID_FLD_VEC_PE, + ICE_SID_CDID_KEY_BUILDER_PE, + ICE_SID_CDID_REDIR_PE + } +}; + +/** + * ice_sect_id - returns section ID + * @blk: block type + * @sect: section type + * + * This helper function returns the proper section ID given a block type and a + * section type. 
+ */ +static u32 ice_sect_id(enum ice_block blk, enum ice_sect sect) +{ + return ice_sect_lkup[blk][sect]; +} + +/** + * ice_pkg_val_buf + * @buf: pointer to the ice buffer + * + * This helper function validates a buffer's header. + */ +static struct ice_buf_hdr *ice_pkg_val_buf(struct ice_buf *buf) +{ + struct ice_buf_hdr *hdr; + u16 section_count; + u16 data_end; + + hdr = (struct ice_buf_hdr *)buf->buf; + /* verify data */ + section_count = LE16_TO_CPU(hdr->section_count); + if (section_count < ICE_MIN_S_COUNT || section_count > ICE_MAX_S_COUNT) + return NULL; + + data_end = LE16_TO_CPU(hdr->data_end); + if (data_end < ICE_MIN_S_DATA_END || data_end > ICE_MAX_S_DATA_END) + return NULL; + + return hdr; +} + +/** + * ice_find_buf_table + * @ice_seg: pointer to the ice segment + * + * Returns the address of the buffer table within the ice segment. + */ +static struct ice_buf_table *ice_find_buf_table(struct ice_seg *ice_seg) +{ + struct ice_nvm_table *nvms; + + nvms = (struct ice_nvm_table *) + (ice_seg->device_table + + LE32_TO_CPU(ice_seg->device_table_count)); + + return (_FORCE_ struct ice_buf_table *) + (nvms->vers + LE32_TO_CPU(nvms->table_count)); +} + +/** + * ice_pkg_enum_buf + * @ice_seg: pointer to the ice segment (or NULL on subsequent calls) + * @state: pointer to the enum state + * + * This function will enumerate all the buffers in the ice segment. The first + * call is made with the ice_seg parameter non-NULL; on subsequent calls, + * ice_seg is set to NULL which continues the enumeration. When the function + * returns a NULL pointer, then the end of the buffers has been reached, or an + * unexpected value has been detected (for example an invalid section count or + * an invalid buffer end value). + */ +static struct ice_buf_hdr * +ice_pkg_enum_buf(struct ice_seg *ice_seg, struct ice_pkg_enum *state) +{ + if (ice_seg) { + state->buf_table = ice_find_buf_table(ice_seg); + if (!state->buf_table) + return NULL; + + state->buf_idx = 0; + return ice_pkg_val_buf(state->buf_table->buf_array); + } + + if (++state->buf_idx < LE32_TO_CPU(state->buf_table->buf_count)) + return ice_pkg_val_buf(state->buf_table->buf_array + + state->buf_idx); + else + return NULL; +} + +/** + * ice_pkg_advance_sect + * @ice_seg: pointer to the ice segment (or NULL on subsequent calls) + * @state: pointer to the enum state + * + * This helper function will advance the section within the ice segment, + * also advancing the buffer if needed. + */ +static bool +ice_pkg_advance_sect(struct ice_seg *ice_seg, struct ice_pkg_enum *state) +{ + if (!ice_seg && !state->buf) + return false; + + if (!ice_seg && state->buf) + if (++state->sect_idx < LE16_TO_CPU(state->buf->section_count)) + return true; + + state->buf = ice_pkg_enum_buf(ice_seg, state); + if (!state->buf) + return false; + + /* start of new buffer, reset section index */ + state->sect_idx = 0; + return true; +} + +/** + * ice_pkg_enum_section + * @ice_seg: pointer to the ice segment (or NULL on subsequent calls) + * @state: pointer to the enum state + * @sect_type: section type to enumerate + * + * This function will enumerate all the sections of a particular type in the + * ice segment. The first call is made with the ice_seg parameter non-NULL; + * on subsequent calls, ice_seg is set to NULL which continues the enumeration. + * When the function returns a NULL pointer, then the end of the matching + * sections has been reached. 
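The same calling convention recurs through all of these enumerators: the first call passes the ice segment to prime the iteration state, every later call passes NULL to resume from the saved position, and a NULL return marks the end. A toy, self-contained sketch of that contract (the toy_* names are illustrative, not the driver's types):

#include <stddef.h>
#include <stdio.h>

struct toy_pkg { const int *items; size_t count; };
struct toy_state { const struct toy_pkg *pkg; size_t idx; };

static const int *toy_enum(const struct toy_pkg *pkg, struct toy_state *st)
{
	if (pkg) {		/* first call: latch the container, start at 0 */
		st->pkg = pkg;
		st->idx = 0;
	}
	if (!st->pkg || st->idx >= st->pkg->count)
		return NULL;	/* end of the enumeration */
	return &st->pkg->items[st->idx++];
}

int main(void)
{
	static const int vals[] = { 3, 1, 4 };
	struct toy_pkg pkg = { vals, 3 };
	struct toy_state st = { 0 };
	const int *it;

	for (it = toy_enum(&pkg, &st); it; it = toy_enum(NULL, &st))
		printf("%d\n", *it);
	return 0;
}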
+ */ +static void * +ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state, + u32 sect_type) +{ + u16 offset, size; + + if (ice_seg) + state->type = sect_type; + + if (!ice_pkg_advance_sect(ice_seg, state)) + return NULL; + + /* scan for next matching section */ + while (state->buf->section_entry[state->sect_idx].type != + CPU_TO_LE32(state->type)) + if (!ice_pkg_advance_sect(NULL, state)) + return NULL; + + /* validate section */ + offset = LE16_TO_CPU(state->buf->section_entry[state->sect_idx].offset); + if (offset < ICE_MIN_S_OFF || offset > ICE_MAX_S_OFF) + return NULL; + + size = LE16_TO_CPU(state->buf->section_entry[state->sect_idx].size); + if (size < ICE_MIN_S_SZ || size > ICE_MAX_S_SZ) + return NULL; + + /* make sure the section fits in the buffer */ + if (offset + size > ICE_PKG_BUF_SIZE) + return NULL; + + state->sect_type = + LE32_TO_CPU(state->buf->section_entry[state->sect_idx].type); + + /* calc pointer to this section */ + state->sect = ((u8 *)state->buf) + + LE16_TO_CPU(state->buf->section_entry[state->sect_idx].offset); + + return state->sect; +} + +/** + * ice_pkg_enum_entry + * @ice_seg: pointer to the ice segment (or NULL on subsequent calls) + * @state: pointer to the enum state + * @sect_type: section type to enumerate + * @offset: pointer to variable that receives the offset in the table (optional) + * @handler: function that handles access to the entries into the section type + * + * This function will enumerate all the entries in particular section type in + * the ice segment. The first call is made with the ice_seg parameter non-NULL; + * on subsequent calls, ice_seg is set to NULL which continues the enumeration. + * When the function returns a NULL pointer, then the end of the entries has + * been reached. + * + * Since each section may have a different header and entry size, the handler + * function is needed to determine the number and location entries in each + * section. + * + * The offset parameter is optional, but should be used for sections that + * contain an offset for each section table. For such cases, the section handler + * function must return the appropriate offset + index to give the absolution + * offset for each entry. For example, if the base for a section's header + * indicates a base offset of 10, and the index for the entry is 2, then + * section handler function should set the offset to 10 + 2 = 12. 
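The handler contract described above is the one subtle part: for sections that carry a base offset, the handler must report base + index so callers see an absolute table position. A self-contained sketch with a hypothetical section layout (the toy_* names are illustrative, not the package format):

#include <stdint.h>
#include <stdio.h>

/* hypothetical section: a base offset followed by a count of 16-bit entries */
struct toy_sect { uint32_t base; uint16_t count; uint16_t entry[8]; };

/* handler shaped like the ones passed to the entry enumerator: return the
 * index-th entry of the section and report base + index as its absolute
 * offset within the table.
 */
static void *toy_handler(uint32_t sect_type, void *section, uint32_t index,
			 uint32_t *offset)
{
	struct toy_sect *s = (struct toy_sect *)section;

	(void)sect_type;
	if (!s || index >= s->count)
		return NULL;
	if (offset)
		*offset = s->base + index;
	return &s->entry[index];
}

int main(void)
{
	struct toy_sect s = { .base = 10, .count = 3, .entry = { 7, 8, 9 } };
	uint32_t off = 0;
	uint16_t *e = (uint16_t *)toy_handler(0, &s, 2, &off);

	printf("entry=%u offset=%u\n", (unsigned)*e, (unsigned)off);	/* entry=9 offset=12 */
	return 0;
}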
+ */ +static void * +ice_pkg_enum_entry(struct ice_seg *ice_seg, struct ice_pkg_enum *state, + u32 sect_type, u32 *offset, + void *(*handler)(u32 sect_type, void *section, + u32 index, u32 *offset)) +{ + void *entry; + + if (ice_seg) { + if (!handler) + return NULL; + + if (!ice_pkg_enum_section(ice_seg, state, sect_type)) + return NULL; + + state->entry_idx = 0; + state->handler = handler; + } else { + state->entry_idx++; + } + + if (!state->handler) + return NULL; + + /* get entry */ + entry = state->handler(state->sect_type, state->sect, state->entry_idx, + offset); + if (!entry) { + /* end of a section, look for another section of this type */ + if (!ice_pkg_enum_section(NULL, state, 0)) + return NULL; + + state->entry_idx = 0; + entry = state->handler(state->sect_type, state->sect, + state->entry_idx, offset); + } + + return entry; +} + +/** + * ice_boost_tcam_handler + * @sect_type: section type + * @section: pointer to section + * @index: index of the boost TCAM entry to be returned + * @offset: pointer to receive absolute offset, always 0 for boost TCAM sections + * + * This is a callback function that can be passed to ice_pkg_enum_entry. + * Handles enumeration of individual boost TCAM entries. + */ +static void * +ice_boost_tcam_handler(u32 sect_type, void *section, u32 index, u32 *offset) +{ + struct ice_boost_tcam_section *boost; + + if (!section) + return NULL; + + if (sect_type != ICE_SID_RXPARSER_BOOST_TCAM) + return NULL; + + if (index > ICE_MAX_BST_TCAMS_IN_BUF) + return NULL; + + if (offset) + *offset = 0; + + boost = (struct ice_boost_tcam_section *)section; + if (index >= LE16_TO_CPU(boost->count)) + return NULL; + + return boost->tcam + index; +} + +/** + * ice_find_boost_entry + * @ice_seg: pointer to the ice segment (non-NULL) + * @addr: Boost TCAM address of entry to search for + * @entry: returns pointer to the entry + * + * Finds a particular Boost TCAM entry and returns a pointer to that entry + * if it is found. The ice_seg parameter must not be NULL since the first call + * to ice_pkg_enum_entry requires a pointer to an actual ice_segment structure. + */ +static enum ice_status +ice_find_boost_entry(struct ice_seg *ice_seg, u16 addr, + struct ice_boost_tcam_entry **entry) +{ + struct ice_boost_tcam_entry *tcam; + struct ice_pkg_enum state; + + ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM); + + if (!ice_seg) + return ICE_ERR_PARAM; + + do { + tcam = (struct ice_boost_tcam_entry *) + ice_pkg_enum_entry(ice_seg, &state, + ICE_SID_RXPARSER_BOOST_TCAM, NULL, + ice_boost_tcam_handler); + if (tcam && LE16_TO_CPU(tcam->addr) == addr) { + *entry = tcam; + return ICE_SUCCESS; + } + + ice_seg = NULL; + } while (tcam); + + *entry = NULL; + return ICE_ERR_CFG; +} + +/** + * ice_label_enum_handler + * @sect_type: section type + * @section: pointer to section + * @index: index of the label entry to be returned + * @offset: pointer to receive absolute offset, always zero for label sections + * + * This is a callback function that can be passed to ice_pkg_enum_entry. + * Handles enumeration of individual label entries. 
+ */ +static void * +ice_label_enum_handler(u32 __ALWAYS_UNUSED sect_type, void *section, u32 index, + u32 *offset) +{ + struct ice_label_section *labels; + + if (!section) + return NULL; + + if (index > ICE_MAX_LABELS_IN_BUF) + return NULL; + + if (offset) + *offset = 0; + + labels = (struct ice_label_section *)section; + if (index >= LE16_TO_CPU(labels->count)) + return NULL; + + return labels->label + index; +} + +/** + * ice_enum_labels + * @ice_seg: pointer to the ice segment (NULL on subsequent calls) + * @type: the section type that will contain the label (0 on subsequent calls) + * @state: ice_pkg_enum structure that will hold the state of the enumeration + * @value: pointer to a value that will return the label's value if found + * + * Enumerates a list of labels in the package. The caller will call + * ice_enum_labels(ice_seg, type, ...) to start the enumeration, then call + * ice_enum_labels(NULL, 0, ...) to continue. When the function returns a NULL + * the end of the list has been reached. + */ +static char * +ice_enum_labels(struct ice_seg *ice_seg, u32 type, struct ice_pkg_enum *state, + u16 *value) +{ + struct ice_label *label; + + /* Check for valid label section on first call */ + if (type && !(type >= ICE_SID_LBL_FIRST && type <= ICE_SID_LBL_LAST)) + return NULL; + + label = (struct ice_label *)ice_pkg_enum_entry(ice_seg, state, type, + NULL, + ice_label_enum_handler); + if (!label) + return NULL; + + *value = LE16_TO_CPU(label->value); + return label->name; +} + +/** + * ice_init_pkg_hints + * @hw: pointer to the HW structure + * @ice_seg: pointer to the segment of the package scan (non-NULL) + * + * This function will scan the package and save off relevant information + * (hints or metadata) for driver use. The ice_seg parameter must not be NULL + * since the first call to ice_enum_labels requires a pointer to an actual + * ice_seg structure. + */ +static void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg) +{ + struct ice_pkg_enum state; + char *label_name; + u16 val; + int i; + + ice_memset(&hw->tnl, 0, sizeof(hw->tnl), ICE_NONDMA_MEM); + ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM); + + if (!ice_seg) + return; + + label_name = ice_enum_labels(ice_seg, ICE_SID_LBL_RXPARSER_TMEM, &state, + &val); + + while (label_name && hw->tnl.count < ICE_TUNNEL_MAX_ENTRIES) { + for (i = 0; tnls[i].type != TNL_LAST; i++) { + size_t len = strlen(tnls[i].label_prefix); + + /* Look for matching label start, before continuing */ + if (strncmp(label_name, tnls[i].label_prefix, len)) + continue; + + /* Make sure this label matches our PF. Note that the PF + * character ('0' - '7') will be located where our + * prefix string's null terminator is located. 
+ */ + if ((label_name[len] - '0') == hw->pf_id) { + hw->tnl.tbl[hw->tnl.count].type = tnls[i].type; + hw->tnl.tbl[hw->tnl.count].valid = false; + hw->tnl.tbl[hw->tnl.count].in_use = false; + hw->tnl.tbl[hw->tnl.count].marked = false; + hw->tnl.tbl[hw->tnl.count].boost_addr = val; + hw->tnl.tbl[hw->tnl.count].port = 0; + hw->tnl.count++; + break; + } + } + + label_name = ice_enum_labels(NULL, 0, &state, &val); + } + + /* Cache the appropriate boost TCAM entry pointers */ + for (i = 0; i < hw->tnl.count; i++) { + ice_find_boost_entry(ice_seg, hw->tnl.tbl[i].boost_addr, + &hw->tnl.tbl[i].boost_entry); + if (hw->tnl.tbl[i].boost_entry) + hw->tnl.tbl[i].valid = true; + } +} + +/* Key creation */ + +#define ICE_DC_KEY 0x1 /* don't care */ +#define ICE_DC_KEYINV 0x1 +#define ICE_NM_KEY 0x0 /* never match */ +#define ICE_NM_KEYINV 0x0 +#define ICE_0_KEY 0x1 /* match 0 */ +#define ICE_0_KEYINV 0x0 +#define ICE_1_KEY 0x0 /* match 1 */ +#define ICE_1_KEYINV 0x1 + +/** + * ice_gen_key_word - generate 16-bits of a key/mask word + * @val: the value + * @valid: valid bits mask (change only the valid bits) + * @dont_care: don't care mask + * @nvr_mtch: never match mask + * @key: pointer to an array of where the resulting key portion + * @key_inv: pointer to an array of where the resulting key invert portion + * + * This function generates 16-bits from a 8-bit value, an 8-bit don't care mask + * and an 8-bit never match mask. The 16-bits of output are divided into 8 bits + * of key and 8 bits of key invert. + * + * '0' = b01, always match a 0 bit + * '1' = b10, always match a 1 bit + * '?' = b11, don't care bit (always matches) + * '~' = b00, never match bit + * + * Input: + * val: b0 1 0 1 0 1 + * dont_care: b0 0 1 1 0 0 + * never_mtch: b0 0 0 0 1 1 + * ------------------------------ + * Result: key: b01 10 11 11 00 00 + */ +static enum ice_status +ice_gen_key_word(u8 val, u8 valid, u8 dont_care, u8 nvr_mtch, u8 *key, + u8 *key_inv) +{ + u8 in_key = *key, in_key_inv = *key_inv; + u8 i; + + /* 'dont_care' and 'nvr_mtch' masks cannot overlap */ + if ((dont_care ^ nvr_mtch) != (dont_care | nvr_mtch)) + return ICE_ERR_CFG; + + *key = 0; + *key_inv = 0; + + /* encode the 8 bits into 8-bit key and 8-bit key invert */ + for (i = 0; i < 8; i++) { + *key >>= 1; + *key_inv >>= 1; + + if (!(valid & 0x1)) { /* change only valid bits */ + *key |= (in_key & 0x1) << 7; + *key_inv |= (in_key_inv & 0x1) << 7; + } else if (dont_care & 0x1) { /* don't care bit */ + *key |= ICE_DC_KEY << 7; + *key_inv |= ICE_DC_KEYINV << 7; + } else if (nvr_mtch & 0x1) { /* never match bit */ + *key |= ICE_NM_KEY << 7; + *key_inv |= ICE_NM_KEYINV << 7; + } else if (val & 0x01) { /* exact 1 match */ + *key |= ICE_1_KEY << 7; + *key_inv |= ICE_1_KEYINV << 7; + } else { /* exact 0 match */ + *key |= ICE_0_KEY << 7; + *key_inv |= ICE_0_KEYINV << 7; + } + + dont_care >>= 1; + nvr_mtch >>= 1; + valid >>= 1; + val >>= 1; + in_key >>= 1; + in_key_inv >>= 1; + } + + return ICE_SUCCESS; +} + +/** + * ice_bits_max_set - determine if the number of bits set is within a maximum + * @mask: pointer to the byte array which is the mask + * @size: the number of bytes in the mask + * @max: the max number of set bits + * + * This function determines if there are at most 'max' number of bits set in an + * array. Returns true if the number for bits set is <= max or will return false + * otherwise. 
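+ *
+ * For example (illustrative values): for mask = { 0x0F, 0x01 }, five bits
+ * are set, so a call with max = 4 returns false while a call with max = 5
+ * returns true.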
+ */ +static bool ice_bits_max_set(const u8 *mask, u16 size, u16 max) +{ + u16 count = 0; + u16 i; + + /* check each byte */ + for (i = 0; i < size; i++) { + /* if 0, go to next byte */ + if (!mask[i]) + continue; + + /* We know there is at least one set bit in this byte because of + * the above check; if we already have found 'max' number of + * bits set, then we can return failure now. + */ + if (count == max) + return false; + + /* count the bits in this byte, checking threshold */ + count += ice_hweight8(mask[i]); + if (count > max) + return false; + } + + return true; +} + +/** + * ice_set_key - generate a variable sized key with multiples of 16-bits + * @key: pointer to where the key will be stored + * @size: the size of the complete key in bytes (must be even) + * @val: array of 8-bit values that makes up the value portion of the key + * @upd: array of 8-bit masks that determine what key portion to update + * @dc: array of 8-bit masks that make up the don't care mask + * @nm: array of 8-bit masks that make up the never match mask + * @off: the offset of the first byte in the key to update + * @len: the number of bytes in the key update + * + * This function generates a key from a value, a don't care mask and a never + * match mask. + * upd, dc, and nm are optional parameters, and can be NULL: + * upd == NULL --> udp mask is all 1's (update all bits) + * dc == NULL --> dc mask is all 0's (no don't care bits) + * nm == NULL --> nm mask is all 0's (no never match bits) + */ +enum ice_status +ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off, + u16 len) +{ + u16 half_size; + u16 i; + + /* size must be a multiple of 2 bytes. */ + if (size % 2) + return ICE_ERR_CFG; + half_size = size / 2; + + if (off + len > half_size) + return ICE_ERR_CFG; + + /* Make sure at most one bit is set in the never match mask. Having more + * than one never match mask bit set will cause HW to consume excessive + * power otherwise; this is a power management efficiency check. + */ +#define ICE_NVR_MTCH_BITS_MAX 1 + if (nm && !ice_bits_max_set(nm, len, ICE_NVR_MTCH_BITS_MAX)) + return ICE_ERR_CFG; + + for (i = 0; i < len; i++) + if (ice_gen_key_word(val[i], upd ? upd[i] : 0xff, + dc ? dc[i] : 0, nm ? nm[i] : 0, + key + off + i, key + half_size + off + i)) + return ICE_ERR_CFG; + + return ICE_SUCCESS; +} + +/** + * ice_acquire_global_cfg_lock + * @hw: pointer to the HW structure + * @access: access type (read or write) + * + * This function will request ownership of the global config lock for reading + * or writing of the package. When attempting to obtain write access, the + * caller must check for the following two return values: + * + * ICE_SUCCESS - Means the caller has acquired the global config lock + * and can perform writing of the package. + * ICE_ERR_AQ_NO_WORK - Indicates another driver has already written the + * package or has found that no update was necessary; in + * this case, the caller can just skip performing any + * update of the package. 
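+ *
+ * Illustrative write-access pattern (a sketch added for clarity; see
+ * ice_dwnld_cfg_bufs() below for the actual use in this file):
+ *
+ *	status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
+ *	if (status == ICE_ERR_AQ_NO_WORK) {
+ *		... another driver already loaded the package, skip ...
+ *	} else if (!status) {
+ *		... download the package buffers ...
+ *		ice_release_global_cfg_lock(hw);
+ *	}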
+ */ +static enum ice_status +ice_acquire_global_cfg_lock(struct ice_hw *hw, + enum ice_aq_res_access_type access) +{ + enum ice_status status; + + ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); + + status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, access, + ICE_GLOBAL_CFG_LOCK_TIMEOUT); + + if (status == ICE_ERR_AQ_NO_WORK) + ice_debug(hw, ICE_DBG_PKG, + "Global config lock: No work to do\n"); + + return status; +} + +/** + * ice_release_global_cfg_lock + * @hw: pointer to the HW structure + * + * This function will release the global config lock. + */ +static void ice_release_global_cfg_lock(struct ice_hw *hw) +{ + ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID); +} + +/** + * ice_acquire_change_lock + * @hw: pointer to the HW structure + * @access: access type (read or write) + * + * This function will request ownership of the change lock. + */ +enum ice_status +ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access) +{ + ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); + + return ice_acquire_res(hw, ICE_CHANGE_LOCK_RES_ID, access, + ICE_CHANGE_LOCK_TIMEOUT); +} + +/** + * ice_release_change_lock + * @hw: pointer to the HW structure + * + * This function will release the change lock using the proper Admin Command. + */ +void ice_release_change_lock(struct ice_hw *hw) +{ + ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); + + ice_release_res(hw, ICE_CHANGE_LOCK_RES_ID); +} + +/** + * ice_aq_download_pkg + * @hw: pointer to the hardware structure + * @pkg_buf: the package buffer to transfer + * @buf_size: the size of the package buffer + * @last_buf: last buffer indicator + * @error_offset: returns error offset + * @error_info: returns error information + * @cd: pointer to command details structure or NULL + * + * Download Package (0x0C40) + */ +static enum ice_status +ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, + u16 buf_size, bool last_buf, u32 *error_offset, + u32 *error_info, struct ice_sq_cd *cd) +{ + struct ice_aqc_download_pkg *cmd; + struct ice_aq_desc desc; + enum ice_status status; + + ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); + + if (error_offset) + *error_offset = 0; + if (error_info) + *error_info = 0; + + cmd = &desc.params.download_pkg; + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_download_pkg); + desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); + + if (last_buf) + cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF; + + status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd); + if (status == ICE_ERR_AQ_ERROR) { + /* Read error from buffer only when the FW returned an error */ + struct ice_aqc_download_pkg_resp *resp; + + resp = (struct ice_aqc_download_pkg_resp *)pkg_buf; + if (error_offset) + *error_offset = LE32_TO_CPU(resp->error_offset); + if (error_info) + *error_info = LE32_TO_CPU(resp->error_info); + } + + return status; +} + +/** + * ice_aq_update_pkg + * @hw: pointer to the hardware structure + * @pkg_buf: the package cmd buffer + * @buf_size: the size of the package cmd buffer + * @last_buf: last buffer indicator + * @error_offset: returns error offset + * @error_info: returns error information + * @cd: pointer to command details structure or NULL + * + * Update Package (0x0C42) + */ +static enum ice_status +ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, u16 buf_size, + bool last_buf, u32 *error_offset, u32 *error_info, + struct ice_sq_cd *cd) +{ + struct ice_aqc_download_pkg *cmd; + struct ice_aq_desc desc; + enum ice_status status; + + ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); + + if 
(error_offset) + *error_offset = 0; + if (error_info) + *error_info = 0; + + cmd = &desc.params.download_pkg; + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_pkg); + desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); + + if (last_buf) + cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF; + + status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd); + if (status == ICE_ERR_AQ_ERROR) { + /* Read error from buffer only when the FW returned an error */ + struct ice_aqc_download_pkg_resp *resp; + + resp = (struct ice_aqc_download_pkg_resp *)pkg_buf; + if (error_offset) + *error_offset = LE32_TO_CPU(resp->error_offset); + if (error_info) + *error_info = LE32_TO_CPU(resp->error_info); + } + + return status; +} + +/** + * ice_find_seg_in_pkg + * @hw: pointer to the hardware structure + * @seg_type: the segment type to search for (i.e., SEGMENT_TYPE_CPK) + * @pkg_hdr: pointer to the package header to be searched + * + * This function searches a package file for a particular segment type. On + * success it returns a pointer to the segment header, otherwise it will + * return NULL. + */ +static struct ice_generic_seg_hdr * +ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type, + struct ice_pkg_hdr *pkg_hdr) +{ + u32 i; + + ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); + ice_debug(hw, ICE_DBG_PKG, "Package format version: %d.%d.%d.%d\n", + pkg_hdr->pkg_format_ver.major, pkg_hdr->pkg_format_ver.minor, + pkg_hdr->pkg_format_ver.update, + pkg_hdr->pkg_format_ver.draft); + + /* Search all package segments for the requested segment type */ + for (i = 0; i < LE32_TO_CPU(pkg_hdr->seg_count); i++) { + struct ice_generic_seg_hdr *seg; + + seg = (struct ice_generic_seg_hdr *) + ((u8 *)pkg_hdr + LE32_TO_CPU(pkg_hdr->seg_offset[i])); + + if (LE32_TO_CPU(seg->seg_type) == seg_type) + return seg; + } + + return NULL; +} + +/** + * ice_update_pkg + * @hw: pointer to the hardware structure + * @bufs: pointer to an array of buffers + * @count: the number of buffers in the array + * + * Obtains change lock and updates package. + */ +enum ice_status +ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count) +{ + enum ice_status status; + u32 offset, info, i; + + status = ice_acquire_change_lock(hw, ICE_RES_WRITE); + if (status) + return status; + + for (i = 0; i < count; i++) { + struct ice_buf_hdr *bh = (struct ice_buf_hdr *)(bufs + i); + bool last = ((i + 1) == count); + + status = ice_aq_update_pkg(hw, bh, LE16_TO_CPU(bh->data_end), + last, &offset, &info, NULL); + + if (status) { + ice_debug(hw, ICE_DBG_PKG, + "Update pkg failed: err %d off %d inf %d\n", + status, offset, info); + break; + } + } + + ice_release_change_lock(hw); + + return status; +} + +/** + * ice_dwnld_cfg_bufs + * @hw: pointer to the hardware structure + * @bufs: pointer to an array of buffers + * @count: the number of buffers in the array + * + * Obtains global config lock and downloads the package configuration buffers + * to the firmware. Metadata buffers are skipped, and the first metadata buffer + * found indicates that the rest of the buffers are all metadata buffers. + */ +static enum ice_status +ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count) +{ + enum ice_status status; + struct ice_buf_hdr *bh; + u32 offset, info, i; + + if (!bufs || !count) + return ICE_ERR_PARAM; + + /* If the first buffer's first section has its metadata bit set + * then there are no buffers to be downloaded, and the operation is + * considered a success. 
+ */ + bh = (struct ice_buf_hdr *)bufs; + if (LE32_TO_CPU(bh->section_entry[0].type) & ICE_METADATA_BUF) + return ICE_SUCCESS; + + /* reset pkg_dwnld_status in case this function is called in the + * reset/rebuild flow + */ + hw->pkg_dwnld_status = ICE_AQ_RC_OK; + + status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE); + if (status) { + if (status == ICE_ERR_AQ_NO_WORK) + hw->pkg_dwnld_status = ICE_AQ_RC_EEXIST; + else + hw->pkg_dwnld_status = hw->adminq.sq_last_status; + return status; + } + + for (i = 0; i < count; i++) { + bool last = ((i + 1) == count); + + if (!last) { + /* check next buffer for metadata flag */ + bh = (struct ice_buf_hdr *)(bufs + i + 1); + + /* A set metadata flag in the next buffer will signal + * that the current buffer will be the last buffer + * downloaded + */ + if (LE16_TO_CPU(bh->section_count)) + if (LE32_TO_CPU(bh->section_entry[0].type) & + ICE_METADATA_BUF) + last = true; + } + + bh = (struct ice_buf_hdr *)(bufs + i); + + status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE, last, + &offset, &info, NULL); + + /* Save AQ status from download package */ + hw->pkg_dwnld_status = hw->adminq.sq_last_status; + if (status) { + ice_debug(hw, ICE_DBG_PKG, + "Pkg download failed: err %d off %d inf %d\n", + status, offset, info); + break; + } + + if (last) + break; + } + + ice_release_global_cfg_lock(hw); + + return status; +} + +/** + * ice_aq_get_pkg_info_list + * @hw: pointer to the hardware structure + * @pkg_info: the buffer which will receive the information list + * @buf_size: the size of the pkg_info information buffer + * @cd: pointer to command details structure or NULL + * + * Get Package Info List (0x0C43) + */ +static enum ice_status +ice_aq_get_pkg_info_list(struct ice_hw *hw, + struct ice_aqc_get_pkg_info_resp *pkg_info, + u16 buf_size, struct ice_sq_cd *cd) +{ + struct ice_aq_desc desc; + + ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_pkg_info_list); + + return ice_aq_send_cmd(hw, &desc, pkg_info, buf_size, cd); +} + +/** + * ice_download_pkg + * @hw: pointer to the hardware structure + * @ice_seg: pointer to the segment of the package to be downloaded + * + * Handles the download of a complete package. + */ +static enum ice_status +ice_download_pkg(struct ice_hw *hw, struct ice_seg *ice_seg) +{ + struct ice_buf_table *ice_buf_tbl; + + ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); + ice_debug(hw, ICE_DBG_PKG, "Segment format version: %d.%d.%d.%d\n", + ice_seg->hdr.seg_format_ver.major, + ice_seg->hdr.seg_format_ver.minor, + ice_seg->hdr.seg_format_ver.update, + ice_seg->hdr.seg_format_ver.draft); + + ice_debug(hw, ICE_DBG_PKG, "Seg: type 0x%X, size %d, name %s\n", + LE32_TO_CPU(ice_seg->hdr.seg_type), + LE32_TO_CPU(ice_seg->hdr.seg_size), ice_seg->hdr.seg_id); + + ice_buf_tbl = ice_find_buf_table(ice_seg); + + ice_debug(hw, ICE_DBG_PKG, "Seg buf count: %d\n", + LE32_TO_CPU(ice_buf_tbl->buf_count)); + + return ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array, + LE32_TO_CPU(ice_buf_tbl->buf_count)); +} + +/** + * ice_init_pkg_info + * @hw: pointer to the hardware structure + * @pkg_hdr: pointer to the driver's package hdr + * + * Saves off the package details into the HW structure. 
+ */ +static enum ice_status +ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr) +{ + struct ice_global_metadata_seg *meta_seg; + struct ice_generic_seg_hdr *seg_hdr; + + ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); + if (!pkg_hdr) + return ICE_ERR_PARAM; + + meta_seg = (struct ice_global_metadata_seg *) + ice_find_seg_in_pkg(hw, SEGMENT_TYPE_METADATA, pkg_hdr); + if (meta_seg) { + hw->pkg_ver = meta_seg->pkg_ver; + ice_memcpy(hw->pkg_name, meta_seg->pkg_name, + sizeof(hw->pkg_name), ICE_NONDMA_TO_NONDMA); + + ice_debug(hw, ICE_DBG_PKG, "Pkg: %d.%d.%d.%d, %s\n", + meta_seg->pkg_ver.major, meta_seg->pkg_ver.minor, + meta_seg->pkg_ver.update, meta_seg->pkg_ver.draft, + meta_seg->pkg_name); + } else { + ice_debug(hw, ICE_DBG_INIT, + "Did not find metadata segment in driver package\n"); + return ICE_ERR_CFG; + } + + seg_hdr = ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg_hdr); + if (seg_hdr) { + hw->ice_pkg_ver = seg_hdr->seg_format_ver; + ice_memcpy(hw->ice_pkg_name, seg_hdr->seg_id, + sizeof(hw->ice_pkg_name), ICE_NONDMA_TO_NONDMA); + + ice_debug(hw, ICE_DBG_PKG, "Ice Seg: %d.%d.%d.%d, %s\n", + seg_hdr->seg_format_ver.major, + seg_hdr->seg_format_ver.minor, + seg_hdr->seg_format_ver.update, + seg_hdr->seg_format_ver.draft, + seg_hdr->seg_id); + } else { + ice_debug(hw, ICE_DBG_INIT, + "Did not find ice segment in driver package\n"); + return ICE_ERR_CFG; + } + + return ICE_SUCCESS; +} + +/** + * ice_get_pkg_info + * @hw: pointer to the hardware structure + * + * Store details of the package currently loaded in HW into the HW structure. + */ +static enum ice_status ice_get_pkg_info(struct ice_hw *hw) +{ + struct ice_aqc_get_pkg_info_resp *pkg_info; + enum ice_status status; + u16 size; + u32 i; + + ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); + + size = ice_struct_size(pkg_info, pkg_info, ICE_PKG_CNT - 1); + pkg_info = (struct ice_aqc_get_pkg_info_resp *)ice_malloc(hw, size); + if (!pkg_info) + return ICE_ERR_NO_MEMORY; + + status = ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL); + if (status) + goto init_pkg_free_alloc; + + for (i = 0; i < LE32_TO_CPU(pkg_info->count); i++) { +#define ICE_PKG_FLAG_COUNT 4 + char flags[ICE_PKG_FLAG_COUNT + 1] = { 0 }; + u8 place = 0; + + if (pkg_info->pkg_info[i].is_active) { + flags[place++] = 'A'; + hw->active_pkg_ver = pkg_info->pkg_info[i].ver; + hw->active_track_id = + LE32_TO_CPU(pkg_info->pkg_info[i].track_id); + ice_memcpy(hw->active_pkg_name, + pkg_info->pkg_info[i].name, + sizeof(pkg_info->pkg_info[i].name), + ICE_NONDMA_TO_NONDMA); + hw->active_pkg_in_nvm = pkg_info->pkg_info[i].is_in_nvm; + } + if (pkg_info->pkg_info[i].is_active_at_boot) + flags[place++] = 'B'; + if (pkg_info->pkg_info[i].is_modified) + flags[place++] = 'M'; + if (pkg_info->pkg_info[i].is_in_nvm) + flags[place++] = 'N'; + + ice_debug(hw, ICE_DBG_PKG, "Pkg[%d]: %d.%d.%d.%d,%s,%s\n", + i, pkg_info->pkg_info[i].ver.major, + pkg_info->pkg_info[i].ver.minor, + pkg_info->pkg_info[i].ver.update, + pkg_info->pkg_info[i].ver.draft, + pkg_info->pkg_info[i].name, flags); + } + +init_pkg_free_alloc: + ice_free(hw, pkg_info); + + return status; +} + +/** + * ice_verify_pkg - verify package + * @pkg: pointer to the package buffer + * @len: size of the package buffer + * + * Verifies various attributes of the package file, including length, format + * version, and the requirement of at least one segment. 
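+ *
+ * Concretely, the checks below require that the buffer is at least
+ * sizeof(struct ice_pkg_hdr) bytes, that the package format version matches
+ * the ICE_PKG_FMT_VER_* values exactly, that seg_count is at least one, and
+ * that the segment offset array and every segment header and body fit
+ * entirely within len.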
+ */ +static enum ice_status ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len) +{ + u32 seg_count; + u32 i; + + if (len < sizeof(*pkg)) + return ICE_ERR_BUF_TOO_SHORT; + + if (pkg->pkg_format_ver.major != ICE_PKG_FMT_VER_MAJ || + pkg->pkg_format_ver.minor != ICE_PKG_FMT_VER_MNR || + pkg->pkg_format_ver.update != ICE_PKG_FMT_VER_UPD || + pkg->pkg_format_ver.draft != ICE_PKG_FMT_VER_DFT) + return ICE_ERR_CFG; + + /* pkg must have at least one segment */ + seg_count = LE32_TO_CPU(pkg->seg_count); + if (seg_count < 1) + return ICE_ERR_CFG; + + /* make sure segment array fits in package length */ + if (len < ice_struct_size(pkg, seg_offset, seg_count - 1)) + return ICE_ERR_BUF_TOO_SHORT; + + /* all segments must fit within length */ + for (i = 0; i < seg_count; i++) { + u32 off = LE32_TO_CPU(pkg->seg_offset[i]); + struct ice_generic_seg_hdr *seg; + + /* segment header must fit */ + if (len < off + sizeof(*seg)) + return ICE_ERR_BUF_TOO_SHORT; + + seg = (struct ice_generic_seg_hdr *)((u8 *)pkg + off); + + /* segment body must fit */ + if (len < off + LE32_TO_CPU(seg->seg_size)) + return ICE_ERR_BUF_TOO_SHORT; + } + + return ICE_SUCCESS; +} + +/** + * ice_free_seg - free package segment pointer + * @hw: pointer to the hardware structure + * + * Frees the package segment pointer in the proper manner, depending on if the + * segment was allocated or just the passed in pointer was stored. + */ +void ice_free_seg(struct ice_hw *hw) +{ + if (hw->pkg_copy) { + ice_free(hw, hw->pkg_copy); + hw->pkg_copy = NULL; + hw->pkg_size = 0; + } + hw->seg = NULL; +} + +/** + * ice_init_pkg_regs - initialize additional package registers + * @hw: pointer to the hardware structure + */ +static void ice_init_pkg_regs(struct ice_hw *hw) +{ +#define ICE_SW_BLK_INP_MASK_L 0xFFFFFFFF +#define ICE_SW_BLK_INP_MASK_H 0x0000FFFF +#define ICE_SW_BLK_IDX 0 + if (hw->dcf_enabled) + return; + + /* setup Switch block input mask, which is 48-bits in two parts */ + wr32(hw, GL_PREEXT_L2_PMASK0(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_L); + wr32(hw, GL_PREEXT_L2_PMASK1(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_H); +} + +/** + * ice_chk_pkg_version - check package version for compatibility with driver + * @pkg_ver: pointer to a version structure to check + * + * Check to make sure that the package about to be downloaded is compatible with + * the driver. To be compatible, the major and minor components of the package + * version must match our ICE_PKG_SUPP_VER_MAJ and ICE_PKG_SUPP_VER_MNR + * definitions. 
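+ *
+ * For example, if the supported version were 1.3 (an illustrative value for
+ * ICE_PKG_SUPP_VER_MAJ.ICE_PKG_SUPP_VER_MNR), a 1.3.20.0 package would be
+ * accepted while 1.4.0.0 or 2.3.0.0 would be rejected; the update and draft
+ * fields are not compared.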
+ */ +static enum ice_status ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver) +{ + if (pkg_ver->major != ICE_PKG_SUPP_VER_MAJ || + pkg_ver->minor != ICE_PKG_SUPP_VER_MNR) + return ICE_ERR_NOT_SUPPORTED; + + return ICE_SUCCESS; +} + +/** + * ice_chk_pkg_compat + * @hw: pointer to the hardware structure + * @ospkg: pointer to the package hdr + * @seg: pointer to the package segment hdr + * + * This function checks the package version compatibility with driver and NVM + */ +static enum ice_status +ice_chk_pkg_compat(struct ice_hw *hw, struct ice_pkg_hdr *ospkg, + struct ice_seg **seg) +{ + struct ice_aqc_get_pkg_info_resp *pkg; + enum ice_status status; + u16 size; + u32 i; + + ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); + + /* Check package version compatibility */ + status = ice_chk_pkg_version(&hw->pkg_ver); + if (status) { + ice_debug(hw, ICE_DBG_INIT, "Package version check failed.\n"); + return status; + } + + /* find ICE segment in given package */ + *seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, + ospkg); + if (!*seg) { + ice_debug(hw, ICE_DBG_INIT, "no ice segment in package.\n"); + return ICE_ERR_CFG; + } + + /* Check if FW is compatible with the OS package */ + size = ice_struct_size(pkg, pkg_info, ICE_PKG_CNT - 1); + pkg = (struct ice_aqc_get_pkg_info_resp *)ice_malloc(hw, size); + if (!pkg) + return ICE_ERR_NO_MEMORY; + + status = ice_aq_get_pkg_info_list(hw, pkg, size, NULL); + if (status) + goto fw_ddp_compat_free_alloc; + + for (i = 0; i < LE32_TO_CPU(pkg->count); i++) { + /* loop till we find the NVM package */ + if (!pkg->pkg_info[i].is_in_nvm) + continue; + if ((*seg)->hdr.seg_format_ver.major != + pkg->pkg_info[i].ver.major || + (*seg)->hdr.seg_format_ver.minor > + pkg->pkg_info[i].ver.minor) { + status = ICE_ERR_FW_DDP_MISMATCH; + ice_debug(hw, ICE_DBG_INIT, + "OS package is not compatible with NVM.\n"); + } + /* done processing NVM package so break */ + break; + } +fw_ddp_compat_free_alloc: + ice_free(hw, pkg); + return status; +} + +/** + * ice_init_pkg - initialize/download package + * @hw: pointer to the hardware structure + * @buf: pointer to the package buffer + * @len: size of the package buffer + * + * This function initializes a package. The package contains HW tables + * required to do packet processing. First, the function extracts package + * information such as version. Then it finds the ice configuration segment + * within the package; this function then saves a copy of the segment pointer + * within the supplied package buffer. Next, the function will cache any hints + * from the package, followed by downloading the package itself. Note, that if + * a previous PF driver has already downloaded the package successfully, then + * the current driver will not have to download the package again. + * + * The local package contents will be used to query default behavior and to + * update specific sections of the HW's version of the package (e.g. to update + * the parse graph to understand new protocols). + * + * This function stores a pointer to the package buffer memory, and it is + * expected that the supplied buffer will not be freed immediately. If the + * package buffer needs to be freed, such as when read from a file, use + * ice_copy_and_init_pkg() instead of directly calling ice_init_pkg() in this + * case. 
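+ *
+ * Illustrative call pattern (a sketch added for clarity) for a package image
+ * read from a file into a temporary buffer:
+ *
+ *	status = ice_copy_and_init_pkg(hw, file_buf, file_len);
+ *	... file_buf may be freed immediately; the internal copy is tracked
+ *	    by the driver and released later via ice_free_seg() ...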
+ */ +enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len) +{ + struct ice_pkg_hdr *pkg; + enum ice_status status; + struct ice_seg *seg; + + if (!buf || !len) + return ICE_ERR_PARAM; + + pkg = (struct ice_pkg_hdr *)buf; + status = ice_verify_pkg(pkg, len); + if (status) { + ice_debug(hw, ICE_DBG_INIT, "failed to verify pkg (err: %d)\n", + status); + return status; + } + + /* initialize package info */ + status = ice_init_pkg_info(hw, pkg); + if (status) + return status; + + /* before downloading the package, check package version for + * compatibility with driver + */ + status = ice_chk_pkg_compat(hw, pkg, &seg); + if (status) + return status; + + /* initialize package hints and then download package */ + ice_init_pkg_hints(hw, seg); + status = ice_download_pkg(hw, seg); + if (status == ICE_ERR_AQ_NO_WORK) { + ice_debug(hw, ICE_DBG_INIT, + "package previously loaded - no work.\n"); + status = ICE_SUCCESS; + } + + /* Get information on the package currently loaded in HW, then make sure + * the driver is compatible with this version. + */ + if (!status) { + status = ice_get_pkg_info(hw); + if (!status) + status = ice_chk_pkg_version(&hw->active_pkg_ver); + } + + if (!status) { + hw->seg = seg; + /* on successful package download update other required + * registers to support the package and fill HW tables + * with package content. + */ + ice_init_pkg_regs(hw); + ice_fill_blk_tbls(hw); + } else { + ice_debug(hw, ICE_DBG_INIT, "package load failed, %d\n", + status); + } + + return status; +} + +/** + * ice_copy_and_init_pkg - initialize/download a copy of the package + * @hw: pointer to the hardware structure + * @buf: pointer to the package buffer + * @len: size of the package buffer + * + * This function copies the package buffer, and then calls ice_init_pkg() to + * initialize the copied package contents. + * + * The copying is necessary if the package buffer supplied is constant, or if + * the memory may disappear shortly after calling this function. + * + * If the package buffer resides in the data segment and can be modified, the + * caller is free to use ice_init_pkg() instead of ice_copy_and_init_pkg(). + * + * However, if the package buffer needs to be copied first, such as when being + * read from a file, the caller should use ice_copy_and_init_pkg(). + * + * This function will first copy the package buffer, before calling + * ice_init_pkg(). The caller is free to immediately destroy the original + * package buffer, as the new copy will be managed by this function and + * related routines. + */ +enum ice_status ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len) +{ + enum ice_status status; + u8 *buf_copy; + + if (!buf || !len) + return ICE_ERR_PARAM; + + buf_copy = (u8 *)ice_memdup(hw, buf, len, ICE_NONDMA_TO_NONDMA); + + status = ice_init_pkg(hw, buf_copy, len); + if (status) { + /* Free the copy, since we failed to initialize the package */ + ice_free(hw, buf_copy); + } else { + /* Track the copied pkg so we can free it later */ + hw->pkg_copy = buf_copy; + hw->pkg_size = len; + } + + return status; +} + +/** + * ice_pkg_buf_alloc + * @hw: pointer to the HW structure + * + * Allocates a package buffer and returns a pointer to the buffer header. + * Note: all package contents must be in Little Endian form. 
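+ *
+ * Illustrative build sequence (a sketch added for clarity; ice_create_tunnel()
+ * below is a real user of this API):
+ *
+ *	bld = ice_pkg_buf_alloc(hw);
+ *	ice_pkg_buf_reserve_section(bld, n);
+ *	sect = ice_pkg_buf_alloc_section(bld, type, size);
+ *	... fill in sect; all contents must be Little Endian ...
+ *	ice_update_pkg(hw, ice_pkg_buf(bld), 1);
+ *	ice_pkg_buf_free(hw, bld);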
+ */ +static struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw) +{ + struct ice_buf_build *bld; + struct ice_buf_hdr *buf; + + bld = (struct ice_buf_build *)ice_malloc(hw, sizeof(*bld)); + if (!bld) + return NULL; + + buf = (struct ice_buf_hdr *)bld; + buf->data_end = CPU_TO_LE16(offsetof(struct ice_buf_hdr, + section_entry)); + return bld; +} + +/** + * ice_sw_fv_handler + * @sect_type: section type + * @section: pointer to section + * @index: index of the field vector entry to be returned + * @offset: ptr to variable that receives the offset in the field vector table + * + * This is a callback function that can be passed to ice_pkg_enum_entry. + * This function treats the given section as of type ice_sw_fv_section and + * enumerates offset field. "offset" is an index into the field vector + * vector table. + */ +static void * +ice_sw_fv_handler(u32 sect_type, void *section, u32 index, u32 *offset) +{ + struct ice_sw_fv_section *fv_section = + (struct ice_sw_fv_section *)section; + + if (!section || sect_type != ICE_SID_FLD_VEC_SW) + return NULL; + if (index >= LE16_TO_CPU(fv_section->count)) + return NULL; + if (offset) + /* "index" passed in to this function is relative to a given + * 4k block. To get to the true index into the field vector + * table need to add the relative index to the base_offset + * field of this section + */ + *offset = LE16_TO_CPU(fv_section->base_offset) + index; + return fv_section->fv + index; +} + +/** + * ice_get_sw_prof_type - determine switch profile type + * @hw: pointer to the HW structure + * @fv: pointer to the switch field vector + */ +static enum ice_prof_type +ice_get_sw_prof_type(struct ice_hw *hw, struct ice_fv *fv) +{ + u16 i; + + for (i = 0; i < hw->blk[ICE_BLK_SW].es.fvw; i++) { + /* UDP tunnel will have UDP_OF protocol ID and VNI offset */ + if (fv->ew[i].prot_id == (u8)ICE_PROT_UDP_OF && + fv->ew[i].off == ICE_VNI_OFFSET) + return ICE_PROF_TUN_UDP; + + /* GRE tunnel will have GRE protocol */ + if (fv->ew[i].prot_id == (u8)ICE_PROT_GRE_OF) + return ICE_PROF_TUN_GRE; + + /* PPPOE tunnel will have PPPOE protocol */ + if (fv->ew[i].prot_id == (u8)ICE_PROT_PPPOE) + return ICE_PROF_TUN_PPPOE; + } + + return ICE_PROF_NON_TUN; +} + +/** + * ice_get_sw_fv_bitmap - Get switch field vector bitmap based on profile type + * @hw: pointer to hardware structure + * @req_profs: type of profiles requested + * @bm: pointer to memory for returning the bitmap of field vectors + */ +void +ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type req_profs, + ice_bitmap_t *bm) +{ + struct ice_pkg_enum state; + struct ice_seg *ice_seg; + struct ice_fv *fv; + + ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM); + + if (req_profs == ICE_PROF_ALL) { + u16 i; + + for (i = 0; i < ICE_MAX_NUM_PROFILES; i++) + ice_set_bit(i, bm); + return; + } + + ice_zero_bitmap(bm, ICE_MAX_NUM_PROFILES); + + ice_seg = hw->seg; + do { + enum ice_prof_type prof_type; + u32 offset; + + fv = (struct ice_fv *) + ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW, + &offset, ice_sw_fv_handler); + ice_seg = NULL; + + if (fv) { + /* Determine field vector type */ + prof_type = ice_get_sw_prof_type(hw, fv); + + if (req_profs & prof_type) + ice_set_bit((u16)offset, bm); + } + } while (fv); +} + +/** + * ice_get_sw_fv_list + * @hw: pointer to the HW structure + * @prot_ids: field vector to search for with a given protocol ID + * @ids_cnt: lookup/protocol count + * @bm: bitmap of field vectors to consider + * @fv_list: Head of a list + * + * Finds all the field vector entries from 
switch block that contain + * a given protocol ID and returns a list of structures of type + * "ice_sw_fv_list_entry". Every structure in the list has a field vector + * definition and profile ID information + * NOTE: The caller of the function is responsible for freeing the memory + * allocated for every list entry. + */ +enum ice_status +ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt, + ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list) +{ + struct ice_sw_fv_list_entry *fvl; + struct ice_sw_fv_list_entry *tmp; + struct ice_pkg_enum state; + struct ice_seg *ice_seg; + struct ice_fv *fv; + u32 offset; + + ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM); + + if (!ids_cnt || !hw->seg) + return ICE_ERR_PARAM; + + ice_seg = hw->seg; + do { + u16 i; + + fv = (struct ice_fv *) + ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW, + &offset, ice_sw_fv_handler); + if (!fv) + break; + ice_seg = NULL; + + /* If field vector is not in the bitmap list, then skip this + * profile. + */ + if (!ice_is_bit_set(bm, (u16)offset)) + continue; + + for (i = 0; i < ids_cnt; i++) { + int j; + + /* This code assumes that if a switch field vector line + * has a matching protocol, then this line will contain + * the entries necessary to represent every field in + * that protocol header. + */ + for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++) + if (fv->ew[j].prot_id == prot_ids[i]) + break; + if (j >= hw->blk[ICE_BLK_SW].es.fvw) + break; + if (i + 1 == ids_cnt) { + fvl = (struct ice_sw_fv_list_entry *) + ice_malloc(hw, sizeof(*fvl)); + if (!fvl) + goto err; + fvl->fv_ptr = fv; + fvl->profile_id = offset; + LIST_ADD(&fvl->list_entry, fv_list); + break; + } + } + } while (fv); + if (LIST_EMPTY(fv_list)) + return ICE_ERR_CFG; + return ICE_SUCCESS; + +err: + LIST_FOR_EACH_ENTRY_SAFE(fvl, tmp, fv_list, ice_sw_fv_list_entry, + list_entry) { + LIST_DEL(&fvl->list_entry); + ice_free(hw, fvl); + } + + return ICE_ERR_NO_MEMORY; +} + +/** + * ice_init_prof_result_bm - Initialize the profile result index bitmap + * @hw: pointer to hardware structure + */ +void ice_init_prof_result_bm(struct ice_hw *hw) +{ + struct ice_pkg_enum state; + struct ice_seg *ice_seg; + struct ice_fv *fv; + + ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM); + + if (!hw->seg) + return; + + ice_seg = hw->seg; + do { + u32 off; + u16 i; + + fv = (struct ice_fv *) + ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW, + &off, ice_sw_fv_handler); + ice_seg = NULL; + if (!fv) + break; + + ice_zero_bitmap(hw->switch_info->prof_res_bm[off], + ICE_MAX_FV_WORDS); + + /* Determine empty field vector indices, these can be + * used for recipe results. Skip index 0, since it is + * always used for Switch ID. + */ + for (i = 1; i < ICE_MAX_FV_WORDS; i++) + if (fv->ew[i].prot_id == ICE_PROT_INVALID && + fv->ew[i].off == ICE_FV_OFFSET_INVAL) + ice_set_bit(i, + hw->switch_info->prof_res_bm[off]); + } while (fv); +} + +/** + * ice_pkg_buf_free + * @hw: pointer to the HW structure + * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) + * + * Frees a package buffer + */ +static void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld) +{ + ice_free(hw, bld); +} + +/** + * ice_pkg_buf_reserve_section + * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) + * @count: the number of sections to reserve + * + * Reserves one or more section table entries in a package buffer. This routine + * can be called multiple times as long as they are made before calling + * ice_pkg_buf_alloc_section(). 
Once ice_pkg_buf_alloc_section() + * is called once, the number of sections that can be allocated will not be able + * to be increased; not using all reserved sections is fine, but this will + * result in some wasted space in the buffer. + * Note: all package contents must be in Little Endian form. + */ +static enum ice_status +ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count) +{ + struct ice_buf_hdr *buf; + u16 section_count; + u16 data_end; + + if (!bld) + return ICE_ERR_PARAM; + + buf = (struct ice_buf_hdr *)&bld->buf; + + /* already an active section, can't increase table size */ + section_count = LE16_TO_CPU(buf->section_count); + if (section_count > 0) + return ICE_ERR_CFG; + + if (bld->reserved_section_table_entries + count > ICE_MAX_S_COUNT) + return ICE_ERR_CFG; + bld->reserved_section_table_entries += count; + + data_end = LE16_TO_CPU(buf->data_end) + + (count * sizeof(buf->section_entry[0])); + buf->data_end = CPU_TO_LE16(data_end); + + return ICE_SUCCESS; +} + +/** + * ice_pkg_buf_alloc_section + * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) + * @type: the section type value + * @size: the size of the section to reserve (in bytes) + * + * Reserves memory in the buffer for a section's content and updates the + * buffers' status accordingly. This routine returns a pointer to the first + * byte of the section start within the buffer, which is used to fill in the + * section contents. + * Note: all package contents must be in Little Endian form. + */ +static void * +ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size) +{ + struct ice_buf_hdr *buf; + u16 sect_count; + u16 data_end; + + if (!bld || !type || !size) + return NULL; + + buf = (struct ice_buf_hdr *)&bld->buf; + + /* check for enough space left in buffer */ + data_end = LE16_TO_CPU(buf->data_end); + + /* section start must align on 4 byte boundary */ + data_end = ICE_ALIGN(data_end, 4); + + if ((data_end + size) > ICE_MAX_S_DATA_END) + return NULL; + + /* check for more available section table entries */ + sect_count = LE16_TO_CPU(buf->section_count); + if (sect_count < bld->reserved_section_table_entries) { + void *section_ptr = ((u8 *)buf) + data_end; + + buf->section_entry[sect_count].offset = CPU_TO_LE16(data_end); + buf->section_entry[sect_count].size = CPU_TO_LE16(size); + buf->section_entry[sect_count].type = CPU_TO_LE32(type); + + data_end += size; + buf->data_end = CPU_TO_LE16(data_end); + + buf->section_count = CPU_TO_LE16(sect_count + 1); + return section_ptr; + } + + /* no free section table entries */ + return NULL; +} + +/** + * ice_pkg_buf_get_active_sections + * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) + * + * Returns the number of active sections. Before using the package buffer + * in an update package command, the caller should make sure that there is at + * least one active section - otherwise, the buffer is not legal and should + * not be used. + * Note: all package contents must be in Little Endian form. 
+ */ +static u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld) +{ + struct ice_buf_hdr *buf; + + if (!bld) + return 0; + + buf = (struct ice_buf_hdr *)&bld->buf; + return LE16_TO_CPU(buf->section_count); +} + +/** + * ice_pkg_buf + * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) + * + * Return a pointer to the buffer's header + */ +static struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld) +{ + if (!bld) + return NULL; + + return &bld->buf; +} + +/** + * ice_tunnel_port_in_use_hlpr - helper function to determine tunnel usage + * @hw: pointer to the HW structure + * @port: port to search for + * @index: optionally returns index + * + * Returns whether a port is already in use as a tunnel, and optionally its + * index + */ +static bool ice_tunnel_port_in_use_hlpr(struct ice_hw *hw, u16 port, u16 *index) +{ + u16 i; + + for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++) + if (hw->tnl.tbl[i].in_use && hw->tnl.tbl[i].port == port) { + if (index) + *index = i; + return true; + } + + return false; +} + +/** + * ice_tunnel_port_in_use + * @hw: pointer to the HW structure + * @port: port to search for + * @index: optionally returns index + * + * Returns whether a port is already in use as a tunnel, and optionally its + * index + */ +bool ice_tunnel_port_in_use(struct ice_hw *hw, u16 port, u16 *index) +{ + bool res; + + ice_acquire_lock(&hw->tnl_lock); + res = ice_tunnel_port_in_use_hlpr(hw, port, index); + ice_release_lock(&hw->tnl_lock); + + return res; +} + +/** + * ice_tunnel_get_type + * @hw: pointer to the HW structure + * @port: port to search for + * @type: returns tunnel index + * + * For a given port number, will return the type of tunnel. + */ +bool +ice_tunnel_get_type(struct ice_hw *hw, u16 port, enum ice_tunnel_type *type) +{ + bool res = false; + u16 i; + + ice_acquire_lock(&hw->tnl_lock); + + for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++) + if (hw->tnl.tbl[i].in_use && hw->tnl.tbl[i].port == port) { + *type = hw->tnl.tbl[i].type; + res = true; + break; + } + + ice_release_lock(&hw->tnl_lock); + + return res; +} + +/** + * ice_find_free_tunnel_entry + * @hw: pointer to the HW structure + * @type: tunnel type + * @index: optionally returns index + * + * Returns whether there is a free tunnel entry, and optionally its index + */ +static bool +ice_find_free_tunnel_entry(struct ice_hw *hw, enum ice_tunnel_type type, + u16 *index) +{ + u16 i; + + for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++) + if (hw->tnl.tbl[i].valid && !hw->tnl.tbl[i].in_use && + hw->tnl.tbl[i].type == type) { + if (index) + *index = i; + return true; + } + + return false; +} + +/** + * ice_get_open_tunnel_port - retrieve an open tunnel port + * @hw: pointer to the HW structure + * @type: tunnel type (TNL_ALL will return any open port) + * @port: returns open port + */ +bool +ice_get_open_tunnel_port(struct ice_hw *hw, enum ice_tunnel_type type, + u16 *port) +{ + bool res = false; + u16 i; + + ice_acquire_lock(&hw->tnl_lock); + + for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++) + if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use && + (type == TNL_ALL || hw->tnl.tbl[i].type == type)) { + *port = hw->tnl.tbl[i].port; + res = true; + break; + } + + ice_release_lock(&hw->tnl_lock); + + return res; +} + +/** + * ice_create_tunnel + * @hw: pointer to the HW structure + * @type: type of tunnel + * @port: port of tunnel to create + * + * Create a tunnel by updating the parse graph in the parser. 
We do that by + * creating a package buffer with the tunnel info and issuing an update package + * command. + */ +enum ice_status +ice_create_tunnel(struct ice_hw *hw, enum ice_tunnel_type type, u16 port) +{ + struct ice_boost_tcam_section *sect_rx, *sect_tx; + enum ice_status status = ICE_ERR_MAX_LIMIT; + struct ice_buf_build *bld; + u16 index; + + ice_acquire_lock(&hw->tnl_lock); + + if (ice_tunnel_port_in_use_hlpr(hw, port, &index)) { + hw->tnl.tbl[index].ref++; + status = ICE_SUCCESS; + goto ice_create_tunnel_end; + } + + if (!ice_find_free_tunnel_entry(hw, type, &index)) { + status = ICE_ERR_OUT_OF_RANGE; + goto ice_create_tunnel_end; + } + + bld = ice_pkg_buf_alloc(hw); + if (!bld) { + status = ICE_ERR_NO_MEMORY; + goto ice_create_tunnel_end; + } + + /* allocate 2 sections, one for Rx parser, one for Tx parser */ + if (ice_pkg_buf_reserve_section(bld, 2)) + goto ice_create_tunnel_err; + + sect_rx = (struct ice_boost_tcam_section *) + ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM, + sizeof(*sect_rx)); + if (!sect_rx) + goto ice_create_tunnel_err; + sect_rx->count = CPU_TO_LE16(1); + + sect_tx = (struct ice_boost_tcam_section *) + ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM, + sizeof(*sect_tx)); + if (!sect_tx) + goto ice_create_tunnel_err; + sect_tx->count = CPU_TO_LE16(1); + + /* copy original boost entry to update package buffer */ + ice_memcpy(sect_rx->tcam, hw->tnl.tbl[index].boost_entry, + sizeof(*sect_rx->tcam), ICE_NONDMA_TO_NONDMA); + + /* over-write the never-match dest port key bits with the encoded port + * bits + */ + ice_set_key((u8 *)§_rx->tcam[0].key, sizeof(sect_rx->tcam[0].key), + (u8 *)&port, NULL, NULL, NULL, + (u16)offsetof(struct ice_boost_key_value, hv_dst_port_key), + sizeof(sect_rx->tcam[0].key.key.hv_dst_port_key)); + + /* exact copy of entry to Tx section entry */ + ice_memcpy(sect_tx->tcam, sect_rx->tcam, sizeof(*sect_tx->tcam), + ICE_NONDMA_TO_NONDMA); + + status = ice_update_pkg(hw, ice_pkg_buf(bld), 1); + if (!status) { + hw->tnl.tbl[index].port = port; + hw->tnl.tbl[index].in_use = true; + hw->tnl.tbl[index].ref = 1; + } + +ice_create_tunnel_err: + ice_pkg_buf_free(hw, bld); + +ice_create_tunnel_end: + ice_release_lock(&hw->tnl_lock); + + return status; +} + +/** + * ice_destroy_tunnel + * @hw: pointer to the HW structure + * @port: port of tunnel to destroy (ignored if the all parameter is true) + * @all: flag that states to destroy all tunnels + * + * Destroys a tunnel or all tunnels by creating an update package buffer + * targeting the specific updates requested and then performing an update + * package. 
+ */ +enum ice_status ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all) +{ + struct ice_boost_tcam_section *sect_rx, *sect_tx; + enum ice_status status = ICE_ERR_MAX_LIMIT; + struct ice_buf_build *bld; + u16 count = 0; + u16 index; + u16 size; + u16 i; + + ice_acquire_lock(&hw->tnl_lock); + + if (!all && ice_tunnel_port_in_use_hlpr(hw, port, &index)) + if (hw->tnl.tbl[index].ref > 1) { + hw->tnl.tbl[index].ref--; + status = ICE_SUCCESS; + goto ice_destroy_tunnel_end; + } + + /* determine count */ + for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++) + if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use && + (all || hw->tnl.tbl[i].port == port)) + count++; + + if (!count) { + status = ICE_ERR_PARAM; + goto ice_destroy_tunnel_end; + } + + /* size of section - there is at least one entry */ + size = ice_struct_size(sect_rx, tcam, count - 1); + + bld = ice_pkg_buf_alloc(hw); + if (!bld) { + status = ICE_ERR_NO_MEMORY; + goto ice_destroy_tunnel_end; + } + + /* allocate 2 sections, one for Rx parser, one for Tx parser */ + if (ice_pkg_buf_reserve_section(bld, 2)) + goto ice_destroy_tunnel_err; + + sect_rx = (struct ice_boost_tcam_section *) + ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM, + size); + if (!sect_rx) + goto ice_destroy_tunnel_err; + sect_rx->count = CPU_TO_LE16(1); + + sect_tx = (struct ice_boost_tcam_section *) + ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM, + size); + if (!sect_tx) + goto ice_destroy_tunnel_err; + sect_tx->count = CPU_TO_LE16(1); + + /* copy original boost entry to update package buffer, one copy to Rx + * section, another copy to the Tx section + */ + for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++) + if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use && + (all || hw->tnl.tbl[i].port == port)) { + ice_memcpy(sect_rx->tcam + i, + hw->tnl.tbl[i].boost_entry, + sizeof(*sect_rx->tcam), + ICE_NONDMA_TO_NONDMA); + ice_memcpy(sect_tx->tcam + i, + hw->tnl.tbl[i].boost_entry, + sizeof(*sect_tx->tcam), + ICE_NONDMA_TO_NONDMA); + hw->tnl.tbl[i].marked = true; + } + + status = ice_update_pkg(hw, ice_pkg_buf(bld), 1); + if (!status) + for (i = 0; i < hw->tnl.count && + i < ICE_TUNNEL_MAX_ENTRIES; i++) + if (hw->tnl.tbl[i].marked) { + hw->tnl.tbl[i].ref = 0; + hw->tnl.tbl[i].port = 0; + hw->tnl.tbl[i].in_use = false; + hw->tnl.tbl[i].marked = false; + } + +ice_destroy_tunnel_err: + ice_pkg_buf_free(hw, bld); + +ice_destroy_tunnel_end: + ice_release_lock(&hw->tnl_lock); + + return status; +} + +/** + * ice_find_prot_off - find prot ID and offset pair, based on prof and FV index + * @hw: pointer to the hardware structure + * @blk: hardware block + * @prof: profile ID + * @fv_idx: field vector word index + * @prot: variable to receive the protocol ID + * @off: variable to receive the protocol offset + */ +enum ice_status +ice_find_prot_off(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 fv_idx, + u8 *prot, u16 *off) +{ + struct ice_fv_word *fv_ext; + + if (prof >= hw->blk[blk].es.count) + return ICE_ERR_PARAM; + + if (fv_idx >= hw->blk[blk].es.fvw) + return ICE_ERR_PARAM; + + fv_ext = hw->blk[blk].es.t + (prof * hw->blk[blk].es.fvw); + + *prot = fv_ext[fv_idx].prot_id; + *off = fv_ext[fv_idx].off; + + return ICE_SUCCESS; +} + +/* PTG Management */ + +/** + * ice_ptg_find_ptype - Search for packet type group using packet type (ptype) + * @hw: pointer to the hardware structure + * @blk: HW block + * @ptype: the ptype to search for + * @ptg: pointer to variable that receives the PTG + * + * This function will search 
the PTGs for a particular ptype, returning the + * PTG ID that contains it through the PTG parameter, with the value of + * ICE_DEFAULT_PTG (0) meaning it is part the default PTG. + */ +static enum ice_status +ice_ptg_find_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 *ptg) +{ + if (ptype >= ICE_XLT1_CNT || !ptg) + return ICE_ERR_PARAM; + + *ptg = hw->blk[blk].xlt1.ptypes[ptype].ptg; + return ICE_SUCCESS; +} + +/** + * ice_ptg_alloc_val - Allocates a new packet type group ID by value + * @hw: pointer to the hardware structure + * @blk: HW block + * @ptg: the PTG to allocate + * + * This function allocates a given packet type group ID specified by the PTG + * parameter. + */ +static void ice_ptg_alloc_val(struct ice_hw *hw, enum ice_block blk, u8 ptg) +{ + hw->blk[blk].xlt1.ptg_tbl[ptg].in_use = true; +} + +/** + * ice_ptg_remove_ptype - Removes ptype from a particular packet type group + * @hw: pointer to the hardware structure + * @blk: HW block + * @ptype: the ptype to remove + * @ptg: the PTG to remove the ptype from + * + * This function will remove the ptype from the specific PTG, and move it to + * the default PTG (ICE_DEFAULT_PTG). + */ +static enum ice_status +ice_ptg_remove_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg) +{ + struct ice_ptg_ptype **ch; + struct ice_ptg_ptype *p; + + if (ptype > ICE_XLT1_CNT - 1) + return ICE_ERR_PARAM; + + if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use) + return ICE_ERR_DOES_NOT_EXIST; + + /* Should not happen if .in_use is set, bad config */ + if (!hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype) + return ICE_ERR_CFG; + + /* find the ptype within this PTG, and bypass the link over it */ + p = hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype; + ch = &hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype; + while (p) { + if (ptype == (p - hw->blk[blk].xlt1.ptypes)) { + *ch = p->next_ptype; + break; + } + + ch = &p->next_ptype; + p = p->next_ptype; + } + + hw->blk[blk].xlt1.ptypes[ptype].ptg = ICE_DEFAULT_PTG; + hw->blk[blk].xlt1.ptypes[ptype].next_ptype = NULL; + + return ICE_SUCCESS; +} + +/** + * ice_ptg_add_mv_ptype - Adds/moves ptype to a particular packet type group + * @hw: pointer to the hardware structure + * @blk: HW block + * @ptype: the ptype to add or move + * @ptg: the PTG to add or move the ptype to + * + * This function will either add or move a ptype to a particular PTG depending + * on if the ptype is already part of another group. Note that using a + * a destination PTG ID of ICE_DEFAULT_PTG (0) will move the ptype to the + * default PTG. + */ +static enum ice_status +ice_ptg_add_mv_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg) +{ + enum ice_status status; + u8 original_ptg; + + if (ptype > ICE_XLT1_CNT - 1) + return ICE_ERR_PARAM; + + if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use && ptg != ICE_DEFAULT_PTG) + return ICE_ERR_DOES_NOT_EXIST; + + status = ice_ptg_find_ptype(hw, blk, ptype, &original_ptg); + if (status) + return status; + + /* Is ptype already in the correct PTG? */ + if (original_ptg == ptg) + return ICE_SUCCESS; + + /* Remove from original PTG and move back to the default PTG */ + if (original_ptg != ICE_DEFAULT_PTG) + ice_ptg_remove_ptype(hw, blk, ptype, original_ptg); + + /* Moving to default PTG? 
Then we're done with this request */ + if (ptg == ICE_DEFAULT_PTG) + return ICE_SUCCESS; + + /* Add ptype to PTG at beginning of list */ + hw->blk[blk].xlt1.ptypes[ptype].next_ptype = + hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype; + hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype = + &hw->blk[blk].xlt1.ptypes[ptype]; + + hw->blk[blk].xlt1.ptypes[ptype].ptg = ptg; + hw->blk[blk].xlt1.t[ptype] = ptg; + + return ICE_SUCCESS; +} + +/* Block / table size info */ +struct ice_blk_size_details { + u16 xlt1; /* # XLT1 entries */ + u16 xlt2; /* # XLT2 entries */ + u16 prof_tcam; /* # profile ID TCAM entries */ + u16 prof_id; /* # profile IDs */ + u8 prof_cdid_bits; /* # CDID one-hot bits used in key */ + u16 prof_redir; /* # profile redirection entries */ + u16 es; /* # extraction sequence entries */ + u16 fvw; /* # field vector words */ + u8 overwrite; /* overwrite existing entries allowed */ + u8 reverse; /* reverse FV order */ +}; + +static const struct ice_blk_size_details blk_sizes[ICE_BLK_COUNT] = { + /** + * Table Definitions + * XLT1 - Number of entries in XLT1 table + * XLT2 - Number of entries in XLT2 table + * TCAM - Number of entries Profile ID TCAM table + * CDID - Control Domain ID of the hardware block + * PRED - Number of entries in the Profile Redirection Table + * FV - Number of entries in the Field Vector + * FVW - Width (in WORDs) of the Field Vector + * OVR - Overwrite existing table entries + * REV - Reverse FV + */ + /* XLT1 , XLT2 ,TCAM, PID,CDID,PRED, FV, FVW */ + /* Overwrite , Reverse FV */ + /* SW */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 256, 0, 256, 256, 48, + false, false }, + /* ACL */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128, 0, 128, 128, 32, + false, false }, + /* FD */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128, 0, 128, 128, 24, + false, true }, + /* RSS */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128, 0, 128, 128, 24, + true, true }, + /* PE */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 64, 32, 0, 32, 32, 24, + false, false }, +}; + +enum ice_sid_all { + ICE_SID_XLT1_OFF = 0, + ICE_SID_XLT2_OFF, + ICE_SID_PR_OFF, + ICE_SID_PR_REDIR_OFF, + ICE_SID_ES_OFF, + ICE_SID_OFF_COUNT, +}; + +/* Characteristic handling */ + +/** + * ice_match_prop_lst - determine if properties of two lists match + * @list1: first properties list + * @list2: second properties list + * + * Count, cookies and the order must match in order to be considered equivalent. 
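+ *
+ * For example, two lists with cookies {A, B} and {A, B} match, while
+ * {A, B} and {B, A} do not, since ordering encodes priority. A minimal
+ * caller-side sketch, for illustration only (mirroring
+ * ice_find_dup_props_vsig() below; "chs" is assumed to be a
+ * caller-built list head):
+ *
+ *	if (ice_match_prop_lst(chs, &hw->blk[blk].xlt2.vsig_tbl[i].prop_lst))
+ *		return ICE_SUCCESS;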
+ */ +static bool +ice_match_prop_lst(struct LIST_HEAD_TYPE *list1, struct LIST_HEAD_TYPE *list2) +{ + struct ice_vsig_prof *tmp1; + struct ice_vsig_prof *tmp2; + u16 chk_count = 0; + u16 count = 0; + + /* compare counts */ + LIST_FOR_EACH_ENTRY(tmp1, list1, ice_vsig_prof, list) { + count++; + } + LIST_FOR_EACH_ENTRY(tmp2, list2, ice_vsig_prof, list) { + chk_count++; + } + if (!count || count != chk_count) + return false; + + tmp1 = LIST_FIRST_ENTRY(list1, struct ice_vsig_prof, list); + tmp2 = LIST_FIRST_ENTRY(list2, struct ice_vsig_prof, list); + + /* profile cookies must compare, and in the exact same order to take + * into account priority + */ + while (count--) { + if (tmp2->profile_cookie != tmp1->profile_cookie) + return false; + + tmp1 = LIST_NEXT_ENTRY(tmp1, struct ice_vsig_prof, list); + tmp2 = LIST_NEXT_ENTRY(tmp2, struct ice_vsig_prof, list); + } + + return true; +} + +/* VSIG Management */ + +/** + * ice_vsig_find_vsi - find a VSIG that contains a specified VSI + * @hw: pointer to the hardware structure + * @blk: HW block + * @vsi: VSI of interest + * @vsig: pointer to receive the VSI group + * + * This function will lookup the VSI entry in the XLT2 list and return + * the VSI group its associated with. + */ +enum ice_status +ice_vsig_find_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 *vsig) +{ + if (!vsig || vsi >= ICE_MAX_VSI) + return ICE_ERR_PARAM; + + /* As long as there's a default or valid VSIG associated with the input + * VSI, the functions returns a success. Any handling of VSIG will be + * done by the following add, update or remove functions. + */ + *vsig = hw->blk[blk].xlt2.vsis[vsi].vsig; + + return ICE_SUCCESS; +} + +/** + * ice_vsig_alloc_val - allocate a new VSIG by value + * @hw: pointer to the hardware structure + * @blk: HW block + * @vsig: the VSIG to allocate + * + * This function will allocate a given VSIG specified by the VSIG parameter. + */ +static u16 ice_vsig_alloc_val(struct ice_hw *hw, enum ice_block blk, u16 vsig) +{ + u16 idx = vsig & ICE_VSIG_IDX_M; + + if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use) { + INIT_LIST_HEAD(&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst); + hw->blk[blk].xlt2.vsig_tbl[idx].in_use = true; + } + + return ICE_VSIG_VALUE(idx, hw->pf_id); +} + +/** + * ice_vsig_alloc - Finds a free entry and allocates a new VSIG + * @hw: pointer to the hardware structure + * @blk: HW block + * + * This function will iterate through the VSIG list and mark the first + * unused entry for the new VSIG entry as used and return that value. + */ +static u16 ice_vsig_alloc(struct ice_hw *hw, enum ice_block blk) +{ + u16 i; + + for (i = 1; i < ICE_MAX_VSIGS; i++) + if (!hw->blk[blk].xlt2.vsig_tbl[i].in_use) + return ice_vsig_alloc_val(hw, blk, i); + + return ICE_DEFAULT_VSIG; +} + +/** + * ice_find_dup_props_vsig - find VSI group with a specified set of properties + * @hw: pointer to the hardware structure + * @blk: HW block + * @chs: characteristic list + * @vsig: returns the VSIG with the matching profiles, if found + * + * Each VSIG is associated with a characteristic set; i.e. all VSIs under + * a group have the same characteristic set. To check if there exists a VSIG + * which has the same characteristics as the input characteristics; this + * function will iterate through the XLT2 list and return the VSIG that has a + * matching configuration. In order to make sure that priorities are accounted + * for, the list must match exactly, including the order in which the + * characteristics are listed. 
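+ *
+ * Minimal usage sketch, for illustration only ("chs" is assumed to be a
+ * characteristic list already built by the caller; a non-zero return
+ * means no duplicate exists, so a fresh VSIG is allocated instead):
+ *
+ *	u16 vsig;
+ *
+ *	if (ice_find_dup_props_vsig(hw, ICE_BLK_RSS, &chs, &vsig))
+ *		vsig = ice_vsig_alloc(hw, ICE_BLK_RSS);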
+ */ +static enum ice_status +ice_find_dup_props_vsig(struct ice_hw *hw, enum ice_block blk, + struct LIST_HEAD_TYPE *chs, u16 *vsig) +{ + struct ice_xlt2 *xlt2 = &hw->blk[blk].xlt2; + u16 i; + + for (i = 0; i < xlt2->count; i++) { + if (xlt2->vsig_tbl[i].in_use && + ice_match_prop_lst(chs, &xlt2->vsig_tbl[i].prop_lst)) { + *vsig = ICE_VSIG_VALUE(i, hw->pf_id); + return ICE_SUCCESS; + } + } + + return ICE_ERR_DOES_NOT_EXIST; +} + +/** + * ice_vsig_free - free VSI group + * @hw: pointer to the hardware structure + * @blk: HW block + * @vsig: VSIG to remove + * + * The function will remove all VSIs associated with the input VSIG and move + * them to the DEFAULT_VSIG and mark the VSIG available. + */ +static enum ice_status +ice_vsig_free(struct ice_hw *hw, enum ice_block blk, u16 vsig) +{ + struct ice_vsig_prof *dtmp, *del; + struct ice_vsig_vsi *vsi_cur; + u16 idx; + + idx = vsig & ICE_VSIG_IDX_M; + if (idx >= ICE_MAX_VSIGS) + return ICE_ERR_PARAM; + + if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use) + return ICE_ERR_DOES_NOT_EXIST; + + hw->blk[blk].xlt2.vsig_tbl[idx].in_use = false; + + vsi_cur = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi; + /* If the VSIG has at least 1 VSI then iterate through the + * list and remove the VSIs before deleting the group. + */ + if (vsi_cur) { + /* remove all vsis associated with this VSIG XLT2 entry */ + do { + struct ice_vsig_vsi *tmp = vsi_cur->next_vsi; + + vsi_cur->vsig = ICE_DEFAULT_VSIG; + vsi_cur->changed = 1; + vsi_cur->next_vsi = NULL; + vsi_cur = tmp; + } while (vsi_cur); + + /* NULL terminate head of VSI list */ + hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi = NULL; + } + + /* free characteristic list */ + LIST_FOR_EACH_ENTRY_SAFE(del, dtmp, + &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst, + ice_vsig_prof, list) { + LIST_DEL(&del->list); + ice_free(hw, del); + } + + /* if VSIG characteristic list was cleared for reset + * re-initialize the list head + */ + INIT_LIST_HEAD(&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst); + + return ICE_SUCCESS; +} + +/** + * ice_vsig_remove_vsi - remove VSI from VSIG + * @hw: pointer to the hardware structure + * @blk: HW block + * @vsi: VSI to remove + * @vsig: VSI group to remove from + * + * The function will remove the input VSI from its VSI group and move it + * to the DEFAULT_VSIG. 
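+ *
+ * Illustrative sketch only (error handling trimmed): detaching a VSI
+ * from whatever non-default group it currently belongs to follows the
+ * same sequence used by ice_vsig_add_mv_vsi() below:
+ *
+ *	u16 orig_vsig;
+ *
+ *	if (!ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig) &&
+ *	    orig_vsig != ICE_DEFAULT_VSIG)
+ *		ice_vsig_remove_vsi(hw, blk, vsi, orig_vsig);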
+ */ +static enum ice_status +ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig) +{ + struct ice_vsig_vsi **vsi_head, *vsi_cur, *vsi_tgt; + u16 idx; + + idx = vsig & ICE_VSIG_IDX_M; + + if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS) + return ICE_ERR_PARAM; + + if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use) + return ICE_ERR_DOES_NOT_EXIST; + + /* entry already in default VSIG, don't have to remove */ + if (idx == ICE_DEFAULT_VSIG) + return ICE_SUCCESS; + + vsi_head = &hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi; + if (!(*vsi_head)) + return ICE_ERR_CFG; + + vsi_tgt = &hw->blk[blk].xlt2.vsis[vsi]; + vsi_cur = (*vsi_head); + + /* iterate the VSI list, skip over the entry to be removed */ + while (vsi_cur) { + if (vsi_tgt == vsi_cur) { + (*vsi_head) = vsi_cur->next_vsi; + break; + } + vsi_head = &vsi_cur->next_vsi; + vsi_cur = vsi_cur->next_vsi; + } + + /* verify if VSI was removed from group list */ + if (!vsi_cur) + return ICE_ERR_DOES_NOT_EXIST; + + vsi_cur->vsig = ICE_DEFAULT_VSIG; + vsi_cur->changed = 1; + vsi_cur->next_vsi = NULL; + + return ICE_SUCCESS; +} + +/** + * ice_vsig_add_mv_vsi - add or move a VSI to a VSI group + * @hw: pointer to the hardware structure + * @blk: HW block + * @vsi: VSI to move + * @vsig: destination VSI group + * + * This function will move or add the input VSI to the target VSIG. + * The function will find the original VSIG the VSI belongs to and + * move the entry to the DEFAULT_VSIG, update the original VSIG and + * then move entry to the new VSIG. + */ +static enum ice_status +ice_vsig_add_mv_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig) +{ + struct ice_vsig_vsi *tmp; + enum ice_status status; + u16 orig_vsig, idx; + + idx = vsig & ICE_VSIG_IDX_M; + + if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS) + return ICE_ERR_PARAM; + + /* if VSIG not in use and VSIG is not default type this VSIG + * doesn't exist. 
+ */ + if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use && + vsig != ICE_DEFAULT_VSIG) + return ICE_ERR_DOES_NOT_EXIST; + + status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig); + if (status) + return status; + + /* no update required if vsigs match */ + if (orig_vsig == vsig) + return ICE_SUCCESS; + + if (orig_vsig != ICE_DEFAULT_VSIG) { + /* remove entry from orig_vsig and add to default VSIG */ + status = ice_vsig_remove_vsi(hw, blk, vsi, orig_vsig); + if (status) + return status; + } + + if (idx == ICE_DEFAULT_VSIG) + return ICE_SUCCESS; + + /* Create VSI entry and add VSIG and prop_mask values */ + hw->blk[blk].xlt2.vsis[vsi].vsig = vsig; + hw->blk[blk].xlt2.vsis[vsi].changed = 1; + + /* Add new entry to the head of the VSIG list */ + tmp = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi; + hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi = + &hw->blk[blk].xlt2.vsis[vsi]; + hw->blk[blk].xlt2.vsis[vsi].next_vsi = tmp; + hw->blk[blk].xlt2.t[vsi] = vsig; + + return ICE_SUCCESS; +} + +/** + * ice_prof_has_mask_idx - determine if profile index masking is identical + * @hw: pointer to the hardware structure + * @blk: HW block + * @prof: profile to check + * @idx: profile index to check + * @masks: masks to match + */ +static bool +ice_prof_has_mask_idx(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 idx, + u16 mask) +{ + bool expect_no_mask = false; + bool found = false; + bool match = false; + u16 i; + + /* If mask is 0x0000 or 0xffff, then there is no masking */ + if (mask == 0 || mask == 0xffff) + expect_no_mask = true; + + /* Scan the enabled masks on this profile, for the specified idx */ + for (i = hw->blk[blk].masks.first; i < hw->blk[blk].masks.first + + hw->blk[blk].masks.count; i++) + if (hw->blk[blk].es.mask_ena[prof] & BIT(i)) + if (hw->blk[blk].masks.masks[i].in_use && + hw->blk[blk].masks.masks[i].idx == idx) { + found = true; + if (hw->blk[blk].masks.masks[i].mask == mask) + match = true; + break; + } + + if (expect_no_mask) { + if (found) + return false; + } else { + if (!match) + return false; + } + + return true; +} + +/** + * ice_prof_has_mask - determine if profile masking is identical + * @hw: pointer to the hardware structure + * @blk: HW block + * @prof: profile to check + * @masks: masks to match + */ +static bool +ice_prof_has_mask(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 *masks) +{ + u16 i; + + /* es->mask_ena[prof] will have the mask */ + for (i = 0; i < hw->blk[blk].es.fvw; i++) + if (!ice_prof_has_mask_idx(hw, blk, prof, i, masks[i])) + return false; + + return true; +} + +/** + * ice_find_prof_id_with_mask - find profile ID for a given field vector + * @hw: pointer to the hardware structure + * @blk: HW block + * @fv: field vector to search for + * @masks: masks for fv + * @prof_id: receives the profile ID + */ +static enum ice_status +ice_find_prof_id_with_mask(struct ice_hw *hw, enum ice_block blk, + struct ice_fv_word *fv, u16 *masks, u8 *prof_id) +{ + struct ice_es *es = &hw->blk[blk].es; + u8 i; + + for (i = 0; i < (u8)es->count; i++) { + u16 off = i * es->fvw; + + if (memcmp(&es->t[off], fv, es->fvw * sizeof(*fv))) + continue; + + /* check if masks settings are the same for this profile */ + if (masks && !ice_prof_has_mask(hw, blk, i, masks)) + continue; + + *prof_id = i; + return ICE_SUCCESS; + } + + return ICE_ERR_DOES_NOT_EXIST; +} + +/** + * ice_prof_id_rsrc_type - get profile ID resource type for a block type + * @blk: the block type + * @rsrc_type: pointer to variable to receive the resource type + */ +static bool ice_prof_id_rsrc_type(enum ice_block 
blk, u16 *rsrc_type) +{ + switch (blk) { + case ICE_BLK_SW: + *rsrc_type = ICE_AQC_RES_TYPE_SWITCH_PROF_BLDR_PROFID; + break; + case ICE_BLK_ACL: + *rsrc_type = ICE_AQC_RES_TYPE_ACL_PROF_BLDR_PROFID; + break; + case ICE_BLK_FD: + *rsrc_type = ICE_AQC_RES_TYPE_FD_PROF_BLDR_PROFID; + break; + case ICE_BLK_RSS: + *rsrc_type = ICE_AQC_RES_TYPE_HASH_PROF_BLDR_PROFID; + break; + case ICE_BLK_PE: + *rsrc_type = ICE_AQC_RES_TYPE_QHASH_PROF_BLDR_PROFID; + break; + default: + return false; + } + return true; +} + +/** + * ice_tcam_ent_rsrc_type - get TCAM entry resource type for a block type + * @blk: the block type + * @rsrc_type: pointer to variable to receive the resource type + */ +static bool ice_tcam_ent_rsrc_type(enum ice_block blk, u16 *rsrc_type) +{ + switch (blk) { + case ICE_BLK_SW: + *rsrc_type = ICE_AQC_RES_TYPE_SWITCH_PROF_BLDR_TCAM; + break; + case ICE_BLK_ACL: + *rsrc_type = ICE_AQC_RES_TYPE_ACL_PROF_BLDR_TCAM; + break; + case ICE_BLK_FD: + *rsrc_type = ICE_AQC_RES_TYPE_FD_PROF_BLDR_TCAM; + break; + case ICE_BLK_RSS: + *rsrc_type = ICE_AQC_RES_TYPE_HASH_PROF_BLDR_TCAM; + break; + case ICE_BLK_PE: + *rsrc_type = ICE_AQC_RES_TYPE_QHASH_PROF_BLDR_TCAM; + break; + default: + return false; + } + return true; +} + +/** + * ice_alloc_tcam_ent - allocate hardware TCAM entry + * @hw: pointer to the HW struct + * @blk: the block to allocate the TCAM for + * @tcam_idx: pointer to variable to receive the TCAM entry + * + * This function allocates a new entry in a Profile ID TCAM for a specific + * block. + */ +static enum ice_status +ice_alloc_tcam_ent(struct ice_hw *hw, enum ice_block blk, u16 *tcam_idx) +{ + u16 res_type; + + if (!ice_tcam_ent_rsrc_type(blk, &res_type)) + return ICE_ERR_PARAM; + + return ice_alloc_hw_res(hw, res_type, 1, true, tcam_idx); +} + +/** + * ice_free_tcam_ent - free hardware TCAM entry + * @hw: pointer to the HW struct + * @blk: the block from which to free the TCAM entry + * @tcam_idx: the TCAM entry to free + * + * This function frees an entry in a Profile ID TCAM for a specific block. + */ +static enum ice_status +ice_free_tcam_ent(struct ice_hw *hw, enum ice_block blk, u16 tcam_idx) +{ + u16 res_type; + + if (!ice_tcam_ent_rsrc_type(blk, &res_type)) + return ICE_ERR_PARAM; + + return ice_free_hw_res(hw, res_type, 1, &tcam_idx); +} + +/** + * ice_alloc_prof_id - allocate profile ID + * @hw: pointer to the HW struct + * @blk: the block to allocate the profile ID for + * @prof_id: pointer to variable to receive the profile ID + * + * This function allocates a new profile ID, which also corresponds to a Field + * Vector (Extraction Sequence) entry. + */ +static enum ice_status +ice_alloc_prof_id(struct ice_hw *hw, enum ice_block blk, u8 *prof_id) +{ + enum ice_status status; + u16 res_type; + u16 get_prof; + + if (!ice_prof_id_rsrc_type(blk, &res_type)) + return ICE_ERR_PARAM; + + status = ice_alloc_hw_res(hw, res_type, 1, false, &get_prof); + if (!status) + *prof_id = (u8)get_prof; + + return status; +} + +/** + * ice_free_prof_id - free profile ID + * @hw: pointer to the HW struct + * @blk: the block from which to free the profile ID + * @prof_id: the profile ID to free + * + * This function frees a profile ID, which also corresponds to a Field Vector. 
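+ *
+ * Sketch of the expected allocate/free pairing, for illustration only
+ * (the profile programming that would normally sit between the two
+ * calls is omitted):
+ *
+ *	u8 prof_id;
+ *
+ *	if (!ice_alloc_prof_id(hw, ICE_BLK_FD, &prof_id))
+ *		ice_free_prof_id(hw, ICE_BLK_FD, prof_id);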
+ */ +static enum ice_status +ice_free_prof_id(struct ice_hw *hw, enum ice_block blk, u8 prof_id) +{ + u16 tmp_prof_id = (u16)prof_id; + u16 res_type; + + if (!ice_prof_id_rsrc_type(blk, &res_type)) + return ICE_ERR_PARAM; + + return ice_free_hw_res(hw, res_type, 1, &tmp_prof_id); +} + +/** + * ice_prof_inc_ref - increment reference count for profile + * @hw: pointer to the HW struct + * @blk: the block from which to free the profile ID + * @prof_id: the profile ID for which to increment the reference count + */ +static enum ice_status +ice_prof_inc_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id) +{ + if (prof_id > hw->blk[blk].es.count) + return ICE_ERR_PARAM; + + hw->blk[blk].es.ref_count[prof_id]++; + + return ICE_SUCCESS; +} + +/** + * ice_write_prof_mask_reg - write profile mask register + * @hw: pointer to the HW struct + * @blk: hardware block + * @mask_idx: mask index + * @idx: index of the FV which will use the mask + * @mask: the 16-bit mask + */ +static void +ice_write_prof_mask_reg(struct ice_hw *hw, enum ice_block blk, u16 mask_idx, + u16 idx, u16 mask) +{ + u32 offset; + u32 val; + + switch (blk) { + case ICE_BLK_RSS: + offset = GLQF_HMASK(mask_idx); + val = (idx << GLQF_HMASK_MSK_INDEX_S) & + GLQF_HMASK_MSK_INDEX_M; + val |= (mask << GLQF_HMASK_MASK_S) & GLQF_HMASK_MASK_M; + break; + case ICE_BLK_FD: + offset = GLQF_FDMASK(mask_idx); + val = (idx << GLQF_FDMASK_MSK_INDEX_S) & + GLQF_FDMASK_MSK_INDEX_M; + val |= (mask << GLQF_FDMASK_MASK_S) & + GLQF_FDMASK_MASK_M; + break; + default: + ice_debug(hw, ICE_DBG_PKG, "No profile masks for block %d\n", + blk); + return; + } + + wr32(hw, offset, val); + ice_debug(hw, ICE_DBG_PKG, "write mask, blk %d (%d): %x = %x\n", + blk, idx, offset, val); +} + +/** + * ice_write_prof_mask_enable_res - write profile mask enable register + * @hw: pointer to the HW struct + * @blk: hardware block + * @prof_id: profile ID + * @enable_mask: enable mask + */ +static void +ice_write_prof_mask_enable_res(struct ice_hw *hw, enum ice_block blk, + u16 prof_id, u32 enable_mask) +{ + u32 offset; + + switch (blk) { + case ICE_BLK_RSS: + offset = GLQF_HMASK_SEL(prof_id); + break; + case ICE_BLK_FD: + offset = GLQF_FDMASK_SEL(prof_id); + break; + default: + ice_debug(hw, ICE_DBG_PKG, "No profile masks for block %d\n", + blk); + return; + } + + wr32(hw, offset, enable_mask); + ice_debug(hw, ICE_DBG_PKG, "write mask enable, blk %d (%d): %x = %x\n", + blk, prof_id, offset, enable_mask); +} + +/** + * ice_init_prof_masks - initial prof masks + * @hw: pointer to the HW struct + * @blk: hardware block + */ +static void ice_init_prof_masks(struct ice_hw *hw, enum ice_block blk) +{ + u16 per_pf; + u16 i; + + ice_init_lock(&hw->blk[blk].masks.lock); + + per_pf = ICE_PROF_MASK_COUNT / hw->dev_caps.num_funcs; + + hw->blk[blk].masks.count = per_pf; + hw->blk[blk].masks.first = hw->pf_id * per_pf; + + ice_memset(hw->blk[blk].masks.masks, 0, + sizeof(hw->blk[blk].masks.masks), ICE_NONDMA_MEM); + + for (i = hw->blk[blk].masks.first; + i < hw->blk[blk].masks.first + hw->blk[blk].masks.count; i++) + ice_write_prof_mask_reg(hw, blk, i, 0, 0); +} + +/** + * ice_init_all_prof_masks - initial all prof masks + * @hw: pointer to the HW struct + */ +void ice_init_all_prof_masks(struct ice_hw *hw) +{ + ice_init_prof_masks(hw, ICE_BLK_RSS); + ice_init_prof_masks(hw, ICE_BLK_FD); +} + +/** + * ice_alloc_prof_mask - allocate profile mask + * @hw: pointer to the HW struct + * @blk: hardware block + * @idx: index of FV which will use the mask + * @mask: the 16-bit mask + * @mask_idx: 
variable to receive the mask index + */ +static enum ice_status +ice_alloc_prof_mask(struct ice_hw *hw, enum ice_block blk, u16 idx, u16 mask, + u16 *mask_idx) +{ + bool found_unused = false, found_copy = false; + enum ice_status status = ICE_ERR_MAX_LIMIT; + u16 unused_idx = 0, copy_idx = 0; + u16 i; + + if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD) + return ICE_ERR_PARAM; + + ice_acquire_lock(&hw->blk[blk].masks.lock); + + for (i = hw->blk[blk].masks.first; + i < hw->blk[blk].masks.first + hw->blk[blk].masks.count; i++) + if (hw->blk[blk].masks.masks[i].in_use) { + /* if mask is in use and it exactly duplicates the + * desired mask and index, then in can be reused + */ + if (hw->blk[blk].masks.masks[i].mask == mask && + hw->blk[blk].masks.masks[i].idx == idx) { + found_copy = true; + copy_idx = i; + break; + } + } else { + /* save off unused index, but keep searching in case + * there is an exact match later on + */ + if (!found_unused) { + found_unused = true; + unused_idx = i; + } + } + + if (found_copy) + i = copy_idx; + else if (found_unused) + i = unused_idx; + else + goto err_ice_alloc_prof_mask; + + /* update mask for a new entry */ + if (found_unused) { + hw->blk[blk].masks.masks[i].in_use = true; + hw->blk[blk].masks.masks[i].mask = mask; + hw->blk[blk].masks.masks[i].idx = idx; + hw->blk[blk].masks.masks[i].ref = 0; + ice_write_prof_mask_reg(hw, blk, i, idx, mask); + } + + hw->blk[blk].masks.masks[i].ref++; + *mask_idx = i; + status = ICE_SUCCESS; + +err_ice_alloc_prof_mask: + ice_release_lock(&hw->blk[blk].masks.lock); + + return status; +} + +/** + * ice_free_prof_mask - free profile mask + * @hw: pointer to the HW struct + * @blk: hardware block + * @mask_idx: index of mask + */ +static enum ice_status +ice_free_prof_mask(struct ice_hw *hw, enum ice_block blk, u16 mask_idx) +{ + if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD) + return ICE_ERR_PARAM; + + if (!(mask_idx >= hw->blk[blk].masks.first && + mask_idx < hw->blk[blk].masks.first + hw->blk[blk].masks.count)) + return ICE_ERR_DOES_NOT_EXIST; + + ice_acquire_lock(&hw->blk[blk].masks.lock); + + if (!hw->blk[blk].masks.masks[mask_idx].in_use) + goto exit_ice_free_prof_mask; + + if (hw->blk[blk].masks.masks[mask_idx].ref > 1) { + hw->blk[blk].masks.masks[mask_idx].ref--; + goto exit_ice_free_prof_mask; + } + + /* remove mask */ + hw->blk[blk].masks.masks[mask_idx].in_use = false; + hw->blk[blk].masks.masks[mask_idx].mask = 0; + hw->blk[blk].masks.masks[mask_idx].idx = 0; + + /* update mask as unused entry */ + ice_debug(hw, ICE_DBG_PKG, "Free mask, blk %d, mask %d\n", blk, + mask_idx); + ice_write_prof_mask_reg(hw, blk, mask_idx, 0, 0); + +exit_ice_free_prof_mask: + ice_release_lock(&hw->blk[blk].masks.lock); + + return ICE_SUCCESS; +} + +/** + * ice_free_prof_masks - free all profile masks for a profile + * @hw: pointer to the HW struct + * @blk: hardware block + * @prof_id: profile ID + */ +static enum ice_status +ice_free_prof_masks(struct ice_hw *hw, enum ice_block blk, u16 prof_id) +{ + u32 mask_bm; + u16 i; + + if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD) + return ICE_ERR_PARAM; + + mask_bm = hw->blk[blk].es.mask_ena[prof_id]; + for (i = 0; i < BITS_PER_BYTE * sizeof(mask_bm); i++) + if (mask_bm & BIT(i)) + ice_free_prof_mask(hw, blk, i); + + return ICE_SUCCESS; +} + +/** + * ice_shutdown_prof_masks - releases lock for masking + * @hw: pointer to the HW struct + * @blk: hardware block + * + * This should be called before unloading the driver + */ +static void ice_shutdown_prof_masks(struct ice_hw *hw, enum ice_block blk) 
+{ + u16 i; + + ice_acquire_lock(&hw->blk[blk].masks.lock); + + for (i = hw->blk[blk].masks.first; + i < hw->blk[blk].masks.first + hw->blk[blk].masks.count; i++) { + ice_write_prof_mask_reg(hw, blk, i, 0, 0); + + hw->blk[blk].masks.masks[i].in_use = false; + hw->blk[blk].masks.masks[i].idx = 0; + hw->blk[blk].masks.masks[i].mask = 0; + } + + ice_release_lock(&hw->blk[blk].masks.lock); + ice_destroy_lock(&hw->blk[blk].masks.lock); +} + +/** + * ice_shutdown_all_prof_masks - releases all locks for masking + * @hw: pointer to the HW struct + * @blk: hardware block + * + * This should be called before unloading the driver + */ +void ice_shutdown_all_prof_masks(struct ice_hw *hw) +{ + ice_shutdown_prof_masks(hw, ICE_BLK_RSS); + ice_shutdown_prof_masks(hw, ICE_BLK_FD); +} + +/** + * ice_update_prof_masking - set registers according to masking + * @hw: pointer to the HW struct + * @blk: hardware block + * @prof_id: profile ID + * @es: field vector + * @masks: masks + */ +static enum ice_status +ice_update_prof_masking(struct ice_hw *hw, enum ice_block blk, u16 prof_id, + struct ice_fv_word *es, u16 *masks) +{ + bool err = false; + u32 ena_mask = 0; + u16 idx; + u16 i; + + /* Only support FD and RSS masking, otherwise nothing to be done */ + if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD) + return ICE_SUCCESS; + + for (i = 0; i < hw->blk[blk].es.fvw; i++) + if (masks[i] && masks[i] != 0xFFFF) { + if (!ice_alloc_prof_mask(hw, blk, i, masks[i], &idx)) { + ena_mask |= BIT(idx); + } else { + /* not enough bitmaps */ + err = true; + break; + } + } + + if (err) { + /* free any bitmaps we have allocated */ + for (i = 0; i < BITS_PER_BYTE * sizeof(ena_mask); i++) + if (ena_mask & BIT(i)) + ice_free_prof_mask(hw, blk, i); + + return ICE_ERR_OUT_OF_RANGE; + } + + /* enable the masks for this profile */ + ice_write_prof_mask_enable_res(hw, blk, prof_id, ena_mask); + + /* store enabled masks with profile so that they can be freed later */ + hw->blk[blk].es.mask_ena[prof_id] = ena_mask; + + return ICE_SUCCESS; +} + +/** + * ice_write_es - write an extraction sequence to hardware + * @hw: pointer to the HW struct + * @blk: the block in which to write the extraction sequence + * @prof_id: the profile ID to write + * @fv: pointer to the extraction sequence to write - NULL to clear extraction + */ +static void +ice_write_es(struct ice_hw *hw, enum ice_block blk, u8 prof_id, + struct ice_fv_word *fv) +{ + u16 off; + + off = prof_id * hw->blk[blk].es.fvw; + if (!fv) { + ice_memset(&hw->blk[blk].es.t[off], 0, hw->blk[blk].es.fvw * + sizeof(*fv), ICE_NONDMA_MEM); + hw->blk[blk].es.written[prof_id] = false; + } else { + ice_memcpy(&hw->blk[blk].es.t[off], fv, hw->blk[blk].es.fvw * + sizeof(*fv), ICE_NONDMA_TO_NONDMA); + } +} + +/** + * ice_prof_dec_ref - decrement reference count for profile + * @hw: pointer to the HW struct + * @blk: the block from which to free the profile ID + * @prof_id: the profile ID for which to decrement the reference count + */ +static enum ice_status +ice_prof_dec_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id) +{ + if (prof_id > hw->blk[blk].es.count) + return ICE_ERR_PARAM; + + if (hw->blk[blk].es.ref_count[prof_id] > 0) { + if (!--hw->blk[blk].es.ref_count[prof_id]) { + ice_write_es(hw, blk, prof_id, NULL); + ice_free_prof_masks(hw, blk, prof_id); + return ice_free_prof_id(hw, blk, prof_id); + } + } + + return ICE_SUCCESS; +} + +/* Block / table section IDs */ +static const u32 ice_blk_sids[ICE_BLK_COUNT][ICE_SID_OFF_COUNT] = { + /* SWITCH */ + { ICE_SID_XLT1_SW, + ICE_SID_XLT2_SW, + 
ICE_SID_PROFID_TCAM_SW, + ICE_SID_PROFID_REDIR_SW, + ICE_SID_FLD_VEC_SW + }, + + /* ACL */ + { ICE_SID_XLT1_ACL, + ICE_SID_XLT2_ACL, + ICE_SID_PROFID_TCAM_ACL, + ICE_SID_PROFID_REDIR_ACL, + ICE_SID_FLD_VEC_ACL + }, + + /* FD */ + { ICE_SID_XLT1_FD, + ICE_SID_XLT2_FD, + ICE_SID_PROFID_TCAM_FD, + ICE_SID_PROFID_REDIR_FD, + ICE_SID_FLD_VEC_FD + }, + + /* RSS */ + { ICE_SID_XLT1_RSS, + ICE_SID_XLT2_RSS, + ICE_SID_PROFID_TCAM_RSS, + ICE_SID_PROFID_REDIR_RSS, + ICE_SID_FLD_VEC_RSS + }, + + /* PE */ + { ICE_SID_XLT1_PE, + ICE_SID_XLT2_PE, + ICE_SID_PROFID_TCAM_PE, + ICE_SID_PROFID_REDIR_PE, + ICE_SID_FLD_VEC_PE + } +}; + +/** + * ice_init_sw_xlt1_db - init software XLT1 database from HW tables + * @hw: pointer to the hardware structure + * @blk: the HW block to initialize + */ +static void ice_init_sw_xlt1_db(struct ice_hw *hw, enum ice_block blk) +{ + u16 pt; + + for (pt = 0; pt < hw->blk[blk].xlt1.count; pt++) { + u8 ptg; + + ptg = hw->blk[blk].xlt1.t[pt]; + if (ptg != ICE_DEFAULT_PTG) { + ice_ptg_alloc_val(hw, blk, ptg); + ice_ptg_add_mv_ptype(hw, blk, pt, ptg); + } + } +} + +/** + * ice_init_sw_xlt2_db - init software XLT2 database from HW tables + * @hw: pointer to the hardware structure + * @blk: the HW block to initialize + */ +static void ice_init_sw_xlt2_db(struct ice_hw *hw, enum ice_block blk) +{ + u16 vsi; + + for (vsi = 0; vsi < hw->blk[blk].xlt2.count; vsi++) { + u16 vsig; + + vsig = hw->blk[blk].xlt2.t[vsi]; + if (vsig) { + ice_vsig_alloc_val(hw, blk, vsig); + ice_vsig_add_mv_vsi(hw, blk, vsi, vsig); + /* no changes at this time, since this has been + * initialized from the original package + */ + hw->blk[blk].xlt2.vsis[vsi].changed = 0; + } + } +} + +/** + * ice_init_sw_db - init software database from HW tables + * @hw: pointer to the hardware structure + */ +static void ice_init_sw_db(struct ice_hw *hw) +{ + u16 i; + + for (i = 0; i < ICE_BLK_COUNT; i++) { + ice_init_sw_xlt1_db(hw, (enum ice_block)i); + ice_init_sw_xlt2_db(hw, (enum ice_block)i); + } +} + +/** + * ice_fill_tbl - Reads content of a single table type into database + * @hw: pointer to the hardware structure + * @block_id: Block ID of the table to copy + * @sid: Section ID of the table to copy + * + * Will attempt to read the entire content of a given table of a single block + * into the driver database. We assume that the buffer will always + * be as large or larger than the data contained in the package. If + * this condition is not met, there is most likely an error in the package + * contents. + */ +static void ice_fill_tbl(struct ice_hw *hw, enum ice_block block_id, u32 sid) +{ + u32 dst_len, sect_len, offset = 0; + struct ice_prof_redir_section *pr; + struct ice_prof_id_section *pid; + struct ice_xlt1_section *xlt1; + struct ice_xlt2_section *xlt2; + struct ice_sw_fv_section *es; + struct ice_pkg_enum state; + u8 *src, *dst; + void *sect; + + /* if the HW segment pointer is null then the first iteration of + * ice_pkg_enum_section() will fail. In this case the HW tables will + * not be filled and return success. 
+ */ + if (!hw->seg) { + ice_debug(hw, ICE_DBG_PKG, "hw->seg is NULL, tables are not filled\n"); + return; + } + + ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM); + + sect = ice_pkg_enum_section(hw->seg, &state, sid); + + while (sect) { + switch (sid) { + case ICE_SID_XLT1_SW: + case ICE_SID_XLT1_FD: + case ICE_SID_XLT1_RSS: + case ICE_SID_XLT1_ACL: + case ICE_SID_XLT1_PE: + xlt1 = (struct ice_xlt1_section *)sect; + src = xlt1->value; + sect_len = LE16_TO_CPU(xlt1->count) * + sizeof(*hw->blk[block_id].xlt1.t); + dst = hw->blk[block_id].xlt1.t; + dst_len = hw->blk[block_id].xlt1.count * + sizeof(*hw->blk[block_id].xlt1.t); + break; + case ICE_SID_XLT2_SW: + case ICE_SID_XLT2_FD: + case ICE_SID_XLT2_RSS: + case ICE_SID_XLT2_ACL: + case ICE_SID_XLT2_PE: + xlt2 = (struct ice_xlt2_section *)sect; + src = (_FORCE_ u8 *)xlt2->value; + sect_len = LE16_TO_CPU(xlt2->count) * + sizeof(*hw->blk[block_id].xlt2.t); + dst = (u8 *)hw->blk[block_id].xlt2.t; + dst_len = hw->blk[block_id].xlt2.count * + sizeof(*hw->blk[block_id].xlt2.t); + break; + case ICE_SID_PROFID_TCAM_SW: + case ICE_SID_PROFID_TCAM_FD: + case ICE_SID_PROFID_TCAM_RSS: + case ICE_SID_PROFID_TCAM_ACL: + case ICE_SID_PROFID_TCAM_PE: + pid = (struct ice_prof_id_section *)sect; + src = (u8 *)pid->entry; + sect_len = LE16_TO_CPU(pid->count) * + sizeof(*hw->blk[block_id].prof.t); + dst = (u8 *)hw->blk[block_id].prof.t; + dst_len = hw->blk[block_id].prof.count * + sizeof(*hw->blk[block_id].prof.t); + break; + case ICE_SID_PROFID_REDIR_SW: + case ICE_SID_PROFID_REDIR_FD: + case ICE_SID_PROFID_REDIR_RSS: + case ICE_SID_PROFID_REDIR_ACL: + case ICE_SID_PROFID_REDIR_PE: + pr = (struct ice_prof_redir_section *)sect; + src = pr->redir_value; + sect_len = LE16_TO_CPU(pr->count) * + sizeof(*hw->blk[block_id].prof_redir.t); + dst = hw->blk[block_id].prof_redir.t; + dst_len = hw->blk[block_id].prof_redir.count * + sizeof(*hw->blk[block_id].prof_redir.t); + break; + case ICE_SID_FLD_VEC_SW: + case ICE_SID_FLD_VEC_FD: + case ICE_SID_FLD_VEC_RSS: + case ICE_SID_FLD_VEC_ACL: + case ICE_SID_FLD_VEC_PE: + es = (struct ice_sw_fv_section *)sect; + src = (u8 *)es->fv; + sect_len = (u32)(LE16_TO_CPU(es->count) * + hw->blk[block_id].es.fvw) * + sizeof(*hw->blk[block_id].es.t); + dst = (u8 *)hw->blk[block_id].es.t; + dst_len = (u32)(hw->blk[block_id].es.count * + hw->blk[block_id].es.fvw) * + sizeof(*hw->blk[block_id].es.t); + break; + default: + return; + } + + /* if the section offset exceeds destination length, terminate + * table fill. + */ + if (offset > dst_len) + return; + + /* if the sum of section size and offset exceed destination size + * then we are out of bounds of the HW table size for that PF. + * Changing section length to fill the remaining table space + * of that PF. + */ + if ((offset + sect_len) > dst_len) + sect_len = dst_len - offset; + + ice_memcpy(dst + offset, src, sect_len, ICE_NONDMA_TO_NONDMA); + offset += sect_len; + sect = ice_pkg_enum_section(NULL, &state, sid); + } +} + +/** + * ice_fill_blk_tbls - Read package context for tables + * @hw: pointer to the hardware structure + * + * Reads the current package contents and populates the driver + * database with the data iteratively for all advanced feature + * blocks. Assume that the HW tables have been allocated. 
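+ *
+ * Expected call order, sketched for illustration only (package download
+ * and error handling are omitted): allocate the shadow tables first,
+ * then mirror the package contents into them:
+ *
+ *	if (!ice_init_hw_tbls(hw))
+ *		ice_fill_blk_tbls(hw);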
+ */ +void ice_fill_blk_tbls(struct ice_hw *hw) +{ + u8 i; + + for (i = 0; i < ICE_BLK_COUNT; i++) { + enum ice_block blk_id = (enum ice_block)i; + + ice_fill_tbl(hw, blk_id, hw->blk[blk_id].xlt1.sid); + ice_fill_tbl(hw, blk_id, hw->blk[blk_id].xlt2.sid); + ice_fill_tbl(hw, blk_id, hw->blk[blk_id].prof.sid); + ice_fill_tbl(hw, blk_id, hw->blk[blk_id].prof_redir.sid); + ice_fill_tbl(hw, blk_id, hw->blk[blk_id].es.sid); + } + + ice_init_sw_db(hw); +} + +/** + * ice_free_prof_map - free profile map + * @hw: pointer to the hardware structure + * @blk_idx: HW block index + */ +static void ice_free_prof_map(struct ice_hw *hw, u8 blk_idx) +{ + struct ice_es *es = &hw->blk[blk_idx].es; + struct ice_prof_map *del, *tmp; + + ice_acquire_lock(&es->prof_map_lock); + LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &es->prof_map, + ice_prof_map, list) { + LIST_DEL(&del->list); + ice_free(hw, del); + } + INIT_LIST_HEAD(&es->prof_map); + ice_release_lock(&es->prof_map_lock); +} + +/** + * ice_free_flow_profs - free flow profile entries + * @hw: pointer to the hardware structure + * @blk_idx: HW block index + */ +static void ice_free_flow_profs(struct ice_hw *hw, u8 blk_idx) +{ + struct ice_flow_prof *p, *tmp; + + ice_acquire_lock(&hw->fl_profs_locks[blk_idx]); + LIST_FOR_EACH_ENTRY_SAFE(p, tmp, &hw->fl_profs[blk_idx], + ice_flow_prof, l_entry) { + struct ice_flow_entry *e, *t; + + LIST_FOR_EACH_ENTRY_SAFE(e, t, &p->entries, + ice_flow_entry, l_entry) + ice_flow_rem_entry(hw, (enum ice_block)blk_idx, + ICE_FLOW_ENTRY_HNDL(e)); + + LIST_DEL(&p->l_entry); + if (p->acts) + ice_free(hw, p->acts); + ice_free(hw, p); + } + ice_release_lock(&hw->fl_profs_locks[blk_idx]); + + /* if driver is in reset and tables are being cleared + * re-initialize the flow profile list heads + */ + INIT_LIST_HEAD(&hw->fl_profs[blk_idx]); +} + +/** + * ice_free_vsig_tbl - free complete VSIG table entries + * @hw: pointer to the hardware structure + * @blk: the HW block on which to free the VSIG table entries + */ +static void ice_free_vsig_tbl(struct ice_hw *hw, enum ice_block blk) +{ + u16 i; + + if (!hw->blk[blk].xlt2.vsig_tbl) + return; + + for (i = 1; i < ICE_MAX_VSIGS; i++) + if (hw->blk[blk].xlt2.vsig_tbl[i].in_use) + ice_vsig_free(hw, blk, i); +} + +/** + * ice_free_hw_tbls - free hardware table memory + * @hw: pointer to the hardware structure + */ +void ice_free_hw_tbls(struct ice_hw *hw) +{ + struct ice_rss_cfg *r, *rt; + u8 i; + + for (i = 0; i < ICE_BLK_COUNT; i++) { + if (hw->blk[i].is_list_init) { + struct ice_es *es = &hw->blk[i].es; + + ice_free_prof_map(hw, i); + ice_destroy_lock(&es->prof_map_lock); + ice_free_flow_profs(hw, i); + ice_destroy_lock(&hw->fl_profs_locks[i]); + + hw->blk[i].is_list_init = false; + } + ice_free_vsig_tbl(hw, (enum ice_block)i); + ice_free(hw, hw->blk[i].xlt1.ptypes); + ice_free(hw, hw->blk[i].xlt1.ptg_tbl); + ice_free(hw, hw->blk[i].xlt1.t); + ice_free(hw, hw->blk[i].xlt2.t); + ice_free(hw, hw->blk[i].xlt2.vsig_tbl); + ice_free(hw, hw->blk[i].xlt2.vsis); + ice_free(hw, hw->blk[i].prof.t); + ice_free(hw, hw->blk[i].prof_redir.t); + ice_free(hw, hw->blk[i].es.t); + ice_free(hw, hw->blk[i].es.ref_count); + ice_free(hw, hw->blk[i].es.written); + ice_free(hw, hw->blk[i].es.mask_ena); + } + + LIST_FOR_EACH_ENTRY_SAFE(r, rt, &hw->rss_list_head, + ice_rss_cfg, l_entry) { + LIST_DEL(&r->l_entry); + ice_free(hw, r); + } + ice_destroy_lock(&hw->rss_locks); + if (!hw->dcf_enabled) + ice_shutdown_all_prof_masks(hw); + ice_memset(hw->blk, 0, sizeof(hw->blk), ICE_NONDMA_MEM); +} + +/** + * ice_init_flow_profs - 
init flow profile locks and list heads + * @hw: pointer to the hardware structure + * @blk_idx: HW block index + */ +static void ice_init_flow_profs(struct ice_hw *hw, u8 blk_idx) +{ + ice_init_lock(&hw->fl_profs_locks[blk_idx]); + INIT_LIST_HEAD(&hw->fl_profs[blk_idx]); +} + +/** + * ice_clear_hw_tbls - clear HW tables and flow profiles + * @hw: pointer to the hardware structure + */ +void ice_clear_hw_tbls(struct ice_hw *hw) +{ + u8 i; + + for (i = 0; i < ICE_BLK_COUNT; i++) { + struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir; + struct ice_prof_tcam *prof = &hw->blk[i].prof; + struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1; + struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2; + struct ice_es *es = &hw->blk[i].es; + + if (hw->blk[i].is_list_init) { + ice_free_prof_map(hw, i); + ice_free_flow_profs(hw, i); + } + + ice_free_vsig_tbl(hw, (enum ice_block)i); + + ice_memset(xlt1->ptypes, 0, xlt1->count * sizeof(*xlt1->ptypes), + ICE_NONDMA_MEM); + ice_memset(xlt1->ptg_tbl, 0, + ICE_MAX_PTGS * sizeof(*xlt1->ptg_tbl), + ICE_NONDMA_MEM); + ice_memset(xlt1->t, 0, xlt1->count * sizeof(*xlt1->t), + ICE_NONDMA_MEM); + + ice_memset(xlt2->vsis, 0, xlt2->count * sizeof(*xlt2->vsis), + ICE_NONDMA_MEM); + ice_memset(xlt2->vsig_tbl, 0, + xlt2->count * sizeof(*xlt2->vsig_tbl), + ICE_NONDMA_MEM); + ice_memset(xlt2->t, 0, xlt2->count * sizeof(*xlt2->t), + ICE_NONDMA_MEM); + + ice_memset(prof->t, 0, prof->count * sizeof(*prof->t), + ICE_NONDMA_MEM); + ice_memset(prof_redir->t, 0, + prof_redir->count * sizeof(*prof_redir->t), + ICE_NONDMA_MEM); + + ice_memset(es->t, 0, es->count * sizeof(*es->t), + ICE_NONDMA_MEM); + ice_memset(es->ref_count, 0, es->count * sizeof(*es->ref_count), + ICE_NONDMA_MEM); + ice_memset(es->written, 0, es->count * sizeof(*es->written), + ICE_NONDMA_MEM); + ice_memset(es->mask_ena, 0, es->count * sizeof(*es->mask_ena), + ICE_NONDMA_MEM); + } +} + +/** + * ice_init_hw_tbls - init hardware table memory + * @hw: pointer to the hardware structure + */ +enum ice_status ice_init_hw_tbls(struct ice_hw *hw) +{ + u8 i; + + ice_init_lock(&hw->rss_locks); + INIT_LIST_HEAD(&hw->rss_list_head); + if (!hw->dcf_enabled) + ice_init_all_prof_masks(hw); + for (i = 0; i < ICE_BLK_COUNT; i++) { + struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir; + struct ice_prof_tcam *prof = &hw->blk[i].prof; + struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1; + struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2; + struct ice_es *es = &hw->blk[i].es; + u16 j; + + if (hw->blk[i].is_list_init) + continue; + + ice_init_flow_profs(hw, i); + ice_init_lock(&es->prof_map_lock); + INIT_LIST_HEAD(&es->prof_map); + hw->blk[i].is_list_init = true; + + hw->blk[i].overwrite = blk_sizes[i].overwrite; + es->reverse = blk_sizes[i].reverse; + + xlt1->sid = ice_blk_sids[i][ICE_SID_XLT1_OFF]; + xlt1->count = blk_sizes[i].xlt1; + + xlt1->ptypes = (struct ice_ptg_ptype *) + ice_calloc(hw, xlt1->count, sizeof(*xlt1->ptypes)); + + if (!xlt1->ptypes) + goto err; + + xlt1->ptg_tbl = (struct ice_ptg_entry *) + ice_calloc(hw, ICE_MAX_PTGS, sizeof(*xlt1->ptg_tbl)); + + if (!xlt1->ptg_tbl) + goto err; + + xlt1->t = (u8 *)ice_calloc(hw, xlt1->count, sizeof(*xlt1->t)); + if (!xlt1->t) + goto err; + + xlt2->sid = ice_blk_sids[i][ICE_SID_XLT2_OFF]; + xlt2->count = blk_sizes[i].xlt2; + + xlt2->vsis = (struct ice_vsig_vsi *) + ice_calloc(hw, xlt2->count, sizeof(*xlt2->vsis)); + + if (!xlt2->vsis) + goto err; + + xlt2->vsig_tbl = (struct ice_vsig_entry *) + ice_calloc(hw, xlt2->count, sizeof(*xlt2->vsig_tbl)); + if (!xlt2->vsig_tbl) + goto err; + + for (j = 0; j < 
xlt2->count; j++) + INIT_LIST_HEAD(&xlt2->vsig_tbl[j].prop_lst); + + xlt2->t = (u16 *)ice_calloc(hw, xlt2->count, sizeof(*xlt2->t)); + if (!xlt2->t) + goto err; + + prof->sid = ice_blk_sids[i][ICE_SID_PR_OFF]; + prof->count = blk_sizes[i].prof_tcam; + prof->max_prof_id = blk_sizes[i].prof_id; + prof->cdid_bits = blk_sizes[i].prof_cdid_bits; + prof->t = (struct ice_prof_tcam_entry *) + ice_calloc(hw, prof->count, sizeof(*prof->t)); + + if (!prof->t) + goto err; + + prof_redir->sid = ice_blk_sids[i][ICE_SID_PR_REDIR_OFF]; + prof_redir->count = blk_sizes[i].prof_redir; + prof_redir->t = (u8 *)ice_calloc(hw, prof_redir->count, + sizeof(*prof_redir->t)); + + if (!prof_redir->t) + goto err; + + es->sid = ice_blk_sids[i][ICE_SID_ES_OFF]; + es->count = blk_sizes[i].es; + es->fvw = blk_sizes[i].fvw; + es->t = (struct ice_fv_word *) + ice_calloc(hw, (u32)(es->count * es->fvw), + sizeof(*es->t)); + if (!es->t) + goto err; + + es->ref_count = (u16 *) + ice_calloc(hw, es->count, sizeof(*es->ref_count)); + + es->written = (u8 *) + ice_calloc(hw, es->count, sizeof(*es->written)); + es->mask_ena = (u32 *) + ice_calloc(hw, es->count, sizeof(*es->mask_ena)); + if (!es->ref_count) + goto err; + } + return ICE_SUCCESS; + +err: + ice_free_hw_tbls(hw); + return ICE_ERR_NO_MEMORY; +} + +/** + * ice_prof_gen_key - generate profile ID key + * @hw: pointer to the HW struct + * @blk: the block in which to write profile ID to + * @ptg: packet type group (PTG) portion of key + * @vsig: VSIG portion of key + * @cdid: CDID portion of key + * @flags: flag portion of key + * @vl_msk: valid mask + * @dc_msk: don't care mask + * @nm_msk: never match mask + * @key: output of profile ID key + */ +static enum ice_status +ice_prof_gen_key(struct ice_hw *hw, enum ice_block blk, u8 ptg, u16 vsig, + u8 cdid, u16 flags, u8 vl_msk[ICE_TCAM_KEY_VAL_SZ], + u8 dc_msk[ICE_TCAM_KEY_VAL_SZ], u8 nm_msk[ICE_TCAM_KEY_VAL_SZ], + u8 key[ICE_TCAM_KEY_SZ]) +{ + struct ice_prof_id_key inkey; + + inkey.xlt1 = ptg; + inkey.xlt2_cdid = CPU_TO_LE16(vsig); + inkey.flags = CPU_TO_LE16(flags); + + switch (hw->blk[blk].prof.cdid_bits) { + case 0: + break; + case 2: +#define ICE_CD_2_M 0xC000U +#define ICE_CD_2_S 14 + inkey.xlt2_cdid &= ~CPU_TO_LE16(ICE_CD_2_M); + inkey.xlt2_cdid |= CPU_TO_LE16(BIT(cdid) << ICE_CD_2_S); + break; + case 4: +#define ICE_CD_4_M 0xF000U +#define ICE_CD_4_S 12 + inkey.xlt2_cdid &= ~CPU_TO_LE16(ICE_CD_4_M); + inkey.xlt2_cdid |= CPU_TO_LE16(BIT(cdid) << ICE_CD_4_S); + break; + case 8: +#define ICE_CD_8_M 0xFF00U +#define ICE_CD_8_S 16 + inkey.xlt2_cdid &= ~CPU_TO_LE16(ICE_CD_8_M); + inkey.xlt2_cdid |= CPU_TO_LE16(BIT(cdid) << ICE_CD_8_S); + break; + default: + ice_debug(hw, ICE_DBG_PKG, "Error in profile config\n"); + break; + } + + return ice_set_key(key, ICE_TCAM_KEY_SZ, (u8 *)&inkey, vl_msk, dc_msk, + nm_msk, 0, ICE_TCAM_KEY_SZ / 2); +} + +/** + * ice_tcam_write_entry - write TCAM entry + * @hw: pointer to the HW struct + * @blk: the block in which to write profile ID to + * @idx: the entry index to write to + * @prof_id: profile ID + * @ptg: packet type group (PTG) portion of key + * @vsig: VSIG portion of key + * @cdid: CDID: portion of key + * @flags: flag portion of key + * @vl_msk: valid mask + * @dc_msk: don't care mask + * @nm_msk: never match mask + */ +static enum ice_status +ice_tcam_write_entry(struct ice_hw *hw, enum ice_block blk, u16 idx, + u8 prof_id, u8 ptg, u16 vsig, u8 cdid, u16 flags, + u8 vl_msk[ICE_TCAM_KEY_VAL_SZ], + u8 dc_msk[ICE_TCAM_KEY_VAL_SZ], + u8 nm_msk[ICE_TCAM_KEY_VAL_SZ]) +{ + struct 
ice_prof_tcam_entry; + enum ice_status status; + + status = ice_prof_gen_key(hw, blk, ptg, vsig, cdid, flags, vl_msk, + dc_msk, nm_msk, hw->blk[blk].prof.t[idx].key); + if (!status) { + hw->blk[blk].prof.t[idx].addr = CPU_TO_LE16(idx); + hw->blk[blk].prof.t[idx].prof_id = prof_id; + } + + return status; +} + +/** + * ice_vsig_get_ref - returns number of VSIs belong to a VSIG + * @hw: pointer to the hardware structure + * @blk: HW block + * @vsig: VSIG to query + * @refs: pointer to variable to receive the reference count + */ +static enum ice_status +ice_vsig_get_ref(struct ice_hw *hw, enum ice_block blk, u16 vsig, u16 *refs) +{ + u16 idx = vsig & ICE_VSIG_IDX_M; + struct ice_vsig_vsi *ptr; + + *refs = 0; + + if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use) + return ICE_ERR_DOES_NOT_EXIST; + + ptr = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi; + while (ptr) { + (*refs)++; + ptr = ptr->next_vsi; + } + + return ICE_SUCCESS; +} + +/** + * ice_has_prof_vsig - check to see if VSIG has a specific profile + * @hw: pointer to the hardware structure + * @blk: HW block + * @vsig: VSIG to check against + * @hdl: profile handle + */ +static bool +ice_has_prof_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl) +{ + u16 idx = vsig & ICE_VSIG_IDX_M; + struct ice_vsig_prof *ent; + + LIST_FOR_EACH_ENTRY(ent, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst, + ice_vsig_prof, list) { + if (ent->profile_cookie == hdl) + return true; + } + + ice_debug(hw, ICE_DBG_INIT, + "Characteristic list for VSI group %d not found.\n", + vsig); + return false; +} + +/** + * ice_prof_bld_es - build profile ID extraction sequence changes + * @hw: pointer to the HW struct + * @blk: hardware block + * @bld: the update package buffer build to add to + * @chgs: the list of changes to make in hardware + */ +static enum ice_status +ice_prof_bld_es(struct ice_hw *hw, enum ice_block blk, + struct ice_buf_build *bld, struct LIST_HEAD_TYPE *chgs) +{ + u16 vec_size = hw->blk[blk].es.fvw * sizeof(struct ice_fv_word); + struct ice_chs_chg *tmp; + + LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry) { + if (tmp->type == ICE_PTG_ES_ADD && tmp->add_prof) { + u16 off = tmp->prof_id * hw->blk[blk].es.fvw; + struct ice_pkg_es *p; + u32 id; + + id = ice_sect_id(blk, ICE_VEC_TBL); + p = (struct ice_pkg_es *) + ice_pkg_buf_alloc_section(bld, id, sizeof(*p) + + vec_size - + sizeof(p->es[0])); + + if (!p) + return ICE_ERR_MAX_LIMIT; + + p->count = CPU_TO_LE16(1); + p->offset = CPU_TO_LE16(tmp->prof_id); + + ice_memcpy(p->es, &hw->blk[blk].es.t[off], vec_size, + ICE_NONDMA_TO_NONDMA); + } + } + + return ICE_SUCCESS; +} + +/** + * ice_prof_bld_tcam - build profile ID TCAM changes + * @hw: pointer to the HW struct + * @blk: hardware block + * @bld: the update package buffer build to add to + * @chgs: the list of changes to make in hardware + */ +static enum ice_status +ice_prof_bld_tcam(struct ice_hw *hw, enum ice_block blk, + struct ice_buf_build *bld, struct LIST_HEAD_TYPE *chgs) +{ + struct ice_chs_chg *tmp; + + LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry) { + if (tmp->type == ICE_TCAM_ADD && tmp->add_tcam_idx) { + struct ice_prof_id_section *p; + u32 id; + + id = ice_sect_id(blk, ICE_PROF_TCAM); + p = (struct ice_prof_id_section *) + ice_pkg_buf_alloc_section(bld, id, sizeof(*p)); + + if (!p) + return ICE_ERR_MAX_LIMIT; + + p->count = CPU_TO_LE16(1); + p->entry[0].addr = CPU_TO_LE16(tmp->tcam_idx); + p->entry[0].prof_id = tmp->prof_id; + + ice_memcpy(p->entry[0].key, + &hw->blk[blk].prof.t[tmp->tcam_idx].key, + 
sizeof(hw->blk[blk].prof.t->key), + ICE_NONDMA_TO_NONDMA); + } + } + + return ICE_SUCCESS; +} + +/** + * ice_prof_bld_xlt1 - build XLT1 changes + * @blk: hardware block + * @bld: the update package buffer build to add to + * @chgs: the list of changes to make in hardware + */ +static enum ice_status +ice_prof_bld_xlt1(enum ice_block blk, struct ice_buf_build *bld, + struct LIST_HEAD_TYPE *chgs) +{ + struct ice_chs_chg *tmp; + + LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry) { + if (tmp->type == ICE_PTG_ES_ADD && tmp->add_ptg) { + struct ice_xlt1_section *p; + u32 id; + + id = ice_sect_id(blk, ICE_XLT1); + p = (struct ice_xlt1_section *) + ice_pkg_buf_alloc_section(bld, id, sizeof(*p)); + + if (!p) + return ICE_ERR_MAX_LIMIT; + + p->count = CPU_TO_LE16(1); + p->offset = CPU_TO_LE16(tmp->ptype); + p->value[0] = tmp->ptg; + } + } + + return ICE_SUCCESS; +} + +/** + * ice_prof_bld_xlt2 - build XLT2 changes + * @blk: hardware block + * @bld: the update package buffer build to add to + * @chgs: the list of changes to make in hardware + */ +static enum ice_status +ice_prof_bld_xlt2(enum ice_block blk, struct ice_buf_build *bld, + struct LIST_HEAD_TYPE *chgs) +{ + struct ice_chs_chg *tmp; + + LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry) { + struct ice_xlt2_section *p; + u32 id; + + switch (tmp->type) { + case ICE_VSIG_ADD: + case ICE_VSI_MOVE: + case ICE_VSIG_REM: + id = ice_sect_id(blk, ICE_XLT2); + p = (struct ice_xlt2_section *) + ice_pkg_buf_alloc_section(bld, id, sizeof(*p)); + + if (!p) + return ICE_ERR_MAX_LIMIT; + + p->count = CPU_TO_LE16(1); + p->offset = CPU_TO_LE16(tmp->vsi); + p->value[0] = CPU_TO_LE16(tmp->vsig); + break; + default: + break; + } + } + + return ICE_SUCCESS; +} + +/** + * ice_upd_prof_hw - update hardware using the change list + * @hw: pointer to the HW struct + * @blk: hardware block + * @chgs: the list of changes to make in hardware + */ +static enum ice_status +ice_upd_prof_hw(struct ice_hw *hw, enum ice_block blk, + struct LIST_HEAD_TYPE *chgs) +{ + struct ice_buf_build *b; + struct ice_chs_chg *tmp; + enum ice_status status; + u16 pkg_sects; + u16 xlt1 = 0; + u16 xlt2 = 0; + u16 tcam = 0; + u16 es = 0; + u16 sects; + + /* count number of sections we need */ + LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry) { + switch (tmp->type) { + case ICE_PTG_ES_ADD: + if (tmp->add_ptg) + xlt1++; + if (tmp->add_prof) + es++; + break; + case ICE_TCAM_ADD: + tcam++; + break; + case ICE_VSIG_ADD: + case ICE_VSI_MOVE: + case ICE_VSIG_REM: + xlt2++; + break; + default: + break; + } + } + sects = xlt1 + xlt2 + tcam + es; + + if (!sects) + return ICE_SUCCESS; + + /* Build update package buffer */ + b = ice_pkg_buf_alloc(hw); + if (!b) + return ICE_ERR_NO_MEMORY; + + status = ice_pkg_buf_reserve_section(b, sects); + if (status) + goto error_tmp; + + /* Preserve order of table update: ES, TCAM, PTG, VSIG */ + if (es) { + status = ice_prof_bld_es(hw, blk, b, chgs); + if (status) + goto error_tmp; + } + + if (tcam) { + status = ice_prof_bld_tcam(hw, blk, b, chgs); + if (status) + goto error_tmp; + } + + if (xlt1) { + status = ice_prof_bld_xlt1(blk, b, chgs); + if (status) + goto error_tmp; + } + + if (xlt2) { + status = ice_prof_bld_xlt2(blk, b, chgs); + if (status) + goto error_tmp; + } + + /* After package buffer build check if the section count in buffer is + * non-zero and matches the number of sections detected for package + * update. 
+ */ + pkg_sects = ice_pkg_buf_get_active_sections(b); + if (!pkg_sects || pkg_sects != sects) { + status = ICE_ERR_INVAL_SIZE; + goto error_tmp; + } + + /* update package */ + status = ice_update_pkg(hw, ice_pkg_buf(b), 1); + if (status == ICE_ERR_AQ_ERROR) + ice_debug(hw, ICE_DBG_INIT, "Unable to update HW profile\n"); + +error_tmp: + ice_pkg_buf_free(hw, b); + return status; +} + +/** + * ice_update_fd_mask - set Flow Director Field Vector mask for a profile + * @hw: pointer to the HW struct + * @prof_id: profile ID + * @mask_sel: mask select + * + * This function enable any of the masks selected by the mask select parameter + * for the profile specified. + */ +static void ice_update_fd_mask(struct ice_hw *hw, u16 prof_id, u32 mask_sel) +{ + wr32(hw, GLQF_FDMASK_SEL(prof_id), mask_sel); + + ice_debug(hw, ICE_DBG_INIT, "fd mask(%d): %x = %x\n", prof_id, + GLQF_FDMASK_SEL(prof_id), mask_sel); +} + +struct ice_fd_src_dst_pair { + u8 prot_id; + u8 count; + u16 off; +}; + +static const struct ice_fd_src_dst_pair ice_fd_pairs[] = { + /* These are defined in pairs */ + { ICE_PROT_IPV4_OF_OR_S, 2, 12 }, + { ICE_PROT_IPV4_OF_OR_S, 2, 16 }, + + { ICE_PROT_IPV4_IL, 2, 12 }, + { ICE_PROT_IPV4_IL, 2, 16 }, + + { ICE_PROT_IPV6_OF_OR_S, 8, 8 }, + { ICE_PROT_IPV6_OF_OR_S, 8, 24 }, + + { ICE_PROT_IPV6_IL, 8, 8 }, + { ICE_PROT_IPV6_IL, 8, 24 }, + + { ICE_PROT_TCP_IL, 1, 0 }, + { ICE_PROT_TCP_IL, 1, 2 }, + + { ICE_PROT_UDP_OF, 1, 0 }, + { ICE_PROT_UDP_OF, 1, 2 }, + + { ICE_PROT_UDP_IL_OR_S, 1, 0 }, + { ICE_PROT_UDP_IL_OR_S, 1, 2 }, + + { ICE_PROT_SCTP_IL, 1, 0 }, + { ICE_PROT_SCTP_IL, 1, 2 } +}; + +#define ICE_FD_SRC_DST_PAIR_COUNT ARRAY_SIZE(ice_fd_pairs) + +/** + * ice_update_fd_swap - set register appropriately for a FD FV extraction + * @hw: pointer to the HW struct + * @prof_id: profile ID + * @es: extraction sequence (length of array is determined by the block) + */ +static enum ice_status +ice_update_fd_swap(struct ice_hw *hw, u16 prof_id, struct ice_fv_word *es) +{ + ice_declare_bitmap(pair_list, ICE_FD_SRC_DST_PAIR_COUNT); + u8 pair_start[ICE_FD_SRC_DST_PAIR_COUNT] = { 0 }; +#define ICE_FD_FV_NOT_FOUND (-2) + s8 first_free = ICE_FD_FV_NOT_FOUND; + u8 used[ICE_MAX_FV_WORDS] = { 0 }; + s8 orig_free, si; + u32 mask_sel = 0; + u8 i, j, k; + + ice_zero_bitmap(pair_list, ICE_FD_SRC_DST_PAIR_COUNT); + + /* This code assumes that the Flow Director field vectors are assigned + * from the end of the FV indexes working towards the zero index, that + * only complete fields will be included and will be consecutive, and + * that there are no gaps between valid indexes. + */ + + /* Determine swap fields present */ + for (i = 0; i < hw->blk[ICE_BLK_FD].es.fvw; i++) { + /* Find the first free entry, assuming right to left population. + * This is where we can start adding additional pairs if needed. 
+ */ + if (first_free == ICE_FD_FV_NOT_FOUND && es[i].prot_id != + ICE_PROT_INVALID) + first_free = i - 1; + + for (j = 0; j < ICE_FD_SRC_DST_PAIR_COUNT; j++) { + if (es[i].prot_id == ice_fd_pairs[j].prot_id && + es[i].off == ice_fd_pairs[j].off) { + ice_set_bit(j, pair_list); + pair_start[j] = i; + } + } + } + + orig_free = first_free; + + /* determine missing swap fields that need to be added */ + for (i = 0; i < ICE_FD_SRC_DST_PAIR_COUNT; i += 2) { + u8 bit1 = ice_is_bit_set(pair_list, i + 1); + u8 bit0 = ice_is_bit_set(pair_list, i); + + if (bit0 ^ bit1) { + u8 index; + + /* add the appropriate 'paired' entry */ + if (!bit0) + index = i; + else + index = i + 1; + + /* check for room */ + if (first_free + 1 < (s8)ice_fd_pairs[index].count) + return ICE_ERR_MAX_LIMIT; + + /* place in extraction sequence */ + for (k = 0; k < ice_fd_pairs[index].count; k++) { + es[first_free - k].prot_id = + ice_fd_pairs[index].prot_id; + es[first_free - k].off = + ice_fd_pairs[index].off + (k * 2); + + if (k > first_free) + return ICE_ERR_OUT_OF_RANGE; + + /* keep track of non-relevant fields */ + mask_sel |= BIT(first_free - k); + } + + pair_start[index] = first_free; + first_free -= ice_fd_pairs[index].count; + } + } + + /* fill in the swap array */ + si = hw->blk[ICE_BLK_FD].es.fvw - 1; + while (si >= 0) { + u8 indexes_used = 1; + + /* assume flat at this index */ +#define ICE_SWAP_VALID 0x80 + used[si] = si | ICE_SWAP_VALID; + + if (orig_free == ICE_FD_FV_NOT_FOUND || si <= orig_free) { + si -= indexes_used; + continue; + } + + /* check for a swap location */ + for (j = 0; j < ICE_FD_SRC_DST_PAIR_COUNT; j++) { + if (es[si].prot_id == ice_fd_pairs[j].prot_id && + es[si].off == ice_fd_pairs[j].off) { + u8 idx; + + /* determine the appropriate matching field */ + idx = j + ((j % 2) ? 
-1 : 1); + + indexes_used = ice_fd_pairs[idx].count; + for (k = 0; k < indexes_used; k++) { + used[si - k] = (pair_start[idx] - k) | + ICE_SWAP_VALID; + } + + break; + } + } + + si -= indexes_used; + } + + /* for each set of 4 swap and 4 inset indexes, write the appropriate + * register + */ + for (j = 0; j < hw->blk[ICE_BLK_FD].es.fvw / 4; j++) { + u32 raw_swap = 0; + u32 raw_in = 0; + + for (k = 0; k < 4; k++) { + u8 idx; + + idx = (j * 4) + k; + if (used[idx] && !(mask_sel & BIT(idx))) { + raw_swap |= used[idx] << (k * BITS_PER_BYTE); +#define ICE_INSET_DFLT 0x9f + raw_in |= ICE_INSET_DFLT << (k * BITS_PER_BYTE); + } + } + + /* write the appropriate swap register set */ + wr32(hw, GLQF_FDSWAP(prof_id, j), raw_swap); + + ice_debug(hw, ICE_DBG_INIT, "swap wr(%d, %d): %x = %08x\n", + prof_id, j, GLQF_FDSWAP(prof_id, j), raw_swap); + + /* write the appropriate inset register set */ + wr32(hw, GLQF_FDINSET(prof_id, j), raw_in); + + ice_debug(hw, ICE_DBG_INIT, "inset wr(%d, %d): %x = %08x\n", + prof_id, j, GLQF_FDINSET(prof_id, j), raw_in); + } + + /* initially clear the mask select for this profile */ + ice_update_fd_mask(hw, prof_id, 0); + + return ICE_SUCCESS; +} + +/* The entries here needs to match the order of enum ice_ptype_attrib */ +static const struct ice_ptype_attrib_info ice_ptype_attributes[] = { + { ICE_GTP_PDU_EH, ICE_GTP_PDU_FLAG_MASK }, + { ICE_GTP_SESSION, ICE_GTP_FLAGS_MASK }, + { ICE_GTP_DOWNLINK, ICE_GTP_FLAGS_MASK }, + { ICE_GTP_UPLINK, ICE_GTP_FLAGS_MASK }, +}; + +/** + * ice_get_ptype_attrib_info - get ptype attribute information + * @type: attribute type + * @info: pointer to variable to the attribute information + */ +static void +ice_get_ptype_attrib_info(enum ice_ptype_attrib_type type, + struct ice_ptype_attrib_info *info) +{ + *info = ice_ptype_attributes[type]; +} + +/** + * ice_add_prof_attrib - add any PTG with attributes to profile + * @prof: pointer to the profile to which PTG entries will be added + * @ptg: PTG to be added + * @ptype: PTYPE that needs to be looked up + * @attr: array of attributes that will be considered + * @attr_cnt: number of elements in the attribute array + */ +static enum ice_status +ice_add_prof_attrib(struct ice_prof_map *prof, u8 ptg, u16 ptype, + const struct ice_ptype_attributes *attr, u16 attr_cnt) +{ + bool found = false; + u16 i; + + for (i = 0; i < attr_cnt; i++) { + if (attr[i].ptype == ptype) { + found = true; + + prof->ptg[prof->ptg_cnt] = ptg; + ice_get_ptype_attrib_info(attr[i].attrib, + &prof->attr[prof->ptg_cnt]); + + if (++prof->ptg_cnt >= ICE_MAX_PTG_PER_PROFILE) + return ICE_ERR_MAX_LIMIT; + } + } + + if (!found) + return ICE_ERR_DOES_NOT_EXIST; + + return ICE_SUCCESS; +} + +/** + * ice_add_prof - add profile + * @hw: pointer to the HW struct + * @blk: hardware block + * @id: profile tracking ID + * @ptypes: array of bitmaps indicating ptypes (ICE_FLOW_PTYPE_MAX bits) + * @attr: array of attributes + * @attr_cnt: number of elements in attrib array + * @es: extraction sequence (length of array is determined by the block) + * @masks: mask for extraction sequence + * + * This function registers a profile, which matches a set of PTYPES with a + * particular extraction sequence. While the hardware profile is allocated + * it will not be written until the first call to ice_add_flow that specifies + * the ID value used here. 
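+ *
+ * Minimal usage sketch, for illustration only: the ptypes bitmap and
+ * the es/masks arrays are assumed to have been built by the caller,
+ * "prof_handle" is a caller-chosen u64 tracking ID, and no ptype
+ * attributes are supplied:
+ *
+ *	enum ice_status err;
+ *
+ *	err = ice_add_prof(hw, ICE_BLK_RSS, prof_handle, ptypes,
+ *			   NULL, 0, es, masks);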
+ */ +enum ice_status +ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[], + const struct ice_ptype_attributes *attr, u16 attr_cnt, + struct ice_fv_word *es, u16 *masks) +{ + u32 bytes = DIVIDE_AND_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE); + ice_declare_bitmap(ptgs_used, ICE_XLT1_CNT); + struct ice_prof_map *prof; + enum ice_status status; + u8 byte = 0; + u8 prof_id; + + ice_zero_bitmap(ptgs_used, ICE_XLT1_CNT); + + ice_acquire_lock(&hw->blk[blk].es.prof_map_lock); + + /* search for existing profile */ + status = ice_find_prof_id_with_mask(hw, blk, es, masks, &prof_id); + if (status) { + /* allocate profile ID */ + status = ice_alloc_prof_id(hw, blk, &prof_id); + if (status) + goto err_ice_add_prof; + if (blk == ICE_BLK_FD) { + /* For Flow Director block, the extraction sequence may + * need to be altered in the case where there are paired + * fields that have no match. This is necessary because + * for Flow Director, src and dest fields need to paired + * for filter programming and these values are swapped + * during Tx. + */ + status = ice_update_fd_swap(hw, prof_id, es); + if (status) + goto err_ice_add_prof; + } + status = ice_update_prof_masking(hw, blk, prof_id, es, masks); + if (status) + goto err_ice_add_prof; + + /* and write new es */ + ice_write_es(hw, blk, prof_id, es); + } + + ice_prof_inc_ref(hw, blk, prof_id); + + /* add profile info */ + + prof = (struct ice_prof_map *)ice_malloc(hw, sizeof(*prof)); + if (!prof) + goto err_ice_add_prof; + + prof->profile_cookie = id; + prof->prof_id = prof_id; + prof->ptg_cnt = 0; + prof->context = 0; + + /* build list of ptgs */ + while (bytes && prof->ptg_cnt < ICE_MAX_PTG_PER_PROFILE) { + u8 bit; + + if (!ptypes[byte]) { + bytes--; + byte++; + continue; + } + /* Examine 8 bits per byte */ + for (bit = 0; bit < 8; bit++) { + if (ptypes[byte] & BIT(bit)) { + u16 ptype; + u8 ptg; + u8 m; + + ptype = byte * BITS_PER_BYTE + bit; + + /* The package should place all ptypes in a + * non-zero PTG, so the following call should + * never fail. + */ + if (ice_ptg_find_ptype(hw, blk, ptype, &ptg)) + continue; + + /* If PTG is already added, skip and continue */ + if (ice_is_bit_set(ptgs_used, ptg)) + continue; + + ice_set_bit(ptg, ptgs_used); + /* Check to see there are any attributes for + * this ptype, and add them if found. + */ + status = ice_add_prof_attrib(prof, ptg, ptype, + attr, attr_cnt); + if (status == ICE_ERR_MAX_LIMIT) + break; + if (status) { + /* This is simple a ptype/PTG with no + * attribute + */ + prof->ptg[prof->ptg_cnt] = ptg; + prof->attr[prof->ptg_cnt].flags = 0; + prof->attr[prof->ptg_cnt].mask = 0; + + if (++prof->ptg_cnt >= + ICE_MAX_PTG_PER_PROFILE) + break; + } + + /* nothing left in byte, then exit */ + m = ~(u8)((1 << (bit + 1)) - 1); + if (!(ptypes[byte] & m)) + break; + } + } + + bytes--; + byte++; + } + + LIST_ADD(&prof->list, &hw->blk[blk].es.prof_map); + status = ICE_SUCCESS; + +err_ice_add_prof: + ice_release_lock(&hw->blk[blk].es.prof_map_lock); + return status; +} + +/** + * ice_search_prof_id_low - Search for a profile tracking ID low level + * @hw: pointer to the HW struct + * @blk: hardware block + * @id: profile tracking ID + * + * This will search for a profile tracking ID which was previously added. This + * version assumes that the caller has already acquired the prof map lock. 
+ */ +static struct ice_prof_map * +ice_search_prof_id_low(struct ice_hw *hw, enum ice_block blk, u64 id) +{ + struct ice_prof_map *entry = NULL; + struct ice_prof_map *map; + + LIST_FOR_EACH_ENTRY(map, &hw->blk[blk].es.prof_map, ice_prof_map, + list) { + if (map->profile_cookie == id) { + entry = map; + break; + } + } + + return entry; +} + +/** + * ice_search_prof_id - Search for a profile tracking ID + * @hw: pointer to the HW struct + * @blk: hardware block + * @id: profile tracking ID + * + * This will search for a profile tracking ID which was previously added. + */ +struct ice_prof_map * +ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id) +{ + struct ice_prof_map *entry; + + ice_acquire_lock(&hw->blk[blk].es.prof_map_lock); + entry = ice_search_prof_id_low(hw, blk, id); + ice_release_lock(&hw->blk[blk].es.prof_map_lock); + + return entry; +} + +/** + * ice_vsig_prof_id_count - count profiles in a VSIG + * @hw: pointer to the HW struct + * @blk: hardware block + * @vsig: VSIG to remove the profile from + */ +static u16 +ice_vsig_prof_id_count(struct ice_hw *hw, enum ice_block blk, u16 vsig) +{ + u16 idx = vsig & ICE_VSIG_IDX_M, count = 0; + struct ice_vsig_prof *p; + + LIST_FOR_EACH_ENTRY(p, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst, + ice_vsig_prof, list) { + count++; + } + + return count; +} + +/** + * ice_rel_tcam_idx - release a TCAM index + * @hw: pointer to the HW struct + * @blk: hardware block + * @idx: the index to release + */ +static enum ice_status +ice_rel_tcam_idx(struct ice_hw *hw, enum ice_block blk, u16 idx) +{ + /* Masks to invoke a never match entry */ + u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; + u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFE, 0xFF, 0xFF, 0xFF, 0xFF }; + u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x01, 0x00, 0x00, 0x00, 0x00 }; + enum ice_status status; + + /* write the TCAM entry */ + status = ice_tcam_write_entry(hw, blk, idx, 0, 0, 0, 0, 0, vl_msk, + dc_msk, nm_msk); + if (status) + return status; + + /* release the TCAM entry */ + status = ice_free_tcam_ent(hw, blk, idx); + + return status; +} + +/** + * ice_rem_prof_id - remove one profile from a VSIG + * @hw: pointer to the HW struct + * @blk: hardware block + * @prof: pointer to profile structure to remove + */ +static enum ice_status +ice_rem_prof_id(struct ice_hw *hw, enum ice_block blk, + struct ice_vsig_prof *prof) +{ + enum ice_status status; + u16 i; + + for (i = 0; i < prof->tcam_count; i++) { + if (prof->tcam[i].in_use) { + prof->tcam[i].in_use = false; + status = ice_rel_tcam_idx(hw, blk, + prof->tcam[i].tcam_idx); + if (status) + return ICE_ERR_HW_TABLE; + } + } + + return ICE_SUCCESS; +} + +/** + * ice_rem_vsig - remove VSIG + * @hw: pointer to the HW struct + * @blk: hardware block + * @vsig: the VSIG to remove + * @chg: the change list + */ +static enum ice_status +ice_rem_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, + struct LIST_HEAD_TYPE *chg) +{ + u16 idx = vsig & ICE_VSIG_IDX_M; + struct ice_vsig_vsi *vsi_cur; + struct ice_vsig_prof *d, *t; + enum ice_status status; + + /* remove TCAM entries */ + LIST_FOR_EACH_ENTRY_SAFE(d, t, + &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst, + ice_vsig_prof, list) { + status = ice_rem_prof_id(hw, blk, d); + if (status) + return status; + + LIST_DEL(&d->list); + ice_free(hw, d); + } + + /* Move all VSIS associated with this VSIG to the default VSIG */ + vsi_cur = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi; + /* If the VSIG has at least 1 VSI then iterate through the list + * and remove the VSIs before deleting 
the group. + */ + if (vsi_cur) { + do { + struct ice_vsig_vsi *tmp = vsi_cur->next_vsi; + struct ice_chs_chg *p; + + p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p)); + if (!p) + return ICE_ERR_NO_MEMORY; + + p->type = ICE_VSIG_REM; + p->orig_vsig = vsig; + p->vsig = ICE_DEFAULT_VSIG; + p->vsi = vsi_cur - hw->blk[blk].xlt2.vsis; + + LIST_ADD(&p->list_entry, chg); + + vsi_cur = tmp; + } while (vsi_cur); + } + + return ice_vsig_free(hw, blk, vsig); +} + +/** + * ice_rem_prof_id_vsig - remove a specific profile from a VSIG + * @hw: pointer to the HW struct + * @blk: hardware block + * @vsig: VSIG to remove the profile from + * @hdl: profile handle indicating which profile to remove + * @chg: list to receive a record of changes + */ +static enum ice_status +ice_rem_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl, + struct LIST_HEAD_TYPE *chg) +{ + u16 idx = vsig & ICE_VSIG_IDX_M; + struct ice_vsig_prof *p, *t; + enum ice_status status; + + LIST_FOR_EACH_ENTRY_SAFE(p, t, + &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst, + ice_vsig_prof, list) { + if (p->profile_cookie == hdl) { + if (ice_vsig_prof_id_count(hw, blk, vsig) == 1) + /* this is the last profile, remove the VSIG */ + return ice_rem_vsig(hw, blk, vsig, chg); + + status = ice_rem_prof_id(hw, blk, p); + if (!status) { + LIST_DEL(&p->list); + ice_free(hw, p); + } + return status; + } + } + + return ICE_ERR_DOES_NOT_EXIST; +} + +/** + * ice_rem_flow_all - remove all flows with a particular profile + * @hw: pointer to the HW struct + * @blk: hardware block + * @id: profile tracking ID + */ +static enum ice_status +ice_rem_flow_all(struct ice_hw *hw, enum ice_block blk, u64 id) +{ + struct ice_chs_chg *del, *tmp; + struct LIST_HEAD_TYPE chg; + enum ice_status status; + u16 i; + + INIT_LIST_HEAD(&chg); + + for (i = 1; i < ICE_MAX_VSIGS; i++) { + if (hw->blk[blk].xlt2.vsig_tbl[i].in_use) { + if (ice_has_prof_vsig(hw, blk, i, id)) { + status = ice_rem_prof_id_vsig(hw, blk, i, id, + &chg); + if (status) + goto err_ice_rem_flow_all; + } + } + } + + status = ice_upd_prof_hw(hw, blk, &chg); + +err_ice_rem_flow_all: + LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &chg, ice_chs_chg, list_entry) { + LIST_DEL(&del->list_entry); + ice_free(hw, del); + } + + return status; +} + +/** + * ice_rem_prof - remove profile + * @hw: pointer to the HW struct + * @blk: hardware block + * @id: profile tracking ID + * + * This will remove the profile specified by the ID parameter, which was + * previously created through ice_add_prof. If any existing entries + * are associated with this profile, they will be removed as well. 
+ */ +enum ice_status ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id) +{ + struct ice_prof_map *pmap; + enum ice_status status; + + ice_acquire_lock(&hw->blk[blk].es.prof_map_lock); + + pmap = ice_search_prof_id_low(hw, blk, id); + if (!pmap) { + status = ICE_ERR_DOES_NOT_EXIST; + goto err_ice_rem_prof; + } + + /* remove all flows with this profile */ + status = ice_rem_flow_all(hw, blk, pmap->profile_cookie); + if (status) + goto err_ice_rem_prof; + + /* dereference profile, and possibly remove */ + ice_prof_dec_ref(hw, blk, pmap->prof_id); + + LIST_DEL(&pmap->list); + ice_free(hw, pmap); + +err_ice_rem_prof: + ice_release_lock(&hw->blk[blk].es.prof_map_lock); + return status; +} + +/** + * ice_get_prof - get profile + * @hw: pointer to the HW struct + * @blk: hardware block + * @hdl: profile handle + * @chg: change list + */ +static enum ice_status +ice_get_prof(struct ice_hw *hw, enum ice_block blk, u64 hdl, + struct LIST_HEAD_TYPE *chg) +{ + struct ice_prof_map *map; + struct ice_chs_chg *p; + u16 i; + + /* Get the details on the profile specified by the handle ID */ + map = ice_search_prof_id(hw, blk, hdl); + if (!map) + return ICE_ERR_DOES_NOT_EXIST; + + for (i = 0; i < map->ptg_cnt; i++) { + if (!hw->blk[blk].es.written[map->prof_id]) { + /* add ES to change list */ + p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p)); + if (!p) + goto err_ice_get_prof; + + p->type = ICE_PTG_ES_ADD; + p->ptype = 0; + p->ptg = map->ptg[i]; + p->attr = map->attr[i]; + p->add_ptg = 0; + + p->add_prof = 1; + p->prof_id = map->prof_id; + + hw->blk[blk].es.written[map->prof_id] = true; + + LIST_ADD(&p->list_entry, chg); + } + } + + return ICE_SUCCESS; + +err_ice_get_prof: + /* let caller clean up the change list */ + return ICE_ERR_NO_MEMORY; +} + +/** + * ice_get_profs_vsig - get a copy of the list of profiles from a VSIG + * @hw: pointer to the HW struct + * @blk: hardware block + * @vsig: VSIG from which to copy the list + * @lst: output list + * + * This routine makes a copy of the list of profiles in the specified VSIG. 
+ */ +static enum ice_status +ice_get_profs_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, + struct LIST_HEAD_TYPE *lst) +{ + struct ice_vsig_prof *ent1, *ent2; + u16 idx = vsig & ICE_VSIG_IDX_M; + + LIST_FOR_EACH_ENTRY(ent1, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst, + ice_vsig_prof, list) { + struct ice_vsig_prof *p; + + /* copy to the input list */ + p = (struct ice_vsig_prof *)ice_memdup(hw, ent1, sizeof(*p), + ICE_NONDMA_TO_NONDMA); + if (!p) + goto err_ice_get_profs_vsig; + + LIST_ADD_TAIL(&p->list, lst); + } + + return ICE_SUCCESS; + +err_ice_get_profs_vsig: + LIST_FOR_EACH_ENTRY_SAFE(ent1, ent2, lst, ice_vsig_prof, list) { + LIST_DEL(&ent1->list); + ice_free(hw, ent1); + } + + return ICE_ERR_NO_MEMORY; +} + +/** + * ice_add_prof_to_lst - add profile entry to a list + * @hw: pointer to the HW struct + * @blk: hardware block + * @lst: the list to be added to + * @hdl: profile handle of entry to add + */ +static enum ice_status +ice_add_prof_to_lst(struct ice_hw *hw, enum ice_block blk, + struct LIST_HEAD_TYPE *lst, u64 hdl) +{ + struct ice_prof_map *map; + struct ice_vsig_prof *p; + u16 i; + + map = ice_search_prof_id(hw, blk, hdl); + if (!map) + return ICE_ERR_DOES_NOT_EXIST; + + p = (struct ice_vsig_prof *)ice_malloc(hw, sizeof(*p)); + if (!p) + return ICE_ERR_NO_MEMORY; + + p->profile_cookie = map->profile_cookie; + p->prof_id = map->prof_id; + p->tcam_count = map->ptg_cnt; + + for (i = 0; i < map->ptg_cnt; i++) { + p->tcam[i].prof_id = map->prof_id; + p->tcam[i].tcam_idx = ICE_INVALID_TCAM; + p->tcam[i].ptg = map->ptg[i]; + p->tcam[i].attr = map->attr[i]; + } + + LIST_ADD(&p->list, lst); + + return ICE_SUCCESS; +} + +/** + * ice_move_vsi - move VSI to another VSIG + * @hw: pointer to the HW struct + * @blk: hardware block + * @vsi: the VSI to move + * @vsig: the VSIG to move the VSI to + * @chg: the change list + */ +static enum ice_status +ice_move_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig, + struct LIST_HEAD_TYPE *chg) +{ + enum ice_status status; + struct ice_chs_chg *p; + u16 orig_vsig; + + p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p)); + if (!p) + return ICE_ERR_NO_MEMORY; + + status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig); + if (!status) + status = ice_vsig_add_mv_vsi(hw, blk, vsi, vsig); + + if (status) { + ice_free(hw, p); + return status; + } + + p->type = ICE_VSI_MOVE; + p->vsi = vsi; + p->orig_vsig = orig_vsig; + p->vsig = vsig; + + LIST_ADD(&p->list_entry, chg); + + return ICE_SUCCESS; +} + +/** + * ice_set_tcam_flags - set TCAM flag don't care mask + * @mask: mask for flags + * @dc_mask: pointer to the don't care mask + */ +static void ice_set_tcam_flags(u16 mask, u8 dc_mask[ICE_TCAM_KEY_VAL_SZ]) +{ + u16 *flag_word; + + /* flags are lowest u16 */ + flag_word = (u16 *)dc_mask; + *flag_word = ~mask; +} + +/** + * ice_rem_chg_tcam_ent - remove a specific TCAM entry from change list + * @hw: pointer to the HW struct + * @idx: the index of the TCAM entry to remove + * @chg: the list of change structures to search + */ +static void +ice_rem_chg_tcam_ent(struct ice_hw *hw, u16 idx, struct LIST_HEAD_TYPE *chg) +{ + struct ice_chs_chg *pos, *tmp; + + LIST_FOR_EACH_ENTRY_SAFE(tmp, pos, chg, ice_chs_chg, list_entry) { + if (tmp->type == ICE_TCAM_ADD && tmp->tcam_idx == idx) { + LIST_DEL(&tmp->list_entry); + ice_free(hw, tmp); + } + } +} + +/** + * ice_prof_tcam_ena_dis - add enable or disable TCAM change + * @hw: pointer to the HW struct + * @blk: hardware block + * @enable: true to enable, false to disable + * @vsig: the VSIG of the TCAM 
entry + * @tcam: pointer the TCAM info structure of the TCAM to disable + * @chg: the change list + * + * This function appends an enable or disable TCAM entry in the change log + */ +static enum ice_status +ice_prof_tcam_ena_dis(struct ice_hw *hw, enum ice_block blk, bool enable, + u16 vsig, struct ice_tcam_inf *tcam, + struct LIST_HEAD_TYPE *chg) +{ + enum ice_status status; + struct ice_chs_chg *p; + + u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; + u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 }; + u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 }; + + /* if disabling, free the TCAM */ + if (!enable) { + status = ice_rel_tcam_idx(hw, blk, tcam->tcam_idx); + + /* if we have already created a change for this TCAM entry, then + * we need to remove that entry, in order to prevent writing to + * a TCAM entry we no longer will have ownership of. + */ + ice_rem_chg_tcam_ent(hw, tcam->tcam_idx, chg); + tcam->tcam_idx = 0; + tcam->in_use = 0; + return status; + } + + /* for re-enabling, reallocate a TCAM */ + status = ice_alloc_tcam_ent(hw, blk, &tcam->tcam_idx); + if (status) + return status; + + /* add TCAM to change list */ + p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p)); + if (!p) + return ICE_ERR_NO_MEMORY; + + /* set don't care masks for TCAM flags */ + ice_set_tcam_flags(tcam->attr.mask, dc_msk); + + status = ice_tcam_write_entry(hw, blk, tcam->tcam_idx, tcam->prof_id, + tcam->ptg, vsig, 0, tcam->attr.flags, + vl_msk, dc_msk, nm_msk); + if (status) + goto err_ice_prof_tcam_ena_dis; + + tcam->in_use = 1; + + p->type = ICE_TCAM_ADD; + p->add_tcam_idx = true; + p->prof_id = tcam->prof_id; + p->ptg = tcam->ptg; + p->vsig = 0; + p->tcam_idx = tcam->tcam_idx; + + /* log change */ + LIST_ADD(&p->list_entry, chg); + + return ICE_SUCCESS; + +err_ice_prof_tcam_ena_dis: + ice_free(hw, p); + return status; +} + +/** + * ice_ptg_attr_in_use - determine if PTG and attribute pair is in use + * @ptg_attr: pointer to the PTG and attribute pair to check + * @ptgs_used: bitmap that denotes which PTGs are in use + * @attr_used: array of PTG and attributes pairs already used + * @attr_cnt: count of entries in the attr_used array + */ +static bool +ice_ptg_attr_in_use(struct ice_tcam_inf *ptg_attr, ice_bitmap_t *ptgs_used, + struct ice_tcam_inf *attr_used[], u16 attr_cnt) +{ + u16 i; + + if (!ice_is_bit_set(ptgs_used, ptg_attr->ptg)) + return false; + + /* the PTG is used, so now look for correct attributes */ + for (i = 0; i < attr_cnt; i++) + if (attr_used[i]->ptg == ptg_attr->ptg && + attr_used[i]->attr.flags == ptg_attr->attr.flags && + attr_used[i]->attr.mask == ptg_attr->attr.mask) + return true; + + return false; +} + +/** + * ice_adj_prof_priorities - adjust profile based on priorities + * @hw: pointer to the HW struct + * @blk: hardware block + * @vsig: the VSIG for which to adjust profile priorities + * @chg: the change list + */ +static enum ice_status +ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig, + struct LIST_HEAD_TYPE *chg) +{ + ice_declare_bitmap(ptgs_used, ICE_XLT1_CNT); + struct ice_tcam_inf **attr_used; + enum ice_status status = ICE_SUCCESS; + struct ice_vsig_prof *t; + u16 attr_used_cnt = 0; + u16 idx; + +#define ICE_MAX_PTG_ATTRS 1024 + attr_used = (struct ice_tcam_inf **)ice_calloc(hw, ICE_MAX_PTG_ATTRS, + sizeof(*attr_used)); + if (!attr_used) + return ICE_ERR_NO_MEMORY; + + ice_zero_bitmap(ptgs_used, ICE_XLT1_CNT); + idx = vsig & ICE_VSIG_IDX_M; + + /* Priority is based on the order in which the 
profiles are added. The + * newest added profile has highest priority and the oldest added + * profile has the lowest priority. Since the profile property list for + * a VSIG is sorted from newest to oldest, this code traverses the list + * in order and enables the first of each PTG that it finds (that is not + * already enabled); it also disables any duplicate PTGs that it finds + * in the older profiles (that are currently enabled). + */ + + LIST_FOR_EACH_ENTRY(t, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst, + ice_vsig_prof, list) { + u16 i; + + for (i = 0; i < t->tcam_count; i++) { + bool used; + + /* Scan the priorities from newest to oldest. + * Make sure that the newest profiles take priority. + */ + used = ice_ptg_attr_in_use(&t->tcam[i], ptgs_used, + attr_used, attr_used_cnt); + + if (used && t->tcam[i].in_use) { + /* need to mark this PTG as never match, as it + * was already in use and therefore duplicate + * (and lower priority) + */ + status = ice_prof_tcam_ena_dis(hw, blk, false, + vsig, + &t->tcam[i], + chg); + if (status) + goto err_ice_adj_prof_priorities; + } else if (!used && !t->tcam[i].in_use) { + /* need to enable this PTG, as it in not in use + * and not enabled (highest priority) + */ + status = ice_prof_tcam_ena_dis(hw, blk, true, + vsig, + &t->tcam[i], + chg); + if (status) + goto err_ice_adj_prof_priorities; + } + + /* keep track of used ptgs */ + ice_set_bit(t->tcam[i].ptg, ptgs_used); + if (attr_used_cnt < ICE_MAX_PTG_ATTRS) + attr_used[attr_used_cnt++] = &t->tcam[i]; + else + ice_debug(hw, ICE_DBG_INIT, + "Warn: ICE_MAX_PTG_ATTRS exceeded\n"); + } + } + +err_ice_adj_prof_priorities: + ice_free(hw, attr_used); + return status; +} + +/** + * ice_add_prof_id_vsig - add profile to VSIG + * @hw: pointer to the HW struct + * @blk: hardware block + * @vsig: the VSIG to which this profile is to be added + * @hdl: the profile handle indicating the profile to add + * @rev: true to add entries to the end of the list + * @chg: the change list + */ +static enum ice_status +ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl, + bool rev, struct LIST_HEAD_TYPE *chg) +{ + /* Masks that ignore flags */ + u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; + u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 }; + u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 }; + struct ice_prof_map *map; + struct ice_vsig_prof *t; + struct ice_chs_chg *p; + u16 vsig_idx, i; + + /* Get the details on the profile specified by the handle ID */ + map = ice_search_prof_id(hw, blk, hdl); + if (!map) + return ICE_ERR_DOES_NOT_EXIST; + + /* Error, if this VSIG already has this profile */ + if (ice_has_prof_vsig(hw, blk, vsig, hdl)) + return ICE_ERR_ALREADY_EXISTS; + + /* new VSIG profile structure */ + t = (struct ice_vsig_prof *)ice_malloc(hw, sizeof(*t)); + if (!t) + return ICE_ERR_NO_MEMORY; + + t->profile_cookie = map->profile_cookie; + t->prof_id = map->prof_id; + t->tcam_count = map->ptg_cnt; + + /* create TCAM entries */ + for (i = 0; i < map->ptg_cnt; i++) { + enum ice_status status; + u16 tcam_idx; + + /* add TCAM to change list */ + p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p)); + if (!p) + goto err_ice_add_prof_id_vsig; + + /* allocate the TCAM entry index */ + status = ice_alloc_tcam_ent(hw, blk, &tcam_idx); + if (status) { + ice_free(hw, p); + goto err_ice_add_prof_id_vsig; + } + + t->tcam[i].ptg = map->ptg[i]; + t->tcam[i].prof_id = map->prof_id; + t->tcam[i].tcam_idx = tcam_idx; + t->tcam[i].attr = map->attr[i]; 
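+		/* Descriptive note on the steps that follow: the entry is
+		 * marked in use and an ICE_TCAM_ADD change record is queued;
+		 * ice_set_tcam_flags() narrows the flag don't-care mask so
+		 * only the attribute's flag bits must match, then the TCAM
+		 * entry is written and the change is logged so that
+		 * ice_upd_prof_hw() can commit it.
+		 */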
+ t->tcam[i].in_use = true; + + p->type = ICE_TCAM_ADD; + p->add_tcam_idx = true; + p->prof_id = t->tcam[i].prof_id; + p->ptg = t->tcam[i].ptg; + p->vsig = vsig; + p->tcam_idx = t->tcam[i].tcam_idx; + + /* set don't care masks for TCAM flags */ + ice_set_tcam_flags(t->tcam[i].attr.mask, dc_msk); + + /* write the TCAM entry */ + status = ice_tcam_write_entry(hw, blk, t->tcam[i].tcam_idx, + t->tcam[i].prof_id, + t->tcam[i].ptg, vsig, 0, + t->tcam[i].attr.flags, vl_msk, + dc_msk, nm_msk); + if (status) { + ice_free(hw, p); + goto err_ice_add_prof_id_vsig; + } + + /* log change */ + LIST_ADD(&p->list_entry, chg); + } + + /* add profile to VSIG */ + vsig_idx = vsig & ICE_VSIG_IDX_M; + if (rev) + LIST_ADD_TAIL(&t->list, + &hw->blk[blk].xlt2.vsig_tbl[vsig_idx].prop_lst); + else + LIST_ADD(&t->list, + &hw->blk[blk].xlt2.vsig_tbl[vsig_idx].prop_lst); + + return ICE_SUCCESS; + +err_ice_add_prof_id_vsig: + /* let caller clean up the change list */ + ice_free(hw, t); + return ICE_ERR_NO_MEMORY; +} + +/** + * ice_create_prof_id_vsig - add a new VSIG with a single profile + * @hw: pointer to the HW struct + * @blk: hardware block + * @vsi: the initial VSI that will be in VSIG + * @hdl: the profile handle of the profile that will be added to the VSIG + * @chg: the change list + */ +static enum ice_status +ice_create_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl, + struct LIST_HEAD_TYPE *chg) +{ + enum ice_status status; + struct ice_chs_chg *p; + u16 new_vsig; + + p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p)); + if (!p) + return ICE_ERR_NO_MEMORY; + + new_vsig = ice_vsig_alloc(hw, blk); + if (!new_vsig) { + status = ICE_ERR_HW_TABLE; + goto err_ice_create_prof_id_vsig; + } + + status = ice_move_vsi(hw, blk, vsi, new_vsig, chg); + if (status) + goto err_ice_create_prof_id_vsig; + + status = ice_add_prof_id_vsig(hw, blk, new_vsig, hdl, false, chg); + if (status) + goto err_ice_create_prof_id_vsig; + + p->type = ICE_VSIG_ADD; + p->vsi = vsi; + p->orig_vsig = ICE_DEFAULT_VSIG; + p->vsig = new_vsig; + + LIST_ADD(&p->list_entry, chg); + + return ICE_SUCCESS; + +err_ice_create_prof_id_vsig: + /* let caller clean up the change list */ + ice_free(hw, p); + return status; +} + +/** + * ice_create_vsig_from_lst - create a new VSIG with a list of profiles + * @hw: pointer to the HW struct + * @blk: hardware block + * @vsi: the initial VSI that will be in VSIG + * @lst: the list of profile that will be added to the VSIG + * @new_vsig: return of new VSIG + * @chg: the change list + */ +static enum ice_status +ice_create_vsig_from_lst(struct ice_hw *hw, enum ice_block blk, u16 vsi, + struct LIST_HEAD_TYPE *lst, u16 *new_vsig, + struct LIST_HEAD_TYPE *chg) +{ + struct ice_vsig_prof *t; + enum ice_status status; + u16 vsig; + + vsig = ice_vsig_alloc(hw, blk); + if (!vsig) + return ICE_ERR_HW_TABLE; + + status = ice_move_vsi(hw, blk, vsi, vsig, chg); + if (status) + return status; + + LIST_FOR_EACH_ENTRY(t, lst, ice_vsig_prof, list) { + /* Reverse the order here since we are copying the list */ + status = ice_add_prof_id_vsig(hw, blk, vsig, t->profile_cookie, + true, chg); + if (status) + return status; + } + + *new_vsig = vsig; + + return ICE_SUCCESS; +} + +/** + * ice_find_prof_vsig - find a VSIG with a specific profile handle + * @hw: pointer to the HW struct + * @blk: hardware block + * @hdl: the profile handle of the profile to search for + * @vsig: returns the VSIG with the matching profile + */ +static bool +ice_find_prof_vsig(struct ice_hw *hw, enum ice_block blk, u64 hdl, u16 *vsig) 
+{ + struct ice_vsig_prof *t; + struct LIST_HEAD_TYPE lst; + enum ice_status status; + + INIT_LIST_HEAD(&lst); + + t = (struct ice_vsig_prof *)ice_malloc(hw, sizeof(*t)); + if (!t) + return false; + + t->profile_cookie = hdl; + LIST_ADD(&t->list, &lst); + + status = ice_find_dup_props_vsig(hw, blk, &lst, vsig); + + LIST_DEL(&t->list); + ice_free(hw, t); + + return status == ICE_SUCCESS; +} + +/** + * ice_add_vsi_flow - add VSI flow + * @hw: pointer to the HW struct + * @blk: hardware block + * @vsi: input VSI + * @vsig: target VSIG to include the input VSI + * + * Calling this function will add the VSI to a given VSIG and + * update the HW tables accordingly. This call can be used to + * add multiple VSIs to a VSIG if we know beforehand that those + * VSIs have the same characteristics of the VSIG. This will + * save time in generating a new VSIG and TCAMs till a match is + * found and subsequent rollback when a matching VSIG is found. + */ +enum ice_status +ice_add_vsi_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig) +{ + struct ice_chs_chg *tmp, *del; + struct LIST_HEAD_TYPE chg; + enum ice_status status; + + /* if target VSIG is default the move is invalid */ + if ((vsig & ICE_VSIG_IDX_M) == ICE_DEFAULT_VSIG) + return ICE_ERR_PARAM; + + INIT_LIST_HEAD(&chg); + + /* move VSI to the VSIG that matches */ + status = ice_move_vsi(hw, blk, vsi, vsig, &chg); + /* update hardware if success */ + if (!status) + status = ice_upd_prof_hw(hw, blk, &chg); + + LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &chg, ice_chs_chg, list_entry) { + LIST_DEL(&del->list_entry); + ice_free(hw, del); + } + + return status; +} + +/** + * ice_add_prof_id_flow - add profile flow + * @hw: pointer to the HW struct + * @blk: hardware block + * @vsi: the VSI to enable with the profile specified by ID + * @hdl: profile handle + * + * Calling this function will update the hardware tables to enable the + * profile indicated by the ID parameter for the VSIs specified in the VSI + * array. Once successfully called, the flow will be enabled. + */ +enum ice_status +ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl) +{ + struct ice_vsig_prof *tmp1, *del1; + struct LIST_HEAD_TYPE union_lst; + struct ice_chs_chg *tmp, *del; + struct LIST_HEAD_TYPE chg; + enum ice_status status; + u16 vsig; + + INIT_LIST_HEAD(&union_lst); + INIT_LIST_HEAD(&chg); + + /* Get profile */ + status = ice_get_prof(hw, blk, hdl, &chg); + if (status) + return status; + + /* determine if VSI is already part of a VSIG */ + status = ice_vsig_find_vsi(hw, blk, vsi, &vsig); + if (!status && vsig) { + bool only_vsi; + u16 or_vsig; + u16 ref; + + /* found in VSIG */ + or_vsig = vsig; + + /* make sure that there is no overlap/conflict between the new + * characteristics and the existing ones; we don't support that + * scenario + */ + if (ice_has_prof_vsig(hw, blk, vsig, hdl)) { + status = ICE_ERR_ALREADY_EXISTS; + goto err_ice_add_prof_id_flow; + } + + /* last VSI in the VSIG? 
*/ + status = ice_vsig_get_ref(hw, blk, vsig, &ref); + if (status) + goto err_ice_add_prof_id_flow; + only_vsi = (ref == 1); + + /* create a union of the current profiles and the one being + * added + */ + status = ice_get_profs_vsig(hw, blk, vsig, &union_lst); + if (status) + goto err_ice_add_prof_id_flow; + + status = ice_add_prof_to_lst(hw, blk, &union_lst, hdl); + if (status) + goto err_ice_add_prof_id_flow; + + /* search for an existing VSIG with an exact charc match */ + status = ice_find_dup_props_vsig(hw, blk, &union_lst, &vsig); + if (!status) { + /* move VSI to the VSIG that matches */ + status = ice_move_vsi(hw, blk, vsi, vsig, &chg); + if (status) + goto err_ice_add_prof_id_flow; + + /* VSI has been moved out of or_vsig. If the or_vsig had + * only that VSI it is now empty and can be removed. + */ + if (only_vsi) { + status = ice_rem_vsig(hw, blk, or_vsig, &chg); + if (status) + goto err_ice_add_prof_id_flow; + } + } else if (only_vsi) { + /* If the original VSIG only contains one VSI, then it + * will be the requesting VSI. In this case the VSI is + * not sharing entries and we can simply add the new + * profile to the VSIG. + */ + status = ice_add_prof_id_vsig(hw, blk, vsig, hdl, false, + &chg); + if (status) + goto err_ice_add_prof_id_flow; + + /* Adjust priorities */ + status = ice_adj_prof_priorities(hw, blk, vsig, &chg); + if (status) + goto err_ice_add_prof_id_flow; + } else { + /* No match, so we need a new VSIG */ + status = ice_create_vsig_from_lst(hw, blk, vsi, + &union_lst, &vsig, + &chg); + if (status) + goto err_ice_add_prof_id_flow; + + /* Adjust priorities */ + status = ice_adj_prof_priorities(hw, blk, vsig, &chg); + if (status) + goto err_ice_add_prof_id_flow; + } + } else { + /* need to find or add a VSIG */ + /* search for an existing VSIG with an exact charc match */ + if (ice_find_prof_vsig(hw, blk, hdl, &vsig)) { + /* found an exact match */ + /* add or move VSI to the VSIG that matches */ + status = ice_move_vsi(hw, blk, vsi, vsig, &chg); + if (status) + goto err_ice_add_prof_id_flow; + } else { + /* we did not find an exact match */ + /* we need to add a VSIG */ + status = ice_create_prof_id_vsig(hw, blk, vsi, hdl, + &chg); + if (status) + goto err_ice_add_prof_id_flow; + } + } + + /* update hardware */ + if (!status) + status = ice_upd_prof_hw(hw, blk, &chg); + +err_ice_add_prof_id_flow: + LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &chg, ice_chs_chg, list_entry) { + LIST_DEL(&del->list_entry); + ice_free(hw, del); + } + + LIST_FOR_EACH_ENTRY_SAFE(del1, tmp1, &union_lst, ice_vsig_prof, list) { + LIST_DEL(&del1->list); + ice_free(hw, del1); + } + + return status; +} + +/** + * ice_rem_prof_from_list - remove a profile from list + * @hw: pointer to the HW struct + * @lst: list to remove the profile from + * @hdl: the profile handle indicating the profile to remove + */ +static enum ice_status +ice_rem_prof_from_list(struct ice_hw *hw, struct LIST_HEAD_TYPE *lst, u64 hdl) +{ + struct ice_vsig_prof *ent, *tmp; + + LIST_FOR_EACH_ENTRY_SAFE(ent, tmp, lst, ice_vsig_prof, list) { + if (ent->profile_cookie == hdl) { + LIST_DEL(&ent->list); + ice_free(hw, ent); + return ICE_SUCCESS; + } + } + + return ICE_ERR_DOES_NOT_EXIST; +} + +/** + * ice_rem_prof_id_flow - remove flow + * @hw: pointer to the HW struct + * @blk: hardware block + * @vsi: the VSI from which to remove the profile specified by ID + * @hdl: profile tracking handle + * + * Calling this function will update the hardware tables to remove the + * profile indicated by the ID parameter for the VSIs specified in 
the VSI + * array. Once successfully called, the flow will be disabled. + */ +enum ice_status +ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl) +{ + struct ice_vsig_prof *tmp1, *del1; + struct LIST_HEAD_TYPE chg, copy; + struct ice_chs_chg *tmp, *del; + enum ice_status status; + u16 vsig; + + INIT_LIST_HEAD(©); + INIT_LIST_HEAD(&chg); + + /* determine if VSI is already part of a VSIG */ + status = ice_vsig_find_vsi(hw, blk, vsi, &vsig); + if (!status && vsig) { + bool last_profile; + bool only_vsi; + u16 ref; + + /* found in VSIG */ + last_profile = ice_vsig_prof_id_count(hw, blk, vsig) == 1; + status = ice_vsig_get_ref(hw, blk, vsig, &ref); + if (status) + goto err_ice_rem_prof_id_flow; + only_vsi = (ref == 1); + + if (only_vsi) { + /* If the original VSIG only contains one reference, + * which will be the requesting VSI, then the VSI is not + * sharing entries and we can simply remove the specific + * characteristics from the VSIG. + */ + + if (last_profile) { + /* If there are no profiles left for this VSIG, + * then simply remove the the VSIG. + */ + status = ice_rem_vsig(hw, blk, vsig, &chg); + if (status) + goto err_ice_rem_prof_id_flow; + } else { + status = ice_rem_prof_id_vsig(hw, blk, vsig, + hdl, &chg); + if (status) + goto err_ice_rem_prof_id_flow; + + /* Adjust priorities */ + status = ice_adj_prof_priorities(hw, blk, vsig, + &chg); + if (status) + goto err_ice_rem_prof_id_flow; + } + + } else { + /* Make a copy of the VSIG's list of Profiles */ + status = ice_get_profs_vsig(hw, blk, vsig, ©); + if (status) + goto err_ice_rem_prof_id_flow; + + /* Remove specified profile entry from the list */ + status = ice_rem_prof_from_list(hw, ©, hdl); + if (status) + goto err_ice_rem_prof_id_flow; + + if (LIST_EMPTY(©)) { + status = ice_move_vsi(hw, blk, vsi, + ICE_DEFAULT_VSIG, &chg); + if (status) + goto err_ice_rem_prof_id_flow; + + } else if (!ice_find_dup_props_vsig(hw, blk, ©, + &vsig)) { + /* found an exact match */ + /* add or move VSI to the VSIG that matches */ + /* Search for a VSIG with a matching profile + * list + */ + + /* Found match, move VSI to the matching VSIG */ + status = ice_move_vsi(hw, blk, vsi, vsig, &chg); + if (status) + goto err_ice_rem_prof_id_flow; + } else { + /* since no existing VSIG supports this + * characteristic pattern, we need to create a + * new VSIG and TCAM entries + */ + status = ice_create_vsig_from_lst(hw, blk, vsi, + ©, &vsig, + &chg); + if (status) + goto err_ice_rem_prof_id_flow; + + /* Adjust priorities */ + status = ice_adj_prof_priorities(hw, blk, vsig, + &chg); + if (status) + goto err_ice_rem_prof_id_flow; + } + } + } else { + status = ICE_ERR_DOES_NOT_EXIST; + } + + /* update hardware tables */ + if (!status) + status = ice_upd_prof_hw(hw, blk, &chg); + +err_ice_rem_prof_id_flow: + LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &chg, ice_chs_chg, list_entry) { + LIST_DEL(&del->list_entry); + ice_free(hw, del); + } + + LIST_FOR_EACH_ENTRY_SAFE(del1, tmp1, ©, ice_vsig_prof, list) { + LIST_DEL(&del1->list); + ice_free(hw, del1); + } + + return status; +} diff --git a/src/spdk/dpdk/drivers/net/ice/base/ice_flex_pipe.h b/src/spdk/dpdk/drivers/net/ice/base/ice_flex_pipe.h new file mode 100644 index 000000000..214c7a283 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ice/base/ice_flex_pipe.h @@ -0,0 +1,82 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + +#ifndef _ICE_FLEX_PIPE_H_ +#define _ICE_FLEX_PIPE_H_ + +#include "ice_type.h" + +/* Package minimal version supported */ 
+#define ICE_PKG_SUPP_VER_MAJ 1 +#define ICE_PKG_SUPP_VER_MNR 3 + +/* Package format version */ +#define ICE_PKG_FMT_VER_MAJ 1 +#define ICE_PKG_FMT_VER_MNR 0 +#define ICE_PKG_FMT_VER_UPD 0 +#define ICE_PKG_FMT_VER_DFT 0 + +#define ICE_PKG_CNT 4 + +enum ice_status +ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count); +enum ice_status +ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access); +void ice_release_change_lock(struct ice_hw *hw); +enum ice_status +ice_find_prot_off(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 fv_idx, + u8 *prot, u16 *off); +enum ice_status +ice_find_label_value(struct ice_seg *ice_seg, char const *name, u32 type, + u16 *value); +void +ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type type, + ice_bitmap_t *bm); +void +ice_init_prof_result_bm(struct ice_hw *hw); +enum ice_status +ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt, + ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list); +bool +ice_get_open_tunnel_port(struct ice_hw *hw, enum ice_tunnel_type type, + u16 *port); +enum ice_status +ice_create_tunnel(struct ice_hw *hw, enum ice_tunnel_type type, u16 port); +enum ice_status ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all); +bool ice_tunnel_port_in_use(struct ice_hw *hw, u16 port, u16 *index); +bool +ice_tunnel_get_type(struct ice_hw *hw, u16 port, enum ice_tunnel_type *type); + +/* XLT2/VSI group functions */ +enum ice_status +ice_vsig_find_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 *vsig); +enum ice_status +ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[], + const struct ice_ptype_attributes *attr, u16 attr_cnt, + struct ice_fv_word *es, u16 *masks); +void ice_init_all_prof_masks(struct ice_hw *hw); +void ice_shutdown_all_prof_masks(struct ice_hw *hw); +struct ice_prof_map * +ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id); +enum ice_status +ice_add_vsi_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig); +enum ice_status +ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl); +enum ice_status +ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl); +enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buff, u32 len); +enum ice_status +ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len); +enum ice_status ice_init_hw_tbls(struct ice_hw *hw); +void ice_free_seg(struct ice_hw *hw); +void ice_fill_blk_tbls(struct ice_hw *hw); +void ice_clear_hw_tbls(struct ice_hw *hw); +void ice_free_hw_tbls(struct ice_hw *hw); +enum ice_status +ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id); + +enum ice_status +ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off, + u16 len); +#endif /* _ICE_FLEX_PIPE_H_ */ diff --git a/src/spdk/dpdk/drivers/net/ice/base/ice_flex_type.h b/src/spdk/dpdk/drivers/net/ice/base/ice_flex_type.h new file mode 100644 index 000000000..b58007fb3 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ice/base/ice_flex_type.h @@ -0,0 +1,790 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + +#ifndef _ICE_FLEX_TYPE_H_ +#define _ICE_FLEX_TYPE_H_ + +#define ICE_FV_OFFSET_INVAL 0x1FF + +#pragma pack(1) +/* Extraction Sequence (Field Vector) Table */ +struct ice_fv_word { + u8 prot_id; + u16 off; /* Offset within the protocol header */ + u8 resvrd; +}; +#pragma pack() + +#define ICE_MAX_NUM_PROFILES 256 + +#define ICE_MAX_FV_WORDS 48 +struct ice_fv { + struct ice_fv_word ew[ICE_MAX_FV_WORDS]; +}; 
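+/* Illustrative note, not part of the upstream layout definition: with
+ * #pragma pack(1) each extraction-sequence word above is exactly 4 bytes
+ * (1-byte protocol ID, 2-byte offset, 1-byte reserved), so a full field
+ * vector is ICE_MAX_FV_WORDS * 4 = 192 bytes. A minimal sketch of
+ * compile-time checks one could add locally, assuming a C11 toolchain:
+ *
+ *	_Static_assert(sizeof(struct ice_fv_word) == 4,
+ *		       "packed ice_fv_word must be 4 bytes");
+ *	_Static_assert(sizeof(struct ice_fv) == ICE_MAX_FV_WORDS * 4,
+ *		       "ice_fv must hold 48 packed words");
+ */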
+ +/* Package and segment headers and tables */ +struct ice_pkg_hdr { + struct ice_pkg_ver pkg_format_ver; + __le32 seg_count; + __le32 seg_offset[1]; +}; + +/* generic segment */ +struct ice_generic_seg_hdr { +#define SEGMENT_TYPE_METADATA 0x00000001 +#define SEGMENT_TYPE_ICE 0x00000010 + __le32 seg_type; + struct ice_pkg_ver seg_format_ver; + __le32 seg_size; + char seg_id[ICE_PKG_NAME_SIZE]; +}; + +/* ice specific segment */ + +union ice_device_id { + struct { + __le16 device_id; + __le16 vendor_id; + } dev_vend_id; + __le32 id; +}; + +struct ice_device_id_entry { + union ice_device_id device; + union ice_device_id sub_device; +}; + +struct ice_seg { + struct ice_generic_seg_hdr hdr; + __le32 device_table_count; + struct ice_device_id_entry device_table[1]; +}; + +struct ice_nvm_table { + __le32 table_count; + __le32 vers[1]; +}; + +struct ice_buf { +#define ICE_PKG_BUF_SIZE 4096 + u8 buf[ICE_PKG_BUF_SIZE]; +}; + +struct ice_buf_table { + __le32 buf_count; + struct ice_buf buf_array[1]; +}; + +/* global metadata specific segment */ +struct ice_global_metadata_seg { + struct ice_generic_seg_hdr hdr; + struct ice_pkg_ver pkg_ver; + __le32 rsvd; + char pkg_name[ICE_PKG_NAME_SIZE]; +}; + +#define ICE_MIN_S_OFF 12 +#define ICE_MAX_S_OFF 4095 +#define ICE_MIN_S_SZ 1 +#define ICE_MAX_S_SZ 4084 + +/* section information */ +struct ice_section_entry { + __le32 type; + __le16 offset; + __le16 size; +}; + +#define ICE_MIN_S_COUNT 1 +#define ICE_MAX_S_COUNT 511 +#define ICE_MIN_S_DATA_END 12 +#define ICE_MAX_S_DATA_END 4096 + +#define ICE_METADATA_BUF 0x80000000 + +struct ice_buf_hdr { + __le16 section_count; + __le16 data_end; + struct ice_section_entry section_entry[1]; +}; + +#define ICE_MAX_ENTRIES_IN_BUF(hd_sz, ent_sz) ((ICE_PKG_BUF_SIZE - \ + sizeof(struct ice_buf_hdr) - (hd_sz)) / (ent_sz)) + +/* ice package section IDs */ +#define ICE_SID_XLT0_SW 10 +#define ICE_SID_XLT_KEY_BUILDER_SW 11 +#define ICE_SID_XLT1_SW 12 +#define ICE_SID_XLT2_SW 13 +#define ICE_SID_PROFID_TCAM_SW 14 +#define ICE_SID_PROFID_REDIR_SW 15 +#define ICE_SID_FLD_VEC_SW 16 +#define ICE_SID_CDID_KEY_BUILDER_SW 17 +#define ICE_SID_CDID_REDIR_SW 18 + +#define ICE_SID_XLT0_ACL 20 +#define ICE_SID_XLT_KEY_BUILDER_ACL 21 +#define ICE_SID_XLT1_ACL 22 +#define ICE_SID_XLT2_ACL 23 +#define ICE_SID_PROFID_TCAM_ACL 24 +#define ICE_SID_PROFID_REDIR_ACL 25 +#define ICE_SID_FLD_VEC_ACL 26 +#define ICE_SID_CDID_KEY_BUILDER_ACL 27 +#define ICE_SID_CDID_REDIR_ACL 28 + +#define ICE_SID_XLT0_FD 30 +#define ICE_SID_XLT_KEY_BUILDER_FD 31 +#define ICE_SID_XLT1_FD 32 +#define ICE_SID_XLT2_FD 33 +#define ICE_SID_PROFID_TCAM_FD 34 +#define ICE_SID_PROFID_REDIR_FD 35 +#define ICE_SID_FLD_VEC_FD 36 +#define ICE_SID_CDID_KEY_BUILDER_FD 37 +#define ICE_SID_CDID_REDIR_FD 38 + +#define ICE_SID_XLT0_RSS 40 +#define ICE_SID_XLT_KEY_BUILDER_RSS 41 +#define ICE_SID_XLT1_RSS 42 +#define ICE_SID_XLT2_RSS 43 +#define ICE_SID_PROFID_TCAM_RSS 44 +#define ICE_SID_PROFID_REDIR_RSS 45 +#define ICE_SID_FLD_VEC_RSS 46 +#define ICE_SID_CDID_KEY_BUILDER_RSS 47 +#define ICE_SID_CDID_REDIR_RSS 48 + +#define ICE_SID_RXPARSER_CAM 50 +#define ICE_SID_RXPARSER_NOMATCH_CAM 51 +#define ICE_SID_RXPARSER_IMEM 52 +#define ICE_SID_RXPARSER_XLT0_BUILDER 53 +#define ICE_SID_RXPARSER_NODE_PTYPE 54 +#define ICE_SID_RXPARSER_MARKER_PTYPE 55 +#define ICE_SID_RXPARSER_BOOST_TCAM 56 +#define ICE_SID_RXPARSER_PROTO_GRP 57 +#define ICE_SID_RXPARSER_METADATA_INIT 58 +#define ICE_SID_RXPARSER_XLT0 59 + +#define ICE_SID_TXPARSER_CAM 60 +#define ICE_SID_TXPARSER_NOMATCH_CAM 61 +#define 
ICE_SID_TXPARSER_IMEM 62 +#define ICE_SID_TXPARSER_XLT0_BUILDER 63 +#define ICE_SID_TXPARSER_NODE_PTYPE 64 +#define ICE_SID_TXPARSER_MARKER_PTYPE 65 +#define ICE_SID_TXPARSER_BOOST_TCAM 66 +#define ICE_SID_TXPARSER_PROTO_GRP 67 +#define ICE_SID_TXPARSER_METADATA_INIT 68 +#define ICE_SID_TXPARSER_XLT0 69 + +#define ICE_SID_RXPARSER_INIT_REDIR 70 +#define ICE_SID_TXPARSER_INIT_REDIR 71 +#define ICE_SID_RXPARSER_MARKER_GRP 72 +#define ICE_SID_TXPARSER_MARKER_GRP 73 +#define ICE_SID_RXPARSER_LAST_PROTO 74 +#define ICE_SID_TXPARSER_LAST_PROTO 75 +#define ICE_SID_RXPARSER_PG_SPILL 76 +#define ICE_SID_TXPARSER_PG_SPILL 77 +#define ICE_SID_RXPARSER_NOMATCH_SPILL 78 +#define ICE_SID_TXPARSER_NOMATCH_SPILL 79 + +#define ICE_SID_XLT0_PE 80 +#define ICE_SID_XLT_KEY_BUILDER_PE 81 +#define ICE_SID_XLT1_PE 82 +#define ICE_SID_XLT2_PE 83 +#define ICE_SID_PROFID_TCAM_PE 84 +#define ICE_SID_PROFID_REDIR_PE 85 +#define ICE_SID_FLD_VEC_PE 86 +#define ICE_SID_CDID_KEY_BUILDER_PE 87 +#define ICE_SID_CDID_REDIR_PE 88 + +/* Label Metadata section IDs */ +#define ICE_SID_LBL_FIRST 0x80000010 +#define ICE_SID_LBL_RXPARSER_IMEM 0x80000010 +#define ICE_SID_LBL_TXPARSER_IMEM 0x80000011 +#define ICE_SID_LBL_RESERVED_12 0x80000012 +#define ICE_SID_LBL_RESERVED_13 0x80000013 +#define ICE_SID_LBL_RXPARSER_MARKER 0x80000014 +#define ICE_SID_LBL_TXPARSER_MARKER 0x80000015 +#define ICE_SID_LBL_PTYPE 0x80000016 +#define ICE_SID_LBL_PROTOCOL_ID 0x80000017 +#define ICE_SID_LBL_RXPARSER_TMEM 0x80000018 +#define ICE_SID_LBL_TXPARSER_TMEM 0x80000019 +#define ICE_SID_LBL_RXPARSER_PG 0x8000001A +#define ICE_SID_LBL_TXPARSER_PG 0x8000001B +#define ICE_SID_LBL_RXPARSER_M_TCAM 0x8000001C +#define ICE_SID_LBL_TXPARSER_M_TCAM 0x8000001D +#define ICE_SID_LBL_SW_PROFID_TCAM 0x8000001E +#define ICE_SID_LBL_ACL_PROFID_TCAM 0x8000001F +#define ICE_SID_LBL_PE_PROFID_TCAM 0x80000020 +#define ICE_SID_LBL_RSS_PROFID_TCAM 0x80000021 +#define ICE_SID_LBL_FD_PROFID_TCAM 0x80000022 +#define ICE_SID_LBL_FLAG 0x80000023 +#define ICE_SID_LBL_REG 0x80000024 +#define ICE_SID_LBL_SW_PTG 0x80000025 +#define ICE_SID_LBL_ACL_PTG 0x80000026 +#define ICE_SID_LBL_PE_PTG 0x80000027 +#define ICE_SID_LBL_RSS_PTG 0x80000028 +#define ICE_SID_LBL_FD_PTG 0x80000029 +#define ICE_SID_LBL_SW_VSIG 0x8000002A +#define ICE_SID_LBL_ACL_VSIG 0x8000002B +#define ICE_SID_LBL_PE_VSIG 0x8000002C +#define ICE_SID_LBL_RSS_VSIG 0x8000002D +#define ICE_SID_LBL_FD_VSIG 0x8000002E +#define ICE_SID_LBL_PTYPE_META 0x8000002F +#define ICE_SID_LBL_SW_PROFID 0x80000030 +#define ICE_SID_LBL_ACL_PROFID 0x80000031 +#define ICE_SID_LBL_PE_PROFID 0x80000032 +#define ICE_SID_LBL_RSS_PROFID 0x80000033 +#define ICE_SID_LBL_FD_PROFID 0x80000034 +#define ICE_SID_LBL_RXPARSER_MARKER_GRP 0x80000035 +#define ICE_SID_LBL_TXPARSER_MARKER_GRP 0x80000036 +#define ICE_SID_LBL_RXPARSER_PROTO 0x80000037 +#define ICE_SID_LBL_TXPARSER_PROTO 0x80000038 +/* The following define MUST be updated to reflect the last label section ID */ +#define ICE_SID_LBL_LAST 0x80000038 + +enum ice_block { + ICE_BLK_SW = 0, + ICE_BLK_ACL, + ICE_BLK_FD, + ICE_BLK_RSS, + ICE_BLK_PE, + ICE_BLK_COUNT +}; + +enum ice_sect { + ICE_XLT0 = 0, + ICE_XLT_KB, + ICE_XLT1, + ICE_XLT2, + ICE_PROF_TCAM, + ICE_PROF_REDIR, + ICE_VEC_TBL, + ICE_CDID_KB, + ICE_CDID_REDIR, + ICE_SECT_COUNT +}; + +/* Packet Type (PTYPE) values */ +#define ICE_PTYPE_MAC_PAY 1 +#define ICE_PTYPE_IPV4FRAG_PAY 22 +#define ICE_PTYPE_IPV4_PAY 23 +#define ICE_PTYPE_IPV4_UDP_PAY 24 +#define ICE_PTYPE_IPV4_TCP_PAY 26 +#define ICE_PTYPE_IPV4_SCTP_PAY 27 +#define 
ICE_PTYPE_IPV4_ICMP_PAY 28 +#define ICE_PTYPE_IPV6FRAG_PAY 88 +#define ICE_PTYPE_IPV6_PAY 89 +#define ICE_PTYPE_IPV6_UDP_PAY 90 +#define ICE_PTYPE_IPV6_TCP_PAY 92 +#define ICE_PTYPE_IPV6_SCTP_PAY 93 +#define ICE_PTYPE_IPV6_ICMP_PAY 94 +#define ICE_MAC_IPV4_GTPC_TEID 325 +#define ICE_MAC_IPV6_GTPC_TEID 326 +#define ICE_MAC_IPV4_GTPC 327 +#define ICE_MAC_IPV6_GTPC 328 +#define ICE_MAC_IPV4_GTPU 329 +#define ICE_MAC_IPV6_GTPU 330 +#define ICE_MAC_IPV4_GTPU_IPV4_FRAG 331 +#define ICE_MAC_IPV4_GTPU_IPV4_PAY 332 +#define ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY 333 +#define ICE_MAC_IPV4_GTPU_IPV4_TCP 334 +#define ICE_MAC_IPV4_GTPU_IPV4_ICMP 335 +#define ICE_MAC_IPV6_GTPU_IPV4_FRAG 336 +#define ICE_MAC_IPV6_GTPU_IPV4_PAY 337 +#define ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY 338 +#define ICE_MAC_IPV6_GTPU_IPV4_TCP 339 +#define ICE_MAC_IPV6_GTPU_IPV4_ICMP 340 +#define ICE_MAC_IPV4_GTPU_IPV6_FRAG 341 +#define ICE_MAC_IPV4_GTPU_IPV6_PAY 342 +#define ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY 343 +#define ICE_MAC_IPV4_GTPU_IPV6_TCP 344 +#define ICE_MAC_IPV4_GTPU_IPV6_ICMPV6 345 +#define ICE_MAC_IPV6_GTPU_IPV6_FRAG 346 +#define ICE_MAC_IPV6_GTPU_IPV6_PAY 347 +#define ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY 348 +#define ICE_MAC_IPV6_GTPU_IPV6_TCP 349 +#define ICE_MAC_IPV6_GTPU_IPV6_ICMPV6 350 + +/* Attributes that can modify PTYPE definitions. + * + * These values will represent special attributes for PTYPES, which will + * resolve into metadata packet flags definitions that can be used in the TCAM + * for identifying a PTYPE with specific characteristics. + */ +enum ice_ptype_attrib_type { + /* GTP PTYPES */ + ICE_PTYPE_ATTR_GTP_PDU_EH, + ICE_PTYPE_ATTR_GTP_SESSION, + ICE_PTYPE_ATTR_GTP_DOWNLINK, + ICE_PTYPE_ATTR_GTP_UPLINK, +}; + +struct ice_ptype_attrib_info { + u16 flags; + u16 mask; +}; + +/* TCAM flag definitions */ +#define ICE_GTP_PDU BIT(14) +#define ICE_GTP_PDU_LINK BIT(13) + +/* GTP attributes */ +#define ICE_GTP_PDU_FLAG_MASK (ICE_GTP_PDU) +#define ICE_GTP_PDU_EH ICE_GTP_PDU + +#define ICE_GTP_FLAGS_MASK (ICE_GTP_PDU | ICE_GTP_PDU_LINK) +#define ICE_GTP_SESSION 0 +#define ICE_GTP_DOWNLINK ICE_GTP_PDU +#define ICE_GTP_UPLINK (ICE_GTP_PDU | ICE_GTP_PDU_LINK) + +struct ice_ptype_attributes { + u16 ptype; + enum ice_ptype_attrib_type attrib; +}; + +/* Packet Type Groups (PTG) - Inner Most fields (IM) */ +#define ICE_PTG_IM_IPV4_TCP 16 +#define ICE_PTG_IM_IPV4_UDP 17 +#define ICE_PTG_IM_IPV4_SCTP 18 +#define ICE_PTG_IM_IPV4_PAY 20 +#define ICE_PTG_IM_IPV4_OTHER 21 +#define ICE_PTG_IM_IPV6_TCP 32 +#define ICE_PTG_IM_IPV6_UDP 33 +#define ICE_PTG_IM_IPV6_SCTP 34 +#define ICE_PTG_IM_IPV6_OTHER 37 +#define ICE_PTG_IM_L2_OTHER 67 + +struct ice_flex_fields { + union { + struct { + u8 src_ip; + u8 dst_ip; + u8 flow_label; /* valid for IPv6 only */ + } ip_fields; + + struct { + u8 src_prt; + u8 dst_prt; + } tcp_udp_fields; + + struct { + u8 src_ip; + u8 dst_ip; + u8 src_prt; + u8 dst_prt; + } ip_tcp_udp_fields; + + struct { + u8 src_prt; + u8 dst_prt; + u8 flow_label; /* valid for IPv6 only */ + u8 spi; + } ip_esp_fields; + + struct { + u32 offset; + u32 length; + } off_len; + } fields; +}; + +#define ICE_XLT1_DFLT_GRP 0 +#define ICE_XLT1_TABLE_SIZE 1024 + +/* package labels */ +struct ice_label { + __le16 value; +#define ICE_PKG_LABEL_SIZE 64 + char name[ICE_PKG_LABEL_SIZE]; +}; + +struct ice_label_section { + __le16 count; + struct ice_label label[1]; +}; + +#define ICE_MAX_LABELS_IN_BUF ICE_MAX_ENTRIES_IN_BUF( \ + sizeof(struct ice_label_section) - sizeof(struct ice_label), \ + sizeof(struct ice_label)) + +struct ice_sw_fv_section { + __le16 
count; + __le16 base_offset; + struct ice_fv fv[1]; +}; + +struct ice_sw_fv_list_entry { + struct LIST_ENTRY_TYPE list_entry; + u32 profile_id; + struct ice_fv *fv_ptr; +}; + +#pragma pack(1) +/* The BOOST TCAM stores the match packet header in reverse order, meaning + * the fields are reversed; in addition, this means that the normally big endian + * fields of the packet are now little endian. + */ +struct ice_boost_key_value { +#define ICE_BOOST_REMAINING_HV_KEY 15 + u8 remaining_hv_key[ICE_BOOST_REMAINING_HV_KEY]; + __le16 hv_dst_port_key; + __le16 hv_src_port_key; + u8 tcam_search_key; +}; +#pragma pack() + +struct ice_boost_key { + struct ice_boost_key_value key; + struct ice_boost_key_value key2; +}; + +/* package Boost TCAM entry */ +struct ice_boost_tcam_entry { + __le16 addr; + __le16 reserved; + /* break up the 40 bytes of key into different fields */ + struct ice_boost_key key; + u8 boost_hit_index_group; + /* The following contains bitfields which are not on byte boundaries. + * These fields are currently unused by driver software. + */ +#define ICE_BOOST_BIT_FIELDS 43 + u8 bit_fields[ICE_BOOST_BIT_FIELDS]; +}; + +struct ice_boost_tcam_section { + __le16 count; + __le16 reserved; + struct ice_boost_tcam_entry tcam[1]; +}; + +#define ICE_MAX_BST_TCAMS_IN_BUF ICE_MAX_ENTRIES_IN_BUF( \ + sizeof(struct ice_boost_tcam_section) - \ + sizeof(struct ice_boost_tcam_entry), \ + sizeof(struct ice_boost_tcam_entry)) + +#pragma pack(1) +struct ice_xlt1_section { + __le16 count; + __le16 offset; + u8 value[1]; +}; +#pragma pack() + +#define ICE_XLT1_SIZE(n) (sizeof(struct ice_xlt1_section) + \ + (sizeof(u8) * ((n) - 1))) + +struct ice_xlt2_section { + __le16 count; + __le16 offset; + __le16 value[1]; +}; + +#define ICE_XLT2_SIZE(n) (sizeof(struct ice_xlt2_section) + \ + (sizeof(u16) * ((n) - 1))) + +struct ice_prof_redir_section { + __le16 count; + __le16 offset; + u8 redir_value[1]; +}; + +#define ICE_PROF_REDIR_SIZE(n) (sizeof(struct ice_prof_redir_section) + \ + (sizeof(u8) * ((n) - 1))) + +/* package buffer building */ + +struct ice_buf_build { + struct ice_buf buf; + u16 reserved_section_table_entries; +}; + +struct ice_pkg_enum { + struct ice_buf_table *buf_table; + u32 buf_idx; + + u32 type; + struct ice_buf_hdr *buf; + u32 sect_idx; + void *sect; + u32 sect_type; + + u32 entry_idx; + void *(*handler)(u32 sect_type, void *section, u32 index, u32 *offset); +}; + +/* Tunnel enabling */ + +enum ice_tunnel_type { + TNL_VXLAN = 0, + TNL_GENEVE, + TNL_LAST = 0xFF, + TNL_ALL = 0xFF, +}; + +struct ice_tunnel_type_scan { + enum ice_tunnel_type type; + const char *label_prefix; +}; + +struct ice_tunnel_entry { + enum ice_tunnel_type type; + u16 boost_addr; + u16 port; + u16 ref; + struct ice_boost_tcam_entry *boost_entry; + u8 valid; + u8 in_use; + u8 marked; +}; + +#define ICE_TUNNEL_MAX_ENTRIES 16 + +struct ice_tunnel_table { + struct ice_tunnel_entry tbl[ICE_TUNNEL_MAX_ENTRIES]; + u16 count; +}; + +struct ice_pkg_es { + __le16 count; + __le16 offset; + struct ice_fv_word es[1]; +}; + +struct ice_es { + u32 sid; + u16 count; + u16 fvw; + u16 *ref_count; + u32 *mask_ena; + struct LIST_HEAD_TYPE prof_map; + struct ice_fv_word *t; + struct ice_lock prof_map_lock; /* protect access to profiles list */ + u8 *written; + u8 reverse; /* set to true to reverse FV order */ +}; + +/* PTYPE Group management */ + +/* Note: XLT1 table takes 13-bit as input, and results in an 8-bit packet type + * group (PTG) ID as output. 
+ * + * Note: PTG 0 is the default packet type group and it is assumed that all PTYPE + * are a part of this group until moved to a new PTG. + */ +#define ICE_DEFAULT_PTG 0 + +struct ice_ptg_entry { + struct ice_ptg_ptype *first_ptype; + u8 in_use; +}; + +struct ice_ptg_ptype { + struct ice_ptg_ptype *next_ptype; + u8 ptg; +}; + +#define ICE_MAX_TCAM_PER_PROFILE 32 +#define ICE_MAX_PTG_PER_PROFILE 32 + +struct ice_prof_map { + struct LIST_ENTRY_TYPE list; + u64 profile_cookie; + u64 context; + u8 prof_id; + u8 ptg_cnt; + u8 ptg[ICE_MAX_PTG_PER_PROFILE]; + struct ice_ptype_attrib_info attr[ICE_MAX_PTG_PER_PROFILE]; +}; + +#define ICE_INVALID_TCAM 0xFFFF + +struct ice_tcam_inf { + u16 tcam_idx; + struct ice_ptype_attrib_info attr; + u8 ptg; + u8 prof_id; + u8 in_use; +}; + +struct ice_vsig_prof { + struct LIST_ENTRY_TYPE list; + u64 profile_cookie; + u8 prof_id; + u8 tcam_count; + struct ice_tcam_inf tcam[ICE_MAX_TCAM_PER_PROFILE]; +}; + +struct ice_vsig_entry { + struct LIST_HEAD_TYPE prop_lst; + struct ice_vsig_vsi *first_vsi; + u8 in_use; +}; + +struct ice_vsig_vsi { + struct ice_vsig_vsi *next_vsi; + u32 prop_mask; + u16 changed; + u16 vsig; +}; + +#define ICE_XLT1_CNT 1024 +#define ICE_MAX_PTGS 256 + +/* XLT1 Table */ +struct ice_xlt1 { + struct ice_ptg_entry *ptg_tbl; + struct ice_ptg_ptype *ptypes; + u8 *t; + u32 sid; + u16 count; +}; + +#define ICE_XLT2_CNT 768 +#define ICE_MAX_VSIGS 768 + +/* VSIG bit layout: + * [0:12]: incremental VSIG index 1 to ICE_MAX_VSIGS + * [13:15]: PF number of device + */ +#define ICE_VSIG_IDX_M (0x1FFF) +#define ICE_PF_NUM_S 13 +#define ICE_PF_NUM_M (0x07 << ICE_PF_NUM_S) +#define ICE_VSIG_VALUE(vsig, pf_id) \ + (u16)((((u16)(vsig)) & ICE_VSIG_IDX_M) | \ + (((u16)(pf_id) << ICE_PF_NUM_S) & ICE_PF_NUM_M)) +#define ICE_DEFAULT_VSIG 0 + +/* XLT2 Table */ +struct ice_xlt2 { + struct ice_vsig_entry *vsig_tbl; + struct ice_vsig_vsi *vsis; + u16 *t; + u32 sid; + u16 count; +}; + +/* Extraction sequence - list of match fields: + * protocol ID, offset, profile length + */ +union ice_match_fld { + struct { + u8 prot_id; + u8 offset; + u8 length; + u8 reserved; /* must be zero */ + } fld; + u32 val; +}; + +#define ICE_MATCH_LIST_SZ 20 +#pragma pack(1) +struct ice_match { + u8 count; + union ice_match_fld list[ICE_MATCH_LIST_SZ]; +}; + +/* Profile ID Management */ +struct ice_prof_id_key { + __le16 flags; + u8 xlt1; + __le16 xlt2_cdid; +}; + +/* Keys are made up of two values, each one-half the size of the key. 
+ * For TCAM, the entire key is 80 bits wide (or 2, 40-bit wide values) + */ +#define ICE_TCAM_KEY_VAL_SZ 5 +#define ICE_TCAM_KEY_SZ (2 * ICE_TCAM_KEY_VAL_SZ) + +struct ice_prof_tcam_entry { + __le16 addr; + u8 key[ICE_TCAM_KEY_SZ]; + u8 prof_id; +}; + +struct ice_prof_id_section { + __le16 count; + struct ice_prof_tcam_entry entry[1]; +}; +#pragma pack() + +struct ice_prof_tcam { + u32 sid; + u16 count; + u16 max_prof_id; + struct ice_prof_tcam_entry *t; + u8 cdid_bits; /* # CDID bits to use in key, 0, 2, 4, or 8 */ +}; + +struct ice_prof_redir { + u8 *t; + u32 sid; + u16 count; +}; + +struct ice_mask { + u16 mask; /* 16-bit mask */ + u16 idx; /* index */ + u16 ref; /* reference count */ + u8 in_use; /* non-zero if used */ +}; + +struct ice_masks { + struct ice_lock lock; /* lock to protect this structure */ + u16 first; /* first mask owned by the PF */ + u16 count; /* number of masks owned by the PF */ +#define ICE_PROF_MASK_COUNT 32 + struct ice_mask masks[ICE_PROF_MASK_COUNT]; +}; + +/* Tables per block */ +struct ice_blk_info { + struct ice_xlt1 xlt1; + struct ice_xlt2 xlt2; + struct ice_prof_tcam prof; + struct ice_prof_redir prof_redir; + struct ice_es es; + struct ice_masks masks; + u8 overwrite; /* set to true to allow overwrite of table entries */ + u8 is_list_init; +}; + +enum ice_chg_type { + ICE_TCAM_NONE = 0, + ICE_PTG_ES_ADD, + ICE_TCAM_ADD, + ICE_VSIG_ADD, + ICE_VSIG_REM, + ICE_VSI_MOVE, +}; + +struct ice_chs_chg { + struct LIST_ENTRY_TYPE list_entry; + enum ice_chg_type type; + + u8 add_ptg; + u8 add_vsig; + u8 add_tcam_idx; + u8 add_prof; + u16 ptype; + u8 ptg; + u8 prof_id; + u16 vsi; + u16 vsig; + u16 orig_vsig; + u16 tcam_idx; + struct ice_ptype_attrib_info attr; +}; + +#define ICE_FLOW_PTYPE_MAX ICE_XLT1_CNT + +enum ice_prof_type { + ICE_PROF_NON_TUN = 0x1, + ICE_PROF_TUN_UDP = 0x2, + ICE_PROF_TUN_GRE = 0x4, + ICE_PROF_TUN_PPPOE = 0x8, + ICE_PROF_TUN_ALL = 0xE, + ICE_PROF_ALL = 0xFF, +}; +#endif /* _ICE_FLEX_TYPE_H_ */ diff --git a/src/spdk/dpdk/drivers/net/ice/base/ice_flow.c b/src/spdk/dpdk/drivers/net/ice/base/ice_flow.c new file mode 100644 index 000000000..e741f5940 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ice/base/ice_flow.c @@ -0,0 +1,3699 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + +#include "ice_common.h" +#include "ice_flow.h" + +/* Size of known protocol header fields */ +#define ICE_FLOW_FLD_SZ_ETH_TYPE 2 +#define ICE_FLOW_FLD_SZ_VLAN 2 +#define ICE_FLOW_FLD_SZ_IPV4_ADDR 4 +#define ICE_FLOW_FLD_SZ_IPV6_ADDR 16 +#define ICE_FLOW_FLD_SZ_IP_DSCP 1 +#define ICE_FLOW_FLD_SZ_IP_TTL 1 +#define ICE_FLOW_FLD_SZ_IP_PROT 1 +#define ICE_FLOW_FLD_SZ_PORT 2 +#define ICE_FLOW_FLD_SZ_TCP_FLAGS 1 +#define ICE_FLOW_FLD_SZ_ICMP_TYPE 1 +#define ICE_FLOW_FLD_SZ_ICMP_CODE 1 +#define ICE_FLOW_FLD_SZ_ARP_OPER 2 +#define ICE_FLOW_FLD_SZ_GRE_KEYID 4 +#define ICE_FLOW_FLD_SZ_GTP_TEID 4 +#define ICE_FLOW_FLD_SZ_GTP_QFI 2 +#define ICE_FLOW_FLD_SZ_PPPOE_SESS_ID 2 +#define ICE_FLOW_FLD_SZ_PFCP_SEID 8 +#define ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID 4 +#define ICE_FLOW_FLD_SZ_ESP_SPI 4 +#define ICE_FLOW_FLD_SZ_AH_SPI 4 +#define ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI 4 + +/* Describe properties of a protocol header field */ +struct ice_flow_field_info { + enum ice_flow_seg_hdr hdr; + s16 off; /* Offset from start of a protocol header, in bits */ + u16 size; /* Size of fields in bits */ + u16 mask; /* 16-bit mask for field */ +}; + +#define ICE_FLOW_FLD_INFO(_hdr, _offset_bytes, _size_bytes) { \ + .hdr = _hdr, \ + .off = (_offset_bytes) * 
BITS_PER_BYTE, \ + .size = (_size_bytes) * BITS_PER_BYTE, \ + .mask = 0, \ +} + +#define ICE_FLOW_FLD_INFO_MSK(_hdr, _offset_bytes, _size_bytes, _mask) { \ + .hdr = _hdr, \ + .off = (_offset_bytes) * BITS_PER_BYTE, \ + .size = (_size_bytes) * BITS_PER_BYTE, \ + .mask = _mask, \ +} + +/* Table containing properties of supported protocol header fields */ +static const +struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = { + /* Ether */ + /* ICE_FLOW_FIELD_IDX_ETH_DA */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ETH_ALEN), + /* ICE_FLOW_FIELD_IDX_ETH_SA */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, ETH_ALEN, ETH_ALEN), + /* ICE_FLOW_FIELD_IDX_S_VLAN */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 12, ICE_FLOW_FLD_SZ_VLAN), + /* ICE_FLOW_FIELD_IDX_C_VLAN */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 14, ICE_FLOW_FLD_SZ_VLAN), + /* ICE_FLOW_FIELD_IDX_ETH_TYPE */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 12, ICE_FLOW_FLD_SZ_ETH_TYPE), + /* IPv4 / IPv6 */ + /* ICE_FLOW_FIELD_IDX_IPV4_DSCP */ + ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV4, 0, ICE_FLOW_FLD_SZ_IP_DSCP, + 0x00fc), + /* ICE_FLOW_FIELD_IDX_IPV6_DSCP */ + ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV6, 0, ICE_FLOW_FLD_SZ_IP_DSCP, + 0x0ff0), + /* ICE_FLOW_FIELD_IDX_IPV4_TTL */ + ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8, + ICE_FLOW_FLD_SZ_IP_TTL, 0xff00), + /* ICE_FLOW_FIELD_IDX_IPV4_PROT */ + ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8, + ICE_FLOW_FLD_SZ_IP_PROT, 0x00ff), + /* ICE_FLOW_FIELD_IDX_IPV6_TTL */ + ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6, + ICE_FLOW_FLD_SZ_IP_TTL, 0x00ff), + /* ICE_FLOW_FIELD_IDX_IPV6_PROT */ + ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6, + ICE_FLOW_FLD_SZ_IP_PROT, 0xff00), + /* ICE_FLOW_FIELD_IDX_IPV4_SA */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, ICE_FLOW_FLD_SZ_IPV4_ADDR), + /* ICE_FLOW_FIELD_IDX_IPV4_DA */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 16, ICE_FLOW_FLD_SZ_IPV4_ADDR), + /* ICE_FLOW_FIELD_IDX_IPV6_SA */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, ICE_FLOW_FLD_SZ_IPV6_ADDR), + /* ICE_FLOW_FIELD_IDX_IPV6_DA */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, ICE_FLOW_FLD_SZ_IPV6_ADDR), + /* Transport */ + /* ICE_FLOW_FIELD_IDX_TCP_SRC_PORT */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 0, ICE_FLOW_FLD_SZ_PORT), + /* ICE_FLOW_FIELD_IDX_TCP_DST_PORT */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 2, ICE_FLOW_FLD_SZ_PORT), + /* ICE_FLOW_FIELD_IDX_UDP_SRC_PORT */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 0, ICE_FLOW_FLD_SZ_PORT), + /* ICE_FLOW_FIELD_IDX_UDP_DST_PORT */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 2, ICE_FLOW_FLD_SZ_PORT), + /* ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 0, ICE_FLOW_FLD_SZ_PORT), + /* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, ICE_FLOW_FLD_SZ_PORT), + /* ICE_FLOW_FIELD_IDX_TCP_FLAGS */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 13, ICE_FLOW_FLD_SZ_TCP_FLAGS), + /* ARP */ + /* ICE_FLOW_FIELD_IDX_ARP_SIP */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 14, ICE_FLOW_FLD_SZ_IPV4_ADDR), + /* ICE_FLOW_FIELD_IDX_ARP_DIP */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 24, ICE_FLOW_FLD_SZ_IPV4_ADDR), + /* ICE_FLOW_FIELD_IDX_ARP_SHA */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 8, ETH_ALEN), + /* ICE_FLOW_FIELD_IDX_ARP_DHA */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 18, ETH_ALEN), + /* ICE_FLOW_FIELD_IDX_ARP_OP */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 6, ICE_FLOW_FLD_SZ_ARP_OPER), + /* ICMP */ + /* ICE_FLOW_FIELD_IDX_ICMP_TYPE */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 0, 
ICE_FLOW_FLD_SZ_ICMP_TYPE), + /* ICE_FLOW_FIELD_IDX_ICMP_CODE */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 1, ICE_FLOW_FLD_SZ_ICMP_CODE), + /* GRE */ + /* ICE_FLOW_FIELD_IDX_GRE_KEYID */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GRE, 12, ICE_FLOW_FLD_SZ_GRE_KEYID), + /* GTP */ + /* ICE_FLOW_FIELD_IDX_GTPC_TEID */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPC_TEID, 12, + ICE_FLOW_FLD_SZ_GTP_TEID), + /* ICE_FLOW_FIELD_IDX_GTPU_IP_TEID */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_IP, 12, + ICE_FLOW_FLD_SZ_GTP_TEID), + /* ICE_FLOW_FIELD_IDX_GTPU_EH_TEID */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_EH, 12, + ICE_FLOW_FLD_SZ_GTP_TEID), + /* ICE_FLOW_FIELD_IDX_GTPU_EH_QFI */ + ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_EH, 22, + ICE_FLOW_FLD_SZ_GTP_QFI, 0x3f00), + /* ICE_FLOW_FIELD_IDX_GTPU_UP_TEID */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_UP, 12, + ICE_FLOW_FLD_SZ_GTP_TEID), + /* ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_DWN, 12, + ICE_FLOW_FLD_SZ_GTP_TEID), + /* PPPOE */ + /* ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PPPOE, 2, + ICE_FLOW_FLD_SZ_PPPOE_SESS_ID), + /* PFCP */ + /* ICE_FLOW_FIELD_IDX_PFCP_SEID */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PFCP_SESSION, 12, + ICE_FLOW_FLD_SZ_PFCP_SEID), + /* L2TPV3 */ + /* ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_L2TPV3, 0, + ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID), + /* ESP */ + /* ICE_FLOW_FIELD_IDX_ESP_SPI */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ESP, 0, + ICE_FLOW_FLD_SZ_ESP_SPI), + /* AH */ + /* ICE_FLOW_FIELD_IDX_AH_SPI */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_AH, 4, + ICE_FLOW_FLD_SZ_AH_SPI), + /* NAT_T_ESP */ + /* ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NAT_T_ESP, 8, + ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI), +}; + +/* Bitmaps indicating relevant packet types for a particular protocol header + * + * Packet types for packets with an Outer/First/Single MAC header + */ +static const u32 ice_ptypes_mac_ofos[] = { + 0xFDC00846, 0xBFBF7F7E, 0xF70001DF, 0xFEFDFDFB, + 0x0000077E, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x03FFF000, 0x7FFFFFE0, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +/* Packet types for packets with an Innermost/Last MAC VLAN header */ +static const u32 ice_ptypes_macvlan_il[] = { + 0x00000000, 0xBC000000, 0x000001DF, 0xF0000000, + 0x0000077E, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +/* Packet types for packets with an Outer/First/Single IPv4 header */ +static const u32 ice_ptypes_ipv4_ofos[] = { + 0x1DC00000, 0x04000800, 0x00000000, 0x00000000, + 0x00000000, 0x00000155, 0x00000000, 0x00000000, + 0x0003000F, 0x000FC000, 0x83E0F800, 0x00000101, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +/* Packet types for packets with an Innermost/Last IPv4 header */ +static const u32 ice_ptypes_ipv4_il[] = { 
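+	/* As with the other ptype bitmaps in this file, each 32-bit entry
+	 * covers 32 consecutive packet type indices: bit n of entry w
+	 * corresponds to ptype (32 * w + n), and the 32 entries together
+	 * span ICE_FLOW_PTYPE_MAX (1024) packet types.
+	 */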
+ 0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B, + 0x0000000E, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x001FF800, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +/* Packet types for packets with an Outer/First/Single IPv6 header */ +static const u32 ice_ptypes_ipv6_ofos[] = { + 0x00000000, 0x00000000, 0x77000000, 0x10002000, + 0x00000000, 0x000002AA, 0x00000000, 0x00000000, + 0x00080F00, 0x03F00000, 0x7C1F0000, 0x00000206, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +/* Packet types for packets with an Innermost/Last IPv6 header */ +static const u32 ice_ptypes_ipv6_il[] = { + 0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000, + 0x00000770, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x7FE00000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +/* Packet types for packets with an Outermost/First ARP header */ +static const u32 ice_ptypes_arp_of[] = { + 0x00000800, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +/* UDP Packet types for non-tunneled packets or tunneled + * packets with inner UDP. 
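+ * UDP-based tunnels such as GTP, PFCP and NAT-T ESP have their own
+ * dedicated ptype bitmaps further below.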
+ */ +static const u32 ice_ptypes_udp_il[] = { + 0x81000000, 0x20204040, 0x04000010, 0x80810102, + 0x00000040, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00410000, 0x90842000, 0x00000007, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +/* Packet types for packets with an Innermost/Last TCP header */ +static const u32 ice_ptypes_tcp_il[] = { + 0x04000000, 0x80810102, 0x10000040, 0x02040408, + 0x00000102, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00820000, 0x21084000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +/* Packet types for packets with an Innermost/Last SCTP header */ +static const u32 ice_ptypes_sctp_il[] = { + 0x08000000, 0x01020204, 0x20000081, 0x04080810, + 0x00000204, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x01040000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +/* Packet types for packets with an Outermost/First ICMP header */ +static const u32 ice_ptypes_icmp_of[] = { + 0x10000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +/* Packet types for packets with an Innermost/Last ICMP header */ +static const u32 ice_ptypes_icmp_il[] = { + 0x00000000, 0x02040408, 0x40000102, 0x08101020, + 0x00000408, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x42108000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +/* Packet types for packets with an Outermost/First GRE header */ +static const u32 ice_ptypes_gre_of[] = { + 0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000, + 0x0000017E, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +/* Packet types for packets with an Innermost/Last MAC header */ +static const u32 ice_ptypes_mac_il[] = { + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +/* 
Packet types for GTPC */ +static const u32 ice_ptypes_gtpc[] = { + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000180, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +/* Packet types for GTPC with TEID */ +static const u32 ice_ptypes_gtpc_tid[] = { + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000060, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +/* Packet types for GTPU */ +static const struct ice_ptype_attributes ice_attr_gtpu_eh[] = { + { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH }, + { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_PDU_EH }, +}; + +static const struct ice_ptype_attributes ice_attr_gtpu_down[] = { + { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { 
ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK }, + { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_DOWNLINK }, +}; + +static const struct ice_ptype_attributes ice_attr_gtpu_up[] = { + { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK }, + { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_UPLINK }, + { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK }, + { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_UPLINK }, + { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_UPLINK }, + { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK }, + { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_UPLINK }, + { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK }, + { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_UPLINK }, + { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_UPLINK }, + { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK }, + { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_UPLINK }, + { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK }, + { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_UPLINK }, + { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_UPLINK }, + { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK }, + { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_UPLINK }, + { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK }, + { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_UPLINK }, + { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_UPLINK }, +}; + +static const u32 ice_ptypes_gtpu[] = { + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x7FFFFE00, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +/* Packet types for pppoe */ +static const u32 ice_ptypes_pppoe[] = { + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x03FFF000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +/* Packet types for packets with PFCP NODE header */ +static const u32 ice_ptypes_pfcp_node[] = { + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x80000000, 0x00000002, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +/* Packet types for packets with PFCP SESSION header */ +static const u32 ice_ptypes_pfcp_session[] = { + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000005, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +/* Packet types for l2tpv3 */ +static const u32 ice_ptypes_l2tpv3[] = { + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 
0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000300, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +/* Packet types for esp */ +static const u32 ice_ptypes_esp[] = { + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000003, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +/* Packet types for ah */ +static const u32 ice_ptypes_ah[] = { + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x0000000C, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +/* Packet types for packets with NAT_T ESP header */ +static const u32 ice_ptypes_nat_t_esp[] = { + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000030, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +/* Manage parameters and info. used during the creation of a flow profile */ +struct ice_flow_prof_params { + enum ice_block blk; + u16 entry_length; /* # of bytes formatted entry will require */ + u8 es_cnt; + struct ice_flow_prof *prof; + + /* For ACL, the es[0] will have the data of ICE_RX_MDID_PKT_FLAGS_15_0 + * This will give us the direction flags. 
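+	 * (That entry is filled in by ice_flow_xtract_pkt_flags() when the
+	 * extraction sequence for the ACL block is created.)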
+ */ + struct ice_fv_word es[ICE_MAX_FV_WORDS]; + /* attributes can be used to add attributes to a particular PTYPE */ + const struct ice_ptype_attributes *attr; + u16 attr_cnt; + + u16 mask[ICE_MAX_FV_WORDS]; + ice_declare_bitmap(ptypes, ICE_FLOW_PTYPE_MAX); +}; + +#define ICE_FLOW_RSS_HDRS_INNER_MASK \ + (ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_GTPC | \ + ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_GTPU | \ + ICE_FLOW_SEG_HDR_PFCP_SESSION | ICE_FLOW_SEG_HDR_L2TPV3 | \ + ICE_FLOW_SEG_HDR_ESP | ICE_FLOW_SEG_HDR_AH | \ + ICE_FLOW_SEG_HDR_NAT_T_ESP) + +#define ICE_FLOW_SEG_HDRS_L2_MASK \ + (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN) +#define ICE_FLOW_SEG_HDRS_L3_MASK \ + (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 | \ + ICE_FLOW_SEG_HDR_ARP) +#define ICE_FLOW_SEG_HDRS_L4_MASK \ + (ICE_FLOW_SEG_HDR_ICMP | ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \ + ICE_FLOW_SEG_HDR_SCTP) + +/** + * ice_flow_val_hdrs - validates packet segments for valid protocol headers + * @segs: array of one or more packet segments that describe the flow + * @segs_cnt: number of packet segments provided + */ +static enum ice_status +ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt) +{ + u8 i; + + for (i = 0; i < segs_cnt; i++) { + /* Multiple L3 headers */ + if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK && + !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK)) + return ICE_ERR_PARAM; + + /* Multiple L4 headers */ + if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK && + !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)) + return ICE_ERR_PARAM; + } + + return ICE_SUCCESS; +} + +/* Sizes of fixed known protocol headers without header options */ +#define ICE_FLOW_PROT_HDR_SZ_MAC 14 +#define ICE_FLOW_PROT_HDR_SZ_MAC_VLAN (ICE_FLOW_PROT_HDR_SZ_MAC + 2) +#define ICE_FLOW_PROT_HDR_SZ_IPV4 20 +#define ICE_FLOW_PROT_HDR_SZ_IPV6 40 +#define ICE_FLOW_PROT_HDR_SZ_ARP 28 +#define ICE_FLOW_PROT_HDR_SZ_ICMP 8 +#define ICE_FLOW_PROT_HDR_SZ_TCP 20 +#define ICE_FLOW_PROT_HDR_SZ_UDP 8 +#define ICE_FLOW_PROT_HDR_SZ_SCTP 12 + +/** + * ice_flow_calc_seg_sz - calculates size of a packet segment based on headers + * @params: information about the flow to be processed + * @seg: index of packet segment whose header size is to be determined + */ +static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg) +{ + u16 sz; + + /* L2 headers */ + sz = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_VLAN) ? 
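+	/* 14-byte MAC header, plus 2 bytes when a VLAN tag is matched;
+	 * e.g. an ETH + IPv4 + TCP segment totals 14 + 20 + 20 = 54 bytes
+	 * with the fixed header sizes defined above.
+	 */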
+ ICE_FLOW_PROT_HDR_SZ_MAC_VLAN : ICE_FLOW_PROT_HDR_SZ_MAC; + + /* L3 headers */ + if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4) + sz += ICE_FLOW_PROT_HDR_SZ_IPV4; + else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6) + sz += ICE_FLOW_PROT_HDR_SZ_IPV6; + else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ARP) + sz += ICE_FLOW_PROT_HDR_SZ_ARP; + else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK) + /* A L3 header is required if L4 is specified */ + return 0; + + /* L4 headers */ + if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ICMP) + sz += ICE_FLOW_PROT_HDR_SZ_ICMP; + else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP) + sz += ICE_FLOW_PROT_HDR_SZ_TCP; + else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_UDP) + sz += ICE_FLOW_PROT_HDR_SZ_UDP; + else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_SCTP) + sz += ICE_FLOW_PROT_HDR_SZ_SCTP; + + return sz; +} + +/** + * ice_flow_proc_seg_hdrs - process protocol headers present in pkt segments + * @params: information about the flow to be processed + * + * This function identifies the packet types associated with the protocol + * headers being present in packet segments of the specified flow profile. + */ +static enum ice_status +ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params) +{ + struct ice_flow_prof *prof; + u8 i; + + ice_memset(params->ptypes, 0xff, sizeof(params->ptypes), + ICE_NONDMA_MEM); + + prof = params->prof; + + for (i = 0; i < params->prof->segs_cnt; i++) { + const ice_bitmap_t *src; + u32 hdrs; + + hdrs = prof->segs[i].hdrs; + + if (hdrs & ICE_FLOW_SEG_HDR_ETH) { + src = !i ? (const ice_bitmap_t *)ice_ptypes_mac_ofos : + (const ice_bitmap_t *)ice_ptypes_mac_il; + ice_and_bitmap(params->ptypes, params->ptypes, src, + ICE_FLOW_PTYPE_MAX); + } + + if (i && hdrs & ICE_FLOW_SEG_HDR_VLAN) { + src = (const ice_bitmap_t *)ice_ptypes_macvlan_il; + ice_and_bitmap(params->ptypes, params->ptypes, src, + ICE_FLOW_PTYPE_MAX); + } + + if (!i && hdrs & ICE_FLOW_SEG_HDR_ARP) { + ice_and_bitmap(params->ptypes, params->ptypes, + (const ice_bitmap_t *)ice_ptypes_arp_of, + ICE_FLOW_PTYPE_MAX); + } + + if (hdrs & ICE_FLOW_SEG_HDR_PPPOE) { + src = (const ice_bitmap_t *)ice_ptypes_pppoe; + ice_and_bitmap(params->ptypes, params->ptypes, src, + ICE_FLOW_PTYPE_MAX); + } + + if (hdrs & ICE_FLOW_SEG_HDR_IPV4) { + src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos : + (const ice_bitmap_t *)ice_ptypes_ipv4_il; + ice_and_bitmap(params->ptypes, params->ptypes, src, + ICE_FLOW_PTYPE_MAX); + if (hdrs & ICE_FLOW_SEG_HDR_UDP) { + src = (const ice_bitmap_t *)ice_ptypes_udp_il; + ice_and_bitmap(params->ptypes, + params->ptypes, src, + ICE_FLOW_PTYPE_MAX); + } else if (hdrs & ICE_FLOW_SEG_HDR_TCP) { + ice_and_bitmap(params->ptypes, params->ptypes, + (const ice_bitmap_t *) + ice_ptypes_tcp_il, + ICE_FLOW_PTYPE_MAX); + } else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) { + src = (const ice_bitmap_t *)ice_ptypes_sctp_il; + ice_and_bitmap(params->ptypes, params->ptypes, + src, ICE_FLOW_PTYPE_MAX); + } + } else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) { + src = !i ? 
(const ice_bitmap_t *)ice_ptypes_ipv6_ofos : + (const ice_bitmap_t *)ice_ptypes_ipv6_il; + ice_and_bitmap(params->ptypes, params->ptypes, src, + ICE_FLOW_PTYPE_MAX); + if (hdrs & ICE_FLOW_SEG_HDR_UDP) { + src = (const ice_bitmap_t *)ice_ptypes_udp_il; + ice_and_bitmap(params->ptypes, + params->ptypes, src, + ICE_FLOW_PTYPE_MAX); + } else if (hdrs & ICE_FLOW_SEG_HDR_TCP) { + ice_and_bitmap(params->ptypes, params->ptypes, + (const ice_bitmap_t *) + ice_ptypes_tcp_il, + ICE_FLOW_PTYPE_MAX); + } else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) { + src = (const ice_bitmap_t *)ice_ptypes_sctp_il; + ice_and_bitmap(params->ptypes, params->ptypes, + src, ICE_FLOW_PTYPE_MAX); + } + } + + if (hdrs & ICE_FLOW_SEG_HDR_ICMP) { + src = !i ? (const ice_bitmap_t *)ice_ptypes_icmp_of : + (const ice_bitmap_t *)ice_ptypes_icmp_il; + ice_and_bitmap(params->ptypes, params->ptypes, src, + ICE_FLOW_PTYPE_MAX); + } else if (hdrs & ICE_FLOW_SEG_HDR_GRE) { + if (!i) { + src = (const ice_bitmap_t *)ice_ptypes_gre_of; + ice_and_bitmap(params->ptypes, params->ptypes, + src, ICE_FLOW_PTYPE_MAX); + } + } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC) { + src = (const ice_bitmap_t *)ice_ptypes_gtpc; + ice_and_bitmap(params->ptypes, params->ptypes, + src, ICE_FLOW_PTYPE_MAX); + } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC_TEID) { + src = (const ice_bitmap_t *)ice_ptypes_gtpc_tid; + ice_and_bitmap(params->ptypes, params->ptypes, + src, ICE_FLOW_PTYPE_MAX); + } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_DWN) { + src = (const ice_bitmap_t *)ice_ptypes_gtpu; + ice_and_bitmap(params->ptypes, params->ptypes, + src, ICE_FLOW_PTYPE_MAX); + + /* Attributes for GTP packet with downlink */ + params->attr = ice_attr_gtpu_down; + params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_down); + } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_UP) { + src = (const ice_bitmap_t *)ice_ptypes_gtpu; + ice_and_bitmap(params->ptypes, params->ptypes, + src, ICE_FLOW_PTYPE_MAX); + + /* Attributes for GTP packet with uplink */ + params->attr = ice_attr_gtpu_up; + params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_up); + } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_EH) { + src = (const ice_bitmap_t *)ice_ptypes_gtpu; + ice_and_bitmap(params->ptypes, params->ptypes, + src, ICE_FLOW_PTYPE_MAX); + + /* Attributes for GTP packet with Extension Header */ + params->attr = ice_attr_gtpu_eh; + params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_eh); + } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_IP) { + src = (const ice_bitmap_t *)ice_ptypes_gtpu; + ice_and_bitmap(params->ptypes, params->ptypes, + src, ICE_FLOW_PTYPE_MAX); + } else if (hdrs & ICE_FLOW_SEG_HDR_L2TPV3) { + src = (const ice_bitmap_t *)ice_ptypes_l2tpv3; + ice_and_bitmap(params->ptypes, params->ptypes, + src, ICE_FLOW_PTYPE_MAX); + } else if (hdrs & ICE_FLOW_SEG_HDR_ESP) { + src = (const ice_bitmap_t *)ice_ptypes_esp; + ice_and_bitmap(params->ptypes, params->ptypes, + src, ICE_FLOW_PTYPE_MAX); + } else if (hdrs & ICE_FLOW_SEG_HDR_AH) { + src = (const ice_bitmap_t *)ice_ptypes_ah; + ice_and_bitmap(params->ptypes, params->ptypes, + src, ICE_FLOW_PTYPE_MAX); + } else if (hdrs & ICE_FLOW_SEG_HDR_NAT_T_ESP) { + src = (const ice_bitmap_t *)ice_ptypes_nat_t_esp; + ice_and_bitmap(params->ptypes, params->ptypes, + src, ICE_FLOW_PTYPE_MAX); + } + + if (hdrs & ICE_FLOW_SEG_HDR_PFCP) { + if (hdrs & ICE_FLOW_SEG_HDR_PFCP_NODE) + src = + (const ice_bitmap_t *)ice_ptypes_pfcp_node; + else + src = + (const ice_bitmap_t *)ice_ptypes_pfcp_session; + + ice_and_bitmap(params->ptypes, params->ptypes, + src, ICE_FLOW_PTYPE_MAX); + } else { + src = (const ice_bitmap_t 
*)ice_ptypes_pfcp_node; + ice_andnot_bitmap(params->ptypes, params->ptypes, + src, ICE_FLOW_PTYPE_MAX); + + src = (const ice_bitmap_t *)ice_ptypes_pfcp_session; + ice_andnot_bitmap(params->ptypes, params->ptypes, + src, ICE_FLOW_PTYPE_MAX); + } + } + + return ICE_SUCCESS; +} + +/** + * ice_flow_xtract_pkt_flags - Create an extr sequence entry for packet flags + * @hw: pointer to the HW struct + * @params: information about the flow to be processed + * @flags: The value of pkt_flags[x:x] in Rx/Tx MDID metadata. + * + * This function will allocate an extraction sequence entries for a DWORD size + * chunk of the packet flags. + */ +static enum ice_status +ice_flow_xtract_pkt_flags(struct ice_hw *hw, + struct ice_flow_prof_params *params, + enum ice_flex_mdid_pkt_flags flags) +{ + u8 fv_words = hw->blk[params->blk].es.fvw; + u8 idx; + + /* Make sure the number of extraction sequence entries required does not + * exceed the block's capacity. + */ + if (params->es_cnt >= fv_words) + return ICE_ERR_MAX_LIMIT; + + /* some blocks require a reversed field vector layout */ + if (hw->blk[params->blk].es.reverse) + idx = fv_words - params->es_cnt - 1; + else + idx = params->es_cnt; + + params->es[idx].prot_id = ICE_PROT_META_ID; + params->es[idx].off = flags; + params->es_cnt++; + + return ICE_SUCCESS; +} + +/** + * ice_flow_xtract_fld - Create an extraction sequence entry for the given field + * @hw: pointer to the HW struct + * @params: information about the flow to be processed + * @seg: packet segment index of the field to be extracted + * @fld: ID of field to be extracted + * @match: bitfield of all fields + * + * This function determines the protocol ID, offset, and size of the given + * field. It then allocates one or more extraction sequence entries for the + * given field, and fill the entries with protocol ID and offset information. + */ +static enum ice_status +ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params, + u8 seg, enum ice_flow_field fld, u64 match) +{ + enum ice_flow_field sib = ICE_FLOW_FIELD_IDX_MAX; + enum ice_prot_id prot_id = ICE_PROT_ID_INVAL; + u8 fv_words = hw->blk[params->blk].es.fvw; + struct ice_flow_fld_info *flds; + u16 cnt, ese_bits, i; + u16 sib_mask = 0; + s16 adj = 0; + u16 mask; + u16 off; + + flds = params->prof->segs[seg].fields; + + switch (fld) { + case ICE_FLOW_FIELD_IDX_ETH_DA: + case ICE_FLOW_FIELD_IDX_ETH_SA: + case ICE_FLOW_FIELD_IDX_S_VLAN: + case ICE_FLOW_FIELD_IDX_C_VLAN: + prot_id = seg == 0 ? ICE_PROT_MAC_OF_OR_S : ICE_PROT_MAC_IL; + break; + case ICE_FLOW_FIELD_IDX_ETH_TYPE: + prot_id = seg == 0 ? ICE_PROT_ETYPE_OL : ICE_PROT_ETYPE_IL; + break; + case ICE_FLOW_FIELD_IDX_IPV4_DSCP: + prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL; + break; + case ICE_FLOW_FIELD_IDX_IPV6_DSCP: + prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL; + break; + case ICE_FLOW_FIELD_IDX_IPV4_TTL: + case ICE_FLOW_FIELD_IDX_IPV4_PROT: + prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL; + + /* TTL and PROT share the same extraction seq. entry. + * Each is considered a sibling to the other in terms of sharing + * the same extraction sequence entry. + */ + if (fld == ICE_FLOW_FIELD_IDX_IPV4_TTL) + sib = ICE_FLOW_FIELD_IDX_IPV4_PROT; + else if (fld == ICE_FLOW_FIELD_IDX_IPV4_PROT) + sib = ICE_FLOW_FIELD_IDX_IPV4_TTL; + + /* If the sibling field is also included, that field's + * mask needs to be included. 
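+		 * For example, IPv4 TTL (mask 0xff00) and protocol (mask
+		 * 0x00ff) share one 16-bit extraction word, so matching
+		 * both combines to a mask of 0xffff for that word.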
+ */ + if (match & BIT(sib)) + sib_mask = ice_flds_info[sib].mask; + break; + case ICE_FLOW_FIELD_IDX_IPV6_TTL: + case ICE_FLOW_FIELD_IDX_IPV6_PROT: + prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL; + + /* TTL and PROT share the same extraction seq. entry. + * Each is considered a sibling to the other in terms of sharing + * the same extraction sequence entry. + */ + if (fld == ICE_FLOW_FIELD_IDX_IPV6_TTL) + sib = ICE_FLOW_FIELD_IDX_IPV6_PROT; + else if (fld == ICE_FLOW_FIELD_IDX_IPV6_PROT) + sib = ICE_FLOW_FIELD_IDX_IPV6_TTL; + + /* If the sibling field is also included, that field's + * mask needs to be included. + */ + if (match & BIT(sib)) + sib_mask = ice_flds_info[sib].mask; + break; + case ICE_FLOW_FIELD_IDX_IPV4_SA: + case ICE_FLOW_FIELD_IDX_IPV4_DA: + prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL; + break; + case ICE_FLOW_FIELD_IDX_IPV6_SA: + case ICE_FLOW_FIELD_IDX_IPV6_DA: + prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL; + break; + case ICE_FLOW_FIELD_IDX_TCP_SRC_PORT: + case ICE_FLOW_FIELD_IDX_TCP_DST_PORT: + case ICE_FLOW_FIELD_IDX_TCP_FLAGS: + prot_id = ICE_PROT_TCP_IL; + break; + case ICE_FLOW_FIELD_IDX_UDP_SRC_PORT: + case ICE_FLOW_FIELD_IDX_UDP_DST_PORT: + prot_id = ICE_PROT_UDP_IL_OR_S; + break; + case ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT: + case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT: + prot_id = ICE_PROT_SCTP_IL; + break; + case ICE_FLOW_FIELD_IDX_GTPC_TEID: + case ICE_FLOW_FIELD_IDX_GTPU_IP_TEID: + case ICE_FLOW_FIELD_IDX_GTPU_UP_TEID: + case ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID: + case ICE_FLOW_FIELD_IDX_GTPU_EH_TEID: + case ICE_FLOW_FIELD_IDX_GTPU_EH_QFI: + /* GTP is accessed through UDP OF protocol */ + prot_id = ICE_PROT_UDP_OF; + break; + case ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID: + prot_id = ICE_PROT_PPPOE; + break; + case ICE_FLOW_FIELD_IDX_PFCP_SEID: + prot_id = ICE_PROT_UDP_IL_OR_S; + break; + case ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID: + prot_id = ICE_PROT_L2TPV3; + break; + case ICE_FLOW_FIELD_IDX_ESP_SPI: + prot_id = ICE_PROT_ESP_F; + break; + case ICE_FLOW_FIELD_IDX_AH_SPI: + prot_id = ICE_PROT_ESP_2; + break; + case ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI: + prot_id = ICE_PROT_UDP_IL_OR_S; + break; + case ICE_FLOW_FIELD_IDX_ARP_SIP: + case ICE_FLOW_FIELD_IDX_ARP_DIP: + case ICE_FLOW_FIELD_IDX_ARP_SHA: + case ICE_FLOW_FIELD_IDX_ARP_DHA: + case ICE_FLOW_FIELD_IDX_ARP_OP: + prot_id = ICE_PROT_ARP_OF; + break; + case ICE_FLOW_FIELD_IDX_ICMP_TYPE: + case ICE_FLOW_FIELD_IDX_ICMP_CODE: + /* ICMP type and code share the same extraction seq. entry */ + prot_id = (params->prof->segs[seg].hdrs & + ICE_FLOW_SEG_HDR_IPV4) ? + ICE_PROT_ICMP_IL : ICE_PROT_ICMPV6_IL; + sib = fld == ICE_FLOW_FIELD_IDX_ICMP_TYPE ? + ICE_FLOW_FIELD_IDX_ICMP_CODE : + ICE_FLOW_FIELD_IDX_ICMP_TYPE; + break; + case ICE_FLOW_FIELD_IDX_GRE_KEYID: + prot_id = ICE_PROT_GRE_OF; + break; + default: + return ICE_ERR_NOT_IMPL; + } + + /* Each extraction sequence entry is a word in size, and extracts a + * word-aligned offset from a protocol header. 
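+	 * With two-byte (ICE_FLOW_FV_EXTRACT_SZ) extraction words, a 4-byte
+	 * field such as the IPv4 source address at byte offset 12 is word
+	 * aligned (zero displacement) and consumes two consecutive entries.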
+ */ + ese_bits = ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE; + + flds[fld].xtrct.prot_id = prot_id; + flds[fld].xtrct.off = (ice_flds_info[fld].off / ese_bits) * + ICE_FLOW_FV_EXTRACT_SZ; + flds[fld].xtrct.disp = (u8)((ice_flds_info[fld].off + adj) % ese_bits); + flds[fld].xtrct.idx = params->es_cnt; + flds[fld].xtrct.mask = ice_flds_info[fld].mask; + + /* Adjust the next field-entry index after accommodating the number of + * entries this field consumes + */ + cnt = DIVIDE_AND_ROUND_UP(flds[fld].xtrct.disp + + ice_flds_info[fld].size, ese_bits); + + /* Fill in the extraction sequence entries needed for this field */ + off = flds[fld].xtrct.off; + mask = flds[fld].xtrct.mask; + for (i = 0; i < cnt; i++) { + /* Only consume an extraction sequence entry if there is no + * sibling field associated with this field or the sibling entry + * already extracts the word shared with this field. + */ + if (sib == ICE_FLOW_FIELD_IDX_MAX || + flds[sib].xtrct.prot_id == ICE_PROT_ID_INVAL || + flds[sib].xtrct.off != off) { + u8 idx; + + /* Make sure the number of extraction sequence required + * does not exceed the block's capability + */ + if (params->es_cnt >= fv_words) + return ICE_ERR_MAX_LIMIT; + + /* some blocks require a reversed field vector layout */ + if (hw->blk[params->blk].es.reverse) + idx = fv_words - params->es_cnt - 1; + else + idx = params->es_cnt; + + params->es[idx].prot_id = prot_id; + params->es[idx].off = off; + params->mask[idx] = mask | sib_mask; + params->es_cnt++; + } + + off += ICE_FLOW_FV_EXTRACT_SZ; + } + + return ICE_SUCCESS; +} + +/** + * ice_flow_xtract_raws - Create extract sequence entries for raw bytes + * @hw: pointer to the HW struct + * @params: information about the flow to be processed + * @seg: index of packet segment whose raw fields are to be be extracted + */ +static enum ice_status +ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params, + u8 seg) +{ + u16 fv_words; + u16 hdrs_sz; + u8 i; + + if (!params->prof->segs[seg].raws_cnt) + return ICE_SUCCESS; + + if (params->prof->segs[seg].raws_cnt > + ARRAY_SIZE(params->prof->segs[seg].raws)) + return ICE_ERR_MAX_LIMIT; + + /* Offsets within the segment headers are not supported */ + hdrs_sz = ice_flow_calc_seg_sz(params, seg); + if (!hdrs_sz) + return ICE_ERR_PARAM; + + fv_words = hw->blk[params->blk].es.fvw; + + for (i = 0; i < params->prof->segs[seg].raws_cnt; i++) { + struct ice_flow_seg_fld_raw *raw; + u16 off, cnt, j; + + raw = ¶ms->prof->segs[seg].raws[i]; + + /* Storing extraction information */ + raw->info.xtrct.prot_id = ICE_PROT_MAC_OF_OR_S; + raw->info.xtrct.off = (raw->off / ICE_FLOW_FV_EXTRACT_SZ) * + ICE_FLOW_FV_EXTRACT_SZ; + raw->info.xtrct.disp = (raw->off % ICE_FLOW_FV_EXTRACT_SZ) * + BITS_PER_BYTE; + raw->info.xtrct.idx = params->es_cnt; + + /* Determine the number of field vector entries this raw field + * consumes. 
+ */ + cnt = DIVIDE_AND_ROUND_UP(raw->info.xtrct.disp + + (raw->info.src.last * BITS_PER_BYTE), + (ICE_FLOW_FV_EXTRACT_SZ * + BITS_PER_BYTE)); + off = raw->info.xtrct.off; + for (j = 0; j < cnt; j++) { + u16 idx; + + /* Make sure the number of extraction sequence required + * does not exceed the block's capability + */ + if (params->es_cnt >= hw->blk[params->blk].es.count || + params->es_cnt >= ICE_MAX_FV_WORDS) + return ICE_ERR_MAX_LIMIT; + + /* some blocks require a reversed field vector layout */ + if (hw->blk[params->blk].es.reverse) + idx = fv_words - params->es_cnt - 1; + else + idx = params->es_cnt; + + params->es[idx].prot_id = raw->info.xtrct.prot_id; + params->es[idx].off = off; + params->es_cnt++; + off += ICE_FLOW_FV_EXTRACT_SZ; + } + } + + return ICE_SUCCESS; +} + +/** + * ice_flow_create_xtrct_seq - Create an extraction sequence for given segments + * @hw: pointer to the HW struct + * @params: information about the flow to be processed + * + * This function iterates through all matched fields in the given segments, and + * creates an extraction sequence for the fields. + */ +static enum ice_status +ice_flow_create_xtrct_seq(struct ice_hw *hw, + struct ice_flow_prof_params *params) +{ + enum ice_status status = ICE_SUCCESS; + u8 i; + + /* For ACL, we also need to extract the direction bit (Rx,Tx) data from + * packet flags + */ + if (params->blk == ICE_BLK_ACL) { + status = ice_flow_xtract_pkt_flags(hw, params, + ICE_RX_MDID_PKT_FLAGS_15_0); + if (status) + return status; + } + + for (i = 0; i < params->prof->segs_cnt; i++) { + u64 match = params->prof->segs[i].match; + enum ice_flow_field j; + + for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) { + const u64 bit = BIT_ULL(j); + + if (match & bit) { + status = ice_flow_xtract_fld(hw, params, i, j, + match); + if (status) + return status; + match &= ~bit; + } + } + + /* Process raw matching bytes */ + status = ice_flow_xtract_raws(hw, params, i); + if (status) + return status; + } + + return status; +} + +/** + * ice_flow_sel_acl_scen - returns the specific scenario + * @hw: pointer to the hardware structure + * @params: information about the flow to be processed + * + * This function will return the specific scenario based on the + * params passed to it + */ +static enum ice_status +ice_flow_sel_acl_scen(struct ice_hw *hw, struct ice_flow_prof_params *params) +{ + /* Find the best-fit scenario for the provided match width */ + struct ice_acl_scen *cand_scen = NULL, *scen; + + if (!hw->acl_tbl) + return ICE_ERR_DOES_NOT_EXIST; + + /* Loop through each scenario and match against the scenario width + * to select the specific scenario + */ + LIST_FOR_EACH_ENTRY(scen, &hw->acl_tbl->scens, ice_acl_scen, list_entry) + if (scen->eff_width >= params->entry_length && + (!cand_scen || cand_scen->eff_width > scen->eff_width)) + cand_scen = scen; + if (!cand_scen) + return ICE_ERR_DOES_NOT_EXIST; + + params->prof->cfg.scen = cand_scen; + + return ICE_SUCCESS; +} + +/** + * ice_flow_acl_def_entry_frmt - Determine the layout of flow entries + * @params: information about the flow to be processed + */ +static enum ice_status +ice_flow_acl_def_entry_frmt(struct ice_flow_prof_params *params) +{ + u16 index, i, range_idx = 0; + + index = ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX; + + for (i = 0; i < params->prof->segs_cnt; i++) { + struct ice_flow_seg_info *seg = ¶ms->prof->segs[i]; + u64 match = seg->match; + u8 j; + + for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) { + struct ice_flow_fld_info *fld; + const u64 bit = BIT_ULL(j); + + if 
(!(match & bit)) + continue; + + fld = &seg->fields[j]; + fld->entry.mask = ICE_FLOW_FLD_OFF_INVAL; + + if (fld->type == ICE_FLOW_FLD_TYPE_RANGE) { + fld->entry.last = ICE_FLOW_FLD_OFF_INVAL; + + /* Range checking only supported for single + * words + */ + if (DIVIDE_AND_ROUND_UP(ice_flds_info[j].size + + fld->xtrct.disp, + BITS_PER_BYTE * 2) > 1) + return ICE_ERR_PARAM; + + /* Ranges must define low and high values */ + if (fld->src.val == ICE_FLOW_FLD_OFF_INVAL || + fld->src.last == ICE_FLOW_FLD_OFF_INVAL) + return ICE_ERR_PARAM; + + fld->entry.val = range_idx++; + } else { + /* Store adjusted byte-length of field for later + * use, taking into account potential + * non-byte-aligned displacement + */ + fld->entry.last = DIVIDE_AND_ROUND_UP + (ice_flds_info[j].size + + (fld->xtrct.disp % BITS_PER_BYTE), + BITS_PER_BYTE); + fld->entry.val = index; + index += fld->entry.last; + } + + match &= ~bit; + } + + for (j = 0; j < seg->raws_cnt; j++) { + struct ice_flow_seg_fld_raw *raw = &seg->raws[j]; + + raw->info.entry.mask = ICE_FLOW_FLD_OFF_INVAL; + raw->info.entry.val = index; + raw->info.entry.last = raw->info.src.last; + index += raw->info.entry.last; + } + } + + /* Currently only support using the byte selection base, which only + * allows for an effective entry size of 30 bytes. Reject anything + * larger. + */ + if (index > ICE_AQC_ACL_PROF_BYTE_SEL_ELEMS) + return ICE_ERR_PARAM; + + /* Only 8 range checkers per profile, reject anything trying to use + * more + */ + if (range_idx > ICE_AQC_ACL_PROF_RANGES_NUM_CFG) + return ICE_ERR_PARAM; + + /* Store # bytes required for entry for later use */ + params->entry_length = index - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX; + + return ICE_SUCCESS; +} + +/** + * ice_flow_proc_segs - process all packet segments associated with a profile + * @hw: pointer to the HW struct + * @params: information about the flow to be processed + */ +static enum ice_status +ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params) +{ + enum ice_status status; + + status = ice_flow_proc_seg_hdrs(params); + if (status) + return status; + + status = ice_flow_create_xtrct_seq(hw, params); + if (status) + return status; + + switch (params->blk) { + case ICE_BLK_FD: + case ICE_BLK_RSS: + status = ICE_SUCCESS; + break; + case ICE_BLK_ACL: + status = ice_flow_acl_def_entry_frmt(params); + if (status) + return status; + status = ice_flow_sel_acl_scen(hw, params); + if (status) + return status; + break; + case ICE_BLK_SW: + default: + return ICE_ERR_NOT_IMPL; + } + + return status; +} + +#define ICE_FLOW_FIND_PROF_CHK_FLDS 0x00000001 +#define ICE_FLOW_FIND_PROF_CHK_VSI 0x00000002 +#define ICE_FLOW_FIND_PROF_NOT_CHK_DIR 0x00000004 + +/** + * ice_flow_find_prof_conds - Find a profile matching headers and conditions + * @hw: pointer to the HW struct + * @blk: classification stage + * @dir: flow direction + * @segs: array of one or more packet segments that describe the flow + * @segs_cnt: number of packet segments provided + * @vsi_handle: software VSI handle to check VSI (ICE_FLOW_FIND_PROF_CHK_VSI) + * @conds: additional conditions to be checked (ICE_FLOW_FIND_PROF_CHK_*) + */ +static struct ice_flow_prof * +ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk, + enum ice_flow_dir dir, struct ice_flow_seg_info *segs, + u8 segs_cnt, u16 vsi_handle, u32 conds) +{ + struct ice_flow_prof *p, *prof = NULL; + + ice_acquire_lock(&hw->fl_profs_locks[blk]); + LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) { + if ((p->dir == dir || conds & 
ICE_FLOW_FIND_PROF_NOT_CHK_DIR) && + segs_cnt && segs_cnt == p->segs_cnt) { + u8 i; + + /* Check for profile-VSI association if specified */ + if ((conds & ICE_FLOW_FIND_PROF_CHK_VSI) && + ice_is_vsi_valid(hw, vsi_handle) && + !ice_is_bit_set(p->vsis, vsi_handle)) + continue; + + /* Protocol headers must be checked. Matched fields are + * checked if specified. + */ + for (i = 0; i < segs_cnt; i++) + if (segs[i].hdrs != p->segs[i].hdrs || + ((conds & ICE_FLOW_FIND_PROF_CHK_FLDS) && + segs[i].match != p->segs[i].match)) + break; + + /* A match is found if all segments are matched */ + if (i == segs_cnt) { + prof = p; + break; + } + } + } + ice_release_lock(&hw->fl_profs_locks[blk]); + + return prof; +} + +/** + * ice_flow_find_prof - Look up a profile matching headers and matched fields + * @hw: pointer to the HW struct + * @blk: classification stage + * @dir: flow direction + * @segs: array of one or more packet segments that describe the flow + * @segs_cnt: number of packet segments provided + */ +u64 +ice_flow_find_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir, + struct ice_flow_seg_info *segs, u8 segs_cnt) +{ + struct ice_flow_prof *p; + + p = ice_flow_find_prof_conds(hw, blk, dir, segs, segs_cnt, + ICE_MAX_VSI, ICE_FLOW_FIND_PROF_CHK_FLDS); + + return p ? p->id : ICE_FLOW_PROF_ID_INVAL; +} + +/** + * ice_flow_find_prof_id - Look up a profile with given profile ID + * @hw: pointer to the HW struct + * @blk: classification stage + * @prof_id: unique ID to identify this flow profile + */ +static struct ice_flow_prof * +ice_flow_find_prof_id(struct ice_hw *hw, enum ice_block blk, u64 prof_id) +{ + struct ice_flow_prof *p; + + LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) { + if (p->id == prof_id) + return p; + } + + return NULL; +} + +/** + * ice_dealloc_flow_entry - Deallocate flow entry memory + * @hw: pointer to the HW struct + * @entry: flow entry to be removed + */ +static void +ice_dealloc_flow_entry(struct ice_hw *hw, struct ice_flow_entry *entry) +{ + if (!entry) + return; + + if (entry->entry) + ice_free(hw, entry->entry); + + if (entry->range_buf) { + ice_free(hw, entry->range_buf); + entry->range_buf = NULL; + } + + if (entry->acts) { + ice_free(hw, entry->acts); + entry->acts = NULL; + entry->acts_cnt = 0; + } + + ice_free(hw, entry); +} + +#define ICE_ACL_INVALID_SCEN 0x3f + +/** + * ice_flow_acl_is_prof_in_use - Verify if the profile is associated to any pf + * @hw: pointer to the hardware structure + * @prof: pointer to flow profile + * @buf: destination buffer function writes partial xtrct sequence to + * + * returns ICE_SUCCESS if no pf is associated to the given profile + * returns ICE_ERR_IN_USE if at least one pf is associated to the given profile + * returns other error code for real error + */ +static enum ice_status +ice_flow_acl_is_prof_in_use(struct ice_hw *hw, struct ice_flow_prof *prof, + struct ice_aqc_acl_prof_generic_frmt *buf) +{ + enum ice_status status; + u8 prof_id = 0; + + status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id); + if (status) + return status; + + status = ice_query_acl_prof(hw, prof_id, buf, NULL); + if (status) + return status; + + /* If all pf's associated scenarios are all 0 or all + * ICE_ACL_INVALID_SCEN (63) for the given profile then the latter has + * not been configured yet. 
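+	 * (0 is the unprogrammed default, while ice_flow_acl_disassoc_scen()
+	 * writes ICE_ACL_INVALID_SCEN when a PF releases the profile, so
+	 * either pattern means no PF currently uses it.)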
+ */ + if (buf->pf_scenario_num[0] == 0 && buf->pf_scenario_num[1] == 0 && + buf->pf_scenario_num[2] == 0 && buf->pf_scenario_num[3] == 0 && + buf->pf_scenario_num[4] == 0 && buf->pf_scenario_num[5] == 0 && + buf->pf_scenario_num[6] == 0 && buf->pf_scenario_num[7] == 0) + return ICE_SUCCESS; + + if (buf->pf_scenario_num[0] == ICE_ACL_INVALID_SCEN && + buf->pf_scenario_num[1] == ICE_ACL_INVALID_SCEN && + buf->pf_scenario_num[2] == ICE_ACL_INVALID_SCEN && + buf->pf_scenario_num[3] == ICE_ACL_INVALID_SCEN && + buf->pf_scenario_num[4] == ICE_ACL_INVALID_SCEN && + buf->pf_scenario_num[5] == ICE_ACL_INVALID_SCEN && + buf->pf_scenario_num[6] == ICE_ACL_INVALID_SCEN && + buf->pf_scenario_num[7] == ICE_ACL_INVALID_SCEN) + return ICE_SUCCESS; + else + return ICE_ERR_IN_USE; +} + +/** + * ice_flow_acl_free_act_cntr - Free the acl rule's actions + * @hw: pointer to the hardware structure + * @acts: array of actions to be performed on a match + * @acts_cnt: number of actions + */ +static enum ice_status +ice_flow_acl_free_act_cntr(struct ice_hw *hw, struct ice_flow_action *acts, + u8 acts_cnt) +{ + int i; + + for (i = 0; i < acts_cnt; i++) { + if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT || + acts[i].type == ICE_FLOW_ACT_CNTR_BYTES || + acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) { + struct ice_acl_cntrs cntrs; + enum ice_status status; + + cntrs.bank = 0; /* Only bank0 for the moment */ + cntrs.first_cntr = + LE16_TO_CPU(acts[i].data.acl_act.value); + cntrs.last_cntr = + LE16_TO_CPU(acts[i].data.acl_act.value); + + if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) + cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL; + else + cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE; + + status = ice_aq_dealloc_acl_cntrs(hw, &cntrs, NULL); + if (status) + return status; + } + } + return ICE_SUCCESS; +} + +/** + * ice_flow_acl_disassoc_scen - Disassociate the scenario to the Profile + * @hw: pointer to the hardware structure + * @prof: pointer to flow profile + * + * Disassociate the scenario to the Profile for the PF of the VSI. + */ +static enum ice_status +ice_flow_acl_disassoc_scen(struct ice_hw *hw, struct ice_flow_prof *prof) +{ + struct ice_aqc_acl_prof_generic_frmt buf; + enum ice_status status = ICE_SUCCESS; + u8 prof_id = 0; + + ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM); + + status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id); + if (status) + return status; + + status = ice_query_acl_prof(hw, prof_id, &buf, NULL); + if (status) + return status; + + /* Clear scenario for this pf */ + buf.pf_scenario_num[hw->pf_id] = ICE_ACL_INVALID_SCEN; + status = ice_prgm_acl_prof_extrt(hw, prof_id, &buf, NULL); + + return status; +} + +/** + * ice_flow_rem_entry_sync - Remove a flow entry + * @hw: pointer to the HW struct + * @blk: classification stage + * @entry: flow entry to be removed + */ +static enum ice_status +ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block blk, + struct ice_flow_entry *entry) +{ + if (!entry) + return ICE_ERR_BAD_PTR; + + if (blk == ICE_BLK_ACL) { + enum ice_status status; + + if (!entry->prof) + return ICE_ERR_BAD_PTR; + + status = ice_acl_rem_entry(hw, entry->prof->cfg.scen, + entry->scen_entry_idx); + if (status) + return status; + + /* Checks if we need to release an ACL counter. 
*/ + if (entry->acts_cnt && entry->acts) + ice_flow_acl_free_act_cntr(hw, entry->acts, + entry->acts_cnt); + } + + LIST_DEL(&entry->l_entry); + + ice_dealloc_flow_entry(hw, entry); + + return ICE_SUCCESS; +} + +/** + * ice_flow_add_prof_sync - Add a flow profile for packet segments and fields + * @hw: pointer to the HW struct + * @blk: classification stage + * @dir: flow direction + * @prof_id: unique ID to identify this flow profile + * @segs: array of one or more packet segments that describe the flow + * @segs_cnt: number of packet segments provided + * @acts: array of default actions + * @acts_cnt: number of default actions + * @prof: stores the returned flow profile added + * + * Assumption: the caller has acquired the lock to the profile list + */ +static enum ice_status +ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk, + enum ice_flow_dir dir, u64 prof_id, + struct ice_flow_seg_info *segs, u8 segs_cnt, + struct ice_flow_action *acts, u8 acts_cnt, + struct ice_flow_prof **prof) +{ + struct ice_flow_prof_params params; + enum ice_status status; + u8 i; + + if (!prof || (acts_cnt && !acts)) + return ICE_ERR_BAD_PTR; + + ice_memset(¶ms, 0, sizeof(params), ICE_NONDMA_MEM); + params.prof = (struct ice_flow_prof *) + ice_malloc(hw, sizeof(*params.prof)); + if (!params.prof) + return ICE_ERR_NO_MEMORY; + + /* initialize extraction sequence to all invalid (0xff) */ + for (i = 0; i < ICE_MAX_FV_WORDS; i++) { + params.es[i].prot_id = ICE_PROT_INVALID; + params.es[i].off = ICE_FV_OFFSET_INVAL; + } + + params.blk = blk; + params.prof->id = prof_id; + params.prof->dir = dir; + params.prof->segs_cnt = segs_cnt; + + /* Make a copy of the segments that need to be persistent in the flow + * profile instance + */ + for (i = 0; i < segs_cnt; i++) + ice_memcpy(¶ms.prof->segs[i], &segs[i], sizeof(*segs), + ICE_NONDMA_TO_NONDMA); + + /* Make a copy of the actions that need to be persistent in the flow + * profile instance. 
+ */ + if (acts_cnt) { + params.prof->acts = (struct ice_flow_action *) + ice_memdup(hw, acts, acts_cnt * sizeof(*acts), + ICE_NONDMA_TO_NONDMA); + + if (!params.prof->acts) { + status = ICE_ERR_NO_MEMORY; + goto out; + } + } + + status = ice_flow_proc_segs(hw, ¶ms); + if (status) { + ice_debug(hw, ICE_DBG_FLOW, + "Error processing a flow's packet segments\n"); + goto out; + } + + /* Add a HW profile for this flow profile */ + status = ice_add_prof(hw, blk, prof_id, (u8 *)params.ptypes, + params.attr, params.attr_cnt, params.es, + params.mask); + if (status) { + ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n"); + goto out; + } + + INIT_LIST_HEAD(¶ms.prof->entries); + ice_init_lock(¶ms.prof->entries_lock); + *prof = params.prof; + +out: + if (status) { + if (params.prof->acts) + ice_free(hw, params.prof->acts); + ice_free(hw, params.prof); + } + + return status; +} + +/** + * ice_flow_rem_prof_sync - remove a flow profile + * @hw: pointer to the hardware structure + * @blk: classification stage + * @prof: pointer to flow profile to remove + * + * Assumption: the caller has acquired the lock to the profile list + */ +static enum ice_status +ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk, + struct ice_flow_prof *prof) +{ + enum ice_status status; + + /* Remove all remaining flow entries before removing the flow profile */ + if (!LIST_EMPTY(&prof->entries)) { + struct ice_flow_entry *e, *t; + + ice_acquire_lock(&prof->entries_lock); + + LIST_FOR_EACH_ENTRY_SAFE(e, t, &prof->entries, ice_flow_entry, + l_entry) { + status = ice_flow_rem_entry_sync(hw, blk, e); + if (status) + break; + } + + ice_release_lock(&prof->entries_lock); + } + + if (blk == ICE_BLK_ACL) { + struct ice_aqc_acl_profile_ranges query_rng_buf; + struct ice_aqc_acl_prof_generic_frmt buf; + u8 prof_id = 0; + + /* Deassociate the scenario to the Profile for the PF */ + status = ice_flow_acl_disassoc_scen(hw, prof); + if (status) + return status; + + /* Clear the range-checker if the profile ID is no longer + * used by any PF + */ + status = ice_flow_acl_is_prof_in_use(hw, prof, &buf); + if (status && status != ICE_ERR_IN_USE) { + return status; + } else if (!status) { + /* Clear the range-checker value for profile ID */ + ice_memset(&query_rng_buf, 0, + sizeof(struct ice_aqc_acl_profile_ranges), + ICE_NONDMA_MEM); + + status = ice_flow_get_hw_prof(hw, blk, prof->id, + &prof_id); + if (status) + return status; + + status = ice_prog_acl_prof_ranges(hw, prof_id, + &query_rng_buf, NULL); + if (status) + return status; + } + } + + /* Remove all hardware profiles associated with this flow profile */ + status = ice_rem_prof(hw, blk, prof->id); + if (!status) { + LIST_DEL(&prof->l_entry); + ice_destroy_lock(&prof->entries_lock); + if (prof->acts) + ice_free(hw, prof->acts); + ice_free(hw, prof); + } + + return status; +} + +/** + * ice_flow_acl_set_xtrct_seq_fld - Populate xtrct seq for single field + * @buf: Destination buffer function writes partial xtrct sequence to + * @info: Info about field + */ +static void +ice_flow_acl_set_xtrct_seq_fld(struct ice_aqc_acl_prof_generic_frmt *buf, + struct ice_flow_fld_info *info) +{ + u16 dst, i; + u8 src; + + src = info->xtrct.idx * ICE_FLOW_FV_EXTRACT_SZ + + info->xtrct.disp / BITS_PER_BYTE; + dst = info->entry.val; + for (i = 0; i < info->entry.last; i++) + /* HW stores field vector words in LE, convert words back to BE + * so constructed entries will end up in network order + */ + buf->byte_selection[dst++] = src++ ^ 1; +} + +/** + * ice_flow_acl_set_xtrct_seq - 
Program ACL extraction sequence + * @hw: pointer to the hardware structure + * @prof: pointer to flow profile + */ +static enum ice_status +ice_flow_acl_set_xtrct_seq(struct ice_hw *hw, struct ice_flow_prof *prof) +{ + struct ice_aqc_acl_prof_generic_frmt buf; + struct ice_flow_fld_info *info; + enum ice_status status; + u8 prof_id = 0; + u16 i; + + ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM); + + status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id); + if (status) + return status; + + status = ice_flow_acl_is_prof_in_use(hw, prof, &buf); + if (status && status != ICE_ERR_IN_USE) + return status; + + if (!status) { + /* Program the profile dependent configuration. This is done + * only once regardless of the number of PFs using that profile + */ + ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM); + + for (i = 0; i < prof->segs_cnt; i++) { + struct ice_flow_seg_info *seg = &prof->segs[i]; + u64 match = seg->match; + u16 j; + + for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) { + const u64 bit = BIT_ULL(j); + + if (!(match & bit)) + continue; + + info = &seg->fields[j]; + + if (info->type == ICE_FLOW_FLD_TYPE_RANGE) + buf.word_selection[info->entry.val] = + info->xtrct.idx; + else + ice_flow_acl_set_xtrct_seq_fld(&buf, + info); + + match &= ~bit; + } + + for (j = 0; j < seg->raws_cnt; j++) { + info = &seg->raws[j].info; + ice_flow_acl_set_xtrct_seq_fld(&buf, info); + } + } + + ice_memset(&buf.pf_scenario_num[0], ICE_ACL_INVALID_SCEN, + ICE_AQC_ACL_PROF_PF_SCEN_NUM_ELEMS, + ICE_NONDMA_MEM); + } + + /* Update the current PF */ + buf.pf_scenario_num[hw->pf_id] = (u8)prof->cfg.scen->id; + status = ice_prgm_acl_prof_extrt(hw, prof_id, &buf, NULL); + + return status; +} + +/** + * ice_flow_assoc_vsig_vsi - associate a VSI with VSIG + * @hw: pointer to the hardware structure + * @blk: classification stage + * @vsi_handle: software VSI handle + * @vsig: target VSI group + * + * Assumption: the caller has already verified that the VSI to + * be added has the same characteristics as the VSIG and will + * thereby have access to all resources added to that VSIG. 
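+ *
+ * A minimal usage sketch (illustrative only; hw, vsi_handle and vsig are
+ * assumed to have been obtained and validated by the caller):
+ *
+ *	status = ice_flow_assoc_vsig_vsi(hw, ICE_BLK_FD, vsi_handle, vsig);
+ *	if (status)
+ *		return status;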
+ */ +enum ice_status +ice_flow_assoc_vsig_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle, + u16 vsig) +{ + enum ice_status status; + + if (!ice_is_vsi_valid(hw, vsi_handle) || blk >= ICE_BLK_COUNT) + return ICE_ERR_PARAM; + + ice_acquire_lock(&hw->fl_profs_locks[blk]); + status = ice_add_vsi_flow(hw, blk, ice_get_hw_vsi_num(hw, vsi_handle), + vsig); + ice_release_lock(&hw->fl_profs_locks[blk]); + + return status; +} + +/** + * ice_flow_assoc_prof - associate a VSI with a flow profile + * @hw: pointer to the hardware structure + * @blk: classification stage + * @prof: pointer to flow profile + * @vsi_handle: software VSI handle + * + * Assumption: the caller has acquired the lock to the profile list + * and the software VSI handle has been validated + */ +static enum ice_status +ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk, + struct ice_flow_prof *prof, u16 vsi_handle) +{ + enum ice_status status = ICE_SUCCESS; + + if (!ice_is_bit_set(prof->vsis, vsi_handle)) { + if (blk == ICE_BLK_ACL) { + status = ice_flow_acl_set_xtrct_seq(hw, prof); + if (status) + return status; + } + status = ice_add_prof_id_flow(hw, blk, + ice_get_hw_vsi_num(hw, + vsi_handle), + prof->id); + if (!status) + ice_set_bit(vsi_handle, prof->vsis); + else + ice_debug(hw, ICE_DBG_FLOW, + "HW profile add failed, %d\n", + status); + } + + return status; +} + +/** + * ice_flow_disassoc_prof - disassociate a VSI from a flow profile + * @hw: pointer to the hardware structure + * @blk: classification stage + * @prof: pointer to flow profile + * @vsi_handle: software VSI handle + * + * Assumption: the caller has acquired the lock to the profile list + * and the software VSI handle has been validated + */ +static enum ice_status +ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk, + struct ice_flow_prof *prof, u16 vsi_handle) +{ + enum ice_status status = ICE_SUCCESS; + + if (ice_is_bit_set(prof->vsis, vsi_handle)) { + status = ice_rem_prof_id_flow(hw, blk, + ice_get_hw_vsi_num(hw, + vsi_handle), + prof->id); + if (!status) + ice_clear_bit(vsi_handle, prof->vsis); + else + ice_debug(hw, ICE_DBG_FLOW, + "HW profile remove failed, %d\n", + status); + } + + return status; +} + +/** + * ice_flow_add_prof - Add a flow profile for packet segments and matched fields + * @hw: pointer to the HW struct + * @blk: classification stage + * @dir: flow direction + * @prof_id: unique ID to identify this flow profile + * @segs: array of one or more packet segments that describe the flow + * @segs_cnt: number of packet segments provided + * @acts: array of default actions + * @acts_cnt: number of default actions + * @prof: stores the returned flow profile added + */ +enum ice_status +ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir, + u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt, + struct ice_flow_action *acts, u8 acts_cnt, + struct ice_flow_prof **prof) +{ + enum ice_status status; + + if (segs_cnt > ICE_FLOW_SEG_MAX) + return ICE_ERR_MAX_LIMIT; + + if (!segs_cnt) + return ICE_ERR_PARAM; + + if (!segs) + return ICE_ERR_BAD_PTR; + + status = ice_flow_val_hdrs(segs, segs_cnt); + if (status) + return status; + + ice_acquire_lock(&hw->fl_profs_locks[blk]); + + status = ice_flow_add_prof_sync(hw, blk, dir, prof_id, segs, segs_cnt, + acts, acts_cnt, prof); + if (!status) + LIST_ADD(&(*prof)->l_entry, &hw->fl_profs[blk]); + + ice_release_lock(&hw->fl_profs_locks[blk]); + + return status; +} + +/** + * ice_flow_rem_prof - Remove a flow profile and all entries associated with it + 
* @hw: pointer to the HW struct
+ * @blk: the block for which the flow profile is to be removed
+ * @prof_id: unique ID of the flow profile to be removed
+ */
+enum ice_status
+ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
+{
+ struct ice_flow_prof *prof;
+ enum ice_status status;
+
+ ice_acquire_lock(&hw->fl_profs_locks[blk]);
+
+ prof = ice_flow_find_prof_id(hw, blk, prof_id);
+ if (!prof) {
+ status = ICE_ERR_DOES_NOT_EXIST;
+ goto out;
+ }
+
+ /* prof becomes invalid after the call */
+ status = ice_flow_rem_prof_sync(hw, blk, prof);
+
+out:
+ ice_release_lock(&hw->fl_profs_locks[blk]);
+
+ return status;
+}
+
+/**
+ * ice_flow_get_hw_prof - return the HW profile for a specific profile ID handle
+ * @hw: pointer to the HW struct
+ * @blk: classification stage
+ * @prof_id: the profile ID handle
+ * @hw_prof_id: pointer to variable to receive the HW profile ID
+ */
+enum ice_status
+ice_flow_get_hw_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
+ u8 *hw_prof_id)
+{
+ struct ice_prof_map *map;
+
+ map = ice_search_prof_id(hw, blk, prof_id);
+ if (map) {
+ *hw_prof_id = map->prof_id;
+ return ICE_SUCCESS;
+ }
+
+ return ICE_ERR_DOES_NOT_EXIST;
+}
+
+/**
+ * ice_flow_find_entry - look for a flow entry using its unique ID
+ * @hw: pointer to the HW struct
+ * @blk: classification stage
+ * @entry_id: unique ID to identify this flow entry
+ *
+ * This function looks for the flow entry with the specified unique ID in all
+ * flow profiles of the specified classification stage. If the entry is found,
+ * it returns the handle to the flow entry. Otherwise, it returns
+ * ICE_FLOW_ENTRY_HANDLE_INVAL.
+ */
+u64 ice_flow_find_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_id)
+{
+ struct ice_flow_entry *found = NULL;
+ struct ice_flow_prof *p;
+
+ ice_acquire_lock(&hw->fl_profs_locks[blk]);
+
+ LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
+ struct ice_flow_entry *e;
+
+ ice_acquire_lock(&p->entries_lock);
+ LIST_FOR_EACH_ENTRY(e, &p->entries, ice_flow_entry, l_entry)
+ if (e->id == entry_id) {
+ found = e;
+ break;
+ }
+ ice_release_lock(&p->entries_lock);
+
+ if (found)
+ break;
+ }
+
+ ice_release_lock(&hw->fl_profs_locks[blk]);
+
+ return found ? ICE_FLOW_ENTRY_HNDL(found) : ICE_FLOW_ENTRY_HANDLE_INVAL;
+}
+
+/**
+ * ice_flow_acl_check_actions - Checks the ACL rule's actions
+ * @hw: pointer to the hardware structure
+ * @acts: array of actions to be performed on a match
+ * @acts_cnt: number of actions
+ * @cnt_alloc: indicates if an ACL counter has been allocated.
+ */
+static enum ice_status
+ice_flow_acl_check_actions(struct ice_hw *hw, struct ice_flow_action *acts,
+ u8 acts_cnt, bool *cnt_alloc)
+{
+ ice_declare_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
+ int i;
+
+ ice_zero_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
+ *cnt_alloc = false;
+
+ if (acts_cnt > ICE_FLOW_ACL_MAX_NUM_ACT)
+ return ICE_ERR_OUT_OF_RANGE;
+
+ for (i = 0; i < acts_cnt; i++) {
+ if (acts[i].type != ICE_FLOW_ACT_NOP &&
+ acts[i].type != ICE_FLOW_ACT_DROP &&
+ acts[i].type != ICE_FLOW_ACT_CNTR_PKT &&
+ acts[i].type != ICE_FLOW_ACT_FWD_QUEUE)
+ return ICE_ERR_CFG;
+
+ /* If the caller wants to add two actions of the same type, it
+ * is considered an invalid configuration.
+ */
+ if (ice_test_and_set_bit(acts[i].type, dup_check))
+ return ICE_ERR_PARAM;
+ }
+
+ /* Checks if ACL counters are needed.
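+ * Packet, byte, and combined packet/byte counter actions each need an
+ * ACL counter allocated from bank 0; the combined action uses a dual
+ * counter type, the others a single counter type, as programmed below.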
*/ + for (i = 0; i < acts_cnt; i++) { + if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT || + acts[i].type == ICE_FLOW_ACT_CNTR_BYTES || + acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) { + struct ice_acl_cntrs cntrs; + enum ice_status status; + + cntrs.amount = 1; + cntrs.bank = 0; /* Only bank0 for the moment */ + + if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) + cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL; + else + cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE; + + status = ice_aq_alloc_acl_cntrs(hw, &cntrs, NULL); + if (status) + return status; + /* Counter index within the bank */ + acts[i].data.acl_act.value = + CPU_TO_LE16(cntrs.first_cntr); + *cnt_alloc = true; + } + } + + return ICE_SUCCESS; +} + +/** + * ice_flow_acl_frmt_entry_range - Format an acl range checker for a given field + * @fld: number of the given field + * @info: info about field + * @range_buf: range checker configuration buffer + * @data: pointer to a data buffer containing flow entry's match values/masks + * @range: Input/output param indicating which range checkers are being used + */ +static void +ice_flow_acl_frmt_entry_range(u16 fld, struct ice_flow_fld_info *info, + struct ice_aqc_acl_profile_ranges *range_buf, + u8 *data, u8 *range) +{ + u16 new_mask; + + /* If not specified, default mask is all bits in field */ + new_mask = (info->src.mask == ICE_FLOW_FLD_OFF_INVAL ? + BIT(ice_flds_info[fld].size) - 1 : + (*(u16 *)(data + info->src.mask))) << info->xtrct.disp; + + /* If the mask is 0, then we don't need to worry about this input + * range checker value. + */ + if (new_mask) { + u16 new_high = + (*(u16 *)(data + info->src.last)) << info->xtrct.disp; + u16 new_low = + (*(u16 *)(data + info->src.val)) << info->xtrct.disp; + u8 range_idx = info->entry.val; + + range_buf->checker_cfg[range_idx].low_boundary = + CPU_TO_BE16(new_low); + range_buf->checker_cfg[range_idx].high_boundary = + CPU_TO_BE16(new_high); + range_buf->checker_cfg[range_idx].mask = CPU_TO_BE16(new_mask); + + /* Indicate which range checker is being used */ + *range |= BIT(range_idx); + } +} + +/** + * ice_flow_acl_frmt_entry_fld - Partially format acl entry for a given field + * @fld: number of the given field + * @info: info about the field + * @buf: buffer containing the entry + * @dontcare: buffer containing don't care mask for entry + * @data: pointer to a data buffer containing flow entry's match values/masks + */ +static void +ice_flow_acl_frmt_entry_fld(u16 fld, struct ice_flow_fld_info *info, u8 *buf, + u8 *dontcare, u8 *data) +{ + u16 dst, src, mask, k, end_disp, tmp_s = 0, tmp_m = 0; + bool use_mask = false; + u8 disp; + + src = info->src.val; + mask = info->src.mask; + dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX; + disp = info->xtrct.disp % BITS_PER_BYTE; + + if (mask != ICE_FLOW_FLD_OFF_INVAL) + use_mask = true; + + for (k = 0; k < info->entry.last; k++, dst++) { + /* Add overflow bits from previous byte */ + buf[dst] = (tmp_s & 0xff00) >> 8; + + /* If mask is not valid, tmp_m is always zero, so just setting + * dontcare to 0 (no masked bits). 
If mask is valid, pulls in + * overflow bits of mask from prev byte + */ + dontcare[dst] = (tmp_m & 0xff00) >> 8; + + /* If there is displacement, last byte will only contain + * displaced data, but there is no more data to read from user + * buffer, so skip so as not to potentially read beyond end of + * user buffer + */ + if (!disp || k < info->entry.last - 1) { + /* Store shifted data to use in next byte */ + tmp_s = data[src++] << disp; + + /* Add current (shifted) byte */ + buf[dst] |= tmp_s & 0xff; + + /* Handle mask if valid */ + if (use_mask) { + tmp_m = (~data[mask++] & 0xff) << disp; + dontcare[dst] |= tmp_m & 0xff; + } + } + } + + /* Fill in don't care bits at beginning of field */ + if (disp) { + dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX; + for (k = 0; k < disp; k++) + dontcare[dst] |= BIT(k); + } + + end_disp = (disp + ice_flds_info[fld].size) % BITS_PER_BYTE; + + /* Fill in don't care bits at end of field */ + if (end_disp) { + dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX + + info->entry.last - 1; + for (k = end_disp; k < BITS_PER_BYTE; k++) + dontcare[dst] |= BIT(k); + } +} + +/** + * ice_flow_acl_frmt_entry - Format acl entry + * @hw: pointer to the hardware structure + * @prof: pointer to flow profile + * @e: pointer to the flow entry + * @data: pointer to a data buffer containing flow entry's match values/masks + * @acts: array of actions to be performed on a match + * @acts_cnt: number of actions + * + * Formats the key (and key_inverse) to be matched from the data passed in, + * along with data from the flow profile. This key/key_inverse pair makes up + * the 'entry' for an acl flow entry. + */ +static enum ice_status +ice_flow_acl_frmt_entry(struct ice_hw *hw, struct ice_flow_prof *prof, + struct ice_flow_entry *e, u8 *data, + struct ice_flow_action *acts, u8 acts_cnt) +{ + u8 *buf = NULL, *dontcare = NULL, *key = NULL, range = 0, dir_flag_msk; + struct ice_aqc_acl_profile_ranges *range_buf = NULL; + enum ice_status status; + bool cnt_alloc; + u8 prof_id = 0; + u16 i, buf_sz; + + status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id); + if (status) + return status; + + /* Format the result action */ + + status = ice_flow_acl_check_actions(hw, acts, acts_cnt, &cnt_alloc); + if (status) + return status; + + status = ICE_ERR_NO_MEMORY; + + e->acts = (struct ice_flow_action *) + ice_memdup(hw, acts, acts_cnt * sizeof(*acts), + ICE_NONDMA_TO_NONDMA); + + if (!e->acts) + goto out; + + e->acts_cnt = acts_cnt; + + /* Format the matching data */ + buf_sz = prof->cfg.scen->width; + buf = (u8 *)ice_malloc(hw, buf_sz); + if (!buf) + goto out; + + dontcare = (u8 *)ice_malloc(hw, buf_sz); + if (!dontcare) + goto out; + + /* 'key' buffer will store both key and key_inverse, so must be twice + * size of buf + */ + key = (u8 *)ice_malloc(hw, buf_sz * 2); + if (!key) + goto out; + + range_buf = (struct ice_aqc_acl_profile_ranges *) + ice_malloc(hw, sizeof(struct ice_aqc_acl_profile_ranges)); + if (!range_buf) + goto out; + + /* Set don't care mask to all 1's to start, will zero out used bytes */ + ice_memset(dontcare, 0xff, buf_sz, ICE_NONDMA_MEM); + + for (i = 0; i < prof->segs_cnt; i++) { + struct ice_flow_seg_info *seg = &prof->segs[i]; + u64 match = seg->match; + u16 j; + + for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) { + struct ice_flow_fld_info *info; + const u64 bit = BIT_ULL(j); + + if (!(match & bit)) + continue; + + info = &seg->fields[j]; + + if (info->type == ICE_FLOW_FLD_TYPE_RANGE) + ice_flow_acl_frmt_entry_range(j, 
info, + range_buf, data, + &range); + else + ice_flow_acl_frmt_entry_fld(j, info, buf, + dontcare, data); + + match &= ~bit; + } + + for (j = 0; j < seg->raws_cnt; j++) { + struct ice_flow_fld_info *info = &seg->raws[j].info; + u16 dst, src, mask, k; + bool use_mask = false; + + src = info->src.val; + dst = info->entry.val - + ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX; + mask = info->src.mask; + + if (mask != ICE_FLOW_FLD_OFF_INVAL) + use_mask = true; + + for (k = 0; k < info->entry.last; k++, dst++) { + buf[dst] = data[src++]; + if (use_mask) + dontcare[dst] = ~data[mask++]; + else + dontcare[dst] = 0; + } + } + } + + buf[prof->cfg.scen->pid_idx] = (u8)prof_id; + dontcare[prof->cfg.scen->pid_idx] = 0; + + /* Format the buffer for direction flags */ + dir_flag_msk = BIT(ICE_FLG_PKT_DIR); + + if (prof->dir == ICE_FLOW_RX) + buf[prof->cfg.scen->pkt_dir_idx] = dir_flag_msk; + + if (range) { + buf[prof->cfg.scen->rng_chk_idx] = range; + /* Mark any unused range checkers as don't care */ + dontcare[prof->cfg.scen->rng_chk_idx] = ~range; + e->range_buf = range_buf; + } else { + ice_free(hw, range_buf); + } + + status = ice_set_key(key, buf_sz * 2, buf, NULL, dontcare, NULL, 0, + buf_sz); + if (status) + goto out; + + e->entry = key; + e->entry_sz = buf_sz * 2; + +out: + if (buf) + ice_free(hw, buf); + + if (dontcare) + ice_free(hw, dontcare); + + if (status && key) + ice_free(hw, key); + + if (status && range_buf) { + ice_free(hw, range_buf); + e->range_buf = NULL; + } + + if (status && e->acts) { + ice_free(hw, e->acts); + e->acts = NULL; + e->acts_cnt = 0; + } + + if (status && cnt_alloc) + ice_flow_acl_free_act_cntr(hw, acts, acts_cnt); + + return status; +} + +/** + * ice_flow_acl_find_scen_entry_cond - Find an ACL scenario entry that matches + * the compared data. + * @prof: pointer to flow profile + * @e: pointer to the comparing flow entry + * @do_chg_action: decide if we want to change the ACL action + * @do_add_entry: decide if we want to add the new ACL entry + * @do_rem_entry: decide if we want to remove the current ACL entry + * + * Find an ACL scenario entry that matches the compared data. In the same time, + * this function also figure out: + * a/ If we want to change the ACL action + * b/ If we want to add the new ACL entry + * c/ If we want to remove the current ACL entry + */ +static struct ice_flow_entry * +ice_flow_acl_find_scen_entry_cond(struct ice_flow_prof *prof, + struct ice_flow_entry *e, bool *do_chg_action, + bool *do_add_entry, bool *do_rem_entry) +{ + struct ice_flow_entry *p, *return_entry = NULL; + u8 i, j; + + /* Check if: + * a/ There exists an entry with same matching data, but different + * priority, then we remove this existing ACL entry. Then, we + * will add the new entry to the ACL scenario. + * b/ There exists an entry with same matching data, priority, and + * result action, then we do nothing + * c/ There exists an entry with same matching data, priority, but + * different, action, then do only change the action's entry. + * d/ Else, we add this new entry to the ACL scenario. + */ + *do_chg_action = false; + *do_add_entry = true; + *do_rem_entry = false; + LIST_FOR_EACH_ENTRY(p, &prof->entries, ice_flow_entry, l_entry) { + if (memcmp(p->entry, e->entry, p->entry_sz)) + continue; + + /* From this point, we have the same matching_data. 
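+ * Only the priority and the action list can still differ; the checks
+ * below decide what to do based on those two.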
*/ + *do_add_entry = false; + return_entry = p; + + if (p->priority != e->priority) { + /* matching data && !priority */ + *do_add_entry = true; + *do_rem_entry = true; + break; + } + + /* From this point, we will have matching_data && priority */ + if (p->acts_cnt != e->acts_cnt) + *do_chg_action = true; + for (i = 0; i < p->acts_cnt; i++) { + bool found_not_match = false; + + for (j = 0; j < e->acts_cnt; j++) + if (memcmp(&p->acts[i], &e->acts[j], + sizeof(struct ice_flow_action))) { + found_not_match = true; + break; + } + + if (found_not_match) { + *do_chg_action = true; + break; + } + } + + /* (do_chg_action = true) means : + * matching_data && priority && !result_action + * (do_chg_action = false) means : + * matching_data && priority && result_action + */ + break; + } + + return return_entry; +} + +/** + * ice_flow_acl_convert_to_acl_prior - Convert to ACL priority + * @p: flow priority + */ +static enum ice_acl_entry_prior +ice_flow_acl_convert_to_acl_prior(enum ice_flow_priority p) +{ + enum ice_acl_entry_prior acl_prior; + + switch (p) { + case ICE_FLOW_PRIO_LOW: + acl_prior = ICE_LOW; + break; + case ICE_FLOW_PRIO_NORMAL: + acl_prior = ICE_NORMAL; + break; + case ICE_FLOW_PRIO_HIGH: + acl_prior = ICE_HIGH; + break; + default: + acl_prior = ICE_NORMAL; + break; + } + + return acl_prior; +} + +/** + * ice_flow_acl_union_rng_chk - Perform union operation between two + * range-range checker buffers + * @dst_buf: pointer to destination range checker buffer + * @src_buf: pointer to source range checker buffer + * + * For this function, we do the union between dst_buf and src_buf + * range checker buffer, and we will save the result back to dst_buf + */ +static enum ice_status +ice_flow_acl_union_rng_chk(struct ice_aqc_acl_profile_ranges *dst_buf, + struct ice_aqc_acl_profile_ranges *src_buf) +{ + u8 i, j; + + if (!dst_buf || !src_buf) + return ICE_ERR_BAD_PTR; + + for (i = 0; i < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; i++) { + struct ice_acl_rng_data *cfg_data = NULL, *in_data; + bool will_populate = false; + + in_data = &src_buf->checker_cfg[i]; + + if (!in_data->mask) + break; + + for (j = 0; j < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; j++) { + cfg_data = &dst_buf->checker_cfg[j]; + + if (!cfg_data->mask || + !memcmp(cfg_data, in_data, + sizeof(struct ice_acl_rng_data))) { + will_populate = true; + break; + } + } + + if (will_populate) { + ice_memcpy(cfg_data, in_data, + sizeof(struct ice_acl_rng_data), + ICE_NONDMA_TO_NONDMA); + } else { + /* No available slot left to program range checker */ + return ICE_ERR_MAX_LIMIT; + } + } + + return ICE_SUCCESS; +} + +/** + * ice_flow_acl_add_scen_entry_sync - Add entry to ACL scenario sync + * @hw: pointer to the hardware structure + * @prof: pointer to flow profile + * @entry: double pointer to the flow entry + * + * For this function, we will look at the current added entries in the + * corresponding ACL scenario. Then, we will perform matching logic to + * see if we want to add/modify/do nothing with this new entry. 
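+ *
+ * In short, using the same conditions as ice_flow_acl_find_scen_entry_cond():
+ * - no entry with the same match data exists: add the new entry
+ * - same match data, different priority: remove the existing entry and
+ *   add the new one
+ * - same match data and priority, different actions: reprogram the
+ *   existing entry's actions
+ * - identical entry: free the new entry and hand back the existing one
+ * Independently, the profile's range checkers are reprogrammed first if
+ * the new entry changes them.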
+ */ +static enum ice_status +ice_flow_acl_add_scen_entry_sync(struct ice_hw *hw, struct ice_flow_prof *prof, + struct ice_flow_entry **entry) +{ + bool do_add_entry, do_rem_entry, do_chg_action, do_chg_rng_chk; + struct ice_aqc_acl_profile_ranges query_rng_buf, cfg_rng_buf; + struct ice_acl_act_entry *acts = NULL; + struct ice_flow_entry *exist; + enum ice_status status = ICE_SUCCESS; + struct ice_flow_entry *e; + u8 i; + + if (!entry || !(*entry) || !prof) + return ICE_ERR_BAD_PTR; + + e = *(entry); + + do_chg_rng_chk = false; + if (e->range_buf) { + u8 prof_id = 0; + + status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, + &prof_id); + if (status) + return status; + + /* Query the current range-checker value in FW */ + status = ice_query_acl_prof_ranges(hw, prof_id, &query_rng_buf, + NULL); + if (status) + return status; + ice_memcpy(&cfg_rng_buf, &query_rng_buf, + sizeof(struct ice_aqc_acl_profile_ranges), + ICE_NONDMA_TO_NONDMA); + + /* Generate the new range-checker value */ + status = ice_flow_acl_union_rng_chk(&cfg_rng_buf, e->range_buf); + if (status) + return status; + + /* Reconfigure the range check if the buffer is changed. */ + do_chg_rng_chk = false; + if (memcmp(&query_rng_buf, &cfg_rng_buf, + sizeof(struct ice_aqc_acl_profile_ranges))) { + status = ice_prog_acl_prof_ranges(hw, prof_id, + &cfg_rng_buf, NULL); + if (status) + return status; + + do_chg_rng_chk = true; + } + } + + /* Figure out if we want to (change the ACL action) and/or + * (Add the new ACL entry) and/or (Remove the current ACL entry) + */ + exist = ice_flow_acl_find_scen_entry_cond(prof, e, &do_chg_action, + &do_add_entry, &do_rem_entry); + + if (do_rem_entry) { + status = ice_flow_rem_entry_sync(hw, ICE_BLK_ACL, exist); + if (status) + return status; + } + + /* Prepare the result action buffer */ + acts = (struct ice_acl_act_entry *)ice_calloc + (hw, e->entry_sz, sizeof(struct ice_acl_act_entry)); + for (i = 0; i < e->acts_cnt; i++) + ice_memcpy(&acts[i], &e->acts[i].data.acl_act, + sizeof(struct ice_acl_act_entry), + ICE_NONDMA_TO_NONDMA); + + if (do_add_entry) { + enum ice_acl_entry_prior prior; + u8 *keys, *inverts; + u16 entry_idx; + + keys = (u8 *)e->entry; + inverts = keys + (e->entry_sz / 2); + prior = ice_flow_acl_convert_to_acl_prior(e->priority); + + status = ice_acl_add_entry(hw, prof->cfg.scen, prior, keys, + inverts, acts, e->acts_cnt, + &entry_idx); + if (status) + goto out; + + e->scen_entry_idx = entry_idx; + LIST_ADD(&e->l_entry, &prof->entries); + } else { + if (do_chg_action) { + /* For the action memory info, update the SW's copy of + * exist entry with e's action memory info + */ + ice_free(hw, exist->acts); + exist->acts_cnt = e->acts_cnt; + exist->acts = (struct ice_flow_action *) + ice_calloc(hw, exist->acts_cnt, + sizeof(struct ice_flow_action)); + + if (!exist->acts) { + status = ICE_ERR_NO_MEMORY; + goto out; + } + + ice_memcpy(exist->acts, e->acts, + sizeof(struct ice_flow_action) * e->acts_cnt, + ICE_NONDMA_TO_NONDMA); + + status = ice_acl_prog_act(hw, prof->cfg.scen, acts, + e->acts_cnt, + exist->scen_entry_idx); + if (status) + goto out; + } + + if (do_chg_rng_chk) { + /* In this case, we want to update the range checker + * information of the exist entry + */ + status = ice_flow_acl_union_rng_chk(exist->range_buf, + e->range_buf); + if (status) + goto out; + } + + /* As we don't add the new entry to our SW DB, deallocate its + * memories, and return the exist entry to the caller + */ + ice_dealloc_flow_entry(hw, e); + *(entry) = exist; + } +out: + if (acts) + 
ice_free(hw, acts); + + return status; +} + +/** + * ice_flow_acl_add_scen_entry - Add entry to ACL scenario + * @hw: pointer to the hardware structure + * @prof: pointer to flow profile + * @e: double pointer to the flow entry + */ +static enum ice_status +ice_flow_acl_add_scen_entry(struct ice_hw *hw, struct ice_flow_prof *prof, + struct ice_flow_entry **e) +{ + enum ice_status status; + + ice_acquire_lock(&prof->entries_lock); + status = ice_flow_acl_add_scen_entry_sync(hw, prof, e); + ice_release_lock(&prof->entries_lock); + + return status; +} + +/** + * ice_flow_add_entry - Add a flow entry + * @hw: pointer to the HW struct + * @blk: classification stage + * @prof_id: ID of the profile to add a new flow entry to + * @entry_id: unique ID to identify this flow entry + * @vsi_handle: software VSI handle for the flow entry + * @prio: priority of the flow entry + * @data: pointer to a data buffer containing flow entry's match values/masks + * @acts: arrays of actions to be performed on a match + * @acts_cnt: number of actions + * @entry_h: pointer to buffer that receives the new flow entry's handle + */ +enum ice_status +ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id, + u64 entry_id, u16 vsi_handle, enum ice_flow_priority prio, + void *data, struct ice_flow_action *acts, u8 acts_cnt, + u64 *entry_h) +{ + struct ice_flow_entry *e = NULL; + struct ice_flow_prof *prof; + enum ice_status status = ICE_SUCCESS; + + /* ACL entries must indicate an action */ + if (blk == ICE_BLK_ACL && (!acts || !acts_cnt)) + return ICE_ERR_PARAM; + + /* No flow entry data is expected for RSS */ + if (!entry_h || (!data && blk != ICE_BLK_RSS)) + return ICE_ERR_BAD_PTR; + + if (!ice_is_vsi_valid(hw, vsi_handle)) + return ICE_ERR_PARAM; + + ice_acquire_lock(&hw->fl_profs_locks[blk]); + + prof = ice_flow_find_prof_id(hw, blk, prof_id); + if (!prof) { + status = ICE_ERR_DOES_NOT_EXIST; + } else { + /* Allocate memory for the entry being added and associate + * the VSI to the found flow profile + */ + e = (struct ice_flow_entry *)ice_malloc(hw, sizeof(*e)); + if (!e) + status = ICE_ERR_NO_MEMORY; + else + status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle); + } + + ice_release_lock(&hw->fl_profs_locks[blk]); + if (status) + goto out; + + e->id = entry_id; + e->vsi_handle = vsi_handle; + e->prof = prof; + e->priority = prio; + + switch (blk) { + case ICE_BLK_FD: + case ICE_BLK_RSS: + break; + case ICE_BLK_ACL: + /* ACL will handle the entry management */ + status = ice_flow_acl_frmt_entry(hw, prof, e, (u8 *)data, acts, + acts_cnt); + if (status) + goto out; + + status = ice_flow_acl_add_scen_entry(hw, prof, &e); + if (status) + goto out; + + break; + case ICE_BLK_SW: + case ICE_BLK_PE: + default: + status = ICE_ERR_NOT_IMPL; + goto out; + } + + if (blk != ICE_BLK_ACL) { + /* ACL will handle the entry management */ + ice_acquire_lock(&prof->entries_lock); + LIST_ADD(&e->l_entry, &prof->entries); + ice_release_lock(&prof->entries_lock); + } + + *entry_h = ICE_FLOW_ENTRY_HNDL(e); + +out: + if (status && e) { + if (e->entry) + ice_free(hw, e->entry); + ice_free(hw, e); + } + + return status; +} + +/** + * ice_flow_rem_entry - Remove a flow entry + * @hw: pointer to the HW struct + * @blk: classification stage + * @entry_h: handle to the flow entry to be removed + */ +enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk, + u64 entry_h) +{ + struct ice_flow_entry *entry; + struct ice_flow_prof *prof; + enum ice_status status = ICE_SUCCESS; + + if (entry_h == 
ICE_FLOW_ENTRY_HANDLE_INVAL) + return ICE_ERR_PARAM; + + entry = ICE_FLOW_ENTRY_PTR((unsigned long)entry_h); + + /* Retain the pointer to the flow profile as the entry will be freed */ + prof = entry->prof; + + if (prof) { + ice_acquire_lock(&prof->entries_lock); + status = ice_flow_rem_entry_sync(hw, blk, entry); + ice_release_lock(&prof->entries_lock); + } + + return status; +} + +/** + * ice_flow_set_fld_ext - specifies locations of field from entry's input buffer + * @seg: packet segment the field being set belongs to + * @fld: field to be set + * @field_type: type of the field + * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from + * entry's input buffer + * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's + * input buffer + * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from + * entry's input buffer + * + * This helper function stores information of a field being matched, including + * the type of the field and the locations of the value to match, the mask, and + * and the upper-bound value in the start of the input buffer for a flow entry. + * This function should only be used for fixed-size data structures. + * + * This function also opportunistically determines the protocol headers to be + * present based on the fields being set. Some fields cannot be used alone to + * determine the protocol headers present. Sometimes, fields for particular + * protocol headers are not matched. In those cases, the protocol headers + * must be explicitly set. + */ +static void +ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld, + enum ice_flow_fld_match_type field_type, u16 val_loc, + u16 mask_loc, u16 last_loc) +{ + u64 bit = BIT_ULL(fld); + + seg->match |= bit; + if (field_type == ICE_FLOW_FLD_TYPE_RANGE) + seg->range |= bit; + + seg->fields[fld].type = field_type; + seg->fields[fld].src.val = val_loc; + seg->fields[fld].src.mask = mask_loc; + seg->fields[fld].src.last = last_loc; + + ICE_FLOW_SET_HDRS(seg, ice_flds_info[fld].hdr); +} + +/** + * ice_flow_set_fld - specifies locations of field from entry's input buffer + * @seg: packet segment the field being set belongs to + * @fld: field to be set + * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from + * entry's input buffer + * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's + * input buffer + * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from + * entry's input buffer + * @range: indicate if field being matched is to be in a range + * + * This function specifies the locations, in the form of byte offsets from the + * start of the input buffer for a flow entry, from where the value to match, + * the mask value, and upper value can be extracted. These locations are then + * stored in the flow profile. When adding a flow entry associated with the + * flow profile, these locations will be used to quickly extract the values and + * create the content of a match entry. This function should only be used for + * fixed-size data structures. + */ +void +ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld, + u16 val_loc, u16 mask_loc, u16 last_loc, bool range) +{ + enum ice_flow_fld_match_type t = range ? 
+ ICE_FLOW_FLD_TYPE_RANGE : ICE_FLOW_FLD_TYPE_REG;
+
+ ice_flow_set_fld_ext(seg, fld, t, val_loc, mask_loc, last_loc);
+}
+
+/**
+ * ice_flow_set_fld_prefix - sets locations of prefix field from entry's buf
+ * @seg: packet segment the field being set belongs to
+ * @fld: field to be set
+ * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
+ * entry's input buffer
+ * @pref_loc: location of prefix value from entry's input buffer
+ * @pref_sz: size of the location holding the prefix value
+ *
+ * This function specifies the locations, in the form of byte offsets from the
+ * start of the input buffer for a flow entry, from where the value to match
+ * and the IPv4 prefix value can be extracted. These locations are then stored
+ * in the flow profile. When adding flow entries to the associated flow profile,
+ * these locations can be used to quickly extract the values to create the
+ * content of a match entry. This function should only be used for fixed-size
+ * data structures.
+ */
+void
+ice_flow_set_fld_prefix(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
+ u16 val_loc, u16 pref_loc, u8 pref_sz)
+{
+ /* For this type of field, the "mask" location is for the prefix value's
+ * location and the "last" location is for the size of the location of
+ * the prefix value.
+ */
+ ice_flow_set_fld_ext(seg, fld, ICE_FLOW_FLD_TYPE_PREFIX, val_loc,
+ pref_loc, (u16)pref_sz);
+}
+
+/**
+ * ice_flow_add_fld_raw - sets locations of a raw field from entry's input buf
+ * @seg: packet segment the field being set belongs to
+ * @off: offset of the raw field from the beginning of the segment in bytes
+ * @len: length of the raw pattern to be matched
+ * @val_loc: location of the value to match from entry's input buffer
+ * @mask_loc: location of mask value from entry's input buffer
+ *
+ * This function specifies the offset of the raw field to be matched from the
+ * beginning of the specified packet segment, and the locations, in the form of
+ * byte offsets from the start of the input buffer for a flow entry, from where
+ * the value to match and the mask value can be extracted. These locations are
+ * then stored in the flow profile. When adding flow entries to the associated
+ * flow profile, these locations can be used to quickly extract the values to
+ * create the content of a match entry. This function should only be used for
+ * fixed-size data structures.
+ */
+void
+ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
+ u16 val_loc, u16 mask_loc)
+{
+ if (seg->raws_cnt < ICE_FLOW_SEG_RAW_FLD_MAX) {
+ seg->raws[seg->raws_cnt].off = off;
+ seg->raws[seg->raws_cnt].info.type = ICE_FLOW_FLD_TYPE_SIZE;
+ seg->raws[seg->raws_cnt].info.src.val = val_loc;
+ seg->raws[seg->raws_cnt].info.src.mask = mask_loc;
+ /* The "last" field is used to store the length of the field */
+ seg->raws[seg->raws_cnt].info.src.last = len;
+ }
+
+ /* Overflows of "raws" will be handled as an error condition later in
+ * the flow when this information is processed.
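+ * seg->raws_cnt is still incremented below when the raw field could not
+ * be stored above, so that later processing can detect the overflow.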
+ */ + seg->raws_cnt++; +} + +#define ICE_FLOW_RSS_SEG_HDR_L2_MASKS \ +(ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN) + +#define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \ + (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6) + +#define ICE_FLOW_RSS_SEG_HDR_L4_MASKS \ + (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \ + ICE_FLOW_SEG_HDR_SCTP) + +#define ICE_FLOW_RSS_SEG_HDR_VAL_MASKS \ + (ICE_FLOW_RSS_SEG_HDR_L2_MASKS | \ + ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \ + ICE_FLOW_RSS_SEG_HDR_L4_MASKS) + +/** + * ice_flow_set_rss_seg_info - setup packet segments for RSS + * @segs: pointer to the flow field segment(s) + * @hash_fields: fields to be hashed on for the segment(s) + * @flow_hdr: protocol header fields within a packet segment + * + * Helper function to extract fields from hash bitmap and use flow + * header value to set flow field segment for further use in flow + * profile entry or removal. + */ +static enum ice_status +ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u64 hash_fields, + u32 flow_hdr) +{ + u64 val = hash_fields; + u8 i; + + for (i = 0; val && i < ICE_FLOW_FIELD_IDX_MAX; i++) { + u64 bit = BIT_ULL(i); + + if (val & bit) { + ice_flow_set_fld(segs, (enum ice_flow_field)i, + ICE_FLOW_FLD_OFF_INVAL, + ICE_FLOW_FLD_OFF_INVAL, + ICE_FLOW_FLD_OFF_INVAL, false); + val &= ~bit; + } + } + ICE_FLOW_SET_HDRS(segs, flow_hdr); + + if (segs->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS & + ~ICE_FLOW_RSS_HDRS_INNER_MASK) + return ICE_ERR_PARAM; + + val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS); + if (val && !ice_is_pow2(val)) + return ICE_ERR_CFG; + + val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS); + if (val && !ice_is_pow2(val)) + return ICE_ERR_CFG; + + return ICE_SUCCESS; +} + +/** + * ice_rem_vsi_rss_list - remove VSI from RSS list + * @hw: pointer to the hardware structure + * @vsi_handle: software VSI handle + * + * Remove the VSI from all RSS configurations in the list. + */ +void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle) +{ + struct ice_rss_cfg *r, *tmp; + + if (LIST_EMPTY(&hw->rss_list_head)) + return; + + ice_acquire_lock(&hw->rss_locks); + LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head, + ice_rss_cfg, l_entry) { + if (ice_test_and_clear_bit(vsi_handle, r->vsis)) + if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) { + LIST_DEL(&r->l_entry); + ice_free(hw, r); + } + } + ice_release_lock(&hw->rss_locks); +} + +/** + * ice_rem_vsi_rss_cfg - remove RSS configurations associated with VSI + * @hw: pointer to the hardware structure + * @vsi_handle: software VSI handle + * + * This function will iterate through all flow profiles and disassociate + * the VSI from that profile. If the flow profile has no VSIs it will + * be removed. 
+ */ +enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle) +{ + const enum ice_block blk = ICE_BLK_RSS; + struct ice_flow_prof *p, *t; + enum ice_status status = ICE_SUCCESS; + + if (!ice_is_vsi_valid(hw, vsi_handle)) + return ICE_ERR_PARAM; + + if (LIST_EMPTY(&hw->fl_profs[blk])) + return ICE_SUCCESS; + + ice_acquire_lock(&hw->fl_profs_locks[blk]); + LIST_FOR_EACH_ENTRY_SAFE(p, t, &hw->fl_profs[blk], ice_flow_prof, + l_entry) { + if (ice_is_bit_set(p->vsis, vsi_handle)) { + status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle); + if (status) + break; + + if (!ice_is_any_bit_set(p->vsis, ICE_MAX_VSI)) { + status = ice_flow_rem_prof_sync(hw, blk, p); + if (status) + break; + } + } + } + ice_release_lock(&hw->fl_profs_locks[blk]); + + return status; +} + +/** + * ice_rem_rss_list - remove RSS configuration from list + * @hw: pointer to the hardware structure + * @vsi_handle: software VSI handle + * @prof: pointer to flow profile + * + * Assumption: lock has already been acquired for RSS list + */ +static void +ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof) +{ + struct ice_rss_cfg *r, *tmp; + + /* Search for RSS hash fields associated to the VSI that match the + * hash configurations associated to the flow profile. If found + * remove from the RSS entry list of the VSI context and delete entry. + */ + LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head, + ice_rss_cfg, l_entry) { + if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match && + r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) { + ice_clear_bit(vsi_handle, r->vsis); + if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) { + LIST_DEL(&r->l_entry); + ice_free(hw, r); + } + return; + } + } +} + +/** + * ice_add_rss_list - add RSS configuration to list + * @hw: pointer to the hardware structure + * @vsi_handle: software VSI handle + * @prof: pointer to flow profile + * + * Assumption: lock has already been acquired for RSS list + */ +static enum ice_status +ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof) +{ + struct ice_rss_cfg *r, *rss_cfg; + + LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head, + ice_rss_cfg, l_entry) + if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match && + r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) { + ice_set_bit(vsi_handle, r->vsis); + return ICE_SUCCESS; + } + + rss_cfg = (struct ice_rss_cfg *)ice_malloc(hw, sizeof(*rss_cfg)); + if (!rss_cfg) + return ICE_ERR_NO_MEMORY; + + rss_cfg->hashed_flds = prof->segs[prof->segs_cnt - 1].match; + rss_cfg->packet_hdr = prof->segs[prof->segs_cnt - 1].hdrs; + rss_cfg->symm = prof->cfg.symm; + ice_set_bit(vsi_handle, rss_cfg->vsis); + + LIST_ADD_TAIL(&rss_cfg->l_entry, &hw->rss_list_head); + + return ICE_SUCCESS; +} + +#define ICE_FLOW_PROF_HASH_S 0 +#define ICE_FLOW_PROF_HASH_M (0xFFFFFFFFULL << ICE_FLOW_PROF_HASH_S) +#define ICE_FLOW_PROF_HDR_S 32 +#define ICE_FLOW_PROF_HDR_M (0x3FFFFFFFULL << ICE_FLOW_PROF_HDR_S) +#define ICE_FLOW_PROF_ENCAP_S 63 +#define ICE_FLOW_PROF_ENCAP_M (BIT_ULL(ICE_FLOW_PROF_ENCAP_S)) + +#define ICE_RSS_OUTER_HEADERS 1 +#define ICE_RSS_INNER_HEADERS 2 + +/* Flow profile ID format: + * [0:31] - Packet match fields + * [32:62] - Protocol header + * [63] - Encapsulation flag, 0 if non-tunneled, 1 if tunneled + */ +#define ICE_FLOW_GEN_PROFID(hash, hdr, segs_cnt) \ + (u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \ + (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \ + ((u8)((segs_cnt) - 1) ? 
ICE_FLOW_PROF_ENCAP_M : 0)) + +static void +ice_rss_config_xor_word(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst) +{ + u32 s = ((src % 4) << 3); /* byte shift */ + u32 v = dst | 0x80; /* value to program */ + u8 i = src / 4; /* register index */ + u32 reg; + + reg = rd32(hw, GLQF_HSYMM(prof_id, i)); + reg = (reg & ~(0xff << s)) | (v << s); + wr32(hw, GLQF_HSYMM(prof_id, i), reg); +} + +static void +ice_rss_config_xor(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst, u8 len) +{ + int fv_last_word = + ICE_FLOW_SW_FIELD_VECTOR_MAX / ICE_FLOW_FV_EXTRACT_SZ - 1; + int i; + + for (i = 0; i < len; i++) { + ice_rss_config_xor_word(hw, prof_id, + /* Yes, field vector in GLQF_HSYMM and + * GLQF_HINSET is inversed! + */ + fv_last_word - (src + i), + fv_last_word - (dst + i)); + ice_rss_config_xor_word(hw, prof_id, + fv_last_word - (dst + i), + fv_last_word - (src + i)); + } +} + +static void +ice_rss_update_symm(struct ice_hw *hw, + struct ice_flow_prof *prof) +{ + struct ice_prof_map *map; + u8 prof_id, m; + + map = ice_search_prof_id(hw, ICE_BLK_RSS, prof->id); + prof_id = map->prof_id; + + /* clear to default */ + for (m = 0; m < 6; m++) + wr32(hw, GLQF_HSYMM(prof_id, m), 0); + if (prof->cfg.symm) { + struct ice_flow_seg_info *seg = + &prof->segs[prof->segs_cnt - 1]; + + struct ice_flow_seg_xtrct *ipv4_src = + &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_SA].xtrct; + struct ice_flow_seg_xtrct *ipv4_dst = + &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_DA].xtrct; + struct ice_flow_seg_xtrct *ipv6_src = + &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_SA].xtrct; + struct ice_flow_seg_xtrct *ipv6_dst = + &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_DA].xtrct; + + struct ice_flow_seg_xtrct *tcp_src = + &seg->fields[ICE_FLOW_FIELD_IDX_TCP_SRC_PORT].xtrct; + struct ice_flow_seg_xtrct *tcp_dst = + &seg->fields[ICE_FLOW_FIELD_IDX_TCP_DST_PORT].xtrct; + + struct ice_flow_seg_xtrct *udp_src = + &seg->fields[ICE_FLOW_FIELD_IDX_UDP_SRC_PORT].xtrct; + struct ice_flow_seg_xtrct *udp_dst = + &seg->fields[ICE_FLOW_FIELD_IDX_UDP_DST_PORT].xtrct; + + struct ice_flow_seg_xtrct *sctp_src = + &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT].xtrct; + struct ice_flow_seg_xtrct *sctp_dst = + &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_DST_PORT].xtrct; + + /* xor IPv4 */ + if (ipv4_src->prot_id != 0 && ipv4_dst->prot_id != 0) + ice_rss_config_xor(hw, prof_id, + ipv4_src->idx, ipv4_dst->idx, 2); + + /* xor IPv6 */ + if (ipv6_src->prot_id != 0 && ipv6_dst->prot_id != 0) + ice_rss_config_xor(hw, prof_id, + ipv6_src->idx, ipv6_dst->idx, 8); + + /* xor TCP */ + if (tcp_src->prot_id != 0 && tcp_dst->prot_id != 0) + ice_rss_config_xor(hw, prof_id, + tcp_src->idx, tcp_dst->idx, 1); + + /* xor UDP */ + if (udp_src->prot_id != 0 && udp_dst->prot_id != 0) + ice_rss_config_xor(hw, prof_id, + udp_src->idx, udp_dst->idx, 1); + + /* xor SCTP */ + if (sctp_src->prot_id != 0 && sctp_dst->prot_id != 0) + ice_rss_config_xor(hw, prof_id, + sctp_src->idx, sctp_dst->idx, 1); + } +} + +/** + * ice_add_rss_cfg_sync - add an RSS configuration + * @hw: pointer to the hardware structure + * @vsi_handle: software VSI handle + * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure + * @addl_hdrs: protocol header fields + * @segs_cnt: packet segment count + * @symm: symmetric hash enable/disable + * + * Assumption: lock has already been acquired for RSS list + */ +static enum ice_status +ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds, + u32 addl_hdrs, u8 segs_cnt, bool symm) +{ + const enum ice_block blk = ICE_BLK_RSS; + struct ice_flow_prof *prof = NULL; + struct 
ice_flow_seg_info *segs; + enum ice_status status; + + if (!segs_cnt || segs_cnt > ICE_FLOW_SEG_MAX) + return ICE_ERR_PARAM; + + segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt, + sizeof(*segs)); + if (!segs) + return ICE_ERR_NO_MEMORY; + + /* Construct the packet segment info from the hashed fields */ + status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds, + addl_hdrs); + if (status) + goto exit; + + /* Search for a flow profile that has matching headers, hash fields + * and has the input VSI associated to it. If found, no further + * operations required and exit. + */ + prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt, + vsi_handle, + ICE_FLOW_FIND_PROF_CHK_FLDS | + ICE_FLOW_FIND_PROF_CHK_VSI); + if (prof) { + if (prof->cfg.symm == symm) + goto exit; + prof->cfg.symm = symm; + goto update_symm; + } + + /* Check if a flow profile exists with the same protocol headers and + * associated with the input VSI. If so disasscociate the VSI from + * this profile. The VSI will be added to a new profile created with + * the protocol header and new hash field configuration. + */ + prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt, + vsi_handle, ICE_FLOW_FIND_PROF_CHK_VSI); + if (prof) { + status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle); + if (!status) + ice_rem_rss_list(hw, vsi_handle, prof); + else + goto exit; + + /* Remove profile if it has no VSIs associated */ + if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI)) { + status = ice_flow_rem_prof(hw, blk, prof->id); + if (status) + goto exit; + } + } + + /* Search for a profile that has same match fields only. If this + * exists then associate the VSI to this profile. + */ + prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt, + vsi_handle, + ICE_FLOW_FIND_PROF_CHK_FLDS); + if (prof) { + if (prof->cfg.symm == symm) { + status = ice_flow_assoc_prof(hw, blk, prof, + vsi_handle); + if (!status) + status = ice_add_rss_list(hw, vsi_handle, + prof); + } else { + /* if a profile exist but with different symmetric + * requirement, just return error. + */ + status = ICE_ERR_NOT_SUPPORTED; + } + goto exit; + } + + /* Create a new flow profile with generated profile and packet + * segment information. + */ + status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX, + ICE_FLOW_GEN_PROFID(hashed_flds, + segs[segs_cnt - 1].hdrs, + segs_cnt), + segs, segs_cnt, NULL, 0, &prof); + if (status) + goto exit; + + status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle); + /* If association to a new flow profile failed then this profile can + * be removed. + */ + if (status) { + ice_flow_rem_prof(hw, blk, prof->id); + goto exit; + } + + status = ice_add_rss_list(hw, vsi_handle, prof); + + prof->cfg.symm = symm; + +update_symm: + ice_rss_update_symm(hw, prof); + +exit: + ice_free(hw, segs); + return status; +} + +/** + * ice_add_rss_cfg - add an RSS configuration with specified hashed fields + * @hw: pointer to the hardware structure + * @vsi_handle: software VSI handle + * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure + * @addl_hdrs: protocol header fields + * @symm: symmetric hash enable/disable + * + * This function will generate a flow profile based on fields associated with + * the input fields to hash on, the flow type and use the VSI number to add + * a flow entry to the profile. 
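+ *
+ * A minimal usage sketch (illustrative only): enable symmetric RSS on
+ * TCP over IPv4 for an already validated VSI handle:
+ *
+ *	status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_TCP_IPV4,
+ *				 ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_TCP,
+ *				 true);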
+ */ +enum ice_status +ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds, + u32 addl_hdrs, bool symm) +{ + enum ice_status status; + + if (hashed_flds == ICE_HASH_INVALID || + !ice_is_vsi_valid(hw, vsi_handle)) + return ICE_ERR_PARAM; + + ice_acquire_lock(&hw->rss_locks); + status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs, + ICE_RSS_OUTER_HEADERS, symm); + if (!status) + status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds, + addl_hdrs, ICE_RSS_INNER_HEADERS, + symm); + ice_release_lock(&hw->rss_locks); + + return status; +} + +/** + * ice_rem_rss_cfg_sync - remove an existing RSS configuration + * @hw: pointer to the hardware structure + * @vsi_handle: software VSI handle + * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove + * @addl_hdrs: Protocol header fields within a packet segment + * @segs_cnt: packet segment count + * + * Assumption: lock has already been acquired for RSS list + */ +static enum ice_status +ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds, + u32 addl_hdrs, u8 segs_cnt) +{ + const enum ice_block blk = ICE_BLK_RSS; + struct ice_flow_seg_info *segs; + struct ice_flow_prof *prof; + enum ice_status status; + + segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt, + sizeof(*segs)); + if (!segs) + return ICE_ERR_NO_MEMORY; + + /* Construct the packet segment info from the hashed fields */ + status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds, + addl_hdrs); + if (status) + goto out; + + prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt, + vsi_handle, + ICE_FLOW_FIND_PROF_CHK_FLDS); + if (!prof) { + status = ICE_ERR_DOES_NOT_EXIST; + goto out; + } + + status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle); + if (status) + goto out; + + /* Remove RSS configuration from VSI context before deleting + * the flow profile. + */ + ice_rem_rss_list(hw, vsi_handle, prof); + + if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI)) + status = ice_flow_rem_prof(hw, blk, prof->id); + +out: + ice_free(hw, segs); + return status; +} + +/** + * ice_rem_rss_cfg - remove an existing RSS config with matching hashed fields + * @hw: pointer to the hardware structure + * @vsi_handle: software VSI handle + * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove + * @addl_hdrs: Protocol header fields within a packet segment + * + * This function will lookup the flow profile based on the input + * hash field bitmap, iterate through the profile entry list of + * that profile and find entry associated with input VSI to be + * removed. Calls are made to underlying flow apis which will in + * turn build or update buffers for RSS XLT1 section. 
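+ *
+ * A minimal usage sketch (illustrative only), mirroring the
+ * ice_add_rss_cfg() example above:
+ *
+ *	status = ice_rem_rss_cfg(hw, vsi_handle, ICE_HASH_TCP_IPV4,
+ *				 ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_TCP);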
+ */ +enum ice_status +ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds, + u32 addl_hdrs) +{ + enum ice_status status; + + if (hashed_flds == ICE_HASH_INVALID || + !ice_is_vsi_valid(hw, vsi_handle)) + return ICE_ERR_PARAM; + + ice_acquire_lock(&hw->rss_locks); + status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs, + ICE_RSS_OUTER_HEADERS); + if (!status) + status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds, + addl_hdrs, ICE_RSS_INNER_HEADERS); + ice_release_lock(&hw->rss_locks); + + return status; +} + +/** + * ice_replay_rss_cfg - replay RSS configurations associated with VSI + * @hw: pointer to the hardware structure + * @vsi_handle: software VSI handle + */ +enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle) +{ + enum ice_status status = ICE_SUCCESS; + struct ice_rss_cfg *r; + + if (!ice_is_vsi_valid(hw, vsi_handle)) + return ICE_ERR_PARAM; + + ice_acquire_lock(&hw->rss_locks); + LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head, + ice_rss_cfg, l_entry) { + if (ice_is_bit_set(r->vsis, vsi_handle)) { + status = ice_add_rss_cfg_sync(hw, vsi_handle, + r->hashed_flds, + r->packet_hdr, + ICE_RSS_OUTER_HEADERS, + r->symm); + if (status) + break; + status = ice_add_rss_cfg_sync(hw, vsi_handle, + r->hashed_flds, + r->packet_hdr, + ICE_RSS_INNER_HEADERS, + r->symm); + if (status) + break; + } + } + ice_release_lock(&hw->rss_locks); + + return status; +} + +/** + * ice_get_rss_cfg - returns hashed fields for the given header types + * @hw: pointer to the hardware structure + * @vsi_handle: software VSI handle + * @hdrs: protocol header type + * + * This function will return the match fields of the first instance of flow + * profile having the given header types and containing input VSI + */ +u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs) +{ + struct ice_rss_cfg *r, *rss_cfg = NULL; + + /* verify if the protocol header is non zero and VSI is valid */ + if (hdrs == ICE_FLOW_SEG_HDR_NONE || !ice_is_vsi_valid(hw, vsi_handle)) + return ICE_HASH_INVALID; + + ice_acquire_lock(&hw->rss_locks); + LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head, + ice_rss_cfg, l_entry) + if (ice_is_bit_set(r->vsis, vsi_handle) && + r->packet_hdr == hdrs) { + rss_cfg = r; + break; + } + ice_release_lock(&hw->rss_locks); + + return rss_cfg ? 
rss_cfg->hashed_flds : ICE_HASH_INVALID; +} diff --git a/src/spdk/dpdk/drivers/net/ice/base/ice_flow.h b/src/spdk/dpdk/drivers/net/ice/base/ice_flow.h new file mode 100644 index 000000000..c8a0483e3 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ice/base/ice_flow.h @@ -0,0 +1,496 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + +#ifndef _ICE_FLOW_H_ +#define _ICE_FLOW_H_ + +#include "ice_flex_type.h" +#include "ice_acl.h" +#define ICE_IPV4_MAKE_PREFIX_MASK(prefix) ((u32)(~0) << (32 - (prefix))) +#define ICE_FLOW_PROF_ID_INVAL 0xfffffffffffffffful +#define ICE_FLOW_PROF_ID_BYPASS 0 +#define ICE_FLOW_PROF_ID_DEFAULT 1 +#define ICE_FLOW_ENTRY_HANDLE_INVAL 0 +#define ICE_FLOW_VSI_INVAL 0xffff +#define ICE_FLOW_FLD_OFF_INVAL 0xffff + +/* Generate flow hash field from flow field type(s) */ +#define ICE_FLOW_HASH_ETH \ + (BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_DA) | \ + BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_SA)) +#define ICE_FLOW_HASH_IPV4 \ + (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) | \ + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA)) +#define ICE_FLOW_HASH_IPV6 \ + (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA) | \ + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA)) +#define ICE_FLOW_HASH_TCP_PORT \ + (BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT) | \ + BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT)) +#define ICE_FLOW_HASH_UDP_PORT \ + (BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT) | \ + BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT)) +#define ICE_FLOW_HASH_SCTP_PORT \ + (BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT) | \ + BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT)) + +#define ICE_HASH_INVALID 0 +#define ICE_HASH_TCP_IPV4 (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_TCP_PORT) +#define ICE_HASH_TCP_IPV6 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_TCP_PORT) +#define ICE_HASH_UDP_IPV4 (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_UDP_PORT) +#define ICE_HASH_UDP_IPV6 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_UDP_PORT) +#define ICE_HASH_SCTP_IPV4 (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_SCTP_PORT) +#define ICE_HASH_SCTP_IPV6 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_SCTP_PORT) + +#define ICE_FLOW_HASH_GTP_TEID \ + (BIT_ULL(ICE_FLOW_FIELD_IDX_GTPC_TEID)) + +#define ICE_FLOW_HASH_GTP_IPV4_TEID \ + (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_GTP_TEID) +#define ICE_FLOW_HASH_GTP_IPV6_TEID \ + (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_GTP_TEID) + +#define ICE_FLOW_HASH_GTP_U_TEID \ + (BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_IP_TEID)) + +#define ICE_FLOW_HASH_GTP_U_IPV4_TEID \ + (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_GTP_U_TEID) +#define ICE_FLOW_HASH_GTP_U_IPV6_TEID \ + (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_GTP_U_TEID) + +#define ICE_FLOW_HASH_GTP_U_EH_TEID \ + (BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_EH_TEID)) + +#define ICE_FLOW_HASH_GTP_U_EH_QFI \ + (BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_EH_QFI)) + +#define ICE_FLOW_HASH_GTP_U_IPV4_EH \ + (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_GTP_U_EH_TEID | \ + ICE_FLOW_HASH_GTP_U_EH_QFI) +#define ICE_FLOW_HASH_GTP_U_IPV6_EH \ + (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_GTP_U_EH_TEID | \ + ICE_FLOW_HASH_GTP_U_EH_QFI) + +#define ICE_FLOW_HASH_PPPOE_SESS_ID \ + (BIT_ULL(ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID)) + +#define ICE_FLOW_HASH_PPPOE_SESS_ID_ETH \ + (ICE_FLOW_HASH_ETH | ICE_FLOW_HASH_PPPOE_SESS_ID) +#define ICE_FLOW_HASH_PPPOE_TCP_ID \ + (ICE_FLOW_HASH_TCP_PORT | ICE_FLOW_HASH_PPPOE_SESS_ID) +#define ICE_FLOW_HASH_PPPOE_UDP_ID \ + (ICE_FLOW_HASH_UDP_PORT | ICE_FLOW_HASH_PPPOE_SESS_ID) + +#define ICE_FLOW_HASH_PFCP_SEID \ + (BIT_ULL(ICE_FLOW_FIELD_IDX_PFCP_SEID)) +#define ICE_FLOW_HASH_PFCP_IPV4_SEID \ + (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_PFCP_SEID) +#define ICE_FLOW_HASH_PFCP_IPV6_SEID \ + 
(ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_PFCP_SEID) + +#define ICE_FLOW_HASH_L2TPV3_SESS_ID \ + (BIT_ULL(ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID)) +#define ICE_FLOW_HASH_L2TPV3_IPV4_SESS_ID \ + (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_L2TPV3_SESS_ID) +#define ICE_FLOW_HASH_L2TPV3_IPV6_SESS_ID \ + (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_L2TPV3_SESS_ID) + +#define ICE_FLOW_HASH_ESP_SPI \ + (BIT_ULL(ICE_FLOW_FIELD_IDX_ESP_SPI)) +#define ICE_FLOW_HASH_ESP_IPV4_SPI \ + (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_ESP_SPI) +#define ICE_FLOW_HASH_ESP_IPV6_SPI \ + (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_ESP_SPI) + +#define ICE_FLOW_HASH_AH_SPI \ + (BIT_ULL(ICE_FLOW_FIELD_IDX_AH_SPI)) +#define ICE_FLOW_HASH_AH_IPV4_SPI \ + (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_AH_SPI) +#define ICE_FLOW_HASH_AH_IPV6_SPI \ + (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_AH_SPI) + +#define ICE_FLOW_HASH_NAT_T_ESP_SPI \ + (BIT_ULL(ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI)) +#define ICE_FLOW_HASH_NAT_T_ESP_IPV4_SPI \ + (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_NAT_T_ESP_SPI) +#define ICE_FLOW_HASH_NAT_T_ESP_IPV6_SPI \ + (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_NAT_T_ESP_SPI) + +/* Protocol header fields within a packet segment. A segment consists of one or + * more protocol headers that make up a logical group of protocol headers. Each + * logical group of protocol headers encapsulates or is encapsulated using/by + * tunneling or encapsulation protocols for network virtualization such as GRE, + * VxLAN, etc. + */ +enum ice_flow_seg_hdr { + ICE_FLOW_SEG_HDR_NONE = 0x00000000, + ICE_FLOW_SEG_HDR_ETH = 0x00000001, + ICE_FLOW_SEG_HDR_VLAN = 0x00000002, + ICE_FLOW_SEG_HDR_IPV4 = 0x00000004, + ICE_FLOW_SEG_HDR_IPV6 = 0x00000008, + ICE_FLOW_SEG_HDR_ARP = 0x00000010, + ICE_FLOW_SEG_HDR_ICMP = 0x00000020, + ICE_FLOW_SEG_HDR_TCP = 0x00000040, + ICE_FLOW_SEG_HDR_UDP = 0x00000080, + ICE_FLOW_SEG_HDR_SCTP = 0x00000100, + ICE_FLOW_SEG_HDR_GRE = 0x00000200, + ICE_FLOW_SEG_HDR_GTPC = 0x00000400, + ICE_FLOW_SEG_HDR_GTPC_TEID = 0x00000800, + ICE_FLOW_SEG_HDR_GTPU_IP = 0x00001000, + ICE_FLOW_SEG_HDR_GTPU_EH = 0x00002000, + ICE_FLOW_SEG_HDR_GTPU_DWN = 0x00004000, + ICE_FLOW_SEG_HDR_GTPU_UP = 0x00008000, + ICE_FLOW_SEG_HDR_PPPOE = 0x00010000, + ICE_FLOW_SEG_HDR_PFCP_NODE = 0x00020000, + ICE_FLOW_SEG_HDR_PFCP_SESSION = 0x00040000, + ICE_FLOW_SEG_HDR_L2TPV3 = 0x00080000, + ICE_FLOW_SEG_HDR_ESP = 0x00100000, + ICE_FLOW_SEG_HDR_AH = 0x00200000, + ICE_FLOW_SEG_HDR_NAT_T_ESP = 0x00400000, +}; + +/* These segements all have the same PTYPES, but are otherwise distinguished by + * the value of the gtp_eh_pdu and gtp_eh_pdu_link flags: + * + * gtp_eh_pdu gtp_eh_pdu_link + * ICE_FLOW_SEG_HDR_GTPU_IP 0 0 + * ICE_FLOW_SEG_HDR_GTPU_EH 1 don't care + * ICE_FLOW_SEG_HDR_GTPU_DWN 1 0 + * ICE_FLOW_SEG_HDR_GTPU_UP 1 1 + */ +#define ICE_FLOW_SEG_HDR_GTPU (ICE_FLOW_SEG_HDR_GTPU_IP | \ + ICE_FLOW_SEG_HDR_GTPU_EH | \ + ICE_FLOW_SEG_HDR_GTPU_DWN | \ + ICE_FLOW_SEG_HDR_GTPU_UP) +#define ICE_FLOW_SEG_HDR_PFCP (ICE_FLOW_SEG_HDR_PFCP_NODE | \ + ICE_FLOW_SEG_HDR_PFCP_SESSION) + +enum ice_flow_field { + /* L2 */ + ICE_FLOW_FIELD_IDX_ETH_DA, + ICE_FLOW_FIELD_IDX_ETH_SA, + ICE_FLOW_FIELD_IDX_S_VLAN, + ICE_FLOW_FIELD_IDX_C_VLAN, + ICE_FLOW_FIELD_IDX_ETH_TYPE, + /* L3 */ + ICE_FLOW_FIELD_IDX_IPV4_DSCP, + ICE_FLOW_FIELD_IDX_IPV6_DSCP, + ICE_FLOW_FIELD_IDX_IPV4_TTL, + ICE_FLOW_FIELD_IDX_IPV4_PROT, + ICE_FLOW_FIELD_IDX_IPV6_TTL, + ICE_FLOW_FIELD_IDX_IPV6_PROT, + ICE_FLOW_FIELD_IDX_IPV4_SA, + ICE_FLOW_FIELD_IDX_IPV4_DA, + ICE_FLOW_FIELD_IDX_IPV6_SA, + ICE_FLOW_FIELD_IDX_IPV6_DA, + /* L4 */ + ICE_FLOW_FIELD_IDX_TCP_SRC_PORT, + 
ICE_FLOW_FIELD_IDX_TCP_DST_PORT, + ICE_FLOW_FIELD_IDX_UDP_SRC_PORT, + ICE_FLOW_FIELD_IDX_UDP_DST_PORT, + ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT, + ICE_FLOW_FIELD_IDX_SCTP_DST_PORT, + ICE_FLOW_FIELD_IDX_TCP_FLAGS, + /* ARP */ + ICE_FLOW_FIELD_IDX_ARP_SIP, + ICE_FLOW_FIELD_IDX_ARP_DIP, + ICE_FLOW_FIELD_IDX_ARP_SHA, + ICE_FLOW_FIELD_IDX_ARP_DHA, + ICE_FLOW_FIELD_IDX_ARP_OP, + /* ICMP */ + ICE_FLOW_FIELD_IDX_ICMP_TYPE, + ICE_FLOW_FIELD_IDX_ICMP_CODE, + /* GRE */ + ICE_FLOW_FIELD_IDX_GRE_KEYID, + /* GTPC_TEID */ + ICE_FLOW_FIELD_IDX_GTPC_TEID, + /* GTPU_IP */ + ICE_FLOW_FIELD_IDX_GTPU_IP_TEID, + /* GTPU_EH */ + ICE_FLOW_FIELD_IDX_GTPU_EH_TEID, + ICE_FLOW_FIELD_IDX_GTPU_EH_QFI, + /* GTPU_UP */ + ICE_FLOW_FIELD_IDX_GTPU_UP_TEID, + /* GTPU_DWN */ + ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID, + /* PPPOE */ + ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID, + /* PFCP */ + ICE_FLOW_FIELD_IDX_PFCP_SEID, + /* L2TPV3 */ + ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID, + /* ESP */ + ICE_FLOW_FIELD_IDX_ESP_SPI, + /* AH */ + ICE_FLOW_FIELD_IDX_AH_SPI, + /* NAT_T ESP */ + ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI, + /* The total number of enums must not exceed 64 */ + ICE_FLOW_FIELD_IDX_MAX +}; + +/* Flow headers and fields for AVF support */ +enum ice_flow_avf_hdr_field { + /* Values 0 - 28 are reserved for future use */ + ICE_AVF_FLOW_FIELD_INVALID = 0, + ICE_AVF_FLOW_FIELD_UNICAST_IPV4_UDP = 29, + ICE_AVF_FLOW_FIELD_MULTICAST_IPV4_UDP, + ICE_AVF_FLOW_FIELD_IPV4_UDP, + ICE_AVF_FLOW_FIELD_IPV4_TCP_SYN_NO_ACK, + ICE_AVF_FLOW_FIELD_IPV4_TCP, + ICE_AVF_FLOW_FIELD_IPV4_SCTP, + ICE_AVF_FLOW_FIELD_IPV4_OTHER, + ICE_AVF_FLOW_FIELD_FRAG_IPV4, + /* Values 37-38 are reserved */ + ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP = 39, + ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP, + ICE_AVF_FLOW_FIELD_IPV6_UDP, + ICE_AVF_FLOW_FIELD_IPV6_TCP_SYN_NO_ACK, + ICE_AVF_FLOW_FIELD_IPV6_TCP, + ICE_AVF_FLOW_FIELD_IPV6_SCTP, + ICE_AVF_FLOW_FIELD_IPV6_OTHER, + ICE_AVF_FLOW_FIELD_FRAG_IPV6, + ICE_AVF_FLOW_FIELD_RSVD47, + ICE_AVF_FLOW_FIELD_FCOE_OX, + ICE_AVF_FLOW_FIELD_FCOE_RX, + ICE_AVF_FLOW_FIELD_FCOE_OTHER, + /* Values 51-62 are reserved */ + ICE_AVF_FLOW_FIELD_L2_PAYLOAD = 63, + ICE_AVF_FLOW_FIELD_MAX +}; + +/* Supported RSS offloads This macro is defined to support + * VIRTCHNL_OP_GET_RSS_HENA_CAPS ops. PF driver sends the RSS hardware + * capabilities to the caller of this ops. 
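+ * Each bit position in this bitmask corresponds to an ice_flow_avf_hdr_field
+ * value; for example, BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP) below enables the
+ * IPv4/TCP hash type.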
+ */ +#define ICE_DEFAULT_RSS_HENA ( \ + BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_UDP) | \ + BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP) | \ + BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP) | \ + BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_OTHER) | \ + BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV4) | \ + BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_UDP) | \ + BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP) | \ + BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP) | \ + BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_OTHER) | \ + BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV6) | \ + BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP_SYN_NO_ACK) | \ + BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV4_UDP) | \ + BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV4_UDP) | \ + BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP_SYN_NO_ACK) | \ + BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP) | \ + BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP)) + +enum ice_flow_dir { + ICE_FLOW_DIR_UNDEFINED = 0, + ICE_FLOW_TX = 0x01, + ICE_FLOW_RX = 0x02, + ICE_FLOW_TX_RX = ICE_FLOW_RX | ICE_FLOW_TX +}; + +enum ice_flow_priority { + ICE_FLOW_PRIO_LOW, + ICE_FLOW_PRIO_NORMAL, + ICE_FLOW_PRIO_HIGH +}; + +#define ICE_FLOW_SEG_MAX 2 +#define ICE_FLOW_SEG_RAW_FLD_MAX 2 +#define ICE_FLOW_PROFILE_MAX 1024 +#define ICE_FLOW_SW_FIELD_VECTOR_MAX 48 +#define ICE_FLOW_ACL_FIELD_VECTOR_MAX 32 +#define ICE_FLOW_FV_EXTRACT_SZ 2 + +#define ICE_FLOW_SET_HDRS(seg, val) ((seg)->hdrs |= (u32)(val)) + +struct ice_flow_seg_xtrct { + u8 prot_id; /* Protocol ID of extracted header field */ + u16 off; /* Starting offset of the field in header in bytes */ + u8 idx; /* Index of FV entry used */ + u8 disp; /* Displacement of field in bits fr. FV entry's start */ + u16 mask; /* Mask for field */ +}; + +enum ice_flow_fld_match_type { + ICE_FLOW_FLD_TYPE_REG, /* Value, mask */ + ICE_FLOW_FLD_TYPE_RANGE, /* Value, mask, last (upper bound) */ + ICE_FLOW_FLD_TYPE_PREFIX, /* IP address, prefix, size of prefix */ + ICE_FLOW_FLD_TYPE_SIZE, /* Value, mask, size of match */ +}; + +struct ice_flow_fld_loc { + /* Describe offsets of field information relative to the beginning of + * input buffer provided when adding flow entries. 
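+ * For example, with an ICE_FLOW_FLD_TYPE_RANGE match the lower bound is read
+ * from offset 'val', the mask from offset 'mask', and the upper bound from
+ * offset 'last' of that buffer.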
+ */ + u16 val; /* Offset where the value is located */ + u16 mask; /* Offset where the mask/prefix value is located */ + u16 last; /* Length or offset where the upper value is located */ +}; + +struct ice_flow_fld_info { + enum ice_flow_fld_match_type type; + /* Location where to retrieve data from an input buffer */ + struct ice_flow_fld_loc src; + /* Location where to put the data into the final entry buffer */ + struct ice_flow_fld_loc entry; + struct ice_flow_seg_xtrct xtrct; +}; + +struct ice_flow_seg_fld_raw { + struct ice_flow_fld_info info; + u16 off; /* Offset from the start of the segment */ +}; + +struct ice_flow_seg_info { + u32 hdrs; /* Bitmask indicating protocol headers present */ + u64 match; /* Bitmask indicating header fields to be matched */ + u64 range; /* Bitmask indicating header fields matched as ranges */ + + struct ice_flow_fld_info fields[ICE_FLOW_FIELD_IDX_MAX]; + + u8 raws_cnt; /* Number of raw fields to be matched */ + struct ice_flow_seg_fld_raw raws[ICE_FLOW_SEG_RAW_FLD_MAX]; +}; + +/* This structure describes a flow entry, and is tracked only in this file */ +struct ice_flow_entry { + struct LIST_ENTRY_TYPE l_entry; + + u64 id; + struct ice_flow_prof *prof; + /* Action list */ + struct ice_flow_action *acts; + /* Flow entry's content */ + void *entry; + /* Range buffer (For ACL only) */ + struct ice_aqc_acl_profile_ranges *range_buf; + enum ice_flow_priority priority; + u16 vsi_handle; + u16 entry_sz; + /* Entry index in the ACL's scenario */ + u16 scen_entry_idx; +#define ICE_FLOW_ACL_MAX_NUM_ACT 2 + u8 acts_cnt; +}; + +#define ICE_FLOW_ENTRY_HNDL(e) ((unsigned long)e) +#define ICE_FLOW_ENTRY_PTR(h) ((struct ice_flow_entry *)(h)) + +struct ice_flow_prof { + struct LIST_ENTRY_TYPE l_entry; + + u64 id; + enum ice_flow_dir dir; + u8 segs_cnt; + u8 acts_cnt; + + /* Keep track of flow entries associated with this flow profile */ + struct ice_lock entries_lock; + struct LIST_HEAD_TYPE entries; + + struct ice_flow_seg_info segs[ICE_FLOW_SEG_MAX]; + + /* software VSI handles referenced by this flow profile */ + ice_declare_bitmap(vsis, ICE_MAX_VSI); + + union { + /* struct sw_recipe */ + struct ice_acl_scen *scen; + /* struct fd */ + u32 data; + /* Symmetric Hash for RSS */ + bool symm; + } cfg; + + /* Default actions */ + struct ice_flow_action *acts; +}; + +struct ice_rss_cfg { + struct LIST_ENTRY_TYPE l_entry; + /* bitmap of VSIs added to the RSS entry */ + ice_declare_bitmap(vsis, ICE_MAX_VSI); + u64 hashed_flds; + u32 packet_hdr; + bool symm; +}; + +enum ice_flow_action_type { + ICE_FLOW_ACT_NOP, + ICE_FLOW_ACT_ALLOW, + ICE_FLOW_ACT_DROP, + ICE_FLOW_ACT_CNTR_PKT, + ICE_FLOW_ACT_FWD_VSI, + ICE_FLOW_ACT_FWD_VSI_LIST, /* Should be abstracted away */ + ICE_FLOW_ACT_FWD_QUEUE, /* Can Queues be abstracted away? */ + ICE_FLOW_ACT_FWD_QUEUE_GROUP, /* Can Queues be abstracted away? 
*/ + ICE_FLOW_ACT_PUSH, + ICE_FLOW_ACT_POP, + ICE_FLOW_ACT_MODIFY, + ICE_FLOW_ACT_CNTR_BYTES, + ICE_FLOW_ACT_CNTR_PKT_BYTES, + ICE_FLOW_ACT_GENERIC_0, + ICE_FLOW_ACT_GENERIC_1, + ICE_FLOW_ACT_GENERIC_2, + ICE_FLOW_ACT_GENERIC_3, + ICE_FLOW_ACT_GENERIC_4, + ICE_FLOW_ACT_RPT_FLOW_ID, + ICE_FLOW_ACT_BUILD_PROF_IDX, +}; + +struct ice_flow_action { + enum ice_flow_action_type type; + union { + struct ice_acl_act_entry acl_act; + u32 dummy; + } data; +}; + +u64 +ice_flow_find_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir, + struct ice_flow_seg_info *segs, u8 segs_cnt); +enum ice_status +ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir, + u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt, + struct ice_flow_action *acts, u8 acts_cnt, + struct ice_flow_prof **prof); +enum ice_status +ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id); +enum ice_status +ice_flow_assoc_vsig_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle, + u16 vsig); +enum ice_status +ice_flow_get_hw_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id, + u8 *hw_prof); + +u64 ice_flow_find_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_id); +enum ice_status +ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id, + u64 entry_id, u16 vsi, enum ice_flow_priority prio, + void *data, struct ice_flow_action *acts, u8 acts_cnt, + u64 *entry_h); +enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk, + u64 entry_h); +void +ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld, + u16 val_loc, u16 mask_loc, u16 last_loc, bool range); +void +ice_flow_set_fld_prefix(struct ice_flow_seg_info *seg, enum ice_flow_field fld, + u16 val_loc, u16 prefix_loc, u8 prefix_sz); +void +ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len, + u16 val_loc, u16 mask_loc); +void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle); +enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle); +enum ice_status +ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds); +enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle); +enum ice_status +ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds, + u32 addl_hdrs, bool symm); +enum ice_status +ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds, + u32 addl_hdrs); +u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs); +#endif /* _ICE_FLOW_H_ */ diff --git a/src/spdk/dpdk/drivers/net/ice/base/ice_hw_autogen.h b/src/spdk/dpdk/drivers/net/ice/base/ice_hw_autogen.h new file mode 100644 index 000000000..1c9c84dfb --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ice/base/ice_hw_autogen.h @@ -0,0 +1,9452 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + +/* Machine-generated file; do not edit */ +#ifndef _ICE_HW_AUTOGEN_H_ +#define _ICE_HW_AUTOGEN_H_ + +#define GL_RDPU_CNTRL 0x00052054 /* Reset Source: CORER */ +#define GL_RDPU_CNTRL_RX_PAD_EN_S 0 +#define GL_RDPU_CNTRL_RX_PAD_EN_M BIT(0) +#define GL_RDPU_CNTRL_UDP_ZERO_EN_S 1 +#define GL_RDPU_CNTRL_UDP_ZERO_EN_M BIT(1) +#define GL_RDPU_CNTRL_BLNC_EN_S 2 +#define GL_RDPU_CNTRL_BLNC_EN_M BIT(2) +#define GL_RDPU_CNTRL_RECIPE_BYPASS_S 3 +#define GL_RDPU_CNTRL_RECIPE_BYPASS_M BIT(3) +#define GL_RDPU_CNTRL_RLAN_ACK_REQ_PM_TH_S 4 +#define GL_RDPU_CNTRL_RLAN_ACK_REQ_PM_TH_M MAKEMASK(0x3F, 4) +#define GL_RDPU_CNTRL_PE_ACK_REQ_PM_TH_S 10 +#define GL_RDPU_CNTRL_PE_ACK_REQ_PM_TH_M MAKEMASK(0x3F, 
10) +#define GL_RDPU_CNTRL_REQ_WB_PM_TH_S 16 +#define GL_RDPU_CNTRL_REQ_WB_PM_TH_M MAKEMASK(0x1F, 16) +#define GL_RDPU_CNTRL_ECO_S 21 +#define GL_RDPU_CNTRL_ECO_M MAKEMASK(0x7FF, 21) +#define MSIX_PBA(_i) (0x00008000 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: FLR */ +#define MSIX_PBA_MAX_INDEX 2 +#define MSIX_PBA_PENBIT_S 0 +#define MSIX_PBA_PENBIT_M MAKEMASK(0xFFFFFFFF, 0) +#define MSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...64 */ /* Reset Source: FLR */ +#define MSIX_TADD_MAX_INDEX 64 +#define MSIX_TADD_MSIXTADD10_S 0 +#define MSIX_TADD_MSIXTADD10_M MAKEMASK(0x3, 0) +#define MSIX_TADD_MSIXTADD_S 2 +#define MSIX_TADD_MSIXTADD_M MAKEMASK(0x3FFFFFFF, 2) +#define MSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...64 */ /* Reset Source: FLR */ +#define MSIX_TUADD_MAX_INDEX 64 +#define MSIX_TUADD_MSIXTUADD_S 0 +#define MSIX_TUADD_MSIXTUADD_M MAKEMASK(0xFFFFFFFF, 0) +#define MSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...64 */ /* Reset Source: FLR */ +#define MSIX_TVCTRL_MAX_INDEX 64 +#define MSIX_TVCTRL_MASK_S 0 +#define MSIX_TVCTRL_MASK_M BIT(0) +#define PF0_FW_HLP_ARQBAH_PAGE 0x02D00180 /* Reset Source: EMPR */ +#define PF0_FW_HLP_ARQBAH_PAGE_ARQBAH_S 0 +#define PF0_FW_HLP_ARQBAH_PAGE_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0) +#define PF0_FW_HLP_ARQBAL_PAGE 0x02D00080 /* Reset Source: EMPR */ +#define PF0_FW_HLP_ARQBAL_PAGE_ARQBAL_LSB_S 0 +#define PF0_FW_HLP_ARQBAL_PAGE_ARQBAL_LSB_M MAKEMASK(0x3F, 0) +#define PF0_FW_HLP_ARQBAL_PAGE_ARQBAL_S 6 +#define PF0_FW_HLP_ARQBAL_PAGE_ARQBAL_M MAKEMASK(0x3FFFFFF, 6) +#define PF0_FW_HLP_ARQH_PAGE 0x02D00380 /* Reset Source: EMPR */ +#define PF0_FW_HLP_ARQH_PAGE_ARQH_S 0 +#define PF0_FW_HLP_ARQH_PAGE_ARQH_M MAKEMASK(0x3FF, 0) +#define PF0_FW_HLP_ARQLEN_PAGE 0x02D00280 /* Reset Source: EMPR */ +#define PF0_FW_HLP_ARQLEN_PAGE_ARQLEN_S 0 +#define PF0_FW_HLP_ARQLEN_PAGE_ARQLEN_M MAKEMASK(0x3FF, 0) +#define PF0_FW_HLP_ARQLEN_PAGE_ARQVFE_S 28 +#define PF0_FW_HLP_ARQLEN_PAGE_ARQVFE_M BIT(28) +#define PF0_FW_HLP_ARQLEN_PAGE_ARQOVFL_S 29 +#define PF0_FW_HLP_ARQLEN_PAGE_ARQOVFL_M BIT(29) +#define PF0_FW_HLP_ARQLEN_PAGE_ARQCRIT_S 30 +#define PF0_FW_HLP_ARQLEN_PAGE_ARQCRIT_M BIT(30) +#define PF0_FW_HLP_ARQLEN_PAGE_ARQENABLE_S 31 +#define PF0_FW_HLP_ARQLEN_PAGE_ARQENABLE_M BIT(31) +#define PF0_FW_HLP_ARQT_PAGE 0x02D00480 /* Reset Source: EMPR */ +#define PF0_FW_HLP_ARQT_PAGE_ARQT_S 0 +#define PF0_FW_HLP_ARQT_PAGE_ARQT_M MAKEMASK(0x3FF, 0) +#define PF0_FW_HLP_ATQBAH_PAGE 0x02D00100 /* Reset Source: EMPR */ +#define PF0_FW_HLP_ATQBAH_PAGE_ATQBAH_S 0 +#define PF0_FW_HLP_ATQBAH_PAGE_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0) +#define PF0_FW_HLP_ATQBAL_PAGE 0x02D00000 /* Reset Source: EMPR */ +#define PF0_FW_HLP_ATQBAL_PAGE_ATQBAL_LSB_S 0 +#define PF0_FW_HLP_ATQBAL_PAGE_ATQBAL_LSB_M MAKEMASK(0x3F, 0) +#define PF0_FW_HLP_ATQBAL_PAGE_ATQBAL_S 6 +#define PF0_FW_HLP_ATQBAL_PAGE_ATQBAL_M MAKEMASK(0x3FFFFFF, 6) +#define PF0_FW_HLP_ATQH_PAGE 0x02D00300 /* Reset Source: EMPR */ +#define PF0_FW_HLP_ATQH_PAGE_ATQH_S 0 +#define PF0_FW_HLP_ATQH_PAGE_ATQH_M MAKEMASK(0x3FF, 0) +#define PF0_FW_HLP_ATQLEN_PAGE 0x02D00200 /* Reset Source: EMPR */ +#define PF0_FW_HLP_ATQLEN_PAGE_ATQLEN_S 0 +#define PF0_FW_HLP_ATQLEN_PAGE_ATQLEN_M MAKEMASK(0x3FF, 0) +#define PF0_FW_HLP_ATQLEN_PAGE_ATQVFE_S 28 +#define PF0_FW_HLP_ATQLEN_PAGE_ATQVFE_M BIT(28) +#define PF0_FW_HLP_ATQLEN_PAGE_ATQOVFL_S 29 +#define PF0_FW_HLP_ATQLEN_PAGE_ATQOVFL_M BIT(29) +#define PF0_FW_HLP_ATQLEN_PAGE_ATQCRIT_S 30 +#define PF0_FW_HLP_ATQLEN_PAGE_ATQCRIT_M BIT(30) +#define PF0_FW_HLP_ATQLEN_PAGE_ATQENABLE_S 31 +#define 
PF0_FW_HLP_ATQLEN_PAGE_ATQENABLE_M BIT(31) +#define PF0_FW_HLP_ATQT_PAGE 0x02D00400 /* Reset Source: EMPR */ +#define PF0_FW_HLP_ATQT_PAGE_ATQT_S 0 +#define PF0_FW_HLP_ATQT_PAGE_ATQT_M MAKEMASK(0x3FF, 0) +#define PF0_FW_PSM_ARQBAH_PAGE 0x02D40180 /* Reset Source: EMPR */ +#define PF0_FW_PSM_ARQBAH_PAGE_ARQBAH_S 0 +#define PF0_FW_PSM_ARQBAH_PAGE_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0) +#define PF0_FW_PSM_ARQBAL_PAGE 0x02D40080 /* Reset Source: EMPR */ +#define PF0_FW_PSM_ARQBAL_PAGE_ARQBAL_LSB_S 0 +#define PF0_FW_PSM_ARQBAL_PAGE_ARQBAL_LSB_M MAKEMASK(0x3F, 0) +#define PF0_FW_PSM_ARQBAL_PAGE_ARQBAL_S 6 +#define PF0_FW_PSM_ARQBAL_PAGE_ARQBAL_M MAKEMASK(0x3FFFFFF, 6) +#define PF0_FW_PSM_ARQH_PAGE 0x02D40380 /* Reset Source: EMPR */ +#define PF0_FW_PSM_ARQH_PAGE_ARQH_S 0 +#define PF0_FW_PSM_ARQH_PAGE_ARQH_M MAKEMASK(0x3FF, 0) +#define PF0_FW_PSM_ARQLEN_PAGE 0x02D40280 /* Reset Source: EMPR */ +#define PF0_FW_PSM_ARQLEN_PAGE_ARQLEN_S 0 +#define PF0_FW_PSM_ARQLEN_PAGE_ARQLEN_M MAKEMASK(0x3FF, 0) +#define PF0_FW_PSM_ARQLEN_PAGE_ARQVFE_S 28 +#define PF0_FW_PSM_ARQLEN_PAGE_ARQVFE_M BIT(28) +#define PF0_FW_PSM_ARQLEN_PAGE_ARQOVFL_S 29 +#define PF0_FW_PSM_ARQLEN_PAGE_ARQOVFL_M BIT(29) +#define PF0_FW_PSM_ARQLEN_PAGE_ARQCRIT_S 30 +#define PF0_FW_PSM_ARQLEN_PAGE_ARQCRIT_M BIT(30) +#define PF0_FW_PSM_ARQLEN_PAGE_ARQENABLE_S 31 +#define PF0_FW_PSM_ARQLEN_PAGE_ARQENABLE_M BIT(31) +#define PF0_FW_PSM_ARQT_PAGE 0x02D40480 /* Reset Source: EMPR */ +#define PF0_FW_PSM_ARQT_PAGE_ARQT_S 0 +#define PF0_FW_PSM_ARQT_PAGE_ARQT_M MAKEMASK(0x3FF, 0) +#define PF0_FW_PSM_ATQBAH_PAGE 0x02D40100 /* Reset Source: EMPR */ +#define PF0_FW_PSM_ATQBAH_PAGE_ATQBAH_S 0 +#define PF0_FW_PSM_ATQBAH_PAGE_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0) +#define PF0_FW_PSM_ATQBAL_PAGE 0x02D40000 /* Reset Source: EMPR */ +#define PF0_FW_PSM_ATQBAL_PAGE_ATQBAL_LSB_S 0 +#define PF0_FW_PSM_ATQBAL_PAGE_ATQBAL_LSB_M MAKEMASK(0x3F, 0) +#define PF0_FW_PSM_ATQBAL_PAGE_ATQBAL_S 6 +#define PF0_FW_PSM_ATQBAL_PAGE_ATQBAL_M MAKEMASK(0x3FFFFFF, 6) +#define PF0_FW_PSM_ATQH_PAGE 0x02D40300 /* Reset Source: EMPR */ +#define PF0_FW_PSM_ATQH_PAGE_ATQH_S 0 +#define PF0_FW_PSM_ATQH_PAGE_ATQH_M MAKEMASK(0x3FF, 0) +#define PF0_FW_PSM_ATQLEN_PAGE 0x02D40200 /* Reset Source: EMPR */ +#define PF0_FW_PSM_ATQLEN_PAGE_ATQLEN_S 0 +#define PF0_FW_PSM_ATQLEN_PAGE_ATQLEN_M MAKEMASK(0x3FF, 0) +#define PF0_FW_PSM_ATQLEN_PAGE_ATQVFE_S 28 +#define PF0_FW_PSM_ATQLEN_PAGE_ATQVFE_M BIT(28) +#define PF0_FW_PSM_ATQLEN_PAGE_ATQOVFL_S 29 +#define PF0_FW_PSM_ATQLEN_PAGE_ATQOVFL_M BIT(29) +#define PF0_FW_PSM_ATQLEN_PAGE_ATQCRIT_S 30 +#define PF0_FW_PSM_ATQLEN_PAGE_ATQCRIT_M BIT(30) +#define PF0_FW_PSM_ATQLEN_PAGE_ATQENABLE_S 31 +#define PF0_FW_PSM_ATQLEN_PAGE_ATQENABLE_M BIT(31) +#define PF0_FW_PSM_ATQT_PAGE 0x02D40400 /* Reset Source: EMPR */ +#define PF0_FW_PSM_ATQT_PAGE_ATQT_S 0 +#define PF0_FW_PSM_ATQT_PAGE_ATQT_M MAKEMASK(0x3FF, 0) +#define PF0_MBX_CPM_ARQBAH_PAGE 0x02D80190 /* Reset Source: CORER */ +#define PF0_MBX_CPM_ARQBAH_PAGE_ARQBAH_S 0 +#define PF0_MBX_CPM_ARQBAH_PAGE_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0) +#define PF0_MBX_CPM_ARQBAL_PAGE 0x02D80090 /* Reset Source: CORER */ +#define PF0_MBX_CPM_ARQBAL_PAGE_ARQBAL_LSB_S 0 +#define PF0_MBX_CPM_ARQBAL_PAGE_ARQBAL_LSB_M MAKEMASK(0x3F, 0) +#define PF0_MBX_CPM_ARQBAL_PAGE_ARQBAL_S 6 +#define PF0_MBX_CPM_ARQBAL_PAGE_ARQBAL_M MAKEMASK(0x3FFFFFF, 6) +#define PF0_MBX_CPM_ARQH_PAGE 0x02D80390 /* Reset Source: CORER */ +#define PF0_MBX_CPM_ARQH_PAGE_ARQH_S 0 +#define PF0_MBX_CPM_ARQH_PAGE_ARQH_M MAKEMASK(0x3FF, 0) +#define PF0_MBX_CPM_ARQLEN_PAGE 
0x02D80290 /* Reset Source: PFR */ +#define PF0_MBX_CPM_ARQLEN_PAGE_ARQLEN_S 0 +#define PF0_MBX_CPM_ARQLEN_PAGE_ARQLEN_M MAKEMASK(0x3FF, 0) +#define PF0_MBX_CPM_ARQLEN_PAGE_ARQVFE_S 28 +#define PF0_MBX_CPM_ARQLEN_PAGE_ARQVFE_M BIT(28) +#define PF0_MBX_CPM_ARQLEN_PAGE_ARQOVFL_S 29 +#define PF0_MBX_CPM_ARQLEN_PAGE_ARQOVFL_M BIT(29) +#define PF0_MBX_CPM_ARQLEN_PAGE_ARQCRIT_S 30 +#define PF0_MBX_CPM_ARQLEN_PAGE_ARQCRIT_M BIT(30) +#define PF0_MBX_CPM_ARQLEN_PAGE_ARQENABLE_S 31 +#define PF0_MBX_CPM_ARQLEN_PAGE_ARQENABLE_M BIT(31) +#define PF0_MBX_CPM_ARQT_PAGE 0x02D80490 /* Reset Source: CORER */ +#define PF0_MBX_CPM_ARQT_PAGE_ARQT_S 0 +#define PF0_MBX_CPM_ARQT_PAGE_ARQT_M MAKEMASK(0x3FF, 0) +#define PF0_MBX_CPM_ATQBAH_PAGE 0x02D80110 /* Reset Source: CORER */ +#define PF0_MBX_CPM_ATQBAH_PAGE_ATQBAH_S 0 +#define PF0_MBX_CPM_ATQBAH_PAGE_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0) +#define PF0_MBX_CPM_ATQBAL_PAGE 0x02D80010 /* Reset Source: CORER */ +#define PF0_MBX_CPM_ATQBAL_PAGE_ATQBAL_S 6 +#define PF0_MBX_CPM_ATQBAL_PAGE_ATQBAL_M MAKEMASK(0x3FFFFFF, 6) +#define PF0_MBX_CPM_ATQH_PAGE 0x02D80310 /* Reset Source: CORER */ +#define PF0_MBX_CPM_ATQH_PAGE_ATQH_S 0 +#define PF0_MBX_CPM_ATQH_PAGE_ATQH_M MAKEMASK(0x3FF, 0) +#define PF0_MBX_CPM_ATQLEN_PAGE 0x02D80210 /* Reset Source: PFR */ +#define PF0_MBX_CPM_ATQLEN_PAGE_ATQLEN_S 0 +#define PF0_MBX_CPM_ATQLEN_PAGE_ATQLEN_M MAKEMASK(0x3FF, 0) +#define PF0_MBX_CPM_ATQLEN_PAGE_ATQVFE_S 28 +#define PF0_MBX_CPM_ATQLEN_PAGE_ATQVFE_M BIT(28) +#define PF0_MBX_CPM_ATQLEN_PAGE_ATQOVFL_S 29 +#define PF0_MBX_CPM_ATQLEN_PAGE_ATQOVFL_M BIT(29) +#define PF0_MBX_CPM_ATQLEN_PAGE_ATQCRIT_S 30 +#define PF0_MBX_CPM_ATQLEN_PAGE_ATQCRIT_M BIT(30) +#define PF0_MBX_CPM_ATQLEN_PAGE_ATQENABLE_S 31 +#define PF0_MBX_CPM_ATQLEN_PAGE_ATQENABLE_M BIT(31) +#define PF0_MBX_CPM_ATQT_PAGE 0x02D80410 /* Reset Source: CORER */ +#define PF0_MBX_CPM_ATQT_PAGE_ATQT_S 0 +#define PF0_MBX_CPM_ATQT_PAGE_ATQT_M MAKEMASK(0x3FF, 0) +#define PF0_MBX_HLP_ARQBAH_PAGE 0x02D00190 /* Reset Source: CORER */ +#define PF0_MBX_HLP_ARQBAH_PAGE_ARQBAH_S 0 +#define PF0_MBX_HLP_ARQBAH_PAGE_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0) +#define PF0_MBX_HLP_ARQBAL_PAGE 0x02D00090 /* Reset Source: CORER */ +#define PF0_MBX_HLP_ARQBAL_PAGE_ARQBAL_LSB_S 0 +#define PF0_MBX_HLP_ARQBAL_PAGE_ARQBAL_LSB_M MAKEMASK(0x3F, 0) +#define PF0_MBX_HLP_ARQBAL_PAGE_ARQBAL_S 6 +#define PF0_MBX_HLP_ARQBAL_PAGE_ARQBAL_M MAKEMASK(0x3FFFFFF, 6) +#define PF0_MBX_HLP_ARQH_PAGE 0x02D00390 /* Reset Source: CORER */ +#define PF0_MBX_HLP_ARQH_PAGE_ARQH_S 0 +#define PF0_MBX_HLP_ARQH_PAGE_ARQH_M MAKEMASK(0x3FF, 0) +#define PF0_MBX_HLP_ARQLEN_PAGE 0x02D00290 /* Reset Source: PFR */ +#define PF0_MBX_HLP_ARQLEN_PAGE_ARQLEN_S 0 +#define PF0_MBX_HLP_ARQLEN_PAGE_ARQLEN_M MAKEMASK(0x3FF, 0) +#define PF0_MBX_HLP_ARQLEN_PAGE_ARQVFE_S 28 +#define PF0_MBX_HLP_ARQLEN_PAGE_ARQVFE_M BIT(28) +#define PF0_MBX_HLP_ARQLEN_PAGE_ARQOVFL_S 29 +#define PF0_MBX_HLP_ARQLEN_PAGE_ARQOVFL_M BIT(29) +#define PF0_MBX_HLP_ARQLEN_PAGE_ARQCRIT_S 30 +#define PF0_MBX_HLP_ARQLEN_PAGE_ARQCRIT_M BIT(30) +#define PF0_MBX_HLP_ARQLEN_PAGE_ARQENABLE_S 31 +#define PF0_MBX_HLP_ARQLEN_PAGE_ARQENABLE_M BIT(31) +#define PF0_MBX_HLP_ARQT_PAGE 0x02D00490 /* Reset Source: CORER */ +#define PF0_MBX_HLP_ARQT_PAGE_ARQT_S 0 +#define PF0_MBX_HLP_ARQT_PAGE_ARQT_M MAKEMASK(0x3FF, 0) +#define PF0_MBX_HLP_ATQBAH_PAGE 0x02D00110 /* Reset Source: CORER */ +#define PF0_MBX_HLP_ATQBAH_PAGE_ATQBAH_S 0 +#define PF0_MBX_HLP_ATQBAH_PAGE_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0) +#define PF0_MBX_HLP_ATQBAL_PAGE 0x02D00010 /* Reset Source: 
CORER */ +#define PF0_MBX_HLP_ATQBAL_PAGE_ATQBAL_S 6 +#define PF0_MBX_HLP_ATQBAL_PAGE_ATQBAL_M MAKEMASK(0x3FFFFFF, 6) +#define PF0_MBX_HLP_ATQH_PAGE 0x02D00310 /* Reset Source: CORER */ +#define PF0_MBX_HLP_ATQH_PAGE_ATQH_S 0 +#define PF0_MBX_HLP_ATQH_PAGE_ATQH_M MAKEMASK(0x3FF, 0) +#define PF0_MBX_HLP_ATQLEN_PAGE 0x02D00210 /* Reset Source: PFR */ +#define PF0_MBX_HLP_ATQLEN_PAGE_ATQLEN_S 0 +#define PF0_MBX_HLP_ATQLEN_PAGE_ATQLEN_M MAKEMASK(0x3FF, 0) +#define PF0_MBX_HLP_ATQLEN_PAGE_ATQVFE_S 28 +#define PF0_MBX_HLP_ATQLEN_PAGE_ATQVFE_M BIT(28) +#define PF0_MBX_HLP_ATQLEN_PAGE_ATQOVFL_S 29 +#define PF0_MBX_HLP_ATQLEN_PAGE_ATQOVFL_M BIT(29) +#define PF0_MBX_HLP_ATQLEN_PAGE_ATQCRIT_S 30 +#define PF0_MBX_HLP_ATQLEN_PAGE_ATQCRIT_M BIT(30) +#define PF0_MBX_HLP_ATQLEN_PAGE_ATQENABLE_S 31 +#define PF0_MBX_HLP_ATQLEN_PAGE_ATQENABLE_M BIT(31) +#define PF0_MBX_HLP_ATQT_PAGE 0x02D00410 /* Reset Source: CORER */ +#define PF0_MBX_HLP_ATQT_PAGE_ATQT_S 0 +#define PF0_MBX_HLP_ATQT_PAGE_ATQT_M MAKEMASK(0x3FF, 0) +#define PF0_MBX_PSM_ARQBAH_PAGE 0x02D40190 /* Reset Source: CORER */ +#define PF0_MBX_PSM_ARQBAH_PAGE_ARQBAH_S 0 +#define PF0_MBX_PSM_ARQBAH_PAGE_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0) +#define PF0_MBX_PSM_ARQBAL_PAGE 0x02D40090 /* Reset Source: CORER */ +#define PF0_MBX_PSM_ARQBAL_PAGE_ARQBAL_LSB_S 0 +#define PF0_MBX_PSM_ARQBAL_PAGE_ARQBAL_LSB_M MAKEMASK(0x3F, 0) +#define PF0_MBX_PSM_ARQBAL_PAGE_ARQBAL_S 6 +#define PF0_MBX_PSM_ARQBAL_PAGE_ARQBAL_M MAKEMASK(0x3FFFFFF, 6) +#define PF0_MBX_PSM_ARQH_PAGE 0x02D40390 /* Reset Source: CORER */ +#define PF0_MBX_PSM_ARQH_PAGE_ARQH_S 0 +#define PF0_MBX_PSM_ARQH_PAGE_ARQH_M MAKEMASK(0x3FF, 0) +#define PF0_MBX_PSM_ARQLEN_PAGE 0x02D40290 /* Reset Source: PFR */ +#define PF0_MBX_PSM_ARQLEN_PAGE_ARQLEN_S 0 +#define PF0_MBX_PSM_ARQLEN_PAGE_ARQLEN_M MAKEMASK(0x3FF, 0) +#define PF0_MBX_PSM_ARQLEN_PAGE_ARQVFE_S 28 +#define PF0_MBX_PSM_ARQLEN_PAGE_ARQVFE_M BIT(28) +#define PF0_MBX_PSM_ARQLEN_PAGE_ARQOVFL_S 29 +#define PF0_MBX_PSM_ARQLEN_PAGE_ARQOVFL_M BIT(29) +#define PF0_MBX_PSM_ARQLEN_PAGE_ARQCRIT_S 30 +#define PF0_MBX_PSM_ARQLEN_PAGE_ARQCRIT_M BIT(30) +#define PF0_MBX_PSM_ARQLEN_PAGE_ARQENABLE_S 31 +#define PF0_MBX_PSM_ARQLEN_PAGE_ARQENABLE_M BIT(31) +#define PF0_MBX_PSM_ARQT_PAGE 0x02D40490 /* Reset Source: CORER */ +#define PF0_MBX_PSM_ARQT_PAGE_ARQT_S 0 +#define PF0_MBX_PSM_ARQT_PAGE_ARQT_M MAKEMASK(0x3FF, 0) +#define PF0_MBX_PSM_ATQBAH_PAGE 0x02D40110 /* Reset Source: CORER */ +#define PF0_MBX_PSM_ATQBAH_PAGE_ATQBAH_S 0 +#define PF0_MBX_PSM_ATQBAH_PAGE_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0) +#define PF0_MBX_PSM_ATQBAL_PAGE 0x02D40010 /* Reset Source: CORER */ +#define PF0_MBX_PSM_ATQBAL_PAGE_ATQBAL_S 6 +#define PF0_MBX_PSM_ATQBAL_PAGE_ATQBAL_M MAKEMASK(0x3FFFFFF, 6) +#define PF0_MBX_PSM_ATQH_PAGE 0x02D40310 /* Reset Source: CORER */ +#define PF0_MBX_PSM_ATQH_PAGE_ATQH_S 0 +#define PF0_MBX_PSM_ATQH_PAGE_ATQH_M MAKEMASK(0x3FF, 0) +#define PF0_MBX_PSM_ATQLEN_PAGE 0x02D40210 /* Reset Source: PFR */ +#define PF0_MBX_PSM_ATQLEN_PAGE_ATQLEN_S 0 +#define PF0_MBX_PSM_ATQLEN_PAGE_ATQLEN_M MAKEMASK(0x3FF, 0) +#define PF0_MBX_PSM_ATQLEN_PAGE_ATQVFE_S 28 +#define PF0_MBX_PSM_ATQLEN_PAGE_ATQVFE_M BIT(28) +#define PF0_MBX_PSM_ATQLEN_PAGE_ATQOVFL_S 29 +#define PF0_MBX_PSM_ATQLEN_PAGE_ATQOVFL_M BIT(29) +#define PF0_MBX_PSM_ATQLEN_PAGE_ATQCRIT_S 30 +#define PF0_MBX_PSM_ATQLEN_PAGE_ATQCRIT_M BIT(30) +#define PF0_MBX_PSM_ATQLEN_PAGE_ATQENABLE_S 31 +#define PF0_MBX_PSM_ATQLEN_PAGE_ATQENABLE_M BIT(31) +#define PF0_MBX_PSM_ATQT_PAGE 0x02D40410 /* Reset Source: CORER */ +#define 
PF0_MBX_PSM_ATQT_PAGE_ATQT_S 0 +#define PF0_MBX_PSM_ATQT_PAGE_ATQT_M MAKEMASK(0x3FF, 0) +#define PF0_SB_CPM_ARQBAH_PAGE 0x02D801A0 /* Reset Source: CORER */ +#define PF0_SB_CPM_ARQBAH_PAGE_ARQBAH_S 0 +#define PF0_SB_CPM_ARQBAH_PAGE_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0) +#define PF0_SB_CPM_ARQBAL_PAGE 0x02D800A0 /* Reset Source: CORER */ +#define PF0_SB_CPM_ARQBAL_PAGE_ARQBAL_LSB_S 0 +#define PF0_SB_CPM_ARQBAL_PAGE_ARQBAL_LSB_M MAKEMASK(0x3F, 0) +#define PF0_SB_CPM_ARQBAL_PAGE_ARQBAL_S 6 +#define PF0_SB_CPM_ARQBAL_PAGE_ARQBAL_M MAKEMASK(0x3FFFFFF, 6) +#define PF0_SB_CPM_ARQH_PAGE 0x02D803A0 /* Reset Source: CORER */ +#define PF0_SB_CPM_ARQH_PAGE_ARQH_S 0 +#define PF0_SB_CPM_ARQH_PAGE_ARQH_M MAKEMASK(0x3FF, 0) +#define PF0_SB_CPM_ARQLEN_PAGE 0x02D802A0 /* Reset Source: PFR */ +#define PF0_SB_CPM_ARQLEN_PAGE_ARQLEN_S 0 +#define PF0_SB_CPM_ARQLEN_PAGE_ARQLEN_M MAKEMASK(0x3FF, 0) +#define PF0_SB_CPM_ARQLEN_PAGE_ARQVFE_S 28 +#define PF0_SB_CPM_ARQLEN_PAGE_ARQVFE_M BIT(28) +#define PF0_SB_CPM_ARQLEN_PAGE_ARQOVFL_S 29 +#define PF0_SB_CPM_ARQLEN_PAGE_ARQOVFL_M BIT(29) +#define PF0_SB_CPM_ARQLEN_PAGE_ARQCRIT_S 30 +#define PF0_SB_CPM_ARQLEN_PAGE_ARQCRIT_M BIT(30) +#define PF0_SB_CPM_ARQLEN_PAGE_ARQENABLE_S 31 +#define PF0_SB_CPM_ARQLEN_PAGE_ARQENABLE_M BIT(31) +#define PF0_SB_CPM_ARQT_PAGE 0x02D804A0 /* Reset Source: CORER */ +#define PF0_SB_CPM_ARQT_PAGE_ARQT_S 0 +#define PF0_SB_CPM_ARQT_PAGE_ARQT_M MAKEMASK(0x3FF, 0) +#define PF0_SB_CPM_ATQBAH_PAGE 0x02D80120 /* Reset Source: CORER */ +#define PF0_SB_CPM_ATQBAH_PAGE_ATQBAH_S 0 +#define PF0_SB_CPM_ATQBAH_PAGE_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0) +#define PF0_SB_CPM_ATQBAL_PAGE 0x02D80020 /* Reset Source: CORER */ +#define PF0_SB_CPM_ATQBAL_PAGE_ATQBAL_S 6 +#define PF0_SB_CPM_ATQBAL_PAGE_ATQBAL_M MAKEMASK(0x3FFFFFF, 6) +#define PF0_SB_CPM_ATQH_PAGE 0x02D80320 /* Reset Source: CORER */ +#define PF0_SB_CPM_ATQH_PAGE_ATQH_S 0 +#define PF0_SB_CPM_ATQH_PAGE_ATQH_M MAKEMASK(0x3FF, 0) +#define PF0_SB_CPM_ATQLEN_PAGE 0x02D80220 /* Reset Source: PFR */ +#define PF0_SB_CPM_ATQLEN_PAGE_ATQLEN_S 0 +#define PF0_SB_CPM_ATQLEN_PAGE_ATQLEN_M MAKEMASK(0x3FF, 0) +#define PF0_SB_CPM_ATQLEN_PAGE_ATQVFE_S 28 +#define PF0_SB_CPM_ATQLEN_PAGE_ATQVFE_M BIT(28) +#define PF0_SB_CPM_ATQLEN_PAGE_ATQOVFL_S 29 +#define PF0_SB_CPM_ATQLEN_PAGE_ATQOVFL_M BIT(29) +#define PF0_SB_CPM_ATQLEN_PAGE_ATQCRIT_S 30 +#define PF0_SB_CPM_ATQLEN_PAGE_ATQCRIT_M BIT(30) +#define PF0_SB_CPM_ATQLEN_PAGE_ATQENABLE_S 31 +#define PF0_SB_CPM_ATQLEN_PAGE_ATQENABLE_M BIT(31) +#define PF0_SB_CPM_ATQT_PAGE 0x02D80420 /* Reset Source: CORER */ +#define PF0_SB_CPM_ATQT_PAGE_ATQT_S 0 +#define PF0_SB_CPM_ATQT_PAGE_ATQT_M MAKEMASK(0x3FF, 0) +#define PF0_SB_HLP_ARQBAH_PAGE 0x02D001A0 /* Reset Source: CORER */ +#define PF0_SB_HLP_ARQBAH_PAGE_ARQBAH_S 0 +#define PF0_SB_HLP_ARQBAH_PAGE_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0) +#define PF0_SB_HLP_ARQBAL_PAGE 0x02D000A0 /* Reset Source: CORER */ +#define PF0_SB_HLP_ARQBAL_PAGE_ARQBAL_LSB_S 0 +#define PF0_SB_HLP_ARQBAL_PAGE_ARQBAL_LSB_M MAKEMASK(0x3F, 0) +#define PF0_SB_HLP_ARQBAL_PAGE_ARQBAL_S 6 +#define PF0_SB_HLP_ARQBAL_PAGE_ARQBAL_M MAKEMASK(0x3FFFFFF, 6) +#define PF0_SB_HLP_ARQH_PAGE 0x02D003A0 /* Reset Source: CORER */ +#define PF0_SB_HLP_ARQH_PAGE_ARQH_S 0 +#define PF0_SB_HLP_ARQH_PAGE_ARQH_M MAKEMASK(0x3FF, 0) +#define PF0_SB_HLP_ARQLEN_PAGE 0x02D002A0 /* Reset Source: PFR */ +#define PF0_SB_HLP_ARQLEN_PAGE_ARQLEN_S 0 +#define PF0_SB_HLP_ARQLEN_PAGE_ARQLEN_M MAKEMASK(0x3FF, 0) +#define PF0_SB_HLP_ARQLEN_PAGE_ARQVFE_S 28 +#define PF0_SB_HLP_ARQLEN_PAGE_ARQVFE_M BIT(28) +#define 
PF0_SB_HLP_ARQLEN_PAGE_ARQOVFL_S 29 +#define PF0_SB_HLP_ARQLEN_PAGE_ARQOVFL_M BIT(29) +#define PF0_SB_HLP_ARQLEN_PAGE_ARQCRIT_S 30 +#define PF0_SB_HLP_ARQLEN_PAGE_ARQCRIT_M BIT(30) +#define PF0_SB_HLP_ARQLEN_PAGE_ARQENABLE_S 31 +#define PF0_SB_HLP_ARQLEN_PAGE_ARQENABLE_M BIT(31) +#define PF0_SB_HLP_ARQT_PAGE 0x02D004A0 /* Reset Source: CORER */ +#define PF0_SB_HLP_ARQT_PAGE_ARQT_S 0 +#define PF0_SB_HLP_ARQT_PAGE_ARQT_M MAKEMASK(0x3FF, 0) +#define PF0_SB_HLP_ATQBAH_PAGE 0x02D00120 /* Reset Source: CORER */ +#define PF0_SB_HLP_ATQBAH_PAGE_ATQBAH_S 0 +#define PF0_SB_HLP_ATQBAH_PAGE_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0) +#define PF0_SB_HLP_ATQBAL_PAGE 0x02D00020 /* Reset Source: CORER */ +#define PF0_SB_HLP_ATQBAL_PAGE_ATQBAL_S 6 +#define PF0_SB_HLP_ATQBAL_PAGE_ATQBAL_M MAKEMASK(0x3FFFFFF, 6) +#define PF0_SB_HLP_ATQH_PAGE 0x02D00320 /* Reset Source: CORER */ +#define PF0_SB_HLP_ATQH_PAGE_ATQH_S 0 +#define PF0_SB_HLP_ATQH_PAGE_ATQH_M MAKEMASK(0x3FF, 0) +#define PF0_SB_HLP_ATQLEN_PAGE 0x02D00220 /* Reset Source: PFR */ +#define PF0_SB_HLP_ATQLEN_PAGE_ATQLEN_S 0 +#define PF0_SB_HLP_ATQLEN_PAGE_ATQLEN_M MAKEMASK(0x3FF, 0) +#define PF0_SB_HLP_ATQLEN_PAGE_ATQVFE_S 28 +#define PF0_SB_HLP_ATQLEN_PAGE_ATQVFE_M BIT(28) +#define PF0_SB_HLP_ATQLEN_PAGE_ATQOVFL_S 29 +#define PF0_SB_HLP_ATQLEN_PAGE_ATQOVFL_M BIT(29) +#define PF0_SB_HLP_ATQLEN_PAGE_ATQCRIT_S 30 +#define PF0_SB_HLP_ATQLEN_PAGE_ATQCRIT_M BIT(30) +#define PF0_SB_HLP_ATQLEN_PAGE_ATQENABLE_S 31 +#define PF0_SB_HLP_ATQLEN_PAGE_ATQENABLE_M BIT(31) +#define PF0_SB_HLP_ATQT_PAGE 0x02D00420 /* Reset Source: CORER */ +#define PF0_SB_HLP_ATQT_PAGE_ATQT_S 0 +#define PF0_SB_HLP_ATQT_PAGE_ATQT_M MAKEMASK(0x3FF, 0) +#define PF0INT_DYN_CTL(_i) (0x03000000 + ((_i) * 4096)) /* _i=0...2047 */ /* Reset Source: CORER */ +#define PF0INT_DYN_CTL_MAX_INDEX 2047 +#define PF0INT_DYN_CTL_INTENA_S 0 +#define PF0INT_DYN_CTL_INTENA_M BIT(0) +#define PF0INT_DYN_CTL_CLEARPBA_S 1 +#define PF0INT_DYN_CTL_CLEARPBA_M BIT(1) +#define PF0INT_DYN_CTL_SWINT_TRIG_S 2 +#define PF0INT_DYN_CTL_SWINT_TRIG_M BIT(2) +#define PF0INT_DYN_CTL_ITR_INDX_S 3 +#define PF0INT_DYN_CTL_ITR_INDX_M MAKEMASK(0x3, 3) +#define PF0INT_DYN_CTL_INTERVAL_S 5 +#define PF0INT_DYN_CTL_INTERVAL_M MAKEMASK(0xFFF, 5) +#define PF0INT_DYN_CTL_SW_ITR_INDX_ENA_S 24 +#define PF0INT_DYN_CTL_SW_ITR_INDX_ENA_M BIT(24) +#define PF0INT_DYN_CTL_SW_ITR_INDX_S 25 +#define PF0INT_DYN_CTL_SW_ITR_INDX_M MAKEMASK(0x3, 25) +#define PF0INT_DYN_CTL_WB_ON_ITR_S 30 +#define PF0INT_DYN_CTL_WB_ON_ITR_M BIT(30) +#define PF0INT_DYN_CTL_INTENA_MSK_S 31 +#define PF0INT_DYN_CTL_INTENA_MSK_M BIT(31) +#define PF0INT_ITR_0(_i) (0x03000004 + ((_i) * 4096)) /* _i=0...2047 */ /* Reset Source: CORER */ +#define PF0INT_ITR_0_MAX_INDEX 2047 +#define PF0INT_ITR_0_INTERVAL_S 0 +#define PF0INT_ITR_0_INTERVAL_M MAKEMASK(0xFFF, 0) +#define PF0INT_ITR_1(_i) (0x03000008 + ((_i) * 4096)) /* _i=0...2047 */ /* Reset Source: CORER */ +#define PF0INT_ITR_1_MAX_INDEX 2047 +#define PF0INT_ITR_1_INTERVAL_S 0 +#define PF0INT_ITR_1_INTERVAL_M MAKEMASK(0xFFF, 0) +#define PF0INT_ITR_2(_i) (0x0300000C + ((_i) * 4096)) /* _i=0...2047 */ /* Reset Source: CORER */ +#define PF0INT_ITR_2_MAX_INDEX 2047 +#define PF0INT_ITR_2_INTERVAL_S 0 +#define PF0INT_ITR_2_INTERVAL_M MAKEMASK(0xFFF, 0) +#define PF0INT_OICR_CPM_PAGE 0x02D03000 /* Reset Source: CORER */ +#define PF0INT_OICR_CPM_PAGE_INTEVENT_S 0 +#define PF0INT_OICR_CPM_PAGE_INTEVENT_M BIT(0) +#define PF0INT_OICR_CPM_PAGE_QUEUE_S 1 +#define PF0INT_OICR_CPM_PAGE_QUEUE_M BIT(1) +#define PF0INT_OICR_CPM_PAGE_RSV1_S 2 +#define 
PF0INT_OICR_CPM_PAGE_RSV1_M MAKEMASK(0xFF, 2) +#define PF0INT_OICR_CPM_PAGE_HH_COMP_S 10 +#define PF0INT_OICR_CPM_PAGE_HH_COMP_M BIT(10) +#define PF0INT_OICR_CPM_PAGE_TSYN_TX_S 11 +#define PF0INT_OICR_CPM_PAGE_TSYN_TX_M BIT(11) +#define PF0INT_OICR_CPM_PAGE_TSYN_EVNT_S 12 +#define PF0INT_OICR_CPM_PAGE_TSYN_EVNT_M BIT(12) +#define PF0INT_OICR_CPM_PAGE_TSYN_TGT_S 13 +#define PF0INT_OICR_CPM_PAGE_TSYN_TGT_M BIT(13) +#define PF0INT_OICR_CPM_PAGE_HLP_RDY_S 14 +#define PF0INT_OICR_CPM_PAGE_HLP_RDY_M BIT(14) +#define PF0INT_OICR_CPM_PAGE_CPM_RDY_S 15 +#define PF0INT_OICR_CPM_PAGE_CPM_RDY_M BIT(15) +#define PF0INT_OICR_CPM_PAGE_ECC_ERR_S 16 +#define PF0INT_OICR_CPM_PAGE_ECC_ERR_M BIT(16) +#define PF0INT_OICR_CPM_PAGE_RSV2_S 17 +#define PF0INT_OICR_CPM_PAGE_RSV2_M MAKEMASK(0x3, 17) +#define PF0INT_OICR_CPM_PAGE_MAL_DETECT_S 19 +#define PF0INT_OICR_CPM_PAGE_MAL_DETECT_M BIT(19) +#define PF0INT_OICR_CPM_PAGE_GRST_S 20 +#define PF0INT_OICR_CPM_PAGE_GRST_M BIT(20) +#define PF0INT_OICR_CPM_PAGE_PCI_EXCEPTION_S 21 +#define PF0INT_OICR_CPM_PAGE_PCI_EXCEPTION_M BIT(21) +#define PF0INT_OICR_CPM_PAGE_GPIO_S 22 +#define PF0INT_OICR_CPM_PAGE_GPIO_M BIT(22) +#define PF0INT_OICR_CPM_PAGE_RSV3_S 23 +#define PF0INT_OICR_CPM_PAGE_RSV3_M BIT(23) +#define PF0INT_OICR_CPM_PAGE_STORM_DETECT_S 24 +#define PF0INT_OICR_CPM_PAGE_STORM_DETECT_M BIT(24) +#define PF0INT_OICR_CPM_PAGE_LINK_STAT_CHANGE_S 25 +#define PF0INT_OICR_CPM_PAGE_LINK_STAT_CHANGE_M BIT(25) +#define PF0INT_OICR_CPM_PAGE_HMC_ERR_S 26 +#define PF0INT_OICR_CPM_PAGE_HMC_ERR_M BIT(26) +#define PF0INT_OICR_CPM_PAGE_PE_PUSH_S 27 +#define PF0INT_OICR_CPM_PAGE_PE_PUSH_M BIT(27) +#define PF0INT_OICR_CPM_PAGE_PE_CRITERR_S 28 +#define PF0INT_OICR_CPM_PAGE_PE_CRITERR_M BIT(28) +#define PF0INT_OICR_CPM_PAGE_VFLR_S 29 +#define PF0INT_OICR_CPM_PAGE_VFLR_M BIT(29) +#define PF0INT_OICR_CPM_PAGE_XLR_HW_DONE_S 30 +#define PF0INT_OICR_CPM_PAGE_XLR_HW_DONE_M BIT(30) +#define PF0INT_OICR_CPM_PAGE_SWINT_S 31 +#define PF0INT_OICR_CPM_PAGE_SWINT_M BIT(31) +#define PF0INT_OICR_ENA_CPM_PAGE 0x02D03100 /* Reset Source: CORER */ +#define PF0INT_OICR_ENA_CPM_PAGE_RSV0_S 0 +#define PF0INT_OICR_ENA_CPM_PAGE_RSV0_M BIT(0) +#define PF0INT_OICR_ENA_CPM_PAGE_INT_ENA_S 1 +#define PF0INT_OICR_ENA_CPM_PAGE_INT_ENA_M MAKEMASK(0x7FFFFFFF, 1) +#define PF0INT_OICR_ENA_HLP_PAGE 0x02D01100 /* Reset Source: CORER */ +#define PF0INT_OICR_ENA_HLP_PAGE_RSV0_S 0 +#define PF0INT_OICR_ENA_HLP_PAGE_RSV0_M BIT(0) +#define PF0INT_OICR_ENA_HLP_PAGE_INT_ENA_S 1 +#define PF0INT_OICR_ENA_HLP_PAGE_INT_ENA_M MAKEMASK(0x7FFFFFFF, 1) +#define PF0INT_OICR_ENA_PSM_PAGE 0x02D02100 /* Reset Source: CORER */ +#define PF0INT_OICR_ENA_PSM_PAGE_RSV0_S 0 +#define PF0INT_OICR_ENA_PSM_PAGE_RSV0_M BIT(0) +#define PF0INT_OICR_ENA_PSM_PAGE_INT_ENA_S 1 +#define PF0INT_OICR_ENA_PSM_PAGE_INT_ENA_M MAKEMASK(0x7FFFFFFF, 1) +#define PF0INT_OICR_HLP_PAGE 0x02D01000 /* Reset Source: CORER */ +#define PF0INT_OICR_HLP_PAGE_INTEVENT_S 0 +#define PF0INT_OICR_HLP_PAGE_INTEVENT_M BIT(0) +#define PF0INT_OICR_HLP_PAGE_QUEUE_S 1 +#define PF0INT_OICR_HLP_PAGE_QUEUE_M BIT(1) +#define PF0INT_OICR_HLP_PAGE_RSV1_S 2 +#define PF0INT_OICR_HLP_PAGE_RSV1_M MAKEMASK(0xFF, 2) +#define PF0INT_OICR_HLP_PAGE_HH_COMP_S 10 +#define PF0INT_OICR_HLP_PAGE_HH_COMP_M BIT(10) +#define PF0INT_OICR_HLP_PAGE_TSYN_TX_S 11 +#define PF0INT_OICR_HLP_PAGE_TSYN_TX_M BIT(11) +#define PF0INT_OICR_HLP_PAGE_TSYN_EVNT_S 12 +#define PF0INT_OICR_HLP_PAGE_TSYN_EVNT_M BIT(12) +#define PF0INT_OICR_HLP_PAGE_TSYN_TGT_S 13 +#define PF0INT_OICR_HLP_PAGE_TSYN_TGT_M BIT(13) +#define 
PF0INT_OICR_HLP_PAGE_HLP_RDY_S 14 +#define PF0INT_OICR_HLP_PAGE_HLP_RDY_M BIT(14) +#define PF0INT_OICR_HLP_PAGE_CPM_RDY_S 15 +#define PF0INT_OICR_HLP_PAGE_CPM_RDY_M BIT(15) +#define PF0INT_OICR_HLP_PAGE_ECC_ERR_S 16 +#define PF0INT_OICR_HLP_PAGE_ECC_ERR_M BIT(16) +#define PF0INT_OICR_HLP_PAGE_RSV2_S 17 +#define PF0INT_OICR_HLP_PAGE_RSV2_M MAKEMASK(0x3, 17) +#define PF0INT_OICR_HLP_PAGE_MAL_DETECT_S 19 +#define PF0INT_OICR_HLP_PAGE_MAL_DETECT_M BIT(19) +#define PF0INT_OICR_HLP_PAGE_GRST_S 20 +#define PF0INT_OICR_HLP_PAGE_GRST_M BIT(20) +#define PF0INT_OICR_HLP_PAGE_PCI_EXCEPTION_S 21 +#define PF0INT_OICR_HLP_PAGE_PCI_EXCEPTION_M BIT(21) +#define PF0INT_OICR_HLP_PAGE_GPIO_S 22 +#define PF0INT_OICR_HLP_PAGE_GPIO_M BIT(22) +#define PF0INT_OICR_HLP_PAGE_RSV3_S 23 +#define PF0INT_OICR_HLP_PAGE_RSV3_M BIT(23) +#define PF0INT_OICR_HLP_PAGE_STORM_DETECT_S 24 +#define PF0INT_OICR_HLP_PAGE_STORM_DETECT_M BIT(24) +#define PF0INT_OICR_HLP_PAGE_LINK_STAT_CHANGE_S 25 +#define PF0INT_OICR_HLP_PAGE_LINK_STAT_CHANGE_M BIT(25) +#define PF0INT_OICR_HLP_PAGE_HMC_ERR_S 26 +#define PF0INT_OICR_HLP_PAGE_HMC_ERR_M BIT(26) +#define PF0INT_OICR_HLP_PAGE_PE_PUSH_S 27 +#define PF0INT_OICR_HLP_PAGE_PE_PUSH_M BIT(27) +#define PF0INT_OICR_HLP_PAGE_PE_CRITERR_S 28 +#define PF0INT_OICR_HLP_PAGE_PE_CRITERR_M BIT(28) +#define PF0INT_OICR_HLP_PAGE_VFLR_S 29 +#define PF0INT_OICR_HLP_PAGE_VFLR_M BIT(29) +#define PF0INT_OICR_HLP_PAGE_XLR_HW_DONE_S 30 +#define PF0INT_OICR_HLP_PAGE_XLR_HW_DONE_M BIT(30) +#define PF0INT_OICR_HLP_PAGE_SWINT_S 31 +#define PF0INT_OICR_HLP_PAGE_SWINT_M BIT(31) +#define PF0INT_OICR_PSM_PAGE 0x02D02000 /* Reset Source: CORER */ +#define PF0INT_OICR_PSM_PAGE_INTEVENT_S 0 +#define PF0INT_OICR_PSM_PAGE_INTEVENT_M BIT(0) +#define PF0INT_OICR_PSM_PAGE_QUEUE_S 1 +#define PF0INT_OICR_PSM_PAGE_QUEUE_M BIT(1) +#define PF0INT_OICR_PSM_PAGE_RSV1_S 2 +#define PF0INT_OICR_PSM_PAGE_RSV1_M MAKEMASK(0xFF, 2) +#define PF0INT_OICR_PSM_PAGE_HH_COMP_S 10 +#define PF0INT_OICR_PSM_PAGE_HH_COMP_M BIT(10) +#define PF0INT_OICR_PSM_PAGE_TSYN_TX_S 11 +#define PF0INT_OICR_PSM_PAGE_TSYN_TX_M BIT(11) +#define PF0INT_OICR_PSM_PAGE_TSYN_EVNT_S 12 +#define PF0INT_OICR_PSM_PAGE_TSYN_EVNT_M BIT(12) +#define PF0INT_OICR_PSM_PAGE_TSYN_TGT_S 13 +#define PF0INT_OICR_PSM_PAGE_TSYN_TGT_M BIT(13) +#define PF0INT_OICR_PSM_PAGE_HLP_RDY_S 14 +#define PF0INT_OICR_PSM_PAGE_HLP_RDY_M BIT(14) +#define PF0INT_OICR_PSM_PAGE_CPM_RDY_S 15 +#define PF0INT_OICR_PSM_PAGE_CPM_RDY_M BIT(15) +#define PF0INT_OICR_PSM_PAGE_ECC_ERR_S 16 +#define PF0INT_OICR_PSM_PAGE_ECC_ERR_M BIT(16) +#define PF0INT_OICR_PSM_PAGE_RSV2_S 17 +#define PF0INT_OICR_PSM_PAGE_RSV2_M MAKEMASK(0x3, 17) +#define PF0INT_OICR_PSM_PAGE_MAL_DETECT_S 19 +#define PF0INT_OICR_PSM_PAGE_MAL_DETECT_M BIT(19) +#define PF0INT_OICR_PSM_PAGE_GRST_S 20 +#define PF0INT_OICR_PSM_PAGE_GRST_M BIT(20) +#define PF0INT_OICR_PSM_PAGE_PCI_EXCEPTION_S 21 +#define PF0INT_OICR_PSM_PAGE_PCI_EXCEPTION_M BIT(21) +#define PF0INT_OICR_PSM_PAGE_GPIO_S 22 +#define PF0INT_OICR_PSM_PAGE_GPIO_M BIT(22) +#define PF0INT_OICR_PSM_PAGE_RSV3_S 23 +#define PF0INT_OICR_PSM_PAGE_RSV3_M BIT(23) +#define PF0INT_OICR_PSM_PAGE_STORM_DETECT_S 24 +#define PF0INT_OICR_PSM_PAGE_STORM_DETECT_M BIT(24) +#define PF0INT_OICR_PSM_PAGE_LINK_STAT_CHANGE_S 25 +#define PF0INT_OICR_PSM_PAGE_LINK_STAT_CHANGE_M BIT(25) +#define PF0INT_OICR_PSM_PAGE_HMC_ERR_S 26 +#define PF0INT_OICR_PSM_PAGE_HMC_ERR_M BIT(26) +#define PF0INT_OICR_PSM_PAGE_PE_PUSH_S 27 +#define PF0INT_OICR_PSM_PAGE_PE_PUSH_M BIT(27) +#define PF0INT_OICR_PSM_PAGE_PE_CRITERR_S 28 +#define 
PF0INT_OICR_PSM_PAGE_PE_CRITERR_M BIT(28) +#define PF0INT_OICR_PSM_PAGE_VFLR_S 29 +#define PF0INT_OICR_PSM_PAGE_VFLR_M BIT(29) +#define PF0INT_OICR_PSM_PAGE_XLR_HW_DONE_S 30 +#define PF0INT_OICR_PSM_PAGE_XLR_HW_DONE_M BIT(30) +#define PF0INT_OICR_PSM_PAGE_SWINT_S 31 +#define PF0INT_OICR_PSM_PAGE_SWINT_M BIT(31) +#define QRX_TAIL_PAGE(_QRX) (0x03800000 + ((_QRX) * 4096)) /* _i=0...2047 */ /* Reset Source: CORER */ +#define QRX_TAIL_PAGE_MAX_INDEX 2047 +#define QRX_TAIL_PAGE_TAIL_S 0 +#define QRX_TAIL_PAGE_TAIL_M MAKEMASK(0x1FFF, 0) +#define QTX_COMM_DBELL_PAGE(_DBQM) (0x04000000 + ((_DBQM) * 4096)) /* _i=0...16383 */ /* Reset Source: CORER */ +#define QTX_COMM_DBELL_PAGE_MAX_INDEX 16383 +#define QTX_COMM_DBELL_PAGE_QTX_COMM_DBELL_S 0 +#define QTX_COMM_DBELL_PAGE_QTX_COMM_DBELL_M MAKEMASK(0xFFFFFFFF, 0) +#define QTX_COMM_DBLQ_DBELL_PAGE(_DBLQ) (0x02F00000 + ((_DBLQ) * 4096)) /* _i=0...255 */ /* Reset Source: CORER */ +#define QTX_COMM_DBLQ_DBELL_PAGE_MAX_INDEX 255 +#define QTX_COMM_DBLQ_DBELL_PAGE_TAIL_S 0 +#define QTX_COMM_DBLQ_DBELL_PAGE_TAIL_M MAKEMASK(0x1FFF, 0) +#define VSI_MBX_ARQBAH(_VSI) (0x02000018 + ((_VSI) * 4096)) /* _i=0...767 */ /* Reset Source: CORER */ +#define VSI_MBX_ARQBAH_MAX_INDEX 767 +#define VSI_MBX_ARQBAH_ARQBAH_S 0 +#define VSI_MBX_ARQBAH_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0) +#define VSI_MBX_ARQBAL(_VSI) (0x02000014 + ((_VSI) * 4096)) /* _i=0...767 */ /* Reset Source: CORER */ +#define VSI_MBX_ARQBAL_MAX_INDEX 767 +#define VSI_MBX_ARQBAL_ARQBAL_LSB_S 0 +#define VSI_MBX_ARQBAL_ARQBAL_LSB_M MAKEMASK(0x3F, 0) +#define VSI_MBX_ARQBAL_ARQBAL_S 6 +#define VSI_MBX_ARQBAL_ARQBAL_M MAKEMASK(0x3FFFFFF, 6) +#define VSI_MBX_ARQH(_VSI) (0x02000020 + ((_VSI) * 4096)) /* _i=0...767 */ /* Reset Source: CORER */ +#define VSI_MBX_ARQH_MAX_INDEX 767 +#define VSI_MBX_ARQH_ARQH_S 0 +#define VSI_MBX_ARQH_ARQH_M MAKEMASK(0x3FF, 0) +#define VSI_MBX_ARQLEN(_VSI) (0x0200001C + ((_VSI) * 4096)) /* _i=0...767 */ /* Reset Source: PFR */ +#define VSI_MBX_ARQLEN_MAX_INDEX 767 +#define VSI_MBX_ARQLEN_ARQLEN_S 0 +#define VSI_MBX_ARQLEN_ARQLEN_M MAKEMASK(0x3FF, 0) +#define VSI_MBX_ARQLEN_ARQVFE_S 28 +#define VSI_MBX_ARQLEN_ARQVFE_M BIT(28) +#define VSI_MBX_ARQLEN_ARQOVFL_S 29 +#define VSI_MBX_ARQLEN_ARQOVFL_M BIT(29) +#define VSI_MBX_ARQLEN_ARQCRIT_S 30 +#define VSI_MBX_ARQLEN_ARQCRIT_M BIT(30) +#define VSI_MBX_ARQLEN_ARQENABLE_S 31 +#define VSI_MBX_ARQLEN_ARQENABLE_M BIT(31) +#define VSI_MBX_ARQT(_VSI) (0x02000024 + ((_VSI) * 4096)) /* _i=0...767 */ /* Reset Source: CORER */ +#define VSI_MBX_ARQT_MAX_INDEX 767 +#define VSI_MBX_ARQT_ARQT_S 0 +#define VSI_MBX_ARQT_ARQT_M MAKEMASK(0x3FF, 0) +#define VSI_MBX_ATQBAH(_VSI) (0x02000004 + ((_VSI) * 4096)) /* _i=0...767 */ /* Reset Source: CORER */ +#define VSI_MBX_ATQBAH_MAX_INDEX 767 +#define VSI_MBX_ATQBAH_ATQBAH_S 0 +#define VSI_MBX_ATQBAH_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0) +#define VSI_MBX_ATQBAL(_VSI) (0x02000000 + ((_VSI) * 4096)) /* _i=0...767 */ /* Reset Source: CORER */ +#define VSI_MBX_ATQBAL_MAX_INDEX 767 +#define VSI_MBX_ATQBAL_ATQBAL_S 6 +#define VSI_MBX_ATQBAL_ATQBAL_M MAKEMASK(0x3FFFFFF, 6) +#define VSI_MBX_ATQH(_VSI) (0x0200000C + ((_VSI) * 4096)) /* _i=0...767 */ /* Reset Source: CORER */ +#define VSI_MBX_ATQH_MAX_INDEX 767 +#define VSI_MBX_ATQH_ATQH_S 0 +#define VSI_MBX_ATQH_ATQH_M MAKEMASK(0x3FF, 0) +#define VSI_MBX_ATQLEN(_VSI) (0x02000008 + ((_VSI) * 4096)) /* _i=0...767 */ /* Reset Source: PFR */ +#define VSI_MBX_ATQLEN_MAX_INDEX 767 +#define VSI_MBX_ATQLEN_ATQLEN_S 0 +#define VSI_MBX_ATQLEN_ATQLEN_M MAKEMASK(0x3FF, 0) +#define 
VSI_MBX_ATQLEN_ATQVFE_S 28 +#define VSI_MBX_ATQLEN_ATQVFE_M BIT(28) +#define VSI_MBX_ATQLEN_ATQOVFL_S 29 +#define VSI_MBX_ATQLEN_ATQOVFL_M BIT(29) +#define VSI_MBX_ATQLEN_ATQCRIT_S 30 +#define VSI_MBX_ATQLEN_ATQCRIT_M BIT(30) +#define VSI_MBX_ATQLEN_ATQENABLE_S 31 +#define VSI_MBX_ATQLEN_ATQENABLE_M BIT(31) +#define VSI_MBX_ATQT(_VSI) (0x02000010 + ((_VSI) * 4096)) /* _i=0...767 */ /* Reset Source: CORER */ +#define VSI_MBX_ATQT_MAX_INDEX 767 +#define VSI_MBX_ATQT_ATQT_S 0 +#define VSI_MBX_ATQT_ATQT_M MAKEMASK(0x3FF, 0) +#define GL_ACL_ACCESS_CMD 0x00391000 /* Reset Source: CORER */ +#define GL_ACL_ACCESS_CMD_TABLE_ID_S 0 +#define GL_ACL_ACCESS_CMD_TABLE_ID_M MAKEMASK(0xFF, 0) +#define GL_ACL_ACCESS_CMD_ENTRY_INDEX_S 8 +#define GL_ACL_ACCESS_CMD_ENTRY_INDEX_M MAKEMASK(0xFFF, 8) +#define GL_ACL_ACCESS_CMD_OPERATION_S 20 +#define GL_ACL_ACCESS_CMD_OPERATION_M BIT(20) +#define GL_ACL_ACCESS_CMD_OBJ_TYPE_S 24 +#define GL_ACL_ACCESS_CMD_OBJ_TYPE_M MAKEMASK(0xF, 24) +#define GL_ACL_ACCESS_CMD_EXECUTE_S 31 +#define GL_ACL_ACCESS_CMD_EXECUTE_M BIT(31) +#define GL_ACL_ACCESS_STATUS 0x00391004 /* Reset Source: CORER */ +#define GL_ACL_ACCESS_STATUS_BUSY_S 0 +#define GL_ACL_ACCESS_STATUS_BUSY_M BIT(0) +#define GL_ACL_ACCESS_STATUS_DONE_S 1 +#define GL_ACL_ACCESS_STATUS_DONE_M BIT(1) +#define GL_ACL_ACCESS_STATUS_ERROR_S 2 +#define GL_ACL_ACCESS_STATUS_ERROR_M BIT(2) +#define GL_ACL_ACCESS_STATUS_OPERATION_S 3 +#define GL_ACL_ACCESS_STATUS_OPERATION_M BIT(3) +#define GL_ACL_ACCESS_STATUS_ERROR_CODE_S 4 +#define GL_ACL_ACCESS_STATUS_ERROR_CODE_M MAKEMASK(0xF, 4) +#define GL_ACL_ACCESS_STATUS_TABLE_ID_S 8 +#define GL_ACL_ACCESS_STATUS_TABLE_ID_M MAKEMASK(0xFF, 8) +#define GL_ACL_ACCESS_STATUS_ENTRY_INDEX_S 16 +#define GL_ACL_ACCESS_STATUS_ENTRY_INDEX_M MAKEMASK(0xFFF, 16) +#define GL_ACL_ACCESS_STATUS_OBJ_TYPE_S 28 +#define GL_ACL_ACCESS_STATUS_OBJ_TYPE_M MAKEMASK(0xF, 28) +#define GL_ACL_ACTMEM_ACT(_i) (0x00393824 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */ +#define GL_ACL_ACTMEM_ACT_MAX_INDEX 1 +#define GL_ACL_ACTMEM_ACT_VALUE_S 0 +#define GL_ACL_ACTMEM_ACT_VALUE_M MAKEMASK(0xFFFF, 0) +#define GL_ACL_ACTMEM_ACT_MDID_S 20 +#define GL_ACL_ACTMEM_ACT_MDID_M MAKEMASK(0x3F, 20) +#define GL_ACL_ACTMEM_ACT_PRIORITY_S 28 +#define GL_ACL_ACTMEM_ACT_PRIORITY_M MAKEMASK(0x7, 28) +#define GL_ACL_CHICKEN_REGISTER 0x00393810 /* Reset Source: CORER */ +#define GL_ACL_CHICKEN_REGISTER_TCAM_DATA_POL_CH_S 0 +#define GL_ACL_CHICKEN_REGISTER_TCAM_DATA_POL_CH_M BIT(0) +#define GL_ACL_CHICKEN_REGISTER_TCAM_ADDR_POL_CH_S 1 +#define GL_ACL_CHICKEN_REGISTER_TCAM_ADDR_POL_CH_M BIT(1) +#define GL_ACL_DEFAULT_ACT(_i) (0x00391168 + ((_i) * 4)) /* _i=0...15 */ /* Reset Source: CORER */ +#define GL_ACL_DEFAULT_ACT_MAX_INDEX 15 +#define GL_ACL_DEFAULT_ACT_VALUE_S 0 +#define GL_ACL_DEFAULT_ACT_VALUE_M MAKEMASK(0xFFFF, 0) +#define GL_ACL_DEFAULT_ACT_MDID_S 20 +#define GL_ACL_DEFAULT_ACT_MDID_M MAKEMASK(0x3F, 20) +#define GL_ACL_DEFAULT_ACT_PRIORITY_S 28 +#define GL_ACL_DEFAULT_ACT_PRIORITY_M MAKEMASK(0x7, 28) +#define GL_ACL_PROFILE_BWSB_SEL(_i) (0x00391008 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ +#define GL_ACL_PROFILE_BWSB_SEL_MAX_INDEX 31 +#define GL_ACL_PROFILE_BWSB_SEL_BSB_SRC_OFF_S 0 +#define GL_ACL_PROFILE_BWSB_SEL_BSB_SRC_OFF_M MAKEMASK(0x3F, 0) +#define GL_ACL_PROFILE_BWSB_SEL_WSB_SRC_OFF_S 8 +#define GL_ACL_PROFILE_BWSB_SEL_WSB_SRC_OFF_M MAKEMASK(0x1F, 8) +#define GL_ACL_PROFILE_DWSB_SEL(_i) (0x00391088 + ((_i) * 4)) /* _i=0...15 */ /* Reset Source: CORER */ +#define 
GL_ACL_PROFILE_DWSB_SEL_MAX_INDEX 15 +#define GL_ACL_PROFILE_DWSB_SEL_DWORD_SEL_OFF_S 0 +#define GL_ACL_PROFILE_DWSB_SEL_DWORD_SEL_OFF_M MAKEMASK(0xF, 0) +#define GL_ACL_PROFILE_PF_CFG(_i) (0x003910C8 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GL_ACL_PROFILE_PF_CFG_MAX_INDEX 7 +#define GL_ACL_PROFILE_PF_CFG_SCEN_SEL_S 0 +#define GL_ACL_PROFILE_PF_CFG_SCEN_SEL_M MAKEMASK(0x3F, 0) +#define GL_ACL_PROFILE_RC_CFG(_i) (0x003910E8 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GL_ACL_PROFILE_RC_CFG_MAX_INDEX 7 +#define GL_ACL_PROFILE_RC_CFG_LOW_BOUND_S 0 +#define GL_ACL_PROFILE_RC_CFG_LOW_BOUND_M MAKEMASK(0xFFFF, 0) +#define GL_ACL_PROFILE_RC_CFG_HIGH_BOUND_S 16 +#define GL_ACL_PROFILE_RC_CFG_HIGH_BOUND_M MAKEMASK(0xFFFF, 16) +#define GL_ACL_PROFILE_RCF_MASK(_i) (0x00391108 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GL_ACL_PROFILE_RCF_MASK_MAX_INDEX 7 +#define GL_ACL_PROFILE_RCF_MASK_MASK_S 0 +#define GL_ACL_PROFILE_RCF_MASK_MASK_M MAKEMASK(0xFFFF, 0) +#define GL_ACL_SCENARIO_ACT_CFG(_i) (0x003938AC + ((_i) * 4)) /* _i=0...19 */ /* Reset Source: CORER */ +#define GL_ACL_SCENARIO_ACT_CFG_MAX_INDEX 19 +#define GL_ACL_SCENARIO_ACT_CFG_ACTMEM_SEL_S 0 +#define GL_ACL_SCENARIO_ACT_CFG_ACTMEM_SEL_M MAKEMASK(0xF, 0) +#define GL_ACL_SCENARIO_ACT_CFG_ACTMEM_EN_S 8 +#define GL_ACL_SCENARIO_ACT_CFG_ACTMEM_EN_M BIT(8) +#define GL_ACL_SCENARIO_CFG_H(_i) (0x0039386C + ((_i) * 4)) /* _i=0...15 */ /* Reset Source: CORER */ +#define GL_ACL_SCENARIO_CFG_H_MAX_INDEX 15 +#define GL_ACL_SCENARIO_CFG_H_SELECT4_S 0 +#define GL_ACL_SCENARIO_CFG_H_SELECT4_M MAKEMASK(0x1F, 0) +#define GL_ACL_SCENARIO_CFG_H_CHUNKMASK_S 8 +#define GL_ACL_SCENARIO_CFG_H_CHUNKMASK_M MAKEMASK(0xFF, 8) +#define GL_ACL_SCENARIO_CFG_H_START_COMPARE_S 24 +#define GL_ACL_SCENARIO_CFG_H_START_COMPARE_M BIT(24) +#define GL_ACL_SCENARIO_CFG_H_START_SET_S 28 +#define GL_ACL_SCENARIO_CFG_H_START_SET_M BIT(28) +#define GL_ACL_SCENARIO_CFG_L(_i) (0x0039382C + ((_i) * 4)) /* _i=0...15 */ /* Reset Source: CORER */ +#define GL_ACL_SCENARIO_CFG_L_MAX_INDEX 15 +#define GL_ACL_SCENARIO_CFG_L_SELECT0_S 0 +#define GL_ACL_SCENARIO_CFG_L_SELECT0_M MAKEMASK(0x7F, 0) +#define GL_ACL_SCENARIO_CFG_L_SELECT1_S 8 +#define GL_ACL_SCENARIO_CFG_L_SELECT1_M MAKEMASK(0x7F, 8) +#define GL_ACL_SCENARIO_CFG_L_SELECT2_S 16 +#define GL_ACL_SCENARIO_CFG_L_SELECT2_M MAKEMASK(0x7F, 16) +#define GL_ACL_SCENARIO_CFG_L_SELECT3_S 24 +#define GL_ACL_SCENARIO_CFG_L_SELECT3_M MAKEMASK(0x7F, 24) +#define GL_ACL_TCAM_KEY_H 0x00393818 /* Reset Source: CORER */ +#define GL_ACL_TCAM_KEY_H_GL_ACL_FFU_TCAM_KEY_H_S 0 +#define GL_ACL_TCAM_KEY_H_GL_ACL_FFU_TCAM_KEY_H_M MAKEMASK(0xFF, 0) +#define GL_ACL_TCAM_KEY_INV_H 0x00393820 /* Reset Source: CORER */ +#define GL_ACL_TCAM_KEY_INV_H_GL_ACL_FFU_TCAM_KEY_INV_H_S 0 +#define GL_ACL_TCAM_KEY_INV_H_GL_ACL_FFU_TCAM_KEY_INV_H_M MAKEMASK(0xFF, 0) +#define GL_ACL_TCAM_KEY_INV_L 0x0039381C /* Reset Source: CORER */ +#define GL_ACL_TCAM_KEY_INV_L_GL_ACL_FFU_TCAM_KEY_INV_L_S 0 +#define GL_ACL_TCAM_KEY_INV_L_GL_ACL_FFU_TCAM_KEY_INV_L_M MAKEMASK(0xFFFFFFFF, 0) +#define GL_ACL_TCAM_KEY_L 0x00393814 /* Reset Source: CORER */ +#define GL_ACL_TCAM_KEY_L_GL_ACL_FFU_TCAM_KEY_L_S 0 +#define GL_ACL_TCAM_KEY_L_GL_ACL_FFU_TCAM_KEY_L_M MAKEMASK(0xFFFFFFFF, 0) +#define VSI_ACL_DEF_SEL(_VSI) (0x00391800 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: CORER */ +#define VSI_ACL_DEF_SEL_MAX_INDEX 767 +#define VSI_ACL_DEF_SEL_RX_PROFILE_MISS_SEL_S 0 +#define VSI_ACL_DEF_SEL_RX_PROFILE_MISS_SEL_M MAKEMASK(0x3, 0) +#define 
VSI_ACL_DEF_SEL_RX_TABLES_MISS_SEL_S 4 +#define VSI_ACL_DEF_SEL_RX_TABLES_MISS_SEL_M MAKEMASK(0x3, 4) +#define VSI_ACL_DEF_SEL_TX_PROFILE_MISS_SEL_S 8 +#define VSI_ACL_DEF_SEL_TX_PROFILE_MISS_SEL_M MAKEMASK(0x3, 8) +#define VSI_ACL_DEF_SEL_TX_TABLES_MISS_SEL_S 12 +#define VSI_ACL_DEF_SEL_TX_TABLES_MISS_SEL_M MAKEMASK(0x3, 12) +#define GL_SWT_L2TAG0(_i) (0x000492A8 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GL_SWT_L2TAG0_MAX_INDEX 7 +#define GL_SWT_L2TAG0_DATA_S 0 +#define GL_SWT_L2TAG0_DATA_M MAKEMASK(0xFFFFFFFF, 0) +#define GL_SWT_L2TAG1(_i) (0x000492C8 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GL_SWT_L2TAG1_MAX_INDEX 7 +#define GL_SWT_L2TAG1_DATA_S 0 +#define GL_SWT_L2TAG1_DATA_M MAKEMASK(0xFFFFFFFF, 0) +#define GL_SWT_L2TAGCTRL(_i) (0x001D2660 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GL_SWT_L2TAGCTRL_MAX_INDEX 7 +#define GL_SWT_L2TAGCTRL_LENGTH_S 0 +#define GL_SWT_L2TAGCTRL_LENGTH_M MAKEMASK(0x7F, 0) +#define GL_SWT_L2TAGCTRL_HAS_UP_S 7 +#define GL_SWT_L2TAGCTRL_HAS_UP_M BIT(7) +#define GL_SWT_L2TAGCTRL_ISVLAN_S 9 +#define GL_SWT_L2TAGCTRL_ISVLAN_M BIT(9) +#define GL_SWT_L2TAGCTRL_INNERUP_S 10 +#define GL_SWT_L2TAGCTRL_INNERUP_M BIT(10) +#define GL_SWT_L2TAGCTRL_OUTERUP_S 11 +#define GL_SWT_L2TAGCTRL_OUTERUP_M BIT(11) +#define GL_SWT_L2TAGCTRL_LONG_S 12 +#define GL_SWT_L2TAGCTRL_LONG_M BIT(12) +#define GL_SWT_L2TAGCTRL_ISMPLS_S 13 +#define GL_SWT_L2TAGCTRL_ISMPLS_M BIT(13) +#define GL_SWT_L2TAGCTRL_ISNSH_S 14 +#define GL_SWT_L2TAGCTRL_ISNSH_M BIT(14) +#define GL_SWT_L2TAGCTRL_ETHERTYPE_S 16 +#define GL_SWT_L2TAGCTRL_ETHERTYPE_M MAKEMASK(0xFFFF, 16) +#define GL_SWT_L2TAGRXEB(_i) (0x00052000 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GL_SWT_L2TAGRXEB_MAX_INDEX 7 +#define GL_SWT_L2TAGRXEB_OFFSET_S 0 +#define GL_SWT_L2TAGRXEB_OFFSET_M MAKEMASK(0xFF, 0) +#define GL_SWT_L2TAGRXEB_LENGTH_S 8 +#define GL_SWT_L2TAGRXEB_LENGTH_M MAKEMASK(0x3, 8) +#define GL_SWT_L2TAGTXIB(_i) (0x000492E8 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GL_SWT_L2TAGTXIB_MAX_INDEX 7 +#define GL_SWT_L2TAGTXIB_OFFSET_S 0 +#define GL_SWT_L2TAGTXIB_OFFSET_M MAKEMASK(0xFF, 0) +#define GL_SWT_L2TAGTXIB_LENGTH_S 8 +#define GL_SWT_L2TAGTXIB_LENGTH_M MAKEMASK(0x3, 8) +#define GLCM_PE_CACHESIZE 0x005046B4 /* Reset Source: CORER */ +#define GLCM_PE_CACHESIZE_WORD_SIZE_S 0 +#define GLCM_PE_CACHESIZE_WORD_SIZE_M MAKEMASK(0xFFF, 0) +#define GLCM_PE_CACHESIZE_SETS_S 12 +#define GLCM_PE_CACHESIZE_SETS_M MAKEMASK(0xF, 12) +#define GLCM_PE_CACHESIZE_WAYS_S 16 +#define GLCM_PE_CACHESIZE_WAYS_M MAKEMASK(0x1FF, 16) +#define GLCOMM_CQ_CTL(_CQ) (0x000F0000 + ((_CQ) * 4)) /* _i=0...511 */ /* Reset Source: CORER */ +#define GLCOMM_CQ_CTL_MAX_INDEX 511 +#define GLCOMM_CQ_CTL_COMP_TYPE_S 0 +#define GLCOMM_CQ_CTL_COMP_TYPE_M MAKEMASK(0x7, 0) +#define GLCOMM_CQ_CTL_CMD_S 4 +#define GLCOMM_CQ_CTL_CMD_M MAKEMASK(0x7, 4) +#define GLCOMM_CQ_CTL_ID_S 16 +#define GLCOMM_CQ_CTL_ID_M MAKEMASK(0x3FFF, 16) +#define GLCOMM_MIN_MAX_PKT 0x000FC064 /* Reset Source: CORER */ +#define GLCOMM_MIN_MAX_PKT_MAHDL_S 0 +#define GLCOMM_MIN_MAX_PKT_MAHDL_M MAKEMASK(0x3FFF, 0) +#define GLCOMM_MIN_MAX_PKT_MIHDL_S 16 +#define GLCOMM_MIN_MAX_PKT_MIHDL_M MAKEMASK(0x3F, 16) +#define GLCOMM_MIN_MAX_PKT_LSO_COMS_MIHDL_S 22 +#define GLCOMM_MIN_MAX_PKT_LSO_COMS_MIHDL_M MAKEMASK(0x3FF, 22) +#define GLCOMM_PKT_SHAPER_PROF(_i) (0x002D2DA8 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLCOMM_PKT_SHAPER_PROF_MAX_INDEX 7 +#define GLCOMM_PKT_SHAPER_PROF_PKTCNT_S 0 
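+/* Illustrative usage sketch (not part of the generated register map; rd32(),
+ * wr32(), hw, cq_idx and new_cmd are assumed names): each register field in
+ * this file comes as an *_S (bit shift) and *_M (bit mask) pair built with
+ * BIT()/MAKEMASK(). Assuming an rd32()/wr32()-style register accessor, a
+ * field such as GLCOMM_CQ_CTL_CMD would typically be read and updated like:
+ *
+ *	u32 val = rd32(hw, GLCOMM_CQ_CTL(cq_idx));
+ *	u32 cmd = (val & GLCOMM_CQ_CTL_CMD_M) >> GLCOMM_CQ_CTL_CMD_S;
+ *
+ *	val &= ~GLCOMM_CQ_CTL_CMD_M;
+ *	val |= (new_cmd << GLCOMM_CQ_CTL_CMD_S) & GLCOMM_CQ_CTL_CMD_M;
+ *	wr32(hw, GLCOMM_CQ_CTL(cq_idx), val);
+ */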
+#define GLCOMM_PKT_SHAPER_PROF_PKTCNT_M MAKEMASK(0x3F, 0) +#define GLCOMM_QTX_CNTX_CTL 0x002D2DC8 /* Reset Source: CORER */ +#define GLCOMM_QTX_CNTX_CTL_QUEUE_ID_S 0 +#define GLCOMM_QTX_CNTX_CTL_QUEUE_ID_M MAKEMASK(0x3FFF, 0) +#define GLCOMM_QTX_CNTX_CTL_CMD_S 16 +#define GLCOMM_QTX_CNTX_CTL_CMD_M MAKEMASK(0x7, 16) +#define GLCOMM_QTX_CNTX_CTL_CMD_EXEC_S 19 +#define GLCOMM_QTX_CNTX_CTL_CMD_EXEC_M BIT(19) +#define GLCOMM_QTX_CNTX_DATA(_i) (0x002D2D40 + ((_i) * 4)) /* _i=0...9 */ /* Reset Source: CORER */ +#define GLCOMM_QTX_CNTX_DATA_MAX_INDEX 9 +#define GLCOMM_QTX_CNTX_DATA_DATA_S 0 +#define GLCOMM_QTX_CNTX_DATA_DATA_M MAKEMASK(0xFFFFFFFF, 0) +#define GLCOMM_QTX_CNTX_STAT 0x002D2DCC /* Reset Source: CORER */ +#define GLCOMM_QTX_CNTX_STAT_CMD_IN_PROG_S 0 +#define GLCOMM_QTX_CNTX_STAT_CMD_IN_PROG_M BIT(0) +#define GLCOMM_QUANTA_PROF(_i) (0x002D2D68 + ((_i) * 4)) /* _i=0...15 */ /* Reset Source: CORER */ +#define GLCOMM_QUANTA_PROF_MAX_INDEX 15 +#define GLCOMM_QUANTA_PROF_QUANTA_SIZE_S 0 +#define GLCOMM_QUANTA_PROF_QUANTA_SIZE_M MAKEMASK(0x3FFF, 0) +#define GLCOMM_QUANTA_PROF_MAX_CMD_S 16 +#define GLCOMM_QUANTA_PROF_MAX_CMD_M MAKEMASK(0xFF, 16) +#define GLCOMM_QUANTA_PROF_MAX_DESC_S 24 +#define GLCOMM_QUANTA_PROF_MAX_DESC_M MAKEMASK(0x3F, 24) +#define GLLAN_TCLAN_CACHE_CTL 0x000FC0B8 /* Reset Source: CORER */ +#define GLLAN_TCLAN_CACHE_CTL_MIN_FETCH_THRESH_S 0 +#define GLLAN_TCLAN_CACHE_CTL_MIN_FETCH_THRESH_M MAKEMASK(0x3F, 0) +#define GLLAN_TCLAN_CACHE_CTL_FETCH_CL_ALIGN_S 6 +#define GLLAN_TCLAN_CACHE_CTL_FETCH_CL_ALIGN_M BIT(6) +#define GLLAN_TCLAN_CACHE_CTL_MIN_ALLOC_THRESH_S 7 +#define GLLAN_TCLAN_CACHE_CTL_MIN_ALLOC_THRESH_M MAKEMASK(0x7F, 7) +#define GLLAN_TCLAN_CACHE_CTL_CACHE_ENTRY_CNT_S 14 +#define GLLAN_TCLAN_CACHE_CTL_CACHE_ENTRY_CNT_M MAKEMASK(0xFF, 14) +#define GLLAN_TCLAN_CACHE_CTL_CACHE_DESC_LIM_S 22 +#define GLLAN_TCLAN_CACHE_CTL_CACHE_DESC_LIM_M MAKEMASK(0x3FF, 22) +#define GLTCLAN_CQ_CNTX0(_CQ) (0x000F0800 + ((_CQ) * 4)) /* _i=0...511 */ /* Reset Source: CORER */ +#define GLTCLAN_CQ_CNTX0_MAX_INDEX 511 +#define GLTCLAN_CQ_CNTX0_RING_ADDR_LSB_S 0 +#define GLTCLAN_CQ_CNTX0_RING_ADDR_LSB_M MAKEMASK(0xFFFFFFFF, 0) +#define GLTCLAN_CQ_CNTX1(_CQ) (0x000F1000 + ((_CQ) * 4)) /* _i=0...511 */ /* Reset Source: CORER */ +#define GLTCLAN_CQ_CNTX1_MAX_INDEX 511 +#define GLTCLAN_CQ_CNTX1_RING_ADDR_MSB_S 0 +#define GLTCLAN_CQ_CNTX1_RING_ADDR_MSB_M MAKEMASK(0x1FFFFFF, 0) +#define GLTCLAN_CQ_CNTX10(_CQ) (0x000F5800 + ((_CQ) * 4)) /* _i=0...511 */ /* Reset Source: CORER */ +#define GLTCLAN_CQ_CNTX10_MAX_INDEX 511 +#define GLTCLAN_CQ_CNTX10_CQ_CACHLINE_S 0 +#define GLTCLAN_CQ_CNTX10_CQ_CACHLINE_M MAKEMASK(0xFFFFFFFF, 0) +#define GLTCLAN_CQ_CNTX11(_CQ) (0x000F6000 + ((_CQ) * 4)) /* _i=0...511 */ /* Reset Source: CORER */ +#define GLTCLAN_CQ_CNTX11_MAX_INDEX 511 +#define GLTCLAN_CQ_CNTX11_CQ_CACHLINE_S 0 +#define GLTCLAN_CQ_CNTX11_CQ_CACHLINE_M MAKEMASK(0xFFFFFFFF, 0) +#define GLTCLAN_CQ_CNTX12(_CQ) (0x000F6800 + ((_CQ) * 4)) /* _i=0...511 */ /* Reset Source: CORER */ +#define GLTCLAN_CQ_CNTX12_MAX_INDEX 511 +#define GLTCLAN_CQ_CNTX12_CQ_CACHLINE_S 0 +#define GLTCLAN_CQ_CNTX12_CQ_CACHLINE_M MAKEMASK(0xFFFFFFFF, 0) +#define GLTCLAN_CQ_CNTX13(_CQ) (0x000F7000 + ((_CQ) * 4)) /* _i=0...511 */ /* Reset Source: CORER */ +#define GLTCLAN_CQ_CNTX13_MAX_INDEX 511 +#define GLTCLAN_CQ_CNTX13_CQ_CACHLINE_S 0 +#define GLTCLAN_CQ_CNTX13_CQ_CACHLINE_M MAKEMASK(0xFFFFFFFF, 0) +#define GLTCLAN_CQ_CNTX14(_CQ) (0x000F7800 + ((_CQ) * 4)) /* _i=0...511 */ /* Reset Source: CORER */ +#define 
GLTCLAN_CQ_CNTX14_MAX_INDEX 511 +#define GLTCLAN_CQ_CNTX14_CQ_CACHLINE_S 0 +#define GLTCLAN_CQ_CNTX14_CQ_CACHLINE_M MAKEMASK(0xFFFFFFFF, 0) +#define GLTCLAN_CQ_CNTX15(_CQ) (0x000F8000 + ((_CQ) * 4)) /* _i=0...511 */ /* Reset Source: CORER */ +#define GLTCLAN_CQ_CNTX15_MAX_INDEX 511 +#define GLTCLAN_CQ_CNTX15_CQ_CACHLINE_S 0 +#define GLTCLAN_CQ_CNTX15_CQ_CACHLINE_M MAKEMASK(0xFFFFFFFF, 0) +#define GLTCLAN_CQ_CNTX16(_CQ) (0x000F8800 + ((_CQ) * 4)) /* _i=0...511 */ /* Reset Source: CORER */ +#define GLTCLAN_CQ_CNTX16_MAX_INDEX 511 +#define GLTCLAN_CQ_CNTX16_CQ_CACHLINE_S 0 +#define GLTCLAN_CQ_CNTX16_CQ_CACHLINE_M MAKEMASK(0xFFFFFFFF, 0) +#define GLTCLAN_CQ_CNTX17(_CQ) (0x000F9000 + ((_CQ) * 4)) /* _i=0...511 */ /* Reset Source: CORER */ +#define GLTCLAN_CQ_CNTX17_MAX_INDEX 511 +#define GLTCLAN_CQ_CNTX17_CQ_CACHLINE_S 0 +#define GLTCLAN_CQ_CNTX17_CQ_CACHLINE_M MAKEMASK(0xFFFFFFFF, 0) +#define GLTCLAN_CQ_CNTX18(_CQ) (0x000F9800 + ((_CQ) * 4)) /* _i=0...511 */ /* Reset Source: CORER */ +#define GLTCLAN_CQ_CNTX18_MAX_INDEX 511 +#define GLTCLAN_CQ_CNTX18_CQ_CACHLINE_S 0 +#define GLTCLAN_CQ_CNTX18_CQ_CACHLINE_M MAKEMASK(0xFFFFFFFF, 0) +#define GLTCLAN_CQ_CNTX19(_CQ) (0x000FA000 + ((_CQ) * 4)) /* _i=0...511 */ /* Reset Source: CORER */ +#define GLTCLAN_CQ_CNTX19_MAX_INDEX 511 +#define GLTCLAN_CQ_CNTX19_CQ_CACHLINE_S 0 +#define GLTCLAN_CQ_CNTX19_CQ_CACHLINE_M MAKEMASK(0xFFFFFFFF, 0) +#define GLTCLAN_CQ_CNTX2(_CQ) (0x000F1800 + ((_CQ) * 4)) /* _i=0...511 */ /* Reset Source: CORER */ +#define GLTCLAN_CQ_CNTX2_MAX_INDEX 511 +#define GLTCLAN_CQ_CNTX2_RING_LEN_S 0 +#define GLTCLAN_CQ_CNTX2_RING_LEN_M MAKEMASK(0x3FFFF, 0) +#define GLTCLAN_CQ_CNTX20(_CQ) (0x000FA800 + ((_CQ) * 4)) /* _i=0...511 */ /* Reset Source: CORER */ +#define GLTCLAN_CQ_CNTX20_MAX_INDEX 511 +#define GLTCLAN_CQ_CNTX20_CQ_CACHLINE_S 0 +#define GLTCLAN_CQ_CNTX20_CQ_CACHLINE_M MAKEMASK(0xFFFFFFFF, 0) +#define GLTCLAN_CQ_CNTX21(_CQ) (0x000FB000 + ((_CQ) * 4)) /* _i=0...511 */ /* Reset Source: CORER */ +#define GLTCLAN_CQ_CNTX21_MAX_INDEX 511 +#define GLTCLAN_CQ_CNTX21_CQ_CACHLINE_S 0 +#define GLTCLAN_CQ_CNTX21_CQ_CACHLINE_M MAKEMASK(0xFFFFFFFF, 0) +#define GLTCLAN_CQ_CNTX3(_CQ) (0x000F2000 + ((_CQ) * 4)) /* _i=0...511 */ /* Reset Source: CORER */ +#define GLTCLAN_CQ_CNTX3_MAX_INDEX 511 +#define GLTCLAN_CQ_CNTX3_GENERATION_S 0 +#define GLTCLAN_CQ_CNTX3_GENERATION_M BIT(0) +#define GLTCLAN_CQ_CNTX3_CQ_WR_PTR_S 1 +#define GLTCLAN_CQ_CNTX3_CQ_WR_PTR_M MAKEMASK(0x3FFFFF, 1) +#define GLTCLAN_CQ_CNTX4(_CQ) (0x000F2800 + ((_CQ) * 4)) /* _i=0...511 */ /* Reset Source: CORER */ +#define GLTCLAN_CQ_CNTX4_MAX_INDEX 511 +#define GLTCLAN_CQ_CNTX4_PF_NUM_S 0 +#define GLTCLAN_CQ_CNTX4_PF_NUM_M MAKEMASK(0x7, 0) +#define GLTCLAN_CQ_CNTX4_VMVF_NUM_S 3 +#define GLTCLAN_CQ_CNTX4_VMVF_NUM_M MAKEMASK(0x3FF, 3) +#define GLTCLAN_CQ_CNTX4_VMVF_TYPE_S 13 +#define GLTCLAN_CQ_CNTX4_VMVF_TYPE_M MAKEMASK(0x3, 13) +#define GLTCLAN_CQ_CNTX5(_CQ) (0x000F3000 + ((_CQ) * 4)) /* _i=0...511 */ /* Reset Source: CORER */ +#define GLTCLAN_CQ_CNTX5_MAX_INDEX 511 +#define GLTCLAN_CQ_CNTX5_TPH_EN_S 0 +#define GLTCLAN_CQ_CNTX5_TPH_EN_M BIT(0) +#define GLTCLAN_CQ_CNTX5_CPU_ID_S 1 +#define GLTCLAN_CQ_CNTX5_CPU_ID_M MAKEMASK(0xFF, 1) +#define GLTCLAN_CQ_CNTX5_FLUSH_ON_ITR_DIS_S 9 +#define GLTCLAN_CQ_CNTX5_FLUSH_ON_ITR_DIS_M BIT(9) +#define GLTCLAN_CQ_CNTX6(_CQ) (0x000F3800 + ((_CQ) * 4)) /* _i=0...511 */ /* Reset Source: CORER */ +#define GLTCLAN_CQ_CNTX6_MAX_INDEX 511 +#define GLTCLAN_CQ_CNTX6_CQ_CACHLINE_S 0 +#define GLTCLAN_CQ_CNTX6_CQ_CACHLINE_M MAKEMASK(0xFFFFFFFF, 0) +#define 
GLTCLAN_CQ_CNTX7(_CQ) (0x000F4000 + ((_CQ) * 4)) /* _i=0...511 */ /* Reset Source: CORER */ +#define GLTCLAN_CQ_CNTX7_MAX_INDEX 511 +#define GLTCLAN_CQ_CNTX7_CQ_CACHLINE_S 0 +#define GLTCLAN_CQ_CNTX7_CQ_CACHLINE_M MAKEMASK(0xFFFFFFFF, 0) +#define GLTCLAN_CQ_CNTX8(_CQ) (0x000F4800 + ((_CQ) * 4)) /* _i=0...511 */ /* Reset Source: CORER */ +#define GLTCLAN_CQ_CNTX8_MAX_INDEX 511 +#define GLTCLAN_CQ_CNTX8_CQ_CACHLINE_S 0 +#define GLTCLAN_CQ_CNTX8_CQ_CACHLINE_M MAKEMASK(0xFFFFFFFF, 0) +#define GLTCLAN_CQ_CNTX9(_CQ) (0x000F5000 + ((_CQ) * 4)) /* _i=0...511 */ /* Reset Source: CORER */ +#define GLTCLAN_CQ_CNTX9_MAX_INDEX 511 +#define GLTCLAN_CQ_CNTX9_CQ_CACHLINE_S 0 +#define GLTCLAN_CQ_CNTX9_CQ_CACHLINE_M MAKEMASK(0xFFFFFFFF, 0) +#define QTX_COMM_DBELL(_DBQM) (0x002C0000 + ((_DBQM) * 4)) /* _i=0...16383 */ /* Reset Source: CORER */ +#define QTX_COMM_DBELL_MAX_INDEX 16383 +#define QTX_COMM_DBELL_QTX_COMM_DBELL_S 0 +#define QTX_COMM_DBELL_QTX_COMM_DBELL_M MAKEMASK(0xFFFFFFFF, 0) +#define QTX_COMM_DBLQ_CNTX(_i, _DBLQ) (0x002D0000 + ((_i) * 1024 + (_DBLQ) * 4)) /* _i=0...4, _DBLQ=0...255 */ /* Reset Source: CORER */ +#define QTX_COMM_DBLQ_CNTX_MAX_INDEX 4 +#define QTX_COMM_DBLQ_CNTX_DATA_S 0 +#define QTX_COMM_DBLQ_CNTX_DATA_M MAKEMASK(0xFFFFFFFF, 0) +#define QTX_COMM_DBLQ_DBELL(_DBLQ) (0x002D1400 + ((_DBLQ) * 4)) /* _i=0...255 */ /* Reset Source: CORER */ +#define QTX_COMM_DBLQ_DBELL_MAX_INDEX 255 +#define QTX_COMM_DBLQ_DBELL_TAIL_S 0 +#define QTX_COMM_DBLQ_DBELL_TAIL_M MAKEMASK(0x1FFF, 0) +#define QTX_COMM_HEAD(_DBQM) (0x000E0000 + ((_DBQM) * 4)) /* _i=0...16383 */ /* Reset Source: CORER */ +#define QTX_COMM_HEAD_MAX_INDEX 16383 +#define QTX_COMM_HEAD_HEAD_S 0 +#define QTX_COMM_HEAD_HEAD_M MAKEMASK(0x1FFF, 0) +#define QTX_COMM_HEAD_RS_PENDING_S 16 +#define QTX_COMM_HEAD_RS_PENDING_M BIT(16) +#define GL_FW_TOOL_ARQBAH 0x000801C0 /* Reset Source: EMPR */ +#define GL_FW_TOOL_ARQBAH_ARQBAH_S 0 +#define GL_FW_TOOL_ARQBAH_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0) +#define GL_FW_TOOL_ARQBAL 0x000800C0 /* Reset Source: EMPR */ +#define GL_FW_TOOL_ARQBAL_ARQBAL_LSB_S 0 +#define GL_FW_TOOL_ARQBAL_ARQBAL_LSB_M MAKEMASK(0x3F, 0) +#define GL_FW_TOOL_ARQBAL_ARQBAL_S 6 +#define GL_FW_TOOL_ARQBAL_ARQBAL_M MAKEMASK(0x3FFFFFF, 6) +#define GL_FW_TOOL_ARQH 0x000803C0 /* Reset Source: EMPR */ +#define GL_FW_TOOL_ARQH_ARQH_S 0 +#define GL_FW_TOOL_ARQH_ARQH_M MAKEMASK(0x3FF, 0) +#define GL_FW_TOOL_ARQLEN 0x000802C0 /* Reset Source: EMPR */ +#define GL_FW_TOOL_ARQLEN_ARQLEN_S 0 +#define GL_FW_TOOL_ARQLEN_ARQLEN_M MAKEMASK(0x3FF, 0) +#define GL_FW_TOOL_ARQLEN_ARQVFE_S 28 +#define GL_FW_TOOL_ARQLEN_ARQVFE_M BIT(28) +#define GL_FW_TOOL_ARQLEN_ARQOVFL_S 29 +#define GL_FW_TOOL_ARQLEN_ARQOVFL_M BIT(29) +#define GL_FW_TOOL_ARQLEN_ARQCRIT_S 30 +#define GL_FW_TOOL_ARQLEN_ARQCRIT_M BIT(30) +#define GL_FW_TOOL_ARQLEN_ARQENABLE_S 31 +#define GL_FW_TOOL_ARQLEN_ARQENABLE_M BIT(31) +#define GL_FW_TOOL_ARQT 0x000804C0 /* Reset Source: EMPR */ +#define GL_FW_TOOL_ARQT_ARQT_S 0 +#define GL_FW_TOOL_ARQT_ARQT_M MAKEMASK(0x3FF, 0) +#define GL_FW_TOOL_ATQBAH 0x00080140 /* Reset Source: EMPR */ +#define GL_FW_TOOL_ATQBAH_ATQBAH_S 0 +#define GL_FW_TOOL_ATQBAH_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0) +#define GL_FW_TOOL_ATQBAL 0x00080040 /* Reset Source: EMPR */ +#define GL_FW_TOOL_ATQBAL_ATQBAL_LSB_S 0 +#define GL_FW_TOOL_ATQBAL_ATQBAL_LSB_M MAKEMASK(0x3F, 0) +#define GL_FW_TOOL_ATQBAL_ATQBAL_S 6 +#define GL_FW_TOOL_ATQBAL_ATQBAL_M MAKEMASK(0x3FFFFFF, 6) +#define GL_FW_TOOL_ATQH 0x00080340 /* Reset Source: EMPR */ +#define GL_FW_TOOL_ATQH_ATQH_S 0 +#define 
GL_FW_TOOL_ATQH_ATQH_M MAKEMASK(0x3FF, 0) +#define GL_FW_TOOL_ATQLEN 0x00080240 /* Reset Source: EMPR */ +#define GL_FW_TOOL_ATQLEN_ATQLEN_S 0 +#define GL_FW_TOOL_ATQLEN_ATQLEN_M MAKEMASK(0x3FF, 0) +#define GL_FW_TOOL_ATQLEN_ATQVFE_S 28 +#define GL_FW_TOOL_ATQLEN_ATQVFE_M BIT(28) +#define GL_FW_TOOL_ATQLEN_ATQOVFL_S 29 +#define GL_FW_TOOL_ATQLEN_ATQOVFL_M BIT(29) +#define GL_FW_TOOL_ATQLEN_ATQCRIT_S 30 +#define GL_FW_TOOL_ATQLEN_ATQCRIT_M BIT(30) +#define GL_FW_TOOL_ATQLEN_ATQENABLE_S 31 +#define GL_FW_TOOL_ATQLEN_ATQENABLE_M BIT(31) +#define GL_FW_TOOL_ATQT 0x00080440 /* Reset Source: EMPR */ +#define GL_FW_TOOL_ATQT_ATQT_S 0 +#define GL_FW_TOOL_ATQT_ATQT_M MAKEMASK(0x3FF, 0) +#define GL_MBX_PASID 0x00231EC0 /* Reset Source: CORER */ +#define GL_MBX_PASID_PASID_MODE_S 0 +#define GL_MBX_PASID_PASID_MODE_M BIT(0) +#define GL_MBX_PASID_PASID_MODE_VALID_S 1 +#define GL_MBX_PASID_PASID_MODE_VALID_M BIT(1) +#define PF_FW_ARQBAH 0x00080180 /* Reset Source: EMPR */ +#define PF_FW_ARQBAH_ARQBAH_S 0 +#define PF_FW_ARQBAH_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0) +#define PF_FW_ARQBAL 0x00080080 /* Reset Source: EMPR */ +#define PF_FW_ARQBAL_ARQBAL_LSB_S 0 +#define PF_FW_ARQBAL_ARQBAL_LSB_M MAKEMASK(0x3F, 0) +#define PF_FW_ARQBAL_ARQBAL_S 6 +#define PF_FW_ARQBAL_ARQBAL_M MAKEMASK(0x3FFFFFF, 6) +#define PF_FW_ARQH 0x00080380 /* Reset Source: EMPR */ +#define PF_FW_ARQH_ARQH_S 0 +#define PF_FW_ARQH_ARQH_M MAKEMASK(0x3FF, 0) +#define PF_FW_ARQLEN 0x00080280 /* Reset Source: EMPR */ +#define PF_FW_ARQLEN_ARQLEN_S 0 +#define PF_FW_ARQLEN_ARQLEN_M MAKEMASK(0x3FF, 0) +#define PF_FW_ARQLEN_ARQVFE_S 28 +#define PF_FW_ARQLEN_ARQVFE_M BIT(28) +#define PF_FW_ARQLEN_ARQOVFL_S 29 +#define PF_FW_ARQLEN_ARQOVFL_M BIT(29) +#define PF_FW_ARQLEN_ARQCRIT_S 30 +#define PF_FW_ARQLEN_ARQCRIT_M BIT(30) +#define PF_FW_ARQLEN_ARQENABLE_S 31 +#define PF_FW_ARQLEN_ARQENABLE_M BIT(31) +#define PF_FW_ARQT 0x00080480 /* Reset Source: EMPR */ +#define PF_FW_ARQT_ARQT_S 0 +#define PF_FW_ARQT_ARQT_M MAKEMASK(0x3FF, 0) +#define PF_FW_ATQBAH 0x00080100 /* Reset Source: EMPR */ +#define PF_FW_ATQBAH_ATQBAH_S 0 +#define PF_FW_ATQBAH_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0) +#define PF_FW_ATQBAL 0x00080000 /* Reset Source: EMPR */ +#define PF_FW_ATQBAL_ATQBAL_LSB_S 0 +#define PF_FW_ATQBAL_ATQBAL_LSB_M MAKEMASK(0x3F, 0) +#define PF_FW_ATQBAL_ATQBAL_S 6 +#define PF_FW_ATQBAL_ATQBAL_M MAKEMASK(0x3FFFFFF, 6) +#define PF_FW_ATQH 0x00080300 /* Reset Source: EMPR */ +#define PF_FW_ATQH_ATQH_S 0 +#define PF_FW_ATQH_ATQH_M MAKEMASK(0x3FF, 0) +#define PF_FW_ATQLEN 0x00080200 /* Reset Source: EMPR */ +#define PF_FW_ATQLEN_ATQLEN_S 0 +#define PF_FW_ATQLEN_ATQLEN_M MAKEMASK(0x3FF, 0) +#define PF_FW_ATQLEN_ATQVFE_S 28 +#define PF_FW_ATQLEN_ATQVFE_M BIT(28) +#define PF_FW_ATQLEN_ATQOVFL_S 29 +#define PF_FW_ATQLEN_ATQOVFL_M BIT(29) +#define PF_FW_ATQLEN_ATQCRIT_S 30 +#define PF_FW_ATQLEN_ATQCRIT_M BIT(30) +#define PF_FW_ATQLEN_ATQENABLE_S 31 +#define PF_FW_ATQLEN_ATQENABLE_M BIT(31) +#define PF_FW_ATQT 0x00080400 /* Reset Source: EMPR */ +#define PF_FW_ATQT_ATQT_S 0 +#define PF_FW_ATQT_ATQT_M MAKEMASK(0x3FF, 0) +#define PF_MBX_ARQBAH 0x0022E400 /* Reset Source: CORER */ +#define PF_MBX_ARQBAH_ARQBAH_S 0 +#define PF_MBX_ARQBAH_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0) +#define PF_MBX_ARQBAL 0x0022E380 /* Reset Source: CORER */ +#define PF_MBX_ARQBAL_ARQBAL_LSB_S 0 +#define PF_MBX_ARQBAL_ARQBAL_LSB_M MAKEMASK(0x3F, 0) +#define PF_MBX_ARQBAL_ARQBAL_S 6 +#define PF_MBX_ARQBAL_ARQBAL_M MAKEMASK(0x3FFFFFF, 6) +#define PF_MBX_ARQH 0x0022E500 /* Reset Source: CORER */ +#define 
PF_MBX_ARQH_ARQH_S 0 +#define PF_MBX_ARQH_ARQH_M MAKEMASK(0x3FF, 0) +#define PF_MBX_ARQLEN 0x0022E480 /* Reset Source: PFR */ +#define PF_MBX_ARQLEN_ARQLEN_S 0 +#define PF_MBX_ARQLEN_ARQLEN_M MAKEMASK(0x3FF, 0) +#define PF_MBX_ARQLEN_ARQVFE_S 28 +#define PF_MBX_ARQLEN_ARQVFE_M BIT(28) +#define PF_MBX_ARQLEN_ARQOVFL_S 29 +#define PF_MBX_ARQLEN_ARQOVFL_M BIT(29) +#define PF_MBX_ARQLEN_ARQCRIT_S 30 +#define PF_MBX_ARQLEN_ARQCRIT_M BIT(30) +#define PF_MBX_ARQLEN_ARQENABLE_S 31 +#define PF_MBX_ARQLEN_ARQENABLE_M BIT(31) +#define PF_MBX_ARQT 0x0022E580 /* Reset Source: CORER */ +#define PF_MBX_ARQT_ARQT_S 0 +#define PF_MBX_ARQT_ARQT_M MAKEMASK(0x3FF, 0) +#define PF_MBX_ATQBAH 0x0022E180 /* Reset Source: CORER */ +#define PF_MBX_ATQBAH_ATQBAH_S 0 +#define PF_MBX_ATQBAH_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0) +#define PF_MBX_ATQBAL 0x0022E100 /* Reset Source: CORER */ +#define PF_MBX_ATQBAL_ATQBAL_S 6 +#define PF_MBX_ATQBAL_ATQBAL_M MAKEMASK(0x3FFFFFF, 6) +#define PF_MBX_ATQH 0x0022E280 /* Reset Source: CORER */ +#define PF_MBX_ATQH_ATQH_S 0 +#define PF_MBX_ATQH_ATQH_M MAKEMASK(0x3FF, 0) +#define PF_MBX_ATQLEN 0x0022E200 /* Reset Source: PFR */ +#define PF_MBX_ATQLEN_ATQLEN_S 0 +#define PF_MBX_ATQLEN_ATQLEN_M MAKEMASK(0x3FF, 0) +#define PF_MBX_ATQLEN_ATQVFE_S 28 +#define PF_MBX_ATQLEN_ATQVFE_M BIT(28) +#define PF_MBX_ATQLEN_ATQOVFL_S 29 +#define PF_MBX_ATQLEN_ATQOVFL_M BIT(29) +#define PF_MBX_ATQLEN_ATQCRIT_S 30 +#define PF_MBX_ATQLEN_ATQCRIT_M BIT(30) +#define PF_MBX_ATQLEN_ATQENABLE_S 31 +#define PF_MBX_ATQLEN_ATQENABLE_M BIT(31) +#define PF_MBX_ATQT 0x0022E300 /* Reset Source: CORER */ +#define PF_MBX_ATQT_ATQT_S 0 +#define PF_MBX_ATQT_ATQT_M MAKEMASK(0x3FF, 0) +#define PF_SB_ARQBAH 0x0022FF00 /* Reset Source: CORER */ +#define PF_SB_ARQBAH_ARQBAH_S 0 +#define PF_SB_ARQBAH_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0) +#define PF_SB_ARQBAL 0x0022FE80 /* Reset Source: CORER */ +#define PF_SB_ARQBAL_ARQBAL_LSB_S 0 +#define PF_SB_ARQBAL_ARQBAL_LSB_M MAKEMASK(0x3F, 0) +#define PF_SB_ARQBAL_ARQBAL_S 6 +#define PF_SB_ARQBAL_ARQBAL_M MAKEMASK(0x3FFFFFF, 6) +#define PF_SB_ARQH 0x00230000 /* Reset Source: CORER */ +#define PF_SB_ARQH_ARQH_S 0 +#define PF_SB_ARQH_ARQH_M MAKEMASK(0x3FF, 0) +#define PF_SB_ARQLEN 0x0022FF80 /* Reset Source: PFR */ +#define PF_SB_ARQLEN_ARQLEN_S 0 +#define PF_SB_ARQLEN_ARQLEN_M MAKEMASK(0x3FF, 0) +#define PF_SB_ARQLEN_ARQVFE_S 28 +#define PF_SB_ARQLEN_ARQVFE_M BIT(28) +#define PF_SB_ARQLEN_ARQOVFL_S 29 +#define PF_SB_ARQLEN_ARQOVFL_M BIT(29) +#define PF_SB_ARQLEN_ARQCRIT_S 30 +#define PF_SB_ARQLEN_ARQCRIT_M BIT(30) +#define PF_SB_ARQLEN_ARQENABLE_S 31 +#define PF_SB_ARQLEN_ARQENABLE_M BIT(31) +#define PF_SB_ARQT 0x00230080 /* Reset Source: CORER */ +#define PF_SB_ARQT_ARQT_S 0 +#define PF_SB_ARQT_ARQT_M MAKEMASK(0x3FF, 0) +#define PF_SB_ATQBAH 0x0022FC80 /* Reset Source: CORER */ +#define PF_SB_ATQBAH_ATQBAH_S 0 +#define PF_SB_ATQBAH_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0) +#define PF_SB_ATQBAL 0x0022FC00 /* Reset Source: CORER */ +#define PF_SB_ATQBAL_ATQBAL_S 6 +#define PF_SB_ATQBAL_ATQBAL_M MAKEMASK(0x3FFFFFF, 6) +#define PF_SB_ATQH 0x0022FD80 /* Reset Source: CORER */ +#define PF_SB_ATQH_ATQH_S 0 +#define PF_SB_ATQH_ATQH_M MAKEMASK(0x3FF, 0) +#define PF_SB_ATQLEN 0x0022FD00 /* Reset Source: PFR */ +#define PF_SB_ATQLEN_ATQLEN_S 0 +#define PF_SB_ATQLEN_ATQLEN_M MAKEMASK(0x3FF, 0) +#define PF_SB_ATQLEN_ATQVFE_S 28 +#define PF_SB_ATQLEN_ATQVFE_M BIT(28) +#define PF_SB_ATQLEN_ATQOVFL_S 29 +#define PF_SB_ATQLEN_ATQOVFL_M BIT(29) +#define PF_SB_ATQLEN_ATQCRIT_S 30 +#define PF_SB_ATQLEN_ATQCRIT_M BIT(30) 
+#define PF_SB_ATQLEN_ATQENABLE_S 31 +#define PF_SB_ATQLEN_ATQENABLE_M BIT(31) +#define PF_SB_ATQT 0x0022FE00 /* Reset Source: CORER */ +#define PF_SB_ATQT_ATQT_S 0 +#define PF_SB_ATQT_ATQT_M MAKEMASK(0x3FF, 0) +#define PF_SB_REM_DEV_CTL 0x002300F0 /* Reset Source: CORER */ +#define PF_SB_REM_DEV_CTL_DEST_EN_S 0 +#define PF_SB_REM_DEV_CTL_DEST_EN_M MAKEMASK(0xFFFF, 0) +#define PF0_FW_HLP_ARQBAH 0x000801C8 /* Reset Source: EMPR */ +#define PF0_FW_HLP_ARQBAH_ARQBAH_S 0 +#define PF0_FW_HLP_ARQBAH_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0) +#define PF0_FW_HLP_ARQBAL 0x000800C8 /* Reset Source: EMPR */ +#define PF0_FW_HLP_ARQBAL_ARQBAL_LSB_S 0 +#define PF0_FW_HLP_ARQBAL_ARQBAL_LSB_M MAKEMASK(0x3F, 0) +#define PF0_FW_HLP_ARQBAL_ARQBAL_S 6 +#define PF0_FW_HLP_ARQBAL_ARQBAL_M MAKEMASK(0x3FFFFFF, 6) +#define PF0_FW_HLP_ARQH 0x000803C8 /* Reset Source: EMPR */ +#define PF0_FW_HLP_ARQH_ARQH_S 0 +#define PF0_FW_HLP_ARQH_ARQH_M MAKEMASK(0x3FF, 0) +#define PF0_FW_HLP_ARQLEN 0x000802C8 /* Reset Source: EMPR */ +#define PF0_FW_HLP_ARQLEN_ARQLEN_S 0 +#define PF0_FW_HLP_ARQLEN_ARQLEN_M MAKEMASK(0x3FF, 0) +#define PF0_FW_HLP_ARQLEN_ARQVFE_S 28 +#define PF0_FW_HLP_ARQLEN_ARQVFE_M BIT(28) +#define PF0_FW_HLP_ARQLEN_ARQOVFL_S 29 +#define PF0_FW_HLP_ARQLEN_ARQOVFL_M BIT(29) +#define PF0_FW_HLP_ARQLEN_ARQCRIT_S 30 +#define PF0_FW_HLP_ARQLEN_ARQCRIT_M BIT(30) +#define PF0_FW_HLP_ARQLEN_ARQENABLE_S 31 +#define PF0_FW_HLP_ARQLEN_ARQENABLE_M BIT(31) +#define PF0_FW_HLP_ARQT 0x000804C8 /* Reset Source: EMPR */ +#define PF0_FW_HLP_ARQT_ARQT_S 0 +#define PF0_FW_HLP_ARQT_ARQT_M MAKEMASK(0x3FF, 0) +#define PF0_FW_HLP_ATQBAH 0x00080148 /* Reset Source: EMPR */ +#define PF0_FW_HLP_ATQBAH_ATQBAH_S 0 +#define PF0_FW_HLP_ATQBAH_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0) +#define PF0_FW_HLP_ATQBAL 0x00080048 /* Reset Source: EMPR */ +#define PF0_FW_HLP_ATQBAL_ATQBAL_LSB_S 0 +#define PF0_FW_HLP_ATQBAL_ATQBAL_LSB_M MAKEMASK(0x3F, 0) +#define PF0_FW_HLP_ATQBAL_ATQBAL_S 6 +#define PF0_FW_HLP_ATQBAL_ATQBAL_M MAKEMASK(0x3FFFFFF, 6) +#define PF0_FW_HLP_ATQH 0x00080348 /* Reset Source: EMPR */ +#define PF0_FW_HLP_ATQH_ATQH_S 0 +#define PF0_FW_HLP_ATQH_ATQH_M MAKEMASK(0x3FF, 0) +#define PF0_FW_HLP_ATQLEN 0x00080248 /* Reset Source: EMPR */ +#define PF0_FW_HLP_ATQLEN_ATQLEN_S 0 +#define PF0_FW_HLP_ATQLEN_ATQLEN_M MAKEMASK(0x3FF, 0) +#define PF0_FW_HLP_ATQLEN_ATQVFE_S 28 +#define PF0_FW_HLP_ATQLEN_ATQVFE_M BIT(28) +#define PF0_FW_HLP_ATQLEN_ATQOVFL_S 29 +#define PF0_FW_HLP_ATQLEN_ATQOVFL_M BIT(29) +#define PF0_FW_HLP_ATQLEN_ATQCRIT_S 30 +#define PF0_FW_HLP_ATQLEN_ATQCRIT_M BIT(30) +#define PF0_FW_HLP_ATQLEN_ATQENABLE_S 31 +#define PF0_FW_HLP_ATQLEN_ATQENABLE_M BIT(31) +#define PF0_FW_HLP_ATQT 0x00080448 /* Reset Source: EMPR */ +#define PF0_FW_HLP_ATQT_ATQT_S 0 +#define PF0_FW_HLP_ATQT_ATQT_M MAKEMASK(0x3FF, 0) +#define PF0_FW_PSM_ARQBAH 0x000801C4 /* Reset Source: EMPR */ +#define PF0_FW_PSM_ARQBAH_ARQBAH_S 0 +#define PF0_FW_PSM_ARQBAH_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0) +#define PF0_FW_PSM_ARQBAL 0x000800C4 /* Reset Source: EMPR */ +#define PF0_FW_PSM_ARQBAL_ARQBAL_LSB_S 0 +#define PF0_FW_PSM_ARQBAL_ARQBAL_LSB_M MAKEMASK(0x3F, 0) +#define PF0_FW_PSM_ARQBAL_ARQBAL_S 6 +#define PF0_FW_PSM_ARQBAL_ARQBAL_M MAKEMASK(0x3FFFFFF, 6) +#define PF0_FW_PSM_ARQH 0x000803C4 /* Reset Source: EMPR */ +#define PF0_FW_PSM_ARQH_ARQH_S 0 +#define PF0_FW_PSM_ARQH_ARQH_M MAKEMASK(0x3FF, 0) +#define PF0_FW_PSM_ARQLEN 0x000802C4 /* Reset Source: EMPR */ +#define PF0_FW_PSM_ARQLEN_ARQLEN_S 0 +#define PF0_FW_PSM_ARQLEN_ARQLEN_M MAKEMASK(0x3FF, 0) +#define PF0_FW_PSM_ARQLEN_ARQVFE_S 28 
+#define PF0_FW_PSM_ARQLEN_ARQVFE_M BIT(28) +#define PF0_FW_PSM_ARQLEN_ARQOVFL_S 29 +#define PF0_FW_PSM_ARQLEN_ARQOVFL_M BIT(29) +#define PF0_FW_PSM_ARQLEN_ARQCRIT_S 30 +#define PF0_FW_PSM_ARQLEN_ARQCRIT_M BIT(30) +#define PF0_FW_PSM_ARQLEN_ARQENABLE_S 31 +#define PF0_FW_PSM_ARQLEN_ARQENABLE_M BIT(31) +#define PF0_FW_PSM_ARQT 0x000804C4 /* Reset Source: EMPR */ +#define PF0_FW_PSM_ARQT_ARQT_S 0 +#define PF0_FW_PSM_ARQT_ARQT_M MAKEMASK(0x3FF, 0) +#define PF0_FW_PSM_ATQBAH 0x00080144 /* Reset Source: EMPR */ +#define PF0_FW_PSM_ATQBAH_ATQBAH_S 0 +#define PF0_FW_PSM_ATQBAH_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0) +#define PF0_FW_PSM_ATQBAL 0x00080044 /* Reset Source: EMPR */ +#define PF0_FW_PSM_ATQBAL_ATQBAL_LSB_S 0 +#define PF0_FW_PSM_ATQBAL_ATQBAL_LSB_M MAKEMASK(0x3F, 0) +#define PF0_FW_PSM_ATQBAL_ATQBAL_S 6 +#define PF0_FW_PSM_ATQBAL_ATQBAL_M MAKEMASK(0x3FFFFFF, 6) +#define PF0_FW_PSM_ATQH 0x00080344 /* Reset Source: EMPR */ +#define PF0_FW_PSM_ATQH_ATQH_S 0 +#define PF0_FW_PSM_ATQH_ATQH_M MAKEMASK(0x3FF, 0) +#define PF0_FW_PSM_ATQLEN 0x00080244 /* Reset Source: EMPR */ +#define PF0_FW_PSM_ATQLEN_ATQLEN_S 0 +#define PF0_FW_PSM_ATQLEN_ATQLEN_M MAKEMASK(0x3FF, 0) +#define PF0_FW_PSM_ATQLEN_ATQVFE_S 28 +#define PF0_FW_PSM_ATQLEN_ATQVFE_M BIT(28) +#define PF0_FW_PSM_ATQLEN_ATQOVFL_S 29 +#define PF0_FW_PSM_ATQLEN_ATQOVFL_M BIT(29) +#define PF0_FW_PSM_ATQLEN_ATQCRIT_S 30 +#define PF0_FW_PSM_ATQLEN_ATQCRIT_M BIT(30) +#define PF0_FW_PSM_ATQLEN_ATQENABLE_S 31 +#define PF0_FW_PSM_ATQLEN_ATQENABLE_M BIT(31) +#define PF0_FW_PSM_ATQT 0x00080444 /* Reset Source: EMPR */ +#define PF0_FW_PSM_ATQT_ATQT_S 0 +#define PF0_FW_PSM_ATQT_ATQT_M MAKEMASK(0x3FF, 0) +#define PF0_MBX_CPM_ARQBAH 0x0022E5D8 /* Reset Source: CORER */ +#define PF0_MBX_CPM_ARQBAH_ARQBAH_S 0 +#define PF0_MBX_CPM_ARQBAH_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0) +#define PF0_MBX_CPM_ARQBAL 0x0022E5D4 /* Reset Source: CORER */ +#define PF0_MBX_CPM_ARQBAL_ARQBAL_LSB_S 0 +#define PF0_MBX_CPM_ARQBAL_ARQBAL_LSB_M MAKEMASK(0x3F, 0) +#define PF0_MBX_CPM_ARQBAL_ARQBAL_S 6 +#define PF0_MBX_CPM_ARQBAL_ARQBAL_M MAKEMASK(0x3FFFFFF, 6) +#define PF0_MBX_CPM_ARQH 0x0022E5E0 /* Reset Source: CORER */ +#define PF0_MBX_CPM_ARQH_ARQH_S 0 +#define PF0_MBX_CPM_ARQH_ARQH_M MAKEMASK(0x3FF, 0) +#define PF0_MBX_CPM_ARQLEN 0x0022E5DC /* Reset Source: PFR */ +#define PF0_MBX_CPM_ARQLEN_ARQLEN_S 0 +#define PF0_MBX_CPM_ARQLEN_ARQLEN_M MAKEMASK(0x3FF, 0) +#define PF0_MBX_CPM_ARQLEN_ARQVFE_S 28 +#define PF0_MBX_CPM_ARQLEN_ARQVFE_M BIT(28) +#define PF0_MBX_CPM_ARQLEN_ARQOVFL_S 29 +#define PF0_MBX_CPM_ARQLEN_ARQOVFL_M BIT(29) +#define PF0_MBX_CPM_ARQLEN_ARQCRIT_S 30 +#define PF0_MBX_CPM_ARQLEN_ARQCRIT_M BIT(30) +#define PF0_MBX_CPM_ARQLEN_ARQENABLE_S 31 +#define PF0_MBX_CPM_ARQLEN_ARQENABLE_M BIT(31) +#define PF0_MBX_CPM_ARQT 0x0022E5E4 /* Reset Source: CORER */ +#define PF0_MBX_CPM_ARQT_ARQT_S 0 +#define PF0_MBX_CPM_ARQT_ARQT_M MAKEMASK(0x3FF, 0) +#define PF0_MBX_CPM_ATQBAH 0x0022E5C4 /* Reset Source: CORER */ +#define PF0_MBX_CPM_ATQBAH_ATQBAH_S 0 +#define PF0_MBX_CPM_ATQBAH_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0) +#define PF0_MBX_CPM_ATQBAL 0x0022E5C0 /* Reset Source: CORER */ +#define PF0_MBX_CPM_ATQBAL_ATQBAL_S 6 +#define PF0_MBX_CPM_ATQBAL_ATQBAL_M MAKEMASK(0x3FFFFFF, 6) +#define PF0_MBX_CPM_ATQH 0x0022E5CC /* Reset Source: CORER */ +#define PF0_MBX_CPM_ATQH_ATQH_S 0 +#define PF0_MBX_CPM_ATQH_ATQH_M MAKEMASK(0x3FF, 0) +#define PF0_MBX_CPM_ATQLEN 0x0022E5C8 /* Reset Source: PFR */ +#define PF0_MBX_CPM_ATQLEN_ATQLEN_S 0 +#define PF0_MBX_CPM_ATQLEN_ATQLEN_M MAKEMASK(0x3FF, 0) +#define 
PF0_MBX_CPM_ATQLEN_ATQVFE_S 28 +#define PF0_MBX_CPM_ATQLEN_ATQVFE_M BIT(28) +#define PF0_MBX_CPM_ATQLEN_ATQOVFL_S 29 +#define PF0_MBX_CPM_ATQLEN_ATQOVFL_M BIT(29) +#define PF0_MBX_CPM_ATQLEN_ATQCRIT_S 30 +#define PF0_MBX_CPM_ATQLEN_ATQCRIT_M BIT(30) +#define PF0_MBX_CPM_ATQLEN_ATQENABLE_S 31 +#define PF0_MBX_CPM_ATQLEN_ATQENABLE_M BIT(31) +#define PF0_MBX_CPM_ATQT 0x0022E5D0 /* Reset Source: CORER */ +#define PF0_MBX_CPM_ATQT_ATQT_S 0 +#define PF0_MBX_CPM_ATQT_ATQT_M MAKEMASK(0x3FF, 0) +#define PF0_MBX_HLP_ARQBAH 0x0022E600 /* Reset Source: CORER */ +#define PF0_MBX_HLP_ARQBAH_ARQBAH_S 0 +#define PF0_MBX_HLP_ARQBAH_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0) +#define PF0_MBX_HLP_ARQBAL 0x0022E5FC /* Reset Source: CORER */ +#define PF0_MBX_HLP_ARQBAL_ARQBAL_LSB_S 0 +#define PF0_MBX_HLP_ARQBAL_ARQBAL_LSB_M MAKEMASK(0x3F, 0) +#define PF0_MBX_HLP_ARQBAL_ARQBAL_S 6 +#define PF0_MBX_HLP_ARQBAL_ARQBAL_M MAKEMASK(0x3FFFFFF, 6) +#define PF0_MBX_HLP_ARQH 0x0022E608 /* Reset Source: CORER */ +#define PF0_MBX_HLP_ARQH_ARQH_S 0 +#define PF0_MBX_HLP_ARQH_ARQH_M MAKEMASK(0x3FF, 0) +#define PF0_MBX_HLP_ARQLEN 0x0022E604 /* Reset Source: PFR */ +#define PF0_MBX_HLP_ARQLEN_ARQLEN_S 0 +#define PF0_MBX_HLP_ARQLEN_ARQLEN_M MAKEMASK(0x3FF, 0) +#define PF0_MBX_HLP_ARQLEN_ARQVFE_S 28 +#define PF0_MBX_HLP_ARQLEN_ARQVFE_M BIT(28) +#define PF0_MBX_HLP_ARQLEN_ARQOVFL_S 29 +#define PF0_MBX_HLP_ARQLEN_ARQOVFL_M BIT(29) +#define PF0_MBX_HLP_ARQLEN_ARQCRIT_S 30 +#define PF0_MBX_HLP_ARQLEN_ARQCRIT_M BIT(30) +#define PF0_MBX_HLP_ARQLEN_ARQENABLE_S 31 +#define PF0_MBX_HLP_ARQLEN_ARQENABLE_M BIT(31) +#define PF0_MBX_HLP_ARQT 0x0022E60C /* Reset Source: CORER */ +#define PF0_MBX_HLP_ARQT_ARQT_S 0 +#define PF0_MBX_HLP_ARQT_ARQT_M MAKEMASK(0x3FF, 0) +#define PF0_MBX_HLP_ATQBAH 0x0022E5EC /* Reset Source: CORER */ +#define PF0_MBX_HLP_ATQBAH_ATQBAH_S 0 +#define PF0_MBX_HLP_ATQBAH_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0) +#define PF0_MBX_HLP_ATQBAL 0x0022E5E8 /* Reset Source: CORER */ +#define PF0_MBX_HLP_ATQBAL_ATQBAL_S 6 +#define PF0_MBX_HLP_ATQBAL_ATQBAL_M MAKEMASK(0x3FFFFFF, 6) +#define PF0_MBX_HLP_ATQH 0x0022E5F4 /* Reset Source: CORER */ +#define PF0_MBX_HLP_ATQH_ATQH_S 0 +#define PF0_MBX_HLP_ATQH_ATQH_M MAKEMASK(0x3FF, 0) +#define PF0_MBX_HLP_ATQLEN 0x0022E5F0 /* Reset Source: PFR */ +#define PF0_MBX_HLP_ATQLEN_ATQLEN_S 0 +#define PF0_MBX_HLP_ATQLEN_ATQLEN_M MAKEMASK(0x3FF, 0) +#define PF0_MBX_HLP_ATQLEN_ATQVFE_S 28 +#define PF0_MBX_HLP_ATQLEN_ATQVFE_M BIT(28) +#define PF0_MBX_HLP_ATQLEN_ATQOVFL_S 29 +#define PF0_MBX_HLP_ATQLEN_ATQOVFL_M BIT(29) +#define PF0_MBX_HLP_ATQLEN_ATQCRIT_S 30 +#define PF0_MBX_HLP_ATQLEN_ATQCRIT_M BIT(30) +#define PF0_MBX_HLP_ATQLEN_ATQENABLE_S 31 +#define PF0_MBX_HLP_ATQLEN_ATQENABLE_M BIT(31) +#define PF0_MBX_HLP_ATQT 0x0022E5F8 /* Reset Source: CORER */ +#define PF0_MBX_HLP_ATQT_ATQT_S 0 +#define PF0_MBX_HLP_ATQT_ATQT_M MAKEMASK(0x3FF, 0) +#define PF0_MBX_PSM_ARQBAH 0x0022E628 /* Reset Source: CORER */ +#define PF0_MBX_PSM_ARQBAH_ARQBAH_S 0 +#define PF0_MBX_PSM_ARQBAH_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0) +#define PF0_MBX_PSM_ARQBAL 0x0022E624 /* Reset Source: CORER */ +#define PF0_MBX_PSM_ARQBAL_ARQBAL_LSB_S 0 +#define PF0_MBX_PSM_ARQBAL_ARQBAL_LSB_M MAKEMASK(0x3F, 0) +#define PF0_MBX_PSM_ARQBAL_ARQBAL_S 6 +#define PF0_MBX_PSM_ARQBAL_ARQBAL_M MAKEMASK(0x3FFFFFF, 6) +#define PF0_MBX_PSM_ARQH 0x0022E630 /* Reset Source: CORER */ +#define PF0_MBX_PSM_ARQH_ARQH_S 0 +#define PF0_MBX_PSM_ARQH_ARQH_M MAKEMASK(0x3FF, 0) +#define PF0_MBX_PSM_ARQLEN 0x0022E62C /* Reset Source: PFR */ +#define PF0_MBX_PSM_ARQLEN_ARQLEN_S 0 
+#define PF0_MBX_PSM_ARQLEN_ARQLEN_M MAKEMASK(0x3FF, 0) +#define PF0_MBX_PSM_ARQLEN_ARQVFE_S 28 +#define PF0_MBX_PSM_ARQLEN_ARQVFE_M BIT(28) +#define PF0_MBX_PSM_ARQLEN_ARQOVFL_S 29 +#define PF0_MBX_PSM_ARQLEN_ARQOVFL_M BIT(29) +#define PF0_MBX_PSM_ARQLEN_ARQCRIT_S 30 +#define PF0_MBX_PSM_ARQLEN_ARQCRIT_M BIT(30) +#define PF0_MBX_PSM_ARQLEN_ARQENABLE_S 31 +#define PF0_MBX_PSM_ARQLEN_ARQENABLE_M BIT(31) +#define PF0_MBX_PSM_ARQT 0x0022E634 /* Reset Source: CORER */ +#define PF0_MBX_PSM_ARQT_ARQT_S 0 +#define PF0_MBX_PSM_ARQT_ARQT_M MAKEMASK(0x3FF, 0) +#define PF0_MBX_PSM_ATQBAH 0x0022E614 /* Reset Source: CORER */ +#define PF0_MBX_PSM_ATQBAH_ATQBAH_S 0 +#define PF0_MBX_PSM_ATQBAH_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0) +#define PF0_MBX_PSM_ATQBAL 0x0022E610 /* Reset Source: CORER */ +#define PF0_MBX_PSM_ATQBAL_ATQBAL_S 6 +#define PF0_MBX_PSM_ATQBAL_ATQBAL_M MAKEMASK(0x3FFFFFF, 6) +#define PF0_MBX_PSM_ATQH 0x0022E61C /* Reset Source: CORER */ +#define PF0_MBX_PSM_ATQH_ATQH_S 0 +#define PF0_MBX_PSM_ATQH_ATQH_M MAKEMASK(0x3FF, 0) +#define PF0_MBX_PSM_ATQLEN 0x0022E618 /* Reset Source: PFR */ +#define PF0_MBX_PSM_ATQLEN_ATQLEN_S 0 +#define PF0_MBX_PSM_ATQLEN_ATQLEN_M MAKEMASK(0x3FF, 0) +#define PF0_MBX_PSM_ATQLEN_ATQVFE_S 28 +#define PF0_MBX_PSM_ATQLEN_ATQVFE_M BIT(28) +#define PF0_MBX_PSM_ATQLEN_ATQOVFL_S 29 +#define PF0_MBX_PSM_ATQLEN_ATQOVFL_M BIT(29) +#define PF0_MBX_PSM_ATQLEN_ATQCRIT_S 30 +#define PF0_MBX_PSM_ATQLEN_ATQCRIT_M BIT(30) +#define PF0_MBX_PSM_ATQLEN_ATQENABLE_S 31 +#define PF0_MBX_PSM_ATQLEN_ATQENABLE_M BIT(31) +#define PF0_MBX_PSM_ATQT 0x0022E620 /* Reset Source: CORER */ +#define PF0_MBX_PSM_ATQT_ATQT_S 0 +#define PF0_MBX_PSM_ATQT_ATQT_M MAKEMASK(0x3FF, 0) +#define PF0_SB_CPM_ARQBAH 0x0022E650 /* Reset Source: CORER */ +#define PF0_SB_CPM_ARQBAH_ARQBAH_S 0 +#define PF0_SB_CPM_ARQBAH_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0) +#define PF0_SB_CPM_ARQBAL 0x0022E64C /* Reset Source: CORER */ +#define PF0_SB_CPM_ARQBAL_ARQBAL_LSB_S 0 +#define PF0_SB_CPM_ARQBAL_ARQBAL_LSB_M MAKEMASK(0x3F, 0) +#define PF0_SB_CPM_ARQBAL_ARQBAL_S 6 +#define PF0_SB_CPM_ARQBAL_ARQBAL_M MAKEMASK(0x3FFFFFF, 6) +#define PF0_SB_CPM_ARQH 0x0022E658 /* Reset Source: CORER */ +#define PF0_SB_CPM_ARQH_ARQH_S 0 +#define PF0_SB_CPM_ARQH_ARQH_M MAKEMASK(0x3FF, 0) +#define PF0_SB_CPM_ARQLEN 0x0022E654 /* Reset Source: PFR */ +#define PF0_SB_CPM_ARQLEN_ARQLEN_S 0 +#define PF0_SB_CPM_ARQLEN_ARQLEN_M MAKEMASK(0x3FF, 0) +#define PF0_SB_CPM_ARQLEN_ARQVFE_S 28 +#define PF0_SB_CPM_ARQLEN_ARQVFE_M BIT(28) +#define PF0_SB_CPM_ARQLEN_ARQOVFL_S 29 +#define PF0_SB_CPM_ARQLEN_ARQOVFL_M BIT(29) +#define PF0_SB_CPM_ARQLEN_ARQCRIT_S 30 +#define PF0_SB_CPM_ARQLEN_ARQCRIT_M BIT(30) +#define PF0_SB_CPM_ARQLEN_ARQENABLE_S 31 +#define PF0_SB_CPM_ARQLEN_ARQENABLE_M BIT(31) +#define PF0_SB_CPM_ARQT 0x0022E65C /* Reset Source: CORER */ +#define PF0_SB_CPM_ARQT_ARQT_S 0 +#define PF0_SB_CPM_ARQT_ARQT_M MAKEMASK(0x3FF, 0) +#define PF0_SB_CPM_ATQBAH 0x0022E63C /* Reset Source: CORER */ +#define PF0_SB_CPM_ATQBAH_ATQBAH_S 0 +#define PF0_SB_CPM_ATQBAH_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0) +#define PF0_SB_CPM_ATQBAL 0x0022E638 /* Reset Source: CORER */ +#define PF0_SB_CPM_ATQBAL_ATQBAL_S 6 +#define PF0_SB_CPM_ATQBAL_ATQBAL_M MAKEMASK(0x3FFFFFF, 6) +#define PF0_SB_CPM_ATQH 0x0022E644 /* Reset Source: CORER */ +#define PF0_SB_CPM_ATQH_ATQH_S 0 +#define PF0_SB_CPM_ATQH_ATQH_M MAKEMASK(0x3FF, 0) +#define PF0_SB_CPM_ATQLEN 0x0022E640 /* Reset Source: PFR */ +#define PF0_SB_CPM_ATQLEN_ATQLEN_S 0 +#define PF0_SB_CPM_ATQLEN_ATQLEN_M MAKEMASK(0x3FF, 0) +#define 
PF0_SB_CPM_ATQLEN_ATQVFE_S 28 +#define PF0_SB_CPM_ATQLEN_ATQVFE_M BIT(28) +#define PF0_SB_CPM_ATQLEN_ATQOVFL_S 29 +#define PF0_SB_CPM_ATQLEN_ATQOVFL_M BIT(29) +#define PF0_SB_CPM_ATQLEN_ATQCRIT_S 30 +#define PF0_SB_CPM_ATQLEN_ATQCRIT_M BIT(30) +#define PF0_SB_CPM_ATQLEN_ATQENABLE_S 31 +#define PF0_SB_CPM_ATQLEN_ATQENABLE_M BIT(31) +#define PF0_SB_CPM_ATQT 0x0022E648 /* Reset Source: CORER */ +#define PF0_SB_CPM_ATQT_ATQT_S 0 +#define PF0_SB_CPM_ATQT_ATQT_M MAKEMASK(0x3FF, 0) +#define PF0_SB_CPM_REM_DEV_CTL 0x002300F4 /* Reset Source: CORER */ +#define PF0_SB_CPM_REM_DEV_CTL_DEST_EN_S 0 +#define PF0_SB_CPM_REM_DEV_CTL_DEST_EN_M MAKEMASK(0xFFFF, 0) +#define PF0_SB_HLP_ARQBAH 0x002300D8 /* Reset Source: CORER */ +#define PF0_SB_HLP_ARQBAH_ARQBAH_S 0 +#define PF0_SB_HLP_ARQBAH_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0) +#define PF0_SB_HLP_ARQBAL 0x002300D4 /* Reset Source: CORER */ +#define PF0_SB_HLP_ARQBAL_ARQBAL_LSB_S 0 +#define PF0_SB_HLP_ARQBAL_ARQBAL_LSB_M MAKEMASK(0x3F, 0) +#define PF0_SB_HLP_ARQBAL_ARQBAL_S 6 +#define PF0_SB_HLP_ARQBAL_ARQBAL_M MAKEMASK(0x3FFFFFF, 6) +#define PF0_SB_HLP_ARQH 0x002300E0 /* Reset Source: CORER */ +#define PF0_SB_HLP_ARQH_ARQH_S 0 +#define PF0_SB_HLP_ARQH_ARQH_M MAKEMASK(0x3FF, 0) +#define PF0_SB_HLP_ARQLEN 0x002300DC /* Reset Source: PFR */ +#define PF0_SB_HLP_ARQLEN_ARQLEN_S 0 +#define PF0_SB_HLP_ARQLEN_ARQLEN_M MAKEMASK(0x3FF, 0) +#define PF0_SB_HLP_ARQLEN_ARQVFE_S 28 +#define PF0_SB_HLP_ARQLEN_ARQVFE_M BIT(28) +#define PF0_SB_HLP_ARQLEN_ARQOVFL_S 29 +#define PF0_SB_HLP_ARQLEN_ARQOVFL_M BIT(29) +#define PF0_SB_HLP_ARQLEN_ARQCRIT_S 30 +#define PF0_SB_HLP_ARQLEN_ARQCRIT_M BIT(30) +#define PF0_SB_HLP_ARQLEN_ARQENABLE_S 31 +#define PF0_SB_HLP_ARQLEN_ARQENABLE_M BIT(31) +#define PF0_SB_HLP_ARQT 0x002300E4 /* Reset Source: CORER */ +#define PF0_SB_HLP_ARQT_ARQT_S 0 +#define PF0_SB_HLP_ARQT_ARQT_M MAKEMASK(0x3FF, 0) +#define PF0_SB_HLP_ATQBAH 0x002300C4 /* Reset Source: CORER */ +#define PF0_SB_HLP_ATQBAH_ATQBAH_S 0 +#define PF0_SB_HLP_ATQBAH_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0) +#define PF0_SB_HLP_ATQBAL 0x002300C0 /* Reset Source: CORER */ +#define PF0_SB_HLP_ATQBAL_ATQBAL_S 6 +#define PF0_SB_HLP_ATQBAL_ATQBAL_M MAKEMASK(0x3FFFFFF, 6) +#define PF0_SB_HLP_ATQH 0x002300CC /* Reset Source: CORER */ +#define PF0_SB_HLP_ATQH_ATQH_S 0 +#define PF0_SB_HLP_ATQH_ATQH_M MAKEMASK(0x3FF, 0) +#define PF0_SB_HLP_ATQLEN 0x002300C8 /* Reset Source: PFR */ +#define PF0_SB_HLP_ATQLEN_ATQLEN_S 0 +#define PF0_SB_HLP_ATQLEN_ATQLEN_M MAKEMASK(0x3FF, 0) +#define PF0_SB_HLP_ATQLEN_ATQVFE_S 28 +#define PF0_SB_HLP_ATQLEN_ATQVFE_M BIT(28) +#define PF0_SB_HLP_ATQLEN_ATQOVFL_S 29 +#define PF0_SB_HLP_ATQLEN_ATQOVFL_M BIT(29) +#define PF0_SB_HLP_ATQLEN_ATQCRIT_S 30 +#define PF0_SB_HLP_ATQLEN_ATQCRIT_M BIT(30) +#define PF0_SB_HLP_ATQLEN_ATQENABLE_S 31 +#define PF0_SB_HLP_ATQLEN_ATQENABLE_M BIT(31) +#define PF0_SB_HLP_ATQT 0x002300D0 /* Reset Source: CORER */ +#define PF0_SB_HLP_ATQT_ATQT_S 0 +#define PF0_SB_HLP_ATQT_ATQT_M MAKEMASK(0x3FF, 0) +#define PF0_SB_HLP_REM_DEV_CTL 0x002300E8 /* Reset Source: CORER */ +#define PF0_SB_HLP_REM_DEV_CTL_DEST_EN_S 0 +#define PF0_SB_HLP_REM_DEV_CTL_DEST_EN_M MAKEMASK(0xFFFF, 0) +#define SB_REM_DEV_DEST(_i) (0x002300F8 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ +#define SB_REM_DEV_DEST_MAX_INDEX 7 +#define SB_REM_DEV_DEST_DEST_S 0 +#define SB_REM_DEV_DEST_DEST_M MAKEMASK(0xF, 0) +#define SB_REM_DEV_DEST_DEST_VALID_S 31 +#define SB_REM_DEV_DEST_DEST_VALID_M BIT(31) +#define VF_MBX_ARQBAH(_VF) (0x0022B800 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: 
CORER */ +#define VF_MBX_ARQBAH_MAX_INDEX 255 +#define VF_MBX_ARQBAH_ARQBAH_S 0 +#define VF_MBX_ARQBAH_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0) +#define VF_MBX_ARQBAL(_VF) (0x0022B400 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: CORER */ +#define VF_MBX_ARQBAL_MAX_INDEX 255 +#define VF_MBX_ARQBAL_ARQBAL_LSB_S 0 +#define VF_MBX_ARQBAL_ARQBAL_LSB_M MAKEMASK(0x3F, 0) +#define VF_MBX_ARQBAL_ARQBAL_S 6 +#define VF_MBX_ARQBAL_ARQBAL_M MAKEMASK(0x3FFFFFF, 6) +#define VF_MBX_ARQH(_VF) (0x0022C000 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: CORER */ +#define VF_MBX_ARQH_MAX_INDEX 255 +#define VF_MBX_ARQH_ARQH_S 0 +#define VF_MBX_ARQH_ARQH_M MAKEMASK(0x3FF, 0) +#define VF_MBX_ARQLEN(_VF) (0x0022BC00 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: PFR */ +#define VF_MBX_ARQLEN_MAX_INDEX 255 +#define VF_MBX_ARQLEN_ARQLEN_S 0 +#define VF_MBX_ARQLEN_ARQLEN_M MAKEMASK(0x3FF, 0) +#define VF_MBX_ARQLEN_ARQVFE_S 28 +#define VF_MBX_ARQLEN_ARQVFE_M BIT(28) +#define VF_MBX_ARQLEN_ARQOVFL_S 29 +#define VF_MBX_ARQLEN_ARQOVFL_M BIT(29) +#define VF_MBX_ARQLEN_ARQCRIT_S 30 +#define VF_MBX_ARQLEN_ARQCRIT_M BIT(30) +#define VF_MBX_ARQLEN_ARQENABLE_S 31 +#define VF_MBX_ARQLEN_ARQENABLE_M BIT(31) +#define VF_MBX_ARQT(_VF) (0x0022C400 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: CORER */ +#define VF_MBX_ARQT_MAX_INDEX 255 +#define VF_MBX_ARQT_ARQT_S 0 +#define VF_MBX_ARQT_ARQT_M MAKEMASK(0x3FF, 0) +#define VF_MBX_ATQBAH(_VF) (0x0022A400 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: CORER */ +#define VF_MBX_ATQBAH_MAX_INDEX 255 +#define VF_MBX_ATQBAH_ATQBAH_S 0 +#define VF_MBX_ATQBAH_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0) +#define VF_MBX_ATQBAL(_VF) (0x0022A000 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: CORER */ +#define VF_MBX_ATQBAL_MAX_INDEX 255 +#define VF_MBX_ATQBAL_ATQBAL_S 6 +#define VF_MBX_ATQBAL_ATQBAL_M MAKEMASK(0x3FFFFFF, 6) +#define VF_MBX_ATQH(_VF) (0x0022AC00 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: CORER */ +#define VF_MBX_ATQH_MAX_INDEX 255 +#define VF_MBX_ATQH_ATQH_S 0 +#define VF_MBX_ATQH_ATQH_M MAKEMASK(0x3FF, 0) +#define VF_MBX_ATQLEN(_VF) (0x0022A800 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: PFR */ +#define VF_MBX_ATQLEN_MAX_INDEX 255 +#define VF_MBX_ATQLEN_ATQLEN_S 0 +#define VF_MBX_ATQLEN_ATQLEN_M MAKEMASK(0x3FF, 0) +#define VF_MBX_ATQLEN_ATQVFE_S 28 +#define VF_MBX_ATQLEN_ATQVFE_M BIT(28) +#define VF_MBX_ATQLEN_ATQOVFL_S 29 +#define VF_MBX_ATQLEN_ATQOVFL_M BIT(29) +#define VF_MBX_ATQLEN_ATQCRIT_S 30 +#define VF_MBX_ATQLEN_ATQCRIT_M BIT(30) +#define VF_MBX_ATQLEN_ATQENABLE_S 31 +#define VF_MBX_ATQLEN_ATQENABLE_M BIT(31) +#define VF_MBX_ATQT(_VF) (0x0022B000 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: CORER */ +#define VF_MBX_ATQT_MAX_INDEX 255 +#define VF_MBX_ATQT_ATQT_S 0 +#define VF_MBX_ATQT_ATQT_M MAKEMASK(0x3FF, 0) +#define VF_MBX_CPM_ARQBAH(_VF128) (0x0022D400 + ((_VF128) * 4)) /* _i=0...127 */ /* Reset Source: CORER */ +#define VF_MBX_CPM_ARQBAH_MAX_INDEX 127 +#define VF_MBX_CPM_ARQBAH_ARQBAH_S 0 +#define VF_MBX_CPM_ARQBAH_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0) +#define VF_MBX_CPM_ARQBAL(_VF128) (0x0022D200 + ((_VF128) * 4)) /* _i=0...127 */ /* Reset Source: CORER */ +#define VF_MBX_CPM_ARQBAL_MAX_INDEX 127 +#define VF_MBX_CPM_ARQBAL_ARQBAL_LSB_S 0 +#define VF_MBX_CPM_ARQBAL_ARQBAL_LSB_M MAKEMASK(0x3F, 0) +#define VF_MBX_CPM_ARQBAL_ARQBAL_S 6 +#define VF_MBX_CPM_ARQBAL_ARQBAL_M MAKEMASK(0x3FFFFFF, 6) +#define VF_MBX_CPM_ARQH(_VF128) (0x0022D800 + ((_VF128) * 4)) /* _i=0...127 */ /* Reset Source: CORER */ +#define VF_MBX_CPM_ARQH_MAX_INDEX 127 +#define 
VF_MBX_CPM_ARQH_ARQH_S 0 +#define VF_MBX_CPM_ARQH_ARQH_M MAKEMASK(0x3FF, 0) +#define VF_MBX_CPM_ARQLEN(_VF128) (0x0022D600 + ((_VF128) * 4)) /* _i=0...127 */ /* Reset Source: PFR */ +#define VF_MBX_CPM_ARQLEN_MAX_INDEX 127 +#define VF_MBX_CPM_ARQLEN_ARQLEN_S 0 +#define VF_MBX_CPM_ARQLEN_ARQLEN_M MAKEMASK(0x3FF, 0) +#define VF_MBX_CPM_ARQLEN_ARQVFE_S 28 +#define VF_MBX_CPM_ARQLEN_ARQVFE_M BIT(28) +#define VF_MBX_CPM_ARQLEN_ARQOVFL_S 29 +#define VF_MBX_CPM_ARQLEN_ARQOVFL_M BIT(29) +#define VF_MBX_CPM_ARQLEN_ARQCRIT_S 30 +#define VF_MBX_CPM_ARQLEN_ARQCRIT_M BIT(30) +#define VF_MBX_CPM_ARQLEN_ARQENABLE_S 31 +#define VF_MBX_CPM_ARQLEN_ARQENABLE_M BIT(31) +#define VF_MBX_CPM_ARQT(_VF128) (0x0022DA00 + ((_VF128) * 4)) /* _i=0...127 */ /* Reset Source: CORER */ +#define VF_MBX_CPM_ARQT_MAX_INDEX 127 +#define VF_MBX_CPM_ARQT_ARQT_S 0 +#define VF_MBX_CPM_ARQT_ARQT_M MAKEMASK(0x3FF, 0) +#define VF_MBX_CPM_ATQBAH(_VF128) (0x0022CA00 + ((_VF128) * 4)) /* _i=0...127 */ /* Reset Source: CORER */ +#define VF_MBX_CPM_ATQBAH_MAX_INDEX 127 +#define VF_MBX_CPM_ATQBAH_ATQBAH_S 0 +#define VF_MBX_CPM_ATQBAH_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0) +#define VF_MBX_CPM_ATQBAL(_VF128) (0x0022C800 + ((_VF128) * 4)) /* _i=0...127 */ /* Reset Source: CORER */ +#define VF_MBX_CPM_ATQBAL_MAX_INDEX 127 +#define VF_MBX_CPM_ATQBAL_ATQBAL_S 6 +#define VF_MBX_CPM_ATQBAL_ATQBAL_M MAKEMASK(0x3FFFFFF, 6) +#define VF_MBX_CPM_ATQH(_VF128) (0x0022CE00 + ((_VF128) * 4)) /* _i=0...127 */ /* Reset Source: CORER */ +#define VF_MBX_CPM_ATQH_MAX_INDEX 127 +#define VF_MBX_CPM_ATQH_ATQH_S 0 +#define VF_MBX_CPM_ATQH_ATQH_M MAKEMASK(0x3FF, 0) +#define VF_MBX_CPM_ATQLEN(_VF128) (0x0022CC00 + ((_VF128) * 4)) /* _i=0...127 */ /* Reset Source: PFR */ +#define VF_MBX_CPM_ATQLEN_MAX_INDEX 127 +#define VF_MBX_CPM_ATQLEN_ATQLEN_S 0 +#define VF_MBX_CPM_ATQLEN_ATQLEN_M MAKEMASK(0x3FF, 0) +#define VF_MBX_CPM_ATQLEN_ATQVFE_S 28 +#define VF_MBX_CPM_ATQLEN_ATQVFE_M BIT(28) +#define VF_MBX_CPM_ATQLEN_ATQOVFL_S 29 +#define VF_MBX_CPM_ATQLEN_ATQOVFL_M BIT(29) +#define VF_MBX_CPM_ATQLEN_ATQCRIT_S 30 +#define VF_MBX_CPM_ATQLEN_ATQCRIT_M BIT(30) +#define VF_MBX_CPM_ATQLEN_ATQENABLE_S 31 +#define VF_MBX_CPM_ATQLEN_ATQENABLE_M BIT(31) +#define VF_MBX_CPM_ATQT(_VF128) (0x0022D000 + ((_VF128) * 4)) /* _i=0...127 */ /* Reset Source: CORER */ +#define VF_MBX_CPM_ATQT_MAX_INDEX 127 +#define VF_MBX_CPM_ATQT_ATQT_S 0 +#define VF_MBX_CPM_ATQT_ATQT_M MAKEMASK(0x3FF, 0) +#define VF_MBX_HLP_ARQBAH(_VF16) (0x0022DD80 + ((_VF16) * 4)) /* _i=0...15 */ /* Reset Source: CORER */ +#define VF_MBX_HLP_ARQBAH_MAX_INDEX 15 +#define VF_MBX_HLP_ARQBAH_ARQBAH_S 0 +#define VF_MBX_HLP_ARQBAH_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0) +#define VF_MBX_HLP_ARQBAL(_VF16) (0x0022DD40 + ((_VF16) * 4)) /* _i=0...15 */ /* Reset Source: CORER */ +#define VF_MBX_HLP_ARQBAL_MAX_INDEX 15 +#define VF_MBX_HLP_ARQBAL_ARQBAL_LSB_S 0 +#define VF_MBX_HLP_ARQBAL_ARQBAL_LSB_M MAKEMASK(0x3F, 0) +#define VF_MBX_HLP_ARQBAL_ARQBAL_S 6 +#define VF_MBX_HLP_ARQBAL_ARQBAL_M MAKEMASK(0x3FFFFFF, 6) +#define VF_MBX_HLP_ARQH(_VF16) (0x0022DE00 + ((_VF16) * 4)) /* _i=0...15 */ /* Reset Source: CORER */ +#define VF_MBX_HLP_ARQH_MAX_INDEX 15 +#define VF_MBX_HLP_ARQH_ARQH_S 0 +#define VF_MBX_HLP_ARQH_ARQH_M MAKEMASK(0x3FF, 0) +#define VF_MBX_HLP_ARQLEN(_VF16) (0x0022DDC0 + ((_VF16) * 4)) /* _i=0...15 */ /* Reset Source: PFR */ +#define VF_MBX_HLP_ARQLEN_MAX_INDEX 15 +#define VF_MBX_HLP_ARQLEN_ARQLEN_S 0 +#define VF_MBX_HLP_ARQLEN_ARQLEN_M MAKEMASK(0x3FF, 0) +#define VF_MBX_HLP_ARQLEN_ARQVFE_S 28 +#define VF_MBX_HLP_ARQLEN_ARQVFE_M BIT(28) 
+#define VF_MBX_HLP_ARQLEN_ARQOVFL_S 29 +#define VF_MBX_HLP_ARQLEN_ARQOVFL_M BIT(29) +#define VF_MBX_HLP_ARQLEN_ARQCRIT_S 30 +#define VF_MBX_HLP_ARQLEN_ARQCRIT_M BIT(30) +#define VF_MBX_HLP_ARQLEN_ARQENABLE_S 31 +#define VF_MBX_HLP_ARQLEN_ARQENABLE_M BIT(31) +#define VF_MBX_HLP_ARQT(_VF16) (0x0022DE40 + ((_VF16) * 4)) /* _i=0...15 */ /* Reset Source: CORER */ +#define VF_MBX_HLP_ARQT_MAX_INDEX 15 +#define VF_MBX_HLP_ARQT_ARQT_S 0 +#define VF_MBX_HLP_ARQT_ARQT_M MAKEMASK(0x3FF, 0) +#define VF_MBX_HLP_ATQBAH(_VF16) (0x0022DC40 + ((_VF16) * 4)) /* _i=0...15 */ /* Reset Source: CORER */ +#define VF_MBX_HLP_ATQBAH_MAX_INDEX 15 +#define VF_MBX_HLP_ATQBAH_ATQBAH_S 0 +#define VF_MBX_HLP_ATQBAH_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0) +#define VF_MBX_HLP_ATQBAL(_VF16) (0x0022DC00 + ((_VF16) * 4)) /* _i=0...15 */ /* Reset Source: CORER */ +#define VF_MBX_HLP_ATQBAL_MAX_INDEX 15 +#define VF_MBX_HLP_ATQBAL_ATQBAL_S 6 +#define VF_MBX_HLP_ATQBAL_ATQBAL_M MAKEMASK(0x3FFFFFF, 6) +#define VF_MBX_HLP_ATQH(_VF16) (0x0022DCC0 + ((_VF16) * 4)) /* _i=0...15 */ /* Reset Source: CORER */ +#define VF_MBX_HLP_ATQH_MAX_INDEX 15 +#define VF_MBX_HLP_ATQH_ATQH_S 0 +#define VF_MBX_HLP_ATQH_ATQH_M MAKEMASK(0x3FF, 0) +#define VF_MBX_HLP_ATQLEN(_VF16) (0x0022DC80 + ((_VF16) * 4)) /* _i=0...15 */ /* Reset Source: PFR */ +#define VF_MBX_HLP_ATQLEN_MAX_INDEX 15 +#define VF_MBX_HLP_ATQLEN_ATQLEN_S 0 +#define VF_MBX_HLP_ATQLEN_ATQLEN_M MAKEMASK(0x3FF, 0) +#define VF_MBX_HLP_ATQLEN_ATQVFE_S 28 +#define VF_MBX_HLP_ATQLEN_ATQVFE_M BIT(28) +#define VF_MBX_HLP_ATQLEN_ATQOVFL_S 29 +#define VF_MBX_HLP_ATQLEN_ATQOVFL_M BIT(29) +#define VF_MBX_HLP_ATQLEN_ATQCRIT_S 30 +#define VF_MBX_HLP_ATQLEN_ATQCRIT_M BIT(30) +#define VF_MBX_HLP_ATQLEN_ATQENABLE_S 31 +#define VF_MBX_HLP_ATQLEN_ATQENABLE_M BIT(31) +#define VF_MBX_HLP_ATQT(_VF16) (0x0022DD00 + ((_VF16) * 4)) /* _i=0...15 */ /* Reset Source: CORER */ +#define VF_MBX_HLP_ATQT_MAX_INDEX 15 +#define VF_MBX_HLP_ATQT_ATQT_S 0 +#define VF_MBX_HLP_ATQT_ATQT_M MAKEMASK(0x3FF, 0) +#define VF_MBX_PSM_ARQBAH(_VF16) (0x0022E000 + ((_VF16) * 4)) /* _i=0...15 */ /* Reset Source: CORER */ +#define VF_MBX_PSM_ARQBAH_MAX_INDEX 15 +#define VF_MBX_PSM_ARQBAH_ARQBAH_S 0 +#define VF_MBX_PSM_ARQBAH_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0) +#define VF_MBX_PSM_ARQBAL(_VF16) (0x0022DFC0 + ((_VF16) * 4)) /* _i=0...15 */ /* Reset Source: CORER */ +#define VF_MBX_PSM_ARQBAL_MAX_INDEX 15 +#define VF_MBX_PSM_ARQBAL_ARQBAL_LSB_S 0 +#define VF_MBX_PSM_ARQBAL_ARQBAL_LSB_M MAKEMASK(0x3F, 0) +#define VF_MBX_PSM_ARQBAL_ARQBAL_S 6 +#define VF_MBX_PSM_ARQBAL_ARQBAL_M MAKEMASK(0x3FFFFFF, 6) +#define VF_MBX_PSM_ARQH(_VF16) (0x0022E080 + ((_VF16) * 4)) /* _i=0...15 */ /* Reset Source: CORER */ +#define VF_MBX_PSM_ARQH_MAX_INDEX 15 +#define VF_MBX_PSM_ARQH_ARQH_S 0 +#define VF_MBX_PSM_ARQH_ARQH_M MAKEMASK(0x3FF, 0) +#define VF_MBX_PSM_ARQLEN(_VF16) (0x0022E040 + ((_VF16) * 4)) /* _i=0...15 */ /* Reset Source: PFR */ +#define VF_MBX_PSM_ARQLEN_MAX_INDEX 15 +#define VF_MBX_PSM_ARQLEN_ARQLEN_S 0 +#define VF_MBX_PSM_ARQLEN_ARQLEN_M MAKEMASK(0x3FF, 0) +#define VF_MBX_PSM_ARQLEN_ARQVFE_S 28 +#define VF_MBX_PSM_ARQLEN_ARQVFE_M BIT(28) +#define VF_MBX_PSM_ARQLEN_ARQOVFL_S 29 +#define VF_MBX_PSM_ARQLEN_ARQOVFL_M BIT(29) +#define VF_MBX_PSM_ARQLEN_ARQCRIT_S 30 +#define VF_MBX_PSM_ARQLEN_ARQCRIT_M BIT(30) +#define VF_MBX_PSM_ARQLEN_ARQENABLE_S 31 +#define VF_MBX_PSM_ARQLEN_ARQENABLE_M BIT(31) +#define VF_MBX_PSM_ARQT(_VF16) (0x0022E0C0 + ((_VF16) * 4)) /* _i=0...15 */ /* Reset Source: CORER */ +#define VF_MBX_PSM_ARQT_MAX_INDEX 15 +#define 
VF_MBX_PSM_ARQT_ARQT_S 0 +#define VF_MBX_PSM_ARQT_ARQT_M MAKEMASK(0x3FF, 0) +#define VF_MBX_PSM_ATQBAH(_VF16) (0x0022DEC0 + ((_VF16) * 4)) /* _i=0...15 */ /* Reset Source: CORER */ +#define VF_MBX_PSM_ATQBAH_MAX_INDEX 15 +#define VF_MBX_PSM_ATQBAH_ATQBAH_S 0 +#define VF_MBX_PSM_ATQBAH_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0) +#define VF_MBX_PSM_ATQBAL(_VF16) (0x0022DE80 + ((_VF16) * 4)) /* _i=0...15 */ /* Reset Source: CORER */ +#define VF_MBX_PSM_ATQBAL_MAX_INDEX 15 +#define VF_MBX_PSM_ATQBAL_ATQBAL_S 6 +#define VF_MBX_PSM_ATQBAL_ATQBAL_M MAKEMASK(0x3FFFFFF, 6) +#define VF_MBX_PSM_ATQH(_VF16) (0x0022DF40 + ((_VF16) * 4)) /* _i=0...15 */ /* Reset Source: CORER */ +#define VF_MBX_PSM_ATQH_MAX_INDEX 15 +#define VF_MBX_PSM_ATQH_ATQH_S 0 +#define VF_MBX_PSM_ATQH_ATQH_M MAKEMASK(0x3FF, 0) +#define VF_MBX_PSM_ATQLEN(_VF16) (0x0022DF00 + ((_VF16) * 4)) /* _i=0...15 */ /* Reset Source: PFR */ +#define VF_MBX_PSM_ATQLEN_MAX_INDEX 15 +#define VF_MBX_PSM_ATQLEN_ATQLEN_S 0 +#define VF_MBX_PSM_ATQLEN_ATQLEN_M MAKEMASK(0x3FF, 0) +#define VF_MBX_PSM_ATQLEN_ATQVFE_S 28 +#define VF_MBX_PSM_ATQLEN_ATQVFE_M BIT(28) +#define VF_MBX_PSM_ATQLEN_ATQOVFL_S 29 +#define VF_MBX_PSM_ATQLEN_ATQOVFL_M BIT(29) +#define VF_MBX_PSM_ATQLEN_ATQCRIT_S 30 +#define VF_MBX_PSM_ATQLEN_ATQCRIT_M BIT(30) +#define VF_MBX_PSM_ATQLEN_ATQENABLE_S 31 +#define VF_MBX_PSM_ATQLEN_ATQENABLE_M BIT(31) +#define VF_MBX_PSM_ATQT(_VF16) (0x0022DF80 + ((_VF16) * 4)) /* _i=0...15 */ /* Reset Source: CORER */ +#define VF_MBX_PSM_ATQT_MAX_INDEX 15 +#define VF_MBX_PSM_ATQT_ATQT_S 0 +#define VF_MBX_PSM_ATQT_ATQT_M MAKEMASK(0x3FF, 0) +#define VF_SB_CPM_ARQBAH(_VF128) (0x0022F400 + ((_VF128) * 4)) /* _i=0...127 */ /* Reset Source: CORER */ +#define VF_SB_CPM_ARQBAH_MAX_INDEX 127 +#define VF_SB_CPM_ARQBAH_ARQBAH_S 0 +#define VF_SB_CPM_ARQBAH_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0) +#define VF_SB_CPM_ARQBAL(_VF128) (0x0022F200 + ((_VF128) * 4)) /* _i=0...127 */ /* Reset Source: CORER */ +#define VF_SB_CPM_ARQBAL_MAX_INDEX 127 +#define VF_SB_CPM_ARQBAL_ARQBAL_LSB_S 0 +#define VF_SB_CPM_ARQBAL_ARQBAL_LSB_M MAKEMASK(0x3F, 0) +#define VF_SB_CPM_ARQBAL_ARQBAL_S 6 +#define VF_SB_CPM_ARQBAL_ARQBAL_M MAKEMASK(0x3FFFFFF, 6) +#define VF_SB_CPM_ARQH(_VF128) (0x0022F800 + ((_VF128) * 4)) /* _i=0...127 */ /* Reset Source: CORER */ +#define VF_SB_CPM_ARQH_MAX_INDEX 127 +#define VF_SB_CPM_ARQH_ARQH_S 0 +#define VF_SB_CPM_ARQH_ARQH_M MAKEMASK(0x3FF, 0) +#define VF_SB_CPM_ARQLEN(_VF128) (0x0022F600 + ((_VF128) * 4)) /* _i=0...127 */ /* Reset Source: PFR */ +#define VF_SB_CPM_ARQLEN_MAX_INDEX 127 +#define VF_SB_CPM_ARQLEN_ARQLEN_S 0 +#define VF_SB_CPM_ARQLEN_ARQLEN_M MAKEMASK(0x3FF, 0) +#define VF_SB_CPM_ARQLEN_ARQVFE_S 28 +#define VF_SB_CPM_ARQLEN_ARQVFE_M BIT(28) +#define VF_SB_CPM_ARQLEN_ARQOVFL_S 29 +#define VF_SB_CPM_ARQLEN_ARQOVFL_M BIT(29) +#define VF_SB_CPM_ARQLEN_ARQCRIT_S 30 +#define VF_SB_CPM_ARQLEN_ARQCRIT_M BIT(30) +#define VF_SB_CPM_ARQLEN_ARQENABLE_S 31 +#define VF_SB_CPM_ARQLEN_ARQENABLE_M BIT(31) +#define VF_SB_CPM_ARQT(_VF128) (0x0022FA00 + ((_VF128) * 4)) /* _i=0...127 */ /* Reset Source: CORER */ +#define VF_SB_CPM_ARQT_MAX_INDEX 127 +#define VF_SB_CPM_ARQT_ARQT_S 0 +#define VF_SB_CPM_ARQT_ARQT_M MAKEMASK(0x3FF, 0) +#define VF_SB_CPM_ATQBAH(_VF128) (0x0022EA00 + ((_VF128) * 4)) /* _i=0...127 */ /* Reset Source: CORER */ +#define VF_SB_CPM_ATQBAH_MAX_INDEX 127 +#define VF_SB_CPM_ATQBAH_ATQBAH_S 0 +#define VF_SB_CPM_ATQBAH_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0) +#define VF_SB_CPM_ATQBAL(_VF128) (0x0022E800 + ((_VF128) * 4)) /* _i=0...127 */ /* Reset Source: CORER */ +#define 
VF_SB_CPM_ATQBAL_MAX_INDEX 127 +#define VF_SB_CPM_ATQBAL_ATQBAL_S 6 +#define VF_SB_CPM_ATQBAL_ATQBAL_M MAKEMASK(0x3FFFFFF, 6) +#define VF_SB_CPM_ATQH(_VF128) (0x0022EE00 + ((_VF128) * 4)) /* _i=0...127 */ /* Reset Source: CORER */ +#define VF_SB_CPM_ATQH_MAX_INDEX 127 +#define VF_SB_CPM_ATQH_ATQH_S 0 +#define VF_SB_CPM_ATQH_ATQH_M MAKEMASK(0x3FF, 0) +#define VF_SB_CPM_ATQLEN(_VF128) (0x0022EC00 + ((_VF128) * 4)) /* _i=0...127 */ /* Reset Source: PFR */ +#define VF_SB_CPM_ATQLEN_MAX_INDEX 127 +#define VF_SB_CPM_ATQLEN_ATQLEN_S 0 +#define VF_SB_CPM_ATQLEN_ATQLEN_M MAKEMASK(0x3FF, 0) +#define VF_SB_CPM_ATQLEN_ATQVFE_S 28 +#define VF_SB_CPM_ATQLEN_ATQVFE_M BIT(28) +#define VF_SB_CPM_ATQLEN_ATQOVFL_S 29 +#define VF_SB_CPM_ATQLEN_ATQOVFL_M BIT(29) +#define VF_SB_CPM_ATQLEN_ATQCRIT_S 30 +#define VF_SB_CPM_ATQLEN_ATQCRIT_M BIT(30) +#define VF_SB_CPM_ATQLEN_ATQENABLE_S 31 +#define VF_SB_CPM_ATQLEN_ATQENABLE_M BIT(31) +#define VF_SB_CPM_ATQT(_VF128) (0x0022F000 + ((_VF128) * 4)) /* _i=0...127 */ /* Reset Source: CORER */ +#define VF_SB_CPM_ATQT_MAX_INDEX 127 +#define VF_SB_CPM_ATQT_ATQT_S 0 +#define VF_SB_CPM_ATQT_ATQT_M MAKEMASK(0x3FF, 0) +#define VF_SB_CPM_REM_DEV_CTL 0x002300EC /* Reset Source: CORER */ +#define VF_SB_CPM_REM_DEV_CTL_DEST_EN_S 0 +#define VF_SB_CPM_REM_DEV_CTL_DEST_EN_M MAKEMASK(0xFFFF, 0) +#define VP_MBX_CPM_PF_VF_CTRL(_VP128) (0x00231800 + ((_VP128) * 4)) /* _i=0...127 */ /* Reset Source: CORER */ +#define VP_MBX_CPM_PF_VF_CTRL_MAX_INDEX 127 +#define VP_MBX_CPM_PF_VF_CTRL_QUEUE_EN_S 0 +#define VP_MBX_CPM_PF_VF_CTRL_QUEUE_EN_M BIT(0) +#define VP_MBX_HLP_PF_VF_CTRL(_VP16) (0x00231A00 + ((_VP16) * 4)) /* _i=0...15 */ /* Reset Source: CORER */ +#define VP_MBX_HLP_PF_VF_CTRL_MAX_INDEX 15 +#define VP_MBX_HLP_PF_VF_CTRL_QUEUE_EN_S 0 +#define VP_MBX_HLP_PF_VF_CTRL_QUEUE_EN_M BIT(0) +#define VP_MBX_PF_VF_CTRL(_VSI) (0x00230800 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: CORER */ +#define VP_MBX_PF_VF_CTRL_MAX_INDEX 767 +#define VP_MBX_PF_VF_CTRL_QUEUE_EN_S 0 +#define VP_MBX_PF_VF_CTRL_QUEUE_EN_M BIT(0) +#define VP_MBX_PSM_PF_VF_CTRL(_VP16) (0x00231A40 + ((_VP16) * 4)) /* _i=0...15 */ /* Reset Source: CORER */ +#define VP_MBX_PSM_PF_VF_CTRL_MAX_INDEX 15 +#define VP_MBX_PSM_PF_VF_CTRL_QUEUE_EN_S 0 +#define VP_MBX_PSM_PF_VF_CTRL_QUEUE_EN_M BIT(0) +#define VP_SB_CPM_PF_VF_CTRL(_VP128) (0x00231C00 + ((_VP128) * 4)) /* _i=0...127 */ /* Reset Source: CORER */ +#define VP_SB_CPM_PF_VF_CTRL_MAX_INDEX 127 +#define VP_SB_CPM_PF_VF_CTRL_QUEUE_EN_S 0 +#define VP_SB_CPM_PF_VF_CTRL_QUEUE_EN_M BIT(0) +#define GL_DCB_TDSCP2TC_BLOCK_DIS 0x00049218 /* Reset Source: CORER */ +#define GL_DCB_TDSCP2TC_BLOCK_DIS_DSCP2TC_BLOCK_DIS_S 0 +#define GL_DCB_TDSCP2TC_BLOCK_DIS_DSCP2TC_BLOCK_DIS_M BIT(0) +#define GL_DCB_TDSCP2TC_BLOCK_IPV4(_i) (0x00049018 + ((_i) * 4)) /* _i=0...63 */ /* Reset Source: CORER */ +#define GL_DCB_TDSCP2TC_BLOCK_IPV4_MAX_INDEX 63 +#define GL_DCB_TDSCP2TC_BLOCK_IPV4_TC_BLOCK_LUT_S 0 +#define GL_DCB_TDSCP2TC_BLOCK_IPV4_TC_BLOCK_LUT_M MAKEMASK(0xFFFFFFFF, 0) +#define GL_DCB_TDSCP2TC_BLOCK_IPV6(_i) (0x00049118 + ((_i) * 4)) /* _i=0...63 */ /* Reset Source: CORER */ +#define GL_DCB_TDSCP2TC_BLOCK_IPV6_MAX_INDEX 63 +#define GL_DCB_TDSCP2TC_BLOCK_IPV6_TC_BLOCK_LUT_S 0 +#define GL_DCB_TDSCP2TC_BLOCK_IPV6_TC_BLOCK_LUT_M MAKEMASK(0xFFFFFFFF, 0) +#define GLDCB_GENC 0x00083044 /* Reset Source: CORER */ +#define GLDCB_GENC_PCIRTT_S 0 +#define GLDCB_GENC_PCIRTT_M MAKEMASK(0xFFFF, 0) +#define GLDCB_PRS_RETSTCC(_i) (0x002000B0 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ +#define 
GLDCB_PRS_RETSTCC_MAX_INDEX 31 +#define GLDCB_PRS_RETSTCC_BWSHARE_S 0 +#define GLDCB_PRS_RETSTCC_BWSHARE_M MAKEMASK(0x7F, 0) +#define GLDCB_PRS_RETSTCC_ETSTC_S 31 +#define GLDCB_PRS_RETSTCC_ETSTC_M BIT(31) +#define GLDCB_PRS_RSPMC 0x00200160 /* Reset Source: CORER */ +#define GLDCB_PRS_RSPMC_RSPM_S 0 +#define GLDCB_PRS_RSPMC_RSPM_M MAKEMASK(0xFF, 0) +#define GLDCB_PRS_RSPMC_RPM_MODE_S 8 +#define GLDCB_PRS_RSPMC_RPM_MODE_M MAKEMASK(0x3, 8) +#define GLDCB_PRS_RSPMC_PRR_MAX_EXP_S 10 +#define GLDCB_PRS_RSPMC_PRR_MAX_EXP_M MAKEMASK(0xF, 10) +#define GLDCB_PRS_RSPMC_PFCTIMER_S 14 +#define GLDCB_PRS_RSPMC_PFCTIMER_M MAKEMASK(0x3FFF, 14) +#define GLDCB_PRS_RSPMC_RPM_DIS_S 31 +#define GLDCB_PRS_RSPMC_RPM_DIS_M BIT(31) +#define GLDCB_RETSTCC(_i) (0x00122140 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ +#define GLDCB_RETSTCC_MAX_INDEX 31 +#define GLDCB_RETSTCC_BWSHARE_S 0 +#define GLDCB_RETSTCC_BWSHARE_M MAKEMASK(0x7F, 0) +#define GLDCB_RETSTCC_ETSTC_S 31 +#define GLDCB_RETSTCC_ETSTC_M BIT(31) +#define GLDCB_RETSTCS(_i) (0x001221C0 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ +#define GLDCB_RETSTCS_MAX_INDEX 31 +#define GLDCB_RETSTCS_CREDITS_S 0 +#define GLDCB_RETSTCS_CREDITS_M MAKEMASK(0xFFFFFFFF, 0) +#define GLDCB_RTC2PFC_RCB 0x00122100 /* Reset Source: CORER */ +#define GLDCB_RTC2PFC_RCB_TC2PFC_S 0 +#define GLDCB_RTC2PFC_RCB_TC2PFC_M MAKEMASK(0xFFFFFFFF, 0) +#define GLDCB_SWT_RETSTCC(_i) (0x0020A040 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ +#define GLDCB_SWT_RETSTCC_MAX_INDEX 31 +#define GLDCB_SWT_RETSTCC_BWSHARE_S 0 +#define GLDCB_SWT_RETSTCC_BWSHARE_M MAKEMASK(0x7F, 0) +#define GLDCB_SWT_RETSTCC_ETSTC_S 31 +#define GLDCB_SWT_RETSTCC_ETSTC_M BIT(31) +#define GLDCB_TC2PFC 0x001D2694 /* Reset Source: CORER */ +#define GLDCB_TC2PFC_TC2PFC_S 0 +#define GLDCB_TC2PFC_TC2PFC_M MAKEMASK(0xFFFFFFFF, 0) +#define GLDCB_TCB_MNG_SP 0x000AE12C /* Reset Source: CORER */ +#define GLDCB_TCB_MNG_SP_MNG_SP_S 0 +#define GLDCB_TCB_MNG_SP_MNG_SP_M BIT(0) +#define GLDCB_TCB_TCLL_CFG 0x000AE134 /* Reset Source: CORER */ +#define GLDCB_TCB_TCLL_CFG_LLTC_S 0 +#define GLDCB_TCB_TCLL_CFG_LLTC_M MAKEMASK(0xFFFFFFFF, 0) +#define GLDCB_TCB_WB_SP 0x000AE310 /* Reset Source: CORER */ +#define GLDCB_TCB_WB_SP_WB_SP_S 0 +#define GLDCB_TCB_WB_SP_WB_SP_M BIT(0) +#define GLDCB_TCUPM_IMM_EN 0x000BC824 /* Reset Source: CORER */ +#define GLDCB_TCUPM_IMM_EN_IMM_EN_S 0 +#define GLDCB_TCUPM_IMM_EN_IMM_EN_M MAKEMASK(0xFFFFFFFF, 0) +#define GLDCB_TCUPM_LEGACY_TC 0x000BC828 /* Reset Source: CORER */ +#define GLDCB_TCUPM_LEGACY_TC_LEGTC_S 0 +#define GLDCB_TCUPM_LEGACY_TC_LEGTC_M MAKEMASK(0xFFFFFFFF, 0) +#define GLDCB_TCUPM_NO_EXCEED_DIS 0x000BC830 /* Reset Source: CORER */ +#define GLDCB_TCUPM_NO_EXCEED_DIS_NON_EXCEED_DIS_S 0 +#define GLDCB_TCUPM_NO_EXCEED_DIS_NON_EXCEED_DIS_M BIT(0) +#define GLDCB_TCUPM_WB_DIS 0x000BC834 /* Reset Source: CORER */ +#define GLDCB_TCUPM_WB_DIS_PORT_DISABLE_S 0 +#define GLDCB_TCUPM_WB_DIS_PORT_DISABLE_M BIT(0) +#define GLDCB_TCUPM_WB_DIS_TC_DISABLE_S 1 +#define GLDCB_TCUPM_WB_DIS_TC_DISABLE_M BIT(1) +#define GLDCB_TFPFCI 0x0009949C /* Reset Source: CORER */ +#define GLDCB_TFPFCI_GLDCB_TFPFCI_S 0 +#define GLDCB_TFPFCI_GLDCB_TFPFCI_M MAKEMASK(0xFFFFFFFF, 0) +#define GLDCB_TLPM_IMM_TCB 0x000A0190 /* Reset Source: CORER */ +#define GLDCB_TLPM_IMM_TCB_IMM_EN_S 0 +#define GLDCB_TLPM_IMM_TCB_IMM_EN_M MAKEMASK(0xFFFFFFFF, 0) +#define GLDCB_TLPM_IMM_TCUPM 0x000A018C /* Reset Source: CORER */ +#define GLDCB_TLPM_IMM_TCUPM_IMM_EN_S 0 +#define GLDCB_TLPM_IMM_TCUPM_IMM_EN_M 
MAKEMASK(0xFFFFFFFF, 0) +#define GLDCB_TLPM_PCI_DM 0x000A0180 /* Reset Source: CORER */ +#define GLDCB_TLPM_PCI_DM_MONITOR_S 0 +#define GLDCB_TLPM_PCI_DM_MONITOR_M MAKEMASK(0x7FFFF, 0) +#define GLDCB_TLPM_PCI_DTHR 0x000A0184 /* Reset Source: CORER */ +#define GLDCB_TLPM_PCI_DTHR_PCI_TDATA_S 0 +#define GLDCB_TLPM_PCI_DTHR_PCI_TDATA_M MAKEMASK(0xFFF, 0) +#define GLDCB_TPB_IMM_TLPM 0x00099468 /* Reset Source: CORER */ +#define GLDCB_TPB_IMM_TLPM_IMM_EN_S 0 +#define GLDCB_TPB_IMM_TLPM_IMM_EN_M MAKEMASK(0xFFFFFFFF, 0) +#define GLDCB_TPB_IMM_TPB 0x0009946C /* Reset Source: CORER */ +#define GLDCB_TPB_IMM_TPB_IMM_EN_S 0 +#define GLDCB_TPB_IMM_TPB_IMM_EN_M MAKEMASK(0xFFFFFFFF, 0) +#define GLDCB_TPB_TCLL_CFG 0x00099464 /* Reset Source: CORER */ +#define GLDCB_TPB_TCLL_CFG_LLTC_S 0 +#define GLDCB_TPB_TCLL_CFG_LLTC_M MAKEMASK(0xFFFFFFFF, 0) +#define GLTCB_BULK_DWRR_REG_QUANTA 0x000AE0E0 /* Reset Source: CORER */ +#define GLTCB_BULK_DWRR_REG_QUANTA_QUANTA_S 0 +#define GLTCB_BULK_DWRR_REG_QUANTA_QUANTA_M MAKEMASK(0x7FF, 0) +#define GLTCB_BULK_DWRR_REG_SAT 0x000AE0F0 /* Reset Source: CORER */ +#define GLTCB_BULK_DWRR_REG_SAT_SATURATION_S 0 +#define GLTCB_BULK_DWRR_REG_SAT_SATURATION_M MAKEMASK(0x1FFFF, 0) +#define GLTCB_BULK_DWRR_WB_QUANTA 0x000AE0E4 /* Reset Source: CORER */ +#define GLTCB_BULK_DWRR_WB_QUANTA_QUANTA_S 0 +#define GLTCB_BULK_DWRR_WB_QUANTA_QUANTA_M MAKEMASK(0x7FF, 0) +#define GLTCB_BULK_DWRR_WB_SAT 0x000AE0F4 /* Reset Source: CORER */ +#define GLTCB_BULK_DWRR_WB_SAT_SATURATION_S 0 +#define GLTCB_BULK_DWRR_WB_SAT_SATURATION_M MAKEMASK(0x1FFFF, 0) +#define GLTCB_CREDIT_EXP_CTL 0x000AE120 /* Reset Source: CORER */ +#define GLTCB_CREDIT_EXP_CTL_EN_S 0 +#define GLTCB_CREDIT_EXP_CTL_EN_M BIT(0) +#define GLTCB_CREDIT_EXP_CTL_MIN_PKT_S 1 +#define GLTCB_CREDIT_EXP_CTL_MIN_PKT_M MAKEMASK(0x1FF, 1) +#define GLTCB_LL_DWRR_REG_QUANTA 0x000AE0E8 /* Reset Source: CORER */ +#define GLTCB_LL_DWRR_REG_QUANTA_QUANTA_S 0 +#define GLTCB_LL_DWRR_REG_QUANTA_QUANTA_M MAKEMASK(0x7FF, 0) +#define GLTCB_LL_DWRR_REG_SAT 0x000AE0F8 /* Reset Source: CORER */ +#define GLTCB_LL_DWRR_REG_SAT_SATURATION_S 0 +#define GLTCB_LL_DWRR_REG_SAT_SATURATION_M MAKEMASK(0x1FFFF, 0) +#define GLTCB_LL_DWRR_WB_QUANTA 0x000AE0EC /* Reset Source: CORER */ +#define GLTCB_LL_DWRR_WB_QUANTA_QUANTA_S 0 +#define GLTCB_LL_DWRR_WB_QUANTA_QUANTA_M MAKEMASK(0x7FF, 0) +#define GLTCB_LL_DWRR_WB_SAT 0x000AE0FC /* Reset Source: CORER */ +#define GLTCB_LL_DWRR_WB_SAT_SATURATION_S 0 +#define GLTCB_LL_DWRR_WB_SAT_SATURATION_M MAKEMASK(0x1FFFF, 0) +#define GLTCB_WB_RL 0x000AE238 /* Reset Source: CORER */ +#define GLTCB_WB_RL_PERIOD_S 0 +#define GLTCB_WB_RL_PERIOD_M MAKEMASK(0xFFFF, 0) +#define GLTCB_WB_RL_EN_S 16 +#define GLTCB_WB_RL_EN_M BIT(16) +#define GLTPB_WB_RL 0x00099460 /* Reset Source: CORER */ +#define GLTPB_WB_RL_PERIOD_S 0 +#define GLTPB_WB_RL_PERIOD_M MAKEMASK(0xFFFF, 0) +#define GLTPB_WB_RL_EN_S 16 +#define GLTPB_WB_RL_EN_M BIT(16) +#define PRTDCB_FCCFG 0x001E4640 /* Reset Source: GLOBR */ +#define PRTDCB_FCCFG_TFCE_S 3 +#define PRTDCB_FCCFG_TFCE_M MAKEMASK(0x3, 3) +#define PRTDCB_FCRTV 0x001E4600 /* Reset Source: GLOBR */ +#define PRTDCB_FCRTV_FC_REFRESH_TH_S 0 +#define PRTDCB_FCRTV_FC_REFRESH_TH_M MAKEMASK(0xFFFF, 0) +#define PRTDCB_FCTTVN(_i) (0x001E4580 + ((_i) * 32)) /* _i=0...3 */ /* Reset Source: GLOBR */ +#define PRTDCB_FCTTVN_MAX_INDEX 3 +#define PRTDCB_FCTTVN_TTV_2N_S 0 +#define PRTDCB_FCTTVN_TTV_2N_M MAKEMASK(0xFFFF, 0) +#define PRTDCB_FCTTVN_TTV_2N_P1_S 16 +#define PRTDCB_FCTTVN_TTV_2N_P1_M MAKEMASK(0xFFFF, 16) +#define 
PRTDCB_GENC 0x00083000 /* Reset Source: CORER */ +#define PRTDCB_GENC_NUMTC_S 2 +#define PRTDCB_GENC_NUMTC_M MAKEMASK(0xF, 2) +#define PRTDCB_GENC_FCOEUP_S 6 +#define PRTDCB_GENC_FCOEUP_M MAKEMASK(0x7, 6) +#define PRTDCB_GENC_FCOEUP_VALID_S 9 +#define PRTDCB_GENC_FCOEUP_VALID_M BIT(9) +#define PRTDCB_GENC_PFCLDA_S 16 +#define PRTDCB_GENC_PFCLDA_M MAKEMASK(0xFFFF, 16) +#define PRTDCB_GENS 0x00083020 /* Reset Source: CORER */ +#define PRTDCB_GENS_DCBX_STATUS_S 0 +#define PRTDCB_GENS_DCBX_STATUS_M MAKEMASK(0x7, 0) +#define PRTDCB_PRS_RETSC 0x002001A0 /* Reset Source: CORER */ +#define PRTDCB_PRS_RETSC_ETS_MODE_S 0 +#define PRTDCB_PRS_RETSC_ETS_MODE_M BIT(0) +#define PRTDCB_PRS_RETSC_NON_ETS_MODE_S 1 +#define PRTDCB_PRS_RETSC_NON_ETS_MODE_M BIT(1) +#define PRTDCB_PRS_RETSC_ETS_MAX_EXP_S 2 +#define PRTDCB_PRS_RETSC_ETS_MAX_EXP_M MAKEMASK(0xF, 2) +#define PRTDCB_PRS_RPRRC 0x00200180 /* Reset Source: CORER */ +#define PRTDCB_PRS_RPRRC_BWSHARE_S 0 +#define PRTDCB_PRS_RPRRC_BWSHARE_M MAKEMASK(0x3FF, 0) +#define PRTDCB_PRS_RPRRC_BWSHARE_DIS_S 31 +#define PRTDCB_PRS_RPRRC_BWSHARE_DIS_M BIT(31) +#define PRTDCB_RETSC 0x001222A0 /* Reset Source: CORER */ +#define PRTDCB_RETSC_ETS_MODE_S 0 +#define PRTDCB_RETSC_ETS_MODE_M BIT(0) +#define PRTDCB_RETSC_NON_ETS_MODE_S 1 +#define PRTDCB_RETSC_NON_ETS_MODE_M BIT(1) +#define PRTDCB_RETSC_ETS_MAX_EXP_S 2 +#define PRTDCB_RETSC_ETS_MAX_EXP_M MAKEMASK(0xF, 2) +#define PRTDCB_RPRRC 0x001220C0 /* Reset Source: CORER */ +#define PRTDCB_RPRRC_BWSHARE_S 0 +#define PRTDCB_RPRRC_BWSHARE_M MAKEMASK(0x3FF, 0) +#define PRTDCB_RPRRC_BWSHARE_DIS_S 31 +#define PRTDCB_RPRRC_BWSHARE_DIS_M BIT(31) +#define PRTDCB_RPRRS 0x001220E0 /* Reset Source: CORER */ +#define PRTDCB_RPRRS_CREDITS_S 0 +#define PRTDCB_RPRRS_CREDITS_M MAKEMASK(0xFFFFFFFF, 0) +#define PRTDCB_RUP_TDPU 0x00040960 /* Reset Source: CORER */ +#define PRTDCB_RUP_TDPU_NOVLANUP_S 0 +#define PRTDCB_RUP_TDPU_NOVLANUP_M MAKEMASK(0x7, 0) +#define PRTDCB_RUP2TC 0x001D2640 /* Reset Source: CORER */ +#define PRTDCB_RUP2TC_UP0TC_S 0 +#define PRTDCB_RUP2TC_UP0TC_M MAKEMASK(0x7, 0) +#define PRTDCB_RUP2TC_UP1TC_S 3 +#define PRTDCB_RUP2TC_UP1TC_M MAKEMASK(0x7, 3) +#define PRTDCB_RUP2TC_UP2TC_S 6 +#define PRTDCB_RUP2TC_UP2TC_M MAKEMASK(0x7, 6) +#define PRTDCB_RUP2TC_UP3TC_S 9 +#define PRTDCB_RUP2TC_UP3TC_M MAKEMASK(0x7, 9) +#define PRTDCB_RUP2TC_UP4TC_S 12 +#define PRTDCB_RUP2TC_UP4TC_M MAKEMASK(0x7, 12) +#define PRTDCB_RUP2TC_UP5TC_S 15 +#define PRTDCB_RUP2TC_UP5TC_M MAKEMASK(0x7, 15) +#define PRTDCB_RUP2TC_UP6TC_S 18 +#define PRTDCB_RUP2TC_UP6TC_M MAKEMASK(0x7, 18) +#define PRTDCB_RUP2TC_UP7TC_S 21 +#define PRTDCB_RUP2TC_UP7TC_M MAKEMASK(0x7, 21) +#define PRTDCB_SWT_RETSC 0x0020A140 /* Reset Source: CORER */ +#define PRTDCB_SWT_RETSC_ETS_MODE_S 0 +#define PRTDCB_SWT_RETSC_ETS_MODE_M BIT(0) +#define PRTDCB_SWT_RETSC_NON_ETS_MODE_S 1 +#define PRTDCB_SWT_RETSC_NON_ETS_MODE_M BIT(1) +#define PRTDCB_SWT_RETSC_ETS_MAX_EXP_S 2 +#define PRTDCB_SWT_RETSC_ETS_MAX_EXP_M MAKEMASK(0xF, 2) +#define PRTDCB_TCB_DWRR_CREDITS 0x000AE000 /* Reset Source: CORER */ +#define PRTDCB_TCB_DWRR_CREDITS_CREDITS_S 0 +#define PRTDCB_TCB_DWRR_CREDITS_CREDITS_M MAKEMASK(0x3FFFF, 0) +#define PRTDCB_TCB_DWRR_QUANTA 0x000AE020 /* Reset Source: CORER */ +#define PRTDCB_TCB_DWRR_QUANTA_QUANTA_S 0 +#define PRTDCB_TCB_DWRR_QUANTA_QUANTA_M MAKEMASK(0x7FF, 0) +#define PRTDCB_TCB_DWRR_SAT 0x000AE040 /* Reset Source: CORER */ +#define PRTDCB_TCB_DWRR_SAT_SATURATION_S 0 +#define PRTDCB_TCB_DWRR_SAT_SATURATION_M MAKEMASK(0x1FFFF, 0) +#define PRTDCB_TCUPM_NO_EXCEED_DM 
0x000BC3C0 /* Reset Source: CORER */ +#define PRTDCB_TCUPM_NO_EXCEED_DM_MONITOR_S 0 +#define PRTDCB_TCUPM_NO_EXCEED_DM_MONITOR_M MAKEMASK(0x7FFFF, 0) +#define PRTDCB_TCUPM_REG_CM 0x000BC360 /* Reset Source: CORER */ +#define PRTDCB_TCUPM_REG_CM_MONITOR_S 0 +#define PRTDCB_TCUPM_REG_CM_MONITOR_M MAKEMASK(0x7FFF, 0) +#define PRTDCB_TCUPM_REG_CTHR 0x000BC380 /* Reset Source: CORER */ +#define PRTDCB_TCUPM_REG_CTHR_PORTOFFTH_H_S 0 +#define PRTDCB_TCUPM_REG_CTHR_PORTOFFTH_H_M MAKEMASK(0x7FFF, 0) +#define PRTDCB_TCUPM_REG_CTHR_PORTOFFTH_L_S 15 +#define PRTDCB_TCUPM_REG_CTHR_PORTOFFTH_L_M MAKEMASK(0x7FFF, 15) +#define PRTDCB_TCUPM_REG_DM 0x000BC3A0 /* Reset Source: CORER */ +#define PRTDCB_TCUPM_REG_DM_MONITOR_S 0 +#define PRTDCB_TCUPM_REG_DM_MONITOR_M MAKEMASK(0x7FFFF, 0) +#define PRTDCB_TCUPM_REG_DTHR 0x000BC3E0 /* Reset Source: CORER */ +#define PRTDCB_TCUPM_REG_DTHR_PORTOFFTH_H_S 0 +#define PRTDCB_TCUPM_REG_DTHR_PORTOFFTH_H_M MAKEMASK(0xFFF, 0) +#define PRTDCB_TCUPM_REG_DTHR_PORTOFFTH_L_S 12 +#define PRTDCB_TCUPM_REG_DTHR_PORTOFFTH_L_M MAKEMASK(0xFFF, 12) +#define PRTDCB_TCUPM_REG_PE_HB_DM 0x000BC400 /* Reset Source: CORER */ +#define PRTDCB_TCUPM_REG_PE_HB_DM_MONITOR_S 0 +#define PRTDCB_TCUPM_REG_PE_HB_DM_MONITOR_M MAKEMASK(0xFFF, 0) +#define PRTDCB_TCUPM_REG_PE_HB_DTHR 0x000BC420 /* Reset Source: CORER */ +#define PRTDCB_TCUPM_REG_PE_HB_DTHR_PORTOFFTH_H_S 0 +#define PRTDCB_TCUPM_REG_PE_HB_DTHR_PORTOFFTH_H_M MAKEMASK(0xFFF, 0) +#define PRTDCB_TCUPM_REG_PE_HB_DTHR_PORTOFFTH_L_S 12 +#define PRTDCB_TCUPM_REG_PE_HB_DTHR_PORTOFFTH_L_M MAKEMASK(0xFFF, 12) +#define PRTDCB_TCUPM_WAIT_PFC_CM 0x000BC440 /* Reset Source: CORER */ +#define PRTDCB_TCUPM_WAIT_PFC_CM_MONITOR_S 0 +#define PRTDCB_TCUPM_WAIT_PFC_CM_MONITOR_M MAKEMASK(0x7FFF, 0) +#define PRTDCB_TCUPM_WAIT_PFC_CTHR 0x000BC460 /* Reset Source: CORER */ +#define PRTDCB_TCUPM_WAIT_PFC_CTHR_PORTOFFTH_S 0 +#define PRTDCB_TCUPM_WAIT_PFC_CTHR_PORTOFFTH_M MAKEMASK(0x7FFF, 0) +#define PRTDCB_TCUPM_WAIT_PFC_DM 0x000BC480 /* Reset Source: CORER */ +#define PRTDCB_TCUPM_WAIT_PFC_DM_MONITOR_S 0 +#define PRTDCB_TCUPM_WAIT_PFC_DM_MONITOR_M MAKEMASK(0x7FFFF, 0) +#define PRTDCB_TCUPM_WAIT_PFC_DTHR 0x000BC4A0 /* Reset Source: CORER */ +#define PRTDCB_TCUPM_WAIT_PFC_DTHR_PORTOFFTH_S 0 +#define PRTDCB_TCUPM_WAIT_PFC_DTHR_PORTOFFTH_M MAKEMASK(0xFFF, 0) +#define PRTDCB_TCUPM_WAIT_PFC_PE_HB_DM 0x000BC4C0 /* Reset Source: CORER */ +#define PRTDCB_TCUPM_WAIT_PFC_PE_HB_DM_MONITOR_S 0 +#define PRTDCB_TCUPM_WAIT_PFC_PE_HB_DM_MONITOR_M MAKEMASK(0xFFF, 0) +#define PRTDCB_TCUPM_WAIT_PFC_PE_HB_DTHR 0x000BC4E0 /* Reset Source: CORER */ +#define PRTDCB_TCUPM_WAIT_PFC_PE_HB_DTHR_PORTOFFTH_S 0 +#define PRTDCB_TCUPM_WAIT_PFC_PE_HB_DTHR_PORTOFFTH_M MAKEMASK(0xFFF, 0) +#define PRTDCB_TDPUC 0x00040940 /* Reset Source: CORER */ +#define PRTDCB_TDPUC_MAX_TXFRAME_S 0 +#define PRTDCB_TDPUC_MAX_TXFRAME_M MAKEMASK(0xFFFF, 0) +#define PRTDCB_TDPUC_MAL_LENGTH_S 16 +#define PRTDCB_TDPUC_MAL_LENGTH_M BIT(16) +#define PRTDCB_TDPUC_MAL_CMD_S 17 +#define PRTDCB_TDPUC_MAL_CMD_M BIT(17) +#define PRTDCB_TDPUC_TTL_DROP_S 18 +#define PRTDCB_TDPUC_TTL_DROP_M BIT(18) +#define PRTDCB_TDPUC_UR_DROP_S 19 +#define PRTDCB_TDPUC_UR_DROP_M BIT(19) +#define PRTDCB_TDPUC_DUMMY_S 20 +#define PRTDCB_TDPUC_DUMMY_M BIT(20) +#define PRTDCB_TDPUC_BIG_PKT_SIZE_S 21 +#define PRTDCB_TDPUC_BIG_PKT_SIZE_M BIT(21) +#define PRTDCB_TDPUC_L2_ACCEPT_FAIL_S 22 +#define PRTDCB_TDPUC_L2_ACCEPT_FAIL_M BIT(22) +#define PRTDCB_TDPUC_DSCP_CHECK_FAIL_S 23 +#define PRTDCB_TDPUC_DSCP_CHECK_FAIL_M BIT(23) +#define 
PRTDCB_TDPUC_RCU_ANTISPOOF_S 24 +#define PRTDCB_TDPUC_RCU_ANTISPOOF_M BIT(24) +#define PRTDCB_TDPUC_NIC_DSI_S 25 +#define PRTDCB_TDPUC_NIC_DSI_M BIT(25) +#define PRTDCB_TDPUC_NIC_IPSEC_S 26 +#define PRTDCB_TDPUC_NIC_IPSEC_M BIT(26) +#define PRTDCB_TDPUC_CLEAR_DROP_S 31 +#define PRTDCB_TDPUC_CLEAR_DROP_M BIT(31) +#define PRTDCB_TFCS 0x001E4560 /* Reset Source: GLOBR */ +#define PRTDCB_TFCS_TXOFF_S 0 +#define PRTDCB_TFCS_TXOFF_M BIT(0) +#define PRTDCB_TFCS_TXOFF0_S 8 +#define PRTDCB_TFCS_TXOFF0_M BIT(8) +#define PRTDCB_TFCS_TXOFF1_S 9 +#define PRTDCB_TFCS_TXOFF1_M BIT(9) +#define PRTDCB_TFCS_TXOFF2_S 10 +#define PRTDCB_TFCS_TXOFF2_M BIT(10) +#define PRTDCB_TFCS_TXOFF3_S 11 +#define PRTDCB_TFCS_TXOFF3_M BIT(11) +#define PRTDCB_TFCS_TXOFF4_S 12 +#define PRTDCB_TFCS_TXOFF4_M BIT(12) +#define PRTDCB_TFCS_TXOFF5_S 13 +#define PRTDCB_TFCS_TXOFF5_M BIT(13) +#define PRTDCB_TFCS_TXOFF6_S 14 +#define PRTDCB_TFCS_TXOFF6_M BIT(14) +#define PRTDCB_TFCS_TXOFF7_S 15 +#define PRTDCB_TFCS_TXOFF7_M BIT(15) +#define PRTDCB_TLPM_REG_DM 0x000A0000 /* Reset Source: CORER */ +#define PRTDCB_TLPM_REG_DM_MONITOR_S 0 +#define PRTDCB_TLPM_REG_DM_MONITOR_M MAKEMASK(0x7FFFF, 0) +#define PRTDCB_TLPM_REG_DTHR 0x000A0020 /* Reset Source: CORER */ +#define PRTDCB_TLPM_REG_DTHR_PORTOFFTH_H_S 0 +#define PRTDCB_TLPM_REG_DTHR_PORTOFFTH_H_M MAKEMASK(0xFFF, 0) +#define PRTDCB_TLPM_REG_DTHR_PORTOFFTH_L_S 12 +#define PRTDCB_TLPM_REG_DTHR_PORTOFFTH_L_M MAKEMASK(0xFFF, 12) +#define PRTDCB_TLPM_WAIT_PFC_DM 0x000A0040 /* Reset Source: CORER */ +#define PRTDCB_TLPM_WAIT_PFC_DM_MONITOR_S 0 +#define PRTDCB_TLPM_WAIT_PFC_DM_MONITOR_M MAKEMASK(0x7FFFF, 0) +#define PRTDCB_TLPM_WAIT_PFC_DTHR 0x000A0060 /* Reset Source: CORER */ +#define PRTDCB_TLPM_WAIT_PFC_DTHR_PORTOFFTH_S 0 +#define PRTDCB_TLPM_WAIT_PFC_DTHR_PORTOFFTH_M MAKEMASK(0xFFF, 0) +#define PRTDCB_TPFCTS(_i) (0x001E4660 + ((_i) * 32)) /* _i=0...7 */ /* Reset Source: GLOBR */ +#define PRTDCB_TPFCTS_MAX_INDEX 7 +#define PRTDCB_TPFCTS_PFCTIMER_S 0 +#define PRTDCB_TPFCTS_PFCTIMER_M MAKEMASK(0x3FFF, 0) +#define PRTDCB_TUP2TC 0x001D26C0 /* Reset Source: CORER */ +#define PRTDCB_TUP2TC_UP0TC_S 0 +#define PRTDCB_TUP2TC_UP0TC_M MAKEMASK(0x7, 0) +#define PRTDCB_TUP2TC_UP1TC_S 3 +#define PRTDCB_TUP2TC_UP1TC_M MAKEMASK(0x7, 3) +#define PRTDCB_TUP2TC_UP2TC_S 6 +#define PRTDCB_TUP2TC_UP2TC_M MAKEMASK(0x7, 6) +#define PRTDCB_TUP2TC_UP3TC_S 9 +#define PRTDCB_TUP2TC_UP3TC_M MAKEMASK(0x7, 9) +#define PRTDCB_TUP2TC_UP4TC_S 12 +#define PRTDCB_TUP2TC_UP4TC_M MAKEMASK(0x7, 12) +#define PRTDCB_TUP2TC_UP5TC_S 15 +#define PRTDCB_TUP2TC_UP5TC_M MAKEMASK(0x7, 15) +#define PRTDCB_TUP2TC_UP6TC_S 18 +#define PRTDCB_TUP2TC_UP6TC_M MAKEMASK(0x7, 18) +#define PRTDCB_TUP2TC_UP7TC_S 21 +#define PRTDCB_TUP2TC_UP7TC_M MAKEMASK(0x7, 21) +#define PRTDCB_TX_DSCP2UP_CTL 0x00040980 /* Reset Source: CORER */ +#define PRTDCB_TX_DSCP2UP_CTL_DSCP2UP_ENA_S 0 +#define PRTDCB_TX_DSCP2UP_CTL_DSCP2UP_ENA_M BIT(0) +#define PRTDCB_TX_DSCP2UP_CTL_DSCP_DEFAULT_UP_S 1 +#define PRTDCB_TX_DSCP2UP_CTL_DSCP_DEFAULT_UP_M MAKEMASK(0x7, 1) +#define PRTDCB_TX_DSCP2UP_IPV4_LUT(_i) (0x000409A0 + ((_i) * 32)) /* _i=0...7 */ /* Reset Source: CORER */ +#define PRTDCB_TX_DSCP2UP_IPV4_LUT_MAX_INDEX 7 +#define PRTDCB_TX_DSCP2UP_IPV4_LUT_DSCP2UP_LUT_0_S 0 +#define PRTDCB_TX_DSCP2UP_IPV4_LUT_DSCP2UP_LUT_0_M MAKEMASK(0x7, 0) +#define PRTDCB_TX_DSCP2UP_IPV4_LUT_DSCP2UP_LUT_1_S 4 +#define PRTDCB_TX_DSCP2UP_IPV4_LUT_DSCP2UP_LUT_1_M MAKEMASK(0x7, 4) +#define PRTDCB_TX_DSCP2UP_IPV4_LUT_DSCP2UP_LUT_2_S 8 +#define PRTDCB_TX_DSCP2UP_IPV4_LUT_DSCP2UP_LUT_2_M 
MAKEMASK(0x7, 8) +#define PRTDCB_TX_DSCP2UP_IPV4_LUT_DSCP2UP_LUT_3_S 12 +#define PRTDCB_TX_DSCP2UP_IPV4_LUT_DSCP2UP_LUT_3_M MAKEMASK(0x7, 12) +#define PRTDCB_TX_DSCP2UP_IPV4_LUT_DSCP2UP_LUT_4_S 16 +#define PRTDCB_TX_DSCP2UP_IPV4_LUT_DSCP2UP_LUT_4_M MAKEMASK(0x7, 16) +#define PRTDCB_TX_DSCP2UP_IPV4_LUT_DSCP2UP_LUT_5_S 20 +#define PRTDCB_TX_DSCP2UP_IPV4_LUT_DSCP2UP_LUT_5_M MAKEMASK(0x7, 20) +#define PRTDCB_TX_DSCP2UP_IPV4_LUT_DSCP2UP_LUT_6_S 24 +#define PRTDCB_TX_DSCP2UP_IPV4_LUT_DSCP2UP_LUT_6_M MAKEMASK(0x7, 24) +#define PRTDCB_TX_DSCP2UP_IPV4_LUT_DSCP2UP_LUT_7_S 28 +#define PRTDCB_TX_DSCP2UP_IPV4_LUT_DSCP2UP_LUT_7_M MAKEMASK(0x7, 28) +#define PRTDCB_TX_DSCP2UP_IPV6_LUT(_i) (0x00040AA0 + ((_i) * 32)) /* _i=0...7 */ /* Reset Source: CORER */ +#define PRTDCB_TX_DSCP2UP_IPV6_LUT_MAX_INDEX 7 +#define PRTDCB_TX_DSCP2UP_IPV6_LUT_DSCP2UP_LUT_0_S 0 +#define PRTDCB_TX_DSCP2UP_IPV6_LUT_DSCP2UP_LUT_0_M MAKEMASK(0x7, 0) +#define PRTDCB_TX_DSCP2UP_IPV6_LUT_DSCP2UP_LUT_1_S 4 +#define PRTDCB_TX_DSCP2UP_IPV6_LUT_DSCP2UP_LUT_1_M MAKEMASK(0x7, 4) +#define PRTDCB_TX_DSCP2UP_IPV6_LUT_DSCP2UP_LUT_2_S 8 +#define PRTDCB_TX_DSCP2UP_IPV6_LUT_DSCP2UP_LUT_2_M MAKEMASK(0x7, 8) +#define PRTDCB_TX_DSCP2UP_IPV6_LUT_DSCP2UP_LUT_3_S 12 +#define PRTDCB_TX_DSCP2UP_IPV6_LUT_DSCP2UP_LUT_3_M MAKEMASK(0x7, 12) +#define PRTDCB_TX_DSCP2UP_IPV6_LUT_DSCP2UP_LUT_4_S 16 +#define PRTDCB_TX_DSCP2UP_IPV6_LUT_DSCP2UP_LUT_4_M MAKEMASK(0x7, 16) +#define PRTDCB_TX_DSCP2UP_IPV6_LUT_DSCP2UP_LUT_5_S 20 +#define PRTDCB_TX_DSCP2UP_IPV6_LUT_DSCP2UP_LUT_5_M MAKEMASK(0x7, 20) +#define PRTDCB_TX_DSCP2UP_IPV6_LUT_DSCP2UP_LUT_6_S 24 +#define PRTDCB_TX_DSCP2UP_IPV6_LUT_DSCP2UP_LUT_6_M MAKEMASK(0x7, 24) +#define PRTDCB_TX_DSCP2UP_IPV6_LUT_DSCP2UP_LUT_7_S 28 +#define PRTDCB_TX_DSCP2UP_IPV6_LUT_DSCP2UP_LUT_7_M MAKEMASK(0x7, 28) +#define PRTTCB_BULK_DWRR_REG_CREDITS 0x000AE060 /* Reset Source: CORER */ +#define PRTTCB_BULK_DWRR_REG_CREDITS_CREDITS_S 0 +#define PRTTCB_BULK_DWRR_REG_CREDITS_CREDITS_M MAKEMASK(0x3FFFF, 0) +#define PRTTCB_BULK_DWRR_WB_CREDITS 0x000AE080 /* Reset Source: CORER */ +#define PRTTCB_BULK_DWRR_WB_CREDITS_CREDITS_S 0 +#define PRTTCB_BULK_DWRR_WB_CREDITS_CREDITS_M MAKEMASK(0x3FFFF, 0) +#define PRTTCB_CREDIT_EXP 0x000AE100 /* Reset Source: CORER */ +#define PRTTCB_CREDIT_EXP_EXPANSION_S 0 +#define PRTTCB_CREDIT_EXP_EXPANSION_M MAKEMASK(0xFF, 0) +#define PRTTCB_LL_DWRR_REG_CREDITS 0x000AE0A0 /* Reset Source: CORER */ +#define PRTTCB_LL_DWRR_REG_CREDITS_CREDITS_S 0 +#define PRTTCB_LL_DWRR_REG_CREDITS_CREDITS_M MAKEMASK(0x3FFFF, 0) +#define PRTTCB_LL_DWRR_WB_CREDITS 0x000AE0C0 /* Reset Source: CORER */ +#define PRTTCB_LL_DWRR_WB_CREDITS_CREDITS_S 0 +#define PRTTCB_LL_DWRR_WB_CREDITS_CREDITS_M MAKEMASK(0x3FFFF, 0) +#define TCDCB_TCUPM_WAIT_CM(_i) (0x000BC520 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ +#define TCDCB_TCUPM_WAIT_CM_MAX_INDEX 31 +#define TCDCB_TCUPM_WAIT_CM_MONITOR_S 0 +#define TCDCB_TCUPM_WAIT_CM_MONITOR_M MAKEMASK(0x7FFF, 0) +#define TCDCB_TCUPM_WAIT_CTHR(_i) (0x000BC5A0 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ +#define TCDCB_TCUPM_WAIT_CTHR_MAX_INDEX 31 +#define TCDCB_TCUPM_WAIT_CTHR_TCOFFTH_S 0 +#define TCDCB_TCUPM_WAIT_CTHR_TCOFFTH_M MAKEMASK(0x7FFF, 0) +#define TCDCB_TCUPM_WAIT_DM(_i) (0x000BC620 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ +#define TCDCB_TCUPM_WAIT_DM_MAX_INDEX 31 +#define TCDCB_TCUPM_WAIT_DM_MONITOR_S 0 +#define TCDCB_TCUPM_WAIT_DM_MONITOR_M MAKEMASK(0x7FFFF, 0) +#define TCDCB_TCUPM_WAIT_DTHR(_i) (0x000BC6A0 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ 
+#define TCDCB_TCUPM_WAIT_DTHR_MAX_INDEX 31 +#define TCDCB_TCUPM_WAIT_DTHR_TCOFFTH_S 0 +#define TCDCB_TCUPM_WAIT_DTHR_TCOFFTH_M MAKEMASK(0xFFF, 0) +#define TCDCB_TCUPM_WAIT_PE_HB_DM(_i) (0x000BC720 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ +#define TCDCB_TCUPM_WAIT_PE_HB_DM_MAX_INDEX 31 +#define TCDCB_TCUPM_WAIT_PE_HB_DM_MONITOR_S 0 +#define TCDCB_TCUPM_WAIT_PE_HB_DM_MONITOR_M MAKEMASK(0xFFF, 0) +#define TCDCB_TCUPM_WAIT_PE_HB_DTHR(_i) (0x000BC7A0 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ +#define TCDCB_TCUPM_WAIT_PE_HB_DTHR_MAX_INDEX 31 +#define TCDCB_TCUPM_WAIT_PE_HB_DTHR_TCOFFTH_S 0 +#define TCDCB_TCUPM_WAIT_PE_HB_DTHR_TCOFFTH_M MAKEMASK(0xFFF, 0) +#define TCDCB_TLPM_WAIT_DM(_i) (0x000A0080 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ +#define TCDCB_TLPM_WAIT_DM_MAX_INDEX 31 +#define TCDCB_TLPM_WAIT_DM_MONITOR_S 0 +#define TCDCB_TLPM_WAIT_DM_MONITOR_M MAKEMASK(0x7FFFF, 0) +#define TCDCB_TLPM_WAIT_DTHR(_i) (0x000A0100 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ +#define TCDCB_TLPM_WAIT_DTHR_MAX_INDEX 31 +#define TCDCB_TLPM_WAIT_DTHR_TCOFFTH_S 0 +#define TCDCB_TLPM_WAIT_DTHR_TCOFFTH_M MAKEMASK(0xFFF, 0) +#define TCTCB_WB_RL_TC_CFG(_i) (0x000AE138 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ +#define TCTCB_WB_RL_TC_CFG_MAX_INDEX 31 +#define TCTCB_WB_RL_TC_CFG_TOKENS_S 0 +#define TCTCB_WB_RL_TC_CFG_TOKENS_M MAKEMASK(0xFFF, 0) +#define TCTCB_WB_RL_TC_CFG_BURST_SIZE_S 12 +#define TCTCB_WB_RL_TC_CFG_BURST_SIZE_M MAKEMASK(0x3FF, 12) +#define TCTCB_WB_RL_TC_STAT(_i) (0x000AE1B8 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ +#define TCTCB_WB_RL_TC_STAT_MAX_INDEX 31 +#define TCTCB_WB_RL_TC_STAT_BUCKET_S 0 +#define TCTCB_WB_RL_TC_STAT_BUCKET_M MAKEMASK(0x1FFFF, 0) +#define TPB_BULK_DWRR_REG_QUANTA 0x00099340 /* Reset Source: CORER */ +#define TPB_BULK_DWRR_REG_QUANTA_QUANTA_S 0 +#define TPB_BULK_DWRR_REG_QUANTA_QUANTA_M MAKEMASK(0x7FF, 0) +#define TPB_BULK_DWRR_REG_SAT 0x00099350 /* Reset Source: CORER */ +#define TPB_BULK_DWRR_REG_SAT_SATURATION_S 0 +#define TPB_BULK_DWRR_REG_SAT_SATURATION_M MAKEMASK(0x1FFFF, 0) +#define TPB_BULK_DWRR_WB_QUANTA 0x00099344 /* Reset Source: CORER */ +#define TPB_BULK_DWRR_WB_QUANTA_QUANTA_S 0 +#define TPB_BULK_DWRR_WB_QUANTA_QUANTA_M MAKEMASK(0x7FF, 0) +#define TPB_BULK_DWRR_WB_SAT 0x00099354 /* Reset Source: CORER */ +#define TPB_BULK_DWRR_WB_SAT_SATURATION_S 0 +#define TPB_BULK_DWRR_WB_SAT_SATURATION_M MAKEMASK(0x1FFFF, 0) +#define TPB_GLDCB_TCB_WB_SP 0x0009966C /* Reset Source: CORER */ +#define TPB_GLDCB_TCB_WB_SP_WB_SP_S 0 +#define TPB_GLDCB_TCB_WB_SP_WB_SP_M BIT(0) +#define TPB_GLTCB_CREDIT_EXP_CTL 0x00099664 /* Reset Source: CORER */ +#define TPB_GLTCB_CREDIT_EXP_CTL_EN_S 0 +#define TPB_GLTCB_CREDIT_EXP_CTL_EN_M BIT(0) +#define TPB_GLTCB_CREDIT_EXP_CTL_MIN_PKT_S 1 +#define TPB_GLTCB_CREDIT_EXP_CTL_MIN_PKT_M MAKEMASK(0x1FF, 1) +#define TPB_LL_DWRR_REG_QUANTA 0x00099348 /* Reset Source: CORER */ +#define TPB_LL_DWRR_REG_QUANTA_QUANTA_S 0 +#define TPB_LL_DWRR_REG_QUANTA_QUANTA_M MAKEMASK(0x7FF, 0) +#define TPB_LL_DWRR_REG_SAT 0x00099358 /* Reset Source: CORER */ +#define TPB_LL_DWRR_REG_SAT_SATURATION_S 0 +#define TPB_LL_DWRR_REG_SAT_SATURATION_M MAKEMASK(0x1FFFF, 0) +#define TPB_LL_DWRR_WB_QUANTA 0x0009934C /* Reset Source: CORER */ +#define TPB_LL_DWRR_WB_QUANTA_QUANTA_S 0 +#define TPB_LL_DWRR_WB_QUANTA_QUANTA_M MAKEMASK(0x7FF, 0) +#define TPB_LL_DWRR_WB_SAT 0x0009935C /* Reset Source: CORER */ +#define TPB_LL_DWRR_WB_SAT_SATURATION_S 0 +#define TPB_LL_DWRR_WB_SAT_SATURATION_M 
MAKEMASK(0x1FFFF, 0) +#define TPB_PRTDCB_TCB_DWRR_CREDITS 0x000991C0 /* Reset Source: CORER */ +#define TPB_PRTDCB_TCB_DWRR_CREDITS_CREDITS_S 0 +#define TPB_PRTDCB_TCB_DWRR_CREDITS_CREDITS_M MAKEMASK(0x3FFFF, 0) +#define TPB_PRTDCB_TCB_DWRR_QUANTA 0x00099220 /* Reset Source: CORER */ +#define TPB_PRTDCB_TCB_DWRR_QUANTA_QUANTA_S 0 +#define TPB_PRTDCB_TCB_DWRR_QUANTA_QUANTA_M MAKEMASK(0x7FF, 0) +#define TPB_PRTDCB_TCB_DWRR_SAT 0x00099260 /* Reset Source: CORER */ +#define TPB_PRTDCB_TCB_DWRR_SAT_SATURATION_S 0 +#define TPB_PRTDCB_TCB_DWRR_SAT_SATURATION_M MAKEMASK(0x1FFFF, 0) +#define TPB_PRTTCB_BULK_DWRR_REG_CREDITS 0x000992A0 /* Reset Source: CORER */ +#define TPB_PRTTCB_BULK_DWRR_REG_CREDITS_CREDITS_S 0 +#define TPB_PRTTCB_BULK_DWRR_REG_CREDITS_CREDITS_M MAKEMASK(0x3FFFF, 0) +#define TPB_PRTTCB_BULK_DWRR_WB_CREDITS 0x000992C0 /* Reset Source: CORER */ +#define TPB_PRTTCB_BULK_DWRR_WB_CREDITS_CREDITS_S 0 +#define TPB_PRTTCB_BULK_DWRR_WB_CREDITS_CREDITS_M MAKEMASK(0x3FFFF, 0) +#define TPB_PRTTCB_CREDIT_EXP 0x00099644 /* Reset Source: CORER */ +#define TPB_PRTTCB_CREDIT_EXP_EXPANSION_S 0 +#define TPB_PRTTCB_CREDIT_EXP_EXPANSION_M MAKEMASK(0xFF, 0) +#define TPB_PRTTCB_LL_DWRR_REG_CREDITS 0x00099300 /* Reset Source: CORER */ +#define TPB_PRTTCB_LL_DWRR_REG_CREDITS_CREDITS_S 0 +#define TPB_PRTTCB_LL_DWRR_REG_CREDITS_CREDITS_M MAKEMASK(0x3FFFF, 0) +#define TPB_PRTTCB_LL_DWRR_WB_CREDITS 0x00099320 /* Reset Source: CORER */ +#define TPB_PRTTCB_LL_DWRR_WB_CREDITS_CREDITS_S 0 +#define TPB_PRTTCB_LL_DWRR_WB_CREDITS_CREDITS_M MAKEMASK(0x3FFFF, 0) +#define TPB_WB_RL_TC_CFG(_i) (0x00099360 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ +#define TPB_WB_RL_TC_CFG_MAX_INDEX 31 +#define TPB_WB_RL_TC_CFG_TOKENS_S 0 +#define TPB_WB_RL_TC_CFG_TOKENS_M MAKEMASK(0xFFF, 0) +#define TPB_WB_RL_TC_CFG_BURST_SIZE_S 12 +#define TPB_WB_RL_TC_CFG_BURST_SIZE_M MAKEMASK(0x3FF, 12) +#define TPB_WB_RL_TC_STAT(_i) (0x000993E0 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ +#define TPB_WB_RL_TC_STAT_MAX_INDEX 31 +#define TPB_WB_RL_TC_STAT_BUCKET_S 0 +#define TPB_WB_RL_TC_STAT_BUCKET_M MAKEMASK(0x1FFFF, 0) +#define GL_ACLEXT_CDMD_L1SEL(_i) (0x00210054 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_ACLEXT_CDMD_L1SEL_MAX_INDEX 2 +#define GL_ACLEXT_CDMD_L1SEL_RX_SEL_S 0 +#define GL_ACLEXT_CDMD_L1SEL_RX_SEL_M MAKEMASK(0x1F, 0) +#define GL_ACLEXT_CDMD_L1SEL_TX_SEL_S 8 +#define GL_ACLEXT_CDMD_L1SEL_TX_SEL_M MAKEMASK(0x1F, 8) +#define GL_ACLEXT_CDMD_L1SEL_AUX0_SEL_S 16 +#define GL_ACLEXT_CDMD_L1SEL_AUX0_SEL_M MAKEMASK(0x1F, 16) +#define GL_ACLEXT_CDMD_L1SEL_AUX1_SEL_S 24 +#define GL_ACLEXT_CDMD_L1SEL_AUX1_SEL_M MAKEMASK(0x1F, 24) +#define GL_ACLEXT_CDMD_L1SEL_BIDIR_ENA_S 30 +#define GL_ACLEXT_CDMD_L1SEL_BIDIR_ENA_M MAKEMASK(0x3, 30) +#define GL_ACLEXT_CTLTBL_L2ADDR(_i) (0x00210084 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_ACLEXT_CTLTBL_L2ADDR_MAX_INDEX 2 +#define GL_ACLEXT_CTLTBL_L2ADDR_LINE_OFF_S 0 +#define GL_ACLEXT_CTLTBL_L2ADDR_LINE_OFF_M MAKEMASK(0x7, 0) +#define GL_ACLEXT_CTLTBL_L2ADDR_LINE_IDX_S 8 +#define GL_ACLEXT_CTLTBL_L2ADDR_LINE_IDX_M MAKEMASK(0x7, 8) +#define GL_ACLEXT_CTLTBL_L2ADDR_AUTO_INC_S 31 +#define GL_ACLEXT_CTLTBL_L2ADDR_AUTO_INC_M BIT(31) +#define GL_ACLEXT_CTLTBL_L2DATA(_i) (0x00210090 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_ACLEXT_CTLTBL_L2DATA_MAX_INDEX 2 +#define GL_ACLEXT_CTLTBL_L2DATA_DATA_S 0 +#define GL_ACLEXT_CTLTBL_L2DATA_DATA_M MAKEMASK(0xFFFFFFFF, 0) +#define GL_ACLEXT_DFLT_L2PRFL(_i) (0x00210138 + ((_i) * 4)) 
/* _i=0...2 */ /* Reset Source: CORER */ +#define GL_ACLEXT_DFLT_L2PRFL_MAX_INDEX 2 +#define GL_ACLEXT_DFLT_L2PRFL_DFLT_PRFL_S 0 +#define GL_ACLEXT_DFLT_L2PRFL_DFLT_PRFL_M MAKEMASK(0xFFFF, 0) +#define GL_ACLEXT_DFLT_L2PRFL_ACL(_i) (0x00393800 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_ACLEXT_DFLT_L2PRFL_ACL_MAX_INDEX 2 +#define GL_ACLEXT_DFLT_L2PRFL_ACL_DFLT_PRFL_S 0 +#define GL_ACLEXT_DFLT_L2PRFL_ACL_DFLT_PRFL_M MAKEMASK(0xFFFF, 0) +#define GL_ACLEXT_FLGS_L1SEL0_1(_i) (0x0021006C + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_ACLEXT_FLGS_L1SEL0_1_MAX_INDEX 2 +#define GL_ACLEXT_FLGS_L1SEL0_1_FLS0_S 0 +#define GL_ACLEXT_FLGS_L1SEL0_1_FLS0_M MAKEMASK(0x1FF, 0) +#define GL_ACLEXT_FLGS_L1SEL0_1_FLS1_S 16 +#define GL_ACLEXT_FLGS_L1SEL0_1_FLS1_M MAKEMASK(0x1FF, 16) +#define GL_ACLEXT_FLGS_L1SEL2_3(_i) (0x00210078 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_ACLEXT_FLGS_L1SEL2_3_MAX_INDEX 2 +#define GL_ACLEXT_FLGS_L1SEL2_3_FLS2_S 0 +#define GL_ACLEXT_FLGS_L1SEL2_3_FLS2_M MAKEMASK(0x1FF, 0) +#define GL_ACLEXT_FLGS_L1SEL2_3_FLS3_S 16 +#define GL_ACLEXT_FLGS_L1SEL2_3_FLS3_M MAKEMASK(0x1FF, 16) +#define GL_ACLEXT_FLGS_L1TBL(_i) (0x00210060 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_ACLEXT_FLGS_L1TBL_MAX_INDEX 2 +#define GL_ACLEXT_FLGS_L1TBL_LSB_S 0 +#define GL_ACLEXT_FLGS_L1TBL_LSB_M MAKEMASK(0xFFFF, 0) +#define GL_ACLEXT_FLGS_L1TBL_MSB_S 16 +#define GL_ACLEXT_FLGS_L1TBL_MSB_M MAKEMASK(0xFFFF, 16) +#define GL_ACLEXT_FORCE_L1CDID(_i) (0x00210018 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_ACLEXT_FORCE_L1CDID_MAX_INDEX 2 +#define GL_ACLEXT_FORCE_L1CDID_STATIC_CDID_S 0 +#define GL_ACLEXT_FORCE_L1CDID_STATIC_CDID_M MAKEMASK(0xF, 0) +#define GL_ACLEXT_FORCE_L1CDID_STATIC_CDID_EN_S 31 +#define GL_ACLEXT_FORCE_L1CDID_STATIC_CDID_EN_M BIT(31) +#define GL_ACLEXT_FORCE_PID(_i) (0x00210000 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_ACLEXT_FORCE_PID_MAX_INDEX 2 +#define GL_ACLEXT_FORCE_PID_STATIC_PID_S 0 +#define GL_ACLEXT_FORCE_PID_STATIC_PID_M MAKEMASK(0xFFFF, 0) +#define GL_ACLEXT_FORCE_PID_STATIC_PID_EN_S 31 +#define GL_ACLEXT_FORCE_PID_STATIC_PID_EN_M BIT(31) +#define GL_ACLEXT_K2N_L2ADDR(_i) (0x00210144 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_ACLEXT_K2N_L2ADDR_MAX_INDEX 2 +#define GL_ACLEXT_K2N_L2ADDR_LINE_IDX_S 0 +#define GL_ACLEXT_K2N_L2ADDR_LINE_IDX_M MAKEMASK(0x7F, 0) +#define GL_ACLEXT_K2N_L2ADDR_AUTO_INC_S 31 +#define GL_ACLEXT_K2N_L2ADDR_AUTO_INC_M BIT(31) +#define GL_ACLEXT_K2N_L2DATA(_i) (0x00210150 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_ACLEXT_K2N_L2DATA_MAX_INDEX 2 +#define GL_ACLEXT_K2N_L2DATA_DATA0_S 0 +#define GL_ACLEXT_K2N_L2DATA_DATA0_M MAKEMASK(0xFF, 0) +#define GL_ACLEXT_K2N_L2DATA_DATA1_S 8 +#define GL_ACLEXT_K2N_L2DATA_DATA1_M MAKEMASK(0xFF, 8) +#define GL_ACLEXT_K2N_L2DATA_DATA2_S 16 +#define GL_ACLEXT_K2N_L2DATA_DATA2_M MAKEMASK(0xFF, 16) +#define GL_ACLEXT_K2N_L2DATA_DATA3_S 24 +#define GL_ACLEXT_K2N_L2DATA_DATA3_M MAKEMASK(0xFF, 24) +#define GL_ACLEXT_L2_PMASK0(_i) (0x002100FC + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_ACLEXT_L2_PMASK0_MAX_INDEX 2 +#define GL_ACLEXT_L2_PMASK0_BITMASK_S 0 +#define GL_ACLEXT_L2_PMASK0_BITMASK_M MAKEMASK(0xFFFFFFFF, 0) +#define GL_ACLEXT_L2_PMASK1(_i) (0x00210108 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_ACLEXT_L2_PMASK1_MAX_INDEX 2 +#define GL_ACLEXT_L2_PMASK1_BITMASK_S 0 +#define GL_ACLEXT_L2_PMASK1_BITMASK_M 
MAKEMASK(0xFFFF, 0) +#define GL_ACLEXT_L2_TMASK0(_i) (0x00210498 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_ACLEXT_L2_TMASK0_MAX_INDEX 2 +#define GL_ACLEXT_L2_TMASK0_BITMASK_S 0 +#define GL_ACLEXT_L2_TMASK0_BITMASK_M MAKEMASK(0xFFFFFFFF, 0) +#define GL_ACLEXT_L2_TMASK1(_i) (0x002104A4 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_ACLEXT_L2_TMASK1_MAX_INDEX 2 +#define GL_ACLEXT_L2_TMASK1_BITMASK_S 0 +#define GL_ACLEXT_L2_TMASK1_BITMASK_M MAKEMASK(0xFF, 0) +#define GL_ACLEXT_L2BMP0_3(_i) (0x002100A8 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_ACLEXT_L2BMP0_3_MAX_INDEX 2 +#define GL_ACLEXT_L2BMP0_3_BMP0_S 0 +#define GL_ACLEXT_L2BMP0_3_BMP0_M MAKEMASK(0xFF, 0) +#define GL_ACLEXT_L2BMP0_3_BMP1_S 8 +#define GL_ACLEXT_L2BMP0_3_BMP1_M MAKEMASK(0xFF, 8) +#define GL_ACLEXT_L2BMP0_3_BMP2_S 16 +#define GL_ACLEXT_L2BMP0_3_BMP2_M MAKEMASK(0xFF, 16) +#define GL_ACLEXT_L2BMP0_3_BMP3_S 24 +#define GL_ACLEXT_L2BMP0_3_BMP3_M MAKEMASK(0xFF, 24) +#define GL_ACLEXT_L2BMP4_7(_i) (0x002100B4 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_ACLEXT_L2BMP4_7_MAX_INDEX 2 +#define GL_ACLEXT_L2BMP4_7_BMP4_S 0 +#define GL_ACLEXT_L2BMP4_7_BMP4_M MAKEMASK(0xFF, 0) +#define GL_ACLEXT_L2BMP4_7_BMP5_S 8 +#define GL_ACLEXT_L2BMP4_7_BMP5_M MAKEMASK(0xFF, 8) +#define GL_ACLEXT_L2BMP4_7_BMP6_S 16 +#define GL_ACLEXT_L2BMP4_7_BMP6_M MAKEMASK(0xFF, 16) +#define GL_ACLEXT_L2BMP4_7_BMP7_S 24 +#define GL_ACLEXT_L2BMP4_7_BMP7_M MAKEMASK(0xFF, 24) +#define GL_ACLEXT_L2PRTMOD(_i) (0x0021009C + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_ACLEXT_L2PRTMOD_MAX_INDEX 2 +#define GL_ACLEXT_L2PRTMOD_XLT1_S 0 +#define GL_ACLEXT_L2PRTMOD_XLT1_M MAKEMASK(0x3, 0) +#define GL_ACLEXT_L2PRTMOD_XLT2_S 8 +#define GL_ACLEXT_L2PRTMOD_XLT2_M MAKEMASK(0x3, 8) +#define GL_ACLEXT_N2N_L2ADDR(_i) (0x0021015C + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_ACLEXT_N2N_L2ADDR_MAX_INDEX 2 +#define GL_ACLEXT_N2N_L2ADDR_LINE_IDX_S 0 +#define GL_ACLEXT_N2N_L2ADDR_LINE_IDX_M MAKEMASK(0x3F, 0) +#define GL_ACLEXT_N2N_L2ADDR_AUTO_INC_S 31 +#define GL_ACLEXT_N2N_L2ADDR_AUTO_INC_M BIT(31) +#define GL_ACLEXT_N2N_L2DATA(_i) (0x00210168 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_ACLEXT_N2N_L2DATA_MAX_INDEX 2 +#define GL_ACLEXT_N2N_L2DATA_DATA0_S 0 +#define GL_ACLEXT_N2N_L2DATA_DATA0_M MAKEMASK(0xFF, 0) +#define GL_ACLEXT_N2N_L2DATA_DATA1_S 8 +#define GL_ACLEXT_N2N_L2DATA_DATA1_M MAKEMASK(0xFF, 8) +#define GL_ACLEXT_N2N_L2DATA_DATA2_S 16 +#define GL_ACLEXT_N2N_L2DATA_DATA2_M MAKEMASK(0xFF, 16) +#define GL_ACLEXT_N2N_L2DATA_DATA3_S 24 +#define GL_ACLEXT_N2N_L2DATA_DATA3_M MAKEMASK(0xFF, 24) +#define GL_ACLEXT_P2P_L1ADDR(_i) (0x00210024 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_ACLEXT_P2P_L1ADDR_MAX_INDEX 2 +#define GL_ACLEXT_P2P_L1ADDR_LINE_IDX_S 0 +#define GL_ACLEXT_P2P_L1ADDR_LINE_IDX_M BIT(0) +#define GL_ACLEXT_P2P_L1ADDR_AUTO_INC_S 31 +#define GL_ACLEXT_P2P_L1ADDR_AUTO_INC_M BIT(31) +#define GL_ACLEXT_P2P_L1DATA(_i) (0x00210030 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_ACLEXT_P2P_L1DATA_MAX_INDEX 2 +#define GL_ACLEXT_P2P_L1DATA_DATA_S 0 +#define GL_ACLEXT_P2P_L1DATA_DATA_M MAKEMASK(0xFFFFFFFF, 0) +#define GL_ACLEXT_PID_L2GKTYPE(_i) (0x002100F0 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_ACLEXT_PID_L2GKTYPE_MAX_INDEX 2 +#define GL_ACLEXT_PID_L2GKTYPE_PID_GKTYPE_S 0 +#define GL_ACLEXT_PID_L2GKTYPE_PID_GKTYPE_M MAKEMASK(0x3, 0) +#define GL_ACLEXT_PLVL_SEL(_i) 
(0x0021000C + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_ACLEXT_PLVL_SEL_MAX_INDEX 2 +#define GL_ACLEXT_PLVL_SEL_PLVL_SEL_S 0 +#define GL_ACLEXT_PLVL_SEL_PLVL_SEL_M BIT(0) +#define GL_ACLEXT_TCAM_L2ADDR(_i) (0x00210114 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_ACLEXT_TCAM_L2ADDR_MAX_INDEX 2 +#define GL_ACLEXT_TCAM_L2ADDR_LINE_IDX_S 0 +#define GL_ACLEXT_TCAM_L2ADDR_LINE_IDX_M MAKEMASK(0x3FF, 0) +#define GL_ACLEXT_TCAM_L2ADDR_AUTO_INC_S 31 +#define GL_ACLEXT_TCAM_L2ADDR_AUTO_INC_M BIT(31) +#define GL_ACLEXT_TCAM_L2DATALSB(_i) (0x00210120 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_ACLEXT_TCAM_L2DATALSB_MAX_INDEX 2 +#define GL_ACLEXT_TCAM_L2DATALSB_DATALSB_S 0 +#define GL_ACLEXT_TCAM_L2DATALSB_DATALSB_M MAKEMASK(0xFFFFFFFF, 0) +#define GL_ACLEXT_TCAM_L2DATAMSB(_i) (0x0021012C + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_ACLEXT_TCAM_L2DATAMSB_MAX_INDEX 2 +#define GL_ACLEXT_TCAM_L2DATAMSB_DATAMSB_S 0 +#define GL_ACLEXT_TCAM_L2DATAMSB_DATAMSB_M MAKEMASK(0xFF, 0) +#define GL_ACLEXT_XLT0_L1ADDR(_i) (0x0021003C + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_ACLEXT_XLT0_L1ADDR_MAX_INDEX 2 +#define GL_ACLEXT_XLT0_L1ADDR_LINE_IDX_S 0 +#define GL_ACLEXT_XLT0_L1ADDR_LINE_IDX_M MAKEMASK(0xFF, 0) +#define GL_ACLEXT_XLT0_L1ADDR_AUTO_INC_S 31 +#define GL_ACLEXT_XLT0_L1ADDR_AUTO_INC_M BIT(31) +#define GL_ACLEXT_XLT0_L1DATA(_i) (0x00210048 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_ACLEXT_XLT0_L1DATA_MAX_INDEX 2 +#define GL_ACLEXT_XLT0_L1DATA_DATA_S 0 +#define GL_ACLEXT_XLT0_L1DATA_DATA_M MAKEMASK(0xFFFFFFFF, 0) +#define GL_ACLEXT_XLT1_L2ADDR(_i) (0x002100C0 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_ACLEXT_XLT1_L2ADDR_MAX_INDEX 2 +#define GL_ACLEXT_XLT1_L2ADDR_LINE_IDX_S 0 +#define GL_ACLEXT_XLT1_L2ADDR_LINE_IDX_M MAKEMASK(0x7FF, 0) +#define GL_ACLEXT_XLT1_L2ADDR_AUTO_INC_S 31 +#define GL_ACLEXT_XLT1_L2ADDR_AUTO_INC_M BIT(31) +#define GL_ACLEXT_XLT1_L2DATA(_i) (0x002100CC + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_ACLEXT_XLT1_L2DATA_MAX_INDEX 2 +#define GL_ACLEXT_XLT1_L2DATA_DATA_S 0 +#define GL_ACLEXT_XLT1_L2DATA_DATA_M MAKEMASK(0xFFFFFFFF, 0) +#define GL_ACLEXT_XLT2_L2ADDR(_i) (0x002100D8 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_ACLEXT_XLT2_L2ADDR_MAX_INDEX 2 +#define GL_ACLEXT_XLT2_L2ADDR_LINE_IDX_S 0 +#define GL_ACLEXT_XLT2_L2ADDR_LINE_IDX_M MAKEMASK(0x1FF, 0) +#define GL_ACLEXT_XLT2_L2ADDR_AUTO_INC_S 31 +#define GL_ACLEXT_XLT2_L2ADDR_AUTO_INC_M BIT(31) +#define GL_ACLEXT_XLT2_L2DATA(_i) (0x002100E4 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_ACLEXT_XLT2_L2DATA_MAX_INDEX 2 +#define GL_ACLEXT_XLT2_L2DATA_DATA_S 0 +#define GL_ACLEXT_XLT2_L2DATA_DATA_M MAKEMASK(0xFFFFFFFF, 0) +#define GL_PREEXT_CDMD_L1SEL(_i) (0x0020F054 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_PREEXT_CDMD_L1SEL_MAX_INDEX 2 +#define GL_PREEXT_CDMD_L1SEL_RX_SEL_S 0 +#define GL_PREEXT_CDMD_L1SEL_RX_SEL_M MAKEMASK(0x1F, 0) +#define GL_PREEXT_CDMD_L1SEL_TX_SEL_S 8 +#define GL_PREEXT_CDMD_L1SEL_TX_SEL_M MAKEMASK(0x1F, 8) +#define GL_PREEXT_CDMD_L1SEL_AUX0_SEL_S 16 +#define GL_PREEXT_CDMD_L1SEL_AUX0_SEL_M MAKEMASK(0x1F, 16) +#define GL_PREEXT_CDMD_L1SEL_AUX1_SEL_S 24 +#define GL_PREEXT_CDMD_L1SEL_AUX1_SEL_M MAKEMASK(0x1F, 24) +#define GL_PREEXT_CDMD_L1SEL_BIDIR_ENA_S 30 +#define GL_PREEXT_CDMD_L1SEL_BIDIR_ENA_M MAKEMASK(0x3, 30) +#define GL_PREEXT_CTLTBL_L2ADDR(_i) (0x0020F084 + ((_i) * 4)) 
/* _i=0...2 */ /* Reset Source: CORER */ +#define GL_PREEXT_CTLTBL_L2ADDR_MAX_INDEX 2 +#define GL_PREEXT_CTLTBL_L2ADDR_LINE_OFF_S 0 +#define GL_PREEXT_CTLTBL_L2ADDR_LINE_OFF_M MAKEMASK(0x7, 0) +#define GL_PREEXT_CTLTBL_L2ADDR_LINE_IDX_S 8 +#define GL_PREEXT_CTLTBL_L2ADDR_LINE_IDX_M MAKEMASK(0x7, 8) +#define GL_PREEXT_CTLTBL_L2ADDR_AUTO_INC_S 31 +#define GL_PREEXT_CTLTBL_L2ADDR_AUTO_INC_M BIT(31) +#define GL_PREEXT_CTLTBL_L2DATA(_i) (0x0020F090 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_PREEXT_CTLTBL_L2DATA_MAX_INDEX 2 +#define GL_PREEXT_CTLTBL_L2DATA_DATA_S 0 +#define GL_PREEXT_CTLTBL_L2DATA_DATA_M MAKEMASK(0xFFFFFFFF, 0) +#define GL_PREEXT_DFLT_L2PRFL(_i) (0x0020F138 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_PREEXT_DFLT_L2PRFL_MAX_INDEX 2 +#define GL_PREEXT_DFLT_L2PRFL_DFLT_PRFL_S 0 +#define GL_PREEXT_DFLT_L2PRFL_DFLT_PRFL_M MAKEMASK(0xFFFF, 0) +#define GL_PREEXT_FLGS_L1SEL0_1(_i) (0x0020F06C + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_PREEXT_FLGS_L1SEL0_1_MAX_INDEX 2 +#define GL_PREEXT_FLGS_L1SEL0_1_FLS0_S 0 +#define GL_PREEXT_FLGS_L1SEL0_1_FLS0_M MAKEMASK(0x1FF, 0) +#define GL_PREEXT_FLGS_L1SEL0_1_FLS1_S 16 +#define GL_PREEXT_FLGS_L1SEL0_1_FLS1_M MAKEMASK(0x1FF, 16) +#define GL_PREEXT_FLGS_L1SEL2_3(_i) (0x0020F078 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_PREEXT_FLGS_L1SEL2_3_MAX_INDEX 2 +#define GL_PREEXT_FLGS_L1SEL2_3_FLS2_S 0 +#define GL_PREEXT_FLGS_L1SEL2_3_FLS2_M MAKEMASK(0x1FF, 0) +#define GL_PREEXT_FLGS_L1SEL2_3_FLS3_S 16 +#define GL_PREEXT_FLGS_L1SEL2_3_FLS3_M MAKEMASK(0x1FF, 16) +#define GL_PREEXT_FLGS_L1TBL(_i) (0x0020F060 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_PREEXT_FLGS_L1TBL_MAX_INDEX 2 +#define GL_PREEXT_FLGS_L1TBL_LSB_S 0 +#define GL_PREEXT_FLGS_L1TBL_LSB_M MAKEMASK(0xFFFF, 0) +#define GL_PREEXT_FLGS_L1TBL_MSB_S 16 +#define GL_PREEXT_FLGS_L1TBL_MSB_M MAKEMASK(0xFFFF, 16) +#define GL_PREEXT_FORCE_L1CDID(_i) (0x0020F018 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_PREEXT_FORCE_L1CDID_MAX_INDEX 2 +#define GL_PREEXT_FORCE_L1CDID_STATIC_CDID_S 0 +#define GL_PREEXT_FORCE_L1CDID_STATIC_CDID_M MAKEMASK(0xF, 0) +#define GL_PREEXT_FORCE_L1CDID_STATIC_CDID_EN_S 31 +#define GL_PREEXT_FORCE_L1CDID_STATIC_CDID_EN_M BIT(31) +#define GL_PREEXT_FORCE_PID(_i) (0x0020F000 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_PREEXT_FORCE_PID_MAX_INDEX 2 +#define GL_PREEXT_FORCE_PID_STATIC_PID_S 0 +#define GL_PREEXT_FORCE_PID_STATIC_PID_M MAKEMASK(0xFFFF, 0) +#define GL_PREEXT_FORCE_PID_STATIC_PID_EN_S 31 +#define GL_PREEXT_FORCE_PID_STATIC_PID_EN_M BIT(31) +#define GL_PREEXT_K2N_L2ADDR(_i) (0x0020F144 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_PREEXT_K2N_L2ADDR_MAX_INDEX 2 +#define GL_PREEXT_K2N_L2ADDR_LINE_IDX_S 0 +#define GL_PREEXT_K2N_L2ADDR_LINE_IDX_M MAKEMASK(0x7F, 0) +#define GL_PREEXT_K2N_L2ADDR_AUTO_INC_S 31 +#define GL_PREEXT_K2N_L2ADDR_AUTO_INC_M BIT(31) +#define GL_PREEXT_K2N_L2DATA(_i) (0x0020F150 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_PREEXT_K2N_L2DATA_MAX_INDEX 2 +#define GL_PREEXT_K2N_L2DATA_DATA0_S 0 +#define GL_PREEXT_K2N_L2DATA_DATA0_M MAKEMASK(0xFF, 0) +#define GL_PREEXT_K2N_L2DATA_DATA1_S 8 +#define GL_PREEXT_K2N_L2DATA_DATA1_M MAKEMASK(0xFF, 8) +#define GL_PREEXT_K2N_L2DATA_DATA2_S 16 +#define GL_PREEXT_K2N_L2DATA_DATA2_M MAKEMASK(0xFF, 16) +#define GL_PREEXT_K2N_L2DATA_DATA3_S 24 +#define GL_PREEXT_K2N_L2DATA_DATA3_M MAKEMASK(0xFF, 24) +#define 
GL_PREEXT_L2_PMASK0(_i) (0x0020F0FC + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_PREEXT_L2_PMASK0_MAX_INDEX 2 +#define GL_PREEXT_L2_PMASK0_BITMASK_S 0 +#define GL_PREEXT_L2_PMASK0_BITMASK_M MAKEMASK(0xFFFFFFFF, 0) +#define GL_PREEXT_L2_PMASK1(_i) (0x0020F108 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_PREEXT_L2_PMASK1_MAX_INDEX 2 +#define GL_PREEXT_L2_PMASK1_BITMASK_S 0 +#define GL_PREEXT_L2_PMASK1_BITMASK_M MAKEMASK(0xFFFF, 0) +#define GL_PREEXT_L2_TMASK0(_i) (0x0020F498 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_PREEXT_L2_TMASK0_MAX_INDEX 2 +#define GL_PREEXT_L2_TMASK0_BITMASK_S 0 +#define GL_PREEXT_L2_TMASK0_BITMASK_M MAKEMASK(0xFFFFFFFF, 0) +#define GL_PREEXT_L2_TMASK1(_i) (0x0020F4A4 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_PREEXT_L2_TMASK1_MAX_INDEX 2 +#define GL_PREEXT_L2_TMASK1_BITMASK_S 0 +#define GL_PREEXT_L2_TMASK1_BITMASK_M MAKEMASK(0xFF, 0) +#define GL_PREEXT_L2BMP0_3(_i) (0x0020F0A8 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_PREEXT_L2BMP0_3_MAX_INDEX 2 +#define GL_PREEXT_L2BMP0_3_BMP0_S 0 +#define GL_PREEXT_L2BMP0_3_BMP0_M MAKEMASK(0xFF, 0) +#define GL_PREEXT_L2BMP0_3_BMP1_S 8 +#define GL_PREEXT_L2BMP0_3_BMP1_M MAKEMASK(0xFF, 8) +#define GL_PREEXT_L2BMP0_3_BMP2_S 16 +#define GL_PREEXT_L2BMP0_3_BMP2_M MAKEMASK(0xFF, 16) +#define GL_PREEXT_L2BMP0_3_BMP3_S 24 +#define GL_PREEXT_L2BMP0_3_BMP3_M MAKEMASK(0xFF, 24) +#define GL_PREEXT_L2BMP4_7(_i) (0x0020F0B4 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_PREEXT_L2BMP4_7_MAX_INDEX 2 +#define GL_PREEXT_L2BMP4_7_BMP4_S 0 +#define GL_PREEXT_L2BMP4_7_BMP4_M MAKEMASK(0xFF, 0) +#define GL_PREEXT_L2BMP4_7_BMP5_S 8 +#define GL_PREEXT_L2BMP4_7_BMP5_M MAKEMASK(0xFF, 8) +#define GL_PREEXT_L2BMP4_7_BMP6_S 16 +#define GL_PREEXT_L2BMP4_7_BMP6_M MAKEMASK(0xFF, 16) +#define GL_PREEXT_L2BMP4_7_BMP7_S 24 +#define GL_PREEXT_L2BMP4_7_BMP7_M MAKEMASK(0xFF, 24) +#define GL_PREEXT_L2PRTMOD(_i) (0x0020F09C + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_PREEXT_L2PRTMOD_MAX_INDEX 2 +#define GL_PREEXT_L2PRTMOD_XLT1_S 0 +#define GL_PREEXT_L2PRTMOD_XLT1_M MAKEMASK(0x3, 0) +#define GL_PREEXT_L2PRTMOD_XLT2_S 8 +#define GL_PREEXT_L2PRTMOD_XLT2_M MAKEMASK(0x3, 8) +#define GL_PREEXT_N2N_L2ADDR(_i) (0x0020F15C + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_PREEXT_N2N_L2ADDR_MAX_INDEX 2 +#define GL_PREEXT_N2N_L2ADDR_LINE_IDX_S 0 +#define GL_PREEXT_N2N_L2ADDR_LINE_IDX_M MAKEMASK(0x3F, 0) +#define GL_PREEXT_N2N_L2ADDR_AUTO_INC_S 31 +#define GL_PREEXT_N2N_L2ADDR_AUTO_INC_M BIT(31) +#define GL_PREEXT_N2N_L2DATA(_i) (0x0020F168 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_PREEXT_N2N_L2DATA_MAX_INDEX 2 +#define GL_PREEXT_N2N_L2DATA_DATA0_S 0 +#define GL_PREEXT_N2N_L2DATA_DATA0_M MAKEMASK(0xFF, 0) +#define GL_PREEXT_N2N_L2DATA_DATA1_S 8 +#define GL_PREEXT_N2N_L2DATA_DATA1_M MAKEMASK(0xFF, 8) +#define GL_PREEXT_N2N_L2DATA_DATA2_S 16 +#define GL_PREEXT_N2N_L2DATA_DATA2_M MAKEMASK(0xFF, 16) +#define GL_PREEXT_N2N_L2DATA_DATA3_S 24 +#define GL_PREEXT_N2N_L2DATA_DATA3_M MAKEMASK(0xFF, 24) +#define GL_PREEXT_P2P_L1ADDR(_i) (0x0020F024 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_PREEXT_P2P_L1ADDR_MAX_INDEX 2 +#define GL_PREEXT_P2P_L1ADDR_LINE_IDX_S 0 +#define GL_PREEXT_P2P_L1ADDR_LINE_IDX_M BIT(0) +#define GL_PREEXT_P2P_L1ADDR_AUTO_INC_S 31 +#define GL_PREEXT_P2P_L1ADDR_AUTO_INC_M BIT(31) +#define GL_PREEXT_P2P_L1DATA(_i) (0x0020F030 + ((_i) * 4)) /* _i=0...2 */ 
/* Reset Source: CORER */ +#define GL_PREEXT_P2P_L1DATA_MAX_INDEX 2 +#define GL_PREEXT_P2P_L1DATA_DATA_S 0 +#define GL_PREEXT_P2P_L1DATA_DATA_M MAKEMASK(0xFFFFFFFF, 0) +#define GL_PREEXT_PID_L2GKTYPE(_i) (0x0020F0F0 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_PREEXT_PID_L2GKTYPE_MAX_INDEX 2 +#define GL_PREEXT_PID_L2GKTYPE_PID_GKTYPE_S 0 +#define GL_PREEXT_PID_L2GKTYPE_PID_GKTYPE_M MAKEMASK(0x3, 0) +#define GL_PREEXT_PLVL_SEL(_i) (0x0020F00C + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_PREEXT_PLVL_SEL_MAX_INDEX 2 +#define GL_PREEXT_PLVL_SEL_PLVL_SEL_S 0 +#define GL_PREEXT_PLVL_SEL_PLVL_SEL_M BIT(0) +#define GL_PREEXT_TCAM_L2ADDR(_i) (0x0020F114 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_PREEXT_TCAM_L2ADDR_MAX_INDEX 2 +#define GL_PREEXT_TCAM_L2ADDR_LINE_IDX_S 0 +#define GL_PREEXT_TCAM_L2ADDR_LINE_IDX_M MAKEMASK(0x3FF, 0) +#define GL_PREEXT_TCAM_L2ADDR_AUTO_INC_S 31 +#define GL_PREEXT_TCAM_L2ADDR_AUTO_INC_M BIT(31) +#define GL_PREEXT_TCAM_L2DATALSB(_i) (0x0020F120 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_PREEXT_TCAM_L2DATALSB_MAX_INDEX 2 +#define GL_PREEXT_TCAM_L2DATALSB_DATALSB_S 0 +#define GL_PREEXT_TCAM_L2DATALSB_DATALSB_M MAKEMASK(0xFFFFFFFF, 0) +#define GL_PREEXT_TCAM_L2DATAMSB(_i) (0x0020F12C + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_PREEXT_TCAM_L2DATAMSB_MAX_INDEX 2 +#define GL_PREEXT_TCAM_L2DATAMSB_DATAMSB_S 0 +#define GL_PREEXT_TCAM_L2DATAMSB_DATAMSB_M MAKEMASK(0xFF, 0) +#define GL_PREEXT_XLT0_L1ADDR(_i) (0x0020F03C + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_PREEXT_XLT0_L1ADDR_MAX_INDEX 2 +#define GL_PREEXT_XLT0_L1ADDR_LINE_IDX_S 0 +#define GL_PREEXT_XLT0_L1ADDR_LINE_IDX_M MAKEMASK(0xFF, 0) +#define GL_PREEXT_XLT0_L1ADDR_AUTO_INC_S 31 +#define GL_PREEXT_XLT0_L1ADDR_AUTO_INC_M BIT(31) +#define GL_PREEXT_XLT0_L1DATA(_i) (0x0020F048 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_PREEXT_XLT0_L1DATA_MAX_INDEX 2 +#define GL_PREEXT_XLT0_L1DATA_DATA_S 0 +#define GL_PREEXT_XLT0_L1DATA_DATA_M MAKEMASK(0xFFFFFFFF, 0) +#define GL_PREEXT_XLT1_L2ADDR(_i) (0x0020F0C0 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_PREEXT_XLT1_L2ADDR_MAX_INDEX 2 +#define GL_PREEXT_XLT1_L2ADDR_LINE_IDX_S 0 +#define GL_PREEXT_XLT1_L2ADDR_LINE_IDX_M MAKEMASK(0x7FF, 0) +#define GL_PREEXT_XLT1_L2ADDR_AUTO_INC_S 31 +#define GL_PREEXT_XLT1_L2ADDR_AUTO_INC_M BIT(31) +#define GL_PREEXT_XLT1_L2DATA(_i) (0x0020F0CC + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_PREEXT_XLT1_L2DATA_MAX_INDEX 2 +#define GL_PREEXT_XLT1_L2DATA_DATA_S 0 +#define GL_PREEXT_XLT1_L2DATA_DATA_M MAKEMASK(0xFFFFFFFF, 0) +#define GL_PREEXT_XLT2_L2ADDR(_i) (0x0020F0D8 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_PREEXT_XLT2_L2ADDR_MAX_INDEX 2 +#define GL_PREEXT_XLT2_L2ADDR_LINE_IDX_S 0 +#define GL_PREEXT_XLT2_L2ADDR_LINE_IDX_M MAKEMASK(0x1FF, 0) +#define GL_PREEXT_XLT2_L2ADDR_AUTO_INC_S 31 +#define GL_PREEXT_XLT2_L2ADDR_AUTO_INC_M BIT(31) +#define GL_PREEXT_XLT2_L2DATA(_i) (0x0020F0E4 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_PREEXT_XLT2_L2DATA_MAX_INDEX 2 +#define GL_PREEXT_XLT2_L2DATA_DATA_S 0 +#define GL_PREEXT_XLT2_L2DATA_DATA_M MAKEMASK(0xFFFFFFFF, 0) +#define GL_PSTEXT_CDMD_L1SEL(_i) (0x0020E054 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_PSTEXT_CDMD_L1SEL_MAX_INDEX 2 +#define GL_PSTEXT_CDMD_L1SEL_RX_SEL_S 0 +#define GL_PSTEXT_CDMD_L1SEL_RX_SEL_M MAKEMASK(0x1F, 0) +#define 
GL_PSTEXT_CDMD_L1SEL_TX_SEL_S 8 +#define GL_PSTEXT_CDMD_L1SEL_TX_SEL_M MAKEMASK(0x1F, 8) +#define GL_PSTEXT_CDMD_L1SEL_AUX0_SEL_S 16 +#define GL_PSTEXT_CDMD_L1SEL_AUX0_SEL_M MAKEMASK(0x1F, 16) +#define GL_PSTEXT_CDMD_L1SEL_AUX1_SEL_S 24 +#define GL_PSTEXT_CDMD_L1SEL_AUX1_SEL_M MAKEMASK(0x1F, 24) +#define GL_PSTEXT_CDMD_L1SEL_BIDIR_ENA_S 30 +#define GL_PSTEXT_CDMD_L1SEL_BIDIR_ENA_M MAKEMASK(0x3, 30) +#define GL_PSTEXT_CTLTBL_L2ADDR(_i) (0x0020E084 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_PSTEXT_CTLTBL_L2ADDR_MAX_INDEX 2 +#define GL_PSTEXT_CTLTBL_L2ADDR_LINE_OFF_S 0 +#define GL_PSTEXT_CTLTBL_L2ADDR_LINE_OFF_M MAKEMASK(0x7, 0) +#define GL_PSTEXT_CTLTBL_L2ADDR_LINE_IDX_S 8 +#define GL_PSTEXT_CTLTBL_L2ADDR_LINE_IDX_M MAKEMASK(0x7, 8) +#define GL_PSTEXT_CTLTBL_L2ADDR_AUTO_INC_S 31 +#define GL_PSTEXT_CTLTBL_L2ADDR_AUTO_INC_M BIT(31) +#define GL_PSTEXT_CTLTBL_L2DATA(_i) (0x0020E090 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_PSTEXT_CTLTBL_L2DATA_MAX_INDEX 2 +#define GL_PSTEXT_CTLTBL_L2DATA_DATA_S 0 +#define GL_PSTEXT_CTLTBL_L2DATA_DATA_M MAKEMASK(0xFFFFFFFF, 0) +#define GL_PSTEXT_DFLT_L2PRFL(_i) (0x0020E138 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_PSTEXT_DFLT_L2PRFL_MAX_INDEX 2 +#define GL_PSTEXT_DFLT_L2PRFL_DFLT_PRFL_S 0 +#define GL_PSTEXT_DFLT_L2PRFL_DFLT_PRFL_M MAKEMASK(0xFFFF, 0) +#define GL_PSTEXT_FL15_BMPLSB(_i) (0x0020E480 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_PSTEXT_FL15_BMPLSB_MAX_INDEX 2 +#define GL_PSTEXT_FL15_BMPLSB_BMPLSB_S 0 +#define GL_PSTEXT_FL15_BMPLSB_BMPLSB_M MAKEMASK(0xFFFFFFFF, 0) +#define GL_PSTEXT_FL15_BMPMSB(_i) (0x0020E48C + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_PSTEXT_FL15_BMPMSB_MAX_INDEX 2 +#define GL_PSTEXT_FL15_BMPMSB_BMPMSB_S 0 +#define GL_PSTEXT_FL15_BMPMSB_BMPMSB_M MAKEMASK(0xFFFFFFFF, 0) +#define GL_PSTEXT_FLGS_L1SEL0_1(_i) (0x0020E06C + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_PSTEXT_FLGS_L1SEL0_1_MAX_INDEX 2 +#define GL_PSTEXT_FLGS_L1SEL0_1_FLS0_S 0 +#define GL_PSTEXT_FLGS_L1SEL0_1_FLS0_M MAKEMASK(0x1FF, 0) +#define GL_PSTEXT_FLGS_L1SEL0_1_FLS1_S 16 +#define GL_PSTEXT_FLGS_L1SEL0_1_FLS1_M MAKEMASK(0x1FF, 16) +#define GL_PSTEXT_FLGS_L1SEL2_3(_i) (0x0020E078 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_PSTEXT_FLGS_L1SEL2_3_MAX_INDEX 2 +#define GL_PSTEXT_FLGS_L1SEL2_3_FLS2_S 0 +#define GL_PSTEXT_FLGS_L1SEL2_3_FLS2_M MAKEMASK(0x1FF, 0) +#define GL_PSTEXT_FLGS_L1SEL2_3_FLS3_S 16 +#define GL_PSTEXT_FLGS_L1SEL2_3_FLS3_M MAKEMASK(0x1FF, 16) +#define GL_PSTEXT_FLGS_L1TBL(_i) (0x0020E060 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_PSTEXT_FLGS_L1TBL_MAX_INDEX 2 +#define GL_PSTEXT_FLGS_L1TBL_LSB_S 0 +#define GL_PSTEXT_FLGS_L1TBL_LSB_M MAKEMASK(0xFFFF, 0) +#define GL_PSTEXT_FLGS_L1TBL_MSB_S 16 +#define GL_PSTEXT_FLGS_L1TBL_MSB_M MAKEMASK(0xFFFF, 16) +#define GL_PSTEXT_FORCE_L1CDID(_i) (0x0020E018 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_PSTEXT_FORCE_L1CDID_MAX_INDEX 2 +#define GL_PSTEXT_FORCE_L1CDID_STATIC_CDID_S 0 +#define GL_PSTEXT_FORCE_L1CDID_STATIC_CDID_M MAKEMASK(0xF, 0) +#define GL_PSTEXT_FORCE_L1CDID_STATIC_CDID_EN_S 31 +#define GL_PSTEXT_FORCE_L1CDID_STATIC_CDID_EN_M BIT(31) +#define GL_PSTEXT_FORCE_PID(_i) (0x0020E000 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_PSTEXT_FORCE_PID_MAX_INDEX 2 +#define GL_PSTEXT_FORCE_PID_STATIC_PID_S 0 +#define GL_PSTEXT_FORCE_PID_STATIC_PID_M MAKEMASK(0xFFFF, 0) +#define 
GL_PSTEXT_FORCE_PID_STATIC_PID_EN_S 31 +#define GL_PSTEXT_FORCE_PID_STATIC_PID_EN_M BIT(31) +#define GL_PSTEXT_K2N_L2ADDR(_i) (0x0020E144 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_PSTEXT_K2N_L2ADDR_MAX_INDEX 2 +#define GL_PSTEXT_K2N_L2ADDR_LINE_IDX_S 0 +#define GL_PSTEXT_K2N_L2ADDR_LINE_IDX_M MAKEMASK(0x7F, 0) +#define GL_PSTEXT_K2N_L2ADDR_AUTO_INC_S 31 +#define GL_PSTEXT_K2N_L2ADDR_AUTO_INC_M BIT(31) +#define GL_PSTEXT_K2N_L2DATA(_i) (0x0020E150 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_PSTEXT_K2N_L2DATA_MAX_INDEX 2 +#define GL_PSTEXT_K2N_L2DATA_DATA0_S 0 +#define GL_PSTEXT_K2N_L2DATA_DATA0_M MAKEMASK(0xFF, 0) +#define GL_PSTEXT_K2N_L2DATA_DATA1_S 8 +#define GL_PSTEXT_K2N_L2DATA_DATA1_M MAKEMASK(0xFF, 8) +#define GL_PSTEXT_K2N_L2DATA_DATA2_S 16 +#define GL_PSTEXT_K2N_L2DATA_DATA2_M MAKEMASK(0xFF, 16) +#define GL_PSTEXT_K2N_L2DATA_DATA3_S 24 +#define GL_PSTEXT_K2N_L2DATA_DATA3_M MAKEMASK(0xFF, 24) +#define GL_PSTEXT_L2_PMASK0(_i) (0x0020E0FC + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_PSTEXT_L2_PMASK0_MAX_INDEX 2 +#define GL_PSTEXT_L2_PMASK0_BITMASK_S 0 +#define GL_PSTEXT_L2_PMASK0_BITMASK_M MAKEMASK(0xFFFFFFFF, 0) +#define GL_PSTEXT_L2_PMASK1(_i) (0x0020E108 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_PSTEXT_L2_PMASK1_MAX_INDEX 2 +#define GL_PSTEXT_L2_PMASK1_BITMASK_S 0 +#define GL_PSTEXT_L2_PMASK1_BITMASK_M MAKEMASK(0xFFFF, 0) +#define GL_PSTEXT_L2_TMASK0(_i) (0x0020E498 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_PSTEXT_L2_TMASK0_MAX_INDEX 2 +#define GL_PSTEXT_L2_TMASK0_BITMASK_S 0 +#define GL_PSTEXT_L2_TMASK0_BITMASK_M MAKEMASK(0xFFFFFFFF, 0) +#define GL_PSTEXT_L2_TMASK1(_i) (0x0020E4A4 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_PSTEXT_L2_TMASK1_MAX_INDEX 2 +#define GL_PSTEXT_L2_TMASK1_BITMASK_S 0 +#define GL_PSTEXT_L2_TMASK1_BITMASK_M MAKEMASK(0xFF, 0) +#define GL_PSTEXT_L2PRTMOD(_i) (0x0020E09C + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_PSTEXT_L2PRTMOD_MAX_INDEX 2 +#define GL_PSTEXT_L2PRTMOD_XLT1_S 0 +#define GL_PSTEXT_L2PRTMOD_XLT1_M MAKEMASK(0x3, 0) +#define GL_PSTEXT_L2PRTMOD_XLT2_S 8 +#define GL_PSTEXT_L2PRTMOD_XLT2_M MAKEMASK(0x3, 8) +#define GL_PSTEXT_N2N_L2ADDR(_i) (0x0020E15C + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_PSTEXT_N2N_L2ADDR_MAX_INDEX 2 +#define GL_PSTEXT_N2N_L2ADDR_LINE_IDX_S 0 +#define GL_PSTEXT_N2N_L2ADDR_LINE_IDX_M MAKEMASK(0x3F, 0) +#define GL_PSTEXT_N2N_L2ADDR_AUTO_INC_S 31 +#define GL_PSTEXT_N2N_L2ADDR_AUTO_INC_M BIT(31) +#define GL_PSTEXT_N2N_L2DATA(_i) (0x0020E168 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_PSTEXT_N2N_L2DATA_MAX_INDEX 2 +#define GL_PSTEXT_N2N_L2DATA_DATA0_S 0 +#define GL_PSTEXT_N2N_L2DATA_DATA0_M MAKEMASK(0xFF, 0) +#define GL_PSTEXT_N2N_L2DATA_DATA1_S 8 +#define GL_PSTEXT_N2N_L2DATA_DATA1_M MAKEMASK(0xFF, 8) +#define GL_PSTEXT_N2N_L2DATA_DATA2_S 16 +#define GL_PSTEXT_N2N_L2DATA_DATA2_M MAKEMASK(0xFF, 16) +#define GL_PSTEXT_N2N_L2DATA_DATA3_S 24 +#define GL_PSTEXT_N2N_L2DATA_DATA3_M MAKEMASK(0xFF, 24) +#define GL_PSTEXT_P2P_L1ADDR(_i) (0x0020E024 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_PSTEXT_P2P_L1ADDR_MAX_INDEX 2 +#define GL_PSTEXT_P2P_L1ADDR_LINE_IDX_S 0 +#define GL_PSTEXT_P2P_L1ADDR_LINE_IDX_M BIT(0) +#define GL_PSTEXT_P2P_L1ADDR_AUTO_INC_S 31 +#define GL_PSTEXT_P2P_L1ADDR_AUTO_INC_M BIT(31) +#define GL_PSTEXT_P2P_L1DATA(_i) (0x0020E030 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define 
GL_PSTEXT_P2P_L1DATA_MAX_INDEX 2 +#define GL_PSTEXT_P2P_L1DATA_DATA_S 0 +#define GL_PSTEXT_P2P_L1DATA_DATA_M MAKEMASK(0xFFFFFFFF, 0) +#define GL_PSTEXT_PID_L2GKTYPE(_i) (0x0020E0F0 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_PSTEXT_PID_L2GKTYPE_MAX_INDEX 2 +#define GL_PSTEXT_PID_L2GKTYPE_PID_GKTYPE_S 0 +#define GL_PSTEXT_PID_L2GKTYPE_PID_GKTYPE_M MAKEMASK(0x3, 0) +#define GL_PSTEXT_PLVL_SEL(_i) (0x0020E00C + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_PSTEXT_PLVL_SEL_MAX_INDEX 2 +#define GL_PSTEXT_PLVL_SEL_PLVL_SEL_S 0 +#define GL_PSTEXT_PLVL_SEL_PLVL_SEL_M BIT(0) +#define GL_PSTEXT_PRFLM_CTRL(_i) (0x0020E474 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_PSTEXT_PRFLM_CTRL_MAX_INDEX 2 +#define GL_PSTEXT_PRFLM_CTRL_PRFL_IDX_S 0 +#define GL_PSTEXT_PRFLM_CTRL_PRFL_IDX_M MAKEMASK(0xFF, 0) +#define GL_PSTEXT_PRFLM_CTRL_RD_REQ_S 30 +#define GL_PSTEXT_PRFLM_CTRL_RD_REQ_M BIT(30) +#define GL_PSTEXT_PRFLM_CTRL_WR_REQ_S 31 +#define GL_PSTEXT_PRFLM_CTRL_WR_REQ_M BIT(31) +#define GL_PSTEXT_PRFLM_DATA_0(_i) (0x0020E174 + ((_i) * 4)) /* _i=0...63 */ /* Reset Source: CORER */ +#define GL_PSTEXT_PRFLM_DATA_0_MAX_INDEX 63 +#define GL_PSTEXT_PRFLM_DATA_0_PROT_S 0 +#define GL_PSTEXT_PRFLM_DATA_0_PROT_M MAKEMASK(0xFF, 0) +#define GL_PSTEXT_PRFLM_DATA_0_OFF_S 16 +#define GL_PSTEXT_PRFLM_DATA_0_OFF_M MAKEMASK(0x1FF, 16) +#define GL_PSTEXT_PRFLM_DATA_1(_i) (0x0020E274 + ((_i) * 4)) /* _i=0...63 */ /* Reset Source: CORER */ +#define GL_PSTEXT_PRFLM_DATA_1_MAX_INDEX 63 +#define GL_PSTEXT_PRFLM_DATA_1_PROT_S 0 +#define GL_PSTEXT_PRFLM_DATA_1_PROT_M MAKEMASK(0xFF, 0) +#define GL_PSTEXT_PRFLM_DATA_1_OFF_S 16 +#define GL_PSTEXT_PRFLM_DATA_1_OFF_M MAKEMASK(0x1FF, 16) +#define GL_PSTEXT_PRFLM_DATA_2(_i) (0x0020E374 + ((_i) * 4)) /* _i=0...63 */ /* Reset Source: CORER */ +#define GL_PSTEXT_PRFLM_DATA_2_MAX_INDEX 63 +#define GL_PSTEXT_PRFLM_DATA_2_PROT_S 0 +#define GL_PSTEXT_PRFLM_DATA_2_PROT_M MAKEMASK(0xFF, 0) +#define GL_PSTEXT_PRFLM_DATA_2_OFF_S 16 +#define GL_PSTEXT_PRFLM_DATA_2_OFF_M MAKEMASK(0x1FF, 16) +#define GL_PSTEXT_TCAM_L2ADDR(_i) (0x0020E114 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_PSTEXT_TCAM_L2ADDR_MAX_INDEX 2 +#define GL_PSTEXT_TCAM_L2ADDR_LINE_IDX_S 0 +#define GL_PSTEXT_TCAM_L2ADDR_LINE_IDX_M MAKEMASK(0x3FF, 0) +#define GL_PSTEXT_TCAM_L2ADDR_AUTO_INC_S 31 +#define GL_PSTEXT_TCAM_L2ADDR_AUTO_INC_M BIT(31) +#define GL_PSTEXT_TCAM_L2DATALSB(_i) (0x0020E120 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_PSTEXT_TCAM_L2DATALSB_MAX_INDEX 2 +#define GL_PSTEXT_TCAM_L2DATALSB_DATALSB_S 0 +#define GL_PSTEXT_TCAM_L2DATALSB_DATALSB_M MAKEMASK(0xFFFFFFFF, 0) +#define GL_PSTEXT_TCAM_L2DATAMSB(_i) (0x0020E12C + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_PSTEXT_TCAM_L2DATAMSB_MAX_INDEX 2 +#define GL_PSTEXT_TCAM_L2DATAMSB_DATAMSB_S 0 +#define GL_PSTEXT_TCAM_L2DATAMSB_DATAMSB_M MAKEMASK(0xFF, 0) +#define GL_PSTEXT_XLT0_L1ADDR(_i) (0x0020E03C + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_PSTEXT_XLT0_L1ADDR_MAX_INDEX 2 +#define GL_PSTEXT_XLT0_L1ADDR_LINE_IDX_S 0 +#define GL_PSTEXT_XLT0_L1ADDR_LINE_IDX_M MAKEMASK(0xFF, 0) +#define GL_PSTEXT_XLT0_L1ADDR_AUTO_INC_S 31 +#define GL_PSTEXT_XLT0_L1ADDR_AUTO_INC_M BIT(31) +#define GL_PSTEXT_XLT0_L1DATA(_i) (0x0020E048 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_PSTEXT_XLT0_L1DATA_MAX_INDEX 2 +#define GL_PSTEXT_XLT0_L1DATA_DATA_S 0 +#define GL_PSTEXT_XLT0_L1DATA_DATA_M MAKEMASK(0xFFFFFFFF, 0) +#define 
GL_PSTEXT_XLT1_L2ADDR(_i) (0x0020E0C0 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_PSTEXT_XLT1_L2ADDR_MAX_INDEX 2 +#define GL_PSTEXT_XLT1_L2ADDR_LINE_IDX_S 0 +#define GL_PSTEXT_XLT1_L2ADDR_LINE_IDX_M MAKEMASK(0x7FF, 0) +#define GL_PSTEXT_XLT1_L2ADDR_AUTO_INC_S 31 +#define GL_PSTEXT_XLT1_L2ADDR_AUTO_INC_M BIT(31) +#define GL_PSTEXT_XLT1_L2DATA(_i) (0x0020E0CC + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_PSTEXT_XLT1_L2DATA_MAX_INDEX 2 +#define GL_PSTEXT_XLT1_L2DATA_DATA_S 0 +#define GL_PSTEXT_XLT1_L2DATA_DATA_M MAKEMASK(0xFFFFFFFF, 0) +#define GL_PSTEXT_XLT2_L2ADDR(_i) (0x0020E0D8 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_PSTEXT_XLT2_L2ADDR_MAX_INDEX 2 +#define GL_PSTEXT_XLT2_L2ADDR_LINE_IDX_S 0 +#define GL_PSTEXT_XLT2_L2ADDR_LINE_IDX_M MAKEMASK(0x1FF, 0) +#define GL_PSTEXT_XLT2_L2ADDR_AUTO_INC_S 31 +#define GL_PSTEXT_XLT2_L2ADDR_AUTO_INC_M BIT(31) +#define GL_PSTEXT_XLT2_L2DATA(_i) (0x0020E0E4 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GL_PSTEXT_XLT2_L2DATA_MAX_INDEX 2 +#define GL_PSTEXT_XLT2_L2DATA_DATA_S 0 +#define GL_PSTEXT_XLT2_L2DATA_DATA_M MAKEMASK(0xFFFFFFFF, 0) +#define GLFLXP_PTYPE_TRANSLATION(_i) (0x0045C000 + ((_i) * 4)) /* _i=0...255 */ /* Reset Source: CORER */ +#define GLFLXP_PTYPE_TRANSLATION_MAX_INDEX 255 +#define GLFLXP_PTYPE_TRANSLATION_PTYPE_4N_S 0 +#define GLFLXP_PTYPE_TRANSLATION_PTYPE_4N_M MAKEMASK(0xFF, 0) +#define GLFLXP_PTYPE_TRANSLATION_PTYPE_4N_1_S 8 +#define GLFLXP_PTYPE_TRANSLATION_PTYPE_4N_1_M MAKEMASK(0xFF, 8) +#define GLFLXP_PTYPE_TRANSLATION_PTYPE_4N_2_S 16 +#define GLFLXP_PTYPE_TRANSLATION_PTYPE_4N_2_M MAKEMASK(0xFF, 16) +#define GLFLXP_PTYPE_TRANSLATION_PTYPE_4N_3_S 24 +#define GLFLXP_PTYPE_TRANSLATION_PTYPE_4N_3_M MAKEMASK(0xFF, 24) +#define GLFLXP_RX_CMD_LX_PROT_IDX(_i) (0x0045C400 + ((_i) * 4)) /* _i=0...255 */ /* Reset Source: CORER */ +#define GLFLXP_RX_CMD_LX_PROT_IDX_MAX_INDEX 255 +#define GLFLXP_RX_CMD_LX_PROT_IDX_INNER_CLOUD_OFFSET_INDEX_S 0 +#define GLFLXP_RX_CMD_LX_PROT_IDX_INNER_CLOUD_OFFSET_INDEX_M MAKEMASK(0x7, 0) +#define GLFLXP_RX_CMD_LX_PROT_IDX_L4_OFFSET_INDEX_S 4 +#define GLFLXP_RX_CMD_LX_PROT_IDX_L4_OFFSET_INDEX_M MAKEMASK(0x7, 4) +#define GLFLXP_RX_CMD_LX_PROT_IDX_PAYLOAD_OFFSET_INDEX_S 8 +#define GLFLXP_RX_CMD_LX_PROT_IDX_PAYLOAD_OFFSET_INDEX_M MAKEMASK(0x7, 8) +#define GLFLXP_RX_CMD_LX_PROT_IDX_L3_PROTOCOL_S 12 +#define GLFLXP_RX_CMD_LX_PROT_IDX_L3_PROTOCOL_M MAKEMASK(0x3, 12) +#define GLFLXP_RX_CMD_LX_PROT_IDX_L4_PROTOCOL_S 14 +#define GLFLXP_RX_CMD_LX_PROT_IDX_L4_PROTOCOL_M MAKEMASK(0x3, 14) +#define GLFLXP_RX_CMD_PROTIDS(_i, _j) (0x0045A000 + ((_i) * 4 + (_j) * 1024)) /* _i=0...255, _j=0...5 */ /* Reset Source: CORER */ +#define GLFLXP_RX_CMD_PROTIDS_MAX_INDEX 255 +#define GLFLXP_RX_CMD_PROTIDS_PROTID_4N_S 0 +#define GLFLXP_RX_CMD_PROTIDS_PROTID_4N_M MAKEMASK(0xFF, 0) +#define GLFLXP_RX_CMD_PROTIDS_PROTID_4N_1_S 8 +#define GLFLXP_RX_CMD_PROTIDS_PROTID_4N_1_M MAKEMASK(0xFF, 8) +#define GLFLXP_RX_CMD_PROTIDS_PROTID_4N_2_S 16 +#define GLFLXP_RX_CMD_PROTIDS_PROTID_4N_2_M MAKEMASK(0xFF, 16) +#define GLFLXP_RX_CMD_PROTIDS_PROTID_4N_3_S 24 +#define GLFLXP_RX_CMD_PROTIDS_PROTID_4N_3_M MAKEMASK(0xFF, 24) +#define GLFLXP_RXDID_FLAGS(_i, _j) (0x0045D000 + ((_i) * 4 + (_j) * 256)) /* _i=0...63, _j=0...4 */ /* Reset Source: CORER */ +#define GLFLXP_RXDID_FLAGS_MAX_INDEX 63 +#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S 0 +#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M MAKEMASK(0x3F, 0) +#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_S 8 +#define 
GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_M MAKEMASK(0x3F, 8) +#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_S 16 +#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_M MAKEMASK(0x3F, 16) +#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_S 24 +#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_M MAKEMASK(0x3F, 24) +#define GLFLXP_RXDID_FLAGS1_OVERRIDE(_i) (0x0045D600 + ((_i) * 4)) /* _i=0...63 */ /* Reset Source: CORER */ +#define GLFLXP_RXDID_FLAGS1_OVERRIDE_MAX_INDEX 63 +#define GLFLXP_RXDID_FLAGS1_OVERRIDE_FLEXIFLAGS1_OVERRIDE_S 0 +#define GLFLXP_RXDID_FLAGS1_OVERRIDE_FLEXIFLAGS1_OVERRIDE_M MAKEMASK(0xF, 0) +#define GLFLXP_RXDID_FLX_WRD_0(_i) (0x0045C800 + ((_i) * 4)) /* _i=0...63 */ /* Reset Source: CORER */ +#define GLFLXP_RXDID_FLX_WRD_0_MAX_INDEX 63 +#define GLFLXP_RXDID_FLX_WRD_0_PROT_MDID_S 0 +#define GLFLXP_RXDID_FLX_WRD_0_PROT_MDID_M MAKEMASK(0xFF, 0) +#define GLFLXP_RXDID_FLX_WRD_0_EXTRACTION_OFFSET_S 8 +#define GLFLXP_RXDID_FLX_WRD_0_EXTRACTION_OFFSET_M MAKEMASK(0x3FF, 8) +#define GLFLXP_RXDID_FLX_WRD_0_RXDID_OPCODE_S 30 +#define GLFLXP_RXDID_FLX_WRD_0_RXDID_OPCODE_M MAKEMASK(0x3, 30) +#define GLFLXP_RXDID_FLX_WRD_1(_i) (0x0045C900 + ((_i) * 4)) /* _i=0...63 */ /* Reset Source: CORER */ +#define GLFLXP_RXDID_FLX_WRD_1_MAX_INDEX 63 +#define GLFLXP_RXDID_FLX_WRD_1_PROT_MDID_S 0 +#define GLFLXP_RXDID_FLX_WRD_1_PROT_MDID_M MAKEMASK(0xFF, 0) +#define GLFLXP_RXDID_FLX_WRD_1_EXTRACTION_OFFSET_S 8 +#define GLFLXP_RXDID_FLX_WRD_1_EXTRACTION_OFFSET_M MAKEMASK(0x3FF, 8) +#define GLFLXP_RXDID_FLX_WRD_1_RXDID_OPCODE_S 30 +#define GLFLXP_RXDID_FLX_WRD_1_RXDID_OPCODE_M MAKEMASK(0x3, 30) +#define GLFLXP_RXDID_FLX_WRD_2(_i) (0x0045CA00 + ((_i) * 4)) /* _i=0...63 */ /* Reset Source: CORER */ +#define GLFLXP_RXDID_FLX_WRD_2_MAX_INDEX 63 +#define GLFLXP_RXDID_FLX_WRD_2_PROT_MDID_S 0 +#define GLFLXP_RXDID_FLX_WRD_2_PROT_MDID_M MAKEMASK(0xFF, 0) +#define GLFLXP_RXDID_FLX_WRD_2_EXTRACTION_OFFSET_S 8 +#define GLFLXP_RXDID_FLX_WRD_2_EXTRACTION_OFFSET_M MAKEMASK(0x3FF, 8) +#define GLFLXP_RXDID_FLX_WRD_2_RXDID_OPCODE_S 30 +#define GLFLXP_RXDID_FLX_WRD_2_RXDID_OPCODE_M MAKEMASK(0x3, 30) +#define GLFLXP_RXDID_FLX_WRD_3(_i) (0x0045CB00 + ((_i) * 4)) /* _i=0...63 */ /* Reset Source: CORER */ +#define GLFLXP_RXDID_FLX_WRD_3_MAX_INDEX 63 +#define GLFLXP_RXDID_FLX_WRD_3_PROT_MDID_S 0 +#define GLFLXP_RXDID_FLX_WRD_3_PROT_MDID_M MAKEMASK(0xFF, 0) +#define GLFLXP_RXDID_FLX_WRD_3_EXTRACTION_OFFSET_S 8 +#define GLFLXP_RXDID_FLX_WRD_3_EXTRACTION_OFFSET_M MAKEMASK(0x3FF, 8) +#define GLFLXP_RXDID_FLX_WRD_3_RXDID_OPCODE_S 30 +#define GLFLXP_RXDID_FLX_WRD_3_RXDID_OPCODE_M MAKEMASK(0x3, 30) +#define GLFLXP_RXDID_FLX_WRD_4(_i) (0x0045CC00 + ((_i) * 4)) /* _i=0...63 */ /* Reset Source: CORER */ +#define GLFLXP_RXDID_FLX_WRD_4_MAX_INDEX 63 +#define GLFLXP_RXDID_FLX_WRD_4_PROT_MDID_S 0 +#define GLFLXP_RXDID_FLX_WRD_4_PROT_MDID_M MAKEMASK(0xFF, 0) +#define GLFLXP_RXDID_FLX_WRD_4_EXTRACTION_OFFSET_S 8 +#define GLFLXP_RXDID_FLX_WRD_4_EXTRACTION_OFFSET_M MAKEMASK(0x3FF, 8) +#define GLFLXP_RXDID_FLX_WRD_4_RXDID_OPCODE_S 30 +#define GLFLXP_RXDID_FLX_WRD_4_RXDID_OPCODE_M MAKEMASK(0x3, 30) +#define GLFLXP_RXDID_FLX_WRD_5(_i) (0x0045CD00 + ((_i) * 4)) /* _i=0...63 */ /* Reset Source: CORER */ +#define GLFLXP_RXDID_FLX_WRD_5_MAX_INDEX 63 +#define GLFLXP_RXDID_FLX_WRD_5_PROT_MDID_S 0 +#define GLFLXP_RXDID_FLX_WRD_5_PROT_MDID_M MAKEMASK(0xFF, 0) +#define GLFLXP_RXDID_FLX_WRD_5_EXTRACTION_OFFSET_S 8 +#define GLFLXP_RXDID_FLX_WRD_5_EXTRACTION_OFFSET_M MAKEMASK(0x3FF, 8) +#define GLFLXP_RXDID_FLX_WRD_5_RXDID_OPCODE_S 30 +#define GLFLXP_RXDID_FLX_WRD_5_RXDID_OPCODE_M 
MAKEMASK(0x3, 30) +#define GLFLXP_TX_SCHED_CORRECT(_i, _j) (0x00458000 + ((_i) * 4 + (_j) * 256)) /* _i=0...63, _j=0...31 */ /* Reset Source: CORER */ +#define GLFLXP_TX_SCHED_CORRECT_MAX_INDEX 63 +#define GLFLXP_TX_SCHED_CORRECT_PROTD_ID_2N_S 0 +#define GLFLXP_TX_SCHED_CORRECT_PROTD_ID_2N_M MAKEMASK(0xFF, 0) +#define GLFLXP_TX_SCHED_CORRECT_RECIPE_2N_S 8 +#define GLFLXP_TX_SCHED_CORRECT_RECIPE_2N_M MAKEMASK(0x1F, 8) +#define GLFLXP_TX_SCHED_CORRECT_PROTD_ID_2N_1_S 16 +#define GLFLXP_TX_SCHED_CORRECT_PROTD_ID_2N_1_M MAKEMASK(0xFF, 16) +#define GLFLXP_TX_SCHED_CORRECT_RECIPE_2N_1_S 24 +#define GLFLXP_TX_SCHED_CORRECT_RECIPE_2N_1_M MAKEMASK(0x1F, 24) +#define QRXFLXP_CNTXT(_QRX) (0x00480000 + ((_QRX) * 4)) /* _i=0...2047 */ /* Reset Source: CORER */ +#define QRXFLXP_CNTXT_MAX_INDEX 2047 +#define QRXFLXP_CNTXT_RXDID_IDX_S 0 +#define QRXFLXP_CNTXT_RXDID_IDX_M MAKEMASK(0x3F, 0) +#define QRXFLXP_CNTXT_RXDID_PRIO_S 8 +#define QRXFLXP_CNTXT_RXDID_PRIO_M MAKEMASK(0x7, 8) +#define QRXFLXP_CNTXT_TS_S 11 +#define QRXFLXP_CNTXT_TS_M BIT(11) +#define GL_FWSTS 0x00083048 /* Reset Source: POR */ +#define GL_FWSTS_FWS0B_S 0 +#define GL_FWSTS_FWS0B_M MAKEMASK(0xFF, 0) +#define GL_FWSTS_FWROWD_S 8 +#define GL_FWSTS_FWROWD_M BIT(8) +#define GL_FWSTS_FWRI_S 9 +#define GL_FWSTS_FWRI_M BIT(9) +#define GL_FWSTS_FWS1B_S 16 +#define GL_FWSTS_FWS1B_M MAKEMASK(0xFF, 16) +#define GL_TCVMLR_DRAIN_CNTR_CTL 0x000A21E0 /* Reset Source: CORER */ +#define GL_TCVMLR_DRAIN_CNTR_CTL_OP_S 0 +#define GL_TCVMLR_DRAIN_CNTR_CTL_OP_M BIT(0) +#define GL_TCVMLR_DRAIN_CNTR_CTL_PORT_S 1 +#define GL_TCVMLR_DRAIN_CNTR_CTL_PORT_M MAKEMASK(0x7, 1) +#define GL_TCVMLR_DRAIN_CNTR_CTL_VALUE_S 4 +#define GL_TCVMLR_DRAIN_CNTR_CTL_VALUE_M MAKEMASK(0x3FFF, 4) +#define GL_TCVMLR_DRAIN_DONE_DEC 0x000A21A8 /* Reset Source: CORER */ +#define GL_TCVMLR_DRAIN_DONE_DEC_TARGET_S 0 +#define GL_TCVMLR_DRAIN_DONE_DEC_TARGET_M BIT(0) +#define GL_TCVMLR_DRAIN_DONE_DEC_INDEX_S 1 +#define GL_TCVMLR_DRAIN_DONE_DEC_INDEX_M MAKEMASK(0x1F, 1) +#define GL_TCVMLR_DRAIN_DONE_DEC_VALUE_S 6 +#define GL_TCVMLR_DRAIN_DONE_DEC_VALUE_M MAKEMASK(0xFF, 6) +#define GL_TCVMLR_DRAIN_DONE_TCLAN(_i) (0x000A20A8 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ +#define GL_TCVMLR_DRAIN_DONE_TCLAN_MAX_INDEX 31 +#define GL_TCVMLR_DRAIN_DONE_TCLAN_COUNT_S 0 +#define GL_TCVMLR_DRAIN_DONE_TCLAN_COUNT_M MAKEMASK(0xFF, 0) +#define GL_TCVMLR_DRAIN_DONE_TPB(_i) (0x000A2128 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ +#define GL_TCVMLR_DRAIN_DONE_TPB_MAX_INDEX 31 +#define GL_TCVMLR_DRAIN_DONE_TPB_COUNT_S 0 +#define GL_TCVMLR_DRAIN_DONE_TPB_COUNT_M MAKEMASK(0xFF, 0) +#define GL_TCVMLR_DRAIN_MARKER 0x000A2008 /* Reset Source: CORER */ +#define GL_TCVMLR_DRAIN_MARKER_PORT_S 0 +#define GL_TCVMLR_DRAIN_MARKER_PORT_M MAKEMASK(0x7, 0) +#define GL_TCVMLR_DRAIN_MARKER_TC_S 3 +#define GL_TCVMLR_DRAIN_MARKER_TC_M MAKEMASK(0x1F, 3) +#define GL_TCVMLR_ERR_STAT 0x000A2024 /* Reset Source: CORER */ +#define GL_TCVMLR_ERR_STAT_ERROR_S 0 +#define GL_TCVMLR_ERR_STAT_ERROR_M BIT(0) +#define GL_TCVMLR_ERR_STAT_FW_REQ_S 1 +#define GL_TCVMLR_ERR_STAT_FW_REQ_M BIT(1) +#define GL_TCVMLR_ERR_STAT_STAT_S 2 +#define GL_TCVMLR_ERR_STAT_STAT_M MAKEMASK(0x7, 2) +#define GL_TCVMLR_ERR_STAT_ENT_TYPE_S 5 +#define GL_TCVMLR_ERR_STAT_ENT_TYPE_M MAKEMASK(0x7, 5) +#define GL_TCVMLR_ERR_STAT_ENT_ID_S 8 +#define GL_TCVMLR_ERR_STAT_ENT_ID_M MAKEMASK(0x3FFF, 8) +#define GL_TCVMLR_QCFG 0x000A2010 /* Reset Source: CORER */ +#define GL_TCVMLR_QCFG_QID_S 0 +#define GL_TCVMLR_QCFG_QID_M MAKEMASK(0x3FFF, 0) +#define 
GL_TCVMLR_QCFG_OP_S 14 +#define GL_TCVMLR_QCFG_OP_M BIT(14) +#define GL_TCVMLR_QCFG_PORT_S 15 +#define GL_TCVMLR_QCFG_PORT_M MAKEMASK(0x7, 15) +#define GL_TCVMLR_QCFG_TC_S 18 +#define GL_TCVMLR_QCFG_TC_M MAKEMASK(0x1F, 18) +#define GL_TCVMLR_QCFG_RD 0x000A2014 /* Reset Source: CORER */ +#define GL_TCVMLR_QCFG_RD_QID_S 0 +#define GL_TCVMLR_QCFG_RD_QID_M MAKEMASK(0x3FFF, 0) +#define GL_TCVMLR_QCFG_RD_PORT_S 14 +#define GL_TCVMLR_QCFG_RD_PORT_M MAKEMASK(0x7, 14) +#define GL_TCVMLR_QCFG_RD_TC_S 17 +#define GL_TCVMLR_QCFG_RD_TC_M MAKEMASK(0x1F, 17) +#define GL_TCVMLR_QCNTR 0x000A200C /* Reset Source: CORER */ +#define GL_TCVMLR_QCNTR_CNTR_S 0 +#define GL_TCVMLR_QCNTR_CNTR_M MAKEMASK(0x7FFF, 0) +#define GL_TCVMLR_QCTL 0x000A2004 /* Reset Source: CORER */ +#define GL_TCVMLR_QCTL_QID_S 0 +#define GL_TCVMLR_QCTL_QID_M MAKEMASK(0x3FFF, 0) +#define GL_TCVMLR_QCTL_OP_S 14 +#define GL_TCVMLR_QCTL_OP_M BIT(14) +#define GL_TCVMLR_REQ_STAT 0x000A2018 /* Reset Source: CORER */ +#define GL_TCVMLR_REQ_STAT_ENT_TYPE_S 0 +#define GL_TCVMLR_REQ_STAT_ENT_TYPE_M MAKEMASK(0x7, 0) +#define GL_TCVMLR_REQ_STAT_ENT_ID_S 3 +#define GL_TCVMLR_REQ_STAT_ENT_ID_M MAKEMASK(0x3FFF, 3) +#define GL_TCVMLR_REQ_STAT_OP_S 17 +#define GL_TCVMLR_REQ_STAT_OP_M BIT(17) +#define GL_TCVMLR_REQ_STAT_WRITE_STATUS_S 18 +#define GL_TCVMLR_REQ_STAT_WRITE_STATUS_M MAKEMASK(0x7, 18) +#define GL_TCVMLR_STAT 0x000A201C /* Reset Source: CORER */ +#define GL_TCVMLR_STAT_ENT_TYPE_S 0 +#define GL_TCVMLR_STAT_ENT_TYPE_M MAKEMASK(0x7, 0) +#define GL_TCVMLR_STAT_ENT_ID_S 3 +#define GL_TCVMLR_STAT_ENT_ID_M MAKEMASK(0x3FFF, 3) +#define GL_TCVMLR_STAT_STATUS_S 17 +#define GL_TCVMLR_STAT_STATUS_M MAKEMASK(0x7, 17) +#define GL_XLR_MARKER_TRIG_TCVMLR 0x000A2000 /* Reset Source: CORER */ +#define GL_XLR_MARKER_TRIG_TCVMLR_VM_VF_NUM_S 0 +#define GL_XLR_MARKER_TRIG_TCVMLR_VM_VF_NUM_M MAKEMASK(0x3FF, 0) +#define GL_XLR_MARKER_TRIG_TCVMLR_VM_VF_TYPE_S 10 +#define GL_XLR_MARKER_TRIG_TCVMLR_VM_VF_TYPE_M MAKEMASK(0x3, 10) +#define GL_XLR_MARKER_TRIG_TCVMLR_PF_NUM_S 12 +#define GL_XLR_MARKER_TRIG_TCVMLR_PF_NUM_M MAKEMASK(0x7, 12) +#define GL_XLR_MARKER_TRIG_TCVMLR_PORT_NUM_S 16 +#define GL_XLR_MARKER_TRIG_TCVMLR_PORT_NUM_M MAKEMASK(0x7, 16) +#define GL_XLR_MARKER_TRIG_VMLR 0x00093804 /* Reset Source: CORER */ +#define GL_XLR_MARKER_TRIG_VMLR_VM_VF_NUM_S 0 +#define GL_XLR_MARKER_TRIG_VMLR_VM_VF_NUM_M MAKEMASK(0x3FF, 0) +#define GL_XLR_MARKER_TRIG_VMLR_VM_VF_TYPE_S 10 +#define GL_XLR_MARKER_TRIG_VMLR_VM_VF_TYPE_M MAKEMASK(0x3, 10) +#define GL_XLR_MARKER_TRIG_VMLR_PF_NUM_S 12 +#define GL_XLR_MARKER_TRIG_VMLR_PF_NUM_M MAKEMASK(0x7, 12) +#define GL_XLR_MARKER_TRIG_VMLR_PORT_NUM_S 16 +#define GL_XLR_MARKER_TRIG_VMLR_PORT_NUM_M MAKEMASK(0x7, 16) +#define GLGEN_ANA_ABORT_PTYPE 0x0020C21C /* Reset Source: CORER */ +#define GLGEN_ANA_ABORT_PTYPE_ABORT_S 0 +#define GLGEN_ANA_ABORT_PTYPE_ABORT_M MAKEMASK(0x3FF, 0) +#define GLGEN_ANA_ALU_ACCSS_OUT_OF_PKT 0x0020C208 /* Reset Source: CORER */ +#define GLGEN_ANA_ALU_ACCSS_OUT_OF_PKT_NPC_S 0 +#define GLGEN_ANA_ALU_ACCSS_OUT_OF_PKT_NPC_M MAKEMASK(0xFF, 0) +#define GLGEN_ANA_CFG_CTRL 0x0020C104 /* Reset Source: CORER */ +#define GLGEN_ANA_CFG_CTRL_LINE_IDX_S 0 +#define GLGEN_ANA_CFG_CTRL_LINE_IDX_M MAKEMASK(0x3FFFF, 0) +#define GLGEN_ANA_CFG_CTRL_TABLE_ID_S 18 +#define GLGEN_ANA_CFG_CTRL_TABLE_ID_M MAKEMASK(0xFF, 18) +#define GLGEN_ANA_CFG_CTRL_RESRVED_S 26 +#define GLGEN_ANA_CFG_CTRL_RESRVED_M MAKEMASK(0x7, 26) +#define GLGEN_ANA_CFG_CTRL_OPERATION_ID_S 29 +#define GLGEN_ANA_CFG_CTRL_OPERATION_ID_M MAKEMASK(0x7, 29) +#define 
GLGEN_ANA_CFG_HTBL_LU_RESULT 0x0020C158 /* Reset Source: CORER */ +#define GLGEN_ANA_CFG_HTBL_LU_RESULT_HIT_S 0 +#define GLGEN_ANA_CFG_HTBL_LU_RESULT_HIT_M BIT(0) +#define GLGEN_ANA_CFG_HTBL_LU_RESULT_PG_MEM_IDX_S 1 +#define GLGEN_ANA_CFG_HTBL_LU_RESULT_PG_MEM_IDX_M MAKEMASK(0x7, 1) +#define GLGEN_ANA_CFG_HTBL_LU_RESULT_ADDR_S 4 +#define GLGEN_ANA_CFG_HTBL_LU_RESULT_ADDR_M MAKEMASK(0x1FF, 4) +#define GLGEN_ANA_CFG_LU_KEY(_i) (0x0020C14C + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GLGEN_ANA_CFG_LU_KEY_MAX_INDEX 2 +#define GLGEN_ANA_CFG_LU_KEY_LU_KEY_S 0 +#define GLGEN_ANA_CFG_LU_KEY_LU_KEY_M MAKEMASK(0xFFFFFFFF, 0) +#define GLGEN_ANA_CFG_RDDATA(_i) (0x0020C10C + ((_i) * 4)) /* _i=0...15 */ /* Reset Source: CORER */ +#define GLGEN_ANA_CFG_RDDATA_MAX_INDEX 15 +#define GLGEN_ANA_CFG_RDDATA_RD_DATA_S 0 +#define GLGEN_ANA_CFG_RDDATA_RD_DATA_M MAKEMASK(0xFFFFFFFF, 0) +#define GLGEN_ANA_CFG_SPLBUF_LU_RESULT 0x0020C15C /* Reset Source: CORER */ +#define GLGEN_ANA_CFG_SPLBUF_LU_RESULT_HIT_S 0 +#define GLGEN_ANA_CFG_SPLBUF_LU_RESULT_HIT_M BIT(0) +#define GLGEN_ANA_CFG_SPLBUF_LU_RESULT_RSV_S 1 +#define GLGEN_ANA_CFG_SPLBUF_LU_RESULT_RSV_M MAKEMASK(0x7, 1) +#define GLGEN_ANA_CFG_SPLBUF_LU_RESULT_ADDR_S 4 +#define GLGEN_ANA_CFG_SPLBUF_LU_RESULT_ADDR_M MAKEMASK(0x1FF, 4) +#define GLGEN_ANA_CFG_WRDATA 0x0020C108 /* Reset Source: CORER */ +#define GLGEN_ANA_CFG_WRDATA_WR_DATA_S 0 +#define GLGEN_ANA_CFG_WRDATA_WR_DATA_M MAKEMASK(0xFFFFFFFF, 0) +#define GLGEN_ANA_DEF_PTYPE 0x0020C100 /* Reset Source: CORER */ +#define GLGEN_ANA_DEF_PTYPE_DEF_PTYPE_S 0 +#define GLGEN_ANA_DEF_PTYPE_DEF_PTYPE_M MAKEMASK(0x3FF, 0) +#define GLGEN_ANA_ERR_CTRL 0x0020C220 /* Reset Source: CORER */ +#define GLGEN_ANA_ERR_CTRL_ERR_MASK_EN_S 0 +#define GLGEN_ANA_ERR_CTRL_ERR_MASK_EN_M MAKEMASK(0xFFFFFFFF, 0) +#define GLGEN_ANA_FLAG_MAP(_i) (0x0020C000 + ((_i) * 4)) /* _i=0...63 */ /* Reset Source: CORER */ +#define GLGEN_ANA_FLAG_MAP_MAX_INDEX 63 +#define GLGEN_ANA_FLAG_MAP_FLAG_EN_S 0 +#define GLGEN_ANA_FLAG_MAP_FLAG_EN_M BIT(0) +#define GLGEN_ANA_FLAG_MAP_EXT_FLAG_ID_S 1 +#define GLGEN_ANA_FLAG_MAP_EXT_FLAG_ID_M MAKEMASK(0x3F, 1) +#define GLGEN_ANA_INV_NODE_PTYPE 0x0020C210 /* Reset Source: CORER */ +#define GLGEN_ANA_INV_NODE_PTYPE_INV_NODE_PTYPE_S 0 +#define GLGEN_ANA_INV_NODE_PTYPE_INV_NODE_PTYPE_M MAKEMASK(0x7FF, 0) +#define GLGEN_ANA_INV_PTYPE_MARKER 0x0020C218 /* Reset Source: CORER */ +#define GLGEN_ANA_INV_PTYPE_MARKER_INV_PTYPE_MARKER_S 0 +#define GLGEN_ANA_INV_PTYPE_MARKER_INV_PTYPE_MARKER_M MAKEMASK(0x7F, 0) +#define GLGEN_ANA_LAST_PROT_ID(_i) (0x0020C1E4 + ((_i) * 4)) /* _i=0...5 */ /* Reset Source: CORER */ +#define GLGEN_ANA_LAST_PROT_ID_MAX_INDEX 5 +#define GLGEN_ANA_LAST_PROT_ID_EN_S 0 +#define GLGEN_ANA_LAST_PROT_ID_EN_M BIT(0) +#define GLGEN_ANA_LAST_PROT_ID_PROT_ID_S 1 +#define GLGEN_ANA_LAST_PROT_ID_PROT_ID_M MAKEMASK(0xFF, 1) +#define GLGEN_ANA_NMPG_KEYMASK(_i) (0x0020C1D0 + ((_i) * 4)) /* _i=0...3 */ /* Reset Source: CORER */ +#define GLGEN_ANA_NMPG_KEYMASK_MAX_INDEX 3 +#define GLGEN_ANA_NMPG_KEYMASK_HASH_KEY_S 0 +#define GLGEN_ANA_NMPG_KEYMASK_HASH_KEY_M MAKEMASK(0xFFFFFFFF, 0) +#define GLGEN_ANA_NMPG0_HASHKEY(_i) (0x0020C1B0 + ((_i) * 4)) /* _i=0...3 */ /* Reset Source: CORER */ +#define GLGEN_ANA_NMPG0_HASHKEY_MAX_INDEX 3 +#define GLGEN_ANA_NMPG0_HASHKEY_HASH_KEY_S 0 +#define GLGEN_ANA_NMPG0_HASHKEY_HASH_KEY_M MAKEMASK(0xFFFFFFFF, 0) +#define GLGEN_ANA_NO_HIT_PG_NM_PG 0x0020C204 /* Reset Source: CORER */ +#define GLGEN_ANA_NO_HIT_PG_NM_PG_NPC_S 0 +#define GLGEN_ANA_NO_HIT_PG_NM_PG_NPC_M 
MAKEMASK(0xFF, 0) +#define GLGEN_ANA_OUT_OF_PKT 0x0020C200 /* Reset Source: CORER */ +#define GLGEN_ANA_OUT_OF_PKT_NPC_S 0 +#define GLGEN_ANA_OUT_OF_PKT_NPC_M MAKEMASK(0xFF, 0) +#define GLGEN_ANA_P2P(_i) (0x0020C160 + ((_i) * 4)) /* _i=0...15 */ /* Reset Source: CORER */ +#define GLGEN_ANA_P2P_MAX_INDEX 15 +#define GLGEN_ANA_P2P_TARGET_PROF_S 0 +#define GLGEN_ANA_P2P_TARGET_PROF_M MAKEMASK(0xF, 0) +#define GLGEN_ANA_PG_KEYMASK(_i) (0x0020C1C0 + ((_i) * 4)) /* _i=0...3 */ /* Reset Source: CORER */ +#define GLGEN_ANA_PG_KEYMASK_MAX_INDEX 3 +#define GLGEN_ANA_PG_KEYMASK_HASH_KEY_S 0 +#define GLGEN_ANA_PG_KEYMASK_HASH_KEY_M MAKEMASK(0xFFFFFFFF, 0) +#define GLGEN_ANA_PG0_HASHKEY(_i) (0x0020C1A0 + ((_i) * 4)) /* _i=0...3 */ /* Reset Source: CORER */ +#define GLGEN_ANA_PG0_HASHKEY_MAX_INDEX 3 +#define GLGEN_ANA_PG0_HASHKEY_HASH_KEY_S 0 +#define GLGEN_ANA_PG0_HASHKEY_HASH_KEY_M MAKEMASK(0xFFFFFFFF, 0) +#define GLGEN_ANA_PROFIL_CTRL 0x0020C1FC /* Reset Source: CORER */ +#define GLGEN_ANA_PROFIL_CTRL_PROFILE_SELECT_MDID_S 0 +#define GLGEN_ANA_PROFIL_CTRL_PROFILE_SELECT_MDID_M MAKEMASK(0x1F, 0) +#define GLGEN_ANA_PROFIL_CTRL_PROFILE_SELECT_MDSTART_S 5 +#define GLGEN_ANA_PROFIL_CTRL_PROFILE_SELECT_MDSTART_M MAKEMASK(0xF, 5) +#define GLGEN_ANA_PROFIL_CTRL_PROFILE_SELECT_MD_LEN_S 9 +#define GLGEN_ANA_PROFIL_CTRL_PROFILE_SELECT_MD_LEN_M MAKEMASK(0x1F, 9) +#define GLGEN_ANA_PROFIL_CTRL_NUM_CTRL_DOMAIN_S 14 +#define GLGEN_ANA_PROFIL_CTRL_NUM_CTRL_DOMAIN_M MAKEMASK(0x3, 14) +#define GLGEN_ANA_PROFIL_CTRL_DEF_PROF_ID_S 16 +#define GLGEN_ANA_PROFIL_CTRL_DEF_PROF_ID_M MAKEMASK(0xF, 16) +#define GLGEN_ANA_PROFIL_CTRL_SEL_DEF_PROF_ID_S 20 +#define GLGEN_ANA_PROFIL_CTRL_SEL_DEF_PROF_ID_M BIT(20) +#define GLGEN_ANA_TX_ABORT_PTYPE 0x0020D21C /* Reset Source: CORER */ +#define GLGEN_ANA_TX_ABORT_PTYPE_ABORT_S 0 +#define GLGEN_ANA_TX_ABORT_PTYPE_ABORT_M MAKEMASK(0x3FF, 0) +#define GLGEN_ANA_TX_ALU_ACCSS_OUT_OF_PKT 0x0020D208 /* Reset Source: CORER */ +#define GLGEN_ANA_TX_ALU_ACCSS_OUT_OF_PKT_NPC_S 0 +#define GLGEN_ANA_TX_ALU_ACCSS_OUT_OF_PKT_NPC_M MAKEMASK(0xFF, 0) +#define GLGEN_ANA_TX_CFG_CTRL 0x0020D104 /* Reset Source: CORER */ +#define GLGEN_ANA_TX_CFG_CTRL_LINE_IDX_S 0 +#define GLGEN_ANA_TX_CFG_CTRL_LINE_IDX_M MAKEMASK(0x3FFFF, 0) +#define GLGEN_ANA_TX_CFG_CTRL_TABLE_ID_S 18 +#define GLGEN_ANA_TX_CFG_CTRL_TABLE_ID_M MAKEMASK(0xFF, 18) +#define GLGEN_ANA_TX_CFG_CTRL_RESRVED_S 26 +#define GLGEN_ANA_TX_CFG_CTRL_RESRVED_M MAKEMASK(0x7, 26) +#define GLGEN_ANA_TX_CFG_CTRL_OPERATION_ID_S 29 +#define GLGEN_ANA_TX_CFG_CTRL_OPERATION_ID_M MAKEMASK(0x7, 29) +#define GLGEN_ANA_TX_CFG_HTBL_LU_RESULT 0x0020D158 /* Reset Source: CORER */ +#define GLGEN_ANA_TX_CFG_HTBL_LU_RESULT_HIT_S 0 +#define GLGEN_ANA_TX_CFG_HTBL_LU_RESULT_HIT_M BIT(0) +#define GLGEN_ANA_TX_CFG_HTBL_LU_RESULT_PG_MEM_IDX_S 1 +#define GLGEN_ANA_TX_CFG_HTBL_LU_RESULT_PG_MEM_IDX_M MAKEMASK(0x7, 1) +#define GLGEN_ANA_TX_CFG_HTBL_LU_RESULT_ADDR_S 4 +#define GLGEN_ANA_TX_CFG_HTBL_LU_RESULT_ADDR_M MAKEMASK(0x1FF, 4) +#define GLGEN_ANA_TX_CFG_LU_KEY(_i) (0x0020D14C + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define GLGEN_ANA_TX_CFG_LU_KEY_MAX_INDEX 2 +#define GLGEN_ANA_TX_CFG_LU_KEY_LU_KEY_S 0 +#define GLGEN_ANA_TX_CFG_LU_KEY_LU_KEY_M MAKEMASK(0xFFFFFFFF, 0) +#define GLGEN_ANA_TX_CFG_RDDATA(_i) (0x0020D10C + ((_i) * 4)) /* _i=0...15 */ /* Reset Source: CORER */ +#define GLGEN_ANA_TX_CFG_RDDATA_MAX_INDEX 15 +#define GLGEN_ANA_TX_CFG_RDDATA_RD_DATA_S 0 +#define GLGEN_ANA_TX_CFG_RDDATA_RD_DATA_M MAKEMASK(0xFFFFFFFF, 0) +#define 
GLGEN_ANA_TX_CFG_SPLBUF_LU_RESULT 0x0020D15C /* Reset Source: CORER */ +#define GLGEN_ANA_TX_CFG_SPLBUF_LU_RESULT_HIT_S 0 +#define GLGEN_ANA_TX_CFG_SPLBUF_LU_RESULT_HIT_M BIT(0) +#define GLGEN_ANA_TX_CFG_SPLBUF_LU_RESULT_RSV_S 1 +#define GLGEN_ANA_TX_CFG_SPLBUF_LU_RESULT_RSV_M MAKEMASK(0x7, 1) +#define GLGEN_ANA_TX_CFG_SPLBUF_LU_RESULT_ADDR_S 4 +#define GLGEN_ANA_TX_CFG_SPLBUF_LU_RESULT_ADDR_M MAKEMASK(0x1FF, 4) +#define GLGEN_ANA_TX_CFG_WRDATA 0x0020D108 /* Reset Source: CORER */ +#define GLGEN_ANA_TX_CFG_WRDATA_WR_DATA_S 0 +#define GLGEN_ANA_TX_CFG_WRDATA_WR_DATA_M MAKEMASK(0xFFFFFFFF, 0) +#define GLGEN_ANA_TX_DEF_PTYPE 0x0020D100 /* Reset Source: CORER */ +#define GLGEN_ANA_TX_DEF_PTYPE_DEF_PTYPE_S 0 +#define GLGEN_ANA_TX_DEF_PTYPE_DEF_PTYPE_M MAKEMASK(0x3FF, 0) +#define GLGEN_ANA_TX_DFD_PACE_OUT 0x0020D4CC /* Reset Source: CORER */ +#define GLGEN_ANA_TX_DFD_PACE_OUT_PUSH_S 0 +#define GLGEN_ANA_TX_DFD_PACE_OUT_PUSH_M BIT(0) +#define GLGEN_ANA_TX_ERR_CTRL 0x0020D220 /* Reset Source: CORER */ +#define GLGEN_ANA_TX_ERR_CTRL_ERR_MASK_EN_S 0 +#define GLGEN_ANA_TX_ERR_CTRL_ERR_MASK_EN_M MAKEMASK(0xFFFFFFFF, 0) +#define GLGEN_ANA_TX_FLAG_MAP(_i) (0x0020D000 + ((_i) * 4)) /* _i=0...63 */ /* Reset Source: CORER */ +#define GLGEN_ANA_TX_FLAG_MAP_MAX_INDEX 63 +#define GLGEN_ANA_TX_FLAG_MAP_FLAG_EN_S 0 +#define GLGEN_ANA_TX_FLAG_MAP_FLAG_EN_M BIT(0) +#define GLGEN_ANA_TX_FLAG_MAP_EXT_FLAG_ID_S 1 +#define GLGEN_ANA_TX_FLAG_MAP_EXT_FLAG_ID_M MAKEMASK(0x3F, 1) +#define GLGEN_ANA_TX_INV_NODE_PTYPE 0x0020D210 /* Reset Source: CORER */ +#define GLGEN_ANA_TX_INV_NODE_PTYPE_INV_NODE_PTYPE_S 0 +#define GLGEN_ANA_TX_INV_NODE_PTYPE_INV_NODE_PTYPE_M MAKEMASK(0x7FF, 0) +#define GLGEN_ANA_TX_INV_PROT_ID 0x0020D214 /* Reset Source: CORER */ +#define GLGEN_ANA_TX_INV_PROT_ID_INV_PROT_ID_S 0 +#define GLGEN_ANA_TX_INV_PROT_ID_INV_PROT_ID_M MAKEMASK(0xFF, 0) +#define GLGEN_ANA_TX_INV_PTYPE_MARKER 0x0020D218 /* Reset Source: CORER */ +#define GLGEN_ANA_TX_INV_PTYPE_MARKER_INV_PTYPE_MARKER_S 0 +#define GLGEN_ANA_TX_INV_PTYPE_MARKER_INV_PTYPE_MARKER_M MAKEMASK(0x7F, 0) +#define GLGEN_ANA_TX_NMPG_KEYMASK(_i) (0x0020D1D0 + ((_i) * 4)) /* _i=0...3 */ /* Reset Source: CORER */ +#define GLGEN_ANA_TX_NMPG_KEYMASK_MAX_INDEX 3 +#define GLGEN_ANA_TX_NMPG_KEYMASK_HASH_KEY_S 0 +#define GLGEN_ANA_TX_NMPG_KEYMASK_HASH_KEY_M MAKEMASK(0xFFFFFFFF, 0) +#define GLGEN_ANA_TX_NMPG0_HASHKEY(_i) (0x0020D1B0 + ((_i) * 4)) /* _i=0...3 */ /* Reset Source: CORER */ +#define GLGEN_ANA_TX_NMPG0_HASHKEY_MAX_INDEX 3 +#define GLGEN_ANA_TX_NMPG0_HASHKEY_HASH_KEY_S 0 +#define GLGEN_ANA_TX_NMPG0_HASHKEY_HASH_KEY_M MAKEMASK(0xFFFFFFFF, 0) +#define GLGEN_ANA_TX_NO_HIT_PG_NM_PG 0x0020D204 /* Reset Source: CORER */ +#define GLGEN_ANA_TX_NO_HIT_PG_NM_PG_NPC_S 0 +#define GLGEN_ANA_TX_NO_HIT_PG_NM_PG_NPC_M MAKEMASK(0xFF, 0) +#define GLGEN_ANA_TX_P2P(_i) (0x0020D160 + ((_i) * 4)) /* _i=0...15 */ /* Reset Source: CORER */ +#define GLGEN_ANA_TX_P2P_MAX_INDEX 15 +#define GLGEN_ANA_TX_P2P_TARGET_PROF_S 0 +#define GLGEN_ANA_TX_P2P_TARGET_PROF_M MAKEMASK(0xF, 0) +#define GLGEN_ANA_TX_PG_KEYMASK(_i) (0x0020D1C0 + ((_i) * 4)) /* _i=0...3 */ /* Reset Source: CORER */ +#define GLGEN_ANA_TX_PG_KEYMASK_MAX_INDEX 3 +#define GLGEN_ANA_TX_PG_KEYMASK_HASH_KEY_S 0 +#define GLGEN_ANA_TX_PG_KEYMASK_HASH_KEY_M MAKEMASK(0xFFFFFFFF, 0) +#define GLGEN_ANA_TX_PG0_HASHKEY(_i) (0x0020D1A0 + ((_i) * 4)) /* _i=0...3 */ /* Reset Source: CORER */ +#define GLGEN_ANA_TX_PG0_HASHKEY_MAX_INDEX 3 +#define GLGEN_ANA_TX_PG0_HASHKEY_HASH_KEY_S 0 +#define GLGEN_ANA_TX_PG0_HASHKEY_HASH_KEY_M 
MAKEMASK(0xFFFFFFFF, 0) +#define GLGEN_ANA_TX_PROFIL_CTRL 0x0020D1FC /* Reset Source: CORER */ +#define GLGEN_ANA_TX_PROFIL_CTRL_PROFILE_SELECT_MDID_S 0 +#define GLGEN_ANA_TX_PROFIL_CTRL_PROFILE_SELECT_MDID_M MAKEMASK(0x1F, 0) +#define GLGEN_ANA_TX_PROFIL_CTRL_PROFILE_SELECT_MDSTART_S 5 +#define GLGEN_ANA_TX_PROFIL_CTRL_PROFILE_SELECT_MDSTART_M MAKEMASK(0xF, 5) +#define GLGEN_ANA_TX_PROFIL_CTRL_PROFILE_SELECT_MD_LEN_S 9 +#define GLGEN_ANA_TX_PROFIL_CTRL_PROFILE_SELECT_MD_LEN_M MAKEMASK(0x1F, 9) +#define GLGEN_ANA_TX_PROFIL_CTRL_NUM_CTRL_DOMAIN_S 14 +#define GLGEN_ANA_TX_PROFIL_CTRL_NUM_CTRL_DOMAIN_M MAKEMASK(0x3, 14) +#define GLGEN_ANA_TX_PROFIL_CTRL_DEF_PROF_ID_S 16 +#define GLGEN_ANA_TX_PROFIL_CTRL_DEF_PROF_ID_M MAKEMASK(0xF, 16) +#define GLGEN_ANA_TX_PROFIL_CTRL_SEL_DEF_PROF_ID_S 20 +#define GLGEN_ANA_TX_PROFIL_CTRL_SEL_DEF_PROF_ID_M BIT(20) +#define GLGEN_ASSERT_HLP 0x000B81E4 /* Reset Source: POR */ +#define GLGEN_ASSERT_HLP_CORE_ON_RST_S 0 +#define GLGEN_ASSERT_HLP_CORE_ON_RST_M BIT(0) +#define GLGEN_ASSERT_HLP_FULL_ON_RST_S 1 +#define GLGEN_ASSERT_HLP_FULL_ON_RST_M BIT(1) +#define GLGEN_CLKSTAT 0x000B8184 /* Reset Source: POR */ +#define GLGEN_CLKSTAT_U_CLK_SPEED_S 0 +#define GLGEN_CLKSTAT_U_CLK_SPEED_M MAKEMASK(0x7, 0) +#define GLGEN_CLKSTAT_L_CLK_SPEED_S 3 +#define GLGEN_CLKSTAT_L_CLK_SPEED_M MAKEMASK(0x7, 3) +#define GLGEN_CLKSTAT_PSM_CLK_SPEED_S 6 +#define GLGEN_CLKSTAT_PSM_CLK_SPEED_M MAKEMASK(0x7, 6) +#define GLGEN_CLKSTAT_RXCTL_CLK_SPEED_S 9 +#define GLGEN_CLKSTAT_RXCTL_CLK_SPEED_M MAKEMASK(0x7, 9) +#define GLGEN_CLKSTAT_UANA_CLK_SPEED_S 12 +#define GLGEN_CLKSTAT_UANA_CLK_SPEED_M MAKEMASK(0x7, 12) +#define GLGEN_CLKSTAT_PE_CLK_SPEED_S 18 +#define GLGEN_CLKSTAT_PE_CLK_SPEED_M MAKEMASK(0x7, 18) +#define GLGEN_CLKSTAT_SRC 0x000B826C /* Reset Source: POR */ +#define GLGEN_CLKSTAT_SRC_U_CLK_SRC_S 0 +#define GLGEN_CLKSTAT_SRC_U_CLK_SRC_M MAKEMASK(0x3, 0) +#define GLGEN_CLKSTAT_SRC_L_CLK_SRC_S 2 +#define GLGEN_CLKSTAT_SRC_L_CLK_SRC_M MAKEMASK(0x3, 2) +#define GLGEN_CLKSTAT_SRC_PSM_CLK_SRC_S 4 +#define GLGEN_CLKSTAT_SRC_PSM_CLK_SRC_M MAKEMASK(0x3, 4) +#define GLGEN_CLKSTAT_SRC_RXCTL_CLK_SRC_S 6 +#define GLGEN_CLKSTAT_SRC_RXCTL_CLK_SRC_M MAKEMASK(0x3, 6) +#define GLGEN_CLKSTAT_SRC_UANA_CLK_SRC_S 8 +#define GLGEN_CLKSTAT_SRC_UANA_CLK_SRC_M MAKEMASK(0xF, 8) +#define GLGEN_ECC_ERR_INT_TOG_MASK_H 0x00093A00 /* Reset Source: CORER */ +#define GLGEN_ECC_ERR_INT_TOG_MASK_H_CLIENT_NUM_S 0 +#define GLGEN_ECC_ERR_INT_TOG_MASK_H_CLIENT_NUM_M MAKEMASK(0x7F, 0) +#define GLGEN_ECC_ERR_INT_TOG_MASK_L 0x000939FC /* Reset Source: CORER */ +#define GLGEN_ECC_ERR_INT_TOG_MASK_L_CLIENT_NUM_S 0 +#define GLGEN_ECC_ERR_INT_TOG_MASK_L_CLIENT_NUM_M MAKEMASK(0xFFFFFFFF, 0) +#define GLGEN_ECC_ERR_RST_MASK_H 0x000939F8 /* Reset Source: CORER */ +#define GLGEN_ECC_ERR_RST_MASK_H_CLIENT_NUM_S 0 +#define GLGEN_ECC_ERR_RST_MASK_H_CLIENT_NUM_M MAKEMASK(0x7F, 0) +#define GLGEN_ECC_ERR_RST_MASK_L 0x000939F4 /* Reset Source: CORER */ +#define GLGEN_ECC_ERR_RST_MASK_L_CLIENT_NUM_S 0 +#define GLGEN_ECC_ERR_RST_MASK_L_CLIENT_NUM_M MAKEMASK(0xFFFFFFFF, 0) +#define GLGEN_GPIO_CTL(_i) (0x000880C8 + ((_i) * 4)) /* _i=0...6 */ /* Reset Source: POR */ +#define GLGEN_GPIO_CTL_MAX_INDEX 6 +#define GLGEN_GPIO_CTL_IN_VALUE_S 0 +#define GLGEN_GPIO_CTL_IN_VALUE_M BIT(0) +#define GLGEN_GPIO_CTL_IN_TRANSIT_S 1 +#define GLGEN_GPIO_CTL_IN_TRANSIT_M BIT(1) +#define GLGEN_GPIO_CTL_OUT_VALUE_S 2 +#define GLGEN_GPIO_CTL_OUT_VALUE_M BIT(2) +#define GLGEN_GPIO_CTL_NO_P_UP_S 3 +#define GLGEN_GPIO_CTL_NO_P_UP_M BIT(3) +#define 
GLGEN_GPIO_CTL_PIN_DIR_S 4 +#define GLGEN_GPIO_CTL_PIN_DIR_M BIT(4) +#define GLGEN_GPIO_CTL_TRI_CTL_S 5 +#define GLGEN_GPIO_CTL_TRI_CTL_M BIT(5) +#define GLGEN_GPIO_CTL_PIN_FUNC_S 8 +#define GLGEN_GPIO_CTL_PIN_FUNC_M MAKEMASK(0xF, 8) +#define GLGEN_GPIO_CTL_INT_MODE_S 12 +#define GLGEN_GPIO_CTL_INT_MODE_M MAKEMASK(0x3, 12) +#define GLGEN_MARKER_COUNT 0x000939E8 /* Reset Source: CORER */ +#define GLGEN_MARKER_COUNT_MARKER_COUNT_S 0 +#define GLGEN_MARKER_COUNT_MARKER_COUNT_M MAKEMASK(0xFF, 0) +#define GLGEN_MARKER_COUNT_MARKER_COUNT_EN_S 31 +#define GLGEN_MARKER_COUNT_MARKER_COUNT_EN_M BIT(31) +#define GLGEN_RSTAT 0x000B8188 /* Reset Source: POR */ +#define GLGEN_RSTAT_DEVSTATE_S 0 +#define GLGEN_RSTAT_DEVSTATE_M MAKEMASK(0x3, 0) +#define GLGEN_RSTAT_RESET_TYPE_S 2 +#define GLGEN_RSTAT_RESET_TYPE_M MAKEMASK(0x3, 2) +#define GLGEN_RSTAT_CORERCNT_S 4 +#define GLGEN_RSTAT_CORERCNT_M MAKEMASK(0x3, 4) +#define GLGEN_RSTAT_GLOBRCNT_S 6 +#define GLGEN_RSTAT_GLOBRCNT_M MAKEMASK(0x3, 6) +#define GLGEN_RSTAT_EMPRCNT_S 8 +#define GLGEN_RSTAT_EMPRCNT_M MAKEMASK(0x3, 8) +#define GLGEN_RSTAT_TIME_TO_RST_S 10 +#define GLGEN_RSTAT_TIME_TO_RST_M MAKEMASK(0x3F, 10) +#define GLGEN_RSTAT_RTRIG_FLR_S 16 +#define GLGEN_RSTAT_RTRIG_FLR_M BIT(16) +#define GLGEN_RSTAT_RTRIG_ECC_S 17 +#define GLGEN_RSTAT_RTRIG_ECC_M BIT(17) +#define GLGEN_RSTAT_RTRIG_FW_AUX_S 18 +#define GLGEN_RSTAT_RTRIG_FW_AUX_M BIT(18) +#define GLGEN_RSTCTL 0x000B8180 /* Reset Source: POR */ +#define GLGEN_RSTCTL_GRSTDEL_S 0 +#define GLGEN_RSTCTL_GRSTDEL_M MAKEMASK(0x3F, 0) +#define GLGEN_RSTCTL_ECC_RST_ENA_S 8 +#define GLGEN_RSTCTL_ECC_RST_ENA_M BIT(8) +#define GLGEN_RSTCTL_ECC_RT_EN_S 30 +#define GLGEN_RSTCTL_ECC_RT_EN_M BIT(30) +#define GLGEN_RSTCTL_FLR_RT_EN_S 31 +#define GLGEN_RSTCTL_FLR_RT_EN_M BIT(31) +#define GLGEN_RTRIG 0x000B8190 /* Reset Source: CORER */ +#define GLGEN_RTRIG_CORER_S 0 +#define GLGEN_RTRIG_CORER_M BIT(0) +#define GLGEN_RTRIG_GLOBR_S 1 +#define GLGEN_RTRIG_GLOBR_M BIT(1) +#define GLGEN_RTRIG_EMPFWR_S 2 +#define GLGEN_RTRIG_EMPFWR_M BIT(2) +#define GLGEN_STAT 0x000B612C /* Reset Source: POR */ +#define GLGEN_STAT_RSVD4FW_S 0 +#define GLGEN_STAT_RSVD4FW_M MAKEMASK(0xFF, 0) +#define GLGEN_VFLRSTAT(_i) (0x00093A04 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLGEN_VFLRSTAT_MAX_INDEX 7 +#define GLGEN_VFLRSTAT_VFLRS_S 0 +#define GLGEN_VFLRSTAT_VFLRS_M MAKEMASK(0xFFFFFFFF, 0) +#define GLGEN_XLR_MSK2HLP_RDY 0x000939F0 /* Reset Source: CORER */ +#define GLGEN_XLR_MSK2HLP_RDY_GLGEN_XLR_MSK2HLP_RDY_S 0 +#define GLGEN_XLR_MSK2HLP_RDY_GLGEN_XLR_MSK2HLP_RDY_M BIT(0) +#define GLGEN_XLR_TRNS_WAIT_COUNT 0x000939EC /* Reset Source: CORER */ +#define GLGEN_XLR_TRNS_WAIT_COUNT_W_BTWN_TRNS_COUNT_S 0 +#define GLGEN_XLR_TRNS_WAIT_COUNT_W_BTWN_TRNS_COUNT_M MAKEMASK(0x1F, 0) +#define GLGEN_XLR_TRNS_WAIT_COUNT_W_PEND_TRNS_COUNT_S 8 +#define GLGEN_XLR_TRNS_WAIT_COUNT_W_PEND_TRNS_COUNT_M MAKEMASK(0xFF, 8) +#define GLVFGEN_TIMER 0x000B8214 /* Reset Source: POR */ +#define GLVFGEN_TIMER_GTIME_S 0 +#define GLVFGEN_TIMER_GTIME_M MAKEMASK(0xFFFFFFFF, 0) +#define PFGEN_CTRL 0x00091000 /* Reset Source: CORER */ +#define PFGEN_CTRL_PFSWR_S 0 +#define PFGEN_CTRL_PFSWR_M BIT(0) +#define PFGEN_DRUN 0x00091180 /* Reset Source: CORER */ +#define PFGEN_DRUN_DRVUNLD_S 0 +#define PFGEN_DRUN_DRVUNLD_M BIT(0) +#define PFGEN_PFRSTAT 0x00091080 /* Reset Source: CORER */ +#define PFGEN_PFRSTAT_PFRD_S 0 +#define PFGEN_PFRSTAT_PFRD_M BIT(0) +#define PFGEN_PORTNUM 0x001D2400 /* Reset Source: CORER */ +#define PFGEN_PORTNUM_PORT_NUM_S 0 +#define 
PFGEN_PORTNUM_PORT_NUM_M MAKEMASK(0x7, 0) +#define PFGEN_STATE 0x00088000 /* Reset Source: CORER */ +#define PFGEN_STATE_PFPEEN_S 0 +#define PFGEN_STATE_PFPEEN_M BIT(0) +#define PFGEN_STATE_RSVD_S 1 +#define PFGEN_STATE_RSVD_M BIT(1) +#define PFGEN_STATE_PFLINKEN_S 2 +#define PFGEN_STATE_PFLINKEN_M BIT(2) +#define PFGEN_STATE_PFSCEN_S 3 +#define PFGEN_STATE_PFSCEN_M BIT(3) +#define PRT_TCVMLR_DRAIN_CNTR 0x000A21C0 /* Reset Source: CORER */ +#define PRT_TCVMLR_DRAIN_CNTR_CNTR_S 0 +#define PRT_TCVMLR_DRAIN_CNTR_CNTR_M MAKEMASK(0x3FFF, 0) +#define PRTGEN_CNF 0x000B8120 /* Reset Source: POR */ +#define PRTGEN_CNF_PORT_DIS_S 0 +#define PRTGEN_CNF_PORT_DIS_M BIT(0) +#define PRTGEN_CNF_ALLOW_PORT_DIS_S 1 +#define PRTGEN_CNF_ALLOW_PORT_DIS_M BIT(1) +#define PRTGEN_CNF_EMP_PORT_DIS_S 2 +#define PRTGEN_CNF_EMP_PORT_DIS_M BIT(2) +#define PRTGEN_CNF2 0x000B8160 /* Reset Source: POR */ +#define PRTGEN_CNF2_ACTIVATE_PORT_LINK_S 0 +#define PRTGEN_CNF2_ACTIVATE_PORT_LINK_M BIT(0) +#define PRTGEN_CNF3 0x000B8280 /* Reset Source: POR */ +#define PRTGEN_CNF3_PORT_STAGERING_EN_S 0 +#define PRTGEN_CNF3_PORT_STAGERING_EN_M BIT(0) +#define PRTGEN_STATUS 0x000B8100 /* Reset Source: POR */ +#define PRTGEN_STATUS_PORT_VALID_S 0 +#define PRTGEN_STATUS_PORT_VALID_M BIT(0) +#define PRTGEN_STATUS_PORT_ACTIVE_S 1 +#define PRTGEN_STATUS_PORT_ACTIVE_M BIT(1) +#define VFGEN_RSTAT(_VF) (0x00074000 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: VFR */ +#define VFGEN_RSTAT_MAX_INDEX 255 +#define VFGEN_RSTAT_VFR_STATE_S 0 +#define VFGEN_RSTAT_VFR_STATE_M MAKEMASK(0x3, 0) +#define VPGEN_VFRSTAT(_VF) (0x00090800 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: CORER */ +#define VPGEN_VFRSTAT_MAX_INDEX 255 +#define VPGEN_VFRSTAT_VFRD_S 0 +#define VPGEN_VFRSTAT_VFRD_M BIT(0) +#define VPGEN_VFRTRIG(_VF) (0x00090000 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: CORER */ +#define VPGEN_VFRTRIG_MAX_INDEX 255 +#define VPGEN_VFRTRIG_VFSWR_S 0 +#define VPGEN_VFRTRIG_VFSWR_M BIT(0) +#define VSIGEN_RSTAT(_VSI) (0x00092800 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: CORER */ +#define VSIGEN_RSTAT_MAX_INDEX 767 +#define VSIGEN_RSTAT_VMRD_S 0 +#define VSIGEN_RSTAT_VMRD_M BIT(0) +#define VSIGEN_RTRIG(_VSI) (0x00091800 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: CORER */ +#define VSIGEN_RTRIG_MAX_INDEX 767 +#define VSIGEN_RTRIG_VMSWR_S 0 +#define VSIGEN_RTRIG_VMSWR_M BIT(0) +#define GLHMC_APBVTINUSEBASE(_i) (0x00524A00 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLHMC_APBVTINUSEBASE_MAX_INDEX 7 +#define GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_S 0 +#define GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_M MAKEMASK(0xFFFFFF, 0) +#define GLHMC_CEQPART(_i) (0x005031C0 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLHMC_CEQPART_MAX_INDEX 7 +#define GLHMC_CEQPART_PMCEQBASE_S 0 +#define GLHMC_CEQPART_PMCEQBASE_M MAKEMASK(0x3FF, 0) +#define GLHMC_CEQPART_PMCEQSIZE_S 16 +#define GLHMC_CEQPART_PMCEQSIZE_M MAKEMASK(0x3FF, 16) +#define GLHMC_DBCQMAX 0x005220F0 /* Reset Source: CORER */ +#define GLHMC_DBCQMAX_GLHMC_DBCQMAX_S 0 +#define GLHMC_DBCQMAX_GLHMC_DBCQMAX_M MAKEMASK(0xFFFFF, 0) +#define GLHMC_DBCQPART(_i) (0x00503180 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLHMC_DBCQPART_MAX_INDEX 7 +#define GLHMC_DBCQPART_PMDBCQBASE_S 0 +#define GLHMC_DBCQPART_PMDBCQBASE_M MAKEMASK(0x3FFF, 0) +#define GLHMC_DBCQPART_PMDBCQSIZE_S 16 +#define GLHMC_DBCQPART_PMDBCQSIZE_M MAKEMASK(0x7FFF, 16) +#define GLHMC_DBQPMAX 0x005220EC /* Reset Source: CORER */ +#define GLHMC_DBQPMAX_GLHMC_DBQPMAX_S 0 +#define 
GLHMC_DBQPMAX_GLHMC_DBQPMAX_M MAKEMASK(0x7FFFF, 0) +#define GLHMC_DBQPPART(_i) (0x005044C0 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLHMC_DBQPPART_MAX_INDEX 7 +#define GLHMC_DBQPPART_PMDBQPBASE_S 0 +#define GLHMC_DBQPPART_PMDBQPBASE_M MAKEMASK(0x3FFF, 0) +#define GLHMC_DBQPPART_PMDBQPSIZE_S 16 +#define GLHMC_DBQPPART_PMDBQPSIZE_M MAKEMASK(0x7FFF, 16) +#define GLHMC_FSIAVBASE(_i) (0x00525600 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLHMC_FSIAVBASE_MAX_INDEX 7 +#define GLHMC_FSIAVBASE_FPMFSIAVBASE_S 0 +#define GLHMC_FSIAVBASE_FPMFSIAVBASE_M MAKEMASK(0xFFFFFF, 0) +#define GLHMC_FSIAVCNT(_i) (0x00525700 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLHMC_FSIAVCNT_MAX_INDEX 7 +#define GLHMC_FSIAVCNT_FPMFSIAVCNT_S 0 +#define GLHMC_FSIAVCNT_FPMFSIAVCNT_M MAKEMASK(0x1FFFFFFF, 0) +#define GLHMC_FSIAVMAX 0x00522068 /* Reset Source: CORER */ +#define GLHMC_FSIAVMAX_PMFSIAVMAX_S 0 +#define GLHMC_FSIAVMAX_PMFSIAVMAX_M MAKEMASK(0x3FFFF, 0) +#define GLHMC_FSIAVOBJSZ 0x00522064 /* Reset Source: CORER */ +#define GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_S 0 +#define GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_M MAKEMASK(0xF, 0) +#define GLHMC_FSIMCBASE(_i) (0x00526000 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLHMC_FSIMCBASE_MAX_INDEX 7 +#define GLHMC_FSIMCBASE_FPMFSIMCBASE_S 0 +#define GLHMC_FSIMCBASE_FPMFSIMCBASE_M MAKEMASK(0xFFFFFF, 0) +#define GLHMC_FSIMCCNT(_i) (0x00526100 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLHMC_FSIMCCNT_MAX_INDEX 7 +#define GLHMC_FSIMCCNT_FPMFSIMCSZ_S 0 +#define GLHMC_FSIMCCNT_FPMFSIMCSZ_M MAKEMASK(0x1FFFFFFF, 0) +#define GLHMC_FSIMCMAX 0x00522060 /* Reset Source: CORER */ +#define GLHMC_FSIMCMAX_PMFSIMCMAX_S 0 +#define GLHMC_FSIMCMAX_PMFSIMCMAX_M MAKEMASK(0x3FFF, 0) +#define GLHMC_FSIMCOBJSZ 0x0052205C /* Reset Source: CORER */ +#define GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_S 0 +#define GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_M MAKEMASK(0xF, 0) +#define GLHMC_FWPDINV 0x0052207C /* Reset Source: CORER */ +#define GLHMC_FWPDINV_PMSDIDX_S 0 +#define GLHMC_FWPDINV_PMSDIDX_M MAKEMASK(0xFFF, 0) +#define GLHMC_FWPDINV_PMSDPARTSEL_S 15 +#define GLHMC_FWPDINV_PMSDPARTSEL_M BIT(15) +#define GLHMC_FWPDINV_PMPDIDX_S 16 +#define GLHMC_FWPDINV_PMPDIDX_M MAKEMASK(0x1FF, 16) +#define GLHMC_FWPDINV_FPMAT 0x0010207C /* Reset Source: CORER */ +#define GLHMC_FWPDINV_FPMAT_PMSDIDX_S 0 +#define GLHMC_FWPDINV_FPMAT_PMSDIDX_M MAKEMASK(0xFFF, 0) +#define GLHMC_FWPDINV_FPMAT_PMSDPARTSEL_S 15 +#define GLHMC_FWPDINV_FPMAT_PMSDPARTSEL_M BIT(15) +#define GLHMC_FWPDINV_FPMAT_PMPDIDX_S 16 +#define GLHMC_FWPDINV_FPMAT_PMPDIDX_M MAKEMASK(0x1FF, 16) +#define GLHMC_FWSDDATAHIGH 0x00522078 /* Reset Source: CORER */ +#define GLHMC_FWSDDATAHIGH_PMSDDATAHIGH_S 0 +#define GLHMC_FWSDDATAHIGH_PMSDDATAHIGH_M MAKEMASK(0xFFFFFFFF, 0) +#define GLHMC_FWSDDATAHIGH_FPMAT 0x00102078 /* Reset Source: CORER */ +#define GLHMC_FWSDDATAHIGH_FPMAT_PMSDDATAHIGH_S 0 +#define GLHMC_FWSDDATAHIGH_FPMAT_PMSDDATAHIGH_M MAKEMASK(0xFFFFFFFF, 0) +#define GLHMC_FWSDDATALOW 0x00522074 /* Reset Source: CORER */ +#define GLHMC_FWSDDATALOW_PMSDVALID_S 0 +#define GLHMC_FWSDDATALOW_PMSDVALID_M BIT(0) +#define GLHMC_FWSDDATALOW_PMSDTYPE_S 1 +#define GLHMC_FWSDDATALOW_PMSDTYPE_M BIT(1) +#define GLHMC_FWSDDATALOW_PMSDBPCOUNT_S 2 +#define GLHMC_FWSDDATALOW_PMSDBPCOUNT_M MAKEMASK(0x3FF, 2) +#define GLHMC_FWSDDATALOW_PMSDDATALOW_S 12 +#define GLHMC_FWSDDATALOW_PMSDDATALOW_M MAKEMASK(0xFFFFF, 12) +#define GLHMC_FWSDDATALOW_FPMAT 0x00102074 /* Reset Source: CORER */ +#define 
GLHMC_FWSDDATALOW_FPMAT_PMSDVALID_S 0 +#define GLHMC_FWSDDATALOW_FPMAT_PMSDVALID_M BIT(0) +#define GLHMC_FWSDDATALOW_FPMAT_PMSDTYPE_S 1 +#define GLHMC_FWSDDATALOW_FPMAT_PMSDTYPE_M BIT(1) +#define GLHMC_FWSDDATALOW_FPMAT_PMSDBPCOUNT_S 2 +#define GLHMC_FWSDDATALOW_FPMAT_PMSDBPCOUNT_M MAKEMASK(0x3FF, 2) +#define GLHMC_FWSDDATALOW_FPMAT_PMSDDATALOW_S 12 +#define GLHMC_FWSDDATALOW_FPMAT_PMSDDATALOW_M MAKEMASK(0xFFFFF, 12) +#define GLHMC_PEARPBASE(_i) (0x00524800 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLHMC_PEARPBASE_MAX_INDEX 7 +#define GLHMC_PEARPBASE_FPMPEARPBASE_S 0 +#define GLHMC_PEARPBASE_FPMPEARPBASE_M MAKEMASK(0xFFFFFF, 0) +#define GLHMC_PEARPCNT(_i) (0x00524900 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLHMC_PEARPCNT_MAX_INDEX 7 +#define GLHMC_PEARPCNT_FPMPEARPCNT_S 0 +#define GLHMC_PEARPCNT_FPMPEARPCNT_M MAKEMASK(0x1FFFFFFF, 0) +#define GLHMC_PEARPMAX 0x00522038 /* Reset Source: CORER */ +#define GLHMC_PEARPMAX_PMPEARPMAX_S 0 +#define GLHMC_PEARPMAX_PMPEARPMAX_M MAKEMASK(0x1FFFF, 0) +#define GLHMC_PEARPOBJSZ 0x00522034 /* Reset Source: CORER */ +#define GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_S 0 +#define GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_M MAKEMASK(0x7, 0) +#define GLHMC_PECQBASE(_i) (0x00524200 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLHMC_PECQBASE_MAX_INDEX 7 +#define GLHMC_PECQBASE_FPMPECQBASE_S 0 +#define GLHMC_PECQBASE_FPMPECQBASE_M MAKEMASK(0xFFFFFF, 0) +#define GLHMC_PECQCNT(_i) (0x00524300 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLHMC_PECQCNT_MAX_INDEX 7 +#define GLHMC_PECQCNT_FPMPECQCNT_S 0 +#define GLHMC_PECQCNT_FPMPECQCNT_M MAKEMASK(0x1FFFFFFF, 0) +#define GLHMC_PECQOBJSZ 0x00522020 /* Reset Source: CORER */ +#define GLHMC_PECQOBJSZ_PMPECQOBJSZ_S 0 +#define GLHMC_PECQOBJSZ_PMPECQOBJSZ_M MAKEMASK(0xF, 0) +#define GLHMC_PEHDRBASE(_i) (0x00526200 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLHMC_PEHDRBASE_MAX_INDEX 7 +#define GLHMC_PEHDRBASE_GLHMC_PEHDRBASE_S 0 +#define GLHMC_PEHDRBASE_GLHMC_PEHDRBASE_M MAKEMASK(0xFFFFFFFF, 0) +#define GLHMC_PEHDRCNT(_i) (0x00526300 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLHMC_PEHDRCNT_MAX_INDEX 7 +#define GLHMC_PEHDRCNT_GLHMC_PEHDRCNT_S 0 +#define GLHMC_PEHDRCNT_GLHMC_PEHDRCNT_M MAKEMASK(0xFFFFFFFF, 0) +#define GLHMC_PEHDRMAX 0x00522008 /* Reset Source: CORER */ +#define GLHMC_PEHDRMAX_PMPEHDRMAX_S 0 +#define GLHMC_PEHDRMAX_PMPEHDRMAX_M MAKEMASK(0x7FFFF, 0) +#define GLHMC_PEHDRMAX_RSVD_S 19 +#define GLHMC_PEHDRMAX_RSVD_M MAKEMASK(0x1FFF, 19) +#define GLHMC_PEHDROBJSZ 0x00522004 /* Reset Source: CORER */ +#define GLHMC_PEHDROBJSZ_PMPEHDROBJSZ_S 0 +#define GLHMC_PEHDROBJSZ_PMPEHDROBJSZ_M MAKEMASK(0xF, 0) +#define GLHMC_PEHDROBJSZ_RSVD_S 4 +#define GLHMC_PEHDROBJSZ_RSVD_M MAKEMASK(0xFFFFFFF, 4) +#define GLHMC_PEHTCNT(_i) (0x00524700 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLHMC_PEHTCNT_MAX_INDEX 7 +#define GLHMC_PEHTCNT_FPMPEHTCNT_S 0 +#define GLHMC_PEHTCNT_FPMPEHTCNT_M MAKEMASK(0x1FFFFFFF, 0) +#define GLHMC_PEHTCNT_FPMAT(_i) (0x00104700 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLHMC_PEHTCNT_FPMAT_MAX_INDEX 7 +#define GLHMC_PEHTCNT_FPMAT_FPMPEHTCNT_S 0 +#define GLHMC_PEHTCNT_FPMAT_FPMPEHTCNT_M MAKEMASK(0x1FFFFFFF, 0) +#define GLHMC_PEHTEBASE(_i) (0x00524600 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLHMC_PEHTEBASE_MAX_INDEX 7 +#define GLHMC_PEHTEBASE_FPMPEHTEBASE_S 0 +#define GLHMC_PEHTEBASE_FPMPEHTEBASE_M MAKEMASK(0xFFFFFF, 0) +#define 
GLHMC_PEHTEBASE_FPMAT(_i) (0x00104600 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLHMC_PEHTEBASE_FPMAT_MAX_INDEX 7 +#define GLHMC_PEHTEBASE_FPMAT_FPMPEHTEBASE_S 0 +#define GLHMC_PEHTEBASE_FPMAT_FPMPEHTEBASE_M MAKEMASK(0xFFFFFF, 0) +#define GLHMC_PEHTEOBJSZ 0x0052202C /* Reset Source: CORER */ +#define GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_S 0 +#define GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_M MAKEMASK(0xF, 0) +#define GLHMC_PEHTEOBJSZ_FPMAT 0x0010202C /* Reset Source: CORER */ +#define GLHMC_PEHTEOBJSZ_FPMAT_PMPEHTEOBJSZ_S 0 +#define GLHMC_PEHTEOBJSZ_FPMAT_PMPEHTEOBJSZ_M MAKEMASK(0xF, 0) +#define GLHMC_PEHTMAX 0x00522030 /* Reset Source: CORER */ +#define GLHMC_PEHTMAX_PMPEHTMAX_S 0 +#define GLHMC_PEHTMAX_PMPEHTMAX_M MAKEMASK(0x1FFFFF, 0) +#define GLHMC_PEHTMAX_FPMAT 0x00102030 /* Reset Source: CORER */ +#define GLHMC_PEHTMAX_FPMAT_PMPEHTMAX_S 0 +#define GLHMC_PEHTMAX_FPMAT_PMPEHTMAX_M MAKEMASK(0x1FFFFF, 0) +#define GLHMC_PEMDBASE(_i) (0x00526400 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLHMC_PEMDBASE_MAX_INDEX 7 +#define GLHMC_PEMDBASE_GLHMC_PEMDBASE_S 0 +#define GLHMC_PEMDBASE_GLHMC_PEMDBASE_M MAKEMASK(0xFFFFFFFF, 0) +#define GLHMC_PEMDCNT(_i) (0x00526500 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLHMC_PEMDCNT_MAX_INDEX 7 +#define GLHMC_PEMDCNT_GLHMC_PEMDCNT_S 0 +#define GLHMC_PEMDCNT_GLHMC_PEMDCNT_M MAKEMASK(0xFFFFFFFF, 0) +#define GLHMC_PEMDMAX 0x00522010 /* Reset Source: CORER */ +#define GLHMC_PEMDMAX_PMPEMDMAX_S 0 +#define GLHMC_PEMDMAX_PMPEMDMAX_M MAKEMASK(0xFFFFFF, 0) +#define GLHMC_PEMDMAX_RSVD_S 24 +#define GLHMC_PEMDMAX_RSVD_M MAKEMASK(0xFF, 24) +#define GLHMC_PEMDOBJSZ 0x0052200C /* Reset Source: CORER */ +#define GLHMC_PEMDOBJSZ_PMPEMDOBJSZ_S 0 +#define GLHMC_PEMDOBJSZ_PMPEMDOBJSZ_M MAKEMASK(0xF, 0) +#define GLHMC_PEMDOBJSZ_RSVD_S 4 +#define GLHMC_PEMDOBJSZ_RSVD_M MAKEMASK(0xFFFFFFF, 4) +#define GLHMC_PEMRBASE(_i) (0x00524C00 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLHMC_PEMRBASE_MAX_INDEX 7 +#define GLHMC_PEMRBASE_FPMPEMRBASE_S 0 +#define GLHMC_PEMRBASE_FPMPEMRBASE_M MAKEMASK(0xFFFFFF, 0) +#define GLHMC_PEMRCNT(_i) (0x00524D00 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLHMC_PEMRCNT_MAX_INDEX 7 +#define GLHMC_PEMRCNT_FPMPEMRSZ_S 0 +#define GLHMC_PEMRCNT_FPMPEMRSZ_M MAKEMASK(0x1FFFFFFF, 0) +#define GLHMC_PEMRMAX 0x00522040 /* Reset Source: CORER */ +#define GLHMC_PEMRMAX_PMPEMRMAX_S 0 +#define GLHMC_PEMRMAX_PMPEMRMAX_M MAKEMASK(0x7FFFFF, 0) +#define GLHMC_PEMROBJSZ 0x0052203C /* Reset Source: CORER */ +#define GLHMC_PEMROBJSZ_PMPEMROBJSZ_S 0 +#define GLHMC_PEMROBJSZ_PMPEMROBJSZ_M MAKEMASK(0xF, 0) +#define GLHMC_PEOOISCBASE(_i) (0x00526600 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLHMC_PEOOISCBASE_MAX_INDEX 7 +#define GLHMC_PEOOISCBASE_GLHMC_PEOOISCBASE_S 0 +#define GLHMC_PEOOISCBASE_GLHMC_PEOOISCBASE_M MAKEMASK(0xFFFFFFFF, 0) +#define GLHMC_PEOOISCCNT(_i) (0x00526700 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLHMC_PEOOISCCNT_MAX_INDEX 7 +#define GLHMC_PEOOISCCNT_GLHMC_PEOOISCCNT_S 0 +#define GLHMC_PEOOISCCNT_GLHMC_PEOOISCCNT_M MAKEMASK(0xFFFFFFFF, 0) +#define GLHMC_PEOOISCFFLBASE(_i) (0x00526C00 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLHMC_PEOOISCFFLBASE_MAX_INDEX 7 +#define GLHMC_PEOOISCFFLBASE_GLHMC_PEOOISCFFLBASE_S 0 +#define GLHMC_PEOOISCFFLBASE_GLHMC_PEOOISCFFLBASE_M MAKEMASK(0xFFFFFFFF, 0) +#define GLHMC_PEOOISCFFLCNT_PMAT(_i) (0x00526D00 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ +#define 
GLHMC_PEOOISCFFLCNT_PMAT_MAX_INDEX 7 +#define GLHMC_PEOOISCFFLCNT_PMAT_FPMPEOOISCFLCNT_S 0 +#define GLHMC_PEOOISCFFLCNT_PMAT_FPMPEOOISCFLCNT_M MAKEMASK(0x1FFFFFFF, 0) +#define GLHMC_PEOOISCFFLMAX 0x005220A4 /* Reset Source: CORER */ +#define GLHMC_PEOOISCFFLMAX_PMPEOOISCFFLMAX_S 0 +#define GLHMC_PEOOISCFFLMAX_PMPEOOISCFFLMAX_M MAKEMASK(0x7FFFF, 0) +#define GLHMC_PEOOISCFFLMAX_RSVD_S 19 +#define GLHMC_PEOOISCFFLMAX_RSVD_M MAKEMASK(0x1FFF, 19) +#define GLHMC_PEOOISCMAX 0x00522018 /* Reset Source: CORER */ +#define GLHMC_PEOOISCMAX_PMPEOOISCMAX_S 0 +#define GLHMC_PEOOISCMAX_PMPEOOISCMAX_M MAKEMASK(0x7FFFF, 0) +#define GLHMC_PEOOISCMAX_RSVD_S 19 +#define GLHMC_PEOOISCMAX_RSVD_M MAKEMASK(0x1FFF, 19) +#define GLHMC_PEOOISCOBJSZ 0x00522014 /* Reset Source: CORER */ +#define GLHMC_PEOOISCOBJSZ_PMPEOOISCOBJSZ_S 0 +#define GLHMC_PEOOISCOBJSZ_PMPEOOISCOBJSZ_M MAKEMASK(0xF, 0) +#define GLHMC_PEOOISCOBJSZ_RSVD_S 4 +#define GLHMC_PEOOISCOBJSZ_RSVD_M MAKEMASK(0xFFFFFFF, 4) +#define GLHMC_PEPBLBASE(_i) (0x00525800 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLHMC_PEPBLBASE_MAX_INDEX 7 +#define GLHMC_PEPBLBASE_FPMPEPBLBASE_S 0 +#define GLHMC_PEPBLBASE_FPMPEPBLBASE_M MAKEMASK(0xFFFFFF, 0) +#define GLHMC_PEPBLCNT(_i) (0x00525900 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLHMC_PEPBLCNT_MAX_INDEX 7 +#define GLHMC_PEPBLCNT_FPMPEPBLCNT_S 0 +#define GLHMC_PEPBLCNT_FPMPEPBLCNT_M MAKEMASK(0x1FFFFFFF, 0) +#define GLHMC_PEPBLMAX 0x0052206C /* Reset Source: CORER */ +#define GLHMC_PEPBLMAX_PMPEPBLMAX_S 0 +#define GLHMC_PEPBLMAX_PMPEPBLMAX_M MAKEMASK(0x1FFFFFFF, 0) +#define GLHMC_PEQ1BASE(_i) (0x00525200 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLHMC_PEQ1BASE_MAX_INDEX 7 +#define GLHMC_PEQ1BASE_FPMPEQ1BASE_S 0 +#define GLHMC_PEQ1BASE_FPMPEQ1BASE_M MAKEMASK(0xFFFFFF, 0) +#define GLHMC_PEQ1CNT(_i) (0x00525300 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLHMC_PEQ1CNT_MAX_INDEX 7 +#define GLHMC_PEQ1CNT_FPMPEQ1CNT_S 0 +#define GLHMC_PEQ1CNT_FPMPEQ1CNT_M MAKEMASK(0x1FFFFFFF, 0) +#define GLHMC_PEQ1FLBASE(_i) (0x00525400 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLHMC_PEQ1FLBASE_MAX_INDEX 7 +#define GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_S 0 +#define GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_M MAKEMASK(0xFFFFFF, 0) +#define GLHMC_PEQ1FLMAX 0x00522058 /* Reset Source: CORER */ +#define GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_S 0 +#define GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_M MAKEMASK(0x3FFFFFF, 0) +#define GLHMC_PEQ1MAX 0x00522054 /* Reset Source: CORER */ +#define GLHMC_PEQ1MAX_PMPEQ1MAX_S 0 +#define GLHMC_PEQ1MAX_PMPEQ1MAX_M MAKEMASK(0xFFFFFFF, 0) +#define GLHMC_PEQ1OBJSZ 0x00522050 /* Reset Source: CORER */ +#define GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_S 0 +#define GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_M MAKEMASK(0xF, 0) +#define GLHMC_PEQPBASE(_i) (0x00524000 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLHMC_PEQPBASE_MAX_INDEX 7 +#define GLHMC_PEQPBASE_FPMPEQPBASE_S 0 +#define GLHMC_PEQPBASE_FPMPEQPBASE_M MAKEMASK(0xFFFFFF, 0) +#define GLHMC_PEQPCNT(_i) (0x00524100 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLHMC_PEQPCNT_MAX_INDEX 7 +#define GLHMC_PEQPCNT_FPMPEQPCNT_S 0 +#define GLHMC_PEQPCNT_FPMPEQPCNT_M MAKEMASK(0x1FFFFFFF, 0) +#define GLHMC_PEQPOBJSZ 0x0052201C /* Reset Source: CORER */ +#define GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_S 0 +#define GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_M MAKEMASK(0xF, 0) +#define GLHMC_PERRFBASE(_i) (0x00526800 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLHMC_PERRFBASE_MAX_INDEX 7 +#define 
GLHMC_PERRFBASE_GLHMC_PERRFBASE_S 0 +#define GLHMC_PERRFBASE_GLHMC_PERRFBASE_M MAKEMASK(0xFFFFFFFF, 0) +#define GLHMC_PERRFCNT(_i) (0x00526900 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLHMC_PERRFCNT_MAX_INDEX 7 +#define GLHMC_PERRFCNT_GLHMC_PERRFCNT_S 0 +#define GLHMC_PERRFCNT_GLHMC_PERRFCNT_M MAKEMASK(0xFFFFFFFF, 0) +#define GLHMC_PERRFFLBASE(_i) (0x00526A00 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLHMC_PERRFFLBASE_MAX_INDEX 7 +#define GLHMC_PERRFFLBASE_GLHMC_PERRFFLBASE_S 0 +#define GLHMC_PERRFFLBASE_GLHMC_PERRFFLBASE_M MAKEMASK(0xFFFFFFFF, 0) +#define GLHMC_PERRFFLCNT_PMAT(_i) (0x00526B00 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLHMC_PERRFFLCNT_PMAT_MAX_INDEX 7 +#define GLHMC_PERRFFLCNT_PMAT_FPMPERRFFLCNT_S 0 +#define GLHMC_PERRFFLCNT_PMAT_FPMPERRFFLCNT_M MAKEMASK(0x1FFFFFFF, 0) +#define GLHMC_PERRFFLMAX 0x005220A0 /* Reset Source: CORER */ +#define GLHMC_PERRFFLMAX_PMPERRFFLMAX_S 0 +#define GLHMC_PERRFFLMAX_PMPERRFFLMAX_M MAKEMASK(0x3FFFFFF, 0) +#define GLHMC_PERRFFLMAX_RSVD_S 26 +#define GLHMC_PERRFFLMAX_RSVD_M MAKEMASK(0x3F, 26) +#define GLHMC_PERRFMAX 0x0052209C /* Reset Source: CORER */ +#define GLHMC_PERRFMAX_PMPERRFMAX_S 0 +#define GLHMC_PERRFMAX_PMPERRFMAX_M MAKEMASK(0xFFFFFFF, 0) +#define GLHMC_PERRFMAX_RSVD_S 28 +#define GLHMC_PERRFMAX_RSVD_M MAKEMASK(0xF, 28) +#define GLHMC_PERRFOBJSZ 0x00522098 /* Reset Source: CORER */ +#define GLHMC_PERRFOBJSZ_PMPERRFOBJSZ_S 0 +#define GLHMC_PERRFOBJSZ_PMPERRFOBJSZ_M MAKEMASK(0xF, 0) +#define GLHMC_PERRFOBJSZ_RSVD_S 4 +#define GLHMC_PERRFOBJSZ_RSVD_M MAKEMASK(0xFFFFFFF, 4) +#define GLHMC_PETIMERBASE(_i) (0x00525A00 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLHMC_PETIMERBASE_MAX_INDEX 7 +#define GLHMC_PETIMERBASE_FPMPETIMERBASE_S 0 +#define GLHMC_PETIMERBASE_FPMPETIMERBASE_M MAKEMASK(0xFFFFFF, 0) +#define GLHMC_PETIMERCNT(_i) (0x00525B00 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLHMC_PETIMERCNT_MAX_INDEX 7 +#define GLHMC_PETIMERCNT_FPMPETIMERCNT_S 0 +#define GLHMC_PETIMERCNT_FPMPETIMERCNT_M MAKEMASK(0x1FFFFFFF, 0) +#define GLHMC_PETIMERMAX 0x00522084 /* Reset Source: CORER */ +#define GLHMC_PETIMERMAX_PMPETIMERMAX_S 0 +#define GLHMC_PETIMERMAX_PMPETIMERMAX_M MAKEMASK(0x1FFFFFFF, 0) +#define GLHMC_PETIMEROBJSZ 0x00522080 /* Reset Source: CORER */ +#define GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_S 0 +#define GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_M MAKEMASK(0xF, 0) +#define GLHMC_PEXFBASE(_i) (0x00524E00 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLHMC_PEXFBASE_MAX_INDEX 7 +#define GLHMC_PEXFBASE_FPMPEXFBASE_S 0 +#define GLHMC_PEXFBASE_FPMPEXFBASE_M MAKEMASK(0xFFFFFF, 0) +#define GLHMC_PEXFCNT(_i) (0x00524F00 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLHMC_PEXFCNT_MAX_INDEX 7 +#define GLHMC_PEXFCNT_FPMPEXFCNT_S 0 +#define GLHMC_PEXFCNT_FPMPEXFCNT_M MAKEMASK(0x1FFFFFFF, 0) +#define GLHMC_PEXFFLBASE(_i) (0x00525000 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLHMC_PEXFFLBASE_MAX_INDEX 7 +#define GLHMC_PEXFFLBASE_FPMPEXFFLBASE_S 0 +#define GLHMC_PEXFFLBASE_FPMPEXFFLBASE_M MAKEMASK(0xFFFFFF, 0) +#define GLHMC_PEXFFLMAX 0x0052204C /* Reset Source: CORER */ +#define GLHMC_PEXFFLMAX_PMPEXFFLMAX_S 0 +#define GLHMC_PEXFFLMAX_PMPEXFFLMAX_M MAKEMASK(0xFFFFFFF, 0) +#define GLHMC_PEXFMAX 0x00522048 /* Reset Source: CORER */ +#define GLHMC_PEXFMAX_PMPEXFMAX_S 0 +#define GLHMC_PEXFMAX_PMPEXFMAX_M MAKEMASK(0xFFFFFFF, 0) +#define GLHMC_PEXFOBJSZ 0x00522044 /* Reset Source: CORER */ +#define 
GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_S 0 +#define GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_M MAKEMASK(0xF, 0) +#define GLHMC_PFPESDPART(_i) (0x00520880 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLHMC_PFPESDPART_MAX_INDEX 7 +#define GLHMC_PFPESDPART_PMSDBASE_S 0 +#define GLHMC_PFPESDPART_PMSDBASE_M MAKEMASK(0xFFF, 0) +#define GLHMC_PFPESDPART_PMSDSIZE_S 16 +#define GLHMC_PFPESDPART_PMSDSIZE_M MAKEMASK(0x1FFF, 16) +#define GLHMC_PFPESDPART_FPMAT(_i) (0x00100880 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLHMC_PFPESDPART_FPMAT_MAX_INDEX 7 +#define GLHMC_PFPESDPART_FPMAT_PMSDBASE_S 0 +#define GLHMC_PFPESDPART_FPMAT_PMSDBASE_M MAKEMASK(0xFFF, 0) +#define GLHMC_PFPESDPART_FPMAT_PMSDSIZE_S 16 +#define GLHMC_PFPESDPART_FPMAT_PMSDSIZE_M MAKEMASK(0x1FFF, 16) +#define GLHMC_SDPART(_i) (0x00520800 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLHMC_SDPART_MAX_INDEX 7 +#define GLHMC_SDPART_PMSDBASE_S 0 +#define GLHMC_SDPART_PMSDBASE_M MAKEMASK(0xFFF, 0) +#define GLHMC_SDPART_PMSDSIZE_S 16 +#define GLHMC_SDPART_PMSDSIZE_M MAKEMASK(0x1FFF, 16) +#define GLHMC_SDPART_FPMAT(_i) (0x00100800 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLHMC_SDPART_FPMAT_MAX_INDEX 7 +#define GLHMC_SDPART_FPMAT_PMSDBASE_S 0 +#define GLHMC_SDPART_FPMAT_PMSDBASE_M MAKEMASK(0xFFF, 0) +#define GLHMC_SDPART_FPMAT_PMSDSIZE_S 16 +#define GLHMC_SDPART_FPMAT_PMSDSIZE_M MAKEMASK(0x1FFF, 16) +#define GLHMC_VFAPBVTINUSEBASE(_i) (0x0052CA00 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ +#define GLHMC_VFAPBVTINUSEBASE_MAX_INDEX 31 +#define GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_S 0 +#define GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_M MAKEMASK(0xFFFFFF, 0) +#define GLHMC_VFCEQPART(_i) (0x00502F00 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ +#define GLHMC_VFCEQPART_MAX_INDEX 31 +#define GLHMC_VFCEQPART_PMCEQBASE_S 0 +#define GLHMC_VFCEQPART_PMCEQBASE_M MAKEMASK(0x3FF, 0) +#define GLHMC_VFCEQPART_PMCEQSIZE_S 16 +#define GLHMC_VFCEQPART_PMCEQSIZE_M MAKEMASK(0x3FF, 16) +#define GLHMC_VFDBCQPART(_i) (0x00502E00 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ +#define GLHMC_VFDBCQPART_MAX_INDEX 31 +#define GLHMC_VFDBCQPART_PMDBCQBASE_S 0 +#define GLHMC_VFDBCQPART_PMDBCQBASE_M MAKEMASK(0x3FFF, 0) +#define GLHMC_VFDBCQPART_PMDBCQSIZE_S 16 +#define GLHMC_VFDBCQPART_PMDBCQSIZE_M MAKEMASK(0x7FFF, 16) +#define GLHMC_VFDBQPPART(_i) (0x00504520 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ +#define GLHMC_VFDBQPPART_MAX_INDEX 31 +#define GLHMC_VFDBQPPART_PMDBQPBASE_S 0 +#define GLHMC_VFDBQPPART_PMDBQPBASE_M MAKEMASK(0x3FFF, 0) +#define GLHMC_VFDBQPPART_PMDBQPSIZE_S 16 +#define GLHMC_VFDBQPPART_PMDBQPSIZE_M MAKEMASK(0x7FFF, 16) +#define GLHMC_VFFSIAVBASE(_i) (0x0052D600 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ +#define GLHMC_VFFSIAVBASE_MAX_INDEX 31 +#define GLHMC_VFFSIAVBASE_FPMFSIAVBASE_S 0 +#define GLHMC_VFFSIAVBASE_FPMFSIAVBASE_M MAKEMASK(0xFFFFFF, 0) +#define GLHMC_VFFSIAVCNT(_i) (0x0052D700 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ +#define GLHMC_VFFSIAVCNT_MAX_INDEX 31 +#define GLHMC_VFFSIAVCNT_FPMFSIAVCNT_S 0 +#define GLHMC_VFFSIAVCNT_FPMFSIAVCNT_M MAKEMASK(0x1FFFFFFF, 0) +#define GLHMC_VFFSIMCBASE(_i) (0x0052E000 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ +#define GLHMC_VFFSIMCBASE_MAX_INDEX 31 +#define GLHMC_VFFSIMCBASE_FPMFSIMCBASE_S 0 +#define GLHMC_VFFSIMCBASE_FPMFSIMCBASE_M MAKEMASK(0xFFFFFF, 0) +#define GLHMC_VFFSIMCCNT(_i) (0x0052E100 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ +#define 
GLHMC_VFFSIMCCNT_MAX_INDEX 31 +#define GLHMC_VFFSIMCCNT_FPMFSIMCSZ_S 0 +#define GLHMC_VFFSIMCCNT_FPMFSIMCSZ_M MAKEMASK(0x1FFFFFFF, 0) +#define GLHMC_VFPDINV(_i) (0x00528300 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ +#define GLHMC_VFPDINV_MAX_INDEX 31 +#define GLHMC_VFPDINV_PMSDIDX_S 0 +#define GLHMC_VFPDINV_PMSDIDX_M MAKEMASK(0xFFF, 0) +#define GLHMC_VFPDINV_PMSDPARTSEL_S 15 +#define GLHMC_VFPDINV_PMSDPARTSEL_M BIT(15) +#define GLHMC_VFPDINV_PMPDIDX_S 16 +#define GLHMC_VFPDINV_PMPDIDX_M MAKEMASK(0x1FF, 16) +#define GLHMC_VFPDINV_FPMAT(_i) (0x00108300 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ +#define GLHMC_VFPDINV_FPMAT_MAX_INDEX 31 +#define GLHMC_VFPDINV_FPMAT_PMSDIDX_S 0 +#define GLHMC_VFPDINV_FPMAT_PMSDIDX_M MAKEMASK(0xFFF, 0) +#define GLHMC_VFPDINV_FPMAT_PMSDPARTSEL_S 15 +#define GLHMC_VFPDINV_FPMAT_PMSDPARTSEL_M BIT(15) +#define GLHMC_VFPDINV_FPMAT_PMPDIDX_S 16 +#define GLHMC_VFPDINV_FPMAT_PMPDIDX_M MAKEMASK(0x1FF, 16) +#define GLHMC_VFPEARPBASE(_i) (0x0052C800 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ +#define GLHMC_VFPEARPBASE_MAX_INDEX 31 +#define GLHMC_VFPEARPBASE_FPMPEARPBASE_S 0 +#define GLHMC_VFPEARPBASE_FPMPEARPBASE_M MAKEMASK(0xFFFFFF, 0) +#define GLHMC_VFPEARPCNT(_i) (0x0052C900 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ +#define GLHMC_VFPEARPCNT_MAX_INDEX 31 +#define GLHMC_VFPEARPCNT_FPMPEARPCNT_S 0 +#define GLHMC_VFPEARPCNT_FPMPEARPCNT_M MAKEMASK(0x1FFFFFFF, 0) +#define GLHMC_VFPECQBASE(_i) (0x0052C200 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ +#define GLHMC_VFPECQBASE_MAX_INDEX 31 +#define GLHMC_VFPECQBASE_FPMPECQBASE_S 0 +#define GLHMC_VFPECQBASE_FPMPECQBASE_M MAKEMASK(0xFFFFFF, 0) +#define GLHMC_VFPECQCNT(_i) (0x0052C300 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ +#define GLHMC_VFPECQCNT_MAX_INDEX 31 +#define GLHMC_VFPECQCNT_FPMPECQCNT_S 0 +#define GLHMC_VFPECQCNT_FPMPECQCNT_M MAKEMASK(0x1FFFFFFF, 0) +#define GLHMC_VFPEHDRBASE(_i) (0x0052E200 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ +#define GLHMC_VFPEHDRBASE_MAX_INDEX 31 +#define GLHMC_VFPEHDRBASE_GLHMC_PEHDRBASE_S 0 +#define GLHMC_VFPEHDRBASE_GLHMC_PEHDRBASE_M MAKEMASK(0xFFFFFFFF, 0) +#define GLHMC_VFPEHDRCNT(_i) (0x0052E300 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ +#define GLHMC_VFPEHDRCNT_MAX_INDEX 31 +#define GLHMC_VFPEHDRCNT_GLHMC_PEHDRCNT_S 0 +#define GLHMC_VFPEHDRCNT_GLHMC_PEHDRCNT_M MAKEMASK(0xFFFFFFFF, 0) +#define GLHMC_VFPEHTCNT(_i) (0x0052C700 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ +#define GLHMC_VFPEHTCNT_MAX_INDEX 31 +#define GLHMC_VFPEHTCNT_FPMPEHTCNT_S 0 +#define GLHMC_VFPEHTCNT_FPMPEHTCNT_M MAKEMASK(0x1FFFFFFF, 0) +#define GLHMC_VFPEHTCNT_FPMAT(_i) (0x0010C700 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ +#define GLHMC_VFPEHTCNT_FPMAT_MAX_INDEX 31 +#define GLHMC_VFPEHTCNT_FPMAT_FPMPEHTCNT_S 0 +#define GLHMC_VFPEHTCNT_FPMAT_FPMPEHTCNT_M MAKEMASK(0x1FFFFFFF, 0) +#define GLHMC_VFPEHTEBASE(_i) (0x0052C600 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ +#define GLHMC_VFPEHTEBASE_MAX_INDEX 31 +#define GLHMC_VFPEHTEBASE_FPMPEHTEBASE_S 0 +#define GLHMC_VFPEHTEBASE_FPMPEHTEBASE_M MAKEMASK(0xFFFFFF, 0) +#define GLHMC_VFPEHTEBASE_FPMAT(_i) (0x0010C600 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ +#define GLHMC_VFPEHTEBASE_FPMAT_MAX_INDEX 31 +#define GLHMC_VFPEHTEBASE_FPMAT_FPMPEHTEBASE_S 0 +#define GLHMC_VFPEHTEBASE_FPMAT_FPMPEHTEBASE_M MAKEMASK(0xFFFFFF, 0) +#define GLHMC_VFPEMDBASE(_i) (0x0052E400 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ 
+#define GLHMC_VFPEMDBASE_MAX_INDEX 31 +#define GLHMC_VFPEMDBASE_GLHMC_PEMDBASE_S 0 +#define GLHMC_VFPEMDBASE_GLHMC_PEMDBASE_M MAKEMASK(0xFFFFFFFF, 0) +#define GLHMC_VFPEMDCNT(_i) (0x0052E500 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ +#define GLHMC_VFPEMDCNT_MAX_INDEX 31 +#define GLHMC_VFPEMDCNT_GLHMC_PEMDCNT_S 0 +#define GLHMC_VFPEMDCNT_GLHMC_PEMDCNT_M MAKEMASK(0xFFFFFFFF, 0) +#define GLHMC_VFPEMRBASE(_i) (0x0052CC00 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ +#define GLHMC_VFPEMRBASE_MAX_INDEX 31 +#define GLHMC_VFPEMRBASE_FPMPEMRBASE_S 0 +#define GLHMC_VFPEMRBASE_FPMPEMRBASE_M MAKEMASK(0xFFFFFF, 0) +#define GLHMC_VFPEMRCNT(_i) (0x0052CD00 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ +#define GLHMC_VFPEMRCNT_MAX_INDEX 31 +#define GLHMC_VFPEMRCNT_FPMPEMRSZ_S 0 +#define GLHMC_VFPEMRCNT_FPMPEMRSZ_M MAKEMASK(0x1FFFFFFF, 0) +#define GLHMC_VFPEOOISCBASE(_i) (0x0052E600 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ +#define GLHMC_VFPEOOISCBASE_MAX_INDEX 31 +#define GLHMC_VFPEOOISCBASE_GLHMC_PEOOISCBASE_S 0 +#define GLHMC_VFPEOOISCBASE_GLHMC_PEOOISCBASE_M MAKEMASK(0xFFFFFFFF, 0) +#define GLHMC_VFPEOOISCCNT(_i) (0x0052E700 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ +#define GLHMC_VFPEOOISCCNT_MAX_INDEX 31 +#define GLHMC_VFPEOOISCCNT_GLHMC_PEOOISCCNT_S 0 +#define GLHMC_VFPEOOISCCNT_GLHMC_PEOOISCCNT_M MAKEMASK(0xFFFFFFFF, 0) +#define GLHMC_VFPEOOISCFFLBASE(_i) (0x0052EC00 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ +#define GLHMC_VFPEOOISCFFLBASE_MAX_INDEX 31 +#define GLHMC_VFPEOOISCFFLBASE_GLHMC_PEOOISCFFLBASE_S 0 +#define GLHMC_VFPEOOISCFFLBASE_GLHMC_PEOOISCFFLBASE_M MAKEMASK(0xFFFFFFFF, 0) +#define GLHMC_VFPEPBLBASE(_i) (0x0052D800 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ +#define GLHMC_VFPEPBLBASE_MAX_INDEX 31 +#define GLHMC_VFPEPBLBASE_FPMPEPBLBASE_S 0 +#define GLHMC_VFPEPBLBASE_FPMPEPBLBASE_M MAKEMASK(0xFFFFFF, 0) +#define GLHMC_VFPEPBLCNT(_i) (0x0052D900 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ +#define GLHMC_VFPEPBLCNT_MAX_INDEX 31 +#define GLHMC_VFPEPBLCNT_FPMPEPBLCNT_S 0 +#define GLHMC_VFPEPBLCNT_FPMPEPBLCNT_M MAKEMASK(0x1FFFFFFF, 0) +#define GLHMC_VFPEQ1BASE(_i) (0x0052D200 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ +#define GLHMC_VFPEQ1BASE_MAX_INDEX 31 +#define GLHMC_VFPEQ1BASE_FPMPEQ1BASE_S 0 +#define GLHMC_VFPEQ1BASE_FPMPEQ1BASE_M MAKEMASK(0xFFFFFF, 0) +#define GLHMC_VFPEQ1CNT(_i) (0x0052D300 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ +#define GLHMC_VFPEQ1CNT_MAX_INDEX 31 +#define GLHMC_VFPEQ1CNT_FPMPEQ1CNT_S 0 +#define GLHMC_VFPEQ1CNT_FPMPEQ1CNT_M MAKEMASK(0x1FFFFFFF, 0) +#define GLHMC_VFPEQ1FLBASE(_i) (0x0052D400 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ +#define GLHMC_VFPEQ1FLBASE_MAX_INDEX 31 +#define GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_S 0 +#define GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_M MAKEMASK(0xFFFFFF, 0) +#define GLHMC_VFPEQPBASE(_i) (0x0052C000 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ +#define GLHMC_VFPEQPBASE_MAX_INDEX 31 +#define GLHMC_VFPEQPBASE_FPMPEQPBASE_S 0 +#define GLHMC_VFPEQPBASE_FPMPEQPBASE_M MAKEMASK(0xFFFFFF, 0) +#define GLHMC_VFPEQPCNT(_i) (0x0052C100 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ +#define GLHMC_VFPEQPCNT_MAX_INDEX 31 +#define GLHMC_VFPEQPCNT_FPMPEQPCNT_S 0 +#define GLHMC_VFPEQPCNT_FPMPEQPCNT_M MAKEMASK(0x1FFFFFFF, 0) +#define GLHMC_VFPERRFBASE(_i) (0x0052E800 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ +#define GLHMC_VFPERRFBASE_MAX_INDEX 31 +#define 
GLHMC_VFPERRFBASE_GLHMC_PERRFBASE_S 0 +#define GLHMC_VFPERRFBASE_GLHMC_PERRFBASE_M MAKEMASK(0xFFFFFFFF, 0) +#define GLHMC_VFPERRFCNT(_i) (0x0052E900 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ +#define GLHMC_VFPERRFCNT_MAX_INDEX 31 +#define GLHMC_VFPERRFCNT_GLHMC_PERRFCNT_S 0 +#define GLHMC_VFPERRFCNT_GLHMC_PERRFCNT_M MAKEMASK(0xFFFFFFFF, 0) +#define GLHMC_VFPERRFFLBASE(_i) (0x0052EA00 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ +#define GLHMC_VFPERRFFLBASE_MAX_INDEX 31 +#define GLHMC_VFPERRFFLBASE_GLHMC_PERRFFLBASE_S 0 +#define GLHMC_VFPERRFFLBASE_GLHMC_PERRFFLBASE_M MAKEMASK(0xFFFFFFFF, 0) +#define GLHMC_VFPETIMERBASE(_i) (0x0052DA00 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ +#define GLHMC_VFPETIMERBASE_MAX_INDEX 31 +#define GLHMC_VFPETIMERBASE_FPMPETIMERBASE_S 0 +#define GLHMC_VFPETIMERBASE_FPMPETIMERBASE_M MAKEMASK(0xFFFFFF, 0) +#define GLHMC_VFPETIMERCNT(_i) (0x0052DB00 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ +#define GLHMC_VFPETIMERCNT_MAX_INDEX 31 +#define GLHMC_VFPETIMERCNT_FPMPETIMERCNT_S 0 +#define GLHMC_VFPETIMERCNT_FPMPETIMERCNT_M MAKEMASK(0x1FFFFFFF, 0) +#define GLHMC_VFPEXFBASE(_i) (0x0052CE00 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ +#define GLHMC_VFPEXFBASE_MAX_INDEX 31 +#define GLHMC_VFPEXFBASE_FPMPEXFBASE_S 0 +#define GLHMC_VFPEXFBASE_FPMPEXFBASE_M MAKEMASK(0xFFFFFF, 0) +#define GLHMC_VFPEXFCNT(_i) (0x0052CF00 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ +#define GLHMC_VFPEXFCNT_MAX_INDEX 31 +#define GLHMC_VFPEXFCNT_FPMPEXFCNT_S 0 +#define GLHMC_VFPEXFCNT_FPMPEXFCNT_M MAKEMASK(0x1FFFFFFF, 0) +#define GLHMC_VFPEXFFLBASE(_i) (0x0052D000 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ +#define GLHMC_VFPEXFFLBASE_MAX_INDEX 31 +#define GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_S 0 +#define GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_M MAKEMASK(0xFFFFFF, 0) +#define GLHMC_VFSDDATAHIGH(_i) (0x00528200 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ +#define GLHMC_VFSDDATAHIGH_MAX_INDEX 31 +#define GLHMC_VFSDDATAHIGH_PMSDDATAHIGH_S 0 +#define GLHMC_VFSDDATAHIGH_PMSDDATAHIGH_M MAKEMASK(0xFFFFFFFF, 0) +#define GLHMC_VFSDDATAHIGH_FPMAT(_i) (0x00108200 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ +#define GLHMC_VFSDDATAHIGH_FPMAT_MAX_INDEX 31 +#define GLHMC_VFSDDATAHIGH_FPMAT_PMSDDATAHIGH_S 0 +#define GLHMC_VFSDDATAHIGH_FPMAT_PMSDDATAHIGH_M MAKEMASK(0xFFFFFFFF, 0) +#define GLHMC_VFSDDATALOW(_i) (0x00528100 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ +#define GLHMC_VFSDDATALOW_MAX_INDEX 31 +#define GLHMC_VFSDDATALOW_PMSDVALID_S 0 +#define GLHMC_VFSDDATALOW_PMSDVALID_M BIT(0) +#define GLHMC_VFSDDATALOW_PMSDTYPE_S 1 +#define GLHMC_VFSDDATALOW_PMSDTYPE_M BIT(1) +#define GLHMC_VFSDDATALOW_PMSDBPCOUNT_S 2 +#define GLHMC_VFSDDATALOW_PMSDBPCOUNT_M MAKEMASK(0x3FF, 2) +#define GLHMC_VFSDDATALOW_PMSDDATALOW_S 12 +#define GLHMC_VFSDDATALOW_PMSDDATALOW_M MAKEMASK(0xFFFFF, 12) +#define GLHMC_VFSDDATALOW_FPMAT(_i) (0x00108100 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ +#define GLHMC_VFSDDATALOW_FPMAT_MAX_INDEX 31 +#define GLHMC_VFSDDATALOW_FPMAT_PMSDVALID_S 0 +#define GLHMC_VFSDDATALOW_FPMAT_PMSDVALID_M BIT(0) +#define GLHMC_VFSDDATALOW_FPMAT_PMSDTYPE_S 1 +#define GLHMC_VFSDDATALOW_FPMAT_PMSDTYPE_M BIT(1) +#define GLHMC_VFSDDATALOW_FPMAT_PMSDBPCOUNT_S 2 +#define GLHMC_VFSDDATALOW_FPMAT_PMSDBPCOUNT_M MAKEMASK(0x3FF, 2) +#define GLHMC_VFSDDATALOW_FPMAT_PMSDDATALOW_S 12 +#define GLHMC_VFSDDATALOW_FPMAT_PMSDDATALOW_M MAKEMASK(0xFFFFF, 12) +#define GLHMC_VFSDPART(_i) (0x00528800 + ((_i) * 4)) 
/* _i=0...31 */ /* Reset Source: CORER */ +#define GLHMC_VFSDPART_MAX_INDEX 31 +#define GLHMC_VFSDPART_PMSDBASE_S 0 +#define GLHMC_VFSDPART_PMSDBASE_M MAKEMASK(0xFFF, 0) +#define GLHMC_VFSDPART_PMSDSIZE_S 16 +#define GLHMC_VFSDPART_PMSDSIZE_M MAKEMASK(0x1FFF, 16) +#define GLHMC_VFSDPART_FPMAT(_i) (0x00108800 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ +#define GLHMC_VFSDPART_FPMAT_MAX_INDEX 31 +#define GLHMC_VFSDPART_FPMAT_PMSDBASE_S 0 +#define GLHMC_VFSDPART_FPMAT_PMSDBASE_M MAKEMASK(0xFFF, 0) +#define GLHMC_VFSDPART_FPMAT_PMSDSIZE_S 16 +#define GLHMC_VFSDPART_FPMAT_PMSDSIZE_M MAKEMASK(0x1FFF, 16) +#define GLMDOC_CACHESIZE 0x0051C06C /* Reset Source: CORER */ +#define GLMDOC_CACHESIZE_WORD_SIZE_S 0 +#define GLMDOC_CACHESIZE_WORD_SIZE_M MAKEMASK(0xFF, 0) +#define GLMDOC_CACHESIZE_SETS_S 8 +#define GLMDOC_CACHESIZE_SETS_M MAKEMASK(0xFFF, 8) +#define GLMDOC_CACHESIZE_WAYS_S 20 +#define GLMDOC_CACHESIZE_WAYS_M MAKEMASK(0xF, 20) +#define GLPBLOC0_CACHESIZE 0x00518074 /* Reset Source: CORER */ +#define GLPBLOC0_CACHESIZE_WORD_SIZE_S 0 +#define GLPBLOC0_CACHESIZE_WORD_SIZE_M MAKEMASK(0xFF, 0) +#define GLPBLOC0_CACHESIZE_SETS_S 8 +#define GLPBLOC0_CACHESIZE_SETS_M MAKEMASK(0xFFF, 8) +#define GLPBLOC0_CACHESIZE_WAYS_S 20 +#define GLPBLOC0_CACHESIZE_WAYS_M MAKEMASK(0xF, 20) +#define GLPBLOC1_CACHESIZE 0x0051A074 /* Reset Source: CORER */ +#define GLPBLOC1_CACHESIZE_WORD_SIZE_S 0 +#define GLPBLOC1_CACHESIZE_WORD_SIZE_M MAKEMASK(0xFF, 0) +#define GLPBLOC1_CACHESIZE_SETS_S 8 +#define GLPBLOC1_CACHESIZE_SETS_M MAKEMASK(0xFFF, 8) +#define GLPBLOC1_CACHESIZE_WAYS_S 20 +#define GLPBLOC1_CACHESIZE_WAYS_M MAKEMASK(0xF, 20) +#define GLPDOC_CACHESIZE 0x00530048 /* Reset Source: CORER */ +#define GLPDOC_CACHESIZE_WORD_SIZE_S 0 +#define GLPDOC_CACHESIZE_WORD_SIZE_M MAKEMASK(0xFF, 0) +#define GLPDOC_CACHESIZE_SETS_S 8 +#define GLPDOC_CACHESIZE_SETS_M MAKEMASK(0xFFF, 8) +#define GLPDOC_CACHESIZE_WAYS_S 20 +#define GLPDOC_CACHESIZE_WAYS_M MAKEMASK(0xF, 20) +#define GLPDOC_CACHESIZE_FPMAT 0x00110088 /* Reset Source: CORER */ +#define GLPDOC_CACHESIZE_FPMAT_WORD_SIZE_S 0 +#define GLPDOC_CACHESIZE_FPMAT_WORD_SIZE_M MAKEMASK(0xFF, 0) +#define GLPDOC_CACHESIZE_FPMAT_SETS_S 8 +#define GLPDOC_CACHESIZE_FPMAT_SETS_M MAKEMASK(0xFFF, 8) +#define GLPDOC_CACHESIZE_FPMAT_WAYS_S 20 +#define GLPDOC_CACHESIZE_FPMAT_WAYS_M MAKEMASK(0xF, 20) +#define GLPEOC0_CACHESIZE 0x005140A8 /* Reset Source: CORER */ +#define GLPEOC0_CACHESIZE_WORD_SIZE_S 0 +#define GLPEOC0_CACHESIZE_WORD_SIZE_M MAKEMASK(0xFF, 0) +#define GLPEOC0_CACHESIZE_SETS_S 8 +#define GLPEOC0_CACHESIZE_SETS_M MAKEMASK(0xFFF, 8) +#define GLPEOC0_CACHESIZE_WAYS_S 20 +#define GLPEOC0_CACHESIZE_WAYS_M MAKEMASK(0xF, 20) +#define GLPEOC1_CACHESIZE 0x005160A8 /* Reset Source: CORER */ +#define GLPEOC1_CACHESIZE_WORD_SIZE_S 0 +#define GLPEOC1_CACHESIZE_WORD_SIZE_M MAKEMASK(0xFF, 0) +#define GLPEOC1_CACHESIZE_SETS_S 8 +#define GLPEOC1_CACHESIZE_SETS_M MAKEMASK(0xFFF, 8) +#define GLPEOC1_CACHESIZE_WAYS_S 20 +#define GLPEOC1_CACHESIZE_WAYS_M MAKEMASK(0xF, 20) +#define PFHMC_ERRORDATA 0x00520500 /* Reset Source: PFR */ +#define PFHMC_ERRORDATA_HMC_ERROR_DATA_S 0 +#define PFHMC_ERRORDATA_HMC_ERROR_DATA_M MAKEMASK(0x3FFFFFFF, 0) +#define PFHMC_ERRORDATA_FPMAT 0x00100500 /* Reset Source: PFR */ +#define PFHMC_ERRORDATA_FPMAT_HMC_ERROR_DATA_S 0 +#define PFHMC_ERRORDATA_FPMAT_HMC_ERROR_DATA_M MAKEMASK(0x3FFFFFFF, 0) +#define PFHMC_ERRORINFO 0x00520400 /* Reset Source: PFR */ +#define PFHMC_ERRORINFO_PMF_INDEX_S 0 +#define PFHMC_ERRORINFO_PMF_INDEX_M MAKEMASK(0x1F, 0) +#define 
PFHMC_ERRORINFO_PMF_ISVF_S 7 +#define PFHMC_ERRORINFO_PMF_ISVF_M BIT(7) +#define PFHMC_ERRORINFO_HMC_ERROR_TYPE_S 8 +#define PFHMC_ERRORINFO_HMC_ERROR_TYPE_M MAKEMASK(0xF, 8) +#define PFHMC_ERRORINFO_HMC_OBJECT_TYPE_S 16 +#define PFHMC_ERRORINFO_HMC_OBJECT_TYPE_M MAKEMASK(0x1F, 16) +#define PFHMC_ERRORINFO_ERROR_DETECTED_S 31 +#define PFHMC_ERRORINFO_ERROR_DETECTED_M BIT(31) +#define PFHMC_ERRORINFO_FPMAT 0x00100400 /* Reset Source: PFR */ +#define PFHMC_ERRORINFO_FPMAT_PMF_INDEX_S 0 +#define PFHMC_ERRORINFO_FPMAT_PMF_INDEX_M MAKEMASK(0x1F, 0) +#define PFHMC_ERRORINFO_FPMAT_PMF_ISVF_S 7 +#define PFHMC_ERRORINFO_FPMAT_PMF_ISVF_M BIT(7) +#define PFHMC_ERRORINFO_FPMAT_HMC_ERROR_TYPE_S 8 +#define PFHMC_ERRORINFO_FPMAT_HMC_ERROR_TYPE_M MAKEMASK(0xF, 8) +#define PFHMC_ERRORINFO_FPMAT_HMC_OBJECT_TYPE_S 16 +#define PFHMC_ERRORINFO_FPMAT_HMC_OBJECT_TYPE_M MAKEMASK(0x1F, 16) +#define PFHMC_ERRORINFO_FPMAT_ERROR_DETECTED_S 31 +#define PFHMC_ERRORINFO_FPMAT_ERROR_DETECTED_M BIT(31) +#define PFHMC_PDINV 0x00520300 /* Reset Source: PFR */ +#define PFHMC_PDINV_PMSDIDX_S 0 +#define PFHMC_PDINV_PMSDIDX_M MAKEMASK(0xFFF, 0) +#define PFHMC_PDINV_PMSDPARTSEL_S 15 +#define PFHMC_PDINV_PMSDPARTSEL_M BIT(15) +#define PFHMC_PDINV_PMPDIDX_S 16 +#define PFHMC_PDINV_PMPDIDX_M MAKEMASK(0x1FF, 16) +#define PFHMC_PDINV_FPMAT 0x00100300 /* Reset Source: PFR */ +#define PFHMC_PDINV_FPMAT_PMSDIDX_S 0 +#define PFHMC_PDINV_FPMAT_PMSDIDX_M MAKEMASK(0xFFF, 0) +#define PFHMC_PDINV_FPMAT_PMSDPARTSEL_S 15 +#define PFHMC_PDINV_FPMAT_PMSDPARTSEL_M BIT(15) +#define PFHMC_PDINV_FPMAT_PMPDIDX_S 16 +#define PFHMC_PDINV_FPMAT_PMPDIDX_M MAKEMASK(0x1FF, 16) +#define PFHMC_SDCMD 0x00520000 /* Reset Source: PFR */ +#define PFHMC_SDCMD_PMSDIDX_S 0 +#define PFHMC_SDCMD_PMSDIDX_M MAKEMASK(0xFFF, 0) +#define PFHMC_SDCMD_PMSDPARTSEL_S 15 +#define PFHMC_SDCMD_PMSDPARTSEL_M BIT(15) +#define PFHMC_SDCMD_PMSDWR_S 31 +#define PFHMC_SDCMD_PMSDWR_M BIT(31) +#define PFHMC_SDCMD_FPMAT 0x00100000 /* Reset Source: PFR */ +#define PFHMC_SDCMD_FPMAT_PMSDIDX_S 0 +#define PFHMC_SDCMD_FPMAT_PMSDIDX_M MAKEMASK(0xFFF, 0) +#define PFHMC_SDCMD_FPMAT_PMSDPARTSEL_S 15 +#define PFHMC_SDCMD_FPMAT_PMSDPARTSEL_M BIT(15) +#define PFHMC_SDCMD_FPMAT_PMSDWR_S 31 +#define PFHMC_SDCMD_FPMAT_PMSDWR_M BIT(31) +#define PFHMC_SDDATAHIGH 0x00520200 /* Reset Source: PFR */ +#define PFHMC_SDDATAHIGH_PMSDDATAHIGH_S 0 +#define PFHMC_SDDATAHIGH_PMSDDATAHIGH_M MAKEMASK(0xFFFFFFFF, 0) +#define PFHMC_SDDATAHIGH_FPMAT 0x00100200 /* Reset Source: PFR */ +#define PFHMC_SDDATAHIGH_FPMAT_PMSDDATAHIGH_S 0 +#define PFHMC_SDDATAHIGH_FPMAT_PMSDDATAHIGH_M MAKEMASK(0xFFFFFFFF, 0) +#define PFHMC_SDDATALOW 0x00520100 /* Reset Source: PFR */ +#define PFHMC_SDDATALOW_PMSDVALID_S 0 +#define PFHMC_SDDATALOW_PMSDVALID_M BIT(0) +#define PFHMC_SDDATALOW_PMSDTYPE_S 1 +#define PFHMC_SDDATALOW_PMSDTYPE_M BIT(1) +#define PFHMC_SDDATALOW_PMSDBPCOUNT_S 2 +#define PFHMC_SDDATALOW_PMSDBPCOUNT_M MAKEMASK(0x3FF, 2) +#define PFHMC_SDDATALOW_PMSDDATALOW_S 12 +#define PFHMC_SDDATALOW_PMSDDATALOW_M MAKEMASK(0xFFFFF, 12) +#define PFHMC_SDDATALOW_FPMAT 0x00100100 /* Reset Source: PFR */ +#define PFHMC_SDDATALOW_FPMAT_PMSDVALID_S 0 +#define PFHMC_SDDATALOW_FPMAT_PMSDVALID_M BIT(0) +#define PFHMC_SDDATALOW_FPMAT_PMSDTYPE_S 1 +#define PFHMC_SDDATALOW_FPMAT_PMSDTYPE_M BIT(1) +#define PFHMC_SDDATALOW_FPMAT_PMSDBPCOUNT_S 2 +#define PFHMC_SDDATALOW_FPMAT_PMSDBPCOUNT_M MAKEMASK(0x3FF, 2) +#define PFHMC_SDDATALOW_FPMAT_PMSDDATALOW_S 12 +#define PFHMC_SDDATALOW_FPMAT_PMSDDATALOW_M MAKEMASK(0xFFFFF, 12) +#define GL_DSI_REPC 0x00294208 
/* Reset Source: CORER */ +#define GL_DSI_REPC_NO_DESC_CNT_S 0 +#define GL_DSI_REPC_NO_DESC_CNT_M MAKEMASK(0xFFFF, 0) +#define GL_DSI_REPC_ERROR_CNT_S 16 +#define GL_DSI_REPC_ERROR_CNT_M MAKEMASK(0xFFFF, 16) +#define GL_MDCK_TDAT_TCLAN 0x000FC0DC /* Reset Source: CORER */ +#define GL_MDCK_TDAT_TCLAN_WRONG_ORDER_FORMAT_DESC_S 0 +#define GL_MDCK_TDAT_TCLAN_WRONG_ORDER_FORMAT_DESC_M BIT(0) +#define GL_MDCK_TDAT_TCLAN_UR_S 1 +#define GL_MDCK_TDAT_TCLAN_UR_M BIT(1) +#define GL_MDCK_TDAT_TCLAN_TAIL_DESC_NOT_DDESC_EOP_NOP_S 2 +#define GL_MDCK_TDAT_TCLAN_TAIL_DESC_NOT_DDESC_EOP_NOP_M BIT(2) +#define GL_MDCK_TDAT_TCLAN_FALSE_SCHEDULING_S 3 +#define GL_MDCK_TDAT_TCLAN_FALSE_SCHEDULING_M BIT(3) +#define GL_MDCK_TDAT_TCLAN_TAIL_VALUE_BIGGER_THAN_RING_LEN_S 4 +#define GL_MDCK_TDAT_TCLAN_TAIL_VALUE_BIGGER_THAN_RING_LEN_M BIT(4) +#define GL_MDCK_TDAT_TCLAN_MORE_THAN_8_DCMDS_IN_PKT_S 5 +#define GL_MDCK_TDAT_TCLAN_MORE_THAN_8_DCMDS_IN_PKT_M BIT(5) +#define GL_MDCK_TDAT_TCLAN_NO_HEAD_UPDATE_IN_QUANTA_S 6 +#define GL_MDCK_TDAT_TCLAN_NO_HEAD_UPDATE_IN_QUANTA_M BIT(6) +#define GL_MDCK_TDAT_TCLAN_PKT_LEN_NOT_LEGAL_S 7 +#define GL_MDCK_TDAT_TCLAN_PKT_LEN_NOT_LEGAL_M BIT(7) +#define GL_MDCK_TDAT_TCLAN_TSO_TLEN_NOT_COHERENT_WITH_SUM_BUFS_S 8 +#define GL_MDCK_TDAT_TCLAN_TSO_TLEN_NOT_COHERENT_WITH_SUM_BUFS_M BIT(8) +#define GL_MDCK_TDAT_TCLAN_TSO_TAIL_REACHED_BEFORE_TLEN_END_S 9 +#define GL_MDCK_TDAT_TCLAN_TSO_TAIL_REACHED_BEFORE_TLEN_END_M BIT(9) +#define GL_MDCK_TDAT_TCLAN_TSO_MORE_THAN_3_HDRS_S 10 +#define GL_MDCK_TDAT_TCLAN_TSO_MORE_THAN_3_HDRS_M BIT(10) +#define GL_MDCK_TDAT_TCLAN_TSO_SUM_BUFFS_LT_SUM_HDRS_S 11 +#define GL_MDCK_TDAT_TCLAN_TSO_SUM_BUFFS_LT_SUM_HDRS_M BIT(11) +#define GL_MDCK_TDAT_TCLAN_TSO_ZERO_MSS_TLEN_HDRS_S 12 +#define GL_MDCK_TDAT_TCLAN_TSO_ZERO_MSS_TLEN_HDRS_M BIT(12) +#define GL_MDCK_TDAT_TCLAN_TSO_CTX_DESC_IPSEC_S 13 +#define GL_MDCK_TDAT_TCLAN_TSO_CTX_DESC_IPSEC_M BIT(13) +#define GL_MDCK_TDAT_TCLAN_SSO_COMS_NOT_WHOLE_PKT_NUM_IN_QUANTA_S 14 +#define GL_MDCK_TDAT_TCLAN_SSO_COMS_NOT_WHOLE_PKT_NUM_IN_QUANTA_M BIT(14) +#define GL_MDCK_TDAT_TCLAN_COMS_QUANTA_BYTES_EXCEED_PKTLEN_X_64_S 15 +#define GL_MDCK_TDAT_TCLAN_COMS_QUANTA_BYTES_EXCEED_PKTLEN_X_64_M BIT(15) +#define GL_MDCK_TDAT_TCLAN_COMS_QUANTA_CMDS_EXCEED_S 16 +#define GL_MDCK_TDAT_TCLAN_COMS_QUANTA_CMDS_EXCEED_M BIT(16) +#define GL_MDCK_TDAT_TCLAN_TSO_COMS_TSO_DESCS_LAST_LSO_QUANTA_S 17 +#define GL_MDCK_TDAT_TCLAN_TSO_COMS_TSO_DESCS_LAST_LSO_QUANTA_M BIT(17) +#define GL_MDCK_TDAT_TCLAN_TSO_COMS_TSO_DESCS_TLEN_S 18 +#define GL_MDCK_TDAT_TCLAN_TSO_COMS_TSO_DESCS_TLEN_M BIT(18) +#define GL_MDCK_TDAT_TCLAN_TSO_COMS_QUANTA_FINISHED_TOO_EARLY_S 19 +#define GL_MDCK_TDAT_TCLAN_TSO_COMS_QUANTA_FINISHED_TOO_EARLY_M BIT(19) +#define GL_MDCK_TDAT_TCLAN_COMS_NUM_PKTS_IN_QUANTA_S 20 +#define GL_MDCK_TDAT_TCLAN_COMS_NUM_PKTS_IN_QUANTA_M BIT(20) +#define GLCORE_CLKCTL_H 0x000B81E8 /* Reset Source: POR */ +#define GLCORE_CLKCTL_H_UPPER_CLK_SRC_H_S 0 +#define GLCORE_CLKCTL_H_UPPER_CLK_SRC_H_M MAKEMASK(0x3, 0) +#define GLCORE_CLKCTL_H_LOWER_CLK_SRC_H_S 2 +#define GLCORE_CLKCTL_H_LOWER_CLK_SRC_H_M MAKEMASK(0x3, 2) +#define GLCORE_CLKCTL_H_PSM_CLK_SRC_H_S 4 +#define GLCORE_CLKCTL_H_PSM_CLK_SRC_H_M MAKEMASK(0x3, 4) +#define GLCORE_CLKCTL_H_RXCTL_CLK_SRC_H_S 6 +#define GLCORE_CLKCTL_H_RXCTL_CLK_SRC_H_M MAKEMASK(0x3, 6) +#define GLCORE_CLKCTL_H_UANA_CLK_SRC_H_S 8 +#define GLCORE_CLKCTL_H_UANA_CLK_SRC_H_M MAKEMASK(0x7, 8) +#define GLCORE_CLKCTL_L 0x000B8254 /* Reset Source: POR */ +#define GLCORE_CLKCTL_L_UPPER_CLK_SRC_L_S 0 +#define 
GLCORE_CLKCTL_L_UPPER_CLK_SRC_L_M MAKEMASK(0x3, 0) +#define GLCORE_CLKCTL_L_LOWER_CLK_SRC_L_S 2 +#define GLCORE_CLKCTL_L_LOWER_CLK_SRC_L_M MAKEMASK(0x3, 2) +#define GLCORE_CLKCTL_L_PSM_CLK_SRC_L_S 4 +#define GLCORE_CLKCTL_L_PSM_CLK_SRC_L_M MAKEMASK(0x3, 4) +#define GLCORE_CLKCTL_L_RXCTL_CLK_SRC_L_S 6 +#define GLCORE_CLKCTL_L_RXCTL_CLK_SRC_L_M MAKEMASK(0x3, 6) +#define GLCORE_CLKCTL_L_UANA_CLK_SRC_L_S 8 +#define GLCORE_CLKCTL_L_UANA_CLK_SRC_L_M MAKEMASK(0x7, 8) +#define GLCORE_CLKCTL_M 0x000B8258 /* Reset Source: POR */ +#define GLCORE_CLKCTL_M_UPPER_CLK_SRC_M_S 0 +#define GLCORE_CLKCTL_M_UPPER_CLK_SRC_M_M MAKEMASK(0x3, 0) +#define GLCORE_CLKCTL_M_LOWER_CLK_SRC_M_S 2 +#define GLCORE_CLKCTL_M_LOWER_CLK_SRC_M_M MAKEMASK(0x3, 2) +#define GLCORE_CLKCTL_M_PSM_CLK_SRC_M_S 4 +#define GLCORE_CLKCTL_M_PSM_CLK_SRC_M_M MAKEMASK(0x3, 4) +#define GLCORE_CLKCTL_M_RXCTL_CLK_SRC_M_S 6 +#define GLCORE_CLKCTL_M_RXCTL_CLK_SRC_M_M MAKEMASK(0x3, 6) +#define GLCORE_CLKCTL_M_UANA_CLK_SRC_M_S 8 +#define GLCORE_CLKCTL_M_UANA_CLK_SRC_M_M MAKEMASK(0x7, 8) +#define GLFOC_CACHESIZE 0x000AA074 /* Reset Source: CORER */ +#define GLFOC_CACHESIZE_WORD_SIZE_S 0 +#define GLFOC_CACHESIZE_WORD_SIZE_M MAKEMASK(0xFF, 0) +#define GLFOC_CACHESIZE_SETS_S 8 +#define GLFOC_CACHESIZE_SETS_M MAKEMASK(0xFFF, 8) +#define GLFOC_CACHESIZE_WAYS_S 20 +#define GLFOC_CACHESIZE_WAYS_M MAKEMASK(0xF, 20) +#define GLMAC_CLKSTAT 0x000B8210 /* Reset Source: POR */ +#define GLMAC_CLKSTAT_P0_CLK_SPEED_S 0 +#define GLMAC_CLKSTAT_P0_CLK_SPEED_M MAKEMASK(0xF, 0) +#define GLMAC_CLKSTAT_P1_CLK_SPEED_S 4 +#define GLMAC_CLKSTAT_P1_CLK_SPEED_M MAKEMASK(0xF, 4) +#define GLMAC_CLKSTAT_P2_CLK_SPEED_S 8 +#define GLMAC_CLKSTAT_P2_CLK_SPEED_M MAKEMASK(0xF, 8) +#define GLMAC_CLKSTAT_P3_CLK_SPEED_S 12 +#define GLMAC_CLKSTAT_P3_CLK_SPEED_M MAKEMASK(0xF, 12) +#define GLMAC_CLKSTAT_P4_CLK_SPEED_S 16 +#define GLMAC_CLKSTAT_P4_CLK_SPEED_M MAKEMASK(0xF, 16) +#define GLMAC_CLKSTAT_P5_CLK_SPEED_S 20 +#define GLMAC_CLKSTAT_P5_CLK_SPEED_M MAKEMASK(0xF, 20) +#define GLMAC_CLKSTAT_P6_CLK_SPEED_S 24 +#define GLMAC_CLKSTAT_P6_CLK_SPEED_M MAKEMASK(0xF, 24) +#define GLMAC_CLKSTAT_P7_CLK_SPEED_S 28 +#define GLMAC_CLKSTAT_P7_CLK_SPEED_M MAKEMASK(0xF, 28) +#define GLTPB_100G_MAC_FC_THRESH 0x00099510 /* Reset Source: CORER */ +#define GLTPB_100G_MAC_FC_THRESH_PORT0_FC_THRESH_S 0 +#define GLTPB_100G_MAC_FC_THRESH_PORT0_FC_THRESH_M MAKEMASK(0xFFFF, 0) +#define GLTPB_100G_MAC_FC_THRESH_PORT1_FC_THRESH_S 16 +#define GLTPB_100G_MAC_FC_THRESH_PORT1_FC_THRESH_M MAKEMASK(0xFFFF, 16) +#define GLTPB_100G_RPB_FC_THRESH 0x0009963C /* Reset Source: CORER */ +#define GLTPB_100G_RPB_FC_THRESH_PORT0_FC_THRESH_S 0 +#define GLTPB_100G_RPB_FC_THRESH_PORT0_FC_THRESH_M MAKEMASK(0xFFFF, 0) +#define GLTPB_100G_RPB_FC_THRESH_PORT1_FC_THRESH_S 16 +#define GLTPB_100G_RPB_FC_THRESH_PORT1_FC_THRESH_M MAKEMASK(0xFFFF, 16) +#define GLTPB_PACING_10G 0x000994E4 /* Reset Source: CORER */ +#define GLTPB_PACING_10G_N_S 0 +#define GLTPB_PACING_10G_N_M MAKEMASK(0xFF, 0) +#define GLTPB_PACING_10G_K_S 8 +#define GLTPB_PACING_10G_K_M MAKEMASK(0xFF, 8) +#define GLTPB_PACING_10G_S_S 16 +#define GLTPB_PACING_10G_S_M MAKEMASK(0x1FF, 16) +#define GLTPB_PACING_25G 0x000994E0 /* Reset Source: CORER */ +#define GLTPB_PACING_25G_N_S 0 +#define GLTPB_PACING_25G_N_M MAKEMASK(0xFF, 0) +#define GLTPB_PACING_25G_K_S 8 +#define GLTPB_PACING_25G_K_M MAKEMASK(0xFF, 8) +#define GLTPB_PACING_25G_S_S 16 +#define GLTPB_PACING_25G_S_M MAKEMASK(0x1FF, 16) +#define GLTPB_PORT_PACING_SPEED 0x000994E8 /* Reset Source: CORER */ +#define 
GLTPB_PORT_PACING_SPEED_PORT0_SPEED_S 0 +#define GLTPB_PORT_PACING_SPEED_PORT0_SPEED_M BIT(0) +#define GLTPB_PORT_PACING_SPEED_PORT1_SPEED_S 1 +#define GLTPB_PORT_PACING_SPEED_PORT1_SPEED_M BIT(1) +#define GLTPB_PORT_PACING_SPEED_PORT2_SPEED_S 2 +#define GLTPB_PORT_PACING_SPEED_PORT2_SPEED_M BIT(2) +#define GLTPB_PORT_PACING_SPEED_PORT3_SPEED_S 3 +#define GLTPB_PORT_PACING_SPEED_PORT3_SPEED_M BIT(3) +#define GLTPB_PORT_PACING_SPEED_PORT4_SPEED_S 4 +#define GLTPB_PORT_PACING_SPEED_PORT4_SPEED_M BIT(4) +#define GLTPB_PORT_PACING_SPEED_PORT5_SPEED_S 5 +#define GLTPB_PORT_PACING_SPEED_PORT5_SPEED_M BIT(5) +#define GLTPB_PORT_PACING_SPEED_PORT6_SPEED_S 6 +#define GLTPB_PORT_PACING_SPEED_PORT6_SPEED_M BIT(6) +#define GLTPB_PORT_PACING_SPEED_PORT7_SPEED_S 7 +#define GLTPB_PORT_PACING_SPEED_PORT7_SPEED_M BIT(7) +#define TPB_CFG_SCHEDULED_BC_THRESHOLD 0x00099494 /* Reset Source: CORER */ +#define TPB_CFG_SCHEDULED_BC_THRESHOLD_THRESHOLD_S 0 +#define TPB_CFG_SCHEDULED_BC_THRESHOLD_THRESHOLD_M MAKEMASK(0x7FFF, 0) +#define GL_UFUSE_SOC 0x000A400C /* Reset Source: POR */ +#define GL_UFUSE_SOC_PORT_MODE_S 0 +#define GL_UFUSE_SOC_PORT_MODE_M MAKEMASK(0x3, 0) +#define GL_UFUSE_SOC_BANDWIDTH_S 2 +#define GL_UFUSE_SOC_BANDWIDTH_M MAKEMASK(0x3, 2) +#define GL_UFUSE_SOC_PE_DISABLE_S 4 +#define GL_UFUSE_SOC_PE_DISABLE_M BIT(4) +#define GL_UFUSE_SOC_SWITCH_MODE_S 5 +#define GL_UFUSE_SOC_SWITCH_MODE_M BIT(5) +#define GL_UFUSE_SOC_CSR_PROTECTION_ENABLE_S 6 +#define GL_UFUSE_SOC_CSR_PROTECTION_ENABLE_M BIT(6) +#define GL_UFUSE_SOC_SERIAL_50G_S 7 +#define GL_UFUSE_SOC_SERIAL_50G_M BIT(7) +#define GL_UFUSE_SOC_NIC_ID_S 8 +#define GL_UFUSE_SOC_NIC_ID_M BIT(8) +#define GL_UFUSE_SOC_BLOCK_BME_TO_FW_S 9 +#define GL_UFUSE_SOC_BLOCK_BME_TO_FW_M BIT(9) +#define GL_UFUSE_SOC_SOC_TYPE_S 10 +#define GL_UFUSE_SOC_SOC_TYPE_M BIT(10) +#define GL_UFUSE_SOC_BTS_MODE_S 11 +#define GL_UFUSE_SOC_BTS_MODE_M BIT(11) +#define GL_UFUSE_SOC_SPARE_FUSES_S 12 +#define GL_UFUSE_SOC_SPARE_FUSES_M MAKEMASK(0xF, 12) +#define EMPINT_GPIO_ENA 0x000880C0 /* Reset Source: POR */ +#define EMPINT_GPIO_ENA_GPIO0_ENA_S 0 +#define EMPINT_GPIO_ENA_GPIO0_ENA_M BIT(0) +#define EMPINT_GPIO_ENA_GPIO1_ENA_S 1 +#define EMPINT_GPIO_ENA_GPIO1_ENA_M BIT(1) +#define EMPINT_GPIO_ENA_GPIO2_ENA_S 2 +#define EMPINT_GPIO_ENA_GPIO2_ENA_M BIT(2) +#define EMPINT_GPIO_ENA_GPIO3_ENA_S 3 +#define EMPINT_GPIO_ENA_GPIO3_ENA_M BIT(3) +#define EMPINT_GPIO_ENA_GPIO4_ENA_S 4 +#define EMPINT_GPIO_ENA_GPIO4_ENA_M BIT(4) +#define EMPINT_GPIO_ENA_GPIO5_ENA_S 5 +#define EMPINT_GPIO_ENA_GPIO5_ENA_M BIT(5) +#define EMPINT_GPIO_ENA_GPIO6_ENA_S 6 +#define EMPINT_GPIO_ENA_GPIO6_ENA_M BIT(6) +#define GLGEN_MAC_LINK_TOPO 0x000B81DC /* Reset Source: GLOBR */ +#define GLGEN_MAC_LINK_TOPO_LINK_TOPO_S 0 +#define GLGEN_MAC_LINK_TOPO_LINK_TOPO_M MAKEMASK(0x3, 0) +#define GLINT_CEQCTL(_INT) (0x0015C000 + ((_INT) * 4)) /* _i=0...2047 */ /* Reset Source: CORER */ +#define GLINT_CEQCTL_MAX_INDEX 2047 +#define GLINT_CEQCTL_MSIX_INDX_S 0 +#define GLINT_CEQCTL_MSIX_INDX_M MAKEMASK(0x7FF, 0) +#define GLINT_CEQCTL_ITR_INDX_S 11 +#define GLINT_CEQCTL_ITR_INDX_M MAKEMASK(0x3, 11) +#define GLINT_CEQCTL_CAUSE_ENA_S 30 +#define GLINT_CEQCTL_CAUSE_ENA_M BIT(30) +#define GLINT_CEQCTL_INTEVENT_S 31 +#define GLINT_CEQCTL_INTEVENT_M BIT(31) +#define GLINT_CTL 0x0016CC54 /* Reset Source: CORER */ +#define GLINT_CTL_DIS_AUTOMASK_S 0 +#define GLINT_CTL_DIS_AUTOMASK_M BIT(0) +#define GLINT_CTL_RSVD_S 1 +#define GLINT_CTL_RSVD_M MAKEMASK(0x7FFF, 1) +#define GLINT_CTL_ITR_GRAN_200_S 16 +#define GLINT_CTL_ITR_GRAN_200_M 
MAKEMASK(0xF, 16) +#define GLINT_CTL_ITR_GRAN_100_S 20 +#define GLINT_CTL_ITR_GRAN_100_M MAKEMASK(0xF, 20) +#define GLINT_CTL_ITR_GRAN_50_S 24 +#define GLINT_CTL_ITR_GRAN_50_M MAKEMASK(0xF, 24) +#define GLINT_CTL_ITR_GRAN_25_S 28 +#define GLINT_CTL_ITR_GRAN_25_M MAKEMASK(0xF, 28) +#define GLINT_DYN_CTL(_INT) (0x00160000 + ((_INT) * 4)) /* _i=0...2047 */ /* Reset Source: CORER */ +#define GLINT_DYN_CTL_MAX_INDEX 2047 +#define GLINT_DYN_CTL_INTENA_S 0 +#define GLINT_DYN_CTL_INTENA_M BIT(0) +#define GLINT_DYN_CTL_CLEARPBA_S 1 +#define GLINT_DYN_CTL_CLEARPBA_M BIT(1) +#define GLINT_DYN_CTL_SWINT_TRIG_S 2 +#define GLINT_DYN_CTL_SWINT_TRIG_M BIT(2) +#define GLINT_DYN_CTL_ITR_INDX_S 3 +#define GLINT_DYN_CTL_ITR_INDX_M MAKEMASK(0x3, 3) +#define GLINT_DYN_CTL_INTERVAL_S 5 +#define GLINT_DYN_CTL_INTERVAL_M MAKEMASK(0xFFF, 5) +#define GLINT_DYN_CTL_SW_ITR_INDX_ENA_S 24 +#define GLINT_DYN_CTL_SW_ITR_INDX_ENA_M BIT(24) +#define GLINT_DYN_CTL_SW_ITR_INDX_S 25 +#define GLINT_DYN_CTL_SW_ITR_INDX_M MAKEMASK(0x3, 25) +#define GLINT_DYN_CTL_WB_ON_ITR_S 30 +#define GLINT_DYN_CTL_WB_ON_ITR_M BIT(30) +#define GLINT_DYN_CTL_INTENA_MSK_S 31 +#define GLINT_DYN_CTL_INTENA_MSK_M BIT(31) +#define GLINT_FW_TOOL_CTL 0x0016C840 /* Reset Source: CORER */ +#define GLINT_FW_TOOL_CTL_MSIX_INDX_S 0 +#define GLINT_FW_TOOL_CTL_MSIX_INDX_M MAKEMASK(0x7FF, 0) +#define GLINT_FW_TOOL_CTL_ITR_INDX_S 11 +#define GLINT_FW_TOOL_CTL_ITR_INDX_M MAKEMASK(0x3, 11) +#define GLINT_FW_TOOL_CTL_CAUSE_ENA_S 30 +#define GLINT_FW_TOOL_CTL_CAUSE_ENA_M BIT(30) +#define GLINT_FW_TOOL_CTL_INTEVENT_S 31 +#define GLINT_FW_TOOL_CTL_INTEVENT_M BIT(31) +#define GLINT_ITR(_i, _INT) (0x00154000 + ((_i) * 8192 + (_INT) * 4)) /* _i=0...2, _INT=0...2047 */ /* Reset Source: CORER */ +#define GLINT_ITR_MAX_INDEX 2 +#define GLINT_ITR_INTERVAL_S 0 +#define GLINT_ITR_INTERVAL_M MAKEMASK(0xFFF, 0) +#define GLINT_RATE(_INT) (0x0015A000 + ((_INT) * 4)) /* _i=0...2047 */ /* Reset Source: CORER */ +#define GLINT_RATE_MAX_INDEX 2047 +#define GLINT_RATE_INTERVAL_S 0 +#define GLINT_RATE_INTERVAL_M MAKEMASK(0x3F, 0) +#define GLINT_RATE_INTRL_ENA_S 6 +#define GLINT_RATE_INTRL_ENA_M BIT(6) +#define GLINT_TSYN_PFMSTR(_i) (0x0016CCC0 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */ +#define GLINT_TSYN_PFMSTR_MAX_INDEX 1 +#define GLINT_TSYN_PFMSTR_PF_MASTER_S 0 +#define GLINT_TSYN_PFMSTR_PF_MASTER_M MAKEMASK(0x7, 0) +#define GLINT_TSYN_PHY 0x0016CC50 /* Reset Source: CORER */ +#define GLINT_TSYN_PHY_PHY_INDX_S 0 +#define GLINT_TSYN_PHY_PHY_INDX_M MAKEMASK(0x1F, 0) +#define GLINT_VECT2FUNC(_INT) (0x00162000 + ((_INT) * 4)) /* _i=0...2047 */ /* Reset Source: CORER */ +#define GLINT_VECT2FUNC_MAX_INDEX 2047 +#define GLINT_VECT2FUNC_VF_NUM_S 0 +#define GLINT_VECT2FUNC_VF_NUM_M MAKEMASK(0xFF, 0) +#define GLINT_VECT2FUNC_PF_NUM_S 12 +#define GLINT_VECT2FUNC_PF_NUM_M MAKEMASK(0x7, 12) +#define GLINT_VECT2FUNC_IS_PF_S 16 +#define GLINT_VECT2FUNC_IS_PF_M BIT(16) +#define PF0INT_FW_HLP_CTL 0x0016C844 /* Reset Source: CORER */ +#define PF0INT_FW_HLP_CTL_MSIX_INDX_S 0 +#define PF0INT_FW_HLP_CTL_MSIX_INDX_M MAKEMASK(0x7FF, 0) +#define PF0INT_FW_HLP_CTL_ITR_INDX_S 11 +#define PF0INT_FW_HLP_CTL_ITR_INDX_M MAKEMASK(0x3, 11) +#define PF0INT_FW_HLP_CTL_CAUSE_ENA_S 30 +#define PF0INT_FW_HLP_CTL_CAUSE_ENA_M BIT(30) +#define PF0INT_FW_HLP_CTL_INTEVENT_S 31 +#define PF0INT_FW_HLP_CTL_INTEVENT_M BIT(31) +#define PF0INT_FW_PSM_CTL 0x0016C848 /* Reset Source: CORER */ +#define PF0INT_FW_PSM_CTL_MSIX_INDX_S 0 +#define PF0INT_FW_PSM_CTL_MSIX_INDX_M MAKEMASK(0x7FF, 0) +#define 
PF0INT_FW_PSM_CTL_ITR_INDX_S 11 +#define PF0INT_FW_PSM_CTL_ITR_INDX_M MAKEMASK(0x3, 11) +#define PF0INT_FW_PSM_CTL_CAUSE_ENA_S 30 +#define PF0INT_FW_PSM_CTL_CAUSE_ENA_M BIT(30) +#define PF0INT_FW_PSM_CTL_INTEVENT_S 31 +#define PF0INT_FW_PSM_CTL_INTEVENT_M BIT(31) +#define PF0INT_MBX_CPM_CTL 0x0016B2C0 /* Reset Source: CORER */ +#define PF0INT_MBX_CPM_CTL_MSIX_INDX_S 0 +#define PF0INT_MBX_CPM_CTL_MSIX_INDX_M MAKEMASK(0x7FF, 0) +#define PF0INT_MBX_CPM_CTL_ITR_INDX_S 11 +#define PF0INT_MBX_CPM_CTL_ITR_INDX_M MAKEMASK(0x3, 11) +#define PF0INT_MBX_CPM_CTL_CAUSE_ENA_S 30 +#define PF0INT_MBX_CPM_CTL_CAUSE_ENA_M BIT(30) +#define PF0INT_MBX_CPM_CTL_INTEVENT_S 31 +#define PF0INT_MBX_CPM_CTL_INTEVENT_M BIT(31) +#define PF0INT_MBX_HLP_CTL 0x0016B2C4 /* Reset Source: CORER */ +#define PF0INT_MBX_HLP_CTL_MSIX_INDX_S 0 +#define PF0INT_MBX_HLP_CTL_MSIX_INDX_M MAKEMASK(0x7FF, 0) +#define PF0INT_MBX_HLP_CTL_ITR_INDX_S 11 +#define PF0INT_MBX_HLP_CTL_ITR_INDX_M MAKEMASK(0x3, 11) +#define PF0INT_MBX_HLP_CTL_CAUSE_ENA_S 30 +#define PF0INT_MBX_HLP_CTL_CAUSE_ENA_M BIT(30) +#define PF0INT_MBX_HLP_CTL_INTEVENT_S 31 +#define PF0INT_MBX_HLP_CTL_INTEVENT_M BIT(31) +#define PF0INT_MBX_PSM_CTL 0x0016B2C8 /* Reset Source: CORER */ +#define PF0INT_MBX_PSM_CTL_MSIX_INDX_S 0 +#define PF0INT_MBX_PSM_CTL_MSIX_INDX_M MAKEMASK(0x7FF, 0) +#define PF0INT_MBX_PSM_CTL_ITR_INDX_S 11 +#define PF0INT_MBX_PSM_CTL_ITR_INDX_M MAKEMASK(0x3, 11) +#define PF0INT_MBX_PSM_CTL_CAUSE_ENA_S 30 +#define PF0INT_MBX_PSM_CTL_CAUSE_ENA_M BIT(30) +#define PF0INT_MBX_PSM_CTL_INTEVENT_S 31 +#define PF0INT_MBX_PSM_CTL_INTEVENT_M BIT(31) +#define PF0INT_OICR_CPM 0x0016CC40 /* Reset Source: CORER */ +#define PF0INT_OICR_CPM_INTEVENT_S 0 +#define PF0INT_OICR_CPM_INTEVENT_M BIT(0) +#define PF0INT_OICR_CPM_QUEUE_S 1 +#define PF0INT_OICR_CPM_QUEUE_M BIT(1) +#define PF0INT_OICR_CPM_RSV1_S 2 +#define PF0INT_OICR_CPM_RSV1_M MAKEMASK(0xFF, 2) +#define PF0INT_OICR_CPM_HH_COMP_S 10 +#define PF0INT_OICR_CPM_HH_COMP_M BIT(10) +#define PF0INT_OICR_CPM_TSYN_TX_S 11 +#define PF0INT_OICR_CPM_TSYN_TX_M BIT(11) +#define PF0INT_OICR_CPM_TSYN_EVNT_S 12 +#define PF0INT_OICR_CPM_TSYN_EVNT_M BIT(12) +#define PF0INT_OICR_CPM_TSYN_TGT_S 13 +#define PF0INT_OICR_CPM_TSYN_TGT_M BIT(13) +#define PF0INT_OICR_CPM_HLP_RDY_S 14 +#define PF0INT_OICR_CPM_HLP_RDY_M BIT(14) +#define PF0INT_OICR_CPM_CPM_RDY_S 15 +#define PF0INT_OICR_CPM_CPM_RDY_M BIT(15) +#define PF0INT_OICR_CPM_ECC_ERR_S 16 +#define PF0INT_OICR_CPM_ECC_ERR_M BIT(16) +#define PF0INT_OICR_CPM_RSV2_S 17 +#define PF0INT_OICR_CPM_RSV2_M MAKEMASK(0x3, 17) +#define PF0INT_OICR_CPM_MAL_DETECT_S 19 +#define PF0INT_OICR_CPM_MAL_DETECT_M BIT(19) +#define PF0INT_OICR_CPM_GRST_S 20 +#define PF0INT_OICR_CPM_GRST_M BIT(20) +#define PF0INT_OICR_CPM_PCI_EXCEPTION_S 21 +#define PF0INT_OICR_CPM_PCI_EXCEPTION_M BIT(21) +#define PF0INT_OICR_CPM_GPIO_S 22 +#define PF0INT_OICR_CPM_GPIO_M BIT(22) +#define PF0INT_OICR_CPM_RSV3_S 23 +#define PF0INT_OICR_CPM_RSV3_M BIT(23) +#define PF0INT_OICR_CPM_STORM_DETECT_S 24 +#define PF0INT_OICR_CPM_STORM_DETECT_M BIT(24) +#define PF0INT_OICR_CPM_LINK_STAT_CHANGE_S 25 +#define PF0INT_OICR_CPM_LINK_STAT_CHANGE_M BIT(25) +#define PF0INT_OICR_CPM_HMC_ERR_S 26 +#define PF0INT_OICR_CPM_HMC_ERR_M BIT(26) +#define PF0INT_OICR_CPM_PE_PUSH_S 27 +#define PF0INT_OICR_CPM_PE_PUSH_M BIT(27) +#define PF0INT_OICR_CPM_PE_CRITERR_S 28 +#define PF0INT_OICR_CPM_PE_CRITERR_M BIT(28) +#define PF0INT_OICR_CPM_VFLR_S 29 +#define PF0INT_OICR_CPM_VFLR_M BIT(29) +#define PF0INT_OICR_CPM_XLR_HW_DONE_S 30 +#define 
PF0INT_OICR_CPM_XLR_HW_DONE_M BIT(30) +#define PF0INT_OICR_CPM_SWINT_S 31 +#define PF0INT_OICR_CPM_SWINT_M BIT(31) +#define PF0INT_OICR_CTL_CPM 0x0016CC48 /* Reset Source: CORER */ +#define PF0INT_OICR_CTL_CPM_MSIX_INDX_S 0 +#define PF0INT_OICR_CTL_CPM_MSIX_INDX_M MAKEMASK(0x7FF, 0) +#define PF0INT_OICR_CTL_CPM_ITR_INDX_S 11 +#define PF0INT_OICR_CTL_CPM_ITR_INDX_M MAKEMASK(0x3, 11) +#define PF0INT_OICR_CTL_CPM_CAUSE_ENA_S 30 +#define PF0INT_OICR_CTL_CPM_CAUSE_ENA_M BIT(30) +#define PF0INT_OICR_CTL_CPM_INTEVENT_S 31 +#define PF0INT_OICR_CTL_CPM_INTEVENT_M BIT(31) +#define PF0INT_OICR_CTL_HLP 0x0016CC5C /* Reset Source: CORER */ +#define PF0INT_OICR_CTL_HLP_MSIX_INDX_S 0 +#define PF0INT_OICR_CTL_HLP_MSIX_INDX_M MAKEMASK(0x7FF, 0) +#define PF0INT_OICR_CTL_HLP_ITR_INDX_S 11 +#define PF0INT_OICR_CTL_HLP_ITR_INDX_M MAKEMASK(0x3, 11) +#define PF0INT_OICR_CTL_HLP_CAUSE_ENA_S 30 +#define PF0INT_OICR_CTL_HLP_CAUSE_ENA_M BIT(30) +#define PF0INT_OICR_CTL_HLP_INTEVENT_S 31 +#define PF0INT_OICR_CTL_HLP_INTEVENT_M BIT(31) +#define PF0INT_OICR_CTL_PSM 0x0016CC64 /* Reset Source: CORER */ +#define PF0INT_OICR_CTL_PSM_MSIX_INDX_S 0 +#define PF0INT_OICR_CTL_PSM_MSIX_INDX_M MAKEMASK(0x7FF, 0) +#define PF0INT_OICR_CTL_PSM_ITR_INDX_S 11 +#define PF0INT_OICR_CTL_PSM_ITR_INDX_M MAKEMASK(0x3, 11) +#define PF0INT_OICR_CTL_PSM_CAUSE_ENA_S 30 +#define PF0INT_OICR_CTL_PSM_CAUSE_ENA_M BIT(30) +#define PF0INT_OICR_CTL_PSM_INTEVENT_S 31 +#define PF0INT_OICR_CTL_PSM_INTEVENT_M BIT(31) +#define PF0INT_OICR_ENA_CPM 0x0016CC60 /* Reset Source: CORER */ +#define PF0INT_OICR_ENA_CPM_RSV0_S 0 +#define PF0INT_OICR_ENA_CPM_RSV0_M BIT(0) +#define PF0INT_OICR_ENA_CPM_INT_ENA_S 1 +#define PF0INT_OICR_ENA_CPM_INT_ENA_M MAKEMASK(0x7FFFFFFF, 1) +#define PF0INT_OICR_ENA_HLP 0x0016CC4C /* Reset Source: CORER */ +#define PF0INT_OICR_ENA_HLP_RSV0_S 0 +#define PF0INT_OICR_ENA_HLP_RSV0_M BIT(0) +#define PF0INT_OICR_ENA_HLP_INT_ENA_S 1 +#define PF0INT_OICR_ENA_HLP_INT_ENA_M MAKEMASK(0x7FFFFFFF, 1) +#define PF0INT_OICR_ENA_PSM 0x0016CC58 /* Reset Source: CORER */ +#define PF0INT_OICR_ENA_PSM_RSV0_S 0 +#define PF0INT_OICR_ENA_PSM_RSV0_M BIT(0) +#define PF0INT_OICR_ENA_PSM_INT_ENA_S 1 +#define PF0INT_OICR_ENA_PSM_INT_ENA_M MAKEMASK(0x7FFFFFFF, 1) +#define PF0INT_OICR_HLP 0x0016CC68 /* Reset Source: CORER */ +#define PF0INT_OICR_HLP_INTEVENT_S 0 +#define PF0INT_OICR_HLP_INTEVENT_M BIT(0) +#define PF0INT_OICR_HLP_QUEUE_S 1 +#define PF0INT_OICR_HLP_QUEUE_M BIT(1) +#define PF0INT_OICR_HLP_RSV1_S 2 +#define PF0INT_OICR_HLP_RSV1_M MAKEMASK(0xFF, 2) +#define PF0INT_OICR_HLP_HH_COMP_S 10 +#define PF0INT_OICR_HLP_HH_COMP_M BIT(10) +#define PF0INT_OICR_HLP_TSYN_TX_S 11 +#define PF0INT_OICR_HLP_TSYN_TX_M BIT(11) +#define PF0INT_OICR_HLP_TSYN_EVNT_S 12 +#define PF0INT_OICR_HLP_TSYN_EVNT_M BIT(12) +#define PF0INT_OICR_HLP_TSYN_TGT_S 13 +#define PF0INT_OICR_HLP_TSYN_TGT_M BIT(13) +#define PF0INT_OICR_HLP_HLP_RDY_S 14 +#define PF0INT_OICR_HLP_HLP_RDY_M BIT(14) +#define PF0INT_OICR_HLP_CPM_RDY_S 15 +#define PF0INT_OICR_HLP_CPM_RDY_M BIT(15) +#define PF0INT_OICR_HLP_ECC_ERR_S 16 +#define PF0INT_OICR_HLP_ECC_ERR_M BIT(16) +#define PF0INT_OICR_HLP_RSV2_S 17 +#define PF0INT_OICR_HLP_RSV2_M MAKEMASK(0x3, 17) +#define PF0INT_OICR_HLP_MAL_DETECT_S 19 +#define PF0INT_OICR_HLP_MAL_DETECT_M BIT(19) +#define PF0INT_OICR_HLP_GRST_S 20 +#define PF0INT_OICR_HLP_GRST_M BIT(20) +#define PF0INT_OICR_HLP_PCI_EXCEPTION_S 21 +#define PF0INT_OICR_HLP_PCI_EXCEPTION_M BIT(21) +#define PF0INT_OICR_HLP_GPIO_S 22 +#define PF0INT_OICR_HLP_GPIO_M BIT(22) +#define PF0INT_OICR_HLP_RSV3_S 23 
+#define PF0INT_OICR_HLP_RSV3_M BIT(23) +#define PF0INT_OICR_HLP_STORM_DETECT_S 24 +#define PF0INT_OICR_HLP_STORM_DETECT_M BIT(24) +#define PF0INT_OICR_HLP_LINK_STAT_CHANGE_S 25 +#define PF0INT_OICR_HLP_LINK_STAT_CHANGE_M BIT(25) +#define PF0INT_OICR_HLP_HMC_ERR_S 26 +#define PF0INT_OICR_HLP_HMC_ERR_M BIT(26) +#define PF0INT_OICR_HLP_PE_PUSH_S 27 +#define PF0INT_OICR_HLP_PE_PUSH_M BIT(27) +#define PF0INT_OICR_HLP_PE_CRITERR_S 28 +#define PF0INT_OICR_HLP_PE_CRITERR_M BIT(28) +#define PF0INT_OICR_HLP_VFLR_S 29 +#define PF0INT_OICR_HLP_VFLR_M BIT(29) +#define PF0INT_OICR_HLP_XLR_HW_DONE_S 30 +#define PF0INT_OICR_HLP_XLR_HW_DONE_M BIT(30) +#define PF0INT_OICR_HLP_SWINT_S 31 +#define PF0INT_OICR_HLP_SWINT_M BIT(31) +#define PF0INT_OICR_PSM 0x0016CC44 /* Reset Source: CORER */ +#define PF0INT_OICR_PSM_INTEVENT_S 0 +#define PF0INT_OICR_PSM_INTEVENT_M BIT(0) +#define PF0INT_OICR_PSM_QUEUE_S 1 +#define PF0INT_OICR_PSM_QUEUE_M BIT(1) +#define PF0INT_OICR_PSM_RSV1_S 2 +#define PF0INT_OICR_PSM_RSV1_M MAKEMASK(0xFF, 2) +#define PF0INT_OICR_PSM_HH_COMP_S 10 +#define PF0INT_OICR_PSM_HH_COMP_M BIT(10) +#define PF0INT_OICR_PSM_TSYN_TX_S 11 +#define PF0INT_OICR_PSM_TSYN_TX_M BIT(11) +#define PF0INT_OICR_PSM_TSYN_EVNT_S 12 +#define PF0INT_OICR_PSM_TSYN_EVNT_M BIT(12) +#define PF0INT_OICR_PSM_TSYN_TGT_S 13 +#define PF0INT_OICR_PSM_TSYN_TGT_M BIT(13) +#define PF0INT_OICR_PSM_HLP_RDY_S 14 +#define PF0INT_OICR_PSM_HLP_RDY_M BIT(14) +#define PF0INT_OICR_PSM_CPM_RDY_S 15 +#define PF0INT_OICR_PSM_CPM_RDY_M BIT(15) +#define PF0INT_OICR_PSM_ECC_ERR_S 16 +#define PF0INT_OICR_PSM_ECC_ERR_M BIT(16) +#define PF0INT_OICR_PSM_RSV2_S 17 +#define PF0INT_OICR_PSM_RSV2_M MAKEMASK(0x3, 17) +#define PF0INT_OICR_PSM_MAL_DETECT_S 19 +#define PF0INT_OICR_PSM_MAL_DETECT_M BIT(19) +#define PF0INT_OICR_PSM_GRST_S 20 +#define PF0INT_OICR_PSM_GRST_M BIT(20) +#define PF0INT_OICR_PSM_PCI_EXCEPTION_S 21 +#define PF0INT_OICR_PSM_PCI_EXCEPTION_M BIT(21) +#define PF0INT_OICR_PSM_GPIO_S 22 +#define PF0INT_OICR_PSM_GPIO_M BIT(22) +#define PF0INT_OICR_PSM_RSV3_S 23 +#define PF0INT_OICR_PSM_RSV3_M BIT(23) +#define PF0INT_OICR_PSM_STORM_DETECT_S 24 +#define PF0INT_OICR_PSM_STORM_DETECT_M BIT(24) +#define PF0INT_OICR_PSM_LINK_STAT_CHANGE_S 25 +#define PF0INT_OICR_PSM_LINK_STAT_CHANGE_M BIT(25) +#define PF0INT_OICR_PSM_HMC_ERR_S 26 +#define PF0INT_OICR_PSM_HMC_ERR_M BIT(26) +#define PF0INT_OICR_PSM_PE_PUSH_S 27 +#define PF0INT_OICR_PSM_PE_PUSH_M BIT(27) +#define PF0INT_OICR_PSM_PE_CRITERR_S 28 +#define PF0INT_OICR_PSM_PE_CRITERR_M BIT(28) +#define PF0INT_OICR_PSM_VFLR_S 29 +#define PF0INT_OICR_PSM_VFLR_M BIT(29) +#define PF0INT_OICR_PSM_XLR_HW_DONE_S 30 +#define PF0INT_OICR_PSM_XLR_HW_DONE_M BIT(30) +#define PF0INT_OICR_PSM_SWINT_S 31 +#define PF0INT_OICR_PSM_SWINT_M BIT(31) +#define PF0INT_SB_CPM_CTL 0x0016B2CC /* Reset Source: CORER */ +#define PF0INT_SB_CPM_CTL_MSIX_INDX_S 0 +#define PF0INT_SB_CPM_CTL_MSIX_INDX_M MAKEMASK(0x7FF, 0) +#define PF0INT_SB_CPM_CTL_ITR_INDX_S 11 +#define PF0INT_SB_CPM_CTL_ITR_INDX_M MAKEMASK(0x3, 11) +#define PF0INT_SB_CPM_CTL_CAUSE_ENA_S 30 +#define PF0INT_SB_CPM_CTL_CAUSE_ENA_M BIT(30) +#define PF0INT_SB_CPM_CTL_INTEVENT_S 31 +#define PF0INT_SB_CPM_CTL_INTEVENT_M BIT(31) +#define PF0INT_SB_HLP_CTL 0x0016B640 /* Reset Source: CORER */ +#define PF0INT_SB_HLP_CTL_MSIX_INDX_S 0 +#define PF0INT_SB_HLP_CTL_MSIX_INDX_M MAKEMASK(0x7FF, 0) +#define PF0INT_SB_HLP_CTL_ITR_INDX_S 11 +#define PF0INT_SB_HLP_CTL_ITR_INDX_M MAKEMASK(0x3, 11) +#define PF0INT_SB_HLP_CTL_CAUSE_ENA_S 30 +#define PF0INT_SB_HLP_CTL_CAUSE_ENA_M BIT(30) 
+#define PF0INT_SB_HLP_CTL_INTEVENT_S 31 +#define PF0INT_SB_HLP_CTL_INTEVENT_M BIT(31) +#define PFINT_AEQCTL 0x0016CB00 /* Reset Source: CORER */ +#define PFINT_AEQCTL_MSIX_INDX_S 0 +#define PFINT_AEQCTL_MSIX_INDX_M MAKEMASK(0x7FF, 0) +#define PFINT_AEQCTL_ITR_INDX_S 11 +#define PFINT_AEQCTL_ITR_INDX_M MAKEMASK(0x3, 11) +#define PFINT_AEQCTL_CAUSE_ENA_S 30 +#define PFINT_AEQCTL_CAUSE_ENA_M BIT(30) +#define PFINT_AEQCTL_INTEVENT_S 31 +#define PFINT_AEQCTL_INTEVENT_M BIT(31) +#define PFINT_ALLOC 0x001D2600 /* Reset Source: CORER */ +#define PFINT_ALLOC_FIRST_S 0 +#define PFINT_ALLOC_FIRST_M MAKEMASK(0x7FF, 0) +#define PFINT_ALLOC_LAST_S 12 +#define PFINT_ALLOC_LAST_M MAKEMASK(0x7FF, 12) +#define PFINT_ALLOC_VALID_S 31 +#define PFINT_ALLOC_VALID_M BIT(31) +#define PFINT_ALLOC_PCI 0x0009D800 /* Reset Source: PCIR */ +#define PFINT_ALLOC_PCI_FIRST_S 0 +#define PFINT_ALLOC_PCI_FIRST_M MAKEMASK(0x7FF, 0) +#define PFINT_ALLOC_PCI_LAST_S 12 +#define PFINT_ALLOC_PCI_LAST_M MAKEMASK(0x7FF, 12) +#define PFINT_ALLOC_PCI_VALID_S 31 +#define PFINT_ALLOC_PCI_VALID_M BIT(31) +#define PFINT_FW_CTL 0x0016C800 /* Reset Source: CORER */ +#define PFINT_FW_CTL_MSIX_INDX_S 0 +#define PFINT_FW_CTL_MSIX_INDX_M MAKEMASK(0x7FF, 0) +#define PFINT_FW_CTL_ITR_INDX_S 11 +#define PFINT_FW_CTL_ITR_INDX_M MAKEMASK(0x3, 11) +#define PFINT_FW_CTL_CAUSE_ENA_S 30 +#define PFINT_FW_CTL_CAUSE_ENA_M BIT(30) +#define PFINT_FW_CTL_INTEVENT_S 31 +#define PFINT_FW_CTL_INTEVENT_M BIT(31) +#define PFINT_GPIO_ENA 0x00088080 /* Reset Source: CORER */ +#define PFINT_GPIO_ENA_GPIO0_ENA_S 0 +#define PFINT_GPIO_ENA_GPIO0_ENA_M BIT(0) +#define PFINT_GPIO_ENA_GPIO1_ENA_S 1 +#define PFINT_GPIO_ENA_GPIO1_ENA_M BIT(1) +#define PFINT_GPIO_ENA_GPIO2_ENA_S 2 +#define PFINT_GPIO_ENA_GPIO2_ENA_M BIT(2) +#define PFINT_GPIO_ENA_GPIO3_ENA_S 3 +#define PFINT_GPIO_ENA_GPIO3_ENA_M BIT(3) +#define PFINT_GPIO_ENA_GPIO4_ENA_S 4 +#define PFINT_GPIO_ENA_GPIO4_ENA_M BIT(4) +#define PFINT_GPIO_ENA_GPIO5_ENA_S 5 +#define PFINT_GPIO_ENA_GPIO5_ENA_M BIT(5) +#define PFINT_GPIO_ENA_GPIO6_ENA_S 6 +#define PFINT_GPIO_ENA_GPIO6_ENA_M BIT(6) +#define PFINT_MBX_CTL 0x0016B280 /* Reset Source: CORER */ +#define PFINT_MBX_CTL_MSIX_INDX_S 0 +#define PFINT_MBX_CTL_MSIX_INDX_M MAKEMASK(0x7FF, 0) +#define PFINT_MBX_CTL_ITR_INDX_S 11 +#define PFINT_MBX_CTL_ITR_INDX_M MAKEMASK(0x3, 11) +#define PFINT_MBX_CTL_CAUSE_ENA_S 30 +#define PFINT_MBX_CTL_CAUSE_ENA_M BIT(30) +#define PFINT_MBX_CTL_INTEVENT_S 31 +#define PFINT_MBX_CTL_INTEVENT_M BIT(31) +#define PFINT_OICR 0x0016CA00 /* Reset Source: CORER */ +#define PFINT_OICR_INTEVENT_S 0 +#define PFINT_OICR_INTEVENT_M BIT(0) +#define PFINT_OICR_QUEUE_S 1 +#define PFINT_OICR_QUEUE_M BIT(1) +#define PFINT_OICR_RSV1_S 2 +#define PFINT_OICR_RSV1_M MAKEMASK(0xFF, 2) +#define PFINT_OICR_HH_COMP_S 10 +#define PFINT_OICR_HH_COMP_M BIT(10) +#define PFINT_OICR_TSYN_TX_S 11 +#define PFINT_OICR_TSYN_TX_M BIT(11) +#define PFINT_OICR_TSYN_EVNT_S 12 +#define PFINT_OICR_TSYN_EVNT_M BIT(12) +#define PFINT_OICR_TSYN_TGT_S 13 +#define PFINT_OICR_TSYN_TGT_M BIT(13) +#define PFINT_OICR_HLP_RDY_S 14 +#define PFINT_OICR_HLP_RDY_M BIT(14) +#define PFINT_OICR_CPM_RDY_S 15 +#define PFINT_OICR_CPM_RDY_M BIT(15) +#define PFINT_OICR_ECC_ERR_S 16 +#define PFINT_OICR_ECC_ERR_M BIT(16) +#define PFINT_OICR_RSV2_S 17 +#define PFINT_OICR_RSV2_M MAKEMASK(0x3, 17) +#define PFINT_OICR_MAL_DETECT_S 19 +#define PFINT_OICR_MAL_DETECT_M BIT(19) +#define PFINT_OICR_GRST_S 20 +#define PFINT_OICR_GRST_M BIT(20) +#define PFINT_OICR_PCI_EXCEPTION_S 21 +#define 
PFINT_OICR_PCI_EXCEPTION_M BIT(21) +#define PFINT_OICR_GPIO_S 22 +#define PFINT_OICR_GPIO_M BIT(22) +#define PFINT_OICR_RSV3_S 23 +#define PFINT_OICR_RSV3_M BIT(23) +#define PFINT_OICR_STORM_DETECT_S 24 +#define PFINT_OICR_STORM_DETECT_M BIT(24) +#define PFINT_OICR_LINK_STAT_CHANGE_S 25 +#define PFINT_OICR_LINK_STAT_CHANGE_M BIT(25) +#define PFINT_OICR_HMC_ERR_S 26 +#define PFINT_OICR_HMC_ERR_M BIT(26) +#define PFINT_OICR_PE_PUSH_S 27 +#define PFINT_OICR_PE_PUSH_M BIT(27) +#define PFINT_OICR_PE_CRITERR_S 28 +#define PFINT_OICR_PE_CRITERR_M BIT(28) +#define PFINT_OICR_VFLR_S 29 +#define PFINT_OICR_VFLR_M BIT(29) +#define PFINT_OICR_XLR_HW_DONE_S 30 +#define PFINT_OICR_XLR_HW_DONE_M BIT(30) +#define PFINT_OICR_SWINT_S 31 +#define PFINT_OICR_SWINT_M BIT(31) +#define PFINT_OICR_CTL 0x0016CA80 /* Reset Source: CORER */ +#define PFINT_OICR_CTL_MSIX_INDX_S 0 +#define PFINT_OICR_CTL_MSIX_INDX_M MAKEMASK(0x7FF, 0) +#define PFINT_OICR_CTL_ITR_INDX_S 11 +#define PFINT_OICR_CTL_ITR_INDX_M MAKEMASK(0x3, 11) +#define PFINT_OICR_CTL_CAUSE_ENA_S 30 +#define PFINT_OICR_CTL_CAUSE_ENA_M BIT(30) +#define PFINT_OICR_CTL_INTEVENT_S 31 +#define PFINT_OICR_CTL_INTEVENT_M BIT(31) +#define PFINT_OICR_ENA 0x0016C900 /* Reset Source: CORER */ +#define PFINT_OICR_ENA_RSV0_S 0 +#define PFINT_OICR_ENA_RSV0_M BIT(0) +#define PFINT_OICR_ENA_INT_ENA_S 1 +#define PFINT_OICR_ENA_INT_ENA_M MAKEMASK(0x7FFFFFFF, 1) +#define PFINT_SB_CTL 0x0016B600 /* Reset Source: CORER */ +#define PFINT_SB_CTL_MSIX_INDX_S 0 +#define PFINT_SB_CTL_MSIX_INDX_M MAKEMASK(0x7FF, 0) +#define PFINT_SB_CTL_ITR_INDX_S 11 +#define PFINT_SB_CTL_ITR_INDX_M MAKEMASK(0x3, 11) +#define PFINT_SB_CTL_CAUSE_ENA_S 30 +#define PFINT_SB_CTL_CAUSE_ENA_M BIT(30) +#define PFINT_SB_CTL_INTEVENT_S 31 +#define PFINT_SB_CTL_INTEVENT_M BIT(31) +#define PFINT_TSYN_MSK 0x0016C980 /* Reset Source: CORER */ +#define PFINT_TSYN_MSK_PHY_INDX_S 0 +#define PFINT_TSYN_MSK_PHY_INDX_M MAKEMASK(0x1F, 0) +#define QINT_RQCTL(_QRX) (0x00150000 + ((_QRX) * 4)) /* _i=0...2047 */ /* Reset Source: CORER */ +#define QINT_RQCTL_MAX_INDEX 2047 +#define QINT_RQCTL_MSIX_INDX_S 0 +#define QINT_RQCTL_MSIX_INDX_M MAKEMASK(0x7FF, 0) +#define QINT_RQCTL_ITR_INDX_S 11 +#define QINT_RQCTL_ITR_INDX_M MAKEMASK(0x3, 11) +#define QINT_RQCTL_CAUSE_ENA_S 30 +#define QINT_RQCTL_CAUSE_ENA_M BIT(30) +#define QINT_RQCTL_INTEVENT_S 31 +#define QINT_RQCTL_INTEVENT_M BIT(31) +#define QINT_TQCTL(_DBQM) (0x00140000 + ((_DBQM) * 4)) /* _i=0...16383 */ /* Reset Source: CORER */ +#define QINT_TQCTL_MAX_INDEX 16383 +#define QINT_TQCTL_MSIX_INDX_S 0 +#define QINT_TQCTL_MSIX_INDX_M MAKEMASK(0x7FF, 0) +#define QINT_TQCTL_ITR_INDX_S 11 +#define QINT_TQCTL_ITR_INDX_M MAKEMASK(0x3, 11) +#define QINT_TQCTL_CAUSE_ENA_S 30 +#define QINT_TQCTL_CAUSE_ENA_M BIT(30) +#define QINT_TQCTL_INTEVENT_S 31 +#define QINT_TQCTL_INTEVENT_M BIT(31) +#define VPINT_AEQCTL(_VF) (0x0016B800 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: CORER */ +#define VPINT_AEQCTL_MAX_INDEX 255 +#define VPINT_AEQCTL_MSIX_INDX_S 0 +#define VPINT_AEQCTL_MSIX_INDX_M MAKEMASK(0x7FF, 0) +#define VPINT_AEQCTL_ITR_INDX_S 11 +#define VPINT_AEQCTL_ITR_INDX_M MAKEMASK(0x3, 11) +#define VPINT_AEQCTL_CAUSE_ENA_S 30 +#define VPINT_AEQCTL_CAUSE_ENA_M BIT(30) +#define VPINT_AEQCTL_INTEVENT_S 31 +#define VPINT_AEQCTL_INTEVENT_M BIT(31) +#define VPINT_ALLOC(_VF) (0x001D1000 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: CORER */ +#define VPINT_ALLOC_MAX_INDEX 255 +#define VPINT_ALLOC_FIRST_S 0 +#define VPINT_ALLOC_FIRST_M MAKEMASK(0x7FF, 0) +#define VPINT_ALLOC_LAST_S 12 
+#define VPINT_ALLOC_LAST_M MAKEMASK(0x7FF, 12) +#define VPINT_ALLOC_VALID_S 31 +#define VPINT_ALLOC_VALID_M BIT(31) +#define VPINT_ALLOC_PCI(_VF) (0x0009D000 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: PCIR */ +#define VPINT_ALLOC_PCI_MAX_INDEX 255 +#define VPINT_ALLOC_PCI_FIRST_S 0 +#define VPINT_ALLOC_PCI_FIRST_M MAKEMASK(0x7FF, 0) +#define VPINT_ALLOC_PCI_LAST_S 12 +#define VPINT_ALLOC_PCI_LAST_M MAKEMASK(0x7FF, 12) +#define VPINT_ALLOC_PCI_VALID_S 31 +#define VPINT_ALLOC_PCI_VALID_M BIT(31) +#define VPINT_MBX_CPM_CTL(_VP128) (0x0016B000 + ((_VP128) * 4)) /* _i=0...127 */ /* Reset Source: CORER */ +#define VPINT_MBX_CPM_CTL_MAX_INDEX 127 +#define VPINT_MBX_CPM_CTL_MSIX_INDX_S 0 +#define VPINT_MBX_CPM_CTL_MSIX_INDX_M MAKEMASK(0x7FF, 0) +#define VPINT_MBX_CPM_CTL_ITR_INDX_S 11 +#define VPINT_MBX_CPM_CTL_ITR_INDX_M MAKEMASK(0x3, 11) +#define VPINT_MBX_CPM_CTL_CAUSE_ENA_S 30 +#define VPINT_MBX_CPM_CTL_CAUSE_ENA_M BIT(30) +#define VPINT_MBX_CPM_CTL_INTEVENT_S 31 +#define VPINT_MBX_CPM_CTL_INTEVENT_M BIT(31) +#define VPINT_MBX_CTL(_VSI) (0x0016A000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: CORER */ +#define VPINT_MBX_CTL_MAX_INDEX 767 +#define VPINT_MBX_CTL_MSIX_INDX_S 0 +#define VPINT_MBX_CTL_MSIX_INDX_M MAKEMASK(0x7FF, 0) +#define VPINT_MBX_CTL_ITR_INDX_S 11 +#define VPINT_MBX_CTL_ITR_INDX_M MAKEMASK(0x3, 11) +#define VPINT_MBX_CTL_CAUSE_ENA_S 30 +#define VPINT_MBX_CTL_CAUSE_ENA_M BIT(30) +#define VPINT_MBX_CTL_INTEVENT_S 31 +#define VPINT_MBX_CTL_INTEVENT_M BIT(31) +#define VPINT_MBX_HLP_CTL(_VP16) (0x0016B200 + ((_VP16) * 4)) /* _i=0...15 */ /* Reset Source: CORER */ +#define VPINT_MBX_HLP_CTL_MAX_INDEX 15 +#define VPINT_MBX_HLP_CTL_MSIX_INDX_S 0 +#define VPINT_MBX_HLP_CTL_MSIX_INDX_M MAKEMASK(0x7FF, 0) +#define VPINT_MBX_HLP_CTL_ITR_INDX_S 11 +#define VPINT_MBX_HLP_CTL_ITR_INDX_M MAKEMASK(0x3, 11) +#define VPINT_MBX_HLP_CTL_CAUSE_ENA_S 30 +#define VPINT_MBX_HLP_CTL_CAUSE_ENA_M BIT(30) +#define VPINT_MBX_HLP_CTL_INTEVENT_S 31 +#define VPINT_MBX_HLP_CTL_INTEVENT_M BIT(31) +#define VPINT_MBX_PSM_CTL(_VP16) (0x0016B240 + ((_VP16) * 4)) /* _i=0...15 */ /* Reset Source: CORER */ +#define VPINT_MBX_PSM_CTL_MAX_INDEX 15 +#define VPINT_MBX_PSM_CTL_MSIX_INDX_S 0 +#define VPINT_MBX_PSM_CTL_MSIX_INDX_M MAKEMASK(0x7FF, 0) +#define VPINT_MBX_PSM_CTL_ITR_INDX_S 11 +#define VPINT_MBX_PSM_CTL_ITR_INDX_M MAKEMASK(0x3, 11) +#define VPINT_MBX_PSM_CTL_CAUSE_ENA_S 30 +#define VPINT_MBX_PSM_CTL_CAUSE_ENA_M BIT(30) +#define VPINT_MBX_PSM_CTL_INTEVENT_S 31 +#define VPINT_MBX_PSM_CTL_INTEVENT_M BIT(31) +#define VPINT_SB_CPM_CTL(_VP128) (0x0016B400 + ((_VP128) * 4)) /* _i=0...127 */ /* Reset Source: CORER */ +#define VPINT_SB_CPM_CTL_MAX_INDEX 127 +#define VPINT_SB_CPM_CTL_MSIX_INDX_S 0 +#define VPINT_SB_CPM_CTL_MSIX_INDX_M MAKEMASK(0x7FF, 0) +#define VPINT_SB_CPM_CTL_ITR_INDX_S 11 +#define VPINT_SB_CPM_CTL_ITR_INDX_M MAKEMASK(0x3, 11) +#define VPINT_SB_CPM_CTL_CAUSE_ENA_S 30 +#define VPINT_SB_CPM_CTL_CAUSE_ENA_M BIT(30) +#define VPINT_SB_CPM_CTL_INTEVENT_S 31 +#define VPINT_SB_CPM_CTL_INTEVENT_M BIT(31) +#define GL_HLP_PRT_IPG_PREAMBLE_SIZE(_i) (0x00049240 + ((_i) * 4)) /* _i=0...20 */ /* Reset Source: CORER */ +#define GL_HLP_PRT_IPG_PREAMBLE_SIZE_MAX_INDEX 20 +#define GL_HLP_PRT_IPG_PREAMBLE_SIZE_IPG_PREAMBLE_SIZE_S 0 +#define GL_HLP_PRT_IPG_PREAMBLE_SIZE_IPG_PREAMBLE_SIZE_M MAKEMASK(0xFF, 0) +#define GL_TDPU_PSM_DEFAULT_RECIPE(_i) (0x00049294 + ((_i) * 4)) /* _i=0...3 */ /* Reset Source: CORER */ +#define GL_TDPU_PSM_DEFAULT_RECIPE_MAX_INDEX 3 +#define GL_TDPU_PSM_DEFAULT_RECIPE_ADD_IPG_S 0 
+#define GL_TDPU_PSM_DEFAULT_RECIPE_ADD_IPG_M BIT(0) +#define GL_TDPU_PSM_DEFAULT_RECIPE_SUB_CRC_S 1 +#define GL_TDPU_PSM_DEFAULT_RECIPE_SUB_CRC_M BIT(1) +#define GL_TDPU_PSM_DEFAULT_RECIPE_SUB_ESP_TRAILER_S 2 +#define GL_TDPU_PSM_DEFAULT_RECIPE_SUB_ESP_TRAILER_M BIT(2) +#define GL_TDPU_PSM_DEFAULT_RECIPE_INCLUDE_L2_PAD_S 3 +#define GL_TDPU_PSM_DEFAULT_RECIPE_INCLUDE_L2_PAD_M BIT(3) +#define GL_TDPU_PSM_DEFAULT_RECIPE_DEFAULT_UPDATE_MODE_S 4 +#define GL_TDPU_PSM_DEFAULT_RECIPE_DEFAULT_UPDATE_MODE_M BIT(4) +#define GLLAN_PF_RECIPE(_i) (0x0029420C + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLLAN_PF_RECIPE_MAX_INDEX 7 +#define GLLAN_PF_RECIPE_RECIPE_S 0 +#define GLLAN_PF_RECIPE_RECIPE_M MAKEMASK(0x3, 0) +#define GLLAN_RCTL_0 0x002941F8 /* Reset Source: CORER */ +#define GLLAN_RCTL_0_PXE_MODE_S 0 +#define GLLAN_RCTL_0_PXE_MODE_M BIT(0) +#define GLLAN_RCTL_1 0x002941FC /* Reset Source: CORER */ +#define GLLAN_RCTL_1_RXMAX_EXPANSION_S 12 +#define GLLAN_RCTL_1_RXMAX_EXPANSION_M MAKEMASK(0xF, 12) +#define GLLAN_RCTL_1_RXDRDCTL_S 17 +#define GLLAN_RCTL_1_RXDRDCTL_M BIT(17) +#define GLLAN_RCTL_1_RXDESCRDROEN_S 18 +#define GLLAN_RCTL_1_RXDESCRDROEN_M BIT(18) +#define GLLAN_RCTL_1_RXDATAWRROEN_S 19 +#define GLLAN_RCTL_1_RXDATAWRROEN_M BIT(19) +#define GLLAN_TSOMSK_F 0x00049308 /* Reset Source: CORER */ +#define GLLAN_TSOMSK_F_TCPMSKF_S 0 +#define GLLAN_TSOMSK_F_TCPMSKF_M MAKEMASK(0xFFF, 0) +#define GLLAN_TSOMSK_L 0x00049310 /* Reset Source: CORER */ +#define GLLAN_TSOMSK_L_TCPMSKL_S 0 +#define GLLAN_TSOMSK_L_TCPMSKL_M MAKEMASK(0xFFF, 0) +#define GLLAN_TSOMSK_M 0x0004930C /* Reset Source: CORER */ +#define GLLAN_TSOMSK_M_TCPMSKM_S 0 +#define GLLAN_TSOMSK_M_TCPMSKM_M MAKEMASK(0xFFF, 0) +#define PFLAN_CP_QALLOC 0x00075700 /* Reset Source: CORER */ +#define PFLAN_CP_QALLOC_FIRSTQ_S 0 +#define PFLAN_CP_QALLOC_FIRSTQ_M MAKEMASK(0x1FF, 0) +#define PFLAN_CP_QALLOC_LASTQ_S 16 +#define PFLAN_CP_QALLOC_LASTQ_M MAKEMASK(0x1FF, 16) +#define PFLAN_CP_QALLOC_VALID_S 31 +#define PFLAN_CP_QALLOC_VALID_M BIT(31) +#define PFLAN_DB_QALLOC 0x00075680 /* Reset Source: CORER */ +#define PFLAN_DB_QALLOC_FIRSTQ_S 0 +#define PFLAN_DB_QALLOC_FIRSTQ_M MAKEMASK(0xFF, 0) +#define PFLAN_DB_QALLOC_LASTQ_S 16 +#define PFLAN_DB_QALLOC_LASTQ_M MAKEMASK(0xFF, 16) +#define PFLAN_DB_QALLOC_VALID_S 31 +#define PFLAN_DB_QALLOC_VALID_M BIT(31) +#define PFLAN_RX_QALLOC 0x001D2500 /* Reset Source: CORER */ +#define PFLAN_RX_QALLOC_FIRSTQ_S 0 +#define PFLAN_RX_QALLOC_FIRSTQ_M MAKEMASK(0x7FF, 0) +#define PFLAN_RX_QALLOC_LASTQ_S 16 +#define PFLAN_RX_QALLOC_LASTQ_M MAKEMASK(0x7FF, 16) +#define PFLAN_RX_QALLOC_VALID_S 31 +#define PFLAN_RX_QALLOC_VALID_M BIT(31) +#define PFLAN_TX_QALLOC 0x001D2580 /* Reset Source: CORER */ +#define PFLAN_TX_QALLOC_FIRSTQ_S 0 +#define PFLAN_TX_QALLOC_FIRSTQ_M MAKEMASK(0x3FFF, 0) +#define PFLAN_TX_QALLOC_LASTQ_S 16 +#define PFLAN_TX_QALLOC_LASTQ_M MAKEMASK(0x3FFF, 16) +#define PFLAN_TX_QALLOC_VALID_S 31 +#define PFLAN_TX_QALLOC_VALID_M BIT(31) +#define PRT_TDPUL2TAGSEN 0x00040BA0 /* Reset Source: CORER */ +#define PRT_TDPUL2TAGSEN_ENABLE_S 0 +#define PRT_TDPUL2TAGSEN_ENABLE_M MAKEMASK(0xFF, 0) +#define PRT_TDPUL2TAGSEN_NONLAST_TAG_S 8 +#define PRT_TDPUL2TAGSEN_NONLAST_TAG_M MAKEMASK(0xFF, 8) +#define QRX_CONTEXT(_i, _QRX) (0x00280000 + ((_i) * 8192 + (_QRX) * 4)) /* _i=0...7, _QRX=0...2047 */ /* Reset Source: CORER */ +#define QRX_CONTEXT_MAX_INDEX 7 +#define QRX_CONTEXT_RXQ_CONTEXT_S 0 +#define QRX_CONTEXT_RXQ_CONTEXT_M MAKEMASK(0xFFFFFFFF, 0) +#define QRX_CTRL(_QRX) (0x00120000 + ((_QRX) * 4)) 
/* _i=0...2047 */ /* Reset Source: PFR */ +#define QRX_CTRL_MAX_INDEX 2047 +#define QRX_CTRL_QENA_REQ_S 0 +#define QRX_CTRL_QENA_REQ_M BIT(0) +#define QRX_CTRL_FAST_QDIS_S 1 +#define QRX_CTRL_FAST_QDIS_M BIT(1) +#define QRX_CTRL_QENA_STAT_S 2 +#define QRX_CTRL_QENA_STAT_M BIT(2) +#define QRX_CTRL_CDE_S 3 +#define QRX_CTRL_CDE_M BIT(3) +#define QRX_CTRL_CDS_S 4 +#define QRX_CTRL_CDS_M BIT(4) +#define QRX_ITR(_QRX) (0x00292000 + ((_QRX) * 4)) /* _i=0...2047 */ /* Reset Source: CORER */ +#define QRX_ITR_MAX_INDEX 2047 +#define QRX_ITR_NO_EXPR_S 0 +#define QRX_ITR_NO_EXPR_M BIT(0) +#define QRX_TAIL(_QRX) (0x00290000 + ((_QRX) * 4)) /* _i=0...2047 */ /* Reset Source: CORER */ +#define QRX_TAIL_MAX_INDEX 2047 +#define QRX_TAIL_TAIL_S 0 +#define QRX_TAIL_TAIL_M MAKEMASK(0x1FFF, 0) +#define VPDSI_RX_QTABLE(_i, _VP16) (0x00074C00 + ((_i) * 64 + (_VP16) * 4)) /* _i=0...15, _VP16=0...15 */ /* Reset Source: CORER */ +#define VPDSI_RX_QTABLE_MAX_INDEX 15 +#define VPDSI_RX_QTABLE_PAGE_INDEX0_S 0 +#define VPDSI_RX_QTABLE_PAGE_INDEX0_M MAKEMASK(0x7F, 0) +#define VPDSI_RX_QTABLE_PAGE_INDEX1_S 8 +#define VPDSI_RX_QTABLE_PAGE_INDEX1_M MAKEMASK(0x7F, 8) +#define VPDSI_RX_QTABLE_PAGE_INDEX2_S 16 +#define VPDSI_RX_QTABLE_PAGE_INDEX2_M MAKEMASK(0x7F, 16) +#define VPDSI_RX_QTABLE_PAGE_INDEX3_S 24 +#define VPDSI_RX_QTABLE_PAGE_INDEX3_M MAKEMASK(0x7F, 24) +#define VPDSI_TX_QTABLE(_i, _VP16) (0x001D2000 + ((_i) * 64 + (_VP16) * 4)) /* _i=0...15, _VP16=0...15 */ /* Reset Source: CORER */ +#define VPDSI_TX_QTABLE_MAX_INDEX 15 +#define VPDSI_TX_QTABLE_PAGE_INDEX0_S 0 +#define VPDSI_TX_QTABLE_PAGE_INDEX0_M MAKEMASK(0x7F, 0) +#define VPDSI_TX_QTABLE_PAGE_INDEX1_S 8 +#define VPDSI_TX_QTABLE_PAGE_INDEX1_M MAKEMASK(0x7F, 8) +#define VPDSI_TX_QTABLE_PAGE_INDEX2_S 16 +#define VPDSI_TX_QTABLE_PAGE_INDEX2_M MAKEMASK(0x7F, 16) +#define VPDSI_TX_QTABLE_PAGE_INDEX3_S 24 +#define VPDSI_TX_QTABLE_PAGE_INDEX3_M MAKEMASK(0x7F, 24) +#define VPLAN_DB_QTABLE(_i, _VF) (0x00070000 + ((_i) * 2048 + (_VF) * 4)) /* _i=0...3, _VF=0...255 */ /* Reset Source: CORER */ +#define VPLAN_DB_QTABLE_MAX_INDEX 3 +#define VPLAN_DB_QTABLE_QINDEX_S 0 +#define VPLAN_DB_QTABLE_QINDEX_M MAKEMASK(0x1FF, 0) +#define VPLAN_DSI_VF_MODE(_VP16) (0x002D2C00 + ((_VP16) * 4)) /* _i=0...15 */ /* Reset Source: CORER */ +#define VPLAN_DSI_VF_MODE_MAX_INDEX 15 +#define VPLAN_DSI_VF_MODE_LAN_DSI_VF_MODE_S 0 +#define VPLAN_DSI_VF_MODE_LAN_DSI_VF_MODE_M BIT(0) +#define VPLAN_RX_QBASE(_VF) (0x00072000 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: CORER */ +#define VPLAN_RX_QBASE_MAX_INDEX 255 +#define VPLAN_RX_QBASE_VFFIRSTQ_S 0 +#define VPLAN_RX_QBASE_VFFIRSTQ_M MAKEMASK(0x7FF, 0) +#define VPLAN_RX_QBASE_VFNUMQ_S 16 +#define VPLAN_RX_QBASE_VFNUMQ_M MAKEMASK(0xFF, 16) +#define VPLAN_RX_QBASE_VFQTABLE_ENA_S 31 +#define VPLAN_RX_QBASE_VFQTABLE_ENA_M BIT(31) +#define VPLAN_RX_QTABLE(_i, _VF) (0x00060000 + ((_i) * 2048 + (_VF) * 4)) /* _i=0...15, _VF=0...255 */ /* Reset Source: CORER */ +#define VPLAN_RX_QTABLE_MAX_INDEX 15 +#define VPLAN_RX_QTABLE_QINDEX_S 0 +#define VPLAN_RX_QTABLE_QINDEX_M MAKEMASK(0xFFF, 0) +#define VPLAN_RXQ_MAPENA(_VF) (0x00073000 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: CORER */ +#define VPLAN_RXQ_MAPENA_MAX_INDEX 255 +#define VPLAN_RXQ_MAPENA_RX_ENA_S 0 +#define VPLAN_RXQ_MAPENA_RX_ENA_M BIT(0) +#define VPLAN_TX_QBASE(_VF) (0x001D1800 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: CORER */ +#define VPLAN_TX_QBASE_MAX_INDEX 255 +#define VPLAN_TX_QBASE_VFFIRSTQ_S 0 +#define VPLAN_TX_QBASE_VFFIRSTQ_M MAKEMASK(0x3FFF, 0) +#define 
VPLAN_TX_QBASE_VFNUMQ_S 16 +#define VPLAN_TX_QBASE_VFNUMQ_M MAKEMASK(0xFF, 16) +#define VPLAN_TX_QBASE_VFQTABLE_ENA_S 31 +#define VPLAN_TX_QBASE_VFQTABLE_ENA_M BIT(31) +#define VPLAN_TX_QTABLE(_i, _VF) (0x001C0000 + ((_i) * 2048 + (_VF) * 4)) /* _i=0...15, _VF=0...255 */ /* Reset Source: CORER */ +#define VPLAN_TX_QTABLE_MAX_INDEX 15 +#define VPLAN_TX_QTABLE_QINDEX_S 0 +#define VPLAN_TX_QTABLE_QINDEX_M MAKEMASK(0x7FFF, 0) +#define VPLAN_TXQ_MAPENA(_VF) (0x00073800 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: CORER */ +#define VPLAN_TXQ_MAPENA_MAX_INDEX 255 +#define VPLAN_TXQ_MAPENA_TX_ENA_S 0 +#define VPLAN_TXQ_MAPENA_TX_ENA_M BIT(0) +#define VSILAN_QBASE(_VSI) (0x0044C000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: PFR */ +#define VSILAN_QBASE_MAX_INDEX 767 +#define VSILAN_QBASE_VSIBASE_S 0 +#define VSILAN_QBASE_VSIBASE_M MAKEMASK(0x7FF, 0) +#define VSILAN_QBASE_VSIQTABLE_ENA_S 11 +#define VSILAN_QBASE_VSIQTABLE_ENA_M BIT(11) +#define VSILAN_QTABLE(_i, _VSI) (0x00440000 + ((_i) * 4096 + (_VSI) * 4)) /* _i=0...7, _VSI=0...767 */ /* Reset Source: PFR */ +#define VSILAN_QTABLE_MAX_INDEX 7 +#define VSILAN_QTABLE_QINDEX_0_S 0 +#define VSILAN_QTABLE_QINDEX_0_M MAKEMASK(0x7FF, 0) +#define VSILAN_QTABLE_QINDEX_1_S 16 +#define VSILAN_QTABLE_QINDEX_1_M MAKEMASK(0x7FF, 16) +#define PRTMAC_HSEC_CTL_RX_ENABLE_GCP 0x001E31C0 /* Reset Source: GLOBR */ +#define PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_S 0 +#define PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_M BIT(0) +#define PRTMAC_HSEC_CTL_RX_ENABLE_GPP 0x001E34C0 /* Reset Source: GLOBR */ +#define PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_S 0 +#define PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_M BIT(0) +#define PRTMAC_HSEC_CTL_RX_ENABLE_PPP 0x001E35C0 /* Reset Source: GLOBR */ +#define PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_S 0 +#define PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_M BIT(0) +#define PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL 0x001E36C0 /* Reset Source: GLOBR */ +#define PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_S 0 +#define PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_M BIT(0) +#define PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1 0x001E3220 /* Reset Source: GLOBR */ +#define PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_S 0 +#define PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_M MAKEMASK(0xFFFFFFFF, 0) +#define PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2 0x001E3240 /* Reset Source: GLOBR */ +#define PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_S 0 +#define PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_M MAKEMASK(0xFFFF, 0) +#define PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE 0x001E3180 /* Reset Source: GLOBR */ +#define PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_S 0 +#define PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_M MAKEMASK(0x1FF, 0) +#define PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1 0x001E3280 /* Reset Source: GLOBR */ +#define PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_S 0 +#define PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_M MAKEMASK(0xFFFFFFFF, 0) +#define PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2 0x001E32A0 /* Reset Source: GLOBR */ +#define PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_S 0 +#define PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_M MAKEMASK(0xFFFF, 0) +#define PRTMAC_HSEC_CTL_RX_QUANTA_S 0x001E3C40 /* Reset Source: GLOBR */ +#define 
PRTMAC_HSEC_CTL_RX_QUANTA_SHIFT_PRTMAC_HSEC_CTL_RX_QUANTA_SHIFT_S 0 +#define PRTMAC_HSEC_CTL_RX_QUANTA_SHIFT_PRTMAC_HSEC_CTL_RX_QUANTA_SHIFT_M MAKEMASK(0xFFFF, 0) +#define PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE 0x001E31A0 /* Reset Source: GLOBR */ +#define PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_S 0 +#define PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_M MAKEMASK(0x1FF, 0) +#define PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(_i) (0x001E36E0 + ((_i) * 32)) /* _i=0...8 */ /* Reset Source: GLOBR */ +#define PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX 8 +#define PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_S 0 +#define PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M MAKEMASK(0xFFFF, 0) +#define PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(_i) (0x001E3800 + ((_i) * 32)) /* _i=0...8 */ /* Reset Source: GLOBR */ +#define PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MAX_INDEX 8 +#define PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_S 0 +#define PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_M MAKEMASK(0xFFFF, 0) +#define PRTMAC_HSEC_CTL_TX_SA_PART1 0x001E3960 /* Reset Source: GLOBR */ +#define PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_S 0 +#define PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_M MAKEMASK(0xFFFFFFFF, 0) +#define PRTMAC_HSEC_CTL_TX_SA_PART2 0x001E3980 /* Reset Source: GLOBR */ +#define PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_S 0 +#define PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_M MAKEMASK(0xFFFF, 0) +#define PRTMAC_LINK_DOWN_COUNTER 0x001E47C0 /* Reset Source: GLOBR */ +#define PRTMAC_LINK_DOWN_COUNTER_LINK_DOWN_COUNTER_S 0 +#define PRTMAC_LINK_DOWN_COUNTER_LINK_DOWN_COUNTER_M MAKEMASK(0xFFFF, 0) +#define PRTMAC_MD_OVRRIDE_ENABLE(_i) (0x001E3C60 + ((_i) * 32)) /* _i=0...7 */ /* Reset Source: GLOBR */ +#define PRTMAC_MD_OVRRIDE_ENABLE_MAX_INDEX 7 +#define PRTMAC_MD_OVRRIDE_ENABLE_PRTMAC_MD_OVRRIDE_ENABLE_S 0 +#define PRTMAC_MD_OVRRIDE_ENABLE_PRTMAC_MD_OVRRIDE_ENABLE_M MAKEMASK(0xFFFFFFFF, 0) +#define PRTMAC_MD_OVRRIDE_VAL(_i) (0x001E3D60 + ((_i) * 32)) /* _i=0...7 */ /* Reset Source: GLOBR */ +#define PRTMAC_MD_OVRRIDE_VAL_MAX_INDEX 7 +#define PRTMAC_MD_OVRRIDE_VAL_PRTMAC_MD_OVRRIDE_ENABLE_S 0 +#define PRTMAC_MD_OVRRIDE_VAL_PRTMAC_MD_OVRRIDE_ENABLE_M MAKEMASK(0xFFFFFFFF, 0) +#define PRTMAC_RX_CNT_MRKR 0x001E48E0 /* Reset Source: GLOBR */ +#define PRTMAC_RX_CNT_MRKR_RX_CNT_MRKR_S 0 +#define PRTMAC_RX_CNT_MRKR_RX_CNT_MRKR_M MAKEMASK(0xFFFF, 0) +#define PRTMAC_RX_PKT_DRP_CNT 0x001E3C20 /* Reset Source: GLOBR */ +#define PRTMAC_RX_PKT_DRP_CNT_RX_PKT_DRP_CNT_S 0 +#define PRTMAC_RX_PKT_DRP_CNT_RX_PKT_DRP_CNT_M MAKEMASK(0xFFFF, 0) +#define PRTMAC_RX_PKT_DRP_CNT_RX_MKR_PKT_DRP_CNT_S 16 +#define PRTMAC_RX_PKT_DRP_CNT_RX_MKR_PKT_DRP_CNT_M MAKEMASK(0xFFFF, 16) +#define PRTMAC_TX_CNT_MRKR 0x001E48C0 /* Reset Source: GLOBR */ +#define PRTMAC_TX_CNT_MRKR_TX_CNT_MRKR_S 0 +#define PRTMAC_TX_CNT_MRKR_TX_CNT_MRKR_M MAKEMASK(0xFFFF, 0) +#define PRTMAC_TX_LNK_UP_CNT 0x001E4840 /* Reset Source: GLOBR */ +#define PRTMAC_TX_LNK_UP_CNT_TX_LINK_UP_CNT_S 0 +#define PRTMAC_TX_LNK_UP_CNT_TX_LINK_UP_CNT_M MAKEMASK(0xFFFF, 0) +#define GL_MDCK_CFG1_TX_PQM 0x002D2DF4 /* Reset Source: CORER */ +#define GL_MDCK_CFG1_TX_PQM_SSO_MAX_DATA_LEN_S 0 +#define GL_MDCK_CFG1_TX_PQM_SSO_MAX_DATA_LEN_M MAKEMASK(0xFF, 0) +#define GL_MDCK_CFG1_TX_PQM_SSO_MAX_PKT_CNT_S 8 +#define GL_MDCK_CFG1_TX_PQM_SSO_MAX_PKT_CNT_M MAKEMASK(0x3F, 8) +#define GL_MDCK_CFG1_TX_PQM_SSO_MAX_DESC_CNT_S 16 +#define GL_MDCK_CFG1_TX_PQM_SSO_MAX_DESC_CNT_M 
MAKEMASK(0x3F, 16) +#define GL_MDCK_EN_TX_PQM 0x002D2DFC /* Reset Source: CORER */ +#define GL_MDCK_EN_TX_PQM_PCI_DUMMY_COMP_S 0 +#define GL_MDCK_EN_TX_PQM_PCI_DUMMY_COMP_M BIT(0) +#define GL_MDCK_EN_TX_PQM_PCI_UR_COMP_S 1 +#define GL_MDCK_EN_TX_PQM_PCI_UR_COMP_M BIT(1) +#define GL_MDCK_EN_TX_PQM_RCV_SH_BE_LSO_S 3 +#define GL_MDCK_EN_TX_PQM_RCV_SH_BE_LSO_M BIT(3) +#define GL_MDCK_EN_TX_PQM_Q_FL_MNG_EPY_CH_S 4 +#define GL_MDCK_EN_TX_PQM_Q_FL_MNG_EPY_CH_M BIT(4) +#define GL_MDCK_EN_TX_PQM_Q_EPY_MNG_FL_CH_S 5 +#define GL_MDCK_EN_TX_PQM_Q_EPY_MNG_FL_CH_M BIT(5) +#define GL_MDCK_EN_TX_PQM_LSO_NUMDESCS_ZERO_S 6 +#define GL_MDCK_EN_TX_PQM_LSO_NUMDESCS_ZERO_M BIT(6) +#define GL_MDCK_EN_TX_PQM_LSO_LENGTH_ZERO_S 7 +#define GL_MDCK_EN_TX_PQM_LSO_LENGTH_ZERO_M BIT(7) +#define GL_MDCK_EN_TX_PQM_LSO_MSS_BELOW_MIN_S 8 +#define GL_MDCK_EN_TX_PQM_LSO_MSS_BELOW_MIN_M BIT(8) +#define GL_MDCK_EN_TX_PQM_LSO_MSS_ABOVE_MAX_S 9 +#define GL_MDCK_EN_TX_PQM_LSO_MSS_ABOVE_MAX_M BIT(9) +#define GL_MDCK_EN_TX_PQM_LSO_HDR_SIZE_ZERO_S 10 +#define GL_MDCK_EN_TX_PQM_LSO_HDR_SIZE_ZERO_M BIT(10) +#define GL_MDCK_EN_TX_PQM_RCV_CNT_BE_LSO_S 11 +#define GL_MDCK_EN_TX_PQM_RCV_CNT_BE_LSO_M BIT(11) +#define GL_MDCK_EN_TX_PQM_SKIP_ONE_QT_ONLY_S 12 +#define GL_MDCK_EN_TX_PQM_SKIP_ONE_QT_ONLY_M BIT(12) +#define GL_MDCK_EN_TX_PQM_LSO_PKTCNT_ZERO_S 13 +#define GL_MDCK_EN_TX_PQM_LSO_PKTCNT_ZERO_M BIT(13) +#define GL_MDCK_EN_TX_PQM_SSO_LENGTH_ZERO_S 14 +#define GL_MDCK_EN_TX_PQM_SSO_LENGTH_ZERO_M BIT(14) +#define GL_MDCK_EN_TX_PQM_SSO_LENGTH_EXCEED_S 15 +#define GL_MDCK_EN_TX_PQM_SSO_LENGTH_EXCEED_M BIT(15) +#define GL_MDCK_EN_TX_PQM_SSO_PKTCNT_ZERO_S 16 +#define GL_MDCK_EN_TX_PQM_SSO_PKTCNT_ZERO_M BIT(16) +#define GL_MDCK_EN_TX_PQM_SSO_PKTCNT_EXCEED_S 17 +#define GL_MDCK_EN_TX_PQM_SSO_PKTCNT_EXCEED_M BIT(17) +#define GL_MDCK_EN_TX_PQM_SSO_NUMDESCS_ZERO_S 18 +#define GL_MDCK_EN_TX_PQM_SSO_NUMDESCS_ZERO_M BIT(18) +#define GL_MDCK_EN_TX_PQM_SSO_NUMDESCS_EXCEED_S 19 +#define GL_MDCK_EN_TX_PQM_SSO_NUMDESCS_EXCEED_M BIT(19) +#define GL_MDCK_EN_TX_PQM_TAIL_GT_RING_LENGTH_S 20 +#define GL_MDCK_EN_TX_PQM_TAIL_GT_RING_LENGTH_M BIT(20) +#define GL_MDCK_EN_TX_PQM_RESERVED_DBL_TYPE_S 21 +#define GL_MDCK_EN_TX_PQM_RESERVED_DBL_TYPE_M BIT(21) +#define GL_MDCK_EN_TX_PQM_ILLEGAL_HEAD_DROP_DBL_S 22 +#define GL_MDCK_EN_TX_PQM_ILLEGAL_HEAD_DROP_DBL_M BIT(22) +#define GL_MDCK_EN_TX_PQM_LSO_OVER_COMMS_Q_S 23 +#define GL_MDCK_EN_TX_PQM_LSO_OVER_COMMS_Q_M BIT(23) +#define GL_MDCK_EN_TX_PQM_ILLEGAL_VF_QNUM_S 24 +#define GL_MDCK_EN_TX_PQM_ILLEGAL_VF_QNUM_M BIT(24) +#define GL_MDCK_EN_TX_PQM_QTAIL_GT_RING_LENGTH_S 25 +#define GL_MDCK_EN_TX_PQM_QTAIL_GT_RING_LENGTH_M BIT(25) +#define GL_MDCK_EN_TX_PQM_RSVD_S 26 +#define GL_MDCK_EN_TX_PQM_RSVD_M MAKEMASK(0x3F, 26) +#define GL_MDCK_RX 0x0029422C /* Reset Source: CORER */ +#define GL_MDCK_RX_DESC_ADDR_S 0 +#define GL_MDCK_RX_DESC_ADDR_M BIT(0) +#define GL_MDCK_TX_TDPU 0x00049348 /* Reset Source: CORER */ +#define GL_MDCK_TX_TDPU_TTL_ERR_ITR_DIS_S 0 +#define GL_MDCK_TX_TDPU_TTL_ERR_ITR_DIS_M BIT(0) +#define GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_S 1 +#define GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_M BIT(1) +#define GL_MDCK_TX_TDPU_PCIE_UR_ITR_DIS_S 2 +#define GL_MDCK_TX_TDPU_PCIE_UR_ITR_DIS_M BIT(2) +#define GL_MDCK_TX_TDPU_MAL_OFFSET_ITR_DIS_S 3 +#define GL_MDCK_TX_TDPU_MAL_OFFSET_ITR_DIS_M BIT(3) +#define GL_MDCK_TX_TDPU_MAL_CMD_ITR_DIS_S 4 +#define GL_MDCK_TX_TDPU_MAL_CMD_ITR_DIS_M BIT(4) +#define GL_MDCK_TX_TDPU_BIG_PKT_SIZE_ITR_DIS_S 5 +#define GL_MDCK_TX_TDPU_BIG_PKT_SIZE_ITR_DIS_M BIT(5) +#define 
GL_MDCK_TX_TDPU_L2_ACCEPT_FAIL_ITR_DIS_S 6 +#define GL_MDCK_TX_TDPU_L2_ACCEPT_FAIL_ITR_DIS_M BIT(6) +#define GL_MDCK_TX_TDPU_NIC_DSI_ITR_DIS_S 7 +#define GL_MDCK_TX_TDPU_NIC_DSI_ITR_DIS_M BIT(7) +#define GL_MDCK_TX_TDPU_MAL_IPSEC_CMD_ITR_DIS_S 8 +#define GL_MDCK_TX_TDPU_MAL_IPSEC_CMD_ITR_DIS_M BIT(8) +#define GL_MDCK_TX_TDPU_DSCP_CHECK_FAIL_ITR_DIS_S 9 +#define GL_MDCK_TX_TDPU_DSCP_CHECK_FAIL_ITR_DIS_M BIT(9) +#define GL_MDCK_TX_TDPU_NIC_IPSEC_ITR_DIS_S 10 +#define GL_MDCK_TX_TDPU_NIC_IPSEC_ITR_DIS_M BIT(10) +#define GL_MDET_RX 0x00294C00 /* Reset Source: CORER */ +#define GL_MDET_RX_QNUM_S 0 +#define GL_MDET_RX_QNUM_M MAKEMASK(0x7FFF, 0) +#define GL_MDET_RX_VF_NUM_S 15 +#define GL_MDET_RX_VF_NUM_M MAKEMASK(0xFF, 15) +#define GL_MDET_RX_PF_NUM_S 23 +#define GL_MDET_RX_PF_NUM_M MAKEMASK(0x7, 23) +#define GL_MDET_RX_MAL_TYPE_S 26 +#define GL_MDET_RX_MAL_TYPE_M MAKEMASK(0x1F, 26) +#define GL_MDET_RX_VALID_S 31 +#define GL_MDET_RX_VALID_M BIT(31) +#define GL_MDET_TX_PQM 0x002D2E00 /* Reset Source: CORER */ +#define GL_MDET_TX_PQM_PF_NUM_S 0 +#define GL_MDET_TX_PQM_PF_NUM_M MAKEMASK(0x7, 0) +#define GL_MDET_TX_PQM_VF_NUM_S 4 +#define GL_MDET_TX_PQM_VF_NUM_M MAKEMASK(0xFF, 4) +#define GL_MDET_TX_PQM_QNUM_S 12 +#define GL_MDET_TX_PQM_QNUM_M MAKEMASK(0x3FFF, 12) +#define GL_MDET_TX_PQM_MAL_TYPE_S 26 +#define GL_MDET_TX_PQM_MAL_TYPE_M MAKEMASK(0x1F, 26) +#define GL_MDET_TX_PQM_VALID_S 31 +#define GL_MDET_TX_PQM_VALID_M BIT(31) +#define GL_MDET_TX_TCLAN 0x000FC068 /* Reset Source: CORER */ +#define GL_MDET_TX_TCLAN_QNUM_S 0 +#define GL_MDET_TX_TCLAN_QNUM_M MAKEMASK(0x7FFF, 0) +#define GL_MDET_TX_TCLAN_VF_NUM_S 15 +#define GL_MDET_TX_TCLAN_VF_NUM_M MAKEMASK(0xFF, 15) +#define GL_MDET_TX_TCLAN_PF_NUM_S 23 +#define GL_MDET_TX_TCLAN_PF_NUM_M MAKEMASK(0x7, 23) +#define GL_MDET_TX_TCLAN_MAL_TYPE_S 26 +#define GL_MDET_TX_TCLAN_MAL_TYPE_M MAKEMASK(0x1F, 26) +#define GL_MDET_TX_TCLAN_VALID_S 31 +#define GL_MDET_TX_TCLAN_VALID_M BIT(31) +#define GL_MDET_TX_TDPU 0x00049350 /* Reset Source: CORER */ +#define GL_MDET_TX_TDPU_QNUM_S 0 +#define GL_MDET_TX_TDPU_QNUM_M MAKEMASK(0x7FFF, 0) +#define GL_MDET_TX_TDPU_VF_NUM_S 15 +#define GL_MDET_TX_TDPU_VF_NUM_M MAKEMASK(0xFF, 15) +#define GL_MDET_TX_TDPU_PF_NUM_S 23 +#define GL_MDET_TX_TDPU_PF_NUM_M MAKEMASK(0x7, 23) +#define GL_MDET_TX_TDPU_MAL_TYPE_S 26 +#define GL_MDET_TX_TDPU_MAL_TYPE_M MAKEMASK(0x1F, 26) +#define GL_MDET_TX_TDPU_VALID_S 31 +#define GL_MDET_TX_TDPU_VALID_M BIT(31) +#define GLRLAN_MDET 0x00294200 /* Reset Source: CORER */ +#define GLRLAN_MDET_PCKT_EXTRCT_ERR_S 0 +#define GLRLAN_MDET_PCKT_EXTRCT_ERR_M BIT(0) +#define PF_MDET_RX 0x00294280 /* Reset Source: CORER */ +#define PF_MDET_RX_VALID_S 0 +#define PF_MDET_RX_VALID_M BIT(0) +#define PF_MDET_TX_PQM 0x002D2C80 /* Reset Source: CORER */ +#define PF_MDET_TX_PQM_VALID_S 0 +#define PF_MDET_TX_PQM_VALID_M BIT(0) +#define PF_MDET_TX_TCLAN 0x000FC000 /* Reset Source: CORER */ +#define PF_MDET_TX_TCLAN_VALID_S 0 +#define PF_MDET_TX_TCLAN_VALID_M BIT(0) +#define PF_MDET_TX_TDPU 0x00040800 /* Reset Source: CORER */ +#define PF_MDET_TX_TDPU_VALID_S 0 +#define PF_MDET_TX_TDPU_VALID_M BIT(0) +#define VP_MDET_RX(_VF) (0x00294400 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: CORER */ +#define VP_MDET_RX_MAX_INDEX 255 +#define VP_MDET_RX_VALID_S 0 +#define VP_MDET_RX_VALID_M BIT(0) +#define VP_MDET_TX_PQM(_VF) (0x002D2000 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: CORER */ +#define VP_MDET_TX_PQM_MAX_INDEX 255 +#define VP_MDET_TX_PQM_VALID_S 0 +#define VP_MDET_TX_PQM_VALID_M BIT(0) +#define 
VP_MDET_TX_TCLAN(_VF) (0x000FB800 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: CORER */ +#define VP_MDET_TX_TCLAN_MAX_INDEX 255 +#define VP_MDET_TX_TCLAN_VALID_S 0 +#define VP_MDET_TX_TCLAN_VALID_M BIT(0) +#define VP_MDET_TX_TDPU(_VF) (0x00040000 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: CORER */ +#define VP_MDET_TX_TDPU_MAX_INDEX 255 +#define VP_MDET_TX_TDPU_VALID_S 0 +#define VP_MDET_TX_TDPU_VALID_M BIT(0) +#define GENERAL_MNG_FW_DBG_CSR(_i) (0x000B6180 + ((_i) * 4)) /* _i=0...9 */ /* Reset Source: POR */ +#define GENERAL_MNG_FW_DBG_CSR_MAX_INDEX 9 +#define GENERAL_MNG_FW_DBG_CSR_GENERAL_FW_DBG_S 0 +#define GENERAL_MNG_FW_DBG_CSR_GENERAL_FW_DBG_M MAKEMASK(0xFFFFFFFF, 0) +#define GL_FWRESETCNT 0x00083100 /* Reset Source: POR */ +#define GL_FWRESETCNT_FWRESETCNT_S 0 +#define GL_FWRESETCNT_FWRESETCNT_M MAKEMASK(0xFFFFFFFF, 0) +#define GL_MNG_FW_RAM_STAT 0x0008309C /* Reset Source: POR */ +#define GL_MNG_FW_RAM_STAT_FW_RAM_RST_STAT_S 0 +#define GL_MNG_FW_RAM_STAT_FW_RAM_RST_STAT_M BIT(0) +#define GL_MNG_FW_RAM_STAT_MNG_MEM_ECC_ERR_S 1 +#define GL_MNG_FW_RAM_STAT_MNG_MEM_ECC_ERR_M BIT(1) +#define GL_MNG_FWSM 0x000B6134 /* Reset Source: POR */ +#define GL_MNG_FWSM_FW_MODES_S 0 +#define GL_MNG_FWSM_FW_MODES_M MAKEMASK(0x7, 0) +#define GL_MNG_FWSM_RSV0_S 3 +#define GL_MNG_FWSM_RSV0_M MAKEMASK(0x7F, 3) +#define GL_MNG_FWSM_EEP_RELOAD_IND_S 10 +#define GL_MNG_FWSM_EEP_RELOAD_IND_M BIT(10) +#define GL_MNG_FWSM_RSV1_S 11 +#define GL_MNG_FWSM_RSV1_M MAKEMASK(0xF, 11) +#define GL_MNG_FWSM_RSV2_S 15 +#define GL_MNG_FWSM_RSV2_M BIT(15) +#define GL_MNG_FWSM_PCIR_AL_FAILURE_S 16 +#define GL_MNG_FWSM_PCIR_AL_FAILURE_M BIT(16) +#define GL_MNG_FWSM_POR_AL_FAILURE_S 17 +#define GL_MNG_FWSM_POR_AL_FAILURE_M BIT(17) +#define GL_MNG_FWSM_RSV3_S 18 +#define GL_MNG_FWSM_RSV3_M BIT(18) +#define GL_MNG_FWSM_EXT_ERR_IND_S 19 +#define GL_MNG_FWSM_EXT_ERR_IND_M MAKEMASK(0x3F, 19) +#define GL_MNG_FWSM_RSV4_S 25 +#define GL_MNG_FWSM_RSV4_M BIT(25) +#define GL_MNG_FWSM_RESERVED_11_S 26 +#define GL_MNG_FWSM_RESERVED_11_M MAKEMASK(0xF, 26) +#define GL_MNG_FWSM_RSV5_S 30 +#define GL_MNG_FWSM_RSV5_M MAKEMASK(0x3, 30) +#define GL_MNG_HWARB_CTRL 0x000B6130 /* Reset Source: POR */ +#define GL_MNG_HWARB_CTRL_NCSI_ARB_EN_S 0 +#define GL_MNG_HWARB_CTRL_NCSI_ARB_EN_M BIT(0) +#define GL_MNG_SHA_EXTEND(_i) (0x00083120 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: EMPR */ +#define GL_MNG_SHA_EXTEND_MAX_INDEX 7 +#define GL_MNG_SHA_EXTEND_GL_MNG_SHA_EXTEND_S 0 +#define GL_MNG_SHA_EXTEND_GL_MNG_SHA_EXTEND_M MAKEMASK(0xFFFFFFFF, 0) +#define GL_MNG_SHA_EXTEND_ROM(_i) (0x00083160 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: EMPR */ +#define GL_MNG_SHA_EXTEND_ROM_MAX_INDEX 7 +#define GL_MNG_SHA_EXTEND_ROM_GL_MNG_SHA_EXTEND_ROM_S 0 +#define GL_MNG_SHA_EXTEND_ROM_GL_MNG_SHA_EXTEND_ROM_M MAKEMASK(0xFFFFFFFF, 0) +#define GL_MNG_SHA_EXTEND_STATUS 0x00083148 /* Reset Source: EMPR */ +#define GL_MNG_SHA_EXTEND_STATUS_STAGE_S 0 +#define GL_MNG_SHA_EXTEND_STATUS_STAGE_M MAKEMASK(0x7, 0) +#define GL_MNG_SHA_EXTEND_STATUS_FW_HALTED_S 30 +#define GL_MNG_SHA_EXTEND_STATUS_FW_HALTED_M BIT(30) +#define GL_MNG_SHA_EXTEND_STATUS_DONE_S 31 +#define GL_MNG_SHA_EXTEND_STATUS_DONE_M BIT(31) +#define GL_SWT_PRT2MDEF(_i) (0x00216018 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: POR */ +#define GL_SWT_PRT2MDEF_MAX_INDEX 31 +#define GL_SWT_PRT2MDEF_MDEFIDX_S 0 +#define GL_SWT_PRT2MDEF_MDEFIDX_M MAKEMASK(0x7, 0) +#define GL_SWT_PRT2MDEF_MDEFENA_S 31 +#define GL_SWT_PRT2MDEF_MDEFENA_M BIT(31) +#define PRT_MNG_MANC 0x00214720 /* Reset Source: POR */ 
+#define PRT_MNG_MANC_FLOW_CONTROL_DISCARD_S 0 +#define PRT_MNG_MANC_FLOW_CONTROL_DISCARD_M BIT(0) +#define PRT_MNG_MANC_NCSI_DISCARD_S 1 +#define PRT_MNG_MANC_NCSI_DISCARD_M BIT(1) +#define PRT_MNG_MANC_RCV_TCO_EN_S 17 +#define PRT_MNG_MANC_RCV_TCO_EN_M BIT(17) +#define PRT_MNG_MANC_RCV_ALL_S 19 +#define PRT_MNG_MANC_RCV_ALL_M BIT(19) +#define PRT_MNG_MANC_FIXED_NET_TYPE_S 25 +#define PRT_MNG_MANC_FIXED_NET_TYPE_M BIT(25) +#define PRT_MNG_MANC_NET_TYPE_S 26 +#define PRT_MNG_MANC_NET_TYPE_M BIT(26) +#define PRT_MNG_MANC_EN_BMC2OS_S 28 +#define PRT_MNG_MANC_EN_BMC2OS_M BIT(28) +#define PRT_MNG_MANC_EN_BMC2NET_S 29 +#define PRT_MNG_MANC_EN_BMC2NET_M BIT(29) +#define PRT_MNG_MAVTV(_i) (0x00214780 + ((_i) * 32)) /* _i=0...7 */ /* Reset Source: POR */ +#define PRT_MNG_MAVTV_MAX_INDEX 7 +#define PRT_MNG_MAVTV_VID_S 0 +#define PRT_MNG_MAVTV_VID_M MAKEMASK(0xFFF, 0) +#define PRT_MNG_MDEF(_i) (0x00214880 + ((_i) * 32)) /* _i=0...7 */ /* Reset Source: POR */ +#define PRT_MNG_MDEF_MAX_INDEX 7 +#define PRT_MNG_MDEF_MAC_EXACT_AND_S 0 +#define PRT_MNG_MDEF_MAC_EXACT_AND_M MAKEMASK(0xF, 0) +#define PRT_MNG_MDEF_BROADCAST_AND_S 4 +#define PRT_MNG_MDEF_BROADCAST_AND_M BIT(4) +#define PRT_MNG_MDEF_VLAN_AND_S 5 +#define PRT_MNG_MDEF_VLAN_AND_M MAKEMASK(0xFF, 5) +#define PRT_MNG_MDEF_IPV4_ADDRESS_AND_S 13 +#define PRT_MNG_MDEF_IPV4_ADDRESS_AND_M MAKEMASK(0xF, 13) +#define PRT_MNG_MDEF_IPV6_ADDRESS_AND_S 17 +#define PRT_MNG_MDEF_IPV6_ADDRESS_AND_M MAKEMASK(0xF, 17) +#define PRT_MNG_MDEF_MAC_EXACT_OR_S 21 +#define PRT_MNG_MDEF_MAC_EXACT_OR_M MAKEMASK(0xF, 21) +#define PRT_MNG_MDEF_BROADCAST_OR_S 25 +#define PRT_MNG_MDEF_BROADCAST_OR_M BIT(25) +#define PRT_MNG_MDEF_MULTICAST_AND_S 26 +#define PRT_MNG_MDEF_MULTICAST_AND_M BIT(26) +#define PRT_MNG_MDEF_ARP_REQUEST_OR_S 27 +#define PRT_MNG_MDEF_ARP_REQUEST_OR_M BIT(27) +#define PRT_MNG_MDEF_ARP_RESPONSE_OR_S 28 +#define PRT_MNG_MDEF_ARP_RESPONSE_OR_M BIT(28) +#define PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_S 29 +#define PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_M BIT(29) +#define PRT_MNG_MDEF_PORT_0X298_OR_S 30 +#define PRT_MNG_MDEF_PORT_0X298_OR_M BIT(30) +#define PRT_MNG_MDEF_PORT_0X26F_OR_S 31 +#define PRT_MNG_MDEF_PORT_0X26F_OR_M BIT(31) +#define PRT_MNG_MDEF_EXT(_i) (0x00214A00 + ((_i) * 32)) /* _i=0...7 */ /* Reset Source: POR */ +#define PRT_MNG_MDEF_EXT_MAX_INDEX 7 +#define PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_S 0 +#define PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_M MAKEMASK(0xF, 0) +#define PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_S 4 +#define PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_M MAKEMASK(0xF, 4) +#define PRT_MNG_MDEF_EXT_FLEX_PORT_OR_S 8 +#define PRT_MNG_MDEF_EXT_FLEX_PORT_OR_M MAKEMASK(0xFFFF, 8) +#define PRT_MNG_MDEF_EXT_FLEX_TCO_S 24 +#define PRT_MNG_MDEF_EXT_FLEX_TCO_M BIT(24) +#define PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_S 25 +#define PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_M BIT(25) +#define PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_S 26 +#define PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_M BIT(26) +#define PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_S 27 +#define PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_M BIT(27) +#define PRT_MNG_MDEF_EXT_ICMP_OR_S 28 +#define PRT_MNG_MDEF_EXT_ICMP_OR_M BIT(28) +#define PRT_MNG_MDEF_EXT_MLD_S 29 +#define PRT_MNG_MDEF_EXT_MLD_M BIT(29) +#define PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_S 30 +#define PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_M BIT(30) +#define PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_S 31 +#define PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_M BIT(31) +#define PRT_MNG_MDEFVSI(_i) (0x00214980 + ((_i) * 32)) /* _i=0...3 */ 
/* Reset Source: POR */ +#define PRT_MNG_MDEFVSI_MAX_INDEX 3 +#define PRT_MNG_MDEFVSI_MDEFVSI_2N_S 0 +#define PRT_MNG_MDEFVSI_MDEFVSI_2N_M MAKEMASK(0xFFFF, 0) +#define PRT_MNG_MDEFVSI_MDEFVSI_2NP1_S 16 +#define PRT_MNG_MDEFVSI_MDEFVSI_2NP1_M MAKEMASK(0xFFFF, 16) +#define PRT_MNG_METF(_i) (0x00214120 + ((_i) * 32)) /* _i=0...3 */ /* Reset Source: POR */ +#define PRT_MNG_METF_MAX_INDEX 3 +#define PRT_MNG_METF_ETYPE_S 0 +#define PRT_MNG_METF_ETYPE_M MAKEMASK(0xFFFF, 0) +#define PRT_MNG_METF_POLARITY_S 30 +#define PRT_MNG_METF_POLARITY_M BIT(30) +#define PRT_MNG_MFUTP(_i) (0x00214320 + ((_i) * 32)) /* _i=0...15 */ /* Reset Source: POR */ +#define PRT_MNG_MFUTP_MAX_INDEX 15 +#define PRT_MNG_MFUTP_MFUTP_N_S 0 +#define PRT_MNG_MFUTP_MFUTP_N_M MAKEMASK(0xFFFF, 0) +#define PRT_MNG_MFUTP_UDP_S 16 +#define PRT_MNG_MFUTP_UDP_M BIT(16) +#define PRT_MNG_MFUTP_TCP_S 17 +#define PRT_MNG_MFUTP_TCP_M BIT(17) +#define PRT_MNG_MFUTP_SOURCE_DESTINATION_S 18 +#define PRT_MNG_MFUTP_SOURCE_DESTINATION_M BIT(18) +#define PRT_MNG_MIPAF4(_i) (0x002141A0 + ((_i) * 32)) /* _i=0...3 */ /* Reset Source: POR */ +#define PRT_MNG_MIPAF4_MAX_INDEX 3 +#define PRT_MNG_MIPAF4_MIPAF_S 0 +#define PRT_MNG_MIPAF4_MIPAF_M MAKEMASK(0xFFFFFFFF, 0) +#define PRT_MNG_MIPAF6(_i) (0x00214520 + ((_i) * 32)) /* _i=0...15 */ /* Reset Source: POR */ +#define PRT_MNG_MIPAF6_MAX_INDEX 15 +#define PRT_MNG_MIPAF6_MIPAF_S 0 +#define PRT_MNG_MIPAF6_MIPAF_M MAKEMASK(0xFFFFFFFF, 0) +#define PRT_MNG_MMAH(_i) (0x00214220 + ((_i) * 32)) /* _i=0...3 */ /* Reset Source: POR */ +#define PRT_MNG_MMAH_MAX_INDEX 3 +#define PRT_MNG_MMAH_MMAH_S 0 +#define PRT_MNG_MMAH_MMAH_M MAKEMASK(0xFFFF, 0) +#define PRT_MNG_MMAL(_i) (0x002142A0 + ((_i) * 32)) /* _i=0...3 */ /* Reset Source: POR */ +#define PRT_MNG_MMAL_MAX_INDEX 3 +#define PRT_MNG_MMAL_MMAL_S 0 +#define PRT_MNG_MMAL_MMAL_M MAKEMASK(0xFFFFFFFF, 0) +#define PRT_MNG_MNGONLY 0x00214740 /* Reset Source: POR */ +#define PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_S 0 +#define PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_M MAKEMASK(0xFF, 0) +#define PRT_MNG_MSFM 0x00214760 /* Reset Source: POR */ +#define PRT_MNG_MSFM_PORT_26F_UDP_S 0 +#define PRT_MNG_MSFM_PORT_26F_UDP_M BIT(0) +#define PRT_MNG_MSFM_PORT_26F_TCP_S 1 +#define PRT_MNG_MSFM_PORT_26F_TCP_M BIT(1) +#define PRT_MNG_MSFM_PORT_298_UDP_S 2 +#define PRT_MNG_MSFM_PORT_298_UDP_M BIT(2) +#define PRT_MNG_MSFM_PORT_298_TCP_S 3 +#define PRT_MNG_MSFM_PORT_298_TCP_M BIT(3) +#define PRT_MNG_MSFM_IPV6_0_MASK_S 4 +#define PRT_MNG_MSFM_IPV6_0_MASK_M BIT(4) +#define PRT_MNG_MSFM_IPV6_1_MASK_S 5 +#define PRT_MNG_MSFM_IPV6_1_MASK_M BIT(5) +#define PRT_MNG_MSFM_IPV6_2_MASK_S 6 +#define PRT_MNG_MSFM_IPV6_2_MASK_M BIT(6) +#define PRT_MNG_MSFM_IPV6_3_MASK_S 7 +#define PRT_MNG_MSFM_IPV6_3_MASK_M BIT(7) +#define MSIX_PBA_PAGE(_i) (0x02E08000 + ((_i) * 4)) /* _i=0...63 */ /* Reset Source: FLR */ +#define MSIX_PBA_PAGE_MAX_INDEX 63 +#define MSIX_PBA_PAGE_PENBIT_S 0 +#define MSIX_PBA_PAGE_PENBIT_M MAKEMASK(0xFFFFFFFF, 0) +#define MSIX_PBA1(_i) (0x00008000 + ((_i) * 4)) /* _i=0...63 */ /* Reset Source: FLR */ +#define MSIX_PBA1_MAX_INDEX 63 +#define MSIX_PBA1_PENBIT_S 0 +#define MSIX_PBA1_PENBIT_M MAKEMASK(0xFFFFFFFF, 0) +#define MSIX_TADD_PAGE(_i) (0x02E00000 + ((_i) * 16)) /* _i=0...2047 */ /* Reset Source: FLR */ +#define MSIX_TADD_PAGE_MAX_INDEX 2047 +#define MSIX_TADD_PAGE_MSIXTADD10_S 0 +#define MSIX_TADD_PAGE_MSIXTADD10_M MAKEMASK(0x3, 0) +#define MSIX_TADD_PAGE_MSIXTADD_S 2 +#define MSIX_TADD_PAGE_MSIXTADD_M MAKEMASK(0x3FFFFFFF, 2) +#define MSIX_TADD1(_i) (0x00000000 + ((_i) * 
16)) /* _i=0...2047 */ /* Reset Source: FLR */ +#define MSIX_TADD1_MAX_INDEX 2047 +#define MSIX_TADD1_MSIXTADD10_S 0 +#define MSIX_TADD1_MSIXTADD10_M MAKEMASK(0x3, 0) +#define MSIX_TADD1_MSIXTADD_S 2 +#define MSIX_TADD1_MSIXTADD_M MAKEMASK(0x3FFFFFFF, 2) +#define MSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...2047 */ /* Reset Source: FLR */ +#define MSIX_TMSG_MAX_INDEX 2047 +#define MSIX_TMSG_MSIXTMSG_S 0 +#define MSIX_TMSG_MSIXTMSG_M MAKEMASK(0xFFFFFFFF, 0) +#define MSIX_TMSG_PAGE(_i) (0x02E00008 + ((_i) * 16)) /* _i=0...2047 */ /* Reset Source: FLR */ +#define MSIX_TMSG_PAGE_MAX_INDEX 2047 +#define MSIX_TMSG_PAGE_MSIXTMSG_S 0 +#define MSIX_TMSG_PAGE_MSIXTMSG_M MAKEMASK(0xFFFFFFFF, 0) +#define MSIX_TUADD_PAGE(_i) (0x02E00004 + ((_i) * 16)) /* _i=0...2047 */ /* Reset Source: FLR */ +#define MSIX_TUADD_PAGE_MAX_INDEX 2047 +#define MSIX_TUADD_PAGE_MSIXTUADD_S 0 +#define MSIX_TUADD_PAGE_MSIXTUADD_M MAKEMASK(0xFFFFFFFF, 0) +#define MSIX_TUADD1(_i) (0x00000004 + ((_i) * 16)) /* _i=0...2047 */ /* Reset Source: FLR */ +#define MSIX_TUADD1_MAX_INDEX 2047 +#define MSIX_TUADD1_MSIXTUADD_S 0 +#define MSIX_TUADD1_MSIXTUADD_M MAKEMASK(0xFFFFFFFF, 0) +#define MSIX_TVCTRL_PAGE(_i) (0x02E0000C + ((_i) * 16)) /* _i=0...2047 */ /* Reset Source: FLR */ +#define MSIX_TVCTRL_PAGE_MAX_INDEX 2047 +#define MSIX_TVCTRL_PAGE_MASK_S 0 +#define MSIX_TVCTRL_PAGE_MASK_M BIT(0) +#define MSIX_TVCTRL1(_i) (0x0000000C + ((_i) * 16)) /* _i=0...2047 */ /* Reset Source: FLR */ +#define MSIX_TVCTRL1_MAX_INDEX 2047 +#define MSIX_TVCTRL1_MASK_S 0 +#define MSIX_TVCTRL1_MASK_M BIT(0) +#define GLNVM_AL_DONE_HLP 0x000824C4 /* Reset Source: POR */ +#define GLNVM_AL_DONE_HLP_HLP_CORER_S 0 +#define GLNVM_AL_DONE_HLP_HLP_CORER_M BIT(0) +#define GLNVM_AL_DONE_HLP_HLP_FULLR_S 1 +#define GLNVM_AL_DONE_HLP_HLP_FULLR_M BIT(1) +#define GLNVM_ALTIMERS 0x000B6140 /* Reset Source: POR */ +#define GLNVM_ALTIMERS_PCI_ALTIMER_S 0 +#define GLNVM_ALTIMERS_PCI_ALTIMER_M MAKEMASK(0xFFF, 0) +#define GLNVM_ALTIMERS_GEN_ALTIMER_S 12 +#define GLNVM_ALTIMERS_GEN_ALTIMER_M MAKEMASK(0xFFFFF, 12) +#define GLNVM_FLA 0x000B6108 /* Reset Source: POR */ +#define GLNVM_FLA_LOCKED_S 6 +#define GLNVM_FLA_LOCKED_M BIT(6) +#define GLNVM_GENS 0x000B6100 /* Reset Source: POR */ +#define GLNVM_GENS_NVM_PRES_S 0 +#define GLNVM_GENS_NVM_PRES_M BIT(0) +#define GLNVM_GENS_SR_SIZE_S 5 +#define GLNVM_GENS_SR_SIZE_M MAKEMASK(0x7, 5) +#define GLNVM_GENS_BANK1VAL_S 8 +#define GLNVM_GENS_BANK1VAL_M BIT(8) +#define GLNVM_GENS_ALT_PRST_S 23 +#define GLNVM_GENS_ALT_PRST_M BIT(23) +#define GLNVM_GENS_FL_AUTO_RD_S 25 +#define GLNVM_GENS_FL_AUTO_RD_M BIT(25) +#define GLNVM_PROTCSR(_i) (0x000B6010 + ((_i) * 4)) /* _i=0...59 */ /* Reset Source: POR */ +#define GLNVM_PROTCSR_MAX_INDEX 59 +#define GLNVM_PROTCSR_ADDR_BLOCK_S 0 +#define GLNVM_PROTCSR_ADDR_BLOCK_M MAKEMASK(0xFFFFFF, 0) +#define GLNVM_ULD 0x000B6008 /* Reset Source: POR */ +#define GLNVM_ULD_PCIER_DONE_S 0 +#define GLNVM_ULD_PCIER_DONE_M BIT(0) +#define GLNVM_ULD_PCIER_DONE_1_S 1 +#define GLNVM_ULD_PCIER_DONE_1_M BIT(1) +#define GLNVM_ULD_CORER_DONE_S 3 +#define GLNVM_ULD_CORER_DONE_M BIT(3) +#define GLNVM_ULD_GLOBR_DONE_S 4 +#define GLNVM_ULD_GLOBR_DONE_M BIT(4) +#define GLNVM_ULD_POR_DONE_S 5 +#define GLNVM_ULD_POR_DONE_M BIT(5) +#define GLNVM_ULD_POR_DONE_1_S 8 +#define GLNVM_ULD_POR_DONE_1_M BIT(8) +#define GLNVM_ULD_PCIER_DONE_2_S 9 +#define GLNVM_ULD_PCIER_DONE_2_M BIT(9) +#define GLNVM_ULD_PE_DONE_S 10 +#define GLNVM_ULD_PE_DONE_M BIT(10) +#define GLNVM_ULD_HLP_CORE_DONE_S 11 +#define GLNVM_ULD_HLP_CORE_DONE_M BIT(11) 
+#define GLNVM_ULD_HLP_FULL_DONE_S 12 +#define GLNVM_ULD_HLP_FULL_DONE_M BIT(12) +#define GLNVM_ULT 0x000B6154 /* Reset Source: POR */ +#define GLNVM_ULT_CONF_PCIR_AE_S 0 +#define GLNVM_ULT_CONF_PCIR_AE_M BIT(0) +#define GLNVM_ULT_CONF_PCIRTL_AE_S 1 +#define GLNVM_ULT_CONF_PCIRTL_AE_M BIT(1) +#define GLNVM_ULT_RESERVED_1_S 2 +#define GLNVM_ULT_RESERVED_1_M BIT(2) +#define GLNVM_ULT_CONF_CORE_AE_S 3 +#define GLNVM_ULT_CONF_CORE_AE_M BIT(3) +#define GLNVM_ULT_CONF_GLOBAL_AE_S 4 +#define GLNVM_ULT_CONF_GLOBAL_AE_M BIT(4) +#define GLNVM_ULT_CONF_POR_AE_S 5 +#define GLNVM_ULT_CONF_POR_AE_M BIT(5) +#define GLNVM_ULT_RESERVED_2_S 6 +#define GLNVM_ULT_RESERVED_2_M BIT(6) +#define GLNVM_ULT_RESERVED_3_S 7 +#define GLNVM_ULT_RESERVED_3_M BIT(7) +#define GLNVM_ULT_RESERVED_5_S 8 +#define GLNVM_ULT_RESERVED_5_M BIT(8) +#define GLNVM_ULT_CONF_PCIALT_AE_S 9 +#define GLNVM_ULT_CONF_PCIALT_AE_M BIT(9) +#define GLNVM_ULT_CONF_PE_AE_S 10 +#define GLNVM_ULT_CONF_PE_AE_M BIT(10) +#define GLNVM_ULT_RESERVED_4_S 11 +#define GLNVM_ULT_RESERVED_4_M MAKEMASK(0x1FFFFF, 11) +#define GL_COTF_MARKER_STATUS 0x00200200 /* Reset Source: CORER */ +#define GL_COTF_MARKER_STATUS_MRKR_BUSY_S 0 +#define GL_COTF_MARKER_STATUS_MRKR_BUSY_M MAKEMASK(0xFF, 0) +#define GL_COTF_MARKER_TRIG_RCU_PRS(_i) (0x002001D4 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GL_COTF_MARKER_TRIG_RCU_PRS_MAX_INDEX 7 +#define GL_COTF_MARKER_TRIG_RCU_PRS_SET_RST_S 0 +#define GL_COTF_MARKER_TRIG_RCU_PRS_SET_RST_M BIT(0) +#define GL_PRS_MARKER_ERROR 0x00200204 /* Reset Source: CORER */ +#define GL_PRS_MARKER_ERROR_XLR_CFG_ERR_S 0 +#define GL_PRS_MARKER_ERROR_XLR_CFG_ERR_M BIT(0) +#define GL_PRS_MARKER_ERROR_QH_CFG_ERR_S 1 +#define GL_PRS_MARKER_ERROR_QH_CFG_ERR_M BIT(1) +#define GL_PRS_MARKER_ERROR_COTF_CFG_ERR_S 2 +#define GL_PRS_MARKER_ERROR_COTF_CFG_ERR_M BIT(2) +#define GL_PRS_RX_PIPE_INIT0(_i) (0x0020000C + ((_i) * 4)) /* _i=0...6 */ /* Reset Source: CORER */ +#define GL_PRS_RX_PIPE_INIT0_MAX_INDEX 6 +#define GL_PRS_RX_PIPE_INIT0_GPCSR_INIT_S 0 +#define GL_PRS_RX_PIPE_INIT0_GPCSR_INIT_M MAKEMASK(0xFFFF, 0) +#define GL_PRS_RX_PIPE_INIT1 0x00200028 /* Reset Source: CORER */ +#define GL_PRS_RX_PIPE_INIT1_GPCSR_INIT_S 0 +#define GL_PRS_RX_PIPE_INIT1_GPCSR_INIT_M MAKEMASK(0xFFFF, 0) +#define GL_PRS_RX_PIPE_INIT2 0x0020002C /* Reset Source: CORER */ +#define GL_PRS_RX_PIPE_INIT2_GPCSR_INIT_S 0 +#define GL_PRS_RX_PIPE_INIT2_GPCSR_INIT_M MAKEMASK(0xFFFF, 0) +#define GL_PRS_RX_SIZE_CTRL 0x00200004 /* Reset Source: CORER */ +#define GL_PRS_RX_SIZE_CTRL_MIN_SIZE_S 0 +#define GL_PRS_RX_SIZE_CTRL_MIN_SIZE_M MAKEMASK(0x3FF, 0) +#define GL_PRS_RX_SIZE_CTRL_MIN_SIZE_EN_S 15 +#define GL_PRS_RX_SIZE_CTRL_MIN_SIZE_EN_M BIT(15) +#define GL_PRS_RX_SIZE_CTRL_MAX_SIZE_S 16 +#define GL_PRS_RX_SIZE_CTRL_MAX_SIZE_M MAKEMASK(0x3FF, 16) +#define GL_PRS_RX_SIZE_CTRL_MAX_SIZE_EN_S 31 +#define GL_PRS_RX_SIZE_CTRL_MAX_SIZE_EN_M BIT(31) +#define GL_PRS_TX_PIPE_INIT0(_i) (0x00202018 + ((_i) * 4)) /* _i=0...6 */ /* Reset Source: CORER */ +#define GL_PRS_TX_PIPE_INIT0_MAX_INDEX 6 +#define GL_PRS_TX_PIPE_INIT0_GPCSR_INIT_S 0 +#define GL_PRS_TX_PIPE_INIT0_GPCSR_INIT_M MAKEMASK(0xFFFF, 0) +#define GL_PRS_TX_PIPE_INIT1 0x00202034 /* Reset Source: CORER */ +#define GL_PRS_TX_PIPE_INIT1_GPCSR_INIT_S 0 +#define GL_PRS_TX_PIPE_INIT1_GPCSR_INIT_M MAKEMASK(0xFFFF, 0) +#define GL_PRS_TX_PIPE_INIT2 0x00202038 /* Reset Source: CORER */ +#define GL_PRS_TX_PIPE_INIT2_GPCSR_INIT_S 0 +#define GL_PRS_TX_PIPE_INIT2_GPCSR_INIT_M MAKEMASK(0xFFFF, 0) +#define GL_PRS_TX_SIZE_CTRL 0x00202014 
/* Reset Source: CORER */ +#define GL_PRS_TX_SIZE_CTRL_MIN_SIZE_S 0 +#define GL_PRS_TX_SIZE_CTRL_MIN_SIZE_M MAKEMASK(0x3FF, 0) +#define GL_PRS_TX_SIZE_CTRL_MIN_SIZE_EN_S 15 +#define GL_PRS_TX_SIZE_CTRL_MIN_SIZE_EN_M BIT(15) +#define GL_PRS_TX_SIZE_CTRL_MAX_SIZE_S 16 +#define GL_PRS_TX_SIZE_CTRL_MAX_SIZE_M MAKEMASK(0x3FF, 16) +#define GL_PRS_TX_SIZE_CTRL_MAX_SIZE_EN_S 31 +#define GL_PRS_TX_SIZE_CTRL_MAX_SIZE_EN_M BIT(31) +#define GL_QH_MARKER_STATUS 0x002001FC /* Reset Source: CORER */ +#define GL_QH_MARKER_STATUS_MRKR_BUSY_S 0 +#define GL_QH_MARKER_STATUS_MRKR_BUSY_M MAKEMASK(0xF, 0) +#define GL_QH_MARKER_TRIG_RCU_PRS(_i) (0x002001C4 + ((_i) * 4)) /* _i=0...3 */ /* Reset Source: CORER */ +#define GL_QH_MARKER_TRIG_RCU_PRS_MAX_INDEX 3 +#define GL_QH_MARKER_TRIG_RCU_PRS_QPID_S 0 +#define GL_QH_MARKER_TRIG_RCU_PRS_QPID_M MAKEMASK(0x3FFFF, 0) +#define GL_QH_MARKER_TRIG_RCU_PRS_PE_TAG_S 18 +#define GL_QH_MARKER_TRIG_RCU_PRS_PE_TAG_M MAKEMASK(0xFF, 18) +#define GL_QH_MARKER_TRIG_RCU_PRS_PORT_NUM_S 26 +#define GL_QH_MARKER_TRIG_RCU_PRS_PORT_NUM_M MAKEMASK(0x7, 26) +#define GL_QH_MARKER_TRIG_RCU_PRS_SET_RST_S 31 +#define GL_QH_MARKER_TRIG_RCU_PRS_SET_RST_M BIT(31) +#define GL_RPRS_ANA_CSR_CTRL 0x00200708 /* Reset Source: CORER */ +#define GL_RPRS_ANA_CSR_CTRL_SELECT_EN_S 0 +#define GL_RPRS_ANA_CSR_CTRL_SELECT_EN_M BIT(0) +#define GL_RPRS_ANA_CSR_CTRL_SELECTED_ANA_S 1 +#define GL_RPRS_ANA_CSR_CTRL_SELECTED_ANA_M BIT(1) +#define GL_TPRS_ANA_CSR_CTRL 0x00202100 /* Reset Source: CORER */ +#define GL_TPRS_ANA_CSR_CTRL_SELECT_EN_S 0 +#define GL_TPRS_ANA_CSR_CTRL_SELECT_EN_M BIT(0) +#define GL_TPRS_ANA_CSR_CTRL_SELECTED_ANA_S 1 +#define GL_TPRS_ANA_CSR_CTRL_SELECTED_ANA_M BIT(1) +#define GL_TPRS_MNG_PM_THR 0x00202004 /* Reset Source: CORER */ +#define GL_TPRS_MNG_PM_THR_MNG_PM_THR_S 0 +#define GL_TPRS_MNG_PM_THR_MNG_PM_THR_M MAKEMASK(0x3FFF, 0) +#define GL_TPRS_PM_CNT(_i) (0x00202008 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */ +#define GL_TPRS_PM_CNT_MAX_INDEX 1 +#define GL_TPRS_PM_CNT_GL_PRS_PM_CNT_S 0 +#define GL_TPRS_PM_CNT_GL_PRS_PM_CNT_M MAKEMASK(0x3FFF, 0) +#define GL_TPRS_PM_THR 0x00202000 /* Reset Source: CORER */ +#define GL_TPRS_PM_THR_PM_THR_S 0 +#define GL_TPRS_PM_THR_PM_THR_M MAKEMASK(0x3FFF, 0) +#define GL_XLR_MARKER_LOG_RCU_PRS(_i) (0x00200208 + ((_i) * 4)) /* _i=0...63 */ /* Reset Source: CORER */ +#define GL_XLR_MARKER_LOG_RCU_PRS_MAX_INDEX 63 +#define GL_XLR_MARKER_LOG_RCU_PRS_XLR_TRIG_S 0 +#define GL_XLR_MARKER_LOG_RCU_PRS_XLR_TRIG_M MAKEMASK(0xFFFFFFFF, 0) +#define GL_XLR_MARKER_STATUS(_i) (0x002001F4 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */ +#define GL_XLR_MARKER_STATUS_MAX_INDEX 1 +#define GL_XLR_MARKER_STATUS_MRKR_BUSY_S 0 +#define GL_XLR_MARKER_STATUS_MRKR_BUSY_M MAKEMASK(0xFFFFFFFF, 0) +#define GL_XLR_MARKER_TRIG_PE 0x005008C0 /* Reset Source: CORER */ +#define GL_XLR_MARKER_TRIG_PE_VM_VF_NUM_S 0 +#define GL_XLR_MARKER_TRIG_PE_VM_VF_NUM_M MAKEMASK(0x3FF, 0) +#define GL_XLR_MARKER_TRIG_PE_VM_VF_TYPE_S 10 +#define GL_XLR_MARKER_TRIG_PE_VM_VF_TYPE_M MAKEMASK(0x3, 10) +#define GL_XLR_MARKER_TRIG_PE_PF_NUM_S 12 +#define GL_XLR_MARKER_TRIG_PE_PF_NUM_M MAKEMASK(0x7, 12) +#define GL_XLR_MARKER_TRIG_PE_PORT_NUM_S 16 +#define GL_XLR_MARKER_TRIG_PE_PORT_NUM_M MAKEMASK(0x7, 16) +#define GL_XLR_MARKER_TRIG_RCU_PRS 0x002001C0 /* Reset Source: CORER */ +#define GL_XLR_MARKER_TRIG_RCU_PRS_VM_VF_NUM_S 0 +#define GL_XLR_MARKER_TRIG_RCU_PRS_VM_VF_NUM_M MAKEMASK(0x3FF, 0) +#define GL_XLR_MARKER_TRIG_RCU_PRS_VM_VF_TYPE_S 10 +#define GL_XLR_MARKER_TRIG_RCU_PRS_VM_VF_TYPE_M 
MAKEMASK(0x3, 10) +#define GL_XLR_MARKER_TRIG_RCU_PRS_PF_NUM_S 12 +#define GL_XLR_MARKER_TRIG_RCU_PRS_PF_NUM_M MAKEMASK(0x7, 12) +#define GL_XLR_MARKER_TRIG_RCU_PRS_PORT_NUM_S 16 +#define GL_XLR_MARKER_TRIG_RCU_PRS_PORT_NUM_M MAKEMASK(0x7, 16) +#define GL_CLKGATE_EVENTS 0x0009DE70 /* Reset Source: PERST */ +#define GL_CLKGATE_EVENTS_PRIMARY_CLKGATE_EVENTS_S 0 +#define GL_CLKGATE_EVENTS_PRIMARY_CLKGATE_EVENTS_M MAKEMASK(0xFFFF, 0) +#define GL_CLKGATE_EVENTS_SIDEBAND_CLKGATE_EVENTS_S 16 +#define GL_CLKGATE_EVENTS_SIDEBAND_CLKGATE_EVENTS_M MAKEMASK(0xFFFF, 16) +#define GLPCI_BYTCTH_NP_C 0x000BFDA8 /* Reset Source: PCIR */ +#define GLPCI_BYTCTH_NP_C_PCI_COUNT_BW_BCT_S 0 +#define GLPCI_BYTCTH_NP_C_PCI_COUNT_BW_BCT_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPCI_BYTCTH_P 0x0009E970 /* Reset Source: PCIR */ +#define GLPCI_BYTCTH_P_PCI_COUNT_BW_BCT_S 0 +#define GLPCI_BYTCTH_P_PCI_COUNT_BW_BCT_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPCI_BYTCTL_NP_C 0x000BFDAC /* Reset Source: PCIR */ +#define GLPCI_BYTCTL_NP_C_PCI_COUNT_BW_BCT_S 0 +#define GLPCI_BYTCTL_NP_C_PCI_COUNT_BW_BCT_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPCI_BYTCTL_P 0x0009E994 /* Reset Source: PCIR */ +#define GLPCI_BYTCTL_P_PCI_COUNT_BW_BCT_S 0 +#define GLPCI_BYTCTL_P_PCI_COUNT_BW_BCT_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPCI_CAPCTRL 0x0009DE88 /* Reset Source: PCIR */ +#define GLPCI_CAPCTRL_VPD_EN_S 0 +#define GLPCI_CAPCTRL_VPD_EN_M BIT(0) +#define GLPCI_CAPSUP 0x0009DE8C /* Reset Source: PCIR */ +#define GLPCI_CAPSUP_PCIE_VER_S 0 +#define GLPCI_CAPSUP_PCIE_VER_M BIT(0) +#define GLPCI_CAPSUP_RESERVED_2_S 1 +#define GLPCI_CAPSUP_RESERVED_2_M BIT(1) +#define GLPCI_CAPSUP_LTR_EN_S 2 +#define GLPCI_CAPSUP_LTR_EN_M BIT(2) +#define GLPCI_CAPSUP_TPH_EN_S 3 +#define GLPCI_CAPSUP_TPH_EN_M BIT(3) +#define GLPCI_CAPSUP_ARI_EN_S 4 +#define GLPCI_CAPSUP_ARI_EN_M BIT(4) +#define GLPCI_CAPSUP_IOV_EN_S 5 +#define GLPCI_CAPSUP_IOV_EN_M BIT(5) +#define GLPCI_CAPSUP_ACS_EN_S 6 +#define GLPCI_CAPSUP_ACS_EN_M BIT(6) +#define GLPCI_CAPSUP_SEC_EN_S 7 +#define GLPCI_CAPSUP_SEC_EN_M BIT(7) +#define GLPCI_CAPSUP_PASID_EN_S 8 +#define GLPCI_CAPSUP_PASID_EN_M BIT(8) +#define GLPCI_CAPSUP_DLFE_EN_S 9 +#define GLPCI_CAPSUP_DLFE_EN_M BIT(9) +#define GLPCI_CAPSUP_GEN4_EXT_EN_S 10 +#define GLPCI_CAPSUP_GEN4_EXT_EN_M BIT(10) +#define GLPCI_CAPSUP_GEN4_MARG_EN_S 11 +#define GLPCI_CAPSUP_GEN4_MARG_EN_M BIT(11) +#define GLPCI_CAPSUP_ECRC_GEN_EN_S 16 +#define GLPCI_CAPSUP_ECRC_GEN_EN_M BIT(16) +#define GLPCI_CAPSUP_ECRC_CHK_EN_S 17 +#define GLPCI_CAPSUP_ECRC_CHK_EN_M BIT(17) +#define GLPCI_CAPSUP_IDO_EN_S 18 +#define GLPCI_CAPSUP_IDO_EN_M BIT(18) +#define GLPCI_CAPSUP_MSI_MASK_S 19 +#define GLPCI_CAPSUP_MSI_MASK_M BIT(19) +#define GLPCI_CAPSUP_CSR_CONF_EN_S 20 +#define GLPCI_CAPSUP_CSR_CONF_EN_M BIT(20) +#define GLPCI_CAPSUP_WAKUP_EN_S 21 +#define GLPCI_CAPSUP_WAKUP_EN_M BIT(21) +#define GLPCI_CAPSUP_LOAD_SUBSYS_ID_S 30 +#define GLPCI_CAPSUP_LOAD_SUBSYS_ID_M BIT(30) +#define GLPCI_CAPSUP_LOAD_DEV_ID_S 31 +#define GLPCI_CAPSUP_LOAD_DEV_ID_M BIT(31) +#define GLPCI_CNF 0x0009DEA0 /* Reset Source: POR */ +#define GLPCI_CNF_FLEX10_S 1 +#define GLPCI_CNF_FLEX10_M BIT(1) +#define GLPCI_CNF_WAKE_PIN_EN_S 2 +#define GLPCI_CNF_WAKE_PIN_EN_M BIT(2) +#define GLPCI_CNF_MSIX_ECC_BLOCK_DISABLE_S 3 +#define GLPCI_CNF_MSIX_ECC_BLOCK_DISABLE_M BIT(3) +#define GLPCI_CNF2 0x000BE004 /* Reset Source: PCIR */ +#define GLPCI_CNF2_RO_DIS_S 0 +#define GLPCI_CNF2_RO_DIS_M BIT(0) +#define GLPCI_CNF2_CACHELINE_SIZE_S 1 +#define GLPCI_CNF2_CACHELINE_SIZE_M BIT(1) +#define GLPCI_DREVID 0x0009E9AC /* Reset Source: 
PCIR */ +#define GLPCI_DREVID_DEFAULT_REVID_S 0 +#define GLPCI_DREVID_DEFAULT_REVID_M MAKEMASK(0xFF, 0) +#define GLPCI_GSCL_1_NP_C 0x000BFDA4 /* Reset Source: PCIR */ +#define GLPCI_GSCL_1_NP_C_RT_MODE_S 8 +#define GLPCI_GSCL_1_NP_C_RT_MODE_M BIT(8) +#define GLPCI_GSCL_1_NP_C_RT_EVENT_S 9 +#define GLPCI_GSCL_1_NP_C_RT_EVENT_M MAKEMASK(0x1F, 9) +#define GLPCI_GSCL_1_NP_C_PCI_COUNT_BW_EN_S 14 +#define GLPCI_GSCL_1_NP_C_PCI_COUNT_BW_EN_M BIT(14) +#define GLPCI_GSCL_1_NP_C_PCI_COUNT_BW_EV_S 15 +#define GLPCI_GSCL_1_NP_C_PCI_COUNT_BW_EV_M MAKEMASK(0x1F, 15) +#define GLPCI_GSCL_1_NP_C_GIO_COUNT_RESET_S 29 +#define GLPCI_GSCL_1_NP_C_GIO_COUNT_RESET_M BIT(29) +#define GLPCI_GSCL_1_NP_C_GIO_COUNT_STOP_S 30 +#define GLPCI_GSCL_1_NP_C_GIO_COUNT_STOP_M BIT(30) +#define GLPCI_GSCL_1_NP_C_GIO_COUNT_START_S 31 +#define GLPCI_GSCL_1_NP_C_GIO_COUNT_START_M BIT(31) +#define GLPCI_GSCL_1_P 0x0009E9B4 /* Reset Source: PCIR */ +#define GLPCI_GSCL_1_P_GIO_COUNT_EN_0_S 0 +#define GLPCI_GSCL_1_P_GIO_COUNT_EN_0_M BIT(0) +#define GLPCI_GSCL_1_P_GIO_COUNT_EN_1_S 1 +#define GLPCI_GSCL_1_P_GIO_COUNT_EN_1_M BIT(1) +#define GLPCI_GSCL_1_P_GIO_COUNT_EN_2_S 2 +#define GLPCI_GSCL_1_P_GIO_COUNT_EN_2_M BIT(2) +#define GLPCI_GSCL_1_P_GIO_COUNT_EN_3_S 3 +#define GLPCI_GSCL_1_P_GIO_COUNT_EN_3_M BIT(3) +#define GLPCI_GSCL_1_P_LBC_ENABLE_0_S 4 +#define GLPCI_GSCL_1_P_LBC_ENABLE_0_M BIT(4) +#define GLPCI_GSCL_1_P_LBC_ENABLE_1_S 5 +#define GLPCI_GSCL_1_P_LBC_ENABLE_1_M BIT(5) +#define GLPCI_GSCL_1_P_LBC_ENABLE_2_S 6 +#define GLPCI_GSCL_1_P_LBC_ENABLE_2_M BIT(6) +#define GLPCI_GSCL_1_P_LBC_ENABLE_3_S 7 +#define GLPCI_GSCL_1_P_LBC_ENABLE_3_M BIT(7) +#define GLPCI_GSCL_1_P_PCI_COUNT_BW_EN_S 14 +#define GLPCI_GSCL_1_P_PCI_COUNT_BW_EN_M BIT(14) +#define GLPCI_GSCL_1_P_GIO_64_BIT_EN_S 28 +#define GLPCI_GSCL_1_P_GIO_64_BIT_EN_M BIT(28) +#define GLPCI_GSCL_1_P_GIO_COUNT_RESET_S 29 +#define GLPCI_GSCL_1_P_GIO_COUNT_RESET_M BIT(29) +#define GLPCI_GSCL_1_P_GIO_COUNT_STOP_S 30 +#define GLPCI_GSCL_1_P_GIO_COUNT_STOP_M BIT(30) +#define GLPCI_GSCL_1_P_GIO_COUNT_START_S 31 +#define GLPCI_GSCL_1_P_GIO_COUNT_START_M BIT(31) +#define GLPCI_GSCL_2 0x0009E998 /* Reset Source: PCIR */ +#define GLPCI_GSCL_2_GIO_EVENT_NUM_0_S 0 +#define GLPCI_GSCL_2_GIO_EVENT_NUM_0_M MAKEMASK(0xFF, 0) +#define GLPCI_GSCL_2_GIO_EVENT_NUM_1_S 8 +#define GLPCI_GSCL_2_GIO_EVENT_NUM_1_M MAKEMASK(0xFF, 8) +#define GLPCI_GSCL_2_GIO_EVENT_NUM_2_S 16 +#define GLPCI_GSCL_2_GIO_EVENT_NUM_2_M MAKEMASK(0xFF, 16) +#define GLPCI_GSCL_2_GIO_EVENT_NUM_3_S 24 +#define GLPCI_GSCL_2_GIO_EVENT_NUM_3_M MAKEMASK(0xFF, 24) +#define GLPCI_GSCL_5_8(_i) (0x0009E954 + ((_i) * 4)) /* _i=0...3 */ /* Reset Source: PCIR */ +#define GLPCI_GSCL_5_8_MAX_INDEX 3 +#define GLPCI_GSCL_5_8_LBC_THRESHOLD_N_S 0 +#define GLPCI_GSCL_5_8_LBC_THRESHOLD_N_M MAKEMASK(0xFFFF, 0) +#define GLPCI_GSCL_5_8_LBC_TIMER_N_S 16 +#define GLPCI_GSCL_5_8_LBC_TIMER_N_M MAKEMASK(0xFFFF, 16) +#define GLPCI_GSCN_0_3(_i) (0x0009E99C + ((_i) * 4)) /* _i=0...3 */ /* Reset Source: PCIR */ +#define GLPCI_GSCN_0_3_MAX_INDEX 3 +#define GLPCI_GSCN_0_3_EVENT_COUNTER_S 0 +#define GLPCI_GSCN_0_3_EVENT_COUNTER_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPCI_LATCT_NP_C 0x000BFDA0 /* Reset Source: PCIR */ +#define GLPCI_LATCT_NP_C_PCI_LATENCY_COUNT_S 0 +#define GLPCI_LATCT_NP_C_PCI_LATENCY_COUNT_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPCI_LBARCTRL 0x0009DE74 /* Reset Source: POR */ +#define GLPCI_LBARCTRL_PREFBAR_S 0 +#define GLPCI_LBARCTRL_PREFBAR_M BIT(0) +#define GLPCI_LBARCTRL_BAR32_S 1 +#define GLPCI_LBARCTRL_BAR32_M BIT(1) +#define 
GLPCI_LBARCTRL_PAGES_SPACE_EN_PF_S 2 +#define GLPCI_LBARCTRL_PAGES_SPACE_EN_PF_M BIT(2) +#define GLPCI_LBARCTRL_FLASH_EXPOSE_S 3 +#define GLPCI_LBARCTRL_FLASH_EXPOSE_M BIT(3) +#define GLPCI_LBARCTRL_PE_DB_SIZE_S 4 +#define GLPCI_LBARCTRL_PE_DB_SIZE_M MAKEMASK(0x3, 4) +#define GLPCI_LBARCTRL_PAGES_SPACE_EN_VF_S 9 +#define GLPCI_LBARCTRL_PAGES_SPACE_EN_VF_M BIT(9) +#define GLPCI_LBARCTRL_EXROM_SIZE_S 11 +#define GLPCI_LBARCTRL_EXROM_SIZE_M MAKEMASK(0x7, 11) +#define GLPCI_LBARCTRL_VF_PE_DB_SIZE_S 14 +#define GLPCI_LBARCTRL_VF_PE_DB_SIZE_M MAKEMASK(0x3, 14) +#define GLPCI_LINKCAP 0x0009DE90 /* Reset Source: PCIR */ +#define GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_S 0 +#define GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_M MAKEMASK(0x3F, 0) +#define GLPCI_LINKCAP_MAX_LINK_WIDTH_S 9 +#define GLPCI_LINKCAP_MAX_LINK_WIDTH_M MAKEMASK(0xF, 9) +#define GLPCI_NPQ_CFG 0x000BFD80 /* Reset Source: PCIR */ +#define GLPCI_NPQ_CFG_EXTEND_TO_S 0 +#define GLPCI_NPQ_CFG_EXTEND_TO_M BIT(0) +#define GLPCI_NPQ_CFG_SMALL_TO_S 1 +#define GLPCI_NPQ_CFG_SMALL_TO_M BIT(1) +#define GLPCI_NPQ_CFG_WEIGHT_AVG_S 2 +#define GLPCI_NPQ_CFG_WEIGHT_AVG_M MAKEMASK(0xF, 2) +#define GLPCI_NPQ_CFG_NPQ_SPARE_S 6 +#define GLPCI_NPQ_CFG_NPQ_SPARE_M MAKEMASK(0x3FF, 6) +#define GLPCI_NPQ_CFG_NPQ_ERR_STAT_S 16 +#define GLPCI_NPQ_CFG_NPQ_ERR_STAT_M MAKEMASK(0xF, 16) +#define GLPCI_PKTCT_NP_C 0x000BFD9C /* Reset Source: PCIR */ +#define GLPCI_PKTCT_NP_C_PCI_COUNT_BW_PCT_S 0 +#define GLPCI_PKTCT_NP_C_PCI_COUNT_BW_PCT_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPCI_PKTCT_P 0x0009E9B0 /* Reset Source: PCIR */ +#define GLPCI_PKTCT_P_PCI_COUNT_BW_PCT_S 0 +#define GLPCI_PKTCT_P_PCI_COUNT_BW_PCT_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPCI_PMSUP 0x0009DE94 /* Reset Source: PCIR */ +#define GLPCI_PMSUP_RESERVED_0_S 0 +#define GLPCI_PMSUP_RESERVED_0_M MAKEMASK(0x3, 0) +#define GLPCI_PMSUP_RESERVED_1_S 2 +#define GLPCI_PMSUP_RESERVED_1_M MAKEMASK(0x7, 2) +#define GLPCI_PMSUP_RESERVED_2_S 5 +#define GLPCI_PMSUP_RESERVED_2_M MAKEMASK(0x7, 5) +#define GLPCI_PMSUP_L0S_ACC_LAT_S 8 +#define GLPCI_PMSUP_L0S_ACC_LAT_M MAKEMASK(0x7, 8) +#define GLPCI_PMSUP_L1_ACC_LAT_S 11 +#define GLPCI_PMSUP_L1_ACC_LAT_M MAKEMASK(0x7, 11) +#define GLPCI_PMSUP_RESERVED_3_S 14 +#define GLPCI_PMSUP_RESERVED_3_M BIT(14) +#define GLPCI_PMSUP_OBFF_SUP_S 15 +#define GLPCI_PMSUP_OBFF_SUP_M MAKEMASK(0x3, 15) +#define GLPCI_PUSH_PE_IF_TO_STATUS 0x0009DF44 /* Reset Source: PCIR */ +#define GLPCI_PUSH_PE_IF_TO_STATUS_GLPCI_PUSH_PE_IF_TO_STATUS_S 0 +#define GLPCI_PUSH_PE_IF_TO_STATUS_GLPCI_PUSH_PE_IF_TO_STATUS_M BIT(0) +#define GLPCI_PWRDATA 0x0009DE7C /* Reset Source: PCIR */ +#define GLPCI_PWRDATA_D0_POWER_S 0 +#define GLPCI_PWRDATA_D0_POWER_M MAKEMASK(0xFF, 0) +#define GLPCI_PWRDATA_COMM_POWER_S 8 +#define GLPCI_PWRDATA_COMM_POWER_M MAKEMASK(0xFF, 8) +#define GLPCI_PWRDATA_D3_POWER_S 16 +#define GLPCI_PWRDATA_D3_POWER_M MAKEMASK(0xFF, 16) +#define GLPCI_PWRDATA_DATA_SCALE_S 24 +#define GLPCI_PWRDATA_DATA_SCALE_M MAKEMASK(0x3, 24) +#define GLPCI_REVID 0x0009DE98 /* Reset Source: PCIR */ +#define GLPCI_REVID_NVM_REVID_S 0 +#define GLPCI_REVID_NVM_REVID_M MAKEMASK(0xFF, 0) +#define GLPCI_SERH 0x0009DE84 /* Reset Source: PCIR */ +#define GLPCI_SERH_SER_NUM_H_S 0 +#define GLPCI_SERH_SER_NUM_H_M MAKEMASK(0xFFFF, 0) +#define GLPCI_SERL 0x0009DE80 /* Reset Source: PCIR */ +#define GLPCI_SERL_SER_NUM_L_S 0 +#define GLPCI_SERL_SER_NUM_L_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPCI_SUBVENID 0x0009DEE8 /* Reset Source: PCIR */ +#define GLPCI_SUBVENID_SUB_VEN_ID_S 0 +#define GLPCI_SUBVENID_SUB_VEN_ID_M MAKEMASK(0xFFFF, 0) 
+#define GLPCI_UPADD 0x000BE0D4 /* Reset Source: PCIR */ +#define GLPCI_UPADD_ADDRESS_S 1 +#define GLPCI_UPADD_ADDRESS_M MAKEMASK(0x7FFFFFFF, 1) +#define GLPCI_VENDORID 0x0009DEC8 /* Reset Source: PCIR */ +#define GLPCI_VENDORID_VENDORID_S 0 +#define GLPCI_VENDORID_VENDORID_M MAKEMASK(0xFFFF, 0) +#define GLPCI_VFSUP 0x0009DE9C /* Reset Source: PCIR */ +#define GLPCI_VFSUP_VF_PREFETCH_S 0 +#define GLPCI_VFSUP_VF_PREFETCH_M BIT(0) +#define GLPCI_VFSUP_VR_BAR_TYPE_S 1 +#define GLPCI_VFSUP_VR_BAR_TYPE_M BIT(1) +#define GLPCI_WATMK_CLNT_PIPEMON 0x000BFD90 /* Reset Source: PCIR */ +#define GLPCI_WATMK_CLNT_PIPEMON_DATA_LINES_S 0 +#define GLPCI_WATMK_CLNT_PIPEMON_DATA_LINES_M MAKEMASK(0xFFFF, 0) +#define PF_FUNC_RID 0x0009E880 /* Reset Source: PCIR */ +#define PF_FUNC_RID_FUNCTION_NUMBER_S 0 +#define PF_FUNC_RID_FUNCTION_NUMBER_M MAKEMASK(0x7, 0) +#define PF_FUNC_RID_DEVICE_NUMBER_S 3 +#define PF_FUNC_RID_DEVICE_NUMBER_M MAKEMASK(0x1F, 3) +#define PF_FUNC_RID_BUS_NUMBER_S 8 +#define PF_FUNC_RID_BUS_NUMBER_M MAKEMASK(0xFF, 8) +#define PF_PCI_CIAA 0x0009E580 /* Reset Source: FLR */ +#define PF_PCI_CIAA_ADDRESS_S 0 +#define PF_PCI_CIAA_ADDRESS_M MAKEMASK(0xFFF, 0) +#define PF_PCI_CIAA_VF_NUM_S 12 +#define PF_PCI_CIAA_VF_NUM_M MAKEMASK(0xFF, 12) +#define PF_PCI_CIAD 0x0009E500 /* Reset Source: FLR */ +#define PF_PCI_CIAD_DATA_S 0 +#define PF_PCI_CIAD_DATA_M MAKEMASK(0xFFFFFFFF, 0) +#define PFPCI_CLASS 0x0009DB00 /* Reset Source: PCIR */ +#define PFPCI_CLASS_STORAGE_CLASS_S 0 +#define PFPCI_CLASS_STORAGE_CLASS_M BIT(0) +#define PFPCI_CLASS_PF_IS_LAN_S 2 +#define PFPCI_CLASS_PF_IS_LAN_M BIT(2) +#define PFPCI_CNF 0x0009DF00 /* Reset Source: PCIR */ +#define PFPCI_CNF_MSI_EN_S 2 +#define PFPCI_CNF_MSI_EN_M BIT(2) +#define PFPCI_CNF_EXROM_DIS_S 3 +#define PFPCI_CNF_EXROM_DIS_M BIT(3) +#define PFPCI_CNF_IO_BAR_S 4 +#define PFPCI_CNF_IO_BAR_M BIT(4) +#define PFPCI_CNF_INT_PIN_S 5 +#define PFPCI_CNF_INT_PIN_M MAKEMASK(0x3, 5) +#define PFPCI_DEVID 0x0009DE00 /* Reset Source: PCIR */ +#define PFPCI_DEVID_PF_DEV_ID_S 0 +#define PFPCI_DEVID_PF_DEV_ID_M MAKEMASK(0xFFFF, 0) +#define PFPCI_DEVID_VF_DEV_ID_S 16 +#define PFPCI_DEVID_VF_DEV_ID_M MAKEMASK(0xFFFF, 16) +#define PFPCI_FACTPS 0x0009E900 /* Reset Source: FLR */ +#define PFPCI_FACTPS_FUNC_POWER_STATE_S 0 +#define PFPCI_FACTPS_FUNC_POWER_STATE_M MAKEMASK(0x3, 0) +#define PFPCI_FACTPS_FUNC_AUX_EN_S 3 +#define PFPCI_FACTPS_FUNC_AUX_EN_M BIT(3) +#define PFPCI_FUNC 0x0009D980 /* Reset Source: POR */ +#define PFPCI_FUNC_FUNC_DIS_S 0 +#define PFPCI_FUNC_FUNC_DIS_M BIT(0) +#define PFPCI_FUNC_ALLOW_FUNC_DIS_S 1 +#define PFPCI_FUNC_ALLOW_FUNC_DIS_M BIT(1) +#define PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_S 2 +#define PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_M BIT(2) +#define PFPCI_PF_FLUSH_DONE 0x0009E400 /* Reset Source: PCIR */ +#define PFPCI_PF_FLUSH_DONE_FLUSH_DONE_S 0 +#define PFPCI_PF_FLUSH_DONE_FLUSH_DONE_M BIT(0) +#define PFPCI_PM 0x0009DA80 /* Reset Source: POR */ +#define PFPCI_PM_PME_EN_S 0 +#define PFPCI_PM_PME_EN_M BIT(0) +#define PFPCI_STATUS1 0x0009DA00 /* Reset Source: POR */ +#define PFPCI_STATUS1_FUNC_VALID_S 0 +#define PFPCI_STATUS1_FUNC_VALID_M BIT(0) +#define PFPCI_SUBSYSID 0x0009D880 /* Reset Source: PCIR */ +#define PFPCI_SUBSYSID_PF_SUBSYS_ID_S 0 +#define PFPCI_SUBSYSID_PF_SUBSYS_ID_M MAKEMASK(0xFFFF, 0) +#define PFPCI_SUBSYSID_VF_SUBSYS_ID_S 16 +#define PFPCI_SUBSYSID_VF_SUBSYS_ID_M MAKEMASK(0xFFFF, 16) +#define PFPCI_VF_FLUSH_DONE(_VF) (0x0009E000 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: PCIR */ +#define PFPCI_VF_FLUSH_DONE_MAX_INDEX 255 +#define 
PFPCI_VF_FLUSH_DONE_FLUSH_DONE_S 0 +#define PFPCI_VF_FLUSH_DONE_FLUSH_DONE_M BIT(0) +#define PFPCI_VM_FLUSH_DONE 0x0009E480 /* Reset Source: PCIR */ +#define PFPCI_VM_FLUSH_DONE_FLUSH_DONE_S 0 +#define PFPCI_VM_FLUSH_DONE_FLUSH_DONE_M BIT(0) +#define PFPCI_VMINDEX 0x0009E600 /* Reset Source: PCIR */ +#define PFPCI_VMINDEX_VMINDEX_S 0 +#define PFPCI_VMINDEX_VMINDEX_M MAKEMASK(0x3FF, 0) +#define PFPCI_VMPEND 0x0009E800 /* Reset Source: PCIR */ +#define PFPCI_VMPEND_PENDING_S 0 +#define PFPCI_VMPEND_PENDING_M BIT(0) +#define PQ_FIFO_STATUS 0x0009DF40 /* Reset Source: PCIR */ +#define PQ_FIFO_STATUS_PQ_FIFO_COUNT_S 0 +#define PQ_FIFO_STATUS_PQ_FIFO_COUNT_M MAKEMASK(0x7FFFFFFF, 0) +#define PQ_FIFO_STATUS_PQ_FIFO_EMPTY_S 31 +#define PQ_FIFO_STATUS_PQ_FIFO_EMPTY_M BIT(31) +#define GLPE_CPUSTATUS0 0x0050BA5C /* Reset Source: CORER */ +#define GLPE_CPUSTATUS0_PECPUSTATUS0_S 0 +#define GLPE_CPUSTATUS0_PECPUSTATUS0_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPE_CPUSTATUS1 0x0050BA60 /* Reset Source: CORER */ +#define GLPE_CPUSTATUS1_PECPUSTATUS1_S 0 +#define GLPE_CPUSTATUS1_PECPUSTATUS1_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPE_CPUSTATUS2 0x0050BA64 /* Reset Source: CORER */ +#define GLPE_CPUSTATUS2_PECPUSTATUS2_S 0 +#define GLPE_CPUSTATUS2_PECPUSTATUS2_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPE_MDQ_BASE(_i) (0x00536000 + ((_i) * 4)) /* _i=0...511 */ /* Reset Source: CORER */ +#define GLPE_MDQ_BASE_MAX_INDEX 511 +#define GLPE_MDQ_BASE_MDOC_INDEX_S 0 +#define GLPE_MDQ_BASE_MDOC_INDEX_M MAKEMASK(0xFFFFFFF, 0) +#define GLPE_MDQ_PTR(_i) (0x00537000 + ((_i) * 4)) /* _i=0...511 */ /* Reset Source: CORER */ +#define GLPE_MDQ_PTR_MAX_INDEX 511 +#define GLPE_MDQ_PTR_MDQ_HEAD_S 0 +#define GLPE_MDQ_PTR_MDQ_HEAD_M MAKEMASK(0x3FFF, 0) +#define GLPE_MDQ_PTR_MDQ_TAIL_S 16 +#define GLPE_MDQ_PTR_MDQ_TAIL_M MAKEMASK(0x3FFF, 16) +#define GLPE_MDQ_SIZE(_i) (0x00536800 + ((_i) * 4)) /* _i=0...511 */ /* Reset Source: CORER */ +#define GLPE_MDQ_SIZE_MAX_INDEX 511 +#define GLPE_MDQ_SIZE_MDQ_SIZE_S 0 +#define GLPE_MDQ_SIZE_MDQ_SIZE_M MAKEMASK(0x3FFF, 0) +#define GLPE_PEPM_CTRL 0x0050C000 /* Reset Source: PERST */ +#define GLPE_PEPM_CTRL_PEPM_ENABLE_S 0 +#define GLPE_PEPM_CTRL_PEPM_ENABLE_M BIT(0) +#define GLPE_PEPM_CTRL_PEPM_HALT_S 8 +#define GLPE_PEPM_CTRL_PEPM_HALT_M BIT(8) +#define GLPE_PEPM_CTRL_PEPM_PUSH_MARGIN_S 16 +#define GLPE_PEPM_CTRL_PEPM_PUSH_MARGIN_M MAKEMASK(0xFF, 16) +#define GLPE_PEPM_DEALLOC 0x0050C004 /* Reset Source: PERST */ +#define GLPE_PEPM_DEALLOC_MDQ_CREDITS_S 0 +#define GLPE_PEPM_DEALLOC_MDQ_CREDITS_M MAKEMASK(0x3FFF, 0) +#define GLPE_PEPM_DEALLOC_PSQ_CREDITS_S 14 +#define GLPE_PEPM_DEALLOC_PSQ_CREDITS_M MAKEMASK(0x1F, 14) +#define GLPE_PEPM_DEALLOC_PQID_S 19 +#define GLPE_PEPM_DEALLOC_PQID_M MAKEMASK(0x1FF, 19) +#define GLPE_PEPM_DEALLOC_PORT_S 28 +#define GLPE_PEPM_DEALLOC_PORT_M MAKEMASK(0x7, 28) +#define GLPE_PEPM_DEALLOC_DEALLOC_RDY_S 31 +#define GLPE_PEPM_DEALLOC_DEALLOC_RDY_M BIT(31) +#define GLPE_PEPM_PSQ_COUNT 0x0050C020 /* Reset Source: PERST */ +#define GLPE_PEPM_PSQ_COUNT_PEPM_PSQ_COUNT_S 0 +#define GLPE_PEPM_PSQ_COUNT_PEPM_PSQ_COUNT_M MAKEMASK(0xFFFF, 0) +#define GLPE_PEPM_THRESH(_i) (0x0050C840 + ((_i) * 4)) /* _i=0...511 */ /* Reset Source: PERST */ +#define GLPE_PEPM_THRESH_MAX_INDEX 511 +#define GLPE_PEPM_THRESH_PEPM_PSQ_THRESH_S 0 +#define GLPE_PEPM_THRESH_PEPM_PSQ_THRESH_M MAKEMASK(0x1F, 0) +#define GLPE_PEPM_THRESH_PEPM_MDQ_THRESH_S 16 +#define GLPE_PEPM_THRESH_PEPM_MDQ_THRESH_M MAKEMASK(0x3FFF, 16) +#define GLPE_PFAEQEDROPCNT(_i) (0x00503240 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: 
CORER */ +#define GLPE_PFAEQEDROPCNT_MAX_INDEX 7 +#define GLPE_PFAEQEDROPCNT_AEQEDROPCNT_S 0 +#define GLPE_PFAEQEDROPCNT_AEQEDROPCNT_M MAKEMASK(0xFFFF, 0) +#define GLPE_PFCEQEDROPCNT(_i) (0x00503220 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLPE_PFCEQEDROPCNT_MAX_INDEX 7 +#define GLPE_PFCEQEDROPCNT_CEQEDROPCNT_S 0 +#define GLPE_PFCEQEDROPCNT_CEQEDROPCNT_M MAKEMASK(0xFFFF, 0) +#define GLPE_PFCQEDROPCNT(_i) (0x00503200 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLPE_PFCQEDROPCNT_MAX_INDEX 7 +#define GLPE_PFCQEDROPCNT_CQEDROPCNT_S 0 +#define GLPE_PFCQEDROPCNT_CQEDROPCNT_M MAKEMASK(0xFFFF, 0) +#define GLPE_PFFLMOOISCALLOCERR(_i) (0x0050B960 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLPE_PFFLMOOISCALLOCERR_MAX_INDEX 7 +#define GLPE_PFFLMOOISCALLOCERR_ERROR_COUNT_S 0 +#define GLPE_PFFLMOOISCALLOCERR_ERROR_COUNT_M MAKEMASK(0xFFFF, 0) +#define GLPE_PFFLMQ1ALLOCERR(_i) (0x0050B920 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLPE_PFFLMQ1ALLOCERR_MAX_INDEX 7 +#define GLPE_PFFLMQ1ALLOCERR_ERROR_COUNT_S 0 +#define GLPE_PFFLMQ1ALLOCERR_ERROR_COUNT_M MAKEMASK(0xFFFF, 0) +#define GLPE_PFFLMRRFALLOCERR(_i) (0x0050B940 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLPE_PFFLMRRFALLOCERR_MAX_INDEX 7 +#define GLPE_PFFLMRRFALLOCERR_ERROR_COUNT_S 0 +#define GLPE_PFFLMRRFALLOCERR_ERROR_COUNT_M MAKEMASK(0xFFFF, 0) +#define GLPE_PFFLMXMITALLOCERR(_i) (0x0050B900 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLPE_PFFLMXMITALLOCERR_MAX_INDEX 7 +#define GLPE_PFFLMXMITALLOCERR_ERROR_COUNT_S 0 +#define GLPE_PFFLMXMITALLOCERR_ERROR_COUNT_M MAKEMASK(0xFFFF, 0) +#define GLPE_PFTCPNOW50USCNT(_i) (0x0050B8C0 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLPE_PFTCPNOW50USCNT_MAX_INDEX 7 +#define GLPE_PFTCPNOW50USCNT_CNT_S 0 +#define GLPE_PFTCPNOW50USCNT_CNT_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPE_PUSH_PEPM 0x0053241C /* Reset Source: CORER */ +#define GLPE_PUSH_PEPM_MDQ_CREDITS_S 0 +#define GLPE_PUSH_PEPM_MDQ_CREDITS_M MAKEMASK(0xFF, 0) +#define GLPE_VFAEQEDROPCNT(_i) (0x00503100 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ +#define GLPE_VFAEQEDROPCNT_MAX_INDEX 31 +#define GLPE_VFAEQEDROPCNT_AEQEDROPCNT_S 0 +#define GLPE_VFAEQEDROPCNT_AEQEDROPCNT_M MAKEMASK(0xFFFF, 0) +#define GLPE_VFCEQEDROPCNT(_i) (0x00503080 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ +#define GLPE_VFCEQEDROPCNT_MAX_INDEX 31 +#define GLPE_VFCEQEDROPCNT_CEQEDROPCNT_S 0 +#define GLPE_VFCEQEDROPCNT_CEQEDROPCNT_M MAKEMASK(0xFFFF, 0) +#define GLPE_VFCQEDROPCNT(_i) (0x00503000 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ +#define GLPE_VFCQEDROPCNT_MAX_INDEX 31 +#define GLPE_VFCQEDROPCNT_CQEDROPCNT_S 0 +#define GLPE_VFCQEDROPCNT_CQEDROPCNT_M MAKEMASK(0xFFFF, 0) +#define GLPE_VFFLMOOISCALLOCERR(_i) (0x0050B580 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ +#define GLPE_VFFLMOOISCALLOCERR_MAX_INDEX 31 +#define GLPE_VFFLMOOISCALLOCERR_ERROR_COUNT_S 0 +#define GLPE_VFFLMOOISCALLOCERR_ERROR_COUNT_M MAKEMASK(0xFFFF, 0) +#define GLPE_VFFLMQ1ALLOCERR(_i) (0x0050B480 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ +#define GLPE_VFFLMQ1ALLOCERR_MAX_INDEX 31 +#define GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_S 0 +#define GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_M MAKEMASK(0xFFFF, 0) +#define GLPE_VFFLMRRFALLOCERR(_i) (0x0050B500 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ +#define GLPE_VFFLMRRFALLOCERR_MAX_INDEX 31 +#define GLPE_VFFLMRRFALLOCERR_ERROR_COUNT_S 0 +#define 
GLPE_VFFLMRRFALLOCERR_ERROR_COUNT_M MAKEMASK(0xFFFF, 0) +#define GLPE_VFFLMXMITALLOCERR(_i) (0x0050B400 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ +#define GLPE_VFFLMXMITALLOCERR_MAX_INDEX 31 +#define GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_S 0 +#define GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_M MAKEMASK(0xFFFF, 0) +#define GLPE_VFTCPNOW50USCNT(_i) (0x0050B300 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: PE_CORER */ +#define GLPE_VFTCPNOW50USCNT_MAX_INDEX 31 +#define GLPE_VFTCPNOW50USCNT_CNT_S 0 +#define GLPE_VFTCPNOW50USCNT_CNT_M MAKEMASK(0xFFFFFFFF, 0) +#define PFPE_AEQALLOC 0x00502D00 /* Reset Source: PFR */ +#define PFPE_AEQALLOC_AECOUNT_S 0 +#define PFPE_AEQALLOC_AECOUNT_M MAKEMASK(0xFFFFFFFF, 0) +#define PFPE_CCQPHIGH 0x0050A100 /* Reset Source: PFR */ +#define PFPE_CCQPHIGH_PECCQPHIGH_S 0 +#define PFPE_CCQPHIGH_PECCQPHIGH_M MAKEMASK(0xFFFFFFFF, 0) +#define PFPE_CCQPLOW 0x0050A080 /* Reset Source: PFR */ +#define PFPE_CCQPLOW_PECCQPLOW_S 0 +#define PFPE_CCQPLOW_PECCQPLOW_M MAKEMASK(0xFFFFFFFF, 0) +#define PFPE_CCQPSTATUS 0x0050A000 /* Reset Source: PFR */ +#define PFPE_CCQPSTATUS_CCQP_DONE_S 0 +#define PFPE_CCQPSTATUS_CCQP_DONE_M BIT(0) +#define PFPE_CCQPSTATUS_HMC_PROFILE_S 4 +#define PFPE_CCQPSTATUS_HMC_PROFILE_M MAKEMASK(0x7, 4) +#define PFPE_CCQPSTATUS_RDMA_EN_VFS_S 16 +#define PFPE_CCQPSTATUS_RDMA_EN_VFS_M MAKEMASK(0x3F, 16) +#define PFPE_CCQPSTATUS_CCQP_ERR_S 31 +#define PFPE_CCQPSTATUS_CCQP_ERR_M BIT(31) +#define PFPE_CQACK 0x00502C80 /* Reset Source: PFR */ +#define PFPE_CQACK_PECQID_S 0 +#define PFPE_CQACK_PECQID_M MAKEMASK(0x7FFFF, 0) +#define PFPE_CQARM 0x00502C00 /* Reset Source: PFR */ +#define PFPE_CQARM_PECQID_S 0 +#define PFPE_CQARM_PECQID_M MAKEMASK(0x7FFFF, 0) +#define PFPE_CQPDB 0x00500800 /* Reset Source: PFR */ +#define PFPE_CQPDB_WQHEAD_S 0 +#define PFPE_CQPDB_WQHEAD_M MAKEMASK(0x7FF, 0) +#define PFPE_CQPERRCODES 0x0050A200 /* Reset Source: PFR */ +#define PFPE_CQPERRCODES_CQP_MINOR_CODE_S 0 +#define PFPE_CQPERRCODES_CQP_MINOR_CODE_M MAKEMASK(0xFFFF, 0) +#define PFPE_CQPERRCODES_CQP_MAJOR_CODE_S 16 +#define PFPE_CQPERRCODES_CQP_MAJOR_CODE_M MAKEMASK(0xFFFF, 16) +#define PFPE_CQPTAIL 0x00500880 /* Reset Source: PFR */ +#define PFPE_CQPTAIL_WQTAIL_S 0 +#define PFPE_CQPTAIL_WQTAIL_M MAKEMASK(0x7FF, 0) +#define PFPE_CQPTAIL_CQP_OP_ERR_S 31 +#define PFPE_CQPTAIL_CQP_OP_ERR_M BIT(31) +#define PFPE_IPCONFIG0 0x0050A180 /* Reset Source: PFR */ +#define PFPE_IPCONFIG0_PEIPID_S 0 +#define PFPE_IPCONFIG0_PEIPID_M MAKEMASK(0xFFFF, 0) +#define PFPE_IPCONFIG0_USEENTIREIDRANGE_S 16 +#define PFPE_IPCONFIG0_USEENTIREIDRANGE_M BIT(16) +#define PFPE_IPCONFIG0_UDP_SRC_PORT_MASK_EN_S 17 +#define PFPE_IPCONFIG0_UDP_SRC_PORT_MASK_EN_M BIT(17) +#define PFPE_MRTEIDXMASK 0x0050A300 /* Reset Source: PFR */ +#define PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_S 0 +#define PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_M MAKEMASK(0x1F, 0) +#define PFPE_RCVUNEXPECTEDERROR 0x0050A380 /* Reset Source: PFR */ +#define PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_S 0 +#define PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_M MAKEMASK(0xFFFFFF, 0) +#define PFPE_TCPNOWTIMER 0x0050A280 /* Reset Source: PFR */ +#define PFPE_TCPNOWTIMER_TCP_NOW_S 0 +#define PFPE_TCPNOWTIMER_TCP_NOW_M MAKEMASK(0xFFFFFFFF, 0) +#define PFPE_WQEALLOC 0x00504400 /* Reset Source: PFR */ +#define PFPE_WQEALLOC_PEQPID_S 0 +#define PFPE_WQEALLOC_PEQPID_M MAKEMASK(0x3FFFF, 0) +#define PFPE_WQEALLOC_WQE_DESC_INDEX_S 20 +#define PFPE_WQEALLOC_WQE_DESC_INDEX_M MAKEMASK(0xFFF, 20) +#define PRT_PEPM_COUNT(_i) (0x0050C040 + ((_i) * 4)) /* _i=0...511 */ /* Reset 
Source: PERST */ +#define PRT_PEPM_COUNT_MAX_INDEX 511 +#define PRT_PEPM_COUNT_PEPM_PSQ_COUNT_S 0 +#define PRT_PEPM_COUNT_PEPM_PSQ_COUNT_M MAKEMASK(0x1F, 0) +#define PRT_PEPM_COUNT_PEPM_MDQ_COUNT_S 16 +#define PRT_PEPM_COUNT_PEPM_MDQ_COUNT_M MAKEMASK(0x3FFF, 16) +#define VFPE_AEQALLOC(_VF) (0x00502800 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: PFR */ +#define VFPE_AEQALLOC_MAX_INDEX 255 +#define VFPE_AEQALLOC_AECOUNT_S 0 +#define VFPE_AEQALLOC_AECOUNT_M MAKEMASK(0xFFFFFFFF, 0) +#define VFPE_CCQPHIGH(_VF) (0x00508800 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: PFR */ +#define VFPE_CCQPHIGH_MAX_INDEX 255 +#define VFPE_CCQPHIGH_PECCQPHIGH_S 0 +#define VFPE_CCQPHIGH_PECCQPHIGH_M MAKEMASK(0xFFFFFFFF, 0) +#define VFPE_CCQPLOW(_VF) (0x00508400 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: PFR */ +#define VFPE_CCQPLOW_MAX_INDEX 255 +#define VFPE_CCQPLOW_PECCQPLOW_S 0 +#define VFPE_CCQPLOW_PECCQPLOW_M MAKEMASK(0xFFFFFFFF, 0) +#define VFPE_CCQPSTATUS(_VF) (0x00508000 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: PFR */ +#define VFPE_CCQPSTATUS_MAX_INDEX 255 +#define VFPE_CCQPSTATUS_CCQP_DONE_S 0 +#define VFPE_CCQPSTATUS_CCQP_DONE_M BIT(0) +#define VFPE_CCQPSTATUS_HMC_PROFILE_S 4 +#define VFPE_CCQPSTATUS_HMC_PROFILE_M MAKEMASK(0x7, 4) +#define VFPE_CCQPSTATUS_RDMA_EN_VFS_S 16 +#define VFPE_CCQPSTATUS_RDMA_EN_VFS_M MAKEMASK(0x3F, 16) +#define VFPE_CCQPSTATUS_CCQP_ERR_S 31 +#define VFPE_CCQPSTATUS_CCQP_ERR_M BIT(31) +#define VFPE_CQACK(_VF) (0x00502400 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: PFR */ +#define VFPE_CQACK_MAX_INDEX 255 +#define VFPE_CQACK_PECQID_S 0 +#define VFPE_CQACK_PECQID_M MAKEMASK(0x7FFFF, 0) +#define VFPE_CQARM(_VF) (0x00502000 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: PFR */ +#define VFPE_CQARM_MAX_INDEX 255 +#define VFPE_CQARM_PECQID_S 0 +#define VFPE_CQARM_PECQID_M MAKEMASK(0x7FFFF, 0) +#define VFPE_CQPDB(_VF) (0x00500000 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: PFR */ +#define VFPE_CQPDB_MAX_INDEX 255 +#define VFPE_CQPDB_WQHEAD_S 0 +#define VFPE_CQPDB_WQHEAD_M MAKEMASK(0x7FF, 0) +#define VFPE_CQPERRCODES(_VF) (0x00509000 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: PFR */ +#define VFPE_CQPERRCODES_MAX_INDEX 255 +#define VFPE_CQPERRCODES_CQP_MINOR_CODE_S 0 +#define VFPE_CQPERRCODES_CQP_MINOR_CODE_M MAKEMASK(0xFFFF, 0) +#define VFPE_CQPERRCODES_CQP_MAJOR_CODE_S 16 +#define VFPE_CQPERRCODES_CQP_MAJOR_CODE_M MAKEMASK(0xFFFF, 16) +#define VFPE_CQPTAIL(_VF) (0x00500400 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: PFR */ +#define VFPE_CQPTAIL_MAX_INDEX 255 +#define VFPE_CQPTAIL_WQTAIL_S 0 +#define VFPE_CQPTAIL_WQTAIL_M MAKEMASK(0x7FF, 0) +#define VFPE_CQPTAIL_CQP_OP_ERR_S 31 +#define VFPE_CQPTAIL_CQP_OP_ERR_M BIT(31) +#define VFPE_IPCONFIG0(_VF) (0x00508C00 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: PFR */ +#define VFPE_IPCONFIG0_MAX_INDEX 255 +#define VFPE_IPCONFIG0_PEIPID_S 0 +#define VFPE_IPCONFIG0_PEIPID_M MAKEMASK(0xFFFF, 0) +#define VFPE_IPCONFIG0_USEENTIREIDRANGE_S 16 +#define VFPE_IPCONFIG0_USEENTIREIDRANGE_M BIT(16) +#define VFPE_IPCONFIG0_UDP_SRC_PORT_MASK_EN_S 17 +#define VFPE_IPCONFIG0_UDP_SRC_PORT_MASK_EN_M BIT(17) +#define VFPE_RCVUNEXPECTEDERROR(_VF) (0x00509C00 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: PFR */ +#define VFPE_RCVUNEXPECTEDERROR_MAX_INDEX 255 +#define VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_S 0 +#define VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_M MAKEMASK(0xFFFFFF, 0) +#define VFPE_TCPNOWTIMER(_VF) (0x00509400 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: PFR */ +#define 
VFPE_TCPNOWTIMER_MAX_INDEX 255 +#define VFPE_TCPNOWTIMER_TCP_NOW_S 0 +#define VFPE_TCPNOWTIMER_TCP_NOW_M MAKEMASK(0xFFFFFFFF, 0) +#define VFPE_WQEALLOC(_VF) (0x00504000 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: PFR */ +#define VFPE_WQEALLOC_MAX_INDEX 255 +#define VFPE_WQEALLOC_PEQPID_S 0 +#define VFPE_WQEALLOC_PEQPID_M MAKEMASK(0x3FFFF, 0) +#define VFPE_WQEALLOC_WQE_DESC_INDEX_S 20 +#define VFPE_WQEALLOC_WQE_DESC_INDEX_M MAKEMASK(0xFFF, 20) +#define GLPES_PFIP4RXDISCARD(_i) (0x00541400 + ((_i) * 4)) /* _i=0...127 */ /* Reset Source: CORER */ +#define GLPES_PFIP4RXDISCARD_MAX_INDEX 127 +#define GLPES_PFIP4RXDISCARD_IP4RXDISCARD_S 0 +#define GLPES_PFIP4RXDISCARD_IP4RXDISCARD_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPES_PFIP4RXFRAGSHI(_i) (0x00541C04 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ +#define GLPES_PFIP4RXFRAGSHI_MAX_INDEX 127 +#define GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_S 0 +#define GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_M MAKEMASK(0xFFFF, 0) +#define GLPES_PFIP4RXFRAGSLO(_i) (0x00541C00 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ +#define GLPES_PFIP4RXFRAGSLO_MAX_INDEX 127 +#define GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_S 0 +#define GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPES_PFIP4RXMCOCTSHI(_i) (0x00542404 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ +#define GLPES_PFIP4RXMCOCTSHI_MAX_INDEX 127 +#define GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_S 0 +#define GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_M MAKEMASK(0xFFFF, 0) +#define GLPES_PFIP4RXMCOCTSLO(_i) (0x00542400 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ +#define GLPES_PFIP4RXMCOCTSLO_MAX_INDEX 127 +#define GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_S 0 +#define GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPES_PFIP4RXMCPKTSHI(_i) (0x00542C04 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ +#define GLPES_PFIP4RXMCPKTSHI_MAX_INDEX 127 +#define GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_S 0 +#define GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_M MAKEMASK(0xFFFF, 0) +#define GLPES_PFIP4RXMCPKTSLO(_i) (0x00542C00 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ +#define GLPES_PFIP4RXMCPKTSLO_MAX_INDEX 127 +#define GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_S 0 +#define GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPES_PFIP4RXOCTSHI(_i) (0x00540404 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ +#define GLPES_PFIP4RXOCTSHI_MAX_INDEX 127 +#define GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_S 0 +#define GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_M MAKEMASK(0xFFFF, 0) +#define GLPES_PFIP4RXOCTSLO(_i) (0x00540400 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ +#define GLPES_PFIP4RXOCTSLO_MAX_INDEX 127 +#define GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_S 0 +#define GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPES_PFIP4RXPKTSHI(_i) (0x00540C04 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ +#define GLPES_PFIP4RXPKTSHI_MAX_INDEX 127 +#define GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_S 0 +#define GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_M MAKEMASK(0xFFFF, 0) +#define GLPES_PFIP4RXPKTSLO(_i) (0x00540C00 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ +#define GLPES_PFIP4RXPKTSLO_MAX_INDEX 127 +#define GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_S 0 +#define GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPES_PFIP4RXTRUNC(_i) (0x00541800 + ((_i) * 4)) /* _i=0...127 */ /* Reset Source: CORER */ +#define GLPES_PFIP4RXTRUNC_MAX_INDEX 127 +#define GLPES_PFIP4RXTRUNC_IP4RXTRUNC_S 0 +#define GLPES_PFIP4RXTRUNC_IP4RXTRUNC_M 
MAKEMASK(0xFFFFFFFF, 0) +#define GLPES_PFIP4TXFRAGSHI(_i) (0x00547404 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ +#define GLPES_PFIP4TXFRAGSHI_MAX_INDEX 127 +#define GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_S 0 +#define GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_M MAKEMASK(0xFFFF, 0) +#define GLPES_PFIP4TXFRAGSLO(_i) (0x00547400 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ +#define GLPES_PFIP4TXFRAGSLO_MAX_INDEX 127 +#define GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_S 0 +#define GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPES_PFIP4TXMCOCTSHI(_i) (0x00547C04 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ +#define GLPES_PFIP4TXMCOCTSHI_MAX_INDEX 127 +#define GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_S 0 +#define GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_M MAKEMASK(0xFFFF, 0) +#define GLPES_PFIP4TXMCOCTSLO(_i) (0x00547C00 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ +#define GLPES_PFIP4TXMCOCTSLO_MAX_INDEX 127 +#define GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_S 0 +#define GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPES_PFIP4TXMCPKTSHI(_i) (0x00548404 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ +#define GLPES_PFIP4TXMCPKTSHI_MAX_INDEX 127 +#define GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_S 0 +#define GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_M MAKEMASK(0xFFFF, 0) +#define GLPES_PFIP4TXMCPKTSLO(_i) (0x00548400 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ +#define GLPES_PFIP4TXMCPKTSLO_MAX_INDEX 127 +#define GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_S 0 +#define GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPES_PFIP4TXNOROUTE(_i) (0x0054B400 + ((_i) * 4)) /* _i=0...127 */ /* Reset Source: CORER */ +#define GLPES_PFIP4TXNOROUTE_MAX_INDEX 127 +#define GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_S 0 +#define GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_M MAKEMASK(0xFFFFFF, 0) +#define GLPES_PFIP4TXOCTSHI(_i) (0x00546404 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ +#define GLPES_PFIP4TXOCTSHI_MAX_INDEX 127 +#define GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_S 0 +#define GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_M MAKEMASK(0xFFFF, 0) +#define GLPES_PFIP4TXOCTSLO(_i) (0x00546400 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ +#define GLPES_PFIP4TXOCTSLO_MAX_INDEX 127 +#define GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_S 0 +#define GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPES_PFIP4TXPKTSHI(_i) (0x00546C04 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ +#define GLPES_PFIP4TXPKTSHI_MAX_INDEX 127 +#define GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_S 0 +#define GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_M MAKEMASK(0xFFFF, 0) +#define GLPES_PFIP4TXPKTSLO(_i) (0x00546C00 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ +#define GLPES_PFIP4TXPKTSLO_MAX_INDEX 127 +#define GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_S 0 +#define GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPES_PFIP6RXDISCARD(_i) (0x00544400 + ((_i) * 4)) /* _i=0...127 */ /* Reset Source: CORER */ +#define GLPES_PFIP6RXDISCARD_MAX_INDEX 127 +#define GLPES_PFIP6RXDISCARD_IP6RXDISCARD_S 0 +#define GLPES_PFIP6RXDISCARD_IP6RXDISCARD_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPES_PFIP6RXFRAGSHI(_i) (0x00544C04 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ +#define GLPES_PFIP6RXFRAGSHI_MAX_INDEX 127 +#define GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_S 0 +#define GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_M MAKEMASK(0xFFFF, 0) +#define GLPES_PFIP6RXFRAGSLO(_i) (0x00544C00 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ +#define 
GLPES_PFIP6RXFRAGSLO_MAX_INDEX 127 +#define GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_S 0 +#define GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPES_PFIP6RXMCOCTSHI(_i) (0x00545404 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ +#define GLPES_PFIP6RXMCOCTSHI_MAX_INDEX 127 +#define GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_S 0 +#define GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_M MAKEMASK(0xFFFF, 0) +#define GLPES_PFIP6RXMCOCTSLO(_i) (0x00545400 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ +#define GLPES_PFIP6RXMCOCTSLO_MAX_INDEX 127 +#define GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_S 0 +#define GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPES_PFIP6RXMCPKTSHI(_i) (0x00545C04 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ +#define GLPES_PFIP6RXMCPKTSHI_MAX_INDEX 127 +#define GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_S 0 +#define GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_M MAKEMASK(0xFFFF, 0) +#define GLPES_PFIP6RXMCPKTSLO(_i) (0x00545C00 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ +#define GLPES_PFIP6RXMCPKTSLO_MAX_INDEX 127 +#define GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_S 0 +#define GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPES_PFIP6RXOCTSHI(_i) (0x00543404 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ +#define GLPES_PFIP6RXOCTSHI_MAX_INDEX 127 +#define GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_S 0 +#define GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_M MAKEMASK(0xFFFF, 0) +#define GLPES_PFIP6RXOCTSLO(_i) (0x00543400 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ +#define GLPES_PFIP6RXOCTSLO_MAX_INDEX 127 +#define GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_S 0 +#define GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPES_PFIP6RXPKTSHI(_i) (0x00543C04 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ +#define GLPES_PFIP6RXPKTSHI_MAX_INDEX 127 +#define GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_S 0 +#define GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_M MAKEMASK(0xFFFF, 0) +#define GLPES_PFIP6RXPKTSLO(_i) (0x00543C00 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ +#define GLPES_PFIP6RXPKTSLO_MAX_INDEX 127 +#define GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_S 0 +#define GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPES_PFIP6RXTRUNC(_i) (0x00544800 + ((_i) * 4)) /* _i=0...127 */ /* Reset Source: CORER */ +#define GLPES_PFIP6RXTRUNC_MAX_INDEX 127 +#define GLPES_PFIP6RXTRUNC_IP6RXTRUNC_S 0 +#define GLPES_PFIP6RXTRUNC_IP6RXTRUNC_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPES_PFIP6TXFRAGSHI(_i) (0x00549C04 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ +#define GLPES_PFIP6TXFRAGSHI_MAX_INDEX 127 +#define GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_S 0 +#define GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_M MAKEMASK(0xFFFF, 0) +#define GLPES_PFIP6TXFRAGSLO(_i) (0x00549C00 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ +#define GLPES_PFIP6TXFRAGSLO_MAX_INDEX 127 +#define GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_S 0 +#define GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPES_PFIP6TXMCOCTSHI(_i) (0x0054A404 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ +#define GLPES_PFIP6TXMCOCTSHI_MAX_INDEX 127 +#define GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_S 0 +#define GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_M MAKEMASK(0xFFFF, 0) +#define GLPES_PFIP6TXMCOCTSLO(_i) (0x0054A400 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ +#define GLPES_PFIP6TXMCOCTSLO_MAX_INDEX 127 +#define GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_S 0 +#define GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_M MAKEMASK(0xFFFFFFFF, 0) +#define 
GLPES_PFIP6TXMCPKTSHI(_i) (0x0054AC04 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ +#define GLPES_PFIP6TXMCPKTSHI_MAX_INDEX 127 +#define GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_S 0 +#define GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_M MAKEMASK(0xFFFF, 0) +#define GLPES_PFIP6TXMCPKTSLO(_i) (0x0054AC00 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ +#define GLPES_PFIP6TXMCPKTSLO_MAX_INDEX 127 +#define GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_S 0 +#define GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPES_PFIP6TXNOROUTE(_i) (0x0054B800 + ((_i) * 4)) /* _i=0...127 */ /* Reset Source: CORER */ +#define GLPES_PFIP6TXNOROUTE_MAX_INDEX 127 +#define GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_S 0 +#define GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_M MAKEMASK(0xFFFFFF, 0) +#define GLPES_PFIP6TXOCTSHI(_i) (0x00548C04 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ +#define GLPES_PFIP6TXOCTSHI_MAX_INDEX 127 +#define GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_S 0 +#define GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_M MAKEMASK(0xFFFF, 0) +#define GLPES_PFIP6TXOCTSLO(_i) (0x00548C00 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ +#define GLPES_PFIP6TXOCTSLO_MAX_INDEX 127 +#define GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_S 0 +#define GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPES_PFIP6TXPKTSHI(_i) (0x00549404 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ +#define GLPES_PFIP6TXPKTSHI_MAX_INDEX 127 +#define GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_S 0 +#define GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_M MAKEMASK(0xFFFF, 0) +#define GLPES_PFIP6TXPKTSLO(_i) (0x00549400 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ +#define GLPES_PFIP6TXPKTSLO_MAX_INDEX 127 +#define GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_S 0 +#define GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPES_PFRDMARXRDSHI(_i) (0x0054EC04 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ +#define GLPES_PFRDMARXRDSHI_MAX_INDEX 127 +#define GLPES_PFRDMARXRDSHI_RDMARXRDSHI_S 0 +#define GLPES_PFRDMARXRDSHI_RDMARXRDSHI_M MAKEMASK(0xFFFF, 0) +#define GLPES_PFRDMARXRDSLO(_i) (0x0054EC00 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ +#define GLPES_PFRDMARXRDSLO_MAX_INDEX 127 +#define GLPES_PFRDMARXRDSLO_RDMARXRDSLO_S 0 +#define GLPES_PFRDMARXRDSLO_RDMARXRDSLO_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPES_PFRDMARXSNDSHI(_i) (0x0054F404 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ +#define GLPES_PFRDMARXSNDSHI_MAX_INDEX 127 +#define GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_S 0 +#define GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_M MAKEMASK(0xFFFF, 0) +#define GLPES_PFRDMARXSNDSLO(_i) (0x0054F400 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ +#define GLPES_PFRDMARXSNDSLO_MAX_INDEX 127 +#define GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_S 0 +#define GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPES_PFRDMARXWRSHI(_i) (0x0054E404 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ +#define GLPES_PFRDMARXWRSHI_MAX_INDEX 127 +#define GLPES_PFRDMARXWRSHI_RDMARXWRSHI_S 0 +#define GLPES_PFRDMARXWRSHI_RDMARXWRSHI_M MAKEMASK(0xFFFF, 0) +#define GLPES_PFRDMARXWRSLO(_i) (0x0054E400 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ +#define GLPES_PFRDMARXWRSLO_MAX_INDEX 127 +#define GLPES_PFRDMARXWRSLO_RDMARXWRSLO_S 0 +#define GLPES_PFRDMARXWRSLO_RDMARXWRSLO_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPES_PFRDMATXRDSHI(_i) (0x00550404 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ +#define GLPES_PFRDMATXRDSHI_MAX_INDEX 127 +#define GLPES_PFRDMATXRDSHI_RDMARXRDSHI_S 0 +#define 
GLPES_PFRDMATXRDSHI_RDMARXRDSHI_M MAKEMASK(0xFFFF, 0) +#define GLPES_PFRDMATXRDSLO(_i) (0x00550400 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ +#define GLPES_PFRDMATXRDSLO_MAX_INDEX 127 +#define GLPES_PFRDMATXRDSLO_RDMARXRDSLO_S 0 +#define GLPES_PFRDMATXRDSLO_RDMARXRDSLO_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPES_PFRDMATXSNDSHI(_i) (0x00550C04 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ +#define GLPES_PFRDMATXSNDSHI_MAX_INDEX 127 +#define GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_S 0 +#define GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_M MAKEMASK(0xFFFF, 0) +#define GLPES_PFRDMATXSNDSLO(_i) (0x00550C00 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ +#define GLPES_PFRDMATXSNDSLO_MAX_INDEX 127 +#define GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_S 0 +#define GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPES_PFRDMATXWRSHI(_i) (0x0054FC04 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ +#define GLPES_PFRDMATXWRSHI_MAX_INDEX 127 +#define GLPES_PFRDMATXWRSHI_RDMARXWRSHI_S 0 +#define GLPES_PFRDMATXWRSHI_RDMARXWRSHI_M MAKEMASK(0xFFFF, 0) +#define GLPES_PFRDMATXWRSLO(_i) (0x0054FC00 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ +#define GLPES_PFRDMATXWRSLO_MAX_INDEX 127 +#define GLPES_PFRDMATXWRSLO_RDMARXWRSLO_S 0 +#define GLPES_PFRDMATXWRSLO_RDMARXWRSLO_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPES_PFRDMAVBNDHI(_i) (0x00551404 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ +#define GLPES_PFRDMAVBNDHI_MAX_INDEX 127 +#define GLPES_PFRDMAVBNDHI_RDMAVBNDHI_S 0 +#define GLPES_PFRDMAVBNDHI_RDMAVBNDHI_M MAKEMASK(0xFFFF, 0) +#define GLPES_PFRDMAVBNDLO(_i) (0x00551400 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ +#define GLPES_PFRDMAVBNDLO_MAX_INDEX 127 +#define GLPES_PFRDMAVBNDLO_RDMAVBNDLO_S 0 +#define GLPES_PFRDMAVBNDLO_RDMAVBNDLO_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPES_PFRDMAVINVHI(_i) (0x00551C04 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ +#define GLPES_PFRDMAVINVHI_MAX_INDEX 127 +#define GLPES_PFRDMAVINVHI_RDMAVINVHI_S 0 +#define GLPES_PFRDMAVINVHI_RDMAVINVHI_M MAKEMASK(0xFFFF, 0) +#define GLPES_PFRDMAVINVLO(_i) (0x00551C00 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ +#define GLPES_PFRDMAVINVLO_MAX_INDEX 127 +#define GLPES_PFRDMAVINVLO_RDMAVINVLO_S 0 +#define GLPES_PFRDMAVINVLO_RDMAVINVLO_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPES_PFRXVLANERR(_i) (0x00540000 + ((_i) * 4)) /* _i=0...127 */ /* Reset Source: CORER */ +#define GLPES_PFRXVLANERR_MAX_INDEX 127 +#define GLPES_PFRXVLANERR_RXVLANERR_S 0 +#define GLPES_PFRXVLANERR_RXVLANERR_M MAKEMASK(0xFFFFFF, 0) +#define GLPES_PFTCPRTXSEG(_i) (0x00552400 + ((_i) * 4)) /* _i=0...127 */ /* Reset Source: CORER */ +#define GLPES_PFTCPRTXSEG_MAX_INDEX 127 +#define GLPES_PFTCPRTXSEG_TCPRTXSEG_S 0 +#define GLPES_PFTCPRTXSEG_TCPRTXSEG_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPES_PFTCPRXOPTERR(_i) (0x0054C400 + ((_i) * 4)) /* _i=0...127 */ /* Reset Source: CORER */ +#define GLPES_PFTCPRXOPTERR_MAX_INDEX 127 +#define GLPES_PFTCPRXOPTERR_TCPRXOPTERR_S 0 +#define GLPES_PFTCPRXOPTERR_TCPRXOPTERR_M MAKEMASK(0xFFFFFF, 0) +#define GLPES_PFTCPRXPROTOERR(_i) (0x0054C800 + ((_i) * 4)) /* _i=0...127 */ /* Reset Source: CORER */ +#define GLPES_PFTCPRXPROTOERR_MAX_INDEX 127 +#define GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_S 0 +#define GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_M MAKEMASK(0xFFFFFF, 0) +#define GLPES_PFTCPRXSEGSHI(_i) (0x0054BC04 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ +#define GLPES_PFTCPRXSEGSHI_MAX_INDEX 127 +#define GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_S 0 +#define 
GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_M MAKEMASK(0xFFFF, 0) +#define GLPES_PFTCPRXSEGSLO(_i) (0x0054BC00 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ +#define GLPES_PFTCPRXSEGSLO_MAX_INDEX 127 +#define GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_S 0 +#define GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPES_PFTCPTXSEGHI(_i) (0x0054CC04 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ +#define GLPES_PFTCPTXSEGHI_MAX_INDEX 127 +#define GLPES_PFTCPTXSEGHI_TCPTXSEGHI_S 0 +#define GLPES_PFTCPTXSEGHI_TCPTXSEGHI_M MAKEMASK(0xFFFF, 0) +#define GLPES_PFTCPTXSEGLO(_i) (0x0054CC00 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ +#define GLPES_PFTCPTXSEGLO_MAX_INDEX 127 +#define GLPES_PFTCPTXSEGLO_TCPTXSEGLO_S 0 +#define GLPES_PFTCPTXSEGLO_TCPTXSEGLO_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPES_PFUDPRXPKTSHI(_i) (0x0054D404 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ +#define GLPES_PFUDPRXPKTSHI_MAX_INDEX 127 +#define GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_S 0 +#define GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_M MAKEMASK(0xFFFF, 0) +#define GLPES_PFUDPRXPKTSLO(_i) (0x0054D400 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ +#define GLPES_PFUDPRXPKTSLO_MAX_INDEX 127 +#define GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_S 0 +#define GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPES_PFUDPTXPKTSHI(_i) (0x0054DC04 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ +#define GLPES_PFUDPTXPKTSHI_MAX_INDEX 127 +#define GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_S 0 +#define GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_M MAKEMASK(0xFFFF, 0) +#define GLPES_PFUDPTXPKTSLO(_i) (0x0054DC00 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */ +#define GLPES_PFUDPTXPKTSLO_MAX_INDEX 127 +#define GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_S 0 +#define GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPES_RDMARXMULTFPDUSHI 0x0055E00C /* Reset Source: CORER */ +#define GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_S 0 +#define GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_M MAKEMASK(0xFFFFFF, 0) +#define GLPES_RDMARXMULTFPDUSLO 0x0055E008 /* Reset Source: CORER */ +#define GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_S 0 +#define GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPES_RDMARXOOODDPHI 0x0055E014 /* Reset Source: CORER */ +#define GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_S 0 +#define GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_M MAKEMASK(0xFFFFFF, 0) +#define GLPES_RDMARXOOODDPLO 0x0055E010 /* Reset Source: CORER */ +#define GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_S 0 +#define GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPES_RDMARXOOONOMARK 0x0055E004 /* Reset Source: CORER */ +#define GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_S 0 +#define GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPES_RDMARXUNALIGN 0x0055E000 /* Reset Source: CORER */ +#define GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_S 0 +#define GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPES_TCPRXFOURHOLEHI 0x0055E03C /* Reset Source: CORER */ +#define GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_S 0 +#define GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_M MAKEMASK(0xFFFFFF, 0) +#define GLPES_TCPRXFOURHOLELO 0x0055E038 /* Reset Source: CORER */ +#define GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_S 0 +#define GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPES_TCPRXONEHOLEHI 0x0055E024 /* Reset Source: CORER */ +#define GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_S 0 +#define GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_M MAKEMASK(0xFFFFFF, 0) +#define 
GLPES_TCPRXONEHOLELO 0x0055E020 /* Reset Source: CORER */ +#define GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_S 0 +#define GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPES_TCPRXPUREACKHI 0x0055E01C /* Reset Source: CORER */ +#define GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_S 0 +#define GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_M MAKEMASK(0xFFFFFF, 0) +#define GLPES_TCPRXPUREACKSLO 0x0055E018 /* Reset Source: CORER */ +#define GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_S 0 +#define GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPES_TCPRXTHREEHOLEHI 0x0055E034 /* Reset Source: CORER */ +#define GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_S 0 +#define GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_M MAKEMASK(0xFFFFFF, 0) +#define GLPES_TCPRXTHREEHOLELO 0x0055E030 /* Reset Source: CORER */ +#define GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_S 0 +#define GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPES_TCPRXTWOHOLEHI 0x0055E02C /* Reset Source: CORER */ +#define GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_S 0 +#define GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_M MAKEMASK(0xFFFFFF, 0) +#define GLPES_TCPRXTWOHOLELO 0x0055E028 /* Reset Source: CORER */ +#define GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_S 0 +#define GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPES_TCPTXRETRANSFASTHI 0x0055E044 /* Reset Source: CORER */ +#define GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_S 0 +#define GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_M MAKEMASK(0xFFFFFF, 0) +#define GLPES_TCPTXRETRANSFASTLO 0x0055E040 /* Reset Source: CORER */ +#define GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_S 0 +#define GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPES_TCPTXTOUTSFASTHI 0x0055E04C /* Reset Source: CORER */ +#define GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_S 0 +#define GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_M MAKEMASK(0xFFFFFF, 0) +#define GLPES_TCPTXTOUTSFASTLO 0x0055E048 /* Reset Source: CORER */ +#define GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_S 0 +#define GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPES_TCPTXTOUTSHI 0x0055E054 /* Reset Source: CORER */ +#define GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_S 0 +#define GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_M MAKEMASK(0xFFFFFF, 0) +#define GLPES_TCPTXTOUTSLO 0x0055E050 /* Reset Source: CORER */ +#define GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_S 0 +#define GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_M MAKEMASK(0xFFFFFFFF, 0) +#define GL_PWR_MODE_CTL 0x000B820C /* Reset Source: POR */ +#define GL_PWR_MODE_CTL_SWITCH_PWR_MODE_EN_S 0 +#define GL_PWR_MODE_CTL_SWITCH_PWR_MODE_EN_M BIT(0) +#define GL_PWR_MODE_CTL_NIC_PWR_MODE_EN_S 1 +#define GL_PWR_MODE_CTL_NIC_PWR_MODE_EN_M BIT(1) +#define GL_PWR_MODE_CTL_S5_PWR_MODE_EN_S 2 +#define GL_PWR_MODE_CTL_S5_PWR_MODE_EN_M BIT(2) +#define GL_PWR_MODE_CTL_CAR_MAX_SW_CONFIG_S 3 +#define GL_PWR_MODE_CTL_CAR_MAX_SW_CONFIG_M MAKEMASK(0x3, 3) +#define GL_PWR_MODE_CTL_CAR_MAX_BW_S 30 +#define GL_PWR_MODE_CTL_CAR_MAX_BW_M MAKEMASK(0x3, 30) +#define GL_PWR_MODE_DIVIDE_CTRL_H_DEFAULT 0x000B825C /* Reset Source: POR */ +#define GL_PWR_MODE_DIVIDE_CTRL_H_DEFAULT_DEFAULT_DIV_VAL_PECLK_S 0 +#define GL_PWR_MODE_DIVIDE_CTRL_H_DEFAULT_DEFAULT_DIV_VAL_PECLK_M MAKEMASK(0x7, 0) +#define GL_PWR_MODE_DIVIDE_CTRL_H_DEFAULT_DEFAULT_DIV_VAL_UCLK_S 3 +#define GL_PWR_MODE_DIVIDE_CTRL_H_DEFAULT_DEFAULT_DIV_VAL_UCLK_M MAKEMASK(0x7, 3) +#define GL_PWR_MODE_DIVIDE_CTRL_H_DEFAULT_DEFAULT_DIV_VAL_LCLK_S 6 +#define GL_PWR_MODE_DIVIDE_CTRL_H_DEFAULT_DEFAULT_DIV_VAL_LCLK_M 
MAKEMASK(0x7, 6) +#define GL_PWR_MODE_DIVIDE_CTRL_H_DEFAULT_DEFAULT_DIV_VAL_PSM_S 9 +#define GL_PWR_MODE_DIVIDE_CTRL_H_DEFAULT_DEFAULT_DIV_VAL_PSM_M MAKEMASK(0x7, 9) +#define GL_PWR_MODE_DIVIDE_CTRL_H_DEFAULT_DEFAULT_DIV_VAL_RXCTL_S 12 +#define GL_PWR_MODE_DIVIDE_CTRL_H_DEFAULT_DEFAULT_DIV_VAL_RXCTL_M MAKEMASK(0x7, 12) +#define GL_PWR_MODE_DIVIDE_CTRL_H_DEFAULT_DEFAULT_DIV_VAL_UANA_S 15 +#define GL_PWR_MODE_DIVIDE_CTRL_H_DEFAULT_DEFAULT_DIV_VAL_UANA_M MAKEMASK(0x7, 15) +#define GL_PWR_MODE_DIVIDE_CTRL_H_DEFAULT_DEFAULT_DIV_VAL_S5_S 18 +#define GL_PWR_MODE_DIVIDE_CTRL_H_DEFAULT_DEFAULT_DIV_VAL_S5_M MAKEMASK(0x7, 18) +#define GL_PWR_MODE_DIVIDE_CTRL_L_DEFAULT 0x000B8218 /* Reset Source: POR */ +#define GL_PWR_MODE_DIVIDE_CTRL_L_DEFAULT_DEFAULT_DIV_VAL_PECLK_S 0 +#define GL_PWR_MODE_DIVIDE_CTRL_L_DEFAULT_DEFAULT_DIV_VAL_PECLK_M MAKEMASK(0x7, 0) +#define GL_PWR_MODE_DIVIDE_CTRL_L_DEFAULT_DEFAULT_DIV_VAL_UCLK_S 3 +#define GL_PWR_MODE_DIVIDE_CTRL_L_DEFAULT_DEFAULT_DIV_VAL_UCLK_M MAKEMASK(0x7, 3) +#define GL_PWR_MODE_DIVIDE_CTRL_L_DEFAULT_DEFAULT_DIV_VAL_LCLK_S 6 +#define GL_PWR_MODE_DIVIDE_CTRL_L_DEFAULT_DEFAULT_DIV_VAL_LCLK_M MAKEMASK(0x7, 6) +#define GL_PWR_MODE_DIVIDE_CTRL_L_DEFAULT_DEFAULT_DIV_VAL_PSM_S 9 +#define GL_PWR_MODE_DIVIDE_CTRL_L_DEFAULT_DEFAULT_DIV_VAL_PSM_M MAKEMASK(0x7, 9) +#define GL_PWR_MODE_DIVIDE_CTRL_L_DEFAULT_DEFAULT_DIV_VAL_RXCTL_S 12 +#define GL_PWR_MODE_DIVIDE_CTRL_L_DEFAULT_DEFAULT_DIV_VAL_RXCTL_M MAKEMASK(0x7, 12) +#define GL_PWR_MODE_DIVIDE_CTRL_L_DEFAULT_DEFAULT_DIV_VAL_UANA_S 15 +#define GL_PWR_MODE_DIVIDE_CTRL_L_DEFAULT_DEFAULT_DIV_VAL_UANA_M MAKEMASK(0x7, 15) +#define GL_PWR_MODE_DIVIDE_CTRL_L_DEFAULT_DEFAULT_DIV_VAL_S5_S 18 +#define GL_PWR_MODE_DIVIDE_CTRL_L_DEFAULT_DEFAULT_DIV_VAL_S5_M MAKEMASK(0x7, 18) +#define GL_PWR_MODE_DIVIDE_CTRL_M_DEFAULT 0x000B8260 /* Reset Source: POR */ +#define GL_PWR_MODE_DIVIDE_CTRL_M_DEFAULT_DEFAULT_DIV_VAL_PECLK_S 0 +#define GL_PWR_MODE_DIVIDE_CTRL_M_DEFAULT_DEFAULT_DIV_VAL_PECLK_M MAKEMASK(0x7, 0) +#define GL_PWR_MODE_DIVIDE_CTRL_M_DEFAULT_DEFAULT_DIV_VAL_UCLK_S 3 +#define GL_PWR_MODE_DIVIDE_CTRL_M_DEFAULT_DEFAULT_DIV_VAL_UCLK_M MAKEMASK(0x7, 3) +#define GL_PWR_MODE_DIVIDE_CTRL_M_DEFAULT_DEFAULT_DIV_VAL_LCLK_S 6 +#define GL_PWR_MODE_DIVIDE_CTRL_M_DEFAULT_DEFAULT_DIV_VAL_LCLK_M MAKEMASK(0x7, 6) +#define GL_PWR_MODE_DIVIDE_CTRL_M_DEFAULT_DEFAULT_DIV_VAL_PSM_S 9 +#define GL_PWR_MODE_DIVIDE_CTRL_M_DEFAULT_DEFAULT_DIV_VAL_PSM_M MAKEMASK(0x7, 9) +#define GL_PWR_MODE_DIVIDE_CTRL_M_DEFAULT_DEFAULT_DIV_VAL_RXCTL_S 12 +#define GL_PWR_MODE_DIVIDE_CTRL_M_DEFAULT_DEFAULT_DIV_VAL_RXCTL_M MAKEMASK(0x7, 12) +#define GL_PWR_MODE_DIVIDE_CTRL_M_DEFAULT_DEFAULT_DIV_VAL_UANA_S 15 +#define GL_PWR_MODE_DIVIDE_CTRL_M_DEFAULT_DEFAULT_DIV_VAL_UANA_M MAKEMASK(0x7, 15) +#define GL_PWR_MODE_DIVIDE_CTRL_M_DEFAULT_DEFAULT_DIV_VAL_S5_S 18 +#define GL_PWR_MODE_DIVIDE_CTRL_M_DEFAULT_DEFAULT_DIV_VAL_S5_M MAKEMASK(0x7, 18) +#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_LCLK 0x000B8200 /* Reset Source: POR */ +#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_LCLK_DIV_VAL_TBW_50G_H_S 0 +#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_LCLK_DIV_VAL_TBW_50G_H_M MAKEMASK(0x7, 0) +#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_LCLK_DIV_VAL_TBW_25G_H_S 3 +#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_LCLK_DIV_VAL_TBW_25G_H_M MAKEMASK(0x7, 3) +#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_LCLK_DIV_VAL_TBW_10G_H_S 6 +#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_LCLK_DIV_VAL_TBW_10G_H_M MAKEMASK(0x7, 6) +#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_LCLK_DIV_VAL_TBW_4G_H_S 9 +#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_LCLK_DIV_VAL_TBW_4G_H_M MAKEMASK(0x7, 
9) +#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_LCLK_DIV_VAL_TBW_A50G_H_S 12 +#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_LCLK_DIV_VAL_TBW_A50G_H_M MAKEMASK(0xF, 12) +#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_PECLK 0x000B81F0 /* Reset Source: POR */ +#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_PECLK_DIV_VAL_TBW_50G_H_S 0 +#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_PECLK_DIV_VAL_TBW_50G_H_M MAKEMASK(0x7, 0) +#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_PECLK_DIV_VAL_TBW_25G_H_S 3 +#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_PECLK_DIV_VAL_TBW_25G_H_M MAKEMASK(0x7, 3) +#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_PECLK_DIV_VAL_TBW_10G_H_S 6 +#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_PECLK_DIV_VAL_TBW_10G_H_M MAKEMASK(0x7, 6) +#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_PECLK_DIV_VAL_TBW_4G_H_S 9 +#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_PECLK_DIV_VAL_TBW_4G_H_M MAKEMASK(0x7, 9) +#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_PECLK_DIV_VAL_TBW_A50G_H_S 12 +#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_PECLK_DIV_VAL_TBW_A50G_H_M MAKEMASK(0xF, 12) +#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_PSM 0x000B81FC /* Reset Source: POR */ +#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_PSM_DIV_VAL_TBW_50G_H_S 0 +#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_PSM_DIV_VAL_TBW_50G_H_M MAKEMASK(0x7, 0) +#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_PSM_DIV_VAL_TBW_25G_H_S 3 +#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_PSM_DIV_VAL_TBW_25G_H_M MAKEMASK(0x7, 3) +#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_PSM_DIV_VAL_TBW_10G_H_S 6 +#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_PSM_DIV_VAL_TBW_10G_H_M MAKEMASK(0x7, 6) +#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_PSM_DIV_VAL_TBW_4G_H_S 9 +#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_PSM_DIV_VAL_TBW_4G_H_M MAKEMASK(0x7, 9) +#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_PSM_DIV_VAL_TBW_A50G_H_S 12 +#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_PSM_DIV_VAL_TBW_A50G_H_M MAKEMASK(0xF, 12) +#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_RXCTL 0x000B81F8 /* Reset Source: POR */ +#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_RXCTL_DIV_VAL_TBW_50G_H_S 0 +#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_RXCTL_DIV_VAL_TBW_50G_H_M MAKEMASK(0x7, 0) +#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_RXCTL_DIV_VAL_TBW_25G_H_S 3 +#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_RXCTL_DIV_VAL_TBW_25G_H_M MAKEMASK(0x7, 3) +#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_RXCTL_DIV_VAL_TBW_10G_H_S 6 +#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_RXCTL_DIV_VAL_TBW_10G_H_M MAKEMASK(0x7, 6) +#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_RXCTL_DIV_VAL_TBW_4G_H_S 9 +#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_RXCTL_DIV_VAL_TBW_4G_H_M MAKEMASK(0x7, 9) +#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_RXCTL_DIV_VAL_TBW_A50G_H_S 12 +#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_RXCTL_DIV_VAL_TBW_A50G_H_M MAKEMASK(0xF, 12) +#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_UANA 0x000B8208 /* Reset Source: POR */ +#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_UANA_DIV_VAL_TBW_50G_H_S 0 +#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_UANA_DIV_VAL_TBW_50G_H_M MAKEMASK(0x7, 0) +#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_UANA_DIV_VAL_TBW_25G_H_S 3 +#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_UANA_DIV_VAL_TBW_25G_H_M MAKEMASK(0x7, 3) +#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_UANA_DIV_VAL_TBW_10G_H_S 6 +#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_UANA_DIV_VAL_TBW_10G_H_M MAKEMASK(0x7, 6) +#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_UANA_DIV_VAL_TBW_4G_H_S 9 +#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_UANA_DIV_VAL_TBW_4G_H_M MAKEMASK(0x7, 9) +#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_UANA_DIV_VAL_TBW_A50G_H_S 12 +#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_UANA_DIV_VAL_TBW_A50G_H_M MAKEMASK(0xF, 12) +#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_UCLK 0x000B81F4 /* Reset Source: POR */ +#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_UCLK_DIV_VAL_TBW_50G_H_S 0 +#define 
GL_PWR_MODE_DIVIDE_S0_CTRL_H_UCLK_DIV_VAL_TBW_50G_H_M MAKEMASK(0x7, 0) +#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_UCLK_DIV_VAL_TBW_25G_H_S 3 +#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_UCLK_DIV_VAL_TBW_25G_H_M MAKEMASK(0x7, 3) +#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_UCLK_DIV_VAL_TBW_10G_H_S 6 +#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_UCLK_DIV_VAL_TBW_10G_H_M MAKEMASK(0x7, 6) +#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_UCLK_DIV_VAL_TBW_4G_H_S 9 +#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_UCLK_DIV_VAL_TBW_4G_H_M MAKEMASK(0x7, 9) +#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_UCLK_DIV_VAL_TBW_A50G_H_S 12 +#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_UCLK_DIV_VAL_TBW_A50G_H_M MAKEMASK(0xF, 12) +#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_LCLK 0x000B8244 /* Reset Source: POR */ +#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_LCLK_DIV_VAL_TBW_50G_L_S 0 +#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_LCLK_DIV_VAL_TBW_50G_L_M MAKEMASK(0x7, 0) +#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_LCLK_DIV_VAL_TBW_25G_L_S 3 +#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_LCLK_DIV_VAL_TBW_25G_L_M MAKEMASK(0x7, 3) +#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_LCLK_DIV_VAL_TBW_10G_L_S 6 +#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_LCLK_DIV_VAL_TBW_10G_L_M MAKEMASK(0x7, 6) +#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_LCLK_DIV_VAL_TBW_4G_L_S 9 +#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_LCLK_DIV_VAL_TBW_4G_L_M MAKEMASK(0x7, 9) +#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_LCLK_DIV_VAL_TBW_A50G_L_S 12 +#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_LCLK_DIV_VAL_TBW_A50G_L_M MAKEMASK(0x7, 12) +#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_PECLK 0x000B8220 /* Reset Source: POR */ +#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_PECLK_DIV_VAL_TBW_50G_L_S 0 +#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_PECLK_DIV_VAL_TBW_50G_L_M MAKEMASK(0x7, 0) +#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_PECLK_DIV_VAL_TBW_25G_L_S 3 +#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_PECLK_DIV_VAL_TBW_25G_L_M MAKEMASK(0x7, 3) +#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_PECLK_DIV_VAL_TBW_10G_L_S 6 +#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_PECLK_DIV_VAL_TBW_10G_L_M MAKEMASK(0x7, 6) +#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_PECLK_DIV_VAL_TBW_4G_L_S 9 +#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_PECLK_DIV_VAL_TBW_4G_L_M MAKEMASK(0x7, 9) +#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_PECLK_DIV_VAL_TBW_A50G_L_S 12 +#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_PECLK_DIV_VAL_TBW_A50G_L_M MAKEMASK(0x7, 12) +#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_PSM 0x000B8240 /* Reset Source: POR */ +#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_PSM_DIV_VAL_TBW_50G_L_S 0 +#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_PSM_DIV_VAL_TBW_50G_L_M MAKEMASK(0x7, 0) +#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_PSM_DIV_VAL_TBW_25G_L_S 3 +#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_PSM_DIV_VAL_TBW_25G_L_M MAKEMASK(0x7, 3) +#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_PSM_DIV_VAL_TBW_10G_L_S 6 +#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_PSM_DIV_VAL_TBW_10G_L_M MAKEMASK(0x7, 6) +#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_PSM_DIV_VAL_TBW_4G_L_S 9 +#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_PSM_DIV_VAL_TBW_4G_L_M MAKEMASK(0x7, 9) +#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_PSM_DIV_VAL_TBW_A50G_L_S 12 +#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_PSM_DIV_VAL_TBW_A50G_L_M MAKEMASK(0x7, 12) +#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_RXCTL 0x000B823C /* Reset Source: POR */ +#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_RXCTL_DIV_VAL_TBW_50G_L_S 0 +#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_RXCTL_DIV_VAL_TBW_50G_L_M MAKEMASK(0x7, 0) +#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_RXCTL_DIV_VAL_TBW_25G_L_S 3 +#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_RXCTL_DIV_VAL_TBW_25G_L_M MAKEMASK(0x7, 3) +#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_RXCTL_DIV_VAL_TBW_10G_L_S 6 +#define 
GL_PWR_MODE_DIVIDE_S0_CTRL_L_RXCTL_DIV_VAL_TBW_10G_L_M MAKEMASK(0x7, 6) +#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_RXCTL_DIV_VAL_TBW_4G_L_S 9 +#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_RXCTL_DIV_VAL_TBW_4G_L_M MAKEMASK(0x7, 9) +#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_RXCTL_DIV_VAL_TBW_A50G_L_S 12 +#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_RXCTL_DIV_VAL_TBW_A50G_L_M MAKEMASK(0x7, 12) +#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_UANA 0x000B8248 /* Reset Source: POR */ +#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_UANA_DIV_VAL_TBW_50G_L_S 0 +#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_UANA_DIV_VAL_TBW_50G_L_M MAKEMASK(0x7, 0) +#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_UANA_DIV_VAL_TBW_25G_L_S 3 +#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_UANA_DIV_VAL_TBW_25G_L_M MAKEMASK(0x7, 3) +#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_UANA_DIV_VAL_TBW_10G_L_S 6 +#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_UANA_DIV_VAL_TBW_10G_L_M MAKEMASK(0x7, 6) +#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_UANA_DIV_VAL_TBW_4G_L_S 9 +#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_UANA_DIV_VAL_TBW_4G_L_M MAKEMASK(0x7, 9) +#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_UANA_DIV_VAL_TBW_A50G_L_S 12 +#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_UANA_DIV_VAL_TBW_A50G_L_M MAKEMASK(0x7, 12) +#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_UCLK 0x000B8238 /* Reset Source: POR */ +#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_UCLK_DIV_VAL_TBW_50G_L_S 0 +#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_UCLK_DIV_VAL_TBW_50G_L_M MAKEMASK(0x7, 0) +#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_UCLK_DIV_VAL_TBW_25G_L_S 3 +#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_UCLK_DIV_VAL_TBW_25G_L_M MAKEMASK(0x7, 3) +#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_UCLK_DIV_VAL_TBW_10G_L_S 6 +#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_UCLK_DIV_VAL_TBW_10G_L_M MAKEMASK(0x7, 6) +#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_UCLK_DIV_VAL_TBW_4G_L_S 9 +#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_UCLK_DIV_VAL_TBW_4G_L_M MAKEMASK(0x7, 9) +#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_UCLK_DIV_VAL_TBW_A50G_L_S 12 +#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_UCLK_DIV_VAL_TBW_A50G_L_M MAKEMASK(0x7, 12) +#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_LCLK 0x000B8230 /* Reset Source: POR */ +#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_LCLK_DIV_VAL_TBW_50G_M_S 0 +#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_LCLK_DIV_VAL_TBW_50G_M_M MAKEMASK(0x7, 0) +#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_LCLK_DIV_VAL_TBW_25G_M_S 3 +#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_LCLK_DIV_VAL_TBW_25G_M_M MAKEMASK(0x7, 3) +#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_LCLK_DIV_VAL_TBW_10G_M_S 6 +#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_LCLK_DIV_VAL_TBW_10G_M_M MAKEMASK(0x7, 6) +#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_LCLK_DIV_VAL_TBW_4G_M_S 9 +#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_LCLK_DIV_VAL_TBW_4G_M_M MAKEMASK(0x7, 9) +#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_LCLK_DIV_VAL_TBW_A50G_M_S 12 +#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_LCLK_DIV_VAL_TBW_A50G_M_M MAKEMASK(0x7, 12) +#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_PECLK 0x000B821C /* Reset Source: POR */ +#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_PECLK_DIV_VAL_TBW_50G_M_S 0 +#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_PECLK_DIV_VAL_TBW_50G_M_M MAKEMASK(0x7, 0) +#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_PECLK_DIV_VAL_TBW_25G_M_S 3 +#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_PECLK_DIV_VAL_TBW_25G_M_M MAKEMASK(0x7, 3) +#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_PECLK_DIV_VAL_TBW_10G_M_S 6 +#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_PECLK_DIV_VAL_TBW_10G_M_M MAKEMASK(0x7, 6) +#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_PECLK_DIV_VAL_TBW_4G_M_S 9 +#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_PECLK_DIV_VAL_TBW_4G_M_M MAKEMASK(0x7, 9) +#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_PECLK_DIV_VAL_TBW_A50G_M_S 12 +#define 
GL_PWR_MODE_DIVIDE_S0_CTRL_M_PECLK_DIV_VAL_TBW_A50G_M_M MAKEMASK(0x7, 12) +#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_PSM 0x000B822C /* Reset Source: POR */ +#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_PSM_DIV_VAL_TBW_50G_M_S 0 +#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_PSM_DIV_VAL_TBW_50G_M_M MAKEMASK(0x7, 0) +#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_PSM_DIV_VAL_TBW_25G_M_S 3 +#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_PSM_DIV_VAL_TBW_25G_M_M MAKEMASK(0x7, 3) +#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_PSM_DIV_VAL_TBW_10G_M_S 6 +#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_PSM_DIV_VAL_TBW_10G_M_M MAKEMASK(0x7, 6) +#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_PSM_DIV_VAL_TBW_4G_M_S 9 +#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_PSM_DIV_VAL_TBW_4G_M_M MAKEMASK(0x7, 9) +#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_PSM_DIV_VAL_TBW_A50G_M_S 12 +#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_PSM_DIV_VAL_TBW_A50G_M_M MAKEMASK(0x7, 12) +#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_RXCTL 0x000B8228 /* Reset Source: POR */ +#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_RXCTL_DIV_VAL_TBW_50G_M_S 0 +#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_RXCTL_DIV_VAL_TBW_50G_M_M MAKEMASK(0x7, 0) +#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_RXCTL_DIV_VAL_TBW_25G_M_S 3 +#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_RXCTL_DIV_VAL_TBW_25G_M_M MAKEMASK(0x7, 3) +#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_RXCTL_DIV_VAL_TBW_10G_M_S 6 +#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_RXCTL_DIV_VAL_TBW_10G_M_M MAKEMASK(0x7, 6) +#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_RXCTL_DIV_VAL_TBW_4G_M_S 9 +#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_RXCTL_DIV_VAL_TBW_4G_M_M MAKEMASK(0x7, 9) +#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_RXCTL_DIV_VAL_TBW_A50G_M_S 12 +#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_RXCTL_DIV_VAL_TBW_A50G_M_M MAKEMASK(0x7, 12) +#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_UANA 0x000B8234 /* Reset Source: POR */ +#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_UANA_DIV_VAL_TBW_50G_M_S 0 +#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_UANA_DIV_VAL_TBW_50G_M_M MAKEMASK(0x7, 0) +#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_UANA_DIV_VAL_TBW_25G_M_S 3 +#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_UANA_DIV_VAL_TBW_25G_M_M MAKEMASK(0x7, 3) +#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_UANA_DIV_VAL_TBW_10G_M_S 6 +#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_UANA_DIV_VAL_TBW_10G_M_M MAKEMASK(0x7, 6) +#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_UANA_DIV_VAL_TBW_4G_M_S 9 +#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_UANA_DIV_VAL_TBW_4G_M_M MAKEMASK(0x7, 9) +#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_UANA_DIV_VAL_TBW_A50G_M_S 12 +#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_UANA_DIV_VAL_TBW_A50G_M_M MAKEMASK(0x7, 12) +#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_UCLK 0x000B8224 /* Reset Source: POR */ +#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_UCLK_DIV_VAL_TBW_50G_M_S 0 +#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_UCLK_DIV_VAL_TBW_50G_M_M MAKEMASK(0x7, 0) +#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_UCLK_DIV_VAL_TBW_25G_M_S 3 +#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_UCLK_DIV_VAL_TBW_25G_M_M MAKEMASK(0x7, 3) +#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_UCLK_DIV_VAL_TBW_10G_M_S 6 +#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_UCLK_DIV_VAL_TBW_10G_M_M MAKEMASK(0x7, 6) +#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_UCLK_DIV_VAL_TBW_4G_M_S 9 +#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_UCLK_DIV_VAL_TBW_4G_M_M MAKEMASK(0x7, 9) +#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_UCLK_DIV_VAL_TBW_A50G_M_S 12 +#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_UCLK_DIV_VAL_TBW_A50G_M_M MAKEMASK(0x7, 12) +#define GL_PWR_MODE_DIVIDE_S5_H_CTRL 0x000B81EC /* Reset Source: POR */ +#define GL_PWR_MODE_DIVIDE_S5_H_CTRL_DIV_VAL_TBW_50G_H_S 0 +#define GL_PWR_MODE_DIVIDE_S5_H_CTRL_DIV_VAL_TBW_50G_H_M MAKEMASK(0x7, 0) +#define 
GL_PWR_MODE_DIVIDE_S5_H_CTRL_DIV_VAL_TBW_25G_H_S 3 +#define GL_PWR_MODE_DIVIDE_S5_H_CTRL_DIV_VAL_TBW_25G_H_M MAKEMASK(0x7, 3) +#define GL_PWR_MODE_DIVIDE_S5_H_CTRL_DIV_VAL_TBW_10G_H_S 6 +#define GL_PWR_MODE_DIVIDE_S5_H_CTRL_DIV_VAL_TBW_10G_H_M MAKEMASK(0x7, 6) +#define GL_PWR_MODE_DIVIDE_S5_H_CTRL_DIV_VAL_TBW_4G_H_S 9 +#define GL_PWR_MODE_DIVIDE_S5_H_CTRL_DIV_VAL_TBW_4G_H_M MAKEMASK(0x7, 9) +#define GL_PWR_MODE_DIVIDE_S5_H_CTRL_DIV_VAL_TBW_A50G_H_S 12 +#define GL_PWR_MODE_DIVIDE_S5_H_CTRL_DIV_VAL_TBW_A50G_H_M MAKEMASK(0xF, 12) +#define GL_PWR_MODE_DIVIDE_S5_L_CTRL 0x000B824C /* Reset Source: POR */ +#define GL_PWR_MODE_DIVIDE_S5_L_CTRL_DIV_VAL_TBW_50G_L_S 0 +#define GL_PWR_MODE_DIVIDE_S5_L_CTRL_DIV_VAL_TBW_50G_L_M MAKEMASK(0x7, 0) +#define GL_PWR_MODE_DIVIDE_S5_L_CTRL_DIV_VAL_TBW_25G_L_S 3 +#define GL_PWR_MODE_DIVIDE_S5_L_CTRL_DIV_VAL_TBW_25G_L_M MAKEMASK(0x7, 3) +#define GL_PWR_MODE_DIVIDE_S5_L_CTRL_DIV_VAL_TBW_10G_L_S 6 +#define GL_PWR_MODE_DIVIDE_S5_L_CTRL_DIV_VAL_TBW_10G_L_M MAKEMASK(0x7, 6) +#define GL_PWR_MODE_DIVIDE_S5_L_CTRL_DIV_VAL_TBW_4G_L_S 9 +#define GL_PWR_MODE_DIVIDE_S5_L_CTRL_DIV_VAL_TBW_4G_L_M MAKEMASK(0x7, 9) +#define GL_PWR_MODE_DIVIDE_S5_L_CTRL_DIV_VAL_TBW_A50G_L_S 12 +#define GL_PWR_MODE_DIVIDE_S5_L_CTRL_DIV_VAL_TBW_A50G_L_M MAKEMASK(0x7, 12) +#define GL_PWR_MODE_DIVIDE_S5_M_CTRL 0x000B8250 /* Reset Source: POR */ +#define GL_PWR_MODE_DIVIDE_S5_M_CTRL_DIV_VAL_TBW_50G_M_S 0 +#define GL_PWR_MODE_DIVIDE_S5_M_CTRL_DIV_VAL_TBW_50G_M_M MAKEMASK(0x7, 0) +#define GL_PWR_MODE_DIVIDE_S5_M_CTRL_DIV_VAL_TBW_25G_M_S 3 +#define GL_PWR_MODE_DIVIDE_S5_M_CTRL_DIV_VAL_TBW_25G_M_M MAKEMASK(0x7, 3) +#define GL_PWR_MODE_DIVIDE_S5_M_CTRL_DIV_VAL_TBW_10G_M_S 6 +#define GL_PWR_MODE_DIVIDE_S5_M_CTRL_DIV_VAL_TBW_10G_M_M MAKEMASK(0x7, 6) +#define GL_PWR_MODE_DIVIDE_S5_M_CTRL_DIV_VAL_TBW_4G_M_S 9 +#define GL_PWR_MODE_DIVIDE_S5_M_CTRL_DIV_VAL_TBW_4G_M_M MAKEMASK(0x7, 9) +#define GL_PWR_MODE_DIVIDE_S5_M_CTRL_DIV_VAL_TBW_A50G_M_S 12 +#define GL_PWR_MODE_DIVIDE_S5_M_CTRL_DIV_VAL_TBW_A50G_M_M MAKEMASK(0x7, 12) +#define GL_S5_PWR_MODE_EXIT_CTL 0x000B8270 /* Reset Source: POR */ +#define GL_S5_PWR_MODE_EXIT_CTL_S5_PWR_MODE_AUTO_EXIT_S 0 +#define GL_S5_PWR_MODE_EXIT_CTL_S5_PWR_MODE_AUTO_EXIT_M BIT(0) +#define GL_S5_PWR_MODE_EXIT_CTL_S5_PWR_MODE_FW_EXIT_S 1 +#define GL_S5_PWR_MODE_EXIT_CTL_S5_PWR_MODE_FW_EXIT_M BIT(1) +#define GL_S5_PWR_MODE_EXIT_CTL_S5_PWR_MODE_PRST_FLOWS_ON_CORER_S 3 +#define GL_S5_PWR_MODE_EXIT_CTL_S5_PWR_MODE_PRST_FLOWS_ON_CORER_M BIT(3) +#define GLGEN_PME_TO 0x000B81BC /* Reset Source: POR */ +#define GLGEN_PME_TO_PME_TO_FOR_PE_S 0 +#define GLGEN_PME_TO_PME_TO_FOR_PE_M BIT(0) +#define PRTPM_EEE_STAT 0x001E4320 /* Reset Source: GLOBR */ +#define PRTPM_EEE_STAT_EEE_NEG_S 29 +#define PRTPM_EEE_STAT_EEE_NEG_M BIT(29) +#define PRTPM_EEE_STAT_RX_LPI_STATUS_S 30 +#define PRTPM_EEE_STAT_RX_LPI_STATUS_M BIT(30) +#define PRTPM_EEE_STAT_TX_LPI_STATUS_S 31 +#define PRTPM_EEE_STAT_TX_LPI_STATUS_M BIT(31) +#define PRTPM_EEEC 0x001E4380 /* Reset Source: GLOBR */ +#define PRTPM_EEEC_TW_WAKE_MIN_S 16 +#define PRTPM_EEEC_TW_WAKE_MIN_M MAKEMASK(0x3F, 16) +#define PRTPM_EEEC_TX_LU_LPI_DLY_S 24 +#define PRTPM_EEEC_TX_LU_LPI_DLY_M MAKEMASK(0x3, 24) +#define PRTPM_EEEC_TEEE_DLY_S 26 +#define PRTPM_EEEC_TEEE_DLY_M MAKEMASK(0x3F, 26) +#define PRTPM_EEEFWD 0x001E4400 /* Reset Source: GLOBR */ +#define PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_S 31 +#define PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_M BIT(31) +#define PRTPM_EEER 0x001E4360 /* Reset Source: GLOBR */ +#define PRTPM_EEER_TW_SYSTEM_S 0 +#define PRTPM_EEER_TW_SYSTEM_M 
MAKEMASK(0xFFFF, 0) +#define PRTPM_EEER_TX_LPI_EN_S 16 +#define PRTPM_EEER_TX_LPI_EN_M BIT(16) +#define PRTPM_EEETXC 0x001E43E0 /* Reset Source: GLOBR */ +#define PRTPM_EEETXC_TW_PHY_S 0 +#define PRTPM_EEETXC_TW_PHY_M MAKEMASK(0xFFFF, 0) +#define PRTPM_RLPIC 0x001E43A0 /* Reset Source: GLOBR */ +#define PRTPM_RLPIC_ERLPIC_S 0 +#define PRTPM_RLPIC_ERLPIC_M MAKEMASK(0xFFFFFFFF, 0) +#define PRTPM_TLPIC 0x001E43C0 /* Reset Source: GLOBR */ +#define PRTPM_TLPIC_ETLPIC_S 0 +#define PRTPM_TLPIC_ETLPIC_M MAKEMASK(0xFFFFFFFF, 0) +#define GLRPB_DHW(_i) (0x000AC000 + ((_i) * 4)) /* _i=0...15 */ /* Reset Source: CORER */ +#define GLRPB_DHW_MAX_INDEX 15 +#define GLRPB_DHW_DHW_TCN_S 0 +#define GLRPB_DHW_DHW_TCN_M MAKEMASK(0xFFFFF, 0) +#define GLRPB_DLW(_i) (0x000AC044 + ((_i) * 4)) /* _i=0...15 */ /* Reset Source: CORER */ +#define GLRPB_DLW_MAX_INDEX 15 +#define GLRPB_DLW_DLW_TCN_S 0 +#define GLRPB_DLW_DLW_TCN_M MAKEMASK(0xFFFFF, 0) +#define GLRPB_DPS(_i) (0x000AC084 + ((_i) * 4)) /* _i=0...15 */ /* Reset Source: CORER */ +#define GLRPB_DPS_MAX_INDEX 15 +#define GLRPB_DPS_DPS_TCN_S 0 +#define GLRPB_DPS_DPS_TCN_M MAKEMASK(0xFFFFF, 0) +#define GLRPB_DSI_EN 0x000AC324 /* Reset Source: CORER */ +#define GLRPB_DSI_EN_DSI_EN_S 0 +#define GLRPB_DSI_EN_DSI_EN_M BIT(0) +#define GLRPB_DSI_EN_DSI_L2_MAC_ERR_DROP_EN_S 1 +#define GLRPB_DSI_EN_DSI_L2_MAC_ERR_DROP_EN_M BIT(1) +#define GLRPB_SHW(_i) (0x000AC120 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLRPB_SHW_MAX_INDEX 7 +#define GLRPB_SHW_SHW_S 0 +#define GLRPB_SHW_SHW_M MAKEMASK(0xFFFFF, 0) +#define GLRPB_SLW(_i) (0x000AC140 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLRPB_SLW_MAX_INDEX 7 +#define GLRPB_SLW_SLW_S 0 +#define GLRPB_SLW_SLW_M MAKEMASK(0xFFFFF, 0) +#define GLRPB_SPS(_i) (0x000AC0C4 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLRPB_SPS_MAX_INDEX 7 +#define GLRPB_SPS_SPS_TCN_S 0 +#define GLRPB_SPS_SPS_TCN_M MAKEMASK(0xFFFFF, 0) +#define GLRPB_TC_CFG(_i) (0x000AC2A4 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ +#define GLRPB_TC_CFG_MAX_INDEX 31 +#define GLRPB_TC_CFG_D_POOL_S 0 +#define GLRPB_TC_CFG_D_POOL_M MAKEMASK(0xFFFF, 0) +#define GLRPB_TC_CFG_S_POOL_S 16 +#define GLRPB_TC_CFG_S_POOL_M MAKEMASK(0xFFFF, 16) +#define GLRPB_TCHW(_i) (0x000AC330 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ +#define GLRPB_TCHW_MAX_INDEX 31 +#define GLRPB_TCHW_TCHW_S 0 +#define GLRPB_TCHW_TCHW_M MAKEMASK(0xFFFFF, 0) +#define GLRPB_TCLW(_i) (0x000AC3B0 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ +#define GLRPB_TCLW_MAX_INDEX 31 +#define GLRPB_TCLW_TCLW_S 0 +#define GLRPB_TCLW_TCLW_M MAKEMASK(0xFFFFF, 0) +#define GLQF_APBVT(_i) (0x00450000 + ((_i) * 4)) /* _i=0...2047 */ /* Reset Source: CORER */ +#define GLQF_APBVT_MAX_INDEX 2047 +#define GLQF_APBVT_APBVT_S 0 +#define GLQF_APBVT_APBVT_M MAKEMASK(0xFFFFFFFF, 0) +#define GLQF_FD_CLSN_0 0x00460028 /* Reset Source: CORER */ +#define GLQF_FD_CLSN_0_HITSBCNT_S 0 +#define GLQF_FD_CLSN_0_HITSBCNT_M MAKEMASK(0xFFFFFFFF, 0) +#define GLQF_FD_CLSN1 0x00460030 /* Reset Source: CORER */ +#define GLQF_FD_CLSN1_HITLBCNT_S 0 +#define GLQF_FD_CLSN1_HITLBCNT_M MAKEMASK(0xFFFFFFFF, 0) +#define GLQF_FD_CNT 0x00460018 /* Reset Source: CORER */ +#define GLQF_FD_CNT_FD_GCNT_S 0 +#define GLQF_FD_CNT_FD_GCNT_M MAKEMASK(0x7FFF, 0) +#define GLQF_FD_CNT_FD_BCNT_S 16 +#define GLQF_FD_CNT_FD_BCNT_M MAKEMASK(0x7FFF, 16) +#define GLQF_FD_CTL 0x00460000 /* Reset Source: CORER */ +#define GLQF_FD_CTL_FDLONG_S 0 +#define GLQF_FD_CTL_FDLONG_M MAKEMASK(0xF, 0) 
+#define GLQF_FD_CTL_HASH_REPORT_S 4 +#define GLQF_FD_CTL_HASH_REPORT_M BIT(4) +#define GLQF_FD_CTL_FLT_ADDR_REPORT_S 5 +#define GLQF_FD_CTL_FLT_ADDR_REPORT_M BIT(5) +#define GLQF_FD_SIZE 0x00460010 /* Reset Source: CORER */ +#define GLQF_FD_SIZE_FD_GSIZE_S 0 +#define GLQF_FD_SIZE_FD_GSIZE_M MAKEMASK(0x7FFF, 0) +#define GLQF_FD_SIZE_FD_BSIZE_S 16 +#define GLQF_FD_SIZE_FD_BSIZE_M MAKEMASK(0x7FFF, 16) +#define GLQF_FDCNT_0 0x00460020 /* Reset Source: CORER */ +#define GLQF_FDCNT_0_BUCKETCNT_S 0 +#define GLQF_FDCNT_0_BUCKETCNT_M MAKEMASK(0x7FFF, 0) +#define GLQF_FDCNT_0_CNT_NOT_VLD_S 31 +#define GLQF_FDCNT_0_CNT_NOT_VLD_M BIT(31) +#define GLQF_FDEVICTENA(_i) (0x00452000 + ((_i) * 4)) /* _i=0...3 */ /* Reset Source: CORER */ +#define GLQF_FDEVICTENA_MAX_INDEX 3 +#define GLQF_FDEVICTENA_FDEVICTENA_S 0 +#define GLQF_FDEVICTENA_FDEVICTENA_M MAKEMASK(0xFFFFFFFF, 0) +#define GLQF_FDINSET(_i, _j) (0x00412000 + ((_i) * 4 + (_j) * 512)) /* _i=0...127, _j=0...5 */ /* Reset Source: CORER */ +#define GLQF_FDINSET_MAX_INDEX 127 +#define GLQF_FDINSET_FV_WORD_INDX0_S 0 +#define GLQF_FDINSET_FV_WORD_INDX0_M MAKEMASK(0x1F, 0) +#define GLQF_FDINSET_FV_WORD_VAL0_S 7 +#define GLQF_FDINSET_FV_WORD_VAL0_M BIT(7) +#define GLQF_FDINSET_FV_WORD_INDX1_S 8 +#define GLQF_FDINSET_FV_WORD_INDX1_M MAKEMASK(0x1F, 8) +#define GLQF_FDINSET_FV_WORD_VAL1_S 15 +#define GLQF_FDINSET_FV_WORD_VAL1_M BIT(15) +#define GLQF_FDINSET_FV_WORD_INDX2_S 16 +#define GLQF_FDINSET_FV_WORD_INDX2_M MAKEMASK(0x1F, 16) +#define GLQF_FDINSET_FV_WORD_VAL2_S 23 +#define GLQF_FDINSET_FV_WORD_VAL2_M BIT(23) +#define GLQF_FDINSET_FV_WORD_INDX3_S 24 +#define GLQF_FDINSET_FV_WORD_INDX3_M MAKEMASK(0x1F, 24) +#define GLQF_FDINSET_FV_WORD_VAL3_S 31 +#define GLQF_FDINSET_FV_WORD_VAL3_M BIT(31) +#define GLQF_FDMASK(_i) (0x00410800 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ +#define GLQF_FDMASK_MAX_INDEX 31 +#define GLQF_FDMASK_MSK_INDEX_S 0 +#define GLQF_FDMASK_MSK_INDEX_M MAKEMASK(0x1F, 0) +#define GLQF_FDMASK_MASK_S 16 +#define GLQF_FDMASK_MASK_M MAKEMASK(0xFFFF, 16) +#define GLQF_FDMASK_SEL(_i) (0x00410400 + ((_i) * 4)) /* _i=0...127 */ /* Reset Source: CORER */ +#define GLQF_FDMASK_SEL_MAX_INDEX 127 +#define GLQF_FDMASK_SEL_MASK_SEL_S 0 +#define GLQF_FDMASK_SEL_MASK_SEL_M MAKEMASK(0xFFFFFFFF, 0) +#define GLQF_FDSWAP(_i, _j) (0x00413000 + ((_i) * 4 + (_j) * 512)) /* _i=0...127, _j=0...5 */ /* Reset Source: CORER */ +#define GLQF_FDSWAP_MAX_INDEX 127 +#define GLQF_FDSWAP_FV_WORD_INDX0_S 0 +#define GLQF_FDSWAP_FV_WORD_INDX0_M MAKEMASK(0x1F, 0) +#define GLQF_FDSWAP_FV_WORD_VAL0_S 7 +#define GLQF_FDSWAP_FV_WORD_VAL0_M BIT(7) +#define GLQF_FDSWAP_FV_WORD_INDX1_S 8 +#define GLQF_FDSWAP_FV_WORD_INDX1_M MAKEMASK(0x1F, 8) +#define GLQF_FDSWAP_FV_WORD_VAL1_S 15 +#define GLQF_FDSWAP_FV_WORD_VAL1_M BIT(15) +#define GLQF_FDSWAP_FV_WORD_INDX2_S 16 +#define GLQF_FDSWAP_FV_WORD_INDX2_M MAKEMASK(0x1F, 16) +#define GLQF_FDSWAP_FV_WORD_VAL2_S 23 +#define GLQF_FDSWAP_FV_WORD_VAL2_M BIT(23) +#define GLQF_FDSWAP_FV_WORD_INDX3_S 24 +#define GLQF_FDSWAP_FV_WORD_INDX3_M MAKEMASK(0x1F, 24) +#define GLQF_FDSWAP_FV_WORD_VAL3_S 31 +#define GLQF_FDSWAP_FV_WORD_VAL3_M BIT(31) +#define GLQF_HINSET(_i, _j) (0x0040E000 + ((_i) * 4 + (_j) * 512)) /* _i=0...127, _j=0...5 */ /* Reset Source: CORER */ +#define GLQF_HINSET_MAX_INDEX 127 +#define GLQF_HINSET_FV_WORD_INDX0_S 0 +#define GLQF_HINSET_FV_WORD_INDX0_M MAKEMASK(0x1F, 0) +#define GLQF_HINSET_FV_WORD_VAL0_S 7 +#define GLQF_HINSET_FV_WORD_VAL0_M BIT(7) +#define GLQF_HINSET_FV_WORD_INDX1_S 8 +#define GLQF_HINSET_FV_WORD_INDX1_M 
MAKEMASK(0x1F, 8) +#define GLQF_HINSET_FV_WORD_VAL1_S 15 +#define GLQF_HINSET_FV_WORD_VAL1_M BIT(15) +#define GLQF_HINSET_FV_WORD_INDX2_S 16 +#define GLQF_HINSET_FV_WORD_INDX2_M MAKEMASK(0x1F, 16) +#define GLQF_HINSET_FV_WORD_VAL2_S 23 +#define GLQF_HINSET_FV_WORD_VAL2_M BIT(23) +#define GLQF_HINSET_FV_WORD_INDX3_S 24 +#define GLQF_HINSET_FV_WORD_INDX3_M MAKEMASK(0x1F, 24) +#define GLQF_HINSET_FV_WORD_VAL3_S 31 +#define GLQF_HINSET_FV_WORD_VAL3_M BIT(31) +#define GLQF_HKEY(_i) (0x00456000 + ((_i) * 4)) /* _i=0...12 */ /* Reset Source: CORER */ +#define GLQF_HKEY_MAX_INDEX 12 +#define GLQF_HKEY_KEY_0_S 0 +#define GLQF_HKEY_KEY_0_M MAKEMASK(0xFF, 0) +#define GLQF_HKEY_KEY_1_S 8 +#define GLQF_HKEY_KEY_1_M MAKEMASK(0xFF, 8) +#define GLQF_HKEY_KEY_2_S 16 +#define GLQF_HKEY_KEY_2_M MAKEMASK(0xFF, 16) +#define GLQF_HKEY_KEY_3_S 24 +#define GLQF_HKEY_KEY_3_M MAKEMASK(0xFF, 24) +#define GLQF_HLUT(_i, _j) (0x00438000 + ((_i) * 4 + (_j) * 512)) /* _i=0...127, _j=0...15 */ /* Reset Source: CORER */ +#define GLQF_HLUT_MAX_INDEX 127 +#define GLQF_HLUT_LUT0_S 0 +#define GLQF_HLUT_LUT0_M MAKEMASK(0x3F, 0) +#define GLQF_HLUT_LUT1_S 8 +#define GLQF_HLUT_LUT1_M MAKEMASK(0x3F, 8) +#define GLQF_HLUT_LUT2_S 16 +#define GLQF_HLUT_LUT2_M MAKEMASK(0x3F, 16) +#define GLQF_HLUT_LUT3_S 24 +#define GLQF_HLUT_LUT3_M MAKEMASK(0x3F, 24) +#define GLQF_HLUT_SIZE(_i) (0x00455400 + ((_i) * 4)) /* _i=0...15 */ /* Reset Source: CORER */ +#define GLQF_HLUT_SIZE_MAX_INDEX 15 +#define GLQF_HLUT_SIZE_HSIZE_S 0 +#define GLQF_HLUT_SIZE_HSIZE_M BIT(0) +#define GLQF_HMASK(_i) (0x0040FC00 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ +#define GLQF_HMASK_MAX_INDEX 31 +#define GLQF_HMASK_MSK_INDEX_S 0 +#define GLQF_HMASK_MSK_INDEX_M MAKEMASK(0x1F, 0) +#define GLQF_HMASK_MASK_S 16 +#define GLQF_HMASK_MASK_M MAKEMASK(0xFFFF, 16) +#define GLQF_HMASK_SEL(_i) (0x00410000 + ((_i) * 4)) /* _i=0...127 */ /* Reset Source: CORER */ +#define GLQF_HMASK_SEL_MAX_INDEX 127 +#define GLQF_HMASK_SEL_MASK_SEL_S 0 +#define GLQF_HMASK_SEL_MASK_SEL_M MAKEMASK(0xFFFFFFFF, 0) +#define GLQF_HSYMM(_i, _j) (0x0040F000 + ((_i) * 4 + (_j) * 512)) /* _i=0...127, _j=0...5 */ /* Reset Source: CORER */ +#define GLQF_HSYMM_MAX_INDEX 127 +#define GLQF_HSYMM_FV_SYMM_INDX0_S 0 +#define GLQF_HSYMM_FV_SYMM_INDX0_M MAKEMASK(0x1F, 0) +#define GLQF_HSYMM_SYMM0_ENA_S 7 +#define GLQF_HSYMM_SYMM0_ENA_M BIT(7) +#define GLQF_HSYMM_FV_SYMM_INDX1_S 8 +#define GLQF_HSYMM_FV_SYMM_INDX1_M MAKEMASK(0x1F, 8) +#define GLQF_HSYMM_SYMM1_ENA_S 15 +#define GLQF_HSYMM_SYMM1_ENA_M BIT(15) +#define GLQF_HSYMM_FV_SYMM_INDX2_S 16 +#define GLQF_HSYMM_FV_SYMM_INDX2_M MAKEMASK(0x1F, 16) +#define GLQF_HSYMM_SYMM2_ENA_S 23 +#define GLQF_HSYMM_SYMM2_ENA_M BIT(23) +#define GLQF_HSYMM_FV_SYMM_INDX3_S 24 +#define GLQF_HSYMM_FV_SYMM_INDX3_M MAKEMASK(0x1F, 24) +#define GLQF_HSYMM_SYMM3_ENA_S 31 +#define GLQF_HSYMM_SYMM3_ENA_M BIT(31) +#define GLQF_PE_APBVT_CNT 0x00455500 /* Reset Source: CORER */ +#define GLQF_PE_APBVT_CNT_APBVT_LAN_S 0 +#define GLQF_PE_APBVT_CNT_APBVT_LAN_M MAKEMASK(0xFFFFFFFF, 0) +#define GLQF_PE_CMD 0x00471080 /* Reset Source: CORER */ +#define GLQF_PE_CMD_ADDREM_STS_S 0 +#define GLQF_PE_CMD_ADDREM_STS_M MAKEMASK(0xFFFFFF, 0) +#define GLQF_PE_CMD_ADDREM_ID_S 28 +#define GLQF_PE_CMD_ADDREM_ID_M MAKEMASK(0xF, 28) +#define GLQF_PE_CTL 0x004710C0 /* Reset Source: CORER */ +#define GLQF_PE_CTL_PELONG_S 0 +#define GLQF_PE_CTL_PELONG_M MAKEMASK(0xF, 0) +#define GLQF_PE_CTL2(_i) (0x00455200 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ +#define GLQF_PE_CTL2_MAX_INDEX 31 +#define 
GLQF_PE_CTL2_TO_QH_S 0 +#define GLQF_PE_CTL2_TO_QH_M MAKEMASK(0x3, 0) +#define GLQF_PE_CTL2_APBVT_ENA_S 2 +#define GLQF_PE_CTL2_APBVT_ENA_M BIT(2) +#define GLQF_PE_FVE 0x0020E514 /* Reset Source: CORER */ +#define GLQF_PE_FVE_W_ENA_S 0 +#define GLQF_PE_FVE_W_ENA_M MAKEMASK(0xFFFFFF, 0) +#define GLQF_PE_OSR_STS 0x00471040 /* Reset Source: CORER */ +#define GLQF_PE_OSR_STS_QH_SRCH_MAXOSR_S 0 +#define GLQF_PE_OSR_STS_QH_SRCH_MAXOSR_M MAKEMASK(0x3FF, 0) +#define GLQF_PE_OSR_STS_QH_CMD_MAXOSR_S 16 +#define GLQF_PE_OSR_STS_QH_CMD_MAXOSR_M MAKEMASK(0x3FF, 16) +#define GLQF_PEINSET(_i, _j) (0x00415000 + ((_i) * 4 + (_j) * 128)) /* _i=0...31, _j=0...5 */ /* Reset Source: CORER */ +#define GLQF_PEINSET_MAX_INDEX 31 +#define GLQF_PEINSET_FV_WORD_INDX0_S 0 +#define GLQF_PEINSET_FV_WORD_INDX0_M MAKEMASK(0x1F, 0) +#define GLQF_PEINSET_FV_WORD_VAL0_S 7 +#define GLQF_PEINSET_FV_WORD_VAL0_M BIT(7) +#define GLQF_PEINSET_FV_WORD_INDX1_S 8 +#define GLQF_PEINSET_FV_WORD_INDX1_M MAKEMASK(0x1F, 8) +#define GLQF_PEINSET_FV_WORD_VAL1_S 15 +#define GLQF_PEINSET_FV_WORD_VAL1_M BIT(15) +#define GLQF_PEINSET_FV_WORD_INDX2_S 16 +#define GLQF_PEINSET_FV_WORD_INDX2_M MAKEMASK(0x1F, 16) +#define GLQF_PEINSET_FV_WORD_VAL2_S 23 +#define GLQF_PEINSET_FV_WORD_VAL2_M BIT(23) +#define GLQF_PEINSET_FV_WORD_INDX3_S 24 +#define GLQF_PEINSET_FV_WORD_INDX3_M MAKEMASK(0x1F, 24) +#define GLQF_PEINSET_FV_WORD_VAL3_S 31 +#define GLQF_PEINSET_FV_WORD_VAL3_M BIT(31) +#define GLQF_PEMASK(_i) (0x00415400 + ((_i) * 4)) /* _i=0...15 */ /* Reset Source: CORER */ +#define GLQF_PEMASK_MAX_INDEX 15 +#define GLQF_PEMASK_MSK_INDEX_S 0 +#define GLQF_PEMASK_MSK_INDEX_M MAKEMASK(0x1F, 0) +#define GLQF_PEMASK_MASK_S 16 +#define GLQF_PEMASK_MASK_M MAKEMASK(0xFFFF, 16) +#define GLQF_PEMASK_SEL(_i) (0x00415500 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ +#define GLQF_PEMASK_SEL_MAX_INDEX 31 +#define GLQF_PEMASK_SEL_MASK_SEL_S 0 +#define GLQF_PEMASK_SEL_MASK_SEL_M MAKEMASK(0xFFFF, 0) +#define GLQF_PETABLE_CLR(_i) (0x000AA078 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */ +#define GLQF_PETABLE_CLR_MAX_INDEX 1 +#define GLQF_PETABLE_CLR_VM_VF_NUM_S 0 +#define GLQF_PETABLE_CLR_VM_VF_NUM_M MAKEMASK(0x3FF, 0) +#define GLQF_PETABLE_CLR_VM_VF_TYPE_S 10 +#define GLQF_PETABLE_CLR_VM_VF_TYPE_M MAKEMASK(0x3, 10) +#define GLQF_PETABLE_CLR_PF_NUM_S 12 +#define GLQF_PETABLE_CLR_PF_NUM_M MAKEMASK(0x7, 12) +#define GLQF_PETABLE_CLR_PE_BUSY_S 16 +#define GLQF_PETABLE_CLR_PE_BUSY_M BIT(16) +#define GLQF_PETABLE_CLR_PE_CLEAR_S 17 +#define GLQF_PETABLE_CLR_PE_CLEAR_M BIT(17) +#define GLQF_PROF2TC(_i, _j) (0x0044D000 + ((_i) * 4 + (_j) * 512)) /* _i=0...127, _j=0...3 */ /* Reset Source: CORER */ +#define GLQF_PROF2TC_MAX_INDEX 127 +#define GLQF_PROF2TC_OVERRIDE_ENA_0_S 0 +#define GLQF_PROF2TC_OVERRIDE_ENA_0_M BIT(0) +#define GLQF_PROF2TC_REGION_0_S 1 +#define GLQF_PROF2TC_REGION_0_M MAKEMASK(0x7, 1) +#define GLQF_PROF2TC_OVERRIDE_ENA_1_S 4 +#define GLQF_PROF2TC_OVERRIDE_ENA_1_M BIT(4) +#define GLQF_PROF2TC_REGION_1_S 5 +#define GLQF_PROF2TC_REGION_1_M MAKEMASK(0x7, 5) +#define GLQF_PROF2TC_OVERRIDE_ENA_2_S 8 +#define GLQF_PROF2TC_OVERRIDE_ENA_2_M BIT(8) +#define GLQF_PROF2TC_REGION_2_S 9 +#define GLQF_PROF2TC_REGION_2_M MAKEMASK(0x7, 9) +#define GLQF_PROF2TC_OVERRIDE_ENA_3_S 12 +#define GLQF_PROF2TC_OVERRIDE_ENA_3_M BIT(12) +#define GLQF_PROF2TC_REGION_3_S 13 +#define GLQF_PROF2TC_REGION_3_M MAKEMASK(0x7, 13) +#define GLQF_PROF2TC_OVERRIDE_ENA_4_S 16 +#define GLQF_PROF2TC_OVERRIDE_ENA_4_M BIT(16) +#define GLQF_PROF2TC_REGION_4_S 17 +#define 
GLQF_PROF2TC_REGION_4_M MAKEMASK(0x7, 17) +#define GLQF_PROF2TC_OVERRIDE_ENA_5_S 20 +#define GLQF_PROF2TC_OVERRIDE_ENA_5_M BIT(20) +#define GLQF_PROF2TC_REGION_5_S 21 +#define GLQF_PROF2TC_REGION_5_M MAKEMASK(0x7, 21) +#define GLQF_PROF2TC_OVERRIDE_ENA_6_S 24 +#define GLQF_PROF2TC_OVERRIDE_ENA_6_M BIT(24) +#define GLQF_PROF2TC_REGION_6_S 25 +#define GLQF_PROF2TC_REGION_6_M MAKEMASK(0x7, 25) +#define GLQF_PROF2TC_OVERRIDE_ENA_7_S 28 +#define GLQF_PROF2TC_OVERRIDE_ENA_7_M BIT(28) +#define GLQF_PROF2TC_REGION_7_S 29 +#define GLQF_PROF2TC_REGION_7_M MAKEMASK(0x7, 29) +#define PFQF_FD_CNT 0x00460180 /* Reset Source: CORER */ +#define PFQF_FD_CNT_FD_GCNT_S 0 +#define PFQF_FD_CNT_FD_GCNT_M MAKEMASK(0x7FFF, 0) +#define PFQF_FD_CNT_FD_BCNT_S 16 +#define PFQF_FD_CNT_FD_BCNT_M MAKEMASK(0x7FFF, 16) +#define PFQF_FD_ENA 0x0043A000 /* Reset Source: CORER */ +#define PFQF_FD_ENA_FD_ENA_S 0 +#define PFQF_FD_ENA_FD_ENA_M BIT(0) +#define PFQF_FD_SIZE 0x00460100 /* Reset Source: CORER */ +#define PFQF_FD_SIZE_FD_GSIZE_S 0 +#define PFQF_FD_SIZE_FD_GSIZE_M MAKEMASK(0x7FFF, 0) +#define PFQF_FD_SIZE_FD_BSIZE_S 16 +#define PFQF_FD_SIZE_FD_BSIZE_M MAKEMASK(0x7FFF, 16) +#define PFQF_FD_SUBTRACT 0x00460200 /* Reset Source: CORER */ +#define PFQF_FD_SUBTRACT_FD_GCNT_S 0 +#define PFQF_FD_SUBTRACT_FD_GCNT_M MAKEMASK(0x7FFF, 0) +#define PFQF_FD_SUBTRACT_FD_BCNT_S 16 +#define PFQF_FD_SUBTRACT_FD_BCNT_M MAKEMASK(0x7FFF, 16) +#define PFQF_HLUT(_i) (0x00430000 + ((_i) * 64)) /* _i=0...511 */ /* Reset Source: CORER */ +#define PFQF_HLUT_MAX_INDEX 511 +#define PFQF_HLUT_LUT0_S 0 +#define PFQF_HLUT_LUT0_M MAKEMASK(0xFF, 0) +#define PFQF_HLUT_LUT1_S 8 +#define PFQF_HLUT_LUT1_M MAKEMASK(0xFF, 8) +#define PFQF_HLUT_LUT2_S 16 +#define PFQF_HLUT_LUT2_M MAKEMASK(0xFF, 16) +#define PFQF_HLUT_LUT3_S 24 +#define PFQF_HLUT_LUT3_M MAKEMASK(0xFF, 24) +#define PFQF_HLUT_SIZE 0x00455480 /* Reset Source: CORER */ +#define PFQF_HLUT_SIZE_HSIZE_S 0 +#define PFQF_HLUT_SIZE_HSIZE_M MAKEMASK(0x3, 0) +#define PFQF_PE_CLSN0 0x00470480 /* Reset Source: CORER */ +#define PFQF_PE_CLSN0_HITSBCNT_S 0 +#define PFQF_PE_CLSN0_HITSBCNT_M MAKEMASK(0xFFFFFFFF, 0) +#define PFQF_PE_CLSN1 0x00470500 /* Reset Source: CORER */ +#define PFQF_PE_CLSN1_HITLBCNT_S 0 +#define PFQF_PE_CLSN1_HITLBCNT_M MAKEMASK(0xFFFFFFFF, 0) +#define PFQF_PE_CTL1 0x00470000 /* Reset Source: CORER */ +#define PFQF_PE_CTL1_PEHSIZE_S 0 +#define PFQF_PE_CTL1_PEHSIZE_M MAKEMASK(0xF, 0) +#define PFQF_PE_CTL2 0x00470040 /* Reset Source: CORER */ +#define PFQF_PE_CTL2_PEDSIZE_S 0 +#define PFQF_PE_CTL2_PEDSIZE_M MAKEMASK(0xF, 0) +#define PFQF_PE_FILTERING_ENA 0x0043A080 /* Reset Source: CORER */ +#define PFQF_PE_FILTERING_ENA_PE_ENA_S 0 +#define PFQF_PE_FILTERING_ENA_PE_ENA_M BIT(0) +#define PFQF_PE_FLHD 0x00470100 /* Reset Source: CORER */ +#define PFQF_PE_FLHD_FLHD_S 0 +#define PFQF_PE_FLHD_FLHD_M MAKEMASK(0xFFFFFF, 0) +#define PFQF_PE_ST_CTL 0x00470400 /* Reset Source: CORER */ +#define PFQF_PE_ST_CTL_PF_CNT_EN_S 0 +#define PFQF_PE_ST_CTL_PF_CNT_EN_M BIT(0) +#define PFQF_PE_ST_CTL_VFS_CNT_EN_S 1 +#define PFQF_PE_ST_CTL_VFS_CNT_EN_M BIT(1) +#define PFQF_PE_ST_CTL_VF_CNT_EN_S 2 +#define PFQF_PE_ST_CTL_VF_CNT_EN_M BIT(2) +#define PFQF_PE_ST_CTL_VF_NUM_S 16 +#define PFQF_PE_ST_CTL_VF_NUM_M MAKEMASK(0xFF, 16) +#define PFQF_PE_TC_CTL 0x00452080 /* Reset Source: CORER */ +#define PFQF_PE_TC_CTL_TC_EN_PF_S 0 +#define PFQF_PE_TC_CTL_TC_EN_PF_M MAKEMASK(0xFF, 0) +#define PFQF_PE_TC_CTL_TC_EN_VF_S 16 +#define PFQF_PE_TC_CTL_TC_EN_VF_M MAKEMASK(0xFF, 16) +#define PFQF_PECNT_0 0x00470200 /* Reset Source: 
CORER */ +#define PFQF_PECNT_0_BUCKETCNT_S 0 +#define PFQF_PECNT_0_BUCKETCNT_M MAKEMASK(0x3FFFF, 0) +#define PFQF_PECNT_1 0x00470300 /* Reset Source: CORER */ +#define PFQF_PECNT_1_FLTCNT_S 0 +#define PFQF_PECNT_1_FLTCNT_M MAKEMASK(0x3FFFF, 0) +#define VPQF_PE_CTL1(_VF) (0x00474000 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: CORER */ +#define VPQF_PE_CTL1_MAX_INDEX 255 +#define VPQF_PE_CTL1_PEHSIZE_S 0 +#define VPQF_PE_CTL1_PEHSIZE_M MAKEMASK(0xF, 0) +#define VPQF_PE_CTL2(_VF) (0x00474800 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: CORER */ +#define VPQF_PE_CTL2_MAX_INDEX 255 +#define VPQF_PE_CTL2_PEDSIZE_S 0 +#define VPQF_PE_CTL2_PEDSIZE_M MAKEMASK(0xF, 0) +#define VPQF_PE_FILTERING_ENA(_VF) (0x00455800 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: CORER */ +#define VPQF_PE_FILTERING_ENA_MAX_INDEX 255 +#define VPQF_PE_FILTERING_ENA_PE_ENA_S 0 +#define VPQF_PE_FILTERING_ENA_PE_ENA_M BIT(0) +#define VPQF_PE_FLHD(_VF) (0x00472000 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: CORER */ +#define VPQF_PE_FLHD_MAX_INDEX 255 +#define VPQF_PE_FLHD_FLHD_S 0 +#define VPQF_PE_FLHD_FLHD_M MAKEMASK(0xFFFFFF, 0) +#define VPQF_PECNT_0(_VF) (0x00472800 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: CORER */ +#define VPQF_PECNT_0_MAX_INDEX 255 +#define VPQF_PECNT_0_BUCKETCNT_S 0 +#define VPQF_PECNT_0_BUCKETCNT_M MAKEMASK(0x3FFFF, 0) +#define VPQF_PECNT_1(_VF) (0x00473000 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: CORER */ +#define VPQF_PECNT_1_MAX_INDEX 255 +#define VPQF_PECNT_1_FLTCNT_S 0 +#define VPQF_PECNT_1_FLTCNT_M MAKEMASK(0x3FFFF, 0) +#define GLDCB_RMPMC 0x001223C8 /* Reset Source: CORER */ +#define GLDCB_RMPMC_RSPM_S 0 +#define GLDCB_RMPMC_RSPM_M MAKEMASK(0x3F, 0) +#define GLDCB_RMPMC_MIQ_NODROP_MODE_S 6 +#define GLDCB_RMPMC_MIQ_NODROP_MODE_M MAKEMASK(0x1F, 6) +#define GLDCB_RMPMC_RPM_DIS_S 31 +#define GLDCB_RMPMC_RPM_DIS_M BIT(31) +#define GLDCB_RMPMS 0x001223CC /* Reset Source: CORER */ +#define GLDCB_RMPMS_RMPM_S 0 +#define GLDCB_RMPMS_RMPM_M MAKEMASK(0xFFFF, 0) +#define GLDCB_RPCC 0x00122260 /* Reset Source: CORER */ +#define GLDCB_RPCC_EN_S 0 +#define GLDCB_RPCC_EN_M BIT(0) +#define GLDCB_RPCC_SCL_FACT_S 4 +#define GLDCB_RPCC_SCL_FACT_M MAKEMASK(0x1F, 4) +#define GLDCB_RPCC_THRSH_S 16 +#define GLDCB_RPCC_THRSH_M MAKEMASK(0xFFF, 16) +#define GLDCB_RSPMC 0x001223C4 /* Reset Source: CORER */ +#define GLDCB_RSPMC_RSPM_S 0 +#define GLDCB_RSPMC_RSPM_M MAKEMASK(0xFF, 0) +#define GLDCB_RSPMC_RPM_MODE_S 8 +#define GLDCB_RSPMC_RPM_MODE_M MAKEMASK(0x3, 8) +#define GLDCB_RSPMC_PRR_MAX_EXP_S 10 +#define GLDCB_RSPMC_PRR_MAX_EXP_M MAKEMASK(0xF, 10) +#define GLDCB_RSPMC_PFCTIMER_S 14 +#define GLDCB_RSPMC_PFCTIMER_M MAKEMASK(0x3FFF, 14) +#define GLDCB_RSPMC_RPM_DIS_S 31 +#define GLDCB_RSPMC_RPM_DIS_M BIT(31) +#define GLDCB_RSPMS 0x001223C0 /* Reset Source: CORER */ +#define GLDCB_RSPMS_RSPM_S 0 +#define GLDCB_RSPMS_RSPM_M MAKEMASK(0x3FFFF, 0) +#define GLDCB_RTCTI 0x001223D0 /* Reset Source: CORER */ +#define GLDCB_RTCTI_PFCTIMEOUT_TC_S 0 +#define GLDCB_RTCTI_PFCTIMEOUT_TC_M MAKEMASK(0xFFFFFFFF, 0) +#define GLDCB_RTCTQ(_i) (0x001222C0 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ +#define GLDCB_RTCTQ_MAX_INDEX 31 +#define GLDCB_RTCTQ_RXQNUM_S 0 +#define GLDCB_RTCTQ_RXQNUM_M MAKEMASK(0x7FF, 0) +#define GLDCB_RTCTQ_IS_PF_Q_S 16 +#define GLDCB_RTCTQ_IS_PF_Q_M BIT(16) +#define GLDCB_RTCTS(_i) (0x00122340 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ +#define GLDCB_RTCTS_MAX_INDEX 31 +#define GLDCB_RTCTS_PFCTIMER_S 0 +#define GLDCB_RTCTS_PFCTIMER_M MAKEMASK(0x3FFF, 0) 
+#define GLRCB_CFG_COTF_CNT(_i) (0x001223D4 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLRCB_CFG_COTF_CNT_MAX_INDEX 7
+#define GLRCB_CFG_COTF_CNT_MRKR_COTF_CNT_S 0
+#define GLRCB_CFG_COTF_CNT_MRKR_COTF_CNT_M MAKEMASK(0x3F, 0)
+#define GLRCB_CFG_COTF_ST 0x001223F4 /* Reset Source: CORER */
+#define GLRCB_CFG_COTF_ST_MRKR_COTF_ST_S 0
+#define GLRCB_CFG_COTF_ST_MRKR_COTF_ST_M MAKEMASK(0xFF, 0)
+#define GLRPRS_PMCFG_DHW(_i) (0x00200388 + ((_i) * 4)) /* _i=0...15 */ /* Reset Source: CORER */
+#define GLRPRS_PMCFG_DHW_MAX_INDEX 15
+#define GLRPRS_PMCFG_DHW_DHW_S 0
+#define GLRPRS_PMCFG_DHW_DHW_M MAKEMASK(0xFFFFF, 0)
+#define GLRPRS_PMCFG_DLW(_i) (0x002003C8 + ((_i) * 4)) /* _i=0...15 */ /* Reset Source: CORER */
+#define GLRPRS_PMCFG_DLW_MAX_INDEX 15
+#define GLRPRS_PMCFG_DLW_DLW_S 0
+#define GLRPRS_PMCFG_DLW_DLW_M MAKEMASK(0xFFFFF, 0)
+#define GLRPRS_PMCFG_DPS(_i) (0x00200308 + ((_i) * 4)) /* _i=0...15 */ /* Reset Source: CORER */
+#define GLRPRS_PMCFG_DPS_MAX_INDEX 15
+#define GLRPRS_PMCFG_DPS_DPS_S 0
+#define GLRPRS_PMCFG_DPS_DPS_M MAKEMASK(0xFFFFF, 0)
+#define GLRPRS_PMCFG_SHW(_i) (0x00200448 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLRPRS_PMCFG_SHW_MAX_INDEX 7
+#define GLRPRS_PMCFG_SHW_SHW_S 0
+#define GLRPRS_PMCFG_SHW_SHW_M MAKEMASK(0xFFFFF, 0)
+#define GLRPRS_PMCFG_SLW(_i) (0x00200468 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLRPRS_PMCFG_SLW_MAX_INDEX 7
+#define GLRPRS_PMCFG_SLW_SLW_S 0
+#define GLRPRS_PMCFG_SLW_SLW_M MAKEMASK(0xFFFFF, 0)
+#define GLRPRS_PMCFG_SPS(_i) (0x00200408 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLRPRS_PMCFG_SPS_MAX_INDEX 7
+#define GLRPRS_PMCFG_SPS_SPS_S 0
+#define GLRPRS_PMCFG_SPS_SPS_M MAKEMASK(0xFFFFF, 0)
+#define GLRPRS_PMCFG_TC_CFG(_i) (0x00200488 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GLRPRS_PMCFG_TC_CFG_MAX_INDEX 31
+#define GLRPRS_PMCFG_TC_CFG_D_POOL_S 0
+#define GLRPRS_PMCFG_TC_CFG_D_POOL_M MAKEMASK(0xF, 0)
+#define GLRPRS_PMCFG_TC_CFG_S_POOL_S 16
+#define GLRPRS_PMCFG_TC_CFG_S_POOL_M MAKEMASK(0x7, 16)
+#define GLRPRS_PMCFG_TCHW(_i) (0x00200588 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GLRPRS_PMCFG_TCHW_MAX_INDEX 31
+#define GLRPRS_PMCFG_TCHW_TCHW_S 0
+#define GLRPRS_PMCFG_TCHW_TCHW_M MAKEMASK(0xFFFFF, 0)
+#define GLRPRS_PMCFG_TCLW(_i) (0x00200608 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GLRPRS_PMCFG_TCLW_MAX_INDEX 31
+#define GLRPRS_PMCFG_TCLW_TCLW_S 0
+#define GLRPRS_PMCFG_TCLW_TCLW_M MAKEMASK(0xFFFFF, 0)
+#define GLSWT_PMCFG_TC_CFG(_i) (0x00204900 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GLSWT_PMCFG_TC_CFG_MAX_INDEX 31
+#define GLSWT_PMCFG_TC_CFG_D_POOL_S 0
+#define GLSWT_PMCFG_TC_CFG_D_POOL_M MAKEMASK(0xF, 0)
+#define GLSWT_PMCFG_TC_CFG_S_POOL_S 16
+#define GLSWT_PMCFG_TC_CFG_S_POOL_M MAKEMASK(0x7, 16)
+#define PRTDCB_RLANPMS 0x00122280 /* Reset Source: CORER */
+#define PRTDCB_RLANPMS_LANRPPM_S 0
+#define PRTDCB_RLANPMS_LANRPPM_M MAKEMASK(0x3FFFF, 0)
+#define PRTDCB_RPPMC 0x00122240 /* Reset Source: CORER */
+#define PRTDCB_RPPMC_LANRPPM_S 0
+#define PRTDCB_RPPMC_LANRPPM_M MAKEMASK(0xFF, 0)
+#define PRTDCB_RPPMC_RDMARPPM_S 8
+#define PRTDCB_RPPMC_RDMARPPM_M MAKEMASK(0xFF, 8)
+#define PRTDCB_RRDMAPMS 0x00122120 /* Reset Source: CORER */
+#define PRTDCB_RRDMAPMS_RDMARPPM_S 0
+#define PRTDCB_RRDMAPMS_RDMARPPM_M MAKEMASK(0x3FFFF, 0)
+#define GL_STAT_SWR_BPCH(_i) (0x00347804 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GL_STAT_SWR_BPCH_MAX_INDEX 127
+#define GL_STAT_SWR_BPCH_VLBPCH_S 0
+#define GL_STAT_SWR_BPCH_VLBPCH_M MAKEMASK(0xFF, 0)
+#define GL_STAT_SWR_BPCL(_i) (0x00347800 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GL_STAT_SWR_BPCL_MAX_INDEX 127
+#define GL_STAT_SWR_BPCL_VLBPCL_S 0
+#define GL_STAT_SWR_BPCL_VLBPCL_M MAKEMASK(0xFFFFFFFF, 0)
+#define GL_STAT_SWR_GORCH(_i) (0x00342004 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GL_STAT_SWR_GORCH_MAX_INDEX 127
+#define GL_STAT_SWR_GORCH_VLBCH_S 0
+#define GL_STAT_SWR_GORCH_VLBCH_M MAKEMASK(0xFF, 0)
+#define GL_STAT_SWR_GORCL(_i) (0x00342000 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GL_STAT_SWR_GORCL_MAX_INDEX 127
+#define GL_STAT_SWR_GORCL_VLBCL_S 0
+#define GL_STAT_SWR_GORCL_VLBCL_M MAKEMASK(0xFFFFFFFF, 0)
+#define GL_STAT_SWR_GOTCH(_i) (0x00304004 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GL_STAT_SWR_GOTCH_MAX_INDEX 127
+#define GL_STAT_SWR_GOTCH_VLBCH_S 0
+#define GL_STAT_SWR_GOTCH_VLBCH_M MAKEMASK(0xFF, 0)
+#define GL_STAT_SWR_GOTCL(_i) (0x00304000 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GL_STAT_SWR_GOTCL_MAX_INDEX 127
+#define GL_STAT_SWR_GOTCL_VLBCL_S 0
+#define GL_STAT_SWR_GOTCL_VLBCL_M MAKEMASK(0xFFFFFFFF, 0)
+#define GL_STAT_SWR_MPCH(_i) (0x00347404 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GL_STAT_SWR_MPCH_MAX_INDEX 127
+#define GL_STAT_SWR_MPCH_VLMPCH_S 0
+#define GL_STAT_SWR_MPCH_VLMPCH_M MAKEMASK(0xFF, 0)
+#define GL_STAT_SWR_MPCL(_i) (0x00347400 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GL_STAT_SWR_MPCL_MAX_INDEX 127
+#define GL_STAT_SWR_MPCL_VLMPCL_S 0
+#define GL_STAT_SWR_MPCL_VLMPCL_M MAKEMASK(0xFFFFFFFF, 0)
+#define GL_STAT_SWR_UPCH(_i) (0x00347004 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GL_STAT_SWR_UPCH_MAX_INDEX 127
+#define GL_STAT_SWR_UPCH_VLUPCH_S 0
+#define GL_STAT_SWR_UPCH_VLUPCH_M MAKEMASK(0xFF, 0)
+#define GL_STAT_SWR_UPCL(_i) (0x00347000 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GL_STAT_SWR_UPCL_MAX_INDEX 127
+#define GL_STAT_SWR_UPCL_VLUPCL_S 0
+#define GL_STAT_SWR_UPCL_VLUPCL_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPRT_AORCL(_i) (0x003812C0 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_AORCL_MAX_INDEX 7
+#define GLPRT_AORCL_AORCL_S 0
+#define GLPRT_AORCL_AORCL_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPRT_BPRCH(_i) (0x00381384 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_BPRCH_MAX_INDEX 7
+#define GLPRT_BPRCH_UPRCH_S 0
+#define GLPRT_BPRCH_UPRCH_M MAKEMASK(0xFF, 0)
+#define GLPRT_BPRCL(_i) (0x00381380 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_BPRCL_MAX_INDEX 7
+#define GLPRT_BPRCL_UPRCH_S 0
+#define GLPRT_BPRCL_UPRCH_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPRT_BPTCH(_i) (0x00381244 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_BPTCH_MAX_INDEX 7
+#define GLPRT_BPTCH_UPRCH_S 0
+#define GLPRT_BPTCH_UPRCH_M MAKEMASK(0xFF, 0)
+#define GLPRT_BPTCL(_i) (0x00381240 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_BPTCL_MAX_INDEX 7
+#define GLPRT_BPTCL_UPRCH_S 0
+#define GLPRT_BPTCL_UPRCH_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPRT_CRCERRS(_i) (0x00380100 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_CRCERRS_MAX_INDEX 7
+#define GLPRT_CRCERRS_CRCERRS_S 0
+#define GLPRT_CRCERRS_CRCERRS_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPRT_CRCERRS_H(_i) (0x00380104 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_CRCERRS_H_MAX_INDEX 7
+#define GLPRT_CRCERRS_H_CRCERRS_S 0
+#define GLPRT_CRCERRS_H_CRCERRS_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPRT_GORCH(_i) (0x00380004 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_GORCH_MAX_INDEX 7
+#define GLPRT_GORCH_GORCH_S 0
+#define GLPRT_GORCH_GORCH_M MAKEMASK(0xFF, 0)
+#define GLPRT_GORCL(_i) (0x00380000 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_GORCL_MAX_INDEX 7
+#define GLPRT_GORCL_GORCL_S 0
+#define GLPRT_GORCL_GORCL_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPRT_GOTCH(_i) (0x00380B44 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_GOTCH_MAX_INDEX 7
+#define GLPRT_GOTCH_GOTCH_S 0
+#define GLPRT_GOTCH_GOTCH_M MAKEMASK(0xFF, 0)
+#define GLPRT_GOTCL(_i) (0x00380B40 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_GOTCL_MAX_INDEX 7
+#define GLPRT_GOTCL_GOTCL_S 0
+#define GLPRT_GOTCL_GOTCL_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPRT_ILLERRC(_i) (0x003801C0 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_ILLERRC_MAX_INDEX 7
+#define GLPRT_ILLERRC_ILLERRC_S 0
+#define GLPRT_ILLERRC_ILLERRC_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPRT_ILLERRC_H(_i) (0x003801C4 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_ILLERRC_H_MAX_INDEX 7
+#define GLPRT_ILLERRC_H_ILLERRC_S 0
+#define GLPRT_ILLERRC_H_ILLERRC_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPRT_LXOFFRXC(_i) (0x003802C0 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_LXOFFRXC_MAX_INDEX 7
+#define GLPRT_LXOFFRXC_LXOFFRXCNT_S 0
+#define GLPRT_LXOFFRXC_LXOFFRXCNT_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPRT_LXOFFRXC_H(_i) (0x003802C4 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_LXOFFRXC_H_MAX_INDEX 7
+#define GLPRT_LXOFFRXC_H_LXOFFRXCNT_S 0
+#define GLPRT_LXOFFRXC_H_LXOFFRXCNT_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPRT_LXOFFTXC(_i) (0x00381180 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_LXOFFTXC_MAX_INDEX 7
+#define GLPRT_LXOFFTXC_LXOFFTXC_S 0
+#define GLPRT_LXOFFTXC_LXOFFTXC_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPRT_LXOFFTXC_H(_i) (0x00381184 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_LXOFFTXC_H_MAX_INDEX 7
+#define GLPRT_LXOFFTXC_H_LXOFFTXC_S 0
+#define GLPRT_LXOFFTXC_H_LXOFFTXC_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPRT_LXONRXC(_i) (0x00380280 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_LXONRXC_MAX_INDEX 7
+#define GLPRT_LXONRXC_LXONRXCNT_S 0
+#define GLPRT_LXONRXC_LXONRXCNT_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPRT_LXONRXC_H(_i) (0x00380284 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_LXONRXC_H_MAX_INDEX 7
+#define GLPRT_LXONRXC_H_LXONRXCNT_S 0
+#define GLPRT_LXONRXC_H_LXONRXCNT_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPRT_LXONTXC(_i) (0x00381140 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_LXONTXC_MAX_INDEX 7
+#define GLPRT_LXONTXC_LXONTXC_S 0
+#define GLPRT_LXONTXC_LXONTXC_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPRT_LXONTXC_H(_i) (0x00381144 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_LXONTXC_H_MAX_INDEX 7
+#define GLPRT_LXONTXC_H_LXONTXC_S 0
+#define GLPRT_LXONTXC_H_LXONTXC_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPRT_MLFC(_i) (0x00380040 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_MLFC_MAX_INDEX 7
+#define GLPRT_MLFC_MLFC_S 0
+#define GLPRT_MLFC_MLFC_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPRT_MLFC_H(_i) (0x00380044 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define
GLPRT_MLFC_H_MAX_INDEX 7 +#define GLPRT_MLFC_H_MLFC_S 0 +#define GLPRT_MLFC_H_MLFC_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPRT_MPRCH(_i) (0x00381344 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLPRT_MPRCH_MAX_INDEX 7 +#define GLPRT_MPRCH_MPRCH_S 0 +#define GLPRT_MPRCH_MPRCH_M MAKEMASK(0xFF, 0) +#define GLPRT_MPRCL(_i) (0x00381340 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLPRT_MPRCL_MAX_INDEX 7 +#define GLPRT_MPRCL_MPRCL_S 0 +#define GLPRT_MPRCL_MPRCL_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPRT_MPTCH(_i) (0x00381204 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLPRT_MPTCH_MAX_INDEX 7 +#define GLPRT_MPTCH_MPTCH_S 0 +#define GLPRT_MPTCH_MPTCH_M MAKEMASK(0xFF, 0) +#define GLPRT_MPTCL(_i) (0x00381200 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLPRT_MPTCL_MAX_INDEX 7 +#define GLPRT_MPTCL_MPTCL_S 0 +#define GLPRT_MPTCL_MPTCL_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPRT_MRFC(_i) (0x00380080 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLPRT_MRFC_MAX_INDEX 7 +#define GLPRT_MRFC_MRFC_S 0 +#define GLPRT_MRFC_MRFC_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPRT_MRFC_H(_i) (0x00380084 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLPRT_MRFC_H_MAX_INDEX 7 +#define GLPRT_MRFC_H_MRFC_S 0 +#define GLPRT_MRFC_H_MRFC_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPRT_PRC1023H(_i) (0x00380A04 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLPRT_PRC1023H_MAX_INDEX 7 +#define GLPRT_PRC1023H_PRC1023H_S 0 +#define GLPRT_PRC1023H_PRC1023H_M MAKEMASK(0xFF, 0) +#define GLPRT_PRC1023L(_i) (0x00380A00 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLPRT_PRC1023L_MAX_INDEX 7 +#define GLPRT_PRC1023L_PRC1023L_S 0 +#define GLPRT_PRC1023L_PRC1023L_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPRT_PRC127H(_i) (0x00380944 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLPRT_PRC127H_MAX_INDEX 7 +#define GLPRT_PRC127H_PRC127H_S 0 +#define GLPRT_PRC127H_PRC127H_M MAKEMASK(0xFF, 0) +#define GLPRT_PRC127L(_i) (0x00380940 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLPRT_PRC127L_MAX_INDEX 7 +#define GLPRT_PRC127L_PRC127L_S 0 +#define GLPRT_PRC127L_PRC127L_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPRT_PRC1522H(_i) (0x00380A44 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLPRT_PRC1522H_MAX_INDEX 7 +#define GLPRT_PRC1522H_PRC1522H_S 0 +#define GLPRT_PRC1522H_PRC1522H_M MAKEMASK(0xFF, 0) +#define GLPRT_PRC1522L(_i) (0x00380A40 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLPRT_PRC1522L_MAX_INDEX 7 +#define GLPRT_PRC1522L_PRC1522L_S 0 +#define GLPRT_PRC1522L_PRC1522L_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPRT_PRC255H(_i) (0x00380984 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLPRT_PRC255H_MAX_INDEX 7 +#define GLPRT_PRC255H_PRTPRC255H_S 0 +#define GLPRT_PRC255H_PRTPRC255H_M MAKEMASK(0xFF, 0) +#define GLPRT_PRC255L(_i) (0x00380980 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLPRT_PRC255L_MAX_INDEX 7 +#define GLPRT_PRC255L_PRC255L_S 0 +#define GLPRT_PRC255L_PRC255L_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPRT_PRC511H(_i) (0x003809C4 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLPRT_PRC511H_MAX_INDEX 7 +#define GLPRT_PRC511H_PRC511H_S 0 +#define GLPRT_PRC511H_PRC511H_M MAKEMASK(0xFF, 0) +#define GLPRT_PRC511L(_i) (0x003809C0 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLPRT_PRC511L_MAX_INDEX 7 +#define GLPRT_PRC511L_PRC511L_S 0 +#define GLPRT_PRC511L_PRC511L_M MAKEMASK(0xFFFFFFFF, 
0) +#define GLPRT_PRC64H(_i) (0x00380904 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLPRT_PRC64H_MAX_INDEX 7 +#define GLPRT_PRC64H_PRC64H_S 0 +#define GLPRT_PRC64H_PRC64H_M MAKEMASK(0xFF, 0) +#define GLPRT_PRC64L(_i) (0x00380900 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLPRT_PRC64L_MAX_INDEX 7 +#define GLPRT_PRC64L_PRC64L_S 0 +#define GLPRT_PRC64L_PRC64L_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPRT_PRC9522H(_i) (0x00380A84 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLPRT_PRC9522H_MAX_INDEX 7 +#define GLPRT_PRC9522H_PRC1522H_S 0 +#define GLPRT_PRC9522H_PRC1522H_M MAKEMASK(0xFF, 0) +#define GLPRT_PRC9522L(_i) (0x00380A80 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLPRT_PRC9522L_MAX_INDEX 7 +#define GLPRT_PRC9522L_PRC1522L_S 0 +#define GLPRT_PRC9522L_PRC1522L_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPRT_PTC1023H(_i) (0x00380C84 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLPRT_PTC1023H_MAX_INDEX 7 +#define GLPRT_PTC1023H_PTC1023H_S 0 +#define GLPRT_PTC1023H_PTC1023H_M MAKEMASK(0xFF, 0) +#define GLPRT_PTC1023L(_i) (0x00380C80 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLPRT_PTC1023L_MAX_INDEX 7 +#define GLPRT_PTC1023L_PTC1023L_S 0 +#define GLPRT_PTC1023L_PTC1023L_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPRT_PTC127H(_i) (0x00380BC4 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLPRT_PTC127H_MAX_INDEX 7 +#define GLPRT_PTC127H_PTC127H_S 0 +#define GLPRT_PTC127H_PTC127H_M MAKEMASK(0xFF, 0) +#define GLPRT_PTC127L(_i) (0x00380BC0 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLPRT_PTC127L_MAX_INDEX 7 +#define GLPRT_PTC127L_PTC127L_S 0 +#define GLPRT_PTC127L_PTC127L_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPRT_PTC1522H(_i) (0x00380CC4 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLPRT_PTC1522H_MAX_INDEX 7 +#define GLPRT_PTC1522H_PTC1522H_S 0 +#define GLPRT_PTC1522H_PTC1522H_M MAKEMASK(0xFF, 0) +#define GLPRT_PTC1522L(_i) (0x00380CC0 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLPRT_PTC1522L_MAX_INDEX 7 +#define GLPRT_PTC1522L_PTC1522L_S 0 +#define GLPRT_PTC1522L_PTC1522L_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPRT_PTC255H(_i) (0x00380C04 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLPRT_PTC255H_MAX_INDEX 7 +#define GLPRT_PTC255H_PTC255H_S 0 +#define GLPRT_PTC255H_PTC255H_M MAKEMASK(0xFF, 0) +#define GLPRT_PTC255L(_i) (0x00380C00 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLPRT_PTC255L_MAX_INDEX 7 +#define GLPRT_PTC255L_PTC255L_S 0 +#define GLPRT_PTC255L_PTC255L_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPRT_PTC511H(_i) (0x00380C44 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLPRT_PTC511H_MAX_INDEX 7 +#define GLPRT_PTC511H_PTC511H_S 0 +#define GLPRT_PTC511H_PTC511H_M MAKEMASK(0xFF, 0) +#define GLPRT_PTC511L(_i) (0x00380C40 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLPRT_PTC511L_MAX_INDEX 7 +#define GLPRT_PTC511L_PTC511L_S 0 +#define GLPRT_PTC511L_PTC511L_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPRT_PTC64H(_i) (0x00380B84 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLPRT_PTC64H_MAX_INDEX 7 +#define GLPRT_PTC64H_PTC64H_S 0 +#define GLPRT_PTC64H_PTC64H_M MAKEMASK(0xFF, 0) +#define GLPRT_PTC64L(_i) (0x00380B80 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLPRT_PTC64L_MAX_INDEX 7 +#define GLPRT_PTC64L_PTC64L_S 0 +#define GLPRT_PTC64L_PTC64L_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPRT_PTC9522H(_i) (0x00380D04 + ((_i) * 
8)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLPRT_PTC9522H_MAX_INDEX 7 +#define GLPRT_PTC9522H_PTC9522H_S 0 +#define GLPRT_PTC9522H_PTC9522H_M MAKEMASK(0xFF, 0) +#define GLPRT_PTC9522L(_i) (0x00380D00 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLPRT_PTC9522L_MAX_INDEX 7 +#define GLPRT_PTC9522L_PTC9522L_S 0 +#define GLPRT_PTC9522L_PTC9522L_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPRT_PXOFFRXC(_i, _j) (0x00380500 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...7 */ /* Reset Source: CORER */ +#define GLPRT_PXOFFRXC_MAX_INDEX 7 +#define GLPRT_PXOFFRXC_PRPXOFFRXCNT_S 0 +#define GLPRT_PXOFFRXC_PRPXOFFRXCNT_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPRT_PXOFFRXC_H(_i, _j) (0x00380504 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...7 */ /* Reset Source: CORER */ +#define GLPRT_PXOFFRXC_H_MAX_INDEX 7 +#define GLPRT_PXOFFRXC_H_PRPXOFFRXCNT_S 0 +#define GLPRT_PXOFFRXC_H_PRPXOFFRXCNT_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPRT_PXOFFTXC(_i, _j) (0x00380F40 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...7 */ /* Reset Source: CORER */ +#define GLPRT_PXOFFTXC_MAX_INDEX 7 +#define GLPRT_PXOFFTXC_PRPXOFFTXCNT_S 0 +#define GLPRT_PXOFFTXC_PRPXOFFTXCNT_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPRT_PXOFFTXC_H(_i, _j) (0x00380F44 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...7 */ /* Reset Source: CORER */ +#define GLPRT_PXOFFTXC_H_MAX_INDEX 7 +#define GLPRT_PXOFFTXC_H_PRPXOFFTXCNT_S 0 +#define GLPRT_PXOFFTXC_H_PRPXOFFTXCNT_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPRT_PXONRXC(_i, _j) (0x00380300 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...7 */ /* Reset Source: CORER */ +#define GLPRT_PXONRXC_MAX_INDEX 7 +#define GLPRT_PXONRXC_PRPXONRXCNT_S 0 +#define GLPRT_PXONRXC_PRPXONRXCNT_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPRT_PXONRXC_H(_i, _j) (0x00380304 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...7 */ /* Reset Source: CORER */ +#define GLPRT_PXONRXC_H_MAX_INDEX 7 +#define GLPRT_PXONRXC_H_PRPXONRXCNT_S 0 +#define GLPRT_PXONRXC_H_PRPXONRXCNT_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPRT_PXONTXC(_i, _j) (0x00380D40 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...7 */ /* Reset Source: CORER */ +#define GLPRT_PXONTXC_MAX_INDEX 7 +#define GLPRT_PXONTXC_PRPXONTXC_S 0 +#define GLPRT_PXONTXC_PRPXONTXC_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPRT_PXONTXC_H(_i, _j) (0x00380D44 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...7 */ /* Reset Source: CORER */ +#define GLPRT_PXONTXC_H_MAX_INDEX 7 +#define GLPRT_PXONTXC_H_PRPXONTXC_S 0 +#define GLPRT_PXONTXC_H_PRPXONTXC_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPRT_RFC(_i) (0x00380AC0 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLPRT_RFC_MAX_INDEX 7 +#define GLPRT_RFC_RFC_S 0 +#define GLPRT_RFC_RFC_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPRT_RFC_H(_i) (0x00380AC4 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLPRT_RFC_H_MAX_INDEX 7 +#define GLPRT_RFC_H_RFC_S 0 +#define GLPRT_RFC_H_RFC_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPRT_RJC(_i) (0x00380B00 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLPRT_RJC_MAX_INDEX 7 +#define GLPRT_RJC_RJC_S 0 +#define GLPRT_RJC_RJC_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPRT_RJC_H(_i) (0x00380B04 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLPRT_RJC_H_MAX_INDEX 7 +#define GLPRT_RJC_H_RJC_S 0 +#define GLPRT_RJC_H_RJC_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPRT_RLEC(_i) (0x00380140 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLPRT_RLEC_MAX_INDEX 7 +#define GLPRT_RLEC_RLEC_S 0 +#define GLPRT_RLEC_RLEC_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPRT_RLEC_H(_i) (0x00380144 + ((_i) * 8)) /* 
_i=0...7 */ /* Reset Source: CORER */ +#define GLPRT_RLEC_H_MAX_INDEX 7 +#define GLPRT_RLEC_H_RLEC_S 0 +#define GLPRT_RLEC_H_RLEC_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPRT_ROC(_i) (0x00380240 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLPRT_ROC_MAX_INDEX 7 +#define GLPRT_ROC_ROC_S 0 +#define GLPRT_ROC_ROC_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPRT_ROC_H(_i) (0x00380244 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLPRT_ROC_H_MAX_INDEX 7 +#define GLPRT_ROC_H_ROC_S 0 +#define GLPRT_ROC_H_ROC_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPRT_RUC(_i) (0x00380200 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLPRT_RUC_MAX_INDEX 7 +#define GLPRT_RUC_RUC_S 0 +#define GLPRT_RUC_RUC_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPRT_RUC_H(_i) (0x00380204 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLPRT_RUC_H_MAX_INDEX 7 +#define GLPRT_RUC_H_RUC_S 0 +#define GLPRT_RUC_H_RUC_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPRT_RXON2OFFCNT(_i, _j) (0x00380700 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...7 */ /* Reset Source: CORER */ +#define GLPRT_RXON2OFFCNT_MAX_INDEX 7 +#define GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_S 0 +#define GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPRT_RXON2OFFCNT_H(_i, _j) (0x00380704 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...7 */ /* Reset Source: CORER */ +#define GLPRT_RXON2OFFCNT_H_MAX_INDEX 7 +#define GLPRT_RXON2OFFCNT_H_PRRXON2OFFCNT_S 0 +#define GLPRT_RXON2OFFCNT_H_PRRXON2OFFCNT_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPRT_STDC(_i) (0x00340000 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLPRT_STDC_MAX_INDEX 7 +#define GLPRT_STDC_STDC_S 0 +#define GLPRT_STDC_STDC_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPRT_TDOLD(_i) (0x00381280 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLPRT_TDOLD_MAX_INDEX 7 +#define GLPRT_TDOLD_GLPRT_TDOLD_S 0 +#define GLPRT_TDOLD_GLPRT_TDOLD_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPRT_TDOLD_H(_i) (0x00381284 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLPRT_TDOLD_H_MAX_INDEX 7 +#define GLPRT_TDOLD_H_GLPRT_TDOLD_S 0 +#define GLPRT_TDOLD_H_GLPRT_TDOLD_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPRT_UPRCH(_i) (0x00381304 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLPRT_UPRCH_MAX_INDEX 7 +#define GLPRT_UPRCH_UPRCH_S 0 +#define GLPRT_UPRCH_UPRCH_M MAKEMASK(0xFF, 0) +#define GLPRT_UPRCL(_i) (0x00381300 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLPRT_UPRCL_MAX_INDEX 7 +#define GLPRT_UPRCL_UPRCL_S 0 +#define GLPRT_UPRCL_UPRCL_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPRT_UPTCH(_i) (0x003811C4 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLPRT_UPTCH_MAX_INDEX 7 +#define GLPRT_UPTCH_UPTCH_S 0 +#define GLPRT_UPTCH_UPTCH_M MAKEMASK(0xFF, 0) +#define GLPRT_UPTCL(_i) (0x003811C0 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */ +#define GLPRT_UPTCL_MAX_INDEX 7 +#define GLPRT_UPTCL_VUPTCH_S 0 +#define GLPRT_UPTCL_VUPTCH_M MAKEMASK(0xFFFFFFFF, 0) +#define GLSTAT_ACL_CNT_0_H(_i) (0x00388004 + ((_i) * 8)) /* _i=0...511 */ /* Reset Source: CORER */ +#define GLSTAT_ACL_CNT_0_H_MAX_INDEX 511 +#define GLSTAT_ACL_CNT_0_H_CNT_MSB_S 0 +#define GLSTAT_ACL_CNT_0_H_CNT_MSB_M MAKEMASK(0xFF, 0) +#define GLSTAT_ACL_CNT_0_L(_i) (0x00388000 + ((_i) * 8)) /* _i=0...511 */ /* Reset Source: CORER */ +#define GLSTAT_ACL_CNT_0_L_MAX_INDEX 511 +#define GLSTAT_ACL_CNT_0_L_CNT_LSB_S 0 +#define GLSTAT_ACL_CNT_0_L_CNT_LSB_M MAKEMASK(0xFFFFFFFF, 0) +#define GLSTAT_ACL_CNT_1_H(_i) (0x00389004 + ((_i) * 8)) /* _i=0...511 
*/ /* Reset Source: CORER */ +#define GLSTAT_ACL_CNT_1_H_MAX_INDEX 511 +#define GLSTAT_ACL_CNT_1_H_CNT_MSB_S 0 +#define GLSTAT_ACL_CNT_1_H_CNT_MSB_M MAKEMASK(0xFF, 0) +#define GLSTAT_ACL_CNT_1_L(_i) (0x00389000 + ((_i) * 8)) /* _i=0...511 */ /* Reset Source: CORER */ +#define GLSTAT_ACL_CNT_1_L_MAX_INDEX 511 +#define GLSTAT_ACL_CNT_1_L_CNT_LSB_S 0 +#define GLSTAT_ACL_CNT_1_L_CNT_LSB_M MAKEMASK(0xFFFFFFFF, 0) +#define GLSTAT_ACL_CNT_2_H(_i) (0x0038A004 + ((_i) * 8)) /* _i=0...511 */ /* Reset Source: CORER */ +#define GLSTAT_ACL_CNT_2_H_MAX_INDEX 511 +#define GLSTAT_ACL_CNT_2_H_CNT_MSB_S 0 +#define GLSTAT_ACL_CNT_2_H_CNT_MSB_M MAKEMASK(0xFF, 0) +#define GLSTAT_ACL_CNT_2_L(_i) (0x0038A000 + ((_i) * 8)) /* _i=0...511 */ /* Reset Source: CORER */ +#define GLSTAT_ACL_CNT_2_L_MAX_INDEX 511 +#define GLSTAT_ACL_CNT_2_L_CNT_LSB_S 0 +#define GLSTAT_ACL_CNT_2_L_CNT_LSB_M MAKEMASK(0xFFFFFFFF, 0) +#define GLSTAT_ACL_CNT_3_H(_i) (0x0038B004 + ((_i) * 8)) /* _i=0...511 */ /* Reset Source: CORER */ +#define GLSTAT_ACL_CNT_3_H_MAX_INDEX 511 +#define GLSTAT_ACL_CNT_3_H_CNT_MSB_S 0 +#define GLSTAT_ACL_CNT_3_H_CNT_MSB_M MAKEMASK(0xFF, 0) +#define GLSTAT_ACL_CNT_3_L(_i) (0x0038B000 + ((_i) * 8)) /* _i=0...511 */ /* Reset Source: CORER */ +#define GLSTAT_ACL_CNT_3_L_MAX_INDEX 511 +#define GLSTAT_ACL_CNT_3_L_CNT_LSB_S 0 +#define GLSTAT_ACL_CNT_3_L_CNT_LSB_M MAKEMASK(0xFFFFFFFF, 0) +#define GLSTAT_FD_CNT0H(_i) (0x003A0004 + ((_i) * 8)) /* _i=0...4095 */ /* Reset Source: CORER */ +#define GLSTAT_FD_CNT0H_MAX_INDEX 4095 +#define GLSTAT_FD_CNT0H_FD0_CNT_H_S 0 +#define GLSTAT_FD_CNT0H_FD0_CNT_H_M MAKEMASK(0xFF, 0) +#define GLSTAT_FD_CNT0L(_i) (0x003A0000 + ((_i) * 8)) /* _i=0...4095 */ /* Reset Source: CORER */ +#define GLSTAT_FD_CNT0L_MAX_INDEX 4095 +#define GLSTAT_FD_CNT0L_FD0_CNT_L_S 0 +#define GLSTAT_FD_CNT0L_FD0_CNT_L_M MAKEMASK(0xFFFFFFFF, 0) +#define GLSTAT_FD_CNT1H(_i) (0x003A8004 + ((_i) * 8)) /* _i=0...4095 */ /* Reset Source: CORER */ +#define GLSTAT_FD_CNT1H_MAX_INDEX 4095 +#define GLSTAT_FD_CNT1H_FD0_CNT_H_S 0 +#define GLSTAT_FD_CNT1H_FD0_CNT_H_M MAKEMASK(0xFF, 0) +#define GLSTAT_FD_CNT1L(_i) (0x003A8000 + ((_i) * 8)) /* _i=0...4095 */ /* Reset Source: CORER */ +#define GLSTAT_FD_CNT1L_MAX_INDEX 4095 +#define GLSTAT_FD_CNT1L_FD0_CNT_L_S 0 +#define GLSTAT_FD_CNT1L_FD0_CNT_L_M MAKEMASK(0xFFFFFFFF, 0) +#define GLSW_BPRCH(_i) (0x00346204 + ((_i) * 8)) /* _i=0...31 */ /* Reset Source: CORER */ +#define GLSW_BPRCH_MAX_INDEX 31 +#define GLSW_BPRCH_BPRCH_S 0 +#define GLSW_BPRCH_BPRCH_M MAKEMASK(0xFF, 0) +#define GLSW_BPRCL(_i) (0x00346200 + ((_i) * 8)) /* _i=0...31 */ /* Reset Source: CORER */ +#define GLSW_BPRCL_MAX_INDEX 31 +#define GLSW_BPRCL_BPRCL_S 0 +#define GLSW_BPRCL_BPRCL_M MAKEMASK(0xFFFFFFFF, 0) +#define GLSW_BPTCH(_i) (0x00310204 + ((_i) * 8)) /* _i=0...31 */ /* Reset Source: CORER */ +#define GLSW_BPTCH_MAX_INDEX 31 +#define GLSW_BPTCH_BPTCH_S 0 +#define GLSW_BPTCH_BPTCH_M MAKEMASK(0xFF, 0) +#define GLSW_BPTCL(_i) (0x00310200 + ((_i) * 8)) /* _i=0...31 */ /* Reset Source: CORER */ +#define GLSW_BPTCL_MAX_INDEX 31 +#define GLSW_BPTCL_BPTCL_S 0 +#define GLSW_BPTCL_BPTCL_M MAKEMASK(0xFFFFFFFF, 0) +#define GLSW_GORCH(_i) (0x00341004 + ((_i) * 8)) /* _i=0...31 */ /* Reset Source: CORER */ +#define GLSW_GORCH_MAX_INDEX 31 +#define GLSW_GORCH_GORCH_S 0 +#define GLSW_GORCH_GORCH_M MAKEMASK(0xFF, 0) +#define GLSW_GORCL(_i) (0x00341000 + ((_i) * 8)) /* _i=0...31 */ /* Reset Source: CORER */ +#define GLSW_GORCL_MAX_INDEX 31 +#define GLSW_GORCL_GORCL_S 0 +#define GLSW_GORCL_GORCL_M MAKEMASK(0xFFFFFFFF, 0) 
+#define GLSW_GOTCH(_i) (0x00302004 + ((_i) * 8)) /* _i=0...31 */ /* Reset Source: CORER */ +#define GLSW_GOTCH_MAX_INDEX 31 +#define GLSW_GOTCH_GOTCH_S 0 +#define GLSW_GOTCH_GOTCH_M MAKEMASK(0xFF, 0) +#define GLSW_GOTCL(_i) (0x00302000 + ((_i) * 8)) /* _i=0...31 */ /* Reset Source: CORER */ +#define GLSW_GOTCL_MAX_INDEX 31 +#define GLSW_GOTCL_GOTCL_S 0 +#define GLSW_GOTCL_GOTCL_M MAKEMASK(0xFFFFFFFF, 0) +#define GLSW_MPRCH(_i) (0x00346104 + ((_i) * 8)) /* _i=0...31 */ /* Reset Source: CORER */ +#define GLSW_MPRCH_MAX_INDEX 31 +#define GLSW_MPRCH_MPRCH_S 0 +#define GLSW_MPRCH_MPRCH_M MAKEMASK(0xFF, 0) +#define GLSW_MPRCL(_i) (0x00346100 + ((_i) * 8)) /* _i=0...31 */ /* Reset Source: CORER */ +#define GLSW_MPRCL_MAX_INDEX 31 +#define GLSW_MPRCL_MPRCL_S 0 +#define GLSW_MPRCL_MPRCL_M MAKEMASK(0xFFFFFFFF, 0) +#define GLSW_MPTCH(_i) (0x00310104 + ((_i) * 8)) /* _i=0...31 */ /* Reset Source: CORER */ +#define GLSW_MPTCH_MAX_INDEX 31 +#define GLSW_MPTCH_MPTCH_S 0 +#define GLSW_MPTCH_MPTCH_M MAKEMASK(0xFF, 0) +#define GLSW_MPTCL(_i) (0x00310100 + ((_i) * 8)) /* _i=0...31 */ /* Reset Source: CORER */ +#define GLSW_MPTCL_MAX_INDEX 31 +#define GLSW_MPTCL_MPTCL_S 0 +#define GLSW_MPTCL_MPTCL_M MAKEMASK(0xFFFFFFFF, 0) +#define GLSW_UPRCH(_i) (0x00346004 + ((_i) * 8)) /* _i=0...31 */ /* Reset Source: CORER */ +#define GLSW_UPRCH_MAX_INDEX 31 +#define GLSW_UPRCH_UPRCH_S 0 +#define GLSW_UPRCH_UPRCH_M MAKEMASK(0xFF, 0) +#define GLSW_UPRCL(_i) (0x00346000 + ((_i) * 8)) /* _i=0...31 */ /* Reset Source: CORER */ +#define GLSW_UPRCL_MAX_INDEX 31 +#define GLSW_UPRCL_UPRCL_S 0 +#define GLSW_UPRCL_UPRCL_M MAKEMASK(0xFFFFFFFF, 0) +#define GLSW_UPTCH(_i) (0x00310004 + ((_i) * 8)) /* _i=0...31 */ /* Reset Source: CORER */ +#define GLSW_UPTCH_MAX_INDEX 31 +#define GLSW_UPTCH_UPTCH_S 0 +#define GLSW_UPTCH_UPTCH_M MAKEMASK(0xFF, 0) +#define GLSW_UPTCL(_i) (0x00310000 + ((_i) * 8)) /* _i=0...31 */ /* Reset Source: CORER */ +#define GLSW_UPTCL_MAX_INDEX 31 +#define GLSW_UPTCL_UPTCL_S 0 +#define GLSW_UPTCL_UPTCL_M MAKEMASK(0xFFFFFFFF, 0) +#define GLSWID_RUPP(_i) (0x00345000 + ((_i) * 4)) /* _i=0...255 */ /* Reset Source: CORER */ +#define GLSWID_RUPP_MAX_INDEX 255 +#define GLSWID_RUPP_RUPP_S 0 +#define GLSWID_RUPP_RUPP_M MAKEMASK(0xFFFFFFFF, 0) +#define GLV_BPRCH(_i) (0x003B6004 + ((_i) * 8)) /* _i=0...767 */ /* Reset Source: CORER */ +#define GLV_BPRCH_MAX_INDEX 767 +#define GLV_BPRCH_BPRCH_S 0 +#define GLV_BPRCH_BPRCH_M MAKEMASK(0xFF, 0) +#define GLV_BPRCL(_i) (0x003B6000 + ((_i) * 8)) /* _i=0...767 */ /* Reset Source: CORER */ +#define GLV_BPRCL_MAX_INDEX 767 +#define GLV_BPRCL_BPRCL_S 0 +#define GLV_BPRCL_BPRCL_M MAKEMASK(0xFFFFFFFF, 0) +#define GLV_BPTCH(_i) (0x0030E004 + ((_i) * 8)) /* _i=0...767 */ /* Reset Source: CORER */ +#define GLV_BPTCH_MAX_INDEX 767 +#define GLV_BPTCH_BPTCH_S 0 +#define GLV_BPTCH_BPTCH_M MAKEMASK(0xFF, 0) +#define GLV_BPTCL(_i) (0x0030E000 + ((_i) * 8)) /* _i=0...767 */ /* Reset Source: CORER */ +#define GLV_BPTCL_MAX_INDEX 767 +#define GLV_BPTCL_BPTCL_S 0 +#define GLV_BPTCL_BPTCL_M MAKEMASK(0xFFFFFFFF, 0) +#define GLV_GORCH(_i) (0x003B0004 + ((_i) * 8)) /* _i=0...767 */ /* Reset Source: CORER */ +#define GLV_GORCH_MAX_INDEX 767 +#define GLV_GORCH_GORCH_S 0 +#define GLV_GORCH_GORCH_M MAKEMASK(0xFF, 0) +#define GLV_GORCL(_i) (0x003B0000 + ((_i) * 8)) /* _i=0...767 */ /* Reset Source: CORER */ +#define GLV_GORCL_MAX_INDEX 767 +#define GLV_GORCL_GORCL_S 0 +#define GLV_GORCL_GORCL_M MAKEMASK(0xFFFFFFFF, 0) +#define GLV_GOTCH(_i) (0x00300004 + ((_i) * 8)) /* _i=0...767 */ /* Reset Source: CORER */ 
+#define GLV_GOTCH_MAX_INDEX 767 +#define GLV_GOTCH_GOTCH_S 0 +#define GLV_GOTCH_GOTCH_M MAKEMASK(0xFF, 0) +#define GLV_GOTCL(_i) (0x00300000 + ((_i) * 8)) /* _i=0...767 */ /* Reset Source: CORER */ +#define GLV_GOTCL_MAX_INDEX 767 +#define GLV_GOTCL_GOTCL_S 0 +#define GLV_GOTCL_GOTCL_M MAKEMASK(0xFFFFFFFF, 0) +#define GLV_MPRCH(_i) (0x003B4004 + ((_i) * 8)) /* _i=0...767 */ /* Reset Source: CORER */ +#define GLV_MPRCH_MAX_INDEX 767 +#define GLV_MPRCH_MPRCH_S 0 +#define GLV_MPRCH_MPRCH_M MAKEMASK(0xFF, 0) +#define GLV_MPRCL(_i) (0x003B4000 + ((_i) * 8)) /* _i=0...767 */ /* Reset Source: CORER */ +#define GLV_MPRCL_MAX_INDEX 767 +#define GLV_MPRCL_MPRCL_S 0 +#define GLV_MPRCL_MPRCL_M MAKEMASK(0xFFFFFFFF, 0) +#define GLV_MPTCH(_i) (0x0030C004 + ((_i) * 8)) /* _i=0...767 */ /* Reset Source: CORER */ +#define GLV_MPTCH_MAX_INDEX 767 +#define GLV_MPTCH_MPTCH_S 0 +#define GLV_MPTCH_MPTCH_M MAKEMASK(0xFF, 0) +#define GLV_MPTCL(_i) (0x0030C000 + ((_i) * 8)) /* _i=0...767 */ /* Reset Source: CORER */ +#define GLV_MPTCL_MAX_INDEX 767 +#define GLV_MPTCL_MPTCL_S 0 +#define GLV_MPTCL_MPTCL_M MAKEMASK(0xFFFFFFFF, 0) +#define GLV_RDPC(_i) (0x00294C04 + ((_i) * 4)) /* _i=0...767 */ /* Reset Source: CORER */ +#define GLV_RDPC_MAX_INDEX 767 +#define GLV_RDPC_RDPC_S 0 +#define GLV_RDPC_RDPC_M MAKEMASK(0xFFFFFFFF, 0) +#define GLV_REPC(_i) (0x00295804 + ((_i) * 4)) /* _i=0...767 */ /* Reset Source: CORER */ +#define GLV_REPC_MAX_INDEX 767 +#define GLV_REPC_NO_DESC_CNT_S 0 +#define GLV_REPC_NO_DESC_CNT_M MAKEMASK(0xFFFF, 0) +#define GLV_REPC_ERROR_CNT_S 16 +#define GLV_REPC_ERROR_CNT_M MAKEMASK(0xFFFF, 16) +#define GLV_TEPC(_VSI) (0x00312000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: CORER */ +#define GLV_TEPC_MAX_INDEX 767 +#define GLV_TEPC_TEPC_S 0 +#define GLV_TEPC_TEPC_M MAKEMASK(0xFFFFFFFF, 0) +#define GLV_UPRCH(_i) (0x003B2004 + ((_i) * 8)) /* _i=0...767 */ /* Reset Source: CORER */ +#define GLV_UPRCH_MAX_INDEX 767 +#define GLV_UPRCH_UPRCH_S 0 +#define GLV_UPRCH_UPRCH_M MAKEMASK(0xFF, 0) +#define GLV_UPRCL(_i) (0x003B2000 + ((_i) * 8)) /* _i=0...767 */ /* Reset Source: CORER */ +#define GLV_UPRCL_MAX_INDEX 767 +#define GLV_UPRCL_UPRCL_S 0 +#define GLV_UPRCL_UPRCL_M MAKEMASK(0xFFFFFFFF, 0) +#define GLV_UPTCH(_i) (0x0030A004 + ((_i) * 8)) /* _i=0...767 */ /* Reset Source: CORER */ +#define GLV_UPTCH_MAX_INDEX 767 +#define GLV_UPTCH_GLVUPTCH_S 0 +#define GLV_UPTCH_GLVUPTCH_M MAKEMASK(0xFF, 0) +#define GLV_UPTCL(_i) (0x0030A000 + ((_i) * 8)) /* _i=0...767 */ /* Reset Source: CORER */ +#define GLV_UPTCL_MAX_INDEX 767 +#define GLV_UPTCL_UPTCL_S 0 +#define GLV_UPTCL_UPTCL_M MAKEMASK(0xFFFFFFFF, 0) +#define GLVEBUP_RBCH(_i, _j) (0x00343004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...31 */ /* Reset Source: CORER */ +#define GLVEBUP_RBCH_MAX_INDEX 7 +#define GLVEBUP_RBCH_UPBCH_S 0 +#define GLVEBUP_RBCH_UPBCH_M MAKEMASK(0xFF, 0) +#define GLVEBUP_RBCL(_i, _j) (0x00343000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...31 */ /* Reset Source: CORER */ +#define GLVEBUP_RBCL_MAX_INDEX 7 +#define GLVEBUP_RBCL_UPBCL_S 0 +#define GLVEBUP_RBCL_UPBCL_M MAKEMASK(0xFFFFFFFF, 0) +#define GLVEBUP_RPCH(_i, _j) (0x00344004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...31 */ /* Reset Source: CORER */ +#define GLVEBUP_RPCH_MAX_INDEX 7 +#define GLVEBUP_RPCH_UPPCH_S 0 +#define GLVEBUP_RPCH_UPPCH_M MAKEMASK(0xFF, 0) +#define GLVEBUP_RPCL(_i, _j) (0x00344000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...31 */ /* Reset Source: CORER */ +#define GLVEBUP_RPCL_MAX_INDEX 7 +#define GLVEBUP_RPCL_UPPCL_S 0 +#define GLVEBUP_RPCL_UPPCL_M 
MAKEMASK(0xFFFFFFFF, 0) +#define GLVEBUP_TBCH(_i, _j) (0x00306004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...31 */ /* Reset Source: CORER */ +#define GLVEBUP_TBCH_MAX_INDEX 7 +#define GLVEBUP_TBCH_UPBCH_S 0 +#define GLVEBUP_TBCH_UPBCH_M MAKEMASK(0xFF, 0) +#define GLVEBUP_TBCL(_i, _j) (0x00306000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...31 */ /* Reset Source: CORER */ +#define GLVEBUP_TBCL_MAX_INDEX 7 +#define GLVEBUP_TBCL_UPBCL_S 0 +#define GLVEBUP_TBCL_UPBCL_M MAKEMASK(0xFFFFFFFF, 0) +#define GLVEBUP_TPCH(_i, _j) (0x00308004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...31 */ /* Reset Source: CORER */ +#define GLVEBUP_TPCH_MAX_INDEX 7 +#define GLVEBUP_TPCH_UPPCH_S 0 +#define GLVEBUP_TPCH_UPPCH_M MAKEMASK(0xFF, 0) +#define GLVEBUP_TPCL(_i, _j) (0x00308000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...31 */ /* Reset Source: CORER */ +#define GLVEBUP_TPCL_MAX_INDEX 7 +#define GLVEBUP_TPCL_UPPCL_S 0 +#define GLVEBUP_TPCL_UPPCL_M MAKEMASK(0xFFFFFFFF, 0) +#define PRTRPB_LDPC 0x000AC280 /* Reset Source: CORER */ +#define PRTRPB_LDPC_CRCERRS_S 0 +#define PRTRPB_LDPC_CRCERRS_M MAKEMASK(0xFFFFFFFF, 0) +#define PRTRPB_RDPC 0x000AC260 /* Reset Source: CORER */ +#define PRTRPB_RDPC_CRCERRS_S 0 +#define PRTRPB_RDPC_CRCERRS_M MAKEMASK(0xFFFFFFFF, 0) +#define PRTTPB_STAT_TC_BYTES_SENTL(_i) (0x00098200 + ((_i) * 4)) /* _i=0...63 */ /* Reset Source: CORER */ +#define PRTTPB_STAT_TC_BYTES_SENTL_MAX_INDEX 63 +#define PRTTPB_STAT_TC_BYTES_SENTL_TCCNT_S 0 +#define PRTTPB_STAT_TC_BYTES_SENTL_TCCNT_M MAKEMASK(0xFFFFFFFF, 0) +#define TPB_PRTTPB_STAT_PKT_SENT(_i) (0x00099470 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */ +#define TPB_PRTTPB_STAT_PKT_SENT_MAX_INDEX 7 +#define TPB_PRTTPB_STAT_PKT_SENT_PKTCNT_S 0 +#define TPB_PRTTPB_STAT_PKT_SENT_PKTCNT_M MAKEMASK(0xFFFFFFFF, 0) +#define TPB_PRTTPB_STAT_TC_BYTES_SENT(_i) (0x00099094 + ((_i) * 4)) /* _i=0...63 */ /* Reset Source: CORER */ +#define TPB_PRTTPB_STAT_TC_BYTES_SENT_MAX_INDEX 63 +#define TPB_PRTTPB_STAT_TC_BYTES_SENT_TCCNT_S 0 +#define TPB_PRTTPB_STAT_TC_BYTES_SENT_TCCNT_M MAKEMASK(0xFFFFFFFF, 0) +#define EMP_SWT_PRUNIND 0x00204020 /* Reset Source: CORER */ +#define EMP_SWT_PRUNIND_OPCODE_S 0 +#define EMP_SWT_PRUNIND_OPCODE_M MAKEMASK(0xF, 0) +#define EMP_SWT_PRUNIND_LIST_INDEX_NUM_S 4 +#define EMP_SWT_PRUNIND_LIST_INDEX_NUM_M MAKEMASK(0x3FF, 4) +#define EMP_SWT_PRUNIND_VSI_NUM_S 16 +#define EMP_SWT_PRUNIND_VSI_NUM_M MAKEMASK(0x3FF, 16) +#define EMP_SWT_PRUNIND_BIT_VALUE_S 31 +#define EMP_SWT_PRUNIND_BIT_VALUE_M BIT(31) +#define EMP_SWT_REPIND 0x0020401C /* Reset Source: CORER */ +#define EMP_SWT_REPIND_OPCODE_S 0 +#define EMP_SWT_REPIND_OPCODE_M MAKEMASK(0xF, 0) +#define EMP_SWT_REPIND_LIST_INDEX_NUMBER_S 4 +#define EMP_SWT_REPIND_LIST_INDEX_NUMBER_M MAKEMASK(0x3FF, 4) +#define EMP_SWT_REPIND_VSI_NUM_S 16 +#define EMP_SWT_REPIND_VSI_NUM_M MAKEMASK(0x3FF, 16) +#define EMP_SWT_REPIND_BIT_VALUE_S 31 +#define EMP_SWT_REPIND_BIT_VALUE_M BIT(31) +#define GL_OVERRIDEC 0x002040A4 /* Reset Source: CORER */ +#define GL_OVERRIDEC_OVERRIDE_ATTEMPTC_S 0 +#define GL_OVERRIDEC_OVERRIDE_ATTEMPTC_M MAKEMASK(0xFFFF, 0) +#define GL_OVERRIDEC_LAST_VSI_S 16 +#define GL_OVERRIDEC_LAST_VSI_M MAKEMASK(0x3FF, 16) +#define GL_PLG_AVG_CALC_CFG 0x0020A5AC /* Reset Source: CORER */ +#define GL_PLG_AVG_CALC_CFG_CYCLE_LEN_S 0 +#define GL_PLG_AVG_CALC_CFG_CYCLE_LEN_M MAKEMASK(0x7FFFFFFF, 0) +#define GL_PLG_AVG_CALC_CFG_MODE_S 31 +#define GL_PLG_AVG_CALC_CFG_MODE_M BIT(31) +#define GL_PLG_AVG_CALC_ST 0x0020A5B0 /* Reset Source: CORER */ +#define 
GL_PLG_AVG_CALC_ST_IN_DATA_S 0 +#define GL_PLG_AVG_CALC_ST_IN_DATA_M MAKEMASK(0x7FFF, 0) +#define GL_PLG_AVG_CALC_ST_OUT_DATA_S 16 +#define GL_PLG_AVG_CALC_ST_OUT_DATA_M MAKEMASK(0x7FFF, 16) +#define GL_PLG_AVG_CALC_ST_VALID_S 31 +#define GL_PLG_AVG_CALC_ST_VALID_M BIT(31) +#define GL_PRE_CFG_CMD 0x00214090 /* Reset Source: CORER */ +#define GL_PRE_CFG_CMD_ADDR_S 0 +#define GL_PRE_CFG_CMD_ADDR_M MAKEMASK(0x1FFF, 0) +#define GL_PRE_CFG_CMD_TBLIDX_S 16 +#define GL_PRE_CFG_CMD_TBLIDX_M MAKEMASK(0x7, 16) +#define GL_PRE_CFG_CMD_CMD_S 29 +#define GL_PRE_CFG_CMD_CMD_M BIT(29) +#define GL_PRE_CFG_CMD_DONE_S 31 +#define GL_PRE_CFG_CMD_DONE_M BIT(31) +#define GL_PRE_CFG_DATA(_i) (0x00214074 + ((_i) * 4)) /* _i=0...6 */ /* Reset Source: CORER */ +#define GL_PRE_CFG_DATA_MAX_INDEX 6 +#define GL_PRE_CFG_DATA_GL_PRE_RCP_DATA_S 0 +#define GL_PRE_CFG_DATA_GL_PRE_RCP_DATA_M MAKEMASK(0xFFFFFFFF, 0) +#define GL_SWT_FUNCFILT 0x001D2698 /* Reset Source: CORER */ +#define GL_SWT_FUNCFILT_FUNCFILT_S 0 +#define GL_SWT_FUNCFILT_FUNCFILT_M BIT(0) +#define GL_SWT_FW_STS(_i) (0x00216000 + ((_i) * 4)) /* _i=0...5 */ /* Reset Source: CORER */ +#define GL_SWT_FW_STS_MAX_INDEX 5 +#define GL_SWT_FW_STS_GL_SWT_FW_STS_S 0 +#define GL_SWT_FW_STS_GL_SWT_FW_STS_M MAKEMASK(0xFFFFFFFF, 0) +#define GL_SWT_LAT_DOUBLE 0x00204004 /* Reset Source: CORER */ +#define GL_SWT_LAT_DOUBLE_BASE_S 0 +#define GL_SWT_LAT_DOUBLE_BASE_M MAKEMASK(0x7FF, 0) +#define GL_SWT_LAT_DOUBLE_SIZE_S 16 +#define GL_SWT_LAT_DOUBLE_SIZE_M MAKEMASK(0x7FF, 16) +#define GL_SWT_LAT_QUAD 0x00204008 /* Reset Source: CORER */ +#define GL_SWT_LAT_QUAD_BASE_S 0 +#define GL_SWT_LAT_QUAD_BASE_M MAKEMASK(0x7FF, 0) +#define GL_SWT_LAT_QUAD_SIZE_S 16 +#define GL_SWT_LAT_QUAD_SIZE_M MAKEMASK(0x7FF, 16) +#define GL_SWT_LAT_SINGLE 0x00204000 /* Reset Source: CORER */ +#define GL_SWT_LAT_SINGLE_BASE_S 0 +#define GL_SWT_LAT_SINGLE_BASE_M MAKEMASK(0x7FF, 0) +#define GL_SWT_LAT_SINGLE_SIZE_S 16 +#define GL_SWT_LAT_SINGLE_SIZE_M MAKEMASK(0x7FF, 16) +#define GL_SWT_MD_PRI 0x002040AC /* Reset Source: CORER */ +#define GL_SWT_MD_PRI_VSI_PRI_S 0 +#define GL_SWT_MD_PRI_VSI_PRI_M MAKEMASK(0x7, 0) +#define GL_SWT_MD_PRI_LB_PRI_S 4 +#define GL_SWT_MD_PRI_LB_PRI_M MAKEMASK(0x7, 4) +#define GL_SWT_MD_PRI_LAN_EN_PRI_S 8 +#define GL_SWT_MD_PRI_LAN_EN_PRI_M MAKEMASK(0x7, 8) +#define GL_SWT_MD_PRI_QH_PRI_S 12 +#define GL_SWT_MD_PRI_QH_PRI_M MAKEMASK(0x7, 12) +#define GL_SWT_MD_PRI_QL_PRI_S 16 +#define GL_SWT_MD_PRI_QL_PRI_M MAKEMASK(0x7, 16) +#define GL_SWT_MIRTARVSI(_i) (0x00204500 + ((_i) * 4)) /* _i=0...63 */ /* Reset Source: CORER */ +#define GL_SWT_MIRTARVSI_MAX_INDEX 63 +#define GL_SWT_MIRTARVSI_VFVMNUMBER_S 0 +#define GL_SWT_MIRTARVSI_VFVMNUMBER_M MAKEMASK(0x3FF, 0) +#define GL_SWT_MIRTARVSI_FUNCTIONTYPE_S 10 +#define GL_SWT_MIRTARVSI_FUNCTIONTYPE_M MAKEMASK(0x3, 10) +#define GL_SWT_MIRTARVSI_PFNUMBER_S 12 +#define GL_SWT_MIRTARVSI_PFNUMBER_M MAKEMASK(0x7, 12) +#define GL_SWT_MIRTARVSI_TARGETVSI_S 20 +#define GL_SWT_MIRTARVSI_TARGETVSI_M MAKEMASK(0x3FF, 20) +#define GL_SWT_MIRTARVSI_RULEENABLE_S 31 +#define GL_SWT_MIRTARVSI_RULEENABLE_M BIT(31) +#define GL_SWT_SWIDFVIDX 0x00214114 /* Reset Source: CORER */ +#define GL_SWT_SWIDFVIDX_SWIDFVIDX_S 0 +#define GL_SWT_SWIDFVIDX_SWIDFVIDX_M MAKEMASK(0x3F, 0) +#define GL_SWT_SWIDFVIDX_PORT_TYPE_S 31 +#define GL_SWT_SWIDFVIDX_PORT_TYPE_M BIT(31) +#define GL_VP_SWITCHID(_i) (0x00214094 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ +#define GL_VP_SWITCHID_MAX_INDEX 31 +#define GL_VP_SWITCHID_SWITCHID_S 0 +#define GL_VP_SWITCHID_SWITCHID_M 
MAKEMASK(0xFF, 0) +#define GLSWID_STAT_BLOCK(_i) (0x0020A1A4 + ((_i) * 4)) /* _i=0...255 */ /* Reset Source: PFR */ +#define GLSWID_STAT_BLOCK_MAX_INDEX 255 +#define GLSWID_STAT_BLOCK_VEBID_S 0 +#define GLSWID_STAT_BLOCK_VEBID_M MAKEMASK(0x1F, 0) +#define GLSWID_STAT_BLOCK_VEBID_VALID_S 31 +#define GLSWID_STAT_BLOCK_VEBID_VALID_M BIT(31) +#define GLSWT_ACT_RESP_0 0x0020A5A4 /* Reset Source: CORER */ +#define GLSWT_ACT_RESP_0_GLSWT_ACT_RESP_S 0 +#define GLSWT_ACT_RESP_0_GLSWT_ACT_RESP_M MAKEMASK(0xFFFFFFFF, 0) +#define GLSWT_ACT_RESP_1 0x0020A5A8 /* Reset Source: CORER */ +#define GLSWT_ACT_RESP_1_GLSWT_ACT_RESP_S 0 +#define GLSWT_ACT_RESP_1_GLSWT_ACT_RESP_M MAKEMASK(0xFFFFFFFF, 0) +#define GLSWT_ARB_MODE 0x0020A674 /* Reset Source: CORER */ +#define GLSWT_ARB_MODE_FLU_PRI_SHM_S 0 +#define GLSWT_ARB_MODE_FLU_PRI_SHM_M BIT(0) +#define GLSWT_ARB_MODE_TX_RX_FWD_PRI_S 1 +#define GLSWT_ARB_MODE_TX_RX_FWD_PRI_M BIT(1) +#define PRT_SBPVSI 0x00204120 /* Reset Source: CORER */ +#define PRT_SBPVSI_BAD_FRAMES_VSI_S 0 +#define PRT_SBPVSI_BAD_FRAMES_VSI_M MAKEMASK(0x3FF, 0) +#define PRT_SBPVSI_SBP_S 31 +#define PRT_SBPVSI_SBP_M BIT(31) +#define PRT_SCSTS 0x00204140 /* Reset Source: CORER */ +#define PRT_SCSTS_BSCA_S 0 +#define PRT_SCSTS_BSCA_M BIT(0) +#define PRT_SCSTS_BSCAP_S 1 +#define PRT_SCSTS_BSCAP_M BIT(1) +#define PRT_SCSTS_MSCA_S 2 +#define PRT_SCSTS_MSCA_M BIT(2) +#define PRT_SCSTS_MSCAP_S 3 +#define PRT_SCSTS_MSCAP_M BIT(3) +#define PRT_SWT_BSCCNT 0x00204160 /* Reset Source: CORER */ +#define PRT_SWT_BSCCNT_CCOUNT_S 0 +#define PRT_SWT_BSCCNT_CCOUNT_M MAKEMASK(0x1FFFFFF, 0) +#define PRT_SWT_BSCTRH 0x00204180 /* Reset Source: CORER */ +#define PRT_SWT_BSCTRH_UTRESH_S 0 +#define PRT_SWT_BSCTRH_UTRESH_M MAKEMASK(0x7FFFF, 0) +#define PRT_SWT_MIREG 0x002042A0 /* Reset Source: CORER */ +#define PRT_SWT_MIREG_MIRRULE_S 0 +#define PRT_SWT_MIREG_MIRRULE_M MAKEMASK(0x3F, 0) +#define PRT_SWT_MIREG_MIRENA_S 7 +#define PRT_SWT_MIREG_MIRENA_M BIT(7) +#define PRT_SWT_MIRIG 0x00204280 /* Reset Source: CORER */ +#define PRT_SWT_MIRIG_MIRRULE_S 0 +#define PRT_SWT_MIRIG_MIRRULE_M MAKEMASK(0x3F, 0) +#define PRT_SWT_MIRIG_MIRENA_S 7 +#define PRT_SWT_MIRIG_MIRENA_M BIT(7) +#define PRT_SWT_MSCCNT 0x00204100 /* Reset Source: CORER */ +#define PRT_SWT_MSCCNT_CCOUNT_S 0 +#define PRT_SWT_MSCCNT_CCOUNT_M MAKEMASK(0x1FFFFFF, 0) +#define PRT_SWT_MSCTRH 0x002041C0 /* Reset Source: CORER */ +#define PRT_SWT_MSCTRH_UTRESH_S 0 +#define PRT_SWT_MSCTRH_UTRESH_M MAKEMASK(0x7FFFF, 0) +#define PRT_SWT_SCBI 0x002041E0 /* Reset Source: CORER */ +#define PRT_SWT_SCBI_BI_S 0 +#define PRT_SWT_SCBI_BI_M MAKEMASK(0x1FFFFFF, 0) +#define PRT_SWT_SCCRL 0x00204200 /* Reset Source: CORER */ +#define PRT_SWT_SCCRL_MDIPW_S 0 +#define PRT_SWT_SCCRL_MDIPW_M BIT(0) +#define PRT_SWT_SCCRL_MDICW_S 1 +#define PRT_SWT_SCCRL_MDICW_M BIT(1) +#define PRT_SWT_SCCRL_BDIPW_S 2 +#define PRT_SWT_SCCRL_BDIPW_M BIT(2) +#define PRT_SWT_SCCRL_BDICW_S 3 +#define PRT_SWT_SCCRL_BDICW_M BIT(3) +#define PRT_SWT_SCCRL_INTERVAL_S 8 +#define PRT_SWT_SCCRL_INTERVAL_M MAKEMASK(0xFFFFF, 8) +#define PRT_TCTUPR(_i) (0x00040840 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */ +#define PRT_TCTUPR_MAX_INDEX 31 +#define PRT_TCTUPR_UP0_S 0 +#define PRT_TCTUPR_UP0_M MAKEMASK(0x7, 0) +#define PRT_TCTUPR_UP1_S 4 +#define PRT_TCTUPR_UP1_M MAKEMASK(0x7, 4) +#define PRT_TCTUPR_UP2_S 8 +#define PRT_TCTUPR_UP2_M MAKEMASK(0x7, 8) +#define PRT_TCTUPR_UP3_S 12 +#define PRT_TCTUPR_UP3_M MAKEMASK(0x7, 12) +#define PRT_TCTUPR_UP4_S 16 +#define PRT_TCTUPR_UP4_M MAKEMASK(0x7, 16) +#define 
PRT_TCTUPR_UP5_S 20 +#define PRT_TCTUPR_UP5_M MAKEMASK(0x7, 20) +#define PRT_TCTUPR_UP6_S 24 +#define PRT_TCTUPR_UP6_M MAKEMASK(0x7, 24) +#define PRT_TCTUPR_UP7_S 28 +#define PRT_TCTUPR_UP7_M MAKEMASK(0x7, 28) +#define GLHH_ART_CTL 0x000A41D4 /* Reset Source: POR */ +#define GLHH_ART_CTL_ACTIVE_S 0 +#define GLHH_ART_CTL_ACTIVE_M BIT(0) +#define GLHH_ART_CTL_TIME_OUT1_S 1 +#define GLHH_ART_CTL_TIME_OUT1_M BIT(1) +#define GLHH_ART_CTL_TIME_OUT2_S 2 +#define GLHH_ART_CTL_TIME_OUT2_M BIT(2) +#define GLHH_ART_CTL_RESET_HH_S 31 +#define GLHH_ART_CTL_RESET_HH_M BIT(31) +#define GLHH_ART_DATA 0x000A41E0 /* Reset Source: POR */ +#define GLHH_ART_DATA_AGENT_TYPE_S 0 +#define GLHH_ART_DATA_AGENT_TYPE_M MAKEMASK(0x7, 0) +#define GLHH_ART_DATA_SYNC_TYPE_S 3 +#define GLHH_ART_DATA_SYNC_TYPE_M BIT(3) +#define GLHH_ART_DATA_MAX_DELAY_S 4 +#define GLHH_ART_DATA_MAX_DELAY_M MAKEMASK(0xF, 4) +#define GLHH_ART_DATA_TIME_BASE_S 8 +#define GLHH_ART_DATA_TIME_BASE_M MAKEMASK(0xF, 8) +#define GLHH_ART_DATA_RSV_DATA_S 12 +#define GLHH_ART_DATA_RSV_DATA_M MAKEMASK(0xFFFFF, 12) +#define GLHH_ART_TIME_H 0x000A41D8 /* Reset Source: POR */ +#define GLHH_ART_TIME_H_ART_TIME_H_S 0 +#define GLHH_ART_TIME_H_ART_TIME_H_M MAKEMASK(0xFFFFFFFF, 0) +#define GLHH_ART_TIME_L 0x000A41DC /* Reset Source: POR */ +#define GLHH_ART_TIME_L_ART_TIME_L_S 0 +#define GLHH_ART_TIME_L_ART_TIME_L_M MAKEMASK(0xFFFFFFFF, 0) +#define GLTSYN_AUX_IN_0(_i) (0x000889D8 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */ +#define GLTSYN_AUX_IN_0_MAX_INDEX 1 +#define GLTSYN_AUX_IN_0_EVNTLVL_S 0 +#define GLTSYN_AUX_IN_0_EVNTLVL_M MAKEMASK(0x3, 0) +#define GLTSYN_AUX_IN_0_INT_ENA_S 4 +#define GLTSYN_AUX_IN_0_INT_ENA_M BIT(4) +#define GLTSYN_AUX_IN_1(_i) (0x000889E0 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */ +#define GLTSYN_AUX_IN_1_MAX_INDEX 1 +#define GLTSYN_AUX_IN_1_EVNTLVL_S 0 +#define GLTSYN_AUX_IN_1_EVNTLVL_M MAKEMASK(0x3, 0) +#define GLTSYN_AUX_IN_1_INT_ENA_S 4 +#define GLTSYN_AUX_IN_1_INT_ENA_M BIT(4) +#define GLTSYN_AUX_IN_2(_i) (0x000889E8 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */ +#define GLTSYN_AUX_IN_2_MAX_INDEX 1 +#define GLTSYN_AUX_IN_2_EVNTLVL_S 0 +#define GLTSYN_AUX_IN_2_EVNTLVL_M MAKEMASK(0x3, 0) +#define GLTSYN_AUX_IN_2_INT_ENA_S 4 +#define GLTSYN_AUX_IN_2_INT_ENA_M BIT(4) +#define GLTSYN_AUX_OUT_0(_i) (0x00088998 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */ +#define GLTSYN_AUX_OUT_0_MAX_INDEX 1 +#define GLTSYN_AUX_OUT_0_OUT_ENA_S 0 +#define GLTSYN_AUX_OUT_0_OUT_ENA_M BIT(0) +#define GLTSYN_AUX_OUT_0_OUTMOD_S 1 +#define GLTSYN_AUX_OUT_0_OUTMOD_M MAKEMASK(0x3, 1) +#define GLTSYN_AUX_OUT_0_OUTLVL_S 3 +#define GLTSYN_AUX_OUT_0_OUTLVL_M BIT(3) +#define GLTSYN_AUX_OUT_0_INT_ENA_S 4 +#define GLTSYN_AUX_OUT_0_INT_ENA_M BIT(4) +#define GLTSYN_AUX_OUT_0_PULSEW_S 8 +#define GLTSYN_AUX_OUT_0_PULSEW_M MAKEMASK(0xF, 8) +#define GLTSYN_AUX_OUT_1(_i) (0x000889A0 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */ +#define GLTSYN_AUX_OUT_1_MAX_INDEX 1 +#define GLTSYN_AUX_OUT_1_OUT_ENA_S 0 +#define GLTSYN_AUX_OUT_1_OUT_ENA_M BIT(0) +#define GLTSYN_AUX_OUT_1_OUTMOD_S 1 +#define GLTSYN_AUX_OUT_1_OUTMOD_M MAKEMASK(0x3, 1) +#define GLTSYN_AUX_OUT_1_OUTLVL_S 3 +#define GLTSYN_AUX_OUT_1_OUTLVL_M BIT(3) +#define GLTSYN_AUX_OUT_1_INT_ENA_S 4 +#define GLTSYN_AUX_OUT_1_INT_ENA_M BIT(4) +#define GLTSYN_AUX_OUT_1_PULSEW_S 8 +#define GLTSYN_AUX_OUT_1_PULSEW_M MAKEMASK(0xF, 8) +#define GLTSYN_AUX_OUT_2(_i) (0x000889A8 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */ +#define GLTSYN_AUX_OUT_2_MAX_INDEX 1 +#define 
GLTSYN_AUX_OUT_2_OUT_ENA_S 0 +#define GLTSYN_AUX_OUT_2_OUT_ENA_M BIT(0) +#define GLTSYN_AUX_OUT_2_OUTMOD_S 1 +#define GLTSYN_AUX_OUT_2_OUTMOD_M MAKEMASK(0x3, 1) +#define GLTSYN_AUX_OUT_2_OUTLVL_S 3 +#define GLTSYN_AUX_OUT_2_OUTLVL_M BIT(3) +#define GLTSYN_AUX_OUT_2_INT_ENA_S 4 +#define GLTSYN_AUX_OUT_2_INT_ENA_M BIT(4) +#define GLTSYN_AUX_OUT_2_PULSEW_S 8 +#define GLTSYN_AUX_OUT_2_PULSEW_M MAKEMASK(0xF, 8) +#define GLTSYN_AUX_OUT_3(_i) (0x000889B0 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */ +#define GLTSYN_AUX_OUT_3_MAX_INDEX 1 +#define GLTSYN_AUX_OUT_3_OUT_ENA_S 0 +#define GLTSYN_AUX_OUT_3_OUT_ENA_M BIT(0) +#define GLTSYN_AUX_OUT_3_OUTMOD_S 1 +#define GLTSYN_AUX_OUT_3_OUTMOD_M MAKEMASK(0x3, 1) +#define GLTSYN_AUX_OUT_3_OUTLVL_S 3 +#define GLTSYN_AUX_OUT_3_OUTLVL_M BIT(3) +#define GLTSYN_AUX_OUT_3_INT_ENA_S 4 +#define GLTSYN_AUX_OUT_3_INT_ENA_M BIT(4) +#define GLTSYN_AUX_OUT_3_PULSEW_S 8 +#define GLTSYN_AUX_OUT_3_PULSEW_M MAKEMASK(0xF, 8) +#define GLTSYN_CLKO_0(_i) (0x000889B8 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */ +#define GLTSYN_CLKO_0_MAX_INDEX 1 +#define GLTSYN_CLKO_0_TSYNCLKO_S 0 +#define GLTSYN_CLKO_0_TSYNCLKO_M MAKEMASK(0xFFFFFFFF, 0) +#define GLTSYN_CLKO_1(_i) (0x000889C0 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */ +#define GLTSYN_CLKO_1_MAX_INDEX 1 +#define GLTSYN_CLKO_1_TSYNCLKO_S 0 +#define GLTSYN_CLKO_1_TSYNCLKO_M MAKEMASK(0xFFFFFFFF, 0) +#define GLTSYN_CLKO_2(_i) (0x000889C8 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */ +#define GLTSYN_CLKO_2_MAX_INDEX 1 +#define GLTSYN_CLKO_2_TSYNCLKO_S 0 +#define GLTSYN_CLKO_2_TSYNCLKO_M MAKEMASK(0xFFFFFFFF, 0) +#define GLTSYN_CLKO_3(_i) (0x000889D0 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */ +#define GLTSYN_CLKO_3_MAX_INDEX 1 +#define GLTSYN_CLKO_3_TSYNCLKO_S 0 +#define GLTSYN_CLKO_3_TSYNCLKO_M MAKEMASK(0xFFFFFFFF, 0) +#define GLTSYN_CMD 0x00088810 /* Reset Source: CORER */ +#define GLTSYN_CMD_CMD_S 0 +#define GLTSYN_CMD_CMD_M MAKEMASK(0xFF, 0) +#define GLTSYN_CMD_SEL_MASTER_S 8 +#define GLTSYN_CMD_SEL_MASTER_M BIT(8) +#define GLTSYN_CMD_SYNC 0x00088814 /* Reset Source: CORER */ +#define GLTSYN_CMD_SYNC_SYNC_S 0 +#define GLTSYN_CMD_SYNC_SYNC_M MAKEMASK(0x3, 0) +#define GLTSYN_ENA(_i) (0x00088808 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */ +#define GLTSYN_ENA_MAX_INDEX 1 +#define GLTSYN_ENA_TSYN_ENA_S 0 +#define GLTSYN_ENA_TSYN_ENA_M BIT(0) +#define GLTSYN_EVNT_H_0(_i) (0x00088970 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */ +#define GLTSYN_EVNT_H_0_MAX_INDEX 1 +#define GLTSYN_EVNT_H_0_TSYNEVNT_H_S 0 +#define GLTSYN_EVNT_H_0_TSYNEVNT_H_M MAKEMASK(0xFFFFFFFF, 0) +#define GLTSYN_EVNT_H_1(_i) (0x00088980 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */ +#define GLTSYN_EVNT_H_1_MAX_INDEX 1 +#define GLTSYN_EVNT_H_1_TSYNEVNT_H_S 0 +#define GLTSYN_EVNT_H_1_TSYNEVNT_H_M MAKEMASK(0xFFFFFFFF, 0) +#define GLTSYN_EVNT_H_2(_i) (0x00088990 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */ +#define GLTSYN_EVNT_H_2_MAX_INDEX 1 +#define GLTSYN_EVNT_H_2_TSYNEVNT_H_S 0 +#define GLTSYN_EVNT_H_2_TSYNEVNT_H_M MAKEMASK(0xFFFFFFFF, 0) +#define GLTSYN_EVNT_L_0(_i) (0x00088968 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */ +#define GLTSYN_EVNT_L_0_MAX_INDEX 1 +#define GLTSYN_EVNT_L_0_TSYNEVNT_L_S 0 +#define GLTSYN_EVNT_L_0_TSYNEVNT_L_M MAKEMASK(0xFFFFFFFF, 0) +#define GLTSYN_EVNT_L_1(_i) (0x00088978 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */ +#define GLTSYN_EVNT_L_1_MAX_INDEX 1 +#define GLTSYN_EVNT_L_1_TSYNEVNT_L_S 0 +#define GLTSYN_EVNT_L_1_TSYNEVNT_L_M 
MAKEMASK(0xFFFFFFFF, 0) +#define GLTSYN_EVNT_L_2(_i) (0x00088988 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */ +#define GLTSYN_EVNT_L_2_MAX_INDEX 1 +#define GLTSYN_EVNT_L_2_TSYNEVNT_L_S 0 +#define GLTSYN_EVNT_L_2_TSYNEVNT_L_M MAKEMASK(0xFFFFFFFF, 0) +#define GLTSYN_HHTIME_H(_i) (0x00088900 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */ +#define GLTSYN_HHTIME_H_MAX_INDEX 1 +#define GLTSYN_HHTIME_H_TSYNEVNT_H_S 0 +#define GLTSYN_HHTIME_H_TSYNEVNT_H_M MAKEMASK(0xFFFFFFFF, 0) +#define GLTSYN_HHTIME_L(_i) (0x000888F8 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */ +#define GLTSYN_HHTIME_L_MAX_INDEX 1 +#define GLTSYN_HHTIME_L_TSYNEVNT_L_S 0 +#define GLTSYN_HHTIME_L_TSYNEVNT_L_M MAKEMASK(0xFFFFFFFF, 0) +#define GLTSYN_INCVAL_H(_i) (0x00088920 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */ +#define GLTSYN_INCVAL_H_MAX_INDEX 1 +#define GLTSYN_INCVAL_H_INCVAL_H_S 0 +#define GLTSYN_INCVAL_H_INCVAL_H_M MAKEMASK(0xFF, 0) +#define GLTSYN_INCVAL_L(_i) (0x00088918 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */ +#define GLTSYN_INCVAL_L_MAX_INDEX 1 +#define GLTSYN_INCVAL_L_INCVAL_L_S 0 +#define GLTSYN_INCVAL_L_INCVAL_L_M MAKEMASK(0xFFFFFFFF, 0) +#define GLTSYN_SHADJ_H(_i) (0x00088910 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */ +#define GLTSYN_SHADJ_H_MAX_INDEX 1 +#define GLTSYN_SHADJ_H_ADJUST_H_S 0 +#define GLTSYN_SHADJ_H_ADJUST_H_M MAKEMASK(0xFFFFFFFF, 0) +#define GLTSYN_SHADJ_L(_i) (0x00088908 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */ +#define GLTSYN_SHADJ_L_MAX_INDEX 1 +#define GLTSYN_SHADJ_L_ADJUST_L_S 0 +#define GLTSYN_SHADJ_L_ADJUST_L_M MAKEMASK(0xFFFFFFFF, 0) +#define GLTSYN_SHTIME_0(_i) (0x000888E0 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */ +#define GLTSYN_SHTIME_0_MAX_INDEX 1 +#define GLTSYN_SHTIME_0_TSYNTIME_0_S 0 +#define GLTSYN_SHTIME_0_TSYNTIME_0_M MAKEMASK(0xFFFFFFFF, 0) +#define GLTSYN_SHTIME_H(_i) (0x000888F0 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */ +#define GLTSYN_SHTIME_H_MAX_INDEX 1 +#define GLTSYN_SHTIME_H_TSYNTIME_H_S 0 +#define GLTSYN_SHTIME_H_TSYNTIME_H_M MAKEMASK(0xFFFFFFFF, 0) +#define GLTSYN_SHTIME_L(_i) (0x000888E8 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */ +#define GLTSYN_SHTIME_L_MAX_INDEX 1 +#define GLTSYN_SHTIME_L_TSYNTIME_L_S 0 +#define GLTSYN_SHTIME_L_TSYNTIME_L_M MAKEMASK(0xFFFFFFFF, 0) +#define GLTSYN_STAT(_i) (0x000888C0 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */ +#define GLTSYN_STAT_MAX_INDEX 1 +#define GLTSYN_STAT_EVENT0_S 0 +#define GLTSYN_STAT_EVENT0_M BIT(0) +#define GLTSYN_STAT_EVENT1_S 1 +#define GLTSYN_STAT_EVENT1_M BIT(1) +#define GLTSYN_STAT_EVENT2_S 2 +#define GLTSYN_STAT_EVENT2_M BIT(2) +#define GLTSYN_STAT_TGT0_S 4 +#define GLTSYN_STAT_TGT0_M BIT(4) +#define GLTSYN_STAT_TGT1_S 5 +#define GLTSYN_STAT_TGT1_M BIT(5) +#define GLTSYN_STAT_TGT2_S 6 +#define GLTSYN_STAT_TGT2_M BIT(6) +#define GLTSYN_STAT_TGT3_S 7 +#define GLTSYN_STAT_TGT3_M BIT(7) +#define GLTSYN_SYNC_DLAY 0x00088818 /* Reset Source: CORER */ +#define GLTSYN_SYNC_DLAY_SYNC_DELAY_S 0 +#define GLTSYN_SYNC_DLAY_SYNC_DELAY_M MAKEMASK(0x1F, 0) +#define GLTSYN_TGT_H_0(_i) (0x00088930 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */ +#define GLTSYN_TGT_H_0_MAX_INDEX 1 +#define GLTSYN_TGT_H_0_TSYNTGTT_H_S 0 +#define GLTSYN_TGT_H_0_TSYNTGTT_H_M MAKEMASK(0xFFFFFFFF, 0) +#define GLTSYN_TGT_H_1(_i) (0x00088940 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */ +#define GLTSYN_TGT_H_1_MAX_INDEX 1 +#define GLTSYN_TGT_H_1_TSYNTGTT_H_S 0 +#define GLTSYN_TGT_H_1_TSYNTGTT_H_M 
MAKEMASK(0xFFFFFFFF, 0) +#define GLTSYN_TGT_H_2(_i) (0x00088950 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */ +#define GLTSYN_TGT_H_2_MAX_INDEX 1 +#define GLTSYN_TGT_H_2_TSYNTGTT_H_S 0 +#define GLTSYN_TGT_H_2_TSYNTGTT_H_M MAKEMASK(0xFFFFFFFF, 0) +#define GLTSYN_TGT_H_3(_i) (0x00088960 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */ +#define GLTSYN_TGT_H_3_MAX_INDEX 1 +#define GLTSYN_TGT_H_3_TSYNTGTT_H_S 0 +#define GLTSYN_TGT_H_3_TSYNTGTT_H_M MAKEMASK(0xFFFFFFFF, 0) +#define GLTSYN_TGT_L_0(_i) (0x00088928 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */ +#define GLTSYN_TGT_L_0_MAX_INDEX 1 +#define GLTSYN_TGT_L_0_TSYNTGTT_L_S 0 +#define GLTSYN_TGT_L_0_TSYNTGTT_L_M MAKEMASK(0xFFFFFFFF, 0) +#define GLTSYN_TGT_L_1(_i) (0x00088938 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */ +#define GLTSYN_TGT_L_1_MAX_INDEX 1 +#define GLTSYN_TGT_L_1_TSYNTGTT_L_S 0 +#define GLTSYN_TGT_L_1_TSYNTGTT_L_M MAKEMASK(0xFFFFFFFF, 0) +#define GLTSYN_TGT_L_2(_i) (0x00088948 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */ +#define GLTSYN_TGT_L_2_MAX_INDEX 1 +#define GLTSYN_TGT_L_2_TSYNTGTT_L_S 0 +#define GLTSYN_TGT_L_2_TSYNTGTT_L_M MAKEMASK(0xFFFFFFFF, 0) +#define GLTSYN_TGT_L_3(_i) (0x00088958 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */ +#define GLTSYN_TGT_L_3_MAX_INDEX 1 +#define GLTSYN_TGT_L_3_TSYNTGTT_L_S 0 +#define GLTSYN_TGT_L_3_TSYNTGTT_L_M MAKEMASK(0xFFFFFFFF, 0) +#define GLTSYN_TIME_0(_i) (0x000888C8 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */ +#define GLTSYN_TIME_0_MAX_INDEX 1 +#define GLTSYN_TIME_0_TSYNTIME_0_S 0 +#define GLTSYN_TIME_0_TSYNTIME_0_M MAKEMASK(0xFFFFFFFF, 0) +#define GLTSYN_TIME_H(_i) (0x000888D8 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */ +#define GLTSYN_TIME_H_MAX_INDEX 1 +#define GLTSYN_TIME_H_TSYNTIME_H_S 0 +#define GLTSYN_TIME_H_TSYNTIME_H_M MAKEMASK(0xFFFFFFFF, 0) +#define GLTSYN_TIME_L(_i) (0x000888D0 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */ +#define GLTSYN_TIME_L_MAX_INDEX 1 +#define GLTSYN_TIME_L_TSYNTIME_L_S 0 +#define GLTSYN_TIME_L_TSYNTIME_L_M MAKEMASK(0xFFFFFFFF, 0) +#define PFHH_SEM 0x000A4200 /* Reset Source: PFR */ +#define PFHH_SEM_BUSY_S 0 +#define PFHH_SEM_BUSY_M BIT(0) +#define PFHH_SEM_PF_OWNER_S 4 +#define PFHH_SEM_PF_OWNER_M MAKEMASK(0x7, 4) +#define PFTSYN_SEM 0x00088880 /* Reset Source: PFR */ +#define PFTSYN_SEM_BUSY_S 0 +#define PFTSYN_SEM_BUSY_M BIT(0) +#define PFTSYN_SEM_PF_OWNER_S 4 +#define PFTSYN_SEM_PF_OWNER_M MAKEMASK(0x7, 4) +#define GLPE_TSCD_FLR(_i) (0x0051E24C + ((_i) * 4)) /* _i=0...3 */ /* Reset Source: CORER */ +#define GLPE_TSCD_FLR_MAX_INDEX 3 +#define GLPE_TSCD_FLR_DRAIN_VCTR_ID_S 0 +#define GLPE_TSCD_FLR_DRAIN_VCTR_ID_M MAKEMASK(0x3, 0) +#define GLPE_TSCD_FLR_PORT_S 2 +#define GLPE_TSCD_FLR_PORT_M MAKEMASK(0x7, 2) +#define GLPE_TSCD_FLR_PF_NUM_S 5 +#define GLPE_TSCD_FLR_PF_NUM_M MAKEMASK(0x7, 5) +#define GLPE_TSCD_FLR_VM_VF_TYPE_S 8 +#define GLPE_TSCD_FLR_VM_VF_TYPE_M MAKEMASK(0x3, 8) +#define GLPE_TSCD_FLR_VM_VF_NUM_S 16 +#define GLPE_TSCD_FLR_VM_VF_NUM_M MAKEMASK(0x3FF, 16) +#define GLPE_TSCD_FLR_VLD_S 31 +#define GLPE_TSCD_FLR_VLD_M BIT(31) +#define GLPE_TSCD_PEPM 0x0051E228 /* Reset Source: CORER */ +#define GLPE_TSCD_PEPM_MDQ_CREDITS_S 0 +#define GLPE_TSCD_PEPM_MDQ_CREDITS_M MAKEMASK(0xFF, 0) +#define PF_VIRT_VSTATUS 0x0009E680 /* Reset Source: PFR */ +#define PF_VIRT_VSTATUS_NUM_VFS_S 0 +#define PF_VIRT_VSTATUS_NUM_VFS_M MAKEMASK(0xFF, 0) +#define PF_VIRT_VSTATUS_TOTAL_VFS_S 8 +#define PF_VIRT_VSTATUS_TOTAL_VFS_M MAKEMASK(0xFF, 8) +#define 
PF_VIRT_VSTATUS_IOV_ACTIVE_S 16 +#define PF_VIRT_VSTATUS_IOV_ACTIVE_M BIT(16) +#define PF_VT_PFALLOC 0x001D2480 /* Reset Source: CORER */ +#define PF_VT_PFALLOC_FIRSTVF_S 0 +#define PF_VT_PFALLOC_FIRSTVF_M MAKEMASK(0xFF, 0) +#define PF_VT_PFALLOC_LASTVF_S 8 +#define PF_VT_PFALLOC_LASTVF_M MAKEMASK(0xFF, 8) +#define PF_VT_PFALLOC_VALID_S 31 +#define PF_VT_PFALLOC_VALID_M BIT(31) +#define PF_VT_PFALLOC_HIF 0x0009DD80 /* Reset Source: PCIR */ +#define PF_VT_PFALLOC_HIF_FIRSTVF_S 0 +#define PF_VT_PFALLOC_HIF_FIRSTVF_M MAKEMASK(0xFF, 0) +#define PF_VT_PFALLOC_HIF_LASTVF_S 8 +#define PF_VT_PFALLOC_HIF_LASTVF_M MAKEMASK(0xFF, 8) +#define PF_VT_PFALLOC_HIF_VALID_S 31 +#define PF_VT_PFALLOC_HIF_VALID_M BIT(31) +#define PF_VT_PFALLOC_PCIE 0x000BE080 /* Reset Source: PCIR */ +#define PF_VT_PFALLOC_PCIE_FIRSTVF_S 0 +#define PF_VT_PFALLOC_PCIE_FIRSTVF_M MAKEMASK(0xFF, 0) +#define PF_VT_PFALLOC_PCIE_LASTVF_S 8 +#define PF_VT_PFALLOC_PCIE_LASTVF_M MAKEMASK(0xFF, 8) +#define PF_VT_PFALLOC_PCIE_VALID_S 31 +#define PF_VT_PFALLOC_PCIE_VALID_M BIT(31) +#define VSI_L2TAGSTXVALID(_VSI) (0x00046000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: CORER */ +#define VSI_L2TAGSTXVALID_MAX_INDEX 767 +#define VSI_L2TAGSTXVALID_L2TAG1INSERTID_S 0 +#define VSI_L2TAGSTXVALID_L2TAG1INSERTID_M MAKEMASK(0x7, 0) +#define VSI_L2TAGSTXVALID_L2TAG1INSERTID_VALID_S 3 +#define VSI_L2TAGSTXVALID_L2TAG1INSERTID_VALID_M BIT(3) +#define VSI_L2TAGSTXVALID_L2TAG2INSERTID_S 4 +#define VSI_L2TAGSTXVALID_L2TAG2INSERTID_M MAKEMASK(0x7, 4) +#define VSI_L2TAGSTXVALID_L2TAG2INSERTID_VALID_S 7 +#define VSI_L2TAGSTXVALID_L2TAG2INSERTID_VALID_M BIT(7) +#define VSI_L2TAGSTXVALID_TIR0INSERTID_S 16 +#define VSI_L2TAGSTXVALID_TIR0INSERTID_M MAKEMASK(0x7, 16) +#define VSI_L2TAGSTXVALID_TIR0_INSERT_S 19 +#define VSI_L2TAGSTXVALID_TIR0_INSERT_M BIT(19) +#define VSI_L2TAGSTXVALID_TIR1INSERTID_S 20 +#define VSI_L2TAGSTXVALID_TIR1INSERTID_M MAKEMASK(0x7, 20) +#define VSI_L2TAGSTXVALID_TIR1_INSERT_S 23 +#define VSI_L2TAGSTXVALID_TIR1_INSERT_M BIT(23) +#define VSI_L2TAGSTXVALID_TIR2INSERTID_S 24 +#define VSI_L2TAGSTXVALID_TIR2INSERTID_M MAKEMASK(0x7, 24) +#define VSI_L2TAGSTXVALID_TIR2_INSERT_S 27 +#define VSI_L2TAGSTXVALID_TIR2_INSERT_M BIT(27) +#define VSI_PASID(_VSI) (0x0009C000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: PFR */ +#define VSI_PASID_MAX_INDEX 767 +#define VSI_PASID_PASID_S 0 +#define VSI_PASID_PASID_M MAKEMASK(0xFFFFF, 0) +#define VSI_PASID_EN_S 31 +#define VSI_PASID_EN_M BIT(31) +#define VSI_RUPR(_VSI) (0x00050000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: CORER */ +#define VSI_RUPR_MAX_INDEX 767 +#define VSI_RUPR_UP0_S 0 +#define VSI_RUPR_UP0_M MAKEMASK(0x7, 0) +#define VSI_RUPR_UP1_S 3 +#define VSI_RUPR_UP1_M MAKEMASK(0x7, 3) +#define VSI_RUPR_UP2_S 6 +#define VSI_RUPR_UP2_M MAKEMASK(0x7, 6) +#define VSI_RUPR_UP3_S 9 +#define VSI_RUPR_UP3_M MAKEMASK(0x7, 9) +#define VSI_RUPR_UP4_S 12 +#define VSI_RUPR_UP4_M MAKEMASK(0x7, 12) +#define VSI_RUPR_UP5_S 15 +#define VSI_RUPR_UP5_M MAKEMASK(0x7, 15) +#define VSI_RUPR_UP6_S 18 +#define VSI_RUPR_UP6_M MAKEMASK(0x7, 18) +#define VSI_RUPR_UP7_S 21 +#define VSI_RUPR_UP7_M MAKEMASK(0x7, 21) +#define VSI_RXSWCTRL(_VSI) (0x00205000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: CORER */ +#define VSI_RXSWCTRL_MAX_INDEX 767 +#define VSI_RXSWCTRL_MACVSIPRUNEENABLE_S 8 +#define VSI_RXSWCTRL_MACVSIPRUNEENABLE_M BIT(8) +#define VSI_RXSWCTRL_PRUNEENABLE_S 9 +#define VSI_RXSWCTRL_PRUNEENABLE_M MAKEMASK(0xF, 9) +#define VSI_RXSWCTRL_SRCPRUNEENABLE_S 13 +#define VSI_RXSWCTRL_SRCPRUNEENABLE_M 
BIT(13) +#define VSI_SRCSWCTRL(_VSI) (0x00209000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: CORER */ +#define VSI_SRCSWCTRL_MAX_INDEX 767 +#define VSI_SRCSWCTRL_ALLOWDESTOVERRIDE_S 0 +#define VSI_SRCSWCTRL_ALLOWDESTOVERRIDE_M BIT(0) +#define VSI_SRCSWCTRL_ALLOWLOOPBACK_S 1 +#define VSI_SRCSWCTRL_ALLOWLOOPBACK_M BIT(1) +#define VSI_SRCSWCTRL_LANENABLE_S 2 +#define VSI_SRCSWCTRL_LANENABLE_M BIT(2) +#define VSI_SRCSWCTRL_MACAS_S 3 +#define VSI_SRCSWCTRL_MACAS_M BIT(3) +#define VSI_SRCSWCTRL_PRUNEENABLE_S 4 +#define VSI_SRCSWCTRL_PRUNEENABLE_M MAKEMASK(0xF, 4) +#define VSI_SWITCHID(_VSI) (0x00215000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: CORER */ +#define VSI_SWITCHID_MAX_INDEX 767 +#define VSI_SWITCHID_SWITCHID_S 0 +#define VSI_SWITCHID_SWITCHID_M MAKEMASK(0xFF, 0) +#define VSI_SWT_MIREG(_VSI) (0x00207000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: CORER */ +#define VSI_SWT_MIREG_MAX_INDEX 767 +#define VSI_SWT_MIREG_MIRRULE_S 0 +#define VSI_SWT_MIREG_MIRRULE_M MAKEMASK(0x3F, 0) +#define VSI_SWT_MIREG_MIRENA_S 7 +#define VSI_SWT_MIREG_MIRENA_M BIT(7) +#define VSI_SWT_MIRIG(_VSI) (0x00208000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: CORER */ +#define VSI_SWT_MIRIG_MAX_INDEX 767 +#define VSI_SWT_MIRIG_MIRRULE_S 0 +#define VSI_SWT_MIRIG_MIRRULE_M MAKEMASK(0x3F, 0) +#define VSI_SWT_MIRIG_MIRENA_S 7 +#define VSI_SWT_MIRIG_MIRENA_M BIT(7) +#define VSI_TAIR(_VSI) (0x00044000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: PFR */ +#define VSI_TAIR_MAX_INDEX 767 +#define VSI_TAIR_PORT_TAG_ID_S 0 +#define VSI_TAIR_PORT_TAG_ID_M MAKEMASK(0xFFFF, 0) +#define VSI_TAR(_VSI) (0x00045000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: CORER */ +#define VSI_TAR_MAX_INDEX 767 +#define VSI_TAR_ACCEPTTAGGED_S 0 +#define VSI_TAR_ACCEPTTAGGED_M MAKEMASK(0x3FF, 0) +#define VSI_TAR_ACCEPTUNTAGGED_S 16 +#define VSI_TAR_ACCEPTUNTAGGED_M MAKEMASK(0x3FF, 16) +#define VSI_TIR_0(_VSI) (0x00041000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: CORER */ +#define VSI_TIR_0_MAX_INDEX 767 +#define VSI_TIR_0_PORT_TAG_ID_S 0 +#define VSI_TIR_0_PORT_TAG_ID_M MAKEMASK(0xFFFF, 0) +#define VSI_TIR_1(_VSI) (0x00042000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: CORER */ +#define VSI_TIR_1_MAX_INDEX 767 +#define VSI_TIR_1_PORT_TAG_ID_S 0 +#define VSI_TIR_1_PORT_TAG_ID_M MAKEMASK(0xFFFFFFFF, 0) +#define VSI_TIR_2(_VSI) (0x00043000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: CORER */ +#define VSI_TIR_2_MAX_INDEX 767 +#define VSI_TIR_2_PORT_TAG_ID_S 0 +#define VSI_TIR_2_PORT_TAG_ID_M MAKEMASK(0xFFFF, 0) +#define VSI_TSR(_VSI) (0x00051000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: CORER */ +#define VSI_TSR_MAX_INDEX 767 +#define VSI_TSR_STRIPTAG_S 0 +#define VSI_TSR_STRIPTAG_M MAKEMASK(0x3FF, 0) +#define VSI_TSR_SHOWTAG_S 10 +#define VSI_TSR_SHOWTAG_M MAKEMASK(0x3FF, 10) +#define VSI_TSR_SHOWPRIONLY_S 20 +#define VSI_TSR_SHOWPRIONLY_M MAKEMASK(0x3FF, 20) +#define VSI_TUPIOM(_VSI) (0x00048000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: CORER */ +#define VSI_TUPIOM_MAX_INDEX 767 +#define VSI_TUPIOM_UP0_S 0 +#define VSI_TUPIOM_UP0_M MAKEMASK(0x7, 0) +#define VSI_TUPIOM_UP1_S 3 +#define VSI_TUPIOM_UP1_M MAKEMASK(0x7, 3) +#define VSI_TUPIOM_UP2_S 6 +#define VSI_TUPIOM_UP2_M MAKEMASK(0x7, 6) +#define VSI_TUPIOM_UP3_S 9 +#define VSI_TUPIOM_UP3_M MAKEMASK(0x7, 9) +#define VSI_TUPIOM_UP4_S 12 +#define VSI_TUPIOM_UP4_M MAKEMASK(0x7, 12) +#define VSI_TUPIOM_UP5_S 15 +#define VSI_TUPIOM_UP5_M MAKEMASK(0x7, 15) +#define VSI_TUPIOM_UP6_S 18 +#define VSI_TUPIOM_UP6_M MAKEMASK(0x7, 18) 
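Throughout this file each register field is described by an _S (shift) / _M (mask) pair, with MAKEMASK(m, s) and BIT(n) taken here to expand to ((m) << (s)) and (1 << (n)) as in ice_osdep.h. A minimal read-modify-write sketch for the VSI_RXSWCTRL pruning fields, assuming the driver's usual rd32()/wr32() accessors and struct ice_hw handle from the surrounding base code:

/* Sketch only: enable pruning for one VSI using the _S/_M pairs above. */
static void
example_set_prune_enable(struct ice_hw *hw, u16 vsi, u8 prune_bitmap)
{
	u32 reg = rd32(hw, VSI_RXSWCTRL(vsi));

	/* clear the 4-bit PRUNEENABLE field, then insert the new value */
	reg &= ~VSI_RXSWCTRL_PRUNEENABLE_M;
	reg |= ((u32)prune_bitmap << VSI_RXSWCTRL_PRUNEENABLE_S) &
	       VSI_RXSWCTRL_PRUNEENABLE_M;

	/* also turn on MAC/VSI based pruning for this VSI */
	reg |= VSI_RXSWCTRL_MACVSIPRUNEENABLE_M;

	wr32(hw, VSI_RXSWCTRL(vsi), reg);
}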
+#define VSI_TUPIOM_UP7_S 21 +#define VSI_TUPIOM_UP7_M MAKEMASK(0x7, 21) +#define VSI_TUPR(_VSI) (0x00047000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: CORER */ +#define VSI_TUPR_MAX_INDEX 767 +#define VSI_TUPR_UP0_S 0 +#define VSI_TUPR_UP0_M MAKEMASK(0x7, 0) +#define VSI_TUPR_UP1_S 3 +#define VSI_TUPR_UP1_M MAKEMASK(0x7, 3) +#define VSI_TUPR_UP2_S 6 +#define VSI_TUPR_UP2_M MAKEMASK(0x7, 6) +#define VSI_TUPR_UP3_S 9 +#define VSI_TUPR_UP3_M MAKEMASK(0x7, 9) +#define VSI_TUPR_UP4_S 12 +#define VSI_TUPR_UP4_M MAKEMASK(0x7, 12) +#define VSI_TUPR_UP5_S 15 +#define VSI_TUPR_UP5_M MAKEMASK(0x7, 15) +#define VSI_TUPR_UP6_S 18 +#define VSI_TUPR_UP6_M MAKEMASK(0x7, 18) +#define VSI_TUPR_UP7_S 21 +#define VSI_TUPR_UP7_M MAKEMASK(0x7, 21) +#define VSI_VSI2F(_VSI) (0x001D0000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: PFR */ +#define VSI_VSI2F_MAX_INDEX 767 +#define VSI_VSI2F_VFVMNUMBER_S 0 +#define VSI_VSI2F_VFVMNUMBER_M MAKEMASK(0x3FF, 0) +#define VSI_VSI2F_FUNCTIONTYPE_S 10 +#define VSI_VSI2F_FUNCTIONTYPE_M MAKEMASK(0x3, 10) +#define VSI_VSI2F_PFNUMBER_S 12 +#define VSI_VSI2F_PFNUMBER_M MAKEMASK(0x7, 12) +#define VSI_VSI2F_BUFFERNUMBER_S 16 +#define VSI_VSI2F_BUFFERNUMBER_M MAKEMASK(0x7, 16) +#define VSI_VSI2F_VSI_NUMBER_S 20 +#define VSI_VSI2F_VSI_NUMBER_M MAKEMASK(0x3FF, 20) +#define VSI_VSI2F_VSI_ENABLE_S 31 +#define VSI_VSI2F_VSI_ENABLE_M BIT(31) +#define VSIQF_FD_CNT(_VSI) (0x00464000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: PFR */ +#define VSIQF_FD_CNT_MAX_INDEX 767 +#define VSIQF_FD_CNT_FD_GCNT_S 0 +#define VSIQF_FD_CNT_FD_GCNT_M MAKEMASK(0x3FFF, 0) +#define VSIQF_FD_CNT_FD_BCNT_S 16 +#define VSIQF_FD_CNT_FD_BCNT_M MAKEMASK(0x3FFF, 16) +#define VSIQF_FD_CTL1(_VSI) (0x00411000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: CORER */ +#define VSIQF_FD_CTL1_MAX_INDEX 767 +#define VSIQF_FD_CTL1_FLT_ENA_S 0 +#define VSIQF_FD_CTL1_FLT_ENA_M BIT(0) +#define VSIQF_FD_CTL1_CFG_ENA_S 1 +#define VSIQF_FD_CTL1_CFG_ENA_M BIT(1) +#define VSIQF_FD_CTL1_EVICT_ENA_S 2 +#define VSIQF_FD_CTL1_EVICT_ENA_M BIT(2) +#define VSIQF_FD_DFLT(_VSI) (0x00457000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: CORER */ +#define VSIQF_FD_DFLT_MAX_INDEX 767 +#define VSIQF_FD_DFLT_DEFLT_QINDX_S 0 +#define VSIQF_FD_DFLT_DEFLT_QINDX_M MAKEMASK(0x7FF, 0) +#define VSIQF_FD_DFLT_DEFLT_TOQUEUE_S 12 +#define VSIQF_FD_DFLT_DEFLT_TOQUEUE_M MAKEMASK(0x7, 12) +#define VSIQF_FD_DFLT_COMP_QINDX_S 16 +#define VSIQF_FD_DFLT_COMP_QINDX_M MAKEMASK(0x7FF, 16) +#define VSIQF_FD_DFLT_DEFLT_QINDX_PRIO_S 28 +#define VSIQF_FD_DFLT_DEFLT_QINDX_PRIO_M MAKEMASK(0x7, 28) +#define VSIQF_FD_DFLT_DEFLT_DROP_S 31 +#define VSIQF_FD_DFLT_DEFLT_DROP_M BIT(31) +#define VSIQF_FD_SIZE(_VSI) (0x00462000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: CORER */ +#define VSIQF_FD_SIZE_MAX_INDEX 767 +#define VSIQF_FD_SIZE_FD_GSIZE_S 0 +#define VSIQF_FD_SIZE_FD_GSIZE_M MAKEMASK(0x3FFF, 0) +#define VSIQF_FD_SIZE_FD_BSIZE_S 16 +#define VSIQF_FD_SIZE_FD_BSIZE_M MAKEMASK(0x3FFF, 16) +#define VSIQF_HASH_CTL(_VSI) (0x0040D000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: CORER */ +#define VSIQF_HASH_CTL_MAX_INDEX 767 +#define VSIQF_HASH_CTL_HASH_LUT_SEL_S 0 +#define VSIQF_HASH_CTL_HASH_LUT_SEL_M MAKEMASK(0x3, 0) +#define VSIQF_HASH_CTL_GLOB_LUT_S 2 +#define VSIQF_HASH_CTL_GLOB_LUT_M MAKEMASK(0xF, 2) +#define VSIQF_HASH_CTL_HASH_SCHEME_S 6 +#define VSIQF_HASH_CTL_HASH_SCHEME_M MAKEMASK(0x3, 6) +#define VSIQF_HASH_CTL_TC_OVER_SEL_S 8 +#define VSIQF_HASH_CTL_TC_OVER_SEL_M MAKEMASK(0x1F, 8) +#define VSIQF_HASH_CTL_TC_OVER_ENA_S 15 +#define 
VSIQF_HASH_CTL_TC_OVER_ENA_M BIT(15) +#define VSIQF_HKEY(_i, _VSI) (0x00400000 + ((_i) * 4096 + (_VSI) * 4)) /* _i=0...12, _VSI=0...767 */ /* Reset Source: PFR */ +#define VSIQF_HKEY_MAX_INDEX 12 +#define VSIQF_HKEY_KEY_0_S 0 +#define VSIQF_HKEY_KEY_0_M MAKEMASK(0xFF, 0) +#define VSIQF_HKEY_KEY_1_S 8 +#define VSIQF_HKEY_KEY_1_M MAKEMASK(0xFF, 8) +#define VSIQF_HKEY_KEY_2_S 16 +#define VSIQF_HKEY_KEY_2_M MAKEMASK(0xFF, 16) +#define VSIQF_HKEY_KEY_3_S 24 +#define VSIQF_HKEY_KEY_3_M MAKEMASK(0xFF, 24) +#define VSIQF_HLUT(_i, _VSI) (0x00420000 + ((_i) * 4096 + (_VSI) * 4)) /* _i=0...15, _VSI=0...767 */ /* Reset Source: PFR */ +#define VSIQF_HLUT_MAX_INDEX 15 +#define VSIQF_HLUT_LUT0_S 0 +#define VSIQF_HLUT_LUT0_M MAKEMASK(0xF, 0) +#define VSIQF_HLUT_LUT1_S 8 +#define VSIQF_HLUT_LUT1_M MAKEMASK(0xF, 8) +#define VSIQF_HLUT_LUT2_S 16 +#define VSIQF_HLUT_LUT2_M MAKEMASK(0xF, 16) +#define VSIQF_HLUT_LUT3_S 24 +#define VSIQF_HLUT_LUT3_M MAKEMASK(0xF, 24) +#define VSIQF_PE_CTL1(_VSI) (0x00414000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: CORER */ +#define VSIQF_PE_CTL1_MAX_INDEX 767 +#define VSIQF_PE_CTL1_PE_FLTENA_S 0 +#define VSIQF_PE_CTL1_PE_FLTENA_M BIT(0) +#define VSIQF_TC_REGION(_i, _VSI) (0x00448000 + ((_i) * 4096 + (_VSI) * 4)) /* _i=0...3, _VSI=0...767 */ /* Reset Source: CORER */ +#define VSIQF_TC_REGION_MAX_INDEX 3 +#define VSIQF_TC_REGION_TC_BASE0_S 0 +#define VSIQF_TC_REGION_TC_BASE0_M MAKEMASK(0x7FF, 0) +#define VSIQF_TC_REGION_TC_SIZE0_S 11 +#define VSIQF_TC_REGION_TC_SIZE0_M MAKEMASK(0xF, 11) +#define VSIQF_TC_REGION_TC_BASE1_S 16 +#define VSIQF_TC_REGION_TC_BASE1_M MAKEMASK(0x7FF, 16) +#define VSIQF_TC_REGION_TC_SIZE1_S 27 +#define VSIQF_TC_REGION_TC_SIZE1_M MAKEMASK(0xF, 27) +#define GLPM_WUMC 0x0009DEE4 /* Reset Source: POR */ +#define GLPM_WUMC_MNG_WU_PF_S 16 +#define GLPM_WUMC_MNG_WU_PF_M MAKEMASK(0xFF, 16) +#define PFPM_APM 0x000B8080 /* Reset Source: POR */ +#define PFPM_APM_APME_S 0 +#define PFPM_APM_APME_M BIT(0) +#define PFPM_WUC 0x0009DC80 /* Reset Source: POR */ +#define PFPM_WUC_EN_APM_D0_S 5 +#define PFPM_WUC_EN_APM_D0_M BIT(5) +#define PFPM_WUFC 0x0009DC00 /* Reset Source: POR */ +#define PFPM_WUFC_LNKC_S 0 +#define PFPM_WUFC_LNKC_M BIT(0) +#define PFPM_WUFC_MAG_S 1 +#define PFPM_WUFC_MAG_M BIT(1) +#define PFPM_WUFC_MNG_S 3 +#define PFPM_WUFC_MNG_M BIT(3) +#define PFPM_WUFC_FLX0_ACT_S 4 +#define PFPM_WUFC_FLX0_ACT_M BIT(4) +#define PFPM_WUFC_FLX1_ACT_S 5 +#define PFPM_WUFC_FLX1_ACT_M BIT(5) +#define PFPM_WUFC_FLX2_ACT_S 6 +#define PFPM_WUFC_FLX2_ACT_M BIT(6) +#define PFPM_WUFC_FLX3_ACT_S 7 +#define PFPM_WUFC_FLX3_ACT_M BIT(7) +#define PFPM_WUFC_FLX4_ACT_S 8 +#define PFPM_WUFC_FLX4_ACT_M BIT(8) +#define PFPM_WUFC_FLX5_ACT_S 9 +#define PFPM_WUFC_FLX5_ACT_M BIT(9) +#define PFPM_WUFC_FLX6_ACT_S 10 +#define PFPM_WUFC_FLX6_ACT_M BIT(10) +#define PFPM_WUFC_FLX7_ACT_S 11 +#define PFPM_WUFC_FLX7_ACT_M BIT(11) +#define PFPM_WUFC_FLX0_S 16 +#define PFPM_WUFC_FLX0_M BIT(16) +#define PFPM_WUFC_FLX1_S 17 +#define PFPM_WUFC_FLX1_M BIT(17) +#define PFPM_WUFC_FLX2_S 18 +#define PFPM_WUFC_FLX2_M BIT(18) +#define PFPM_WUFC_FLX3_S 19 +#define PFPM_WUFC_FLX3_M BIT(19) +#define PFPM_WUFC_FLX4_S 20 +#define PFPM_WUFC_FLX4_M BIT(20) +#define PFPM_WUFC_FLX5_S 21 +#define PFPM_WUFC_FLX5_M BIT(21) +#define PFPM_WUFC_FLX6_S 22 +#define PFPM_WUFC_FLX6_M BIT(22) +#define PFPM_WUFC_FLX7_S 23 +#define PFPM_WUFC_FLX7_M BIT(23) +#define PFPM_WUFC_FW_RST_WK_S 31 +#define PFPM_WUFC_FW_RST_WK_M BIT(31) +#define PFPM_WUS 0x0009DB80 /* Reset Source: POR */ +#define PFPM_WUS_LNKC_S 0 +#define 
PFPM_WUS_LNKC_M BIT(0) +#define PFPM_WUS_MAG_S 1 +#define PFPM_WUS_MAG_M BIT(1) +#define PFPM_WUS_PME_STATUS_S 2 +#define PFPM_WUS_PME_STATUS_M BIT(2) +#define PFPM_WUS_MNG_S 3 +#define PFPM_WUS_MNG_M BIT(3) +#define PFPM_WUS_FLX0_S 16 +#define PFPM_WUS_FLX0_M BIT(16) +#define PFPM_WUS_FLX1_S 17 +#define PFPM_WUS_FLX1_M BIT(17) +#define PFPM_WUS_FLX2_S 18 +#define PFPM_WUS_FLX2_M BIT(18) +#define PFPM_WUS_FLX3_S 19 +#define PFPM_WUS_FLX3_M BIT(19) +#define PFPM_WUS_FLX4_S 20 +#define PFPM_WUS_FLX4_M BIT(20) +#define PFPM_WUS_FLX5_S 21 +#define PFPM_WUS_FLX5_M BIT(21) +#define PFPM_WUS_FLX6_S 22 +#define PFPM_WUS_FLX6_M BIT(22) +#define PFPM_WUS_FLX7_S 23 +#define PFPM_WUS_FLX7_M BIT(23) +#define PFPM_WUS_FW_RST_WK_S 31 +#define PFPM_WUS_FW_RST_WK_M BIT(31) +#define PRTPM_SAH(_i) (0x001E3BA0 + ((_i) * 32)) /* _i=0...3 */ /* Reset Source: PFR */ +#define PRTPM_SAH_MAX_INDEX 3 +#define PRTPM_SAH_PFPM_SAH_S 0 +#define PRTPM_SAH_PFPM_SAH_M MAKEMASK(0xFFFF, 0) +#define PRTPM_SAH_PF_NUM_S 26 +#define PRTPM_SAH_PF_NUM_M MAKEMASK(0xF, 26) +#define PRTPM_SAH_MC_MAG_EN_S 30 +#define PRTPM_SAH_MC_MAG_EN_M BIT(30) +#define PRTPM_SAH_AV_S 31 +#define PRTPM_SAH_AV_M BIT(31) +#define PRTPM_SAL(_i) (0x001E3B20 + ((_i) * 32)) /* _i=0...3 */ /* Reset Source: PFR */ +#define PRTPM_SAL_MAX_INDEX 3 +#define PRTPM_SAL_PFPM_SAL_S 0 +#define PRTPM_SAL_PFPM_SAL_M MAKEMASK(0xFFFFFFFF, 0) +#define GLPE_CQM_FUNC_INVALIDATE 0x00503300 /* Reset Source: CORER */ +#define GLPE_CQM_FUNC_INVALIDATE_PF_NUM_S 0 +#define GLPE_CQM_FUNC_INVALIDATE_PF_NUM_M MAKEMASK(0x7, 0) +#define GLPE_CQM_FUNC_INVALIDATE_VM_VF_NUM_S 3 +#define GLPE_CQM_FUNC_INVALIDATE_VM_VF_NUM_M MAKEMASK(0x3FF, 3) +#define GLPE_CQM_FUNC_INVALIDATE_VM_VF_TYPE_S 13 +#define GLPE_CQM_FUNC_INVALIDATE_VM_VF_TYPE_M MAKEMASK(0x3, 13) +#define GLPE_CQM_FUNC_INVALIDATE_ENABLE_S 31 +#define GLPE_CQM_FUNC_INVALIDATE_ENABLE_M BIT(31) +#define VFPE_MRTEIDXMASK 0x00009000 /* Reset Source: PFR */ +#define VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_S 0 +#define VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_M MAKEMASK(0x1F, 0) +#define GLTSYN_HH_DLAY 0x0008881C /* Reset Source: CORER */ +#define GLTSYN_HH_DLAY_SYNC_DELAY_S 0 +#define GLTSYN_HH_DLAY_SYNC_DELAY_M MAKEMASK(0xF, 0) +#define VF_MBX_ARQBAH1 0x00006000 /* Reset Source: CORER */ +#define VF_MBX_ARQBAH1_ARQBAH_S 0 +#define VF_MBX_ARQBAH1_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0) +#define VF_MBX_ARQBAL1 0x00006C00 /* Reset Source: CORER */ +#define VF_MBX_ARQBAL1_ARQBAL_LSB_S 0 +#define VF_MBX_ARQBAL1_ARQBAL_LSB_M MAKEMASK(0x3F, 0) +#define VF_MBX_ARQBAL1_ARQBAL_S 6 +#define VF_MBX_ARQBAL1_ARQBAL_M MAKEMASK(0x3FFFFFF, 6) +#define VF_MBX_ARQH1 0x00007400 /* Reset Source: CORER */ +#define VF_MBX_ARQH1_ARQH_S 0 +#define VF_MBX_ARQH1_ARQH_M MAKEMASK(0x3FF, 0) +#define VF_MBX_ARQLEN1 0x00008000 /* Reset Source: PFR */ +#define VF_MBX_ARQLEN1_ARQLEN_S 0 +#define VF_MBX_ARQLEN1_ARQLEN_M MAKEMASK(0x3FF, 0) +#define VF_MBX_ARQLEN1_ARQVFE_S 28 +#define VF_MBX_ARQLEN1_ARQVFE_M BIT(28) +#define VF_MBX_ARQLEN1_ARQOVFL_S 29 +#define VF_MBX_ARQLEN1_ARQOVFL_M BIT(29) +#define VF_MBX_ARQLEN1_ARQCRIT_S 30 +#define VF_MBX_ARQLEN1_ARQCRIT_M BIT(30) +#define VF_MBX_ARQLEN1_ARQENABLE_S 31 +#define VF_MBX_ARQLEN1_ARQENABLE_M BIT(31) +#define VF_MBX_ARQT1 0x00007000 /* Reset Source: CORER */ +#define VF_MBX_ARQT1_ARQT_S 0 +#define VF_MBX_ARQT1_ARQT_M MAKEMASK(0x3FF, 0) +#define VF_MBX_ATQBAH1 0x00007800 /* Reset Source: CORER */ +#define VF_MBX_ATQBAH1_ATQBAH_S 0 +#define VF_MBX_ATQBAH1_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0) +#define VF_MBX_ATQBAL1 0x00007C00 /* Reset Source: 
CORER */ +#define VF_MBX_ATQBAL1_ATQBAL_S 6 +#define VF_MBX_ATQBAL1_ATQBAL_M MAKEMASK(0x3FFFFFF, 6) +#define VF_MBX_ATQH1 0x00006400 /* Reset Source: CORER */ +#define VF_MBX_ATQH1_ATQH_S 0 +#define VF_MBX_ATQH1_ATQH_M MAKEMASK(0x3FF, 0) +#define VF_MBX_ATQLEN1 0x00006800 /* Reset Source: PFR */ +#define VF_MBX_ATQLEN1_ATQLEN_S 0 +#define VF_MBX_ATQLEN1_ATQLEN_M MAKEMASK(0x3FF, 0) +#define VF_MBX_ATQLEN1_ATQVFE_S 28 +#define VF_MBX_ATQLEN1_ATQVFE_M BIT(28) +#define VF_MBX_ATQLEN1_ATQOVFL_S 29 +#define VF_MBX_ATQLEN1_ATQOVFL_M BIT(29) +#define VF_MBX_ATQLEN1_ATQCRIT_S 30 +#define VF_MBX_ATQLEN1_ATQCRIT_M BIT(30) +#define VF_MBX_ATQLEN1_ATQENABLE_S 31 +#define VF_MBX_ATQLEN1_ATQENABLE_M BIT(31) +#define VF_MBX_ATQT1 0x00008400 /* Reset Source: CORER */ +#define VF_MBX_ATQT1_ATQT_S 0 +#define VF_MBX_ATQT1_ATQT_M MAKEMASK(0x3FF, 0) +#define PFPCI_VF_FLUSH_DONE1 0x0000E400 /* Reset Source: PCIR */ +#define PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_S 0 +#define PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_M BIT(0) +#define VFGEN_RSTAT1 0x00008800 /* Reset Source: VFR */ +#define VFGEN_RSTAT1_VFR_STATE_S 0 +#define VFGEN_RSTAT1_VFR_STATE_M MAKEMASK(0x3, 0) +#define VFINT_DYN_CTL0 0x00005C00 /* Reset Source: CORER */ +#define VFINT_DYN_CTL0_INTENA_S 0 +#define VFINT_DYN_CTL0_INTENA_M BIT(0) +#define VFINT_DYN_CTL0_CLEARPBA_S 1 +#define VFINT_DYN_CTL0_CLEARPBA_M BIT(1) +#define VFINT_DYN_CTL0_SWINT_TRIG_S 2 +#define VFINT_DYN_CTL0_SWINT_TRIG_M BIT(2) +#define VFINT_DYN_CTL0_ITR_INDX_S 3 +#define VFINT_DYN_CTL0_ITR_INDX_M MAKEMASK(0x3, 3) +#define VFINT_DYN_CTL0_INTERVAL_S 5 +#define VFINT_DYN_CTL0_INTERVAL_M MAKEMASK(0xFFF, 5) +#define VFINT_DYN_CTL0_SW_ITR_INDX_ENA_S 24 +#define VFINT_DYN_CTL0_SW_ITR_INDX_ENA_M BIT(24) +#define VFINT_DYN_CTL0_SW_ITR_INDX_S 25 +#define VFINT_DYN_CTL0_SW_ITR_INDX_M MAKEMASK(0x3, 25) +#define VFINT_DYN_CTL0_WB_ON_ITR_S 30 +#define VFINT_DYN_CTL0_WB_ON_ITR_M BIT(30) +#define VFINT_DYN_CTL0_INTENA_MSK_S 31 +#define VFINT_DYN_CTL0_INTENA_MSK_M BIT(31) +#define VFINT_DYN_CTLN(_i) (0x00003800 + ((_i) * 4)) /* _i=0...63 */ /* Reset Source: CORER */ +#define VFINT_DYN_CTLN_MAX_INDEX 63 +#define VFINT_DYN_CTLN_INTENA_S 0 +#define VFINT_DYN_CTLN_INTENA_M BIT(0) +#define VFINT_DYN_CTLN_CLEARPBA_S 1 +#define VFINT_DYN_CTLN_CLEARPBA_M BIT(1) +#define VFINT_DYN_CTLN_SWINT_TRIG_S 2 +#define VFINT_DYN_CTLN_SWINT_TRIG_M BIT(2) +#define VFINT_DYN_CTLN_ITR_INDX_S 3 +#define VFINT_DYN_CTLN_ITR_INDX_M MAKEMASK(0x3, 3) +#define VFINT_DYN_CTLN_INTERVAL_S 5 +#define VFINT_DYN_CTLN_INTERVAL_M MAKEMASK(0xFFF, 5) +#define VFINT_DYN_CTLN_SW_ITR_INDX_ENA_S 24 +#define VFINT_DYN_CTLN_SW_ITR_INDX_ENA_M BIT(24) +#define VFINT_DYN_CTLN_SW_ITR_INDX_S 25 +#define VFINT_DYN_CTLN_SW_ITR_INDX_M MAKEMASK(0x3, 25) +#define VFINT_DYN_CTLN_WB_ON_ITR_S 30 +#define VFINT_DYN_CTLN_WB_ON_ITR_M BIT(30) +#define VFINT_DYN_CTLN_INTENA_MSK_S 31 +#define VFINT_DYN_CTLN_INTENA_MSK_M BIT(31) +#define VFINT_ITR0(_i) (0x00004C00 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */ +#define VFINT_ITR0_MAX_INDEX 2 +#define VFINT_ITR0_INTERVAL_S 0 +#define VFINT_ITR0_INTERVAL_M MAKEMASK(0xFFF, 0) +#define VFINT_ITRN(_i, _j) (0x00002800 + ((_i) * 4 + (_j) * 12)) /* _i=0...2, _j=0...63 */ /* Reset Source: CORER */ +#define VFINT_ITRN_MAX_INDEX 2 +#define VFINT_ITRN_INTERVAL_S 0 +#define VFINT_ITRN_INTERVAL_M MAKEMASK(0xFFF, 0) +#define QRX_TAIL1(_QRX) (0x00002000 + ((_QRX) * 4)) /* _i=0...255 */ /* Reset Source: CORER */ +#define QRX_TAIL1_MAX_INDEX 255 +#define QRX_TAIL1_TAIL_S 0 +#define QRX_TAIL1_TAIL_M MAKEMASK(0x1FFF, 0) +#define 
QTX_TAIL(_DBQM) (0x00000000 + ((_DBQM) * 4)) /* _i=0...255 */ /* Reset Source: CORER */ +#define QTX_TAIL_MAX_INDEX 255 +#define QTX_TAIL_QTX_COMM_DBELL_S 0 +#define QTX_TAIL_QTX_COMM_DBELL_M MAKEMASK(0xFFFFFFFF, 0) +#define VF_MBX_CPM_ARQBAH1 0x0000F060 /* Reset Source: CORER */ +#define VF_MBX_CPM_ARQBAH1_ARQBAH_S 0 +#define VF_MBX_CPM_ARQBAH1_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0) +#define VF_MBX_CPM_ARQBAL1 0x0000F050 /* Reset Source: CORER */ +#define VF_MBX_CPM_ARQBAL1_ARQBAL_LSB_S 0 +#define VF_MBX_CPM_ARQBAL1_ARQBAL_LSB_M MAKEMASK(0x3F, 0) +#define VF_MBX_CPM_ARQBAL1_ARQBAL_S 6 +#define VF_MBX_CPM_ARQBAL1_ARQBAL_M MAKEMASK(0x3FFFFFF, 6) +#define VF_MBX_CPM_ARQH1 0x0000F080 /* Reset Source: CORER */ +#define VF_MBX_CPM_ARQH1_ARQH_S 0 +#define VF_MBX_CPM_ARQH1_ARQH_M MAKEMASK(0x3FF, 0) +#define VF_MBX_CPM_ARQLEN1 0x0000F070 /* Reset Source: PFR */ +#define VF_MBX_CPM_ARQLEN1_ARQLEN_S 0 +#define VF_MBX_CPM_ARQLEN1_ARQLEN_M MAKEMASK(0x3FF, 0) +#define VF_MBX_CPM_ARQLEN1_ARQVFE_S 28 +#define VF_MBX_CPM_ARQLEN1_ARQVFE_M BIT(28) +#define VF_MBX_CPM_ARQLEN1_ARQOVFL_S 29 +#define VF_MBX_CPM_ARQLEN1_ARQOVFL_M BIT(29) +#define VF_MBX_CPM_ARQLEN1_ARQCRIT_S 30 +#define VF_MBX_CPM_ARQLEN1_ARQCRIT_M BIT(30) +#define VF_MBX_CPM_ARQLEN1_ARQENABLE_S 31 +#define VF_MBX_CPM_ARQLEN1_ARQENABLE_M BIT(31) +#define VF_MBX_CPM_ARQT1 0x0000F090 /* Reset Source: CORER */ +#define VF_MBX_CPM_ARQT1_ARQT_S 0 +#define VF_MBX_CPM_ARQT1_ARQT_M MAKEMASK(0x3FF, 0) +#define VF_MBX_CPM_ATQBAH1 0x0000F010 /* Reset Source: CORER */ +#define VF_MBX_CPM_ATQBAH1_ATQBAH_S 0 +#define VF_MBX_CPM_ATQBAH1_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0) +#define VF_MBX_CPM_ATQBAL1 0x0000F000 /* Reset Source: CORER */ +#define VF_MBX_CPM_ATQBAL1_ATQBAL_S 6 +#define VF_MBX_CPM_ATQBAL1_ATQBAL_M MAKEMASK(0x3FFFFFF, 6) +#define VF_MBX_CPM_ATQH1 0x0000F030 /* Reset Source: CORER */ +#define VF_MBX_CPM_ATQH1_ATQH_S 0 +#define VF_MBX_CPM_ATQH1_ATQH_M MAKEMASK(0x3FF, 0) +#define VF_MBX_CPM_ATQLEN1 0x0000F020 /* Reset Source: PFR */ +#define VF_MBX_CPM_ATQLEN1_ATQLEN_S 0 +#define VF_MBX_CPM_ATQLEN1_ATQLEN_M MAKEMASK(0x3FF, 0) +#define VF_MBX_CPM_ATQLEN1_ATQVFE_S 28 +#define VF_MBX_CPM_ATQLEN1_ATQVFE_M BIT(28) +#define VF_MBX_CPM_ATQLEN1_ATQOVFL_S 29 +#define VF_MBX_CPM_ATQLEN1_ATQOVFL_M BIT(29) +#define VF_MBX_CPM_ATQLEN1_ATQCRIT_S 30 +#define VF_MBX_CPM_ATQLEN1_ATQCRIT_M BIT(30) +#define VF_MBX_CPM_ATQLEN1_ATQENABLE_S 31 +#define VF_MBX_CPM_ATQLEN1_ATQENABLE_M BIT(31) +#define VF_MBX_CPM_ATQT1 0x0000F040 /* Reset Source: CORER */ +#define VF_MBX_CPM_ATQT1_ATQT_S 0 +#define VF_MBX_CPM_ATQT1_ATQT_M MAKEMASK(0x3FF, 0) +#define VF_MBX_HLP_ARQBAH1 0x00020060 /* Reset Source: CORER */ +#define VF_MBX_HLP_ARQBAH1_ARQBAH_S 0 +#define VF_MBX_HLP_ARQBAH1_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0) +#define VF_MBX_HLP_ARQBAL1 0x00020050 /* Reset Source: CORER */ +#define VF_MBX_HLP_ARQBAL1_ARQBAL_LSB_S 0 +#define VF_MBX_HLP_ARQBAL1_ARQBAL_LSB_M MAKEMASK(0x3F, 0) +#define VF_MBX_HLP_ARQBAL1_ARQBAL_S 6 +#define VF_MBX_HLP_ARQBAL1_ARQBAL_M MAKEMASK(0x3FFFFFF, 6) +#define VF_MBX_HLP_ARQH1 0x00020080 /* Reset Source: CORER */ +#define VF_MBX_HLP_ARQH1_ARQH_S 0 +#define VF_MBX_HLP_ARQH1_ARQH_M MAKEMASK(0x3FF, 0) +#define VF_MBX_HLP_ARQLEN1 0x00020070 /* Reset Source: PFR */ +#define VF_MBX_HLP_ARQLEN1_ARQLEN_S 0 +#define VF_MBX_HLP_ARQLEN1_ARQLEN_M MAKEMASK(0x3FF, 0) +#define VF_MBX_HLP_ARQLEN1_ARQVFE_S 28 +#define VF_MBX_HLP_ARQLEN1_ARQVFE_M BIT(28) +#define VF_MBX_HLP_ARQLEN1_ARQOVFL_S 29 +#define VF_MBX_HLP_ARQLEN1_ARQOVFL_M BIT(29) +#define VF_MBX_HLP_ARQLEN1_ARQCRIT_S 30 +#define 
VF_MBX_HLP_ARQLEN1_ARQCRIT_M BIT(30) +#define VF_MBX_HLP_ARQLEN1_ARQENABLE_S 31 +#define VF_MBX_HLP_ARQLEN1_ARQENABLE_M BIT(31) +#define VF_MBX_HLP_ARQT1 0x00020090 /* Reset Source: CORER */ +#define VF_MBX_HLP_ARQT1_ARQT_S 0 +#define VF_MBX_HLP_ARQT1_ARQT_M MAKEMASK(0x3FF, 0) +#define VF_MBX_HLP_ATQBAH1 0x00020010 /* Reset Source: CORER */ +#define VF_MBX_HLP_ATQBAH1_ATQBAH_S 0 +#define VF_MBX_HLP_ATQBAH1_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0) +#define VF_MBX_HLP_ATQBAL1 0x00020000 /* Reset Source: CORER */ +#define VF_MBX_HLP_ATQBAL1_ATQBAL_S 6 +#define VF_MBX_HLP_ATQBAL1_ATQBAL_M MAKEMASK(0x3FFFFFF, 6) +#define VF_MBX_HLP_ATQH1 0x00020030 /* Reset Source: CORER */ +#define VF_MBX_HLP_ATQH1_ATQH_S 0 +#define VF_MBX_HLP_ATQH1_ATQH_M MAKEMASK(0x3FF, 0) +#define VF_MBX_HLP_ATQLEN1 0x00020020 /* Reset Source: PFR */ +#define VF_MBX_HLP_ATQLEN1_ATQLEN_S 0 +#define VF_MBX_HLP_ATQLEN1_ATQLEN_M MAKEMASK(0x3FF, 0) +#define VF_MBX_HLP_ATQLEN1_ATQVFE_S 28 +#define VF_MBX_HLP_ATQLEN1_ATQVFE_M BIT(28) +#define VF_MBX_HLP_ATQLEN1_ATQOVFL_S 29 +#define VF_MBX_HLP_ATQLEN1_ATQOVFL_M BIT(29) +#define VF_MBX_HLP_ATQLEN1_ATQCRIT_S 30 +#define VF_MBX_HLP_ATQLEN1_ATQCRIT_M BIT(30) +#define VF_MBX_HLP_ATQLEN1_ATQENABLE_S 31 +#define VF_MBX_HLP_ATQLEN1_ATQENABLE_M BIT(31) +#define VF_MBX_HLP_ATQT1 0x00020040 /* Reset Source: CORER */ +#define VF_MBX_HLP_ATQT1_ATQT_S 0 +#define VF_MBX_HLP_ATQT1_ATQT_M MAKEMASK(0x3FF, 0) +#define VF_MBX_PSM_ARQBAH1 0x00021060 /* Reset Source: CORER */ +#define VF_MBX_PSM_ARQBAH1_ARQBAH_S 0 +#define VF_MBX_PSM_ARQBAH1_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0) +#define VF_MBX_PSM_ARQBAL1 0x00021050 /* Reset Source: CORER */ +#define VF_MBX_PSM_ARQBAL1_ARQBAL_LSB_S 0 +#define VF_MBX_PSM_ARQBAL1_ARQBAL_LSB_M MAKEMASK(0x3F, 0) +#define VF_MBX_PSM_ARQBAL1_ARQBAL_S 6 +#define VF_MBX_PSM_ARQBAL1_ARQBAL_M MAKEMASK(0x3FFFFFF, 6) +#define VF_MBX_PSM_ARQH1 0x00021080 /* Reset Source: CORER */ +#define VF_MBX_PSM_ARQH1_ARQH_S 0 +#define VF_MBX_PSM_ARQH1_ARQH_M MAKEMASK(0x3FF, 0) +#define VF_MBX_PSM_ARQLEN1 0x00021070 /* Reset Source: PFR */ +#define VF_MBX_PSM_ARQLEN1_ARQLEN_S 0 +#define VF_MBX_PSM_ARQLEN1_ARQLEN_M MAKEMASK(0x3FF, 0) +#define VF_MBX_PSM_ARQLEN1_ARQVFE_S 28 +#define VF_MBX_PSM_ARQLEN1_ARQVFE_M BIT(28) +#define VF_MBX_PSM_ARQLEN1_ARQOVFL_S 29 +#define VF_MBX_PSM_ARQLEN1_ARQOVFL_M BIT(29) +#define VF_MBX_PSM_ARQLEN1_ARQCRIT_S 30 +#define VF_MBX_PSM_ARQLEN1_ARQCRIT_M BIT(30) +#define VF_MBX_PSM_ARQLEN1_ARQENABLE_S 31 +#define VF_MBX_PSM_ARQLEN1_ARQENABLE_M BIT(31) +#define VF_MBX_PSM_ARQT1 0x00021090 /* Reset Source: CORER */ +#define VF_MBX_PSM_ARQT1_ARQT_S 0 +#define VF_MBX_PSM_ARQT1_ARQT_M MAKEMASK(0x3FF, 0) +#define VF_MBX_PSM_ATQBAH1 0x00021010 /* Reset Source: CORER */ +#define VF_MBX_PSM_ATQBAH1_ATQBAH_S 0 +#define VF_MBX_PSM_ATQBAH1_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0) +#define VF_MBX_PSM_ATQBAL1 0x00021000 /* Reset Source: CORER */ +#define VF_MBX_PSM_ATQBAL1_ATQBAL_S 6 +#define VF_MBX_PSM_ATQBAL1_ATQBAL_M MAKEMASK(0x3FFFFFF, 6) +#define VF_MBX_PSM_ATQH1 0x00021030 /* Reset Source: CORER */ +#define VF_MBX_PSM_ATQH1_ATQH_S 0 +#define VF_MBX_PSM_ATQH1_ATQH_M MAKEMASK(0x3FF, 0) +#define VF_MBX_PSM_ATQLEN1 0x00021020 /* Reset Source: PFR */ +#define VF_MBX_PSM_ATQLEN1_ATQLEN_S 0 +#define VF_MBX_PSM_ATQLEN1_ATQLEN_M MAKEMASK(0x3FF, 0) +#define VF_MBX_PSM_ATQLEN1_ATQVFE_S 28 +#define VF_MBX_PSM_ATQLEN1_ATQVFE_M BIT(28) +#define VF_MBX_PSM_ATQLEN1_ATQOVFL_S 29 +#define VF_MBX_PSM_ATQLEN1_ATQOVFL_M BIT(29) +#define VF_MBX_PSM_ATQLEN1_ATQCRIT_S 30 +#define VF_MBX_PSM_ATQLEN1_ATQCRIT_M BIT(30) 
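The VF mailbox control queues share the same BAL/BAH/LEN/tail layout across the plain, CPM, HLP and PSM register banks above. A minimal sketch of pointing the VF mailbox receive (ARQ) ring at a DMA buffer and enabling it, assuming wr32(), struct ice_hw and a 64-byte-aligned ring address (so the ARQBAL_LSB bits stay zero):

/* Sketch only: program and enable the VF mailbox receive queue. */
static void
example_enable_vf_mbx_arq(struct ice_hw *hw, u64 ring_dma, u16 num_descs)
{
	/* program the 64-bit ring base, low dword first */
	wr32(hw, VF_MBX_ARQBAL1, (u32)(ring_dma & 0xFFFFFFFF));
	wr32(hw, VF_MBX_ARQBAH1, (u32)(ring_dma >> 32));

	/* ring length lives in bits 9:0; bit 31 enables the queue */
	wr32(hw, VF_MBX_ARQLEN1,
	     ((u32)num_descs & VF_MBX_ARQLEN1_ARQLEN_M) |
	     VF_MBX_ARQLEN1_ARQENABLE_M);

	/* hand all clean descriptors to hardware */
	wr32(hw, VF_MBX_ARQT1, (u32)(num_descs - 1));
}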
+#define VF_MBX_PSM_ATQLEN1_ATQENABLE_S 31 +#define VF_MBX_PSM_ATQLEN1_ATQENABLE_M BIT(31) +#define VF_MBX_PSM_ATQT1 0x00021040 /* Reset Source: CORER */ +#define VF_MBX_PSM_ATQT1_ATQT_S 0 +#define VF_MBX_PSM_ATQT1_ATQT_M MAKEMASK(0x3FF, 0) +#define VF_SB_CPM_ARQBAH1 0x0000F160 /* Reset Source: CORER */ +#define VF_SB_CPM_ARQBAH1_ARQBAH_S 0 +#define VF_SB_CPM_ARQBAH1_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0) +#define VF_SB_CPM_ARQBAL1 0x0000F150 /* Reset Source: CORER */ +#define VF_SB_CPM_ARQBAL1_ARQBAL_LSB_S 0 +#define VF_SB_CPM_ARQBAL1_ARQBAL_LSB_M MAKEMASK(0x3F, 0) +#define VF_SB_CPM_ARQBAL1_ARQBAL_S 6 +#define VF_SB_CPM_ARQBAL1_ARQBAL_M MAKEMASK(0x3FFFFFF, 6) +#define VF_SB_CPM_ARQH1 0x0000F180 /* Reset Source: CORER */ +#define VF_SB_CPM_ARQH1_ARQH_S 0 +#define VF_SB_CPM_ARQH1_ARQH_M MAKEMASK(0x3FF, 0) +#define VF_SB_CPM_ARQLEN1 0x0000F170 /* Reset Source: PFR */ +#define VF_SB_CPM_ARQLEN1_ARQLEN_S 0 +#define VF_SB_CPM_ARQLEN1_ARQLEN_M MAKEMASK(0x3FF, 0) +#define VF_SB_CPM_ARQLEN1_ARQVFE_S 28 +#define VF_SB_CPM_ARQLEN1_ARQVFE_M BIT(28) +#define VF_SB_CPM_ARQLEN1_ARQOVFL_S 29 +#define VF_SB_CPM_ARQLEN1_ARQOVFL_M BIT(29) +#define VF_SB_CPM_ARQLEN1_ARQCRIT_S 30 +#define VF_SB_CPM_ARQLEN1_ARQCRIT_M BIT(30) +#define VF_SB_CPM_ARQLEN1_ARQENABLE_S 31 +#define VF_SB_CPM_ARQLEN1_ARQENABLE_M BIT(31) +#define VF_SB_CPM_ARQT1 0x0000F190 /* Reset Source: CORER */ +#define VF_SB_CPM_ARQT1_ARQT_S 0 +#define VF_SB_CPM_ARQT1_ARQT_M MAKEMASK(0x3FF, 0) +#define VF_SB_CPM_ATQBAH1 0x0000F110 /* Reset Source: CORER */ +#define VF_SB_CPM_ATQBAH1_ATQBAH_S 0 +#define VF_SB_CPM_ATQBAH1_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0) +#define VF_SB_CPM_ATQBAL1 0x0000F100 /* Reset Source: CORER */ +#define VF_SB_CPM_ATQBAL1_ATQBAL_S 6 +#define VF_SB_CPM_ATQBAL1_ATQBAL_M MAKEMASK(0x3FFFFFF, 6) +#define VF_SB_CPM_ATQH1 0x0000F130 /* Reset Source: CORER */ +#define VF_SB_CPM_ATQH1_ATQH_S 0 +#define VF_SB_CPM_ATQH1_ATQH_M MAKEMASK(0x3FF, 0) +#define VF_SB_CPM_ATQLEN1 0x0000F120 /* Reset Source: PFR */ +#define VF_SB_CPM_ATQLEN1_ATQLEN_S 0 +#define VF_SB_CPM_ATQLEN1_ATQLEN_M MAKEMASK(0x3FF, 0) +#define VF_SB_CPM_ATQLEN1_ATQVFE_S 28 +#define VF_SB_CPM_ATQLEN1_ATQVFE_M BIT(28) +#define VF_SB_CPM_ATQLEN1_ATQOVFL_S 29 +#define VF_SB_CPM_ATQLEN1_ATQOVFL_M BIT(29) +#define VF_SB_CPM_ATQLEN1_ATQCRIT_S 30 +#define VF_SB_CPM_ATQLEN1_ATQCRIT_M BIT(30) +#define VF_SB_CPM_ATQLEN1_ATQENABLE_S 31 +#define VF_SB_CPM_ATQLEN1_ATQENABLE_M BIT(31) +#define VF_SB_CPM_ATQT1 0x0000F140 /* Reset Source: CORER */ +#define VF_SB_CPM_ATQT1_ATQT_S 0 +#define VF_SB_CPM_ATQT1_ATQT_M MAKEMASK(0x3FF, 0) +#define VFINT_DYN_CTL(_i) (0x00023000 + ((_i) * 4096)) /* _i=0...7 */ /* Reset Source: CORER */ +#define VFINT_DYN_CTL_MAX_INDEX 7 +#define VFINT_DYN_CTL_INTENA_S 0 +#define VFINT_DYN_CTL_INTENA_M BIT(0) +#define VFINT_DYN_CTL_CLEARPBA_S 1 +#define VFINT_DYN_CTL_CLEARPBA_M BIT(1) +#define VFINT_DYN_CTL_SWINT_TRIG_S 2 +#define VFINT_DYN_CTL_SWINT_TRIG_M BIT(2) +#define VFINT_DYN_CTL_ITR_INDX_S 3 +#define VFINT_DYN_CTL_ITR_INDX_M MAKEMASK(0x3, 3) +#define VFINT_DYN_CTL_INTERVAL_S 5 +#define VFINT_DYN_CTL_INTERVAL_M MAKEMASK(0xFFF, 5) +#define VFINT_DYN_CTL_SW_ITR_INDX_ENA_S 24 +#define VFINT_DYN_CTL_SW_ITR_INDX_ENA_M BIT(24) +#define VFINT_DYN_CTL_SW_ITR_INDX_S 25 +#define VFINT_DYN_CTL_SW_ITR_INDX_M MAKEMASK(0x3, 25) +#define VFINT_DYN_CTL_WB_ON_ITR_S 30 +#define VFINT_DYN_CTL_WB_ON_ITR_M BIT(30) +#define VFINT_DYN_CTL_INTENA_MSK_S 31 +#define VFINT_DYN_CTL_INTENA_MSK_M BIT(31) +#define VFINT_ITR_0(_i) (0x00023004 + ((_i) * 4096)) /* _i=0...7 */ /* Reset Source: CORER */ 
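VFINT_DYN_CTL is the per-vector dynamic interrupt control register: bit 0 re-arms the vector, bit 1 clears the pending-bit-array entry, and bits 4:3 select which ITR (if any) to update. A minimal sketch of re-enabling a vector, assuming wr32() and struct ice_hw from the surrounding driver code; EXAMPLE_ITR_NONE is a hypothetical name, and its value of 3 meaning "no ITR update" is an assumption here:

/* hypothetical "no ITR update" index, assumed to be 3 */
#define EXAMPLE_ITR_NONE 3

/* Sketch only: re-arm one interrupt vector via VFINT_DYN_CTL. */
static void
example_irq_enable(struct ice_hw *hw, u8 vector)
{
	u32 val = VFINT_DYN_CTL_INTENA_M |
		  VFINT_DYN_CTL_CLEARPBA_M |
		  (EXAMPLE_ITR_NONE << VFINT_DYN_CTL_ITR_INDX_S);

	wr32(hw, VFINT_DYN_CTL(vector), val);
}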
+#define VFINT_ITR_0_MAX_INDEX 7 +#define VFINT_ITR_0_INTERVAL_S 0 +#define VFINT_ITR_0_INTERVAL_M MAKEMASK(0xFFF, 0) +#define VFINT_ITR_1(_i) (0x00023008 + ((_i) * 4096)) /* _i=0...7 */ /* Reset Source: CORER */ +#define VFINT_ITR_1_MAX_INDEX 7 +#define VFINT_ITR_1_INTERVAL_S 0 +#define VFINT_ITR_1_INTERVAL_M MAKEMASK(0xFFF, 0) +#define VFINT_ITR_2(_i) (0x0002300C + ((_i) * 4096)) /* _i=0...7 */ /* Reset Source: CORER */ +#define VFINT_ITR_2_MAX_INDEX 7 +#define VFINT_ITR_2_INTERVAL_S 0 +#define VFINT_ITR_2_INTERVAL_M MAKEMASK(0xFFF, 0) +#define VFQRX_TAIL(_QRX) (0x0002E000 + ((_QRX) * 4)) /* _i=0...255 */ /* Reset Source: CORER */ +#define VFQRX_TAIL_MAX_INDEX 255 +#define VFQRX_TAIL_TAIL_S 0 +#define VFQRX_TAIL_TAIL_M MAKEMASK(0x1FFF, 0) +#define VFQTX_COMM_DBELL(_DBQM) (0x00030000 + ((_DBQM) * 4)) /* _i=0...255 */ /* Reset Source: CORER */ +#define VFQTX_COMM_DBELL_MAX_INDEX 255 +#define VFQTX_COMM_DBELL_QTX_COMM_DBELL_S 0 +#define VFQTX_COMM_DBELL_QTX_COMM_DBELL_M MAKEMASK(0xFFFFFFFF, 0) +#define VFQTX_COMM_DBLQ_DBELL(_DBLQ) (0x00022000 + ((_DBLQ) * 4)) /* _i=0...3 */ /* Reset Source: CORER */ +#define VFQTX_COMM_DBLQ_DBELL_MAX_INDEX 3 +#define VFQTX_COMM_DBLQ_DBELL_TAIL_S 0 +#define VFQTX_COMM_DBLQ_DBELL_TAIL_M MAKEMASK(0x1FFF, 0) +#define MSIX_TMSG1(_i) (0x00000008 + ((_i) * 16)) /* _i=0...64 */ /* Reset Source: FLR */ +#define MSIX_TMSG1_MAX_INDEX 64 +#define MSIX_TMSG1_MSIXTMSG_S 0 +#define MSIX_TMSG1_MSIXTMSG_M MAKEMASK(0xFFFFFFFF, 0) +#define VFPE_AEQALLOC1 0x0000A400 /* Reset Source: VFR */ +#define VFPE_AEQALLOC1_AECOUNT_S 0 +#define VFPE_AEQALLOC1_AECOUNT_M MAKEMASK(0xFFFFFFFF, 0) +#define VFPE_CCQPHIGH1 0x00009800 /* Reset Source: VFR */ +#define VFPE_CCQPHIGH1_PECCQPHIGH_S 0 +#define VFPE_CCQPHIGH1_PECCQPHIGH_M MAKEMASK(0xFFFFFFFF, 0) +#define VFPE_CCQPLOW1 0x0000AC00 /* Reset Source: VFR */ +#define VFPE_CCQPLOW1_PECCQPLOW_S 0 +#define VFPE_CCQPLOW1_PECCQPLOW_M MAKEMASK(0xFFFFFFFF, 0) +#define VFPE_CCQPSTATUS1 0x0000B800 /* Reset Source: VFR */ +#define VFPE_CCQPSTATUS1_CCQP_DONE_S 0 +#define VFPE_CCQPSTATUS1_CCQP_DONE_M BIT(0) +#define VFPE_CCQPSTATUS1_HMC_PROFILE_S 4 +#define VFPE_CCQPSTATUS1_HMC_PROFILE_M MAKEMASK(0x7, 4) +#define VFPE_CCQPSTATUS1_RDMA_EN_VFS_S 16 +#define VFPE_CCQPSTATUS1_RDMA_EN_VFS_M MAKEMASK(0x3F, 16) +#define VFPE_CCQPSTATUS1_CCQP_ERR_S 31 +#define VFPE_CCQPSTATUS1_CCQP_ERR_M BIT(31) +#define VFPE_CQACK1 0x0000B000 /* Reset Source: VFR */ +#define VFPE_CQACK1_PECQID_S 0 +#define VFPE_CQACK1_PECQID_M MAKEMASK(0x7FFFF, 0) +#define VFPE_CQARM1 0x0000B400 /* Reset Source: VFR */ +#define VFPE_CQARM1_PECQID_S 0 +#define VFPE_CQARM1_PECQID_M MAKEMASK(0x7FFFF, 0) +#define VFPE_CQPDB1 0x0000BC00 /* Reset Source: VFR */ +#define VFPE_CQPDB1_WQHEAD_S 0 +#define VFPE_CQPDB1_WQHEAD_M MAKEMASK(0x7FF, 0) +#define VFPE_CQPERRCODES1 0x00009C00 /* Reset Source: VFR */ +#define VFPE_CQPERRCODES1_CQP_MINOR_CODE_S 0 +#define VFPE_CQPERRCODES1_CQP_MINOR_CODE_M MAKEMASK(0xFFFF, 0) +#define VFPE_CQPERRCODES1_CQP_MAJOR_CODE_S 16 +#define VFPE_CQPERRCODES1_CQP_MAJOR_CODE_M MAKEMASK(0xFFFF, 16) +#define VFPE_CQPTAIL1 0x0000A000 /* Reset Source: VFR */ +#define VFPE_CQPTAIL1_WQTAIL_S 0 +#define VFPE_CQPTAIL1_WQTAIL_M MAKEMASK(0x7FF, 0) +#define VFPE_CQPTAIL1_CQP_OP_ERR_S 31 +#define VFPE_CQPTAIL1_CQP_OP_ERR_M BIT(31) +#define VFPE_IPCONFIG01 0x00008C00 /* Reset Source: VFR */ +#define VFPE_IPCONFIG01_PEIPID_S 0 +#define VFPE_IPCONFIG01_PEIPID_M MAKEMASK(0xFFFF, 0) +#define VFPE_IPCONFIG01_USEENTIREIDRANGE_S 16 +#define VFPE_IPCONFIG01_USEENTIREIDRANGE_M BIT(16) +#define 
VFPE_IPCONFIG01_UDP_SRC_PORT_MASK_EN_S 17 +#define VFPE_IPCONFIG01_UDP_SRC_PORT_MASK_EN_M BIT(17) +#define VFPE_MRTEIDXMASK1(_VF) (0x00509800 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: PFR */ +#define VFPE_MRTEIDXMASK1_MAX_INDEX 255 +#define VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_S 0 +#define VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_M MAKEMASK(0x1F, 0) +#define VFPE_RCVUNEXPECTEDERROR1 0x00009400 /* Reset Source: VFR */ +#define VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_S 0 +#define VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_M MAKEMASK(0xFFFFFF, 0) +#define VFPE_TCPNOWTIMER1 0x0000A800 /* Reset Source: VFR */ +#define VFPE_TCPNOWTIMER1_TCP_NOW_S 0 +#define VFPE_TCPNOWTIMER1_TCP_NOW_M MAKEMASK(0xFFFFFFFF, 0) +#define VFPE_WQEALLOC1 0x0000C000 /* Reset Source: VFR */ +#define VFPE_WQEALLOC1_PEQPID_S 0 +#define VFPE_WQEALLOC1_PEQPID_M MAKEMASK(0x3FFFF, 0) +#define VFPE_WQEALLOC1_WQE_DESC_INDEX_S 20 +#define VFPE_WQEALLOC1_WQE_DESC_INDEX_M MAKEMASK(0xFFF, 20) + +#endif diff --git a/src/spdk/dpdk/drivers/net/ice/base/ice_lan_tx_rx.h b/src/spdk/dpdk/drivers/net/ice/base/ice_lan_tx_rx.h new file mode 100644 index 000000000..a0e284a8d --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ice/base/ice_lan_tx_rx.h @@ -0,0 +1,2377 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + +#ifndef _ICE_LAN_TX_RX_H_ +#define _ICE_LAN_TX_RX_H_ +#include "ice_osdep.h" + +/* Rx Descriptors */ +union ice_16byte_rx_desc { + struct { + __le64 pkt_addr; /* Packet buffer address */ + __le64 hdr_addr; /* Header buffer address */ + } read; + struct { + struct { + struct { + __le16 mirroring_status; + __le16 l2tag1; + } lo_dword; + union { + __le32 rss; /* RSS Hash */ + __le32 fd_id; /* Flow Director filter ID */ + } hi_dword; + } qword0; + struct { + /* ext status/error/PTYPE/length */ + __le64 status_error_len; + } qword1; + } wb; /* writeback */ +}; + +union ice_32byte_rx_desc { + struct { + __le64 pkt_addr; /* Packet buffer address */ + __le64 hdr_addr; /* Header buffer address */ + /* bit 0 of hdr_addr is DD bit */ + __le64 rsvd1; + __le64 rsvd2; + } read; + struct { + struct { + struct { + __le16 mirroring_status; + __le16 l2tag1; + } lo_dword; + union { + __le32 rss; /* RSS Hash */ + __le32 fd_id; /* Flow Director filter ID */ + } hi_dword; + } qword0; + struct { + /* status/error/PTYPE/length */ + __le64 status_error_len; + } qword1; + struct { + __le16 ext_status; /* extended status */ + __le16 rsvd; + __le16 l2tag2_1; + __le16 l2tag2_2; + } qword2; + struct { + __le32 reserved; + __le32 fd_id; + } qword3; + } wb; /* writeback */ +}; + +struct ice_fltr_desc { + __le64 qidx_compq_space_stat; + __le64 dtype_cmd_vsi_fdid; +}; + +#define ICE_FXD_FLTR_QW0_QINDEX_S 0 +#define ICE_FXD_FLTR_QW0_QINDEX_M (0x7FFULL << ICE_FXD_FLTR_QW0_QINDEX_S) +#define ICE_FXD_FLTR_QW0_COMP_Q_S 11 +#define ICE_FXD_FLTR_QW0_COMP_Q_M BIT_ULL(ICE_FXD_FLTR_QW0_COMP_Q_S) +#define ICE_FXD_FLTR_QW0_COMP_Q_ZERO 0x0ULL +#define ICE_FXD_FLTR_QW0_COMP_Q_QINDX 0x1ULL + +#define ICE_FXD_FLTR_QW0_COMP_REPORT_S 12 +#define ICE_FXD_FLTR_QW0_COMP_REPORT_M \ + (0x3ULL << ICE_FXD_FLTR_QW0_COMP_REPORT_S) +#define ICE_FXD_FLTR_QW0_COMP_REPORT_NONE 0x0ULL +#define ICE_FXD_FLTR_QW0_COMP_REPORT_SW_FAIL 0x1ULL +#define ICE_FXD_FLTR_QW0_COMP_REPORT_SW 0x2ULL + +#define ICE_FXD_FLTR_QW0_FD_SPACE_S 14 +#define ICE_FXD_FLTR_QW0_FD_SPACE_M (0x3ULL << ICE_FXD_FLTR_QW0_FD_SPACE_S) +#define ICE_FXD_FLTR_QW0_FD_SPACE_GUAR 0x0ULL +#define ICE_FXD_FLTR_QW0_FD_SPACE_BEST_EFFORT 0x1ULL +#define ICE_FXD_FLTR_QW0_FD_SPACE_GUAR_BEST 0x2ULL 
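struct ice_fltr_desc is the Flow Director programming descriptor; the ICE_FXD_FLTR_QW0_* shift/mask values above pack the target queue index, completion-queue select, completion reporting mode and filter-space choice into qidx_compq_space_stat. A minimal sketch of filling that first quadword, assuming CPU_TO_LE64() from ice_osdep.h; the second quadword (dtype_cmd_vsi_fdid) is omitted from this sketch:

/* Sketch only: build QW0 of a Flow Director programming descriptor. */
static void
example_fill_fd_qw0(struct ice_fltr_desc *desc, u16 qindex)
{
	u64 qw0;

	/* Rx queue that matching packets are steered to */
	qw0 = ((u64)qindex << ICE_FXD_FLTR_QW0_QINDEX_S) &
	      ICE_FXD_FLTR_QW0_QINDEX_M;
	/* send the programming completion to that same queue */
	qw0 |= ICE_FXD_FLTR_QW0_COMP_Q_QINDX << ICE_FXD_FLTR_QW0_COMP_Q_S;
	/* always report the completion to software */
	qw0 |= ICE_FXD_FLTR_QW0_COMP_REPORT_SW <<
	       ICE_FXD_FLTR_QW0_COMP_REPORT_S;
	/* allocate the filter from the best-effort space */
	qw0 |= ICE_FXD_FLTR_QW0_FD_SPACE_BEST_EFFORT <<
	       ICE_FXD_FLTR_QW0_FD_SPACE_S;

	desc->qidx_compq_space_stat = CPU_TO_LE64(qw0);
}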
+#define ICE_FXD_FLTR_QW0_FD_SPACE_BEST_GUAR 0x3ULL + +#define ICE_FXD_FLTR_QW0_STAT_CNT_S 16 +#define ICE_FXD_FLTR_QW0_STAT_CNT_M \ + (0x1FFFULL << ICE_FXD_FLTR_QW0_STAT_CNT_S) +#define ICE_FXD_FLTR_QW0_STAT_ENA_S 29 +#define ICE_FXD_FLTR_QW0_STAT_ENA_M (0x3ULL << ICE_FXD_FLTR_QW0_STAT_ENA_S) +#define ICE_FXD_FLTR_QW0_STAT_ENA_NONE 0x0ULL +#define ICE_FXD_FLTR_QW0_STAT_ENA_PKTS 0x1ULL +#define ICE_FXD_FLTR_QW0_STAT_ENA_BYTES 0x2ULL +#define ICE_FXD_FLTR_QW0_STAT_ENA_PKTS_BYTES 0x3ULL + +#define ICE_FXD_FLTR_QW0_EVICT_ENA_S 31 +#define ICE_FXD_FLTR_QW0_EVICT_ENA_M BIT_ULL(ICE_FXD_FLTR_QW0_EVICT_ENA_S) +#define ICE_FXD_FLTR_QW0_EVICT_ENA_FALSE 0x0ULL +#define ICE_FXD_FLTR_QW0_EVICT_ENA_TRUE 0x1ULL + +#define ICE_FXD_FLTR_QW0_TO_Q_S 32 +#define ICE_FXD_FLTR_QW0_TO_Q_M (0x7ULL << ICE_FXD_FLTR_QW0_TO_Q_S) +#define ICE_FXD_FLTR_QW0_TO_Q_EQUALS_QINDEX 0x0ULL + +#define ICE_FXD_FLTR_QW0_TO_Q_PRI_S 35 +#define ICE_FXD_FLTR_QW0_TO_Q_PRI_M (0x7ULL << ICE_FXD_FLTR_QW0_TO_Q_PRI_S) +#define ICE_FXD_FLTR_QW0_TO_Q_PRIO1 0x1ULL + +#define ICE_FXD_FLTR_QW0_DPU_RECIPE_S 38 +#define ICE_FXD_FLTR_QW0_DPU_RECIPE_M \ + (0x3ULL << ICE_FXD_FLTR_QW0_DPU_RECIPE_S) +#define ICE_FXD_FLTR_QW0_DPU_RECIPE_DFLT 0x0ULL + +#define ICE_FXD_FLTR_QW0_DROP_S 40 +#define ICE_FXD_FLTR_QW0_DROP_M BIT_ULL(ICE_FXD_FLTR_QW0_DROP_S) +#define ICE_FXD_FLTR_QW0_DROP_NO 0x0ULL +#define ICE_FXD_FLTR_QW0_DROP_YES 0x1ULL + +#define ICE_FXD_FLTR_QW0_FLEX_PRI_S 41 +#define ICE_FXD_FLTR_QW0_FLEX_PRI_M (0x7ULL << ICE_FXD_FLTR_QW0_FLEX_PRI_S) +#define ICE_FXD_FLTR_QW0_FLEX_PRI_NONE 0x0ULL + +#define ICE_FXD_FLTR_QW0_FLEX_MDID_S 44 +#define ICE_FXD_FLTR_QW0_FLEX_MDID_M (0xFULL << ICE_FXD_FLTR_QW0_FLEX_MDID_S) +#define ICE_FXD_FLTR_QW0_FLEX_MDID0 0x0ULL + +#define ICE_FXD_FLTR_QW0_FLEX_VAL_S 48 +#define ICE_FXD_FLTR_QW0_FLEX_VAL_M \ + (0xFFFFULL << ICE_FXD_FLTR_QW0_FLEX_VAL_S) +#define ICE_FXD_FLTR_QW0_FLEX_VAL0 0x0ULL + +#define ICE_FXD_FLTR_QW1_DTYPE_S 0 +#define ICE_FXD_FLTR_QW1_DTYPE_M (0xFULL << ICE_FXD_FLTR_QW1_DTYPE_S) +#define ICE_FXD_FLTR_QW1_PCMD_S 4 +#define ICE_FXD_FLTR_QW1_PCMD_M BIT_ULL(ICE_FXD_FLTR_QW1_PCMD_S) +#define ICE_FXD_FLTR_QW1_PCMD_ADD 0x0ULL +#define ICE_FXD_FLTR_QW1_PCMD_REMOVE 0x1ULL + +#define ICE_FXD_FLTR_QW1_PROF_PRI_S 5 +#define ICE_FXD_FLTR_QW1_PROF_PRI_M (0x7ULL << ICE_FXD_FLTR_QW1_PROF_PRI_S) +#define ICE_FXD_FLTR_QW1_PROF_PRIO_ZERO 0x0ULL + +#define ICE_FXD_FLTR_QW1_PROF_S 8 +#define ICE_FXD_FLTR_QW1_PROF_M (0x3FULL << ICE_FXD_FLTR_QW1_PROF_S) +#define ICE_FXD_FLTR_QW1_PROF_ZERO 0x0ULL + +#define ICE_FXD_FLTR_QW1_FD_VSI_S 14 +#define ICE_FXD_FLTR_QW1_FD_VSI_M (0x3FFULL << ICE_FXD_FLTR_QW1_FD_VSI_S) +#define ICE_FXD_FLTR_QW1_SWAP_S 24 +#define ICE_FXD_FLTR_QW1_SWAP_M BIT_ULL(ICE_FXD_FLTR_QW1_SWAP_S) +#define ICE_FXD_FLTR_QW1_SWAP_NOT_SET 0x0ULL +#define ICE_FXD_FLTR_QW1_SWAP_SET 0x1ULL + +#define ICE_FXD_FLTR_QW1_FDID_PRI_S 25 +#define ICE_FXD_FLTR_QW1_FDID_PRI_M (0x7ULL << ICE_FXD_FLTR_QW1_FDID_PRI_S) +#define ICE_FXD_FLTR_QW1_FDID_PRI_ZERO 0x0ULL +#define ICE_FXD_FLTR_QW1_FDID_PRI_ONE 0x1ULL +#define ICE_FXD_FLTR_QW1_FDID_PRI_THREE 0x3ULL + +#define ICE_FXD_FLTR_QW1_FDID_MDID_S 28 +#define ICE_FXD_FLTR_QW1_FDID_MDID_M (0xFULL << ICE_FXD_FLTR_QW1_FDID_MDID_S) +#define ICE_FXD_FLTR_QW1_FDID_MDID_FD 0x05ULL + +#define ICE_FXD_FLTR_QW1_FDID_S 32 +#define ICE_FXD_FLTR_QW1_FDID_M \ + (0xFFFFFFFFULL << ICE_FXD_FLTR_QW1_FDID_S) +#define ICE_FXD_FLTR_QW1_FDID_ZERO 0x0ULL + +enum ice_rx_desc_status_bits { + /* Note: These are predefined bit offsets */ + ICE_RX_DESC_STATUS_DD_S = 0, + ICE_RX_DESC_STATUS_EOF_S = 1, + 
ICE_RX_DESC_STATUS_L2TAG1P_S = 2, + ICE_RX_DESC_STATUS_L3L4P_S = 3, + ICE_RX_DESC_STATUS_CRCP_S = 4, + ICE_RX_DESC_STATUS_TSYNINDX_S = 5, + ICE_RX_DESC_STATUS_TSYNVALID_S = 7, + ICE_RX_DESC_STATUS_EXT_UDP_0_S = 8, + ICE_RX_DESC_STATUS_UMBCAST_S = 9, + ICE_RX_DESC_STATUS_FLM_S = 11, + ICE_RX_DESC_STATUS_FLTSTAT_S = 12, + ICE_RX_DESC_STATUS_LPBK_S = 14, + ICE_RX_DESC_STATUS_IPV6EXADD_S = 15, + ICE_RX_DESC_STATUS_RESERVED2_S = 16, + ICE_RX_DESC_STATUS_INT_UDP_0_S = 18, + ICE_RX_DESC_STATUS_LAST /* this entry must be last!!! */ +}; + +#define ICE_RXD_QW1_STATUS_S 0 +#define ICE_RXD_QW1_STATUS_M ((BIT(ICE_RX_DESC_STATUS_LAST) - 1) << \ + ICE_RXD_QW1_STATUS_S) + +#define ICE_RXD_QW1_STATUS_TSYNINDX_S ICE_RX_DESC_STATUS_TSYNINDX_S +#define ICE_RXD_QW1_STATUS_TSYNINDX_M (0x3UL << ICE_RXD_QW1_STATUS_TSYNINDX_S) + +#define ICE_RXD_QW1_STATUS_TSYNVALID_S ICE_RX_DESC_STATUS_TSYNVALID_S +#define ICE_RXD_QW1_STATUS_TSYNVALID_M BIT_ULL(ICE_RXD_QW1_STATUS_TSYNVALID_S) + +enum ice_rx_desc_fltstat_values { + ICE_RX_DESC_FLTSTAT_NO_DATA = 0, + ICE_RX_DESC_FLTSTAT_RSV_FD_ID = 1, /* 16byte desc? FD_ID : RSV */ + ICE_RX_DESC_FLTSTAT_RSV = 2, + ICE_RX_DESC_FLTSTAT_RSS_HASH = 3, +}; + +#define ICE_RXD_QW1_ERROR_S 19 +#define ICE_RXD_QW1_ERROR_M (0xFFUL << ICE_RXD_QW1_ERROR_S) + +enum ice_rx_desc_error_bits { + /* Note: These are predefined bit offsets */ + ICE_RX_DESC_ERROR_RXE_S = 0, + ICE_RX_DESC_ERROR_RECIPE_S = 1, + ICE_RX_DESC_ERROR_HBO_S = 2, + ICE_RX_DESC_ERROR_L3L4E_S = 3, /* 3 BITS */ + ICE_RX_DESC_ERROR_IPE_S = 3, + ICE_RX_DESC_ERROR_L4E_S = 4, + ICE_RX_DESC_ERROR_EIPE_S = 5, + ICE_RX_DESC_ERROR_OVERSIZE_S = 6, + ICE_RX_DESC_ERROR_PPRS_S = 7 +}; + +enum ice_rx_desc_error_l3l4e_masks { + ICE_RX_DESC_ERROR_L3L4E_NONE = 0, + ICE_RX_DESC_ERROR_L3L4E_PROT = 1, +}; + +#define ICE_RXD_QW1_PTYPE_S 30 +#define ICE_RXD_QW1_PTYPE_M (0xFFULL << ICE_RXD_QW1_PTYPE_S) + +/* Packet type non-ip values */ +enum ice_rx_l2_ptype { + ICE_RX_PTYPE_L2_RESERVED = 0, + ICE_RX_PTYPE_L2_MAC_PAY2 = 1, + ICE_RX_PTYPE_L2_FIP_PAY2 = 3, + ICE_RX_PTYPE_L2_OUI_PAY2 = 4, + ICE_RX_PTYPE_L2_MACCNTRL_PAY2 = 5, + ICE_RX_PTYPE_L2_LLDP_PAY2 = 6, + ICE_RX_PTYPE_L2_ECP_PAY2 = 7, + ICE_RX_PTYPE_L2_EVB_PAY2 = 8, + ICE_RX_PTYPE_L2_QCN_PAY2 = 9, + ICE_RX_PTYPE_L2_EAPOL_PAY2 = 10, + ICE_RX_PTYPE_L2_ARP = 11, +}; + +struct ice_rx_ptype_decoded { + u32 ptype:10; + u32 known:1; + u32 outer_ip:1; + u32 outer_ip_ver:2; + u32 outer_frag:1; + u32 tunnel_type:3; + u32 tunnel_end_prot:2; + u32 tunnel_end_frag:1; + u32 inner_prot:4; + u32 payload_layer:3; +}; + +enum ice_rx_ptype_outer_ip { + ICE_RX_PTYPE_OUTER_L2 = 0, + ICE_RX_PTYPE_OUTER_IP = 1, +}; + +enum ice_rx_ptype_outer_ip_ver { + ICE_RX_PTYPE_OUTER_NONE = 0, + ICE_RX_PTYPE_OUTER_IPV4 = 1, + ICE_RX_PTYPE_OUTER_IPV6 = 2, +}; + +enum ice_rx_ptype_outer_fragmented { + ICE_RX_PTYPE_NOT_FRAG = 0, + ICE_RX_PTYPE_FRAG = 1, +}; + +enum ice_rx_ptype_tunnel_type { + ICE_RX_PTYPE_TUNNEL_NONE = 0, + ICE_RX_PTYPE_TUNNEL_IP_IP = 1, + ICE_RX_PTYPE_TUNNEL_IP_GRENAT = 2, + ICE_RX_PTYPE_TUNNEL_IP_GRENAT_MAC = 3, + ICE_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN = 4, +}; + +enum ice_rx_ptype_tunnel_end_prot { + ICE_RX_PTYPE_TUNNEL_END_NONE = 0, + ICE_RX_PTYPE_TUNNEL_END_IPV4 = 1, + ICE_RX_PTYPE_TUNNEL_END_IPV6 = 2, +}; + +enum ice_rx_ptype_inner_prot { + ICE_RX_PTYPE_INNER_PROT_NONE = 0, + ICE_RX_PTYPE_INNER_PROT_UDP = 1, + ICE_RX_PTYPE_INNER_PROT_TCP = 2, + ICE_RX_PTYPE_INNER_PROT_SCTP = 3, + ICE_RX_PTYPE_INNER_PROT_ICMP = 4, +}; + +enum ice_rx_ptype_payload_layer { + ICE_RX_PTYPE_PAYLOAD_LAYER_NONE = 0, + 
ICE_RX_PTYPE_PAYLOAD_LAYER_PAY2 = 1, + ICE_RX_PTYPE_PAYLOAD_LAYER_PAY3 = 2, + ICE_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3, +}; + +#define ICE_RXD_QW1_LEN_PBUF_S 38 +#define ICE_RXD_QW1_LEN_PBUF_M (0x3FFFULL << ICE_RXD_QW1_LEN_PBUF_S) + +#define ICE_RXD_QW1_LEN_HBUF_S 52 +#define ICE_RXD_QW1_LEN_HBUF_M (0x7FFULL << ICE_RXD_QW1_LEN_HBUF_S) + +#define ICE_RXD_QW1_LEN_SPH_S 63 +#define ICE_RXD_QW1_LEN_SPH_M BIT_ULL(ICE_RXD_QW1_LEN_SPH_S) + +enum ice_rx_desc_ext_status_bits { + /* Note: These are predefined bit offsets */ + ICE_RX_DESC_EXT_STATUS_L2TAG2P_S = 0, + ICE_RX_DESC_EXT_STATUS_L2TAG3P_S = 1, + ICE_RX_DESC_EXT_STATUS_FLEXBL_S = 2, + ICE_RX_DESC_EXT_STATUS_FLEXBH_S = 4, + ICE_RX_DESC_EXT_STATUS_FDLONGB_S = 9, + ICE_RX_DESC_EXT_STATUS_PELONGB_S = 11, +}; + +enum ice_rx_desc_pe_status_bits { + /* Note: These are predefined bit offsets */ + ICE_RX_DESC_PE_STATUS_QPID_S = 0, /* 18 BITS */ + ICE_RX_DESC_PE_STATUS_L4PORT_S = 0, /* 16 BITS */ + ICE_RX_DESC_PE_STATUS_IPINDEX_S = 16, /* 8 BITS */ + ICE_RX_DESC_PE_STATUS_QPIDHIT_S = 24, + ICE_RX_DESC_PE_STATUS_APBVTHIT_S = 25, + ICE_RX_DESC_PE_STATUS_PORTV_S = 26, + ICE_RX_DESC_PE_STATUS_URG_S = 27, + ICE_RX_DESC_PE_STATUS_IPFRAG_S = 28, + ICE_RX_DESC_PE_STATUS_IPOPT_S = 29 +}; + +#define ICE_RX_PROG_STATUS_DESC_LEN_S 38 +#define ICE_RX_PROG_STATUS_DESC_LEN 0x2000000 + +#define ICE_RX_PROG_STATUS_DESC_QW1_PROGID_S 2 +#define ICE_RX_PROG_STATUS_DESC_QW1_PROGID_M \ + (0x7UL << ICE_RX_PROG_STATUS_DESC_QW1_PROGID_S) + +#define ICE_RX_PROG_STATUS_DESC_QW1_ERROR_S 19 +#define ICE_RX_PROG_STATUS_DESC_QW1_ERROR_M \ + (0x3FUL << ICE_RX_PROG_STATUS_DESC_QW1_ERROR_S) + +enum ice_rx_prog_status_desc_status_bits { + /* Note: These are predefined bit offsets */ + ICE_RX_PROG_STATUS_DESC_DD_S = 0, + ICE_RX_PROG_STATUS_DESC_PROG_ID_S = 2 /* 3 BITS */ +}; + +enum ice_rx_prog_status_desc_prog_id_masks { + ICE_RX_PROG_STATUS_DESC_FD_FLTR_STATUS = 1, +}; + +enum ice_rx_prog_status_desc_error_bits { + /* Note: These are predefined bit offsets */ + ICE_RX_PROG_STATUS_DESC_FD_TBL_FULL_S = 0, + ICE_RX_PROG_STATUS_DESC_NO_FD_ENTRY_S = 1, +}; + +/* Rx Flex Descriptors + * These descriptors are used instead of the legacy version descriptors when + * ice_rlan_ctx.adv_desc is set + */ +union ice_16b_rx_flex_desc { + struct { + __le64 pkt_addr; /* Packet buffer address */ + __le64 hdr_addr; /* Header buffer address */ + /* bit 0 of hdr_addr is DD bit */ + } read; + struct { + /* Qword 0 */ + u8 rxdid; /* descriptor builder profile ID */ + u8 mir_id_umb_cast; /* mirror=[5:0], umb=[7:6] */ + __le16 ptype_flex_flags0; /* ptype=[9:0], ff0=[15:10] */ + __le16 pkt_len; /* [15:14] are reserved */ + __le16 hdr_len_sph_flex_flags1; /* header=[10:0] */ + /* sph=[11:11] */ + /* ff1/ext=[15:12] */ + + /* Qword 1 */ + __le16 status_error0; + __le16 l2tag1; + __le16 flex_meta0; + __le16 flex_meta1; + } wb; /* writeback */ +}; + +union ice_32b_rx_flex_desc { + struct { + __le64 pkt_addr; /* Packet buffer address */ + __le64 hdr_addr; /* Header buffer address */ + /* bit 0 of hdr_addr is DD bit */ + __le64 rsvd1; + __le64 rsvd2; + } read; + struct { + /* Qword 0 */ + u8 rxdid; /* descriptor builder profile ID */ + u8 mir_id_umb_cast; /* mirror=[5:0], umb=[7:6] */ + __le16 ptype_flex_flags0; /* ptype=[9:0], ff0=[15:10] */ + __le16 pkt_len; /* [15:14] are reserved */ + __le16 hdr_len_sph_flex_flags1; /* header=[10:0] */ + /* sph=[11:11] */ + /* ff1/ext=[15:12] */ + + /* Qword 1 */ + __le16 status_error0; + __le16 l2tag1; + __le16 flex_meta0; + __le16 flex_meta1; + + /* Qword 2 */ + __le16 
status_error1; + u8 flex_flags2; + u8 time_stamp_low; + __le16 l2tag2_1st; + __le16 l2tag2_2nd; + + /* Qword 3 */ + __le16 flex_meta2; + __le16 flex_meta3; + union { + struct { + __le16 flex_meta4; + __le16 flex_meta5; + } flex; + __le32 ts_high; + } flex_ts; + } wb; /* writeback */ +}; + +/* Rx Flex Descriptor NIC Profile + * RxDID Profile ID 2 + * Flex-field 0: RSS hash lower 16-bits + * Flex-field 1: RSS hash upper 16-bits + * Flex-field 2: Flow ID lower 16-bits + * Flex-field 3: Flow ID higher 16-bits + * Flex-field 4: reserved, VLAN ID taken from L2Tag + */ +struct ice_32b_rx_flex_desc_nic { + /* Qword 0 */ + u8 rxdid; + u8 mir_id_umb_cast; + __le16 ptype_flexi_flags0; + __le16 pkt_len; + __le16 hdr_len_sph_flex_flags1; + + /* Qword 1 */ + __le16 status_error0; + __le16 l2tag1; + __le32 rss_hash; + + /* Qword 2 */ + __le16 status_error1; + u8 flexi_flags2; + u8 ts_low; + __le16 l2tag2_1st; + __le16 l2tag2_2nd; + + /* Qword 3 */ + __le32 flow_id; + union { + struct { + __le16 rsvd; + __le16 flow_id_ipv6; + } flex; + __le32 ts_high; + } flex_ts; +}; + +/* Rx Flex Descriptor Switch Profile + * RxDID Profile ID 3 + * Flex-field 0: Source VSI + */ +struct ice_32b_rx_flex_desc_sw { + /* Qword 0 */ + u8 rxdid; + u8 mir_id_umb_cast; + __le16 ptype_flexi_flags0; + __le16 pkt_len; + __le16 hdr_len_sph_flex_flags1; + + /* Qword 1 */ + __le16 status_error0; + __le16 l2tag1; + __le16 src_vsi; /* [10:15] are reserved */ + __le16 flex_md1_rsvd; + + /* Qword 2 */ + __le16 status_error1; + u8 flex_flags2; + u8 ts_low; + __le16 l2tag2_1st; + __le16 l2tag2_2nd; + + /* Qword 3 */ + __le32 rsvd; /* flex words 2-3 are reserved */ + __le32 ts_high; +}; + +/* Rx Flex Descriptor NIC VEB Profile + * RxDID Profile ID 4 + * Flex-field 0: Destination VSI + */ +struct ice_32b_rx_flex_desc_nic_veb_dbg { + /* Qword 0 */ + u8 rxdid; + u8 mir_id_umb_cast; + __le16 ptype_flexi_flags0; + __le16 pkt_len; + __le16 hdr_len_sph_flex_flags1; + + /* Qword 1 */ + __le16 status_error0; + __le16 l2tag1; + __le16 dst_vsi; /* [0:12]: destination VSI */ + /* 13: VSI valid bit */ + /* [14:15] are reserved */ + __le16 flex_field_1; + + /* Qword 2 */ + __le16 status_error1; + u8 flex_flags2; + u8 ts_low; + __le16 l2tag2_1st; + __le16 l2tag2_2nd; + + /* Qword 3 */ + __le32 rsvd; /* flex words 2-3 are reserved */ + __le32 ts_high; +}; + +/* Rx Flex Descriptor NIC ACL Profile + * RxDID Profile ID 5 + * Flex-field 0: ACL Counter 0 + * Flex-field 1: ACL Counter 1 + * Flex-field 2: ACL Counter 2 + */ +struct ice_32b_rx_flex_desc_nic_acl_dbg { + /* Qword 0 */ + u8 rxdid; + u8 mir_id_umb_cast; + __le16 ptype_flexi_flags0; + __le16 pkt_len; + __le16 hdr_len_sph_flex_flags1; + + /* Qword 1 */ + __le16 status_error0; + __le16 l2tag1; + __le16 acl_ctr0; + __le16 acl_ctr1; + + /* Qword 2 */ + __le16 status_error1; + u8 flex_flags2; + u8 ts_low; + __le16 l2tag2_1st; + __le16 l2tag2_2nd; + + /* Qword 3 */ + __le16 acl_ctr2; + __le16 rsvd; /* flex words 2-3 are reserved */ + __le32 ts_high; +}; + +/* Rx Flex Descriptor NIC Profile + * RxDID Profile ID 6 + * Flex-field 0: RSS hash lower 16-bits + * Flex-field 1: RSS hash upper 16-bits + * Flex-field 2: Flow ID lower 16-bits + * Flex-field 3: Source VSI + * Flex-field 4: reserved, VLAN ID taken from L2Tag + */ +struct ice_32b_rx_flex_desc_nic_2 { + /* Qword 0 */ + u8 rxdid; + u8 mir_id_umb_cast; + __le16 ptype_flexi_flags0; + __le16 pkt_len; + __le16 hdr_len_sph_flex_flags1; + + /* Qword 1 */ + __le16 status_error0; + __le16 l2tag1; + __le32 rss_hash; + + /* Qword 2 */ + __le16 status_error1; + u8 
flexi_flags2; + u8 ts_low; + __le16 l2tag2_1st; + __le16 l2tag2_2nd; + + /* Qword 3 */ + __le16 flow_id; + __le16 src_vsi; + union { + struct { + __le16 rsvd; + __le16 flow_id_ipv6; + } flex; + __le32 ts_high; + } flex_ts; +}; + +/* Rx Flex Descriptor for Comms Package Profile + * RxDID Profile ID 16-21 + * Flex-field 0: RSS hash lower 16-bits + * Flex-field 1: RSS hash upper 16-bits + * Flex-field 2: Flow ID lower 16-bits + * Flex-field 3: Flow ID upper 16-bits + * Flex-field 4: AUX0 + * Flex-field 5: AUX1 + */ +struct ice_32b_rx_flex_desc_comms { + /* Qword 0 */ + u8 rxdid; + u8 mir_id_umb_cast; + __le16 ptype_flexi_flags0; + __le16 pkt_len; + __le16 hdr_len_sph_flex_flags1; + + /* Qword 1 */ + __le16 status_error0; + __le16 l2tag1; + __le32 rss_hash; + + /* Qword 2 */ + __le16 status_error1; + u8 flexi_flags2; + u8 ts_low; + __le16 l2tag2_1st; + __le16 l2tag2_2nd; + + /* Qword 3 */ + __le32 flow_id; + union { + struct { + __le16 aux0; + __le16 aux1; + } flex; + __le32 ts_high; + } flex_ts; +}; + +/* Receive Flex Descriptor profile IDs: There are a total + * of 64 profiles where profile IDs 0/1 are for legacy; and + * profiles 2-63 are flex profiles that can be programmed + * with a specific metadata (profile 7 reserved for HW) + */ +enum ice_rxdid { + ICE_RXDID_LEGACY_0 = 0, + ICE_RXDID_LEGACY_1 = 1, + ICE_RXDID_FLEX_NIC = 2, + ICE_RXDID_FLEX_NIC_2 = 6, + ICE_RXDID_HW = 7, + ICE_RXDID_COMMS_GENERIC = 16, + ICE_RXDID_COMMS_AUX_VLAN = 17, + ICE_RXDID_COMMS_AUX_IPV4 = 18, + ICE_RXDID_COMMS_AUX_IPV6 = 19, + ICE_RXDID_COMMS_AUX_IPV6_FLOW = 20, + ICE_RXDID_COMMS_AUX_TCP = 21, + ICE_RXDID_LAST = 63, +}; + +/* Recceive Flex descriptor Dword Index */ +enum ice_flex_word { + ICE_RX_FLEX_DWORD_0 = 0, + ICE_RX_FLEX_DWORD_1, + ICE_RX_FLEX_DWORD_2, + ICE_RX_FLEX_DWORD_3, + ICE_RX_FLEX_DWORD_4, + ICE_RX_FLEX_DWORD_5 +}; + +/* Receive Flex Descriptor Rx opcode values */ +enum ice_flex_opcode { + ICE_RX_OPC_DEBUG = 0, + ICE_RX_OPC_MDID, + ICE_RX_OPC_EXTRACT, + ICE_RX_OPC_PROTID +}; + +/* Receive Descriptor MDID values that access packet flags */ +enum ice_flex_mdid_pkt_flags { + ICE_RX_MDID_PKT_FLAGS_15_0 = 20, + ICE_RX_MDID_PKT_FLAGS_31_16, + ICE_RX_MDID_PKT_FLAGS_47_32, + ICE_RX_MDID_PKT_FLAGS_63_48, +}; + +/* Generic descriptor MDID values */ +enum ice_flex_mdid { + ICE_MDID_GENERIC_WORD_0, + ICE_MDID_GENERIC_WORD_1, + ICE_MDID_GENERIC_WORD_2, + ICE_MDID_GENERIC_WORD_3, + ICE_MDID_GENERIC_WORD_4, + ICE_MDID_FLOW_ID_LOWER, + ICE_MDID_FLOW_ID_HIGH, + ICE_MDID_RX_DESCR_PROF_IDX, + ICE_MDID_RX_PKT_DROP, + ICE_MDID_RX_DST_Q = 12, + ICE_MDID_RX_DST_VSI, + ICE_MDID_SRC_VSI = 19, + ICE_MDID_ACL_NOP = 55, + /* Entry 56 */ + ICE_MDID_RX_HASH_LOW, + ICE_MDID_ACL_CNTR_PKT = ICE_MDID_RX_HASH_LOW, + /* Entry 57 */ + ICE_MDID_RX_HASH_HIGH, + ICE_MDID_ACL_CNTR_BYTES = ICE_MDID_RX_HASH_HIGH, + ICE_MDID_ACL_CNTR_PKT_BYTES +}; + +/* for ice_32byte_rx_flex_desc.mir_id_umb_cast member */ +#define ICE_RX_FLEX_DESC_MIRROR_M (0x3F) /* 6-bits */ + +/* Rx/Tx Flag64 packet flag bits */ +enum ice_flg64_bits { + ICE_FLG_PKT_DSI = 0, + /* If there is a 1 in this bit position then that means Rx packet */ + ICE_FLG_PKT_DIR = 4, + ICE_FLG_EVLAN_x8100 = 14, + ICE_FLG_EVLAN_x9100, + ICE_FLG_VLAN_x8100, + ICE_FLG_TNL_MAC = 22, + ICE_FLG_TNL_VLAN, + ICE_FLG_PKT_FRG, + ICE_FLG_FIN = 32, + ICE_FLG_SYN, + ICE_FLG_RST, + ICE_FLG_TNL0 = 38, + ICE_FLG_TNL1, + ICE_FLG_TNL2, + ICE_FLG_UDP_GRE, + ICE_FLG_RSVD = 63 +}; + +enum ice_rx_flex_desc_umb_cast_bits { /* field is 2 bits long */ + ICE_RX_FLEX_DESC_UMB_CAST_S = 6, + 
ICE_RX_FLEX_DESC_UMB_CAST_LAST /* this entry must be last!!! */ +}; + +enum ice_umbcast_dest_addr_types { + ICE_DEST_UNICAST = 0, + ICE_DEST_MULTICAST, + ICE_DEST_BROADCAST, + ICE_DEST_MIRRORED, +}; + +/* for ice_32byte_rx_flex_desc.ptype_flexi_flags0 member */ +#define ICE_RX_FLEX_DESC_PTYPE_M (0x3FF) /* 10-bits */ + +enum ice_rx_flex_desc_flexi_flags0_bits { /* field is 6 bits long */ + ICE_RX_FLEX_DESC_FLEXI_FLAGS0_S = 10, + ICE_RX_FLEX_DESC_FLEXI_FLAGS0_LAST /* this entry must be last!!! */ +}; + +/* for ice_32byte_rx_flex_desc.pkt_length member */ +#define ICE_RX_FLX_DESC_PKT_LEN_M (0x3FFF) /* 14-bits */ + +/* for ice_32byte_rx_flex_desc.header_length_sph_flexi_flags1 member */ +#define ICE_RX_FLEX_DESC_HEADER_LEN_M (0x7FF) /* 11-bits */ + +enum ice_rx_flex_desc_sph_bits { /* field is 1 bit long */ + ICE_RX_FLEX_DESC_SPH_S = 11, + ICE_RX_FLEX_DESC_SPH_LAST /* this entry must be last!!! */ +}; + +enum ice_rx_flex_desc_flexi_flags1_bits { /* field is 4 bits long */ + ICE_RX_FLEX_DESC_FLEXI_FLAGS1_S = 12, + ICE_RX_FLEX_DESC_FLEXI_FLAGS1_LAST /* this entry must be last!!! */ +}; + +enum ice_rx_flex_desc_ext_status_bits { /* field is 4 bits long */ + ICE_RX_FLEX_DESC_EXT_STATUS_EXT_UDP_S = 12, + ICE_RX_FLEX_DESC_EXT_STATUS_INT_UDP_S = 13, + ICE_RX_FLEX_DESC_EXT_STATUS_RECIPE_S = 14, + ICE_RX_FLEX_DESC_EXT_STATUS_OVERSIZE_S = 15, + ICE_RX_FLEX_DESC_EXT_STATUS_LAST /* entry must be last!!! */ +}; + +enum ice_rx_flex_desc_status_error_0_bits { + /* Note: These are predefined bit offsets */ + ICE_RX_FLEX_DESC_STATUS0_DD_S = 0, + ICE_RX_FLEX_DESC_STATUS0_EOF_S, + ICE_RX_FLEX_DESC_STATUS0_HBO_S, + ICE_RX_FLEX_DESC_STATUS0_L3L4P_S, + ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S, + ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S, + ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S, + ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S, + ICE_RX_FLEX_DESC_STATUS0_LPBK_S, + ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S, + ICE_RX_FLEX_DESC_STATUS0_RXE_S, + ICE_RX_FLEX_DESC_STATUS0_CRCP_S, + ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S, + ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S, + ICE_RX_FLEX_DESC_STATUS0_XTRMD0_VALID_S, + ICE_RX_FLEX_DESC_STATUS0_XTRMD1_VALID_S, + ICE_RX_FLEX_DESC_STATUS0_LAST /* this entry must be last!!! */ +}; + +enum ice_rx_flex_desc_status_error_1_bits { + /* Note: These are predefined bit offsets */ + ICE_RX_FLEX_DESC_STATUS1_CPM_S = 0, /* 4 bits */ + ICE_RX_FLEX_DESC_STATUS1_NAT_S = 4, + ICE_RX_FLEX_DESC_STATUS1_CRYPTO_S = 5, + /* [10:6] reserved */ + ICE_RX_FLEX_DESC_STATUS1_L2TAG2P_S = 11, + ICE_RX_FLEX_DESC_STATUS1_XTRMD2_VALID_S = 12, + ICE_RX_FLEX_DESC_STATUS1_XTRMD3_VALID_S = 13, + ICE_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S = 14, + ICE_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S = 15, + ICE_RX_FLEX_DESC_STATUS1_LAST /* this entry must be last!!! */ +}; + +enum ice_rx_flex_desc_exstat_bits { + /* Note: These are predefined bit offsets */ + ICE_RX_FLEX_DESC_EXSTAT_EXTUDP_S = 0, + ICE_RX_FLEX_DESC_EXSTAT_INTUDP_S = 1, + ICE_RX_FLEX_DESC_EXSTAT_RECIPE_S = 2, + ICE_RX_FLEX_DESC_EXSTAT_OVERSIZE_S = 3, +}; + +#define ICE_RXQ_CTX_SIZE_DWORDS 8 +#define ICE_RXQ_CTX_SZ (ICE_RXQ_CTX_SIZE_DWORDS * sizeof(u32)) +#define ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS 22 +#define ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS 5 +#define GLTCLAN_CQ_CNTX(i, CQ) (GLTCLAN_CQ_CNTX0(CQ) + ((i) * 0x0800)) + +/* RLAN Rx queue context data + * + * The sizes of the variables may be larger than needed due to crossing byte + * boundaries. 
If we do not have the width of the variable set to the correct + * size then we could end up shifting bits off the top of the variable when the + * variable is at the top of a byte and crosses over into the next byte. + */ +struct ice_rlan_ctx { + u16 head; + u16 cpuid; /* bigger than needed, see above for reason */ +#define ICE_RLAN_BASE_S 7 + u64 base; + u16 qlen; +#define ICE_RLAN_CTX_DBUF_S 7 + u16 dbuf; /* bigger than needed, see above for reason */ +#define ICE_RLAN_CTX_HBUF_S 6 + u16 hbuf; /* bigger than needed, see above for reason */ + u8 dtype; + u8 dsize; + u8 crcstrip; + u8 l2tsel; + u8 hsplit_0; + u8 hsplit_1; + u8 showiv; + u32 rxmax; /* bigger than needed, see above for reason */ + u8 tphrdesc_ena; + u8 tphwdesc_ena; + u8 tphdata_ena; + u8 tphhead_ena; + u16 lrxqthresh; /* bigger than needed, see above for reason */ + u8 prefena; /* NOTE: normally must be set to 1 at init */ +}; + +struct ice_ctx_ele { + u16 offset; + u16 size_of; + u16 width; + u16 lsb; +}; + +#define ICE_CTX_STORE(_struct, _ele, _width, _lsb) { \ + .offset = offsetof(struct _struct, _ele), \ + .size_of = FIELD_SIZEOF(struct _struct, _ele), \ + .width = _width, \ + .lsb = _lsb, \ +} + +/* for hsplit_0 field of Rx RLAN context */ +enum ice_rlan_ctx_rx_hsplit_0 { + ICE_RLAN_RX_HSPLIT_0_NO_SPLIT = 0, + ICE_RLAN_RX_HSPLIT_0_SPLIT_L2 = 1, + ICE_RLAN_RX_HSPLIT_0_SPLIT_IP = 2, + ICE_RLAN_RX_HSPLIT_0_SPLIT_TCP_UDP = 4, + ICE_RLAN_RX_HSPLIT_0_SPLIT_SCTP = 8, +}; + +/* for hsplit_1 field of Rx RLAN context */ +enum ice_rlan_ctx_rx_hsplit_1 { + ICE_RLAN_RX_HSPLIT_1_NO_SPLIT = 0, + ICE_RLAN_RX_HSPLIT_1_SPLIT_L2 = 1, + ICE_RLAN_RX_HSPLIT_1_SPLIT_ALWAYS = 2, +}; + +/* Tx Descriptor */ +struct ice_tx_desc { + __le64 buf_addr; /* Address of descriptor's data buf */ + __le64 cmd_type_offset_bsz; +}; + +#define ICE_TXD_QW1_DTYPE_S 0 +#define ICE_TXD_QW1_DTYPE_M (0xFUL << ICE_TXD_QW1_DTYPE_S) + +enum ice_tx_desc_dtype_value { + ICE_TX_DESC_DTYPE_DATA = 0x0, + ICE_TX_DESC_DTYPE_CTX = 0x1, + ICE_TX_DESC_DTYPE_IPSEC = 0x3, + ICE_TX_DESC_DTYPE_FLTR_PROG = 0x8, + ICE_TX_DESC_DTYPE_HLP_META = 0x9, + /* DESC_DONE - HW has completed write-back of descriptor */ + ICE_TX_DESC_DTYPE_DESC_DONE = 0xF, +}; + +#define ICE_TXD_QW1_CMD_S 4 +#define ICE_TXD_QW1_CMD_M (0xFFFUL << ICE_TXD_QW1_CMD_S) + +enum ice_tx_desc_cmd_bits { + ICE_TX_DESC_CMD_EOP = 0x0001, + ICE_TX_DESC_CMD_RS = 0x0002, + ICE_TX_DESC_CMD_RSVD = 0x0004, + ICE_TX_DESC_CMD_IL2TAG1 = 0x0008, + ICE_TX_DESC_CMD_DUMMY = 0x0010, + ICE_TX_DESC_CMD_IIPT_NONIP = 0x0000, + ICE_TX_DESC_CMD_IIPT_IPV6 = 0x0020, + ICE_TX_DESC_CMD_IIPT_IPV4 = 0x0040, + ICE_TX_DESC_CMD_IIPT_IPV4_CSUM = 0x0060, + ICE_TX_DESC_CMD_RSVD2 = 0x0080, + ICE_TX_DESC_CMD_L4T_EOFT_UNK = 0x0000, + ICE_TX_DESC_CMD_L4T_EOFT_TCP = 0x0100, + ICE_TX_DESC_CMD_L4T_EOFT_SCTP = 0x0200, + ICE_TX_DESC_CMD_L4T_EOFT_UDP = 0x0300, + ICE_TX_DESC_CMD_RE = 0x0400, + ICE_TX_DESC_CMD_RSVD3 = 0x0800, +}; + +#define ICE_TXD_QW1_OFFSET_S 16 +#define ICE_TXD_QW1_OFFSET_M (0x3FFFFULL << ICE_TXD_QW1_OFFSET_S) + +enum ice_tx_desc_len_fields { + /* Note: These are predefined bit offsets */ + ICE_TX_DESC_LEN_MACLEN_S = 0, /* 7 BITS */ + ICE_TX_DESC_LEN_IPLEN_S = 7, /* 7 BITS */ + ICE_TX_DESC_LEN_L4_LEN_S = 14 /* 4 BITS */ +}; + +#define ICE_TXD_QW1_MACLEN_M (0x7FUL << ICE_TX_DESC_LEN_MACLEN_S) +#define ICE_TXD_QW1_IPLEN_M (0x7FUL << ICE_TX_DESC_LEN_IPLEN_S) +#define ICE_TXD_QW1_L4LEN_M (0xFUL << ICE_TX_DESC_LEN_L4_LEN_S) + +/* Tx descriptor field limits in bytes */ +#define ICE_TXD_MACLEN_MAX ((ICE_TXD_QW1_MACLEN_M >> \ + ICE_TX_DESC_LEN_MACLEN_S) 
* ICE_BYTES_PER_WORD) +#define ICE_TXD_IPLEN_MAX ((ICE_TXD_QW1_IPLEN_M >> \ + ICE_TX_DESC_LEN_IPLEN_S) * ICE_BYTES_PER_DWORD) +#define ICE_TXD_L4LEN_MAX ((ICE_TXD_QW1_L4LEN_M >> \ + ICE_TX_DESC_LEN_L4_LEN_S) * ICE_BYTES_PER_DWORD) + +#define ICE_TXD_QW1_TX_BUF_SZ_S 34 +#define ICE_TXD_QW1_TX_BUF_SZ_M (0x3FFFULL << ICE_TXD_QW1_TX_BUF_SZ_S) + +#define ICE_TXD_QW1_L2TAG1_S 48 +#define ICE_TXD_QW1_L2TAG1_M (0xFFFFULL << ICE_TXD_QW1_L2TAG1_S) + +/* Context descriptors */ +struct ice_tx_ctx_desc { + __le32 tunneling_params; + __le16 l2tag2; + __le16 rsvd; + __le64 qw1; +}; + +#define ICE_TXD_CTX_QW1_DTYPE_S 0 +#define ICE_TXD_CTX_QW1_DTYPE_M (0xFUL << ICE_TXD_CTX_QW1_DTYPE_S) + +#define ICE_TXD_CTX_QW1_CMD_S 4 +#define ICE_TXD_CTX_QW1_CMD_M (0x7FUL << ICE_TXD_CTX_QW1_CMD_S) + +#define ICE_TXD_CTX_QW1_IPSEC_S 11 +#define ICE_TXD_CTX_QW1_IPSEC_M (0x7FUL << ICE_TXD_CTX_QW1_IPSEC_S) + +#define ICE_TXD_CTX_QW1_TSO_LEN_S 30 +#define ICE_TXD_CTX_QW1_TSO_LEN_M \ + (0x3FFFFULL << ICE_TXD_CTX_QW1_TSO_LEN_S) + +#define ICE_TXD_CTX_QW1_TSYN_S ICE_TXD_CTX_QW1_TSO_LEN_S +#define ICE_TXD_CTX_QW1_TSYN_M ICE_TXD_CTX_QW1_TSO_LEN_M + +#define ICE_TXD_CTX_QW1_MSS_S 50 +#define ICE_TXD_CTX_QW1_MSS_M (0x3FFFULL << ICE_TXD_CTX_QW1_MSS_S) +#define ICE_TXD_CTX_MIN_MSS 64 +#define ICE_TXD_CTX_MAX_MSS 9668 + +#define ICE_TXD_CTX_QW1_VSI_S 50 +#define ICE_TXD_CTX_QW1_VSI_M (0x3FFULL << ICE_TXD_CTX_QW1_VSI_S) + +enum ice_tx_ctx_desc_cmd_bits { + ICE_TX_CTX_DESC_TSO = 0x01, + ICE_TX_CTX_DESC_TSYN = 0x02, + ICE_TX_CTX_DESC_IL2TAG2 = 0x04, + ICE_TX_CTX_DESC_IL2TAG2_IL2H = 0x08, + ICE_TX_CTX_DESC_SWTCH_NOTAG = 0x00, + ICE_TX_CTX_DESC_SWTCH_UPLINK = 0x10, + ICE_TX_CTX_DESC_SWTCH_LOCAL = 0x20, + ICE_TX_CTX_DESC_SWTCH_VSI = 0x30, + ICE_TX_CTX_DESC_RESERVED = 0x40 +}; + +enum ice_tx_ctx_desc_eipt_offload { + ICE_TX_CTX_EIPT_NONE = 0x0, + ICE_TX_CTX_EIPT_IPV6 = 0x1, + ICE_TX_CTX_EIPT_IPV4_NO_CSUM = 0x2, + ICE_TX_CTX_EIPT_IPV4 = 0x3 +}; + +#define ICE_TXD_CTX_QW0_EIPT_S 0 +#define ICE_TXD_CTX_QW0_EIPT_M (0x3ULL << ICE_TXD_CTX_QW0_EIPT_S) + +#define ICE_TXD_CTX_QW0_EIPLEN_S 2 +#define ICE_TXD_CTX_QW0_EIPLEN_M (0x7FUL << ICE_TXD_CTX_QW0_EIPLEN_S) + +#define ICE_TXD_CTX_QW0_L4TUNT_S 9 +#define ICE_TXD_CTX_QW0_L4TUNT_M (0x3ULL << ICE_TXD_CTX_QW0_L4TUNT_S) + +#define ICE_TXD_CTX_UDP_TUNNELING BIT_ULL(ICE_TXD_CTX_QW0_L4TUNT_S) +#define ICE_TXD_CTX_GRE_TUNNELING (0x2ULL << ICE_TXD_CTX_QW0_L4TUNT_S) + +#define ICE_TXD_CTX_QW0_EIP_NOINC_S 11 +#define ICE_TXD_CTX_QW0_EIP_NOINC_M BIT_ULL(ICE_TXD_CTX_QW0_EIP_NOINC_S) + +#define ICE_TXD_CTX_EIP_NOINC_IPID_CONST ICE_TXD_CTX_QW0_EIP_NOINC_M + +#define ICE_TXD_CTX_QW0_NATLEN_S 12 +#define ICE_TXD_CTX_QW0_NATLEN_M (0X7FULL << ICE_TXD_CTX_QW0_NATLEN_S) + +#define ICE_TXD_CTX_QW0_DECTTL_S 19 +#define ICE_TXD_CTX_QW0_DECTTL_M (0xFULL << ICE_TXD_CTX_QW0_DECTTL_S) + +#define ICE_TXD_CTX_QW0_L4T_CS_S 23 +#define ICE_TXD_CTX_QW0_L4T_CS_M BIT_ULL(ICE_TXD_CTX_QW0_L4T_CS_S) + +#define ICE_LAN_TXQ_MAX_QGRPS 127 +#define ICE_LAN_TXQ_MAX_QDIS 1023 + +/* Tx queue context data + * + * The sizes of the variables may be larger than needed due to crossing byte + * boundaries. If we do not have the width of the variable set to the correct + * size then we could end up shifting bits off the top of the variable when the + * variable is at the top of a byte and crosses over into the next byte. 
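+ *
+ * For illustration only (the width and LSB values below are hypothetical):
+ * an entry such as ICE_CTX_STORE(ice_tlan_ctx, qlen, 13, 89) in a companion
+ * field-info table records a member's offset, C size, bit width and LSB so
+ * that a generic packer can shift the value into the packed context; keeping
+ * qlen in a u32 below guarantees that shift cannot drop any bits.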
+ */ +struct ice_tlan_ctx { +#define ICE_TLAN_CTX_BASE_S 7 + u64 base; /* base is defined in 128-byte units */ + u8 port_num; + u16 cgd_num; /* bigger than needed, see above for reason */ + u8 pf_num; + u16 vmvf_num; + u8 vmvf_type; +#define ICE_TLAN_CTX_VMVF_TYPE_VMQ 1 +#define ICE_TLAN_CTX_VMVF_TYPE_PF 2 + u16 src_vsi; + u8 tsyn_ena; + u8 internal_usage_flag; + u8 alt_vlan; + u16 cpuid; /* bigger than needed, see above for reason */ + u8 wb_mode; + u8 tphrd_desc; + u8 tphrd; + u8 tphwr_desc; + u16 cmpq_id; + u16 qnum_in_func; + u8 itr_notification_mode; + u8 adjust_prof_id; + u32 qlen; /* bigger than needed, see above for reason */ + u8 quanta_prof_idx; + u8 tso_ena; + u16 tso_qnum; + u8 legacy_int; + u8 drop_ena; + u8 cache_prof_idx; + u8 pkt_shaper_prof_idx; + u8 int_q_state; /* width not needed - internal do not write */ +}; + +/* LAN Tx Completion Queue data */ +#pragma pack(1) +struct ice_tx_cmpltnq { + u16 txq_id; + u8 generation; + u16 tx_head; + u8 cmpl_type; +}; +#pragma pack() + + +/* LAN Tx Completion Queue Context */ +#pragma pack(1) +struct ice_tx_cmpltnq_ctx { + u64 base; + u32 q_len; +#define ICE_TX_CMPLTNQ_CTX_Q_LEN_S 4 + u8 generation; + u32 wrt_ptr; + u8 pf_num; + u16 vmvf_num; + u8 vmvf_type; + u8 tph_desc_wr; + u8 cpuid; + u32 cmpltn_cache[16]; +}; +#pragma pack() + +/* LAN Tx Doorbell Descriptor Format */ +struct ice_tx_drbell_fmt { + u16 txq_id; + u8 dd; + u8 rs; + u32 db; +}; + + +/* LAN Tx Doorbell Queue Context */ +#pragma pack(1) +struct ice_tx_drbell_q_ctx { + u64 base; + u16 ring_len; + u8 pf_num; + u16 vf_num; + u8 vmvf_type; + u8 cpuid; + u8 tph_desc_rd; + u8 tph_desc_wr; + u8 db_q_en; + u16 rd_head; + u16 rd_tail; +}; +#pragma pack() + +/* The ice_ptype_lkup table is used to convert from the 10-bit ptype in the + * hardware to a bit-field that can be used by SW to more easily determine the + * packet type. + * + * Macros are used to shorten the table lines and make this table human + * readable. + * + * We store the PTYPE in the top byte of the bit field - this is just so that + * we can check that the table doesn't have a row missing, as the index into + * the table should be the PTYPE. 
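+ *
+ * Since the hardware ptype is 10 bits wide, the table below has 1024 entries
+ * (indices 0 through 1023); ptypes the hardware does not report are filled
+ * with ICE_PTT_UNUSED_ENTRY so that the index always equals the PTYPE.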
+ * + * Typical work flow: + * + * IF NOT ice_ptype_lkup[ptype].known + * THEN + * Packet is unknown + * ELSE IF ice_ptype_lkup[ptype].outer_ip == ICE_RX_PTYPE_OUTER_IP + * Use the rest of the fields to look at the tunnels, inner protocols, etc + * ELSE + * Use the enum ice_rx_l2_ptype to decode the packet type + * ENDIF + */ + +/* macro to make the table lines short */ +#define ICE_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\ + { PTYPE, \ + 1, \ + ICE_RX_PTYPE_OUTER_##OUTER_IP, \ + ICE_RX_PTYPE_OUTER_##OUTER_IP_VER, \ + ICE_RX_PTYPE_##OUTER_FRAG, \ + ICE_RX_PTYPE_TUNNEL_##T, \ + ICE_RX_PTYPE_TUNNEL_END_##TE, \ + ICE_RX_PTYPE_##TEF, \ + ICE_RX_PTYPE_INNER_PROT_##I, \ + ICE_RX_PTYPE_PAYLOAD_LAYER_##PL } + +#define ICE_PTT_UNUSED_ENTRY(PTYPE) { PTYPE, 0, 0, 0, 0, 0, 0, 0, 0, 0 } + +/* shorter macros makes the table fit but are terse */ +#define ICE_RX_PTYPE_NOF ICE_RX_PTYPE_NOT_FRAG +#define ICE_RX_PTYPE_FRG ICE_RX_PTYPE_FRAG + +/* Lookup table mapping the HW PTYPE to the bit field for decoding */ +static const struct ice_rx_ptype_decoded ice_ptype_lkup[] = { + /* L2 Packet types */ + ICE_PTT_UNUSED_ENTRY(0), + ICE_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), + ICE_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), + ICE_PTT_UNUSED_ENTRY(3), + ICE_PTT_UNUSED_ENTRY(4), + ICE_PTT_UNUSED_ENTRY(5), + ICE_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), + ICE_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), + ICE_PTT_UNUSED_ENTRY(8), + ICE_PTT_UNUSED_ENTRY(9), + ICE_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), + ICE_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), + ICE_PTT_UNUSED_ENTRY(12), + ICE_PTT_UNUSED_ENTRY(13), + ICE_PTT_UNUSED_ENTRY(14), + ICE_PTT_UNUSED_ENTRY(15), + ICE_PTT_UNUSED_ENTRY(16), + ICE_PTT_UNUSED_ENTRY(17), + ICE_PTT_UNUSED_ENTRY(18), + ICE_PTT_UNUSED_ENTRY(19), + ICE_PTT_UNUSED_ENTRY(20), + ICE_PTT_UNUSED_ENTRY(21), + + /* Non Tunneled IPv4 */ + ICE_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3), + ICE_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3), + ICE_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4), + ICE_PTT_UNUSED_ENTRY(25), + ICE_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4), + ICE_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4), + ICE_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4), + + /* IPv4 --> IPv4 */ + ICE_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3), + ICE_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3), + ICE_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4), + ICE_PTT_UNUSED_ENTRY(32), + ICE_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4), + ICE_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), + ICE_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), + + /* IPv4 --> IPv6 */ + ICE_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3), + ICE_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3), + ICE_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4), + ICE_PTT_UNUSED_ENTRY(39), + ICE_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4), + ICE_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), + ICE_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), + + /* IPv4 --> GRE/NAT */ + ICE_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), + + /* IPv4 --> GRE/NAT --> IPv4 */ + ICE_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), + ICE_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), + ICE_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), + ICE_PTT_UNUSED_ENTRY(47), + ICE_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), + 
ICE_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), + ICE_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), + + /* IPv4 --> GRE/NAT --> IPv6 */ + ICE_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), + ICE_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), + ICE_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), + ICE_PTT_UNUSED_ENTRY(54), + ICE_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), + ICE_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), + ICE_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), + + /* IPv4 --> GRE/NAT --> MAC */ + ICE_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), + + /* IPv4 --> GRE/NAT --> MAC --> IPv4 */ + ICE_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), + ICE_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), + ICE_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), + ICE_PTT_UNUSED_ENTRY(62), + ICE_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), + ICE_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), + ICE_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), + + /* IPv4 --> GRE/NAT -> MAC --> IPv6 */ + ICE_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), + ICE_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), + ICE_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), + ICE_PTT_UNUSED_ENTRY(69), + ICE_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), + ICE_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), + ICE_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), + + /* IPv4 --> GRE/NAT --> MAC/VLAN */ + ICE_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), + + /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */ + ICE_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), + ICE_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), + ICE_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), + ICE_PTT_UNUSED_ENTRY(77), + ICE_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4), + ICE_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), + ICE_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), + + /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */ + ICE_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), + ICE_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), + ICE_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), + ICE_PTT_UNUSED_ENTRY(84), + ICE_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), + ICE_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), + ICE_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), + + /* Non Tunneled IPv6 */ + ICE_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3), + ICE_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3), + ICE_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY3), + ICE_PTT_UNUSED_ENTRY(91), + ICE_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4), + ICE_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4), + ICE_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4), + + /* IPv6 --> IPv4 */ + ICE_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3), + ICE_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3), + ICE_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4), + ICE_PTT_UNUSED_ENTRY(98), + ICE_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4), + ICE_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), + ICE_PTT(101, IP, IPV6, 
NOF, IP_IP, IPV4, NOF, ICMP, PAY4), + + /* IPv6 --> IPv6 */ + ICE_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3), + ICE_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3), + ICE_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4), + ICE_PTT_UNUSED_ENTRY(105), + ICE_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4), + ICE_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), + ICE_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), + + /* IPv6 --> GRE/NAT */ + ICE_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), + + /* IPv6 --> GRE/NAT -> IPv4 */ + ICE_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), + ICE_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), + ICE_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), + ICE_PTT_UNUSED_ENTRY(113), + ICE_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), + ICE_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), + ICE_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), + + /* IPv6 --> GRE/NAT -> IPv6 */ + ICE_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), + ICE_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), + ICE_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), + ICE_PTT_UNUSED_ENTRY(120), + ICE_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), + ICE_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), + ICE_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), + + /* IPv6 --> GRE/NAT -> MAC */ + ICE_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), + + /* IPv6 --> GRE/NAT -> MAC -> IPv4 */ + ICE_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), + ICE_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), + ICE_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), + ICE_PTT_UNUSED_ENTRY(128), + ICE_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), + ICE_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), + ICE_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), + + /* IPv6 --> GRE/NAT -> MAC -> IPv6 */ + ICE_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), + ICE_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), + ICE_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), + ICE_PTT_UNUSED_ENTRY(135), + ICE_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), + ICE_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), + ICE_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), + + /* IPv6 --> GRE/NAT -> MAC/VLAN */ + ICE_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), + + /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */ + ICE_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), + ICE_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), + ICE_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), + ICE_PTT_UNUSED_ENTRY(143), + ICE_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4), + ICE_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), + ICE_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), + + /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */ + ICE_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), + ICE_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), + ICE_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), + ICE_PTT_UNUSED_ENTRY(150), + ICE_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), + ICE_PTT(152, IP, IPV6, NOF, 
IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), + ICE_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), + + /* unused entries */ + ICE_PTT_UNUSED_ENTRY(154), + ICE_PTT_UNUSED_ENTRY(155), + ICE_PTT_UNUSED_ENTRY(156), + ICE_PTT_UNUSED_ENTRY(157), + ICE_PTT_UNUSED_ENTRY(158), + ICE_PTT_UNUSED_ENTRY(159), + + ICE_PTT_UNUSED_ENTRY(160), + ICE_PTT_UNUSED_ENTRY(161), + ICE_PTT_UNUSED_ENTRY(162), + ICE_PTT_UNUSED_ENTRY(163), + ICE_PTT_UNUSED_ENTRY(164), + ICE_PTT_UNUSED_ENTRY(165), + ICE_PTT_UNUSED_ENTRY(166), + ICE_PTT_UNUSED_ENTRY(167), + ICE_PTT_UNUSED_ENTRY(168), + ICE_PTT_UNUSED_ENTRY(169), + + ICE_PTT_UNUSED_ENTRY(170), + ICE_PTT_UNUSED_ENTRY(171), + ICE_PTT_UNUSED_ENTRY(172), + ICE_PTT_UNUSED_ENTRY(173), + ICE_PTT_UNUSED_ENTRY(174), + ICE_PTT_UNUSED_ENTRY(175), + ICE_PTT_UNUSED_ENTRY(176), + ICE_PTT_UNUSED_ENTRY(177), + ICE_PTT_UNUSED_ENTRY(178), + ICE_PTT_UNUSED_ENTRY(179), + + ICE_PTT_UNUSED_ENTRY(180), + ICE_PTT_UNUSED_ENTRY(181), + ICE_PTT_UNUSED_ENTRY(182), + ICE_PTT_UNUSED_ENTRY(183), + ICE_PTT_UNUSED_ENTRY(184), + ICE_PTT_UNUSED_ENTRY(185), + ICE_PTT_UNUSED_ENTRY(186), + ICE_PTT_UNUSED_ENTRY(187), + ICE_PTT_UNUSED_ENTRY(188), + ICE_PTT_UNUSED_ENTRY(189), + + ICE_PTT_UNUSED_ENTRY(190), + ICE_PTT_UNUSED_ENTRY(191), + ICE_PTT_UNUSED_ENTRY(192), + ICE_PTT_UNUSED_ENTRY(193), + ICE_PTT_UNUSED_ENTRY(194), + ICE_PTT_UNUSED_ENTRY(195), + ICE_PTT_UNUSED_ENTRY(196), + ICE_PTT_UNUSED_ENTRY(197), + ICE_PTT_UNUSED_ENTRY(198), + ICE_PTT_UNUSED_ENTRY(199), + + ICE_PTT_UNUSED_ENTRY(200), + ICE_PTT_UNUSED_ENTRY(201), + ICE_PTT_UNUSED_ENTRY(202), + ICE_PTT_UNUSED_ENTRY(203), + ICE_PTT_UNUSED_ENTRY(204), + ICE_PTT_UNUSED_ENTRY(205), + ICE_PTT_UNUSED_ENTRY(206), + ICE_PTT_UNUSED_ENTRY(207), + ICE_PTT_UNUSED_ENTRY(208), + ICE_PTT_UNUSED_ENTRY(209), + + ICE_PTT_UNUSED_ENTRY(210), + ICE_PTT_UNUSED_ENTRY(211), + ICE_PTT_UNUSED_ENTRY(212), + ICE_PTT_UNUSED_ENTRY(213), + ICE_PTT_UNUSED_ENTRY(214), + ICE_PTT_UNUSED_ENTRY(215), + ICE_PTT_UNUSED_ENTRY(216), + ICE_PTT_UNUSED_ENTRY(217), + ICE_PTT_UNUSED_ENTRY(218), + ICE_PTT_UNUSED_ENTRY(219), + + ICE_PTT_UNUSED_ENTRY(220), + ICE_PTT_UNUSED_ENTRY(221), + ICE_PTT_UNUSED_ENTRY(222), + ICE_PTT_UNUSED_ENTRY(223), + ICE_PTT_UNUSED_ENTRY(224), + ICE_PTT_UNUSED_ENTRY(225), + ICE_PTT_UNUSED_ENTRY(226), + ICE_PTT_UNUSED_ENTRY(227), + ICE_PTT_UNUSED_ENTRY(228), + ICE_PTT_UNUSED_ENTRY(229), + + ICE_PTT_UNUSED_ENTRY(230), + ICE_PTT_UNUSED_ENTRY(231), + ICE_PTT_UNUSED_ENTRY(232), + ICE_PTT_UNUSED_ENTRY(233), + ICE_PTT_UNUSED_ENTRY(234), + ICE_PTT_UNUSED_ENTRY(235), + ICE_PTT_UNUSED_ENTRY(236), + ICE_PTT_UNUSED_ENTRY(237), + ICE_PTT_UNUSED_ENTRY(238), + ICE_PTT_UNUSED_ENTRY(239), + + ICE_PTT_UNUSED_ENTRY(240), + ICE_PTT_UNUSED_ENTRY(241), + ICE_PTT_UNUSED_ENTRY(242), + ICE_PTT_UNUSED_ENTRY(243), + ICE_PTT_UNUSED_ENTRY(244), + ICE_PTT_UNUSED_ENTRY(245), + ICE_PTT_UNUSED_ENTRY(246), + ICE_PTT_UNUSED_ENTRY(247), + ICE_PTT_UNUSED_ENTRY(248), + ICE_PTT_UNUSED_ENTRY(249), + + ICE_PTT_UNUSED_ENTRY(250), + ICE_PTT_UNUSED_ENTRY(251), + ICE_PTT_UNUSED_ENTRY(252), + ICE_PTT_UNUSED_ENTRY(253), + ICE_PTT_UNUSED_ENTRY(254), + ICE_PTT_UNUSED_ENTRY(255), + ICE_PTT_UNUSED_ENTRY(256), + ICE_PTT_UNUSED_ENTRY(257), + ICE_PTT_UNUSED_ENTRY(258), + ICE_PTT_UNUSED_ENTRY(259), + + ICE_PTT_UNUSED_ENTRY(260), + ICE_PTT_UNUSED_ENTRY(261), + ICE_PTT_UNUSED_ENTRY(262), + ICE_PTT_UNUSED_ENTRY(263), + ICE_PTT_UNUSED_ENTRY(264), + ICE_PTT_UNUSED_ENTRY(265), + ICE_PTT_UNUSED_ENTRY(266), + ICE_PTT_UNUSED_ENTRY(267), + ICE_PTT_UNUSED_ENTRY(268), + ICE_PTT_UNUSED_ENTRY(269), + + 
ICE_PTT_UNUSED_ENTRY(270), + ICE_PTT_UNUSED_ENTRY(271), + ICE_PTT_UNUSED_ENTRY(272), + ICE_PTT_UNUSED_ENTRY(273), + ICE_PTT_UNUSED_ENTRY(274), + ICE_PTT_UNUSED_ENTRY(275), + ICE_PTT_UNUSED_ENTRY(276), + ICE_PTT_UNUSED_ENTRY(277), + ICE_PTT_UNUSED_ENTRY(278), + ICE_PTT_UNUSED_ENTRY(279), + + ICE_PTT_UNUSED_ENTRY(280), + ICE_PTT_UNUSED_ENTRY(281), + ICE_PTT_UNUSED_ENTRY(282), + ICE_PTT_UNUSED_ENTRY(283), + ICE_PTT_UNUSED_ENTRY(284), + ICE_PTT_UNUSED_ENTRY(285), + ICE_PTT_UNUSED_ENTRY(286), + ICE_PTT_UNUSED_ENTRY(287), + ICE_PTT_UNUSED_ENTRY(288), + ICE_PTT_UNUSED_ENTRY(289), + + ICE_PTT_UNUSED_ENTRY(290), + ICE_PTT_UNUSED_ENTRY(291), + ICE_PTT_UNUSED_ENTRY(292), + ICE_PTT_UNUSED_ENTRY(293), + ICE_PTT_UNUSED_ENTRY(294), + ICE_PTT_UNUSED_ENTRY(295), + ICE_PTT_UNUSED_ENTRY(296), + ICE_PTT_UNUSED_ENTRY(297), + ICE_PTT_UNUSED_ENTRY(298), + ICE_PTT_UNUSED_ENTRY(299), + + ICE_PTT_UNUSED_ENTRY(300), + ICE_PTT_UNUSED_ENTRY(301), + ICE_PTT_UNUSED_ENTRY(302), + ICE_PTT_UNUSED_ENTRY(303), + ICE_PTT_UNUSED_ENTRY(304), + ICE_PTT_UNUSED_ENTRY(305), + ICE_PTT_UNUSED_ENTRY(306), + ICE_PTT_UNUSED_ENTRY(307), + ICE_PTT_UNUSED_ENTRY(308), + ICE_PTT_UNUSED_ENTRY(309), + + ICE_PTT_UNUSED_ENTRY(310), + ICE_PTT_UNUSED_ENTRY(311), + ICE_PTT_UNUSED_ENTRY(312), + ICE_PTT_UNUSED_ENTRY(313), + ICE_PTT_UNUSED_ENTRY(314), + ICE_PTT_UNUSED_ENTRY(315), + ICE_PTT_UNUSED_ENTRY(316), + ICE_PTT_UNUSED_ENTRY(317), + ICE_PTT_UNUSED_ENTRY(318), + ICE_PTT_UNUSED_ENTRY(319), + + ICE_PTT_UNUSED_ENTRY(320), + ICE_PTT_UNUSED_ENTRY(321), + ICE_PTT_UNUSED_ENTRY(322), + ICE_PTT_UNUSED_ENTRY(323), + ICE_PTT_UNUSED_ENTRY(324), + ICE_PTT_UNUSED_ENTRY(325), + ICE_PTT_UNUSED_ENTRY(326), + ICE_PTT_UNUSED_ENTRY(327), + ICE_PTT_UNUSED_ENTRY(328), + ICE_PTT_UNUSED_ENTRY(329), + + ICE_PTT_UNUSED_ENTRY(330), + ICE_PTT_UNUSED_ENTRY(331), + ICE_PTT_UNUSED_ENTRY(332), + ICE_PTT_UNUSED_ENTRY(333), + ICE_PTT_UNUSED_ENTRY(334), + ICE_PTT_UNUSED_ENTRY(335), + ICE_PTT_UNUSED_ENTRY(336), + ICE_PTT_UNUSED_ENTRY(337), + ICE_PTT_UNUSED_ENTRY(338), + ICE_PTT_UNUSED_ENTRY(339), + + ICE_PTT_UNUSED_ENTRY(340), + ICE_PTT_UNUSED_ENTRY(341), + ICE_PTT_UNUSED_ENTRY(342), + ICE_PTT_UNUSED_ENTRY(343), + ICE_PTT_UNUSED_ENTRY(344), + ICE_PTT_UNUSED_ENTRY(345), + ICE_PTT_UNUSED_ENTRY(346), + ICE_PTT_UNUSED_ENTRY(347), + ICE_PTT_UNUSED_ENTRY(348), + ICE_PTT_UNUSED_ENTRY(349), + + ICE_PTT_UNUSED_ENTRY(350), + ICE_PTT_UNUSED_ENTRY(351), + ICE_PTT_UNUSED_ENTRY(352), + ICE_PTT_UNUSED_ENTRY(353), + ICE_PTT_UNUSED_ENTRY(354), + ICE_PTT_UNUSED_ENTRY(355), + ICE_PTT_UNUSED_ENTRY(356), + ICE_PTT_UNUSED_ENTRY(357), + ICE_PTT_UNUSED_ENTRY(358), + ICE_PTT_UNUSED_ENTRY(359), + + ICE_PTT_UNUSED_ENTRY(360), + ICE_PTT_UNUSED_ENTRY(361), + ICE_PTT_UNUSED_ENTRY(362), + ICE_PTT_UNUSED_ENTRY(363), + ICE_PTT_UNUSED_ENTRY(364), + ICE_PTT_UNUSED_ENTRY(365), + ICE_PTT_UNUSED_ENTRY(366), + ICE_PTT_UNUSED_ENTRY(367), + ICE_PTT_UNUSED_ENTRY(368), + ICE_PTT_UNUSED_ENTRY(369), + + ICE_PTT_UNUSED_ENTRY(370), + ICE_PTT_UNUSED_ENTRY(371), + ICE_PTT_UNUSED_ENTRY(372), + ICE_PTT_UNUSED_ENTRY(373), + ICE_PTT_UNUSED_ENTRY(374), + ICE_PTT_UNUSED_ENTRY(375), + ICE_PTT_UNUSED_ENTRY(376), + ICE_PTT_UNUSED_ENTRY(377), + ICE_PTT_UNUSED_ENTRY(378), + ICE_PTT_UNUSED_ENTRY(379), + + ICE_PTT_UNUSED_ENTRY(380), + ICE_PTT_UNUSED_ENTRY(381), + ICE_PTT_UNUSED_ENTRY(382), + ICE_PTT_UNUSED_ENTRY(383), + ICE_PTT_UNUSED_ENTRY(384), + ICE_PTT_UNUSED_ENTRY(385), + ICE_PTT_UNUSED_ENTRY(386), + ICE_PTT_UNUSED_ENTRY(387), + ICE_PTT_UNUSED_ENTRY(388), + ICE_PTT_UNUSED_ENTRY(389), + + ICE_PTT_UNUSED_ENTRY(390), + 
ICE_PTT_UNUSED_ENTRY(391), + ICE_PTT_UNUSED_ENTRY(392), + ICE_PTT_UNUSED_ENTRY(393), + ICE_PTT_UNUSED_ENTRY(394), + ICE_PTT_UNUSED_ENTRY(395), + ICE_PTT_UNUSED_ENTRY(396), + ICE_PTT_UNUSED_ENTRY(397), + ICE_PTT_UNUSED_ENTRY(398), + ICE_PTT_UNUSED_ENTRY(399), + + ICE_PTT_UNUSED_ENTRY(400), + ICE_PTT_UNUSED_ENTRY(401), + ICE_PTT_UNUSED_ENTRY(402), + ICE_PTT_UNUSED_ENTRY(403), + ICE_PTT_UNUSED_ENTRY(404), + ICE_PTT_UNUSED_ENTRY(405), + ICE_PTT_UNUSED_ENTRY(406), + ICE_PTT_UNUSED_ENTRY(407), + ICE_PTT_UNUSED_ENTRY(408), + ICE_PTT_UNUSED_ENTRY(409), + + ICE_PTT_UNUSED_ENTRY(410), + ICE_PTT_UNUSED_ENTRY(411), + ICE_PTT_UNUSED_ENTRY(412), + ICE_PTT_UNUSED_ENTRY(413), + ICE_PTT_UNUSED_ENTRY(414), + ICE_PTT_UNUSED_ENTRY(415), + ICE_PTT_UNUSED_ENTRY(416), + ICE_PTT_UNUSED_ENTRY(417), + ICE_PTT_UNUSED_ENTRY(418), + ICE_PTT_UNUSED_ENTRY(419), + + ICE_PTT_UNUSED_ENTRY(420), + ICE_PTT_UNUSED_ENTRY(421), + ICE_PTT_UNUSED_ENTRY(422), + ICE_PTT_UNUSED_ENTRY(423), + ICE_PTT_UNUSED_ENTRY(424), + ICE_PTT_UNUSED_ENTRY(425), + ICE_PTT_UNUSED_ENTRY(426), + ICE_PTT_UNUSED_ENTRY(427), + ICE_PTT_UNUSED_ENTRY(428), + ICE_PTT_UNUSED_ENTRY(429), + + ICE_PTT_UNUSED_ENTRY(430), + ICE_PTT_UNUSED_ENTRY(431), + ICE_PTT_UNUSED_ENTRY(432), + ICE_PTT_UNUSED_ENTRY(433), + ICE_PTT_UNUSED_ENTRY(434), + ICE_PTT_UNUSED_ENTRY(435), + ICE_PTT_UNUSED_ENTRY(436), + ICE_PTT_UNUSED_ENTRY(437), + ICE_PTT_UNUSED_ENTRY(438), + ICE_PTT_UNUSED_ENTRY(439), + + ICE_PTT_UNUSED_ENTRY(440), + ICE_PTT_UNUSED_ENTRY(441), + ICE_PTT_UNUSED_ENTRY(442), + ICE_PTT_UNUSED_ENTRY(443), + ICE_PTT_UNUSED_ENTRY(444), + ICE_PTT_UNUSED_ENTRY(445), + ICE_PTT_UNUSED_ENTRY(446), + ICE_PTT_UNUSED_ENTRY(447), + ICE_PTT_UNUSED_ENTRY(448), + ICE_PTT_UNUSED_ENTRY(449), + + ICE_PTT_UNUSED_ENTRY(450), + ICE_PTT_UNUSED_ENTRY(451), + ICE_PTT_UNUSED_ENTRY(452), + ICE_PTT_UNUSED_ENTRY(453), + ICE_PTT_UNUSED_ENTRY(454), + ICE_PTT_UNUSED_ENTRY(455), + ICE_PTT_UNUSED_ENTRY(456), + ICE_PTT_UNUSED_ENTRY(457), + ICE_PTT_UNUSED_ENTRY(458), + ICE_PTT_UNUSED_ENTRY(459), + + ICE_PTT_UNUSED_ENTRY(460), + ICE_PTT_UNUSED_ENTRY(461), + ICE_PTT_UNUSED_ENTRY(462), + ICE_PTT_UNUSED_ENTRY(463), + ICE_PTT_UNUSED_ENTRY(464), + ICE_PTT_UNUSED_ENTRY(465), + ICE_PTT_UNUSED_ENTRY(466), + ICE_PTT_UNUSED_ENTRY(467), + ICE_PTT_UNUSED_ENTRY(468), + ICE_PTT_UNUSED_ENTRY(469), + + ICE_PTT_UNUSED_ENTRY(470), + ICE_PTT_UNUSED_ENTRY(471), + ICE_PTT_UNUSED_ENTRY(472), + ICE_PTT_UNUSED_ENTRY(473), + ICE_PTT_UNUSED_ENTRY(474), + ICE_PTT_UNUSED_ENTRY(475), + ICE_PTT_UNUSED_ENTRY(476), + ICE_PTT_UNUSED_ENTRY(477), + ICE_PTT_UNUSED_ENTRY(478), + ICE_PTT_UNUSED_ENTRY(479), + + ICE_PTT_UNUSED_ENTRY(480), + ICE_PTT_UNUSED_ENTRY(481), + ICE_PTT_UNUSED_ENTRY(482), + ICE_PTT_UNUSED_ENTRY(483), + ICE_PTT_UNUSED_ENTRY(484), + ICE_PTT_UNUSED_ENTRY(485), + ICE_PTT_UNUSED_ENTRY(486), + ICE_PTT_UNUSED_ENTRY(487), + ICE_PTT_UNUSED_ENTRY(488), + ICE_PTT_UNUSED_ENTRY(489), + + ICE_PTT_UNUSED_ENTRY(490), + ICE_PTT_UNUSED_ENTRY(491), + ICE_PTT_UNUSED_ENTRY(492), + ICE_PTT_UNUSED_ENTRY(493), + ICE_PTT_UNUSED_ENTRY(494), + ICE_PTT_UNUSED_ENTRY(495), + ICE_PTT_UNUSED_ENTRY(496), + ICE_PTT_UNUSED_ENTRY(497), + ICE_PTT_UNUSED_ENTRY(498), + ICE_PTT_UNUSED_ENTRY(499), + + ICE_PTT_UNUSED_ENTRY(500), + ICE_PTT_UNUSED_ENTRY(501), + ICE_PTT_UNUSED_ENTRY(502), + ICE_PTT_UNUSED_ENTRY(503), + ICE_PTT_UNUSED_ENTRY(504), + ICE_PTT_UNUSED_ENTRY(505), + ICE_PTT_UNUSED_ENTRY(506), + ICE_PTT_UNUSED_ENTRY(507), + ICE_PTT_UNUSED_ENTRY(508), + ICE_PTT_UNUSED_ENTRY(509), + + ICE_PTT_UNUSED_ENTRY(510), + ICE_PTT_UNUSED_ENTRY(511), + 
ICE_PTT_UNUSED_ENTRY(512), + ICE_PTT_UNUSED_ENTRY(513), + ICE_PTT_UNUSED_ENTRY(514), + ICE_PTT_UNUSED_ENTRY(515), + ICE_PTT_UNUSED_ENTRY(516), + ICE_PTT_UNUSED_ENTRY(517), + ICE_PTT_UNUSED_ENTRY(518), + ICE_PTT_UNUSED_ENTRY(519), + + ICE_PTT_UNUSED_ENTRY(520), + ICE_PTT_UNUSED_ENTRY(521), + ICE_PTT_UNUSED_ENTRY(522), + ICE_PTT_UNUSED_ENTRY(523), + ICE_PTT_UNUSED_ENTRY(524), + ICE_PTT_UNUSED_ENTRY(525), + ICE_PTT_UNUSED_ENTRY(526), + ICE_PTT_UNUSED_ENTRY(527), + ICE_PTT_UNUSED_ENTRY(528), + ICE_PTT_UNUSED_ENTRY(529), + + ICE_PTT_UNUSED_ENTRY(530), + ICE_PTT_UNUSED_ENTRY(531), + ICE_PTT_UNUSED_ENTRY(532), + ICE_PTT_UNUSED_ENTRY(533), + ICE_PTT_UNUSED_ENTRY(534), + ICE_PTT_UNUSED_ENTRY(535), + ICE_PTT_UNUSED_ENTRY(536), + ICE_PTT_UNUSED_ENTRY(537), + ICE_PTT_UNUSED_ENTRY(538), + ICE_PTT_UNUSED_ENTRY(539), + + ICE_PTT_UNUSED_ENTRY(540), + ICE_PTT_UNUSED_ENTRY(541), + ICE_PTT_UNUSED_ENTRY(542), + ICE_PTT_UNUSED_ENTRY(543), + ICE_PTT_UNUSED_ENTRY(544), + ICE_PTT_UNUSED_ENTRY(545), + ICE_PTT_UNUSED_ENTRY(546), + ICE_PTT_UNUSED_ENTRY(547), + ICE_PTT_UNUSED_ENTRY(548), + ICE_PTT_UNUSED_ENTRY(549), + + ICE_PTT_UNUSED_ENTRY(550), + ICE_PTT_UNUSED_ENTRY(551), + ICE_PTT_UNUSED_ENTRY(552), + ICE_PTT_UNUSED_ENTRY(553), + ICE_PTT_UNUSED_ENTRY(554), + ICE_PTT_UNUSED_ENTRY(555), + ICE_PTT_UNUSED_ENTRY(556), + ICE_PTT_UNUSED_ENTRY(557), + ICE_PTT_UNUSED_ENTRY(558), + ICE_PTT_UNUSED_ENTRY(559), + + ICE_PTT_UNUSED_ENTRY(560), + ICE_PTT_UNUSED_ENTRY(561), + ICE_PTT_UNUSED_ENTRY(562), + ICE_PTT_UNUSED_ENTRY(563), + ICE_PTT_UNUSED_ENTRY(564), + ICE_PTT_UNUSED_ENTRY(565), + ICE_PTT_UNUSED_ENTRY(566), + ICE_PTT_UNUSED_ENTRY(567), + ICE_PTT_UNUSED_ENTRY(568), + ICE_PTT_UNUSED_ENTRY(569), + + ICE_PTT_UNUSED_ENTRY(570), + ICE_PTT_UNUSED_ENTRY(571), + ICE_PTT_UNUSED_ENTRY(572), + ICE_PTT_UNUSED_ENTRY(573), + ICE_PTT_UNUSED_ENTRY(574), + ICE_PTT_UNUSED_ENTRY(575), + ICE_PTT_UNUSED_ENTRY(576), + ICE_PTT_UNUSED_ENTRY(577), + ICE_PTT_UNUSED_ENTRY(578), + ICE_PTT_UNUSED_ENTRY(579), + + ICE_PTT_UNUSED_ENTRY(580), + ICE_PTT_UNUSED_ENTRY(581), + ICE_PTT_UNUSED_ENTRY(582), + ICE_PTT_UNUSED_ENTRY(583), + ICE_PTT_UNUSED_ENTRY(584), + ICE_PTT_UNUSED_ENTRY(585), + ICE_PTT_UNUSED_ENTRY(586), + ICE_PTT_UNUSED_ENTRY(587), + ICE_PTT_UNUSED_ENTRY(588), + ICE_PTT_UNUSED_ENTRY(589), + + ICE_PTT_UNUSED_ENTRY(590), + ICE_PTT_UNUSED_ENTRY(591), + ICE_PTT_UNUSED_ENTRY(592), + ICE_PTT_UNUSED_ENTRY(593), + ICE_PTT_UNUSED_ENTRY(594), + ICE_PTT_UNUSED_ENTRY(595), + ICE_PTT_UNUSED_ENTRY(596), + ICE_PTT_UNUSED_ENTRY(597), + ICE_PTT_UNUSED_ENTRY(598), + ICE_PTT_UNUSED_ENTRY(599), + + ICE_PTT_UNUSED_ENTRY(600), + ICE_PTT_UNUSED_ENTRY(601), + ICE_PTT_UNUSED_ENTRY(602), + ICE_PTT_UNUSED_ENTRY(603), + ICE_PTT_UNUSED_ENTRY(604), + ICE_PTT_UNUSED_ENTRY(605), + ICE_PTT_UNUSED_ENTRY(606), + ICE_PTT_UNUSED_ENTRY(607), + ICE_PTT_UNUSED_ENTRY(608), + ICE_PTT_UNUSED_ENTRY(609), + + ICE_PTT_UNUSED_ENTRY(610), + ICE_PTT_UNUSED_ENTRY(611), + ICE_PTT_UNUSED_ENTRY(612), + ICE_PTT_UNUSED_ENTRY(613), + ICE_PTT_UNUSED_ENTRY(614), + ICE_PTT_UNUSED_ENTRY(615), + ICE_PTT_UNUSED_ENTRY(616), + ICE_PTT_UNUSED_ENTRY(617), + ICE_PTT_UNUSED_ENTRY(618), + ICE_PTT_UNUSED_ENTRY(619), + + ICE_PTT_UNUSED_ENTRY(620), + ICE_PTT_UNUSED_ENTRY(621), + ICE_PTT_UNUSED_ENTRY(622), + ICE_PTT_UNUSED_ENTRY(623), + ICE_PTT_UNUSED_ENTRY(624), + ICE_PTT_UNUSED_ENTRY(625), + ICE_PTT_UNUSED_ENTRY(626), + ICE_PTT_UNUSED_ENTRY(627), + ICE_PTT_UNUSED_ENTRY(628), + ICE_PTT_UNUSED_ENTRY(629), + + ICE_PTT_UNUSED_ENTRY(630), + ICE_PTT_UNUSED_ENTRY(631), + ICE_PTT_UNUSED_ENTRY(632), + 
ICE_PTT_UNUSED_ENTRY(633), + ICE_PTT_UNUSED_ENTRY(634), + ICE_PTT_UNUSED_ENTRY(635), + ICE_PTT_UNUSED_ENTRY(636), + ICE_PTT_UNUSED_ENTRY(637), + ICE_PTT_UNUSED_ENTRY(638), + ICE_PTT_UNUSED_ENTRY(639), + + ICE_PTT_UNUSED_ENTRY(640), + ICE_PTT_UNUSED_ENTRY(641), + ICE_PTT_UNUSED_ENTRY(642), + ICE_PTT_UNUSED_ENTRY(643), + ICE_PTT_UNUSED_ENTRY(644), + ICE_PTT_UNUSED_ENTRY(645), + ICE_PTT_UNUSED_ENTRY(646), + ICE_PTT_UNUSED_ENTRY(647), + ICE_PTT_UNUSED_ENTRY(648), + ICE_PTT_UNUSED_ENTRY(649), + + ICE_PTT_UNUSED_ENTRY(650), + ICE_PTT_UNUSED_ENTRY(651), + ICE_PTT_UNUSED_ENTRY(652), + ICE_PTT_UNUSED_ENTRY(653), + ICE_PTT_UNUSED_ENTRY(654), + ICE_PTT_UNUSED_ENTRY(655), + ICE_PTT_UNUSED_ENTRY(656), + ICE_PTT_UNUSED_ENTRY(657), + ICE_PTT_UNUSED_ENTRY(658), + ICE_PTT_UNUSED_ENTRY(659), + + ICE_PTT_UNUSED_ENTRY(660), + ICE_PTT_UNUSED_ENTRY(661), + ICE_PTT_UNUSED_ENTRY(662), + ICE_PTT_UNUSED_ENTRY(663), + ICE_PTT_UNUSED_ENTRY(664), + ICE_PTT_UNUSED_ENTRY(665), + ICE_PTT_UNUSED_ENTRY(666), + ICE_PTT_UNUSED_ENTRY(667), + ICE_PTT_UNUSED_ENTRY(668), + ICE_PTT_UNUSED_ENTRY(669), + + ICE_PTT_UNUSED_ENTRY(670), + ICE_PTT_UNUSED_ENTRY(671), + ICE_PTT_UNUSED_ENTRY(672), + ICE_PTT_UNUSED_ENTRY(673), + ICE_PTT_UNUSED_ENTRY(674), + ICE_PTT_UNUSED_ENTRY(675), + ICE_PTT_UNUSED_ENTRY(676), + ICE_PTT_UNUSED_ENTRY(677), + ICE_PTT_UNUSED_ENTRY(678), + ICE_PTT_UNUSED_ENTRY(679), + + ICE_PTT_UNUSED_ENTRY(680), + ICE_PTT_UNUSED_ENTRY(681), + ICE_PTT_UNUSED_ENTRY(682), + ICE_PTT_UNUSED_ENTRY(683), + ICE_PTT_UNUSED_ENTRY(684), + ICE_PTT_UNUSED_ENTRY(685), + ICE_PTT_UNUSED_ENTRY(686), + ICE_PTT_UNUSED_ENTRY(687), + ICE_PTT_UNUSED_ENTRY(688), + ICE_PTT_UNUSED_ENTRY(689), + + ICE_PTT_UNUSED_ENTRY(690), + ICE_PTT_UNUSED_ENTRY(691), + ICE_PTT_UNUSED_ENTRY(692), + ICE_PTT_UNUSED_ENTRY(693), + ICE_PTT_UNUSED_ENTRY(694), + ICE_PTT_UNUSED_ENTRY(695), + ICE_PTT_UNUSED_ENTRY(696), + ICE_PTT_UNUSED_ENTRY(697), + ICE_PTT_UNUSED_ENTRY(698), + ICE_PTT_UNUSED_ENTRY(699), + + ICE_PTT_UNUSED_ENTRY(700), + ICE_PTT_UNUSED_ENTRY(701), + ICE_PTT_UNUSED_ENTRY(702), + ICE_PTT_UNUSED_ENTRY(703), + ICE_PTT_UNUSED_ENTRY(704), + ICE_PTT_UNUSED_ENTRY(705), + ICE_PTT_UNUSED_ENTRY(706), + ICE_PTT_UNUSED_ENTRY(707), + ICE_PTT_UNUSED_ENTRY(708), + ICE_PTT_UNUSED_ENTRY(709), + + ICE_PTT_UNUSED_ENTRY(710), + ICE_PTT_UNUSED_ENTRY(711), + ICE_PTT_UNUSED_ENTRY(712), + ICE_PTT_UNUSED_ENTRY(713), + ICE_PTT_UNUSED_ENTRY(714), + ICE_PTT_UNUSED_ENTRY(715), + ICE_PTT_UNUSED_ENTRY(716), + ICE_PTT_UNUSED_ENTRY(717), + ICE_PTT_UNUSED_ENTRY(718), + ICE_PTT_UNUSED_ENTRY(719), + + ICE_PTT_UNUSED_ENTRY(720), + ICE_PTT_UNUSED_ENTRY(721), + ICE_PTT_UNUSED_ENTRY(722), + ICE_PTT_UNUSED_ENTRY(723), + ICE_PTT_UNUSED_ENTRY(724), + ICE_PTT_UNUSED_ENTRY(725), + ICE_PTT_UNUSED_ENTRY(726), + ICE_PTT_UNUSED_ENTRY(727), + ICE_PTT_UNUSED_ENTRY(728), + ICE_PTT_UNUSED_ENTRY(729), + + ICE_PTT_UNUSED_ENTRY(730), + ICE_PTT_UNUSED_ENTRY(731), + ICE_PTT_UNUSED_ENTRY(732), + ICE_PTT_UNUSED_ENTRY(733), + ICE_PTT_UNUSED_ENTRY(734), + ICE_PTT_UNUSED_ENTRY(735), + ICE_PTT_UNUSED_ENTRY(736), + ICE_PTT_UNUSED_ENTRY(737), + ICE_PTT_UNUSED_ENTRY(738), + ICE_PTT_UNUSED_ENTRY(739), + + ICE_PTT_UNUSED_ENTRY(740), + ICE_PTT_UNUSED_ENTRY(741), + ICE_PTT_UNUSED_ENTRY(742), + ICE_PTT_UNUSED_ENTRY(743), + ICE_PTT_UNUSED_ENTRY(744), + ICE_PTT_UNUSED_ENTRY(745), + ICE_PTT_UNUSED_ENTRY(746), + ICE_PTT_UNUSED_ENTRY(747), + ICE_PTT_UNUSED_ENTRY(748), + ICE_PTT_UNUSED_ENTRY(749), + + ICE_PTT_UNUSED_ENTRY(750), + ICE_PTT_UNUSED_ENTRY(751), + ICE_PTT_UNUSED_ENTRY(752), + ICE_PTT_UNUSED_ENTRY(753), + 
ICE_PTT_UNUSED_ENTRY(754), + ICE_PTT_UNUSED_ENTRY(755), + ICE_PTT_UNUSED_ENTRY(756), + ICE_PTT_UNUSED_ENTRY(757), + ICE_PTT_UNUSED_ENTRY(758), + ICE_PTT_UNUSED_ENTRY(759), + + ICE_PTT_UNUSED_ENTRY(760), + ICE_PTT_UNUSED_ENTRY(761), + ICE_PTT_UNUSED_ENTRY(762), + ICE_PTT_UNUSED_ENTRY(763), + ICE_PTT_UNUSED_ENTRY(764), + ICE_PTT_UNUSED_ENTRY(765), + ICE_PTT_UNUSED_ENTRY(766), + ICE_PTT_UNUSED_ENTRY(767), + ICE_PTT_UNUSED_ENTRY(768), + ICE_PTT_UNUSED_ENTRY(769), + + ICE_PTT_UNUSED_ENTRY(770), + ICE_PTT_UNUSED_ENTRY(771), + ICE_PTT_UNUSED_ENTRY(772), + ICE_PTT_UNUSED_ENTRY(773), + ICE_PTT_UNUSED_ENTRY(774), + ICE_PTT_UNUSED_ENTRY(775), + ICE_PTT_UNUSED_ENTRY(776), + ICE_PTT_UNUSED_ENTRY(777), + ICE_PTT_UNUSED_ENTRY(778), + ICE_PTT_UNUSED_ENTRY(779), + + ICE_PTT_UNUSED_ENTRY(780), + ICE_PTT_UNUSED_ENTRY(781), + ICE_PTT_UNUSED_ENTRY(782), + ICE_PTT_UNUSED_ENTRY(783), + ICE_PTT_UNUSED_ENTRY(784), + ICE_PTT_UNUSED_ENTRY(785), + ICE_PTT_UNUSED_ENTRY(786), + ICE_PTT_UNUSED_ENTRY(787), + ICE_PTT_UNUSED_ENTRY(788), + ICE_PTT_UNUSED_ENTRY(789), + + ICE_PTT_UNUSED_ENTRY(790), + ICE_PTT_UNUSED_ENTRY(791), + ICE_PTT_UNUSED_ENTRY(792), + ICE_PTT_UNUSED_ENTRY(793), + ICE_PTT_UNUSED_ENTRY(794), + ICE_PTT_UNUSED_ENTRY(795), + ICE_PTT_UNUSED_ENTRY(796), + ICE_PTT_UNUSED_ENTRY(797), + ICE_PTT_UNUSED_ENTRY(798), + ICE_PTT_UNUSED_ENTRY(799), + + ICE_PTT_UNUSED_ENTRY(800), + ICE_PTT_UNUSED_ENTRY(801), + ICE_PTT_UNUSED_ENTRY(802), + ICE_PTT_UNUSED_ENTRY(803), + ICE_PTT_UNUSED_ENTRY(804), + ICE_PTT_UNUSED_ENTRY(805), + ICE_PTT_UNUSED_ENTRY(806), + ICE_PTT_UNUSED_ENTRY(807), + ICE_PTT_UNUSED_ENTRY(808), + ICE_PTT_UNUSED_ENTRY(809), + + ICE_PTT_UNUSED_ENTRY(810), + ICE_PTT_UNUSED_ENTRY(811), + ICE_PTT_UNUSED_ENTRY(812), + ICE_PTT_UNUSED_ENTRY(813), + ICE_PTT_UNUSED_ENTRY(814), + ICE_PTT_UNUSED_ENTRY(815), + ICE_PTT_UNUSED_ENTRY(816), + ICE_PTT_UNUSED_ENTRY(817), + ICE_PTT_UNUSED_ENTRY(818), + ICE_PTT_UNUSED_ENTRY(819), + + ICE_PTT_UNUSED_ENTRY(820), + ICE_PTT_UNUSED_ENTRY(821), + ICE_PTT_UNUSED_ENTRY(822), + ICE_PTT_UNUSED_ENTRY(823), + ICE_PTT_UNUSED_ENTRY(824), + ICE_PTT_UNUSED_ENTRY(825), + ICE_PTT_UNUSED_ENTRY(826), + ICE_PTT_UNUSED_ENTRY(827), + ICE_PTT_UNUSED_ENTRY(828), + ICE_PTT_UNUSED_ENTRY(829), + + ICE_PTT_UNUSED_ENTRY(830), + ICE_PTT_UNUSED_ENTRY(831), + ICE_PTT_UNUSED_ENTRY(832), + ICE_PTT_UNUSED_ENTRY(833), + ICE_PTT_UNUSED_ENTRY(834), + ICE_PTT_UNUSED_ENTRY(835), + ICE_PTT_UNUSED_ENTRY(836), + ICE_PTT_UNUSED_ENTRY(837), + ICE_PTT_UNUSED_ENTRY(838), + ICE_PTT_UNUSED_ENTRY(839), + + ICE_PTT_UNUSED_ENTRY(840), + ICE_PTT_UNUSED_ENTRY(841), + ICE_PTT_UNUSED_ENTRY(842), + ICE_PTT_UNUSED_ENTRY(843), + ICE_PTT_UNUSED_ENTRY(844), + ICE_PTT_UNUSED_ENTRY(845), + ICE_PTT_UNUSED_ENTRY(846), + ICE_PTT_UNUSED_ENTRY(847), + ICE_PTT_UNUSED_ENTRY(848), + ICE_PTT_UNUSED_ENTRY(849), + + ICE_PTT_UNUSED_ENTRY(850), + ICE_PTT_UNUSED_ENTRY(851), + ICE_PTT_UNUSED_ENTRY(852), + ICE_PTT_UNUSED_ENTRY(853), + ICE_PTT_UNUSED_ENTRY(854), + ICE_PTT_UNUSED_ENTRY(855), + ICE_PTT_UNUSED_ENTRY(856), + ICE_PTT_UNUSED_ENTRY(857), + ICE_PTT_UNUSED_ENTRY(858), + ICE_PTT_UNUSED_ENTRY(859), + + ICE_PTT_UNUSED_ENTRY(860), + ICE_PTT_UNUSED_ENTRY(861), + ICE_PTT_UNUSED_ENTRY(862), + ICE_PTT_UNUSED_ENTRY(863), + ICE_PTT_UNUSED_ENTRY(864), + ICE_PTT_UNUSED_ENTRY(865), + ICE_PTT_UNUSED_ENTRY(866), + ICE_PTT_UNUSED_ENTRY(867), + ICE_PTT_UNUSED_ENTRY(868), + ICE_PTT_UNUSED_ENTRY(869), + + ICE_PTT_UNUSED_ENTRY(870), + ICE_PTT_UNUSED_ENTRY(871), + ICE_PTT_UNUSED_ENTRY(872), + ICE_PTT_UNUSED_ENTRY(873), + ICE_PTT_UNUSED_ENTRY(874), + 
ICE_PTT_UNUSED_ENTRY(875), + ICE_PTT_UNUSED_ENTRY(876), + ICE_PTT_UNUSED_ENTRY(877), + ICE_PTT_UNUSED_ENTRY(878), + ICE_PTT_UNUSED_ENTRY(879), + + ICE_PTT_UNUSED_ENTRY(880), + ICE_PTT_UNUSED_ENTRY(881), + ICE_PTT_UNUSED_ENTRY(882), + ICE_PTT_UNUSED_ENTRY(883), + ICE_PTT_UNUSED_ENTRY(884), + ICE_PTT_UNUSED_ENTRY(885), + ICE_PTT_UNUSED_ENTRY(886), + ICE_PTT_UNUSED_ENTRY(887), + ICE_PTT_UNUSED_ENTRY(888), + ICE_PTT_UNUSED_ENTRY(889), + + ICE_PTT_UNUSED_ENTRY(890), + ICE_PTT_UNUSED_ENTRY(891), + ICE_PTT_UNUSED_ENTRY(892), + ICE_PTT_UNUSED_ENTRY(893), + ICE_PTT_UNUSED_ENTRY(894), + ICE_PTT_UNUSED_ENTRY(895), + ICE_PTT_UNUSED_ENTRY(896), + ICE_PTT_UNUSED_ENTRY(897), + ICE_PTT_UNUSED_ENTRY(898), + ICE_PTT_UNUSED_ENTRY(899), + + ICE_PTT_UNUSED_ENTRY(900), + ICE_PTT_UNUSED_ENTRY(901), + ICE_PTT_UNUSED_ENTRY(902), + ICE_PTT_UNUSED_ENTRY(903), + ICE_PTT_UNUSED_ENTRY(904), + ICE_PTT_UNUSED_ENTRY(905), + ICE_PTT_UNUSED_ENTRY(906), + ICE_PTT_UNUSED_ENTRY(907), + ICE_PTT_UNUSED_ENTRY(908), + ICE_PTT_UNUSED_ENTRY(909), + + ICE_PTT_UNUSED_ENTRY(910), + ICE_PTT_UNUSED_ENTRY(911), + ICE_PTT_UNUSED_ENTRY(912), + ICE_PTT_UNUSED_ENTRY(913), + ICE_PTT_UNUSED_ENTRY(914), + ICE_PTT_UNUSED_ENTRY(915), + ICE_PTT_UNUSED_ENTRY(916), + ICE_PTT_UNUSED_ENTRY(917), + ICE_PTT_UNUSED_ENTRY(918), + ICE_PTT_UNUSED_ENTRY(919), + + ICE_PTT_UNUSED_ENTRY(920), + ICE_PTT_UNUSED_ENTRY(921), + ICE_PTT_UNUSED_ENTRY(922), + ICE_PTT_UNUSED_ENTRY(923), + ICE_PTT_UNUSED_ENTRY(924), + ICE_PTT_UNUSED_ENTRY(925), + ICE_PTT_UNUSED_ENTRY(926), + ICE_PTT_UNUSED_ENTRY(927), + ICE_PTT_UNUSED_ENTRY(928), + ICE_PTT_UNUSED_ENTRY(929), + + ICE_PTT_UNUSED_ENTRY(930), + ICE_PTT_UNUSED_ENTRY(931), + ICE_PTT_UNUSED_ENTRY(932), + ICE_PTT_UNUSED_ENTRY(933), + ICE_PTT_UNUSED_ENTRY(934), + ICE_PTT_UNUSED_ENTRY(935), + ICE_PTT_UNUSED_ENTRY(936), + ICE_PTT_UNUSED_ENTRY(937), + ICE_PTT_UNUSED_ENTRY(938), + ICE_PTT_UNUSED_ENTRY(939), + + ICE_PTT_UNUSED_ENTRY(940), + ICE_PTT_UNUSED_ENTRY(941), + ICE_PTT_UNUSED_ENTRY(942), + ICE_PTT_UNUSED_ENTRY(943), + ICE_PTT_UNUSED_ENTRY(944), + ICE_PTT_UNUSED_ENTRY(945), + ICE_PTT_UNUSED_ENTRY(946), + ICE_PTT_UNUSED_ENTRY(947), + ICE_PTT_UNUSED_ENTRY(948), + ICE_PTT_UNUSED_ENTRY(949), + + ICE_PTT_UNUSED_ENTRY(950), + ICE_PTT_UNUSED_ENTRY(951), + ICE_PTT_UNUSED_ENTRY(952), + ICE_PTT_UNUSED_ENTRY(953), + ICE_PTT_UNUSED_ENTRY(954), + ICE_PTT_UNUSED_ENTRY(955), + ICE_PTT_UNUSED_ENTRY(956), + ICE_PTT_UNUSED_ENTRY(957), + ICE_PTT_UNUSED_ENTRY(958), + ICE_PTT_UNUSED_ENTRY(959), + + ICE_PTT_UNUSED_ENTRY(960), + ICE_PTT_UNUSED_ENTRY(961), + ICE_PTT_UNUSED_ENTRY(962), + ICE_PTT_UNUSED_ENTRY(963), + ICE_PTT_UNUSED_ENTRY(964), + ICE_PTT_UNUSED_ENTRY(965), + ICE_PTT_UNUSED_ENTRY(966), + ICE_PTT_UNUSED_ENTRY(967), + ICE_PTT_UNUSED_ENTRY(968), + ICE_PTT_UNUSED_ENTRY(969), + + ICE_PTT_UNUSED_ENTRY(970), + ICE_PTT_UNUSED_ENTRY(971), + ICE_PTT_UNUSED_ENTRY(972), + ICE_PTT_UNUSED_ENTRY(973), + ICE_PTT_UNUSED_ENTRY(974), + ICE_PTT_UNUSED_ENTRY(975), + ICE_PTT_UNUSED_ENTRY(976), + ICE_PTT_UNUSED_ENTRY(977), + ICE_PTT_UNUSED_ENTRY(978), + ICE_PTT_UNUSED_ENTRY(979), + + ICE_PTT_UNUSED_ENTRY(980), + ICE_PTT_UNUSED_ENTRY(981), + ICE_PTT_UNUSED_ENTRY(982), + ICE_PTT_UNUSED_ENTRY(983), + ICE_PTT_UNUSED_ENTRY(984), + ICE_PTT_UNUSED_ENTRY(985), + ICE_PTT_UNUSED_ENTRY(986), + ICE_PTT_UNUSED_ENTRY(987), + ICE_PTT_UNUSED_ENTRY(988), + ICE_PTT_UNUSED_ENTRY(989), + + ICE_PTT_UNUSED_ENTRY(990), + ICE_PTT_UNUSED_ENTRY(991), + ICE_PTT_UNUSED_ENTRY(992), + ICE_PTT_UNUSED_ENTRY(993), + ICE_PTT_UNUSED_ENTRY(994), + ICE_PTT_UNUSED_ENTRY(995), + 
ICE_PTT_UNUSED_ENTRY(996), + ICE_PTT_UNUSED_ENTRY(997), + ICE_PTT_UNUSED_ENTRY(998), + ICE_PTT_UNUSED_ENTRY(999), + + ICE_PTT_UNUSED_ENTRY(1000), + ICE_PTT_UNUSED_ENTRY(1001), + ICE_PTT_UNUSED_ENTRY(1002), + ICE_PTT_UNUSED_ENTRY(1003), + ICE_PTT_UNUSED_ENTRY(1004), + ICE_PTT_UNUSED_ENTRY(1005), + ICE_PTT_UNUSED_ENTRY(1006), + ICE_PTT_UNUSED_ENTRY(1007), + ICE_PTT_UNUSED_ENTRY(1008), + ICE_PTT_UNUSED_ENTRY(1009), + + ICE_PTT_UNUSED_ENTRY(1010), + ICE_PTT_UNUSED_ENTRY(1011), + ICE_PTT_UNUSED_ENTRY(1012), + ICE_PTT_UNUSED_ENTRY(1013), + ICE_PTT_UNUSED_ENTRY(1014), + ICE_PTT_UNUSED_ENTRY(1015), + ICE_PTT_UNUSED_ENTRY(1016), + ICE_PTT_UNUSED_ENTRY(1017), + ICE_PTT_UNUSED_ENTRY(1018), + ICE_PTT_UNUSED_ENTRY(1019), + + ICE_PTT_UNUSED_ENTRY(1020), + ICE_PTT_UNUSED_ENTRY(1021), + ICE_PTT_UNUSED_ENTRY(1022), + ICE_PTT_UNUSED_ENTRY(1023), +}; + +static inline struct ice_rx_ptype_decoded ice_decode_rx_desc_ptype(u16 ptype) +{ + return ice_ptype_lkup[ptype]; +} + +#define ICE_LINK_SPEED_UNKNOWN 0 +#define ICE_LINK_SPEED_10MBPS 10 +#define ICE_LINK_SPEED_100MBPS 100 +#define ICE_LINK_SPEED_1000MBPS 1000 +#define ICE_LINK_SPEED_2500MBPS 2500 +#define ICE_LINK_SPEED_5000MBPS 5000 +#define ICE_LINK_SPEED_10000MBPS 10000 +#define ICE_LINK_SPEED_20000MBPS 20000 +#define ICE_LINK_SPEED_25000MBPS 25000 +#define ICE_LINK_SPEED_40000MBPS 40000 +#define ICE_LINK_SPEED_50000MBPS 50000 +#define ICE_LINK_SPEED_100000MBPS 100000 + +#endif /* _ICE_LAN_TX_RX_H_ */ diff --git a/src/spdk/dpdk/drivers/net/ice/base/ice_nvm.c b/src/spdk/dpdk/drivers/net/ice/base/ice_nvm.c new file mode 100644 index 000000000..d5e6215de --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ice/base/ice_nvm.c @@ -0,0 +1,802 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + +#include "ice_common.h" + +/** + * ice_aq_read_nvm + * @hw: pointer to the HW struct + * @module_typeid: module pointer location in words from the NVM beginning + * @offset: byte offset from the module beginning + * @length: length of the section to be read (in bytes from the offset) + * @data: command buffer (size [bytes] = length) + * @last_command: tells if this is the last command in a series + * @read_shadow_ram: tell if this is a shadow RAM read + * @cd: pointer to command details structure or NULL + * + * Read the NVM using the admin queue commands (0x0701) + */ +static enum ice_status +ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length, + void *data, bool last_command, bool read_shadow_ram, + struct ice_sq_cd *cd) +{ + struct ice_aq_desc desc; + struct ice_aqc_nvm *cmd; + + ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); + + cmd = &desc.params.nvm; + + if (offset > ICE_AQC_NVM_MAX_OFFSET) + return ICE_ERR_PARAM; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_read); + + if (!read_shadow_ram && module_typeid == ICE_AQC_NVM_START_POINT) + cmd->cmd_flags |= ICE_AQC_NVM_FLASH_ONLY; + + /* If this is the last command in a series, set the proper flag. 
*/ + if (last_command) + cmd->cmd_flags |= ICE_AQC_NVM_LAST_CMD; + cmd->module_typeid = CPU_TO_LE16(module_typeid); + cmd->offset_low = CPU_TO_LE16(offset & 0xFFFF); + cmd->offset_high = (offset >> 16) & 0xFF; + cmd->length = CPU_TO_LE16(length); + + return ice_aq_send_cmd(hw, &desc, data, length, cd); +} + +/** + * ice_read_flat_nvm - Read portion of NVM by flat offset + * @hw: pointer to the HW struct + * @offset: offset from beginning of NVM + * @length: (in) number of bytes to read; (out) number of bytes actually read + * @data: buffer to return data in (sized to fit the specified length) + * @read_shadow_ram: if true, read from shadow RAM instead of NVM + * + * Reads a portion of the NVM, as a flat memory space. This function correctly + * breaks read requests across Shadow RAM sectors and ensures that no single + * read request exceeds the maximum 4Kb read for a single AdminQ command. + * + * Returns a status code on failure. Note that the data pointer may be + * partially updated if some reads succeed before a failure. + */ +enum ice_status +ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data, + bool read_shadow_ram) +{ + enum ice_status status; + u32 inlen = *length; + u32 bytes_read = 0; + bool last_cmd; + + ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); + + *length = 0; + + /* Verify the length of the read if this is for the Shadow RAM */ + if (read_shadow_ram && ((offset + inlen) > (hw->nvm.sr_words * 2u))) { + ice_debug(hw, ICE_DBG_NVM, + "NVM error: requested data is beyond Shadow RAM limit\n"); + return ICE_ERR_PARAM; + } + + do { + u32 read_size, sector_offset; + + /* ice_aq_read_nvm cannot read more than 4Kb at a time. + * Additionally, a read from the Shadow RAM may not cross over + * a sector boundary. Conveniently, the sector size is also + * 4Kb. + */ + sector_offset = offset % ICE_AQ_MAX_BUF_LEN; + read_size = MIN_T(u32, ICE_AQ_MAX_BUF_LEN - sector_offset, + inlen - bytes_read); + + last_cmd = !(bytes_read + read_size < inlen); + + /* ice_aq_read_nvm takes the length as a u16. Our read_size is + * calculated using a u32, but the ICE_AQ_MAX_BUF_LEN maximum + * size guarantees that it will fit within the 2 bytes. + */ + status = ice_aq_read_nvm(hw, ICE_AQC_NVM_START_POINT, + offset, (u16)read_size, + data + bytes_read, last_cmd, + read_shadow_ram, NULL); + if (status) + break; + + bytes_read += read_size; + offset += read_size; + } while (!last_cmd); + + *length = bytes_read; + return status; +} + +/** + * ice_read_sr_word_aq - Reads Shadow RAM via AQ + * @hw: pointer to the HW structure + * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF) + * @data: word read from the Shadow RAM + * + * Reads one 16 bit word from the Shadow RAM using ice_read_flat_nvm. 
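+ *
+ * The offset is given in 16 bit Shadow RAM words, so it is converted to a
+ * byte offset (offset * sizeof(u16)) before calling ice_read_flat_nvm with
+ * read_shadow_ram set to true; the little-endian result is then converted
+ * to CPU byte order.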
+ */ +static enum ice_status +ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data) +{ + u32 bytes = sizeof(u16); + enum ice_status status; + __le16 data_local; + + ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); + + /* Note that ice_read_flat_nvm checks if the read is past the Shadow + * RAM size, and ensures we don't read across a Shadow RAM sector + * boundary + */ + status = ice_read_flat_nvm(hw, offset * sizeof(u16), &bytes, + (u8 *)&data_local, true); + if (status) + return status; + + *data = LE16_TO_CPU(data_local); + return ICE_SUCCESS; +} + +/** + * ice_read_sr_buf_aq - Reads Shadow RAM buf via AQ + * @hw: pointer to the HW structure + * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF) + * @words: (in) number of words to read; (out) number of words actually read + * @data: words read from the Shadow RAM + * + * Reads 16 bit words (data buf) from the Shadow RAM. Ownership of the NVM is + * taken before reading the buffer and later released. + */ +static enum ice_status +ice_read_sr_buf_aq(struct ice_hw *hw, u16 offset, u16 *words, u16 *data) +{ + u32 bytes = *words * 2, i; + enum ice_status status; + + ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); + + /* ice_read_flat_nvm takes into account the 4Kb AdminQ and Shadow RAM + * sector restrictions necessary when reading from the NVM. + */ + status = ice_read_flat_nvm(hw, offset * 2, &bytes, (u8 *)data, true); + + /* Report the number of words successfully read */ + *words = bytes / 2; + + /* Byte swap the words up to the amount we actually read */ + for (i = 0; i < *words; i++) + data[i] = LE16_TO_CPU(((_FORCE_ __le16 *)data)[i]); + + return status; +} + +/** + * ice_acquire_nvm - Generic request for acquiring the NVM ownership + * @hw: pointer to the HW structure + * @access: NVM access type (read or write) + * + * This function will request NVM ownership. + */ +static enum ice_status +ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access) +{ + ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); + + if (hw->nvm.blank_nvm_mode) + return ICE_SUCCESS; + + return ice_acquire_res(hw, ICE_NVM_RES_ID, access, ICE_NVM_TIMEOUT); +} + +/** + * ice_release_nvm - Generic request for releasing the NVM ownership + * @hw: pointer to the HW structure + * + * This function will release NVM ownership. + */ +static void ice_release_nvm(struct ice_hw *hw) +{ + ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); + + if (hw->nvm.blank_nvm_mode) + return; + + ice_release_res(hw, ICE_NVM_RES_ID); +} + +/** + * ice_read_sr_word - Reads Shadow RAM word and acquire NVM if necessary + * @hw: pointer to the HW structure + * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF) + * @data: word read from the Shadow RAM + * + * Reads one 16 bit word from the Shadow RAM using the ice_read_sr_word_aq. + */ +enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data) +{ + enum ice_status status; + + status = ice_acquire_nvm(hw, ICE_RES_READ); + if (!status) { + status = ice_read_sr_word_aq(hw, offset, data); + ice_release_nvm(hw); + } + + return status; +} + +/** + * ice_get_pfa_module_tlv - Reads sub module TLV from NVM PFA + * @hw: pointer to hardware structure + * @module_tlv: pointer to module TLV to return + * @module_tlv_len: pointer to module TLV length to return + * @module_type: module type requested + * + * Finds the requested sub module TLV type from the Preserved Field + * Area (PFA) and returns the TLV pointer and length. The caller can + * use these to read the variable length TLV value. 
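+ *
+ * For example, ice_get_orom_ver_info() below uses this helper with
+ * ICE_SR_BOOT_CFG_PTR to locate the Boot Configuration Block TLV and then
+ * reads the TLV words with ice_read_sr_word().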
+ */ +enum ice_status +ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len, + u16 module_type) +{ + enum ice_status status; + u16 pfa_len, pfa_ptr; + u16 next_tlv; + + status = ice_read_sr_word(hw, ICE_SR_PFA_PTR, &pfa_ptr); + if (status != ICE_SUCCESS) { + ice_debug(hw, ICE_DBG_INIT, "Preserved Field Array pointer.\n"); + return status; + } + status = ice_read_sr_word(hw, pfa_ptr, &pfa_len); + if (status != ICE_SUCCESS) { + ice_debug(hw, ICE_DBG_INIT, "Failed to read PFA length.\n"); + return status; + } + /* Starting with first TLV after PFA length, iterate through the list + * of TLVs to find the requested one. + */ + next_tlv = pfa_ptr + 1; + while (next_tlv < pfa_ptr + pfa_len) { + u16 tlv_sub_module_type; + u16 tlv_len; + + /* Read TLV type */ + status = ice_read_sr_word(hw, next_tlv, &tlv_sub_module_type); + if (status != ICE_SUCCESS) { + ice_debug(hw, ICE_DBG_INIT, "Failed to read TLV type.\n"); + break; + } + /* Read TLV length */ + status = ice_read_sr_word(hw, next_tlv + 1, &tlv_len); + if (status != ICE_SUCCESS) { + ice_debug(hw, ICE_DBG_INIT, "Failed to read TLV length.\n"); + break; + } + if (tlv_sub_module_type == module_type) { + if (tlv_len) { + *module_tlv = next_tlv; + *module_tlv_len = tlv_len; + return ICE_SUCCESS; + } + return ICE_ERR_INVAL_SIZE; + } + /* Check next TLV, i.e. current TLV pointer + length + 2 words + * (for current TLV's type and length) + */ + next_tlv = next_tlv + tlv_len + 2; + } + /* Module does not exist */ + return ICE_ERR_DOES_NOT_EXIST; +} + +/** + * ice_get_orom_ver_info - Read Option ROM version information + * @hw: pointer to the HW struct + * + * Read the Combo Image version data from the Boot Configuration TLV and fill + * in the option ROM version data. + */ +static enum ice_status ice_get_orom_ver_info(struct ice_hw *hw) +{ + u16 combo_hi, combo_lo, boot_cfg_tlv, boot_cfg_tlv_len; + struct ice_orom_info *orom = &hw->nvm.orom; + enum ice_status status; + u32 combo_ver; + + status = ice_get_pfa_module_tlv(hw, &boot_cfg_tlv, &boot_cfg_tlv_len, + ICE_SR_BOOT_CFG_PTR); + if (status) { + ice_debug(hw, ICE_DBG_INIT, + "Failed to read Boot Configuration Block TLV.\n"); + return status; + } + + /* Boot Configuration Block must have length at least 2 words + * (Combo Image Version High and Combo Image Version Low) + */ + if (boot_cfg_tlv_len < 2) { + ice_debug(hw, ICE_DBG_INIT, + "Invalid Boot Configuration Block TLV size.\n"); + return ICE_ERR_INVAL_SIZE; + } + + status = ice_read_sr_word(hw, (boot_cfg_tlv + ICE_NVM_OROM_VER_OFF), + &combo_hi); + if (status) { + ice_debug(hw, ICE_DBG_INIT, "Failed to read OROM_VER hi.\n"); + return status; + } + + status = ice_read_sr_word(hw, (boot_cfg_tlv + ICE_NVM_OROM_VER_OFF + 1), + &combo_lo); + if (status) { + ice_debug(hw, ICE_DBG_INIT, "Failed to read OROM_VER lo.\n"); + return status; + } + + combo_ver = ((u32)combo_hi << 16) | combo_lo; + + orom->major = (u8)((combo_ver & ICE_OROM_VER_MASK) >> + ICE_OROM_VER_SHIFT); + orom->patch = (u8)(combo_ver & ICE_OROM_VER_PATCH_MASK); + orom->build = (u16)((combo_ver & ICE_OROM_VER_BUILD_MASK) >> + ICE_OROM_VER_BUILD_SHIFT); + + return ICE_SUCCESS; +} + +/** + * ice_discover_flash_size - Discover the available flash size. + * @hw: pointer to the HW struct + * + * The device flash could be up to 16MB in size. However, it is possible that + * the actual size is smaller. Use bisection to determine the accessible size + * of flash memory. 
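+ *
+ * Each step probes the midpoint of the current range with a one byte read:
+ * a successful read raises the lower bound, an EINVAL AdminQ error lowers
+ * the upper bound, and the search stops once the bounds are adjacent, at
+ * which point the upper bound is reported as the flash size.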
+ */ +static enum ice_status ice_discover_flash_size(struct ice_hw *hw) +{ + u32 min_size = 0, max_size = ICE_AQC_NVM_MAX_OFFSET + 1; + enum ice_status status; + + ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); + + status = ice_acquire_nvm(hw, ICE_RES_READ); + if (status) + return status; + + while ((max_size - min_size) > 1) { + u32 offset = (max_size + min_size) / 2; + u32 len = 1; + u8 data; + + status = ice_read_flat_nvm(hw, offset, &len, &data, false); + if (status == ICE_ERR_AQ_ERROR && + hw->adminq.sq_last_status == ICE_AQ_RC_EINVAL) { + ice_debug(hw, ICE_DBG_NVM, + "%s: New upper bound of %u bytes\n", + __func__, offset); + status = ICE_SUCCESS; + max_size = offset; + } else if (!status) { + ice_debug(hw, ICE_DBG_NVM, + "%s: New lower bound of %u bytes\n", + __func__, offset); + min_size = offset; + } else { + /* an unexpected error occurred */ + goto err_read_flat_nvm; + } + } + + ice_debug(hw, ICE_DBG_NVM, + "Predicted flash size is %u bytes\n", max_size); + + hw->nvm.flash_size = max_size; + +err_read_flat_nvm: + ice_release_nvm(hw); + + return status; +} + +/** + * ice_init_nvm - initializes NVM setting + * @hw: pointer to the HW struct + * + * This function reads and populates NVM settings such as Shadow RAM size, + * max_timeout, and blank_nvm_mode + */ +enum ice_status ice_init_nvm(struct ice_hw *hw) +{ + struct ice_nvm_info *nvm = &hw->nvm; + u16 eetrack_lo, eetrack_hi, ver; + enum ice_status status; + u32 fla, gens_stat; + u8 sr_size; + + ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); + + /* The SR size is stored regardless of the NVM programming mode + * as the blank mode may be used in the factory line. + */ + gens_stat = rd32(hw, GLNVM_GENS); + sr_size = (gens_stat & GLNVM_GENS_SR_SIZE_M) >> GLNVM_GENS_SR_SIZE_S; + + /* Switching to words (sr_size contains power of 2) */ + nvm->sr_words = BIT(sr_size) * ICE_SR_WORDS_IN_1KB; + + /* Check if we are in the normal or blank NVM programming mode */ + fla = rd32(hw, GLNVM_FLA); + if (fla & GLNVM_FLA_LOCKED_M) { /* Normal programming mode */ + nvm->blank_nvm_mode = false; + } else { + /* Blank programming mode */ + nvm->blank_nvm_mode = true; + ice_debug(hw, ICE_DBG_NVM, + "NVM init error: unsupported blank mode.\n"); + return ICE_ERR_NVM_BLANK_MODE; + } + + status = ice_read_sr_word(hw, ICE_SR_NVM_DEV_STARTER_VER, &ver); + if (status) { + ice_debug(hw, ICE_DBG_INIT, + "Failed to read DEV starter version.\n"); + return status; + } + nvm->major_ver = (ver & ICE_NVM_VER_HI_MASK) >> ICE_NVM_VER_HI_SHIFT; + nvm->minor_ver = (ver & ICE_NVM_VER_LO_MASK) >> ICE_NVM_VER_LO_SHIFT; + + status = ice_read_sr_word(hw, ICE_SR_NVM_EETRACK_LO, &eetrack_lo); + if (status) { + ice_debug(hw, ICE_DBG_INIT, "Failed to read EETRACK lo.\n"); + return status; + } + status = ice_read_sr_word(hw, ICE_SR_NVM_EETRACK_HI, &eetrack_hi); + if (status) { + ice_debug(hw, ICE_DBG_INIT, "Failed to read EETRACK hi.\n"); + return status; + } + + nvm->eetrack = (eetrack_hi << 16) | eetrack_lo; + + status = ice_discover_flash_size(hw); + if (status) { + ice_debug(hw, ICE_DBG_NVM, + "NVM init error: failed to discover flash size.\n"); + return status; + } + + switch (hw->device_id) { + /* the following devices do not have boot_cfg_tlv yet */ + case ICE_DEV_ID_E822C_BACKPLANE: + case ICE_DEV_ID_E822C_QSFP: + case ICE_DEV_ID_E822C_10G_BASE_T: + case ICE_DEV_ID_E822C_SGMII: + case ICE_DEV_ID_E822C_SFP: + case ICE_DEV_ID_E822L_BACKPLANE: + case ICE_DEV_ID_E822L_SFP: + case ICE_DEV_ID_E822L_10G_BASE_T: + case ICE_DEV_ID_E822L_SGMII: + return status; + default: + break; 
+ } + + status = ice_get_orom_ver_info(hw); + if (status) { + ice_debug(hw, ICE_DBG_INIT, "Failed to read Option ROM info.\n"); + return status; + } + + return ICE_SUCCESS; +} + +/** + * ice_read_sr_buf - Reads Shadow RAM buf and acquire lock if necessary + * @hw: pointer to the HW structure + * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF) + * @words: (in) number of words to read; (out) number of words actually read + * @data: words read from the Shadow RAM + * + * Reads 16 bit words (data buf) from the SR using the ice_read_nvm_buf_aq + * method. The buf read is preceded by the NVM ownership take + * and followed by the release. + */ +enum ice_status +ice_read_sr_buf(struct ice_hw *hw, u16 offset, u16 *words, u16 *data) +{ + enum ice_status status; + + status = ice_acquire_nvm(hw, ICE_RES_READ); + if (!status) { + status = ice_read_sr_buf_aq(hw, offset, words, data); + ice_release_nvm(hw); + } + + return status; +} + +/** + * ice_nvm_validate_checksum + * @hw: pointer to the HW struct + * + * Verify NVM PFA checksum validity (0x0706) + */ +enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw) +{ + struct ice_aqc_nvm_checksum *cmd; + struct ice_aq_desc desc; + enum ice_status status; + + status = ice_acquire_nvm(hw, ICE_RES_READ); + if (status) + return status; + + cmd = &desc.params.nvm_checksum; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_checksum); + cmd->flags = ICE_AQC_NVM_CHECKSUM_VERIFY; + + status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); + ice_release_nvm(hw); + + if (!status) + if (LE16_TO_CPU(cmd->checksum) != ICE_AQC_NVM_CHECKSUM_CORRECT) + status = ICE_ERR_NVM_CHECKSUM; + + return status; +} + +/** + * ice_nvm_access_get_features - Return the NVM access features structure + * @cmd: NVM access command to process + * @data: storage for the driver NVM features + * + * Fill in the data section of the NVM access request with a copy of the NVM + * features structure. + */ +enum ice_status +ice_nvm_access_get_features(struct ice_nvm_access_cmd *cmd, + union ice_nvm_access_data *data) +{ + /* The provided data_size must be at least as large as our NVM + * features structure. A larger size should not be treated as an + * error, to allow future extensions to to the features structure to + * work on older drivers. + */ + if (cmd->data_size < sizeof(struct ice_nvm_features)) + return ICE_ERR_NO_MEMORY; + + /* Initialize the data buffer to zeros */ + ice_memset(data, 0, cmd->data_size, ICE_NONDMA_MEM); + + /* Fill in the features data */ + data->drv_features.major = ICE_NVM_ACCESS_MAJOR_VER; + data->drv_features.minor = ICE_NVM_ACCESS_MINOR_VER; + data->drv_features.size = sizeof(struct ice_nvm_features); + data->drv_features.features[0] = ICE_NVM_FEATURES_0_REG_ACCESS; + + return ICE_SUCCESS; +} + +/** + * ice_nvm_access_get_module - Helper function to read module value + * @cmd: NVM access command structure + * + * Reads the module value out of the NVM access config field. + */ +u32 ice_nvm_access_get_module(struct ice_nvm_access_cmd *cmd) +{ + return ((cmd->config & ICE_NVM_CFG_MODULE_M) >> ICE_NVM_CFG_MODULE_S); +} + +/** + * ice_nvm_access_get_flags - Helper function to read flags value + * @cmd: NVM access command structure + * + * Reads the flags value out of the NVM access config field. 
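+ *
+ * Illustrative example: a config word of 0x1593010E decodes, using the masks
+ * in ice_nvm.h, to module 0x0E, flags 0x1, extended flags 0x0 and adapter
+ * info 0x1593 (the adapter info must match hw->device_id, see
+ * ice_handle_nvm_access()).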
+ */ +u32 ice_nvm_access_get_flags(struct ice_nvm_access_cmd *cmd) +{ + return ((cmd->config & ICE_NVM_CFG_FLAGS_M) >> ICE_NVM_CFG_FLAGS_S); +} + +/** + * ice_nvm_access_get_adapter - Helper function to read adapter info + * @cmd: NVM access command structure + * + * Read the adapter info value out of the NVM access config field. + */ +u32 ice_nvm_access_get_adapter(struct ice_nvm_access_cmd *cmd) +{ + return ((cmd->config & ICE_NVM_CFG_ADAPTER_INFO_M) >> + ICE_NVM_CFG_ADAPTER_INFO_S); +} + +/** + * ice_validate_nvm_rw_reg - Check that an NVM access request is valid + * @cmd: NVM access command structure + * + * Validates that an NVM access structure is a request to read or write a valid + * register offset. First validates that the module and flags are correct, and + * then ensures that the register offset is one of the accepted registers. + */ +static enum ice_status +ice_validate_nvm_rw_reg(struct ice_nvm_access_cmd *cmd) +{ + u32 module, flags, offset; + u16 i; + + module = ice_nvm_access_get_module(cmd); + flags = ice_nvm_access_get_flags(cmd); + offset = cmd->offset; + + /* Make sure the module and flags indicate a read/write request */ + if (module != ICE_NVM_REG_RW_MODULE || + flags != ICE_NVM_REG_RW_FLAGS || + cmd->data_size != FIELD_SIZEOF(union ice_nvm_access_data, regval)) + return ICE_ERR_PARAM; + + switch (offset) { + case GL_HICR: + case GL_HICR_EN: /* Note, this register is read only */ + case GL_FWSTS: + case GL_MNG_FWSM: + case GLGEN_CSR_DEBUG_C: + case GLGEN_RSTAT: + case GLPCI_LBARCTRL: + case GLNVM_GENS: + case GLNVM_FLA: + case PF_FUNC_RID: + return ICE_SUCCESS; + default: + break; + } + + for (i = 0; i <= ICE_NVM_ACCESS_GL_HIDA_MAX; i++) + if (offset == (u32)GL_HIDA(i)) + return ICE_SUCCESS; + + for (i = 0; i <= ICE_NVM_ACCESS_GL_HIBA_MAX; i++) + if (offset == (u32)GL_HIBA(i)) + return ICE_SUCCESS; + + /* All other register offsets are not valid */ + return ICE_ERR_OUT_OF_RANGE; +} + +/** + * ice_nvm_access_read - Handle an NVM read request + * @hw: pointer to the HW struct + * @cmd: NVM access command to process + * @data: storage for the register value read + * + * Process an NVM access request to read a register. + */ +enum ice_status +ice_nvm_access_read(struct ice_hw *hw, struct ice_nvm_access_cmd *cmd, + union ice_nvm_access_data *data) +{ + enum ice_status status; + + ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); + + /* Always initialize the output data, even on failure */ + ice_memset(data, 0, cmd->data_size, ICE_NONDMA_MEM); + + /* Make sure this is a valid read/write access request */ + status = ice_validate_nvm_rw_reg(cmd); + if (status) + return status; + + ice_debug(hw, ICE_DBG_NVM, "NVM access: reading register %08x\n", + cmd->offset); + + /* Read the register and store the contents in the data field */ + data->regval = rd32(hw, cmd->offset); + + return ICE_SUCCESS; +} + +/** + * ice_nvm_access_write - Handle an NVM write request + * @hw: pointer to the HW struct + * @cmd: NVM access command to process + * @data: NVM access data to write + * + * Process an NVM access request to write a register.
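+ * The request is validated with ice_validate_nvm_rw_reg() first, and writes
+ * to the read-only registers GL_HICR_EN and GLGEN_RSTAT are rejected with
+ * ICE_ERR_OUT_OF_RANGE.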
+ */ +enum ice_status +ice_nvm_access_write(struct ice_hw *hw, struct ice_nvm_access_cmd *cmd, + union ice_nvm_access_data *data) +{ + enum ice_status status; + + ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); + + /* Make sure this is a valid read/write access request */ + status = ice_validate_nvm_rw_reg(cmd); + if (status) + return status; + + /* Reject requests to write to read-only registers */ + switch (cmd->offset) { + case GL_HICR_EN: + case GLGEN_RSTAT: + return ICE_ERR_OUT_OF_RANGE; + default: + break; + } + + ice_debug(hw, ICE_DBG_NVM, + "NVM access: writing register %08x with value %08x\n", + cmd->offset, data->regval); + + /* Write the data field to the specified register */ + wr32(hw, cmd->offset, data->regval); + + return ICE_SUCCESS; +} + +/** + * ice_handle_nvm_access - Handle an NVM access request + * @hw: pointer to the HW struct + * @cmd: NVM access command info + * @data: pointer to read or return data + * + * Process an NVM access request. Read the command structure information and + * determine if it is valid. If not, report an error indicating the command + * was invalid. + * + * For valid commands, perform the necessary function, copying the data into + * the provided data buffer. + */ +enum ice_status +ice_handle_nvm_access(struct ice_hw *hw, struct ice_nvm_access_cmd *cmd, + union ice_nvm_access_data *data) +{ + u32 module, flags, adapter_info; + + ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); + + /* Extended flags are currently reserved and must be zero */ + if ((cmd->config & ICE_NVM_CFG_EXT_FLAGS_M) != 0) + return ICE_ERR_PARAM; + + /* Adapter info must match the HW device ID */ + adapter_info = ice_nvm_access_get_adapter(cmd); + if (adapter_info != hw->device_id) + return ICE_ERR_PARAM; + + switch (cmd->command) { + case ICE_NVM_CMD_READ: + module = ice_nvm_access_get_module(cmd); + flags = ice_nvm_access_get_flags(cmd); + + /* Getting the driver's NVM features structure shares the same + * command type as reading a register. Read the config field + * to determine if this is a request to get features. + */ + if (module == ICE_NVM_GET_FEATURES_MODULE && + flags == ICE_NVM_GET_FEATURES_FLAGS && + cmd->offset == 0) + return ice_nvm_access_get_features(cmd, data); + else + return ice_nvm_access_read(hw, cmd, data); + case ICE_NVM_CMD_WRITE: + return ice_nvm_access_write(hw, cmd, data); + default: + return ICE_ERR_PARAM; + } +} diff --git a/src/spdk/dpdk/drivers/net/ice/base/ice_nvm.h b/src/spdk/dpdk/drivers/net/ice/base/ice_nvm.h new file mode 100644 index 000000000..9a61d4153 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ice/base/ice_nvm.h @@ -0,0 +1,97 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + +#ifndef _ICE_NVM_H_ +#define _ICE_NVM_H_ + +#define ICE_NVM_CMD_READ 0x0000000B +#define ICE_NVM_CMD_WRITE 0x0000000C + +/* NVM Access config bits */ +#define ICE_NVM_CFG_MODULE_M MAKEMASK(0xFF, 0) +#define ICE_NVM_CFG_MODULE_S 0 +#define ICE_NVM_CFG_FLAGS_M MAKEMASK(0xF, 8) +#define ICE_NVM_CFG_FLAGS_S 8 +#define ICE_NVM_CFG_EXT_FLAGS_M MAKEMASK(0xF, 12) +#define ICE_NVM_CFG_EXT_FLAGS_S 12 +#define ICE_NVM_CFG_ADAPTER_INFO_M MAKEMASK(0xFFFF, 16) +#define ICE_NVM_CFG_ADAPTER_INFO_S 16 + +/* NVM Read Get Driver Features */ +#define ICE_NVM_GET_FEATURES_MODULE 0xE +#define ICE_NVM_GET_FEATURES_FLAGS 0xF + +/* NVM Read/Write Mapped Space */ +#define ICE_NVM_REG_RW_MODULE 0x0 +#define ICE_NVM_REG_RW_FLAGS 0x1 + +#define ICE_NVM_ACCESS_MAJOR_VER 0 +#define ICE_NVM_ACCESS_MINOR_VER 5 + +/* NVM Access feature flags. 
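+ * ICE_NVM_FEATURES_0_REG_ACCESS (BIT(1)) advertises support for the mapped
+ * register read/write requests serviced by ice_nvm_access_read() and
+ * ice_nvm_access_write().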
Other bits in the features field are reserved and + * should be set to zero when reporting the ice_nvm_features structure. + */ +#define ICE_NVM_FEATURES_0_REG_ACCESS BIT(1) + +/* NVM Access Features */ +struct ice_nvm_features { + u8 major; /* Major version (informational only) */ + u8 minor; /* Minor version (informational only) */ + u16 size; /* size of ice_nvm_features structure */ + u8 features[12]; /* Array of feature bits */ +}; + +/* NVM Access command */ +struct ice_nvm_access_cmd { + u32 command; /* NVM command: READ or WRITE */ + u32 config; /* NVM command configuration */ + u32 offset; /* offset to read/write, in bytes */ + u32 data_size; /* size of data field, in bytes */ +}; + +/* NVM Access data */ +union ice_nvm_access_data { + u32 regval; /* Storage for register value */ + struct ice_nvm_features drv_features; /* NVM features */ +}; + +/* NVM Access registers */ +#define GL_HIDA(_i) (0x00082000 + ((_i) * 4)) +#define GL_HIBA(_i) (0x00081000 + ((_i) * 4)) +#define GL_HICR 0x00082040 +#define GL_HICR_EN 0x00082044 +#define GLGEN_CSR_DEBUG_C 0x00075750 +#define GLPCI_LBARCTRL 0x0009DE74 +#define GLNVM_GENS 0x000B6100 +#define GLNVM_FLA 0x000B6108 + +#define ICE_NVM_ACCESS_GL_HIDA_MAX 15 +#define ICE_NVM_ACCESS_GL_HIBA_MAX 1023 + +u32 ice_nvm_access_get_module(struct ice_nvm_access_cmd *cmd); +u32 ice_nvm_access_get_flags(struct ice_nvm_access_cmd *cmd); +u32 ice_nvm_access_get_adapter(struct ice_nvm_access_cmd *cmd); +enum ice_status +ice_nvm_access_read(struct ice_hw *hw, struct ice_nvm_access_cmd *cmd, + union ice_nvm_access_data *data); +enum ice_status +ice_nvm_access_write(struct ice_hw *hw, struct ice_nvm_access_cmd *cmd, + union ice_nvm_access_data *data); +enum ice_status +ice_nvm_access_get_features(struct ice_nvm_access_cmd *cmd, + union ice_nvm_access_data *data); +enum ice_status +ice_handle_nvm_access(struct ice_hw *hw, struct ice_nvm_access_cmd *cmd, + union ice_nvm_access_data *data); +enum ice_status +ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data, + bool read_shadow_ram); +enum ice_status +ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len, + u16 module_type); +enum ice_status ice_init_nvm(struct ice_hw *hw); +enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data); +enum ice_status +ice_read_sr_buf(struct ice_hw *hw, u16 offset, u16 *words, u16 *data); +#endif /* _ICE_NVM_H_ */ diff --git a/src/spdk/dpdk/drivers/net/ice/base/ice_osdep.h b/src/spdk/dpdk/drivers/net/ice/base/ice_osdep.h new file mode 100644 index 000000000..360e435b8 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ice/base/ice_osdep.h @@ -0,0 +1,427 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018-2020 Intel Corporation + */ + +#ifndef _ICE_OSDEP_H_ +#define _ICE_OSDEP_H_ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ice_alloc.h" + +#include "../ice_logs.h" + +#ifndef __INTEL_NET_BASE_OSDEP__ +#define __INTEL_NET_BASE_OSDEP__ + +#define INLINE inline +#define STATIC static + +typedef uint8_t u8; +typedef int8_t s8; +typedef uint16_t u16; +typedef int16_t s16; +typedef uint32_t u32; +typedef int32_t s32; +typedef uint64_t u64; +typedef uint64_t s64; + +#ifndef __le16 +#define __le16 uint16_t +#endif +#ifndef __le32 +#define __le32 uint32_t +#endif +#ifndef __le64 +#define __le64 uint64_t +#endif +#ifndef __be16 +#define __be16 uint16_t +#endif +#ifndef __be32 +#define 
__be32 uint32_t +#endif +#ifndef __be64 +#define __be64 uint64_t +#endif + +#define min(a, b) RTE_MIN(a, b) +#define max(a, b) RTE_MAX(a, b) + +#define FIELD_SIZEOF(t, f) RTE_SIZEOF_FIELD(t, f) +#define ARRAY_SIZE(arr) RTE_DIM(arr) + +#define CPU_TO_LE16(o) rte_cpu_to_le_16(o) +#define CPU_TO_LE32(s) rte_cpu_to_le_32(s) +#define CPU_TO_LE64(h) rte_cpu_to_le_64(h) +#define LE16_TO_CPU(a) rte_le_to_cpu_16(a) +#define LE32_TO_CPU(c) rte_le_to_cpu_32(c) +#define LE64_TO_CPU(k) rte_le_to_cpu_64(k) + +#define CPU_TO_BE16(o) rte_cpu_to_be_16(o) +#define CPU_TO_BE32(o) rte_cpu_to_be_32(o) +#define CPU_TO_BE64(o) rte_cpu_to_be_64(o) +#define BE16_TO_CPU(o) rte_be_to_cpu_16(o) + +#define NTOHS(a) rte_be_to_cpu_16(a) +#define NTOHL(a) rte_be_to_cpu_32(a) +#define HTONS(a) rte_cpu_to_be_16(a) +#define HTONL(a) rte_cpu_to_be_32(a) + +static __rte_always_inline uint32_t +readl(volatile void *addr) +{ + return rte_le_to_cpu_32(rte_read32(addr)); +} + +static __rte_always_inline void +writel(uint32_t value, volatile void *addr) +{ + rte_write32(rte_cpu_to_le_32(value), addr); +} + +static __rte_always_inline void +writel_relaxed(uint32_t value, volatile void *addr) +{ + rte_write32_relaxed(rte_cpu_to_le_32(value), addr); +} + +static __rte_always_inline uint64_t +readq(volatile void *addr) +{ + return rte_le_to_cpu_64(rte_read64(addr)); +} + +static __rte_always_inline void +writeq(uint64_t value, volatile void *addr) +{ + rte_write64(rte_cpu_to_le_64(value), addr); +} + +#define wr32(a, reg, value) writel((value), (a)->hw_addr + (reg)) +#define rd32(a, reg) readl((a)->hw_addr + (reg)) +#define wr64(a, reg, value) writeq((value), (a)->hw_addr + (reg)) +#define rd64(a, reg) readq((a)->hw_addr + (reg)) + +#endif /* __INTEL_NET_BASE_OSDEP__ */ + +#ifndef __always_unused +#define __always_unused __rte_unused +#endif +#ifndef __maybe_unused +#define __maybe_unused __rte_unused +#endif +#ifndef __packed +#define __packed __rte_packed +#endif + +#ifndef BIT_ULL +#define BIT_ULL(a) (1ULL << (a)) +#endif + +#define MAKEMASK(m, s) ((m) << (s)) + +#define ice_debug(h, m, s, ...) \ +do { \ + if (((m) & (h)->debug_mask)) \ + PMD_DRV_LOG_RAW(DEBUG, "ice %02x.%x " s, \ + (h)->bus.device, (h)->bus.func, \ + ##__VA_ARGS__); \ +} while (0) + +#define ice_info(hw, fmt, args...) ice_debug(hw, ICE_DBG_ALL, fmt, ##args) +#define ice_warn(hw, fmt, args...) 
ice_debug(hw, ICE_DBG_ALL, fmt, ##args) +#define ice_debug_array(hw, type, rowsize, groupsize, buf, len) \ +do { \ + struct ice_hw *hw_l = hw; \ + u16 len_l = len; \ + u8 *buf_l = buf; \ + int i; \ + for (i = 0; i < len_l; i += 8) \ + ice_debug(hw_l, type, \ + "0x%04X 0x%016"PRIx64"\n", \ + i, *((u64 *)((buf_l) + i))); \ +} while (0) +#define ice_snprintf snprintf +#ifndef SNPRINTF +#define SNPRINTF ice_snprintf +#endif + +#define ICE_PCI_REG_WRITE(reg, value) writel(value, reg) + +#define ICE_READ_REG(hw, reg) rd32(hw, reg) +#define ICE_WRITE_REG(hw, reg, value) wr32(hw, reg, value) + +#define ice_flush(a) ICE_READ_REG((a), GLGEN_STAT) +#define icevf_flush(a) ICE_READ_REG((a), VFGEN_RSTAT) + +#define flush(a) ICE_READ_REG((a), GLGEN_STAT) +#define div64_long(n, d) ((n) / (d)) + +#define BITS_PER_BYTE 8 + +/* memory allocation tracking */ +struct ice_dma_mem { + void *va; + u64 pa; + u32 size; + const void *zone; +} __rte_packed; + +struct ice_virt_mem { + void *va; + u32 size; +} __rte_packed; + +#define ice_malloc(h, s) rte_zmalloc(NULL, s, 0) +#define ice_calloc(h, c, s) rte_zmalloc(NULL, (c) * (s), 0) +#define ice_free(h, m) rte_free(m) + +#define ice_memset(a, b, c, d) memset((a), (b), (c)) +#define ice_memcpy(a, b, c, d) rte_memcpy((a), (b), (c)) + +/* SW spinlock */ +struct ice_lock { + rte_spinlock_t spinlock; +}; + +static inline void +ice_init_lock(struct ice_lock *sp) +{ + rte_spinlock_init(&sp->spinlock); +} + +static inline void +ice_acquire_lock(struct ice_lock *sp) +{ + rte_spinlock_lock(&sp->spinlock); +} + +static inline void +ice_release_lock(struct ice_lock *sp) +{ + rte_spinlock_unlock(&sp->spinlock); +} + +static inline void +ice_destroy_lock(__rte_unused struct ice_lock *sp) +{ +} + +struct ice_hw; + +static __rte_always_inline void * +ice_memdup(__rte_unused struct ice_hw *hw, const void *src, size_t size, + __rte_unused enum ice_memcpy_type dir) +{ + void *p; + + p = ice_malloc(hw, size); + if (p) + rte_memcpy(p, src, size); + + return p; +} + +static inline void * +ice_alloc_dma_mem(__rte_unused struct ice_hw *hw, + struct ice_dma_mem *mem, u64 size) +{ + const struct rte_memzone *mz = NULL; + char z_name[RTE_MEMZONE_NAMESIZE]; + + if (!mem) + return NULL; + + snprintf(z_name, sizeof(z_name), "ice_dma_%"PRIu64, rte_rand()); + mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY, 0, + 0, RTE_PGSIZE_2M); + if (!mz) + return NULL; + + mem->size = size; + mem->va = mz->addr; + mem->pa = mz->phys_addr; + mem->zone = (const void *)mz; + PMD_DRV_LOG(DEBUG, "memzone %s allocated with physical address: " + "%"PRIu64, mz->name, mem->pa); + + return mem->va; +} + +static inline void +ice_free_dma_mem(__rte_unused struct ice_hw *hw, + struct ice_dma_mem *mem) +{ + PMD_DRV_LOG(DEBUG, "memzone %s to be freed with physical address: " + "%"PRIu64, ((const struct rte_memzone *)mem->zone)->name, + mem->pa); + rte_memzone_free((const struct rte_memzone *)mem->zone); + mem->zone = NULL; + mem->va = NULL; + mem->pa = (u64)0; +} + +static inline u8 +ice_hweight8(u32 num) +{ + u8 bits = 0; + u32 i; + + for (i = 0; i < 8; i++) { + bits += (u8)(num & 0x1); + num >>= 1; + } + + return bits; +} + +static inline u8 +ice_hweight32(u32 num) +{ + u8 bits = 0; + u32 i; + + for (i = 0; i < 32; i++) { + bits += (u8)(num & 0x1); + num >>= 1; + } + + return bits; +} + +#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d)) +#define DELAY(x) rte_delay_us(x) +#define ice_usec_delay(x, y) rte_delay_us(x) +#define ice_msec_delay(x, y) rte_delay_us(1000 * (x)) +#define udelay(x) DELAY(x) +#define 
msleep(x) DELAY(1000 * (x)) +#define usleep_range(min, max) msleep(DIV_ROUND_UP(min, 1000)) + +struct ice_list_entry { + LIST_ENTRY(ice_list_entry) next; +}; + +LIST_HEAD(ice_list_head, ice_list_entry); + +#define LIST_ENTRY_TYPE ice_list_entry +#define LIST_HEAD_TYPE ice_list_head +#define INIT_LIST_HEAD(list_head) LIST_INIT(list_head) +#define LIST_DEL(entry) LIST_REMOVE(entry, next) +/* LIST_EMPTY(list_head)) the same in sys/queue.h */ + +/*Note parameters are swapped*/ +#define LIST_FIRST_ENTRY(head, type, field) (type *)((head)->lh_first) +#define LIST_NEXT_ENTRY(entry, type, field) \ + ((type *)(entry)->field.next.le_next) +#define LIST_ADD(entry, list_head) LIST_INSERT_HEAD(list_head, entry, next) +#define LIST_ADD_AFTER(entry, list_entry) \ + LIST_INSERT_AFTER(list_entry, entry, next) + +static inline void list_add_tail(struct ice_list_entry *entry, + struct ice_list_head *head) +{ + struct ice_list_entry *tail = head->lh_first; + + if (tail == NULL) { + LIST_INSERT_HEAD(head, entry, next); + return; + } + while (tail->next.le_next != NULL) + tail = tail->next.le_next; + LIST_INSERT_AFTER(tail, entry, next); +} + +#define LIST_ADD_TAIL(entry, head) list_add_tail(entry, head) +#define LIST_FOR_EACH_ENTRY(pos, head, type, member) \ + for ((pos) = (head)->lh_first ? \ + container_of((head)->lh_first, struct type, member) : \ + 0; \ + (pos); \ + (pos) = (pos)->member.next.le_next ? \ + container_of((pos)->member.next.le_next, struct type, \ + member) : \ + 0) + +#define LIST_FOR_EACH_ENTRY_SAFE(pos, tmp, head, type, member) \ + for ((pos) = (head)->lh_first ? \ + container_of((head)->lh_first, struct type, member) : \ + 0, \ + (tmp) = (pos) == 0 ? 0 : ((pos)->member.next.le_next ? \ + container_of((pos)->member.next.le_next, struct type, \ + member) : \ + 0); \ + (pos); \ + (pos) = (tmp), \ + (tmp) = (pos) == 0 ? 0 : ((tmp)->member.next.le_next ? \ + container_of((pos)->member.next.le_next, struct type, \ + member) : \ + 0)) + +#define LIST_REPLACE_INIT(list_head, head) do { \ + (head)->lh_first = (list_head)->lh_first; \ + INIT_LIST_HEAD(list_head); \ +} while (0) + +#define HLIST_NODE_TYPE LIST_ENTRY_TYPE +#define HLIST_HEAD_TYPE LIST_HEAD_TYPE +#define INIT_HLIST_HEAD(list_head) INIT_LIST_HEAD(list_head) +#define HLIST_ADD_HEAD(entry, list_head) LIST_ADD(entry, list_head) +#define HLIST_EMPTY(list_head) LIST_EMPTY(list_head) +#define HLIST_DEL(entry) LIST_DEL(entry) +#define HLIST_FOR_EACH_ENTRY(pos, head, type, member) \ + LIST_FOR_EACH_ENTRY(pos, head, type, member) + +#ifndef ICE_DBG_TRACE +#define ICE_DBG_TRACE BIT_ULL(0) +#endif + +#ifndef DIVIDE_AND_ROUND_UP +#define DIVIDE_AND_ROUND_UP(a, b) (((a) + (b) - 1) / (b)) +#endif + +#ifndef ICE_INTEL_VENDOR_ID +#define ICE_INTEL_VENDOR_ID 0x8086 +#endif + +#ifndef IS_UNICAST_ETHER_ADDR +#define IS_UNICAST_ETHER_ADDR(addr) \ + ((bool)((((u8 *)(addr))[0] % ((u8)0x2)) == 0)) +#endif + +#ifndef IS_MULTICAST_ETHER_ADDR +#define IS_MULTICAST_ETHER_ADDR(addr) \ + ((bool)((((u8 *)(addr))[0] % ((u8)0x2)) == 1)) +#endif + +#ifndef IS_BROADCAST_ETHER_ADDR +/* Check whether an address is broadcast. 
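+ * Note: the macro below only compares the first 16 bits of the address.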
*/ +#define IS_BROADCAST_ETHER_ADDR(addr) \ + ((bool)((((u16 *)(addr))[0] == ((u16)0xffff)))) +#endif + +#ifndef IS_ZERO_ETHER_ADDR +#define IS_ZERO_ETHER_ADDR(addr) \ + (((bool)((((u16 *)(addr))[0] == ((u16)0x0)))) && \ + ((bool)((((u16 *)(addr))[1] == ((u16)0x0)))) && \ + ((bool)((((u16 *)(addr))[2] == ((u16)0x0))))) +#endif + +#endif /* _ICE_OSDEP_H_ */ diff --git a/src/spdk/dpdk/drivers/net/ice/base/ice_protocol_type.h b/src/spdk/dpdk/drivers/net/ice/base/ice_protocol_type.h new file mode 100644 index 000000000..b75a340aa --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ice/base/ice_protocol_type.h @@ -0,0 +1,371 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + +#ifndef _ICE_PROTOCOL_TYPE_H_ +#define _ICE_PROTOCOL_TYPE_H_ +#include "ice_flex_type.h" +#define ICE_IPV6_ADDR_LENGTH 16 + +/* Each recipe can match up to 5 different fields. Fields to match can be meta- + * data, values extracted from packet headers, or results from other recipes. + * One of the 5 fields is reserved for matching the switch ID. So, up to 4 + * recipes can provide intermediate results to another one through chaining, + * e.g. recipes 0, 1, 2, and 3 can provide intermediate results to recipe 4. + */ +#define ICE_NUM_WORDS_RECIPE 4 + +/* Max recipes that can be chained */ +#define ICE_MAX_CHAIN_RECIPE 5 + +/* 1 word reserved for switch ID from allowed 5 words. + * So a recipe can have max 4 words. And you can chain 5 such recipes + * together. So maximum words that can be programmed for look up is 5 * 4. + */ +#define ICE_MAX_CHAIN_WORDS (ICE_NUM_WORDS_RECIPE * ICE_MAX_CHAIN_RECIPE) + +/* Field vector index corresponding to chaining */ +#define ICE_CHAIN_FV_INDEX_START 47 + +enum ice_protocol_type { + ICE_MAC_OFOS = 0, + ICE_MAC_IL, + ICE_ETYPE_OL, + ICE_VLAN_OFOS, + ICE_IPV4_OFOS, + ICE_IPV4_IL, + ICE_IPV6_OFOS, + ICE_IPV6_IL, + ICE_TCP_IL, + ICE_UDP_OF, + ICE_UDP_ILOS, + ICE_SCTP_IL, + ICE_VXLAN, + ICE_GENEVE, + ICE_VXLAN_GPE, + ICE_NVGRE, + ICE_GTP, + ICE_PPPOE, + ICE_PFCP, + ICE_L2TPV3, + ICE_ESP, + ICE_AH, + ICE_NAT_T, + ICE_PROTOCOL_LAST +}; + +enum ice_sw_tunnel_type { + ICE_NON_TUN = 0, + ICE_SW_TUN_AND_NON_TUN, + ICE_SW_TUN_VXLAN_GPE, + ICE_SW_TUN_GENEVE, + ICE_SW_TUN_VXLAN, + ICE_SW_TUN_NVGRE, + ICE_SW_TUN_UDP, /* This means all "UDP" tunnel types: VXLAN-GPE, VXLAN + * and GENEVE + */ + ICE_SW_TUN_GTP, + ICE_SW_TUN_PPPOE, + ICE_SW_TUN_IPV4_ESP, + ICE_SW_TUN_IPV6_ESP, + ICE_SW_TUN_IPV4_AH, + ICE_SW_TUN_IPV6_AH, + ICE_SW_TUN_IPV4_NAT_T, + ICE_SW_TUN_IPV6_NAT_T, + ICE_SW_TUN_IPV4_L2TPV3, + ICE_SW_TUN_IPV6_L2TPV3, + ICE_SW_TUN_PROFID_IPV6_ESP, + ICE_SW_TUN_PROFID_IPV6_AH, + ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3, + ICE_SW_TUN_PROFID_IPV6_NAT_T, + ICE_SW_TUN_PROFID_IPV4_PFCP_NODE, + ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION, + ICE_SW_TUN_PROFID_IPV6_PFCP_NODE, + ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION, + ICE_ALL_TUNNELS /* All tunnel types including NVGRE */ +}; + +/* Decoders for ice_prot_id: + * - F: First + * - I: Inner + * - L: Last + * - O: Outer + * - S: Single + */ +enum ice_prot_id { + ICE_PROT_ID_INVAL = 0, + ICE_PROT_MAC_OF_OR_S = 1, + ICE_PROT_MAC_O2 = 2, + ICE_PROT_MAC_IL = 4, + ICE_PROT_MAC_IN_MAC = 7, + ICE_PROT_ETYPE_OL = 9, + ICE_PROT_ETYPE_IL = 10, + ICE_PROT_PAY = 15, + ICE_PROT_EVLAN_O = 16, + ICE_PROT_VLAN_O = 17, + ICE_PROT_VLAN_IF = 18, + ICE_PROT_MPLS_OL_MINUS_1 = 27, + ICE_PROT_MPLS_OL_OR_OS = 28, + ICE_PROT_MPLS_IL = 29, + ICE_PROT_IPV4_OF_OR_S = 32, + ICE_PROT_IPV4_IL = 33, + ICE_PROT_IPV6_OF_OR_S = 40, + ICE_PROT_IPV6_IL = 41, + ICE_PROT_IPV6_FRAG = 
47, + ICE_PROT_TCP_IL = 49, + ICE_PROT_UDP_OF = 52, + ICE_PROT_UDP_IL_OR_S = 53, + ICE_PROT_GRE_OF = 64, + ICE_PROT_NSH_F = 84, + ICE_PROT_ESP_F = 88, + ICE_PROT_ESP_2 = 89, + ICE_PROT_SCTP_IL = 96, + ICE_PROT_ICMP_IL = 98, + ICE_PROT_ICMPV6_IL = 100, + ICE_PROT_VRRP_F = 101, + ICE_PROT_OSPF = 102, + ICE_PROT_PPPOE = 103, + ICE_PROT_L2TPV3 = 104, + ICE_PROT_ATAOE_OF = 114, + ICE_PROT_CTRL_OF = 116, + ICE_PROT_LLDP_OF = 117, + ICE_PROT_ARP_OF = 118, + ICE_PROT_EAPOL_OF = 120, + ICE_PROT_META_ID = 255, /* when offset == metaddata */ + ICE_PROT_INVALID = 255 /* when offset == ICE_FV_OFFSET_INVAL */ +}; + +#define ICE_VNI_OFFSET 12 /* offset of VNI from ICE_PROT_UDP_OF */ + +#define ICE_MAC_OFOS_HW 1 +#define ICE_MAC_IL_HW 4 +#define ICE_ETYPE_OL_HW 9 +#define ICE_VLAN_OL_HW 17 +#define ICE_IPV4_OFOS_HW 32 +#define ICE_IPV4_IL_HW 33 +#define ICE_IPV6_OFOS_HW 40 +#define ICE_IPV6_IL_HW 41 +#define ICE_TCP_IL_HW 49 +#define ICE_UDP_ILOS_HW 53 +#define ICE_ESP_HW 88 +#define ICE_AH_HW 89 +#define ICE_SCTP_IL_HW 96 +#define ICE_PPPOE_HW 103 +#define ICE_L2TPV3_HW 104 + +/* ICE_UDP_OF is used to identify all 3 tunnel types + * VXLAN, GENEVE and VXLAN_GPE. To differentiate further + * need to use flags from the field vector + */ +#define ICE_UDP_OF_HW 52 /* UDP Tunnels */ +#define ICE_GRE_OF_HW 64 /* NVGRE */ +#define ICE_META_DATA_ID_HW 255 /* this is used for tunnel type */ + +#define ICE_MDID_SIZE 2 +#define ICE_TUN_FLAG_MDID 21 +#define ICE_TUN_FLAG_MDID_OFF (ICE_MDID_SIZE * ICE_TUN_FLAG_MDID) +#define ICE_TUN_FLAG_MASK 0xFF +#define ICE_TUN_FLAG_FV_IND 2 + +#define ICE_PROTOCOL_MAX_ENTRIES 16 + +/* Mapping of software defined protocol ID to hardware defined protocol ID */ +struct ice_protocol_entry { + enum ice_protocol_type type; + u8 protocol_id; +}; + +struct ice_ether_hdr { + u8 dst_addr[ETH_ALEN]; + u8 src_addr[ETH_ALEN]; +}; + +struct ice_ethtype_hdr { + __be16 ethtype_id; +}; + +struct ice_ether_vlan_hdr { + u8 dst_addr[ETH_ALEN]; + u8 src_addr[ETH_ALEN]; + __be32 vlan_id; +}; + +struct ice_vlan_hdr { + __be16 vlan; + __be16 type; +}; + +struct ice_ipv4_hdr { + u8 version; + u8 tos; + __be16 total_length; + __be16 id; + __be16 frag_off; + u8 time_to_live; + u8 protocol; + __be16 check; + __be32 src_addr; + __be32 dst_addr; +}; + +struct ice_le_ver_tc_flow { + union { + struct { + u32 flow_label : 20; + u32 tc : 8; + u32 version : 4; + } fld; + u32 val; + } u; +}; + +struct ice_ipv6_hdr { + __be32 be_ver_tc_flow; + __be16 payload_len; + u8 next_hdr; + u8 hop_limit; + u8 src_addr[ICE_IPV6_ADDR_LENGTH]; + u8 dst_addr[ICE_IPV6_ADDR_LENGTH]; +}; + +struct ice_sctp_hdr { + __be16 src_port; + __be16 dst_port; + __be32 verification_tag; + __be32 check; +}; + +struct ice_l4_hdr { + __be16 src_port; + __be16 dst_port; + __be16 len; + __be16 check; +}; + +struct ice_udp_tnl_hdr { + __be16 field; + __be16 proto_type; + __be32 vni; /* only use lower 24-bits */ +}; + +struct ice_udp_gtp_hdr { + u8 flags; + u8 msg_type; + __be16 rsrvd_len; + __be32 teid; + __be16 rsrvd_seq_nbr; + u8 rsrvd_n_pdu_nbr; + u8 rsrvd_next_ext; + u8 rsvrd_ext_len; + u8 pdu_type; + u8 qfi; + u8 rsvrd; +}; + +struct ice_pppoe_hdr { + u8 rsrvd_ver_type; + u8 rsrvd_code; + __be16 session_id; + __be16 length; + __be16 ppp_prot_id; /* control and data only */ +}; + +struct ice_pfcp_hdr { + u8 flags; + u8 msg_type; + __be16 length; + __be64 seid; + __be32 seq; + u8 spare; +}; + +struct ice_l2tpv3_sess_hdr { + __be32 session_id; + __be64 cookie; +}; + +struct ice_esp_hdr { + __be32 spi; + __be32 seq; +}; + +struct ice_ah_hdr { + u8 
next_hdr; + u8 paylen; + __be16 rsrvd; + __be32 spi; + __be32 seq; +}; + +struct ice_nat_t_hdr { + struct ice_esp_hdr esp; +}; + +struct ice_nvgre { + __be16 flags; + __be16 protocol; + __be32 tni_flow; +}; + +union ice_prot_hdr { + struct ice_ether_hdr eth_hdr; + struct ice_ethtype_hdr ethertype; + struct ice_vlan_hdr vlan_hdr; + struct ice_ipv4_hdr ipv4_hdr; + struct ice_ipv6_hdr ipv6_hdr; + struct ice_l4_hdr l4_hdr; + struct ice_sctp_hdr sctp_hdr; + struct ice_udp_tnl_hdr tnl_hdr; + struct ice_nvgre nvgre_hdr; + struct ice_udp_gtp_hdr gtp_hdr; + struct ice_pppoe_hdr pppoe_hdr; + struct ice_pfcp_hdr pfcp_hdr; + struct ice_l2tpv3_sess_hdr l2tpv3_sess_hdr; + struct ice_esp_hdr esp_hdr; + struct ice_ah_hdr ah_hdr; + struct ice_nat_t_hdr nat_t_hdr; +}; + +/* This is mapping table entry that maps every word within a given protocol + * structure to the real byte offset as per the specification of that + * protocol header. + * for e.g. dst address is 3 words in ethertype header and corresponding bytes + * are 0, 2, 3 in the actual packet header and src address is at 4, 6, 8 + */ +struct ice_prot_ext_tbl_entry { + enum ice_protocol_type prot_type; + /* Byte offset into header of given protocol type */ + u8 offs[sizeof(union ice_prot_hdr)]; +}; + +/* Extractions to be looked up for a given recipe */ +struct ice_prot_lkup_ext { + u16 prot_type; + u8 n_val_words; + /* create a buffer to hold max words per recipe */ + u16 field_off[ICE_MAX_CHAIN_WORDS]; + u16 field_mask[ICE_MAX_CHAIN_WORDS]; + + struct ice_fv_word fv_words[ICE_MAX_CHAIN_WORDS]; + + /* Indicate field offsets that have field vector indices assigned */ + ice_declare_bitmap(done, ICE_MAX_CHAIN_WORDS); +}; + +struct ice_pref_recipe_group { + u8 n_val_pairs; /* Number of valid pairs */ + struct ice_fv_word pairs[ICE_NUM_WORDS_RECIPE]; + u16 mask[ICE_NUM_WORDS_RECIPE]; +}; + +struct ice_recp_grp_entry { + struct LIST_ENTRY_TYPE l_entry; + +#define ICE_INVAL_CHAIN_IND 0xFF + u16 rid; + u8 chain_idx; + u16 fv_idx[ICE_NUM_WORDS_RECIPE]; + u16 fv_mask[ICE_NUM_WORDS_RECIPE]; + struct ice_pref_recipe_group r_group; +}; +#endif /* _ICE_PROTOCOL_TYPE_H_ */ diff --git a/src/spdk/dpdk/drivers/net/ice/base/ice_sbq_cmd.h b/src/spdk/dpdk/drivers/net/ice/base/ice_sbq_cmd.h new file mode 100644 index 000000000..22bfcebc3 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ice/base/ice_sbq_cmd.h @@ -0,0 +1,93 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + +#ifndef _ICE_SBQ_CMD_H_ +#define _ICE_SBQ_CMD_H_ + +/* This header file defines the Sideband Queue commands, error codes and + * descriptor format. It is shared between Firmware and Software. + */ + +/* Sideband Queue command structure and opcodes */ +enum ice_sbq_opc { + /* Sideband Queue commands */ + ice_sbq_opc_neigh_dev_req = 0x0C00, + ice_sbq_opc_neigh_dev_ev = 0x0C01 +}; + +/* Sideband Queue descriptor. 
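+ * (The 32-byte layout below mirrors an admin queue descriptor: cookie_high/
+ * cookie_low carry the opaque message data and addr_high/addr_low hold the
+ * address of the indirect buffer.)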
Indirect command + * and non posted + */ +struct ice_sbq_cmd_desc { + __le16 flags; + __le16 opcode; + __le16 datalen; + __le16 cmd_retval; + + /* Opaque message data */ + __le32 cookie_high; + __le32 cookie_low; + + union { + __le16 cmd_len; + __le16 cmpl_len; + } param0; + + u8 reserved[6]; + __le32 addr_high; + __le32 addr_low; +}; + +struct ice_sbq_evt_desc { + __le16 flags; + __le16 opcode; + __le16 datalen; + __le16 cmd_retval; + u8 data[24]; +}; + +enum ice_sbq_msg_dev { + rmn_0 = 0x02, + rmn_1 = 0x03, + rmn_2 = 0x04, + cgu = 0x06 +}; + +enum ice_sbq_msg_opcode { + ice_sbq_msg_rd = 0x00, + ice_sbq_msg_wr = 0x01 +}; + +#define ICE_SBQ_MSG_FLAGS 0x40 +#define ICE_SBQ_MSG_SBE_FBE 0x0F + +struct ice_sbq_msg_req { + u8 dest_dev; + u8 src_dev; + u8 opcode; + u8 flags; + u8 sbe_fbe; + u8 func_id; + __le16 msg_addr_low; + __le32 msg_addr_high; + __le32 data; +}; + +struct ice_sbq_msg_cmpl { + u8 dest_dev; + u8 src_dev; + u8 opcode; + u8 flags; + __le32 data; +}; + +/* Internal struct */ +struct ice_sbq_msg_input { + u8 dest_dev; + u8 opcode; + u16 msg_addr_low; + u32 msg_addr_high; + u32 data; +}; +#endif /* _ICE_SBQ_CMD_H_ */ diff --git a/src/spdk/dpdk/drivers/net/ice/base/ice_sched.c b/src/spdk/dpdk/drivers/net/ice/base/ice_sched.c new file mode 100644 index 000000000..af42fadfe --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ice/base/ice_sched.c @@ -0,0 +1,5513 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + +#include "ice_sched.h" + +/** + * ice_sched_add_root_node - Insert the Tx scheduler root node in SW DB + * @pi: port information structure + * @info: Scheduler element information from firmware + * + * This function inserts the root node of the scheduling tree topology + * to the SW DB. + */ +static enum ice_status +ice_sched_add_root_node(struct ice_port_info *pi, + struct ice_aqc_txsched_elem_data *info) +{ + struct ice_sched_node *root; + struct ice_hw *hw; + + if (!pi) + return ICE_ERR_PARAM; + + hw = pi->hw; + + root = (struct ice_sched_node *)ice_malloc(hw, sizeof(*root)); + if (!root) + return ICE_ERR_NO_MEMORY; + + /* coverity[suspicious_sizeof] */ + root->children = (struct ice_sched_node **) + ice_calloc(hw, hw->max_children[0], sizeof(*root)); + if (!root->children) { + ice_free(hw, root); + return ICE_ERR_NO_MEMORY; + } + + ice_memcpy(&root->info, info, sizeof(*info), ICE_DMA_TO_NONDMA); + pi->root = root; + return ICE_SUCCESS; +} + +/** + * ice_sched_find_node_by_teid - Find the Tx scheduler node in SW DB + * @start_node: pointer to the starting ice_sched_node struct in a sub-tree + * @teid: node TEID to search + * + * This function searches for a node matching the TEID in the scheduling tree + * from the SW DB. The search is recursive and is restricted by the number of + * layers it has searched through; stopping at the max supported layer. 
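+ * The node itself and its direct children are checked first; the search then
+ * recurses into each child's sub-tree.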
+ * + * This function needs to be called when holding the port_info->sched_lock + */ +struct ice_sched_node * +ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid) +{ + u16 i; + + /* The TEID is same as that of the start_node */ + if (ICE_TXSCHED_GET_NODE_TEID(start_node) == teid) + return start_node; + + /* The node has no children or is at the max layer */ + if (!start_node->num_children || + start_node->tx_sched_layer >= ICE_AQC_TOPO_MAX_LEVEL_NUM || + start_node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF) + return NULL; + + /* Check if TEID matches to any of the children nodes */ + for (i = 0; i < start_node->num_children; i++) + if (ICE_TXSCHED_GET_NODE_TEID(start_node->children[i]) == teid) + return start_node->children[i]; + + /* Search within each child's sub-tree */ + for (i = 0; i < start_node->num_children; i++) { + struct ice_sched_node *tmp; + + tmp = ice_sched_find_node_by_teid(start_node->children[i], + teid); + if (tmp) + return tmp; + } + + return NULL; +} + +/** + * ice_aqc_send_sched_elem_cmd - send scheduling elements cmd + * @hw: pointer to the HW struct + * @cmd_opc: cmd opcode + * @elems_req: number of elements to request + * @buf: pointer to buffer + * @buf_size: buffer size in bytes + * @elems_resp: returns total number of elements response + * @cd: pointer to command details structure or NULL + * + * This function sends a scheduling elements cmd (cmd_opc) + */ +static enum ice_status +ice_aqc_send_sched_elem_cmd(struct ice_hw *hw, enum ice_adminq_opc cmd_opc, + u16 elems_req, void *buf, u16 buf_size, + u16 *elems_resp, struct ice_sq_cd *cd) +{ + struct ice_aqc_sched_elem_cmd *cmd; + struct ice_aq_desc desc; + enum ice_status status; + + cmd = &desc.params.sched_elem_cmd; + ice_fill_dflt_direct_cmd_desc(&desc, cmd_opc); + cmd->num_elem_req = CPU_TO_LE16(elems_req); + desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); + status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); + if (!status && elems_resp) + *elems_resp = LE16_TO_CPU(cmd->num_elem_resp); + + return status; +} + +/** + * ice_aq_query_sched_elems - query scheduler elements + * @hw: pointer to the HW struct + * @elems_req: number of elements to query + * @buf: pointer to buffer + * @buf_size: buffer size in bytes + * @elems_ret: returns total number of elements returned + * @cd: pointer to command details structure or NULL + * + * Query scheduling elements (0x0404) + */ +enum ice_status +ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req, + struct ice_aqc_get_elem *buf, u16 buf_size, + u16 *elems_ret, struct ice_sq_cd *cd) +{ + return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_get_sched_elems, + elems_req, (void *)buf, buf_size, + elems_ret, cd); +} + +/** + * ice_sched_add_node - Insert the Tx scheduler node in SW DB + * @pi: port information structure + * @layer: Scheduler layer of the node + * @info: Scheduler element information from firmware + * + * This function inserts a scheduler node to the SW DB. 
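+ * The parent identified by info->parent_teid must already be present in the
+ * SW DB, and the element data is re-read from FW via ice_sched_query_elem()
+ * before the new node is linked under its parent.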
+ */ +enum ice_status +ice_sched_add_node(struct ice_port_info *pi, u8 layer, + struct ice_aqc_txsched_elem_data *info) +{ + struct ice_sched_node *parent; + struct ice_aqc_get_elem elem; + struct ice_sched_node *node; + enum ice_status status; + struct ice_hw *hw; + + if (!pi) + return ICE_ERR_PARAM; + + hw = pi->hw; + + /* A valid parent node should be there */ + parent = ice_sched_find_node_by_teid(pi->root, + LE32_TO_CPU(info->parent_teid)); + if (!parent) { + ice_debug(hw, ICE_DBG_SCHED, + "Parent Node not found for parent_teid=0x%x\n", + LE32_TO_CPU(info->parent_teid)); + return ICE_ERR_PARAM; + } + + /* query the current node information from FW before adding it + * to the SW DB + */ + status = ice_sched_query_elem(hw, LE32_TO_CPU(info->node_teid), &elem); + if (status) + return status; + node = (struct ice_sched_node *)ice_malloc(hw, sizeof(*node)); + if (!node) + return ICE_ERR_NO_MEMORY; + if (hw->max_children[layer]) { + /* coverity[suspicious_sizeof] */ + node->children = (struct ice_sched_node **) + ice_calloc(hw, hw->max_children[layer], sizeof(*node)); + if (!node->children) { + ice_free(hw, node); + return ICE_ERR_NO_MEMORY; + } + } + + node->in_use = true; + node->parent = parent; + node->tx_sched_layer = layer; + parent->children[parent->num_children++] = node; + node->info = elem.generic[0]; + return ICE_SUCCESS; +} + +/** + * ice_aq_delete_sched_elems - delete scheduler elements + * @hw: pointer to the HW struct + * @grps_req: number of groups to delete + * @buf: pointer to buffer + * @buf_size: buffer size in bytes + * @grps_del: returns total number of elements deleted + * @cd: pointer to command details structure or NULL + * + * Delete scheduling elements (0x040F) + */ +static enum ice_status +ice_aq_delete_sched_elems(struct ice_hw *hw, u16 grps_req, + struct ice_aqc_delete_elem *buf, u16 buf_size, + u16 *grps_del, struct ice_sq_cd *cd) +{ + return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_delete_sched_elems, + grps_req, (void *)buf, buf_size, + grps_del, cd); +} + +/** + * ice_sched_remove_elems - remove nodes from HW + * @hw: pointer to the HW struct + * @parent: pointer to the parent node + * @num_nodes: number of nodes + * @node_teids: array of node teids to be deleted + * + * This function removes nodes from HW + */ +static enum ice_status +ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent, + u16 num_nodes, u32 *node_teids) +{ + struct ice_aqc_delete_elem *buf; + u16 i, num_groups_removed = 0; + enum ice_status status; + u16 buf_size; + + buf_size = sizeof(*buf) + sizeof(u32) * (num_nodes - 1); + buf = (struct ice_aqc_delete_elem *)ice_malloc(hw, buf_size); + if (!buf) + return ICE_ERR_NO_MEMORY; + + buf->hdr.parent_teid = parent->info.node_teid; + buf->hdr.num_elems = CPU_TO_LE16(num_nodes); + for (i = 0; i < num_nodes; i++) + buf->teid[i] = CPU_TO_LE32(node_teids[i]); + + status = ice_aq_delete_sched_elems(hw, 1, buf, buf_size, + &num_groups_removed, NULL); + if (status != ICE_SUCCESS || num_groups_removed != 1) + ice_debug(hw, ICE_DBG_SCHED, "remove node failed FW error %d\n", + hw->adminq.sq_last_status); + + ice_free(hw, buf); + return status; +} + +/** + * ice_sched_get_first_node - get the first node of the given layer + * @pi: port information structure + * @parent: pointer to the base node of the subtree + * @layer: layer number + * + * This function retrieves the first node of the given layer from the subtree + */ +static struct ice_sched_node * +ice_sched_get_first_node(struct ice_port_info *pi, + struct ice_sched_node *parent,
u8 layer) +{ + return pi->sib_head[parent->tc_num][layer]; +} + +/** + * ice_sched_get_tc_node - get pointer to TC node + * @pi: port information structure + * @tc: TC number + * + * This function returns the TC node pointer + */ +struct ice_sched_node *ice_sched_get_tc_node(struct ice_port_info *pi, u8 tc) +{ + u8 i; + + if (!pi || !pi->root) + return NULL; + for (i = 0; i < pi->root->num_children; i++) + if (pi->root->children[i]->tc_num == tc) + return pi->root->children[i]; + return NULL; +} + +/** + * ice_free_sched_node - Free a Tx scheduler node from SW DB + * @pi: port information structure + * @node: pointer to the ice_sched_node struct + * + * This function frees up a node from SW DB as well as from HW + * + * This function needs to be called with the port_info->sched_lock held + */ +void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node) +{ + struct ice_sched_node *parent; + struct ice_hw *hw = pi->hw; + u8 i, j; + + /* Free the children before freeing up the parent node + * The parent array is updated below and that shifts the nodes + * in the array. So always pick the first child if num children > 0 + */ + while (node->num_children) + ice_free_sched_node(pi, node->children[0]); + + /* Leaf, TC and root nodes can't be deleted by SW */ + if (node->tx_sched_layer >= hw->sw_entry_point_layer && + node->info.data.elem_type != ICE_AQC_ELEM_TYPE_TC && + node->info.data.elem_type != ICE_AQC_ELEM_TYPE_ROOT_PORT && + node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF) { + u32 teid = LE32_TO_CPU(node->info.node_teid); + + ice_sched_remove_elems(hw, node->parent, 1, &teid); + } + parent = node->parent; + /* root has no parent */ + if (parent) { + struct ice_sched_node *p; + + /* update the parent */ + for (i = 0; i < parent->num_children; i++) + if (parent->children[i] == node) { + for (j = i + 1; j < parent->num_children; j++) + parent->children[j - 1] = + parent->children[j]; + parent->num_children--; + break; + } + + p = ice_sched_get_first_node(pi, node, node->tx_sched_layer); + while (p) { + if (p->sibling == node) { + p->sibling = node->sibling; + break; + } + p = p->sibling; + } + + /* update the sibling head if head is getting removed */ + if (pi->sib_head[node->tc_num][node->tx_sched_layer] == node) + pi->sib_head[node->tc_num][node->tx_sched_layer] = + node->sibling; + } + + /* leaf nodes have no children */ + if (node->children) + ice_free(hw, node->children); + ice_free(hw, node); +} + +/** + * ice_aq_get_dflt_topo - gets default scheduler topology + * @hw: pointer to the HW struct + * @lport: logical port number + * @buf: pointer to buffer + * @buf_size: buffer size in bytes + * @num_branches: returns total number of queue to port branches + * @cd: pointer to command details structure or NULL + * + * Get default scheduler topology (0x400) + */ +static enum ice_status +ice_aq_get_dflt_topo(struct ice_hw *hw, u8 lport, + struct ice_aqc_get_topo_elem *buf, u16 buf_size, + u8 *num_branches, struct ice_sq_cd *cd) +{ + struct ice_aqc_get_topo *cmd; + struct ice_aq_desc desc; + enum ice_status status; + + cmd = &desc.params.get_topo; + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_dflt_topo); + cmd->port_num = lport; + status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); + if (!status && num_branches) + *num_branches = cmd->num_branches; + + return status; +} + +/** + * ice_aq_add_sched_elems - adds scheduling element + * @hw: pointer to the HW struct + * @grps_req: the number of groups that are requested to be added + * @buf: pointer to buffer + * 
@buf_size: buffer size in bytes + * @grps_added: returns total number of groups added + * @cd: pointer to command details structure or NULL + * + * Add scheduling elements (0x0401) + */ +static enum ice_status +ice_aq_add_sched_elems(struct ice_hw *hw, u16 grps_req, + struct ice_aqc_add_elem *buf, u16 buf_size, + u16 *grps_added, struct ice_sq_cd *cd) +{ + return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_add_sched_elems, + grps_req, (void *)buf, buf_size, + grps_added, cd); +} + +/** + * ice_aq_cfg_sched_elems - configures scheduler elements + * @hw: pointer to the HW struct + * @elems_req: number of elements to configure + * @buf: pointer to buffer + * @buf_size: buffer size in bytes + * @elems_cfgd: returns total number of elements configured + * @cd: pointer to command details structure or NULL + * + * Configure scheduling elements (0x0403) + */ +static enum ice_status +ice_aq_cfg_sched_elems(struct ice_hw *hw, u16 elems_req, + struct ice_aqc_conf_elem *buf, u16 buf_size, + u16 *elems_cfgd, struct ice_sq_cd *cd) +{ + return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_cfg_sched_elems, + elems_req, (void *)buf, buf_size, + elems_cfgd, cd); +} + +/** + * ice_aq_move_sched_elems - move scheduler elements + * @hw: pointer to the HW struct + * @grps_req: number of groups to move + * @buf: pointer to buffer + * @buf_size: buffer size in bytes + * @grps_movd: returns total number of groups moved + * @cd: pointer to command details structure or NULL + * + * Move scheduling elements (0x0408) + */ +static enum ice_status +ice_aq_move_sched_elems(struct ice_hw *hw, u16 grps_req, + struct ice_aqc_move_elem *buf, u16 buf_size, + u16 *grps_movd, struct ice_sq_cd *cd) +{ + return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_move_sched_elems, + grps_req, (void *)buf, buf_size, + grps_movd, cd); +} + +/** + * ice_aq_suspend_sched_elems - suspend scheduler elements + * @hw: pointer to the HW struct + * @elems_req: number of elements to suspend + * @buf: pointer to buffer + * @buf_size: buffer size in bytes + * @elems_ret: returns total number of elements suspended + * @cd: pointer to command details structure or NULL + * + * Suspend scheduling elements (0x0409) + */ +static enum ice_status +ice_aq_suspend_sched_elems(struct ice_hw *hw, u16 elems_req, + struct ice_aqc_suspend_resume_elem *buf, + u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd) +{ + return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_suspend_sched_elems, + elems_req, (void *)buf, buf_size, + elems_ret, cd); +} + +/** + * ice_aq_resume_sched_elems - resume scheduler elements + * @hw: pointer to the HW struct + * @elems_req: number of elements to resume + * @buf: pointer to buffer + * @buf_size: buffer size in bytes + * @elems_ret: returns total number of elements resumed + * @cd: pointer to command details structure or NULL + * + * resume scheduling elements (0x040A) + */ +static enum ice_status +ice_aq_resume_sched_elems(struct ice_hw *hw, u16 elems_req, + struct ice_aqc_suspend_resume_elem *buf, + u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd) +{ + return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_resume_sched_elems, + elems_req, (void *)buf, buf_size, + elems_ret, cd); +} + +/** + * ice_aq_query_sched_res - query scheduler resource + * @hw: pointer to the HW struct + * @buf_size: buffer size in bytes + * @buf: pointer to buffer + * @cd: pointer to command details structure or NULL + * + * Query scheduler resource allocation (0x0412) + */ +static enum ice_status +ice_aq_query_sched_res(struct ice_hw *hw, u16 buf_size, + struct 
ice_aqc_query_txsched_res_resp *buf, + struct ice_sq_cd *cd) +{ + struct ice_aq_desc desc; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_query_sched_res); + return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); +} + +/** + * ice_sched_suspend_resume_elems - suspend or resume HW nodes + * @hw: pointer to the HW struct + * @num_nodes: number of nodes + * @node_teids: array of node teids to be suspended or resumed + * @suspend: true means suspend / false means resume + * + * This function suspends or resumes HW nodes + */ +static enum ice_status +ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids, + bool suspend) +{ + struct ice_aqc_suspend_resume_elem *buf; + u16 i, buf_size, num_elem_ret = 0; + enum ice_status status; + + buf_size = sizeof(*buf) * num_nodes; + buf = (struct ice_aqc_suspend_resume_elem *) + ice_malloc(hw, buf_size); + if (!buf) + return ICE_ERR_NO_MEMORY; + + for (i = 0; i < num_nodes; i++) + buf->teid[i] = CPU_TO_LE32(node_teids[i]); + + if (suspend) + status = ice_aq_suspend_sched_elems(hw, num_nodes, buf, + buf_size, &num_elem_ret, + NULL); + else + status = ice_aq_resume_sched_elems(hw, num_nodes, buf, + buf_size, &num_elem_ret, + NULL); + if (status != ICE_SUCCESS || num_elem_ret != num_nodes) + ice_debug(hw, ICE_DBG_SCHED, "suspend/resume failed\n"); + + ice_free(hw, buf); + return status; +} + +/** + * ice_alloc_lan_q_ctx - allocate LAN queue contexts for the given VSI and TC + * @hw: pointer to the HW struct + * @vsi_handle: VSI handle + * @tc: TC number + * @new_numqs: number of queues + */ +static enum ice_status +ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs) +{ + struct ice_vsi_ctx *vsi_ctx; + struct ice_q_ctx *q_ctx; + + vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle); + if (!vsi_ctx) + return ICE_ERR_PARAM; + /* allocate LAN queue contexts */ + if (!vsi_ctx->lan_q_ctx[tc]) { + vsi_ctx->lan_q_ctx[tc] = (struct ice_q_ctx *) + ice_calloc(hw, new_numqs, sizeof(*q_ctx)); + if (!vsi_ctx->lan_q_ctx[tc]) + return ICE_ERR_NO_MEMORY; + vsi_ctx->num_lan_q_entries[tc] = new_numqs; + return ICE_SUCCESS; + } + /* num queues are increased, update the queue contexts */ + if (new_numqs > vsi_ctx->num_lan_q_entries[tc]) { + u16 prev_num = vsi_ctx->num_lan_q_entries[tc]; + + q_ctx = (struct ice_q_ctx *) + ice_calloc(hw, new_numqs, sizeof(*q_ctx)); + if (!q_ctx) + return ICE_ERR_NO_MEMORY; + ice_memcpy(q_ctx, vsi_ctx->lan_q_ctx[tc], + prev_num * sizeof(*q_ctx), ICE_DMA_TO_NONDMA); + ice_free(hw, vsi_ctx->lan_q_ctx[tc]); + vsi_ctx->lan_q_ctx[tc] = q_ctx; + vsi_ctx->num_lan_q_entries[tc] = new_numqs; + } + return ICE_SUCCESS; +} + +/** + * ice_aq_rl_profile - performs a rate limiting task + * @hw: pointer to the HW struct + * @opcode:opcode for add, query, or remove profile(s) + * @num_profiles: the number of profiles + * @buf: pointer to buffer + * @buf_size: buffer size in bytes + * @num_processed: number of processed add or remove profile(s) to return + * @cd: pointer to command details structure + * + * Rl profile function to add, query, or remove profile(s) + */ +static enum ice_status +ice_aq_rl_profile(struct ice_hw *hw, enum ice_adminq_opc opcode, + u16 num_profiles, struct ice_aqc_rl_profile_generic_elem *buf, + u16 buf_size, u16 *num_processed, struct ice_sq_cd *cd) +{ + struct ice_aqc_rl_profile *cmd; + struct ice_aq_desc desc; + enum ice_status status; + + cmd = &desc.params.rl_profile; + + ice_fill_dflt_direct_cmd_desc(&desc, opcode); + desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); + cmd->num_profiles = 
CPU_TO_LE16(num_profiles); + status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); + if (!status && num_processed) + *num_processed = LE16_TO_CPU(cmd->num_processed); + return status; +} + +/** + * ice_aq_add_rl_profile - adds rate limiting profile(s) + * @hw: pointer to the HW struct + * @num_profiles: the number of profile(s) to be add + * @buf: pointer to buffer + * @buf_size: buffer size in bytes + * @num_profiles_added: total number of profiles added to return + * @cd: pointer to command details structure + * + * Add RL profile (0x0410) + */ +static enum ice_status +ice_aq_add_rl_profile(struct ice_hw *hw, u16 num_profiles, + struct ice_aqc_rl_profile_generic_elem *buf, + u16 buf_size, u16 *num_profiles_added, + struct ice_sq_cd *cd) +{ + return ice_aq_rl_profile(hw, ice_aqc_opc_add_rl_profiles, + num_profiles, buf, + buf_size, num_profiles_added, cd); +} + +/** + * ice_aq_query_rl_profile - query rate limiting profile(s) + * @hw: pointer to the HW struct + * @num_profiles: the number of profile(s) to query + * @buf: pointer to buffer + * @buf_size: buffer size in bytes + * @cd: pointer to command details structure + * + * Query RL profile (0x0411) + */ +enum ice_status +ice_aq_query_rl_profile(struct ice_hw *hw, u16 num_profiles, + struct ice_aqc_rl_profile_generic_elem *buf, + u16 buf_size, struct ice_sq_cd *cd) +{ + return ice_aq_rl_profile(hw, ice_aqc_opc_query_rl_profiles, + num_profiles, buf, buf_size, NULL, cd); +} + +/** + * ice_aq_remove_rl_profile - removes RL profile(s) + * @hw: pointer to the HW struct + * @num_profiles: the number of profile(s) to remove + * @buf: pointer to buffer + * @buf_size: buffer size in bytes + * @num_profiles_removed: total number of profiles removed to return + * @cd: pointer to command details structure or NULL + * + * Remove RL profile (0x0415) + */ +static enum ice_status +ice_aq_remove_rl_profile(struct ice_hw *hw, u16 num_profiles, + struct ice_aqc_rl_profile_generic_elem *buf, + u16 buf_size, u16 *num_profiles_removed, + struct ice_sq_cd *cd) +{ + return ice_aq_rl_profile(hw, ice_aqc_opc_remove_rl_profiles, + num_profiles, buf, + buf_size, num_profiles_removed, cd); +} + +/** + * ice_sched_del_rl_profile - remove RL profile + * @hw: pointer to the HW struct + * @rl_info: rate limit profile information + * + * If the profile ID is not referenced anymore, it removes profile ID with + * its associated parameters from HW DB,and locally. The caller needs to + * hold scheduler lock. + */ +static enum ice_status +ice_sched_del_rl_profile(struct ice_hw *hw, + struct ice_aqc_rl_profile_info *rl_info) +{ + struct ice_aqc_rl_profile_generic_elem *buf; + u16 num_profiles_removed; + enum ice_status status; + u16 num_profiles = 1; + + if (rl_info->prof_id_ref != 0) + return ICE_ERR_IN_USE; + + /* Safe to remove profile ID */ + buf = (struct ice_aqc_rl_profile_generic_elem *) + &rl_info->profile; + status = ice_aq_remove_rl_profile(hw, num_profiles, buf, sizeof(*buf), + &num_profiles_removed, NULL); + if (status || num_profiles_removed != num_profiles) + return ICE_ERR_CFG; + + /* Delete stale entry now */ + LIST_DEL(&rl_info->list_entry); + ice_free(hw, rl_info); + return status; +} + +/** + * ice_sched_clear_rl_prof - clears RL prof entries + * @pi: port information structure + * + * This function removes all RL profile from HW as well as from SW DB. 
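+ * Each entry's prof_id_ref is cleared first so that ice_sched_del_rl_profile()
+ * can release it; if removal from HW fails, the local list entry is still
+ * freed.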
+ */ +static void ice_sched_clear_rl_prof(struct ice_port_info *pi) +{ + u16 ln; + + for (ln = 0; ln < pi->hw->num_tx_sched_layers; ln++) { + struct ice_aqc_rl_profile_info *rl_prof_elem; + struct ice_aqc_rl_profile_info *rl_prof_tmp; + + LIST_FOR_EACH_ENTRY_SAFE(rl_prof_elem, rl_prof_tmp, + &pi->rl_prof_list[ln], + ice_aqc_rl_profile_info, list_entry) { + struct ice_hw *hw = pi->hw; + enum ice_status status; + + rl_prof_elem->prof_id_ref = 0; + status = ice_sched_del_rl_profile(hw, rl_prof_elem); + if (status) { + ice_debug(hw, ICE_DBG_SCHED, + "Remove rl profile failed\n"); + /* On error, free mem required */ + LIST_DEL(&rl_prof_elem->list_entry); + ice_free(hw, rl_prof_elem); + } + } + } +} + +/** + * ice_sched_clear_agg - clears the aggregator related information + * @hw: pointer to the hardware structure + * + * This function removes the aggregator list and frees up aggregator related + * memory previously allocated. + */ +void ice_sched_clear_agg(struct ice_hw *hw) +{ + struct ice_sched_agg_info *agg_info; + struct ice_sched_agg_info *atmp; + + LIST_FOR_EACH_ENTRY_SAFE(agg_info, atmp, &hw->agg_list, + ice_sched_agg_info, + list_entry) { + struct ice_sched_agg_vsi_info *agg_vsi_info; + struct ice_sched_agg_vsi_info *vtmp; + + LIST_FOR_EACH_ENTRY_SAFE(agg_vsi_info, vtmp, + &agg_info->agg_vsi_list, + ice_sched_agg_vsi_info, list_entry) { + LIST_DEL(&agg_vsi_info->list_entry); + ice_free(hw, agg_vsi_info); + } + LIST_DEL(&agg_info->list_entry); + ice_free(hw, agg_info); + } +} + +/** + * ice_sched_clear_tx_topo - clears the scheduler tree nodes + * @pi: port information structure + * + * This function removes all the nodes from HW as well as from SW DB. + */ +static void ice_sched_clear_tx_topo(struct ice_port_info *pi) +{ + if (!pi) + return; + /* remove RL profiles related lists */ + ice_sched_clear_rl_prof(pi); + if (pi->root) { + ice_free_sched_node(pi, pi->root); + pi->root = NULL; + } +} + +/** + * ice_sched_clear_port - clear the scheduler elements from SW DB for a port + * @pi: port information structure + * + * Cleanup scheduling elements from SW DB + */ +void ice_sched_clear_port(struct ice_port_info *pi) +{ + if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) + return; + + pi->port_state = ICE_SCHED_PORT_STATE_INIT; + ice_acquire_lock(&pi->sched_lock); + ice_sched_clear_tx_topo(pi); + ice_release_lock(&pi->sched_lock); + ice_destroy_lock(&pi->sched_lock); +} + +/** + * ice_sched_cleanup_all - cleanup scheduler elements from SW DB for all ports + * @hw: pointer to the HW struct + * + * Cleanup scheduling elements from SW DB for all the ports + */ +void ice_sched_cleanup_all(struct ice_hw *hw) +{ + if (!hw) + return; + + if (hw->layer_info) { + ice_free(hw, hw->layer_info); + hw->layer_info = NULL; + } + + ice_sched_clear_port(hw->port_info); + + hw->num_tx_sched_layers = 0; + hw->num_tx_sched_phys_layers = 0; + hw->flattened_layers = 0; + hw->max_cgds = 0; +} + +/** + * ice_aq_cfg_l2_node_cgd - configures L2 node to CGD mapping + * @hw: pointer to the HW struct + * @num_l2_nodes: the number of L2 nodes whose CGDs to configure + * @buf: pointer to buffer + * @buf_size: buffer size in bytes + * @cd: pointer to command details structure or NULL + * + * Configure L2 Node CGD (0x0414) + */ +enum ice_status +ice_aq_cfg_l2_node_cgd(struct ice_hw *hw, u16 num_l2_nodes, + struct ice_aqc_cfg_l2_node_cgd_data *buf, + u16 buf_size, struct ice_sq_cd *cd) +{ + struct ice_aqc_cfg_l2_node_cgd *cmd; + struct ice_aq_desc desc; + + cmd = &desc.params.cfg_l2_node_cgd; +
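+	/* Only the L2 node count travels in the descriptor; the CGD mapping
+	 * data itself is carried in the indirect buffer passed to
+	 * ice_aq_send_cmd().
+	 */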
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_cfg_l2_node_cgd); + desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); + + cmd->num_l2_nodes = CPU_TO_LE16(num_l2_nodes); + return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); +} + +/** + * ice_sched_add_elems - add nodes to HW and SW DB + * @pi: port information structure + * @tc_node: pointer to the branch node + * @parent: pointer to the parent node + * @layer: layer number to add nodes + * @num_nodes: number of nodes + * @num_nodes_added: pointer to num nodes added + * @first_node_teid: if new nodes are added then return the TEID of first node + * + * This function add nodes to HW as well as to SW DB for a given layer + */ +static enum ice_status +ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node, + struct ice_sched_node *parent, u8 layer, u16 num_nodes, + u16 *num_nodes_added, u32 *first_node_teid) +{ + struct ice_sched_node *prev, *new_node; + struct ice_aqc_add_elem *buf; + u16 i, num_groups_added = 0; + enum ice_status status = ICE_SUCCESS; + struct ice_hw *hw = pi->hw; + u16 buf_size; + u32 teid; + + buf_size = ice_struct_size(buf, generic, num_nodes - 1); + buf = (struct ice_aqc_add_elem *)ice_malloc(hw, buf_size); + if (!buf) + return ICE_ERR_NO_MEMORY; + + buf->hdr.parent_teid = parent->info.node_teid; + buf->hdr.num_elems = CPU_TO_LE16(num_nodes); + for (i = 0; i < num_nodes; i++) { + buf->generic[i].parent_teid = parent->info.node_teid; + buf->generic[i].data.elem_type = ICE_AQC_ELEM_TYPE_SE_GENERIC; + buf->generic[i].data.valid_sections = + ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR | + ICE_AQC_ELEM_VALID_EIR; + buf->generic[i].data.generic = 0; + buf->generic[i].data.cir_bw.bw_profile_idx = + CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID); + buf->generic[i].data.cir_bw.bw_alloc = + CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT); + buf->generic[i].data.eir_bw.bw_profile_idx = + CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID); + buf->generic[i].data.eir_bw.bw_alloc = + CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT); + } + + status = ice_aq_add_sched_elems(hw, 1, buf, buf_size, + &num_groups_added, NULL); + if (status != ICE_SUCCESS || num_groups_added != 1) { + ice_debug(hw, ICE_DBG_SCHED, "add node failed FW Error %d\n", + hw->adminq.sq_last_status); + ice_free(hw, buf); + return ICE_ERR_CFG; + } + + *num_nodes_added = num_nodes; + /* add nodes to the SW DB */ + for (i = 0; i < num_nodes; i++) { + status = ice_sched_add_node(pi, layer, &buf->generic[i]); + if (status != ICE_SUCCESS) { + ice_debug(hw, ICE_DBG_SCHED, + "add nodes in SW DB failed status =%d\n", + status); + break; + } + + teid = LE32_TO_CPU(buf->generic[i].node_teid); + new_node = ice_sched_find_node_by_teid(parent, teid); + if (!new_node) { + ice_debug(hw, ICE_DBG_SCHED, + "Node is missing for teid =%d\n", teid); + break; + } + + new_node->sibling = NULL; + new_node->tc_num = tc_node->tc_num; + + /* add it to previous node sibling pointer */ + /* Note: siblings are not linked across branches */ + prev = ice_sched_get_first_node(pi, tc_node, layer); + if (prev && prev != new_node) { + while (prev->sibling) + prev = prev->sibling; + prev->sibling = new_node; + } + + /* initialize the sibling head */ + if (!pi->sib_head[tc_node->tc_num][layer]) + pi->sib_head[tc_node->tc_num][layer] = new_node; + + if (i == 0) + *first_node_teid = teid; + } + + ice_free(hw, buf); + return status; +} + +/** + * ice_sched_add_nodes_to_layer - Add nodes to a given layer + * @pi: port information structure + * @tc_node: pointer to TC node + * @parent: pointer to parent node + * @layer: layer number 
to add nodes + * @num_nodes: number of nodes to be added + * @first_node_teid: pointer to the first node TEID + * @num_nodes_added: pointer to number of nodes added + * + * This function add nodes to a given layer. + */ +static enum ice_status +ice_sched_add_nodes_to_layer(struct ice_port_info *pi, + struct ice_sched_node *tc_node, + struct ice_sched_node *parent, u8 layer, + u16 num_nodes, u32 *first_node_teid, + u16 *num_nodes_added) +{ + u32 *first_teid_ptr = first_node_teid; + u16 new_num_nodes, max_child_nodes; + enum ice_status status = ICE_SUCCESS; + struct ice_hw *hw = pi->hw; + u16 num_added = 0; + u32 temp; + + *num_nodes_added = 0; + + if (!num_nodes) + return status; + + if (!parent || layer < hw->sw_entry_point_layer) + return ICE_ERR_PARAM; + + /* max children per node per layer */ + max_child_nodes = hw->max_children[parent->tx_sched_layer]; + + /* current number of children + required nodes exceed max children ? */ + if ((parent->num_children + num_nodes) > max_child_nodes) { + /* Fail if the parent is a TC node */ + if (parent == tc_node) + return ICE_ERR_CFG; + + /* utilize all the spaces if the parent is not full */ + if (parent->num_children < max_child_nodes) { + new_num_nodes = max_child_nodes - parent->num_children; + /* this recursion is intentional, and wouldn't + * go more than 2 calls + */ + status = ice_sched_add_nodes_to_layer(pi, tc_node, + parent, layer, + new_num_nodes, + first_node_teid, + &num_added); + if (status != ICE_SUCCESS) + return status; + + *num_nodes_added += num_added; + } + /* Don't modify the first node TEID memory if the first node was + * added already in the above call. Instead send some temp + * memory for all other recursive calls. + */ + if (num_added) + first_teid_ptr = &temp; + + new_num_nodes = num_nodes - num_added; + + /* This parent is full, try the next sibling */ + parent = parent->sibling; + + /* this recursion is intentional, for 1024 queues + * per VSI, it goes max of 16 iterations. + * 1024 / 8 = 128 layer 8 nodes + * 128 /8 = 16 (add 8 nodes per iteration) + */ + status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, + layer, new_num_nodes, + first_teid_ptr, + &num_added); + *num_nodes_added += num_added; + return status; + } + + status = ice_sched_add_elems(pi, tc_node, parent, layer, num_nodes, + num_nodes_added, first_node_teid); + return status; +} + +/** + * ice_sched_get_qgrp_layer - get the current queue group layer number + * @hw: pointer to the HW struct + * + * This function returns the current queue group layer number + */ +static u8 ice_sched_get_qgrp_layer(struct ice_hw *hw) +{ + /* It's always total layers - 1, the array is 0 relative so -2 */ + return hw->num_tx_sched_layers - ICE_QGRP_LAYER_OFFSET; +} + +/** + * ice_sched_get_vsi_layer - get the current VSI layer number + * @hw: pointer to the HW struct + * + * This function returns the current VSI layer number + */ +static u8 ice_sched_get_vsi_layer(struct ice_hw *hw) +{ + /* Num Layers VSI layer + * 9 6 + * 7 4 + * 5 or less sw_entry_point_layer + */ + /* calculate the VSI layer based on number of layers. 
*/ + if (hw->num_tx_sched_layers > ICE_VSI_LAYER_OFFSET + 1) { + u8 layer = hw->num_tx_sched_layers - ICE_VSI_LAYER_OFFSET; + + if (layer > hw->sw_entry_point_layer) + return layer; + } + return hw->sw_entry_point_layer; +} + +/** + * ice_sched_get_agg_layer - get the current aggregator layer number + * @hw: pointer to the HW struct + * + * This function returns the current aggregator layer number + */ +static u8 ice_sched_get_agg_layer(struct ice_hw *hw) +{ + /* Num Layers aggregator layer + * 9 4 + * 7 or less sw_entry_point_layer + */ + /* calculate the aggregator layer based on number of layers. */ + if (hw->num_tx_sched_layers > ICE_AGG_LAYER_OFFSET + 1) { + u8 layer = hw->num_tx_sched_layers - ICE_AGG_LAYER_OFFSET; + + if (layer > hw->sw_entry_point_layer) + return layer; + } + return hw->sw_entry_point_layer; +} + +/** + * ice_rm_dflt_leaf_node - remove the default leaf node in the tree + * @pi: port information structure + * + * This function removes the leaf node that was created by the FW + * during initialization + */ +static void ice_rm_dflt_leaf_node(struct ice_port_info *pi) +{ + struct ice_sched_node *node; + + node = pi->root; + while (node) { + if (!node->num_children) + break; + node = node->children[0]; + } + if (node && node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF) { + u32 teid = LE32_TO_CPU(node->info.node_teid); + enum ice_status status; + + /* remove the default leaf node */ + status = ice_sched_remove_elems(pi->hw, node->parent, 1, &teid); + if (!status) + ice_free_sched_node(pi, node); + } +} + +/** + * ice_sched_rm_dflt_nodes - free the default nodes in the tree + * @pi: port information structure + * + * This function frees all the nodes except root and TC that were created by + * the FW during initialization + */ +static void ice_sched_rm_dflt_nodes(struct ice_port_info *pi) +{ + struct ice_sched_node *node; + + ice_rm_dflt_leaf_node(pi); + + /* remove the default nodes except TC and root nodes */ + node = pi->root; + while (node) { + if (node->tx_sched_layer >= pi->hw->sw_entry_point_layer && + node->info.data.elem_type != ICE_AQC_ELEM_TYPE_TC && + node->info.data.elem_type != ICE_AQC_ELEM_TYPE_ROOT_PORT) { + ice_free_sched_node(pi, node); + break; + } + + if (!node->num_children) + break; + node = node->children[0]; + } +} + +/** + * ice_sched_init_port - Initialize scheduler by querying information from FW + * @pi: port info structure for the tree to cleanup + * + * This function is the initial call to find the total number of Tx scheduler + * resources, default topology created by firmware and storing the information + * in SW DB. 
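The layer helpers above all follow the same shape: subtract a fixed offset from the total number of Tx scheduler layers and never return a layer above the software entry point. A standalone sketch of that arithmetic; the offset values (2 for queue group, 3 for VSI, 5 for aggregator) are inferred from the tables in the comments, not taken from the driver's named constants:

#include <stdio.h>

/* Offsets inferred from the comment tables above; illustrative values only. */
#define QGRP_OFFSET	2	/* always total layers - 1, 0-relative */
#define VSI_OFFSET	3	/* 9 layers -> VSI layer 6, 7 layers -> 4 */
#define AGG_OFFSET	5	/* 9 layers -> aggregator layer 4 */

/* Subtract the offset, but never go above the software entry point layer. */
static unsigned int sched_layer(unsigned int num_layers, unsigned int offset,
				unsigned int sw_entry_point)
{
	if (num_layers > offset + 1) {
		unsigned int layer = num_layers - offset;

		if (layer > sw_entry_point)
			return layer;
	}
	return sw_entry_point;
}

int main(void)
{
	unsigned int layers = 9, entry = 1;

	printf("qgrp %u, vsi %u, agg %u\n",
	       sched_layer(layers, QGRP_OFFSET, entry),	/* 7 */
	       sched_layer(layers, VSI_OFFSET, entry),	/* 6 */
	       sched_layer(layers, AGG_OFFSET, entry));	/* 4 */
	return 0;
}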
+ */ +enum ice_status ice_sched_init_port(struct ice_port_info *pi) +{ + struct ice_aqc_get_topo_elem *buf; + enum ice_status status; + struct ice_hw *hw; + u8 num_branches; + u16 num_elems; + u8 i, j; + + if (!pi) + return ICE_ERR_PARAM; + hw = pi->hw; + + /* Query the Default Topology from FW */ + buf = (struct ice_aqc_get_topo_elem *)ice_malloc(hw, + ICE_AQ_MAX_BUF_LEN); + if (!buf) + return ICE_ERR_NO_MEMORY; + + /* Query default scheduling tree topology */ + status = ice_aq_get_dflt_topo(hw, pi->lport, buf, ICE_AQ_MAX_BUF_LEN, + &num_branches, NULL); + if (status) + goto err_init_port; + + /* num_branches should be between 1-8 */ + if (num_branches < 1 || num_branches > ICE_TXSCHED_MAX_BRANCHES) { + ice_debug(hw, ICE_DBG_SCHED, "num_branches unexpected %d\n", + num_branches); + status = ICE_ERR_PARAM; + goto err_init_port; + } + + /* get the number of elements on the default/first branch */ + num_elems = LE16_TO_CPU(buf[0].hdr.num_elems); + + /* num_elems should always be between 1-9 */ + if (num_elems < 1 || num_elems > ICE_AQC_TOPO_MAX_LEVEL_NUM) { + ice_debug(hw, ICE_DBG_SCHED, "num_elems unexpected %d\n", + num_elems); + status = ICE_ERR_PARAM; + goto err_init_port; + } + + /* If the last node is a leaf node then the index of the queue group + * layer is two less than the number of elements. + */ + if (num_elems > 2 && buf[0].generic[num_elems - 1].data.elem_type == + ICE_AQC_ELEM_TYPE_LEAF) + pi->last_node_teid = + LE32_TO_CPU(buf[0].generic[num_elems - 2].node_teid); + else + pi->last_node_teid = + LE32_TO_CPU(buf[0].generic[num_elems - 1].node_teid); + + /* Insert the Tx Sched root node */ + status = ice_sched_add_root_node(pi, &buf[0].generic[0]); + if (status) + goto err_init_port; + + /* Parse the default tree and cache the information */ + for (i = 0; i < num_branches; i++) { + num_elems = LE16_TO_CPU(buf[i].hdr.num_elems); + + /* Skip root element as already inserted */ + for (j = 1; j < num_elems; j++) { + /* update the sw entry point */ + if (buf[0].generic[j].data.elem_type == + ICE_AQC_ELEM_TYPE_ENTRY_POINT) + hw->sw_entry_point_layer = j; + + status = ice_sched_add_node(pi, j, &buf[i].generic[j]); + if (status) + goto err_init_port; + } + } + + /* Remove the default nodes. */ + if (pi->root) + ice_sched_rm_dflt_nodes(pi); + + /* initialize the port for handling the scheduler tree */ + pi->port_state = ICE_SCHED_PORT_STATE_READY; + ice_init_lock(&pi->sched_lock); + for (i = 0; i < ICE_AQC_TOPO_MAX_LEVEL_NUM; i++) + INIT_LIST_HEAD(&pi->rl_prof_list[i]); + +err_init_port: + if (status && pi->root) { + ice_free_sched_node(pi, pi->root); + pi->root = NULL; + } + + ice_free(hw, buf); + return status; +} + +/** + * ice_sched_get_node - Get the struct ice_sched_node for given TEID + * @pi: port information structure + * @teid: Scheduler node TEID + * + * This function retrieves the ice_sched_node struct for given TEID from + * the SW DB and returns it to the caller. 
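One detail in the init path above is how pi->last_node_teid is chosen from the first branch: when the branch ends in the firmware's default leaf, the TEID is taken from the element one position earlier (the queue group), otherwise from the last element. A standalone sketch of just that selection rule, with an illustrative element type:

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

/* Illustrative element: only what the selection rule needs. */
struct topo_elem {
	uint32_t teid;
	bool is_leaf;
};

/* Pick the "last node" TEID: skip a trailing leaf element, as described above. */
static uint32_t pick_last_node_teid(const struct topo_elem *elems, int num_elems)
{
	if (num_elems > 2 && elems[num_elems - 1].is_leaf)
		return elems[num_elems - 2].teid;
	return elems[num_elems - 1].teid;
}

int main(void)
{
	struct topo_elem branch[] = {
		{ .teid = 0x10, .is_leaf = false },	/* root */
		{ .teid = 0x20, .is_leaf = false },	/* intermediate */
		{ .teid = 0x30, .is_leaf = false },	/* queue group */
		{ .teid = 0x40, .is_leaf = true },	/* default leaf from FW */
	};

	printf("last node teid: 0x%x\n",
	       pick_last_node_teid(branch, 4));		/* 0x30, the queue group */
	return 0;
}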
+ */ +struct ice_sched_node *ice_sched_get_node(struct ice_port_info *pi, u32 teid) +{ + struct ice_sched_node *node; + + if (!pi) + return NULL; + + /* Find the node starting from root */ + ice_acquire_lock(&pi->sched_lock); + node = ice_sched_find_node_by_teid(pi->root, teid); + ice_release_lock(&pi->sched_lock); + + if (!node) + ice_debug(pi->hw, ICE_DBG_SCHED, + "Node not found for teid=0x%x\n", teid); + + return node; +} + +/** + * ice_sched_query_res_alloc - query the FW for num of logical sched layers + * @hw: pointer to the HW struct + * + * query FW for allocated scheduler resources and store in HW struct + */ +enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw) +{ + struct ice_aqc_query_txsched_res_resp *buf; + enum ice_status status = ICE_SUCCESS; + __le16 max_sibl; + u8 i; + + if (hw->layer_info) + return status; + + buf = (struct ice_aqc_query_txsched_res_resp *) + ice_malloc(hw, sizeof(*buf)); + if (!buf) + return ICE_ERR_NO_MEMORY; + + status = ice_aq_query_sched_res(hw, sizeof(*buf), buf, NULL); + if (status) + goto sched_query_out; + + hw->num_tx_sched_layers = LE16_TO_CPU(buf->sched_props.logical_levels); + hw->num_tx_sched_phys_layers = + LE16_TO_CPU(buf->sched_props.phys_levels); + hw->flattened_layers = buf->sched_props.flattening_bitmap; + hw->max_cgds = buf->sched_props.max_pf_cgds; + + /* max sibling group size of current layer refers to the max children + * of the below layer node. + * layer 1 node max children will be layer 2 max sibling group size + * layer 2 node max children will be layer 3 max sibling group size + * and so on. This array will be populated from root (index 0) to + * qgroup layer 7. Leaf node has no children. + */ + for (i = 0; i < hw->num_tx_sched_layers - 1; i++) { + max_sibl = buf->layer_props[i + 1].max_sibl_grp_sz; + hw->max_children[i] = LE16_TO_CPU(max_sibl); + } + + hw->layer_info = (struct ice_aqc_layer_props *) + ice_memdup(hw, buf->layer_props, + (hw->num_tx_sched_layers * + sizeof(*hw->layer_info)), + ICE_DMA_TO_DMA); + if (!hw->layer_info) { + status = ICE_ERR_NO_MEMORY; + goto sched_query_out; + } + +sched_query_out: + ice_free(hw, buf); + return status; +} + +/** + * ice_sched_get_psm_clk_freq - determine the PSM clock frequency + * @hw: pointer to the HW struct + * + * Determine the PSM clock frequency and store in HW struct + */ +void ice_sched_get_psm_clk_freq(struct ice_hw *hw) +{ + u32 val, clk_src; + + val = rd32(hw, GLGEN_CLKSTAT_SRC); + clk_src = (val & GLGEN_CLKSTAT_SRC_PSM_CLK_SRC_M) >> + GLGEN_CLKSTAT_SRC_PSM_CLK_SRC_S; + +#define PSM_CLK_SRC_367_MHZ 0x0 +#define PSM_CLK_SRC_416_MHZ 0x1 +#define PSM_CLK_SRC_446_MHZ 0x2 +#define PSM_CLK_SRC_390_MHZ 0x3 + + switch (clk_src) { + case PSM_CLK_SRC_367_MHZ: + hw->psm_clk_freq = ICE_PSM_CLK_367MHZ_IN_HZ; + break; + case PSM_CLK_SRC_416_MHZ: + hw->psm_clk_freq = ICE_PSM_CLK_416MHZ_IN_HZ; + break; + case PSM_CLK_SRC_446_MHZ: + hw->psm_clk_freq = ICE_PSM_CLK_446MHZ_IN_HZ; + break; + case PSM_CLK_SRC_390_MHZ: + hw->psm_clk_freq = ICE_PSM_CLK_390MHZ_IN_HZ; + break; + default: + ice_debug(hw, ICE_DBG_SCHED, "PSM clk_src unexpected %u\n", + clk_src); + /* fall back to a safe default */ + hw->psm_clk_freq = ICE_PSM_CLK_446MHZ_IN_HZ; + } +} + +/** + * ice_sched_find_node_in_subtree - Find node in part of base node subtree + * @hw: pointer to the HW struct + * @base: pointer to the base node + * @node: pointer to the node to search + * + * This function checks whether a given node is part of the base node + * subtree or not + */ +bool +ice_sched_find_node_in_subtree(struct 
ice_hw *hw, struct ice_sched_node *base, + struct ice_sched_node *node) +{ + u8 i; + + for (i = 0; i < base->num_children; i++) { + struct ice_sched_node *child = base->children[i]; + + if (node == child) + return true; + + if (child->tx_sched_layer > node->tx_sched_layer) + return false; + + /* this recursion is intentional, and wouldn't + * go more than 8 calls + */ + if (ice_sched_find_node_in_subtree(hw, child, node)) + return true; + } + return false; +} + +/** + * ice_sched_get_free_qparent - Get a free LAN or RDMA queue group node + * @pi: port information structure + * @vsi_handle: software VSI handle + * @tc: branch number + * @owner: LAN or RDMA + * + * This function retrieves a free LAN or RDMA queue group node + */ +struct ice_sched_node * +ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc, + u8 owner) +{ + struct ice_sched_node *vsi_node, *qgrp_node = NULL; + struct ice_vsi_ctx *vsi_ctx; + u16 max_children; + u8 qgrp_layer; + + qgrp_layer = ice_sched_get_qgrp_layer(pi->hw); + max_children = pi->hw->max_children[qgrp_layer]; + + vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle); + if (!vsi_ctx) + return NULL; + vsi_node = vsi_ctx->sched.vsi_node[tc]; + /* validate invalid VSI ID */ + if (!vsi_node) + goto lan_q_exit; + + /* get the first queue group node from VSI sub-tree */ + qgrp_node = ice_sched_get_first_node(pi, vsi_node, qgrp_layer); + while (qgrp_node) { + /* make sure the qgroup node is part of the VSI subtree */ + if (ice_sched_find_node_in_subtree(pi->hw, vsi_node, qgrp_node)) + if (qgrp_node->num_children < max_children && + qgrp_node->owner == owner) + break; + qgrp_node = qgrp_node->sibling; + } + +lan_q_exit: + return qgrp_node; +} + +/** + * ice_sched_get_vsi_node - Get a VSI node based on VSI ID + * @pi: pointer to the port information structure + * @tc_node: pointer to the TC node + * @vsi_handle: software VSI handle + * + * This function retrieves a VSI node for a given VSI ID from a given + * TC branch + */ +struct ice_sched_node * +ice_sched_get_vsi_node(struct ice_port_info *pi, struct ice_sched_node *tc_node, + u16 vsi_handle) +{ + struct ice_sched_node *node; + u8 vsi_layer; + + vsi_layer = ice_sched_get_vsi_layer(pi->hw); + node = ice_sched_get_first_node(pi, tc_node, vsi_layer); + + /* Check whether it already exists */ + while (node) { + if (node->vsi_handle == vsi_handle) + return node; + node = node->sibling; + } + + return node; +} + +/** + * ice_sched_get_agg_node - Get an aggregator node based on aggregator ID + * @pi: pointer to the port information structure + * @tc_node: pointer to the TC node + * @agg_id: aggregator ID + * + * This function retrieves an aggregator node for a given aggregator ID from + * a given TC branch + */ +static struct ice_sched_node * +ice_sched_get_agg_node(struct ice_port_info *pi, struct ice_sched_node *tc_node, + u32 agg_id) +{ + struct ice_sched_node *node; + struct ice_hw *hw = pi->hw; + u8 agg_layer; + + if (!hw) + return NULL; + agg_layer = ice_sched_get_agg_layer(hw); + node = ice_sched_get_first_node(pi, tc_node, agg_layer); + + /* Check whether it already exists */ + while (node) { + if (node->agg_id == agg_id) + return node; + node = node->sibling; + } + + return node; +} + +/** + * ice_sched_check_node - Compare node parameters between SW DB and HW DB + * @hw: pointer to the HW struct + * @node: pointer to the ice_sched_node struct + * + * This function queries and compares the HW element with SW DB node parameters + */ +static bool ice_sched_check_node(struct ice_hw *hw, struct 
ice_sched_node *node) +{ + struct ice_aqc_get_elem buf; + enum ice_status status; + u32 node_teid; + + node_teid = LE32_TO_CPU(node->info.node_teid); + status = ice_sched_query_elem(hw, node_teid, &buf); + if (status != ICE_SUCCESS) + return false; + + if (memcmp(buf.generic, &node->info, sizeof(*buf.generic))) { + ice_debug(hw, ICE_DBG_SCHED, "Node mismatch for teid=0x%x\n", + node_teid); + return false; + } + + return true; +} + +/** + * ice_sched_calc_vsi_child_nodes - calculate number of VSI child nodes + * @hw: pointer to the HW struct + * @num_qs: number of queues + * @num_nodes: num nodes array + * + * This function calculates the number of VSI child nodes based on the + * number of queues. + */ +static void +ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_qs, u16 *num_nodes) +{ + u16 num = num_qs; + u8 i, qgl, vsil; + + qgl = ice_sched_get_qgrp_layer(hw); + vsil = ice_sched_get_vsi_layer(hw); + + /* calculate num nodes from queue group to VSI layer */ + for (i = qgl; i > vsil; i--) { + /* round to the next integer if there is a remainder */ + num = DIVIDE_AND_ROUND_UP(num, hw->max_children[i]); + + /* need at least one node */ + num_nodes[i] = num ? num : 1; + } +} + +/** + * ice_sched_add_vsi_child_nodes - add VSI child nodes to tree + * @pi: port information structure + * @vsi_handle: software VSI handle + * @tc_node: pointer to the TC node + * @num_nodes: pointer to the num nodes that needs to be added per layer + * @owner: node owner (LAN or RDMA) + * + * This function adds the VSI child nodes to tree. It gets called for + * LAN and RDMA separately. + */ +static enum ice_status +ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle, + struct ice_sched_node *tc_node, u16 *num_nodes, + u8 owner) +{ + struct ice_sched_node *parent, *node; + struct ice_hw *hw = pi->hw; + enum ice_status status; + u32 first_node_teid; + u16 num_added = 0; + u8 i, qgl, vsil; + + qgl = ice_sched_get_qgrp_layer(hw); + vsil = ice_sched_get_vsi_layer(hw); + parent = ice_sched_get_vsi_node(pi, tc_node, vsi_handle); + for (i = vsil + 1; i <= qgl; i++) { + if (!parent) + return ICE_ERR_CFG; + + status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i, + num_nodes[i], + &first_node_teid, + &num_added); + if (status != ICE_SUCCESS || num_nodes[i] != num_added) + return ICE_ERR_CFG; + + /* The newly added node can be a new parent for the next + * layer nodes + */ + if (num_added) { + parent = ice_sched_find_node_by_teid(tc_node, + first_node_teid); + node = parent; + while (node) { + node->owner = owner; + node = node->sibling; + } + } else { + parent = parent->children[0]; + } + } + + return ICE_SUCCESS; +} + +/** + * ice_sched_calc_vsi_support_nodes - calculate number of VSI support nodes + * @pi: pointer to the port info structure + * @tc_node: pointer to TC node + * @num_nodes: pointer to num nodes array + * + * This function calculates the number of supported nodes needed to add this + * VSI into Tx tree including the VSI, parent and intermediate nodes in below + * layers + */ +static void +ice_sched_calc_vsi_support_nodes(struct ice_port_info *pi, + struct ice_sched_node *tc_node, u16 *num_nodes) +{ + struct ice_sched_node *node; + u8 vsil; + int i; + + vsil = ice_sched_get_vsi_layer(pi->hw); + for (i = vsil; i >= pi->hw->sw_entry_point_layer; i--) + /* Add intermediate nodes if TC has no children and + * need at least one node for VSI + */ + if (!tc_node->num_children || i == vsil) { + num_nodes[i]++; + } else { + /* If intermediate nodes are reached max children + * then 
add a new one. + */ + node = ice_sched_get_first_node(pi, tc_node, (u8)i); + /* scan all the siblings */ + while (node) { + if (node->num_children < + pi->hw->max_children[i]) + break; + node = node->sibling; + } + + /* tree has one intermediate node to add this new VSI. + * So no need to calculate supported nodes for below + * layers. + */ + if (node) + break; + /* all the nodes are full, allocate a new one */ + num_nodes[i]++; + } +} + +/** + * ice_sched_add_vsi_support_nodes - add VSI supported nodes into Tx tree + * @pi: port information structure + * @vsi_handle: software VSI handle + * @tc_node: pointer to TC node + * @num_nodes: pointer to num nodes array + * + * This function adds the VSI supported nodes into Tx tree including the + * VSI, its parent and intermediate nodes in below layers + */ +static enum ice_status +ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle, + struct ice_sched_node *tc_node, u16 *num_nodes) +{ + struct ice_sched_node *parent = tc_node; + enum ice_status status; + u32 first_node_teid; + u16 num_added = 0; + u8 i, vsil; + + if (!pi) + return ICE_ERR_PARAM; + + vsil = ice_sched_get_vsi_layer(pi->hw); + for (i = pi->hw->sw_entry_point_layer; i <= vsil; i++) { + status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, + i, num_nodes[i], + &first_node_teid, + &num_added); + if (status != ICE_SUCCESS || num_nodes[i] != num_added) + return ICE_ERR_CFG; + + /* The newly added node can be a new parent for the next + * layer nodes + */ + if (num_added) + parent = ice_sched_find_node_by_teid(tc_node, + first_node_teid); + else + parent = parent->children[0]; + + if (!parent) + return ICE_ERR_CFG; + + if (i == vsil) + parent->vsi_handle = vsi_handle; + } + + return ICE_SUCCESS; +} + +/** + * ice_sched_add_vsi_to_topo - add a new VSI into tree + * @pi: port information structure + * @vsi_handle: software VSI handle + * @tc: TC number + * + * This function adds a new VSI into scheduler tree + */ +static enum ice_status +ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_handle, u8 tc) +{ + u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 }; + struct ice_sched_node *tc_node; + + tc_node = ice_sched_get_tc_node(pi, tc); + if (!tc_node) + return ICE_ERR_PARAM; + + /* calculate number of supported nodes needed for this VSI */ + ice_sched_calc_vsi_support_nodes(pi, tc_node, num_nodes); + + /* add VSI supported nodes to TC subtree */ + return ice_sched_add_vsi_support_nodes(pi, vsi_handle, tc_node, + num_nodes); +} + +/** + * ice_sched_update_vsi_child_nodes - update VSI child nodes + * @pi: port information structure + * @vsi_handle: software VSI handle + * @tc: TC number + * @new_numqs: new number of max queues + * @owner: owner of this subtree + * + * This function updates the VSI child nodes based on the number of queues + */ +static enum ice_status +ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle, + u8 tc, u16 new_numqs, u8 owner) +{ + u16 new_num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 }; + struct ice_sched_node *vsi_node; + struct ice_sched_node *tc_node; + struct ice_vsi_ctx *vsi_ctx; + enum ice_status status = ICE_SUCCESS; + struct ice_hw *hw = pi->hw; + u16 prev_numqs; + + tc_node = ice_sched_get_tc_node(pi, tc); + if (!tc_node) + return ICE_ERR_CFG; + + vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle); + if (!vsi_node) + return ICE_ERR_CFG; + + vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle); + if (!vsi_ctx) + return ICE_ERR_PARAM; + + prev_numqs = vsi_ctx->sched.max_lanq[tc]; + /* num queues are not 
changed or less than the previous number */ + if (new_numqs <= prev_numqs) + return status; + status = ice_alloc_lan_q_ctx(hw, vsi_handle, tc, new_numqs); + if (status) + return status; + + if (new_numqs) + ice_sched_calc_vsi_child_nodes(hw, new_numqs, new_num_nodes); + /* Keep the max number of queue configuration all the time. Update the + * tree only if number of queues > previous number of queues. This may + * leave some extra nodes in the tree if number of queues < previous + * number but that wouldn't harm anything. Removing those extra nodes + * may complicate the code if those nodes are part of SRL or + * individually rate limited. + */ + status = ice_sched_add_vsi_child_nodes(pi, vsi_handle, tc_node, + new_num_nodes, owner); + if (status) + return status; + vsi_ctx->sched.max_lanq[tc] = new_numqs; + + return ICE_SUCCESS; +} + +/** + * ice_sched_cfg_vsi - configure the new/existing VSI + * @pi: port information structure + * @vsi_handle: software VSI handle + * @tc: TC number + * @maxqs: max number of queues + * @owner: LAN or RDMA + * @enable: TC enabled or disabled + * + * This function adds/updates VSI nodes based on the number of queues. If TC is + * enabled and VSI is in suspended state then resume the VSI back. If TC is + * disabled then suspend the VSI if it is not already. + */ +enum ice_status +ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs, + u8 owner, bool enable) +{ + struct ice_sched_node *vsi_node, *tc_node; + struct ice_vsi_ctx *vsi_ctx; + enum ice_status status = ICE_SUCCESS; + struct ice_hw *hw = pi->hw; + + ice_debug(pi->hw, ICE_DBG_SCHED, "add/config VSI %d\n", vsi_handle); + tc_node = ice_sched_get_tc_node(pi, tc); + if (!tc_node) + return ICE_ERR_PARAM; + vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle); + if (!vsi_ctx) + return ICE_ERR_PARAM; + vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle); + + /* suspend the VSI if TC is not enabled */ + if (!enable) { + if (vsi_node && vsi_node->in_use) { + u32 teid = LE32_TO_CPU(vsi_node->info.node_teid); + + status = ice_sched_suspend_resume_elems(hw, 1, &teid, + true); + if (!status) + vsi_node->in_use = false; + } + return status; + } + + /* TC is enabled, if it is a new VSI then add it to the tree */ + if (!vsi_node) { + status = ice_sched_add_vsi_to_topo(pi, vsi_handle, tc); + if (status) + return status; + + vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle); + if (!vsi_node) + return ICE_ERR_CFG; + + vsi_ctx->sched.vsi_node[tc] = vsi_node; + vsi_node->in_use = true; + /* invalidate the max queues whenever VSI gets added first time + * into the scheduler tree (boot or after reset). We need to + * recreate the child nodes all the time in these cases. + */ + vsi_ctx->sched.max_lanq[tc] = 0; + } + + /* update the VSI child nodes */ + status = ice_sched_update_vsi_child_nodes(pi, vsi_handle, tc, maxqs, + owner); + if (status) + return status; + + /* TC is enabled, resume the VSI if it is in the suspend state */ + if (!vsi_node->in_use) { + u32 teid = LE32_TO_CPU(vsi_node->info.node_teid); + + status = ice_sched_suspend_resume_elems(hw, 1, &teid, false); + if (!status) + vsi_node->in_use = true; + } + + return status; +} + +/** + * ice_sched_rm_agg_vsi_entry - remove aggregator related VSI info entry + * @pi: port information structure + * @vsi_handle: software VSI handle + * + * This function removes single aggregator VSI info entry from + * aggregator list. 
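The node counts that ice_sched_update_vsi_child_nodes above feeds into the tree come from ice_sched_calc_vsi_child_nodes, shown a little earlier, which walks from the queue-group layer down to the VSI layer dividing (rounding up) by each layer's maximum children. A standalone sketch of that walk; the layer indices and the max-children table are made-up example values:

#include <stdio.h>

/* Round-up division, equivalent in spirit to the driver's DIVIDE_AND_ROUND_UP. */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Walk from the queue-group layer down to the VSI layer, dividing by the
 * maximum children allowed at each layer and keeping at least one node. */
static void calc_child_nodes(unsigned int num_qs, unsigned int qgl, unsigned int vsil,
			     const unsigned int *max_children, unsigned int *num_nodes)
{
	unsigned int num = num_qs;
	unsigned int i;

	for (i = qgl; i > vsil; i--) {
		num = DIV_ROUND_UP(num, max_children[i]);
		num_nodes[i] = num ? num : 1;
	}
}

int main(void)
{
	/* e.g. a 9-layer tree: queue-group layer 7, VSI layer 6, 8 children max */
	unsigned int max_children[9] = { [7] = 8, [8] = 8 };
	unsigned int num_nodes[9] = { 0 };

	calc_child_nodes(1024, 7, 6, max_children, num_nodes);
	printf("queue-group nodes needed: %u\n", num_nodes[7]);	/* 128 */
	return 0;
}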
+ */ +static void +ice_sched_rm_agg_vsi_info(struct ice_port_info *pi, u16 vsi_handle) +{ + struct ice_sched_agg_info *agg_info; + struct ice_sched_agg_info *atmp; + + LIST_FOR_EACH_ENTRY_SAFE(agg_info, atmp, &pi->hw->agg_list, + ice_sched_agg_info, + list_entry) { + struct ice_sched_agg_vsi_info *agg_vsi_info; + struct ice_sched_agg_vsi_info *vtmp; + + LIST_FOR_EACH_ENTRY_SAFE(agg_vsi_info, vtmp, + &agg_info->agg_vsi_list, + ice_sched_agg_vsi_info, list_entry) + if (agg_vsi_info->vsi_handle == vsi_handle) { + LIST_DEL(&agg_vsi_info->list_entry); + ice_free(pi->hw, agg_vsi_info); + return; + } + } +} + +/** + * ice_sched_is_leaf_node_present - check for a leaf node in the sub-tree + * @node: pointer to the sub-tree node + * + * This function checks for a leaf node presence in a given sub-tree node. + */ +static bool ice_sched_is_leaf_node_present(struct ice_sched_node *node) +{ + u8 i; + + for (i = 0; i < node->num_children; i++) + if (ice_sched_is_leaf_node_present(node->children[i])) + return true; + /* check for a leaf node */ + return (node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF); +} + +/** + * ice_sched_rm_vsi_cfg - remove the VSI and its children nodes + * @pi: port information structure + * @vsi_handle: software VSI handle + * @owner: LAN or RDMA + * + * This function removes the VSI and its LAN or RDMA children nodes from the + * scheduler tree. + */ +static enum ice_status +ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner) +{ + enum ice_status status = ICE_ERR_PARAM; + struct ice_vsi_ctx *vsi_ctx; + u8 i; + + ice_debug(pi->hw, ICE_DBG_SCHED, "removing VSI %d\n", vsi_handle); + if (!ice_is_vsi_valid(pi->hw, vsi_handle)) + return status; + ice_acquire_lock(&pi->sched_lock); + vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle); + if (!vsi_ctx) + goto exit_sched_rm_vsi_cfg; + + ice_for_each_traffic_class(i) { + struct ice_sched_node *vsi_node, *tc_node; + u8 j = 0; + + tc_node = ice_sched_get_tc_node(pi, i); + if (!tc_node) + continue; + + vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle); + if (!vsi_node) + continue; + + if (ice_sched_is_leaf_node_present(vsi_node)) { + ice_debug(pi->hw, ICE_DBG_SCHED, + "VSI has leaf nodes in TC %d\n", i); + status = ICE_ERR_IN_USE; + goto exit_sched_rm_vsi_cfg; + } + while (j < vsi_node->num_children) { + if (vsi_node->children[j]->owner == owner) { + ice_free_sched_node(pi, vsi_node->children[j]); + + /* reset the counter again since the num + * children will be updated after node removal + */ + j = 0; + } else { + j++; + } + } + /* remove the VSI if it has no children */ + if (!vsi_node->num_children) { + ice_free_sched_node(pi, vsi_node); + vsi_ctx->sched.vsi_node[i] = NULL; + + /* clean up aggregator related VSI info if any */ + ice_sched_rm_agg_vsi_info(pi, vsi_handle); + } + if (owner == ICE_SCHED_NODE_OWNER_LAN) + vsi_ctx->sched.max_lanq[i] = 0; + } + status = ICE_SUCCESS; + +exit_sched_rm_vsi_cfg: + ice_release_lock(&pi->sched_lock); + return status; +} + +/** + * ice_rm_vsi_lan_cfg - remove VSI and its LAN children nodes + * @pi: port information structure + * @vsi_handle: software VSI handle + * + * This function clears the VSI and its LAN children nodes from scheduler tree + * for all TCs. 
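The owner-specific child removal in ice_sched_rm_vsi_cfg above restarts its index at zero after every removal, because freeing a node compacts the parent's children array underneath the iterator. A standalone sketch of that restart pattern on a plain array (the owner codes are illustrative):

#include <stdio.h>

/* Remove matching entries from a compacting array by restarting the scan,
 * mirroring the j = 0 reset in the VSI removal loop above. */
static void remove_owned(int *owner, unsigned int *count, int target)
{
	unsigned int j = 0;

	while (j < *count) {
		if (owner[j] == target) {
			unsigned int k;

			/* compact the array, as freeing a sched node does */
			for (k = j + 1; k < *count; k++)
				owner[k - 1] = owner[k];
			(*count)--;
			j = 0;		/* indices shifted, start over */
		} else {
			j++;
		}
	}
}

int main(void)
{
	int owner[] = { 1, 2, 1, 1, 2 };	/* 1 = LAN, 2 = RDMA, illustrative */
	unsigned int count = 5, i;

	remove_owned(owner, &count, 1);
	for (i = 0; i < count; i++)
		printf("%d ", owner[i]);	/* prints: 2 2 */
	printf("\n");
	return 0;
}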
+ */ +enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle) +{ + return ice_sched_rm_vsi_cfg(pi, vsi_handle, ICE_SCHED_NODE_OWNER_LAN); +} + +/** + * ice_sched_is_tree_balanced - Check tree nodes are identical or not + * @hw: pointer to the HW struct + * @node: pointer to the ice_sched_node struct + * + * This function compares all the nodes for a given tree against HW DB nodes + * This function needs to be called with the port_info->sched_lock held + */ +bool ice_sched_is_tree_balanced(struct ice_hw *hw, struct ice_sched_node *node) +{ + u8 i; + + /* start from the leaf node */ + for (i = 0; i < node->num_children; i++) + /* Fail if node doesn't match with the SW DB + * this recursion is intentional, and wouldn't + * go more than 9 calls + */ + if (!ice_sched_is_tree_balanced(hw, node->children[i])) + return false; + + return ice_sched_check_node(hw, node); +} + +/** + * ice_aq_query_node_to_root - retrieve the tree topology for a given node TEID + * @hw: pointer to the HW struct + * @node_teid: node TEID + * @buf: pointer to buffer + * @buf_size: buffer size in bytes + * @cd: pointer to command details structure or NULL + * + * This function retrieves the tree topology from the firmware for a given + * node TEID to the root node. + */ +enum ice_status +ice_aq_query_node_to_root(struct ice_hw *hw, u32 node_teid, + struct ice_aqc_get_elem *buf, u16 buf_size, + struct ice_sq_cd *cd) +{ + struct ice_aqc_query_node_to_root *cmd; + struct ice_aq_desc desc; + + cmd = &desc.params.query_node_to_root; + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_query_node_to_root); + cmd->teid = CPU_TO_LE32(node_teid); + return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); +} + +/** + * ice_get_agg_info - get the aggregator ID + * @hw: pointer to the hardware structure + * @agg_id: aggregator ID + * + * This function validates aggregator ID. The function returns info if + * aggregator ID is present in list otherwise it returns null. + */ +static struct ice_sched_agg_info* +ice_get_agg_info(struct ice_hw *hw, u32 agg_id) +{ + struct ice_sched_agg_info *agg_info; + + LIST_FOR_EACH_ENTRY(agg_info, &hw->agg_list, ice_sched_agg_info, + list_entry) + if (agg_info->agg_id == agg_id) + return agg_info; + + return NULL; +} + +/** + * ice_sched_get_free_vsi_parent - Find a free parent node in aggregator subtree + * @hw: pointer to the HW struct + * @node: pointer to a child node + * @num_nodes: num nodes count array + * + * This function walks through the aggregator subtree to find a free parent + * node + */ +static struct ice_sched_node * +ice_sched_get_free_vsi_parent(struct ice_hw *hw, struct ice_sched_node *node, + u16 *num_nodes) +{ + u8 l = node->tx_sched_layer; + u8 vsil, i; + + vsil = ice_sched_get_vsi_layer(hw); + + /* Is it VSI parent layer ? */ + if (l == vsil - 1) + return (node->num_children < hw->max_children[l]) ? node : NULL; + + /* We have intermediate nodes. Let's walk through the subtree. If the + * intermediate node has space to add a new node then clear the count + */ + if (node->num_children < hw->max_children[l]) + num_nodes[l] = 0; + /* The below recursive call is intentional and wouldn't go more than + * 2 or 3 iterations. 
+ */ + + for (i = 0; i < node->num_children; i++) { + struct ice_sched_node *parent; + + parent = ice_sched_get_free_vsi_parent(hw, node->children[i], + num_nodes); + if (parent) + return parent; + } + + return NULL; +} + +/** + * ice_sched_update_parent - update the new parent in SW DB + * @new_parent: pointer to a new parent node + * @node: pointer to a child node + * + * This function removes the child from the old parent and adds it to a new + * parent + */ +static void +ice_sched_update_parent(struct ice_sched_node *new_parent, + struct ice_sched_node *node) +{ + struct ice_sched_node *old_parent; + u8 i, j; + + old_parent = node->parent; + + /* update the old parent children */ + for (i = 0; i < old_parent->num_children; i++) + if (old_parent->children[i] == node) { + for (j = i + 1; j < old_parent->num_children; j++) + old_parent->children[j - 1] = + old_parent->children[j]; + old_parent->num_children--; + break; + } + + /* now move the node to a new parent */ + new_parent->children[new_parent->num_children++] = node; + node->parent = new_parent; + node->info.parent_teid = new_parent->info.node_teid; +} + +/** + * ice_sched_move_nodes - move child nodes to a given parent + * @pi: port information structure + * @parent: pointer to parent node + * @num_items: number of child nodes to be moved + * @list: pointer to child node teids + * + * This function move the child nodes to a given parent. + */ +static enum ice_status +ice_sched_move_nodes(struct ice_port_info *pi, struct ice_sched_node *parent, + u16 num_items, u32 *list) +{ + enum ice_status status = ICE_SUCCESS; + struct ice_aqc_move_elem *buf; + struct ice_sched_node *node; + u16 i, grps_movd = 0; + struct ice_hw *hw; + + hw = pi->hw; + + if (!parent || !num_items) + return ICE_ERR_PARAM; + + /* Does parent have enough space */ + if (parent->num_children + num_items >= + hw->max_children[parent->tx_sched_layer]) + return ICE_ERR_AQ_FULL; + + buf = (struct ice_aqc_move_elem *)ice_malloc(hw, sizeof(*buf)); + if (!buf) + return ICE_ERR_NO_MEMORY; + + for (i = 0; i < num_items; i++) { + node = ice_sched_find_node_by_teid(pi->root, list[i]); + if (!node) { + status = ICE_ERR_PARAM; + goto move_err_exit; + } + + buf->hdr.src_parent_teid = node->info.parent_teid; + buf->hdr.dest_parent_teid = parent->info.node_teid; + buf->teid[0] = node->info.node_teid; + buf->hdr.num_elems = CPU_TO_LE16(1); + status = ice_aq_move_sched_elems(hw, 1, buf, sizeof(*buf), + &grps_movd, NULL); + if (status && grps_movd != 1) { + status = ICE_ERR_CFG; + goto move_err_exit; + } + + /* update the SW DB */ + ice_sched_update_parent(parent, node); + } + +move_err_exit: + ice_free(hw, buf); + return status; +} + +/** + * ice_sched_move_vsi_to_agg - move VSI to aggregator node + * @pi: port information structure + * @vsi_handle: software VSI handle + * @agg_id: aggregator ID + * @tc: TC number + * + * This function moves a VSI to an aggregator node or its subtree. + * Intermediate nodes may be created if required. 
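ice_sched_update_parent above keeps the software tree in step with a firmware move: the child is unlinked from the old parent by shifting the later siblings down, then appended to the new parent and re-pointed at it. A standalone sketch of that re-parenting bookkeeping with a minimal, illustrative node type:

#include <stdio.h>

#define MAX_CHILDREN 8

/* Just enough of a node to show the re-parenting bookkeeping. */
struct node {
	int id;
	struct node *parent;
	unsigned int num_children;
	struct node *children[MAX_CHILDREN];
};

static void update_parent(struct node *new_parent, struct node *child)
{
	struct node *old = child->parent;
	unsigned int i, j;

	/* unlink from the old parent, shifting later siblings down */
	for (i = 0; i < old->num_children; i++)
		if (old->children[i] == child) {
			for (j = i + 1; j < old->num_children; j++)
				old->children[j - 1] = old->children[j];
			old->num_children--;
			break;
		}

	/* link under the new parent */
	new_parent->children[new_parent->num_children++] = child;
	child->parent = new_parent;
}

int main(void)
{
	struct node a = { .id = 1 }, b = { .id = 2 }, c = { .id = 3 };

	a.children[a.num_children++] = &c;
	c.parent = &a;
	update_parent(&b, &c);
	printf("a has %u children, b has %u, c's parent is %d\n",
	       a.num_children, b.num_children, c.parent->id);	/* 0, 1, 2 */
	return 0;
}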
+ */ +static enum ice_status +ice_sched_move_vsi_to_agg(struct ice_port_info *pi, u16 vsi_handle, u32 agg_id, + u8 tc) +{ + struct ice_sched_node *vsi_node, *agg_node, *tc_node, *parent; + u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 }; + u32 first_node_teid, vsi_teid; + enum ice_status status; + u16 num_nodes_added; + u8 aggl, vsil, i; + + tc_node = ice_sched_get_tc_node(pi, tc); + if (!tc_node) + return ICE_ERR_CFG; + + agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id); + if (!agg_node) + return ICE_ERR_DOES_NOT_EXIST; + + vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle); + if (!vsi_node) + return ICE_ERR_DOES_NOT_EXIST; + + aggl = ice_sched_get_agg_layer(pi->hw); + vsil = ice_sched_get_vsi_layer(pi->hw); + + /* set intermediate node count to 1 between aggregator and VSI layers */ + for (i = aggl + 1; i < vsil; i++) + num_nodes[i] = 1; + + /* Check if the aggregator subtree has any free node to add the VSI */ + for (i = 0; i < agg_node->num_children; i++) { + parent = ice_sched_get_free_vsi_parent(pi->hw, + agg_node->children[i], + num_nodes); + if (parent) + goto move_nodes; + } + + /* add new nodes */ + parent = agg_node; + for (i = aggl + 1; i < vsil; i++) { + status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i, + num_nodes[i], + &first_node_teid, + &num_nodes_added); + if (status != ICE_SUCCESS || num_nodes[i] != num_nodes_added) + return ICE_ERR_CFG; + + /* The newly added node can be a new parent for the next + * layer nodes + */ + if (num_nodes_added) + parent = ice_sched_find_node_by_teid(tc_node, + first_node_teid); + else + parent = parent->children[0]; + + if (!parent) + return ICE_ERR_CFG; + } + +move_nodes: + vsi_teid = LE32_TO_CPU(vsi_node->info.node_teid); + return ice_sched_move_nodes(pi, parent, 1, &vsi_teid); +} + +/** + * ice_move_all_vsi_to_dflt_agg - move all VSI(s) to default aggregator + * @pi: port information structure + * @agg_info: aggregator info + * @tc: traffic class number + * @rm_vsi_info: true or false + * + * This function move all the VSI(s) to the default aggregator and delete + * aggregator VSI info based on passed in boolean parameter rm_vsi_info. The + * caller holds the scheduler lock. + */ +static enum ice_status +ice_move_all_vsi_to_dflt_agg(struct ice_port_info *pi, + struct ice_sched_agg_info *agg_info, u8 tc, + bool rm_vsi_info) +{ + struct ice_sched_agg_vsi_info *agg_vsi_info; + struct ice_sched_agg_vsi_info *tmp; + enum ice_status status = ICE_SUCCESS; + + LIST_FOR_EACH_ENTRY_SAFE(agg_vsi_info, tmp, &agg_info->agg_vsi_list, + ice_sched_agg_vsi_info, list_entry) { + u16 vsi_handle = agg_vsi_info->vsi_handle; + + /* Move VSI to default aggregator */ + if (!ice_is_tc_ena(agg_vsi_info->tc_bitmap[0], tc)) + continue; + + status = ice_sched_move_vsi_to_agg(pi, vsi_handle, + ICE_DFLT_AGG_ID, tc); + if (status) + break; + + ice_clear_bit(tc, agg_vsi_info->tc_bitmap); + if (rm_vsi_info && !agg_vsi_info->tc_bitmap[0]) { + LIST_DEL(&agg_vsi_info->list_entry); + ice_free(pi->hw, agg_vsi_info); + } + } + + return status; +} + +/** + * ice_sched_is_agg_inuse - check whether the aggregator is in use or not + * @pi: port information structure + * @node: node pointer + * + * This function checks whether the aggregator is attached with any VSI or not. 
+ */ +static bool +ice_sched_is_agg_inuse(struct ice_port_info *pi, struct ice_sched_node *node) +{ + u8 vsil, i; + + vsil = ice_sched_get_vsi_layer(pi->hw); + if (node->tx_sched_layer < vsil - 1) { + for (i = 0; i < node->num_children; i++) + if (ice_sched_is_agg_inuse(pi, node->children[i])) + return true; + return false; + } else { + return node->num_children ? true : false; + } +} + +/** + * ice_sched_rm_agg_cfg - remove the aggregator node + * @pi: port information structure + * @agg_id: aggregator ID + * @tc: TC number + * + * This function removes the aggregator node and intermediate nodes if any + * from the given TC + */ +static enum ice_status +ice_sched_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc) +{ + struct ice_sched_node *tc_node, *agg_node; + struct ice_hw *hw = pi->hw; + + tc_node = ice_sched_get_tc_node(pi, tc); + if (!tc_node) + return ICE_ERR_CFG; + + agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id); + if (!agg_node) + return ICE_ERR_DOES_NOT_EXIST; + + /* Can't remove the aggregator node if it has children */ + if (ice_sched_is_agg_inuse(pi, agg_node)) + return ICE_ERR_IN_USE; + + /* need to remove the whole subtree if aggregator node is the + * only child. + */ + while (agg_node->tx_sched_layer > hw->sw_entry_point_layer) { + struct ice_sched_node *parent = agg_node->parent; + + if (!parent) + return ICE_ERR_CFG; + + if (parent->num_children > 1) + break; + + agg_node = parent; + } + + ice_free_sched_node(pi, agg_node); + return ICE_SUCCESS; +} + +/** + * ice_rm_agg_cfg_tc - remove aggregator configuration for TC + * @pi: port information structure + * @agg_info: aggregator ID + * @tc: TC number + * @rm_vsi_info: bool value true or false + * + * This function removes aggregator reference to VSI of given TC. It removes + * the aggregator configuration completely for requested TC. The caller needs + * to hold the scheduler lock. + */ +static enum ice_status +ice_rm_agg_cfg_tc(struct ice_port_info *pi, struct ice_sched_agg_info *agg_info, + u8 tc, bool rm_vsi_info) +{ + enum ice_status status = ICE_SUCCESS; + + /* If nothing to remove - return success */ + if (!ice_is_tc_ena(agg_info->tc_bitmap[0], tc)) + goto exit_rm_agg_cfg_tc; + + status = ice_move_all_vsi_to_dflt_agg(pi, agg_info, tc, rm_vsi_info); + if (status) + goto exit_rm_agg_cfg_tc; + + /* Delete aggregator node(s) */ + status = ice_sched_rm_agg_cfg(pi, agg_info->agg_id, tc); + if (status) + goto exit_rm_agg_cfg_tc; + + ice_clear_bit(tc, agg_info->tc_bitmap); +exit_rm_agg_cfg_tc: + return status; +} + +/** + * ice_save_agg_tc_bitmap - save aggregator TC bitmap + * @pi: port information structure + * @agg_id: aggregator ID + * @tc_bitmap: 8 bits TC bitmap + * + * Save aggregator TC bitmap. This function needs to be called with scheduler + * lock held. 
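When the aggregator node removed above sits at the bottom of a single-child chain, ice_sched_rm_agg_cfg walks up toward the software entry point and frees the highest node whose subtree contains nothing else. A standalone sketch of that upward walk (node layout and layer numbers are illustrative):

#include <stdio.h>

#define MAX_CHILDREN 8

struct node {
	int layer;
	struct node *parent;
	unsigned int num_children;
	struct node *children[MAX_CHILDREN];
};

/* Climb while the current node is an only child above the entry-point layer;
 * the returned node is the root of the subtree that can be freed as a whole. */
static struct node *subtree_to_remove(struct node *n, int sw_entry_point_layer)
{
	while (n->layer > sw_entry_point_layer) {
		struct node *parent = n->parent;

		if (!parent || parent->num_children > 1)
			break;
		n = parent;
	}
	return n;
}

int main(void)
{
	struct node entry = { .layer = 1 };
	struct node mid = { .layer = 2, .parent = &entry };
	struct node other = { .layer = 2, .parent = &entry };
	struct node agg = { .layer = 3, .parent = &mid };

	entry.children[entry.num_children++] = &mid;
	entry.children[entry.num_children++] = &other;
	mid.children[mid.num_children++] = &agg;

	/* mid's only purpose is holding agg, so the walk stops at layer 2 */
	printf("remove subtree rooted at layer %d\n",
	       subtree_to_remove(&agg, 1)->layer);
	return 0;
}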
+ */ +static enum ice_status +ice_save_agg_tc_bitmap(struct ice_port_info *pi, u32 agg_id, + ice_bitmap_t *tc_bitmap) +{ + struct ice_sched_agg_info *agg_info; + + agg_info = ice_get_agg_info(pi->hw, agg_id); + if (!agg_info) + return ICE_ERR_PARAM; + ice_cp_bitmap(agg_info->replay_tc_bitmap, tc_bitmap, + ICE_MAX_TRAFFIC_CLASS); + return ICE_SUCCESS; +} + +/** + * ice_sched_add_agg_cfg - create an aggregator node + * @pi: port information structure + * @agg_id: aggregator ID + * @tc: TC number + * + * This function creates an aggregator node and intermediate nodes if required + * for the given TC + */ +static enum ice_status +ice_sched_add_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc) +{ + struct ice_sched_node *parent, *agg_node, *tc_node; + u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 }; + enum ice_status status = ICE_SUCCESS; + struct ice_hw *hw = pi->hw; + u32 first_node_teid; + u16 num_nodes_added; + u8 i, aggl; + + tc_node = ice_sched_get_tc_node(pi, tc); + if (!tc_node) + return ICE_ERR_CFG; + + agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id); + /* Does Agg node already exist ? */ + if (agg_node) + return status; + + aggl = ice_sched_get_agg_layer(hw); + + /* need one node in Agg layer */ + num_nodes[aggl] = 1; + + /* Check whether the intermediate nodes have space to add the + * new aggregator. If they are full, then SW needs to allocate a new + * intermediate node on those layers + */ + for (i = hw->sw_entry_point_layer; i < aggl; i++) { + parent = ice_sched_get_first_node(pi, tc_node, i); + + /* scan all the siblings */ + while (parent) { + if (parent->num_children < hw->max_children[i]) + break; + parent = parent->sibling; + } + + /* all the nodes are full, reserve one for this layer */ + if (!parent) + num_nodes[i]++; + } + + /* add the aggregator node */ + parent = tc_node; + for (i = hw->sw_entry_point_layer; i <= aggl; i++) { + if (!parent) + return ICE_ERR_CFG; + + status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i, + num_nodes[i], + &first_node_teid, + &num_nodes_added); + if (status != ICE_SUCCESS || num_nodes[i] != num_nodes_added) + return ICE_ERR_CFG; + + /* The newly added node can be a new parent for the next + * layer nodes + */ + if (num_nodes_added) { + parent = ice_sched_find_node_by_teid(tc_node, + first_node_teid); + /* register aggregator ID with the aggregator node */ + if (parent && i == aggl) + parent->agg_id = agg_id; + } else { + parent = parent->children[0]; + } + } + + return ICE_SUCCESS; +} + +/** + * ice_sched_cfg_agg - configure aggregator node + * @pi: port information structure + * @agg_id: aggregator ID + * @agg_type: aggregator type queue, VSI, or aggregator group + * @tc_bitmap: bits TC bitmap + * + * It registers a unique aggregator node into scheduler services. It + * allows a user to register with a unique ID to track it's resources. + * The aggregator type determines if this is a queue group, VSI group + * or aggregator group. It then creates the aggregator node(s) for requested + * TC(s) or removes an existing aggregator node including its configuration + * if indicated via tc_bitmap. Call ice_rm_agg_cfg to release aggregator + * resources and remove aggregator ID. + * This function needs to be called with scheduler lock held. 
+ */ +static enum ice_status +ice_sched_cfg_agg(struct ice_port_info *pi, u32 agg_id, + enum ice_agg_type agg_type, ice_bitmap_t *tc_bitmap) +{ + struct ice_sched_agg_info *agg_info; + enum ice_status status = ICE_SUCCESS; + struct ice_hw *hw = pi->hw; + u8 tc; + + agg_info = ice_get_agg_info(hw, agg_id); + if (!agg_info) { + /* Create new entry for new aggregator ID */ + agg_info = (struct ice_sched_agg_info *) + ice_malloc(hw, sizeof(*agg_info)); + if (!agg_info) { + status = ICE_ERR_NO_MEMORY; + goto exit_reg_agg; + } + agg_info->agg_id = agg_id; + agg_info->agg_type = agg_type; + agg_info->tc_bitmap[0] = 0; + + /* Initialize the aggregator VSI list head */ + INIT_LIST_HEAD(&agg_info->agg_vsi_list); + + /* Add new entry in aggregator list */ + LIST_ADD(&agg_info->list_entry, &hw->agg_list); + } + /* Create aggregator node(s) for requested TC(s) */ + ice_for_each_traffic_class(tc) { + if (!ice_is_tc_ena(*tc_bitmap, tc)) { + /* Delete aggregator cfg TC if it exists previously */ + status = ice_rm_agg_cfg_tc(pi, agg_info, tc, false); + if (status) + break; + continue; + } + + /* Check if aggregator node for TC already exists */ + if (ice_is_tc_ena(agg_info->tc_bitmap[0], tc)) + continue; + + /* Create new aggregator node for TC */ + status = ice_sched_add_agg_cfg(pi, agg_id, tc); + if (status) + break; + + /* Save aggregator node's TC information */ + ice_set_bit(tc, agg_info->tc_bitmap); + } +exit_reg_agg: + return status; +} + +/** + * ice_cfg_agg - config aggregator node + * @pi: port information structure + * @agg_id: aggregator ID + * @agg_type: aggregator type queue, VSI, or aggregator group + * @tc_bitmap: bits TC bitmap + * + * This function configures aggregator node(s). + */ +enum ice_status +ice_cfg_agg(struct ice_port_info *pi, u32 agg_id, enum ice_agg_type agg_type, + u8 tc_bitmap) +{ + ice_bitmap_t bitmap = tc_bitmap; + enum ice_status status; + + ice_acquire_lock(&pi->sched_lock); + status = ice_sched_cfg_agg(pi, agg_id, agg_type, + (ice_bitmap_t *)&bitmap); + if (!status) + status = ice_save_agg_tc_bitmap(pi, agg_id, + (ice_bitmap_t *)&bitmap); + ice_release_lock(&pi->sched_lock); + return status; +} + +/** + * ice_get_agg_vsi_info - get the aggregator ID + * @agg_info: aggregator info + * @vsi_handle: software VSI handle + * + * The function returns aggregator VSI info based on VSI handle. This function + * needs to be called with scheduler lock held. + */ +static struct ice_sched_agg_vsi_info* +ice_get_agg_vsi_info(struct ice_sched_agg_info *agg_info, u16 vsi_handle) +{ + struct ice_sched_agg_vsi_info *agg_vsi_info; + + LIST_FOR_EACH_ENTRY(agg_vsi_info, &agg_info->agg_vsi_list, + ice_sched_agg_vsi_info, list_entry) + if (agg_vsi_info->vsi_handle == vsi_handle) + return agg_vsi_info; + + return NULL; +} + +/** + * ice_get_vsi_agg_info - get the aggregator info of VSI + * @hw: pointer to the hardware structure + * @vsi_handle: Sw VSI handle + * + * The function returns aggregator info of VSI represented via vsi_handle. The + * VSI has in this case a different aggregator than the default one. This + * function needs to be called with scheduler lock held. 
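ice_sched_cfg_agg above is driven entirely by an 8-bit TC bitmap: a set bit means "make sure this TC has an aggregator node", a clear bit means "tear that TC's aggregator configuration down if it exists". A standalone sketch of that per-TC reconciliation over a bitmap; the bit helpers are illustrative, not the driver's ice_is_tc_ena/ice_set_bit:

#include <stdio.h>

#define MAX_TC 8

static int tc_ena(unsigned char bitmap, unsigned int tc)
{
	return (bitmap >> tc) & 1;
}

int main(void)
{
	unsigned char requested = 0x05;		/* TCs 0 and 2 requested */
	unsigned char configured = 0x03;	/* TCs 0 and 1 currently configured */
	unsigned int tc;

	for (tc = 0; tc < MAX_TC; tc++) {
		if (!tc_ena(requested, tc)) {
			/* requested off: remove any existing config for this TC */
			if (tc_ena(configured, tc)) {
				printf("TC %u: remove aggregator config\n", tc);
				configured &= (unsigned char)~(1u << tc);
			}
			continue;
		}
		if (tc_ena(configured, tc))
			continue;		/* already present, nothing to do */
		printf("TC %u: create aggregator node\n", tc);
		configured |= (unsigned char)(1u << tc);
	}
	printf("final bitmap: 0x%02x\n", configured);	/* 0x05 */
	return 0;
}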
+ */ +static struct ice_sched_agg_info* +ice_get_vsi_agg_info(struct ice_hw *hw, u16 vsi_handle) +{ + struct ice_sched_agg_info *agg_info; + + LIST_FOR_EACH_ENTRY(agg_info, &hw->agg_list, ice_sched_agg_info, + list_entry) { + struct ice_sched_agg_vsi_info *agg_vsi_info; + + agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle); + if (agg_vsi_info) + return agg_info; + } + return NULL; +} + +/** + * ice_save_agg_vsi_tc_bitmap - save aggregator VSI TC bitmap + * @pi: port information structure + * @agg_id: aggregator ID + * @vsi_handle: software VSI handle + * @tc_bitmap: TC bitmap of enabled TC(s) + * + * Save VSI to aggregator TC bitmap. This function needs to call with scheduler + * lock held. + */ +static enum ice_status +ice_save_agg_vsi_tc_bitmap(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle, + ice_bitmap_t *tc_bitmap) +{ + struct ice_sched_agg_vsi_info *agg_vsi_info; + struct ice_sched_agg_info *agg_info; + + agg_info = ice_get_agg_info(pi->hw, agg_id); + if (!agg_info) + return ICE_ERR_PARAM; + /* check if entry already exist */ + agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle); + if (!agg_vsi_info) + return ICE_ERR_PARAM; + ice_cp_bitmap(agg_vsi_info->replay_tc_bitmap, tc_bitmap, + ICE_MAX_TRAFFIC_CLASS); + return ICE_SUCCESS; +} + +/** + * ice_sched_assoc_vsi_to_agg - associate/move VSI to new/default aggregator + * @pi: port information structure + * @agg_id: aggregator ID + * @vsi_handle: software VSI handle + * @tc_bitmap: TC bitmap of enabled TC(s) + * + * This function moves VSI to a new or default aggregator node. If VSI is + * already associated to the aggregator node then no operation is performed on + * the tree. This function needs to be called with scheduler lock held. + */ +static enum ice_status +ice_sched_assoc_vsi_to_agg(struct ice_port_info *pi, u32 agg_id, + u16 vsi_handle, ice_bitmap_t *tc_bitmap) +{ + struct ice_sched_agg_vsi_info *agg_vsi_info; + struct ice_sched_agg_info *agg_info; + enum ice_status status = ICE_SUCCESS; + struct ice_hw *hw = pi->hw; + u8 tc; + + if (!ice_is_vsi_valid(pi->hw, vsi_handle)) + return ICE_ERR_PARAM; + agg_info = ice_get_agg_info(hw, agg_id); + if (!agg_info) + return ICE_ERR_PARAM; + /* check if entry already exist */ + agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle); + if (!agg_vsi_info) { + /* Create new entry for VSI under aggregator list */ + agg_vsi_info = (struct ice_sched_agg_vsi_info *) + ice_malloc(hw, sizeof(*agg_vsi_info)); + if (!agg_vsi_info) + return ICE_ERR_PARAM; + + /* add VSI ID into the aggregator list */ + agg_vsi_info->vsi_handle = vsi_handle; + LIST_ADD(&agg_vsi_info->list_entry, &agg_info->agg_vsi_list); + } + /* Move VSI node to new aggregator node for requested TC(s) */ + ice_for_each_traffic_class(tc) { + if (!ice_is_tc_ena(*tc_bitmap, tc)) + continue; + + /* Move VSI to new aggregator */ + status = ice_sched_move_vsi_to_agg(pi, vsi_handle, agg_id, tc); + if (status) + break; + + if (agg_id != ICE_DFLT_AGG_ID) + ice_set_bit(tc, agg_vsi_info->tc_bitmap); + else + ice_clear_bit(tc, agg_vsi_info->tc_bitmap); + } + /* If VSI moved back to default aggregator, delete agg_vsi_info. */ + if (!ice_is_any_bit_set(agg_vsi_info->tc_bitmap, + ICE_MAX_TRAFFIC_CLASS)) { + LIST_DEL(&agg_vsi_info->list_entry); + ice_free(hw, agg_vsi_info); + } + return status; +} + +/** + * ice_sched_rm_unused_rl_prof - remove unused RL profile + * @pi: port information structure + * + * This function removes unused rate limit profiles from the HW and + * SW DB. The caller needs to hold scheduler lock. 
+ */ +static void ice_sched_rm_unused_rl_prof(struct ice_port_info *pi) +{ + u16 ln; + + for (ln = 0; ln < pi->hw->num_tx_sched_layers; ln++) { + struct ice_aqc_rl_profile_info *rl_prof_elem; + struct ice_aqc_rl_profile_info *rl_prof_tmp; + + LIST_FOR_EACH_ENTRY_SAFE(rl_prof_elem, rl_prof_tmp, + &pi->rl_prof_list[ln], + ice_aqc_rl_profile_info, list_entry) { + if (!ice_sched_del_rl_profile(pi->hw, rl_prof_elem)) + ice_debug(pi->hw, ICE_DBG_SCHED, + "Removed rl profile\n"); + } + } +} + +/** + * ice_sched_update_elem - update element + * @hw: pointer to the HW struct + * @node: pointer to node + * @info: node info to update + * + * It updates the HW DB, and local SW DB of node. It updates the scheduling + * parameters of node from argument info data buffer (Info->data buf) and + * returns success or error on config sched element failure. The caller + * needs to hold scheduler lock. + */ +static enum ice_status +ice_sched_update_elem(struct ice_hw *hw, struct ice_sched_node *node, + struct ice_aqc_txsched_elem_data *info) +{ + struct ice_aqc_conf_elem buf; + enum ice_status status; + u16 elem_cfgd = 0; + u16 num_elems = 1; + + buf.generic[0] = *info; + /* Parent TEID is reserved field in this aq call */ + buf.generic[0].parent_teid = 0; + /* Element type is reserved field in this aq call */ + buf.generic[0].data.elem_type = 0; + /* Flags is reserved field in this aq call */ + buf.generic[0].data.flags = 0; + + /* Update HW DB */ + /* Configure element node */ + status = ice_aq_cfg_sched_elems(hw, num_elems, &buf, sizeof(buf), + &elem_cfgd, NULL); + if (status || elem_cfgd != num_elems) { + ice_debug(hw, ICE_DBG_SCHED, "Config sched elem error\n"); + return ICE_ERR_CFG; + } + + /* Config success case */ + /* Now update local SW DB */ + /* Only copy the data portion of info buffer */ + node->info.data = info->data; + return status; +} + +/** + * ice_sched_cfg_node_bw_alloc - configure node BW weight/alloc params + * @hw: pointer to the HW struct + * @node: sched node to configure + * @rl_type: rate limit type CIR, EIR, or shared + * @bw_alloc: BW weight/allocation + * + * This function configures node element's BW allocation. + */ +static enum ice_status +ice_sched_cfg_node_bw_alloc(struct ice_hw *hw, struct ice_sched_node *node, + enum ice_rl_type rl_type, u16 bw_alloc) +{ + struct ice_aqc_txsched_elem_data buf; + struct ice_aqc_txsched_elem *data; + enum ice_status status; + + buf = node->info; + data = &buf.data; + if (rl_type == ICE_MIN_BW) { + data->valid_sections |= ICE_AQC_ELEM_VALID_CIR; + data->cir_bw.bw_alloc = CPU_TO_LE16(bw_alloc); + } else if (rl_type == ICE_MAX_BW) { + data->valid_sections |= ICE_AQC_ELEM_VALID_EIR; + data->eir_bw.bw_alloc = CPU_TO_LE16(bw_alloc); + } else { + return ICE_ERR_PARAM; + } + + /* Configure element */ + status = ice_sched_update_elem(hw, node, &buf); + return status; +} + +/** + * ice_move_vsi_to_agg - moves VSI to new or default aggregator + * @pi: port information structure + * @agg_id: aggregator ID + * @vsi_handle: software VSI handle + * @tc_bitmap: TC bitmap of enabled TC(s) + * + * Move or associate VSI to a new or default aggregator node. 
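ice_sched_cfg_node_bw_alloc above only fills in the section that matches the requested rate-limit type and sets the corresponding valid-sections bit so firmware knows which fields to apply; any other type is rejected as a parameter error. A standalone sketch of that pattern with illustrative flag values and field names:

#include <stdio.h>
#include <stdint.h>

/* Illustrative flag values; the driver has its own ICE_AQC_ELEM_VALID_* bits. */
#define VALID_CIR 0x01
#define VALID_EIR 0x02

enum rl_type { MIN_BW, MAX_BW };

struct elem_data {
	uint8_t valid_sections;
	uint16_t cir_bw_alloc;
	uint16_t eir_bw_alloc;
};

/* Touch only the matching section and flag it valid; reject anything else. */
static int cfg_bw_alloc(struct elem_data *data, enum rl_type type, uint16_t bw_alloc)
{
	if (type == MIN_BW) {
		data->valid_sections |= VALID_CIR;
		data->cir_bw_alloc = bw_alloc;
	} else if (type == MAX_BW) {
		data->valid_sections |= VALID_EIR;
		data->eir_bw_alloc = bw_alloc;
	} else {
		return -1;	/* unsupported type, like ICE_ERR_PARAM */
	}
	return 0;
}

int main(void)
{
	struct elem_data data = { 0 };

	cfg_bw_alloc(&data, MAX_BW, 50);
	printf("valid 0x%02x, cir %u, eir %u\n",
	       data.valid_sections, data.cir_bw_alloc, data.eir_bw_alloc);
	return 0;
}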
+ */ +enum ice_status +ice_move_vsi_to_agg(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle, + u8 tc_bitmap) +{ + ice_bitmap_t bitmap = tc_bitmap; + enum ice_status status; + + ice_acquire_lock(&pi->sched_lock); + status = ice_sched_assoc_vsi_to_agg(pi, agg_id, vsi_handle, + (ice_bitmap_t *)&bitmap); + if (!status) + status = ice_save_agg_vsi_tc_bitmap(pi, agg_id, vsi_handle, + (ice_bitmap_t *)&bitmap); + ice_release_lock(&pi->sched_lock); + return status; +} + +/** + * ice_rm_agg_cfg - remove aggregator configuration + * @pi: port information structure + * @agg_id: aggregator ID + * + * This function removes aggregator reference to VSI and delete aggregator ID + * info. It removes the aggregator configuration completely. + */ +enum ice_status ice_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id) +{ + struct ice_sched_agg_info *agg_info; + enum ice_status status = ICE_SUCCESS; + u8 tc; + + ice_acquire_lock(&pi->sched_lock); + agg_info = ice_get_agg_info(pi->hw, agg_id); + if (!agg_info) { + status = ICE_ERR_DOES_NOT_EXIST; + goto exit_ice_rm_agg_cfg; + } + + ice_for_each_traffic_class(tc) { + status = ice_rm_agg_cfg_tc(pi, agg_info, tc, true); + if (status) + goto exit_ice_rm_agg_cfg; + } + + if (ice_is_any_bit_set(agg_info->tc_bitmap, ICE_MAX_TRAFFIC_CLASS)) { + status = ICE_ERR_IN_USE; + goto exit_ice_rm_agg_cfg; + } + + /* Safe to delete entry now */ + LIST_DEL(&agg_info->list_entry); + ice_free(pi->hw, agg_info); + + /* Remove unused RL profile IDs from HW and SW DB */ + ice_sched_rm_unused_rl_prof(pi); + +exit_ice_rm_agg_cfg: + ice_release_lock(&pi->sched_lock); + return status; +} + +/** + * ice_set_clear_cir_bw_alloc - set or clear CIR BW alloc information + * @bw_t_info: bandwidth type information structure + * @bw_alloc: Bandwidth allocation information + * + * Save or clear CIR BW alloc information (bw_alloc) in the passed param + * bw_t_info. + */ +static void +ice_set_clear_cir_bw_alloc(struct ice_bw_type_info *bw_t_info, u16 bw_alloc) +{ + bw_t_info->cir_bw.bw_alloc = bw_alloc; + if (bw_t_info->cir_bw.bw_alloc) + ice_set_bit(ICE_BW_TYPE_CIR_WT, bw_t_info->bw_t_bitmap); + else + ice_clear_bit(ICE_BW_TYPE_CIR_WT, bw_t_info->bw_t_bitmap); +} + +/** + * ice_set_clear_eir_bw_alloc - set or clear EIR BW alloc information + * @bw_t_info: bandwidth type information structure + * @bw_alloc: Bandwidth allocation information + * + * Save or clear EIR BW alloc information (bw_alloc) in the passed param + * bw_t_info. + */ +static void +ice_set_clear_eir_bw_alloc(struct ice_bw_type_info *bw_t_info, u16 bw_alloc) +{ + bw_t_info->eir_bw.bw_alloc = bw_alloc; + if (bw_t_info->eir_bw.bw_alloc) + ice_set_bit(ICE_BW_TYPE_EIR_WT, bw_t_info->bw_t_bitmap); + else + ice_clear_bit(ICE_BW_TYPE_EIR_WT, bw_t_info->bw_t_bitmap); +} + +/** + * ice_sched_save_vsi_bw_alloc - save VSI node's BW alloc information + * @pi: port information structure + * @vsi_handle: sw VSI handle + * @tc: traffic class + * @rl_type: rate limit type min or max + * @bw_alloc: Bandwidth allocation information + * + * Save BW alloc information of VSI type node for post replay use. 
+ */ +static enum ice_status +ice_sched_save_vsi_bw_alloc(struct ice_port_info *pi, u16 vsi_handle, u8 tc, + enum ice_rl_type rl_type, u16 bw_alloc) +{ + struct ice_vsi_ctx *vsi_ctx; + + if (!ice_is_vsi_valid(pi->hw, vsi_handle)) + return ICE_ERR_PARAM; + vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle); + if (!vsi_ctx) + return ICE_ERR_PARAM; + switch (rl_type) { + case ICE_MIN_BW: + ice_set_clear_cir_bw_alloc(&vsi_ctx->sched.bw_t_info[tc], + bw_alloc); + break; + case ICE_MAX_BW: + ice_set_clear_eir_bw_alloc(&vsi_ctx->sched.bw_t_info[tc], + bw_alloc); + break; + default: + return ICE_ERR_PARAM; + } + return ICE_SUCCESS; +} + +/** + * ice_set_clear_cir_bw - set or clear CIR BW + * @bw_t_info: bandwidth type information structure + * @bw: bandwidth in Kbps - Kilo bits per sec + * + * Save or clear CIR bandwidth (BW) in the passed param bw_t_info. + */ +static void +ice_set_clear_cir_bw(struct ice_bw_type_info *bw_t_info, u32 bw) +{ + if (bw == ICE_SCHED_DFLT_BW) { + ice_clear_bit(ICE_BW_TYPE_CIR, bw_t_info->bw_t_bitmap); + bw_t_info->cir_bw.bw = 0; + } else { + /* Save type of BW information */ + ice_set_bit(ICE_BW_TYPE_CIR, bw_t_info->bw_t_bitmap); + bw_t_info->cir_bw.bw = bw; + } +} + +/** + * ice_set_clear_eir_bw - set or clear EIR BW + * @bw_t_info: bandwidth type information structure + * @bw: bandwidth in Kbps - Kilo bits per sec + * + * Save or clear EIR bandwidth (BW) in the passed param bw_t_info. + */ +static void +ice_set_clear_eir_bw(struct ice_bw_type_info *bw_t_info, u32 bw) +{ + if (bw == ICE_SCHED_DFLT_BW) { + ice_clear_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap); + bw_t_info->eir_bw.bw = 0; + } else { + /* EIR BW and Shared BW profiles are mutually exclusive and + * hence only one of them may be set for any given element. + * First clear earlier saved shared BW information. + */ + ice_clear_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap); + bw_t_info->shared_bw = 0; + /* save EIR BW information */ + ice_set_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap); + bw_t_info->eir_bw.bw = bw; + } +} + +/** + * ice_set_clear_shared_bw - set or clear shared BW + * @bw_t_info: bandwidth type information structure + * @bw: bandwidth in Kbps - Kilo bits per sec + * + * Save or clear shared bandwidth (BW) in the passed param bw_t_info. + */ +static void +ice_set_clear_shared_bw(struct ice_bw_type_info *bw_t_info, u32 bw) +{ + if (bw == ICE_SCHED_DFLT_BW) { + ice_clear_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap); + bw_t_info->shared_bw = 0; + } else { + /* EIR BW and Shared BW profiles are mutually exclusive and + * hence only one of them may be set for any given element. + * First clear earlier saved EIR BW information. + */ + ice_clear_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap); + bw_t_info->eir_bw.bw = 0; + /* save shared BW information */ + ice_set_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap); + bw_t_info->shared_bw = bw; + } +} + +/** + * ice_sched_save_vsi_bw - save VSI node's BW information + * @pi: port information structure + * @vsi_handle: sw VSI handle + * @tc: traffic class + * @rl_type: rate limit type min, max, or shared + * @bw: bandwidth in Kbps - Kilo bits per sec + * + * Save BW information of VSI type node for post replay use. 
+ */ +static enum ice_status +ice_sched_save_vsi_bw(struct ice_port_info *pi, u16 vsi_handle, u8 tc, + enum ice_rl_type rl_type, u32 bw) +{ + struct ice_vsi_ctx *vsi_ctx; + + if (!ice_is_vsi_valid(pi->hw, vsi_handle)) + return ICE_ERR_PARAM; + vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle); + if (!vsi_ctx) + return ICE_ERR_PARAM; + switch (rl_type) { + case ICE_MIN_BW: + ice_set_clear_cir_bw(&vsi_ctx->sched.bw_t_info[tc], bw); + break; + case ICE_MAX_BW: + ice_set_clear_eir_bw(&vsi_ctx->sched.bw_t_info[tc], bw); + break; + case ICE_SHARED_BW: + ice_set_clear_shared_bw(&vsi_ctx->sched.bw_t_info[tc], bw); + break; + default: + return ICE_ERR_PARAM; + } + return ICE_SUCCESS; +} + +/** + * ice_set_clear_prio - set or clear priority information + * @bw_t_info: bandwidth type information structure + * @prio: priority to save + * + * Save or clear priority (prio) in the passed param bw_t_info. + */ +static void +ice_set_clear_prio(struct ice_bw_type_info *bw_t_info, u8 prio) +{ + bw_t_info->generic = prio; + if (bw_t_info->generic) + ice_set_bit(ICE_BW_TYPE_PRIO, bw_t_info->bw_t_bitmap); + else + ice_clear_bit(ICE_BW_TYPE_PRIO, bw_t_info->bw_t_bitmap); +} + +/** + * ice_sched_save_vsi_prio - save VSI node's priority information + * @pi: port information structure + * @vsi_handle: Software VSI handle + * @tc: traffic class + * @prio: priority to save + * + * Save priority information of VSI type node for post replay use. + */ +static enum ice_status +ice_sched_save_vsi_prio(struct ice_port_info *pi, u16 vsi_handle, u8 tc, + u8 prio) +{ + struct ice_vsi_ctx *vsi_ctx; + + if (!ice_is_vsi_valid(pi->hw, vsi_handle)) + return ICE_ERR_PARAM; + vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle); + if (!vsi_ctx) + return ICE_ERR_PARAM; + if (tc >= ICE_MAX_TRAFFIC_CLASS) + return ICE_ERR_PARAM; + ice_set_clear_prio(&vsi_ctx->sched.bw_t_info[tc], prio); + return ICE_SUCCESS; +} + +/** + * ice_sched_save_agg_bw_alloc - save aggregator node's BW alloc information + * @pi: port information structure + * @agg_id: node aggregator ID + * @tc: traffic class + * @rl_type: rate limit type min or max + * @bw_alloc: bandwidth alloc information + * + * Save BW alloc information of AGG type node for post replay use. + */ +static enum ice_status +ice_sched_save_agg_bw_alloc(struct ice_port_info *pi, u32 agg_id, u8 tc, + enum ice_rl_type rl_type, u16 bw_alloc) +{ + struct ice_sched_agg_info *agg_info; + + agg_info = ice_get_agg_info(pi->hw, agg_id); + if (!agg_info) + return ICE_ERR_PARAM; + if (!ice_is_tc_ena(agg_info->tc_bitmap[0], tc)) + return ICE_ERR_PARAM; + switch (rl_type) { + case ICE_MIN_BW: + ice_set_clear_cir_bw_alloc(&agg_info->bw_t_info[tc], bw_alloc); + break; + case ICE_MAX_BW: + ice_set_clear_eir_bw_alloc(&agg_info->bw_t_info[tc], bw_alloc); + break; + default: + return ICE_ERR_PARAM; + } + return ICE_SUCCESS; +} + +/** + * ice_sched_save_agg_bw - save aggregator node's BW information + * @pi: port information structure + * @agg_id: node aggregator ID + * @tc: traffic class + * @rl_type: rate limit type min, max, or shared + * @bw: bandwidth in Kbps - Kilo bits per sec + * + * Save BW information of AGG type node for post replay use. 
+ */ +static enum ice_status +ice_sched_save_agg_bw(struct ice_port_info *pi, u32 agg_id, u8 tc, + enum ice_rl_type rl_type, u32 bw) +{ + struct ice_sched_agg_info *agg_info; + + agg_info = ice_get_agg_info(pi->hw, agg_id); + if (!agg_info) + return ICE_ERR_PARAM; + if (!ice_is_tc_ena(agg_info->tc_bitmap[0], tc)) + return ICE_ERR_PARAM; + switch (rl_type) { + case ICE_MIN_BW: + ice_set_clear_cir_bw(&agg_info->bw_t_info[tc], bw); + break; + case ICE_MAX_BW: + ice_set_clear_eir_bw(&agg_info->bw_t_info[tc], bw); + break; + case ICE_SHARED_BW: + ice_set_clear_shared_bw(&agg_info->bw_t_info[tc], bw); + break; + default: + return ICE_ERR_PARAM; + } + return ICE_SUCCESS; +} + +/** + * ice_cfg_vsi_bw_lmt_per_tc - configure VSI BW limit per TC + * @pi: port information structure + * @vsi_handle: software VSI handle + * @tc: traffic class + * @rl_type: min or max + * @bw: bandwidth in Kbps + * + * This function configures BW limit of VSI scheduling node based on TC + * information. + */ +enum ice_status +ice_cfg_vsi_bw_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc, + enum ice_rl_type rl_type, u32 bw) +{ + enum ice_status status; + + status = ice_sched_set_node_bw_lmt_per_tc(pi, vsi_handle, + ICE_AGG_TYPE_VSI, + tc, rl_type, bw); + if (!status) { + ice_acquire_lock(&pi->sched_lock); + status = ice_sched_save_vsi_bw(pi, vsi_handle, tc, rl_type, bw); + ice_release_lock(&pi->sched_lock); + } + return status; +} + +/** + * ice_cfg_dflt_vsi_bw_lmt_per_tc - configure default VSI BW limit per TC + * @pi: port information structure + * @vsi_handle: software VSI handle + * @tc: traffic class + * @rl_type: min or max + * + * This function configures default BW limit of VSI scheduling node based on TC + * information. + */ +enum ice_status +ice_cfg_vsi_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc, + enum ice_rl_type rl_type) +{ + enum ice_status status; + + status = ice_sched_set_node_bw_lmt_per_tc(pi, vsi_handle, + ICE_AGG_TYPE_VSI, + tc, rl_type, + ICE_SCHED_DFLT_BW); + if (!status) { + ice_acquire_lock(&pi->sched_lock); + status = ice_sched_save_vsi_bw(pi, vsi_handle, tc, rl_type, + ICE_SCHED_DFLT_BW); + ice_release_lock(&pi->sched_lock); + } + return status; +} + +/** + * ice_cfg_agg_bw_lmt_per_tc - configure aggregator BW limit per TC + * @pi: port information structure + * @agg_id: aggregator ID + * @tc: traffic class + * @rl_type: min or max + * @bw: bandwidth in Kbps + * + * This function applies BW limit to aggregator scheduling node based on TC + * information. + */ +enum ice_status +ice_cfg_agg_bw_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc, + enum ice_rl_type rl_type, u32 bw) +{ + enum ice_status status; + + status = ice_sched_set_node_bw_lmt_per_tc(pi, agg_id, ICE_AGG_TYPE_AGG, + tc, rl_type, bw); + if (!status) { + ice_acquire_lock(&pi->sched_lock); + status = ice_sched_save_agg_bw(pi, agg_id, tc, rl_type, bw); + ice_release_lock(&pi->sched_lock); + } + return status; +} + +/** + * ice_cfg_agg_bw_dflt_lmt_per_tc - configure aggregator BW default limit per TC + * @pi: port information structure + * @agg_id: aggregator ID + * @tc: traffic class + * @rl_type: min or max + * + * This function applies default BW limit to aggregator scheduling node based + * on TC information. 
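+ *
+ * For illustration only (the aggregator ID is hypothetical, not taken from
+ * the upstream sources):
+ *
+ *	// drop the max-BW cap previously set on aggregator 10 for TC 0
+ *	status = ice_cfg_agg_bw_dflt_lmt_per_tc(pi, 10, 0, ICE_MAX_BW);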
+ */
+enum ice_status
+ice_cfg_agg_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc,
+			       enum ice_rl_type rl_type)
+{
+	enum ice_status status;
+
+	status = ice_sched_set_node_bw_lmt_per_tc(pi, agg_id, ICE_AGG_TYPE_AGG,
+						  tc, rl_type,
+						  ICE_SCHED_DFLT_BW);
+	if (!status) {
+		ice_acquire_lock(&pi->sched_lock);
+		status = ice_sched_save_agg_bw(pi, agg_id, tc, rl_type,
+					       ICE_SCHED_DFLT_BW);
+		ice_release_lock(&pi->sched_lock);
+	}
+	return status;
+}
+
+/**
+ * ice_cfg_vsi_bw_shared_lmt - configure VSI BW shared limit
+ * @pi: port information structure
+ * @vsi_handle: software VSI handle
+ * @bw: bandwidth in Kbps
+ *
+ * This function configures the shared rate limiter (SRL) of all VSI type nodes
+ * across all traffic classes for VSI matching handle.
+ */
+enum ice_status
+ice_cfg_vsi_bw_shared_lmt(struct ice_port_info *pi, u16 vsi_handle, u32 bw)
+{
+	return ice_sched_set_vsi_bw_shared_lmt(pi, vsi_handle, bw);
+}
+
+/**
+ * ice_cfg_vsi_bw_no_shared_lmt - configure VSI BW for no shared limiter
+ * @pi: port information structure
+ * @vsi_handle: software VSI handle
+ *
+ * This function removes the shared rate limiter (SRL) of all VSI type nodes
+ * across all traffic classes for VSI matching handle.
+ */
+enum ice_status
+ice_cfg_vsi_bw_no_shared_lmt(struct ice_port_info *pi, u16 vsi_handle)
+{
+	return ice_sched_set_vsi_bw_shared_lmt(pi, vsi_handle,
+					       ICE_SCHED_DFLT_BW);
+}
+
+/**
+ * ice_cfg_agg_bw_shared_lmt - configure aggregator BW shared limit
+ * @pi: port information structure
+ * @agg_id: aggregator ID
+ * @bw: bandwidth in Kbps
+ *
+ * This function configures the shared rate limiter (SRL) of all aggregator type
+ * nodes across all traffic classes for aggregator matching agg_id.
+ */
+enum ice_status
+ice_cfg_agg_bw_shared_lmt(struct ice_port_info *pi, u32 agg_id, u32 bw)
+{
+	return ice_sched_set_agg_bw_shared_lmt(pi, agg_id, bw);
+}
+
+/**
+ * ice_cfg_agg_bw_no_shared_lmt - configure aggregator BW for no shared limiter
+ * @pi: port information structure
+ * @agg_id: aggregator ID
+ *
+ * This function removes the shared rate limiter (SRL) of all aggregator type
+ * nodes across all traffic classes for aggregator matching agg_id.
+ */
+enum ice_status
+ice_cfg_agg_bw_no_shared_lmt(struct ice_port_info *pi, u32 agg_id)
+{
+	return ice_sched_set_agg_bw_shared_lmt(pi, agg_id, ICE_SCHED_DFLT_BW);
+}
+
+/**
+ * ice_cfg_vsi_q_priority - configure VSI queue priority of node
+ * @pi: port information structure
+ * @num_qs: number of VSI queues
+ * @q_ids: queue IDs array
+ * @q_prio: queue priority array
+ *
+ * This function configures the queue node priority (Sibling Priority) of the
+ * passed in VSI's queue(s) for a given traffic class (TC).
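+ *
+ * An illustrative, non-upstream sketch; the TEIDs below are hypothetical.
+ * Note that the IDs passed in q_ids are the queue nodes' TEIDs:
+ *
+ *	u32 q_teids[2] = { 0x100, 0x101 };
+ *	u8 q_prio[2] = { 0, 1 };
+ *
+ *	status = ice_cfg_vsi_q_priority(pi, 2, q_teids, q_prio);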
+ */ +enum ice_status +ice_cfg_vsi_q_priority(struct ice_port_info *pi, u16 num_qs, u32 *q_ids, + u8 *q_prio) +{ + enum ice_status status = ICE_ERR_PARAM; + u16 i; + + ice_acquire_lock(&pi->sched_lock); + + for (i = 0; i < num_qs; i++) { + struct ice_sched_node *node; + + node = ice_sched_find_node_by_teid(pi->root, q_ids[i]); + if (!node || node->info.data.elem_type != + ICE_AQC_ELEM_TYPE_LEAF) { + status = ICE_ERR_PARAM; + break; + } + /* Configure Priority */ + status = ice_sched_cfg_sibl_node_prio(pi, node, q_prio[i]); + if (status) + break; + } + + ice_release_lock(&pi->sched_lock); + return status; +} + +/** + * ice_cfg_agg_vsi_priority_per_tc - config aggregator's VSI priority per TC + * @pi: port information structure + * @agg_id: Aggregator ID + * @num_vsis: number of VSI(s) + * @vsi_handle_arr: array of software VSI handles + * @node_prio: pointer to node priority + * @tc: traffic class + * + * This function configures the node priority (Sibling Priority) of the + * passed in VSI's for a given traffic class (TC) of an Aggregator ID. + */ +enum ice_status +ice_cfg_agg_vsi_priority_per_tc(struct ice_port_info *pi, u32 agg_id, + u16 num_vsis, u16 *vsi_handle_arr, + u8 *node_prio, u8 tc) +{ + struct ice_sched_agg_vsi_info *agg_vsi_info; + struct ice_sched_node *tc_node, *agg_node; + enum ice_status status = ICE_ERR_PARAM; + struct ice_sched_agg_info *agg_info; + bool agg_id_present = false; + struct ice_hw *hw = pi->hw; + u16 i; + + ice_acquire_lock(&pi->sched_lock); + LIST_FOR_EACH_ENTRY(agg_info, &hw->agg_list, ice_sched_agg_info, + list_entry) + if (agg_info->agg_id == agg_id) { + agg_id_present = true; + break; + } + if (!agg_id_present) + goto exit_agg_priority_per_tc; + + tc_node = ice_sched_get_tc_node(pi, tc); + if (!tc_node) + goto exit_agg_priority_per_tc; + + agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id); + if (!agg_node) + goto exit_agg_priority_per_tc; + + if (num_vsis > hw->max_children[agg_node->tx_sched_layer]) + goto exit_agg_priority_per_tc; + + for (i = 0; i < num_vsis; i++) { + struct ice_sched_node *vsi_node; + bool vsi_handle_valid = false; + u16 vsi_handle; + + status = ICE_ERR_PARAM; + vsi_handle = vsi_handle_arr[i]; + if (!ice_is_vsi_valid(hw, vsi_handle)) + goto exit_agg_priority_per_tc; + /* Verify child nodes before applying settings */ + LIST_FOR_EACH_ENTRY(agg_vsi_info, &agg_info->agg_vsi_list, + ice_sched_agg_vsi_info, list_entry) + if (agg_vsi_info->vsi_handle == vsi_handle) { + /* cppcheck-suppress unreadVariable */ + vsi_handle_valid = true; + break; + } + + if (!vsi_handle_valid) + goto exit_agg_priority_per_tc; + + vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle); + if (!vsi_node) + goto exit_agg_priority_per_tc; + + if (ice_sched_find_node_in_subtree(hw, agg_node, vsi_node)) { + /* Configure Priority */ + status = ice_sched_cfg_sibl_node_prio(pi, vsi_node, + node_prio[i]); + if (status) + break; + status = ice_sched_save_vsi_prio(pi, vsi_handle, tc, + node_prio[i]); + if (status) + break; + } + } + +exit_agg_priority_per_tc: + ice_release_lock(&pi->sched_lock); + return status; +} + +/** + * ice_cfg_vsi_bw_alloc - config VSI BW alloc per TC + * @pi: port information structure + * @vsi_handle: software VSI handle + * @ena_tcmap: enabled TC map + * @rl_type: Rate limit type CIR/EIR + * @bw_alloc: Array of BW alloc + * + * This function configures the BW allocation of the passed in VSI's + * node(s) for enabled traffic class. 
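+ *
+ * Illustrative sketch (not from the upstream sources); the weights below are
+ * hypothetical and only TCs set in ena_tcmap are touched:
+ *
+ *	u8 bw_share[ICE_MAX_TRAFFIC_CLASS] = { 75, 25 };
+ *
+ *	// give TC 0 a larger EIR weight than TC 1 (relative allocation)
+ *	status = ice_cfg_vsi_bw_alloc(pi, vsi_handle, 0x3, ICE_MAX_BW,
+ *				      bw_share);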
+ */ +enum ice_status +ice_cfg_vsi_bw_alloc(struct ice_port_info *pi, u16 vsi_handle, u8 ena_tcmap, + enum ice_rl_type rl_type, u8 *bw_alloc) +{ + enum ice_status status = ICE_SUCCESS; + u8 tc; + + if (!ice_is_vsi_valid(pi->hw, vsi_handle)) + return ICE_ERR_PARAM; + + ice_acquire_lock(&pi->sched_lock); + + /* Return success if no nodes are present across TC */ + ice_for_each_traffic_class(tc) { + struct ice_sched_node *tc_node, *vsi_node; + + if (!ice_is_tc_ena(ena_tcmap, tc)) + continue; + + tc_node = ice_sched_get_tc_node(pi, tc); + if (!tc_node) + continue; + + vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle); + if (!vsi_node) + continue; + + status = ice_sched_cfg_node_bw_alloc(pi->hw, vsi_node, rl_type, + bw_alloc[tc]); + if (status) + break; + status = ice_sched_save_vsi_bw_alloc(pi, vsi_handle, tc, + rl_type, bw_alloc[tc]); + if (status) + break; + } + + ice_release_lock(&pi->sched_lock); + return status; +} + +/** + * ice_cfg_agg_bw_alloc - config aggregator BW alloc + * @pi: port information structure + * @agg_id: aggregator ID + * @ena_tcmap: enabled TC map + * @rl_type: rate limit type CIR/EIR + * @bw_alloc: array of BW alloc + * + * This function configures the BW allocation of passed in aggregator for + * enabled traffic class(s). + */ +enum ice_status +ice_cfg_agg_bw_alloc(struct ice_port_info *pi, u32 agg_id, u8 ena_tcmap, + enum ice_rl_type rl_type, u8 *bw_alloc) +{ + struct ice_sched_agg_info *agg_info; + bool agg_id_present = false; + enum ice_status status = ICE_SUCCESS; + struct ice_hw *hw = pi->hw; + u8 tc; + + ice_acquire_lock(&pi->sched_lock); + LIST_FOR_EACH_ENTRY(agg_info, &hw->agg_list, ice_sched_agg_info, + list_entry) + if (agg_info->agg_id == agg_id) { + agg_id_present = true; + break; + } + if (!agg_id_present) { + status = ICE_ERR_PARAM; + goto exit_cfg_agg_bw_alloc; + } + + /* Return success if no nodes are present across TC */ + ice_for_each_traffic_class(tc) { + struct ice_sched_node *tc_node, *agg_node; + + if (!ice_is_tc_ena(ena_tcmap, tc)) + continue; + + tc_node = ice_sched_get_tc_node(pi, tc); + if (!tc_node) + continue; + + agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id); + if (!agg_node) + continue; + + status = ice_sched_cfg_node_bw_alloc(hw, agg_node, rl_type, + bw_alloc[tc]); + if (status) + break; + status = ice_sched_save_agg_bw_alloc(pi, agg_id, tc, rl_type, + bw_alloc[tc]); + if (status) + break; + } + +exit_cfg_agg_bw_alloc: + ice_release_lock(&pi->sched_lock); + return status; +} + +/** + * ice_sched_calc_wakeup - calculate RL profile wakeup parameter + * @hw: pointer to the HW struct + * @bw: bandwidth in Kbps + * + * This function calculates the wakeup parameter of RL profile. 
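+ *
+ * As implemented below, the returned value packs the wakeup rate as follows:
+ * when the integer part exceeds 63, bit 15 is set and the integer value is
+ * stored directly in the lower bits; otherwise bits 14:9 hold the integer
+ * part and bits 8:0 hold the fractional part scaled by ICE_RL_PROF_FRACTION.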
+ */ +static u16 ice_sched_calc_wakeup(struct ice_hw *hw, s32 bw) +{ + s64 bytes_per_sec, wakeup_int, wakeup_a, wakeup_b, wakeup_f; + s32 wakeup_f_int; + u16 wakeup = 0; + + /* Get the wakeup integer value */ + bytes_per_sec = DIV_64BIT(((s64)bw * 1000), BITS_PER_BYTE); + wakeup_int = DIV_64BIT(hw->psm_clk_freq, bytes_per_sec); + if (wakeup_int > 63) { + wakeup = (u16)((1 << 15) | wakeup_int); + } else { + /* Calculate fraction value up to 4 decimals + * Convert Integer value to a constant multiplier + */ + wakeup_b = (s64)ICE_RL_PROF_MULTIPLIER * wakeup_int; + wakeup_a = DIV_64BIT((s64)ICE_RL_PROF_MULTIPLIER * + hw->psm_clk_freq, bytes_per_sec); + + /* Get Fraction value */ + wakeup_f = wakeup_a - wakeup_b; + + /* Round up the Fractional value via Ceil(Fractional value) */ + if (wakeup_f > DIV_64BIT(ICE_RL_PROF_MULTIPLIER, 2)) + wakeup_f += 1; + + wakeup_f_int = (s32)DIV_64BIT(wakeup_f * ICE_RL_PROF_FRACTION, + ICE_RL_PROF_MULTIPLIER); + wakeup |= (u16)(wakeup_int << 9); + wakeup |= (u16)(0x1ff & wakeup_f_int); + } + + return wakeup; +} + +/** + * ice_sched_bw_to_rl_profile - convert BW to profile parameters + * @hw: pointer to the HW struct + * @bw: bandwidth in Kbps + * @profile: profile parameters to return + * + * This function converts the BW to profile structure format. + */ +static enum ice_status +ice_sched_bw_to_rl_profile(struct ice_hw *hw, u32 bw, + struct ice_aqc_rl_profile_elem *profile) +{ + enum ice_status status = ICE_ERR_PARAM; + s64 bytes_per_sec, ts_rate, mv_tmp; + bool found = false; + s32 encode = 0; + s64 mv = 0; + s32 i; + + /* Bw settings range is from 0.5Mb/sec to 100Gb/sec */ + if (bw < ICE_SCHED_MIN_BW || bw > ICE_SCHED_MAX_BW) + return status; + + /* Bytes per second from Kbps */ + bytes_per_sec = DIV_64BIT(((s64)bw * 1000), BITS_PER_BYTE); + + /* encode is 6 bits but really useful are 5 bits */ + for (i = 0; i < 64; i++) { + u64 pow_result = BIT_ULL(i); + + ts_rate = DIV_64BIT((s64)hw->psm_clk_freq, + pow_result * ICE_RL_PROF_TS_MULTIPLIER); + if (ts_rate <= 0) + continue; + + /* Multiplier value */ + mv_tmp = DIV_64BIT(bytes_per_sec * ICE_RL_PROF_MULTIPLIER, + ts_rate); + + /* Round to the nearest ICE_RL_PROF_MULTIPLIER */ + mv = round_up_64bit(mv_tmp, ICE_RL_PROF_MULTIPLIER); + + /* First multiplier value greater than the given + * accuracy bytes + */ + if (mv > ICE_RL_PROF_ACCURACY_BYTES) { + encode = i; + found = true; + break; + } + } + if (found) { + u16 wm; + + wm = ice_sched_calc_wakeup(hw, bw); + profile->rl_multiply = CPU_TO_LE16(mv); + profile->wake_up_calc = CPU_TO_LE16(wm); + profile->rl_encode = CPU_TO_LE16(encode); + status = ICE_SUCCESS; + } else { + status = ICE_ERR_DOES_NOT_EXIST; + } + + return status; +} + +/** + * ice_sched_add_rl_profile - add RL profile + * @pi: port information structure + * @rl_type: type of rate limit BW - min, max, or shared + * @bw: bandwidth in Kbps - Kilo bits per sec + * @layer_num: specifies in which layer to create profile + * + * This function first checks the existing list for corresponding BW + * parameter. If it exists, it returns the associated profile otherwise + * it creates a new rate limit profile for requested BW, and adds it to + * the HW DB and local list. It returns the new profile or null on error. + * The caller needs to hold the scheduler lock. 
+ */ +static struct ice_aqc_rl_profile_info * +ice_sched_add_rl_profile(struct ice_port_info *pi, + enum ice_rl_type rl_type, u32 bw, u8 layer_num) +{ + struct ice_aqc_rl_profile_generic_elem *buf; + struct ice_aqc_rl_profile_info *rl_prof_elem; + u16 profiles_added = 0, num_profiles = 1; + enum ice_status status; + struct ice_hw *hw; + u8 profile_type; + + if (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM) + return NULL; + switch (rl_type) { + case ICE_MIN_BW: + profile_type = ICE_AQC_RL_PROFILE_TYPE_CIR; + break; + case ICE_MAX_BW: + profile_type = ICE_AQC_RL_PROFILE_TYPE_EIR; + break; + case ICE_SHARED_BW: + profile_type = ICE_AQC_RL_PROFILE_TYPE_SRL; + break; + default: + return NULL; + } + + if (!pi) + return NULL; + hw = pi->hw; + LIST_FOR_EACH_ENTRY(rl_prof_elem, &pi->rl_prof_list[layer_num], + ice_aqc_rl_profile_info, list_entry) + if (rl_prof_elem->profile.flags == profile_type && + rl_prof_elem->bw == bw) + /* Return existing profile ID info */ + return rl_prof_elem; + + /* Create new profile ID */ + rl_prof_elem = (struct ice_aqc_rl_profile_info *) + ice_malloc(hw, sizeof(*rl_prof_elem)); + + if (!rl_prof_elem) + return NULL; + + status = ice_sched_bw_to_rl_profile(hw, bw, &rl_prof_elem->profile); + if (status != ICE_SUCCESS) + goto exit_add_rl_prof; + + rl_prof_elem->bw = bw; + /* layer_num is zero relative, and fw expects level from 1 to 9 */ + rl_prof_elem->profile.level = layer_num + 1; + rl_prof_elem->profile.flags = profile_type; + rl_prof_elem->profile.max_burst_size = CPU_TO_LE16(hw->max_burst_size); + + /* Create new entry in HW DB */ + buf = (struct ice_aqc_rl_profile_generic_elem *) + &rl_prof_elem->profile; + status = ice_aq_add_rl_profile(hw, num_profiles, buf, sizeof(*buf), + &profiles_added, NULL); + if (status || profiles_added != num_profiles) + goto exit_add_rl_prof; + + /* Good entry - add in the list */ + rl_prof_elem->prof_id_ref = 0; + LIST_ADD(&rl_prof_elem->list_entry, &pi->rl_prof_list[layer_num]); + return rl_prof_elem; + +exit_add_rl_prof: + ice_free(hw, rl_prof_elem); + return NULL; +} + +/** + * ice_sched_cfg_node_bw_lmt - configure node sched params + * @hw: pointer to the HW struct + * @node: sched node to configure + * @rl_type: rate limit type CIR, EIR, or shared + * @rl_prof_id: rate limit profile ID + * + * This function configures node element's BW limit. 
+ */ +static enum ice_status +ice_sched_cfg_node_bw_lmt(struct ice_hw *hw, struct ice_sched_node *node, + enum ice_rl_type rl_type, u16 rl_prof_id) +{ + struct ice_aqc_txsched_elem_data buf; + struct ice_aqc_txsched_elem *data; + + buf = node->info; + data = &buf.data; + switch (rl_type) { + case ICE_MIN_BW: + data->valid_sections |= ICE_AQC_ELEM_VALID_CIR; + data->cir_bw.bw_profile_idx = CPU_TO_LE16(rl_prof_id); + break; + case ICE_MAX_BW: + /* EIR BW and Shared BW profiles are mutually exclusive and + * hence only one of them may be set for any given element + */ + if (data->valid_sections & ICE_AQC_ELEM_VALID_SHARED) + return ICE_ERR_CFG; + data->valid_sections |= ICE_AQC_ELEM_VALID_EIR; + data->eir_bw.bw_profile_idx = CPU_TO_LE16(rl_prof_id); + break; + case ICE_SHARED_BW: + /* Check for removing shared BW */ + if (rl_prof_id == ICE_SCHED_NO_SHARED_RL_PROF_ID) { + /* remove shared profile */ + data->valid_sections &= ~ICE_AQC_ELEM_VALID_SHARED; + data->srl_id = 0; /* clear SRL field */ + + /* enable back EIR to default profile */ + data->valid_sections |= ICE_AQC_ELEM_VALID_EIR; + data->eir_bw.bw_profile_idx = + CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID); + break; + } + /* EIR BW and Shared BW profiles are mutually exclusive and + * hence only one of them may be set for any given element + */ + if ((data->valid_sections & ICE_AQC_ELEM_VALID_EIR) && + (LE16_TO_CPU(data->eir_bw.bw_profile_idx) != + ICE_SCHED_DFLT_RL_PROF_ID)) + return ICE_ERR_CFG; + /* EIR BW is set to default, disable it */ + data->valid_sections &= ~ICE_AQC_ELEM_VALID_EIR; + /* Okay to enable shared BW now */ + data->valid_sections |= ICE_AQC_ELEM_VALID_SHARED; + data->srl_id = CPU_TO_LE16(rl_prof_id); + break; + default: + /* Unknown rate limit type */ + return ICE_ERR_PARAM; + } + + /* Configure element */ + return ice_sched_update_elem(hw, node, &buf); +} + +/** + * ice_sched_get_node_rl_prof_id - get node's rate limit profile ID + * @node: sched node + * @rl_type: rate limit type + * + * If existing profile matches, it returns the corresponding rate + * limit profile ID, otherwise it returns an invalid ID as error. + */ +static u16 +ice_sched_get_node_rl_prof_id(struct ice_sched_node *node, + enum ice_rl_type rl_type) +{ + u16 rl_prof_id = ICE_SCHED_INVAL_PROF_ID; + struct ice_aqc_txsched_elem *data; + + data = &node->info.data; + switch (rl_type) { + case ICE_MIN_BW: + if (data->valid_sections & ICE_AQC_ELEM_VALID_CIR) + rl_prof_id = LE16_TO_CPU(data->cir_bw.bw_profile_idx); + break; + case ICE_MAX_BW: + if (data->valid_sections & ICE_AQC_ELEM_VALID_EIR) + rl_prof_id = LE16_TO_CPU(data->eir_bw.bw_profile_idx); + break; + case ICE_SHARED_BW: + if (data->valid_sections & ICE_AQC_ELEM_VALID_SHARED) + rl_prof_id = LE16_TO_CPU(data->srl_id); + break; + default: + break; + } + + return rl_prof_id; +} + +/** + * ice_sched_get_rl_prof_layer - selects rate limit profile creation layer + * @pi: port information structure + * @rl_type: type of rate limit BW - min, max, or shared + * @layer_index: layer index + * + * This function returns requested profile creation layer. 
+ */ +static u8 +ice_sched_get_rl_prof_layer(struct ice_port_info *pi, enum ice_rl_type rl_type, + u8 layer_index) +{ + struct ice_hw *hw = pi->hw; + + if (layer_index >= hw->num_tx_sched_layers) + return ICE_SCHED_INVAL_LAYER_NUM; + switch (rl_type) { + case ICE_MIN_BW: + if (hw->layer_info[layer_index].max_cir_rl_profiles) + return layer_index; + break; + case ICE_MAX_BW: + if (hw->layer_info[layer_index].max_eir_rl_profiles) + return layer_index; + break; + case ICE_SHARED_BW: + /* if current layer doesn't support SRL profile creation + * then try a layer up or down. + */ + if (hw->layer_info[layer_index].max_srl_profiles) + return layer_index; + else if (layer_index < hw->num_tx_sched_layers - 1 && + hw->layer_info[layer_index + 1].max_srl_profiles) + return layer_index + 1; + else if (layer_index > 0 && + hw->layer_info[layer_index - 1].max_srl_profiles) + return layer_index - 1; + break; + default: + break; + } + return ICE_SCHED_INVAL_LAYER_NUM; +} + +/** + * ice_sched_get_srl_node - get shared rate limit node + * @node: tree node + * @srl_layer: shared rate limit layer + * + * This function returns SRL node to be used for shared rate limit purpose. + * The caller needs to hold scheduler lock. + */ +static struct ice_sched_node * +ice_sched_get_srl_node(struct ice_sched_node *node, u8 srl_layer) +{ + if (srl_layer > node->tx_sched_layer) + return node->children[0]; + else if (srl_layer < node->tx_sched_layer) + /* Node can't be created without a parent. It will always + * have a valid parent except root node. + */ + return node->parent; + else + return node; +} + +/** + * ice_sched_rm_rl_profile - remove RL profile ID + * @pi: port information structure + * @layer_num: layer number where profiles are saved + * @profile_type: profile type like EIR, CIR, or SRL + * @profile_id: profile ID to remove + * + * This function removes rate limit profile from layer 'layer_num' of type + * 'profile_type' and profile ID as 'profile_id'. The caller needs to hold + * scheduler lock. + */ +static enum ice_status +ice_sched_rm_rl_profile(struct ice_port_info *pi, u8 layer_num, u8 profile_type, + u16 profile_id) +{ + struct ice_aqc_rl_profile_info *rl_prof_elem; + enum ice_status status = ICE_SUCCESS; + + if (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM) + return ICE_ERR_PARAM; + /* Check the existing list for RL profile */ + LIST_FOR_EACH_ENTRY(rl_prof_elem, &pi->rl_prof_list[layer_num], + ice_aqc_rl_profile_info, list_entry) + if (rl_prof_elem->profile.flags == profile_type && + LE16_TO_CPU(rl_prof_elem->profile.profile_id) == + profile_id) { + if (rl_prof_elem->prof_id_ref) + rl_prof_elem->prof_id_ref--; + + /* Remove old profile ID from database */ + status = ice_sched_del_rl_profile(pi->hw, rl_prof_elem); + if (status && status != ICE_ERR_IN_USE) + ice_debug(pi->hw, ICE_DBG_SCHED, + "Remove rl profile failed\n"); + break; + } + if (status == ICE_ERR_IN_USE) + status = ICE_SUCCESS; + return status; +} + +/** + * ice_sched_set_node_bw_dflt - set node's bandwidth limit to default + * @pi: port information structure + * @node: pointer to node structure + * @rl_type: rate limit type min, max, or shared + * @layer_num: layer number where RL profiles are saved + * + * This function configures node element's BW rate limit profile ID of + * type CIR, EIR, or SRL to default. This function needs to be called + * with the scheduler lock held. 
+ */ +static enum ice_status +ice_sched_set_node_bw_dflt(struct ice_port_info *pi, + struct ice_sched_node *node, + enum ice_rl_type rl_type, u8 layer_num) +{ + enum ice_status status; + struct ice_hw *hw; + u8 profile_type; + u16 rl_prof_id; + u16 old_id; + + hw = pi->hw; + switch (rl_type) { + case ICE_MIN_BW: + profile_type = ICE_AQC_RL_PROFILE_TYPE_CIR; + rl_prof_id = ICE_SCHED_DFLT_RL_PROF_ID; + break; + case ICE_MAX_BW: + profile_type = ICE_AQC_RL_PROFILE_TYPE_EIR; + rl_prof_id = ICE_SCHED_DFLT_RL_PROF_ID; + break; + case ICE_SHARED_BW: + profile_type = ICE_AQC_RL_PROFILE_TYPE_SRL; + /* No SRL is configured for default case */ + rl_prof_id = ICE_SCHED_NO_SHARED_RL_PROF_ID; + break; + default: + return ICE_ERR_PARAM; + } + /* Save existing RL prof ID for later clean up */ + old_id = ice_sched_get_node_rl_prof_id(node, rl_type); + /* Configure BW scheduling parameters */ + status = ice_sched_cfg_node_bw_lmt(hw, node, rl_type, rl_prof_id); + if (status) + return status; + + /* Remove stale RL profile ID */ + if (old_id == ICE_SCHED_DFLT_RL_PROF_ID || + old_id == ICE_SCHED_INVAL_PROF_ID) + return ICE_SUCCESS; + + return ice_sched_rm_rl_profile(pi, layer_num, profile_type, old_id); +} + +/** + * ice_sched_set_eir_srl_excl - set EIR/SRL exclusiveness + * @pi: port information structure + * @node: pointer to node structure + * @layer_num: layer number where rate limit profiles are saved + * @rl_type: rate limit type min, max, or shared + * @bw: bandwidth value + * + * This function prepares node element's bandwidth to SRL or EIR exclusively. + * EIR BW and Shared BW profiles are mutually exclusive and hence only one of + * them may be set for any given element. This function needs to be called + * with the scheduler lock held. + */ +static enum ice_status +ice_sched_set_eir_srl_excl(struct ice_port_info *pi, + struct ice_sched_node *node, + u8 layer_num, enum ice_rl_type rl_type, u32 bw) +{ + if (rl_type == ICE_SHARED_BW) { + /* SRL node passed in this case, it may be different node */ + if (bw == ICE_SCHED_DFLT_BW) + /* SRL being removed, ice_sched_cfg_node_bw_lmt() + * enables EIR to default. EIR is not set in this + * case, so no additional action is required. + */ + return ICE_SUCCESS; + + /* SRL being configured, set EIR to default here. + * ice_sched_cfg_node_bw_lmt() disables EIR when it + * configures SRL + */ + return ice_sched_set_node_bw_dflt(pi, node, ICE_MAX_BW, + layer_num); + } else if (rl_type == ICE_MAX_BW && + node->info.data.valid_sections & ICE_AQC_ELEM_VALID_SHARED) { + /* Remove Shared profile. Set default shared BW call + * removes shared profile for a node. + */ + return ice_sched_set_node_bw_dflt(pi, node, + ICE_SHARED_BW, + layer_num); + } + return ICE_SUCCESS; +} + +/** + * ice_sched_set_node_bw - set node's bandwidth + * @pi: port information structure + * @node: tree node + * @rl_type: rate limit type min, max, or shared + * @bw: bandwidth in Kbps - Kilo bits per sec + * @layer_num: layer number + * + * This function adds new profile corresponding to requested BW, configures + * node's RL profile ID of type CIR, EIR, or SRL, and removes old profile + * ID from local database. The caller needs to hold scheduler lock. 
+ */ +static enum ice_status +ice_sched_set_node_bw(struct ice_port_info *pi, struct ice_sched_node *node, + enum ice_rl_type rl_type, u32 bw, u8 layer_num) +{ + struct ice_aqc_rl_profile_info *rl_prof_info; + enum ice_status status = ICE_ERR_PARAM; + struct ice_hw *hw = pi->hw; + u16 old_id, rl_prof_id; + + rl_prof_info = ice_sched_add_rl_profile(pi, rl_type, bw, layer_num); + if (!rl_prof_info) + return status; + + rl_prof_id = LE16_TO_CPU(rl_prof_info->profile.profile_id); + + /* Save existing RL prof ID for later clean up */ + old_id = ice_sched_get_node_rl_prof_id(node, rl_type); + /* Configure BW scheduling parameters */ + status = ice_sched_cfg_node_bw_lmt(hw, node, rl_type, rl_prof_id); + if (status) + return status; + + /* New changes has been applied */ + /* Increment the profile ID reference count */ + rl_prof_info->prof_id_ref++; + + /* Check for old ID removal */ + if ((old_id == ICE_SCHED_DFLT_RL_PROF_ID && rl_type != ICE_SHARED_BW) || + old_id == ICE_SCHED_INVAL_PROF_ID || old_id == rl_prof_id) + return ICE_SUCCESS; + + return ice_sched_rm_rl_profile(pi, layer_num, + rl_prof_info->profile.flags, + old_id); +} + +/** + * ice_sched_set_node_bw_lmt - set node's BW limit + * @pi: port information structure + * @node: tree node + * @rl_type: rate limit type min, max, or shared + * @bw: bandwidth in Kbps - Kilo bits per sec + * + * It updates node's BW limit parameters like BW RL profile ID of type CIR, + * EIR, or SRL. The caller needs to hold scheduler lock. + */ +static enum ice_status +ice_sched_set_node_bw_lmt(struct ice_port_info *pi, struct ice_sched_node *node, + enum ice_rl_type rl_type, u32 bw) +{ + struct ice_sched_node *cfg_node = node; + enum ice_status status; + + struct ice_hw *hw; + u8 layer_num; + + if (!pi) + return ICE_ERR_PARAM; + hw = pi->hw; + /* Remove unused RL profile IDs from HW and SW DB */ + ice_sched_rm_unused_rl_prof(pi); + layer_num = ice_sched_get_rl_prof_layer(pi, rl_type, + node->tx_sched_layer); + if (layer_num >= hw->num_tx_sched_layers) + return ICE_ERR_PARAM; + + if (rl_type == ICE_SHARED_BW) { + /* SRL node may be different */ + cfg_node = ice_sched_get_srl_node(node, layer_num); + if (!cfg_node) + return ICE_ERR_CFG; + } + /* EIR BW and Shared BW profiles are mutually exclusive and + * hence only one of them may be set for any given element + */ + status = ice_sched_set_eir_srl_excl(pi, cfg_node, layer_num, rl_type, + bw); + if (status) + return status; + if (bw == ICE_SCHED_DFLT_BW) + return ice_sched_set_node_bw_dflt(pi, cfg_node, rl_type, + layer_num); + return ice_sched_set_node_bw(pi, cfg_node, rl_type, bw, layer_num); +} + +/** + * ice_sched_set_node_bw_dflt_lmt - set node's BW limit to default + * @pi: port information structure + * @node: pointer to node structure + * @rl_type: rate limit type min, max, or shared + * + * This function configures node element's BW rate limit profile ID of + * type CIR, EIR, or SRL to default. This function needs to be called + * with the scheduler lock held. + */ +static enum ice_status +ice_sched_set_node_bw_dflt_lmt(struct ice_port_info *pi, + struct ice_sched_node *node, + enum ice_rl_type rl_type) +{ + return ice_sched_set_node_bw_lmt(pi, node, rl_type, + ICE_SCHED_DFLT_BW); +} + +/** + * ice_sched_validate_srl_node - Check node for SRL applicability + * @node: sched node to configure + * @sel_layer: selected SRL layer + * + * This function checks if the SRL can be applied to a selceted layer node on + * behalf of the requested node (first argument). 
This function needs to be + * called with scheduler lock held. + */ +static enum ice_status +ice_sched_validate_srl_node(struct ice_sched_node *node, u8 sel_layer) +{ + /* SRL profiles are not available on all layers. Check if the + * SRL profile can be applied to a node above or below the + * requested node. SRL configuration is possible only if the + * selected layer's node has single child. + */ + if (sel_layer == node->tx_sched_layer || + ((sel_layer == node->tx_sched_layer + 1) && + node->num_children == 1) || + ((sel_layer == node->tx_sched_layer - 1) && + (node->parent && node->parent->num_children == 1))) + return ICE_SUCCESS; + + return ICE_ERR_CFG; +} + +/** + * ice_sched_save_q_bw - save queue node's BW information + * @q_ctx: queue context structure + * @rl_type: rate limit type min, max, or shared + * @bw: bandwidth in Kbps - Kilo bits per sec + * + * Save BW information of queue type node for post replay use. + */ +static enum ice_status +ice_sched_save_q_bw(struct ice_q_ctx *q_ctx, enum ice_rl_type rl_type, u32 bw) +{ + switch (rl_type) { + case ICE_MIN_BW: + ice_set_clear_cir_bw(&q_ctx->bw_t_info, bw); + break; + case ICE_MAX_BW: + ice_set_clear_eir_bw(&q_ctx->bw_t_info, bw); + break; + case ICE_SHARED_BW: + ice_set_clear_shared_bw(&q_ctx->bw_t_info, bw); + break; + default: + return ICE_ERR_PARAM; + } + return ICE_SUCCESS; +} + +/** + * ice_sched_set_q_bw_lmt - sets queue BW limit + * @pi: port information structure + * @vsi_handle: sw VSI handle + * @tc: traffic class + * @q_handle: software queue handle + * @rl_type: min, max, or shared + * @bw: bandwidth in Kbps + * + * This function sets BW limit of queue scheduling node. + */ +static enum ice_status +ice_sched_set_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc, + u16 q_handle, enum ice_rl_type rl_type, u32 bw) +{ + enum ice_status status = ICE_ERR_PARAM; + struct ice_sched_node *node; + struct ice_q_ctx *q_ctx; + + if (!ice_is_vsi_valid(pi->hw, vsi_handle)) + return ICE_ERR_PARAM; + ice_acquire_lock(&pi->sched_lock); + q_ctx = ice_get_lan_q_ctx(pi->hw, vsi_handle, tc, q_handle); + if (!q_ctx) + goto exit_q_bw_lmt; + node = ice_sched_find_node_by_teid(pi->root, q_ctx->q_teid); + if (!node) { + ice_debug(pi->hw, ICE_DBG_SCHED, "Wrong q_teid\n"); + goto exit_q_bw_lmt; + } + + /* Return error if it is not a leaf node */ + if (node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF) + goto exit_q_bw_lmt; + + /* SRL bandwidth layer selection */ + if (rl_type == ICE_SHARED_BW) { + u8 sel_layer; /* selected layer */ + + sel_layer = ice_sched_get_rl_prof_layer(pi, rl_type, + node->tx_sched_layer); + if (sel_layer >= pi->hw->num_tx_sched_layers) { + status = ICE_ERR_PARAM; + goto exit_q_bw_lmt; + } + status = ice_sched_validate_srl_node(node, sel_layer); + if (status) + goto exit_q_bw_lmt; + } + + if (bw == ICE_SCHED_DFLT_BW) + status = ice_sched_set_node_bw_dflt_lmt(pi, node, rl_type); + else + status = ice_sched_set_node_bw_lmt(pi, node, rl_type, bw); + + if (!status) + status = ice_sched_save_q_bw(q_ctx, rl_type, bw); + +exit_q_bw_lmt: + ice_release_lock(&pi->sched_lock); + return status; +} + +/** + * ice_cfg_q_bw_lmt - configure queue BW limit + * @pi: port information structure + * @vsi_handle: sw VSI handle + * @tc: traffic class + * @q_handle: software queue handle + * @rl_type: min, max, or shared + * @bw: bandwidth in Kbps + * + * This function configures BW limit of queue scheduling node. 
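+ *
+ * Illustrative sketch (not from the upstream sources); the VSI handle, queue
+ * handle, and rate are hypothetical:
+ *
+ *	// cap queue 0 of the VSI on TC 0 to 100 Mbps (100000 Kbps)
+ *	status = ice_cfg_q_bw_lmt(pi, vsi_handle, 0, 0, ICE_MAX_BW, 100000);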
+ */ +enum ice_status +ice_cfg_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc, + u16 q_handle, enum ice_rl_type rl_type, u32 bw) +{ + return ice_sched_set_q_bw_lmt(pi, vsi_handle, tc, q_handle, rl_type, + bw); +} + +/** + * ice_cfg_q_bw_dflt_lmt - configure queue BW default limit + * @pi: port information structure + * @vsi_handle: sw VSI handle + * @tc: traffic class + * @q_handle: software queue handle + * @rl_type: min, max, or shared + * + * This function configures BW default limit of queue scheduling node. + */ +enum ice_status +ice_cfg_q_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc, + u16 q_handle, enum ice_rl_type rl_type) +{ + return ice_sched_set_q_bw_lmt(pi, vsi_handle, tc, q_handle, rl_type, + ICE_SCHED_DFLT_BW); +} + +/** + * ice_sched_save_tc_node_bw - save TC node BW limit + * @pi: port information structure + * @tc: TC number + * @rl_type: min or max + * @bw: bandwidth in Kbps + * + * This function saves the modified values of bandwidth settings for later + * replay purpose (restore) after reset. + */ +static enum ice_status +ice_sched_save_tc_node_bw(struct ice_port_info *pi, u8 tc, + enum ice_rl_type rl_type, u32 bw) +{ + if (tc >= ICE_MAX_TRAFFIC_CLASS) + return ICE_ERR_PARAM; + switch (rl_type) { + case ICE_MIN_BW: + ice_set_clear_cir_bw(&pi->tc_node_bw_t_info[tc], bw); + break; + case ICE_MAX_BW: + ice_set_clear_eir_bw(&pi->tc_node_bw_t_info[tc], bw); + break; + case ICE_SHARED_BW: + ice_set_clear_shared_bw(&pi->tc_node_bw_t_info[tc], bw); + break; + default: + return ICE_ERR_PARAM; + } + return ICE_SUCCESS; +} + +/** + * ice_sched_set_tc_node_bw_lmt - sets TC node BW limit + * @pi: port information structure + * @tc: TC number + * @rl_type: min or max + * @bw: bandwidth in Kbps + * + * This function configures bandwidth limit of TC node. + */ +static enum ice_status +ice_sched_set_tc_node_bw_lmt(struct ice_port_info *pi, u8 tc, + enum ice_rl_type rl_type, u32 bw) +{ + enum ice_status status = ICE_ERR_PARAM; + struct ice_sched_node *tc_node; + + if (tc >= ICE_MAX_TRAFFIC_CLASS) + return status; + ice_acquire_lock(&pi->sched_lock); + tc_node = ice_sched_get_tc_node(pi, tc); + if (!tc_node) + goto exit_set_tc_node_bw; + if (bw == ICE_SCHED_DFLT_BW) + status = ice_sched_set_node_bw_dflt_lmt(pi, tc_node, rl_type); + else + status = ice_sched_set_node_bw_lmt(pi, tc_node, rl_type, bw); + if (!status) + status = ice_sched_save_tc_node_bw(pi, tc, rl_type, bw); + +exit_set_tc_node_bw: + ice_release_lock(&pi->sched_lock); + return status; +} + +/** + * ice_cfg_tc_node_bw_lmt - configure TC node BW limit + * @pi: port information structure + * @tc: TC number + * @rl_type: min or max + * @bw: bandwidth in Kbps + * + * This function configures BW limit of TC node. + * Note: The minimum guaranteed reservation is done via DCBX. + */ +enum ice_status +ice_cfg_tc_node_bw_lmt(struct ice_port_info *pi, u8 tc, + enum ice_rl_type rl_type, u32 bw) +{ + return ice_sched_set_tc_node_bw_lmt(pi, tc, rl_type, bw); +} + +/** + * ice_cfg_tc_node_bw_dflt_lmt - configure TC node BW default limit + * @pi: port information structure + * @tc: TC number + * @rl_type: min or max + * + * This function configures BW default limit of TC node. 
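+ *
+ * An illustrative, non-upstream sketch using hypothetical values:
+ *
+ *	// cap TC 0 to 10 Gbps (10000000 Kbps), then later remove the cap
+ *	status = ice_cfg_tc_node_bw_lmt(pi, 0, ICE_MAX_BW, 10000000);
+ *	...
+ *	status = ice_cfg_tc_node_bw_dflt_lmt(pi, 0, ICE_MAX_BW);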
+ */ +enum ice_status +ice_cfg_tc_node_bw_dflt_lmt(struct ice_port_info *pi, u8 tc, + enum ice_rl_type rl_type) +{ + return ice_sched_set_tc_node_bw_lmt(pi, tc, rl_type, ICE_SCHED_DFLT_BW); +} + +/** + * ice_sched_save_tc_node_bw_alloc - save TC node's BW alloc information + * @pi: port information structure + * @tc: traffic class + * @rl_type: rate limit type min or max + * @bw_alloc: Bandwidth allocation information + * + * Save BW alloc information of VSI type node for post replay use. + */ +static enum ice_status +ice_sched_save_tc_node_bw_alloc(struct ice_port_info *pi, u8 tc, + enum ice_rl_type rl_type, u16 bw_alloc) +{ + if (tc >= ICE_MAX_TRAFFIC_CLASS) + return ICE_ERR_PARAM; + switch (rl_type) { + case ICE_MIN_BW: + ice_set_clear_cir_bw_alloc(&pi->tc_node_bw_t_info[tc], + bw_alloc); + break; + case ICE_MAX_BW: + ice_set_clear_eir_bw_alloc(&pi->tc_node_bw_t_info[tc], + bw_alloc); + break; + default: + return ICE_ERR_PARAM; + } + return ICE_SUCCESS; +} + +/** + * ice_sched_set_tc_node_bw_alloc - set TC node BW alloc + * @pi: port information structure + * @tc: TC number + * @rl_type: min or max + * @bw_alloc: bandwidth alloc + * + * This function configures bandwidth alloc of TC node, also saves the + * changed settings for replay purpose, and return success if it succeeds + * in modifying bandwidth alloc setting. + */ +static enum ice_status +ice_sched_set_tc_node_bw_alloc(struct ice_port_info *pi, u8 tc, + enum ice_rl_type rl_type, u8 bw_alloc) +{ + enum ice_status status = ICE_ERR_PARAM; + struct ice_sched_node *tc_node; + + if (tc >= ICE_MAX_TRAFFIC_CLASS) + return status; + ice_acquire_lock(&pi->sched_lock); + tc_node = ice_sched_get_tc_node(pi, tc); + if (!tc_node) + goto exit_set_tc_node_bw_alloc; + status = ice_sched_cfg_node_bw_alloc(pi->hw, tc_node, rl_type, + bw_alloc); + if (status) + goto exit_set_tc_node_bw_alloc; + status = ice_sched_save_tc_node_bw_alloc(pi, tc, rl_type, bw_alloc); + +exit_set_tc_node_bw_alloc: + ice_release_lock(&pi->sched_lock); + return status; +} + +/** + * ice_cfg_tc_node_bw_alloc - configure TC node BW alloc + * @pi: port information structure + * @tc: TC number + * @rl_type: min or max + * @bw_alloc: bandwidth alloc + * + * This function configures BW limit of TC node. + * Note: The minimum guaranteed reservation is done via DCBX. + */ +enum ice_status +ice_cfg_tc_node_bw_alloc(struct ice_port_info *pi, u8 tc, + enum ice_rl_type rl_type, u8 bw_alloc) +{ + return ice_sched_set_tc_node_bw_alloc(pi, tc, rl_type, bw_alloc); +} + +/** + * ice_sched_set_agg_bw_dflt_lmt - set aggregator node's BW limit to default + * @pi: port information structure + * @vsi_handle: software VSI handle + * + * This function retrieves the aggregator ID based on VSI ID and TC, + * and sets node's BW limit to default. This function needs to be + * called with the scheduler lock held. 
+ */ +enum ice_status +ice_sched_set_agg_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle) +{ + struct ice_vsi_ctx *vsi_ctx; + enum ice_status status = ICE_SUCCESS; + u8 tc; + + if (!ice_is_vsi_valid(pi->hw, vsi_handle)) + return ICE_ERR_PARAM; + vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle); + if (!vsi_ctx) + return ICE_ERR_PARAM; + + ice_for_each_traffic_class(tc) { + struct ice_sched_node *node; + + node = vsi_ctx->sched.ag_node[tc]; + if (!node) + continue; + + /* Set min profile to default */ + status = ice_sched_set_node_bw_dflt_lmt(pi, node, ICE_MIN_BW); + if (status) + break; + + /* Set max profile to default */ + status = ice_sched_set_node_bw_dflt_lmt(pi, node, ICE_MAX_BW); + if (status) + break; + + /* Remove shared profile, if there is one */ + status = ice_sched_set_node_bw_dflt_lmt(pi, node, + ICE_SHARED_BW); + if (status) + break; + } + + return status; +} + +/** + * ice_sched_get_node_by_id_type - get node from ID type + * @pi: port information structure + * @id: identifier + * @agg_type: type of aggregator + * @tc: traffic class + * + * This function returns node identified by ID of type aggregator, and + * based on traffic class (TC). This function needs to be called with + * the scheduler lock held. + */ +static struct ice_sched_node * +ice_sched_get_node_by_id_type(struct ice_port_info *pi, u32 id, + enum ice_agg_type agg_type, u8 tc) +{ + struct ice_sched_node *node = NULL; + struct ice_sched_node *child_node; + + switch (agg_type) { + case ICE_AGG_TYPE_VSI: { + struct ice_vsi_ctx *vsi_ctx; + u16 vsi_handle = (u16)id; + + if (!ice_is_vsi_valid(pi->hw, vsi_handle)) + break; + /* Get sched_vsi_info */ + vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle); + if (!vsi_ctx) + break; + node = vsi_ctx->sched.vsi_node[tc]; + break; + } + + case ICE_AGG_TYPE_AGG: { + struct ice_sched_node *tc_node; + + tc_node = ice_sched_get_tc_node(pi, tc); + if (tc_node) + node = ice_sched_get_agg_node(pi, tc_node, id); + break; + } + + case ICE_AGG_TYPE_Q: + /* The current implementation allows single queue to modify */ + node = ice_sched_get_node(pi, id); + break; + + case ICE_AGG_TYPE_QG: + /* The current implementation allows single qg to modify */ + child_node = ice_sched_get_node(pi, id); + if (!child_node) + break; + node = child_node->parent; + break; + + default: + break; + } + + return node; +} + +/** + * ice_sched_set_node_bw_lmt_per_tc - set node BW limit per TC + * @pi: port information structure + * @id: ID (software VSI handle or AGG ID) + * @agg_type: aggregator type (VSI or AGG type node) + * @tc: traffic class + * @rl_type: min or max + * @bw: bandwidth in Kbps + * + * This function sets BW limit of VSI or Aggregator scheduling node + * based on TC information from passed in argument BW. 
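+ *
+ * Callers normally reach this helper through ice_cfg_vsi_bw_lmt_per_tc() or
+ * ice_cfg_agg_bw_lmt_per_tc(), which additionally save the value for replay.
+ * An illustrative direct call (hypothetical VSI handle and rate):
+ *
+ *	status = ice_sched_set_node_bw_lmt_per_tc(pi, vsi_handle,
+ *						  ICE_AGG_TYPE_VSI, 0,
+ *						  ICE_MAX_BW, 500000);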
+ */ +enum ice_status +ice_sched_set_node_bw_lmt_per_tc(struct ice_port_info *pi, u32 id, + enum ice_agg_type agg_type, u8 tc, + enum ice_rl_type rl_type, u32 bw) +{ + enum ice_status status = ICE_ERR_PARAM; + struct ice_sched_node *node; + + if (!pi) + return status; + + if (rl_type == ICE_UNKNOWN_BW) + return status; + + ice_acquire_lock(&pi->sched_lock); + node = ice_sched_get_node_by_id_type(pi, id, agg_type, tc); + if (!node) { + ice_debug(pi->hw, ICE_DBG_SCHED, "Wrong id, agg type, or tc\n"); + goto exit_set_node_bw_lmt_per_tc; + } + if (bw == ICE_SCHED_DFLT_BW) + status = ice_sched_set_node_bw_dflt_lmt(pi, node, rl_type); + else + status = ice_sched_set_node_bw_lmt(pi, node, rl_type, bw); + +exit_set_node_bw_lmt_per_tc: + ice_release_lock(&pi->sched_lock); + return status; +} + +/** + * ice_sched_validate_vsi_srl_node - validate VSI SRL node + * @pi: port information structure + * @vsi_handle: software VSI handle + * + * This function validates SRL node of the VSI node if available SRL layer is + * different than the VSI node layer on all TC(s).This function needs to be + * called with scheduler lock held. + */ +static enum ice_status +ice_sched_validate_vsi_srl_node(struct ice_port_info *pi, u16 vsi_handle) +{ + u8 sel_layer = ICE_SCHED_INVAL_LAYER_NUM; + u8 tc; + + if (!ice_is_vsi_valid(pi->hw, vsi_handle)) + return ICE_ERR_PARAM; + + /* Return success if no nodes are present across TC */ + ice_for_each_traffic_class(tc) { + struct ice_sched_node *tc_node, *vsi_node; + enum ice_rl_type rl_type = ICE_SHARED_BW; + enum ice_status status; + + tc_node = ice_sched_get_tc_node(pi, tc); + if (!tc_node) + continue; + + vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle); + if (!vsi_node) + continue; + + /* SRL bandwidth layer selection */ + if (sel_layer == ICE_SCHED_INVAL_LAYER_NUM) { + u8 node_layer = vsi_node->tx_sched_layer; + u8 layer_num; + + layer_num = ice_sched_get_rl_prof_layer(pi, rl_type, + node_layer); + if (layer_num >= pi->hw->num_tx_sched_layers) + return ICE_ERR_PARAM; + sel_layer = layer_num; + } + + status = ice_sched_validate_srl_node(vsi_node, sel_layer); + if (status) + return status; + } + return ICE_SUCCESS; +} + +/** + * ice_sched_set_vsi_bw_shared_lmt - set VSI BW shared limit + * @pi: port information structure + * @vsi_handle: software VSI handle + * @bw: bandwidth in Kbps + * + * This function Configures shared rate limiter(SRL) of all VSI type nodes + * across all traffic classes for VSI matching handle. When BW value of + * ICE_SCHED_DFLT_BW is passed, it removes the SRL from the node. 
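+ *
+ * Illustrative sketch (not from the upstream sources); callers typically use
+ * the ice_cfg_vsi_bw_shared_lmt()/ice_cfg_vsi_bw_no_shared_lmt() wrappers:
+ *
+ *	// apply a 200 Mbps shared rate limit to the VSI nodes on all TCs
+ *	status = ice_cfg_vsi_bw_shared_lmt(pi, vsi_handle, 200000);
+ *	...
+ *	// later, remove the shared limiter again
+ *	status = ice_cfg_vsi_bw_no_shared_lmt(pi, vsi_handle);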
+ */ +enum ice_status +ice_sched_set_vsi_bw_shared_lmt(struct ice_port_info *pi, u16 vsi_handle, + u32 bw) +{ + enum ice_status status = ICE_SUCCESS; + u8 tc; + + if (!pi) + return ICE_ERR_PARAM; + + if (!ice_is_vsi_valid(pi->hw, vsi_handle)) + return ICE_ERR_PARAM; + + ice_acquire_lock(&pi->sched_lock); + status = ice_sched_validate_vsi_srl_node(pi, vsi_handle); + if (status) + goto exit_set_vsi_bw_shared_lmt; + /* Return success if no nodes are present across TC */ + ice_for_each_traffic_class(tc) { + struct ice_sched_node *tc_node, *vsi_node; + enum ice_rl_type rl_type = ICE_SHARED_BW; + + tc_node = ice_sched_get_tc_node(pi, tc); + if (!tc_node) + continue; + + vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle); + if (!vsi_node) + continue; + + if (bw == ICE_SCHED_DFLT_BW) + /* It removes existing SRL from the node */ + status = ice_sched_set_node_bw_dflt_lmt(pi, vsi_node, + rl_type); + else + status = ice_sched_set_node_bw_lmt(pi, vsi_node, + rl_type, bw); + if (status) + break; + status = ice_sched_save_vsi_bw(pi, vsi_handle, tc, rl_type, bw); + if (status) + break; + } + +exit_set_vsi_bw_shared_lmt: + ice_release_lock(&pi->sched_lock); + return status; +} + +/** + * ice_sched_validate_agg_srl_node - validate AGG SRL node + * @pi: port information structure + * @agg_id: aggregator ID + * + * This function validates SRL node of the AGG node if available SRL layer is + * different than the AGG node layer on all TC(s).This function needs to be + * called with scheduler lock held. + */ +static enum ice_status +ice_sched_validate_agg_srl_node(struct ice_port_info *pi, u32 agg_id) +{ + u8 sel_layer = ICE_SCHED_INVAL_LAYER_NUM; + struct ice_sched_agg_info *agg_info; + bool agg_id_present = false; + enum ice_status status = ICE_SUCCESS; + u8 tc; + + LIST_FOR_EACH_ENTRY(agg_info, &pi->hw->agg_list, ice_sched_agg_info, + list_entry) + if (agg_info->agg_id == agg_id) { + agg_id_present = true; + break; + } + if (!agg_id_present) + return ICE_ERR_PARAM; + /* Return success if no nodes are present across TC */ + ice_for_each_traffic_class(tc) { + struct ice_sched_node *tc_node, *agg_node; + enum ice_rl_type rl_type = ICE_SHARED_BW; + + tc_node = ice_sched_get_tc_node(pi, tc); + if (!tc_node) + continue; + + agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id); + if (!agg_node) + continue; + /* SRL bandwidth layer selection */ + if (sel_layer == ICE_SCHED_INVAL_LAYER_NUM) { + u8 node_layer = agg_node->tx_sched_layer; + u8 layer_num; + + layer_num = ice_sched_get_rl_prof_layer(pi, rl_type, + node_layer); + if (layer_num >= pi->hw->num_tx_sched_layers) + return ICE_ERR_PARAM; + sel_layer = layer_num; + } + + status = ice_sched_validate_srl_node(agg_node, sel_layer); + if (status) + break; + } + return status; +} + +/** + * ice_sched_set_agg_bw_shared_lmt - set aggregator BW shared limit + * @pi: port information structure + * @agg_id: aggregator ID + * @bw: bandwidth in Kbps + * + * This function configures the shared rate limiter(SRL) of all aggregator type + * nodes across all traffic classes for aggregator matching agg_id. When + * BW value of ICE_SCHED_DFLT_BW is passed, it removes SRL from the + * node(s). 
+ */ +enum ice_status +ice_sched_set_agg_bw_shared_lmt(struct ice_port_info *pi, u32 agg_id, u32 bw) +{ + struct ice_sched_agg_info *agg_info; + struct ice_sched_agg_info *tmp; + bool agg_id_present = false; + enum ice_status status = ICE_SUCCESS; + u8 tc; + + if (!pi) + return ICE_ERR_PARAM; + + ice_acquire_lock(&pi->sched_lock); + status = ice_sched_validate_agg_srl_node(pi, agg_id); + if (status) + goto exit_agg_bw_shared_lmt; + + LIST_FOR_EACH_ENTRY_SAFE(agg_info, tmp, &pi->hw->agg_list, + ice_sched_agg_info, list_entry) + if (agg_info->agg_id == agg_id) { + agg_id_present = true; + break; + } + + if (!agg_id_present) { + status = ICE_ERR_PARAM; + goto exit_agg_bw_shared_lmt; + } + + /* Return success if no nodes are present across TC */ + ice_for_each_traffic_class(tc) { + enum ice_rl_type rl_type = ICE_SHARED_BW; + struct ice_sched_node *tc_node, *agg_node; + + tc_node = ice_sched_get_tc_node(pi, tc); + if (!tc_node) + continue; + + agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id); + if (!agg_node) + continue; + + if (bw == ICE_SCHED_DFLT_BW) + /* It removes existing SRL from the node */ + status = ice_sched_set_node_bw_dflt_lmt(pi, agg_node, + rl_type); + else + status = ice_sched_set_node_bw_lmt(pi, agg_node, + rl_type, bw); + if (status) + break; + status = ice_sched_save_agg_bw(pi, agg_id, tc, rl_type, bw); + if (status) + break; + } + +exit_agg_bw_shared_lmt: + ice_release_lock(&pi->sched_lock); + return status; +} + +/** + * ice_sched_cfg_sibl_node_prio - configure node sibling priority + * @pi: port information structure + * @node: sched node to configure + * @priority: sibling priority + * + * This function configures node element's sibling priority only. This + * function needs to be called with scheduler lock held. + */ +enum ice_status +ice_sched_cfg_sibl_node_prio(struct ice_port_info *pi, + struct ice_sched_node *node, u8 priority) +{ + struct ice_aqc_txsched_elem_data buf; + struct ice_aqc_txsched_elem *data; + struct ice_hw *hw = pi->hw; + enum ice_status status; + + if (!hw) + return ICE_ERR_PARAM; + buf = node->info; + data = &buf.data; + data->valid_sections |= ICE_AQC_ELEM_VALID_GENERIC; + priority = (priority << ICE_AQC_ELEM_GENERIC_PRIO_S) & + ICE_AQC_ELEM_GENERIC_PRIO_M; + data->generic &= ~ICE_AQC_ELEM_GENERIC_PRIO_M; + data->generic |= priority; + + /* Configure element */ + status = ice_sched_update_elem(hw, node, &buf); + return status; +} + +/** + * ice_cfg_rl_burst_size - Set burst size value + * @hw: pointer to the HW struct + * @bytes: burst size in bytes + * + * This function configures/set the burst size to requested new value. The new + * burst size value is used for future rate limit calls. It doesn't change the + * existing or previously created RL profiles. 
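+ *
+ * Worked example (values assumed, limits taken from ice_sched.h): a request
+ * of 4096 bytes fits in 64 byte granularity and is programmed as 64 chunks of
+ * 64 bytes, while a request of 1048576 bytes exceeds
+ * ICE_MAX_BURST_SIZE_64_BYTE_GRANULARITY and is programmed as 1024 chunks of
+ * 1 KB:
+ *
+ *	status = ice_cfg_rl_burst_size(hw, 4096);
+ *	status = ice_cfg_rl_burst_size(hw, 1024 * 1024);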
+ */
+enum ice_status ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes)
+{
+	u16 burst_size_to_prog;
+
+	if (bytes < ICE_MIN_BURST_SIZE_ALLOWED ||
+	    bytes > ICE_MAX_BURST_SIZE_ALLOWED)
+		return ICE_ERR_PARAM;
+	if (ice_round_to_num(bytes, 64) <=
+	    ICE_MAX_BURST_SIZE_64_BYTE_GRANULARITY) {
+		/* 64 byte granularity case */
+		/* Disable MSB granularity bit */
+		burst_size_to_prog = ICE_64_BYTE_GRANULARITY;
+		/* round number to nearest 64 byte granularity */
+		bytes = ice_round_to_num(bytes, 64);
+		/* The value is in 64 byte chunks */
+		burst_size_to_prog |= (u16)(bytes / 64);
+	} else {
+		/* k bytes granularity case */
+		/* Enable MSB granularity bit */
+		burst_size_to_prog = ICE_KBYTE_GRANULARITY;
+		/* round number to nearest 1024 granularity */
+		bytes = ice_round_to_num(bytes, 1024);
+		/* check rounding doesn't go beyond allowed */
+		if (bytes > ICE_MAX_BURST_SIZE_KBYTE_GRANULARITY)
+			bytes = ICE_MAX_BURST_SIZE_KBYTE_GRANULARITY;
+		/* The value is in k bytes */
+		burst_size_to_prog |= (u16)(bytes / 1024);
+	}
+	hw->max_burst_size = burst_size_to_prog;
+	return ICE_SUCCESS;
+}
+
+/**
+ * ice_sched_replay_node_prio - re-configure node priority
+ * @hw: pointer to the HW struct
+ * @node: sched node to configure
+ * @priority: priority value
+ *
+ * This function configures node element's priority value. It
+ * needs to be called with scheduler lock held.
+ */
+static enum ice_status
+ice_sched_replay_node_prio(struct ice_hw *hw, struct ice_sched_node *node,
+			   u8 priority)
+{
+	struct ice_aqc_txsched_elem_data buf;
+	struct ice_aqc_txsched_elem *data;
+	enum ice_status status;
+
+	buf = node->info;
+	data = &buf.data;
+	data->valid_sections |= ICE_AQC_ELEM_VALID_GENERIC;
+	data->generic = priority;
+
+	/* Configure element */
+	status = ice_sched_update_elem(hw, node, &buf);
+	return status;
+}
+
+/**
+ * ice_sched_replay_node_bw - replay node(s) BW
+ * @hw: pointer to the HW struct
+ * @node: sched node to configure
+ * @bw_t_info: BW type information
+ *
+ * This function restores node's BW from bw_t_info. The caller needs
+ * to hold the scheduler lock.
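+ *
+ * Note (derived from the flag checks in the body below): only the BW types
+ * set in bw_t_info->bw_t_bitmap are re-applied, in the order priority, CIR
+ * limit, CIR weight, EIR limit, EIR weight and finally the shared (SRL)
+ * limit; the first failing step stops the replay and its status is returned.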
+ */
+static enum ice_status
+ice_sched_replay_node_bw(struct ice_hw *hw, struct ice_sched_node *node,
+			 struct ice_bw_type_info *bw_t_info)
+{
+	struct ice_port_info *pi = hw->port_info;
+	enum ice_status status = ICE_ERR_PARAM;
+	u16 bw_alloc;
+
+	if (!node)
+		return status;
+	if (!ice_is_any_bit_set(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_CNT))
+		return ICE_SUCCESS;
+	if (ice_is_bit_set(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_PRIO)) {
+		status = ice_sched_replay_node_prio(hw, node,
+						    bw_t_info->generic);
+		if (status)
+			return status;
+	}
+	if (ice_is_bit_set(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_CIR)) {
+		status = ice_sched_set_node_bw_lmt(pi, node, ICE_MIN_BW,
+						   bw_t_info->cir_bw.bw);
+		if (status)
+			return status;
+	}
+	if (ice_is_bit_set(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_CIR_WT)) {
+		bw_alloc = bw_t_info->cir_bw.bw_alloc;
+		status = ice_sched_cfg_node_bw_alloc(hw, node, ICE_MIN_BW,
+						     bw_alloc);
+		if (status)
+			return status;
+	}
+	if (ice_is_bit_set(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_EIR)) {
+		status = ice_sched_set_node_bw_lmt(pi, node, ICE_MAX_BW,
+						   bw_t_info->eir_bw.bw);
+		if (status)
+			return status;
+	}
+	if (ice_is_bit_set(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_EIR_WT)) {
+		bw_alloc = bw_t_info->eir_bw.bw_alloc;
+		status = ice_sched_cfg_node_bw_alloc(hw, node, ICE_MAX_BW,
+						     bw_alloc);
+		if (status)
+			return status;
+	}
+	if (ice_is_bit_set(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_SHARED))
+		status = ice_sched_set_node_bw_lmt(pi, node, ICE_SHARED_BW,
+						   bw_t_info->shared_bw);
+	return status;
+}
+
+/**
+ * ice_sched_replay_agg_bw - replay aggregator node(s) BW
+ * @hw: pointer to the HW struct
+ * @agg_info: aggregator data structure
+ *
+ * This function replays the BW of the aggregator node on each traffic class.
+ * The caller needs to hold the scheduler lock.
+ */
+static enum ice_status
+ice_sched_replay_agg_bw(struct ice_hw *hw, struct ice_sched_agg_info *agg_info)
+{
+	struct ice_sched_node *tc_node, *agg_node;
+	enum ice_status status = ICE_SUCCESS;
+	u8 tc;
+
+	if (!agg_info)
+		return ICE_ERR_PARAM;
+	ice_for_each_traffic_class(tc) {
+		if (!ice_is_any_bit_set(agg_info->bw_t_info[tc].bw_t_bitmap,
+					ICE_BW_TYPE_CNT))
+			continue;
+		tc_node = ice_sched_get_tc_node(hw->port_info, tc);
+		if (!tc_node) {
+			status = ICE_ERR_PARAM;
+			break;
+		}
+		agg_node = ice_sched_get_agg_node(hw->port_info, tc_node,
+						  agg_info->agg_id);
+		if (!agg_node) {
+			status = ICE_ERR_PARAM;
+			break;
+		}
+		status = ice_sched_replay_node_bw(hw, agg_node,
+						  &agg_info->bw_t_info[tc]);
+		if (status)
+			break;
+	}
+	return status;
+}
+
+/**
+ * ice_sched_get_ena_tc_bitmap - get enabled TC bitmap
+ * @pi: port info struct
+ * @tc_bitmap: 8 bits TC bitmap to check
+ * @ena_tc_bitmap: 8 bits enabled TC bitmap to return
+ *
+ * This function returns the enabled TC bitmap in ena_tc_bitmap. Some TCs may
+ * be missing after a reset, so only TCs that are both requested and present
+ * are reported. This function needs to be called with the scheduler lock held.
+ */
+static void
+ice_sched_get_ena_tc_bitmap(struct ice_port_info *pi, ice_bitmap_t *tc_bitmap,
+			    ice_bitmap_t *ena_tc_bitmap)
+{
+	u8 tc;
+
+	/* Some TC(s) may be missing after reset, adjust for replay */
+	ice_for_each_traffic_class(tc)
+		if (ice_is_tc_ena(*tc_bitmap, tc) &&
+		    (ice_sched_get_tc_node(pi, tc)))
+			ice_set_bit(tc, ena_tc_bitmap);
+}
+
+/**
+ * ice_sched_replay_agg - recreate aggregator node(s)
+ * @hw: pointer to the HW struct
+ *
+ * This function recreates aggregator type nodes that were not replayed
+ * earlier. It also replays aggregator BW information. These aggregator nodes
+ * are not yet associated with VSI type nodes.
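+ *
+ * Illustrative replay ordering, shown only to put this helper in context
+ * (the exact call sites are driver specific):
+ *
+ *	ice_sched_replay_agg_vsi_preinit(hw);
+ *	... per-VSI replay, e.g. through ice_replay_vsi_agg() ...
+ *	ice_sched_replay_agg(hw);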
+ */
+void ice_sched_replay_agg(struct ice_hw *hw)
+{
+	struct ice_port_info *pi = hw->port_info;
+	struct ice_sched_agg_info *agg_info;
+
+	ice_acquire_lock(&pi->sched_lock);
+	LIST_FOR_EACH_ENTRY(agg_info, &hw->agg_list, ice_sched_agg_info,
+			    list_entry) {
+		/* replay aggregator (re-create aggregator node) */
+		if (!ice_cmp_bitmap(agg_info->tc_bitmap,
+				    agg_info->replay_tc_bitmap,
+				    ICE_MAX_TRAFFIC_CLASS)) {
+			ice_declare_bitmap(replay_bitmap,
+					   ICE_MAX_TRAFFIC_CLASS);
+			enum ice_status status;
+
+			ice_zero_bitmap(replay_bitmap, ICE_MAX_TRAFFIC_CLASS);
+			ice_sched_get_ena_tc_bitmap(pi,
+						    agg_info->replay_tc_bitmap,
+						    replay_bitmap);
+			status = ice_sched_cfg_agg(hw->port_info,
+						   agg_info->agg_id,
+						   ICE_AGG_TYPE_AGG,
+						   replay_bitmap);
+			if (status) {
+				ice_info(hw, "Replay agg id[%d] failed\n",
+					 agg_info->agg_id);
+				/* Move on to next one */
+				continue;
+			}
+			/* Replay aggregator node BW (restore aggregator BW) */
+			status = ice_sched_replay_agg_bw(hw, agg_info);
+			if (status)
+				ice_info(hw, "Replay agg bw [id=%d] failed\n",
+					 agg_info->agg_id);
+		}
+	}
+	ice_release_lock(&pi->sched_lock);
+}
+
+/**
+ * ice_sched_replay_agg_vsi_preinit - Agg/VSI replay pre initialization
+ * @hw: pointer to the HW struct
+ *
+ * This function initializes the aggregator(s) TC bitmap to zero, a required
+ * preinit step for replaying aggregators.
+ */
+void ice_sched_replay_agg_vsi_preinit(struct ice_hw *hw)
+{
+	struct ice_port_info *pi = hw->port_info;
+	struct ice_sched_agg_info *agg_info;
+
+	ice_acquire_lock(&pi->sched_lock);
+	LIST_FOR_EACH_ENTRY(agg_info, &hw->agg_list, ice_sched_agg_info,
+			    list_entry) {
+		struct ice_sched_agg_vsi_info *agg_vsi_info;
+
+		agg_info->tc_bitmap[0] = 0;
+		LIST_FOR_EACH_ENTRY(agg_vsi_info, &agg_info->agg_vsi_list,
+				    ice_sched_agg_vsi_info, list_entry)
+			agg_vsi_info->tc_bitmap[0] = 0;
+	}
+	ice_release_lock(&pi->sched_lock);
+}
+
+/**
+ * ice_sched_replay_tc_node_bw - replay TC node(s) BW
+ * @pi: port information structure
+ *
+ * This function replays TC node(s) BW.
+ */
+enum ice_status
+ice_sched_replay_tc_node_bw(struct ice_port_info *pi)
+{
+	enum ice_status status = ICE_SUCCESS;
+	u8 tc;
+
+	if (!pi->hw)
+		return ICE_ERR_PARAM;
+	ice_acquire_lock(&pi->sched_lock);
+	ice_for_each_traffic_class(tc) {
+		struct ice_sched_node *tc_node;
+
+		tc_node = ice_sched_get_tc_node(pi, tc);
+		if (!tc_node)
+			continue; /* TC not present */
+		status = ice_sched_replay_node_bw(pi->hw, tc_node,
+						  &pi->tc_node_bw_t_info[tc]);
+		if (status)
+			break;
+	}
+	ice_release_lock(&pi->sched_lock);
+	return status;
+}
+
+/**
+ * ice_sched_replay_vsi_bw - replay VSI type node(s) BW
+ * @hw: pointer to the HW struct
+ * @vsi_handle: software VSI handle
+ * @tc_bitmap: 8 bits TC bitmap
+ *
+ * This function replays VSI type node bandwidth. This function needs to be
+ * called with the scheduler lock held.
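+ *
+ * Note: TCs that are not set in tc_bitmap, or whose TC/VSI node is missing
+ * from the tree, are silently skipped; the replay stops at the first failing
+ * node and that status is returned.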
+ */ +static enum ice_status +ice_sched_replay_vsi_bw(struct ice_hw *hw, u16 vsi_handle, + ice_bitmap_t *tc_bitmap) +{ + struct ice_sched_node *vsi_node, *tc_node; + struct ice_port_info *pi = hw->port_info; + struct ice_bw_type_info *bw_t_info; + struct ice_vsi_ctx *vsi_ctx; + enum ice_status status = ICE_SUCCESS; + u8 tc; + + vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle); + if (!vsi_ctx) + return ICE_ERR_PARAM; + ice_for_each_traffic_class(tc) { + if (!ice_is_tc_ena(*tc_bitmap, tc)) + continue; + tc_node = ice_sched_get_tc_node(pi, tc); + if (!tc_node) + continue; + vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle); + if (!vsi_node) + continue; + bw_t_info = &vsi_ctx->sched.bw_t_info[tc]; + status = ice_sched_replay_node_bw(hw, vsi_node, bw_t_info); + if (status) + break; + } + return status; +} + +/** + * ice_sched_replay_vsi_agg - replay aggregator & VSI to aggregator node(s) + * @hw: pointer to the HW struct + * @vsi_handle: software VSI handle + * + * This function replays aggregator node, VSI to aggregator type nodes, and + * their node bandwidth information. This function needs to be called with + * scheduler lock held. + */ +static enum ice_status +ice_sched_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle) +{ + ice_declare_bitmap(replay_bitmap, ICE_MAX_TRAFFIC_CLASS); + struct ice_sched_agg_vsi_info *agg_vsi_info; + struct ice_port_info *pi = hw->port_info; + struct ice_sched_agg_info *agg_info; + enum ice_status status; + + ice_zero_bitmap(replay_bitmap, ICE_MAX_TRAFFIC_CLASS); + if (!ice_is_vsi_valid(hw, vsi_handle)) + return ICE_ERR_PARAM; + agg_info = ice_get_vsi_agg_info(hw, vsi_handle); + if (!agg_info) + return ICE_SUCCESS; /* Not present in list - default Agg case */ + agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle); + if (!agg_vsi_info) + return ICE_SUCCESS; /* Not present in list - default Agg case */ + ice_sched_get_ena_tc_bitmap(pi, agg_info->replay_tc_bitmap, + replay_bitmap); + /* Replay aggregator node associated to vsi_handle */ + status = ice_sched_cfg_agg(hw->port_info, agg_info->agg_id, + ICE_AGG_TYPE_AGG, replay_bitmap); + if (status) + return status; + /* Replay aggregator node BW (restore aggregator BW) */ + status = ice_sched_replay_agg_bw(hw, agg_info); + if (status) + return status; + + ice_zero_bitmap(replay_bitmap, ICE_MAX_TRAFFIC_CLASS); + ice_sched_get_ena_tc_bitmap(pi, agg_vsi_info->replay_tc_bitmap, + replay_bitmap); + /* Move this VSI (vsi_handle) to above aggregator */ + status = ice_sched_assoc_vsi_to_agg(pi, agg_info->agg_id, vsi_handle, + replay_bitmap); + if (status) + return status; + /* Replay VSI BW (restore VSI BW) */ + return ice_sched_replay_vsi_bw(hw, vsi_handle, + agg_vsi_info->tc_bitmap); +} + +/** + * ice_replay_vsi_agg - replay VSI to aggregator node + * @hw: pointer to the HW struct + * @vsi_handle: software VSI handle + * + * This function replays association of VSI to aggregator type nodes, and + * node bandwidth information. + */ +enum ice_status +ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle) +{ + struct ice_port_info *pi = hw->port_info; + enum ice_status status; + + ice_acquire_lock(&pi->sched_lock); + status = ice_sched_replay_vsi_agg(hw, vsi_handle); + ice_release_lock(&pi->sched_lock); + return status; +} + +/** + * ice_sched_replay_q_bw - replay queue type node BW + * @pi: port information structure + * @q_ctx: queue context structure + * + * This function replays queue type node bandwidth. This function needs to be + * called with scheduler lock held. 
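+ *
+ * Illustrative caller sketch (hypothetical; "q_ctx" is assumed to have been
+ * looked up by the caller, and the lock handling mirrors the other replay
+ * helpers in this file):
+ *
+ *	ice_acquire_lock(&pi->sched_lock);
+ *	status = ice_sched_replay_q_bw(pi, q_ctx);
+ *	ice_release_lock(&pi->sched_lock);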
+ */ +enum ice_status +ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx) +{ + struct ice_sched_node *q_node; + + /* Following also checks the presence of node in tree */ + q_node = ice_sched_find_node_by_teid(pi->root, q_ctx->q_teid); + if (!q_node) + return ICE_ERR_PARAM; + return ice_sched_replay_node_bw(pi->hw, q_node, &q_ctx->bw_t_info); +} diff --git a/src/spdk/dpdk/drivers/net/ice/base/ice_sched.h b/src/spdk/dpdk/drivers/net/ice/base/ice_sched.h new file mode 100644 index 000000000..57bf4b59d --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ice/base/ice_sched.h @@ -0,0 +1,197 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + +#ifndef _ICE_SCHED_H_ +#define _ICE_SCHED_H_ + +#include "ice_common.h" + +#define ICE_QGRP_LAYER_OFFSET 2 +#define ICE_VSI_LAYER_OFFSET 4 +#define ICE_AGG_LAYER_OFFSET 6 +#define ICE_SCHED_INVAL_LAYER_NUM 0xFF +/* Burst size is a 12 bits register that is configured while creating the RL + * profile(s). MSB is a granularity bit and tells the granularity type + * 0 - LSB bits are in 64 bytes granularity + * 1 - LSB bits are in 1K bytes granularity + */ +#define ICE_64_BYTE_GRANULARITY 0 +#define ICE_KBYTE_GRANULARITY BIT(11) +#define ICE_MIN_BURST_SIZE_ALLOWED 64 /* In Bytes */ +#define ICE_MAX_BURST_SIZE_ALLOWED \ + ((BIT(11) - 1) * 1024) /* In Bytes */ +#define ICE_MAX_BURST_SIZE_64_BYTE_GRANULARITY \ + ((BIT(11) - 1) * 64) /* In Bytes */ +#define ICE_MAX_BURST_SIZE_KBYTE_GRANULARITY ICE_MAX_BURST_SIZE_ALLOWED + +#define ICE_RL_PROF_ACCURACY_BYTES 128 +#define ICE_RL_PROF_MULTIPLIER 10000 +#define ICE_RL_PROF_TS_MULTIPLIER 32 +#define ICE_RL_PROF_FRACTION 512 + +#define ICE_PSM_CLK_367MHZ_IN_HZ 367647059 +#define ICE_PSM_CLK_416MHZ_IN_HZ 416666667 +#define ICE_PSM_CLK_446MHZ_IN_HZ 446428571 +#define ICE_PSM_CLK_390MHZ_IN_HZ 390625000 + +struct rl_profile_params { + u32 bw; /* in Kbps */ + u16 rl_multiplier; + u16 wake_up_calc; + u16 rl_encode; +}; + +/* BW rate limit profile parameters list entry along + * with bandwidth maintained per layer in port info + */ +struct ice_aqc_rl_profile_info { + struct ice_aqc_rl_profile_elem profile; + struct LIST_ENTRY_TYPE list_entry; + u32 bw; /* requested */ + u16 prof_id_ref; /* profile ID to node association ref count */ +}; + +struct ice_sched_agg_vsi_info { + struct LIST_ENTRY_TYPE list_entry; + ice_declare_bitmap(tc_bitmap, ICE_MAX_TRAFFIC_CLASS); + u16 vsi_handle; + /* save aggregator VSI TC bitmap */ + ice_declare_bitmap(replay_tc_bitmap, ICE_MAX_TRAFFIC_CLASS); +}; + +struct ice_sched_agg_info { + struct LIST_HEAD_TYPE agg_vsi_list; + struct LIST_ENTRY_TYPE list_entry; + ice_declare_bitmap(tc_bitmap, ICE_MAX_TRAFFIC_CLASS); + u32 agg_id; + enum ice_agg_type agg_type; + /* bw_t_info saves aggregator BW information */ + struct ice_bw_type_info bw_t_info[ICE_MAX_TRAFFIC_CLASS]; + /* save aggregator TC bitmap */ + ice_declare_bitmap(replay_tc_bitmap, ICE_MAX_TRAFFIC_CLASS); +}; + +/* FW AQ command calls */ +enum ice_status +ice_aq_query_rl_profile(struct ice_hw *hw, u16 num_profiles, + struct ice_aqc_rl_profile_generic_elem *buf, + u16 buf_size, struct ice_sq_cd *cd); +enum ice_status +ice_aq_cfg_l2_node_cgd(struct ice_hw *hw, u16 num_nodes, + struct ice_aqc_cfg_l2_node_cgd_data *buf, u16 buf_size, + struct ice_sq_cd *cd); +enum ice_status +ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req, + struct ice_aqc_get_elem *buf, u16 buf_size, + u16 *elems_ret, struct ice_sq_cd *cd); +enum ice_status ice_sched_init_port(struct ice_port_info *pi); +enum 
ice_status ice_sched_query_res_alloc(struct ice_hw *hw); +void ice_sched_get_psm_clk_freq(struct ice_hw *hw); + +/* Functions to cleanup scheduler SW DB */ +void ice_sched_clear_port(struct ice_port_info *pi); +void ice_sched_cleanup_all(struct ice_hw *hw); +void ice_sched_clear_agg(struct ice_hw *hw); + +/* Get a scheduling node from SW DB for given TEID */ +struct ice_sched_node *ice_sched_get_node(struct ice_port_info *pi, u32 teid); +struct ice_sched_node * +ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid); +/* Add a scheduling node into SW DB for given info */ +enum ice_status +ice_sched_add_node(struct ice_port_info *pi, u8 layer, + struct ice_aqc_txsched_elem_data *info); +void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node); +struct ice_sched_node *ice_sched_get_tc_node(struct ice_port_info *pi, u8 tc); +struct ice_sched_node * +ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc, + u8 owner); +enum ice_status +ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs, + u8 owner, bool enable); +enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle); +struct ice_sched_node * +ice_sched_get_vsi_node(struct ice_port_info *pi, struct ice_sched_node *tc_node, + u16 vsi_handle); +bool ice_sched_is_tree_balanced(struct ice_hw *hw, struct ice_sched_node *node); +enum ice_status +ice_aq_query_node_to_root(struct ice_hw *hw, u32 node_teid, + struct ice_aqc_get_elem *buf, u16 buf_size, + struct ice_sq_cd *cd); + +/* Tx scheduler rate limiter functions */ +enum ice_status +ice_cfg_agg(struct ice_port_info *pi, u32 agg_id, + enum ice_agg_type agg_type, u8 tc_bitmap); +enum ice_status +ice_move_vsi_to_agg(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle, + u8 tc_bitmap); +enum ice_status ice_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id); +enum ice_status +ice_cfg_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc, + u16 q_handle, enum ice_rl_type rl_type, u32 bw); +enum ice_status +ice_cfg_q_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc, + u16 q_handle, enum ice_rl_type rl_type); +enum ice_status +ice_cfg_tc_node_bw_lmt(struct ice_port_info *pi, u8 tc, + enum ice_rl_type rl_type, u32 bw); +enum ice_status +ice_cfg_tc_node_bw_dflt_lmt(struct ice_port_info *pi, u8 tc, + enum ice_rl_type rl_type); +enum ice_status +ice_cfg_vsi_bw_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc, + enum ice_rl_type rl_type, u32 bw); +enum ice_status +ice_cfg_vsi_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc, + enum ice_rl_type rl_type); +enum ice_status +ice_cfg_agg_bw_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc, + enum ice_rl_type rl_type, u32 bw); +enum ice_status +ice_cfg_agg_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc, + enum ice_rl_type rl_type); +enum ice_status +ice_cfg_vsi_bw_shared_lmt(struct ice_port_info *pi, u16 vsi_handle, u32 bw); +enum ice_status +ice_cfg_vsi_bw_no_shared_lmt(struct ice_port_info *pi, u16 vsi_handle); +enum ice_status +ice_cfg_agg_bw_shared_lmt(struct ice_port_info *pi, u32 agg_id, u32 bw); +enum ice_status +ice_cfg_agg_bw_no_shared_lmt(struct ice_port_info *pi, u32 agg_id); +enum ice_status +ice_cfg_vsi_q_priority(struct ice_port_info *pi, u16 num_qs, u32 *q_ids, + u8 *q_prio); +enum ice_status +ice_cfg_vsi_bw_alloc(struct ice_port_info *pi, u16 vsi_handle, u8 ena_tcmap, + enum ice_rl_type rl_type, u8 *bw_alloc); +enum ice_status +ice_cfg_agg_vsi_priority_per_tc(struct ice_port_info 
*pi, u32 agg_id, + u16 num_vsis, u16 *vsi_handle_arr, + u8 *node_prio, u8 tc); +enum ice_status +ice_cfg_agg_bw_alloc(struct ice_port_info *pi, u32 agg_id, u8 ena_tcmap, + enum ice_rl_type rl_type, u8 *bw_alloc); +bool +ice_sched_find_node_in_subtree(struct ice_hw *hw, struct ice_sched_node *base, + struct ice_sched_node *node); +enum ice_status +ice_sched_set_agg_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle); +enum ice_status +ice_sched_set_node_bw_lmt_per_tc(struct ice_port_info *pi, u32 id, + enum ice_agg_type agg_type, u8 tc, + enum ice_rl_type rl_type, u32 bw); +enum ice_status +ice_sched_set_vsi_bw_shared_lmt(struct ice_port_info *pi, u16 vsi_handle, + u32 bw); +enum ice_status +ice_sched_set_agg_bw_shared_lmt(struct ice_port_info *pi, u32 agg_id, u32 bw); +enum ice_status +ice_sched_cfg_sibl_node_prio(struct ice_port_info *pi, + struct ice_sched_node *node, u8 priority); +enum ice_status +ice_cfg_tc_node_bw_alloc(struct ice_port_info *pi, u8 tc, + enum ice_rl_type rl_type, u8 bw_alloc); +enum ice_status ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes); +#endif /* _ICE_SCHED_H_ */ diff --git a/src/spdk/dpdk/drivers/net/ice/base/ice_status.h b/src/spdk/dpdk/drivers/net/ice/base/ice_status.h new file mode 100644 index 000000000..3ca1b5db7 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ice/base/ice_status.h @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + +#ifndef _ICE_STATUS_H_ +#define _ICE_STATUS_H_ + +/* Error Codes */ +enum ice_status { + ICE_SUCCESS = 0, + + /* Generic codes : Range -1..-49 */ + ICE_ERR_PARAM = -1, + ICE_ERR_NOT_IMPL = -2, + ICE_ERR_NOT_READY = -3, + ICE_ERR_NOT_SUPPORTED = -4, + ICE_ERR_BAD_PTR = -5, + ICE_ERR_INVAL_SIZE = -6, + ICE_ERR_DEVICE_NOT_SUPPORTED = -8, + ICE_ERR_RESET_FAILED = -9, + ICE_ERR_FW_API_VER = -10, + ICE_ERR_NO_MEMORY = -11, + ICE_ERR_CFG = -12, + ICE_ERR_OUT_OF_RANGE = -13, + ICE_ERR_ALREADY_EXISTS = -14, + ICE_ERR_DOES_NOT_EXIST = -15, + ICE_ERR_IN_USE = -16, + ICE_ERR_MAX_LIMIT = -17, + ICE_ERR_RESET_ONGOING = -18, + ICE_ERR_HW_TABLE = -19, + ICE_ERR_FW_DDP_MISMATCH = -20, + + /* NVM specific error codes: Range -50..-59 */ + ICE_ERR_NVM = -50, + ICE_ERR_NVM_CHECKSUM = -51, + ICE_ERR_BUF_TOO_SHORT = -52, + ICE_ERR_NVM_BLANK_MODE = -53, + + /* ARQ/ASQ specific error codes. Range -100..-109 */ + ICE_ERR_AQ_ERROR = -100, + ICE_ERR_AQ_TIMEOUT = -101, + ICE_ERR_AQ_FULL = -102, + ICE_ERR_AQ_NO_WORK = -103, + ICE_ERR_AQ_EMPTY = -104, + ICE_ERR_AQ_FW_CRITICAL = -105, +}; + +#endif /* _ICE_STATUS_H_ */ diff --git a/src/spdk/dpdk/drivers/net/ice/base/ice_switch.c b/src/spdk/dpdk/drivers/net/ice/base/ice_switch.c new file mode 100644 index 000000000..5b968b7ce --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ice/base/ice_switch.c @@ -0,0 +1,7611 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + +#include "ice_switch.h" +#include "ice_flex_type.h" +#include "ice_flow.h" + +#define ICE_ETH_DA_OFFSET 0 +#define ICE_ETH_ETHTYPE_OFFSET 12 +#define ICE_ETH_VLAN_TCI_OFFSET 14 +#define ICE_MAX_VLAN_ID 0xFFF +#define ICE_IPV4_NVGRE_PROTO_ID 0x002F +#define ICE_PPP_IPV6_PROTO_ID 0x0057 +#define ICE_IPV6_ETHER_ID 0x86DD + +/* Dummy ethernet header needed in the ice_aqc_sw_rules_elem + * struct to configure any switch filter rules. 
+ * {DA (6 bytes), SA(6 bytes), + * Ether type (2 bytes for header without VLAN tag) OR + * VLAN tag (4 bytes for header with VLAN tag) } + * + * Word on Hardcoded values + * byte 0 = 0x2: to identify it as locally administered DA MAC + * byte 6 = 0x2: to identify it as locally administered SA MAC + * byte 12 = 0x81 & byte 13 = 0x00: + * In case of VLAN filter first two bytes defines ether type (0x8100) + * and remaining two bytes are placeholder for programming a given VLAN ID + * In case of Ether type filter it is treated as header without VLAN tag + * and byte 12 and 13 is used to program a given Ether type instead + */ +static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0, + 0x2, 0, 0, 0, 0, 0, + 0x81, 0, 0, 0}; + +struct ice_dummy_pkt_offsets { + enum ice_protocol_type type; + u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */ +}; + +static const struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = { + { ICE_MAC_OFOS, 0 }, + { ICE_ETYPE_OL, 12 }, + { ICE_IPV4_OFOS, 14 }, + { ICE_NVGRE, 34 }, + { ICE_MAC_IL, 42 }, + { ICE_IPV4_IL, 56 }, + { ICE_TCP_IL, 76 }, + { ICE_PROTOCOL_LAST, 0 }, +}; + +static const u8 dummy_gre_tcp_packet[] = { + 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x08, 0x00, /* ICE_ETYPE_OL 12 */ + + 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x2F, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */ + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x08, 0x00, + + 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x06, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 76 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x50, 0x02, 0x20, 0x00, + 0x00, 0x00, 0x00, 0x00 +}; + +static const struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = { + { ICE_MAC_OFOS, 0 }, + { ICE_ETYPE_OL, 12 }, + { ICE_IPV4_OFOS, 14 }, + { ICE_NVGRE, 34 }, + { ICE_MAC_IL, 42 }, + { ICE_IPV4_IL, 56 }, + { ICE_UDP_ILOS, 76 }, + { ICE_PROTOCOL_LAST, 0 }, +}; + +static const u8 dummy_gre_udp_packet[] = { + 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x08, 0x00, /* ICE_ETYPE_OL 12 */ + + 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x2F, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */ + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x08, 0x00, + + 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x11, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 76 */ + 0x00, 0x08, 0x00, 0x00, +}; + +static const struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = { + { ICE_MAC_OFOS, 0 }, + { ICE_ETYPE_OL, 12 }, + { ICE_IPV4_OFOS, 14 }, + { ICE_UDP_OF, 34 }, + { ICE_VXLAN, 42 }, + { ICE_GENEVE, 42 }, + { ICE_VXLAN_GPE, 42 }, + { ICE_MAC_IL, 50 }, + { ICE_IPV4_IL, 64 }, + { ICE_TCP_IL, 84 }, + { ICE_PROTOCOL_LAST, 0 }, +}; + +static const u8 dummy_udp_tun_tcp_packet[] = { + 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x08, 0x00, /* ICE_ETYPE_OL 12 */ + + 
0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */ + 0x00, 0x01, 0x00, 0x00, + 0x40, 0x11, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */ + 0x00, 0x46, 0x00, 0x00, + + 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */ + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x08, 0x00, + + 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */ + 0x00, 0x01, 0x00, 0x00, + 0x40, 0x06, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 84 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x50, 0x02, 0x20, 0x00, + 0x00, 0x00, 0x00, 0x00 +}; + +static const struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = { + { ICE_MAC_OFOS, 0 }, + { ICE_ETYPE_OL, 12 }, + { ICE_IPV4_OFOS, 14 }, + { ICE_UDP_OF, 34 }, + { ICE_VXLAN, 42 }, + { ICE_GENEVE, 42 }, + { ICE_VXLAN_GPE, 42 }, + { ICE_MAC_IL, 50 }, + { ICE_IPV4_IL, 64 }, + { ICE_UDP_ILOS, 84 }, + { ICE_PROTOCOL_LAST, 0 }, +}; + +static const u8 dummy_udp_tun_udp_packet[] = { + 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x08, 0x00, /* ICE_ETYPE_OL 12 */ + + 0x45, 0x00, 0x00, 0x4e, /* ICE_IPV4_OFOS 14 */ + 0x00, 0x01, 0x00, 0x00, + 0x00, 0x11, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */ + 0x00, 0x3a, 0x00, 0x00, + + 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */ + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x08, 0x00, + + 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */ + 0x00, 0x01, 0x00, 0x00, + 0x00, 0x11, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 84 */ + 0x00, 0x08, 0x00, 0x00, +}; + +/* offset info for MAC + IPv4 + UDP dummy packet */ +static const struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = { + { ICE_MAC_OFOS, 0 }, + { ICE_ETYPE_OL, 12 }, + { ICE_IPV4_OFOS, 14 }, + { ICE_UDP_ILOS, 34 }, + { ICE_PROTOCOL_LAST, 0 }, +}; + +/* Dummy packet for MAC + IPv4 + UDP */ +static const u8 dummy_udp_packet[] = { + 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x08, 0x00, /* ICE_ETYPE_OL 12 */ + + 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */ + 0x00, 0x01, 0x00, 0x00, + 0x00, 0x11, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */ + 0x00, 0x08, 0x00, 0x00, + + 0x00, 0x00, /* 2 bytes for 4 byte alignment */ +}; + +/* offset info for MAC + VLAN + IPv4 + UDP dummy packet */ +static const struct ice_dummy_pkt_offsets dummy_vlan_udp_packet_offsets[] = { + { ICE_MAC_OFOS, 0 }, + { ICE_ETYPE_OL, 12 }, + { ICE_VLAN_OFOS, 14 }, + { ICE_IPV4_OFOS, 18 }, + { ICE_UDP_ILOS, 38 }, + { ICE_PROTOCOL_LAST, 0 }, +}; + +/* C-tag (801.1Q), IPv4:UDP dummy packet */ +static const u8 dummy_vlan_udp_packet[] = { + 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x81, 0x00, /* ICE_ETYPE_OL 12 */ + + 0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 14 */ + + 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 18 */ + 0x00, 0x01, 0x00, 0x00, + 0x00, 0x11, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 38 */ + 0x00, 0x08, 0x00, 0x00, + + 0x00, 0x00, /* 2 bytes for 4 byte alignment */ +}; + +/* offset info for MAC + 
IPv4 + TCP dummy packet */ +static const struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = { + { ICE_MAC_OFOS, 0 }, + { ICE_ETYPE_OL, 12 }, + { ICE_IPV4_OFOS, 14 }, + { ICE_TCP_IL, 34 }, + { ICE_PROTOCOL_LAST, 0 }, +}; + +/* Dummy packet for MAC + IPv4 + TCP */ +static const u8 dummy_tcp_packet[] = { + 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x08, 0x00, /* ICE_ETYPE_OL 12 */ + + 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */ + 0x00, 0x01, 0x00, 0x00, + 0x00, 0x06, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 34 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x50, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, /* 2 bytes for 4 byte alignment */ +}; + +/* offset info for MAC + VLAN (C-tag, 802.1Q) + IPv4 + TCP dummy packet */ +static const struct ice_dummy_pkt_offsets dummy_vlan_tcp_packet_offsets[] = { + { ICE_MAC_OFOS, 0 }, + { ICE_ETYPE_OL, 12 }, + { ICE_VLAN_OFOS, 14 }, + { ICE_IPV4_OFOS, 18 }, + { ICE_TCP_IL, 38 }, + { ICE_PROTOCOL_LAST, 0 }, +}; + +/* C-tag (801.1Q), IPv4:TCP dummy packet */ +static const u8 dummy_vlan_tcp_packet[] = { + 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x81, 0x00, /* ICE_ETYPE_OL 12 */ + + 0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 14 */ + + 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 18 */ + 0x00, 0x01, 0x00, 0x00, + 0x00, 0x06, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 38 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x50, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, /* 2 bytes for 4 byte alignment */ +}; + +static const struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = { + { ICE_MAC_OFOS, 0 }, + { ICE_ETYPE_OL, 12 }, + { ICE_IPV6_OFOS, 14 }, + { ICE_TCP_IL, 54 }, + { ICE_PROTOCOL_LAST, 0 }, +}; + +static const u8 dummy_tcp_ipv6_packet[] = { + 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x86, 0xDD, /* ICE_ETYPE_OL 12 */ + + 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */ + 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x50, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, /* 2 bytes for 4 byte alignment */ +}; + +/* C-tag (802.1Q): IPv6 + TCP */ +static const struct ice_dummy_pkt_offsets +dummy_vlan_tcp_ipv6_packet_offsets[] = { + { ICE_MAC_OFOS, 0 }, + { ICE_ETYPE_OL, 12 }, + { ICE_VLAN_OFOS, 14 }, + { ICE_IPV6_OFOS, 18 }, + { ICE_TCP_IL, 58 }, + { ICE_PROTOCOL_LAST, 0 }, +}; + +/* C-tag (802.1Q), IPv6 + TCP dummy packet */ +static const u8 dummy_vlan_tcp_ipv6_packet[] = { + 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x81, 0x00, /* ICE_ETYPE_OL 12 */ + + 0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 14 */ + + 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */ + 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 
0x00, 0x00, /* ICE_TCP_IL 58 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x50, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, /* 2 bytes for 4 byte alignment */ +}; + +/* IPv6 + UDP */ +static const struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = { + { ICE_MAC_OFOS, 0 }, + { ICE_ETYPE_OL, 12 }, + { ICE_IPV6_OFOS, 14 }, + { ICE_UDP_ILOS, 54 }, + { ICE_PROTOCOL_LAST, 0 }, +}; + +/* IPv6 + UDP dummy packet */ +static const u8 dummy_udp_ipv6_packet[] = { + 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x86, 0xDD, /* ICE_ETYPE_OL 12 */ + + 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */ + 0x00, 0x08, 0x11, 0x00, /* Next header UDP*/ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */ + 0x00, 0x08, 0x00, 0x00, + + 0x00, 0x00, /* 2 bytes for 4 byte alignment */ +}; + +/* C-tag (802.1Q): IPv6 + UDP */ +static const struct ice_dummy_pkt_offsets +dummy_vlan_udp_ipv6_packet_offsets[] = { + { ICE_MAC_OFOS, 0 }, + { ICE_ETYPE_OL, 12 }, + { ICE_VLAN_OFOS, 14 }, + { ICE_IPV6_OFOS, 18 }, + { ICE_UDP_ILOS, 58 }, + { ICE_PROTOCOL_LAST, 0 }, +}; + +/* C-tag (802.1Q), IPv6 + UDP dummy packet */ +static const u8 dummy_vlan_udp_ipv6_packet[] = { + 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x81, 0x00, /* ICE_ETYPE_OL 12 */ + + 0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 14 */ + + 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */ + 0x00, 0x08, 0x11, 0x00, /* Next header UDP */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 58 */ + 0x00, 0x08, 0x00, 0x00, + + 0x00, 0x00, /* 2 bytes for 4 byte alignment */ +}; + +static const struct ice_dummy_pkt_offsets dummy_udp_gtp_packet_offsets[] = { + { ICE_MAC_OFOS, 0 }, + { ICE_IPV4_OFOS, 14 }, + { ICE_UDP_OF, 34 }, + { ICE_GTP, 42 }, + { ICE_PROTOCOL_LAST, 0 }, +}; + +static const u8 dummy_udp_gtp_packet[] = { + 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x08, 0x00, + + 0x45, 0x00, 0x00, 0x30, /* ICE_IPV4_OFOS 14 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x11, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x08, 0x68, /* ICE_UDP_OF 34 */ + 0x00, 0x1c, 0x00, 0x00, + + 0x34, 0xff, 0x00, 0x0c, /* ICE_GTP 42 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x85, + + 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */ + 0x00, 0x00, 0x00, 0x00, +}; + +static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_offsets[] = { + { ICE_MAC_OFOS, 0 }, + { ICE_ETYPE_OL, 12 }, + { ICE_VLAN_OFOS, 14}, + { ICE_PPPOE, 18 }, + { ICE_PROTOCOL_LAST, 0 }, +}; + +static const u8 dummy_pppoe_ipv4_packet[] = { + 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x81, 0x00, /* ICE_ETYPE_OL 12 */ + + 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */ + + 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */ + 0x00, 0x16, + + 0x00, 0x21, /* PPP Link Layer 24 */ + + 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 26 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 
0x00, /* 2 bytes for 4 bytes alignment */ +}; + +static const u8 dummy_pppoe_ipv6_packet[] = { + 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x81, 0x00, /* ICE_ETYPE_OL 12 */ + + 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */ + + 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */ + 0x00, 0x2a, + + 0x00, 0x57, /* PPP Link Layer 24 */ + + 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */ + 0x00, 0x00, 0x3b, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, /* 2 bytes for 4 bytes alignment */ +}; + +static const struct ice_dummy_pkt_offsets dummy_ipv4_esp_packet_offsets[] = { + { ICE_MAC_OFOS, 0 }, + { ICE_IPV4_OFOS, 14 }, + { ICE_ESP, 34 }, + { ICE_PROTOCOL_LAST, 0 }, +}; + +static const u8 dummy_ipv4_esp_pkt[] = { + 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x08, 0x00, + + 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 14 */ + 0x00, 0x00, 0x40, 0x00, + 0x40, 0x32, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x00, 0x00, /* ICE_ESP 34 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, /* 2 bytes for 4 bytes alignment */ +}; + +static const struct ice_dummy_pkt_offsets dummy_ipv6_esp_packet_offsets[] = { + { ICE_MAC_OFOS, 0 }, + { ICE_IPV6_OFOS, 14 }, + { ICE_ESP, 54 }, + { ICE_PROTOCOL_LAST, 0 }, +}; + +static const u8 dummy_ipv6_esp_pkt[] = { + 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x86, 0xDD, + + 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */ + 0x00, 0x08, 0x32, 0x00, /* Next header ESP */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x00, 0x00, /* ICE_ESP 54 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, /* 2 bytes for 4 bytes alignment */ +}; + +static const struct ice_dummy_pkt_offsets dummy_ipv4_ah_packet_offsets[] = { + { ICE_MAC_OFOS, 0 }, + { ICE_IPV4_OFOS, 14 }, + { ICE_AH, 34 }, + { ICE_PROTOCOL_LAST, 0 }, +}; + +static const u8 dummy_ipv4_ah_pkt[] = { + 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x08, 0x00, + + 0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_IL 14 */ + 0x00, 0x00, 0x40, 0x00, + 0x40, 0x33, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x00, 0x00, /* ICE_AH 34 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, /* 2 bytes for 4 bytes alignment */ +}; + +static const struct ice_dummy_pkt_offsets dummy_ipv6_ah_packet_offsets[] = { + { ICE_MAC_OFOS, 0 }, + { ICE_IPV6_OFOS, 14 }, + { ICE_AH, 54 }, + { ICE_PROTOCOL_LAST, 0 }, +}; + +static const u8 dummy_ipv6_ah_pkt[] = { + 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x86, 0xDD, + + 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */ + 0x00, 0x0c, 0x33, 0x00, /* Next header AH */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x00, 0x00, /* ICE_AH 54 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, /* 2 bytes for 4 bytes alignment */ +}; + +static const 
struct ice_dummy_pkt_offsets dummy_ipv4_nat_packet_offsets[] = { + { ICE_MAC_OFOS, 0 }, + { ICE_IPV4_OFOS, 14 }, + { ICE_UDP_ILOS, 34 }, + { ICE_NAT_T, 42 }, + { ICE_PROTOCOL_LAST, 0 }, +}; + +static const u8 dummy_ipv4_nat_pkt[] = { + 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x08, 0x00, + + 0x45, 0x00, 0x00, 0x24, /* ICE_IPV4_IL 14 */ + 0x00, 0x00, 0x40, 0x00, + 0x40, 0x11, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x11, 0x94, /* ICE_NAT_T 34 */ + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, /* 2 bytes for 4 bytes alignment */ +}; + +static const struct ice_dummy_pkt_offsets dummy_ipv6_nat_packet_offsets[] = { + { ICE_MAC_OFOS, 0 }, + { ICE_IPV6_OFOS, 14 }, + { ICE_UDP_ILOS, 54 }, + { ICE_NAT_T, 62 }, + { ICE_PROTOCOL_LAST, 0 }, +}; + +static const u8 dummy_ipv6_nat_pkt[] = { + 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x86, 0xDD, + + 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */ + 0x00, 0x10, 0x11, 0x00, /* Next header NAT_T */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x11, 0x94, /* ICE_NAT_T 54 */ + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, /* 2 bytes for 4 bytes alignment */ + +}; + +static const struct ice_dummy_pkt_offsets dummy_ipv4_l2tpv3_packet_offsets[] = { + { ICE_MAC_OFOS, 0 }, + { ICE_IPV4_OFOS, 14 }, + { ICE_L2TPV3, 34 }, + { ICE_PROTOCOL_LAST, 0 }, +}; + +static const u8 dummy_ipv4_l2tpv3_pkt[] = { + 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x08, 0x00, + + 0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_IL 14 */ + 0x00, 0x00, 0x40, 0x00, + 0x40, 0x73, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 34 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, /* 2 bytes for 4 bytes alignment */ +}; + +static const struct ice_dummy_pkt_offsets dummy_ipv6_l2tpv3_packet_offsets[] = { + { ICE_MAC_OFOS, 0 }, + { ICE_IPV6_OFOS, 14 }, + { ICE_L2TPV3, 54 }, + { ICE_PROTOCOL_LAST, 0 }, +}; + +static const u8 dummy_ipv6_l2tpv3_pkt[] = { + 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x86, 0xDD, + + 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 14 */ + 0x00, 0x0c, 0x73, 0x40, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 54 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, /* 2 bytes for 4 bytes alignment */ +}; + +/* this is a recipe to profile association bitmap */ +static ice_declare_bitmap(recipe_to_profile[ICE_MAX_NUM_RECIPES], + ICE_MAX_NUM_PROFILES); + +/* this is a profile to recipe association bitmap */ +static ice_declare_bitmap(profile_to_recipe[ICE_MAX_NUM_PROFILES], + ICE_MAX_NUM_RECIPES); + +static void ice_get_recp_to_prof_map(struct ice_hw *hw); + +/** + * ice_collect_result_idx - copy result index values + * @buf: buffer that contains the result index + * @recp: the recipe struct to copy data into + */ +static void ice_collect_result_idx(struct 
ice_aqc_recipe_data_elem *buf, + struct ice_sw_recipe *recp) +{ + if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN) + ice_set_bit(buf->content.result_indx & + ~ICE_AQ_RECIPE_RESULT_EN, recp->res_idxs); +} + +/** + * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries + * @hw: pointer to hardware structure + * @recps: struct that we need to populate + * @rid: recipe ID that we are populating + * @refresh_required: true if we should get recipe to profile mapping from FW + * + * This function is used to populate all the necessary entries into our + * bookkeeping so that we have a current list of all the recipes that are + * programmed in the firmware. + */ +static enum ice_status +ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid, + bool *refresh_required) +{ + ice_declare_bitmap(result_bm, ICE_MAX_FV_WORDS); + struct ice_aqc_recipe_data_elem *tmp; + u16 num_recps = ICE_MAX_NUM_RECIPES; + struct ice_prot_lkup_ext *lkup_exts; + enum ice_status status; + u8 fv_word_idx = 0; + u16 sub_recps; + + ice_zero_bitmap(result_bm, ICE_MAX_FV_WORDS); + + /* we need a buffer big enough to accommodate all the recipes */ + tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw, + ICE_MAX_NUM_RECIPES, sizeof(*tmp)); + if (!tmp) + return ICE_ERR_NO_MEMORY; + + tmp[0].recipe_indx = rid; + status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL); + /* non-zero status meaning recipe doesn't exist */ + if (status) + goto err_unroll; + + /* Get recipe to profile map so that we can get the fv from lkups that + * we read for a recipe from FW. Since we want to minimize the number of + * times we make this FW call, just make one call and cache the copy + * until a new recipe is added. This operation is only required the + * first time to get the changes from FW. Then to search existing + * entries we don't need to update the cache again until another recipe + * gets added. + */ + if (*refresh_required) { + ice_get_recp_to_prof_map(hw); + *refresh_required = false; + } + + /* Start populating all the entries for recps[rid] based on lkups from + * firmware. Note that we are only creating the root recipe in our + * database. + */ + lkup_exts = &recps[rid].lkup_exts; + + for (sub_recps = 0; sub_recps < num_recps; sub_recps++) { + struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps]; + struct ice_recp_grp_entry *rg_entry; + u8 i, prof, idx, prot = 0; + bool is_root; + u16 off = 0; + + rg_entry = (struct ice_recp_grp_entry *) + ice_malloc(hw, sizeof(*rg_entry)); + if (!rg_entry) { + status = ICE_ERR_NO_MEMORY; + goto err_unroll; + } + + idx = root_bufs.recipe_indx; + is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT; + + /* Mark all result indices in this chain */ + if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) + ice_set_bit(root_bufs.content.result_indx & + ~ICE_AQ_RECIPE_RESULT_EN, result_bm); + + /* get the first profile that is associated with rid */ + prof = ice_find_first_bit(recipe_to_profile[idx], + ICE_MAX_NUM_PROFILES); + for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) { + u8 lkup_indx = root_bufs.content.lkup_indx[i + 1]; + + rg_entry->fv_idx[i] = lkup_indx; + rg_entry->fv_mask[i] = + LE16_TO_CPU(root_bufs.content.mask[i + 1]); + + /* If the recipe is a chained recipe then all its + * child recipe's result will have a result index. + * To fill fv_words we should not use those result + * index, we only need the protocol ids and offsets. + * We will skip all the fv_idx which stores result + * index in them. 
We also need to skip any fv_idx which + * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a + * valid offset value. + */ + if (ice_is_bit_set(hw->switch_info->prof_res_bm[prof], + rg_entry->fv_idx[i]) || + rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE || + rg_entry->fv_idx[i] == 0) + continue; + + ice_find_prot_off(hw, ICE_BLK_SW, prof, + rg_entry->fv_idx[i], &prot, &off); + lkup_exts->fv_words[fv_word_idx].prot_id = prot; + lkup_exts->fv_words[fv_word_idx].off = off; + lkup_exts->field_mask[fv_word_idx] = + rg_entry->fv_mask[i]; + fv_word_idx++; + } + /* populate rg_list with the data from the child entry of this + * recipe + */ + LIST_ADD(&rg_entry->l_entry, &recps[rid].rg_list); + + /* Propagate some data to the recipe database */ + recps[idx].is_root = !!is_root; + recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority; + ice_zero_bitmap(recps[idx].res_idxs, ICE_MAX_FV_WORDS); + if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) { + recps[idx].chain_idx = root_bufs.content.result_indx & + ~ICE_AQ_RECIPE_RESULT_EN; + ice_set_bit(recps[idx].chain_idx, recps[idx].res_idxs); + } else { + recps[idx].chain_idx = ICE_INVAL_CHAIN_IND; + } + + if (!is_root) + continue; + + /* Only do the following for root recipes entries */ + ice_memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap, + sizeof(recps[idx].r_bitmap), ICE_NONDMA_TO_NONDMA); + recps[idx].root_rid = root_bufs.content.rid & + ~ICE_AQ_RECIPE_ID_IS_ROOT; + recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority; + } + + /* Complete initialization of the root recipe entry */ + lkup_exts->n_val_words = fv_word_idx; + recps[rid].big_recp = (num_recps > 1); + recps[rid].n_grp_count = (u8)num_recps; + recps[rid].root_buf = (struct ice_aqc_recipe_data_elem *) + ice_memdup(hw, tmp, recps[rid].n_grp_count * + sizeof(*recps[rid].root_buf), ICE_NONDMA_TO_NONDMA); + if (!recps[rid].root_buf) + goto err_unroll; + + /* Copy result indexes */ + ice_cp_bitmap(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS); + recps[rid].recp_created = true; + +err_unroll: + ice_free(hw, tmp); + return status; +} + +/** + * ice_get_recp_to_prof_map - updates recipe to profile mapping + * @hw: pointer to hardware structure + * + * This function is used to populate recipe_to_profile matrix where index to + * this array is the recipe ID and the element is the mapping of which profiles + * is this recipe mapped to. + */ +static void +ice_get_recp_to_prof_map(struct ice_hw *hw) +{ + ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES); + u16 i; + + for (i = 0; i < ICE_MAX_NUM_PROFILES; i++) { + u16 j; + + ice_zero_bitmap(profile_to_recipe[i], ICE_MAX_NUM_RECIPES); + ice_zero_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES); + if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL)) + continue; + ice_cp_bitmap(profile_to_recipe[i], r_bitmap, + ICE_MAX_NUM_RECIPES); + for (j = 0; j < ICE_MAX_NUM_RECIPES; j++) + if (ice_is_bit_set(r_bitmap, j)) + ice_set_bit(i, recipe_to_profile[j]); + } +} + +/** + * ice_init_def_sw_recp - initialize the recipe book keeping tables + * @hw: pointer to the HW struct + * @recp_list: pointer to sw recipe list + * + * Allocate memory for the entire recipe table and initialize the structures/ + * entries corresponding to basic recipes. 
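+ *
+ * Illustrative caller sketch (hypothetical init path; this assumes the recipe
+ * list pointer lives in hw->switch_info, as used elsewhere in this file):
+ *
+ *	status = ice_init_def_sw_recp(hw, &hw->switch_info->recp_list);
+ *	if (status)
+ *		return status;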
+ */ +enum ice_status +ice_init_def_sw_recp(struct ice_hw *hw, struct ice_sw_recipe **recp_list) +{ + struct ice_sw_recipe *recps; + u8 i; + + recps = (struct ice_sw_recipe *) + ice_calloc(hw, ICE_MAX_NUM_RECIPES, sizeof(*recps)); + if (!recps) + return ICE_ERR_NO_MEMORY; + + for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) { + recps[i].root_rid = i; + INIT_LIST_HEAD(&recps[i].filt_rules); + INIT_LIST_HEAD(&recps[i].filt_replay_rules); + INIT_LIST_HEAD(&recps[i].rg_list); + ice_init_lock(&recps[i].filt_rule_lock); + } + + *recp_list = recps; + + return ICE_SUCCESS; +} + +/** + * ice_aq_get_sw_cfg - get switch configuration + * @hw: pointer to the hardware structure + * @buf: pointer to the result buffer + * @buf_size: length of the buffer available for response + * @req_desc: pointer to requested descriptor + * @num_elems: pointer to number of elements + * @cd: pointer to command details structure or NULL + * + * Get switch configuration (0x0200) to be placed in 'buff'. + * This admin command returns information such as initial VSI/port number + * and switch ID it belongs to. + * + * NOTE: *req_desc is both an input/output parameter. + * The caller of this function first calls this function with *request_desc set + * to 0. If the response from f/w has *req_desc set to 0, all the switch + * configuration information has been returned; if non-zero (meaning not all + * the information was returned), the caller should call this function again + * with *req_desc set to the previous value returned by f/w to get the + * next block of switch configuration information. + * + * *num_elems is output only parameter. This reflects the number of elements + * in response buffer. The caller of this function to use *num_elems while + * parsing the response buffer. + */ +static enum ice_status +ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp *buf, + u16 buf_size, u16 *req_desc, u16 *num_elems, + struct ice_sq_cd *cd) +{ + struct ice_aqc_get_sw_cfg *cmd; + enum ice_status status; + struct ice_aq_desc desc; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg); + cmd = &desc.params.get_sw_conf; + cmd->element = CPU_TO_LE16(*req_desc); + + status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); + if (!status) { + *req_desc = LE16_TO_CPU(cmd->element); + *num_elems = LE16_TO_CPU(cmd->num_elems); + } + + return status; +} + +/** + * ice_alloc_sw - allocate resources specific to switch + * @hw: pointer to the HW struct + * @ena_stats: true to turn on VEB stats + * @shared_res: true for shared resource, false for dedicated resource + * @sw_id: switch ID returned + * @counter_id: VEB counter ID returned + * + * allocates switch resources (SWID and VEB counter) (0x0208) + */ +enum ice_status +ice_alloc_sw(struct ice_hw *hw, bool ena_stats, bool shared_res, u16 *sw_id, + u16 *counter_id) +{ + struct ice_aqc_alloc_free_res_elem *sw_buf; + struct ice_aqc_res_elem *sw_ele; + enum ice_status status; + u16 buf_len; + + buf_len = sizeof(*sw_buf); + sw_buf = (struct ice_aqc_alloc_free_res_elem *) + ice_malloc(hw, buf_len); + if (!sw_buf) + return ICE_ERR_NO_MEMORY; + + /* Prepare buffer for switch ID. + * The number of resource entries in buffer is passed as 1 since only a + * single switch/VEB instance is allocated, and hence a single sw_id + * is requested. + */ + sw_buf->num_elems = CPU_TO_LE16(1); + sw_buf->res_type = + CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID | + (shared_res ? 
ICE_AQC_RES_TYPE_FLAG_SHARED : + ICE_AQC_RES_TYPE_FLAG_DEDICATED)); + + status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, + ice_aqc_opc_alloc_res, NULL); + + if (status) + goto ice_alloc_sw_exit; + + sw_ele = &sw_buf->elem[0]; + *sw_id = LE16_TO_CPU(sw_ele->e.sw_resp); + + if (ena_stats) { + /* Prepare buffer for VEB Counter */ + enum ice_adminq_opc opc = ice_aqc_opc_alloc_res; + struct ice_aqc_alloc_free_res_elem *counter_buf; + struct ice_aqc_res_elem *counter_ele; + + counter_buf = (struct ice_aqc_alloc_free_res_elem *) + ice_malloc(hw, buf_len); + if (!counter_buf) { + status = ICE_ERR_NO_MEMORY; + goto ice_alloc_sw_exit; + } + + /* The number of resource entries in buffer is passed as 1 since + * only a single switch/VEB instance is allocated, and hence a + * single VEB counter is requested. + */ + counter_buf->num_elems = CPU_TO_LE16(1); + counter_buf->res_type = + CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER | + ICE_AQC_RES_TYPE_FLAG_DEDICATED); + status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len, + opc, NULL); + + if (status) { + ice_free(hw, counter_buf); + goto ice_alloc_sw_exit; + } + counter_ele = &counter_buf->elem[0]; + *counter_id = LE16_TO_CPU(counter_ele->e.sw_resp); + ice_free(hw, counter_buf); + } + +ice_alloc_sw_exit: + ice_free(hw, sw_buf); + return status; +} + +/** + * ice_free_sw - free resources specific to switch + * @hw: pointer to the HW struct + * @sw_id: switch ID returned + * @counter_id: VEB counter ID returned + * + * free switch resources (SWID and VEB counter) (0x0209) + * + * NOTE: This function frees multiple resources. It continues + * releasing other resources even after it encounters error. + * The error code returned is the last error it encountered. + */ +enum ice_status ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id) +{ + struct ice_aqc_alloc_free_res_elem *sw_buf, *counter_buf; + enum ice_status status, ret_status; + u16 buf_len; + + buf_len = sizeof(*sw_buf); + sw_buf = (struct ice_aqc_alloc_free_res_elem *) + ice_malloc(hw, buf_len); + if (!sw_buf) + return ICE_ERR_NO_MEMORY; + + /* Prepare buffer to free for switch ID res. + * The number of resource entries in buffer is passed as 1 since only a + * single switch/VEB instance is freed, and hence a single sw_id + * is released. 
+ */ + sw_buf->num_elems = CPU_TO_LE16(1); + sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID); + sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(sw_id); + + ret_status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, + ice_aqc_opc_free_res, NULL); + + if (ret_status) + ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n"); + + /* Prepare buffer to free for VEB Counter resource */ + counter_buf = (struct ice_aqc_alloc_free_res_elem *) + ice_malloc(hw, buf_len); + if (!counter_buf) { + ice_free(hw, sw_buf); + return ICE_ERR_NO_MEMORY; + } + + /* The number of resource entries in buffer is passed as 1 since only a + * single switch/VEB instance is freed, and hence a single VEB counter + * is released + */ + counter_buf->num_elems = CPU_TO_LE16(1); + counter_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER); + counter_buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id); + + status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len, + ice_aqc_opc_free_res, NULL); + if (status) { + ice_debug(hw, ICE_DBG_SW, + "VEB counter resource could not be freed\n"); + ret_status = status; + } + + ice_free(hw, counter_buf); + ice_free(hw, sw_buf); + return ret_status; +} + +/** + * ice_aq_add_vsi + * @hw: pointer to the HW struct + * @vsi_ctx: pointer to a VSI context struct + * @cd: pointer to command details structure or NULL + * + * Add a VSI context to the hardware (0x0210) + */ +enum ice_status +ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx, + struct ice_sq_cd *cd) +{ + struct ice_aqc_add_update_free_vsi_resp *res; + struct ice_aqc_add_get_update_free_vsi *cmd; + struct ice_aq_desc desc; + enum ice_status status; + + cmd = &desc.params.vsi_cmd; + res = &desc.params.add_update_free_vsi_res; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi); + + if (!vsi_ctx->alloc_from_pool) + cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | + ICE_AQ_VSI_IS_VALID); + + cmd->vsi_flags = CPU_TO_LE16(vsi_ctx->flags); + + desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); + + status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info, + sizeof(vsi_ctx->info), cd); + + if (!status) { + vsi_ctx->vsi_num = LE16_TO_CPU(res->vsi_num) & ICE_AQ_VSI_NUM_M; + vsi_ctx->vsis_allocd = LE16_TO_CPU(res->vsi_used); + vsi_ctx->vsis_unallocated = LE16_TO_CPU(res->vsi_free); + } + + return status; +} + +/** + * ice_aq_free_vsi + * @hw: pointer to the HW struct + * @vsi_ctx: pointer to a VSI context struct + * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources + * @cd: pointer to command details structure or NULL + * + * Free VSI context info from hardware (0x0213) + */ +enum ice_status +ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx, + bool keep_vsi_alloc, struct ice_sq_cd *cd) +{ + struct ice_aqc_add_update_free_vsi_resp *resp; + struct ice_aqc_add_get_update_free_vsi *cmd; + struct ice_aq_desc desc; + enum ice_status status; + + cmd = &desc.params.vsi_cmd; + resp = &desc.params.add_update_free_vsi_res; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi); + + cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID); + if (keep_vsi_alloc) + cmd->cmd_flags = CPU_TO_LE16(ICE_AQ_VSI_KEEP_ALLOC); + + status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd); + if (!status) { + vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used); + vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free); + } + + return status; +} + +/** + * ice_aq_update_vsi + * @hw: pointer to the HW struct + * @vsi_ctx: pointer to a VSI context struct + * @cd: pointer to command details structure or NULL + * + * Update VSI context 
in the hardware (0x0211) + */ +enum ice_status +ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx, + struct ice_sq_cd *cd) +{ + struct ice_aqc_add_update_free_vsi_resp *resp; + struct ice_aqc_add_get_update_free_vsi *cmd; + struct ice_aq_desc desc; + enum ice_status status; + + cmd = &desc.params.vsi_cmd; + resp = &desc.params.add_update_free_vsi_res; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi); + + cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID); + + desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); + + status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info, + sizeof(vsi_ctx->info), cd); + + if (!status) { + vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used); + vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free); + } + + return status; +} + +/** + * ice_is_vsi_valid - check whether the VSI is valid or not + * @hw: pointer to the HW struct + * @vsi_handle: VSI handle + * + * check whether the VSI is valid or not + */ +bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle) +{ + return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle]; +} + +/** + * ice_get_hw_vsi_num - return the HW VSI number + * @hw: pointer to the HW struct + * @vsi_handle: VSI handle + * + * return the HW VSI number + * Caution: call this function only if VSI is valid (ice_is_vsi_valid) + */ +u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle) +{ + return hw->vsi_ctx[vsi_handle]->vsi_num; +} + +/** + * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle + * @hw: pointer to the HW struct + * @vsi_handle: VSI handle + * + * return the VSI context entry for a given VSI handle + */ +struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle) +{ + return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle]; +} + +/** + * ice_save_vsi_ctx - save the VSI context for a given VSI handle + * @hw: pointer to the HW struct + * @vsi_handle: VSI handle + * @vsi: VSI context pointer + * + * save the VSI context entry for a given VSI handle + */ +static void +ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi) +{ + hw->vsi_ctx[vsi_handle] = vsi; +} + +/** + * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs + * @hw: pointer to the HW struct + * @vsi_handle: VSI handle + */ +static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle) +{ + struct ice_vsi_ctx *vsi; + u8 i; + + vsi = ice_get_vsi_ctx(hw, vsi_handle); + if (!vsi) + return; + ice_for_each_traffic_class(i) { + if (vsi->lan_q_ctx[i]) { + ice_free(hw, vsi->lan_q_ctx[i]); + vsi->lan_q_ctx[i] = NULL; + } + } +} + +/** + * ice_clear_vsi_ctx - clear the VSI context entry + * @hw: pointer to the HW struct + * @vsi_handle: VSI handle + * + * clear the VSI context entry + */ +static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle) +{ + struct ice_vsi_ctx *vsi; + + vsi = ice_get_vsi_ctx(hw, vsi_handle); + if (vsi) { + ice_clear_vsi_q_ctx(hw, vsi_handle); + ice_free(hw, vsi); + hw->vsi_ctx[vsi_handle] = NULL; + } +} + +/** + * ice_clear_all_vsi_ctx - clear all the VSI context entries + * @hw: pointer to the HW struct + */ +void ice_clear_all_vsi_ctx(struct ice_hw *hw) +{ + u16 i; + + for (i = 0; i < ICE_MAX_VSI; i++) + ice_clear_vsi_ctx(hw, i); +} + +/** + * ice_add_vsi - add VSI context to the hardware and VSI handle list + * @hw: pointer to the HW struct + * @vsi_handle: unique VSI handle provided by drivers + * @vsi_ctx: pointer to a VSI context struct + * @cd: pointer to command details structure or NULL + * + * Add a VSI context to the 
hardware also add it into the VSI handle list. + * If this function gets called after reset for existing VSIs then update + * with the new HW VSI number in the corresponding VSI handle list entry. + */ +enum ice_status +ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx, + struct ice_sq_cd *cd) +{ + struct ice_vsi_ctx *tmp_vsi_ctx; + enum ice_status status; + + if (vsi_handle >= ICE_MAX_VSI) + return ICE_ERR_PARAM; + status = ice_aq_add_vsi(hw, vsi_ctx, cd); + if (status) + return status; + tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle); + if (!tmp_vsi_ctx) { + /* Create a new VSI context */ + tmp_vsi_ctx = (struct ice_vsi_ctx *) + ice_malloc(hw, sizeof(*tmp_vsi_ctx)); + if (!tmp_vsi_ctx) { + ice_aq_free_vsi(hw, vsi_ctx, false, cd); + return ICE_ERR_NO_MEMORY; + } + *tmp_vsi_ctx = *vsi_ctx; + + ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx); + } else { + /* update with new HW VSI num */ + tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num; + } + + return ICE_SUCCESS; +} + +/** + * ice_free_vsi- free VSI context from hardware and VSI handle list + * @hw: pointer to the HW struct + * @vsi_handle: unique VSI handle + * @vsi_ctx: pointer to a VSI context struct + * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources + * @cd: pointer to command details structure or NULL + * + * Free VSI context info from hardware as well as from VSI handle list + */ +enum ice_status +ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx, + bool keep_vsi_alloc, struct ice_sq_cd *cd) +{ + enum ice_status status; + + if (!ice_is_vsi_valid(hw, vsi_handle)) + return ICE_ERR_PARAM; + vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle); + status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd); + if (!status) + ice_clear_vsi_ctx(hw, vsi_handle); + return status; +} + +/** + * ice_update_vsi + * @hw: pointer to the HW struct + * @vsi_handle: unique VSI handle + * @vsi_ctx: pointer to a VSI context struct + * @cd: pointer to command details structure or NULL + * + * Update VSI context in the hardware + */ +enum ice_status +ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx, + struct ice_sq_cd *cd) +{ + if (!ice_is_vsi_valid(hw, vsi_handle)) + return ICE_ERR_PARAM; + vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle); + return ice_aq_update_vsi(hw, vsi_ctx, cd); +} + +/** + * ice_aq_get_vsi_params + * @hw: pointer to the HW struct + * @vsi_ctx: pointer to a VSI context struct + * @cd: pointer to command details structure or NULL + * + * Get VSI context info from hardware (0x0212) + */ +enum ice_status +ice_aq_get_vsi_params(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx, + struct ice_sq_cd *cd) +{ + struct ice_aqc_add_get_update_free_vsi *cmd; + struct ice_aqc_get_vsi_resp *resp; + struct ice_aq_desc desc; + enum ice_status status; + + cmd = &desc.params.vsi_cmd; + resp = &desc.params.get_vsi_resp; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_vsi_params); + + cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID); + + status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info, + sizeof(vsi_ctx->info), cd); + if (!status) { + vsi_ctx->vsi_num = LE16_TO_CPU(resp->vsi_num) & + ICE_AQ_VSI_NUM_M; + vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used); + vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free); + } + + return status; +} + +/** + * ice_aq_add_update_mir_rule - add/update a mirror rule + * @hw: pointer to the HW struct + * @rule_type: Rule Type + * @dest_vsi: VSI number to which packets will be mirrored + * @count: 
length of the list + * @mr_buf: buffer for list of mirrored VSI numbers + * @cd: pointer to command details structure or NULL + * @rule_id: Rule ID + * + * Add/Update Mirror Rule (0x260). + */ +enum ice_status +ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi, + u16 count, struct ice_mir_rule_buf *mr_buf, + struct ice_sq_cd *cd, u16 *rule_id) +{ + struct ice_aqc_add_update_mir_rule *cmd; + struct ice_aq_desc desc; + enum ice_status status; + __le16 *mr_list = NULL; + u16 buf_size = 0; + + switch (rule_type) { + case ICE_AQC_RULE_TYPE_VPORT_INGRESS: + case ICE_AQC_RULE_TYPE_VPORT_EGRESS: + /* Make sure count and mr_buf are set for these rule_types */ + if (!(count && mr_buf)) + return ICE_ERR_PARAM; + + buf_size = count * sizeof(__le16); + mr_list = (_FORCE_ __le16 *)ice_malloc(hw, buf_size); + if (!mr_list) + return ICE_ERR_NO_MEMORY; + break; + case ICE_AQC_RULE_TYPE_PPORT_INGRESS: + case ICE_AQC_RULE_TYPE_PPORT_EGRESS: + /* Make sure count and mr_buf are not set for these + * rule_types + */ + if (count || mr_buf) + return ICE_ERR_PARAM; + break; + default: + ice_debug(hw, ICE_DBG_SW, + "Error due to unsupported rule_type %u\n", rule_type); + return ICE_ERR_OUT_OF_RANGE; + } + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_update_mir_rule); + + /* Pre-process 'mr_buf' items for add/update of virtual port + * ingress/egress mirroring (but not physical port ingress/egress + * mirroring) + */ + if (mr_buf) { + int i; + + for (i = 0; i < count; i++) { + u16 id; + + id = mr_buf[i].vsi_idx & ICE_AQC_RULE_MIRRORED_VSI_M; + + /* Validate specified VSI number, make sure it is less + * than ICE_MAX_VSI, if not return with error. + */ + if (id >= ICE_MAX_VSI) { + ice_debug(hw, ICE_DBG_SW, + "Error VSI index (%u) out-of-range\n", + id); + ice_free(hw, mr_list); + return ICE_ERR_OUT_OF_RANGE; + } + + /* add VSI to mirror rule */ + if (mr_buf[i].add) + mr_list[i] = + CPU_TO_LE16(id | ICE_AQC_RULE_ACT_M); + else /* remove VSI from mirror rule */ + mr_list[i] = CPU_TO_LE16(id); + } + } + + cmd = &desc.params.add_update_rule; + if ((*rule_id) != ICE_INVAL_MIRROR_RULE_ID) + cmd->rule_id = CPU_TO_LE16(((*rule_id) & ICE_AQC_RULE_ID_M) | + ICE_AQC_RULE_ID_VALID_M); + cmd->rule_type = CPU_TO_LE16(rule_type & ICE_AQC_RULE_TYPE_M); + cmd->num_entries = CPU_TO_LE16(count); + cmd->dest = CPU_TO_LE16(dest_vsi); + + status = ice_aq_send_cmd(hw, &desc, mr_list, buf_size, cd); + if (!status) + *rule_id = LE16_TO_CPU(cmd->rule_id) & ICE_AQC_RULE_ID_M; + + ice_free(hw, mr_list); + + return status; +} + +/** + * ice_aq_delete_mir_rule - delete a mirror rule + * @hw: pointer to the HW struct + * @rule_id: Mirror rule ID (to be deleted) + * @keep_allocd: if set, the VSI stays part of the PF allocated res, + * otherwise it is returned to the shared pool + * @cd: pointer to command details structure or NULL + * + * Delete Mirror Rule (0x261). 
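+ *
+ * A minimal sketch of pairing this with ice_aq_add_update_mir_rule() above,
+ * assuming dest_vsi is a caller-chosen VSI number (error handling omitted):
+ *
+ *	u16 rule_id = ICE_INVAL_MIRROR_RULE_ID;
+ *
+ *	ice_aq_add_update_mir_rule(hw, ICE_AQC_RULE_TYPE_PPORT_INGRESS,
+ *				   dest_vsi, 0, NULL, NULL, &rule_id);
+ *	...
+ *	ice_aq_delete_mir_rule(hw, rule_id, false, NULL);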
+ */ +enum ice_status +ice_aq_delete_mir_rule(struct ice_hw *hw, u16 rule_id, bool keep_allocd, + struct ice_sq_cd *cd) +{ + struct ice_aqc_delete_mir_rule *cmd; + struct ice_aq_desc desc; + + /* rule_id should be in the range 0...63 */ + if (rule_id >= ICE_MAX_NUM_MIRROR_RULES) + return ICE_ERR_OUT_OF_RANGE; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_del_mir_rule); + + cmd = &desc.params.del_rule; + rule_id |= ICE_AQC_RULE_ID_VALID_M; + cmd->rule_id = CPU_TO_LE16(rule_id); + + if (keep_allocd) + cmd->flags = CPU_TO_LE16(ICE_AQC_FLAG_KEEP_ALLOCD_M); + + return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); +} + +/** + * ice_aq_alloc_free_vsi_list + * @hw: pointer to the HW struct + * @vsi_list_id: VSI list ID returned or used for lookup + * @lkup_type: switch rule filter lookup type + * @opc: switch rules population command type - pass in the command opcode + * + * allocates or free a VSI list resource + */ +static enum ice_status +ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id, + enum ice_sw_lkup_type lkup_type, + enum ice_adminq_opc opc) +{ + struct ice_aqc_alloc_free_res_elem *sw_buf; + struct ice_aqc_res_elem *vsi_ele; + enum ice_status status; + u16 buf_len; + + buf_len = sizeof(*sw_buf); + sw_buf = (struct ice_aqc_alloc_free_res_elem *) + ice_malloc(hw, buf_len); + if (!sw_buf) + return ICE_ERR_NO_MEMORY; + sw_buf->num_elems = CPU_TO_LE16(1); + + if (lkup_type == ICE_SW_LKUP_MAC || + lkup_type == ICE_SW_LKUP_MAC_VLAN || + lkup_type == ICE_SW_LKUP_ETHERTYPE || + lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC || + lkup_type == ICE_SW_LKUP_PROMISC || + lkup_type == ICE_SW_LKUP_PROMISC_VLAN || + lkup_type == ICE_SW_LKUP_LAST) { + sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_REP); + } else if (lkup_type == ICE_SW_LKUP_VLAN) { + sw_buf->res_type = + CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE); + } else { + status = ICE_ERR_PARAM; + goto ice_aq_alloc_free_vsi_list_exit; + } + + if (opc == ice_aqc_opc_free_res) + sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(*vsi_list_id); + + status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL); + if (status) + goto ice_aq_alloc_free_vsi_list_exit; + + if (opc == ice_aqc_opc_alloc_res) { + vsi_ele = &sw_buf->elem[0]; + *vsi_list_id = LE16_TO_CPU(vsi_ele->e.sw_resp); + } + +ice_aq_alloc_free_vsi_list_exit: + ice_free(hw, sw_buf); + return status; +} + +/** + * ice_aq_set_storm_ctrl - Sets storm control configuration + * @hw: pointer to the HW struct + * @bcast_thresh: represents the upper threshold for broadcast storm control + * @mcast_thresh: represents the upper threshold for multicast storm control + * @ctl_bitmask: storm control control knobs + * + * Sets the storm control configuration (0x0280) + */ +enum ice_status +ice_aq_set_storm_ctrl(struct ice_hw *hw, u32 bcast_thresh, u32 mcast_thresh, + u32 ctl_bitmask) +{ + struct ice_aqc_storm_cfg *cmd; + struct ice_aq_desc desc; + + cmd = &desc.params.storm_conf; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_storm_cfg); + + cmd->bcast_thresh_size = CPU_TO_LE32(bcast_thresh & ICE_AQ_THRESHOLD_M); + cmd->mcast_thresh_size = CPU_TO_LE32(mcast_thresh & ICE_AQ_THRESHOLD_M); + cmd->storm_ctrl_ctrl = CPU_TO_LE32(ctl_bitmask); + + return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); +} + +/** + * ice_aq_get_storm_ctrl - gets storm control configuration + * @hw: pointer to the HW struct + * @bcast_thresh: represents the upper threshold for broadcast storm control + * @mcast_thresh: represents the upper threshold for multicast storm control + * @ctl_bitmask: storm control 
control knobs + * + * Gets the storm control configuration (0x0281) + */ +enum ice_status +ice_aq_get_storm_ctrl(struct ice_hw *hw, u32 *bcast_thresh, u32 *mcast_thresh, + u32 *ctl_bitmask) +{ + enum ice_status status; + struct ice_aq_desc desc; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_storm_cfg); + + status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); + if (!status) { + struct ice_aqc_storm_cfg *resp = &desc.params.storm_conf; + + if (bcast_thresh) + *bcast_thresh = LE32_TO_CPU(resp->bcast_thresh_size) & + ICE_AQ_THRESHOLD_M; + if (mcast_thresh) + *mcast_thresh = LE32_TO_CPU(resp->mcast_thresh_size) & + ICE_AQ_THRESHOLD_M; + if (ctl_bitmask) + *ctl_bitmask = LE32_TO_CPU(resp->storm_ctrl_ctrl); + } + + return status; +} + +/** + * ice_aq_sw_rules - add/update/remove switch rules + * @hw: pointer to the HW struct + * @rule_list: pointer to switch rule population list + * @rule_list_sz: total size of the rule list in bytes + * @num_rules: number of switch rules in the rule_list + * @opc: switch rules population command type - pass in the command opcode + * @cd: pointer to command details structure or NULL + * + * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware + */ +static enum ice_status +ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz, + u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd) +{ + struct ice_aq_desc desc; + + ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); + + if (opc != ice_aqc_opc_add_sw_rules && + opc != ice_aqc_opc_update_sw_rules && + opc != ice_aqc_opc_remove_sw_rules) + return ICE_ERR_PARAM; + + ice_fill_dflt_direct_cmd_desc(&desc, opc); + + desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); + desc.params.sw_rules.num_rules_fltr_entry_index = + CPU_TO_LE16(num_rules); + return ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd); +} + +/** + * ice_aq_add_recipe - add switch recipe + * @hw: pointer to the HW struct + * @s_recipe_list: pointer to switch rule population list + * @num_recipes: number of switch recipes in the list + * @cd: pointer to command details structure or NULL + * + * Add(0x0290) + */ +enum ice_status +ice_aq_add_recipe(struct ice_hw *hw, + struct ice_aqc_recipe_data_elem *s_recipe_list, + u16 num_recipes, struct ice_sq_cd *cd) +{ + struct ice_aqc_add_get_recipe *cmd; + struct ice_aq_desc desc; + u16 buf_size; + + ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); + cmd = &desc.params.add_get_recipe; + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe); + + cmd->num_sub_recipes = CPU_TO_LE16(num_recipes); + desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); + + buf_size = num_recipes * sizeof(*s_recipe_list); + + return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd); +} + +/** + * ice_aq_get_recipe - get switch recipe + * @hw: pointer to the HW struct + * @s_recipe_list: pointer to switch rule population list + * @num_recipes: pointer to the number of recipes (input and output) + * @recipe_root: root recipe number of recipe(s) to retrieve + * @cd: pointer to command details structure or NULL + * + * Get(0x0292) + * + * On input, *num_recipes should equal the number of entries in s_recipe_list. + * On output, *num_recipes will equal the number of entries returned in + * s_recipe_list. + * + * The caller must supply enough space in s_recipe_list to hold all possible + * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES. 
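+ *
+ * A minimal sketch of the expected calling pattern, where root_rid is a
+ * caller-provided root recipe ID (allocation and error handling are
+ * illustrative only):
+ *
+ *	enum ice_status status;
+ *	u16 num_recipes = ICE_MAX_NUM_RECIPES;
+ *	struct ice_aqc_recipe_data_elem *buf;
+ *
+ *	buf = (struct ice_aqc_recipe_data_elem *)
+ *		ice_calloc(hw, ICE_MAX_NUM_RECIPES, sizeof(*buf));
+ *	status = ice_aq_get_recipe(hw, buf, &num_recipes, root_rid, NULL);
+ *	ice_free(hw, buf);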
+ */ +enum ice_status +ice_aq_get_recipe(struct ice_hw *hw, + struct ice_aqc_recipe_data_elem *s_recipe_list, + u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd) +{ + struct ice_aqc_add_get_recipe *cmd; + struct ice_aq_desc desc; + enum ice_status status; + u16 buf_size; + + if (*num_recipes != ICE_MAX_NUM_RECIPES) + return ICE_ERR_PARAM; + + ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); + cmd = &desc.params.add_get_recipe; + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe); + + cmd->return_index = CPU_TO_LE16(recipe_root); + cmd->num_sub_recipes = 0; + + buf_size = *num_recipes * sizeof(*s_recipe_list); + + status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd); + /* cppcheck-suppress constArgument */ + *num_recipes = LE16_TO_CPU(cmd->num_sub_recipes); + + return status; +} + +/** + * ice_aq_map_recipe_to_profile - Map recipe to packet profile + * @hw: pointer to the HW struct + * @profile_id: package profile ID to associate the recipe with + * @r_bitmap: Recipe bitmap filled in and need to be returned as response + * @cd: pointer to command details structure or NULL + * Recipe to profile association (0x0291) + */ +enum ice_status +ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap, + struct ice_sq_cd *cd) +{ + struct ice_aqc_recipe_to_profile *cmd; + struct ice_aq_desc desc; + + ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); + cmd = &desc.params.recipe_to_profile; + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile); + cmd->profile_id = CPU_TO_LE16(profile_id); + /* Set the recipe ID bit in the bitmask to let the device know which + * profile we are associating the recipe to + */ + ice_memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc), + ICE_NONDMA_TO_NONDMA); + + return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); +} + +/** + * ice_aq_get_recipe_to_profile - Map recipe to packet profile + * @hw: pointer to the HW struct + * @profile_id: package profile ID to associate the recipe with + * @r_bitmap: Recipe bitmap filled in and need to be returned as response + * @cd: pointer to command details structure or NULL + * Associate profile ID with given recipe (0x0293) + */ +enum ice_status +ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap, + struct ice_sq_cd *cd) +{ + struct ice_aqc_recipe_to_profile *cmd; + struct ice_aq_desc desc; + enum ice_status status; + + ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); + cmd = &desc.params.recipe_to_profile; + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile); + cmd->profile_id = CPU_TO_LE16(profile_id); + + status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd); + if (!status) + ice_memcpy(r_bitmap, cmd->recipe_assoc, + sizeof(cmd->recipe_assoc), ICE_NONDMA_TO_NONDMA); + + return status; +} + +/** + * ice_alloc_recipe - add recipe resource + * @hw: pointer to the hardware structure + * @rid: recipe ID returned as response to AQ call + */ +enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *rid) +{ + struct ice_aqc_alloc_free_res_elem *sw_buf; + enum ice_status status; + u16 buf_len; + + buf_len = sizeof(*sw_buf); + sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len); + if (!sw_buf) + return ICE_ERR_NO_MEMORY; + + sw_buf->num_elems = CPU_TO_LE16(1); + sw_buf->res_type = CPU_TO_LE16((ICE_AQC_RES_TYPE_RECIPE << + ICE_AQC_RES_TYPE_S) | + ICE_AQC_RES_TYPE_FLAG_SHARED); + status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, + ice_aqc_opc_alloc_res, NULL); + if (!status) + *rid = 
LE16_TO_CPU(sw_buf->elem[0].e.sw_resp); + ice_free(hw, sw_buf); + + return status; +} + +/* ice_init_port_info - Initialize port_info with switch configuration data + * @pi: pointer to port_info + * @vsi_port_num: VSI number or port number + * @type: Type of switch element (port or VSI) + * @swid: switch ID of the switch the element is attached to + * @pf_vf_num: PF or VF number + * @is_vf: true if the element is a VF, false otherwise + */ +static void +ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type, + u16 swid, u16 pf_vf_num, bool is_vf) +{ + switch (type) { + case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT: + pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK); + pi->sw_id = swid; + pi->pf_vf_num = pf_vf_num; + pi->is_vf = is_vf; + pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL; + pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL; + break; + default: + ice_debug(pi->hw, ICE_DBG_SW, + "incorrect VSI/port type received\n"); + break; + } +} + +/* ice_get_initial_sw_cfg - Get initial port and default VSI data + * @hw: pointer to the hardware structure + */ +enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw) +{ + struct ice_aqc_get_sw_cfg_resp *rbuf; + enum ice_status status; + u8 num_total_ports; + u16 req_desc = 0; + u16 num_elems; + u8 j = 0; + u16 i; + + num_total_ports = 1; + + rbuf = (struct ice_aqc_get_sw_cfg_resp *) + ice_malloc(hw, ICE_SW_CFG_MAX_BUF_LEN); + + if (!rbuf) + return ICE_ERR_NO_MEMORY; + + /* Multiple calls to ice_aq_get_sw_cfg may be required + * to get all the switch configuration information. The need + * for additional calls is indicated by ice_aq_get_sw_cfg + * writing a non-zero value in req_desc + */ + do { + status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN, + &req_desc, &num_elems, NULL); + + if (status) + break; + + for (i = 0; i < num_elems; i++) { + struct ice_aqc_get_sw_cfg_resp_elem *ele; + u16 pf_vf_num, swid, vsi_port_num; + bool is_vf = false; + u8 res_type; + + ele = rbuf[i].elements; + vsi_port_num = LE16_TO_CPU(ele->vsi_port_num) & + ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M; + + pf_vf_num = LE16_TO_CPU(ele->pf_vf_num) & + ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M; + + swid = LE16_TO_CPU(ele->swid); + + if (LE16_TO_CPU(ele->pf_vf_num) & + ICE_AQC_GET_SW_CONF_RESP_IS_VF) + is_vf = true; + + res_type = (u8)(LE16_TO_CPU(ele->vsi_port_num) >> + ICE_AQC_GET_SW_CONF_RESP_TYPE_S); + + switch (res_type) { + case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT: + case ICE_AQC_GET_SW_CONF_RESP_VIRT_PORT: + if (j == num_total_ports) { + ice_debug(hw, ICE_DBG_SW, + "more ports than expected\n"); + status = ICE_ERR_CFG; + goto out; + } + ice_init_port_info(hw->port_info, + vsi_port_num, res_type, swid, + pf_vf_num, is_vf); + j++; + break; + default: + break; + } + } + } while (req_desc && !status); + +out: + ice_free(hw, (void *)rbuf); + return status; +} + +/** + * ice_fill_sw_info - Helper function to populate lb_en and lan_en + * @hw: pointer to the hardware structure + * @fi: filter info structure to fill/update + * + * This helper function populates the lb_en and lan_en elements of the provided + * ice_fltr_info struct using the switch's type and characteristics of the + * switch rule being configured. 
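+ *
+ * In short, for Tx filters as implemented below: lb_en requests loopback of
+ * matching packets to the internal switch and is set for every lookup type
+ * except VLAN prune rules, while lan_en lets matching packets reach the
+ * wire and, when the switch is a VEB, is only set for directional lookups
+ * (ethertype, promiscuous, default-port), VLAN, or MAC/MAC-VLAN lookups
+ * with a non-unicast address; in VEPA mode lan_en is always set.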
+ */ +static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi) +{ + fi->lb_en = false; + fi->lan_en = false; + + if ((fi->flag & ICE_FLTR_RX) && + (fi->fltr_act == ICE_FWD_TO_VSI || + fi->fltr_act == ICE_FWD_TO_VSI_LIST) && + fi->lkup_type == ICE_SW_LKUP_LAST) + fi->lan_en = true; + + if ((fi->flag & ICE_FLTR_TX) && + (fi->fltr_act == ICE_FWD_TO_VSI || + fi->fltr_act == ICE_FWD_TO_VSI_LIST || + fi->fltr_act == ICE_FWD_TO_Q || + fi->fltr_act == ICE_FWD_TO_QGRP)) { + /* Setting LB for prune actions will result in replicated + * packets to the internal switch that will be dropped. + */ + if (fi->lkup_type != ICE_SW_LKUP_VLAN) + fi->lb_en = true; + + /* Set lan_en to TRUE if + * 1. The switch is a VEB AND + * 2 + * 2.1 The lookup is a directional lookup like ethertype, + * promiscuous, ethertype-MAC, promiscuous-VLAN + * and default-port OR + * 2.2 The lookup is VLAN, OR + * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR + * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC. + * + * OR + * + * The switch is a VEPA. + * + * In all other cases, the LAN enable has to be set to false. + */ + if (hw->evb_veb) { + if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE || + fi->lkup_type == ICE_SW_LKUP_PROMISC || + fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC || + fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN || + fi->lkup_type == ICE_SW_LKUP_DFLT || + fi->lkup_type == ICE_SW_LKUP_VLAN || + (fi->lkup_type == ICE_SW_LKUP_MAC && + !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)) || + (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN && + !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr))) + fi->lan_en = true; + } else { + fi->lan_en = true; + } + } +} + +/** + * ice_fill_sw_rule - Helper function to fill switch rule structure + * @hw: pointer to the hardware structure + * @f_info: entry containing packet forwarding information + * @s_rule: switch rule structure to be filled in based on mac_entry + * @opc: switch rules population command type - pass in the command opcode + */ +static void +ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info, + struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc) +{ + u16 vlan_id = ICE_MAX_VLAN_ID + 1; + void *daddr = NULL; + u16 eth_hdr_sz; + u8 *eth_hdr; + u32 act = 0; + __be16 *off; + u8 q_rgn; + + if (opc == ice_aqc_opc_remove_sw_rules) { + s_rule->pdata.lkup_tx_rx.act = 0; + s_rule->pdata.lkup_tx_rx.index = + CPU_TO_LE16(f_info->fltr_rule_id); + s_rule->pdata.lkup_tx_rx.hdr_len = 0; + return; + } + + eth_hdr_sz = sizeof(dummy_eth_header); + eth_hdr = s_rule->pdata.lkup_tx_rx.hdr; + + /* initialize the ether header with a dummy header */ + ice_memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz, ICE_NONDMA_TO_NONDMA); + ice_fill_sw_info(hw, f_info); + + switch (f_info->fltr_act) { + case ICE_FWD_TO_VSI: + act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) & + ICE_SINGLE_ACT_VSI_ID_M; + if (f_info->lkup_type != ICE_SW_LKUP_VLAN) + act |= ICE_SINGLE_ACT_VSI_FORWARDING | + ICE_SINGLE_ACT_VALID_BIT; + break; + case ICE_FWD_TO_VSI_LIST: + act |= ICE_SINGLE_ACT_VSI_LIST; + act |= (f_info->fwd_id.vsi_list_id << + ICE_SINGLE_ACT_VSI_LIST_ID_S) & + ICE_SINGLE_ACT_VSI_LIST_ID_M; + if (f_info->lkup_type != ICE_SW_LKUP_VLAN) + act |= ICE_SINGLE_ACT_VSI_FORWARDING | + ICE_SINGLE_ACT_VALID_BIT; + break; + case ICE_FWD_TO_Q: + act |= ICE_SINGLE_ACT_TO_Q; + act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) & + ICE_SINGLE_ACT_Q_INDEX_M; + break; + case ICE_DROP_PACKET: + act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP | + 
ICE_SINGLE_ACT_VALID_BIT; + break; + case ICE_FWD_TO_QGRP: + q_rgn = f_info->qgrp_size > 0 ? + (u8)ice_ilog2(f_info->qgrp_size) : 0; + act |= ICE_SINGLE_ACT_TO_Q; + act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) & + ICE_SINGLE_ACT_Q_INDEX_M; + act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) & + ICE_SINGLE_ACT_Q_REGION_M; + break; + default: + return; + } + + if (f_info->lb_en) + act |= ICE_SINGLE_ACT_LB_ENABLE; + if (f_info->lan_en) + act |= ICE_SINGLE_ACT_LAN_ENABLE; + + switch (f_info->lkup_type) { + case ICE_SW_LKUP_MAC: + daddr = f_info->l_data.mac.mac_addr; + break; + case ICE_SW_LKUP_VLAN: + vlan_id = f_info->l_data.vlan.vlan_id; + if (f_info->fltr_act == ICE_FWD_TO_VSI || + f_info->fltr_act == ICE_FWD_TO_VSI_LIST) { + act |= ICE_SINGLE_ACT_PRUNE; + act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS; + } + break; + case ICE_SW_LKUP_ETHERTYPE_MAC: + daddr = f_info->l_data.ethertype_mac.mac_addr; + /* fall-through */ + case ICE_SW_LKUP_ETHERTYPE: + off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET); + *off = CPU_TO_BE16(f_info->l_data.ethertype_mac.ethertype); + break; + case ICE_SW_LKUP_MAC_VLAN: + daddr = f_info->l_data.mac_vlan.mac_addr; + vlan_id = f_info->l_data.mac_vlan.vlan_id; + break; + case ICE_SW_LKUP_PROMISC_VLAN: + vlan_id = f_info->l_data.mac_vlan.vlan_id; + /* fall-through */ + case ICE_SW_LKUP_PROMISC: + daddr = f_info->l_data.mac_vlan.mac_addr; + break; + default: + break; + } + + s_rule->type = (f_info->flag & ICE_FLTR_RX) ? + CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX) : + CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX); + + /* Recipe set depending on lookup type */ + s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(f_info->lkup_type); + s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(f_info->src); + s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act); + + if (daddr) + ice_memcpy(eth_hdr + ICE_ETH_DA_OFFSET, daddr, ETH_ALEN, + ICE_NONDMA_TO_NONDMA); + + if (!(vlan_id > ICE_MAX_VLAN_ID)) { + off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET); + *off = CPU_TO_BE16(vlan_id); + } + + /* Create the switch rule with the final dummy Ethernet header */ + if (opc != ice_aqc_opc_update_sw_rules) + s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(eth_hdr_sz); +} + +/** + * ice_add_marker_act + * @hw: pointer to the hardware structure + * @m_ent: the management entry for which sw marker needs to be added + * @sw_marker: sw marker to tag the Rx descriptor with + * @l_id: large action resource ID + * + * Create a large action to hold software marker and update the switch rule + * entry pointed by m_ent with newly created large action + */ +static enum ice_status +ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent, + u16 sw_marker, u16 l_id) +{ + struct ice_aqc_sw_rules_elem *lg_act, *rx_tx; + /* For software marker we need 3 large actions + * 1. FWD action: FWD TO VSI or VSI LIST + * 2. GENERIC VALUE action to hold the profile ID + * 3. GENERIC VALUE action to hold the software marker ID + */ + const u16 num_lg_acts = 3; + enum ice_status status; + u16 lg_act_size; + u16 rules_size; + u32 act; + u16 id; + + if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC) + return ICE_ERR_PARAM; + + /* Create two back-to-back switch rules and submit them to the HW using + * one memory buffer: + * 1. Large Action + * 2. 
Look up Tx Rx + */ + lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts); + rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE; + lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size); + if (!lg_act) + return ICE_ERR_NO_MEMORY; + + rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size); + + /* Fill in the first switch rule i.e. large action */ + lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT); + lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id); + lg_act->pdata.lg_act.size = CPU_TO_LE16(num_lg_acts); + + /* First action VSI forwarding or VSI list forwarding depending on how + * many VSIs + */ + id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id : + m_ent->fltr_info.fwd_id.hw_vsi_id; + + act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT; + act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) & + ICE_LG_ACT_VSI_LIST_ID_M; + if (m_ent->vsi_count > 1) + act |= ICE_LG_ACT_VSI_LIST; + lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act); + + /* Second action descriptor type */ + act = ICE_LG_ACT_GENERIC; + + act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M; + lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act); + + act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX << + ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M; + + /* Third action Marker value */ + act |= ICE_LG_ACT_GENERIC; + act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) & + ICE_LG_ACT_GENERIC_VALUE_M; + + lg_act->pdata.lg_act.act[2] = CPU_TO_LE32(act); + + /* call the fill switch rule to fill the lookup Tx Rx structure */ + ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx, + ice_aqc_opc_update_sw_rules); + + /* Update the action to point to the large action ID */ + rx_tx->pdata.lkup_tx_rx.act = + CPU_TO_LE32(ICE_SINGLE_ACT_PTR | + ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) & + ICE_SINGLE_ACT_PTR_VAL_M)); + + /* Use the filter rule ID of the previously created rule with single + * act. Once the update happens, hardware will treat this as large + * action + */ + rx_tx->pdata.lkup_tx_rx.index = + CPU_TO_LE16(m_ent->fltr_info.fltr_rule_id); + + status = ice_aq_sw_rules(hw, lg_act, rules_size, 2, + ice_aqc_opc_update_sw_rules, NULL); + if (!status) { + m_ent->lg_act_idx = l_id; + m_ent->sw_marker_id = sw_marker; + } + + ice_free(hw, lg_act); + return status; +} + +/** + * ice_add_counter_act - add/update filter rule with counter action + * @hw: pointer to the hardware structure + * @m_ent: the management entry for which counter needs to be added + * @counter_id: VLAN counter ID returned as part of allocate resource + * @l_id: large action resource ID + */ +static enum ice_status +ice_add_counter_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent, + u16 counter_id, u16 l_id) +{ + struct ice_aqc_sw_rules_elem *lg_act; + struct ice_aqc_sw_rules_elem *rx_tx; + enum ice_status status; + /* 2 actions will be added while adding a large action counter */ + const int num_acts = 2; + u16 lg_act_size; + u16 rules_size; + u16 f_rule_id; + u32 act; + u16 id; + + if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC) + return ICE_ERR_PARAM; + + /* Create two back-to-back switch rules and submit them to the HW using + * one memory buffer: + * 1. Large Action + * 2. 
Look up Tx Rx + */ + lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_acts); + rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE; + lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, + rules_size); + if (!lg_act) + return ICE_ERR_NO_MEMORY; + + rx_tx = (struct ice_aqc_sw_rules_elem *) + ((u8 *)lg_act + lg_act_size); + + /* Fill in the first switch rule i.e. large action */ + lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT); + lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id); + lg_act->pdata.lg_act.size = CPU_TO_LE16(num_acts); + + /* First action VSI forwarding or VSI list forwarding depending on how + * many VSIs + */ + id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id : + m_ent->fltr_info.fwd_id.hw_vsi_id; + + act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT; + act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) & + ICE_LG_ACT_VSI_LIST_ID_M; + if (m_ent->vsi_count > 1) + act |= ICE_LG_ACT_VSI_LIST; + lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act); + + /* Second action counter ID */ + act = ICE_LG_ACT_STAT_COUNT; + act |= (counter_id << ICE_LG_ACT_STAT_COUNT_S) & + ICE_LG_ACT_STAT_COUNT_M; + lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act); + + /* call the fill switch rule to fill the lookup Tx Rx structure */ + ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx, + ice_aqc_opc_update_sw_rules); + + act = ICE_SINGLE_ACT_PTR; + act |= (l_id << ICE_SINGLE_ACT_PTR_VAL_S) & ICE_SINGLE_ACT_PTR_VAL_M; + rx_tx->pdata.lkup_tx_rx.act = CPU_TO_LE32(act); + + /* Use the filter rule ID of the previously created rule with single + * act. Once the update happens, hardware will treat this as large + * action + */ + f_rule_id = m_ent->fltr_info.fltr_rule_id; + rx_tx->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_rule_id); + + status = ice_aq_sw_rules(hw, lg_act, rules_size, 2, + ice_aqc_opc_update_sw_rules, NULL); + if (!status) { + m_ent->lg_act_idx = l_id; + m_ent->counter_index = counter_id; + } + + ice_free(hw, lg_act); + return status; +} + +/** + * ice_create_vsi_list_map + * @hw: pointer to the hardware structure + * @vsi_handle_arr: array of VSI handles to set in the VSI mapping + * @num_vsi: number of VSI handles in the array + * @vsi_list_id: VSI list ID generated as part of allocate resource + * + * Helper function to create a new entry of VSI list ID to VSI mapping + * using the given VSI list ID + */ +static struct ice_vsi_list_map_info * +ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi, + u16 vsi_list_id) +{ + struct ice_switch_info *sw = hw->switch_info; + struct ice_vsi_list_map_info *v_map; + int i; + + v_map = (struct ice_vsi_list_map_info *)ice_calloc(hw, 1, + sizeof(*v_map)); + if (!v_map) + return NULL; + + v_map->vsi_list_id = vsi_list_id; + v_map->ref_cnt = 1; + for (i = 0; i < num_vsi; i++) + ice_set_bit(vsi_handle_arr[i], v_map->vsi_map); + + LIST_ADD(&v_map->list_entry, &sw->vsi_list_map_head); + return v_map; +} + +/** + * ice_update_vsi_list_rule + * @hw: pointer to the hardware structure + * @vsi_handle_arr: array of VSI handles to form a VSI list + * @num_vsi: number of VSI handles in the array + * @vsi_list_id: VSI list ID generated as part of allocate resource + * @remove: Boolean value to indicate if this is a remove action + * @opc: switch rules population command type - pass in the command opcode + * @lkup_type: lookup type of the filter + * + * Call AQ command to add a new switch rule or update existing switch rule + * using the given VSI list ID + */ +static enum ice_status +ice_update_vsi_list_rule(struct ice_hw *hw, u16 
*vsi_handle_arr, u16 num_vsi, + u16 vsi_list_id, bool remove, enum ice_adminq_opc opc, + enum ice_sw_lkup_type lkup_type) +{ + struct ice_aqc_sw_rules_elem *s_rule; + enum ice_status status; + u16 s_rule_size; + u16 rule_type; + int i; + + if (!num_vsi) + return ICE_ERR_PARAM; + + if (lkup_type == ICE_SW_LKUP_MAC || + lkup_type == ICE_SW_LKUP_MAC_VLAN || + lkup_type == ICE_SW_LKUP_ETHERTYPE || + lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC || + lkup_type == ICE_SW_LKUP_PROMISC || + lkup_type == ICE_SW_LKUP_PROMISC_VLAN || + lkup_type == ICE_SW_LKUP_LAST) + rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR : + ICE_AQC_SW_RULES_T_VSI_LIST_SET; + else if (lkup_type == ICE_SW_LKUP_VLAN) + rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR : + ICE_AQC_SW_RULES_T_PRUNE_LIST_SET; + else + return ICE_ERR_PARAM; + + s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi); + s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size); + if (!s_rule) + return ICE_ERR_NO_MEMORY; + for (i = 0; i < num_vsi; i++) { + if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) { + status = ICE_ERR_PARAM; + goto exit; + } + /* AQ call requires hw_vsi_id(s) */ + s_rule->pdata.vsi_list.vsi[i] = + CPU_TO_LE16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i])); + } + + s_rule->type = CPU_TO_LE16(rule_type); + s_rule->pdata.vsi_list.number_vsi = CPU_TO_LE16(num_vsi); + s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id); + + status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL); + +exit: + ice_free(hw, s_rule); + return status; +} + +/** + * ice_create_vsi_list_rule - Creates and populates a VSI list rule + * @hw: pointer to the HW struct + * @vsi_handle_arr: array of VSI handles to form a VSI list + * @num_vsi: number of VSI handles in the array + * @vsi_list_id: stores the ID of the VSI list to be created + * @lkup_type: switch rule filter's lookup type + */ +static enum ice_status +ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi, + u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type) +{ + enum ice_status status; + + status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type, + ice_aqc_opc_alloc_res); + if (status) + return status; + + /* Update the newly created VSI list to include the specified VSIs */ + return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi, + *vsi_list_id, false, + ice_aqc_opc_add_sw_rules, lkup_type); +} + +/** + * ice_create_pkt_fwd_rule + * @hw: pointer to the hardware structure + * @recp_list: corresponding filter management list + * @f_entry: entry containing packet forwarding information + * + * Create switch rule with given filter information and add an entry + * to the corresponding filter management list to track this switch rule + * and VSI mapping + */ +static enum ice_status +ice_create_pkt_fwd_rule(struct ice_hw *hw, struct ice_sw_recipe *recp_list, + struct ice_fltr_list_entry *f_entry) +{ + struct ice_fltr_mgmt_list_entry *fm_entry; + struct ice_aqc_sw_rules_elem *s_rule; + enum ice_status status; + + s_rule = (struct ice_aqc_sw_rules_elem *) + ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE); + if (!s_rule) + return ICE_ERR_NO_MEMORY; + fm_entry = (struct ice_fltr_mgmt_list_entry *) + ice_malloc(hw, sizeof(*fm_entry)); + if (!fm_entry) { + status = ICE_ERR_NO_MEMORY; + goto ice_create_pkt_fwd_rule_exit; + } + + fm_entry->fltr_info = f_entry->fltr_info; + + /* Initialize all the fields for the management entry */ + fm_entry->vsi_count = 1; + fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX; + fm_entry->sw_marker_id = 
ICE_INVAL_SW_MARKER_ID; + fm_entry->counter_index = ICE_INVAL_COUNTER_ID; + + ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule, + ice_aqc_opc_add_sw_rules); + + status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1, + ice_aqc_opc_add_sw_rules, NULL); + if (status) { + ice_free(hw, fm_entry); + goto ice_create_pkt_fwd_rule_exit; + } + + f_entry->fltr_info.fltr_rule_id = + LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index); + fm_entry->fltr_info.fltr_rule_id = + LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index); + + /* The book keeping entries will get removed when base driver + * calls remove filter AQ command + */ + LIST_ADD(&fm_entry->list_entry, &recp_list->filt_rules); + +ice_create_pkt_fwd_rule_exit: + ice_free(hw, s_rule); + return status; +} + +/** + * ice_update_pkt_fwd_rule + * @hw: pointer to the hardware structure + * @f_info: filter information for switch rule + * + * Call AQ command to update a previously created switch rule with a + * VSI list ID + */ +static enum ice_status +ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info) +{ + struct ice_aqc_sw_rules_elem *s_rule; + enum ice_status status; + + s_rule = (struct ice_aqc_sw_rules_elem *) + ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE); + if (!s_rule) + return ICE_ERR_NO_MEMORY; + + ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules); + + s_rule->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_info->fltr_rule_id); + + /* Update switch rule with new rule set to forward VSI list */ + status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1, + ice_aqc_opc_update_sw_rules, NULL); + + ice_free(hw, s_rule); + return status; +} + +/** + * ice_update_sw_rule_bridge_mode + * @hw: pointer to the HW struct + * + * Updates unicast switch filter rules based on VEB/VEPA mode + */ +enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw) +{ + struct ice_switch_info *sw = hw->switch_info; + struct ice_fltr_mgmt_list_entry *fm_entry; + enum ice_status status = ICE_SUCCESS; + struct LIST_HEAD_TYPE *rule_head; + struct ice_lock *rule_lock; /* Lock to protect filter rule list */ + + rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock; + rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules; + + ice_acquire_lock(rule_lock); + LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry, + list_entry) { + struct ice_fltr_info *fi = &fm_entry->fltr_info; + u8 *addr = fi->l_data.mac.mac_addr; + + /* Update unicast Tx rules to reflect the selected + * VEB/VEPA mode + */ + if ((fi->flag & ICE_FLTR_TX) && IS_UNICAST_ETHER_ADDR(addr) && + (fi->fltr_act == ICE_FWD_TO_VSI || + fi->fltr_act == ICE_FWD_TO_VSI_LIST || + fi->fltr_act == ICE_FWD_TO_Q || + fi->fltr_act == ICE_FWD_TO_QGRP)) { + status = ice_update_pkt_fwd_rule(hw, fi); + if (status) + break; + } + } + + ice_release_lock(rule_lock); + + return status; +} + +/** + * ice_add_update_vsi_list + * @hw: pointer to the hardware structure + * @m_entry: pointer to current filter management list entry + * @cur_fltr: filter information from the book keeping entry + * @new_fltr: filter information with the new VSI to be added + * + * Call AQ command to add or update previously created VSI list with new VSI. + * + * Helper function to do book keeping associated with adding filter information + * The algorithm to do the book keeping is described below : + * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.) 
+ * if only one VSI has been added till now + * Allocate a new VSI list and add two VSIs + * to this list using switch rule command + * Update the previously created switch rule with the + * newly created VSI list ID + * if a VSI list was previously created + * Add the new VSI to the previously created VSI list set + * using the update switch rule command + */ +static enum ice_status +ice_add_update_vsi_list(struct ice_hw *hw, + struct ice_fltr_mgmt_list_entry *m_entry, + struct ice_fltr_info *cur_fltr, + struct ice_fltr_info *new_fltr) +{ + enum ice_status status = ICE_SUCCESS; + u16 vsi_list_id = 0; + + if ((cur_fltr->fltr_act == ICE_FWD_TO_Q || + cur_fltr->fltr_act == ICE_FWD_TO_QGRP)) + return ICE_ERR_NOT_IMPL; + + if ((new_fltr->fltr_act == ICE_FWD_TO_Q || + new_fltr->fltr_act == ICE_FWD_TO_QGRP) && + (cur_fltr->fltr_act == ICE_FWD_TO_VSI || + cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST)) + return ICE_ERR_NOT_IMPL; + + if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) { + /* Only one entry existed in the mapping and it was not already + * a part of a VSI list. So, create a VSI list with the old and + * new VSIs. + */ + struct ice_fltr_info tmp_fltr; + u16 vsi_handle_arr[2]; + + /* A rule already exists with the new VSI being added */ + if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id) + return ICE_ERR_ALREADY_EXISTS; + + vsi_handle_arr[0] = cur_fltr->vsi_handle; + vsi_handle_arr[1] = new_fltr->vsi_handle; + status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2, + &vsi_list_id, + new_fltr->lkup_type); + if (status) + return status; + + tmp_fltr = *new_fltr; + tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id; + tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST; + tmp_fltr.fwd_id.vsi_list_id = vsi_list_id; + /* Update the previous switch rule of "MAC forward to VSI" to + * "MAC fwd to VSI list" + */ + status = ice_update_pkt_fwd_rule(hw, &tmp_fltr); + if (status) + return status; + + cur_fltr->fwd_id.vsi_list_id = vsi_list_id; + cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST; + m_entry->vsi_list_info = + ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2, + vsi_list_id); + + /* If this entry was large action then the large action needs + * to be updated to point to FWD to VSI list + */ + if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID) + status = + ice_add_marker_act(hw, m_entry, + m_entry->sw_marker_id, + m_entry->lg_act_idx); + } else { + u16 vsi_handle = new_fltr->vsi_handle; + enum ice_adminq_opc opcode; + + if (!m_entry->vsi_list_info) + return ICE_ERR_CFG; + + /* A rule already exists with the new VSI being added */ + if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle)) + return ICE_SUCCESS; + + /* Update the previously created VSI list set with + * the new VSI ID passed in + */ + vsi_list_id = cur_fltr->fwd_id.vsi_list_id; + opcode = ice_aqc_opc_update_sw_rules; + + status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, + vsi_list_id, false, opcode, + new_fltr->lkup_type); + /* update VSI list mapping info with new VSI ID */ + if (!status) + ice_set_bit(vsi_handle, + m_entry->vsi_list_info->vsi_map); + } + if (!status) + m_entry->vsi_count++; + return status; +} + +/** + * ice_find_rule_entry - Search a rule entry + * @list_head: head of rule list + * @f_info: rule information + * + * Helper function to search for a given rule entry + * Returns pointer to entry storing the rule if found + */ +static struct ice_fltr_mgmt_list_entry * +ice_find_rule_entry(struct LIST_HEAD_TYPE *list_head, + struct ice_fltr_info *f_info) +{ + struct ice_fltr_mgmt_list_entry 
*list_itr, *ret = NULL; + + LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry, + list_entry) { + if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data, + sizeof(f_info->l_data)) && + f_info->flag == list_itr->fltr_info.flag) { + ret = list_itr; + break; + } + } + return ret; +} + +/** + * ice_find_vsi_list_entry - Search VSI list map with VSI count 1 + * @recp_list: VSI lists needs to be searched + * @vsi_handle: VSI handle to be found in VSI list + * @vsi_list_id: VSI list ID found containing vsi_handle + * + * Helper function to search a VSI list with single entry containing given VSI + * handle element. This can be extended further to search VSI list with more + * than 1 vsi_count. Returns pointer to VSI list entry if found. + */ +static struct ice_vsi_list_map_info * +ice_find_vsi_list_entry(struct ice_sw_recipe *recp_list, u16 vsi_handle, + u16 *vsi_list_id) +{ + struct ice_vsi_list_map_info *map_info = NULL; + struct LIST_HEAD_TYPE *list_head; + + list_head = &recp_list->filt_rules; + if (recp_list->adv_rule) { + struct ice_adv_fltr_mgmt_list_entry *list_itr; + + LIST_FOR_EACH_ENTRY(list_itr, list_head, + ice_adv_fltr_mgmt_list_entry, + list_entry) { + if (list_itr->vsi_list_info) { + map_info = list_itr->vsi_list_info; + if (ice_is_bit_set(map_info->vsi_map, + vsi_handle)) { + *vsi_list_id = map_info->vsi_list_id; + return map_info; + } + } + } + } else { + struct ice_fltr_mgmt_list_entry *list_itr; + + LIST_FOR_EACH_ENTRY(list_itr, list_head, + ice_fltr_mgmt_list_entry, + list_entry) { + if (list_itr->vsi_count == 1 && + list_itr->vsi_list_info) { + map_info = list_itr->vsi_list_info; + if (ice_is_bit_set(map_info->vsi_map, + vsi_handle)) { + *vsi_list_id = map_info->vsi_list_id; + return map_info; + } + } + } + } + return NULL; +} + +/** + * ice_add_rule_internal - add rule for a given lookup type + * @hw: pointer to the hardware structure + * @recp_list: recipe list for which rule has to be added + * @lport: logic port number on which function add rule + * @f_entry: structure containing MAC forwarding information + * + * Adds or updates the rule lists for a given recipe + */ +static enum ice_status +ice_add_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list, + u8 lport, struct ice_fltr_list_entry *f_entry) +{ + struct ice_fltr_info *new_fltr, *cur_fltr; + struct ice_fltr_mgmt_list_entry *m_entry; + struct ice_lock *rule_lock; /* Lock to protect filter rule list */ + enum ice_status status = ICE_SUCCESS; + + if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle)) + return ICE_ERR_PARAM; + + /* Load the hw_vsi_id only if the fwd action is fwd to VSI */ + if (f_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI) + f_entry->fltr_info.fwd_id.hw_vsi_id = + ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle); + + rule_lock = &recp_list->filt_rule_lock; + + ice_acquire_lock(rule_lock); + new_fltr = &f_entry->fltr_info; + if (new_fltr->flag & ICE_FLTR_RX) + new_fltr->src = lport; + else if (new_fltr->flag & ICE_FLTR_TX) + new_fltr->src = + ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle); + + m_entry = ice_find_rule_entry(&recp_list->filt_rules, new_fltr); + if (!m_entry) { + status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry); + goto exit_add_rule_internal; + } + + cur_fltr = &m_entry->fltr_info; + status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr); + +exit_add_rule_internal: + ice_release_lock(rule_lock); + return status; +} + +/** + * ice_remove_vsi_list_rule + * @hw: pointer to the hardware structure + * @vsi_list_id: VSI list ID 
generated as part of allocate resource + * @lkup_type: switch rule filter lookup type + * + * The VSI list should be emptied before this function is called to remove the + * VSI list. + */ +static enum ice_status +ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id, + enum ice_sw_lkup_type lkup_type) +{ + struct ice_aqc_sw_rules_elem *s_rule; + enum ice_status status; + u16 s_rule_size; + + s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0); + s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size); + if (!s_rule) + return ICE_ERR_NO_MEMORY; + + s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR); + s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id); + + /* Free the vsi_list resource that we allocated. It is assumed that the + * list is empty at this point. + */ + status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type, + ice_aqc_opc_free_res); + + ice_free(hw, s_rule); + return status; +} + +/** + * ice_rem_update_vsi_list + * @hw: pointer to the hardware structure + * @vsi_handle: VSI handle of the VSI to remove + * @fm_list: filter management entry for which the VSI list management needs to + * be done + */ +static enum ice_status +ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle, + struct ice_fltr_mgmt_list_entry *fm_list) +{ + enum ice_sw_lkup_type lkup_type; + enum ice_status status = ICE_SUCCESS; + u16 vsi_list_id; + + if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST || + fm_list->vsi_count == 0) + return ICE_ERR_PARAM; + + /* A rule with the VSI being removed does not exist */ + if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle)) + return ICE_ERR_DOES_NOT_EXIST; + + lkup_type = fm_list->fltr_info.lkup_type; + vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id; + status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true, + ice_aqc_opc_update_sw_rules, + lkup_type); + if (status) + return status; + + fm_list->vsi_count--; + ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map); + + if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) { + struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info; + struct ice_vsi_list_map_info *vsi_list_info = + fm_list->vsi_list_info; + u16 rem_vsi_handle; + + rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map, + ICE_MAX_VSI); + if (!ice_is_vsi_valid(hw, rem_vsi_handle)) + return ICE_ERR_OUT_OF_RANGE; + + /* Make sure VSI list is empty before removing it below */ + status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1, + vsi_list_id, true, + ice_aqc_opc_update_sw_rules, + lkup_type); + if (status) + return status; + + tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI; + tmp_fltr_info.fwd_id.hw_vsi_id = + ice_get_hw_vsi_num(hw, rem_vsi_handle); + tmp_fltr_info.vsi_handle = rem_vsi_handle; + status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info); + if (status) { + ice_debug(hw, ICE_DBG_SW, + "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n", + tmp_fltr_info.fwd_id.hw_vsi_id, status); + return status; + } + + fm_list->fltr_info = tmp_fltr_info; + } + + if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) || + (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) { + struct ice_vsi_list_map_info *vsi_list_info = + fm_list->vsi_list_info; + + /* Remove the VSI list since it is no longer used */ + status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type); + if (status) { + ice_debug(hw, ICE_DBG_SW, + "Failed to remove VSI list %d, error %d\n", + vsi_list_id, status); + return status; + } + + 
LIST_DEL(&vsi_list_info->list_entry); + ice_free(hw, vsi_list_info); + fm_list->vsi_list_info = NULL; + } + + return status; +} + +/** + * ice_remove_rule_internal - Remove a filter rule of a given type + * + * @hw: pointer to the hardware structure + * @recp_list: recipe list for which the rule needs to removed + * @f_entry: rule entry containing filter information + */ +static enum ice_status +ice_remove_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list, + struct ice_fltr_list_entry *f_entry) +{ + struct ice_fltr_mgmt_list_entry *list_elem; + struct ice_lock *rule_lock; /* Lock to protect filter rule list */ + enum ice_status status = ICE_SUCCESS; + bool remove_rule = false; + u16 vsi_handle; + + if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle)) + return ICE_ERR_PARAM; + f_entry->fltr_info.fwd_id.hw_vsi_id = + ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle); + + rule_lock = &recp_list->filt_rule_lock; + ice_acquire_lock(rule_lock); + list_elem = ice_find_rule_entry(&recp_list->filt_rules, + &f_entry->fltr_info); + if (!list_elem) { + status = ICE_ERR_DOES_NOT_EXIST; + goto exit; + } + + if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) { + remove_rule = true; + } else if (!list_elem->vsi_list_info) { + status = ICE_ERR_DOES_NOT_EXIST; + goto exit; + } else if (list_elem->vsi_list_info->ref_cnt > 1) { + /* a ref_cnt > 1 indicates that the vsi_list is being + * shared by multiple rules. Decrement the ref_cnt and + * remove this rule, but do not modify the list, as it + * is in-use by other rules. + */ + list_elem->vsi_list_info->ref_cnt--; + remove_rule = true; + } else { + /* a ref_cnt of 1 indicates the vsi_list is only used + * by one rule. However, the original removal request is only + * for a single VSI. Update the vsi_list first, and only + * remove the rule if there are no further VSIs in this list. + */ + vsi_handle = f_entry->fltr_info.vsi_handle; + status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem); + if (status) + goto exit; + /* if VSI count goes to zero after updating the VSI list */ + if (list_elem->vsi_count == 0) + remove_rule = true; + } + + if (remove_rule) { + /* Remove the lookup rule */ + struct ice_aqc_sw_rules_elem *s_rule; + + s_rule = (struct ice_aqc_sw_rules_elem *) + ice_malloc(hw, ICE_SW_RULE_RX_TX_NO_HDR_SIZE); + if (!s_rule) { + status = ICE_ERR_NO_MEMORY; + goto exit; + } + + ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule, + ice_aqc_opc_remove_sw_rules); + + status = ice_aq_sw_rules(hw, s_rule, + ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1, + ice_aqc_opc_remove_sw_rules, NULL); + + /* Remove a book keeping from the list */ + ice_free(hw, s_rule); + + if (status) + goto exit; + + LIST_DEL(&list_elem->list_entry); + ice_free(hw, list_elem); + } +exit: + ice_release_lock(rule_lock); + return status; +} + +/** + * ice_aq_get_res_alloc - get allocated resources + * @hw: pointer to the HW struct + * @num_entries: pointer to u16 to store the number of resource entries returned + * @buf: pointer to user-supplied buffer + * @buf_size: size of buff + * @cd: pointer to command details structure or NULL + * + * The user-supplied buffer must be large enough to store the resource + * information for all resource types. Each resource type is an + * ice_aqc_get_res_resp_data_elem structure. 
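+ * For example, if the firmware reports N resource types, the buffer must
+ * provide room for at least N such structures.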
+ */ +enum ice_status +ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries, void *buf, + u16 buf_size, struct ice_sq_cd *cd) +{ + struct ice_aqc_get_res_alloc *resp; + enum ice_status status; + struct ice_aq_desc desc; + + if (!buf) + return ICE_ERR_BAD_PTR; + + if (buf_size < ICE_AQ_GET_RES_ALLOC_BUF_LEN) + return ICE_ERR_INVAL_SIZE; + + resp = &desc.params.get_res; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_res_alloc); + status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); + + if (!status && num_entries) + *num_entries = LE16_TO_CPU(resp->resp_elem_num); + + return status; +} + +/** + * ice_aq_get_res_descs - get allocated resource descriptors + * @hw: pointer to the hardware structure + * @num_entries: number of resource entries in buffer + * @buf: Indirect buffer to hold data parameters and response + * @buf_size: size of buffer for indirect commands + * @res_type: resource type + * @res_shared: is resource shared + * @desc_id: input - first desc ID to start; output - next desc ID + * @cd: pointer to command details structure or NULL + */ +enum ice_status +ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries, + struct ice_aqc_get_allocd_res_desc_resp *buf, + u16 buf_size, u16 res_type, bool res_shared, u16 *desc_id, + struct ice_sq_cd *cd) +{ + struct ice_aqc_get_allocd_res_desc *cmd; + struct ice_aq_desc desc; + enum ice_status status; + + ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); + + cmd = &desc.params.get_res_desc; + + if (!buf) + return ICE_ERR_PARAM; + + if (buf_size != (num_entries * sizeof(*buf))) + return ICE_ERR_PARAM; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_allocd_res_desc); + + cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) & + ICE_AQC_RES_TYPE_M) | (res_shared ? + ICE_AQC_RES_TYPE_FLAG_SHARED : 0)); + cmd->ops.cmd.first_desc = CPU_TO_LE16(*desc_id); + + status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); + if (!status) + *desc_id = LE16_TO_CPU(cmd->ops.resp.next_desc); + + return status; +} + +/** + * ice_add_mac_rule - Add a MAC address based filter rule + * @hw: pointer to the hardware structure + * @m_list: list of MAC addresses and forwarding information + * @sw: pointer to switch info struct for which function add rule + * @lport: logic port number on which function add rule + * + * IMPORTANT: When the ucast_shared flag is set to false and m_list has + * multiple unicast addresses, the function assumes that all the + * addresses are unique in a given add_mac call. It doesn't + * check for duplicates in this case, removing duplicates from a given + * list should be taken care of in the caller of this function. 
+ */ +static enum ice_status +ice_add_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list, + struct ice_switch_info *sw, u8 lport) +{ + struct ice_sw_recipe *recp_list = &sw->recp_list[ICE_SW_LKUP_MAC]; + struct ice_aqc_sw_rules_elem *s_rule, *r_iter; + struct ice_fltr_list_entry *m_list_itr; + struct LIST_HEAD_TYPE *rule_head; + u16 total_elem_left, s_rule_size; + struct ice_lock *rule_lock; /* Lock to protect filter rule list */ + enum ice_status status = ICE_SUCCESS; + u16 num_unicast = 0; + u8 elem_sent; + + s_rule = NULL; + rule_lock = &recp_list->filt_rule_lock; + rule_head = &recp_list->filt_rules; + + LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry, + list_entry) { + u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0]; + u16 vsi_handle; + u16 hw_vsi_id; + + m_list_itr->fltr_info.flag = ICE_FLTR_TX; + vsi_handle = m_list_itr->fltr_info.vsi_handle; + if (!ice_is_vsi_valid(hw, vsi_handle)) + return ICE_ERR_PARAM; + hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle); + m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id; + /* update the src in case it is VSI num */ + if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI) + return ICE_ERR_PARAM; + m_list_itr->fltr_info.src = hw_vsi_id; + if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC || + IS_ZERO_ETHER_ADDR(add)) + return ICE_ERR_PARAM; + if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) { + /* Don't overwrite the unicast address */ + ice_acquire_lock(rule_lock); + if (ice_find_rule_entry(rule_head, + &m_list_itr->fltr_info)) { + ice_release_lock(rule_lock); + return ICE_ERR_ALREADY_EXISTS; + } + ice_release_lock(rule_lock); + num_unicast++; + } else if (IS_MULTICAST_ETHER_ADDR(add) || + (IS_UNICAST_ETHER_ADDR(add) && hw->ucast_shared)) { + m_list_itr->status = + ice_add_rule_internal(hw, recp_list, lport, + m_list_itr); + if (m_list_itr->status) + return m_list_itr->status; + } + } + + ice_acquire_lock(rule_lock); + /* Exit if no suitable entries were found for adding bulk switch rule */ + if (!num_unicast) { + status = ICE_SUCCESS; + goto ice_add_mac_exit; + } + + /* Allocate switch rule buffer for the bulk update for unicast */ + s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE; + s_rule = (struct ice_aqc_sw_rules_elem *) + ice_calloc(hw, num_unicast, s_rule_size); + if (!s_rule) { + status = ICE_ERR_NO_MEMORY; + goto ice_add_mac_exit; + } + + r_iter = s_rule; + LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry, + list_entry) { + struct ice_fltr_info *f_info = &m_list_itr->fltr_info; + u8 *mac_addr = &f_info->l_data.mac.mac_addr[0]; + + if (IS_UNICAST_ETHER_ADDR(mac_addr)) { + ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter, + ice_aqc_opc_add_sw_rules); + r_iter = (struct ice_aqc_sw_rules_elem *) + ((u8 *)r_iter + s_rule_size); + } + } + + /* Call AQ bulk switch rule update for all unicast addresses */ + r_iter = s_rule; + /* Call AQ switch rule in AQ_MAX chunk */ + for (total_elem_left = num_unicast; total_elem_left > 0; + total_elem_left -= elem_sent) { + struct ice_aqc_sw_rules_elem *entry = r_iter; + + elem_sent = MIN_T(u8, total_elem_left, + (ICE_AQ_MAX_BUF_LEN / s_rule_size)); + status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size, + elem_sent, ice_aqc_opc_add_sw_rules, + NULL); + if (status) + goto ice_add_mac_exit; + r_iter = (struct ice_aqc_sw_rules_elem *) + ((u8 *)r_iter + (elem_sent * s_rule_size)); + } + + /* Fill up rule ID based on the value returned from FW */ + r_iter = s_rule; + LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry, + list_entry) { + struct 
ice_fltr_info *f_info = &m_list_itr->fltr_info; + u8 *mac_addr = &f_info->l_data.mac.mac_addr[0]; + struct ice_fltr_mgmt_list_entry *fm_entry; + + if (IS_UNICAST_ETHER_ADDR(mac_addr)) { + f_info->fltr_rule_id = + LE16_TO_CPU(r_iter->pdata.lkup_tx_rx.index); + f_info->fltr_act = ICE_FWD_TO_VSI; + /* Create an entry to track this MAC address */ + fm_entry = (struct ice_fltr_mgmt_list_entry *) + ice_malloc(hw, sizeof(*fm_entry)); + if (!fm_entry) { + status = ICE_ERR_NO_MEMORY; + goto ice_add_mac_exit; + } + fm_entry->fltr_info = *f_info; + fm_entry->vsi_count = 1; + /* The book keeping entries will get removed when + * base driver calls remove filter AQ command + */ + + LIST_ADD(&fm_entry->list_entry, rule_head); + r_iter = (struct ice_aqc_sw_rules_elem *) + ((u8 *)r_iter + s_rule_size); + } + } + +ice_add_mac_exit: + ice_release_lock(rule_lock); + if (s_rule) + ice_free(hw, s_rule); + return status; +} + +/** + * ice_add_mac - Add a MAC address based filter rule + * @hw: pointer to the hardware structure + * @m_list: list of MAC addresses and forwarding information + * + * Function add MAC rule for logical port from HW struct + */ +enum ice_status +ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list) +{ + if (!m_list || !hw) + return ICE_ERR_PARAM; + + return ice_add_mac_rule(hw, m_list, hw->switch_info, + hw->port_info->lport); +} + +/** + * ice_add_vlan_internal - Add one VLAN based filter rule + * @hw: pointer to the hardware structure + * @recp_list: recipe list for which rule has to be added + * @f_entry: filter entry containing one VLAN information + */ +static enum ice_status +ice_add_vlan_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list, + struct ice_fltr_list_entry *f_entry) +{ + struct ice_fltr_mgmt_list_entry *v_list_itr; + struct ice_fltr_info *new_fltr, *cur_fltr; + enum ice_sw_lkup_type lkup_type; + u16 vsi_list_id = 0, vsi_handle; + struct ice_lock *rule_lock; /* Lock to protect filter rule list */ + enum ice_status status = ICE_SUCCESS; + + if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle)) + return ICE_ERR_PARAM; + + f_entry->fltr_info.fwd_id.hw_vsi_id = + ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle); + new_fltr = &f_entry->fltr_info; + + /* VLAN ID should only be 12 bits */ + if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID) + return ICE_ERR_PARAM; + + if (new_fltr->src_id != ICE_SRC_ID_VSI) + return ICE_ERR_PARAM; + + new_fltr->src = new_fltr->fwd_id.hw_vsi_id; + lkup_type = new_fltr->lkup_type; + vsi_handle = new_fltr->vsi_handle; + rule_lock = &recp_list->filt_rule_lock; + ice_acquire_lock(rule_lock); + v_list_itr = ice_find_rule_entry(&recp_list->filt_rules, new_fltr); + if (!v_list_itr) { + struct ice_vsi_list_map_info *map_info = NULL; + + if (new_fltr->fltr_act == ICE_FWD_TO_VSI) { + /* All VLAN pruning rules use a VSI list. Check if + * there is already a VSI list containing VSI that we + * want to add. If found, use the same vsi_list_id for + * this new VLAN rule or else create a new list. + */ + map_info = ice_find_vsi_list_entry(recp_list, + vsi_handle, + &vsi_list_id); + if (!map_info) { + status = ice_create_vsi_list_rule(hw, + &vsi_handle, + 1, + &vsi_list_id, + lkup_type); + if (status) + goto exit; + } + /* Convert the action to forwarding to a VSI list. 
*/ + new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST; + new_fltr->fwd_id.vsi_list_id = vsi_list_id; + } + + status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry); + if (!status) { + v_list_itr = ice_find_rule_entry(&recp_list->filt_rules, + new_fltr); + if (!v_list_itr) { + status = ICE_ERR_DOES_NOT_EXIST; + goto exit; + } + /* reuse VSI list for new rule and increment ref_cnt */ + if (map_info) { + v_list_itr->vsi_list_info = map_info; + map_info->ref_cnt++; + } else { + v_list_itr->vsi_list_info = + ice_create_vsi_list_map(hw, &vsi_handle, + 1, vsi_list_id); + } + } + } else if (v_list_itr->vsi_list_info->ref_cnt == 1) { + /* Update existing VSI list to add new VSI ID only if it used + * by one VLAN rule. + */ + cur_fltr = &v_list_itr->fltr_info; + status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr, + new_fltr); + } else { + /* If VLAN rule exists and VSI list being used by this rule is + * referenced by more than 1 VLAN rule. Then create a new VSI + * list appending previous VSI with new VSI and update existing + * VLAN rule to point to new VSI list ID + */ + struct ice_fltr_info tmp_fltr; + u16 vsi_handle_arr[2]; + u16 cur_handle; + + /* Current implementation only supports reusing VSI list with + * one VSI count. We should never hit below condition + */ + if (v_list_itr->vsi_count > 1 && + v_list_itr->vsi_list_info->ref_cnt > 1) { + ice_debug(hw, ICE_DBG_SW, + "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n"); + status = ICE_ERR_CFG; + goto exit; + } + + cur_handle = + ice_find_first_bit(v_list_itr->vsi_list_info->vsi_map, + ICE_MAX_VSI); + + /* A rule already exists with the new VSI being added */ + if (cur_handle == vsi_handle) { + status = ICE_ERR_ALREADY_EXISTS; + goto exit; + } + + vsi_handle_arr[0] = cur_handle; + vsi_handle_arr[1] = vsi_handle; + status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2, + &vsi_list_id, lkup_type); + if (status) + goto exit; + + tmp_fltr = v_list_itr->fltr_info; + tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id; + tmp_fltr.fwd_id.vsi_list_id = vsi_list_id; + tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST; + /* Update the previous switch rule to a new VSI list which + * includes current VSI that is requested + */ + status = ice_update_pkt_fwd_rule(hw, &tmp_fltr); + if (status) + goto exit; + + /* before overriding VSI list map info. 
decrement ref_cnt of + * previous VSI list + */ + v_list_itr->vsi_list_info->ref_cnt--; + + /* now update to newly created list */ + v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id; + v_list_itr->vsi_list_info = + ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2, + vsi_list_id); + v_list_itr->vsi_count++; + } + +exit: + ice_release_lock(rule_lock); + return status; +} + +/** + * ice_add_vlan_rule - Add VLAN based filter rule + * @hw: pointer to the hardware structure + * @v_list: list of VLAN entries and forwarding information + * @sw: pointer to switch info struct for which function add rule + */ +static enum ice_status +ice_add_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list, + struct ice_switch_info *sw) +{ + struct ice_fltr_list_entry *v_list_itr; + struct ice_sw_recipe *recp_list; + + recp_list = &sw->recp_list[ICE_SW_LKUP_VLAN]; + LIST_FOR_EACH_ENTRY(v_list_itr, v_list, ice_fltr_list_entry, + list_entry) { + if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN) + return ICE_ERR_PARAM; + v_list_itr->fltr_info.flag = ICE_FLTR_TX; + v_list_itr->status = ice_add_vlan_internal(hw, recp_list, + v_list_itr); + if (v_list_itr->status) + return v_list_itr->status; + } + return ICE_SUCCESS; +} + +/** + * ice_add_vlan - Add a VLAN based filter rule + * @hw: pointer to the hardware structure + * @v_list: list of VLAN and forwarding information + * + * Function add VLAN rule for logical port from HW struct + */ +enum ice_status +ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list) +{ + if (!v_list || !hw) + return ICE_ERR_PARAM; + + return ice_add_vlan_rule(hw, v_list, hw->switch_info); +} + +/** + * ice_add_mac_vlan - Add MAC and VLAN pair based filter rule + * @hw: pointer to the hardware structure + * @mv_list: list of MAC and VLAN filters + * @sw: pointer to switch info struct for which function add rule + * @lport: logic port number on which function add rule + * + * If the VSI on which the MAC-VLAN pair has to be added has Rx and Tx VLAN + * pruning bits enabled, then it is the responsibility of the caller to make + * sure to add a VLAN only filter on the same VSI. Packets belonging to that + * VLAN won't be received on that VSI otherwise. 
+ */ +static enum ice_status +ice_add_mac_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list, + struct ice_switch_info *sw, u8 lport) +{ + struct ice_fltr_list_entry *mv_list_itr; + struct ice_sw_recipe *recp_list; + + if (!mv_list || !hw) + return ICE_ERR_PARAM; + + recp_list = &sw->recp_list[ICE_SW_LKUP_MAC_VLAN]; + LIST_FOR_EACH_ENTRY(mv_list_itr, mv_list, ice_fltr_list_entry, + list_entry) { + enum ice_sw_lkup_type l_type = + mv_list_itr->fltr_info.lkup_type; + + if (l_type != ICE_SW_LKUP_MAC_VLAN) + return ICE_ERR_PARAM; + mv_list_itr->fltr_info.flag = ICE_FLTR_TX; + mv_list_itr->status = + ice_add_rule_internal(hw, recp_list, lport, + mv_list_itr); + if (mv_list_itr->status) + return mv_list_itr->status; + } + return ICE_SUCCESS; +} + +/** + * ice_add_mac_vlan - Add a MAC VLAN address based filter rule + * @hw: pointer to the hardware structure + * @mv_list: list of MAC VLAN addresses and forwarding information + * + * Function add MAC VLAN rule for logical port from HW struct + */ +enum ice_status +ice_add_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list) +{ + if (!mv_list || !hw) + return ICE_ERR_PARAM; + + return ice_add_mac_vlan_rule(hw, mv_list, hw->switch_info, + hw->port_info->lport); +} + +/** + * ice_add_eth_mac_rule - Add ethertype and MAC based filter rule + * @hw: pointer to the hardware structure + * @em_list: list of ether type MAC filter, MAC is optional + * @sw: pointer to switch info struct for which function add rule + * @lport: logic port number on which function add rule + * + * This function requires the caller to populate the entries in + * the filter list with the necessary fields (including flags to + * indicate Tx or Rx rules). + */ +static enum ice_status +ice_add_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list, + struct ice_switch_info *sw, u8 lport) +{ + struct ice_fltr_list_entry *em_list_itr; + + LIST_FOR_EACH_ENTRY(em_list_itr, em_list, ice_fltr_list_entry, + list_entry) { + struct ice_sw_recipe *recp_list; + enum ice_sw_lkup_type l_type; + + l_type = em_list_itr->fltr_info.lkup_type; + recp_list = &sw->recp_list[l_type]; + + if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC && + l_type != ICE_SW_LKUP_ETHERTYPE) + return ICE_ERR_PARAM; + + em_list_itr->status = ice_add_rule_internal(hw, recp_list, + lport, + em_list_itr); + if (em_list_itr->status) + return em_list_itr->status; + } + return ICE_SUCCESS; +} + +enum ice_status +/** + * ice_add_eth_mac - Add a ethertype based filter rule + * @hw: pointer to the hardware structure + * @em_list: list of ethertype and forwarding information + * + * Function add ethertype rule for logical port from HW struct + */ +ice_add_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list) +{ + if (!em_list || !hw) + return ICE_ERR_PARAM; + + return ice_add_eth_mac_rule(hw, em_list, hw->switch_info, + hw->port_info->lport); +} + +/** + * ice_remove_eth_mac_rule - Remove an ethertype (or MAC) based filter rule + * @hw: pointer to the hardware structure + * @em_list: list of ethertype or ethertype MAC entries + * @sw: pointer to switch info struct for which function add rule + */ +static enum ice_status +ice_remove_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list, + struct ice_switch_info *sw) +{ + struct ice_fltr_list_entry *em_list_itr, *tmp; + + LIST_FOR_EACH_ENTRY_SAFE(em_list_itr, tmp, em_list, ice_fltr_list_entry, + list_entry) { + struct ice_sw_recipe *recp_list; + enum ice_sw_lkup_type l_type; + + l_type = em_list_itr->fltr_info.lkup_type; + + if (l_type != 
ICE_SW_LKUP_ETHERTYPE_MAC && + l_type != ICE_SW_LKUP_ETHERTYPE) + return ICE_ERR_PARAM; + + recp_list = &sw->recp_list[l_type]; + em_list_itr->status = ice_remove_rule_internal(hw, recp_list, + em_list_itr); + if (em_list_itr->status) + return em_list_itr->status; + } + return ICE_SUCCESS; +} + +/** + * ice_remove_eth_mac - remove a ethertype based filter rule + * @hw: pointer to the hardware structure + * @em_list: list of ethertype and forwarding information + * + */ +enum ice_status +ice_remove_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list) +{ + if (!em_list || !hw) + return ICE_ERR_PARAM; + + return ice_remove_eth_mac_rule(hw, em_list, hw->switch_info); +} + +/** + * ice_rem_sw_rule_info + * @hw: pointer to the hardware structure + * @rule_head: pointer to the switch list structure that we want to delete + */ +static void +ice_rem_sw_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head) +{ + if (!LIST_EMPTY(rule_head)) { + struct ice_fltr_mgmt_list_entry *entry; + struct ice_fltr_mgmt_list_entry *tmp; + + LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, rule_head, + ice_fltr_mgmt_list_entry, list_entry) { + LIST_DEL(&entry->list_entry); + ice_free(hw, entry); + } + } +} + +/** + * ice_rem_adv_rule_info + * @hw: pointer to the hardware structure + * @rule_head: pointer to the switch list structure that we want to delete + */ +static void +ice_rem_adv_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head) +{ + struct ice_adv_fltr_mgmt_list_entry *tmp_entry; + struct ice_adv_fltr_mgmt_list_entry *lst_itr; + + if (LIST_EMPTY(rule_head)) + return; + + LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry, rule_head, + ice_adv_fltr_mgmt_list_entry, list_entry) { + LIST_DEL(&lst_itr->list_entry); + ice_free(hw, lst_itr->lkups); + ice_free(hw, lst_itr); + } +} + +/** + * ice_rem_all_sw_rules_info + * @hw: pointer to the hardware structure + */ +void ice_rem_all_sw_rules_info(struct ice_hw *hw) +{ + struct ice_switch_info *sw = hw->switch_info; + u8 i; + + for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) { + struct LIST_HEAD_TYPE *rule_head; + + rule_head = &sw->recp_list[i].filt_rules; + if (!sw->recp_list[i].adv_rule) + ice_rem_sw_rule_info(hw, rule_head); + else + ice_rem_adv_rule_info(hw, rule_head); + } +} + +/** + * ice_cfg_dflt_vsi - change state of VSI to set/clear default + * @pi: pointer to the port_info structure + * @vsi_handle: VSI handle to set as default + * @set: true to add the above mentioned switch rule, false to remove it + * @direction: ICE_FLTR_RX or ICE_FLTR_TX + * + * add filter rule to set/unset given VSI as default VSI for the switch + * (represented by swid) + */ +enum ice_status +ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set, + u8 direction) +{ + struct ice_aqc_sw_rules_elem *s_rule; + struct ice_fltr_info f_info; + struct ice_hw *hw = pi->hw; + enum ice_adminq_opc opcode; + enum ice_status status; + u16 s_rule_size; + u16 hw_vsi_id; + + if (!ice_is_vsi_valid(hw, vsi_handle)) + return ICE_ERR_PARAM; + hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle); + + s_rule_size = set ? 
ICE_SW_RULE_RX_TX_ETH_HDR_SIZE : + ICE_SW_RULE_RX_TX_NO_HDR_SIZE; + s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size); + if (!s_rule) + return ICE_ERR_NO_MEMORY; + + ice_memset(&f_info, 0, sizeof(f_info), ICE_NONDMA_MEM); + + f_info.lkup_type = ICE_SW_LKUP_DFLT; + f_info.flag = direction; + f_info.fltr_act = ICE_FWD_TO_VSI; + f_info.fwd_id.hw_vsi_id = hw_vsi_id; + + if (f_info.flag & ICE_FLTR_RX) { + f_info.src = pi->lport; + f_info.src_id = ICE_SRC_ID_LPORT; + if (!set) + f_info.fltr_rule_id = + pi->dflt_rx_vsi_rule_id; + } else if (f_info.flag & ICE_FLTR_TX) { + f_info.src_id = ICE_SRC_ID_VSI; + f_info.src = hw_vsi_id; + if (!set) + f_info.fltr_rule_id = + pi->dflt_tx_vsi_rule_id; + } + + if (set) + opcode = ice_aqc_opc_add_sw_rules; + else + opcode = ice_aqc_opc_remove_sw_rules; + + ice_fill_sw_rule(hw, &f_info, s_rule, opcode); + + status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL); + if (status || !(f_info.flag & ICE_FLTR_TX_RX)) + goto out; + if (set) { + u16 index = LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index); + + if (f_info.flag & ICE_FLTR_TX) { + pi->dflt_tx_vsi_num = hw_vsi_id; + pi->dflt_tx_vsi_rule_id = index; + } else if (f_info.flag & ICE_FLTR_RX) { + pi->dflt_rx_vsi_num = hw_vsi_id; + pi->dflt_rx_vsi_rule_id = index; + } + } else { + if (f_info.flag & ICE_FLTR_TX) { + pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL; + pi->dflt_tx_vsi_rule_id = ICE_INVAL_ACT; + } else if (f_info.flag & ICE_FLTR_RX) { + pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL; + pi->dflt_rx_vsi_rule_id = ICE_INVAL_ACT; + } + } + +out: + ice_free(hw, s_rule); + return status; +} + +/** + * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry + * @list_head: head of rule list + * @f_info: rule information + * + * Helper function to search for a unicast rule entry - this is to be used + * to remove unicast MAC filter that is not shared with other VSIs on the + * PF switch. + * + * Returns pointer to entry storing the rule if found + */ +static struct ice_fltr_mgmt_list_entry * +ice_find_ucast_rule_entry(struct LIST_HEAD_TYPE *list_head, + struct ice_fltr_info *f_info) +{ + struct ice_fltr_mgmt_list_entry *list_itr; + + LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry, + list_entry) { + if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data, + sizeof(f_info->l_data)) && + f_info->fwd_id.hw_vsi_id == + list_itr->fltr_info.fwd_id.hw_vsi_id && + f_info->flag == list_itr->fltr_info.flag) + return list_itr; + } + return NULL; +} + +/** + * ice_remove_mac_rule - remove a MAC based filter rule + * @hw: pointer to the hardware structure + * @m_list: list of MAC addresses and forwarding information + * @recp_list: list from which function remove MAC address + * + * This function removes either a MAC filter rule or a specific VSI from a + * VSI list for a multicast MAC address. + * + * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by + * ice_add_mac. Caller should be aware that this call will only work if all + * the entries passed into m_list were added previously. It will not attempt to + * do a partial remove of entries that were found. 
+ */ +static enum ice_status +ice_remove_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list, + struct ice_sw_recipe *recp_list) +{ + struct ice_fltr_list_entry *list_itr, *tmp; + struct ice_lock *rule_lock; /* Lock to protect filter rule list */ + + if (!m_list) + return ICE_ERR_PARAM; + + rule_lock = &recp_list->filt_rule_lock; + LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, m_list, ice_fltr_list_entry, + list_entry) { + enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type; + u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0]; + u16 vsi_handle; + + if (l_type != ICE_SW_LKUP_MAC) + return ICE_ERR_PARAM; + + vsi_handle = list_itr->fltr_info.vsi_handle; + if (!ice_is_vsi_valid(hw, vsi_handle)) + return ICE_ERR_PARAM; + + list_itr->fltr_info.fwd_id.hw_vsi_id = + ice_get_hw_vsi_num(hw, vsi_handle); + if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) { + /* Don't remove the unicast address that belongs to + * another VSI on the switch, since it is not being + * shared... + */ + ice_acquire_lock(rule_lock); + if (!ice_find_ucast_rule_entry(&recp_list->filt_rules, + &list_itr->fltr_info)) { + ice_release_lock(rule_lock); + return ICE_ERR_DOES_NOT_EXIST; + } + ice_release_lock(rule_lock); + } + list_itr->status = ice_remove_rule_internal(hw, recp_list, + list_itr); + if (list_itr->status) + return list_itr->status; + } + return ICE_SUCCESS; +} + +/** + * ice_remove_mac - remove a MAC address based filter rule + * @hw: pointer to the hardware structure + * @m_list: list of MAC addresses and forwarding information + * + */ +enum ice_status +ice_remove_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list) +{ + struct ice_sw_recipe *recp_list; + + recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC]; + return ice_remove_mac_rule(hw, m_list, recp_list); +} + +/** + * ice_remove_vlan_rule - Remove VLAN based filter rule + * @hw: pointer to the hardware structure + * @v_list: list of VLAN entries and forwarding information + * @recp_list: list from which function remove VLAN + */ +static enum ice_status +ice_remove_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list, + struct ice_sw_recipe *recp_list) +{ + struct ice_fltr_list_entry *v_list_itr, *tmp; + + LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry, + list_entry) { + enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type; + + if (l_type != ICE_SW_LKUP_VLAN) + return ICE_ERR_PARAM; + v_list_itr->status = ice_remove_rule_internal(hw, recp_list, + v_list_itr); + if (v_list_itr->status) + return v_list_itr->status; + } + return ICE_SUCCESS; +} + +/** + * ice_remove_vlan - remove a VLAN address based filter rule + * @hw: pointer to the hardware structure + * @v_list: list of VLAN and forwarding information + * + */ +enum ice_status +ice_remove_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list) +{ + struct ice_sw_recipe *recp_list; + + if (!v_list || !hw) + return ICE_ERR_PARAM; + + recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_VLAN]; + return ice_remove_vlan_rule(hw, v_list, recp_list); +} + +/** + * ice_remove_mac_vlan_rule - Remove MAC VLAN based filter rule + * @hw: pointer to the hardware structure + * @v_list: list of MAC VLAN entries and forwarding information + * @recp_list: list from which function remove MAC VLAN + */ +static enum ice_status +ice_remove_mac_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list, + struct ice_sw_recipe *recp_list) +{ + struct ice_fltr_list_entry *v_list_itr, *tmp; + + recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN]; + 
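+	/* recp_list has just been re-pointed at the MAC-VLAN recipe; the loop
+	 * below validates that each entry is an ICE_SW_LKUP_MAC_VLAN lookup
+	 * and removes it, recording a per-entry status as it goes.
+	 */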
LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry, + list_entry) { + enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type; + + if (l_type != ICE_SW_LKUP_MAC_VLAN) + return ICE_ERR_PARAM; + v_list_itr->status = + ice_remove_rule_internal(hw, recp_list, + v_list_itr); + if (v_list_itr->status) + return v_list_itr->status; + } + return ICE_SUCCESS; +} + +/** + * ice_remove_mac_vlan - remove a MAC VLAN address based filter rule + * @hw: pointer to the hardware structure + * @mv_list: list of MAC VLAN and forwarding information + */ +enum ice_status +ice_remove_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list) +{ + struct ice_sw_recipe *recp_list; + + if (!mv_list || !hw) + return ICE_ERR_PARAM; + + recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN]; + return ice_remove_mac_vlan_rule(hw, mv_list, recp_list); +} + +/** + * ice_vsi_uses_fltr - Determine if given VSI uses specified filter + * @fm_entry: filter entry to inspect + * @vsi_handle: VSI handle to compare with filter info + */ +static bool +ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle) +{ + return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI && + fm_entry->fltr_info.vsi_handle == vsi_handle) || + (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST && + (ice_is_bit_set(fm_entry->vsi_list_info->vsi_map, + vsi_handle)))); +} + +/** + * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list + * @hw: pointer to the hardware structure + * @vsi_handle: VSI handle to remove filters from + * @vsi_list_head: pointer to the list to add entry to + * @fi: pointer to fltr_info of filter entry to copy & add + * + * Helper function, used when creating a list of filters to remove from + * a specific VSI. The entry added to vsi_list_head is a COPY of the + * original filter entry, with the exception of fltr_info.fltr_act and + * fltr_info.fwd_id fields. These are set such that later logic can + * extract which VSI to remove the fltr from, and pass on that information. + */ +static enum ice_status +ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle, + struct LIST_HEAD_TYPE *vsi_list_head, + struct ice_fltr_info *fi) +{ + struct ice_fltr_list_entry *tmp; + + /* this memory is freed up in the caller function + * once filters for this VSI are removed + */ + tmp = (struct ice_fltr_list_entry *)ice_malloc(hw, sizeof(*tmp)); + if (!tmp) + return ICE_ERR_NO_MEMORY; + + tmp->fltr_info = *fi; + + /* Overwrite these fields to indicate which VSI to remove filter from, + * so find and remove logic can extract the information from the + * list entries. Note that original entries will still have proper + * values. + */ + tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI; + tmp->fltr_info.vsi_handle = vsi_handle; + tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle); + + LIST_ADD(&tmp->list_entry, vsi_list_head); + + return ICE_SUCCESS; +} + +/** + * ice_add_to_vsi_fltr_list - Add VSI filters to the list + * @hw: pointer to the hardware structure + * @vsi_handle: VSI handle to remove filters from + * @lkup_list_head: pointer to the list that has certain lookup type filters + * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle + * + * Locates all filters in lkup_list_head that are used by the given VSI, + * and adds COPIES of those entries to vsi_list_head (intended to be used + * to remove the listed filters). 
+ * Note that this means all entries in vsi_list_head must be explicitly + * deallocated by the caller when done with list. + */ +static enum ice_status +ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle, + struct LIST_HEAD_TYPE *lkup_list_head, + struct LIST_HEAD_TYPE *vsi_list_head) +{ + struct ice_fltr_mgmt_list_entry *fm_entry; + enum ice_status status = ICE_SUCCESS; + + /* check to make sure VSI ID is valid and within boundary */ + if (!ice_is_vsi_valid(hw, vsi_handle)) + return ICE_ERR_PARAM; + + LIST_FOR_EACH_ENTRY(fm_entry, lkup_list_head, + ice_fltr_mgmt_list_entry, list_entry) { + struct ice_fltr_info *fi; + + fi = &fm_entry->fltr_info; + if (!fi || !ice_vsi_uses_fltr(fm_entry, vsi_handle)) + continue; + + status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle, + vsi_list_head, fi); + if (status) + return status; + } + return status; +} + +/** + * ice_determine_promisc_mask + * @fi: filter info to parse + * + * Helper function to determine which ICE_PROMISC_ mask corresponds + * to given filter into. + */ +static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi) +{ + u16 vid = fi->l_data.mac_vlan.vlan_id; + u8 *macaddr = fi->l_data.mac.mac_addr; + bool is_tx_fltr = false; + u8 promisc_mask = 0; + + if (fi->flag == ICE_FLTR_TX) + is_tx_fltr = true; + + if (IS_BROADCAST_ETHER_ADDR(macaddr)) + promisc_mask |= is_tx_fltr ? + ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX; + else if (IS_MULTICAST_ETHER_ADDR(macaddr)) + promisc_mask |= is_tx_fltr ? + ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX; + else if (IS_UNICAST_ETHER_ADDR(macaddr)) + promisc_mask |= is_tx_fltr ? + ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX; + if (vid) + promisc_mask |= is_tx_fltr ? + ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX; + + return promisc_mask; +} + +/** + * ice_get_vsi_promisc - get promiscuous mode of given VSI + * @hw: pointer to the hardware structure + * @vsi_handle: VSI handle to retrieve info from + * @promisc_mask: pointer to mask to be filled in + * @vid: VLAN ID of promisc VLAN VSI + */ +enum ice_status +ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask, + u16 *vid) +{ + struct ice_switch_info *sw = hw->switch_info; + struct ice_fltr_mgmt_list_entry *itr; + struct LIST_HEAD_TYPE *rule_head; + struct ice_lock *rule_lock; /* Lock to protect filter rule list */ + + if (!ice_is_vsi_valid(hw, vsi_handle)) + return ICE_ERR_PARAM; + + *vid = 0; + *promisc_mask = 0; + rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rules; + rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rule_lock; + + ice_acquire_lock(rule_lock); + LIST_FOR_EACH_ENTRY(itr, rule_head, + ice_fltr_mgmt_list_entry, list_entry) { + /* Continue if this filter doesn't apply to this VSI or the + * VSI ID is not in the VSI map for this filter + */ + if (!ice_vsi_uses_fltr(itr, vsi_handle)) + continue; + + *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info); + } + ice_release_lock(rule_lock); + + return ICE_SUCCESS; +} + +/** + * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI + * @hw: pointer to the hardware structure + * @vsi_handle: VSI handle to retrieve info from + * @promisc_mask: pointer to mask to be filled in + * @vid: VLAN ID of promisc VLAN VSI + */ +enum ice_status +ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask, + u16 *vid) +{ + struct ice_switch_info *sw = hw->switch_info; + struct ice_fltr_mgmt_list_entry *itr; + struct LIST_HEAD_TYPE *rule_head; + struct ice_lock *rule_lock; /* Lock to protect filter rule list */ + + if 
(!ice_is_vsi_valid(hw, vsi_handle)) + return ICE_ERR_PARAM; + + *vid = 0; + *promisc_mask = 0; + rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rules; + rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rule_lock; + + ice_acquire_lock(rule_lock); + LIST_FOR_EACH_ENTRY(itr, rule_head, ice_fltr_mgmt_list_entry, + list_entry) { + /* Continue if this filter doesn't apply to this VSI or the + * VSI ID is not in the VSI map for this filter + */ + if (!ice_vsi_uses_fltr(itr, vsi_handle)) + continue; + + *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info); + } + ice_release_lock(rule_lock); + + return ICE_SUCCESS; +} + +/** + * ice_remove_promisc - Remove promisc based filter rules + * @hw: pointer to the hardware structure + * @recp_id: recipe ID for which the rule needs to removed + * @v_list: list of promisc entries + */ +static enum ice_status +ice_remove_promisc(struct ice_hw *hw, u8 recp_id, + struct LIST_HEAD_TYPE *v_list) +{ + struct ice_fltr_list_entry *v_list_itr, *tmp; + struct ice_sw_recipe *recp_list; + + recp_list = &hw->switch_info->recp_list[recp_id]; + LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry, + list_entry) { + v_list_itr->status = + ice_remove_rule_internal(hw, recp_list, v_list_itr); + if (v_list_itr->status) + return v_list_itr->status; + } + return ICE_SUCCESS; +} + +/** + * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI + * @hw: pointer to the hardware structure + * @vsi_handle: VSI handle to clear mode + * @promisc_mask: mask of promiscuous config bits to clear + * @vid: VLAN ID to clear VLAN promiscuous + */ +enum ice_status +ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, + u16 vid) +{ + struct ice_switch_info *sw = hw->switch_info; + struct ice_fltr_list_entry *fm_entry, *tmp; + struct LIST_HEAD_TYPE remove_list_head; + struct ice_fltr_mgmt_list_entry *itr; + struct LIST_HEAD_TYPE *rule_head; + struct ice_lock *rule_lock; /* Lock to protect filter rule list */ + enum ice_status status = ICE_SUCCESS; + u8 recipe_id; + + if (!ice_is_vsi_valid(hw, vsi_handle)) + return ICE_ERR_PARAM; + + if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) + recipe_id = ICE_SW_LKUP_PROMISC_VLAN; + else + recipe_id = ICE_SW_LKUP_PROMISC; + + rule_head = &sw->recp_list[recipe_id].filt_rules; + rule_lock = &sw->recp_list[recipe_id].filt_rule_lock; + + INIT_LIST_HEAD(&remove_list_head); + + ice_acquire_lock(rule_lock); + LIST_FOR_EACH_ENTRY(itr, rule_head, + ice_fltr_mgmt_list_entry, list_entry) { + struct ice_fltr_info *fltr_info; + u8 fltr_promisc_mask = 0; + + if (!ice_vsi_uses_fltr(itr, vsi_handle)) + continue; + fltr_info = &itr->fltr_info; + + if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN && + vid != fltr_info->l_data.mac_vlan.vlan_id) + continue; + + fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info); + + /* Skip if filter is not completely specified by given mask */ + if (fltr_promisc_mask & ~promisc_mask) + continue; + + status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle, + &remove_list_head, + fltr_info); + if (status) { + ice_release_lock(rule_lock); + goto free_fltr_list; + } + } + ice_release_lock(rule_lock); + + status = ice_remove_promisc(hw, recipe_id, &remove_list_head); + +free_fltr_list: + LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head, + ice_fltr_list_entry, list_entry) { + LIST_DEL(&fm_entry->list_entry); + ice_free(hw, fm_entry); + } + + return status; +} + +/** + * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s) + * 
@hw: pointer to the hardware structure + * @vsi_handle: VSI handle to configure + * @promisc_mask: mask of promiscuous config bits + * @vid: VLAN ID to set VLAN promiscuous + */ +enum ice_status +ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid) +{ + enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR }; + struct ice_fltr_list_entry f_list_entry; + struct ice_fltr_info new_fltr; + enum ice_status status = ICE_SUCCESS; + bool is_tx_fltr; + u16 hw_vsi_id; + int pkt_type; + u8 recipe_id; + + ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); + + if (!ice_is_vsi_valid(hw, vsi_handle)) + return ICE_ERR_PARAM; + hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle); + + ice_memset(&new_fltr, 0, sizeof(new_fltr), ICE_NONDMA_MEM); + + if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) { + new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN; + new_fltr.l_data.mac_vlan.vlan_id = vid; + recipe_id = ICE_SW_LKUP_PROMISC_VLAN; + } else { + new_fltr.lkup_type = ICE_SW_LKUP_PROMISC; + recipe_id = ICE_SW_LKUP_PROMISC; + } + + /* Separate filters must be set for each direction/packet type + * combination, so we will loop over the mask value, store the + * individual type, and clear it out in the input mask as it + * is found. + */ + while (promisc_mask) { + struct ice_sw_recipe *recp_list; + u8 *mac_addr; + + pkt_type = 0; + is_tx_fltr = false; + + if (promisc_mask & ICE_PROMISC_UCAST_RX) { + promisc_mask &= ~ICE_PROMISC_UCAST_RX; + pkt_type = UCAST_FLTR; + } else if (promisc_mask & ICE_PROMISC_UCAST_TX) { + promisc_mask &= ~ICE_PROMISC_UCAST_TX; + pkt_type = UCAST_FLTR; + is_tx_fltr = true; + } else if (promisc_mask & ICE_PROMISC_MCAST_RX) { + promisc_mask &= ~ICE_PROMISC_MCAST_RX; + pkt_type = MCAST_FLTR; + } else if (promisc_mask & ICE_PROMISC_MCAST_TX) { + promisc_mask &= ~ICE_PROMISC_MCAST_TX; + pkt_type = MCAST_FLTR; + is_tx_fltr = true; + } else if (promisc_mask & ICE_PROMISC_BCAST_RX) { + promisc_mask &= ~ICE_PROMISC_BCAST_RX; + pkt_type = BCAST_FLTR; + } else if (promisc_mask & ICE_PROMISC_BCAST_TX) { + promisc_mask &= ~ICE_PROMISC_BCAST_TX; + pkt_type = BCAST_FLTR; + is_tx_fltr = true; + } + + /* Check for VLAN promiscuous flag */ + if (promisc_mask & ICE_PROMISC_VLAN_RX) { + promisc_mask &= ~ICE_PROMISC_VLAN_RX; + } else if (promisc_mask & ICE_PROMISC_VLAN_TX) { + promisc_mask &= ~ICE_PROMISC_VLAN_TX; + is_tx_fltr = true; + } + + /* Set filter DA based on packet type */ + mac_addr = new_fltr.l_data.mac.mac_addr; + if (pkt_type == BCAST_FLTR) { + ice_memset(mac_addr, 0xff, ETH_ALEN, ICE_NONDMA_MEM); + } else if (pkt_type == MCAST_FLTR || + pkt_type == UCAST_FLTR) { + /* Use the dummy ether header DA */ + ice_memcpy(mac_addr, dummy_eth_header, ETH_ALEN, + ICE_NONDMA_TO_NONDMA); + if (pkt_type == MCAST_FLTR) + mac_addr[0] |= 0x1; /* Set multicast bit */ + } + + /* Need to reset this to zero for all iterations */ + new_fltr.flag = 0; + if (is_tx_fltr) { + new_fltr.flag |= ICE_FLTR_TX; + new_fltr.src = hw_vsi_id; + } else { + new_fltr.flag |= ICE_FLTR_RX; + new_fltr.src = hw->port_info->lport; + } + + new_fltr.fltr_act = ICE_FWD_TO_VSI; + new_fltr.vsi_handle = vsi_handle; + new_fltr.fwd_id.hw_vsi_id = hw_vsi_id; + f_list_entry.fltr_info = new_fltr; + recp_list = &hw->switch_info->recp_list[recipe_id]; + + status = ice_add_rule_internal(hw, recp_list, + hw->port_info->lport, + &f_list_entry); + if (status != ICE_SUCCESS) + goto set_promisc_exit; + } + +set_promisc_exit: + return status; +} + +/** + * ice_set_vlan_vsi_promisc + * @hw: pointer to the hardware structure + * 
@vsi_handle: VSI handle to configure + * @promisc_mask: mask of promiscuous config bits + * @rm_vlan_promisc: Clear VLANs VSI promisc mode + * + * Configure VSI with all associated VLANs to given promiscuous mode(s) + */ +enum ice_status +ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, + bool rm_vlan_promisc) +{ + struct ice_switch_info *sw = hw->switch_info; + struct ice_fltr_list_entry *list_itr, *tmp; + struct LIST_HEAD_TYPE vsi_list_head; + struct LIST_HEAD_TYPE *vlan_head; + struct ice_lock *vlan_lock; /* Lock to protect filter rule list */ + enum ice_status status; + u16 vlan_id; + + INIT_LIST_HEAD(&vsi_list_head); + vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock; + vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules; + ice_acquire_lock(vlan_lock); + status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head, + &vsi_list_head); + ice_release_lock(vlan_lock); + if (status) + goto free_fltr_list; + + LIST_FOR_EACH_ENTRY(list_itr, &vsi_list_head, ice_fltr_list_entry, + list_entry) { + vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id; + if (rm_vlan_promisc) + status = ice_clear_vsi_promisc(hw, vsi_handle, + promisc_mask, vlan_id); + else + status = ice_set_vsi_promisc(hw, vsi_handle, + promisc_mask, vlan_id); + if (status) + break; + } + +free_fltr_list: + LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, &vsi_list_head, + ice_fltr_list_entry, list_entry) { + LIST_DEL(&list_itr->list_entry); + ice_free(hw, list_itr); + } + return status; +} + +/** + * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI + * @hw: pointer to the hardware structure + * @vsi_handle: VSI handle to remove filters from + * @recp_list: recipe list from which function remove fltr + * @lkup: switch rule filter lookup type + */ +static void +ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle, + struct ice_sw_recipe *recp_list, + enum ice_sw_lkup_type lkup) +{ + struct ice_fltr_list_entry *fm_entry; + struct LIST_HEAD_TYPE remove_list_head; + struct LIST_HEAD_TYPE *rule_head; + struct ice_fltr_list_entry *tmp; + struct ice_lock *rule_lock; /* Lock to protect filter rule list */ + enum ice_status status; + + INIT_LIST_HEAD(&remove_list_head); + rule_lock = &recp_list[lkup].filt_rule_lock; + rule_head = &recp_list[lkup].filt_rules; + ice_acquire_lock(rule_lock); + status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head, + &remove_list_head); + ice_release_lock(rule_lock); + if (status) + return; + + switch (lkup) { + case ICE_SW_LKUP_MAC: + ice_remove_mac_rule(hw, &remove_list_head, &recp_list[lkup]); + break; + case ICE_SW_LKUP_VLAN: + ice_remove_vlan_rule(hw, &remove_list_head, &recp_list[lkup]); + break; + case ICE_SW_LKUP_PROMISC: + case ICE_SW_LKUP_PROMISC_VLAN: + ice_remove_promisc(hw, lkup, &remove_list_head); + break; + case ICE_SW_LKUP_MAC_VLAN: + ice_remove_mac_vlan(hw, &remove_list_head); + break; + case ICE_SW_LKUP_ETHERTYPE: + case ICE_SW_LKUP_ETHERTYPE_MAC: + ice_remove_eth_mac(hw, &remove_list_head); + break; + case ICE_SW_LKUP_DFLT: + ice_debug(hw, ICE_DBG_SW, + "Remove filters for this lookup type hasn't been implemented yet\n"); + break; + case ICE_SW_LKUP_LAST: + ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type\n"); + break; + } + + LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head, + ice_fltr_list_entry, list_entry) { + LIST_DEL(&fm_entry->list_entry); + ice_free(hw, fm_entry); + } +} + +/** + * ice_remove_vsi_fltr_rule - Remove all filters for a VSI + * @hw: pointer to the hardware structure + * @vsi_handle: VSI handle to 
remove filters from + * @sw: pointer to switch info struct + */ +static void +ice_remove_vsi_fltr_rule(struct ice_hw *hw, u16 vsi_handle, + struct ice_switch_info *sw) +{ + ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); + + ice_remove_vsi_lkup_fltr(hw, vsi_handle, + sw->recp_list, ICE_SW_LKUP_MAC); + ice_remove_vsi_lkup_fltr(hw, vsi_handle, + sw->recp_list, ICE_SW_LKUP_MAC_VLAN); + ice_remove_vsi_lkup_fltr(hw, vsi_handle, + sw->recp_list, ICE_SW_LKUP_PROMISC); + ice_remove_vsi_lkup_fltr(hw, vsi_handle, + sw->recp_list, ICE_SW_LKUP_VLAN); + ice_remove_vsi_lkup_fltr(hw, vsi_handle, + sw->recp_list, ICE_SW_LKUP_DFLT); + ice_remove_vsi_lkup_fltr(hw, vsi_handle, + sw->recp_list, ICE_SW_LKUP_ETHERTYPE); + ice_remove_vsi_lkup_fltr(hw, vsi_handle, + sw->recp_list, ICE_SW_LKUP_ETHERTYPE_MAC); + ice_remove_vsi_lkup_fltr(hw, vsi_handle, + sw->recp_list, ICE_SW_LKUP_PROMISC_VLAN); +} + +/** + * ice_remove_vsi_fltr - Remove all filters for a VSI + * @hw: pointer to the hardware structure + * @vsi_handle: VSI handle to remove filters from + */ +void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle) +{ + ice_remove_vsi_fltr_rule(hw, vsi_handle, hw->switch_info); +} + +/** + * ice_alloc_res_cntr - allocating resource counter + * @hw: pointer to the hardware structure + * @type: type of resource + * @alloc_shared: if set it is shared else dedicated + * @num_items: number of entries requested for FD resource type + * @counter_id: counter index returned by AQ call + */ +enum ice_status +ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items, + u16 *counter_id) +{ + struct ice_aqc_alloc_free_res_elem *buf; + enum ice_status status; + u16 buf_len; + + /* Allocate resource */ + buf_len = sizeof(*buf); + buf = (struct ice_aqc_alloc_free_res_elem *) + ice_malloc(hw, buf_len); + if (!buf) + return ICE_ERR_NO_MEMORY; + + buf->num_elems = CPU_TO_LE16(num_items); + buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) & + ICE_AQC_RES_TYPE_M) | alloc_shared); + + status = ice_aq_alloc_free_res(hw, 1, buf, buf_len, + ice_aqc_opc_alloc_res, NULL); + if (status) + goto exit; + + *counter_id = LE16_TO_CPU(buf->elem[0].e.sw_resp); + +exit: + ice_free(hw, buf); + return status; +} + +/** + * ice_free_res_cntr - free resource counter + * @hw: pointer to the hardware structure + * @type: type of resource + * @alloc_shared: if set it is shared else dedicated + * @num_items: number of entries to be freed for FD resource type + * @counter_id: counter ID resource which needs to be freed + */ +enum ice_status +ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items, + u16 counter_id) +{ + struct ice_aqc_alloc_free_res_elem *buf; + enum ice_status status; + u16 buf_len; + + /* Free resource */ + buf_len = sizeof(*buf); + buf = (struct ice_aqc_alloc_free_res_elem *) + ice_malloc(hw, buf_len); + if (!buf) + return ICE_ERR_NO_MEMORY; + + buf->num_elems = CPU_TO_LE16(num_items); + buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) & + ICE_AQC_RES_TYPE_M) | alloc_shared); + buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id); + + status = ice_aq_alloc_free_res(hw, 1, buf, buf_len, + ice_aqc_opc_free_res, NULL); + if (status) + ice_debug(hw, ICE_DBG_SW, + "counter resource could not be freed\n"); + + ice_free(hw, buf); + return status; +} + +/** + * ice_alloc_vlan_res_counter - obtain counter resource for VLAN type + * @hw: pointer to the hardware structure + * @counter_id: returns counter index + */ +enum ice_status ice_alloc_vlan_res_counter(struct ice_hw *hw, u16 *counter_id) 
+{ + return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER, + ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1, + counter_id); +} + +/** + * ice_free_vlan_res_counter - Free counter resource for VLAN type + * @hw: pointer to the hardware structure + * @counter_id: counter index to be freed + */ +enum ice_status ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id) +{ + return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER, + ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1, + counter_id); +} + +/** + * ice_alloc_res_lg_act - add large action resource + * @hw: pointer to the hardware structure + * @l_id: large action ID to fill it in + * @num_acts: number of actions to hold with a large action entry + */ +static enum ice_status +ice_alloc_res_lg_act(struct ice_hw *hw, u16 *l_id, u16 num_acts) +{ + struct ice_aqc_alloc_free_res_elem *sw_buf; + enum ice_status status; + u16 buf_len; + + if (num_acts > ICE_MAX_LG_ACT || num_acts == 0) + return ICE_ERR_PARAM; + + /* Allocate resource for large action */ + buf_len = sizeof(*sw_buf); + sw_buf = (struct ice_aqc_alloc_free_res_elem *) + ice_malloc(hw, buf_len); + if (!sw_buf) + return ICE_ERR_NO_MEMORY; + + sw_buf->num_elems = CPU_TO_LE16(1); + + /* If num_acts is 1, use ICE_AQC_RES_TYPE_WIDE_TABLE_1. + * If num_acts is 2, use ICE_AQC_RES_TYPE_WIDE_TABLE_3. + * If num_acts is greater than 2, then use + * ICE_AQC_RES_TYPE_WIDE_TABLE_4. + * The num_acts cannot exceed 4. This was ensured at the + * beginning of the function. + */ + if (num_acts == 1) + sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_1); + else if (num_acts == 2) + sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_2); + else + sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_4); + + status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, + ice_aqc_opc_alloc_res, NULL); + if (!status) + *l_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp); + + ice_free(hw, sw_buf); + return status; +} + +/** + * ice_add_mac_with_sw_marker - add filter with sw marker + * @hw: pointer to the hardware structure + * @f_info: filter info structure containing the MAC filter information + * @sw_marker: sw marker to tag the Rx descriptor with + */ +enum ice_status +ice_add_mac_with_sw_marker(struct ice_hw *hw, struct ice_fltr_info *f_info, + u16 sw_marker) +{ + struct ice_fltr_mgmt_list_entry *m_entry; + struct ice_fltr_list_entry fl_info; + struct ice_sw_recipe *recp_list; + struct LIST_HEAD_TYPE l_head; + struct ice_lock *rule_lock; /* Lock to protect filter rule list */ + enum ice_status ret; + bool entry_exists; + u16 lg_act_id; + + if (f_info->fltr_act != ICE_FWD_TO_VSI) + return ICE_ERR_PARAM; + + if (f_info->lkup_type != ICE_SW_LKUP_MAC) + return ICE_ERR_PARAM; + + if (sw_marker == ICE_INVAL_SW_MARKER_ID) + return ICE_ERR_PARAM; + + if (!ice_is_vsi_valid(hw, f_info->vsi_handle)) + return ICE_ERR_PARAM; + f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle); + + /* Add filter if it doesn't exist so then the adding of large + * action always results in update + */ + + INIT_LIST_HEAD(&l_head); + fl_info.fltr_info = *f_info; + LIST_ADD(&fl_info.list_entry, &l_head); + + entry_exists = false; + ret = ice_add_mac_rule(hw, &l_head, hw->switch_info, + hw->port_info->lport); + if (ret == ICE_ERR_ALREADY_EXISTS) + entry_exists = true; + else if (ret) + return ret; + + recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC]; + rule_lock = &recp_list->filt_rule_lock; + ice_acquire_lock(rule_lock); + /* Get the book keeping entry for the filter */ + m_entry = 
ice_find_rule_entry(&recp_list->filt_rules, f_info); + if (!m_entry) + goto exit_error; + + /* If counter action was enabled for this rule then don't enable + * sw marker large action + */ + if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) { + ret = ICE_ERR_PARAM; + goto exit_error; + } + + /* if same marker was added before */ + if (m_entry->sw_marker_id == sw_marker) { + ret = ICE_ERR_ALREADY_EXISTS; + goto exit_error; + } + + /* Allocate a hardware table entry to hold large act. Three actions + * for marker based large action + */ + ret = ice_alloc_res_lg_act(hw, &lg_act_id, 3); + if (ret) + goto exit_error; + + if (lg_act_id == ICE_INVAL_LG_ACT_INDEX) + goto exit_error; + + /* Update the switch rule to add the marker action */ + ret = ice_add_marker_act(hw, m_entry, sw_marker, lg_act_id); + if (!ret) { + ice_release_lock(rule_lock); + return ret; + } + +exit_error: + ice_release_lock(rule_lock); + /* only remove entry if it did not exist previously */ + if (!entry_exists) + ret = ice_remove_mac(hw, &l_head); + + return ret; +} + +/** + * ice_add_mac_with_counter - add filter with counter enabled + * @hw: pointer to the hardware structure + * @f_info: pointer to filter info structure containing the MAC filter + * information + */ +enum ice_status +ice_add_mac_with_counter(struct ice_hw *hw, struct ice_fltr_info *f_info) +{ + struct ice_fltr_mgmt_list_entry *m_entry; + struct ice_fltr_list_entry fl_info; + struct ice_sw_recipe *recp_list; + struct LIST_HEAD_TYPE l_head; + struct ice_lock *rule_lock; /* Lock to protect filter rule list */ + enum ice_status ret; + bool entry_exist; + u16 counter_id; + u16 lg_act_id; + + if (f_info->fltr_act != ICE_FWD_TO_VSI) + return ICE_ERR_PARAM; + + if (f_info->lkup_type != ICE_SW_LKUP_MAC) + return ICE_ERR_PARAM; + + if (!ice_is_vsi_valid(hw, f_info->vsi_handle)) + return ICE_ERR_PARAM; + f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle); + recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC]; + + entry_exist = false; + + rule_lock = &recp_list->filt_rule_lock; + + /* Add filter if it doesn't exist so then the adding of large + * action always results in update + */ + INIT_LIST_HEAD(&l_head); + + fl_info.fltr_info = *f_info; + LIST_ADD(&fl_info.list_entry, &l_head); + + ret = ice_add_mac_rule(hw, &l_head, hw->switch_info, + hw->port_info->lport); + if (ret == ICE_ERR_ALREADY_EXISTS) + entry_exist = true; + else if (ret) + return ret; + + ice_acquire_lock(rule_lock); + m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info); + if (!m_entry) { + ret = ICE_ERR_BAD_PTR; + goto exit_error; + } + + /* Don't enable counter for a filter for which sw marker was enabled */ + if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID) { + ret = ICE_ERR_PARAM; + goto exit_error; + } + + /* If a counter was already enabled then don't need to add again */ + if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) { + ret = ICE_ERR_ALREADY_EXISTS; + goto exit_error; + } + + /* Allocate a hardware table entry to VLAN counter */ + ret = ice_alloc_vlan_res_counter(hw, &counter_id); + if (ret) + goto exit_error; + + /* Allocate a hardware table entry to hold large act. 
Two actions for + * counter based large action + */ + ret = ice_alloc_res_lg_act(hw, &lg_act_id, 2); + if (ret) + goto exit_error; + + if (lg_act_id == ICE_INVAL_LG_ACT_INDEX) + goto exit_error; + + /* Update the switch rule to add the counter action */ + ret = ice_add_counter_act(hw, m_entry, counter_id, lg_act_id); + if (!ret) { + ice_release_lock(rule_lock); + return ret; + } + +exit_error: + ice_release_lock(rule_lock); + /* only remove entry if it did not exist previously */ + if (!entry_exist) + ret = ice_remove_mac(hw, &l_head); + + return ret; +} + +/* This is mapping table entry that maps every word within a given protocol + * structure to the real byte offset as per the specification of that + * protocol header. + * for example dst address is 3 words in ethertype header and corresponding + * bytes are 0, 2, 3 in the actual packet header and src address is at 4, 6, 8 + * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a + * matching entry describing its field. This needs to be updated if new + * structure is added to that union. + */ +static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = { + { ICE_MAC_OFOS, { 0, 2, 4, 6, 8, 10, 12 } }, + { ICE_MAC_IL, { 0, 2, 4, 6, 8, 10, 12 } }, + { ICE_ETYPE_OL, { 0 } }, + { ICE_VLAN_OFOS, { 0, 2 } }, + { ICE_IPV4_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } }, + { ICE_IPV4_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } }, + { ICE_IPV6_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, + 26, 28, 30, 32, 34, 36, 38 } }, + { ICE_IPV6_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, + 26, 28, 30, 32, 34, 36, 38 } }, + { ICE_TCP_IL, { 0, 2 } }, + { ICE_UDP_OF, { 0, 2 } }, + { ICE_UDP_ILOS, { 0, 2 } }, + { ICE_SCTP_IL, { 0, 2 } }, + { ICE_VXLAN, { 8, 10, 12, 14 } }, + { ICE_GENEVE, { 8, 10, 12, 14 } }, + { ICE_VXLAN_GPE, { 8, 10, 12, 14 } }, + { ICE_NVGRE, { 0, 2, 4, 6 } }, + { ICE_GTP, { 8, 10, 12, 14, 16, 18, 20 } }, + { ICE_PPPOE, { 0, 2, 4, 6 } }, + { ICE_PFCP, { 8, 10, 12, 14, 16, 18, 20, 22 } }, + { ICE_L2TPV3, { 0, 2, 4, 6, 8, 10 } }, + { ICE_ESP, { 0, 2, 4, 6 } }, + { ICE_AH, { 0, 2, 4, 6, 8, 10 } }, + { ICE_NAT_T, { 8, 10, 12, 14 } }, +}; + +/* The following table describes preferred grouping of recipes. + * If a recipe that needs to be programmed is a superset or matches one of the + * following combinations, then the recipe needs to be chained as per the + * following policy. + */ + +static const struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = { + { ICE_MAC_OFOS, ICE_MAC_OFOS_HW }, + { ICE_MAC_IL, ICE_MAC_IL_HW }, + { ICE_ETYPE_OL, ICE_ETYPE_OL_HW }, + { ICE_VLAN_OFOS, ICE_VLAN_OL_HW }, + { ICE_IPV4_OFOS, ICE_IPV4_OFOS_HW }, + { ICE_IPV4_IL, ICE_IPV4_IL_HW }, + { ICE_IPV6_OFOS, ICE_IPV6_OFOS_HW }, + { ICE_IPV6_IL, ICE_IPV6_IL_HW }, + { ICE_TCP_IL, ICE_TCP_IL_HW }, + { ICE_UDP_OF, ICE_UDP_OF_HW }, + { ICE_UDP_ILOS, ICE_UDP_ILOS_HW }, + { ICE_SCTP_IL, ICE_SCTP_IL_HW }, + { ICE_VXLAN, ICE_UDP_OF_HW }, + { ICE_GENEVE, ICE_UDP_OF_HW }, + { ICE_VXLAN_GPE, ICE_UDP_OF_HW }, + { ICE_NVGRE, ICE_GRE_OF_HW }, + { ICE_GTP, ICE_UDP_OF_HW }, + { ICE_PPPOE, ICE_PPPOE_HW }, + { ICE_PFCP, ICE_UDP_ILOS_HW }, + { ICE_L2TPV3, ICE_L2TPV3_HW }, + { ICE_ESP, ICE_ESP_HW }, + { ICE_AH, ICE_AH_HW }, + { ICE_NAT_T, ICE_UDP_ILOS_HW }, +}; + +/** + * ice_find_recp - find a recipe + * @hw: pointer to the hardware structure + * @lkup_exts: extension sequence to match + * + * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found. 
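+ * A recipe only matches if its tunnel type equals the given tun_type, or if
+ * tun_type is ICE_SW_TUN_AND_NON_TUN.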
+ */ +static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts, + enum ice_sw_tunnel_type tun_type) +{ + bool refresh_required = true; + struct ice_sw_recipe *recp; + u8 i; + + /* Walk through existing recipes to find a match */ + recp = hw->switch_info->recp_list; + for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) { + /* If recipe was not created for this ID, in SW bookkeeping, + * check if FW has an entry for this recipe. If the FW has an + * entry update it in our SW bookkeeping and continue with the + * matching. + */ + if (!recp[i].recp_created) + if (ice_get_recp_frm_fw(hw, + hw->switch_info->recp_list, i, + &refresh_required)) + continue; + + /* Skip inverse action recipes */ + if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl & + ICE_AQ_RECIPE_ACT_INV_ACT) + continue; + + /* if number of words we are looking for match */ + if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) { + struct ice_fv_word *ar = recp[i].lkup_exts.fv_words; + struct ice_fv_word *be = lkup_exts->fv_words; + u16 *cr = recp[i].lkup_exts.field_mask; + u16 *de = lkup_exts->field_mask; + bool found = true; + u8 pe, qr; + + /* ar, cr, and qr are related to the recipe words, while + * be, de and pe are related to the lookup words + */ + for (pe = 0; pe < lkup_exts->n_val_words; pe++) { + for (qr = 0; qr < recp[i].lkup_exts.n_val_words; + qr++) { + if (ar[qr].off == be[pe].off && + ar[qr].prot_id == be[pe].prot_id && + cr[qr] == de[pe]) + /* Found the "pe"th word in the + * given recipe + */ + break; + } + /* After walking through all the words in the + * "i"th recipe if "p"th word was not found then + * this recipe is not what we are looking for. + * So break out from this loop and try the next + * recipe + */ + if (qr >= recp[i].lkup_exts.n_val_words) { + found = false; + break; + } + } + /* If for "i"th recipe the found was never set to false + * then it means we found our match + */ + if ((tun_type == recp[i].tun_type || + tun_type == ICE_SW_TUN_AND_NON_TUN) && found) + return i; /* Return the recipe ID */ + } + } + return ICE_MAX_NUM_RECIPES; +} + +/** + * ice_prot_type_to_id - get protocol ID from protocol type + * @type: protocol type + * @id: pointer to variable that will receive the ID + * + * Returns true if found, false otherwise + */ +static bool ice_prot_type_to_id(enum ice_protocol_type type, u8 *id) +{ + u8 i; + + for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++) + if (ice_prot_id_tbl[i].type == type) { + *id = ice_prot_id_tbl[i].protocol_id; + return true; + } + return false; +} + +/** + * ice_find_valid_words - count valid words + * @rule: advanced rule with lookup information + * @lkup_exts: byte offset extractions of the words that are valid + * + * calculate valid words in a lookup rule using mask value + */ +static u8 +ice_fill_valid_words(struct ice_adv_lkup_elem *rule, + struct ice_prot_lkup_ext *lkup_exts) +{ + u8 j, word, prot_id, ret_val; + + if (!ice_prot_type_to_id(rule->type, &prot_id)) + return 0; + + word = lkup_exts->n_val_words; + + for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++) + if (((u16 *)&rule->m_u)[j] && + rule->type < ARRAY_SIZE(ice_prot_ext)) { + /* No more space to accommodate */ + if (word >= ICE_MAX_CHAIN_WORDS) + return 0; + lkup_exts->fv_words[word].off = + ice_prot_ext[rule->type].offs[j]; + lkup_exts->fv_words[word].prot_id = + ice_prot_id_tbl[rule->type].protocol_id; + lkup_exts->field_mask[word] = + BE16_TO_CPU(((__be16 *)&rule->m_u)[j]); + word++; + } + + ret_val = word - lkup_exts->n_val_words; + lkup_exts->n_val_words = word; 
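+	/* ret_val is the number of words added to lkup_exts by this element */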
+ + return ret_val; +} + +/** + * ice_create_first_fit_recp_def - Create a recipe grouping + * @hw: pointer to the hardware structure + * @lkup_exts: an array of protocol header extractions + * @rg_list: pointer to a list that stores new recipe groups + * @recp_cnt: pointer to a variable that stores returned number of recipe groups + * + * Using first fit algorithm, take all the words that are still not done + * and start grouping them in 4-word groups. Each group makes up one + * recipe. + */ +static enum ice_status +ice_create_first_fit_recp_def(struct ice_hw *hw, + struct ice_prot_lkup_ext *lkup_exts, + struct LIST_HEAD_TYPE *rg_list, + u8 *recp_cnt) +{ + struct ice_pref_recipe_group *grp = NULL; + u8 j; + + *recp_cnt = 0; + + if (!lkup_exts->n_val_words) { + struct ice_recp_grp_entry *entry; + + entry = (struct ice_recp_grp_entry *) + ice_malloc(hw, sizeof(*entry)); + if (!entry) + return ICE_ERR_NO_MEMORY; + LIST_ADD(&entry->l_entry, rg_list); + grp = &entry->r_group; + (*recp_cnt)++; + grp->n_val_pairs = 0; + } + + /* Walk through every word in the rule to check if it is not done. If so + * then this word needs to be part of a new recipe. + */ + for (j = 0; j < lkup_exts->n_val_words; j++) + if (!ice_is_bit_set(lkup_exts->done, j)) { + if (!grp || + grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) { + struct ice_recp_grp_entry *entry; + + entry = (struct ice_recp_grp_entry *) + ice_malloc(hw, sizeof(*entry)); + if (!entry) + return ICE_ERR_NO_MEMORY; + LIST_ADD(&entry->l_entry, rg_list); + grp = &entry->r_group; + (*recp_cnt)++; + } + + grp->pairs[grp->n_val_pairs].prot_id = + lkup_exts->fv_words[j].prot_id; + grp->pairs[grp->n_val_pairs].off = + lkup_exts->fv_words[j].off; + grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j]; + grp->n_val_pairs++; + } + + return ICE_SUCCESS; +} + +/** + * ice_fill_fv_word_index - fill in the field vector indices for a recipe group + * @hw: pointer to the hardware structure + * @fv_list: field vector with the extraction sequence information + * @rg_list: recipe groupings with protocol-offset pairs + * + * Helper function to fill in the field vector indices for protocol-offset + * pairs. These indexes are then ultimately programmed into a recipe. 
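+ * Returns ICE_ERR_PARAM if a protocol/offset pair cannot be located in the
+ * field vector.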
+ */ +static enum ice_status +ice_fill_fv_word_index(struct ice_hw *hw, struct LIST_HEAD_TYPE *fv_list, + struct LIST_HEAD_TYPE *rg_list) +{ + struct ice_sw_fv_list_entry *fv; + struct ice_recp_grp_entry *rg; + struct ice_fv_word *fv_ext; + + if (LIST_EMPTY(fv_list)) + return ICE_SUCCESS; + + fv = LIST_FIRST_ENTRY(fv_list, struct ice_sw_fv_list_entry, list_entry); + fv_ext = fv->fv_ptr->ew; + + LIST_FOR_EACH_ENTRY(rg, rg_list, ice_recp_grp_entry, l_entry) { + u8 i; + + for (i = 0; i < rg->r_group.n_val_pairs; i++) { + struct ice_fv_word *pr; + bool found = false; + u16 mask; + u8 j; + + pr = &rg->r_group.pairs[i]; + mask = rg->r_group.mask[i]; + + for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++) + if (fv_ext[j].prot_id == pr->prot_id && + fv_ext[j].off == pr->off) { + found = true; + + /* Store index of field vector */ + rg->fv_idx[i] = j; + rg->fv_mask[i] = mask; + break; + } + + /* Protocol/offset could not be found, caller gave an + * invalid pair + */ + if (!found) + return ICE_ERR_PARAM; + } + } + + return ICE_SUCCESS; +} + +/** + * ice_find_free_recp_res_idx - find free result indexes for recipe + * @hw: pointer to hardware structure + * @profiles: bitmap of profiles that will be associated with the new recipe + * @free_idx: pointer to variable to receive the free index bitmap + * + * The algorithm used here is: + * 1. When creating a new recipe, create a set P which contains all + * Profiles that will be associated with our new recipe + * + * 2. For each Profile p in set P: + * a. Add all recipes associated with Profile p into set R + * b. Optional : PossibleIndexes &= profile[p].possibleIndexes + * [initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF] + * i. Or just assume they all have the same possible indexes: + * 44, 45, 46, 47 + * i.e., PossibleIndexes = 0x0000F00000000000 + * + * 3. For each Recipe r in set R: + * a. UsedIndexes |= (bitwise or ) recipe[r].res_indexes + * b. FreeIndexes = UsedIndexes ^ PossibleIndexes + * + * FreeIndexes will contain the bits indicating the indexes free for use, + * then the code needs to update the recipe[r].used_result_idx_bits to + * indicate which indexes were selected for use by this recipe. + */ +static u16 +ice_find_free_recp_res_idx(struct ice_hw *hw, const ice_bitmap_t *profiles, + ice_bitmap_t *free_idx) +{ + ice_declare_bitmap(possible_idx, ICE_MAX_FV_WORDS); + ice_declare_bitmap(recipes, ICE_MAX_NUM_RECIPES); + ice_declare_bitmap(used_idx, ICE_MAX_FV_WORDS); + u16 count = 0; + u16 bit; + + ice_zero_bitmap(possible_idx, ICE_MAX_FV_WORDS); + ice_zero_bitmap(recipes, ICE_MAX_NUM_RECIPES); + ice_zero_bitmap(used_idx, ICE_MAX_FV_WORDS); + ice_zero_bitmap(free_idx, ICE_MAX_FV_WORDS); + + for (count = 0; count < ICE_MAX_FV_WORDS; count++) + ice_set_bit(count, possible_idx); + + /* For each profile we are going to associate the recipe with, add the + * recipes that are associated with that profile. This will give us + * the set of recipes that our recipe may collide with. Also, determine + * what possible result indexes are usable given this set of profiles. + */ + bit = 0; + while (ICE_MAX_NUM_PROFILES > + (bit = ice_find_next_bit(profiles, ICE_MAX_NUM_PROFILES, bit))) { + ice_or_bitmap(recipes, recipes, profile_to_recipe[bit], + ICE_MAX_NUM_RECIPES); + ice_and_bitmap(possible_idx, possible_idx, + hw->switch_info->prof_res_bm[bit], + ICE_MAX_FV_WORDS); + bit++; + } + + /* For each recipe that our new recipe may collide with, determine + * which indexes have been used. 
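+ * used_idx accumulates the result indexes (res_idxs) already claimed by
+ * those recipes.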
+ */ + for (bit = 0; bit < ICE_MAX_NUM_RECIPES; bit++) + if (ice_is_bit_set(recipes, bit)) { + ice_or_bitmap(used_idx, used_idx, + hw->switch_info->recp_list[bit].res_idxs, + ICE_MAX_FV_WORDS); + } + + ice_xor_bitmap(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS); + + /* return number of free indexes */ + count = 0; + bit = 0; + while (ICE_MAX_FV_WORDS > + (bit = ice_find_next_bit(free_idx, ICE_MAX_FV_WORDS, bit))) { + count++; + bit++; + } + + return count; +} + +/** + * ice_add_sw_recipe - function to call AQ calls to create switch recipe + * @hw: pointer to hardware structure + * @rm: recipe management list entry + * @match_tun: if field vector index for tunnel needs to be programmed + * @profiles: bitmap of profiles that will be assocated. + */ +static enum ice_status +ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm, + bool match_tun, ice_bitmap_t *profiles) +{ + ice_declare_bitmap(result_idx_bm, ICE_MAX_FV_WORDS); + struct ice_aqc_recipe_data_elem *tmp; + struct ice_aqc_recipe_data_elem *buf; + struct ice_recp_grp_entry *entry; + enum ice_status status; + u16 free_res_idx; + u16 recipe_count; + u8 chain_idx; + u8 recps = 0; + + /* When more than one recipe are required, another recipe is needed to + * chain them together. Matching a tunnel metadata ID takes up one of + * the match fields in the chaining recipe reducing the number of + * chained recipes by one. + */ + /* check number of free result indices */ + ice_zero_bitmap(result_idx_bm, ICE_MAX_FV_WORDS); + free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm); + + ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n", + free_res_idx, rm->n_grp_count); + + if (rm->n_grp_count > 1) { + if (rm->n_grp_count > free_res_idx) + return ICE_ERR_MAX_LIMIT; + + rm->n_grp_count++; + } + + if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE) + return ICE_ERR_MAX_LIMIT; + + tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw, + ICE_MAX_NUM_RECIPES, + sizeof(*tmp)); + if (!tmp) + return ICE_ERR_NO_MEMORY; + + buf = (struct ice_aqc_recipe_data_elem *) + ice_calloc(hw, rm->n_grp_count, sizeof(*buf)); + if (!buf) { + status = ICE_ERR_NO_MEMORY; + goto err_mem; + } + + ice_zero_bitmap(rm->r_bitmap, ICE_MAX_NUM_RECIPES); + recipe_count = ICE_MAX_NUM_RECIPES; + status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC, + NULL); + if (status || recipe_count == 0) + goto err_unroll; + + /* Allocate the recipe resources, and configure them according to the + * match fields from protocol headers and extracted field vectors. + */ + chain_idx = ice_find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS); + LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) { + u8 i; + + status = ice_alloc_recipe(hw, &entry->rid); + if (status) + goto err_unroll; + + /* Clear the result index of the located recipe, as this will be + * updated, if needed, later in the recipe creation process. + */ + tmp[0].content.result_indx = 0; + + buf[recps] = tmp[0]; + buf[recps].recipe_indx = (u8)entry->rid; + /* if the recipe is a non-root recipe RID should be programmed + * as 0 for the rules to be applied correctly. + */ + buf[recps].content.rid = 0; + ice_memset(&buf[recps].content.lkup_indx, 0, + sizeof(buf[recps].content.lkup_indx), + ICE_NONDMA_MEM); + + /* All recipes use look-up index 0 to match switch ID. 
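+ * The remaining look-up indexes are first marked as ignored and then filled
+ * from this group's field vector indexes below.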
*/ + buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX; + buf[recps].content.mask[0] = + CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK); + /* Setup lkup_indx 1..4 to INVALID/ignore and set the mask + * to be 0 + */ + for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) { + buf[recps].content.lkup_indx[i] = 0x80; + buf[recps].content.mask[i] = 0; + } + + for (i = 0; i < entry->r_group.n_val_pairs; i++) { + buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i]; + buf[recps].content.mask[i + 1] = + CPU_TO_LE16(entry->fv_mask[i]); + } + + if (rm->n_grp_count > 1) { + /* Checks to see if there really is a valid result index + * that can be used. + */ + if (chain_idx >= ICE_MAX_FV_WORDS) { + ice_debug(hw, ICE_DBG_SW, + "No chain index available\n"); + status = ICE_ERR_MAX_LIMIT; + goto err_unroll; + } + + entry->chain_idx = chain_idx; + buf[recps].content.result_indx = + ICE_AQ_RECIPE_RESULT_EN | + ((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) & + ICE_AQ_RECIPE_RESULT_DATA_M); + ice_clear_bit(chain_idx, result_idx_bm); + chain_idx = ice_find_first_bit(result_idx_bm, + ICE_MAX_FV_WORDS); + } + + /* fill recipe dependencies */ + ice_zero_bitmap((ice_bitmap_t *)buf[recps].recipe_bitmap, + ICE_MAX_NUM_RECIPES); + ice_set_bit(buf[recps].recipe_indx, + (ice_bitmap_t *)buf[recps].recipe_bitmap); + buf[recps].content.act_ctrl_fwd_priority = rm->priority; + recps++; + } + + if (rm->n_grp_count == 1) { + rm->root_rid = buf[0].recipe_indx; + ice_set_bit(buf[0].recipe_indx, rm->r_bitmap); + buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT; + if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) { + ice_memcpy(buf[0].recipe_bitmap, rm->r_bitmap, + sizeof(buf[0].recipe_bitmap), + ICE_NONDMA_TO_NONDMA); + } else { + status = ICE_ERR_BAD_PTR; + goto err_unroll; + } + /* Applicable only for ROOT_RECIPE, set the fwd_priority for + * the recipe which is getting created if specified + * by user. Usually any advanced switch filter, which results + * into new extraction sequence, ended up creating a new recipe + * of type ROOT and usually recipes are associated with profiles + * Switch rule referreing newly created recipe, needs to have + * either/or 'fwd' or 'join' priority, otherwise switch rule + * evaluation will not happen correctly. In other words, if + * switch rule to be evaluated on priority basis, then recipe + * needs to have priority, otherwise it will be evaluated last. + */ + buf[0].content.act_ctrl_fwd_priority = rm->priority; + } else { + struct ice_recp_grp_entry *last_chain_entry; + u16 rid, i; + + /* Allocate the last recipe that will chain the outcomes of the + * other recipes together + */ + status = ice_alloc_recipe(hw, &rid); + if (status) + goto err_unroll; + + buf[recps].recipe_indx = (u8)rid; + buf[recps].content.rid = (u8)rid; + buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT; + /* the new entry created should also be part of rg_list to + * make sure we have complete recipe + */ + last_chain_entry = (struct ice_recp_grp_entry *)ice_malloc(hw, + sizeof(*last_chain_entry)); + if (!last_chain_entry) { + status = ICE_ERR_NO_MEMORY; + goto err_unroll; + } + last_chain_entry->rid = rid; + ice_memset(&buf[recps].content.lkup_indx, 0, + sizeof(buf[recps].content.lkup_indx), + ICE_NONDMA_MEM); + /* All recipes use look-up index 0 to match switch ID. 
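+ * The remaining look-up indexes of this chaining recipe are filled with the
+ * result (chain) indexes of the member recipes below.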
*/ + buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX; + buf[recps].content.mask[0] = + CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK); + for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) { + buf[recps].content.lkup_indx[i] = + ICE_AQ_RECIPE_LKUP_IGNORE; + buf[recps].content.mask[i] = 0; + } + + i = 1; + /* update r_bitmap with the recp that is used for chaining */ + ice_set_bit(rid, rm->r_bitmap); + /* this is the recipe that chains all the other recipes so it + * should not have a chaining ID to indicate the same + */ + last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND; + LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, + l_entry) { + last_chain_entry->fv_idx[i] = entry->chain_idx; + buf[recps].content.lkup_indx[i] = entry->chain_idx; + buf[recps].content.mask[i++] = CPU_TO_LE16(0xFFFF); + ice_set_bit(entry->rid, rm->r_bitmap); + } + LIST_ADD(&last_chain_entry->l_entry, &rm->rg_list); + if (sizeof(buf[recps].recipe_bitmap) >= + sizeof(rm->r_bitmap)) { + ice_memcpy(buf[recps].recipe_bitmap, rm->r_bitmap, + sizeof(buf[recps].recipe_bitmap), + ICE_NONDMA_TO_NONDMA); + } else { + status = ICE_ERR_BAD_PTR; + goto err_unroll; + } + buf[recps].content.act_ctrl_fwd_priority = rm->priority; + + /* To differentiate among different UDP tunnels, a meta data ID + * flag is used. + */ + if (match_tun) { + buf[recps].content.lkup_indx[i] = ICE_TUN_FLAG_FV_IND; + buf[recps].content.mask[i] = + CPU_TO_LE16(ICE_TUN_FLAG_MASK); + } + + recps++; + rm->root_rid = (u8)rid; + } + status = ice_acquire_change_lock(hw, ICE_RES_WRITE); + if (status) + goto err_unroll; + + status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL); + ice_release_change_lock(hw); + if (status) + goto err_unroll; + + /* Every recipe that just got created add it to the recipe + * book keeping list + */ + LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) { + struct ice_switch_info *sw = hw->switch_info; + bool is_root, idx_found = false; + struct ice_sw_recipe *recp; + u16 idx, buf_idx = 0; + + /* find buffer index for copying some data */ + for (idx = 0; idx < rm->n_grp_count; idx++) + if (buf[idx].recipe_indx == entry->rid) { + buf_idx = idx; + idx_found = true; + } + + if (!idx_found) { + status = ICE_ERR_OUT_OF_RANGE; + goto err_unroll; + } + + recp = &sw->recp_list[entry->rid]; + is_root = (rm->root_rid == entry->rid); + recp->is_root = is_root; + + recp->root_rid = entry->rid; + recp->big_recp = (is_root && rm->n_grp_count > 1); + + ice_memcpy(&recp->ext_words, entry->r_group.pairs, + entry->r_group.n_val_pairs * + sizeof(struct ice_fv_word), + ICE_NONDMA_TO_NONDMA); + + ice_memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap, + sizeof(recp->r_bitmap), ICE_NONDMA_TO_NONDMA); + + /* Copy non-result fv index values and masks to recipe. This + * call will also update the result recipe bitmask. 
+ */ + ice_collect_result_idx(&buf[buf_idx], recp); + + /* for non-root recipes, also copy to the root, this allows + * easier matching of a complete chained recipe + */ + if (!is_root) + ice_collect_result_idx(&buf[buf_idx], + &sw->recp_list[rm->root_rid]); + + recp->n_ext_words = entry->r_group.n_val_pairs; + recp->chain_idx = entry->chain_idx; + recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority; + recp->n_grp_count = rm->n_grp_count; + recp->tun_type = rm->tun_type; + recp->recp_created = true; + recp->adv_rule = 1; + } + rm->root_buf = buf; + ice_free(hw, tmp); + return status; + +err_unroll: +err_mem: + ice_free(hw, tmp); + ice_free(hw, buf); + return status; +} + +/** + * ice_create_recipe_group - creates recipe group + * @hw: pointer to hardware structure + * @rm: recipe management list entry + * @lkup_exts: lookup elements + */ +static enum ice_status +ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm, + struct ice_prot_lkup_ext *lkup_exts) +{ + enum ice_status status; + u8 recp_count = 0; + + rm->n_grp_count = 0; + + /* Create recipes for words that are marked not done by packing them + * as best fit. + */ + status = ice_create_first_fit_recp_def(hw, lkup_exts, + &rm->rg_list, &recp_count); + if (!status) { + rm->n_grp_count += recp_count; + rm->n_ext_words = lkup_exts->n_val_words; + ice_memcpy(&rm->ext_words, lkup_exts->fv_words, + sizeof(rm->ext_words), ICE_NONDMA_TO_NONDMA); + ice_memcpy(rm->word_masks, lkup_exts->field_mask, + sizeof(rm->word_masks), ICE_NONDMA_TO_NONDMA); + } + + return status; +} + +/** + * ice_get_fv - get field vectors/extraction sequences for spec. lookup types + * @hw: pointer to hardware structure + * @lkups: lookup elements or match criteria for the advanced recipe, one + * structure per protocol header + * @lkups_cnt: number of protocols + * @bm: bitmap of field vectors to consider + * @fv_list: pointer to a list that holds the returned field vectors + */ +static enum ice_status +ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, + ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list) +{ + enum ice_status status; + u8 *prot_ids; + u16 i; + + if (!lkups_cnt) + return ICE_SUCCESS; + + prot_ids = (u8 *)ice_calloc(hw, lkups_cnt, sizeof(*prot_ids)); + if (!prot_ids) + return ICE_ERR_NO_MEMORY; + + for (i = 0; i < lkups_cnt; i++) + if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) { + status = ICE_ERR_CFG; + goto free_mem; + } + + /* Find field vectors that include all specified protocol types */ + status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, bm, fv_list); + +free_mem: + ice_free(hw, prot_ids); + return status; +} + +/** + * ice_tun_type_match_mask - determine if tun type needs a match mask + * @tun_type: tunnel type + * @mask: mask to be used for the tunnel + */ +static bool ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type, u16 *mask) +{ + switch (tun_type) { + case ICE_SW_TUN_VXLAN_GPE: + case ICE_SW_TUN_NVGRE: + case ICE_SW_TUN_UDP: + case ICE_ALL_TUNNELS: + *mask = ICE_TUN_FLAG_MASK; + return true; + + default: + *mask = 0; + return false; + } +} + +/** + * ice_add_special_words - Add words that are not protocols, such as metadata + * @rinfo: other information regarding the rule e.g. 
priority and action info + * @lkup_exts: lookup word structure + */ +static enum ice_status +ice_add_special_words(struct ice_adv_rule_info *rinfo, + struct ice_prot_lkup_ext *lkup_exts) +{ + u16 mask; + + /* If this is a tunneled packet, then add recipe index to match the + * tunnel bit in the packet metadata flags. + */ + if (ice_tun_type_match_word(rinfo->tun_type, &mask)) { + if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) { + u8 word = lkup_exts->n_val_words++; + + lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW; + lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID_OFF; + lkup_exts->field_mask[word] = mask; + } else { + return ICE_ERR_MAX_LIMIT; + } + } + + return ICE_SUCCESS; +} + +/* ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule + * @hw: pointer to hardware structure + * @rinfo: other information regarding the rule e.g. priority and action info + * @bm: pointer to memory for returning the bitmap of field vectors + */ +static void +ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo, + ice_bitmap_t *bm) +{ + enum ice_prof_type prof_type; + + ice_zero_bitmap(bm, ICE_MAX_NUM_PROFILES); + + switch (rinfo->tun_type) { + case ICE_NON_TUN: + prof_type = ICE_PROF_NON_TUN; + break; + case ICE_ALL_TUNNELS: + prof_type = ICE_PROF_TUN_ALL; + break; + case ICE_SW_TUN_VXLAN_GPE: + case ICE_SW_TUN_GENEVE: + case ICE_SW_TUN_VXLAN: + case ICE_SW_TUN_UDP: + case ICE_SW_TUN_GTP: + prof_type = ICE_PROF_TUN_UDP; + break; + case ICE_SW_TUN_NVGRE: + prof_type = ICE_PROF_TUN_GRE; + break; + case ICE_SW_TUN_PPPOE: + prof_type = ICE_PROF_TUN_PPPOE; + break; + case ICE_SW_TUN_PROFID_IPV6_ESP: + case ICE_SW_TUN_IPV6_ESP: + ice_set_bit(ICE_PROFID_IPV6_ESP, bm); + return; + case ICE_SW_TUN_PROFID_IPV6_AH: + case ICE_SW_TUN_IPV6_AH: + ice_set_bit(ICE_PROFID_IPV6_AH, bm); + return; + case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3: + case ICE_SW_TUN_IPV6_L2TPV3: + ice_set_bit(ICE_PROFID_MAC_IPV6_L2TPV3, bm); + return; + case ICE_SW_TUN_PROFID_IPV6_NAT_T: + case ICE_SW_TUN_IPV6_NAT_T: + ice_set_bit(ICE_PROFID_IPV6_NAT_T, bm); + return; + case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE: + ice_set_bit(ICE_PROFID_IPV4_PFCP_NODE, bm); + return; + case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION: + ice_set_bit(ICE_PROFID_IPV4_PFCP_SESSION, bm); + return; + case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE: + ice_set_bit(ICE_PROFID_IPV6_PFCP_NODE, bm); + return; + case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION: + ice_set_bit(ICE_PROFID_IPV6_PFCP_SESSION, bm); + return; + case ICE_SW_TUN_IPV4_NAT_T: + ice_set_bit(ICE_PROFID_IPV4_NAT_T, bm); + return; + case ICE_SW_TUN_IPV4_L2TPV3: + ice_set_bit(ICE_PROFID_MAC_IPV4_L2TPV3, bm); + return; + case ICE_SW_TUN_IPV4_ESP: + ice_set_bit(ICE_PROFID_IPV4_ESP, bm); + return; + case ICE_SW_TUN_IPV4_AH: + ice_set_bit(ICE_PROFID_IPV4_AH, bm); + return; + case ICE_SW_TUN_AND_NON_TUN: + default: + prof_type = ICE_PROF_ALL; + break; + } + + ice_get_sw_fv_bitmap(hw, prof_type, bm); +} + +/** + * ice_is_prof_rule - determine if rule type is a profile rule + * @type: the rule type + * + * if the rule type is a profile rule, that means that there no field value + * match required, in this case just a profile hit is required. 
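+ * Returns true only for the ICE_SW_TUN_PROFID_* tunnel types.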
+ */ +bool ice_is_prof_rule(enum ice_sw_tunnel_type type) +{ + switch (type) { + case ICE_SW_TUN_PROFID_IPV6_ESP: + case ICE_SW_TUN_PROFID_IPV6_AH: + case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3: + case ICE_SW_TUN_PROFID_IPV6_NAT_T: + case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE: + case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION: + case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE: + case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION: + return true; + default: + break; + } + + return false; +} + +/** + * ice_add_adv_recipe - Add an advanced recipe that is not part of the default + * @hw: pointer to hardware structure + * @lkups: lookup elements or match criteria for the advanced recipe, one + * structure per protocol header + * @lkups_cnt: number of protocols + * @rinfo: other information regarding the rule e.g. priority and action info + * @rid: return the recipe ID of the recipe created + */ +static enum ice_status +ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, + u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid) +{ + ice_declare_bitmap(fv_bitmap, ICE_MAX_NUM_PROFILES); + ice_declare_bitmap(profiles, ICE_MAX_NUM_PROFILES); + struct ice_prot_lkup_ext *lkup_exts; + struct ice_recp_grp_entry *r_entry; + struct ice_sw_fv_list_entry *fvit; + struct ice_recp_grp_entry *r_tmp; + struct ice_sw_fv_list_entry *tmp; + enum ice_status status = ICE_SUCCESS; + struct ice_sw_recipe *rm; + bool match_tun = false; + u16 mask; + u8 i; + + if (!ice_is_prof_rule(rinfo->tun_type) && !lkups_cnt) + return ICE_ERR_PARAM; + + lkup_exts = (struct ice_prot_lkup_ext *) + ice_malloc(hw, sizeof(*lkup_exts)); + if (!lkup_exts) + return ICE_ERR_NO_MEMORY; + + /* Determine the number of words to be matched and if it exceeds a + * recipe's restrictions + */ + for (i = 0; i < lkups_cnt; i++) { + u16 count; + + if (lkups[i].type >= ICE_PROTOCOL_LAST) { + status = ICE_ERR_CFG; + goto err_free_lkup_exts; + } + + count = ice_fill_valid_words(&lkups[i], lkup_exts); + if (!count) { + status = ICE_ERR_CFG; + goto err_free_lkup_exts; + } + } + + rm = (struct ice_sw_recipe *)ice_malloc(hw, sizeof(*rm)); + if (!rm) { + status = ICE_ERR_NO_MEMORY; + goto err_free_lkup_exts; + } + + /* Get field vectors that contain fields extracted from all the protocol + * headers being programmed. + */ + INIT_LIST_HEAD(&rm->fv_list); + INIT_LIST_HEAD(&rm->rg_list); + + /* Get bitmap of field vectors (profiles) that are compatible with the + * rule request; only these will be searched in the subsequent call to + * ice_get_fv. + */ + ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap); + + status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list); + if (status) + goto err_unroll; + + /* Group match words into recipes using preferred recipe grouping + * criteria. + */ + status = ice_create_recipe_group(hw, rm, lkup_exts); + if (status) + goto err_unroll; + + /* For certain tunnel types it is necessary to use a metadata ID flag to + * differentiate different tunnel types. A separate recipe needs to be + * used for the metadata. + */ + if (ice_tun_type_match_word(rinfo->tun_type, &mask) && + rm->n_grp_count > 1) + match_tun = mask; + + /* set the recipe priority if specified */ + rm->priority = (u8)rinfo->priority; + + /* Find offsets from the field vector. Pick the first one for all the + * recipes. 
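+ * (ice_fill_fv_word_index uses the first entry of fv_list as the reference
+ * extraction sequence.)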
+ */ + status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list); + if (status) + goto err_unroll; + + /* An empty FV list means to use all the profiles returned in the + * profile bitmap + */ + if (LIST_EMPTY(&rm->fv_list)) { + u16 j; + + for (j = 0; j < ICE_MAX_NUM_PROFILES; j++) + if (ice_is_bit_set(fv_bitmap, j)) { + struct ice_sw_fv_list_entry *fvl; + + fvl = (struct ice_sw_fv_list_entry *) + ice_malloc(hw, sizeof(*fvl)); + if (!fvl) + goto err_unroll; + fvl->fv_ptr = NULL; + fvl->profile_id = j; + LIST_ADD(&fvl->list_entry, &rm->fv_list); + } + } + + /* get bitmap of all profiles the recipe will be associated with */ + ice_zero_bitmap(profiles, ICE_MAX_NUM_PROFILES); + LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry, + list_entry) { + ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id); + ice_set_bit((u16)fvit->profile_id, profiles); + } + + /* Create any special protocol/offset pairs, such as looking at tunnel + * bits by extracting metadata + */ + status = ice_add_special_words(rinfo, lkup_exts); + if (status) + goto err_free_lkup_exts; + + /* Look for a recipe which matches our requested fv / mask list */ + *rid = ice_find_recp(hw, lkup_exts, rinfo->tun_type); + if (*rid < ICE_MAX_NUM_RECIPES) + /* Success if found a recipe that match the existing criteria */ + goto err_unroll; + + rm->tun_type = rinfo->tun_type; + /* Recipe we need does not exist, add a recipe */ + status = ice_add_sw_recipe(hw, rm, match_tun, profiles); + if (status) + goto err_unroll; + + /* Associate all the recipes created with all the profiles in the + * common field vector. + */ + LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry, + list_entry) { + ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES); + u16 j; + + status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id, + (u8 *)r_bitmap, NULL); + if (status) + goto err_unroll; + + ice_or_bitmap(r_bitmap, r_bitmap, rm->r_bitmap, + ICE_MAX_NUM_RECIPES); + status = ice_acquire_change_lock(hw, ICE_RES_WRITE); + if (status) + goto err_unroll; + + status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id, + (u8 *)r_bitmap, + NULL); + ice_release_change_lock(hw); + + if (status) + goto err_unroll; + + /* Update profile to recipe bitmap array */ + ice_cp_bitmap(profile_to_recipe[fvit->profile_id], r_bitmap, + ICE_MAX_NUM_RECIPES); + + /* Update recipe to profile bitmap array */ + for (j = 0; j < ICE_MAX_NUM_RECIPES; j++) + if (ice_is_bit_set(r_bitmap, j)) + ice_set_bit((u16)fvit->profile_id, + recipe_to_profile[j]); + } + + *rid = rm->root_rid; + ice_memcpy(&hw->switch_info->recp_list[*rid].lkup_exts, + lkup_exts, sizeof(*lkup_exts), ICE_NONDMA_TO_NONDMA); +err_unroll: + LIST_FOR_EACH_ENTRY_SAFE(r_entry, r_tmp, &rm->rg_list, + ice_recp_grp_entry, l_entry) { + LIST_DEL(&r_entry->l_entry); + ice_free(hw, r_entry); + } + + LIST_FOR_EACH_ENTRY_SAFE(fvit, tmp, &rm->fv_list, ice_sw_fv_list_entry, + list_entry) { + LIST_DEL(&fvit->list_entry); + ice_free(hw, fvit); + } + + if (rm->root_buf) + ice_free(hw, rm->root_buf); + + ice_free(hw, rm); + +err_free_lkup_exts: + ice_free(hw, lkup_exts); + + return status; +} + +/** + * ice_find_dummy_packet - find dummy packet by tunnel type + * + * @lkups: lookup elements or match criteria for the advanced recipe, one + * structure per protocol header + * @lkups_cnt: number of protocols + * @tun_type: tunnel type from the match criteria + * @pkt: dummy packet to fill according to filter match criteria + * @pkt_len: packet length of dummy packet + * @offsets: pointer to receive the pointer to 
the offsets for the packet + */ +static void +ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, + enum ice_sw_tunnel_type tun_type, const u8 **pkt, + u16 *pkt_len, + const struct ice_dummy_pkt_offsets **offsets) +{ + bool tcp = false, udp = false, ipv6 = false, vlan = false; + bool gre = false; + u16 i; + + for (i = 0; i < lkups_cnt; i++) { + if (lkups[i].type == ICE_UDP_ILOS) + udp = true; + else if (lkups[i].type == ICE_TCP_IL) + tcp = true; + else if (lkups[i].type == ICE_IPV6_OFOS) + ipv6 = true; + else if (lkups[i].type == ICE_VLAN_OFOS) + vlan = true; + else if (lkups[i].type == ICE_IPV4_OFOS && + lkups[i].h_u.ipv4_hdr.protocol == + ICE_IPV4_NVGRE_PROTO_ID && + lkups[i].m_u.ipv4_hdr.protocol == + 0xFF) + gre = true; + else if (lkups[i].type == ICE_PPPOE && + lkups[i].h_u.pppoe_hdr.ppp_prot_id == + CPU_TO_BE16(ICE_PPP_IPV6_PROTO_ID) && + lkups[i].m_u.pppoe_hdr.ppp_prot_id == + 0xFFFF) + ipv6 = true; + else if (lkups[i].type == ICE_ETYPE_OL && + lkups[i].h_u.ethertype.ethtype_id == + CPU_TO_BE16(ICE_IPV6_ETHER_ID) && + lkups[i].m_u.ethertype.ethtype_id == + 0xFFFF) + ipv6 = true; + } + + if (tun_type == ICE_SW_TUN_IPV4_ESP) { + *pkt = dummy_ipv4_esp_pkt; + *pkt_len = sizeof(dummy_ipv4_esp_pkt); + *offsets = dummy_ipv4_esp_packet_offsets; + return; + } + + if (tun_type == ICE_SW_TUN_IPV6_ESP) { + *pkt = dummy_ipv6_esp_pkt; + *pkt_len = sizeof(dummy_ipv6_esp_pkt); + *offsets = dummy_ipv6_esp_packet_offsets; + return; + } + + if (tun_type == ICE_SW_TUN_IPV4_AH) { + *pkt = dummy_ipv4_ah_pkt; + *pkt_len = sizeof(dummy_ipv4_ah_pkt); + *offsets = dummy_ipv4_ah_packet_offsets; + return; + } + + if (tun_type == ICE_SW_TUN_IPV6_AH) { + *pkt = dummy_ipv6_ah_pkt; + *pkt_len = sizeof(dummy_ipv6_ah_pkt); + *offsets = dummy_ipv6_ah_packet_offsets; + return; + } + + if (tun_type == ICE_SW_TUN_IPV4_NAT_T) { + *pkt = dummy_ipv4_nat_pkt; + *pkt_len = sizeof(dummy_ipv4_nat_pkt); + *offsets = dummy_ipv4_nat_packet_offsets; + return; + } + + if (tun_type == ICE_SW_TUN_IPV6_NAT_T) { + *pkt = dummy_ipv6_nat_pkt; + *pkt_len = sizeof(dummy_ipv6_nat_pkt); + *offsets = dummy_ipv6_nat_packet_offsets; + return; + } + + if (tun_type == ICE_SW_TUN_IPV4_L2TPV3) { + *pkt = dummy_ipv4_l2tpv3_pkt; + *pkt_len = sizeof(dummy_ipv4_l2tpv3_pkt); + *offsets = dummy_ipv4_l2tpv3_packet_offsets; + return; + } + + if (tun_type == ICE_SW_TUN_IPV6_L2TPV3) { + *pkt = dummy_ipv6_l2tpv3_pkt; + *pkt_len = sizeof(dummy_ipv6_l2tpv3_pkt); + *offsets = dummy_ipv6_l2tpv3_packet_offsets; + return; + } + + if (tun_type == ICE_SW_TUN_GTP) { + *pkt = dummy_udp_gtp_packet; + *pkt_len = sizeof(dummy_udp_gtp_packet); + *offsets = dummy_udp_gtp_packet_offsets; + return; + } + if (tun_type == ICE_SW_TUN_PPPOE && ipv6) { + *pkt = dummy_pppoe_ipv6_packet; + *pkt_len = sizeof(dummy_pppoe_ipv6_packet); + *offsets = dummy_pppoe_packet_offsets; + return; + } else if (tun_type == ICE_SW_TUN_PPPOE) { + *pkt = dummy_pppoe_ipv4_packet; + *pkt_len = sizeof(dummy_pppoe_ipv4_packet); + *offsets = dummy_pppoe_packet_offsets; + return; + } + + if (tun_type == ICE_ALL_TUNNELS) { + *pkt = dummy_gre_udp_packet; + *pkt_len = sizeof(dummy_gre_udp_packet); + *offsets = dummy_gre_udp_packet_offsets; + return; + } + + if (tun_type == ICE_SW_TUN_NVGRE || gre) { + if (tcp) { + *pkt = dummy_gre_tcp_packet; + *pkt_len = sizeof(dummy_gre_tcp_packet); + *offsets = dummy_gre_tcp_packet_offsets; + return; + } + + *pkt = dummy_gre_udp_packet; + *pkt_len = sizeof(dummy_gre_udp_packet); + *offsets = dummy_gre_udp_packet_offsets; + return; + } + + if (tun_type == 
ICE_SW_TUN_VXLAN || tun_type == ICE_SW_TUN_GENEVE || + tun_type == ICE_SW_TUN_VXLAN_GPE || tun_type == ICE_SW_TUN_UDP) { + if (tcp) { + *pkt = dummy_udp_tun_tcp_packet; + *pkt_len = sizeof(dummy_udp_tun_tcp_packet); + *offsets = dummy_udp_tun_tcp_packet_offsets; + return; + } + + *pkt = dummy_udp_tun_udp_packet; + *pkt_len = sizeof(dummy_udp_tun_udp_packet); + *offsets = dummy_udp_tun_udp_packet_offsets; + return; + } + + if (udp && !ipv6) { + if (vlan) { + *pkt = dummy_vlan_udp_packet; + *pkt_len = sizeof(dummy_vlan_udp_packet); + *offsets = dummy_vlan_udp_packet_offsets; + return; + } + *pkt = dummy_udp_packet; + *pkt_len = sizeof(dummy_udp_packet); + *offsets = dummy_udp_packet_offsets; + return; + } else if (udp && ipv6) { + if (vlan) { + *pkt = dummy_vlan_udp_ipv6_packet; + *pkt_len = sizeof(dummy_vlan_udp_ipv6_packet); + *offsets = dummy_vlan_udp_ipv6_packet_offsets; + return; + } + *pkt = dummy_udp_ipv6_packet; + *pkt_len = sizeof(dummy_udp_ipv6_packet); + *offsets = dummy_udp_ipv6_packet_offsets; + return; + } else if ((tcp && ipv6) || ipv6) { + if (vlan) { + *pkt = dummy_vlan_tcp_ipv6_packet; + *pkt_len = sizeof(dummy_vlan_tcp_ipv6_packet); + *offsets = dummy_vlan_tcp_ipv6_packet_offsets; + return; + } + *pkt = dummy_tcp_ipv6_packet; + *pkt_len = sizeof(dummy_tcp_ipv6_packet); + *offsets = dummy_tcp_ipv6_packet_offsets; + return; + } + + if (vlan) { + *pkt = dummy_vlan_tcp_packet; + *pkt_len = sizeof(dummy_vlan_tcp_packet); + *offsets = dummy_vlan_tcp_packet_offsets; + } else { + *pkt = dummy_tcp_packet; + *pkt_len = sizeof(dummy_tcp_packet); + *offsets = dummy_tcp_packet_offsets; + } +} + +/** + * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria + * + * @lkups: lookup elements or match criteria for the advanced recipe, one + * structure per protocol header + * @lkups_cnt: number of protocols + * @s_rule: stores rule information from the match criteria + * @dummy_pkt: dummy packet to fill according to filter match criteria + * @pkt_len: packet length of dummy packet + * @offsets: offset info for the dummy packet + */ +static enum ice_status +ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, + struct ice_aqc_sw_rules_elem *s_rule, + const u8 *dummy_pkt, u16 pkt_len, + const struct ice_dummy_pkt_offsets *offsets) +{ + u8 *pkt; + u16 i; + + /* Start with a packet with a pre-defined/dummy content. Then, fill + * in the header values to be looked up or matched. 
+ */ + pkt = s_rule->pdata.lkup_tx_rx.hdr; + + ice_memcpy(pkt, dummy_pkt, pkt_len, ICE_NONDMA_TO_NONDMA); + + for (i = 0; i < lkups_cnt; i++) { + enum ice_protocol_type type; + u16 offset = 0, len = 0, j; + bool found = false; + + /* find the start of this layer; it should be found since this + * was already checked when search for the dummy packet + */ + type = lkups[i].type; + for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) { + if (type == offsets[j].type) { + offset = offsets[j].offset; + found = true; + break; + } + } + /* this should never happen in a correct calling sequence */ + if (!found) + return ICE_ERR_PARAM; + + switch (lkups[i].type) { + case ICE_MAC_OFOS: + case ICE_MAC_IL: + len = sizeof(struct ice_ether_hdr); + break; + case ICE_ETYPE_OL: + len = sizeof(struct ice_ethtype_hdr); + break; + case ICE_VLAN_OFOS: + len = sizeof(struct ice_vlan_hdr); + break; + case ICE_IPV4_OFOS: + case ICE_IPV4_IL: + len = sizeof(struct ice_ipv4_hdr); + break; + case ICE_IPV6_OFOS: + case ICE_IPV6_IL: + len = sizeof(struct ice_ipv6_hdr); + break; + case ICE_TCP_IL: + case ICE_UDP_OF: + case ICE_UDP_ILOS: + len = sizeof(struct ice_l4_hdr); + break; + case ICE_SCTP_IL: + len = sizeof(struct ice_sctp_hdr); + break; + case ICE_NVGRE: + len = sizeof(struct ice_nvgre); + break; + case ICE_VXLAN: + case ICE_GENEVE: + case ICE_VXLAN_GPE: + len = sizeof(struct ice_udp_tnl_hdr); + break; + + case ICE_GTP: + len = sizeof(struct ice_udp_gtp_hdr); + break; + case ICE_PPPOE: + len = sizeof(struct ice_pppoe_hdr); + break; + case ICE_ESP: + len = sizeof(struct ice_esp_hdr); + break; + case ICE_NAT_T: + len = sizeof(struct ice_nat_t_hdr); + break; + case ICE_AH: + len = sizeof(struct ice_ah_hdr); + break; + case ICE_L2TPV3: + len = sizeof(struct ice_l2tpv3_sess_hdr); + break; + default: + return ICE_ERR_PARAM; + } + + /* the length should be a word multiple */ + if (len % ICE_BYTES_PER_WORD) + return ICE_ERR_CFG; + + /* We have the offset to the header start, the length, the + * caller's header values and mask. Use this information to + * copy the data into the dummy packet appropriately based on + * the mask. Note that we need to only write the bits as + * indicated by the mask to make sure we don't improperly write + * over any significant packet data. 
+ */ + for (j = 0; j < len / sizeof(u16); j++) + if (((u16 *)&lkups[i].m_u)[j]) + ((u16 *)(pkt + offset))[j] = + (((u16 *)(pkt + offset))[j] & + ~((u16 *)&lkups[i].m_u)[j]) | + (((u16 *)&lkups[i].h_u)[j] & + ((u16 *)&lkups[i].m_u)[j]); + } + + s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(pkt_len); + + return ICE_SUCCESS; +} + +/** + * ice_fill_adv_packet_tun - fill dummy packet with udp tunnel port + * @hw: pointer to the hardware structure + * @tun_type: tunnel type + * @pkt: dummy packet to fill in + * @offsets: offset info for the dummy packet + */ +static enum ice_status +ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type, + u8 *pkt, const struct ice_dummy_pkt_offsets *offsets) +{ + u16 open_port, i; + + switch (tun_type) { + case ICE_SW_TUN_AND_NON_TUN: + case ICE_SW_TUN_VXLAN_GPE: + case ICE_SW_TUN_VXLAN: + case ICE_SW_TUN_UDP: + if (!ice_get_open_tunnel_port(hw, TNL_VXLAN, &open_port)) + return ICE_ERR_CFG; + break; + + case ICE_SW_TUN_GENEVE: + if (!ice_get_open_tunnel_port(hw, TNL_GENEVE, &open_port)) + return ICE_ERR_CFG; + break; + + default: + /* Nothing needs to be done for this tunnel type */ + return ICE_SUCCESS; + } + + /* Find the outer UDP protocol header and insert the port number */ + for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) { + if (offsets[i].type == ICE_UDP_OF) { + struct ice_l4_hdr *hdr; + u16 offset; + + offset = offsets[i].offset; + hdr = (struct ice_l4_hdr *)&pkt[offset]; + hdr->dst_port = CPU_TO_BE16(open_port); + + return ICE_SUCCESS; + } + } + + return ICE_ERR_CFG; +} + +/** + * ice_find_adv_rule_entry - Search a rule entry + * @hw: pointer to the hardware structure + * @lkups: lookup elements or match criteria for the advanced recipe, one + * structure per protocol header + * @lkups_cnt: number of protocols + * @recp_id: recipe ID for which we are finding the rule + * @rinfo: other information regarding the rule e.g. priority and action info + * + * Helper function to search for a given advance rule entry + * Returns pointer to entry storing the rule if found + */ +static struct ice_adv_fltr_mgmt_list_entry * +ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, + u16 lkups_cnt, u16 recp_id, + struct ice_adv_rule_info *rinfo) +{ + struct ice_adv_fltr_mgmt_list_entry *list_itr; + struct ice_switch_info *sw = hw->switch_info; + int i; + + LIST_FOR_EACH_ENTRY(list_itr, &sw->recp_list[recp_id].filt_rules, + ice_adv_fltr_mgmt_list_entry, list_entry) { + bool lkups_matched = true; + + if (lkups_cnt != list_itr->lkups_cnt) + continue; + for (i = 0; i < list_itr->lkups_cnt; i++) + if (memcmp(&list_itr->lkups[i], &lkups[i], + sizeof(*lkups))) { + lkups_matched = false; + break; + } + if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag && + rinfo->tun_type == list_itr->rule_info.tun_type && + lkups_matched) + return list_itr; + } + return NULL; +} + +/** + * ice_adv_add_update_vsi_list + * @hw: pointer to the hardware structure + * @m_entry: pointer to current adv filter management list entry + * @cur_fltr: filter information from the book keeping entry + * @new_fltr: filter information with the new VSI to be added + * + * Call AQ command to add or update previously created VSI list with new VSI. 
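+ * Returns ICE_ERR_NOT_IMPL for forward-to-queue, forward-to-queue-group and
+ * drop actions.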
+ * + * Helper function to do book keeping associated with adding filter information + * The algorithm to do the booking keeping is described below : + * When a VSI needs to subscribe to a given advanced filter + * if only one VSI has been added till now + * Allocate a new VSI list and add two VSIs + * to this list using switch rule command + * Update the previously created switch rule with the + * newly created VSI list ID + * if a VSI list was previously created + * Add the new VSI to the previously created VSI list set + * using the update switch rule command + */ +static enum ice_status +ice_adv_add_update_vsi_list(struct ice_hw *hw, + struct ice_adv_fltr_mgmt_list_entry *m_entry, + struct ice_adv_rule_info *cur_fltr, + struct ice_adv_rule_info *new_fltr) +{ + enum ice_status status; + u16 vsi_list_id = 0; + + if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q || + cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP || + cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET) + return ICE_ERR_NOT_IMPL; + + if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q || + new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) && + (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI || + cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST)) + return ICE_ERR_NOT_IMPL; + + /* Workaround fix for unexpected rule deletion by kernel PF + * during VF reset. + */ + if (new_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI && + cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI) + return ICE_ERR_NOT_IMPL; + + if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) { + /* Only one entry existed in the mapping and it was not already + * a part of a VSI list. So, create a VSI list with the old and + * new VSIs. + */ + struct ice_fltr_info tmp_fltr; + u16 vsi_handle_arr[2]; + + /* A rule already exists with the new VSI being added */ + if (cur_fltr->sw_act.fwd_id.hw_vsi_id == + new_fltr->sw_act.fwd_id.hw_vsi_id) + return ICE_ERR_ALREADY_EXISTS; + + vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle; + vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle; + status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2, + &vsi_list_id, + ICE_SW_LKUP_LAST); + if (status) + return status; + + ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM); + tmp_fltr.flag = m_entry->rule_info.sw_act.flag; + tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id; + tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST; + tmp_fltr.fwd_id.vsi_list_id = vsi_list_id; + tmp_fltr.lkup_type = ICE_SW_LKUP_LAST; + + /* Update the previous switch rule of "forward to VSI" to + * "fwd to VSI list" + */ + status = ice_update_pkt_fwd_rule(hw, &tmp_fltr); + if (status) + return status; + + cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id; + cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST; + m_entry->vsi_list_info = + ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2, + vsi_list_id); + } else { + u16 vsi_handle = new_fltr->sw_act.vsi_handle; + + if (!m_entry->vsi_list_info) + return ICE_ERR_CFG; + + /* A rule already exists with the new VSI being added */ + if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle)) + return ICE_SUCCESS; + + /* Update the previously created VSI list set with + * the new VSI ID passed in + */ + vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id; + + status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, + vsi_list_id, false, + ice_aqc_opc_update_sw_rules, + ICE_SW_LKUP_LAST); + /* update VSI list mapping info with new VSI ID */ + if (!status) + ice_set_bit(vsi_handle, + m_entry->vsi_list_info->vsi_map); + } + if (!status) + m_entry->vsi_count++; + return status; +} + +/** + * ice_add_adv_rule 
- helper function to create an advanced switch rule + * @hw: pointer to the hardware structure + * @lkups: information on the words that needs to be looked up. All words + * together makes one recipe + * @lkups_cnt: num of entries in the lkups array + * @rinfo: other information related to the rule that needs to be programmed + * @added_entry: this will return recipe_id, rule_id and vsi_handle. should be + * ignored is case of error. + * + * This function can program only 1 rule at a time. The lkups is used to + * describe the all the words that forms the "lookup" portion of the recipe. + * These words can span multiple protocols. Callers to this function need to + * pass in a list of protocol headers with lookup information along and mask + * that determines which words are valid from the given protocol header. + * rinfo describes other information related to this rule such as forwarding + * IDs, priority of this rule, etc. + */ +enum ice_status +ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, + u16 lkups_cnt, struct ice_adv_rule_info *rinfo, + struct ice_rule_query_data *added_entry) +{ + struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL; + u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle; + const struct ice_dummy_pkt_offsets *pkt_offsets; + struct ice_aqc_sw_rules_elem *s_rule = NULL; + struct LIST_HEAD_TYPE *rule_head; + struct ice_switch_info *sw; + enum ice_status status; + const u8 *pkt = NULL; + bool prof_rule; + u16 word_cnt; + u32 act = 0; + u8 q_rgn; + + /* Initialize profile to result index bitmap */ + if (!hw->switch_info->prof_res_bm_init) { + hw->switch_info->prof_res_bm_init = 1; + ice_init_prof_result_bm(hw); + } + + prof_rule = ice_is_prof_rule(rinfo->tun_type); + if (!prof_rule && !lkups_cnt) + return ICE_ERR_PARAM; + + /* get # of words we need to match */ + word_cnt = 0; + for (i = 0; i < lkups_cnt; i++) { + u16 j, *ptr; + + ptr = (u16 *)&lkups[i].m_u; + for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++) + if (ptr[j] != 0) + word_cnt++; + } + + if (prof_rule) { + if (word_cnt > ICE_MAX_CHAIN_WORDS) + return ICE_ERR_PARAM; + } else { + if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS) + return ICE_ERR_PARAM; + } + + /* make sure that we can locate a dummy packet */ + ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len, + &pkt_offsets); + if (!pkt) { + status = ICE_ERR_PARAM; + goto err_ice_add_adv_rule; + } + + if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI || + rinfo->sw_act.fltr_act == ICE_FWD_TO_Q || + rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP || + rinfo->sw_act.fltr_act == ICE_DROP_PACKET)) + return ICE_ERR_CFG; + + vsi_handle = rinfo->sw_act.vsi_handle; + if (!ice_is_vsi_valid(hw, vsi_handle)) + return ICE_ERR_PARAM; + + if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI) + rinfo->sw_act.fwd_id.hw_vsi_id = + ice_get_hw_vsi_num(hw, vsi_handle); + if (rinfo->sw_act.flag & ICE_FLTR_TX) + rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle); + + status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid); + if (status) + return status; + m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo); + if (m_entry) { + /* we have to add VSI to VSI_LIST and increment vsi_count. 
+ * Also Update VSI list so that we can change forwarding rule + * if the rule already exists, we will check if it exists with + * same vsi_id, if not then add it to the VSI list if it already + * exists if not then create a VSI list and add the existing VSI + * ID and the new VSI ID to the list + * We will add that VSI to the list + */ + status = ice_adv_add_update_vsi_list(hw, m_entry, + &m_entry->rule_info, + rinfo); + if (added_entry) { + added_entry->rid = rid; + added_entry->rule_id = m_entry->rule_info.fltr_rule_id; + added_entry->vsi_handle = rinfo->sw_act.vsi_handle; + } + return status; + } + rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len; + s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rule_buf_sz); + if (!s_rule) + return ICE_ERR_NO_MEMORY; + act |= ICE_SINGLE_ACT_LAN_ENABLE; + switch (rinfo->sw_act.fltr_act) { + case ICE_FWD_TO_VSI: + act |= (rinfo->sw_act.fwd_id.hw_vsi_id << + ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M; + act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT; + break; + case ICE_FWD_TO_Q: + act |= ICE_SINGLE_ACT_TO_Q; + act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) & + ICE_SINGLE_ACT_Q_INDEX_M; + break; + case ICE_FWD_TO_QGRP: + q_rgn = rinfo->sw_act.qgrp_size > 0 ? + (u8)ice_ilog2(rinfo->sw_act.qgrp_size) : 0; + act |= ICE_SINGLE_ACT_TO_Q; + act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) & + ICE_SINGLE_ACT_Q_INDEX_M; + act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) & + ICE_SINGLE_ACT_Q_REGION_M; + break; + case ICE_DROP_PACKET: + act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP | + ICE_SINGLE_ACT_VALID_BIT; + break; + default: + status = ICE_ERR_CFG; + goto err_ice_add_adv_rule; + } + + /* set the rule LOOKUP type based on caller specified 'RX' + * instead of hardcoding it to be either LOOKUP_TX/RX + * + * for 'RX' set the source to be the port number + * for 'TX' set the source to be the source HW VSI number (determined + * by caller) + */ + if (rinfo->rx) { + s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX); + s_rule->pdata.lkup_tx_rx.src = + CPU_TO_LE16(hw->port_info->lport); + } else { + s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX); + s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(rinfo->sw_act.src); + } + + s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(rid); + s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act); + + status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt, + pkt_len, pkt_offsets); + if (status) + goto err_ice_add_adv_rule; + + if (rinfo->tun_type != ICE_NON_TUN && + rinfo->tun_type != ICE_SW_TUN_AND_NON_TUN) { + status = ice_fill_adv_packet_tun(hw, rinfo->tun_type, + s_rule->pdata.lkup_tx_rx.hdr, + pkt_offsets); + if (status) + goto err_ice_add_adv_rule; + } + + status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule, + rule_buf_sz, 1, ice_aqc_opc_add_sw_rules, + NULL); + if (status) + goto err_ice_add_adv_rule; + adv_fltr = (struct ice_adv_fltr_mgmt_list_entry *) + ice_malloc(hw, sizeof(struct ice_adv_fltr_mgmt_list_entry)); + if (!adv_fltr) { + status = ICE_ERR_NO_MEMORY; + goto err_ice_add_adv_rule; + } + + adv_fltr->lkups = (struct ice_adv_lkup_elem *) + ice_memdup(hw, lkups, lkups_cnt * sizeof(*lkups), + ICE_NONDMA_TO_NONDMA); + if (!adv_fltr->lkups && !prof_rule) { + status = ICE_ERR_NO_MEMORY; + goto err_ice_add_adv_rule; + } + + adv_fltr->lkups_cnt = lkups_cnt; + adv_fltr->rule_info = *rinfo; + adv_fltr->rule_info.fltr_rule_id = + LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index); + sw = hw->switch_info; + sw->recp_list[rid].adv_rule = 
true; + rule_head = &sw->recp_list[rid].filt_rules; + + if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI) + adv_fltr->vsi_count = 1; + + /* Add rule entry to book keeping list */ + LIST_ADD(&adv_fltr->list_entry, rule_head); + if (added_entry) { + added_entry->rid = rid; + added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id; + added_entry->vsi_handle = rinfo->sw_act.vsi_handle; + } +err_ice_add_adv_rule: + if (status && adv_fltr) { + ice_free(hw, adv_fltr->lkups); + ice_free(hw, adv_fltr); + } + + ice_free(hw, s_rule); + + return status; +} + +/** + * ice_adv_rem_update_vsi_list + * @hw: pointer to the hardware structure + * @vsi_handle: VSI handle of the VSI to remove + * @fm_list: filter management entry for which the VSI list management needs to + * be done + */ +static enum ice_status +ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle, + struct ice_adv_fltr_mgmt_list_entry *fm_list) +{ + struct ice_vsi_list_map_info *vsi_list_info; + enum ice_sw_lkup_type lkup_type; + enum ice_status status; + u16 vsi_list_id; + + if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST || + fm_list->vsi_count == 0) + return ICE_ERR_PARAM; + + /* A rule with the VSI being removed does not exist */ + if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle)) + return ICE_ERR_DOES_NOT_EXIST; + + lkup_type = ICE_SW_LKUP_LAST; + vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id; + status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true, + ice_aqc_opc_update_sw_rules, + lkup_type); + if (status) + return status; + + fm_list->vsi_count--; + ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map); + vsi_list_info = fm_list->vsi_list_info; + if (fm_list->vsi_count == 1) { + struct ice_fltr_info tmp_fltr; + u16 rem_vsi_handle; + + rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map, + ICE_MAX_VSI); + if (!ice_is_vsi_valid(hw, rem_vsi_handle)) + return ICE_ERR_OUT_OF_RANGE; + + /* Make sure VSI list is empty before removing it below */ + status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1, + vsi_list_id, true, + ice_aqc_opc_update_sw_rules, + lkup_type); + if (status) + return status; + + ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM); + tmp_fltr.flag = fm_list->rule_info.sw_act.flag; + tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id; + fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI; + tmp_fltr.fltr_act = ICE_FWD_TO_VSI; + tmp_fltr.fwd_id.hw_vsi_id = + ice_get_hw_vsi_num(hw, rem_vsi_handle); + fm_list->rule_info.sw_act.fwd_id.hw_vsi_id = + ice_get_hw_vsi_num(hw, rem_vsi_handle); + + /* Update the previous switch rule of "MAC fwd to VSI list" to + * "MAC forward to VSI" + */ + status = ice_update_pkt_fwd_rule(hw, &tmp_fltr); + if (status) { + ice_debug(hw, ICE_DBG_SW, + "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n", + tmp_fltr.fwd_id.hw_vsi_id, status); + return status; + } + + /* Remove the VSI list since it is no longer used */ + status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type); + if (status) { + ice_debug(hw, ICE_DBG_SW, + "Failed to remove VSI list %d, error %d\n", + vsi_list_id, status); + return status; + } + + LIST_DEL(&vsi_list_info->list_entry); + ice_free(hw, vsi_list_info); + fm_list->vsi_list_info = NULL; + } + + return status; +} + +/** + * ice_rem_adv_rule - removes existing advanced switch rule + * @hw: pointer to the hardware structure + * @lkups: information on the words that need to be looked up.
All words + * together makes one recipe + * @lkups_cnt: num of entries in the lkups array + * @rinfo: Its the pointer to the rule information for the rule + * + * This function can be used to remove 1 rule at a time. The lkups is + * used to describe all the words that forms the "lookup" portion of the + * rule. These words can span multiple protocols. Callers to this function + * need to pass in a list of protocol headers with lookup information along + * and mask that determines which words are valid from the given protocol + * header. rinfo describes other information related to this rule such as + * forwarding IDs, priority of this rule, etc. + */ +enum ice_status +ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, + u16 lkups_cnt, struct ice_adv_rule_info *rinfo) +{ + struct ice_adv_fltr_mgmt_list_entry *list_elem; + struct ice_prot_lkup_ext lkup_exts; + struct ice_lock *rule_lock; /* Lock to protect filter rule list */ + enum ice_status status = ICE_SUCCESS; + bool remove_rule = false; + u16 i, rid, vsi_handle; + + ice_memset(&lkup_exts, 0, sizeof(lkup_exts), ICE_NONDMA_MEM); + for (i = 0; i < lkups_cnt; i++) { + u16 count; + + if (lkups[i].type >= ICE_PROTOCOL_LAST) + return ICE_ERR_CFG; + + count = ice_fill_valid_words(&lkups[i], &lkup_exts); + if (!count) + return ICE_ERR_CFG; + } + + /* Create any special protocol/offset pairs, such as looking at tunnel + * bits by extracting metadata + */ + status = ice_add_special_words(rinfo, &lkup_exts); + if (status) + return status; + + rid = ice_find_recp(hw, &lkup_exts, rinfo->tun_type); + /* If did not find a recipe that match the existing criteria */ + if (rid == ICE_MAX_NUM_RECIPES) + return ICE_ERR_PARAM; + + rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock; + list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo); + /* the rule is already removed */ + if (!list_elem) + return ICE_SUCCESS; + ice_acquire_lock(rule_lock); + if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) { + remove_rule = true; + } else if (list_elem->vsi_count > 1) { + list_elem->vsi_list_info->ref_cnt--; + remove_rule = false; + vsi_handle = rinfo->sw_act.vsi_handle; + status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem); + } else { + vsi_handle = rinfo->sw_act.vsi_handle; + status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem); + if (status) { + ice_release_lock(rule_lock); + return status; + } + if (list_elem->vsi_count == 0) + remove_rule = true; + } + ice_release_lock(rule_lock); + if (remove_rule) { + struct ice_aqc_sw_rules_elem *s_rule; + u16 rule_buf_sz; + + rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE; + s_rule = + (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, + rule_buf_sz); + if (!s_rule) + return ICE_ERR_NO_MEMORY; + s_rule->pdata.lkup_tx_rx.act = 0; + s_rule->pdata.lkup_tx_rx.index = + CPU_TO_LE16(list_elem->rule_info.fltr_rule_id); + s_rule->pdata.lkup_tx_rx.hdr_len = 0; + status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule, + rule_buf_sz, 1, + ice_aqc_opc_remove_sw_rules, NULL); + if (status == ICE_SUCCESS) { + ice_acquire_lock(rule_lock); + LIST_DEL(&list_elem->list_entry); + ice_free(hw, list_elem->lkups); + ice_free(hw, list_elem); + ice_release_lock(rule_lock); + } + ice_free(hw, s_rule); + } + return status; +} + +/** + * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID + * @hw: pointer to the hardware structure + * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID + * + * This function is used to remove 1 rule at 
a time. The removal is based on + * the remove_entry parameter. This function will remove the rule for a given + * vsi_handle and rule_id pair that is passed in remove_entry + */ +enum ice_status +ice_rem_adv_rule_by_id(struct ice_hw *hw, + struct ice_rule_query_data *remove_entry) +{ + struct ice_adv_fltr_mgmt_list_entry *list_itr; + struct LIST_HEAD_TYPE *list_head; + struct ice_adv_rule_info rinfo; + struct ice_switch_info *sw; + + sw = hw->switch_info; + if (!sw->recp_list[remove_entry->rid].recp_created) + return ICE_ERR_PARAM; + list_head = &sw->recp_list[remove_entry->rid].filt_rules; + LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry, + list_entry) { + if (list_itr->rule_info.fltr_rule_id == + remove_entry->rule_id) { + rinfo = list_itr->rule_info; + rinfo.sw_act.vsi_handle = remove_entry->vsi_handle; + return ice_rem_adv_rule(hw, list_itr->lkups, + list_itr->lkups_cnt, &rinfo); + } + } + return ICE_ERR_PARAM; +} + +/** + * ice_rem_adv_rule_for_vsi - removes existing advanced switch rules for a + * given VSI handle + * @hw: pointer to the hardware structure + * @vsi_handle: VSI handle for which we are supposed to remove all the rules. + * + * This function is used to remove all the rules for a given VSI. As soon + * as removing a rule fails, it returns immediately with the error code; + * otherwise it returns ICE_SUCCESS + */ +enum ice_status +ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle) +{ + struct ice_adv_fltr_mgmt_list_entry *list_itr; + struct ice_vsi_list_map_info *map_info; + struct LIST_HEAD_TYPE *list_head; + struct ice_adv_rule_info rinfo; + struct ice_switch_info *sw; + enum ice_status status; + u16 vsi_list_id = 0; + u8 rid; + + sw = hw->switch_info; + for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) { + if (!sw->recp_list[rid].recp_created) + continue; + if (!sw->recp_list[rid].adv_rule) + continue; + list_head = &sw->recp_list[rid].filt_rules; + map_info = NULL; + LIST_FOR_EACH_ENTRY(list_itr, list_head, + ice_adv_fltr_mgmt_list_entry, list_entry) { + map_info = ice_find_vsi_list_entry(&sw->recp_list[rid], + vsi_handle, + &vsi_list_id); + if (!map_info) + continue; + rinfo = list_itr->rule_info; + rinfo.sw_act.vsi_handle = vsi_handle; + status = ice_rem_adv_rule(hw, list_itr->lkups, + list_itr->lkups_cnt, &rinfo); + if (status) + return status; + map_info = NULL; + } + } + return ICE_SUCCESS; +} + +/** + * ice_replay_fltr - Replay all the filters stored by a specific list head + * @hw: pointer to the hardware structure + * @list_head: list for which filters need to be replayed + * @recp_id: Recipe ID for which rules need to be replayed + */ +static enum ice_status +ice_replay_fltr(struct ice_hw *hw, u8 recp_id, struct LIST_HEAD_TYPE *list_head) +{ + struct ice_fltr_mgmt_list_entry *itr; + enum ice_status status = ICE_SUCCESS; + struct ice_sw_recipe *recp_list; + u8 lport = hw->port_info->lport; + struct LIST_HEAD_TYPE l_head; + + if (LIST_EMPTY(list_head)) + return status; + + recp_list = &hw->switch_info->recp_list[recp_id]; + /* Move entries from the given list_head to a temporary l_head so that + * they can be replayed.
Otherwise when trying to re-add the same + * filter, the function will return already exists + */ + LIST_REPLACE_INIT(list_head, &l_head); + + /* Mark the given list_head empty by reinitializing it so filters + * could be added again by *handler + */ + LIST_FOR_EACH_ENTRY(itr, &l_head, ice_fltr_mgmt_list_entry, + list_entry) { + struct ice_fltr_list_entry f_entry; + + f_entry.fltr_info = itr->fltr_info; + if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN) { + status = ice_add_rule_internal(hw, recp_list, lport, + &f_entry); + if (status != ICE_SUCCESS) + goto end; + continue; + } + + /* Add a filter per VSI separately */ + while (1) { + u16 vsi_handle; + + vsi_handle = + ice_find_first_bit(itr->vsi_list_info->vsi_map, + ICE_MAX_VSI); + if (!ice_is_vsi_valid(hw, vsi_handle)) + break; + + ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map); + f_entry.fltr_info.vsi_handle = vsi_handle; + f_entry.fltr_info.fwd_id.hw_vsi_id = + ice_get_hw_vsi_num(hw, vsi_handle); + f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI; + if (recp_id == ICE_SW_LKUP_VLAN) + status = ice_add_vlan_internal(hw, recp_list, + &f_entry); + else + status = ice_add_rule_internal(hw, recp_list, + lport, + &f_entry); + if (status != ICE_SUCCESS) + goto end; + } + } +end: + /* Clear the filter management list */ + ice_rem_sw_rule_info(hw, &l_head); + return status; +} + +/** + * ice_replay_all_fltr - replay all filters stored in bookkeeping lists + * @hw: pointer to the hardware structure + * + * NOTE: This function does not clean up partially added filters on error. + * It is up to caller of the function to issue a reset or fail early. + */ +enum ice_status ice_replay_all_fltr(struct ice_hw *hw) +{ + struct ice_switch_info *sw = hw->switch_info; + enum ice_status status = ICE_SUCCESS; + u8 i; + + for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) { + struct LIST_HEAD_TYPE *head = &sw->recp_list[i].filt_rules; + + status = ice_replay_fltr(hw, i, head); + if (status != ICE_SUCCESS) + return status; + } + return status; +} + +/** + * ice_replay_vsi_fltr - Replay filters for requested VSI + * @hw: pointer to the hardware structure + * @vsi_handle: driver VSI handle + * @recp_id: Recipe ID for which rules need to be replayed + * @list_head: list for which filters need to be replayed + * + * Replays the filter of recipe recp_id for a VSI represented via vsi_handle. + * It is required to pass valid VSI handle. 
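+ *
+ * Example (a sketch; this mirrors how ice_replay_vsi_all_fltr below
+ * drives the non-advanced recipes):
+ *   head = &hw->switch_info->recp_list[recp_id].filt_replay_rules;
+ *   status = ice_replay_vsi_fltr(hw, vsi_handle, recp_id, head);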
+ */ +static enum ice_status +ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id, + struct LIST_HEAD_TYPE *list_head) +{ + struct ice_fltr_mgmt_list_entry *itr; + enum ice_status status = ICE_SUCCESS; + struct ice_sw_recipe *recp_list; + u16 hw_vsi_id; + + if (LIST_EMPTY(list_head)) + return status; + recp_list = &hw->switch_info->recp_list[recp_id]; + hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle); + + LIST_FOR_EACH_ENTRY(itr, list_head, ice_fltr_mgmt_list_entry, + list_entry) { + struct ice_fltr_list_entry f_entry; + + f_entry.fltr_info = itr->fltr_info; + if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN && + itr->fltr_info.vsi_handle == vsi_handle) { + /* update the src in case it is VSI num */ + if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI) + f_entry.fltr_info.src = hw_vsi_id; + status = ice_add_rule_internal(hw, recp_list, + hw->port_info->lport, + &f_entry); + if (status != ICE_SUCCESS) + goto end; + continue; + } + if (!itr->vsi_list_info || + !ice_is_bit_set(itr->vsi_list_info->vsi_map, vsi_handle)) + continue; + /* Clearing it so that the logic can add it back */ + ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map); + f_entry.fltr_info.vsi_handle = vsi_handle; + f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI; + /* update the src in case it is VSI num */ + if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI) + f_entry.fltr_info.src = hw_vsi_id; + if (recp_id == ICE_SW_LKUP_VLAN) + status = ice_add_vlan_internal(hw, recp_list, &f_entry); + else + status = ice_add_rule_internal(hw, recp_list, + hw->port_info->lport, + &f_entry); + if (status != ICE_SUCCESS) + goto end; + } +end: + return status; +} + +/** + * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI + * @hw: pointer to the hardware structure + * @vsi_handle: driver VSI handle + * @list_head: list for which filters need to be replayed + * + * Replay the advanced rule for the given VSI. + */ +static enum ice_status +ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle, + struct LIST_HEAD_TYPE *list_head) +{ + struct ice_rule_query_data added_entry = { 0 }; + struct ice_adv_fltr_mgmt_list_entry *adv_fltr; + enum ice_status status = ICE_SUCCESS; + + if (LIST_EMPTY(list_head)) + return status; + LIST_FOR_EACH_ENTRY(adv_fltr, list_head, ice_adv_fltr_mgmt_list_entry, + list_entry) { + struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info; + u16 lk_cnt = adv_fltr->lkups_cnt; + + if (vsi_handle != rinfo->sw_act.vsi_handle) + continue; + status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo, + &added_entry); + if (status) + break; + } + return status; +} + +/** + * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists + * @hw: pointer to the hardware structure + * @vsi_handle: driver VSI handle + * + * Replays filters for requested VSI via vsi_handle. + */ +enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle) +{ + struct ice_switch_info *sw = hw->switch_info; + enum ice_status status; + u8 i; + + /* Update the recipes that were created */ + for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) { + struct LIST_HEAD_TYPE *head; + + head = &sw->recp_list[i].filt_replay_rules; + if (!sw->recp_list[i].adv_rule) + status = ice_replay_vsi_fltr(hw, vsi_handle, i, head); + else + status = ice_replay_vsi_adv_rule(hw, vsi_handle, head); + if (status != ICE_SUCCESS) + return status; + } + + return ICE_SUCCESS; +} + +/** + * ice_rm_all_sw_replay_rule_info - deletes filter replay rules + * @hw: pointer to the HW struct + * + * Deletes the filter replay rules. 
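+ * It walks every recipe and, depending on recp_list[i].adv_rule, frees
+ * either the regular or the advanced replay list for that recipe.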
+ */ +void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw) +{ + struct ice_switch_info *sw = hw->switch_info; + u8 i; + + if (!sw) + return; + + for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) { + if (!LIST_EMPTY(&sw->recp_list[i].filt_replay_rules)) { + struct LIST_HEAD_TYPE *l_head; + + l_head = &sw->recp_list[i].filt_replay_rules; + if (!sw->recp_list[i].adv_rule) + ice_rem_sw_rule_info(hw, l_head); + else + ice_rem_adv_rule_info(hw, l_head); + } + } +} diff --git a/src/spdk/dpdk/drivers/net/ice/base/ice_switch.h b/src/spdk/dpdk/drivers/net/ice/base/ice_switch.h new file mode 100644 index 000000000..6bd50518f --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ice/base/ice_switch.h @@ -0,0 +1,492 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + +#ifndef _ICE_SWITCH_H_ +#define _ICE_SWITCH_H_ + +#include "ice_common.h" +#include "ice_protocol_type.h" + +#define ICE_SW_CFG_MAX_BUF_LEN 2048 +#define ICE_MAX_SW 256 +#define ICE_DFLT_VSI_INVAL 0xff +#define ICE_FLTR_RX BIT(0) +#define ICE_FLTR_TX BIT(1) +#define ICE_FLTR_TX_RX (ICE_FLTR_RX | ICE_FLTR_TX) + +/* Switch Profile IDs for Profile related switch rules */ +#define ICE_PROFID_IPV4_ESP 71 +#define ICE_PROFID_IPV6_ESP 72 +#define ICE_PROFID_IPV4_AH 73 +#define ICE_PROFID_IPV6_AH 74 +#define ICE_PROFID_IPV4_NAT_T 75 +#define ICE_PROFID_IPV6_NAT_T 76 +#define ICE_PROFID_MAC_IPV4_L2TPV3 77 +#define ICE_PROFID_MAC_IPV6_L2TPV3 78 +#define ICE_PROFID_IPV4_PFCP_NODE 79 +#define ICE_PROFID_IPV4_PFCP_SESSION 80 +#define ICE_PROFID_IPV6_PFCP_NODE 81 +#define ICE_PROFID_IPV6_PFCP_SESSION 82 + +#define DUMMY_ETH_HDR_LEN 16 +#define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE \ + (sizeof(struct ice_aqc_sw_rules_elem) - \ + sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \ + sizeof(struct ice_sw_rule_lkup_rx_tx) + DUMMY_ETH_HDR_LEN - 1) +#define ICE_SW_RULE_RX_TX_NO_HDR_SIZE \ + (sizeof(struct ice_aqc_sw_rules_elem) - \ + sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \ + sizeof(struct ice_sw_rule_lkup_rx_tx) - 1) +#define ICE_SW_RULE_LG_ACT_SIZE(n) \ + (sizeof(struct ice_aqc_sw_rules_elem) - \ + sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \ + sizeof(struct ice_sw_rule_lg_act) - \ + sizeof(((struct ice_sw_rule_lg_act *)0)->act) + \ + ((n) * sizeof(((struct ice_sw_rule_lg_act *)0)->act))) +#define ICE_SW_RULE_VSI_LIST_SIZE(n) \ + (sizeof(struct ice_aqc_sw_rules_elem) - \ + sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \ + sizeof(struct ice_sw_rule_vsi_list) - \ + sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi) + \ + ((n) * sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi))) + +/* Worst case buffer length for ice_aqc_opc_get_res_alloc */ +#define ICE_MAX_RES_TYPES 0x80 +#define ICE_AQ_GET_RES_ALLOC_BUF_LEN \ + (ICE_MAX_RES_TYPES * sizeof(struct ice_aqc_get_res_resp_elem)) + +#define ICE_VSI_INVAL_ID 0xFFFF +#define ICE_INVAL_Q_HANDLE 0xFFFF + +/* VSI context structure for add/get/update/free operations */ +struct ice_vsi_ctx { + u16 vsi_num; + u16 vsis_allocd; + u16 vsis_unallocated; + u16 flags; + struct ice_aqc_vsi_props info; + struct ice_sched_vsi_info sched; + u8 alloc_from_pool; + u16 num_lan_q_entries[ICE_MAX_TRAFFIC_CLASS]; + struct ice_q_ctx *lan_q_ctx[ICE_MAX_TRAFFIC_CLASS]; +}; + +/* This is to be used by add/update mirror rule Admin Queue command */ +struct ice_mir_rule_buf { + u16 vsi_idx; /* VSI index */ + + /* For each VSI, user can specify whether corresponding VSI + * should be added/removed to/from mirror rule + * + * add mirror rule: this should always be TRUE. 
+ * update mirror rule: add(true) or remove(false) VSI to/from + * mirror rule + */ + u8 add; +}; + +/* Switch recipe ID enum values are specific to hardware */ +enum ice_sw_lkup_type { + ICE_SW_LKUP_ETHERTYPE = 0, + ICE_SW_LKUP_MAC = 1, + ICE_SW_LKUP_MAC_VLAN = 2, + ICE_SW_LKUP_PROMISC = 3, + ICE_SW_LKUP_VLAN = 4, + ICE_SW_LKUP_DFLT = 5, + ICE_SW_LKUP_ETHERTYPE_MAC = 8, + ICE_SW_LKUP_PROMISC_VLAN = 9, + ICE_SW_LKUP_LAST +}; + +/* type of filter src ID */ +enum ice_src_id { + ICE_SRC_ID_UNKNOWN = 0, + ICE_SRC_ID_VSI, + ICE_SRC_ID_QUEUE, + ICE_SRC_ID_LPORT, +}; + +struct ice_fltr_info { + /* Look up information: how to look up packet */ + enum ice_sw_lkup_type lkup_type; + /* Forward action: filter action to do after lookup */ + enum ice_sw_fwd_act_type fltr_act; + /* rule ID returned by firmware once filter rule is created */ + u16 fltr_rule_id; + u16 flag; + + /* Source VSI for LOOKUP_TX or source port for LOOKUP_RX */ + u16 src; + enum ice_src_id src_id; + + union { + struct { + u8 mac_addr[ETH_ALEN]; + } mac; + struct { + u8 mac_addr[ETH_ALEN]; + u16 vlan_id; + } mac_vlan; + struct { + u16 vlan_id; + } vlan; + /* Set lkup_type as ICE_SW_LKUP_ETHERTYPE + * if just using ethertype as filter. Set lkup_type as + * ICE_SW_LKUP_ETHERTYPE_MAC if MAC also needs to be + * passed in as filter. + */ + struct { + u16 ethertype; + u8 mac_addr[ETH_ALEN]; /* optional */ + } ethertype_mac; + } l_data; /* Make sure to zero out the memory of l_data before using + * it or only set the data associated with lookup match + * rest everything should be zero + */ + + /* Depending on filter action */ + union { + /* queue ID in case of ICE_FWD_TO_Q and starting + * queue ID in case of ICE_FWD_TO_QGRP. + */ + u16 q_id:11; + u16 hw_vsi_id:10; + u16 vsi_id:10; + u16 vsi_list_id:10; + } fwd_id; + + /* Sw VSI handle */ + u16 vsi_handle; + + /* Set to num_queues if action is ICE_FWD_TO_QGRP. This field + * determines the range of queues the packet needs to be forwarded to. + * Note that qgrp_size must be set to a power of 2. + */ + u8 qgrp_size; + + /* Rule creations populate these indicators basing on the switch type */ + u8 lb_en; /* Indicate if packet can be looped back */ + u8 lan_en; /* Indicate if packet can be forwarded to the uplink */ +}; + +struct ice_adv_lkup_elem { + enum ice_protocol_type type; + union ice_prot_hdr h_u; /* Header values */ + union ice_prot_hdr m_u; /* Mask of header values to match */ +}; + +struct ice_sw_act_ctrl { + /* Source VSI for LOOKUP_TX or source port for LOOKUP_RX */ + u16 src; + u16 flag; + enum ice_sw_fwd_act_type fltr_act; + /* Depending on filter action */ + union { + /* This is a queue ID in case of ICE_FWD_TO_Q and starting + * queue ID in case of ICE_FWD_TO_QGRP. 
+ */ + u16 q_id:11; + u16 vsi_id:10; + u16 hw_vsi_id:10; + u16 vsi_list_id:10; + } fwd_id; + /* software VSI handle */ + u16 vsi_handle; + u8 qgrp_size; +}; + +struct ice_rule_query_data { + /* Recipe ID for which the requested rule was added */ + u16 rid; + /* Rule ID that was added or is supposed to be removed */ + u16 rule_id; + /* vsi_handle for which Rule was added or is supposed to be removed */ + u16 vsi_handle; +}; + +struct ice_adv_rule_info { + enum ice_sw_tunnel_type tun_type; + struct ice_sw_act_ctrl sw_act; + u32 priority; + u8 rx; /* true means LOOKUP_RX otherwise LOOKUP_TX */ + u16 fltr_rule_id; +}; + +/* A collection of one or more four word recipe */ +struct ice_sw_recipe { + /* For a chained recipe the root recipe is what should be used for + * programming rules + */ + u8 is_root; + u8 root_rid; + u8 recp_created; + + /* Number of extraction words */ + u8 n_ext_words; + /* Protocol ID and Offset pair (extraction word) to describe the + * recipe + */ + struct ice_fv_word ext_words[ICE_MAX_CHAIN_WORDS]; + u16 word_masks[ICE_MAX_CHAIN_WORDS]; + + /* if this recipe is a collection of other recipe */ + u8 big_recp; + + /* if this recipe is part of another bigger recipe then chain index + * corresponding to this recipe + */ + u8 chain_idx; + + /* if this recipe is a collection of other recipe then count of other + * recipes and recipe IDs of those recipes + */ + u8 n_grp_count; + + /* Bit map specifying the IDs associated with this group of recipe */ + ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES); + + enum ice_sw_tunnel_type tun_type; + + /* List of type ice_fltr_mgmt_list_entry or adv_rule */ + u8 adv_rule; + struct LIST_HEAD_TYPE filt_rules; + struct LIST_HEAD_TYPE filt_replay_rules; + + struct ice_lock filt_rule_lock; /* protect filter rule structure */ + + /* Profiles this recipe should be associated with */ + struct LIST_HEAD_TYPE fv_list; + + /* Profiles this recipe is associated with */ + u8 num_profs, *prof_ids; + + /* Possible result indexes are 44, 45, 46 and 47 */ +#define ICE_POSSIBLE_RES_IDX 0x0000F00000000000ULL + ice_declare_bitmap(res_idxs, ICE_MAX_FV_WORDS); + + /* This allows user to specify the recipe priority. + * For now, this becomes 'fwd_priority' when recipe + * is created, usually recipes can have 'fwd' and 'join' + * priority. + */ + u8 priority; + + struct LIST_HEAD_TYPE rg_list; + + /* AQ buffer associated with this recipe */ + struct ice_aqc_recipe_data_elem *root_buf; + /* This struct saves the fv_words for a given lookup */ + struct ice_prot_lkup_ext lkup_exts; +}; + +/* Bookkeeping structure to hold bitmap of VSIs corresponding to VSI list ID */ +struct ice_vsi_list_map_info { + struct LIST_ENTRY_TYPE list_entry; + ice_declare_bitmap(vsi_map, ICE_MAX_VSI); + u16 vsi_list_id; + /* counter to track how many rules are reusing this VSI list */ + u16 ref_cnt; +}; + +struct ice_fltr_list_entry { + struct LIST_ENTRY_TYPE list_entry; + enum ice_status status; + struct ice_fltr_info fltr_info; +}; + +/* This defines an entry in the list that maintains MAC or VLAN membership + * to HW list mapping, since multiple VSIs can subscribe to the same MAC or + * VLAN. As an optimization the VSI list should be created only when a + * second VSI becomes a subscriber to the same MAC address. VSI lists are always + * used for VLAN membership. 
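+ * For example, when a second VSI subscribes to the same MAC address, the
+ * rule's forward action is changed from ICE_FWD_TO_VSI to
+ * ICE_FWD_TO_VSI_LIST and both VSI handles are tracked in the shared
+ * vsi_list_info->vsi_map bitmap.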
+ */ +struct ice_fltr_mgmt_list_entry { + /* back pointer to VSI list ID to VSI list mapping */ + struct ice_vsi_list_map_info *vsi_list_info; + u16 vsi_count; +#define ICE_INVAL_LG_ACT_INDEX 0xffff + u16 lg_act_idx; +#define ICE_INVAL_SW_MARKER_ID 0xffff + u16 sw_marker_id; + struct LIST_ENTRY_TYPE list_entry; + struct ice_fltr_info fltr_info; +#define ICE_INVAL_COUNTER_ID 0xff + u8 counter_index; +}; + +struct ice_adv_fltr_mgmt_list_entry { + struct LIST_ENTRY_TYPE list_entry; + + struct ice_adv_lkup_elem *lkups; + struct ice_adv_rule_info rule_info; + u16 lkups_cnt; + struct ice_vsi_list_map_info *vsi_list_info; + u16 vsi_count; +}; + +enum ice_promisc_flags { + ICE_PROMISC_UCAST_RX = 0x1, + ICE_PROMISC_UCAST_TX = 0x2, + ICE_PROMISC_MCAST_RX = 0x4, + ICE_PROMISC_MCAST_TX = 0x8, + ICE_PROMISC_BCAST_RX = 0x10, + ICE_PROMISC_BCAST_TX = 0x20, + ICE_PROMISC_VLAN_RX = 0x40, + ICE_PROMISC_VLAN_TX = 0x80, +}; + +/* VSI related commands */ +enum ice_status +ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx, + struct ice_sq_cd *cd); +enum ice_status +ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx, + bool keep_vsi_alloc, struct ice_sq_cd *cd); +enum ice_status +ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx, + struct ice_sq_cd *cd); +enum ice_status +ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx, + struct ice_sq_cd *cd); +enum ice_status +ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx, + bool keep_vsi_alloc, struct ice_sq_cd *cd); +enum ice_status +ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx, + struct ice_sq_cd *cd); +struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle); +void ice_clear_all_vsi_ctx(struct ice_hw *hw); +enum ice_status +ice_aq_get_vsi_params(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx, + struct ice_sq_cd *cd); +enum ice_status +ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi, + u16 count, struct ice_mir_rule_buf *mr_buf, + struct ice_sq_cd *cd, u16 *rule_id); +enum ice_status +ice_aq_delete_mir_rule(struct ice_hw *hw, u16 rule_id, bool keep_allocd, + struct ice_sq_cd *cd); +enum ice_status +ice_aq_get_storm_ctrl(struct ice_hw *hw, u32 *bcast_thresh, u32 *mcast_thresh, + u32 *ctl_bitmask); +enum ice_status +ice_aq_set_storm_ctrl(struct ice_hw *hw, u32 bcast_thresh, u32 mcast_thresh, + u32 ctl_bitmask); +/* Switch config */ +enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw); + +enum ice_status +ice_alloc_vlan_res_counter(struct ice_hw *hw, u16 *counter_id); +enum ice_status +ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id); +enum ice_status +ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items, + u16 *counter_id); +enum ice_status +ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items, + u16 counter_id); + +/* Switch/bridge related commands */ +enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw); +enum ice_status +ice_alloc_sw(struct ice_hw *hw, bool ena_stats, bool shared_res, u16 *sw_id, + u16 *counter_id); +enum ice_status +ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id); +enum ice_status +ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries, void *buf, + u16 buf_size, struct ice_sq_cd *cd); +enum ice_status +ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries, + struct ice_aqc_get_allocd_res_desc_resp *buf, + u16 buf_size, u16 res_type, bool res_shared, u16 *desc_id, + struct ice_sq_cd *cd); +enum 
ice_status +ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list); +enum ice_status +ice_remove_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list); +void ice_rem_all_sw_rules_info(struct ice_hw *hw); +enum ice_status ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_lst); +enum ice_status ice_remove_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_lst); +enum ice_status +ice_add_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list); +enum ice_status +ice_remove_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list); +enum ice_status +ice_add_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list); +enum ice_status +ice_remove_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list); + +enum ice_status +ice_add_mac_with_sw_marker(struct ice_hw *hw, struct ice_fltr_info *f_info, + u16 sw_marker); +enum ice_status +ice_add_mac_with_counter(struct ice_hw *hw, struct ice_fltr_info *f_info); +void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle); + +/* Promisc/defport setup for VSIs */ +enum ice_status +ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set, + u8 direction); +enum ice_status +ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, + u16 vid); +enum ice_status +ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, + u16 vid); +enum ice_status +ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, + bool rm_vlan_promisc); + +/* Get VSIs Promisc/defport settings */ +enum ice_status +ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask, + u16 *vid); +enum ice_status +ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask, + u16 *vid); + +enum ice_status +ice_aq_add_recipe(struct ice_hw *hw, + struct ice_aqc_recipe_data_elem *s_recipe_list, + u16 num_recipes, struct ice_sq_cd *cd); + +enum ice_status +ice_aq_get_recipe(struct ice_hw *hw, + struct ice_aqc_recipe_data_elem *s_recipe_list, + u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd); +enum ice_status +ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap, + struct ice_sq_cd *cd); + +enum ice_status +ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap, + struct ice_sq_cd *cd); + +enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *recipe_id); +enum ice_status +ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, + u16 lkups_cnt, struct ice_adv_rule_info *rinfo, + struct ice_rule_query_data *added_entry); +enum ice_status +ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle); +enum ice_status +ice_rem_adv_rule_by_id(struct ice_hw *hw, + struct ice_rule_query_data *remove_entry); +enum ice_status +ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, + u16 lkups_cnt, struct ice_adv_rule_info *rinfo); + +enum ice_status ice_replay_all_fltr(struct ice_hw *hw); + +enum ice_status +ice_init_def_sw_recp(struct ice_hw *hw, struct ice_sw_recipe **recp_list); +u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle); +bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle); + +enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle); +void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw); +bool ice_is_prof_rule(enum ice_sw_tunnel_type type); + +#endif /* _ICE_SWITCH_H_ */ diff --git a/src/spdk/dpdk/drivers/net/ice/base/ice_type.h b/src/spdk/dpdk/drivers/net/ice/base/ice_type.h new file mode 100644 index 000000000..94ea44265 --- /dev/null +++ 
b/src/spdk/dpdk/drivers/net/ice/base/ice_type.h @@ -0,0 +1,1101 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + +#ifndef _ICE_TYPE_H_ +#define _ICE_TYPE_H_ + +#define ETH_ALEN 6 + +#define ETH_HEADER_LEN 14 + +#define BIT(a) (1UL << (a)) +#define BIT_ULL(a) (1ULL << (a)) + +#define BITS_PER_BYTE 8 + +#define _FORCE_ + +#define ICE_BYTES_PER_WORD 2 +#define ICE_BYTES_PER_DWORD 4 +#define ICE_MAX_TRAFFIC_CLASS 8 + +/** + * ROUND_UP - round up to next arbitrary multiple (not a power of 2) + * @a: value to round up + * @b: arbitrary multiple + * + * Round up to the next multiple of the arbitrary b. + * Note, when b is a power of 2 use ICE_ALIGN() instead. + */ +#define ROUND_UP(a, b) ((b) * DIVIDE_AND_ROUND_UP((a), (b))) + +#define MIN_T(_t, _a, _b) min((_t)(_a), (_t)(_b)) + +#define IS_ASCII(_ch) ((_ch) < 0x80) + +#define ice_struct_size(ptr, field, num) \ + (sizeof(*(ptr)) + sizeof(*(ptr)->field) * (num)) + +#include "ice_status.h" +#include "ice_hw_autogen.h" +#include "ice_devids.h" +#include "ice_osdep.h" +#include "ice_bitops.h" /* Must come before ice_controlq.h */ +#include "ice_controlq.h" +#include "ice_lan_tx_rx.h" +#include "ice_flex_type.h" +#include "ice_protocol_type.h" + +/** + * ice_is_pow2 - check if integer value is a power of 2 + * @val: unsigned integer to be validated + */ +static inline bool ice_is_pow2(u64 val) +{ + return (val && !(val & (val - 1))); +} + +/** + * ice_ilog2 - Calculates integer log base 2 of a number + * @n: number on which to perform operation + */ +static inline int ice_ilog2(u64 n) +{ + int i; + + for (i = 63; i >= 0; i--) + if (((u64)1 << i) & n) + return i; + + return -1; +} + +static inline bool ice_is_tc_ena(ice_bitmap_t bitmap, u8 tc) +{ + return ice_is_bit_set(&bitmap, tc); +} + +#define DIV_64BIT(n, d) ((n) / (d)) + +static inline u64 round_up_64bit(u64 a, u32 b) +{ + return DIV_64BIT(((a) + (b) / 2), (b)); +} + +static inline u32 ice_round_to_num(u32 N, u32 R) +{ + return ((((N) % (R)) < ((R) / 2)) ? (((N) / (R)) * (R)) : + ((((N) + (R) - 1) / (R)) * (R))); +} + +/* Driver always calls main vsi_handle first */ +#define ICE_MAIN_VSI_HANDLE 0 + +/* Switch from ms to the 1usec global time (this is the GTIME resolution) */ +#define ICE_MS_TO_GTIME(time) ((time) * 1000) + +/* Data type manipulation macros. 
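+ * For example, ICE_HI_DWORD(0x1122334455667788ULL) evaluates to
+ * 0x11223344 and ICE_LO_DWORD() of the same value to 0x55667788.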
*/ +#define ICE_HI_DWORD(x) ((u32)((((x) >> 16) >> 16) & 0xFFFFFFFF)) +#define ICE_LO_DWORD(x) ((u32)((x) & 0xFFFFFFFF)) +#define ICE_HI_WORD(x) ((u16)(((x) >> 16) & 0xFFFF)) +#define ICE_LO_WORD(x) ((u16)((x) & 0xFFFF)) + +/* debug masks - set these bits in hw->debug_mask to control output */ +#define ICE_DBG_TRACE BIT_ULL(0) /* for function-trace only */ +#define ICE_DBG_INIT BIT_ULL(1) +#define ICE_DBG_RELEASE BIT_ULL(2) +#define ICE_DBG_FW_LOG BIT_ULL(3) +#define ICE_DBG_LINK BIT_ULL(4) +#define ICE_DBG_PHY BIT_ULL(5) +#define ICE_DBG_QCTX BIT_ULL(6) +#define ICE_DBG_NVM BIT_ULL(7) +#define ICE_DBG_LAN BIT_ULL(8) +#define ICE_DBG_FLOW BIT_ULL(9) +#define ICE_DBG_DCB BIT_ULL(10) +#define ICE_DBG_DIAG BIT_ULL(11) +#define ICE_DBG_FD BIT_ULL(12) +#define ICE_DBG_SW BIT_ULL(13) +#define ICE_DBG_SCHED BIT_ULL(14) + +#define ICE_DBG_PKG BIT_ULL(16) +#define ICE_DBG_RES BIT_ULL(17) +#define ICE_DBG_ACL BIT_ULL(18) +#define ICE_DBG_AQ_MSG BIT_ULL(24) +#define ICE_DBG_AQ_DESC BIT_ULL(25) +#define ICE_DBG_AQ_DESC_BUF BIT_ULL(26) +#define ICE_DBG_AQ_CMD BIT_ULL(27) +#define ICE_DBG_AQ (ICE_DBG_AQ_MSG | \ + ICE_DBG_AQ_DESC | \ + ICE_DBG_AQ_DESC_BUF | \ + ICE_DBG_AQ_CMD) + +#define ICE_DBG_USER BIT_ULL(31) +#define ICE_DBG_ALL 0xFFFFFFFFFFFFFFFFULL + +#define __ALWAYS_UNUSED + +#define IS_ETHER_ADDR_EQUAL(addr1, addr2) \ + (((bool)((((u16 *)(addr1))[0] == ((u16 *)(addr2))[0]))) && \ + ((bool)((((u16 *)(addr1))[1] == ((u16 *)(addr2))[1]))) && \ + ((bool)((((u16 *)(addr1))[2] == ((u16 *)(addr2))[2])))) + +enum ice_aq_res_ids { + ICE_NVM_RES_ID = 1, + ICE_SPD_RES_ID, + ICE_CHANGE_LOCK_RES_ID, + ICE_GLOBAL_CFG_LOCK_RES_ID +}; + +/* FW update timeout definitions are in milliseconds */ +#define ICE_NVM_TIMEOUT 180000 +#define ICE_CHANGE_LOCK_TIMEOUT 1000 +#define ICE_GLOBAL_CFG_LOCK_TIMEOUT 3000 + +enum ice_aq_res_access_type { + ICE_RES_READ = 1, + ICE_RES_WRITE +}; + +struct ice_driver_ver { + u8 major_ver; + u8 minor_ver; + u8 build_ver; + u8 subbuild_ver; + u8 driver_string[32]; +}; + +enum ice_fc_mode { + ICE_FC_NONE = 0, + ICE_FC_RX_PAUSE, + ICE_FC_TX_PAUSE, + ICE_FC_FULL, + ICE_FC_AUTO, + ICE_FC_PFC, + ICE_FC_DFLT +}; + +enum ice_phy_cache_mode { + ICE_FC_MODE = 0, + ICE_SPEED_MODE, + ICE_FEC_MODE +}; + +enum ice_fec_mode { + ICE_FEC_NONE = 0, + ICE_FEC_RS, + ICE_FEC_BASER, + ICE_FEC_AUTO +}; + +struct ice_phy_cache_mode_data { + union { + enum ice_fec_mode curr_user_fec_req; + enum ice_fc_mode curr_user_fc_req; + u16 curr_user_speed_req; + } data; +}; + +enum ice_set_fc_aq_failures { + ICE_SET_FC_AQ_FAIL_NONE = 0, + ICE_SET_FC_AQ_FAIL_GET, + ICE_SET_FC_AQ_FAIL_SET, + ICE_SET_FC_AQ_FAIL_UPDATE +}; + +/* These are structs for managing the hardware information and the operations */ +/* MAC types */ +enum ice_mac_type { + ICE_MAC_UNKNOWN = 0, + ICE_MAC_E810, + ICE_MAC_GENERIC, +}; + +/* Media Types */ +enum ice_media_type { + ICE_MEDIA_UNKNOWN = 0, + ICE_MEDIA_FIBER, + ICE_MEDIA_BASET, + ICE_MEDIA_BACKPLANE, + ICE_MEDIA_DA, +}; + +/* Software VSI types. 
*/ +enum ice_vsi_type { + ICE_VSI_PF = 0, + ICE_VSI_CTRL = 3, /* equates to ICE_VSI_PF with 1 queue pair */ + ICE_VSI_LB = 6, +}; + +struct ice_link_status { + /* Refer to ice_aq_phy_type for bits definition */ + u64 phy_type_low; + u64 phy_type_high; + u8 topo_media_conflict; + u16 max_frame_size; + u16 link_speed; + u16 req_speeds; + u8 lse_ena; /* Link Status Event notification */ + u8 link_info; + u8 an_info; + u8 ext_info; + u8 fec_info; + u8 pacing; + /* Refer to #define from module_type[ICE_MODULE_TYPE_TOTAL_BYTE] of + * ice_aqc_get_phy_caps structure + */ + u8 module_type[ICE_MODULE_TYPE_TOTAL_BYTE]; +}; + +/* Different data queue types: These are mainly for SW consumption. */ +enum ice_q { + ICE_DATA_Q_DOORBELL, + ICE_DATA_Q_CMPL, + ICE_DATA_Q_QUANTA, + ICE_DATA_Q_RX, + ICE_DATA_Q_TX, +}; + +/* Different reset sources for which a disable queue AQ call has to be made in + * order to clean the Tx scheduler as a part of the reset + */ +enum ice_disq_rst_src { + ICE_NO_RESET = 0, + ICE_VM_RESET, +}; + +/* PHY info such as phy_type, etc... */ +struct ice_phy_info { + struct ice_link_status link_info; + struct ice_link_status link_info_old; + u64 phy_type_low; + u64 phy_type_high; + enum ice_media_type media_type; + u8 get_link_info; + /* Please refer to struct ice_aqc_get_link_status_data to get + * detail of enable bit in curr_user_speed_req + */ + u16 curr_user_speed_req; + enum ice_fec_mode curr_user_fec_req; + enum ice_fc_mode curr_user_fc_req; + struct ice_aqc_set_phy_cfg_data curr_user_phy_cfg; +}; + +#define ICE_MAX_NUM_MIRROR_RULES 64 + +/* protocol enumeration for filters */ +enum ice_fltr_ptype { + /* NONE - used for undef/error */ + ICE_FLTR_PTYPE_NONF_NONE = 0, + ICE_FLTR_PTYPE_NONF_IPV4_UDP, + ICE_FLTR_PTYPE_NONF_IPV4_TCP, + ICE_FLTR_PTYPE_NONF_IPV4_SCTP, + ICE_FLTR_PTYPE_NONF_IPV4_OTHER, + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP, + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP, + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP, + ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER, + ICE_FLTR_PTYPE_FRAG_IPV4, + ICE_FLTR_PTYPE_NONF_IPV6_UDP, + ICE_FLTR_PTYPE_NONF_IPV6_TCP, + ICE_FLTR_PTYPE_NONF_IPV6_SCTP, + ICE_FLTR_PTYPE_NONF_IPV6_OTHER, + ICE_FLTR_PTYPE_MAX, +}; + +enum ice_fd_hw_seg { + ICE_FD_HW_SEG_NON_TUN = 0, + ICE_FD_HW_SEG_TUN, + ICE_FD_HW_SEG_MAX, +}; + +/* 2 VSI = 1 ICE_VSI_PF + 1 ICE_VSI_CTRL */ +#define ICE_MAX_FDIR_VSI_PER_FILTER 2 + +struct ice_fd_hw_prof { + struct ice_flow_seg_info *fdir_seg[ICE_FD_HW_SEG_MAX]; + int cnt; + u64 entry_h[ICE_MAX_FDIR_VSI_PER_FILTER][ICE_FD_HW_SEG_MAX]; + u16 vsi_h[ICE_MAX_FDIR_VSI_PER_FILTER]; +}; + +/* Common HW capabilities for SW use */ +struct ice_hw_common_caps { + /* Write CSR protection */ + u64 wr_csr_prot; + u32 switching_mode; + /* switching mode supported - EVB switching (including cloud) */ +#define ICE_NVM_IMAGE_TYPE_EVB 0x0 + + /* Manageablity mode & supported protocols over MCTP */ + u32 mgmt_mode; +#define ICE_MGMT_MODE_PASS_THRU_MODE_M 0xF +#define ICE_MGMT_MODE_CTL_INTERFACE_M 0xF0 +#define ICE_MGMT_MODE_REDIR_SB_INTERFACE_M 0xF00 + + u32 mgmt_protocols_mctp; +#define ICE_MGMT_MODE_PROTO_RSVD BIT(0) +#define ICE_MGMT_MODE_PROTO_PLDM BIT(1) +#define ICE_MGMT_MODE_PROTO_OEM BIT(2) +#define ICE_MGMT_MODE_PROTO_NC_SI BIT(3) + + u32 os2bmc; + u32 valid_functions; + /* DCB capabilities */ + u32 active_tc_bitmap; + u32 maxtc; + + /* RSS related capabilities */ + u32 rss_table_size; /* 512 for PFs and 64 for VFs */ + u32 rss_table_entry_width; /* RSS Entry width in bits */ + + /* Tx/Rx queues */ + u32 num_rxq; /* Number/Total Rx queues */ + 
u32 rxq_first_id; /* First queue ID for Rx queues */ + u32 num_txq; /* Number/Total Tx queues */ + u32 txq_first_id; /* First queue ID for Tx queues */ + + /* MSI-X vectors */ + u32 num_msix_vectors; + u32 msix_vector_first_id; + + /* Max MTU for function or device */ + u32 max_mtu; + + /* WOL related */ + u32 num_wol_proxy_fltr; + u32 wol_proxy_vsi_seid; + + /* LED/SDP pin count */ + u32 led_pin_num; + u32 sdp_pin_num; + + /* LED/SDP - Supports up to 12 LED pins and 8 SDP signals */ +#define ICE_MAX_SUPPORTED_GPIO_LED 12 +#define ICE_MAX_SUPPORTED_GPIO_SDP 8 + u8 led[ICE_MAX_SUPPORTED_GPIO_LED]; + u8 sdp[ICE_MAX_SUPPORTED_GPIO_SDP]; + + /* EVB capabilities */ + u8 evb_802_1_qbg; /* Edge Virtual Bridging */ + u8 evb_802_1_qbh; /* Bridge Port Extension */ + + u8 dcb; + u8 iscsi; + u8 mgmt_cem; + + /* WoL and APM support */ +#define ICE_WOL_SUPPORT_M BIT(0) +#define ICE_ACPI_PROG_MTHD_M BIT(1) +#define ICE_PROXY_SUPPORT_M BIT(2) + u8 apm_wol_support; + u8 acpi_prog_mthd; + u8 proxy_support; + bool nvm_unified_update; +#define ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT BIT(3) +}; + +/* Function specific capabilities */ +struct ice_hw_func_caps { + struct ice_hw_common_caps common_cap; + u32 guar_num_vsi; + u32 fd_fltr_guar; /* Number of filters guaranteed */ + u32 fd_fltr_best_effort; /* Number of best effort filters */ +}; + +/* Device wide capabilities */ +struct ice_hw_dev_caps { + struct ice_hw_common_caps common_cap; + u32 num_vsi_allocd_to_host; /* Excluding EMP VSI */ + u32 num_flow_director_fltr; /* Number of FD filters available */ + u32 num_funcs; +}; + +/* Information about MAC such as address, etc... */ +struct ice_mac_info { + u8 lan_addr[ETH_ALEN]; + u8 perm_addr[ETH_ALEN]; + u8 port_addr[ETH_ALEN]; + u8 wol_addr[ETH_ALEN]; +}; + +/* PCI bus types */ +enum ice_bus_type { + ice_bus_unknown = 0, + ice_bus_pci_express, + ice_bus_embedded, /* Is device Embedded versus card */ + ice_bus_reserved +}; + +/* PCI bus speeds */ +enum ice_pcie_bus_speed { + ice_pcie_speed_unknown = 0xff, + ice_pcie_speed_2_5GT = 0x14, + ice_pcie_speed_5_0GT = 0x15, + ice_pcie_speed_8_0GT = 0x16, + ice_pcie_speed_16_0GT = 0x17 +}; + +/* PCI bus widths */ +enum ice_pcie_link_width { + ice_pcie_lnk_width_resrv = 0x00, + ice_pcie_lnk_x1 = 0x01, + ice_pcie_lnk_x2 = 0x02, + ice_pcie_lnk_x4 = 0x04, + ice_pcie_lnk_x8 = 0x08, + ice_pcie_lnk_x12 = 0x0C, + ice_pcie_lnk_x16 = 0x10, + ice_pcie_lnk_x32 = 0x20, + ice_pcie_lnk_width_unknown = 0xff, +}; + +/* Reset types used to determine which kind of reset was requested. These + * defines match what the RESET_TYPE field of the GLGEN_RSTAT register. + * ICE_RESET_PFR does not match any RESET_TYPE field in the GLGEN_RSTAT register + * because its reset source is different than the other types listed. 
+ */ +enum ice_reset_req { + ICE_RESET_POR = 0, + ICE_RESET_INVAL = 0, + ICE_RESET_CORER = 1, + ICE_RESET_GLOBR = 2, + ICE_RESET_EMPR = 3, + ICE_RESET_PFR = 4, +}; + +/* Bus parameters */ +struct ice_bus_info { + enum ice_pcie_bus_speed speed; + enum ice_pcie_link_width width; + enum ice_bus_type type; + u16 domain_num; + u16 device; + u8 func; + u8 bus_num; +}; + +/* Flow control (FC) parameters */ +struct ice_fc_info { + enum ice_fc_mode current_mode; /* FC mode in effect */ + enum ice_fc_mode req_mode; /* FC mode requested by caller */ +}; + +/* Option ROM version information */ +struct ice_orom_info { + u8 major; /* Major version of OROM */ + u8 patch; /* Patch version of OROM */ + u16 build; /* Build version of OROM */ +}; + +/* NVM Information */ +struct ice_nvm_info { + struct ice_orom_info orom; /* Option ROM version info */ + u32 eetrack; /* NVM data version */ + u16 sr_words; /* Shadow RAM size in words */ + u32 flash_size; /* Size of available flash in bytes */ + u8 major_ver; /* major version of dev starter */ + u8 minor_ver; /* minor version of dev starter */ + u8 blank_nvm_mode; /* is NVM empty (no FW present)*/ +}; + +struct ice_link_default_override_tlv { + u8 options; +#define ICE_LINK_OVERRIDE_OPT_M 0x3F +#define ICE_LINK_OVERRIDE_STRICT_MODE BIT(0) +#define ICE_LINK_OVERRIDE_EPCT_DIS BIT(1) +#define ICE_LINK_OVERRIDE_PORT_DIS BIT(2) +#define ICE_LINK_OVERRIDE_EN BIT(3) +#define ICE_LINK_OVERRIDE_AUTO_LINK_DIS BIT(4) +#define ICE_LINK_OVERRIDE_EEE_EN BIT(5) + u8 phy_config; +#define ICE_LINK_OVERRIDE_PHY_CFG_S 8 +#define ICE_LINK_OVERRIDE_PHY_CFG_M (0xC3 << ICE_LINK_OVERRIDE_PHY_CFG_S) +#define ICE_LINK_OVERRIDE_PAUSE_M 0x3 +#define ICE_LINK_OVERRIDE_LESM_EN BIT(6) +#define ICE_LINK_OVERRIDE_AUTO_FEC_EN BIT(7) + u8 fec_options; +#define ICE_LINK_OVERRIDE_FEC_OPT_M 0xFF + u8 rsvd1; + u64 phy_type_low; + u64 phy_type_high; +}; + +#define ICE_NVM_VER_LEN 32 + +/* Max number of port to queue branches w.r.t topology */ +#define ICE_TXSCHED_MAX_BRANCHES ICE_MAX_TRAFFIC_CLASS + +#define ice_for_each_traffic_class(_i) \ + for ((_i) = 0; (_i) < ICE_MAX_TRAFFIC_CLASS; (_i)++) + +/* ICE_DFLT_AGG_ID means that all new VM(s)/VSI node connects + * to driver defined policy for default aggregator + */ +#define ICE_INVAL_TEID 0xFFFFFFFF +#define ICE_DFLT_AGG_ID 0 + +struct ice_sched_node { + struct ice_sched_node *parent; + struct ice_sched_node *sibling; /* next sibling in the same layer */ + struct ice_sched_node **children; + struct ice_aqc_txsched_elem_data info; + u32 agg_id; /* aggregator group ID */ + u16 vsi_handle; + u8 in_use; /* suspended or in use */ + u8 tx_sched_layer; /* Logical Layer (1-9) */ + u8 num_children; + u8 tc_num; + u8 owner; +#define ICE_SCHED_NODE_OWNER_LAN 0 +#define ICE_SCHED_NODE_OWNER_AE 1 +#define ICE_SCHED_NODE_OWNER_RDMA 2 +}; + +/* Access Macros for Tx Sched Elements data */ +#define ICE_TXSCHED_GET_NODE_TEID(x) LE32_TO_CPU((x)->info.node_teid) +#define ICE_TXSCHED_GET_PARENT_TEID(x) LE32_TO_CPU((x)->info.parent_teid) +#define ICE_TXSCHED_GET_CIR_RL_ID(x) \ + LE16_TO_CPU((x)->info.cir_bw.bw_profile_idx) +#define ICE_TXSCHED_GET_EIR_RL_ID(x) \ + LE16_TO_CPU((x)->info.eir_bw.bw_profile_idx) +#define ICE_TXSCHED_GET_SRL_ID(x) LE16_TO_CPU((x)->info.srl_id) +#define ICE_TXSCHED_GET_CIR_BWALLOC(x) \ + LE16_TO_CPU((x)->info.cir_bw.bw_alloc) +#define ICE_TXSCHED_GET_EIR_BWALLOC(x) \ + LE16_TO_CPU((x)->info.eir_bw.bw_alloc) + +struct ice_sched_rl_profile { + u32 rate; /* In Kbps */ + struct ice_aqc_rl_profile_elem info; +}; + +/* The aggregator type determines 
if identifier is for a VSI group, + * aggregator group, aggregator of queues, or queue group. + */ +enum ice_agg_type { + ICE_AGG_TYPE_UNKNOWN = 0, + ICE_AGG_TYPE_TC, + ICE_AGG_TYPE_AGG, /* aggregator */ + ICE_AGG_TYPE_VSI, + ICE_AGG_TYPE_QG, + ICE_AGG_TYPE_Q +}; + +/* Rate limit types */ +enum ice_rl_type { + ICE_UNKNOWN_BW = 0, + ICE_MIN_BW, /* for CIR profile */ + ICE_MAX_BW, /* for EIR profile */ + ICE_SHARED_BW /* for shared profile */ +}; + +#define ICE_SCHED_MIN_BW 500 /* in Kbps */ +#define ICE_SCHED_MAX_BW 100000000 /* in Kbps */ +#define ICE_SCHED_DFLT_BW 0xFFFFFFFF /* unlimited */ +#define ICE_SCHED_NO_PRIORITY 0 +#define ICE_SCHED_NO_BW_WT 0 +#define ICE_SCHED_DFLT_RL_PROF_ID 0 +#define ICE_SCHED_NO_SHARED_RL_PROF_ID 0xFFFF +#define ICE_SCHED_DFLT_BW_WT 1 +#define ICE_SCHED_INVAL_PROF_ID 0xFFFF +#define ICE_SCHED_DFLT_BURST_SIZE (15 * 1024) /* in bytes (15k) */ + +/* Access Macros for Tx Sched RL Profile data */ +#define ICE_TXSCHED_GET_RL_PROF_ID(p) LE16_TO_CPU((p)->info.profile_id) +#define ICE_TXSCHED_GET_RL_MBS(p) LE16_TO_CPU((p)->info.max_burst_size) +#define ICE_TXSCHED_GET_RL_MULTIPLIER(p) LE16_TO_CPU((p)->info.rl_multiply) +#define ICE_TXSCHED_GET_RL_WAKEUP_MV(p) LE16_TO_CPU((p)->info.wake_up_calc) +#define ICE_TXSCHED_GET_RL_ENCODE(p) LE16_TO_CPU((p)->info.rl_encode) + +/* The following tree example shows the naming conventions followed under + * ice_port_info struct for default scheduler tree topology. + * + * A tree on a port + * * ---> root node + * (TC0)/ / / / \ \ \ \(TC7) ---> num_branches (range:1- 8) + * * * * * * * * * | + * / | + * * | + * / |-> num_elements (range:1 - 9) + * * | implies num_of_layers + * / | + * (a)* | + * + * (a) is the last_node_teid(not of type Leaf). A leaf node is created under + * (a) as child node where queues get added, add Tx/Rx queue admin commands; + * need TEID of (a) to add queues. + * + * This tree + * -> has 8 branches (one for each TC) + * -> First branch (TC0) has 4 elements + * -> has 4 layers + * -> (a) is the topmost layer node created by firmware on branch 0 + * + * Note: Above asterisk tree covers only basic terminology and scenario. + * Refer to the documentation for more info. 
+ */ + + /* Data structure for saving BW information */ +enum ice_bw_type { + ICE_BW_TYPE_PRIO, + ICE_BW_TYPE_CIR, + ICE_BW_TYPE_CIR_WT, + ICE_BW_TYPE_EIR, + ICE_BW_TYPE_EIR_WT, + ICE_BW_TYPE_SHARED, + ICE_BW_TYPE_CNT /* This must be last */ +}; + +struct ice_bw { + u32 bw; + u16 bw_alloc; +}; + +struct ice_bw_type_info { + ice_declare_bitmap(bw_t_bitmap, ICE_BW_TYPE_CNT); + u8 generic; + struct ice_bw cir_bw; + struct ice_bw eir_bw; + u32 shared_bw; +}; + +/* VSI queue context structure for given TC */ +struct ice_q_ctx { + u16 q_handle; + u32 q_teid; + /* bw_t_info saves queue BW information */ + struct ice_bw_type_info bw_t_info; +}; + +/* VSI type list entry to locate corresponding VSI/aggregator nodes */ +struct ice_sched_vsi_info { + struct ice_sched_node *vsi_node[ICE_MAX_TRAFFIC_CLASS]; + struct ice_sched_node *ag_node[ICE_MAX_TRAFFIC_CLASS]; + u16 max_lanq[ICE_MAX_TRAFFIC_CLASS]; + /* bw_t_info saves VSI BW information */ + struct ice_bw_type_info bw_t_info[ICE_MAX_TRAFFIC_CLASS]; +}; + +/* CEE or IEEE 802.1Qaz ETS Configuration data */ +struct ice_dcb_ets_cfg { + u8 willing; + u8 cbs; + u8 maxtcs; + u8 prio_table[ICE_MAX_TRAFFIC_CLASS]; + u8 tcbwtable[ICE_MAX_TRAFFIC_CLASS]; + u8 tsatable[ICE_MAX_TRAFFIC_CLASS]; +}; + +/* CEE or IEEE 802.1Qaz PFC Configuration data */ +struct ice_dcb_pfc_cfg { + u8 willing; + u8 mbc; + u8 pfccap; + u8 pfcena; +}; + +/* CEE or IEEE 802.1Qaz Application Priority data */ +struct ice_dcb_app_priority_table { + u16 prot_id; + u8 priority; + u8 selector; +}; + +#define ICE_MAX_USER_PRIORITY 8 +#define ICE_DCBX_MAX_APPS 32 +#define ICE_LLDPDU_SIZE 1500 +#define ICE_TLV_STATUS_OPER 0x1 +#define ICE_TLV_STATUS_SYNC 0x2 +#define ICE_TLV_STATUS_ERR 0x4 +#define ICE_APP_PROT_ID_FCOE 0x8906 +#define ICE_APP_PROT_ID_ISCSI 0x0cbc +#define ICE_APP_PROT_ID_FIP 0x8914 +#define ICE_APP_SEL_ETHTYPE 0x1 +#define ICE_APP_SEL_TCPIP 0x2 +#define ICE_CEE_APP_SEL_ETHTYPE 0x0 +#define ICE_CEE_APP_SEL_TCPIP 0x1 + +struct ice_dcbx_cfg { + u32 numapps; + u32 tlv_status; /* CEE mode TLV status */ + struct ice_dcb_ets_cfg etscfg; + struct ice_dcb_ets_cfg etsrec; + struct ice_dcb_pfc_cfg pfc; + struct ice_dcb_app_priority_table app[ICE_DCBX_MAX_APPS]; + u8 dcbx_mode; +#define ICE_DCBX_MODE_CEE 0x1 +#define ICE_DCBX_MODE_IEEE 0x2 + u8 app_mode; +#define ICE_DCBX_APPS_NON_WILLING 0x1 +}; + +struct ice_port_info { + struct ice_sched_node *root; /* Root Node per Port */ + struct ice_hw *hw; /* back pointer to HW instance */ + u32 last_node_teid; /* scheduler last node info */ + u16 sw_id; /* Initial switch ID belongs to port */ + u16 pf_vf_num; + u8 port_state; +#define ICE_SCHED_PORT_STATE_INIT 0x0 +#define ICE_SCHED_PORT_STATE_READY 0x1 + u8 lport; +#define ICE_LPORT_MASK 0xff + u16 dflt_tx_vsi_rule_id; + u16 dflt_tx_vsi_num; + u16 dflt_rx_vsi_rule_id; + u16 dflt_rx_vsi_num; + struct ice_fc_info fc; + struct ice_mac_info mac; + struct ice_phy_info phy; + struct ice_lock sched_lock; /* protect access to TXSched tree */ + struct ice_sched_node * + sib_head[ICE_MAX_TRAFFIC_CLASS][ICE_AQC_TOPO_MAX_LEVEL_NUM]; + /* List contain profile ID(s) and other params per layer */ + struct LIST_HEAD_TYPE rl_prof_list[ICE_AQC_TOPO_MAX_LEVEL_NUM]; + struct ice_bw_type_info tc_node_bw_t_info[ICE_MAX_TRAFFIC_CLASS]; + struct ice_dcbx_cfg local_dcbx_cfg; /* Oper/Local Cfg */ + /* DCBX info */ + struct ice_dcbx_cfg remote_dcbx_cfg; /* Peer Cfg */ + struct ice_dcbx_cfg desired_dcbx_cfg; /* CEE Desired Cfg */ + /* LLDP/DCBX Status */ + u8 dcbx_status:3; /* see ICE_DCBX_STATUS_DIS */ + u8 is_sw_lldp:1; + 
u8 is_vf:1; +}; + +struct ice_switch_info { + struct LIST_HEAD_TYPE vsi_list_map_head; + struct ice_sw_recipe *recp_list; + u16 prof_res_bm_init; + + ice_declare_bitmap(prof_res_bm[ICE_MAX_NUM_PROFILES], ICE_MAX_FV_WORDS); +}; + +/* Port hardware description */ +struct ice_hw { + u8 *hw_addr; + void *back; + struct ice_aqc_layer_props *layer_info; + struct ice_port_info *port_info; + /* 2D Array for each Tx Sched RL Profile type */ + struct ice_sched_rl_profile **cir_profiles; + struct ice_sched_rl_profile **eir_profiles; + struct ice_sched_rl_profile **srl_profiles; + /* PSM clock frequency for calculating RL profile params */ + u32 psm_clk_freq; + u64 debug_mask; /* BITMAP for debug mask */ + enum ice_mac_type mac_type; + + u16 fd_ctr_base; /* FD counter base index */ + /* pci info */ + u16 device_id; + u16 vendor_id; + u16 subsystem_device_id; + u16 subsystem_vendor_id; + u8 revision_id; + + u8 pf_id; /* device profile info */ + + u16 max_burst_size; /* driver sets this value */ + + /* Tx Scheduler values */ + u8 num_tx_sched_layers; + u8 num_tx_sched_phys_layers; + u8 flattened_layers; + u8 max_cgds; + u8 sw_entry_point_layer; + u16 max_children[ICE_AQC_TOPO_MAX_LEVEL_NUM]; + struct LIST_HEAD_TYPE agg_list; /* lists all aggregator */ + struct ice_vsi_ctx *vsi_ctx[ICE_MAX_VSI]; + u8 evb_veb; /* true for VEB, false for VEPA */ + u8 reset_ongoing; /* true if HW is in reset, false otherwise */ + struct ice_bus_info bus; + struct ice_nvm_info nvm; + struct ice_hw_dev_caps dev_caps; /* device capabilities */ + struct ice_hw_func_caps func_caps; /* function capabilities */ + + struct ice_switch_info *switch_info; /* switch filter lists */ + + /* Control Queue info */ + struct ice_ctl_q_info adminq; + struct ice_ctl_q_info mailboxq; + /* Additional function to send AdminQ command */ + int (*aq_send_cmd_fn)(void *param, struct ice_aq_desc *desc, + void *buf, u16 buf_size); + void *aq_send_cmd_param; + u8 dcf_enabled; /* Device Config Function */ + + u8 api_branch; /* API branch version */ + u8 api_maj_ver; /* API major version */ + u8 api_min_ver; /* API minor version */ + u8 api_patch; /* API patch version */ + u8 fw_branch; /* firmware branch version */ + u8 fw_maj_ver; /* firmware major version */ + u8 fw_min_ver; /* firmware minor version */ + u8 fw_patch; /* firmware patch version */ + u32 fw_build; /* firmware build number */ + +/* Device max aggregate bandwidths corresponding to the GL_PWR_MODE_CTL + * register. Used for determining the ITR/INTRL granularity during + * initialization. 
+ */ +#define ICE_MAX_AGG_BW_200G 0x0 +#define ICE_MAX_AGG_BW_100G 0X1 +#define ICE_MAX_AGG_BW_50G 0x2 +#define ICE_MAX_AGG_BW_25G 0x3 + /* ITR granularity for different speeds */ +#define ICE_ITR_GRAN_ABOVE_25 2 +#define ICE_ITR_GRAN_MAX_25 4 + /* ITR granularity in 1 us */ + u8 itr_gran; + /* INTRL granularity for different speeds */ +#define ICE_INTRL_GRAN_ABOVE_25 4 +#define ICE_INTRL_GRAN_MAX_25 8 + /* INTRL granularity in 1 us */ + u8 intrl_gran; + + u8 ucast_shared; /* true if VSIs can share unicast addr */ + +#define ICE_PHY_PER_NAC 1 +#define ICE_MAX_QUAD 2 +#define ICE_NUM_QUAD_TYPE 2 +#define ICE_PORTS_PER_QUAD 4 +#define ICE_PHY_0_LAST_QUAD 1 +#define ICE_PORTS_PER_PHY 8 +#define ICE_NUM_EXTERNAL_PORTS ICE_PORTS_PER_PHY + + /* Active package version (currently active) */ + struct ice_pkg_ver active_pkg_ver; + u32 active_track_id; + u8 active_pkg_name[ICE_PKG_NAME_SIZE]; + u8 active_pkg_in_nvm; + + enum ice_aq_err pkg_dwnld_status; + + /* Driver's package ver - (from the Metadata seg) */ + struct ice_pkg_ver pkg_ver; + u8 pkg_name[ICE_PKG_NAME_SIZE]; + + /* Driver's Ice package version (from the Ice seg) */ + struct ice_pkg_ver ice_pkg_ver; + u8 ice_pkg_name[ICE_PKG_NAME_SIZE]; + + /* Pointer to the ice segment */ + struct ice_seg *seg; + + /* Pointer to allocated copy of pkg memory */ + u8 *pkg_copy; + u32 pkg_size; + + /* tunneling info */ + struct ice_lock tnl_lock; + struct ice_tunnel_table tnl; + + struct ice_acl_tbl *acl_tbl; + struct ice_fd_hw_prof **acl_prof; + u16 acl_fltr_cnt[ICE_FLTR_PTYPE_MAX]; + /* HW block tables */ + struct ice_blk_info blk[ICE_BLK_COUNT]; + struct ice_lock fl_profs_locks[ICE_BLK_COUNT]; /* lock fltr profiles */ + struct LIST_HEAD_TYPE fl_profs[ICE_BLK_COUNT]; + /* Flow Director filter info */ + int fdir_active_fltr; + + struct ice_lock fdir_fltr_lock; /* protect Flow Director */ + struct LIST_HEAD_TYPE fdir_list_head; + + /* Book-keeping of side-band filter count per flow-type. + * This is used to detect and handle input set changes for + * respective flow-type. 
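+ * For example, fdir_fltr_cnt[ICE_FLTR_PTYPE_NONF_IPV4_TCP] counts the
+ * side-band IPv4/TCP rules currently programmed.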
+ */ + u16 fdir_fltr_cnt[ICE_FLTR_PTYPE_MAX]; + + struct ice_fd_hw_prof **fdir_prof; + ice_declare_bitmap(fdir_perfect_fltr, ICE_FLTR_PTYPE_MAX); + struct ice_lock rss_locks; /* protect RSS configuration */ + struct LIST_HEAD_TYPE rss_list_head; +}; + +/* Statistics collected by each port, VSI, VEB, and S-channel */ +struct ice_eth_stats { + u64 rx_bytes; /* gorc */ + u64 rx_unicast; /* uprc */ + u64 rx_multicast; /* mprc */ + u64 rx_broadcast; /* bprc */ + u64 rx_discards; /* rdpc */ + u64 rx_unknown_protocol; /* rupp */ + u64 tx_bytes; /* gotc */ + u64 tx_unicast; /* uptc */ + u64 tx_multicast; /* mptc */ + u64 tx_broadcast; /* bptc */ + u64 tx_discards; /* tdpc */ + u64 tx_errors; /* tepc */ + u64 rx_no_desc; /* repc */ + u64 rx_errors; /* repc */ +}; + +#define ICE_MAX_UP 8 + +/* Statistics collected per VEB per User Priority (UP) for up to 8 UPs */ +struct ice_veb_up_stats { + u64 up_rx_pkts[ICE_MAX_UP]; + u64 up_rx_bytes[ICE_MAX_UP]; + u64 up_tx_pkts[ICE_MAX_UP]; + u64 up_tx_bytes[ICE_MAX_UP]; +}; + +/* Statistics collected by the MAC */ +struct ice_hw_port_stats { + /* eth stats collected by the port */ + struct ice_eth_stats eth; + /* additional port specific stats */ + u64 tx_dropped_link_down; /* tdold */ + u64 crc_errors; /* crcerrs */ + u64 illegal_bytes; /* illerrc */ + u64 error_bytes; /* errbc */ + u64 mac_local_faults; /* mlfc */ + u64 mac_remote_faults; /* mrfc */ + u64 rx_len_errors; /* rlec */ + u64 link_xon_rx; /* lxonrxc */ + u64 link_xoff_rx; /* lxoffrxc */ + u64 link_xon_tx; /* lxontxc */ + u64 link_xoff_tx; /* lxofftxc */ + u64 priority_xon_rx[8]; /* pxonrxc[8] */ + u64 priority_xoff_rx[8]; /* pxoffrxc[8] */ + u64 priority_xon_tx[8]; /* pxontxc[8] */ + u64 priority_xoff_tx[8]; /* pxofftxc[8] */ + u64 priority_xon_2_xoff[8]; /* pxon2offc[8] */ + u64 rx_size_64; /* prc64 */ + u64 rx_size_127; /* prc127 */ + u64 rx_size_255; /* prc255 */ + u64 rx_size_511; /* prc511 */ + u64 rx_size_1023; /* prc1023 */ + u64 rx_size_1522; /* prc1522 */ + u64 rx_size_big; /* prc9522 */ + u64 rx_undersize; /* ruc */ + u64 rx_fragments; /* rfc */ + u64 rx_oversize; /* roc */ + u64 rx_jabber; /* rjc */ + u64 tx_size_64; /* ptc64 */ + u64 tx_size_127; /* ptc127 */ + u64 tx_size_255; /* ptc255 */ + u64 tx_size_511; /* ptc511 */ + u64 tx_size_1023; /* ptc1023 */ + u64 tx_size_1522; /* ptc1522 */ + u64 tx_size_big; /* ptc9522 */ + u64 mac_short_pkt_dropped; /* mspdc */ + /* flow director stats */ + u32 fd_sb_status; + u64 fd_sb_match; +}; + +enum ice_sw_fwd_act_type { + ICE_FWD_TO_VSI = 0, + ICE_FWD_TO_VSI_LIST, /* Do not use this when adding filter */ + ICE_FWD_TO_Q, + ICE_FWD_TO_QGRP, + ICE_DROP_PACKET, + ICE_INVAL_ACT +}; + +/* Checksum and Shadow RAM pointers */ +#define ICE_SR_NVM_CTRL_WORD 0x00 +#define ICE_SR_PHY_ANALOG_PTR 0x04 +#define ICE_SR_OPTION_ROM_PTR 0x05 +#define ICE_SR_RO_PCIR_REGS_AUTO_LOAD_PTR 0x06 +#define ICE_SR_AUTO_GENERATED_POINTERS_PTR 0x07 +#define ICE_SR_PCIR_REGS_AUTO_LOAD_PTR 0x08 +#define ICE_SR_EMP_GLOBAL_MODULE_PTR 0x09 +#define ICE_SR_EMP_IMAGE_PTR 0x0B +#define ICE_SR_PE_IMAGE_PTR 0x0C +#define ICE_SR_CSR_PROTECTED_LIST_PTR 0x0D +#define ICE_SR_MNG_CFG_PTR 0x0E +#define ICE_SR_EMP_MODULE_PTR 0x0F +#define ICE_SR_PBA_BLOCK_PTR 0x16 +#define ICE_SR_BOOT_CFG_PTR 0x132 +#define ICE_SR_NVM_WOL_CFG 0x19 +#define ICE_NVM_OROM_VER_OFF 0x02 +#define ICE_SR_NVM_DEV_STARTER_VER 0x18 +#define ICE_SR_ALTERNATE_SAN_MAC_ADDR_PTR 0x27 +#define ICE_SR_PERMANENT_SAN_MAC_ADDR_PTR 0x28 +#define ICE_SR_NVM_MAP_VER 0x29 +#define ICE_SR_NVM_IMAGE_VER 0x2A +#define 
ICE_SR_NVM_STRUCTURE_VER 0x2B +#define ICE_SR_NVM_EETRACK_LO 0x2D +#define ICE_SR_NVM_EETRACK_HI 0x2E +#define ICE_NVM_VER_LO_SHIFT 0 +#define ICE_NVM_VER_LO_MASK (0xff << ICE_NVM_VER_LO_SHIFT) +#define ICE_NVM_VER_HI_SHIFT 12 +#define ICE_NVM_VER_HI_MASK (0xf << ICE_NVM_VER_HI_SHIFT) +#define ICE_OEM_EETRACK_ID 0xffffffff +#define ICE_OROM_VER_PATCH_SHIFT 0 +#define ICE_OROM_VER_PATCH_MASK (0xff << ICE_OROM_VER_PATCH_SHIFT) +#define ICE_OROM_VER_BUILD_SHIFT 8 +#define ICE_OROM_VER_BUILD_MASK (0xffff << ICE_OROM_VER_BUILD_SHIFT) +#define ICE_OROM_VER_SHIFT 24 +#define ICE_OROM_VER_MASK (0xff << ICE_OROM_VER_SHIFT) +#define ICE_SR_VPD_PTR 0x2F +#define ICE_SR_PXE_SETUP_PTR 0x30 +#define ICE_SR_PXE_CFG_CUST_OPTIONS_PTR 0x31 +#define ICE_SR_NVM_ORIGINAL_EETRACK_LO 0x34 +#define ICE_SR_NVM_ORIGINAL_EETRACK_HI 0x35 +#define ICE_SR_VLAN_CFG_PTR 0x37 +#define ICE_SR_POR_REGS_AUTO_LOAD_PTR 0x38 +#define ICE_SR_EMPR_REGS_AUTO_LOAD_PTR 0x3A +#define ICE_SR_GLOBR_REGS_AUTO_LOAD_PTR 0x3B +#define ICE_SR_CORER_REGS_AUTO_LOAD_PTR 0x3C +#define ICE_SR_PHY_CFG_SCRIPT_PTR 0x3D +#define ICE_SR_PCIE_ALT_AUTO_LOAD_PTR 0x3E +#define ICE_SR_SW_CHECKSUM_WORD 0x3F +#define ICE_SR_PFA_PTR 0x40 +#define ICE_SR_1ST_SCRATCH_PAD_PTR 0x41 +#define ICE_SR_1ST_NVM_BANK_PTR 0x42 +#define ICE_SR_NVM_BANK_SIZE 0x43 +#define ICE_SR_1ND_OROM_BANK_PTR 0x44 +#define ICE_SR_OROM_BANK_SIZE 0x45 +#define ICE_SR_NETLIST_BANK_PTR 0x46 +#define ICE_SR_NETLIST_BANK_SIZE 0x47 +#define ICE_SR_EMP_SR_SETTINGS_PTR 0x48 +#define ICE_SR_CONFIGURATION_METADATA_PTR 0x4D +#define ICE_SR_IMMEDIATE_VALUES_PTR 0x4E +#define ICE_SR_LINK_DEFAULT_OVERRIDE_PTR 0x134 +#define ICE_SR_POR_REGISTERS_AUTOLOAD_PTR 0x118 + +/* Auxiliary field, mask and shift definition for Shadow RAM and NVM Flash */ +#define ICE_SR_VPD_SIZE_WORDS 512 +#define ICE_SR_PCIE_ALT_SIZE_WORDS 512 +#define ICE_SR_CTRL_WORD_1_S 0x06 +#define ICE_SR_CTRL_WORD_1_M (0x03 << ICE_SR_CTRL_WORD_1_S) + +/* Shadow RAM related */ +#define ICE_SR_SECTOR_SIZE_IN_WORDS 0x800 +#define ICE_SR_BUF_ALIGNMENT 4096 +#define ICE_SR_WORDS_IN_1KB 512 +/* Checksum should be calculated such that after adding all the words, + * including the checksum word itself, the sum should be 0xBABA. + */ +#define ICE_SR_SW_CHECKSUM_BASE 0xBABA + +/* Link override related */ +#define ICE_SR_PFA_LINK_OVERRIDE_WORDS 10 +#define ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS 4 +#define ICE_SR_PFA_LINK_OVERRIDE_OFFSET 2 +#define ICE_SR_PFA_LINK_OVERRIDE_FEC_OFFSET 1 +#define ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET 2 +#define ICE_FW_API_LINK_OVERRIDE_MAJ 1 +#define ICE_FW_API_LINK_OVERRIDE_MIN 5 +#define ICE_FW_API_LINK_OVERRIDE_PATCH 2 + +#define ICE_PBA_FLAG_DFLT 0xFAFA +/* Hash redirection LUT for VSI - maximum array size */ +#define ICE_VSIQF_HLUT_ARRAY_SIZE ((VSIQF_HLUT_MAX_INDEX + 1) * 4) + +/* + * Defines for values in the VF_PE_DB_SIZE bits in the GLPCI_LBARCTRL register. 
+ * This is needed to determine the BAR0 space for the VFs + */ +#define GLPCI_LBARCTRL_VF_PE_DB_SIZE_0KB 0x0 +#define GLPCI_LBARCTRL_VF_PE_DB_SIZE_8KB 0x1 +#define GLPCI_LBARCTRL_VF_PE_DB_SIZE_64KB 0x2 + +#endif /* _ICE_TYPE_H_ */ diff --git a/src/spdk/dpdk/drivers/net/ice/base/meson.build b/src/spdk/dpdk/drivers/net/ice/base/meson.build new file mode 100644 index 000000000..22963ce31 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ice/base/meson.build @@ -0,0 +1,34 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2018-2020 Intel Corporation + +sources = [ + 'ice_controlq.c', + 'ice_common.c', + 'ice_sched.c', + 'ice_switch.c', + 'ice_nvm.c', + 'ice_flex_pipe.c', + 'ice_flow.c', + 'ice_dcb.c', + 'ice_fdir.c', + 'ice_acl.c', + 'ice_acl_ctrl.c', +] + +error_cflags = ['-Wno-unused-value', + '-Wno-unused-but-set-variable', + '-Wno-unused-variable', + '-Wno-unused-parameter', +] +c_args = cflags + +foreach flag: error_cflags + if cc.has_argument(flag) + c_args += flag + endif +endforeach + +base_lib = static_library('ice_base', sources, + dependencies: static_rte_eal, + c_args: c_args) +base_objs = base_lib.extract_all_objects() diff --git a/src/spdk/dpdk/drivers/net/ice/ice_dcf.c b/src/spdk/dpdk/drivers/net/ice/ice_dcf.c new file mode 100644 index 000000000..0cd5d1bf6 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ice/ice_dcf.c @@ -0,0 +1,658 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2020 Intel Corporation + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ice_dcf.h" + +#define ICE_DCF_AQ_LEN 32 +#define ICE_DCF_AQ_BUF_SZ 4096 + +#define ICE_DCF_ARQ_MAX_RETRIES 200 +#define ICE_DCF_ARQ_CHECK_TIME 2 /* msecs */ + +#define ICE_DCF_VF_RES_BUF_SZ \ + (sizeof(struct virtchnl_vf_resource) + \ + IAVF_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource)) + +static __rte_always_inline int +ice_dcf_send_cmd_req_no_irq(struct ice_dcf_hw *hw, enum virtchnl_ops op, + uint8_t *req_msg, uint16_t req_msglen) +{ + return iavf_aq_send_msg_to_pf(&hw->avf, op, IAVF_SUCCESS, + req_msg, req_msglen, NULL); +} + +static int +ice_dcf_recv_cmd_rsp_no_irq(struct ice_dcf_hw *hw, enum virtchnl_ops op, + uint8_t *rsp_msgbuf, uint16_t rsp_buflen, + uint16_t *rsp_msglen) +{ + struct iavf_arq_event_info event; + enum virtchnl_ops v_op; + int i = 0; + int err; + + event.buf_len = rsp_buflen; + event.msg_buf = rsp_msgbuf; + + do { + err = iavf_clean_arq_element(&hw->avf, &event, NULL); + if (err != IAVF_SUCCESS) + goto again; + + v_op = rte_le_to_cpu_32(event.desc.cookie_high); + if (v_op != op) + goto again; + + if (rsp_msglen != NULL) + *rsp_msglen = event.msg_len; + return rte_le_to_cpu_32(event.desc.cookie_low); + +again: + rte_delay_ms(ICE_DCF_ARQ_CHECK_TIME); + } while (i++ < ICE_DCF_ARQ_MAX_RETRIES); + + return -EIO; +} + +static __rte_always_inline void +ice_dcf_aq_cmd_clear(struct ice_dcf_hw *hw, struct dcf_virtchnl_cmd *cmd) +{ + rte_spinlock_lock(&hw->vc_cmd_queue_lock); + + TAILQ_REMOVE(&hw->vc_cmd_queue, cmd, next); + + rte_spinlock_unlock(&hw->vc_cmd_queue_lock); +} + +static __rte_always_inline void +ice_dcf_vc_cmd_set(struct ice_dcf_hw *hw, struct dcf_virtchnl_cmd *cmd) +{ + cmd->v_ret = IAVF_ERR_NOT_READY; + cmd->rsp_msglen = 0; + cmd->pending = 1; + + rte_spinlock_lock(&hw->vc_cmd_queue_lock); + + TAILQ_INSERT_TAIL(&hw->vc_cmd_queue, cmd, next); + + rte_spinlock_unlock(&hw->vc_cmd_queue_lock); +} + +static __rte_always_inline 
int +ice_dcf_vc_cmd_send(struct ice_dcf_hw *hw, struct dcf_virtchnl_cmd *cmd) +{ + return iavf_aq_send_msg_to_pf(&hw->avf, + cmd->v_op, IAVF_SUCCESS, + cmd->req_msg, cmd->req_msglen, NULL); +} + +static __rte_always_inline void +ice_dcf_aq_cmd_handle(struct ice_dcf_hw *hw, struct iavf_arq_event_info *info) +{ + struct dcf_virtchnl_cmd *cmd; + enum virtchnl_ops v_op; + enum iavf_status v_ret; + uint16_t aq_op; + + aq_op = rte_le_to_cpu_16(info->desc.opcode); + if (unlikely(aq_op != iavf_aqc_opc_send_msg_to_vf)) { + PMD_DRV_LOG(ERR, + "Request %u is not supported yet", aq_op); + return; + } + + v_op = rte_le_to_cpu_32(info->desc.cookie_high); + if (v_op == VIRTCHNL_OP_EVENT) { + if (hw->vc_event_msg_cb != NULL) + hw->vc_event_msg_cb(hw, + info->msg_buf, + info->msg_len); + return; + } + + v_ret = rte_le_to_cpu_32(info->desc.cookie_low); + + rte_spinlock_lock(&hw->vc_cmd_queue_lock); + + TAILQ_FOREACH(cmd, &hw->vc_cmd_queue, next) { + if (cmd->v_op == v_op && cmd->pending) { + cmd->v_ret = v_ret; + cmd->rsp_msglen = RTE_MIN(info->msg_len, + cmd->rsp_buflen); + if (likely(cmd->rsp_msglen != 0)) + rte_memcpy(cmd->rsp_msgbuf, info->msg_buf, + cmd->rsp_msglen); + + /* prevent compiler reordering */ + rte_compiler_barrier(); + cmd->pending = 0; + break; + } + } + + rte_spinlock_unlock(&hw->vc_cmd_queue_lock); +} + +static void +ice_dcf_handle_virtchnl_msg(struct ice_dcf_hw *hw) +{ + struct iavf_arq_event_info info; + uint16_t pending = 1; + int ret; + + info.buf_len = ICE_DCF_AQ_BUF_SZ; + info.msg_buf = hw->arq_buf; + + while (pending) { + ret = iavf_clean_arq_element(&hw->avf, &info, &pending); + if (ret != IAVF_SUCCESS) + break; + + ice_dcf_aq_cmd_handle(hw, &info); + } +} + +static int +ice_dcf_init_check_api_version(struct ice_dcf_hw *hw) +{ +#define ICE_CPF_VIRTCHNL_VERSION_MAJOR_START 1 +#define ICE_CPF_VIRTCHNL_VERSION_MINOR_START 1 + struct virtchnl_version_info version, *pver; + int err; + + version.major = VIRTCHNL_VERSION_MAJOR; + version.minor = VIRTCHNL_VERSION_MINOR; + err = ice_dcf_send_cmd_req_no_irq(hw, VIRTCHNL_OP_VERSION, + (uint8_t *)&version, sizeof(version)); + if (err) { + PMD_INIT_LOG(ERR, "Failed to send OP_VERSION"); + return err; + } + + pver = &hw->virtchnl_version; + err = ice_dcf_recv_cmd_rsp_no_irq(hw, VIRTCHNL_OP_VERSION, + (uint8_t *)pver, sizeof(*pver), NULL); + if (err) { + PMD_INIT_LOG(ERR, "Failed to get response of OP_VERSION"); + return -1; + } + + PMD_INIT_LOG(DEBUG, + "Peer PF API version: %u.%u", pver->major, pver->minor); + + if (pver->major < ICE_CPF_VIRTCHNL_VERSION_MAJOR_START || + (pver->major == ICE_CPF_VIRTCHNL_VERSION_MAJOR_START && + pver->minor < ICE_CPF_VIRTCHNL_VERSION_MINOR_START)) { + PMD_INIT_LOG(ERR, + "VIRTCHNL API version should not be lower than (%u.%u)", + ICE_CPF_VIRTCHNL_VERSION_MAJOR_START, + ICE_CPF_VIRTCHNL_VERSION_MAJOR_START); + return -1; + } else if (pver->major > VIRTCHNL_VERSION_MAJOR || + (pver->major == VIRTCHNL_VERSION_MAJOR && + pver->minor > VIRTCHNL_VERSION_MINOR)) { + PMD_INIT_LOG(ERR, + "PF/VF API version mismatch:(%u.%u)-(%u.%u)", + pver->major, pver->minor, + VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR); + return -1; + } + + PMD_INIT_LOG(DEBUG, "Peer is supported PF host"); + + return 0; +} + +static int +ice_dcf_get_vf_resource(struct ice_dcf_hw *hw) +{ + uint32_t caps; + int err, i; + + caps = VIRTCHNL_VF_OFFLOAD_WB_ON_ITR | VIRTCHNL_VF_OFFLOAD_RX_POLLING | + VIRTCHNL_VF_CAP_ADV_LINK_SPEED | VIRTCHNL_VF_CAP_DCF | + VF_BASE_MODE_OFFLOADS; + + err = ice_dcf_send_cmd_req_no_irq(hw, VIRTCHNL_OP_GET_VF_RESOURCES, + 
(uint8_t *)&caps, sizeof(caps)); + if (err) { + PMD_DRV_LOG(ERR, "Failed to send msg OP_GET_VF_RESOURCE"); + return err; + } + + err = ice_dcf_recv_cmd_rsp_no_irq(hw, VIRTCHNL_OP_GET_VF_RESOURCES, + (uint8_t *)hw->vf_res, + ICE_DCF_VF_RES_BUF_SZ, NULL); + if (err) { + PMD_DRV_LOG(ERR, "Failed to get response of OP_GET_VF_RESOURCE"); + return -1; + } + + iavf_vf_parse_hw_config(&hw->avf, hw->vf_res); + + hw->vsi_res = NULL; + for (i = 0; i < hw->vf_res->num_vsis; i++) { + if (hw->vf_res->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV) + hw->vsi_res = &hw->vf_res->vsi_res[i]; + } + + if (!hw->vsi_res) { + PMD_DRV_LOG(ERR, "no LAN VSI found"); + return -1; + } + + hw->vsi_id = hw->vsi_res->vsi_id; + PMD_DRV_LOG(DEBUG, "VSI ID is %u", hw->vsi_id); + + return 0; +} + +static int +ice_dcf_get_vf_vsi_map(struct ice_dcf_hw *hw) +{ + struct virtchnl_dcf_vsi_map *vsi_map; + uint32_t valid_msg_len; + uint16_t len; + int err; + + err = ice_dcf_send_cmd_req_no_irq(hw, VIRTCHNL_OP_DCF_GET_VSI_MAP, + NULL, 0); + if (err) { + PMD_DRV_LOG(ERR, "Failed to send msg OP_DCF_GET_VSI_MAP"); + return err; + } + + err = ice_dcf_recv_cmd_rsp_no_irq(hw, VIRTCHNL_OP_DCF_GET_VSI_MAP, + hw->arq_buf, ICE_DCF_AQ_BUF_SZ, + &len); + if (err) { + PMD_DRV_LOG(ERR, "Failed to get response of OP_DCF_GET_VSI_MAP"); + return err; + } + + vsi_map = (struct virtchnl_dcf_vsi_map *)hw->arq_buf; + valid_msg_len = (vsi_map->num_vfs - 1) * sizeof(vsi_map->vf_vsi[0]) + + sizeof(*vsi_map); + if (len != valid_msg_len) { + PMD_DRV_LOG(ERR, "invalid vf vsi map response with length %u", + len); + return -EINVAL; + } + + if (hw->num_vfs != 0 && hw->num_vfs != vsi_map->num_vfs) { + PMD_DRV_LOG(ERR, "The number VSI map (%u) doesn't match the number of VFs (%u)", + vsi_map->num_vfs, hw->num_vfs); + return -EINVAL; + } + + len = vsi_map->num_vfs * sizeof(vsi_map->vf_vsi[0]); + + if (!hw->vf_vsi_map) { + hw->vf_vsi_map = rte_zmalloc("vf_vsi_ctx", len, 0); + if (!hw->vf_vsi_map) { + PMD_DRV_LOG(ERR, "Failed to alloc memory for VSI context"); + return -ENOMEM; + } + + hw->num_vfs = vsi_map->num_vfs; + } + + if (!memcmp(hw->vf_vsi_map, vsi_map->vf_vsi, len)) { + PMD_DRV_LOG(DEBUG, "VF VSI map doesn't change"); + return 1; + } + + rte_memcpy(hw->vf_vsi_map, vsi_map->vf_vsi, len); + return 0; +} + +static int +ice_dcf_mode_disable(struct ice_dcf_hw *hw) +{ + int err; + + err = ice_dcf_send_cmd_req_no_irq(hw, VIRTCHNL_OP_DCF_DISABLE, + NULL, 0); + if (err) { + PMD_DRV_LOG(ERR, "Failed to send msg OP_DCF_DISABLE"); + return err; + } + + err = ice_dcf_recv_cmd_rsp_no_irq(hw, VIRTCHNL_OP_DCF_DISABLE, + hw->arq_buf, ICE_DCF_AQ_BUF_SZ, NULL); + if (err) { + PMD_DRV_LOG(ERR, + "Failed to get response of OP_DCF_DISABLE %d", + err); + return -1; + } + + return 0; +} + +static int +ice_dcf_check_reset_done(struct ice_dcf_hw *hw) +{ +#define ICE_DCF_RESET_WAIT_CNT 50 + struct iavf_hw *avf = &hw->avf; + int i, reset; + + for (i = 0; i < ICE_DCF_RESET_WAIT_CNT; i++) { + reset = IAVF_READ_REG(avf, IAVF_VFGEN_RSTAT) & + IAVF_VFGEN_RSTAT_VFR_STATE_MASK; + reset = reset >> IAVF_VFGEN_RSTAT_VFR_STATE_SHIFT; + + if (reset == VIRTCHNL_VFR_VFACTIVE || + reset == VIRTCHNL_VFR_COMPLETED) + break; + + rte_delay_ms(20); + } + + if (i >= ICE_DCF_RESET_WAIT_CNT) + return -1; + + return 0; +} + +static inline void +ice_dcf_enable_irq0(struct ice_dcf_hw *hw) +{ + struct iavf_hw *avf = &hw->avf; + + /* Enable admin queue interrupt trigger */ + IAVF_WRITE_REG(avf, IAVF_VFINT_ICR0_ENA1, + IAVF_VFINT_ICR0_ENA1_ADMINQ_MASK); + IAVF_WRITE_REG(avf, IAVF_VFINT_DYN_CTL01, + 
IAVF_VFINT_DYN_CTL01_INTENA_MASK | + IAVF_VFINT_DYN_CTL01_CLEARPBA_MASK | + IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK); + + IAVF_WRITE_FLUSH(avf); +} + +static inline void +ice_dcf_disable_irq0(struct ice_dcf_hw *hw) +{ + struct iavf_hw *avf = &hw->avf; + + /* Disable all interrupt types */ + IAVF_WRITE_REG(avf, IAVF_VFINT_ICR0_ENA1, 0); + IAVF_WRITE_REG(avf, IAVF_VFINT_DYN_CTL01, + IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK); + + IAVF_WRITE_FLUSH(avf); +} + +static void +ice_dcf_dev_interrupt_handler(void *param) +{ + struct ice_dcf_hw *hw = param; + + ice_dcf_disable_irq0(hw); + + ice_dcf_handle_virtchnl_msg(hw); + + ice_dcf_enable_irq0(hw); +} + +int +ice_dcf_execute_virtchnl_cmd(struct ice_dcf_hw *hw, + struct dcf_virtchnl_cmd *cmd) +{ + int i = 0; + int err; + + if ((cmd->req_msg && !cmd->req_msglen) || + (!cmd->req_msg && cmd->req_msglen) || + (cmd->rsp_msgbuf && !cmd->rsp_buflen) || + (!cmd->rsp_msgbuf && cmd->rsp_buflen)) + return -EINVAL; + + rte_spinlock_lock(&hw->vc_cmd_send_lock); + ice_dcf_vc_cmd_set(hw, cmd); + + err = ice_dcf_vc_cmd_send(hw, cmd); + if (err) { + PMD_DRV_LOG(ERR, "fail to send cmd %d", cmd->v_op); + goto ret; + } + + do { + if (!cmd->pending) + break; + + rte_delay_ms(ICE_DCF_ARQ_CHECK_TIME); + } while (i++ < ICE_DCF_ARQ_MAX_RETRIES); + + if (cmd->v_ret != IAVF_SUCCESS) { + err = -1; + PMD_DRV_LOG(ERR, + "No response (%d times) or return failure (%d) for cmd %d", + i, cmd->v_ret, cmd->v_op); + } + +ret: + ice_dcf_aq_cmd_clear(hw, cmd); + rte_spinlock_unlock(&hw->vc_cmd_send_lock); + return err; +} + +int +ice_dcf_send_aq_cmd(void *dcf_hw, struct ice_aq_desc *desc, + void *buf, uint16_t buf_size) +{ + struct dcf_virtchnl_cmd desc_cmd, buff_cmd; + struct ice_dcf_hw *hw = dcf_hw; + int err = 0; + int i = 0; + + if ((buf && !buf_size) || (!buf && buf_size) || + buf_size > ICE_DCF_AQ_BUF_SZ) + return -EINVAL; + + desc_cmd.v_op = VIRTCHNL_OP_DCF_CMD_DESC; + desc_cmd.req_msglen = sizeof(*desc); + desc_cmd.req_msg = (uint8_t *)desc; + desc_cmd.rsp_buflen = sizeof(*desc); + desc_cmd.rsp_msgbuf = (uint8_t *)desc; + + if (buf == NULL) + return ice_dcf_execute_virtchnl_cmd(hw, &desc_cmd); + + desc->flags |= rte_cpu_to_le_16(ICE_AQ_FLAG_BUF); + + buff_cmd.v_op = VIRTCHNL_OP_DCF_CMD_BUFF; + buff_cmd.req_msglen = buf_size; + buff_cmd.req_msg = buf; + buff_cmd.rsp_buflen = buf_size; + buff_cmd.rsp_msgbuf = buf; + + rte_spinlock_lock(&hw->vc_cmd_send_lock); + ice_dcf_vc_cmd_set(hw, &desc_cmd); + ice_dcf_vc_cmd_set(hw, &buff_cmd); + + if (ice_dcf_vc_cmd_send(hw, &desc_cmd) || + ice_dcf_vc_cmd_send(hw, &buff_cmd)) { + err = -1; + PMD_DRV_LOG(ERR, "fail to send OP_DCF_CMD_DESC/BUFF"); + goto ret; + } + + do { + if ((!desc_cmd.pending && !buff_cmd.pending) || + (!desc_cmd.pending && desc_cmd.v_ret != IAVF_SUCCESS) || + (!buff_cmd.pending && buff_cmd.v_ret != IAVF_SUCCESS)) + break; + + rte_delay_ms(ICE_DCF_ARQ_CHECK_TIME); + } while (i++ < ICE_DCF_ARQ_MAX_RETRIES); + + if (desc_cmd.v_ret != IAVF_SUCCESS || buff_cmd.v_ret != IAVF_SUCCESS) { + err = -1; + PMD_DRV_LOG(ERR, + "No response (%d times) or return failure (desc: %d / buff: %d)", + i, desc_cmd.v_ret, buff_cmd.v_ret); + } + +ret: + ice_dcf_aq_cmd_clear(hw, &desc_cmd); + ice_dcf_aq_cmd_clear(hw, &buff_cmd); + rte_spinlock_unlock(&hw->vc_cmd_send_lock); + + return err; +} + +int +ice_dcf_handle_vsi_update_event(struct ice_dcf_hw *hw) +{ + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(hw->eth_dev); + int err = 0; + + rte_spinlock_lock(&hw->vc_cmd_send_lock); + + rte_intr_disable(&pci_dev->intr_handle); + ice_dcf_disable_irq0(hw); + + 
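	/*
	 * Note: with the PCI interrupt and the VF ICR0 cause masked above,
	 * the admin receive queue is no longer drained by the interrupt
	 * handler, so the no-IRQ request/response helpers can poll it
	 * directly while the VF resources and the VF-to-VSI map are
	 * re-read below.
	 */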
if (ice_dcf_get_vf_resource(hw) || ice_dcf_get_vf_vsi_map(hw) < 0) + err = -1; + + rte_intr_enable(&pci_dev->intr_handle); + ice_dcf_enable_irq0(hw); + + rte_spinlock_unlock(&hw->vc_cmd_send_lock); + + return err; +} + +int +ice_dcf_init_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw) +{ + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); + int ret; + + hw->avf.hw_addr = pci_dev->mem_resource[0].addr; + hw->avf.back = hw; + + hw->avf.bus.bus_id = pci_dev->addr.bus; + hw->avf.bus.device = pci_dev->addr.devid; + hw->avf.bus.func = pci_dev->addr.function; + + hw->avf.device_id = pci_dev->id.device_id; + hw->avf.vendor_id = pci_dev->id.vendor_id; + hw->avf.subsystem_device_id = pci_dev->id.subsystem_device_id; + hw->avf.subsystem_vendor_id = pci_dev->id.subsystem_vendor_id; + + hw->avf.aq.num_arq_entries = ICE_DCF_AQ_LEN; + hw->avf.aq.num_asq_entries = ICE_DCF_AQ_LEN; + hw->avf.aq.arq_buf_size = ICE_DCF_AQ_BUF_SZ; + hw->avf.aq.asq_buf_size = ICE_DCF_AQ_BUF_SZ; + + rte_spinlock_init(&hw->vc_cmd_send_lock); + rte_spinlock_init(&hw->vc_cmd_queue_lock); + TAILQ_INIT(&hw->vc_cmd_queue); + + hw->arq_buf = rte_zmalloc("arq_buf", ICE_DCF_AQ_BUF_SZ, 0); + if (hw->arq_buf == NULL) { + PMD_INIT_LOG(ERR, "unable to allocate AdminQ buffer memory"); + goto err; + } + + ret = iavf_set_mac_type(&hw->avf); + if (ret) { + PMD_INIT_LOG(ERR, "set_mac_type failed: %d", ret); + goto err; + } + + ret = ice_dcf_check_reset_done(hw); + if (ret) { + PMD_INIT_LOG(ERR, "VF is still resetting"); + goto err; + } + + ret = iavf_init_adminq(&hw->avf); + if (ret) { + PMD_INIT_LOG(ERR, "init_adminq failed: %d", ret); + goto err; + } + + if (ice_dcf_init_check_api_version(hw)) { + PMD_INIT_LOG(ERR, "check_api version failed"); + goto err_api; + } + + hw->vf_res = rte_zmalloc("vf_res", ICE_DCF_VF_RES_BUF_SZ, 0); + if (hw->vf_res == NULL) { + PMD_INIT_LOG(ERR, "unable to allocate vf_res memory"); + goto err_api; + } + + if (ice_dcf_get_vf_resource(hw)) { + PMD_INIT_LOG(ERR, "Failed to get VF resource"); + goto err_alloc; + } + + if (ice_dcf_get_vf_vsi_map(hw) < 0) { + PMD_INIT_LOG(ERR, "Failed to get VF VSI map"); + ice_dcf_mode_disable(hw); + goto err_alloc; + } + + hw->eth_dev = eth_dev; + rte_intr_callback_register(&pci_dev->intr_handle, + ice_dcf_dev_interrupt_handler, hw); + rte_intr_enable(&pci_dev->intr_handle); + ice_dcf_enable_irq0(hw); + + return 0; + +err_alloc: + rte_free(hw->vf_res); +err_api: + iavf_shutdown_adminq(&hw->avf); +err: + rte_free(hw->arq_buf); + + return -1; +} + +void +ice_dcf_uninit_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw) +{ + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + + ice_dcf_disable_irq0(hw); + rte_intr_disable(intr_handle); + rte_intr_callback_unregister(intr_handle, + ice_dcf_dev_interrupt_handler, hw); + + ice_dcf_mode_disable(hw); + iavf_shutdown_adminq(&hw->avf); + + rte_free(hw->arq_buf); + rte_free(hw->vf_vsi_map); + rte_free(hw->vf_res); +} diff --git a/src/spdk/dpdk/drivers/net/ice/ice_dcf.h b/src/spdk/dpdk/drivers/net/ice/ice_dcf.h new file mode 100644 index 000000000..d2e447b48 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ice/ice_dcf.h @@ -0,0 +1,63 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2020 Intel Corporation + */ + +#ifndef _ICE_DCF_H_ +#define _ICE_DCF_H_ + +#include + +#include +#include +#include + +#include "base/ice_type.h" +#include "ice_logs.h" + +struct dcf_virtchnl_cmd { + TAILQ_ENTRY(dcf_virtchnl_cmd) next; + + enum virtchnl_ops v_op; 
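	/*
	 * Completion status and the response length are filled in by the
	 * AdminQ interrupt path (ice_dcf_aq_cmd_handle) once a message with
	 * a matching opcode arrives; 'pending' is then cleared to wake the
	 * polling sender in ice_dcf_execute_virtchnl_cmd().
	 *
	 * Minimal usage sketch (rsp_buf/rsp_buf_len are placeholder names,
	 * not symbols defined by this driver):
	 *
	 *	struct dcf_virtchnl_cmd cmd = {
	 *		.v_op = VIRTCHNL_OP_DCF_GET_VSI_MAP,
	 *		.rsp_msgbuf = rsp_buf,
	 *		.rsp_buflen = rsp_buf_len,
	 *	};
	 *
	 *	if (!ice_dcf_execute_virtchnl_cmd(hw, &cmd))
	 *		cmd.rsp_msglen bytes of response are now in rsp_buf
	 */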
+ enum iavf_status v_ret; + + uint16_t req_msglen; + uint8_t *req_msg; + + uint16_t rsp_msglen; + uint16_t rsp_buflen; + uint8_t *rsp_msgbuf; + + volatile int pending; +}; + +struct ice_dcf_hw { + struct iavf_hw avf; + + rte_spinlock_t vc_cmd_send_lock; + rte_spinlock_t vc_cmd_queue_lock; + TAILQ_HEAD(, dcf_virtchnl_cmd) vc_cmd_queue; + void (*vc_event_msg_cb)(struct ice_dcf_hw *dcf_hw, + uint8_t *msg, uint16_t msglen); + + uint8_t *arq_buf; + + uint16_t num_vfs; + uint16_t *vf_vsi_map; + + struct virtchnl_version_info virtchnl_version; + struct virtchnl_vf_resource *vf_res; /* VF resource */ + struct virtchnl_vsi_resource *vsi_res; /* LAN VSI */ + uint16_t vsi_id; + + struct rte_eth_dev *eth_dev; +}; + +int ice_dcf_execute_virtchnl_cmd(struct ice_dcf_hw *hw, + struct dcf_virtchnl_cmd *cmd); +int ice_dcf_send_aq_cmd(void *dcf_hw, struct ice_aq_desc *desc, + void *buf, uint16_t buf_size); +int ice_dcf_handle_vsi_update_event(struct ice_dcf_hw *hw); +int ice_dcf_init_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw); +void ice_dcf_uninit_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw); + +#endif /* _ICE_DCF_H_ */ diff --git a/src/spdk/dpdk/drivers/net/ice/ice_dcf_ethdev.c b/src/spdk/dpdk/drivers/net/ice/ice_dcf_ethdev.c new file mode 100644 index 000000000..e5ba1a61f --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ice/ice_dcf_ethdev.c @@ -0,0 +1,327 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2020 Intel Corporation + */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "ice_generic_flow.h" +#include "ice_dcf_ethdev.h" + +static uint16_t +ice_dcf_recv_pkts(__rte_unused void *rx_queue, + __rte_unused struct rte_mbuf **bufs, + __rte_unused uint16_t nb_pkts) +{ + return 0; +} + +static uint16_t +ice_dcf_xmit_pkts(__rte_unused void *tx_queue, + __rte_unused struct rte_mbuf **bufs, + __rte_unused uint16_t nb_pkts) +{ + return 0; +} + +static int +ice_dcf_dev_start(struct rte_eth_dev *dev) +{ + dev->data->dev_link.link_status = ETH_LINK_UP; + + return 0; +} + +static void +ice_dcf_dev_stop(struct rte_eth_dev *dev) +{ + dev->data->dev_link.link_status = ETH_LINK_DOWN; +} + +static int +ice_dcf_dev_configure(__rte_unused struct rte_eth_dev *dev) +{ + return 0; +} + +static int +ice_dcf_dev_info_get(struct rte_eth_dev *dev, + struct rte_eth_dev_info *dev_info) +{ + struct ice_dcf_adapter *adapter = dev->data->dev_private; + + dev_info->max_mac_addrs = 1; + dev_info->max_rx_pktlen = (uint32_t)-1; + dev_info->max_rx_queues = RTE_DIM(adapter->rxqs); + dev_info->max_tx_queues = RTE_DIM(adapter->txqs); + + return 0; +} + +static int +ice_dcf_stats_get(__rte_unused struct rte_eth_dev *dev, + __rte_unused struct rte_eth_stats *igb_stats) +{ + return 0; +} + +static int +ice_dcf_stats_reset(__rte_unused struct rte_eth_dev *dev) +{ + return 0; +} + +static int +ice_dcf_dev_promiscuous_enable(__rte_unused struct rte_eth_dev *dev) +{ + return 0; +} + +static int +ice_dcf_dev_promiscuous_disable(__rte_unused struct rte_eth_dev *dev) +{ + return 0; +} + +static int +ice_dcf_dev_allmulticast_enable(__rte_unused struct rte_eth_dev *dev) +{ + return 0; +} + +static int +ice_dcf_dev_allmulticast_disable(__rte_unused struct rte_eth_dev *dev) +{ + return 0; +} + +static int +ice_dcf_dev_filter_ctrl(struct rte_eth_dev *dev, + enum rte_filter_type filter_type, + enum rte_filter_op filter_op, + void *arg) +{ + int ret = 0; + + if (!dev) + return -EINVAL; + + 
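	/*
	 * Only the generic flow API is exposed through filter_ctrl: a
	 * RTE_ETH_FILTER_GENERIC query with RTE_ETH_FILTER_GET returns the
	 * ice_flow_ops table (from ice_generic_flow.h), and every other
	 * filter type is rejected below.
	 */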
switch (filter_type) { + case RTE_ETH_FILTER_GENERIC: + if (filter_op != RTE_ETH_FILTER_GET) + return -EINVAL; + *(const void **)arg = &ice_flow_ops; + break; + + default: + PMD_DRV_LOG(WARNING, "Filter type (%d) not supported", + filter_type); + ret = -EINVAL; + break; + } + + return ret; +} + +static void +ice_dcf_dev_close(struct rte_eth_dev *dev) +{ + struct ice_dcf_adapter *adapter = dev->data->dev_private; + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return; + + dev->dev_ops = NULL; + dev->rx_pkt_burst = NULL; + dev->tx_pkt_burst = NULL; + + ice_dcf_uninit_parent_adapter(dev); + ice_dcf_uninit_hw(dev, &adapter->real_hw); +} + +static void +ice_dcf_queue_release(__rte_unused void *q) +{ +} + +static int +ice_dcf_link_update(__rte_unused struct rte_eth_dev *dev, + __rte_unused int wait_to_complete) +{ + return 0; +} + +static int +ice_dcf_rx_queue_setup(struct rte_eth_dev *dev, + uint16_t rx_queue_id, + __rte_unused uint16_t nb_rx_desc, + __rte_unused unsigned int socket_id, + __rte_unused const struct rte_eth_rxconf *rx_conf, + __rte_unused struct rte_mempool *mb_pool) +{ + struct ice_dcf_adapter *adapter = dev->data->dev_private; + + dev->data->rx_queues[rx_queue_id] = &adapter->rxqs[rx_queue_id]; + + return 0; +} + +static int +ice_dcf_tx_queue_setup(struct rte_eth_dev *dev, + uint16_t tx_queue_id, + __rte_unused uint16_t nb_tx_desc, + __rte_unused unsigned int socket_id, + __rte_unused const struct rte_eth_txconf *tx_conf) +{ + struct ice_dcf_adapter *adapter = dev->data->dev_private; + + dev->data->tx_queues[tx_queue_id] = &adapter->txqs[tx_queue_id]; + + return 0; +} + +static const struct eth_dev_ops ice_dcf_eth_dev_ops = { + .dev_start = ice_dcf_dev_start, + .dev_stop = ice_dcf_dev_stop, + .dev_close = ice_dcf_dev_close, + .dev_configure = ice_dcf_dev_configure, + .dev_infos_get = ice_dcf_dev_info_get, + .rx_queue_setup = ice_dcf_rx_queue_setup, + .tx_queue_setup = ice_dcf_tx_queue_setup, + .rx_queue_release = ice_dcf_queue_release, + .tx_queue_release = ice_dcf_queue_release, + .link_update = ice_dcf_link_update, + .stats_get = ice_dcf_stats_get, + .stats_reset = ice_dcf_stats_reset, + .promiscuous_enable = ice_dcf_dev_promiscuous_enable, + .promiscuous_disable = ice_dcf_dev_promiscuous_disable, + .allmulticast_enable = ice_dcf_dev_allmulticast_enable, + .allmulticast_disable = ice_dcf_dev_allmulticast_disable, + .filter_ctrl = ice_dcf_dev_filter_ctrl, +}; + +static int +ice_dcf_dev_init(struct rte_eth_dev *eth_dev) +{ + struct ice_dcf_adapter *adapter = eth_dev->data->dev_private; + + eth_dev->dev_ops = &ice_dcf_eth_dev_ops; + eth_dev->rx_pkt_burst = ice_dcf_recv_pkts; + eth_dev->tx_pkt_burst = ice_dcf_xmit_pkts; + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; + + eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE; + + adapter->real_hw.vc_event_msg_cb = ice_dcf_handle_pf_event_msg; + if (ice_dcf_init_hw(eth_dev, &adapter->real_hw) != 0) { + PMD_INIT_LOG(ERR, "Failed to init DCF hardware"); + return -1; + } + + if (ice_dcf_init_parent_adapter(eth_dev) != 0) { + PMD_INIT_LOG(ERR, "Failed to init DCF parent adapter"); + ice_dcf_uninit_hw(eth_dev, &adapter->real_hw); + return -1; + } + + return 0; +} + +static int +ice_dcf_dev_uninit(struct rte_eth_dev *eth_dev) +{ + ice_dcf_dev_close(eth_dev); + + return 0; +} + +static int +ice_dcf_cap_check_handler(__rte_unused const char *key, + const char *value, __rte_unused void *opaque) +{ + if (strcmp(value, "dcf")) + return -1; + + return 0; +} + +static int +ice_dcf_cap_selected(struct rte_devargs *devargs) 
+{ + struct rte_kvargs *kvlist; + const char *key = "cap"; + int ret = 0; + + if (devargs == NULL) + return 0; + + kvlist = rte_kvargs_parse(devargs->args, NULL); + if (kvlist == NULL) + return 0; + + if (!rte_kvargs_count(kvlist, key)) + goto exit; + + /* dcf capability selected when there's a key-value pair: cap=dcf */ + if (rte_kvargs_process(kvlist, key, + ice_dcf_cap_check_handler, NULL) < 0) + goto exit; + + ret = 1; + +exit: + rte_kvargs_free(kvlist); + return ret; +} + +static int eth_ice_dcf_pci_probe(__rte_unused struct rte_pci_driver *pci_drv, + struct rte_pci_device *pci_dev) +{ + if (!ice_dcf_cap_selected(pci_dev->device.devargs)) + return 1; + + return rte_eth_dev_pci_generic_probe(pci_dev, + sizeof(struct ice_dcf_adapter), + ice_dcf_dev_init); +} + +static int eth_ice_dcf_pci_remove(struct rte_pci_device *pci_dev) +{ + return rte_eth_dev_pci_generic_remove(pci_dev, ice_dcf_dev_uninit); +} + +static const struct rte_pci_id pci_id_ice_dcf_map[] = { + { RTE_PCI_DEVICE(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_ADAPTIVE_VF) }, + { .vendor_id = 0, /* sentinel */ }, +}; + +static struct rte_pci_driver rte_ice_dcf_pmd = { + .id_table = pci_id_ice_dcf_map, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING, + .probe = eth_ice_dcf_pci_probe, + .remove = eth_ice_dcf_pci_remove, +}; + +RTE_PMD_REGISTER_PCI(net_ice_dcf, rte_ice_dcf_pmd); +RTE_PMD_REGISTER_PCI_TABLE(net_ice_dcf, pci_id_ice_dcf_map); +RTE_PMD_REGISTER_KMOD_DEP(net_ice_dcf, "* igb_uio | vfio-pci"); +RTE_PMD_REGISTER_PARAM_STRING(net_ice_dcf, "cap=dcf"); diff --git a/src/spdk/dpdk/drivers/net/ice/ice_dcf_ethdev.h b/src/spdk/dpdk/drivers/net/ice/ice_dcf_ethdev.h new file mode 100644 index 000000000..e60e808d8 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ice/ice_dcf_ethdev.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2020 Intel Corporation + */ + +#ifndef _ICE_DCF_ETHDEV_H_ +#define _ICE_DCF_ETHDEV_H_ + +#include "base/ice_common.h" +#include "base/ice_adminq_cmd.h" + +#include "ice_ethdev.h" +#include "ice_dcf.h" + +#define ICE_DCF_MAX_RINGS 1 + +struct ice_dcf_queue { + uint64_t dummy; +}; + +struct ice_dcf_adapter { + struct ice_adapter parent; /* Must be first */ + + struct ice_dcf_hw real_hw; + struct ice_dcf_queue rxqs[ICE_DCF_MAX_RINGS]; + struct ice_dcf_queue txqs[ICE_DCF_MAX_RINGS]; +}; + +void ice_dcf_handle_pf_event_msg(struct ice_dcf_hw *dcf_hw, + uint8_t *msg, uint16_t msglen); +int ice_dcf_init_parent_adapter(struct rte_eth_dev *eth_dev); +void ice_dcf_uninit_parent_adapter(struct rte_eth_dev *eth_dev); + +#endif /* _ICE_DCF_ETHDEV_H_ */ diff --git a/src/spdk/dpdk/drivers/net/ice/ice_dcf_parent.c b/src/spdk/dpdk/drivers/net/ice/ice_dcf_parent.c new file mode 100644 index 000000000..bdfc7d430 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ice/ice_dcf_parent.c @@ -0,0 +1,397 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2020 Intel Corporation + */ +#include +#include +#include +#include + +#include + +#include "ice_dcf_ethdev.h" +#include "ice_generic_flow.h" + +#define ICE_DCF_VSI_UPDATE_SERVICE_INTERVAL 100000 /* us */ +static rte_spinlock_t vsi_update_lock = RTE_SPINLOCK_INITIALIZER; + +static __rte_always_inline void +ice_dcf_update_vsi_ctx(struct ice_hw *hw, uint16_t vsi_handle, + uint16_t vsi_map) +{ + struct ice_vsi_ctx *vsi_ctx; + bool first_update = false; + uint16_t new_vsi_num; + + if (unlikely(vsi_handle >= ICE_MAX_VSI)) { + PMD_DRV_LOG(ERR, "Invalid vsi handle %u", vsi_handle); + return; + } + + vsi_ctx = hw->vsi_ctx[vsi_handle]; + + if (vsi_map & 
VIRTCHNL_DCF_VF_VSI_VALID) { + if (!vsi_ctx) { + vsi_ctx = ice_malloc(hw, sizeof(*vsi_ctx)); + if (!vsi_ctx) { + PMD_DRV_LOG(ERR, "No memory for vsi context %u", + vsi_handle); + return; + } + hw->vsi_ctx[vsi_handle] = vsi_ctx; + first_update = true; + } + + new_vsi_num = (vsi_map & VIRTCHNL_DCF_VF_VSI_ID_M) >> + VIRTCHNL_DCF_VF_VSI_ID_S; + + /* Redirect rules if vsi mapping table changes. */ + if (!first_update) { + struct ice_flow_redirect rd; + + memset(&rd, 0, sizeof(struct ice_flow_redirect)); + rd.type = ICE_FLOW_REDIRECT_VSI; + rd.vsi_handle = vsi_handle; + rd.new_vsi_num = new_vsi_num; + ice_flow_redirect((struct ice_adapter *)hw->back, &rd); + } else { + vsi_ctx->vsi_num = new_vsi_num; + } + + PMD_DRV_LOG(DEBUG, "VF%u is assigned with vsi number %u", + vsi_handle, vsi_ctx->vsi_num); + } else { + hw->vsi_ctx[vsi_handle] = NULL; + + ice_free(hw, vsi_ctx); + + PMD_DRV_LOG(NOTICE, "VF%u is disabled", vsi_handle); + } +} + +static void +ice_dcf_update_vf_vsi_map(struct ice_hw *hw, uint16_t num_vfs, + uint16_t *vf_vsi_map) +{ + uint16_t vf_id; + + for (vf_id = 0; vf_id < num_vfs; vf_id++) + ice_dcf_update_vsi_ctx(hw, vf_id, vf_vsi_map[vf_id]); +} + +static void* +ice_dcf_vsi_update_service_handler(void *param) +{ + struct ice_dcf_hw *hw = param; + + usleep(ICE_DCF_VSI_UPDATE_SERVICE_INTERVAL); + + rte_spinlock_lock(&vsi_update_lock); + + if (!ice_dcf_handle_vsi_update_event(hw)) { + struct ice_dcf_adapter *dcf_ad = + container_of(hw, struct ice_dcf_adapter, real_hw); + + ice_dcf_update_vf_vsi_map(&dcf_ad->parent.hw, + hw->num_vfs, hw->vf_vsi_map); + } + + rte_spinlock_unlock(&vsi_update_lock); + + return NULL; +} + +void +ice_dcf_handle_pf_event_msg(struct ice_dcf_hw *dcf_hw, + uint8_t *msg, uint16_t msglen) +{ + struct virtchnl_pf_event *pf_msg = (struct virtchnl_pf_event *)msg; + pthread_t thread; + + if (msglen < sizeof(struct virtchnl_pf_event)) { + PMD_DRV_LOG(DEBUG, "Invalid event message length : %u", msglen); + return; + } + + switch (pf_msg->event) { + case VIRTCHNL_EVENT_RESET_IMPENDING: + PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_RESET_IMPENDING event"); + pthread_create(&thread, NULL, + ice_dcf_vsi_update_service_handler, dcf_hw); + break; + case VIRTCHNL_EVENT_LINK_CHANGE: + PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_LINK_CHANGE event"); + break; + case VIRTCHNL_EVENT_PF_DRIVER_CLOSE: + PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_PF_DRIVER_CLOSE event"); + break; + case VIRTCHNL_EVENT_DCF_VSI_MAP_UPDATE: + PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_DCF_VSI_MAP_UPDATE event : VF%u with VSI num %u", + pf_msg->event_data.vf_vsi_map.vf_id, + pf_msg->event_data.vf_vsi_map.vsi_id); + pthread_create(&thread, NULL, + ice_dcf_vsi_update_service_handler, dcf_hw); + break; + default: + PMD_DRV_LOG(ERR, "Unknown event received %u", pf_msg->event); + break; + } +} + +static int +ice_dcf_init_parent_hw(struct ice_hw *hw) +{ + struct ice_aqc_get_phy_caps_data *pcaps; + enum ice_status status; + + status = ice_aq_get_fw_ver(hw, NULL); + if (status) + return status; + + status = ice_get_caps(hw); + if (status) + return status; + + hw->port_info = (struct ice_port_info *) + ice_malloc(hw, sizeof(*hw->port_info)); + if (!hw->port_info) + return ICE_ERR_NO_MEMORY; + + /* set the back pointer to HW */ + hw->port_info->hw = hw; + + /* Initialize port_info struct with switch configuration data */ + status = ice_get_initial_sw_cfg(hw); + if (status) + goto err_unroll_alloc; + + pcaps = (struct ice_aqc_get_phy_caps_data *) + ice_malloc(hw, sizeof(*pcaps)); + if (!pcaps) { + status = ICE_ERR_NO_MEMORY; + goto err_unroll_alloc; + } 
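	/*
	 * The firmware, capability, PHY and link queries in this function go
	 * through the shared base code, which the DCF redirects:
	 * ice_dcf_init_parent_adapter() installs ice_dcf_send_aq_cmd() as the
	 * hw->aq_send_cmd_fn hook, so each AdminQ descriptor/buffer is
	 * expected to be relayed to the PF as a VIRTCHNL_OP_DCF_CMD_DESC /
	 * VIRTCHNL_OP_DCF_CMD_BUFF pair rather than sent on a local queue.
	 */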
+ + /* Initialize port_info struct with PHY capabilities */ + status = ice_aq_get_phy_caps(hw->port_info, false, + ICE_AQC_REPORT_TOPO_CAP, pcaps, NULL); + ice_free(hw, pcaps); + if (status) + goto err_unroll_alloc; + + /* Initialize port_info struct with link information */ + status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL); + if (status) + goto err_unroll_alloc; + + status = ice_init_fltr_mgmt_struct(hw); + if (status) + goto err_unroll_alloc; + + status = ice_init_hw_tbls(hw); + if (status) + goto err_unroll_fltr_mgmt_struct; + + PMD_INIT_LOG(INFO, + "firmware %d.%d.%d api %d.%d.%d build 0x%08x", + hw->fw_maj_ver, hw->fw_min_ver, hw->fw_patch, + hw->api_maj_ver, hw->api_min_ver, hw->api_patch, + hw->fw_build); + + return ICE_SUCCESS; + +err_unroll_fltr_mgmt_struct: + ice_cleanup_fltr_mgmt_struct(hw); +err_unroll_alloc: + ice_free(hw, hw->port_info); + hw->port_info = NULL; + + return status; +} + +static void ice_dcf_uninit_parent_hw(struct ice_hw *hw) +{ + ice_cleanup_fltr_mgmt_struct(hw); + + ice_free_seg(hw); + ice_free_hw_tbls(hw); + + ice_free(hw, hw->port_info); + hw->port_info = NULL; + + ice_clear_all_vsi_ctx(hw); +} + +static int +ice_dcf_request_pkg_name(struct ice_hw *hw, char *pkg_name) +{ + struct ice_dcf_adapter *dcf_adapter = + container_of(hw, struct ice_dcf_adapter, parent.hw); + struct virtchnl_pkg_info pkg_info; + struct dcf_virtchnl_cmd vc_cmd; + uint64_t dsn; + + vc_cmd.v_op = VIRTCHNL_OP_DCF_GET_PKG_INFO; + vc_cmd.req_msglen = 0; + vc_cmd.req_msg = NULL; + vc_cmd.rsp_buflen = sizeof(pkg_info); + vc_cmd.rsp_msgbuf = (uint8_t *)&pkg_info; + + if (ice_dcf_execute_virtchnl_cmd(&dcf_adapter->real_hw, &vc_cmd)) + goto pkg_file_direct; + + rte_memcpy(&dsn, pkg_info.dsn, sizeof(dsn)); + + snprintf(pkg_name, ICE_MAX_PKG_FILENAME_SIZE, + ICE_PKG_FILE_SEARCH_PATH_UPDATES "ice-%016llX.pkg", + (unsigned long long)dsn); + if (!access(pkg_name, 0)) + return 0; + + snprintf(pkg_name, ICE_MAX_PKG_FILENAME_SIZE, + ICE_PKG_FILE_SEARCH_PATH_DEFAULT "ice-%016llX.pkg", + (unsigned long long)dsn); + if (!access(pkg_name, 0)) + return 0; + +pkg_file_direct: + snprintf(pkg_name, + ICE_MAX_PKG_FILENAME_SIZE, "%s", ICE_PKG_FILE_UPDATES); + if (!access(pkg_name, 0)) + return 0; + + snprintf(pkg_name, + ICE_MAX_PKG_FILENAME_SIZE, "%s", ICE_PKG_FILE_DEFAULT); + if (!access(pkg_name, 0)) + return 0; + + return -1; +} + +static int +ice_dcf_load_pkg(struct ice_hw *hw) +{ + char pkg_name[ICE_MAX_PKG_FILENAME_SIZE]; + uint8_t *pkg_buf; + uint32_t buf_len; + struct stat st; + FILE *fp; + int err; + + if (ice_dcf_request_pkg_name(hw, pkg_name)) { + PMD_INIT_LOG(ERR, "Failed to locate the package file"); + return -ENOENT; + } + + PMD_INIT_LOG(DEBUG, "DDP package name: %s", pkg_name); + + err = stat(pkg_name, &st); + if (err) { + PMD_INIT_LOG(ERR, "Failed to get file status"); + return err; + } + + buf_len = st.st_size; + pkg_buf = rte_malloc(NULL, buf_len, 0); + if (!pkg_buf) { + PMD_INIT_LOG(ERR, "failed to allocate buffer of size %u for package", + buf_len); + return -1; + } + + fp = fopen(pkg_name, "rb"); + if (!fp) { + PMD_INIT_LOG(ERR, "failed to open file: %s", pkg_name); + err = -1; + goto ret; + } + + err = fread(pkg_buf, buf_len, 1, fp); + fclose(fp); + if (err != 1) { + PMD_INIT_LOG(ERR, "failed to read package data"); + err = -1; + goto ret; + } + + err = ice_copy_and_init_pkg(hw, pkg_buf, buf_len); + if (err) + PMD_INIT_LOG(ERR, "ice_copy_and_init_hw failed: %d", err); + +ret: + rte_free(pkg_buf); + return err; +} + +int +ice_dcf_init_parent_adapter(struct rte_eth_dev 
*eth_dev) +{ + struct ice_dcf_adapter *adapter = eth_dev->data->dev_private; + struct ice_adapter *parent_adapter = &adapter->parent; + struct ice_hw *parent_hw = &parent_adapter->hw; + struct ice_dcf_hw *hw = &adapter->real_hw; + const struct rte_ether_addr *mac; + int err; + + parent_adapter->eth_dev = eth_dev; + parent_adapter->pf.adapter = parent_adapter; + parent_adapter->pf.dev_data = eth_dev->data; + parent_hw->back = parent_adapter; + parent_hw->mac_type = ICE_MAC_GENERIC; + parent_hw->vendor_id = ICE_INTEL_VENDOR_ID; + + ice_init_lock(&parent_hw->adminq.sq_lock); + ice_init_lock(&parent_hw->adminq.rq_lock); + parent_hw->aq_send_cmd_fn = ice_dcf_send_aq_cmd; + parent_hw->aq_send_cmd_param = &adapter->real_hw; + parent_hw->dcf_enabled = true; + + err = ice_dcf_init_parent_hw(parent_hw); + if (err) { + PMD_INIT_LOG(ERR, "failed to init the DCF parent hardware with error %d", + err); + return err; + } + + err = ice_dcf_load_pkg(parent_hw); + if (err) { + PMD_INIT_LOG(ERR, "failed to load package with error %d", + err); + goto uninit_hw; + } + parent_adapter->active_pkg_type = ice_load_pkg_type(parent_hw); + + err = ice_flow_init(parent_adapter); + if (err) { + PMD_INIT_LOG(ERR, "Failed to initialize flow"); + goto uninit_hw; + } + + ice_dcf_update_vf_vsi_map(parent_hw, hw->num_vfs, hw->vf_vsi_map); + + mac = (const struct rte_ether_addr *)hw->avf.mac.addr; + if (rte_is_valid_assigned_ether_addr(mac)) + rte_ether_addr_copy(mac, &parent_adapter->pf.dev_addr); + else + rte_eth_random_addr(parent_adapter->pf.dev_addr.addr_bytes); + + eth_dev->data->mac_addrs = &parent_adapter->pf.dev_addr; + + return 0; + +uninit_hw: + ice_dcf_uninit_parent_hw(parent_hw); + return err; +} + +void +ice_dcf_uninit_parent_adapter(struct rte_eth_dev *eth_dev) +{ + struct ice_dcf_adapter *adapter = eth_dev->data->dev_private; + struct ice_adapter *parent_adapter = &adapter->parent; + struct ice_hw *parent_hw = &parent_adapter->hw; + + eth_dev->data->mac_addrs = NULL; + + ice_flow_uninit(parent_adapter); + ice_dcf_uninit_parent_hw(parent_hw); +} diff --git a/src/spdk/dpdk/drivers/net/ice/ice_ethdev.c b/src/spdk/dpdk/drivers/net/ice/ice_ethdev.c new file mode 100644 index 000000000..d5110c439 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ice/ice_ethdev.c @@ -0,0 +1,4600 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Intel Corporation + */ + +#include +#include + +#include +#include +#include +#include + +#include "base/ice_sched.h" +#include "base/ice_flow.h" +#include "base/ice_dcb.h" +#include "base/ice_common.h" + +#include "rte_pmd_ice.h" +#include "ice_ethdev.h" +#include "ice_rxtx.h" +#include "ice_generic_flow.h" + +/* devargs */ +#define ICE_SAFE_MODE_SUPPORT_ARG "safe-mode-support" +#define ICE_PIPELINE_MODE_SUPPORT_ARG "pipeline-mode-support" +#define ICE_FLOW_MARK_SUPPORT_ARG "flow-mark-support" +#define ICE_PROTO_XTR_ARG "proto_xtr" + +static const char * const ice_valid_args[] = { + ICE_SAFE_MODE_SUPPORT_ARG, + ICE_PIPELINE_MODE_SUPPORT_ARG, + ICE_FLOW_MARK_SUPPORT_ARG, + ICE_PROTO_XTR_ARG, + NULL +}; + +static const struct rte_mbuf_dynfield ice_proto_xtr_metadata_param = { + .name = "ice_dynfield_proto_xtr_metadata", + .size = sizeof(uint32_t), + .align = __alignof__(uint32_t), + .flags = 0, +}; + +struct proto_xtr_ol_flag { + const struct rte_mbuf_dynflag param; + uint64_t *ol_flag; + bool required; +}; + +static struct proto_xtr_ol_flag ice_proto_xtr_ol_flag_params[] = { + [PROTO_XTR_VLAN] = { + .param = { .name = "ice_dynflag_proto_xtr_vlan" }, + .ol_flag = 
&rte_net_ice_dynflag_proto_xtr_vlan_mask }, + [PROTO_XTR_IPV4] = { + .param = { .name = "ice_dynflag_proto_xtr_ipv4" }, + .ol_flag = &rte_net_ice_dynflag_proto_xtr_ipv4_mask }, + [PROTO_XTR_IPV6] = { + .param = { .name = "ice_dynflag_proto_xtr_ipv6" }, + .ol_flag = &rte_net_ice_dynflag_proto_xtr_ipv6_mask }, + [PROTO_XTR_IPV6_FLOW] = { + .param = { .name = "ice_dynflag_proto_xtr_ipv6_flow" }, + .ol_flag = &rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask }, + [PROTO_XTR_TCP] = { + .param = { .name = "ice_dynflag_proto_xtr_tcp" }, + .ol_flag = &rte_net_ice_dynflag_proto_xtr_tcp_mask }, +}; + +#define ICE_DFLT_OUTER_TAG_TYPE ICE_AQ_VSI_OUTER_TAG_VLAN_9100 + +#define ICE_OS_DEFAULT_PKG_NAME "ICE OS Default Package" +#define ICE_COMMS_PKG_NAME "ICE COMMS Package" +#define ICE_MAX_RES_DESC_NUM 1024 + +int ice_logtype_init; +int ice_logtype_driver; +#ifdef RTE_LIBRTE_ICE_DEBUG_RX +int ice_logtype_rx; +#endif +#ifdef RTE_LIBRTE_ICE_DEBUG_TX +int ice_logtype_tx; +#endif +#ifdef RTE_LIBRTE_ICE_DEBUG_TX_FREE +int ice_logtype_tx_free; +#endif + +static int ice_dev_configure(struct rte_eth_dev *dev); +static int ice_dev_start(struct rte_eth_dev *dev); +static void ice_dev_stop(struct rte_eth_dev *dev); +static void ice_dev_close(struct rte_eth_dev *dev); +static int ice_dev_reset(struct rte_eth_dev *dev); +static int ice_dev_info_get(struct rte_eth_dev *dev, + struct rte_eth_dev_info *dev_info); +static int ice_link_update(struct rte_eth_dev *dev, + int wait_to_complete); +static int ice_dev_set_link_up(struct rte_eth_dev *dev); +static int ice_dev_set_link_down(struct rte_eth_dev *dev); + +static int ice_mtu_set(struct rte_eth_dev *dev, uint16_t mtu); +static int ice_vlan_offload_set(struct rte_eth_dev *dev, int mask); +static int ice_rss_reta_update(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size); +static int ice_rss_reta_query(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size); +static int ice_rss_hash_update(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf); +static int ice_rss_hash_conf_get(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf); +static int ice_promisc_enable(struct rte_eth_dev *dev); +static int ice_promisc_disable(struct rte_eth_dev *dev); +static int ice_allmulti_enable(struct rte_eth_dev *dev); +static int ice_allmulti_disable(struct rte_eth_dev *dev); +static int ice_vlan_filter_set(struct rte_eth_dev *dev, + uint16_t vlan_id, + int on); +static int ice_macaddr_set(struct rte_eth_dev *dev, + struct rte_ether_addr *mac_addr); +static int ice_macaddr_add(struct rte_eth_dev *dev, + struct rte_ether_addr *mac_addr, + __rte_unused uint32_t index, + uint32_t pool); +static void ice_macaddr_remove(struct rte_eth_dev *dev, uint32_t index); +static int ice_rx_queue_intr_enable(struct rte_eth_dev *dev, + uint16_t queue_id); +static int ice_rx_queue_intr_disable(struct rte_eth_dev *dev, + uint16_t queue_id); +static int ice_fw_version_get(struct rte_eth_dev *dev, char *fw_version, + size_t fw_size); +static int ice_vlan_pvid_set(struct rte_eth_dev *dev, + uint16_t pvid, int on); +static int ice_get_eeprom_length(struct rte_eth_dev *dev); +static int ice_get_eeprom(struct rte_eth_dev *dev, + struct rte_dev_eeprom_info *eeprom); +static int ice_stats_get(struct rte_eth_dev *dev, + struct rte_eth_stats *stats); +static int ice_stats_reset(struct rte_eth_dev *dev); +static int ice_xstats_get(struct rte_eth_dev *dev, + struct rte_eth_xstat *xstats, unsigned int n); +static int 
ice_xstats_get_names(struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, + unsigned int limit); +static int ice_dev_filter_ctrl(struct rte_eth_dev *dev, + enum rte_filter_type filter_type, + enum rte_filter_op filter_op, + void *arg); +static int ice_dev_udp_tunnel_port_add(struct rte_eth_dev *dev, + struct rte_eth_udp_tunnel *udp_tunnel); +static int ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev, + struct rte_eth_udp_tunnel *udp_tunnel); + +static const struct rte_pci_id pci_id_ice_map[] = { + { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_BACKPLANE) }, + { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP) }, + { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP) }, + { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_BACKPLANE) }, + { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_QSFP) }, + { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_SFP) }, + { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_BACKPLANE) }, + { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_QSFP) }, + { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_SFP) }, + { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_10G_BASE_T) }, + { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_SGMII) }, + { .vendor_id = 0, /* sentinel */ }, +}; + +static const struct eth_dev_ops ice_eth_dev_ops = { + .dev_configure = ice_dev_configure, + .dev_start = ice_dev_start, + .dev_stop = ice_dev_stop, + .dev_close = ice_dev_close, + .dev_reset = ice_dev_reset, + .dev_set_link_up = ice_dev_set_link_up, + .dev_set_link_down = ice_dev_set_link_down, + .rx_queue_start = ice_rx_queue_start, + .rx_queue_stop = ice_rx_queue_stop, + .tx_queue_start = ice_tx_queue_start, + .tx_queue_stop = ice_tx_queue_stop, + .rx_queue_setup = ice_rx_queue_setup, + .rx_queue_release = ice_rx_queue_release, + .tx_queue_setup = ice_tx_queue_setup, + .tx_queue_release = ice_tx_queue_release, + .dev_infos_get = ice_dev_info_get, + .dev_supported_ptypes_get = ice_dev_supported_ptypes_get, + .link_update = ice_link_update, + .mtu_set = ice_mtu_set, + .mac_addr_set = ice_macaddr_set, + .mac_addr_add = ice_macaddr_add, + .mac_addr_remove = ice_macaddr_remove, + .vlan_filter_set = ice_vlan_filter_set, + .vlan_offload_set = ice_vlan_offload_set, + .reta_update = ice_rss_reta_update, + .reta_query = ice_rss_reta_query, + .rss_hash_update = ice_rss_hash_update, + .rss_hash_conf_get = ice_rss_hash_conf_get, + .promiscuous_enable = ice_promisc_enable, + .promiscuous_disable = ice_promisc_disable, + .allmulticast_enable = ice_allmulti_enable, + .allmulticast_disable = ice_allmulti_disable, + .rx_queue_intr_enable = ice_rx_queue_intr_enable, + .rx_queue_intr_disable = ice_rx_queue_intr_disable, + .fw_version_get = ice_fw_version_get, + .vlan_pvid_set = ice_vlan_pvid_set, + .rxq_info_get = ice_rxq_info_get, + .txq_info_get = ice_txq_info_get, + .rx_burst_mode_get = ice_rx_burst_mode_get, + .tx_burst_mode_get = ice_tx_burst_mode_get, + .get_eeprom_length = ice_get_eeprom_length, + .get_eeprom = ice_get_eeprom, + .rx_queue_count = ice_rx_queue_count, + .rx_descriptor_status = ice_rx_descriptor_status, + .tx_descriptor_status = ice_tx_descriptor_status, + .stats_get = ice_stats_get, + .stats_reset = ice_stats_reset, + .xstats_get = ice_xstats_get, + .xstats_get_names = ice_xstats_get_names, + .xstats_reset = ice_stats_reset, + .filter_ctrl = ice_dev_filter_ctrl, + .udp_tunnel_port_add = ice_dev_udp_tunnel_port_add, + .udp_tunnel_port_del = ice_dev_udp_tunnel_port_del, + .tx_done_cleanup = 
ice_tx_done_cleanup, +}; + +/* store statistics names and its offset in stats structure */ +struct ice_xstats_name_off { + char name[RTE_ETH_XSTATS_NAME_SIZE]; + unsigned int offset; +}; + +static const struct ice_xstats_name_off ice_stats_strings[] = { + {"rx_unicast_packets", offsetof(struct ice_eth_stats, rx_unicast)}, + {"rx_multicast_packets", offsetof(struct ice_eth_stats, rx_multicast)}, + {"rx_broadcast_packets", offsetof(struct ice_eth_stats, rx_broadcast)}, + {"rx_dropped_packets", offsetof(struct ice_eth_stats, rx_discards)}, + {"rx_unknown_protocol_packets", offsetof(struct ice_eth_stats, + rx_unknown_protocol)}, + {"tx_unicast_packets", offsetof(struct ice_eth_stats, tx_unicast)}, + {"tx_multicast_packets", offsetof(struct ice_eth_stats, tx_multicast)}, + {"tx_broadcast_packets", offsetof(struct ice_eth_stats, tx_broadcast)}, + {"tx_dropped_packets", offsetof(struct ice_eth_stats, tx_discards)}, +}; + +#define ICE_NB_ETH_XSTATS (sizeof(ice_stats_strings) / \ + sizeof(ice_stats_strings[0])) + +static const struct ice_xstats_name_off ice_hw_port_strings[] = { + {"tx_link_down_dropped", offsetof(struct ice_hw_port_stats, + tx_dropped_link_down)}, + {"rx_crc_errors", offsetof(struct ice_hw_port_stats, crc_errors)}, + {"rx_illegal_byte_errors", offsetof(struct ice_hw_port_stats, + illegal_bytes)}, + {"rx_error_bytes", offsetof(struct ice_hw_port_stats, error_bytes)}, + {"mac_local_errors", offsetof(struct ice_hw_port_stats, + mac_local_faults)}, + {"mac_remote_errors", offsetof(struct ice_hw_port_stats, + mac_remote_faults)}, + {"rx_len_errors", offsetof(struct ice_hw_port_stats, + rx_len_errors)}, + {"tx_xon_packets", offsetof(struct ice_hw_port_stats, link_xon_tx)}, + {"rx_xon_packets", offsetof(struct ice_hw_port_stats, link_xon_rx)}, + {"tx_xoff_packets", offsetof(struct ice_hw_port_stats, link_xoff_tx)}, + {"rx_xoff_packets", offsetof(struct ice_hw_port_stats, link_xoff_rx)}, + {"rx_size_64_packets", offsetof(struct ice_hw_port_stats, rx_size_64)}, + {"rx_size_65_to_127_packets", offsetof(struct ice_hw_port_stats, + rx_size_127)}, + {"rx_size_128_to_255_packets", offsetof(struct ice_hw_port_stats, + rx_size_255)}, + {"rx_size_256_to_511_packets", offsetof(struct ice_hw_port_stats, + rx_size_511)}, + {"rx_size_512_to_1023_packets", offsetof(struct ice_hw_port_stats, + rx_size_1023)}, + {"rx_size_1024_to_1522_packets", offsetof(struct ice_hw_port_stats, + rx_size_1522)}, + {"rx_size_1523_to_max_packets", offsetof(struct ice_hw_port_stats, + rx_size_big)}, + {"rx_undersized_errors", offsetof(struct ice_hw_port_stats, + rx_undersize)}, + {"rx_oversize_errors", offsetof(struct ice_hw_port_stats, + rx_oversize)}, + {"rx_mac_short_pkt_dropped", offsetof(struct ice_hw_port_stats, + mac_short_pkt_dropped)}, + {"rx_fragmented_errors", offsetof(struct ice_hw_port_stats, + rx_fragments)}, + {"rx_jabber_errors", offsetof(struct ice_hw_port_stats, rx_jabber)}, + {"tx_size_64_packets", offsetof(struct ice_hw_port_stats, tx_size_64)}, + {"tx_size_65_to_127_packets", offsetof(struct ice_hw_port_stats, + tx_size_127)}, + {"tx_size_128_to_255_packets", offsetof(struct ice_hw_port_stats, + tx_size_255)}, + {"tx_size_256_to_511_packets", offsetof(struct ice_hw_port_stats, + tx_size_511)}, + {"tx_size_512_to_1023_packets", offsetof(struct ice_hw_port_stats, + tx_size_1023)}, + {"tx_size_1024_to_1522_packets", offsetof(struct ice_hw_port_stats, + tx_size_1522)}, + {"tx_size_1523_to_max_packets", offsetof(struct ice_hw_port_stats, + tx_size_big)}, +}; + +#define ICE_NB_HW_PORT_XSTATS 
(sizeof(ice_hw_port_strings) / \ + sizeof(ice_hw_port_strings[0])) + +static void +ice_init_controlq_parameter(struct ice_hw *hw) +{ + /* fields for adminq */ + hw->adminq.num_rq_entries = ICE_ADMINQ_LEN; + hw->adminq.num_sq_entries = ICE_ADMINQ_LEN; + hw->adminq.rq_buf_size = ICE_ADMINQ_BUF_SZ; + hw->adminq.sq_buf_size = ICE_ADMINQ_BUF_SZ; + + /* fields for mailboxq, DPDK used as PF host */ + hw->mailboxq.num_rq_entries = ICE_MAILBOXQ_LEN; + hw->mailboxq.num_sq_entries = ICE_MAILBOXQ_LEN; + hw->mailboxq.rq_buf_size = ICE_MAILBOXQ_BUF_SZ; + hw->mailboxq.sq_buf_size = ICE_MAILBOXQ_BUF_SZ; +} + +static int +lookup_proto_xtr_type(const char *xtr_name) +{ + static struct { + const char *name; + enum proto_xtr_type type; + } xtr_type_map[] = { + { "vlan", PROTO_XTR_VLAN }, + { "ipv4", PROTO_XTR_IPV4 }, + { "ipv6", PROTO_XTR_IPV6 }, + { "ipv6_flow", PROTO_XTR_IPV6_FLOW }, + { "tcp", PROTO_XTR_TCP }, + }; + uint32_t i; + + for (i = 0; i < RTE_DIM(xtr_type_map); i++) { + if (strcmp(xtr_name, xtr_type_map[i].name) == 0) + return xtr_type_map[i].type; + } + + return -1; +} + +/* + * Parse elem, the elem could be single number/range or '(' ')' group + * 1) A single number elem, it's just a simple digit. e.g. 9 + * 2) A single range elem, two digits with a '-' between. e.g. 2-6 + * 3) A group elem, combines multiple 1) or 2) with '( )'. e.g (0,2-4,6) + * Within group elem, '-' used for a range separator; + * ',' used for a single number. + */ +static int +parse_queue_set(const char *input, int xtr_type, struct ice_devargs *devargs) +{ + const char *str = input; + char *end = NULL; + uint32_t min, max; + uint32_t idx; + + while (isblank(*str)) + str++; + + if (!isdigit(*str) && *str != '(') + return -1; + + /* process single number or single range of number */ + if (*str != '(') { + errno = 0; + idx = strtoul(str, &end, 10); + if (errno || end == NULL || idx >= ICE_MAX_QUEUE_NUM) + return -1; + + while (isblank(*end)) + end++; + + min = idx; + max = idx; + + /* process single - */ + if (*end == '-') { + end++; + while (isblank(*end)) + end++; + if (!isdigit(*end)) + return -1; + + errno = 0; + idx = strtoul(end, &end, 10); + if (errno || end == NULL || idx >= ICE_MAX_QUEUE_NUM) + return -1; + + max = idx; + while (isblank(*end)) + end++; + } + + if (*end != ':') + return -1; + + for (idx = RTE_MIN(min, max); + idx <= RTE_MAX(min, max); idx++) + devargs->proto_xtr[idx] = xtr_type; + + return 0; + } + + /* process set within bracket */ + str++; + while (isblank(*str)) + str++; + if (*str == '\0') + return -1; + + min = ICE_MAX_QUEUE_NUM; + do { + /* go ahead to the first digit */ + while (isblank(*str)) + str++; + if (!isdigit(*str)) + return -1; + + /* get the digit value */ + errno = 0; + idx = strtoul(str, &end, 10); + if (errno || end == NULL || idx >= ICE_MAX_QUEUE_NUM) + return -1; + + /* go ahead to separator '-',',' and ')' */ + while (isblank(*end)) + end++; + if (*end == '-') { + if (min == ICE_MAX_QUEUE_NUM) + min = idx; + else /* avoid continuous '-' */ + return -1; + } else if (*end == ',' || *end == ')') { + max = idx; + if (min == ICE_MAX_QUEUE_NUM) + min = idx; + + for (idx = RTE_MIN(min, max); + idx <= RTE_MAX(min, max); idx++) + devargs->proto_xtr[idx] = xtr_type; + + min = ICE_MAX_QUEUE_NUM; + } else { + return -1; + } + + str = end + 1; + } while (*end != ')' && *end != '\0'); + + return 0; +} + +static int +parse_queue_proto_xtr(const char *queues, struct ice_devargs *devargs) +{ + const char *queue_start; + uint32_t idx; + int xtr_type; + char xtr_name[32]; + + while 
(isblank(*queues)) + queues++; + + if (*queues != '[') { + xtr_type = lookup_proto_xtr_type(queues); + if (xtr_type < 0) + return -1; + + devargs->proto_xtr_dflt = xtr_type; + + return 0; + } + + queues++; + do { + while (isblank(*queues)) + queues++; + if (*queues == '\0') + return -1; + + queue_start = queues; + + /* go across a complete bracket */ + if (*queue_start == '(') { + queues += strcspn(queues, ")"); + if (*queues != ')') + return -1; + } + + /* scan the separator ':' */ + queues += strcspn(queues, ":"); + if (*queues++ != ':') + return -1; + while (isblank(*queues)) + queues++; + + for (idx = 0; ; idx++) { + if (isblank(queues[idx]) || + queues[idx] == ',' || + queues[idx] == ']' || + queues[idx] == '\0') + break; + + if (idx > sizeof(xtr_name) - 2) + return -1; + + xtr_name[idx] = queues[idx]; + } + xtr_name[idx] = '\0'; + xtr_type = lookup_proto_xtr_type(xtr_name); + if (xtr_type < 0) + return -1; + + queues += idx; + + while (isblank(*queues) || *queues == ',' || *queues == ']') + queues++; + + if (parse_queue_set(queue_start, xtr_type, devargs) < 0) + return -1; + } while (*queues != '\0'); + + return 0; +} + +static int +handle_proto_xtr_arg(__rte_unused const char *key, const char *value, + void *extra_args) +{ + struct ice_devargs *devargs = extra_args; + + if (value == NULL || extra_args == NULL) + return -EINVAL; + + if (parse_queue_proto_xtr(value, devargs) < 0) { + PMD_DRV_LOG(ERR, + "The protocol extraction parameter is wrong : '%s'", + value); + return -1; + } + + return 0; +} + +static bool +ice_proto_xtr_support(struct ice_hw *hw) +{ +#define FLX_REG(val, fld, idx) \ + (((val) & GLFLXP_RXDID_FLX_WRD_##idx##_##fld##_M) >> \ + GLFLXP_RXDID_FLX_WRD_##idx##_##fld##_S) + static struct { + uint32_t rxdid; + uint16_t protid_0; + uint16_t protid_1; + } xtr_sets[] = { + { ICE_RXDID_COMMS_AUX_VLAN, ICE_PROT_EVLAN_O, ICE_PROT_VLAN_O }, + { ICE_RXDID_COMMS_AUX_IPV4, ICE_PROT_IPV4_OF_OR_S, + ICE_PROT_IPV4_OF_OR_S }, + { ICE_RXDID_COMMS_AUX_IPV6, ICE_PROT_IPV6_OF_OR_S, + ICE_PROT_IPV6_OF_OR_S }, + { ICE_RXDID_COMMS_AUX_IPV6_FLOW, ICE_PROT_IPV6_OF_OR_S, + ICE_PROT_IPV6_OF_OR_S }, + { ICE_RXDID_COMMS_AUX_TCP, ICE_PROT_TCP_IL, ICE_PROT_ID_INVAL }, + }; + uint32_t i; + + for (i = 0; i < RTE_DIM(xtr_sets); i++) { + uint32_t rxdid = xtr_sets[i].rxdid; + uint32_t v; + + if (xtr_sets[i].protid_0 != ICE_PROT_ID_INVAL) { + v = ICE_READ_REG(hw, GLFLXP_RXDID_FLX_WRD_4(rxdid)); + + if (FLX_REG(v, PROT_MDID, 4) != xtr_sets[i].protid_0 || + FLX_REG(v, RXDID_OPCODE, 4) != ICE_RX_OPC_EXTRACT) + return false; + } + + if (xtr_sets[i].protid_1 != ICE_PROT_ID_INVAL) { + v = ICE_READ_REG(hw, GLFLXP_RXDID_FLX_WRD_5(rxdid)); + + if (FLX_REG(v, PROT_MDID, 5) != xtr_sets[i].protid_1 || + FLX_REG(v, RXDID_OPCODE, 5) != ICE_RX_OPC_EXTRACT) + return false; + } + } + + return true; +} + +static int +ice_res_pool_init(struct ice_res_pool_info *pool, uint32_t base, + uint32_t num) +{ + struct pool_entry *entry; + + if (!pool || !num) + return -EINVAL; + + entry = rte_zmalloc(NULL, sizeof(*entry), 0); + if (!entry) { + PMD_INIT_LOG(ERR, + "Failed to allocate memory for resource pool"); + return -ENOMEM; + } + + /* queue heap initialize */ + pool->num_free = num; + pool->num_alloc = 0; + pool->base = base; + LIST_INIT(&pool->alloc_list); + LIST_INIT(&pool->free_list); + + /* Initialize element */ + entry->base = 0; + entry->len = num; + + LIST_INSERT_HEAD(&pool->free_list, entry, next); + return 0; +} + +static int +ice_res_pool_alloc(struct ice_res_pool_info *pool, + uint16_t num) +{ + struct pool_entry 
*entry, *valid_entry; + + if (!pool || !num) { + PMD_INIT_LOG(ERR, "Invalid parameter"); + return -EINVAL; + } + + if (pool->num_free < num) { + PMD_INIT_LOG(ERR, "No resource. ask:%u, available:%u", + num, pool->num_free); + return -ENOMEM; + } + + valid_entry = NULL; + /* Lookup in free list and find most fit one */ + LIST_FOREACH(entry, &pool->free_list, next) { + if (entry->len >= num) { + /* Find best one */ + if (entry->len == num) { + valid_entry = entry; + break; + } + if (!valid_entry || + valid_entry->len > entry->len) + valid_entry = entry; + } + } + + /* Not find one to satisfy the request, return */ + if (!valid_entry) { + PMD_INIT_LOG(ERR, "No valid entry found"); + return -ENOMEM; + } + /** + * The entry have equal queue number as requested, + * remove it from alloc_list. + */ + if (valid_entry->len == num) { + LIST_REMOVE(valid_entry, next); + } else { + /** + * The entry have more numbers than requested, + * create a new entry for alloc_list and minus its + * queue base and number in free_list. + */ + entry = rte_zmalloc(NULL, sizeof(*entry), 0); + if (!entry) { + PMD_INIT_LOG(ERR, + "Failed to allocate memory for " + "resource pool"); + return -ENOMEM; + } + entry->base = valid_entry->base; + entry->len = num; + valid_entry->base += num; + valid_entry->len -= num; + valid_entry = entry; + } + + /* Insert it into alloc list, not sorted */ + LIST_INSERT_HEAD(&pool->alloc_list, valid_entry, next); + + pool->num_free -= valid_entry->len; + pool->num_alloc += valid_entry->len; + + return valid_entry->base + pool->base; +} + +static void +ice_res_pool_destroy(struct ice_res_pool_info *pool) +{ + struct pool_entry *entry, *next_entry; + + if (!pool) + return; + + for (entry = LIST_FIRST(&pool->alloc_list); + entry && (next_entry = LIST_NEXT(entry, next), 1); + entry = next_entry) { + LIST_REMOVE(entry, next); + rte_free(entry); + } + + for (entry = LIST_FIRST(&pool->free_list); + entry && (next_entry = LIST_NEXT(entry, next), 1); + entry = next_entry) { + LIST_REMOVE(entry, next); + rte_free(entry); + } + + pool->num_free = 0; + pool->num_alloc = 0; + pool->base = 0; + LIST_INIT(&pool->alloc_list); + LIST_INIT(&pool->free_list); +} + +static void +ice_vsi_config_default_rss(struct ice_aqc_vsi_props *info) +{ + /* Set VSI LUT selection */ + info->q_opt_rss = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI & + ICE_AQ_VSI_Q_OPT_RSS_LUT_M; + /* Set Hash scheme */ + info->q_opt_rss |= ICE_AQ_VSI_Q_OPT_RSS_TPLZ & + ICE_AQ_VSI_Q_OPT_RSS_HASH_M; + /* enable TC */ + info->q_opt_tc = ICE_AQ_VSI_Q_OPT_TC_OVR_M; +} + +static enum ice_status +ice_vsi_config_tc_queue_mapping(struct ice_vsi *vsi, + struct ice_aqc_vsi_props *info, + uint8_t enabled_tcmap) +{ + uint16_t bsf, qp_idx; + + /* default tc 0 now. Multi-TC supporting need to be done later. + * Configure TC and queue mapping parameters, for enabled TC, + * allocate qpnum_per_tc queues to this traffic. 
+ */ + if (enabled_tcmap != 0x01) { + PMD_INIT_LOG(ERR, "only TC0 is supported"); + return -ENOTSUP; + } + + vsi->nb_qps = RTE_MIN(vsi->nb_qps, ICE_MAX_Q_PER_TC); + bsf = rte_bsf32(vsi->nb_qps); + /* Adjust the queue number to actual queues that can be applied */ + vsi->nb_qps = 0x1 << bsf; + + qp_idx = 0; + /* Set tc and queue mapping with VSI */ + info->tc_mapping[0] = rte_cpu_to_le_16((qp_idx << + ICE_AQ_VSI_TC_Q_OFFSET_S) | + (bsf << ICE_AQ_VSI_TC_Q_NUM_S)); + + /* Associate queue number with VSI */ + info->mapping_flags |= rte_cpu_to_le_16(ICE_AQ_VSI_Q_MAP_CONTIG); + info->q_mapping[0] = rte_cpu_to_le_16(vsi->base_queue); + info->q_mapping[1] = rte_cpu_to_le_16(vsi->nb_qps); + info->valid_sections |= + rte_cpu_to_le_16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID); + /* Set the info.ingress_table and info.egress_table + * for UP translate table. Now just set it to 1:1 map by default + * -- 0b 111 110 101 100 011 010 001 000 == 0xFAC688 + */ +#define ICE_TC_QUEUE_TABLE_DFLT 0x00FAC688 + info->ingress_table = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT); + info->egress_table = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT); + info->outer_up_table = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT); + return 0; +} + +static int +ice_init_mac_address(struct rte_eth_dev *dev) +{ + struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (!rte_is_unicast_ether_addr + ((struct rte_ether_addr *)hw->port_info[0].mac.lan_addr)) { + PMD_INIT_LOG(ERR, "Invalid MAC address"); + return -EINVAL; + } + + rte_ether_addr_copy( + (struct rte_ether_addr *)hw->port_info[0].mac.lan_addr, + (struct rte_ether_addr *)hw->port_info[0].mac.perm_addr); + + dev->data->mac_addrs = + rte_zmalloc(NULL, sizeof(struct rte_ether_addr), 0); + if (!dev->data->mac_addrs) { + PMD_INIT_LOG(ERR, + "Failed to allocate memory to store mac address"); + return -ENOMEM; + } + /* store it to dev data */ + rte_ether_addr_copy( + (struct rte_ether_addr *)hw->port_info[0].mac.perm_addr, + &dev->data->mac_addrs[0]); + return 0; +} + +/* Find out specific MAC filter */ +static struct ice_mac_filter * +ice_find_mac_filter(struct ice_vsi *vsi, struct rte_ether_addr *macaddr) +{ + struct ice_mac_filter *f; + + TAILQ_FOREACH(f, &vsi->mac_list, next) { + if (rte_is_same_ether_addr(macaddr, &f->mac_info.mac_addr)) + return f; + } + + return NULL; +} + +static int +ice_add_mac_filter(struct ice_vsi *vsi, struct rte_ether_addr *mac_addr) +{ + struct ice_fltr_list_entry *m_list_itr = NULL; + struct ice_mac_filter *f; + struct LIST_HEAD_TYPE list_head; + struct ice_hw *hw = ICE_VSI_TO_HW(vsi); + int ret = 0; + + /* If it's added and configured, return */ + f = ice_find_mac_filter(vsi, mac_addr); + if (f) { + PMD_DRV_LOG(INFO, "This MAC filter already exists."); + return 0; + } + + INIT_LIST_HEAD(&list_head); + + m_list_itr = (struct ice_fltr_list_entry *) + ice_malloc(hw, sizeof(*m_list_itr)); + if (!m_list_itr) { + ret = -ENOMEM; + goto DONE; + } + ice_memcpy(m_list_itr->fltr_info.l_data.mac.mac_addr, + mac_addr, ETH_ALEN, ICE_NONDMA_TO_NONDMA); + m_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI; + m_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI; + m_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_MAC; + m_list_itr->fltr_info.flag = ICE_FLTR_TX; + m_list_itr->fltr_info.vsi_handle = vsi->idx; + + LIST_ADD(&m_list_itr->list_entry, &list_head); + + /* Add the mac */ + ret = ice_add_mac(hw, &list_head); + if (ret != ICE_SUCCESS) { + PMD_DRV_LOG(ERR, "Failed to add MAC filter"); + ret = -EINVAL; + goto DONE; + } + /* Add the mac addr into mac list */ + f = 
rte_zmalloc(NULL, sizeof(*f), 0); + if (!f) { + PMD_DRV_LOG(ERR, "failed to allocate memory"); + ret = -ENOMEM; + goto DONE; + } + rte_ether_addr_copy(mac_addr, &f->mac_info.mac_addr); + TAILQ_INSERT_TAIL(&vsi->mac_list, f, next); + vsi->mac_num++; + + ret = 0; + +DONE: + rte_free(m_list_itr); + return ret; +} + +static int +ice_remove_mac_filter(struct ice_vsi *vsi, struct rte_ether_addr *mac_addr) +{ + struct ice_fltr_list_entry *m_list_itr = NULL; + struct ice_mac_filter *f; + struct LIST_HEAD_TYPE list_head; + struct ice_hw *hw = ICE_VSI_TO_HW(vsi); + int ret = 0; + + /* Can't find it, return an error */ + f = ice_find_mac_filter(vsi, mac_addr); + if (!f) + return -EINVAL; + + INIT_LIST_HEAD(&list_head); + + m_list_itr = (struct ice_fltr_list_entry *) + ice_malloc(hw, sizeof(*m_list_itr)); + if (!m_list_itr) { + ret = -ENOMEM; + goto DONE; + } + ice_memcpy(m_list_itr->fltr_info.l_data.mac.mac_addr, + mac_addr, ETH_ALEN, ICE_NONDMA_TO_NONDMA); + m_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI; + m_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI; + m_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_MAC; + m_list_itr->fltr_info.flag = ICE_FLTR_TX; + m_list_itr->fltr_info.vsi_handle = vsi->idx; + + LIST_ADD(&m_list_itr->list_entry, &list_head); + + /* remove the mac filter */ + ret = ice_remove_mac(hw, &list_head); + if (ret != ICE_SUCCESS) { + PMD_DRV_LOG(ERR, "Failed to remove MAC filter"); + ret = -EINVAL; + goto DONE; + } + + /* Remove the mac addr from mac list */ + TAILQ_REMOVE(&vsi->mac_list, f, next); + rte_free(f); + vsi->mac_num--; + + ret = 0; +DONE: + rte_free(m_list_itr); + return ret; +} + +/* Find out specific VLAN filter */ +static struct ice_vlan_filter * +ice_find_vlan_filter(struct ice_vsi *vsi, uint16_t vlan_id) +{ + struct ice_vlan_filter *f; + + TAILQ_FOREACH(f, &vsi->vlan_list, next) { + if (vlan_id == f->vlan_info.vlan_id) + return f; + } + + return NULL; +} + +static int +ice_add_vlan_filter(struct ice_vsi *vsi, uint16_t vlan_id) +{ + struct ice_fltr_list_entry *v_list_itr = NULL; + struct ice_vlan_filter *f; + struct LIST_HEAD_TYPE list_head; + struct ice_hw *hw; + int ret = 0; + + if (!vsi || vlan_id > RTE_ETHER_MAX_VLAN_ID) + return -EINVAL; + + hw = ICE_VSI_TO_HW(vsi); + + /* If it's added and configured, return. 
*/ + f = ice_find_vlan_filter(vsi, vlan_id); + if (f) { + PMD_DRV_LOG(INFO, "This VLAN filter already exists."); + return 0; + } + + if (!vsi->vlan_anti_spoof_on && !vsi->vlan_filter_on) + return 0; + + INIT_LIST_HEAD(&list_head); + + v_list_itr = (struct ice_fltr_list_entry *) + ice_malloc(hw, sizeof(*v_list_itr)); + if (!v_list_itr) { + ret = -ENOMEM; + goto DONE; + } + v_list_itr->fltr_info.l_data.vlan.vlan_id = vlan_id; + v_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI; + v_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI; + v_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_VLAN; + v_list_itr->fltr_info.flag = ICE_FLTR_TX; + v_list_itr->fltr_info.vsi_handle = vsi->idx; + + LIST_ADD(&v_list_itr->list_entry, &list_head); + + /* Add the vlan */ + ret = ice_add_vlan(hw, &list_head); + if (ret != ICE_SUCCESS) { + PMD_DRV_LOG(ERR, "Failed to add VLAN filter"); + ret = -EINVAL; + goto DONE; + } + + /* Add vlan into vlan list */ + f = rte_zmalloc(NULL, sizeof(*f), 0); + if (!f) { + PMD_DRV_LOG(ERR, "failed to allocate memory"); + ret = -ENOMEM; + goto DONE; + } + f->vlan_info.vlan_id = vlan_id; + TAILQ_INSERT_TAIL(&vsi->vlan_list, f, next); + vsi->vlan_num++; + + ret = 0; + +DONE: + rte_free(v_list_itr); + return ret; +} + +static int +ice_remove_vlan_filter(struct ice_vsi *vsi, uint16_t vlan_id) +{ + struct ice_fltr_list_entry *v_list_itr = NULL; + struct ice_vlan_filter *f; + struct LIST_HEAD_TYPE list_head; + struct ice_hw *hw; + int ret = 0; + + /** + * Vlan 0 is the generic filter for untagged packets + * and can't be removed. + */ + if (!vsi || vlan_id == 0 || vlan_id > RTE_ETHER_MAX_VLAN_ID) + return -EINVAL; + + hw = ICE_VSI_TO_HW(vsi); + + /* Can't find it, return an error */ + f = ice_find_vlan_filter(vsi, vlan_id); + if (!f) + return -EINVAL; + + INIT_LIST_HEAD(&list_head); + + v_list_itr = (struct ice_fltr_list_entry *) + ice_malloc(hw, sizeof(*v_list_itr)); + if (!v_list_itr) { + ret = -ENOMEM; + goto DONE; + } + + v_list_itr->fltr_info.l_data.vlan.vlan_id = vlan_id; + v_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI; + v_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI; + v_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_VLAN; + v_list_itr->fltr_info.flag = ICE_FLTR_TX; + v_list_itr->fltr_info.vsi_handle = vsi->idx; + + LIST_ADD(&v_list_itr->list_entry, &list_head); + + /* remove the vlan filter */ + ret = ice_remove_vlan(hw, &list_head); + if (ret != ICE_SUCCESS) { + PMD_DRV_LOG(ERR, "Failed to remove VLAN filter"); + ret = -EINVAL; + goto DONE; + } + + /* Remove the vlan id from vlan list */ + TAILQ_REMOVE(&vsi->vlan_list, f, next); + rte_free(f); + vsi->vlan_num--; + + ret = 0; +DONE: + rte_free(v_list_itr); + return ret; +} + +static int +ice_remove_all_mac_vlan_filters(struct ice_vsi *vsi) +{ + struct ice_mac_filter *m_f; + struct ice_vlan_filter *v_f; + int ret = 0; + + if (!vsi || !vsi->mac_num) + return -EINVAL; + + TAILQ_FOREACH(m_f, &vsi->mac_list, next) { + ret = ice_remove_mac_filter(vsi, &m_f->mac_info.mac_addr); + if (ret != ICE_SUCCESS) { + ret = -EINVAL; + goto DONE; + } + } + + if (vsi->vlan_num == 0) + return 0; + + TAILQ_FOREACH(v_f, &vsi->vlan_list, next) { + ret = ice_remove_vlan_filter(vsi, v_f->vlan_info.vlan_id); + if (ret != ICE_SUCCESS) { + ret = -EINVAL; + goto DONE; + } + } + +DONE: + return ret; +} + +static int +ice_vsi_config_qinq_insertion(struct ice_vsi *vsi, bool on) +{ + struct ice_hw *hw = ICE_VSI_TO_HW(vsi); + struct ice_vsi_ctx ctxt; + uint8_t qinq_flags; + int ret = 0; + + /* Check if it has been already on or off */ + if (vsi->info.valid_sections & + 
rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID)) { + if (on) { + if ((vsi->info.outer_tag_flags & + ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST) == + ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST) + return 0; /* already on */ + } else { + if (!(vsi->info.outer_tag_flags & + ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST)) + return 0; /* already off */ + } + } + + if (on) + qinq_flags = ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST; + else + qinq_flags = 0; + /* clear global insertion and use per packet insertion */ + vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_INSERT); + vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST); + vsi->info.outer_tag_flags |= qinq_flags; + /* use default vlan type 0x8100 */ + vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_TYPE_M); + vsi->info.outer_tag_flags |= ICE_DFLT_OUTER_TAG_TYPE << + ICE_AQ_VSI_OUTER_TAG_TYPE_S; + (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info)); + ctxt.info.valid_sections = + rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID); + ctxt.vsi_num = vsi->vsi_id; + ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL); + if (ret) { + PMD_DRV_LOG(INFO, + "Update VSI failed to %s qinq stripping", + on ? "enable" : "disable"); + return -EINVAL; + } + + vsi->info.valid_sections |= + rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID); + + return ret; +} + +static int +ice_vsi_config_qinq_stripping(struct ice_vsi *vsi, bool on) +{ + struct ice_hw *hw = ICE_VSI_TO_HW(vsi); + struct ice_vsi_ctx ctxt; + uint8_t qinq_flags; + int ret = 0; + + /* Check if it has been already on or off */ + if (vsi->info.valid_sections & + rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID)) { + if (on) { + if ((vsi->info.outer_tag_flags & + ICE_AQ_VSI_OUTER_TAG_MODE_M) == + ICE_AQ_VSI_OUTER_TAG_COPY) + return 0; /* already on */ + } else { + if ((vsi->info.outer_tag_flags & + ICE_AQ_VSI_OUTER_TAG_MODE_M) == + ICE_AQ_VSI_OUTER_TAG_NOTHING) + return 0; /* already off */ + } + } + + if (on) + qinq_flags = ICE_AQ_VSI_OUTER_TAG_COPY; + else + qinq_flags = ICE_AQ_VSI_OUTER_TAG_NOTHING; + vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_MODE_M); + vsi->info.outer_tag_flags |= qinq_flags; + /* use default vlan type 0x8100 */ + vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_TYPE_M); + vsi->info.outer_tag_flags |= ICE_DFLT_OUTER_TAG_TYPE << + ICE_AQ_VSI_OUTER_TAG_TYPE_S; + (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info)); + ctxt.info.valid_sections = + rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID); + ctxt.vsi_num = vsi->vsi_id; + ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL); + if (ret) { + PMD_DRV_LOG(INFO, + "Update VSI failed to %s qinq stripping", + on ? 
"enable" : "disable"); + return -EINVAL; + } + + vsi->info.valid_sections |= + rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID); + + return ret; +} + +static int +ice_vsi_config_double_vlan(struct ice_vsi *vsi, int on) +{ + int ret; + + ret = ice_vsi_config_qinq_stripping(vsi, on); + if (ret) + PMD_DRV_LOG(ERR, "Fail to set qinq stripping - %d", ret); + + ret = ice_vsi_config_qinq_insertion(vsi, on); + if (ret) + PMD_DRV_LOG(ERR, "Fail to set qinq insertion - %d", ret); + + return ret; +} + +/* Enable IRQ0 */ +static void +ice_pf_enable_irq0(struct ice_hw *hw) +{ + /* reset the registers */ + ICE_WRITE_REG(hw, PFINT_OICR_ENA, 0); + ICE_READ_REG(hw, PFINT_OICR); + +#ifdef ICE_LSE_SPT + ICE_WRITE_REG(hw, PFINT_OICR_ENA, + (uint32_t)(PFINT_OICR_ENA_INT_ENA_M & + (~PFINT_OICR_LINK_STAT_CHANGE_M))); + + ICE_WRITE_REG(hw, PFINT_OICR_CTL, + (0 & PFINT_OICR_CTL_MSIX_INDX_M) | + ((0 << PFINT_OICR_CTL_ITR_INDX_S) & + PFINT_OICR_CTL_ITR_INDX_M) | + PFINT_OICR_CTL_CAUSE_ENA_M); + + ICE_WRITE_REG(hw, PFINT_FW_CTL, + (0 & PFINT_FW_CTL_MSIX_INDX_M) | + ((0 << PFINT_FW_CTL_ITR_INDX_S) & + PFINT_FW_CTL_ITR_INDX_M) | + PFINT_FW_CTL_CAUSE_ENA_M); +#else + ICE_WRITE_REG(hw, PFINT_OICR_ENA, PFINT_OICR_ENA_INT_ENA_M); +#endif + + ICE_WRITE_REG(hw, GLINT_DYN_CTL(0), + GLINT_DYN_CTL_INTENA_M | + GLINT_DYN_CTL_CLEARPBA_M | + GLINT_DYN_CTL_ITR_INDX_M); + + ice_flush(hw); +} + +/* Disable IRQ0 */ +static void +ice_pf_disable_irq0(struct ice_hw *hw) +{ + /* Disable all interrupt types */ + ICE_WRITE_REG(hw, GLINT_DYN_CTL(0), GLINT_DYN_CTL_WB_ON_ITR_M); + ice_flush(hw); +} + +#ifdef ICE_LSE_SPT +static void +ice_handle_aq_msg(struct rte_eth_dev *dev) +{ + struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ice_ctl_q_info *cq = &hw->adminq; + struct ice_rq_event_info event; + uint16_t pending, opcode; + int ret; + + event.buf_len = ICE_AQ_MAX_BUF_LEN; + event.msg_buf = rte_zmalloc(NULL, event.buf_len, 0); + if (!event.msg_buf) { + PMD_DRV_LOG(ERR, "Failed to allocate mem"); + return; + } + + pending = 1; + while (pending) { + ret = ice_clean_rq_elem(hw, cq, &event, &pending); + + if (ret != ICE_SUCCESS) { + PMD_DRV_LOG(INFO, + "Failed to read msg from AdminQ, " + "adminq_err: %u", + hw->adminq.sq_last_status); + break; + } + opcode = rte_le_to_cpu_16(event.desc.opcode); + + switch (opcode) { + case ice_aqc_opc_get_link_status: + ret = ice_link_update(dev, 0); + if (!ret) + _rte_eth_dev_callback_process + (dev, RTE_ETH_EVENT_INTR_LSC, NULL); + break; + default: + PMD_DRV_LOG(DEBUG, "Request %u is not supported yet", + opcode); + break; + } + } + rte_free(event.msg_buf); +} +#endif + +/** + * Interrupt handler triggered by NIC for handling + * specific interrupt. + * + * @param handle + * Pointer to interrupt handle. + * @param param + * The address of parameter (struct rte_eth_dev *) regsitered before. 
+ * + * @return + * void + */ +static void +ice_interrupt_handler(void *param) +{ + struct rte_eth_dev *dev = (struct rte_eth_dev *)param; + struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t oicr; + uint32_t reg; + uint8_t pf_num; + uint8_t event; + uint16_t queue; + int ret; +#ifdef ICE_LSE_SPT + uint32_t int_fw_ctl; +#endif + + /* Disable interrupt */ + ice_pf_disable_irq0(hw); + + /* read out interrupt causes */ + oicr = ICE_READ_REG(hw, PFINT_OICR); +#ifdef ICE_LSE_SPT + int_fw_ctl = ICE_READ_REG(hw, PFINT_FW_CTL); +#endif + + /* No interrupt event indicated */ + if (!(oicr & PFINT_OICR_INTEVENT_M)) { + PMD_DRV_LOG(INFO, "No interrupt event"); + goto done; + } + +#ifdef ICE_LSE_SPT + if (int_fw_ctl & PFINT_FW_CTL_INTEVENT_M) { + PMD_DRV_LOG(INFO, "FW_CTL: link state change event"); + ice_handle_aq_msg(dev); + } +#else + if (oicr & PFINT_OICR_LINK_STAT_CHANGE_M) { + PMD_DRV_LOG(INFO, "OICR: link state change event"); + ret = ice_link_update(dev, 0); + if (!ret) + _rte_eth_dev_callback_process + (dev, RTE_ETH_EVENT_INTR_LSC, NULL); + } +#endif + + if (oicr & PFINT_OICR_MAL_DETECT_M) { + PMD_DRV_LOG(WARNING, "OICR: MDD event"); + reg = ICE_READ_REG(hw, GL_MDET_TX_PQM); + if (reg & GL_MDET_TX_PQM_VALID_M) { + pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >> + GL_MDET_TX_PQM_PF_NUM_S; + event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >> + GL_MDET_TX_PQM_MAL_TYPE_S; + queue = (reg & GL_MDET_TX_PQM_QNUM_M) >> + GL_MDET_TX_PQM_QNUM_S; + + PMD_DRV_LOG(WARNING, "Malicious Driver Detection event " + "%d by PQM on TX queue %d PF# %d", + event, queue, pf_num); + } + + reg = ICE_READ_REG(hw, GL_MDET_TX_TCLAN); + if (reg & GL_MDET_TX_TCLAN_VALID_M) { + pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >> + GL_MDET_TX_TCLAN_PF_NUM_S; + event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >> + GL_MDET_TX_TCLAN_MAL_TYPE_S; + queue = (reg & GL_MDET_TX_TCLAN_QNUM_M) >> + GL_MDET_TX_TCLAN_QNUM_S; + + PMD_DRV_LOG(WARNING, "Malicious Driver Detection event " + "%d by TCLAN on TX queue %d PF# %d", + event, queue, pf_num); + } + } +done: + /* Enable interrupt */ + ice_pf_enable_irq0(hw); + rte_intr_ack(dev->intr_handle); +} + +static void +ice_init_proto_xtr(struct rte_eth_dev *dev) +{ + struct ice_adapter *ad = + ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct ice_hw *hw = ICE_PF_TO_HW(pf); + const struct proto_xtr_ol_flag *ol_flag; + bool proto_xtr_enable = false; + int offset; + uint16_t i; + + if (!ice_proto_xtr_support(hw)) { + PMD_DRV_LOG(NOTICE, "Protocol extraction is not supported"); + return; + } + + pf->proto_xtr = rte_zmalloc(NULL, pf->lan_nb_qps, 0); + if (unlikely(pf->proto_xtr == NULL)) { + PMD_DRV_LOG(ERR, "No memory for setting up protocol extraction table"); + return; + } + + for (i = 0; i < pf->lan_nb_qps; i++) { + pf->proto_xtr[i] = ad->devargs.proto_xtr[i] != PROTO_XTR_NONE ? 
+ ad->devargs.proto_xtr[i] : + ad->devargs.proto_xtr_dflt; + + if (pf->proto_xtr[i] != PROTO_XTR_NONE) { + uint8_t type = pf->proto_xtr[i]; + + ice_proto_xtr_ol_flag_params[type].required = true; + proto_xtr_enable = true; + } + } + + if (likely(!proto_xtr_enable)) + return; + + offset = rte_mbuf_dynfield_register(&ice_proto_xtr_metadata_param); + if (unlikely(offset == -1)) { + PMD_DRV_LOG(ERR, + "Protocol extraction metadata is disabled in mbuf with error %d", + -rte_errno); + return; + } + + PMD_DRV_LOG(DEBUG, + "Protocol extraction metadata offset in mbuf is : %d", + offset); + rte_net_ice_dynfield_proto_xtr_metadata_offs = offset; + + for (i = 0; i < RTE_DIM(ice_proto_xtr_ol_flag_params); i++) { + ol_flag = &ice_proto_xtr_ol_flag_params[i]; + + if (!ol_flag->required) + continue; + + offset = rte_mbuf_dynflag_register(&ol_flag->param); + if (unlikely(offset == -1)) { + PMD_DRV_LOG(ERR, + "Protocol extraction offload '%s' failed to register with error %d", + ol_flag->param.name, -rte_errno); + + rte_net_ice_dynfield_proto_xtr_metadata_offs = -1; + break; + } + + PMD_DRV_LOG(DEBUG, + "Protocol extraction offload '%s' offset in mbuf is : %d", + ol_flag->param.name, offset); + *ol_flag->ol_flag = 1ULL << offset; + } +} + +/* Initialize SW parameters of PF */ +static int +ice_pf_sw_init(struct rte_eth_dev *dev) +{ + struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct ice_hw *hw = ICE_PF_TO_HW(pf); + + pf->lan_nb_qp_max = + (uint16_t)RTE_MIN(hw->func_caps.common_cap.num_txq, + hw->func_caps.common_cap.num_rxq); + + pf->lan_nb_qps = pf->lan_nb_qp_max; + + ice_init_proto_xtr(dev); + + if (hw->func_caps.fd_fltr_guar > 0 || + hw->func_caps.fd_fltr_best_effort > 0) { + pf->flags |= ICE_FLAG_FDIR; + pf->fdir_nb_qps = ICE_DEFAULT_QP_NUM_FDIR; + pf->lan_nb_qps = pf->lan_nb_qp_max - pf->fdir_nb_qps; + } else { + pf->fdir_nb_qps = 0; + } + pf->fdir_qp_offset = 0; + + return 0; +} + +struct ice_vsi * +ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type) +{ + struct ice_hw *hw = ICE_PF_TO_HW(pf); + struct ice_vsi *vsi = NULL; + struct ice_vsi_ctx vsi_ctx; + int ret; + struct rte_ether_addr broadcast = { + .addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff} }; + struct rte_ether_addr mac_addr; + uint16_t max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; + uint8_t tc_bitmap = 0x1; + uint16_t cfg; + + /* hw->num_lports = 1 in NIC mode */ + vsi = rte_zmalloc(NULL, sizeof(struct ice_vsi), 0); + if (!vsi) + return NULL; + + vsi->idx = pf->next_vsi_idx; + pf->next_vsi_idx++; + vsi->type = type; + vsi->adapter = ICE_PF_TO_ADAPTER(pf); + vsi->max_macaddrs = ICE_NUM_MACADDR_MAX; + vsi->vlan_anti_spoof_on = 0; + vsi->vlan_filter_on = 1; + TAILQ_INIT(&vsi->mac_list); + TAILQ_INIT(&vsi->vlan_list); + + /* Be sync with ETH_RSS_RETA_SIZE_x maximum value definition */ + pf->hash_lut_size = hw->func_caps.common_cap.rss_table_size > + ETH_RSS_RETA_SIZE_512 ? 
ETH_RSS_RETA_SIZE_512 : + hw->func_caps.common_cap.rss_table_size; + pf->flags |= ICE_FLAG_RSS_AQ_CAPABLE; + + memset(&vsi_ctx, 0, sizeof(vsi_ctx)); + switch (type) { + case ICE_VSI_PF: + vsi->nb_qps = pf->lan_nb_qps; + vsi->base_queue = 1; + ice_vsi_config_default_rss(&vsi_ctx.info); + vsi_ctx.alloc_from_pool = true; + vsi_ctx.flags = ICE_AQ_VSI_TYPE_PF; + /* switch_id is queried by get_switch_config aq, which is done + * by ice_init_hw + */ + vsi_ctx.info.sw_id = hw->port_info->sw_id; + vsi_ctx.info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA; + /* Allow all untagged or tagged packets */ + vsi_ctx.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL; + vsi_ctx.info.vlan_flags |= ICE_AQ_VSI_VLAN_EMOD_NOTHING; + vsi_ctx.info.q_opt_rss = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF | + ICE_AQ_VSI_Q_OPT_RSS_TPLZ; + + /* FDIR */ + cfg = ICE_AQ_VSI_PROP_SECURITY_VALID | + ICE_AQ_VSI_PROP_FLOW_DIR_VALID; + vsi_ctx.info.valid_sections |= rte_cpu_to_le_16(cfg); + cfg = ICE_AQ_VSI_FD_ENABLE; + vsi_ctx.info.fd_options = rte_cpu_to_le_16(cfg); + vsi_ctx.info.max_fd_fltr_dedicated = + rte_cpu_to_le_16(hw->func_caps.fd_fltr_guar); + vsi_ctx.info.max_fd_fltr_shared = + rte_cpu_to_le_16(hw->func_caps.fd_fltr_best_effort); + + /* Enable VLAN/UP trip */ + ret = ice_vsi_config_tc_queue_mapping(vsi, + &vsi_ctx.info, + ICE_DEFAULT_TCMAP); + if (ret) { + PMD_INIT_LOG(ERR, + "tc queue mapping with vsi failed, " + "err = %d", + ret); + goto fail_mem; + } + + break; + case ICE_VSI_CTRL: + vsi->nb_qps = pf->fdir_nb_qps; + vsi->base_queue = ICE_FDIR_QUEUE_ID; + vsi_ctx.alloc_from_pool = true; + vsi_ctx.flags = ICE_AQ_VSI_TYPE_PF; + + cfg = ICE_AQ_VSI_PROP_FLOW_DIR_VALID; + vsi_ctx.info.valid_sections |= rte_cpu_to_le_16(cfg); + cfg = ICE_AQ_VSI_FD_PROG_ENABLE; + vsi_ctx.info.fd_options = rte_cpu_to_le_16(cfg); + vsi_ctx.info.sw_id = hw->port_info->sw_id; + vsi_ctx.info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA; + ret = ice_vsi_config_tc_queue_mapping(vsi, + &vsi_ctx.info, + ICE_DEFAULT_TCMAP); + if (ret) { + PMD_INIT_LOG(ERR, + "tc queue mapping with vsi failed, " + "err = %d", + ret); + goto fail_mem; + } + break; + default: + /* for other types of VSI */ + PMD_INIT_LOG(ERR, "other types of VSI not supported"); + goto fail_mem; + } + + /* VF has MSIX interrupt in VF range, don't allocate here */ + if (type == ICE_VSI_PF) { + ret = ice_res_pool_alloc(&pf->msix_pool, + RTE_MIN(vsi->nb_qps, + RTE_MAX_RXTX_INTR_VEC_ID)); + if (ret < 0) { + PMD_INIT_LOG(ERR, "VSI MAIN %d get heap failed %d", + vsi->vsi_id, ret); + } + vsi->msix_intr = ret; + vsi->nb_msix = RTE_MIN(vsi->nb_qps, RTE_MAX_RXTX_INTR_VEC_ID); + } else if (type == ICE_VSI_CTRL) { + ret = ice_res_pool_alloc(&pf->msix_pool, 1); + if (ret < 0) { + PMD_DRV_LOG(ERR, "VSI %d get heap failed %d", + vsi->vsi_id, ret); + } + vsi->msix_intr = ret; + vsi->nb_msix = 1; + } else { + vsi->msix_intr = 0; + vsi->nb_msix = 0; + } + ret = ice_add_vsi(hw, vsi->idx, &vsi_ctx, NULL); + if (ret != ICE_SUCCESS) { + PMD_INIT_LOG(ERR, "add vsi failed, err = %d", ret); + goto fail_mem; + } + /* store vsi information is SW structure */ + vsi->vsi_id = vsi_ctx.vsi_num; + vsi->info = vsi_ctx.info; + pf->vsis_allocated = vsi_ctx.vsis_allocd; + pf->vsis_unallocated = vsi_ctx.vsis_unallocated; + + if (type == ICE_VSI_PF) { + /* MAC configuration */ + rte_ether_addr_copy((struct rte_ether_addr *) + hw->port_info->mac.perm_addr, + &pf->dev_addr); + + rte_ether_addr_copy(&pf->dev_addr, &mac_addr); + ret = ice_add_mac_filter(vsi, &mac_addr); + if (ret != ICE_SUCCESS) + PMD_INIT_LOG(ERR, "Failed to add dflt MAC filter"); + + 
rte_ether_addr_copy(&broadcast, &mac_addr); + ret = ice_add_mac_filter(vsi, &mac_addr); + if (ret != ICE_SUCCESS) + PMD_INIT_LOG(ERR, "Failed to add MAC filter"); + } + + /* At the beginning, only TC0. */ + /* What we need here is the maximum number of Tx queues. + * Currently vsi->nb_qps holds it. + * Correct it if that changes. + */ + max_txqs[0] = vsi->nb_qps; + ret = ice_cfg_vsi_lan(hw->port_info, vsi->idx, + tc_bitmap, max_txqs); + if (ret != ICE_SUCCESS) + PMD_INIT_LOG(ERR, "Failed to config vsi sched"); + + return vsi; +fail_mem: + rte_free(vsi); + pf->next_vsi_idx--; + return NULL; +} + +static int +ice_send_driver_ver(struct ice_hw *hw) +{ + struct ice_driver_ver dv; + + /* we don't have a driver version, use 0 as a dummy */ + dv.major_ver = 0; + dv.minor_ver = 0; + dv.build_ver = 0; + dv.subbuild_ver = 0; + strncpy((char *)dv.driver_string, "dpdk", sizeof(dv.driver_string)); + + return ice_aq_send_driver_ver(hw, &dv, NULL); +} + +static int +ice_pf_setup(struct ice_pf *pf) +{ + struct ice_hw *hw = ICE_PF_TO_HW(pf); + struct ice_vsi *vsi; + uint16_t unused; + + /* Clear all stats counters */ + pf->offset_loaded = false; + memset(&pf->stats, 0, sizeof(struct ice_hw_port_stats)); + memset(&pf->stats_offset, 0, sizeof(struct ice_hw_port_stats)); + memset(&pf->internal_stats, 0, sizeof(struct ice_eth_stats)); + memset(&pf->internal_stats_offset, 0, sizeof(struct ice_eth_stats)); + + /* force guaranteed filter pool for PF */ + ice_alloc_fd_guar_item(hw, &unused, + hw->func_caps.fd_fltr_guar); + /* force shared filter pool for PF */ + ice_alloc_fd_shrd_item(hw, &unused, + hw->func_caps.fd_fltr_best_effort); + + vsi = ice_setup_vsi(pf, ICE_VSI_PF); + if (!vsi) { + PMD_INIT_LOG(ERR, "Failed to add vsi for PF"); + return -EINVAL; + } + + pf->main_vsi = vsi; + + return 0; +} + +/* PCIe configuration space setting */ +#define PCI_CFG_SPACE_SIZE 256 +#define PCI_CFG_SPACE_EXP_SIZE 4096 +#define PCI_EXT_CAP_ID(header) (int)((header) & 0x0000ffff) +#define PCI_EXT_CAP_NEXT(header) (((header) >> 20) & 0xffc) +#define PCI_EXT_CAP_ID_DSN 0x03 + +static int +ice_pci_find_next_ext_capability(struct rte_pci_device *dev, int cap) +{ + uint32_t header; + int ttl; + int pos = PCI_CFG_SPACE_SIZE; + + /* minimum 8 bytes per capability */ + ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8; + + if (rte_pci_read_config(dev, &header, 4, pos) < 0) { + PMD_INIT_LOG(ERR, "ice error reading extended capabilities\n"); + return -1; + } + + /* + * If we have no capabilities, this is indicated by cap ID, + * cap version and next pointer all being 0. + */ + if (header == 0) + return 0; + + while (ttl-- > 0) { + if (PCI_EXT_CAP_ID(header) == cap) + return pos; + + pos = PCI_EXT_CAP_NEXT(header); + + if (pos < PCI_CFG_SPACE_SIZE) + break; + + if (rte_pci_read_config(dev, &header, 4, pos) < 0) { + PMD_INIT_LOG(ERR, "ice error reading extended capabilities\n"); + return -1; + } + } + + return 0; +} + +/* + * Extract device serial number from PCIe Configuration Space and + * determine the pkg file path according to the DSN. 
+ */ +static int +ice_pkg_file_search_path(struct rte_pci_device *pci_dev, char *pkg_file) +{ + int pos; + char opt_ddp_filename[ICE_MAX_PKG_FILENAME_SIZE]; + uint32_t dsn_low, dsn_high; + memset(opt_ddp_filename, 0, ICE_MAX_PKG_FILENAME_SIZE); + + pos = ice_pci_find_next_ext_capability(pci_dev, PCI_EXT_CAP_ID_DSN); + + if (pos) { + rte_pci_read_config(pci_dev, &dsn_low, 4, pos + 4); + rte_pci_read_config(pci_dev, &dsn_high, 4, pos + 8); + snprintf(opt_ddp_filename, ICE_MAX_PKG_FILENAME_SIZE, + "ice-%08x%08x.pkg", dsn_high, dsn_low); + } else { + PMD_INIT_LOG(ERR, "Failed to read device serial number\n"); + goto fail_dsn; + } + + strncpy(pkg_file, ICE_PKG_FILE_SEARCH_PATH_UPDATES, + ICE_MAX_PKG_FILENAME_SIZE); + if (!access(strcat(pkg_file, opt_ddp_filename), 0)) + return 0; + + strncpy(pkg_file, ICE_PKG_FILE_SEARCH_PATH_DEFAULT, + ICE_MAX_PKG_FILENAME_SIZE); + if (!access(strcat(pkg_file, opt_ddp_filename), 0)) + return 0; + +fail_dsn: + strncpy(pkg_file, ICE_PKG_FILE_UPDATES, ICE_MAX_PKG_FILENAME_SIZE); + if (!access(pkg_file, 0)) + return 0; + strncpy(pkg_file, ICE_PKG_FILE_DEFAULT, ICE_MAX_PKG_FILENAME_SIZE); + return 0; +} + +enum ice_pkg_type +ice_load_pkg_type(struct ice_hw *hw) +{ + enum ice_pkg_type package_type; + + /* store the activated package type (OS default or Comms) */ + if (!strncmp((char *)hw->active_pkg_name, ICE_OS_DEFAULT_PKG_NAME, + ICE_PKG_NAME_SIZE)) + package_type = ICE_PKG_TYPE_OS_DEFAULT; + else if (!strncmp((char *)hw->active_pkg_name, ICE_COMMS_PKG_NAME, + ICE_PKG_NAME_SIZE)) + package_type = ICE_PKG_TYPE_COMMS; + else + package_type = ICE_PKG_TYPE_UNKNOWN; + + PMD_INIT_LOG(NOTICE, "Active package is: %d.%d.%d.%d, %s", + hw->active_pkg_ver.major, hw->active_pkg_ver.minor, + hw->active_pkg_ver.update, hw->active_pkg_ver.draft, + hw->active_pkg_name); + + return package_type; +} + +static int ice_load_pkg(struct rte_eth_dev *dev) +{ + struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + char pkg_file[ICE_MAX_PKG_FILENAME_SIZE]; + int err; + uint8_t *buf; + int buf_len; + FILE *file; + struct stat fstat; + struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device); + struct ice_adapter *ad = + ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + + ice_pkg_file_search_path(pci_dev, pkg_file); + + file = fopen(pkg_file, "rb"); + if (!file) { + PMD_INIT_LOG(ERR, "failed to open file: %s\n", pkg_file); + return -1; + } + + err = stat(pkg_file, &fstat); + if (err) { + PMD_INIT_LOG(ERR, "failed to get file stats\n"); + fclose(file); + return err; + } + + buf_len = fstat.st_size; + buf = rte_malloc(NULL, buf_len, 0); + + if (!buf) { + PMD_INIT_LOG(ERR, "failed to allocate buf of size %d for package\n", + buf_len); + fclose(file); + return -1; + } + + err = fread(buf, buf_len, 1, file); + if (err != 1) { + PMD_INIT_LOG(ERR, "failed to read package data\n"); + fclose(file); + err = -1; + goto fail_exit; + } + + fclose(file); + + err = ice_copy_and_init_pkg(hw, buf, buf_len); + if (err) { + PMD_INIT_LOG(ERR, "ice_copy_and_init_hw failed: %d\n", err); + goto fail_exit; + } + + /* store the loaded pkg type info */ + ad->active_pkg_type = ice_load_pkg_type(hw); + + err = ice_init_hw_tbls(hw); + if (err) { + PMD_INIT_LOG(ERR, "ice_init_hw_tbls failed: %d\n", err); + goto fail_init_tbls; + } + + return 0; + +fail_init_tbls: + rte_free(hw->pkg_copy); +fail_exit: + rte_free(buf); + return err; +} + +static void +ice_base_queue_get(struct ice_pf *pf) +{ + uint32_t reg; + struct ice_hw *hw = ICE_PF_TO_HW(pf); + + reg = ICE_READ_REG(hw, PFLAN_RX_QALLOC); + if (reg 
& PFLAN_RX_QALLOC_VALID_M) { + pf->base_queue = reg & PFLAN_RX_QALLOC_FIRSTQ_M; + } else { + PMD_INIT_LOG(WARNING, "Failed to get Rx base queue" + " index"); + } +} + +static int +parse_bool(const char *key, const char *value, void *args) +{ + int *i = (int *)args; + char *end; + int num; + + num = strtoul(value, &end, 10); + + if (num != 0 && num != 1) { + PMD_DRV_LOG(WARNING, "invalid value:\"%s\" for key:\"%s\", " + "value must be 0 or 1", + value, key); + return -1; + } + + *i = num; + return 0; +} + +static int ice_parse_devargs(struct rte_eth_dev *dev) +{ + struct ice_adapter *ad = + ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + struct rte_devargs *devargs = dev->device->devargs; + struct rte_kvargs *kvlist; + int ret; + + if (devargs == NULL) + return 0; + + kvlist = rte_kvargs_parse(devargs->args, ice_valid_args); + if (kvlist == NULL) { + PMD_INIT_LOG(ERR, "Invalid kvargs key\n"); + return -EINVAL; + } + + ad->devargs.proto_xtr_dflt = PROTO_XTR_NONE; + memset(ad->devargs.proto_xtr, PROTO_XTR_NONE, + sizeof(ad->devargs.proto_xtr)); + + ret = rte_kvargs_process(kvlist, ICE_PROTO_XTR_ARG, + &handle_proto_xtr_arg, &ad->devargs); + if (ret) + goto bail; + + ret = rte_kvargs_process(kvlist, ICE_SAFE_MODE_SUPPORT_ARG, + &parse_bool, &ad->devargs.safe_mode_support); + if (ret) + goto bail; + + ret = rte_kvargs_process(kvlist, ICE_PIPELINE_MODE_SUPPORT_ARG, + &parse_bool, &ad->devargs.pipe_mode_support); + if (ret) + goto bail; + + ret = rte_kvargs_process(kvlist, ICE_FLOW_MARK_SUPPORT_ARG, + &parse_bool, &ad->devargs.flow_mark_support); + if (ret) + goto bail; + +bail: + rte_kvargs_free(kvlist); + return ret; +} + +/* Forward LLDP packets to default VSI by set switch rules */ +static int +ice_vsi_config_sw_lldp(struct ice_vsi *vsi, bool on) +{ + struct ice_hw *hw = ICE_VSI_TO_HW(vsi); + struct ice_fltr_list_entry *s_list_itr = NULL; + struct LIST_HEAD_TYPE list_head; + int ret = 0; + + INIT_LIST_HEAD(&list_head); + + s_list_itr = (struct ice_fltr_list_entry *) + ice_malloc(hw, sizeof(*s_list_itr)); + if (!s_list_itr) + return -ENOMEM; + s_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_ETHERTYPE; + s_list_itr->fltr_info.vsi_handle = vsi->idx; + s_list_itr->fltr_info.l_data.ethertype_mac.ethertype = + RTE_ETHER_TYPE_LLDP; + s_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI; + s_list_itr->fltr_info.flag = ICE_FLTR_RX; + s_list_itr->fltr_info.src_id = ICE_SRC_ID_LPORT; + LIST_ADD(&s_list_itr->list_entry, &list_head); + if (on) + ret = ice_add_eth_mac(hw, &list_head); + else + ret = ice_remove_eth_mac(hw, &list_head); + + rte_free(s_list_itr); + return ret; +} + +static enum ice_status +ice_get_hw_res(struct ice_hw *hw, uint16_t res_type, + uint16_t num, uint16_t desc_id, + uint16_t *prof_buf, uint16_t *num_prof) +{ + struct ice_aqc_get_allocd_res_desc_resp *resp_buf; + int ret; + uint16_t buf_len; + bool res_shared = 1; + struct ice_aq_desc aq_desc; + struct ice_sq_cd *cd = NULL; + struct ice_aqc_get_allocd_res_desc *cmd = + &aq_desc.params.get_res_desc; + + buf_len = sizeof(resp_buf->elem) * num; + resp_buf = ice_malloc(hw, buf_len); + if (!resp_buf) + return -ENOMEM; + + ice_fill_dflt_direct_cmd_desc(&aq_desc, + ice_aqc_opc_get_allocd_res_desc); + + cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) & + ICE_AQC_RES_TYPE_M) | (res_shared ? 
+ ICE_AQC_RES_TYPE_FLAG_SHARED : 0)); + cmd->ops.cmd.first_desc = CPU_TO_LE16(desc_id); + + ret = ice_aq_send_cmd(hw, &aq_desc, resp_buf, buf_len, cd); + if (!ret) + *num_prof = LE16_TO_CPU(cmd->ops.resp.num_desc); + else + goto exit; + + ice_memcpy(prof_buf, resp_buf->elem, sizeof(resp_buf->elem) * + (*num_prof), ICE_NONDMA_TO_NONDMA); + +exit: + rte_free(resp_buf); + return ret; +} +static int +ice_cleanup_resource(struct ice_hw *hw, uint16_t res_type) +{ + int ret; + uint16_t prof_id; + uint16_t prof_buf[ICE_MAX_RES_DESC_NUM]; + uint16_t first_desc = 1; + uint16_t num_prof = 0; + + ret = ice_get_hw_res(hw, res_type, ICE_MAX_RES_DESC_NUM, + first_desc, prof_buf, &num_prof); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to get fxp resource"); + return ret; + } + + for (prof_id = 0; prof_id < num_prof; prof_id++) { + ret = ice_free_hw_res(hw, res_type, 1, &prof_buf[prof_id]); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to free fxp resource"); + return ret; + } + } + return 0; +} + +static int +ice_reset_fxp_resource(struct ice_hw *hw) +{ + int ret; + + ret = ice_cleanup_resource(hw, ICE_AQC_RES_TYPE_FD_PROF_BLDR_PROFID); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to clearup fdir resource"); + return ret; + } + + ret = ice_cleanup_resource(hw, ICE_AQC_RES_TYPE_HASH_PROF_BLDR_PROFID); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to clearup rss resource"); + return ret; + } + + return 0; +} + +static int +ice_dev_init(struct rte_eth_dev *dev) +{ + struct rte_pci_device *pci_dev; + struct rte_intr_handle *intr_handle; + struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct ice_adapter *ad = + ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + struct ice_vsi *vsi; + int ret; + + dev->dev_ops = &ice_eth_dev_ops; + dev->rx_pkt_burst = ice_recv_pkts; + dev->tx_pkt_burst = ice_xmit_pkts; + dev->tx_pkt_prepare = ice_prep_pkts; + + /* for secondary processes, we don't initialise any further as primary + * has already done this work. 
+ */ + if (rte_eal_process_type() != RTE_PROC_PRIMARY) { + ice_set_rx_function(dev); + ice_set_tx_function(dev); + return 0; + } + + ice_set_default_ptype_table(dev); + pci_dev = RTE_DEV_TO_PCI(dev->device); + intr_handle = &pci_dev->intr_handle; + + pf->adapter = ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + pf->adapter->eth_dev = dev; + pf->dev_data = dev->data; + hw->back = pf->adapter; + hw->hw_addr = (uint8_t *)pci_dev->mem_resource[0].addr; + hw->vendor_id = pci_dev->id.vendor_id; + hw->device_id = pci_dev->id.device_id; + hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id; + hw->subsystem_device_id = pci_dev->id.subsystem_device_id; + hw->bus.device = pci_dev->addr.devid; + hw->bus.func = pci_dev->addr.function; + + ret = ice_parse_devargs(dev); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to parse devargs"); + return -EINVAL; + } + + ice_init_controlq_parameter(hw); + + ret = ice_init_hw(hw); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to initialize HW"); + return -EINVAL; + } + + ret = ice_load_pkg(dev); + if (ret) { + if (ad->devargs.safe_mode_support == 0) { + PMD_INIT_LOG(ERR, "Failed to load the DDP package," + "Use safe-mode-support=1 to enter Safe Mode"); + return ret; + } + + PMD_INIT_LOG(WARNING, "Failed to load the DDP package," + "Entering Safe Mode"); + ad->is_safe_mode = 1; + } + + PMD_INIT_LOG(INFO, "FW %d.%d.%05d API %d.%d", + hw->fw_maj_ver, hw->fw_min_ver, hw->fw_build, + hw->api_maj_ver, hw->api_min_ver); + + ice_pf_sw_init(dev); + ret = ice_init_mac_address(dev); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to initialize mac address"); + goto err_init_mac; + } + + /* Pass the information to the rte_eth_dev_close() that it should also + * release the private port resources. + */ + dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE; + + ret = ice_res_pool_init(&pf->msix_pool, 1, + hw->func_caps.common_cap.num_msix_vectors - 1); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to init MSIX pool"); + goto err_msix_pool_init; + } + + ret = ice_pf_setup(pf); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to setup PF"); + goto err_pf_setup; + } + + ret = ice_send_driver_ver(hw); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to send driver version"); + goto err_pf_setup; + } + + vsi = pf->main_vsi; + + /* Disable double vlan by default */ + ice_vsi_config_double_vlan(vsi, false); + + ret = ice_aq_stop_lldp(hw, true, false, NULL); + if (ret != ICE_SUCCESS) + PMD_INIT_LOG(DEBUG, "lldp has already stopped\n"); + ret = ice_init_dcb(hw, true); + if (ret != ICE_SUCCESS) + PMD_INIT_LOG(DEBUG, "Failed to init DCB\n"); + /* Forward LLDP packets to default VSI */ + ret = ice_vsi_config_sw_lldp(vsi, true); + if (ret != ICE_SUCCESS) + PMD_INIT_LOG(DEBUG, "Failed to cfg lldp\n"); + /* register callback func to eal lib */ + rte_intr_callback_register(intr_handle, + ice_interrupt_handler, dev); + + ice_pf_enable_irq0(hw); + + /* enable uio intr after callback register */ + rte_intr_enable(intr_handle); + + /* get base queue pairs index in the device */ + ice_base_queue_get(pf); + + if (!ad->is_safe_mode) { + ret = ice_flow_init(ad); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to initialize flow"); + return ret; + } + } + + ret = ice_reset_fxp_resource(hw); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to reset fxp resource"); + return ret; + } + + return 0; + +err_pf_setup: + ice_res_pool_destroy(&pf->msix_pool); +err_msix_pool_init: + rte_free(dev->data->mac_addrs); + dev->data->mac_addrs = NULL; +err_init_mac: + ice_sched_cleanup_all(hw); + rte_free(hw->port_info); + ice_shutdown_all_ctrlq(hw); + 
rte_free(pf->proto_xtr); + + return ret; +} + +int +ice_release_vsi(struct ice_vsi *vsi) +{ + struct ice_hw *hw; + struct ice_vsi_ctx vsi_ctx; + enum ice_status ret; + + if (!vsi) + return 0; + + hw = ICE_VSI_TO_HW(vsi); + + ice_remove_all_mac_vlan_filters(vsi); + + memset(&vsi_ctx, 0, sizeof(vsi_ctx)); + + vsi_ctx.vsi_num = vsi->vsi_id; + vsi_ctx.info = vsi->info; + ret = ice_free_vsi(hw, vsi->idx, &vsi_ctx, false, NULL); + if (ret != ICE_SUCCESS) { + PMD_INIT_LOG(ERR, "Failed to free vsi by aq, %u", vsi->vsi_id); + rte_free(vsi); + return -1; + } + + rte_free(vsi); + return 0; +} + +void +ice_vsi_disable_queues_intr(struct ice_vsi *vsi) +{ + struct rte_eth_dev *dev = vsi->adapter->eth_dev; + struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + struct ice_hw *hw = ICE_VSI_TO_HW(vsi); + uint16_t msix_intr, i; + + /* disable interrupt and also clear all the existing config */ + for (i = 0; i < vsi->nb_qps; i++) { + ICE_WRITE_REG(hw, QINT_TQCTL(vsi->base_queue + i), 0); + ICE_WRITE_REG(hw, QINT_RQCTL(vsi->base_queue + i), 0); + rte_wmb(); + } + + if (rte_intr_allow_others(intr_handle)) + /* vfio-pci */ + for (i = 0; i < vsi->nb_msix; i++) { + msix_intr = vsi->msix_intr + i; + ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr), + GLINT_DYN_CTL_WB_ON_ITR_M); + } + else + /* igb_uio */ + ICE_WRITE_REG(hw, GLINT_DYN_CTL(0), GLINT_DYN_CTL_WB_ON_ITR_M); +} + +static void +ice_dev_stop(struct rte_eth_dev *dev) +{ + struct rte_eth_dev_data *data = dev->data; + struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct ice_vsi *main_vsi = pf->main_vsi; + struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + uint16_t i; + + /* avoid stopping again */ + if (pf->adapter_stopped) + return; + + /* stop and clear all Rx queues */ + for (i = 0; i < data->nb_rx_queues; i++) + ice_rx_queue_stop(dev, i); + + /* stop and clear all Tx queues */ + for (i = 0; i < data->nb_tx_queues; i++) + ice_tx_queue_stop(dev, i); + + /* disable all queue interrupts */ + ice_vsi_disable_queues_intr(main_vsi); + + if (pf->init_link_up) + ice_dev_set_link_up(dev); + else + ice_dev_set_link_down(dev); + + /* Clean datapath event and queue/vec mapping */ + rte_intr_efd_disable(intr_handle); + if (intr_handle->intr_vec) { + rte_free(intr_handle->intr_vec); + intr_handle->intr_vec = NULL; + } + + pf->adapter_stopped = true; +} + +static void +ice_dev_close(struct rte_eth_dev *dev) +{ + struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + struct ice_adapter *ad = + ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + + /* Since stop will bring the link down, the link event will be + * triggered, so disable the irq first to avoid the port_info and + * other resource deallocation causing the interrupt service thread + * to crash. 
+ */ + ice_pf_disable_irq0(hw); + + ice_dev_stop(dev); + + if (!ad->is_safe_mode) + ice_flow_uninit(ad); + + /* release all queue resource */ + ice_free_queues(dev); + + ice_res_pool_destroy(&pf->msix_pool); + ice_release_vsi(pf->main_vsi); + ice_sched_cleanup_all(hw); + ice_free_hw_tbls(hw); + rte_free(hw->port_info); + hw->port_info = NULL; + ice_shutdown_all_ctrlq(hw); + rte_free(pf->proto_xtr); + pf->proto_xtr = NULL; + + dev->dev_ops = NULL; + dev->rx_pkt_burst = NULL; + dev->tx_pkt_burst = NULL; + + rte_free(dev->data->mac_addrs); + dev->data->mac_addrs = NULL; + + /* disable uio intr before callback unregister */ + rte_intr_disable(intr_handle); + + /* unregister callback func from eal lib */ + rte_intr_callback_unregister(intr_handle, + ice_interrupt_handler, dev); +} + +static int +ice_dev_uninit(struct rte_eth_dev *dev) +{ + ice_dev_close(dev); + + return 0; +} + +static int ice_init_rss(struct ice_pf *pf) +{ + struct ice_hw *hw = ICE_PF_TO_HW(pf); + struct ice_vsi *vsi = pf->main_vsi; + struct rte_eth_dev *dev = pf->adapter->eth_dev; + struct rte_eth_rss_conf *rss_conf; + struct ice_aqc_get_set_rss_keys key; + uint16_t i, nb_q; + int ret = 0; + bool is_safe_mode = pf->adapter->is_safe_mode; + uint32_t reg; + + rss_conf = &dev->data->dev_conf.rx_adv_conf.rss_conf; + nb_q = dev->data->nb_rx_queues; + vsi->rss_key_size = ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE; + vsi->rss_lut_size = pf->hash_lut_size; + + if (is_safe_mode) { + PMD_DRV_LOG(WARNING, "RSS is not supported in safe mode\n"); + return 0; + } + + if (!vsi->rss_key) + vsi->rss_key = rte_zmalloc(NULL, + vsi->rss_key_size, 0); + if (!vsi->rss_lut) + vsi->rss_lut = rte_zmalloc(NULL, + vsi->rss_lut_size, 0); + + /* configure RSS key */ + if (!rss_conf->rss_key) { + /* Calculate the default hash key */ + for (i = 0; i <= vsi->rss_key_size; i++) + vsi->rss_key[i] = (uint8_t)rte_rand(); + } else { + rte_memcpy(vsi->rss_key, rss_conf->rss_key, + RTE_MIN(rss_conf->rss_key_len, + vsi->rss_key_size)); + } + rte_memcpy(key.standard_rss_key, vsi->rss_key, vsi->rss_key_size); + ret = ice_aq_set_rss_key(hw, vsi->idx, &key); + if (ret) + return -EINVAL; + + /* init RSS LUT table */ + for (i = 0; i < vsi->rss_lut_size; i++) + vsi->rss_lut[i] = i % nb_q; + + ret = ice_aq_set_rss_lut(hw, vsi->idx, + ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF, + vsi->rss_lut, vsi->rss_lut_size); + if (ret) + return -EINVAL; + + /* Enable registers for symmetric_toeplitz function. 
*/ + reg = ICE_READ_REG(hw, VSIQF_HASH_CTL(vsi->vsi_id)); + reg = (reg & (~VSIQF_HASH_CTL_HASH_SCHEME_M)) | + (1 << VSIQF_HASH_CTL_HASH_SCHEME_S); + ICE_WRITE_REG(hw, VSIQF_HASH_CTL(vsi->vsi_id), reg); + + /* configure RSS for IPv4 with input set IPv4 src/dst */ + ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV4, + ICE_FLOW_SEG_HDR_IPV4, 0); + if (ret) + PMD_DRV_LOG(ERR, "%s IPV4 rss flow fail %d", __func__, ret); + + /* configure RSS for IPv6 with input set IPv6 src/dst */ + ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV6, + ICE_FLOW_SEG_HDR_IPV6, 0); + if (ret) + PMD_DRV_LOG(ERR, "%s IPV6 rss flow fail %d", __func__, ret); + + /* configure RSS for tcp6 with input set IPv6 src/dst, TCP src/dst */ + ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_TCP_IPV6, + ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6, 0); + if (ret) + PMD_DRV_LOG(ERR, "%s TCP_IPV6 rss flow fail %d", __func__, ret); + + /* configure RSS for udp6 with input set IPv6 src/dst, UDP src/dst */ + ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_UDP_IPV6, + ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6, 0); + if (ret) + PMD_DRV_LOG(ERR, "%s UDP_IPV6 rss flow fail %d", __func__, ret); + + /* configure RSS for sctp6 with input set IPv6 src/dst */ + ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV6, + ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6, 0); + if (ret) + PMD_DRV_LOG(ERR, "%s SCTP_IPV6 rss flow fail %d", + __func__, ret); + + /* configure RSS for tcp4 with input set IP src/dst, TCP src/dst */ + ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_TCP_IPV4, + ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4, 0); + if (ret) + PMD_DRV_LOG(ERR, "%s TCP_IPV4 rss flow fail %d", __func__, ret); + + /* configure RSS for udp4 with input set IP src/dst, UDP src/dst */ + ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_UDP_IPV4, + ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4, 0); + if (ret) + PMD_DRV_LOG(ERR, "%s UDP_IPV4 rss flow fail %d", __func__, ret); + + /* configure RSS for sctp4 with input set IP src/dst */ + ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV4, + ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4, 0); + if (ret) + PMD_DRV_LOG(ERR, "%s SCTP_IPV4 rss flow fail %d", + __func__, ret); + + /* configure RSS for gtpu with input set TEID */ + ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_GTP_U_IPV4_TEID, + ICE_FLOW_SEG_HDR_GTPU_IP, 0); + if (ret) + PMD_DRV_LOG(ERR, "%s GTPU_TEID rss flow fail %d", + __func__, ret); + + /** + * configure RSS for pppoe/pppod with input set + * Source MAC and Session ID + */ + ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_PPPOE_SESS_ID_ETH, + ICE_FLOW_SEG_HDR_PPPOE, 0); + if (ret) + PMD_DRV_LOG(ERR, "%s PPPoE/PPPoD_SessionID rss flow fail %d", + __func__, ret); + + return 0; +} + +static int +ice_dev_configure(struct rte_eth_dev *dev) +{ + struct ice_adapter *ad = + ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); + int ret; + + /* Initialize to TRUE. If any of Rx queues doesn't meet the + * bulk allocation or vector Rx preconditions we will reset it. 
+ */ + ad->rx_bulk_alloc_allowed = true; + ad->tx_simple_allowed = true; + + if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) + dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH; + + ret = ice_init_rss(pf); + if (ret) { + PMD_DRV_LOG(ERR, "Failed to enable rss for PF"); + return ret; + } + + return 0; +} + +static void +__vsi_queues_bind_intr(struct ice_vsi *vsi, uint16_t msix_vect, + int base_queue, int nb_queue) +{ + struct ice_hw *hw = ICE_VSI_TO_HW(vsi); + uint32_t val, val_tx; + int i; + + for (i = 0; i < nb_queue; i++) { + /*do actual bind*/ + val = (msix_vect & QINT_RQCTL_MSIX_INDX_M) | + (0 << QINT_RQCTL_ITR_INDX_S) | QINT_RQCTL_CAUSE_ENA_M; + val_tx = (msix_vect & QINT_TQCTL_MSIX_INDX_M) | + (0 << QINT_TQCTL_ITR_INDX_S) | QINT_TQCTL_CAUSE_ENA_M; + + PMD_DRV_LOG(INFO, "queue %d is binding to vect %d", + base_queue + i, msix_vect); + /* set ITR0 value */ + ICE_WRITE_REG(hw, GLINT_ITR(0, msix_vect), 0x10); + ICE_WRITE_REG(hw, QINT_RQCTL(base_queue + i), val); + ICE_WRITE_REG(hw, QINT_TQCTL(base_queue + i), val_tx); + } +} + +void +ice_vsi_queues_bind_intr(struct ice_vsi *vsi) +{ + struct rte_eth_dev *dev = vsi->adapter->eth_dev; + struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + struct ice_hw *hw = ICE_VSI_TO_HW(vsi); + uint16_t msix_vect = vsi->msix_intr; + uint16_t nb_msix = RTE_MIN(vsi->nb_msix, intr_handle->nb_efd); + uint16_t queue_idx = 0; + int record = 0; + int i; + + /* clear Rx/Tx queue interrupt */ + for (i = 0; i < vsi->nb_used_qps; i++) { + ICE_WRITE_REG(hw, QINT_TQCTL(vsi->base_queue + i), 0); + ICE_WRITE_REG(hw, QINT_RQCTL(vsi->base_queue + i), 0); + } + + /* PF bind interrupt */ + if (rte_intr_dp_is_en(intr_handle)) { + queue_idx = 0; + record = 1; + } + + for (i = 0; i < vsi->nb_used_qps; i++) { + if (nb_msix <= 1) { + if (!rte_intr_allow_others(intr_handle)) + msix_vect = ICE_MISC_VEC_ID; + + /* uio mapping all queue to one msix_vect */ + __vsi_queues_bind_intr(vsi, msix_vect, + vsi->base_queue + i, + vsi->nb_used_qps - i); + + for (; !!record && i < vsi->nb_used_qps; i++) + intr_handle->intr_vec[queue_idx + i] = + msix_vect; + break; + } + + /* vfio 1:1 queue/msix_vect mapping */ + __vsi_queues_bind_intr(vsi, msix_vect, + vsi->base_queue + i, 1); + + if (!!record) + intr_handle->intr_vec[queue_idx + i] = msix_vect; + + msix_vect++; + nb_msix--; + } +} + +void +ice_vsi_enable_queues_intr(struct ice_vsi *vsi) +{ + struct rte_eth_dev *dev = vsi->adapter->eth_dev; + struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + struct ice_hw *hw = ICE_VSI_TO_HW(vsi); + uint16_t msix_intr, i; + + if (rte_intr_allow_others(intr_handle)) + for (i = 0; i < vsi->nb_used_qps; i++) { + msix_intr = vsi->msix_intr + i; + ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr), + GLINT_DYN_CTL_INTENA_M | + GLINT_DYN_CTL_CLEARPBA_M | + GLINT_DYN_CTL_ITR_INDX_M | + GLINT_DYN_CTL_WB_ON_ITR_M); + } + else + ICE_WRITE_REG(hw, GLINT_DYN_CTL(0), + GLINT_DYN_CTL_INTENA_M | + GLINT_DYN_CTL_CLEARPBA_M | + GLINT_DYN_CTL_ITR_INDX_M | + GLINT_DYN_CTL_WB_ON_ITR_M); +} + +static int +ice_rxq_intr_setup(struct rte_eth_dev *dev) +{ + struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + struct ice_vsi *vsi = pf->main_vsi; + uint32_t intr_vector = 0; + + rte_intr_disable(intr_handle); + + /* check and configure queue intr-vector 
mapping */ + if ((rte_intr_cap_multiple(intr_handle) || + !RTE_ETH_DEV_SRIOV(dev).active) && + dev->data->dev_conf.intr_conf.rxq != 0) { + intr_vector = dev->data->nb_rx_queues; + if (intr_vector > ICE_MAX_INTR_QUEUE_NUM) { + PMD_DRV_LOG(ERR, "At most %d intr queues supported", + ICE_MAX_INTR_QUEUE_NUM); + return -ENOTSUP; + } + if (rte_intr_efd_enable(intr_handle, intr_vector)) + return -1; + } + + if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) { + intr_handle->intr_vec = + rte_zmalloc(NULL, dev->data->nb_rx_queues * sizeof(int), + 0); + if (!intr_handle->intr_vec) { + PMD_DRV_LOG(ERR, + "Failed to allocate %d rx_queues intr_vec", + dev->data->nb_rx_queues); + return -ENOMEM; + } + } + + /* Map queues with MSIX interrupt */ + vsi->nb_used_qps = dev->data->nb_rx_queues; + ice_vsi_queues_bind_intr(vsi); + + /* Enable interrupts for all the queues */ + ice_vsi_enable_queues_intr(vsi); + + rte_intr_enable(intr_handle); + + return 0; +} + +static void +ice_get_init_link_status(struct rte_eth_dev *dev) +{ + struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); + bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false; + struct ice_link_status link_status; + int ret; + + ret = ice_aq_get_link_info(hw->port_info, enable_lse, + &link_status, NULL); + if (ret != ICE_SUCCESS) { + PMD_DRV_LOG(ERR, "Failed to get link info"); + pf->init_link_up = false; + return; + } + + if (link_status.link_info & ICE_AQ_LINK_UP) + pf->init_link_up = true; +} + +static int +ice_dev_start(struct rte_eth_dev *dev) +{ + struct rte_eth_dev_data *data = dev->data; + struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct ice_vsi *vsi = pf->main_vsi; + uint16_t nb_rxq = 0; + uint16_t nb_txq, i; + uint16_t max_frame_size; + int mask, ret; + + /* program Tx queues' context in hardware */ + for (nb_txq = 0; nb_txq < data->nb_tx_queues; nb_txq++) { + ret = ice_tx_queue_start(dev, nb_txq); + if (ret) { + PMD_DRV_LOG(ERR, "fail to start Tx queue %u", nb_txq); + goto tx_err; + } + } + + /* program Rx queues' context in hardware*/ + for (nb_rxq = 0; nb_rxq < data->nb_rx_queues; nb_rxq++) { + ret = ice_rx_queue_start(dev, nb_rxq); + if (ret) { + PMD_DRV_LOG(ERR, "fail to start Rx queue %u", nb_rxq); + goto rx_err; + } + } + + ice_set_rx_function(dev); + ice_set_tx_function(dev); + + mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | + ETH_VLAN_EXTEND_MASK; + ret = ice_vlan_offload_set(dev, mask); + if (ret) { + PMD_INIT_LOG(ERR, "Unable to set VLAN offload"); + goto rx_err; + } + + /* enable Rx interrput and mapping Rx queue to interrupt vector */ + if (ice_rxq_intr_setup(dev)) + return -EIO; + + /* Enable receiving broadcast packets and transmitting packets */ + ret = ice_set_vsi_promisc(hw, vsi->idx, + ICE_PROMISC_BCAST_RX | ICE_PROMISC_BCAST_TX | + ICE_PROMISC_UCAST_TX | ICE_PROMISC_MCAST_TX, + 0); + if (ret != ICE_SUCCESS) + PMD_DRV_LOG(INFO, "fail to set vsi broadcast"); + + ret = ice_aq_set_event_mask(hw, hw->port_info->lport, + ((u16)(ICE_AQ_LINK_EVENT_LINK_FAULT | + ICE_AQ_LINK_EVENT_PHY_TEMP_ALARM | + ICE_AQ_LINK_EVENT_EXCESSIVE_ERRORS | + ICE_AQ_LINK_EVENT_SIGNAL_DETECT | + ICE_AQ_LINK_EVENT_AN_COMPLETED | + ICE_AQ_LINK_EVENT_PORT_TX_SUSPENDED)), + NULL); + if (ret != ICE_SUCCESS) + PMD_DRV_LOG(WARNING, "Fail to set phy mask"); + + ice_get_init_link_status(dev); + + ice_dev_set_link_up(dev); + + /* Call get_link_info aq commond 
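/*
 * Editor's note -- illustrative sketch, not from the patch. ice_dev_start()
 * above starts the Tx queues first, then the Rx queues, and on any failure
 * unwinds only the queues that were already started (the tx_err/rx_err labels
 * later in the function). start_queue()/stop_queue() here are hypothetical
 * stand-ins for ice_tx_queue_start()/ice_tx_queue_stop().
 */
#include <stdint.h>
#include <stdio.h>

/* hypothetical stubs; queue 3 is made to fail so the rollback path runs */
static int start_queue(uint16_t q) { printf("start %u\n", q); return q == 3 ? -1 : 0; }
static void stop_queue(uint16_t q) { printf("stop %u\n", q); }

static int start_all_queues(uint16_t nb_q)
{
	uint16_t started, i;

	for (started = 0; started < nb_q; started++)
		if (start_queue(started) != 0)
			goto err;
	return 0;
err:
	for (i = 0; i < started; i++)	/* roll back only what was actually started */
		stop_queue(i);
	return -1;
}

int main(void) { return start_all_queues(8) ? 1 : 0; }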
to enable/disable LSE */ + ice_link_update(dev, 0); + + pf->adapter_stopped = false; + + /* Set the max frame size to default value*/ + max_frame_size = pf->dev_data->dev_conf.rxmode.max_rx_pkt_len ? + pf->dev_data->dev_conf.rxmode.max_rx_pkt_len : + ICE_FRAME_SIZE_MAX; + + /* Set the max frame size to HW*/ + ice_aq_set_mac_cfg(hw, max_frame_size, NULL); + + return 0; + + /* stop the started queues if failed to start all queues */ +rx_err: + for (i = 0; i < nb_rxq; i++) + ice_rx_queue_stop(dev, i); +tx_err: + for (i = 0; i < nb_txq; i++) + ice_tx_queue_stop(dev, i); + + return -EIO; +} + +static int +ice_dev_reset(struct rte_eth_dev *dev) +{ + int ret; + + if (dev->data->sriov.active) + return -ENOTSUP; + + ret = ice_dev_uninit(dev); + if (ret) { + PMD_INIT_LOG(ERR, "failed to uninit device, status = %d", ret); + return -ENXIO; + } + + ret = ice_dev_init(dev); + if (ret) { + PMD_INIT_LOG(ERR, "failed to init device, status = %d", ret); + return -ENXIO; + } + + return 0; +} + +static int +ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) +{ + struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ice_vsi *vsi = pf->main_vsi; + struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device); + bool is_safe_mode = pf->adapter->is_safe_mode; + u64 phy_type_low; + u64 phy_type_high; + + dev_info->min_rx_bufsize = ICE_BUF_SIZE_MIN; + dev_info->max_rx_pktlen = ICE_FRAME_SIZE_MAX; + dev_info->max_rx_queues = vsi->nb_qps; + dev_info->max_tx_queues = vsi->nb_qps; + dev_info->max_mac_addrs = vsi->max_macaddrs; + dev_info->max_vfs = pci_dev->max_vfs; + dev_info->max_mtu = dev_info->max_rx_pktlen - ICE_ETH_OVERHEAD; + dev_info->min_mtu = RTE_ETHER_MIN_MTU; + + dev_info->rx_offload_capa = + DEV_RX_OFFLOAD_VLAN_STRIP | + DEV_RX_OFFLOAD_JUMBO_FRAME | + DEV_RX_OFFLOAD_KEEP_CRC | + DEV_RX_OFFLOAD_SCATTER | + DEV_RX_OFFLOAD_VLAN_FILTER; + dev_info->tx_offload_capa = + DEV_TX_OFFLOAD_VLAN_INSERT | + DEV_TX_OFFLOAD_TCP_TSO | + DEV_TX_OFFLOAD_MULTI_SEGS | + DEV_TX_OFFLOAD_MBUF_FAST_FREE; + dev_info->flow_type_rss_offloads = 0; + + if (!is_safe_mode) { + dev_info->rx_offload_capa |= + DEV_RX_OFFLOAD_IPV4_CKSUM | + DEV_RX_OFFLOAD_UDP_CKSUM | + DEV_RX_OFFLOAD_TCP_CKSUM | + DEV_RX_OFFLOAD_QINQ_STRIP | + DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | + DEV_RX_OFFLOAD_VLAN_EXTEND | + DEV_RX_OFFLOAD_RSS_HASH; + dev_info->tx_offload_capa |= + DEV_TX_OFFLOAD_QINQ_INSERT | + DEV_TX_OFFLOAD_IPV4_CKSUM | + DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_TCP_CKSUM | + DEV_TX_OFFLOAD_SCTP_CKSUM | + DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | + DEV_TX_OFFLOAD_OUTER_UDP_CKSUM; + dev_info->flow_type_rss_offloads |= ICE_RSS_OFFLOAD_ALL; + } + + dev_info->rx_queue_offload_capa = 0; + dev_info->tx_queue_offload_capa = 0; + + dev_info->reta_size = pf->hash_lut_size; + dev_info->hash_key_size = (VSIQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t); + + dev_info->default_rxconf = (struct rte_eth_rxconf) { + .rx_thresh = { + .pthresh = ICE_DEFAULT_RX_PTHRESH, + .hthresh = ICE_DEFAULT_RX_HTHRESH, + .wthresh = ICE_DEFAULT_RX_WTHRESH, + }, + .rx_free_thresh = ICE_DEFAULT_RX_FREE_THRESH, + .rx_drop_en = 0, + .offloads = 0, + }; + + dev_info->default_txconf = (struct rte_eth_txconf) { + .tx_thresh = { + .pthresh = ICE_DEFAULT_TX_PTHRESH, + .hthresh = ICE_DEFAULT_TX_HTHRESH, + .wthresh = ICE_DEFAULT_TX_WTHRESH, + }, + .tx_free_thresh = ICE_DEFAULT_TX_FREE_THRESH, + .tx_rs_thresh = ICE_DEFAULT_TX_RSBIT_THRESH, + .offloads = 0, + }; + + dev_info->rx_desc_lim = 
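/*
 * Editor's note -- illustrative sketch. ice_dev_info_get() above only
 * advertises the checksum/RSS offloads when the device is not in safe mode,
 * so an application should consult rte_eth_dev_info_get() before requesting
 * an offload in its port configuration. The port id is an assumption.
 */
#include <rte_ethdev.h>

static uint64_t pick_rx_offloads(uint16_t port_id)
{
	struct rte_eth_dev_info info;
	uint64_t wanted = DEV_RX_OFFLOAD_IPV4_CKSUM | DEV_RX_OFFLOAD_RSS_HASH;

	if (rte_eth_dev_info_get(port_id, &info) != 0)
		return 0;
	/* keep only what the port actually reports as supported */
	return wanted & info.rx_offload_capa;
}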
(struct rte_eth_desc_lim) { + .nb_max = ICE_MAX_RING_DESC, + .nb_min = ICE_MIN_RING_DESC, + .nb_align = ICE_ALIGN_RING_DESC, + }; + + dev_info->tx_desc_lim = (struct rte_eth_desc_lim) { + .nb_max = ICE_MAX_RING_DESC, + .nb_min = ICE_MIN_RING_DESC, + .nb_align = ICE_ALIGN_RING_DESC, + }; + + dev_info->speed_capa = ETH_LINK_SPEED_10M | + ETH_LINK_SPEED_100M | + ETH_LINK_SPEED_1G | + ETH_LINK_SPEED_2_5G | + ETH_LINK_SPEED_5G | + ETH_LINK_SPEED_10G | + ETH_LINK_SPEED_20G | + ETH_LINK_SPEED_25G; + + phy_type_low = hw->port_info->phy.phy_type_low; + phy_type_high = hw->port_info->phy.phy_type_high; + + if (ICE_PHY_TYPE_SUPPORT_50G(phy_type_low)) + dev_info->speed_capa |= ETH_LINK_SPEED_50G; + + if (ICE_PHY_TYPE_SUPPORT_100G_LOW(phy_type_low) || + ICE_PHY_TYPE_SUPPORT_100G_HIGH(phy_type_high)) + dev_info->speed_capa |= ETH_LINK_SPEED_100G; + + dev_info->nb_rx_queues = dev->data->nb_rx_queues; + dev_info->nb_tx_queues = dev->data->nb_tx_queues; + + dev_info->default_rxportconf.burst_size = ICE_RX_MAX_BURST; + dev_info->default_txportconf.burst_size = ICE_TX_MAX_BURST; + dev_info->default_rxportconf.nb_queues = 1; + dev_info->default_txportconf.nb_queues = 1; + dev_info->default_rxportconf.ring_size = ICE_BUF_SIZE_MIN; + dev_info->default_txportconf.ring_size = ICE_BUF_SIZE_MIN; + + return 0; +} + +static inline int +ice_atomic_read_link_status(struct rte_eth_dev *dev, + struct rte_eth_link *link) +{ + struct rte_eth_link *dst = link; + struct rte_eth_link *src = &dev->data->dev_link; + + if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, + *(uint64_t *)src) == 0) + return -1; + + return 0; +} + +static inline int +ice_atomic_write_link_status(struct rte_eth_dev *dev, + struct rte_eth_link *link) +{ + struct rte_eth_link *dst = &dev->data->dev_link; + struct rte_eth_link *src = link; + + if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, + *(uint64_t *)src) == 0) + return -1; + + return 0; +} + +static int +ice_link_update(struct rte_eth_dev *dev, int wait_to_complete) +{ +#define CHECK_INTERVAL 100 /* 100ms */ +#define MAX_REPEAT_TIME 10 /* 1s (10 * 100ms) in total */ + struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ice_link_status link_status; + struct rte_eth_link link, old; + int status; + unsigned int rep_cnt = MAX_REPEAT_TIME; + bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? 
true : false; + + memset(&link, 0, sizeof(link)); + memset(&old, 0, sizeof(old)); + memset(&link_status, 0, sizeof(link_status)); + ice_atomic_read_link_status(dev, &old); + + do { + /* Get link status information from hardware */ + status = ice_aq_get_link_info(hw->port_info, enable_lse, + &link_status, NULL); + if (status != ICE_SUCCESS) { + link.link_speed = ETH_SPEED_NUM_100M; + link.link_duplex = ETH_LINK_FULL_DUPLEX; + PMD_DRV_LOG(ERR, "Failed to get link info"); + goto out; + } + + link.link_status = link_status.link_info & ICE_AQ_LINK_UP; + if (!wait_to_complete || link.link_status) + break; + + rte_delay_ms(CHECK_INTERVAL); + } while (--rep_cnt); + + if (!link.link_status) + goto out; + + /* Full-duplex operation at all supported speeds */ + link.link_duplex = ETH_LINK_FULL_DUPLEX; + + /* Parse the link status */ + switch (link_status.link_speed) { + case ICE_AQ_LINK_SPEED_10MB: + link.link_speed = ETH_SPEED_NUM_10M; + break; + case ICE_AQ_LINK_SPEED_100MB: + link.link_speed = ETH_SPEED_NUM_100M; + break; + case ICE_AQ_LINK_SPEED_1000MB: + link.link_speed = ETH_SPEED_NUM_1G; + break; + case ICE_AQ_LINK_SPEED_2500MB: + link.link_speed = ETH_SPEED_NUM_2_5G; + break; + case ICE_AQ_LINK_SPEED_5GB: + link.link_speed = ETH_SPEED_NUM_5G; + break; + case ICE_AQ_LINK_SPEED_10GB: + link.link_speed = ETH_SPEED_NUM_10G; + break; + case ICE_AQ_LINK_SPEED_20GB: + link.link_speed = ETH_SPEED_NUM_20G; + break; + case ICE_AQ_LINK_SPEED_25GB: + link.link_speed = ETH_SPEED_NUM_25G; + break; + case ICE_AQ_LINK_SPEED_40GB: + link.link_speed = ETH_SPEED_NUM_40G; + break; + case ICE_AQ_LINK_SPEED_50GB: + link.link_speed = ETH_SPEED_NUM_50G; + break; + case ICE_AQ_LINK_SPEED_100GB: + link.link_speed = ETH_SPEED_NUM_100G; + break; + case ICE_AQ_LINK_SPEED_UNKNOWN: + default: + PMD_DRV_LOG(ERR, "Unknown link speed"); + link.link_speed = ETH_SPEED_NUM_NONE; + break; + } + + link.link_autoneg = !(dev->data->dev_conf.link_speeds & + ETH_LINK_SPEED_FIXED); + +out: + ice_atomic_write_link_status(dev, &link); + if (link.link_status == old.link_status) + return -1; + + return 0; +} + +/* Force the physical link state by getting the current PHY capabilities from + * hardware and setting the PHY config based on the determined capabilities. If + * link changes, link event will be triggered because both the Enable Automatic + * Link Update and LESM Enable bits are set when setting the PHY capabilities. 
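/*
 * Editor's note -- illustrative sketch. ice_link_update() above polls the
 * firmware up to 10 times at 100 ms intervals and translates the AQ speed
 * code into an ETH_SPEED_NUM_* value; an application reads the result back
 * through the generic ethdev link API. The port id is an assumption.
 */
#include <stdio.h>
#include <string.h>
#include <rte_ethdev.h>

static void print_link(uint16_t port_id)
{
	struct rte_eth_link link;

	memset(&link, 0, sizeof(link));
	rte_eth_link_get_nowait(port_id, &link);   /* non-blocking variant */
	if (link.link_status)
		printf("port %u up, %u Mbps, %s-duplex\n", port_id,
		       link.link_speed,
		       link.link_duplex == ETH_LINK_FULL_DUPLEX ? "full" : "half");
	else
		printf("port %u down\n", port_id);
}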
+ */ +static enum ice_status +ice_force_phys_link_state(struct ice_hw *hw, bool link_up) +{ + struct ice_aqc_set_phy_cfg_data cfg = { 0 }; + struct ice_aqc_get_phy_caps_data *pcaps; + struct ice_port_info *pi; + enum ice_status status; + + if (!hw || !hw->port_info) + return ICE_ERR_PARAM; + + pi = hw->port_info; + + pcaps = (struct ice_aqc_get_phy_caps_data *) + ice_malloc(hw, sizeof(*pcaps)); + if (!pcaps) + return ICE_ERR_NO_MEMORY; + + status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps, + NULL); + if (status) + goto out; + + /* No change in link */ + if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) && + link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP)) + goto out; + + cfg.phy_type_low = pcaps->phy_type_low; + cfg.phy_type_high = pcaps->phy_type_high; + cfg.caps = pcaps->caps | ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; + cfg.low_power_ctrl_an = pcaps->low_power_ctrl_an; + cfg.eee_cap = pcaps->eee_cap; + cfg.eeer_value = pcaps->eeer_value; + cfg.link_fec_opt = pcaps->link_fec_options; + if (link_up) + cfg.caps |= ICE_AQ_PHY_ENA_LINK; + else + cfg.caps &= ~ICE_AQ_PHY_ENA_LINK; + + status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL); + +out: + ice_free(hw, pcaps); + return status; +} + +static int +ice_dev_set_link_up(struct rte_eth_dev *dev) +{ + struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + return ice_force_phys_link_state(hw, true); +} + +static int +ice_dev_set_link_down(struct rte_eth_dev *dev) +{ + struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + return ice_force_phys_link_state(hw, false); +} + +static int +ice_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) +{ + struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct rte_eth_dev_data *dev_data = pf->dev_data; + uint32_t frame_size = mtu + ICE_ETH_OVERHEAD; + + /* check if mtu is within the allowed range */ + if (mtu < RTE_ETHER_MIN_MTU || frame_size > ICE_FRAME_SIZE_MAX) + return -EINVAL; + + /* mtu setting is forbidden if port is start */ + if (dev_data->dev_started) { + PMD_DRV_LOG(ERR, + "port %d must be stopped before configuration", + dev_data->port_id); + return -EBUSY; + } + + if (frame_size > RTE_ETHER_MAX_LEN) + dev_data->dev_conf.rxmode.offloads |= + DEV_RX_OFFLOAD_JUMBO_FRAME; + else + dev_data->dev_conf.rxmode.offloads &= + ~DEV_RX_OFFLOAD_JUMBO_FRAME; + + dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size; + + return 0; +} + +static int ice_macaddr_set(struct rte_eth_dev *dev, + struct rte_ether_addr *mac_addr) +{ + struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct ice_vsi *vsi = pf->main_vsi; + struct ice_mac_filter *f; + uint8_t flags = 0; + int ret; + + if (!rte_is_valid_assigned_ether_addr(mac_addr)) { + PMD_DRV_LOG(ERR, "Tried to set invalid MAC address."); + return -EINVAL; + } + + TAILQ_FOREACH(f, &vsi->mac_list, next) { + if (rte_is_same_ether_addr(&pf->dev_addr, &f->mac_info.mac_addr)) + break; + } + + if (!f) { + PMD_DRV_LOG(ERR, "Failed to find filter for default mac"); + return -EIO; + } + + ret = ice_remove_mac_filter(vsi, &f->mac_info.mac_addr); + if (ret != ICE_SUCCESS) { + PMD_DRV_LOG(ERR, "Failed to delete mac filter"); + return -EIO; + } + ret = ice_add_mac_filter(vsi, mac_addr); + if (ret != ICE_SUCCESS) { + PMD_DRV_LOG(ERR, "Failed to add mac filter"); + return -EIO; + } + rte_ether_addr_copy(mac_addr, &pf->dev_addr); + + flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL; + ret = ice_aq_manage_mac_write(hw, mac_addr->addr_bytes, 
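/*
 * Editor's note -- worked example, not from the patch. ice_mtu_set() above
 * accepts an MTU only while the port is stopped and derives the on-wire frame
 * size as MTU + ICE_ETH_OVERHEAD, i.e. Ethernet header (14) + CRC (4) + two
 * VLAN tags (2 * 4) = 26 bytes. With ICE_FRAME_SIZE_MAX at 9728 the largest
 * accepted MTU is therefore 9728 - 26 = 9702, and any frame larger than
 * RTE_ETHER_MAX_LEN (1518) also turns on the jumbo-frame offload.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint32_t overhead = 14 + 4 + 2 * 4;      /* ICE_ETH_OVERHEAD */
	const uint32_t frame_max = 9728;               /* ICE_FRAME_SIZE_MAX */
	uint16_t mtu = 9000;                           /* assumed value */
	uint32_t frame_size = mtu + overhead;

	printf("frame size for MTU %u: %u (max MTU %u)\n",
	       mtu, frame_size, frame_max - overhead);
	printf("jumbo offload needed: %s\n", frame_size > 1518 ? "yes" : "no");
	return 0;
}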
flags, NULL); + if (ret != ICE_SUCCESS) + PMD_DRV_LOG(ERR, "Failed to set manage mac"); + + return 0; +} + +/* Add a MAC address, and update filters */ +static int +ice_macaddr_add(struct rte_eth_dev *dev, + struct rte_ether_addr *mac_addr, + __rte_unused uint32_t index, + __rte_unused uint32_t pool) +{ + struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct ice_vsi *vsi = pf->main_vsi; + int ret; + + ret = ice_add_mac_filter(vsi, mac_addr); + if (ret != ICE_SUCCESS) { + PMD_DRV_LOG(ERR, "Failed to add MAC filter"); + return -EINVAL; + } + + return ICE_SUCCESS; +} + +/* Remove a MAC address, and update filters */ +static void +ice_macaddr_remove(struct rte_eth_dev *dev, uint32_t index) +{ + struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct ice_vsi *vsi = pf->main_vsi; + struct rte_eth_dev_data *data = dev->data; + struct rte_ether_addr *macaddr; + int ret; + + macaddr = &data->mac_addrs[index]; + ret = ice_remove_mac_filter(vsi, macaddr); + if (ret) { + PMD_DRV_LOG(ERR, "Failed to remove MAC filter"); + return; + } +} + +static int +ice_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) +{ + struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct ice_vsi *vsi = pf->main_vsi; + int ret; + + PMD_INIT_FUNC_TRACE(); + + if (on) { + ret = ice_add_vlan_filter(vsi, vlan_id); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Failed to add vlan filter"); + return -EINVAL; + } + } else { + ret = ice_remove_vlan_filter(vsi, vlan_id); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Failed to remove vlan filter"); + return -EINVAL; + } + } + + return 0; +} + +/* Configure vlan filter on or off */ +static int +ice_vsi_config_vlan_filter(struct ice_vsi *vsi, bool on) +{ + struct ice_hw *hw = ICE_VSI_TO_HW(vsi); + struct ice_vsi_ctx ctxt; + uint8_t sec_flags, sw_flags2; + int ret = 0; + + sec_flags = ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA << + ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S; + sw_flags2 = ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA; + + if (on) { + vsi->info.sec_flags |= sec_flags; + vsi->info.sw_flags2 |= sw_flags2; + } else { + vsi->info.sec_flags &= ~sec_flags; + vsi->info.sw_flags2 &= ~sw_flags2; + } + vsi->info.sw_id = hw->port_info->sw_id; + (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info)); + ctxt.info.valid_sections = + rte_cpu_to_le_16(ICE_AQ_VSI_PROP_SW_VALID | + ICE_AQ_VSI_PROP_SECURITY_VALID); + ctxt.vsi_num = vsi->vsi_id; + + ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL); + if (ret) { + PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan rx pruning", + on ? 
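/*
 * Editor's note -- illustrative sketch. The VLAN filter and stripping paths
 * above are reached through the generic ethdev VLAN API: the strip/filter
 * switches via rte_eth_dev_set_vlan_offload() and per-VLAN entries via
 * rte_eth_dev_vlan_filter(); filtering has to be switched on before per-VLAN
 * entries are accepted. Port id and VLAN id are assumptions.
 */
#include <rte_ethdev.h>

static int allow_vlan_100(uint16_t port_id)
{
	int mask, ret;

	mask = rte_eth_dev_get_vlan_offload(port_id);
	if (mask < 0)
		return mask;
	mask |= ETH_VLAN_FILTER_OFFLOAD | ETH_VLAN_STRIP_OFFLOAD;
	ret = rte_eth_dev_set_vlan_offload(port_id, mask);
	if (ret != 0)
		return ret;

	return rte_eth_dev_vlan_filter(port_id, 100, 1);   /* 1 = add the filter */
}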
"enable" : "disable"); + return -EINVAL; + } else { + vsi->info.valid_sections |= + rte_cpu_to_le_16(ICE_AQ_VSI_PROP_SW_VALID | + ICE_AQ_VSI_PROP_SECURITY_VALID); + } + + /* consist with other drivers, allow untagged packet when vlan filter on */ + if (on) + ret = ice_add_vlan_filter(vsi, 0); + else + ret = ice_remove_vlan_filter(vsi, 0); + + return 0; +} + +static int +ice_vsi_config_vlan_stripping(struct ice_vsi *vsi, bool on) +{ + struct ice_hw *hw = ICE_VSI_TO_HW(vsi); + struct ice_vsi_ctx ctxt; + uint8_t vlan_flags; + int ret = 0; + + /* Check if it has been already on or off */ + if (vsi->info.valid_sections & + rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID)) { + if (on) { + if ((vsi->info.vlan_flags & + ICE_AQ_VSI_VLAN_EMOD_M) == + ICE_AQ_VSI_VLAN_EMOD_STR_BOTH) + return 0; /* already on */ + } else { + if ((vsi->info.vlan_flags & + ICE_AQ_VSI_VLAN_EMOD_M) == + ICE_AQ_VSI_VLAN_EMOD_NOTHING) + return 0; /* already off */ + } + } + + if (on) + vlan_flags = ICE_AQ_VSI_VLAN_EMOD_STR_BOTH; + else + vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING; + vsi->info.vlan_flags &= ~(ICE_AQ_VSI_VLAN_EMOD_M); + vsi->info.vlan_flags |= vlan_flags; + (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info)); + ctxt.info.valid_sections = + rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID); + ctxt.vsi_num = vsi->vsi_id; + ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL); + if (ret) { + PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan stripping", + on ? "enable" : "disable"); + return -EINVAL; + } + + vsi->info.valid_sections |= + rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID); + + return ret; +} + +static int +ice_vlan_offload_set(struct rte_eth_dev *dev, int mask) +{ + struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct ice_vsi *vsi = pf->main_vsi; + struct rte_eth_rxmode *rxmode; + + rxmode = &dev->data->dev_conf.rxmode; + if (mask & ETH_VLAN_FILTER_MASK) { + if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) + ice_vsi_config_vlan_filter(vsi, true); + else + ice_vsi_config_vlan_filter(vsi, false); + } + + if (mask & ETH_VLAN_STRIP_MASK) { + if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) + ice_vsi_config_vlan_stripping(vsi, true); + else + ice_vsi_config_vlan_stripping(vsi, false); + } + + if (mask & ETH_VLAN_EXTEND_MASK) { + if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) + ice_vsi_config_double_vlan(vsi, true); + else + ice_vsi_config_double_vlan(vsi, false); + } + + return 0; +} + +static int +ice_get_rss_lut(struct ice_vsi *vsi, uint8_t *lut, uint16_t lut_size) +{ + struct ice_pf *pf = ICE_VSI_TO_PF(vsi); + struct ice_hw *hw = ICE_VSI_TO_HW(vsi); + int ret; + + if (!lut) + return -EINVAL; + + if (pf->flags & ICE_FLAG_RSS_AQ_CAPABLE) { + ret = ice_aq_get_rss_lut(hw, vsi->idx, + ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF, lut, lut_size); + if (ret) { + PMD_DRV_LOG(ERR, "Failed to get RSS lookup table"); + return -EINVAL; + } + } else { + uint64_t *lut_dw = (uint64_t *)lut; + uint16_t i, lut_size_dw = lut_size / 4; + + for (i = 0; i < lut_size_dw; i++) + lut_dw[i] = ICE_READ_REG(hw, PFQF_HLUT(i)); + } + + return 0; +} + +static int +ice_set_rss_lut(struct ice_vsi *vsi, uint8_t *lut, uint16_t lut_size) +{ + struct ice_pf *pf; + struct ice_hw *hw; + int ret; + + if (!vsi || !lut) + return -EINVAL; + + pf = ICE_VSI_TO_PF(vsi); + hw = ICE_VSI_TO_HW(vsi); + + if (pf->flags & ICE_FLAG_RSS_AQ_CAPABLE) { + ret = ice_aq_set_rss_lut(hw, vsi->idx, + ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF, lut, lut_size); + if (ret) { + PMD_DRV_LOG(ERR, "Failed to set RSS lookup table"); + return -EINVAL; + } + } else { + 
uint64_t *lut_dw = (uint64_t *)lut; + uint16_t i, lut_size_dw = lut_size / 4; + + for (i = 0; i < lut_size_dw; i++) + ICE_WRITE_REG(hw, PFQF_HLUT(i), lut_dw[i]); + + ice_flush(hw); + } + + return 0; +} + +static int +ice_rss_reta_update(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size) +{ + struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); + uint16_t i, lut_size = pf->hash_lut_size; + uint16_t idx, shift; + uint8_t *lut; + int ret; + + if (reta_size != ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128 && + reta_size != ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512 && + reta_size != ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K) { + PMD_DRV_LOG(ERR, + "The size of hash lookup table configured (%d)" + "doesn't match the number hardware can " + "supported (128, 512, 2048)", + reta_size); + return -EINVAL; + } + + /* It MUST use the current LUT size to get the RSS lookup table, + * otherwise if will fail with -100 error code. + */ + lut = rte_zmalloc(NULL, RTE_MAX(reta_size, lut_size), 0); + if (!lut) { + PMD_DRV_LOG(ERR, "No memory can be allocated"); + return -ENOMEM; + } + ret = ice_get_rss_lut(pf->main_vsi, lut, lut_size); + if (ret) + goto out; + + for (i = 0; i < reta_size; i++) { + idx = i / RTE_RETA_GROUP_SIZE; + shift = i % RTE_RETA_GROUP_SIZE; + if (reta_conf[idx].mask & (1ULL << shift)) + lut[i] = reta_conf[idx].reta[shift]; + } + ret = ice_set_rss_lut(pf->main_vsi, lut, reta_size); + if (ret == 0 && lut_size != reta_size) { + PMD_DRV_LOG(INFO, + "The size of hash lookup table is changed from (%d) to (%d)", + lut_size, reta_size); + pf->hash_lut_size = reta_size; + } + +out: + rte_free(lut); + + return ret; +} + +static int +ice_rss_reta_query(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size) +{ + struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); + uint16_t i, lut_size = pf->hash_lut_size; + uint16_t idx, shift; + uint8_t *lut; + int ret; + + if (reta_size != lut_size) { + PMD_DRV_LOG(ERR, + "The size of hash lookup table configured (%d)" + "doesn't match the number hardware can " + "supported (%d)", + reta_size, lut_size); + return -EINVAL; + } + + lut = rte_zmalloc(NULL, reta_size, 0); + if (!lut) { + PMD_DRV_LOG(ERR, "No memory can be allocated"); + return -ENOMEM; + } + + ret = ice_get_rss_lut(pf->main_vsi, lut, reta_size); + if (ret) + goto out; + + for (i = 0; i < reta_size; i++) { + idx = i / RTE_RETA_GROUP_SIZE; + shift = i % RTE_RETA_GROUP_SIZE; + if (reta_conf[idx].mask & (1ULL << shift)) + reta_conf[idx].reta[shift] = lut[i]; + } + +out: + rte_free(lut); + + return ret; +} + +static int +ice_set_rss_key(struct ice_vsi *vsi, uint8_t *key, uint8_t key_len) +{ + struct ice_hw *hw = ICE_VSI_TO_HW(vsi); + int ret = 0; + + if (!key || key_len == 0) { + PMD_DRV_LOG(DEBUG, "No key to be configured"); + return 0; + } else if (key_len != (VSIQF_HKEY_MAX_INDEX + 1) * + sizeof(uint32_t)) { + PMD_DRV_LOG(ERR, "Invalid key length %u", key_len); + return -EINVAL; + } + + struct ice_aqc_get_set_rss_keys *key_dw = + (struct ice_aqc_get_set_rss_keys *)key; + + ret = ice_aq_set_rss_key(hw, vsi->idx, key_dw); + if (ret) { + PMD_DRV_LOG(ERR, "Failed to configure RSS key via AQ"); + ret = -EINVAL; + } + + return ret; +} + +static int +ice_get_rss_key(struct ice_vsi *vsi, uint8_t *key, uint8_t *key_len) +{ + struct ice_hw *hw = ICE_VSI_TO_HW(vsi); + int ret; + + if (!key || !key_len) + return -EINVAL; + + ret = ice_aq_get_rss_key + (hw, vsi->idx, + (struct ice_aqc_get_set_rss_keys *)key); + if (ret) { + 
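/*
 * Editor's note -- illustrative sketch. ice_rss_reta_update() above walks
 * rte_eth_rss_reta_entry64 groups exactly as shown here: entry i lives in
 * group i / RTE_RETA_GROUP_SIZE at offset i % RTE_RETA_GROUP_SIZE, and only
 * entries whose mask bit is set are applied. Port id, table size (a multiple
 * of RTE_RETA_GROUP_SIZE) and queue count are assumptions.
 */
#include <string.h>
#include <rte_ethdev.h>

static int spread_reta(uint16_t port_id, uint16_t reta_size, uint16_t nb_queues)
{
	struct rte_eth_rss_reta_entry64 reta[reta_size / RTE_RETA_GROUP_SIZE];
	uint16_t i;

	memset(reta, 0, sizeof(reta));
	for (i = 0; i < reta_size; i++) {
		uint16_t idx = i / RTE_RETA_GROUP_SIZE;
		uint16_t shift = i % RTE_RETA_GROUP_SIZE;

		reta[idx].mask |= 1ULL << shift;          /* mark the entry as valid */
		reta[idx].reta[shift] = i % nb_queues;    /* round-robin over the queues */
	}
	return rte_eth_dev_rss_reta_update(port_id, reta, reta_size);
}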
PMD_DRV_LOG(ERR, "Failed to get RSS key via AQ"); + return -EINVAL; + } + *key_len = (VSIQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t); + + return 0; +} + +static int +ice_rss_hash_update(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf) +{ + enum ice_status status = ICE_SUCCESS; + struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct ice_vsi *vsi = pf->main_vsi; + + /* set hash key */ + status = ice_set_rss_key(vsi, rss_conf->rss_key, rss_conf->rss_key_len); + if (status) + return status; + + /* TODO: hash enable config, ice_add_rss_cfg */ + return 0; +} + +static int +ice_rss_hash_conf_get(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf) +{ + struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct ice_vsi *vsi = pf->main_vsi; + + ice_get_rss_key(vsi, rss_conf->rss_key, + &rss_conf->rss_key_len); + + /* TODO: default set to 0 as hf config is not supported now */ + rss_conf->rss_hf = 0; + return 0; +} + +static int +ice_promisc_enable(struct rte_eth_dev *dev) +{ + struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ice_vsi *vsi = pf->main_vsi; + enum ice_status status; + uint8_t pmask; + int ret = 0; + + pmask = ICE_PROMISC_UCAST_RX | ICE_PROMISC_UCAST_TX | + ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX; + + status = ice_set_vsi_promisc(hw, vsi->idx, pmask, 0); + switch (status) { + case ICE_ERR_ALREADY_EXISTS: + PMD_DRV_LOG(DEBUG, "Promisc mode has already been enabled"); + case ICE_SUCCESS: + break; + default: + PMD_DRV_LOG(ERR, "Failed to enable promisc, err=%d", status); + ret = -EAGAIN; + } + + return ret; +} + +static int +ice_promisc_disable(struct rte_eth_dev *dev) +{ + struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ice_vsi *vsi = pf->main_vsi; + enum ice_status status; + uint8_t pmask; + int ret = 0; + + pmask = ICE_PROMISC_UCAST_RX | ICE_PROMISC_UCAST_TX | + ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX; + + status = ice_clear_vsi_promisc(hw, vsi->idx, pmask, 0); + if (status != ICE_SUCCESS) { + PMD_DRV_LOG(ERR, "Failed to clear promisc, err=%d", status); + ret = -EAGAIN; + } + + return ret; +} + +static int +ice_allmulti_enable(struct rte_eth_dev *dev) +{ + struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ice_vsi *vsi = pf->main_vsi; + enum ice_status status; + uint8_t pmask; + int ret = 0; + + pmask = ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX; + + status = ice_set_vsi_promisc(hw, vsi->idx, pmask, 0); + + switch (status) { + case ICE_ERR_ALREADY_EXISTS: + PMD_DRV_LOG(DEBUG, "Allmulti has already been enabled"); + case ICE_SUCCESS: + break; + default: + PMD_DRV_LOG(ERR, "Failed to enable allmulti, err=%d", status); + ret = -EAGAIN; + } + + return ret; +} + +static int +ice_allmulti_disable(struct rte_eth_dev *dev) +{ + struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ice_vsi *vsi = pf->main_vsi; + enum ice_status status; + uint8_t pmask; + int ret = 0; + + if (dev->data->promiscuous == 1) + return 0; /* must remain in all_multicast mode */ + + pmask = ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX; + + status = ice_clear_vsi_promisc(hw, vsi->idx, pmask, 0); + if (status != ICE_SUCCESS) { + PMD_DRV_LOG(ERR, "Failed to clear allmulti, err=%d", 
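/*
 * Editor's note -- illustrative sketch. The promiscuous/all-multicast
 * handlers above are driven by the generic ethdev calls below; note that the
 * driver keeps all-multicast reception while promiscuous mode is active,
 * which is why ice_allmulti_disable() returns early in that case. The port
 * id is an assumption.
 */
#include <rte_ethdev.h>

static void enable_receive_everything(uint16_t port_id)
{
	rte_eth_promiscuous_enable(port_id);     /* unicast + multicast promiscuous */
	rte_eth_allmulticast_enable(port_id);    /* redundant while promiscuous, shown for completeness */
}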
status); + ret = -EAGAIN; + } + + return ret; +} + +static int ice_rx_queue_intr_enable(struct rte_eth_dev *dev, + uint16_t queue_id) +{ + struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t val; + uint16_t msix_intr; + + msix_intr = intr_handle->intr_vec[queue_id]; + + val = GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M | + GLINT_DYN_CTL_ITR_INDX_M; + val &= ~GLINT_DYN_CTL_WB_ON_ITR_M; + + ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr), val); + rte_intr_ack(&pci_dev->intr_handle); + + return 0; +} + +static int ice_rx_queue_intr_disable(struct rte_eth_dev *dev, + uint16_t queue_id) +{ + struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint16_t msix_intr; + + msix_intr = intr_handle->intr_vec[queue_id]; + + ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr), GLINT_DYN_CTL_WB_ON_ITR_M); + + return 0; +} + +static int +ice_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size) +{ + struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + u8 ver, patch; + u16 build; + int ret; + + ver = hw->nvm.orom.major; + patch = hw->nvm.orom.patch; + build = hw->nvm.orom.build; + + ret = snprintf(fw_version, fw_size, + "%d.%d 0x%08x %d.%d.%d", + hw->nvm.major_ver, + hw->nvm.minor_ver, + hw->nvm.eetrack, + ver, build, patch); + + /* add the size of '\0' */ + ret += 1; + if (fw_size < (u32)ret) + return ret; + else + return 0; +} + +static int +ice_vsi_vlan_pvid_set(struct ice_vsi *vsi, struct ice_vsi_vlan_pvid_info *info) +{ + struct ice_hw *hw; + struct ice_vsi_ctx ctxt; + uint8_t vlan_flags = 0; + int ret; + + if (!vsi || !info) { + PMD_DRV_LOG(ERR, "invalid parameters"); + return -EINVAL; + } + + if (info->on) { + vsi->info.pvid = info->config.pvid; + /** + * If insert pvid is enabled, only tagged pkts are + * allowed to be sent out. 
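/*
 * Editor's note -- illustrative sketch. ice_fw_version_get() above follows
 * the ethdev convention of returning 0 on success and, when the supplied
 * buffer is too small, the number of bytes (including the terminating NUL)
 * that would have been needed. The port id is an assumption.
 */
#include <stdio.h>
#include <rte_ethdev.h>

static void show_fw_version(uint16_t port_id)
{
	char ver[64];
	int ret = rte_eth_dev_fw_version_get(port_id, ver, sizeof(ver));

	if (ret == 0)
		printf("port %u firmware: %s\n", port_id, ver);
	else if (ret > 0)
		printf("port %u: need a %d byte buffer\n", port_id, ret);
}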
+ */ + vlan_flags = ICE_AQ_VSI_PVLAN_INSERT_PVID | + ICE_AQ_VSI_VLAN_MODE_UNTAGGED; + } else { + vsi->info.pvid = 0; + if (info->config.reject.tagged == 0) + vlan_flags |= ICE_AQ_VSI_VLAN_MODE_TAGGED; + + if (info->config.reject.untagged == 0) + vlan_flags |= ICE_AQ_VSI_VLAN_MODE_UNTAGGED; + } + vsi->info.vlan_flags &= ~(ICE_AQ_VSI_PVLAN_INSERT_PVID | + ICE_AQ_VSI_VLAN_MODE_M); + vsi->info.vlan_flags |= vlan_flags; + memset(&ctxt, 0, sizeof(ctxt)); + rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info)); + ctxt.info.valid_sections = + rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID); + ctxt.vsi_num = vsi->vsi_id; + + hw = ICE_VSI_TO_HW(vsi); + ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL); + if (ret != ICE_SUCCESS) { + PMD_DRV_LOG(ERR, + "update VSI for VLAN insert failed, err %d", + ret); + return -EINVAL; + } + + vsi->info.valid_sections |= + rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID); + + return ret; +} + +static int +ice_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on) +{ + struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct ice_vsi *vsi = pf->main_vsi; + struct rte_eth_dev_data *data = pf->dev_data; + struct ice_vsi_vlan_pvid_info info; + int ret; + + memset(&info, 0, sizeof(info)); + info.on = on; + if (info.on) { + info.config.pvid = pvid; + } else { + info.config.reject.tagged = + data->dev_conf.txmode.hw_vlan_reject_tagged; + info.config.reject.untagged = + data->dev_conf.txmode.hw_vlan_reject_untagged; + } + + ret = ice_vsi_vlan_pvid_set(vsi, &info); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Failed to set pvid."); + return -EINVAL; + } + + return 0; +} + +static int +ice_get_eeprom_length(struct rte_eth_dev *dev) +{ + struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* Convert word count to byte count */ + return hw->nvm.sr_words << 1; +} + +static int +ice_get_eeprom(struct rte_eth_dev *dev, + struct rte_dev_eeprom_info *eeprom) +{ + struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint16_t *data = eeprom->data; + uint16_t first_word, last_word, nwords; + enum ice_status status = ICE_SUCCESS; + + first_word = eeprom->offset >> 1; + last_word = (eeprom->offset + eeprom->length - 1) >> 1; + nwords = last_word - first_word + 1; + + if (first_word >= hw->nvm.sr_words || + last_word >= hw->nvm.sr_words) { + PMD_DRV_LOG(ERR, "Requested EEPROM bytes out of range."); + return -EINVAL; + } + + eeprom->magic = hw->vendor_id | (hw->device_id << 16); + + status = ice_read_sr_buf(hw, first_word, &nwords, data); + if (status) { + PMD_DRV_LOG(ERR, "EEPROM read failed."); + eeprom->length = sizeof(uint16_t) * nwords; + return -EIO; + } + + return 0; +} + +static void +ice_stat_update_32(struct ice_hw *hw, + uint32_t reg, + bool offset_loaded, + uint64_t *offset, + uint64_t *stat) +{ + uint64_t new_data; + + new_data = (uint64_t)ICE_READ_REG(hw, reg); + if (!offset_loaded) + *offset = new_data; + + if (new_data >= *offset) + *stat = (uint64_t)(new_data - *offset); + else + *stat = (uint64_t)((new_data + + ((uint64_t)1 << ICE_32_BIT_WIDTH)) + - *offset); +} + +static void +ice_stat_update_40(struct ice_hw *hw, + uint32_t hireg, + uint32_t loreg, + bool offset_loaded, + uint64_t *offset, + uint64_t *stat) +{ + uint64_t new_data; + + new_data = (uint64_t)ICE_READ_REG(hw, loreg); + new_data |= (uint64_t)(ICE_READ_REG(hw, hireg) & ICE_8_BIT_MASK) << + ICE_32_BIT_WIDTH; + + if (!offset_loaded) + *offset = new_data; + + if (new_data >= *offset) + *stat = new_data - *offset; + else + *stat = (uint64_t)((new_data + + ((uint64_t)1 
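/*
 * Editor's note -- worked example, not from the patch. ice_stat_update_32()
 * and ice_stat_update_40() above turn free-running hardware counters into
 * deltas against a saved offset, adding 2^32 (or 2^40) when the counter has
 * wrapped since the offset was recorded. The sample values are made up.
 */
#include <stdio.h>
#include <stdint.h>

static uint64_t delta32(uint64_t offset, uint64_t now)
{
	return now >= offset ? now - offset : now + (1ULL << 32) - offset;
}

static uint64_t delta40(uint64_t offset, uint64_t now)
{
	uint64_t d = now >= offset ? now - offset : now + (1ULL << 40) - offset;
	return d & ((1ULL << 40) - 1);      /* equivalent of ICE_40_BIT_MASK */
}

int main(void)
{
	/* counter wrapped: it was near the 32-bit limit, now reads low again */
	printf("%llu\n", (unsigned long long)delta32(0xFFFFFF00ULL, 0x100ULL)); /* 512 */
	printf("%llu\n", (unsigned long long)delta40(100, 4196));               /* 4096 */
	return 0;
}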
<< ICE_40_BIT_WIDTH)) - + *offset); + + *stat &= ICE_40_BIT_MASK; +} + +/* Get all the statistics of a VSI */ +static void +ice_update_vsi_stats(struct ice_vsi *vsi) +{ + struct ice_eth_stats *oes = &vsi->eth_stats_offset; + struct ice_eth_stats *nes = &vsi->eth_stats; + struct ice_hw *hw = ICE_VSI_TO_HW(vsi); + int idx = rte_le_to_cpu_16(vsi->vsi_id); + + ice_stat_update_40(hw, GLV_GORCH(idx), GLV_GORCL(idx), + vsi->offset_loaded, &oes->rx_bytes, + &nes->rx_bytes); + ice_stat_update_40(hw, GLV_UPRCH(idx), GLV_UPRCL(idx), + vsi->offset_loaded, &oes->rx_unicast, + &nes->rx_unicast); + ice_stat_update_40(hw, GLV_MPRCH(idx), GLV_MPRCL(idx), + vsi->offset_loaded, &oes->rx_multicast, + &nes->rx_multicast); + ice_stat_update_40(hw, GLV_BPRCH(idx), GLV_BPRCL(idx), + vsi->offset_loaded, &oes->rx_broadcast, + &nes->rx_broadcast); + /* exclude CRC bytes */ + nes->rx_bytes -= (nes->rx_unicast + nes->rx_multicast + + nes->rx_broadcast) * RTE_ETHER_CRC_LEN; + + ice_stat_update_32(hw, GLV_RDPC(idx), vsi->offset_loaded, + &oes->rx_discards, &nes->rx_discards); + /* GLV_REPC not supported */ + /* GLV_RMPC not supported */ + ice_stat_update_32(hw, GLSWID_RUPP(idx), vsi->offset_loaded, + &oes->rx_unknown_protocol, + &nes->rx_unknown_protocol); + ice_stat_update_40(hw, GLV_GOTCH(idx), GLV_GOTCL(idx), + vsi->offset_loaded, &oes->tx_bytes, + &nes->tx_bytes); + ice_stat_update_40(hw, GLV_UPTCH(idx), GLV_UPTCL(idx), + vsi->offset_loaded, &oes->tx_unicast, + &nes->tx_unicast); + ice_stat_update_40(hw, GLV_MPTCH(idx), GLV_MPTCL(idx), + vsi->offset_loaded, &oes->tx_multicast, + &nes->tx_multicast); + ice_stat_update_40(hw, GLV_BPTCH(idx), GLV_BPTCL(idx), + vsi->offset_loaded, &oes->tx_broadcast, + &nes->tx_broadcast); + /* GLV_TDPC not supported */ + ice_stat_update_32(hw, GLV_TEPC(idx), vsi->offset_loaded, + &oes->tx_errors, &nes->tx_errors); + vsi->offset_loaded = true; + + PMD_DRV_LOG(DEBUG, "************** VSI[%u] stats start **************", + vsi->vsi_id); + PMD_DRV_LOG(DEBUG, "rx_bytes: %"PRIu64"", nes->rx_bytes); + PMD_DRV_LOG(DEBUG, "rx_unicast: %"PRIu64"", nes->rx_unicast); + PMD_DRV_LOG(DEBUG, "rx_multicast: %"PRIu64"", nes->rx_multicast); + PMD_DRV_LOG(DEBUG, "rx_broadcast: %"PRIu64"", nes->rx_broadcast); + PMD_DRV_LOG(DEBUG, "rx_discards: %"PRIu64"", nes->rx_discards); + PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"", + nes->rx_unknown_protocol); + PMD_DRV_LOG(DEBUG, "tx_bytes: %"PRIu64"", nes->tx_bytes); + PMD_DRV_LOG(DEBUG, "tx_unicast: %"PRIu64"", nes->tx_unicast); + PMD_DRV_LOG(DEBUG, "tx_multicast: %"PRIu64"", nes->tx_multicast); + PMD_DRV_LOG(DEBUG, "tx_broadcast: %"PRIu64"", nes->tx_broadcast); + PMD_DRV_LOG(DEBUG, "tx_discards: %"PRIu64"", nes->tx_discards); + PMD_DRV_LOG(DEBUG, "tx_errors: %"PRIu64"", nes->tx_errors); + PMD_DRV_LOG(DEBUG, "************** VSI[%u] stats end ****************", + vsi->vsi_id); +} + +static void +ice_read_stats_registers(struct ice_pf *pf, struct ice_hw *hw) +{ + struct ice_hw_port_stats *ns = &pf->stats; /* new stats */ + struct ice_hw_port_stats *os = &pf->stats_offset; /* old stats */ + + /* Get statistics of struct ice_eth_stats */ + ice_stat_update_40(hw, GLPRT_GORCH(hw->port_info->lport), + GLPRT_GORCL(hw->port_info->lport), + pf->offset_loaded, &os->eth.rx_bytes, + &ns->eth.rx_bytes); + ice_stat_update_40(hw, GLPRT_UPRCH(hw->port_info->lport), + GLPRT_UPRCL(hw->port_info->lport), + pf->offset_loaded, &os->eth.rx_unicast, + &ns->eth.rx_unicast); + ice_stat_update_40(hw, GLPRT_MPRCH(hw->port_info->lport), + GLPRT_MPRCL(hw->port_info->lport), + 
pf->offset_loaded, &os->eth.rx_multicast, + &ns->eth.rx_multicast); + ice_stat_update_40(hw, GLPRT_BPRCH(hw->port_info->lport), + GLPRT_BPRCL(hw->port_info->lport), + pf->offset_loaded, &os->eth.rx_broadcast, + &ns->eth.rx_broadcast); + ice_stat_update_32(hw, PRTRPB_RDPC, + pf->offset_loaded, &os->eth.rx_discards, + &ns->eth.rx_discards); + + /* Workaround: CRC size should not be included in byte statistics, + * so subtract RTE_ETHER_CRC_LEN from the byte counter for each rx + * packet. + */ + ns->eth.rx_bytes -= (ns->eth.rx_unicast + ns->eth.rx_multicast + + ns->eth.rx_broadcast) * RTE_ETHER_CRC_LEN; + + /* GLPRT_REPC not supported */ + /* GLPRT_RMPC not supported */ + ice_stat_update_32(hw, GLSWID_RUPP(hw->port_info->lport), + pf->offset_loaded, + &os->eth.rx_unknown_protocol, + &ns->eth.rx_unknown_protocol); + ice_stat_update_40(hw, GLPRT_GOTCH(hw->port_info->lport), + GLPRT_GOTCL(hw->port_info->lport), + pf->offset_loaded, &os->eth.tx_bytes, + &ns->eth.tx_bytes); + ice_stat_update_40(hw, GLPRT_UPTCH(hw->port_info->lport), + GLPRT_UPTCL(hw->port_info->lport), + pf->offset_loaded, &os->eth.tx_unicast, + &ns->eth.tx_unicast); + ice_stat_update_40(hw, GLPRT_MPTCH(hw->port_info->lport), + GLPRT_MPTCL(hw->port_info->lport), + pf->offset_loaded, &os->eth.tx_multicast, + &ns->eth.tx_multicast); + ice_stat_update_40(hw, GLPRT_BPTCH(hw->port_info->lport), + GLPRT_BPTCL(hw->port_info->lport), + pf->offset_loaded, &os->eth.tx_broadcast, + &ns->eth.tx_broadcast); + ns->eth.tx_bytes -= (ns->eth.tx_unicast + ns->eth.tx_multicast + + ns->eth.tx_broadcast) * RTE_ETHER_CRC_LEN; + + /* GLPRT_TEPC not supported */ + + /* additional port specific stats */ + ice_stat_update_32(hw, GLPRT_TDOLD(hw->port_info->lport), + pf->offset_loaded, &os->tx_dropped_link_down, + &ns->tx_dropped_link_down); + ice_stat_update_32(hw, GLPRT_CRCERRS(hw->port_info->lport), + pf->offset_loaded, &os->crc_errors, + &ns->crc_errors); + ice_stat_update_32(hw, GLPRT_ILLERRC(hw->port_info->lport), + pf->offset_loaded, &os->illegal_bytes, + &ns->illegal_bytes); + /* GLPRT_ERRBC not supported */ + ice_stat_update_32(hw, GLPRT_MLFC(hw->port_info->lport), + pf->offset_loaded, &os->mac_local_faults, + &ns->mac_local_faults); + ice_stat_update_32(hw, GLPRT_MRFC(hw->port_info->lport), + pf->offset_loaded, &os->mac_remote_faults, + &ns->mac_remote_faults); + + ice_stat_update_32(hw, GLPRT_RLEC(hw->port_info->lport), + pf->offset_loaded, &os->rx_len_errors, + &ns->rx_len_errors); + + ice_stat_update_32(hw, GLPRT_LXONRXC(hw->port_info->lport), + pf->offset_loaded, &os->link_xon_rx, + &ns->link_xon_rx); + ice_stat_update_32(hw, GLPRT_LXOFFRXC(hw->port_info->lport), + pf->offset_loaded, &os->link_xoff_rx, + &ns->link_xoff_rx); + ice_stat_update_32(hw, GLPRT_LXONTXC(hw->port_info->lport), + pf->offset_loaded, &os->link_xon_tx, + &ns->link_xon_tx); + ice_stat_update_32(hw, GLPRT_LXOFFTXC(hw->port_info->lport), + pf->offset_loaded, &os->link_xoff_tx, + &ns->link_xoff_tx); + ice_stat_update_40(hw, GLPRT_PRC64H(hw->port_info->lport), + GLPRT_PRC64L(hw->port_info->lport), + pf->offset_loaded, &os->rx_size_64, + &ns->rx_size_64); + ice_stat_update_40(hw, GLPRT_PRC127H(hw->port_info->lport), + GLPRT_PRC127L(hw->port_info->lport), + pf->offset_loaded, &os->rx_size_127, + &ns->rx_size_127); + ice_stat_update_40(hw, GLPRT_PRC255H(hw->port_info->lport), + GLPRT_PRC255L(hw->port_info->lport), + pf->offset_loaded, &os->rx_size_255, + &ns->rx_size_255); + ice_stat_update_40(hw, GLPRT_PRC511H(hw->port_info->lport), + GLPRT_PRC511L(hw->port_info->lport), + 
pf->offset_loaded, &os->rx_size_511, + &ns->rx_size_511); + ice_stat_update_40(hw, GLPRT_PRC1023H(hw->port_info->lport), + GLPRT_PRC1023L(hw->port_info->lport), + pf->offset_loaded, &os->rx_size_1023, + &ns->rx_size_1023); + ice_stat_update_40(hw, GLPRT_PRC1522H(hw->port_info->lport), + GLPRT_PRC1522L(hw->port_info->lport), + pf->offset_loaded, &os->rx_size_1522, + &ns->rx_size_1522); + ice_stat_update_40(hw, GLPRT_PRC9522H(hw->port_info->lport), + GLPRT_PRC9522L(hw->port_info->lport), + pf->offset_loaded, &os->rx_size_big, + &ns->rx_size_big); + ice_stat_update_32(hw, GLPRT_RUC(hw->port_info->lport), + pf->offset_loaded, &os->rx_undersize, + &ns->rx_undersize); + ice_stat_update_32(hw, GLPRT_RFC(hw->port_info->lport), + pf->offset_loaded, &os->rx_fragments, + &ns->rx_fragments); + ice_stat_update_32(hw, GLPRT_ROC(hw->port_info->lport), + pf->offset_loaded, &os->rx_oversize, + &ns->rx_oversize); + ice_stat_update_32(hw, GLPRT_RJC(hw->port_info->lport), + pf->offset_loaded, &os->rx_jabber, + &ns->rx_jabber); + ice_stat_update_40(hw, GLPRT_PTC64H(hw->port_info->lport), + GLPRT_PTC64L(hw->port_info->lport), + pf->offset_loaded, &os->tx_size_64, + &ns->tx_size_64); + ice_stat_update_40(hw, GLPRT_PTC127H(hw->port_info->lport), + GLPRT_PTC127L(hw->port_info->lport), + pf->offset_loaded, &os->tx_size_127, + &ns->tx_size_127); + ice_stat_update_40(hw, GLPRT_PTC255H(hw->port_info->lport), + GLPRT_PTC255L(hw->port_info->lport), + pf->offset_loaded, &os->tx_size_255, + &ns->tx_size_255); + ice_stat_update_40(hw, GLPRT_PTC511H(hw->port_info->lport), + GLPRT_PTC511L(hw->port_info->lport), + pf->offset_loaded, &os->tx_size_511, + &ns->tx_size_511); + ice_stat_update_40(hw, GLPRT_PTC1023H(hw->port_info->lport), + GLPRT_PTC1023L(hw->port_info->lport), + pf->offset_loaded, &os->tx_size_1023, + &ns->tx_size_1023); + ice_stat_update_40(hw, GLPRT_PTC1522H(hw->port_info->lport), + GLPRT_PTC1522L(hw->port_info->lport), + pf->offset_loaded, &os->tx_size_1522, + &ns->tx_size_1522); + ice_stat_update_40(hw, GLPRT_PTC9522H(hw->port_info->lport), + GLPRT_PTC9522L(hw->port_info->lport), + pf->offset_loaded, &os->tx_size_big, + &ns->tx_size_big); + + /* GLPRT_MSPDC not supported */ + /* GLPRT_XEC not supported */ + + pf->offset_loaded = true; + + if (pf->main_vsi) + ice_update_vsi_stats(pf->main_vsi); +} + +/* Get all statistics of a port */ +static int +ice_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) +{ + struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ice_hw_port_stats *ns = &pf->stats; /* new stats */ + + /* call read registers - updates values, now write them to struct */ + ice_read_stats_registers(pf, hw); + + stats->ipackets = pf->main_vsi->eth_stats.rx_unicast + + pf->main_vsi->eth_stats.rx_multicast + + pf->main_vsi->eth_stats.rx_broadcast - + pf->main_vsi->eth_stats.rx_discards; + stats->opackets = ns->eth.tx_unicast + + ns->eth.tx_multicast + + ns->eth.tx_broadcast; + stats->ibytes = pf->main_vsi->eth_stats.rx_bytes; + stats->obytes = ns->eth.tx_bytes; + stats->oerrors = ns->eth.tx_errors + + pf->main_vsi->eth_stats.tx_errors; + + /* Rx Errors */ + stats->imissed = ns->eth.rx_discards + + pf->main_vsi->eth_stats.rx_discards; + stats->ierrors = ns->crc_errors + + ns->rx_undersize + + ns->rx_oversize + ns->rx_fragments + ns->rx_jabber; + + PMD_DRV_LOG(DEBUG, "*************** PF stats start *****************"); + PMD_DRV_LOG(DEBUG, "rx_bytes: %"PRIu64"", ns->eth.rx_bytes); + PMD_DRV_LOG(DEBUG, 
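/*
 * Editor's note -- illustrative sketch. ice_stats_get() above fills the
 * generic rte_eth_stats counters (ipackets/opackets/ibytes/obytes plus the
 * imissed/ierrors error buckets), and ice_stats_reset() simply re-latches the
 * hardware offsets rather than clearing registers. Port id is an assumption.
 */
#include <stdio.h>
#include <inttypes.h>
#include <rte_ethdev.h>

static void dump_and_clear(uint16_t port_id)
{
	struct rte_eth_stats st;

	if (rte_eth_stats_get(port_id, &st) == 0)
		printf("rx %" PRIu64 " pkts / %" PRIu64 " bytes, missed %" PRIu64
		       ", errors %" PRIu64 "\n",
		       st.ipackets, st.ibytes, st.imissed, st.ierrors);
	rte_eth_stats_reset(port_id);      /* next read starts from zero */
}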
"rx_unicast: %"PRIu64"", ns->eth.rx_unicast); + PMD_DRV_LOG(DEBUG, "rx_multicast:%"PRIu64"", ns->eth.rx_multicast); + PMD_DRV_LOG(DEBUG, "rx_broadcast:%"PRIu64"", ns->eth.rx_broadcast); + PMD_DRV_LOG(DEBUG, "rx_discards:%"PRIu64"", ns->eth.rx_discards); + PMD_DRV_LOG(DEBUG, "vsi rx_discards:%"PRIu64"", + pf->main_vsi->eth_stats.rx_discards); + PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"", + ns->eth.rx_unknown_protocol); + PMD_DRV_LOG(DEBUG, "tx_bytes: %"PRIu64"", ns->eth.tx_bytes); + PMD_DRV_LOG(DEBUG, "tx_unicast: %"PRIu64"", ns->eth.tx_unicast); + PMD_DRV_LOG(DEBUG, "tx_multicast:%"PRIu64"", ns->eth.tx_multicast); + PMD_DRV_LOG(DEBUG, "tx_broadcast:%"PRIu64"", ns->eth.tx_broadcast); + PMD_DRV_LOG(DEBUG, "tx_discards:%"PRIu64"", ns->eth.tx_discards); + PMD_DRV_LOG(DEBUG, "vsi tx_discards:%"PRIu64"", + pf->main_vsi->eth_stats.tx_discards); + PMD_DRV_LOG(DEBUG, "tx_errors: %"PRIu64"", ns->eth.tx_errors); + + PMD_DRV_LOG(DEBUG, "tx_dropped_link_down: %"PRIu64"", + ns->tx_dropped_link_down); + PMD_DRV_LOG(DEBUG, "crc_errors: %"PRIu64"", ns->crc_errors); + PMD_DRV_LOG(DEBUG, "illegal_bytes: %"PRIu64"", + ns->illegal_bytes); + PMD_DRV_LOG(DEBUG, "error_bytes: %"PRIu64"", ns->error_bytes); + PMD_DRV_LOG(DEBUG, "mac_local_faults: %"PRIu64"", + ns->mac_local_faults); + PMD_DRV_LOG(DEBUG, "mac_remote_faults: %"PRIu64"", + ns->mac_remote_faults); + PMD_DRV_LOG(DEBUG, "link_xon_rx: %"PRIu64"", ns->link_xon_rx); + PMD_DRV_LOG(DEBUG, "link_xoff_rx: %"PRIu64"", ns->link_xoff_rx); + PMD_DRV_LOG(DEBUG, "link_xon_tx: %"PRIu64"", ns->link_xon_tx); + PMD_DRV_LOG(DEBUG, "link_xoff_tx: %"PRIu64"", ns->link_xoff_tx); + PMD_DRV_LOG(DEBUG, "rx_size_64: %"PRIu64"", ns->rx_size_64); + PMD_DRV_LOG(DEBUG, "rx_size_127: %"PRIu64"", ns->rx_size_127); + PMD_DRV_LOG(DEBUG, "rx_size_255: %"PRIu64"", ns->rx_size_255); + PMD_DRV_LOG(DEBUG, "rx_size_511: %"PRIu64"", ns->rx_size_511); + PMD_DRV_LOG(DEBUG, "rx_size_1023: %"PRIu64"", ns->rx_size_1023); + PMD_DRV_LOG(DEBUG, "rx_size_1522: %"PRIu64"", ns->rx_size_1522); + PMD_DRV_LOG(DEBUG, "rx_size_big: %"PRIu64"", ns->rx_size_big); + PMD_DRV_LOG(DEBUG, "rx_undersize: %"PRIu64"", ns->rx_undersize); + PMD_DRV_LOG(DEBUG, "rx_fragments: %"PRIu64"", ns->rx_fragments); + PMD_DRV_LOG(DEBUG, "rx_oversize: %"PRIu64"", ns->rx_oversize); + PMD_DRV_LOG(DEBUG, "rx_jabber: %"PRIu64"", ns->rx_jabber); + PMD_DRV_LOG(DEBUG, "tx_size_64: %"PRIu64"", ns->tx_size_64); + PMD_DRV_LOG(DEBUG, "tx_size_127: %"PRIu64"", ns->tx_size_127); + PMD_DRV_LOG(DEBUG, "tx_size_255: %"PRIu64"", ns->tx_size_255); + PMD_DRV_LOG(DEBUG, "tx_size_511: %"PRIu64"", ns->tx_size_511); + PMD_DRV_LOG(DEBUG, "tx_size_1023: %"PRIu64"", ns->tx_size_1023); + PMD_DRV_LOG(DEBUG, "tx_size_1522: %"PRIu64"", ns->tx_size_1522); + PMD_DRV_LOG(DEBUG, "tx_size_big: %"PRIu64"", ns->tx_size_big); + PMD_DRV_LOG(DEBUG, "rx_len_errors: %"PRIu64"", ns->rx_len_errors); + PMD_DRV_LOG(DEBUG, "************* PF stats end ****************"); + return 0; +} + +/* Reset the statistics */ +static int +ice_stats_reset(struct rte_eth_dev *dev) +{ + struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* Mark PF and VSI stats to update the offset, aka "reset" */ + pf->offset_loaded = false; + if (pf->main_vsi) + pf->main_vsi->offset_loaded = false; + + /* read the stats, reading current register values into offset */ + ice_read_stats_registers(pf, hw); + + return 0; +} + +static uint32_t +ice_xstats_calc_num(void) +{ + uint32_t num; + + num = ICE_NB_ETH_XSTATS + 
ICE_NB_HW_PORT_XSTATS; + + return num; +} + +static int +ice_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, + unsigned int n) +{ + struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + unsigned int i; + unsigned int count; + struct ice_hw_port_stats *hw_stats = &pf->stats; + + count = ice_xstats_calc_num(); + if (n < count) + return count; + + ice_read_stats_registers(pf, hw); + + if (!xstats) + return 0; + + count = 0; + + /* Get stats from ice_eth_stats struct */ + for (i = 0; i < ICE_NB_ETH_XSTATS; i++) { + xstats[count].value = + *(uint64_t *)((char *)&hw_stats->eth + + ice_stats_strings[i].offset); + xstats[count].id = count; + count++; + } + + /* Get individiual stats from ice_hw_port struct */ + for (i = 0; i < ICE_NB_HW_PORT_XSTATS; i++) { + xstats[count].value = + *(uint64_t *)((char *)hw_stats + + ice_hw_port_strings[i].offset); + xstats[count].id = count; + count++; + } + + return count; +} + +static int ice_xstats_get_names(__rte_unused struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, + __rte_unused unsigned int limit) +{ + unsigned int count = 0; + unsigned int i; + + if (!xstats_names) + return ice_xstats_calc_num(); + + /* Note: limit checked in rte_eth_xstats_names() */ + + /* Get stats from ice_eth_stats struct */ + for (i = 0; i < ICE_NB_ETH_XSTATS; i++) { + strlcpy(xstats_names[count].name, ice_stats_strings[i].name, + sizeof(xstats_names[count].name)); + count++; + } + + /* Get individiual stats from ice_hw_port struct */ + for (i = 0; i < ICE_NB_HW_PORT_XSTATS; i++) { + strlcpy(xstats_names[count].name, ice_hw_port_strings[i].name, + sizeof(xstats_names[count].name)); + count++; + } + + return count; +} + +static int +ice_dev_filter_ctrl(struct rte_eth_dev *dev, + enum rte_filter_type filter_type, + enum rte_filter_op filter_op, + void *arg) +{ + int ret = 0; + + if (!dev) + return -EINVAL; + + switch (filter_type) { + case RTE_ETH_FILTER_GENERIC: + if (filter_op != RTE_ETH_FILTER_GET) + return -EINVAL; + *(const void **)arg = &ice_flow_ops; + break; + default: + PMD_DRV_LOG(WARNING, "Filter type (%d) not supported", + filter_type); + ret = -EINVAL; + break; + } + + return ret; +} + +/* Add UDP tunneling port */ +static int +ice_dev_udp_tunnel_port_add(struct rte_eth_dev *dev, + struct rte_eth_udp_tunnel *udp_tunnel) +{ + int ret = 0; + struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (udp_tunnel == NULL) + return -EINVAL; + + switch (udp_tunnel->prot_type) { + case RTE_TUNNEL_TYPE_VXLAN: + ret = ice_create_tunnel(hw, TNL_VXLAN, udp_tunnel->udp_port); + break; + default: + PMD_DRV_LOG(ERR, "Invalid tunnel type"); + ret = -EINVAL; + break; + } + + return ret; +} + +/* Delete UDP tunneling port */ +static int +ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev, + struct rte_eth_udp_tunnel *udp_tunnel) +{ + int ret = 0; + struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (udp_tunnel == NULL) + return -EINVAL; + + switch (udp_tunnel->prot_type) { + case RTE_TUNNEL_TYPE_VXLAN: + ret = ice_destroy_tunnel(hw, udp_tunnel->udp_port, 0); + break; + default: + PMD_DRV_LOG(ERR, "Invalid tunnel type"); + ret = -EINVAL; + break; + } + + return ret; +} + +static int +ice_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, + struct rte_pci_device *pci_dev) +{ + return rte_eth_dev_pci_generic_probe(pci_dev, + sizeof(struct ice_adapter), + ice_dev_init); +} + +static int +ice_pci_remove(struct rte_pci_device 
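/*
 * Editor's note -- illustrative sketch. ice_xstats_get()/_get_names() above
 * follow the usual ethdev two-step contract: a call with a NULL (or too
 * small) array returns the required count, after which names and values can
 * be fetched and matched up by index. The port id is an assumption.
 */
#include <stdio.h>
#include <stdlib.h>
#include <inttypes.h>
#include <rte_ethdev.h>

static void dump_xstats(uint16_t port_id)
{
	int n = rte_eth_xstats_get_names(port_id, NULL, 0);
	if (n <= 0)
		return;

	struct rte_eth_xstat_name *names = calloc(n, sizeof(*names));
	struct rte_eth_xstat *vals = calloc(n, sizeof(*vals));
	if (names && vals &&
	    rte_eth_xstats_get_names(port_id, names, n) == n &&
	    rte_eth_xstats_get(port_id, vals, n) == n) {
		for (int i = 0; i < n; i++)
			printf("%s: %" PRIu64 "\n", names[i].name, vals[i].value);
	}
	free(names);
	free(vals);
}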
*pci_dev) +{ + return rte_eth_dev_pci_generic_remove(pci_dev, ice_dev_uninit); +} + +static struct rte_pci_driver rte_ice_pmd = { + .id_table = pci_id_ice_map, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, + .probe = ice_pci_probe, + .remove = ice_pci_remove, +}; + +/** + * Driver initialization routine. + * Invoked once at EAL init time. + * Register itself as the [Poll Mode] Driver of PCI devices. + */ +RTE_PMD_REGISTER_PCI(net_ice, rte_ice_pmd); +RTE_PMD_REGISTER_PCI_TABLE(net_ice, pci_id_ice_map); +RTE_PMD_REGISTER_KMOD_DEP(net_ice, "* igb_uio | uio_pci_generic | vfio-pci"); +RTE_PMD_REGISTER_PARAM_STRING(net_ice, + ICE_PROTO_XTR_ARG "=[queue:]" + ICE_SAFE_MODE_SUPPORT_ARG "=<0|1>" + ICE_PIPELINE_MODE_SUPPORT_ARG "=<0|1>" + ICE_FLOW_MARK_SUPPORT_ARG "=<0|1>"); + +RTE_INIT(ice_init_log) +{ + ice_logtype_init = rte_log_register("pmd.net.ice.init"); + if (ice_logtype_init >= 0) + rte_log_set_level(ice_logtype_init, RTE_LOG_NOTICE); + ice_logtype_driver = rte_log_register("pmd.net.ice.driver"); + if (ice_logtype_driver >= 0) + rte_log_set_level(ice_logtype_driver, RTE_LOG_NOTICE); + +#ifdef RTE_LIBRTE_ICE_DEBUG_RX + ice_logtype_rx = rte_log_register("pmd.net.ice.rx"); + if (ice_logtype_rx >= 0) + rte_log_set_level(ice_logtype_rx, RTE_LOG_DEBUG); +#endif + +#ifdef RTE_LIBRTE_ICE_DEBUG_TX + ice_logtype_tx = rte_log_register("pmd.net.ice.tx"); + if (ice_logtype_tx >= 0) + rte_log_set_level(ice_logtype_tx, RTE_LOG_DEBUG); +#endif + +#ifdef RTE_LIBRTE_ICE_DEBUG_TX_FREE + ice_logtype_tx_free = rte_log_register("pmd.net.ice.tx_free"); + if (ice_logtype_tx_free >= 0) + rte_log_set_level(ice_logtype_tx_free, RTE_LOG_DEBUG); +#endif +} diff --git a/src/spdk/dpdk/drivers/net/ice/ice_ethdev.h b/src/spdk/dpdk/drivers/net/ice/ice_ethdev.h new file mode 100644 index 000000000..f88f9dd9f --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ice/ice_ethdev.h @@ -0,0 +1,524 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Intel Corporation + */ + +#ifndef _ICE_ETHDEV_H_ +#define _ICE_ETHDEV_H_ + +#include + +#include + +#include "base/ice_common.h" +#include "base/ice_adminq_cmd.h" + +#define ICE_VLAN_TAG_SIZE 4 + +#define ICE_ADMINQ_LEN 32 +#define ICE_SBIOQ_LEN 32 +#define ICE_MAILBOXQ_LEN 32 +#define ICE_ADMINQ_BUF_SZ 4096 +#define ICE_SBIOQ_BUF_SZ 4096 +#define ICE_MAILBOXQ_BUF_SZ 4096 +/* Number of queues per TC should be one of 1, 2, 4, 8, 16, 32, 64 */ +#define ICE_MAX_Q_PER_TC 64 +#define ICE_NUM_DESC_DEFAULT 512 +#define ICE_BUF_SIZE_MIN 1024 +#define ICE_FRAME_SIZE_MAX 9728 +#define ICE_QUEUE_BASE_ADDR_UNIT 128 +/* number of VSIs and queue default setting */ +#define ICE_MAX_QP_NUM_PER_VF 16 +#define ICE_DEFAULT_QP_NUM_FDIR 1 +#define ICE_UINT32_BIT_SIZE (CHAR_BIT * sizeof(uint32_t)) +#define ICE_VFTA_SIZE (4096 / ICE_UINT32_BIT_SIZE) +/* Maximun number of MAC addresses */ +#define ICE_NUM_MACADDR_MAX 64 +/* Maximum number of VFs */ +#define ICE_MAX_VF 128 +#define ICE_MAX_INTR_QUEUE_NUM 256 + +#define ICE_MISC_VEC_ID RTE_INTR_VEC_ZERO_OFFSET +#define ICE_RX_VEC_ID RTE_INTR_VEC_RXTX_OFFSET + +#define ICE_MAX_PKT_TYPE 1024 + +/* DDP package search path */ +#define ICE_PKG_FILE_DEFAULT "/lib/firmware/intel/ice/ddp/ice.pkg" +#define ICE_PKG_FILE_UPDATES "/lib/firmware/updates/intel/ice/ddp/ice.pkg" +#define ICE_PKG_FILE_SEARCH_PATH_DEFAULT "/lib/firmware/intel/ice/ddp/" +#define ICE_PKG_FILE_SEARCH_PATH_UPDATES "/lib/firmware/updates/intel/ice/ddp/" +#define ICE_MAX_PKG_FILENAME_SIZE 256 + +/** + * vlan_id is a 12 bit number. 
+ * The VFTA array is actually a 4096 bit array, 128 of 32bit elements. + * 2^5 = 32. The val of lower 5 bits specifies the bit in the 32bit element. + * The higher 7 bit val specifies VFTA array index. + */ +#define ICE_VFTA_BIT(vlan_id) (1 << ((vlan_id) & 0x1F)) +#define ICE_VFTA_IDX(vlan_id) ((vlan_id) >> 5) + +/* Default TC traffic in case DCB is not enabled */ +#define ICE_DEFAULT_TCMAP 0x1 +#define ICE_FDIR_QUEUE_ID 0 + +/* Always assign pool 0 to main VSI, VMDQ will start from 1 */ +#define ICE_VMDQ_POOL_BASE 1 + +#define ICE_DEFAULT_RX_FREE_THRESH 32 +#define ICE_DEFAULT_RX_PTHRESH 8 +#define ICE_DEFAULT_RX_HTHRESH 8 +#define ICE_DEFAULT_RX_WTHRESH 0 + +#define ICE_DEFAULT_TX_FREE_THRESH 32 +#define ICE_DEFAULT_TX_PTHRESH 32 +#define ICE_DEFAULT_TX_HTHRESH 0 +#define ICE_DEFAULT_TX_WTHRESH 0 +#define ICE_DEFAULT_TX_RSBIT_THRESH 32 + +/* Bit shift and mask */ +#define ICE_4_BIT_WIDTH (CHAR_BIT / 2) +#define ICE_4_BIT_MASK RTE_LEN2MASK(ICE_4_BIT_WIDTH, uint8_t) +#define ICE_8_BIT_WIDTH CHAR_BIT +#define ICE_8_BIT_MASK UINT8_MAX +#define ICE_16_BIT_WIDTH (CHAR_BIT * 2) +#define ICE_16_BIT_MASK UINT16_MAX +#define ICE_32_BIT_WIDTH (CHAR_BIT * 4) +#define ICE_32_BIT_MASK UINT32_MAX +#define ICE_40_BIT_WIDTH (CHAR_BIT * 5) +#define ICE_40_BIT_MASK RTE_LEN2MASK(ICE_40_BIT_WIDTH, uint64_t) +#define ICE_48_BIT_WIDTH (CHAR_BIT * 6) +#define ICE_48_BIT_MASK RTE_LEN2MASK(ICE_48_BIT_WIDTH, uint64_t) + +#define ICE_FLAG_RSS BIT_ULL(0) +#define ICE_FLAG_DCB BIT_ULL(1) +#define ICE_FLAG_VMDQ BIT_ULL(2) +#define ICE_FLAG_SRIOV BIT_ULL(3) +#define ICE_FLAG_HEADER_SPLIT_DISABLED BIT_ULL(4) +#define ICE_FLAG_HEADER_SPLIT_ENABLED BIT_ULL(5) +#define ICE_FLAG_FDIR BIT_ULL(6) +#define ICE_FLAG_VXLAN BIT_ULL(7) +#define ICE_FLAG_RSS_AQ_CAPABLE BIT_ULL(8) +#define ICE_FLAG_VF_MAC_BY_PF BIT_ULL(9) +#define ICE_FLAG_ALL (ICE_FLAG_RSS | \ + ICE_FLAG_DCB | \ + ICE_FLAG_VMDQ | \ + ICE_FLAG_SRIOV | \ + ICE_FLAG_HEADER_SPLIT_DISABLED | \ + ICE_FLAG_HEADER_SPLIT_ENABLED | \ + ICE_FLAG_FDIR | \ + ICE_FLAG_VXLAN | \ + ICE_FLAG_RSS_AQ_CAPABLE | \ + ICE_FLAG_VF_MAC_BY_PF) + +#define ICE_RSS_OFFLOAD_ALL ( \ + ETH_RSS_FRAG_IPV4 | \ + ETH_RSS_NONFRAG_IPV4_TCP | \ + ETH_RSS_NONFRAG_IPV4_UDP | \ + ETH_RSS_NONFRAG_IPV4_SCTP | \ + ETH_RSS_NONFRAG_IPV4_OTHER | \ + ETH_RSS_FRAG_IPV6 | \ + ETH_RSS_NONFRAG_IPV6_TCP | \ + ETH_RSS_NONFRAG_IPV6_UDP | \ + ETH_RSS_NONFRAG_IPV6_SCTP | \ + ETH_RSS_NONFRAG_IPV6_OTHER | \ + ETH_RSS_L2_PAYLOAD) + +/** + * The overhead from MTU to max frame size. + * Considering QinQ packet, the VLAN tag needs to be counted twice. 
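/*
 * Editor's note -- worked example, not from the patch. The comment above
 * describes the VFTA layout: 4096 VLAN bits stored as 128 32-bit words, so
 * ICE_VFTA_IDX() is the VLAN id shifted right by 5 and ICE_VFTA_BIT() selects
 * one of the 32 bits using the low 5 bits of the id. The macro names below
 * are local stand-ins and the VLAN id is an assumed value.
 */
#include <stdio.h>
#include <stdint.h>

#define VFTA_IDX(vlan_id)  ((vlan_id) >> 5)
#define VFTA_BIT(vlan_id)  (1u << ((vlan_id) & 0x1F))

int main(void)
{
	uint32_t vfta[128] = {0};
	uint16_t vlan_id = 100;

	vfta[VFTA_IDX(vlan_id)] |= VFTA_BIT(vlan_id);   /* VLAN 100 -> word 3, bit 4 */
	printf("vlan %d -> word %d, bit %d, set=%d\n", vlan_id,
	       vlan_id >> 5, vlan_id & 0x1F,
	       !!(vfta[VFTA_IDX(vlan_id)] & VFTA_BIT(vlan_id)));
	return 0;
}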
+ */ +#define ICE_ETH_OVERHEAD \ + (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + ICE_VLAN_TAG_SIZE * 2) + +/* DDP package type */ +enum ice_pkg_type { + ICE_PKG_TYPE_UNKNOWN, + ICE_PKG_TYPE_OS_DEFAULT, + ICE_PKG_TYPE_COMMS, +}; + +struct ice_adapter; + +/** + * MAC filter structure + */ +struct ice_mac_filter_info { + struct rte_ether_addr mac_addr; +}; + +TAILQ_HEAD(ice_mac_filter_list, ice_mac_filter); + +/* MAC filter list structure */ +struct ice_mac_filter { + TAILQ_ENTRY(ice_mac_filter) next; + struct ice_mac_filter_info mac_info; +}; + +/** + * VLAN filter structure + */ +struct ice_vlan_filter_info { + uint16_t vlan_id; +}; + +TAILQ_HEAD(ice_vlan_filter_list, ice_vlan_filter); + +/* VLAN filter list structure */ +struct ice_vlan_filter { + TAILQ_ENTRY(ice_vlan_filter) next; + struct ice_vlan_filter_info vlan_info; +}; + +struct pool_entry { + LIST_ENTRY(pool_entry) next; + uint16_t base; + uint16_t len; +}; + +LIST_HEAD(res_list, pool_entry); + +struct ice_res_pool_info { + uint32_t base; /* Resource start index */ + uint32_t num_alloc; /* Allocated resource number */ + uint32_t num_free; /* Total available resource number */ + struct res_list alloc_list; /* Allocated resource list */ + struct res_list free_list; /* Available resource list */ +}; + +TAILQ_HEAD(ice_vsi_list_head, ice_vsi_list); + +struct ice_vsi; + +/* VSI list structure */ +struct ice_vsi_list { + TAILQ_ENTRY(ice_vsi_list) list; + struct ice_vsi *vsi; +}; + +struct ice_rx_queue; +struct ice_tx_queue; + +/** + * Structure that defines a VSI, associated with a adapter. + */ +struct ice_vsi { + struct ice_adapter *adapter; /* Backreference to associated adapter */ + struct ice_aqc_vsi_props info; /* VSI properties */ + /** + * When drivers loaded, only a default main VSI exists. In case new VSI + * needs to add, HW needs to know the layout that VSIs are organized. + * Besides that, VSI isan element and can't switch packets, which needs + * to add new component VEB to perform switching. So, a new VSI needs + * to specify the the uplink VSI (Parent VSI) before created. The + * uplink VSI will check whether it had a VEB to switch packets. If no, + * it will try to create one. Then, uplink VSI will move the new VSI + * into its' sib_vsi_list to manage all the downlink VSI. + * sib_vsi_list: the VSI list that shared the same uplink VSI. + * parent_vsi : the uplink VSI. It's NULL for main VSI. + * veb : the VEB associates with the VSI. 
+ */ + struct ice_vsi_list sib_vsi_list; /* sibling vsi list */ + struct ice_vsi *parent_vsi; + enum ice_vsi_type type; /* VSI types */ + uint16_t vlan_num; /* Total VLAN number */ + uint16_t mac_num; /* Total mac number */ + struct ice_mac_filter_list mac_list; /* macvlan filter list */ + struct ice_vlan_filter_list vlan_list; /* vlan filter list */ + uint16_t nb_qps; /* Number of queue pairs VSI can occupy */ + uint16_t nb_used_qps; /* Number of queue pairs VSI uses */ + uint16_t max_macaddrs; /* Maximum number of MAC addresses */ + uint16_t base_queue; /* The first queue index of this VSI */ + uint16_t vsi_id; /* Hardware Id */ + uint16_t idx; /* vsi_handle: SW index in hw->vsi_ctx */ + /* VF number to which the VSI connects, valid when VSI is VF type */ + uint8_t vf_num; + uint16_t msix_intr; /* The MSIX interrupt binds to VSI */ + uint16_t nb_msix; /* The max number of msix vector */ + uint8_t enabled_tc; /* The traffic class enabled */ + uint8_t vlan_anti_spoof_on; /* The VLAN anti-spoofing enabled */ + uint8_t vlan_filter_on; /* The VLAN filter enabled */ + /* information about rss configuration */ + u32 rss_key_size; + u32 rss_lut_size; + uint8_t *rss_lut; + uint8_t *rss_key; + struct ice_eth_stats eth_stats_offset; + struct ice_eth_stats eth_stats; + bool offset_loaded; +}; + +enum proto_xtr_type { + PROTO_XTR_NONE, + PROTO_XTR_VLAN, + PROTO_XTR_IPV4, + PROTO_XTR_IPV6, + PROTO_XTR_IPV6_FLOW, + PROTO_XTR_TCP, +}; + +enum ice_fdir_tunnel_type { + ICE_FDIR_TUNNEL_TYPE_NONE = 0, + ICE_FDIR_TUNNEL_TYPE_VXLAN, + ICE_FDIR_TUNNEL_TYPE_GTPU, + ICE_FDIR_TUNNEL_TYPE_GTPU_EH, +}; + +struct rte_flow; +TAILQ_HEAD(ice_flow_list, rte_flow); + +struct ice_flow_parser_node; +TAILQ_HEAD(ice_parser_list, ice_flow_parser_node); + +struct ice_fdir_filter_conf { + struct ice_fdir_fltr input; + enum ice_fdir_tunnel_type tunnel_type; + + struct ice_fdir_counter *counter; /* flow specific counter context */ + struct rte_flow_action_count act_count; + + uint64_t input_set; +}; + +#define ICE_MAX_FDIR_FILTER_NUM (1024 * 16) + +struct ice_fdir_fltr_pattern { + enum ice_fltr_ptype flow_type; + + union { + struct ice_fdir_v4 v4; + struct ice_fdir_v6 v6; + } ip, mask; + + struct ice_fdir_udp_gtp gtpu_data; + struct ice_fdir_udp_gtp gtpu_mask; + + struct ice_fdir_extra ext_data; + struct ice_fdir_extra ext_mask; + + enum ice_fdir_tunnel_type tunnel_type; +}; + +#define ICE_FDIR_COUNTER_DEFAULT_POOL_SIZE 1 +#define ICE_FDIR_COUNTER_MAX_POOL_SIZE 32 +#define ICE_FDIR_COUNTERS_PER_BLOCK 256 +#define ICE_FDIR_COUNTER_INDEX(base_idx) \ + ((base_idx) * ICE_FDIR_COUNTERS_PER_BLOCK) +struct ice_fdir_counter_pool; + +struct ice_fdir_counter { + TAILQ_ENTRY(ice_fdir_counter) next; + struct ice_fdir_counter_pool *pool; + uint8_t shared; + uint32_t ref_cnt; + uint32_t id; + uint64_t hits; + uint64_t bytes; + uint32_t hw_index; +}; + +TAILQ_HEAD(ice_fdir_counter_list, ice_fdir_counter); + +struct ice_fdir_counter_pool { + TAILQ_ENTRY(ice_fdir_counter_pool) next; + struct ice_fdir_counter_list counter_list; + struct ice_fdir_counter counters[0]; +}; + +TAILQ_HEAD(ice_fdir_counter_pool_list, ice_fdir_counter_pool); + +struct ice_fdir_counter_pool_container { + struct ice_fdir_counter_pool_list pool_list; + struct ice_fdir_counter_pool *pools[ICE_FDIR_COUNTER_MAX_POOL_SIZE]; + uint8_t index_free; +}; + +/** + * A structure used to define fields of a FDIR related info. 
+ */ +struct ice_fdir_info { + struct ice_vsi *fdir_vsi; /* pointer to fdir VSI structure */ + struct ice_tx_queue *txq; + struct ice_rx_queue *rxq; + void *prg_pkt; /* memory for fdir program packet */ + uint64_t dma_addr; /* physic address of packet memory*/ + const struct rte_memzone *mz; + struct ice_fdir_filter_conf conf; + + struct ice_fdir_filter_conf **hash_map; + struct rte_hash *hash_table; + + struct ice_fdir_counter_pool_container counter; +}; + +struct ice_pf { + struct ice_adapter *adapter; /* The adapter this PF associate to */ + struct ice_vsi *main_vsi; /* pointer to main VSI structure */ + /* Used for next free software vsi idx. + * To save the effort, we don't recycle the index. + * Suppose the indexes are more than enough. + */ + uint16_t next_vsi_idx; + uint16_t vsis_allocated; + uint16_t vsis_unallocated; + struct ice_res_pool_info qp_pool; /*Queue pair pool */ + struct ice_res_pool_info msix_pool; /* MSIX interrupt pool */ + struct rte_eth_dev_data *dev_data; /* Pointer to the device data */ + struct rte_ether_addr dev_addr; /* PF device mac address */ + uint64_t flags; /* PF feature flags */ + uint16_t hash_lut_size; /* The size of hash lookup table */ + uint16_t lan_nb_qp_max; + uint16_t lan_nb_qps; /* The number of queue pairs of LAN */ + uint16_t base_queue; /* The base queue pairs index in the device */ + uint8_t *proto_xtr; /* Protocol extraction type for all queues */ + uint16_t fdir_nb_qps; /* The number of queue pairs of Flow Director */ + uint16_t fdir_qp_offset; + struct ice_fdir_info fdir; /* flow director info */ + uint16_t hw_prof_cnt[ICE_FLTR_PTYPE_MAX][ICE_FD_HW_SEG_MAX]; + uint16_t fdir_fltr_cnt[ICE_FLTR_PTYPE_MAX][ICE_FD_HW_SEG_MAX]; + struct ice_hw_port_stats stats_offset; + struct ice_hw_port_stats stats; + /* internal packet statistics, it should be excluded from the total */ + struct ice_eth_stats internal_stats_offset; + struct ice_eth_stats internal_stats; + bool offset_loaded; + bool adapter_stopped; + struct ice_flow_list flow_list; + rte_spinlock_t flow_ops_lock; + struct ice_parser_list rss_parser_list; + struct ice_parser_list perm_parser_list; + struct ice_parser_list dist_parser_list; + bool init_link_up; +}; + +#define ICE_MAX_QUEUE_NUM 2048 + +/** + * Cache devargs parse result. + */ +struct ice_devargs { + int safe_mode_support; + uint8_t proto_xtr_dflt; + int pipe_mode_support; + int flow_mark_support; + uint8_t proto_xtr[ICE_MAX_QUEUE_NUM]; +}; + +/** + * Structure to store private data for each PF/VF instance. + */ +struct ice_adapter { + /* Common for both PF and VF */ + struct ice_hw hw; + struct rte_eth_dev *eth_dev; + struct ice_pf pf; + bool rx_bulk_alloc_allowed; + bool rx_vec_allowed; + bool tx_vec_allowed; + bool tx_simple_allowed; + /* ptype mapping table */ + uint32_t ptype_tbl[ICE_MAX_PKT_TYPE] __rte_cache_min_aligned; + bool is_safe_mode; + struct ice_devargs devargs; + enum ice_pkg_type active_pkg_type; /* loaded ddp package type */ +}; + +struct ice_vsi_vlan_pvid_info { + uint16_t on; /* Enable or disable pvid */ + union { + uint16_t pvid; /* Valid in case 'on' is set to set pvid */ + struct { + /* Valid in case 'on' is cleared. 'tagged' will reject + * tagged packets, while 'untagged' will reject + * untagged packets. 
+ */ + uint8_t tagged; + uint8_t untagged; + } reject; + } config; +}; + +#define ICE_DEV_TO_PCI(eth_dev) \ + RTE_DEV_TO_PCI((eth_dev)->device) + +/* ICE_DEV_PRIVATE_TO */ +#define ICE_DEV_PRIVATE_TO_PF(adapter) \ + (&((struct ice_adapter *)adapter)->pf) +#define ICE_DEV_PRIVATE_TO_HW(adapter) \ + (&((struct ice_adapter *)adapter)->hw) +#define ICE_DEV_PRIVATE_TO_ADAPTER(adapter) \ + ((struct ice_adapter *)adapter) + +/* ICE_VSI_TO */ +#define ICE_VSI_TO_HW(vsi) \ + (&(((struct ice_vsi *)vsi)->adapter->hw)) +#define ICE_VSI_TO_PF(vsi) \ + (&(((struct ice_vsi *)vsi)->adapter->pf)) +#define ICE_VSI_TO_ETH_DEV(vsi) \ + (((struct ice_vsi *)vsi)->adapter->eth_dev) + +/* ICE_PF_TO */ +#define ICE_PF_TO_HW(pf) \ + (&(((struct ice_pf *)pf)->adapter->hw)) +#define ICE_PF_TO_ADAPTER(pf) \ + ((struct ice_adapter *)(pf)->adapter) +#define ICE_PF_TO_ETH_DEV(pf) \ + (((struct ice_pf *)pf)->adapter->eth_dev) + +enum ice_pkg_type ice_load_pkg_type(struct ice_hw *hw); +struct ice_vsi * +ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type); +int +ice_release_vsi(struct ice_vsi *vsi); +void ice_vsi_enable_queues_intr(struct ice_vsi *vsi); +void ice_vsi_disable_queues_intr(struct ice_vsi *vsi); +void ice_vsi_queues_bind_intr(struct ice_vsi *vsi); + +static inline int +ice_align_floor(int n) +{ + if (n == 0) + return 0; + return 1 << (sizeof(n) * CHAR_BIT - 1 - __builtin_clz(n)); +} + +#define ICE_PHY_TYPE_SUPPORT_50G(phy_type) \ + (((phy_type) & ICE_PHY_TYPE_LOW_50GBASE_CR2) || \ + ((phy_type) & ICE_PHY_TYPE_LOW_50GBASE_SR2) || \ + ((phy_type) & ICE_PHY_TYPE_LOW_50GBASE_LR2) || \ + ((phy_type) & ICE_PHY_TYPE_LOW_50GBASE_KR2) || \ + ((phy_type) & ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC) || \ + ((phy_type) & ICE_PHY_TYPE_LOW_50G_LAUI2) || \ + ((phy_type) & ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC) || \ + ((phy_type) & ICE_PHY_TYPE_LOW_50G_AUI2) || \ + ((phy_type) & ICE_PHY_TYPE_LOW_50GBASE_CP) || \ + ((phy_type) & ICE_PHY_TYPE_LOW_50GBASE_SR) || \ + ((phy_type) & ICE_PHY_TYPE_LOW_50GBASE_FR) || \ + ((phy_type) & ICE_PHY_TYPE_LOW_50GBASE_LR) || \ + ((phy_type) & ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4) || \ + ((phy_type) & ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC) || \ + ((phy_type) & ICE_PHY_TYPE_LOW_50G_AUI1)) + +#define ICE_PHY_TYPE_SUPPORT_100G_LOW(phy_type) \ + (((phy_type) & ICE_PHY_TYPE_LOW_100GBASE_CR4) || \ + ((phy_type) & ICE_PHY_TYPE_LOW_100GBASE_SR4) || \ + ((phy_type) & ICE_PHY_TYPE_LOW_100GBASE_LR4) || \ + ((phy_type) & ICE_PHY_TYPE_LOW_100GBASE_KR4) || \ + ((phy_type) & ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC) || \ + ((phy_type) & ICE_PHY_TYPE_LOW_100G_CAUI4) || \ + ((phy_type) & ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC) || \ + ((phy_type) & ICE_PHY_TYPE_LOW_100G_AUI4) || \ + ((phy_type) & ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4) || \ + ((phy_type) & ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4) || \ + ((phy_type) & ICE_PHY_TYPE_LOW_100GBASE_CP2) || \ + ((phy_type) & ICE_PHY_TYPE_LOW_100GBASE_SR2) || \ + ((phy_type) & ICE_PHY_TYPE_LOW_100GBASE_DR)) + +#define ICE_PHY_TYPE_SUPPORT_100G_HIGH(phy_type) \ + (((phy_type) & ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4) || \ + ((phy_type) & ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC) || \ + ((phy_type) & ICE_PHY_TYPE_HIGH_100G_CAUI2) || \ + ((phy_type) & ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC) || \ + ((phy_type) & ICE_PHY_TYPE_HIGH_100G_AUI2)) + +#endif /* _ICE_ETHDEV_H_ */ diff --git a/src/spdk/dpdk/drivers/net/ice/ice_fdir_filter.c b/src/spdk/dpdk/drivers/net/ice/ice_fdir_filter.c new file mode 100644 index 000000000..69c714c59 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ice/ice_fdir_filter.c @@ -0,0 +1,2013 @@ +/* 
SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019 Intel Corporation + */ + +#include +#include +#include +#include +#include "base/ice_fdir.h" +#include "base/ice_flow.h" +#include "base/ice_type.h" +#include "ice_ethdev.h" +#include "ice_rxtx.h" +#include "ice_generic_flow.h" + +#define ICE_FDIR_IPV6_TC_OFFSET 20 +#define ICE_IPV6_TC_MASK (0xFF << ICE_FDIR_IPV6_TC_OFFSET) + +#define ICE_FDIR_MAX_QREGION_SIZE 128 + +#define ICE_FDIR_INSET_ETH_IPV4 (\ + ICE_INSET_DMAC | \ + ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_TOS | \ + ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_PROTO) + +#define ICE_FDIR_INSET_ETH_IPV4_UDP (\ + ICE_FDIR_INSET_ETH_IPV4 | \ + ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT) + +#define ICE_FDIR_INSET_ETH_IPV4_TCP (\ + ICE_FDIR_INSET_ETH_IPV4 | \ + ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT) + +#define ICE_FDIR_INSET_ETH_IPV4_SCTP (\ + ICE_FDIR_INSET_ETH_IPV4 | \ + ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT) + +#define ICE_FDIR_INSET_ETH_IPV6 (\ + ICE_INSET_DMAC | \ + ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_TC | \ + ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_NEXT_HDR) + +#define ICE_FDIR_INSET_ETH_IPV6_UDP (\ + ICE_FDIR_INSET_ETH_IPV6 | \ + ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT) + +#define ICE_FDIR_INSET_ETH_IPV6_TCP (\ + ICE_FDIR_INSET_ETH_IPV6 | \ + ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT) + +#define ICE_FDIR_INSET_ETH_IPV6_SCTP (\ + ICE_FDIR_INSET_ETH_IPV6 | \ + ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT) + +#define ICE_FDIR_INSET_VXLAN_IPV4 (\ + ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST) + +#define ICE_FDIR_INSET_VXLAN_IPV4_TCP (\ + ICE_FDIR_INSET_VXLAN_IPV4 | \ + ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT) + +#define ICE_FDIR_INSET_VXLAN_IPV4_UDP (\ + ICE_FDIR_INSET_VXLAN_IPV4 | \ + ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT) + +#define ICE_FDIR_INSET_VXLAN_IPV4_SCTP (\ + ICE_FDIR_INSET_VXLAN_IPV4 | \ + ICE_INSET_TUN_SCTP_SRC_PORT | ICE_INSET_TUN_SCTP_DST_PORT) + +#define ICE_FDIR_INSET_GTPU (\ + ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_GTPU_TEID) + +#define ICE_FDIR_INSET_GTPU_EH (\ + ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \ + ICE_INSET_GTPU_TEID | ICE_INSET_GTPU_QFI) + +static struct ice_pattern_match_item ice_fdir_pattern_os[] = { + {pattern_eth_ipv4, ICE_FDIR_INSET_ETH_IPV4, ICE_INSET_NONE}, + {pattern_eth_ipv4_udp, ICE_FDIR_INSET_ETH_IPV4_UDP, ICE_INSET_NONE}, + {pattern_eth_ipv4_tcp, ICE_FDIR_INSET_ETH_IPV4_TCP, ICE_INSET_NONE}, + {pattern_eth_ipv4_sctp, ICE_FDIR_INSET_ETH_IPV4_SCTP, ICE_INSET_NONE}, + {pattern_eth_ipv6, ICE_FDIR_INSET_ETH_IPV6, ICE_INSET_NONE}, + {pattern_eth_ipv6_udp, ICE_FDIR_INSET_ETH_IPV6_UDP, ICE_INSET_NONE}, + {pattern_eth_ipv6_tcp, ICE_FDIR_INSET_ETH_IPV6_TCP, ICE_INSET_NONE}, + {pattern_eth_ipv6_sctp, ICE_FDIR_INSET_ETH_IPV6_SCTP, ICE_INSET_NONE}, + {pattern_eth_ipv4_udp_vxlan_ipv4, + ICE_FDIR_INSET_VXLAN_IPV4, ICE_INSET_NONE}, + {pattern_eth_ipv4_udp_vxlan_ipv4_udp, + ICE_FDIR_INSET_VXLAN_IPV4_UDP, ICE_INSET_NONE}, + {pattern_eth_ipv4_udp_vxlan_ipv4_tcp, + ICE_FDIR_INSET_VXLAN_IPV4_TCP, ICE_INSET_NONE}, + {pattern_eth_ipv4_udp_vxlan_ipv4_sctp, + ICE_FDIR_INSET_VXLAN_IPV4_SCTP, ICE_INSET_NONE}, + {pattern_eth_ipv4_udp_vxlan_eth_ipv4, + ICE_FDIR_INSET_VXLAN_IPV4, ICE_INSET_NONE}, + {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp, + ICE_FDIR_INSET_VXLAN_IPV4_UDP, ICE_INSET_NONE}, + {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp, + ICE_FDIR_INSET_VXLAN_IPV4_TCP, ICE_INSET_NONE}, + {pattern_eth_ipv4_udp_vxlan_eth_ipv4_sctp, + 
ICE_FDIR_INSET_VXLAN_IPV4_SCTP, ICE_INSET_NONE}, +}; + +static struct ice_pattern_match_item ice_fdir_pattern_comms[] = { + {pattern_eth_ipv4, ICE_FDIR_INSET_ETH_IPV4, ICE_INSET_NONE}, + {pattern_eth_ipv4_udp, ICE_FDIR_INSET_ETH_IPV4_UDP, ICE_INSET_NONE}, + {pattern_eth_ipv4_tcp, ICE_FDIR_INSET_ETH_IPV4_TCP, ICE_INSET_NONE}, + {pattern_eth_ipv4_sctp, ICE_FDIR_INSET_ETH_IPV4_SCTP, ICE_INSET_NONE}, + {pattern_eth_ipv6, ICE_FDIR_INSET_ETH_IPV6, ICE_INSET_NONE}, + {pattern_eth_ipv6_udp, ICE_FDIR_INSET_ETH_IPV6_UDP, ICE_INSET_NONE}, + {pattern_eth_ipv6_tcp, ICE_FDIR_INSET_ETH_IPV6_TCP, ICE_INSET_NONE}, + {pattern_eth_ipv6_sctp, ICE_FDIR_INSET_ETH_IPV6_SCTP, ICE_INSET_NONE}, + {pattern_eth_ipv4_udp_vxlan_ipv4, + ICE_FDIR_INSET_VXLAN_IPV4, ICE_INSET_NONE}, + {pattern_eth_ipv4_udp_vxlan_ipv4_udp, + ICE_FDIR_INSET_VXLAN_IPV4_UDP, ICE_INSET_NONE}, + {pattern_eth_ipv4_udp_vxlan_ipv4_tcp, + ICE_FDIR_INSET_VXLAN_IPV4_TCP, ICE_INSET_NONE}, + {pattern_eth_ipv4_udp_vxlan_ipv4_sctp, + ICE_FDIR_INSET_VXLAN_IPV4_SCTP, ICE_INSET_NONE}, + {pattern_eth_ipv4_udp_vxlan_eth_ipv4, + ICE_FDIR_INSET_VXLAN_IPV4, ICE_INSET_NONE}, + {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp, + ICE_FDIR_INSET_VXLAN_IPV4_UDP, ICE_INSET_NONE}, + {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp, + ICE_FDIR_INSET_VXLAN_IPV4_TCP, ICE_INSET_NONE}, + {pattern_eth_ipv4_udp_vxlan_eth_ipv4_sctp, + ICE_FDIR_INSET_VXLAN_IPV4_SCTP, ICE_INSET_NONE}, + {pattern_eth_ipv4_gtpu, ICE_FDIR_INSET_GTPU, ICE_INSET_NONE}, + {pattern_eth_ipv4_gtpu_eh, ICE_FDIR_INSET_GTPU_EH, ICE_INSET_NONE}, +}; + +static struct ice_flow_parser ice_fdir_parser_os; +static struct ice_flow_parser ice_fdir_parser_comms; + +static int +ice_fdir_is_tunnel_profile(enum ice_fdir_tunnel_type tunnel_type); + +static const struct rte_memzone * +ice_memzone_reserve(const char *name, uint32_t len, int socket_id) +{ + const struct rte_memzone *mz; + + mz = rte_memzone_lookup(name); + if (mz) + return mz; + + return rte_memzone_reserve_aligned(name, len, socket_id, + RTE_MEMZONE_IOVA_CONTIG, + ICE_RING_BASE_ALIGN); +} + +#define ICE_FDIR_MZ_NAME "FDIR_MEMZONE" + +static int +ice_fdir_prof_alloc(struct ice_hw *hw) +{ + enum ice_fltr_ptype ptype, fltr_ptype; + + if (!hw->fdir_prof) { + hw->fdir_prof = (struct ice_fd_hw_prof **) + ice_malloc(hw, ICE_FLTR_PTYPE_MAX * + sizeof(*hw->fdir_prof)); + if (!hw->fdir_prof) + return -ENOMEM; + } + for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1; + ptype < ICE_FLTR_PTYPE_MAX; + ptype++) { + if (!hw->fdir_prof[ptype]) { + hw->fdir_prof[ptype] = (struct ice_fd_hw_prof *) + ice_malloc(hw, sizeof(**hw->fdir_prof)); + if (!hw->fdir_prof[ptype]) + goto fail_mem; + } + } + return 0; + +fail_mem: + for (fltr_ptype = ICE_FLTR_PTYPE_NONF_NONE + 1; + fltr_ptype < ptype; + fltr_ptype++) { + rte_free(hw->fdir_prof[fltr_ptype]); + hw->fdir_prof[fltr_ptype] = NULL; + } + + rte_free(hw->fdir_prof); + hw->fdir_prof = NULL; + + return -ENOMEM; +} + +static int +ice_fdir_counter_pool_add(__rte_unused struct ice_pf *pf, + struct ice_fdir_counter_pool_container *container, + uint32_t index_start, + uint32_t len) +{ + struct ice_fdir_counter_pool *pool; + uint32_t i; + int ret = 0; + + pool = rte_zmalloc("ice_fdir_counter_pool", + sizeof(*pool) + + sizeof(struct ice_fdir_counter) * len, + 0); + if (!pool) { + PMD_INIT_LOG(ERR, + "Failed to allocate memory for fdir counter pool"); + return -ENOMEM; + } + + TAILQ_INIT(&pool->counter_list); + TAILQ_INSERT_TAIL(&container->pool_list, pool, next); + + for (i = 0; i < len; i++) { + struct ice_fdir_counter *counter = &pool->counters[i]; + + 
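+		/* counters[] is the zero-length array member (counters[0])
+		 * allocated together with the pool above; entry i is backed
+		 * by the hardware counter at index_start + i and starts out
+		 * on the pool's free list.
+		 */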
counter->hw_index = index_start + i; + TAILQ_INSERT_TAIL(&pool->counter_list, counter, next); + } + + if (container->index_free == ICE_FDIR_COUNTER_MAX_POOL_SIZE) { + PMD_INIT_LOG(ERR, "FDIR counter pool is full"); + ret = -EINVAL; + goto free_pool; + } + + container->pools[container->index_free++] = pool; + return 0; + +free_pool: + rte_free(pool); + return ret; +} + +static int +ice_fdir_counter_init(struct ice_pf *pf) +{ + struct ice_hw *hw = ICE_PF_TO_HW(pf); + struct ice_fdir_info *fdir_info = &pf->fdir; + struct ice_fdir_counter_pool_container *container = + &fdir_info->counter; + uint32_t cnt_index, len; + int ret; + + TAILQ_INIT(&container->pool_list); + + cnt_index = ICE_FDIR_COUNTER_INDEX(hw->fd_ctr_base); + len = ICE_FDIR_COUNTERS_PER_BLOCK; + + ret = ice_fdir_counter_pool_add(pf, container, cnt_index, len); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to add fdir pool to container"); + return ret; + } + + return 0; +} + +static int +ice_fdir_counter_release(struct ice_pf *pf) +{ + struct ice_fdir_info *fdir_info = &pf->fdir; + struct ice_fdir_counter_pool_container *container = + &fdir_info->counter; + uint8_t i; + + for (i = 0; i < container->index_free; i++) { + rte_free(container->pools[i]); + container->pools[i] = NULL; + } + + TAILQ_INIT(&container->pool_list); + container->index_free = 0; + + return 0; +} + +static struct ice_fdir_counter * +ice_fdir_counter_shared_search(struct ice_fdir_counter_pool_container + *container, + uint32_t id) +{ + struct ice_fdir_counter_pool *pool; + struct ice_fdir_counter *counter; + int i; + + TAILQ_FOREACH(pool, &container->pool_list, next) { + for (i = 0; i < ICE_FDIR_COUNTERS_PER_BLOCK; i++) { + counter = &pool->counters[i]; + + if (counter->shared && + counter->ref_cnt && + counter->id == id) + return counter; + } + } + + return NULL; +} + +static struct ice_fdir_counter * +ice_fdir_counter_alloc(struct ice_pf *pf, uint32_t shared, uint32_t id) +{ + struct ice_hw *hw = ICE_PF_TO_HW(pf); + struct ice_fdir_info *fdir_info = &pf->fdir; + struct ice_fdir_counter_pool_container *container = + &fdir_info->counter; + struct ice_fdir_counter_pool *pool = NULL; + struct ice_fdir_counter *counter_free = NULL; + + if (shared) { + counter_free = ice_fdir_counter_shared_search(container, id); + if (counter_free) { + if (counter_free->ref_cnt + 1 == 0) { + rte_errno = E2BIG; + return NULL; + } + counter_free->ref_cnt++; + return counter_free; + } + } + + TAILQ_FOREACH(pool, &container->pool_list, next) { + counter_free = TAILQ_FIRST(&pool->counter_list); + if (counter_free) + break; + counter_free = NULL; + } + + if (!counter_free) { + PMD_DRV_LOG(ERR, "No free counter found\n"); + return NULL; + } + + counter_free->shared = shared; + counter_free->id = id; + counter_free->ref_cnt = 1; + counter_free->pool = pool; + + /* reset statistic counter value */ + ICE_WRITE_REG(hw, GLSTAT_FD_CNT0H(counter_free->hw_index), 0); + ICE_WRITE_REG(hw, GLSTAT_FD_CNT0L(counter_free->hw_index), 0); + + TAILQ_REMOVE(&pool->counter_list, counter_free, next); + if (TAILQ_EMPTY(&pool->counter_list)) { + TAILQ_REMOVE(&container->pool_list, pool, next); + TAILQ_INSERT_TAIL(&container->pool_list, pool, next); + } + + return counter_free; +} + +static void +ice_fdir_counter_free(__rte_unused struct ice_pf *pf, + struct ice_fdir_counter *counter) +{ + if (!counter) + return; + + if (--counter->ref_cnt == 0) { + struct ice_fdir_counter_pool *pool = counter->pool; + + TAILQ_INSERT_TAIL(&pool->counter_list, counter, next); + } +} + +static int +ice_fdir_init_filter_list(struct 
ice_pf *pf) +{ + struct rte_eth_dev *dev = pf->adapter->eth_dev; + struct ice_fdir_info *fdir_info = &pf->fdir; + char fdir_hash_name[RTE_HASH_NAMESIZE]; + int ret; + + struct rte_hash_parameters fdir_hash_params = { + .name = fdir_hash_name, + .entries = ICE_MAX_FDIR_FILTER_NUM, + .key_len = sizeof(struct ice_fdir_fltr_pattern), + .hash_func = rte_hash_crc, + .hash_func_init_val = 0, + .socket_id = rte_socket_id(), + .extra_flag = RTE_HASH_EXTRA_FLAGS_EXT_TABLE, + }; + + /* Initialize hash */ + snprintf(fdir_hash_name, RTE_HASH_NAMESIZE, + "fdir_%s", dev->device->name); + fdir_info->hash_table = rte_hash_create(&fdir_hash_params); + if (!fdir_info->hash_table) { + PMD_INIT_LOG(ERR, "Failed to create fdir hash table!"); + return -EINVAL; + } + fdir_info->hash_map = rte_zmalloc("ice_fdir_hash_map", + sizeof(*fdir_info->hash_map) * + ICE_MAX_FDIR_FILTER_NUM, + 0); + if (!fdir_info->hash_map) { + PMD_INIT_LOG(ERR, + "Failed to allocate memory for fdir hash map!"); + ret = -ENOMEM; + goto err_fdir_hash_map_alloc; + } + return 0; + +err_fdir_hash_map_alloc: + rte_hash_free(fdir_info->hash_table); + + return ret; +} + +static void +ice_fdir_release_filter_list(struct ice_pf *pf) +{ + struct ice_fdir_info *fdir_info = &pf->fdir; + + if (fdir_info->hash_map) + rte_free(fdir_info->hash_map); + if (fdir_info->hash_table) + rte_hash_free(fdir_info->hash_table); + + fdir_info->hash_map = NULL; + fdir_info->hash_table = NULL; +} + +/* + * ice_fdir_setup - reserve and initialize the Flow Director resources + * @pf: board private structure + */ +static int +ice_fdir_setup(struct ice_pf *pf) +{ + struct rte_eth_dev *eth_dev = pf->adapter->eth_dev; + struct ice_hw *hw = ICE_PF_TO_HW(pf); + const struct rte_memzone *mz = NULL; + char z_name[RTE_MEMZONE_NAMESIZE]; + struct ice_vsi *vsi; + int err = ICE_SUCCESS; + + if ((pf->flags & ICE_FLAG_FDIR) == 0) { + PMD_INIT_LOG(ERR, "HW doesn't support FDIR"); + return -ENOTSUP; + } + + PMD_DRV_LOG(INFO, "FDIR HW Capabilities: fd_fltr_guar = %u," + " fd_fltr_best_effort = %u.", + hw->func_caps.fd_fltr_guar, + hw->func_caps.fd_fltr_best_effort); + + if (pf->fdir.fdir_vsi) { + PMD_DRV_LOG(INFO, "FDIR initialization has been done."); + return ICE_SUCCESS; + } + + /* make new FDIR VSI */ + vsi = ice_setup_vsi(pf, ICE_VSI_CTRL); + if (!vsi) { + PMD_DRV_LOG(ERR, "Couldn't create FDIR VSI."); + return -EINVAL; + } + pf->fdir.fdir_vsi = vsi; + + err = ice_fdir_init_filter_list(pf); + if (err) { + PMD_DRV_LOG(ERR, "Failed to init FDIR filter list."); + return -EINVAL; + } + + err = ice_fdir_counter_init(pf); + if (err) { + PMD_DRV_LOG(ERR, "Failed to init FDIR counter."); + return -EINVAL; + } + + /*Fdir tx queue setup*/ + err = ice_fdir_setup_tx_resources(pf); + if (err) { + PMD_DRV_LOG(ERR, "Failed to setup FDIR TX resources."); + goto fail_setup_tx; + } + + /*Fdir rx queue setup*/ + err = ice_fdir_setup_rx_resources(pf); + if (err) { + PMD_DRV_LOG(ERR, "Failed to setup FDIR RX resources."); + goto fail_setup_rx; + } + + err = ice_fdir_tx_queue_start(eth_dev, pf->fdir.txq->queue_id); + if (err) { + PMD_DRV_LOG(ERR, "Failed to start FDIR TX queue."); + goto fail_mem; + } + + err = ice_fdir_rx_queue_start(eth_dev, pf->fdir.rxq->queue_id); + if (err) { + PMD_DRV_LOG(ERR, "Failed to start FDIR RX queue."); + goto fail_mem; + } + + /* Enable FDIR MSIX interrupt */ + vsi->nb_used_qps = 1; + ice_vsi_queues_bind_intr(vsi); + ice_vsi_enable_queues_intr(vsi); + + /* reserve memory for the fdir programming packet */ + snprintf(z_name, sizeof(z_name), "ICE_%s_%d", + ICE_FDIR_MZ_NAME, + 
eth_dev->data->port_id); + mz = ice_memzone_reserve(z_name, ICE_FDIR_PKT_LEN, SOCKET_ID_ANY); + if (!mz) { + PMD_DRV_LOG(ERR, "Cannot init memzone for " + "flow director program packet."); + err = -ENOMEM; + goto fail_mem; + } + pf->fdir.prg_pkt = mz->addr; + pf->fdir.dma_addr = mz->iova; + pf->fdir.mz = mz; + + err = ice_fdir_prof_alloc(hw); + if (err) { + PMD_DRV_LOG(ERR, "Cannot allocate memory for " + "flow director profile."); + err = -ENOMEM; + goto fail_prof; + } + + PMD_DRV_LOG(INFO, "FDIR setup successfully, with programming queue %u.", + vsi->base_queue); + return ICE_SUCCESS; + +fail_prof: + rte_memzone_free(pf->fdir.mz); + pf->fdir.mz = NULL; +fail_mem: + ice_rx_queue_release(pf->fdir.rxq); + pf->fdir.rxq = NULL; +fail_setup_rx: + ice_tx_queue_release(pf->fdir.txq); + pf->fdir.txq = NULL; +fail_setup_tx: + ice_release_vsi(vsi); + pf->fdir.fdir_vsi = NULL; + return err; +} + +static void +ice_fdir_prof_free(struct ice_hw *hw) +{ + enum ice_fltr_ptype ptype; + + for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1; + ptype < ICE_FLTR_PTYPE_MAX; + ptype++) { + rte_free(hw->fdir_prof[ptype]); + hw->fdir_prof[ptype] = NULL; + } + + rte_free(hw->fdir_prof); + hw->fdir_prof = NULL; +} + +/* Remove a profile for some filter type */ +static void +ice_fdir_prof_rm(struct ice_pf *pf, enum ice_fltr_ptype ptype, bool is_tunnel) +{ + struct ice_hw *hw = ICE_PF_TO_HW(pf); + struct ice_fd_hw_prof *hw_prof; + uint64_t prof_id; + uint16_t vsi_num; + int i; + + if (!hw->fdir_prof || !hw->fdir_prof[ptype]) + return; + + hw_prof = hw->fdir_prof[ptype]; + + prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX; + for (i = 0; i < pf->hw_prof_cnt[ptype][is_tunnel]; i++) { + if (hw_prof->entry_h[i][is_tunnel]) { + vsi_num = ice_get_hw_vsi_num(hw, + hw_prof->vsi_h[i]); + ice_rem_prof_id_flow(hw, ICE_BLK_FD, + vsi_num, ptype); + ice_flow_rem_entry(hw, ICE_BLK_FD, + hw_prof->entry_h[i][is_tunnel]); + hw_prof->entry_h[i][is_tunnel] = 0; + } + } + ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id); + rte_free(hw_prof->fdir_seg[is_tunnel]); + hw_prof->fdir_seg[is_tunnel] = NULL; + + for (i = 0; i < hw_prof->cnt; i++) + hw_prof->vsi_h[i] = 0; + pf->hw_prof_cnt[ptype][is_tunnel] = 0; +} + +/* Remove all created profiles */ +static void +ice_fdir_prof_rm_all(struct ice_pf *pf) +{ + enum ice_fltr_ptype ptype; + + for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1; + ptype < ICE_FLTR_PTYPE_MAX; + ptype++) { + ice_fdir_prof_rm(pf, ptype, false); + ice_fdir_prof_rm(pf, ptype, true); + } +} + +/* + * ice_fdir_teardown - release the Flow Director resources + * @pf: board private structure + */ +static void +ice_fdir_teardown(struct ice_pf *pf) +{ + struct rte_eth_dev *eth_dev = pf->adapter->eth_dev; + struct ice_hw *hw = ICE_PF_TO_HW(pf); + struct ice_vsi *vsi; + int err; + + vsi = pf->fdir.fdir_vsi; + if (!vsi) + return; + + ice_vsi_disable_queues_intr(vsi); + + err = ice_fdir_tx_queue_stop(eth_dev, pf->fdir.txq->queue_id); + if (err) + PMD_DRV_LOG(ERR, "Failed to stop TX queue."); + + err = ice_fdir_rx_queue_stop(eth_dev, pf->fdir.rxq->queue_id); + if (err) + PMD_DRV_LOG(ERR, "Failed to stop RX queue."); + + err = ice_fdir_counter_release(pf); + if (err) + PMD_DRV_LOG(ERR, "Failed to release FDIR counter resource."); + + ice_fdir_release_filter_list(pf); + + ice_tx_queue_release(pf->fdir.txq); + pf->fdir.txq = NULL; + ice_rx_queue_release(pf->fdir.rxq); + pf->fdir.rxq = NULL; + ice_fdir_prof_rm_all(pf); + ice_fdir_prof_free(hw); + ice_release_vsi(vsi); + pf->fdir.fdir_vsi = NULL; + + if (pf->fdir.mz) { + err = rte_memzone_free(pf->fdir.mz); + 
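+		/* Clear the cached memzone pointer regardless of the free
+		 * result; a failure is only logged and does not abort teardown.
+		 */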
pf->fdir.mz = NULL; + if (err) + PMD_DRV_LOG(ERR, "Failed to free FDIR memezone."); + } +} + +static int +ice_fdir_cur_prof_conflict(struct ice_pf *pf, + enum ice_fltr_ptype ptype, + struct ice_flow_seg_info *seg, + bool is_tunnel) +{ + struct ice_hw *hw = ICE_PF_TO_HW(pf); + struct ice_flow_seg_info *ori_seg; + struct ice_fd_hw_prof *hw_prof; + + hw_prof = hw->fdir_prof[ptype]; + ori_seg = hw_prof->fdir_seg[is_tunnel]; + + /* profile does not exist */ + if (!ori_seg) + return 0; + + /* if no input set conflict, return -EEXIST */ + if ((!is_tunnel && !memcmp(ori_seg, seg, sizeof(*seg))) || + (is_tunnel && !memcmp(&ori_seg[1], &seg[1], sizeof(*seg)))) { + PMD_DRV_LOG(DEBUG, "Profile already exists for flow type %d.", + ptype); + return -EEXIST; + } + + /* a rule with input set conflict already exist, so give up */ + if (pf->fdir_fltr_cnt[ptype][is_tunnel]) { + PMD_DRV_LOG(DEBUG, "Failed to create profile for flow type %d due to conflict with existing rule.", + ptype); + return -EINVAL; + } + + /* it's safe to delete an empty profile */ + ice_fdir_prof_rm(pf, ptype, is_tunnel); + return 0; +} + +static bool +ice_fdir_prof_resolve_conflict(struct ice_pf *pf, + enum ice_fltr_ptype ptype, + bool is_tunnel) +{ + struct ice_hw *hw = ICE_PF_TO_HW(pf); + struct ice_fd_hw_prof *hw_prof; + struct ice_flow_seg_info *seg; + + hw_prof = hw->fdir_prof[ptype]; + seg = hw_prof->fdir_seg[is_tunnel]; + + /* profile does not exist */ + if (!seg) + return true; + + /* profile exists and rule exists, fail to resolve the conflict */ + if (pf->fdir_fltr_cnt[ptype][is_tunnel] != 0) + return false; + + /* it's safe to delete an empty profile */ + ice_fdir_prof_rm(pf, ptype, is_tunnel); + + return true; +} + +static int +ice_fdir_cross_prof_conflict(struct ice_pf *pf, + enum ice_fltr_ptype ptype, + bool is_tunnel) +{ + enum ice_fltr_ptype cflct_ptype; + + switch (ptype) { + /* IPv4 */ + case ICE_FLTR_PTYPE_NONF_IPV4_UDP: + case ICE_FLTR_PTYPE_NONF_IPV4_TCP: + case ICE_FLTR_PTYPE_NONF_IPV4_SCTP: + cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_OTHER; + if (!ice_fdir_prof_resolve_conflict + (pf, cflct_ptype, is_tunnel)) + goto err; + break; + case ICE_FLTR_PTYPE_NONF_IPV4_OTHER: + cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP; + if (!ice_fdir_prof_resolve_conflict + (pf, cflct_ptype, is_tunnel)) + goto err; + cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_TCP; + if (!ice_fdir_prof_resolve_conflict + (pf, cflct_ptype, is_tunnel)) + goto err; + cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_SCTP; + if (!ice_fdir_prof_resolve_conflict + (pf, cflct_ptype, is_tunnel)) + goto err; + break; + /* IPv4 GTPU */ + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP: + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP: + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP: + cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER; + if (!ice_fdir_prof_resolve_conflict + (pf, cflct_ptype, is_tunnel)) + goto err; + break; + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER: + cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER; + if (!ice_fdir_prof_resolve_conflict + (pf, cflct_ptype, is_tunnel)) + goto err; + cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER; + if (!ice_fdir_prof_resolve_conflict + (pf, cflct_ptype, is_tunnel)) + goto err; + cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER; + if (!ice_fdir_prof_resolve_conflict + (pf, cflct_ptype, is_tunnel)) + goto err; + break; + /* IPv6 */ + case ICE_FLTR_PTYPE_NONF_IPV6_UDP: + case ICE_FLTR_PTYPE_NONF_IPV6_TCP: + case ICE_FLTR_PTYPE_NONF_IPV6_SCTP: + cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_OTHER; + if 
(!ice_fdir_prof_resolve_conflict + (pf, cflct_ptype, is_tunnel)) + goto err; + break; + case ICE_FLTR_PTYPE_NONF_IPV6_OTHER: + cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_UDP; + if (!ice_fdir_prof_resolve_conflict + (pf, cflct_ptype, is_tunnel)) + goto err; + cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_TCP; + if (!ice_fdir_prof_resolve_conflict + (pf, cflct_ptype, is_tunnel)) + goto err; + cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_SCTP; + if (!ice_fdir_prof_resolve_conflict + (pf, cflct_ptype, is_tunnel)) + goto err; + break; + default: + break; + } + return 0; +err: + PMD_DRV_LOG(DEBUG, "Failed to create profile for flow type %d due to conflict with existing rule of flow type %d.", + ptype, cflct_ptype); + return -EINVAL; +} + +static int +ice_fdir_hw_tbl_conf(struct ice_pf *pf, struct ice_vsi *vsi, + struct ice_vsi *ctrl_vsi, + struct ice_flow_seg_info *seg, + enum ice_fltr_ptype ptype, + bool is_tunnel) +{ + struct ice_hw *hw = ICE_PF_TO_HW(pf); + enum ice_flow_dir dir = ICE_FLOW_RX; + struct ice_fd_hw_prof *hw_prof; + struct ice_flow_prof *prof; + uint64_t entry_1 = 0; + uint64_t entry_2 = 0; + uint16_t vsi_num; + int ret; + uint64_t prof_id; + + /* check if have input set conflict on current profile. */ + ret = ice_fdir_cur_prof_conflict(pf, ptype, seg, is_tunnel); + if (ret) + return ret; + + /* check if the profile is conflict with other profile. */ + ret = ice_fdir_cross_prof_conflict(pf, ptype, is_tunnel); + if (ret) + return ret; + + prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX; + ret = ice_flow_add_prof(hw, ICE_BLK_FD, dir, prof_id, seg, + (is_tunnel) ? 2 : 1, NULL, 0, &prof); + if (ret) + return ret; + ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx, + vsi->idx, ICE_FLOW_PRIO_NORMAL, + seg, NULL, 0, &entry_1); + if (ret) { + PMD_DRV_LOG(ERR, "Failed to add main VSI flow entry for %d.", + ptype); + goto err_add_prof; + } + ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx, + ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL, + seg, NULL, 0, &entry_2); + if (ret) { + PMD_DRV_LOG(ERR, "Failed to add control VSI flow entry for %d.", + ptype); + goto err_add_entry; + } + + hw_prof = hw->fdir_prof[ptype]; + pf->hw_prof_cnt[ptype][is_tunnel] = 0; + hw_prof->cnt = 0; + hw_prof->fdir_seg[is_tunnel] = seg; + hw_prof->vsi_h[hw_prof->cnt] = vsi->idx; + hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_1; + pf->hw_prof_cnt[ptype][is_tunnel]++; + hw_prof->vsi_h[hw_prof->cnt] = ctrl_vsi->idx; + hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_2; + pf->hw_prof_cnt[ptype][is_tunnel]++; + + return ret; + +err_add_entry: + vsi_num = ice_get_hw_vsi_num(hw, vsi->idx); + ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id); + ice_flow_rem_entry(hw, ICE_BLK_FD, entry_1); +err_add_prof: + ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id); + + return ret; +} + +static void +ice_fdir_input_set_parse(uint64_t inset, enum ice_flow_field *field) +{ + uint32_t i, j; + + struct ice_inset_map { + uint64_t inset; + enum ice_flow_field fld; + }; + static const struct ice_inset_map ice_inset_map[] = { + {ICE_INSET_DMAC, ICE_FLOW_FIELD_IDX_ETH_DA}, + {ICE_INSET_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA}, + {ICE_INSET_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA}, + {ICE_INSET_IPV4_TOS, ICE_FLOW_FIELD_IDX_IPV4_DSCP}, + {ICE_INSET_IPV4_TTL, ICE_FLOW_FIELD_IDX_IPV4_TTL}, + {ICE_INSET_IPV4_PROTO, ICE_FLOW_FIELD_IDX_IPV4_PROT}, + {ICE_INSET_IPV6_SRC, ICE_FLOW_FIELD_IDX_IPV6_SA}, + {ICE_INSET_IPV6_DST, ICE_FLOW_FIELD_IDX_IPV6_DA}, + {ICE_INSET_IPV6_TC, ICE_FLOW_FIELD_IDX_IPV6_DSCP}, + {ICE_INSET_IPV6_NEXT_HDR, 
ICE_FLOW_FIELD_IDX_IPV6_PROT}, + {ICE_INSET_IPV6_HOP_LIMIT, ICE_FLOW_FIELD_IDX_IPV6_TTL}, + {ICE_INSET_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT}, + {ICE_INSET_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT}, + {ICE_INSET_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT}, + {ICE_INSET_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT}, + {ICE_INSET_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT}, + {ICE_INSET_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT}, + {ICE_INSET_TUN_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA}, + {ICE_INSET_TUN_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA}, + {ICE_INSET_TUN_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT}, + {ICE_INSET_TUN_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT}, + {ICE_INSET_TUN_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT}, + {ICE_INSET_TUN_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT}, + {ICE_INSET_TUN_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT}, + {ICE_INSET_TUN_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT}, + {ICE_INSET_GTPU_TEID, ICE_FLOW_FIELD_IDX_GTPU_IP_TEID}, + {ICE_INSET_GTPU_QFI, ICE_FLOW_FIELD_IDX_GTPU_EH_QFI}, + }; + + for (i = 0, j = 0; i < RTE_DIM(ice_inset_map); i++) { + if ((inset & ice_inset_map[i].inset) == + ice_inset_map[i].inset) + field[j++] = ice_inset_map[i].fld; + } +} + +static int +ice_fdir_input_set_conf(struct ice_pf *pf, enum ice_fltr_ptype flow, + uint64_t input_set, enum ice_fdir_tunnel_type ttype) +{ + struct ice_flow_seg_info *seg; + struct ice_flow_seg_info *seg_tun = NULL; + enum ice_flow_field field[ICE_FLOW_FIELD_IDX_MAX]; + bool is_tunnel; + int i, ret; + + if (!input_set) + return -EINVAL; + + seg = (struct ice_flow_seg_info *) + ice_malloc(hw, sizeof(*seg)); + if (!seg) { + PMD_DRV_LOG(ERR, "No memory can be allocated"); + return -ENOMEM; + } + + for (i = 0; i < ICE_FLOW_FIELD_IDX_MAX; i++) + field[i] = ICE_FLOW_FIELD_IDX_MAX; + ice_fdir_input_set_parse(input_set, field); + + switch (flow) { + case ICE_FLTR_PTYPE_NONF_IPV4_UDP: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP | + ICE_FLOW_SEG_HDR_IPV4); + break; + case ICE_FLTR_PTYPE_NONF_IPV4_TCP: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP | + ICE_FLOW_SEG_HDR_IPV4); + break; + case ICE_FLTR_PTYPE_NONF_IPV4_SCTP: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP | + ICE_FLOW_SEG_HDR_IPV4); + break; + case ICE_FLTR_PTYPE_NONF_IPV4_OTHER: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4); + break; + case ICE_FLTR_PTYPE_NONF_IPV6_UDP: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP | + ICE_FLOW_SEG_HDR_IPV6); + break; + case ICE_FLTR_PTYPE_NONF_IPV6_TCP: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP | + ICE_FLOW_SEG_HDR_IPV6); + break; + case ICE_FLTR_PTYPE_NONF_IPV6_SCTP: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP | + ICE_FLOW_SEG_HDR_IPV6); + break; + case ICE_FLTR_PTYPE_NONF_IPV6_OTHER: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6); + break; + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP: + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP: + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP: + case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER: + if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU) + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_IP | + ICE_FLOW_SEG_HDR_IPV4); + else if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU_EH) + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_EH | + ICE_FLOW_SEG_HDR_GTPU_IP | + ICE_FLOW_SEG_HDR_IPV4); + else + PMD_DRV_LOG(ERR, "not supported tunnel type."); + break; + default: + PMD_DRV_LOG(ERR, "not supported filter type."); + break; + } + + for (i = 0; field[i] != ICE_FLOW_FIELD_IDX_MAX; i++) { + ice_flow_set_fld(seg, field[i], + ICE_FLOW_FLD_OFF_INVAL, + 
ICE_FLOW_FLD_OFF_INVAL, + ICE_FLOW_FLD_OFF_INVAL, false); + } + + is_tunnel = ice_fdir_is_tunnel_profile(ttype); + if (!is_tunnel) { + ret = ice_fdir_hw_tbl_conf(pf, pf->main_vsi, pf->fdir.fdir_vsi, + seg, flow, false); + } else { + seg_tun = (struct ice_flow_seg_info *) + ice_malloc(hw, sizeof(*seg) * ICE_FD_HW_SEG_MAX); + if (!seg_tun) { + PMD_DRV_LOG(ERR, "No memory can be allocated"); + rte_free(seg); + return -ENOMEM; + } + rte_memcpy(&seg_tun[1], seg, sizeof(*seg)); + ret = ice_fdir_hw_tbl_conf(pf, pf->main_vsi, pf->fdir.fdir_vsi, + seg_tun, flow, true); + } + + if (!ret) { + return ret; + } else if (ret < 0) { + rte_free(seg); + if (is_tunnel) + rte_free(seg_tun); + return (ret == -EEXIST) ? 0 : ret; + } else { + return ret; + } +} + +static void +ice_fdir_cnt_update(struct ice_pf *pf, enum ice_fltr_ptype ptype, + bool is_tunnel, bool add) +{ + struct ice_hw *hw = ICE_PF_TO_HW(pf); + int cnt; + + cnt = (add) ? 1 : -1; + hw->fdir_active_fltr += cnt; + if (ptype == ICE_FLTR_PTYPE_NONF_NONE || ptype >= ICE_FLTR_PTYPE_MAX) + PMD_DRV_LOG(ERR, "Unknown filter type %d", ptype); + else + pf->fdir_fltr_cnt[ptype][is_tunnel] += cnt; +} + +static int +ice_fdir_init(struct ice_adapter *ad) +{ + struct ice_pf *pf = &ad->pf; + struct ice_flow_parser *parser; + int ret; + + if (ad->hw.dcf_enabled) + return 0; + + ret = ice_fdir_setup(pf); + if (ret) + return ret; + + if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS) + parser = &ice_fdir_parser_comms; + else if (ad->active_pkg_type == ICE_PKG_TYPE_OS_DEFAULT) + parser = &ice_fdir_parser_os; + else + return -EINVAL; + + return ice_register_parser(parser, ad); +} + +static void +ice_fdir_uninit(struct ice_adapter *ad) +{ + struct ice_pf *pf = &ad->pf; + struct ice_flow_parser *parser; + + if (ad->hw.dcf_enabled) + return; + + if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS) + parser = &ice_fdir_parser_comms; + else + parser = &ice_fdir_parser_os; + + ice_unregister_parser(parser, ad); + + ice_fdir_teardown(pf); +} + +static int +ice_fdir_is_tunnel_profile(enum ice_fdir_tunnel_type tunnel_type) +{ + if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_VXLAN) + return 1; + else + return 0; +} + +static int +ice_fdir_add_del_filter(struct ice_pf *pf, + struct ice_fdir_filter_conf *filter, + bool add) +{ + struct ice_fltr_desc desc; + struct ice_hw *hw = ICE_PF_TO_HW(pf); + unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt; + bool is_tun; + int ret; + + filter->input.dest_vsi = pf->main_vsi->idx; + + memset(&desc, 0, sizeof(desc)); + ice_fdir_get_prgm_desc(hw, &filter->input, &desc, add); + + is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type); + + memset(pkt, 0, ICE_FDIR_PKT_LEN); + ret = ice_fdir_get_gen_prgm_pkt(hw, &filter->input, pkt, false, is_tun); + if (ret) { + PMD_DRV_LOG(ERR, "Generate dummy packet failed"); + return -EINVAL; + } + + return ice_fdir_programming(pf, &desc); +} + +static void +ice_fdir_extract_fltr_key(struct ice_fdir_fltr_pattern *key, + struct ice_fdir_filter_conf *filter) +{ + struct ice_fdir_fltr *input = &filter->input; + memset(key, 0, sizeof(*key)); + + key->flow_type = input->flow_type; + rte_memcpy(&key->ip, &input->ip, sizeof(key->ip)); + rte_memcpy(&key->mask, &input->mask, sizeof(key->mask)); + rte_memcpy(&key->ext_data, &input->ext_data, sizeof(key->ext_data)); + rte_memcpy(&key->ext_mask, &input->ext_mask, sizeof(key->ext_mask)); + + rte_memcpy(&key->gtpu_data, &input->gtpu_data, sizeof(key->gtpu_data)); + rte_memcpy(&key->gtpu_mask, &input->gtpu_mask, sizeof(key->gtpu_mask)); + + key->tunnel_type = filter->tunnel_type; +} 
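
The exact-match bookkeeping below (lookup, insert and delete on an rte_hash keyed by ice_fdir_fltr_pattern) backs the rte_flow create and destroy callbacks. For reference, a minimal, hypothetical application-side sketch of a rule this FDIR path could service is shown here; it is not part of the upstream patch, the port id, IPv4 address, queue index and mark id are invented example values, and error handling is elided. A rule like this is parsed into an ice_fdir_filter_conf and reaches ice_fdir_create_filter() through the ice_fdir_engine callbacks defined further down.

#include <stdint.h>
#include <rte_byteorder.h>
#include <rte_flow.h>

static struct rte_flow *
example_fdir_rule(uint16_t port_id, struct rte_flow_error *err)
{
	/* Match: any Ethernet / IPv4 with an exact destination address. */
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_ipv4 ip_spec = {
		.hdr.dst_addr = rte_cpu_to_be_32(0xC0A80001), /* 192.168.0.1 */
	};
	struct rte_flow_item_ipv4 ip_mask = {
		.hdr.dst_addr = rte_cpu_to_be_32(UINT32_MAX),
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ip_spec, .mask = &ip_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	/* Actions: tag matching packets and steer them to one Rx queue. */
	struct rte_flow_action_mark mark = { .id = 0x1234 };
	struct rte_flow_action_queue queue = { .index = 4 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, err);
}
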
+ +/* Check if there exists the flow director filter */ +static struct ice_fdir_filter_conf * +ice_fdir_entry_lookup(struct ice_fdir_info *fdir_info, + const struct ice_fdir_fltr_pattern *key) +{ + int ret; + + ret = rte_hash_lookup(fdir_info->hash_table, key); + if (ret < 0) + return NULL; + + return fdir_info->hash_map[ret]; +} + +/* Add a flow director entry into the SW list */ +static int +ice_fdir_entry_insert(struct ice_pf *pf, + struct ice_fdir_filter_conf *entry, + struct ice_fdir_fltr_pattern *key) +{ + struct ice_fdir_info *fdir_info = &pf->fdir; + int ret; + + ret = rte_hash_add_key(fdir_info->hash_table, key); + if (ret < 0) { + PMD_DRV_LOG(ERR, + "Failed to insert fdir entry to hash table %d!", + ret); + return ret; + } + fdir_info->hash_map[ret] = entry; + + return 0; +} + +/* Delete a flow director entry from the SW list */ +static int +ice_fdir_entry_del(struct ice_pf *pf, struct ice_fdir_fltr_pattern *key) +{ + struct ice_fdir_info *fdir_info = &pf->fdir; + int ret; + + ret = rte_hash_del_key(fdir_info->hash_table, key); + if (ret < 0) { + PMD_DRV_LOG(ERR, + "Failed to delete fdir filter to hash table %d!", + ret); + return ret; + } + fdir_info->hash_map[ret] = NULL; + + return 0; +} + +static int +ice_fdir_create_filter(struct ice_adapter *ad, + struct rte_flow *flow, + void *meta, + struct rte_flow_error *error) +{ + struct ice_pf *pf = &ad->pf; + struct ice_fdir_filter_conf *filter = meta; + struct ice_fdir_info *fdir_info = &pf->fdir; + struct ice_fdir_filter_conf *entry, *node; + struct ice_fdir_fltr_pattern key; + bool is_tun; + int ret; + + ice_fdir_extract_fltr_key(&key, filter); + node = ice_fdir_entry_lookup(fdir_info, &key); + if (node) { + rte_flow_error_set(error, EEXIST, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Rule already exists!"); + return -rte_errno; + } + + entry = rte_zmalloc("fdir_entry", sizeof(*entry), 0); + if (!entry) { + rte_flow_error_set(error, ENOMEM, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Failed to allocate memory"); + return -rte_errno; + } + + is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type); + + ret = ice_fdir_input_set_conf(pf, filter->input.flow_type, + filter->input_set, filter->tunnel_type); + if (ret) { + rte_flow_error_set(error, -ret, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Profile configure failed."); + goto free_entry; + } + + /* alloc counter for FDIR */ + if (filter->input.cnt_ena) { + struct rte_flow_action_count *act_count = &filter->act_count; + + filter->counter = ice_fdir_counter_alloc(pf, + act_count->shared, + act_count->id); + if (!filter->counter) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "Failed to alloc FDIR counter."); + goto free_entry; + } + filter->input.cnt_index = filter->counter->hw_index; + } + + ret = ice_fdir_add_del_filter(pf, filter, true); + if (ret) { + rte_flow_error_set(error, -ret, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Add filter rule failed."); + goto free_counter; + } + + rte_memcpy(entry, filter, sizeof(*entry)); + ret = ice_fdir_entry_insert(pf, entry, &key); + if (ret) { + rte_flow_error_set(error, -ret, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Insert entry to table failed."); + goto free_entry; + } + + flow->rule = entry; + ice_fdir_cnt_update(pf, filter->input.flow_type, is_tun, true); + + return 0; + +free_counter: + if (filter->counter) { + ice_fdir_counter_free(pf, filter->counter); + filter->counter = NULL; + } + +free_entry: + rte_free(entry); + return -rte_errno; +} + +static int +ice_fdir_destroy_filter(struct ice_adapter *ad, + struct rte_flow 
*flow, + struct rte_flow_error *error) +{ + struct ice_pf *pf = &ad->pf; + struct ice_fdir_info *fdir_info = &pf->fdir; + struct ice_fdir_filter_conf *filter, *entry; + struct ice_fdir_fltr_pattern key; + bool is_tun; + int ret; + + filter = (struct ice_fdir_filter_conf *)flow->rule; + + is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type); + + if (filter->counter) { + ice_fdir_counter_free(pf, filter->counter); + filter->counter = NULL; + } + + ice_fdir_extract_fltr_key(&key, filter); + entry = ice_fdir_entry_lookup(fdir_info, &key); + if (!entry) { + rte_flow_error_set(error, ENOENT, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Can't find entry."); + return -rte_errno; + } + + ret = ice_fdir_add_del_filter(pf, filter, false); + if (ret) { + rte_flow_error_set(error, -ret, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Del filter rule failed."); + return -rte_errno; + } + + ret = ice_fdir_entry_del(pf, &key); + if (ret) { + rte_flow_error_set(error, -ret, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Remove entry from table failed."); + return -rte_errno; + } + + ice_fdir_cnt_update(pf, filter->input.flow_type, is_tun, false); + flow->rule = NULL; + + rte_free(filter); + + return 0; +} + +static int +ice_fdir_query_count(struct ice_adapter *ad, + struct rte_flow *flow, + struct rte_flow_query_count *flow_stats, + struct rte_flow_error *error) +{ + struct ice_pf *pf = &ad->pf; + struct ice_hw *hw = ICE_PF_TO_HW(pf); + struct ice_fdir_filter_conf *filter = flow->rule; + struct ice_fdir_counter *counter = filter->counter; + uint64_t hits_lo, hits_hi; + + if (!counter) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, + "FDIR counters not available"); + return -rte_errno; + } + + /* + * Reading the low 32-bits latches the high 32-bits into a shadow + * register. Reading the high 32-bit returns the value in the + * shadow register. 
+ */ + hits_lo = ICE_READ_REG(hw, GLSTAT_FD_CNT0L(counter->hw_index)); + hits_hi = ICE_READ_REG(hw, GLSTAT_FD_CNT0H(counter->hw_index)); + + flow_stats->hits_set = 1; + flow_stats->hits = hits_lo | (hits_hi << 32); + flow_stats->bytes_set = 0; + flow_stats->bytes = 0; + + if (flow_stats->reset) { + /* reset statistic counter value */ + ICE_WRITE_REG(hw, GLSTAT_FD_CNT0H(counter->hw_index), 0); + ICE_WRITE_REG(hw, GLSTAT_FD_CNT0L(counter->hw_index), 0); + } + + return 0; +} + +static struct ice_flow_engine ice_fdir_engine = { + .init = ice_fdir_init, + .uninit = ice_fdir_uninit, + .create = ice_fdir_create_filter, + .destroy = ice_fdir_destroy_filter, + .query_count = ice_fdir_query_count, + .type = ICE_FLOW_ENGINE_FDIR, +}; + +static int +ice_fdir_parse_action_qregion(struct ice_pf *pf, + struct rte_flow_error *error, + const struct rte_flow_action *act, + struct ice_fdir_filter_conf *filter) +{ + const struct rte_flow_action_rss *rss = act->conf; + uint32_t i; + + if (act->type != RTE_FLOW_ACTION_TYPE_RSS) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, act, + "Invalid action."); + return -rte_errno; + } + + if (rss->queue_num <= 1) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, act, + "Queue region size can't be 0 or 1."); + return -rte_errno; + } + + /* check if queue index for queue region is continuous */ + for (i = 0; i < rss->queue_num - 1; i++) { + if (rss->queue[i + 1] != rss->queue[i] + 1) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, act, + "Discontinuous queue region"); + return -rte_errno; + } + } + + if (rss->queue[rss->queue_num - 1] >= pf->dev_data->nb_rx_queues) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, act, + "Invalid queue region indexes."); + return -rte_errno; + } + + if (!(rte_is_power_of_2(rss->queue_num) && + (rss->queue_num <= ICE_FDIR_MAX_QREGION_SIZE))) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, act, + "The region size should be any of the following values:" + "1, 2, 4, 8, 16, 32, 64, 128 as long as the total number " + "of queues do not exceed the VSI allocation."); + return -rte_errno; + } + + filter->input.q_index = rss->queue[0]; + filter->input.q_region = rte_fls_u32(rss->queue_num) - 1; + filter->input.dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QGROUP; + + return 0; +} + +static int +ice_fdir_parse_action(struct ice_adapter *ad, + const struct rte_flow_action actions[], + struct rte_flow_error *error, + struct ice_fdir_filter_conf *filter) +{ + struct ice_pf *pf = &ad->pf; + const struct rte_flow_action_queue *act_q; + const struct rte_flow_action_mark *mark_spec = NULL; + const struct rte_flow_action_count *act_count; + uint32_t dest_num = 0; + uint32_t mark_num = 0; + uint32_t counter_num = 0; + int ret; + + for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { + switch (actions->type) { + case RTE_FLOW_ACTION_TYPE_VOID: + break; + case RTE_FLOW_ACTION_TYPE_QUEUE: + dest_num++; + + act_q = actions->conf; + filter->input.q_index = act_q->index; + if (filter->input.q_index >= + pf->dev_data->nb_rx_queues) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + actions, + "Invalid queue for FDIR."); + return -rte_errno; + } + filter->input.dest_ctl = + ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX; + break; + case RTE_FLOW_ACTION_TYPE_DROP: + dest_num++; + + filter->input.dest_ctl = + ICE_FLTR_PRGM_DESC_DEST_DROP_PKT; + break; + case RTE_FLOW_ACTION_TYPE_PASSTHRU: + dest_num++; + + filter->input.dest_ctl = + 
ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_OTHER; + break; + case RTE_FLOW_ACTION_TYPE_RSS: + dest_num++; + + ret = ice_fdir_parse_action_qregion(pf, + error, actions, filter); + if (ret) + return ret; + break; + case RTE_FLOW_ACTION_TYPE_MARK: + mark_num++; + + mark_spec = actions->conf; + filter->input.fltr_id = mark_spec->id; + filter->input.fdid_prio = ICE_FXD_FLTR_QW1_FDID_PRI_ONE; + break; + case RTE_FLOW_ACTION_TYPE_COUNT: + counter_num++; + + act_count = actions->conf; + filter->input.cnt_ena = ICE_FXD_FLTR_QW0_STAT_ENA_PKTS; + rte_memcpy(&filter->act_count, act_count, + sizeof(filter->act_count)); + + break; + default: + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, actions, + "Invalid action."); + return -rte_errno; + } + } + + if (dest_num >= 2) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, actions, + "Unsupported action combination"); + return -rte_errno; + } + + if (mark_num >= 2) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, actions, + "Too many mark actions"); + return -rte_errno; + } + + if (counter_num >= 2) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, actions, + "Too many count actions"); + return -rte_errno; + } + + if (dest_num + mark_num + counter_num == 0) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, actions, + "Empty action"); + return -rte_errno; + } + + /* set default action to PASSTHRU mode, in "mark/count only" case. */ + if (dest_num == 0) + filter->input.dest_ctl = + ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_OTHER; + + return 0; +} + +static int +ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad, + const struct rte_flow_item pattern[], + struct rte_flow_error *error, + struct ice_fdir_filter_conf *filter) +{ + const struct rte_flow_item *item = pattern; + enum rte_flow_item_type item_type; + enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END; + enum ice_fdir_tunnel_type tunnel_type = ICE_FDIR_TUNNEL_TYPE_NONE; + const struct rte_flow_item_eth *eth_spec, *eth_mask; + const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask; + const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask; + const struct rte_flow_item_tcp *tcp_spec, *tcp_mask; + const struct rte_flow_item_udp *udp_spec, *udp_mask; + const struct rte_flow_item_sctp *sctp_spec, *sctp_mask; + const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask; + const struct rte_flow_item_gtp *gtp_spec, *gtp_mask; + const struct rte_flow_item_gtp_psc *gtp_psc_spec, *gtp_psc_mask; + uint64_t input_set = ICE_INSET_NONE; + uint8_t flow_type = ICE_FLTR_PTYPE_NONF_NONE; + uint8_t ipv6_addr_mask[16] = { + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF + }; + uint32_t vtc_flow_cpu; + + + for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { + if (item->last) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Not support range"); + return -rte_errno; + } + item_type = item->type; + + switch (item_type) { + case RTE_FLOW_ITEM_TYPE_ETH: + eth_spec = item->spec; + eth_mask = item->mask; + + if (eth_spec && eth_mask) { + if (!rte_is_zero_ether_addr(ð_spec->src) || + !rte_is_zero_ether_addr(ð_mask->src)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Src mac not support"); + return -rte_errno; + } + + if (!rte_is_broadcast_ether_addr(ð_mask->dst)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid mac addr mask"); + return -rte_errno; + } + + input_set |= ICE_INSET_DMAC; + 
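+				/* Only the destination MAC becomes part of the
+				 * filter input; a non-zero source MAC or mask
+				 * was already rejected above.
+				 */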
rte_memcpy(&filter->input.ext_data.dst_mac, + ð_spec->dst, + RTE_ETHER_ADDR_LEN); + } + break; + case RTE_FLOW_ITEM_TYPE_IPV4: + l3 = RTE_FLOW_ITEM_TYPE_IPV4; + ipv4_spec = item->spec; + ipv4_mask = item->mask; + + if (ipv4_spec && ipv4_mask) { + /* Check IPv4 mask and update input set */ + if (ipv4_mask->hdr.version_ihl || + ipv4_mask->hdr.total_length || + ipv4_mask->hdr.packet_id || + ipv4_mask->hdr.fragment_offset || + ipv4_mask->hdr.hdr_checksum) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid IPv4 mask."); + return -rte_errno; + } + if (ipv4_mask->hdr.src_addr == UINT32_MAX) + input_set |= tunnel_type ? + ICE_INSET_TUN_IPV4_SRC : + ICE_INSET_IPV4_SRC; + if (ipv4_mask->hdr.dst_addr == UINT32_MAX) + input_set |= tunnel_type ? + ICE_INSET_TUN_IPV4_DST : + ICE_INSET_IPV4_DST; + if (ipv4_mask->hdr.type_of_service == UINT8_MAX) + input_set |= ICE_INSET_IPV4_TOS; + if (ipv4_mask->hdr.time_to_live == UINT8_MAX) + input_set |= ICE_INSET_IPV4_TTL; + if (ipv4_mask->hdr.next_proto_id == UINT8_MAX) + input_set |= ICE_INSET_IPV4_PROTO; + + filter->input.ip.v4.dst_ip = + ipv4_spec->hdr.src_addr; + filter->input.ip.v4.src_ip = + ipv4_spec->hdr.dst_addr; + filter->input.ip.v4.tos = + ipv4_spec->hdr.type_of_service; + filter->input.ip.v4.ttl = + ipv4_spec->hdr.time_to_live; + filter->input.ip.v4.proto = + ipv4_spec->hdr.next_proto_id; + } + + flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER; + break; + case RTE_FLOW_ITEM_TYPE_IPV6: + l3 = RTE_FLOW_ITEM_TYPE_IPV6; + ipv6_spec = item->spec; + ipv6_mask = item->mask; + + if (ipv6_spec && ipv6_mask) { + /* Check IPv6 mask and update input set */ + if (ipv6_mask->hdr.payload_len) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid IPv6 mask"); + return -rte_errno; + } + + if (!memcmp(ipv6_mask->hdr.src_addr, + ipv6_addr_mask, + RTE_DIM(ipv6_mask->hdr.src_addr))) + input_set |= ICE_INSET_IPV6_SRC; + if (!memcmp(ipv6_mask->hdr.dst_addr, + ipv6_addr_mask, + RTE_DIM(ipv6_mask->hdr.dst_addr))) + input_set |= ICE_INSET_IPV6_DST; + + if ((ipv6_mask->hdr.vtc_flow & + rte_cpu_to_be_32(ICE_IPV6_TC_MASK)) + == rte_cpu_to_be_32(ICE_IPV6_TC_MASK)) + input_set |= ICE_INSET_IPV6_TC; + if (ipv6_mask->hdr.proto == UINT8_MAX) + input_set |= ICE_INSET_IPV6_NEXT_HDR; + if (ipv6_mask->hdr.hop_limits == UINT8_MAX) + input_set |= ICE_INSET_IPV6_HOP_LIMIT; + + rte_memcpy(filter->input.ip.v6.dst_ip, + ipv6_spec->hdr.src_addr, 16); + rte_memcpy(filter->input.ip.v6.src_ip, + ipv6_spec->hdr.dst_addr, 16); + + vtc_flow_cpu = + rte_be_to_cpu_32(ipv6_spec->hdr.vtc_flow); + filter->input.ip.v6.tc = + (uint8_t)(vtc_flow_cpu >> + ICE_FDIR_IPV6_TC_OFFSET); + filter->input.ip.v6.proto = + ipv6_spec->hdr.proto; + filter->input.ip.v6.hlim = + ipv6_spec->hdr.hop_limits; + } + + flow_type = ICE_FLTR_PTYPE_NONF_IPV6_OTHER; + break; + case RTE_FLOW_ITEM_TYPE_TCP: + tcp_spec = item->spec; + tcp_mask = item->mask; + + if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) + flow_type = ICE_FLTR_PTYPE_NONF_IPV4_TCP; + else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) + flow_type = ICE_FLTR_PTYPE_NONF_IPV6_TCP; + + if (tcp_spec && tcp_mask) { + /* Check TCP mask and update input set */ + if (tcp_mask->hdr.sent_seq || + tcp_mask->hdr.recv_ack || + tcp_mask->hdr.data_off || + tcp_mask->hdr.tcp_flags || + tcp_mask->hdr.rx_win || + tcp_mask->hdr.cksum || + tcp_mask->hdr.tcp_urp) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid TCP mask"); + return -rte_errno; + } + + if (tcp_mask->hdr.src_port == UINT16_MAX) + input_set |= tunnel_type ? 
+                            ICE_INSET_TUN_TCP_SRC_PORT :
+                            ICE_INSET_TCP_SRC_PORT;
+                if (tcp_mask->hdr.dst_port == UINT16_MAX)
+                    input_set |= tunnel_type ?
+                            ICE_INSET_TUN_TCP_DST_PORT :
+                            ICE_INSET_TCP_DST_PORT;
+
+                /* Get filter info */
+                if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
+                    filter->input.ip.v4.dst_port =
+                            tcp_spec->hdr.src_port;
+                    filter->input.ip.v4.src_port =
+                            tcp_spec->hdr.dst_port;
+                } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
+                    filter->input.ip.v6.dst_port =
+                            tcp_spec->hdr.src_port;
+                    filter->input.ip.v6.src_port =
+                            tcp_spec->hdr.dst_port;
+                }
+            }
+            break;
+        case RTE_FLOW_ITEM_TYPE_UDP:
+            udp_spec = item->spec;
+            udp_mask = item->mask;
+
+            if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+                flow_type = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
+            else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
+                flow_type = ICE_FLTR_PTYPE_NONF_IPV6_UDP;
+
+            if (udp_spec && udp_mask) {
+                /* Check UDP mask and update input set */
+                if (udp_mask->hdr.dgram_len ||
+                    udp_mask->hdr.dgram_cksum) {
+                    rte_flow_error_set(error, EINVAL,
+                            RTE_FLOW_ERROR_TYPE_ITEM,
+                            item,
+                            "Invalid UDP mask");
+                    return -rte_errno;
+                }
+
+                if (udp_mask->hdr.src_port == UINT16_MAX)
+                    input_set |= tunnel_type ?
+                            ICE_INSET_TUN_UDP_SRC_PORT :
+                            ICE_INSET_UDP_SRC_PORT;
+                if (udp_mask->hdr.dst_port == UINT16_MAX)
+                    input_set |= tunnel_type ?
+                            ICE_INSET_TUN_UDP_DST_PORT :
+                            ICE_INSET_UDP_DST_PORT;
+
+                /* Get filter info */
+                if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
+                    filter->input.ip.v4.dst_port =
+                            udp_spec->hdr.src_port;
+                    filter->input.ip.v4.src_port =
+                            udp_spec->hdr.dst_port;
+                } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
+                    filter->input.ip.v6.src_port =
+                            udp_spec->hdr.dst_port;
+                    filter->input.ip.v6.dst_port =
+                            udp_spec->hdr.src_port;
+                }
+            }
+            break;
+        case RTE_FLOW_ITEM_TYPE_SCTP:
+            sctp_spec = item->spec;
+            sctp_mask = item->mask;
+
+            if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+                flow_type = ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
+            else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
+                flow_type = ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
+
+            if (sctp_spec && sctp_mask) {
+                /* Check SCTP mask and update input set */
+                if (sctp_mask->hdr.cksum) {
+                    rte_flow_error_set(error, EINVAL,
+                            RTE_FLOW_ERROR_TYPE_ITEM,
+                            item,
+                            "Invalid SCTP mask");
+                    return -rte_errno;
+                }
+
+                if (sctp_mask->hdr.src_port == UINT16_MAX)
+                    input_set |= tunnel_type ?
+                            ICE_INSET_TUN_SCTP_SRC_PORT :
+                            ICE_INSET_SCTP_SRC_PORT;
+                if (sctp_mask->hdr.dst_port == UINT16_MAX)
+                    input_set |= tunnel_type ?
+ ICE_INSET_TUN_SCTP_DST_PORT : + ICE_INSET_SCTP_DST_PORT; + + /* Get filter info */ + if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) { + filter->input.ip.v4.dst_port = + sctp_spec->hdr.src_port; + filter->input.ip.v4.src_port = + sctp_spec->hdr.dst_port; + } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) { + filter->input.ip.v6.dst_port = + sctp_spec->hdr.src_port; + filter->input.ip.v6.src_port = + sctp_spec->hdr.dst_port; + } + } + break; + case RTE_FLOW_ITEM_TYPE_VOID: + break; + case RTE_FLOW_ITEM_TYPE_VXLAN: + l3 = RTE_FLOW_ITEM_TYPE_END; + vxlan_spec = item->spec; + vxlan_mask = item->mask; + + if (vxlan_spec || vxlan_mask) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid vxlan field"); + return -rte_errno; + } + + tunnel_type = ICE_FDIR_TUNNEL_TYPE_VXLAN; + break; + case RTE_FLOW_ITEM_TYPE_GTPU: + l3 = RTE_FLOW_ITEM_TYPE_END; + gtp_spec = item->spec; + gtp_mask = item->mask; + + if (gtp_spec && gtp_mask) { + if (gtp_mask->v_pt_rsv_flags || + gtp_mask->msg_type || + gtp_mask->msg_len) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid GTP mask"); + return -rte_errno; + } + + if (gtp_mask->teid == UINT32_MAX) + input_set |= ICE_INSET_GTPU_TEID; + + filter->input.gtpu_data.teid = gtp_spec->teid; + } + + tunnel_type = ICE_FDIR_TUNNEL_TYPE_GTPU; + break; + case RTE_FLOW_ITEM_TYPE_GTP_PSC: + gtp_psc_spec = item->spec; + gtp_psc_mask = item->mask; + + if (gtp_psc_spec && gtp_psc_mask) { + if (gtp_psc_mask->qfi == UINT8_MAX) + input_set |= ICE_INSET_GTPU_QFI; + + filter->input.gtpu_data.qfi = + gtp_psc_spec->qfi; + } + tunnel_type = ICE_FDIR_TUNNEL_TYPE_GTPU_EH; + break; + default: + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid pattern item."); + return -rte_errno; + } + } + + if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_GTPU || + tunnel_type == ICE_FDIR_TUNNEL_TYPE_GTPU_EH) + flow_type = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER; + + filter->tunnel_type = tunnel_type; + filter->input.flow_type = flow_type; + filter->input_set = input_set; + + return 0; +} + +static int +ice_fdir_parse(struct ice_adapter *ad, + struct ice_pattern_match_item *array, + uint32_t array_len, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + void **meta, + struct rte_flow_error *error) +{ + struct ice_pf *pf = &ad->pf; + struct ice_fdir_filter_conf *filter = &pf->fdir.conf; + struct ice_pattern_match_item *item = NULL; + uint64_t input_set; + int ret; + + memset(filter, 0, sizeof(*filter)); + item = ice_search_pattern_match_item(pattern, array, array_len, error); + if (!item) + return -rte_errno; + + ret = ice_fdir_parse_pattern(ad, pattern, error, filter); + if (ret) + goto error; + input_set = filter->input_set; + if (!input_set || input_set & ~item->input_set_mask) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM_SPEC, + pattern, + "Invalid input set"); + ret = -rte_errno; + goto error; + } + + ret = ice_fdir_parse_action(ad, actions, error, filter); + if (ret) + goto error; + + if (meta) + *meta = filter; +error: + rte_free(item); + return ret; +} + +static struct ice_flow_parser ice_fdir_parser_os = { + .engine = &ice_fdir_engine, + .array = ice_fdir_pattern_os, + .array_len = RTE_DIM(ice_fdir_pattern_os), + .parse_pattern_action = ice_fdir_parse, + .stage = ICE_FLOW_STAGE_DISTRIBUTOR, +}; + +static struct ice_flow_parser ice_fdir_parser_comms = { + .engine = &ice_fdir_engine, + .array = ice_fdir_pattern_comms, + .array_len = RTE_DIM(ice_fdir_pattern_comms), + 
.parse_pattern_action = ice_fdir_parse, + .stage = ICE_FLOW_STAGE_DISTRIBUTOR, +}; + +RTE_INIT(ice_fdir_engine_register) +{ + ice_register_flow_engine(&ice_fdir_engine); +} diff --git a/src/spdk/dpdk/drivers/net/ice/ice_generic_flow.c b/src/spdk/dpdk/drivers/net/ice/ice_generic_flow.c new file mode 100644 index 000000000..ad103d0e8 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ice/ice_generic_flow.c @@ -0,0 +1,2090 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019 Intel Corporation + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "ice_ethdev.h" +#include "ice_generic_flow.h" + +/** + * Non-pipeline mode, fdir and switch both used as distributor, + * fdir used first, switch used as fdir's backup. + */ +#define ICE_FLOW_CLASSIFY_STAGE_DISTRIBUTOR_ONLY 0 +/*Pipeline mode, switch used at permission stage*/ +#define ICE_FLOW_CLASSIFY_STAGE_PERMISSION 1 +/*Pipeline mode, fdir used at distributor stage*/ +#define ICE_FLOW_CLASSIFY_STAGE_DISTRIBUTOR 2 + +static struct ice_engine_list engine_list = + TAILQ_HEAD_INITIALIZER(engine_list); + +static int ice_flow_validate(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error); +static struct rte_flow *ice_flow_create(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error); +static int ice_flow_destroy(struct rte_eth_dev *dev, + struct rte_flow *flow, + struct rte_flow_error *error); +static int ice_flow_flush(struct rte_eth_dev *dev, + struct rte_flow_error *error); +static int ice_flow_query(struct rte_eth_dev *dev, + struct rte_flow *flow, + const struct rte_flow_action *actions, + void *data, + struct rte_flow_error *error); + +const struct rte_flow_ops ice_flow_ops = { + .validate = ice_flow_validate, + .create = ice_flow_create, + .destroy = ice_flow_destroy, + .flush = ice_flow_flush, + .query = ice_flow_query, +}; + +/* empty */ +enum rte_flow_item_type pattern_empty[] = { + RTE_FLOW_ITEM_TYPE_END, +}; + +/* L2 */ +enum rte_flow_item_type pattern_ethertype[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_ethertype_vlan[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_ethertype_qinq[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_END, +}; + +/* ARP */ +enum rte_flow_item_type pattern_eth_arp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_ARP_ETH_IPV4, + RTE_FLOW_ITEM_TYPE_END, +}; + +/* non-tunnel IPv4 */ +enum rte_flow_item_type pattern_eth_ipv4[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_vlan_ipv4[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_qinq_ipv4[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv4_udp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_vlan_ipv4_udp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + 
RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_qinq_ipv4_udp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv4_tcp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_vlan_ipv4_tcp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_qinq_ipv4_tcp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv4_sctp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_vlan_ipv4_sctp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_qinq_ipv4_sctp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv4_icmp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_ICMP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_vlan_ipv4_icmp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_ICMP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_qinq_ipv4_icmp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_ICMP, + RTE_FLOW_ITEM_TYPE_END, +}; + +/* non-tunnel IPv6 */ +enum rte_flow_item_type pattern_eth_ipv6[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_vlan_ipv6[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_qinq_ipv6[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv6_udp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_vlan_ipv6_udp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_qinq_ipv6_udp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv6_tcp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_vlan_ipv6_tcp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_qinq_ipv6_tcp[] = 
{ + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv6_sctp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_vlan_ipv6_sctp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_qinq_ipv6_sctp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv6_icmp6[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_ICMP6, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_vlan_ipv6_icmp6[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_ICMP6, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_qinq_ipv6_icmp6[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_ICMP6, + RTE_FLOW_ITEM_TYPE_END, +}; + +/* IPv4 VXLAN IPv4 */ +enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan_ipv4[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan_ipv4_udp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan_ipv4_tcp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan_ipv4_sctp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan_ipv4_icmp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_ICMP, + RTE_FLOW_ITEM_TYPE_END, +}; + +/* IPv4 VXLAN MAC IPv4 */ +enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan_eth_ipv4[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan_eth_ipv4_sctp[] = { + 
RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan_eth_ipv4_icmp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_ICMP, + RTE_FLOW_ITEM_TYPE_END, +}; + +/* IPv6 VXLAN IPv4 */ +enum rte_flow_item_type pattern_eth_ipv6_udp_vxlan_ipv4[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv6_udp_vxlan_ipv4_tcp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv6_udp_vxlan_ipv4_udp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv6_udp_vxlan_ipv4_sctp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv6_udp_vxlan_ipv4_icmp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_ICMP, + RTE_FLOW_ITEM_TYPE_END, +}; + +/* IPv6 VXLAN MAC IPv4 */ +enum rte_flow_item_type pattern_eth_ipv6_udp_vxlan_eth_ipv4[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv6_udp_vxlan_eth_ipv4_tcp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv6_udp_vxlan_eth_ipv4_udp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv6_udp_vxlan_eth_ipv4_sctp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv6_udp_vxlan_eth_ipv4_icmp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_ICMP, + RTE_FLOW_ITEM_TYPE_END, +}; + +/* IPv4 VXLAN IPv6 */ +enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan_ipv6[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan_ipv6_udp[] = { + 
RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan_ipv6_tcp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan_ipv6_sctp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan_ipv6_icmp6[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_ICMP6, + RTE_FLOW_ITEM_TYPE_END, +}; + +/* IPv4 VXLAN MAC IPv6 */ +enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan_eth_ipv6[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan_eth_ipv6_udp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan_eth_ipv6_tcp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan_eth_ipv6_sctp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan_eth_ipv6_icmp6[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_ICMP6, + RTE_FLOW_ITEM_TYPE_END, +}; + +/* IPv6 VXLAN IPv6 */ +enum rte_flow_item_type pattern_eth_ipv6_udp_vxlan_ipv6[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv6_udp_vxlan_ipv6_tcp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv6_udp_vxlan_ipv6_udp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv6_udp_vxlan_ipv6_sctp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv6_udp_vxlan_ipv6_icmp6[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + 
RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_ICMP6, + RTE_FLOW_ITEM_TYPE_END, +}; + +/* IPv6 VXLAN MAC IPv6 */ +enum rte_flow_item_type pattern_eth_ipv6_udp_vxlan_eth_ipv6[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv6_udp_vxlan_eth_ipv6_tcp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv6_udp_vxlan_eth_ipv6_udp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv6_udp_vxlan_eth_ipv6_sctp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv6_udp_vxlan_eth_ipv6_icmp6[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_ICMP6, + RTE_FLOW_ITEM_TYPE_END, +}; + +/* IPv4 NVGRE IPv4 */ +enum rte_flow_item_type pattern_eth_ipv4_nvgre_ipv4[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_NVGRE, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv4_nvgre_ipv4_udp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_NVGRE, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv4_nvgre_ipv4_tcp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_NVGRE, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv4_nvgre_ipv4_sctp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_NVGRE, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv4_nvgre_ipv4_icmp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_NVGRE, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_ICMP, + RTE_FLOW_ITEM_TYPE_END, +}; + +/* IPv4 NVGRE MAC IPv4 */ +enum rte_flow_item_type pattern_eth_ipv4_nvgre_eth_ipv4[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_NVGRE, + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv4_nvgre_eth_ipv4_udp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_NVGRE, + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv4_nvgre_eth_ipv4_tcp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_NVGRE, + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv4_nvgre_eth_ipv4_sctp[] = { + 
RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_NVGRE, + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv4_nvgre_eth_ipv4_icmp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_NVGRE, + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_ICMP, + RTE_FLOW_ITEM_TYPE_END, +}; + +/* IPv6 NVGRE IPv4 */ +enum rte_flow_item_type pattern_eth_ipv6_nvgre_ipv4[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_NVGRE, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv6_nvgre_ipv4_tcp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_NVGRE, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv6_nvgre_ipv4_udp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_NVGRE, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv6_nvgre_ipv4_sctp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_NVGRE, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv6_nvgre_ipv4_icmp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_NVGRE, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_ICMP, + RTE_FLOW_ITEM_TYPE_END, +}; + +/* IPv6 NVGRE MAC IPv4 */ +enum rte_flow_item_type pattern_eth_ipv6_nvgre_eth_ipv4[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_NVGRE, + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv6_nvgre_eth_ipv4_tcp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_NVGRE, + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv6_nvgre_eth_ipv4_udp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_NVGRE, + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv6_nvgre_eth_ipv4_sctp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_NVGRE, + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv6_nvgre_eth_ipv4_icmp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_NVGRE, + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_ICMP, + RTE_FLOW_ITEM_TYPE_END, +}; + +/* IPv4 NVGRE IPv6 */ +enum rte_flow_item_type pattern_eth_ipv4_nvgre_ipv6[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_NVGRE, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv4_nvgre_ipv6_udp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_NVGRE, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv4_nvgre_ipv6_tcp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_NVGRE, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum 
rte_flow_item_type pattern_eth_ipv4_nvgre_ipv6_sctp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_NVGRE, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv4_nvgre_ipv6_icmp6[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_NVGRE, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_ICMP6, + RTE_FLOW_ITEM_TYPE_END, +}; + +/* IPv4 NVGRE MAC IPv6 */ +enum rte_flow_item_type pattern_eth_ipv4_nvgre_eth_ipv6[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_NVGRE, + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv4_nvgre_eth_ipv6_udp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_NVGRE, + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv4_nvgre_eth_ipv6_tcp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_NVGRE, + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv4_nvgre_eth_ipv6_sctp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_NVGRE, + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv4_nvgre_eth_ipv6_icmp6[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_NVGRE, + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_ICMP6, + RTE_FLOW_ITEM_TYPE_END, +}; + +/* IPv6 NVGRE IPv6 */ +enum rte_flow_item_type pattern_eth_ipv6_nvgre_ipv6[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_NVGRE, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv6_nvgre_ipv6_tcp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_NVGRE, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv6_nvgre_ipv6_udp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_NVGRE, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv6_nvgre_ipv6_sctp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_NVGRE, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv6_nvgre_ipv6_icmp6[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_NVGRE, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_ICMP6, + RTE_FLOW_ITEM_TYPE_END, +}; + +/* IPv6 NVGRE MAC IPv6 */ +enum rte_flow_item_type pattern_eth_ipv6_nvgre_eth_ipv6[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_NVGRE, + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv6_nvgre_eth_ipv6_tcp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_NVGRE, + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv6_nvgre_eth_ipv6_udp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_NVGRE, + 
RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv6_nvgre_eth_ipv6_sctp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_NVGRE, + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv6_nvgre_eth_ipv6_icmp6[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_NVGRE, + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_ICMP6, + RTE_FLOW_ITEM_TYPE_END, +}; + +/* GTPU */ +enum rte_flow_item_type pattern_eth_ipv4_gtpu[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_GTPU, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv4_gtpu_ipv4[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_GTPU, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv4_gtpu_eh[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_GTPU, + RTE_FLOW_ITEM_TYPE_GTP_PSC, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv4_gtpu_eh_ipv4[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_GTPU, + RTE_FLOW_ITEM_TYPE_GTP_PSC, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv4_gtpu_eh_ipv4_udp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_GTPU, + RTE_FLOW_ITEM_TYPE_GTP_PSC, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv4_gtpu_eh_ipv4_tcp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_GTPU, + RTE_FLOW_ITEM_TYPE_GTP_PSC, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_END, + +}; +enum rte_flow_item_type pattern_eth_ipv4_gtpu_eh_ipv4_icmp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_GTPU, + RTE_FLOW_ITEM_TYPE_GTP_PSC, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_ICMP, + RTE_FLOW_ITEM_TYPE_END, +}; + +/* PPPoE */ +enum rte_flow_item_type pattern_eth_pppoed[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_PPPOED, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_vlan_pppoed[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_PPPOED, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_qinq_pppoed[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_PPPOED, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_pppoes[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_PPPOES, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_pppoes_proto[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_PPPOES, + RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_vlan_pppoes[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_PPPOES, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_vlan_pppoes_proto[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_PPPOES, + RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID, 
+ RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_qinq_pppoes[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_PPPOES, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_pppoes_ipv4[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_PPPOES, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_vlan_pppoes_ipv4[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_PPPOES, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_qinq_pppoes_ipv4[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_PPPOES, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_pppoes_ipv4_udp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_PPPOES, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_vlan_pppoes_ipv4_udp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_PPPOES, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_qinq_pppoes_ipv4_udp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_PPPOES, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_pppoes_ipv4_tcp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_PPPOES, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_vlan_pppoes_ipv4_tcp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_PPPOES, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_qinq_pppoes_ipv4_tcp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_PPPOES, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_pppoes_ipv4_sctp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_PPPOES, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_vlan_pppoes_ipv4_sctp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_PPPOES, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_qinq_pppoes_ipv4_sctp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_PPPOES, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_pppoes_ipv4_icmp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_PPPOES, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_ICMP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_vlan_pppoes_ipv4_icmp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_PPPOES, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_ICMP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_qinq_pppoes_ipv4_icmp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_PPPOES, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_ICMP, + RTE_FLOW_ITEM_TYPE_END, 
+}; +enum rte_flow_item_type pattern_eth_pppoes_ipv6[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_PPPOES, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_vlan_pppoes_ipv6[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_PPPOES, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_qinq_pppoes_ipv6[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_PPPOES, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_pppoes_ipv6_udp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_PPPOES, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_vlan_pppoes_ipv6_udp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_PPPOES, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_qinq_pppoes_ipv6_udp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_PPPOES, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_pppoes_ipv6_tcp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_PPPOES, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_vlan_pppoes_ipv6_tcp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_PPPOES, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_qinq_pppoes_ipv6_tcp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_PPPOES, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_pppoes_ipv6_sctp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_PPPOES, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_vlan_pppoes_ipv6_sctp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_PPPOES, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_qinq_pppoes_ipv6_sctp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_PPPOES, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_pppoes_ipv6_icmp6[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_PPPOES, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_ICMP6, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_vlan_pppoes_ipv6_icmp6[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_PPPOES, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_ICMP6, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_qinq_pppoes_ipv6_icmp6[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_PPPOES, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_ICMP6, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv4_esp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_ESP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv6_esp[] = { 
+ RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_ESP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv4_ah[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_AH, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv6_ah[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_AH, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv4_udp_esp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_ESP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv6_udp_esp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_ESP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv6_udp_ah[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_AH, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv4_l2tp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_L2TPV3OIP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv6_l2tp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_L2TPV3OIP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv4_pfcp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_PFCP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv6_pfcp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_PFCP, + RTE_FLOW_ITEM_TYPE_END, +}; + + + +typedef struct ice_flow_engine * (*parse_engine_t)(struct ice_adapter *ad, + struct rte_flow *flow, + struct ice_parser_list *parser_list, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error); + +void +ice_register_flow_engine(struct ice_flow_engine *engine) +{ + TAILQ_INSERT_TAIL(&engine_list, engine, node); +} + +int +ice_flow_init(struct ice_adapter *ad) +{ + int ret; + struct ice_pf *pf = &ad->pf; + void *temp; + struct ice_flow_engine *engine; + + TAILQ_INIT(&pf->flow_list); + TAILQ_INIT(&pf->rss_parser_list); + TAILQ_INIT(&pf->perm_parser_list); + TAILQ_INIT(&pf->dist_parser_list); + rte_spinlock_init(&pf->flow_ops_lock); + + TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) { + if (engine->init == NULL) { + PMD_INIT_LOG(ERR, "Invalid engine type (%d)", + engine->type); + return -ENOTSUP; + } + + ret = engine->init(ad); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to initialize engine %d", + engine->type); + return ret; + } + } + return 0; +} + +void +ice_flow_uninit(struct ice_adapter *ad) +{ + struct ice_pf *pf = &ad->pf; + struct ice_flow_engine *engine; + struct rte_flow *p_flow; + struct ice_flow_parser_node *p_parser; + void *temp; + + TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) { + if (engine->uninit) + engine->uninit(ad); + } + + /* Remove all flows */ + while ((p_flow = TAILQ_FIRST(&pf->flow_list))) { + TAILQ_REMOVE(&pf->flow_list, p_flow, node); + if (p_flow->engine->free) + p_flow->engine->free(p_flow); + rte_free(p_flow); + } + + /* Cleanup parser list */ + while ((p_parser = TAILQ_FIRST(&pf->rss_parser_list))) { + TAILQ_REMOVE(&pf->rss_parser_list, p_parser, node); + rte_free(p_parser); + } + + while ((p_parser = TAILQ_FIRST(&pf->perm_parser_list))) { + TAILQ_REMOVE(&pf->perm_parser_list, p_parser, node); + 
rte_free(p_parser); + } + + while ((p_parser = TAILQ_FIRST(&pf->dist_parser_list))) { + TAILQ_REMOVE(&pf->dist_parser_list, p_parser, node); + rte_free(p_parser); + } +} + +static struct ice_parser_list * +ice_get_parser_list(struct ice_flow_parser *parser, + struct ice_adapter *ad) +{ + struct ice_parser_list *list; + struct ice_pf *pf = &ad->pf; + + switch (parser->stage) { + case ICE_FLOW_STAGE_RSS: + list = &pf->rss_parser_list; + break; + case ICE_FLOW_STAGE_PERMISSION: + list = &pf->perm_parser_list; + break; + case ICE_FLOW_STAGE_DISTRIBUTOR: + list = &pf->dist_parser_list; + break; + default: + return NULL; + } + + return list; +} + +int +ice_register_parser(struct ice_flow_parser *parser, + struct ice_adapter *ad) +{ + struct ice_parser_list *list; + struct ice_flow_parser_node *parser_node; + + parser_node = rte_zmalloc("ice_parser", sizeof(*parser_node), 0); + if (parser_node == NULL) { + PMD_DRV_LOG(ERR, "Failed to allocate memory."); + return -ENOMEM; + } + parser_node->parser = parser; + + list = ice_get_parser_list(parser, ad); + if (list == NULL) + return -EINVAL; + + if (ad->devargs.pipe_mode_support) { + TAILQ_INSERT_TAIL(list, parser_node, node); + } else { + if (parser->engine->type == ICE_FLOW_ENGINE_SWITCH || + parser->engine->type == ICE_FLOW_ENGINE_HASH) + TAILQ_INSERT_TAIL(list, parser_node, node); + else if (parser->engine->type == ICE_FLOW_ENGINE_FDIR) + TAILQ_INSERT_HEAD(list, parser_node, node); + else + return -EINVAL; + } + return 0; +} + +void +ice_unregister_parser(struct ice_flow_parser *parser, + struct ice_adapter *ad) +{ + struct ice_parser_list *list; + struct ice_flow_parser_node *p_parser; + void *temp; + + list = ice_get_parser_list(parser, ad); + if (list == NULL) + return; + + TAILQ_FOREACH_SAFE(p_parser, list, node, temp) { + if (p_parser->parser->engine->type == parser->engine->type) { + TAILQ_REMOVE(list, p_parser, node); + rte_free(p_parser); + } + } +} + +static int +ice_flow_valid_attr(struct ice_adapter *ad, + const struct rte_flow_attr *attr, + int *ice_pipeline_stage, + struct rte_flow_error *error) +{ + /* Must be input direction */ + if (!attr->ingress) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, + attr, "Only support ingress."); + return -rte_errno; + } + + /* Not supported */ + if (attr->egress) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, + attr, "Not support egress."); + return -rte_errno; + } + + /* Check pipeline mode support to set classification stage */ + if (ad->devargs.pipe_mode_support) { + if (attr->priority == 0) + *ice_pipeline_stage = + ICE_FLOW_CLASSIFY_STAGE_PERMISSION; + else + *ice_pipeline_stage = + ICE_FLOW_CLASSIFY_STAGE_DISTRIBUTOR; + } else { + *ice_pipeline_stage = + ICE_FLOW_CLASSIFY_STAGE_DISTRIBUTOR_ONLY; + /* Not supported */ + if (attr->priority) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, + attr, "Not support priority."); + return -rte_errno; + } + } + + /* Not supported */ + if (attr->group) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_GROUP, + attr, "Not support group."); + return -rte_errno; + } + + return 0; +} + +/* Find the first VOID or non-VOID item pointer */ +static const struct rte_flow_item * +ice_find_first_item(const struct rte_flow_item *item, bool is_void) +{ + bool is_find; + + while (item->type != RTE_FLOW_ITEM_TYPE_END) { + if (is_void) + is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID; + else + is_find = item->type != RTE_FLOW_ITEM_TYPE_VOID; + if (is_find) + break; + item++; + 
} + return item; +} + +/* Skip all VOID items of the pattern */ +static void +ice_pattern_skip_void_item(struct rte_flow_item *items, + const struct rte_flow_item *pattern) +{ + uint32_t cpy_count = 0; + const struct rte_flow_item *pb = pattern, *pe = pattern; + + for (;;) { + /* Find a non-void item first */ + pb = ice_find_first_item(pb, false); + if (pb->type == RTE_FLOW_ITEM_TYPE_END) { + pe = pb; + break; + } + + /* Find a void item */ + pe = ice_find_first_item(pb + 1, true); + + cpy_count = pe - pb; + rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count); + + items += cpy_count; + + if (pe->type == RTE_FLOW_ITEM_TYPE_END) { + break; + } + + pb = pe + 1; + } + /* Copy the END item. */ + rte_memcpy(items, pe, sizeof(struct rte_flow_item)); +} + +/* Check if the pattern matches a supported item type array */ +static bool +ice_match_pattern(enum rte_flow_item_type *item_array, + const struct rte_flow_item *pattern) +{ + const struct rte_flow_item *item = pattern; + + while ((*item_array == item->type) && + (*item_array != RTE_FLOW_ITEM_TYPE_END)) { + item_array++; + item++; + } + + return (*item_array == RTE_FLOW_ITEM_TYPE_END && + item->type == RTE_FLOW_ITEM_TYPE_END); +} + +struct ice_pattern_match_item * +ice_search_pattern_match_item(const struct rte_flow_item pattern[], + struct ice_pattern_match_item *array, + uint32_t array_len, + struct rte_flow_error *error) +{ + uint16_t i = 0; + struct ice_pattern_match_item *pattern_match_item; + /* need free by each filter */ + struct rte_flow_item *items; /* used for pattern without VOID items */ + uint32_t item_num = 0; /* non-void item number */ + + /* Get the non-void item number of pattern */ + while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) { + if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID) + item_num++; + i++; + } + item_num++; + + items = rte_zmalloc("ice_pattern", + item_num * sizeof(struct rte_flow_item), 0); + if (!items) { + rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM, + NULL, "No memory for PMD internal items."); + return NULL; + } + pattern_match_item = rte_zmalloc("ice_pattern_match_item", + sizeof(struct ice_pattern_match_item), 0); + if (!pattern_match_item) { + rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE, + NULL, "Failed to allocate memory."); + return NULL; + } + + ice_pattern_skip_void_item(items, pattern); + + for (i = 0; i < array_len; i++) + if (ice_match_pattern(array[i].pattern_list, + items)) { + pattern_match_item->input_set_mask = + array[i].input_set_mask; + pattern_match_item->pattern_list = + array[i].pattern_list; + pattern_match_item->meta = array[i].meta; + rte_free(items); + return pattern_match_item; + } + rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, + pattern, "Unsupported pattern"); + + rte_free(items); + rte_free(pattern_match_item); + return NULL; +} + +static struct ice_flow_engine * +ice_parse_engine_create(struct ice_adapter *ad, + struct rte_flow *flow, + struct ice_parser_list *parser_list, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + struct ice_flow_engine *engine = NULL; + struct ice_flow_parser_node *parser_node; + void *meta = NULL; + void *temp; + + TAILQ_FOREACH_SAFE(parser_node, parser_list, node, temp) { + int ret; + + if (parser_node->parser->parse_pattern_action(ad, + parser_node->parser->array, + parser_node->parser->array_len, + pattern, actions, &meta, error) < 0) + continue; + + engine = parser_node->parser->engine; + 
RTE_ASSERT(engine->create != NULL); + ret = engine->create(ad, flow, meta, error); + if (ret == 0) + return engine; + else if (ret == -EEXIST) + return NULL; + } + return NULL; +} + +static struct ice_flow_engine * +ice_parse_engine_validate(struct ice_adapter *ad, + struct rte_flow *flow __rte_unused, + struct ice_parser_list *parser_list, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + struct ice_flow_engine *engine = NULL; + struct ice_flow_parser_node *parser_node; + void *temp; + + TAILQ_FOREACH_SAFE(parser_node, parser_list, node, temp) { + if (parser_node->parser->parse_pattern_action(ad, + parser_node->parser->array, + parser_node->parser->array_len, + pattern, actions, NULL, error) < 0) + continue; + + engine = parser_node->parser->engine; + break; + } + return engine; +} + +static int +ice_flow_process_filter(struct rte_eth_dev *dev, + struct rte_flow *flow, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct ice_flow_engine **engine, + parse_engine_t ice_parse_engine, + struct rte_flow_error *error) +{ + int ret = ICE_ERR_NOT_SUPPORTED; + struct ice_adapter *ad = + ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); + int ice_pipeline_stage = 0; + + if (!pattern) { + rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM, + NULL, "NULL pattern."); + return -rte_errno; + } + + if (!actions) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_NUM, + NULL, "NULL action."); + return -rte_errno; + } + + if (!attr) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR, + NULL, "NULL attribute."); + return -rte_errno; + } + + ret = ice_flow_valid_attr(ad, attr, &ice_pipeline_stage, error); + if (ret) + return ret; + + *engine = ice_parse_engine(ad, flow, &pf->rss_parser_list, + pattern, actions, error); + if (*engine != NULL) + return 0; + + switch (ice_pipeline_stage) { + case ICE_FLOW_CLASSIFY_STAGE_DISTRIBUTOR_ONLY: + case ICE_FLOW_CLASSIFY_STAGE_DISTRIBUTOR: + *engine = ice_parse_engine(ad, flow, &pf->dist_parser_list, + pattern, actions, error); + break; + case ICE_FLOW_CLASSIFY_STAGE_PERMISSION: + *engine = ice_parse_engine(ad, flow, &pf->perm_parser_list, + pattern, actions, error); + break; + default: + return -EINVAL; + } + + if (*engine == NULL) + return -EINVAL; + + return 0; +} + +static int +ice_flow_validate(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + struct ice_flow_engine *engine; + + return ice_flow_process_filter(dev, NULL, attr, pattern, actions, + &engine, ice_parse_engine_validate, error); +} + +static struct rte_flow * +ice_flow_create(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct rte_flow *flow = NULL; + int ret; + struct ice_flow_engine *engine = NULL; + + flow = rte_zmalloc("ice_flow", sizeof(struct rte_flow), 0); + if (!flow) { + rte_flow_error_set(error, ENOMEM, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Failed to allocate memory"); + return flow; + } + + rte_spinlock_lock(&pf->flow_ops_lock); + + ret = ice_flow_process_filter(dev, flow, attr, pattern, actions, + &engine, 
ice_parse_engine_create, error); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Failed to create flow"); + rte_free(flow); + flow = NULL; + goto out; + } + + flow->engine = engine; + TAILQ_INSERT_TAIL(&pf->flow_list, flow, node); + PMD_DRV_LOG(INFO, "Succeeded to create (%d) flow", engine->type); + +out: + rte_spinlock_unlock(&pf->flow_ops_lock); + return flow; +} + +static int +ice_flow_destroy(struct rte_eth_dev *dev, + struct rte_flow *flow, + struct rte_flow_error *error) +{ + struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct ice_adapter *ad = + ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + int ret = 0; + + if (!flow || !flow->engine || !flow->engine->destroy) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_HANDLE, + NULL, "Invalid flow"); + return -rte_errno; + } + + rte_spinlock_lock(&pf->flow_ops_lock); + + ret = flow->engine->destroy(ad, flow, error); + if (!ret) { + TAILQ_REMOVE(&pf->flow_list, flow, node); + rte_free(flow); + } else { + PMD_DRV_LOG(ERR, "Failed to destroy flow"); + } + + rte_spinlock_unlock(&pf->flow_ops_lock); + + return ret; +} + +static int +ice_flow_flush(struct rte_eth_dev *dev, + struct rte_flow_error *error) +{ + struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct rte_flow *p_flow; + void *temp; + int ret = 0; + + TAILQ_FOREACH_SAFE(p_flow, &pf->flow_list, node, temp) { + ret = ice_flow_destroy(dev, p_flow, error); + if (ret) { + PMD_DRV_LOG(ERR, "Failed to flush flows"); + return -EINVAL; + } + } + + return ret; +} + +static int +ice_flow_query(struct rte_eth_dev *dev, + struct rte_flow *flow, + const struct rte_flow_action *actions, + void *data, + struct rte_flow_error *error) +{ + int ret = -EINVAL; + struct ice_adapter *ad = + ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + struct rte_flow_query_count *count = data; + struct ice_pf *pf = &ad->pf; + + if (!flow || !flow->engine || !flow->engine->query_count) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_HANDLE, + NULL, "Invalid flow"); + return -rte_errno; + } + + rte_spinlock_lock(&pf->flow_ops_lock); + + for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { + switch (actions->type) { + case RTE_FLOW_ACTION_TYPE_VOID: + break; + case RTE_FLOW_ACTION_TYPE_COUNT: + ret = flow->engine->query_count(ad, flow, count, error); + break; + default: + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + actions, + "action not supported"); + } + } + + rte_spinlock_unlock(&pf->flow_ops_lock); + + return ret; +} + +int +ice_flow_redirect(struct ice_adapter *ad, + struct ice_flow_redirect *rd) +{ + struct ice_pf *pf = &ad->pf; + struct rte_flow *p_flow; + void *temp; + int ret; + + rte_spinlock_lock(&pf->flow_ops_lock); + + TAILQ_FOREACH_SAFE(p_flow, &pf->flow_list, node, temp) { + if (!p_flow->engine->redirect) + continue; + ret = p_flow->engine->redirect(ad, p_flow, rd); + if (ret) { + PMD_DRV_LOG(ERR, "Failed to redirect flows"); + return ret; + } + } + + rte_spinlock_unlock(&pf->flow_ops_lock); + + return 0; +} diff --git a/src/spdk/dpdk/drivers/net/ice/ice_generic_flow.h b/src/spdk/dpdk/drivers/net/ice/ice_generic_flow.h new file mode 100644 index 000000000..492a48cd9 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ice/ice_generic_flow.h @@ -0,0 +1,556 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019 Intel Corporation + */ + +#ifndef _ICE_GENERIC_FLOW_H_ +#define _ICE_GENERIC_FLOW_H_ + +#include + +/* protocol */ + +#define ICE_PROT_MAC_INNER (1ULL << 1) +#define ICE_PROT_MAC_OUTER (1ULL 
<< 2) +#define ICE_PROT_VLAN_INNER (1ULL << 3) +#define ICE_PROT_VLAN_OUTER (1ULL << 4) +#define ICE_PROT_IPV4_INNER (1ULL << 5) +#define ICE_PROT_IPV4_OUTER (1ULL << 6) +#define ICE_PROT_IPV6_INNER (1ULL << 7) +#define ICE_PROT_IPV6_OUTER (1ULL << 8) +#define ICE_PROT_TCP_INNER (1ULL << 9) +#define ICE_PROT_TCP_OUTER (1ULL << 10) +#define ICE_PROT_UDP_INNER (1ULL << 11) +#define ICE_PROT_UDP_OUTER (1ULL << 12) +#define ICE_PROT_SCTP_INNER (1ULL << 13) +#define ICE_PROT_SCTP_OUTER (1ULL << 14) +#define ICE_PROT_ICMP4_INNER (1ULL << 15) +#define ICE_PROT_ICMP4_OUTER (1ULL << 16) +#define ICE_PROT_ICMP6_INNER (1ULL << 17) +#define ICE_PROT_ICMP6_OUTER (1ULL << 18) +#define ICE_PROT_VXLAN (1ULL << 19) +#define ICE_PROT_NVGRE (1ULL << 20) +#define ICE_PROT_GTPU (1ULL << 21) +#define ICE_PROT_PPPOE_S (1ULL << 22) +#define ICE_PROT_ESP (1ULL << 23) +#define ICE_PROT_AH (1ULL << 24) +#define ICE_PROT_L2TPV3OIP (1ULL << 25) +#define ICE_PROT_PFCP (1ULL << 26) + +/* field */ + +#define ICE_SMAC (1ULL << 63) +#define ICE_DMAC (1ULL << 62) +#define ICE_ETHERTYPE (1ULL << 61) +#define ICE_IP_SRC (1ULL << 60) +#define ICE_IP_DST (1ULL << 59) +#define ICE_IP_PROTO (1ULL << 58) +#define ICE_IP_TTL (1ULL << 57) +#define ICE_IP_TOS (1ULL << 56) +#define ICE_SPORT (1ULL << 55) +#define ICE_DPORT (1ULL << 54) +#define ICE_ICMP_TYPE (1ULL << 53) +#define ICE_ICMP_CODE (1ULL << 52) +#define ICE_VXLAN_VNI (1ULL << 51) +#define ICE_NVGRE_TNI (1ULL << 50) +#define ICE_GTPU_TEID (1ULL << 49) +#define ICE_GTPU_QFI (1ULL << 48) +#define ICE_PPPOE_SESSION (1ULL << 47) +#define ICE_PPPOE_PROTO (1ULL << 46) +#define ICE_ESP_SPI (1ULL << 45) +#define ICE_AH_SPI (1ULL << 44) +#define ICE_L2TPV3OIP_SESSION_ID (1ULL << 43) +#define ICE_PFCP_SEID (1ULL << 42) +#define ICE_PFCP_S_FIELD (1ULL << 41) + +/* input set */ + +#define ICE_INSET_NONE 0ULL + +/* non-tunnel */ + +#define ICE_INSET_SMAC (ICE_PROT_MAC_OUTER | ICE_SMAC) +#define ICE_INSET_DMAC (ICE_PROT_MAC_OUTER | ICE_DMAC) +#define ICE_INSET_VLAN_INNER (ICE_PROT_VLAN_INNER) +#define ICE_INSET_VLAN_OUTER (ICE_PROT_VLAN_OUTER) +#define ICE_INSET_ETHERTYPE (ICE_ETHERTYPE) + +#define ICE_INSET_IPV4_SRC \ + (ICE_PROT_IPV4_OUTER | ICE_IP_SRC) +#define ICE_INSET_IPV4_DST \ + (ICE_PROT_IPV4_OUTER | ICE_IP_DST) +#define ICE_INSET_IPV4_TOS \ + (ICE_PROT_IPV4_OUTER | ICE_IP_TOS) +#define ICE_INSET_IPV4_PROTO \ + (ICE_PROT_IPV4_OUTER | ICE_IP_PROTO) +#define ICE_INSET_IPV4_TTL \ + (ICE_PROT_IPV4_OUTER | ICE_IP_TTL) +#define ICE_INSET_IPV6_SRC \ + (ICE_PROT_IPV6_OUTER | ICE_IP_SRC) +#define ICE_INSET_IPV6_DST \ + (ICE_PROT_IPV6_OUTER | ICE_IP_DST) +#define ICE_INSET_IPV6_NEXT_HDR \ + (ICE_PROT_IPV6_OUTER | ICE_IP_PROTO) +#define ICE_INSET_IPV6_HOP_LIMIT \ + (ICE_PROT_IPV6_OUTER | ICE_IP_TTL) +#define ICE_INSET_IPV6_TC \ + (ICE_PROT_IPV6_OUTER | ICE_IP_TOS) + +#define ICE_INSET_TCP_SRC_PORT \ + (ICE_PROT_TCP_OUTER | ICE_SPORT) +#define ICE_INSET_TCP_DST_PORT \ + (ICE_PROT_TCP_OUTER | ICE_DPORT) +#define ICE_INSET_UDP_SRC_PORT \ + (ICE_PROT_UDP_OUTER | ICE_SPORT) +#define ICE_INSET_UDP_DST_PORT \ + (ICE_PROT_UDP_OUTER | ICE_DPORT) +#define ICE_INSET_SCTP_SRC_PORT \ + (ICE_PROT_SCTP_OUTER | ICE_SPORT) +#define ICE_INSET_SCTP_DST_PORT \ + (ICE_PROT_SCTP_OUTER | ICE_DPORT) +#define ICE_INSET_ICMP4_SRC_PORT \ + (ICE_PROT_ICMP4_OUTER | ICE_SPORT) +#define ICE_INSET_ICMP4_DST_PORT \ + (ICE_PROT_ICMP4_OUTER | ICE_DPORT) +#define ICE_INSET_ICMP6_SRC_PORT \ + (ICE_PROT_ICMP6_OUTER | ICE_SPORT) +#define ICE_INSET_ICMP6_DST_PORT \ + (ICE_PROT_ICMP6_OUTER | ICE_DPORT) +#define 
ICE_INSET_ICMP4_TYPE \ + (ICE_PROT_ICMP4_OUTER | ICE_ICMP_TYPE) +#define ICE_INSET_ICMP4_CODE \ + (ICE_PROT_ICMP4_OUTER | ICE_ICMP_CODE) +#define ICE_INSET_ICMP6_TYPE \ + (ICE_PROT_ICMP6_OUTER | ICE_ICMP_TYPE) +#define ICE_INSET_ICMP6_CODE \ + (ICE_PROT_ICMP6_OUTER | ICE_ICMP_CODE) + +/* tunnel */ + +#define ICE_INSET_TUN_SMAC \ + (ICE_PROT_MAC_INNER | ICE_SMAC) +#define ICE_INSET_TUN_DMAC \ + (ICE_PROT_MAC_INNER | ICE_DMAC) + +#define ICE_INSET_TUN_IPV4_SRC \ + (ICE_PROT_IPV4_INNER | ICE_IP_SRC) +#define ICE_INSET_TUN_IPV4_DST \ + (ICE_PROT_IPV4_INNER | ICE_IP_DST) +#define ICE_INSET_TUN_IPV4_TTL \ + (ICE_PROT_IPV4_INNER | ICE_IP_TTL) +#define ICE_INSET_TUN_IPV4_PROTO \ + (ICE_PROT_IPV4_INNER | ICE_IP_PROTO) +#define ICE_INSET_TUN_IPV4_TOS \ + (ICE_PROT_IPV4_INNER | ICE_IP_TOS) +#define ICE_INSET_TUN_IPV6_SRC \ + (ICE_PROT_IPV6_INNER | ICE_IP_SRC) +#define ICE_INSET_TUN_IPV6_DST \ + (ICE_PROT_IPV6_INNER | ICE_IP_DST) +#define ICE_INSET_TUN_IPV6_HOP_LIMIT \ + (ICE_PROT_IPV6_INNER | ICE_IP_TTL) +#define ICE_INSET_TUN_IPV6_NEXT_HDR \ + (ICE_PROT_IPV6_INNER | ICE_IP_PROTO) +#define ICE_INSET_TUN_IPV6_TC \ + (ICE_PROT_IPV6_INNER | ICE_IP_TOS) + +#define ICE_INSET_TUN_TCP_SRC_PORT \ + (ICE_PROT_TCP_INNER | ICE_SPORT) +#define ICE_INSET_TUN_TCP_DST_PORT \ + (ICE_PROT_TCP_INNER | ICE_DPORT) +#define ICE_INSET_TUN_UDP_SRC_PORT \ + (ICE_PROT_UDP_INNER | ICE_SPORT) +#define ICE_INSET_TUN_UDP_DST_PORT \ + (ICE_PROT_UDP_INNER | ICE_DPORT) +#define ICE_INSET_TUN_SCTP_SRC_PORT \ + (ICE_PROT_SCTP_INNER | ICE_SPORT) +#define ICE_INSET_TUN_SCTP_DST_PORT \ + (ICE_PROT_SCTP_INNER | ICE_DPORT) +#define ICE_INSET_TUN_ICMP4_SRC_PORT \ + (ICE_PROT_ICMP4_INNER | ICE_SPORT) +#define ICE_INSET_TUN_ICMP4_DST_PORT \ + (ICE_PROT_ICMP4_INNER | ICE_DPORT) +#define ICE_INSET_TUN_ICMP6_SRC_PORT \ + (ICE_PROT_ICMP6_INNER | ICE_SPORT) +#define ICE_INSET_TUN_ICMP6_DST_PORT \ + (ICE_PROT_ICMP6_INNER | ICE_DPORT) +#define ICE_INSET_TUN_ICMP4_TYPE \ + (ICE_PROT_ICMP4_INNER | ICE_ICMP_TYPE) +#define ICE_INSET_TUN_ICMP4_CODE \ + (ICE_PROT_ICMP4_INNER | ICE_ICMP_CODE) +#define ICE_INSET_TUN_ICMP6_TYPE \ + (ICE_PROT_ICMP6_INNER | ICE_ICMP_TYPE) +#define ICE_INSET_TUN_ICMP6_CODE \ + (ICE_PROT_ICMP6_INNER | ICE_ICMP_CODE) + +#define ICE_INSET_TUN_VXLAN_VNI \ + (ICE_PROT_VXLAN | ICE_VXLAN_VNI) +#define ICE_INSET_TUN_NVGRE_TNI \ + (ICE_PROT_NVGRE | ICE_NVGRE_TNI) +#define ICE_INSET_GTPU_TEID \ + (ICE_PROT_GTPU | ICE_GTPU_TEID) +#define ICE_INSET_GTPU_QFI \ + (ICE_PROT_GTPU | ICE_GTPU_QFI) +#define ICE_INSET_PPPOE_SESSION \ + (ICE_PROT_PPPOE_S | ICE_PPPOE_SESSION) +#define ICE_INSET_PPPOE_PROTO \ + (ICE_PROT_PPPOE_S | ICE_PPPOE_PROTO) +#define ICE_INSET_ESP_SPI \ + (ICE_PROT_ESP | ICE_ESP_SPI) +#define ICE_INSET_AH_SPI \ + (ICE_PROT_AH | ICE_AH_SPI) +#define ICE_INSET_L2TPV3OIP_SESSION_ID \ + (ICE_PROT_L2TPV3OIP | ICE_L2TPV3OIP_SESSION_ID) +#define ICE_INSET_PFCP_S_FIELD \ + (ICE_PROT_PFCP | ICE_PFCP_S_FIELD) +#define ICE_INSET_PFCP_SEID \ + (ICE_PROT_PFCP | ICE_PFCP_S_FIELD | ICE_PFCP_SEID) + +/* empty pattern */ +extern enum rte_flow_item_type pattern_empty[]; + +/* L2 */ +extern enum rte_flow_item_type pattern_ethertype[]; +extern enum rte_flow_item_type pattern_ethertype_vlan[]; +extern enum rte_flow_item_type pattern_ethertype_qinq[]; + +/* ARP */ +extern enum rte_flow_item_type pattern_eth_arp[]; + +/* non-tunnel IPv4 */ +extern enum rte_flow_item_type pattern_eth_ipv4[]; +extern enum rte_flow_item_type pattern_eth_vlan_ipv4[]; +extern enum rte_flow_item_type pattern_eth_qinq_ipv4[]; +extern enum rte_flow_item_type 
pattern_eth_ipv4_udp[]; +extern enum rte_flow_item_type pattern_eth_vlan_ipv4_udp[]; +extern enum rte_flow_item_type pattern_eth_qinq_ipv4_udp[]; +extern enum rte_flow_item_type pattern_eth_ipv4_tcp[]; +extern enum rte_flow_item_type pattern_eth_vlan_ipv4_tcp[]; +extern enum rte_flow_item_type pattern_eth_qinq_ipv4_tcp[]; +extern enum rte_flow_item_type pattern_eth_ipv4_sctp[]; +extern enum rte_flow_item_type pattern_eth_vlan_ipv4_sctp[]; +extern enum rte_flow_item_type pattern_eth_qinq_ipv4_sctp[]; +extern enum rte_flow_item_type pattern_eth_ipv4_icmp[]; +extern enum rte_flow_item_type pattern_eth_vlan_ipv4_icmp[]; +extern enum rte_flow_item_type pattern_eth_qinq_ipv4_icmp[]; + +/* non-tunnel IPv6 */ +extern enum rte_flow_item_type pattern_eth_ipv6[]; +extern enum rte_flow_item_type pattern_eth_vlan_ipv6[]; +extern enum rte_flow_item_type pattern_eth_qinq_ipv6[]; +extern enum rte_flow_item_type pattern_eth_ipv6_udp[]; +extern enum rte_flow_item_type pattern_eth_vlan_ipv6_udp[]; +extern enum rte_flow_item_type pattern_eth_qinq_ipv6_udp[]; +extern enum rte_flow_item_type pattern_eth_ipv6_tcp[]; +extern enum rte_flow_item_type pattern_eth_vlan_ipv6_tcp[]; +extern enum rte_flow_item_type pattern_eth_qinq_ipv6_tcp[]; +extern enum rte_flow_item_type pattern_eth_ipv6_sctp[]; +extern enum rte_flow_item_type pattern_eth_vlan_ipv6_sctp[]; +extern enum rte_flow_item_type pattern_eth_qinq_ipv6_sctp[]; +extern enum rte_flow_item_type pattern_eth_ipv6_icmp6[]; +extern enum rte_flow_item_type pattern_eth_vlan_ipv6_icmp6[]; +extern enum rte_flow_item_type pattern_eth_qinq_ipv6_icmp6[]; + +/* IPv4 VXLAN IPv4 */ +extern enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan_ipv4[]; +extern enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan_ipv4_udp[]; +extern enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan_ipv4_tcp[]; +extern enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan_ipv4_sctp[]; +extern enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan_ipv4_icmp[]; + +/* IPv4 VXLAN MAC IPv4 */ +extern enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan_eth_ipv4[]; +extern enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp[]; +extern enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp[]; +extern enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan_eth_ipv4_sctp[]; +extern enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan_eth_ipv4_icmp[]; + +/* IPv6 VXLAN IPv4 */ +extern enum rte_flow_item_type pattern_eth_ipv6_udp_vxlan_ipv4[]; +extern enum rte_flow_item_type pattern_eth_ipv6_udp_vxlan_ipv4_tcp[]; +extern enum rte_flow_item_type pattern_eth_ipv6_udp_vxlan_ipv4_udp[]; +extern enum rte_flow_item_type pattern_eth_ipv6_udp_vxlan_ipv4_sctp[]; +extern enum rte_flow_item_type pattern_eth_ipv6_udp_vxlan_ipv4_icmp[]; + +/* IPv6 VXLAN MAC IPv4 */ +extern enum rte_flow_item_type pattern_eth_ipv6_udp_vxlan_eth_ipv4[]; +extern enum rte_flow_item_type pattern_eth_ipv6_udp_vxlan_eth_ipv4_tcp[]; +extern enum rte_flow_item_type pattern_eth_ipv6_udp_vxlan_eth_ipv4_udp[]; +extern enum rte_flow_item_type pattern_eth_ipv6_udp_vxlan_eth_ipv4_sctp[]; +extern enum rte_flow_item_type pattern_eth_ipv6_udp_vxlan_eth_ipv4_icmp[]; + +/* IPv4 VXLAN IPv6 */ +extern enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan_ipv6[]; +extern enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan_ipv6_udp[]; +extern enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan_ipv6_tcp[]; +extern enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan_ipv6_sctp[]; +extern enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan_ipv6_icmp6[]; + +/* IPv4 VXLAN 
MAC IPv6 */ +extern enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan_eth_ipv6[]; +extern enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan_eth_ipv6_udp[]; +extern enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan_eth_ipv6_tcp[]; +extern enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan_eth_ipv6_sctp[]; +extern enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan_eth_ipv6_icmp6[]; + +/* IPv6 VXLAN IPv6 */ +extern enum rte_flow_item_type pattern_eth_ipv6_udp_vxlan_ipv6[]; +extern enum rte_flow_item_type pattern_eth_ipv6_udp_vxlan_ipv6_tcp[]; +extern enum rte_flow_item_type pattern_eth_ipv6_udp_vxlan_ipv6_udp[]; +extern enum rte_flow_item_type pattern_eth_ipv6_udp_vxlan_ipv6_sctp[]; +extern enum rte_flow_item_type pattern_eth_ipv6_udp_vxlan_ipv6_icmp6[]; + +/* IPv6 VXLAN MAC IPv6 */ +extern enum rte_flow_item_type pattern_eth_ipv6_udp_vxlan_eth_ipv6[]; +extern enum rte_flow_item_type pattern_eth_ipv6_udp_vxlan_eth_ipv6_tcp[]; +extern enum rte_flow_item_type pattern_eth_ipv6_udp_vxlan_eth_ipv6_udp[]; +extern enum rte_flow_item_type pattern_eth_ipv6_udp_vxlan_eth_ipv6_sctp[]; +extern enum rte_flow_item_type pattern_eth_ipv6_udp_vxlan_eth_ipv6_icmp6[]; + +/* IPv4 NVGRE IPv4 */ +extern enum rte_flow_item_type pattern_eth_ipv4_nvgre_ipv4[]; +extern enum rte_flow_item_type pattern_eth_ipv4_nvgre_ipv4_udp[]; +extern enum rte_flow_item_type pattern_eth_ipv4_nvgre_ipv4_tcp[]; +extern enum rte_flow_item_type pattern_eth_ipv4_nvgre_ipv4_sctp[]; +extern enum rte_flow_item_type pattern_eth_ipv4_nvgre_ipv4_icmp[]; + +/* IPv4 NVGRE MAC IPv4 */ +extern enum rte_flow_item_type pattern_eth_ipv4_nvgre_eth_ipv4[]; +extern enum rte_flow_item_type pattern_eth_ipv4_nvgre_eth_ipv4_udp[]; +extern enum rte_flow_item_type pattern_eth_ipv4_nvgre_eth_ipv4_tcp[]; +extern enum rte_flow_item_type pattern_eth_ipv4_nvgre_eth_ipv4_sctp[]; +extern enum rte_flow_item_type pattern_eth_ipv4_nvgre_eth_ipv4_icmp[]; + +/* IPv6 NVGRE IPv4 */ +extern enum rte_flow_item_type pattern_eth_ipv6_nvgre_ipv4[]; +extern enum rte_flow_item_type pattern_eth_ipv6_nvgre_ipv4_tcp[]; +extern enum rte_flow_item_type pattern_eth_ipv6_nvgre_ipv4_udp[]; +extern enum rte_flow_item_type pattern_eth_ipv6_nvgre_ipv4_sctp[]; +extern enum rte_flow_item_type pattern_eth_ipv6_nvgre_ipv4_icmp[]; + +/* IPv6 NVGRE MAC IPv4 */ +extern enum rte_flow_item_type pattern_eth_ipv6_nvgre_eth_ipv4[]; +extern enum rte_flow_item_type pattern_eth_ipv6_nvgre_eth_ipv4_tcp[]; +extern enum rte_flow_item_type pattern_eth_ipv6_nvgre_eth_ipv4_udp[]; +extern enum rte_flow_item_type pattern_eth_ipv6_nvgre_eth_ipv4_sctp[]; +extern enum rte_flow_item_type pattern_eth_ipv6_nvgre_eth_ipv4_icmp[]; + +/* IPv4 NVGRE IPv6 */ +extern enum rte_flow_item_type pattern_eth_ipv4_nvgre_ipv6[]; +extern enum rte_flow_item_type pattern_eth_ipv4_nvgre_ipv6_udp[]; +extern enum rte_flow_item_type pattern_eth_ipv4_nvgre_ipv6_tcp[]; +extern enum rte_flow_item_type pattern_eth_ipv4_nvgre_ipv6_sctp[]; +extern enum rte_flow_item_type pattern_eth_ipv4_nvgre_ipv6_icmp6[]; + +/* IPv4 NVGRE MAC IPv6 */ +extern enum rte_flow_item_type pattern_eth_ipv4_nvgre_eth_ipv6[]; +extern enum rte_flow_item_type pattern_eth_ipv4_nvgre_eth_ipv6_udp[]; +extern enum rte_flow_item_type pattern_eth_ipv4_nvgre_eth_ipv6_tcp[]; +extern enum rte_flow_item_type pattern_eth_ipv4_nvgre_eth_ipv6_sctp[]; +extern enum rte_flow_item_type pattern_eth_ipv4_nvgre_eth_ipv6_icmp6[]; + +/* IPv6 NVGRE IPv6 */ +extern enum rte_flow_item_type pattern_eth_ipv6_nvgre_ipv6[]; +extern enum rte_flow_item_type pattern_eth_ipv6_nvgre_ipv6_tcp[]; +extern 
enum rte_flow_item_type pattern_eth_ipv6_nvgre_ipv6_udp[]; +extern enum rte_flow_item_type pattern_eth_ipv6_nvgre_ipv6_sctp[]; +extern enum rte_flow_item_type pattern_eth_ipv6_nvgre_ipv6_icmp6[]; + +/* IPv6 NVGRE MAC IPv6 */ +extern enum rte_flow_item_type pattern_eth_ipv6_nvgre_eth_ipv6[]; +extern enum rte_flow_item_type pattern_eth_ipv6_nvgre_eth_ipv6_tcp[]; +extern enum rte_flow_item_type pattern_eth_ipv6_nvgre_eth_ipv6_udp[]; +extern enum rte_flow_item_type pattern_eth_ipv6_nvgre_eth_ipv6_sctp[]; +extern enum rte_flow_item_type pattern_eth_ipv6_nvgre_eth_ipv6_icmp6[]; + +/* GTPU */ +extern enum rte_flow_item_type pattern_eth_ipv4_gtpu[]; +extern enum rte_flow_item_type pattern_eth_ipv4_gtpu_ipv4[]; +extern enum rte_flow_item_type pattern_eth_ipv4_gtpu_eh[]; +extern enum rte_flow_item_type pattern_eth_ipv4_gtpu_eh_ipv4[]; +extern enum rte_flow_item_type pattern_eth_ipv4_gtpu_eh_ipv4_udp[]; +extern enum rte_flow_item_type pattern_eth_ipv4_gtpu_eh_ipv4_tcp[]; +extern enum rte_flow_item_type pattern_eth_ipv4_gtpu_eh_ipv4_icmp[]; + +/* PPPoE */ +extern enum rte_flow_item_type pattern_eth_pppoed[]; +extern enum rte_flow_item_type pattern_eth_vlan_pppoed[]; +extern enum rte_flow_item_type pattern_eth_qinq_pppoed[]; +extern enum rte_flow_item_type pattern_eth_pppoes[]; +extern enum rte_flow_item_type pattern_eth_pppoes_proto[]; +extern enum rte_flow_item_type pattern_eth_vlan_pppoes[]; +extern enum rte_flow_item_type pattern_eth_vlan_pppoes_proto[]; +extern enum rte_flow_item_type pattern_eth_qinq_pppoes[]; +extern enum rte_flow_item_type pattern_eth_pppoes_ipv4[]; +extern enum rte_flow_item_type pattern_eth_vlan_pppoes_ipv4[]; +extern enum rte_flow_item_type pattern_eth_qinq_pppoes_ipv4[]; +extern enum rte_flow_item_type pattern_eth_pppoes_ipv4_udp[]; +extern enum rte_flow_item_type pattern_eth_vlan_pppoes_ipv4_udp[]; +extern enum rte_flow_item_type pattern_eth_qinq_pppoes_ipv4_udp[]; +extern enum rte_flow_item_type pattern_eth_pppoes_ipv4_tcp[]; +extern enum rte_flow_item_type pattern_eth_vlan_pppoes_ipv4_tcp[]; +extern enum rte_flow_item_type pattern_eth_qinq_pppoes_ipv4_tcp[]; +extern enum rte_flow_item_type pattern_eth_pppoes_ipv4_sctp[]; +extern enum rte_flow_item_type pattern_eth_vlan_pppoes_ipv4_sctp[]; +extern enum rte_flow_item_type pattern_eth_qinq_pppoes_ipv4_sctp[]; +extern enum rte_flow_item_type pattern_eth_pppoes_ipv4_icmp[]; +extern enum rte_flow_item_type pattern_eth_vlan_pppoes_ipv4_icmp[]; +extern enum rte_flow_item_type pattern_eth_qinq_pppoes_ipv4_icmp[]; +extern enum rte_flow_item_type pattern_eth_pppoes_ipv6[]; +extern enum rte_flow_item_type pattern_eth_vlan_pppoes_ipv6[]; +extern enum rte_flow_item_type pattern_eth_qinq_pppoes_ipv6[]; +extern enum rte_flow_item_type pattern_eth_pppoes_ipv6_udp[]; +extern enum rte_flow_item_type pattern_eth_vlan_pppoes_ipv6_udp[]; +extern enum rte_flow_item_type pattern_eth_qinq_pppoes_ipv6_udp[]; +extern enum rte_flow_item_type pattern_eth_pppoes_ipv6_tcp[]; +extern enum rte_flow_item_type pattern_eth_vlan_pppoes_ipv6_tcp[]; +extern enum rte_flow_item_type pattern_eth_qinq_pppoes_ipv6_tcp[]; +extern enum rte_flow_item_type pattern_eth_pppoes_ipv6_sctp[]; +extern enum rte_flow_item_type pattern_eth_vlan_pppoes_ipv6_sctp[]; +extern enum rte_flow_item_type pattern_eth_qinq_pppoes_ipv6_sctp[]; +extern enum rte_flow_item_type pattern_eth_pppoes_ipv6_icmp6[]; +extern enum rte_flow_item_type pattern_eth_vlan_pppoes_ipv6_icmp6[]; +extern enum rte_flow_item_type pattern_eth_qinq_pppoes_ipv6_icmp6[]; + +/* ESP */ +extern enum rte_flow_item_type 
pattern_eth_ipv4_esp[]; +extern enum rte_flow_item_type pattern_eth_ipv4_udp_esp[]; +extern enum rte_flow_item_type pattern_eth_ipv6_esp[]; +extern enum rte_flow_item_type pattern_eth_ipv6_udp_esp[]; + +/* AH */ +extern enum rte_flow_item_type pattern_eth_ipv4_ah[]; +extern enum rte_flow_item_type pattern_eth_ipv6_ah[]; +extern enum rte_flow_item_type pattern_eth_ipv6_udp_ah[]; + +/* L2TP */ +extern enum rte_flow_item_type pattern_eth_ipv4_l2tp[]; +extern enum rte_flow_item_type pattern_eth_ipv6_l2tp[]; + +/* PFCP */ +extern enum rte_flow_item_type pattern_eth_ipv4_pfcp[]; +extern enum rte_flow_item_type pattern_eth_ipv6_pfcp[]; + +struct ice_adapter; + +extern const struct rte_flow_ops ice_flow_ops; + +/* engine types. */ +enum ice_flow_engine_type { + ICE_FLOW_ENGINE_NONE = 0, + ICE_FLOW_ENGINE_FDIR, + ICE_FLOW_ENGINE_SWITCH, + ICE_FLOW_ENGINE_HASH, + ICE_FLOW_ENGINE_ACL, + ICE_FLOW_ENGINE_MAX, +}; + +/** + * classification stages. + * for non-pipeline mode, we have two classification stages: Distributor/RSS + * for pipeline-mode we have three classification stages: + * Permission/Distributor/RSS + */ +enum ice_flow_classification_stage { + ICE_FLOW_STAGE_NONE = 0, + ICE_FLOW_STAGE_RSS, + ICE_FLOW_STAGE_PERMISSION, + ICE_FLOW_STAGE_DISTRIBUTOR, + ICE_FLOW_STAGE_MAX, +}; +/* pattern structure */ +struct ice_pattern_match_item { + enum rte_flow_item_type *pattern_list; + /* pattern_list must end with RTE_FLOW_ITEM_TYPE_END */ + uint64_t input_set_mask; + void *meta; +}; + +enum ice_flow_redirect_type { + ICE_FLOW_REDIRECT_VSI, +}; + +struct ice_flow_redirect { + enum ice_flow_redirect_type type; + union { + struct { + uint16_t vsi_handle; + uint16_t new_vsi_num; + }; + }; +}; + +typedef int (*engine_init_t)(struct ice_adapter *ad); +typedef void (*engine_uninit_t)(struct ice_adapter *ad); +typedef int (*engine_create_t)(struct ice_adapter *ad, + struct rte_flow *flow, + void *meta, + struct rte_flow_error *error); +typedef int (*engine_destroy_t)(struct ice_adapter *ad, + struct rte_flow *flow, + struct rte_flow_error *error); +typedef int (*engine_query_t)(struct ice_adapter *ad, + struct rte_flow *flow, + struct rte_flow_query_count *count, + struct rte_flow_error *error); +typedef int(*engine_redirect_t)(struct ice_adapter *ad, + struct rte_flow *flow, + struct ice_flow_redirect *redirect); +typedef void (*engine_free_t) (struct rte_flow *flow); +typedef int (*parse_pattern_action_t)(struct ice_adapter *ad, + struct ice_pattern_match_item *array, + uint32_t array_len, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + void **meta, + struct rte_flow_error *error); + +/* Struct to store engine created. */ +struct ice_flow_engine { + TAILQ_ENTRY(ice_flow_engine) node; + engine_init_t init; + engine_uninit_t uninit; + engine_create_t create; + engine_destroy_t destroy; + engine_query_t query_count; + engine_redirect_t redirect; + engine_free_t free; + enum ice_flow_engine_type type; +}; +TAILQ_HEAD(ice_engine_list, ice_flow_engine); + +/* Struct to store flow created. */ +struct rte_flow { + TAILQ_ENTRY(rte_flow) node; + struct ice_flow_engine *engine; + void *rule; +}; + +struct ice_flow_parser { + struct ice_flow_engine *engine; + struct ice_pattern_match_item *array; + uint32_t array_len; + parse_pattern_action_t parse_pattern_action; + enum ice_flow_classification_stage stage; +}; + +/* Struct to store parser created. 
*/ +struct ice_flow_parser_node { + TAILQ_ENTRY(ice_flow_parser_node) node; + struct ice_flow_parser *parser; +}; + +void ice_register_flow_engine(struct ice_flow_engine *engine); +int ice_flow_init(struct ice_adapter *ad); +void ice_flow_uninit(struct ice_adapter *ad); +int ice_register_parser(struct ice_flow_parser *parser, + struct ice_adapter *ad); +void ice_unregister_parser(struct ice_flow_parser *parser, + struct ice_adapter *ad); +struct ice_pattern_match_item * +ice_search_pattern_match_item(const struct rte_flow_item pattern[], + struct ice_pattern_match_item *array, + uint32_t array_len, + struct rte_flow_error *error); +int +ice_flow_redirect(struct ice_adapter *ad, + struct ice_flow_redirect *rd); +#endif diff --git a/src/spdk/dpdk/drivers/net/ice/ice_hash.c b/src/spdk/dpdk/drivers/net/ice/ice_hash.c new file mode 100644 index 000000000..11435cbfb --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ice/ice_hash.c @@ -0,0 +1,588 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019 Intel Corporation + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ice_logs.h" +#include "base/ice_type.h" +#include "base/ice_flow.h" +#include "ice_ethdev.h" +#include "ice_generic_flow.h" + +struct rss_type_match_hdr { + uint32_t hdr_mask; + uint64_t eth_rss_hint; +}; + +struct ice_hash_match_type { + uint64_t hash_type; + uint64_t hash_flds; +}; + +struct rss_meta { + uint32_t pkt_hdr; + uint64_t hash_flds; + uint8_t hash_function; +}; + +struct ice_hash_flow_cfg { + bool simple_xor; + struct ice_rss_cfg rss_cfg; +}; + +static int +ice_hash_init(struct ice_adapter *ad); + +static int +ice_hash_create(struct ice_adapter *ad, + struct rte_flow *flow, + void *meta, + struct rte_flow_error *error); + +static int +ice_hash_destroy(struct ice_adapter *ad, + struct rte_flow *flow, + struct rte_flow_error *error); + +static void +ice_hash_uninit(struct ice_adapter *ad); + +static void +ice_hash_free(struct rte_flow *flow); + +static int +ice_hash_parse_pattern_action(struct ice_adapter *ad, + struct ice_pattern_match_item *array, + uint32_t array_len, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + void **meta, + struct rte_flow_error *error); + +/* The first member is protocol header, the second member is ETH_RSS_*. 
*/ +struct rss_type_match_hdr hint_0 = { + ICE_FLOW_SEG_HDR_NONE, 0}; +struct rss_type_match_hdr hint_1 = { + ICE_FLOW_SEG_HDR_IPV4, ETH_RSS_IPV4}; +struct rss_type_match_hdr hint_2 = { + ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_UDP, ETH_RSS_NONFRAG_IPV4_UDP}; +struct rss_type_match_hdr hint_3 = { + ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_TCP, ETH_RSS_NONFRAG_IPV4_TCP}; +struct rss_type_match_hdr hint_4 = { + ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_SCTP, ETH_RSS_NONFRAG_IPV4_SCTP}; +struct rss_type_match_hdr hint_5 = { + ICE_FLOW_SEG_HDR_IPV6, ETH_RSS_IPV6}; +struct rss_type_match_hdr hint_6 = { + ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_UDP, ETH_RSS_NONFRAG_IPV6_UDP}; +struct rss_type_match_hdr hint_7 = { + ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_TCP, ETH_RSS_NONFRAG_IPV6_TCP}; +struct rss_type_match_hdr hint_8 = { + ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_SCTP, ETH_RSS_NONFRAG_IPV6_SCTP}; +struct rss_type_match_hdr hint_9 = { + ICE_FLOW_SEG_HDR_GTPU_EH, ETH_RSS_IPV4}; +struct rss_type_match_hdr hint_10 = { + ICE_FLOW_SEG_HDR_PPPOE, ETH_RSS_IPV4}; +struct rss_type_match_hdr hint_11 = { + ICE_FLOW_SEG_HDR_PPPOE, ETH_RSS_NONFRAG_IPV4_UDP}; +struct rss_type_match_hdr hint_12 = { + ICE_FLOW_SEG_HDR_PPPOE, ETH_RSS_NONFRAG_IPV4_TCP}; +struct rss_type_match_hdr hint_13 = { + ICE_FLOW_SEG_HDR_PPPOE, ETH_RSS_NONFRAG_IPV4_SCTP}; +struct rss_type_match_hdr hint_14 = { + ICE_FLOW_SEG_HDR_GTPU_EH, ETH_RSS_NONFRAG_IPV4_UDP}; +struct rss_type_match_hdr hint_15 = { + ICE_FLOW_SEG_HDR_GTPU_EH, ETH_RSS_NONFRAG_IPV4_TCP}; + +/* Supported pattern for os default package. */ +static struct ice_pattern_match_item ice_hash_pattern_list_os[] = { + {pattern_eth_ipv4, ICE_INSET_NONE, &hint_1}, + {pattern_eth_ipv4_udp, ICE_INSET_NONE, &hint_2}, + {pattern_eth_ipv4_tcp, ICE_INSET_NONE, &hint_3}, + {pattern_eth_ipv4_sctp, ICE_INSET_NONE, &hint_4}, + {pattern_eth_ipv6, ICE_INSET_NONE, &hint_5}, + {pattern_eth_ipv6_udp, ICE_INSET_NONE, &hint_6}, + {pattern_eth_ipv6_tcp, ICE_INSET_NONE, &hint_7}, + {pattern_eth_ipv6_sctp, ICE_INSET_NONE, &hint_8}, + {pattern_empty, ICE_INSET_NONE, &hint_0}, +}; + +/* Supported pattern for comms package. */ +static struct ice_pattern_match_item ice_hash_pattern_list_comms[] = { + {pattern_eth_ipv4, ICE_INSET_NONE, &hint_1}, + {pattern_eth_ipv4_udp, ICE_INSET_NONE, &hint_2}, + {pattern_eth_ipv4_tcp, ICE_INSET_NONE, &hint_3}, + {pattern_eth_ipv4_sctp, ICE_INSET_NONE, &hint_4}, + {pattern_eth_ipv6, ICE_INSET_NONE, &hint_5}, + {pattern_eth_ipv6_udp, ICE_INSET_NONE, &hint_6}, + {pattern_eth_ipv6_tcp, ICE_INSET_NONE, &hint_7}, + {pattern_eth_ipv6_sctp, ICE_INSET_NONE, &hint_8}, + {pattern_empty, ICE_INSET_NONE, &hint_0}, + {pattern_eth_ipv4_gtpu_eh_ipv4, ICE_INSET_NONE, &hint_9}, + {pattern_eth_ipv4_gtpu_eh_ipv4_udp, ICE_INSET_NONE, &hint_14}, + {pattern_eth_ipv4_gtpu_eh_ipv4_tcp, ICE_INSET_NONE, &hint_15}, + {pattern_eth_pppoes_ipv4, ICE_INSET_NONE, &hint_10}, + {pattern_eth_pppoes_ipv4_udp, ICE_INSET_NONE, &hint_11}, + {pattern_eth_pppoes_ipv4_tcp, ICE_INSET_NONE, &hint_12}, + {pattern_eth_pppoes_ipv4_sctp, ICE_INSET_NONE, &hint_13}, +}; + +/** + * The first member is input set combination, + * the second member is hash fields. 
+ */ +struct ice_hash_match_type ice_hash_type_list[] = { + {ETH_RSS_IPV4 | ETH_RSS_L3_SRC_ONLY, BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA)}, + {ETH_RSS_IPV4 | ETH_RSS_L3_DST_ONLY, BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA)}, + {ETH_RSS_IPV4, ICE_FLOW_HASH_IPV4}, + {ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L3_SRC_ONLY | ETH_RSS_L4_SRC_ONLY, BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) | BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT)}, + {ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L3_SRC_ONLY | ETH_RSS_L4_DST_ONLY, BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) | BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT)}, + {ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L3_SRC_ONLY, BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA)}, + {ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_SRC_ONLY, BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) | BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT)}, + {ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_DST_ONLY, BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) | BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT)}, + {ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L3_DST_ONLY, BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA)}, + {ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L4_SRC_ONLY, BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT)}, + {ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L4_DST_ONLY, BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT)}, + {ETH_RSS_NONFRAG_IPV4_UDP, ICE_HASH_UDP_IPV4}, + {ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L3_SRC_ONLY | ETH_RSS_L4_SRC_ONLY, BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) | BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT)}, + {ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L3_SRC_ONLY | ETH_RSS_L4_DST_ONLY, BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) | BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT)}, + {ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L3_SRC_ONLY, BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA)}, + {ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_SRC_ONLY, BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) | BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT)}, + {ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_DST_ONLY, BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) | BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT)}, + {ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L3_DST_ONLY, BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA)}, + {ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L4_SRC_ONLY, BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT)}, + {ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L4_DST_ONLY, BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT)}, + {ETH_RSS_NONFRAG_IPV4_TCP, ICE_HASH_TCP_IPV4}, + {ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L3_SRC_ONLY | ETH_RSS_L4_SRC_ONLY, BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) | BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT)}, + {ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L3_SRC_ONLY | ETH_RSS_L4_DST_ONLY, BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) | BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT)}, + {ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L3_SRC_ONLY, BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA)}, + {ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_SRC_ONLY, BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) | BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT)}, + {ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_DST_ONLY, BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) | BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT)}, + {ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L3_DST_ONLY, BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA)}, + {ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L4_SRC_ONLY, BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT)}, + {ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L4_DST_ONLY, BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT)}, + {ETH_RSS_NONFRAG_IPV4_SCTP, ICE_HASH_SCTP_IPV4}, + {ETH_RSS_IPV6 | ETH_RSS_L3_SRC_ONLY, BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA)}, + {ETH_RSS_IPV6 | ETH_RSS_L3_DST_ONLY, BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA)}, + {ETH_RSS_IPV6, ICE_FLOW_HASH_IPV6}, + 
{ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L3_SRC_ONLY | ETH_RSS_L4_SRC_ONLY, BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA) | BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT)}, + {ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L3_SRC_ONLY | ETH_RSS_L4_DST_ONLY, BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA) | BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT)}, + {ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L3_SRC_ONLY, BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA)}, + {ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_SRC_ONLY, BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA) | BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT)}, + {ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_DST_ONLY, BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA) | BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT)}, + {ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L3_DST_ONLY, BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA)}, + {ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L4_SRC_ONLY, BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT)}, + {ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L4_DST_ONLY, BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT)}, + {ETH_RSS_NONFRAG_IPV6_UDP, ICE_HASH_UDP_IPV6}, + {ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L3_SRC_ONLY | ETH_RSS_L4_SRC_ONLY, BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA) | BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT)}, + {ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L3_SRC_ONLY | ETH_RSS_L4_DST_ONLY, BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA) | BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT)}, + {ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L3_SRC_ONLY, BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA)}, + {ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_SRC_ONLY, BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA) | BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT)}, + {ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_DST_ONLY, BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA) | BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT)}, + {ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L3_DST_ONLY, BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA)}, + {ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L4_SRC_ONLY, BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT)}, + {ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L4_DST_ONLY, BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT)}, + {ETH_RSS_NONFRAG_IPV6_TCP, ICE_HASH_TCP_IPV6}, + {ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L3_SRC_ONLY | ETH_RSS_L4_SRC_ONLY, BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA) | BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT)}, + {ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L3_SRC_ONLY | ETH_RSS_L4_DST_ONLY, BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA) | BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT)}, + {ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L3_SRC_ONLY, BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA)}, + {ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_SRC_ONLY, BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA) | BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT)}, + {ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_DST_ONLY, BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA) | BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT)}, + {ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L3_DST_ONLY, BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA)}, + {ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L4_SRC_ONLY, BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT)}, + {ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L4_DST_ONLY, BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT)}, + {ETH_RSS_NONFRAG_IPV6_SCTP, ICE_HASH_SCTP_IPV6}, +}; + +static struct ice_flow_engine ice_hash_engine = { + .init = ice_hash_init, + .create = ice_hash_create, + .destroy = ice_hash_destroy, + .uninit = ice_hash_uninit, + .free = ice_hash_free, + .type = ICE_FLOW_ENGINE_HASH, +}; + +/* Register parser for os package. 
*/ +static struct ice_flow_parser ice_hash_parser_os = { + .engine = &ice_hash_engine, + .array = ice_hash_pattern_list_os, + .array_len = RTE_DIM(ice_hash_pattern_list_os), + .parse_pattern_action = ice_hash_parse_pattern_action, + .stage = ICE_FLOW_STAGE_RSS, +}; + +/* Register parser for comms package. */ +static struct ice_flow_parser ice_hash_parser_comms = { + .engine = &ice_hash_engine, + .array = ice_hash_pattern_list_comms, + .array_len = RTE_DIM(ice_hash_pattern_list_comms), + .parse_pattern_action = ice_hash_parse_pattern_action, + .stage = ICE_FLOW_STAGE_RSS, +}; + +RTE_INIT(ice_hash_engine_init) +{ + struct ice_flow_engine *engine = &ice_hash_engine; + ice_register_flow_engine(engine); +} + +static int +ice_hash_init(struct ice_adapter *ad) +{ + struct ice_flow_parser *parser = NULL; + + if (ad->hw.dcf_enabled) + return 0; + + if (ad->active_pkg_type == ICE_PKG_TYPE_OS_DEFAULT) + parser = &ice_hash_parser_os; + else if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS) + parser = &ice_hash_parser_comms; + else + return -EINVAL; + + return ice_register_parser(parser, ad); +} + +static int +ice_hash_check_inset(const struct rte_flow_item pattern[], + struct rte_flow_error *error) +{ + const struct rte_flow_item *item = pattern; + + for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { + if (item->last) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Not support range"); + return -rte_errno; + } + + /* Ignore spec and mask. */ + if (item->spec || item->mask) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Invalid spec/mask."); + return -rte_errno; + } + } + + return 0; +} + +static int +ice_hash_parse_action(struct ice_pattern_match_item *pattern_match_item, + const struct rte_flow_action actions[], + void **meta, + struct rte_flow_error *error) +{ + const struct rte_flow_action *action; + enum rte_flow_action_type action_type; + const struct rte_flow_action_rss *rss; + struct rss_type_match_hdr *m = (struct rss_type_match_hdr *) + (pattern_match_item->meta); + uint32_t type_list_len = RTE_DIM(ice_hash_type_list); + struct ice_hash_match_type *type_match_item; + uint64_t rss_hf; + uint16_t i; + + /* Supported action is RSS. */ + for (action = actions; action->type != + RTE_FLOW_ACTION_TYPE_END; action++) { + action_type = action->type; + switch (action_type) { + case RTE_FLOW_ACTION_TYPE_RSS: + rss = action->conf; + rss_hf = rss->types; + + /** + * Check simultaneous use of SRC_ONLY and DST_ONLY + * of the same level. + */ + rss_hf = rte_eth_rss_hf_refine(rss_hf); + + /* Check if pattern is empty. */ + if (pattern_match_item->pattern_list != + pattern_empty && rss->func == + RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, action, + "Not supported flow"); + + /* Check if rss types match pattern. 
*/ + if (rss->func != RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) { + if (((rss_hf & ETH_RSS_IPV4) != m->eth_rss_hint) && + ((rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) != m->eth_rss_hint) && + ((rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) != m->eth_rss_hint) && + ((rss_hf & ETH_RSS_NONFRAG_IPV4_SCTP) != m->eth_rss_hint) && + ((rss_hf & ETH_RSS_IPV6) != m->eth_rss_hint) && + ((rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) != m->eth_rss_hint) && + ((rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) != m->eth_rss_hint) && + ((rss_hf & ETH_RSS_NONFRAG_IPV6_SCTP) != m->eth_rss_hint)) + return rte_flow_error_set(error, + ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, + action, "Not supported RSS types"); + } + + if (rss->level) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, action, + "a nonzero RSS encapsulation level is not supported"); + + if (rss->key_len) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, action, + "a nonzero RSS key_len is not supported"); + + if (rss->queue) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, action, + "a non-NULL RSS queue is not supported"); + + /* Check hash function and save it to rss_meta. */ + if (rss->func == + RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) + ((struct rss_meta *)*meta)->hash_function = + RTE_ETH_HASH_FUNCTION_SIMPLE_XOR; + + if (rss->func == + RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ) + ((struct rss_meta *)*meta)->hash_function = + RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ; + + type_match_item = rte_zmalloc("ice_type_match_item", + sizeof(struct ice_hash_match_type), 0); + if (!type_match_item) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "No memory for type_match_item"); + return -ENOMEM; + } + + /* Find matched hash fields according to hash type. */ + for (i = 0; i < type_list_len; i++) { + if (rss_hf == + ice_hash_type_list[i].hash_type) { + type_match_item->hash_type = + ice_hash_type_list[i].hash_type; + type_match_item->hash_flds = + ice_hash_type_list[i].hash_flds; + } + } + + /* Save hash fileds to rss_meta. */ + ((struct rss_meta *)*meta)->hash_flds = + type_match_item->hash_flds; + + rte_free(type_match_item); + break; + + case RTE_FLOW_ACTION_TYPE_END: + break; + + default: + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, action, + "Invalid action."); + return -rte_errno; + } + } + + return 0; +} + +static int +ice_hash_parse_pattern_action(__rte_unused struct ice_adapter *ad, + struct ice_pattern_match_item *array, + uint32_t array_len, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + void **meta, + struct rte_flow_error *error) +{ + int ret = 0; + struct ice_pattern_match_item *pattern_match_item; + struct rss_meta *rss_meta_ptr; + + rss_meta_ptr = rte_zmalloc(NULL, sizeof(*rss_meta_ptr), 0); + if (!rss_meta_ptr) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "No memory for rss_meta_ptr"); + return -ENOMEM; + } + + /* Check rss supported pattern and find matched pattern. */ + pattern_match_item = ice_search_pattern_match_item(pattern, + array, array_len, error); + if (!pattern_match_item) { + ret = -rte_errno; + goto error; + } + + ret = ice_hash_check_inset(pattern, error); + if (ret) + goto error; + + /* Save protocol header to rss_meta. */ + rss_meta_ptr->pkt_hdr = ((struct rss_type_match_hdr *) + (pattern_match_item->meta))->hdr_mask; + + /* Check rss action. 
*/ + ret = ice_hash_parse_action(pattern_match_item, actions, + (void **)&rss_meta_ptr, error); + +error: + if (!ret && meta) + *meta = rss_meta_ptr; + else + rte_free(rss_meta_ptr); + rte_free(pattern_match_item); + + return ret; +} + +static int +ice_hash_create(struct ice_adapter *ad, + struct rte_flow *flow, + void *meta, + struct rte_flow_error *error) +{ + struct ice_pf *pf = &ad->pf; + struct ice_hw *hw = ICE_PF_TO_HW(pf); + struct ice_vsi *vsi = pf->main_vsi; + int ret; + uint32_t reg; + struct ice_hash_flow_cfg *filter_ptr; + + uint32_t headermask = ((struct rss_meta *)meta)->pkt_hdr; + uint64_t hash_field = ((struct rss_meta *)meta)->hash_flds; + uint8_t hash_function = ((struct rss_meta *)meta)->hash_function; + + filter_ptr = rte_zmalloc("ice_rss_filter", + sizeof(struct ice_hash_flow_cfg), 0); + if (!filter_ptr) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "No memory for filter_ptr"); + return -ENOMEM; + } + + if (hash_function == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) { + /* Enable registers for simple_xor hash function. */ + reg = ICE_READ_REG(hw, VSIQF_HASH_CTL(vsi->vsi_id)); + reg = (reg & (~VSIQF_HASH_CTL_HASH_SCHEME_M)) | + (2 << VSIQF_HASH_CTL_HASH_SCHEME_S); + ICE_WRITE_REG(hw, VSIQF_HASH_CTL(vsi->vsi_id), reg); + + filter_ptr->simple_xor = 1; + + goto out; + } else { + filter_ptr->rss_cfg.packet_hdr = headermask; + filter_ptr->rss_cfg.hashed_flds = hash_field; + filter_ptr->rss_cfg.symm = + (hash_function == + RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ); + + ret = ice_add_rss_cfg(hw, vsi->idx, + filter_ptr->rss_cfg.hashed_flds, + filter_ptr->rss_cfg.packet_hdr, + filter_ptr->rss_cfg.symm); + if (ret) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "rss flow create fail"); + goto error; + } + } + +out: + flow->rule = filter_ptr; + rte_free(meta); + return 0; + +error: + rte_free(filter_ptr); + rte_free(meta); + return -rte_errno; +} + +static int +ice_hash_destroy(struct ice_adapter *ad, + struct rte_flow *flow, + struct rte_flow_error *error) +{ + struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(ad); + struct ice_hw *hw = ICE_PF_TO_HW(pf); + struct ice_vsi *vsi = pf->main_vsi; + int ret; + uint32_t reg; + struct ice_hash_flow_cfg *filter_ptr; + + filter_ptr = (struct ice_hash_flow_cfg *)flow->rule; + + if (filter_ptr->simple_xor == 1) { + /* Return to symmetric_toeplitz state. */ + reg = ICE_READ_REG(hw, VSIQF_HASH_CTL(vsi->vsi_id)); + reg = (reg & (~VSIQF_HASH_CTL_HASH_SCHEME_M)) | + (1 << VSIQF_HASH_CTL_HASH_SCHEME_S); + ICE_WRITE_REG(hw, VSIQF_HASH_CTL(vsi->vsi_id), reg); + } else { + ret = ice_rem_rss_cfg(hw, vsi->idx, + filter_ptr->rss_cfg.hashed_flds, + filter_ptr->rss_cfg.packet_hdr); + /* Fixme: Ignore the error if a rule does not exist. + * Currently a rule for inputset change or symm turn on/off + * will overwrite an exist rule, while application still + * have 2 rte_flow handles. 
+ **/ + if (ret && ret != ICE_ERR_DOES_NOT_EXIST) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "rss flow destroy fail"); + goto error; + } + } + + rte_free(filter_ptr); + return 0; + +error: + rte_free(filter_ptr); + return -rte_errno; +} + +static void +ice_hash_uninit(struct ice_adapter *ad) +{ + if (ad->hw.dcf_enabled) + return; + + if (ad->active_pkg_type == ICE_PKG_TYPE_OS_DEFAULT) + ice_unregister_parser(&ice_hash_parser_os, ad); + else if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS) + ice_unregister_parser(&ice_hash_parser_comms, ad); +} + +static void +ice_hash_free(struct rte_flow *flow) +{ + rte_free(flow->rule); +} diff --git a/src/spdk/dpdk/drivers/net/ice/ice_logs.h b/src/spdk/dpdk/drivers/net/ice/ice_logs.h new file mode 100644 index 000000000..aab7da5f7 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ice/ice_logs.h @@ -0,0 +1,51 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Intel Corporation + */ + +#ifndef _ICE_LOGS_H_ +#define _ICE_LOGS_H_ + +extern int ice_logtype_init; +extern int ice_logtype_driver; + +#define PMD_INIT_LOG(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, ice_logtype_init, "%s(): " fmt "\n", \ + __func__, ##args) + +#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>") + +#ifdef RTE_LIBRTE_ICE_DEBUG_RX +extern int ice_logtype_rx; +#define PMD_RX_LOG(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, ice_logtype_rx, \ + "%s(): " fmt "\n", __func__, ## args) +#else +#define PMD_RX_LOG(level, fmt, args...) do { } while (0) +#endif + +#ifdef RTE_LIBRTE_ICE_DEBUG_TX +extern int ice_logtype_tx; +#define PMD_TX_LOG(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, ice_logtype_tx, \ + "%s(): " fmt "\n", __func__, ## args) +#else +#define PMD_TX_LOG(level, fmt, args...) do { } while (0) +#endif + +#ifdef RTE_LIBRTE_ICE_DEBUG_TX_FREE +extern int ice_logtype_tx_free; +#define PMD_TX_FREE_LOG(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, ice_logtype_tx_free, \ + "%s(): " fmt "\n", __func__, ## args) +#else +#define PMD_TX_FREE_LOG(level, fmt, args...) do { } while (0) +#endif + +#define PMD_DRV_LOG_RAW(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, ice_logtype_driver, "%s(): " fmt, \ + __func__, ## args) + +#define PMD_DRV_LOG(level, fmt, args...) 
\ + PMD_DRV_LOG_RAW(level, fmt "\n", ## args) + +#endif /* _ICE_LOGS_H_ */ diff --git a/src/spdk/dpdk/drivers/net/ice/ice_rxtx.c b/src/spdk/dpdk/drivers/net/ice/ice_rxtx.c new file mode 100644 index 000000000..1c9f31efd --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ice/ice_rxtx.c @@ -0,0 +1,3823 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Intel Corporation + */ + +#include +#include + +#include "rte_pmd_ice.h" +#include "ice_rxtx.h" + +#define ICE_TX_CKSUM_OFFLOAD_MASK ( \ + PKT_TX_IP_CKSUM | \ + PKT_TX_L4_MASK | \ + PKT_TX_TCP_SEG | \ + PKT_TX_OUTER_IP_CKSUM) + +/* Offset of mbuf dynamic field for protocol extraction data */ +int rte_net_ice_dynfield_proto_xtr_metadata_offs = -1; + +/* Mask of mbuf dynamic flags for protocol extraction type */ +uint64_t rte_net_ice_dynflag_proto_xtr_vlan_mask; +uint64_t rte_net_ice_dynflag_proto_xtr_ipv4_mask; +uint64_t rte_net_ice_dynflag_proto_xtr_ipv6_mask; +uint64_t rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask; +uint64_t rte_net_ice_dynflag_proto_xtr_tcp_mask; + +static inline uint64_t +ice_rxdid_to_proto_xtr_ol_flag(uint8_t rxdid) +{ + static uint64_t *ol_flag_map[] = { + [ICE_RXDID_COMMS_AUX_VLAN] = + &rte_net_ice_dynflag_proto_xtr_vlan_mask, + [ICE_RXDID_COMMS_AUX_IPV4] = + &rte_net_ice_dynflag_proto_xtr_ipv4_mask, + [ICE_RXDID_COMMS_AUX_IPV6] = + &rte_net_ice_dynflag_proto_xtr_ipv6_mask, + [ICE_RXDID_COMMS_AUX_IPV6_FLOW] = + &rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask, + [ICE_RXDID_COMMS_AUX_TCP] = + &rte_net_ice_dynflag_proto_xtr_tcp_mask, + }; + uint64_t *ol_flag; + + ol_flag = rxdid < RTE_DIM(ol_flag_map) ? ol_flag_map[rxdid] : NULL; + + return ol_flag != NULL ? *ol_flag : 0ULL; +} + +static inline uint8_t +ice_proto_xtr_type_to_rxdid(uint8_t xtr_type) +{ + static uint8_t rxdid_map[] = { + [PROTO_XTR_NONE] = ICE_RXDID_COMMS_GENERIC, + [PROTO_XTR_VLAN] = ICE_RXDID_COMMS_AUX_VLAN, + [PROTO_XTR_IPV4] = ICE_RXDID_COMMS_AUX_IPV4, + [PROTO_XTR_IPV6] = ICE_RXDID_COMMS_AUX_IPV6, + [PROTO_XTR_IPV6_FLOW] = ICE_RXDID_COMMS_AUX_IPV6_FLOW, + [PROTO_XTR_TCP] = ICE_RXDID_COMMS_AUX_TCP, + }; + + return xtr_type < RTE_DIM(rxdid_map) ? + rxdid_map[xtr_type] : ICE_RXDID_COMMS_GENERIC; +} + +static enum ice_status +ice_program_hw_rx_queue(struct ice_rx_queue *rxq) +{ + struct ice_vsi *vsi = rxq->vsi; + struct ice_hw *hw = ICE_VSI_TO_HW(vsi); + struct rte_eth_dev *dev = ICE_VSI_TO_ETH_DEV(rxq->vsi); + struct ice_rlan_ctx rx_ctx; + enum ice_status err; + uint16_t buf_size, len; + struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode; + uint32_t rxdid = ICE_RXDID_COMMS_GENERIC; + uint32_t regval; + + /* Set buffer size as the head split is disabled. 
*/ + buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) - + RTE_PKTMBUF_HEADROOM); + rxq->rx_hdr_len = 0; + rxq->rx_buf_len = RTE_ALIGN(buf_size, (1 << ICE_RLAN_CTX_DBUF_S)); + len = ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len; + rxq->max_pkt_len = RTE_MIN(len, + dev->data->dev_conf.rxmode.max_rx_pkt_len); + + if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) { + if (rxq->max_pkt_len <= RTE_ETHER_MAX_LEN || + rxq->max_pkt_len > ICE_FRAME_SIZE_MAX) { + PMD_DRV_LOG(ERR, "maximum packet length must " + "be larger than %u and smaller than %u," + "as jumbo frame is enabled", + (uint32_t)RTE_ETHER_MAX_LEN, + (uint32_t)ICE_FRAME_SIZE_MAX); + return -EINVAL; + } + } else { + if (rxq->max_pkt_len < RTE_ETHER_MIN_LEN || + rxq->max_pkt_len > RTE_ETHER_MAX_LEN) { + PMD_DRV_LOG(ERR, "maximum packet length must be " + "larger than %u and smaller than %u, " + "as jumbo frame is disabled", + (uint32_t)RTE_ETHER_MIN_LEN, + (uint32_t)RTE_ETHER_MAX_LEN); + return -EINVAL; + } + } + + memset(&rx_ctx, 0, sizeof(rx_ctx)); + + rx_ctx.base = rxq->rx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT; + rx_ctx.qlen = rxq->nb_rx_desc; + rx_ctx.dbuf = rxq->rx_buf_len >> ICE_RLAN_CTX_DBUF_S; + rx_ctx.hbuf = rxq->rx_hdr_len >> ICE_RLAN_CTX_HBUF_S; + rx_ctx.dtype = 0; /* No Header Split mode */ +#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC + rx_ctx.dsize = 1; /* 32B descriptors */ +#endif + rx_ctx.rxmax = rxq->max_pkt_len; + /* TPH: Transaction Layer Packet (TLP) processing hints */ + rx_ctx.tphrdesc_ena = 1; + rx_ctx.tphwdesc_ena = 1; + rx_ctx.tphdata_ena = 1; + rx_ctx.tphhead_ena = 1; + /* Low Receive Queue Threshold defined in 64 descriptors units. + * When the number of free descriptors goes below the lrxqthresh, + * an immediate interrupt is triggered. + */ + rx_ctx.lrxqthresh = 2; + /*default use 32 byte descriptor, vlan tag extract to L2TAG2(1st)*/ + rx_ctx.l2tsel = 1; + rx_ctx.showiv = 0; + rx_ctx.crcstrip = (rxq->crc_len == 0) ? 1 : 0; + + rxdid = ice_proto_xtr_type_to_rxdid(rxq->proto_xtr); + + PMD_DRV_LOG(DEBUG, "Port (%u) - Rx queue (%u) is set with RXDID : %u", + rxq->port_id, rxq->queue_id, rxdid); + + /* Enable Flexible Descriptors in the queue context which + * allows this driver to select a specific receive descriptor format + */ + regval = (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) & + QRXFLXP_CNTXT_RXDID_IDX_M; + + /* increasing context priority to pick up profile ID; + * default is 0x01; setting to 0x03 to ensure profile + * is programming if prev context is of same priority + */ + regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) & + QRXFLXP_CNTXT_RXDID_PRIO_M; + + ICE_WRITE_REG(hw, QRXFLXP_CNTXT(rxq->reg_idx), regval); + + err = ice_clear_rxq_ctx(hw, rxq->reg_idx); + if (err) { + PMD_DRV_LOG(ERR, "Failed to clear Lan Rx queue (%u) context", + rxq->queue_id); + return -EINVAL; + } + err = ice_write_rxq_ctx(hw, &rx_ctx, rxq->reg_idx); + if (err) { + PMD_DRV_LOG(ERR, "Failed to write Lan Rx queue (%u) context", + rxq->queue_id); + return -EINVAL; + } + + buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) - + RTE_PKTMBUF_HEADROOM); + + /* Check if scattered RX needs to be used. 
*/ + if (rxq->max_pkt_len > buf_size) + dev->data->scattered_rx = 1; + + rxq->qrx_tail = hw->hw_addr + QRX_TAIL(rxq->reg_idx); + + /* Init the Rx tail register*/ + ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1); + + return 0; +} + +/* Allocate mbufs for all descriptors in rx queue */ +static int +ice_alloc_rx_queue_mbufs(struct ice_rx_queue *rxq) +{ + struct ice_rx_entry *rxe = rxq->sw_ring; + uint64_t dma_addr; + uint16_t i; + + for (i = 0; i < rxq->nb_rx_desc; i++) { + volatile union ice_rx_flex_desc *rxd; + struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mp); + + if (unlikely(!mbuf)) { + PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX"); + return -ENOMEM; + } + + rte_mbuf_refcnt_set(mbuf, 1); + mbuf->next = NULL; + mbuf->data_off = RTE_PKTMBUF_HEADROOM; + mbuf->nb_segs = 1; + mbuf->port = rxq->port_id; + + dma_addr = + rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf)); + + rxd = &rxq->rx_ring[i]; + rxd->read.pkt_addr = dma_addr; + rxd->read.hdr_addr = 0; +#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC + rxd->read.rsvd1 = 0; + rxd->read.rsvd2 = 0; +#endif + rxe[i].mbuf = mbuf; + } + + return 0; +} + +/* Free all mbufs for descriptors in rx queue */ +static void +_ice_rx_queue_release_mbufs(struct ice_rx_queue *rxq) +{ + uint16_t i; + + if (!rxq || !rxq->sw_ring) { + PMD_DRV_LOG(DEBUG, "Pointer to sw_ring is NULL"); + return; + } + + for (i = 0; i < rxq->nb_rx_desc; i++) { + if (rxq->sw_ring[i].mbuf) { + rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf); + rxq->sw_ring[i].mbuf = NULL; + } + } + if (rxq->rx_nb_avail == 0) + return; + for (i = 0; i < rxq->rx_nb_avail; i++) + rte_pktmbuf_free_seg(rxq->rx_stage[rxq->rx_next_avail + i]); + + rxq->rx_nb_avail = 0; +} + +/* turn on or off rx queue + * @q_idx: queue index in pf scope + * @on: turn on or off the queue + */ +static int +ice_switch_rx_queue(struct ice_hw *hw, uint16_t q_idx, bool on) +{ + uint32_t reg; + uint16_t j; + + /* QRX_CTRL = QRX_ENA */ + reg = ICE_READ_REG(hw, QRX_CTRL(q_idx)); + + if (on) { + if (reg & QRX_CTRL_QENA_STAT_M) + return 0; /* Already on, skip */ + reg |= QRX_CTRL_QENA_REQ_M; + } else { + if (!(reg & QRX_CTRL_QENA_STAT_M)) + return 0; /* Already off, skip */ + reg &= ~QRX_CTRL_QENA_REQ_M; + } + + /* Write the register */ + ICE_WRITE_REG(hw, QRX_CTRL(q_idx), reg); + /* Check the result. It is said that QENA_STAT + * follows the QENA_REQ not more than 10 use. + * TODO: need to change the wait counter later + */ + for (j = 0; j < ICE_CHK_Q_ENA_COUNT; j++) { + rte_delay_us(ICE_CHK_Q_ENA_INTERVAL_US); + reg = ICE_READ_REG(hw, QRX_CTRL(q_idx)); + if (on) { + if ((reg & QRX_CTRL_QENA_REQ_M) && + (reg & QRX_CTRL_QENA_STAT_M)) + break; + } else { + if (!(reg & QRX_CTRL_QENA_REQ_M) && + !(reg & QRX_CTRL_QENA_STAT_M)) + break; + } + } + + /* Check if it is timeout */ + if (j >= ICE_CHK_Q_ENA_COUNT) { + PMD_DRV_LOG(ERR, "Failed to %s rx queue[%u]", + (on ? 
"enable" : "disable"), q_idx); + return -ETIMEDOUT; + } + + return 0; +} + +static inline int +ice_check_rx_burst_bulk_alloc_preconditions(struct ice_rx_queue *rxq) +{ + int ret = 0; + + if (!(rxq->rx_free_thresh >= ICE_RX_MAX_BURST)) { + PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: " + "rxq->rx_free_thresh=%d, " + "ICE_RX_MAX_BURST=%d", + rxq->rx_free_thresh, ICE_RX_MAX_BURST); + ret = -EINVAL; + } else if (!(rxq->rx_free_thresh < rxq->nb_rx_desc)) { + PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: " + "rxq->rx_free_thresh=%d, " + "rxq->nb_rx_desc=%d", + rxq->rx_free_thresh, rxq->nb_rx_desc); + ret = -EINVAL; + } else if (rxq->nb_rx_desc % rxq->rx_free_thresh != 0) { + PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: " + "rxq->nb_rx_desc=%d, " + "rxq->rx_free_thresh=%d", + rxq->nb_rx_desc, rxq->rx_free_thresh); + ret = -EINVAL; + } + + return ret; +} + +/* reset fields in ice_rx_queue back to default */ +static void +ice_reset_rx_queue(struct ice_rx_queue *rxq) +{ + unsigned int i; + uint16_t len; + + if (!rxq) { + PMD_DRV_LOG(DEBUG, "Pointer to rxq is NULL"); + return; + } + + len = (uint16_t)(rxq->nb_rx_desc + ICE_RX_MAX_BURST); + + for (i = 0; i < len * sizeof(union ice_rx_flex_desc); i++) + ((volatile char *)rxq->rx_ring)[i] = 0; + + memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf)); + for (i = 0; i < ICE_RX_MAX_BURST; ++i) + rxq->sw_ring[rxq->nb_rx_desc + i].mbuf = &rxq->fake_mbuf; + + rxq->rx_nb_avail = 0; + rxq->rx_next_avail = 0; + rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1); + + rxq->rx_tail = 0; + rxq->nb_rx_hold = 0; + rxq->pkt_first_seg = NULL; + rxq->pkt_last_seg = NULL; + + rxq->rxrearm_start = 0; + rxq->rxrearm_nb = 0; +} + +int +ice_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) +{ + struct ice_rx_queue *rxq; + int err; + struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + PMD_INIT_FUNC_TRACE(); + + if (rx_queue_id >= dev->data->nb_rx_queues) { + PMD_DRV_LOG(ERR, "RX queue %u is out of range %u", + rx_queue_id, dev->data->nb_rx_queues); + return -EINVAL; + } + + rxq = dev->data->rx_queues[rx_queue_id]; + if (!rxq || !rxq->q_set) { + PMD_DRV_LOG(ERR, "RX queue %u not available or setup", + rx_queue_id); + return -EINVAL; + } + + err = ice_program_hw_rx_queue(rxq); + if (err) { + PMD_DRV_LOG(ERR, "fail to program RX queue %u", + rx_queue_id); + return -EIO; + } + + err = ice_alloc_rx_queue_mbufs(rxq); + if (err) { + PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf"); + return -ENOMEM; + } + + /* Init the RX tail register. 
*/ + ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1); + + err = ice_switch_rx_queue(hw, rxq->reg_idx, true); + if (err) { + PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on", + rx_queue_id); + + rxq->rx_rel_mbufs(rxq); + ice_reset_rx_queue(rxq); + return -EINVAL; + } + + dev->data->rx_queue_state[rx_queue_id] = + RTE_ETH_QUEUE_STATE_STARTED; + + return 0; +} + +int +ice_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id) +{ + struct ice_rx_queue *rxq; + int err; + struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (rx_queue_id < dev->data->nb_rx_queues) { + rxq = dev->data->rx_queues[rx_queue_id]; + + err = ice_switch_rx_queue(hw, rxq->reg_idx, false); + if (err) { + PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off", + rx_queue_id); + return -EINVAL; + } + rxq->rx_rel_mbufs(rxq); + ice_reset_rx_queue(rxq); + dev->data->rx_queue_state[rx_queue_id] = + RTE_ETH_QUEUE_STATE_STOPPED; + } + + return 0; +} + +int +ice_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id) +{ + struct ice_tx_queue *txq; + int err; + struct ice_vsi *vsi; + struct ice_hw *hw; + struct ice_aqc_add_tx_qgrp txq_elem; + struct ice_tlan_ctx tx_ctx; + + PMD_INIT_FUNC_TRACE(); + + if (tx_queue_id >= dev->data->nb_tx_queues) { + PMD_DRV_LOG(ERR, "TX queue %u is out of range %u", + tx_queue_id, dev->data->nb_tx_queues); + return -EINVAL; + } + + txq = dev->data->tx_queues[tx_queue_id]; + if (!txq || !txq->q_set) { + PMD_DRV_LOG(ERR, "TX queue %u is not available or setup", + tx_queue_id); + return -EINVAL; + } + + vsi = txq->vsi; + hw = ICE_VSI_TO_HW(vsi); + + memset(&txq_elem, 0, sizeof(txq_elem)); + memset(&tx_ctx, 0, sizeof(tx_ctx)); + txq_elem.num_txqs = 1; + txq_elem.txqs[0].txq_id = rte_cpu_to_le_16(txq->reg_idx); + + tx_ctx.base = txq->tx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT; + tx_ctx.qlen = txq->nb_tx_desc; + tx_ctx.pf_num = hw->pf_id; + tx_ctx.vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF; + tx_ctx.src_vsi = vsi->vsi_id; + tx_ctx.port_num = hw->port_info->lport; + tx_ctx.tso_ena = 1; /* tso enable */ + tx_ctx.tso_qnum = txq->reg_idx; /* index for tso state structure */ + tx_ctx.legacy_int = 1; /* Legacy or Advanced Host Interface */ + + ice_set_ctx((uint8_t *)&tx_ctx, txq_elem.txqs[0].txq_ctx, + ice_tlan_ctx_info); + + txq->qtx_tail = hw->hw_addr + QTX_COMM_DBELL(txq->reg_idx); + + /* Init the Tx tail register*/ + ICE_PCI_REG_WRITE(txq->qtx_tail, 0); + + /* Fix me, we assume TC always 0 here */ + err = ice_ena_vsi_txq(hw->port_info, vsi->idx, 0, tx_queue_id, 1, + &txq_elem, sizeof(txq_elem), NULL); + if (err) { + PMD_DRV_LOG(ERR, "Failed to add lan txq"); + return -EIO; + } + /* store the schedule node id */ + txq->q_teid = txq_elem.txqs[0].q_teid; + + dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; + return 0; +} + +static enum ice_status +ice_fdir_program_hw_rx_queue(struct ice_rx_queue *rxq) +{ + struct ice_vsi *vsi = rxq->vsi; + struct ice_hw *hw = ICE_VSI_TO_HW(vsi); + uint32_t rxdid = ICE_RXDID_LEGACY_1; + struct ice_rlan_ctx rx_ctx; + enum ice_status err; + uint32_t regval; + + rxq->rx_hdr_len = 0; + rxq->rx_buf_len = 1024; + + memset(&rx_ctx, 0, sizeof(rx_ctx)); + + rx_ctx.base = rxq->rx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT; + rx_ctx.qlen = rxq->nb_rx_desc; + rx_ctx.dbuf = rxq->rx_buf_len >> ICE_RLAN_CTX_DBUF_S; + rx_ctx.hbuf = rxq->rx_hdr_len >> ICE_RLAN_CTX_HBUF_S; + rx_ctx.dtype = 0; /* No Header Split mode */ + rx_ctx.dsize = 1; /* 32B descriptors */ + rx_ctx.rxmax = RTE_ETHER_MAX_LEN; + /* TPH: Transaction Layer Packet (TLP) processing hints */ 
+ rx_ctx.tphrdesc_ena = 1; + rx_ctx.tphwdesc_ena = 1; + rx_ctx.tphdata_ena = 1; + rx_ctx.tphhead_ena = 1; + /* Low Receive Queue Threshold defined in 64 descriptors units. + * When the number of free descriptors goes below the lrxqthresh, + * an immediate interrupt is triggered. + */ + rx_ctx.lrxqthresh = 2; + /*default use 32 byte descriptor, vlan tag extract to L2TAG2(1st)*/ + rx_ctx.l2tsel = 1; + rx_ctx.showiv = 0; + rx_ctx.crcstrip = (rxq->crc_len == 0) ? 1 : 0; + + /* Enable Flexible Descriptors in the queue context which + * allows this driver to select a specific receive descriptor format + */ + regval = (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) & + QRXFLXP_CNTXT_RXDID_IDX_M; + + /* increasing context priority to pick up profile ID; + * default is 0x01; setting to 0x03 to ensure profile + * is programming if prev context is of same priority + */ + regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) & + QRXFLXP_CNTXT_RXDID_PRIO_M; + + ICE_WRITE_REG(hw, QRXFLXP_CNTXT(rxq->reg_idx), regval); + + err = ice_clear_rxq_ctx(hw, rxq->reg_idx); + if (err) { + PMD_DRV_LOG(ERR, "Failed to clear Lan Rx queue (%u) context", + rxq->queue_id); + return -EINVAL; + } + err = ice_write_rxq_ctx(hw, &rx_ctx, rxq->reg_idx); + if (err) { + PMD_DRV_LOG(ERR, "Failed to write Lan Rx queue (%u) context", + rxq->queue_id); + return -EINVAL; + } + + rxq->qrx_tail = hw->hw_addr + QRX_TAIL(rxq->reg_idx); + + /* Init the Rx tail register*/ + ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1); + + return 0; +} + +int +ice_fdir_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) +{ + struct ice_rx_queue *rxq; + int err; + struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); + + PMD_INIT_FUNC_TRACE(); + + rxq = pf->fdir.rxq; + if (!rxq || !rxq->q_set) { + PMD_DRV_LOG(ERR, "FDIR RX queue %u not available or setup", + rx_queue_id); + return -EINVAL; + } + + err = ice_fdir_program_hw_rx_queue(rxq); + if (err) { + PMD_DRV_LOG(ERR, "fail to program FDIR RX queue %u", + rx_queue_id); + return -EIO; + } + + /* Init the RX tail register. 
*/ + ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1); + + err = ice_switch_rx_queue(hw, rxq->reg_idx, true); + if (err) { + PMD_DRV_LOG(ERR, "Failed to switch FDIR RX queue %u on", + rx_queue_id); + + ice_reset_rx_queue(rxq); + return -EINVAL; + } + + return 0; +} + +int +ice_fdir_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id) +{ + struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct ice_tx_queue *txq; + int err; + struct ice_vsi *vsi; + struct ice_hw *hw; + struct ice_aqc_add_tx_qgrp txq_elem; + struct ice_tlan_ctx tx_ctx; + + PMD_INIT_FUNC_TRACE(); + + txq = pf->fdir.txq; + if (!txq || !txq->q_set) { + PMD_DRV_LOG(ERR, "FDIR TX queue %u is not available or setup", + tx_queue_id); + return -EINVAL; + } + + vsi = txq->vsi; + hw = ICE_VSI_TO_HW(vsi); + + memset(&txq_elem, 0, sizeof(txq_elem)); + memset(&tx_ctx, 0, sizeof(tx_ctx)); + txq_elem.num_txqs = 1; + txq_elem.txqs[0].txq_id = rte_cpu_to_le_16(txq->reg_idx); + + tx_ctx.base = txq->tx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT; + tx_ctx.qlen = txq->nb_tx_desc; + tx_ctx.pf_num = hw->pf_id; + tx_ctx.vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF; + tx_ctx.src_vsi = vsi->vsi_id; + tx_ctx.port_num = hw->port_info->lport; + tx_ctx.tso_ena = 1; /* tso enable */ + tx_ctx.tso_qnum = txq->reg_idx; /* index for tso state structure */ + tx_ctx.legacy_int = 1; /* Legacy or Advanced Host Interface */ + + ice_set_ctx((uint8_t *)&tx_ctx, txq_elem.txqs[0].txq_ctx, + ice_tlan_ctx_info); + + txq->qtx_tail = hw->hw_addr + QTX_COMM_DBELL(txq->reg_idx); + + /* Init the Tx tail register*/ + ICE_PCI_REG_WRITE(txq->qtx_tail, 0); + + /* Fix me, we assume TC always 0 here */ + err = ice_ena_vsi_txq(hw->port_info, vsi->idx, 0, tx_queue_id, 1, + &txq_elem, sizeof(txq_elem), NULL); + if (err) { + PMD_DRV_LOG(ERR, "Failed to add FDIR txq"); + return -EIO; + } + /* store the schedule node id */ + txq->q_teid = txq_elem.txqs[0].q_teid; + + return 0; +} + +/* Free all mbufs for descriptors in tx queue */ +static void +_ice_tx_queue_release_mbufs(struct ice_tx_queue *txq) +{ + uint16_t i; + + if (!txq || !txq->sw_ring) { + PMD_DRV_LOG(DEBUG, "Pointer to txq or sw_ring is NULL"); + return; + } + + for (i = 0; i < txq->nb_tx_desc; i++) { + if (txq->sw_ring[i].mbuf) { + rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf); + txq->sw_ring[i].mbuf = NULL; + } + } +} + +static void +ice_reset_tx_queue(struct ice_tx_queue *txq) +{ + struct ice_tx_entry *txe; + uint16_t i, prev, size; + + if (!txq) { + PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL"); + return; + } + + txe = txq->sw_ring; + size = sizeof(struct ice_tx_desc) * txq->nb_tx_desc; + for (i = 0; i < size; i++) + ((volatile char *)txq->tx_ring)[i] = 0; + + prev = (uint16_t)(txq->nb_tx_desc - 1); + for (i = 0; i < txq->nb_tx_desc; i++) { + volatile struct ice_tx_desc *txd = &txq->tx_ring[i]; + + txd->cmd_type_offset_bsz = + rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE); + txe[i].mbuf = NULL; + txe[i].last_id = i; + txe[prev].next_id = i; + prev = i; + } + + txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1); + txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1); + + txq->tx_tail = 0; + txq->nb_tx_used = 0; + + txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1); + txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1); +} + +int +ice_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id) +{ + struct ice_tx_queue *txq; + struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct ice_vsi *vsi = pf->main_vsi; + enum 
ice_status status; + uint16_t q_ids[1]; + uint32_t q_teids[1]; + uint16_t q_handle = tx_queue_id; + + if (tx_queue_id >= dev->data->nb_tx_queues) { + PMD_DRV_LOG(ERR, "TX queue %u is out of range %u", + tx_queue_id, dev->data->nb_tx_queues); + return -EINVAL; + } + + txq = dev->data->tx_queues[tx_queue_id]; + if (!txq) { + PMD_DRV_LOG(ERR, "TX queue %u is not available", + tx_queue_id); + return -EINVAL; + } + + q_ids[0] = txq->reg_idx; + q_teids[0] = txq->q_teid; + + /* Fix me, we assume TC always 0 here */ + status = ice_dis_vsi_txq(hw->port_info, vsi->idx, 0, 1, &q_handle, + q_ids, q_teids, ICE_NO_RESET, 0, NULL); + if (status != ICE_SUCCESS) { + PMD_DRV_LOG(DEBUG, "Failed to disable Lan Tx queue"); + return -EINVAL; + } + + txq->tx_rel_mbufs(txq); + ice_reset_tx_queue(txq); + dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; + + return 0; +} + +int +ice_fdir_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id) +{ + struct ice_rx_queue *rxq; + int err; + struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); + + rxq = pf->fdir.rxq; + + err = ice_switch_rx_queue(hw, rxq->reg_idx, false); + if (err) { + PMD_DRV_LOG(ERR, "Failed to switch FDIR RX queue %u off", + rx_queue_id); + return -EINVAL; + } + rxq->rx_rel_mbufs(rxq); + + return 0; +} + +int +ice_fdir_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id) +{ + struct ice_tx_queue *txq; + struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct ice_vsi *vsi = pf->main_vsi; + enum ice_status status; + uint16_t q_ids[1]; + uint32_t q_teids[1]; + uint16_t q_handle = tx_queue_id; + + txq = pf->fdir.txq; + if (!txq) { + PMD_DRV_LOG(ERR, "TX queue %u is not available", + tx_queue_id); + return -EINVAL; + } + vsi = txq->vsi; + + q_ids[0] = txq->reg_idx; + q_teids[0] = txq->q_teid; + + /* Fix me, we assume TC always 0 here */ + status = ice_dis_vsi_txq(hw->port_info, vsi->idx, 0, 1, &q_handle, + q_ids, q_teids, ICE_NO_RESET, 0, NULL); + if (status != ICE_SUCCESS) { + PMD_DRV_LOG(DEBUG, "Failed to disable Lan Tx queue"); + return -EINVAL; + } + + txq->tx_rel_mbufs(txq); + + return 0; +} + +int +ice_rx_queue_setup(struct rte_eth_dev *dev, + uint16_t queue_idx, + uint16_t nb_desc, + unsigned int socket_id, + const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *mp) +{ + struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct ice_adapter *ad = + ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + struct ice_vsi *vsi = pf->main_vsi; + struct ice_rx_queue *rxq; + const struct rte_memzone *rz; + uint32_t ring_size; + uint16_t len; + int use_def_burst_func = 1; + + if (nb_desc % ICE_ALIGN_RING_DESC != 0 || + nb_desc > ICE_MAX_RING_DESC || + nb_desc < ICE_MIN_RING_DESC) { + PMD_INIT_LOG(ERR, "Number (%u) of receive descriptors is " + "invalid", nb_desc); + return -EINVAL; + } + + /* Free memory if needed */ + if (dev->data->rx_queues[queue_idx]) { + ice_rx_queue_release(dev->data->rx_queues[queue_idx]); + dev->data->rx_queues[queue_idx] = NULL; + } + + /* Allocate the rx queue data structure */ + rxq = rte_zmalloc_socket(NULL, + sizeof(struct ice_rx_queue), + RTE_CACHE_LINE_SIZE, + socket_id); + if (!rxq) { + PMD_INIT_LOG(ERR, "Failed to allocate memory for " + "rx queue data structure"); + return -ENOMEM; + } + rxq->mp = mp; + rxq->nb_rx_desc = nb_desc; + rxq->rx_free_thresh = rx_conf->rx_free_thresh; + rxq->queue_id = 
queue_idx; + + rxq->reg_idx = vsi->base_queue + queue_idx; + rxq->port_id = dev->data->port_id; + if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) + rxq->crc_len = RTE_ETHER_CRC_LEN; + else + rxq->crc_len = 0; + + rxq->drop_en = rx_conf->rx_drop_en; + rxq->vsi = vsi; + rxq->rx_deferred_start = rx_conf->rx_deferred_start; + rxq->proto_xtr = pf->proto_xtr != NULL ? + pf->proto_xtr[queue_idx] : PROTO_XTR_NONE; + + /* Allocate the maximun number of RX ring hardware descriptor. */ + len = ICE_MAX_RING_DESC; + + /** + * Allocating a little more memory because vectorized/bulk_alloc Rx + * functions doesn't check boundaries each time. + */ + len += ICE_RX_MAX_BURST; + + /* Allocate the maximum number of RX ring hardware descriptor. */ + ring_size = sizeof(union ice_rx_flex_desc) * len; + ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN); + rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx, + ring_size, ICE_RING_BASE_ALIGN, + socket_id); + if (!rz) { + ice_rx_queue_release(rxq); + PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for RX"); + return -ENOMEM; + } + + /* Zero all the descriptors in the ring. */ + memset(rz->addr, 0, ring_size); + + rxq->rx_ring_dma = rz->iova; + rxq->rx_ring = rz->addr; + + /* always reserve more for bulk alloc */ + len = (uint16_t)(nb_desc + ICE_RX_MAX_BURST); + + /* Allocate the software ring. */ + rxq->sw_ring = rte_zmalloc_socket(NULL, + sizeof(struct ice_rx_entry) * len, + RTE_CACHE_LINE_SIZE, + socket_id); + if (!rxq->sw_ring) { + ice_rx_queue_release(rxq); + PMD_INIT_LOG(ERR, "Failed to allocate memory for SW ring"); + return -ENOMEM; + } + + ice_reset_rx_queue(rxq); + rxq->q_set = true; + dev->data->rx_queues[queue_idx] = rxq; + rxq->rx_rel_mbufs = _ice_rx_queue_release_mbufs; + + use_def_burst_func = ice_check_rx_burst_bulk_alloc_preconditions(rxq); + + if (!use_def_burst_func) { + PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are " + "satisfied. Rx Burst Bulk Alloc function will be " + "used on port=%d, queue=%d.", + rxq->port_id, rxq->queue_id); + } else { + PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are " + "not satisfied, Scattered Rx is requested. " + "on port=%d, queue=%d.", + rxq->port_id, rxq->queue_id); + ad->rx_bulk_alloc_allowed = false; + } + + return 0; +} + +void +ice_rx_queue_release(void *rxq) +{ + struct ice_rx_queue *q = (struct ice_rx_queue *)rxq; + + if (!q) { + PMD_DRV_LOG(DEBUG, "Pointer to rxq is NULL"); + return; + } + + q->rx_rel_mbufs(q); + rte_free(q->sw_ring); + rte_free(q); +} + +int +ice_tx_queue_setup(struct rte_eth_dev *dev, + uint16_t queue_idx, + uint16_t nb_desc, + unsigned int socket_id, + const struct rte_eth_txconf *tx_conf) +{ + struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct ice_vsi *vsi = pf->main_vsi; + struct ice_tx_queue *txq; + const struct rte_memzone *tz; + uint32_t ring_size; + uint16_t tx_rs_thresh, tx_free_thresh; + uint64_t offloads; + + offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads; + + if (nb_desc % ICE_ALIGN_RING_DESC != 0 || + nb_desc > ICE_MAX_RING_DESC || + nb_desc < ICE_MIN_RING_DESC) { + PMD_INIT_LOG(ERR, "Number (%u) of transmit descriptors is " + "invalid", nb_desc); + return -EINVAL; + } + + /** + * The following two parameters control the setting of the RS bit on + * transmit descriptors. TX descriptors will have their RS bit set + * after txq->tx_rs_thresh descriptors have been used. 
The TX + * descriptor ring will be cleaned after txq->tx_free_thresh + * descriptors are used or if the number of descriptors required to + * transmit a packet is greater than the number of free TX descriptors. + * + * The following constraints must be satisfied: + * - tx_rs_thresh must be greater than 0. + * - tx_rs_thresh must be less than the size of the ring minus 2. + * - tx_rs_thresh must be less than or equal to tx_free_thresh. + * - tx_rs_thresh must be a divisor of the ring size. + * - tx_free_thresh must be greater than 0. + * - tx_free_thresh must be less than the size of the ring minus 3. + * - tx_free_thresh + tx_rs_thresh must not exceed nb_desc. + * + * One descriptor in the TX ring is used as a sentinel to avoid a H/W + * race condition, hence the maximum threshold constraints. When set + * to zero use default values. + */ + tx_free_thresh = (uint16_t)(tx_conf->tx_free_thresh ? + tx_conf->tx_free_thresh : + ICE_DEFAULT_TX_FREE_THRESH); + /* force tx_rs_thresh to adapt an aggresive tx_free_thresh */ + tx_rs_thresh = + (ICE_DEFAULT_TX_RSBIT_THRESH + tx_free_thresh > nb_desc) ? + nb_desc - tx_free_thresh : ICE_DEFAULT_TX_RSBIT_THRESH; + if (tx_conf->tx_rs_thresh) + tx_rs_thresh = tx_conf->tx_rs_thresh; + if (tx_rs_thresh + tx_free_thresh > nb_desc) { + PMD_INIT_LOG(ERR, "tx_rs_thresh + tx_free_thresh must not " + "exceed nb_desc. (tx_rs_thresh=%u " + "tx_free_thresh=%u nb_desc=%u port = %d queue=%d)", + (unsigned int)tx_rs_thresh, + (unsigned int)tx_free_thresh, + (unsigned int)nb_desc, + (int)dev->data->port_id, + (int)queue_idx); + return -EINVAL; + } + if (tx_rs_thresh >= (nb_desc - 2)) { + PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the " + "number of TX descriptors minus 2. " + "(tx_rs_thresh=%u port=%d queue=%d)", + (unsigned int)tx_rs_thresh, + (int)dev->data->port_id, + (int)queue_idx); + return -EINVAL; + } + if (tx_free_thresh >= (nb_desc - 3)) { + PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the " + "tx_free_thresh must be less than the " + "number of TX descriptors minus 3. " + "(tx_free_thresh=%u port=%d queue=%d)", + (unsigned int)tx_free_thresh, + (int)dev->data->port_id, + (int)queue_idx); + return -EINVAL; + } + if (tx_rs_thresh > tx_free_thresh) { + PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than or " + "equal to tx_free_thresh. (tx_free_thresh=%u" + " tx_rs_thresh=%u port=%d queue=%d)", + (unsigned int)tx_free_thresh, + (unsigned int)tx_rs_thresh, + (int)dev->data->port_id, + (int)queue_idx); + return -EINVAL; + } + if ((nb_desc % tx_rs_thresh) != 0) { + PMD_INIT_LOG(ERR, "tx_rs_thresh must be a divisor of the " + "number of TX descriptors. (tx_rs_thresh=%u" + " port=%d queue=%d)", + (unsigned int)tx_rs_thresh, + (int)dev->data->port_id, + (int)queue_idx); + return -EINVAL; + } + if (tx_rs_thresh > 1 && tx_conf->tx_thresh.wthresh != 0) { + PMD_INIT_LOG(ERR, "TX WTHRESH must be set to 0 if " + "tx_rs_thresh is greater than 1. " + "(tx_rs_thresh=%u port=%d queue=%d)", + (unsigned int)tx_rs_thresh, + (int)dev->data->port_id, + (int)queue_idx); + return -EINVAL; + } + + /* Free memory if needed. */ + if (dev->data->tx_queues[queue_idx]) { + ice_tx_queue_release(dev->data->tx_queues[queue_idx]); + dev->data->tx_queues[queue_idx] = NULL; + } + + /* Allocate the TX queue data structure. */ + txq = rte_zmalloc_socket(NULL, + sizeof(struct ice_tx_queue), + RTE_CACHE_LINE_SIZE, + socket_id); + if (!txq) { + PMD_INIT_LOG(ERR, "Failed to allocate memory for " + "tx queue structure"); + return -ENOMEM; + } + + /* Allocate TX hardware ring descriptors. 
*/ + ring_size = sizeof(struct ice_tx_desc) * ICE_MAX_RING_DESC; + ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN); + tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx, + ring_size, ICE_RING_BASE_ALIGN, + socket_id); + if (!tz) { + ice_tx_queue_release(txq); + PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX"); + return -ENOMEM; + } + + txq->nb_tx_desc = nb_desc; + txq->tx_rs_thresh = tx_rs_thresh; + txq->tx_free_thresh = tx_free_thresh; + txq->pthresh = tx_conf->tx_thresh.pthresh; + txq->hthresh = tx_conf->tx_thresh.hthresh; + txq->wthresh = tx_conf->tx_thresh.wthresh; + txq->queue_id = queue_idx; + + txq->reg_idx = vsi->base_queue + queue_idx; + txq->port_id = dev->data->port_id; + txq->offloads = offloads; + txq->vsi = vsi; + txq->tx_deferred_start = tx_conf->tx_deferred_start; + + txq->tx_ring_dma = tz->iova; + txq->tx_ring = tz->addr; + + /* Allocate software ring */ + txq->sw_ring = + rte_zmalloc_socket(NULL, + sizeof(struct ice_tx_entry) * nb_desc, + RTE_CACHE_LINE_SIZE, + socket_id); + if (!txq->sw_ring) { + ice_tx_queue_release(txq); + PMD_INIT_LOG(ERR, "Failed to allocate memory for SW TX ring"); + return -ENOMEM; + } + + ice_reset_tx_queue(txq); + txq->q_set = true; + dev->data->tx_queues[queue_idx] = txq; + txq->tx_rel_mbufs = _ice_tx_queue_release_mbufs; + ice_set_tx_function_flag(dev, txq); + + return 0; +} + +void +ice_tx_queue_release(void *txq) +{ + struct ice_tx_queue *q = (struct ice_tx_queue *)txq; + + if (!q) { + PMD_DRV_LOG(DEBUG, "Pointer to TX queue is NULL"); + return; + } + + q->tx_rel_mbufs(q); + rte_free(q->sw_ring); + rte_free(q); +} + +void +ice_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, + struct rte_eth_rxq_info *qinfo) +{ + struct ice_rx_queue *rxq; + + rxq = dev->data->rx_queues[queue_id]; + + qinfo->mp = rxq->mp; + qinfo->scattered_rx = dev->data->scattered_rx; + qinfo->nb_desc = rxq->nb_rx_desc; + + qinfo->conf.rx_free_thresh = rxq->rx_free_thresh; + qinfo->conf.rx_drop_en = rxq->drop_en; + qinfo->conf.rx_deferred_start = rxq->rx_deferred_start; +} + +void +ice_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, + struct rte_eth_txq_info *qinfo) +{ + struct ice_tx_queue *txq; + + txq = dev->data->tx_queues[queue_id]; + + qinfo->nb_desc = txq->nb_tx_desc; + + qinfo->conf.tx_thresh.pthresh = txq->pthresh; + qinfo->conf.tx_thresh.hthresh = txq->hthresh; + qinfo->conf.tx_thresh.wthresh = txq->wthresh; + + qinfo->conf.tx_free_thresh = txq->tx_free_thresh; + qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh; + qinfo->conf.offloads = txq->offloads; + qinfo->conf.tx_deferred_start = txq->tx_deferred_start; +} + +uint32_t +ice_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id) +{ +#define ICE_RXQ_SCAN_INTERVAL 4 + volatile union ice_rx_flex_desc *rxdp; + struct ice_rx_queue *rxq; + uint16_t desc = 0; + + rxq = dev->data->rx_queues[rx_queue_id]; + rxdp = &rxq->rx_ring[rxq->rx_tail]; + while ((desc < rxq->nb_rx_desc) && + rte_le_to_cpu_16(rxdp->wb.status_error0) & + (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)) { + /** + * Check the DD bit of a rx descriptor of each 4 in a group, + * to avoid checking too frequently and downgrading performance + * too much. 
+ */ + desc += ICE_RXQ_SCAN_INTERVAL; + rxdp += ICE_RXQ_SCAN_INTERVAL; + if (rxq->rx_tail + desc >= rxq->nb_rx_desc) + rxdp = &(rxq->rx_ring[rxq->rx_tail + + desc - rxq->nb_rx_desc]); + } + + return desc; +} + +#define ICE_RX_FLEX_ERR0_BITS \ + ((1 << ICE_RX_FLEX_DESC_STATUS0_HBO_S) | \ + (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) | \ + (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S) | \ + (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S) | \ + (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S) | \ + (1 << ICE_RX_FLEX_DESC_STATUS0_RXE_S)) + +/* Rx L3/L4 checksum */ +static inline uint64_t +ice_rxd_error_to_pkt_flags(uint16_t stat_err0) +{ + uint64_t flags = 0; + + /* check if HW has decoded the packet and checksum */ + if (unlikely(!(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_L3L4P_S)))) + return 0; + + if (likely(!(stat_err0 & ICE_RX_FLEX_ERR0_BITS))) { + flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD); + return flags; + } + + if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S))) + flags |= PKT_RX_IP_CKSUM_BAD; + else + flags |= PKT_RX_IP_CKSUM_GOOD; + + if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S))) + flags |= PKT_RX_L4_CKSUM_BAD; + else + flags |= PKT_RX_L4_CKSUM_GOOD; + + if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S))) + flags |= PKT_RX_EIP_CKSUM_BAD; + + return flags; +} + +static inline void +ice_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union ice_rx_flex_desc *rxdp) +{ + if (rte_le_to_cpu_16(rxdp->wb.status_error0) & + (1 << ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S)) { + mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED; + mb->vlan_tci = + rte_le_to_cpu_16(rxdp->wb.l2tag1); + PMD_RX_LOG(DEBUG, "Descriptor l2tag1: %u", + rte_le_to_cpu_16(rxdp->wb.l2tag1)); + } else { + mb->vlan_tci = 0; + } + +#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC + if (rte_le_to_cpu_16(rxdp->wb.status_error1) & + (1 << ICE_RX_FLEX_DESC_STATUS1_L2TAG2P_S)) { + mb->ol_flags |= PKT_RX_QINQ_STRIPPED | PKT_RX_QINQ | + PKT_RX_VLAN_STRIPPED | PKT_RX_VLAN; + mb->vlan_tci_outer = mb->vlan_tci; + mb->vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd); + PMD_RX_LOG(DEBUG, "Descriptor l2tag2_1: %u, l2tag2_2: %u", + rte_le_to_cpu_16(rxdp->wb.l2tag2_1st), + rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd)); + } else { + mb->vlan_tci_outer = 0; + } +#endif + PMD_RX_LOG(DEBUG, "Mbuf vlan_tci: %u, vlan_tci_outer: %u", + mb->vlan_tci, mb->vlan_tci_outer); +} + +#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC +#define ICE_RX_PROTO_XTR_VALID \ + ((1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S) | \ + (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S)) + +static void +ice_rxd_to_proto_xtr(struct rte_mbuf *mb, + volatile struct ice_32b_rx_flex_desc_comms *desc) +{ + uint16_t stat_err = rte_le_to_cpu_16(desc->status_error1); + uint32_t metadata; + uint64_t ol_flag; + + if (unlikely(!(stat_err & ICE_RX_PROTO_XTR_VALID))) + return; + + ol_flag = ice_rxdid_to_proto_xtr_ol_flag(desc->rxdid); + if (unlikely(!ol_flag)) + return; + + mb->ol_flags |= ol_flag; + + metadata = stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S) ? 
+ rte_le_to_cpu_16(desc->flex_ts.flex.aux0) : 0; + + if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S))) + metadata |= rte_le_to_cpu_16(desc->flex_ts.flex.aux1) << 16; + + *RTE_NET_ICE_DYNF_PROTO_XTR_METADATA(mb) = metadata; +} +#endif + +static inline void +ice_rxd_to_pkt_fields(struct rte_mbuf *mb, + volatile union ice_rx_flex_desc *rxdp) +{ + volatile struct ice_32b_rx_flex_desc_comms *desc = + (volatile struct ice_32b_rx_flex_desc_comms *)rxdp; + uint16_t stat_err; + + stat_err = rte_le_to_cpu_16(desc->status_error0); + if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) { + mb->ol_flags |= PKT_RX_RSS_HASH; + mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash); + } + +#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC + if (desc->flow_id != 0xFFFFFFFF) { + mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID; + mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id); + } + + if (unlikely(rte_net_ice_dynf_proto_xtr_metadata_avail())) + ice_rxd_to_proto_xtr(mb, desc); +#endif +} + +#define ICE_LOOK_AHEAD 8 +#if (ICE_LOOK_AHEAD != 8) +#error "PMD ICE: ICE_LOOK_AHEAD must be 8\n" +#endif +static inline int +ice_rx_scan_hw_ring(struct ice_rx_queue *rxq) +{ + volatile union ice_rx_flex_desc *rxdp; + struct ice_rx_entry *rxep; + struct rte_mbuf *mb; + uint16_t stat_err0; + uint16_t pkt_len; + int32_t s[ICE_LOOK_AHEAD], nb_dd; + int32_t i, j, nb_rx = 0; + uint64_t pkt_flags = 0; + uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl; + + rxdp = &rxq->rx_ring[rxq->rx_tail]; + rxep = &rxq->sw_ring[rxq->rx_tail]; + + stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0); + + /* Make sure there is at least 1 packet to receive */ + if (!(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S))) + return 0; + + /** + * Scan LOOK_AHEAD descriptors at a time to determine which + * descriptors reference packets that are ready to be received. 
+ */ + for (i = 0; i < ICE_RX_MAX_BURST; i += ICE_LOOK_AHEAD, + rxdp += ICE_LOOK_AHEAD, rxep += ICE_LOOK_AHEAD) { + /* Read desc statuses backwards to avoid race condition */ + for (j = ICE_LOOK_AHEAD - 1; j >= 0; j--) + s[j] = rte_le_to_cpu_16(rxdp[j].wb.status_error0); + + rte_smp_rmb(); + + /* Compute how many status bits were set */ + for (j = 0, nb_dd = 0; j < ICE_LOOK_AHEAD; j++) + nb_dd += s[j] & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S); + + nb_rx += nb_dd; + + /* Translate descriptor info to mbuf parameters */ + for (j = 0; j < nb_dd; j++) { + mb = rxep[j].mbuf; + pkt_len = (rte_le_to_cpu_16(rxdp[j].wb.pkt_len) & + ICE_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len; + mb->data_len = pkt_len; + mb->pkt_len = pkt_len; + mb->ol_flags = 0; + stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0); + pkt_flags = ice_rxd_error_to_pkt_flags(stat_err0); + mb->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M & + rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)]; + ice_rxd_to_vlan_tci(mb, &rxdp[j]); + ice_rxd_to_pkt_fields(mb, &rxdp[j]); + + mb->ol_flags |= pkt_flags; + } + + for (j = 0; j < ICE_LOOK_AHEAD; j++) + rxq->rx_stage[i + j] = rxep[j].mbuf; + + if (nb_dd != ICE_LOOK_AHEAD) + break; + } + + /* Clear software ring entries */ + for (i = 0; i < nb_rx; i++) + rxq->sw_ring[rxq->rx_tail + i].mbuf = NULL; + + PMD_RX_LOG(DEBUG, "ice_rx_scan_hw_ring: " + "port_id=%u, queue_id=%u, nb_rx=%d", + rxq->port_id, rxq->queue_id, nb_rx); + + return nb_rx; +} + +static inline uint16_t +ice_rx_fill_from_stage(struct ice_rx_queue *rxq, + struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + uint16_t i; + struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail]; + + nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail); + + for (i = 0; i < nb_pkts; i++) + rx_pkts[i] = stage[i]; + + rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts); + rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts); + + return nb_pkts; +} + +static inline int +ice_rx_alloc_bufs(struct ice_rx_queue *rxq) +{ + volatile union ice_rx_flex_desc *rxdp; + struct ice_rx_entry *rxep; + struct rte_mbuf *mb; + uint16_t alloc_idx, i; + uint64_t dma_addr; + int diag; + + /* Allocate buffers in bulk */ + alloc_idx = (uint16_t)(rxq->rx_free_trigger - + (rxq->rx_free_thresh - 1)); + rxep = &rxq->sw_ring[alloc_idx]; + diag = rte_mempool_get_bulk(rxq->mp, (void *)rxep, + rxq->rx_free_thresh); + if (unlikely(diag != 0)) { + PMD_RX_LOG(ERR, "Failed to get mbufs in bulk"); + return -ENOMEM; + } + + rxdp = &rxq->rx_ring[alloc_idx]; + for (i = 0; i < rxq->rx_free_thresh; i++) { + if (likely(i < (rxq->rx_free_thresh - 1))) + /* Prefetch next mbuf */ + rte_prefetch0(rxep[i + 1].mbuf); + + mb = rxep[i].mbuf; + rte_mbuf_refcnt_set(mb, 1); + mb->next = NULL; + mb->data_off = RTE_PKTMBUF_HEADROOM; + mb->nb_segs = 1; + mb->port = rxq->port_id; + dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb)); + rxdp[i].read.hdr_addr = 0; + rxdp[i].read.pkt_addr = dma_addr; + } + + /* Update rx tail regsiter */ + ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_free_trigger); + + rxq->rx_free_trigger = + (uint16_t)(rxq->rx_free_trigger + rxq->rx_free_thresh); + if (rxq->rx_free_trigger >= rxq->nb_rx_desc) + rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1); + + return 0; +} + +static inline uint16_t +rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) +{ + struct ice_rx_queue *rxq = (struct ice_rx_queue *)rx_queue; + uint16_t nb_rx = 0; + struct rte_eth_dev *dev; + + if (!nb_pkts) + return 0; + + if (rxq->rx_nb_avail) + return 
ice_rx_fill_from_stage(rxq, rx_pkts, nb_pkts); + + nb_rx = (uint16_t)ice_rx_scan_hw_ring(rxq); + rxq->rx_next_avail = 0; + rxq->rx_nb_avail = nb_rx; + rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx); + + if (rxq->rx_tail > rxq->rx_free_trigger) { + if (ice_rx_alloc_bufs(rxq) != 0) { + uint16_t i, j; + + dev = ICE_VSI_TO_ETH_DEV(rxq->vsi); + dev->data->rx_mbuf_alloc_failed += + rxq->rx_free_thresh; + PMD_RX_LOG(DEBUG, "Rx mbuf alloc failed for " + "port_id=%u, queue_id=%u", + rxq->port_id, rxq->queue_id); + rxq->rx_nb_avail = 0; + rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx); + for (i = 0, j = rxq->rx_tail; i < nb_rx; i++, j++) + rxq->sw_ring[j].mbuf = rxq->rx_stage[i]; + + return 0; + } + } + + if (rxq->rx_tail >= rxq->nb_rx_desc) + rxq->rx_tail = 0; + + if (rxq->rx_nb_avail) + return ice_rx_fill_from_stage(rxq, rx_pkts, nb_pkts); + + return 0; +} + +static uint16_t +ice_recv_pkts_bulk_alloc(void *rx_queue, + struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + uint16_t nb_rx = 0; + uint16_t n; + uint16_t count; + + if (unlikely(nb_pkts == 0)) + return nb_rx; + + if (likely(nb_pkts <= ICE_RX_MAX_BURST)) + return rx_recv_pkts(rx_queue, rx_pkts, nb_pkts); + + while (nb_pkts) { + n = RTE_MIN(nb_pkts, ICE_RX_MAX_BURST); + count = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n); + nb_rx = (uint16_t)(nb_rx + count); + nb_pkts = (uint16_t)(nb_pkts - count); + if (count < n) + break; + } + + return nb_rx; +} + +static uint16_t +ice_recv_scattered_pkts(void *rx_queue, + struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + struct ice_rx_queue *rxq = rx_queue; + volatile union ice_rx_flex_desc *rx_ring = rxq->rx_ring; + volatile union ice_rx_flex_desc *rxdp; + union ice_rx_flex_desc rxd; + struct ice_rx_entry *sw_ring = rxq->sw_ring; + struct ice_rx_entry *rxe; + struct rte_mbuf *first_seg = rxq->pkt_first_seg; + struct rte_mbuf *last_seg = rxq->pkt_last_seg; + struct rte_mbuf *nmb; /* new allocated mbuf */ + struct rte_mbuf *rxm; /* pointer to store old mbuf in SW ring */ + uint16_t rx_id = rxq->rx_tail; + uint16_t nb_rx = 0; + uint16_t nb_hold = 0; + uint16_t rx_packet_len; + uint16_t rx_stat_err0; + uint64_t dma_addr; + uint64_t pkt_flags; + uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl; + struct rte_eth_dev *dev; + + while (nb_rx < nb_pkts) { + rxdp = &rx_ring[rx_id]; + rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0); + + /* Check the DD bit first */ + if (!(rx_stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S))) + break; + + /* allocate mbuf */ + nmb = rte_mbuf_raw_alloc(rxq->mp); + if (unlikely(!nmb)) { + dev = ICE_VSI_TO_ETH_DEV(rxq->vsi); + dev->data->rx_mbuf_alloc_failed++; + break; + } + rxd = *rxdp; /* copy descriptor in ring to temp variable*/ + + nb_hold++; + rxe = &sw_ring[rx_id]; /* get corresponding mbuf in SW ring */ + rx_id++; + if (unlikely(rx_id == rxq->nb_rx_desc)) + rx_id = 0; + + /* Prefetch next mbuf */ + rte_prefetch0(sw_ring[rx_id].mbuf); + + /** + * When next RX descriptor is on a cache line boundary, + * prefetch the next 4 RX descriptors and next 8 pointers + * to mbufs. 
+ */ + if ((rx_id & 0x3) == 0) { + rte_prefetch0(&rx_ring[rx_id]); + rte_prefetch0(&sw_ring[rx_id]); + } + + rxm = rxe->mbuf; + rxe->mbuf = nmb; + dma_addr = + rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb)); + + /* Set data buffer address and data length of the mbuf */ + rxdp->read.hdr_addr = 0; + rxdp->read.pkt_addr = dma_addr; + rx_packet_len = rte_le_to_cpu_16(rxd.wb.pkt_len) & + ICE_RX_FLX_DESC_PKT_LEN_M; + rxm->data_len = rx_packet_len; + rxm->data_off = RTE_PKTMBUF_HEADROOM; + + /** + * If this is the first buffer of the received packet, set the + * pointer to the first mbuf of the packet and initialize its + * context. Otherwise, update the total length and the number + * of segments of the current scattered packet, and update the + * pointer to the last mbuf of the current packet. + */ + if (!first_seg) { + first_seg = rxm; + first_seg->nb_segs = 1; + first_seg->pkt_len = rx_packet_len; + } else { + first_seg->pkt_len = + (uint16_t)(first_seg->pkt_len + + rx_packet_len); + first_seg->nb_segs++; + last_seg->next = rxm; + } + + /** + * If this is not the last buffer of the received packet, + * update the pointer to the last mbuf of the current scattered + * packet and continue to parse the RX ring. + */ + if (!(rx_stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_EOF_S))) { + last_seg = rxm; + continue; + } + + /** + * This is the last buffer of the received packet. If the CRC + * is not stripped by the hardware: + * - Subtract the CRC length from the total packet length. + * - If the last buffer only contains the whole CRC or a part + * of it, free the mbuf associated to the last buffer. If part + * of the CRC is also contained in the previous mbuf, subtract + * the length of that CRC part from the data length of the + * previous mbuf. + */ + rxm->next = NULL; + if (unlikely(rxq->crc_len > 0)) { + first_seg->pkt_len -= RTE_ETHER_CRC_LEN; + if (rx_packet_len <= RTE_ETHER_CRC_LEN) { + rte_pktmbuf_free_seg(rxm); + first_seg->nb_segs--; + last_seg->data_len = + (uint16_t)(last_seg->data_len - + (RTE_ETHER_CRC_LEN - rx_packet_len)); + last_seg->next = NULL; + } else + rxm->data_len = (uint16_t)(rx_packet_len - + RTE_ETHER_CRC_LEN); + } + + first_seg->port = rxq->port_id; + first_seg->ol_flags = 0; + first_seg->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M & + rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)]; + ice_rxd_to_vlan_tci(first_seg, &rxd); + ice_rxd_to_pkt_fields(first_seg, &rxd); + pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0); + first_seg->ol_flags |= pkt_flags; + /* Prefetch data of first segment, if configured to do so. */ + rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr, + first_seg->data_off)); + rx_pkts[nb_rx++] = first_seg; + first_seg = NULL; + } + + /* Record index of the next RX descriptor to probe. */ + rxq->rx_tail = rx_id; + rxq->pkt_first_seg = first_seg; + rxq->pkt_last_seg = last_seg; + + /** + * If the number of free RX descriptors is greater than the RX free + * threshold of the queue, advance the Receive Descriptor Tail (RDT) + * register. Update the RDT with the value of the last processed RX + * descriptor minus 1, to guarantee that the RDT register is never + * equal to the RDH register, which creates a "full" ring situtation + * from the hardware point of view. + */ + nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold); + if (nb_hold > rxq->rx_free_thresh) { + rx_id = (uint16_t)(rx_id == 0 ? 
+ (rxq->nb_rx_desc - 1) : (rx_id - 1)); + /* write TAIL register */ + ICE_PCI_REG_WRITE(rxq->qrx_tail, rx_id); + nb_hold = 0; + } + rxq->nb_rx_hold = nb_hold; + + /* return received packet in the burst */ + return nb_rx; +} + +const uint32_t * +ice_dev_supported_ptypes_get(struct rte_eth_dev *dev) +{ + struct ice_adapter *ad = + ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + const uint32_t *ptypes; + + static const uint32_t ptypes_os[] = { + /* refers to ice_get_default_pkt_type() */ + RTE_PTYPE_L2_ETHER, + RTE_PTYPE_L2_ETHER_TIMESYNC, + RTE_PTYPE_L2_ETHER_LLDP, + RTE_PTYPE_L2_ETHER_ARP, + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN, + RTE_PTYPE_L3_IPV6_EXT_UNKNOWN, + RTE_PTYPE_L4_FRAG, + RTE_PTYPE_L4_ICMP, + RTE_PTYPE_L4_NONFRAG, + RTE_PTYPE_L4_SCTP, + RTE_PTYPE_L4_TCP, + RTE_PTYPE_L4_UDP, + RTE_PTYPE_TUNNEL_GRENAT, + RTE_PTYPE_TUNNEL_IP, + RTE_PTYPE_INNER_L2_ETHER, + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN, + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN, + RTE_PTYPE_INNER_L4_FRAG, + RTE_PTYPE_INNER_L4_ICMP, + RTE_PTYPE_INNER_L4_NONFRAG, + RTE_PTYPE_INNER_L4_SCTP, + RTE_PTYPE_INNER_L4_TCP, + RTE_PTYPE_INNER_L4_UDP, + RTE_PTYPE_UNKNOWN + }; + + static const uint32_t ptypes_comms[] = { + /* refers to ice_get_default_pkt_type() */ + RTE_PTYPE_L2_ETHER, + RTE_PTYPE_L2_ETHER_TIMESYNC, + RTE_PTYPE_L2_ETHER_LLDP, + RTE_PTYPE_L2_ETHER_ARP, + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN, + RTE_PTYPE_L3_IPV6_EXT_UNKNOWN, + RTE_PTYPE_L4_FRAG, + RTE_PTYPE_L4_ICMP, + RTE_PTYPE_L4_NONFRAG, + RTE_PTYPE_L4_SCTP, + RTE_PTYPE_L4_TCP, + RTE_PTYPE_L4_UDP, + RTE_PTYPE_TUNNEL_GRENAT, + RTE_PTYPE_TUNNEL_IP, + RTE_PTYPE_INNER_L2_ETHER, + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN, + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN, + RTE_PTYPE_INNER_L4_FRAG, + RTE_PTYPE_INNER_L4_ICMP, + RTE_PTYPE_INNER_L4_NONFRAG, + RTE_PTYPE_INNER_L4_SCTP, + RTE_PTYPE_INNER_L4_TCP, + RTE_PTYPE_INNER_L4_UDP, + RTE_PTYPE_TUNNEL_GTPC, + RTE_PTYPE_TUNNEL_GTPU, + RTE_PTYPE_L2_ETHER_PPPOE, + RTE_PTYPE_UNKNOWN + }; + + if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS) + ptypes = ptypes_comms; + else + ptypes = ptypes_os; + + if (dev->rx_pkt_burst == ice_recv_pkts || + dev->rx_pkt_burst == ice_recv_pkts_bulk_alloc || + dev->rx_pkt_burst == ice_recv_scattered_pkts) + return ptypes; + +#ifdef RTE_ARCH_X86 + if (dev->rx_pkt_burst == ice_recv_pkts_vec || + dev->rx_pkt_burst == ice_recv_scattered_pkts_vec || + dev->rx_pkt_burst == ice_recv_pkts_vec_avx2 || + dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx2) + return ptypes; +#endif + + return NULL; +} + +int +ice_rx_descriptor_status(void *rx_queue, uint16_t offset) +{ + volatile union ice_rx_flex_desc *rxdp; + struct ice_rx_queue *rxq = rx_queue; + uint32_t desc; + + if (unlikely(offset >= rxq->nb_rx_desc)) + return -EINVAL; + + if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold) + return RTE_ETH_RX_DESC_UNAVAIL; + + desc = rxq->rx_tail + offset; + if (desc >= rxq->nb_rx_desc) + desc -= rxq->nb_rx_desc; + + rxdp = &rxq->rx_ring[desc]; + if (rte_le_to_cpu_16(rxdp->wb.status_error0) & + (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)) + return RTE_ETH_RX_DESC_DONE; + + return RTE_ETH_RX_DESC_AVAIL; +} + +int +ice_tx_descriptor_status(void *tx_queue, uint16_t offset) +{ + struct ice_tx_queue *txq = tx_queue; + volatile uint64_t *status; + uint64_t mask, expect; + uint32_t desc; + + if (unlikely(offset >= txq->nb_tx_desc)) + return -EINVAL; + + desc = txq->tx_tail + offset; + /* go to next desc that has the RS bit */ + desc = ((desc + txq->tx_rs_thresh - 1) / txq->tx_rs_thresh) * + txq->tx_rs_thresh; + if (desc >= txq->nb_tx_desc) { + desc -= txq->nb_tx_desc; 
+ if (desc >= txq->nb_tx_desc) + desc -= txq->nb_tx_desc; + } + + status = &txq->tx_ring[desc].cmd_type_offset_bsz; + mask = rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M); + expect = rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE << + ICE_TXD_QW1_DTYPE_S); + if ((*status & mask) == expect) + return RTE_ETH_TX_DESC_DONE; + + return RTE_ETH_TX_DESC_FULL; +} + +void +ice_free_queues(struct rte_eth_dev *dev) +{ + uint16_t i; + + PMD_INIT_FUNC_TRACE(); + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + if (!dev->data->rx_queues[i]) + continue; + ice_rx_queue_release(dev->data->rx_queues[i]); + dev->data->rx_queues[i] = NULL; + } + dev->data->nb_rx_queues = 0; + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + if (!dev->data->tx_queues[i]) + continue; + ice_tx_queue_release(dev->data->tx_queues[i]); + dev->data->tx_queues[i] = NULL; + } + dev->data->nb_tx_queues = 0; +} + +#define ICE_FDIR_NUM_TX_DESC ICE_MIN_RING_DESC +#define ICE_FDIR_NUM_RX_DESC ICE_MIN_RING_DESC + +int +ice_fdir_setup_tx_resources(struct ice_pf *pf) +{ + struct ice_tx_queue *txq; + const struct rte_memzone *tz = NULL; + uint32_t ring_size; + struct rte_eth_dev *dev; + + if (!pf) { + PMD_DRV_LOG(ERR, "PF is not available"); + return -EINVAL; + } + + dev = pf->adapter->eth_dev; + + /* Allocate the TX queue data structure. */ + txq = rte_zmalloc_socket("ice fdir tx queue", + sizeof(struct ice_tx_queue), + RTE_CACHE_LINE_SIZE, + SOCKET_ID_ANY); + if (!txq) { + PMD_DRV_LOG(ERR, "Failed to allocate memory for " + "tx queue structure."); + return -ENOMEM; + } + + /* Allocate TX hardware ring descriptors. */ + ring_size = sizeof(struct ice_tx_desc) * ICE_FDIR_NUM_TX_DESC; + ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN); + + tz = rte_eth_dma_zone_reserve(dev, "fdir_tx_ring", + ICE_FDIR_QUEUE_ID, ring_size, + ICE_RING_BASE_ALIGN, SOCKET_ID_ANY); + if (!tz) { + ice_tx_queue_release(txq); + PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for TX."); + return -ENOMEM; + } + + txq->nb_tx_desc = ICE_FDIR_NUM_TX_DESC; + txq->queue_id = ICE_FDIR_QUEUE_ID; + txq->reg_idx = pf->fdir.fdir_vsi->base_queue; + txq->vsi = pf->fdir.fdir_vsi; + + txq->tx_ring_dma = tz->iova; + txq->tx_ring = (struct ice_tx_desc *)tz->addr; + /* + * don't need to allocate software ring and reset for the fdir + * program queue just set the queue has been configured. + */ + txq->q_set = true; + pf->fdir.txq = txq; + + txq->tx_rel_mbufs = _ice_tx_queue_release_mbufs; + + return ICE_SUCCESS; +} + +int +ice_fdir_setup_rx_resources(struct ice_pf *pf) +{ + struct ice_rx_queue *rxq; + const struct rte_memzone *rz = NULL; + uint32_t ring_size; + struct rte_eth_dev *dev; + + if (!pf) { + PMD_DRV_LOG(ERR, "PF is not available"); + return -EINVAL; + } + + dev = pf->adapter->eth_dev; + + /* Allocate the RX queue data structure. */ + rxq = rte_zmalloc_socket("ice fdir rx queue", + sizeof(struct ice_rx_queue), + RTE_CACHE_LINE_SIZE, + SOCKET_ID_ANY); + if (!rxq) { + PMD_DRV_LOG(ERR, "Failed to allocate memory for " + "rx queue structure."); + return -ENOMEM; + } + + /* Allocate RX hardware ring descriptors. 
*/ + ring_size = sizeof(union ice_32byte_rx_desc) * ICE_FDIR_NUM_RX_DESC; + ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN); + + rz = rte_eth_dma_zone_reserve(dev, "fdir_rx_ring", + ICE_FDIR_QUEUE_ID, ring_size, + ICE_RING_BASE_ALIGN, SOCKET_ID_ANY); + if (!rz) { + ice_rx_queue_release(rxq); + PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX."); + return -ENOMEM; + } + + rxq->nb_rx_desc = ICE_FDIR_NUM_RX_DESC; + rxq->queue_id = ICE_FDIR_QUEUE_ID; + rxq->reg_idx = pf->fdir.fdir_vsi->base_queue; + rxq->vsi = pf->fdir.fdir_vsi; + + rxq->rx_ring_dma = rz->iova; + memset(rz->addr, 0, ICE_FDIR_NUM_RX_DESC * + sizeof(union ice_32byte_rx_desc)); + rxq->rx_ring = (union ice_rx_flex_desc *)rz->addr; + + /* + * Don't need to allocate software ring and reset for the fdir + * rx queue, just set the queue has been configured. + */ + rxq->q_set = true; + pf->fdir.rxq = rxq; + + rxq->rx_rel_mbufs = _ice_rx_queue_release_mbufs; + + return ICE_SUCCESS; +} + +uint16_t +ice_recv_pkts(void *rx_queue, + struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + struct ice_rx_queue *rxq = rx_queue; + volatile union ice_rx_flex_desc *rx_ring = rxq->rx_ring; + volatile union ice_rx_flex_desc *rxdp; + union ice_rx_flex_desc rxd; + struct ice_rx_entry *sw_ring = rxq->sw_ring; + struct ice_rx_entry *rxe; + struct rte_mbuf *nmb; /* new allocated mbuf */ + struct rte_mbuf *rxm; /* pointer to store old mbuf in SW ring */ + uint16_t rx_id = rxq->rx_tail; + uint16_t nb_rx = 0; + uint16_t nb_hold = 0; + uint16_t rx_packet_len; + uint16_t rx_stat_err0; + uint64_t dma_addr; + uint64_t pkt_flags; + uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl; + struct rte_eth_dev *dev; + + while (nb_rx < nb_pkts) { + rxdp = &rx_ring[rx_id]; + rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0); + + /* Check the DD bit first */ + if (!(rx_stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S))) + break; + + /* allocate mbuf */ + nmb = rte_mbuf_raw_alloc(rxq->mp); + if (unlikely(!nmb)) { + dev = ICE_VSI_TO_ETH_DEV(rxq->vsi); + dev->data->rx_mbuf_alloc_failed++; + break; + } + rxd = *rxdp; /* copy descriptor in ring to temp variable*/ + + nb_hold++; + rxe = &sw_ring[rx_id]; /* get corresponding mbuf in SW ring */ + rx_id++; + if (unlikely(rx_id == rxq->nb_rx_desc)) + rx_id = 0; + rxm = rxe->mbuf; + rxe->mbuf = nmb; + dma_addr = + rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb)); + + /** + * fill the read format of descriptor with physic address in + * new allocated mbuf: nmb + */ + rxdp->read.hdr_addr = 0; + rxdp->read.pkt_addr = dma_addr; + + /* calculate rx_packet_len of the received pkt */ + rx_packet_len = (rte_le_to_cpu_16(rxd.wb.pkt_len) & + ICE_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len; + + /* fill old mbuf with received descriptor: rxd */ + rxm->data_off = RTE_PKTMBUF_HEADROOM; + rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM)); + rxm->nb_segs = 1; + rxm->next = NULL; + rxm->pkt_len = rx_packet_len; + rxm->data_len = rx_packet_len; + rxm->port = rxq->port_id; + rxm->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M & + rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)]; + ice_rxd_to_vlan_tci(rxm, &rxd); + ice_rxd_to_pkt_fields(rxm, &rxd); + pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0); + rxm->ol_flags |= pkt_flags; + /* copy old mbuf to rx_pkts */ + rx_pkts[nb_rx++] = rxm; + } + rxq->rx_tail = rx_id; + /** + * If the number of free RX descriptors is greater than the RX free + * threshold of the queue, advance the receive tail register of queue. 
+ * Update that register with the value of the last processed RX + * descriptor minus 1. + */ + nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold); + if (nb_hold > rxq->rx_free_thresh) { + rx_id = (uint16_t)(rx_id == 0 ? + (rxq->nb_rx_desc - 1) : (rx_id - 1)); + /* write TAIL register */ + ICE_PCI_REG_WRITE(rxq->qrx_tail, rx_id); + nb_hold = 0; + } + rxq->nb_rx_hold = nb_hold; + + /* return received packet in the burst */ + return nb_rx; +} + +static inline void +ice_parse_tunneling_params(uint64_t ol_flags, + union ice_tx_offload tx_offload, + uint32_t *cd_tunneling) +{ + /* EIPT: External (outer) IP header type */ + if (ol_flags & PKT_TX_OUTER_IP_CKSUM) + *cd_tunneling |= ICE_TX_CTX_EIPT_IPV4; + else if (ol_flags & PKT_TX_OUTER_IPV4) + *cd_tunneling |= ICE_TX_CTX_EIPT_IPV4_NO_CSUM; + else if (ol_flags & PKT_TX_OUTER_IPV6) + *cd_tunneling |= ICE_TX_CTX_EIPT_IPV6; + + /* EIPLEN: External (outer) IP header length, in DWords */ + *cd_tunneling |= (tx_offload.outer_l3_len >> 2) << + ICE_TXD_CTX_QW0_EIPLEN_S; + + /* L4TUNT: L4 Tunneling Type */ + switch (ol_flags & PKT_TX_TUNNEL_MASK) { + case PKT_TX_TUNNEL_IPIP: + /* for non UDP / GRE tunneling, set to 00b */ + break; + case PKT_TX_TUNNEL_VXLAN: + case PKT_TX_TUNNEL_GTP: + case PKT_TX_TUNNEL_GENEVE: + *cd_tunneling |= ICE_TXD_CTX_UDP_TUNNELING; + break; + case PKT_TX_TUNNEL_GRE: + *cd_tunneling |= ICE_TXD_CTX_GRE_TUNNELING; + break; + default: + PMD_TX_LOG(ERR, "Tunnel type not supported"); + return; + } + + /* L4TUNLEN: L4 Tunneling Length, in Words + * + * We depend on app to set rte_mbuf.l2_len correctly. + * For IP in GRE it should be set to the length of the GRE + * header; + * For MAC in GRE or MAC in UDP it should be set to the length + * of the GRE or UDP headers plus the inner MAC up to including + * its last Ethertype. + * If MPLS labels exists, it should include them as well. 
+ */ + *cd_tunneling |= (tx_offload.l2_len >> 1) << + ICE_TXD_CTX_QW0_NATLEN_S; + + if ((ol_flags & PKT_TX_OUTER_UDP_CKSUM) && + (ol_flags & PKT_TX_OUTER_IP_CKSUM) && + (*cd_tunneling & ICE_TXD_CTX_UDP_TUNNELING)) + *cd_tunneling |= ICE_TXD_CTX_QW0_L4T_CS_M; +} + +static inline void +ice_txd_enable_checksum(uint64_t ol_flags, + uint32_t *td_cmd, + uint32_t *td_offset, + union ice_tx_offload tx_offload) +{ + /* Set MACLEN */ + if (ol_flags & PKT_TX_TUNNEL_MASK) + *td_offset |= (tx_offload.outer_l2_len >> 1) + << ICE_TX_DESC_LEN_MACLEN_S; + else + *td_offset |= (tx_offload.l2_len >> 1) + << ICE_TX_DESC_LEN_MACLEN_S; + + /* Enable L3 checksum offloads */ + if (ol_flags & PKT_TX_IP_CKSUM) { + *td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM; + *td_offset |= (tx_offload.l3_len >> 2) << + ICE_TX_DESC_LEN_IPLEN_S; + } else if (ol_flags & PKT_TX_IPV4) { + *td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4; + *td_offset |= (tx_offload.l3_len >> 2) << + ICE_TX_DESC_LEN_IPLEN_S; + } else if (ol_flags & PKT_TX_IPV6) { + *td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV6; + *td_offset |= (tx_offload.l3_len >> 2) << + ICE_TX_DESC_LEN_IPLEN_S; + } + + if (ol_flags & PKT_TX_TCP_SEG) { + *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP; + *td_offset |= (tx_offload.l4_len >> 2) << + ICE_TX_DESC_LEN_L4_LEN_S; + return; + } + + /* Enable L4 checksum offloads */ + switch (ol_flags & PKT_TX_L4_MASK) { + case PKT_TX_TCP_CKSUM: + *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP; + *td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) << + ICE_TX_DESC_LEN_L4_LEN_S; + break; + case PKT_TX_SCTP_CKSUM: + *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP; + *td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) << + ICE_TX_DESC_LEN_L4_LEN_S; + break; + case PKT_TX_UDP_CKSUM: + *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP; + *td_offset |= (sizeof(struct rte_udp_hdr) >> 2) << + ICE_TX_DESC_LEN_L4_LEN_S; + break; + default: + break; + } +} + +static inline int +ice_xmit_cleanup(struct ice_tx_queue *txq) +{ + struct ice_tx_entry *sw_ring = txq->sw_ring; + volatile struct ice_tx_desc *txd = txq->tx_ring; + uint16_t last_desc_cleaned = txq->last_desc_cleaned; + uint16_t nb_tx_desc = txq->nb_tx_desc; + uint16_t desc_to_clean_to; + uint16_t nb_tx_to_clean; + + /* Determine the last descriptor needing to be cleaned */ + desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh); + if (desc_to_clean_to >= nb_tx_desc) + desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc); + + /* Check to make sure the last descriptor to clean is done */ + desc_to_clean_to = sw_ring[desc_to_clean_to].last_id; + if (!(txd[desc_to_clean_to].cmd_type_offset_bsz & + rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))) { + PMD_TX_FREE_LOG(DEBUG, "TX descriptor %4u is not done " + "(port=%d queue=%d) value=0x%"PRIx64"\n", + desc_to_clean_to, + txq->port_id, txq->queue_id, + txd[desc_to_clean_to].cmd_type_offset_bsz); + /* Failed to clean any descriptors */ + return -1; + } + + /* Figure out how many descriptors will be cleaned */ + if (last_desc_cleaned > desc_to_clean_to) + nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) + + desc_to_clean_to); + else + nb_tx_to_clean = (uint16_t)(desc_to_clean_to - + last_desc_cleaned); + + /* The last descriptor to clean is done, so that means all the + * descriptors from the last descriptor that was cleaned + * up to the last descriptor with the RS bit set + * are done. Only reset the threshold descriptor. 
+ */ + txd[desc_to_clean_to].cmd_type_offset_bsz = 0; + + /* Update the txq to reflect the last descriptor that was cleaned */ + txq->last_desc_cleaned = desc_to_clean_to; + txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean); + + return 0; +} + +/* Construct the tx flags */ +static inline uint64_t +ice_build_ctob(uint32_t td_cmd, + uint32_t td_offset, + uint16_t size, + uint32_t td_tag) +{ + return rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA | + ((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) | + ((uint64_t)td_offset << ICE_TXD_QW1_OFFSET_S) | + ((uint64_t)size << ICE_TXD_QW1_TX_BUF_SZ_S) | + ((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S)); +} + +/* Check if the context descriptor is needed for TX offloading */ +static inline uint16_t +ice_calc_context_desc(uint64_t flags) +{ + static uint64_t mask = PKT_TX_TCP_SEG | + PKT_TX_QINQ | + PKT_TX_OUTER_IP_CKSUM | + PKT_TX_TUNNEL_MASK; + + return (flags & mask) ? 1 : 0; +} + +/* set ice TSO context descriptor */ +static inline uint64_t +ice_set_tso_ctx(struct rte_mbuf *mbuf, union ice_tx_offload tx_offload) +{ + uint64_t ctx_desc = 0; + uint32_t cd_cmd, hdr_len, cd_tso_len; + + if (!tx_offload.l4_len) { + PMD_TX_LOG(DEBUG, "L4 length set to 0"); + return ctx_desc; + } + + hdr_len = tx_offload.l2_len + tx_offload.l3_len + tx_offload.l4_len; + hdr_len += (mbuf->ol_flags & PKT_TX_TUNNEL_MASK) ? + tx_offload.outer_l2_len + tx_offload.outer_l3_len : 0; + + cd_cmd = ICE_TX_CTX_DESC_TSO; + cd_tso_len = mbuf->pkt_len - hdr_len; + ctx_desc |= ((uint64_t)cd_cmd << ICE_TXD_CTX_QW1_CMD_S) | + ((uint64_t)cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) | + ((uint64_t)mbuf->tso_segsz << ICE_TXD_CTX_QW1_MSS_S); + + return ctx_desc; +} + +/* HW requires that TX buffer size ranges from 1B up to (16K-1)B. */ +#define ICE_MAX_DATA_PER_TXD \ + (ICE_TXD_QW1_TX_BUF_SZ_M >> ICE_TXD_QW1_TX_BUF_SZ_S) +/* Calculate the number of TX descriptors needed for each pkt */ +static inline uint16_t +ice_calc_pkt_desc(struct rte_mbuf *tx_pkt) +{ + struct rte_mbuf *txd = tx_pkt; + uint16_t count = 0; + + while (txd != NULL) { + count += DIV_ROUND_UP(txd->data_len, ICE_MAX_DATA_PER_TXD); + txd = txd->next; + } + + return count; +} + +uint16_t +ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) +{ + struct ice_tx_queue *txq; + volatile struct ice_tx_desc *tx_ring; + volatile struct ice_tx_desc *txd; + struct ice_tx_entry *sw_ring; + struct ice_tx_entry *txe, *txn; + struct rte_mbuf *tx_pkt; + struct rte_mbuf *m_seg; + uint32_t cd_tunneling_params; + uint16_t tx_id; + uint16_t nb_tx; + uint16_t nb_used; + uint16_t nb_ctx; + uint32_t td_cmd = 0; + uint32_t td_offset = 0; + uint32_t td_tag = 0; + uint16_t tx_last; + uint16_t slen; + uint64_t buf_dma_addr; + uint64_t ol_flags; + union ice_tx_offload tx_offload = {0}; + + txq = tx_queue; + sw_ring = txq->sw_ring; + tx_ring = txq->tx_ring; + tx_id = txq->tx_tail; + txe = &sw_ring[tx_id]; + + /* Check if the descriptor ring needs to be cleaned. */ + if (txq->nb_tx_free < txq->tx_free_thresh) + (void)ice_xmit_cleanup(txq); + + for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) { + tx_pkt = *tx_pkts++; + + td_cmd = 0; + ol_flags = tx_pkt->ol_flags; + tx_offload.l2_len = tx_pkt->l2_len; + tx_offload.l3_len = tx_pkt->l3_len; + tx_offload.outer_l2_len = tx_pkt->outer_l2_len; + tx_offload.outer_l3_len = tx_pkt->outer_l3_len; + tx_offload.l4_len = tx_pkt->l4_len; + tx_offload.tso_segsz = tx_pkt->tso_segsz; + /* Calculate the number of context descriptors needed. 
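+ * A context descriptor is required for TSO, QinQ, outer IP checksum
+ * or tunneling offloads (see ice_calc_context_desc()).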
*/ + nb_ctx = ice_calc_context_desc(ol_flags); + + /* The number of descriptors that must be allocated for + * a packet equals to the number of the segments of that + * packet plus the number of context descriptor if needed. + * Recalculate the needed tx descs when TSO enabled in case + * the mbuf data size exceeds max data size that hw allows + * per tx desc. + */ + if (ol_flags & PKT_TX_TCP_SEG) + nb_used = (uint16_t)(ice_calc_pkt_desc(tx_pkt) + + nb_ctx); + else + nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx); + tx_last = (uint16_t)(tx_id + nb_used - 1); + + /* Circular ring */ + if (tx_last >= txq->nb_tx_desc) + tx_last = (uint16_t)(tx_last - txq->nb_tx_desc); + + if (nb_used > txq->nb_tx_free) { + if (ice_xmit_cleanup(txq) != 0) { + if (nb_tx == 0) + return 0; + goto end_of_tx; + } + if (unlikely(nb_used > txq->tx_rs_thresh)) { + while (nb_used > txq->nb_tx_free) { + if (ice_xmit_cleanup(txq) != 0) { + if (nb_tx == 0) + return 0; + goto end_of_tx; + } + } + } + } + + /* Descriptor based VLAN insertion */ + if (ol_flags & (PKT_TX_VLAN | PKT_TX_QINQ)) { + td_cmd |= ICE_TX_DESC_CMD_IL2TAG1; + td_tag = tx_pkt->vlan_tci; + } + + /* Fill in tunneling parameters if necessary */ + cd_tunneling_params = 0; + if (ol_flags & PKT_TX_TUNNEL_MASK) + ice_parse_tunneling_params(ol_flags, tx_offload, + &cd_tunneling_params); + + /* Enable checksum offloading */ + if (ol_flags & ICE_TX_CKSUM_OFFLOAD_MASK) { + ice_txd_enable_checksum(ol_flags, &td_cmd, + &td_offset, tx_offload); + } + + if (nb_ctx) { + /* Setup TX context descriptor if required */ + volatile struct ice_tx_ctx_desc *ctx_txd = + (volatile struct ice_tx_ctx_desc *) + &tx_ring[tx_id]; + uint16_t cd_l2tag2 = 0; + uint64_t cd_type_cmd_tso_mss = ICE_TX_DESC_DTYPE_CTX; + + txn = &sw_ring[txe->next_id]; + RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf); + if (txe->mbuf) { + rte_pktmbuf_free_seg(txe->mbuf); + txe->mbuf = NULL; + } + + if (ol_flags & PKT_TX_TCP_SEG) + cd_type_cmd_tso_mss |= + ice_set_tso_ctx(tx_pkt, tx_offload); + + ctx_txd->tunneling_params = + rte_cpu_to_le_32(cd_tunneling_params); + + /* TX context descriptor based double VLAN insert */ + if (ol_flags & PKT_TX_QINQ) { + cd_l2tag2 = tx_pkt->vlan_tci_outer; + cd_type_cmd_tso_mss |= + ((uint64_t)ICE_TX_CTX_DESC_IL2TAG2 << + ICE_TXD_CTX_QW1_CMD_S); + } + ctx_txd->l2tag2 = rte_cpu_to_le_16(cd_l2tag2); + ctx_txd->qw1 = + rte_cpu_to_le_64(cd_type_cmd_tso_mss); + + txe->last_id = tx_last; + tx_id = txe->next_id; + txe = txn; + } + m_seg = tx_pkt; + + do { + txd = &tx_ring[tx_id]; + txn = &sw_ring[txe->next_id]; + + if (txe->mbuf) + rte_pktmbuf_free_seg(txe->mbuf); + txe->mbuf = m_seg; + + /* Setup TX Descriptor */ + slen = m_seg->data_len; + buf_dma_addr = rte_mbuf_data_iova(m_seg); + + while ((ol_flags & PKT_TX_TCP_SEG) && + unlikely(slen > ICE_MAX_DATA_PER_TXD)) { + txd->buf_addr = rte_cpu_to_le_64(buf_dma_addr); + txd->cmd_type_offset_bsz = + rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA | + ((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) | + ((uint64_t)td_offset << ICE_TXD_QW1_OFFSET_S) | + ((uint64_t)ICE_MAX_DATA_PER_TXD << + ICE_TXD_QW1_TX_BUF_SZ_S) | + ((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S)); + + buf_dma_addr += ICE_MAX_DATA_PER_TXD; + slen -= ICE_MAX_DATA_PER_TXD; + + txe->last_id = tx_last; + tx_id = txe->next_id; + txe = txn; + txd = &tx_ring[tx_id]; + txn = &sw_ring[txe->next_id]; + } + + txd->buf_addr = rte_cpu_to_le_64(buf_dma_addr); + txd->cmd_type_offset_bsz = + rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA | + ((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) | + ((uint64_t)td_offset << 
ICE_TXD_QW1_OFFSET_S) | + ((uint64_t)slen << ICE_TXD_QW1_TX_BUF_SZ_S) | + ((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S)); + + txe->last_id = tx_last; + tx_id = txe->next_id; + txe = txn; + m_seg = m_seg->next; + } while (m_seg); + + /* fill the last descriptor with End of Packet (EOP) bit */ + td_cmd |= ICE_TX_DESC_CMD_EOP; + txq->nb_tx_used = (uint16_t)(txq->nb_tx_used + nb_used); + txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used); + + /* set RS bit on the last descriptor of one packet */ + if (txq->nb_tx_used >= txq->tx_rs_thresh) { + PMD_TX_FREE_LOG(DEBUG, + "Setting RS bit on TXD id=" + "%4u (port=%d queue=%d)", + tx_last, txq->port_id, txq->queue_id); + + td_cmd |= ICE_TX_DESC_CMD_RS; + + /* Update txq RS bit counters */ + txq->nb_tx_used = 0; + } + txd->cmd_type_offset_bsz |= + rte_cpu_to_le_64(((uint64_t)td_cmd) << + ICE_TXD_QW1_CMD_S); + } +end_of_tx: + /* update Tail register */ + ICE_PCI_REG_WRITE(txq->qtx_tail, tx_id); + txq->tx_tail = tx_id; + + return nb_tx; +} + +static __rte_always_inline int +ice_tx_free_bufs(struct ice_tx_queue *txq) +{ + struct ice_tx_entry *txep; + uint16_t i; + + if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz & + rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) != + rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE)) + return 0; + + txep = &txq->sw_ring[txq->tx_next_dd - (txq->tx_rs_thresh - 1)]; + + for (i = 0; i < txq->tx_rs_thresh; i++) + rte_prefetch0((txep + i)->mbuf); + + if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) { + for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) { + rte_mempool_put(txep->mbuf->pool, txep->mbuf); + txep->mbuf = NULL; + } + } else { + for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) { + rte_pktmbuf_free_seg(txep->mbuf); + txep->mbuf = NULL; + } + } + + txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh); + txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh); + if (txq->tx_next_dd >= txq->nb_tx_desc) + txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1); + + return txq->tx_rs_thresh; +} + +static int +ice_tx_done_cleanup_full(struct ice_tx_queue *txq, + uint32_t free_cnt) +{ + struct ice_tx_entry *swr_ring = txq->sw_ring; + uint16_t i, tx_last, tx_id; + uint16_t nb_tx_free_last; + uint16_t nb_tx_to_clean; + uint32_t pkt_cnt; + + /* Start free mbuf from the next of tx_tail */ + tx_last = txq->tx_tail; + tx_id = swr_ring[tx_last].next_id; + + if (txq->nb_tx_free == 0 && ice_xmit_cleanup(txq)) + return 0; + + nb_tx_to_clean = txq->nb_tx_free; + nb_tx_free_last = txq->nb_tx_free; + if (!free_cnt) + free_cnt = txq->nb_tx_desc; + + /* Loop through swr_ring to count the amount of + * freeable mubfs and packets. 
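+ * A packet is counted once its last segment is freed, i.e. when
+ * swr_ring[tx_id].last_id == tx_id.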
+ */ + for (pkt_cnt = 0; pkt_cnt < free_cnt; ) { + for (i = 0; i < nb_tx_to_clean && + pkt_cnt < free_cnt && + tx_id != tx_last; i++) { + if (swr_ring[tx_id].mbuf != NULL) { + rte_pktmbuf_free_seg(swr_ring[tx_id].mbuf); + swr_ring[tx_id].mbuf = NULL; + + /* + * last segment in the packet, + * increment packet count + */ + pkt_cnt += (swr_ring[tx_id].last_id == tx_id); + } + + tx_id = swr_ring[tx_id].next_id; + } + + if (txq->tx_rs_thresh > txq->nb_tx_desc - + txq->nb_tx_free || tx_id == tx_last) + break; + + if (pkt_cnt < free_cnt) { + if (ice_xmit_cleanup(txq)) + break; + + nb_tx_to_clean = txq->nb_tx_free - nb_tx_free_last; + nb_tx_free_last = txq->nb_tx_free; + } + } + + return (int)pkt_cnt; +} + +#ifdef RTE_ARCH_X86 +static int +ice_tx_done_cleanup_vec(struct ice_tx_queue *txq __rte_unused, + uint32_t free_cnt __rte_unused) +{ + return -ENOTSUP; +} +#endif + +static int +ice_tx_done_cleanup_simple(struct ice_tx_queue *txq, + uint32_t free_cnt) +{ + int i, n, cnt; + + if (free_cnt == 0 || free_cnt > txq->nb_tx_desc) + free_cnt = txq->nb_tx_desc; + + cnt = free_cnt - free_cnt % txq->tx_rs_thresh; + + for (i = 0; i < cnt; i += n) { + if (txq->nb_tx_desc - txq->nb_tx_free < txq->tx_rs_thresh) + break; + + n = ice_tx_free_bufs(txq); + + if (n == 0) + break; + } + + return i; +} + +int +ice_tx_done_cleanup(void *txq, uint32_t free_cnt) +{ + struct ice_tx_queue *q = (struct ice_tx_queue *)txq; + struct rte_eth_dev *dev = &rte_eth_devices[q->port_id]; + struct ice_adapter *ad = + ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + +#ifdef RTE_ARCH_X86 + if (ad->tx_vec_allowed) + return ice_tx_done_cleanup_vec(q, free_cnt); +#endif + if (ad->tx_simple_allowed) + return ice_tx_done_cleanup_simple(q, free_cnt); + else + return ice_tx_done_cleanup_full(q, free_cnt); +} + +/* Populate 4 descriptors with data from 4 mbufs */ +static inline void +tx4(volatile struct ice_tx_desc *txdp, struct rte_mbuf **pkts) +{ + uint64_t dma_addr; + uint32_t i; + + for (i = 0; i < 4; i++, txdp++, pkts++) { + dma_addr = rte_mbuf_data_iova(*pkts); + txdp->buf_addr = rte_cpu_to_le_64(dma_addr); + txdp->cmd_type_offset_bsz = + ice_build_ctob((uint32_t)ICE_TD_CMD, 0, + (*pkts)->data_len, 0); + } +} + +/* Populate 1 descriptor with data from 1 mbuf */ +static inline void +tx1(volatile struct ice_tx_desc *txdp, struct rte_mbuf **pkts) +{ + uint64_t dma_addr; + + dma_addr = rte_mbuf_data_iova(*pkts); + txdp->buf_addr = rte_cpu_to_le_64(dma_addr); + txdp->cmd_type_offset_bsz = + ice_build_ctob((uint32_t)ICE_TD_CMD, 0, + (*pkts)->data_len, 0); +} + +static inline void +ice_tx_fill_hw_ring(struct ice_tx_queue *txq, struct rte_mbuf **pkts, + uint16_t nb_pkts) +{ + volatile struct ice_tx_desc *txdp = &txq->tx_ring[txq->tx_tail]; + struct ice_tx_entry *txep = &txq->sw_ring[txq->tx_tail]; + const int N_PER_LOOP = 4; + const int N_PER_LOOP_MASK = N_PER_LOOP - 1; + int mainpart, leftover; + int i, j; + + /** + * Process most of the packets in chunks of N pkts. Any + * leftover packets will get processed one at a time. 
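+ * With N_PER_LOOP = 4, e.g. nb_pkts = 7 gives mainpart = 4 and
+ * leftover = 3.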
+ */ + mainpart = nb_pkts & ((uint32_t)~N_PER_LOOP_MASK); + leftover = nb_pkts & ((uint32_t)N_PER_LOOP_MASK); + for (i = 0; i < mainpart; i += N_PER_LOOP) { + /* Copy N mbuf pointers to the S/W ring */ + for (j = 0; j < N_PER_LOOP; ++j) + (txep + i + j)->mbuf = *(pkts + i + j); + tx4(txdp + i, pkts + i); + } + + if (unlikely(leftover > 0)) { + for (i = 0; i < leftover; ++i) { + (txep + mainpart + i)->mbuf = *(pkts + mainpart + i); + tx1(txdp + mainpart + i, pkts + mainpart + i); + } + } +} + +static inline uint16_t +tx_xmit_pkts(struct ice_tx_queue *txq, + struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + volatile struct ice_tx_desc *txr = txq->tx_ring; + uint16_t n = 0; + + /** + * Begin scanning the H/W ring for done descriptors when the number + * of available descriptors drops below tx_free_thresh. For each done + * descriptor, free the associated buffer. + */ + if (txq->nb_tx_free < txq->tx_free_thresh) + ice_tx_free_bufs(txq); + + /* Use available descriptor only */ + nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts); + if (unlikely(!nb_pkts)) + return 0; + + txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts); + if ((txq->tx_tail + nb_pkts) > txq->nb_tx_desc) { + n = (uint16_t)(txq->nb_tx_desc - txq->tx_tail); + ice_tx_fill_hw_ring(txq, tx_pkts, n); + txr[txq->tx_next_rs].cmd_type_offset_bsz |= + rte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) << + ICE_TXD_QW1_CMD_S); + txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1); + txq->tx_tail = 0; + } + + /* Fill hardware descriptor ring with mbuf data */ + ice_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n)); + txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n)); + + /* Determin if RS bit needs to be set */ + if (txq->tx_tail > txq->tx_next_rs) { + txr[txq->tx_next_rs].cmd_type_offset_bsz |= + rte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) << + ICE_TXD_QW1_CMD_S); + txq->tx_next_rs = + (uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh); + if (txq->tx_next_rs >= txq->nb_tx_desc) + txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1); + } + + if (txq->tx_tail >= txq->nb_tx_desc) + txq->tx_tail = 0; + + /* Update the tx tail register */ + ICE_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail); + + return nb_pkts; +} + +static uint16_t +ice_xmit_pkts_simple(void *tx_queue, + struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + uint16_t nb_tx = 0; + + if (likely(nb_pkts <= ICE_TX_MAX_BURST)) + return tx_xmit_pkts((struct ice_tx_queue *)tx_queue, + tx_pkts, nb_pkts); + + while (nb_pkts) { + uint16_t ret, num = (uint16_t)RTE_MIN(nb_pkts, + ICE_TX_MAX_BURST); + + ret = tx_xmit_pkts((struct ice_tx_queue *)tx_queue, + &tx_pkts[nb_tx], num); + nb_tx = (uint16_t)(nb_tx + ret); + nb_pkts = (uint16_t)(nb_pkts - ret); + if (ret < num) + break; + } + + return nb_tx; +} + +void __rte_cold +ice_set_rx_function(struct rte_eth_dev *dev) +{ + PMD_INIT_FUNC_TRACE(); + struct ice_adapter *ad = + ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); +#ifdef RTE_ARCH_X86 + struct ice_rx_queue *rxq; + int i; + bool use_avx2 = false; + + if (rte_eal_process_type() == RTE_PROC_PRIMARY) { + if (!ice_rx_vec_dev_check(dev) && ad->rx_bulk_alloc_allowed) { + ad->rx_vec_allowed = true; + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = dev->data->rx_queues[i]; + if (rxq && ice_rxq_vec_setup(rxq)) { + ad->rx_vec_allowed = false; + break; + } + } + + if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 || + rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) + use_avx2 = true; + + } else { + ad->rx_vec_allowed = false; + } + } + + if (ad->rx_vec_allowed) { 
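+ /* Vector Rx path: pick AVX2 or SSE, scattered or non-scattered */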
+ if (dev->data->scattered_rx) { + PMD_DRV_LOG(DEBUG, + "Using %sVector Scattered Rx (port %d).", + use_avx2 ? "avx2 " : "", + dev->data->port_id); + dev->rx_pkt_burst = use_avx2 ? + ice_recv_scattered_pkts_vec_avx2 : + ice_recv_scattered_pkts_vec; + } else { + PMD_DRV_LOG(DEBUG, "Using %sVector Rx (port %d).", + use_avx2 ? "avx2 " : "", + dev->data->port_id); + dev->rx_pkt_burst = use_avx2 ? + ice_recv_pkts_vec_avx2 : + ice_recv_pkts_vec; + } + return; + } + +#endif + + if (dev->data->scattered_rx) { + /* Set the non-LRO scattered function */ + PMD_INIT_LOG(DEBUG, + "Using a Scattered function on port %d.", + dev->data->port_id); + dev->rx_pkt_burst = ice_recv_scattered_pkts; + } else if (ad->rx_bulk_alloc_allowed) { + PMD_INIT_LOG(DEBUG, + "Rx Burst Bulk Alloc Preconditions are " + "satisfied. Rx Burst Bulk Alloc function " + "will be used on port %d.", + dev->data->port_id); + dev->rx_pkt_burst = ice_recv_pkts_bulk_alloc; + } else { + PMD_INIT_LOG(DEBUG, + "Rx Burst Bulk Alloc Preconditions are not " + "satisfied, Normal Rx will be used on port %d.", + dev->data->port_id); + dev->rx_pkt_burst = ice_recv_pkts; + } +} + +static const struct { + eth_rx_burst_t pkt_burst; + const char *info; +} ice_rx_burst_infos[] = { + { ice_recv_scattered_pkts, "Scalar Scattered" }, + { ice_recv_pkts_bulk_alloc, "Scalar Bulk Alloc" }, + { ice_recv_pkts, "Scalar" }, +#ifdef RTE_ARCH_X86 + { ice_recv_scattered_pkts_vec_avx2, "Vector AVX2 Scattered" }, + { ice_recv_pkts_vec_avx2, "Vector AVX2" }, + { ice_recv_scattered_pkts_vec, "Vector SSE Scattered" }, + { ice_recv_pkts_vec, "Vector SSE" }, +#endif +}; + +int +ice_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id, + struct rte_eth_burst_mode *mode) +{ + eth_rx_burst_t pkt_burst = dev->rx_pkt_burst; + int ret = -EINVAL; + unsigned int i; + + for (i = 0; i < RTE_DIM(ice_rx_burst_infos); ++i) { + if (pkt_burst == ice_rx_burst_infos[i].pkt_burst) { + snprintf(mode->info, sizeof(mode->info), "%s", + ice_rx_burst_infos[i].info); + ret = 0; + break; + } + } + + return ret; +} + +void __rte_cold +ice_set_tx_function_flag(struct rte_eth_dev *dev, struct ice_tx_queue *txq) +{ + struct ice_adapter *ad = + ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + + /* Use a simple Tx queue if possible (only fast free is allowed) */ + ad->tx_simple_allowed = + (txq->offloads == + (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) && + txq->tx_rs_thresh >= ICE_TX_MAX_BURST); + + if (ad->tx_simple_allowed) + PMD_INIT_LOG(DEBUG, "Simple Tx can be enabled on Tx queue %u.", + txq->queue_id); + else + PMD_INIT_LOG(DEBUG, + "Simple Tx can NOT be enabled on Tx queue %u.", + txq->queue_id); +} + +/********************************************************************* + * + * TX prep functions + * + **********************************************************************/ +/* The default values of TSO MSS */ +#define ICE_MIN_TSO_MSS 64 +#define ICE_MAX_TSO_MSS 9728 +#define ICE_MAX_TSO_FRAME_SIZE 262144 +uint16_t +ice_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + int i, ret; + uint64_t ol_flags; + struct rte_mbuf *m; + + for (i = 0; i < nb_pkts; i++) { + m = tx_pkts[i]; + ol_flags = m->ol_flags; + + if (ol_flags & PKT_TX_TCP_SEG && + (m->tso_segsz < ICE_MIN_TSO_MSS || + m->tso_segsz > ICE_MAX_TSO_MSS || + m->pkt_len > ICE_MAX_TSO_FRAME_SIZE)) { + /** + * MSS outside the range are considered malicious + */ + rte_errno = EINVAL; + return i; + } + +#ifdef RTE_LIBRTE_ETHDEV_DEBUG + ret = rte_validate_tx_offload(m); + if 
(ret != 0) { + rte_errno = -ret; + return i; + } +#endif + ret = rte_net_intel_cksum_prepare(m); + if (ret != 0) { + rte_errno = -ret; + return i; + } + } + return i; +} + +void __rte_cold +ice_set_tx_function(struct rte_eth_dev *dev) +{ + struct ice_adapter *ad = + ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); +#ifdef RTE_ARCH_X86 + struct ice_tx_queue *txq; + int i; + bool use_avx2 = false; + + if (rte_eal_process_type() == RTE_PROC_PRIMARY) { + if (!ice_tx_vec_dev_check(dev)) { + ad->tx_vec_allowed = true; + for (i = 0; i < dev->data->nb_tx_queues; i++) { + txq = dev->data->tx_queues[i]; + if (txq && ice_txq_vec_setup(txq)) { + ad->tx_vec_allowed = false; + break; + } + } + + if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 || + rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) + use_avx2 = true; + + } else { + ad->tx_vec_allowed = false; + } + } + + if (ad->tx_vec_allowed) { + PMD_DRV_LOG(DEBUG, "Using %sVector Tx (port %d).", + use_avx2 ? "avx2 " : "", + dev->data->port_id); + dev->tx_pkt_burst = use_avx2 ? + ice_xmit_pkts_vec_avx2 : + ice_xmit_pkts_vec; + dev->tx_pkt_prepare = NULL; + + return; + } +#endif + + if (ad->tx_simple_allowed) { + PMD_INIT_LOG(DEBUG, "Simple tx finally be used."); + dev->tx_pkt_burst = ice_xmit_pkts_simple; + dev->tx_pkt_prepare = NULL; + } else { + PMD_INIT_LOG(DEBUG, "Normal tx finally be used."); + dev->tx_pkt_burst = ice_xmit_pkts; + dev->tx_pkt_prepare = ice_prep_pkts; + } +} + +static const struct { + eth_tx_burst_t pkt_burst; + const char *info; +} ice_tx_burst_infos[] = { + { ice_xmit_pkts_simple, "Scalar Simple" }, + { ice_xmit_pkts, "Scalar" }, +#ifdef RTE_ARCH_X86 + { ice_xmit_pkts_vec_avx2, "Vector AVX2" }, + { ice_xmit_pkts_vec, "Vector SSE" }, +#endif +}; + +int +ice_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id, + struct rte_eth_burst_mode *mode) +{ + eth_tx_burst_t pkt_burst = dev->tx_pkt_burst; + int ret = -EINVAL; + unsigned int i; + + for (i = 0; i < RTE_DIM(ice_tx_burst_infos); ++i) { + if (pkt_burst == ice_tx_burst_infos[i].pkt_burst) { + snprintf(mode->info, sizeof(mode->info), "%s", + ice_tx_burst_infos[i].info); + ret = 0; + break; + } + } + + return ret; +} + +/* For each value it means, datasheet of hardware can tell more details + * + * @note: fix ice_dev_supported_ptypes_get() if any change here. 
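+ * The index is the hardware packet type reported in the Rx
+ * descriptor; the table below translates it to the matching
+ * RTE_PTYPE_* combination.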
+ */ +static inline uint32_t +ice_get_default_pkt_type(uint16_t ptype) +{ + static const uint32_t type_table[ICE_MAX_PKT_TYPE] + __rte_cache_aligned = { + /* L2 types */ + /* [0] reserved */ + [1] = RTE_PTYPE_L2_ETHER, + [2] = RTE_PTYPE_L2_ETHER_TIMESYNC, + /* [3] - [5] reserved */ + [6] = RTE_PTYPE_L2_ETHER_LLDP, + /* [7] - [10] reserved */ + [11] = RTE_PTYPE_L2_ETHER_ARP, + /* [12] - [21] reserved */ + + /* Non tunneled IPv4 */ + [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_FRAG, + [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_NONFRAG, + [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_UDP, + /* [25] reserved */ + [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_TCP, + [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_SCTP, + [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_ICMP, + + /* IPv4 --> IPv4 */ + [29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [30] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [31] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + /* [32] reserved */ + [33] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_SCTP, + [35] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + + /* IPv4 --> IPv6 */ + [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [37] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [38] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + /* [39] reserved */ + [40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_SCTP, + [42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + + /* IPv4 --> GRE/Teredo/VXLAN */ + [43] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT, + + /* IPv4 --> GRE/Teredo/VXLAN --> IPv4 */ + [44] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + /* [47] reserved */ + 
[48] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_SCTP, + [50] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + + /* IPv4 --> GRE/Teredo/VXLAN --> IPv6 */ + [51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [53] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + /* [54] reserved */ + [55] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [56] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_SCTP, + [57] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + + /* IPv4 --> GRE/Teredo/VXLAN --> MAC */ + [58] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER, + + /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */ + [59] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [60] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + /* [62] reserved */ + [63] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [64] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_SCTP, + [65] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + + /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */ + [66] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [67] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [68] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + /* [69] reserved */ + [70] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | 
RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [71] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_SCTP, + [72] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + /* [73] - [87] reserved */ + + /* Non tunneled IPv6 */ + [88] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_FRAG, + [89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_NONFRAG, + [90] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_UDP, + /* [91] reserved */ + [92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_TCP, + [93] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_SCTP, + [94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_ICMP, + + /* IPv6 --> IPv4 */ + [95] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [97] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + /* [98] reserved */ + [99] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [100] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_SCTP, + [101] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + + /* IPv6 --> IPv6 */ + [102] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [103] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [104] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + /* [105] reserved */ + [106] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [107] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_SCTP, + [108] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + + /* IPv6 --> GRE/Teredo/VXLAN */ + [109] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT, + + /* IPv6 --> GRE/Teredo/VXLAN --> IPv4 */ + [110] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [111] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [112] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + 
RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + /* [113] reserved */ + [114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_SCTP, + [116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + + /* IPv6 --> GRE/Teredo/VXLAN --> IPv6 */ + [117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [118] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [119] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + /* [120] reserved */ + [121] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [122] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_SCTP, + [123] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + + /* IPv6 --> GRE/Teredo/VXLAN --> MAC */ + [124] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER, + + /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */ + [125] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [126] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [127] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + /* [128] reserved */ + [129] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [130] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_SCTP, + [131] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + + /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */ + [132] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [133] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [134] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + 
RTE_PTYPE_INNER_L4_UDP, + /* [135] reserved */ + [136] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [137] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_SCTP, + [138] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + /* [139] - [299] reserved */ + + /* PPPoE */ + [300] = RTE_PTYPE_L2_ETHER_PPPOE, + [301] = RTE_PTYPE_L2_ETHER_PPPOE, + + /* PPPoE --> IPv4 */ + [302] = RTE_PTYPE_L2_ETHER_PPPOE | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_FRAG, + [303] = RTE_PTYPE_L2_ETHER_PPPOE | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_NONFRAG, + [304] = RTE_PTYPE_L2_ETHER_PPPOE | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_UDP, + [305] = RTE_PTYPE_L2_ETHER_PPPOE | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_TCP, + [306] = RTE_PTYPE_L2_ETHER_PPPOE | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_SCTP, + [307] = RTE_PTYPE_L2_ETHER_PPPOE | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_ICMP, + + /* PPPoE --> IPv6 */ + [308] = RTE_PTYPE_L2_ETHER_PPPOE | + RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_FRAG, + [309] = RTE_PTYPE_L2_ETHER_PPPOE | + RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_NONFRAG, + [310] = RTE_PTYPE_L2_ETHER_PPPOE | + RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_UDP, + [311] = RTE_PTYPE_L2_ETHER_PPPOE | + RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_TCP, + [312] = RTE_PTYPE_L2_ETHER_PPPOE | + RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_SCTP, + [313] = RTE_PTYPE_L2_ETHER_PPPOE | + RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_ICMP, + /* [314] - [324] reserved */ + + /* IPv4/IPv6 --> GTPC/GTPU */ + [325] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GTPC, + [326] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GTPC, + [327] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GTPC, + [328] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GTPC, + [329] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GTPU, + [330] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GTPU, + + /* IPv4 --> GTPU --> IPv4 */ + [331] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GTPU | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [332] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GTPU | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [333] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GTPU | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + [334] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GTPU | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [335] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GTPU | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + + /* IPv6 --> GTPU --> IPv4 */ + [336] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GTPU | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [337] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + 
RTE_PTYPE_TUNNEL_GTPU | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [338] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GTPU | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + [339] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GTPU | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [340] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GTPU | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + + /* IPv4 --> GTPU --> IPv6 */ + [341] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GTPU | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [342] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GTPU | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [343] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GTPU | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + [344] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GTPU | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [345] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GTPU | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + + /* IPv6 --> GTPU --> IPv6 */ + [346] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GTPU | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [347] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GTPU | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [348] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GTPU | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + [349] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GTPU | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [350] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GTPU | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + /* All others reserved */ + }; + + return type_table[ptype]; +} + +void __rte_cold +ice_set_default_ptype_table(struct rte_eth_dev *dev) +{ + struct ice_adapter *ad = + ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + int i; + + for (i = 0; i < ICE_MAX_PKT_TYPE; i++) + ad->ptype_tbl[i] = ice_get_default_pkt_type(i); +} + +#define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_S 1 +#define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_M \ + (0x3UL << ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_S) +#define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_ADD 0 +#define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_DEL 0x1 + +#define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_S 4 +#define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_M \ + (1 << ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_S) +#define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_S 5 +#define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_M \ + (1 << ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_S) + +/* + * check the programming status descriptor in rx queue. 
+ * done after Programming Flow Director is programmed on + * tx queue + */ +static inline int +ice_check_fdir_programming_status(struct ice_rx_queue *rxq) +{ + volatile union ice_32byte_rx_desc *rxdp; + uint64_t qword1; + uint32_t rx_status; + uint32_t error; + uint32_t id; + int ret = -EAGAIN; + + rxdp = (volatile union ice_32byte_rx_desc *) + (&rxq->rx_ring[rxq->rx_tail]); + qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len); + rx_status = (qword1 & ICE_RXD_QW1_STATUS_M) + >> ICE_RXD_QW1_STATUS_S; + + if (rx_status & (1 << ICE_RX_DESC_STATUS_DD_S)) { + ret = 0; + error = (qword1 & ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_M) >> + ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_S; + id = (qword1 & ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_M) >> + ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_S; + if (error) { + if (id == ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_ADD) + PMD_DRV_LOG(ERR, "Failed to add FDIR rule."); + else if (id == ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_DEL) + PMD_DRV_LOG(ERR, "Failed to remove FDIR rule."); + ret = -EINVAL; + goto err; + } + error = (qword1 & ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_M) >> + ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_S; + if (error) { + PMD_DRV_LOG(ERR, "Failed to create FDIR profile."); + ret = -EINVAL; + } +err: + rxdp->wb.qword1.status_error_len = 0; + rxq->rx_tail++; + if (unlikely(rxq->rx_tail == rxq->nb_rx_desc)) + rxq->rx_tail = 0; + if (rxq->rx_tail == 0) + ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1); + else + ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_tail - 1); + } + + return ret; +} + +#define ICE_FDIR_MAX_WAIT_US 10000 + +int +ice_fdir_programming(struct ice_pf *pf, struct ice_fltr_desc *fdir_desc) +{ + struct ice_tx_queue *txq = pf->fdir.txq; + struct ice_rx_queue *rxq = pf->fdir.rxq; + volatile struct ice_fltr_desc *fdirdp; + volatile struct ice_tx_desc *txdp; + uint32_t td_cmd; + uint16_t i; + + fdirdp = (volatile struct ice_fltr_desc *) + (&txq->tx_ring[txq->tx_tail]); + fdirdp->qidx_compq_space_stat = fdir_desc->qidx_compq_space_stat; + fdirdp->dtype_cmd_vsi_fdid = fdir_desc->dtype_cmd_vsi_fdid; + + txdp = &txq->tx_ring[txq->tx_tail + 1]; + txdp->buf_addr = rte_cpu_to_le_64(pf->fdir.dma_addr); + td_cmd = ICE_TX_DESC_CMD_EOP | + ICE_TX_DESC_CMD_RS | + ICE_TX_DESC_CMD_DUMMY; + + txdp->cmd_type_offset_bsz = + ice_build_ctob(td_cmd, 0, ICE_FDIR_PKT_LEN, 0); + + txq->tx_tail += 2; + if (txq->tx_tail >= txq->nb_tx_desc) + txq->tx_tail = 0; + /* Update the tx tail register */ + ICE_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail); + for (i = 0; i < ICE_FDIR_MAX_WAIT_US; i++) { + if ((txdp->cmd_type_offset_bsz & + rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) == + rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE)) + break; + rte_delay_us(1); + } + if (i >= ICE_FDIR_MAX_WAIT_US) { + PMD_DRV_LOG(ERR, + "Failed to program FDIR filter: time out to get DD on tx queue."); + return -ETIMEDOUT; + } + + for (; i < ICE_FDIR_MAX_WAIT_US; i++) { + int ret; + + ret = ice_check_fdir_programming_status(rxq); + if (ret == -EAGAIN) + rte_delay_us(1); + else + return ret; + } + + PMD_DRV_LOG(ERR, + "Failed to program FDIR filter: programming status reported."); + return -ETIMEDOUT; + + +} diff --git a/src/spdk/dpdk/drivers/net/ice/ice_rxtx.h b/src/spdk/dpdk/drivers/net/ice/ice_rxtx.h new file mode 100644 index 000000000..2fdcfb7d0 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ice/ice_rxtx.h @@ -0,0 +1,206 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Intel Corporation + */ + +#ifndef _ICE_RXTX_H_ +#define _ICE_RXTX_H_ + +#include "ice_ethdev.h" + +#define 
ICE_ALIGN_RING_DESC 32 +#define ICE_MIN_RING_DESC 64 +#define ICE_MAX_RING_DESC 4096 +#define ICE_DMA_MEM_ALIGN 4096 +#define ICE_RING_BASE_ALIGN 128 + +#define ICE_RX_MAX_BURST 32 +#define ICE_TX_MAX_BURST 32 + +#define ICE_CHK_Q_ENA_COUNT 100 +#define ICE_CHK_Q_ENA_INTERVAL_US 100 + +#ifdef RTE_LIBRTE_ICE_16BYTE_RX_DESC +#define ice_rx_flex_desc ice_16b_rx_flex_desc +#else +#define ice_rx_flex_desc ice_32b_rx_flex_desc +#endif + +#define ICE_SUPPORT_CHAIN_NUM 5 + +#define ICE_TD_CMD ICE_TX_DESC_CMD_EOP + +#define ICE_VPMD_RX_BURST 32 +#define ICE_VPMD_TX_BURST 32 +#define ICE_RXQ_REARM_THRESH 32 +#define ICE_MAX_RX_BURST ICE_RXQ_REARM_THRESH +#define ICE_TX_MAX_FREE_BUF_SZ 64 +#define ICE_DESCS_PER_LOOP 4 + +#define ICE_FDIR_PKT_LEN 512 + +typedef void (*ice_rx_release_mbufs_t)(struct ice_rx_queue *rxq); +typedef void (*ice_tx_release_mbufs_t)(struct ice_tx_queue *txq); + +struct ice_rx_entry { + struct rte_mbuf *mbuf; +}; + +struct ice_rx_queue { + struct rte_mempool *mp; /* mbuf pool to populate RX ring */ + volatile union ice_rx_flex_desc *rx_ring;/* RX ring virtual address */ + rte_iova_t rx_ring_dma; /* RX ring DMA address */ + struct ice_rx_entry *sw_ring; /* address of RX soft ring */ + uint16_t nb_rx_desc; /* number of RX descriptors */ + uint16_t rx_free_thresh; /* max free RX desc to hold */ + uint16_t rx_tail; /* current value of tail */ + uint16_t nb_rx_hold; /* number of held free RX desc */ + struct rte_mbuf *pkt_first_seg; /**< first segment of current packet */ + struct rte_mbuf *pkt_last_seg; /**< last segment of current packet */ + uint16_t rx_nb_avail; /**< number of staged packets ready */ + uint16_t rx_next_avail; /**< index of next staged packets */ + uint16_t rx_free_trigger; /**< triggers rx buffer allocation */ + struct rte_mbuf fake_mbuf; /**< dummy mbuf */ + struct rte_mbuf *rx_stage[ICE_RX_MAX_BURST * 2]; + + uint16_t rxrearm_nb; /**< number of remaining to be re-armed */ + uint16_t rxrearm_start; /**< the idx we start the re-arming from */ + uint64_t mbuf_initializer; /**< value to init mbufs */ + + uint8_t port_id; /* device port ID */ + uint8_t crc_len; /* 0 if CRC stripped, 4 otherwise */ + uint16_t queue_id; /* RX queue index */ + uint16_t reg_idx; /* RX queue register index */ + uint8_t drop_en; /* if not 0, set register bit */ + volatile uint8_t *qrx_tail; /* register address of tail */ + struct ice_vsi *vsi; /* the VSI this queue belongs to */ + uint16_t rx_buf_len; /* The packet buffer size */ + uint16_t rx_hdr_len; /* The header buffer size */ + uint16_t max_pkt_len; /* Maximum packet length */ + bool q_set; /* indicate if rx queue has been configured */ + bool rx_deferred_start; /* don't start this queue in dev start */ + uint8_t proto_xtr; /* Protocol extraction from flexible descriptor */ + ice_rx_release_mbufs_t rx_rel_mbufs; +}; + +struct ice_tx_entry { + struct rte_mbuf *mbuf; + uint16_t next_id; + uint16_t last_id; +}; + +struct ice_tx_queue { + uint16_t nb_tx_desc; /* number of TX descriptors */ + rte_iova_t tx_ring_dma; /* TX ring DMA address */ + volatile struct ice_tx_desc *tx_ring; /* TX ring virtual address */ + struct ice_tx_entry *sw_ring; /* virtual address of SW ring */ + uint16_t tx_tail; /* current value of tail register */ + volatile uint8_t *qtx_tail; /* register address of tail */ + uint16_t nb_tx_used; /* number of TX desc used since RS bit set */ + /* index to last TX descriptor to have been cleaned */ + uint16_t last_desc_cleaned; + /* Total number of TX descriptors ready to be allocated. 
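+ * (i.e. descriptors currently free for new transmits)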
*/ + uint16_t nb_tx_free; + /* Start freeing TX buffers if there are less free descriptors than + * this value. + */ + uint16_t tx_free_thresh; + /* Number of TX descriptors to use before RS bit is set. */ + uint16_t tx_rs_thresh; + uint8_t pthresh; /**< Prefetch threshold register. */ + uint8_t hthresh; /**< Host threshold register. */ + uint8_t wthresh; /**< Write-back threshold reg. */ + uint8_t port_id; /* Device port identifier. */ + uint16_t queue_id; /* TX queue index. */ + uint32_t q_teid; /* TX schedule node id. */ + uint16_t reg_idx; + uint64_t offloads; + struct ice_vsi *vsi; /* the VSI this queue belongs to */ + uint16_t tx_next_dd; + uint16_t tx_next_rs; + bool tx_deferred_start; /* don't start this queue in dev start */ + bool q_set; /* indicate if tx queue has been configured */ + ice_tx_release_mbufs_t tx_rel_mbufs; +}; + +/* Offload features */ +union ice_tx_offload { + uint64_t data; + struct { + uint64_t l2_len:7; /* L2 (MAC) Header Length. */ + uint64_t l3_len:9; /* L3 (IP) Header Length. */ + uint64_t l4_len:8; /* L4 Header Length. */ + uint64_t tso_segsz:16; /* TCP TSO segment size */ + uint64_t outer_l2_len:8; /* outer L2 Header Length */ + uint64_t outer_l3_len:16; /* outer L3 Header Length */ + }; +}; + +int ice_rx_queue_setup(struct rte_eth_dev *dev, + uint16_t queue_idx, + uint16_t nb_desc, + unsigned int socket_id, + const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *mp); +int ice_tx_queue_setup(struct rte_eth_dev *dev, + uint16_t queue_idx, + uint16_t nb_desc, + unsigned int socket_id, + const struct rte_eth_txconf *tx_conf); +int ice_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id); +int ice_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id); +int ice_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id); +int ice_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id); +int ice_fdir_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id); +int ice_fdir_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id); +int ice_fdir_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id); +int ice_fdir_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id); +void ice_rx_queue_release(void *rxq); +void ice_tx_queue_release(void *txq); +void ice_free_queues(struct rte_eth_dev *dev); +int ice_fdir_setup_tx_resources(struct ice_pf *pf); +int ice_fdir_setup_rx_resources(struct ice_pf *pf); +uint16_t ice_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); +uint16_t ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); +void ice_set_rx_function(struct rte_eth_dev *dev); +uint16_t ice_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); +void ice_set_tx_function_flag(struct rte_eth_dev *dev, + struct ice_tx_queue *txq); +void ice_set_tx_function(struct rte_eth_dev *dev); +uint32_t ice_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id); +void ice_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, + struct rte_eth_rxq_info *qinfo); +void ice_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, + struct rte_eth_txq_info *qinfo); +int ice_rx_burst_mode_get(struct rte_eth_dev *dev, uint16_t queue_id, + struct rte_eth_burst_mode *mode); +int ice_tx_burst_mode_get(struct rte_eth_dev *dev, uint16_t queue_id, + struct rte_eth_burst_mode *mode); +int ice_rx_descriptor_status(void *rx_queue, uint16_t offset); +int ice_tx_descriptor_status(void *tx_queue, uint16_t offset); +void ice_set_default_ptype_table(struct 
rte_eth_dev *dev); +const uint32_t *ice_dev_supported_ptypes_get(struct rte_eth_dev *dev); + +int ice_rx_vec_dev_check(struct rte_eth_dev *dev); +int ice_tx_vec_dev_check(struct rte_eth_dev *dev); +int ice_rxq_vec_setup(struct ice_rx_queue *rxq); +int ice_txq_vec_setup(struct ice_tx_queue *txq); +uint16_t ice_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); +uint16_t ice_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); +uint16_t ice_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); +uint16_t ice_recv_pkts_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); +uint16_t ice_recv_scattered_pkts_vec_avx2(void *rx_queue, + struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); +uint16_t ice_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); +int ice_fdir_programming(struct ice_pf *pf, struct ice_fltr_desc *fdir_desc); +int ice_tx_done_cleanup(void *txq, uint32_t free_cnt); + +#endif /* _ICE_RXTX_H_ */ diff --git a/src/spdk/dpdk/drivers/net/ice/ice_rxtx_vec_avx2.c b/src/spdk/dpdk/drivers/net/ice/ice_rxtx_vec_avx2.c new file mode 100644 index 000000000..be50677c2 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ice/ice_rxtx_vec_avx2.c @@ -0,0 +1,838 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019 Intel Corporation + */ + +#include "ice_rxtx_vec_common.h" + +#include + +#ifndef __INTEL_COMPILER +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif + +static inline void +ice_rxq_rearm(struct ice_rx_queue *rxq) +{ + int i; + uint16_t rx_id; + volatile union ice_rx_flex_desc *rxdp; + struct ice_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start]; + + rxdp = rxq->rx_ring + rxq->rxrearm_start; + + /* Pull 'n' more MBUFs into the software ring */ + if (rte_mempool_get_bulk(rxq->mp, + (void *)rxep, + ICE_RXQ_REARM_THRESH) < 0) { + if (rxq->rxrearm_nb + ICE_RXQ_REARM_THRESH >= + rxq->nb_rx_desc) { + __m128i dma_addr0; + + dma_addr0 = _mm_setzero_si128(); + for (i = 0; i < ICE_DESCS_PER_LOOP; i++) { + rxep[i].mbuf = &rxq->fake_mbuf; + _mm_store_si128((__m128i *)&rxdp[i].read, + dma_addr0); + } + } + rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed += + ICE_RXQ_REARM_THRESH; + return; + } + +#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC + struct rte_mbuf *mb0, *mb1; + __m128i dma_addr0, dma_addr1; + __m128i hdr_room = _mm_set_epi64x(RTE_PKTMBUF_HEADROOM, + RTE_PKTMBUF_HEADROOM); + /* Initialize the mbufs in vector, process 2 mbufs in one loop */ + for (i = 0; i < ICE_RXQ_REARM_THRESH; i += 2, rxep += 2) { + __m128i vaddr0, vaddr1; + + mb0 = rxep[0].mbuf; + mb1 = rxep[1].mbuf; + + /* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */ + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_physaddr) != + offsetof(struct rte_mbuf, buf_addr) + 8); + vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr); + vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr); + + /* convert pa to dma_addr hdr/data */ + dma_addr0 = _mm_unpackhi_epi64(vaddr0, vaddr0); + dma_addr1 = _mm_unpackhi_epi64(vaddr1, vaddr1); + + /* add headroom to pa values */ + dma_addr0 = _mm_add_epi64(dma_addr0, hdr_room); + dma_addr1 = _mm_add_epi64(dma_addr1, hdr_room); + + /* flush desc with pa dma_addr */ + _mm_store_si128((__m128i *)&rxdp++->read, dma_addr0); + _mm_store_si128((__m128i *)&rxdp++->read, dma_addr1); + } +#else + struct rte_mbuf *mb0, *mb1, *mb2, *mb3; + __m256i dma_addr0_1, dma_addr2_3; + __m256i hdr_room = _mm256_set1_epi64x(RTE_PKTMBUF_HEADROOM); + /* Initialize the mbufs in vector, 
process 4 mbufs in one loop */ + for (i = 0; i < ICE_RXQ_REARM_THRESH; + i += 4, rxep += 4, rxdp += 4) { + __m128i vaddr0, vaddr1, vaddr2, vaddr3; + __m256i vaddr0_1, vaddr2_3; + + mb0 = rxep[0].mbuf; + mb1 = rxep[1].mbuf; + mb2 = rxep[2].mbuf; + mb3 = rxep[3].mbuf; + + /* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */ + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_physaddr) != + offsetof(struct rte_mbuf, buf_addr) + 8); + vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr); + vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr); + vaddr2 = _mm_loadu_si128((__m128i *)&mb2->buf_addr); + vaddr3 = _mm_loadu_si128((__m128i *)&mb3->buf_addr); + + /** + * merge 0 & 1, by casting 0 to 256-bit and inserting 1 + * into the high lanes. Similarly for 2 & 3 + */ + vaddr0_1 = + _mm256_inserti128_si256(_mm256_castsi128_si256(vaddr0), + vaddr1, 1); + vaddr2_3 = + _mm256_inserti128_si256(_mm256_castsi128_si256(vaddr2), + vaddr3, 1); + + /* convert pa to dma_addr hdr/data */ + dma_addr0_1 = _mm256_unpackhi_epi64(vaddr0_1, vaddr0_1); + dma_addr2_3 = _mm256_unpackhi_epi64(vaddr2_3, vaddr2_3); + + /* add headroom to pa values */ + dma_addr0_1 = _mm256_add_epi64(dma_addr0_1, hdr_room); + dma_addr2_3 = _mm256_add_epi64(dma_addr2_3, hdr_room); + + /* flush desc with pa dma_addr */ + _mm256_store_si256((__m256i *)&rxdp->read, dma_addr0_1); + _mm256_store_si256((__m256i *)&(rxdp + 2)->read, dma_addr2_3); + } + +#endif + + rxq->rxrearm_start += ICE_RXQ_REARM_THRESH; + if (rxq->rxrearm_start >= rxq->nb_rx_desc) + rxq->rxrearm_start = 0; + + rxq->rxrearm_nb -= ICE_RXQ_REARM_THRESH; + + rx_id = (uint16_t)((rxq->rxrearm_start == 0) ? + (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1)); + + /* Update the tail pointer on the NIC */ + ICE_PCI_REG_WRITE(rxq->qrx_tail, rx_id); +} + +static inline uint16_t +_ice_recv_raw_pkts_vec_avx2(struct ice_rx_queue *rxq, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts, uint8_t *split_packet) +{ +#define ICE_DESCS_PER_LOOP_AVX 8 + + const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl; + const __m256i mbuf_init = _mm256_set_epi64x(0, 0, + 0, rxq->mbuf_initializer); + struct ice_rx_entry *sw_ring = &rxq->sw_ring[rxq->rx_tail]; + volatile union ice_rx_flex_desc *rxdp = rxq->rx_ring + rxq->rx_tail; + const int avx_aligned = ((rxq->rx_tail & 1) == 0); + + rte_prefetch0(rxdp); + + /* nb_pkts has to be floor-aligned to ICE_DESCS_PER_LOOP_AVX */ + nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, ICE_DESCS_PER_LOOP_AVX); + + /* See if we need to rearm the RX queue - gives the prefetch a bit + * of time to act + */ + if (rxq->rxrearm_nb > ICE_RXQ_REARM_THRESH) + ice_rxq_rearm(rxq); + + /* Before we start moving massive data around, check to see if + * there is actually a packet available + */ + if (!(rxdp->wb.status_error0 & + rte_cpu_to_le_32(1 << ICE_RX_FLEX_DESC_STATUS0_DD_S))) + return 0; + + /* constants used in processing loop */ + const __m256i crc_adjust = + _mm256_set_epi16 + (/* first descriptor */ + 0, 0, 0, /* ignore non-length fields */ + -rxq->crc_len, /* sub crc on data_len */ + 0, /* ignore high-16bits of pkt_len */ + -rxq->crc_len, /* sub crc on pkt_len */ + 0, 0, /* ignore pkt_type field */ + /* second descriptor */ + 0, 0, 0, /* ignore non-length fields */ + -rxq->crc_len, /* sub crc on data_len */ + 0, /* ignore high-16bits of pkt_len */ + -rxq->crc_len, /* sub crc on pkt_len */ + 0, 0 /* ignore pkt_type field */ + ); + + /* 8 packets DD mask, LSB in each 32-bit value */ + const __m256i dd_check = _mm256_set1_epi32(1); + + /* 8 packets EOP mask, second-LSB in each 32-bit value */ 
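+	/*
+	 * [Editorial sketch, not part of the upstream DPDK patch]
+	 * How these two masks are used further down: hardware sets the
+	 * DD (descriptor done) bit, the LSB of status_error0, on every
+	 * descriptor it has written back, and the EOP (end of packet)
+	 * bit, one position higher, only on the last descriptor of a
+	 * packet. After the 8 status words are merged into one register,
+	 * the loop ANDs with dd_check and popcounts the result to learn
+	 * how many of the 8 slots actually completed, e.g. roughly:
+	 *
+	 *     burst  = __builtin_popcountll(low 64 bits of packed DD bits);
+	 *     burst += __builtin_popcountll(high 64 bits of packed DD bits);
+	 *
+	 * while eop_check (the DD mask shifted up to the EOP position,
+	 * declared next) is inverted later to build the split_flags array
+	 * consumed by the scattered-RX reassembly path.
+	 */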
+ const __m256i eop_check = _mm256_slli_epi32(dd_check, + ICE_RX_DESC_STATUS_EOF_S); + + /* mask to shuffle from desc. to mbuf (2 descriptors)*/ + const __m256i shuf_msk = + _mm256_set_epi8 + (/* first descriptor */ + 15, 14, + 13, 12, /* octet 12~15, 32 bits rss */ + 11, 10, /* octet 10~11, 16 bits vlan_macip */ + 5, 4, /* octet 4~5, 16 bits data_len */ + 0xFF, 0xFF, /* skip hi 16 bits pkt_len, zero out */ + 5, 4, /* octet 4~5, 16 bits pkt_len */ + 0xFF, 0xFF, /* pkt_type set as unknown */ + 0xFF, 0xFF, /*pkt_type set as unknown */ + /* second descriptor */ + 15, 14, + 13, 12, /* octet 12~15, 32 bits rss */ + 11, 10, /* octet 10~11, 16 bits vlan_macip */ + 5, 4, /* octet 4~5, 16 bits data_len */ + 0xFF, 0xFF, /* skip hi 16 bits pkt_len, zero out */ + 5, 4, /* octet 4~5, 16 bits pkt_len */ + 0xFF, 0xFF, /* pkt_type set as unknown */ + 0xFF, 0xFF /*pkt_type set as unknown */ + ); + /** + * compile-time check the above crc and shuffle layout is correct. + * NOTE: the first field (lowest address) is given last in set_epi + * calls above. + */ + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) != + offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4); + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) != + offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8); + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, vlan_tci) != + offsetof(struct rte_mbuf, rx_descriptor_fields1) + 10); + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) != + offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12); + + /* Status/Error flag masks */ + /** + * mask everything except Checksum Reports, RSS indication + * and VLAN indication. + * bit6:4 for IP/L4 checksum errors. + * bit12 is for RSS indication. + * bit13 is for VLAN indication. + */ + const __m256i flags_mask = + _mm256_set1_epi32((7 << 4) | (1 << 12) | (1 << 13)); + /** + * data to be shuffled by the result of the flags mask shifted by 4 + * bits. This gives use the l3_l4 flags. + */ + const __m256i l3_l4_flags_shuf = _mm256_set_epi8(0, 0, 0, 0, 0, 0, 0, 0, + /* shift right 1 bit to make sure it not exceed 255 */ + (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | + PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | + PKT_RX_IP_CKSUM_GOOD) >> 1, + (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD | + PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD | + PKT_RX_IP_CKSUM_GOOD) >> 1, + (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1, + (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1, + /* second 128-bits */ + 0, 0, 0, 0, 0, 0, 0, 0, + (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | + PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | + PKT_RX_IP_CKSUM_GOOD) >> 1, + (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD | + PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD | + PKT_RX_IP_CKSUM_GOOD) >> 1, + (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1, + (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1); + const __m256i cksum_mask = + _mm256_set1_epi32(PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD | + PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD | + PKT_RX_EIP_CKSUM_BAD); + /** + * data to be shuffled by result of flag mask, shifted down 12. + * If RSS(bit12)/VLAN(bit13) are set, + * shuffle moves appropriate flags in place. 
+ */ + const __m256i rss_vlan_flags_shuf = _mm256_set_epi8(0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + PKT_RX_RSS_HASH | PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, + PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, + PKT_RX_RSS_HASH, 0, + /* end up 128-bits */ + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + PKT_RX_RSS_HASH | PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, + PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, + PKT_RX_RSS_HASH, 0); + + RTE_SET_USED(avx_aligned); /* for 32B descriptors we don't use this */ + + uint16_t i, received; + + for (i = 0, received = 0; i < nb_pkts; + i += ICE_DESCS_PER_LOOP_AVX, + rxdp += ICE_DESCS_PER_LOOP_AVX) { + /* step 1, copy over 8 mbuf pointers to rx_pkts array */ + _mm256_storeu_si256((void *)&rx_pkts[i], + _mm256_loadu_si256((void *)&sw_ring[i])); +#ifdef RTE_ARCH_X86_64 + _mm256_storeu_si256 + ((void *)&rx_pkts[i + 4], + _mm256_loadu_si256((void *)&sw_ring[i + 4])); +#endif + + __m256i raw_desc0_1, raw_desc2_3, raw_desc4_5, raw_desc6_7; +#ifdef RTE_LIBRTE_ICE_16BYTE_RX_DESC + /* for AVX we need alignment otherwise loads are not atomic */ + if (avx_aligned) { + /* load in descriptors, 2 at a time, in reverse order */ + raw_desc6_7 = _mm256_load_si256((void *)(rxdp + 6)); + rte_compiler_barrier(); + raw_desc4_5 = _mm256_load_si256((void *)(rxdp + 4)); + rte_compiler_barrier(); + raw_desc2_3 = _mm256_load_si256((void *)(rxdp + 2)); + rte_compiler_barrier(); + raw_desc0_1 = _mm256_load_si256((void *)(rxdp + 0)); + } else +#endif + { + const __m128i raw_desc7 = + _mm_load_si128((void *)(rxdp + 7)); + rte_compiler_barrier(); + const __m128i raw_desc6 = + _mm_load_si128((void *)(rxdp + 6)); + rte_compiler_barrier(); + const __m128i raw_desc5 = + _mm_load_si128((void *)(rxdp + 5)); + rte_compiler_barrier(); + const __m128i raw_desc4 = + _mm_load_si128((void *)(rxdp + 4)); + rte_compiler_barrier(); + const __m128i raw_desc3 = + _mm_load_si128((void *)(rxdp + 3)); + rte_compiler_barrier(); + const __m128i raw_desc2 = + _mm_load_si128((void *)(rxdp + 2)); + rte_compiler_barrier(); + const __m128i raw_desc1 = + _mm_load_si128((void *)(rxdp + 1)); + rte_compiler_barrier(); + const __m128i raw_desc0 = + _mm_load_si128((void *)(rxdp + 0)); + + raw_desc6_7 = + _mm256_inserti128_si256 + (_mm256_castsi128_si256(raw_desc6), + raw_desc7, 1); + raw_desc4_5 = + _mm256_inserti128_si256 + (_mm256_castsi128_si256(raw_desc4), + raw_desc5, 1); + raw_desc2_3 = + _mm256_inserti128_si256 + (_mm256_castsi128_si256(raw_desc2), + raw_desc3, 1); + raw_desc0_1 = + _mm256_inserti128_si256 + (_mm256_castsi128_si256(raw_desc0), + raw_desc1, 1); + } + + if (split_packet) { + int j; + + for (j = 0; j < ICE_DESCS_PER_LOOP_AVX; j++) + rte_mbuf_prefetch_part2(rx_pkts[i + j]); + } + + /** + * convert descriptors 4-7 into mbufs, re-arrange fields. + * Then write into the mbuf. 
+ */ + __m256i mb6_7 = _mm256_shuffle_epi8(raw_desc6_7, shuf_msk); + __m256i mb4_5 = _mm256_shuffle_epi8(raw_desc4_5, shuf_msk); + + mb6_7 = _mm256_add_epi16(mb6_7, crc_adjust); + mb4_5 = _mm256_add_epi16(mb4_5, crc_adjust); + /** + * to get packet types, ptype is located in bit16-25 + * of each 128bits + */ + const __m256i ptype_mask = + _mm256_set1_epi16(ICE_RX_FLEX_DESC_PTYPE_M); + const __m256i ptypes6_7 = + _mm256_and_si256(raw_desc6_7, ptype_mask); + const __m256i ptypes4_5 = + _mm256_and_si256(raw_desc4_5, ptype_mask); + const uint16_t ptype7 = _mm256_extract_epi16(ptypes6_7, 9); + const uint16_t ptype6 = _mm256_extract_epi16(ptypes6_7, 1); + const uint16_t ptype5 = _mm256_extract_epi16(ptypes4_5, 9); + const uint16_t ptype4 = _mm256_extract_epi16(ptypes4_5, 1); + + mb6_7 = _mm256_insert_epi32(mb6_7, ptype_tbl[ptype7], 4); + mb6_7 = _mm256_insert_epi32(mb6_7, ptype_tbl[ptype6], 0); + mb4_5 = _mm256_insert_epi32(mb4_5, ptype_tbl[ptype5], 4); + mb4_5 = _mm256_insert_epi32(mb4_5, ptype_tbl[ptype4], 0); + /* merge the status bits into one register */ + const __m256i status4_7 = _mm256_unpackhi_epi32(raw_desc6_7, + raw_desc4_5); + + /** + * convert descriptors 0-3 into mbufs, re-arrange fields. + * Then write into the mbuf. + */ + __m256i mb2_3 = _mm256_shuffle_epi8(raw_desc2_3, shuf_msk); + __m256i mb0_1 = _mm256_shuffle_epi8(raw_desc0_1, shuf_msk); + + mb2_3 = _mm256_add_epi16(mb2_3, crc_adjust); + mb0_1 = _mm256_add_epi16(mb0_1, crc_adjust); + /** + * to get packet types, ptype is located in bit16-25 + * of each 128bits + */ + const __m256i ptypes2_3 = + _mm256_and_si256(raw_desc2_3, ptype_mask); + const __m256i ptypes0_1 = + _mm256_and_si256(raw_desc0_1, ptype_mask); + const uint16_t ptype3 = _mm256_extract_epi16(ptypes2_3, 9); + const uint16_t ptype2 = _mm256_extract_epi16(ptypes2_3, 1); + const uint16_t ptype1 = _mm256_extract_epi16(ptypes0_1, 9); + const uint16_t ptype0 = _mm256_extract_epi16(ptypes0_1, 1); + + mb2_3 = _mm256_insert_epi32(mb2_3, ptype_tbl[ptype3], 4); + mb2_3 = _mm256_insert_epi32(mb2_3, ptype_tbl[ptype2], 0); + mb0_1 = _mm256_insert_epi32(mb0_1, ptype_tbl[ptype1], 4); + mb0_1 = _mm256_insert_epi32(mb0_1, ptype_tbl[ptype0], 0); + /* merge the status bits into one register */ + const __m256i status0_3 = _mm256_unpackhi_epi32(raw_desc2_3, + raw_desc0_1); + + /** + * take the two sets of status bits and merge to one + * After merge, the packets status flags are in the + * order (hi->lo): [1, 3, 5, 7, 0, 2, 4, 6] + */ + __m256i status0_7 = _mm256_unpacklo_epi64(status4_7, + status0_3); + + /* now do flag manipulation */ + + /* get only flag/error bits we want */ + const __m256i flag_bits = + _mm256_and_si256(status0_7, flags_mask); + /** + * l3_l4_error flags, shuffle, then shift to correct adjustment + * of flags in flags_shuf, and finally mask out extra bits + */ + __m256i l3_l4_flags = _mm256_shuffle_epi8(l3_l4_flags_shuf, + _mm256_srli_epi32(flag_bits, 4)); + l3_l4_flags = _mm256_slli_epi32(l3_l4_flags, 1); + l3_l4_flags = _mm256_and_si256(l3_l4_flags, cksum_mask); + /* set rss and vlan flags */ + const __m256i rss_vlan_flag_bits = + _mm256_srli_epi32(flag_bits, 12); + const __m256i rss_vlan_flags = + _mm256_shuffle_epi8(rss_vlan_flags_shuf, + rss_vlan_flag_bits); + + /* merge flags */ + const __m256i mbuf_flags = _mm256_or_si256(l3_l4_flags, + rss_vlan_flags); + /** + * At this point, we have the 8 sets of flags in the low 16-bits + * of each 32-bit value in vlan0. 
+ * We want to extract these, and merge them with the mbuf init + * data so we can do a single write to the mbuf to set the flags + * and all the other initialization fields. Extracting the + * appropriate flags means that we have to do a shift and blend + * for each mbuf before we do the write. However, we can also + * add in the previously computed rx_descriptor fields to + * make a single 256-bit write per mbuf + */ + /* check the structure matches expectations */ + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) != + offsetof(struct rte_mbuf, rearm_data) + 8); + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, rearm_data) != + RTE_ALIGN(offsetof(struct rte_mbuf, + rearm_data), + 16)); + /* build up data and do writes */ + __m256i rearm0, rearm1, rearm2, rearm3, rearm4, rearm5, + rearm6, rearm7; + rearm6 = _mm256_blend_epi32(mbuf_init, + _mm256_slli_si256(mbuf_flags, 8), + 0x04); + rearm4 = _mm256_blend_epi32(mbuf_init, + _mm256_slli_si256(mbuf_flags, 4), + 0x04); + rearm2 = _mm256_blend_epi32(mbuf_init, mbuf_flags, 0x04); + rearm0 = _mm256_blend_epi32(mbuf_init, + _mm256_srli_si256(mbuf_flags, 4), + 0x04); + /* permute to add in the rx_descriptor e.g. rss fields */ + rearm6 = _mm256_permute2f128_si256(rearm6, mb6_7, 0x20); + rearm4 = _mm256_permute2f128_si256(rearm4, mb4_5, 0x20); + rearm2 = _mm256_permute2f128_si256(rearm2, mb2_3, 0x20); + rearm0 = _mm256_permute2f128_si256(rearm0, mb0_1, 0x20); + /* write to mbuf */ + _mm256_storeu_si256((__m256i *)&rx_pkts[i + 6]->rearm_data, + rearm6); + _mm256_storeu_si256((__m256i *)&rx_pkts[i + 4]->rearm_data, + rearm4); + _mm256_storeu_si256((__m256i *)&rx_pkts[i + 2]->rearm_data, + rearm2); + _mm256_storeu_si256((__m256i *)&rx_pkts[i + 0]->rearm_data, + rearm0); + + /* repeat for the odd mbufs */ + const __m256i odd_flags = + _mm256_castsi128_si256 + (_mm256_extracti128_si256(mbuf_flags, 1)); + rearm7 = _mm256_blend_epi32(mbuf_init, + _mm256_slli_si256(odd_flags, 8), + 0x04); + rearm5 = _mm256_blend_epi32(mbuf_init, + _mm256_slli_si256(odd_flags, 4), + 0x04); + rearm3 = _mm256_blend_epi32(mbuf_init, odd_flags, 0x04); + rearm1 = _mm256_blend_epi32(mbuf_init, + _mm256_srli_si256(odd_flags, 4), + 0x04); + /* since odd mbufs are already in hi 128-bits use blend */ + rearm7 = _mm256_blend_epi32(rearm7, mb6_7, 0xF0); + rearm5 = _mm256_blend_epi32(rearm5, mb4_5, 0xF0); + rearm3 = _mm256_blend_epi32(rearm3, mb2_3, 0xF0); + rearm1 = _mm256_blend_epi32(rearm1, mb0_1, 0xF0); + /* again write to mbufs */ + _mm256_storeu_si256((__m256i *)&rx_pkts[i + 7]->rearm_data, + rearm7); + _mm256_storeu_si256((__m256i *)&rx_pkts[i + 5]->rearm_data, + rearm5); + _mm256_storeu_si256((__m256i *)&rx_pkts[i + 3]->rearm_data, + rearm3); + _mm256_storeu_si256((__m256i *)&rx_pkts[i + 1]->rearm_data, + rearm1); + + /* extract and record EOP bit */ + if (split_packet) { + const __m128i eop_mask = + _mm_set1_epi16(1 << ICE_RX_DESC_STATUS_EOF_S); + const __m256i eop_bits256 = _mm256_and_si256(status0_7, + eop_check); + /* pack status bits into a single 128-bit register */ + const __m128i eop_bits = + _mm_packus_epi32 + (_mm256_castsi256_si128(eop_bits256), + _mm256_extractf128_si256(eop_bits256, + 1)); + /** + * flip bits, and mask out the EOP bit, which is now + * a split-packet bit i.e. !EOP, rather than EOP one. + */ + __m128i split_bits = _mm_andnot_si128(eop_bits, + eop_mask); + /** + * eop bits are out of order, so we need to shuffle them + * back into order again. 
In doing so, only use low 8 + * bits, which acts like another pack instruction + * The original order is (hi->lo): 1,3,5,7,0,2,4,6 + * [Since we use epi8, the 16-bit positions are + * multiplied by 2 in the eop_shuffle value.] + */ + __m128i eop_shuffle = + _mm_set_epi8(/* zero hi 64b */ + 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, + /* move values to lo 64b */ + 8, 0, 10, 2, + 12, 4, 14, 6); + split_bits = _mm_shuffle_epi8(split_bits, eop_shuffle); + *(uint64_t *)split_packet = + _mm_cvtsi128_si64(split_bits); + split_packet += ICE_DESCS_PER_LOOP_AVX; + } + + /* perform dd_check */ + status0_7 = _mm256_and_si256(status0_7, dd_check); + status0_7 = _mm256_packs_epi32(status0_7, + _mm256_setzero_si256()); + + uint64_t burst = __builtin_popcountll + (_mm_cvtsi128_si64 + (_mm256_extracti128_si256 + (status0_7, 1))); + burst += __builtin_popcountll + (_mm_cvtsi128_si64 + (_mm256_castsi256_si128(status0_7))); + received += burst; + if (burst != ICE_DESCS_PER_LOOP_AVX) + break; + } + + /* update tail pointers */ + rxq->rx_tail += received; + rxq->rx_tail &= (rxq->nb_rx_desc - 1); + if ((rxq->rx_tail & 1) == 1 && received > 1) { /* keep avx2 aligned */ + rxq->rx_tail--; + received--; + } + rxq->rxrearm_nb += received; + return received; +} + +/** + * Notice: + * - nb_pkts < ICE_DESCS_PER_LOOP, just return no packet + */ +uint16_t +ice_recv_pkts_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + return _ice_recv_raw_pkts_vec_avx2(rx_queue, rx_pkts, nb_pkts, NULL); +} + +/** + * vPMD receive routine that reassembles single burst of 32 scattered packets + * Notice: + * - nb_pkts < ICE_DESCS_PER_LOOP, just return no packet + */ +static uint16_t +ice_recv_scattered_burst_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + struct ice_rx_queue *rxq = rx_queue; + uint8_t split_flags[ICE_VPMD_RX_BURST] = {0}; + + /* get some new buffers */ + uint16_t nb_bufs = _ice_recv_raw_pkts_vec_avx2(rxq, rx_pkts, nb_pkts, + split_flags); + if (nb_bufs == 0) + return 0; + + /* happy day case, full burst + no packets to be joined */ + const uint64_t *split_fl64 = (uint64_t *)split_flags; + + if (!rxq->pkt_first_seg && + split_fl64[0] == 0 && split_fl64[1] == 0 && + split_fl64[2] == 0 && split_fl64[3] == 0) + return nb_bufs; + + /* reassemble any packets that need reassembly*/ + unsigned int i = 0; + + if (!rxq->pkt_first_seg) { + /* find the first split flag, and only reassemble then*/ + while (i < nb_bufs && !split_flags[i]) + i++; + if (i == nb_bufs) + return nb_bufs; + rxq->pkt_first_seg = rx_pkts[i]; + } + return i + ice_rx_reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i, + &split_flags[i]); +} + +/** + * vPMD receive routine that reassembles scattered packets. 
+ * Main receive routine that can handle arbitrary burst sizes + * Notice: + * - nb_pkts < ICE_DESCS_PER_LOOP, just return no packet + */ +uint16_t +ice_recv_scattered_pkts_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + uint16_t retval = 0; + + while (nb_pkts > ICE_VPMD_RX_BURST) { + uint16_t burst = ice_recv_scattered_burst_vec_avx2(rx_queue, + rx_pkts + retval, ICE_VPMD_RX_BURST); + retval += burst; + nb_pkts -= burst; + if (burst < ICE_VPMD_RX_BURST) + return retval; + } + return retval + ice_recv_scattered_burst_vec_avx2(rx_queue, + rx_pkts + retval, nb_pkts); +} + +static inline void +ice_vtx1(volatile struct ice_tx_desc *txdp, + struct rte_mbuf *pkt, uint64_t flags) +{ + uint64_t high_qw = + (ICE_TX_DESC_DTYPE_DATA | + ((uint64_t)flags << ICE_TXD_QW1_CMD_S) | + ((uint64_t)pkt->data_len << ICE_TXD_QW1_TX_BUF_SZ_S)); + + __m128i descriptor = _mm_set_epi64x(high_qw, + pkt->buf_physaddr + pkt->data_off); + _mm_store_si128((__m128i *)txdp, descriptor); +} + +static inline void +ice_vtx(volatile struct ice_tx_desc *txdp, + struct rte_mbuf **pkt, uint16_t nb_pkts, uint64_t flags) +{ + const uint64_t hi_qw_tmpl = (ICE_TX_DESC_DTYPE_DATA | + ((uint64_t)flags << ICE_TXD_QW1_CMD_S)); + + /* if unaligned on 32-bit boundary, do one to align */ + if (((uintptr_t)txdp & 0x1F) != 0 && nb_pkts != 0) { + ice_vtx1(txdp, *pkt, flags); + nb_pkts--, txdp++, pkt++; + } + + /* do two at a time while possible, in bursts */ + for (; nb_pkts > 3; txdp += 4, pkt += 4, nb_pkts -= 4) { + uint64_t hi_qw3 = + hi_qw_tmpl | + ((uint64_t)pkt[3]->data_len << + ICE_TXD_QW1_TX_BUF_SZ_S); + uint64_t hi_qw2 = + hi_qw_tmpl | + ((uint64_t)pkt[2]->data_len << + ICE_TXD_QW1_TX_BUF_SZ_S); + uint64_t hi_qw1 = + hi_qw_tmpl | + ((uint64_t)pkt[1]->data_len << + ICE_TXD_QW1_TX_BUF_SZ_S); + uint64_t hi_qw0 = + hi_qw_tmpl | + ((uint64_t)pkt[0]->data_len << + ICE_TXD_QW1_TX_BUF_SZ_S); + + __m256i desc2_3 = + _mm256_set_epi64x + (hi_qw3, + pkt[3]->buf_physaddr + pkt[3]->data_off, + hi_qw2, + pkt[2]->buf_physaddr + pkt[2]->data_off); + __m256i desc0_1 = + _mm256_set_epi64x + (hi_qw1, + pkt[1]->buf_physaddr + pkt[1]->data_off, + hi_qw0, + pkt[0]->buf_physaddr + pkt[0]->data_off); + _mm256_store_si256((void *)(txdp + 2), desc2_3); + _mm256_store_si256((void *)txdp, desc0_1); + } + + /* do any last ones */ + while (nb_pkts) { + ice_vtx1(txdp, *pkt, flags); + txdp++, pkt++, nb_pkts--; + } +} + +static inline uint16_t +ice_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + struct ice_tx_queue *txq = (struct ice_tx_queue *)tx_queue; + volatile struct ice_tx_desc *txdp; + struct ice_tx_entry *txep; + uint16_t n, nb_commit, tx_id; + uint64_t flags = ICE_TD_CMD; + uint64_t rs = ICE_TX_DESC_CMD_RS | ICE_TD_CMD; + + /* cross rx_thresh boundary is not allowed */ + nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh); + + if (txq->nb_tx_free < txq->tx_free_thresh) + ice_tx_free_bufs(txq); + + nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts); + if (unlikely(nb_pkts == 0)) + return 0; + + tx_id = txq->tx_tail; + txdp = &txq->tx_ring[tx_id]; + txep = &txq->sw_ring[tx_id]; + + txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts); + + n = (uint16_t)(txq->nb_tx_desc - tx_id); + if (nb_commit >= n) { + ice_tx_backlog_entry(txep, tx_pkts, n); + + ice_vtx(txdp, tx_pkts, n - 1, flags); + tx_pkts += (n - 1); + txdp += (n - 1); + + ice_vtx1(txdp, *tx_pkts++, rs); + + nb_commit = (uint16_t)(nb_commit - n); + + tx_id = 0; + txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1); + + 
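+		/*
+		 * [Editorial note, not part of the upstream DPDK patch]
+		 * This branch handles a burst that crosses the end of the
+		 * descriptor ring: the first n packets fill the ring up to
+		 * its last slot, with the RS (report status) bit set on that
+		 * final descriptor so the transmitted buffers can later be
+		 * reclaimed, then tx_id wraps to 0, tx_next_rs is re-based
+		 * on the ring start, and the remaining nb_commit packets are
+		 * written from the base of the ring below.
+		 */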
/* avoid reach the end of ring */ + txdp = &txq->tx_ring[tx_id]; + txep = &txq->sw_ring[tx_id]; + } + + ice_tx_backlog_entry(txep, tx_pkts, nb_commit); + + ice_vtx(txdp, tx_pkts, nb_commit, flags); + + tx_id = (uint16_t)(tx_id + nb_commit); + if (tx_id > txq->tx_next_rs) { + txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |= + rte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) << + ICE_TXD_QW1_CMD_S); + txq->tx_next_rs = + (uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh); + } + + txq->tx_tail = tx_id; + + ICE_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail); + + return nb_pkts; +} + +uint16_t +ice_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + uint16_t nb_tx = 0; + struct ice_tx_queue *txq = (struct ice_tx_queue *)tx_queue; + + while (nb_pkts) { + uint16_t ret, num; + + num = (uint16_t)RTE_MIN(nb_pkts, txq->tx_rs_thresh); + ret = ice_xmit_fixed_burst_vec_avx2(tx_queue, &tx_pkts[nb_tx], + num); + nb_tx += ret; + nb_pkts -= ret; + if (ret < num) + break; + } + + return nb_tx; +} diff --git a/src/spdk/dpdk/drivers/net/ice/ice_rxtx_vec_common.h b/src/spdk/dpdk/drivers/net/ice/ice_rxtx_vec_common.h new file mode 100644 index 000000000..46e3be98a --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ice/ice_rxtx_vec_common.h @@ -0,0 +1,304 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019 Intel Corporation + */ + +#ifndef _ICE_RXTX_VEC_COMMON_H_ +#define _ICE_RXTX_VEC_COMMON_H_ + +#include "ice_rxtx.h" + +static inline uint16_t +ice_rx_reassemble_packets(struct ice_rx_queue *rxq, struct rte_mbuf **rx_bufs, + uint16_t nb_bufs, uint8_t *split_flags) +{ + struct rte_mbuf *pkts[ICE_VPMD_RX_BURST] = {0}; /*finished pkts*/ + struct rte_mbuf *start = rxq->pkt_first_seg; + struct rte_mbuf *end = rxq->pkt_last_seg; + unsigned int pkt_idx, buf_idx; + + for (buf_idx = 0, pkt_idx = 0; buf_idx < nb_bufs; buf_idx++) { + if (end) { + /* processing a split packet */ + end->next = rx_bufs[buf_idx]; + rx_bufs[buf_idx]->data_len += rxq->crc_len; + + start->nb_segs++; + start->pkt_len += rx_bufs[buf_idx]->data_len; + end = end->next; + + if (!split_flags[buf_idx]) { + /* it's the last packet of the set */ + start->hash = end->hash; + start->vlan_tci = end->vlan_tci; + start->ol_flags = end->ol_flags; + /* we need to strip crc for the whole packet */ + start->pkt_len -= rxq->crc_len; + if (end->data_len > rxq->crc_len) { + end->data_len -= rxq->crc_len; + } else { + /* free up last mbuf */ + struct rte_mbuf *secondlast = start; + + start->nb_segs--; + while (secondlast->next != end) + secondlast = secondlast->next; + secondlast->data_len -= (rxq->crc_len - + end->data_len); + secondlast->next = NULL; + rte_pktmbuf_free_seg(end); + } + pkts[pkt_idx++] = start; + start = NULL; + end = NULL; + } + } else { + /* not processing a split packet */ + if (!split_flags[buf_idx]) { + /* not a split packet, save and skip */ + pkts[pkt_idx++] = rx_bufs[buf_idx]; + continue; + } + start = rx_bufs[buf_idx]; + end = start; + rx_bufs[buf_idx]->data_len += rxq->crc_len; + rx_bufs[buf_idx]->pkt_len += rxq->crc_len; + } + } + + /* save the partial packet for next time */ + rxq->pkt_first_seg = start; + rxq->pkt_last_seg = end; + rte_memcpy(rx_bufs, pkts, pkt_idx * (sizeof(*pkts))); + return pkt_idx; +} + +static __rte_always_inline int +ice_tx_free_bufs(struct ice_tx_queue *txq) +{ + struct ice_tx_entry *txep; + uint32_t n; + uint32_t i; + int nb_free = 0; + struct rte_mbuf *m, *free[ICE_TX_MAX_FREE_BUF_SZ]; + + /* check DD bits on threshold descriptor */ + if 
((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz & + rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) != + rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE)) + return 0; + + n = txq->tx_rs_thresh; + + /* first buffer to free from S/W ring is at index + * tx_next_dd - (tx_rs_thresh-1) + */ + txep = &txq->sw_ring[txq->tx_next_dd - (n - 1)]; + m = rte_pktmbuf_prefree_seg(txep[0].mbuf); + if (likely(m)) { + free[0] = m; + nb_free = 1; + for (i = 1; i < n; i++) { + m = rte_pktmbuf_prefree_seg(txep[i].mbuf); + if (likely(m)) { + if (likely(m->pool == free[0]->pool)) { + free[nb_free++] = m; + } else { + rte_mempool_put_bulk(free[0]->pool, + (void *)free, + nb_free); + free[0] = m; + nb_free = 1; + } + } + } + rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free); + } else { + for (i = 1; i < n; i++) { + m = rte_pktmbuf_prefree_seg(txep[i].mbuf); + if (m) + rte_mempool_put(m->pool, m); + } + } + + /* buffers were freed, update counters */ + txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh); + txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh); + if (txq->tx_next_dd >= txq->nb_tx_desc) + txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1); + + return txq->tx_rs_thresh; +} + +static __rte_always_inline void +ice_tx_backlog_entry(struct ice_tx_entry *txep, + struct rte_mbuf **tx_pkts, uint16_t nb_pkts) +{ + int i; + + for (i = 0; i < (int)nb_pkts; ++i) + txep[i].mbuf = tx_pkts[i]; +} + +static inline void +_ice_rx_queue_release_mbufs_vec(struct ice_rx_queue *rxq) +{ + const unsigned int mask = rxq->nb_rx_desc - 1; + unsigned int i; + + if (unlikely(!rxq->sw_ring)) { + PMD_DRV_LOG(DEBUG, "sw_ring is NULL"); + return; + } + + if (rxq->rxrearm_nb >= rxq->nb_rx_desc) + return; + + /* free all mbufs that are valid in the ring */ + if (rxq->rxrearm_nb == 0) { + for (i = 0; i < rxq->nb_rx_desc; i++) { + if (rxq->sw_ring[i].mbuf) + rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf); + } + } else { + for (i = rxq->rx_tail; + i != rxq->rxrearm_start; + i = (i + 1) & mask) { + if (rxq->sw_ring[i].mbuf) + rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf); + } + } + + rxq->rxrearm_nb = rxq->nb_rx_desc; + + /* set all entries to NULL */ + memset(rxq->sw_ring, 0, sizeof(rxq->sw_ring[0]) * rxq->nb_rx_desc); +} + +static inline void +_ice_tx_queue_release_mbufs_vec(struct ice_tx_queue *txq) +{ + uint16_t i; + + if (unlikely(!txq || !txq->sw_ring)) { + PMD_DRV_LOG(DEBUG, "Pointer to rxq or sw_ring is NULL"); + return; + } + + /** + * vPMD tx will not set sw_ring's mbuf to NULL after free, + * so need to free remains more carefully. 
+ */ + i = txq->tx_next_dd - txq->tx_rs_thresh + 1; + if (txq->tx_tail < i) { + for (; i < txq->nb_tx_desc; i++) { + rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf); + txq->sw_ring[i].mbuf = NULL; + } + i = 0; + } + for (; i < txq->tx_tail; i++) { + rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf); + txq->sw_ring[i].mbuf = NULL; + } +} + +static inline int +ice_rxq_vec_setup_default(struct ice_rx_queue *rxq) +{ + uintptr_t p; + struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */ + + mb_def.nb_segs = 1; + mb_def.data_off = RTE_PKTMBUF_HEADROOM; + mb_def.port = rxq->port_id; + rte_mbuf_refcnt_set(&mb_def, 1); + + /* prevent compiler reordering: rearm_data covers previous fields */ + rte_compiler_barrier(); + p = (uintptr_t)&mb_def.rearm_data; + rxq->mbuf_initializer = *(uint64_t *)p; + return 0; +} + +static inline int +ice_rx_vec_queue_default(struct ice_rx_queue *rxq) +{ + if (!rxq) + return -1; + + if (!rte_is_power_of_2(rxq->nb_rx_desc)) + return -1; + + if (rxq->rx_free_thresh < ICE_VPMD_RX_BURST) + return -1; + + if (rxq->nb_rx_desc % rxq->rx_free_thresh) + return -1; + + if (rxq->proto_xtr != PROTO_XTR_NONE) + return -1; + + return 0; +} + +#define ICE_NO_VECTOR_FLAGS ( \ + DEV_TX_OFFLOAD_MULTI_SEGS | \ + DEV_TX_OFFLOAD_VLAN_INSERT | \ + DEV_TX_OFFLOAD_SCTP_CKSUM | \ + DEV_TX_OFFLOAD_UDP_CKSUM | \ + DEV_TX_OFFLOAD_TCP_TSO | \ + DEV_TX_OFFLOAD_TCP_CKSUM) + +static inline int +ice_tx_vec_queue_default(struct ice_tx_queue *txq) +{ + if (!txq) + return -1; + + if (txq->offloads & ICE_NO_VECTOR_FLAGS) + return -1; + + if (txq->tx_rs_thresh < ICE_VPMD_TX_BURST || + txq->tx_rs_thresh > ICE_TX_MAX_FREE_BUF_SZ) + return -1; + + return 0; +} + +static inline int +ice_rx_vec_dev_check_default(struct rte_eth_dev *dev) +{ + int i; + struct ice_rx_queue *rxq; + struct ice_adapter *ad = + ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + + /* vPMD does not support flow mark. 
*/ + if (ad->devargs.flow_mark_support) + return -1; + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = dev->data->rx_queues[i]; + if (ice_rx_vec_queue_default(rxq)) + return -1; + } + + return 0; +} + +static inline int +ice_tx_vec_dev_check_default(struct rte_eth_dev *dev) +{ + int i; + struct ice_tx_queue *txq; + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + txq = dev->data->tx_queues[i]; + if (ice_tx_vec_queue_default(txq)) + return -1; + } + + return 0; +} + +#endif diff --git a/src/spdk/dpdk/drivers/net/ice/ice_rxtx_vec_sse.c b/src/spdk/dpdk/drivers/net/ice/ice_rxtx_vec_sse.c new file mode 100644 index 000000000..382ef31f3 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ice/ice_rxtx_vec_sse.c @@ -0,0 +1,642 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019 Intel Corporation + */ + +#include "ice_rxtx_vec_common.h" + +#include + +#ifndef __INTEL_COMPILER +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif + +static inline void +ice_rxq_rearm(struct ice_rx_queue *rxq) +{ + int i; + uint16_t rx_id; + volatile union ice_rx_flex_desc *rxdp; + struct ice_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start]; + struct rte_mbuf *mb0, *mb1; + __m128i hdr_room = _mm_set_epi64x(RTE_PKTMBUF_HEADROOM, + RTE_PKTMBUF_HEADROOM); + __m128i dma_addr0, dma_addr1; + + rxdp = rxq->rx_ring + rxq->rxrearm_start; + + /* Pull 'n' more MBUFs into the software ring */ + if (rte_mempool_get_bulk(rxq->mp, + (void *)rxep, + ICE_RXQ_REARM_THRESH) < 0) { + if (rxq->rxrearm_nb + ICE_RXQ_REARM_THRESH >= + rxq->nb_rx_desc) { + dma_addr0 = _mm_setzero_si128(); + for (i = 0; i < ICE_DESCS_PER_LOOP; i++) { + rxep[i].mbuf = &rxq->fake_mbuf; + _mm_store_si128((__m128i *)&rxdp[i].read, + dma_addr0); + } + } + rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed += + ICE_RXQ_REARM_THRESH; + return; + } + + /* Initialize the mbufs in vector, process 2 mbufs in one loop */ + for (i = 0; i < ICE_RXQ_REARM_THRESH; i += 2, rxep += 2) { + __m128i vaddr0, vaddr1; + + mb0 = rxep[0].mbuf; + mb1 = rxep[1].mbuf; + + /* load buf_addr(lo 64bit) and buf_iova(hi 64bit) */ + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_iova) != + offsetof(struct rte_mbuf, buf_addr) + 8); + vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr); + vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr); + + /* convert pa to dma_addr hdr/data */ + dma_addr0 = _mm_unpackhi_epi64(vaddr0, vaddr0); + dma_addr1 = _mm_unpackhi_epi64(vaddr1, vaddr1); + + /* add headroom to pa values */ + dma_addr0 = _mm_add_epi64(dma_addr0, hdr_room); + dma_addr1 = _mm_add_epi64(dma_addr1, hdr_room); + + /* flush desc with pa dma_addr */ + _mm_store_si128((__m128i *)&rxdp++->read, dma_addr0); + _mm_store_si128((__m128i *)&rxdp++->read, dma_addr1); + } + + rxq->rxrearm_start += ICE_RXQ_REARM_THRESH; + if (rxq->rxrearm_start >= rxq->nb_rx_desc) + rxq->rxrearm_start = 0; + + rxq->rxrearm_nb -= ICE_RXQ_REARM_THRESH; + + rx_id = (uint16_t)((rxq->rxrearm_start == 0) ? + (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1)); + + /* Update the tail pointer on the NIC */ + ICE_PCI_REG_WRITE(rxq->qrx_tail, rx_id); +} + +static inline void +ice_rx_desc_to_olflags_v(struct ice_rx_queue *rxq, __m128i descs[4], + struct rte_mbuf **rx_pkts) +{ + const __m128i mbuf_init = _mm_set_epi64x(0, rxq->mbuf_initializer); + __m128i rearm0, rearm1, rearm2, rearm3; + + __m128i tmp_desc, flags, rss_vlan; + + /* mask everything except checksum, RSS and VLAN flags. + * bit6:4 for checksum. + * bit12 for RSS indication. + * bit13 for VLAN indication. 
+ */ + const __m128i desc_mask = _mm_set_epi32(0x3070, 0x3070, + 0x3070, 0x3070); + + const __m128i cksum_mask = _mm_set_epi32(PKT_RX_IP_CKSUM_MASK | + PKT_RX_L4_CKSUM_MASK | + PKT_RX_EIP_CKSUM_BAD, + PKT_RX_IP_CKSUM_MASK | + PKT_RX_L4_CKSUM_MASK | + PKT_RX_EIP_CKSUM_BAD, + PKT_RX_IP_CKSUM_MASK | + PKT_RX_L4_CKSUM_MASK | + PKT_RX_EIP_CKSUM_BAD, + PKT_RX_IP_CKSUM_MASK | + PKT_RX_L4_CKSUM_MASK | + PKT_RX_EIP_CKSUM_BAD); + + /* map the checksum, rss and vlan fields to the checksum, rss + * and vlan flag + */ + const __m128i cksum_flags = _mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0, + /* shift right 1 bit to make sure it not exceed 255 */ + (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | + PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | + PKT_RX_IP_CKSUM_GOOD) >> 1, + (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD | + PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD | + PKT_RX_IP_CKSUM_GOOD) >> 1, + (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1, + (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1); + + const __m128i rss_vlan_flags = _mm_set_epi8(0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + PKT_RX_RSS_HASH | PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, + PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, + PKT_RX_RSS_HASH, 0); + + /* merge 4 descriptors */ + flags = _mm_unpackhi_epi32(descs[0], descs[1]); + tmp_desc = _mm_unpackhi_epi32(descs[2], descs[3]); + tmp_desc = _mm_unpacklo_epi64(flags, tmp_desc); + tmp_desc = _mm_and_si128(flags, desc_mask); + + /* checksum flags */ + tmp_desc = _mm_srli_epi32(tmp_desc, 4); + flags = _mm_shuffle_epi8(cksum_flags, tmp_desc); + /* then we shift left 1 bit */ + flags = _mm_slli_epi32(flags, 1); + /* we need to mask out the reduntant bits introduced by RSS or + * VLAN fields. + */ + flags = _mm_and_si128(flags, cksum_mask); + + /* RSS, VLAN flag */ + tmp_desc = _mm_srli_epi32(tmp_desc, 8); + rss_vlan = _mm_shuffle_epi8(rss_vlan_flags, tmp_desc); + + /* merge the flags */ + flags = _mm_or_si128(flags, rss_vlan); + + /** + * At this point, we have the 4 sets of flags in the low 16-bits + * of each 32-bit value in flags. + * We want to extract these, and merge them with the mbuf init data + * so we can do a single 16-byte write to the mbuf to set the flags + * and all the other initialization fields. Extracting the + * appropriate flags means that we have to do a shift and blend for + * each mbuf before we do the write. 
+ */ + rearm0 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(flags, 8), 0x10); + rearm1 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(flags, 4), 0x10); + rearm2 = _mm_blend_epi16(mbuf_init, flags, 0x10); + rearm3 = _mm_blend_epi16(mbuf_init, _mm_srli_si128(flags, 4), 0x10); + + /* write the rearm data and the olflags in one write */ + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) != + offsetof(struct rte_mbuf, rearm_data) + 8); + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, rearm_data) != + RTE_ALIGN(offsetof(struct rte_mbuf, rearm_data), 16)); + _mm_store_si128((__m128i *)&rx_pkts[0]->rearm_data, rearm0); + _mm_store_si128((__m128i *)&rx_pkts[1]->rearm_data, rearm1); + _mm_store_si128((__m128i *)&rx_pkts[2]->rearm_data, rearm2); + _mm_store_si128((__m128i *)&rx_pkts[3]->rearm_data, rearm3); +} + +static inline void +ice_rx_desc_to_ptype_v(__m128i descs[4], struct rte_mbuf **rx_pkts, + uint32_t *ptype_tbl) +{ + const __m128i ptype_mask = _mm_set_epi16(0, ICE_RX_FLEX_DESC_PTYPE_M, + 0, ICE_RX_FLEX_DESC_PTYPE_M, + 0, ICE_RX_FLEX_DESC_PTYPE_M, + 0, ICE_RX_FLEX_DESC_PTYPE_M); + __m128i ptype_01 = _mm_unpacklo_epi32(descs[0], descs[1]); + __m128i ptype_23 = _mm_unpacklo_epi32(descs[2], descs[3]); + __m128i ptype_all = _mm_unpacklo_epi64(ptype_01, ptype_23); + + ptype_all = _mm_and_si128(ptype_all, ptype_mask); + + rx_pkts[0]->packet_type = ptype_tbl[_mm_extract_epi16(ptype_all, 1)]; + rx_pkts[1]->packet_type = ptype_tbl[_mm_extract_epi16(ptype_all, 3)]; + rx_pkts[2]->packet_type = ptype_tbl[_mm_extract_epi16(ptype_all, 5)]; + rx_pkts[3]->packet_type = ptype_tbl[_mm_extract_epi16(ptype_all, 7)]; +} + +/** + * Notice: + * - nb_pkts < ICE_DESCS_PER_LOOP, just return no packet + * - nb_pkts > ICE_VPMD_RX_BURST, only scan ICE_VPMD_RX_BURST + * numbers of DD bits + */ +static inline uint16_t +_ice_recv_raw_pkts_vec(struct ice_rx_queue *rxq, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts, uint8_t *split_packet) +{ + volatile union ice_rx_flex_desc *rxdp; + struct ice_rx_entry *sw_ring; + uint16_t nb_pkts_recd; + int pos; + uint64_t var; + uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl; + __m128i crc_adjust = _mm_set_epi16 + (0, 0, 0, /* ignore non-length fields */ + -rxq->crc_len, /* sub crc on data_len */ + 0, /* ignore high-16bits of pkt_len */ + -rxq->crc_len, /* sub crc on pkt_len */ + 0, 0 /* ignore pkt_type field */ + ); + const __m128i zero = _mm_setzero_si128(); + /* mask to shuffle from desc. to mbuf */ + const __m128i shuf_msk = _mm_set_epi8 + (15, 14, 13, 12, /* octet 12~15, 32 bits rss */ + 11, 10, /* octet 10~11, 16 bits vlan_macip */ + 5, 4, /* octet 4~5, 16 bits data_len */ + 0xFF, 0xFF, /* skip high 16 bits pkt_len, zero out */ + 5, 4, /* octet 4~5, low 16 bits pkt_len */ + 0xFF, 0xFF, /* pkt_type set as unknown */ + 0xFF, 0xFF /* pkt_type set as unknown */ + ); + const __m128i eop_shuf_mask = _mm_set_epi8(0xFF, 0xFF, + 0xFF, 0xFF, + 0xFF, 0xFF, + 0xFF, 0xFF, + 0xFF, 0xFF, + 0xFF, 0xFF, + 0x04, 0x0C, + 0x00, 0x08); + + /** + * compile-time check the above crc_adjust layout is correct. + * NOTE: the first field (lowest address) is given last in set_epi16 + * call above. 
+ */ + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) != + offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4); + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) != + offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8); + + /* 4 packets DD mask */ + const __m128i dd_check = _mm_set_epi64x(0x0000000100000001LL, + 0x0000000100000001LL); + /* 4 packets EOP mask */ + const __m128i eop_check = _mm_set_epi64x(0x0000000200000002LL, + 0x0000000200000002LL); + + /* nb_pkts shall be less equal than ICE_MAX_RX_BURST */ + nb_pkts = RTE_MIN(nb_pkts, ICE_MAX_RX_BURST); + + /* nb_pkts has to be floor-aligned to ICE_DESCS_PER_LOOP */ + nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, ICE_DESCS_PER_LOOP); + + /* Just the act of getting into the function from the application is + * going to cost about 7 cycles + */ + rxdp = rxq->rx_ring + rxq->rx_tail; + + rte_prefetch0(rxdp); + + /* See if we need to rearm the RX queue - gives the prefetch a bit + * of time to act + */ + if (rxq->rxrearm_nb > ICE_RXQ_REARM_THRESH) + ice_rxq_rearm(rxq); + + /* Before we start moving massive data around, check to see if + * there is actually a packet available + */ + if (!(rxdp->wb.status_error0 & + rte_cpu_to_le_32(1 << ICE_RX_FLEX_DESC_STATUS0_DD_S))) + return 0; + + /** + * Compile-time verify the shuffle mask + * NOTE: some field positions already verified above, but duplicated + * here for completeness in case of future modifications. + */ + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) != + offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4); + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) != + offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8); + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, vlan_tci) != + offsetof(struct rte_mbuf, rx_descriptor_fields1) + 10); + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) != + offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12); + + /* Cache is empty -> need to scan the buffer rings, but first move + * the next 'n' mbufs into the cache + */ + sw_ring = &rxq->sw_ring[rxq->rx_tail]; + + /* A. load 4 packet in one loop + * [A*. mask out 4 unused dirty field in desc] + * B. copy 4 mbuf point from swring to rx_pkts + * C. calc the number of DD bits among the 4 packets + * [C*. extract the end-of-packet bit, if requested] + * D. fill info. from desc to mbuf + */ + + for (pos = 0, nb_pkts_recd = 0; pos < nb_pkts; + pos += ICE_DESCS_PER_LOOP, + rxdp += ICE_DESCS_PER_LOOP) { + __m128i descs[ICE_DESCS_PER_LOOP]; + __m128i pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4; + __m128i staterr, sterr_tmp1, sterr_tmp2; + /* 2 64 bit or 4 32 bit mbuf pointers in one XMM reg. 
*/ + __m128i mbp1; +#if defined(RTE_ARCH_X86_64) + __m128i mbp2; +#endif + + /* B.1 load 2 (64 bit) or 4 (32 bit) mbuf points */ + mbp1 = _mm_loadu_si128((__m128i *)&sw_ring[pos]); + /* Read desc statuses backwards to avoid race condition */ + /* A.1 load 4 pkts desc */ + descs[3] = _mm_loadu_si128((__m128i *)(rxdp + 3)); + rte_compiler_barrier(); + + /* B.2 copy 2 64 bit or 4 32 bit mbuf point into rx_pkts */ + _mm_storeu_si128((__m128i *)&rx_pkts[pos], mbp1); + +#if defined(RTE_ARCH_X86_64) + /* B.1 load 2 64 bit mbuf points */ + mbp2 = _mm_loadu_si128((__m128i *)&sw_ring[pos + 2]); +#endif + + descs[2] = _mm_loadu_si128((__m128i *)(rxdp + 2)); + rte_compiler_barrier(); + /* B.1 load 2 mbuf point */ + descs[1] = _mm_loadu_si128((__m128i *)(rxdp + 1)); + rte_compiler_barrier(); + descs[0] = _mm_loadu_si128((__m128i *)(rxdp)); + +#if defined(RTE_ARCH_X86_64) + /* B.2 copy 2 mbuf point into rx_pkts */ + _mm_storeu_si128((__m128i *)&rx_pkts[pos + 2], mbp2); +#endif + + if (split_packet) { + rte_mbuf_prefetch_part2(rx_pkts[pos]); + rte_mbuf_prefetch_part2(rx_pkts[pos + 1]); + rte_mbuf_prefetch_part2(rx_pkts[pos + 2]); + rte_mbuf_prefetch_part2(rx_pkts[pos + 3]); + } + + /* avoid compiler reorder optimization */ + rte_compiler_barrier(); + + /* D.1 pkt 3,4 convert format from desc to pktmbuf */ + pkt_mb4 = _mm_shuffle_epi8(descs[3], shuf_msk); + pkt_mb3 = _mm_shuffle_epi8(descs[2], shuf_msk); + + /* C.1 4=>2 filter staterr info only */ + sterr_tmp2 = _mm_unpackhi_epi32(descs[3], descs[2]); + /* C.1 4=>2 filter staterr info only */ + sterr_tmp1 = _mm_unpackhi_epi32(descs[1], descs[0]); + + ice_rx_desc_to_olflags_v(rxq, descs, &rx_pkts[pos]); + + /* D.2 pkt 3,4 set in_port/nb_seg and remove crc */ + pkt_mb4 = _mm_add_epi16(pkt_mb4, crc_adjust); + pkt_mb3 = _mm_add_epi16(pkt_mb3, crc_adjust); + + /* D.1 pkt 1,2 convert format from desc to pktmbuf */ + pkt_mb2 = _mm_shuffle_epi8(descs[1], shuf_msk); + pkt_mb1 = _mm_shuffle_epi8(descs[0], shuf_msk); + + /* C.2 get 4 pkts staterr value */ + staterr = _mm_unpacklo_epi32(sterr_tmp1, sterr_tmp2); + + /* D.3 copy final 3,4 data to rx_pkts */ + _mm_storeu_si128 + ((void *)&rx_pkts[pos + 3]->rx_descriptor_fields1, + pkt_mb4); + _mm_storeu_si128 + ((void *)&rx_pkts[pos + 2]->rx_descriptor_fields1, + pkt_mb3); + + /* D.2 pkt 1,2 set in_port/nb_seg and remove crc */ + pkt_mb2 = _mm_add_epi16(pkt_mb2, crc_adjust); + pkt_mb1 = _mm_add_epi16(pkt_mb1, crc_adjust); + + /* C* extract and record EOP bit */ + if (split_packet) { + /* and with mask to extract bits, flipping 1-0 */ + __m128i eop_bits = _mm_andnot_si128(staterr, eop_check); + /* the staterr values are not in order, as the count + * count of dd bits doesn't care. However, for end of + * packet tracking, we do care, so shuffle. 
This also + * compresses the 32-bit values to 8-bit + */ + eop_bits = _mm_shuffle_epi8(eop_bits, eop_shuf_mask); + /* store the resulting 32-bit value */ + *(int *)split_packet = _mm_cvtsi128_si32(eop_bits); + split_packet += ICE_DESCS_PER_LOOP; + } + + /* C.3 calc available number of desc */ + staterr = _mm_and_si128(staterr, dd_check); + staterr = _mm_packs_epi32(staterr, zero); + + /* D.3 copy final 1,2 data to rx_pkts */ + _mm_storeu_si128 + ((void *)&rx_pkts[pos + 1]->rx_descriptor_fields1, + pkt_mb2); + _mm_storeu_si128((void *)&rx_pkts[pos]->rx_descriptor_fields1, + pkt_mb1); + ice_rx_desc_to_ptype_v(descs, &rx_pkts[pos], ptype_tbl); + /* C.4 calc avaialbe number of desc */ + var = __builtin_popcountll(_mm_cvtsi128_si64(staterr)); + nb_pkts_recd += var; + if (likely(var != ICE_DESCS_PER_LOOP)) + break; + } + + /* Update our internal tail pointer */ + rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_pkts_recd); + rxq->rx_tail = (uint16_t)(rxq->rx_tail & (rxq->nb_rx_desc - 1)); + rxq->rxrearm_nb = (uint16_t)(rxq->rxrearm_nb + nb_pkts_recd); + + return nb_pkts_recd; +} + +/** + * Notice: + * - nb_pkts < ICE_DESCS_PER_LOOP, just return no packet + * - nb_pkts > ICE_VPMD_RX_BURST, only scan ICE_VPMD_RX_BURST + * numbers of DD bits + */ +uint16_t +ice_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + return _ice_recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL); +} + +/* vPMD receive routine that reassembles scattered packets + * Notice: + * - nb_pkts < ICE_DESCS_PER_LOOP, just return no packet + * - nb_pkts > ICE_VPMD_RX_BURST, only scan ICE_VPMD_RX_BURST + * numbers of DD bits + */ +uint16_t +ice_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + struct ice_rx_queue *rxq = rx_queue; + uint8_t split_flags[ICE_VPMD_RX_BURST] = {0}; + + /* get some new buffers */ + uint16_t nb_bufs = _ice_recv_raw_pkts_vec(rxq, rx_pkts, nb_pkts, + split_flags); + if (nb_bufs == 0) + return 0; + + /* happy day case, full burst + no packets to be joined */ + const uint64_t *split_fl64 = (uint64_t *)split_flags; + + if (!rxq->pkt_first_seg && + split_fl64[0] == 0 && split_fl64[1] == 0 && + split_fl64[2] == 0 && split_fl64[3] == 0) + return nb_bufs; + + /* reassemble any packets that need reassembly*/ + unsigned int i = 0; + + if (!rxq->pkt_first_seg) { + /* find the first split flag, and only reassemble then*/ + while (i < nb_bufs && !split_flags[i]) + i++; + if (i == nb_bufs) + return nb_bufs; + rxq->pkt_first_seg = rx_pkts[i]; + } + return i + ice_rx_reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i, + &split_flags[i]); +} + +static inline void +ice_vtx1(volatile struct ice_tx_desc *txdp, struct rte_mbuf *pkt, + uint64_t flags) +{ + uint64_t high_qw = + (ICE_TX_DESC_DTYPE_DATA | + ((uint64_t)flags << ICE_TXD_QW1_CMD_S) | + ((uint64_t)pkt->data_len << ICE_TXD_QW1_TX_BUF_SZ_S)); + + __m128i descriptor = _mm_set_epi64x(high_qw, + pkt->buf_iova + pkt->data_off); + _mm_store_si128((__m128i *)txdp, descriptor); +} + +static inline void +ice_vtx(volatile struct ice_tx_desc *txdp, struct rte_mbuf **pkt, + uint16_t nb_pkts, uint64_t flags) +{ + int i; + + for (i = 0; i < nb_pkts; ++i, ++txdp, ++pkt) + ice_vtx1(txdp, *pkt, flags); +} + +static uint16_t +ice_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + struct ice_tx_queue *txq = (struct ice_tx_queue *)tx_queue; + volatile struct ice_tx_desc *txdp; + struct ice_tx_entry *txep; + uint16_t n, nb_commit, tx_id; + uint64_t flags = ICE_TD_CMD; + uint64_t rs = 
ICE_TX_DESC_CMD_RS | ICE_TD_CMD; + int i; + + /* cross rx_thresh boundary is not allowed */ + nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh); + + if (txq->nb_tx_free < txq->tx_free_thresh) + ice_tx_free_bufs(txq); + + nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts); + nb_commit = nb_pkts; + if (unlikely(nb_pkts == 0)) + return 0; + + tx_id = txq->tx_tail; + txdp = &txq->tx_ring[tx_id]; + txep = &txq->sw_ring[tx_id]; + + txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts); + + n = (uint16_t)(txq->nb_tx_desc - tx_id); + if (nb_commit >= n) { + ice_tx_backlog_entry(txep, tx_pkts, n); + + for (i = 0; i < n - 1; ++i, ++tx_pkts, ++txdp) + ice_vtx1(txdp, *tx_pkts, flags); + + ice_vtx1(txdp, *tx_pkts++, rs); + + nb_commit = (uint16_t)(nb_commit - n); + + tx_id = 0; + txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1); + + /* avoid reach the end of ring */ + txdp = &txq->tx_ring[tx_id]; + txep = &txq->sw_ring[tx_id]; + } + + ice_tx_backlog_entry(txep, tx_pkts, nb_commit); + + ice_vtx(txdp, tx_pkts, nb_commit, flags); + + tx_id = (uint16_t)(tx_id + nb_commit); + if (tx_id > txq->tx_next_rs) { + txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |= + rte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) << + ICE_TXD_QW1_CMD_S); + txq->tx_next_rs = + (uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh); + } + + txq->tx_tail = tx_id; + + ICE_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail); + + return nb_pkts; +} + +uint16_t +ice_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + uint16_t nb_tx = 0; + struct ice_tx_queue *txq = (struct ice_tx_queue *)tx_queue; + + while (nb_pkts) { + uint16_t ret, num; + + num = (uint16_t)RTE_MIN(nb_pkts, txq->tx_rs_thresh); + ret = ice_xmit_fixed_burst_vec(tx_queue, &tx_pkts[nb_tx], num); + nb_tx += ret; + nb_pkts -= ret; + if (ret < num) + break; + } + + return nb_tx; +} + +int __rte_cold +ice_rxq_vec_setup(struct ice_rx_queue *rxq) +{ + if (!rxq) + return -1; + + rxq->rx_rel_mbufs = _ice_rx_queue_release_mbufs_vec; + return ice_rxq_vec_setup_default(rxq); +} + +int __rte_cold +ice_txq_vec_setup(struct ice_tx_queue __rte_unused *txq) +{ + if (!txq) + return -1; + + txq->tx_rel_mbufs = _ice_tx_queue_release_mbufs_vec; + return 0; +} + +int __rte_cold +ice_rx_vec_dev_check(struct rte_eth_dev *dev) +{ + return ice_rx_vec_dev_check_default(dev); +} + +int __rte_cold +ice_tx_vec_dev_check(struct rte_eth_dev *dev) +{ + return ice_tx_vec_dev_check_default(dev); +} diff --git a/src/spdk/dpdk/drivers/net/ice/ice_switch_filter.c b/src/spdk/dpdk/drivers/net/ice/ice_switch_filter.c new file mode 100644 index 000000000..dd3f4847a --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ice/ice_switch_filter.c @@ -0,0 +1,1718 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019 Intel Corporation + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "base/ice_type.h" +#include "base/ice_switch.h" +#include "ice_logs.h" +#include "ice_ethdev.h" +#include "ice_generic_flow.h" + + +#define MAX_QGRP_NUM_TYPE 7 + +#define ICE_SW_INSET_ETHER ( \ + ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE) +#define ICE_SW_INSET_MAC_VLAN ( \ + ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE | \ + ICE_INSET_VLAN_OUTER) +#define ICE_SW_INSET_MAC_IPV4 ( \ + ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \ + ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS) +#define ICE_SW_INSET_MAC_IPV4_TCP ( \ + ICE_INSET_DMAC | 
ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \ + ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \ + ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT) +#define ICE_SW_INSET_MAC_IPV4_UDP ( \ + ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \ + ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \ + ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT) +#define ICE_SW_INSET_MAC_IPV6 ( \ + ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \ + ICE_INSET_IPV6_TC | ICE_INSET_IPV6_HOP_LIMIT | \ + ICE_INSET_IPV6_NEXT_HDR) +#define ICE_SW_INSET_MAC_IPV6_TCP ( \ + ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \ + ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \ + ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT) +#define ICE_SW_INSET_MAC_IPV6_UDP ( \ + ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \ + ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \ + ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT) +#define ICE_SW_INSET_DIST_NVGRE_IPV4 ( \ + ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \ + ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST) +#define ICE_SW_INSET_DIST_VXLAN_IPV4 ( \ + ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \ + ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST) +#define ICE_SW_INSET_DIST_NVGRE_IPV4_TCP ( \ + ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \ + ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \ + ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST) +#define ICE_SW_INSET_DIST_NVGRE_IPV4_UDP ( \ + ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \ + ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \ + ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST) +#define ICE_SW_INSET_DIST_VXLAN_IPV4_TCP ( \ + ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \ + ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \ + ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST) +#define ICE_SW_INSET_DIST_VXLAN_IPV4_UDP ( \ + ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \ + ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \ + ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST) +#define ICE_SW_INSET_PERM_TUNNEL_IPV4 ( \ + ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \ + ICE_INSET_TUN_IPV4_PROTO | ICE_INSET_TUN_IPV4_TOS) +#define ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP ( \ + ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \ + ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \ + ICE_INSET_TUN_IPV4_TOS) +#define ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP ( \ + ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \ + ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \ + ICE_INSET_TUN_IPV4_TOS) +#define ICE_SW_INSET_MAC_PPPOE ( \ + ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \ + ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION) +#define ICE_SW_INSET_MAC_PPPOE_PROTO ( \ + ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \ + ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION | \ + ICE_INSET_PPPOE_PROTO) +#define ICE_SW_INSET_MAC_IPV4_ESP ( \ + ICE_SW_INSET_MAC_IPV4 | ICE_INSET_ESP_SPI) +#define ICE_SW_INSET_MAC_IPV6_ESP ( \ + ICE_SW_INSET_MAC_IPV6 | ICE_INSET_ESP_SPI) +#define ICE_SW_INSET_MAC_IPV4_AH ( \ + ICE_SW_INSET_MAC_IPV4 | ICE_INSET_AH_SPI) +#define ICE_SW_INSET_MAC_IPV6_AH ( \ + ICE_SW_INSET_MAC_IPV6 | ICE_INSET_AH_SPI) +#define ICE_SW_INSET_MAC_IPV4_L2TP ( \ + ICE_SW_INSET_MAC_IPV4 | ICE_INSET_L2TPV3OIP_SESSION_ID) +#define ICE_SW_INSET_MAC_IPV6_L2TP ( \ + ICE_SW_INSET_MAC_IPV6 | 
ICE_INSET_L2TPV3OIP_SESSION_ID) +#define ICE_SW_INSET_MAC_IPV4_PFCP ( \ + ICE_SW_INSET_MAC_IPV4 | \ + ICE_INSET_PFCP_S_FIELD | ICE_INSET_PFCP_SEID) +#define ICE_SW_INSET_MAC_IPV6_PFCP ( \ + ICE_SW_INSET_MAC_IPV6 | \ + ICE_INSET_PFCP_S_FIELD | ICE_INSET_PFCP_SEID) + +struct sw_meta { + struct ice_adv_lkup_elem *list; + uint16_t lkups_num; + struct ice_adv_rule_info rule_info; +}; + +static struct ice_flow_parser ice_switch_dist_parser_os; +static struct ice_flow_parser ice_switch_dist_parser_comms; +static struct ice_flow_parser ice_switch_perm_parser; + +static struct +ice_pattern_match_item ice_switch_pattern_dist_comms[] = { + {pattern_ethertype, + ICE_SW_INSET_ETHER, ICE_INSET_NONE}, + {pattern_ethertype_vlan, + ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE}, + {pattern_eth_ipv4, + ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE}, + {pattern_eth_ipv4_udp, + ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE}, + {pattern_eth_ipv4_tcp, + ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE}, + {pattern_eth_ipv6, + ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE}, + {pattern_eth_ipv6_udp, + ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE}, + {pattern_eth_ipv6_tcp, + ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE}, + {pattern_eth_ipv4_udp_vxlan_eth_ipv4, + ICE_SW_INSET_DIST_VXLAN_IPV4, ICE_INSET_NONE}, + {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp, + ICE_SW_INSET_DIST_VXLAN_IPV4_UDP, ICE_INSET_NONE}, + {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp, + ICE_SW_INSET_DIST_VXLAN_IPV4_TCP, ICE_INSET_NONE}, + {pattern_eth_ipv4_nvgre_eth_ipv4, + ICE_SW_INSET_DIST_NVGRE_IPV4, ICE_INSET_NONE}, + {pattern_eth_ipv4_nvgre_eth_ipv4_udp, + ICE_SW_INSET_DIST_NVGRE_IPV4_UDP, ICE_INSET_NONE}, + {pattern_eth_ipv4_nvgre_eth_ipv4_tcp, + ICE_SW_INSET_DIST_NVGRE_IPV4_TCP, ICE_INSET_NONE}, + {pattern_eth_pppoed, + ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE}, + {pattern_eth_vlan_pppoed, + ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE}, + {pattern_eth_pppoes, + ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE}, + {pattern_eth_vlan_pppoes, + ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE}, + {pattern_eth_pppoes_proto, + ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE}, + {pattern_eth_vlan_pppoes_proto, + ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE}, + {pattern_eth_ipv4_esp, + ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE}, + {pattern_eth_ipv4_udp_esp, + ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE}, + {pattern_eth_ipv6_esp, + ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE}, + {pattern_eth_ipv6_udp_esp, + ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE}, + {pattern_eth_ipv4_ah, + ICE_SW_INSET_MAC_IPV4_AH, ICE_INSET_NONE}, + {pattern_eth_ipv6_ah, + ICE_SW_INSET_MAC_IPV6_AH, ICE_INSET_NONE}, + {pattern_eth_ipv6_udp_ah, + ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_ipv4_l2tp, + ICE_SW_INSET_MAC_IPV4_L2TP, ICE_INSET_NONE}, + {pattern_eth_ipv6_l2tp, + ICE_SW_INSET_MAC_IPV6_L2TP, ICE_INSET_NONE}, + {pattern_eth_ipv4_pfcp, + ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_ipv6_pfcp, + ICE_INSET_NONE, ICE_INSET_NONE}, +}; + +static struct +ice_pattern_match_item ice_switch_pattern_dist_os[] = { + {pattern_ethertype, + ICE_SW_INSET_ETHER, ICE_INSET_NONE}, + {pattern_ethertype_vlan, + ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE}, + {pattern_eth_arp, + ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_ipv4, + ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE}, + {pattern_eth_ipv4_udp, + ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE}, + {pattern_eth_ipv4_tcp, + ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE}, + {pattern_eth_ipv6, + ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE}, + {pattern_eth_ipv6_udp, + ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE}, + {pattern_eth_ipv6_tcp, + 
ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE}, + {pattern_eth_ipv4_udp_vxlan_eth_ipv4, + ICE_SW_INSET_DIST_VXLAN_IPV4, ICE_INSET_NONE}, + {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp, + ICE_SW_INSET_DIST_VXLAN_IPV4_UDP, ICE_INSET_NONE}, + {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp, + ICE_SW_INSET_DIST_VXLAN_IPV4_TCP, ICE_INSET_NONE}, + {pattern_eth_ipv4_nvgre_eth_ipv4, + ICE_SW_INSET_DIST_NVGRE_IPV4, ICE_INSET_NONE}, + {pattern_eth_ipv4_nvgre_eth_ipv4_udp, + ICE_SW_INSET_DIST_NVGRE_IPV4_UDP, ICE_INSET_NONE}, + {pattern_eth_ipv4_nvgre_eth_ipv4_tcp, + ICE_SW_INSET_DIST_NVGRE_IPV4_TCP, ICE_INSET_NONE}, +}; + +static struct +ice_pattern_match_item ice_switch_pattern_perm[] = { + {pattern_ethertype, + ICE_SW_INSET_ETHER, ICE_INSET_NONE}, + {pattern_ethertype_vlan, + ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE}, + {pattern_eth_ipv4, + ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE}, + {pattern_eth_ipv4_udp, + ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE}, + {pattern_eth_ipv4_tcp, + ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE}, + {pattern_eth_ipv6, + ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE}, + {pattern_eth_ipv6_udp, + ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE}, + {pattern_eth_ipv6_tcp, + ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE}, + {pattern_eth_ipv4_udp_vxlan_eth_ipv4, + ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE}, + {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp, + ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE}, + {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp, + ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE}, + {pattern_eth_ipv4_nvgre_eth_ipv4, + ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE}, + {pattern_eth_ipv4_nvgre_eth_ipv4_udp, + ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE}, + {pattern_eth_ipv4_nvgre_eth_ipv4_tcp, + ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE}, + {pattern_eth_pppoed, + ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE}, + {pattern_eth_vlan_pppoed, + ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE}, + {pattern_eth_pppoes, + ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE}, + {pattern_eth_vlan_pppoes, + ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE}, + {pattern_eth_pppoes_proto, + ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE}, + {pattern_eth_vlan_pppoes_proto, + ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE}, + {pattern_eth_ipv4_esp, + ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE}, + {pattern_eth_ipv4_udp_esp, + ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE}, + {pattern_eth_ipv6_esp, + ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE}, + {pattern_eth_ipv6_udp_esp, + ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE}, + {pattern_eth_ipv4_ah, + ICE_SW_INSET_MAC_IPV4_AH, ICE_INSET_NONE}, + {pattern_eth_ipv6_ah, + ICE_SW_INSET_MAC_IPV6_AH, ICE_INSET_NONE}, + {pattern_eth_ipv6_udp_ah, + ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_ipv4_l2tp, + ICE_SW_INSET_MAC_IPV4_L2TP, ICE_INSET_NONE}, + {pattern_eth_ipv6_l2tp, + ICE_SW_INSET_MAC_IPV6_L2TP, ICE_INSET_NONE}, + {pattern_eth_ipv4_pfcp, + ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_ipv6_pfcp, + ICE_INSET_NONE, ICE_INSET_NONE}, +}; + +static int +ice_switch_create(struct ice_adapter *ad, + struct rte_flow *flow, + void *meta, + struct rte_flow_error *error) +{ + int ret = 0; + struct ice_pf *pf = &ad->pf; + struct ice_hw *hw = ICE_PF_TO_HW(pf); + struct ice_rule_query_data rule_added = {0}; + struct ice_rule_query_data *filter_ptr; + struct ice_adv_lkup_elem *list = + ((struct sw_meta *)meta)->list; + uint16_t lkups_cnt = + ((struct sw_meta *)meta)->lkups_num; + struct ice_adv_rule_info *rule_info = + &((struct sw_meta *)meta)->rule_info; + + if (lkups_cnt > ICE_MAX_CHAIN_WORDS) { + 
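+		/* The parsed pattern needs more lookup words than a single
+		 * advanced switch rule can carry, so reject the flow up front.
+		 */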
rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, NULL, + "item number too large for rule"); + goto error; + } + if (!list) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, NULL, + "lookup list should not be NULL"); + goto error; + } + ret = ice_add_adv_rule(hw, list, lkups_cnt, rule_info, &rule_added); + if (!ret) { + filter_ptr = rte_zmalloc("ice_switch_filter", + sizeof(struct ice_rule_query_data), 0); + if (!filter_ptr) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "No memory for ice_switch_filter"); + goto error; + } + flow->rule = filter_ptr; + rte_memcpy(filter_ptr, + &rule_added, + sizeof(struct ice_rule_query_data)); + } else { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "switch filter create flow fail"); + goto error; + } + + rte_free(list); + rte_free(meta); + return 0; + +error: + rte_free(list); + rte_free(meta); + + return -rte_errno; +} + +static int +ice_switch_destroy(struct ice_adapter *ad, + struct rte_flow *flow, + struct rte_flow_error *error) +{ + struct ice_hw *hw = &ad->hw; + int ret; + struct ice_rule_query_data *filter_ptr; + + filter_ptr = (struct ice_rule_query_data *) + flow->rule; + + if (!filter_ptr) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "no such flow" + " create by switch filter"); + return -rte_errno; + } + + ret = ice_rem_adv_rule_by_id(hw, filter_ptr); + if (ret) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "fail to destroy switch filter rule"); + return -rte_errno; + } + + rte_free(filter_ptr); + return ret; +} + +static void +ice_switch_filter_rule_free(struct rte_flow *flow) +{ + rte_free(flow->rule); +} + +static uint64_t +ice_switch_inset_get(const struct rte_flow_item pattern[], + struct rte_flow_error *error, + struct ice_adv_lkup_elem *list, + uint16_t *lkups_num, + enum ice_sw_tunnel_type *tun_type) +{ + const struct rte_flow_item *item = pattern; + enum rte_flow_item_type item_type; + const struct rte_flow_item_eth *eth_spec, *eth_mask; + const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask; + const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask; + const struct rte_flow_item_tcp *tcp_spec, *tcp_mask; + const struct rte_flow_item_udp *udp_spec, *udp_mask; + const struct rte_flow_item_sctp *sctp_spec, *sctp_mask; + const struct rte_flow_item_nvgre *nvgre_spec, *nvgre_mask; + const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask; + const struct rte_flow_item_vlan *vlan_spec, *vlan_mask; + const struct rte_flow_item_pppoe *pppoe_spec, *pppoe_mask; + const struct rte_flow_item_pppoe_proto_id *pppoe_proto_spec, + *pppoe_proto_mask; + const struct rte_flow_item_esp *esp_spec, *esp_mask; + const struct rte_flow_item_ah *ah_spec, *ah_mask; + const struct rte_flow_item_l2tpv3oip *l2tp_spec, *l2tp_mask; + const struct rte_flow_item_pfcp *pfcp_spec, *pfcp_mask; + uint64_t input_set = ICE_INSET_NONE; + uint16_t j, t = 0; + bool profile_rule = 0; + bool tunnel_valid = 0; + bool pppoe_valid = 0; + bool ipv6_valiad = 0; + bool ipv4_valiad = 0; + bool udp_valiad = 0; + + for (item = pattern; item->type != + RTE_FLOW_ITEM_TYPE_END; item++) { + if (item->last) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Not support range"); + return 0; + } + item_type = item->type; + + switch (item_type) { + case RTE_FLOW_ITEM_TYPE_ETH: + eth_spec = item->spec; + eth_mask = item->mask; + if (eth_spec && eth_mask) { + const uint8_t *a = eth_mask->src.addr_bytes; + const uint8_t *b = 
eth_mask->dst.addr_bytes; + for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) { + if (a[j] && tunnel_valid) { + input_set |= + ICE_INSET_TUN_SMAC; + break; + } else if (a[j]) { + input_set |= + ICE_INSET_SMAC; + break; + } + } + for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) { + if (b[j] && tunnel_valid) { + input_set |= + ICE_INSET_TUN_DMAC; + break; + } else if (b[j]) { + input_set |= + ICE_INSET_DMAC; + break; + } + } + if (eth_mask->type) + input_set |= ICE_INSET_ETHERTYPE; + list[t].type = (tunnel_valid == 0) ? + ICE_MAC_OFOS : ICE_MAC_IL; + struct ice_ether_hdr *h; + struct ice_ether_hdr *m; + uint16_t i = 0; + h = &list[t].h_u.eth_hdr; + m = &list[t].m_u.eth_hdr; + for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) { + if (eth_mask->src.addr_bytes[j]) { + h->src_addr[j] = + eth_spec->src.addr_bytes[j]; + m->src_addr[j] = + eth_mask->src.addr_bytes[j]; + i = 1; + } + if (eth_mask->dst.addr_bytes[j]) { + h->dst_addr[j] = + eth_spec->dst.addr_bytes[j]; + m->dst_addr[j] = + eth_mask->dst.addr_bytes[j]; + i = 1; + } + } + if (i) + t++; + if (eth_mask->type) { + list[t].type = ICE_ETYPE_OL; + list[t].h_u.ethertype.ethtype_id = + eth_spec->type; + list[t].m_u.ethertype.ethtype_id = + eth_mask->type; + t++; + } + } + break; + + case RTE_FLOW_ITEM_TYPE_IPV4: + ipv4_spec = item->spec; + ipv4_mask = item->mask; + ipv4_valiad = 1; + if (ipv4_spec && ipv4_mask) { + /* Check IPv4 mask and update input set */ + if (ipv4_mask->hdr.version_ihl || + ipv4_mask->hdr.total_length || + ipv4_mask->hdr.packet_id || + ipv4_mask->hdr.hdr_checksum) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid IPv4 mask."); + return 0; + } + + if (tunnel_valid) { + if (ipv4_mask->hdr.type_of_service) + input_set |= + ICE_INSET_TUN_IPV4_TOS; + if (ipv4_mask->hdr.src_addr) + input_set |= + ICE_INSET_TUN_IPV4_SRC; + if (ipv4_mask->hdr.dst_addr) + input_set |= + ICE_INSET_TUN_IPV4_DST; + if (ipv4_mask->hdr.time_to_live) + input_set |= + ICE_INSET_TUN_IPV4_TTL; + if (ipv4_mask->hdr.next_proto_id) + input_set |= + ICE_INSET_TUN_IPV4_PROTO; + } else { + if (ipv4_mask->hdr.src_addr) + input_set |= ICE_INSET_IPV4_SRC; + if (ipv4_mask->hdr.dst_addr) + input_set |= ICE_INSET_IPV4_DST; + if (ipv4_mask->hdr.time_to_live) + input_set |= ICE_INSET_IPV4_TTL; + if (ipv4_mask->hdr.next_proto_id) + input_set |= + ICE_INSET_IPV4_PROTO; + if (ipv4_mask->hdr.type_of_service) + input_set |= + ICE_INSET_IPV4_TOS; + } + list[t].type = (tunnel_valid == 0) ? 
+ ICE_IPV4_OFOS : ICE_IPV4_IL; + if (ipv4_mask->hdr.src_addr) { + list[t].h_u.ipv4_hdr.src_addr = + ipv4_spec->hdr.src_addr; + list[t].m_u.ipv4_hdr.src_addr = + ipv4_mask->hdr.src_addr; + } + if (ipv4_mask->hdr.dst_addr) { + list[t].h_u.ipv4_hdr.dst_addr = + ipv4_spec->hdr.dst_addr; + list[t].m_u.ipv4_hdr.dst_addr = + ipv4_mask->hdr.dst_addr; + } + if (ipv4_mask->hdr.time_to_live) { + list[t].h_u.ipv4_hdr.time_to_live = + ipv4_spec->hdr.time_to_live; + list[t].m_u.ipv4_hdr.time_to_live = + ipv4_mask->hdr.time_to_live; + } + if (ipv4_mask->hdr.next_proto_id) { + list[t].h_u.ipv4_hdr.protocol = + ipv4_spec->hdr.next_proto_id; + list[t].m_u.ipv4_hdr.protocol = + ipv4_mask->hdr.next_proto_id; + } + if (ipv4_mask->hdr.type_of_service) { + list[t].h_u.ipv4_hdr.tos = + ipv4_spec->hdr.type_of_service; + list[t].m_u.ipv4_hdr.tos = + ipv4_mask->hdr.type_of_service; + } + t++; + } + break; + + case RTE_FLOW_ITEM_TYPE_IPV6: + ipv6_spec = item->spec; + ipv6_mask = item->mask; + ipv6_valiad = 1; + if (ipv6_spec && ipv6_mask) { + if (ipv6_mask->hdr.payload_len) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid IPv6 mask"); + return 0; + } + + for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) { + if (ipv6_mask->hdr.src_addr[j] && + tunnel_valid) { + input_set |= + ICE_INSET_TUN_IPV6_SRC; + break; + } else if (ipv6_mask->hdr.src_addr[j]) { + input_set |= ICE_INSET_IPV6_SRC; + break; + } + } + for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) { + if (ipv6_mask->hdr.dst_addr[j] && + tunnel_valid) { + input_set |= + ICE_INSET_TUN_IPV6_DST; + break; + } else if (ipv6_mask->hdr.dst_addr[j]) { + input_set |= ICE_INSET_IPV6_DST; + break; + } + } + if (ipv6_mask->hdr.proto && + tunnel_valid) + input_set |= + ICE_INSET_TUN_IPV6_NEXT_HDR; + else if (ipv6_mask->hdr.proto) + input_set |= + ICE_INSET_IPV6_NEXT_HDR; + if (ipv6_mask->hdr.hop_limits && + tunnel_valid) + input_set |= + ICE_INSET_TUN_IPV6_HOP_LIMIT; + else if (ipv6_mask->hdr.hop_limits) + input_set |= + ICE_INSET_IPV6_HOP_LIMIT; + if ((ipv6_mask->hdr.vtc_flow & + rte_cpu_to_be_32 + (RTE_IPV6_HDR_TC_MASK)) && + tunnel_valid) + input_set |= + ICE_INSET_TUN_IPV6_TC; + else if (ipv6_mask->hdr.vtc_flow & + rte_cpu_to_be_32 + (RTE_IPV6_HDR_TC_MASK)) + input_set |= ICE_INSET_IPV6_TC; + + list[t].type = (tunnel_valid == 0) ? 
+ ICE_IPV6_OFOS : ICE_IPV6_IL; + struct ice_ipv6_hdr *f; + struct ice_ipv6_hdr *s; + f = &list[t].h_u.ipv6_hdr; + s = &list[t].m_u.ipv6_hdr; + for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) { + if (ipv6_mask->hdr.src_addr[j]) { + f->src_addr[j] = + ipv6_spec->hdr.src_addr[j]; + s->src_addr[j] = + ipv6_mask->hdr.src_addr[j]; + } + if (ipv6_mask->hdr.dst_addr[j]) { + f->dst_addr[j] = + ipv6_spec->hdr.dst_addr[j]; + s->dst_addr[j] = + ipv6_mask->hdr.dst_addr[j]; + } + } + if (ipv6_mask->hdr.proto) { + f->next_hdr = + ipv6_spec->hdr.proto; + s->next_hdr = + ipv6_mask->hdr.proto; + } + if (ipv6_mask->hdr.hop_limits) { + f->hop_limit = + ipv6_spec->hdr.hop_limits; + s->hop_limit = + ipv6_mask->hdr.hop_limits; + } + if (ipv6_mask->hdr.vtc_flow & + rte_cpu_to_be_32 + (RTE_IPV6_HDR_TC_MASK)) { + struct ice_le_ver_tc_flow vtf; + vtf.u.fld.version = 0; + vtf.u.fld.flow_label = 0; + vtf.u.fld.tc = (rte_be_to_cpu_32 + (ipv6_spec->hdr.vtc_flow) & + RTE_IPV6_HDR_TC_MASK) >> + RTE_IPV6_HDR_TC_SHIFT; + f->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val); + vtf.u.fld.tc = (rte_be_to_cpu_32 + (ipv6_mask->hdr.vtc_flow) & + RTE_IPV6_HDR_TC_MASK) >> + RTE_IPV6_HDR_TC_SHIFT; + s->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val); + } + t++; + } + break; + + case RTE_FLOW_ITEM_TYPE_UDP: + udp_spec = item->spec; + udp_mask = item->mask; + udp_valiad = 1; + if (udp_spec && udp_mask) { + /* Check UDP mask and update input set*/ + if (udp_mask->hdr.dgram_len || + udp_mask->hdr.dgram_cksum) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid UDP mask"); + return 0; + } + + if (tunnel_valid) { + if (udp_mask->hdr.src_port) + input_set |= + ICE_INSET_TUN_UDP_SRC_PORT; + if (udp_mask->hdr.dst_port) + input_set |= + ICE_INSET_TUN_UDP_DST_PORT; + } else { + if (udp_mask->hdr.src_port) + input_set |= + ICE_INSET_UDP_SRC_PORT; + if (udp_mask->hdr.dst_port) + input_set |= + ICE_INSET_UDP_DST_PORT; + } + if (*tun_type == ICE_SW_TUN_VXLAN && + tunnel_valid == 0) + list[t].type = ICE_UDP_OF; + else + list[t].type = ICE_UDP_ILOS; + if (udp_mask->hdr.src_port) { + list[t].h_u.l4_hdr.src_port = + udp_spec->hdr.src_port; + list[t].m_u.l4_hdr.src_port = + udp_mask->hdr.src_port; + } + if (udp_mask->hdr.dst_port) { + list[t].h_u.l4_hdr.dst_port = + udp_spec->hdr.dst_port; + list[t].m_u.l4_hdr.dst_port = + udp_mask->hdr.dst_port; + } + t++; + } + break; + + case RTE_FLOW_ITEM_TYPE_TCP: + tcp_spec = item->spec; + tcp_mask = item->mask; + if (tcp_spec && tcp_mask) { + /* Check TCP mask and update input set */ + if (tcp_mask->hdr.sent_seq || + tcp_mask->hdr.recv_ack || + tcp_mask->hdr.data_off || + tcp_mask->hdr.tcp_flags || + tcp_mask->hdr.rx_win || + tcp_mask->hdr.cksum || + tcp_mask->hdr.tcp_urp) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid TCP mask"); + return 0; + } + + if (tunnel_valid) { + if (tcp_mask->hdr.src_port) + input_set |= + ICE_INSET_TUN_TCP_SRC_PORT; + if (tcp_mask->hdr.dst_port) + input_set |= + ICE_INSET_TUN_TCP_DST_PORT; + } else { + if (tcp_mask->hdr.src_port) + input_set |= + ICE_INSET_TCP_SRC_PORT; + if (tcp_mask->hdr.dst_port) + input_set |= + ICE_INSET_TCP_DST_PORT; + } + list[t].type = ICE_TCP_IL; + if (tcp_mask->hdr.src_port) { + list[t].h_u.l4_hdr.src_port = + tcp_spec->hdr.src_port; + list[t].m_u.l4_hdr.src_port = + tcp_mask->hdr.src_port; + } + if (tcp_mask->hdr.dst_port) { + list[t].h_u.l4_hdr.dst_port = + tcp_spec->hdr.dst_port; + list[t].m_u.l4_hdr.dst_port = + tcp_mask->hdr.dst_port; + } + t++; + } + break; + + case RTE_FLOW_ITEM_TYPE_SCTP: + sctp_spec = 
item->spec; + sctp_mask = item->mask; + if (sctp_spec && sctp_mask) { + /* Check SCTP mask and update input set */ + if (sctp_mask->hdr.cksum) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid SCTP mask"); + return 0; + } + + if (tunnel_valid) { + if (sctp_mask->hdr.src_port) + input_set |= + ICE_INSET_TUN_SCTP_SRC_PORT; + if (sctp_mask->hdr.dst_port) + input_set |= + ICE_INSET_TUN_SCTP_DST_PORT; + } else { + if (sctp_mask->hdr.src_port) + input_set |= + ICE_INSET_SCTP_SRC_PORT; + if (sctp_mask->hdr.dst_port) + input_set |= + ICE_INSET_SCTP_DST_PORT; + } + list[t].type = ICE_SCTP_IL; + if (sctp_mask->hdr.src_port) { + list[t].h_u.sctp_hdr.src_port = + sctp_spec->hdr.src_port; + list[t].m_u.sctp_hdr.src_port = + sctp_mask->hdr.src_port; + } + if (sctp_mask->hdr.dst_port) { + list[t].h_u.sctp_hdr.dst_port = + sctp_spec->hdr.dst_port; + list[t].m_u.sctp_hdr.dst_port = + sctp_mask->hdr.dst_port; + } + t++; + } + break; + + case RTE_FLOW_ITEM_TYPE_VXLAN: + vxlan_spec = item->spec; + vxlan_mask = item->mask; + /* Check if VXLAN item is used to describe protocol. + * If yes, both spec and mask should be NULL. + * If no, both spec and mask shouldn't be NULL. + */ + if ((!vxlan_spec && vxlan_mask) || + (vxlan_spec && !vxlan_mask)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid VXLAN item"); + return 0; + } + + tunnel_valid = 1; + if (vxlan_spec && vxlan_mask) { + list[t].type = ICE_VXLAN; + if (vxlan_mask->vni[0] || + vxlan_mask->vni[1] || + vxlan_mask->vni[2]) { + list[t].h_u.tnl_hdr.vni = + (vxlan_spec->vni[2] << 16) | + (vxlan_spec->vni[1] << 8) | + vxlan_spec->vni[0]; + list[t].m_u.tnl_hdr.vni = + (vxlan_mask->vni[2] << 16) | + (vxlan_mask->vni[1] << 8) | + vxlan_mask->vni[0]; + input_set |= + ICE_INSET_TUN_VXLAN_VNI; + } + t++; + } + break; + + case RTE_FLOW_ITEM_TYPE_NVGRE: + nvgre_spec = item->spec; + nvgre_mask = item->mask; + /* Check if NVGRE item is used to describe protocol. + * If yes, both spec and mask should be NULL. + * If no, both spec and mask shouldn't be NULL. + */ + if ((!nvgre_spec && nvgre_mask) || + (nvgre_spec && !nvgre_mask)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid NVGRE item"); + return 0; + } + tunnel_valid = 1; + if (nvgre_spec && nvgre_mask) { + list[t].type = ICE_NVGRE; + if (nvgre_mask->tni[0] || + nvgre_mask->tni[1] || + nvgre_mask->tni[2]) { + list[t].h_u.nvgre_hdr.tni_flow = + (nvgre_spec->tni[2] << 16) | + (nvgre_spec->tni[1] << 8) | + nvgre_spec->tni[0]; + list[t].m_u.nvgre_hdr.tni_flow = + (nvgre_mask->tni[2] << 16) | + (nvgre_mask->tni[1] << 8) | + nvgre_mask->tni[0]; + input_set |= + ICE_INSET_TUN_NVGRE_TNI; + } + t++; + } + break; + + case RTE_FLOW_ITEM_TYPE_VLAN: + vlan_spec = item->spec; + vlan_mask = item->mask; + /* Check if VLAN item is used to describe protocol. + * If yes, both spec and mask should be NULL. + * If no, both spec and mask shouldn't be NULL. 
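+		 * An empty VLAN item therefore only asserts that the packet
+		 * carries a VLAN tag; a populated item also contributes the
+		 * TCI and inner ethertype to the matched input set.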
+ */ + if ((!vlan_spec && vlan_mask) || + (vlan_spec && !vlan_mask)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid VLAN item"); + return 0; + } + if (vlan_spec && vlan_mask) { + list[t].type = ICE_VLAN_OFOS; + if (vlan_mask->tci) { + list[t].h_u.vlan_hdr.vlan = + vlan_spec->tci; + list[t].m_u.vlan_hdr.vlan = + vlan_mask->tci; + input_set |= ICE_INSET_VLAN_OUTER; + } + if (vlan_mask->inner_type) { + list[t].h_u.vlan_hdr.type = + vlan_spec->inner_type; + list[t].m_u.vlan_hdr.type = + vlan_mask->inner_type; + input_set |= ICE_INSET_ETHERTYPE; + } + t++; + } + break; + + case RTE_FLOW_ITEM_TYPE_PPPOED: + case RTE_FLOW_ITEM_TYPE_PPPOES: + pppoe_spec = item->spec; + pppoe_mask = item->mask; + /* Check if PPPoE item is used to describe protocol. + * If yes, both spec and mask should be NULL. + * If no, both spec and mask shouldn't be NULL. + */ + if ((!pppoe_spec && pppoe_mask) || + (pppoe_spec && !pppoe_mask)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid pppoe item"); + return 0; + } + if (pppoe_spec && pppoe_mask) { + /* Check pppoe mask and update input set */ + if (pppoe_mask->length || + pppoe_mask->code || + pppoe_mask->version_type) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid pppoe mask"); + return 0; + } + list[t].type = ICE_PPPOE; + if (pppoe_mask->session_id) { + list[t].h_u.pppoe_hdr.session_id = + pppoe_spec->session_id; + list[t].m_u.pppoe_hdr.session_id = + pppoe_mask->session_id; + input_set |= ICE_INSET_PPPOE_SESSION; + } + t++; + pppoe_valid = 1; + } + break; + + case RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID: + pppoe_proto_spec = item->spec; + pppoe_proto_mask = item->mask; + /* Check if PPPoE optional proto_id item + * is used to describe protocol. + * If yes, both spec and mask should be NULL. + * If no, both spec and mask shouldn't be NULL. 
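+		 * When a proto_id match is present it is folded into the
+		 * preceding PPPoE lookup element (note the t-- below) instead
+		 * of consuming a lookup slot of its own.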
+ */ + if ((!pppoe_proto_spec && pppoe_proto_mask) || + (pppoe_proto_spec && !pppoe_proto_mask)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid pppoe proto item"); + return 0; + } + if (pppoe_proto_spec && pppoe_proto_mask) { + if (pppoe_valid) + t--; + list[t].type = ICE_PPPOE; + if (pppoe_proto_mask->proto_id) { + list[t].h_u.pppoe_hdr.ppp_prot_id = + pppoe_proto_spec->proto_id; + list[t].m_u.pppoe_hdr.ppp_prot_id = + pppoe_proto_mask->proto_id; + input_set |= ICE_INSET_PPPOE_PROTO; + } + t++; + } + break; + + case RTE_FLOW_ITEM_TYPE_ESP: + esp_spec = item->spec; + esp_mask = item->mask; + if ((esp_spec && !esp_mask) || + (!esp_spec && esp_mask)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid esp item"); + return 0; + } + /* Check esp mask and update input set */ + if (esp_mask && esp_mask->hdr.seq) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid esp mask"); + return 0; + } + + if (!esp_spec && !esp_mask && !input_set) { + profile_rule = 1; + if (ipv6_valiad && udp_valiad) + *tun_type = + ICE_SW_TUN_PROFID_IPV6_NAT_T; + else if (ipv6_valiad) + *tun_type = ICE_SW_TUN_PROFID_IPV6_ESP; + else if (ipv4_valiad) + return 0; + } else if (esp_spec && esp_mask && + esp_mask->hdr.spi){ + if (udp_valiad) + list[t].type = ICE_NAT_T; + else + list[t].type = ICE_ESP; + list[t].h_u.esp_hdr.spi = + esp_spec->hdr.spi; + list[t].m_u.esp_hdr.spi = + esp_mask->hdr.spi; + input_set |= ICE_INSET_ESP_SPI; + t++; + } + + if (!profile_rule) { + if (ipv6_valiad && udp_valiad) + *tun_type = ICE_SW_TUN_IPV6_NAT_T; + else if (ipv4_valiad && udp_valiad) + *tun_type = ICE_SW_TUN_IPV4_NAT_T; + else if (ipv6_valiad) + *tun_type = ICE_SW_TUN_IPV6_ESP; + else if (ipv4_valiad) + *tun_type = ICE_SW_TUN_IPV4_ESP; + } + break; + + case RTE_FLOW_ITEM_TYPE_AH: + ah_spec = item->spec; + ah_mask = item->mask; + if ((ah_spec && !ah_mask) || + (!ah_spec && ah_mask)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid ah item"); + return 0; + } + /* Check ah mask and update input set */ + if (ah_mask && + (ah_mask->next_hdr || + ah_mask->payload_len || + ah_mask->seq_num || + ah_mask->reserved)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid ah mask"); + return 0; + } + + if (!ah_spec && !ah_mask && !input_set) { + profile_rule = 1; + if (ipv6_valiad && udp_valiad) + *tun_type = + ICE_SW_TUN_PROFID_IPV6_NAT_T; + else if (ipv6_valiad) + *tun_type = ICE_SW_TUN_PROFID_IPV6_AH; + else if (ipv4_valiad) + return 0; + } else if (ah_spec && ah_mask && + ah_mask->spi){ + list[t].type = ICE_AH; + list[t].h_u.ah_hdr.spi = + ah_spec->spi; + list[t].m_u.ah_hdr.spi = + ah_mask->spi; + input_set |= ICE_INSET_AH_SPI; + t++; + } + + if (!profile_rule) { + if (udp_valiad) + return 0; + else if (ipv6_valiad) + *tun_type = ICE_SW_TUN_IPV6_AH; + else if (ipv4_valiad) + *tun_type = ICE_SW_TUN_IPV4_AH; + } + break; + + case RTE_FLOW_ITEM_TYPE_L2TPV3OIP: + l2tp_spec = item->spec; + l2tp_mask = item->mask; + if ((l2tp_spec && !l2tp_mask) || + (!l2tp_spec && l2tp_mask)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid l2tp item"); + return 0; + } + + if (!l2tp_spec && !l2tp_mask && !input_set) { + if (ipv6_valiad) + *tun_type = + ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3; + else if (ipv4_valiad) + return 0; + } else if (l2tp_spec && l2tp_mask && + l2tp_mask->session_id){ + list[t].type = ICE_L2TPV3; + list[t].h_u.l2tpv3_sess_hdr.session_id = + 
l2tp_spec->session_id; + list[t].m_u.l2tpv3_sess_hdr.session_id = + l2tp_mask->session_id; + input_set |= ICE_INSET_L2TPV3OIP_SESSION_ID; + t++; + } + + if (!profile_rule) { + if (ipv6_valiad) + *tun_type = + ICE_SW_TUN_IPV6_L2TPV3; + else if (ipv4_valiad) + *tun_type = + ICE_SW_TUN_IPV4_L2TPV3; + } + break; + + case RTE_FLOW_ITEM_TYPE_PFCP: + pfcp_spec = item->spec; + pfcp_mask = item->mask; + /* Check if PFCP item is used to describe protocol. + * If yes, both spec and mask should be NULL. + * If no, both spec and mask shouldn't be NULL. + */ + if ((!pfcp_spec && pfcp_mask) || + (pfcp_spec && !pfcp_mask)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid PFCP item"); + return -ENOTSUP; + } + if (pfcp_spec && pfcp_mask) { + /* Check pfcp mask and update input set */ + if (pfcp_mask->msg_type || + pfcp_mask->msg_len || + pfcp_mask->seid) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid pfcp mask"); + return -ENOTSUP; + } + if (pfcp_mask->s_field && + pfcp_spec->s_field == 0x01 && + ipv6_valiad) + *tun_type = + ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION; + else if (pfcp_mask->s_field && + pfcp_spec->s_field == 0x01) + *tun_type = + ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION; + else if (pfcp_mask->s_field && + !pfcp_spec->s_field && + ipv6_valiad) + *tun_type = + ICE_SW_TUN_PROFID_IPV6_PFCP_NODE; + else if (pfcp_mask->s_field && + !pfcp_spec->s_field) + *tun_type = + ICE_SW_TUN_PROFID_IPV4_PFCP_NODE; + else + return -ENOTSUP; + } + break; + + case RTE_FLOW_ITEM_TYPE_VOID: + break; + + default: + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, pattern, + "Invalid pattern item."); + goto out; + } + } + + *lkups_num = t; + + return input_set; +out: + return 0; +} + +static int +ice_switch_parse_dcf_action(const struct rte_flow_action *actions, + struct rte_flow_error *error, + struct ice_adv_rule_info *rule_info) +{ + const struct rte_flow_action_vf *act_vf; + const struct rte_flow_action *action; + enum rte_flow_action_type action_type; + + for (action = actions; action->type != + RTE_FLOW_ACTION_TYPE_END; action++) { + action_type = action->type; + switch (action_type) { + case RTE_FLOW_ACTION_TYPE_VF: + rule_info->sw_act.fltr_act = ICE_FWD_TO_VSI; + act_vf = action->conf; + rule_info->sw_act.vsi_handle = act_vf->id; + break; + default: + rte_flow_error_set(error, + EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, + actions, + "Invalid action type or queue number"); + return -rte_errno; + } + } + + rule_info->sw_act.src = rule_info->sw_act.vsi_handle; + rule_info->sw_act.flag = ICE_FLTR_RX; + rule_info->rx = 1; + rule_info->priority = 5; + + return 0; +} + +static int +ice_switch_parse_action(struct ice_pf *pf, + const struct rte_flow_action *actions, + struct rte_flow_error *error, + struct ice_adv_rule_info *rule_info) +{ + struct ice_vsi *vsi = pf->main_vsi; + struct rte_eth_dev *dev = pf->adapter->eth_dev; + const struct rte_flow_action_queue *act_q; + const struct rte_flow_action_rss *act_qgrop; + uint16_t base_queue, i; + const struct rte_flow_action *action; + enum rte_flow_action_type action_type; + uint16_t valid_qgrop_number[MAX_QGRP_NUM_TYPE] = { + 2, 4, 8, 16, 32, 64, 128}; + + base_queue = pf->base_queue + vsi->base_queue; + for (action = actions; action->type != + RTE_FLOW_ACTION_TYPE_END; action++) { + action_type = action->type; + switch (action_type) { + case RTE_FLOW_ACTION_TYPE_RSS: + act_qgrop = action->conf; + if (act_qgrop->queue_num <= 1) + goto error; + rule_info->sw_act.fltr_act = + ICE_FWD_TO_QGRP; + 
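+			/* The queue group size must be one of the power-of-two
+			 * values in valid_qgrop_number[], and the queues must
+			 * be contiguous and within the device Rx queue range;
+			 * the checks below enforce this.
+			 */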
rule_info->sw_act.fwd_id.q_id = + base_queue + act_qgrop->queue[0]; + for (i = 0; i < MAX_QGRP_NUM_TYPE; i++) { + if (act_qgrop->queue_num == + valid_qgrop_number[i]) + break; + } + if (i == MAX_QGRP_NUM_TYPE) + goto error; + if ((act_qgrop->queue[0] + + act_qgrop->queue_num) > + dev->data->nb_rx_queues) + goto error; + for (i = 0; i < act_qgrop->queue_num - 1; i++) + if (act_qgrop->queue[i + 1] != + act_qgrop->queue[i] + 1) + goto error; + rule_info->sw_act.qgrp_size = + act_qgrop->queue_num; + break; + case RTE_FLOW_ACTION_TYPE_QUEUE: + act_q = action->conf; + if (act_q->index >= dev->data->nb_rx_queues) + goto error; + rule_info->sw_act.fltr_act = + ICE_FWD_TO_Q; + rule_info->sw_act.fwd_id.q_id = + base_queue + act_q->index; + break; + + case RTE_FLOW_ACTION_TYPE_DROP: + rule_info->sw_act.fltr_act = + ICE_DROP_PACKET; + break; + + case RTE_FLOW_ACTION_TYPE_VOID: + break; + + default: + goto error; + } + } + + rule_info->sw_act.vsi_handle = vsi->idx; + rule_info->rx = 1; + rule_info->sw_act.src = vsi->idx; + rule_info->priority = 5; + + return 0; + +error: + rte_flow_error_set(error, + EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, + actions, + "Invalid action type or queue number"); + return -rte_errno; +} + +static int +ice_switch_check_action(const struct rte_flow_action *actions, + struct rte_flow_error *error) +{ + const struct rte_flow_action *action; + enum rte_flow_action_type action_type; + uint16_t actions_num = 0; + + for (action = actions; action->type != + RTE_FLOW_ACTION_TYPE_END; action++) { + action_type = action->type; + switch (action_type) { + case RTE_FLOW_ACTION_TYPE_VF: + case RTE_FLOW_ACTION_TYPE_RSS: + case RTE_FLOW_ACTION_TYPE_QUEUE: + case RTE_FLOW_ACTION_TYPE_DROP: + actions_num++; + break; + case RTE_FLOW_ACTION_TYPE_VOID: + continue; + default: + rte_flow_error_set(error, + EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, + actions, + "Invalid action type"); + return -rte_errno; + } + } + + if (actions_num > 1) { + rte_flow_error_set(error, + EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, + actions, + "Invalid action number"); + return -rte_errno; + } + + return 0; +} + +static bool +ice_is_profile_rule(enum ice_sw_tunnel_type tun_type) +{ + switch (tun_type) { + case ICE_SW_TUN_PROFID_IPV6_ESP: + case ICE_SW_TUN_PROFID_IPV6_AH: + case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3: + case ICE_SW_TUN_PROFID_IPV6_NAT_T: + case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE: + case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION: + case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE: + case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION: + return true; + default: + break; + } + + return false; +} + +static int +ice_switch_parse_pattern_action(struct ice_adapter *ad, + struct ice_pattern_match_item *array, + uint32_t array_len, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + void **meta, + struct rte_flow_error *error) +{ + struct ice_pf *pf = &ad->pf; + uint64_t inputset = 0; + int ret = 0; + struct sw_meta *sw_meta_ptr = NULL; + struct ice_adv_rule_info rule_info; + struct ice_adv_lkup_elem *list = NULL; + uint16_t lkups_num = 0; + const struct rte_flow_item *item = pattern; + uint16_t item_num = 0; + enum ice_sw_tunnel_type tun_type = + ICE_SW_TUN_AND_NON_TUN; + struct ice_pattern_match_item *pattern_match_item = NULL; + + for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { + item_num++; + if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) + tun_type = ICE_SW_TUN_VXLAN; + if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE) + tun_type = ICE_SW_TUN_NVGRE; + if (item->type == RTE_FLOW_ITEM_TYPE_PPPOED || + item->type == 
RTE_FLOW_ITEM_TYPE_PPPOES) + tun_type = ICE_SW_TUN_PPPOE; + if (item->type == RTE_FLOW_ITEM_TYPE_ETH) { + const struct rte_flow_item_eth *eth_mask; + if (item->mask) + eth_mask = item->mask; + else + continue; + if (eth_mask->type == UINT16_MAX) + tun_type = ICE_SW_TUN_AND_NON_TUN; + } + /* reserve one more memory slot for ETH which may + * consume 2 lookup items. + */ + if (item->type == RTE_FLOW_ITEM_TYPE_ETH) + item_num++; + } + + list = rte_zmalloc(NULL, item_num * sizeof(*list), 0); + if (!list) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "No memory for PMD internal items"); + return -rte_errno; + } + + sw_meta_ptr = + rte_zmalloc(NULL, sizeof(*sw_meta_ptr), 0); + if (!sw_meta_ptr) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "No memory for sw_pattern_meta_ptr"); + goto error; + } + + pattern_match_item = + ice_search_pattern_match_item(pattern, array, array_len, error); + if (!pattern_match_item) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Invalid input pattern"); + goto error; + } + + inputset = ice_switch_inset_get + (pattern, error, list, &lkups_num, &tun_type); + if ((!inputset && !ice_is_profile_rule(tun_type)) || + (inputset & ~pattern_match_item->input_set_mask)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM_SPEC, + pattern, + "Invalid input set"); + goto error; + } + + memset(&rule_info, 0, sizeof(rule_info)); + rule_info.tun_type = tun_type; + + ret = ice_switch_check_action(actions, error); + if (ret) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Invalid input action number"); + goto error; + } + + if (ad->hw.dcf_enabled) + ret = ice_switch_parse_dcf_action(actions, error, &rule_info); + else + ret = ice_switch_parse_action(pf, actions, error, &rule_info); + + if (ret) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Invalid input action"); + goto error; + } + + if (meta) { + *meta = sw_meta_ptr; + ((struct sw_meta *)*meta)->list = list; + ((struct sw_meta *)*meta)->lkups_num = lkups_num; + ((struct sw_meta *)*meta)->rule_info = rule_info; + } else { + rte_free(list); + rte_free(sw_meta_ptr); + } + + rte_free(pattern_match_item); + + return 0; + +error: + rte_free(list); + rte_free(sw_meta_ptr); + rte_free(pattern_match_item); + + return -rte_errno; +} + +static int +ice_switch_query(struct ice_adapter *ad __rte_unused, + struct rte_flow *flow __rte_unused, + struct rte_flow_query_count *count __rte_unused, + struct rte_flow_error *error) +{ + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_HANDLE, + NULL, + "count action not supported by switch filter"); + + return -rte_errno; +} + +static int +ice_switch_redirect(struct ice_adapter *ad, + struct rte_flow *flow, + struct ice_flow_redirect *rd) +{ + struct ice_rule_query_data *rdata = flow->rule; + struct ice_adv_fltr_mgmt_list_entry *list_itr; + struct ice_adv_lkup_elem *lkups_dp = NULL; + struct LIST_HEAD_TYPE *list_head; + struct ice_adv_rule_info rinfo; + struct ice_hw *hw = &ad->hw; + struct ice_switch_info *sw; + uint16_t lkups_cnt; + int ret; + + sw = hw->switch_info; + if (!sw->recp_list[rdata->rid].recp_created) + return -EINVAL; + + if (rd->type != ICE_FLOW_REDIRECT_VSI) + return -ENOTSUP; + + list_head = &sw->recp_list[rdata->rid].filt_rules; + LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry, + list_entry) { + rinfo = list_itr->rule_info; + if (rinfo.fltr_rule_id == rdata->rule_id && + rinfo.sw_act.fltr_act == 
ICE_FWD_TO_VSI && + rinfo.sw_act.vsi_handle == rd->vsi_handle) { + lkups_cnt = list_itr->lkups_cnt; + lkups_dp = (struct ice_adv_lkup_elem *) + ice_memdup(hw, list_itr->lkups, + sizeof(*list_itr->lkups) * + lkups_cnt, ICE_NONDMA_TO_NONDMA); + if (!lkups_dp) { + PMD_DRV_LOG(ERR, "Failed to allocate memory."); + return -EINVAL; + } + + break; + } + } + + if (!lkups_dp) + return 0; + + /* Remove the old rule */ + ret = ice_rem_adv_rule(hw, list_itr->lkups, + lkups_cnt, &rinfo); + if (ret) { + PMD_DRV_LOG(ERR, "Failed to delete the old rule %d", + rdata->rule_id); + ret = -EINVAL; + goto out; + } + + /* Update VSI context */ + hw->vsi_ctx[rd->vsi_handle]->vsi_num = rd->new_vsi_num; + + /* Replay the rule */ + ret = ice_add_adv_rule(hw, lkups_dp, lkups_cnt, + &rinfo, rdata); + if (ret) { + PMD_DRV_LOG(ERR, "Failed to replay the rule"); + ret = -EINVAL; + } + +out: + ice_free(hw, lkups_dp); + return ret; +} + +static int +ice_switch_init(struct ice_adapter *ad) +{ + int ret = 0; + struct ice_flow_parser *dist_parser; + struct ice_flow_parser *perm_parser = &ice_switch_perm_parser; + + if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS) + dist_parser = &ice_switch_dist_parser_comms; + else if (ad->active_pkg_type == ICE_PKG_TYPE_OS_DEFAULT) + dist_parser = &ice_switch_dist_parser_os; + else + return -EINVAL; + + if (ad->devargs.pipe_mode_support) + ret = ice_register_parser(perm_parser, ad); + else + ret = ice_register_parser(dist_parser, ad); + return ret; +} + +static void +ice_switch_uninit(struct ice_adapter *ad) +{ + struct ice_flow_parser *dist_parser; + struct ice_flow_parser *perm_parser = &ice_switch_perm_parser; + + if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS) + dist_parser = &ice_switch_dist_parser_comms; + else + dist_parser = &ice_switch_dist_parser_os; + + if (ad->devargs.pipe_mode_support) + ice_unregister_parser(perm_parser, ad); + else + ice_unregister_parser(dist_parser, ad); +} + +static struct +ice_flow_engine ice_switch_engine = { + .init = ice_switch_init, + .uninit = ice_switch_uninit, + .create = ice_switch_create, + .destroy = ice_switch_destroy, + .query_count = ice_switch_query, + .redirect = ice_switch_redirect, + .free = ice_switch_filter_rule_free, + .type = ICE_FLOW_ENGINE_SWITCH, +}; + +static struct +ice_flow_parser ice_switch_dist_parser_os = { + .engine = &ice_switch_engine, + .array = ice_switch_pattern_dist_os, + .array_len = RTE_DIM(ice_switch_pattern_dist_os), + .parse_pattern_action = ice_switch_parse_pattern_action, + .stage = ICE_FLOW_STAGE_DISTRIBUTOR, +}; + +static struct +ice_flow_parser ice_switch_dist_parser_comms = { + .engine = &ice_switch_engine, + .array = ice_switch_pattern_dist_comms, + .array_len = RTE_DIM(ice_switch_pattern_dist_comms), + .parse_pattern_action = ice_switch_parse_pattern_action, + .stage = ICE_FLOW_STAGE_DISTRIBUTOR, +}; + +static struct +ice_flow_parser ice_switch_perm_parser = { + .engine = &ice_switch_engine, + .array = ice_switch_pattern_perm, + .array_len = RTE_DIM(ice_switch_pattern_perm), + .parse_pattern_action = ice_switch_parse_pattern_action, + .stage = ICE_FLOW_STAGE_PERMISSION, +}; + +RTE_INIT(ice_sw_engine_init) +{ + struct ice_flow_engine *engine = &ice_switch_engine; + ice_register_flow_engine(engine); +} diff --git a/src/spdk/dpdk/drivers/net/ice/meson.build b/src/spdk/dpdk/drivers/net/ice/meson.build new file mode 100644 index 000000000..e6fe74487 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ice/meson.build @@ -0,0 +1,42 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2018 Intel Corporation + 
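+# Build layout: the shared base code is built via subdir('base'), the PMD
+# sources are listed below, and on x86 the AVX2 Rx/Tx path is either compiled
+# directly (when AVX2 is part of the instruction-set baseline) or built as a
+# separate static library with -mavx2 whose objects are pulled back in.
+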
+subdir('base') +objs = [base_objs] + +sources = files( + 'ice_ethdev.c', + 'ice_rxtx.c', + 'ice_switch_filter.c', + 'ice_generic_flow.c', + 'ice_fdir_filter.c', + 'ice_hash.c' + ) + +deps += ['hash', 'net', 'common_iavf'] +includes += include_directories('base', '../../common/iavf') + +if arch_subdir == 'x86' + sources += files('ice_rxtx_vec_sse.c') + + # compile AVX2 version if either: + # a. we have AVX supported in minimum instruction set baseline + # b. it's not minimum instruction set, but supported by compiler + if dpdk_conf.has('RTE_MACHINE_CPUFLAG_AVX2') + sources += files('ice_rxtx_vec_avx2.c') + elif cc.has_argument('-mavx2') + ice_avx2_lib = static_library('ice_avx2_lib', + 'ice_rxtx_vec_avx2.c', + dependencies: [static_rte_ethdev, + static_rte_kvargs, static_rte_hash], + include_directories: includes, + c_args: [cflags, '-mavx2']) + objs += ice_avx2_lib.extract_objects('ice_rxtx_vec_avx2.c') + endif +endif + +sources += files('ice_dcf.c', + 'ice_dcf_ethdev.c', + 'ice_dcf_parent.c') + +install_headers('rte_pmd_ice.h') diff --git a/src/spdk/dpdk/drivers/net/ice/rte_pmd_ice.h b/src/spdk/dpdk/drivers/net/ice/rte_pmd_ice.h new file mode 100644 index 000000000..e254db053 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ice/rte_pmd_ice.h @@ -0,0 +1,231 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019 Intel Corporation + */ + +#ifndef _RTE_PMD_ICE_H_ +#define _RTE_PMD_ICE_H_ + +/** + * @file rte_pmd_ice.h + * + * ice PMD specific functions. + * + * @b EXPERIMENTAL: this API may change, or be removed, without prior notice + * + */ + +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * The supported network protocol extraction metadata format. + */ +union rte_net_ice_proto_xtr_metadata { + uint32_t metadata; + + struct { + uint16_t data0; + uint16_t data1; + } raw; + + struct { + uint16_t stag_vid:12, + stag_dei:1, + stag_pcp:3; + uint16_t ctag_vid:12, + ctag_dei:1, + ctag_pcp:3; + } vlan; + + struct { + uint16_t protocol:8, + ttl:8; + uint16_t tos:8, + ihl:4, + version:4; + } ipv4; + + struct { + uint16_t hoplimit:8, + nexthdr:8; + uint16_t flowhi4:4, + tc:8, + version:4; + } ipv6; + + struct { + uint16_t flowlo16; + uint16_t flowhi4:4, + tc:8, + version:4; + } ipv6_flow; + + struct { + uint16_t fin:1, + syn:1, + rst:1, + psh:1, + ack:1, + urg:1, + ece:1, + cwr:1, + res1:4, + doff:4; + uint16_t rsvd; + } tcp; +}; + +/* Offset of mbuf dynamic field for protocol extraction data */ +extern int rte_net_ice_dynfield_proto_xtr_metadata_offs; + +/* Mask of mbuf dynamic flags for protocol extraction type */ +extern uint64_t rte_net_ice_dynflag_proto_xtr_vlan_mask; +extern uint64_t rte_net_ice_dynflag_proto_xtr_ipv4_mask; +extern uint64_t rte_net_ice_dynflag_proto_xtr_ipv6_mask; +extern uint64_t rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask; +extern uint64_t rte_net_ice_dynflag_proto_xtr_tcp_mask; + +/** + * The mbuf dynamic field pointer for protocol extraction metadata. + */ +#define RTE_NET_ICE_DYNF_PROTO_XTR_METADATA(m) \ + RTE_MBUF_DYNFIELD((m), \ + rte_net_ice_dynfield_proto_xtr_metadata_offs, \ + uint32_t *) + +/** + * The mbuf dynamic flag for VLAN protocol extraction metadata, it is valid + * when dev_args 'proto_xtr' has 'vlan' specified. + */ +#define RTE_PKT_RX_DYNF_PROTO_XTR_VLAN \ + (rte_net_ice_dynflag_proto_xtr_vlan_mask) + +/** + * The mbuf dynamic flag for IPv4 protocol extraction metadata, it is valid + * when dev_args 'proto_xtr' has 'ipv4' specified. 
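+ *
+ * Typical receive-path usage (a sketch; mb is an assumed struct rte_mbuf *):
+ *
+ *   if (rte_net_ice_dynf_proto_xtr_metadata_avail() &&
+ *       (mb->ol_flags & RTE_PKT_RX_DYNF_PROTO_XTR_IPV4)) {
+ *       uint32_t md = rte_net_ice_dynf_proto_xtr_metadata_get(mb);
+ *       ... interpret md through union rte_net_ice_proto_xtr_metadata ...
+ *   }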
+ */ +#define RTE_PKT_RX_DYNF_PROTO_XTR_IPV4 \ + (rte_net_ice_dynflag_proto_xtr_ipv4_mask) + +/** + * The mbuf dynamic flag for IPv6 protocol extraction metadata, it is valid + * when dev_args 'proto_xtr' has 'ipv6' specified. + */ +#define RTE_PKT_RX_DYNF_PROTO_XTR_IPV6 \ + (rte_net_ice_dynflag_proto_xtr_ipv6_mask) + +/** + * The mbuf dynamic flag for IPv6 with flow protocol extraction metadata, it is + * valid when dev_args 'proto_xtr' has 'ipv6_flow' specified. + */ +#define RTE_PKT_RX_DYNF_PROTO_XTR_IPV6_FLOW \ + (rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask) + +/** + * The mbuf dynamic flag for TCP protocol extraction metadata, it is valid + * when dev_args 'proto_xtr' has 'tcp' specified. + */ +#define RTE_PKT_RX_DYNF_PROTO_XTR_TCP \ + (rte_net_ice_dynflag_proto_xtr_tcp_mask) + +/** + * Check if mbuf dynamic field for protocol extraction metadata is registered. + * + * @return + * True if registered, false otherwise. + */ +__rte_experimental +static __rte_always_inline int +rte_net_ice_dynf_proto_xtr_metadata_avail(void) +{ + return rte_net_ice_dynfield_proto_xtr_metadata_offs != -1; +} + +/** + * Get the mbuf dynamic field for protocol extraction metadata. + * + * @param m + * The pointer to the mbuf. + * @return + * The saved protocol extraction metadata. + */ +__rte_experimental +static __rte_always_inline uint32_t +rte_net_ice_dynf_proto_xtr_metadata_get(struct rte_mbuf *m) +{ + return *RTE_NET_ICE_DYNF_PROTO_XTR_METADATA(m); +} + +/** + * Dump the mbuf dynamic field for protocol extraction metadata. + * + * @param m + * The pointer to the mbuf. + */ +__rte_experimental +static inline void +rte_net_ice_dump_proto_xtr_metadata(struct rte_mbuf *m) +{ + union rte_net_ice_proto_xtr_metadata data; + + if (!rte_net_ice_dynf_proto_xtr_metadata_avail()) + return; + + data.metadata = rte_net_ice_dynf_proto_xtr_metadata_get(m); + + if (m->ol_flags & RTE_PKT_RX_DYNF_PROTO_XTR_VLAN) + printf(" - Protocol Extraction:[0x%04x:0x%04x],vlan,stag=%u:%u:%u,ctag=%u:%u:%u", + data.raw.data0, data.raw.data1, + data.vlan.stag_pcp, + data.vlan.stag_dei, + data.vlan.stag_vid, + data.vlan.ctag_pcp, + data.vlan.ctag_dei, + data.vlan.ctag_vid); + else if (m->ol_flags & RTE_PKT_RX_DYNF_PROTO_XTR_IPV4) + printf(" - Protocol Extraction:[0x%04x:0x%04x],ipv4,ver=%u,hdrlen=%u,tos=%u,ttl=%u,proto=%u", + data.raw.data0, data.raw.data1, + data.ipv4.version, + data.ipv4.ihl, + data.ipv4.tos, + data.ipv4.ttl, + data.ipv4.protocol); + else if (m->ol_flags & RTE_PKT_RX_DYNF_PROTO_XTR_IPV6) + printf(" - Protocol Extraction:[0x%04x:0x%04x],ipv6,ver=%u,tc=%u,flow_hi4=0x%x,nexthdr=%u,hoplimit=%u", + data.raw.data0, data.raw.data1, + data.ipv6.version, + data.ipv6.tc, + data.ipv6.flowhi4, + data.ipv6.nexthdr, + data.ipv6.hoplimit); + else if (m->ol_flags & RTE_PKT_RX_DYNF_PROTO_XTR_IPV6_FLOW) + printf(" - Protocol Extraction:[0x%04x:0x%04x],ipv6_flow,ver=%u,tc=%u,flow=0x%x%04x", + data.raw.data0, data.raw.data1, + data.ipv6_flow.version, + data.ipv6_flow.tc, + data.ipv6_flow.flowhi4, + data.ipv6_flow.flowlo16); + else if (m->ol_flags & RTE_PKT_RX_DYNF_PROTO_XTR_TCP) + printf(" - Protocol Extraction:[0x%04x:0x%04x],tcp,doff=%u,flags=%s%s%s%s%s%s%s%s", + data.raw.data0, data.raw.data1, + data.tcp.doff, + data.tcp.cwr ? "C" : "", + data.tcp.ece ? "E" : "", + data.tcp.urg ? "U" : "", + data.tcp.ack ? "A" : "", + data.tcp.psh ? "P" : "", + data.tcp.rst ? "R" : "", + data.tcp.syn ? "S" : "", + data.tcp.fin ? 
"F" : ""); +} + +#ifdef __cplusplus +} +#endif + +#endif /* _RTE_PMD_ICE_H_ */ diff --git a/src/spdk/dpdk/drivers/net/ice/rte_pmd_ice_version.map b/src/spdk/dpdk/drivers/net/ice/rte_pmd_ice_version.map new file mode 100644 index 000000000..d04b194c1 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ice/rte_pmd_ice_version.map @@ -0,0 +1,15 @@ +DPDK_20.0 { + local: *; +}; + +EXPERIMENTAL { + global: + + # added in 19.11 + rte_net_ice_dynfield_proto_xtr_metadata_offs; + rte_net_ice_dynflag_proto_xtr_vlan_mask; + rte_net_ice_dynflag_proto_xtr_ipv4_mask; + rte_net_ice_dynflag_proto_xtr_ipv6_mask; + rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask; + rte_net_ice_dynflag_proto_xtr_tcp_mask; +}; diff --git a/src/spdk/dpdk/drivers/net/igc/Makefile b/src/spdk/dpdk/drivers/net/igc/Makefile new file mode 100644 index 000000000..d6d7959d2 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/igc/Makefile @@ -0,0 +1,40 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2019-2020 Intel Corporation + +include $(RTE_SDK)/mk/rte.vars.mk + +# +# library name +# +LIB = librte_pmd_igc.a + +CFLAGS += -O3 +CFLAGS += $(WERROR_FLAGS) +LDLIBS += -lrte_eal +LDLIBS += -lrte_ethdev +LDLIBS += -lrte_bus_pci +LDLIBS += -lrte_mbuf +LDLIBS += -lrte_mempool + +EXPORT_MAP := rte_pmd_igc_version.map + +VPATH += $(SRCDIR)/base + +# +# all source are stored in SRCS-y +# +SRCS-$(CONFIG_RTE_LIBRTE_IGC_PMD) += igc_api.c +SRCS-$(CONFIG_RTE_LIBRTE_IGC_PMD) += igc_base.c +SRCS-$(CONFIG_RTE_LIBRTE_IGC_PMD) += igc_i225.c +SRCS-$(CONFIG_RTE_LIBRTE_IGC_PMD) += igc_mac.c +SRCS-$(CONFIG_RTE_LIBRTE_IGC_PMD) += igc_manage.c +SRCS-$(CONFIG_RTE_LIBRTE_IGC_PMD) += igc_nvm.c +SRCS-$(CONFIG_RTE_LIBRTE_IGC_PMD) += igc_osdep.c +SRCS-$(CONFIG_RTE_LIBRTE_IGC_PMD) += igc_phy.c +SRCS-$(CONFIG_RTE_LIBRTE_IGC_PMD) += igc_logs.c +SRCS-$(CONFIG_RTE_LIBRTE_IGC_PMD) += igc_ethdev.c +SRCS-$(CONFIG_RTE_LIBRTE_IGC_PMD) += igc_txrx.c +SRCS-$(CONFIG_RTE_LIBRTE_IGC_PMD) += igc_filter.c +SRCS-$(CONFIG_RTE_LIBRTE_IGC_PMD) += igc_flow.c + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/src/spdk/dpdk/drivers/net/igc/base/README b/src/spdk/dpdk/drivers/net/igc/base/README new file mode 100644 index 000000000..0ff830700 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/igc/base/README @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2020 Intel Corporation + */ + +Intel® IGC driver +================== + +This directory contains source code of FreeBSD igc driver of version +2019.10.18 released by the team which develops basic drivers for any +i225 NIC. +The directory of base/ contains the original source package. +This driver is valid for the product(s) listed below + +* Intel® Ethernet Network Adapters I225 + +Updating the driver +=================== + +NOTE: +- To avoid namespace issues with e1000 PMD, all prefix e1000_ or E1000_ +of the definition, macro and file names ware replaced with igc_ or IGC_. +- Since some codes are not required, they have been removed from the +base codes, such as the I350 and I210 series NICs related codes. +- Some registers are used by the base codes but not defined in the base +codes, so they ware added to them. 
+- OS and DPDK specified definitions and macros ware added in following +files: + igc_osdep.h + igc_osdep.c diff --git a/src/spdk/dpdk/drivers/net/igc/base/igc_82571.h b/src/spdk/dpdk/drivers/net/igc/base/igc_82571.h new file mode 100644 index 000000000..764e77d85 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/igc/base/igc_82571.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + +#ifndef _IGC_82571_H_ +#define _IGC_82571_H_ + +#define ID_LED_RESERVED_F746 0xF746 +#define ID_LED_DEFAULT_82573 ((ID_LED_DEF1_DEF2 << 12) | \ + (ID_LED_OFF1_ON2 << 8) | \ + (ID_LED_DEF1_DEF2 << 4) | \ + (ID_LED_DEF1_DEF2)) + +#define IGC_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000 +#define AN_RETRY_COUNT 5 /* Autoneg Retry Count value */ + +/* Intr Throttling - RW */ +#define IGC_EITR_82574(_n) (0x000E8 + (0x4 * (_n))) + +#define IGC_EIAC_82574 0x000DC /* Ext. Interrupt Auto Clear - RW */ +#define IGC_EIAC_MASK_82574 0x01F00000 + +#define IGC_IVAR_INT_ALLOC_VALID 0x8 + +/* Manageability Operation Mode mask */ +#define IGC_NVM_INIT_CTRL2_MNGM 0x6000 + +#define IGC_BASE1000T_STATUS 10 +#define IGC_IDLE_ERROR_COUNT_MASK 0xFF +#define IGC_RECEIVE_ERROR_COUNTER 21 +#define IGC_RECEIVE_ERROR_MAX 0xFFFF +bool igc_check_phy_82574(struct igc_hw *hw); +bool igc_get_laa_state_82571(struct igc_hw *hw); +void igc_set_laa_state_82571(struct igc_hw *hw, bool state); + +#endif diff --git a/src/spdk/dpdk/drivers/net/igc/base/igc_82575.h b/src/spdk/dpdk/drivers/net/igc/base/igc_82575.h new file mode 100644 index 000000000..be060b407 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/igc/base/igc_82575.h @@ -0,0 +1,351 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + +#ifndef _IGC_82575_H_ +#define _IGC_82575_H_ + +#define ID_LED_DEFAULT_82575_SERDES ((ID_LED_DEF1_DEF2 << 12) | \ + (ID_LED_DEF1_DEF2 << 8) | \ + (ID_LED_DEF1_DEF2 << 4) | \ + (ID_LED_OFF1_ON2)) +/* + * Receive Address Register Count + * Number of high/low register pairs in the RAR. The RAR (Receive Address + * Registers) holds the directed and multicast addresses that we monitor. + * These entries are also used for MAC-based filtering. + */ +/* + * For 82576, there are an additional set of RARs that begin at an offset + * separate from the first set of RARs. 
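+ * The per-device entry counts are defined just below (16 for 82575,
+ * 24 for 82576/82580 and 32 for I350).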
+ */ +#define IGC_RAR_ENTRIES_82575 16 +#define IGC_RAR_ENTRIES_82576 24 +#define IGC_RAR_ENTRIES_82580 24 +#define IGC_RAR_ENTRIES_I350 32 +#define IGC_SW_SYNCH_MB 0x00000100 +#define IGC_STAT_DEV_RST_SET 0x00100000 + +struct igc_adv_data_desc { + __le64 buffer_addr; /* Address of the descriptor's data buffer */ + union { + u32 data; + struct { + u32 datalen:16; /* Data buffer length */ + u32 rsvd:4; + u32 dtyp:4; /* Descriptor type */ + u32 dcmd:8; /* Descriptor command */ + } config; + } lower; + union { + u32 data; + struct { + u32 status:4; /* Descriptor status */ + u32 idx:4; + u32 popts:6; /* Packet Options */ + u32 paylen:18; /* Payload length */ + } options; + } upper; +}; + +#define IGC_TXD_DTYP_ADV_C 0x2 /* Advanced Context Descriptor */ +#define IGC_TXD_DTYP_ADV_D 0x3 /* Advanced Data Descriptor */ +#define IGC_ADV_TXD_CMD_DEXT 0x20 /* Descriptor extension (0 = legacy) */ +#define IGC_ADV_TUCMD_IPV4 0x2 /* IP Packet Type: 1=IPv4 */ +#define IGC_ADV_TUCMD_IPV6 0x0 /* IP Packet Type: 0=IPv6 */ +#define IGC_ADV_TUCMD_L4T_UDP 0x0 /* L4 Packet TYPE of UDP */ +#define IGC_ADV_TUCMD_L4T_TCP 0x4 /* L4 Packet TYPE of TCP */ +#define IGC_ADV_TUCMD_MKRREQ 0x10 /* Indicates markers are required */ +#define IGC_ADV_DCMD_EOP 0x1 /* End of Packet */ +#define IGC_ADV_DCMD_IFCS 0x2 /* Insert FCS (Ethernet CRC) */ +#define IGC_ADV_DCMD_RS 0x8 /* Report Status */ +#define IGC_ADV_DCMD_VLE 0x40 /* Add VLAN tag */ +#define IGC_ADV_DCMD_TSE 0x80 /* TCP Seg enable */ +/* Extended Device Control */ +#define IGC_CTRL_EXT_NSICR 0x00000001 /* Disable Intr Clear all on read */ + +struct igc_adv_context_desc { + union { + u32 ip_config; + struct { + u32 iplen:9; + u32 maclen:7; + u32 vlan_tag:16; + } fields; + } ip_setup; + u32 seq_num; + union { + u64 l4_config; + struct { + u32 mkrloc:9; + u32 tucmd:11; + u32 dtyp:4; + u32 adv:8; + u32 rsvd:4; + u32 idx:4; + u32 l4len:8; + u32 mss:16; + } fields; + } l4_setup; +}; + +/* SRRCTL bit definitions */ +#define IGC_SRRCTL_BSIZEHDRSIZE_MASK 0x00000F00 +#define IGC_SRRCTL_DESCTYPE_LEGACY 0x00000000 +#define IGC_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000 +#define IGC_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000 +#define IGC_SRRCTL_DESCTYPE_HDR_REPLICATION 0x06000000 +#define IGC_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000 +#define IGC_SRRCTL_DESCTYPE_MASK 0x0E000000 +#define IGC_SRRCTL_TIMESTAMP 0x40000000 +#define IGC_SRRCTL_DROP_EN 0x80000000 + +#define IGC_SRRCTL_BSIZEPKT_MASK 0x0000007F +#define IGC_SRRCTL_BSIZEHDR_MASK 0x00003F00 + +#define IGC_TX_HEAD_WB_ENABLE 0x1 +#define IGC_TX_SEQNUM_WB_ENABLE 0x2 + +#define IGC_MRQC_ENABLE_RSS_4Q 0x00000002 +#define IGC_MRQC_ENABLE_VMDQ 0x00000003 +#define IGC_MRQC_ENABLE_VMDQ_RSS_2Q 0x00000005 +#define IGC_MRQC_RSS_FIELD_IPV4_UDP 0x00400000 +#define IGC_MRQC_RSS_FIELD_IPV6_UDP 0x00800000 +#define IGC_MRQC_RSS_FIELD_IPV6_UDP_EX 0x01000000 +#define IGC_MRQC_ENABLE_RSS_8Q 0x00000002 + +#define IGC_VMRCTL_MIRROR_PORT_SHIFT 8 +#define IGC_VMRCTL_MIRROR_DSTPORT_MASK (7 << \ + IGC_VMRCTL_MIRROR_PORT_SHIFT) +#define IGC_VMRCTL_POOL_MIRROR_ENABLE (1 << 0) +#define IGC_VMRCTL_UPLINK_MIRROR_ENABLE (1 << 1) +#define IGC_VMRCTL_DOWNLINK_MIRROR_ENABLE (1 << 2) + +#define IGC_EICR_TX_QUEUE ( \ + IGC_EICR_TX_QUEUE0 | \ + IGC_EICR_TX_QUEUE1 | \ + IGC_EICR_TX_QUEUE2 | \ + IGC_EICR_TX_QUEUE3) + +#define IGC_EICR_RX_QUEUE ( \ + IGC_EICR_RX_QUEUE0 | \ + IGC_EICR_RX_QUEUE1 | \ + IGC_EICR_RX_QUEUE2 | \ + IGC_EICR_RX_QUEUE3) + +#define IGC_EIMS_RX_QUEUE IGC_EICR_RX_QUEUE +#define IGC_EIMS_TX_QUEUE IGC_EICR_TX_QUEUE + +#define 
EIMS_ENABLE_MASK ( \ + IGC_EIMS_RX_QUEUE | \ + IGC_EIMS_TX_QUEUE | \ + IGC_EIMS_TCP_TIMER | \ + IGC_EIMS_OTHER) + +/* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */ +#define IGC_IMIR_PORT_IM_EN 0x00010000 /* TCP port enable */ +#define IGC_IMIR_PORT_BP 0x00020000 /* TCP port check bypass */ +#define IGC_IMIREXT_CTRL_URG 0x00002000 /* Check URG bit in header */ +#define IGC_IMIREXT_CTRL_ACK 0x00004000 /* Check ACK bit in header */ +#define IGC_IMIREXT_CTRL_PSH 0x00008000 /* Check PSH bit in header */ +#define IGC_IMIREXT_CTRL_RST 0x00010000 /* Check RST bit in header */ +#define IGC_IMIREXT_CTRL_SYN 0x00020000 /* Check SYN bit in header */ +#define IGC_IMIREXT_CTRL_FIN 0x00040000 /* Check FIN bit in header */ + +#define IGC_RXDADV_RSSTYPE_MASK 0x0000000F +#define IGC_RXDADV_RSSTYPE_SHIFT 12 +#define IGC_RXDADV_HDRBUFLEN_MASK 0x7FE0 +#define IGC_RXDADV_HDRBUFLEN_SHIFT 5 +#define IGC_RXDADV_SPLITHEADER_EN 0x00001000 +#define IGC_RXDADV_SPH 0x8000 +#define IGC_RXDADV_STAT_TS 0x10000 /* Pkt was time stamped */ +#define IGC_RXDADV_ERR_HBO 0x00800000 + +/* RSS Hash results */ +#define IGC_RXDADV_RSSTYPE_NONE 0x00000000 +#define IGC_RXDADV_RSSTYPE_IPV4_TCP 0x00000001 +#define IGC_RXDADV_RSSTYPE_IPV4 0x00000002 +#define IGC_RXDADV_RSSTYPE_IPV6_TCP 0x00000003 +#define IGC_RXDADV_RSSTYPE_IPV6_EX 0x00000004 +#define IGC_RXDADV_RSSTYPE_IPV6 0x00000005 +#define IGC_RXDADV_RSSTYPE_IPV6_TCP_EX 0x00000006 +#define IGC_RXDADV_RSSTYPE_IPV4_UDP 0x00000007 +#define IGC_RXDADV_RSSTYPE_IPV6_UDP 0x00000008 +#define IGC_RXDADV_RSSTYPE_IPV6_UDP_EX 0x00000009 + +/* RSS Packet Types as indicated in the receive descriptor */ +#define IGC_RXDADV_PKTTYPE_ILMASK 0x000000F0 +#define IGC_RXDADV_PKTTYPE_TLMASK 0x00000F00 +#define IGC_RXDADV_PKTTYPE_NONE 0x00000000 +#define IGC_RXDADV_PKTTYPE_IPV4 0x00000010 /* IPV4 hdr present */ +#define IGC_RXDADV_PKTTYPE_IPV4_EX 0x00000020 /* IPV4 hdr + extensions */ +#define IGC_RXDADV_PKTTYPE_IPV6 0x00000040 /* IPV6 hdr present */ +#define IGC_RXDADV_PKTTYPE_IPV6_EX 0x00000080 /* IPV6 hdr + extensions */ +#define IGC_RXDADV_PKTTYPE_TCP 0x00000100 /* TCP hdr present */ +#define IGC_RXDADV_PKTTYPE_UDP 0x00000200 /* UDP hdr present */ +#define IGC_RXDADV_PKTTYPE_SCTP 0x00000400 /* SCTP hdr present */ +#define IGC_RXDADV_PKTTYPE_NFS 0x00000800 /* NFS hdr present */ + +#define IGC_RXDADV_PKTTYPE_IPSEC_ESP 0x00001000 /* IPSec ESP */ +#define IGC_RXDADV_PKTTYPE_IPSEC_AH 0x00002000 /* IPSec AH */ +#define IGC_RXDADV_PKTTYPE_LINKSEC 0x00004000 /* LinkSec Encap */ +#define IGC_RXDADV_PKTTYPE_ETQF 0x00008000 /* PKTTYPE is ETQF index */ +#define IGC_RXDADV_PKTTYPE_ETQF_MASK 0x00000070 /* ETQF has 8 indices */ +#define IGC_RXDADV_PKTTYPE_ETQF_SHIFT 4 /* Right-shift 4 bits */ + +/* LinkSec results */ +/* Security Processing bit Indication */ +#define IGC_RXDADV_LNKSEC_STATUS_SECP 0x00020000 +#define IGC_RXDADV_LNKSEC_ERROR_BIT_MASK 0x18000000 +#define IGC_RXDADV_LNKSEC_ERROR_NO_SA_MATCH 0x08000000 +#define IGC_RXDADV_LNKSEC_ERROR_REPLAY_ERROR 0x10000000 +#define IGC_RXDADV_LNKSEC_ERROR_BAD_SIG 0x18000000 + +#define IGC_RXDADV_IPSEC_STATUS_SECP 0x00020000 +#define IGC_RXDADV_IPSEC_ERROR_BIT_MASK 0x18000000 +#define IGC_RXDADV_IPSEC_ERROR_INVALID_PROTOCOL 0x08000000 +#define IGC_RXDADV_IPSEC_ERROR_INVALID_LENGTH 0x10000000 +#define IGC_RXDADV_IPSEC_ERROR_AUTHENTICATION_FAILED 0x18000000 + +#define IGC_TXDCTL_SWFLSH 0x04000000 /* Tx Desc. 
wbk flushing */ +/* Tx Queue Arbitration Priority 0=low, 1=high */ +#define IGC_TXDCTL_PRIORITY 0x08000000 + +#define IGC_RXDCTL_SWFLSH 0x04000000 /* Rx Desc. wbk flushing */ + +/* Direct Cache Access (DCA) definitions */ +#define IGC_DCA_CTRL_DCA_ENABLE 0x00000000 /* DCA Enable */ +#define IGC_DCA_CTRL_DCA_DISABLE 0x00000001 /* DCA Disable */ + +#define IGC_DCA_CTRL_DCA_MODE_CB1 0x00 /* DCA Mode CB1 */ +#define IGC_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */ + +#define IGC_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */ +#define IGC_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */ +#define IGC_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header ena */ +#define IGC_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload ena */ +#define IGC_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* DCA Rx Desc Relax Order */ + +#define IGC_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */ +#define IGC_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */ +#define IGC_DCA_TXCTRL_DESC_RRO_EN (1 << 9) /* Tx rd Desc Relax Order */ +#define IGC_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */ +#define IGC_DCA_TXCTRL_DATA_RRO_EN (1 << 13) /* Tx rd data Relax Order */ + +#define IGC_DCA_TXCTRL_CPUID_MASK_82576 0xFF000000 /* Tx CPUID Mask */ +#define IGC_DCA_RXCTRL_CPUID_MASK_82576 0xFF000000 /* Rx CPUID Mask */ +#define IGC_DCA_TXCTRL_CPUID_SHIFT_82576 24 /* Tx CPUID */ +#define IGC_DCA_RXCTRL_CPUID_SHIFT_82576 24 /* Rx CPUID */ + +/* Additional interrupt register bit definitions */ +#define IGC_ICR_LSECPNS 0x00000020 /* PN threshold - server */ +#define IGC_IMS_LSECPNS IGC_ICR_LSECPNS /* PN threshold - server */ +#define IGC_ICS_LSECPNS IGC_ICR_LSECPNS /* PN threshold - server */ + +/* ETQF register bit definitions */ +#define IGC_ETQF_FILTER_ENABLE (1 << 26) +#define IGC_ETQF_IMM_INT (1 << 29) +#define IGC_ETQF_QUEUE_ENABLE (1 << 31) +/* + * ETQF filter list: one static filter per filter consumer. This is + * to avoid filter collisions later. Add new filters + * here!! 
+ * + * Current filters: + * EAPOL 802.1x (0x888e): Filter 0 + */ +#define IGC_ETQF_FILTER_EAPOL 0 + +#define IGC_FTQF_MASK_SOURCE_ADDR_BP 0x20000000 +#define IGC_FTQF_MASK_DEST_ADDR_BP 0x40000000 +#define IGC_FTQF_MASK_SOURCE_PORT_BP 0x80000000 + +#define IGC_NVM_APME_82575 0x0400 +#define MAX_NUM_VFS 7 + +#define IGC_DTXSWC_MAC_SPOOF_MASK 0x000000FF /* Per VF MAC spoof cntrl */ +#define IGC_DTXSWC_VLAN_SPOOF_MASK 0x0000FF00 /* Per VF VLAN spoof cntrl */ +#define IGC_DTXSWC_LLE_MASK 0x00FF0000 /* Per VF Local LB enables */ +#define IGC_DTXSWC_VLAN_SPOOF_SHIFT 8 +#define IGC_DTXSWC_LLE_SHIFT 16 +#define IGC_DTXSWC_VMDQ_LOOPBACK_EN (1 << 31) /* global VF LB enable */ + +/* Easy defines for setting default pool, would normally be left a zero */ +#define IGC_VT_CTL_DEFAULT_POOL_SHIFT 7 +#define IGC_VT_CTL_DEFAULT_POOL_MASK (0x7 << IGC_VT_CTL_DEFAULT_POOL_SHIFT) + +/* Other useful VMD_CTL register defines */ +#define IGC_VT_CTL_IGNORE_MAC (1 << 28) +#define IGC_VT_CTL_DISABLE_DEF_POOL (1 << 29) +#define IGC_VT_CTL_VM_REPL_EN (1 << 30) + +/* Per VM Offload register setup */ +#define IGC_VMOLR_RLPML_MASK 0x00003FFF /* Long Packet Maximum Length mask */ +#define IGC_VMOLR_LPE 0x00010000 /* Accept Long packet */ +#define IGC_VMOLR_RSSE 0x00020000 /* Enable RSS */ +#define IGC_VMOLR_AUPE 0x01000000 /* Accept untagged packets */ +#define IGC_VMOLR_ROMPE 0x02000000 /* Accept overflow multicast */ +#define IGC_VMOLR_ROPE 0x04000000 /* Accept overflow unicast */ +#define IGC_VMOLR_BAM 0x08000000 /* Accept Broadcast packets */ +#define IGC_VMOLR_MPME 0x10000000 /* Multicast promiscuous mode */ +#define IGC_VMOLR_STRVLAN 0x40000000 /* Vlan stripping enable */ +#define IGC_VMOLR_STRCRC 0x80000000 /* CRC stripping enable */ + +#define IGC_VMOLR_VPE 0x00800000 /* VLAN promiscuous enable */ +#define IGC_VMOLR_UPE 0x20000000 /* Unicast promisuous enable */ +#define IGC_DVMOLR_HIDVLAN 0x20000000 /* Vlan hiding enable */ +#define IGC_DVMOLR_STRVLAN 0x40000000 /* Vlan stripping enable */ +#define IGC_DVMOLR_STRCRC 0x80000000 /* CRC stripping enable */ + +#define IGC_PBRWAC_WALPB 0x00000007 /* Wrap around event on LAN Rx PB */ +#define IGC_PBRWAC_PBE 0x00000008 /* Rx packet buffer empty */ + +#define IGC_VLVF_ARRAY_SIZE 32 +#define IGC_VLVF_VLANID_MASK 0x00000FFF +#define IGC_VLVF_POOLSEL_SHIFT 12 +#define IGC_VLVF_POOLSEL_MASK (0xFF << IGC_VLVF_POOLSEL_SHIFT) +#define IGC_VLVF_LVLAN 0x00100000 +#define IGC_VLVF_VLANID_ENABLE 0x80000000 + +#define IGC_VMVIR_VLANA_DEFAULT 0x40000000 /* Always use default VLAN */ +#define IGC_VMVIR_VLANA_NEVER 0x80000000 /* Never insert VLAN tag */ + +#define IGC_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */ + +#define IGC_IOVCTL 0x05BBC +#define IGC_IOVCTL_REUSE_VFQ 0x00000001 + +#define IGC_RPLOLR_STRVLAN 0x40000000 +#define IGC_RPLOLR_STRCRC 0x80000000 + +#define IGC_TCTL_EXT_COLD 0x000FFC00 +#define IGC_TCTL_EXT_COLD_SHIFT 10 + +#define IGC_DTXCTL_8023LL 0x0004 +#define IGC_DTXCTL_VLAN_ADDED 0x0008 +#define IGC_DTXCTL_OOS_ENABLE 0x0010 +#define IGC_DTXCTL_MDP_EN 0x0020 +#define IGC_DTXCTL_SPOOF_INT 0x0040 + +#define IGC_EEPROM_PCS_AUTONEG_DISABLE_BIT (1 << 14) + +#define ALL_QUEUES 0xFFFF + +s32 igc_reset_init_script_82575(struct igc_hw *hw); +s32 igc_init_nvm_params_82575(struct igc_hw *hw); + +/* Rx packet buffer size defines */ +#define IGC_RXPBS_SIZE_MASK_82576 0x0000007F +void igc_vmdq_set_loopback_pf(struct igc_hw *hw, bool enable); +void igc_vmdq_set_anti_spoofing_pf(struct igc_hw *hw, bool enable, int pf); +void igc_vmdq_set_replication_pf(struct igc_hw *hw, 
bool enable); + +enum igc_promisc_type { + igc_promisc_disabled = 0, /* all promisc modes disabled */ + igc_promisc_unicast = 1, /* unicast promiscuous enabled */ + igc_promisc_multicast = 2, /* multicast promiscuous enabled */ + igc_promisc_enabled = 3, /* both uni and multicast promisc */ + igc_num_promisc_types +}; + +#endif /* _IGC_82575_H_ */ diff --git a/src/spdk/dpdk/drivers/net/igc/base/igc_api.c b/src/spdk/dpdk/drivers/net/igc/base/igc_api.c new file mode 100644 index 000000000..2f8c0753c --- /dev/null +++ b/src/spdk/dpdk/drivers/net/igc/base/igc_api.c @@ -0,0 +1,1845 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + +#include "igc_api.h" + +/** + * igc_get_i2c_data - Reads the I2C SDA data bit + * @i2cctl: Current value of I2CCTL register + * + * Returns the I2C data bit value + **/ +static bool igc_get_i2c_data(u32 *i2cctl) +{ + bool data; + + DEBUGFUNC("igc_get_i2c_data"); + + if (*i2cctl & IGC_I2C_DATA_IN) + data = 1; + else + data = 0; + + return data; +} + +/** + * igc_set_i2c_data - Sets the I2C data bit + * @hw: pointer to hardware structure + * @i2cctl: Current value of I2CCTL register + * @data: I2C data value (0 or 1) to set + * + * Sets the I2C data bit + **/ +static s32 igc_set_i2c_data(struct igc_hw *hw, u32 *i2cctl, bool data) +{ + s32 status = IGC_SUCCESS; + + DEBUGFUNC("igc_set_i2c_data"); + + if (data) + *i2cctl |= IGC_I2C_DATA_OUT; + else + *i2cctl &= ~IGC_I2C_DATA_OUT; + + *i2cctl &= ~IGC_I2C_DATA_OE_N; + *i2cctl |= IGC_I2C_CLK_OE_N; + IGC_WRITE_REG(hw, IGC_I2CPARAMS, *i2cctl); + IGC_WRITE_FLUSH(hw); + + /* Data rise/fall (1000ns/300ns) and set-up time (250ns) */ + usec_delay(IGC_I2C_T_RISE + IGC_I2C_T_FALL + IGC_I2C_T_SU_DATA); + + *i2cctl = IGC_READ_REG(hw, IGC_I2CPARAMS); + if (data != igc_get_i2c_data(i2cctl)) { + status = IGC_ERR_I2C; + DEBUGOUT1("Error - I2C data was not set to %X.\n", data); + } + + return status; +} + +/** + * igc_raise_i2c_clk - Raises the I2C SCL clock + * @hw: pointer to hardware structure + * @i2cctl: Current value of I2CCTL register + * + * Raises the I2C clock line '0'->'1' + **/ +static void igc_raise_i2c_clk(struct igc_hw *hw, u32 *i2cctl) +{ + DEBUGFUNC("igc_raise_i2c_clk"); + + *i2cctl |= IGC_I2C_CLK_OUT; + *i2cctl &= ~IGC_I2C_CLK_OE_N; + IGC_WRITE_REG(hw, IGC_I2CPARAMS, *i2cctl); + IGC_WRITE_FLUSH(hw); + + /* SCL rise time (1000ns) */ + usec_delay(IGC_I2C_T_RISE); +} + +/** + * igc_lower_i2c_clk - Lowers the I2C SCL clock + * @hw: pointer to hardware structure + * @i2cctl: Current value of I2CCTL register + * + * Lowers the I2C clock line '1'->'0' + **/ +static void igc_lower_i2c_clk(struct igc_hw *hw, u32 *i2cctl) +{ + DEBUGFUNC("igc_lower_i2c_clk"); + + *i2cctl &= ~IGC_I2C_CLK_OUT; + *i2cctl &= ~IGC_I2C_CLK_OE_N; + IGC_WRITE_REG(hw, IGC_I2CPARAMS, *i2cctl); + IGC_WRITE_FLUSH(hw); + + /* SCL fall time (300ns) */ + usec_delay(IGC_I2C_T_FALL); +} + +/** + * igc_i2c_start - Sets I2C start condition + * @hw: pointer to hardware structure + * + * Sets I2C start condition (High -> Low on SDA while SCL is High) + **/ +static void igc_i2c_start(struct igc_hw *hw) +{ + u32 i2cctl = IGC_READ_REG(hw, IGC_I2CPARAMS); + + DEBUGFUNC("igc_i2c_start"); + + /* Start condition must begin with data and clock high */ + igc_set_i2c_data(hw, &i2cctl, 1); + igc_raise_i2c_clk(hw, &i2cctl); + + /* Setup time for start condition (4.7us) */ + usec_delay(IGC_I2C_T_SU_STA); + + igc_set_i2c_data(hw, &i2cctl, 0); + + /* Hold time for start condition (4us) */ + usec_delay(IGC_I2C_T_HD_STA); + + 
igc_lower_i2c_clk(hw, &i2cctl); + + /* Minimum low period of clock is 4.7 us */ + usec_delay(IGC_I2C_T_LOW); +} + +/** + * igc_i2c_stop - Sets I2C stop condition + * @hw: pointer to hardware structure + * + * Sets I2C stop condition (Low -> High on SDA while SCL is High) + **/ +static void igc_i2c_stop(struct igc_hw *hw) +{ + u32 i2cctl = IGC_READ_REG(hw, IGC_I2CPARAMS); + + DEBUGFUNC("igc_i2c_stop"); + + /* Stop condition must begin with data low and clock high */ + igc_set_i2c_data(hw, &i2cctl, 0); + igc_raise_i2c_clk(hw, &i2cctl); + + /* Setup time for stop condition (4us) */ + usec_delay(IGC_I2C_T_SU_STO); + + igc_set_i2c_data(hw, &i2cctl, 1); + + /* bus free time between stop and start (4.7us)*/ + usec_delay(IGC_I2C_T_BUF); +} + +/** + * igc_clock_in_i2c_bit - Clocks in one bit via I2C data/clock + * @hw: pointer to hardware structure + * @data: read data value + * + * Clocks in one bit via I2C data/clock + **/ +static void igc_clock_in_i2c_bit(struct igc_hw *hw, bool *data) +{ + u32 i2cctl = IGC_READ_REG(hw, IGC_I2CPARAMS); + + DEBUGFUNC("igc_clock_in_i2c_bit"); + + igc_raise_i2c_clk(hw, &i2cctl); + + /* Minimum high period of clock is 4us */ + usec_delay(IGC_I2C_T_HIGH); + + i2cctl = IGC_READ_REG(hw, IGC_I2CPARAMS); + *data = igc_get_i2c_data(&i2cctl); + + igc_lower_i2c_clk(hw, &i2cctl); + + /* Minimum low period of clock is 4.7 us */ + usec_delay(IGC_I2C_T_LOW); +} + +/** + * igc_clock_in_i2c_byte - Clocks in one byte via I2C + * @hw: pointer to hardware structure + * @data: data byte to clock in + * + * Clocks in one byte data via I2C data/clock + **/ +static void igc_clock_in_i2c_byte(struct igc_hw *hw, u8 *data) +{ + s32 i; + bool bit = 0; + + DEBUGFUNC("igc_clock_in_i2c_byte"); + + *data = 0; + for (i = 7; i >= 0; i--) { + igc_clock_in_i2c_bit(hw, &bit); + *data |= bit << i; + } +} + +/** + * igc_clock_out_i2c_bit - Clocks in/out one bit via I2C data/clock + * @hw: pointer to hardware structure + * @data: data value to write + * + * Clocks out one bit via I2C data/clock + **/ +static s32 igc_clock_out_i2c_bit(struct igc_hw *hw, bool data) +{ + s32 status; + u32 i2cctl = IGC_READ_REG(hw, IGC_I2CPARAMS); + + DEBUGFUNC("igc_clock_out_i2c_bit"); + + status = igc_set_i2c_data(hw, &i2cctl, data); + if (status == IGC_SUCCESS) { + igc_raise_i2c_clk(hw, &i2cctl); + + /* Minimum high period of clock is 4us */ + usec_delay(IGC_I2C_T_HIGH); + + igc_lower_i2c_clk(hw, &i2cctl); + + /* Minimum low period of clock is 4.7 us. + * This also takes care of the data hold time. 
+ */ + usec_delay(IGC_I2C_T_LOW); + } else { + status = IGC_ERR_I2C; + DEBUGOUT1("I2C data was not set to %X\n", data); + } + + return status; +} + +/** + * igc_clock_out_i2c_byte - Clocks out one byte via I2C + * @hw: pointer to hardware structure + * @data: data byte clocked out + * + * Clocks out one byte data via I2C data/clock + **/ +static s32 igc_clock_out_i2c_byte(struct igc_hw *hw, u8 data) +{ + s32 status = IGC_SUCCESS; + s32 i; + u32 i2cctl; + bool bit = 0; + + DEBUGFUNC("igc_clock_out_i2c_byte"); + + for (i = 7; i >= 0; i--) { + bit = (data >> i) & 0x1; + status = igc_clock_out_i2c_bit(hw, bit); + + if (status != IGC_SUCCESS) + break; + } + + /* Release SDA line (set high) */ + i2cctl = IGC_READ_REG(hw, IGC_I2CPARAMS); + + i2cctl |= IGC_I2C_DATA_OE_N; + IGC_WRITE_REG(hw, IGC_I2CPARAMS, i2cctl); + IGC_WRITE_FLUSH(hw); + + return status; +} + +/** + * igc_get_i2c_ack - Polls for I2C ACK + * @hw: pointer to hardware structure + * + * Clocks in/out one bit via I2C data/clock + **/ +static s32 igc_get_i2c_ack(struct igc_hw *hw) +{ + s32 status = IGC_SUCCESS; + u32 i = 0; + u32 i2cctl = IGC_READ_REG(hw, IGC_I2CPARAMS); + u32 timeout = 10; + bool ack = true; + + DEBUGFUNC("igc_get_i2c_ack"); + + igc_raise_i2c_clk(hw, &i2cctl); + + /* Minimum high period of clock is 4us */ + usec_delay(IGC_I2C_T_HIGH); + + /* Wait until SCL returns high */ + for (i = 0; i < timeout; i++) { + usec_delay(1); + i2cctl = IGC_READ_REG(hw, IGC_I2CPARAMS); + if (i2cctl & IGC_I2C_CLK_IN) + break; + } + if (!(i2cctl & IGC_I2C_CLK_IN)) + return IGC_ERR_I2C; + + ack = igc_get_i2c_data(&i2cctl); + if (ack) { + DEBUGOUT("I2C ack was not received.\n"); + status = IGC_ERR_I2C; + } + + igc_lower_i2c_clk(hw, &i2cctl); + + /* Minimum low period of clock is 4.7 us */ + usec_delay(IGC_I2C_T_LOW); + + return status; +} + +/** + * igc_set_i2c_bb - Enable I2C bit-bang + * @hw: pointer to the HW structure + * + * Enable I2C bit-bang interface + * + **/ +s32 igc_set_i2c_bb(struct igc_hw *hw) +{ + s32 ret_val = IGC_SUCCESS; + u32 ctrl_ext, i2cparams; + + DEBUGFUNC("igc_set_i2c_bb"); + + ctrl_ext = IGC_READ_REG(hw, IGC_CTRL_EXT); + ctrl_ext |= IGC_CTRL_I2C_ENA; + IGC_WRITE_REG(hw, IGC_CTRL_EXT, ctrl_ext); + IGC_WRITE_FLUSH(hw); + + i2cparams = IGC_READ_REG(hw, IGC_I2CPARAMS); + i2cparams |= IGC_I2CBB_EN; + i2cparams |= IGC_I2C_DATA_OE_N; + i2cparams |= IGC_I2C_CLK_OE_N; + IGC_WRITE_REG(hw, IGC_I2CPARAMS, i2cparams); + IGC_WRITE_FLUSH(hw); + + return ret_val; +} + +/** + * igc_read_i2c_byte_generic - Reads 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to read + * @dev_addr: device address + * @data: value read + * + * Performs byte read operation over I2C interface at + * a specified device address. 
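+ *
+ * Editor's note: the lines below are an illustrative usage sketch added in
+ * review, not part of the upstream igc code. The byte offset (0x2A) and
+ * device address (0xA0) are hypothetical; only the call sequence matters.
+ *
+ *	u8 val;
+ *	s32 ret;
+ *
+ *	ret = igc_set_i2c_bb(hw);
+ *	if (ret == IGC_SUCCESS)
+ *		ret = igc_read_i2c_byte_generic(hw, 0x2A, 0xA0, &val);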
+ **/ +s32 igc_read_i2c_byte_generic(struct igc_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data) +{ + s32 status = IGC_SUCCESS; + u32 max_retry = 10; + u32 retry = 1; + u16 swfw_mask = 0; + + bool nack = true; + + DEBUGFUNC("igc_read_i2c_byte_generic"); + + swfw_mask = IGC_SWFW_PHY0_SM; + + do { + if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) + != IGC_SUCCESS) { + status = IGC_ERR_SWFW_SYNC; + goto read_byte_out; + } + + igc_i2c_start(hw); + + /* Device Address and write indication */ + status = igc_clock_out_i2c_byte(hw, dev_addr); + if (status != IGC_SUCCESS) + goto fail; + + status = igc_get_i2c_ack(hw); + if (status != IGC_SUCCESS) + goto fail; + + status = igc_clock_out_i2c_byte(hw, byte_offset); + if (status != IGC_SUCCESS) + goto fail; + + status = igc_get_i2c_ack(hw); + if (status != IGC_SUCCESS) + goto fail; + + igc_i2c_start(hw); + + /* Device Address and read indication */ + status = igc_clock_out_i2c_byte(hw, (dev_addr | 0x1)); + if (status != IGC_SUCCESS) + goto fail; + + status = igc_get_i2c_ack(hw); + if (status != IGC_SUCCESS) + goto fail; + + igc_clock_in_i2c_byte(hw, data); + + status = igc_clock_out_i2c_bit(hw, nack); + if (status != IGC_SUCCESS) + goto fail; + + igc_i2c_stop(hw); + break; + +fail: + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + msec_delay(100); + igc_i2c_bus_clear(hw); + retry++; + if (retry < max_retry) + DEBUGOUT("I2C byte read error - Retrying.\n"); + else + DEBUGOUT("I2C byte read error.\n"); + + } while (retry < max_retry); + + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + +read_byte_out: + + return status; +} + +/** + * igc_write_i2c_byte_generic - Writes 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to write + * @dev_addr: device address + * @data: value to write + * + * Performs byte write operation over I2C interface at + * a specified device address. + **/ +s32 igc_write_i2c_byte_generic(struct igc_hw *hw, u8 byte_offset, + u8 dev_addr, u8 data) +{ + s32 status = IGC_SUCCESS; + u32 max_retry = 1; + u32 retry = 0; + u16 swfw_mask = 0; + + DEBUGFUNC("igc_write_i2c_byte_generic"); + + swfw_mask = IGC_SWFW_PHY0_SM; + + if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != IGC_SUCCESS) { + status = IGC_ERR_SWFW_SYNC; + goto write_byte_out; + } + + do { + igc_i2c_start(hw); + + status = igc_clock_out_i2c_byte(hw, dev_addr); + if (status != IGC_SUCCESS) + goto fail; + + status = igc_get_i2c_ack(hw); + if (status != IGC_SUCCESS) + goto fail; + + status = igc_clock_out_i2c_byte(hw, byte_offset); + if (status != IGC_SUCCESS) + goto fail; + + status = igc_get_i2c_ack(hw); + if (status != IGC_SUCCESS) + goto fail; + + status = igc_clock_out_i2c_byte(hw, data); + if (status != IGC_SUCCESS) + goto fail; + + status = igc_get_i2c_ack(hw); + if (status != IGC_SUCCESS) + goto fail; + + igc_i2c_stop(hw); + break; + +fail: + igc_i2c_bus_clear(hw); + retry++; + if (retry < max_retry) + DEBUGOUT("I2C byte write error - Retrying.\n"); + else + DEBUGOUT("I2C byte write error.\n"); + } while (retry < max_retry); + + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + +write_byte_out: + + return status; +} + +/** + * igc_i2c_bus_clear - Clears the I2C bus + * @hw: pointer to hardware structure + * + * Clears the I2C bus by sending nine clock pulses. + * Used when data line is stuck low. 
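+ *
+ * Editor's note: illustrative sketch added in review, not part of the
+ * upstream igc code. This is the same recovery pattern the byte read/write
+ * helpers above use internally; offset, dev_addr, val and ret are
+ * hypothetical locals.
+ *
+ *	if (igc_read_i2c_byte_generic(hw, offset, dev_addr, &val) !=
+ *	    IGC_SUCCESS) {
+ *		igc_i2c_bus_clear(hw);
+ *		ret = igc_read_i2c_byte_generic(hw, offset, dev_addr, &val);
+ *	}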
+ **/ +void igc_i2c_bus_clear(struct igc_hw *hw) +{ + u32 i2cctl = IGC_READ_REG(hw, IGC_I2CPARAMS); + u32 i; + + DEBUGFUNC("igc_i2c_bus_clear"); + + igc_i2c_start(hw); + + igc_set_i2c_data(hw, &i2cctl, 1); + + for (i = 0; i < 9; i++) { + igc_raise_i2c_clk(hw, &i2cctl); + + /* Min high period of clock is 4us */ + usec_delay(IGC_I2C_T_HIGH); + + igc_lower_i2c_clk(hw, &i2cctl); + + /* Min low period of clock is 4.7us*/ + usec_delay(IGC_I2C_T_LOW); + } + + igc_i2c_start(hw); + + /* Put the i2c bus back to default state */ + igc_i2c_stop(hw); +} + +/** + * igc_init_mac_params - Initialize MAC function pointers + * @hw: pointer to the HW structure + * + * This function initializes the function pointers for the MAC + * set of functions. Called by drivers or by igc_setup_init_funcs. + **/ +s32 igc_init_mac_params(struct igc_hw *hw) +{ + s32 ret_val = IGC_SUCCESS; + + if (hw->mac.ops.init_params) { + ret_val = hw->mac.ops.init_params(hw); + if (ret_val) { + DEBUGOUT("MAC Initialization Error\n"); + goto out; + } + } else { + DEBUGOUT("mac.init_mac_params was NULL\n"); + ret_val = -IGC_ERR_CONFIG; + } + +out: + return ret_val; +} + +/** + * igc_init_nvm_params - Initialize NVM function pointers + * @hw: pointer to the HW structure + * + * This function initializes the function pointers for the NVM + * set of functions. Called by drivers or by igc_setup_init_funcs. + **/ +s32 igc_init_nvm_params(struct igc_hw *hw) +{ + s32 ret_val = IGC_SUCCESS; + + if (hw->nvm.ops.init_params) { + ret_val = hw->nvm.ops.init_params(hw); + if (ret_val) { + DEBUGOUT("NVM Initialization Error\n"); + goto out; + } + } else { + DEBUGOUT("nvm.init_nvm_params was NULL\n"); + ret_val = -IGC_ERR_CONFIG; + } + +out: + return ret_val; +} + +/** + * igc_init_phy_params - Initialize PHY function pointers + * @hw: pointer to the HW structure + * + * This function initializes the function pointers for the PHY + * set of functions. Called by drivers or by igc_setup_init_funcs. + **/ +s32 igc_init_phy_params(struct igc_hw *hw) +{ + s32 ret_val = IGC_SUCCESS; + + if (hw->phy.ops.init_params) { + ret_val = hw->phy.ops.init_params(hw); + if (ret_val) { + DEBUGOUT("PHY Initialization Error\n"); + goto out; + } + } else { + DEBUGOUT("phy.init_phy_params was NULL\n"); + ret_val = -IGC_ERR_CONFIG; + } + +out: + return ret_val; +} + +/** + * igc_init_mbx_params - Initialize mailbox function pointers + * @hw: pointer to the HW structure + * + * This function initializes the function pointers for the PHY + * set of functions. Called by drivers or by igc_setup_init_funcs. + **/ +s32 igc_init_mbx_params(struct igc_hw *hw) +{ + s32 ret_val = IGC_SUCCESS; + + if (hw->mbx.ops.init_params) { + ret_val = hw->mbx.ops.init_params(hw); + if (ret_val) { + DEBUGOUT("Mailbox Initialization Error\n"); + goto out; + } + } else { + DEBUGOUT("mbx.init_mbx_params was NULL\n"); + ret_val = -IGC_ERR_CONFIG; + } + +out: + return ret_val; +} + +/** + * igc_set_mac_type - Sets MAC type + * @hw: pointer to the HW structure + * + * This function sets the mac type of the adapter based on the + * device ID stored in the hw structure. + * MUST BE FIRST FUNCTION CALLED (explicitly or through + * igc_setup_init_funcs()). 
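+ *
+ * Editor's note: illustrative call-order sketch added in review, not part
+ * of the upstream igc code. pci_dev_id and mmio_base stand in for values a
+ * driver obtains from PCI probe.
+ *
+ *	s32 ret;
+ *
+ *	hw->device_id = pci_dev_id;
+ *	hw->hw_addr = mmio_base;
+ *	ret = igc_setup_init_funcs(hw, true);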
+ **/ +s32 igc_set_mac_type(struct igc_hw *hw) +{ + struct igc_mac_info *mac = &hw->mac; + s32 ret_val = IGC_SUCCESS; + + DEBUGFUNC("igc_set_mac_type"); + + switch (hw->device_id) { + case IGC_DEV_ID_82542: + mac->type = igc_82542; + break; + case IGC_DEV_ID_82543GC_FIBER: + case IGC_DEV_ID_82543GC_COPPER: + mac->type = igc_82543; + break; + case IGC_DEV_ID_82544EI_COPPER: + case IGC_DEV_ID_82544EI_FIBER: + case IGC_DEV_ID_82544GC_COPPER: + case IGC_DEV_ID_82544GC_LOM: + mac->type = igc_82544; + break; + case IGC_DEV_ID_82540EM: + case IGC_DEV_ID_82540EM_LOM: + case IGC_DEV_ID_82540EP: + case IGC_DEV_ID_82540EP_LOM: + case IGC_DEV_ID_82540EP_LP: + mac->type = igc_82540; + break; + case IGC_DEV_ID_82545EM_COPPER: + case IGC_DEV_ID_82545EM_FIBER: + mac->type = igc_82545; + break; + case IGC_DEV_ID_82545GM_COPPER: + case IGC_DEV_ID_82545GM_FIBER: + case IGC_DEV_ID_82545GM_SERDES: + mac->type = igc_82545_rev_3; + break; + case IGC_DEV_ID_82546EB_COPPER: + case IGC_DEV_ID_82546EB_FIBER: + case IGC_DEV_ID_82546EB_QUAD_COPPER: + mac->type = igc_82546; + break; + case IGC_DEV_ID_82546GB_COPPER: + case IGC_DEV_ID_82546GB_FIBER: + case IGC_DEV_ID_82546GB_SERDES: + case IGC_DEV_ID_82546GB_PCIE: + case IGC_DEV_ID_82546GB_QUAD_COPPER: + case IGC_DEV_ID_82546GB_QUAD_COPPER_KSP3: + mac->type = igc_82546_rev_3; + break; + case IGC_DEV_ID_82541EI: + case IGC_DEV_ID_82541EI_MOBILE: + case IGC_DEV_ID_82541ER_LOM: + mac->type = igc_82541; + break; + case IGC_DEV_ID_82541ER: + case IGC_DEV_ID_82541GI: + case IGC_DEV_ID_82541GI_LF: + case IGC_DEV_ID_82541GI_MOBILE: + mac->type = igc_82541_rev_2; + break; + case IGC_DEV_ID_82547EI: + case IGC_DEV_ID_82547EI_MOBILE: + mac->type = igc_82547; + break; + case IGC_DEV_ID_82547GI: + mac->type = igc_82547_rev_2; + break; + case IGC_DEV_ID_82571EB_COPPER: + case IGC_DEV_ID_82571EB_FIBER: + case IGC_DEV_ID_82571EB_SERDES: + case IGC_DEV_ID_82571EB_SERDES_DUAL: + case IGC_DEV_ID_82571EB_SERDES_QUAD: + case IGC_DEV_ID_82571EB_QUAD_COPPER: + case IGC_DEV_ID_82571PT_QUAD_COPPER: + case IGC_DEV_ID_82571EB_QUAD_FIBER: + case IGC_DEV_ID_82571EB_QUAD_COPPER_LP: + mac->type = igc_82571; + break; + case IGC_DEV_ID_82572EI: + case IGC_DEV_ID_82572EI_COPPER: + case IGC_DEV_ID_82572EI_FIBER: + case IGC_DEV_ID_82572EI_SERDES: + mac->type = igc_82572; + break; + case IGC_DEV_ID_82573E: + case IGC_DEV_ID_82573E_IAMT: + case IGC_DEV_ID_82573L: + mac->type = igc_82573; + break; + case IGC_DEV_ID_82574L: + case IGC_DEV_ID_82574LA: + mac->type = igc_82574; + break; + case IGC_DEV_ID_82583V: + mac->type = igc_82583; + break; + case IGC_DEV_ID_80003ES2LAN_COPPER_DPT: + case IGC_DEV_ID_80003ES2LAN_SERDES_DPT: + case IGC_DEV_ID_80003ES2LAN_COPPER_SPT: + case IGC_DEV_ID_80003ES2LAN_SERDES_SPT: + mac->type = igc_80003es2lan; + break; + case IGC_DEV_ID_ICH8_IFE: + case IGC_DEV_ID_ICH8_IFE_GT: + case IGC_DEV_ID_ICH8_IFE_G: + case IGC_DEV_ID_ICH8_IGP_M: + case IGC_DEV_ID_ICH8_IGP_M_AMT: + case IGC_DEV_ID_ICH8_IGP_AMT: + case IGC_DEV_ID_ICH8_IGP_C: + case IGC_DEV_ID_ICH8_82567V_3: + mac->type = igc_ich8lan; + break; + case IGC_DEV_ID_ICH9_IFE: + case IGC_DEV_ID_ICH9_IFE_GT: + case IGC_DEV_ID_ICH9_IFE_G: + case IGC_DEV_ID_ICH9_IGP_M: + case IGC_DEV_ID_ICH9_IGP_M_AMT: + case IGC_DEV_ID_ICH9_IGP_M_V: + case IGC_DEV_ID_ICH9_IGP_AMT: + case IGC_DEV_ID_ICH9_BM: + case IGC_DEV_ID_ICH9_IGP_C: + case IGC_DEV_ID_ICH10_R_BM_LM: + case IGC_DEV_ID_ICH10_R_BM_LF: + case IGC_DEV_ID_ICH10_R_BM_V: + mac->type = igc_ich9lan; + break; + case IGC_DEV_ID_ICH10_D_BM_LM: + case IGC_DEV_ID_ICH10_D_BM_LF: + case 
IGC_DEV_ID_ICH10_D_BM_V: + mac->type = igc_ich10lan; + break; + case IGC_DEV_ID_PCH_D_HV_DM: + case IGC_DEV_ID_PCH_D_HV_DC: + case IGC_DEV_ID_PCH_M_HV_LM: + case IGC_DEV_ID_PCH_M_HV_LC: + mac->type = igc_pchlan; + break; + case IGC_DEV_ID_PCH2_LV_LM: + case IGC_DEV_ID_PCH2_LV_V: + mac->type = igc_pch2lan; + break; + case IGC_DEV_ID_PCH_LPT_I217_LM: + case IGC_DEV_ID_PCH_LPT_I217_V: + case IGC_DEV_ID_PCH_LPTLP_I218_LM: + case IGC_DEV_ID_PCH_LPTLP_I218_V: + case IGC_DEV_ID_PCH_I218_LM2: + case IGC_DEV_ID_PCH_I218_V2: + case IGC_DEV_ID_PCH_I218_LM3: + case IGC_DEV_ID_PCH_I218_V3: + mac->type = igc_pch_lpt; + break; + case IGC_DEV_ID_PCH_SPT_I219_LM: + case IGC_DEV_ID_PCH_SPT_I219_V: + case IGC_DEV_ID_PCH_SPT_I219_LM2: + case IGC_DEV_ID_PCH_SPT_I219_V2: + case IGC_DEV_ID_PCH_LBG_I219_LM3: + case IGC_DEV_ID_PCH_SPT_I219_LM4: + case IGC_DEV_ID_PCH_SPT_I219_V4: + case IGC_DEV_ID_PCH_SPT_I219_LM5: + case IGC_DEV_ID_PCH_SPT_I219_V5: + mac->type = igc_pch_spt; + break; + case IGC_DEV_ID_PCH_CNP_I219_LM6: + case IGC_DEV_ID_PCH_CNP_I219_V6: + case IGC_DEV_ID_PCH_CNP_I219_LM7: + case IGC_DEV_ID_PCH_CNP_I219_V7: + case IGC_DEV_ID_PCH_ICP_I219_LM8: + case IGC_DEV_ID_PCH_ICP_I219_V8: + case IGC_DEV_ID_PCH_ICP_I219_LM9: + case IGC_DEV_ID_PCH_ICP_I219_V9: + mac->type = igc_pch_cnp; + break; + case IGC_DEV_ID_82575EB_COPPER: + case IGC_DEV_ID_82575EB_FIBER_SERDES: + case IGC_DEV_ID_82575GB_QUAD_COPPER: + mac->type = igc_82575; + break; + case IGC_DEV_ID_82576: + case IGC_DEV_ID_82576_FIBER: + case IGC_DEV_ID_82576_SERDES: + case IGC_DEV_ID_82576_QUAD_COPPER: + case IGC_DEV_ID_82576_QUAD_COPPER_ET2: + case IGC_DEV_ID_82576_NS: + case IGC_DEV_ID_82576_NS_SERDES: + case IGC_DEV_ID_82576_SERDES_QUAD: + mac->type = igc_82576; + break; + case IGC_DEV_ID_82576_VF: + case IGC_DEV_ID_82576_VF_HV: + mac->type = igc_vfadapt; + break; + case IGC_DEV_ID_82580_COPPER: + case IGC_DEV_ID_82580_FIBER: + case IGC_DEV_ID_82580_SERDES: + case IGC_DEV_ID_82580_SGMII: + case IGC_DEV_ID_82580_COPPER_DUAL: + case IGC_DEV_ID_82580_QUAD_FIBER: + case IGC_DEV_ID_DH89XXCC_SGMII: + case IGC_DEV_ID_DH89XXCC_SERDES: + case IGC_DEV_ID_DH89XXCC_BACKPLANE: + case IGC_DEV_ID_DH89XXCC_SFP: + mac->type = igc_82580; + break; + case IGC_DEV_ID_I350_COPPER: + case IGC_DEV_ID_I350_FIBER: + case IGC_DEV_ID_I350_SERDES: + case IGC_DEV_ID_I350_SGMII: + case IGC_DEV_ID_I350_DA4: + mac->type = igc_i350; + break; + case IGC_DEV_ID_I210_COPPER_FLASHLESS: + case IGC_DEV_ID_I210_SERDES_FLASHLESS: + case IGC_DEV_ID_I210_SGMII_FLASHLESS: + case IGC_DEV_ID_I210_COPPER: + case IGC_DEV_ID_I210_COPPER_OEM1: + case IGC_DEV_ID_I210_COPPER_IT: + case IGC_DEV_ID_I210_FIBER: + case IGC_DEV_ID_I210_SERDES: + case IGC_DEV_ID_I210_SGMII: + mac->type = igc_i210; + break; + case IGC_DEV_ID_I211_COPPER: + mac->type = igc_i211; + break; + case IGC_DEV_ID_I225_LM: + case IGC_DEV_ID_I225_V: + case IGC_DEV_ID_I225_K: + case IGC_DEV_ID_I225_I: + case IGC_DEV_ID_I220_V: + case IGC_DEV_ID_I225_BLANK_NVM: + mac->type = igc_i225; + break; + case IGC_DEV_ID_I350_VF: + case IGC_DEV_ID_I350_VF_HV: + mac->type = igc_vfadapt_i350; + break; + case IGC_DEV_ID_I354_BACKPLANE_1GBPS: + case IGC_DEV_ID_I354_SGMII: + case IGC_DEV_ID_I354_BACKPLANE_2_5GBPS: + mac->type = igc_i354; + break; + default: + /* Should never have loaded on this device */ + ret_val = -IGC_ERR_MAC_INIT; + break; + } + + return ret_val; +} + +/** + * igc_setup_init_funcs - Initializes function pointers + * @hw: pointer to the HW structure + * @init_device: true will initialize the rest of the function pointers + * getting 
the device ready for use. false will only set + * MAC type and the function pointers for the other init + * functions. Passing false will not generate any hardware + * reads or writes. + * + * This function must be called by a driver in order to use the rest + * of the 'shared' code files. Called by drivers only. + **/ +s32 igc_setup_init_funcs(struct igc_hw *hw, bool init_device) +{ + s32 ret_val; + + /* Can't do much good without knowing the MAC type. */ + ret_val = igc_set_mac_type(hw); + if (ret_val) { + DEBUGOUT("ERROR: MAC type could not be set properly.\n"); + goto out; + } + + if (!hw->hw_addr) { + DEBUGOUT("ERROR: Registers not mapped\n"); + ret_val = -IGC_ERR_CONFIG; + goto out; + } + + /* + * Init function pointers to generic implementations. We do this first + * allowing a driver module to override it afterward. + */ + igc_init_mac_ops_generic(hw); + igc_init_phy_ops_generic(hw); + igc_init_nvm_ops_generic(hw); + + /* + * Set up the init function pointers. These are functions within the + * adapter family file that sets up function pointers for the rest of + * the functions in that family. + */ + switch (hw->mac.type) { + case igc_i225: + igc_init_function_pointers_i225(hw); + break; + default: + DEBUGOUT("Hardware not supported\n"); + ret_val = -IGC_ERR_CONFIG; + break; + } + + /* + * Initialize the rest of the function pointers. These require some + * register reads/writes in some cases. + */ + if (!(ret_val) && init_device) { + ret_val = igc_init_mac_params(hw); + if (ret_val) + goto out; + + ret_val = igc_init_nvm_params(hw); + if (ret_val) + goto out; + + ret_val = igc_init_phy_params(hw); + if (ret_val) + goto out; + } + +out: + return ret_val; +} + +/** + * igc_get_bus_info - Obtain bus information for adapter + * @hw: pointer to the HW structure + * + * This will obtain information about the HW bus for which the + * adapter is attached and stores it in the hw structure. This is a + * function pointer entry point called by drivers. + **/ +s32 igc_get_bus_info(struct igc_hw *hw) +{ + if (hw->mac.ops.get_bus_info) + return hw->mac.ops.get_bus_info(hw); + + return IGC_SUCCESS; +} + +/** + * igc_clear_vfta - Clear VLAN filter table + * @hw: pointer to the HW structure + * + * This clears the VLAN filter table on the adapter. This is a function + * pointer entry point called by drivers. + **/ +void igc_clear_vfta(struct igc_hw *hw) +{ + if (hw->mac.ops.clear_vfta) + hw->mac.ops.clear_vfta(hw); +} + +/** + * igc_write_vfta - Write value to VLAN filter table + * @hw: pointer to the HW structure + * @offset: the 32-bit offset in which to write the value to. + * @value: the 32-bit value to write at location offset. + * + * This writes a 32-bit value to a 32-bit offset in the VLAN filter + * table. This is a function pointer entry point called by drivers. + **/ +void igc_write_vfta(struct igc_hw *hw, u32 offset, u32 value) +{ + if (hw->mac.ops.write_vfta) + hw->mac.ops.write_vfta(hw, offset, value); +} + +/** + * igc_update_mc_addr_list - Update Multicast addresses + * @hw: pointer to the HW structure + * @mc_addr_list: array of multicast addresses to program + * @mc_addr_count: number of multicast addresses to program + * + * Updates the Multicast Table Array. + * The caller must have a packed mc_addr_list of multicast addresses. 
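+ *
+ * Editor's note: illustrative sketch added in review, not part of the
+ * upstream igc code. "Packed" means the 6-byte addresses are stored back
+ * to back with no padding, e.g. for two multicast addresses:
+ *
+ *	u8 mc_list[2 * 6] = {
+ *		0x01, 0x00, 0x5e, 0x00, 0x00, 0x01,
+ *		0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb,
+ *	};
+ *
+ *	igc_update_mc_addr_list(hw, mc_list, 2);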
+ **/ +void igc_update_mc_addr_list(struct igc_hw *hw, u8 *mc_addr_list, + u32 mc_addr_count) +{ + if (hw->mac.ops.update_mc_addr_list) + hw->mac.ops.update_mc_addr_list(hw, mc_addr_list, + mc_addr_count); +} + +/** + * igc_force_mac_fc - Force MAC flow control + * @hw: pointer to the HW structure + * + * Force the MAC's flow control settings. Currently no func pointer exists + * and all implementations are handled in the generic version of this + * function. + **/ +s32 igc_force_mac_fc(struct igc_hw *hw) +{ + return igc_force_mac_fc_generic(hw); +} + +/** + * igc_check_for_link - Check/Store link connection + * @hw: pointer to the HW structure + * + * This checks the link condition of the adapter and stores the + * results in the hw->mac structure. This is a function pointer entry + * point called by drivers. + **/ +s32 igc_check_for_link(struct igc_hw *hw) +{ + if (hw->mac.ops.check_for_link) + return hw->mac.ops.check_for_link(hw); + + return -IGC_ERR_CONFIG; +} + +/** + * igc_check_mng_mode - Check management mode + * @hw: pointer to the HW structure + * + * This checks if the adapter has manageability enabled. + * This is a function pointer entry point called by drivers. + **/ +bool igc_check_mng_mode(struct igc_hw *hw) +{ + if (hw->mac.ops.check_mng_mode) + return hw->mac.ops.check_mng_mode(hw); + + return false; +} + +/** + * igc_mng_write_dhcp_info - Writes DHCP info to host interface + * @hw: pointer to the HW structure + * @buffer: pointer to the host interface + * @length: size of the buffer + * + * Writes the DHCP information to the host interface. + **/ +s32 igc_mng_write_dhcp_info(struct igc_hw *hw, u8 *buffer, u16 length) +{ + return igc_mng_write_dhcp_info_generic(hw, buffer, length); +} + +/** + * igc_reset_hw - Reset hardware + * @hw: pointer to the HW structure + * + * This resets the hardware into a known state. This is a function pointer + * entry point called by drivers. + **/ +s32 igc_reset_hw(struct igc_hw *hw) +{ + if (hw->mac.ops.reset_hw) + return hw->mac.ops.reset_hw(hw); + + return -IGC_ERR_CONFIG; +} + +/** + * igc_init_hw - Initialize hardware + * @hw: pointer to the HW structure + * + * This inits the hardware readying it for operation. This is a function + * pointer entry point called by drivers. + **/ +s32 igc_init_hw(struct igc_hw *hw) +{ + if (hw->mac.ops.init_hw) + return hw->mac.ops.init_hw(hw); + + return -IGC_ERR_CONFIG; +} + +/** + * igc_setup_link - Configures link and flow control + * @hw: pointer to the HW structure + * + * This configures link and flow control settings for the adapter. This + * is a function pointer entry point called by drivers. While modules can + * also call this, they probably call their own version of this function. + **/ +s32 igc_setup_link(struct igc_hw *hw) +{ + if (hw->mac.ops.setup_link) + return hw->mac.ops.setup_link(hw); + + return -IGC_ERR_CONFIG; +} + +/** + * igc_get_speed_and_duplex - Returns current speed and duplex + * @hw: pointer to the HW structure + * @speed: pointer to a 16-bit value to store the speed + * @duplex: pointer to a 16-bit value to store the duplex. + * + * This returns the speed and duplex of the adapter in the two 'out' + * variables passed in. This is a function pointer entry point called + * by drivers. 
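+ *
+ * Editor's note: illustrative sketch added in review, not part of the
+ * upstream igc code. Typical use once link has been established:
+ *
+ *	u16 speed, duplex;
+ *	s32 ret;
+ *
+ *	ret = igc_get_speed_and_duplex(hw, &speed, &duplex);
+ *
+ * On success, speed is reported in Mbps and duplex as a half/full-duplex
+ * code.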
+ **/ +s32 igc_get_speed_and_duplex(struct igc_hw *hw, u16 *speed, u16 *duplex) +{ + if (hw->mac.ops.get_link_up_info) + return hw->mac.ops.get_link_up_info(hw, speed, duplex); + + return -IGC_ERR_CONFIG; +} + +/** + * igc_setup_led - Configures SW controllable LED + * @hw: pointer to the HW structure + * + * This prepares the SW controllable LED for use and saves the current state + * of the LED so it can be later restored. This is a function pointer entry + * point called by drivers. + **/ +s32 igc_setup_led(struct igc_hw *hw) +{ + if (hw->mac.ops.setup_led) + return hw->mac.ops.setup_led(hw); + + return IGC_SUCCESS; +} + +/** + * igc_cleanup_led - Restores SW controllable LED + * @hw: pointer to the HW structure + * + * This restores the SW controllable LED to the value saved off by + * igc_setup_led. This is a function pointer entry point called by drivers. + **/ +s32 igc_cleanup_led(struct igc_hw *hw) +{ + if (hw->mac.ops.cleanup_led) + return hw->mac.ops.cleanup_led(hw); + + return IGC_SUCCESS; +} + +/** + * igc_blink_led - Blink SW controllable LED + * @hw: pointer to the HW structure + * + * This starts the adapter LED blinking. Request the LED to be setup first + * and cleaned up after. This is a function pointer entry point called by + * drivers. + **/ +s32 igc_blink_led(struct igc_hw *hw) +{ + if (hw->mac.ops.blink_led) + return hw->mac.ops.blink_led(hw); + + return IGC_SUCCESS; +} + +/** + * igc_id_led_init - store LED configurations in SW + * @hw: pointer to the HW structure + * + * Initializes the LED config in SW. This is a function pointer entry point + * called by drivers. + **/ +s32 igc_id_led_init(struct igc_hw *hw) +{ + if (hw->mac.ops.id_led_init) + return hw->mac.ops.id_led_init(hw); + + return IGC_SUCCESS; +} + +/** + * igc_led_on - Turn on SW controllable LED + * @hw: pointer to the HW structure + * + * Turns the SW defined LED on. This is a function pointer entry point + * called by drivers. + **/ +s32 igc_led_on(struct igc_hw *hw) +{ + if (hw->mac.ops.led_on) + return hw->mac.ops.led_on(hw); + + return IGC_SUCCESS; +} + +/** + * igc_led_off - Turn off SW controllable LED + * @hw: pointer to the HW structure + * + * Turns the SW defined LED off. This is a function pointer entry point + * called by drivers. + **/ +s32 igc_led_off(struct igc_hw *hw) +{ + if (hw->mac.ops.led_off) + return hw->mac.ops.led_off(hw); + + return IGC_SUCCESS; +} + +/** + * igc_reset_adaptive - Reset adaptive IFS + * @hw: pointer to the HW structure + * + * Resets the adaptive IFS. Currently no func pointer exists and all + * implementations are handled in the generic version of this function. + **/ +void igc_reset_adaptive(struct igc_hw *hw) +{ + igc_reset_adaptive_generic(hw); +} + +/** + * igc_update_adaptive - Update adaptive IFS + * @hw: pointer to the HW structure + * + * Updates adapter IFS. Currently no func pointer exists and all + * implementations are handled in the generic version of this function. + **/ +void igc_update_adaptive(struct igc_hw *hw) +{ + igc_update_adaptive_generic(hw); +} + +/** + * igc_disable_pcie_master - Disable PCI-Express master access + * @hw: pointer to the HW structure + * + * Disables PCI-Express master access and verifies there are no pending + * requests. Currently no func pointer exists and all implementations are + * handled in the generic version of this function. 
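+ *
+ * Editor's note: illustrative sketch added in review, not part of the
+ * upstream igc code. Reset paths typically call this just before resetting
+ * the device and only log a failure:
+ *
+ *	if (igc_disable_pcie_master(hw) != IGC_SUCCESS)
+ *		DEBUGOUT("PCI-E master disable polling has failed\n");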
+ **/ +s32 igc_disable_pcie_master(struct igc_hw *hw) +{ + return igc_disable_pcie_master_generic(hw); +} + +/** + * igc_config_collision_dist - Configure collision distance + * @hw: pointer to the HW structure + * + * Configures the collision distance to the default value and is used + * during link setup. + **/ +void igc_config_collision_dist(struct igc_hw *hw) +{ + if (hw->mac.ops.config_collision_dist) + hw->mac.ops.config_collision_dist(hw); +} + +/** + * igc_rar_set - Sets a receive address register + * @hw: pointer to the HW structure + * @addr: address to set the RAR to + * @index: the RAR to set + * + * Sets a Receive Address Register (RAR) to the specified address. + **/ +int igc_rar_set(struct igc_hw *hw, u8 *addr, u32 index) +{ + if (hw->mac.ops.rar_set) + return hw->mac.ops.rar_set(hw, addr, index); + + return IGC_SUCCESS; +} + +/** + * igc_validate_mdi_setting - Ensures valid MDI/MDIX SW state + * @hw: pointer to the HW structure + * + * Ensures that the MDI/MDIX SW state is valid. + **/ +s32 igc_validate_mdi_setting(struct igc_hw *hw) +{ + if (hw->mac.ops.validate_mdi_setting) + return hw->mac.ops.validate_mdi_setting(hw); + + return IGC_SUCCESS; +} + +/** + * igc_hash_mc_addr - Determines address location in multicast table + * @hw: pointer to the HW structure + * @mc_addr: Multicast address to hash. + * + * This hashes an address to determine its location in the multicast + * table. Currently no func pointer exists and all implementations + * are handled in the generic version of this function. + **/ +u32 igc_hash_mc_addr(struct igc_hw *hw, u8 *mc_addr) +{ + return igc_hash_mc_addr_generic(hw, mc_addr); +} + +/** + * igc_enable_tx_pkt_filtering - Enable packet filtering on TX + * @hw: pointer to the HW structure + * + * Enables packet filtering on transmit packets if manageability is enabled + * and host interface is enabled. + * Currently no func pointer exists and all implementations are handled in the + * generic version of this function. + **/ +bool igc_enable_tx_pkt_filtering(struct igc_hw *hw) +{ + return igc_enable_tx_pkt_filtering_generic(hw); +} + +/** + * igc_mng_host_if_write - Writes to the manageability host interface + * @hw: pointer to the HW structure + * @buffer: pointer to the host interface buffer + * @length: size of the buffer + * @offset: location in the buffer to write to + * @sum: sum of the data (not checksum) + * + * This function writes the buffer content at the offset given on the host if. + * It also does alignment considerations to do the writes in most efficient + * way. Also fills up the sum of the buffer in *buffer parameter. + **/ +s32 igc_mng_host_if_write(struct igc_hw *hw, u8 *buffer, u16 length, + u16 offset, u8 *sum) +{ + return igc_mng_host_if_write_generic(hw, buffer, length, offset, sum); +} + +/** + * igc_mng_write_cmd_header - Writes manageability command header + * @hw: pointer to the HW structure + * @hdr: pointer to the host interface command header + * + * Writes the command header after does the checksum calculation. + **/ +s32 igc_mng_write_cmd_header(struct igc_hw *hw, + struct igc_host_mng_command_header *hdr) +{ + return igc_mng_write_cmd_header_generic(hw, hdr); +} + +/** + * igc_mng_enable_host_if - Checks host interface is enabled + * @hw: pointer to the HW structure + * + * Returns IGC_success upon success, else IGC_ERR_HOST_INTERFACE_COMMAND + * + * This function checks whether the HOST IF is enabled for command operation + * and also checks whether the previous command is completed. 
It busy waits + * in case of previous command is not completed. + **/ +s32 igc_mng_enable_host_if(struct igc_hw *hw) +{ + return igc_mng_enable_host_if_generic(hw); +} + +/** + * igc_check_reset_block - Verifies PHY can be reset + * @hw: pointer to the HW structure + * + * Checks if the PHY is in a state that can be reset or if manageability + * has it tied up. This is a function pointer entry point called by drivers. + **/ +s32 igc_check_reset_block(struct igc_hw *hw) +{ + if (hw->phy.ops.check_reset_block) + return hw->phy.ops.check_reset_block(hw); + + return IGC_SUCCESS; +} + +/** + * igc_read_phy_reg - Reads PHY register + * @hw: pointer to the HW structure + * @offset: the register to read + * @data: the buffer to store the 16-bit read. + * + * Reads the PHY register and returns the value in data. + * This is a function pointer entry point called by drivers. + **/ +s32 igc_read_phy_reg(struct igc_hw *hw, u32 offset, u16 *data) +{ + if (hw->phy.ops.read_reg) + return hw->phy.ops.read_reg(hw, offset, data); + + return IGC_SUCCESS; +} + +/** + * igc_write_phy_reg - Writes PHY register + * @hw: pointer to the HW structure + * @offset: the register to write + * @data: the value to write. + * + * Writes the PHY register at offset with the value in data. + * This is a function pointer entry point called by drivers. + **/ +s32 igc_write_phy_reg(struct igc_hw *hw, u32 offset, u16 data) +{ + if (hw->phy.ops.write_reg) + return hw->phy.ops.write_reg(hw, offset, data); + + return IGC_SUCCESS; +} + +/** + * igc_release_phy - Generic release PHY + * @hw: pointer to the HW structure + * + * Return if silicon family does not require a semaphore when accessing the + * PHY. + **/ +void igc_release_phy(struct igc_hw *hw) +{ + if (hw->phy.ops.release) + hw->phy.ops.release(hw); +} + +/** + * igc_acquire_phy - Generic acquire PHY + * @hw: pointer to the HW structure + * + * Return success if silicon family does not require a semaphore when + * accessing the PHY. + **/ +s32 igc_acquire_phy(struct igc_hw *hw) +{ + if (hw->phy.ops.acquire) + return hw->phy.ops.acquire(hw); + + return IGC_SUCCESS; +} + +/** + * igc_cfg_on_link_up - Configure PHY upon link up + * @hw: pointer to the HW structure + **/ +s32 igc_cfg_on_link_up(struct igc_hw *hw) +{ + if (hw->phy.ops.cfg_on_link_up) + return hw->phy.ops.cfg_on_link_up(hw); + + return IGC_SUCCESS; +} + +/** + * igc_read_kmrn_reg - Reads register using Kumeran interface + * @hw: pointer to the HW structure + * @offset: the register to read + * @data: the location to store the 16-bit value read. + * + * Reads a register out of the Kumeran interface. Currently no func pointer + * exists and all implementations are handled in the generic version of + * this function. + **/ +s32 igc_read_kmrn_reg(struct igc_hw *hw, u32 offset, u16 *data) +{ + return igc_read_kmrn_reg_generic(hw, offset, data); +} + +/** + * igc_write_kmrn_reg - Writes register using Kumeran interface + * @hw: pointer to the HW structure + * @offset: the register to write + * @data: the value to write. + * + * Writes a register to the Kumeran interface. Currently no func pointer + * exists and all implementations are handled in the generic version of + * this function. 
+ **/ +s32 igc_write_kmrn_reg(struct igc_hw *hw, u32 offset, u16 data) +{ + return igc_write_kmrn_reg_generic(hw, offset, data); +} + +/** + * igc_get_cable_length - Retrieves cable length estimation + * @hw: pointer to the HW structure + * + * This function estimates the cable length and stores them in + * hw->phy.min_length and hw->phy.max_length. This is a function pointer + * entry point called by drivers. + **/ +s32 igc_get_cable_length(struct igc_hw *hw) +{ + if (hw->phy.ops.get_cable_length) + return hw->phy.ops.get_cable_length(hw); + + return IGC_SUCCESS; +} + +/** + * igc_get_phy_info - Retrieves PHY information from registers + * @hw: pointer to the HW structure + * + * This function gets some information from various PHY registers and + * populates hw->phy values with it. This is a function pointer entry + * point called by drivers. + **/ +s32 igc_get_phy_info(struct igc_hw *hw) +{ + if (hw->phy.ops.get_info) + return hw->phy.ops.get_info(hw); + + return IGC_SUCCESS; +} + +/** + * igc_phy_hw_reset - Hard PHY reset + * @hw: pointer to the HW structure + * + * Performs a hard PHY reset. This is a function pointer entry point called + * by drivers. + **/ +s32 igc_phy_hw_reset(struct igc_hw *hw) +{ + if (hw->phy.ops.reset) + return hw->phy.ops.reset(hw); + + return IGC_SUCCESS; +} + +/** + * igc_phy_commit - Soft PHY reset + * @hw: pointer to the HW structure + * + * Performs a soft PHY reset on those that apply. This is a function pointer + * entry point called by drivers. + **/ +s32 igc_phy_commit(struct igc_hw *hw) +{ + if (hw->phy.ops.commit) + return hw->phy.ops.commit(hw); + + return IGC_SUCCESS; +} + +/** + * igc_set_d0_lplu_state - Sets low power link up state for D0 + * @hw: pointer to the HW structure + * @active: boolean used to enable/disable lplu + * + * Success returns 0, Failure returns 1 + * + * The low power link up (lplu) state is set to the power management level D0 + * and SmartSpeed is disabled when active is true, else clear lplu for D0 + * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU + * is used during Dx states where the power conservation is most important. + * During driver activity, SmartSpeed should be enabled so performance is + * maintained. This is a function pointer entry point called by drivers. + **/ +s32 igc_set_d0_lplu_state(struct igc_hw *hw, bool active) +{ + if (hw->phy.ops.set_d0_lplu_state) + return hw->phy.ops.set_d0_lplu_state(hw, active); + + return IGC_SUCCESS; +} + +/** + * igc_set_d3_lplu_state - Sets low power link up state for D3 + * @hw: pointer to the HW structure + * @active: boolean used to enable/disable lplu + * + * Success returns 0, Failure returns 1 + * + * The low power link up (lplu) state is set to the power management level D3 + * and SmartSpeed is disabled when active is true, else clear lplu for D3 + * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU + * is used during Dx states where the power conservation is most important. + * During driver activity, SmartSpeed should be enabled so performance is + * maintained. This is a function pointer entry point called by drivers. + **/ +s32 igc_set_d3_lplu_state(struct igc_hw *hw, bool active) +{ + if (hw->phy.ops.set_d3_lplu_state) + return hw->phy.ops.set_d3_lplu_state(hw, active); + + return IGC_SUCCESS; +} + +/** + * igc_read_mac_addr - Reads MAC address + * @hw: pointer to the HW structure + * + * Reads the MAC address out of the adapter and stores it in the HW structure. 
+ * Currently no func pointer exists and all implementations are handled in the + * generic version of this function. + **/ +s32 igc_read_mac_addr(struct igc_hw *hw) +{ + if (hw->mac.ops.read_mac_addr) + return hw->mac.ops.read_mac_addr(hw); + + return igc_read_mac_addr_generic(hw); +} + +/** + * igc_read_pba_string - Read device part number string + * @hw: pointer to the HW structure + * @pba_num: pointer to device part number + * @pba_num_size: size of part number buffer + * + * Reads the product board assembly (PBA) number from the EEPROM and stores + * the value in pba_num. + * Currently no func pointer exists and all implementations are handled in the + * generic version of this function. + **/ +s32 igc_read_pba_string(struct igc_hw *hw, u8 *pba_num, u32 pba_num_size) +{ + return igc_read_pba_string_generic(hw, pba_num, pba_num_size); +} + +/** + * igc_read_pba_length - Read device part number string length + * @hw: pointer to the HW structure + * @pba_num_size: size of part number buffer + * + * Reads the product board assembly (PBA) number length from the EEPROM and + * stores the value in pba_num. + * Currently no func pointer exists and all implementations are handled in the + * generic version of this function. + **/ +s32 igc_read_pba_length(struct igc_hw *hw, u32 *pba_num_size) +{ + return igc_read_pba_length_generic(hw, pba_num_size); +} + +/** + * igc_read_pba_num - Read device part number + * @hw: pointer to the HW structure + * @pba_num: pointer to device part number + * + * Reads the product board assembly (PBA) number from the EEPROM and stores + * the value in pba_num. + * Currently no func pointer exists and all implementations are handled in the + * generic version of this function. + **/ +s32 igc_read_pba_num(struct igc_hw *hw, u32 *pba_num) +{ + return igc_read_pba_num_generic(hw, pba_num); +} + +/** + * igc_validate_nvm_checksum - Verifies NVM (EEPROM) checksum + * @hw: pointer to the HW structure + * + * Validates the NVM checksum is correct. This is a function pointer entry + * point called by drivers. + **/ +s32 igc_validate_nvm_checksum(struct igc_hw *hw) +{ + if (hw->nvm.ops.validate) + return hw->nvm.ops.validate(hw); + + return -IGC_ERR_CONFIG; +} + +/** + * igc_update_nvm_checksum - Updates NVM (EEPROM) checksum + * @hw: pointer to the HW structure + * + * Updates the NVM checksum. Currently no func pointer exists and all + * implementations are handled in the generic version of this function. + **/ +s32 igc_update_nvm_checksum(struct igc_hw *hw) +{ + if (hw->nvm.ops.update) + return hw->nvm.ops.update(hw); + + return -IGC_ERR_CONFIG; +} + +/** + * igc_reload_nvm - Reloads EEPROM + * @hw: pointer to the HW structure + * + * Reloads the EEPROM by setting the "Reinitialize from EEPROM" bit in the + * extended control register. + **/ +void igc_reload_nvm(struct igc_hw *hw) +{ + if (hw->nvm.ops.reload) + hw->nvm.ops.reload(hw); +} + +/** + * igc_read_nvm - Reads NVM (EEPROM) + * @hw: pointer to the HW structure + * @offset: the word offset to read + * @words: number of 16-bit words to read + * @data: pointer to the properly sized buffer for the data. + * + * Reads 16-bit chunks of data from the NVM (EEPROM). This is a function + * pointer entry point called by drivers. 
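+ *
+ * Editor's note: illustrative sketch added in review, not part of the
+ * upstream igc code. Reading a single 16-bit word (word offset 0 is used
+ * here purely as an example):
+ *
+ *	u16 word;
+ *	s32 ret;
+ *
+ *	ret = igc_read_nvm(hw, 0, 1, &word);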
+ **/ +s32 igc_read_nvm(struct igc_hw *hw, u16 offset, u16 words, u16 *data) +{ + if (hw->nvm.ops.read) + return hw->nvm.ops.read(hw, offset, words, data); + + return -IGC_ERR_CONFIG; +} + +/** + * igc_write_nvm - Writes to NVM (EEPROM) + * @hw: pointer to the HW structure + * @offset: the word offset to read + * @words: number of 16-bit words to write + * @data: pointer to the properly sized buffer for the data. + * + * Writes 16-bit chunks of data to the NVM (EEPROM). This is a function + * pointer entry point called by drivers. + **/ +s32 igc_write_nvm(struct igc_hw *hw, u16 offset, u16 words, u16 *data) +{ + if (hw->nvm.ops.write) + return hw->nvm.ops.write(hw, offset, words, data); + + return IGC_SUCCESS; +} + +/** + * igc_write_8bit_ctrl_reg - Writes 8bit Control register + * @hw: pointer to the HW structure + * @reg: 32bit register offset + * @offset: the register to write + * @data: the value to write. + * + * Writes the PHY register at offset with the value in data. + * This is a function pointer entry point called by drivers. + **/ +s32 igc_write_8bit_ctrl_reg(struct igc_hw *hw, u32 reg, u32 offset, + u8 data) +{ + return igc_write_8bit_ctrl_reg_generic(hw, reg, offset, data); +} + +/** + * igc_power_up_phy - Restores link in case of PHY power down + * @hw: pointer to the HW structure + * + * The phy may be powered down to save power, to turn off link when the + * driver is unloaded, or wake on lan is not enabled (among others). + **/ +void igc_power_up_phy(struct igc_hw *hw) +{ + if (hw->phy.ops.power_up) + hw->phy.ops.power_up(hw); + + igc_setup_link(hw); +} + +/** + * igc_power_down_phy - Power down PHY + * @hw: pointer to the HW structure + * + * The phy may be powered down to save power, to turn off link when the + * driver is unloaded, or wake on lan is not enabled (among others). + **/ +void igc_power_down_phy(struct igc_hw *hw) +{ + if (hw->phy.ops.power_down) + hw->phy.ops.power_down(hw); +} + +/** + * igc_power_up_fiber_serdes_link - Power up serdes link + * @hw: pointer to the HW structure + * + * Power on the optics and PCS. + **/ +void igc_power_up_fiber_serdes_link(struct igc_hw *hw) +{ + if (hw->mac.ops.power_up_serdes) + hw->mac.ops.power_up_serdes(hw); +} + +/** + * igc_shutdown_fiber_serdes_link - Remove link during power down + * @hw: pointer to the HW structure + * + * Shutdown the optics and PCS on driver unload. 
+ **/ +void igc_shutdown_fiber_serdes_link(struct igc_hw *hw) +{ + if (hw->mac.ops.shutdown_serdes) + hw->mac.ops.shutdown_serdes(hw); +} diff --git a/src/spdk/dpdk/drivers/net/igc/base/igc_api.h b/src/spdk/dpdk/drivers/net/igc/base/igc_api.h new file mode 100644 index 000000000..00681ee4f --- /dev/null +++ b/src/spdk/dpdk/drivers/net/igc/base/igc_api.h @@ -0,0 +1,111 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + +#ifndef _IGC_API_H_ +#define _IGC_API_H_ + +#include "igc_hw.h" + +/* I2C SDA and SCL timing parameters for standard mode */ +#define IGC_I2C_T_HD_STA 4 +#define IGC_I2C_T_LOW 5 +#define IGC_I2C_T_HIGH 4 +#define IGC_I2C_T_SU_STA 5 +#define IGC_I2C_T_HD_DATA 5 +#define IGC_I2C_T_SU_DATA 1 +#define IGC_I2C_T_RISE 1 +#define IGC_I2C_T_FALL 1 +#define IGC_I2C_T_SU_STO 4 +#define IGC_I2C_T_BUF 5 + +s32 igc_set_i2c_bb(struct igc_hw *hw); +s32 igc_read_i2c_byte_generic(struct igc_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data); +s32 igc_write_i2c_byte_generic(struct igc_hw *hw, u8 byte_offset, + u8 dev_addr, u8 data); +void igc_i2c_bus_clear(struct igc_hw *hw); + +void igc_init_function_pointers_82542(struct igc_hw *hw); +void igc_init_function_pointers_82543(struct igc_hw *hw); +void igc_init_function_pointers_82540(struct igc_hw *hw); +void igc_init_function_pointers_82571(struct igc_hw *hw); +void igc_init_function_pointers_82541(struct igc_hw *hw); +void igc_init_function_pointers_80003es2lan(struct igc_hw *hw); +void igc_init_function_pointers_ich8lan(struct igc_hw *hw); +void igc_init_function_pointers_82575(struct igc_hw *hw); +void igc_init_function_pointers_vf(struct igc_hw *hw); +void igc_power_up_fiber_serdes_link(struct igc_hw *hw); +void igc_shutdown_fiber_serdes_link(struct igc_hw *hw); +void igc_init_function_pointers_i210(struct igc_hw *hw); +void igc_init_function_pointers_i225(struct igc_hw *hw); + +s32 igc_set_obff_timer(struct igc_hw *hw, u32 itr); +s32 igc_set_mac_type(struct igc_hw *hw); +s32 igc_setup_init_funcs(struct igc_hw *hw, bool init_device); +s32 igc_init_mac_params(struct igc_hw *hw); +s32 igc_init_nvm_params(struct igc_hw *hw); +s32 igc_init_phy_params(struct igc_hw *hw); +s32 igc_init_mbx_params(struct igc_hw *hw); +s32 igc_get_bus_info(struct igc_hw *hw); +void igc_clear_vfta(struct igc_hw *hw); +void igc_write_vfta(struct igc_hw *hw, u32 offset, u32 value); +s32 igc_force_mac_fc(struct igc_hw *hw); +s32 igc_check_for_link(struct igc_hw *hw); +s32 igc_reset_hw(struct igc_hw *hw); +s32 igc_init_hw(struct igc_hw *hw); +s32 igc_setup_link(struct igc_hw *hw); +s32 igc_get_speed_and_duplex(struct igc_hw *hw, u16 *speed, u16 *duplex); +s32 igc_disable_pcie_master(struct igc_hw *hw); +void igc_config_collision_dist(struct igc_hw *hw); +int igc_rar_set(struct igc_hw *hw, u8 *addr, u32 index); +u32 igc_hash_mc_addr(struct igc_hw *hw, u8 *mc_addr); +void igc_update_mc_addr_list(struct igc_hw *hw, u8 *mc_addr_list, + u32 mc_addr_count); +s32 igc_setup_led(struct igc_hw *hw); +s32 igc_cleanup_led(struct igc_hw *hw); +s32 igc_check_reset_block(struct igc_hw *hw); +s32 igc_blink_led(struct igc_hw *hw); +s32 igc_led_on(struct igc_hw *hw); +s32 igc_led_off(struct igc_hw *hw); +s32 igc_id_led_init(struct igc_hw *hw); +void igc_reset_adaptive(struct igc_hw *hw); +void igc_update_adaptive(struct igc_hw *hw); +s32 igc_get_cable_length(struct igc_hw *hw); +s32 igc_validate_mdi_setting(struct igc_hw *hw); +s32 igc_read_phy_reg(struct igc_hw *hw, u32 offset, u16 *data); +s32 igc_write_phy_reg(struct igc_hw *hw, u32 offset, 
u16 data); +s32 igc_write_8bit_ctrl_reg(struct igc_hw *hw, u32 reg, u32 offset, + u8 data); +s32 igc_get_phy_info(struct igc_hw *hw); +void igc_release_phy(struct igc_hw *hw); +s32 igc_acquire_phy(struct igc_hw *hw); +s32 igc_cfg_on_link_up(struct igc_hw *hw); +s32 igc_phy_hw_reset(struct igc_hw *hw); +s32 igc_phy_commit(struct igc_hw *hw); +void igc_power_up_phy(struct igc_hw *hw); +void igc_power_down_phy(struct igc_hw *hw); +s32 igc_read_mac_addr(struct igc_hw *hw); +s32 igc_read_pba_num(struct igc_hw *hw, u32 *part_num); +s32 igc_read_pba_string(struct igc_hw *hw, u8 *pba_num, u32 pba_num_size); +s32 igc_read_pba_length(struct igc_hw *hw, u32 *pba_num_size); +void igc_reload_nvm(struct igc_hw *hw); +s32 igc_update_nvm_checksum(struct igc_hw *hw); +s32 igc_validate_nvm_checksum(struct igc_hw *hw); +s32 igc_read_nvm(struct igc_hw *hw, u16 offset, u16 words, u16 *data); +s32 igc_read_kmrn_reg(struct igc_hw *hw, u32 offset, u16 *data); +s32 igc_write_kmrn_reg(struct igc_hw *hw, u32 offset, u16 data); +s32 igc_write_nvm(struct igc_hw *hw, u16 offset, u16 words, u16 *data); +s32 igc_set_d3_lplu_state(struct igc_hw *hw, bool active); +s32 igc_set_d0_lplu_state(struct igc_hw *hw, bool active); +bool igc_check_mng_mode(struct igc_hw *hw); +bool igc_enable_tx_pkt_filtering(struct igc_hw *hw); +s32 igc_mng_enable_host_if(struct igc_hw *hw); +s32 igc_mng_host_if_write(struct igc_hw *hw, u8 *buffer, u16 length, + u16 offset, u8 *sum); +s32 igc_mng_write_cmd_header(struct igc_hw *hw, + struct igc_host_mng_command_header *hdr); +s32 igc_mng_write_dhcp_info(struct igc_hw *hw, u8 *buffer, u16 length); +u32 igc_translate_register_82542(u32 reg); + +#endif /* _IGC_API_H_ */ diff --git a/src/spdk/dpdk/drivers/net/igc/base/igc_base.c b/src/spdk/dpdk/drivers/net/igc/base/igc_base.c new file mode 100644 index 000000000..1e8b90890 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/igc/base/igc_base.c @@ -0,0 +1,190 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + +#include "igc_hw.h" +#include "igc_i225.h" +#include "igc_mac.h" +#include "igc_base.h" +#include "igc_manage.h" + +/** + * igc_acquire_phy_base - Acquire rights to access PHY + * @hw: pointer to the HW structure + * + * Acquire access rights to the correct PHY. + **/ +s32 igc_acquire_phy_base(struct igc_hw *hw) +{ + u16 mask = IGC_SWFW_PHY0_SM; + + DEBUGFUNC("igc_acquire_phy_base"); + + if (hw->bus.func == IGC_FUNC_1) + mask = IGC_SWFW_PHY1_SM; + else if (hw->bus.func == IGC_FUNC_2) + mask = IGC_SWFW_PHY2_SM; + else if (hw->bus.func == IGC_FUNC_3) + mask = IGC_SWFW_PHY3_SM; + + return hw->mac.ops.acquire_swfw_sync(hw, mask); +} + +/** + * igc_release_phy_base - Release rights to access PHY + * @hw: pointer to the HW structure + * + * A wrapper to release access rights to the correct PHY. + **/ +void igc_release_phy_base(struct igc_hw *hw) +{ + u16 mask = IGC_SWFW_PHY0_SM; + + DEBUGFUNC("igc_release_phy_base"); + + if (hw->bus.func == IGC_FUNC_1) + mask = IGC_SWFW_PHY1_SM; + else if (hw->bus.func == IGC_FUNC_2) + mask = IGC_SWFW_PHY2_SM; + else if (hw->bus.func == IGC_FUNC_3) + mask = IGC_SWFW_PHY3_SM; + + hw->mac.ops.release_swfw_sync(hw, mask); +} + +/** + * igc_init_hw_base - Initialize hardware + * @hw: pointer to the HW structure + * + * This inits the hardware readying it for operation. 
+ **/ +s32 igc_init_hw_base(struct igc_hw *hw) +{ + struct igc_mac_info *mac = &hw->mac; + s32 ret_val; + u16 i, rar_count = mac->rar_entry_count; + + DEBUGFUNC("igc_init_hw_base"); + + /* Setup the receive address */ + igc_init_rx_addrs_generic(hw, rar_count); + + /* Zero out the Multicast HASH table */ + DEBUGOUT("Zeroing the MTA\n"); + for (i = 0; i < mac->mta_reg_count; i++) + IGC_WRITE_REG_ARRAY(hw, IGC_MTA, i, 0); + + /* Zero out the Unicast HASH table */ + DEBUGOUT("Zeroing the UTA\n"); + for (i = 0; i < mac->uta_reg_count; i++) + IGC_WRITE_REG_ARRAY(hw, IGC_UTA, i, 0); + + /* Setup link and flow control */ + ret_val = mac->ops.setup_link(hw); + /* + * Clear all of the statistics registers (clear on read). It is + * important that we do this after we have tried to establish link + * because the symbol error count will increment wildly if there + * is no link. + */ + igc_clear_hw_cntrs_base_generic(hw); + + return ret_val; +} + +/** + * igc_power_down_phy_copper_base - Remove link during PHY power down + * @hw: pointer to the HW structure + * + * In the case of a PHY power down to save power, or to turn off link during a + * driver unload, or wake on lan is not enabled, remove the link. + **/ +void igc_power_down_phy_copper_base(struct igc_hw *hw) +{ + struct igc_phy_info *phy = &hw->phy; + + if (!(phy->ops.check_reset_block)) + return; + + /* If the management interface is not enabled, then power down */ + if (!phy->ops.check_reset_block(hw)) + igc_power_down_phy_copper(hw); +} + +/** + * igc_rx_fifo_flush_base - Clean Rx FIFO after Rx enable + * @hw: pointer to the HW structure + * + * After Rx enable, if manageability is enabled then there is likely some + * bad data at the start of the FIFO and possibly in the DMA FIFO. This + * function clears the FIFOs and flushes any packets that came in as Rx was + * being enabled. + **/ +void igc_rx_fifo_flush_base(struct igc_hw *hw) +{ + u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled; + int i, ms_wait; + + DEBUGFUNC("igc_rx_fifo_flush_base"); + + /* disable IPv6 options as per hardware errata */ + rfctl = IGC_READ_REG(hw, IGC_RFCTL); + rfctl |= IGC_RFCTL_IPV6_EX_DIS; + IGC_WRITE_REG(hw, IGC_RFCTL, rfctl); + + if (!(IGC_READ_REG(hw, IGC_MANC) & IGC_MANC_RCV_TCO_EN)) + return; + + /* Disable all Rx queues */ + for (i = 0; i < 4; i++) { + rxdctl[i] = IGC_READ_REG(hw, IGC_RXDCTL(i)); + IGC_WRITE_REG(hw, IGC_RXDCTL(i), + rxdctl[i] & ~IGC_RXDCTL_QUEUE_ENABLE); + } + /* Poll all queues to verify they have shut down */ + for (ms_wait = 0; ms_wait < 10; ms_wait++) { + msec_delay(1); + rx_enabled = 0; + for (i = 0; i < 4; i++) + rx_enabled |= IGC_READ_REG(hw, IGC_RXDCTL(i)); + if (!(rx_enabled & IGC_RXDCTL_QUEUE_ENABLE)) + break; + } + + if (ms_wait == 10) + DEBUGOUT("Queue disable timed out after 10ms\n"); + + /* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all + * incoming packets are rejected. 
Set enable and wait 2ms so that + * any packet that was coming in as RCTL.EN was set is flushed + */ + IGC_WRITE_REG(hw, IGC_RFCTL, rfctl & ~IGC_RFCTL_LEF); + + rlpml = IGC_READ_REG(hw, IGC_RLPML); + IGC_WRITE_REG(hw, IGC_RLPML, 0); + + rctl = IGC_READ_REG(hw, IGC_RCTL); + temp_rctl = rctl & ~(IGC_RCTL_EN | IGC_RCTL_SBP); + temp_rctl |= IGC_RCTL_LPE; + + IGC_WRITE_REG(hw, IGC_RCTL, temp_rctl); + IGC_WRITE_REG(hw, IGC_RCTL, temp_rctl | IGC_RCTL_EN); + IGC_WRITE_FLUSH(hw); + msec_delay(2); + + /* Enable Rx queues that were previously enabled and restore our + * previous state + */ + for (i = 0; i < 4; i++) + IGC_WRITE_REG(hw, IGC_RXDCTL(i), rxdctl[i]); + IGC_WRITE_REG(hw, IGC_RCTL, rctl); + IGC_WRITE_FLUSH(hw); + + IGC_WRITE_REG(hw, IGC_RLPML, rlpml); + IGC_WRITE_REG(hw, IGC_RFCTL, rfctl); + + /* Flush receive errors generated by workaround */ + IGC_READ_REG(hw, IGC_ROC); + IGC_READ_REG(hw, IGC_RNBC); + IGC_READ_REG(hw, IGC_MPC); +} diff --git a/src/spdk/dpdk/drivers/net/igc/base/igc_base.h b/src/spdk/dpdk/drivers/net/igc/base/igc_base.h new file mode 100644 index 000000000..5f342af7e --- /dev/null +++ b/src/spdk/dpdk/drivers/net/igc/base/igc_base.h @@ -0,0 +1,127 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + +#ifndef _IGC_BASE_H_ +#define _IGC_BASE_H_ + +/* forward declaration */ +s32 igc_init_hw_base(struct igc_hw *hw); +void igc_power_down_phy_copper_base(struct igc_hw *hw); +void igc_rx_fifo_flush_base(struct igc_hw *hw); +s32 igc_acquire_phy_base(struct igc_hw *hw); +void igc_release_phy_base(struct igc_hw *hw); + +/* Transmit Descriptor - Advanced */ +union igc_adv_tx_desc { + struct { + __le64 buffer_addr; /* Address of descriptor's data buf */ + __le32 cmd_type_len; + __le32 olinfo_status; + } read; + struct { + __le64 rsvd; /* Reserved */ + __le32 nxtseq_seed; + __le32 status; + } wb; +}; + +/* Context descriptors */ +struct igc_adv_tx_context_desc { + __le32 vlan_macip_lens; + union { + __le32 launch_time; + __le32 seqnum_seed; + } u; + __le32 type_tucmd_mlhl; + __le32 mss_l4len_idx; +}; + +/* Adv Transmit Descriptor Config Masks */ +#define IGC_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */ +#define IGC_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ +#define IGC_ADVTXD_DCMD_EOP 0x01000000 /* End of Packet */ +#define IGC_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define IGC_ADVTXD_DCMD_RS 0x08000000 /* Report Status */ +#define IGC_ADVTXD_DCMD_DDTYP_ISCSI 0x10000000 /* DDP hdr type or iSCSI */ +#define IGC_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */ +#define IGC_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */ +#define IGC_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ +#define IGC_ADVTXD_MAC_LINKSEC 0x00040000 /* Apply LinkSec on pkt */ +#define IGC_ADVTXD_MAC_TSTAMP 0x00080000 /* IEEE1588 Timestamp pkt */ +#define IGC_ADVTXD_STAT_SN_CRC 0x00000002 /* NXTSEQ/SEED prsnt in WB */ +#define IGC_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */ +#define IGC_ADVTXD_POPTS_ISCO_1ST 0x00000000 /* 1st TSO of iSCSI PDU */ +#define IGC_ADVTXD_POPTS_ISCO_MDL 0x00000800 /* Middle TSO of iSCSI PDU */ +#define IGC_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */ +/* 1st & Last TSO-full iSCSI PDU*/ +#define IGC_ADVTXD_POPTS_ISCO_FULL 0x00001800 +#define IGC_ADVTXD_POPTS_IPSEC 0x00000400 /* IPSec offload request */ +#define IGC_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ + +/* Advanced Transmit Context Descriptor Config */ +#define IGC_ADVTXD_MACLEN_SHIFT 9 /* Adv 
ctxt desc mac len shift */ +#define IGC_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */ +#define IGC_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */ +#define IGC_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */ +#define IGC_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */ +#define IGC_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ +#define IGC_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */ +#define IGC_ADVTXD_TUCMD_IPSEC_TYPE_ESP 0x00002000 /* IPSec Type ESP */ +/* IPSec Encrypt Enable for ESP */ +#define IGC_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN 0x00004000 +/* Req requires Markers and CRC */ +#define IGC_ADVTXD_TUCMD_MKRREQ 0x00002000 +#define IGC_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ +#define IGC_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ +/* Adv ctxt IPSec SA IDX mask */ +#define IGC_ADVTXD_IPSEC_SA_INDEX_MASK 0x000000FF +/* Adv ctxt IPSec ESP len mask */ +#define IGC_ADVTXD_IPSEC_ESP_LEN_MASK 0x000000FF + +#define IGC_RAR_ENTRIES_BASE 16 + +/* Receive Descriptor - Advanced */ +union igc_adv_rx_desc { + struct { + __le64 pkt_addr; /* Packet buffer address */ + __le64 hdr_addr; /* Header buffer address */ + } read; + struct { + struct { + union { + __le32 data; + struct { + __le16 pkt_info; /*RSS type, Pkt type*/ + /* Split Header, header buffer len */ + __le16 hdr_info; + } hs_rss; + } lo_dword; + union { + __le32 rss; /* RSS Hash */ + struct { + __le16 ip_id; /* IP id */ + __le16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; /* ext status/error */ + __le16 length; /* Packet length */ + __le16 vlan; /* VLAN tag */ + } upper; + } wb; /* writeback */ +}; + +/* Additional Transmit Descriptor Control definitions */ +#define IGC_TXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Tx Queue */ + +/* Additional Receive Descriptor Control definitions */ +#define IGC_RXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Rx Queue */ + +/* SRRCTL bit definitions */ +#define IGC_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */ +#define IGC_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */ +#define IGC_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 + +#endif /* _IGC_BASE_H_ */ diff --git a/src/spdk/dpdk/drivers/net/igc/base/igc_defines.h b/src/spdk/dpdk/drivers/net/igc/base/igc_defines.h new file mode 100644 index 000000000..30a41300f --- /dev/null +++ b/src/spdk/dpdk/drivers/net/igc/base/igc_defines.h @@ -0,0 +1,1649 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + +#ifndef _IGC_DEFINES_H_ +#define _IGC_DEFINES_H_ + +/* Number of Transmit and Receive Descriptors must be a multiple of 8 */ +#define REQ_TX_DESCRIPTOR_MULTIPLE 8 +#define REQ_RX_DESCRIPTOR_MULTIPLE 8 + +/* Definitions for power management and wakeup registers */ +/* Wake Up Control */ +#define IGC_WUC_APME 0x00000001 /* APM Enable */ +#define IGC_WUC_PME_EN 0x00000002 /* PME Enable */ +#define IGC_WUC_PME_STATUS 0x00000004 /* PME Status */ +#define IGC_WUC_APMPME 0x00000008 /* Assert PME on APM Wakeup */ +#define IGC_WUC_PHY_WAKE 0x00000100 /* if PHY supports wakeup */ + +/* Wake Up Filter Control */ +#define IGC_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ +#define IGC_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */ +#define IGC_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ +#define IGC_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */ +#define IGC_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ +#define IGC_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable 
*/ +#define IGC_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */ +#define IGC_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */ + +/* Wake Up Status */ +#define IGC_WUS_LNKC IGC_WUFC_LNKC +#define IGC_WUS_MAG IGC_WUFC_MAG +#define IGC_WUS_EX IGC_WUFC_EX +#define IGC_WUS_MC IGC_WUFC_MC +#define IGC_WUS_BC IGC_WUFC_BC + +/* Extended Device Control */ +#define IGC_CTRL_EXT_LPCD 0x00000004 /* LCD Power Cycle Done */ +#define IGC_CTRL_EXT_SDP4_DATA 0x00000010 /* SW Definable Pin 4 data */ +#define IGC_CTRL_EXT_SDP6_DATA 0x00000040 /* SW Definable Pin 6 data */ +#define IGC_CTRL_EXT_SDP3_DATA 0x00000080 /* SW Definable Pin 3 data */ +/* SDP 4/5 (bits 8,9) are reserved in >= 82575 */ +#define IGC_CTRL_EXT_SDP4_DIR 0x00000100 /* Direction of SDP4 0=in 1=out */ +#define IGC_CTRL_EXT_SDP6_DIR 0x00000400 /* Direction of SDP6 0=in 1=out */ +#define IGC_CTRL_EXT_SDP3_DIR 0x00000800 /* Direction of SDP3 0=in 1=out */ +#define IGC_CTRL_EXT_FORCE_SMBUS 0x00000800 /* Force SMBus mode */ +#define IGC_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */ +/* Physical Func Reset Done Indication */ +#define IGC_CTRL_EXT_PFRSTD 0x00004000 +#define IGC_CTRL_EXT_SDLPE 0X00040000 /* SerDes Low Power Enable */ +#define IGC_CTRL_EXT_SPD_BYPS 0x00008000 /* Speed Select Bypass */ +#define IGC_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */ +#define IGC_CTRL_EXT_DMA_DYN_CLK_EN 0x00080000 /* DMA Dynamic Clk Gating */ +#define IGC_CTRL_EXT_LINK_MODE_MASK 0x00C00000 +/* Offset of the link mode field in Ctrl Ext register */ +#define IGC_CTRL_EXT_LINK_MODE_OFFSET 22 +#define IGC_CTRL_EXT_LINK_MODE_1000BASE_KX 0x00400000 +#define IGC_CTRL_EXT_LINK_MODE_GMII 0x00000000 +#define IGC_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000 +#define IGC_CTRL_EXT_LINK_MODE_SGMII 0x00800000 +#define IGC_CTRL_EXT_EIAME 0x01000000 +#define IGC_CTRL_EXT_IRCA 0x00000001 +#define IGC_CTRL_EXT_DRV_LOAD 0x10000000 /* Drv loaded bit for FW */ +#define IGC_CTRL_EXT_IAME 0x08000000 /* Int ACK Auto-mask */ +#define IGC_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */ +#define IGC_CTRL_EXT_LSECCK 0x00001000 +#define IGC_CTRL_EXT_PHYPDEN 0x00100000 +#define IGC_I2CCMD_REG_ADDR_SHIFT 16 +#define IGC_I2CCMD_PHY_ADDR_SHIFT 24 +#define IGC_I2CCMD_OPCODE_READ 0x08000000 +#define IGC_I2CCMD_OPCODE_WRITE 0x00000000 +#define IGC_I2CCMD_READY 0x20000000 +#define IGC_I2CCMD_ERROR 0x80000000 +#define IGC_I2CCMD_SFP_DATA_ADDR(a) (0x0000 + (a)) +#define IGC_I2CCMD_SFP_DIAG_ADDR(a) (0x0100 + (a)) +#define IGC_MAX_SGMII_PHY_REG_ADDR 255 +#define IGC_I2CCMD_PHY_TIMEOUT 200 +#define IGC_IVAR_VALID 0x80 +#define IGC_GPIE_NSICR 0x00000001 +#define IGC_GPIE_MSIX_MODE 0x00000010 +#define IGC_GPIE_EIAME 0x40000000 +#define IGC_GPIE_PBA 0x80000000 + +/* Receive Descriptor bit definitions */ +#define IGC_RXD_STAT_DD 0x01 /* Descriptor Done */ +#define IGC_RXD_STAT_EOP 0x02 /* End of Packet */ +#define IGC_RXD_STAT_IXSM 0x04 /* Ignore checksum */ +#define IGC_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ +#define IGC_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ +#define IGC_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ +#define IGC_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ +#define IGC_RXD_STAT_PIF 0x80 /* passed in-exact filter */ +#define IGC_RXD_STAT_IPIDV 0x200 /* IP identification valid */ +#define IGC_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */ +#define IGC_RXD_STAT_DYNINT 0x800 /* Pkt caused INT via DYNINT */ +#define IGC_RXD_ERR_CE 0x01 /* CRC Error */ +#define IGC_RXD_ERR_SE 0x02 /* Symbol Error */ +#define IGC_RXD_ERR_SEQ 0x04 /* Sequence Error */ 
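[Editor's note] A minimal sketch of how a polling receive path typically consumes the legacy Rx descriptor status and error bits defined above (the error-bit group continues just below). The descriptor layout and helper name here are illustrative assumptions, not part of this patch:

/* Illustrative sketch only -- not part of the upstream patch.
 * Assumes a hypothetical legacy descriptor with 'status' and 'errors' bytes.
 */
struct igc_legacy_rx_desc_sketch {
	unsigned long long buffer_addr;
	unsigned short length;
	unsigned short csum;
	unsigned char  status;
	unsigned char  errors;
	unsigned short special;
};

static int igc_rx_desc_usable(const struct igc_legacy_rx_desc_sketch *rxd)
{
	/* Hardware sets DD once the descriptor has been written back. */
	if (!(rxd->status & IGC_RXD_STAT_DD))
		return 0;			/* nothing written back yet */
	/* Only single-descriptor, error-free frames are accepted here. */
	if (!(rxd->status & IGC_RXD_STAT_EOP))
		return 0;			/* multi-descriptor frame: not handled in this sketch */
	if (rxd->errors & (IGC_RXD_ERR_CE | IGC_RXD_ERR_SE | IGC_RXD_ERR_SEQ))
		return 0;			/* CRC/symbol/sequence error: drop */
	return 1;
}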
+#define IGC_RXD_ERR_CXE 0x10 /* Carrier Extension Error */ +#define IGC_RXD_ERR_TCPE 0x20 /* TCP/UDP Checksum Error */ +#define IGC_RXD_ERR_IPE 0x40 /* IP Checksum Error */ +#define IGC_RXD_ERR_RXE 0x80 /* Rx Data Error */ +#define IGC_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ + +#define IGC_RXDEXT_STATERR_TST 0x00000100 /* Time Stamp taken */ +#define IGC_RXDEXT_STATERR_LB 0x00040000 +#define IGC_RXDEXT_STATERR_CE 0x01000000 +#define IGC_RXDEXT_STATERR_SE 0x02000000 +#define IGC_RXDEXT_STATERR_SEQ 0x04000000 +#define IGC_RXDEXT_STATERR_CXE 0x10000000 +#define IGC_RXDEXT_STATERR_TCPE 0x20000000 +#define IGC_RXDEXT_STATERR_IPE 0x40000000 +#define IGC_RXDEXT_STATERR_RXE 0x80000000 + +/* mask to determine if packets should be dropped due to frame errors */ +#define IGC_RXD_ERR_FRAME_ERR_MASK ( \ + IGC_RXD_ERR_CE | \ + IGC_RXD_ERR_SE | \ + IGC_RXD_ERR_SEQ | \ + IGC_RXD_ERR_CXE | \ + IGC_RXD_ERR_RXE) + +/* Same mask, but for extended and packet split descriptors */ +#define IGC_RXDEXT_ERR_FRAME_ERR_MASK ( \ + IGC_RXDEXT_STATERR_CE | \ + IGC_RXDEXT_STATERR_SE | \ + IGC_RXDEXT_STATERR_SEQ | \ + IGC_RXDEXT_STATERR_CXE | \ + IGC_RXDEXT_STATERR_RXE) + +#define IGC_MRQC_ENABLE_RSS_2Q 0x00000001 +#define IGC_MRQC_RSS_FIELD_MASK 0xFFFF0000 +#define IGC_MRQC_RSS_FIELD_IPV4_TCP 0x00010000 +#define IGC_MRQC_RSS_FIELD_IPV4 0x00020000 +#define IGC_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000 +#define IGC_MRQC_RSS_FIELD_IPV6 0x00100000 +#define IGC_MRQC_RSS_FIELD_IPV6_TCP 0x00200000 + +#define IGC_RXDPS_HDRSTAT_HDRSP 0x00008000 + +/* Management Control */ +#define IGC_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */ +#define IGC_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */ +#define IGC_MANC_ARP_EN 0x00002000 /* Enable ARP Request Filtering */ +#define IGC_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */ +#define IGC_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */ +/* Enable MAC address filtering */ +#define IGC_MANC_EN_MAC_ADDR_FILTER 0x00100000 +/* Enable MNG packets to host memory */ +#define IGC_MANC_EN_MNG2HOST 0x00200000 + +#define IGC_MANC2H_PORT_623 0x00000020 /* Port 0x26f */ +#define IGC_MANC2H_PORT_664 0x00000040 /* Port 0x298 */ +#define IGC_MDEF_PORT_623 0x00000800 /* Port 0x26f */ +#define IGC_MDEF_PORT_664 0x00000400 /* Port 0x298 */ + +/* Receive Control */ +#define IGC_RCTL_RST 0x00000001 /* Software reset */ +#define IGC_RCTL_EN 0x00000002 /* enable */ +#define IGC_RCTL_SBP 0x00000004 /* store bad packet */ +#define IGC_RCTL_UPE 0x00000008 /* unicast promisc enable */ +#define IGC_RCTL_MPE 0x00000010 /* multicast promisc enable */ +#define IGC_RCTL_LPE 0x00000020 /* long packet enable */ +#define IGC_RCTL_LBM_NO 0x00000000 /* no loopback mode */ +#define IGC_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */ +#define IGC_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */ +#define IGC_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */ +#define IGC_RCTL_RDMTS_HALF 0x00000000 /* Rx desc min thresh size */ +#define IGC_RCTL_RDMTS_HEX 0x00010000 +#define IGC_RCTL_RDMTS1_HEX IGC_RCTL_RDMTS_HEX +#define IGC_RCTL_MO_SHIFT 12 /* multicast offset shift */ +#define IGC_RCTL_MO_3 0x00003000 /* multicast offset 15:4 */ +#define IGC_RCTL_BAM 0x00008000 /* broadcast enable */ +/* these buffer sizes are valid if IGC_RCTL_BSEX is 0 */ +#define IGC_RCTL_SZ_2048 0x00000000 /* Rx buffer size 2048 */ +#define IGC_RCTL_SZ_1024 0x00010000 /* Rx buffer size 1024 */ +#define IGC_RCTL_SZ_512 0x00020000 /* Rx buffer size 512 */ +#define IGC_RCTL_SZ_256 0x00030000 /* Rx buffer size 256 
*/ +/* these buffer sizes are valid if IGC_RCTL_BSEX is 1 */ +#define IGC_RCTL_SZ_16384 0x00010000 /* Rx buffer size 16384 */ +#define IGC_RCTL_SZ_8192 0x00020000 /* Rx buffer size 8192 */ +#define IGC_RCTL_SZ_4096 0x00030000 /* Rx buffer size 4096 */ +#define IGC_RCTL_VFE 0x00040000 /* vlan filter enable */ +#define IGC_RCTL_CFIEN 0x00080000 /* canonical form enable */ +#define IGC_RCTL_CFI 0x00100000 /* canonical form indicator */ +#define IGC_RCTL_DPF 0x00400000 /* discard pause frames */ +#define IGC_RCTL_PMCF 0x00800000 /* pass MAC control frames */ +#define IGC_RCTL_BSEX 0x02000000 /* Buffer size extension */ +#define IGC_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */ + +/* Use byte values for the following shift parameters + * Usage: + * psrctl |= (((ROUNDUP(value0, 128) >> IGC_PSRCTL_BSIZE0_SHIFT) & + * IGC_PSRCTL_BSIZE0_MASK) | + * ((ROUNDUP(value1, 1024) >> IGC_PSRCTL_BSIZE1_SHIFT) & + * IGC_PSRCTL_BSIZE1_MASK) | + * ((ROUNDUP(value2, 1024) << IGC_PSRCTL_BSIZE2_SHIFT) & + * IGC_PSRCTL_BSIZE2_MASK) | + * ((ROUNDUP(value3, 1024) << IGC_PSRCTL_BSIZE3_SHIFT) |; + * IGC_PSRCTL_BSIZE3_MASK)) + * where value0 = [128..16256], default=256 + * value1 = [1024..64512], default=4096 + * value2 = [0..64512], default=4096 + * value3 = [0..64512], default=0 + */ + +#define IGC_PSRCTL_BSIZE0_MASK 0x0000007F +#define IGC_PSRCTL_BSIZE1_MASK 0x00003F00 +#define IGC_PSRCTL_BSIZE2_MASK 0x003F0000 +#define IGC_PSRCTL_BSIZE3_MASK 0x3F000000 + +#define IGC_PSRCTL_BSIZE0_SHIFT 7 /* Shift _right_ 7 */ +#define IGC_PSRCTL_BSIZE1_SHIFT 2 /* Shift _right_ 2 */ +#define IGC_PSRCTL_BSIZE2_SHIFT 6 /* Shift _left_ 6 */ +#define IGC_PSRCTL_BSIZE3_SHIFT 14 /* Shift _left_ 14 */ + +/* SWFW_SYNC Definitions */ +#define IGC_SWFW_EEP_SM 0x01 +#define IGC_SWFW_PHY0_SM 0x02 +#define IGC_SWFW_PHY1_SM 0x04 +#define IGC_SWFW_CSR_SM 0x08 +#define IGC_SWFW_PHY2_SM 0x20 +#define IGC_SWFW_PHY3_SM 0x40 +#define IGC_SWFW_SW_MNG_SM 0x400 + +/* Device Control */ +#define IGC_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */ +#define IGC_CTRL_PRIOR 0x00000004 /* Priority on PCI. 0=rx,1=fair */ +#define IGC_CTRL_GIO_MASTER_DISABLE 0x00000004 /*Blocks new Master reqs */ +#define IGC_CTRL_LRST 0x00000008 /* Link reset. 
0=normal,1=reset */ +#define IGC_CTRL_ASDE 0x00000020 /* Auto-speed detect enable */ +#define IGC_CTRL_SLU 0x00000040 /* Set link up (Force Link) */ +#define IGC_CTRL_ILOS 0x00000080 /* Invert Loss-Of Signal */ +#define IGC_CTRL_SPD_SEL 0x00000300 /* Speed Select Mask */ +#define IGC_CTRL_SPD_10 0x00000000 /* Force 10Mb */ +#define IGC_CTRL_SPD_100 0x00000100 /* Force 100Mb */ +#define IGC_CTRL_SPD_1000 0x00000200 /* Force 1Gb */ +#define IGC_CTRL_FRCSPD 0x00000800 /* Force Speed */ +#define IGC_CTRL_FRCDPX 0x00001000 /* Force Duplex */ +#define IGC_CTRL_LANPHYPC_OVERRIDE 0x00010000 /* SW control of LANPHYPC */ +#define IGC_CTRL_LANPHYPC_VALUE 0x00020000 /* SW value of LANPHYPC */ +#define IGC_CTRL_MEHE 0x00080000 /* Memory Error Handling Enable */ +#define IGC_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ +#define IGC_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ +#define IGC_CTRL_SWDPIN2 0x00100000 /* SWDPIN 2 value */ +#define IGC_CTRL_ADVD3WUC 0x00100000 /* D3 WUC */ +#define IGC_CTRL_EN_PHY_PWR_MGMT 0x00200000 /* PHY PM enable */ +#define IGC_CTRL_SWDPIN3 0x00200000 /* SWDPIN 3 value */ +#define IGC_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */ +#define IGC_CTRL_SWDPIO2 0x01000000 /* SWDPIN 2 input or output */ +#define IGC_CTRL_SWDPIO3 0x02000000 /* SWDPIN 3 input or output */ +#define IGC_CTRL_DEV_RST 0x20000000 /* Device reset */ +#define IGC_CTRL_RST 0x04000000 /* Global reset */ +#define IGC_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */ +#define IGC_CTRL_TFCE 0x10000000 /* Transmit flow control enable */ +#define IGC_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */ +#define IGC_CTRL_PHY_RST 0x80000000 /* PHY Reset */ +#define IGC_CTRL_I2C_ENA 0x02000000 /* I2C enable */ + +#define IGC_CTRL_MDIO_DIR IGC_CTRL_SWDPIO2 +#define IGC_CTRL_MDIO IGC_CTRL_SWDPIN2 +#define IGC_CTRL_MDC_DIR IGC_CTRL_SWDPIO3 +#define IGC_CTRL_MDC IGC_CTRL_SWDPIN3 + +#define IGC_CONNSW_AUTOSENSE_EN 0x1 +#define IGC_CONNSW_ENRGSRC 0x4 +#define IGC_CONNSW_PHYSD 0x400 +#define IGC_CONNSW_PHY_PDN 0x800 +#define IGC_CONNSW_SERDESD 0x200 +#define IGC_CONNSW_AUTOSENSE_CONF 0x2 +#define IGC_PCS_CFG_PCS_EN 8 +#define IGC_PCS_LCTL_FLV_LINK_UP 1 +#define IGC_PCS_LCTL_FSV_10 0 +#define IGC_PCS_LCTL_FSV_100 2 +#define IGC_PCS_LCTL_FSV_1000 4 +#define IGC_PCS_LCTL_FDV_FULL 8 +#define IGC_PCS_LCTL_FSD 0x10 +#define IGC_PCS_LCTL_FORCE_LINK 0x20 +#define IGC_PCS_LCTL_FORCE_FCTRL 0x80 +#define IGC_PCS_LCTL_AN_ENABLE 0x10000 +#define IGC_PCS_LCTL_AN_RESTART 0x20000 +#define IGC_PCS_LCTL_AN_TIMEOUT 0x40000 +#define IGC_ENABLE_SERDES_LOOPBACK 0x0410 + +#define IGC_PCS_LSTS_LINK_OK 1 +#define IGC_PCS_LSTS_SPEED_100 2 +#define IGC_PCS_LSTS_SPEED_1000 4 +#define IGC_PCS_LSTS_DUPLEX_FULL 8 +#define IGC_PCS_LSTS_SYNK_OK 0x10 +#define IGC_PCS_LSTS_AN_COMPLETE 0x10000 + +/* Device Status */ +#define IGC_STATUS_FD 0x00000001 /* Duplex 0=half 1=full */ +#define IGC_STATUS_LU 0x00000002 /* Link up.0=no,1=link */ +#define IGC_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */ +#define IGC_STATUS_FUNC_SHIFT 2 +#define IGC_STATUS_FUNC_1 0x00000004 /* Function 1 */ +#define IGC_STATUS_TXOFF 0x00000010 /* transmission paused */ +#define IGC_STATUS_SPEED_MASK 0x000000C0 +#define IGC_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */ +#define IGC_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ +#define IGC_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ +/* Speed 2.5Gb/s indication for I225 */ +#define IGC_STATUS_SPEED_2500 0x00400000 +#define IGC_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Compltn by NVM */ +#define IGC_STATUS_PHYRA 
0x00000400 /* PHY Reset Asserted */ +#define IGC_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Master request status */ +#define IGC_STATUS_PCI66 0x00000800 /* In 66Mhz slot */ +#define IGC_STATUS_BUS64 0x00001000 /* In 64 bit slot */ +#define IGC_STATUS_2P5_SKU 0x00001000 /* Val of 2.5GBE SKU strap */ +#define IGC_STATUS_2P5_SKU_OVER 0x00002000 /* Val of 2.5GBE SKU Over */ +#define IGC_STATUS_PCIX_MODE 0x00002000 /* PCI-X mode */ +#define IGC_STATUS_PCIX_SPEED 0x0000C000 /* PCI-X bus speed */ + +/* Constants used to interpret the masked PCI-X bus speed. */ +#define IGC_STATUS_PCIX_SPEED_66 0x00000000 /* PCI-X bus spd 50-66MHz */ +#define IGC_STATUS_PCIX_SPEED_100 0x00004000 /* PCI-X bus spd 66-100MHz */ +#define IGC_STATUS_PCIX_SPEED_133 0x00008000 /* PCI-X bus spd 100-133MHz*/ +#define IGC_STATUS_PCIM_STATE 0x40000000 /* PCIm function state */ + +#define SPEED_10 10 +#define SPEED_100 100 +#define SPEED_1000 1000 +#define SPEED_2500 2500 +#define HALF_DUPLEX 1 +#define FULL_DUPLEX 2 + +#define PHY_FORCE_TIME 20 + +#define ADVERTISE_10_HALF 0x0001 +#define ADVERTISE_10_FULL 0x0002 +#define ADVERTISE_100_HALF 0x0004 +#define ADVERTISE_100_FULL 0x0008 +#define ADVERTISE_1000_HALF 0x0010 /* Not used, just FYI */ +#define ADVERTISE_1000_FULL 0x0020 +#define ADVERTISE_2500_HALF 0x0040 /* NOT used, just FYI */ +#define ADVERTISE_2500_FULL 0x0080 + +/* 1000/H is not supported, nor spec-compliant. */ +#define IGC_ALL_SPEED_DUPLEX ( \ + ADVERTISE_10_HALF | ADVERTISE_10_FULL | ADVERTISE_100_HALF | \ + ADVERTISE_100_FULL | ADVERTISE_1000_FULL) +#define IGC_ALL_SPEED_DUPLEX_2500 ( \ + ADVERTISE_10_HALF | ADVERTISE_10_FULL | ADVERTISE_100_HALF | \ + ADVERTISE_100_FULL | ADVERTISE_1000_FULL | ADVERTISE_2500_FULL) +#define IGC_ALL_NOT_GIG ( \ + ADVERTISE_10_HALF | ADVERTISE_10_FULL | ADVERTISE_100_HALF | \ + ADVERTISE_100_FULL) +#define IGC_ALL_100_SPEED (ADVERTISE_100_HALF | ADVERTISE_100_FULL) +#define IGC_ALL_10_SPEED (ADVERTISE_10_HALF | ADVERTISE_10_FULL) +#define IGC_ALL_HALF_DUPLEX (ADVERTISE_10_HALF | ADVERTISE_100_HALF) + +#define AUTONEG_ADVERTISE_SPEED_DEFAULT IGC_ALL_SPEED_DUPLEX +#define AUTONEG_ADVERTISE_SPEED_DEFAULT_2500 IGC_ALL_SPEED_DUPLEX_2500 + +/* LED Control */ +#define IGC_PHY_LED0_MODE_MASK 0x00000007 +#define IGC_PHY_LED0_IVRT 0x00000008 +#define IGC_PHY_LED0_MASK 0x0000001F + +#define IGC_LEDCTL_LED0_MODE_MASK 0x0000000F +#define IGC_LEDCTL_LED0_MODE_SHIFT 0 +#define IGC_LEDCTL_LED0_IVRT 0x00000040 +#define IGC_LEDCTL_LED0_BLINK 0x00000080 + +#define IGC_LEDCTL_MODE_LINK_UP 0x2 +#define IGC_LEDCTL_MODE_LED_ON 0xE +#define IGC_LEDCTL_MODE_LED_OFF 0xF + +/* Transmit Descriptor bit definitions */ +#define IGC_TXD_DTYP_D 0x00100000 /* Data Descriptor */ +#define IGC_TXD_DTYP_C 0x00000000 /* Context Descriptor */ +#define IGC_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ +#define IGC_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ +#define IGC_TXD_CMD_EOP 0x01000000 /* End of Packet */ +#define IGC_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define IGC_TXD_CMD_IC 0x04000000 /* Insert Checksum */ +#define IGC_TXD_CMD_RS 0x08000000 /* Report Status */ +#define IGC_TXD_CMD_RPS 0x10000000 /* Report Packet Sent */ +#define IGC_TXD_CMD_DEXT 0x20000000 /* Desc extension (0 = legacy) */ +#define IGC_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */ +#define IGC_TXD_CMD_IDE 0x80000000 /* Enable Tidv register */ +#define IGC_TXD_STAT_DD 0x00000001 /* Descriptor Done */ +#define IGC_TXD_STAT_EC 0x00000002 /* Excess Collisions */ +#define IGC_TXD_STAT_LC 0x00000004 /* Late Collisions */ 
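[Editor's note] The Device Status bits above encode link, speed and duplex; a short hedged example of turning them into the SPEED_*/duplex constants follows. igc_decode_status_sketch() is a hypothetical helper for illustration, not an API provided by this code:

/* Illustrative sketch only -- not part of the upstream patch. */
static void igc_decode_status_sketch(unsigned int status,
				     unsigned short *speed,
				     unsigned short *duplex)
{
	if (!(status & IGC_STATUS_LU)) {	/* no link */
		*speed = 0;
		*duplex = 0;
		return;
	}
	if (status & IGC_STATUS_SPEED_2500)	/* I225 2.5G indication */
		*speed = SPEED_2500;
	else if ((status & IGC_STATUS_SPEED_MASK) == IGC_STATUS_SPEED_1000)
		*speed = SPEED_1000;
	else if ((status & IGC_STATUS_SPEED_MASK) == IGC_STATUS_SPEED_100)
		*speed = SPEED_100;
	else
		*speed = SPEED_10;
	*duplex = (status & IGC_STATUS_FD) ? FULL_DUPLEX : HALF_DUPLEX;
}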
+#define IGC_TXD_STAT_TU 0x00000008 /* Transmit underrun */ +#define IGC_TXD_CMD_TCP 0x01000000 /* TCP packet */ +#define IGC_TXD_CMD_IP 0x02000000 /* IP packet */ +#define IGC_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */ +#define IGC_TXD_STAT_TC 0x00000004 /* Tx Underrun */ +#define IGC_TXD_EXTCMD_TSTAMP 0x00000010 /* IEEE1588 Timestamp packet */ + +/* Transmit Control */ +#define IGC_TCTL_EN 0x00000002 /* enable Tx */ +#define IGC_TCTL_PSP 0x00000008 /* pad short packets */ +#define IGC_TCTL_CT 0x00000ff0 /* collision threshold */ +#define IGC_TCTL_COLD 0x003ff000 /* collision distance */ +#define IGC_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */ +#define IGC_TCTL_MULR 0x10000000 /* Multiple request support */ + +/* Transmit Arbitration Count */ +#define IGC_TARC0_ENABLE 0x00000400 /* Enable Tx Queue 0 */ + +/* SerDes Control */ +#define IGC_SCTL_DISABLE_SERDES_LOOPBACK 0x0400 +#define IGC_SCTL_ENABLE_SERDES_LOOPBACK 0x0410 + +/* Receive Checksum Control */ +#define IGC_RXCSUM_IPOFL 0x00000100 /* IPv4 checksum offload */ +#define IGC_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */ +#define IGC_RXCSUM_CRCOFL 0x00000800 /* CRC32 offload enable */ +#define IGC_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */ +#define IGC_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */ + +/* GPY211 - I225 defines */ +#define GPY_MMD_MASK 0xFFFF0000 +#define GPY_MMD_SHIFT 16 +#define GPY_REG_MASK 0x0000FFFF +/* Header split receive */ +#define IGC_RFCTL_NFSW_DIS 0x00000040 +#define IGC_RFCTL_NFSR_DIS 0x00000080 +#define IGC_RFCTL_ACK_DIS 0x00001000 +#define IGC_RFCTL_EXTEN 0x00008000 +#define IGC_RFCTL_IPV6_EX_DIS 0x00010000 +#define IGC_RFCTL_NEW_IPV6_EXT_DIS 0x00020000 +#define IGC_RFCTL_LEF 0x00040000 + +/* Collision related configuration parameters */ +#define IGC_CT_SHIFT 4 +#define IGC_COLLISION_THRESHOLD 15 +#define IGC_COLLISION_DISTANCE 63 +#define IGC_COLD_SHIFT 12 + +/* Default values for the transmit IPG register */ +#define DEFAULT_82542_TIPG_IPGT 10 +#define DEFAULT_82543_TIPG_IPGT_FIBER 9 +#define DEFAULT_82543_TIPG_IPGT_COPPER 8 + +#define IGC_TIPG_IPGT_MASK 0x000003FF + +#define DEFAULT_82542_TIPG_IPGR1 2 +#define DEFAULT_82543_TIPG_IPGR1 8 +#define IGC_TIPG_IPGR1_SHIFT 10 + +#define DEFAULT_82542_TIPG_IPGR2 10 +#define DEFAULT_82543_TIPG_IPGR2 6 +#define DEFAULT_80003ES2LAN_TIPG_IPGR2 7 +#define IGC_TIPG_IPGR2_SHIFT 20 + +/* Ethertype field values */ +#define ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.3ac packet */ + +#define ETHERNET_FCS_SIZE 4 +#define MAX_JUMBO_FRAME_SIZE 0x3F00 +/* The datasheet maximum supported RX size is 9.5KB (9728 bytes) */ +#define MAX_RX_JUMBO_FRAME_SIZE 0x2600 +#define IGC_TX_PTR_GAP 0x1F + +/* Extended Configuration Control and Size */ +#define IGC_EXTCNF_CTRL_MDIO_SW_OWNERSHIP 0x00000020 +#define IGC_EXTCNF_CTRL_LCD_WRITE_ENABLE 0x00000001 +#define IGC_EXTCNF_CTRL_OEM_WRITE_ENABLE 0x00000008 +#define IGC_EXTCNF_CTRL_SWFLAG 0x00000020 +#define IGC_EXTCNF_CTRL_GATE_PHY_CFG 0x00000080 +#define IGC_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK 0x00FF0000 +#define IGC_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT 16 +#define IGC_EXTCNF_CTRL_EXT_CNF_POINTER_MASK 0x0FFF0000 +#define IGC_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT 16 + +#define IGC_PHY_CTRL_D0A_LPLU 0x00000002 +#define IGC_PHY_CTRL_NOND0A_LPLU 0x00000004 +#define IGC_PHY_CTRL_NOND0A_GBE_DISABLE 0x00000008 +#define IGC_PHY_CTRL_GBE_DISABLE 0x00000040 + +#define IGC_KABGTXD_BGSQLBIAS 0x00050000 + +/* Low Power IDLE Control */ +#define IGC_LPIC_LPIET_SHIFT 24 /* Low Power Idle Entry Time */ + +/* PBA constants 
*/ +#define IGC_PBA_8K 0x0008 /* 8KB */ +#define IGC_PBA_10K 0x000A /* 10KB */ +#define IGC_PBA_12K 0x000C /* 12KB */ +#define IGC_PBA_14K 0x000E /* 14KB */ +#define IGC_PBA_16K 0x0010 /* 16KB */ +#define IGC_PBA_18K 0x0012 +#define IGC_PBA_20K 0x0014 +#define IGC_PBA_22K 0x0016 +#define IGC_PBA_24K 0x0018 +#define IGC_PBA_26K 0x001A +#define IGC_PBA_30K 0x001E +#define IGC_PBA_32K 0x0020 +#define IGC_PBA_34K 0x0022 +#define IGC_PBA_35K 0x0023 +#define IGC_PBA_38K 0x0026 +#define IGC_PBA_40K 0x0028 +#define IGC_PBA_48K 0x0030 /* 48KB */ +#define IGC_PBA_64K 0x0040 /* 64KB */ + +#define IGC_PBA_RXA_MASK 0xFFFF + +#define IGC_PBS_16K IGC_PBA_16K + +/* Uncorrectable/correctable ECC Error counts and enable bits */ +#define IGC_PBECCSTS_CORR_ERR_CNT_MASK 0x000000FF +#define IGC_PBECCSTS_UNCORR_ERR_CNT_MASK 0x0000FF00 +#define IGC_PBECCSTS_UNCORR_ERR_CNT_SHIFT 8 +#define IGC_PBECCSTS_ECC_ENABLE 0x00010000 + +#define IFS_MAX 80 +#define IFS_MIN 40 +#define IFS_RATIO 4 +#define IFS_STEP 10 +#define MIN_NUM_XMITS 1000 + +/* SW Semaphore Register */ +#define IGC_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ +#define IGC_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ +#define IGC_SWSM_DRV_LOAD 0x00000008 /* Driver Loaded Bit */ + +#define IGC_SWSM2_LOCK 0x00000002 /* Secondary driver semaphore bit */ + +/* Interrupt Cause Read */ +#define IGC_ICR_TXDW 0x00000001 /* Transmit desc written back */ +#define IGC_ICR_TXQE 0x00000002 /* Transmit Queue empty */ +#define IGC_ICR_LSC 0x00000004 /* Link Status Change */ +#define IGC_ICR_RXSEQ 0x00000008 /* Rx sequence error */ +#define IGC_ICR_RXDMT0 0x00000010 /* Rx desc min. threshold (0) */ +#define IGC_ICR_RXO 0x00000040 /* Rx overrun */ +#define IGC_ICR_RXT0 0x00000080 /* Rx timer intr (ring 0) */ +#define IGC_ICR_VMMB 0x00000100 /* VM MB event */ +#define IGC_ICR_RXCFG 0x00000400 /* Rx /c/ ordered set */ +#define IGC_ICR_GPI_EN0 0x00000800 /* GP Int 0 */ +#define IGC_ICR_GPI_EN1 0x00001000 /* GP Int 1 */ +#define IGC_ICR_GPI_EN2 0x00002000 /* GP Int 2 */ +#define IGC_ICR_GPI_EN3 0x00004000 /* GP Int 3 */ +#define IGC_ICR_TXD_LOW 0x00008000 +#define IGC_ICR_MNG 0x00040000 /* Manageability event */ +#define IGC_ICR_ECCER 0x00400000 /* Uncorrectable ECC Error */ +#define IGC_ICR_TS 0x00080000 /* Time Sync Interrupt */ +#define IGC_ICR_DRSTA 0x40000000 /* Device Reset Asserted */ +/* If this bit asserted, the driver should claim the interrupt */ +#define IGC_ICR_INT_ASSERTED 0x80000000 +#define IGC_ICR_DOUTSYNC 0x10000000 /* NIC DMA out of sync */ +#define IGC_ICR_RXQ0 0x00100000 /* Rx Queue 0 Interrupt */ +#define IGC_ICR_RXQ1 0x00200000 /* Rx Queue 1 Interrupt */ +#define IGC_ICR_TXQ0 0x00400000 /* Tx Queue 0 Interrupt */ +#define IGC_ICR_TXQ1 0x00800000 /* Tx Queue 1 Interrupt */ +#define IGC_ICR_OTHER 0x01000000 /* Other Interrupts */ +#define IGC_ICR_FER 0x00400000 /* Fatal Error */ + +#define IGC_ICR_THS 0x00800000 /* ICR.THS: Thermal Sensor Event*/ +#define IGC_ICR_MDDET 0x10000000 /* Malicious Driver Detect */ + +/* PBA ECC Register */ +#define IGC_PBA_ECC_COUNTER_MASK 0xFFF00000 /* ECC counter mask */ +#define IGC_PBA_ECC_COUNTER_SHIFT 20 /* ECC counter shift value */ +#define IGC_PBA_ECC_CORR_EN 0x00000001 /* Enable ECC error correction */ +#define IGC_PBA_ECC_STAT_CLR 0x00000002 /* Clear ECC error counter */ +#define IGC_PBA_ECC_INT_EN 0x00000004 /* Enable ICR bit 5 on ECC error */ + +/* Extended Interrupt Cause Read */ +#define IGC_EICR_RX_QUEUE0 0x00000001 /* Rx Queue 0 Interrupt */ +#define IGC_EICR_RX_QUEUE1 0x00000002 /* Rx Queue 1 Interrupt 
*/ +#define IGC_EICR_RX_QUEUE2 0x00000004 /* Rx Queue 2 Interrupt */ +#define IGC_EICR_RX_QUEUE3 0x00000008 /* Rx Queue 3 Interrupt */ +#define IGC_EICR_TX_QUEUE0 0x00000100 /* Tx Queue 0 Interrupt */ +#define IGC_EICR_TX_QUEUE1 0x00000200 /* Tx Queue 1 Interrupt */ +#define IGC_EICR_TX_QUEUE2 0x00000400 /* Tx Queue 2 Interrupt */ +#define IGC_EICR_TX_QUEUE3 0x00000800 /* Tx Queue 3 Interrupt */ +#define IGC_EICR_TCP_TIMER 0x40000000 /* TCP Timer */ +#define IGC_EICR_OTHER 0x80000000 /* Interrupt Cause Active */ +/* TCP Timer */ +#define IGC_TCPTIMER_KS 0x00000100 /* KickStart */ +#define IGC_TCPTIMER_COUNT_ENABLE 0x00000200 /* Count Enable */ +#define IGC_TCPTIMER_COUNT_FINISH 0x00000400 /* Count finish */ +#define IGC_TCPTIMER_LOOP 0x00000800 /* Loop */ + +/* This defines the bits that are set in the Interrupt Mask + * Set/Read Register. Each bit is documented below: + * o RXT0 = Receiver Timer Interrupt (ring 0) + * o TXDW = Transmit Descriptor Written Back + * o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0) + * o RXSEQ = Receive Sequence Error + * o LSC = Link Status Change + */ +#define IMS_ENABLE_MASK ( \ + IGC_IMS_RXT0 | \ + IGC_IMS_TXDW | \ + IGC_IMS_RXDMT0 | \ + IGC_IMS_RXSEQ | \ + IGC_IMS_LSC) + +/* Interrupt Mask Set */ +#define IGC_IMS_TXDW IGC_ICR_TXDW /* Tx desc written back */ +#define IGC_IMS_TXQE IGC_ICR_TXQE /* Transmit Queue empty */ +#define IGC_IMS_LSC IGC_ICR_LSC /* Link Status Change */ +#define IGC_IMS_VMMB IGC_ICR_VMMB /* Mail box activity */ +#define IGC_IMS_RXSEQ IGC_ICR_RXSEQ /* Rx sequence error */ +#define IGC_IMS_RXDMT0 IGC_ICR_RXDMT0 /* Rx desc min. threshold */ +#define IGC_QVECTOR_MASK 0x7FFC /* Q-vector mask */ +#define IGC_ITR_VAL_MASK 0x04 /* ITR value mask */ +#define IGC_IMS_RXO IGC_ICR_RXO /* Rx overrun */ +#define IGC_IMS_RXT0 IGC_ICR_RXT0 /* Rx timer intr */ +#define IGC_IMS_TXD_LOW IGC_ICR_TXD_LOW +#define IGC_IMS_ECCER IGC_ICR_ECCER /* Uncorrectable ECC Error */ +#define IGC_IMS_TS IGC_ICR_TS /* Time Sync Interrupt */ +#define IGC_IMS_DRSTA IGC_ICR_DRSTA /* Device Reset Asserted */ +#define IGC_IMS_DOUTSYNC IGC_ICR_DOUTSYNC /* NIC DMA out of sync */ +#define IGC_IMS_RXQ0 IGC_ICR_RXQ0 /* Rx Queue 0 Interrupt */ +#define IGC_IMS_RXQ1 IGC_ICR_RXQ1 /* Rx Queue 1 Interrupt */ +#define IGC_IMS_TXQ0 IGC_ICR_TXQ0 /* Tx Queue 0 Interrupt */ +#define IGC_IMS_TXQ1 IGC_ICR_TXQ1 /* Tx Queue 1 Interrupt */ +#define IGC_IMS_OTHER IGC_ICR_OTHER /* Other Interrupts */ +#define IGC_IMS_FER IGC_ICR_FER /* Fatal Error */ + +#define IGC_IMS_THS IGC_ICR_THS /* ICR.TS: Thermal Sensor Event*/ +#define IGC_IMS_MDDET IGC_ICR_MDDET /* Malicious Driver Detect */ +/* Extended Interrupt Mask Set */ +#define IGC_EIMS_RX_QUEUE0 IGC_EICR_RX_QUEUE0 /* Rx Queue 0 Interrupt */ +#define IGC_EIMS_RX_QUEUE1 IGC_EICR_RX_QUEUE1 /* Rx Queue 1 Interrupt */ +#define IGC_EIMS_RX_QUEUE2 IGC_EICR_RX_QUEUE2 /* Rx Queue 2 Interrupt */ +#define IGC_EIMS_RX_QUEUE3 IGC_EICR_RX_QUEUE3 /* Rx Queue 3 Interrupt */ +#define IGC_EIMS_TX_QUEUE0 IGC_EICR_TX_QUEUE0 /* Tx Queue 0 Interrupt */ +#define IGC_EIMS_TX_QUEUE1 IGC_EICR_TX_QUEUE1 /* Tx Queue 1 Interrupt */ +#define IGC_EIMS_TX_QUEUE2 IGC_EICR_TX_QUEUE2 /* Tx Queue 2 Interrupt */ +#define IGC_EIMS_TX_QUEUE3 IGC_EICR_TX_QUEUE3 /* Tx Queue 3 Interrupt */ +#define IGC_EIMS_TCP_TIMER IGC_EICR_TCP_TIMER /* TCP Timer */ +#define IGC_EIMS_OTHER IGC_EICR_OTHER /* Interrupt Cause Active */ + +/* Interrupt Cause Set */ +#define IGC_ICS_LSC IGC_ICR_LSC /* Link Status Change */ +#define IGC_ICS_RXSEQ IGC_ICR_RXSEQ /* Rx sequence error */ +#define 
IGC_ICS_RXDMT0 IGC_ICR_RXDMT0 /* Rx desc min. threshold */ +#define IGC_ICS_DRSTA IGC_ICR_DRSTA /* Device Reset Aserted */ + +/* Extended Interrupt Cause Set */ +#define IGC_EICS_RX_QUEUE0 IGC_EICR_RX_QUEUE0 /* Rx Queue 0 Interrupt */ +#define IGC_EICS_RX_QUEUE1 IGC_EICR_RX_QUEUE1 /* Rx Queue 1 Interrupt */ +#define IGC_EICS_RX_QUEUE2 IGC_EICR_RX_QUEUE2 /* Rx Queue 2 Interrupt */ +#define IGC_EICS_RX_QUEUE3 IGC_EICR_RX_QUEUE3 /* Rx Queue 3 Interrupt */ +#define IGC_EICS_TX_QUEUE0 IGC_EICR_TX_QUEUE0 /* Tx Queue 0 Interrupt */ +#define IGC_EICS_TX_QUEUE1 IGC_EICR_TX_QUEUE1 /* Tx Queue 1 Interrupt */ +#define IGC_EICS_TX_QUEUE2 IGC_EICR_TX_QUEUE2 /* Tx Queue 2 Interrupt */ +#define IGC_EICS_TX_QUEUE3 IGC_EICR_TX_QUEUE3 /* Tx Queue 3 Interrupt */ +#define IGC_EICS_TCP_TIMER IGC_EICR_TCP_TIMER /* TCP Timer */ +#define IGC_EICS_OTHER IGC_EICR_OTHER /* Interrupt Cause Active */ + +#define IGC_EITR_ITR_INT_MASK 0x0000FFFF +#define IGC_EITR_INTERVAL 0x00007FFC +/* IGC_EITR_CNT_IGNR is only for 82576 and newer */ +#define IGC_EITR_CNT_IGNR 0x80000000 /* Don't reset counters on write */ + +/* Transmit Descriptor Control */ +#define IGC_TXDCTL_PTHRESH 0x0000003F /* TXDCTL Prefetch Threshold */ +#define IGC_TXDCTL_HTHRESH 0x00003F00 /* TXDCTL Host Threshold */ +#define IGC_TXDCTL_WTHRESH 0x003F0000 /* TXDCTL Writeback Threshold */ +#define IGC_TXDCTL_GRAN 0x01000000 /* TXDCTL Granularity */ +#define IGC_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */ +#define IGC_TXDCTL_MAX_TX_DESC_PREFETCH 0x0100001F /* GRAN=1, PTHRESH=31 */ +/* Enable the counting of descriptors still to be processed. */ +#define IGC_TXDCTL_COUNT_DESC 0x00400000 + +/* Flow Control Constants */ +#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001 +#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100 +#define FLOW_CONTROL_TYPE 0x8808 + +/* 802.1q VLAN Packet Size */ +#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMA'd) */ +#define IGC_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */ + +/* Receive Address + * Number of high/low register pairs in the RAR. The RAR (Receive Address + * Registers) holds the directed and multicast addresses that we monitor. + * Technically, we have 16 spots. However, we reserve one of these spots + * (RAR[15]) for our directed address used by controllers with + * manageability enabled, allowing us room for 15 multicast addresses. 
+ */ +#define IGC_RAR_ENTRIES 15 +#define IGC_RAH_AV 0x80000000 /* Receive descriptor valid */ +#define IGC_RAL_MAC_ADDR_LEN 4 +#define IGC_RAH_MAC_ADDR_LEN 2 +#define IGC_RAH_QUEUE_MASK_82575 0x000C0000 +#define IGC_RAH_POOL_1 0x00040000 + +/* Error Codes */ +#define IGC_SUCCESS 0 +#define IGC_ERR_NVM 1 +#define IGC_ERR_PHY 2 +#define IGC_ERR_CONFIG 3 +#define IGC_ERR_PARAM 4 +#define IGC_ERR_MAC_INIT 5 +#define IGC_ERR_PHY_TYPE 6 +#define IGC_ERR_RESET 9 +#define IGC_ERR_MASTER_REQUESTS_PENDING 10 +#define IGC_ERR_HOST_INTERFACE_COMMAND 11 +#define IGC_BLK_PHY_RESET 12 +#define IGC_ERR_SWFW_SYNC 13 +#define IGC_NOT_IMPLEMENTED 14 +#define IGC_ERR_MBX 15 +#define IGC_ERR_INVALID_ARGUMENT 16 +#define IGC_ERR_NO_SPACE 17 +#define IGC_ERR_NVM_PBA_SECTION 18 +#define IGC_ERR_I2C 19 +#define IGC_ERR_INVM_VALUE_NOT_FOUND 20 + +/* Loop limit on how long we wait for auto-negotiation to complete */ +#define FIBER_LINK_UP_LIMIT 50 +#define COPPER_LINK_UP_LIMIT 10 +#define PHY_AUTO_NEG_LIMIT 45 +#define PHY_FORCE_LIMIT 20 +/* Number of 100 microseconds we wait for PCI Express master disable */ +#define MASTER_DISABLE_TIMEOUT 800 +/* Number of milliseconds we wait for PHY configuration done after MAC reset */ +#define PHY_CFG_TIMEOUT 100 +/* Number of 2 milliseconds we wait for acquiring MDIO ownership. */ +#define MDIO_OWNERSHIP_TIMEOUT 10 +/* Number of milliseconds for NVM auto read done after MAC reset. */ +#define AUTO_READ_DONE_TIMEOUT 10 + +/* Flow Control */ +#define IGC_FCRTH_RTH 0x0000FFF8 /* Mask Bits[15:3] for RTH */ +#define IGC_FCRTL_RTL 0x0000FFF8 /* Mask Bits[15:3] for RTL */ +#define IGC_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */ + +/* Transmit Configuration Word */ +#define IGC_TXCW_FD 0x00000020 /* TXCW full duplex */ +#define IGC_TXCW_PAUSE 0x00000080 /* TXCW sym pause request */ +#define IGC_TXCW_ASM_DIR 0x00000100 /* TXCW astm pause direction */ +#define IGC_TXCW_PAUSE_MASK 0x00000180 /* TXCW pause request mask */ +#define IGC_TXCW_ANE 0x80000000 /* Auto-neg enable */ + +/* Receive Configuration Word */ +#define IGC_RXCW_CW 0x0000ffff /* RxConfigWord mask */ +#define IGC_RXCW_IV 0x08000000 /* Receive config invalid */ +#define IGC_RXCW_C 0x20000000 /* Receive config */ +#define IGC_RXCW_SYNCH 0x40000000 /* Receive config synch */ + +#define IGC_TSYNCTXCTL_VALID 0x00000001 /* Tx timestamp valid */ +#define IGC_TSYNCTXCTL_ENABLED 0x00000010 /* enable Tx timestamping */ + +/* HH Time Sync */ +#define IGC_TSYNCTXCTL_MAX_ALLOWED_DLY_MASK 0x0000F000 /* max delay */ +#define IGC_TSYNCTXCTL_SYNC_COMP_ERR 0x20000000 /* sync err */ +#define IGC_TSYNCTXCTL_SYNC_COMP 0x40000000 /* sync complete */ +#define IGC_TSYNCTXCTL_START_SYNC 0x80000000 /* initiate sync */ + +#define IGC_TSYNCRXCTL_VALID 0x00000001 /* Rx timestamp valid */ +#define IGC_TSYNCRXCTL_TYPE_MASK 0x0000000E /* Rx type mask */ +#define IGC_TSYNCRXCTL_TYPE_L2_V2 0x00 +#define IGC_TSYNCRXCTL_TYPE_L4_V1 0x02 +#define IGC_TSYNCRXCTL_TYPE_L2_L4_V2 0x04 +#define IGC_TSYNCRXCTL_TYPE_ALL 0x08 +#define IGC_TSYNCRXCTL_TYPE_EVENT_V2 0x0A +#define IGC_TSYNCRXCTL_ENABLED 0x00000010 /* enable Rx timestamping */ +#define IGC_TSYNCRXCTL_SYSCFI 0x00000020 /* Sys clock frequency */ + +#define IGC_RXMTRL_PTP_V1_SYNC_MESSAGE 0x00000000 +#define IGC_RXMTRL_PTP_V1_DELAY_REQ_MESSAGE 0x00010000 + +#define IGC_RXMTRL_PTP_V2_SYNC_MESSAGE 0x00000000 +#define IGC_RXMTRL_PTP_V2_DELAY_REQ_MESSAGE 0x01000000 + +#define IGC_TSYNCRXCFG_PTP_V1_CTRLT_MASK 0x000000FF +#define IGC_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE 0x00 +#define 
IGC_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE 0x01 +#define IGC_TSYNCRXCFG_PTP_V1_FOLLOWUP_MESSAGE 0x02 +#define IGC_TSYNCRXCFG_PTP_V1_DELAY_RESP_MESSAGE 0x03 +#define IGC_TSYNCRXCFG_PTP_V1_MANAGEMENT_MESSAGE 0x04 + +#define IGC_TSYNCRXCFG_PTP_V2_MSGID_MASK 0x00000F00 +#define IGC_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE 0x0000 +#define IGC_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE 0x0100 +#define IGC_TSYNCRXCFG_PTP_V2_PATH_DELAY_REQ_MESSAGE 0x0200 +#define IGC_TSYNCRXCFG_PTP_V2_PATH_DELAY_RESP_MESSAGE 0x0300 +#define IGC_TSYNCRXCFG_PTP_V2_FOLLOWUP_MESSAGE 0x0800 +#define IGC_TSYNCRXCFG_PTP_V2_DELAY_RESP_MESSAGE 0x0900 +#define IGC_TSYNCRXCFG_PTP_V2_PATH_DELAY_FOLLOWUP_MESSAGE 0x0A00 +#define IGC_TSYNCRXCFG_PTP_V2_ANNOUNCE_MESSAGE 0x0B00 +#define IGC_TSYNCRXCFG_PTP_V2_SIGNALLING_MESSAGE 0x0C00 +#define IGC_TSYNCRXCFG_PTP_V2_MANAGEMENT_MESSAGE 0x0D00 + +#define IGC_TIMINCA_16NS_SHIFT 24 +#define IGC_TIMINCA_INCPERIOD_SHIFT 24 +#define IGC_TIMINCA_INCVALUE_MASK 0x00FFFFFF + +/* Time Sync Interrupt Cause/Mask Register Bits */ +#define TSINTR_SYS_WRAP (1 << 0) /* SYSTIM Wrap around. */ +#define TSINTR_TXTS (1 << 1) /* Transmit Timestamp. */ +#define TSINTR_TT0 (1 << 3) /* Target Time 0 Trigger. */ +#define TSINTR_TT1 (1 << 4) /* Target Time 1 Trigger. */ +#define TSINTR_AUTT0 (1 << 5) /* Auxiliary Timestamp 0 Taken. */ +#define TSINTR_AUTT1 (1 << 6) /* Auxiliary Timestamp 1 Taken. */ + +#define TSYNC_INTERRUPTS TSINTR_TXTS + +/* TSAUXC Configuration Bits */ +#define TSAUXC_EN_TT0 (1 << 0) /* Enable target time 0. */ +#define TSAUXC_EN_TT1 (1 << 1) /* Enable target time 1. */ +#define TSAUXC_EN_CLK0 (1 << 2) /* Enable Configurable Frequency Clock 0. */ +#define TSAUXC_ST0 (1 << 4) /* Start Clock 0 Toggle on Target Time 0. */ +#define TSAUXC_EN_CLK1 (1 << 5) /* Enable Configurable Frequency Clock 1. */ +#define TSAUXC_ST1 (1 << 7) /* Start Clock 1 Toggle on Target Time 1. */ +#define TSAUXC_EN_TS0 (1 << 8) /* Enable hardware timestamp 0. */ +#define TSAUXC_EN_TS1 (1 << 10) /* Enable hardware timestamp 0. */ + +/* SDP Configuration Bits */ +#define AUX0_SEL_SDP0 (0u << 0) /* Assign SDP0 to auxiliary time stamp 0. */ +#define AUX0_SEL_SDP1 (1u << 0) /* Assign SDP1 to auxiliary time stamp 0. */ +#define AUX0_SEL_SDP2 (2u << 0) /* Assign SDP2 to auxiliary time stamp 0. */ +#define AUX0_SEL_SDP3 (3u << 0) /* Assign SDP3 to auxiliary time stamp 0. */ +#define AUX0_TS_SDP_EN (1u << 2) /* Enable auxiliary time stamp trigger 0. */ +#define AUX1_SEL_SDP0 (0u << 3) /* Assign SDP0 to auxiliary time stamp 1. */ +#define AUX1_SEL_SDP1 (1u << 3) /* Assign SDP1 to auxiliary time stamp 1. */ +#define AUX1_SEL_SDP2 (2u << 3) /* Assign SDP2 to auxiliary time stamp 1. */ +#define AUX1_SEL_SDP3 (3u << 3) /* Assign SDP3 to auxiliary time stamp 1. */ +#define AUX1_TS_SDP_EN (1u << 5) /* Enable auxiliary time stamp trigger 1. */ +#define TS_SDP0_EN (1u << 8) /* SDP0 is assigned to Tsync. */ +#define TS_SDP1_EN (1u << 11) /* SDP1 is assigned to Tsync. */ +#define TS_SDP2_EN (1u << 14) /* SDP2 is assigned to Tsync. */ +#define TS_SDP3_EN (1u << 17) /* SDP3 is assigned to Tsync. */ +#define TS_SDP0_SEL_TT0 (0u << 6) /* Target time 0 is output on SDP0. */ +#define TS_SDP0_SEL_TT1 (1u << 6) /* Target time 1 is output on SDP0. */ +#define TS_SDP1_SEL_TT0 (0u << 9) /* Target time 0 is output on SDP1. */ +#define TS_SDP1_SEL_TT1 (1u << 9) /* Target time 1 is output on SDP1. */ +#define TS_SDP0_SEL_FC0 (2u << 6) /* Freq clock 0 is output on SDP0. */ +#define TS_SDP0_SEL_FC1 (3u << 6) /* Freq clock 1 is output on SDP0. 
*/ +#define TS_SDP1_SEL_FC0 (2u << 9) /* Freq clock 0 is output on SDP1. */ +#define TS_SDP1_SEL_FC1 (3u << 9) /* Freq clock 1 is output on SDP1. */ +#define TS_SDP2_SEL_TT0 (0u << 12) /* Target time 0 is output on SDP2. */ +#define TS_SDP2_SEL_TT1 (1u << 12) /* Target time 1 is output on SDP2. */ +#define TS_SDP2_SEL_FC0 (2u << 12) /* Freq clock 0 is output on SDP2. */ +#define TS_SDP2_SEL_FC1 (3u << 12) /* Freq clock 1 is output on SDP2. */ +#define TS_SDP3_SEL_TT0 (0u << 15) /* Target time 0 is output on SDP3. */ +#define TS_SDP3_SEL_TT1 (1u << 15) /* Target time 1 is output on SDP3. */ +#define TS_SDP3_SEL_FC0 (2u << 15) /* Freq clock 0 is output on SDP3. */ +#define TS_SDP3_SEL_FC1 (3u << 15) /* Freq clock 1 is output on SDP3. */ + +#define IGC_CTRL_SDP0_DIR 0x00400000 /* SDP0 Data direction */ +#define IGC_CTRL_SDP1_DIR 0x00800000 /* SDP1 Data direction */ + +/* Extended Device Control */ +#define IGC_CTRL_EXT_SDP2_DIR 0x00000400 /* SDP2 Data direction */ + +/* ETQF register bit definitions */ +#define IGC_ETQF_1588 (1 << 30) +#define IGC_FTQF_VF_BP 0x00008000 +#define IGC_FTQF_1588_TIME_STAMP 0x08000000 +#define IGC_FTQF_MASK 0xF0000000 +#define IGC_FTQF_MASK_PROTO_BP 0x10000000 +/* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */ +#define IGC_IMIREXT_CTRL_BP 0x00080000 /* Bypass check of ctrl bits */ +#define IGC_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */ + +#define IGC_RXDADV_STAT_TSIP 0x08000 /* timestamp in packet */ +#define IGC_TSICR_TXTS 0x00000002 +#define IGC_TSIM_TXTS 0x00000002 +/* TUPLE Filtering Configuration */ +#define IGC_TTQF_DISABLE_MASK 0xF0008000 /* TTQF Disable Mask */ +#define IGC_TTQF_QUEUE_ENABLE 0x100 /* TTQF Queue Enable Bit */ +#define IGC_TTQF_PROTOCOL_MASK 0xFF /* TTQF Protocol Mask */ +/* TTQF TCP Bit, shift with IGC_TTQF_PROTOCOL SHIFT */ +#define IGC_TTQF_PROTOCOL_TCP 0x0 +/* TTQF UDP Bit, shift with IGC_TTQF_PROTOCOL_SHIFT */ +#define IGC_TTQF_PROTOCOL_UDP 0x1 +/* TTQF SCTP Bit, shift with IGC_TTQF_PROTOCOL_SHIFT */ +#define IGC_TTQF_PROTOCOL_SCTP 0x2 +#define IGC_TTQF_PROTOCOL_SHIFT 5 /* TTQF Protocol Shift */ +#define IGC_TTQF_QUEUE_SHIFT 16 /* TTQF Queue Shfit */ +#define IGC_TTQF_RX_QUEUE_MASK 0x70000 /* TTQF Queue Mask */ +#define IGC_TTQF_MASK_ENABLE 0x10000000 /* TTQF Mask Enable Bit */ +#define IGC_IMIR_CLEAR_MASK 0xF001FFFF /* IMIR Reg Clear Mask */ +#define IGC_IMIR_PORT_BYPASS 0x20000 /* IMIR Port Bypass Bit */ +#define IGC_IMIR_PRIORITY_SHIFT 29 /* IMIR Priority Shift */ +#define IGC_IMIREXT_CLEAR_MASK 0x7FFFF /* IMIREXT Reg Clear Mask */ + +#define IGC_MDICNFG_EXT_MDIO 0x80000000 /* MDI ext/int destination */ +#define IGC_MDICNFG_COM_MDIO 0x40000000 /* MDI shared w/ lan 0 */ +#define IGC_MDICNFG_PHY_MASK 0x03E00000 +#define IGC_MDICNFG_PHY_SHIFT 21 + +#define IGC_MEDIA_PORT_COPPER 1 +#define IGC_MEDIA_PORT_OTHER 2 +#define IGC_M88E1112_AUTO_COPPER_SGMII 0x2 +#define IGC_M88E1112_AUTO_COPPER_BASEX 0x3 +#define IGC_M88E1112_STATUS_LINK 0x0004 /* Interface Link Bit */ +#define IGC_M88E1112_MAC_CTRL_1 0x10 +#define IGC_M88E1112_MAC_CTRL_1_MODE_MASK 0x0380 /* Mode Select */ +#define IGC_M88E1112_MAC_CTRL_1_MODE_SHIFT 7 +#define IGC_M88E1112_PAGE_ADDR 0x16 +#define IGC_M88E1112_STATUS 0x01 + +#define IGC_THSTAT_LOW_EVENT 0x20000000 /* Low thermal threshold */ +#define IGC_THSTAT_MID_EVENT 0x00200000 /* Mid thermal threshold */ +#define IGC_THSTAT_HIGH_EVENT 0x00002000 /* High thermal threshold */ +#define IGC_THSTAT_PWR_DOWN 0x00000001 /* Power Down Event */ +#define IGC_THSTAT_LINK_THROTTLE 0x00000002 /* Link Spd Throttle 
Event */ + +/* EEE defines */ +#define IGC_IPCNFG_EEE_2_5G_AN 0x00000010 /* IPCNFG EEE Ena 2.5G AN */ +#define IGC_IPCNFG_EEE_1G_AN 0x00000008 /* IPCNFG EEE Ena 1G AN */ +#define IGC_IPCNFG_EEE_100M_AN 0x00000004 /* IPCNFG EEE Ena 100M AN */ +#define IGC_EEER_TX_LPI_EN 0x00010000 /* EEER Tx LPI Enable */ +#define IGC_EEER_RX_LPI_EN 0x00020000 /* EEER Rx LPI Enable */ +#define IGC_EEER_LPI_FC 0x00040000 /* EEER Ena on Flow Cntrl */ +/* EEE status */ +#define IGC_EEER_EEE_NEG 0x20000000 /* EEE capability nego */ +#define IGC_EEER_RX_LPI_STATUS 0x40000000 /* Rx in LPI state */ +#define IGC_EEER_TX_LPI_STATUS 0x80000000 /* Tx in LPI state */ +#define IGC_EEE_LP_ADV_ADDR_I350 0x040F /* EEE LP Advertisement */ +#define IGC_M88E1543_PAGE_ADDR 0x16 /* Page Offset Register */ +#define IGC_M88E1543_EEE_CTRL_1 0x0 +#define IGC_M88E1543_EEE_CTRL_1_MS 0x0001 /* EEE Master/Slave */ +#define IGC_M88E1543_FIBER_CTRL 0x0 /* Fiber Control Register */ +#define IGC_EEE_ADV_DEV_I354 7 +#define IGC_EEE_ADV_ADDR_I354 60 +#define IGC_EEE_ADV_100_SUPPORTED (1 << 1) /* 100BaseTx EEE Supported */ +#define IGC_EEE_ADV_1000_SUPPORTED (1 << 2) /* 1000BaseT EEE Supported */ +#define IGC_PCS_STATUS_DEV_I354 3 +#define IGC_PCS_STATUS_ADDR_I354 1 +#define IGC_PCS_STATUS_RX_LPI_RCVD 0x0400 +#define IGC_PCS_STATUS_TX_LPI_RCVD 0x0800 +#define IGC_M88E1512_CFG_REG_1 0x0010 +#define IGC_M88E1512_CFG_REG_2 0x0011 +#define IGC_M88E1512_CFG_REG_3 0x0007 +#define IGC_M88E1512_MODE 0x0014 +#define IGC_EEE_SU_LPI_CLK_STP 0x00800000 /* EEE LPI Clock Stop */ +#define IGC_EEE_LP_ADV_DEV_I210 7 /* EEE LP Adv Device */ +#define IGC_EEE_LP_ADV_ADDR_I210 61 /* EEE LP Adv Register */ +#define IGC_EEE_SU_LPI_CLK_STP 0x00800000 /* EEE LPI Clock Stop */ +#define IGC_EEE_LP_ADV_DEV_I225 7 /* EEE LP Adv Device */ +#define IGC_EEE_LP_ADV_ADDR_I225 61 /* EEE LP Adv Register */ + +/* PCI Express Control */ +#define IGC_GCR_RXD_NO_SNOOP 0x00000001 +#define IGC_GCR_RXDSCW_NO_SNOOP 0x00000002 +#define IGC_GCR_RXDSCR_NO_SNOOP 0x00000004 +#define IGC_GCR_TXD_NO_SNOOP 0x00000008 +#define IGC_GCR_TXDSCW_NO_SNOOP 0x00000010 +#define IGC_GCR_TXDSCR_NO_SNOOP 0x00000020 +#define IGC_GCR_CMPL_TMOUT_MASK 0x0000F000 +#define IGC_GCR_CMPL_TMOUT_10ms 0x00001000 +#define IGC_GCR_CMPL_TMOUT_RESEND 0x00010000 +#define IGC_GCR_CAP_VER2 0x00040000 + +#define PCIE_NO_SNOOP_ALL (IGC_GCR_RXD_NO_SNOOP | \ + IGC_GCR_RXDSCW_NO_SNOOP | \ + IGC_GCR_RXDSCR_NO_SNOOP | \ + IGC_GCR_TXD_NO_SNOOP | \ + IGC_GCR_TXDSCW_NO_SNOOP | \ + IGC_GCR_TXDSCR_NO_SNOOP) + +#define IGC_MMDAC_FUNC_DATA 0x4000 /* Data, no post increment */ + +/* mPHY address control and data registers */ +#define IGC_MPHY_ADDR_CTL 0x0024 /* Address Control Reg */ +#define IGC_MPHY_ADDR_CTL_OFFSET_MASK 0xFFFF0000 +#define IGC_MPHY_DATA 0x0E10 /* Data Register */ + +/* AFE CSR Offset for PCS CLK */ +#define IGC_MPHY_PCS_CLK_REG_OFFSET 0x0004 +/* Override for near end digital loopback. 
*/ +#define IGC_MPHY_PCS_CLK_REG_DIGINELBEN 0x10 + +/* PHY Control Register */ +#define MII_CR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */ +#define MII_CR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */ +#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */ +#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */ +#define MII_CR_ISOLATE 0x0400 /* Isolate PHY from MII */ +#define MII_CR_POWER_DOWN 0x0800 /* Power down */ +#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */ +#define MII_CR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, 00=10 */ +#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */ +#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */ +#define MII_CR_SPEED_1000 0x0040 +#define MII_CR_SPEED_100 0x2000 +#define MII_CR_SPEED_10 0x0000 + +/* PHY Status Register */ +#define MII_SR_EXTENDED_CAPS 0x0001 /* Extended register capabilities */ +#define MII_SR_JABBER_DETECT 0x0002 /* Jabber Detected */ +#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */ +#define MII_SR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */ +#define MII_SR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */ +#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */ +#define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */ +#define MII_SR_EXTENDED_STATUS 0x0100 /* Ext. status info in Reg 0x0F */ +#define MII_SR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */ +#define MII_SR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */ +#define MII_SR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */ +#define MII_SR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */ +#define MII_SR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */ +#define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */ +#define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */ + +/* Autoneg Advertisement Register */ +#define NWAY_AR_SELECTOR_FIELD 0x0001 /* indicates IEEE 802.3 CSMA/CD */ +#define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */ +#define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */ +#define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */ +#define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */ +#define NWAY_AR_100T4_CAPS 0x0200 /* 100T4 Capable */ +#define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */ +#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */ +#define NWAY_AR_REMOTE_FAULT 0x2000 /* Remote Fault detected */ +#define NWAY_AR_NEXT_PAGE 0x8000 /* Next Page ability supported */ + +/* Link Partner Ability Register (Base Page) */ +#define NWAY_LPAR_SELECTOR_FIELD 0x0000 /* LP protocol selector field */ +#define NWAY_LPAR_10T_HD_CAPS 0x0020 /* LP 10T Half Dplx Capable */ +#define NWAY_LPAR_10T_FD_CAPS 0x0040 /* LP 10T Full Dplx Capable */ +#define NWAY_LPAR_100TX_HD_CAPS 0x0080 /* LP 100TX Half Dplx Capable */ +#define NWAY_LPAR_100TX_FD_CAPS 0x0100 /* LP 100TX Full Dplx Capable */ +#define NWAY_LPAR_100T4_CAPS 0x0200 /* LP is 100T4 Capable */ +#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */ +#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asym Pause Direction bit */ +#define NWAY_LPAR_REMOTE_FAULT 0x2000 /* LP detected Remote Fault */ +#define NWAY_LPAR_ACKNOWLEDGE 0x4000 /* LP rx'd link code word */ +#define NWAY_LPAR_NEXT_PAGE 0x8000 /* Next Page ability supported */ + +/* Autoneg Expansion Register */ +#define NWAY_ER_LP_NWAY_CAPS 0x0001 /* LP has Auto Neg Capability */ +#define NWAY_ER_PAGE_RXD 0x0002 /* LP 10T Half Dplx Capable */ +#define 
NWAY_ER_NEXT_PAGE_CAPS 0x0004 /* LP 10T Full Dplx Capable */ +#define NWAY_ER_LP_NEXT_PAGE_CAPS 0x0008 /* LP 100TX Half Dplx Capable */ +#define NWAY_ER_PAR_DETECT_FAULT 0x0010 /* LP 100TX Full Dplx Capable */ + +/* 1000BASE-T Control Register */ +#define CR_1000T_ASYM_PAUSE 0x0080 /* Advertise asymmetric pause bit */ +#define CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */ +#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */ +/* 1=Repeater/switch device port 0=DTE device */ +#define CR_1000T_REPEATER_DTE 0x0400 +/* 1=Configure PHY as Master 0=Configure PHY as Slave */ +#define CR_1000T_MS_VALUE 0x0800 +/* 1=Master/Slave manual config value 0=Automatic Master/Slave config */ +#define CR_1000T_MS_ENABLE 0x1000 +#define CR_1000T_TEST_MODE_NORMAL 0x0000 /* Normal Operation */ +#define CR_1000T_TEST_MODE_1 0x2000 /* Transmit Waveform test */ +#define CR_1000T_TEST_MODE_2 0x4000 /* Master Transmit Jitter test */ +#define CR_1000T_TEST_MODE_3 0x6000 /* Slave Transmit Jitter test */ +#define CR_1000T_TEST_MODE_4 0x8000 /* Transmitter Distortion test */ + +/* 1000BASE-T Status Register */ +#define SR_1000T_IDLE_ERROR_CNT 0x00FF /* Num idle err since last rd */ +#define SR_1000T_ASYM_PAUSE_DIR 0x0100 /* LP asym pause direction bit */ +#define SR_1000T_LP_HD_CAPS 0x0400 /* LP is 1000T HD capable */ +#define SR_1000T_LP_FD_CAPS 0x0800 /* LP is 1000T FD capable */ +#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */ +#define SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */ +#define SR_1000T_MS_CONFIG_RES 0x4000 /* 1=Local Tx Master, 0=Slave */ +#define SR_1000T_MS_CONFIG_FAULT 0x8000 /* Master/Slave config fault */ + +#define SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT 5 + +/* PHY 1000 MII Register/Bit Definitions */ +/* PHY Registers defined by IEEE */ +#define PHY_CONTROL 0x00 /* Control Register */ +#define PHY_STATUS 0x01 /* Status Register */ +#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */ +#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */ +#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */ +#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */ +#define PHY_AUTONEG_EXP 0x06 /* Autoneg Expansion Reg */ +#define PHY_NEXT_PAGE_TX 0x07 /* Next Page Tx */ +#define PHY_LP_NEXT_PAGE 0x08 /* Link Partner Next Page */ +#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */ +#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */ +#define PHY_EXT_STATUS 0x0F /* Extended Status Reg */ + +/* PHY GPY 211 registers */ +#define STANDARD_AN_REG_MASK 0x0007 /* MMD */ +#define ANEG_MULTIGBT_AN_CTRL 0x0020 /* MULTI GBT AN Control Register */ +#define MMD_DEVADDR_SHIFT 16 /* Shift MMD to higher bits */ +#define CR_2500T_FD_CAPS 0x0080 /* Advertise 2500T FD capability */ + +#define PHY_CONTROL_LB 0x4000 /* PHY Loopback bit */ + +/* NVM Control */ +#define IGC_EECD_SK 0x00000001 /* NVM Clock */ +#define IGC_EECD_CS 0x00000002 /* NVM Chip Select */ +#define IGC_EECD_DI 0x00000004 /* NVM Data In */ +#define IGC_EECD_DO 0x00000008 /* NVM Data Out */ +#define IGC_EECD_REQ 0x00000040 /* NVM Access Request */ +#define IGC_EECD_GNT 0x00000080 /* NVM Access Grant */ +#define IGC_EECD_PRES 0x00000100 /* NVM Present */ +#define IGC_EECD_SIZE 0x00000200 /* NVM Size (0=64 word 1=256 word) */ +#define IGC_EECD_BLOCKED 0x00008000 /* Bit banging access blocked flag */ +#define IGC_EECD_ABORT 0x00010000 /* NVM operation aborted flag */ +#define IGC_EECD_TIMEOUT 0x00020000 /* NVM read operation timeout flag */ +#define IGC_EECD_ERROR_CLR 0x00040000 /* NVM error status 
clear bit */ +/* NVM Addressing bits based on type 0=small, 1=large */ +#define IGC_EECD_ADDR_BITS 0x00000400 +#define IGC_EECD_TYPE 0x00002000 /* NVM Type (1-SPI, 0-Microwire) */ +#define IGC_NVM_GRANT_ATTEMPTS 1000 /* NVM # attempts to gain grant */ +#define IGC_EECD_AUTO_RD 0x00000200 /* NVM Auto Read done */ +#define IGC_EECD_SIZE_EX_MASK 0x00007800 /* NVM Size */ +#define IGC_EECD_SIZE_EX_SHIFT 11 +#define IGC_EECD_FLUPD 0x00080000 /* Update FLASH */ +#define IGC_EECD_AUPDEN 0x00100000 /* Ena Auto FLASH update */ +#define IGC_EECD_SEC1VAL 0x00400000 /* Sector One Valid */ +#define IGC_EECD_SEC1VAL_VALID_MASK (IGC_EECD_AUTO_RD | IGC_EECD_PRES) +#define IGC_EECD_FLUPD_I210 0x00800000 /* Update FLASH */ +#define IGC_EECD_FLUDONE_I210 0x04000000 /* Update FLASH done */ +#define IGC_EECD_FLASH_DETECTED_I210 0x00080000 /* FLASH detected */ +#define IGC_EECD_SEC1VAL_I210 0x02000000 /* Sector One Valid */ +#define IGC_FLUDONE_ATTEMPTS 20000 +#define IGC_EERD_EEWR_MAX_COUNT 512 /* buffered EEPROM words rw */ +#define IGC_I210_FIFO_SEL_RX 0x00 +#define IGC_I210_FIFO_SEL_TX_QAV(_i) (0x02 + (_i)) +#define IGC_I210_FIFO_SEL_TX_LEGACY IGC_I210_FIFO_SEL_TX_QAV(0) +#define IGC_I210_FIFO_SEL_BMC2OS_TX 0x06 +#define IGC_I210_FIFO_SEL_BMC2OS_RX 0x01 + +#define IGC_I210_FLASH_SECTOR_SIZE 0x1000 /* 4KB FLASH sector unit size */ +/* Secure FLASH mode requires removing MSb */ +#define IGC_I210_FW_PTR_MASK 0x7FFF +/* Firmware code revision field word offset*/ +#define IGC_I210_FW_VER_OFFSET 328 + +#define IGC_EECD_FLUPD_I225 0x00800000 /* Update FLASH */ +#define IGC_EECD_FLUDONE_I225 0x04000000 /* Update FLASH done */ +#define IGC_EECD_FLASH_DETECTED_I225 0x00080000 /* FLASH detected */ +#define IGC_FLUDONE_ATTEMPTS 20000 +#define IGC_EERD_EEWR_MAX_COUNT 512 /* buffered EEPROM words rw */ +#define IGC_EECD_SEC1VAL_I225 0x02000000 /* Sector One Valid */ +#define IGC_FLSECU_BLK_SW_ACCESS_I225 0x00000004 /* Block SW access */ +#define IGC_FWSM_FW_VALID_I225 0x8000 /* FW valid bit */ + +#define IGC_NVM_RW_REG_DATA 16 /* Offset to data in NVM read/write regs */ +#define IGC_NVM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */ +#define IGC_NVM_RW_REG_START 1 /* Start operation */ +#define IGC_NVM_RW_ADDR_SHIFT 2 /* Shift to the address bits */ +#define IGC_NVM_POLL_WRITE 1 /* Flag for polling for write complete */ +#define IGC_NVM_POLL_READ 0 /* Flag for polling for read complete */ +#define IGC_FLASH_UPDATES 2000 + +/* NVM Word Offsets */ +#define NVM_COMPAT 0x0003 +#define NVM_ID_LED_SETTINGS 0x0004 +#define NVM_VERSION 0x0005 +#define NVM_SERDES_AMPLITUDE 0x0006 /* SERDES output amplitude */ +#define NVM_PHY_CLASS_WORD 0x0007 +#define IGC_I210_NVM_FW_MODULE_PTR 0x0010 +#define IGC_I350_NVM_FW_MODULE_PTR 0x0051 +#define NVM_FUTURE_INIT_WORD1 0x0019 +#define NVM_ETRACK_WORD 0x0042 +#define NVM_ETRACK_HIWORD 0x0043 +#define NVM_COMB_VER_OFF 0x0083 +#define NVM_COMB_VER_PTR 0x003d + +/* NVM version defines */ +#define NVM_MAJOR_MASK 0xF000 +#define NVM_MINOR_MASK 0x0FF0 +#define NVM_IMAGE_ID_MASK 0x000F +#define NVM_COMB_VER_MASK 0x00FF +#define NVM_MAJOR_SHIFT 12 +#define NVM_MINOR_SHIFT 4 +#define NVM_COMB_VER_SHFT 8 +#define NVM_VER_INVALID 0xFFFF +#define NVM_ETRACK_SHIFT 16 +#define NVM_ETRACK_VALID 0x8000 +#define NVM_NEW_DEC_MASK 0x0F00 +#define NVM_HEX_CONV 16 +#define NVM_HEX_TENS 10 + +/* FW version defines */ +/* Offset of "Loader patch ptr" in Firmware Header */ +#define IGC_I350_NVM_FW_LOADER_PATCH_PTR_OFFSET 0x01 +/* Patch generation hour & minutes */ +#define IGC_I350_NVM_FW_VER_WORD1_OFFSET 0x04 
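The NVM version masks a few lines up pack the image version into the single word read from offset NVM_VERSION. A minimal sketch of the decode, assuming the struct and helper names are purely illustrative and that u16 is the fixed-width typedef the base code picks up from igc_osdep.h:

/* Illustrative only: unpack the word read from offset NVM_VERSION. */
struct nvm_image_version {
	u16 major;	/* NVM_MAJOR_MASK >> NVM_MAJOR_SHIFT */
	u16 minor;	/* NVM_MINOR_MASK >> NVM_MINOR_SHIFT */
	u16 image_id;	/* NVM_IMAGE_ID_MASK */
};

static inline void
nvm_decode_version_word(u16 ver_word, struct nvm_image_version *v)
{
	v->major = (ver_word & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
	v->minor = (ver_word & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
	v->image_id = ver_word & NVM_IMAGE_ID_MASK;
}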
+/* Patch generation month & day */ +#define IGC_I350_NVM_FW_VER_WORD2_OFFSET 0x05 +/* Patch generation year */ +#define IGC_I350_NVM_FW_VER_WORD3_OFFSET 0x06 +/* Patch major & minor numbers */ +#define IGC_I350_NVM_FW_VER_WORD4_OFFSET 0x07 + +#define NVM_MAC_ADDR 0x0000 +#define NVM_SUB_DEV_ID 0x000B +#define NVM_SUB_VEN_ID 0x000C +#define NVM_DEV_ID 0x000D +#define NVM_VEN_ID 0x000E +#define NVM_INIT_CTRL_2 0x000F +#define NVM_INIT_CTRL_4 0x0013 +#define NVM_LED_1_CFG 0x001C +#define NVM_LED_0_2_CFG 0x001F + +#define NVM_COMPAT_VALID_CSUM 0x0001 +#define NVM_FUTURE_INIT_WORD1_VALID_CSUM 0x0040 + +#define NVM_INIT_CONTROL2_REG 0x000F +#define NVM_INIT_CONTROL3_PORT_B 0x0014 +#define NVM_INIT_3GIO_3 0x001A +#define NVM_SWDEF_PINS_CTRL_PORT_0 0x0020 +#define NVM_INIT_CONTROL3_PORT_A 0x0024 +#define NVM_CFG 0x0012 +#define NVM_ALT_MAC_ADDR_PTR 0x0037 +#define NVM_CHECKSUM_REG 0x003F +#define NVM_COMPATIBILITY_REG_3 0x0003 +#define NVM_COMPATIBILITY_BIT_MASK 0x8000 + +#define IGC_NVM_CFG_DONE_PORT_0 0x040000 /* MNG config cycle done */ +#define IGC_NVM_CFG_DONE_PORT_1 0x080000 /* ...for second port */ +#define IGC_NVM_CFG_DONE_PORT_2 0x100000 /* ...for third port */ +#define IGC_NVM_CFG_DONE_PORT_3 0x200000 /* ...for fourth port */ + +#define NVM_82580_LAN_FUNC_OFFSET(a) ( \ + __extension__ ({ \ + typeof(a) _a = (a); \ + _a ? (0x40 + 0x40 * _a) : 0; \ + })) + +/* Mask bits for fields in Word 0x24 of the NVM */ +#define NVM_WORD24_COM_MDIO 0x0008 /* MDIO interface shared */ +#define NVM_WORD24_EXT_MDIO 0x0004 /* MDIO accesses routed extrnl */ +/* Offset of Link Mode bits for 82575/82576 */ +#define NVM_WORD24_LNK_MODE_OFFSET 8 +/* Offset of Link Mode bits for 82580 up */ +#define NVM_WORD24_82580_LNK_MODE_OFFSET 4 + + +/* Mask bits for fields in Word 0x0f of the NVM */ +#define NVM_WORD0F_PAUSE_MASK 0x3000 +#define NVM_WORD0F_PAUSE 0x1000 +#define NVM_WORD0F_ASM_DIR 0x2000 +#define NVM_WORD0F_SWPDIO_EXT_MASK 0x00F0 + +/* Mask bits for fields in Word 0x1a of the NVM */ +#define NVM_WORD1A_ASPM_MASK 0x000C + +/* Mask bits for fields in Word 0x03 of the EEPROM */ +#define NVM_COMPAT_LOM 0x0800 + +/* length of string needed to store PBA number */ +#define IGC_PBANUM_LENGTH 11 + +/* For checksumming, the sum of all words in the NVM should equal 0xBABA. 
*/ +#define NVM_SUM 0xBABA + +/* PBA (printed board assembly) number words */ +#define NVM_PBA_OFFSET_0 8 +#define NVM_PBA_OFFSET_1 9 +#define NVM_PBA_PTR_GUARD 0xFAFA +#define NVM_RESERVED_WORD 0xFFFF +#define NVM_PHY_CLASS_A 0x8000 +#define NVM_SERDES_AMPLITUDE_MASK 0x000F +#define NVM_SIZE_MASK 0x1C00 +#define NVM_SIZE_SHIFT 10 +#define NVM_WORD_SIZE_BASE_SHIFT 6 +#define NVM_SWDPIO_EXT_SHIFT 4 + +/* NVM Commands - Microwire */ +#define NVM_READ_OPCODE_MICROWIRE 0x6 /* NVM read opcode */ +#define NVM_WRITE_OPCODE_MICROWIRE 0x5 /* NVM write opcode */ +#define NVM_ERASE_OPCODE_MICROWIRE 0x7 /* NVM erase opcode */ +#define NVM_EWEN_OPCODE_MICROWIRE 0x13 /* NVM erase/write enable */ +#define NVM_EWDS_OPCODE_MICROWIRE 0x10 /* NVM erase/write disable */ + +/* NVM Commands - SPI */ +#define NVM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */ +#define NVM_READ_OPCODE_SPI 0x03 /* NVM read opcode */ +#define NVM_WRITE_OPCODE_SPI 0x02 /* NVM write opcode */ +#define NVM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */ +#define NVM_WREN_OPCODE_SPI 0x06 /* NVM set Write Enable latch */ +#define NVM_RDSR_OPCODE_SPI 0x05 /* NVM read Status register */ + +/* SPI NVM Status Register */ +#define NVM_STATUS_RDY_SPI 0x01 + +/* Word definitions for ID LED Settings */ +#define ID_LED_RESERVED_0000 0x0000 +#define ID_LED_RESERVED_FFFF 0xFFFF +#define ID_LED_DEFAULT ((ID_LED_OFF1_ON2 << 12) | \ + (ID_LED_OFF1_OFF2 << 8) | \ + (ID_LED_DEF1_DEF2 << 4) | \ + (ID_LED_DEF1_DEF2)) +#define ID_LED_DEF1_DEF2 0x1 +#define ID_LED_DEF1_ON2 0x2 +#define ID_LED_DEF1_OFF2 0x3 +#define ID_LED_ON1_DEF2 0x4 +#define ID_LED_ON1_ON2 0x5 +#define ID_LED_ON1_OFF2 0x6 +#define ID_LED_OFF1_DEF2 0x7 +#define ID_LED_OFF1_ON2 0x8 +#define ID_LED_OFF1_OFF2 0x9 + +#define IGP_ACTIVITY_LED_MASK 0xFFFFF0FF +#define IGP_ACTIVITY_LED_ENABLE 0x0300 +#define IGP_LED3_MODE 0x07000000 + +/* PCI/PCI-X/PCI-EX Config space */ +#define PCIX_COMMAND_REGISTER 0xE6 +#define PCIX_STATUS_REGISTER_LO 0xE8 +#define PCIX_STATUS_REGISTER_HI 0xEA +#define PCI_HEADER_TYPE_REGISTER 0x0E +#define PCIE_LINK_STATUS 0x12 +#define PCIE_DEVICE_CONTROL2 0x28 + +#define PCIX_COMMAND_MMRBC_MASK 0x000C +#define PCIX_COMMAND_MMRBC_SHIFT 0x2 +#define PCIX_STATUS_HI_MMRBC_MASK 0x0060 +#define PCIX_STATUS_HI_MMRBC_SHIFT 0x5 +#define PCIX_STATUS_HI_MMRBC_4K 0x3 +#define PCIX_STATUS_HI_MMRBC_2K 0x2 +#define PCIX_STATUS_LO_FUNC_MASK 0x7 +#define PCI_HEADER_TYPE_MULTIFUNC 0x80 +#define PCIE_LINK_WIDTH_MASK 0x3F0 +#define PCIE_LINK_WIDTH_SHIFT 4 +#define PCIE_LINK_SPEED_MASK 0x0F +#define PCIE_LINK_SPEED_2500 0x01 +#define PCIE_LINK_SPEED_5000 0x02 +#define PCIE_DEVICE_CONTROL2_16ms 0x0005 + +#define ETH_ADDR_LEN 6 + +#define PHY_REVISION_MASK 0xFFFFFFF0 +#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */ +#define MAX_PHY_MULTI_PAGE_REG 0xF + +/* Bit definitions for valid PHY IDs. 
+ * I = Integrated + * E = External + */ +#define M88IGC_E_PHY_ID 0x01410C50 +#define M88IGC_I_PHY_ID 0x01410C30 +#define M88E1011_I_PHY_ID 0x01410C20 +#define IGP01IGC_I_PHY_ID 0x02A80380 +#define M88E1111_I_PHY_ID 0x01410CC0 +#define M88E1543_E_PHY_ID 0x01410EA0 +#define M88E1512_E_PHY_ID 0x01410DD0 +#define M88E1112_E_PHY_ID 0x01410C90 +#define I347AT4_E_PHY_ID 0x01410DC0 +#define M88E1340M_E_PHY_ID 0x01410DF0 +#define GG82563_E_PHY_ID 0x01410CA0 +#define IGP03IGC_E_PHY_ID 0x02A80390 +#define IFE_E_PHY_ID 0x02A80330 +#define IFE_PLUS_E_PHY_ID 0x02A80320 +#define IFE_C_E_PHY_ID 0x02A80310 +#define BMIGC_E_PHY_ID 0x01410CB0 +#define BMIGC_E_PHY_ID_R2 0x01410CB1 +#define I82577_E_PHY_ID 0x01540050 +#define I82578_E_PHY_ID 0x004DD040 +#define I82579_E_PHY_ID 0x01540090 +#define I217_E_PHY_ID 0x015400A0 +#define I82580_I_PHY_ID 0x015403A0 +#define I350_I_PHY_ID 0x015403B0 +#define I210_I_PHY_ID 0x01410C00 +#define IGP04IGC_E_PHY_ID 0x02A80391 +#define M88_VENDOR 0x0141 +#define I225_I_PHY_ID 0x67C9DC00 + +/* M88E1000 Specific Registers */ +#define M88IGC_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Reg */ +#define M88IGC_PHY_SPEC_STATUS 0x11 /* PHY Specific Status Reg */ +#define M88IGC_EXT_PHY_SPEC_CTRL 0x14 /* Extended PHY Specific Cntrl */ +#define M88IGC_RX_ERR_CNTR 0x15 /* Receive Error Counter */ + +#define M88IGC_PHY_EXT_CTRL 0x1A /* PHY extend control register */ +#define M88IGC_PHY_PAGE_SELECT 0x1D /* Reg 29 for pg number setting */ +#define M88IGC_PHY_GEN_CONTROL 0x1E /* meaning depends on reg 29 */ +#define M88IGC_PHY_VCO_REG_BIT8 0x100 /* Bits 8 & 11 are adjusted for */ +#define M88IGC_PHY_VCO_REG_BIT11 0x800 /* improved BER performance */ + +/* M88E1000 PHY Specific Control Register */ +#define M88IGC_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reverse enabled */ +/* MDI Crossover Mode bits 6:5 Manual MDI configuration */ +#define M88IGC_PSCR_MDI_MANUAL_MODE 0x0000 +#define M88IGC_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */ +/* 1000BASE-T: Auto crossover, 100BASE-TX/10BASE-T: MDI Mode */ +#define M88IGC_PSCR_AUTO_X_1000T 0x0040 +/* Auto crossover enabled all speeds */ +#define M88IGC_PSCR_AUTO_X_MODE 0x0060 +#define M88IGC_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Tx */ + +/* M88E1000 PHY Specific Status Register */ +#define M88IGC_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */ +#define M88IGC_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */ +#define M88IGC_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */ +/* 0 = <50M + * 1 = 50-80M + * 2 = 80-110M + * 3 = 110-140M + * 4 = >140M + */ +#define M88IGC_PSSR_CABLE_LENGTH 0x0380 +#define M88IGC_PSSR_LINK 0x0400 /* 1=Link up, 0=Link down */ +#define M88IGC_PSSR_SPD_DPLX_RESOLVED 0x0800 /* 1=Speed & Duplex resolved */ +#define M88IGC_PSSR_DPLX 0x2000 /* 1=Duplex 0=Half Duplex */ +#define M88IGC_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */ +#define M88IGC_PSSR_100MBS 0x4000 /* 01=100Mbs */ +#define M88IGC_PSSR_1000MBS 0x8000 /* 10=1000Mbs */ + +#define M88IGC_PSSR_CABLE_LENGTH_SHIFT 7 + +/* Number of times we will attempt to autonegotiate before downshifting if we + * are the master + */ +#define M88IGC_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00 +#define M88IGC_EPSCR_MASTER_DOWNSHIFT_1X 0x0000 +/* Number of times we will attempt to autonegotiate before downshifting if we + * are the slave + */ +#define M88IGC_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300 +#define M88IGC_EPSCR_SLAVE_DOWNSHIFT_1X 0x0100 +#define M88IGC_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */ + +/* Intel I347AT4 Registers */ +#define I347AT4_PCDL 0x10 /* PHY Cable Diagnostics Length */ 
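Tying the M88IGC_PSSR_* bits above together: once M88IGC_PSSR_SPD_DPLX_RESOLVED is set, link speed and duplex can be read straight from the same PHY specific status word. A minimal sketch; the helper name and the -1 "not resolved yet" return value are illustrative conventions, not anything taken from the base code:

/* Illustrative only: decode speed in Mb/s from an M88 PHY specific
 * status read, or -1 while speed/duplex are still unresolved.
 * Duplex is simply (pssr & M88IGC_PSSR_DPLX) ? full : half.
 */
static inline int
m88_pssr_speed_mbps(u16 pssr)
{
	if (!(pssr & M88IGC_PSSR_SPD_DPLX_RESOLVED))
		return -1;

	switch (pssr & M88IGC_PSSR_SPEED) {
	case M88IGC_PSSR_1000MBS:
		return 1000;
	case M88IGC_PSSR_100MBS:
		return 100;
	default:	/* 00 in bits 15:14 encodes 10 Mb/s */
		return 10;
	}
}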
+#define I347AT4_PCDC 0x15 /* PHY Cable Diagnostics Control */ +#define I347AT4_PAGE_SELECT 0x16 + +/* I347AT4 Extended PHY Specific Control Register */ + +/* Number of times we will attempt to autonegotiate before downshifting if we + * are the master + */ +#define I347AT4_PSCR_DOWNSHIFT_ENABLE 0x0800 +#define I347AT4_PSCR_DOWNSHIFT_MASK 0x7000 +#define I347AT4_PSCR_DOWNSHIFT_1X 0x0000 +#define I347AT4_PSCR_DOWNSHIFT_2X 0x1000 +#define I347AT4_PSCR_DOWNSHIFT_3X 0x2000 +#define I347AT4_PSCR_DOWNSHIFT_4X 0x3000 +#define I347AT4_PSCR_DOWNSHIFT_5X 0x4000 +#define I347AT4_PSCR_DOWNSHIFT_6X 0x5000 +#define I347AT4_PSCR_DOWNSHIFT_7X 0x6000 +#define I347AT4_PSCR_DOWNSHIFT_8X 0x7000 + +/* I347AT4 PHY Cable Diagnostics Control */ +#define I347AT4_PCDC_CABLE_LENGTH_UNIT 0x0400 /* 0=cm 1=meters */ + +/* M88E1112 only registers */ +#define M88E1112_VCT_DSP_DISTANCE 0x001A + +/* M88EC018 Rev 2 specific DownShift settings */ +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800 + +#define I82578_EPSCR_DOWNSHIFT_ENABLE 0x0020 +#define I82578_EPSCR_DOWNSHIFT_COUNTER_MASK 0x001C + +/* BME1000 PHY Specific Control Register */ +#define BMIGC_PSCR_ENABLE_DOWNSHIFT 0x0800 /* 1 = enable downshift */ + +/* Bits... + * 15-5: page + * 4-0: register offset + */ +#define GG82563_PAGE_SHIFT 5 +#define GG82563_REG(page, reg) \ + (((page) << GG82563_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS)) +#define GG82563_MIN_ALT_REG 30 + +/* GG82563 Specific Registers */ +#define GG82563_PHY_SPEC_CTRL GG82563_REG(0, 16) /* PHY Spec Cntrl */ +#define GG82563_PHY_PAGE_SELECT GG82563_REG(0, 22) /* Page Select */ +#define GG82563_PHY_SPEC_CTRL_2 GG82563_REG(0, 26) /* PHY Spec Cntrl2 */ +#define GG82563_PHY_PAGE_SELECT_ALT GG82563_REG(0, 29) /* Alt Page Select */ + +/* MAC Specific Control Register */ +#define GG82563_PHY_MAC_SPEC_CTRL GG82563_REG(2, 21) + +#define GG82563_PHY_DSP_DISTANCE GG82563_REG(5, 26) /* DSP Distance */ + +/* Page 193 - Port Control Registers */ +/* Kumeran Mode Control */ +#define GG82563_PHY_KMRN_MODE_CTRL GG82563_REG(193, 16) +#define GG82563_PHY_PWR_MGMT_CTRL GG82563_REG(193, 20) /* Pwr Mgt Ctrl */ + +/* Page 194 - KMRN Registers */ +#define GG82563_PHY_INBAND_CTRL GG82563_REG(194, 18) /* Inband Ctrl */ + +/* MDI Control */ +#define IGC_MDIC_DATA_MASK 0x0000FFFF +#define IGC_MDIC_INT_EN 0x20000000 +#define IGC_MDIC_REG_MASK 0x001F0000 +#define IGC_MDIC_REG_SHIFT 16 +#define IGC_MDIC_PHY_MASK 0x03E00000 +#define IGC_MDIC_PHY_SHIFT 21 +#define IGC_MDIC_OP_WRITE 0x04000000 +#define IGC_MDIC_OP_READ 0x08000000 +#define IGC_MDIC_READY 0x10000000 +#define IGC_MDIC_ERROR 0x40000000 +#define IGC_MDIC_DEST 0x80000000 + +#define IGC_N0_QUEUE -1 + +#define IGC_MAX_MAC_HDR_LEN 127 +#define IGC_MAX_NETWORK_HDR_LEN 511 + +#define IGC_VLAPQF_QUEUE_SEL(_n, q_idx) ((q_idx) << ((_n) * 4)) +#define IGC_VLAPQF_P_VALID(_n) (0x1 << (3 + (_n) * 4)) +#define IGC_VLAPQF_QUEUE_MASK 0x03 +#define IGC_VFTA_BLOCK_SIZE 8 +/* SerDes Control */ +#define IGC_GEN_CTL_READY 0x80000000 +#define IGC_GEN_CTL_ADDRESS_SHIFT 8 +#define IGC_GEN_POLL_TIMEOUT 640 + +/* LinkSec register fields */ +#define IGC_LSECTXCAP_SUM_MASK 0x00FF0000 +#define IGC_LSECTXCAP_SUM_SHIFT 16 +#define IGC_LSECRXCAP_SUM_MASK 0x00FF0000 +#define IGC_LSECRXCAP_SUM_SHIFT 16 + +#define IGC_LSECTXCTRL_EN_MASK 0x00000003 +#define IGC_LSECTXCTRL_DISABLE 0x0 +#define IGC_LSECTXCTRL_AUTH 0x1 +#define IGC_LSECTXCTRL_AUTH_ENCRYPT 0x2 +#define IGC_LSECTXCTRL_AISCI 0x00000020 +#define IGC_LSECTXCTRL_PNTHRSH_MASK 0xFFFFFF00 +#define 
IGC_LSECTXCTRL_RSV_MASK 0x000000D8 + +#define IGC_LSECRXCTRL_EN_MASK 0x0000000C +#define IGC_LSECRXCTRL_EN_SHIFT 2 +#define IGC_LSECRXCTRL_DISABLE 0x0 +#define IGC_LSECRXCTRL_CHECK 0x1 +#define IGC_LSECRXCTRL_STRICT 0x2 +#define IGC_LSECRXCTRL_DROP 0x3 +#define IGC_LSECRXCTRL_PLSH 0x00000040 +#define IGC_LSECRXCTRL_RP 0x00000080 +#define IGC_LSECRXCTRL_RSV_MASK 0xFFFFFF33 + +/* Tx Rate-Scheduler Config fields */ +#define IGC_RTTBCNRC_RS_ENA 0x80000000 +#define IGC_RTTBCNRC_RF_DEC_MASK 0x00003FFF +#define IGC_RTTBCNRC_RF_INT_SHIFT 14 +#define IGC_RTTBCNRC_RF_INT_MASK \ + (IGC_RTTBCNRC_RF_DEC_MASK << IGC_RTTBCNRC_RF_INT_SHIFT) + +/* DMA Coalescing register fields */ +/* DMA Coalescing Watchdog Timer */ +#define IGC_DMACR_DMACWT_MASK 0x00003FFF +/* DMA Coalescing Rx Threshold */ +#define IGC_DMACR_DMACTHR_MASK 0x00FF0000 +#define IGC_DMACR_DMACTHR_SHIFT 16 +/* Lx when no PCIe transactions */ +#define IGC_DMACR_DMAC_LX_MASK 0x30000000 +#define IGC_DMACR_DMAC_LX_SHIFT 28 +#define IGC_DMACR_DMAC_EN 0x80000000 /* Enable DMA Coalescing */ +/* DMA Coalescing BMC-to-OS Watchdog Enable */ +#define IGC_DMACR_DC_BMC2OSW_EN 0x00008000 + +/* DMA Coalescing Transmit Threshold */ +#define IGC_DMCTXTH_DMCTTHR_MASK 0x00000FFF + +#define IGC_DMCTLX_TTLX_MASK 0x00000FFF /* Time to LX request */ + +/* Rx Traffic Rate Threshold */ +#define IGC_DMCRTRH_UTRESH_MASK 0x0007FFFF +/* Rx packet rate in current window */ +#define IGC_DMCRTRH_LRPRCW 0x80000000 + +/* DMA Coal Rx Traffic Current Count */ +#define IGC_DMCCNT_CCOUNT_MASK 0x01FFFFFF + +/* Flow ctrl Rx Threshold High val */ +#define IGC_FCRTC_RTH_COAL_MASK 0x0003FFF0 +#define IGC_FCRTC_RTH_COAL_SHIFT 4 +/* Lx power decision based on DMA coal */ +#define IGC_PCIEMISC_LX_DECISION 0x00000080 + +#define IGC_RXPBS_CFG_TS_EN 0x80000000 /* Timestamp in Rx buffer */ +#define IGC_RXPBS_SIZE_I210_MASK 0x0000003F /* Rx packet buffer size */ +#define IGC_TXPB0S_SIZE_I210_MASK 0x0000003F /* Tx packet buffer 0 size */ +#define I210_RXPBSIZE_DEFAULT 0x000000A2 /* RXPBSIZE default */ +#define I210_TXPBSIZE_DEFAULT 0x04000014 /* TXPBSIZE default */ + + +#define I225_RXPBSIZE_DEFAULT 0x000000A2 /* RXPBSIZE default */ +#define I225_TXPBSIZE_DEFAULT 0x04000014 /* TXPBSIZE default */ +#define IGC_RXPBS_SIZE_I225_MASK 0x0000003F /* Rx packet buffer size */ +#define IGC_TXPB0S_SIZE_I225_MASK 0x0000003F /* Tx packet buffer 0 size */ +#define IGC_STM_OPCODE 0xDB00 +#define IGC_EEPROM_FLASH_SIZE_WORD 0x11 +#define INVM_DWORD_TO_RECORD_TYPE(invm_dword) \ + (u8)((invm_dword) & 0x7) +#define INVM_DWORD_TO_WORD_ADDRESS(invm_dword) \ + (u8)(((invm_dword) & 0x0000FE00) >> 9) +#define INVM_DWORD_TO_WORD_DATA(invm_dword) \ + (u16)(((invm_dword) & 0xFFFF0000) >> 16) +#define IGC_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS 8 +#define IGC_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS 1 +#define IGC_INVM_ULT_BYTES_SIZE 8 +#define IGC_INVM_RECORD_SIZE_IN_BYTES 4 +#define IGC_INVM_VER_FIELD_ONE 0x1FF8 +#define IGC_INVM_VER_FIELD_TWO 0x7FE000 +#define IGC_INVM_IMGTYPE_FIELD 0x1F800000 + +#define IGC_INVM_MAJOR_MASK 0x3F0 +#define IGC_INVM_MINOR_MASK 0xF +#define IGC_INVM_MAJOR_SHIFT 4 + +/* PLL Defines */ +#define IGC_PCI_PMCSR 0x44 +#define IGC_PCI_PMCSR_D3 0x03 +#define IGC_MAX_PLL_TRIES 5 +#define IGC_PHY_PLL_UNCONF 0xFF +#define IGC_PHY_PLL_FREQ_PAGE 0xFC0000 +#define IGC_PHY_PLL_FREQ_REG 0x000E +#define IGC_INVM_DEFAULT_AL 0x202F +#define IGC_INVM_AUTOLOAD 0x0A +#define IGC_INVM_PLL_WO_VAL 0x0010 + +/* Proxy Filter Control Extended */ +#define IGC_PROXYFCEX_MDNS 0x00000001 /* mDNS */ +#define 
IGC_PROXYFCEX_MDNS_M 0x00000002 /* mDNS Multicast */ +#define IGC_PROXYFCEX_MDNS_U 0x00000004 /* mDNS Unicast */ +#define IGC_PROXYFCEX_IPV4_M 0x00000008 /* IPv4 Multicast */ +#define IGC_PROXYFCEX_IPV6_M 0x00000010 /* IPv6 Multicast */ +#define IGC_PROXYFCEX_IGMP 0x00000020 /* IGMP */ +#define IGC_PROXYFCEX_IGMP_M 0x00000040 /* IGMP Multicast */ +#define IGC_PROXYFCEX_ARPRES 0x00000080 /* ARP Response */ +#define IGC_PROXYFCEX_ARPRES_D 0x00000100 /* ARP Response Directed */ +#define IGC_PROXYFCEX_ICMPV4 0x00000200 /* ICMPv4 */ +#define IGC_PROXYFCEX_ICMPV4_D 0x00000400 /* ICMPv4 Directed */ +#define IGC_PROXYFCEX_ICMPV6 0x00000800 /* ICMPv6 */ +#define IGC_PROXYFCEX_ICMPV6_D 0x00001000 /* ICMPv6 Directed */ +#define IGC_PROXYFCEX_DNS 0x00002000 /* DNS */ + +/* Proxy Filter Control */ +#define IGC_PROXYFC_D0 0x00000001 /* Enable offload in D0 */ +#define IGC_PROXYFC_EX 0x00000004 /* Directed exact proxy */ +#define IGC_PROXYFC_MC 0x00000008 /* Directed MC Proxy */ +#define IGC_PROXYFC_BC 0x00000010 /* Broadcast Proxy Enable */ +#define IGC_PROXYFC_ARP_DIRECTED 0x00000020 /* Directed ARP Proxy Ena */ +#define IGC_PROXYFC_IPV4 0x00000040 /* Directed IPv4 Enable */ +#define IGC_PROXYFC_IPV6 0x00000080 /* Directed IPv6 Enable */ +#define IGC_PROXYFC_NS 0x00000200 /* IPv6 Neighbor Solicitation */ +#define IGC_PROXYFC_NS_DIRECTED 0x00000400 /* Directed NS Proxy Ena */ +#define IGC_PROXYFC_ARP 0x00000800 /* ARP Request Proxy Ena */ +/* Proxy Status */ +#define IGC_PROXYS_CLEAR 0xFFFFFFFF /* Clear */ + +/* Firmware Status */ +#define IGC_FWSTS_FWRI 0x80000000 /* FW Reset Indication */ +/* VF Control */ +#define IGC_VTCTRL_RST 0x04000000 /* Reset VF */ + +#define IGC_STATUS_LAN_ID_MASK 0x00000000C /* Mask for Lan ID field */ +/* Lan ID bit field offset in status register */ +#define IGC_STATUS_LAN_ID_OFFSET 2 +#define IGC_VFTA_ENTRIES 128 + +#define IGC_UNUSEDARG +#define ERROR_REPORT(fmt) do { } while (0) +#endif /* _IGC_DEFINES_H_ */ diff --git a/src/spdk/dpdk/drivers/net/igc/base/igc_hw.h b/src/spdk/dpdk/drivers/net/igc/base/igc_hw.h new file mode 100644 index 000000000..be38fafa5 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/igc/base/igc_hw.h @@ -0,0 +1,1051 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + +#ifndef _IGC_HW_H_ +#define _IGC_HW_H_ + +#include "igc_osdep.h" +#include "igc_regs.h" +#include "igc_defines.h" + +struct igc_hw; + +#define IGC_DEV_ID_82542 0x1000 +#define IGC_DEV_ID_82543GC_FIBER 0x1001 +#define IGC_DEV_ID_82543GC_COPPER 0x1004 +#define IGC_DEV_ID_82544EI_COPPER 0x1008 +#define IGC_DEV_ID_82544EI_FIBER 0x1009 +#define IGC_DEV_ID_82544GC_COPPER 0x100C +#define IGC_DEV_ID_82544GC_LOM 0x100D +#define IGC_DEV_ID_82540EM 0x100E +#define IGC_DEV_ID_82540EM_LOM 0x1015 +#define IGC_DEV_ID_82540EP_LOM 0x1016 +#define IGC_DEV_ID_82540EP 0x1017 +#define IGC_DEV_ID_82540EP_LP 0x101E +#define IGC_DEV_ID_82545EM_COPPER 0x100F +#define IGC_DEV_ID_82545EM_FIBER 0x1011 +#define IGC_DEV_ID_82545GM_COPPER 0x1026 +#define IGC_DEV_ID_82545GM_FIBER 0x1027 +#define IGC_DEV_ID_82545GM_SERDES 0x1028 +#define IGC_DEV_ID_82546EB_COPPER 0x1010 +#define IGC_DEV_ID_82546EB_FIBER 0x1012 +#define IGC_DEV_ID_82546EB_QUAD_COPPER 0x101D +#define IGC_DEV_ID_82546GB_COPPER 0x1079 +#define IGC_DEV_ID_82546GB_FIBER 0x107A +#define IGC_DEV_ID_82546GB_SERDES 0x107B +#define IGC_DEV_ID_82546GB_PCIE 0x108A +#define IGC_DEV_ID_82546GB_QUAD_COPPER 0x1099 +#define IGC_DEV_ID_82546GB_QUAD_COPPER_KSP3 0x10B5 +#define IGC_DEV_ID_82541EI 0x1013 +#define 
IGC_DEV_ID_82541EI_MOBILE 0x1018 +#define IGC_DEV_ID_82541ER_LOM 0x1014 +#define IGC_DEV_ID_82541ER 0x1078 +#define IGC_DEV_ID_82541GI 0x1076 +#define IGC_DEV_ID_82541GI_LF 0x107C +#define IGC_DEV_ID_82541GI_MOBILE 0x1077 +#define IGC_DEV_ID_82547EI 0x1019 +#define IGC_DEV_ID_82547EI_MOBILE 0x101A +#define IGC_DEV_ID_82547GI 0x1075 +#define IGC_DEV_ID_82571EB_COPPER 0x105E +#define IGC_DEV_ID_82571EB_FIBER 0x105F +#define IGC_DEV_ID_82571EB_SERDES 0x1060 +#define IGC_DEV_ID_82571EB_SERDES_DUAL 0x10D9 +#define IGC_DEV_ID_82571EB_SERDES_QUAD 0x10DA +#define IGC_DEV_ID_82571EB_QUAD_COPPER 0x10A4 +#define IGC_DEV_ID_82571PT_QUAD_COPPER 0x10D5 +#define IGC_DEV_ID_82571EB_QUAD_FIBER 0x10A5 +#define IGC_DEV_ID_82571EB_QUAD_COPPER_LP 0x10BC +#define IGC_DEV_ID_82572EI_COPPER 0x107D +#define IGC_DEV_ID_82572EI_FIBER 0x107E +#define IGC_DEV_ID_82572EI_SERDES 0x107F +#define IGC_DEV_ID_82572EI 0x10B9 +#define IGC_DEV_ID_82573E 0x108B +#define IGC_DEV_ID_82573E_IAMT 0x108C +#define IGC_DEV_ID_82573L 0x109A +#define IGC_DEV_ID_82574L 0x10D3 +#define IGC_DEV_ID_82574LA 0x10F6 +#define IGC_DEV_ID_82583V 0x150C +#define IGC_DEV_ID_80003ES2LAN_COPPER_DPT 0x1096 +#define IGC_DEV_ID_80003ES2LAN_SERDES_DPT 0x1098 +#define IGC_DEV_ID_80003ES2LAN_COPPER_SPT 0x10BA +#define IGC_DEV_ID_80003ES2LAN_SERDES_SPT 0x10BB +#define IGC_DEV_ID_ICH8_82567V_3 0x1501 +#define IGC_DEV_ID_ICH8_IGP_M_AMT 0x1049 +#define IGC_DEV_ID_ICH8_IGP_AMT 0x104A +#define IGC_DEV_ID_ICH8_IGP_C 0x104B +#define IGC_DEV_ID_ICH8_IFE 0x104C +#define IGC_DEV_ID_ICH8_IFE_GT 0x10C4 +#define IGC_DEV_ID_ICH8_IFE_G 0x10C5 +#define IGC_DEV_ID_ICH8_IGP_M 0x104D +#define IGC_DEV_ID_ICH9_IGP_M 0x10BF +#define IGC_DEV_ID_ICH9_IGP_M_AMT 0x10F5 +#define IGC_DEV_ID_ICH9_IGP_M_V 0x10CB +#define IGC_DEV_ID_ICH9_IGP_AMT 0x10BD +#define IGC_DEV_ID_ICH9_BM 0x10E5 +#define IGC_DEV_ID_ICH9_IGP_C 0x294C +#define IGC_DEV_ID_ICH9_IFE 0x10C0 +#define IGC_DEV_ID_ICH9_IFE_GT 0x10C3 +#define IGC_DEV_ID_ICH9_IFE_G 0x10C2 +#define IGC_DEV_ID_ICH10_R_BM_LM 0x10CC +#define IGC_DEV_ID_ICH10_R_BM_LF 0x10CD +#define IGC_DEV_ID_ICH10_R_BM_V 0x10CE +#define IGC_DEV_ID_ICH10_D_BM_LM 0x10DE +#define IGC_DEV_ID_ICH10_D_BM_LF 0x10DF +#define IGC_DEV_ID_ICH10_D_BM_V 0x1525 +#define IGC_DEV_ID_PCH_M_HV_LM 0x10EA +#define IGC_DEV_ID_PCH_M_HV_LC 0x10EB +#define IGC_DEV_ID_PCH_D_HV_DM 0x10EF +#define IGC_DEV_ID_PCH_D_HV_DC 0x10F0 +#define IGC_DEV_ID_PCH2_LV_LM 0x1502 +#define IGC_DEV_ID_PCH2_LV_V 0x1503 +#define IGC_DEV_ID_PCH_LPT_I217_LM 0x153A +#define IGC_DEV_ID_PCH_LPT_I217_V 0x153B +#define IGC_DEV_ID_PCH_LPTLP_I218_LM 0x155A +#define IGC_DEV_ID_PCH_LPTLP_I218_V 0x1559 +#define IGC_DEV_ID_PCH_I218_LM2 0x15A0 +#define IGC_DEV_ID_PCH_I218_V2 0x15A1 +#define IGC_DEV_ID_PCH_I218_LM3 0x15A2 /* Wildcat Point PCH */ +#define IGC_DEV_ID_PCH_I218_V3 0x15A3 /* Wildcat Point PCH */ +#define IGC_DEV_ID_PCH_SPT_I219_LM 0x156F /* Sunrise Point PCH */ +#define IGC_DEV_ID_PCH_SPT_I219_V 0x1570 /* Sunrise Point PCH */ +#define IGC_DEV_ID_PCH_SPT_I219_LM2 0x15B7 /* Sunrise Point-H PCH */ +#define IGC_DEV_ID_PCH_SPT_I219_V2 0x15B8 /* Sunrise Point-H PCH */ +#define IGC_DEV_ID_PCH_LBG_I219_LM3 0x15B9 /* LEWISBURG PCH */ +#define IGC_DEV_ID_PCH_SPT_I219_LM4 0x15D7 +#define IGC_DEV_ID_PCH_SPT_I219_V4 0x15D8 +#define IGC_DEV_ID_PCH_SPT_I219_LM5 0x15E3 +#define IGC_DEV_ID_PCH_SPT_I219_V5 0x15D6 +#define IGC_DEV_ID_PCH_CNP_I219_LM6 0x15BD +#define IGC_DEV_ID_PCH_CNP_I219_V6 0x15BE +#define IGC_DEV_ID_PCH_CNP_I219_LM7 0x15BB +#define IGC_DEV_ID_PCH_CNP_I219_V7 0x15BC +#define IGC_DEV_ID_PCH_ICP_I219_LM8 0x15DF 
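These IGC_DEV_ID_* values (the list continues below) are 16-bit PCI device ids that pair with Intel's PCI vendor id in a driver's match table and end up in the hw->device_id field declared later in this header. A tiny, hypothetical helper showing how such a table entry is typically consumed; the function and the printable names are illustrative only:

/* Illustrative only: pretty-print two of the ids listed above. */
static inline const char *
igc_dev_id_str(u16 device_id)
{
	switch (device_id) {
	case IGC_DEV_ID_PCH_SPT_I219_LM:
		return "I219-LM (Sunrise Point PCH)";
	case IGC_DEV_ID_PCH_SPT_I219_V:
		return "I219-V (Sunrise Point PCH)";
	default:
		return "unknown igc/e1000-family device";
	}
}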
+#define IGC_DEV_ID_PCH_ICP_I219_V8 0x15E0 +#define IGC_DEV_ID_PCH_ICP_I219_LM9 0x15E1 +#define IGC_DEV_ID_PCH_ICP_I219_V9 0x15E2 +#define IGC_DEV_ID_82576 0x10C9 +#define IGC_DEV_ID_82576_FIBER 0x10E6 +#define IGC_DEV_ID_82576_SERDES 0x10E7 +#define IGC_DEV_ID_82576_QUAD_COPPER 0x10E8 +#define IGC_DEV_ID_82576_QUAD_COPPER_ET2 0x1526 +#define IGC_DEV_ID_82576_NS 0x150A +#define IGC_DEV_ID_82576_NS_SERDES 0x1518 +#define IGC_DEV_ID_82576_SERDES_QUAD 0x150D +#define IGC_DEV_ID_82576_VF 0x10CA +#define IGC_DEV_ID_82576_VF_HV 0x152D +#define IGC_DEV_ID_I350_VF 0x1520 +#define IGC_DEV_ID_I350_VF_HV 0x152F +#define IGC_DEV_ID_82575EB_COPPER 0x10A7 +#define IGC_DEV_ID_82575EB_FIBER_SERDES 0x10A9 +#define IGC_DEV_ID_82575GB_QUAD_COPPER 0x10D6 +#define IGC_DEV_ID_82580_COPPER 0x150E +#define IGC_DEV_ID_82580_FIBER 0x150F +#define IGC_DEV_ID_82580_SERDES 0x1510 +#define IGC_DEV_ID_82580_SGMII 0x1511 +#define IGC_DEV_ID_82580_COPPER_DUAL 0x1516 +#define IGC_DEV_ID_82580_QUAD_FIBER 0x1527 +#define IGC_DEV_ID_I350_COPPER 0x1521 +#define IGC_DEV_ID_I350_FIBER 0x1522 +#define IGC_DEV_ID_I350_SERDES 0x1523 +#define IGC_DEV_ID_I350_SGMII 0x1524 +#define IGC_DEV_ID_I350_DA4 0x1546 +#define IGC_DEV_ID_I210_COPPER 0x1533 +#define IGC_DEV_ID_I210_COPPER_OEM1 0x1534 +#define IGC_DEV_ID_I210_COPPER_IT 0x1535 +#define IGC_DEV_ID_I210_FIBER 0x1536 +#define IGC_DEV_ID_I210_SERDES 0x1537 +#define IGC_DEV_ID_I210_SGMII 0x1538 +#define IGC_DEV_ID_I210_COPPER_FLASHLESS 0x157B +#define IGC_DEV_ID_I210_SERDES_FLASHLESS 0x157C +#define IGC_DEV_ID_I210_SGMII_FLASHLESS 0x15F6 +#define IGC_DEV_ID_I211_COPPER 0x1539 +#define IGC_DEV_ID_I225_LM 0x15F2 +#define IGC_DEV_ID_I225_V 0x15F3 +#define IGC_DEV_ID_I225_K 0x3100 +#define IGC_DEV_ID_I225_I 0x15F8 +#define IGC_DEV_ID_I220_V 0x15F7 +#define IGC_DEV_ID_I225_BLANK_NVM 0x15FD +#define IGC_DEV_ID_I354_BACKPLANE_1GBPS 0x1F40 +#define IGC_DEV_ID_I354_SGMII 0x1F41 +#define IGC_DEV_ID_I354_BACKPLANE_2_5GBPS 0x1F45 +#define IGC_DEV_ID_DH89XXCC_SGMII 0x0438 +#define IGC_DEV_ID_DH89XXCC_SERDES 0x043A +#define IGC_DEV_ID_DH89XXCC_BACKPLANE 0x043C +#define IGC_DEV_ID_DH89XXCC_SFP 0x0440 + +#define IGC_REVISION_0 0 +#define IGC_REVISION_1 1 +#define IGC_REVISION_2 2 +#define IGC_REVISION_3 3 +#define IGC_REVISION_4 4 + +#define IGC_FUNC_0 0 +#define IGC_FUNC_1 1 +#define IGC_FUNC_2 2 +#define IGC_FUNC_3 3 + +#define IGC_ALT_MAC_ADDRESS_OFFSET_LAN0 0 +#define IGC_ALT_MAC_ADDRESS_OFFSET_LAN1 3 +#define IGC_ALT_MAC_ADDRESS_OFFSET_LAN2 6 +#define IGC_ALT_MAC_ADDRESS_OFFSET_LAN3 9 + +enum igc_mac_type { + igc_undefined = 0, + igc_82542, + igc_82543, + igc_82544, + igc_82540, + igc_82545, + igc_82545_rev_3, + igc_82546, + igc_82546_rev_3, + igc_82541, + igc_82541_rev_2, + igc_82547, + igc_82547_rev_2, + igc_82571, + igc_82572, + igc_82573, + igc_82574, + igc_82583, + igc_80003es2lan, + igc_ich8lan, + igc_ich9lan, + igc_ich10lan, + igc_pchlan, + igc_pch2lan, + igc_pch_lpt, + igc_pch_spt, + igc_pch_cnp, + igc_82575, + igc_82576, + igc_82580, + igc_i350, + igc_i354, + igc_i210, + igc_i211, + igc_i225, + igc_vfadapt, + igc_vfadapt_i350, + igc_num_macs /* List is 1-based, so subtract 1 for true count. 
*/ +}; + +enum igc_media_type { + igc_media_type_unknown = 0, + igc_media_type_copper = 1, + igc_media_type_fiber = 2, + igc_media_type_internal_serdes = 3, + igc_num_media_types +}; + +enum igc_nvm_type { + igc_nvm_unknown = 0, + igc_nvm_none, + igc_nvm_eeprom_spi, + igc_nvm_eeprom_microwire, + igc_nvm_flash_hw, + igc_nvm_invm, + igc_nvm_flash_sw +}; + +enum igc_nvm_override { + igc_nvm_override_none = 0, + igc_nvm_override_spi_small, + igc_nvm_override_spi_large, + igc_nvm_override_microwire_small, + igc_nvm_override_microwire_large +}; + +enum igc_phy_type { + igc_phy_unknown = 0, + igc_phy_none, + igc_phy_m88, + igc_phy_igp, + igc_phy_igp_2, + igc_phy_gg82563, + igc_phy_igp_3, + igc_phy_ife, + igc_phy_bm, + igc_phy_82578, + igc_phy_82577, + igc_phy_82579, + igc_phy_i217, + igc_phy_82580, + igc_phy_vf, + igc_phy_i210, + igc_phy_i225, +}; + +enum igc_bus_type { + igc_bus_type_unknown = 0, + igc_bus_type_pci, + igc_bus_type_pcix, + igc_bus_type_pci_express, + igc_bus_type_reserved +}; + +enum igc_bus_speed { + igc_bus_speed_unknown = 0, + igc_bus_speed_33, + igc_bus_speed_66, + igc_bus_speed_100, + igc_bus_speed_120, + igc_bus_speed_133, + igc_bus_speed_2500, + igc_bus_speed_5000, + igc_bus_speed_reserved +}; + +enum igc_bus_width { + igc_bus_width_unknown = 0, + igc_bus_width_pcie_x1, + igc_bus_width_pcie_x2, + igc_bus_width_pcie_x4 = 4, + igc_bus_width_pcie_x8 = 8, + igc_bus_width_32, + igc_bus_width_64, + igc_bus_width_reserved +}; + +enum igc_1000t_rx_status { + igc_1000t_rx_status_not_ok = 0, + igc_1000t_rx_status_ok, + igc_1000t_rx_status_undefined = 0xFF +}; + +enum igc_rev_polarity { + igc_rev_polarity_normal = 0, + igc_rev_polarity_reversed, + igc_rev_polarity_undefined = 0xFF +}; + +enum igc_fc_mode { + igc_fc_none = 0, + igc_fc_rx_pause, + igc_fc_tx_pause, + igc_fc_full, + igc_fc_default = 0xFF +}; + +enum igc_ffe_config { + igc_ffe_config_enabled = 0, + igc_ffe_config_active, + igc_ffe_config_blocked +}; + +enum igc_dsp_config { + igc_dsp_config_disabled = 0, + igc_dsp_config_enabled, + igc_dsp_config_activated, + igc_dsp_config_undefined = 0xFF +}; + +enum igc_ms_type { + igc_ms_hw_default = 0, + igc_ms_force_master, + igc_ms_force_slave, + igc_ms_auto +}; + +enum igc_smart_speed { + igc_smart_speed_default = 0, + igc_smart_speed_on, + igc_smart_speed_off +}; + +enum igc_serdes_link_state { + igc_serdes_link_down = 0, + igc_serdes_link_autoneg_progress, + igc_serdes_link_autoneg_complete, + igc_serdes_link_forced_up +}; + +enum igc_invm_structure_type { + igc_invm_uninitialized_structure = 0x00, + igc_invm_word_autoload_structure = 0x01, + igc_invm_csr_autoload_structure = 0x02, + igc_invm_phy_register_autoload_structure = 0x03, + igc_invm_rsa_key_sha256_structure = 0x04, + igc_invm_invalidated_structure = 0x0f, +}; + +#define __le16 u16 +#define __le32 u32 +#define __le64 u64 +/* Receive Descriptor */ +struct igc_rx_desc { + __le64 buffer_addr; /* Address of the descriptor's data buffer */ + __le16 length; /* Length of data DMAed into data buffer */ + __le16 csum; /* Packet checksum */ + u8 status; /* Descriptor status */ + u8 errors; /* Descriptor Errors */ + __le16 special; +}; + +/* Receive Descriptor - Extended */ +union igc_rx_desc_extended { + struct { + __le64 buffer_addr; + __le64 reserved; + } read; + struct { + struct { + __le32 mrq; /* Multiple Rx Queues */ + union { + __le32 rss; /* RSS Hash */ + struct { + __le16 ip_id; /* IP id */ + __le16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; /* ext status/error */ 
+ __le16 length; + __le16 vlan; /* VLAN tag */ + } upper; + } wb; /* writeback */ +}; + +#define MAX_PS_BUFFERS 4 + +/* Number of packet split data buffers (not including the header buffer) */ +#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1) + +/* Receive Descriptor - Packet Split */ +union igc_rx_desc_packet_split { + struct { + /* one buffer for protocol header(s), three data buffers */ + __le64 buffer_addr[MAX_PS_BUFFERS]; + } read; + struct { + struct { + __le32 mrq; /* Multiple Rx Queues */ + union { + __le32 rss; /* RSS Hash */ + struct { + __le16 ip_id; /* IP id */ + __le16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; /* ext status/error */ + __le16 length0; /* length of buffer 0 */ + __le16 vlan; /* VLAN tag */ + } middle; + struct { + __le16 header_status; + /* length of buffers 1-3 */ + __le16 length[PS_PAGE_BUFFERS]; + } upper; + __le64 reserved; + } wb; /* writeback */ +}; + +/* Transmit Descriptor */ +struct igc_tx_desc { + __le64 buffer_addr; /* Address of the descriptor's data buffer */ + union { + __le32 data; + struct { + __le16 length; /* Data buffer length */ + u8 cso; /* Checksum offset */ + u8 cmd; /* Descriptor control */ + } flags; + } lower; + union { + __le32 data; + struct { + u8 status; /* Descriptor status */ + u8 css; /* Checksum start */ + __le16 special; + } fields; + } upper; +}; + +/* Offload Context Descriptor */ +struct igc_context_desc { + union { + __le32 ip_config; + struct { + u8 ipcss; /* IP checksum start */ + u8 ipcso; /* IP checksum offset */ + __le16 ipcse; /* IP checksum end */ + } ip_fields; + } lower_setup; + union { + __le32 tcp_config; + struct { + u8 tucss; /* TCP checksum start */ + u8 tucso; /* TCP checksum offset */ + __le16 tucse; /* TCP checksum end */ + } tcp_fields; + } upper_setup; + __le32 cmd_and_length; + union { + __le32 data; + struct { + u8 status; /* Descriptor status */ + u8 hdr_len; /* Header length */ + __le16 mss; /* Maximum segment size */ + } fields; + } tcp_seg_setup; +}; + +/* Offload data descriptor */ +struct igc_data_desc { + __le64 buffer_addr; /* Address of the descriptor's buffer address */ + union { + __le32 data; + struct { + __le16 length; /* Data buffer length */ + u8 typ_len_ext; + u8 cmd; + } flags; + } lower; + union { + __le32 data; + struct { + u8 status; /* Descriptor status */ + u8 popts; /* Packet Options */ + __le16 special; + } fields; + } upper; +}; + +/* Statistics counters collected by the MAC */ +struct igc_hw_stats { + u64 crcerrs; + u64 algnerrc; + u64 symerrs; + u64 rxerrc; + u64 mpc; + u64 scc; + u64 ecol; + u64 mcc; + u64 latecol; + u64 colc; + u64 dc; + u64 tncrs; + u64 sec; + u64 cexterr; + u64 rlec; + u64 xonrxc; + u64 xontxc; + u64 xoffrxc; + u64 xofftxc; + u64 fcruc; + u64 prc64; + u64 prc127; + u64 prc255; + u64 prc511; + u64 prc1023; + u64 prc1522; + u64 gprc; + u64 bprc; + u64 mprc; + u64 gptc; + u64 gorc; + u64 gotc; + u64 rnbc; + u64 ruc; + u64 rfc; + u64 roc; + u64 rjc; + u64 mgprc; + u64 mgpdc; + u64 mgptc; + u64 tor; + u64 tot; + u64 tpr; + u64 tpt; + u64 ptc64; + u64 ptc127; + u64 ptc255; + u64 ptc511; + u64 ptc1023; + u64 ptc1522; + u64 mptc; + u64 bptc; + u64 tsctc; + u64 tsctfc; + u64 iac; + u64 icrxptc; + u64 icrxatc; + u64 ictxptc; + u64 ictxatc; + u64 ictxqec; + u64 ictxqmtc; + u64 icrxdmtc; + u64 icrxoc; + u64 cbtmpc; + u64 htdpmc; + u64 cbrdpc; + u64 cbrmpc; + u64 rpthc; + u64 hgptc; + u64 htcbdpc; + u64 hgorc; + u64 hgotc; + u64 lenerrs; + u64 scvpc; + u64 hrmpc; + u64 doosync; + u64 o2bgptc; + u64 o2bspc; + u64 
b2ospc; + u64 b2ogprc; +}; + +struct igc_vf_stats { + u64 base_gprc; + u64 base_gptc; + u64 base_gorc; + u64 base_gotc; + u64 base_mprc; + u64 base_gotlbc; + u64 base_gptlbc; + u64 base_gorlbc; + u64 base_gprlbc; + + u32 last_gprc; + u32 last_gptc; + u32 last_gorc; + u32 last_gotc; + u32 last_mprc; + u32 last_gotlbc; + u32 last_gptlbc; + u32 last_gorlbc; + u32 last_gprlbc; + + u64 gprc; + u64 gptc; + u64 gorc; + u64 gotc; + u64 mprc; + u64 gotlbc; + u64 gptlbc; + u64 gorlbc; + u64 gprlbc; +}; + +struct igc_phy_stats { + u32 idle_errors; + u32 receive_errors; +}; + +struct igc_host_mng_dhcp_cookie { + u32 signature; + u8 status; + u8 reserved0; + u16 vlan_id; + u32 reserved1; + u16 reserved2; + u8 reserved3; + u8 checksum; +}; + +/* Host Interface "Rev 1" */ +struct igc_host_command_header { + u8 command_id; + u8 command_length; + u8 command_options; + u8 checksum; +}; + +#define IGC_HI_MAX_DATA_LENGTH 252 +struct igc_host_command_info { + struct igc_host_command_header command_header; + u8 command_data[IGC_HI_MAX_DATA_LENGTH]; +}; + +/* Host Interface "Rev 2" */ +struct igc_host_mng_command_header { + u8 command_id; + u8 checksum; + u16 reserved1; + u16 reserved2; + u16 command_length; +}; + +#define IGC_HI_MAX_MNG_DATA_LENGTH 0x6F8 +struct igc_host_mng_command_info { + struct igc_host_mng_command_header command_header; + u8 command_data[IGC_HI_MAX_MNG_DATA_LENGTH]; +}; + +#include "igc_mac.h" +#include "igc_phy.h" +#include "igc_nvm.h" +#include "igc_manage.h" + +/* Function pointers for the MAC. */ +struct igc_mac_operations { + s32 (*init_params)(struct igc_hw *hw); + s32 (*id_led_init)(struct igc_hw *hw); + s32 (*blink_led)(struct igc_hw *hw); + bool (*check_mng_mode)(struct igc_hw *hw); + s32 (*check_for_link)(struct igc_hw *hw); + s32 (*cleanup_led)(struct igc_hw *hw); + void (*clear_hw_cntrs)(struct igc_hw *hw); + void (*clear_vfta)(struct igc_hw *hw); + s32 (*get_bus_info)(struct igc_hw *hw); + void (*set_lan_id)(struct igc_hw *hw); + s32 (*get_link_up_info)(struct igc_hw *hw, u16 *speed, u16 *duplex); + s32 (*led_on)(struct igc_hw *hw); + s32 (*led_off)(struct igc_hw *hw); + void (*update_mc_addr_list)(struct igc_hw *hw, + u8 *mc_addr_list, u32 count); + s32 (*reset_hw)(struct igc_hw *hw); + s32 (*init_hw)(struct igc_hw *hw); + void (*shutdown_serdes)(struct igc_hw *hw); + void (*power_up_serdes)(struct igc_hw *hw); + s32 (*setup_link)(struct igc_hw *hw); + s32 (*setup_physical_interface)(struct igc_hw *hw); + s32 (*setup_led)(struct igc_hw *hw); + void (*write_vfta)(struct igc_hw *hw, u32 offset, u32 value); + void (*config_collision_dist)(struct igc_hw *hw); + int (*rar_set)(struct igc_hw *hw, u8 *addr, u32 index); + s32 (*read_mac_addr)(struct igc_hw *hw); + s32 (*validate_mdi_setting)(struct igc_hw *hw); + s32 (*acquire_swfw_sync)(struct igc_hw *hw, u16 mask); + void (*release_swfw_sync)(struct igc_hw *hw, u16 mask); +}; + +/* When to use various PHY register access functions: + * + * Func Caller + * Function Does Does When to use + * ~~~~~~~~~~~~ ~~~~~ ~~~~~~ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * X_reg L,P,A n/a for simple PHY reg accesses + * X_reg_locked P,A L for multiple accesses of different regs + * on different pages + * X_reg_page A L,P for multiple accesses of different regs + * on the same page + * + * Where X=[read|write], L=locking, P=sets page, A=register access + * + */ +struct igc_phy_operations { + s32 (*init_params)(struct igc_hw *hw); + s32 (*acquire)(struct igc_hw *hw); + s32 (*cfg_on_link_up)(struct igc_hw *hw); + s32 (*check_polarity)(struct igc_hw *hw); + 
s32 (*check_reset_block)(struct igc_hw *hw); + s32 (*commit)(struct igc_hw *hw); + s32 (*force_speed_duplex)(struct igc_hw *hw); + s32 (*get_cfg_done)(struct igc_hw *hw); + s32 (*get_cable_length)(struct igc_hw *hw); + s32 (*get_info)(struct igc_hw *hw); + s32 (*set_page)(struct igc_hw *hw, u16 page); + s32 (*read_reg)(struct igc_hw *hw, u32 offset, u16 *data); + s32 (*read_reg_locked)(struct igc_hw *hw, u32 offset, u16 *data); + s32 (*read_reg_page)(struct igc_hw *hw, u32 offset, u16 *data); + void (*release)(struct igc_hw *hw); + s32 (*reset)(struct igc_hw *hw); + s32 (*set_d0_lplu_state)(struct igc_hw *hw, bool active); + s32 (*set_d3_lplu_state)(struct igc_hw *hw, bool active); + s32 (*write_reg)(struct igc_hw *hw, u32 offset, u16 data); + s32 (*write_reg_locked)(struct igc_hw *hw, u32 offset, u16 data); + s32 (*write_reg_page)(struct igc_hw *hw, u32 offset, u16 data); + void (*power_up)(struct igc_hw *hw); + void (*power_down)(struct igc_hw *hw); + s32 (*read_i2c_byte)(struct igc_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data); + s32 (*write_i2c_byte)(struct igc_hw *hw, u8 byte_offset, + u8 dev_addr, u8 data); +}; + +/* Function pointers for the NVM. */ +struct igc_nvm_operations { + s32 (*init_params)(struct igc_hw *hw); + s32 (*acquire)(struct igc_hw *hw); + s32 (*read)(struct igc_hw *hw, u16 offset, u16 words, u16 *data); + void (*release)(struct igc_hw *hw); + void (*reload)(struct igc_hw *hw); + s32 (*update)(struct igc_hw *hw); + s32 (*valid_led_default)(struct igc_hw *hw, u16 *data); + s32 (*validate)(struct igc_hw *hw); + s32 (*write)(struct igc_hw *hw, u16 offset, u16 words, u16 *data); +}; + +struct igc_info { + s32 (*get_invariants)(struct igc_hw *hw); + struct igc_mac_operations *mac_ops; + const struct igc_phy_operations *phy_ops; + struct igc_nvm_operations *nvm_ops; +}; + +extern const struct igc_info igc_i225_info; + +struct igc_mac_info { + struct igc_mac_operations ops; + u8 addr[ETH_ADDR_LEN]; + u8 perm_addr[ETH_ADDR_LEN]; + + enum igc_mac_type type; + + u32 collision_delta; + u32 ledctl_default; + u32 ledctl_mode1; + u32 ledctl_mode2; + u32 mc_filter_type; + u32 tx_packet_delta; + u32 txcw; + + u16 current_ifs_val; + u16 ifs_max_val; + u16 ifs_min_val; + u16 ifs_ratio; + u16 ifs_step_size; + u16 mta_reg_count; + u16 uta_reg_count; + + /* Maximum size of the MTA register table in all supported adapters */ +#define MAX_MTA_REG 128 + u32 mta_shadow[MAX_MTA_REG]; + u16 rar_entry_count; + + u8 forced_speed_duplex; + + bool adaptive_ifs; + bool has_fwsm; + bool arc_subsystem_valid; + bool asf_firmware_present; + bool autoneg; + bool autoneg_failed; + bool get_link_status; + bool in_ifs_mode; + bool report_tx_early; + enum igc_serdes_link_state serdes_link_state; + bool serdes_has_link; + bool tx_pkt_filtering; +}; + +struct igc_phy_info { + struct igc_phy_operations ops; + enum igc_phy_type type; + + enum igc_1000t_rx_status local_rx; + enum igc_1000t_rx_status remote_rx; + enum igc_ms_type ms_type; + enum igc_ms_type original_ms_type; + enum igc_rev_polarity cable_polarity; + enum igc_smart_speed smart_speed; + + u32 addr; + u32 id; + u32 reset_delay_us; /* in usec */ + u32 revision; + + enum igc_media_type media_type; + + u16 autoneg_advertised; + u16 autoneg_mask; + u16 cable_length; + u16 max_cable_length; + u16 min_cable_length; + + u8 mdix; + + bool disable_polarity_correction; + bool is_mdix; + bool polarity_correction; + bool speed_downgraded; + bool autoneg_wait_to_complete; +}; + +struct igc_nvm_info { + struct igc_nvm_operations ops; + enum igc_nvm_type type; 
+ enum igc_nvm_override override; + + u32 flash_bank_size; + u32 flash_base_addr; + + u16 word_size; + u16 delay_usec; + u16 address_bits; + u16 opcode_bits; + u16 page_size; +}; + +struct igc_bus_info { + enum igc_bus_type type; + enum igc_bus_speed speed; + enum igc_bus_width width; + + u16 func; + u16 pci_cmd_word; +}; + +struct igc_fc_info { + u32 high_water; /* Flow control high-water mark */ + u32 low_water; /* Flow control low-water mark */ + u16 pause_time; /* Flow control pause timer */ + u16 refresh_time; /* Flow control refresh timer */ + bool send_xon; /* Flow control send XON */ + bool strict_ieee; /* Strict IEEE mode */ + enum igc_fc_mode current_mode; /* FC mode in effect */ + enum igc_fc_mode requested_mode; /* FC mode requested by caller */ +}; + +struct igc_mbx_operations { + s32 (*init_params)(struct igc_hw *hw); +}; + +struct igc_mbx_stats { + u32 msgs_tx; + u32 msgs_rx; + + u32 acks; + u32 reqs; + u32 rsts; +}; + +struct igc_mbx_info { + struct igc_mbx_operations ops; + struct igc_mbx_stats stats; + u32 timeout; + u32 usec_delay; + u16 size; +}; + +struct igc_dev_spec_82541 { + enum igc_dsp_config dsp_config; + enum igc_ffe_config ffe_config; + u16 spd_default; + bool phy_init_script; +}; + +struct igc_dev_spec_82542 { + bool dma_fairness; +}; + +struct igc_dev_spec_82543 { + u32 tbi_compatibility; + bool dma_fairness; + bool init_phy_disabled; +}; + +struct igc_dev_spec_82571 { + bool laa_is_present; + u32 smb_counter; + IGC_MUTEX swflag_mutex; +}; + +struct igc_dev_spec_80003es2lan { + bool mdic_wa_enable; +}; + +struct igc_shadow_ram { + u16 value; + bool modified; +}; + +#define IGC_SHADOW_RAM_WORDS 2048 + +/* I218 PHY Ultra Low Power (ULP) states */ +enum igc_ulp_state { + igc_ulp_state_unknown, + igc_ulp_state_off, + igc_ulp_state_on, +}; + +struct igc_dev_spec_ich8lan { + bool kmrn_lock_loss_workaround_enabled; + struct igc_shadow_ram shadow_ram[IGC_SHADOW_RAM_WORDS]; + IGC_MUTEX nvm_mutex; + IGC_MUTEX swflag_mutex; + bool nvm_k1_enabled; + bool disable_k1_off; + bool eee_disable; + u16 eee_lp_ability; + enum igc_ulp_state ulp_state; + bool ulp_capability_disabled; + bool during_suspend_flow; + bool during_dpg_exit; + u16 lat_enc; + u16 max_ltr_enc; + bool smbus_disable; +}; + +struct igc_dev_spec_82575 { + bool sgmii_active; + bool global_device_reset; + bool eee_disable; + bool module_plugged; + bool clear_semaphore_once; + u32 mtu; + struct sfp_igc_flags eth_flags; + u8 media_port; + bool media_changed; +}; + +struct igc_dev_spec_vf { + u32 vf_number; + u32 v2p_mailbox; +}; + +struct igc_dev_spec_i225 { + bool global_device_reset; + bool eee_disable; + bool clear_semaphore_once; + bool module_plugged; + u8 media_port; + bool mas_capable; + u32 mtu; +}; + +struct igc_hw { + void *back; + + u8 *hw_addr; + u8 *flash_address; + unsigned long io_base; + + struct igc_mac_info mac; + struct igc_fc_info fc; + struct igc_phy_info phy; + struct igc_nvm_info nvm; + struct igc_bus_info bus; + struct igc_mbx_info mbx; + struct igc_host_mng_dhcp_cookie mng_cookie; + + union { + struct igc_dev_spec_82541 _82541; + struct igc_dev_spec_82542 _82542; + struct igc_dev_spec_82543 _82543; + struct igc_dev_spec_82571 _82571; + struct igc_dev_spec_80003es2lan _80003es2lan; + struct igc_dev_spec_ich8lan ich8lan; + struct igc_dev_spec_82575 _82575; + struct igc_dev_spec_vf vf; + struct igc_dev_spec_i225 _i225; + } dev_spec; + + u16 device_id; + u16 subsystem_vendor_id; + u16 subsystem_device_id; + u16 vendor_id; + + u8 revision_id; +}; + +#include "igc_82571.h" +#include 
"igc_ich8lan.h" +#include "igc_82575.h" +#include "igc_i225.h" +#include "igc_base.h" + +/* These functions must be implemented by drivers */ +void igc_pci_clear_mwi(struct igc_hw *hw); +void igc_pci_set_mwi(struct igc_hw *hw); +s32 igc_read_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value); +s32 igc_write_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value); +void igc_read_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value); +void igc_write_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value); + +#endif diff --git a/src/spdk/dpdk/drivers/net/igc/base/igc_i225.c b/src/spdk/dpdk/drivers/net/igc/base/igc_i225.c new file mode 100644 index 000000000..060b2f8f9 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/igc/base/igc_i225.c @@ -0,0 +1,1378 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + +#include "igc_api.h" + +static s32 igc_init_nvm_params_i225(struct igc_hw *hw); +static s32 igc_init_mac_params_i225(struct igc_hw *hw); +static s32 igc_init_phy_params_i225(struct igc_hw *hw); +static s32 igc_reset_hw_i225(struct igc_hw *hw); +static s32 igc_acquire_nvm_i225(struct igc_hw *hw); +static void igc_release_nvm_i225(struct igc_hw *hw); +static s32 igc_get_hw_semaphore_i225(struct igc_hw *hw); +static s32 __igc_write_nvm_srwr(struct igc_hw *hw, u16 offset, u16 words, + u16 *data); +static s32 igc_pool_flash_update_done_i225(struct igc_hw *hw); +static s32 igc_valid_led_default_i225(struct igc_hw *hw, u16 *data); + +/** + * igc_init_nvm_params_i225 - Init NVM func ptrs. + * @hw: pointer to the HW structure + **/ +static s32 igc_init_nvm_params_i225(struct igc_hw *hw) +{ + struct igc_nvm_info *nvm = &hw->nvm; + u32 eecd = IGC_READ_REG(hw, IGC_EECD); + u16 size; + + DEBUGFUNC("igc_init_nvm_params_i225"); + + size = (u16)((eecd & IGC_EECD_SIZE_EX_MASK) >> + IGC_EECD_SIZE_EX_SHIFT); + /* + * Added to a constant, "size" becomes the left-shift value + * for setting word_size. + */ + size += NVM_WORD_SIZE_BASE_SHIFT; + + /* Just in case size is out of range, cap it to the largest + * EEPROM size supported + */ + if (size > 15) + size = 15; + + nvm->word_size = 1 << size; + nvm->opcode_bits = 8; + nvm->delay_usec = 1; + nvm->type = igc_nvm_eeprom_spi; + + + nvm->page_size = eecd & IGC_EECD_ADDR_BITS ? 32 : 8; + nvm->address_bits = eecd & IGC_EECD_ADDR_BITS ? + 16 : 8; + + if (nvm->word_size == (1 << 15)) + nvm->page_size = 128; + + nvm->ops.acquire = igc_acquire_nvm_i225; + nvm->ops.release = igc_release_nvm_i225; + nvm->ops.valid_led_default = igc_valid_led_default_i225; + if (igc_get_flash_presence_i225(hw)) { + hw->nvm.type = igc_nvm_flash_hw; + nvm->ops.read = igc_read_nvm_srrd_i225; + nvm->ops.write = igc_write_nvm_srwr_i225; + nvm->ops.validate = igc_validate_nvm_checksum_i225; + nvm->ops.update = igc_update_nvm_checksum_i225; + } else { + hw->nvm.type = igc_nvm_invm; + nvm->ops.write = igc_null_write_nvm; + nvm->ops.validate = igc_null_ops_generic; + nvm->ops.update = igc_null_ops_generic; + } + + return IGC_SUCCESS; +} + +/** + * igc_init_mac_params_i225 - Init MAC func ptrs. 
+ * @hw: pointer to the HW structure + **/ +static s32 igc_init_mac_params_i225(struct igc_hw *hw) +{ + struct igc_mac_info *mac = &hw->mac; + struct igc_dev_spec_i225 *dev_spec = &hw->dev_spec._i225; + + DEBUGFUNC("igc_init_mac_params_i225"); + + /* Initialize function pointer */ + igc_init_mac_ops_generic(hw); + + /* Set media type */ + hw->phy.media_type = igc_media_type_copper; + /* Set mta register count */ + mac->mta_reg_count = 128; + /* Set rar entry count */ + mac->rar_entry_count = IGC_RAR_ENTRIES_BASE; + + /* reset */ + mac->ops.reset_hw = igc_reset_hw_i225; + /* hw initialization */ + mac->ops.init_hw = igc_init_hw_i225; + /* link setup */ + mac->ops.setup_link = igc_setup_link_generic; + /* check for link */ + mac->ops.check_for_link = igc_check_for_link_i225; + /* link info */ + mac->ops.get_link_up_info = igc_get_speed_and_duplex_copper_generic; + /* acquire SW_FW sync */ + mac->ops.acquire_swfw_sync = igc_acquire_swfw_sync_i225; + /* release SW_FW sync */ + mac->ops.release_swfw_sync = igc_release_swfw_sync_i225; + + /* Allow a single clear of the SW semaphore on I225 */ + dev_spec->clear_semaphore_once = true; + mac->ops.setup_physical_interface = igc_setup_copper_link_i225; + + /* Set if part includes ASF firmware */ + mac->asf_firmware_present = true; + + /* multicast address update */ + mac->ops.update_mc_addr_list = igc_update_mc_addr_list_generic; + + mac->ops.write_vfta = igc_write_vfta_generic; + + return IGC_SUCCESS; +} + +/** + * igc_init_phy_params_i225 - Init PHY func ptrs. + * @hw: pointer to the HW structure + **/ +static s32 igc_init_phy_params_i225(struct igc_hw *hw) +{ + struct igc_phy_info *phy = &hw->phy; + s32 ret_val = IGC_SUCCESS; + u32 ctrl_ext; + + DEBUGFUNC("igc_init_phy_params_i225"); + + phy->ops.read_i2c_byte = igc_read_i2c_byte_generic; + phy->ops.write_i2c_byte = igc_write_i2c_byte_generic; + + if (hw->phy.media_type != igc_media_type_copper) { + phy->type = igc_phy_none; + goto out; + } + + phy->ops.power_up = igc_power_up_phy_copper; + phy->ops.power_down = igc_power_down_phy_copper_base; + + phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT_2500; + + phy->reset_delay_us = 100; + + phy->ops.acquire = igc_acquire_phy_base; + phy->ops.check_reset_block = igc_check_reset_block_generic; + phy->ops.commit = igc_phy_sw_reset_generic; + phy->ops.release = igc_release_phy_base; + + ctrl_ext = IGC_READ_REG(hw, IGC_CTRL_EXT); + + /* Make sure the PHY is in a good state. Several people have reported + * firmware leaving the PHY's page select register set to something + * other than the default of zero, which causes the PHY ID read to + * access something other than the intended register. + */ + ret_val = hw->phy.ops.reset(hw); + if (ret_val) + goto out; + + IGC_WRITE_REG(hw, IGC_CTRL_EXT, ctrl_ext); + phy->ops.read_reg = igc_read_phy_reg_gpy; + phy->ops.write_reg = igc_write_phy_reg_gpy; + + ret_val = igc_get_phy_id(hw); + /* Verify phy id and set remaining function pointers */ + switch (phy->id) { + case I225_I_PHY_ID: + phy->type = igc_phy_i225; + phy->ops.set_d0_lplu_state = igc_set_d0_lplu_state_i225; + phy->ops.set_d3_lplu_state = igc_set_d3_lplu_state_i225; + /* TODO - complete with GPY PHY information */ + break; + default: + ret_val = -IGC_ERR_PHY; + goto out; + } + +out: + return ret_val; +} + +/** + * igc_reset_hw_i225 - Reset hardware + * @hw: pointer to the HW structure + * + * This resets the hardware into a known state. 
+ **/ +static s32 igc_reset_hw_i225(struct igc_hw *hw) +{ + u32 ctrl; + s32 ret_val; + + DEBUGFUNC("igc_reset_hw_i225"); + + /* + * Prevent the PCI-E bus from sticking if there is no TLP connection + * on the last TLP read/write transaction when MAC is reset. + */ + ret_val = igc_disable_pcie_master_generic(hw); + if (ret_val) + DEBUGOUT("PCI-E Master disable polling has failed.\n"); + + DEBUGOUT("Masking off all interrupts\n"); + IGC_WRITE_REG(hw, IGC_IMC, 0xffffffff); + + IGC_WRITE_REG(hw, IGC_RCTL, 0); + IGC_WRITE_REG(hw, IGC_TCTL, IGC_TCTL_PSP); + IGC_WRITE_FLUSH(hw); + + msec_delay(10); + + ctrl = IGC_READ_REG(hw, IGC_CTRL); + + DEBUGOUT("Issuing a global reset to MAC\n"); + IGC_WRITE_REG(hw, IGC_CTRL, ctrl | IGC_CTRL_RST); + + ret_val = igc_get_auto_rd_done_generic(hw); + if (ret_val) { + /* + * When auto config read does not complete, do not + * return with an error. This can happen in situations + * where there is no eeprom and prevents getting link. + */ + DEBUGOUT("Auto Read Done did not complete\n"); + } + + /* Clear any pending interrupt events. */ + IGC_WRITE_REG(hw, IGC_IMC, 0xffffffff); + IGC_READ_REG(hw, IGC_ICR); + + /* Install any alternate MAC address into RAR0 */ + ret_val = igc_check_alt_mac_addr_generic(hw); + + return ret_val; +} + +/* igc_acquire_nvm_i225 - Request for access to EEPROM + * @hw: pointer to the HW structure + * + * Acquire the necessary semaphores for exclusive access to the EEPROM. + * Set the EEPROM access request bit and wait for EEPROM access grant bit. + * Return successful if access grant bit set, else clear the request for + * EEPROM access and return -IGC_ERR_NVM (-1). + */ +static s32 igc_acquire_nvm_i225(struct igc_hw *hw) +{ + s32 ret_val; + + DEBUGFUNC("igc_acquire_nvm_i225"); + + ret_val = igc_acquire_swfw_sync_i225(hw, IGC_SWFW_EEP_SM); + + return ret_val; +} + +/* igc_release_nvm_i225 - Release exclusive access to EEPROM + * @hw: pointer to the HW structure + * + * Stop any current commands to the EEPROM and clear the EEPROM request bit, + * then release the semaphores acquired. + */ +static void igc_release_nvm_i225(struct igc_hw *hw) +{ + DEBUGFUNC("igc_release_nvm_i225"); + + igc_release_swfw_sync_i225(hw, IGC_SWFW_EEP_SM); +} + +/* igc_acquire_swfw_sync_i225 - Acquire SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to acquire + * + * Acquire the SW/FW semaphore to access the PHY or NVM. The mask + * will also specify which port we're acquiring the lock for. 
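+ *
+ * Illustrative usage sketch (not taken from the original sources; assumes
+ * an initialised struct igc_hw and reuses the IGC_SWFW_EEP_SM mask that
+ * igc_acquire_nvm_i225() passes above):
+ *
+ *	if (igc_acquire_swfw_sync_i225(hw, IGC_SWFW_EEP_SM) == IGC_SUCCESS) {
+ *		... access the shared EEPROM/flash resource ...
+ *		igc_release_swfw_sync_i225(hw, IGC_SWFW_EEP_SM);
+ *	}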
+ */ +s32 igc_acquire_swfw_sync_i225(struct igc_hw *hw, u16 mask) +{ + u32 swfw_sync; + u32 swmask = mask; + u32 fwmask = mask << 16; + s32 ret_val = IGC_SUCCESS; + s32 i = 0, timeout = 200; /* FIXME: find real value to use here */ + + DEBUGFUNC("igc_acquire_swfw_sync_i225"); + + while (i < timeout) { + if (igc_get_hw_semaphore_i225(hw)) { + ret_val = -IGC_ERR_SWFW_SYNC; + goto out; + } + + swfw_sync = IGC_READ_REG(hw, IGC_SW_FW_SYNC); + if (!(swfw_sync & (fwmask | swmask))) + break; + + /* Firmware currently using resource (fwmask) + * or other software thread using resource (swmask) + */ + igc_put_hw_semaphore_generic(hw); + msec_delay_irq(5); + i++; + } + + if (i == timeout) { + DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n"); + ret_val = -IGC_ERR_SWFW_SYNC; + goto out; + } + + swfw_sync |= swmask; + IGC_WRITE_REG(hw, IGC_SW_FW_SYNC, swfw_sync); + + igc_put_hw_semaphore_generic(hw); + +out: + return ret_val; +} + +/* igc_release_swfw_sync_i225 - Release SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to acquire + * + * Release the SW/FW semaphore used to access the PHY or NVM. The mask + * will also specify which port we're releasing the lock for. + */ +void igc_release_swfw_sync_i225(struct igc_hw *hw, u16 mask) +{ + u32 swfw_sync; + + DEBUGFUNC("igc_release_swfw_sync_i225"); + + while (igc_get_hw_semaphore_i225(hw) != IGC_SUCCESS) + ; /* Empty */ + + swfw_sync = IGC_READ_REG(hw, IGC_SW_FW_SYNC); + swfw_sync &= ~mask; + IGC_WRITE_REG(hw, IGC_SW_FW_SYNC, swfw_sync); + + igc_put_hw_semaphore_generic(hw); +} + +/* + * igc_setup_copper_link_i225 - Configure copper link settings + * @hw: pointer to the HW structure + * + * Configures the link for auto-neg or forced speed and duplex. Then we check + * for link, once link is established calls to configure collision distance + * and flow control are called. + */ +s32 igc_setup_copper_link_i225(struct igc_hw *hw) +{ + u32 phpm_reg; + s32 ret_val; + u32 ctrl; + + DEBUGFUNC("igc_setup_copper_link_i225"); + + ctrl = IGC_READ_REG(hw, IGC_CTRL); + ctrl |= IGC_CTRL_SLU; + ctrl &= ~(IGC_CTRL_FRCSPD | IGC_CTRL_FRCDPX); + IGC_WRITE_REG(hw, IGC_CTRL, ctrl); + + phpm_reg = IGC_READ_REG(hw, IGC_I225_PHPM); + phpm_reg &= ~IGC_I225_PHPM_GO_LINKD; + IGC_WRITE_REG(hw, IGC_I225_PHPM, phpm_reg); + + ret_val = igc_setup_copper_link_generic(hw); + + return ret_val; +} + +/* igc_get_hw_semaphore_i225 - Acquire hardware semaphore + * @hw: pointer to the HW structure + * + * Acquire the HW semaphore to access the PHY or NVM + */ +static s32 igc_get_hw_semaphore_i225(struct igc_hw *hw) +{ + u32 swsm; + s32 timeout = hw->nvm.word_size + 1; + s32 i = 0; + + DEBUGFUNC("igc_get_hw_semaphore_i225"); + + /* Get the SW semaphore */ + while (i < timeout) { + swsm = IGC_READ_REG(hw, IGC_SWSM); + if (!(swsm & IGC_SWSM_SMBI)) + break; + + usec_delay(50); + i++; + } + + if (i == timeout) { + /* In rare circumstances, the SW semaphore may already be held + * unintentionally. Clear the semaphore once before giving up. + */ + if (hw->dev_spec._i225.clear_semaphore_once) { + hw->dev_spec._i225.clear_semaphore_once = false; + igc_put_hw_semaphore_generic(hw); + for (i = 0; i < timeout; i++) { + swsm = IGC_READ_REG(hw, IGC_SWSM); + if (!(swsm & IGC_SWSM_SMBI)) + break; + + usec_delay(50); + } + } + + /* If we do not have the semaphore here, we have to give up. */ + if (i == timeout) { + DEBUGOUT("Driver can't access device -\n"); + DEBUGOUT("SMBI bit is set.\n"); + return -IGC_ERR_NVM; + } + } + + /* Get the FW semaphore. 
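+	 * The host semaphore (SWSM.SMBI) was taken by the loop above; now set
+	 * SWESMBI and read it back - the bit only latches once the firmware
+	 * side is free, at which point software owns the shared NVM/PHY lock.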
*/ + for (i = 0; i < timeout; i++) { + swsm = IGC_READ_REG(hw, IGC_SWSM); + IGC_WRITE_REG(hw, IGC_SWSM, swsm | IGC_SWSM_SWESMBI); + + /* Semaphore acquired if bit latched */ + if (IGC_READ_REG(hw, IGC_SWSM) & IGC_SWSM_SWESMBI) + break; + + usec_delay(50); + } + + if (i == timeout) { + /* Release semaphores */ + igc_put_hw_semaphore_generic(hw); + DEBUGOUT("Driver can't access the NVM\n"); + return -IGC_ERR_NVM; + } + + return IGC_SUCCESS; +} + +/* igc_read_nvm_srrd_i225 - Reads Shadow Ram using EERD register + * @hw: pointer to the HW structure + * @offset: offset of word in the Shadow Ram to read + * @words: number of words to read + * @data: word read from the Shadow Ram + * + * Reads a 16 bit word from the Shadow Ram using the EERD register. + * Uses necessary synchronization semaphores. + */ +s32 igc_read_nvm_srrd_i225(struct igc_hw *hw, u16 offset, u16 words, + u16 *data) +{ + s32 status = IGC_SUCCESS; + u16 i, count; + + DEBUGFUNC("igc_read_nvm_srrd_i225"); + + /* We cannot hold synchronization semaphores for too long, + * because of forceful takeover procedure. However it is more efficient + * to read in bursts than synchronizing access for each word. + */ + for (i = 0; i < words; i += IGC_EERD_EEWR_MAX_COUNT) { + count = (words - i) / IGC_EERD_EEWR_MAX_COUNT > 0 ? + IGC_EERD_EEWR_MAX_COUNT : (words - i); + if (hw->nvm.ops.acquire(hw) == IGC_SUCCESS) { + status = igc_read_nvm_eerd(hw, offset, count, + data + i); + hw->nvm.ops.release(hw); + } else { + status = IGC_ERR_SWFW_SYNC; + } + + if (status != IGC_SUCCESS) + break; + } + + return status; +} + +/* igc_write_nvm_srwr_i225 - Write to Shadow RAM using EEWR + * @hw: pointer to the HW structure + * @offset: offset within the Shadow RAM to be written to + * @words: number of words to write + * @data: 16 bit word(s) to be written to the Shadow RAM + * + * Writes data to Shadow RAM at offset using EEWR register. + * + * If igc_update_nvm_checksum is not called after this function , the + * data will not be committed to FLASH and also Shadow RAM will most likely + * contain an invalid checksum. + * + * If error code is returned, data and Shadow RAM may be inconsistent - buffer + * partially written. + */ +s32 igc_write_nvm_srwr_i225(struct igc_hw *hw, u16 offset, u16 words, + u16 *data) +{ + s32 status = IGC_SUCCESS; + u16 i, count; + + DEBUGFUNC("igc_write_nvm_srwr_i225"); + + /* We cannot hold synchronization semaphores for too long, + * because of forceful takeover procedure. However it is more efficient + * to write in bursts than synchronizing access for each word. + */ + for (i = 0; i < words; i += IGC_EERD_EEWR_MAX_COUNT) { + count = (words - i) / IGC_EERD_EEWR_MAX_COUNT > 0 ? + IGC_EERD_EEWR_MAX_COUNT : (words - i); + if (hw->nvm.ops.acquire(hw) == IGC_SUCCESS) { + status = __igc_write_nvm_srwr(hw, offset, count, + data + i); + hw->nvm.ops.release(hw); + } else { + status = IGC_ERR_SWFW_SYNC; + } + + if (status != IGC_SUCCESS) + break; + } + + return status; +} + +/* __igc_write_nvm_srwr - Write to Shadow Ram using EEWR + * @hw: pointer to the HW structure + * @offset: offset within the Shadow Ram to be written to + * @words: number of words to write + * @data: 16 bit word(s) to be written to the Shadow Ram + * + * Writes data to Shadow Ram at offset using EEWR register. + * + * If igc_update_nvm_checksum is not called after this function , the + * Shadow Ram will most likely contain an invalid checksum. 
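+ *
+ * Callers normally go through igc_write_nvm_srwr_i225() and then commit,
+ * roughly as sketched below (illustrative only; 'offset' and 'bit' are
+ * placeholders and error handling is omitted):
+ *
+ *	u16 word;
+ *	igc_read_nvm_srrd_i225(hw, offset, 1, &word);
+ *	word |= bit;
+ *	igc_write_nvm_srwr_i225(hw, offset, 1, &word);
+ *	igc_update_nvm_checksum_i225(hw);	(commits Shadow RAM to flash)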
+ */ +static s32 __igc_write_nvm_srwr(struct igc_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct igc_nvm_info *nvm = &hw->nvm; + u32 i, k, eewr = 0; + u32 attempts = 100000; + s32 ret_val = IGC_SUCCESS; + + DEBUGFUNC("__igc_write_nvm_srwr"); + + /* A check for invalid values: offset too large, too many words, + * too many words for the offset, and not enough words. + */ + if (offset >= nvm->word_size || words > (nvm->word_size - offset) || + words == 0) { + DEBUGOUT("nvm parameter(s) out of bounds\n"); + ret_val = -IGC_ERR_NVM; + goto out; + } + + for (i = 0; i < words; i++) { + eewr = ((offset + i) << IGC_NVM_RW_ADDR_SHIFT) | + (data[i] << IGC_NVM_RW_REG_DATA) | + IGC_NVM_RW_REG_START; + + IGC_WRITE_REG(hw, IGC_SRWR, eewr); + + for (k = 0; k < attempts; k++) { + if (IGC_NVM_RW_REG_DONE & + IGC_READ_REG(hw, IGC_SRWR)) { + ret_val = IGC_SUCCESS; + break; + } + usec_delay(5); + } + + if (ret_val != IGC_SUCCESS) { + DEBUGOUT("Shadow RAM write EEWR timed out\n"); + break; + } + } + +out: + return ret_val; +} + +/* igc_read_invm_version_i225 - Reads iNVM version and image type + * @hw: pointer to the HW structure + * @invm_ver: version structure for the version read + * + * Reads iNVM version and image type. + */ +s32 igc_read_invm_version_i225(struct igc_hw *hw, + struct igc_fw_version *invm_ver) +{ + u32 *record = NULL; + u32 *next_record = NULL; + u32 i = 0; + u32 invm_dword = 0; + u32 invm_blocks = IGC_INVM_SIZE - (IGC_INVM_ULT_BYTES_SIZE / + IGC_INVM_RECORD_SIZE_IN_BYTES); + u32 buffer[IGC_INVM_SIZE]; + s32 status = -IGC_ERR_INVM_VALUE_NOT_FOUND; + u16 version = 0; + + DEBUGFUNC("igc_read_invm_version_i225"); + + /* Read iNVM memory */ + for (i = 0; i < IGC_INVM_SIZE; i++) { + invm_dword = IGC_READ_REG(hw, IGC_INVM_DATA_REG(i)); + buffer[i] = invm_dword; + } + + /* Read version number */ + for (i = 1; i < invm_blocks; i++) { + record = &buffer[invm_blocks - i]; + next_record = &buffer[invm_blocks - i + 1]; + + /* Check if we have first version location used */ + if (i == 1 && (*record & IGC_INVM_VER_FIELD_ONE) == 0) { + version = 0; + status = IGC_SUCCESS; + break; + } + /* Check if we have second version location used */ + else if ((i == 1) && + ((*record & IGC_INVM_VER_FIELD_TWO) == 0)) { + version = (*record & IGC_INVM_VER_FIELD_ONE) >> 3; + status = IGC_SUCCESS; + break; + } + /* Check if we have odd version location + * used and it is the last one used + */ + else if ((((*record & IGC_INVM_VER_FIELD_ONE) == 0) && + ((*record & 0x3) == 0)) || (((*record & 0x3) != 0) && + (i != 1))) { + version = (*next_record & IGC_INVM_VER_FIELD_TWO) + >> 13; + status = IGC_SUCCESS; + break; + } + /* Check if we have even version location + * used and it is the last one used + */ + else if (((*record & IGC_INVM_VER_FIELD_TWO) == 0) && + ((*record & 0x3) == 0)) { + version = (*record & IGC_INVM_VER_FIELD_ONE) >> 3; + status = IGC_SUCCESS; + break; + } + } + + if (status == IGC_SUCCESS) { + invm_ver->invm_major = (version & IGC_INVM_MAJOR_MASK) + >> IGC_INVM_MAJOR_SHIFT; + invm_ver->invm_minor = version & IGC_INVM_MINOR_MASK; + } + /* Read Image Type */ + for (i = 1; i < invm_blocks; i++) { + record = &buffer[invm_blocks - i]; + next_record = &buffer[invm_blocks - i + 1]; + + /* Check if we have image type in first location used */ + if (i == 1 && (*record & IGC_INVM_IMGTYPE_FIELD) == 0) { + invm_ver->invm_img_type = 0; + status = IGC_SUCCESS; + break; + } + /* Check if we have image type in first location used */ + else if ((((*record & 0x3) == 0) && + ((*record & IGC_INVM_IMGTYPE_FIELD) == 0)) 
|| + ((((*record & 0x3) != 0) && (i != 1)))) { + invm_ver->invm_img_type = + (*next_record & IGC_INVM_IMGTYPE_FIELD) >> 23; + status = IGC_SUCCESS; + break; + } + } + return status; +} + +/* igc_validate_nvm_checksum_i225 - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Calculates the EEPROM checksum by reading/adding each word of the EEPROM + * and then verifies that the sum of the EEPROM is equal to 0xBABA. + */ +s32 igc_validate_nvm_checksum_i225(struct igc_hw *hw) +{ + s32 status = IGC_SUCCESS; + s32 (*read_op_ptr)(struct igc_hw *hw, u16 offset, + u16 count, u16 *data); + + DEBUGFUNC("igc_validate_nvm_checksum_i225"); + + if (hw->nvm.ops.acquire(hw) == IGC_SUCCESS) { + /* Replace the read function with semaphore grabbing with + * the one that skips this for a while. + * We have semaphore taken already here. + */ + read_op_ptr = hw->nvm.ops.read; + hw->nvm.ops.read = igc_read_nvm_eerd; + + status = igc_validate_nvm_checksum_generic(hw); + + /* Revert original read operation. */ + hw->nvm.ops.read = read_op_ptr; + + hw->nvm.ops.release(hw); + } else { + status = IGC_ERR_SWFW_SYNC; + } + + return status; +} + +/* igc_update_nvm_checksum_i225 - Update EEPROM checksum + * @hw: pointer to the HW structure + * + * Updates the EEPROM checksum by reading/adding each word of the EEPROM + * up to the checksum. Then calculates the EEPROM checksum and writes the + * value to the EEPROM. Next commit EEPROM data onto the Flash. + */ +s32 igc_update_nvm_checksum_i225(struct igc_hw *hw) +{ + s32 ret_val; + u16 checksum = 0; + u16 i, nvm_data; + + DEBUGFUNC("igc_update_nvm_checksum_i225"); + + /* Read the first word from the EEPROM. If this times out or fails, do + * not continue or we could be in for a very long wait while every + * EEPROM read fails + */ + ret_val = igc_read_nvm_eerd(hw, 0, 1, &nvm_data); + if (ret_val != IGC_SUCCESS) { + DEBUGOUT("EEPROM read failed\n"); + goto out; + } + + if (hw->nvm.ops.acquire(hw) == IGC_SUCCESS) { + /* Do not use hw->nvm.ops.write, hw->nvm.ops.read + * because we do not want to take the synchronization + * semaphores twice here. + */ + + for (i = 0; i < NVM_CHECKSUM_REG; i++) { + ret_val = igc_read_nvm_eerd(hw, i, 1, &nvm_data); + if (ret_val) { + hw->nvm.ops.release(hw); + DEBUGOUT("NVM Read Error while updating\n"); + DEBUGOUT("checksum.\n"); + goto out; + } + checksum += nvm_data; + } + checksum = (u16)NVM_SUM - checksum; + ret_val = __igc_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1, + &checksum); + if (ret_val != IGC_SUCCESS) { + hw->nvm.ops.release(hw); + DEBUGOUT("NVM Write Error while updating checksum.\n"); + goto out; + } + + hw->nvm.ops.release(hw); + + ret_val = igc_update_flash_i225(hw); + } else { + ret_val = IGC_ERR_SWFW_SYNC; + } +out: + return ret_val; +} + +/* igc_get_flash_presence_i225 - Check if flash device is detected. + * @hw: pointer to the HW structure + */ +bool igc_get_flash_presence_i225(struct igc_hw *hw) +{ + u32 eec = 0; + bool ret_val = false; + + DEBUGFUNC("igc_get_flash_presence_i225"); + + eec = IGC_READ_REG(hw, IGC_EECD); + + if (eec & IGC_EECD_FLASH_DETECTED_I225) + ret_val = true; + + return ret_val; +} + +/* igc_set_flsw_flash_burst_counter_i225 - sets FLSW NVM Burst + * Counter in FLSWCNT register. 
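+ * For reference, igc_update_flash_i225() below programs a burst of 0x2
+ * bytes (one 16-bit Shadow RAM word) before each FLSWDATA write.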
+ * + * @hw: pointer to the HW structure + * @burst_counter: size in bytes of the Flash burst to read or write + */ +s32 igc_set_flsw_flash_burst_counter_i225(struct igc_hw *hw, + u32 burst_counter) +{ + s32 ret_val = IGC_SUCCESS; + + DEBUGFUNC("igc_set_flsw_flash_burst_counter_i225"); + + /* Validate input data */ + if (burst_counter < IGC_I225_SHADOW_RAM_SIZE) { + /* Write FLSWCNT - burst counter */ + IGC_WRITE_REG(hw, IGC_I225_FLSWCNT, burst_counter); + } else { + ret_val = IGC_ERR_INVALID_ARGUMENT; + } + + return ret_val; +} + +/* igc_write_erase_flash_command_i225 - write/erase to a sector + * region on a given address. + * + * @hw: pointer to the HW structure + * @opcode: opcode to be used for the write command + * @address: the offset to write into the FLASH image + */ +s32 igc_write_erase_flash_command_i225(struct igc_hw *hw, u32 opcode, + u32 address) +{ + u32 flswctl = 0; + s32 timeout = IGC_NVM_GRANT_ATTEMPTS; + s32 ret_val = IGC_SUCCESS; + + DEBUGFUNC("igc_write_erase_flash_command_i225"); + + flswctl = IGC_READ_REG(hw, IGC_I225_FLSWCTL); + /* Polling done bit on FLSWCTL register */ + while (timeout) { + if (flswctl & IGC_FLSWCTL_DONE) + break; + usec_delay(5); + flswctl = IGC_READ_REG(hw, IGC_I225_FLSWCTL); + timeout--; + } + + if (!timeout) { + DEBUGOUT("Flash transaction was not done\n"); + return -IGC_ERR_NVM; + } + + /* Build and issue command on FLSWCTL register */ + flswctl = address | opcode; + IGC_WRITE_REG(hw, IGC_I225_FLSWCTL, flswctl); + + /* Check if issued command is valid on FLSWCTL register */ + flswctl = IGC_READ_REG(hw, IGC_I225_FLSWCTL); + if (!(flswctl & IGC_FLSWCTL_CMDV)) { + DEBUGOUT("Write flash command failed\n"); + ret_val = IGC_ERR_INVALID_ARGUMENT; + } + + return ret_val; +} + +/* igc_update_flash_i225 - Commit EEPROM to the flash + * if fw_valid_bit is set, FW is active. setting FLUPD bit in EEC + * register makes the FW load the internal shadow RAM into the flash. + * Otherwise, fw_valid_bit is 0. if FL_SECU.block_prtotected_sw = 0 + * then FW is not active so the SW is responsible shadow RAM dump. + * + * @hw: pointer to the HW structure + */ +s32 igc_update_flash_i225(struct igc_hw *hw) +{ + u16 current_offset_data = 0; + u32 block_sw_protect = 1; + u16 base_address = 0x0; + u32 i, fw_valid_bit; + u16 current_offset; + s32 ret_val = 0; + u32 flup; + + DEBUGFUNC("igc_update_flash_i225"); + + block_sw_protect = IGC_READ_REG(hw, IGC_I225_FLSECU) & + IGC_FLSECU_BLK_SW_ACCESS_I225; + fw_valid_bit = IGC_READ_REG(hw, IGC_FWSM) & + IGC_FWSM_FW_VALID_I225; + if (fw_valid_bit) { + ret_val = igc_pool_flash_update_done_i225(hw); + if (ret_val == -IGC_ERR_NVM) { + DEBUGOUT("Flash update time out\n"); + goto out; + } + + flup = IGC_READ_REG(hw, IGC_EECD) | IGC_EECD_FLUPD_I225; + IGC_WRITE_REG(hw, IGC_EECD, flup); + + ret_val = igc_pool_flash_update_done_i225(hw); + if (ret_val == IGC_SUCCESS) + DEBUGOUT("Flash update complete\n"); + else + DEBUGOUT("Flash update time out\n"); + } else if (!block_sw_protect) { + /* FW is not active and security protection is disabled. + * therefore, SW is in charge of shadow RAM dump. + * Check which sector is valid. if sector 0 is valid, + * base address remains 0x0. 
otherwise, sector 1 is
+		 * valid and its base address is 0x1000
+		 */
+		if (IGC_READ_REG(hw, IGC_EECD) & IGC_EECD_SEC1VAL_I225)
+			base_address = 0x1000;
+
+		/* Valid sector erase */
+		ret_val = igc_write_erase_flash_command_i225(hw,
+						IGC_I225_ERASE_CMD_OPCODE,
+						base_address);
+		if (!ret_val) {
+			DEBUGOUT("Sector erase failed\n");
+			goto out;
+		}
+
+		current_offset = base_address;
+
+		/* Write */
+		for (i = 0; i < IGC_I225_SHADOW_RAM_SIZE / 2; i++) {
+			/* Set burst write length */
+			ret_val = igc_set_flsw_flash_burst_counter_i225(hw,
+									0x2);
+			if (ret_val != IGC_SUCCESS)
+				break;
+
+			/* Set address and opcode */
+			ret_val = igc_write_erase_flash_command_i225(hw,
+						IGC_I225_WRITE_CMD_OPCODE,
+						2 * current_offset);
+			if (ret_val != IGC_SUCCESS)
+				break;
+
+			ret_val = igc_read_nvm_eerd(hw, current_offset,
+						1, &current_offset_data);
+			if (ret_val) {
+				DEBUGOUT("Failed to read from EEPROM\n");
+				goto out;
+			}
+
+			/* Write CurrentOffsetData to FLSWDATA register */
+			IGC_WRITE_REG(hw, IGC_I225_FLSWDATA,
+					current_offset_data);
+			current_offset++;
+
+			/* Wait till operation has finished */
+			ret_val = igc_poll_eerd_eewr_done(hw,
+						IGC_NVM_POLL_READ);
+			if (ret_val)
+				break;
+
+			usec_delay(1000);
+		}
+	}
+out:
+	return ret_val;
+}
+
+/* igc_pool_flash_update_done_i225 - Poll FLUDONE status.
+ * @hw: pointer to the HW structure
+ */
+s32 igc_pool_flash_update_done_i225(struct igc_hw *hw)
+{
+	s32 ret_val = -IGC_ERR_NVM;
+	u32 i, reg;
+
+	DEBUGFUNC("igc_pool_flash_update_done_i225");
+
+	for (i = 0; i < IGC_FLUDONE_ATTEMPTS; i++) {
+		reg = IGC_READ_REG(hw, IGC_EECD);
+		if (reg & IGC_EECD_FLUDONE_I225) {
+			ret_val = IGC_SUCCESS;
+			break;
+		}
+		usec_delay(5);
+	}
+
+	return ret_val;
+}
+
+/* igc_set_ltr_i225 - Set Latency Tolerance Reporting thresholds.
+ * @hw: pointer to the HW structure
+ * @link: bool indicating link status
+ *
+ * Set the LTR thresholds based on the link speed (Mbps), EEE, and DMAC
+ * settings, otherwise specify that there is no LTR requirement.
+ */
+static s32 igc_set_ltr_i225(struct igc_hw *hw, bool link)
+{
+	u16 speed, duplex;
+	u32 tw_system, ltrc, ltrv, ltr_min, ltr_max, scale_min, scale_max;
+	s32 size;
+
+	DEBUGFUNC("igc_set_ltr_i225");
+
+	/* If we do not have link, LTR thresholds are zero. */
+	if (link) {
+		hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
+
+		/* Check if using copper interface with EEE enabled or if the
+		 * link speed is 10 Mbps.
+		 */
+		if (hw->phy.media_type == igc_media_type_copper &&
+		    !hw->dev_spec._i225.eee_disable &&
+		    speed != SPEED_10) {
+			/* EEE enabled, so send LTRMAX threshold. */
+			ltrc = IGC_READ_REG(hw, IGC_LTRC) |
+				IGC_LTRC_EEEMS_EN;
+			IGC_WRITE_REG(hw, IGC_LTRC, ltrc);
+
+			/* Calculate tw_system (nsec). */
+			if (speed == SPEED_100)
+				tw_system = ((IGC_READ_REG(hw, IGC_EEE_SU) &
+					IGC_TW_SYSTEM_100_MASK) >>
+					IGC_TW_SYSTEM_100_SHIFT) * 500;
+			else
+				tw_system = (IGC_READ_REG(hw, IGC_EEE_SU) &
+					IGC_TW_SYSTEM_1000_MASK) * 500;
+		} else {
+			tw_system = 0;
+		}
+
+		/* Get the Rx packet buffer size. */
+		size = IGC_READ_REG(hw, IGC_RXPBS) &
+			IGC_RXPBS_SIZE_I225_MASK;
+
+		/* Calculations vary based on DMAC settings. */
+		if (IGC_READ_REG(hw, IGC_DMACR) & IGC_DMACR_DMAC_EN) {
+			size -= (IGC_READ_REG(hw, IGC_DMACR) &
+				 IGC_DMACR_DMACTHR_MASK) >>
+				 IGC_DMACR_DMACTHR_SHIFT;
+			/* Convert size to bits. */
+			size *= 1024 * 8;
+		} else {
+			/* Convert size to bytes, subtract the MTU, and then
+			 * convert the size to bits.
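+			 * For example (illustrative numbers only): a 32 KB Rx
+			 * buffer and a 1500-byte MTU give
+			 * (32 * 1024 - 1500) * 8 = 250144 bits, so at
+			 * 1000 Mbps the ltr_min computed below is roughly
+			 * 250144 nsec, which selects the 1024 nsec scale.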
+ */ + size *= 1024; + size -= hw->dev_spec._i225.mtu; + size *= 8; + } + + if (size < 0) { + DEBUGOUT1("Invalid effective Rx buffer size %d\n", + size); + return -IGC_ERR_CONFIG; + } + + /* Calculate the thresholds. Since speed is in Mbps, simplify + * the calculation by multiplying size/speed by 1000 for result + * to be in nsec before dividing by the scale in nsec. Set the + * scale such that the LTR threshold fits in the register. + */ + ltr_min = (1000 * size) / speed; + ltr_max = ltr_min + tw_system; + scale_min = (ltr_min / 1024) < 1024 ? IGC_LTRMINV_SCALE_1024 : + IGC_LTRMINV_SCALE_32768; + scale_max = (ltr_max / 1024) < 1024 ? IGC_LTRMAXV_SCALE_1024 : + IGC_LTRMAXV_SCALE_32768; + ltr_min /= scale_min == IGC_LTRMINV_SCALE_1024 ? 1024 : 32768; + ltr_max /= scale_max == IGC_LTRMAXV_SCALE_1024 ? 1024 : 32768; + + /* Only write the LTR thresholds if they differ from before. */ + ltrv = IGC_READ_REG(hw, IGC_LTRMINV); + if (ltr_min != (ltrv & IGC_LTRMINV_LTRV_MASK)) { + ltrv = IGC_LTRMINV_LSNP_REQ | ltr_min | + (scale_min << IGC_LTRMINV_SCALE_SHIFT); + IGC_WRITE_REG(hw, IGC_LTRMINV, ltrv); + } + + ltrv = IGC_READ_REG(hw, IGC_LTRMAXV); + if (ltr_max != (ltrv & IGC_LTRMAXV_LTRV_MASK)) { + ltrv = IGC_LTRMAXV_LSNP_REQ | ltr_max | + (scale_min << IGC_LTRMAXV_SCALE_SHIFT); + IGC_WRITE_REG(hw, IGC_LTRMAXV, ltrv); + } + } + + return IGC_SUCCESS; +} + +/* igc_check_for_link_i225 - Check for link + * @hw: pointer to the HW structure + * + * Checks to see of the link status of the hardware has changed. If a + * change in link status has been detected, then we read the PHY registers + * to get the current speed/duplex if link exists. + */ +s32 igc_check_for_link_i225(struct igc_hw *hw) +{ + struct igc_mac_info *mac = &hw->mac; + s32 ret_val; + bool link = false; + + DEBUGFUNC("igc_check_for_link_i225"); + + /* We only want to go out to the PHY registers to see if + * Auto-Neg has completed and/or if our link status has + * changed. The get_link_status flag is set upon receiving + * a Link Status Change or Rx Sequence Error interrupt. + */ + if (!mac->get_link_status) { + ret_val = IGC_SUCCESS; + goto out; + } + + /* First we want to see if the MII Status Register reports + * link. If so, then we want to get the current speed/duplex + * of the PHY. + */ + ret_val = igc_phy_has_link_generic(hw, 1, 0, &link); + if (ret_val) + goto out; + + if (!link) + goto out; /* No link detected */ + + mac->get_link_status = false; + + /* Check if there was DownShift, must be checked + * immediately after link-up + */ + igc_check_downshift_generic(hw); + + /* If we are forcing speed/duplex, then we simply return since + * we have already determined whether we have link or not. + */ + if (!mac->autoneg) + goto out; + + /* Auto-Neg is enabled. Auto Speed Detection takes care + * of MAC speed/duplex configuration. So we only need to + * configure Collision Distance in the MAC. + */ + mac->ops.config_collision_dist(hw); + + /* Configure Flow Control now that Auto-Neg has completed. + * First, we need to restore the desired flow control + * settings because we may have had to re-autoneg with a + * different link partner. + */ + ret_val = igc_config_fc_after_link_up_generic(hw); + if (ret_val) + DEBUGOUT("Error configuring flow control\n"); +out: + /* Now that we are aware of our link settings, we can set the LTR + * thresholds. + */ + ret_val = igc_set_ltr_i225(hw, link); + + return ret_val; +} + +/* igc_init_function_pointers_i225 - Init func ptrs. 
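+ *	(illustrative bring-up sketch, not from the original sources;
+ *	 error checking omitted)
+ *	igc_init_function_pointers_i225(hw);
+ *	hw->mac.ops.init_params(hw);
+ *	hw->nvm.ops.init_params(hw);
+ *	hw->phy.ops.init_params(hw);
+ *	hw->mac.ops.init_hw(hw);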
+ * @hw: pointer to the HW structure + * + * Called to initialize all function pointers and parameters. + */ +void igc_init_function_pointers_i225(struct igc_hw *hw) +{ + igc_init_mac_ops_generic(hw); + igc_init_phy_ops_generic(hw); + igc_init_nvm_ops_generic(hw); + hw->mac.ops.init_params = igc_init_mac_params_i225; + hw->nvm.ops.init_params = igc_init_nvm_params_i225; + hw->phy.ops.init_params = igc_init_phy_params_i225; +} + +/* igc_valid_led_default_i225 - Verify a valid default LED config + * @hw: pointer to the HW structure + * @data: pointer to the NVM (EEPROM) + * + * Read the EEPROM for the current default LED configuration. If the + * LED configuration is not valid, set to a valid LED configuration. + */ +static s32 igc_valid_led_default_i225(struct igc_hw *hw, u16 *data) +{ + s32 ret_val; + + DEBUGFUNC("igc_valid_led_default_i225"); + + ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + goto out; + } + + if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) { + switch (hw->phy.media_type) { + case igc_media_type_internal_serdes: + *data = ID_LED_DEFAULT_I225_SERDES; + break; + case igc_media_type_copper: + default: + *data = ID_LED_DEFAULT_I225; + break; + } + } +out: + return ret_val; +} + +/* igc_get_cfg_done_i225 - Read config done bit + * @hw: pointer to the HW structure + * + * Read the management control register for the config done bit for + * completion status. NOTE: silicon which is EEPROM-less will fail trying + * to read the config done bit, so an error is *ONLY* logged and returns + * IGC_SUCCESS. If we were to return with error, EEPROM-less silicon + * would not be able to be reset or change link. + */ +static s32 igc_get_cfg_done_i225(struct igc_hw *hw) +{ + s32 timeout = PHY_CFG_TIMEOUT; + u32 mask = IGC_NVM_CFG_DONE_PORT_0; + + DEBUGFUNC("igc_get_cfg_done_i225"); + + while (timeout) { + if (IGC_READ_REG(hw, IGC_EEMNGCTL_I225) & mask) + break; + msec_delay(1); + timeout--; + } + if (!timeout) + DEBUGOUT("MNG configuration cycle has not completed.\n"); + + return IGC_SUCCESS; +} + +/* igc_init_hw_i225 - Init hw for I225 + * @hw: pointer to the HW structure + * + * Called to initialize hw for i225 hw family. + */ +s32 igc_init_hw_i225(struct igc_hw *hw) +{ + s32 ret_val; + + DEBUGFUNC("igc_init_hw_i225"); + + hw->phy.ops.get_cfg_done = igc_get_cfg_done_i225; + ret_val = igc_init_hw_base(hw); + return ret_val; +} + +/* + * igc_set_d0_lplu_state_i225 - Set Low-Power-Link-Up (LPLU) D0 state + * @hw: pointer to the HW structure + * @active: true to enable LPLU, false to disable + * + * Note: since I225 does not actually support LPLU, this function + * simply enables/disables 1G and 2.5G speeds in D0. + */ +s32 igc_set_d0_lplu_state_i225(struct igc_hw *hw, bool active) +{ + u32 data; + + DEBUGFUNC("igc_set_d0_lplu_state_i225"); + + data = IGC_READ_REG(hw, IGC_I225_PHPM); + + if (active) { + data |= IGC_I225_PHPM_DIS_1000; + data |= IGC_I225_PHPM_DIS_2500; + } else { + data &= ~IGC_I225_PHPM_DIS_1000; + data &= ~IGC_I225_PHPM_DIS_2500; + } + + IGC_WRITE_REG(hw, IGC_I225_PHPM, data); + return IGC_SUCCESS; +} + +/* + * igc_set_d3_lplu_state_i225 - Set Low-Power-Link-Up (LPLU) D3 state + * @hw: pointer to the HW structure + * @active: true to enable LPLU, false to disable + * + * Note: since I225 does not actually support LPLU, this function + * simply enables/disables 100M, 1G and 2.5G speeds in D3. 
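+ * With active == true only 10 Mbps operation is left enabled in D3;
+ * passing false restores the 100M/1G/2.5G speeds.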
+ */ +s32 igc_set_d3_lplu_state_i225(struct igc_hw *hw, bool active) +{ + u32 data; + + DEBUGFUNC("igc_set_d3_lplu_state_i225"); + + data = IGC_READ_REG(hw, IGC_I225_PHPM); + + if (active) { + data |= IGC_I225_PHPM_DIS_100_D3; + data |= IGC_I225_PHPM_DIS_1000_D3; + data |= IGC_I225_PHPM_DIS_2500_D3; + } else { + data &= ~IGC_I225_PHPM_DIS_100_D3; + data &= ~IGC_I225_PHPM_DIS_1000_D3; + data &= ~IGC_I225_PHPM_DIS_2500_D3; + } + + IGC_WRITE_REG(hw, IGC_I225_PHPM, data); + return IGC_SUCCESS; +} + +/** + * igc_set_eee_i225 - Enable/disable EEE support + * @hw: pointer to the HW structure + * @adv2p5G: boolean flag enabling 2.5G EEE advertisement + * @adv1G: boolean flag enabling 1G EEE advertisement + * @adv100M: boolean flag enabling 100M EEE advertisement + * + * Enable/disable EEE based on setting in dev_spec structure. + * + **/ +s32 igc_set_eee_i225(struct igc_hw *hw, bool adv2p5G, bool adv1G, + bool adv100M) +{ + u32 ipcnfg, eeer; + + DEBUGFUNC("igc_set_eee_i225"); + + if (hw->mac.type != igc_i225 || + hw->phy.media_type != igc_media_type_copper) + goto out; + ipcnfg = IGC_READ_REG(hw, IGC_IPCNFG); + eeer = IGC_READ_REG(hw, IGC_EEER); + + /* enable or disable per user setting */ + if (!(hw->dev_spec._i225.eee_disable)) { + u32 eee_su = IGC_READ_REG(hw, IGC_EEE_SU); + + if (adv100M) + ipcnfg |= IGC_IPCNFG_EEE_100M_AN; + else + ipcnfg &= ~IGC_IPCNFG_EEE_100M_AN; + + if (adv1G) + ipcnfg |= IGC_IPCNFG_EEE_1G_AN; + else + ipcnfg &= ~IGC_IPCNFG_EEE_1G_AN; + + if (adv2p5G) + ipcnfg |= IGC_IPCNFG_EEE_2_5G_AN; + else + ipcnfg &= ~IGC_IPCNFG_EEE_2_5G_AN; + + eeer |= (IGC_EEER_TX_LPI_EN | IGC_EEER_RX_LPI_EN | + IGC_EEER_LPI_FC); + + /* This bit should not be set in normal operation. */ + if (eee_su & IGC_EEE_SU_LPI_CLK_STP) + DEBUGOUT("LPI Clock Stop Bit should not be set!\n"); + } else { + ipcnfg &= ~(IGC_IPCNFG_EEE_2_5G_AN | IGC_IPCNFG_EEE_1G_AN | + IGC_IPCNFG_EEE_100M_AN); + eeer &= ~(IGC_EEER_TX_LPI_EN | IGC_EEER_RX_LPI_EN | + IGC_EEER_LPI_FC); + } + IGC_WRITE_REG(hw, IGC_IPCNFG, ipcnfg); + IGC_WRITE_REG(hw, IGC_EEER, eeer); + IGC_READ_REG(hw, IGC_IPCNFG); + IGC_READ_REG(hw, IGC_EEER); +out: + + return IGC_SUCCESS; +} diff --git a/src/spdk/dpdk/drivers/net/igc/base/igc_i225.h b/src/spdk/dpdk/drivers/net/igc/base/igc_i225.h new file mode 100644 index 000000000..c61ece0e8 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/igc/base/igc_i225.h @@ -0,0 +1,110 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + +#ifndef _IGC_I225_H_ +#define _IGC_I225_H_ + +bool igc_get_flash_presence_i225(struct igc_hw *hw); +s32 igc_update_flash_i225(struct igc_hw *hw); +s32 igc_update_nvm_checksum_i225(struct igc_hw *hw); +s32 igc_validate_nvm_checksum_i225(struct igc_hw *hw); +s32 igc_write_nvm_srwr_i225(struct igc_hw *hw, u16 offset, + u16 words, u16 *data); +s32 igc_read_nvm_srrd_i225(struct igc_hw *hw, u16 offset, + u16 words, u16 *data); +s32 igc_read_invm_version_i225(struct igc_hw *hw, + struct igc_fw_version *invm_ver); +s32 igc_set_flsw_flash_burst_counter_i225(struct igc_hw *hw, + u32 burst_counter); +s32 igc_write_erase_flash_command_i225(struct igc_hw *hw, u32 opcode, + u32 address); +s32 igc_check_for_link_i225(struct igc_hw *hw); +s32 igc_acquire_swfw_sync_i225(struct igc_hw *hw, u16 mask); +void igc_release_swfw_sync_i225(struct igc_hw *hw, u16 mask); +s32 igc_init_hw_i225(struct igc_hw *hw); +s32 igc_setup_copper_link_i225(struct igc_hw *hw); +s32 igc_set_d0_lplu_state_i225(struct igc_hw *hw, bool active); +s32 igc_set_d3_lplu_state_i225(struct igc_hw *hw, 
bool active); +s32 igc_set_eee_i225(struct igc_hw *hw, bool adv2p5G, bool adv1G, + bool adv100M); + +#define ID_LED_DEFAULT_I225 ((ID_LED_OFF1_ON2 << 8) | \ + (ID_LED_DEF1_DEF2 << 4) | \ + (ID_LED_OFF1_OFF2)) +#define ID_LED_DEFAULT_I225_SERDES ((ID_LED_DEF1_DEF2 << 8) | \ + (ID_LED_DEF1_DEF2 << 4) | \ + (ID_LED_OFF1_ON2)) + +/* NVM offset defaults for I225 devices */ +#define NVM_INIT_CTRL_2_DEFAULT_I225 0X7243 +#define NVM_INIT_CTRL_4_DEFAULT_I225 0x00C1 +#define NVM_LED_1_CFG_DEFAULT_I225 0x0184 +#define NVM_LED_0_2_CFG_DEFAULT_I225 0x200C + +#define IGC_MRQC_ENABLE_RSS_4Q 0x00000002 +#define IGC_MRQC_ENABLE_VMDQ 0x00000003 +#define IGC_MRQC_ENABLE_VMDQ_RSS_2Q 0x00000005 +#define IGC_MRQC_RSS_FIELD_IPV4_UDP 0x00400000 +#define IGC_MRQC_RSS_FIELD_IPV6_UDP 0x00800000 +#define IGC_MRQC_RSS_FIELD_IPV6_UDP_EX 0x01000000 +#define IGC_I225_SHADOW_RAM_SIZE 4096 +#define IGC_I225_ERASE_CMD_OPCODE 0x02000000 +#define IGC_I225_WRITE_CMD_OPCODE 0x01000000 +#define IGC_FLSWCTL_DONE 0x40000000 +#define IGC_FLSWCTL_CMDV 0x10000000 + +/* SRRCTL bit definitions */ +#define IGC_SRRCTL_BSIZEHDRSIZE_MASK 0x00000F00 +#define IGC_SRRCTL_DESCTYPE_LEGACY 0x00000000 +#define IGC_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000 +#define IGC_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000 +#define IGC_SRRCTL_DESCTYPE_HDR_REPLICATION 0x06000000 +#define IGC_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000 +#define IGC_SRRCTL_DESCTYPE_MASK 0x0E000000 +#define IGC_SRRCTL_DROP_EN 0x80000000 +#define IGC_SRRCTL_BSIZEPKT_MASK 0x0000007F +#define IGC_SRRCTL_BSIZEHDR_MASK 0x00003F00 + +#define IGC_RXDADV_RSSTYPE_MASK 0x0000000F +#define IGC_RXDADV_RSSTYPE_SHIFT 12 +#define IGC_RXDADV_HDRBUFLEN_MASK 0x7FE0 +#define IGC_RXDADV_HDRBUFLEN_SHIFT 5 +#define IGC_RXDADV_SPLITHEADER_EN 0x00001000 +#define IGC_RXDADV_SPH 0x8000 +#define IGC_RXDADV_STAT_TS 0x10000 /* Pkt was time stamped */ +#define IGC_RXDADV_ERR_HBO 0x00800000 + +/* RSS Hash results */ +#define IGC_RXDADV_RSSTYPE_NONE 0x00000000 +#define IGC_RXDADV_RSSTYPE_IPV4_TCP 0x00000001 +#define IGC_RXDADV_RSSTYPE_IPV4 0x00000002 +#define IGC_RXDADV_RSSTYPE_IPV6_TCP 0x00000003 +#define IGC_RXDADV_RSSTYPE_IPV6_EX 0x00000004 +#define IGC_RXDADV_RSSTYPE_IPV6 0x00000005 +#define IGC_RXDADV_RSSTYPE_IPV6_TCP_EX 0x00000006 +#define IGC_RXDADV_RSSTYPE_IPV4_UDP 0x00000007 +#define IGC_RXDADV_RSSTYPE_IPV6_UDP 0x00000008 +#define IGC_RXDADV_RSSTYPE_IPV6_UDP_EX 0x00000009 + +/* RSS Packet Types as indicated in the receive descriptor */ +#define IGC_RXDADV_PKTTYPE_ILMASK 0x000000F0 +#define IGC_RXDADV_PKTTYPE_TLMASK 0x00000F00 +#define IGC_RXDADV_PKTTYPE_NONE 0x00000000 +#define IGC_RXDADV_PKTTYPE_IPV4 0x00000010 /* IPV4 hdr present */ +#define IGC_RXDADV_PKTTYPE_IPV4_EX 0x00000020 /* IPV4 hdr + extensions */ +#define IGC_RXDADV_PKTTYPE_IPV6 0x00000040 /* IPV6 hdr present */ +#define IGC_RXDADV_PKTTYPE_IPV6_EX 0x00000080 /* IPV6 hdr + extensions */ +#define IGC_RXDADV_PKTTYPE_TCP 0x00000100 /* TCP hdr present */ +#define IGC_RXDADV_PKTTYPE_UDP 0x00000200 /* UDP hdr present */ +#define IGC_RXDADV_PKTTYPE_SCTP 0x00000400 /* SCTP hdr present */ +#define IGC_RXDADV_PKTTYPE_NFS 0x00000800 /* NFS hdr present */ + +#define IGC_RXDADV_PKTTYPE_IPSEC_ESP 0x00001000 /* IPSec ESP */ +#define IGC_RXDADV_PKTTYPE_IPSEC_AH 0x00002000 /* IPSec AH */ +#define IGC_RXDADV_PKTTYPE_LINKSEC 0x00004000 /* LinkSec Encap */ +#define IGC_RXDADV_PKTTYPE_ETQF 0x00008000 /* PKTTYPE is ETQF index */ +#define IGC_RXDADV_PKTTYPE_ETQF_MASK 0x00000070 /* ETQF has 8 indices */ +#define IGC_RXDADV_PKTTYPE_ETQF_SHIFT 4 /* Right-shift 4 
bits */ + +#endif diff --git a/src/spdk/dpdk/drivers/net/igc/base/igc_ich8lan.h b/src/spdk/dpdk/drivers/net/igc/base/igc_ich8lan.h new file mode 100644 index 000000000..ff32fc687 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/igc/base/igc_ich8lan.h @@ -0,0 +1,296 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + +#ifndef _IGC_ICH8LAN_H_ +#define _IGC_ICH8LAN_H_ + +#define ICH_FLASH_GFPREG 0x0000 +#define ICH_FLASH_HSFSTS 0x0004 +#define ICH_FLASH_HSFCTL 0x0006 +#define ICH_FLASH_FADDR 0x0008 +#define ICH_FLASH_FDATA0 0x0010 + +/* Requires up to 10 seconds when MNG might be accessing part. */ +#define ICH_FLASH_READ_COMMAND_TIMEOUT 10000000 +#define ICH_FLASH_WRITE_COMMAND_TIMEOUT 10000000 +#define ICH_FLASH_ERASE_COMMAND_TIMEOUT 10000000 +#define ICH_FLASH_LINEAR_ADDR_MASK 0x00FFFFFF +#define ICH_FLASH_CYCLE_REPEAT_COUNT 10 + +#define ICH_CYCLE_READ 0 +#define ICH_CYCLE_WRITE 2 +#define ICH_CYCLE_ERASE 3 + +#define FLASH_GFPREG_BASE_MASK 0x1FFF +#define FLASH_SECTOR_ADDR_SHIFT 12 + +#define ICH_FLASH_SEG_SIZE_256 256 +#define ICH_FLASH_SEG_SIZE_4K 4096 +#define ICH_FLASH_SEG_SIZE_8K 8192 +#define ICH_FLASH_SEG_SIZE_64K 65536 + +#define IGC_ICH_FWSM_RSPCIPHY 0x00000040 /* Reset PHY on PCI Reset */ +/* FW established a valid mode */ +#define IGC_ICH_FWSM_FW_VALID 0x00008000 +#define IGC_ICH_FWSM_PCIM2PCI 0x01000000 /* ME PCIm-to-PCI active */ +#define IGC_ICH_FWSM_PCIM2PCI_COUNT 2000 + +#define IGC_ICH_MNG_IAMT_MODE 0x2 + +#define IGC_FWSM_WLOCK_MAC_MASK 0x0380 +#define IGC_FWSM_WLOCK_MAC_SHIFT 7 +#define IGC_FWSM_ULP_CFG_DONE 0x00000400 /* Low power cfg done */ + +/* Shared Receive Address Registers */ +#define IGC_SHRAL_PCH_LPT(_i) (0x05408 + ((_i) * 8)) +#define IGC_SHRAH_PCH_LPT(_i) (0x0540C + ((_i) * 8)) + +#define IGC_H2ME 0x05B50 /* Host to ME */ +#define IGC_H2ME_ULP 0x00000800 /* ULP Indication Bit */ +#define IGC_H2ME_ENFORCE_SETTINGS 0x00001000 /* Enforce Settings */ + +#define ID_LED_DEFAULT_ICH8LAN ((ID_LED_DEF1_DEF2 << 12) | \ + (ID_LED_OFF1_OFF2 << 8) | \ + (ID_LED_OFF1_ON2 << 4) | \ + (ID_LED_DEF1_DEF2)) + +#define IGC_ICH_NVM_SIG_WORD 0x13 +#define IGC_ICH_NVM_SIG_MASK 0xC000 +#define IGC_ICH_NVM_VALID_SIG_MASK 0xC0 +#define IGC_ICH_NVM_SIG_VALUE 0x80 + +#define IGC_ICH8_LAN_INIT_TIMEOUT 1500 + +/* FEXT register bit definition */ +#define IGC_FEXT_PHY_CABLE_DISCONNECTED 0x00000004 + +#define IGC_FEXTNVM_SW_CONFIG 1 +#define IGC_FEXTNVM_SW_CONFIG_ICH8M (1 << 27) /* different on ICH8M */ + +#define IGC_FEXTNVM3_PHY_CFG_COUNTER_MASK 0x0C000000 +#define IGC_FEXTNVM3_PHY_CFG_COUNTER_50MSEC 0x08000000 + +#define IGC_FEXTNVM4_BEACON_DURATION_MASK 0x7 +#define IGC_FEXTNVM4_BEACON_DURATION_8USEC 0x7 +#define IGC_FEXTNVM4_BEACON_DURATION_16USEC 0x3 + +#define IGC_FEXTNVM6_REQ_PLL_CLK 0x00000100 +#define IGC_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION 0x00000200 +#define IGC_FEXTNVM6_K1_OFF_ENABLE 0x80000000 +/* bit for disabling packet buffer read */ +#define IGC_FEXTNVM7_DISABLE_PB_READ 0x00040000 +#define IGC_FEXTNVM7_SIDE_CLK_UNGATE 0x00000004 +#define IGC_FEXTNVM7_DISABLE_SMB_PERST 0x00000020 +#define IGC_FEXTNVM9_IOSFSB_CLKGATE_DIS 0x00000800 +#define IGC_FEXTNVM9_IOSFSB_CLKREQ_DIS 0x00001000 +#define IGC_FEXTNVM11_DISABLE_PB_READ 0x00000200 +#define IGC_FEXTNVM11_DISABLE_MULR_FIX 0x00002000 + +/* bit24: RXDCTL thresholds granularity: 0 - cache lines, 1 - descriptors */ +#define IGC_RXDCTL_THRESH_UNIT_DESC 0x01000000 + +#define NVM_SIZE_MULTIPLIER 4096 /*multiplier for NVMS field*/ +#define IGC_FLASH_BASE_ADDR 0xE000 /*offset of NVM access 
regs*/ +#define IGC_CTRL_EXT_NVMVS 0x3 /*NVM valid sector */ +#define IGC_TARC0_CB_MULTIQ_3_REQ 0x30000000 +#define IGC_TARC0_CB_MULTIQ_2_REQ 0x20000000 +#define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL + +#define IGC_ICH_RAR_ENTRIES 7 +#define IGC_PCH2_RAR_ENTRIES 5 /* RAR[0], SHRA[0-3] */ +#define IGC_PCH_LPT_RAR_ENTRIES 12 /* RAR[0], SHRA[0-10] */ + +#define PHY_PAGE_SHIFT 5 +#define PHY_REG(page, reg) (((page) << PHY_PAGE_SHIFT) | \ + ((reg) & MAX_PHY_REG_ADDRESS)) +#define IGP3_KMRN_DIAG PHY_REG(770, 19) /* KMRN Diagnostic */ +#define IGP3_VR_CTRL PHY_REG(776, 18) /* Voltage Regulator Control */ + +#define IGP3_KMRN_DIAG_PCS_LOCK_LOSS 0x0002 +#define IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK 0x0300 +#define IGP3_VR_CTRL_MODE_SHUTDOWN 0x0200 + +/* PHY Wakeup Registers and defines */ +#define BM_PORT_GEN_CFG PHY_REG(BM_PORT_CTRL_PAGE, 17) +#define BM_RCTL PHY_REG(BM_WUC_PAGE, 0) +#define BM_WUC PHY_REG(BM_WUC_PAGE, 1) +#define BM_WUFC PHY_REG(BM_WUC_PAGE, 2) +#define BM_WUS PHY_REG(BM_WUC_PAGE, 3) +#define BM_RAR_L(_i) (BM_PHY_REG(BM_WUC_PAGE, 16 + ((_i) << 2))) +#define BM_RAR_M(_i) (BM_PHY_REG(BM_WUC_PAGE, 17 + ((_i) << 2))) +#define BM_RAR_H(_i) (BM_PHY_REG(BM_WUC_PAGE, 18 + ((_i) << 2))) +#define BM_RAR_CTRL(_i) (BM_PHY_REG(BM_WUC_PAGE, 19 + ((_i) << 2))) +#define BM_MTA(_i) (BM_PHY_REG(BM_WUC_PAGE, 128 + ((_i) << 1))) + +#define BM_RCTL_UPE 0x0001 /* Unicast Promiscuous Mode */ +#define BM_RCTL_MPE 0x0002 /* Multicast Promiscuous Mode */ +#define BM_RCTL_MO_SHIFT 3 /* Multicast Offset Shift */ +#define BM_RCTL_MO_MASK (3 << 3) /* Multicast Offset Mask */ +#define BM_RCTL_BAM 0x0020 /* Broadcast Accept Mode */ +#define BM_RCTL_PMCF 0x0040 /* Pass MAC Control Frames */ +#define BM_RCTL_RFCE 0x0080 /* Rx Flow Control Enable */ + +#define HV_LED_CONFIG PHY_REG(768, 30) /* LED Configuration */ +#define HV_MUX_DATA_CTRL PHY_REG(776, 16) +#define HV_MUX_DATA_CTRL_GEN_TO_MAC 0x0400 +#define HV_MUX_DATA_CTRL_FORCE_SPEED 0x0004 +#define HV_STATS_PAGE 778 +/* Half-duplex collision counts */ +#define HV_SCC_UPPER PHY_REG(HV_STATS_PAGE, 16) /* Single Collision */ +#define HV_SCC_LOWER PHY_REG(HV_STATS_PAGE, 17) +#define HV_ECOL_UPPER PHY_REG(HV_STATS_PAGE, 18) /* Excessive Coll. 
*/ +#define HV_ECOL_LOWER PHY_REG(HV_STATS_PAGE, 19) +#define HV_MCC_UPPER PHY_REG(HV_STATS_PAGE, 20) /* Multiple Collision */ +#define HV_MCC_LOWER PHY_REG(HV_STATS_PAGE, 21) +#define HV_LATECOL_UPPER PHY_REG(HV_STATS_PAGE, 23) /* Late Collision */ +#define HV_LATECOL_LOWER PHY_REG(HV_STATS_PAGE, 24) +#define HV_COLC_UPPER PHY_REG(HV_STATS_PAGE, 25) /* Collision */ +#define HV_COLC_LOWER PHY_REG(HV_STATS_PAGE, 26) +#define HV_DC_UPPER PHY_REG(HV_STATS_PAGE, 27) /* Defer Count */ +#define HV_DC_LOWER PHY_REG(HV_STATS_PAGE, 28) +#define HV_TNCRS_UPPER PHY_REG(HV_STATS_PAGE, 29) /* Tx with no CRS */ +#define HV_TNCRS_LOWER PHY_REG(HV_STATS_PAGE, 30) + +#define IGC_FCRTV_PCH 0x05F40 /* PCH Flow Control Refresh Timer Value */ + +#define IGC_NVM_K1_CONFIG 0x1B /* NVM K1 Config Word */ +#define IGC_NVM_K1_ENABLE 0x1 /* NVM Enable K1 bit */ +#define K1_ENTRY_LATENCY 0 +#define K1_MIN_TIME 1 + +/* SMBus Control Phy Register */ +#define CV_SMB_CTRL PHY_REG(769, 23) +#define CV_SMB_CTRL_FORCE_SMBUS 0x0001 + +/* I218 Ultra Low Power Configuration 1 Register */ +#define I218_ULP_CONFIG1 PHY_REG(779, 16) +#define I218_ULP_CONFIG1_START 0x0001 /* Start auto ULP config */ +#define I218_ULP_CONFIG1_IND 0x0004 /* Pwr up from ULP indication */ +#define I218_ULP_CONFIG1_STICKY_ULP 0x0010 /* Set sticky ULP mode */ +#define I218_ULP_CONFIG1_INBAND_EXIT 0x0020 /* Inband on ULP exit */ +#define I218_ULP_CONFIG1_WOL_HOST 0x0040 /* WoL Host on ULP exit */ +#define I218_ULP_CONFIG1_RESET_TO_SMBUS 0x0100 /* Reset to SMBus mode */ +/* enable ULP even if when phy powered down via lanphypc */ +#define I218_ULP_CONFIG1_EN_ULP_LANPHYPC 0x0400 +/* disable clear of sticky ULP on PERST */ +#define I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST 0x0800 +#define I218_ULP_CONFIG1_DISABLE_SMB_PERST 0x1000 /* Disable on PERST# */ + + +/* SMBus Address Phy Register */ +#define HV_SMB_ADDR PHY_REG(768, 26) +#define HV_SMB_ADDR_MASK 0x007F +#define HV_SMB_ADDR_PEC_EN 0x0200 +#define HV_SMB_ADDR_VALID 0x0080 +#define HV_SMB_ADDR_FREQ_MASK 0x1100 +#define HV_SMB_ADDR_FREQ_LOW_SHIFT 8 +#define HV_SMB_ADDR_FREQ_HIGH_SHIFT 12 + +/* Strapping Option Register - RO */ +#define IGC_STRAP 0x0000C +#define IGC_STRAP_SMBUS_ADDRESS_MASK 0x00FE0000 +#define IGC_STRAP_SMBUS_ADDRESS_SHIFT 17 +#define IGC_STRAP_SMT_FREQ_MASK 0x00003000 +#define IGC_STRAP_SMT_FREQ_SHIFT 12 + +/* OEM Bits Phy Register */ +#define HV_OEM_BITS PHY_REG(768, 25) +#define HV_OEM_BITS_LPLU 0x0004 /* Low Power Link Up */ +#define HV_OEM_BITS_GBE_DIS 0x0040 /* Gigabit Disable */ +#define HV_OEM_BITS_RESTART_AN 0x0400 /* Restart Auto-negotiation */ + +/* KMRN Mode Control */ +#define HV_KMRN_MODE_CTRL PHY_REG(769, 16) +#define HV_KMRN_MDIO_SLOW 0x0400 + +/* KMRN FIFO Control and Status */ +#define HV_KMRN_FIFO_CTRLSTA PHY_REG(770, 16) +#define HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK 0x7000 +#define HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT 12 + +/* PHY Power Management Control */ +#define HV_PM_CTRL PHY_REG(770, 17) +#define HV_PM_CTRL_K1_CLK_REQ 0x200 +#define HV_PM_CTRL_K1_ENABLE 0x4000 + +#define I217_PLL_CLOCK_GATE_REG PHY_REG(772, 28) +#define I217_PLL_CLOCK_GATE_MASK 0x07FF + +#define SW_FLAG_TIMEOUT 1000 /* SW Semaphore flag timeout in ms */ + +/* Inband Control */ +#define I217_INBAND_CTRL PHY_REG(770, 18) +#define I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK 0x3F00 +#define I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT 8 + +/* Low Power Idle GPIO Control */ +#define I217_LPI_GPIO_CTRL PHY_REG(772, 18) +#define I217_LPI_GPIO_CTRL_AUTO_EN_LPI 0x0800 + +/* PHY Low Power Idle Control */ 
+#define I82579_LPI_CTRL PHY_REG(772, 20) +#define I82579_LPI_CTRL_100_ENABLE 0x2000 +#define I82579_LPI_CTRL_1000_ENABLE 0x4000 +#define I82579_LPI_CTRL_ENABLE_MASK 0x6000 + +/* 82579 DFT Control */ +#define I82579_DFT_CTRL PHY_REG(769, 20) +#define I82579_DFT_CTRL_GATE_PHY_RESET 0x0040 /* Gate PHY Reset on MAC Reset */ + +/* Extended Management Interface (EMI) Registers */ +#define I82579_EMI_ADDR 0x10 +#define I82579_EMI_DATA 0x11 +#define I82579_LPI_UPDATE_TIMER 0x4805 /* in 40ns units + 40 ns base value */ +#define I82579_MSE_THRESHOLD 0x084F /* 82579 Mean Square Error Threshold */ +#define I82577_MSE_THRESHOLD 0x0887 /* 82577 Mean Square Error Threshold */ +#define I82579_MSE_LINK_DOWN 0x2411 /* MSE count before dropping link */ +#define I82579_RX_CONFIG 0x3412 /* Receive configuration */ +#define I82579_LPI_PLL_SHUT 0x4412 /* LPI PLL Shut Enable */ +#define I82579_EEE_PCS_STATUS 0x182E /* IEEE MMD Register 3.1 >> 8 */ +#define I82579_EEE_CAPABILITY 0x0410 /* IEEE MMD Register 3.20 */ +#define I82579_EEE_ADVERTISEMENT 0x040E /* IEEE MMD Register 7.60 */ +#define I82579_EEE_LP_ABILITY 0x040F /* IEEE MMD Register 7.61 */ +#define I82579_EEE_100_SUPPORTED (1 << 1) /* 100BaseTx EEE */ +#define I82579_EEE_1000_SUPPORTED (1 << 2) /* 1000BaseTx EEE */ +#define I82579_LPI_100_PLL_SHUT (1 << 2) /* 100M LPI PLL Shut Enabled */ +#define I217_EEE_PCS_STATUS 0x9401 /* IEEE MMD Register 3.1 */ +#define I217_EEE_CAPABILITY 0x8000 /* IEEE MMD Register 3.20 */ +#define I217_EEE_ADVERTISEMENT 0x8001 /* IEEE MMD Register 7.60 */ +#define I217_EEE_LP_ABILITY 0x8002 /* IEEE MMD Register 7.61 */ +#define I217_RX_CONFIG 0xB20C /* Receive configuration */ + +#define IGC_EEE_RX_LPI_RCVD 0x0400 /* Tx LP idle received */ +#define IGC_EEE_TX_LPI_RCVD 0x0800 /* Rx LP idle received */ + +/* Intel Rapid Start Technology Support */ +#define I217_PROXY_CTRL BM_PHY_REG(BM_WUC_PAGE, 70) +#define I217_PROXY_CTRL_AUTO_DISABLE 0x0080 +#define I217_CGFREG PHY_REG(772, 29) +#define I217_CGFREG_ENABLE_MTA_RESET 0x0002 +#define I217_MEMPWR PHY_REG(772, 26) +#define I217_MEMPWR_DISABLE_SMB_RELEASE 0x0010 + +/* Receive Address Initial CRC Calculation */ +#define IGC_PCH_RAICC(_n) (0x05F50 + ((_n) * 4)) + +#define IGC_PCI_VENDOR_ID_REGISTER 0x00 + +#define IGC_PCI_REVISION_ID_REG 0x08 +void igc_set_kmrn_lock_loss_workaround_ich8lan(struct igc_hw *hw, + bool state); +void igc_igp3_phy_powerdown_workaround_ich8lan(struct igc_hw *hw); +void igc_gig_downshift_workaround_ich8lan(struct igc_hw *hw); +void igc_suspend_workarounds_ich8lan(struct igc_hw *hw); +u32 igc_resume_workarounds_pchlan(struct igc_hw *hw); +s32 igc_configure_k1_ich8lan(struct igc_hw *hw, bool k1_enable); +s32 igc_configure_k0s_lpt(struct igc_hw *hw, u8 entry_latency, u8 min_time); +void igc_copy_rx_addrs_to_phy_ich8lan(struct igc_hw *hw); +s32 igc_lv_jumbo_workaround_ich8lan(struct igc_hw *hw, bool enable); +s32 igc_read_emi_reg_locked(struct igc_hw *hw, u16 addr, u16 *data); +s32 igc_write_emi_reg_locked(struct igc_hw *hw, u16 addr, u16 data); +s32 igc_set_eee_pchlan(struct igc_hw *hw); +s32 igc_enable_ulp_lpt_lp(struct igc_hw *hw, bool to_sx); +s32 igc_disable_ulp_lpt_lp(struct igc_hw *hw, bool force); +#endif /* _IGC_ICH8LAN_H_ */ +void igc_demote_ltr(struct igc_hw *hw, bool demote, bool link); diff --git a/src/spdk/dpdk/drivers/net/igc/base/igc_mac.c b/src/spdk/dpdk/drivers/net/igc/base/igc_mac.c new file mode 100644 index 000000000..3cd6506e5 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/igc/base/igc_mac.c @@ -0,0 +1,2100 @@ +/* SPDX-License-Identifier: 
BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + +#include "igc_api.h" + +static s32 igc_validate_mdi_setting_generic(struct igc_hw *hw); +static void igc_set_lan_id_multi_port_pcie(struct igc_hw *hw); +static void igc_config_collision_dist_generic(struct igc_hw *hw); +static int igc_rar_set_generic(struct igc_hw *hw, u8 *addr, u32 index); + +/** + * igc_init_mac_ops_generic - Initialize MAC function pointers + * @hw: pointer to the HW structure + * + * Setups up the function pointers to no-op functions + **/ +void igc_init_mac_ops_generic(struct igc_hw *hw) +{ + struct igc_mac_info *mac = &hw->mac; + DEBUGFUNC("igc_init_mac_ops_generic"); + + /* General Setup */ + mac->ops.init_params = igc_null_ops_generic; + mac->ops.init_hw = igc_null_ops_generic; + mac->ops.reset_hw = igc_null_ops_generic; + mac->ops.setup_physical_interface = igc_null_ops_generic; + mac->ops.get_bus_info = igc_null_ops_generic; + mac->ops.set_lan_id = igc_set_lan_id_multi_port_pcie; + mac->ops.read_mac_addr = igc_read_mac_addr_generic; + mac->ops.config_collision_dist = igc_config_collision_dist_generic; + mac->ops.clear_hw_cntrs = igc_null_mac_generic; + /* LED */ + mac->ops.cleanup_led = igc_null_ops_generic; + mac->ops.setup_led = igc_null_ops_generic; + mac->ops.blink_led = igc_null_ops_generic; + mac->ops.led_on = igc_null_ops_generic; + mac->ops.led_off = igc_null_ops_generic; + /* LINK */ + mac->ops.setup_link = igc_null_ops_generic; + mac->ops.get_link_up_info = igc_null_link_info; + mac->ops.check_for_link = igc_null_ops_generic; + /* Management */ + mac->ops.check_mng_mode = igc_null_mng_mode; + /* VLAN, MC, etc. */ + mac->ops.update_mc_addr_list = igc_null_update_mc; + mac->ops.clear_vfta = igc_null_mac_generic; + mac->ops.write_vfta = igc_null_write_vfta; + mac->ops.rar_set = igc_rar_set_generic; + mac->ops.validate_mdi_setting = igc_validate_mdi_setting_generic; +} + +/** + * igc_null_ops_generic - No-op function, returns 0 + * @hw: pointer to the HW structure + **/ +s32 igc_null_ops_generic(struct igc_hw IGC_UNUSEDARG * hw) +{ + DEBUGFUNC("igc_null_ops_generic"); + UNREFERENCED_1PARAMETER(hw); + return IGC_SUCCESS; +} + +/** + * igc_null_mac_generic - No-op function, return void + * @hw: pointer to the HW structure + **/ +void igc_null_mac_generic(struct igc_hw IGC_UNUSEDARG * hw) +{ + DEBUGFUNC("igc_null_mac_generic"); + UNREFERENCED_1PARAMETER(hw); +} + +/** + * igc_null_link_info - No-op function, return 0 + * @hw: pointer to the HW structure + * @s: dummy variable + * @d: dummy variable + **/ +s32 igc_null_link_info(struct igc_hw IGC_UNUSEDARG * hw, + u16 IGC_UNUSEDARG * s, u16 IGC_UNUSEDARG * d) +{ + DEBUGFUNC("igc_null_link_info"); + UNREFERENCED_3PARAMETER(hw, s, d); + return IGC_SUCCESS; +} + +/** + * igc_null_mng_mode - No-op function, return false + * @hw: pointer to the HW structure + **/ +bool igc_null_mng_mode(struct igc_hw IGC_UNUSEDARG * hw) +{ + DEBUGFUNC("igc_null_mng_mode"); + UNREFERENCED_1PARAMETER(hw); + return false; +} + +/** + * igc_null_update_mc - No-op function, return void + * @hw: pointer to the HW structure + * @h: dummy variable + * @a: dummy variable + **/ +void igc_null_update_mc(struct igc_hw IGC_UNUSEDARG * hw, + u8 IGC_UNUSEDARG * h, u32 IGC_UNUSEDARG a) +{ + DEBUGFUNC("igc_null_update_mc"); + UNREFERENCED_3PARAMETER(hw, h, a); +} + +/** + * igc_null_write_vfta - No-op function, return void + * @hw: pointer to the HW structure + * @a: dummy variable + * @b: dummy variable + **/ +void igc_null_write_vfta(struct igc_hw IGC_UNUSEDARG * hw, + u32 
IGC_UNUSEDARG a, u32 IGC_UNUSEDARG b) +{ + DEBUGFUNC("igc_null_write_vfta"); + UNREFERENCED_3PARAMETER(hw, a, b); +} + +/** + * igc_null_rar_set - No-op function, return 0 + * @hw: pointer to the HW structure + * @h: dummy variable + * @a: dummy variable + **/ +int igc_null_rar_set(struct igc_hw IGC_UNUSEDARG * hw, + u8 IGC_UNUSEDARG * h, u32 IGC_UNUSEDARG a) +{ + DEBUGFUNC("igc_null_rar_set"); + UNREFERENCED_3PARAMETER(hw, h, a); + return IGC_SUCCESS; +} + +/** + * igc_get_bus_info_pci_generic - Get PCI(x) bus information + * @hw: pointer to the HW structure + * + * Determines and stores the system bus information for a particular + * network interface. The following bus information is determined and stored: + * bus speed, bus width, type (PCI/PCIx), and PCI(-x) function. + **/ +s32 igc_get_bus_info_pci_generic(struct igc_hw *hw) +{ + struct igc_mac_info *mac = &hw->mac; + struct igc_bus_info *bus = &hw->bus; + u32 status = IGC_READ_REG(hw, IGC_STATUS); + s32 ret_val = IGC_SUCCESS; + + DEBUGFUNC("igc_get_bus_info_pci_generic"); + + /* PCI or PCI-X? */ + bus->type = (status & IGC_STATUS_PCIX_MODE) + ? igc_bus_type_pcix + : igc_bus_type_pci; + + /* Bus speed */ + if (bus->type == igc_bus_type_pci) { + bus->speed = (status & IGC_STATUS_PCI66) + ? igc_bus_speed_66 + : igc_bus_speed_33; + } else { + switch (status & IGC_STATUS_PCIX_SPEED) { + case IGC_STATUS_PCIX_SPEED_66: + bus->speed = igc_bus_speed_66; + break; + case IGC_STATUS_PCIX_SPEED_100: + bus->speed = igc_bus_speed_100; + break; + case IGC_STATUS_PCIX_SPEED_133: + bus->speed = igc_bus_speed_133; + break; + default: + bus->speed = igc_bus_speed_reserved; + break; + } + } + + /* Bus width */ + bus->width = (status & IGC_STATUS_BUS64) + ? igc_bus_width_64 + : igc_bus_width_32; + + /* Which PCI(-X) function? */ + mac->ops.set_lan_id(hw); + + return ret_val; +} + +/** + * igc_get_bus_info_pcie_generic - Get PCIe bus information + * @hw: pointer to the HW structure + * + * Determines and stores the system bus information for a particular + * network interface. The following bus information is determined and stored: + * bus speed, bus width, type (PCIe), and PCIe function. + **/ +s32 igc_get_bus_info_pcie_generic(struct igc_hw *hw) +{ + struct igc_mac_info *mac = &hw->mac; + struct igc_bus_info *bus = &hw->bus; + s32 ret_val; + u16 pcie_link_status; + + DEBUGFUNC("igc_get_bus_info_pcie_generic"); + + bus->type = igc_bus_type_pci_express; + + ret_val = igc_read_pcie_cap_reg(hw, PCIE_LINK_STATUS, + &pcie_link_status); + if (ret_val) { + bus->width = igc_bus_width_unknown; + bus->speed = igc_bus_speed_unknown; + } else { + switch (pcie_link_status & PCIE_LINK_SPEED_MASK) { + case PCIE_LINK_SPEED_2500: + bus->speed = igc_bus_speed_2500; + break; + case PCIE_LINK_SPEED_5000: + bus->speed = igc_bus_speed_5000; + break; + default: + bus->speed = igc_bus_speed_unknown; + break; + } + + bus->width = (enum igc_bus_width)((pcie_link_status & + PCIE_LINK_WIDTH_MASK) >> PCIE_LINK_WIDTH_SHIFT); + } + + mac->ops.set_lan_id(hw); + + return IGC_SUCCESS; +} + +/** + * igc_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices + * + * @hw: pointer to the HW structure + * + * Determines the LAN function id by reading memory-mapped registers + * and swaps the port value if requested. + **/ +static void igc_set_lan_id_multi_port_pcie(struct igc_hw *hw) +{ + struct igc_bus_info *bus = &hw->bus; + u32 reg; + + /* The status register reports the correct function number + * for the device regardless of function swap state. 
+ */ + reg = IGC_READ_REG(hw, IGC_STATUS); + bus->func = (reg & IGC_STATUS_FUNC_MASK) >> IGC_STATUS_FUNC_SHIFT; +} + +/** + * igc_set_lan_id_multi_port_pci - Set LAN id for PCI multiple port devices + * @hw: pointer to the HW structure + * + * Determines the LAN function id by reading PCI config space. + **/ +void igc_set_lan_id_multi_port_pci(struct igc_hw *hw) +{ + struct igc_bus_info *bus = &hw->bus; + u16 pci_header_type; + u32 status; + + igc_read_pci_cfg(hw, PCI_HEADER_TYPE_REGISTER, &pci_header_type); + if (pci_header_type & PCI_HEADER_TYPE_MULTIFUNC) { + status = IGC_READ_REG(hw, IGC_STATUS); + bus->func = (status & IGC_STATUS_FUNC_MASK) + >> IGC_STATUS_FUNC_SHIFT; + } else { + bus->func = 0; + } +} + +/** + * igc_set_lan_id_single_port - Set LAN id for a single port device + * @hw: pointer to the HW structure + * + * Sets the LAN function id to zero for a single port device. + **/ +void igc_set_lan_id_single_port(struct igc_hw *hw) +{ + struct igc_bus_info *bus = &hw->bus; + + bus->func = 0; +} + +/** + * igc_clear_vfta_generic - Clear VLAN filter table + * @hw: pointer to the HW structure + * + * Clears the register array which contains the VLAN filter table by + * setting all the values to 0. + **/ +void igc_clear_vfta_generic(struct igc_hw *hw) +{ + u32 offset; + + DEBUGFUNC("igc_clear_vfta_generic"); + + for (offset = 0; offset < IGC_VLAN_FILTER_TBL_SIZE; offset++) { + IGC_WRITE_REG_ARRAY(hw, IGC_VFTA, offset, 0); + IGC_WRITE_FLUSH(hw); + } +} + +/** + * igc_write_vfta_generic - Write value to VLAN filter table + * @hw: pointer to the HW structure + * @offset: register offset in VLAN filter table + * @value: register value written to VLAN filter table + * + * Writes value at the given offset in the register array which stores + * the VLAN filter table. + **/ +void igc_write_vfta_generic(struct igc_hw *hw, u32 offset, u32 value) +{ + DEBUGFUNC("igc_write_vfta_generic"); + + IGC_WRITE_REG_ARRAY(hw, IGC_VFTA, offset, value); + IGC_WRITE_FLUSH(hw); +} + +/** + * igc_init_rx_addrs_generic - Initialize receive address's + * @hw: pointer to the HW structure + * @rar_count: receive address registers + * + * Setup the receive address registers by setting the base receive address + * register to the devices MAC address and clearing all the other receive + * address registers to 0. + **/ +void igc_init_rx_addrs_generic(struct igc_hw *hw, u16 rar_count) +{ + u32 i; + u8 mac_addr[ETH_ADDR_LEN] = {0}; + + DEBUGFUNC("igc_init_rx_addrs_generic"); + + /* Setup the receive address */ + DEBUGOUT("Programming MAC Address into RAR[0]\n"); + + hw->mac.ops.rar_set(hw, hw->mac.addr, 0); + + /* Zero out the other (rar_entry_count - 1) receive addresses */ + DEBUGOUT1("Clearing RAR[1-%u]\n", rar_count - 1); + for (i = 1; i < rar_count; i++) + hw->mac.ops.rar_set(hw, mac_addr, i); +} + +/** + * igc_check_alt_mac_addr_generic - Check for alternate MAC addr + * @hw: pointer to the HW structure + * + * Checks the nvm for an alternate MAC address. An alternate MAC address + * can be setup by pre-boot software and must be treated like a permanent + * address and must override the actual permanent MAC address. If an + * alternate MAC address is found it is programmed into RAR0, replacing + * the permanent address that was installed into RAR0 by the Si on reset. + * This function will return SUCCESS unless it encounters an error while + * reading the EEPROM. 
+ **/ +s32 igc_check_alt_mac_addr_generic(struct igc_hw *hw) +{ + u32 i; + s32 ret_val; + u16 offset, nvm_alt_mac_addr_offset, nvm_data; + u8 alt_mac_addr[ETH_ADDR_LEN]; + + DEBUGFUNC("igc_check_alt_mac_addr_generic"); + + ret_val = hw->nvm.ops.read(hw, NVM_COMPAT, 1, &nvm_data); + if (ret_val) + return ret_val; + + /* not supported on older hardware or 82573 */ + if (hw->mac.type < igc_82571 || hw->mac.type == igc_82573) + return IGC_SUCCESS; + + /* Alternate MAC address is handled by the option ROM for 82580 + * and newer. SW support not required. + */ + if (hw->mac.type >= igc_82580) + return IGC_SUCCESS; + + ret_val = hw->nvm.ops.read(hw, NVM_ALT_MAC_ADDR_PTR, 1, + &nvm_alt_mac_addr_offset); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + + if (nvm_alt_mac_addr_offset == 0xFFFF || + nvm_alt_mac_addr_offset == 0x0000) + /* There is no Alternate MAC Address */ + return IGC_SUCCESS; + + if (hw->bus.func == IGC_FUNC_1) + nvm_alt_mac_addr_offset += IGC_ALT_MAC_ADDRESS_OFFSET_LAN1; + if (hw->bus.func == IGC_FUNC_2) + nvm_alt_mac_addr_offset += IGC_ALT_MAC_ADDRESS_OFFSET_LAN2; + + if (hw->bus.func == IGC_FUNC_3) + nvm_alt_mac_addr_offset += IGC_ALT_MAC_ADDRESS_OFFSET_LAN3; + for (i = 0; i < ETH_ADDR_LEN; i += 2) { + offset = nvm_alt_mac_addr_offset + (i >> 1); + ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + + alt_mac_addr[i] = (u8)(nvm_data & 0xFF); + alt_mac_addr[i + 1] = (u8)(nvm_data >> 8); + } + + /* if multicast bit is set, the alternate address will not be used */ + if (alt_mac_addr[0] & 0x01) { + DEBUGOUT("Ignoring Alternate Mac Address with MC bit set\n"); + return IGC_SUCCESS; + } + + /* We have a valid alternate MAC address, and we want to treat it the + * same as the normal permanent MAC address stored by the HW into the + * RAR. Do this by mapping this address into RAR0. + */ + hw->mac.ops.rar_set(hw, alt_mac_addr, 0); + + return IGC_SUCCESS; +} + +/** + * igc_rar_set_generic - Set receive address register + * @hw: pointer to the HW structure + * @addr: pointer to the receive address + * @index: receive address array register + * + * Sets the receive address array register at index to the address passed + * in by addr. + **/ +static int igc_rar_set_generic(struct igc_hw *hw, u8 *addr, u32 index) +{ + u32 rar_low, rar_high; + + DEBUGFUNC("igc_rar_set_generic"); + + /* HW expects these in little endian so we reverse the byte order + * from network order (big endian) to little endian + */ + rar_low = ((u32)addr[0] | ((u32)addr[1] << 8) | + ((u32)addr[2] << 16) | ((u32)addr[3] << 24)); + + rar_high = ((u32)addr[4] | ((u32)addr[5] << 8)); + + /* If MAC address zero, no need to set the AV bit */ + if (rar_low || rar_high) + rar_high |= IGC_RAH_AV; + + /* Some bridges will combine consecutive 32-bit writes into + * a single burst write, which will malfunction on some parts. + * The flushes avoid this. + */ + IGC_WRITE_REG(hw, IGC_RAL(index), rar_low); + IGC_WRITE_FLUSH(hw); + IGC_WRITE_REG(hw, IGC_RAH(index), rar_high); + IGC_WRITE_FLUSH(hw); + + return IGC_SUCCESS; +} + +/** + * igc_hash_mc_addr_generic - Generate a multicast hash value + * @hw: pointer to the HW structure + * @mc_addr: pointer to a multicast address + * + * Generates a multicast address hash value which is used to determine + * the multicast filter table array address and new table value. 
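+ *
+ * Usage sketch (illustrative only; hw is assumed to be an initialised
+ * struct igc_hw with a power-of-two mta_reg_count such as 128):
+ *
+ *   u8 mc_addr[ETH_ADDR_LEN] = { 0x01, 0xAA, 0x00, 0x12, 0x34, 0x56 };
+ *   u32 hash = igc_hash_mc_addr_generic(hw, mc_addr);
+ *   u32 hash_reg = (hash >> 5) & (hw->mac.mta_reg_count - 1);
+ *   u32 hash_bit = hash & 0x1F;
+ *
+ * For this address, mc_filter_type 0 and 128 MTA registers give hash 0x563
+ * (see the worked cases in the function body), so hash_reg is 0x2B and
+ * hash_bit is 3; igc_update_mc_addr_list_generic() sets exactly that bit.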
+ **/ +u32 igc_hash_mc_addr_generic(struct igc_hw *hw, u8 *mc_addr) +{ + u32 hash_value, hash_mask; + u8 bit_shift = 0; + + DEBUGFUNC("igc_hash_mc_addr_generic"); + + /* Register count multiplied by bits per register */ + hash_mask = (hw->mac.mta_reg_count * 32) - 1; + + /* For a mc_filter_type of 0, bit_shift is the number of left-shifts + * where 0xFF would still fall within the hash mask. + */ + while (hash_mask >> bit_shift != 0xFF) + bit_shift++; + + /* The portion of the address that is used for the hash table + * is determined by the mc_filter_type setting. + * The algorithm is such that there is a total of 8 bits of shifting. + * The bit_shift for a mc_filter_type of 0 represents the number of + * left-shifts where the MSB of mc_addr[5] would still fall within + * the hash_mask. Case 0 does this exactly. Since there are a total + * of 8 bits of shifting, then mc_addr[4] will shift right the + * remaining number of bits. Thus 8 - bit_shift. The rest of the + * cases are a variation of this algorithm...essentially raising the + * number of bits to shift mc_addr[5] left, while still keeping the + * 8-bit shifting total. + * + * For example, given the following Destination MAC Address and an + * mta register count of 128 (thus a 4096-bit vector and 0xFFF mask), + * we can see that the bit_shift for case 0 is 4. These are the hash + * values resulting from each mc_filter_type... + * [0] [1] [2] [3] [4] [5] + * 01 AA 00 12 34 56 + * LSB MSB + * + * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563 + * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6 + * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x163 + * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634 + */ + switch (hw->mac.mc_filter_type) { + default: + case 0: + break; + case 1: + bit_shift += 1; + break; + case 2: + bit_shift += 2; + break; + case 3: + bit_shift += 4; + break; + } + + hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) | + (((u16)mc_addr[5]) << bit_shift))); + + return hash_value; +} + +/** + * igc_update_mc_addr_list_generic - Update Multicast addresses + * @hw: pointer to the HW structure + * @mc_addr_list: array of multicast addresses to program + * @mc_addr_count: number of multicast addresses to program + * + * Updates entire Multicast Table Array. + * The caller must have a packed mc_addr_list of multicast addresses. + **/ +void igc_update_mc_addr_list_generic(struct igc_hw *hw, + u8 *mc_addr_list, u32 mc_addr_count) +{ + u32 hash_value, hash_bit, hash_reg; + int i; + + DEBUGFUNC("igc_update_mc_addr_list_generic"); + + /* clear mta_shadow */ + memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow)); + + /* update mta_shadow from mc_addr_list */ + for (i = 0; (u32)i < mc_addr_count; i++) { + hash_value = igc_hash_mc_addr_generic(hw, mc_addr_list); + + hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1); + hash_bit = hash_value & 0x1F; + + hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit); + mc_addr_list += (ETH_ADDR_LEN); + } + + /* replace the entire MTA table */ + for (i = hw->mac.mta_reg_count - 1; i >= 0; i--) + IGC_WRITE_REG_ARRAY(hw, IGC_MTA, i, hw->mac.mta_shadow[i]); + IGC_WRITE_FLUSH(hw); +} + +/** + * igc_pcix_mmrbc_workaround_generic - Fix incorrect MMRBC value + * @hw: pointer to the HW structure + * + * In certain situations, a system BIOS may report that the PCIx maximum + * memory read byte count (MMRBC) value is higher than than the actual + * value. 
We check the PCIx command register with the current PCIx status + * register. + **/ +void igc_pcix_mmrbc_workaround_generic(struct igc_hw *hw) +{ + u16 cmd_mmrbc; + u16 pcix_cmd; + u16 pcix_stat_hi_word; + u16 stat_mmrbc; + + DEBUGFUNC("igc_pcix_mmrbc_workaround_generic"); + + /* Workaround for PCI-X issue when BIOS sets MMRBC incorrectly */ + if (hw->bus.type != igc_bus_type_pcix) + return; + + igc_read_pci_cfg(hw, PCIX_COMMAND_REGISTER, &pcix_cmd); + igc_read_pci_cfg(hw, PCIX_STATUS_REGISTER_HI, &pcix_stat_hi_word); + cmd_mmrbc = (pcix_cmd & PCIX_COMMAND_MMRBC_MASK) >> + PCIX_COMMAND_MMRBC_SHIFT; + stat_mmrbc = (pcix_stat_hi_word & PCIX_STATUS_HI_MMRBC_MASK) >> + PCIX_STATUS_HI_MMRBC_SHIFT; + if (stat_mmrbc == PCIX_STATUS_HI_MMRBC_4K) + stat_mmrbc = PCIX_STATUS_HI_MMRBC_2K; + if (cmd_mmrbc > stat_mmrbc) { + pcix_cmd &= ~PCIX_COMMAND_MMRBC_MASK; + pcix_cmd |= stat_mmrbc << PCIX_COMMAND_MMRBC_SHIFT; + igc_write_pci_cfg(hw, PCIX_COMMAND_REGISTER, &pcix_cmd); + } +} + +/** + * igc_clear_hw_cntrs_base_generic - Clear base hardware counters + * @hw: pointer to the HW structure + * + * Clears the base hardware counters by reading the counter registers. + **/ +void igc_clear_hw_cntrs_base_generic(struct igc_hw *hw) +{ + DEBUGFUNC("igc_clear_hw_cntrs_base_generic"); + + IGC_READ_REG(hw, IGC_CRCERRS); + IGC_READ_REG(hw, IGC_SYMERRS); + IGC_READ_REG(hw, IGC_MPC); + IGC_READ_REG(hw, IGC_SCC); + IGC_READ_REG(hw, IGC_ECOL); + IGC_READ_REG(hw, IGC_MCC); + IGC_READ_REG(hw, IGC_LATECOL); + IGC_READ_REG(hw, IGC_COLC); + IGC_READ_REG(hw, IGC_DC); + IGC_READ_REG(hw, IGC_SEC); + IGC_READ_REG(hw, IGC_RLEC); + IGC_READ_REG(hw, IGC_XONRXC); + IGC_READ_REG(hw, IGC_XONTXC); + IGC_READ_REG(hw, IGC_XOFFRXC); + IGC_READ_REG(hw, IGC_XOFFTXC); + IGC_READ_REG(hw, IGC_FCRUC); + IGC_READ_REG(hw, IGC_GPRC); + IGC_READ_REG(hw, IGC_BPRC); + IGC_READ_REG(hw, IGC_MPRC); + IGC_READ_REG(hw, IGC_GPTC); + IGC_READ_REG(hw, IGC_GORCL); + IGC_READ_REG(hw, IGC_GORCH); + IGC_READ_REG(hw, IGC_GOTCL); + IGC_READ_REG(hw, IGC_GOTCH); + IGC_READ_REG(hw, IGC_RNBC); + IGC_READ_REG(hw, IGC_RUC); + IGC_READ_REG(hw, IGC_RFC); + IGC_READ_REG(hw, IGC_ROC); + IGC_READ_REG(hw, IGC_RJC); + IGC_READ_REG(hw, IGC_TORL); + IGC_READ_REG(hw, IGC_TORH); + IGC_READ_REG(hw, IGC_TOTL); + IGC_READ_REG(hw, IGC_TOTH); + IGC_READ_REG(hw, IGC_TPR); + IGC_READ_REG(hw, IGC_TPT); + IGC_READ_REG(hw, IGC_MPTC); + IGC_READ_REG(hw, IGC_BPTC); +} + +/** + * igc_check_for_copper_link_generic - Check for link (Copper) + * @hw: pointer to the HW structure + * + * Checks to see of the link status of the hardware has changed. If a + * change in link status has been detected, then we read the PHY registers + * to get the current speed/duplex if link exists. + **/ +s32 igc_check_for_copper_link_generic(struct igc_hw *hw) +{ + struct igc_mac_info *mac = &hw->mac; + s32 ret_val; + bool link; + + DEBUGFUNC("igc_check_for_copper_link"); + + /* We only want to go out to the PHY registers to see if Auto-Neg + * has completed and/or if our link status has changed. The + * get_link_status flag is set upon receiving a Link Status + * Change or Rx Sequence Error interrupt. + */ + if (!mac->get_link_status) + return IGC_SUCCESS; + + /* First we want to see if the MII Status Register reports + * link. If so, then we want to get the current speed/duplex + * of the PHY. 
+ */ + ret_val = igc_phy_has_link_generic(hw, 1, 0, &link); + if (ret_val) + return ret_val; + + if (!link) + return IGC_SUCCESS; /* No link detected */ + + mac->get_link_status = false; + + /* Check if there was DownShift, must be checked + * immediately after link-up + */ + igc_check_downshift_generic(hw); + + /* If we are forcing speed/duplex, then we simply return since + * we have already determined whether we have link or not. + */ + if (!mac->autoneg) + return -IGC_ERR_CONFIG; + + /* Auto-Neg is enabled. Auto Speed Detection takes care + * of MAC speed/duplex configuration. So we only need to + * configure Collision Distance in the MAC. + */ + mac->ops.config_collision_dist(hw); + + /* Configure Flow Control now that Auto-Neg has completed. + * First, we need to restore the desired flow control + * settings because we may have had to re-autoneg with a + * different link partner. + */ + ret_val = igc_config_fc_after_link_up_generic(hw); + if (ret_val) + DEBUGOUT("Error configuring flow control\n"); + + return ret_val; +} + +/** + * igc_check_for_fiber_link_generic - Check for link (Fiber) + * @hw: pointer to the HW structure + * + * Checks for link up on the hardware. If link is not up and we have + * a signal, then we need to force link up. + **/ +s32 igc_check_for_fiber_link_generic(struct igc_hw *hw) +{ + struct igc_mac_info *mac = &hw->mac; + u32 rxcw; + u32 ctrl; + u32 status; + s32 ret_val; + + DEBUGFUNC("igc_check_for_fiber_link_generic"); + + ctrl = IGC_READ_REG(hw, IGC_CTRL); + status = IGC_READ_REG(hw, IGC_STATUS); + rxcw = IGC_READ_REG(hw, IGC_RXCW); + + /* If we don't have link (auto-negotiation failed or link partner + * cannot auto-negotiate), the cable is plugged in (we have signal), + * and our link partner is not trying to auto-negotiate with us (we + * are receiving idles or data), we need to force link up. We also + * need to give auto-negotiation time to complete, in case the cable + * was just plugged in. The autoneg_failed flag does this. + */ + /* (ctrl & IGC_CTRL_SWDPIN1) == 1 == have signal */ + if ((ctrl & IGC_CTRL_SWDPIN1) && !(status & IGC_STATUS_LU) && + !(rxcw & IGC_RXCW_C)) { + if (!mac->autoneg_failed) { + mac->autoneg_failed = true; + return IGC_SUCCESS; + } + DEBUGOUT("NOT Rx'ing /C/, disable AutoNeg and force link.\n"); + + /* Disable auto-negotiation in the TXCW register */ + IGC_WRITE_REG(hw, IGC_TXCW, (mac->txcw & ~IGC_TXCW_ANE)); + + /* Force link-up and also force full-duplex. */ + ctrl = IGC_READ_REG(hw, IGC_CTRL); + ctrl |= (IGC_CTRL_SLU | IGC_CTRL_FD); + IGC_WRITE_REG(hw, IGC_CTRL, ctrl); + + /* Configure Flow Control after forcing link up. */ + ret_val = igc_config_fc_after_link_up_generic(hw); + if (ret_val) { + DEBUGOUT("Error configuring flow control\n"); + return ret_val; + } + } else if ((ctrl & IGC_CTRL_SLU) && (rxcw & IGC_RXCW_C)) { + /* If we are forcing link and we are receiving /C/ ordered + * sets, re-enable auto-negotiation in the TXCW register + * and disable forced link in the Device Control register + * in an attempt to auto-negotiate with our link partner. + */ + DEBUGOUT("Rx'ing /C/, enable AutoNeg and stop forcing link.\n"); + IGC_WRITE_REG(hw, IGC_TXCW, mac->txcw); + IGC_WRITE_REG(hw, IGC_CTRL, (ctrl & ~IGC_CTRL_SLU)); + + mac->serdes_has_link = true; + } + + return IGC_SUCCESS; +} + +/** + * igc_check_for_serdes_link_generic - Check for link (Serdes) + * @hw: pointer to the HW structure + * + * Checks for link up on the hardware. If link is not up and we have + * a signal, then we need to force link up. 
+ **/ +s32 igc_check_for_serdes_link_generic(struct igc_hw *hw) +{ + struct igc_mac_info *mac = &hw->mac; + u32 rxcw; + u32 ctrl; + u32 status; + s32 ret_val; + + DEBUGFUNC("igc_check_for_serdes_link_generic"); + + ctrl = IGC_READ_REG(hw, IGC_CTRL); + status = IGC_READ_REG(hw, IGC_STATUS); + rxcw = IGC_READ_REG(hw, IGC_RXCW); + + /* If we don't have link (auto-negotiation failed or link partner + * cannot auto-negotiate), and our link partner is not trying to + * auto-negotiate with us (we are receiving idles or data), + * we need to force link up. We also need to give auto-negotiation + * time to complete. + */ + /* (ctrl & IGC_CTRL_SWDPIN1) == 1 == have signal */ + if (!(status & IGC_STATUS_LU) && !(rxcw & IGC_RXCW_C)) { + if (!mac->autoneg_failed) { + mac->autoneg_failed = true; + return IGC_SUCCESS; + } + DEBUGOUT("NOT Rx'ing /C/, disable AutoNeg and force link.\n"); + + /* Disable auto-negotiation in the TXCW register */ + IGC_WRITE_REG(hw, IGC_TXCW, (mac->txcw & ~IGC_TXCW_ANE)); + + /* Force link-up and also force full-duplex. */ + ctrl = IGC_READ_REG(hw, IGC_CTRL); + ctrl |= (IGC_CTRL_SLU | IGC_CTRL_FD); + IGC_WRITE_REG(hw, IGC_CTRL, ctrl); + + /* Configure Flow Control after forcing link up. */ + ret_val = igc_config_fc_after_link_up_generic(hw); + if (ret_val) { + DEBUGOUT("Error configuring flow control\n"); + return ret_val; + } + } else if ((ctrl & IGC_CTRL_SLU) && (rxcw & IGC_RXCW_C)) { + /* If we are forcing link and we are receiving /C/ ordered + * sets, re-enable auto-negotiation in the TXCW register + * and disable forced link in the Device Control register + * in an attempt to auto-negotiate with our link partner. + */ + DEBUGOUT("Rx'ing /C/, enable AutoNeg and stop forcing link.\n"); + IGC_WRITE_REG(hw, IGC_TXCW, mac->txcw); + IGC_WRITE_REG(hw, IGC_CTRL, (ctrl & ~IGC_CTRL_SLU)); + + mac->serdes_has_link = true; + } else if (!(IGC_TXCW_ANE & IGC_READ_REG(hw, IGC_TXCW))) { + /* If we force link for non-auto-negotiation switch, check + * link status based on MAC synchronization for internal + * serdes media type. + */ + /* SYNCH bit and IV bit are sticky. */ + usec_delay(10); + rxcw = IGC_READ_REG(hw, IGC_RXCW); + if (rxcw & IGC_RXCW_SYNCH) { + if (!(rxcw & IGC_RXCW_IV)) { + mac->serdes_has_link = true; + DEBUGOUT("SERDES: Link up - forced.\n"); + } + } else { + mac->serdes_has_link = false; + DEBUGOUT("SERDES: Link down - force failed.\n"); + } + } + + if (IGC_TXCW_ANE & IGC_READ_REG(hw, IGC_TXCW)) { + status = IGC_READ_REG(hw, IGC_STATUS); + if (status & IGC_STATUS_LU) { + /* SYNCH bit and IV bit are sticky, so reread rxcw. */ + usec_delay(10); + rxcw = IGC_READ_REG(hw, IGC_RXCW); + if (rxcw & IGC_RXCW_SYNCH) { + if (!(rxcw & IGC_RXCW_IV)) { + mac->serdes_has_link = true; + DEBUGOUT("SERDES: Link up - autoneg completed successfully.\n"); + } else { + mac->serdes_has_link = false; + DEBUGOUT("SERDES: Link down - invalid codewords detected in autoneg.\n"); + } + } else { + mac->serdes_has_link = false; + DEBUGOUT("SERDES: Link down - no sync.\n"); + } + } else { + mac->serdes_has_link = false; + DEBUGOUT("SERDES: Link down - autoneg failed\n"); + } + } + + return IGC_SUCCESS; +} + +/** + * igc_set_default_fc_generic - Set flow control default values + * @hw: pointer to the HW structure + * + * Read the EEPROM for the default values for flow control and store the + * values. 
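+ *
+ * Usage sketch (illustrative only; assumes the NVM ops are initialised):
+ *
+ *   if (igc_set_default_fc_generic(hw) == IGC_SUCCESS &&
+ *       hw->fc.requested_mode == igc_fc_full)
+ *       DEBUGOUT("EEPROM requests symmetric flow control\n");
+ *
+ * In the body below, no PAUSE bits in word 0x0F selects igc_fc_none,
+ * ASM_DIR alone selects igc_fc_tx_pause, and any other combination
+ * selects igc_fc_full.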
+ **/ +s32 igc_set_default_fc_generic(struct igc_hw *hw) +{ + s32 ret_val; + u16 nvm_data; + u16 nvm_offset = 0; + + DEBUGFUNC("igc_set_default_fc_generic"); + + /* Read and store word 0x0F of the EEPROM. This word contains bits + * that determine the hardware's default PAUSE (flow control) mode, + * a bit that determines whether the HW defaults to enabling or + * disabling auto-negotiation, and the direction of the + * SW defined pins. If there is no SW over-ride of the flow + * control setting, then the variable hw->fc will + * be initialized based on a value in the EEPROM. + */ + if (hw->mac.type == igc_i350) { + nvm_offset = NVM_82580_LAN_FUNC_OFFSET(hw->bus.func); + ret_val = hw->nvm.ops.read(hw, + NVM_INIT_CONTROL2_REG + + nvm_offset, + 1, &nvm_data); + } else { + ret_val = hw->nvm.ops.read(hw, + NVM_INIT_CONTROL2_REG, + 1, &nvm_data); + } + + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + + if (!(nvm_data & NVM_WORD0F_PAUSE_MASK)) + hw->fc.requested_mode = igc_fc_none; + else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == + NVM_WORD0F_ASM_DIR) + hw->fc.requested_mode = igc_fc_tx_pause; + else + hw->fc.requested_mode = igc_fc_full; + + return IGC_SUCCESS; +} + +/** + * igc_setup_link_generic - Setup flow control and link settings + * @hw: pointer to the HW structure + * + * Determines which flow control settings to use, then configures flow + * control. Calls the appropriate media-specific link configuration + * function. Assuming the adapter has a valid link partner, a valid link + * should be established. Assumes the hardware has previously been reset + * and the transmitter and receiver are not enabled. + **/ +s32 igc_setup_link_generic(struct igc_hw *hw) +{ + s32 ret_val; + + DEBUGFUNC("igc_setup_link_generic"); + + /* In the case of the phy reset being blocked, we already have a link. + * We do not need to set it up again. + */ + if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw)) + return IGC_SUCCESS; + + /* If requested flow control is set to default, set flow control + * based on the EEPROM flow control settings. + */ + if (hw->fc.requested_mode == igc_fc_default) + hw->fc.requested_mode = igc_fc_full; + + /* Save off the requested flow control mode for use later. Depending + * on the link partner's capabilities, we may or may not use this mode. + */ + hw->fc.current_mode = hw->fc.requested_mode; + + DEBUGOUT1("After fix-ups FlowControl is now = %x\n", + hw->fc.current_mode); + + /* Call the necessary media_type subroutine to configure the link. */ + ret_val = hw->mac.ops.setup_physical_interface(hw); + if (ret_val) + return ret_val; + + /* Initialize the flow control address, type, and PAUSE timer + * registers to their default values. This is done even if flow + * control is disabled, because it does not hurt anything to + * initialize these registers. + */ + DEBUGOUT("Initializing the Flow Control address, type and timer regs\n"); + IGC_WRITE_REG(hw, IGC_FCT, FLOW_CONTROL_TYPE); + IGC_WRITE_REG(hw, IGC_FCAH, FLOW_CONTROL_ADDRESS_HIGH); + IGC_WRITE_REG(hw, IGC_FCAL, FLOW_CONTROL_ADDRESS_LOW); + + IGC_WRITE_REG(hw, IGC_FCTTV, hw->fc.pause_time); + + return igc_set_fc_watermarks_generic(hw); +} + +/** + * igc_commit_fc_settings_generic - Configure flow control + * @hw: pointer to the HW structure + * + * Write the flow control settings to the Transmit Config Word Register (TXCW) + * base on the flow control settings in igc_mac_info. 
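+ *
+ * The advertisement written below, per flow control mode:
+ *
+ *   igc_fc_none      TXCW = IGC_TXCW_ANE | IGC_TXCW_FD
+ *   igc_fc_rx_pause  TXCW = IGC_TXCW_ANE | IGC_TXCW_FD | IGC_TXCW_PAUSE_MASK
+ *   igc_fc_tx_pause  TXCW = IGC_TXCW_ANE | IGC_TXCW_FD | IGC_TXCW_ASM_DIR
+ *   igc_fc_full      TXCW = IGC_TXCW_ANE | IGC_TXCW_FD | IGC_TXCW_PAUSE_MASK
+ *
+ * rx_pause and full advertise the same bits because Rx-only PAUSE cannot
+ * be advertised on its own; Tx of PAUSE frames is disabled later if needed.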
+ **/ +s32 igc_commit_fc_settings_generic(struct igc_hw *hw) +{ + struct igc_mac_info *mac = &hw->mac; + u32 txcw; + + DEBUGFUNC("igc_commit_fc_settings_generic"); + + /* Check for a software override of the flow control settings, and + * setup the device accordingly. If auto-negotiation is enabled, then + * software will have to set the "PAUSE" bits to the correct value in + * the Transmit Config Word Register (TXCW) and re-start auto- + * negotiation. However, if auto-negotiation is disabled, then + * software will have to manually configure the two flow control enable + * bits in the CTRL register. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames, + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames but we + * do not support receiving pause frames). + * 3: Both Rx and Tx flow control (symmetric) are enabled. + */ + switch (hw->fc.current_mode) { + case igc_fc_none: + /* Flow control completely disabled by a software over-ride. */ + txcw = (IGC_TXCW_ANE | IGC_TXCW_FD); + break; + case igc_fc_rx_pause: + /* Rx Flow control is enabled and Tx Flow control is disabled + * by a software over-ride. Since there really isn't a way to + * advertise that we are capable of Rx Pause ONLY, we will + * advertise that we support both symmetric and asymmetric Rx + * PAUSE. Later, we will disable the adapter's ability to send + * PAUSE frames. + */ + txcw = (IGC_TXCW_ANE | IGC_TXCW_FD | IGC_TXCW_PAUSE_MASK); + break; + case igc_fc_tx_pause: + /* Tx Flow control is enabled, and Rx Flow control is disabled, + * by a software over-ride. + */ + txcw = (IGC_TXCW_ANE | IGC_TXCW_FD | IGC_TXCW_ASM_DIR); + break; + case igc_fc_full: + /* Flow control (both Rx and Tx) is enabled by a software + * over-ride. + */ + txcw = (IGC_TXCW_ANE | IGC_TXCW_FD | IGC_TXCW_PAUSE_MASK); + break; + default: + DEBUGOUT("Flow control param set incorrectly\n"); + return -IGC_ERR_CONFIG; + } + + IGC_WRITE_REG(hw, IGC_TXCW, txcw); + mac->txcw = txcw; + + return IGC_SUCCESS; +} + +/** + * igc_poll_fiber_serdes_link_generic - Poll for link up + * @hw: pointer to the HW structure + * + * Polls for link up by reading the status register, if link fails to come + * up with auto-negotiation, then the link is forced if a signal is detected. + **/ +s32 igc_poll_fiber_serdes_link_generic(struct igc_hw *hw) +{ + struct igc_mac_info *mac = &hw->mac; + u32 i, status; + s32 ret_val; + + DEBUGFUNC("igc_poll_fiber_serdes_link_generic"); + + /* If we have a signal (the cable is plugged in, or assumed true for + * serdes media) then poll for a "Link-Up" indication in the Device + * Status Register. Time-out if a link isn't seen in 500 milliseconds + * seconds (Auto-negotiation should complete in less than 500 + * milliseconds even if the other end is doing it in SW). + */ + for (i = 0; i < FIBER_LINK_UP_LIMIT; i++) { + msec_delay(10); + status = IGC_READ_REG(hw, IGC_STATUS); + if (status & IGC_STATUS_LU) + break; + } + if (i == FIBER_LINK_UP_LIMIT) { + DEBUGOUT("Never got a valid link from auto-neg!!!\n"); + mac->autoneg_failed = true; + /* AutoNeg failed to achieve a link, so we'll call + * mac->check_for_link. This routine will force the + * link up if we detect a signal. This will allow us to + * communicate with non-autonegotiating link partners. 
+ */ + ret_val = mac->ops.check_for_link(hw); + if (ret_val) { + DEBUGOUT("Error while checking for link\n"); + return ret_val; + } + mac->autoneg_failed = false; + } else { + mac->autoneg_failed = false; + DEBUGOUT("Valid Link Found\n"); + } + + return IGC_SUCCESS; +} + +/** + * igc_setup_fiber_serdes_link_generic - Setup link for fiber/serdes + * @hw: pointer to the HW structure + * + * Configures collision distance and flow control for fiber and serdes + * links. Upon successful setup, poll for link. + **/ +s32 igc_setup_fiber_serdes_link_generic(struct igc_hw *hw) +{ + u32 ctrl; + s32 ret_val; + + DEBUGFUNC("igc_setup_fiber_serdes_link_generic"); + + ctrl = IGC_READ_REG(hw, IGC_CTRL); + + /* Take the link out of reset */ + ctrl &= ~IGC_CTRL_LRST; + + hw->mac.ops.config_collision_dist(hw); + + ret_val = igc_commit_fc_settings_generic(hw); + if (ret_val) + return ret_val; + + /* Since auto-negotiation is enabled, take the link out of reset (the + * link will be in reset, because we previously reset the chip). This + * will restart auto-negotiation. If auto-negotiation is successful + * then the link-up status bit will be set and the flow control enable + * bits (RFCE and TFCE) will be set according to their negotiated value. + */ + DEBUGOUT("Auto-negotiation enabled\n"); + + IGC_WRITE_REG(hw, IGC_CTRL, ctrl); + IGC_WRITE_FLUSH(hw); + msec_delay(1); + + /* For these adapters, the SW definable pin 1 is set when the optics + * detect a signal. If we have a signal, then poll for a "Link-Up" + * indication. + */ + if (hw->phy.media_type == igc_media_type_internal_serdes || + (IGC_READ_REG(hw, IGC_CTRL) & IGC_CTRL_SWDPIN1)) { + ret_val = igc_poll_fiber_serdes_link_generic(hw); + } else { + DEBUGOUT("No signal detected\n"); + } + + return ret_val; +} + +/** + * igc_config_collision_dist_generic - Configure collision distance + * @hw: pointer to the HW structure + * + * Configures the collision distance to the default value and is used + * during link setup. + **/ +static void igc_config_collision_dist_generic(struct igc_hw *hw) +{ + u32 tctl; + + DEBUGFUNC("igc_config_collision_dist_generic"); + + tctl = IGC_READ_REG(hw, IGC_TCTL); + + tctl &= ~IGC_TCTL_COLD; + tctl |= IGC_COLLISION_DISTANCE << IGC_COLD_SHIFT; + + IGC_WRITE_REG(hw, IGC_TCTL, tctl); + IGC_WRITE_FLUSH(hw); +} + +/** + * igc_set_fc_watermarks_generic - Set flow control high/low watermarks + * @hw: pointer to the HW structure + * + * Sets the flow control high/low threshold (watermark) registers. If + * flow control XON frame transmission is enabled, then set XON frame + * transmission as well. + **/ +s32 igc_set_fc_watermarks_generic(struct igc_hw *hw) +{ + u32 fcrtl = 0, fcrth = 0; + + DEBUGFUNC("igc_set_fc_watermarks_generic"); + + /* Set the flow control receive threshold registers. Normally, + * these registers will be set to a default threshold that may be + * adjusted later by the driver's runtime code. However, if the + * ability to transmit pause frames is not enabled, then these + * registers will be set to 0. + */ + if (hw->fc.current_mode & igc_fc_tx_pause) { + /* We need to set up the Receive Threshold high and low water + * marks as well as (optionally) enabling the transmission of + * XON frames. 
+ */ + fcrtl = hw->fc.low_water; + if (hw->fc.send_xon) + fcrtl |= IGC_FCRTL_XONE; + + fcrth = hw->fc.high_water; + } + IGC_WRITE_REG(hw, IGC_FCRTL, fcrtl); + IGC_WRITE_REG(hw, IGC_FCRTH, fcrth); + + return IGC_SUCCESS; +} + +/** + * igc_force_mac_fc_generic - Force the MAC's flow control settings + * @hw: pointer to the HW structure + * + * Force the MAC's flow control settings. Sets the TFCE and RFCE bits in the + * device control register to reflect the adapter settings. TFCE and RFCE + * need to be explicitly set by software when a copper PHY is used because + * autonegotiation is managed by the PHY rather than the MAC. Software must + * also configure these bits when link is forced on a fiber connection. + **/ +s32 igc_force_mac_fc_generic(struct igc_hw *hw) +{ + u32 ctrl; + + DEBUGFUNC("igc_force_mac_fc_generic"); + + ctrl = IGC_READ_REG(hw, IGC_CTRL); + + /* Because we didn't get link via the internal auto-negotiation + * mechanism (we either forced link or we got link via PHY + * auto-neg), we have to manually enable/disable transmit and + * receive flow control. + * + * The "Case" statement below enables/disables flow control + * according to the "hw->fc.current_mode" parameter. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause + * frames but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames + * but we do not receive pause frames). + * 3: Both Rx and Tx flow control (symmetric) is enabled. + * other: No other values should be possible at this point. + */ + DEBUGOUT1("hw->fc.current_mode = %u\n", hw->fc.current_mode); + + switch (hw->fc.current_mode) { + case igc_fc_none: + ctrl &= (~(IGC_CTRL_TFCE | IGC_CTRL_RFCE)); + break; + case igc_fc_rx_pause: + ctrl &= (~IGC_CTRL_TFCE); + ctrl |= IGC_CTRL_RFCE; + break; + case igc_fc_tx_pause: + ctrl &= (~IGC_CTRL_RFCE); + ctrl |= IGC_CTRL_TFCE; + break; + case igc_fc_full: + ctrl |= (IGC_CTRL_TFCE | IGC_CTRL_RFCE); + break; + default: + DEBUGOUT("Flow control param set incorrectly\n"); + return -IGC_ERR_CONFIG; + } + + IGC_WRITE_REG(hw, IGC_CTRL, ctrl); + + return IGC_SUCCESS; +} + +/** + * igc_config_fc_after_link_up_generic - Configures flow control after link + * @hw: pointer to the HW structure + * + * Checks the status of auto-negotiation after link up to ensure that the + * speed and duplex were not forced. If the link needed to be forced, then + * flow control needs to be forced also. If auto-negotiation is enabled + * and did not fail, then we configure flow control based on our link + * partner. + **/ +s32 igc_config_fc_after_link_up_generic(struct igc_hw *hw) +{ + struct igc_mac_info *mac = &hw->mac; + s32 ret_val = IGC_SUCCESS; + u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg; + u16 speed, duplex; + + DEBUGFUNC("igc_config_fc_after_link_up_generic"); + + /* Check for the case where we have fiber media and auto-neg failed + * so we had to force link. In this case, we need to force the + * configuration of the MAC to match the "fc" parameter. + */ + if (mac->autoneg_failed) { + if (hw->phy.media_type == igc_media_type_copper) + ret_val = igc_force_mac_fc_generic(hw); + } + + if (ret_val) { + DEBUGOUT("Error forcing flow control settings\n"); + return ret_val; + } + + /* Check for the case where we have copper media and auto-neg is + * enabled. 
In this case, we need to check and see if Auto-Neg + * has completed, and if so, how the PHY and link partner has + * flow control configured. + */ + if (hw->phy.media_type == igc_media_type_copper && mac->autoneg) { + /* Read the MII Status Register and check to see if AutoNeg + * has completed. We read this twice because this reg has + * some "sticky" (latched) bits. + */ + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) { + DEBUGOUT("Copper PHY and Auto Neg has not completed.\n"); + return ret_val; + } + + /* The AutoNeg process has completed, so we now need to + * read both the Auto Negotiation Advertisement + * Register (Address 4) and the Auto_Negotiation Base + * Page Ability Register (Address 5) to determine how + * flow control was negotiated. + */ + ret_val = hw->phy.ops.read_reg(hw, PHY_AUTONEG_ADV, + &mii_nway_adv_reg); + if (ret_val) + return ret_val; + ret_val = hw->phy.ops.read_reg(hw, PHY_LP_ABILITY, + &mii_nway_lp_ability_reg); + if (ret_val) + return ret_val; + + /* Two bits in the Auto Negotiation Advertisement Register + * (Address 4) and two bits in the Auto Negotiation Base + * Page Ability Register (Address 5) determine flow control + * for both the PHY and the link partner. The following + * table, taken out of the IEEE 802.3ab/D6.0 dated March 25, + * 1999, describes these PAUSE resolution bits and how flow + * control is determined based upon these settings. + * NOTE: DC = Don't Care + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution + *-------|---------|-------|---------|-------------------- + * 0 | 0 | DC | DC | igc_fc_none + * 0 | 1 | 0 | DC | igc_fc_none + * 0 | 1 | 1 | 0 | igc_fc_none + * 0 | 1 | 1 | 1 | igc_fc_tx_pause + * 1 | 0 | 0 | DC | igc_fc_none + * 1 | DC | 1 | DC | igc_fc_full + * 1 | 1 | 0 | 0 | igc_fc_none + * 1 | 1 | 0 | 1 | igc_fc_rx_pause + * + * Are both PAUSE bits set to 1? If so, this implies + * Symmetric Flow Control is enabled at both ends. The + * ASM_DIR bits are irrelevant per the spec. + * + * For Symmetric Flow Control: + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 1 | DC | 1 | DC | IGC_fc_full + * + */ + if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) { + /* Now we need to check if the user selected Rx ONLY + * of pause frames. In this case, we had to advertise + * FULL flow control because we could not advertise Rx + * ONLY. Hence, we must now check to see if we need to + * turn OFF the TRANSMISSION of PAUSE frames. + */ + if (hw->fc.requested_mode == igc_fc_full) { + hw->fc.current_mode = igc_fc_full; + DEBUGOUT("Flow Control = FULL.\n"); + } else { + hw->fc.current_mode = igc_fc_rx_pause; + DEBUGOUT("Flow Control = Rx PAUSE frames only.\n"); + } + } + /* For receiving PAUSE frames ONLY. 
+ * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 0 | 1 | 1 | 1 | igc_fc_tx_pause + */ + else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && + (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { + hw->fc.current_mode = igc_fc_tx_pause; + DEBUGOUT("Flow Control = Tx PAUSE frames only.\n"); + } + /* For transmitting PAUSE frames ONLY. + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 1 | 1 | 0 | 1 | igc_fc_rx_pause + */ + else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && + !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { + hw->fc.current_mode = igc_fc_rx_pause; + DEBUGOUT("Flow Control = Rx PAUSE frames only.\n"); + } else { + /* Per the IEEE spec, at this point flow control + * should be disabled. + */ + hw->fc.current_mode = igc_fc_none; + DEBUGOUT("Flow Control = NONE.\n"); + } + + /* Now we need to do one last check... If we auto- + * negotiated to HALF DUPLEX, flow control should not be + * enabled per IEEE 802.3 spec. + */ + ret_val = mac->ops.get_link_up_info(hw, &speed, &duplex); + if (ret_val) { + DEBUGOUT("Error getting link speed and duplex\n"); + return ret_val; + } + + if (duplex == HALF_DUPLEX) + hw->fc.current_mode = igc_fc_none; + + /* Now we call a subroutine to actually force the MAC + * controller to use the correct flow control settings. + */ + ret_val = igc_force_mac_fc_generic(hw); + if (ret_val) { + DEBUGOUT("Error forcing flow control settings\n"); + return ret_val; + } + } + + return IGC_SUCCESS; +} + +/** + * igc_get_speed_and_duplex_copper_generic - Retrieve current speed/duplex + * @hw: pointer to the HW structure + * @speed: stores the current speed + * @duplex: stores the current duplex + * + * Read the status register for the current speed/duplex and store the current + * speed and duplex for copper connections. + **/ +s32 igc_get_speed_and_duplex_copper_generic(struct igc_hw *hw, u16 *speed, + u16 *duplex) +{ + u32 status; + + DEBUGFUNC("igc_get_speed_and_duplex_copper_generic"); + + status = IGC_READ_REG(hw, IGC_STATUS); + if (status & IGC_STATUS_SPEED_1000) { + /* For I225, STATUS will indicate 1G speed in both 1 Gbps + * and 2.5 Gbps link modes. An additional bit is used + * to differentiate between 1 Gbps and 2.5 Gbps. + */ + if (hw->mac.type == igc_i225 && + (status & IGC_STATUS_SPEED_2500)) { + *speed = SPEED_2500; + DEBUGOUT("2500 Mbs, "); + } else { + *speed = SPEED_1000; + DEBUGOUT("1000 Mbs, "); + } + } else if (status & IGC_STATUS_SPEED_100) { + *speed = SPEED_100; + DEBUGOUT("100 Mbs, "); + } else { + *speed = SPEED_10; + DEBUGOUT("10 Mbs, "); + } + + if (status & IGC_STATUS_FD) { + *duplex = FULL_DUPLEX; + DEBUGOUT("Full Duplex\n"); + } else { + *duplex = HALF_DUPLEX; + DEBUGOUT("Half Duplex\n"); + } + + return IGC_SUCCESS; +} + +/** + * igc_get_speed_and_duplex_fiber_generic - Retrieve current speed/duplex + * @hw: pointer to the HW structure + * @speed: stores the current speed + * @duplex: stores the current duplex + * + * Sets the speed and duplex to gigabit full duplex (the only possible option) + * for fiber/serdes links. 
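+ *
+ * Usage sketch (illustrative only; the outputs are plain u16 values):
+ *
+ *   u16 speed, duplex;
+ *   igc_get_speed_and_duplex_fiber_serdes_generic(hw, &speed, &duplex);
+ *
+ * After the call, speed is always SPEED_1000 and duplex is FULL_DUPLEX.
+ * Callers typically reach this routine indirectly through
+ * mac->ops.get_link_up_info(), as igc_config_fc_after_link_up_generic()
+ * does above.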
+ **/ +s32 +igc_get_speed_and_duplex_fiber_serdes_generic(struct igc_hw *hw, + u16 *speed, u16 *duplex) +{ + DEBUGFUNC("igc_get_speed_and_duplex_fiber_serdes_generic"); + UNREFERENCED_1PARAMETER(hw); + + *speed = SPEED_1000; + *duplex = FULL_DUPLEX; + + return IGC_SUCCESS; +} + +/** + * igc_get_hw_semaphore_generic - Acquire hardware semaphore + * @hw: pointer to the HW structure + * + * Acquire the HW semaphore to access the PHY or NVM + **/ +s32 igc_get_hw_semaphore_generic(struct igc_hw *hw) +{ + u32 swsm; + s32 timeout = hw->nvm.word_size + 1; + s32 i = 0; + + DEBUGFUNC("igc_get_hw_semaphore_generic"); + + /* Get the SW semaphore */ + while (i < timeout) { + swsm = IGC_READ_REG(hw, IGC_SWSM); + if (!(swsm & IGC_SWSM_SMBI)) + break; + + usec_delay(50); + i++; + } + + if (i == timeout) { + DEBUGOUT("Driver can't access device - SMBI bit is set.\n"); + return -IGC_ERR_NVM; + } + + /* Get the FW semaphore. */ + for (i = 0; i < timeout; i++) { + swsm = IGC_READ_REG(hw, IGC_SWSM); + IGC_WRITE_REG(hw, IGC_SWSM, swsm | IGC_SWSM_SWESMBI); + + /* Semaphore acquired if bit latched */ + if (IGC_READ_REG(hw, IGC_SWSM) & IGC_SWSM_SWESMBI) + break; + + usec_delay(50); + } + + if (i == timeout) { + /* Release semaphores */ + igc_put_hw_semaphore_generic(hw); + DEBUGOUT("Driver can't access the NVM\n"); + return -IGC_ERR_NVM; + } + + return IGC_SUCCESS; +} + +/** + * igc_put_hw_semaphore_generic - Release hardware semaphore + * @hw: pointer to the HW structure + * + * Release hardware semaphore used to access the PHY or NVM + **/ +void igc_put_hw_semaphore_generic(struct igc_hw *hw) +{ + u32 swsm; + + DEBUGFUNC("igc_put_hw_semaphore_generic"); + + swsm = IGC_READ_REG(hw, IGC_SWSM); + + swsm &= ~(IGC_SWSM_SMBI | IGC_SWSM_SWESMBI); + + IGC_WRITE_REG(hw, IGC_SWSM, swsm); +} + +/** + * igc_get_auto_rd_done_generic - Check for auto read completion + * @hw: pointer to the HW structure + * + * Check EEPROM for Auto Read done bit. + **/ +s32 igc_get_auto_rd_done_generic(struct igc_hw *hw) +{ + s32 i = 0; + + DEBUGFUNC("igc_get_auto_rd_done_generic"); + + while (i < AUTO_READ_DONE_TIMEOUT) { + if (IGC_READ_REG(hw, IGC_EECD) & IGC_EECD_AUTO_RD) + break; + msec_delay(1); + i++; + } + + if (i == AUTO_READ_DONE_TIMEOUT) { + DEBUGOUT("Auto read by HW from NVM has not completed.\n"); + return -IGC_ERR_RESET; + } + + return IGC_SUCCESS; +} + +/** + * igc_valid_led_default_generic - Verify a valid default LED config + * @hw: pointer to the HW structure + * @data: pointer to the NVM (EEPROM) + * + * Read the EEPROM for the current default LED configuration. If the + * LED configuration is not valid, set to a valid LED configuration. 
+ **/ +s32 igc_valid_led_default_generic(struct igc_hw *hw, u16 *data) +{ + s32 ret_val; + + DEBUGFUNC("igc_valid_led_default_generic"); + + ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + + if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) + *data = ID_LED_DEFAULT; + + return IGC_SUCCESS; +} + +/** + * igc_id_led_init_generic - + * @hw: pointer to the HW structure + * + **/ +s32 igc_id_led_init_generic(struct igc_hw *hw) +{ + struct igc_mac_info *mac = &hw->mac; + s32 ret_val; + const u32 ledctl_mask = 0x000000FF; + const u32 ledctl_on = IGC_LEDCTL_MODE_LED_ON; + const u32 ledctl_off = IGC_LEDCTL_MODE_LED_OFF; + u16 data, i, temp; + const u16 led_mask = 0x0F; + + DEBUGFUNC("igc_id_led_init_generic"); + + ret_val = hw->nvm.ops.valid_led_default(hw, &data); + if (ret_val) + return ret_val; + + mac->ledctl_default = IGC_READ_REG(hw, IGC_LEDCTL); + mac->ledctl_mode1 = mac->ledctl_default; + mac->ledctl_mode2 = mac->ledctl_default; + + for (i = 0; i < 4; i++) { + temp = (data >> (i << 2)) & led_mask; + switch (temp) { + case ID_LED_ON1_DEF2: + case ID_LED_ON1_ON2: + case ID_LED_ON1_OFF2: + mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3)); + mac->ledctl_mode1 |= ledctl_on << (i << 3); + break; + case ID_LED_OFF1_DEF2: + case ID_LED_OFF1_ON2: + case ID_LED_OFF1_OFF2: + mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3)); + mac->ledctl_mode1 |= ledctl_off << (i << 3); + break; + default: + /* Do nothing */ + break; + } + switch (temp) { + case ID_LED_DEF1_ON2: + case ID_LED_ON1_ON2: + case ID_LED_OFF1_ON2: + mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3)); + mac->ledctl_mode2 |= ledctl_on << (i << 3); + break; + case ID_LED_DEF1_OFF2: + case ID_LED_ON1_OFF2: + case ID_LED_OFF1_OFF2: + mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3)); + mac->ledctl_mode2 |= ledctl_off << (i << 3); + break; + default: + /* Do nothing */ + break; + } + } + + return IGC_SUCCESS; +} + +/** + * igc_setup_led_generic - Configures SW controllable LED + * @hw: pointer to the HW structure + * + * This prepares the SW controllable LED for use and saves the current state + * of the LED so it can be later restored. + **/ +s32 igc_setup_led_generic(struct igc_hw *hw) +{ + u32 ledctl; + + DEBUGFUNC("igc_setup_led_generic"); + + if (hw->mac.ops.setup_led != igc_setup_led_generic) + return -IGC_ERR_CONFIG; + + if (hw->phy.media_type == igc_media_type_fiber) { + ledctl = IGC_READ_REG(hw, IGC_LEDCTL); + hw->mac.ledctl_default = ledctl; + /* Turn off LED0 */ + ledctl &= ~(IGC_LEDCTL_LED0_IVRT | IGC_LEDCTL_LED0_BLINK | + IGC_LEDCTL_LED0_MODE_MASK); + ledctl |= (IGC_LEDCTL_MODE_LED_OFF << + IGC_LEDCTL_LED0_MODE_SHIFT); + IGC_WRITE_REG(hw, IGC_LEDCTL, ledctl); + } else if (hw->phy.media_type == igc_media_type_copper) { + IGC_WRITE_REG(hw, IGC_LEDCTL, hw->mac.ledctl_mode1); + } + + return IGC_SUCCESS; +} + +/** + * igc_cleanup_led_generic - Set LED config to default operation + * @hw: pointer to the HW structure + * + * Remove the current LED configuration and set the LED configuration + * to the default value, saved from the EEPROM. + **/ +s32 igc_cleanup_led_generic(struct igc_hw *hw) +{ + DEBUGFUNC("igc_cleanup_led_generic"); + + IGC_WRITE_REG(hw, IGC_LEDCTL, hw->mac.ledctl_default); + return IGC_SUCCESS; +} + +/** + * igc_blink_led_generic - Blink LED + * @hw: pointer to the HW structure + * + * Blink the LEDs which are set to be on. 
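+ *
+ * For the fiber case the write below reduces to (illustrative sketch,
+ * assuming LED0 occupies the low 8-bit field of LEDCTL):
+ *
+ *   ledctl_blink = IGC_LEDCTL_LED0_BLINK |
+ *                  (IGC_LEDCTL_MODE_LED_ON << IGC_LEDCTL_LED0_MODE_SHIFT);
+ *   IGC_WRITE_REG(hw, IGC_LEDCTL, ledctl_blink);
+ *
+ * For copper, the loop below walks the four 8-bit LED fields (i = 0, 8,
+ * 16, 24) and sets the blink bit only for LEDs whose resolved mode is on.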
+ **/ +s32 igc_blink_led_generic(struct igc_hw *hw) +{ + u32 ledctl_blink = 0; + u32 i; + + DEBUGFUNC("igc_blink_led_generic"); + + if (hw->phy.media_type == igc_media_type_fiber) { + /* always blink LED0 for PCI-E fiber */ + ledctl_blink = IGC_LEDCTL_LED0_BLINK | + (IGC_LEDCTL_MODE_LED_ON << IGC_LEDCTL_LED0_MODE_SHIFT); + } else { + /* Set the blink bit for each LED that's "on" (0x0E) + * (or "off" if inverted) in ledctl_mode2. The blink + * logic in hardware only works when mode is set to "on" + * so it must be changed accordingly when the mode is + * "off" and inverted. + */ + ledctl_blink = hw->mac.ledctl_mode2; + for (i = 0; i < 32; i += 8) { + u32 mode = (hw->mac.ledctl_mode2 >> i) & + IGC_LEDCTL_LED0_MODE_MASK; + u32 led_default = hw->mac.ledctl_default >> i; + + if ((!(led_default & IGC_LEDCTL_LED0_IVRT) && + mode == IGC_LEDCTL_MODE_LED_ON) || + ((led_default & IGC_LEDCTL_LED0_IVRT) && + mode == IGC_LEDCTL_MODE_LED_OFF)) { + ledctl_blink &= + ~(IGC_LEDCTL_LED0_MODE_MASK << i); + ledctl_blink |= (IGC_LEDCTL_LED0_BLINK | + IGC_LEDCTL_MODE_LED_ON) << i; + } + } + } + + IGC_WRITE_REG(hw, IGC_LEDCTL, ledctl_blink); + + return IGC_SUCCESS; +} + +/** + * igc_led_on_generic - Turn LED on + * @hw: pointer to the HW structure + * + * Turn LED on. + **/ +s32 igc_led_on_generic(struct igc_hw *hw) +{ + u32 ctrl; + + DEBUGFUNC("igc_led_on_generic"); + + switch (hw->phy.media_type) { + case igc_media_type_fiber: + ctrl = IGC_READ_REG(hw, IGC_CTRL); + ctrl &= ~IGC_CTRL_SWDPIN0; + ctrl |= IGC_CTRL_SWDPIO0; + IGC_WRITE_REG(hw, IGC_CTRL, ctrl); + break; + case igc_media_type_copper: + IGC_WRITE_REG(hw, IGC_LEDCTL, hw->mac.ledctl_mode2); + break; + default: + break; + } + + return IGC_SUCCESS; +} + +/** + * igc_led_off_generic - Turn LED off + * @hw: pointer to the HW structure + * + * Turn LED off. + **/ +s32 igc_led_off_generic(struct igc_hw *hw) +{ + u32 ctrl; + + DEBUGFUNC("igc_led_off_generic"); + + switch (hw->phy.media_type) { + case igc_media_type_fiber: + ctrl = IGC_READ_REG(hw, IGC_CTRL); + ctrl |= IGC_CTRL_SWDPIN0; + ctrl |= IGC_CTRL_SWDPIO0; + IGC_WRITE_REG(hw, IGC_CTRL, ctrl); + break; + case igc_media_type_copper: + IGC_WRITE_REG(hw, IGC_LEDCTL, hw->mac.ledctl_mode1); + break; + default: + break; + } + + return IGC_SUCCESS; +} + +/** + * igc_set_pcie_no_snoop_generic - Set PCI-express capabilities + * @hw: pointer to the HW structure + * @no_snoop: bitmap of snoop events + * + * Set the PCI-express register to snoop for events enabled in 'no_snoop'. + **/ +void igc_set_pcie_no_snoop_generic(struct igc_hw *hw, u32 no_snoop) +{ + u32 gcr; + + DEBUGFUNC("igc_set_pcie_no_snoop_generic"); + + if (hw->bus.type != igc_bus_type_pci_express) + return; + + if (no_snoop) { + gcr = IGC_READ_REG(hw, IGC_GCR); + gcr &= ~(PCIE_NO_SNOOP_ALL); + gcr |= no_snoop; + IGC_WRITE_REG(hw, IGC_GCR, gcr); + } +} + +/** + * igc_disable_pcie_master_generic - Disables PCI-express master access + * @hw: pointer to the HW structure + * + * Returns IGC_SUCCESS if successful, else returns -10 + * (-IGC_ERR_MASTER_REQUESTS_PENDING) if master disable bit has not caused + * the master requests to be disabled. + * + * Disables PCI-Express master access and verifies there are no pending + * requests. 
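+ *
+ * Usage sketch (illustrative only; typically called just before a device
+ * reset):
+ *
+ *   if (igc_disable_pcie_master_generic(hw) ==
+ *       -IGC_ERR_MASTER_REQUESTS_PENDING)
+ *       DEBUGOUT("PCIe master disable timed out\n");
+ *
+ * The body below polls STATUS.GIO_MASTER_ENABLE for up to
+ * MASTER_DISABLE_TIMEOUT iterations of 100 usec before giving up.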
+ **/ +s32 igc_disable_pcie_master_generic(struct igc_hw *hw) +{ + u32 ctrl; + s32 timeout = MASTER_DISABLE_TIMEOUT; + + DEBUGFUNC("igc_disable_pcie_master_generic"); + + ctrl = IGC_READ_REG(hw, IGC_CTRL); + ctrl |= IGC_CTRL_GIO_MASTER_DISABLE; + IGC_WRITE_REG(hw, IGC_CTRL, ctrl); + + while (timeout) { + if (!(IGC_READ_REG(hw, IGC_STATUS) & + IGC_STATUS_GIO_MASTER_ENABLE) || + IGC_REMOVED(hw->hw_addr)) + break; + usec_delay(100); + timeout--; + } + + if (!timeout) { + DEBUGOUT("Master requests are pending.\n"); + return -IGC_ERR_MASTER_REQUESTS_PENDING; + } + + return IGC_SUCCESS; +} + +/** + * igc_reset_adaptive_generic - Reset Adaptive Interframe Spacing + * @hw: pointer to the HW structure + * + * Reset the Adaptive Interframe Spacing throttle to default values. + **/ +void igc_reset_adaptive_generic(struct igc_hw *hw) +{ + struct igc_mac_info *mac = &hw->mac; + + DEBUGFUNC("igc_reset_adaptive_generic"); + + if (!mac->adaptive_ifs) { + DEBUGOUT("Not in Adaptive IFS mode!\n"); + return; + } + + mac->current_ifs_val = 0; + mac->ifs_min_val = IFS_MIN; + mac->ifs_max_val = IFS_MAX; + mac->ifs_step_size = IFS_STEP; + mac->ifs_ratio = IFS_RATIO; + + mac->in_ifs_mode = false; + IGC_WRITE_REG(hw, IGC_AIT, 0); +} + +/** + * igc_update_adaptive_generic - Update Adaptive Interframe Spacing + * @hw: pointer to the HW structure + * + * Update the Adaptive Interframe Spacing Throttle value based on the + * time between transmitted packets and time between collisions. + **/ +void igc_update_adaptive_generic(struct igc_hw *hw) +{ + struct igc_mac_info *mac = &hw->mac; + + DEBUGFUNC("igc_update_adaptive_generic"); + + if (!mac->adaptive_ifs) { + DEBUGOUT("Not in Adaptive IFS mode!\n"); + return; + } + + if ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) { + if (mac->tx_packet_delta > MIN_NUM_XMITS) { + mac->in_ifs_mode = true; + if (mac->current_ifs_val < mac->ifs_max_val) { + if (!mac->current_ifs_val) + mac->current_ifs_val = mac->ifs_min_val; + else + mac->current_ifs_val += + mac->ifs_step_size; + IGC_WRITE_REG(hw, IGC_AIT, + mac->current_ifs_val); + } + } + } else { + if (mac->in_ifs_mode && + mac->tx_packet_delta <= MIN_NUM_XMITS) { + mac->current_ifs_val = 0; + mac->in_ifs_mode = false; + IGC_WRITE_REG(hw, IGC_AIT, 0); + } + } +} + +/** + * igc_validate_mdi_setting_generic - Verify MDI/MDIx settings + * @hw: pointer to the HW structure + * + * Verify that when not using auto-negotiation that MDI/MDIx is correctly + * set, which is forced to MDI mode only. + **/ +static s32 igc_validate_mdi_setting_generic(struct igc_hw *hw) +{ + DEBUGFUNC("igc_validate_mdi_setting_generic"); + + if (!hw->mac.autoneg && (hw->phy.mdix == 0 || hw->phy.mdix == 3)) { + DEBUGOUT("Invalid MDI setting detected\n"); + hw->phy.mdix = 1; + return -IGC_ERR_CONFIG; + } + + return IGC_SUCCESS; +} + +/** + * igc_validate_mdi_setting_crossover_generic - Verify MDI/MDIx settings + * @hw: pointer to the HW structure + * + * Validate the MDI/MDIx setting, allowing for auto-crossover during forced + * operation. + **/ +s32 +igc_validate_mdi_setting_crossover_generic(struct igc_hw IGC_UNUSEDARG * hw) +{ + DEBUGFUNC("igc_validate_mdi_setting_crossover_generic"); + UNREFERENCED_1PARAMETER(hw); + + return IGC_SUCCESS; +} + +/** + * igc_write_8bit_ctrl_reg_generic - Write a 8bit CTRL register + * @hw: pointer to the HW structure + * @reg: 32bit register offset such as IGC_SCTL + * @offset: register offset to write to + * @data: data to write at register offset + * + * Writes an address/data control type register. 
There are several of these + * and they all have the format address << 8 | data and bit 31 is polled for + * completion. + **/ +s32 igc_write_8bit_ctrl_reg_generic(struct igc_hw *hw, u32 reg, + u32 offset, u8 data) +{ + u32 i, regvalue = 0; + + DEBUGFUNC("igc_write_8bit_ctrl_reg_generic"); + + /* Set up the address and data */ + regvalue = ((u32)data) | (offset << IGC_GEN_CTL_ADDRESS_SHIFT); + IGC_WRITE_REG(hw, reg, regvalue); + + /* Poll the ready bit to see if the MDI read completed */ + for (i = 0; i < IGC_GEN_POLL_TIMEOUT; i++) { + usec_delay(5); + regvalue = IGC_READ_REG(hw, reg); + if (regvalue & IGC_GEN_CTL_READY) + break; + } + if (!(regvalue & IGC_GEN_CTL_READY)) { + DEBUGOUT1("Reg %08x did not indicate ready\n", reg); + return -IGC_ERR_PHY; + } + + return IGC_SUCCESS; +} diff --git a/src/spdk/dpdk/drivers/net/igc/base/igc_mac.h b/src/spdk/dpdk/drivers/net/igc/base/igc_mac.h new file mode 100644 index 000000000..035a371e1 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/igc/base/igc_mac.h @@ -0,0 +1,64 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + +#ifndef _IGC_MAC_H_ +#define _IGC_MAC_H_ + +void igc_init_mac_ops_generic(struct igc_hw *hw); +#define IGC_REMOVED(a) (0) +void igc_null_mac_generic(struct igc_hw *hw); +s32 igc_null_ops_generic(struct igc_hw *hw); +s32 igc_null_link_info(struct igc_hw *hw, u16 *s, u16 *d); +bool igc_null_mng_mode(struct igc_hw *hw); +void igc_null_update_mc(struct igc_hw *hw, u8 *h, u32 a); +void igc_null_write_vfta(struct igc_hw *hw, u32 a, u32 b); +int igc_null_rar_set(struct igc_hw *hw, u8 *h, u32 a); +s32 igc_blink_led_generic(struct igc_hw *hw); +s32 igc_check_for_copper_link_generic(struct igc_hw *hw); +s32 igc_check_for_fiber_link_generic(struct igc_hw *hw); +s32 igc_check_for_serdes_link_generic(struct igc_hw *hw); +s32 igc_cleanup_led_generic(struct igc_hw *hw); +s32 igc_commit_fc_settings_generic(struct igc_hw *hw); +s32 igc_poll_fiber_serdes_link_generic(struct igc_hw *hw); +s32 igc_config_fc_after_link_up_generic(struct igc_hw *hw); +s32 igc_disable_pcie_master_generic(struct igc_hw *hw); +s32 igc_force_mac_fc_generic(struct igc_hw *hw); +s32 igc_get_auto_rd_done_generic(struct igc_hw *hw); +s32 igc_get_bus_info_pci_generic(struct igc_hw *hw); +s32 igc_get_bus_info_pcie_generic(struct igc_hw *hw); +void igc_set_lan_id_single_port(struct igc_hw *hw); +void igc_set_lan_id_multi_port_pci(struct igc_hw *hw); +s32 igc_get_hw_semaphore_generic(struct igc_hw *hw); +s32 igc_get_speed_and_duplex_copper_generic(struct igc_hw *hw, u16 *speed, + u16 *duplex); +s32 igc_get_speed_and_duplex_fiber_serdes_generic(struct igc_hw *hw, + u16 *speed, u16 *duplex); +s32 igc_id_led_init_generic(struct igc_hw *hw); +s32 igc_led_on_generic(struct igc_hw *hw); +s32 igc_led_off_generic(struct igc_hw *hw); +void igc_update_mc_addr_list_generic(struct igc_hw *hw, + u8 *mc_addr_list, u32 mc_addr_count); +s32 igc_set_default_fc_generic(struct igc_hw *hw); +s32 igc_set_fc_watermarks_generic(struct igc_hw *hw); +s32 igc_setup_fiber_serdes_link_generic(struct igc_hw *hw); +s32 igc_setup_led_generic(struct igc_hw *hw); +s32 igc_setup_link_generic(struct igc_hw *hw); +s32 igc_validate_mdi_setting_crossover_generic(struct igc_hw *hw); +s32 igc_write_8bit_ctrl_reg_generic(struct igc_hw *hw, u32 reg, + u32 offset, u8 data); + +u32 igc_hash_mc_addr_generic(struct igc_hw *hw, u8 *mc_addr); + +void igc_clear_hw_cntrs_base_generic(struct igc_hw *hw); +void igc_clear_vfta_generic(struct igc_hw *hw); +void 
igc_init_rx_addrs_generic(struct igc_hw *hw, u16 rar_count); +void igc_pcix_mmrbc_workaround_generic(struct igc_hw *hw); +void igc_put_hw_semaphore_generic(struct igc_hw *hw); +s32 igc_check_alt_mac_addr_generic(struct igc_hw *hw); +void igc_reset_adaptive_generic(struct igc_hw *hw); +void igc_set_pcie_no_snoop_generic(struct igc_hw *hw, u32 no_snoop); +void igc_update_adaptive_generic(struct igc_hw *hw); +void igc_write_vfta_generic(struct igc_hw *hw, u32 offset, u32 value); + +#endif diff --git a/src/spdk/dpdk/drivers/net/igc/base/igc_manage.c b/src/spdk/dpdk/drivers/net/igc/base/igc_manage.c new file mode 100644 index 000000000..563ab8160 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/igc/base/igc_manage.c @@ -0,0 +1,547 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + +#include "igc_api.h" +#include "igc_manage.h" + +/** + * igc_calculate_checksum - Calculate checksum for buffer + * @buffer: pointer to EEPROM + * @length: size of EEPROM to calculate a checksum for + * + * Calculates the checksum for some buffer on a specified length. The + * checksum calculated is returned. + **/ +u8 igc_calculate_checksum(u8 *buffer, u32 length) +{ + u32 i; + u8 sum = 0; + + DEBUGFUNC("igc_calculate_checksum"); + + if (!buffer) + return 0; + + for (i = 0; i < length; i++) + sum += buffer[i]; + + return (u8)(0 - sum); +} + +/** + * igc_mng_enable_host_if_generic - Checks host interface is enabled + * @hw: pointer to the HW structure + * + * Returns IGC_success upon success, else IGC_ERR_HOST_INTERFACE_COMMAND + * + * This function checks whether the HOST IF is enabled for command operation + * and also checks whether the previous command is completed. It busy waits + * in case of previous command is not completed. + **/ +s32 igc_mng_enable_host_if_generic(struct igc_hw *hw) +{ + u32 hicr; + u8 i; + + DEBUGFUNC("igc_mng_enable_host_if_generic"); + + if (!hw->mac.arc_subsystem_valid) { + DEBUGOUT("ARC subsystem not valid.\n"); + return -IGC_ERR_HOST_INTERFACE_COMMAND; + } + + /* Check that the host interface is enabled. */ + hicr = IGC_READ_REG(hw, IGC_HICR); + if (!(hicr & IGC_HICR_EN)) { + DEBUGOUT("IGC_HOST_EN bit disabled.\n"); + return -IGC_ERR_HOST_INTERFACE_COMMAND; + } + /* check the previous command is completed */ + for (i = 0; i < IGC_MNG_DHCP_COMMAND_TIMEOUT; i++) { + hicr = IGC_READ_REG(hw, IGC_HICR); + if (!(hicr & IGC_HICR_C)) + break; + msec_delay_irq(1); + } + + if (i == IGC_MNG_DHCP_COMMAND_TIMEOUT) { + DEBUGOUT("Previous command timeout failed .\n"); + return -IGC_ERR_HOST_INTERFACE_COMMAND; + } + + return IGC_SUCCESS; +} + +/** + * igc_check_mng_mode_generic - Generic check management mode + * @hw: pointer to the HW structure + * + * Reads the firmware semaphore register and returns true (>0) if + * manageability is enabled, else false (0). + **/ +bool igc_check_mng_mode_generic(struct igc_hw *hw) +{ + u32 fwsm = IGC_READ_REG(hw, IGC_FWSM); + + DEBUGFUNC("igc_check_mng_mode_generic"); + + + return (fwsm & IGC_FWSM_MODE_MASK) == + (IGC_MNG_IAMT_MODE << IGC_FWSM_MODE_SHIFT); +} + +/** + * igc_enable_tx_pkt_filtering_generic - Enable packet filtering on Tx + * @hw: pointer to the HW structure + * + * Enables packet filtering on transmit packets if manageability is enabled + * and host interface is enabled. 
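+ *
+ * The validity check below relies on igc_calculate_checksum() returning
+ * (u8)(0 - sum): for an intact cookie, the stored checksum equals the value
+ * recomputed over the header with its checksum field zeroed. Illustrative
+ * sketch, where 'cookie' is a hypothetical local copy of the DHCP cookie
+ * read from the host interface:
+ *
+ *   u8 stored = cookie.checksum;
+ *   cookie.checksum = 0;
+ *   if (stored != igc_calculate_checksum((u8 *)&cookie,
+ *                                        IGC_MNG_DHCP_COOKIE_LENGTH) ||
+ *       cookie.signature != IGC_IAMT_SIGNATURE)
+ *       DEBUGOUT("cookie invalid, leaving Tx filtering enabled\n");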
+ **/ +bool igc_enable_tx_pkt_filtering_generic(struct igc_hw *hw) +{ + struct igc_host_mng_dhcp_cookie *hdr = &hw->mng_cookie; + u32 *buffer = (u32 *)&hw->mng_cookie; + u32 offset; + s32 ret_val, hdr_csum, csum; + u8 i, len; + + DEBUGFUNC("igc_enable_tx_pkt_filtering_generic"); + + hw->mac.tx_pkt_filtering = true; + + /* No manageability, no filtering */ + if (!hw->mac.ops.check_mng_mode(hw)) { + hw->mac.tx_pkt_filtering = false; + return hw->mac.tx_pkt_filtering; + } + + /* If we can't read from the host interface for whatever + * reason, disable filtering. + */ + ret_val = igc_mng_enable_host_if_generic(hw); + if (ret_val != IGC_SUCCESS) { + hw->mac.tx_pkt_filtering = false; + return hw->mac.tx_pkt_filtering; + } + + /* Read in the header. Length and offset are in dwords. */ + len = IGC_MNG_DHCP_COOKIE_LENGTH >> 2; + offset = IGC_MNG_DHCP_COOKIE_OFFSET >> 2; + for (i = 0; i < len; i++) + *(buffer + i) = IGC_READ_REG_ARRAY_DWORD(hw, IGC_HOST_IF, + offset + i); + hdr_csum = hdr->checksum; + hdr->checksum = 0; + csum = igc_calculate_checksum((u8 *)hdr, + IGC_MNG_DHCP_COOKIE_LENGTH); + /* If either the checksums or signature don't match, then + * the cookie area isn't considered valid, in which case we + * take the safe route of assuming Tx filtering is enabled. + */ + if (hdr_csum != csum || hdr->signature != IGC_IAMT_SIGNATURE) { + hw->mac.tx_pkt_filtering = true; + return hw->mac.tx_pkt_filtering; + } + + /* Cookie area is valid, make the final check for filtering. */ + if (!(hdr->status & IGC_MNG_DHCP_COOKIE_STATUS_PARSING)) + hw->mac.tx_pkt_filtering = false; + + return hw->mac.tx_pkt_filtering; +} + +/** + * igc_mng_write_cmd_header_generic - Writes manageability command header + * @hw: pointer to the HW structure + * @hdr: pointer to the host interface command header + * + * Writes the command header after performing the checksum calculation. + **/ +s32 igc_mng_write_cmd_header_generic(struct igc_hw *hw, + struct igc_host_mng_command_header *hdr) +{ + u16 i, length = sizeof(struct igc_host_mng_command_header); + + DEBUGFUNC("igc_mng_write_cmd_header_generic"); + + /* Write the whole command header structure with new checksum. */ + + hdr->checksum = igc_calculate_checksum((u8 *)hdr, length); + + length >>= 2; + /* Write the relevant command block into the ram area. */ + for (i = 0; i < length; i++) { + IGC_WRITE_REG_ARRAY_DWORD(hw, IGC_HOST_IF, i, + *((u32 *)hdr + i)); + IGC_WRITE_FLUSH(hw); + } + + return IGC_SUCCESS; +} + +/** + * igc_mng_host_if_write_generic - Write to the manageability host interface + * @hw: pointer to the HW structure + * @buffer: pointer to the host interface buffer + * @length: size of the buffer + * @offset: location in the buffer to write to + * @sum: sum of the data (not checksum) + * + * This function writes the buffer content at the given offset on the host + * interface. It also handles alignment so the writes are done in the most + * efficient way, and it accumulates the byte sum of the buffer into the + * *sum parameter. 
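+ *
+ * Worked example of the alignment handling below (values chosen only for
+ * illustration): with offset = 6 and length = 10, prev_bytes = 2, so the
+ * first two buffer bytes are merged into the last two byte lanes of dword 1
+ * of the host interface, the next eight bytes are written as whole dwords 2
+ * and 3, and no partial tail remains (remaining = 0).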
+ **/ +s32 igc_mng_host_if_write_generic(struct igc_hw *hw, u8 *buffer, + u16 length, u16 offset, u8 *sum) +{ + u8 *tmp; + u8 *bufptr = buffer; + u32 data = 0; + u16 remaining, i, j, prev_bytes; + + DEBUGFUNC("igc_mng_host_if_write_generic"); + + /* sum = only sum of the data and it is not checksum */ + + if (length == 0 || offset + length > IGC_HI_MAX_MNG_DATA_LENGTH) + return -IGC_ERR_PARAM; + + tmp = (u8 *)&data; + prev_bytes = offset & 0x3; + offset >>= 2; + + if (prev_bytes) { + data = IGC_READ_REG_ARRAY_DWORD(hw, IGC_HOST_IF, offset); + for (j = prev_bytes; j < sizeof(u32); j++) { + *(tmp + j) = *bufptr++; + *sum += *(tmp + j); + } + IGC_WRITE_REG_ARRAY_DWORD(hw, IGC_HOST_IF, offset, data); + length -= j - prev_bytes; + offset++; + } + + remaining = length & 0x3; + length -= remaining; + + /* Calculate length in DWORDs */ + length >>= 2; + + /* The device driver writes the relevant command block into the + * ram area. + */ + for (i = 0; i < length; i++) { + for (j = 0; j < sizeof(u32); j++) { + *(tmp + j) = *bufptr++; + *sum += *(tmp + j); + } + + IGC_WRITE_REG_ARRAY_DWORD(hw, IGC_HOST_IF, offset + i, + data); + } + if (remaining) { + for (j = 0; j < sizeof(u32); j++) { + if (j < remaining) + *(tmp + j) = *bufptr++; + else + *(tmp + j) = 0; + + *sum += *(tmp + j); + } + IGC_WRITE_REG_ARRAY_DWORD(hw, IGC_HOST_IF, offset + i, + data); + } + + return IGC_SUCCESS; +} + +/** + * igc_mng_write_dhcp_info_generic - Writes DHCP info to host interface + * @hw: pointer to the HW structure + * @buffer: pointer to the host interface + * @length: size of the buffer + * + * Writes the DHCP information to the host interface. + **/ +s32 igc_mng_write_dhcp_info_generic(struct igc_hw *hw, u8 *buffer, + u16 length) +{ + struct igc_host_mng_command_header hdr; + s32 ret_val; + u32 hicr; + + DEBUGFUNC("igc_mng_write_dhcp_info_generic"); + + hdr.command_id = IGC_MNG_DHCP_TX_PAYLOAD_CMD; + hdr.command_length = length; + hdr.reserved1 = 0; + hdr.reserved2 = 0; + hdr.checksum = 0; + + /* Enable the host interface */ + ret_val = igc_mng_enable_host_if_generic(hw); + if (ret_val) + return ret_val; + + /* Populate the host interface with the contents of "buffer". */ + ret_val = igc_mng_host_if_write_generic(hw, buffer, length, + sizeof(hdr), &hdr.checksum); + if (ret_val) + return ret_val; + + /* Write the manageability command header */ + ret_val = igc_mng_write_cmd_header_generic(hw, &hdr); + if (ret_val) + return ret_val; + + /* Tell the ARC a new command is pending. */ + hicr = IGC_READ_REG(hw, IGC_HICR); + IGC_WRITE_REG(hw, IGC_HICR, hicr | IGC_HICR_C); + + return IGC_SUCCESS; +} + +/** + * igc_enable_mng_pass_thru - Check if management passthrough is needed + * @hw: pointer to the HW structure + * + * Verifies the hardware needs to leave interface enabled so that frames can + * be directed to and from the management interface. 
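+ *
+ * Pass-through is reported only when ASF firmware is present and MANC shows
+ * TCO receive enabled. Parts with an FWSM register must additionally have
+ * the FACTPS MNGCG bit clear and FWSM in pass-through mode; 82574/82583
+ * consult NVM init control word 2 instead, and all other parts require
+ * SMBus manageability enabled with ASF disabled.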
+ **/ +bool igc_enable_mng_pass_thru(struct igc_hw *hw) +{ + u32 manc; + u32 fwsm, factps; + + DEBUGFUNC("igc_enable_mng_pass_thru"); + + if (!hw->mac.asf_firmware_present) + return false; + + manc = IGC_READ_REG(hw, IGC_MANC); + + if (!(manc & IGC_MANC_RCV_TCO_EN)) + return false; + + if (hw->mac.has_fwsm) { + fwsm = IGC_READ_REG(hw, IGC_FWSM); + factps = IGC_READ_REG(hw, IGC_FACTPS); + + if (!(factps & IGC_FACTPS_MNGCG) && + ((fwsm & IGC_FWSM_MODE_MASK) == + (igc_mng_mode_pt << IGC_FWSM_MODE_SHIFT))) + return true; + } else if ((hw->mac.type == igc_82574) || + (hw->mac.type == igc_82583)) { + u16 data; + s32 ret_val; + + factps = IGC_READ_REG(hw, IGC_FACTPS); + ret_val = igc_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &data); + if (ret_val) + return false; + + if (!(factps & IGC_FACTPS_MNGCG) && + ((data & IGC_NVM_INIT_CTRL2_MNGM) == + (igc_mng_mode_pt << 13))) + return true; + } else if ((manc & IGC_MANC_SMBUS_EN) && + !(manc & IGC_MANC_ASF_EN)) { + return true; + } + + return false; +} + +/** + * igc_host_interface_command - Writes buffer to host interface + * @hw: pointer to the HW structure + * @buffer: contains a command to write + * @length: the byte length of the buffer, must be multiple of 4 bytes + * + * Writes a buffer to the Host Interface. Upon success, returns IGC_SUCCESS + * else returns IGC_ERR_HOST_INTERFACE_COMMAND. + **/ +s32 igc_host_interface_command(struct igc_hw *hw, u8 *buffer, u32 length) +{ + u32 hicr, i; + + DEBUGFUNC("igc_host_interface_command"); + + if (!(hw->mac.arc_subsystem_valid)) { + DEBUGOUT("Hardware doesn't support host interface command.\n"); + return IGC_SUCCESS; + } + + if (!hw->mac.asf_firmware_present) { + DEBUGOUT("Firmware is not present.\n"); + return IGC_SUCCESS; + } + + if (length == 0 || length & 0x3 || + length > IGC_HI_MAX_BLOCK_BYTE_LENGTH) { + DEBUGOUT("Buffer length failure.\n"); + return -IGC_ERR_HOST_INTERFACE_COMMAND; + } + + /* Check that the host interface is enabled. */ + hicr = IGC_READ_REG(hw, IGC_HICR); + if (!(hicr & IGC_HICR_EN)) { + DEBUGOUT("IGC_HOST_EN bit disabled.\n"); + return -IGC_ERR_HOST_INTERFACE_COMMAND; + } + + /* Calculate length in DWORDs */ + length >>= 2; + + /* The device driver writes the relevant command block + * into the ram area. + */ + for (i = 0; i < length; i++) + IGC_WRITE_REG_ARRAY_DWORD(hw, IGC_HOST_IF, i, + *((u32 *)buffer + i)); + + /* Setting this bit tells the ARC that a new command is pending. */ + IGC_WRITE_REG(hw, IGC_HICR, hicr | IGC_HICR_C); + + for (i = 0; i < IGC_HI_COMMAND_TIMEOUT; i++) { + hicr = IGC_READ_REG(hw, IGC_HICR); + if (!(hicr & IGC_HICR_C)) + break; + msec_delay(1); + } + + /* Check command successful completion. */ + if (i == IGC_HI_COMMAND_TIMEOUT || + (!(IGC_READ_REG(hw, IGC_HICR) & IGC_HICR_SV))) { + DEBUGOUT("Command has failed with no status valid.\n"); + return -IGC_ERR_HOST_INTERFACE_COMMAND; + } + + for (i = 0; i < length; i++) + *((u32 *)buffer + i) = IGC_READ_REG_ARRAY_DWORD(hw, + IGC_HOST_IF, + i); + + return IGC_SUCCESS; +} + +/** + * igc_load_firmware - Writes proxy FW code buffer to host interface + * and execute. + * @hw: pointer to the HW structure + * @buffer: contains a firmware to write + * @length: the byte length of the buffer, must be multiple of 4 bytes + * + * Upon success returns IGC_SUCCESS, returns IGC_ERR_CONFIG if not enabled + * in HW else returns IGC_ERR_HOST_INTERFACE_COMMAND. 
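+ *
+ * The sequence is: reset the ROM firmware through HICR, wait for the MNG
+ * interrupt cause and for FWSM to report host-interface-only mode, copy the
+ * image in DWORDs through the 1KB RAM window selected by HIBBA, then set
+ * IGC_HICR_C and poll for the ARC to clear it.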
+ **/ +s32 igc_load_firmware(struct igc_hw *hw, u8 *buffer, u32 length) +{ + u32 hicr, hibba, fwsm, icr, i; + + DEBUGFUNC("igc_load_firmware"); + + if (hw->mac.type < igc_i210) { + DEBUGOUT("Hardware doesn't support loading FW by the driver\n"); + return -IGC_ERR_CONFIG; + } + + /* Check that the host interface is enabled. */ + hicr = IGC_READ_REG(hw, IGC_HICR); + if (!(hicr & IGC_HICR_EN)) { + DEBUGOUT("IGC_HOST_EN bit disabled.\n"); + return -IGC_ERR_CONFIG; + } + if (!(hicr & IGC_HICR_MEMORY_BASE_EN)) { + DEBUGOUT("IGC_HICR_MEMORY_BASE_EN bit disabled.\n"); + return -IGC_ERR_CONFIG; + } + + if (length == 0 || length & 0x3 || length > IGC_HI_FW_MAX_LENGTH) { + DEBUGOUT("Buffer length failure.\n"); + return -IGC_ERR_INVALID_ARGUMENT; + } + + /* Clear notification from ROM-FW by reading ICR register */ + icr = IGC_READ_REG(hw, IGC_ICR_V2); + + /* Reset ROM-FW */ + hicr = IGC_READ_REG(hw, IGC_HICR); + hicr |= IGC_HICR_FW_RESET_ENABLE; + IGC_WRITE_REG(hw, IGC_HICR, hicr); + hicr |= IGC_HICR_FW_RESET; + IGC_WRITE_REG(hw, IGC_HICR, hicr); + IGC_WRITE_FLUSH(hw); + + /* Wait till MAC notifies about its readiness after ROM-FW reset */ + for (i = 0; i < (IGC_HI_COMMAND_TIMEOUT * 2); i++) { + icr = IGC_READ_REG(hw, IGC_ICR_V2); + if (icr & IGC_ICR_MNG) + break; + msec_delay(1); + } + + /* Check for timeout */ + if (i == IGC_HI_COMMAND_TIMEOUT) { + DEBUGOUT("FW reset failed.\n"); + return -IGC_ERR_HOST_INTERFACE_COMMAND; + } + + /* Wait till MAC is ready to accept new FW code */ + for (i = 0; i < IGC_HI_COMMAND_TIMEOUT; i++) { + fwsm = IGC_READ_REG(hw, IGC_FWSM); + if ((fwsm & IGC_FWSM_FW_VALID) && + ((fwsm & IGC_FWSM_MODE_MASK) >> IGC_FWSM_MODE_SHIFT == + IGC_FWSM_HI_EN_ONLY_MODE)) + break; + msec_delay(1); + } + + /* Check for timeout */ + if (i == IGC_HI_COMMAND_TIMEOUT) { + DEBUGOUT("FW reset failed.\n"); + return -IGC_ERR_HOST_INTERFACE_COMMAND; + } + + /* Calculate length in DWORDs */ + length >>= 2; + + /* The device driver writes the relevant FW code block + * into the ram area in DWORDs via 1kB ram addressing window. + */ + for (i = 0; i < length; i++) { + if (!(i % IGC_HI_FW_BLOCK_DWORD_LENGTH)) { + /* Point to correct 1kB ram window */ + hibba = IGC_HI_FW_BASE_ADDRESS + + ((IGC_HI_FW_BLOCK_DWORD_LENGTH << 2) * + (i / IGC_HI_FW_BLOCK_DWORD_LENGTH)); + + IGC_WRITE_REG(hw, IGC_HIBBA, hibba); + } + + IGC_WRITE_REG_ARRAY_DWORD(hw, IGC_HOST_IF, + i % IGC_HI_FW_BLOCK_DWORD_LENGTH, + *((u32 *)buffer + i)); + } + + /* Setting this bit tells the ARC that a new FW is ready to execute. */ + hicr = IGC_READ_REG(hw, IGC_HICR); + IGC_WRITE_REG(hw, IGC_HICR, hicr | IGC_HICR_C); + + for (i = 0; i < IGC_HI_COMMAND_TIMEOUT; i++) { + hicr = IGC_READ_REG(hw, IGC_HICR); + if (!(hicr & IGC_HICR_C)) + break; + msec_delay(1); + } + + /* Check for successful FW start. 
*/ + if (i == IGC_HI_COMMAND_TIMEOUT) { + DEBUGOUT("New FW did not start within timeout period.\n"); + return -IGC_ERR_HOST_INTERFACE_COMMAND; + } + + return IGC_SUCCESS; +} diff --git a/src/spdk/dpdk/drivers/net/igc/base/igc_manage.h b/src/spdk/dpdk/drivers/net/igc/base/igc_manage.h new file mode 100644 index 000000000..10cae6d7f --- /dev/null +++ b/src/spdk/dpdk/drivers/net/igc/base/igc_manage.h @@ -0,0 +1,65 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + +#ifndef _IGC_MANAGE_H_ +#define _IGC_MANAGE_H_ + +bool igc_check_mng_mode_generic(struct igc_hw *hw); +bool igc_enable_tx_pkt_filtering_generic(struct igc_hw *hw); +s32 igc_mng_enable_host_if_generic(struct igc_hw *hw); +s32 igc_mng_host_if_write_generic(struct igc_hw *hw, u8 *buffer, + u16 length, u16 offset, u8 *sum); +s32 igc_mng_write_cmd_header_generic(struct igc_hw *hw, + struct igc_host_mng_command_header *hdr); +s32 igc_mng_write_dhcp_info_generic(struct igc_hw *hw, + u8 *buffer, u16 length); +bool igc_enable_mng_pass_thru(struct igc_hw *hw); +u8 igc_calculate_checksum(u8 *buffer, u32 length); +s32 igc_host_interface_command(struct igc_hw *hw, u8 *buffer, u32 length); +s32 igc_load_firmware(struct igc_hw *hw, u8 *buffer, u32 length); + +enum igc_mng_mode { + igc_mng_mode_none = 0, + igc_mng_mode_asf, + igc_mng_mode_pt, + igc_mng_mode_ipmi, + igc_mng_mode_host_if_only +}; + +#define IGC_FACTPS_MNGCG 0x20000000 + +#define IGC_FWSM_MODE_MASK 0xE +#define IGC_FWSM_MODE_SHIFT 1 +#define IGC_FWSM_FW_VALID 0x00008000 +#define IGC_FWSM_HI_EN_ONLY_MODE 0x4 + +#define IGC_MNG_IAMT_MODE 0x3 +#define IGC_MNG_DHCP_COOKIE_LENGTH 0x10 +#define IGC_MNG_DHCP_COOKIE_OFFSET 0x6F0 +#define IGC_MNG_DHCP_COMMAND_TIMEOUT 10 +#define IGC_MNG_DHCP_TX_PAYLOAD_CMD 64 +#define IGC_MNG_DHCP_COOKIE_STATUS_PARSING 0x1 +#define IGC_MNG_DHCP_COOKIE_STATUS_VLAN 0x2 + +#define IGC_VFTA_ENTRY_SHIFT 5 +#define IGC_VFTA_ENTRY_MASK 0x7F +#define IGC_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F + +#define IGC_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Num of bytes in range */ +#define IGC_HI_MAX_BLOCK_DWORD_LENGTH 448 /* Num of dwords in range */ +#define IGC_HI_COMMAND_TIMEOUT 500 /* Process HI cmd limit */ +#define IGC_HI_FW_BASE_ADDRESS 0x10000 +#define IGC_HI_FW_MAX_LENGTH (64 * 1024) /* Num of bytes */ +#define IGC_HI_FW_BLOCK_DWORD_LENGTH 256 /* Num of DWORDs per page */ +#define IGC_HICR_MEMORY_BASE_EN 0x200 /* MB Enable bit - RO */ +#define IGC_HICR_EN 0x01 /* Enable bit - RO */ +/* Driver sets this bit when done to put command in RAM */ +#define IGC_HICR_C 0x02 +#define IGC_HICR_SV 0x04 /* Status Validity */ +#define IGC_HICR_FW_RESET_ENABLE 0x40 +#define IGC_HICR_FW_RESET 0x80 + +/* Intel(R) Active Management Technology signature */ +#define IGC_IAMT_SIGNATURE 0x544D4149 +#endif diff --git a/src/spdk/dpdk/drivers/net/igc/base/igc_nvm.c b/src/spdk/dpdk/drivers/net/igc/base/igc_nvm.c new file mode 100644 index 000000000..a7c901ab5 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/igc/base/igc_nvm.c @@ -0,0 +1,1324 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + +#include "igc_api.h" + +static void igc_reload_nvm_generic(struct igc_hw *hw); + +/** + * igc_init_nvm_ops_generic - Initialize NVM function pointers + * @hw: pointer to the HW structure + * + * Setups up the function pointers to no-op functions + **/ +void igc_init_nvm_ops_generic(struct igc_hw *hw) +{ + struct igc_nvm_info *nvm = &hw->nvm; + DEBUGFUNC("igc_init_nvm_ops_generic"); + + /* Initialize function pointers */ + 
nvm->ops.init_params = igc_null_ops_generic; + nvm->ops.acquire = igc_null_ops_generic; + nvm->ops.read = igc_null_read_nvm; + nvm->ops.release = igc_null_nvm_generic; + nvm->ops.reload = igc_reload_nvm_generic; + nvm->ops.update = igc_null_ops_generic; + nvm->ops.valid_led_default = igc_null_led_default; + nvm->ops.validate = igc_null_ops_generic; + nvm->ops.write = igc_null_write_nvm; +} + +/** + * igc_null_nvm_read - No-op function, return 0 + * @hw: pointer to the HW structure + * @a: dummy variable + * @b: dummy variable + * @c: dummy variable + **/ +s32 igc_null_read_nvm(struct igc_hw IGC_UNUSEDARG * hw, + u16 IGC_UNUSEDARG a, u16 IGC_UNUSEDARG b, + u16 IGC_UNUSEDARG * c) +{ + DEBUGFUNC("igc_null_read_nvm"); + UNREFERENCED_4PARAMETER(hw, a, b, c); + return IGC_SUCCESS; +} + +/** + * igc_null_nvm_generic - No-op function, return void + * @hw: pointer to the HW structure + **/ +void igc_null_nvm_generic(struct igc_hw IGC_UNUSEDARG * hw) +{ + DEBUGFUNC("igc_null_nvm_generic"); + UNREFERENCED_1PARAMETER(hw); +} + +/** + * igc_null_led_default - No-op function, return 0 + * @hw: pointer to the HW structure + * @data: dummy variable + **/ +s32 igc_null_led_default(struct igc_hw IGC_UNUSEDARG * hw, + u16 IGC_UNUSEDARG * data) +{ + DEBUGFUNC("igc_null_led_default"); + UNREFERENCED_2PARAMETER(hw, data); + return IGC_SUCCESS; +} + +/** + * igc_null_write_nvm - No-op function, return 0 + * @hw: pointer to the HW structure + * @a: dummy variable + * @b: dummy variable + * @c: dummy variable + **/ +s32 igc_null_write_nvm(struct igc_hw IGC_UNUSEDARG * hw, + u16 IGC_UNUSEDARG a, u16 IGC_UNUSEDARG b, + u16 IGC_UNUSEDARG * c) +{ + DEBUGFUNC("igc_null_write_nvm"); + UNREFERENCED_4PARAMETER(hw, a, b, c); + return IGC_SUCCESS; +} + +/** + * igc_raise_eec_clk - Raise EEPROM clock + * @hw: pointer to the HW structure + * @eecd: pointer to the EEPROM + * + * Enable/Raise the EEPROM clock bit. + **/ +static void igc_raise_eec_clk(struct igc_hw *hw, u32 *eecd) +{ + *eecd = *eecd | IGC_EECD_SK; + IGC_WRITE_REG(hw, IGC_EECD, *eecd); + IGC_WRITE_FLUSH(hw); + usec_delay(hw->nvm.delay_usec); +} + +/** + * igc_lower_eec_clk - Lower EEPROM clock + * @hw: pointer to the HW structure + * @eecd: pointer to the EEPROM + * + * Clear/Lower the EEPROM clock bit. + **/ +static void igc_lower_eec_clk(struct igc_hw *hw, u32 *eecd) +{ + *eecd = *eecd & ~IGC_EECD_SK; + IGC_WRITE_REG(hw, IGC_EECD, *eecd); + IGC_WRITE_FLUSH(hw); + usec_delay(hw->nvm.delay_usec); +} + +/** + * igc_shift_out_eec_bits - Shift data bits our to the EEPROM + * @hw: pointer to the HW structure + * @data: data to send to the EEPROM + * @count: number of bits to shift out + * + * We need to shift 'count' bits out to the EEPROM. So, the value in the + * "data" parameter will be shifted out to the EEPROM one bit at a time. + * In order to do this, "data" must be broken down into bits. 
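+ *
+ * Bits are presented on the DI line most-significant bit first, and each
+ * bit is latched by pulsing the SK clock high and then low; DI is cleared
+ * once all bits have been shifted out.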
+ **/ +static void igc_shift_out_eec_bits(struct igc_hw *hw, u16 data, u16 count) +{ + struct igc_nvm_info *nvm = &hw->nvm; + u32 eecd = IGC_READ_REG(hw, IGC_EECD); + u32 mask; + + DEBUGFUNC("igc_shift_out_eec_bits"); + + mask = 0x01 << (count - 1); + if (nvm->type == igc_nvm_eeprom_microwire) + eecd &= ~IGC_EECD_DO; + else if (nvm->type == igc_nvm_eeprom_spi) + eecd |= IGC_EECD_DO; + + do { + eecd &= ~IGC_EECD_DI; + + if (data & mask) + eecd |= IGC_EECD_DI; + + IGC_WRITE_REG(hw, IGC_EECD, eecd); + IGC_WRITE_FLUSH(hw); + + usec_delay(nvm->delay_usec); + + igc_raise_eec_clk(hw, &eecd); + igc_lower_eec_clk(hw, &eecd); + + mask >>= 1; + } while (mask); + + eecd &= ~IGC_EECD_DI; + IGC_WRITE_REG(hw, IGC_EECD, eecd); +} + +/** + * igc_shift_in_eec_bits - Shift data bits in from the EEPROM + * @hw: pointer to the HW structure + * @count: number of bits to shift in + * + * In order to read a register from the EEPROM, we need to shift 'count' bits + * in from the EEPROM. Bits are "shifted in" by raising the clock input to + * the EEPROM (setting the SK bit), and then reading the value of the data out + * "DO" bit. During this "shifting in" process the data in "DI" bit should + * always be clear. + **/ +static u16 igc_shift_in_eec_bits(struct igc_hw *hw, u16 count) +{ + u32 eecd; + u32 i; + u16 data; + + DEBUGFUNC("igc_shift_in_eec_bits"); + + eecd = IGC_READ_REG(hw, IGC_EECD); + + eecd &= ~(IGC_EECD_DO | IGC_EECD_DI); + data = 0; + + for (i = 0; i < count; i++) { + data <<= 1; + igc_raise_eec_clk(hw, &eecd); + + eecd = IGC_READ_REG(hw, IGC_EECD); + + eecd &= ~IGC_EECD_DI; + if (eecd & IGC_EECD_DO) + data |= 1; + + igc_lower_eec_clk(hw, &eecd); + } + + return data; +} + +/** + * igc_poll_eerd_eewr_done - Poll for EEPROM read/write completion + * @hw: pointer to the HW structure + * @ee_reg: EEPROM flag for polling + * + * Polls the EEPROM status bit for either read or write completion based + * upon the value of 'ee_reg'. + **/ +s32 igc_poll_eerd_eewr_done(struct igc_hw *hw, int ee_reg) +{ + u32 attempts = 100000; + u32 i, reg = 0; + + DEBUGFUNC("igc_poll_eerd_eewr_done"); + + for (i = 0; i < attempts; i++) { + if (ee_reg == IGC_NVM_POLL_READ) + reg = IGC_READ_REG(hw, IGC_EERD); + else + reg = IGC_READ_REG(hw, IGC_EEWR); + + if (reg & IGC_NVM_RW_REG_DONE) + return IGC_SUCCESS; + + usec_delay(5); + } + + return -IGC_ERR_NVM; +} + +/** + * igc_acquire_nvm_generic - Generic request for access to EEPROM + * @hw: pointer to the HW structure + * + * Set the EEPROM access request bit and wait for EEPROM access grant bit. + * Return successful if access grant bit set, else clear the request for + * EEPROM access and return -IGC_ERR_NVM (-1). + **/ +s32 igc_acquire_nvm_generic(struct igc_hw *hw) +{ + u32 eecd = IGC_READ_REG(hw, IGC_EECD); + s32 timeout = IGC_NVM_GRANT_ATTEMPTS; + + DEBUGFUNC("igc_acquire_nvm_generic"); + + IGC_WRITE_REG(hw, IGC_EECD, eecd | IGC_EECD_REQ); + eecd = IGC_READ_REG(hw, IGC_EECD); + + while (timeout) { + if (eecd & IGC_EECD_GNT) + break; + usec_delay(5); + eecd = IGC_READ_REG(hw, IGC_EECD); + timeout--; + } + + if (!timeout) { + eecd &= ~IGC_EECD_REQ; + IGC_WRITE_REG(hw, IGC_EECD, eecd); + DEBUGOUT("Could not acquire NVM grant\n"); + return -IGC_ERR_NVM; + } + + return IGC_SUCCESS; +} + +/** + * igc_standby_nvm - Return EEPROM to standby state + * @hw: pointer to the HW structure + * + * Return the EEPROM to a standby state. 
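+ *
+ * For Microwire parts CS and SK are deasserted, then CS is reasserted while
+ * the clock is pulsed; for SPI parts CS is toggled high and then low to
+ * flush any command in progress.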
+ **/ +static void igc_standby_nvm(struct igc_hw *hw) +{ + struct igc_nvm_info *nvm = &hw->nvm; + u32 eecd = IGC_READ_REG(hw, IGC_EECD); + + DEBUGFUNC("igc_standby_nvm"); + + if (nvm->type == igc_nvm_eeprom_microwire) { + eecd &= ~(IGC_EECD_CS | IGC_EECD_SK); + IGC_WRITE_REG(hw, IGC_EECD, eecd); + IGC_WRITE_FLUSH(hw); + usec_delay(nvm->delay_usec); + + igc_raise_eec_clk(hw, &eecd); + + /* Select EEPROM */ + eecd |= IGC_EECD_CS; + IGC_WRITE_REG(hw, IGC_EECD, eecd); + IGC_WRITE_FLUSH(hw); + usec_delay(nvm->delay_usec); + + igc_lower_eec_clk(hw, &eecd); + } else if (nvm->type == igc_nvm_eeprom_spi) { + /* Toggle CS to flush commands */ + eecd |= IGC_EECD_CS; + IGC_WRITE_REG(hw, IGC_EECD, eecd); + IGC_WRITE_FLUSH(hw); + usec_delay(nvm->delay_usec); + eecd &= ~IGC_EECD_CS; + IGC_WRITE_REG(hw, IGC_EECD, eecd); + IGC_WRITE_FLUSH(hw); + usec_delay(nvm->delay_usec); + } +} + +/** + * igc_stop_nvm - Terminate EEPROM command + * @hw: pointer to the HW structure + * + * Terminates the current command by inverting the EEPROM's chip select pin. + **/ +void igc_stop_nvm(struct igc_hw *hw) +{ + u32 eecd; + + DEBUGFUNC("igc_stop_nvm"); + + eecd = IGC_READ_REG(hw, IGC_EECD); + if (hw->nvm.type == igc_nvm_eeprom_spi) { + /* Pull CS high */ + eecd |= IGC_EECD_CS; + igc_lower_eec_clk(hw, &eecd); + } else if (hw->nvm.type == igc_nvm_eeprom_microwire) { + /* CS on Microwire is active-high */ + eecd &= ~(IGC_EECD_CS | IGC_EECD_DI); + IGC_WRITE_REG(hw, IGC_EECD, eecd); + igc_raise_eec_clk(hw, &eecd); + igc_lower_eec_clk(hw, &eecd); + } +} + +/** + * igc_release_nvm_generic - Release exclusive access to EEPROM + * @hw: pointer to the HW structure + * + * Stop any current commands to the EEPROM and clear the EEPROM request bit. + **/ +void igc_release_nvm_generic(struct igc_hw *hw) +{ + u32 eecd; + + DEBUGFUNC("igc_release_nvm_generic"); + + igc_stop_nvm(hw); + + eecd = IGC_READ_REG(hw, IGC_EECD); + eecd &= ~IGC_EECD_REQ; + IGC_WRITE_REG(hw, IGC_EECD, eecd); +} + +/** + * igc_ready_nvm_eeprom - Prepares EEPROM for read/write + * @hw: pointer to the HW structure + * + * Setups the EEPROM for reading and writing. + **/ +static s32 igc_ready_nvm_eeprom(struct igc_hw *hw) +{ + struct igc_nvm_info *nvm = &hw->nvm; + u32 eecd = IGC_READ_REG(hw, IGC_EECD); + u8 spi_stat_reg; + + DEBUGFUNC("igc_ready_nvm_eeprom"); + + if (nvm->type == igc_nvm_eeprom_microwire) { + /* Clear SK and DI */ + eecd &= ~(IGC_EECD_DI | IGC_EECD_SK); + IGC_WRITE_REG(hw, IGC_EECD, eecd); + /* Set CS */ + eecd |= IGC_EECD_CS; + IGC_WRITE_REG(hw, IGC_EECD, eecd); + } else if (nvm->type == igc_nvm_eeprom_spi) { + u16 timeout = NVM_MAX_RETRY_SPI; + + /* Clear SK and CS */ + eecd &= ~(IGC_EECD_CS | IGC_EECD_SK); + IGC_WRITE_REG(hw, IGC_EECD, eecd); + IGC_WRITE_FLUSH(hw); + usec_delay(1); + + /* Read "Status Register" repeatedly until the LSB is cleared. + * The EEPROM will signal that the command has been completed + * by clearing bit 0 of the internal status register. If it's + * not cleared within 'timeout', then error out. 
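+ * Each poll shifts out the RDSR opcode, reads back the 8-bit status
+ * register, and returns the part to standby before retrying.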
+ */ + while (timeout) { + igc_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI, + hw->nvm.opcode_bits); + spi_stat_reg = (u8)igc_shift_in_eec_bits(hw, 8); + if (!(spi_stat_reg & NVM_STATUS_RDY_SPI)) + break; + + usec_delay(5); + igc_standby_nvm(hw); + timeout--; + } + + if (!timeout) { + DEBUGOUT("SPI NVM Status error\n"); + return -IGC_ERR_NVM; + } + } + + return IGC_SUCCESS; +} + +/** + * igc_read_nvm_spi - Read EEPROM's using SPI + * @hw: pointer to the HW structure + * @offset: offset of word in the EEPROM to read + * @words: number of words to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM. + **/ +s32 igc_read_nvm_spi(struct igc_hw *hw, u16 offset, u16 words, u16 *data) +{ + struct igc_nvm_info *nvm = &hw->nvm; + u32 i = 0; + s32 ret_val; + u16 word_in; + u8 read_opcode = NVM_READ_OPCODE_SPI; + + DEBUGFUNC("igc_read_nvm_spi"); + + /* A check for invalid values: offset too large, too many words, + * and not enough words. + */ + if (offset >= nvm->word_size || words > (nvm->word_size - offset) || + words == 0) { + DEBUGOUT("nvm parameter(s) out of bounds\n"); + return -IGC_ERR_NVM; + } + + ret_val = nvm->ops.acquire(hw); + if (ret_val) + return ret_val; + + ret_val = igc_ready_nvm_eeprom(hw); + if (ret_val) + goto release; + + igc_standby_nvm(hw); + + if (nvm->address_bits == 8 && offset >= 128) + read_opcode |= NVM_A8_OPCODE_SPI; + + /* Send the READ command (opcode + addr) */ + igc_shift_out_eec_bits(hw, read_opcode, nvm->opcode_bits); + igc_shift_out_eec_bits(hw, (u16)(offset * 2), nvm->address_bits); + + /* Read the data. SPI NVMs increment the address with each byte + * read and will roll over if reading beyond the end. This allows + * us to read the whole NVM from any offset + */ + for (i = 0; i < words; i++) { + word_in = igc_shift_in_eec_bits(hw, 16); + data[i] = (word_in >> 8) | (word_in << 8); + } + +release: + nvm->ops.release(hw); + + return ret_val; +} + +/** + * igc_read_nvm_microwire - Reads EEPROM's using microwire + * @hw: pointer to the HW structure + * @offset: offset of word in the EEPROM to read + * @words: number of words to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM. + **/ +s32 igc_read_nvm_microwire(struct igc_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct igc_nvm_info *nvm = &hw->nvm; + u32 i = 0; + s32 ret_val; + u8 read_opcode = NVM_READ_OPCODE_MICROWIRE; + + DEBUGFUNC("igc_read_nvm_microwire"); + + /* A check for invalid values: offset too large, too many words, + * and not enough words. + */ + if (offset >= nvm->word_size || words > (nvm->word_size - offset) || + words == 0) { + DEBUGOUT("nvm parameter(s) out of bounds\n"); + return -IGC_ERR_NVM; + } + + ret_val = nvm->ops.acquire(hw); + if (ret_val) + return ret_val; + + ret_val = igc_ready_nvm_eeprom(hw); + if (ret_val) + goto release; + + for (i = 0; i < words; i++) { + /* Send the READ command (opcode + addr) */ + igc_shift_out_eec_bits(hw, read_opcode, nvm->opcode_bits); + igc_shift_out_eec_bits(hw, (u16)(offset + i), + nvm->address_bits); + + /* Read the data. For microwire, each word requires the + * overhead of setup and tear-down. 
+ */ + data[i] = igc_shift_in_eec_bits(hw, 16); + igc_standby_nvm(hw); + } + +release: + nvm->ops.release(hw); + + return ret_val; +} + +/** + * igc_read_nvm_eerd - Reads EEPROM using EERD register + * @hw: pointer to the HW structure + * @offset: offset of word in the EEPROM to read + * @words: number of words to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM using the EERD register. + **/ +s32 igc_read_nvm_eerd(struct igc_hw *hw, u16 offset, u16 words, u16 *data) +{ + struct igc_nvm_info *nvm = &hw->nvm; + u32 i, eerd = 0; + s32 ret_val = IGC_SUCCESS; + + DEBUGFUNC("igc_read_nvm_eerd"); + + /* A check for invalid values: offset too large, too many words, + * too many words for the offset, and not enough words. + */ + if (offset >= nvm->word_size || words > (nvm->word_size - offset) || + words == 0) { + DEBUGOUT("nvm parameter(s) out of bounds\n"); + return -IGC_ERR_NVM; + } + + for (i = 0; i < words; i++) { + eerd = ((offset + i) << IGC_NVM_RW_ADDR_SHIFT) + + IGC_NVM_RW_REG_START; + + IGC_WRITE_REG(hw, IGC_EERD, eerd); + ret_val = igc_poll_eerd_eewr_done(hw, IGC_NVM_POLL_READ); + if (ret_val) + break; + + data[i] = (IGC_READ_REG(hw, IGC_EERD) >> + IGC_NVM_RW_REG_DATA); + } + + if (ret_val) + DEBUGOUT1("NVM read error: %d\n", ret_val); + + return ret_val; +} + +/** + * igc_write_nvm_spi - Write to EEPROM using SPI + * @hw: pointer to the HW structure + * @offset: offset within the EEPROM to be written to + * @words: number of words to write + * @data: 16 bit word(s) to be written to the EEPROM + * + * Writes data to EEPROM at offset using SPI interface. + * + * If igc_update_nvm_checksum is not called after this function , the + * EEPROM will most likely contain an invalid checksum. + **/ +s32 igc_write_nvm_spi(struct igc_hw *hw, u16 offset, u16 words, u16 *data) +{ + struct igc_nvm_info *nvm = &hw->nvm; + s32 ret_val = -IGC_ERR_NVM; + u16 widx = 0; + + DEBUGFUNC("igc_write_nvm_spi"); + + /* A check for invalid values: offset too large, too many words, + * and not enough words. 
+ */ + if (offset >= nvm->word_size || words > (nvm->word_size - offset) || + words == 0) { + DEBUGOUT("nvm parameter(s) out of bounds\n"); + return -IGC_ERR_NVM; + } + + while (widx < words) { + u8 write_opcode = NVM_WRITE_OPCODE_SPI; + + ret_val = nvm->ops.acquire(hw); + if (ret_val) + return ret_val; + + ret_val = igc_ready_nvm_eeprom(hw); + if (ret_val) { + nvm->ops.release(hw); + return ret_val; + } + + igc_standby_nvm(hw); + + /* Send the WRITE ENABLE command (8 bit opcode) */ + igc_shift_out_eec_bits(hw, NVM_WREN_OPCODE_SPI, + nvm->opcode_bits); + + igc_standby_nvm(hw); + + /* Some SPI eeproms use the 8th address bit embedded in the + * opcode + */ + if (nvm->address_bits == 8 && offset >= 128) + write_opcode |= NVM_A8_OPCODE_SPI; + + /* Send the Write command (8-bit opcode + addr) */ + igc_shift_out_eec_bits(hw, write_opcode, nvm->opcode_bits); + igc_shift_out_eec_bits(hw, (u16)((offset + widx) * 2), + nvm->address_bits); + + /* Loop to allow for up to whole page write of eeprom */ + while (widx < words) { + u16 word_out = data[widx]; + word_out = (word_out >> 8) | (word_out << 8); + igc_shift_out_eec_bits(hw, word_out, 16); + widx++; + + if ((((offset + widx) * 2) % nvm->page_size) == 0) { + igc_standby_nvm(hw); + break; + } + } + msec_delay(10); + nvm->ops.release(hw); + } + + return ret_val; +} + +/** + * igc_write_nvm_microwire - Writes EEPROM using microwire + * @hw: pointer to the HW structure + * @offset: offset within the EEPROM to be written to + * @words: number of words to write + * @data: 16 bit word(s) to be written to the EEPROM + * + * Writes data to EEPROM at offset using microwire interface. + * + * If igc_update_nvm_checksum is not called after this function , the + * EEPROM will most likely contain an invalid checksum. + **/ +s32 igc_write_nvm_microwire(struct igc_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct igc_nvm_info *nvm = &hw->nvm; + s32 ret_val; + u32 eecd; + u16 words_written = 0; + u16 widx = 0; + + DEBUGFUNC("igc_write_nvm_microwire"); + + /* A check for invalid values: offset too large, too many words, + * and not enough words. 
+ */ + if (offset >= nvm->word_size || words > (nvm->word_size - offset) || + words == 0) { + DEBUGOUT("nvm parameter(s) out of bounds\n"); + return -IGC_ERR_NVM; + } + + ret_val = nvm->ops.acquire(hw); + if (ret_val) + return ret_val; + + ret_val = igc_ready_nvm_eeprom(hw); + if (ret_val) + goto release; + + igc_shift_out_eec_bits(hw, NVM_EWEN_OPCODE_MICROWIRE, + (u16)(nvm->opcode_bits + 2)); + + igc_shift_out_eec_bits(hw, 0, (u16)(nvm->address_bits - 2)); + + igc_standby_nvm(hw); + + while (words_written < words) { + igc_shift_out_eec_bits(hw, NVM_WRITE_OPCODE_MICROWIRE, + nvm->opcode_bits); + + igc_shift_out_eec_bits(hw, (u16)(offset + words_written), + nvm->address_bits); + + igc_shift_out_eec_bits(hw, data[words_written], 16); + + igc_standby_nvm(hw); + + for (widx = 0; widx < 200; widx++) { + eecd = IGC_READ_REG(hw, IGC_EECD); + if (eecd & IGC_EECD_DO) + break; + usec_delay(50); + } + + if (widx == 200) { + DEBUGOUT("NVM Write did not complete\n"); + ret_val = -IGC_ERR_NVM; + goto release; + } + + igc_standby_nvm(hw); + + words_written++; + } + + igc_shift_out_eec_bits(hw, NVM_EWDS_OPCODE_MICROWIRE, + (u16)(nvm->opcode_bits + 2)); + + igc_shift_out_eec_bits(hw, 0, (u16)(nvm->address_bits - 2)); + +release: + nvm->ops.release(hw); + + return ret_val; +} + +/** + * igc_read_pba_string_generic - Read device part number + * @hw: pointer to the HW structure + * @pba_num: pointer to device part number + * @pba_num_size: size of part number buffer + * + * Reads the product board assembly (PBA) number from the EEPROM and stores + * the value in pba_num. + **/ +s32 igc_read_pba_string_generic(struct igc_hw *hw, u8 *pba_num, + u32 pba_num_size) +{ + s32 ret_val; + u16 nvm_data; + u16 pba_ptr; + u16 offset; + u16 length; + + DEBUGFUNC("igc_read_pba_string_generic"); + + if (pba_num == NULL) { + DEBUGOUT("PBA string buffer was null\n"); + return -IGC_ERR_INVALID_ARGUMENT; + } + + ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_0, 1, &nvm_data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + + ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &pba_ptr); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + + /* if nvm_data is not ptr guard the PBA must be in legacy format which + * means pba_ptr is actually our second data word for the PBA number + * and we can decode it into an ascii string + */ + if (nvm_data != NVM_PBA_PTR_GUARD) { + DEBUGOUT("NVM PBA number is not stored as string\n"); + + /* make sure callers buffer is big enough to store the PBA */ + if (pba_num_size < IGC_PBANUM_LENGTH) { + DEBUGOUT("PBA string buffer too small\n"); + return IGC_ERR_NO_SPACE; + } + + /* extract hex string from data and pba_ptr */ + pba_num[0] = (nvm_data >> 12) & 0xF; + pba_num[1] = (nvm_data >> 8) & 0xF; + pba_num[2] = (nvm_data >> 4) & 0xF; + pba_num[3] = nvm_data & 0xF; + pba_num[4] = (pba_ptr >> 12) & 0xF; + pba_num[5] = (pba_ptr >> 8) & 0xF; + pba_num[6] = '-'; + pba_num[7] = 0; + pba_num[8] = (pba_ptr >> 4) & 0xF; + pba_num[9] = pba_ptr & 0xF; + + /* put a null character on the end of our string */ + pba_num[10] = '\0'; + + /* switch all the data but the '-' to hex char */ + for (offset = 0; offset < 10; offset++) { + if (pba_num[offset] < 0xA) + pba_num[offset] += '0'; + else if (pba_num[offset] < 0x10) + pba_num[offset] += 'A' - 0xA; + } + + return IGC_SUCCESS; + } + + ret_val = hw->nvm.ops.read(hw, pba_ptr, 1, &length); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + + if (length == 0xFFFF || length == 0) { + DEBUGOUT("NVM PBA 
number section invalid length\n"); + return -IGC_ERR_NVM_PBA_SECTION; + } + /* check if pba_num buffer is big enough */ + if (pba_num_size < (((u32)length * 2) - 1)) { + DEBUGOUT("PBA string buffer too small\n"); + return -IGC_ERR_NO_SPACE; + } + + /* trim pba length from start of string */ + pba_ptr++; + length--; + + for (offset = 0; offset < length; offset++) { + ret_val = hw->nvm.ops.read(hw, pba_ptr + offset, 1, &nvm_data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + pba_num[offset * 2] = (u8)(nvm_data >> 8); + pba_num[(offset * 2) + 1] = (u8)(nvm_data & 0xFF); + } + pba_num[offset * 2] = '\0'; + + return IGC_SUCCESS; +} + +/** + * igc_read_pba_length_generic - Read device part number length + * @hw: pointer to the HW structure + * @pba_num_size: size of part number buffer + * + * Reads the product board assembly (PBA) number length from the EEPROM and + * stores the value in pba_num_size. + **/ +s32 igc_read_pba_length_generic(struct igc_hw *hw, u32 *pba_num_size) +{ + s32 ret_val; + u16 nvm_data; + u16 pba_ptr; + u16 length; + + DEBUGFUNC("igc_read_pba_length_generic"); + + if (pba_num_size == NULL) { + DEBUGOUT("PBA buffer size was null\n"); + return -IGC_ERR_INVALID_ARGUMENT; + } + + ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_0, 1, &nvm_data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + + ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &pba_ptr); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + + /* if data is not ptr guard the PBA must be in legacy format */ + if (nvm_data != NVM_PBA_PTR_GUARD) { + *pba_num_size = IGC_PBANUM_LENGTH; + return IGC_SUCCESS; + } + + ret_val = hw->nvm.ops.read(hw, pba_ptr, 1, &length); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + + if (length == 0xFFFF || length == 0) { + DEBUGOUT("NVM PBA number section invalid length\n"); + return -IGC_ERR_NVM_PBA_SECTION; + } + + /* Convert from length in u16 values to u8 chars, add 1 for NULL, + * and subtract 2 because length field is included in length. + */ + *pba_num_size = ((u32)length * 2) - 1; + + return IGC_SUCCESS; +} + +/** + * igc_read_pba_num_generic - Read device part number + * @hw: pointer to the HW structure + * @pba_num: pointer to device part number + * + * Reads the product board assembly (PBA) number from the EEPROM and stores + * the value in pba_num. + **/ +s32 igc_read_pba_num_generic(struct igc_hw *hw, u32 *pba_num) +{ + s32 ret_val; + u16 nvm_data; + + DEBUGFUNC("igc_read_pba_num_generic"); + + ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_0, 1, &nvm_data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } else if (nvm_data == NVM_PBA_PTR_GUARD) { + DEBUGOUT("NVM Not Supported\n"); + return -IGC_NOT_IMPLEMENTED; + } + *pba_num = (u32)(nvm_data << 16); + + ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &nvm_data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + *pba_num |= nvm_data; + + return IGC_SUCCESS; +} + + +/** + * igc_read_pba_raw + * @hw: pointer to the HW structure + * @eeprom_buf: optional pointer to EEPROM image + * @eeprom_buf_size: size of EEPROM image in words + * @max_pba_block_size: PBA block size limit + * @pba: pointer to output PBA structure + * + * Reads PBA from EEPROM image when eeprom_buf is not NULL. + * Reads PBA from physical EEPROM device when eeprom_buf is NULL. 
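+ *
+ * The two PBA words are returned in pba->word[]; when the first word is the
+ * pointer guard, the pointed-to PBA block is also copied into
+ * pba->pba_block, provided its size does not exceed max_pba_block_size.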
+ * + **/ +s32 igc_read_pba_raw(struct igc_hw *hw, u16 *eeprom_buf, + u32 eeprom_buf_size, u16 max_pba_block_size, + struct igc_pba *pba) +{ + s32 ret_val; + u16 pba_block_size; + + if (pba == NULL) + return -IGC_ERR_PARAM; + + if (eeprom_buf == NULL) { + ret_val = igc_read_nvm(hw, NVM_PBA_OFFSET_0, 2, + &pba->word[0]); + if (ret_val) + return ret_val; + } else { + if (eeprom_buf_size > NVM_PBA_OFFSET_1) { + pba->word[0] = eeprom_buf[NVM_PBA_OFFSET_0]; + pba->word[1] = eeprom_buf[NVM_PBA_OFFSET_1]; + } else { + return -IGC_ERR_PARAM; + } + } + + if (pba->word[0] == NVM_PBA_PTR_GUARD) { + if (pba->pba_block == NULL) + return -IGC_ERR_PARAM; + + ret_val = igc_get_pba_block_size(hw, eeprom_buf, + eeprom_buf_size, + &pba_block_size); + if (ret_val) + return ret_val; + + if (pba_block_size > max_pba_block_size) + return -IGC_ERR_PARAM; + + if (eeprom_buf == NULL) { + ret_val = igc_read_nvm(hw, pba->word[1], + pba_block_size, + pba->pba_block); + if (ret_val) + return ret_val; + } else { + if (eeprom_buf_size > (u32)(pba->word[1] + + pba_block_size)) { + memcpy(pba->pba_block, + &eeprom_buf[pba->word[1]], + pba_block_size * sizeof(u16)); + } else { + return -IGC_ERR_PARAM; + } + } + } + + return IGC_SUCCESS; +} + +/** + * igc_write_pba_raw + * @hw: pointer to the HW structure + * @eeprom_buf: optional pointer to EEPROM image + * @eeprom_buf_size: size of EEPROM image in words + * @pba: pointer to PBA structure + * + * Writes PBA to EEPROM image when eeprom_buf is not NULL. + * Writes PBA to physical EEPROM device when eeprom_buf is NULL. + * + **/ +s32 igc_write_pba_raw(struct igc_hw *hw, u16 *eeprom_buf, + u32 eeprom_buf_size, struct igc_pba *pba) +{ + s32 ret_val; + + if (pba == NULL) + return -IGC_ERR_PARAM; + + if (eeprom_buf == NULL) { + ret_val = igc_write_nvm(hw, NVM_PBA_OFFSET_0, 2, + &pba->word[0]); + if (ret_val) + return ret_val; + } else { + if (eeprom_buf_size > NVM_PBA_OFFSET_1) { + eeprom_buf[NVM_PBA_OFFSET_0] = pba->word[0]; + eeprom_buf[NVM_PBA_OFFSET_1] = pba->word[1]; + } else { + return -IGC_ERR_PARAM; + } + } + + if (pba->word[0] == NVM_PBA_PTR_GUARD) { + if (pba->pba_block == NULL) + return -IGC_ERR_PARAM; + + if (eeprom_buf == NULL) { + ret_val = igc_write_nvm(hw, pba->word[1], + pba->pba_block[0], + pba->pba_block); + if (ret_val) + return ret_val; + } else { + if (eeprom_buf_size > (u32)(pba->word[1] + + pba->pba_block[0])) { + memcpy(&eeprom_buf[pba->word[1]], + pba->pba_block, + pba->pba_block[0] * sizeof(u16)); + } else { + return -IGC_ERR_PARAM; + } + } + } + + return IGC_SUCCESS; +} + +/** + * igc_get_pba_block_size + * @hw: pointer to the HW structure + * @eeprom_buf: optional pointer to EEPROM image + * @eeprom_buf_size: size of EEPROM image in words + * @pba_data_size: pointer to output variable + * + * Returns the size of the PBA block in words. Function operates on EEPROM + * image if the eeprom_buf pointer is not NULL otherwise it accesses physical + * EEPROM device. 
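+ *
+ * When the first PBA word is the pointer guard, the block size is taken
+ * from the first word of the block it points to; a legacy-format PBA has
+ * no block, so a size of zero is reported.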
+ * + **/ +s32 igc_get_pba_block_size(struct igc_hw *hw, u16 *eeprom_buf, + u32 eeprom_buf_size, u16 *pba_block_size) +{ + s32 ret_val; + u16 pba_word[2]; + u16 length; + + DEBUGFUNC("igc_get_pba_block_size"); + + if (eeprom_buf == NULL) { + ret_val = igc_read_nvm(hw, NVM_PBA_OFFSET_0, 2, &pba_word[0]); + if (ret_val) + return ret_val; + } else { + if (eeprom_buf_size > NVM_PBA_OFFSET_1) { + pba_word[0] = eeprom_buf[NVM_PBA_OFFSET_0]; + pba_word[1] = eeprom_buf[NVM_PBA_OFFSET_1]; + } else { + return -IGC_ERR_PARAM; + } + } + + if (pba_word[0] == NVM_PBA_PTR_GUARD) { + if (eeprom_buf == NULL) { + ret_val = igc_read_nvm(hw, pba_word[1] + 0, 1, + &length); + if (ret_val) + return ret_val; + } else { + if (eeprom_buf_size > pba_word[1]) + length = eeprom_buf[pba_word[1] + 0]; + else + return -IGC_ERR_PARAM; + } + + if (length == 0xFFFF || length == 0) + return -IGC_ERR_NVM_PBA_SECTION; + } else { + /* PBA number in legacy format, there is no PBA Block. */ + length = 0; + } + + if (pba_block_size != NULL) + *pba_block_size = length; + + return IGC_SUCCESS; +} + +/** + * igc_read_mac_addr_generic - Read device MAC address + * @hw: pointer to the HW structure + * + * Reads the device MAC address from the EEPROM and stores the value. + * Since devices with two ports use the same EEPROM, we increment the + * last bit in the MAC address for the second port. + **/ +s32 igc_read_mac_addr_generic(struct igc_hw *hw) +{ + u32 rar_high; + u32 rar_low; + u16 i; + + rar_high = IGC_READ_REG(hw, IGC_RAH(0)); + rar_low = IGC_READ_REG(hw, IGC_RAL(0)); + + for (i = 0; i < IGC_RAL_MAC_ADDR_LEN; i++) + hw->mac.perm_addr[i] = (u8)(rar_low >> (i * 8)); + + for (i = 0; i < IGC_RAH_MAC_ADDR_LEN; i++) + hw->mac.perm_addr[i + 4] = (u8)(rar_high >> (i * 8)); + + for (i = 0; i < ETH_ADDR_LEN; i++) + hw->mac.addr[i] = hw->mac.perm_addr[i]; + + return IGC_SUCCESS; +} + +/** + * igc_validate_nvm_checksum_generic - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Calculates the EEPROM checksum by reading/adding each word of the EEPROM + * and then verifies that the sum of the EEPROM is equal to 0xBABA. + **/ +s32 igc_validate_nvm_checksum_generic(struct igc_hw *hw) +{ + s32 ret_val; + u16 checksum = 0; + u16 i, nvm_data; + + DEBUGFUNC("igc_validate_nvm_checksum_generic"); + + for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) { + ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + checksum += nvm_data; + } + + if (checksum != (u16)NVM_SUM) { + DEBUGOUT("NVM Checksum Invalid\n"); + return -IGC_ERR_NVM; + } + + return IGC_SUCCESS; +} + +/** + * igc_update_nvm_checksum_generic - Update EEPROM checksum + * @hw: pointer to the HW structure + * + * Updates the EEPROM checksum by reading/adding each word of the EEPROM + * up to the checksum. Then calculates the EEPROM checksum and writes the + * value to the EEPROM. 
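+ *
+ * The checksum word written at NVM_CHECKSUM_REG is NVM_SUM minus the sum of
+ * all preceding words, so that the words from offset 0 through the checksum
+ * word total NVM_SUM (0xBABA).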
+ **/ +s32 igc_update_nvm_checksum_generic(struct igc_hw *hw) +{ + s32 ret_val; + u16 checksum = 0; + u16 i, nvm_data; + + DEBUGFUNC("igc_update_nvm_checksum"); + + for (i = 0; i < NVM_CHECKSUM_REG; i++) { + ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); + if (ret_val) { + DEBUGOUT("NVM Read Error while updating checksum.\n"); + return ret_val; + } + checksum += nvm_data; + } + checksum = (u16)NVM_SUM - checksum; + ret_val = hw->nvm.ops.write(hw, NVM_CHECKSUM_REG, 1, &checksum); + if (ret_val) + DEBUGOUT("NVM Write Error while updating checksum.\n"); + + return ret_val; +} + +/** + * igc_reload_nvm_generic - Reloads EEPROM + * @hw: pointer to the HW structure + * + * Reloads the EEPROM by setting the "Reinitialize from EEPROM" bit in the + * extended control register. + **/ +static void igc_reload_nvm_generic(struct igc_hw *hw) +{ + u32 ctrl_ext; + + DEBUGFUNC("igc_reload_nvm_generic"); + + usec_delay(10); + ctrl_ext = IGC_READ_REG(hw, IGC_CTRL_EXT); + ctrl_ext |= IGC_CTRL_EXT_EE_RST; + IGC_WRITE_REG(hw, IGC_CTRL_EXT, ctrl_ext); + IGC_WRITE_FLUSH(hw); +} + +/** + * igc_get_fw_version - Get firmware version information + * @hw: pointer to the HW structure + * @fw_vers: pointer to output version structure + * + * unsupported/not present features return 0 in version structure + **/ +void igc_get_fw_version(struct igc_hw *hw, struct igc_fw_version *fw_vers) +{ + u16 eeprom_verh, eeprom_verl, etrack_test, fw_version; + u8 q, hval, rem, result; + u16 comb_verh, comb_verl, comb_offset; + + memset(fw_vers, 0, sizeof(struct igc_fw_version)); + + /* + * basic eeprom version numbers, bits used vary by part and by tool + * used to create the nvm images. Check which data format we have. + */ + switch (hw->mac.type) { + case igc_i225: + hw->nvm.ops.read(hw, NVM_ETRACK_HIWORD, 1, &etrack_test); + /* find combo image version */ + hw->nvm.ops.read(hw, NVM_COMB_VER_PTR, 1, &comb_offset); + if (comb_offset && comb_offset != NVM_VER_INVALID) { + hw->nvm.ops.read(hw, NVM_COMB_VER_OFF + comb_offset + 1, + 1, &comb_verh); + hw->nvm.ops.read(hw, NVM_COMB_VER_OFF + comb_offset, + 1, &comb_verl); + + /* get Option Rom version if it exists and is valid */ + if (comb_verh && comb_verl && + comb_verh != NVM_VER_INVALID && + comb_verl != NVM_VER_INVALID) { + fw_vers->or_valid = true; + fw_vers->or_major = comb_verl >> + NVM_COMB_VER_SHFT; + fw_vers->or_build = (comb_verl << + NVM_COMB_VER_SHFT) | + (comb_verh >> + NVM_COMB_VER_SHFT); + fw_vers->or_patch = comb_verh & + NVM_COMB_VER_MASK; + } + } + break; + default: + hw->nvm.ops.read(hw, NVM_ETRACK_HIWORD, 1, &etrack_test); + return; + } + hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version); + fw_vers->eep_major = (fw_version & NVM_MAJOR_MASK) + >> NVM_MAJOR_SHIFT; + + /* check for old style version format in newer images*/ + if ((fw_version & NVM_NEW_DEC_MASK) == 0x0) { + eeprom_verl = (fw_version & NVM_COMB_VER_MASK); + } else { + eeprom_verl = (fw_version & NVM_MINOR_MASK) + >> NVM_MINOR_SHIFT; + } + /* Convert minor value to hex before assigning to output struct + * Val to be converted will not be higher than 99, per tool output + */ + q = eeprom_verl / NVM_HEX_CONV; + hval = q * NVM_HEX_TENS; + rem = eeprom_verl % NVM_HEX_CONV; + result = hval + rem; + fw_vers->eep_minor = result; + + if ((etrack_test & NVM_MAJOR_MASK) == NVM_ETRACK_VALID) { + hw->nvm.ops.read(hw, NVM_ETRACK_WORD, 1, &eeprom_verl); + hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verh); + fw_vers->etrack_id = (eeprom_verh << NVM_ETRACK_SHIFT) + | eeprom_verl; + } else if ((etrack_test & 
NVM_ETRACK_VALID) == 0) { + hw->nvm.ops.read(hw, NVM_ETRACK_WORD, 1, &eeprom_verh); + hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verl); + fw_vers->etrack_id = (eeprom_verh << NVM_ETRACK_SHIFT) | + eeprom_verl; + } +} diff --git a/src/spdk/dpdk/drivers/net/igc/base/igc_nvm.h b/src/spdk/dpdk/drivers/net/igc/base/igc_nvm.h new file mode 100644 index 000000000..0eee5e457 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/igc/base/igc_nvm.h @@ -0,0 +1,69 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + +#ifndef _IGC_NVM_H_ +#define _IGC_NVM_H_ + +struct igc_pba { + u16 word[2]; + u16 *pba_block; +}; + +struct igc_fw_version { + u32 etrack_id; + u16 eep_major; + u16 eep_minor; + u16 eep_build; + + u8 invm_major; + u8 invm_minor; + u8 invm_img_type; + + bool or_valid; + u16 or_major; + u16 or_build; + u16 or_patch; +}; + + +void igc_init_nvm_ops_generic(struct igc_hw *hw); +s32 igc_null_read_nvm(struct igc_hw *hw, u16 a, u16 b, u16 *c); +void igc_null_nvm_generic(struct igc_hw *hw); +s32 igc_null_led_default(struct igc_hw *hw, u16 *data); +s32 igc_null_write_nvm(struct igc_hw *hw, u16 a, u16 b, u16 *c); +s32 igc_acquire_nvm_generic(struct igc_hw *hw); + +s32 igc_poll_eerd_eewr_done(struct igc_hw *hw, int ee_reg); +s32 igc_read_mac_addr_generic(struct igc_hw *hw); +s32 igc_read_pba_num_generic(struct igc_hw *hw, u32 *pba_num); +s32 igc_read_pba_string_generic(struct igc_hw *hw, u8 *pba_num, + u32 pba_num_size); +s32 igc_read_pba_length_generic(struct igc_hw *hw, u32 *pba_num_size); +s32 igc_read_pba_raw(struct igc_hw *hw, u16 *eeprom_buf, + u32 eeprom_buf_size, u16 max_pba_block_size, + struct igc_pba *pba); +s32 igc_write_pba_raw(struct igc_hw *hw, u16 *eeprom_buf, + u32 eeprom_buf_size, struct igc_pba *pba); +s32 igc_get_pba_block_size(struct igc_hw *hw, u16 *eeprom_buf, + u32 eeprom_buf_size, u16 *pba_block_size); +s32 igc_read_nvm_spi(struct igc_hw *hw, u16 offset, u16 words, u16 *data); +s32 igc_read_nvm_microwire(struct igc_hw *hw, u16 offset, + u16 words, u16 *data); +s32 igc_read_nvm_eerd(struct igc_hw *hw, u16 offset, u16 words, + u16 *data); +s32 igc_valid_led_default_generic(struct igc_hw *hw, u16 *data); +s32 igc_validate_nvm_checksum_generic(struct igc_hw *hw); +s32 igc_write_nvm_microwire(struct igc_hw *hw, u16 offset, + u16 words, u16 *data); +s32 igc_write_nvm_spi(struct igc_hw *hw, u16 offset, u16 words, + u16 *data); +s32 igc_update_nvm_checksum_generic(struct igc_hw *hw); +void igc_stop_nvm(struct igc_hw *hw); +void igc_release_nvm_generic(struct igc_hw *hw); +void igc_get_fw_version(struct igc_hw *hw, + struct igc_fw_version *fw_vers); + +#define IGC_STM_OPCODE 0xDB00 + +#endif diff --git a/src/spdk/dpdk/drivers/net/igc/base/igc_osdep.c b/src/spdk/dpdk/drivers/net/igc/base/igc_osdep.c new file mode 100644 index 000000000..508f2e07a --- /dev/null +++ b/src/spdk/dpdk/drivers/net/igc/base/igc_osdep.c @@ -0,0 +1,64 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019-2020 Intel Corporation + */ + +#include "igc_api.h" + +/* + * NOTE: the following routines using the igc + * naming style are provided to the shared + * code but are OS specific + */ + +void +igc_write_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value) +{ + (void)hw; + (void)reg; + (void)value; +} + +void +igc_read_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value) +{ + (void)hw; + (void)reg; + *value = 0; +} + +void +igc_pci_set_mwi(struct igc_hw *hw) +{ + (void)hw; +} + +void +igc_pci_clear_mwi(struct igc_hw *hw) +{ + (void)hw; +} + +/* + * Read the PCI 
Express capabilities
+ */
+int32_t
+igc_read_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value)
+{
+ (void)hw;
+ (void)reg;
+ (void)value;
+ return IGC_NOT_IMPLEMENTED;
+}
+
+/*
+ * Write the PCI Express capabilities
+ */
+int32_t
+igc_write_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value)
+{
+ (void)hw;
+ (void)reg;
+ (void)value;
+
+ return IGC_NOT_IMPLEMENTED;
+}
diff --git a/src/spdk/dpdk/drivers/net/igc/base/igc_osdep.h b/src/spdk/dpdk/drivers/net/igc/base/igc_osdep.h
new file mode 100644
index 000000000..25090d65e
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/igc/base/igc_osdep.h
@@ -0,0 +1,163 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019-2020 Intel Corporation
+ */
+
+
+#ifndef _IGC_OSDEP_H_
+#define _IGC_OSDEP_H_
+
+#include <stdint.h>
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_debug.h>
+#include <rte_pci.h>
+#include <rte_log.h>
+#include <rte_io.h>
+#include <rte_byteorder.h>
+
+#include "../igc_logs.h"
+
+#define DELAY(x) rte_delay_us(x)
+#define usec_delay(x) DELAY(x)
+#define usec_delay_irq(x) DELAY(x)
+#define msec_delay(x) DELAY(1000 * (x))
+#define msec_delay_irq(x) DELAY(1000 * (x))
+
+#define DEBUGFUNC(F) DEBUGOUT(F "\n")
+#define DEBUGOUT(S, args...) PMD_DRV_LOG_RAW(DEBUG, S, ##args)
+#define DEBUGOUT1(S, args...) DEBUGOUT(S, ##args)
+#define DEBUGOUT2(S, args...) DEBUGOUT(S, ##args)
+#define DEBUGOUT3(S, args...) DEBUGOUT(S, ##args)
+#define DEBUGOUT6(S, args...) DEBUGOUT(S, ##args)
+#define DEBUGOUT7(S, args...) DEBUGOUT(S, ##args)
+
+#define UNREFERENCED_PARAMETER(_p) (void)(_p)
+#define UNREFERENCED_1PARAMETER(_p) (void)(_p)
+#define UNREFERENCED_2PARAMETER(_p, _q) \
+ do { \
+ (void)(_p); \
+ (void)(_q); \
+ } while (0)
+#define UNREFERENCED_3PARAMETER(_p, _q, _r) \
+ do { \
+ (void)(_p); \
+ (void)(_q); \
+ (void)(_r); \
+ } while (0)
+#define UNREFERENCED_4PARAMETER(_p, _q, _r, _s) \
+ do { \
+ (void)(_p); \
+ (void)(_q); \
+ (void)(_r); \
+ (void)(_s); \
+ } while (0)
+
+#define CMD_MEM_WRT_INVALIDATE 0x0010 /* BIT_4 */
+
+/* Mutex used in the shared code */
+#define IGC_MUTEX uintptr_t
+#define IGC_MUTEX_INIT(mutex) (*(mutex) = 0)
+#define IGC_MUTEX_LOCK(mutex) (*(mutex) = 1)
+#define IGC_MUTEX_UNLOCK(mutex) (*(mutex) = 0)
+
+typedef uint64_t u64;
+typedef uint32_t u32;
+typedef uint16_t u16;
+typedef uint8_t u8;
+typedef int64_t s64;
+typedef int32_t s32;
+typedef int16_t s16;
+typedef int8_t s8;
+
+#define __le16 u16
+#define __le32 u32
+#define __le64 u64
+
+#define IGC_WRITE_FLUSH(a) IGC_READ_REG(a, IGC_STATUS)
+
+#define IGC_PCI_REG(reg) rte_read32(reg)
+
+#define IGC_PCI_REG16(reg) rte_read16(reg)
+
+#define IGC_PCI_REG_WRITE(reg, value) \
+ rte_write32((rte_cpu_to_le_32(value)), reg)
+
+#define IGC_PCI_REG_WRITE_RELAXED(reg, value) \
+ rte_write32_relaxed((rte_cpu_to_le_32(value)), reg)
+
+#define IGC_PCI_REG_WRITE16(reg, value) \
+ rte_write16((rte_cpu_to_le_16(value)), reg)
+
+#define IGC_PCI_REG_ADDR(hw, reg) \
+ ((volatile uint32_t *)((char *)(hw)->hw_addr + (reg)))
+
+#define IGC_PCI_REG_ARRAY_ADDR(hw, reg, index) \
+ IGC_PCI_REG_ADDR((hw), (reg) + ((index) << 2))
+
+#define IGC_PCI_REG_FLASH_ADDR(hw, reg) \
+ ((volatile uint32_t *)((char *)(hw)->flash_address + (reg)))
+
+static inline uint32_t igc_read_addr(volatile void *addr)
+{
+ return rte_le_to_cpu_32(IGC_PCI_REG(addr));
+}
+
+static inline uint16_t igc_read_addr16(volatile void *addr)
+{
+ return rte_le_to_cpu_16(IGC_PCI_REG16(addr));
+}
+
+/* Register READ/WRITE macros */
+
+#define IGC_READ_REG(hw, reg) \
+ igc_read_addr(IGC_PCI_REG_ADDR((hw), (reg)))
+
+#define IGC_READ_REG_LE_VALUE(hw, reg) \
+
rte_read32(IGC_PCI_REG_ADDR((hw), (reg))) + +#define IGC_WRITE_REG(hw, reg, value) \ + IGC_PCI_REG_WRITE(IGC_PCI_REG_ADDR((hw), (reg)), (value)) + +#define IGC_WRITE_REG_LE_VALUE(hw, reg, value) \ + rte_write32(value, IGC_PCI_REG_ADDR((hw), (reg))) + +#define IGC_READ_REG_ARRAY(hw, reg, index) \ + IGC_PCI_REG(IGC_PCI_REG_ARRAY_ADDR((hw), (reg), (index))) + +#define IGC_WRITE_REG_ARRAY(hw, reg, index, value) \ + IGC_PCI_REG_WRITE(IGC_PCI_REG_ARRAY_ADDR((hw), (reg), (index)), \ + (value)) + +#define IGC_READ_REG_ARRAY_DWORD IGC_READ_REG_ARRAY +#define IGC_WRITE_REG_ARRAY_DWORD IGC_WRITE_REG_ARRAY + +/* + * To be able to do IO write, we need to map IO BAR + * (bar 2/4 depending on device). + * Right now mapping multiple BARs is not supported by DPDK. + * Fortunatelly we need it only for legacy hw support. + */ + +#define IGC_WRITE_REG_IO(hw, reg, value) \ + IGC_WRITE_REG(hw, reg, value) + +/* + * Tested on I217/I218 chipset. + */ + +#define IGC_READ_FLASH_REG(hw, reg) \ + igc_read_addr(IGC_PCI_REG_FLASH_ADDR((hw), (reg))) + +#define IGC_READ_FLASH_REG16(hw, reg) \ + igc_read_addr16(IGC_PCI_REG_FLASH_ADDR((hw), (reg))) + +#define IGC_WRITE_FLASH_REG(hw, reg, value) \ + IGC_PCI_REG_WRITE(IGC_PCI_REG_FLASH_ADDR((hw), (reg)), (value)) + +#define IGC_WRITE_FLASH_REG16(hw, reg, value) \ + IGC_PCI_REG_WRITE16(IGC_PCI_REG_FLASH_ADDR((hw), (reg)), (value)) + +#endif /* _IGC_OSDEP_H_ */ diff --git a/src/spdk/dpdk/drivers/net/igc/base/igc_phy.c b/src/spdk/dpdk/drivers/net/igc/base/igc_phy.c new file mode 100644 index 000000000..43bbe69bc --- /dev/null +++ b/src/spdk/dpdk/drivers/net/igc/base/igc_phy.c @@ -0,0 +1,4422 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + +#include "igc_api.h" + +static s32 igc_wait_autoneg(struct igc_hw *hw); +static s32 igc_access_phy_wakeup_reg_bm(struct igc_hw *hw, u32 offset, + u16 *data, bool read, bool page_set); +static u32 igc_get_phy_addr_for_hv_page(u32 page); +static s32 igc_access_phy_debug_regs_hv(struct igc_hw *hw, u32 offset, + u16 *data, bool read); + +/* Cable length tables */ +static const u16 igc_m88_cable_length_table[] = { + 0, 50, 80, 110, 140, 140, IGC_CABLE_LENGTH_UNDEFINED }; +#define M88IGC_CABLE_LENGTH_TABLE_SIZE \ + (sizeof(igc_m88_cable_length_table) / \ + sizeof(igc_m88_cable_length_table[0])) + +static const u16 igc_igp_2_cable_length_table[] = { + 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21, 0, 0, 0, 3, + 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41, 6, 10, 14, 18, 22, + 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61, 21, 26, 31, 35, 40, + 44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82, 40, 45, 51, 56, 61, + 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104, 60, 66, 72, 77, 82, + 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121, 83, 89, 95, + 100, 105, 109, 113, 116, 119, 122, 124, 104, 109, 114, 118, 121, + 124}; +#define IGP02IGC_CABLE_LENGTH_TABLE_SIZE \ + (sizeof(igc_igp_2_cable_length_table) / \ + sizeof(igc_igp_2_cable_length_table[0])) + +/** + * igc_init_phy_ops_generic - Initialize PHY function pointers + * @hw: pointer to the HW structure + * + * Setups up the function pointers to no-op functions + **/ +void igc_init_phy_ops_generic(struct igc_hw *hw) +{ + struct igc_phy_info *phy = &hw->phy; + DEBUGFUNC("igc_init_phy_ops_generic"); + + /* Initialize function pointers */ + phy->ops.init_params = igc_null_ops_generic; + phy->ops.acquire = igc_null_ops_generic; + phy->ops.check_polarity = igc_null_ops_generic; + phy->ops.check_reset_block = igc_null_ops_generic; + phy->ops.commit = 
igc_null_ops_generic; + phy->ops.force_speed_duplex = igc_null_ops_generic; + phy->ops.get_cfg_done = igc_null_ops_generic; + phy->ops.get_cable_length = igc_null_ops_generic; + phy->ops.get_info = igc_null_ops_generic; + phy->ops.set_page = igc_null_set_page; + phy->ops.read_reg = igc_null_read_reg; + phy->ops.read_reg_locked = igc_null_read_reg; + phy->ops.read_reg_page = igc_null_read_reg; + phy->ops.release = igc_null_phy_generic; + phy->ops.reset = igc_null_ops_generic; + phy->ops.set_d0_lplu_state = igc_null_lplu_state; + phy->ops.set_d3_lplu_state = igc_null_lplu_state; + phy->ops.write_reg = igc_null_write_reg; + phy->ops.write_reg_locked = igc_null_write_reg; + phy->ops.write_reg_page = igc_null_write_reg; + phy->ops.power_up = igc_null_phy_generic; + phy->ops.power_down = igc_null_phy_generic; + phy->ops.read_i2c_byte = igc_read_i2c_byte_null; + phy->ops.write_i2c_byte = igc_write_i2c_byte_null; + phy->ops.cfg_on_link_up = igc_null_ops_generic; +} + +/** + * igc_null_set_page - No-op function, return 0 + * @hw: pointer to the HW structure + * @data: dummy variable + **/ +s32 igc_null_set_page(struct igc_hw IGC_UNUSEDARG * hw, + u16 IGC_UNUSEDARG data) +{ + DEBUGFUNC("igc_null_set_page"); + UNREFERENCED_2PARAMETER(hw, data); + return IGC_SUCCESS; +} + +/** + * igc_null_read_reg - No-op function, return 0 + * @hw: pointer to the HW structure + * @offset: dummy variable + * @data: dummy variable + **/ +s32 igc_null_read_reg(struct igc_hw IGC_UNUSEDARG * hw, + u32 IGC_UNUSEDARG offset, u16 IGC_UNUSEDARG * data) +{ + DEBUGFUNC("igc_null_read_reg"); + UNREFERENCED_3PARAMETER(hw, offset, data); + return IGC_SUCCESS; +} + +/** + * igc_null_phy_generic - No-op function, return void + * @hw: pointer to the HW structure + **/ +void igc_null_phy_generic(struct igc_hw IGC_UNUSEDARG * hw) +{ + DEBUGFUNC("igc_null_phy_generic"); + UNREFERENCED_1PARAMETER(hw); +} + +/** + * igc_null_lplu_state - No-op function, return 0 + * @hw: pointer to the HW structure + * @active: dummy variable + **/ +s32 igc_null_lplu_state(struct igc_hw IGC_UNUSEDARG * hw, + bool IGC_UNUSEDARG active) +{ + DEBUGFUNC("igc_null_lplu_state"); + UNREFERENCED_2PARAMETER(hw, active); + return IGC_SUCCESS; +} + +/** + * igc_null_write_reg - No-op function, return 0 + * @hw: pointer to the HW structure + * @offset: dummy variable + * @data: dummy variable + **/ +s32 igc_null_write_reg(struct igc_hw IGC_UNUSEDARG * hw, + u32 IGC_UNUSEDARG offset, u16 IGC_UNUSEDARG data) +{ + DEBUGFUNC("igc_null_write_reg"); + UNREFERENCED_3PARAMETER(hw, offset, data); + return IGC_SUCCESS; +} + +/** + * igc_read_i2c_byte_null - No-op function, return 0 + * @hw: pointer to hardware structure + * @byte_offset: byte offset to write + * @dev_addr: device address + * @data: data value read + * + **/ +s32 igc_read_i2c_byte_null(struct igc_hw IGC_UNUSEDARG * hw, + u8 IGC_UNUSEDARG byte_offset, + u8 IGC_UNUSEDARG dev_addr, + u8 IGC_UNUSEDARG * data) +{ + DEBUGFUNC("igc_read_i2c_byte_null"); + UNREFERENCED_4PARAMETER(hw, byte_offset, dev_addr, data); + return IGC_SUCCESS; +} + +/** + * igc_write_i2c_byte_null - No-op function, return 0 + * @hw: pointer to hardware structure + * @byte_offset: byte offset to write + * @dev_addr: device address + * @data: data value to write + * + **/ +s32 igc_write_i2c_byte_null(struct igc_hw IGC_UNUSEDARG * hw, + u8 IGC_UNUSEDARG byte_offset, + u8 IGC_UNUSEDARG dev_addr, + u8 IGC_UNUSEDARG data) +{ + DEBUGFUNC("igc_write_i2c_byte_null"); + UNREFERENCED_4PARAMETER(hw, byte_offset, dev_addr, data); + return IGC_SUCCESS; +} + 
+/** + * igc_check_reset_block_generic - Check if PHY reset is blocked + * @hw: pointer to the HW structure + * + * Read the PHY management control register and check whether a PHY reset + * is blocked. If a reset is not blocked return IGC_SUCCESS, otherwise + * return IGC_BLK_PHY_RESET (12). + **/ +s32 igc_check_reset_block_generic(struct igc_hw *hw) +{ + u32 manc; + + DEBUGFUNC("igc_check_reset_block"); + + manc = IGC_READ_REG(hw, IGC_MANC); + + return (manc & IGC_MANC_BLK_PHY_RST_ON_IDE) ? + IGC_BLK_PHY_RESET : IGC_SUCCESS; +} + +/** + * igc_get_phy_id - Retrieve the PHY ID and revision + * @hw: pointer to the HW structure + * + * Reads the PHY registers and stores the PHY ID and possibly the PHY + * revision in the hardware structure. + **/ +s32 igc_get_phy_id(struct igc_hw *hw) +{ + struct igc_phy_info *phy = &hw->phy; + s32 ret_val = IGC_SUCCESS; + u16 phy_id; + u16 retry_count = 0; + + DEBUGFUNC("igc_get_phy_id"); + + if (!phy->ops.read_reg) + return IGC_SUCCESS; + + while (retry_count < 2) { + ret_val = phy->ops.read_reg(hw, PHY_ID1, &phy_id); + if (ret_val) + return ret_val; + + phy->id = (u32)(phy_id << 16); + usec_delay(20); + ret_val = phy->ops.read_reg(hw, PHY_ID2, &phy_id); + if (ret_val) + return ret_val; + + phy->id |= (u32)(phy_id & PHY_REVISION_MASK); + phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK); + + if (phy->id != 0 && phy->id != PHY_REVISION_MASK) + return IGC_SUCCESS; + + retry_count++; + } + + return IGC_SUCCESS; +} + +/** + * igc_phy_reset_dsp_generic - Reset PHY DSP + * @hw: pointer to the HW structure + * + * Reset the digital signal processor. + **/ +s32 igc_phy_reset_dsp_generic(struct igc_hw *hw) +{ + s32 ret_val; + + DEBUGFUNC("igc_phy_reset_dsp_generic"); + + if (!hw->phy.ops.write_reg) + return IGC_SUCCESS; + + ret_val = hw->phy.ops.write_reg(hw, M88IGC_PHY_GEN_CONTROL, 0xC1); + if (ret_val) + return ret_val; + + return hw->phy.ops.write_reg(hw, M88IGC_PHY_GEN_CONTROL, 0); +} + +/** + * igc_read_phy_reg_mdic - Read MDI control register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the MDI control register in the PHY at offset and stores the + * information read to data. + **/ +s32 igc_read_phy_reg_mdic(struct igc_hw *hw, u32 offset, u16 *data) +{ + struct igc_phy_info *phy = &hw->phy; + u32 i, mdic = 0; + + DEBUGFUNC("igc_read_phy_reg_mdic"); + + if (offset > MAX_PHY_REG_ADDRESS) { + DEBUGOUT1("PHY Address %d is out of range\n", offset); + return -IGC_ERR_PARAM; + } + + /* Set up Op-code, Phy Address, and register offset in the MDI + * Control register. The MAC will take care of interfacing with the + * PHY to retrieve the desired data. 
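+ * Once the ready bit is set, the low 16 bits of the MDIC register
+ * hold the data read back from the PHY register.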
+ */ + mdic = ((offset << IGC_MDIC_REG_SHIFT) | + (phy->addr << IGC_MDIC_PHY_SHIFT) | + (IGC_MDIC_OP_READ)); + + IGC_WRITE_REG(hw, IGC_MDIC, mdic); + + /* Poll the ready bit to see if the MDI read completed + * Increasing the time out as testing showed failures with + * the lower time out + */ + for (i = 0; i < (IGC_GEN_POLL_TIMEOUT * 3); i++) { + usec_delay_irq(50); + mdic = IGC_READ_REG(hw, IGC_MDIC); + if (mdic & IGC_MDIC_READY) + break; + } + if (!(mdic & IGC_MDIC_READY)) { + DEBUGOUT("MDI Read did not complete\n"); + return -IGC_ERR_PHY; + } + if (mdic & IGC_MDIC_ERROR) { + DEBUGOUT("MDI Error\n"); + return -IGC_ERR_PHY; + } + if (((mdic & IGC_MDIC_REG_MASK) >> IGC_MDIC_REG_SHIFT) != offset) { + DEBUGOUT2("MDI Read offset error - requested %d, returned %d\n", + offset, + (mdic & IGC_MDIC_REG_MASK) >> IGC_MDIC_REG_SHIFT); + return -IGC_ERR_PHY; + } + *data = (u16)mdic; + + /* Allow some time after each MDIC transaction to avoid + * reading duplicate data in the next MDIC transaction. + */ + if (hw->mac.type == igc_pch2lan) + usec_delay_irq(100); + + return IGC_SUCCESS; +} + +/** + * igc_write_phy_reg_mdic - Write MDI control register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write to register at offset + * + * Writes data to MDI control register in the PHY at offset. + **/ +s32 igc_write_phy_reg_mdic(struct igc_hw *hw, u32 offset, u16 data) +{ + struct igc_phy_info *phy = &hw->phy; + u32 i, mdic = 0; + + DEBUGFUNC("igc_write_phy_reg_mdic"); + + if (offset > MAX_PHY_REG_ADDRESS) { + DEBUGOUT1("PHY Address %d is out of range\n", offset); + return -IGC_ERR_PARAM; + } + + /* Set up Op-code, Phy Address, and register offset in the MDI + * Control register. The MAC will take care of interfacing with the + * PHY to retrieve the desired data. + */ + mdic = (((u32)data) | + (offset << IGC_MDIC_REG_SHIFT) | + (phy->addr << IGC_MDIC_PHY_SHIFT) | + (IGC_MDIC_OP_WRITE)); + + IGC_WRITE_REG(hw, IGC_MDIC, mdic); + + /* Poll the ready bit to see if the MDI read completed + * Increasing the time out as testing showed failures with + * the lower time out + */ + for (i = 0; i < (IGC_GEN_POLL_TIMEOUT * 3); i++) { + usec_delay_irq(50); + mdic = IGC_READ_REG(hw, IGC_MDIC); + if (mdic & IGC_MDIC_READY) + break; + } + if (!(mdic & IGC_MDIC_READY)) { + DEBUGOUT("MDI Write did not complete\n"); + return -IGC_ERR_PHY; + } + if (mdic & IGC_MDIC_ERROR) { + DEBUGOUT("MDI Error\n"); + return -IGC_ERR_PHY; + } + if (((mdic & IGC_MDIC_REG_MASK) >> IGC_MDIC_REG_SHIFT) != offset) { + DEBUGOUT2("MDI Write offset error - requested %d, returned %d\n", + offset, + (mdic & IGC_MDIC_REG_MASK) >> IGC_MDIC_REG_SHIFT); + return -IGC_ERR_PHY; + } + + /* Allow some time after each MDIC transaction to avoid + * reading duplicate data in the next MDIC transaction. + */ + if (hw->mac.type == igc_pch2lan) + usec_delay_irq(100); + + return IGC_SUCCESS; +} + +/** + * igc_read_phy_reg_i2c - Read PHY register using i2c + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the PHY register at offset using the i2c interface and stores the + * retrieved information in data. + **/ +s32 igc_read_phy_reg_i2c(struct igc_hw *hw, u32 offset, u16 *data) +{ + struct igc_phy_info *phy = &hw->phy; + u32 i, i2ccmd = 0; + + DEBUGFUNC("igc_read_phy_reg_i2c"); + + /* Set up Op-code, Phy Address, and register address in the I2CCMD + * register. The MAC will take care of interfacing with the + * PHY to retrieve the desired data. 
+ */ + i2ccmd = ((offset << IGC_I2CCMD_REG_ADDR_SHIFT) | + (phy->addr << IGC_I2CCMD_PHY_ADDR_SHIFT) | + (IGC_I2CCMD_OPCODE_READ)); + + IGC_WRITE_REG(hw, IGC_I2CCMD, i2ccmd); + + /* Poll the ready bit to see if the I2C read completed */ + for (i = 0; i < IGC_I2CCMD_PHY_TIMEOUT; i++) { + usec_delay(50); + i2ccmd = IGC_READ_REG(hw, IGC_I2CCMD); + if (i2ccmd & IGC_I2CCMD_READY) + break; + } + if (!(i2ccmd & IGC_I2CCMD_READY)) { + DEBUGOUT("I2CCMD Read did not complete\n"); + return -IGC_ERR_PHY; + } + if (i2ccmd & IGC_I2CCMD_ERROR) { + DEBUGOUT("I2CCMD Error bit set\n"); + return -IGC_ERR_PHY; + } + + /* Need to byte-swap the 16-bit value. */ + *data = ((i2ccmd >> 8) & 0x00FF) | ((i2ccmd << 8) & 0xFF00); + + return IGC_SUCCESS; +} + +/** + * igc_write_phy_reg_i2c - Write PHY register using i2c + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Writes the data to PHY register at the offset using the i2c interface. + **/ +s32 igc_write_phy_reg_i2c(struct igc_hw *hw, u32 offset, u16 data) +{ + struct igc_phy_info *phy = &hw->phy; + u32 i, i2ccmd = 0; + u16 phy_data_swapped; + + DEBUGFUNC("igc_write_phy_reg_i2c"); + + /* Prevent overwriting SFP I2C EEPROM which is at A0 address. */ + if (hw->phy.addr == 0 || hw->phy.addr > 7) { + DEBUGOUT1("PHY I2C Address %d is out of range.\n", + hw->phy.addr); + return -IGC_ERR_CONFIG; + } + + /* Swap the data bytes for the I2C interface */ + phy_data_swapped = ((data >> 8) & 0x00FF) | ((data << 8) & 0xFF00); + + /* Set up Op-code, Phy Address, and register address in the I2CCMD + * register. The MAC will take care of interfacing with the + * PHY to retrieve the desired data. + */ + i2ccmd = ((offset << IGC_I2CCMD_REG_ADDR_SHIFT) | + (phy->addr << IGC_I2CCMD_PHY_ADDR_SHIFT) | + IGC_I2CCMD_OPCODE_WRITE | + phy_data_swapped); + + IGC_WRITE_REG(hw, IGC_I2CCMD, i2ccmd); + + /* Poll the ready bit to see if the I2C read completed */ + for (i = 0; i < IGC_I2CCMD_PHY_TIMEOUT; i++) { + usec_delay(50); + i2ccmd = IGC_READ_REG(hw, IGC_I2CCMD); + if (i2ccmd & IGC_I2CCMD_READY) + break; + } + if (!(i2ccmd & IGC_I2CCMD_READY)) { + DEBUGOUT("I2CCMD Write did not complete\n"); + return -IGC_ERR_PHY; + } + if (i2ccmd & IGC_I2CCMD_ERROR) { + DEBUGOUT("I2CCMD Error bit set\n"); + return -IGC_ERR_PHY; + } + + return IGC_SUCCESS; +} + +/** + * igc_read_sfp_data_byte - Reads SFP module data. + * @hw: pointer to the HW structure + * @offset: byte location offset to be read + * @data: read data buffer pointer + * + * Reads one byte from SFP module data stored + * in SFP resided EEPROM memory or SFP diagnostic area. + * Function should be called with + * IGC_I2CCMD_SFP_DATA_ADDR() for SFP module database access + * IGC_I2CCMD_SFP_DIAG_ADDR() for SFP diagnostics parameters + * access + **/ +s32 igc_read_sfp_data_byte(struct igc_hw *hw, u16 offset, u8 *data) +{ + u32 i = 0; + u32 i2ccmd = 0; + u32 data_local = 0; + + DEBUGFUNC("igc_read_sfp_data_byte"); + + if (offset > IGC_I2CCMD_SFP_DIAG_ADDR(255)) { + DEBUGOUT("I2CCMD command address exceeds upper limit\n"); + return -IGC_ERR_PHY; + } + + /* Set up Op-code, EEPROM Address,in the I2CCMD + * register. The MAC will take care of interfacing with the + * EEPROM to retrieve the desired data. 
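+ * Unlike the PHY register accessors above, no PHY address field is
+ * programmed here; only the byte offset and the read op-code are set.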
+ */ + i2ccmd = ((offset << IGC_I2CCMD_REG_ADDR_SHIFT) | + IGC_I2CCMD_OPCODE_READ); + + IGC_WRITE_REG(hw, IGC_I2CCMD, i2ccmd); + + /* Poll the ready bit to see if the I2C read completed */ + for (i = 0; i < IGC_I2CCMD_PHY_TIMEOUT; i++) { + usec_delay(50); + data_local = IGC_READ_REG(hw, IGC_I2CCMD); + if (data_local & IGC_I2CCMD_READY) + break; + } + if (!(data_local & IGC_I2CCMD_READY)) { + DEBUGOUT("I2CCMD Read did not complete\n"); + return -IGC_ERR_PHY; + } + if (data_local & IGC_I2CCMD_ERROR) { + DEBUGOUT("I2CCMD Error bit set\n"); + return -IGC_ERR_PHY; + } + *data = (u8)data_local & 0xFF; + + return IGC_SUCCESS; +} + +/** + * igc_write_sfp_data_byte - Writes SFP module data. + * @hw: pointer to the HW structure + * @offset: byte location offset to write to + * @data: data to write + * + * Writes one byte to SFP module data stored + * in SFP resided EEPROM memory or SFP diagnostic area. + * Function should be called with + * IGC_I2CCMD_SFP_DATA_ADDR() for SFP module database access + * IGC_I2CCMD_SFP_DIAG_ADDR() for SFP diagnostics parameters + * access + **/ +s32 igc_write_sfp_data_byte(struct igc_hw *hw, u16 offset, u8 data) +{ + u32 i = 0; + u32 i2ccmd = 0; + u32 data_local = 0; + + DEBUGFUNC("igc_write_sfp_data_byte"); + + if (offset > IGC_I2CCMD_SFP_DIAG_ADDR(255)) { + DEBUGOUT("I2CCMD command address exceeds upper limit\n"); + return -IGC_ERR_PHY; + } + /* The programming interface is 16 bits wide + * so we need to read the whole word first + * then update appropriate byte lane and write + * the updated word back. + */ + /* Set up Op-code, EEPROM Address,in the I2CCMD + * register. The MAC will take care of interfacing + * with an EEPROM to write the data given. + */ + i2ccmd = ((offset << IGC_I2CCMD_REG_ADDR_SHIFT) | + IGC_I2CCMD_OPCODE_READ); + /* Set a command to read single word */ + IGC_WRITE_REG(hw, IGC_I2CCMD, i2ccmd); + for (i = 0; i < IGC_I2CCMD_PHY_TIMEOUT; i++) { + usec_delay(50); + /* Poll the ready bit to see if lastly + * launched I2C operation completed + */ + i2ccmd = IGC_READ_REG(hw, IGC_I2CCMD); + if (i2ccmd & IGC_I2CCMD_READY) { + /* Check if this is READ or WRITE phase */ + if ((i2ccmd & IGC_I2CCMD_OPCODE_READ) == + IGC_I2CCMD_OPCODE_READ) { + /* Write the selected byte + * lane and update whole word + */ + data_local = i2ccmd & 0xFF00; + data_local |= (u32)data; + i2ccmd = ((offset << + IGC_I2CCMD_REG_ADDR_SHIFT) | + IGC_I2CCMD_OPCODE_WRITE | data_local); + IGC_WRITE_REG(hw, IGC_I2CCMD, i2ccmd); + } else { + break; + } + } + } + if (!(i2ccmd & IGC_I2CCMD_READY)) { + DEBUGOUT("I2CCMD Write did not complete\n"); + return -IGC_ERR_PHY; + } + if (i2ccmd & IGC_I2CCMD_ERROR) { + DEBUGOUT("I2CCMD Error bit set\n"); + return -IGC_ERR_PHY; + } + return IGC_SUCCESS; +} + +/** + * igc_read_phy_reg_m88 - Read m88 PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Acquires semaphore, if necessary, then reads the PHY register at offset + * and storing the retrieved information in data. Release any acquired + * semaphores before exiting. 
+ **/ +s32 igc_read_phy_reg_m88(struct igc_hw *hw, u32 offset, u16 *data) +{ + s32 ret_val; + + DEBUGFUNC("igc_read_phy_reg_m88"); + + if (!hw->phy.ops.acquire) + return IGC_SUCCESS; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + ret_val = igc_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, + data); + + hw->phy.ops.release(hw); + + return ret_val; +} + +/** + * igc_write_phy_reg_m88 - Write m88 PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore, if necessary, then writes the data to PHY register + * at the offset. Release any acquired semaphores before exiting. + **/ +s32 igc_write_phy_reg_m88(struct igc_hw *hw, u32 offset, u16 data) +{ + s32 ret_val; + + DEBUGFUNC("igc_write_phy_reg_m88"); + + if (!hw->phy.ops.acquire) + return IGC_SUCCESS; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + ret_val = igc_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, + data); + + hw->phy.ops.release(hw); + + return ret_val; +} + +/** + * igc_set_page_igp - Set page as on IGP-like PHY(s) + * @hw: pointer to the HW structure + * @page: page to set (shifted left when necessary) + * + * Sets PHY page required for PHY register access. Assumes semaphore is + * already acquired. Note, this function sets phy.addr to 1 so the caller + * must set it appropriately (if necessary) after this function returns. + **/ +s32 igc_set_page_igp(struct igc_hw *hw, u16 page) +{ + DEBUGFUNC("igc_set_page_igp"); + + DEBUGOUT1("Setting page 0x%x\n", page); + + hw->phy.addr = 1; + + return igc_write_phy_reg_mdic(hw, IGP01IGC_PHY_PAGE_SELECT, page); +} + +/** + * __igc_read_phy_reg_igp - Read igp PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * @locked: semaphore has already been acquired or not + * + * Acquires semaphore, if necessary, then reads the PHY register at offset + * and stores the retrieved information in data. Release any acquired + * semaphores before exiting. + **/ +static s32 __igc_read_phy_reg_igp(struct igc_hw *hw, u32 offset, u16 *data, + bool locked) +{ + s32 ret_val = IGC_SUCCESS; + + DEBUGFUNC("__igc_read_phy_reg_igp"); + + if (!locked) { + if (!hw->phy.ops.acquire) + return IGC_SUCCESS; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + } + + if (offset > MAX_PHY_MULTI_PAGE_REG) + ret_val = igc_write_phy_reg_mdic(hw, + IGP01IGC_PHY_PAGE_SELECT, + (u16)offset); + if (!ret_val) + ret_val = igc_read_phy_reg_mdic(hw, + MAX_PHY_REG_ADDRESS & offset, + data); + if (!locked) + hw->phy.ops.release(hw); + + return ret_val; +} + +/** + * igc_read_phy_reg_igp - Read igp PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Acquires semaphore then reads the PHY register at offset and stores the + * retrieved information in data. + * Release the acquired semaphore before exiting. + **/ +s32 igc_read_phy_reg_igp(struct igc_hw *hw, u32 offset, u16 *data) +{ + return __igc_read_phy_reg_igp(hw, offset, data, false); +} + +/** + * igc_read_phy_reg_igp_locked - Read igp PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the PHY register at offset and stores the retrieved information + * in data. Assumes semaphore already acquired. 
+ **/ +s32 igc_read_phy_reg_igp_locked(struct igc_hw *hw, u32 offset, u16 *data) +{ + return __igc_read_phy_reg_igp(hw, offset, data, true); +} + +/** + * igc_write_phy_reg_igp - Write igp PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * @locked: semaphore has already been acquired or not + * + * Acquires semaphore, if necessary, then writes the data to PHY register + * at the offset. Release any acquired semaphores before exiting. + **/ +static s32 __igc_write_phy_reg_igp(struct igc_hw *hw, u32 offset, u16 data, + bool locked) +{ + s32 ret_val = IGC_SUCCESS; + + DEBUGFUNC("igc_write_phy_reg_igp"); + + if (!locked) { + if (!hw->phy.ops.acquire) + return IGC_SUCCESS; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + } + + if (offset > MAX_PHY_MULTI_PAGE_REG) + ret_val = igc_write_phy_reg_mdic(hw, + IGP01IGC_PHY_PAGE_SELECT, + (u16)offset); + if (!ret_val) + ret_val = igc_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & + offset, + data); + if (!locked) + hw->phy.ops.release(hw); + + return ret_val; +} + +/** + * igc_write_phy_reg_igp - Write igp PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore then writes the data to PHY register + * at the offset. Release any acquired semaphores before exiting. + **/ +s32 igc_write_phy_reg_igp(struct igc_hw *hw, u32 offset, u16 data) +{ + return __igc_write_phy_reg_igp(hw, offset, data, false); +} + +/** + * igc_write_phy_reg_igp_locked - Write igp PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Writes the data to PHY register at the offset. + * Assumes semaphore already acquired. + **/ +s32 igc_write_phy_reg_igp_locked(struct igc_hw *hw, u32 offset, u16 data) +{ + return __igc_write_phy_reg_igp(hw, offset, data, true); +} + +/** + * __igc_read_kmrn_reg - Read kumeran register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * @locked: semaphore has already been acquired or not + * + * Acquires semaphore, if necessary. Then reads the PHY register at offset + * using the kumeran interface. The information retrieved is stored in data. + * Release any acquired semaphores before exiting. + **/ +static s32 __igc_read_kmrn_reg(struct igc_hw *hw, u32 offset, u16 *data, + bool locked) +{ + u32 kmrnctrlsta; + + DEBUGFUNC("__igc_read_kmrn_reg"); + + if (!locked) { + s32 ret_val = IGC_SUCCESS; + + if (!hw->phy.ops.acquire) + return IGC_SUCCESS; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + } + + kmrnctrlsta = ((offset << IGC_KMRNCTRLSTA_OFFSET_SHIFT) & + IGC_KMRNCTRLSTA_OFFSET) | IGC_KMRNCTRLSTA_REN; + IGC_WRITE_REG(hw, IGC_KMRNCTRLSTA, kmrnctrlsta); + IGC_WRITE_FLUSH(hw); + + usec_delay(2); + + kmrnctrlsta = IGC_READ_REG(hw, IGC_KMRNCTRLSTA); + *data = (u16)kmrnctrlsta; + + if (!locked) + hw->phy.ops.release(hw); + + return IGC_SUCCESS; +} + +/** + * igc_read_kmrn_reg_generic - Read kumeran register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Acquires semaphore then reads the PHY register at offset using the + * kumeran interface. The information retrieved is stored in data. + * Release the acquired semaphore before exiting. 
+ **/ +s32 igc_read_kmrn_reg_generic(struct igc_hw *hw, u32 offset, u16 *data) +{ + return __igc_read_kmrn_reg(hw, offset, data, false); +} + +/** + * igc_read_kmrn_reg_locked - Read kumeran register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the PHY register at offset using the kumeran interface. The + * information retrieved is stored in data. + * Assumes semaphore already acquired. + **/ +s32 igc_read_kmrn_reg_locked(struct igc_hw *hw, u32 offset, u16 *data) +{ + return __igc_read_kmrn_reg(hw, offset, data, true); +} + +/** + * __igc_write_kmrn_reg - Write kumeran register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * @locked: semaphore has already been acquired or not + * + * Acquires semaphore, if necessary. Then write the data to PHY register + * at the offset using the kumeran interface. Release any acquired semaphores + * before exiting. + **/ +static s32 __igc_write_kmrn_reg(struct igc_hw *hw, u32 offset, u16 data, + bool locked) +{ + u32 kmrnctrlsta; + + DEBUGFUNC("igc_write_kmrn_reg_generic"); + + if (!locked) { + s32 ret_val = IGC_SUCCESS; + + if (!hw->phy.ops.acquire) + return IGC_SUCCESS; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + } + + kmrnctrlsta = ((offset << IGC_KMRNCTRLSTA_OFFSET_SHIFT) & + IGC_KMRNCTRLSTA_OFFSET) | data; + IGC_WRITE_REG(hw, IGC_KMRNCTRLSTA, kmrnctrlsta); + IGC_WRITE_FLUSH(hw); + + usec_delay(2); + + if (!locked) + hw->phy.ops.release(hw); + + return IGC_SUCCESS; +} + +/** + * igc_write_kmrn_reg_generic - Write kumeran register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore then writes the data to the PHY register at the offset + * using the kumeran interface. Release the acquired semaphore before exiting. + **/ +s32 igc_write_kmrn_reg_generic(struct igc_hw *hw, u32 offset, u16 data) +{ + return __igc_write_kmrn_reg(hw, offset, data, false); +} + +/** + * igc_write_kmrn_reg_locked - Write kumeran register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Write the data to PHY register at the offset using the kumeran interface. + * Assumes semaphore already acquired. + **/ +s32 igc_write_kmrn_reg_locked(struct igc_hw *hw, u32 offset, u16 data) +{ + return __igc_write_kmrn_reg(hw, offset, data, true); +} + +/** + * igc_set_master_slave_mode - Setup PHY for Master/slave mode + * @hw: pointer to the HW structure + * + * Sets up Master/slave mode + **/ +static s32 igc_set_master_slave_mode(struct igc_hw *hw) +{ + s32 ret_val; + u16 phy_data; + + /* Resolve Master/Slave mode */ + ret_val = hw->phy.ops.read_reg(hw, PHY_1000T_CTRL, &phy_data); + if (ret_val) + return ret_val; + + /* load defaults for future use */ + hw->phy.original_ms_type = (phy_data & CR_1000T_MS_ENABLE) ? + ((phy_data & CR_1000T_MS_VALUE) ? 
+ igc_ms_force_master : + igc_ms_force_slave) : igc_ms_auto; + + switch (hw->phy.ms_type) { + case igc_ms_force_master: + phy_data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE); + break; + case igc_ms_force_slave: + phy_data |= CR_1000T_MS_ENABLE; + phy_data &= ~(CR_1000T_MS_VALUE); + break; + case igc_ms_auto: + phy_data &= ~CR_1000T_MS_ENABLE; + /* fall-through */ + default: + break; + } + + return hw->phy.ops.write_reg(hw, PHY_1000T_CTRL, phy_data); +} + +/** + * igc_copper_link_setup_82577 - Setup 82577 PHY for copper link + * @hw: pointer to the HW structure + * + * Sets up Carrier-sense on Transmit and downshift values. + **/ +s32 igc_copper_link_setup_82577(struct igc_hw *hw) +{ + s32 ret_val; + u16 phy_data; + + DEBUGFUNC("igc_copper_link_setup_82577"); + + if (hw->phy.type == igc_phy_82580) { + ret_val = hw->phy.ops.reset(hw); + if (ret_val) { + DEBUGOUT("Error resetting the PHY.\n"); + return ret_val; + } + } + + /* Enable CRS on Tx. This must be set for half-duplex operation. */ + ret_val = hw->phy.ops.read_reg(hw, I82577_CFG_REG, &phy_data); + if (ret_val) + return ret_val; + + phy_data |= I82577_CFG_ASSERT_CRS_ON_TX; + + /* Enable downshift */ + phy_data |= I82577_CFG_ENABLE_DOWNSHIFT; + + ret_val = hw->phy.ops.write_reg(hw, I82577_CFG_REG, phy_data); + if (ret_val) + return ret_val; + + /* Set MDI/MDIX mode */ + ret_val = hw->phy.ops.read_reg(hw, I82577_PHY_CTRL_2, &phy_data); + if (ret_val) + return ret_val; + phy_data &= ~I82577_PHY_CTRL2_MDIX_CFG_MASK; + /* Options: + * 0 - Auto (default) + * 1 - MDI mode + * 2 - MDI-X mode + */ + switch (hw->phy.mdix) { + case 1: + break; + case 2: + phy_data |= I82577_PHY_CTRL2_MANUAL_MDIX; + break; + case 0: + default: + phy_data |= I82577_PHY_CTRL2_AUTO_MDI_MDIX; + break; + } + ret_val = hw->phy.ops.write_reg(hw, I82577_PHY_CTRL_2, phy_data); + if (ret_val) + return ret_val; + + return igc_set_master_slave_mode(hw); +} + +/** + * igc_copper_link_setup_m88 - Setup m88 PHY's for copper link + * @hw: pointer to the HW structure + * + * Sets up MDI/MDI-X and polarity for m88 PHY's. If necessary, transmit clock + * and downshift values are set also. + **/ +s32 igc_copper_link_setup_m88(struct igc_hw *hw) +{ + struct igc_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + + DEBUGFUNC("igc_copper_link_setup_m88"); + + + /* Enable CRS on Tx. This must be set for half-duplex operation. 
*/ + ret_val = phy->ops.read_reg(hw, M88IGC_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + /* For BM PHY this bit is downshift enable */ + if (phy->type != igc_phy_bm) + phy_data |= M88IGC_PSCR_ASSERT_CRS_ON_TX; + + /* Options: + * MDI/MDI-X = 0 (default) + * 0 - Auto for all speeds + * 1 - MDI mode + * 2 - MDI-X mode + * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes) + */ + phy_data &= ~M88IGC_PSCR_AUTO_X_MODE; + + switch (phy->mdix) { + case 1: + phy_data |= M88IGC_PSCR_MDI_MANUAL_MODE; + break; + case 2: + phy_data |= M88IGC_PSCR_MDIX_MANUAL_MODE; + break; + case 3: + phy_data |= M88IGC_PSCR_AUTO_X_1000T; + break; + case 0: + default: + phy_data |= M88IGC_PSCR_AUTO_X_MODE; + break; + } + + /* Options: + * disable_polarity_correction = 0 (default) + * Automatic Correction for Reversed Cable Polarity + * 0 - Disabled + * 1 - Enabled + */ + phy_data &= ~M88IGC_PSCR_POLARITY_REVERSAL; + if (phy->disable_polarity_correction) + phy_data |= M88IGC_PSCR_POLARITY_REVERSAL; + + /* Enable downshift on BM (disabled by default) */ + if (phy->type == igc_phy_bm) { + /* For 82574/82583, first disable then enable downshift */ + if (phy->id == BMIGC_E_PHY_ID_R2) { + phy_data &= ~BMIGC_PSCR_ENABLE_DOWNSHIFT; + ret_val = phy->ops.write_reg(hw, M88IGC_PHY_SPEC_CTRL, + phy_data); + if (ret_val) + return ret_val; + /* Commit the changes. */ + ret_val = phy->ops.commit(hw); + if (ret_val) { + DEBUGOUT("Error committing the PHY changes\n"); + return ret_val; + } + } + + phy_data |= BMIGC_PSCR_ENABLE_DOWNSHIFT; + } + + ret_val = phy->ops.write_reg(hw, M88IGC_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + if (phy->type == igc_phy_m88 && phy->revision < IGC_REVISION_4 && + phy->id != BMIGC_E_PHY_ID_R2) { + /* Force TX_CLK in the Extended PHY Specific Control Register + * to 25MHz clock. + */ + ret_val = phy->ops.read_reg(hw, M88IGC_EXT_PHY_SPEC_CTRL, + &phy_data); + if (ret_val) + return ret_val; + + phy_data |= M88IGC_EPSCR_TX_CLK_25; + + if (phy->revision == IGC_REVISION_2 && + phy->id == M88E1111_I_PHY_ID) { + /* 82573L PHY - set the downshift counter to 5x. */ + phy_data &= ~M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK; + phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X; + } else { + /* Configure Master and Slave downshift values */ + phy_data &= ~(M88IGC_EPSCR_MASTER_DOWNSHIFT_MASK | + M88IGC_EPSCR_SLAVE_DOWNSHIFT_MASK); + phy_data |= (M88IGC_EPSCR_MASTER_DOWNSHIFT_1X | + M88IGC_EPSCR_SLAVE_DOWNSHIFT_1X); + } + ret_val = phy->ops.write_reg(hw, M88IGC_EXT_PHY_SPEC_CTRL, + phy_data); + if (ret_val) + return ret_val; + } + + if (phy->type == igc_phy_bm && phy->id == BMIGC_E_PHY_ID_R2) { + /* Set PHY page 0, register 29 to 0x0003 */ + ret_val = phy->ops.write_reg(hw, 29, 0x0003); + if (ret_val) + return ret_val; + + /* Set PHY page 0, register 30 to 0x0000 */ + ret_val = phy->ops.write_reg(hw, 30, 0x0000); + if (ret_val) + return ret_val; + } + + /* Commit the changes. */ + ret_val = phy->ops.commit(hw); + if (ret_val) { + DEBUGOUT("Error committing the PHY changes\n"); + return ret_val; + } + + if (phy->type == igc_phy_82578) { + ret_val = phy->ops.read_reg(hw, M88IGC_EXT_PHY_SPEC_CTRL, + &phy_data); + if (ret_val) + return ret_val; + + /* 82578 PHY - set the downshift count to 1x. 
*/ + phy_data |= I82578_EPSCR_DOWNSHIFT_ENABLE; + phy_data &= ~I82578_EPSCR_DOWNSHIFT_COUNTER_MASK; + ret_val = phy->ops.write_reg(hw, M88IGC_EXT_PHY_SPEC_CTRL, + phy_data); + if (ret_val) + return ret_val; + } + + return IGC_SUCCESS; +} + +/** + * igc_copper_link_setup_m88_gen2 - Setup m88 PHY's for copper link + * @hw: pointer to the HW structure + * + * Sets up MDI/MDI-X and polarity for i347-AT4, m88e1322 and m88e1112 PHY's. + * Also enables and sets the downshift parameters. + **/ +s32 igc_copper_link_setup_m88_gen2(struct igc_hw *hw) +{ + struct igc_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + + DEBUGFUNC("igc_copper_link_setup_m88_gen2"); + + + /* Enable CRS on Tx. This must be set for half-duplex operation. */ + ret_val = phy->ops.read_reg(hw, M88IGC_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + /* Options: + * MDI/MDI-X = 0 (default) + * 0 - Auto for all speeds + * 1 - MDI mode + * 2 - MDI-X mode + * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes) + */ + phy_data &= ~M88IGC_PSCR_AUTO_X_MODE; + + switch (phy->mdix) { + case 1: + phy_data |= M88IGC_PSCR_MDI_MANUAL_MODE; + break; + case 2: + phy_data |= M88IGC_PSCR_MDIX_MANUAL_MODE; + break; + case 3: + /* M88E1112 does not support this mode) */ + if (phy->id != M88E1112_E_PHY_ID) { + phy_data |= M88IGC_PSCR_AUTO_X_1000T; + break; + } + /* Fall through */ + case 0: + default: + phy_data |= M88IGC_PSCR_AUTO_X_MODE; + break; + } + + /* Options: + * disable_polarity_correction = 0 (default) + * Automatic Correction for Reversed Cable Polarity + * 0 - Disabled + * 1 - Enabled + */ + phy_data &= ~M88IGC_PSCR_POLARITY_REVERSAL; + if (phy->disable_polarity_correction) + phy_data |= M88IGC_PSCR_POLARITY_REVERSAL; + + /* Enable downshift and setting it to X6 */ + if (phy->id == M88E1543_E_PHY_ID) { + phy_data &= ~I347AT4_PSCR_DOWNSHIFT_ENABLE; + ret_val = + phy->ops.write_reg(hw, M88IGC_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + ret_val = phy->ops.commit(hw); + if (ret_val) { + DEBUGOUT("Error committing the PHY changes\n"); + return ret_val; + } + } + + phy_data &= ~I347AT4_PSCR_DOWNSHIFT_MASK; + phy_data |= I347AT4_PSCR_DOWNSHIFT_6X; + phy_data |= I347AT4_PSCR_DOWNSHIFT_ENABLE; + + ret_val = phy->ops.write_reg(hw, M88IGC_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + /* Commit the changes. */ + ret_val = phy->ops.commit(hw); + if (ret_val) { + DEBUGOUT("Error committing the PHY changes\n"); + return ret_val; + } + + ret_val = igc_set_master_slave_mode(hw); + if (ret_val) + return ret_val; + + return IGC_SUCCESS; +} + +/** + * igc_copper_link_setup_igp - Setup igp PHY's for copper link + * @hw: pointer to the HW structure + * + * Sets up LPLU, MDI/MDI-X, polarity, Smartspeed and Master/Slave config for + * igp PHY's. + **/ +s32 igc_copper_link_setup_igp(struct igc_hw *hw) +{ + struct igc_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + + DEBUGFUNC("igc_copper_link_setup_igp"); + + + ret_val = hw->phy.ops.reset(hw); + if (ret_val) { + DEBUGOUT("Error resetting the PHY.\n"); + return ret_val; + } + + /* Wait 100ms for MAC to configure PHY from NVM settings, to avoid + * timeout issues when LFS is enabled. + */ + msec_delay(100); + + /* The NVM settings will configure LPLU in D3 for + * non-IGP1 PHYs. 
+ */ + if (phy->type == igc_phy_igp) { + /* disable lplu d3 during driver init */ + ret_val = hw->phy.ops.set_d3_lplu_state(hw, false); + if (ret_val) { + DEBUGOUT("Error Disabling LPLU D3\n"); + return ret_val; + } + } + + /* disable lplu d0 during driver init */ + if (hw->phy.ops.set_d0_lplu_state) { + ret_val = hw->phy.ops.set_d0_lplu_state(hw, false); + if (ret_val) { + DEBUGOUT("Error Disabling LPLU D0\n"); + return ret_val; + } + } + /* Configure mdi-mdix settings */ + ret_val = phy->ops.read_reg(hw, IGP01IGC_PHY_PORT_CTRL, &data); + if (ret_val) + return ret_val; + + data &= ~IGP01IGC_PSCR_AUTO_MDIX; + + switch (phy->mdix) { + case 1: + data &= ~IGP01IGC_PSCR_FORCE_MDI_MDIX; + break; + case 2: + data |= IGP01IGC_PSCR_FORCE_MDI_MDIX; + break; + case 0: + default: + data |= IGP01IGC_PSCR_AUTO_MDIX; + break; + } + ret_val = phy->ops.write_reg(hw, IGP01IGC_PHY_PORT_CTRL, data); + if (ret_val) + return ret_val; + + /* set auto-master slave resolution settings */ + if (hw->mac.autoneg) { + /* when autonegotiation advertisement is only 1000Mbps then we + * should disable SmartSpeed and enable Auto MasterSlave + * resolution as hardware default. + */ + if (phy->autoneg_advertised == ADVERTISE_1000_FULL) { + /* Disable SmartSpeed */ + ret_val = phy->ops.read_reg(hw, + IGP01IGC_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data &= ~IGP01IGC_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, + IGP01IGC_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + + /* Set auto Master/Slave resolution process */ + ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, &data); + if (ret_val) + return ret_val; + + data &= ~CR_1000T_MS_ENABLE; + ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL, data); + if (ret_val) + return ret_val; + } + + ret_val = igc_set_master_slave_mode(hw); + } + + return ret_val; +} + +/** + * igc_phy_setup_autoneg - Configure PHY for auto-negotiation + * @hw: pointer to the HW structure + * + * Reads the MII auto-neg advertisement register and/or the 1000T control + * register and if the PHY is already setup for auto-negotiation, then + * return successful. Otherwise, setup advertisement and flow control to + * the appropriate values for the wanted auto-negotiation. + **/ +s32 igc_phy_setup_autoneg(struct igc_hw *hw) +{ + struct igc_phy_info *phy = &hw->phy; + s32 ret_val; + u16 mii_autoneg_adv_reg; + u16 mii_1000t_ctrl_reg = 0; + u16 aneg_multigbt_an_ctrl = 0; + + DEBUGFUNC("igc_phy_setup_autoneg"); + + phy->autoneg_advertised &= phy->autoneg_mask; + + /* Read the MII Auto-Neg Advertisement Register (Address 4). */ + ret_val = phy->ops.read_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg); + if (ret_val) + return ret_val; + + if (phy->autoneg_mask & ADVERTISE_1000_FULL) { + /* Read the MII 1000Base-T Control Register (Address 9). */ + ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, + &mii_1000t_ctrl_reg); + if (ret_val) + return ret_val; + } + + if ((phy->autoneg_mask & ADVERTISE_2500_FULL) && + hw->phy.id == I225_I_PHY_ID) { + /* Read the MULTI GBT AN Control Register - reg 7.32 */ + ret_val = phy->ops.read_reg(hw, (STANDARD_AN_REG_MASK << + MMD_DEVADDR_SHIFT) | + ANEG_MULTIGBT_AN_CTRL, + &aneg_multigbt_an_ctrl); + + if (ret_val) + return ret_val; + } + + /* Need to parse both autoneg_advertised and fc and set up + * the appropriate PHY registers. First we will parse for + * autoneg_advertised software override. Since we can advertise + * a plethora of combinations, we need to check each bit + * individually. 
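+ * For example, if autoneg_advertised is ADVERTISE_100_FULL |
+ * ADVERTISE_1000_FULL, only NWAY_AR_100TX_FD_CAPS and CR_1000T_FD_CAPS
+ * are set below; the other 10/100/1000 speed bits stay cleared.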
+ */ + + /* First we clear all the 10/100 mb speed bits in the Auto-Neg + * Advertisement Register (Address 4) and the 1000 mb speed bits in + * the 1000Base-T Control Register (Address 9). + */ + mii_autoneg_adv_reg &= ~(NWAY_AR_100TX_FD_CAPS | + NWAY_AR_100TX_HD_CAPS | + NWAY_AR_10T_FD_CAPS | + NWAY_AR_10T_HD_CAPS); + mii_1000t_ctrl_reg &= ~(CR_1000T_HD_CAPS | CR_1000T_FD_CAPS); + + DEBUGOUT1("autoneg_advertised %x\n", phy->autoneg_advertised); + + /* Do we want to advertise 10 Mb Half Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_10_HALF) { + DEBUGOUT("Advertise 10mb Half duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS; + } + + /* Do we want to advertise 10 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_10_FULL) { + DEBUGOUT("Advertise 10mb Full duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS; + } + + /* Do we want to advertise 100 Mb Half Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_100_HALF) { + DEBUGOUT("Advertise 100mb Half duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS; + } + + /* Do we want to advertise 100 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_100_FULL) { + DEBUGOUT("Advertise 100mb Full duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS; + } + + /* We do not allow the Phy to advertise 1000 Mb Half Duplex */ + if (phy->autoneg_advertised & ADVERTISE_1000_HALF) + DEBUGOUT("Advertise 1000mb Half duplex request denied!\n"); + + /* Do we want to advertise 1000 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_1000_FULL) { + DEBUGOUT("Advertise 1000mb Full duplex\n"); + mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS; + } + + /* We do not allow the Phy to advertise 2500 Mb Half Duplex */ + if (phy->autoneg_advertised & ADVERTISE_2500_HALF) + DEBUGOUT("Advertise 2500mb Half duplex request denied!\n"); + + /* Do we want to advertise 2500 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_2500_FULL) { + DEBUGOUT("Advertise 2500mb Full duplex\n"); + aneg_multigbt_an_ctrl |= CR_2500T_FD_CAPS; + } else { + aneg_multigbt_an_ctrl &= ~CR_2500T_FD_CAPS; + } + + /* Check for a software override of the flow control settings, and + * setup the PHY advertisement registers accordingly. If + * auto-negotiation is enabled, then software will have to set the + * "PAUSE" bits to the correct value in the Auto-Negotiation + * Advertisement Register (PHY_AUTONEG_ADV) and re-start auto- + * negotiation. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames + * but we do not support receiving pause frames). + * 3: Both Rx and Tx flow control (symmetric) are enabled. + * other: No software override. The flow control configuration + * in the EEPROM is used. + */ + switch (hw->fc.current_mode) { + case igc_fc_none: + /* Flow control (Rx & Tx) is completely disabled by a + * software over-ride. + */ + mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + case igc_fc_rx_pause: + /* Rx Flow control is enabled, and Tx Flow control is + * disabled, by a software over-ride. + * + * Since there really isn't a way to advertise that we are + * capable of Rx Pause ONLY, we will advertise that we + * support both symmetric and asymmetric Rx PAUSE. Later + * (in igc_config_fc_after_link_up) we will disable the + * hw's ability to send PAUSE frames. 
+ */ + mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + case igc_fc_tx_pause: + /* Tx Flow control is enabled, and Rx Flow control is + * disabled, by a software over-ride. + */ + mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR; + mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE; + break; + case igc_fc_full: + /* Flow control (both Rx and Tx) is enabled by a software + * over-ride. + */ + mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + default: + DEBUGOUT("Flow control param set incorrectly\n"); + return -IGC_ERR_CONFIG; + } + + ret_val = phy->ops.write_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg); + if (ret_val) + return ret_val; + + DEBUGOUT1("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); + + if (phy->autoneg_mask & ADVERTISE_1000_FULL) + ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL, + mii_1000t_ctrl_reg); + + if ((phy->autoneg_mask & ADVERTISE_2500_FULL) && + hw->phy.id == I225_I_PHY_ID) + ret_val = phy->ops.write_reg(hw, + (STANDARD_AN_REG_MASK << + MMD_DEVADDR_SHIFT) | + ANEG_MULTIGBT_AN_CTRL, + aneg_multigbt_an_ctrl); + + return ret_val; +} + +/** + * igc_copper_link_autoneg - Setup/Enable autoneg for copper link + * @hw: pointer to the HW structure + * + * Performs initial bounds checking on autoneg advertisement parameter, then + * configure to advertise the full capability. Setup the PHY to autoneg + * and restart the negotiation process between the link partner. If + * autoneg_wait_to_complete, then wait for autoneg to complete before exiting. + **/ +s32 igc_copper_link_autoneg(struct igc_hw *hw) +{ + struct igc_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_ctrl; + + DEBUGFUNC("igc_copper_link_autoneg"); + + /* Perform some bounds checking on the autoneg advertisement + * parameter. + */ + phy->autoneg_advertised &= phy->autoneg_mask; + + /* If autoneg_advertised is zero, we assume it was not defaulted + * by the calling code so we set to advertise full capability. + */ + if (!phy->autoneg_advertised) + phy->autoneg_advertised = phy->autoneg_mask; + + DEBUGOUT("Reconfiguring auto-neg advertisement params\n"); + ret_val = igc_phy_setup_autoneg(hw); + if (ret_val) { + DEBUGOUT("Error Setting up Auto-Negotiation\n"); + return ret_val; + } + DEBUGOUT("Restarting Auto-Neg\n"); + + /* Restart auto-negotiation by setting the Auto Neg Enable bit and + * the Auto Neg Restart bit in the PHY control register. + */ + ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_ctrl); + if (ret_val) + return ret_val; + + phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG); + ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_ctrl); + if (ret_val) + return ret_val; + + /* Does the user want to wait for Auto-Neg to complete here, or + * check at a later time (for example, callback routine). + */ + if (phy->autoneg_wait_to_complete) { + ret_val = igc_wait_autoneg(hw); + if (ret_val) { + DEBUGOUT("Error while waiting for autoneg to complete\n"); + return ret_val; + } + } + + hw->mac.get_link_status = true; + + return ret_val; +} + +/** + * igc_setup_copper_link_generic - Configure copper link settings + * @hw: pointer to the HW structure + * + * Calls the appropriate function to configure the link for auto-neg or forced + * speed and duplex. Then we check for link, once link is established calls + * to configure collision distance and flow control are called. If link is + * not established, we return -IGC_ERR_PHY (-2). 
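+ * (Note: in this implementation a failed link check is only logged;
+ * the status of the final link poll is what gets returned.)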
+ **/ +s32 igc_setup_copper_link_generic(struct igc_hw *hw) +{ + s32 ret_val; + bool link = false; + + DEBUGFUNC("igc_setup_copper_link_generic"); + + if (hw->mac.autoneg) { + /* Setup autoneg and flow control advertisement and perform + * autonegotiation. + */ + ret_val = igc_copper_link_autoneg(hw); + if (ret_val) + return ret_val; + } else { + /* PHY will be set to 10H, 10F, 100H or 100F + * depending on user settings. + */ + DEBUGOUT("Forcing Speed and Duplex\n"); + ret_val = hw->phy.ops.force_speed_duplex(hw); + if (ret_val) { + DEBUGOUT("Error Forcing Speed and Duplex\n"); + return ret_val; + } + } + + /* Check link status. Wait up to 100 microseconds for link to become + * valid. + */ + ret_val = igc_phy_has_link_generic(hw, COPPER_LINK_UP_LIMIT, 10, + &link); + if (ret_val) + return ret_val; + + if (link) { + DEBUGOUT("Valid link established!!!\n"); + hw->mac.ops.config_collision_dist(hw); + ret_val = igc_config_fc_after_link_up_generic(hw); + } else { + DEBUGOUT("Unable to establish link!!!\n"); + } + + return ret_val; +} + +/** + * igc_phy_force_speed_duplex_igp - Force speed/duplex for igp PHY + * @hw: pointer to the HW structure + * + * Calls the PHY setup function to force speed and duplex. Clears the + * auto-crossover to force MDI manually. Waits for link and returns + * successful if link up is successful, else -IGC_ERR_PHY (-2). + **/ +s32 igc_phy_force_speed_duplex_igp(struct igc_hw *hw) +{ + struct igc_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + bool link; + + DEBUGFUNC("igc_phy_force_speed_duplex_igp"); + + ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data); + if (ret_val) + return ret_val; + + igc_phy_force_speed_duplex_setup(hw, &phy_data); + + ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data); + if (ret_val) + return ret_val; + + /* Clear Auto-Crossover to force MDI manually. IGP requires MDI + * forced whenever speed and duplex are forced. + */ + ret_val = phy->ops.read_reg(hw, IGP01IGC_PHY_PORT_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~IGP01IGC_PSCR_AUTO_MDIX; + phy_data &= ~IGP01IGC_PSCR_FORCE_MDI_MDIX; + + ret_val = phy->ops.write_reg(hw, IGP01IGC_PHY_PORT_CTRL, phy_data); + if (ret_val) + return ret_val; + + DEBUGOUT1("IGP PSCR: %X\n", phy_data); + + usec_delay(1); + + if (phy->autoneg_wait_to_complete) { + DEBUGOUT("Waiting for forced speed/duplex link on IGP phy.\n"); + + ret_val = igc_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); + if (ret_val) + return ret_val; + + if (!link) + DEBUGOUT("Link taking longer than expected.\n"); + + /* Try once more */ + ret_val = igc_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); + } + + return ret_val; +} + +/** + * igc_phy_force_speed_duplex_m88 - Force speed/duplex for m88 PHY + * @hw: pointer to the HW structure + * + * Calls the PHY setup function to force speed and duplex. Clears the + * auto-crossover to force MDI manually. Resets the PHY to commit the + * changes. If time expires while waiting for link up, we reset the DSP. + * After reset, TX_CLK and CRS on Tx must be set. Return successful upon + * successful completion, else return corresponding error code. + **/ +s32 igc_phy_force_speed_duplex_m88(struct igc_hw *hw) +{ + struct igc_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + bool link; + + DEBUGFUNC("igc_phy_force_speed_duplex_m88"); + + /* I210 and I211 devices support Auto-Crossover in forced operation. */ + if (phy->type != igc_phy_i210) { + /* Clear Auto-Crossover to force MDI manually. 
M88E1000 + * requires MDI forced whenever speed and duplex are forced. + */ + ret_val = phy->ops.read_reg(hw, M88IGC_PHY_SPEC_CTRL, + &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~M88IGC_PSCR_AUTO_X_MODE; + ret_val = phy->ops.write_reg(hw, M88IGC_PHY_SPEC_CTRL, + phy_data); + if (ret_val) + return ret_val; + + DEBUGOUT1("M88E1000 PSCR: %X\n", phy_data); + } + + ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data); + if (ret_val) + return ret_val; + + igc_phy_force_speed_duplex_setup(hw, &phy_data); + + ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data); + if (ret_val) + return ret_val; + + /* Reset the phy to commit changes. */ + ret_val = hw->phy.ops.commit(hw); + if (ret_val) + return ret_val; + + if (phy->autoneg_wait_to_complete) { + DEBUGOUT("Waiting for forced speed/duplex link on M88 phy.\n"); + + ret_val = igc_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); + if (ret_val) + return ret_val; + + if (!link) { + bool reset_dsp = true; + + switch (hw->phy.id) { + case I347AT4_E_PHY_ID: + case M88E1340M_E_PHY_ID: + case M88E1112_E_PHY_ID: + case M88E1543_E_PHY_ID: + case M88E1512_E_PHY_ID: + case I210_I_PHY_ID: + /* fall-through */ + case I225_I_PHY_ID: + /* fall-through */ + reset_dsp = false; + break; + default: + if (hw->phy.type != igc_phy_m88) + reset_dsp = false; + break; + } + + if (!reset_dsp) { + DEBUGOUT("Link taking longer than expected.\n"); + } else { + /* We didn't get link. + * Reset the DSP and cross our fingers. + */ + ret_val = phy->ops.write_reg(hw, + M88IGC_PHY_PAGE_SELECT, + 0x001d); + if (ret_val) + return ret_val; + ret_val = igc_phy_reset_dsp_generic(hw); + if (ret_val) + return ret_val; + } + } + + /* Try once more */ + ret_val = igc_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); + if (ret_val) + return ret_val; + } + + if (hw->phy.type != igc_phy_m88) + return IGC_SUCCESS; + + if (hw->phy.id == I347AT4_E_PHY_ID || + hw->phy.id == M88E1340M_E_PHY_ID || + hw->phy.id == M88E1112_E_PHY_ID) + return IGC_SUCCESS; + if (hw->phy.id == I210_I_PHY_ID) + return IGC_SUCCESS; + if (hw->phy.id == I225_I_PHY_ID) + return IGC_SUCCESS; + if (hw->phy.id == M88E1543_E_PHY_ID || hw->phy.id == M88E1512_E_PHY_ID) + return IGC_SUCCESS; + ret_val = phy->ops.read_reg(hw, M88IGC_EXT_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + /* Resetting the phy means we need to re-force TX_CLK in the + * Extended PHY Specific Control Register to 25MHz clock from + * the reset value of 2.5MHz. + */ + phy_data |= M88IGC_EPSCR_TX_CLK_25; + ret_val = phy->ops.write_reg(hw, M88IGC_EXT_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + /* In addition, we must re-enable CRS on Tx for both half and full + * duplex. + */ + ret_val = phy->ops.read_reg(hw, M88IGC_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data |= M88IGC_PSCR_ASSERT_CRS_ON_TX; + ret_val = phy->ops.write_reg(hw, M88IGC_PHY_SPEC_CTRL, phy_data); + + return ret_val; +} + +/** + * igc_phy_force_speed_duplex_ife - Force PHY speed & duplex + * @hw: pointer to the HW structure + * + * Forces the speed and duplex settings of the PHY. + * This is a function pointer entry point only called by + * PHY setup routines. 
+ **/ +s32 igc_phy_force_speed_duplex_ife(struct igc_hw *hw) +{ + struct igc_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + bool link; + + DEBUGFUNC("igc_phy_force_speed_duplex_ife"); + + ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &data); + if (ret_val) + return ret_val; + + igc_phy_force_speed_duplex_setup(hw, &data); + + ret_val = phy->ops.write_reg(hw, PHY_CONTROL, data); + if (ret_val) + return ret_val; + + /* Disable MDI-X support for 10/100 */ + ret_val = phy->ops.read_reg(hw, IFE_PHY_MDIX_CONTROL, &data); + if (ret_val) + return ret_val; + + data &= ~IFE_PMC_AUTO_MDIX; + data &= ~IFE_PMC_FORCE_MDIX; + + ret_val = phy->ops.write_reg(hw, IFE_PHY_MDIX_CONTROL, data); + if (ret_val) + return ret_val; + + DEBUGOUT1("IFE PMC: %X\n", data); + + usec_delay(1); + + if (phy->autoneg_wait_to_complete) { + DEBUGOUT("Waiting for forced speed/duplex link on IFE phy.\n"); + + ret_val = igc_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); + if (ret_val) + return ret_val; + + if (!link) + DEBUGOUT("Link taking longer than expected.\n"); + + /* Try once more */ + ret_val = igc_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); + if (ret_val) + return ret_val; + } + + return IGC_SUCCESS; +} + +/** + * igc_phy_force_speed_duplex_setup - Configure forced PHY speed/duplex + * @hw: pointer to the HW structure + * @phy_ctrl: pointer to current value of PHY_CONTROL + * + * Forces speed and duplex on the PHY by doing the following: disable flow + * control, force speed/duplex on the MAC, disable auto speed detection, + * disable auto-negotiation, configure duplex, configure speed, configure + * the collision distance, write configuration to CTRL register. The + * caller must write to the PHY_CONTROL register for these settings to + * take affect. + **/ +void igc_phy_force_speed_duplex_setup(struct igc_hw *hw, u16 *phy_ctrl) +{ + struct igc_mac_info *mac = &hw->mac; + u32 ctrl; + + DEBUGFUNC("igc_phy_force_speed_duplex_setup"); + + /* Turn off flow control when forcing speed/duplex */ + hw->fc.current_mode = igc_fc_none; + + /* Force speed/duplex on the mac */ + ctrl = IGC_READ_REG(hw, IGC_CTRL); + ctrl |= (IGC_CTRL_FRCSPD | IGC_CTRL_FRCDPX); + ctrl &= ~IGC_CTRL_SPD_SEL; + + /* Disable Auto Speed Detection */ + ctrl &= ~IGC_CTRL_ASDE; + + /* Disable autoneg on the phy */ + *phy_ctrl &= ~MII_CR_AUTO_NEG_EN; + + /* Forcing Full or Half Duplex? */ + if (mac->forced_speed_duplex & IGC_ALL_HALF_DUPLEX) { + ctrl &= ~IGC_CTRL_FD; + *phy_ctrl &= ~MII_CR_FULL_DUPLEX; + DEBUGOUT("Half Duplex\n"); + } else { + ctrl |= IGC_CTRL_FD; + *phy_ctrl |= MII_CR_FULL_DUPLEX; + DEBUGOUT("Full Duplex\n"); + } + + /* Forcing 10mb or 100mb? */ + if (mac->forced_speed_duplex & IGC_ALL_100_SPEED) { + ctrl |= IGC_CTRL_SPD_100; + *phy_ctrl |= MII_CR_SPEED_100; + *phy_ctrl &= ~MII_CR_SPEED_1000; + DEBUGOUT("Forcing 100mb\n"); + } else { + ctrl &= ~(IGC_CTRL_SPD_1000 | IGC_CTRL_SPD_100); + *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100); + DEBUGOUT("Forcing 10mb\n"); + } + + hw->mac.ops.config_collision_dist(hw); + + IGC_WRITE_REG(hw, IGC_CTRL, ctrl); +} + +/** + * igc_set_d3_lplu_state_generic - Sets low power link up state for D3 + * @hw: pointer to the HW structure + * @active: boolean used to enable/disable lplu + * + * Success returns 0, Failure returns 1 + * + * The low power link up (lplu) state is set to the power management level D3 + * and SmartSpeed is disabled when active is true, else clear lplu for D3 + * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. 
LPLU + * is used during Dx states where the power conservation is most important. + * During driver activity, SmartSpeed should be enabled so performance is + * maintained. + **/ +s32 igc_set_d3_lplu_state_generic(struct igc_hw *hw, bool active) +{ + struct igc_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + + DEBUGFUNC("igc_set_d3_lplu_state_generic"); + + if (!hw->phy.ops.read_reg) + return IGC_SUCCESS; + + ret_val = phy->ops.read_reg(hw, IGP02IGC_PHY_POWER_MGMT, &data); + if (ret_val) + return ret_val; + + if (!active) { + data &= ~IGP02IGC_PM_D3_LPLU; + ret_val = phy->ops.write_reg(hw, IGP02IGC_PHY_POWER_MGMT, + data); + if (ret_val) + return ret_val; + /* LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. + */ + if (phy->smart_speed == igc_smart_speed_on) { + ret_val = phy->ops.read_reg(hw, + IGP01IGC_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data |= IGP01IGC_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, + IGP01IGC_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + } else if (phy->smart_speed == igc_smart_speed_off) { + ret_val = phy->ops.read_reg(hw, + IGP01IGC_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data &= ~IGP01IGC_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, + IGP01IGC_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + } + } else if ((phy->autoneg_advertised == IGC_ALL_SPEED_DUPLEX) || + (phy->autoneg_advertised == IGC_ALL_NOT_GIG) || + (phy->autoneg_advertised == IGC_ALL_10_SPEED)) { + data |= IGP02IGC_PM_D3_LPLU; + ret_val = phy->ops.write_reg(hw, IGP02IGC_PHY_POWER_MGMT, + data); + if (ret_val) + return ret_val; + + /* When LPLU is enabled, we should disable SmartSpeed */ + ret_val = phy->ops.read_reg(hw, IGP01IGC_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data &= ~IGP01IGC_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, IGP01IGC_PHY_PORT_CONFIG, + data); + } + + return ret_val; +} + +/** + * igc_check_downshift_generic - Checks whether a downshift in speed occurred + * @hw: pointer to the HW structure + * + * Success returns 0, Failure returns 1 + * + * A downshift is detected by querying the PHY link health. + **/ +s32 igc_check_downshift_generic(struct igc_hw *hw) +{ + struct igc_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, offset, mask; + + DEBUGFUNC("igc_check_downshift_generic"); + + switch (phy->type) { + case igc_phy_i210: + case igc_phy_m88: + case igc_phy_gg82563: + case igc_phy_bm: + case igc_phy_82578: + offset = M88IGC_PHY_SPEC_STATUS; + mask = M88IGC_PSSR_DOWNSHIFT; + break; + case igc_phy_igp: + case igc_phy_igp_2: + case igc_phy_igp_3: + offset = IGP01IGC_PHY_LINK_HEALTH; + mask = IGP01IGC_PLHR_SS_DOWNGRADE; + break; + default: + /* speed downshift not supported */ + phy->speed_downgraded = false; + return IGC_SUCCESS; + } + + ret_val = phy->ops.read_reg(hw, offset, &phy_data); + + if (!ret_val) + phy->speed_downgraded = !!(phy_data & mask); + + return ret_val; +} + +/** + * igc_check_polarity_m88 - Checks the polarity. + * @hw: pointer to the HW structure + * + * Success returns 0, Failure returns -IGC_ERR_PHY (-2) + * + * Polarity is determined based on the PHY specific status register. 
+ **/ +s32 igc_check_polarity_m88(struct igc_hw *hw) +{ + struct igc_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + + DEBUGFUNC("igc_check_polarity_m88"); + + ret_val = phy->ops.read_reg(hw, M88IGC_PHY_SPEC_STATUS, &data); + + if (!ret_val) + phy->cable_polarity = ((data & M88IGC_PSSR_REV_POLARITY) + ? igc_rev_polarity_reversed + : igc_rev_polarity_normal); + + return ret_val; +} + +/** + * igc_check_polarity_igp - Checks the polarity. + * @hw: pointer to the HW structure + * + * Success returns 0, Failure returns -IGC_ERR_PHY (-2) + * + * Polarity is determined based on the PHY port status register, and the + * current speed (since there is no polarity at 100Mbps). + **/ +s32 igc_check_polarity_igp(struct igc_hw *hw) +{ + struct igc_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data, offset, mask; + + DEBUGFUNC("igc_check_polarity_igp"); + + /* Polarity is determined based on the speed of + * our connection. + */ + ret_val = phy->ops.read_reg(hw, IGP01IGC_PHY_PORT_STATUS, &data); + if (ret_val) + return ret_val; + + if ((data & IGP01IGC_PSSR_SPEED_MASK) == + IGP01IGC_PSSR_SPEED_1000MBPS) { + offset = IGP01IGC_PHY_PCS_INIT_REG; + mask = IGP01IGC_PHY_POLARITY_MASK; + } else { + /* This really only applies to 10Mbps since + * there is no polarity for 100Mbps (always 0). + */ + offset = IGP01IGC_PHY_PORT_STATUS; + mask = IGP01IGC_PSSR_POLARITY_REVERSED; + } + + ret_val = phy->ops.read_reg(hw, offset, &data); + + if (!ret_val) + phy->cable_polarity = ((data & mask) + ? igc_rev_polarity_reversed + : igc_rev_polarity_normal); + + return ret_val; +} + +/** + * igc_check_polarity_ife - Check cable polarity for IFE PHY + * @hw: pointer to the HW structure + * + * Polarity is determined on the polarity reversal feature being enabled. + **/ +s32 igc_check_polarity_ife(struct igc_hw *hw) +{ + struct igc_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, offset, mask; + + DEBUGFUNC("igc_check_polarity_ife"); + + /* Polarity is determined based on the reversal feature being enabled. + */ + if (phy->polarity_correction) { + offset = IFE_PHY_EXTENDED_STATUS_CONTROL; + mask = IFE_PESC_POLARITY_REVERSED; + } else { + offset = IFE_PHY_SPECIAL_CONTROL; + mask = IFE_PSC_FORCE_POLARITY; + } + + ret_val = phy->ops.read_reg(hw, offset, &phy_data); + + if (!ret_val) + phy->cable_polarity = ((phy_data & mask) + ? igc_rev_polarity_reversed + : igc_rev_polarity_normal); + + return ret_val; +} + +/** + * igc_wait_autoneg - Wait for auto-neg completion + * @hw: pointer to the HW structure + * + * Waits for auto-negotiation to complete or for the auto-negotiation time + * limit to expire, which ever happens first. + **/ +static s32 igc_wait_autoneg(struct igc_hw *hw) +{ + s32 ret_val = IGC_SUCCESS; + u16 i, phy_status; + + DEBUGFUNC("igc_wait_autoneg"); + + if (!hw->phy.ops.read_reg) + return IGC_SUCCESS; + + /* Break after autoneg completes or PHY_AUTO_NEG_LIMIT expires. */ + for (i = PHY_AUTO_NEG_LIMIT; i > 0; i--) { + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); + if (ret_val) + break; + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); + if (ret_val) + break; + if (phy_status & MII_SR_AUTONEG_COMPLETE) + break; + msec_delay(100); + } + + /* PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation + * has completed. 
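+ * A timeout therefore still returns IGC_SUCCESS; only a failed PHY
+ * register read is reported as an error to the caller.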
+ */ + return ret_val; +} + +/** + * igc_phy_has_link_generic - Polls PHY for link + * @hw: pointer to the HW structure + * @iterations: number of times to poll for link + * @usec_interval: delay between polling attempts + * @success: pointer to whether polling was successful or not + * + * Polls the PHY status register for link, 'iterations' number of times. + **/ +s32 igc_phy_has_link_generic(struct igc_hw *hw, u32 iterations, + u32 usec_interval, bool *success) +{ + s32 ret_val = IGC_SUCCESS; + u16 i, phy_status; + + DEBUGFUNC("igc_phy_has_link_generic"); + + if (!hw->phy.ops.read_reg) + return IGC_SUCCESS; + + for (i = 0; i < iterations; i++) { + /* Some PHYs require the PHY_STATUS register to be read + * twice due to the link bit being sticky. No harm doing + * it across the board. + */ + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); + if (ret_val) { + /* If the first read fails, another entity may have + * ownership of the resources, wait and try again to + * see if they have relinquished the resources yet. + */ + if (usec_interval >= 1000) + msec_delay(usec_interval / 1000); + else + usec_delay(usec_interval); + } + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); + if (ret_val) + break; + if (phy_status & MII_SR_LINK_STATUS) + break; + if (usec_interval >= 1000) + msec_delay(usec_interval / 1000); + else + usec_delay(usec_interval); + } + + *success = (i < iterations); + + return ret_val; +} + +/** + * igc_get_cable_length_m88 - Determine cable length for m88 PHY + * @hw: pointer to the HW structure + * + * Reads the PHY specific status register to retrieve the cable length + * information. The cable length is determined by averaging the minimum and + * maximum values to get the "average" cable length. The m88 PHY has four + * possible cable length values, which are: + * Register Value Cable Length + * 0 < 50 meters + * 1 50 - 80 meters + * 2 80 - 110 meters + * 3 110 - 140 meters + * 4 > 140 meters + **/ +s32 igc_get_cable_length_m88(struct igc_hw *hw) +{ + struct igc_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, index; + + DEBUGFUNC("igc_get_cable_length_m88"); + + ret_val = phy->ops.read_reg(hw, M88IGC_PHY_SPEC_STATUS, &phy_data); + if (ret_val) + return ret_val; + + index = ((phy_data & M88IGC_PSSR_CABLE_LENGTH) >> + M88IGC_PSSR_CABLE_LENGTH_SHIFT); + + if (index >= M88IGC_CABLE_LENGTH_TABLE_SIZE - 1) + return -IGC_ERR_PHY; + + phy->min_cable_length = igc_m88_cable_length_table[index]; + phy->max_cable_length = igc_m88_cable_length_table[index + 1]; + + phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; + + return IGC_SUCCESS; +} + +s32 igc_get_cable_length_m88_gen2(struct igc_hw *hw) +{ + struct igc_phy_info *phy = &hw->phy; + s32 ret_val = 0; + u16 phy_data, phy_data2, is_cm; + u16 index, default_page; + + DEBUGFUNC("igc_get_cable_length_m88_gen2"); + + switch (hw->phy.id) { + case I210_I_PHY_ID: + /* Get cable length from PHY Cable Diagnostics Control Reg */ + ret_val = phy->ops.read_reg(hw, (0x7 << GS40G_PAGE_SHIFT) + + (I347AT4_PCDL + phy->addr), + &phy_data); + if (ret_val) + return ret_val; + + /* Check if the unit of cable length is meters or cm */ + ret_val = phy->ops.read_reg(hw, (0x7 << GS40G_PAGE_SHIFT) + + I347AT4_PCDC, &phy_data2); + if (ret_val) + return ret_val; + + is_cm = !(phy_data2 & I347AT4_PCDC_CABLE_LENGTH_UNIT); + + /* Populate the phy structure with cable length in meters */ + phy->min_cable_length = phy_data / (is_cm ? 100 : 1); + phy->max_cable_length = phy_data / (is_cm ? 
100 : 1); + phy->cable_length = phy_data / (is_cm ? 100 : 1); + break; + case I225_I_PHY_ID: + if (ret_val) + return ret_val; + /* TODO - complete with Foxville data */ + break; + case M88E1543_E_PHY_ID: + case M88E1512_E_PHY_ID: + case M88E1340M_E_PHY_ID: + case I347AT4_E_PHY_ID: + /* Remember the original page select and set it to 7 */ + ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT, + &default_page); + if (ret_val) + return ret_val; + + ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0x07); + if (ret_val) + return ret_val; + + /* Get cable length from PHY Cable Diagnostics Control Reg */ + ret_val = phy->ops.read_reg(hw, (I347AT4_PCDL + phy->addr), + &phy_data); + if (ret_val) + return ret_val; + + /* Check if the unit of cable length is meters or cm */ + ret_val = phy->ops.read_reg(hw, I347AT4_PCDC, &phy_data2); + if (ret_val) + return ret_val; + + is_cm = !(phy_data2 & I347AT4_PCDC_CABLE_LENGTH_UNIT); + + /* Populate the phy structure with cable length in meters */ + phy->min_cable_length = phy_data / (is_cm ? 100 : 1); + phy->max_cable_length = phy_data / (is_cm ? 100 : 1); + phy->cable_length = phy_data / (is_cm ? 100 : 1); + + /* Reset the page select to its original value */ + ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, + default_page); + if (ret_val) + return ret_val; + break; + + case M88E1112_E_PHY_ID: + /* Remember the original page select and set it to 5 */ + ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT, + &default_page); + if (ret_val) + return ret_val; + + ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0x05); + if (ret_val) + return ret_val; + + ret_val = phy->ops.read_reg(hw, M88E1112_VCT_DSP_DISTANCE, + &phy_data); + if (ret_val) + return ret_val; + + index = (phy_data & M88IGC_PSSR_CABLE_LENGTH) >> + M88IGC_PSSR_CABLE_LENGTH_SHIFT; + + if (index >= M88IGC_CABLE_LENGTH_TABLE_SIZE - 1) + return -IGC_ERR_PHY; + + phy->min_cable_length = igc_m88_cable_length_table[index]; + phy->max_cable_length = igc_m88_cable_length_table[index + 1]; + + phy->cable_length = (phy->min_cable_length + + phy->max_cable_length) / 2; + + /* Reset the page select to its original value */ + ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, + default_page); + if (ret_val) + return ret_val; + + break; + default: + return -IGC_ERR_PHY; + } + + return ret_val; +} + +/** + * igc_get_cable_length_igp_2 - Determine cable length for igp2 PHY + * @hw: pointer to the HW structure + * + * The automatic gain control (agc) normalizes the amplitude of the + * received signal, adjusting for the attenuation produced by the + * cable. By reading the AGC registers, which represent the + * combination of coarse and fine gain value, the value can be put + * into a lookup table to obtain the approximate cable length + * for each channel. + **/ +s32 igc_get_cable_length_igp_2(struct igc_hw *hw) +{ + struct igc_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, i, agc_value = 0; + u16 cur_agc_index, max_agc_index = 0; + u16 min_agc_index = IGP02IGC_CABLE_LENGTH_TABLE_SIZE - 1; + static const u16 agc_reg_array[IGP02IGC_PHY_CHANNEL_NUM] = { + IGP02IGC_PHY_AGC_A, + IGP02IGC_PHY_AGC_B, + IGP02IGC_PHY_AGC_C, + IGP02IGC_PHY_AGC_D + }; + + DEBUGFUNC("igc_get_cable_length_igp_2"); + + /* Read the AGC registers for all channels */ + for (i = 0; i < IGP02IGC_PHY_CHANNEL_NUM; i++) { + ret_val = phy->ops.read_reg(hw, agc_reg_array[i], &phy_data); + if (ret_val) + return ret_val; + + /* Getting bits 15:9, which represent the combination of + * coarse and fine gain values. 
The result is a number + * that can be put into the lookup table to obtain the + * approximate cable length. + */ + cur_agc_index = ((phy_data >> IGP02IGC_AGC_LENGTH_SHIFT) & + IGP02IGC_AGC_LENGTH_MASK); + + /* Array index bound check. */ + if (cur_agc_index >= IGP02IGC_CABLE_LENGTH_TABLE_SIZE || + cur_agc_index == 0) + return -IGC_ERR_PHY; + + /* Remove min & max AGC values from calculation. */ + if (igc_igp_2_cable_length_table[min_agc_index] > + igc_igp_2_cable_length_table[cur_agc_index]) + min_agc_index = cur_agc_index; + if (igc_igp_2_cable_length_table[max_agc_index] < + igc_igp_2_cable_length_table[cur_agc_index]) + max_agc_index = cur_agc_index; + + agc_value += igc_igp_2_cable_length_table[cur_agc_index]; + } + + agc_value -= (igc_igp_2_cable_length_table[min_agc_index] + + igc_igp_2_cable_length_table[max_agc_index]); + agc_value /= (IGP02IGC_PHY_CHANNEL_NUM - 2); + + /* Calculate cable length with the error range of +/- 10 meters. */ + phy->min_cable_length = (((agc_value - IGP02IGC_AGC_RANGE) > 0) ? + (agc_value - IGP02IGC_AGC_RANGE) : 0); + phy->max_cable_length = agc_value + IGP02IGC_AGC_RANGE; + + phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; + + return IGC_SUCCESS; +} + +/** + * igc_get_phy_info_m88 - Retrieve PHY information + * @hw: pointer to the HW structure + * + * Valid for only copper links. Read the PHY status register (sticky read) + * to verify that link is up. Read the PHY special control register to + * determine the polarity and 10base-T extended distance. Read the PHY + * special status register to determine MDI/MDIx and current speed. If + * speed is 1000, then determine cable length, local and remote receiver. + **/ +s32 igc_get_phy_info_m88(struct igc_hw *hw) +{ + struct igc_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + bool link; + + DEBUGFUNC("igc_get_phy_info_m88"); + + if (phy->media_type != igc_media_type_copper) { + DEBUGOUT("Phy info is only valid for copper media\n"); + return -IGC_ERR_CONFIG; + } + + ret_val = igc_phy_has_link_generic(hw, 1, 0, &link); + if (ret_val) + return ret_val; + + if (!link) { + DEBUGOUT("Phy info is only valid if link is up\n"); + return -IGC_ERR_CONFIG; + } + + ret_val = phy->ops.read_reg(hw, M88IGC_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy->polarity_correction = !!(phy_data & + M88IGC_PSCR_POLARITY_REVERSAL); + + ret_val = igc_check_polarity_m88(hw); + if (ret_val) + return ret_val; + + ret_val = phy->ops.read_reg(hw, M88IGC_PHY_SPEC_STATUS, &phy_data); + if (ret_val) + return ret_val; + + phy->is_mdix = !!(phy_data & M88IGC_PSSR_MDIX); + + if ((phy_data & M88IGC_PSSR_SPEED) == M88IGC_PSSR_1000MBS) { + ret_val = hw->phy.ops.get_cable_length(hw); + if (ret_val) + return ret_val; + + ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &phy_data); + if (ret_val) + return ret_val; + + phy->local_rx = (phy_data & SR_1000T_LOCAL_RX_STATUS) + ? igc_1000t_rx_status_ok + : igc_1000t_rx_status_not_ok; + + phy->remote_rx = (phy_data & SR_1000T_REMOTE_RX_STATUS) + ? igc_1000t_rx_status_ok + : igc_1000t_rx_status_not_ok; + } else { + /* Set values to "undefined" */ + phy->cable_length = IGC_CABLE_LENGTH_UNDEFINED; + phy->local_rx = igc_1000t_rx_status_undefined; + phy->remote_rx = igc_1000t_rx_status_undefined; + } + + return ret_val; +} + +/** + * igc_get_phy_info_igp - Retrieve igp PHY information + * @hw: pointer to the HW structure + * + * Read PHY status to determine if link is up. 
If link is up, then + * set/determine 10base-T extended distance and polarity correction. Read + * PHY port status to determine MDI/MDIx and speed. Based on the speed, + * determine on the cable length, local and remote receiver. + **/ +s32 igc_get_phy_info_igp(struct igc_hw *hw) +{ + struct igc_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + bool link; + + DEBUGFUNC("igc_get_phy_info_igp"); + + ret_val = igc_phy_has_link_generic(hw, 1, 0, &link); + if (ret_val) + return ret_val; + + if (!link) { + DEBUGOUT("Phy info is only valid if link is up\n"); + return -IGC_ERR_CONFIG; + } + + phy->polarity_correction = true; + + ret_val = igc_check_polarity_igp(hw); + if (ret_val) + return ret_val; + + ret_val = phy->ops.read_reg(hw, IGP01IGC_PHY_PORT_STATUS, &data); + if (ret_val) + return ret_val; + + phy->is_mdix = !!(data & IGP01IGC_PSSR_MDIX); + + if ((data & IGP01IGC_PSSR_SPEED_MASK) == + IGP01IGC_PSSR_SPEED_1000MBPS) { + ret_val = phy->ops.get_cable_length(hw); + if (ret_val) + return ret_val; + + ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data); + if (ret_val) + return ret_val; + + phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS) + ? igc_1000t_rx_status_ok + : igc_1000t_rx_status_not_ok; + + phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS) + ? igc_1000t_rx_status_ok + : igc_1000t_rx_status_not_ok; + } else { + phy->cable_length = IGC_CABLE_LENGTH_UNDEFINED; + phy->local_rx = igc_1000t_rx_status_undefined; + phy->remote_rx = igc_1000t_rx_status_undefined; + } + + return ret_val; +} + +/** + * igc_get_phy_info_ife - Retrieves various IFE PHY states + * @hw: pointer to the HW structure + * + * Populates "phy" structure with various feature states. + **/ +s32 igc_get_phy_info_ife(struct igc_hw *hw) +{ + struct igc_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + bool link; + + DEBUGFUNC("igc_get_phy_info_ife"); + + ret_val = igc_phy_has_link_generic(hw, 1, 0, &link); + if (ret_val) + return ret_val; + + if (!link) { + DEBUGOUT("Phy info is only valid if link is up\n"); + return -IGC_ERR_CONFIG; + } + + ret_val = phy->ops.read_reg(hw, IFE_PHY_SPECIAL_CONTROL, &data); + if (ret_val) + return ret_val; + phy->polarity_correction = !(data & IFE_PSC_AUTO_POLARITY_DISABLE); + + if (phy->polarity_correction) { + ret_val = igc_check_polarity_ife(hw); + if (ret_val) + return ret_val; + } else { + /* Polarity is forced */ + phy->cable_polarity = ((data & IFE_PSC_FORCE_POLARITY) + ? igc_rev_polarity_reversed + : igc_rev_polarity_normal); + } + + ret_val = phy->ops.read_reg(hw, IFE_PHY_MDIX_CONTROL, &data); + if (ret_val) + return ret_val; + + phy->is_mdix = !!(data & IFE_PMC_MDIX_STATUS); + + /* The following parameters are undefined for 10/100 operation. */ + phy->cable_length = IGC_CABLE_LENGTH_UNDEFINED; + phy->local_rx = igc_1000t_rx_status_undefined; + phy->remote_rx = igc_1000t_rx_status_undefined; + + return IGC_SUCCESS; +} + +/** + * igc_phy_sw_reset_generic - PHY software reset + * @hw: pointer to the HW structure + * + * Does a software reset of the PHY by reading the PHY control register and + * setting/write the control register reset bit to the PHY. 
+ **/ +s32 igc_phy_sw_reset_generic(struct igc_hw *hw) +{ + s32 ret_val; + u16 phy_ctrl; + + DEBUGFUNC("igc_phy_sw_reset_generic"); + + if (!hw->phy.ops.read_reg) + return IGC_SUCCESS; + + ret_val = hw->phy.ops.read_reg(hw, PHY_CONTROL, &phy_ctrl); + if (ret_val) + return ret_val; + + phy_ctrl |= MII_CR_RESET; + ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL, phy_ctrl); + if (ret_val) + return ret_val; + + usec_delay(1); + + return ret_val; +} + +/** + * igc_phy_hw_reset_generic - PHY hardware reset + * @hw: pointer to the HW structure + * + * Verify the reset block is not blocking us from resetting. Acquire + * semaphore (if necessary) and read/set/write the device control reset + * bit in the PHY. Wait the appropriate delay time for the device to + * reset and release the semaphore (if necessary). + **/ +s32 igc_phy_hw_reset_generic(struct igc_hw *hw) +{ + struct igc_phy_info *phy = &hw->phy; + s32 ret_val; + u32 ctrl; + + DEBUGFUNC("igc_phy_hw_reset_generic"); + + if (phy->ops.check_reset_block) { + ret_val = phy->ops.check_reset_block(hw); + if (ret_val) + return IGC_SUCCESS; + } + + ret_val = phy->ops.acquire(hw); + if (ret_val) + return ret_val; + + ctrl = IGC_READ_REG(hw, IGC_CTRL); + IGC_WRITE_REG(hw, IGC_CTRL, ctrl | IGC_CTRL_PHY_RST); + IGC_WRITE_FLUSH(hw); + + usec_delay(phy->reset_delay_us); + + IGC_WRITE_REG(hw, IGC_CTRL, ctrl); + IGC_WRITE_FLUSH(hw); + + usec_delay(150); + + phy->ops.release(hw); + + return ret_val; +} + +/** + * igc_get_cfg_done_generic - Generic configuration done + * @hw: pointer to the HW structure + * + * Generic function to wait 10 milli-seconds for configuration to complete + * and return success. + **/ +s32 igc_get_cfg_done_generic(struct igc_hw IGC_UNUSEDARG * hw) +{ + DEBUGFUNC("igc_get_cfg_done_generic"); + UNREFERENCED_1PARAMETER(hw); + + msec_delay_irq(10); + + return IGC_SUCCESS; +} + +/** + * igc_phy_init_script_igp3 - Inits the IGP3 PHY + * @hw: pointer to the HW structure + * + * Initializes a Intel Gigabit PHY3 when an EEPROM is not present. 
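+ *
+ * The script first forces 1000 Mb/s operation (PHY control register
+ * 0x0000 = 0x0140) so the PHY register configuration below takes effect,
+ * and finishes by restarting auto-negotiation (0x0000 = 0x1340).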
+ **/ +s32 igc_phy_init_script_igp3(struct igc_hw *hw) +{ + DEBUGOUT("Running IGP 3 PHY init script\n"); + + /* PHY init IGP 3 */ + /* Enable rise/fall, 10-mode work in class-A */ + hw->phy.ops.write_reg(hw, 0x2F5B, 0x9018); + /* Remove all caps from Replica path filter */ + hw->phy.ops.write_reg(hw, 0x2F52, 0x0000); + /* Bias trimming for ADC, AFE and Driver (Default) */ + hw->phy.ops.write_reg(hw, 0x2FB1, 0x8B24); + /* Increase Hybrid poly bias */ + hw->phy.ops.write_reg(hw, 0x2FB2, 0xF8F0); + /* Add 4% to Tx amplitude in Gig mode */ + hw->phy.ops.write_reg(hw, 0x2010, 0x10B0); + /* Disable trimming (TTT) */ + hw->phy.ops.write_reg(hw, 0x2011, 0x0000); + /* Poly DC correction to 94.6% + 2% for all channels */ + hw->phy.ops.write_reg(hw, 0x20DD, 0x249A); + /* ABS DC correction to 95.9% */ + hw->phy.ops.write_reg(hw, 0x20DE, 0x00D3); + /* BG temp curve trim */ + hw->phy.ops.write_reg(hw, 0x28B4, 0x04CE); + /* Increasing ADC OPAMP stage 1 currents to max */ + hw->phy.ops.write_reg(hw, 0x2F70, 0x29E4); + /* Force 1000 ( required for enabling PHY regs configuration) */ + hw->phy.ops.write_reg(hw, 0x0000, 0x0140); + /* Set upd_freq to 6 */ + hw->phy.ops.write_reg(hw, 0x1F30, 0x1606); + /* Disable NPDFE */ + hw->phy.ops.write_reg(hw, 0x1F31, 0xB814); + /* Disable adaptive fixed FFE (Default) */ + hw->phy.ops.write_reg(hw, 0x1F35, 0x002A); + /* Enable FFE hysteresis */ + hw->phy.ops.write_reg(hw, 0x1F3E, 0x0067); + /* Fixed FFE for short cable lengths */ + hw->phy.ops.write_reg(hw, 0x1F54, 0x0065); + /* Fixed FFE for medium cable lengths */ + hw->phy.ops.write_reg(hw, 0x1F55, 0x002A); + /* Fixed FFE for long cable lengths */ + hw->phy.ops.write_reg(hw, 0x1F56, 0x002A); + /* Enable Adaptive Clip Threshold */ + hw->phy.ops.write_reg(hw, 0x1F72, 0x3FB0); + /* AHT reset limit to 1 */ + hw->phy.ops.write_reg(hw, 0x1F76, 0xC0FF); + /* Set AHT master delay to 127 msec */ + hw->phy.ops.write_reg(hw, 0x1F77, 0x1DEC); + /* Set scan bits for AHT */ + hw->phy.ops.write_reg(hw, 0x1F78, 0xF9EF); + /* Set AHT Preset bits */ + hw->phy.ops.write_reg(hw, 0x1F79, 0x0210); + /* Change integ_factor of channel A to 3 */ + hw->phy.ops.write_reg(hw, 0x1895, 0x0003); + /* Change prop_factor of channels BCD to 8 */ + hw->phy.ops.write_reg(hw, 0x1796, 0x0008); + /* Change cg_icount + enable integbp for channels BCD */ + hw->phy.ops.write_reg(hw, 0x1798, 0xD008); + /* Change cg_icount + enable integbp + change prop_factor_master + * to 8 for channel A + */ + hw->phy.ops.write_reg(hw, 0x1898, 0xD918); + /* Disable AHT in Slave mode on channel A */ + hw->phy.ops.write_reg(hw, 0x187A, 0x0800); + /* Enable LPLU and disable AN to 1000 in non-D0a states, + * Enable SPD+B2B + */ + hw->phy.ops.write_reg(hw, 0x0019, 0x008D); + /* Enable restart AN on an1000_dis change */ + hw->phy.ops.write_reg(hw, 0x001B, 0x2080); + /* Enable wh_fifo read clock in 10/100 modes */ + hw->phy.ops.write_reg(hw, 0x0014, 0x0045); + /* Restart AN, Speed selection is 1000 */ + hw->phy.ops.write_reg(hw, 0x0000, 0x1340); + + return IGC_SUCCESS; +} + +/** + * igc_get_phy_type_from_id - Get PHY type from id + * @phy_id: phy_id read from the phy + * + * Returns the phy type from the id. 
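+ * For example, I210_I_PHY_ID maps to igc_phy_i210 and I225_I_PHY_ID to
+ * igc_phy_i225; any ID that is not recognized yields igc_phy_unknown.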
+ **/ +enum igc_phy_type igc_get_phy_type_from_id(u32 phy_id) +{ + enum igc_phy_type phy_type = igc_phy_unknown; + + switch (phy_id) { + case M88IGC_I_PHY_ID: + case M88IGC_E_PHY_ID: + case M88E1111_I_PHY_ID: + case M88E1011_I_PHY_ID: + case M88E1543_E_PHY_ID: + case M88E1512_E_PHY_ID: + case I347AT4_E_PHY_ID: + case M88E1112_E_PHY_ID: + case M88E1340M_E_PHY_ID: + phy_type = igc_phy_m88; + break; + case IGP01IGC_I_PHY_ID: /* IGP 1 & 2 share this */ + phy_type = igc_phy_igp_2; + break; + case GG82563_E_PHY_ID: + phy_type = igc_phy_gg82563; + break; + case IGP03IGC_E_PHY_ID: + phy_type = igc_phy_igp_3; + break; + case IFE_E_PHY_ID: + case IFE_PLUS_E_PHY_ID: + case IFE_C_E_PHY_ID: + phy_type = igc_phy_ife; + break; + case BMIGC_E_PHY_ID: + case BMIGC_E_PHY_ID_R2: + phy_type = igc_phy_bm; + break; + case I82578_E_PHY_ID: + phy_type = igc_phy_82578; + break; + case I82577_E_PHY_ID: + phy_type = igc_phy_82577; + break; + case I82579_E_PHY_ID: + phy_type = igc_phy_82579; + break; + case I217_E_PHY_ID: + phy_type = igc_phy_i217; + break; + case I82580_I_PHY_ID: + phy_type = igc_phy_82580; + break; + case I210_I_PHY_ID: + phy_type = igc_phy_i210; + break; + case I225_I_PHY_ID: + phy_type = igc_phy_i225; + break; + default: + phy_type = igc_phy_unknown; + break; + } + return phy_type; +} + +/** + * igc_determine_phy_address - Determines PHY address. + * @hw: pointer to the HW structure + * + * This uses a trial and error method to loop through possible PHY + * addresses. It tests each by reading the PHY ID registers and + * checking for a match. + **/ +s32 igc_determine_phy_address(struct igc_hw *hw) +{ + u32 phy_addr = 0; + u32 i; + enum igc_phy_type phy_type = igc_phy_unknown; + + hw->phy.id = phy_type; + + for (phy_addr = 0; phy_addr < IGC_MAX_PHY_ADDR; phy_addr++) { + hw->phy.addr = phy_addr; + i = 0; + + do { + igc_get_phy_id(hw); + phy_type = igc_get_phy_type_from_id(hw->phy.id); + + /* If phy_type is valid, break - we found our + * PHY address + */ + if (phy_type != igc_phy_unknown) + return IGC_SUCCESS; + + msec_delay(1); + i++; + } while (i < 10); + } + + return -IGC_ERR_PHY_TYPE; +} + +/** + * igc_get_phy_addr_for_bm_page - Retrieve PHY page address + * @page: page to access + * @reg: register to access + * + * Returns the phy address for the page requested. + **/ +static u32 igc_get_phy_addr_for_bm_page(u32 page, u32 reg) +{ + u32 phy_addr = 2; + + if (page >= 768 || (page == 0 && reg == 25) || reg == 31) + phy_addr = 1; + + return phy_addr; +} + +/** + * igc_write_phy_reg_bm - Write BM PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore, if necessary, then writes the data to PHY register + * at the offset. Release any acquired semaphores before exiting. + **/ +s32 igc_write_phy_reg_bm(struct igc_hw *hw, u32 offset, u16 data) +{ + s32 ret_val; + u32 page = offset >> IGP_PAGE_SHIFT; + + DEBUGFUNC("igc_write_phy_reg_bm"); + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + /* Page 800 works differently than the rest so it has its own func */ + if (page == BM_WUC_PAGE) { + ret_val = igc_access_phy_wakeup_reg_bm(hw, offset, &data, + false, false); + goto release; + } + + hw->phy.addr = igc_get_phy_addr_for_bm_page(page, offset); + + if (offset > MAX_PHY_MULTI_PAGE_REG) { + u32 page_shift, page_select; + + /* Page select is register 31 for phy address 1 and 22 for + * phy address 2 and 3. Page select is shifted only for + * phy address 1. 
+ */ + if (hw->phy.addr == 1) { + page_shift = IGP_PAGE_SHIFT; + page_select = IGP01IGC_PHY_PAGE_SELECT; + } else { + page_shift = 0; + page_select = BM_PHY_PAGE_SELECT; + } + + /* Page is shifted left, PHY expects (page x 32) */ + ret_val = igc_write_phy_reg_mdic(hw, page_select, + (page << page_shift)); + if (ret_val) + goto release; + } + + ret_val = igc_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, + data); + +release: + hw->phy.ops.release(hw); + return ret_val; +} + +/** + * igc_read_phy_reg_bm - Read BM PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Acquires semaphore, if necessary, then reads the PHY register at offset + * and storing the retrieved information in data. Release any acquired + * semaphores before exiting. + **/ +s32 igc_read_phy_reg_bm(struct igc_hw *hw, u32 offset, u16 *data) +{ + s32 ret_val; + u32 page = offset >> IGP_PAGE_SHIFT; + + DEBUGFUNC("igc_read_phy_reg_bm"); + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + /* Page 800 works differently than the rest so it has its own func */ + if (page == BM_WUC_PAGE) { + ret_val = igc_access_phy_wakeup_reg_bm(hw, offset, data, + true, false); + goto release; + } + + hw->phy.addr = igc_get_phy_addr_for_bm_page(page, offset); + + if (offset > MAX_PHY_MULTI_PAGE_REG) { + u32 page_shift, page_select; + + /* Page select is register 31 for phy address 1 and 22 for + * phy address 2 and 3. Page select is shifted only for + * phy address 1. + */ + if (hw->phy.addr == 1) { + page_shift = IGP_PAGE_SHIFT; + page_select = IGP01IGC_PHY_PAGE_SELECT; + } else { + page_shift = 0; + page_select = BM_PHY_PAGE_SELECT; + } + + /* Page is shifted left, PHY expects (page x 32) */ + ret_val = igc_write_phy_reg_mdic(hw, page_select, + (page << page_shift)); + if (ret_val) + goto release; + } + + ret_val = igc_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, + data); +release: + hw->phy.ops.release(hw); + return ret_val; +} + +/** + * igc_read_phy_reg_bm2 - Read BM PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Acquires semaphore, if necessary, then reads the PHY register at offset + * and storing the retrieved information in data. Release any acquired + * semaphores before exiting. + **/ +s32 igc_read_phy_reg_bm2(struct igc_hw *hw, u32 offset, u16 *data) +{ + s32 ret_val; + u16 page = (u16)(offset >> IGP_PAGE_SHIFT); + + DEBUGFUNC("igc_read_phy_reg_bm2"); + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + /* Page 800 works differently than the rest so it has its own func */ + if (page == BM_WUC_PAGE) { + ret_val = igc_access_phy_wakeup_reg_bm(hw, offset, data, + true, false); + goto release; + } + + hw->phy.addr = 1; + + if (offset > MAX_PHY_MULTI_PAGE_REG) { + /* Page is shifted left, PHY expects (page x 32) */ + ret_val = igc_write_phy_reg_mdic(hw, BM_PHY_PAGE_SELECT, + page); + + if (ret_val) + goto release; + } + + ret_val = igc_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, + data); +release: + hw->phy.ops.release(hw); + return ret_val; +} + +/** + * igc_write_phy_reg_bm2 - Write BM PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore, if necessary, then writes the data to PHY register + * at the offset. Release any acquired semaphores before exiting. 
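+ *
+ * Unlike igc_write_phy_reg_bm(), this variant always uses PHY address 1
+ * and selects pages through BM_PHY_PAGE_SELECT (register 22).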
+ **/ +s32 igc_write_phy_reg_bm2(struct igc_hw *hw, u32 offset, u16 data) +{ + s32 ret_val; + u16 page = (u16)(offset >> IGP_PAGE_SHIFT); + + DEBUGFUNC("igc_write_phy_reg_bm2"); + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + /* Page 800 works differently than the rest so it has its own func */ + if (page == BM_WUC_PAGE) { + ret_val = igc_access_phy_wakeup_reg_bm(hw, offset, &data, + false, false); + goto release; + } + + hw->phy.addr = 1; + + if (offset > MAX_PHY_MULTI_PAGE_REG) { + /* Page is shifted left, PHY expects (page x 32) */ + ret_val = igc_write_phy_reg_mdic(hw, BM_PHY_PAGE_SELECT, + page); + + if (ret_val) + goto release; + } + + ret_val = igc_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, + data); + +release: + hw->phy.ops.release(hw); + return ret_val; +} + +/** + * igc_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers + * @hw: pointer to the HW structure + * @phy_reg: pointer to store original contents of BM_WUC_ENABLE_REG + * + * Assumes semaphore already acquired and phy_reg points to a valid memory + * address to store contents of the BM_WUC_ENABLE_REG register. + **/ +s32 igc_enable_phy_wakeup_reg_access_bm(struct igc_hw *hw, u16 *phy_reg) +{ + s32 ret_val; + u16 temp; + + DEBUGFUNC("igc_enable_phy_wakeup_reg_access_bm"); + + if (!phy_reg) + return -IGC_ERR_PARAM; + + /* All page select, port ctrl and wakeup registers use phy address 1 */ + hw->phy.addr = 1; + + /* Select Port Control Registers page */ + ret_val = igc_set_page_igp(hw, (BM_PORT_CTRL_PAGE << IGP_PAGE_SHIFT)); + if (ret_val) { + DEBUGOUT("Could not set Port Control page\n"); + return ret_val; + } + + ret_val = igc_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, phy_reg); + if (ret_val) { + DEBUGOUT2("Could not read PHY register %d.%d\n", + BM_PORT_CTRL_PAGE, BM_WUC_ENABLE_REG); + return ret_val; + } + + /* Enable both PHY wakeup mode and Wakeup register page writes. + * Prevent a power state change by disabling ME and Host PHY wakeup. + */ + temp = *phy_reg; + temp |= BM_WUC_ENABLE_BIT; + temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT); + + ret_val = igc_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, temp); + if (ret_val) { + DEBUGOUT2("Could not write PHY register %d.%d\n", + BM_PORT_CTRL_PAGE, BM_WUC_ENABLE_REG); + return ret_val; + } + + /* Select Host Wakeup Registers page - caller now able to write + * registers on the Wakeup registers page + */ + return igc_set_page_igp(hw, (BM_WUC_PAGE << IGP_PAGE_SHIFT)); +} + +/** + * igc_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs + * @hw: pointer to the HW structure + * @phy_reg: pointer to original contents of BM_WUC_ENABLE_REG + * + * Restore BM_WUC_ENABLE_REG to its original value. + * + * Assumes semaphore already acquired and *phy_reg is the contents of the + * BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by + * caller. 
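+ *
+ * Typical pairing with igc_enable_phy_wakeup_reg_access_bm(), shown as an
+ * illustrative sketch (assumes the PHY semaphore is already held):
+ *	u16 wuc_enable;
+ *	ret_val = igc_enable_phy_wakeup_reg_access_bm(hw, &wuc_enable);
+ *	if (!ret_val) {
+ *		... access registers on BM_WUC_PAGE ...
+ *		ret_val = igc_disable_phy_wakeup_reg_access_bm(hw,
+ *							       &wuc_enable);
+ *	}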
+ **/ +s32 igc_disable_phy_wakeup_reg_access_bm(struct igc_hw *hw, u16 *phy_reg) +{ + s32 ret_val; + + DEBUGFUNC("igc_disable_phy_wakeup_reg_access_bm"); + + if (!phy_reg) + return -IGC_ERR_PARAM; + + /* Select Port Control Registers page */ + ret_val = igc_set_page_igp(hw, (BM_PORT_CTRL_PAGE << IGP_PAGE_SHIFT)); + if (ret_val) { + DEBUGOUT("Could not set Port Control page\n"); + return ret_val; + } + + /* Restore 769.17 to its original value */ + ret_val = igc_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, *phy_reg); + if (ret_val) + DEBUGOUT2("Could not restore PHY register %d.%d\n", + BM_PORT_CTRL_PAGE, BM_WUC_ENABLE_REG); + + return ret_val; +} + +/** + * igc_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register + * @hw: pointer to the HW structure + * @offset: register offset to be read or written + * @data: pointer to the data to read or write + * @read: determines if operation is read or write + * @page_set: BM_WUC_PAGE already set and access enabled + * + * Read the PHY register at offset and store the retrieved information in + * data, or write data to PHY register at offset. Note the procedure to + * access the PHY wakeup registers is different than reading the other PHY + * registers. It works as such: + * 1) Set 769.17.2 (page 769, register 17, bit 2) = 1 + * 2) Set page to 800 for host (801 if we were manageability) + * 3) Write the address using the address opcode (0x11) + * 4) Read or write the data using the data opcode (0x12) + * 5) Restore 769.17.2 to its original value + * + * Steps 1 and 2 are done by igc_enable_phy_wakeup_reg_access_bm() and + * step 5 is done by igc_disable_phy_wakeup_reg_access_bm(). + * + * Assumes semaphore is already acquired. When page_set==true, assumes + * the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack + * is responsible for calls to igc_[enable|disable]_phy_wakeup_reg_bm()). 
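+ *
+ * @offset encodes both the page and the register number; the function
+ * splits it with BM_PHY_REG_PAGE() and BM_PHY_REG_NUM() before issuing
+ * the address (0x11) and data (0x12) opcode accesses described above.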
+ **/ +static s32 igc_access_phy_wakeup_reg_bm(struct igc_hw *hw, u32 offset, + u16 *data, bool read, bool page_set) +{ + s32 ret_val; + u16 reg = BM_PHY_REG_NUM(offset); + u16 page = BM_PHY_REG_PAGE(offset); + u16 phy_reg = 0; + + DEBUGFUNC("igc_access_phy_wakeup_reg_bm"); + + /* Gig must be disabled for MDIO accesses to Host Wakeup reg page */ + if (hw->mac.type == igc_pchlan && + !(IGC_READ_REG(hw, IGC_PHY_CTRL) & IGC_PHY_CTRL_GBE_DISABLE)) + DEBUGOUT1("Attempting to access page %d while gig enabled.\n", + page); + + if (!page_set) { + /* Enable access to PHY wakeup registers */ + ret_val = igc_enable_phy_wakeup_reg_access_bm(hw, &phy_reg); + if (ret_val) { + DEBUGOUT("Could not enable PHY wakeup reg access\n"); + return ret_val; + } + } + + DEBUGOUT2("Accessing PHY page %d reg 0x%x\n", page, reg); + + /* Write the Wakeup register page offset value using opcode 0x11 */ + ret_val = igc_write_phy_reg_mdic(hw, BM_WUC_ADDRESS_OPCODE, reg); + if (ret_val) { + DEBUGOUT1("Could not write address opcode to page %d\n", page); + return ret_val; + } + + if (read) { + /* Read the Wakeup register page value using opcode 0x12 */ + ret_val = igc_read_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE, + data); + } else { + /* Write the Wakeup register page value using opcode 0x12 */ + ret_val = igc_write_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE, + *data); + } + + if (ret_val) { + DEBUGOUT2("Could not access PHY reg %d.%d\n", page, reg); + return ret_val; + } + + if (!page_set) + ret_val = igc_disable_phy_wakeup_reg_access_bm(hw, &phy_reg); + + return ret_val; +} + +/** + * igc_power_up_phy_copper - Restore copper link in case of PHY power down + * @hw: pointer to the HW structure + * + * In the case of a PHY power down to save power, or to turn off link during a + * driver unload, or wake on lan is not enabled, restore the link to previous + * settings. + **/ +void igc_power_up_phy_copper(struct igc_hw *hw) +{ + u16 mii_reg = 0; + + /* The PHY will retain its settings across a power down/up cycle */ + hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg); + mii_reg &= ~MII_CR_POWER_DOWN; + hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg); +} + +/** + * igc_power_down_phy_copper - Restore copper link in case of PHY power down + * @hw: pointer to the HW structure + * + * In the case of a PHY power down to save power, or to turn off link during a + * driver unload, or wake on lan is not enabled, restore the link to previous + * settings. + **/ +void igc_power_down_phy_copper(struct igc_hw *hw) +{ + u16 mii_reg = 0; + + /* The PHY will retain its settings across a power down/up cycle */ + hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg); + mii_reg |= MII_CR_POWER_DOWN; + hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg); + msec_delay(1); +} + +/** + * __igc_read_phy_reg_hv - Read HV PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * @locked: semaphore has already been acquired or not + * @page_set: BM_WUC_PAGE already set and access enabled + * + * Acquires semaphore, if necessary, then reads the PHY register at offset + * and stores the retrieved information in data. Release any acquired + * semaphore before exiting. 
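+ *
+ * The @locked/@page_set combinations are exposed through the wrappers
+ * igc_read_phy_reg_hv() (false/false), igc_read_phy_reg_hv_locked()
+ * (true/false) and igc_read_phy_reg_page_hv() (true/true) defined below.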
+ **/ +static s32 __igc_read_phy_reg_hv(struct igc_hw *hw, u32 offset, u16 *data, + bool locked, bool page_set) +{ + s32 ret_val; + u16 page = BM_PHY_REG_PAGE(offset); + u16 reg = BM_PHY_REG_NUM(offset); + u32 phy_addr = hw->phy.addr = igc_get_phy_addr_for_hv_page(page); + + DEBUGFUNC("__igc_read_phy_reg_hv"); + + if (!locked) { + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + } + /* Page 800 works differently than the rest so it has its own func */ + if (page == BM_WUC_PAGE) { + ret_val = igc_access_phy_wakeup_reg_bm(hw, offset, data, + true, page_set); + goto out; + } + + if (page > 0 && page < HV_INTC_FC_PAGE_START) { + ret_val = igc_access_phy_debug_regs_hv(hw, offset, + data, true); + goto out; + } + + if (!page_set) { + if (page == HV_INTC_FC_PAGE_START) + page = 0; + + if (reg > MAX_PHY_MULTI_PAGE_REG) { + /* Page is shifted left, PHY expects (page x 32) */ + ret_val = igc_set_page_igp(hw, + (page << IGP_PAGE_SHIFT)); + + hw->phy.addr = phy_addr; + + if (ret_val) + goto out; + } + } + + DEBUGOUT3("reading PHY page %d (or 0x%x shifted) reg 0x%x\n", page, + page << IGP_PAGE_SHIFT, reg); + + ret_val = igc_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg, + data); +out: + if (!locked) + hw->phy.ops.release(hw); + + return ret_val; +} + +/** + * igc_read_phy_reg_hv - Read HV PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Acquires semaphore then reads the PHY register at offset and stores + * the retrieved information in data. Release the acquired semaphore + * before exiting. + **/ +s32 igc_read_phy_reg_hv(struct igc_hw *hw, u32 offset, u16 *data) +{ + return __igc_read_phy_reg_hv(hw, offset, data, false, false); +} + +/** + * igc_read_phy_reg_hv_locked - Read HV PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the PHY register at offset and stores the retrieved information + * in data. Assumes semaphore already acquired. + **/ +s32 igc_read_phy_reg_hv_locked(struct igc_hw *hw, u32 offset, u16 *data) +{ + return __igc_read_phy_reg_hv(hw, offset, data, true, false); +} + +/** + * igc_read_phy_reg_page_hv - Read HV PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Reads the PHY register at offset and stores the retrieved information + * in data. Assumes semaphore already acquired and page already set. + **/ +s32 igc_read_phy_reg_page_hv(struct igc_hw *hw, u32 offset, u16 *data) +{ + return __igc_read_phy_reg_hv(hw, offset, data, true, true); +} + +/** + * __igc_write_phy_reg_hv - Write HV PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * @locked: semaphore has already been acquired or not + * @page_set: BM_WUC_PAGE already set and access enabled + * + * Acquires semaphore, if necessary, then writes the data to PHY register + * at the offset. Release any acquired semaphores before exiting. 
+ **/ +static s32 __igc_write_phy_reg_hv(struct igc_hw *hw, u32 offset, u16 data, + bool locked, bool page_set) +{ + s32 ret_val; + u16 page = BM_PHY_REG_PAGE(offset); + u16 reg = BM_PHY_REG_NUM(offset); + u32 phy_addr = hw->phy.addr = igc_get_phy_addr_for_hv_page(page); + + DEBUGFUNC("__igc_write_phy_reg_hv"); + + if (!locked) { + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + } + /* Page 800 works differently than the rest so it has its own func */ + if (page == BM_WUC_PAGE) { + ret_val = igc_access_phy_wakeup_reg_bm(hw, offset, &data, + false, page_set); + goto out; + } + + if (page > 0 && page < HV_INTC_FC_PAGE_START) { + ret_val = igc_access_phy_debug_regs_hv(hw, offset, + &data, false); + goto out; + } + + if (!page_set) { + if (page == HV_INTC_FC_PAGE_START) + page = 0; + + /* + * Workaround MDIO accesses being disabled after entering IEEE + * Power Down (when bit 11 of the PHY Control register is set) + */ + if (hw->phy.type == igc_phy_82578 && + hw->phy.revision >= 1 && + hw->phy.addr == 2 && + !(MAX_PHY_REG_ADDRESS & reg) && + (data & (1 << 11))) { + u16 data2 = 0x7EFF; + ret_val = igc_access_phy_debug_regs_hv(hw, + (1 << 6) | 0x3, + &data2, false); + if (ret_val) + goto out; + } + + if (reg > MAX_PHY_MULTI_PAGE_REG) { + /* Page is shifted left, PHY expects (page x 32) */ + ret_val = igc_set_page_igp(hw, + (page << IGP_PAGE_SHIFT)); + + hw->phy.addr = phy_addr; + + if (ret_val) + goto out; + } + } + + DEBUGOUT3("writing PHY page %d (or 0x%x shifted) reg 0x%x\n", page, + page << IGP_PAGE_SHIFT, reg); + + ret_val = igc_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg, + data); + +out: + if (!locked) + hw->phy.ops.release(hw); + + return ret_val; +} + +/** + * igc_write_phy_reg_hv - Write HV PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore then writes the data to PHY register at the offset. + * Release the acquired semaphores before exiting. + **/ +s32 igc_write_phy_reg_hv(struct igc_hw *hw, u32 offset, u16 data) +{ + return __igc_write_phy_reg_hv(hw, offset, data, false, false); +} + +/** + * igc_write_phy_reg_hv_locked - Write HV PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Writes the data to PHY register at the offset. Assumes semaphore + * already acquired. + **/ +s32 igc_write_phy_reg_hv_locked(struct igc_hw *hw, u32 offset, u16 data) +{ + return __igc_write_phy_reg_hv(hw, offset, data, true, false); +} + +/** + * igc_write_phy_reg_page_hv - Write HV PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Writes the data to PHY register at the offset. Assumes semaphore + * already acquired and page already set. 
+ **/ +s32 igc_write_phy_reg_page_hv(struct igc_hw *hw, u32 offset, u16 data) +{ + return __igc_write_phy_reg_hv(hw, offset, data, true, true); +} + +/** + * igc_get_phy_addr_for_hv_page - Get PHY address based on page + * @page: page to be accessed + **/ +static u32 igc_get_phy_addr_for_hv_page(u32 page) +{ + u32 phy_addr = 2; + + if (page >= HV_INTC_FC_PAGE_START) + phy_addr = 1; + + return phy_addr; +} + +/** + * igc_access_phy_debug_regs_hv - Read HV PHY vendor specific high registers + * @hw: pointer to the HW structure + * @offset: register offset to be read or written + * @data: pointer to the data to be read or written + * @read: determines if operation is read or write + * + * Reads the PHY register at offset and stores the retrieved information + * in data. Assumes semaphore already acquired. Note that the procedure + * to access these regs uses the address port and data port to read/write. + * These accesses done with PHY address 2 and without using pages. + **/ +static s32 igc_access_phy_debug_regs_hv(struct igc_hw *hw, u32 offset, + u16 *data, bool read) +{ + s32 ret_val; + u32 addr_reg; + u32 data_reg; + + DEBUGFUNC("igc_access_phy_debug_regs_hv"); + + /* This takes care of the difference with desktop vs mobile phy */ + addr_reg = ((hw->phy.type == igc_phy_82578) ? + I82578_ADDR_REG : I82577_ADDR_REG); + data_reg = addr_reg + 1; + + /* All operations in this function are phy address 2 */ + hw->phy.addr = 2; + + /* masking with 0x3F to remove the page from offset */ + ret_val = igc_write_phy_reg_mdic(hw, addr_reg, (u16)offset & 0x3F); + if (ret_val) { + DEBUGOUT("Could not write the Address Offset port register\n"); + return ret_val; + } + + /* Read or write the data value next */ + if (read) + ret_val = igc_read_phy_reg_mdic(hw, data_reg, data); + else + ret_val = igc_write_phy_reg_mdic(hw, data_reg, *data); + + if (ret_val) + DEBUGOUT("Could not access the Data port register\n"); + + return ret_val; +} + +/** + * igc_link_stall_workaround_hv - Si workaround + * @hw: pointer to the HW structure + * + * This function works around a Si bug where the link partner can get + * a link up indication before the PHY does. If small packets are sent + * by the link partner they can be placed in the packet buffer without + * being properly accounted for by the PHY and will stall preventing + * further packets from being received. The workaround is to clear the + * packet buffer after the PHY detects link up. + **/ +s32 igc_link_stall_workaround_hv(struct igc_hw *hw) +{ + s32 ret_val = IGC_SUCCESS; + u16 data; + + DEBUGFUNC("igc_link_stall_workaround_hv"); + + if (hw->phy.type != igc_phy_82578) + return IGC_SUCCESS; + + /* Do not apply workaround if in PHY loopback bit 14 set */ + hw->phy.ops.read_reg(hw, PHY_CONTROL, &data); + if (data & PHY_CONTROL_LB) + return IGC_SUCCESS; + + /* check if link is up and at 1Gbps */ + ret_val = hw->phy.ops.read_reg(hw, BM_CS_STATUS, &data); + if (ret_val) + return ret_val; + + data &= (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED | + BM_CS_STATUS_SPEED_MASK); + + if (data != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED | + BM_CS_STATUS_SPEED_1000)) + return IGC_SUCCESS; + + msec_delay(200); + + /* flush the packets in the fifo buffer */ + ret_val = hw->phy.ops.write_reg(hw, HV_MUX_DATA_CTRL, + (HV_MUX_DATA_CTRL_GEN_TO_MAC | + HV_MUX_DATA_CTRL_FORCE_SPEED)); + if (ret_val) + return ret_val; + + return hw->phy.ops.write_reg(hw, HV_MUX_DATA_CTRL, + HV_MUX_DATA_CTRL_GEN_TO_MAC); +} + +/** + * igc_check_polarity_82577 - Checks the polarity. 
+ * @hw: pointer to the HW structure + * + * Success returns 0, Failure returns -IGC_ERR_PHY (-2) + * + * Polarity is determined based on the PHY specific status register. + **/ +s32 igc_check_polarity_82577(struct igc_hw *hw) +{ + struct igc_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + + DEBUGFUNC("igc_check_polarity_82577"); + + ret_val = phy->ops.read_reg(hw, I82577_PHY_STATUS_2, &data); + + if (!ret_val) + phy->cable_polarity = ((data & I82577_PHY_STATUS2_REV_POLARITY) + ? igc_rev_polarity_reversed + : igc_rev_polarity_normal); + + return ret_val; +} + +/** + * igc_phy_force_speed_duplex_82577 - Force speed/duplex for I82577 PHY + * @hw: pointer to the HW structure + * + * Calls the PHY setup function to force speed and duplex. + **/ +s32 igc_phy_force_speed_duplex_82577(struct igc_hw *hw) +{ + struct igc_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + bool link = false; + + DEBUGFUNC("igc_phy_force_speed_duplex_82577"); + + ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data); + if (ret_val) + return ret_val; + + igc_phy_force_speed_duplex_setup(hw, &phy_data); + + ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data); + if (ret_val) + return ret_val; + + usec_delay(1); + + if (phy->autoneg_wait_to_complete) { + DEBUGOUT("Waiting for forced speed/duplex link on 82577 phy\n"); + + ret_val = igc_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); + if (ret_val) + return ret_val; + + if (!link) + DEBUGOUT("Link taking longer than expected.\n"); + + /* Try once more */ + ret_val = igc_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); + } + + return ret_val; +} + +/** + * igc_get_phy_info_82577 - Retrieve I82577 PHY information + * @hw: pointer to the HW structure + * + * Read PHY status to determine if link is up. If link is up, then + * set/determine 10base-T extended distance and polarity correction. Read + * PHY port status to determine MDI/MDIx and speed. Based on the speed, + * determine on the cable length, local and remote receiver. + **/ +s32 igc_get_phy_info_82577(struct igc_hw *hw) +{ + struct igc_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + bool link; + + DEBUGFUNC("igc_get_phy_info_82577"); + + ret_val = igc_phy_has_link_generic(hw, 1, 0, &link); + if (ret_val) + return ret_val; + + if (!link) { + DEBUGOUT("Phy info is only valid if link is up\n"); + return -IGC_ERR_CONFIG; + } + + phy->polarity_correction = true; + + ret_val = igc_check_polarity_82577(hw); + if (ret_val) + return ret_val; + + ret_val = phy->ops.read_reg(hw, I82577_PHY_STATUS_2, &data); + if (ret_val) + return ret_val; + + phy->is_mdix = !!(data & I82577_PHY_STATUS2_MDIX); + + if ((data & I82577_PHY_STATUS2_SPEED_MASK) == + I82577_PHY_STATUS2_SPEED_1000MBPS) { + ret_val = hw->phy.ops.get_cable_length(hw); + if (ret_val) + return ret_val; + + ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data); + if (ret_val) + return ret_val; + + phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS) + ? igc_1000t_rx_status_ok + : igc_1000t_rx_status_not_ok; + + phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS) + ? 
igc_1000t_rx_status_ok + : igc_1000t_rx_status_not_ok; + } else { + phy->cable_length = IGC_CABLE_LENGTH_UNDEFINED; + phy->local_rx = igc_1000t_rx_status_undefined; + phy->remote_rx = igc_1000t_rx_status_undefined; + } + + return IGC_SUCCESS; +} + +/** + * igc_get_cable_length_82577 - Determine cable length for 82577 PHY + * @hw: pointer to the HW structure + * + * Reads the diagnostic status register and verifies result is valid before + * placing it in the phy_cable_length field. + **/ +s32 igc_get_cable_length_82577(struct igc_hw *hw) +{ + struct igc_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, length; + + DEBUGFUNC("igc_get_cable_length_82577"); + + ret_val = phy->ops.read_reg(hw, I82577_PHY_DIAG_STATUS, &phy_data); + if (ret_val) + return ret_val; + + length = ((phy_data & I82577_DSTATUS_CABLE_LENGTH) >> + I82577_DSTATUS_CABLE_LENGTH_SHIFT); + + if (length == IGC_CABLE_LENGTH_UNDEFINED) + return -IGC_ERR_PHY; + + phy->cable_length = length; + + return IGC_SUCCESS; +} + +/** + * igc_write_phy_reg_gs40g - Write GS40G PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore, if necessary, then writes the data to PHY register + * at the offset. Release any acquired semaphores before exiting. + **/ +s32 igc_write_phy_reg_gs40g(struct igc_hw *hw, u32 offset, u16 data) +{ + s32 ret_val; + u16 page = offset >> GS40G_PAGE_SHIFT; + + DEBUGFUNC("igc_write_phy_reg_gs40g"); + + offset = offset & GS40G_OFFSET_MASK; + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + ret_val = igc_write_phy_reg_mdic(hw, GS40G_PAGE_SELECT, page); + if (ret_val) + goto release; + ret_val = igc_write_phy_reg_mdic(hw, offset, data); + +release: + hw->phy.ops.release(hw); + return ret_val; +} + +/** + * igc_read_phy_reg_gs40g - Read GS40G PHY register + * @hw: pointer to the HW structure + * @offset: lower half is register offset to read to + * upper half is page to use. + * @data: data to read at register offset + * + * Acquires semaphore, if necessary, then reads the data in the PHY register + * at the offset. Release any acquired semaphores before exiting. + **/ +s32 igc_read_phy_reg_gs40g(struct igc_hw *hw, u32 offset, u16 *data) +{ + s32 ret_val; + u16 page = offset >> GS40G_PAGE_SHIFT; + + DEBUGFUNC("igc_read_phy_reg_gs40g"); + + offset = offset & GS40G_OFFSET_MASK; + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + ret_val = igc_write_phy_reg_mdic(hw, GS40G_PAGE_SELECT, page); + if (ret_val) + goto release; + ret_val = igc_read_phy_reg_mdic(hw, offset, data); + +release: + hw->phy.ops.release(hw); + return ret_val; +} + +/** + * igc_write_phy_reg_gpy - Write GPY PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore, if necessary, then writes the data to PHY register + * at the offset. Release any acquired semaphores before exiting. 
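+ *
+ * The MMD device address travels in the upper bits of @offset (extracted
+ * with GPY_MMD_MASK and GPY_MMD_SHIFT, defined elsewhere in the driver);
+ * an MMD of zero falls back to a plain MDIC write of the low GPY_REG_MASK
+ * bits. An illustrative call for MMD 3, register 0x8008 (both values
+ * hypothetical) could look like:
+ *	igc_write_phy_reg_gpy(hw, (3UL << GPY_MMD_SHIFT) | 0x8008, 0x0001);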
+ **/ +s32 igc_write_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 data) +{ + s32 ret_val; + u8 dev_addr = (offset & GPY_MMD_MASK) >> GPY_MMD_SHIFT; + + DEBUGFUNC("igc_write_phy_reg_gpy"); + + offset = offset & GPY_REG_MASK; + + if (!dev_addr) { + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + ret_val = igc_write_phy_reg_mdic(hw, offset, data); + if (ret_val) + return ret_val; + hw->phy.ops.release(hw); + } else { + ret_val = igc_write_xmdio_reg(hw, (u16)offset, dev_addr, + data); + } + return ret_val; +} + +/** + * igc_read_phy_reg_gpy - Read GPY PHY register + * @hw: pointer to the HW structure + * @offset: lower half is register offset to read to + * upper half is MMD to use. + * @data: data to read at register offset + * + * Acquires semaphore, if necessary, then reads the data in the PHY register + * at the offset. Release any acquired semaphores before exiting. + **/ +s32 igc_read_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 *data) +{ + s32 ret_val; + u8 dev_addr = (offset & GPY_MMD_MASK) >> GPY_MMD_SHIFT; + + DEBUGFUNC("igc_read_phy_reg_gpy"); + + offset = offset & GPY_REG_MASK; + + if (!dev_addr) { + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + ret_val = igc_read_phy_reg_mdic(hw, offset, data); + if (ret_val) + return ret_val; + hw->phy.ops.release(hw); + } else { + ret_val = igc_read_xmdio_reg(hw, (u16)offset, dev_addr, + data); + } + return ret_val; +} + +/** + * igc_read_phy_reg_mphy - Read mPHY control register + * @hw: pointer to the HW structure + * @address: address to be read + * @data: pointer to the read data + * + * Reads the mPHY control register in the PHY at offset and stores the + * information read to data. + **/ +s32 igc_read_phy_reg_mphy(struct igc_hw *hw, u32 address, u32 *data) +{ + u32 mphy_ctrl = 0; + bool locked = false; + bool ready; + + DEBUGFUNC("igc_read_phy_reg_mphy"); + + /* Check if mPHY is ready to read/write operations */ + ready = igc_is_mphy_ready(hw); + if (!ready) + return -IGC_ERR_PHY; + + /* Check if mPHY access is disabled and enable it if so */ + mphy_ctrl = IGC_READ_REG(hw, IGC_MPHY_ADDR_CTRL); + if (mphy_ctrl & IGC_MPHY_DIS_ACCESS) { + locked = true; + ready = igc_is_mphy_ready(hw); + if (!ready) + return -IGC_ERR_PHY; + mphy_ctrl |= IGC_MPHY_ENA_ACCESS; + IGC_WRITE_REG(hw, IGC_MPHY_ADDR_CTRL, mphy_ctrl); + } + + /* Set the address that we want to read */ + ready = igc_is_mphy_ready(hw); + if (!ready) + return -IGC_ERR_PHY; + + /* We mask address, because we want to use only current lane */ + mphy_ctrl = (mphy_ctrl & ~IGC_MPHY_ADDRESS_MASK & + ~IGC_MPHY_ADDRESS_FNC_OVERRIDE) | + (address & IGC_MPHY_ADDRESS_MASK); + IGC_WRITE_REG(hw, IGC_MPHY_ADDR_CTRL, mphy_ctrl); + + /* Read data from the address */ + ready = igc_is_mphy_ready(hw); + if (!ready) + return -IGC_ERR_PHY; + *data = IGC_READ_REG(hw, IGC_MPHY_DATA); + + /* Disable access to mPHY if it was originally disabled */ + if (locked) + ready = igc_is_mphy_ready(hw); + if (!ready) + return -IGC_ERR_PHY; + IGC_WRITE_REG(hw, IGC_MPHY_ADDR_CTRL, + IGC_MPHY_DIS_ACCESS); + + return IGC_SUCCESS; +} + +/** + * igc_write_phy_reg_mphy - Write mPHY control register + * @hw: pointer to the HW structure + * @address: address to write to + * @data: data to write to register at offset + * @line_override: used when we want to use different line than default one + * + * Writes data to mPHY control register. 
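+ *
+ * When @line_override is true, IGC_MPHY_ADDRESS_FNC_OVERRIDE is set in
+ * IGC_MPHY_ADDR_CTRL so the access targets a line other than the default
+ * one for this function; otherwise the override bit is cleared.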
+ **/ +s32 igc_write_phy_reg_mphy(struct igc_hw *hw, u32 address, u32 data, + bool line_override) +{ + u32 mphy_ctrl = 0; + bool locked = false; + bool ready; + + DEBUGFUNC("igc_write_phy_reg_mphy"); + + /* Check if mPHY is ready to read/write operations */ + ready = igc_is_mphy_ready(hw); + if (!ready) + return -IGC_ERR_PHY; + + /* Check if mPHY access is disabled and enable it if so */ + mphy_ctrl = IGC_READ_REG(hw, IGC_MPHY_ADDR_CTRL); + if (mphy_ctrl & IGC_MPHY_DIS_ACCESS) { + locked = true; + ready = igc_is_mphy_ready(hw); + if (!ready) + return -IGC_ERR_PHY; + mphy_ctrl |= IGC_MPHY_ENA_ACCESS; + IGC_WRITE_REG(hw, IGC_MPHY_ADDR_CTRL, mphy_ctrl); + } + + /* Set the address that we want to read */ + ready = igc_is_mphy_ready(hw); + if (!ready) + return -IGC_ERR_PHY; + + /* We mask address, because we want to use only current lane */ + if (line_override) + mphy_ctrl |= IGC_MPHY_ADDRESS_FNC_OVERRIDE; + else + mphy_ctrl &= ~IGC_MPHY_ADDRESS_FNC_OVERRIDE; + mphy_ctrl = (mphy_ctrl & ~IGC_MPHY_ADDRESS_MASK) | + (address & IGC_MPHY_ADDRESS_MASK); + IGC_WRITE_REG(hw, IGC_MPHY_ADDR_CTRL, mphy_ctrl); + + /* Read data from the address */ + ready = igc_is_mphy_ready(hw); + if (!ready) + return -IGC_ERR_PHY; + IGC_WRITE_REG(hw, IGC_MPHY_DATA, data); + + /* Disable access to mPHY if it was originally disabled */ + if (locked) + ready = igc_is_mphy_ready(hw); + if (!ready) + return -IGC_ERR_PHY; + IGC_WRITE_REG(hw, IGC_MPHY_ADDR_CTRL, + IGC_MPHY_DIS_ACCESS); + + return IGC_SUCCESS; +} + +/** + * igc_is_mphy_ready - Check if mPHY control register is not busy + * @hw: pointer to the HW structure + * + * Returns mPHY control register status. + **/ +bool igc_is_mphy_ready(struct igc_hw *hw) +{ + u16 retry_count = 0; + u32 mphy_ctrl = 0; + bool ready = false; + + while (retry_count < 2) { + mphy_ctrl = IGC_READ_REG(hw, IGC_MPHY_ADDR_CTRL); + if (mphy_ctrl & IGC_MPHY_BUSY) { + usec_delay(20); + retry_count++; + continue; + } + ready = true; + break; + } + + if (!ready) + DEBUGOUT("ERROR READING mPHY control register, phy is busy.\n"); + + return ready; +} + +/** + * __igc_access_xmdio_reg - Read/write XMDIO register + * @hw: pointer to the HW structure + * @address: XMDIO address to program + * @dev_addr: device address to program + * @data: pointer to value to read/write from/to the XMDIO address + * @read: boolean flag to indicate read or write + **/ +static s32 __igc_access_xmdio_reg(struct igc_hw *hw, u16 address, + u8 dev_addr, u16 *data, bool read) +{ + s32 ret_val; + + DEBUGFUNC("__igc_access_xmdio_reg"); + + ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAC, dev_addr); + if (ret_val) + return ret_val; + + ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAAD, address); + if (ret_val) + return ret_val; + + ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAC, IGC_MMDAC_FUNC_DATA | + dev_addr); + if (ret_val) + return ret_val; + + if (read) + ret_val = hw->phy.ops.read_reg(hw, IGC_MMDAAD, data); + else + ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAAD, *data); + if (ret_val) + return ret_val; + + /* Recalibrate the device back to 0 */ + ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAC, 0); + if (ret_val) + return ret_val; + + return ret_val; +} + +/** + * igc_read_xmdio_reg - Read XMDIO register + * @hw: pointer to the HW structure + * @addr: XMDIO address to program + * @dev_addr: device address to program + * @data: value to be read from the EMI address + **/ +s32 igc_read_xmdio_reg(struct igc_hw *hw, u16 addr, u8 dev_addr, u16 *data) +{ + DEBUGFUNC("igc_read_xmdio_reg"); + + return __igc_access_xmdio_reg(hw, addr, 
dev_addr, data, true); +} + +/** + * igc_write_xmdio_reg - Write XMDIO register + * @hw: pointer to the HW structure + * @addr: XMDIO address to program + * @dev_addr: device address to program + * @data: value to be written to the XMDIO address + **/ +s32 igc_write_xmdio_reg(struct igc_hw *hw, u16 addr, u8 dev_addr, u16 data) +{ + DEBUGFUNC("igc_write_xmdio_reg"); + + return __igc_access_xmdio_reg(hw, addr, dev_addr, &data, + false); +} diff --git a/src/spdk/dpdk/drivers/net/igc/base/igc_phy.h b/src/spdk/dpdk/drivers/net/igc/base/igc_phy.h new file mode 100644 index 000000000..fbc0e7cbc --- /dev/null +++ b/src/spdk/dpdk/drivers/net/igc/base/igc_phy.h @@ -0,0 +1,337 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + +#ifndef _IGC_PHY_H_ +#define _IGC_PHY_H_ + +void igc_init_phy_ops_generic(struct igc_hw *hw); +s32 igc_null_read_reg(struct igc_hw *hw, u32 offset, u16 *data); +void igc_null_phy_generic(struct igc_hw *hw); +s32 igc_null_lplu_state(struct igc_hw *hw, bool active); +s32 igc_null_write_reg(struct igc_hw *hw, u32 offset, u16 data); +s32 igc_null_set_page(struct igc_hw *hw, u16 data); +s32 igc_read_i2c_byte_null(struct igc_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data); +s32 igc_write_i2c_byte_null(struct igc_hw *hw, u8 byte_offset, + u8 dev_addr, u8 data); +s32 igc_check_downshift_generic(struct igc_hw *hw); +s32 igc_check_polarity_m88(struct igc_hw *hw); +s32 igc_check_polarity_igp(struct igc_hw *hw); +s32 igc_check_polarity_ife(struct igc_hw *hw); +s32 igc_check_reset_block_generic(struct igc_hw *hw); +s32 igc_phy_setup_autoneg(struct igc_hw *hw); +s32 igc_copper_link_autoneg(struct igc_hw *hw); +s32 igc_copper_link_setup_igp(struct igc_hw *hw); +s32 igc_copper_link_setup_m88(struct igc_hw *hw); +s32 igc_copper_link_setup_m88_gen2(struct igc_hw *hw); +s32 igc_phy_force_speed_duplex_igp(struct igc_hw *hw); +s32 igc_phy_force_speed_duplex_m88(struct igc_hw *hw); +s32 igc_phy_force_speed_duplex_ife(struct igc_hw *hw); +s32 igc_get_cable_length_m88(struct igc_hw *hw); +s32 igc_get_cable_length_m88_gen2(struct igc_hw *hw); +s32 igc_get_cable_length_igp_2(struct igc_hw *hw); +s32 igc_get_cfg_done_generic(struct igc_hw *hw); +s32 igc_get_phy_id(struct igc_hw *hw); +s32 igc_get_phy_info_igp(struct igc_hw *hw); +s32 igc_get_phy_info_m88(struct igc_hw *hw); +s32 igc_get_phy_info_ife(struct igc_hw *hw); +s32 igc_phy_sw_reset_generic(struct igc_hw *hw); +void igc_phy_force_speed_duplex_setup(struct igc_hw *hw, u16 *phy_ctrl); +s32 igc_phy_hw_reset_generic(struct igc_hw *hw); +s32 igc_phy_reset_dsp_generic(struct igc_hw *hw); +s32 igc_read_kmrn_reg_generic(struct igc_hw *hw, u32 offset, u16 *data); +s32 igc_read_kmrn_reg_locked(struct igc_hw *hw, u32 offset, u16 *data); +s32 igc_set_page_igp(struct igc_hw *hw, u16 page); +s32 igc_read_phy_reg_igp(struct igc_hw *hw, u32 offset, u16 *data); +s32 igc_read_phy_reg_igp_locked(struct igc_hw *hw, u32 offset, u16 *data); +s32 igc_read_phy_reg_m88(struct igc_hw *hw, u32 offset, u16 *data); +s32 igc_set_d3_lplu_state_generic(struct igc_hw *hw, bool active); +s32 igc_setup_copper_link_generic(struct igc_hw *hw); +s32 igc_write_kmrn_reg_generic(struct igc_hw *hw, u32 offset, u16 data); +s32 igc_write_kmrn_reg_locked(struct igc_hw *hw, u32 offset, u16 data); +s32 igc_write_phy_reg_igp(struct igc_hw *hw, u32 offset, u16 data); +s32 igc_write_phy_reg_igp_locked(struct igc_hw *hw, u32 offset, u16 data); +s32 igc_write_phy_reg_m88(struct igc_hw *hw, u32 offset, u16 data); +s32 igc_phy_has_link_generic(struct 
igc_hw *hw, u32 iterations, + u32 usec_interval, bool *success); +s32 igc_phy_init_script_igp3(struct igc_hw *hw); +enum igc_phy_type igc_get_phy_type_from_id(u32 phy_id); +s32 igc_determine_phy_address(struct igc_hw *hw); +s32 igc_write_phy_reg_bm(struct igc_hw *hw, u32 offset, u16 data); +s32 igc_read_phy_reg_bm(struct igc_hw *hw, u32 offset, u16 *data); +s32 igc_enable_phy_wakeup_reg_access_bm(struct igc_hw *hw, u16 *phy_reg); +s32 igc_disable_phy_wakeup_reg_access_bm(struct igc_hw *hw, u16 *phy_reg); +s32 igc_read_phy_reg_bm2(struct igc_hw *hw, u32 offset, u16 *data); +s32 igc_write_phy_reg_bm2(struct igc_hw *hw, u32 offset, u16 data); +void igc_power_up_phy_copper(struct igc_hw *hw); +void igc_power_down_phy_copper(struct igc_hw *hw); +s32 igc_read_phy_reg_mdic(struct igc_hw *hw, u32 offset, u16 *data); +s32 igc_write_phy_reg_mdic(struct igc_hw *hw, u32 offset, u16 data); +s32 igc_read_phy_reg_i2c(struct igc_hw *hw, u32 offset, u16 *data); +s32 igc_write_phy_reg_i2c(struct igc_hw *hw, u32 offset, u16 data); +s32 igc_read_sfp_data_byte(struct igc_hw *hw, u16 offset, u8 *data); +s32 igc_write_sfp_data_byte(struct igc_hw *hw, u16 offset, u8 data); +s32 igc_read_phy_reg_hv(struct igc_hw *hw, u32 offset, u16 *data); +s32 igc_read_phy_reg_hv_locked(struct igc_hw *hw, u32 offset, u16 *data); +s32 igc_read_phy_reg_page_hv(struct igc_hw *hw, u32 offset, u16 *data); +s32 igc_write_phy_reg_hv(struct igc_hw *hw, u32 offset, u16 data); +s32 igc_write_phy_reg_hv_locked(struct igc_hw *hw, u32 offset, u16 data); +s32 igc_write_phy_reg_page_hv(struct igc_hw *hw, u32 offset, u16 data); +s32 igc_link_stall_workaround_hv(struct igc_hw *hw); +s32 igc_copper_link_setup_82577(struct igc_hw *hw); +s32 igc_check_polarity_82577(struct igc_hw *hw); +s32 igc_get_phy_info_82577(struct igc_hw *hw); +s32 igc_phy_force_speed_duplex_82577(struct igc_hw *hw); +s32 igc_get_cable_length_82577(struct igc_hw *hw); +s32 igc_write_phy_reg_gs40g(struct igc_hw *hw, u32 offset, u16 data); +s32 igc_read_phy_reg_gs40g(struct igc_hw *hw, u32 offset, u16 *data); +s32 igc_write_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 data); +s32 igc_read_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 *data); +s32 igc_read_phy_reg_mphy(struct igc_hw *hw, u32 address, u32 *data); +s32 igc_write_phy_reg_mphy(struct igc_hw *hw, u32 address, u32 data, + bool line_override); +bool igc_is_mphy_ready(struct igc_hw *hw); + +s32 igc_read_xmdio_reg(struct igc_hw *hw, u16 addr, u8 dev_addr, + u16 *data); +s32 igc_write_xmdio_reg(struct igc_hw *hw, u16 addr, u8 dev_addr, + u16 data); + +#define IGC_MAX_PHY_ADDR 8 + +/* IGP01E1000 Specific Registers */ +#define IGP01IGC_PHY_PORT_CONFIG 0x10 /* Port Config */ +#define IGP01IGC_PHY_PORT_STATUS 0x11 /* Status */ +#define IGP01IGC_PHY_PORT_CTRL 0x12 /* Control */ +#define IGP01IGC_PHY_LINK_HEALTH 0x13 /* PHY Link Health */ +#define IGP01IGC_GMII_FIFO 0x14 /* GMII FIFO */ +#define IGP02IGC_PHY_POWER_MGMT 0x19 /* Power Management */ +#define IGP01IGC_PHY_PAGE_SELECT 0x1F /* Page Select */ +#define BM_PHY_PAGE_SELECT 22 /* Page Select for BM */ +#define IGP_PAGE_SHIFT 5 +#define PHY_REG_MASK 0x1F + +/* GS40G - I210 PHY defines */ +#define GS40G_PAGE_SELECT 0x16 +#define GS40G_PAGE_SHIFT 16 +#define GS40G_OFFSET_MASK 0xFFFF +#define GS40G_PAGE_2 0x20000 +#define GS40G_MAC_REG2 0x15 +#define GS40G_MAC_LB 0x4140 +#define GS40G_MAC_SPEED_1G 0X0006 +#define GS40G_COPPER_SPEC 0x0010 + +#define IGC_I225_PHPM 0x0E14 /* I225 PHY Power Management */ +#define IGC_I225_PHPM_DIS_1000_D3 0x0008 /* Disable 1G in D3 */ +#define 
IGC_I225_PHPM_LINK_ENERGY 0x0010 /* Link Energy Detect */ +#define IGC_I225_PHPM_GO_LINKD 0x0020 /* Go Link Disconnect */ +#define IGC_I225_PHPM_DIS_1000 0x0040 /* Disable 1G globally */ +#define IGC_I225_PHPM_SPD_B2B_EN 0x0080 /* Smart Power Down Back2Back */ +#define IGC_I225_PHPM_RST_COMPL 0x0100 /* PHY Reset Completed */ +#define IGC_I225_PHPM_DIS_100_D3 0x0200 /* Disable 100M in D3 */ +#define IGC_I225_PHPM_ULP 0x0400 /* Ultra Low-Power Mode */ +#define IGC_I225_PHPM_DIS_2500 0x0800 /* Disable 2.5G globally */ +#define IGC_I225_PHPM_DIS_2500_D3 0x1000 /* Disable 2.5G in D3 */ +/* GPY211 - I225 defines */ +#define GPY_MMD_MASK 0xFFFF0000 +#define GPY_MMD_SHIFT 16 +#define GPY_REG_MASK 0x0000FFFF +/* BM/HV Specific Registers */ +#define BM_PORT_CTRL_PAGE 769 +#define BM_WUC_PAGE 800 +#define BM_WUC_ADDRESS_OPCODE 0x11 +#define BM_WUC_DATA_OPCODE 0x12 +#define BM_WUC_ENABLE_PAGE BM_PORT_CTRL_PAGE +#define BM_WUC_ENABLE_REG 17 +#define BM_WUC_ENABLE_BIT (1 << 2) +#define BM_WUC_HOST_WU_BIT (1 << 4) +#define BM_WUC_ME_WU_BIT (1 << 5) + +#define PHY_UPPER_SHIFT 21 + +#define BM_PHY_REG(page, reg) ( \ + __extension__ ({ \ + typeof(page) _page = (page); \ + typeof(reg) _reg = (reg); \ + (_reg & MAX_PHY_REG_ADDRESS) | \ + ((_page & 0xFFFF) << PHY_PAGE_SHIFT) | \ + ((_reg & ~MAX_PHY_REG_ADDRESS) << \ + (PHY_UPPER_SHIFT - PHY_PAGE_SHIFT)); \ + })) + +#define BM_PHY_REG_PAGE(offset) \ + ((u16)(((offset) >> PHY_PAGE_SHIFT) & 0xFFFF)) + +#define BM_PHY_REG_NUM(offset) ( \ + __extension__ ({ \ + typeof(offset) _offset = (offset); \ + (u16)((_offset & MAX_PHY_REG_ADDRESS) | \ + ((_offset >> (PHY_UPPER_SHIFT - PHY_PAGE_SHIFT)) & \ + ~MAX_PHY_REG_ADDRESS)); \ + })) + +#define HV_INTC_FC_PAGE_START 768 +#define I82578_ADDR_REG 29 +#define I82577_ADDR_REG 16 +#define I82577_CFG_REG 22 +#define I82577_CFG_ASSERT_CRS_ON_TX (1 << 15) +#define I82577_CFG_ENABLE_DOWNSHIFT (3 << 10) /* auto downshift */ +#define I82577_CTRL_REG 23 + +/* 82577 specific PHY registers */ +#define I82577_PHY_CTRL_2 18 +#define I82577_PHY_LBK_CTRL 19 +#define I82577_PHY_STATUS_2 26 +#define I82577_PHY_DIAG_STATUS 31 + +/* I82577 PHY Status 2 */ +#define I82577_PHY_STATUS2_REV_POLARITY 0x0400 +#define I82577_PHY_STATUS2_MDIX 0x0800 +#define I82577_PHY_STATUS2_SPEED_MASK 0x0300 +#define I82577_PHY_STATUS2_SPEED_1000MBPS 0x0200 + +/* I82577 PHY Control 2 */ +#define I82577_PHY_CTRL2_MANUAL_MDIX 0x0200 +#define I82577_PHY_CTRL2_AUTO_MDI_MDIX 0x0400 +#define I82577_PHY_CTRL2_MDIX_CFG_MASK 0x0600 + +/* I82577 PHY Diagnostics Status */ +#define I82577_DSTATUS_CABLE_LENGTH 0x03FC +#define I82577_DSTATUS_CABLE_LENGTH_SHIFT 2 + +/* 82580 PHY Power Management */ +#define IGC_82580_PHY_POWER_MGMT 0xE14 +#define IGC_82580_PM_SPD 0x0001 /* Smart Power Down */ +#define IGC_82580_PM_D0_LPLU 0x0002 /* For D0a states */ +#define IGC_82580_PM_D3_LPLU 0x0004 /* For all other states */ +#define IGC_82580_PM_GO_LINKD 0x0020 /* Go Link Disconnect */ + +#define IGC_MPHY_DIS_ACCESS 0x80000000 /* disable_access bit */ +#define IGC_MPHY_ENA_ACCESS 0x40000000 /* enable_access bit */ +#define IGC_MPHY_BUSY 0x00010000 /* busy bit */ +#define IGC_MPHY_ADDRESS_FNC_OVERRIDE 0x20000000 /* fnc_override bit */ +#define IGC_MPHY_ADDRESS_MASK 0x0000FFFF /* address mask */ + +/* BM PHY Copper Specific Control 1 */ +#define BM_CS_CTRL1 16 + +/* BM PHY Copper Specific Status */ +#define BM_CS_STATUS 17 +#define BM_CS_STATUS_LINK_UP 0x0400 +#define BM_CS_STATUS_RESOLVED 0x0800 +#define BM_CS_STATUS_SPEED_MASK 0xC000 +#define BM_CS_STATUS_SPEED_1000 0x8000 + +/* 82577 
Mobile Phy Status Register */ +#define HV_M_STATUS 26 +#define HV_M_STATUS_AUTONEG_COMPLETE 0x1000 +#define HV_M_STATUS_SPEED_MASK 0x0300 +#define HV_M_STATUS_SPEED_1000 0x0200 +#define HV_M_STATUS_SPEED_100 0x0100 +#define HV_M_STATUS_LINK_UP 0x0040 + +#define IGP01IGC_PHY_PCS_INIT_REG 0x00B4 +#define IGP01IGC_PHY_POLARITY_MASK 0x0078 + +#define IGP01IGC_PSCR_AUTO_MDIX 0x1000 +#define IGP01IGC_PSCR_FORCE_MDI_MDIX 0x2000 /* 0=MDI, 1=MDIX */ + +#define IGP01IGC_PSCFR_SMART_SPEED 0x0080 + +/* Enable flexible speed on link-up */ +#define IGP01IGC_GMII_FLEX_SPD 0x0010 +#define IGP01IGC_GMII_SPD 0x0020 /* Enable SPD */ + +#define IGP02IGC_PM_SPD 0x0001 /* Smart Power Down */ +#define IGP02IGC_PM_D0_LPLU 0x0002 /* For D0a states */ +#define IGP02IGC_PM_D3_LPLU 0x0004 /* For all other states */ + +#define IGP01IGC_PLHR_SS_DOWNGRADE 0x8000 + +#define IGP01IGC_PSSR_POLARITY_REVERSED 0x0002 +#define IGP01IGC_PSSR_MDIX 0x0800 +#define IGP01IGC_PSSR_SPEED_MASK 0xC000 +#define IGP01IGC_PSSR_SPEED_1000MBPS 0xC000 + +#define IGP02IGC_PHY_CHANNEL_NUM 4 +#define IGP02IGC_PHY_AGC_A 0x11B1 +#define IGP02IGC_PHY_AGC_B 0x12B1 +#define IGP02IGC_PHY_AGC_C 0x14B1 +#define IGP02IGC_PHY_AGC_D 0x18B1 + +#define IGP02IGC_AGC_LENGTH_SHIFT 9 /* Course=15:13, Fine=12:9 */ +#define IGP02IGC_AGC_LENGTH_MASK 0x7F +#define IGP02IGC_AGC_RANGE 15 + +#define IGC_CABLE_LENGTH_UNDEFINED 0xFF + +#define IGC_KMRNCTRLSTA_OFFSET 0x001F0000 +#define IGC_KMRNCTRLSTA_OFFSET_SHIFT 16 +#define IGC_KMRNCTRLSTA_REN 0x00200000 +#define IGC_KMRNCTRLSTA_CTRL_OFFSET 0x1 /* Kumeran Control */ +#define IGC_KMRNCTRLSTA_DIAG_OFFSET 0x3 /* Kumeran Diagnostic */ +#define IGC_KMRNCTRLSTA_TIMEOUTS 0x4 /* Kumeran Timeouts */ +#define IGC_KMRNCTRLSTA_INBAND_PARAM 0x9 /* Kumeran InBand Parameters */ +#define IGC_KMRNCTRLSTA_IBIST_DISABLE 0x0200 /* Kumeran IBIST Disable */ +#define IGC_KMRNCTRLSTA_DIAG_NELPBK 0x1000 /* Nearend Loopback mode */ +#define IGC_KMRNCTRLSTA_K1_CONFIG 0x7 +#define IGC_KMRNCTRLSTA_K1_ENABLE 0x0002 /* enable K1 */ +#define IGC_KMRNCTRLSTA_HD_CTRL 0x10 /* Kumeran HD Control */ +#define IGC_KMRNCTRLSTA_K0S_CTRL 0x1E /* Kumeran K0s Control */ +#define IGC_KMRNCTRLSTA_K0S_CTRL_ENTRY_LTNCY_SHIFT 0 +#define IGC_KMRNCTRLSTA_K0S_CTRL_MIN_TIME_SHIFT 4 +#define IGC_KMRNCTRLSTA_K0S_CTRL_ENTRY_LTNCY_MASK \ + (3 << IGC_KMRNCTRLSTA_K0S_CTRL_ENTRY_LTNCY_SHIFT) +#define IGC_KMRNCTRLSTA_K0S_CTRL_MIN_TIME_MASK \ + (7 << IGC_KMRNCTRLSTA_K0S_CTRL_MIN_TIME_SHIFT) +#define IGC_KMRNCTRLSTA_OP_MODES 0x1F /* Kumeran Modes of Operation */ +#define IGC_KMRNCTRLSTA_OP_MODES_LSC2CSC 0x0002 /* change LSC to CSC */ + +#define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10 +#define IFE_PHY_SPECIAL_CONTROL 0x11 /* 100BaseTx PHY Special Ctrl */ +#define IFE_PHY_SPECIAL_CONTROL_LED 0x1B /* PHY Special and LED Ctrl */ +#define IFE_PHY_MDIX_CONTROL 0x1C /* MDI/MDI-X Control */ + +/* IFE PHY Extended Status Control */ +#define IFE_PESC_POLARITY_REVERSED 0x0100 + +/* IFE PHY Special Control */ +#define IFE_PSC_AUTO_POLARITY_DISABLE 0x0010 +#define IFE_PSC_FORCE_POLARITY 0x0020 + +/* IFE PHY Special Control and LED Control */ +#define IFE_PSCL_PROBE_MODE 0x0020 +#define IFE_PSCL_PROBE_LEDS_OFF 0x0006 /* Force LEDs 0 and 2 off */ +#define IFE_PSCL_PROBE_LEDS_ON 0x0007 /* Force LEDs 0 and 2 on */ + +/* IFE PHY MDIX Control */ +#define IFE_PMC_MDIX_STATUS 0x0020 /* 1=MDI-X, 0=MDI */ +#define IFE_PMC_FORCE_MDIX 0x0040 /* 1=force MDI-X, 0=force MDI */ +#define IFE_PMC_AUTO_MDIX 0x0080 /* 1=enable auto, 0=disable */ + +/* SFP modules ID memory locations */ +#define 
IGC_SFF_IDENTIFIER_OFFSET 0x00 +#define IGC_SFF_IDENTIFIER_SFF 0x02 +#define IGC_SFF_IDENTIFIER_SFP 0x03 + +#define IGC_SFF_ETH_FLAGS_OFFSET 0x06 +/* Flags for SFP modules compatible with ETH up to 1Gb */ +struct sfp_igc_flags { + u8 igc_base_sx:1; + u8 igc_base_lx:1; + u8 igc_base_cx:1; + u8 igc_base_t:1; + u8 e100_base_lx:1; + u8 e100_base_fx:1; + u8 e10_base_bx10:1; + u8 e10_base_px:1; +}; + +/* Vendor OUIs: format of OUI is 0x[byte0][byte1][byte2][00] */ +#define IGC_SFF_VENDOR_OUI_TYCO 0x00407600 +#define IGC_SFF_VENDOR_OUI_FTL 0x00906500 +#define IGC_SFF_VENDOR_OUI_AVAGO 0x00176A00 +#define IGC_SFF_VENDOR_OUI_INTEL 0x001B2100 + +#endif diff --git a/src/spdk/dpdk/drivers/net/igc/base/igc_regs.h b/src/spdk/dpdk/drivers/net/igc/base/igc_regs.h new file mode 100644 index 000000000..d424387c7 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/igc/base/igc_regs.h @@ -0,0 +1,724 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + +#ifndef _IGC_REGS_H_ +#define _IGC_REGS_H_ + +/* General Register Descriptions */ +#define IGC_CTRL 0x00000 /* Device Control - RW */ +#define IGC_CTRL_DUP 0x00004 /* Device Control Duplicate (Shadow) - RW */ +#define IGC_STATUS 0x00008 /* Device Status - RO */ +#define IGC_EECD 0x00010 /* EEPROM/Flash Control - RW */ +/* NVM Register Descriptions */ +#define IGC_EERD 0x12014 /* EEprom mode read - RW */ +#define IGC_EEWR 0x12018 /* EEprom mode write - RW */ +#define IGC_CTRL_EXT 0x00018 /* Extended Device Control - RW */ +#define IGC_MDIC 0x00020 /* MDI Control - RW */ +#define IGC_MDICNFG 0x00E04 /* MDI Config - RW */ +#define IGC_REGISTER_SET_SIZE 0x20000 /* CSR Size */ +#define IGC_EEPROM_INIT_CTRL_WORD_2 0x0F /* EEPROM Init Ctrl Word 2 */ +#define IGC_EEPROM_PCIE_CTRL_WORD_2 0x28 /* EEPROM PCIe Ctrl Word 2 */ +#define IGC_BARCTRL 0x5BBC /* BAR ctrl reg */ +#define IGC_BARCTRL_FLSIZE 0x0700 /* BAR ctrl Flsize */ +#define IGC_BARCTRL_CSRSIZE 0x2000 /* BAR ctrl CSR size */ +#define IGC_MPHY_ADDR_CTRL 0x0024 /* GbE MPHY Address Control */ +#define IGC_MPHY_DATA 0x0E10 /* GBE MPHY Data */ +#define IGC_MPHY_STAT 0x0E0C /* GBE MPHY Statistics */ +#define IGC_PPHY_CTRL 0x5b48 /* PCIe PHY Control */ +#define IGC_I350_BARCTRL 0x5BFC /* BAR ctrl reg */ +#define IGC_I350_DTXMXPKTSZ 0x355C /* Maximum sent packet size reg*/ +#define IGC_SCTL 0x00024 /* SerDes Control - RW */ +#define IGC_FCAL 0x00028 /* Flow Control Address Low - RW */ +#define IGC_FCAH 0x0002C /* Flow Control Address High -RW */ +#define IGC_FEXT 0x0002C /* Future Extended - RW */ +#define IGC_I225_FLSWCTL 0x12048 /* FLASH control register */ +#define IGC_I225_FLSWDATA 0x1204C /* FLASH data register */ +#define IGC_I225_FLSWCNT 0x12050 /* FLASH Access Counter */ +#define IGC_I225_FLSECU 0x12114 /* FLASH Security */ +#define IGC_FEXTNVM 0x00028 /* Future Extended NVM - RW */ +#define IGC_FEXTNVM3 0x0003C /* Future Extended NVM 3 - RW */ +#define IGC_FEXTNVM4 0x00024 /* Future Extended NVM 4 - RW */ +#define IGC_FEXTNVM5 0x00014 /* Future Extended NVM 5 - RW */ +#define IGC_FEXTNVM6 0x00010 /* Future Extended NVM 6 - RW */ +#define IGC_FEXTNVM7 0x000E4 /* Future Extended NVM 7 - RW */ +#define IGC_FEXTNVM9 0x5BB4 /* Future Extended NVM 9 - RW */ +#define IGC_FEXTNVM11 0x5BBC /* Future Extended NVM 11 - RW */ +#define IGC_PCIEANACFG 0x00F18 /* PCIE Analog Config */ +#define IGC_FCT 0x00030 /* Flow Control Type - RW */ +#define IGC_CONNSW 0x00034 /* Copper/Fiber switch control - RW */ +#define IGC_VET 0x00038 /* VLAN Ether Type - RW */ +#define IGC_ICR 0x01500 /* Intr 
Cause Read - RC/W1C */ +#define IGC_ITR 0x000C4 /* Interrupt Throttling Rate - RW */ +#define IGC_ICS 0x01504 /* Intr Cause Set - WO */ +#define IGC_IMS 0x01508 /* Intr Mask Set/Read - RW */ +#define IGC_IMC 0x0150C /* Intr Mask Clear - WO */ +#define IGC_IAM 0x01510 /* Intr Ack Auto Mask- RW */ +#define IGC_IVAR 0x000E4 /* Interrupt Vector Allocation Register - RW */ +#define IGC_SVCR 0x000F0 +#define IGC_SVT 0x000F4 +#define IGC_LPIC 0x000FC /* Low Power IDLE control */ +#define IGC_RCTL 0x00100 /* Rx Control - RW */ +#define IGC_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */ +#define IGC_TXCW 0x00178 /* Tx Configuration Word - RW */ +#define IGC_RXCW 0x00180 /* Rx Configuration Word - RO */ +#define IGC_PBA_ECC 0x01100 /* PBA ECC Register */ +#define IGC_EICR 0x01580 /* Ext. Interrupt Cause Read - R/clr */ +#define IGC_EITR(_n) (0x01680 + (0x4 * (_n))) +#define IGC_EICS 0x01520 /* Ext. Interrupt Cause Set - W0 */ +#define IGC_EIMS 0x01524 /* Ext. Interrupt Mask Set/Read - RW */ +#define IGC_EIMC 0x01528 /* Ext. Interrupt Mask Clear - WO */ +#define IGC_EIAC 0x0152C /* Ext. Interrupt Auto Clear - RW */ +#define IGC_EIAM 0x01530 /* Ext. Interrupt Ack Auto Clear Mask - RW */ +#define IGC_GPIE 0x01514 /* General Purpose Interrupt Enable - RW */ +#define IGC_IVAR0 0x01700 /* Interrupt Vector Allocation (array) - RW */ +#define IGC_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */ +#define IGC_TCTL 0x00400 /* Tx Control - RW */ +#define IGC_TCTL_EXT 0x00404 /* Extended Tx Control - RW */ +#define IGC_TIPG 0x00410 /* Tx Inter-packet gap -RW */ +#define IGC_TBT 0x00448 /* Tx Burst Timer - RW */ +#define IGC_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */ +#define IGC_LEDCTL 0x00E00 /* LED Control - RW */ +#define IGC_LEDMUX 0x08130 /* LED MUX Control */ +#define IGC_EXTCNF_CTRL 0x00F00 /* Extended Configuration Control */ +#define IGC_EXTCNF_SIZE 0x00F08 /* Extended Configuration Size */ +#define IGC_PHY_CTRL 0x00F10 /* PHY Control Register in CSR */ +#define IGC_POEMB IGC_PHY_CTRL /* PHY OEM Bits */ +#define IGC_PBA 0x01000 /* Packet Buffer Allocation - RW */ +#define IGC_PBS 0x01008 /* Packet Buffer Size */ +#define IGC_PBECCSTS 0x0100C /* Packet Buffer ECC Status - RW */ +#define IGC_IOSFPC 0x00F28 /* TX corrupted data */ +#define IGC_EEMNGCTL 0x01010 /* MNG EEprom Control */ +#define IGC_EEMNGCTL_I210 0x01010 /* i210 MNG EEprom Mode Control */ +#define IGC_EEMNGCTL_I225 0x01010 /* i225 MNG EEprom Mode Control */ +#define IGC_EEARBC 0x01024 /* EEPROM Auto Read Bus Control */ +#define IGC_EEARBC_I210 0x12024 /* EEPROM Auto Read Bus Control */ +#define IGC_EEARBC_I225 0x12024 /* EEPROM Auto Read Bus Control */ +#define IGC_FLASHT 0x01028 /* FLASH Timer Register */ +#define IGC_FLSWCTL 0x01030 /* FLASH control register */ +#define IGC_FLSWDATA 0x01034 /* FLASH data register */ +#define IGC_FLSWCNT 0x01038 /* FLASH Access Counter */ +#define IGC_FLOP 0x0103C /* FLASH Opcode Register */ +#define IGC_I2CCMD 0x01028 /* SFPI2C Command Register - RW */ +#define IGC_I2CPARAMS 0x0102C /* SFPI2C Parameters Register - RW */ +#define IGC_I2CBB_EN 0x00000100 /* I2C - Bit Bang Enable */ +#define IGC_I2C_CLK_OUT 0x00000200 /* I2C- Clock */ +#define IGC_I2C_DATA_OUT 0x00000400 /* I2C- Data Out */ +#define IGC_I2C_DATA_OE_N 0x00000800 /* I2C- Data Output Enable */ +#define IGC_I2C_DATA_IN 0x00001000 /* I2C- Data In */ +#define IGC_I2C_CLK_OE_N 0x00002000 /* I2C- Clock Output Enable */ +#define IGC_I2C_CLK_IN 0x00004000 /* I2C- Clock In */ +#define IGC_I2C_CLK_STRETCH_DIS 0x00008000 
/* I2C- Dis Clk Stretching */ +#define IGC_WDSTP 0x01040 /* Watchdog Setup - RW */ +#define IGC_SWDSTS 0x01044 /* SW Device Status - RW */ +#define IGC_FRTIMER 0x01048 /* Free Running Timer - RW */ +#define IGC_TCPTIMER 0x0104C /* TCP Timer - RW */ +#define IGC_VPDDIAG 0x01060 /* VPD Diagnostic - RO */ +#define IGC_ICR_V2 0x01500 /* Intr Cause - new location - RC */ +#define IGC_ICS_V2 0x01504 /* Intr Cause Set - new location - WO */ +#define IGC_IMS_V2 0x01508 /* Intr Mask Set/Read - new location - RW */ +#define IGC_IMC_V2 0x0150C /* Intr Mask Clear - new location - WO */ +#define IGC_IAM_V2 0x01510 /* Intr Ack Auto Mask - new location - RW */ +#define IGC_ERT 0x02008 /* Early Rx Threshold - RW */ +#define IGC_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */ +#define IGC_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */ +#define IGC_PSRCTL 0x02170 /* Packet Split Receive Control - RW */ +#define IGC_RDFH 0x02410 /* Rx Data FIFO Head - RW */ +#define IGC_RDFT 0x02418 /* Rx Data FIFO Tail - RW */ +#define IGC_RDFHS 0x02420 /* Rx Data FIFO Head Saved - RW */ +#define IGC_RDFTS 0x02428 /* Rx Data FIFO Tail Saved - RW */ +#define IGC_RDFPC 0x02430 /* Rx Data FIFO Packet Count - RW */ +#define IGC_PBRTH 0x02458 /* PB Rx Arbitration Threshold - RW */ +#define IGC_FCRTV 0x02460 /* Flow Control Refresh Timer Value - RW */ +/* Split and Replication Rx Control - RW */ +#define IGC_RDPUMB 0x025CC /* DMA Rx Descriptor uC Mailbox - RW */ +#define IGC_RDPUAD 0x025D0 /* DMA Rx Descriptor uC Addr Command - RW */ +#define IGC_RDPUWD 0x025D4 /* DMA Rx Descriptor uC Data Write - RW */ +#define IGC_RDPURD 0x025D8 /* DMA Rx Descriptor uC Data Read - RW */ +#define IGC_RDPUCTL 0x025DC /* DMA Rx Descriptor uC Control - RW */ +#define IGC_PBDIAG 0x02458 /* Packet Buffer Diagnostic - RW */ +#define IGC_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */ +#define IGC_IRPBS 0x02404 /* Same as RXPBS, renamed for newer Si - RW */ +#define IGC_PBRWAC 0x024E8 /* Rx packet buffer wrap around counter - RO */ +#define IGC_RDTR 0x02820 /* Rx Delay Timer - RW */ +#define IGC_RADV 0x0282C /* Rx Interrupt Absolute Delay Timer - RW */ +#define IGC_EMIADD 0x10 /* Extended Memory Indirect Address */ +#define IGC_EMIDATA 0x11 /* Extended Memory Indirect Data */ +/* Shadow Ram Write Register - RW */ +#define IGC_SRWR 0x12018 +#define IGC_EEC_REG 0x12010 + +#define IGC_I210_FLMNGCTL 0x12038 +#define IGC_I210_FLMNGDATA 0x1203C +#define IGC_I210_FLMNGCNT 0x12040 + +#define IGC_I210_FLSWCTL 0x12048 +#define IGC_I210_FLSWDATA 0x1204C +#define IGC_I210_FLSWCNT 0x12050 + +#define IGC_I210_FLA 0x1201C + +#define IGC_SHADOWINF 0x12068 +#define IGC_FLFWUPDATE 0x12108 + +#define IGC_INVM_DATA_REG(_n) (0x12120 + 4 * (_n)) +#define IGC_INVM_SIZE 64 /* Number of INVM Data Registers */ + +/* QAV Tx mode control register */ +#define IGC_I210_TQAVCTRL 0x3570 + +/* QAV Tx mode control register bitfields masks */ +/* QAV enable */ +#define IGC_TQAVCTRL_MODE (1 << 0) +/* Fetching arbitration type */ +#define IGC_TQAVCTRL_FETCH_ARB (1 << 4) +/* Fetching timer enable */ +#define IGC_TQAVCTRL_FETCH_TIMER_ENABLE (1 << 5) +/* Launch arbitration type */ +#define IGC_TQAVCTRL_LAUNCH_ARB (1 << 8) +/* Launch timer enable */ +#define IGC_TQAVCTRL_LAUNCH_TIMER_ENABLE (1 << 9) +/* SP waits for SR enable */ +#define IGC_TQAVCTRL_SP_WAIT_SR (1 << 10) +/* Fetching timer correction */ +#define IGC_TQAVCTRL_FETCH_TIMER_DELTA_OFFSET 16 +#define IGC_TQAVCTRL_FETCH_TIMER_DELTA \ + (0xFFFF << IGC_TQAVCTRL_FETCH_TIMER_DELTA_OFFSET) + +/* High credit 
registers where _n can be 0 or 1. */ +#define IGC_I210_TQAVHC(_n) (0x300C + 0x40 * (_n)) + +/* Queues fetch arbitration priority control register */ +#define IGC_I210_TQAVARBCTRL 0x3574 +/* Queues priority masks where _n and _p can be 0-3. */ +#define IGC_TQAVARBCTRL_QUEUE_PRI(_n, _p) ((_p) << (2 * (_n))) +/* QAV Tx mode control registers where _n can be 0 or 1. */ +#define IGC_I210_TQAVCC(_n) (0x3004 + 0x40 * (_n)) + +/* QAV Tx mode control register bitfields masks */ +#define IGC_TQAVCC_IDLE_SLOPE 0xFFFF /* Idle slope */ +#define IGC_TQAVCC_KEEP_CREDITS (1 << 30) /* Keep credits opt enable */ +#define IGC_TQAVCC_QUEUE_MODE (1 << 31) /* SP vs. SR Tx mode */ + +/* Good transmitted packets counter registers */ +#define IGC_PQGPTC(_n) (0x010014 + (0x100 * (_n))) + +/* Queues packet buffer size masks where _n can be 0-3 and _s 0-63 [kB] */ +#define IGC_I210_TXPBS_SIZE(_n, _s) ((_s) << (6 * (_n))) + +#define IGC_MMDAC 13 /* MMD Access Control */ +#define IGC_MMDAAD 14 /* MMD Access Address/Data */ + +/* Convenience macros + * + * Note: "_n" is the queue number of the register + * + * Example usage: + * IGC_RDBAL_REG(current_rx_queue) + */ +#define IGC_QUEUE_REG(n, low, high) ( \ + __extension__ ({ \ + typeof(n) _n = (n); \ + _n < 4 ? ((low) + _n * 0x100) : ((high) + _n * 0x40); \ + })) + +#define IGC_RDBAL(_n) IGC_QUEUE_REG(_n, 0x02800, 0x0C000) +#define IGC_RDBAH(_n) IGC_QUEUE_REG(_n, 0x02804, 0x0C004) +#define IGC_RDLEN(_n) IGC_QUEUE_REG(_n, 0x02808, 0x0C008) +#define IGC_SRRCTL(_n) IGC_QUEUE_REG(_n, 0x0280C, 0x0C00C) +#define IGC_RDH(_n) IGC_QUEUE_REG(_n, 0x02810, 0x0C010) +#define IGC_RXCTL(_n) IGC_QUEUE_REG(_n, 0x02814, 0x0C014) +#define IGC_DCA_RXCTRL(_n) IGC_RXCTL(_n) +#define IGC_RDT(_n) IGC_QUEUE_REG(_n, 0x02818, 0x0C018) +#define IGC_RXDCTL(_n) IGC_QUEUE_REG(_n, 0x02828, 0x0C028) +#define IGC_RQDPC(_n) IGC_QUEUE_REG(_n, 0x02830, 0x0C030) +#define IGC_TDBAL(_n) IGC_QUEUE_REG(_n, 0x03800, 0x0E000) +#define IGC_TDBAH(_n) IGC_QUEUE_REG(_n, 0x03804, 0x0E004) +#define IGC_TDLEN(_n) IGC_QUEUE_REG(_n, 0x03808, 0x0E008) +#define IGC_TDH(_n) IGC_QUEUE_REG(_n, 0x03810, 0x0E010) +#define IGC_TXCTL(_n) IGC_QUEUE_REG(_n, 0x03814, 0x0E014) +#define IGC_DCA_TXCTRL(_n) IGC_TXCTL(_n) +#define IGC_TDT(_n) IGC_QUEUE_REG(_n, 0x03818, 0x0E018) +#define IGC_TXDCTL(_n) IGC_QUEUE_REG(_n, 0x03828, 0x0E028) +#define IGC_TDWBAL(_n) IGC_QUEUE_REG(_n, 0x03838, 0x0E038) +#define IGC_TDWBAH(_n) IGC_QUEUE_REG(_n, 0x0383C, 0x0E03C) +#define IGC_TARC(_n) (0x03840 + (_n) * 0x100) +#define IGC_RSRPD 0x02C00 /* Rx Small Packet Detect - RW */ +#define IGC_RAID 0x02C08 /* Receive Ack Interrupt Delay - RW */ +#define IGC_TXDMAC 0x03000 /* Tx DMA Control - RW */ +#define IGC_KABGTXD 0x03004 /* AFE Band Gap Transmit Ref Data */ +#define IGC_PSRTYPE(_i) (0x05480 + ((_i) * 4)) + +#define IGC_RAL(n) ( \ + __extension__ ({ \ + typeof(n) _n = (n); \ + _n < 16 ? 
(0x05400 + _n * 8) : (0x054E0 + (_n - 16) * 8); \ + })) + +#define IGC_RAH(_n) (IGC_RAL(_n) + 4) + +#define IGC_VLAPQF 0x055B0 /* VLAN Priority Queue Filter VLAPQF */ + +#define IGC_SHRAL(_i) (0x05438 + ((_i) * 8)) +#define IGC_SHRAH(_i) (0x0543C + ((_i) * 8)) +#define IGC_IP4AT_REG(_i) (0x05840 + ((_i) * 8)) +#define IGC_IP6AT_REG(_i) (0x05880 + ((_i) * 4)) +#define IGC_WUPM_REG(_i) (0x05A00 + ((_i) * 4)) +#define IGC_FFMT_REG(_i) (0x09000 + ((_i) * 8)) +#define IGC_FFVT_REG(_i) (0x09800 + ((_i) * 8)) +#define IGC_FFLT_REG(_i) (0x05F00 + ((_i) * 8)) +#define IGC_PBSLAC 0x03100 /* Pkt Buffer Slave Access Control */ +#define IGC_PBSLAD(_n) (0x03110 + (0x4 * (_n))) /* Pkt Buffer DWORD */ +#define IGC_TXPBS 0x03404 /* Tx Packet Buffer Size - RW */ +/* Same as TXPBS, renamed for newer Si - RW */ +#define IGC_ITPBS 0x03404 +#define IGC_TDFH 0x03410 /* Tx Data FIFO Head - RW */ +#define IGC_TDFT 0x03418 /* Tx Data FIFO Tail - RW */ +#define IGC_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */ +#define IGC_TDFTS 0x03428 /* Tx Data FIFO Tail Saved - RW */ +#define IGC_TDFPC 0x03430 /* Tx Data FIFO Packet Count - RW */ +#define IGC_TDPUMB 0x0357C /* DMA Tx Desc uC Mail Box - RW */ +#define IGC_TDPUAD 0x03580 /* DMA Tx Desc uC Addr Command - RW */ +#define IGC_TDPUWD 0x03584 /* DMA Tx Desc uC Data Write - RW */ +#define IGC_TDPURD 0x03588 /* DMA Tx Desc uC Data Read - RW */ +#define IGC_TDPUCTL 0x0358C /* DMA Tx Desc uC Control - RW */ +#define IGC_DTXCTL 0x03590 /* DMA Tx Control - RW */ +#define IGC_DTXTCPFLGL 0x0359C /* DMA Tx Control flag low - RW */ +#define IGC_DTXTCPFLGH 0x035A0 /* DMA Tx Control flag high - RW */ +/* DMA Tx Max Total Allow Size Reqs - RW */ +#define IGC_DTXMXSZRQ 0x03540 +#define IGC_TIDV 0x03820 /* Tx Interrupt Delay Value - RW */ +#define IGC_TADV 0x0382C /* Tx Interrupt Absolute Delay Val - RW */ +#define IGC_TSPMT 0x03830 /* TCP Segmentation PAD & Min Threshold - RW */ +/* Statistics Register Descriptions */ +#define IGC_CRCERRS 0x04000 /* CRC Error Count - R/clr */ +#define IGC_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */ +#define IGC_SYMERRS 0x04008 /* Symbol Error Count - R/clr */ +#define IGC_RXERRC 0x0400C /* Receive Error Count - R/clr */ +#define IGC_MPC 0x04010 /* Missed Packet Count - R/clr */ +#define IGC_SCC 0x04014 /* Single Collision Count - R/clr */ +#define IGC_ECOL 0x04018 /* Excessive Collision Count - R/clr */ +#define IGC_MCC 0x0401C /* Multiple Collision Count - R/clr */ +#define IGC_LATECOL 0x04020 /* Late Collision Count - R/clr */ +#define IGC_COLC 0x04028 /* Collision Count - R/clr */ +#define IGC_DC 0x04030 /* Defer Count - R/clr */ +#define IGC_TNCRS 0x04034 /* Tx-No CRS - R/clr */ +#define IGC_SEC 0x04038 /* Sequence Error Count - R/clr */ +#define IGC_CEXTERR 0x0403C /* Carrier Extension Error Count - R/clr */ +#define IGC_RLEC 0x04040 /* Receive Length Error Count - R/clr */ +#define IGC_XONRXC 0x04048 /* XON Rx Count - R/clr */ +#define IGC_XONTXC 0x0404C /* XON Tx Count - R/clr */ +#define IGC_XOFFRXC 0x04050 /* XOFF Rx Count - R/clr */ +#define IGC_XOFFTXC 0x04054 /* XOFF Tx Count - R/clr */ +#define IGC_FCRUC 0x04058 /* Flow Control Rx Unsupported Count- R/clr */ +#define IGC_PRC64 0x0405C /* Packets Rx (64 bytes) - R/clr */ +#define IGC_PRC127 0x04060 /* Packets Rx (65-127 bytes) - R/clr */ +#define IGC_PRC255 0x04064 /* Packets Rx (128-255 bytes) - R/clr */ +#define IGC_PRC511 0x04068 /* Packets Rx (255-511 bytes) - R/clr */ +#define IGC_PRC1023 0x0406C /* Packets Rx (512-1023 bytes) - R/clr */ +#define IGC_PRC1522 0x04070 /* 
Packets Rx (1024-1522 bytes) - R/clr */ +#define IGC_GPRC 0x04074 /* Good Packets Rx Count - R/clr */ +#define IGC_BPRC 0x04078 /* Broadcast Packets Rx Count - R/clr */ +#define IGC_MPRC 0x0407C /* Multicast Packets Rx Count - R/clr */ +#define IGC_GPTC 0x04080 /* Good Packets Tx Count - R/clr */ +#define IGC_GORCL 0x04088 /* Good Octets Rx Count Low - R/clr */ +#define IGC_GORCH 0x0408C /* Good Octets Rx Count High - R/clr */ +#define IGC_GOTCL 0x04090 /* Good Octets Tx Count Low - R/clr */ +#define IGC_GOTCH 0x04094 /* Good Octets Tx Count High - R/clr */ +#define IGC_RNBC 0x040A0 /* Rx No Buffers Count - R/clr */ +#define IGC_RUC 0x040A4 /* Rx Undersize Count - R/clr */ +#define IGC_RFC 0x040A8 /* Rx Fragment Count - R/clr */ +#define IGC_ROC 0x040AC /* Rx Oversize Count - R/clr */ +#define IGC_RJC 0x040B0 /* Rx Jabber Count - R/clr */ +#define IGC_MGTPRC 0x040B4 /* Management Packets Rx Count - R/clr */ +#define IGC_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */ +#define IGC_MGTPTC 0x040BC /* Management Packets Tx Count - R/clr */ +#define IGC_TORL 0x040C0 /* Total Octets Rx Low - R/clr */ +#define IGC_TORH 0x040C4 /* Total Octets Rx High - R/clr */ +#define IGC_TOTL 0x040C8 /* Total Octets Tx Low - R/clr */ +#define IGC_TOTH 0x040CC /* Total Octets Tx High - R/clr */ +#define IGC_TPR 0x040D0 /* Total Packets Rx - R/clr */ +#define IGC_TPT 0x040D4 /* Total Packets Tx - R/clr */ +#define IGC_PTC64 0x040D8 /* Packets Tx (64 bytes) - R/clr */ +#define IGC_PTC127 0x040DC /* Packets Tx (65-127 bytes) - R/clr */ +#define IGC_PTC255 0x040E0 /* Packets Tx (128-255 bytes) - R/clr */ +#define IGC_PTC511 0x040E4 /* Packets Tx (256-511 bytes) - R/clr */ +#define IGC_PTC1023 0x040E8 /* Packets Tx (512-1023 bytes) - R/clr */ +#define IGC_PTC1522 0x040EC /* Packets Tx (1024-1522 Bytes) - R/clr */ +#define IGC_MPTC 0x040F0 /* Multicast Packets Tx Count - R/clr */ +#define IGC_BPTC 0x040F4 /* Broadcast Packets Tx Count - R/clr */ +#define IGC_TSCTC 0x040F8 /* TCP Segmentation Context Tx - R/clr */ +#define IGC_TSCTFC 0x040FC /* TCP Segmentation Context Tx Fail - R/clr */ +#define IGC_IAC 0x04100 /* Interrupt Assertion Count */ +/* Interrupt Cause */ +#define IGC_ICRXPTC 0x04104 /* Interrupt Cause Rx Pkt Timer Expire Count */ +#define IGC_ICRXATC 0x04108 /* Interrupt Cause Rx Abs Timer Expire Count */ +#define IGC_ICTXPTC 0x0410C /* Interrupt Cause Tx Pkt Timer Expire Count */ +#define IGC_ICTXATC 0x04110 /* Interrupt Cause Tx Abs Timer Expire Count */ +#define IGC_ICTXQEC 0x04118 /* Interrupt Cause Tx Queue Empty Count */ +#define IGC_ICTXQMTC 0x0411C /* Interrupt Cause Tx Queue Min Thresh Count */ +#define IGC_ICRXDMTC 0x04120 /* Interrupt Cause Rx Desc Min Thresh Count */ +#define IGC_ICRXOC 0x04124 /* Interrupt Cause Receiver Overrun Count */ +#define IGC_CRC_OFFSET 0x05F50 /* CRC Offset register */ + +#define IGC_VFGPRC 0x00F10 +#define IGC_VFGORC 0x00F18 +#define IGC_VFMPRC 0x00F3C +#define IGC_VFGPTC 0x00F14 +#define IGC_VFGOTC 0x00F34 +#define IGC_VFGOTLBC 0x00F50 +#define IGC_VFGPTLBC 0x00F44 +#define IGC_VFGORLBC 0x00F48 +#define IGC_VFGPRLBC 0x00F40 +/* Virtualization statistical counters */ +#define IGC_PFVFGPRC(_n) (0x010010 + (0x100 * (_n))) +#define IGC_PFVFGPTC(_n) (0x010014 + (0x100 * (_n))) +#define IGC_PFVFGORC(_n) (0x010018 + (0x100 * (_n))) +#define IGC_PFVFGOTC(_n) (0x010034 + (0x100 * (_n))) +#define IGC_PFVFMPRC(_n) (0x010038 + (0x100 * (_n))) +#define IGC_PFVFGPRLBC(_n) (0x010040 + (0x100 * (_n))) +#define IGC_PFVFGPTLBC(_n) (0x010044 + (0x100 * (_n))) +#define 
IGC_PFVFGORLBC(_n) (0x010048 + (0x100 * (_n))) +#define IGC_PFVFGOTLBC(_n) (0x010050 + (0x100 * (_n))) + +/* LinkSec */ +#define IGC_LSECTXUT 0x04300 /* Tx Untagged Pkt Cnt */ +#define IGC_LSECTXPKTE 0x04304 /* Encrypted Tx Pkts Cnt */ +#define IGC_LSECTXPKTP 0x04308 /* Protected Tx Pkt Cnt */ +#define IGC_LSECTXOCTE 0x0430C /* Encrypted Tx Octets Cnt */ +#define IGC_LSECTXOCTP 0x04310 /* Protected Tx Octets Cnt */ +#define IGC_LSECRXUT 0x04314 /* Untagged non-Strict Rx Pkt Cnt */ +#define IGC_LSECRXOCTD 0x0431C /* Rx Octets Decrypted Count */ +#define IGC_LSECRXOCTV 0x04320 /* Rx Octets Validated */ +#define IGC_LSECRXBAD 0x04324 /* Rx Bad Tag */ +#define IGC_LSECRXNOSCI 0x04328 /* Rx Packet No SCI Count */ +#define IGC_LSECRXUNSCI 0x0432C /* Rx Packet Unknown SCI Count */ +#define IGC_LSECRXUNCH 0x04330 /* Rx Unchecked Packets Count */ +#define IGC_LSECRXDELAY 0x04340 /* Rx Delayed Packet Count */ +#define IGC_LSECRXLATE 0x04350 /* Rx Late Packets Count */ +#define IGC_LSECRXOK(_n) (0x04360 + (0x04 * (_n))) /* Rx Pkt OK Cnt */ +#define IGC_LSECRXINV(_n) (0x04380 + (0x04 * (_n))) /* Rx Invalid Cnt */ +#define IGC_LSECRXNV(_n) (0x043A0 + (0x04 * (_n))) /* Rx Not Valid Cnt */ +#define IGC_LSECRXUNSA 0x043C0 /* Rx Unused SA Count */ +#define IGC_LSECRXNUSA 0x043D0 /* Rx Not Using SA Count */ +#define IGC_LSECTXCAP 0x0B000 /* Tx Capabilities Register - RO */ +#define IGC_LSECRXCAP 0x0B300 /* Rx Capabilities Register - RO */ +#define IGC_LSECTXCTRL 0x0B004 /* Tx Control - RW */ +#define IGC_LSECRXCTRL 0x0B304 /* Rx Control - RW */ +#define IGC_LSECTXSCL 0x0B008 /* Tx SCI Low - RW */ +#define IGC_LSECTXSCH 0x0B00C /* Tx SCI High - RW */ +#define IGC_LSECTXSA 0x0B010 /* Tx SA0 - RW */ +#define IGC_LSECTXPN0 0x0B018 /* Tx SA PN 0 - RW */ +#define IGC_LSECTXPN1 0x0B01C /* Tx SA PN 1 - RW */ +#define IGC_LSECRXSCL 0x0B3D0 /* Rx SCI Low - RW */ +#define IGC_LSECRXSCH 0x0B3E0 /* Rx SCI High - RW */ +/* LinkSec Tx 128-bit Key 0 - WO */ +#define IGC_LSECTXKEY0(_n) (0x0B020 + (0x04 * (_n))) +/* LinkSec Tx 128-bit Key 1 - WO */ +#define IGC_LSECTXKEY1(_n) (0x0B030 + (0x04 * (_n))) +#define IGC_LSECRXSA(_n) (0x0B310 + (0x04 * (_n))) /* Rx SAs - RW */ +#define IGC_LSECRXPN(_n) (0x0B330 + (0x04 * (_n))) /* Rx SAs - RW */ +/* LinkSec Rx Keys - where _n is the SA no. and _m the 4 dwords of the 128 bit + * key - RW. 
+ */ +#define IGC_LSECRXKEY(_n, _m) (0x0B350 + (0x10 * (_n)) + (0x04 * (_m))) + +#define IGC_SSVPC 0x041A0 /* Switch Security Violation Pkt Cnt */ +#define IGC_IPSCTRL 0xB430 /* IpSec Control Register */ +#define IGC_IPSRXCMD 0x0B408 /* IPSec Rx Command Register - RW */ +#define IGC_IPSRXIDX 0x0B400 /* IPSec Rx Index - RW */ +/* IPSec Rx IPv4/v6 Address - RW */ +#define IGC_IPSRXIPADDR(_n) (0x0B420 + (0x04 * (_n))) +/* IPSec Rx 128-bit Key - RW */ +#define IGC_IPSRXKEY(_n) (0x0B410 + (0x04 * (_n))) +#define IGC_IPSRXSALT 0x0B404 /* IPSec Rx Salt - RW */ +#define IGC_IPSRXSPI 0x0B40C /* IPSec Rx SPI - RW */ +/* IPSec Tx 128-bit Key - RW */ +#define IGC_IPSTXKEY(_n) (0x0B460 + (0x04 * (_n))) +#define IGC_IPSTXSALT 0x0B454 /* IPSec Tx Salt - RW */ +#define IGC_IPSTXIDX 0x0B450 /* IPSec Tx SA IDX - RW */ +#define IGC_PCS_CFG0 0x04200 /* PCS Configuration 0 - RW */ +#define IGC_PCS_LCTL 0x04208 /* PCS Link Control - RW */ +#define IGC_PCS_LSTAT 0x0420C /* PCS Link Status - RO */ +#define IGC_CBTMPC 0x0402C /* Circuit Breaker Tx Packet Count */ +#define IGC_HTDPMC 0x0403C /* Host Transmit Discarded Packets */ +#define IGC_CBRDPC 0x04044 /* Circuit Breaker Rx Dropped Count */ +#define IGC_CBRMPC 0x040FC /* Circuit Breaker Rx Packet Count */ +#define IGC_RPTHC 0x04104 /* Rx Packets To Host */ +#define IGC_HGPTC 0x04118 /* Host Good Packets Tx Count */ +#define IGC_HTCBDPC 0x04124 /* Host Tx Circuit Breaker Dropped Count */ +#define IGC_HGORCL 0x04128 /* Host Good Octets Received Count Low */ +#define IGC_HGORCH 0x0412C /* Host Good Octets Received Count High */ +#define IGC_HGOTCL 0x04130 /* Host Good Octets Transmit Count Low */ +#define IGC_HGOTCH 0x04134 /* Host Good Octets Transmit Count High */ +#define IGC_LENERRS 0x04138 /* Length Errors Count */ +#define IGC_SCVPC 0x04228 /* SerDes/SGMII Code Violation Pkt Count */ +#define IGC_HRMPC 0x0A018 /* Header Redirection Missed Packet Count */ +#define IGC_PCS_ANADV 0x04218 /* AN advertisement - RW */ +#define IGC_PCS_LPAB 0x0421C /* Link Partner Ability - RW */ +#define IGC_PCS_NPTX 0x04220 /* AN Next Page Transmit - RW */ +#define IGC_PCS_LPABNP 0x04224 /* Link Partner Ability Next Pg - RW */ +#define IGC_RXCSUM 0x05000 /* Rx Checksum Control - RW */ +#define IGC_RLPML 0x05004 /* Rx Long Packet Max Length */ +#define IGC_RFCTL 0x05008 /* Receive Filter Control*/ +#define IGC_MTA 0x05200 /* Multicast Table Array - RW Array */ +#define IGC_RA 0x05400 /* Receive Address - RW Array */ +#define IGC_RA2 0x054E0 /* 2nd half of Rx address array - RW Array */ +#define IGC_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */ +#define IGC_VT_CTL 0x0581C /* VMDq Control - RW */ +#define IGC_CIAA 0x05B88 /* Config Indirect Access Address - RW */ +#define IGC_CIAD 0x05B8C /* Config Indirect Access Data - RW */ +#define IGC_VFQA0 0x0B000 /* VLAN Filter Queue Array 0 - RW Array */ +#define IGC_VFQA1 0x0B200 /* VLAN Filter Queue Array 1 - RW Array */ +#define IGC_WUC 0x05800 /* Wakeup Control - RW */ +#define IGC_WUFC 0x05808 /* Wakeup Filter Control - RW */ +#define IGC_WUS 0x05810 /* Wakeup Status - RO */ +/* Management registers */ +#define IGC_MANC 0x05820 /* Management Control - RW */ +#define IGC_IPAV 0x05838 /* IP Address Valid - RW */ +#define IGC_IP4AT 0x05840 /* IPv4 Address Table - RW Array */ +#define IGC_IP6AT 0x05880 /* IPv6 Address Table - RW Array */ +#define IGC_WUPL 0x05900 /* Wakeup Packet Length - RW */ +#define IGC_WUPM 0x05A00 /* Wakeup Packet Memory - RO A */ +#define IGC_WUPM_EXT 0x0B800 /* Wakeup Packet Memory Extended - RO Array */ 
+#define IGC_WUFC_EXT 0x0580C /* Wakeup Filter Control Extended - RW */ +#define IGC_WUS_EXT 0x05814 /* Wakeup Status Extended - RW1C */ +#define IGC_FHFTSL 0x05804 /* Flex Filter Indirect Table Select - RW */ +#define IGC_PROXYFCEX 0x05590 /* Proxy Filter Control Extended - RW1C */ +#define IGC_PROXYEXS 0x05594 /* Proxy Extended Status - RO */ +#define IGC_WFUTPF 0x05500 /* Wake Flex UDP TCP Port Filter - RW Array */ +#define IGC_RFUTPF 0x05580 /* Range Flex UDP TCP Port Filter - RW */ +#define IGC_RWPFC 0x05584 /* Range Wake Port Filter Control - RW */ +#define IGC_WFUTPS 0x05588 /* Wake Filter UDP TCP Status - RW1C */ +#define IGC_WCS 0x0558C /* Wake Control Status - RW1C */ +/* MSI-X Table Register Descriptions */ +#define IGC_PBACL 0x05B68 /* MSIx PBA Clear - Read/Write 1's to clear */ +#define IGC_FFLT 0x05F00 /* Flexible Filter Length Table - RW Array */ +#define IGC_HOST_IF 0x08800 /* Host Interface */ +#define IGC_HIBBA 0x8F40 /* Host Interface Buffer Base Address */ +/* Flexible Host Filter Table */ +#define IGC_FHFT(_n) (0x09000 + ((_n) * 0x100)) +/* Ext Flexible Host Filter Table */ +#define IGC_FHFT_EXT(_n) (0x09A00 + ((_n) * 0x100)) + + +#define IGC_KMRNCTRLSTA 0x00034 /* MAC-PHY interface - RW */ +#define IGC_MANC2H 0x05860 /* Management Control To Host - RW */ +/* Management Decision Filters */ +#define IGC_MDEF(_n) (0x05890 + (4 * (_n))) +/* Semaphore registers */ +#define IGC_SW_FW_SYNC 0x05B5C /* SW-FW Synchronization - RW */ +#define IGC_CCMCTL 0x05B48 /* CCM Control Register */ +#define IGC_GIOCTL 0x05B44 /* GIO Analog Control Register */ +#define IGC_SCCTL 0x05B4C /* PCIc PLL Configuration Register */ +/* PCIe Register Description */ +#define IGC_GCR 0x05B00 /* PCI-Ex Control */ +#define IGC_GCR2 0x05B64 /* PCI-Ex Control #2 */ +#define IGC_GSCL_1 0x05B10 /* PCI-Ex Statistic Control #1 */ +#define IGC_GSCL_2 0x05B14 /* PCI-Ex Statistic Control #2 */ +#define IGC_GSCL_3 0x05B18 /* PCI-Ex Statistic Control #3 */ +#define IGC_GSCL_4 0x05B1C /* PCI-Ex Statistic Control #4 */ +/* Function Active and Power State to MNG */ +#define IGC_FACTPS 0x05B30 +#define IGC_SWSM 0x05B50 /* SW Semaphore */ +#define IGC_FWSM 0x05B54 /* FW Semaphore */ +/* Driver-only SW semaphore (not used by BOOT agents) */ +#define IGC_SWSM2 0x05B58 +#define IGC_DCA_ID 0x05B70 /* DCA Requester ID Information - RO */ +#define IGC_DCA_CTRL 0x05B74 /* DCA Control - RW */ +#define IGC_UFUSE 0x05B78 /* UFUSE - RO */ +#define IGC_FFLT_DBG 0x05F04 /* Debug Register */ +#define IGC_HICR 0x08F00 /* Host Interface Control */ +#define IGC_FWSTS 0x08F0C /* FW Status */ + +/* RSS registers */ +#define IGC_CPUVEC 0x02C10 /* CPU Vector Register - RW */ +#define IGC_MRQC 0x05818 /* Multiple Receive Control - RW */ +#define IGC_IMIR(_i) (0x05A80 + ((_i) * 4)) /* Immediate Interrupt */ +#define IGC_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* Immediate INTR Ext*/ +#define IGC_IMIRVP 0x05AC0 /* Immediate INT Rx VLAN Priority -RW */ +#define IGC_MSIXBM(_i) (0x01600 + ((_i) * 4)) /* MSI-X Alloc Reg -RW */ +/* Redirection Table - RW Array */ +#define IGC_RETA(_i) (0x05C00 + ((_i) * 4)) +/* RSS Random Key - RW Array */ +#define IGC_RSSRK(_i) (0x05C80 + ((_i) * 4)) +#define IGC_RSSIM 0x05864 /* RSS Interrupt Mask */ +#define IGC_RSSIR 0x05868 /* RSS Interrupt Request */ +#define IGC_UTA 0x0A000 /* Unicast Table Array - RW */ +/* VT Registers */ +#define IGC_SWPBS 0x03004 /* Switch Packet Buffer Size - RW */ +#define IGC_MBVFICR 0x00C80 /* Mailbox VF Cause - RWC */ +#define IGC_MBVFIMR 0x00C84 /* Mailbox VF int Mask - RW */ +#define 
IGC_VFLRE 0x00C88 /* VF Register Events - RWC */ +#define IGC_VFRE 0x00C8C /* VF Receive Enables */ +#define IGC_VFTE 0x00C90 /* VF Transmit Enables */ +#define IGC_QDE 0x02408 /* Queue Drop Enable - RW */ +#define IGC_DTXSWC 0x03500 /* DMA Tx Switch Control - RW */ +#define IGC_WVBR 0x03554 /* VM Wrong Behavior - RWS */ +#define IGC_RPLOLR 0x05AF0 /* Replication Offload - RW */ +#define IGC_IOVTCL 0x05BBC /* IOV Control Register */ +#define IGC_VMRCTL 0X05D80 /* Virtual Mirror Rule Control */ +#define IGC_VMRVLAN 0x05D90 /* Virtual Mirror Rule VLAN */ +#define IGC_VMRVM 0x05DA0 /* Virtual Mirror Rule VM */ +#define IGC_MDFB 0x03558 /* Malicious Driver free block */ +#define IGC_LVMMC 0x03548 /* Last VM Misbehavior cause */ +#define IGC_TXSWC 0x05ACC /* Tx Switch Control */ +#define IGC_SCCRL 0x05DB0 /* Storm Control Control */ +#define IGC_BSCTRH 0x05DB8 /* Broadcast Storm Control Threshold */ +#define IGC_MSCTRH 0x05DBC /* Multicast Storm Control Threshold */ +/* These act per VF so an array friendly macro is used */ +#define IGC_V2PMAILBOX(_n) (0x00C40 + (4 * (_n))) +#define IGC_P2VMAILBOX(_n) (0x00C00 + (4 * (_n))) +#define IGC_VMBMEM(_n) (0x00800 + (64 * (_n))) +#define IGC_VFVMBMEM(_n) (0x00800 + (_n)) +#define IGC_VMOLR(_n) (0x05AD0 + (4 * (_n))) +/* VLAN Virtual Machine Filter - RW */ +#define IGC_VLVF(_n) (0x05D00 + (4 * (_n))) +#define IGC_VMVIR(_n) (0x03700 + (4 * (_n))) +#define IGC_DVMOLR(_n) (0x0C038 + (0x40 * (_n))) /* DMA VM offload */ +#define IGC_VTCTRL(_n) (0x10000 + (0x100 * (_n))) /* VT Control */ +#define IGC_TSYNCRXCTL 0x0B620 /* Rx Time Sync Control register - RW */ +#define IGC_TSYNCTXCTL 0x0B614 /* Tx Time Sync Control register - RW */ +#define IGC_TSYNCRXCFG 0x05F50 /* Time Sync Rx Configuration - RW */ +#define IGC_RXSTMPL 0x0B624 /* Rx timestamp Low - RO */ +#define IGC_RXSTMPH 0x0B628 /* Rx timestamp High - RO */ +#define IGC_RXSATRL 0x0B62C /* Rx timestamp attribute low - RO */ +#define IGC_RXSATRH 0x0B630 /* Rx timestamp attribute high - RO */ +#define IGC_TXSTMPL 0x0B618 /* Tx timestamp value Low - RO */ +#define IGC_TXSTMPH 0x0B61C /* Tx timestamp value High - RO */ +#define IGC_SYSTIML 0x0B600 /* System time register Low - RO */ +#define IGC_SYSTIMH 0x0B604 /* System time register High - RO */ +#define IGC_TIMINCA 0x0B608 /* Increment attributes register - RW */ +#define IGC_TIMADJL 0x0B60C /* Time sync time adjustment offset Low - RW */ +#define IGC_TIMADJH 0x0B610 /* Time sync time adjustment offset High - RW */ +#define IGC_TSAUXC 0x0B640 /* Timesync Auxiliary Control register */ +#define IGC_SYSSTMPL 0x0B648 /* HH Timesync system stamp low register */ +#define IGC_SYSSTMPH 0x0B64C /* HH Timesync system stamp hi register */ +#define IGC_PLTSTMPL 0x0B640 /* HH Timesync platform stamp low register */ +#define IGC_PLTSTMPH 0x0B644 /* HH Timesync platform stamp hi register */ +#define IGC_SYSTIMR 0x0B6F8 /* System time register Residue */ +#define IGC_TSICR 0x0B66C /* Interrupt Cause Register */ +#define IGC_TSIM 0x0B674 /* Interrupt Mask Register */ +#define IGC_RXMTRL 0x0B634 /* Time sync Rx EtherType and Msg Type - RW */ +#define IGC_RXUDP 0x0B638 /* Time Sync Rx UDP Port - RW */ + +/* Filtering Registers */ +#define IGC_SAQF(_n) (0x05980 + (4 * (_n))) /* Source Address Queue Fltr */ +#define IGC_DAQF(_n) (0x059A0 + (4 * (_n))) /* Dest Address Queue Fltr */ +#define IGC_SPQF(_n) (0x059C0 + (4 * (_n))) /* Source Port Queue Fltr */ +#define IGC_FTQF(_n) (0x059E0 + (4 * (_n))) /* 5-tuple Queue Fltr */ +#define IGC_TTQF(_n) (0x059E0 + (4 * (_n))) /* 2-tuple 
Queue Fltr */ +#define IGC_SYNQF(_n) (0x055FC + (4 * (_n))) /* SYN Packet Queue Fltr */ +#define IGC_ETQF(_n) (0x05CB0 + (4 * (_n))) /* EType Queue Fltr */ + +#define IGC_RTTDCS 0x3600 /* Reedtown Tx Desc plane control and status */ +#define IGC_RTTPCS 0x3474 /* Reedtown Tx Packet Plane control and status */ +#define IGC_RTRPCS 0x2474 /* Rx packet plane control and status */ +#define IGC_RTRUP2TC 0x05AC4 /* Rx User Priority to Traffic Class */ +#define IGC_RTTUP2TC 0x0418 /* Transmit User Priority to Traffic Class */ +/* Tx Desc plane TC Rate-scheduler config */ +#define IGC_RTTDTCRC(_n) (0x3610 + ((_n) * 4)) +/* Tx Packet plane TC Rate-Scheduler Config */ +#define IGC_RTTPTCRC(_n) (0x3480 + ((_n) * 4)) +/* Rx Packet plane TC Rate-Scheduler Config */ +#define IGC_RTRPTCRC(_n) (0x2480 + ((_n) * 4)) +/* Tx Desc Plane TC Rate-Scheduler Status */ +#define IGC_RTTDTCRS(_n) (0x3630 + ((_n) * 4)) +/* Tx Desc Plane TC Rate-Scheduler MMW */ +#define IGC_RTTDTCRM(_n) (0x3650 + ((_n) * 4)) +/* Tx Packet plane TC Rate-Scheduler Status */ +#define IGC_RTTPTCRS(_n) (0x34A0 + ((_n) * 4)) +/* Tx Packet plane TC Rate-scheduler MMW */ +#define IGC_RTTPTCRM(_n) (0x34C0 + ((_n) * 4)) +/* Rx Packet plane TC Rate-Scheduler Status */ +#define IGC_RTRPTCRS(_n) (0x24A0 + ((_n) * 4)) +/* Rx Packet plane TC Rate-Scheduler MMW */ +#define IGC_RTRPTCRM(_n) (0x24C0 + ((_n) * 4)) +/* Tx Desc plane VM Rate-Scheduler MMW*/ +#define IGC_RTTDVMRM(_n) (0x3670 + ((_n) * 4)) +/* Tx BCN Rate-Scheduler MMW */ +#define IGC_RTTBCNRM(_n) (0x3690 + ((_n) * 4)) +#define IGC_RTTDQSEL 0x3604 /* Tx Desc Plane Queue Select */ +#define IGC_RTTDVMRC 0x3608 /* Tx Desc Plane VM Rate-Scheduler Config */ +#define IGC_RTTDVMRS 0x360C /* Tx Desc Plane VM Rate-Scheduler Status */ +#define IGC_RTTBCNRC 0x36B0 /* Tx BCN Rate-Scheduler Config */ +#define IGC_RTTBCNRS 0x36B4 /* Tx BCN Rate-Scheduler Status */ +#define IGC_RTTBCNCR 0xB200 /* Tx BCN Control Register */ +#define IGC_RTTBCNTG 0x35A4 /* Tx BCN Tagging */ +#define IGC_RTTBCNCP 0xB208 /* Tx BCN Congestion point */ +#define IGC_RTRBCNCR 0xB20C /* Rx BCN Control Register */ +#define IGC_RTTBCNRD 0x36B8 /* Tx BCN Rate Drift */ +#define IGC_PFCTOP 0x1080 /* Priority Flow Control Type and Opcode */ +#define IGC_RTTBCNIDX 0xB204 /* Tx BCN Congestion Point */ +#define IGC_RTTBCNACH 0x0B214 /* Tx BCN Control High */ +#define IGC_RTTBCNACL 0x0B210 /* Tx BCN Control Low */ + +/* DMA Coalescing registers */ +#define IGC_DMACR 0x02508 /* Control Register */ +#define IGC_DMCTXTH 0x03550 /* Transmit Threshold */ +#define IGC_DMCTLX 0x02514 /* Time to Lx Request */ +#define IGC_DMCRTRH 0x05DD0 /* Receive Packet Rate Threshold */ +#define IGC_DMCCNT 0x05DD4 /* Current Rx Count */ +#define IGC_FCRTC 0x02170 /* Flow Control Rx high watermark */ +#define IGC_PCIEMISC 0x05BB8 /* PCIE misc config register */ + +/* PCIe Parity Status Register */ +#define IGC_PCIEERRSTS 0x05BA8 + +#define IGC_PROXYS 0x5F64 /* Proxying Status */ +#define IGC_PROXYFC 0x5F60 /* Proxying Filter Control */ +/* Thermal sensor configuration and status registers */ +#define IGC_THMJT 0x08100 /* Junction Temperature */ +#define IGC_THLOWTC 0x08104 /* Low Threshold Control */ +#define IGC_THMIDTC 0x08108 /* Mid Threshold Control */ +#define IGC_THHIGHTC 0x0810C /* High Threshold Control */ +#define IGC_THSTAT 0x08110 /* Thermal Sensor Status */ + +/* Energy Efficient Ethernet "EEE" registers */ +#define IGC_IPCNFG 0x0E38 /* Internal PHY Configuration */ +#define IGC_LTRC 0x01A0 /* Latency Tolerance Reporting Control */ +#define IGC_EEER 
0x0E30 /* Energy Efficient Ethernet "EEE"*/ +#define IGC_EEE_SU 0x0E34 /* EEE Setup */ +#define IGC_EEE_SU_2P5 0x0E3C /* EEE 2.5G Setup */ +#define IGC_TLPIC 0x4148 /* EEE Tx LPI Count - TLPIC */ +#define IGC_RLPIC 0x414C /* EEE Rx LPI Count - RLPIC */ + +/* OS2BMC Registers */ +#define IGC_B2OSPC 0x08FE0 /* BMC2OS packets sent by BMC */ +#define IGC_B2OGPRC 0x04158 /* BMC2OS packets received by host */ +#define IGC_O2BGPTC 0x08FE4 /* OS2BMC packets received by BMC */ +#define IGC_O2BSPC 0x0415C /* OS2BMC packets transmitted by host */ + +#define IGC_LTRMINV 0x5BB0 /* LTR Minimum Value */ +#define IGC_LTRMAXV 0x5BB4 /* LTR Maximum Value */ + + +/* IEEE 1588 TIMESYNCH */ +#define IGC_TRGTTIML0 0x0B644 /* Target Time Register 0 Low - RW */ +#define IGC_TRGTTIMH0 0x0B648 /* Target Time Register 0 High - RW */ +#define IGC_TRGTTIML1 0x0B64C /* Target Time Register 1 Low - RW */ +#define IGC_TRGTTIMH1 0x0B650 /* Target Time Register 1 High - RW */ +#define IGC_FREQOUT0 0x0B654 /* Frequency Out 0 Control Register - RW */ +#define IGC_FREQOUT1 0x0B658 /* Frequency Out 1 Control Register - RW */ +#define IGC_TSSDP 0x0003C /* Time Sync SDP Configuration Register - RW */ + +#define IGC_LTRC_EEEMS_EN (1 << 5) +#define IGC_TW_SYSTEM_100_MASK 0xff00 +#define IGC_TW_SYSTEM_100_SHIFT 8 +#define IGC_TW_SYSTEM_1000_MASK 0xff +#define IGC_LTRMINV_SCALE_1024 0x02 +#define IGC_LTRMINV_SCALE_32768 0x03 +#define IGC_LTRMAXV_SCALE_1024 0x02 +#define IGC_LTRMAXV_SCALE_32768 0x03 +#define IGC_LTRMINV_LTRV_MASK 0x1ff +#define IGC_LTRMINV_LSNP_REQ 0x80 +#define IGC_LTRMINV_SCALE_SHIFT 10 +#define IGC_LTRMAXV_LTRV_MASK 0x1ff +#define IGC_LTRMAXV_LSNP_REQ 0x80 +#define IGC_LTRMAXV_SCALE_SHIFT 10 + +#define IGC_MRQC_ENABLE_MASK 0x00000007 +#define IGC_MRQC_RSS_FIELD_IPV6_EX 0x00080000 +#define IGC_RCTL_DTYP_MASK 0x00000C00 /* Descriptor type mask */ + +#endif diff --git a/src/spdk/dpdk/drivers/net/igc/base/meson.build b/src/spdk/dpdk/drivers/net/igc/base/meson.build new file mode 100644 index 000000000..299985180 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/igc/base/meson.build @@ -0,0 +1,18 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2019-2020 Intel Corporation + +sources = [ + 'igc_api.c', + 'igc_base.c', + 'igc_i225.c', + 'igc_mac.c', + 'igc_manage.c', + 'igc_nvm.c', + 'igc_osdep.c', + 'igc_phy.c', +] + +base_lib = static_library('igc_base', sources, + dependencies: static_rte_eal) + +base_objs = base_lib.extract_all_objects() diff --git a/src/spdk/dpdk/drivers/net/igc/igc_ethdev.c b/src/spdk/dpdk/drivers/net/igc/igc_ethdev.c new file mode 100644 index 000000000..6ab3ee909 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/igc/igc_ethdev.c @@ -0,0 +1,2630 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019-2020 Intel Corporation + */ + +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "igc_logs.h" +#include "igc_txrx.h" +#include "igc_filter.h" +#include "igc_flow.h" + +#define IGC_INTEL_VENDOR_ID 0x8086 + +/* + * The overhead from MTU to max frame size. + * Considering VLAN so tag needs to be counted. 
+ */ +#define IGC_ETH_OVERHEAD (RTE_ETHER_HDR_LEN + \ + RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE) + +#define IGC_FC_PAUSE_TIME 0x0680 +#define IGC_LINK_UPDATE_CHECK_TIMEOUT 90 /* 9s */ +#define IGC_LINK_UPDATE_CHECK_INTERVAL 100 /* ms */ + +#define IGC_MISC_VEC_ID RTE_INTR_VEC_ZERO_OFFSET +#define IGC_RX_VEC_START RTE_INTR_VEC_RXTX_OFFSET +#define IGC_MSIX_OTHER_INTR_VEC 0 /* MSI-X other interrupt vector */ +#define IGC_FLAG_NEED_LINK_UPDATE (1u << 0) /* need update link */ + +#define IGC_DEFAULT_RX_FREE_THRESH 32 + +#define IGC_DEFAULT_RX_PTHRESH 8 +#define IGC_DEFAULT_RX_HTHRESH 8 +#define IGC_DEFAULT_RX_WTHRESH 4 + +#define IGC_DEFAULT_TX_PTHRESH 8 +#define IGC_DEFAULT_TX_HTHRESH 1 +#define IGC_DEFAULT_TX_WTHRESH 16 + +/* MSI-X other interrupt vector */ +#define IGC_MSIX_OTHER_INTR_VEC 0 + +/* External VLAN Enable bit mask */ +#define IGC_CTRL_EXT_EXT_VLAN (1u << 26) + +/* Speed select */ +#define IGC_CTRL_SPEED_MASK (7u << 8) +#define IGC_CTRL_SPEED_2500 (6u << 8) + +/* External VLAN Ether Type bit mask and shift */ +#define IGC_VET_EXT 0xFFFF0000 +#define IGC_VET_EXT_SHIFT 16 + +/* Force EEE Auto-negotiation */ +#define IGC_EEER_EEE_FRC_AN (1u << 28) + +/* Per Queue Good Packets Received Count */ +#define IGC_PQGPRC(idx) (0x10010 + 0x100 * (idx)) +/* Per Queue Good Octets Received Count */ +#define IGC_PQGORC(idx) (0x10018 + 0x100 * (idx)) +/* Per Queue Good Octets Transmitted Count */ +#define IGC_PQGOTC(idx) (0x10034 + 0x100 * (idx)) +/* Per Queue Multicast Packets Received Count */ +#define IGC_PQMPRC(idx) (0x10038 + 0x100 * (idx)) +/* Transmit Queue Drop Packet Count */ +#define IGC_TQDPC(idx) (0xe030 + 0x40 * (idx)) + +#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN +#define U32_0_IN_U64 0 /* lower bytes of u64 */ +#define U32_1_IN_U64 1 /* higher bytes of u64 */ +#else +#define U32_0_IN_U64 1 +#define U32_1_IN_U64 0 +#endif + +#define IGC_ALARM_INTERVAL 8000000u +/* us, about 13.6s some per-queue registers will wrap around back to 0. 
*/ + +static const struct rte_eth_desc_lim rx_desc_lim = { + .nb_max = IGC_MAX_RXD, + .nb_min = IGC_MIN_RXD, + .nb_align = IGC_RXD_ALIGN, +}; + +static const struct rte_eth_desc_lim tx_desc_lim = { + .nb_max = IGC_MAX_TXD, + .nb_min = IGC_MIN_TXD, + .nb_align = IGC_TXD_ALIGN, + .nb_seg_max = IGC_TX_MAX_SEG, + .nb_mtu_seg_max = IGC_TX_MAX_MTU_SEG, +}; + +static const struct rte_pci_id pci_id_igc_map[] = { + { RTE_PCI_DEVICE(IGC_INTEL_VENDOR_ID, IGC_DEV_ID_I225_LM) }, + { RTE_PCI_DEVICE(IGC_INTEL_VENDOR_ID, IGC_DEV_ID_I225_V) }, + { RTE_PCI_DEVICE(IGC_INTEL_VENDOR_ID, IGC_DEV_ID_I225_I) }, + { RTE_PCI_DEVICE(IGC_INTEL_VENDOR_ID, IGC_DEV_ID_I225_K) }, + { .vendor_id = 0, /* sentinel */ }, +}; + +/* store statistics names and its offset in stats structure */ +struct rte_igc_xstats_name_off { + char name[RTE_ETH_XSTATS_NAME_SIZE]; + unsigned int offset; +}; + +static const struct rte_igc_xstats_name_off rte_igc_stats_strings[] = { + {"rx_crc_errors", offsetof(struct igc_hw_stats, crcerrs)}, + {"rx_align_errors", offsetof(struct igc_hw_stats, algnerrc)}, + {"rx_errors", offsetof(struct igc_hw_stats, rxerrc)}, + {"rx_missed_packets", offsetof(struct igc_hw_stats, mpc)}, + {"tx_single_collision_packets", offsetof(struct igc_hw_stats, scc)}, + {"tx_multiple_collision_packets", offsetof(struct igc_hw_stats, mcc)}, + {"tx_excessive_collision_packets", offsetof(struct igc_hw_stats, + ecol)}, + {"tx_late_collisions", offsetof(struct igc_hw_stats, latecol)}, + {"tx_total_collisions", offsetof(struct igc_hw_stats, colc)}, + {"tx_deferred_packets", offsetof(struct igc_hw_stats, dc)}, + {"tx_no_carrier_sense_packets", offsetof(struct igc_hw_stats, tncrs)}, + {"tx_discarded_packets", offsetof(struct igc_hw_stats, htdpmc)}, + {"rx_length_errors", offsetof(struct igc_hw_stats, rlec)}, + {"rx_xon_packets", offsetof(struct igc_hw_stats, xonrxc)}, + {"tx_xon_packets", offsetof(struct igc_hw_stats, xontxc)}, + {"rx_xoff_packets", offsetof(struct igc_hw_stats, xoffrxc)}, + {"tx_xoff_packets", offsetof(struct igc_hw_stats, xofftxc)}, + {"rx_flow_control_unsupported_packets", offsetof(struct igc_hw_stats, + fcruc)}, + {"rx_size_64_packets", offsetof(struct igc_hw_stats, prc64)}, + {"rx_size_65_to_127_packets", offsetof(struct igc_hw_stats, prc127)}, + {"rx_size_128_to_255_packets", offsetof(struct igc_hw_stats, prc255)}, + {"rx_size_256_to_511_packets", offsetof(struct igc_hw_stats, prc511)}, + {"rx_size_512_to_1023_packets", offsetof(struct igc_hw_stats, + prc1023)}, + {"rx_size_1024_to_max_packets", offsetof(struct igc_hw_stats, + prc1522)}, + {"rx_broadcast_packets", offsetof(struct igc_hw_stats, bprc)}, + {"rx_multicast_packets", offsetof(struct igc_hw_stats, mprc)}, + {"rx_undersize_errors", offsetof(struct igc_hw_stats, ruc)}, + {"rx_fragment_errors", offsetof(struct igc_hw_stats, rfc)}, + {"rx_oversize_errors", offsetof(struct igc_hw_stats, roc)}, + {"rx_jabber_errors", offsetof(struct igc_hw_stats, rjc)}, + {"rx_no_buffers", offsetof(struct igc_hw_stats, rnbc)}, + {"rx_management_packets", offsetof(struct igc_hw_stats, mgprc)}, + {"rx_management_dropped", offsetof(struct igc_hw_stats, mgpdc)}, + {"tx_management_packets", offsetof(struct igc_hw_stats, mgptc)}, + {"rx_total_packets", offsetof(struct igc_hw_stats, tpr)}, + {"tx_total_packets", offsetof(struct igc_hw_stats, tpt)}, + {"rx_total_bytes", offsetof(struct igc_hw_stats, tor)}, + {"tx_total_bytes", offsetof(struct igc_hw_stats, tot)}, + {"tx_size_64_packets", offsetof(struct igc_hw_stats, ptc64)}, + {"tx_size_65_to_127_packets", offsetof(struct 
igc_hw_stats, ptc127)}, + {"tx_size_128_to_255_packets", offsetof(struct igc_hw_stats, ptc255)}, + {"tx_size_256_to_511_packets", offsetof(struct igc_hw_stats, ptc511)}, + {"tx_size_512_to_1023_packets", offsetof(struct igc_hw_stats, + ptc1023)}, + {"tx_size_1023_to_max_packets", offsetof(struct igc_hw_stats, + ptc1522)}, + {"tx_multicast_packets", offsetof(struct igc_hw_stats, mptc)}, + {"tx_broadcast_packets", offsetof(struct igc_hw_stats, bptc)}, + {"tx_tso_packets", offsetof(struct igc_hw_stats, tsctc)}, + {"rx_sent_to_host_packets", offsetof(struct igc_hw_stats, rpthc)}, + {"tx_sent_by_host_packets", offsetof(struct igc_hw_stats, hgptc)}, + {"interrupt_assert_count", offsetof(struct igc_hw_stats, iac)}, + {"rx_descriptor_lower_threshold", + offsetof(struct igc_hw_stats, icrxdmtc)}, +}; + +#define IGC_NB_XSTATS (sizeof(rte_igc_stats_strings) / \ + sizeof(rte_igc_stats_strings[0])) + +static int eth_igc_configure(struct rte_eth_dev *dev); +static int eth_igc_link_update(struct rte_eth_dev *dev, int wait_to_complete); +static void eth_igc_stop(struct rte_eth_dev *dev); +static int eth_igc_start(struct rte_eth_dev *dev); +static int eth_igc_set_link_up(struct rte_eth_dev *dev); +static int eth_igc_set_link_down(struct rte_eth_dev *dev); +static void eth_igc_close(struct rte_eth_dev *dev); +static int eth_igc_reset(struct rte_eth_dev *dev); +static int eth_igc_promiscuous_enable(struct rte_eth_dev *dev); +static int eth_igc_promiscuous_disable(struct rte_eth_dev *dev); +static int eth_igc_fw_version_get(struct rte_eth_dev *dev, + char *fw_version, size_t fw_size); +static int eth_igc_infos_get(struct rte_eth_dev *dev, + struct rte_eth_dev_info *dev_info); +static int eth_igc_led_on(struct rte_eth_dev *dev); +static int eth_igc_led_off(struct rte_eth_dev *dev); +static const uint32_t *eth_igc_supported_ptypes_get(struct rte_eth_dev *dev); +static int eth_igc_rar_set(struct rte_eth_dev *dev, + struct rte_ether_addr *mac_addr, uint32_t index, uint32_t pool); +static void eth_igc_rar_clear(struct rte_eth_dev *dev, uint32_t index); +static int eth_igc_default_mac_addr_set(struct rte_eth_dev *dev, + struct rte_ether_addr *addr); +static int eth_igc_set_mc_addr_list(struct rte_eth_dev *dev, + struct rte_ether_addr *mc_addr_set, + uint32_t nb_mc_addr); +static int eth_igc_allmulticast_enable(struct rte_eth_dev *dev); +static int eth_igc_allmulticast_disable(struct rte_eth_dev *dev); +static int eth_igc_mtu_set(struct rte_eth_dev *dev, uint16_t mtu); +static int eth_igc_stats_get(struct rte_eth_dev *dev, + struct rte_eth_stats *rte_stats); +static int eth_igc_xstats_get(struct rte_eth_dev *dev, + struct rte_eth_xstat *xstats, unsigned int n); +static int eth_igc_xstats_get_by_id(struct rte_eth_dev *dev, + const uint64_t *ids, + uint64_t *values, unsigned int n); +static int eth_igc_xstats_get_names(struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, + unsigned int size); +static int eth_igc_xstats_get_names_by_id(struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, const uint64_t *ids, + unsigned int limit); +static int eth_igc_xstats_reset(struct rte_eth_dev *dev); +static int +eth_igc_queue_stats_mapping_set(struct rte_eth_dev *dev, + uint16_t queue_id, uint8_t stat_idx, uint8_t is_rx); +static int +eth_igc_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id); +static int +eth_igc_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id); +static int +eth_igc_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf); +static int 
+eth_igc_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf); +static int eth_igc_rss_reta_update(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size); +static int eth_igc_rss_reta_query(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size); +static int eth_igc_rss_hash_update(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf); +static int eth_igc_rss_hash_conf_get(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf); +static int +eth_igc_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on); +static int eth_igc_vlan_offload_set(struct rte_eth_dev *dev, int mask); +static int eth_igc_vlan_tpid_set(struct rte_eth_dev *dev, + enum rte_vlan_type vlan_type, uint16_t tpid); + +static const struct eth_dev_ops eth_igc_ops = { + .dev_configure = eth_igc_configure, + .link_update = eth_igc_link_update, + .dev_stop = eth_igc_stop, + .dev_start = eth_igc_start, + .dev_close = eth_igc_close, + .dev_reset = eth_igc_reset, + .dev_set_link_up = eth_igc_set_link_up, + .dev_set_link_down = eth_igc_set_link_down, + .promiscuous_enable = eth_igc_promiscuous_enable, + .promiscuous_disable = eth_igc_promiscuous_disable, + .allmulticast_enable = eth_igc_allmulticast_enable, + .allmulticast_disable = eth_igc_allmulticast_disable, + .fw_version_get = eth_igc_fw_version_get, + .dev_infos_get = eth_igc_infos_get, + .dev_led_on = eth_igc_led_on, + .dev_led_off = eth_igc_led_off, + .dev_supported_ptypes_get = eth_igc_supported_ptypes_get, + .mtu_set = eth_igc_mtu_set, + .mac_addr_add = eth_igc_rar_set, + .mac_addr_remove = eth_igc_rar_clear, + .mac_addr_set = eth_igc_default_mac_addr_set, + .set_mc_addr_list = eth_igc_set_mc_addr_list, + + .rx_queue_setup = eth_igc_rx_queue_setup, + .rx_queue_release = eth_igc_rx_queue_release, + .rx_queue_count = eth_igc_rx_queue_count, + .rx_descriptor_done = eth_igc_rx_descriptor_done, + .rx_descriptor_status = eth_igc_rx_descriptor_status, + .tx_descriptor_status = eth_igc_tx_descriptor_status, + .tx_queue_setup = eth_igc_tx_queue_setup, + .tx_queue_release = eth_igc_tx_queue_release, + .tx_done_cleanup = eth_igc_tx_done_cleanup, + .rxq_info_get = eth_igc_rxq_info_get, + .txq_info_get = eth_igc_txq_info_get, + .stats_get = eth_igc_stats_get, + .xstats_get = eth_igc_xstats_get, + .xstats_get_by_id = eth_igc_xstats_get_by_id, + .xstats_get_names_by_id = eth_igc_xstats_get_names_by_id, + .xstats_get_names = eth_igc_xstats_get_names, + .stats_reset = eth_igc_xstats_reset, + .xstats_reset = eth_igc_xstats_reset, + .queue_stats_mapping_set = eth_igc_queue_stats_mapping_set, + .rx_queue_intr_enable = eth_igc_rx_queue_intr_enable, + .rx_queue_intr_disable = eth_igc_rx_queue_intr_disable, + .flow_ctrl_get = eth_igc_flow_ctrl_get, + .flow_ctrl_set = eth_igc_flow_ctrl_set, + .reta_update = eth_igc_rss_reta_update, + .reta_query = eth_igc_rss_reta_query, + .rss_hash_update = eth_igc_rss_hash_update, + .rss_hash_conf_get = eth_igc_rss_hash_conf_get, + .vlan_filter_set = eth_igc_vlan_filter_set, + .vlan_offload_set = eth_igc_vlan_offload_set, + .vlan_tpid_set = eth_igc_vlan_tpid_set, + .vlan_strip_queue_set = eth_igc_vlan_strip_queue_set, + .filter_ctrl = eth_igc_filter_ctrl, +}; + +/* + * multiple queue mode checking + */ +static int +igc_check_mq_mode(struct rte_eth_dev *dev) +{ + enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode; + enum rte_eth_tx_mq_mode tx_mq_mode = dev->data->dev_conf.txmode.mq_mode; + + if 
(RTE_ETH_DEV_SRIOV(dev).active != 0) { + PMD_INIT_LOG(ERR, "SRIOV is not supported."); + return -EINVAL; + } + + if (rx_mq_mode != ETH_MQ_RX_NONE && + rx_mq_mode != ETH_MQ_RX_RSS) { + /* RSS together with VMDq not supported*/ + PMD_INIT_LOG(ERR, "RX mode %d is not supported.", + rx_mq_mode); + return -EINVAL; + } + + /* To no break software that set invalid mode, only display + * warning if invalid mode is used. + */ + if (tx_mq_mode != ETH_MQ_TX_NONE) + PMD_INIT_LOG(WARNING, + "TX mode %d is not supported. Due to meaningless in this driver, just ignore", + tx_mq_mode); + + return 0; +} + +static int +eth_igc_configure(struct rte_eth_dev *dev) +{ + struct igc_interrupt *intr = IGC_DEV_PRIVATE_INTR(dev); + int ret; + + PMD_INIT_FUNC_TRACE(); + + ret = igc_check_mq_mode(dev); + if (ret != 0) + return ret; + + intr->flags |= IGC_FLAG_NEED_LINK_UPDATE; + return 0; +} + +static int +eth_igc_set_link_up(struct rte_eth_dev *dev) +{ + struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); + + if (hw->phy.media_type == igc_media_type_copper) + igc_power_up_phy(hw); + else + igc_power_up_fiber_serdes_link(hw); + return 0; +} + +static int +eth_igc_set_link_down(struct rte_eth_dev *dev) +{ + struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); + + if (hw->phy.media_type == igc_media_type_copper) + igc_power_down_phy(hw); + else + igc_shutdown_fiber_serdes_link(hw); + return 0; +} + +/* + * disable other interrupt + */ +static void +igc_intr_other_disable(struct rte_eth_dev *dev) +{ + struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + + if (rte_intr_allow_others(intr_handle) && + dev->data->dev_conf.intr_conf.lsc) { + IGC_WRITE_REG(hw, IGC_EIMC, 1u << IGC_MSIX_OTHER_INTR_VEC); + } + + IGC_WRITE_REG(hw, IGC_IMC, ~0); + IGC_WRITE_FLUSH(hw); +} + +/* + * enable other interrupt + */ +static inline void +igc_intr_other_enable(struct rte_eth_dev *dev) +{ + struct igc_interrupt *intr = IGC_DEV_PRIVATE_INTR(dev); + struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + + if (rte_intr_allow_others(intr_handle) && + dev->data->dev_conf.intr_conf.lsc) { + IGC_WRITE_REG(hw, IGC_EIMS, 1u << IGC_MSIX_OTHER_INTR_VEC); + } + + IGC_WRITE_REG(hw, IGC_IMS, intr->mask); + IGC_WRITE_FLUSH(hw); +} + +/* + * It reads ICR and gets interrupt causes, check it and set a bit flag + * to update link status. 
+ */ +static void +eth_igc_interrupt_get_status(struct rte_eth_dev *dev) +{ + uint32_t icr; + struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); + struct igc_interrupt *intr = IGC_DEV_PRIVATE_INTR(dev); + + /* read-on-clear nic registers here */ + icr = IGC_READ_REG(hw, IGC_ICR); + + intr->flags = 0; + if (icr & IGC_ICR_LSC) + intr->flags |= IGC_FLAG_NEED_LINK_UPDATE; +} + +/* return 0 means link status changed, -1 means not changed */ +static int +eth_igc_link_update(struct rte_eth_dev *dev, int wait_to_complete) +{ + struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); + struct rte_eth_link link; + int link_check, count; + + link_check = 0; + hw->mac.get_link_status = 1; + + /* possible wait-to-complete in up to 9 seconds */ + for (count = 0; count < IGC_LINK_UPDATE_CHECK_TIMEOUT; count++) { + /* Read the real link status */ + switch (hw->phy.media_type) { + case igc_media_type_copper: + /* Do the work to read phy */ + igc_check_for_link(hw); + link_check = !hw->mac.get_link_status; + break; + + case igc_media_type_fiber: + igc_check_for_link(hw); + link_check = (IGC_READ_REG(hw, IGC_STATUS) & + IGC_STATUS_LU); + break; + + case igc_media_type_internal_serdes: + igc_check_for_link(hw); + link_check = hw->mac.serdes_has_link; + break; + + default: + break; + } + if (link_check || wait_to_complete == 0) + break; + rte_delay_ms(IGC_LINK_UPDATE_CHECK_INTERVAL); + } + memset(&link, 0, sizeof(link)); + + /* Now we check if a transition has happened */ + if (link_check) { + uint16_t duplex, speed; + hw->mac.ops.get_link_up_info(hw, &speed, &duplex); + link.link_duplex = (duplex == FULL_DUPLEX) ? + ETH_LINK_FULL_DUPLEX : + ETH_LINK_HALF_DUPLEX; + link.link_speed = speed; + link.link_status = ETH_LINK_UP; + link.link_autoneg = !(dev->data->dev_conf.link_speeds & + ETH_LINK_SPEED_FIXED); + + if (speed == SPEED_2500) { + uint32_t tipg = IGC_READ_REG(hw, IGC_TIPG); + if ((tipg & IGC_TIPG_IPGT_MASK) != 0x0b) { + tipg &= ~IGC_TIPG_IPGT_MASK; + tipg |= 0x0b; + IGC_WRITE_REG(hw, IGC_TIPG, tipg); + } + } + } else { + link.link_speed = 0; + link.link_duplex = ETH_LINK_HALF_DUPLEX; + link.link_status = ETH_LINK_DOWN; + link.link_autoneg = ETH_LINK_FIXED; + } + + return rte_eth_linkstatus_set(dev, &link); +} + +/* + * It executes link_update after knowing an interrupt is present. + */ +static void +eth_igc_interrupt_action(struct rte_eth_dev *dev) +{ + struct igc_interrupt *intr = IGC_DEV_PRIVATE_INTR(dev); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_eth_link link; + int ret; + + if (intr->flags & IGC_FLAG_NEED_LINK_UPDATE) { + intr->flags &= ~IGC_FLAG_NEED_LINK_UPDATE; + + /* set get_link_status to check register later */ + ret = eth_igc_link_update(dev, 0); + + /* check if link has changed */ + if (ret < 0) + return; + + rte_eth_linkstatus_get(dev, &link); + if (link.link_status) + PMD_DRV_LOG(INFO, + " Port %d: Link Up - speed %u Mbps - %s", + dev->data->port_id, + (unsigned int)link.link_speed, + link.link_duplex == ETH_LINK_FULL_DUPLEX ? + "full-duplex" : "half-duplex"); + else + PMD_DRV_LOG(INFO, " Port %d: Link Down", + dev->data->port_id); + + PMD_DRV_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT, + pci_dev->addr.domain, + pci_dev->addr.bus, + pci_dev->addr.devid, + pci_dev->addr.function); + _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, + NULL); + } +} + +/* + * Interrupt handler which shall be registered at first. + * + * @handle + * Pointer to interrupt handle. + * @param + * The address of parameter (struct rte_eth_dev *) registered before. 
+ */ +static void +eth_igc_interrupt_handler(void *param) +{ + struct rte_eth_dev *dev = (struct rte_eth_dev *)param; + + eth_igc_interrupt_get_status(dev); + eth_igc_interrupt_action(dev); +} + +static void igc_read_queue_stats_register(struct rte_eth_dev *dev); + +/* + * Update the queue status every IGC_ALARM_INTERVAL time. + * @param + * The address of parameter (struct rte_eth_dev *) registered before. + */ +static void +igc_update_queue_stats_handler(void *param) +{ + struct rte_eth_dev *dev = param; + igc_read_queue_stats_register(dev); + rte_eal_alarm_set(IGC_ALARM_INTERVAL, + igc_update_queue_stats_handler, dev); +} + +/* + * rx,tx enable/disable + */ +static void +eth_igc_rxtx_control(struct rte_eth_dev *dev, bool enable) +{ + struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); + uint32_t tctl, rctl; + + tctl = IGC_READ_REG(hw, IGC_TCTL); + rctl = IGC_READ_REG(hw, IGC_RCTL); + + if (enable) { + /* enable Tx/Rx */ + tctl |= IGC_TCTL_EN; + rctl |= IGC_RCTL_EN; + } else { + /* disable Tx/Rx */ + tctl &= ~IGC_TCTL_EN; + rctl &= ~IGC_RCTL_EN; + } + IGC_WRITE_REG(hw, IGC_TCTL, tctl); + IGC_WRITE_REG(hw, IGC_RCTL, rctl); + IGC_WRITE_FLUSH(hw); +} + +/* + * This routine disables all traffic on the adapter by issuing a + * global reset on the MAC. + */ +static void +eth_igc_stop(struct rte_eth_dev *dev) +{ + struct igc_adapter *adapter = IGC_DEV_PRIVATE(dev); + struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + struct rte_eth_link link; + + adapter->stopped = 1; + + /* disable receive and transmit */ + eth_igc_rxtx_control(dev, false); + + /* disable all MSI-X interrupts */ + IGC_WRITE_REG(hw, IGC_EIMC, 0x1f); + IGC_WRITE_FLUSH(hw); + + /* clear all MSI-X interrupts */ + IGC_WRITE_REG(hw, IGC_EICR, 0x1f); + + igc_intr_other_disable(dev); + + rte_eal_alarm_cancel(igc_update_queue_stats_handler, dev); + + /* disable intr eventfd mapping */ + rte_intr_disable(intr_handle); + + igc_reset_hw(hw); + + /* disable all wake up */ + IGC_WRITE_REG(hw, IGC_WUC, 0); + + /* disable checking EEE operation in MAC loopback mode */ + igc_read_reg_check_clear_bits(hw, IGC_EEER, IGC_EEER_EEE_FRC_AN); + + /* Set bit for Go Link disconnect */ + igc_read_reg_check_set_bits(hw, IGC_82580_PHY_POWER_MGMT, + IGC_82580_PM_GO_LINKD); + + /* Power down the phy. 
Needed to make the link go Down */ + eth_igc_set_link_down(dev); + + igc_dev_clear_queues(dev); + + /* clear the recorded link status */ + memset(&link, 0, sizeof(link)); + rte_eth_linkstatus_set(dev, &link); + + if (!rte_intr_allow_others(intr_handle)) + /* resume to the default handler */ + rte_intr_callback_register(intr_handle, + eth_igc_interrupt_handler, + (void *)dev); + + /* Clean datapath event and queue/vec mapping */ + rte_intr_efd_disable(intr_handle); + if (intr_handle->intr_vec != NULL) { + rte_free(intr_handle->intr_vec); + intr_handle->intr_vec = NULL; + } +} + +/* + * write interrupt vector allocation register + * @hw + * board private structure + * @queue_index + * queue index, valid 0,1,2,3 + * @tx + * tx:1, rx:0 + * @msix_vector + * msix-vector, valid 0,1,2,3,4 + */ +static void +igc_write_ivar(struct igc_hw *hw, uint8_t queue_index, + bool tx, uint8_t msix_vector) +{ + uint8_t offset = 0; + uint8_t reg_index = queue_index >> 1; + uint32_t val; + + /* + * IVAR(0) + * bit31...24 bit23...16 bit15...8 bit7...0 + * TX1 RX1 TX0 RX0 + * + * IVAR(1) + * bit31...24 bit23...16 bit15...8 bit7...0 + * TX3 RX3 TX2 RX2 + */ + + if (tx) + offset = 8; + + if (queue_index & 1) + offset += 16; + + val = IGC_READ_REG_ARRAY(hw, IGC_IVAR0, reg_index); + + /* clear bits */ + val &= ~((uint32_t)0xFF << offset); + + /* write vector and valid bit */ + val |= (uint32_t)(msix_vector | IGC_IVAR_VALID) << offset; + + IGC_WRITE_REG_ARRAY(hw, IGC_IVAR0, reg_index, val); +} + +/* Sets up the hardware to generate MSI-X interrupts properly + * @hw + * board private structure + */ +static void +igc_configure_msix_intr(struct rte_eth_dev *dev) +{ + struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + + uint32_t intr_mask; + uint32_t vec = IGC_MISC_VEC_ID; + uint32_t base = IGC_MISC_VEC_ID; + uint32_t misc_shift = 0; + int i; + + /* won't configure msix register if no mapping is done + * between intr vector and event fd + */ + if (!rte_intr_dp_is_en(intr_handle)) + return; + + if (rte_intr_allow_others(intr_handle)) { + base = IGC_RX_VEC_START; + vec = base; + misc_shift = 1; + } + + /* turn on MSI-X capability first */ + IGC_WRITE_REG(hw, IGC_GPIE, IGC_GPIE_MSIX_MODE | + IGC_GPIE_PBA | IGC_GPIE_EIAME | + IGC_GPIE_NSICR); + intr_mask = RTE_LEN2MASK(intr_handle->nb_efd, uint32_t) << + misc_shift; + + if (dev->data->dev_conf.intr_conf.lsc) + intr_mask |= (1u << IGC_MSIX_OTHER_INTR_VEC); + + /* enable msix auto-clear */ + igc_read_reg_check_set_bits(hw, IGC_EIAC, intr_mask); + + /* set other cause interrupt vector */ + igc_read_reg_check_set_bits(hw, IGC_IVAR_MISC, + (uint32_t)(IGC_MSIX_OTHER_INTR_VEC | IGC_IVAR_VALID) << 8); + + /* enable auto-mask */ + igc_read_reg_check_set_bits(hw, IGC_EIAM, intr_mask); + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + igc_write_ivar(hw, i, 0, vec); + intr_handle->intr_vec[i] = vec; + if (vec < base + intr_handle->nb_efd - 1) + vec++; + } + + IGC_WRITE_FLUSH(hw); +} + +/** + * It enables the interrupt mask and then enable the interrupt. + * + * @dev + * Pointer to struct rte_eth_dev. + * @on + * Enable or Disable + */ +static void +igc_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on) +{ + struct igc_interrupt *intr = IGC_DEV_PRIVATE_INTR(dev); + + if (on) + intr->mask |= IGC_ICR_LSC; + else + intr->mask &= ~IGC_ICR_LSC; +} + +/* + * It enables the interrupt. + * It will be called once only during nic initialized. 
+ */ +static void +igc_rxq_interrupt_setup(struct rte_eth_dev *dev) +{ + uint32_t mask; + struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + int misc_shift = rte_intr_allow_others(intr_handle) ? 1 : 0; + + /* won't configure msix register if no mapping is done + * between intr vector and event fd + */ + if (!rte_intr_dp_is_en(intr_handle)) + return; + + mask = RTE_LEN2MASK(intr_handle->nb_efd, uint32_t) << misc_shift; + IGC_WRITE_REG(hw, IGC_EIMS, mask); +} + +/* + * Get hardware rx-buffer size. + */ +static inline int +igc_get_rx_buffer_size(struct igc_hw *hw) +{ + return (IGC_READ_REG(hw, IGC_RXPBS) & 0x3f) << 10; +} + +/* + * igc_hw_control_acquire sets CTRL_EXT:DRV_LOAD bit. + * For ASF and Pass Through versions of f/w this means + * that the driver is loaded. + */ +static void +igc_hw_control_acquire(struct igc_hw *hw) +{ + uint32_t ctrl_ext; + + /* Let firmware know the driver has taken over */ + ctrl_ext = IGC_READ_REG(hw, IGC_CTRL_EXT); + IGC_WRITE_REG(hw, IGC_CTRL_EXT, ctrl_ext | IGC_CTRL_EXT_DRV_LOAD); +} + +/* + * igc_hw_control_release resets CTRL_EXT:DRV_LOAD bit. + * For ASF and Pass Through versions of f/w this means that the + * driver is no longer loaded. + */ +static void +igc_hw_control_release(struct igc_hw *hw) +{ + uint32_t ctrl_ext; + + /* Let firmware taken over control of h/w */ + ctrl_ext = IGC_READ_REG(hw, IGC_CTRL_EXT); + IGC_WRITE_REG(hw, IGC_CTRL_EXT, + ctrl_ext & ~IGC_CTRL_EXT_DRV_LOAD); +} + +static int +igc_hardware_init(struct igc_hw *hw) +{ + uint32_t rx_buf_size; + int diag; + + /* Let the firmware know the OS is in control */ + igc_hw_control_acquire(hw); + + /* Issue a global reset */ + igc_reset_hw(hw); + + /* disable all wake up */ + IGC_WRITE_REG(hw, IGC_WUC, 0); + + /* + * Hardware flow control + * - High water mark should allow for at least two standard size (1518) + * frames to be received after sending an XOFF. + * - Low water mark works best when it is very near the high water mark. + * This allows the receiver to restart by sending XON when it has + * drained a bit. Here we use an arbitrary value of 1500 which will + * restart after one full frame is pulled from the buffer. There + * could be several smaller frames in the buffer and if so they will + * not trigger the XON until their total number reduces the buffer + * by 1500. + */ + rx_buf_size = igc_get_rx_buffer_size(hw); + hw->fc.high_water = rx_buf_size - (RTE_ETHER_MAX_LEN * 2); + hw->fc.low_water = hw->fc.high_water - 1500; + hw->fc.pause_time = IGC_FC_PAUSE_TIME; + hw->fc.send_xon = 1; + hw->fc.requested_mode = igc_fc_full; + + diag = igc_init_hw(hw); + if (diag < 0) + return diag; + + igc_get_phy_info(hw); + igc_check_for_link(hw); + + return 0; +} + +static int +eth_igc_start(struct rte_eth_dev *dev) +{ + struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); + struct igc_adapter *adapter = IGC_DEV_PRIVATE(dev); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + uint32_t *speeds; + int ret; + + PMD_INIT_FUNC_TRACE(); + + /* disable all MSI-X interrupts */ + IGC_WRITE_REG(hw, IGC_EIMC, 0x1f); + IGC_WRITE_FLUSH(hw); + + /* clear all MSI-X interrupts */ + IGC_WRITE_REG(hw, IGC_EICR, 0x1f); + + /* disable uio/vfio intr/eventfd mapping */ + if (!adapter->stopped) + rte_intr_disable(intr_handle); + + /* Power up the phy. 
Needed to make the link go Up */ + eth_igc_set_link_up(dev); + + /* Put the address into the Receive Address Array */ + igc_rar_set(hw, hw->mac.addr, 0); + + /* Initialize the hardware */ + if (igc_hardware_init(hw)) { + PMD_DRV_LOG(ERR, "Unable to initialize the hardware"); + return -EIO; + } + adapter->stopped = 0; + + /* check and configure queue intr-vector mapping */ + if (rte_intr_cap_multiple(intr_handle) && + dev->data->dev_conf.intr_conf.rxq) { + uint32_t intr_vector = dev->data->nb_rx_queues; + if (rte_intr_efd_enable(intr_handle, intr_vector)) + return -1; + } + + if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) { + intr_handle->intr_vec = rte_zmalloc("intr_vec", + dev->data->nb_rx_queues * sizeof(int), 0); + if (intr_handle->intr_vec == NULL) { + PMD_DRV_LOG(ERR, + "Failed to allocate %d rx_queues intr_vec", + dev->data->nb_rx_queues); + return -ENOMEM; + } + } + + /* configure msix for rx interrupt */ + igc_configure_msix_intr(dev); + + igc_tx_init(dev); + + /* This can fail when allocating mbufs for descriptor rings */ + ret = igc_rx_init(dev); + if (ret) { + PMD_DRV_LOG(ERR, "Unable to initialize RX hardware"); + igc_dev_clear_queues(dev); + return ret; + } + + igc_clear_hw_cntrs_base_generic(hw); + + /* VLAN Offload Settings */ + eth_igc_vlan_offload_set(dev, + ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | + ETH_VLAN_EXTEND_MASK); + + /* Setup link speed and duplex */ + speeds = &dev->data->dev_conf.link_speeds; + if (*speeds == ETH_LINK_SPEED_AUTONEG) { + hw->phy.autoneg_advertised = IGC_ALL_SPEED_DUPLEX_2500; + hw->mac.autoneg = 1; + } else { + int num_speeds = 0; + bool autoneg = (*speeds & ETH_LINK_SPEED_FIXED) == 0; + + /* Reset */ + hw->phy.autoneg_advertised = 0; + + if (*speeds & ~(ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M | + ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M | + ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | + ETH_LINK_SPEED_FIXED)) { + num_speeds = -1; + goto error_invalid_config; + } + if (*speeds & ETH_LINK_SPEED_10M_HD) { + hw->phy.autoneg_advertised |= ADVERTISE_10_HALF; + num_speeds++; + } + if (*speeds & ETH_LINK_SPEED_10M) { + hw->phy.autoneg_advertised |= ADVERTISE_10_FULL; + num_speeds++; + } + if (*speeds & ETH_LINK_SPEED_100M_HD) { + hw->phy.autoneg_advertised |= ADVERTISE_100_HALF; + num_speeds++; + } + if (*speeds & ETH_LINK_SPEED_100M) { + hw->phy.autoneg_advertised |= ADVERTISE_100_FULL; + num_speeds++; + } + if (*speeds & ETH_LINK_SPEED_1G) { + hw->phy.autoneg_advertised |= ADVERTISE_1000_FULL; + num_speeds++; + } + if (*speeds & ETH_LINK_SPEED_2_5G) { + hw->phy.autoneg_advertised |= ADVERTISE_2500_FULL; + num_speeds++; + } + if (num_speeds == 0 || (!autoneg && num_speeds > 1)) + goto error_invalid_config; + + /* Set/reset the mac.autoneg based on the link speed, + * fixed or not + */ + if (!autoneg) { + hw->mac.autoneg = 0; + hw->mac.forced_speed_duplex = + hw->phy.autoneg_advertised; + } else { + hw->mac.autoneg = 1; + } + } + + igc_setup_link(hw); + + if (rte_intr_allow_others(intr_handle)) { + /* check if lsc interrupt is enabled */ + if (dev->data->dev_conf.intr_conf.lsc) + igc_lsc_interrupt_setup(dev, 1); + else + igc_lsc_interrupt_setup(dev, 0); + } else { + rte_intr_callback_unregister(intr_handle, + eth_igc_interrupt_handler, + (void *)dev); + if (dev->data->dev_conf.intr_conf.lsc) + PMD_DRV_LOG(INFO, + "LSC won't enable because of no intr multiplex"); + } + + /* enable uio/vfio intr/eventfd mapping */ + rte_intr_enable(intr_handle); + + rte_eal_alarm_set(IGC_ALARM_INTERVAL, + igc_update_queue_stats_handler, dev); + + 
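+	/*
+	 * Note (descriptive, inferred from the handlers above):
+	 * igc_update_queue_stats_handler() re-arms itself with
+	 * rte_eal_alarm_set(), so from this point the per-queue counters
+	 * (PQGPRC/PQGPTC/...) are polled every IGC_ALARM_INTERVAL and
+	 * accumulated into 64-bit software totals before the 32-bit
+	 * hardware registers can wrap.
+	 */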
/* check if rxq interrupt is enabled */ + if (dev->data->dev_conf.intr_conf.rxq && + rte_intr_dp_is_en(intr_handle)) + igc_rxq_interrupt_setup(dev); + + /* resume enabled intr since hw reset */ + igc_intr_other_enable(dev); + + eth_igc_rxtx_control(dev, true); + eth_igc_link_update(dev, 0); + + /* configure MAC-loopback mode */ + if (dev->data->dev_conf.lpbk_mode == 1) { + uint32_t reg_val; + + reg_val = IGC_READ_REG(hw, IGC_CTRL); + reg_val &= ~IGC_CTRL_SPEED_MASK; + reg_val |= IGC_CTRL_SLU | IGC_CTRL_FRCSPD | + IGC_CTRL_FRCDPX | IGC_CTRL_FD | IGC_CTRL_SPEED_2500; + IGC_WRITE_REG(hw, IGC_CTRL, reg_val); + + igc_read_reg_check_set_bits(hw, IGC_EEER, IGC_EEER_EEE_FRC_AN); + } + + return 0; + +error_invalid_config: + PMD_DRV_LOG(ERR, "Invalid advertised speeds (%u) for port %u", + dev->data->dev_conf.link_speeds, dev->data->port_id); + igc_dev_clear_queues(dev); + return -EINVAL; +} + +static int +igc_reset_swfw_lock(struct igc_hw *hw) +{ + int ret_val; + + /* + * Do mac ops initialization manually here, since we will need + * some function pointers set by this call. + */ + ret_val = igc_init_mac_params(hw); + if (ret_val) + return ret_val; + + /* + * SMBI lock should not fail in this early stage. If this is the case, + * it is due to an improper exit of the application. + * So force the release of the faulty lock. + */ + if (igc_get_hw_semaphore_generic(hw) < 0) + PMD_DRV_LOG(DEBUG, "SMBI lock released"); + + igc_put_hw_semaphore_generic(hw); + + if (hw->mac.ops.acquire_swfw_sync != NULL) { + uint16_t mask; + + /* + * Phy lock should not fail in this early stage. + * If this is the case, it is due to an improper exit of the + * application. So force the release of the faulty lock. + */ + mask = IGC_SWFW_PHY0_SM; + if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0) { + PMD_DRV_LOG(DEBUG, "SWFW phy%d lock released", + hw->bus.func); + } + hw->mac.ops.release_swfw_sync(hw, mask); + + /* + * This one is more tricky since it is common to all ports; but + * swfw_sync retries last long enough (1s) to be almost sure + * that if lock can not be taken it is due to an improper lock + * of the semaphore. + */ + mask = IGC_SWFW_EEP_SM; + if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0) + PMD_DRV_LOG(DEBUG, "SWFW common locks released"); + + hw->mac.ops.release_swfw_sync(hw, mask); + } + + return IGC_SUCCESS; +} + +/* + * free all rx/tx queues. 
+ */ +static void +igc_dev_free_queues(struct rte_eth_dev *dev) +{ + uint16_t i; + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + eth_igc_rx_queue_release(dev->data->rx_queues[i]); + dev->data->rx_queues[i] = NULL; + } + dev->data->nb_rx_queues = 0; + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + eth_igc_tx_queue_release(dev->data->tx_queues[i]); + dev->data->tx_queues[i] = NULL; + } + dev->data->nb_tx_queues = 0; +} + +static void +eth_igc_close(struct rte_eth_dev *dev) +{ + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); + struct igc_adapter *adapter = IGC_DEV_PRIVATE(dev); + int retry = 0; + + PMD_INIT_FUNC_TRACE(); + + if (!adapter->stopped) + eth_igc_stop(dev); + + igc_flow_flush(dev, NULL); + igc_clear_all_filter(dev); + + igc_intr_other_disable(dev); + do { + int ret = rte_intr_callback_unregister(intr_handle, + eth_igc_interrupt_handler, dev); + if (ret >= 0 || ret == -ENOENT || ret == -EINVAL) + break; + + PMD_DRV_LOG(ERR, "intr callback unregister failed: %d", ret); + DELAY(200 * 1000); /* delay 200ms */ + } while (retry++ < 5); + + igc_phy_hw_reset(hw); + igc_hw_control_release(hw); + igc_dev_free_queues(dev); + + /* Reset any pending lock */ + igc_reset_swfw_lock(hw); +} + +static void +igc_identify_hardware(struct rte_eth_dev *dev, struct rte_pci_device *pci_dev) +{ + struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); + + hw->vendor_id = pci_dev->id.vendor_id; + hw->device_id = pci_dev->id.device_id; + hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id; + hw->subsystem_device_id = pci_dev->id.subsystem_device_id; +} + +static int +eth_igc_dev_init(struct rte_eth_dev *dev) +{ + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct igc_adapter *igc = IGC_DEV_PRIVATE(dev); + struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); + int i, error = 0; + + PMD_INIT_FUNC_TRACE(); + dev->dev_ops = ð_igc_ops; + + /* + * for secondary processes, we don't initialize any further as primary + * has already done this work. Only check we don't need a different + * RX function. + */ + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; + + rte_eth_copy_pci_info(dev, pci_dev); + + hw->back = pci_dev; + hw->hw_addr = (void *)pci_dev->mem_resource[0].addr; + + igc_identify_hardware(dev, pci_dev); + if (igc_setup_init_funcs(hw, false) != IGC_SUCCESS) { + error = -EIO; + goto err_late; + } + + igc_get_bus_info(hw); + + /* Reset any pending lock */ + if (igc_reset_swfw_lock(hw) != IGC_SUCCESS) { + error = -EIO; + goto err_late; + } + + /* Finish initialization */ + if (igc_setup_init_funcs(hw, true) != IGC_SUCCESS) { + error = -EIO; + goto err_late; + } + + hw->mac.autoneg = 1; + hw->phy.autoneg_wait_to_complete = 0; + hw->phy.autoneg_advertised = IGC_ALL_SPEED_DUPLEX_2500; + + /* Copper options */ + if (hw->phy.media_type == igc_media_type_copper) { + hw->phy.mdix = 0; /* AUTO_ALL_MODES */ + hw->phy.disable_polarity_correction = 0; + hw->phy.ms_type = igc_ms_hw_default; + } + + /* + * Start from a known state, this is important in reading the nvm + * and mac from that. + */ + igc_reset_hw(hw); + + /* Make sure we have a good EEPROM before we read from it */ + if (igc_validate_nvm_checksum(hw) < 0) { + /* + * Some PCI-E parts fail the first check due to + * the link being in sleep state, call it again, + * if it fails a second time its a real issue. 
+ */ + if (igc_validate_nvm_checksum(hw) < 0) { + PMD_INIT_LOG(ERR, "EEPROM checksum invalid"); + error = -EIO; + goto err_late; + } + } + + /* Read the permanent MAC address out of the EEPROM */ + if (igc_read_mac_addr(hw) != 0) { + PMD_INIT_LOG(ERR, "EEPROM error while reading MAC address"); + error = -EIO; + goto err_late; + } + + /* Allocate memory for storing MAC addresses */ + dev->data->mac_addrs = rte_zmalloc("igc", + RTE_ETHER_ADDR_LEN * hw->mac.rar_entry_count, 0); + if (dev->data->mac_addrs == NULL) { + PMD_INIT_LOG(ERR, "Failed to allocate %d bytes for storing MAC", + RTE_ETHER_ADDR_LEN * hw->mac.rar_entry_count); + error = -ENOMEM; + goto err_late; + } + + /* Copy the permanent MAC address */ + rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr, + &dev->data->mac_addrs[0]); + + /* Now initialize the hardware */ + if (igc_hardware_init(hw) != 0) { + PMD_INIT_LOG(ERR, "Hardware initialization failed"); + rte_free(dev->data->mac_addrs); + dev->data->mac_addrs = NULL; + error = -ENODEV; + goto err_late; + } + + /* Pass the information to the rte_eth_dev_close() that it should also + * release the private port resources. + */ + dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE; + + hw->mac.get_link_status = 1; + igc->stopped = 0; + + /* Indicate SOL/IDER usage */ + if (igc_check_reset_block(hw) < 0) + PMD_INIT_LOG(ERR, + "PHY reset is blocked due to SOL/IDER session."); + + PMD_INIT_LOG(DEBUG, "port_id %d vendorID=0x%x deviceID=0x%x", + dev->data->port_id, pci_dev->id.vendor_id, + pci_dev->id.device_id); + + rte_intr_callback_register(&pci_dev->intr_handle, + eth_igc_interrupt_handler, (void *)dev); + + /* enable uio/vfio intr/eventfd mapping */ + rte_intr_enable(&pci_dev->intr_handle); + + /* enable support intr */ + igc_intr_other_enable(dev); + + /* initiate queue status */ + for (i = 0; i < IGC_QUEUE_PAIRS_NUM; i++) { + igc->txq_stats_map[i] = -1; + igc->rxq_stats_map[i] = -1; + } + + igc_flow_init(dev); + igc_clear_all_filter(dev); + return 0; + +err_late: + igc_hw_control_release(hw); + return error; +} + +static int +eth_igc_dev_uninit(__rte_unused struct rte_eth_dev *eth_dev) +{ + PMD_INIT_FUNC_TRACE(); + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; + + eth_igc_close(eth_dev); + return 0; +} + +static int +eth_igc_reset(struct rte_eth_dev *dev) +{ + int ret; + + PMD_INIT_FUNC_TRACE(); + + ret = eth_igc_dev_uninit(dev); + if (ret) + return ret; + + return eth_igc_dev_init(dev); +} + +static int +eth_igc_promiscuous_enable(struct rte_eth_dev *dev) +{ + struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); + uint32_t rctl; + + rctl = IGC_READ_REG(hw, IGC_RCTL); + rctl |= (IGC_RCTL_UPE | IGC_RCTL_MPE); + IGC_WRITE_REG(hw, IGC_RCTL, rctl); + return 0; +} + +static int +eth_igc_promiscuous_disable(struct rte_eth_dev *dev) +{ + struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); + uint32_t rctl; + + rctl = IGC_READ_REG(hw, IGC_RCTL); + rctl &= (~IGC_RCTL_UPE); + if (dev->data->all_multicast == 1) + rctl |= IGC_RCTL_MPE; + else + rctl &= (~IGC_RCTL_MPE); + IGC_WRITE_REG(hw, IGC_RCTL, rctl); + return 0; +} + +static int +eth_igc_allmulticast_enable(struct rte_eth_dev *dev) +{ + struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); + uint32_t rctl; + + rctl = IGC_READ_REG(hw, IGC_RCTL); + rctl |= IGC_RCTL_MPE; + IGC_WRITE_REG(hw, IGC_RCTL, rctl); + return 0; +} + +static int +eth_igc_allmulticast_disable(struct rte_eth_dev *dev) +{ + struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); + uint32_t rctl; + + if (dev->data->promiscuous == 1) + return 0; /* must remain in all_multicast mode */ + + 
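+	/*
+	 * Note: MPE is left set here because promiscuous mode also relies
+	 * on it; eth_igc_promiscuous_disable() clears MPE later once
+	 * all-multicast is no longer requested.
+	 */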
rctl = IGC_READ_REG(hw, IGC_RCTL); + rctl &= (~IGC_RCTL_MPE); + IGC_WRITE_REG(hw, IGC_RCTL, rctl); + return 0; +} + +static int +eth_igc_fw_version_get(struct rte_eth_dev *dev, char *fw_version, + size_t fw_size) +{ + struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); + struct igc_fw_version fw; + int ret; + + igc_get_fw_version(hw, &fw); + + /* if option rom is valid, display its version too */ + if (fw.or_valid) { + ret = snprintf(fw_version, fw_size, + "%d.%d, 0x%08x, %d.%d.%d", + fw.eep_major, fw.eep_minor, fw.etrack_id, + fw.or_major, fw.or_build, fw.or_patch); + /* no option rom */ + } else { + if (fw.etrack_id != 0X0000) { + ret = snprintf(fw_version, fw_size, + "%d.%d, 0x%08x", + fw.eep_major, fw.eep_minor, + fw.etrack_id); + } else { + ret = snprintf(fw_version, fw_size, + "%d.%d.%d", + fw.eep_major, fw.eep_minor, + fw.eep_build); + } + } + + ret += 1; /* add the size of '\0' */ + if (fw_size < (u32)ret) + return ret; + else + return 0; +} + +static int +eth_igc_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) +{ + struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); + + dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */ + dev_info->max_rx_pktlen = MAX_RX_JUMBO_FRAME_SIZE; + dev_info->max_mac_addrs = hw->mac.rar_entry_count; + dev_info->rx_offload_capa = IGC_RX_OFFLOAD_ALL; + dev_info->tx_offload_capa = IGC_TX_OFFLOAD_ALL; + dev_info->rx_queue_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP; + + dev_info->max_rx_queues = IGC_QUEUE_PAIRS_NUM; + dev_info->max_tx_queues = IGC_QUEUE_PAIRS_NUM; + dev_info->max_vmdq_pools = 0; + + dev_info->hash_key_size = IGC_HKEY_MAX_INDEX * sizeof(uint32_t); + dev_info->reta_size = ETH_RSS_RETA_SIZE_128; + dev_info->flow_type_rss_offloads = IGC_RSS_OFFLOAD_ALL; + + dev_info->default_rxconf = (struct rte_eth_rxconf) { + .rx_thresh = { + .pthresh = IGC_DEFAULT_RX_PTHRESH, + .hthresh = IGC_DEFAULT_RX_HTHRESH, + .wthresh = IGC_DEFAULT_RX_WTHRESH, + }, + .rx_free_thresh = IGC_DEFAULT_RX_FREE_THRESH, + .rx_drop_en = 0, + .offloads = 0, + }; + + dev_info->default_txconf = (struct rte_eth_txconf) { + .tx_thresh = { + .pthresh = IGC_DEFAULT_TX_PTHRESH, + .hthresh = IGC_DEFAULT_TX_HTHRESH, + .wthresh = IGC_DEFAULT_TX_WTHRESH, + }, + .offloads = 0, + }; + + dev_info->rx_desc_lim = rx_desc_lim; + dev_info->tx_desc_lim = tx_desc_lim; + + dev_info->speed_capa = ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M | + ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M | + ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G; + + dev_info->max_mtu = dev_info->max_rx_pktlen - IGC_ETH_OVERHEAD; + dev_info->min_mtu = RTE_ETHER_MIN_MTU; + return 0; +} + +static int +eth_igc_led_on(struct rte_eth_dev *dev) +{ + struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); + + return igc_led_on(hw) == IGC_SUCCESS ? 0 : -ENOTSUP; +} + +static int +eth_igc_led_off(struct rte_eth_dev *dev) +{ + struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); + + return igc_led_off(hw) == IGC_SUCCESS ? 
0 : -ENOTSUP; +} + +static const uint32_t * +eth_igc_supported_ptypes_get(__rte_unused struct rte_eth_dev *dev) +{ + static const uint32_t ptypes[] = { + /* refers to rx_desc_pkt_info_to_pkt_type() */ + RTE_PTYPE_L2_ETHER, + RTE_PTYPE_L3_IPV4, + RTE_PTYPE_L3_IPV4_EXT, + RTE_PTYPE_L3_IPV6, + RTE_PTYPE_L3_IPV6_EXT, + RTE_PTYPE_L4_TCP, + RTE_PTYPE_L4_UDP, + RTE_PTYPE_L4_SCTP, + RTE_PTYPE_TUNNEL_IP, + RTE_PTYPE_INNER_L3_IPV6, + RTE_PTYPE_INNER_L3_IPV6_EXT, + RTE_PTYPE_INNER_L4_TCP, + RTE_PTYPE_INNER_L4_UDP, + RTE_PTYPE_UNKNOWN + }; + + return ptypes; +} + +static int +eth_igc_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) +{ + struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); + uint32_t frame_size = mtu + IGC_ETH_OVERHEAD; + uint32_t rctl; + + /* if extend vlan has been enabled */ + if (IGC_READ_REG(hw, IGC_CTRL_EXT) & IGC_CTRL_EXT_EXT_VLAN) + frame_size += VLAN_TAG_SIZE; + + /* check that mtu is within the allowed range */ + if (mtu < RTE_ETHER_MIN_MTU || + frame_size > MAX_RX_JUMBO_FRAME_SIZE) + return -EINVAL; + + /* + * refuse mtu that requires the support of scattered packets when + * this feature has not been enabled before. + */ + if (!dev->data->scattered_rx && + frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) + return -EINVAL; + + rctl = IGC_READ_REG(hw, IGC_RCTL); + + /* switch to jumbo mode if needed */ + if (mtu > RTE_ETHER_MTU) { + dev->data->dev_conf.rxmode.offloads |= + DEV_RX_OFFLOAD_JUMBO_FRAME; + rctl |= IGC_RCTL_LPE; + } else { + dev->data->dev_conf.rxmode.offloads &= + ~DEV_RX_OFFLOAD_JUMBO_FRAME; + rctl &= ~IGC_RCTL_LPE; + } + IGC_WRITE_REG(hw, IGC_RCTL, rctl); + + /* update max frame size */ + dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size; + + IGC_WRITE_REG(hw, IGC_RLPML, + dev->data->dev_conf.rxmode.max_rx_pkt_len); + + return 0; +} + +static int +eth_igc_rar_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr, + uint32_t index, uint32_t pool) +{ + struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); + + igc_rar_set(hw, mac_addr->addr_bytes, index); + RTE_SET_USED(pool); + return 0; +} + +static void +eth_igc_rar_clear(struct rte_eth_dev *dev, uint32_t index) +{ + uint8_t addr[RTE_ETHER_ADDR_LEN]; + struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); + + memset(addr, 0, sizeof(addr)); + igc_rar_set(hw, addr, index); +} + +static int +eth_igc_default_mac_addr_set(struct rte_eth_dev *dev, + struct rte_ether_addr *addr) +{ + struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); + igc_rar_set(hw, addr->addr_bytes, 0); + return 0; +} + +static int +eth_igc_set_mc_addr_list(struct rte_eth_dev *dev, + struct rte_ether_addr *mc_addr_set, + uint32_t nb_mc_addr) +{ + struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); + igc_update_mc_addr_list(hw, (u8 *)mc_addr_set, nb_mc_addr); + return 0; +} + +/* + * Read hardware registers + */ +static void +igc_read_stats_registers(struct igc_hw *hw, struct igc_hw_stats *stats) +{ + int pause_frames; + + uint64_t old_gprc = stats->gprc; + uint64_t old_gptc = stats->gptc; + uint64_t old_tpr = stats->tpr; + uint64_t old_tpt = stats->tpt; + uint64_t old_rpthc = stats->rpthc; + uint64_t old_hgptc = stats->hgptc; + + stats->crcerrs += IGC_READ_REG(hw, IGC_CRCERRS); + stats->algnerrc += IGC_READ_REG(hw, IGC_ALGNERRC); + stats->rxerrc += IGC_READ_REG(hw, IGC_RXERRC); + stats->mpc += IGC_READ_REG(hw, IGC_MPC); + stats->scc += IGC_READ_REG(hw, IGC_SCC); + stats->ecol += IGC_READ_REG(hw, IGC_ECOL); + + stats->mcc += IGC_READ_REG(hw, IGC_MCC); + stats->latecol += IGC_READ_REG(hw, IGC_LATECOL); + stats->colc += IGC_READ_REG(hw, IGC_COLC); + + stats->dc += 
IGC_READ_REG(hw, IGC_DC); + stats->tncrs += IGC_READ_REG(hw, IGC_TNCRS); + stats->htdpmc += IGC_READ_REG(hw, IGC_HTDPMC); + stats->rlec += IGC_READ_REG(hw, IGC_RLEC); + stats->xonrxc += IGC_READ_REG(hw, IGC_XONRXC); + stats->xontxc += IGC_READ_REG(hw, IGC_XONTXC); + + /* + * For watchdog management we need to know if we have been + * paused during the last interval, so capture that here. + */ + pause_frames = IGC_READ_REG(hw, IGC_XOFFRXC); + stats->xoffrxc += pause_frames; + stats->xofftxc += IGC_READ_REG(hw, IGC_XOFFTXC); + stats->fcruc += IGC_READ_REG(hw, IGC_FCRUC); + stats->prc64 += IGC_READ_REG(hw, IGC_PRC64); + stats->prc127 += IGC_READ_REG(hw, IGC_PRC127); + stats->prc255 += IGC_READ_REG(hw, IGC_PRC255); + stats->prc511 += IGC_READ_REG(hw, IGC_PRC511); + stats->prc1023 += IGC_READ_REG(hw, IGC_PRC1023); + stats->prc1522 += IGC_READ_REG(hw, IGC_PRC1522); + stats->gprc += IGC_READ_REG(hw, IGC_GPRC); + stats->bprc += IGC_READ_REG(hw, IGC_BPRC); + stats->mprc += IGC_READ_REG(hw, IGC_MPRC); + stats->gptc += IGC_READ_REG(hw, IGC_GPTC); + + /* For the 64-bit byte counters the low dword must be read first. */ + /* Both registers clear on the read of the high dword */ + + /* Workaround CRC bytes included in size, take away 4 bytes/packet */ + stats->gorc += IGC_READ_REG(hw, IGC_GORCL); + stats->gorc += ((uint64_t)IGC_READ_REG(hw, IGC_GORCH) << 32); + stats->gorc -= (stats->gprc - old_gprc) * RTE_ETHER_CRC_LEN; + stats->gotc += IGC_READ_REG(hw, IGC_GOTCL); + stats->gotc += ((uint64_t)IGC_READ_REG(hw, IGC_GOTCH) << 32); + stats->gotc -= (stats->gptc - old_gptc) * RTE_ETHER_CRC_LEN; + + stats->rnbc += IGC_READ_REG(hw, IGC_RNBC); + stats->ruc += IGC_READ_REG(hw, IGC_RUC); + stats->rfc += IGC_READ_REG(hw, IGC_RFC); + stats->roc += IGC_READ_REG(hw, IGC_ROC); + stats->rjc += IGC_READ_REG(hw, IGC_RJC); + + stats->mgprc += IGC_READ_REG(hw, IGC_MGTPRC); + stats->mgpdc += IGC_READ_REG(hw, IGC_MGTPDC); + stats->mgptc += IGC_READ_REG(hw, IGC_MGTPTC); + stats->b2ospc += IGC_READ_REG(hw, IGC_B2OSPC); + stats->b2ogprc += IGC_READ_REG(hw, IGC_B2OGPRC); + stats->o2bgptc += IGC_READ_REG(hw, IGC_O2BGPTC); + stats->o2bspc += IGC_READ_REG(hw, IGC_O2BSPC); + + stats->tpr += IGC_READ_REG(hw, IGC_TPR); + stats->tpt += IGC_READ_REG(hw, IGC_TPT); + + stats->tor += IGC_READ_REG(hw, IGC_TORL); + stats->tor += ((uint64_t)IGC_READ_REG(hw, IGC_TORH) << 32); + stats->tor -= (stats->tpr - old_tpr) * RTE_ETHER_CRC_LEN; + stats->tot += IGC_READ_REG(hw, IGC_TOTL); + stats->tot += ((uint64_t)IGC_READ_REG(hw, IGC_TOTH) << 32); + stats->tot -= (stats->tpt - old_tpt) * RTE_ETHER_CRC_LEN; + + stats->ptc64 += IGC_READ_REG(hw, IGC_PTC64); + stats->ptc127 += IGC_READ_REG(hw, IGC_PTC127); + stats->ptc255 += IGC_READ_REG(hw, IGC_PTC255); + stats->ptc511 += IGC_READ_REG(hw, IGC_PTC511); + stats->ptc1023 += IGC_READ_REG(hw, IGC_PTC1023); + stats->ptc1522 += IGC_READ_REG(hw, IGC_PTC1522); + stats->mptc += IGC_READ_REG(hw, IGC_MPTC); + stats->bptc += IGC_READ_REG(hw, IGC_BPTC); + stats->tsctc += IGC_READ_REG(hw, IGC_TSCTC); + + stats->iac += IGC_READ_REG(hw, IGC_IAC); + stats->rpthc += IGC_READ_REG(hw, IGC_RPTHC); + stats->hgptc += IGC_READ_REG(hw, IGC_HGPTC); + stats->icrxdmtc += IGC_READ_REG(hw, IGC_ICRXDMTC); + + /* Host to Card Statistics */ + stats->hgorc += IGC_READ_REG(hw, IGC_HGORCL); + stats->hgorc += ((uint64_t)IGC_READ_REG(hw, IGC_HGORCH) << 32); + stats->hgorc -= (stats->rpthc - old_rpthc) * RTE_ETHER_CRC_LEN; + stats->hgotc += IGC_READ_REG(hw, IGC_HGOTCL); + stats->hgotc += ((uint64_t)IGC_READ_REG(hw, IGC_HGOTCH) << 32); + 
stats->hgotc -= (stats->hgptc - old_hgptc) * RTE_ETHER_CRC_LEN; + stats->lenerrs += IGC_READ_REG(hw, IGC_LENERRS); +} + +/* + * Write 0 to all queue status registers + */ +static void +igc_reset_queue_stats_register(struct igc_hw *hw) +{ + int i; + + for (i = 0; i < IGC_QUEUE_PAIRS_NUM; i++) { + IGC_WRITE_REG(hw, IGC_PQGPRC(i), 0); + IGC_WRITE_REG(hw, IGC_PQGPTC(i), 0); + IGC_WRITE_REG(hw, IGC_PQGORC(i), 0); + IGC_WRITE_REG(hw, IGC_PQGOTC(i), 0); + IGC_WRITE_REG(hw, IGC_PQMPRC(i), 0); + IGC_WRITE_REG(hw, IGC_RQDPC(i), 0); + IGC_WRITE_REG(hw, IGC_TQDPC(i), 0); + } +} + +/* + * Read all hardware queue status registers + */ +static void +igc_read_queue_stats_register(struct rte_eth_dev *dev) +{ + struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); + struct igc_hw_queue_stats *queue_stats = + IGC_DEV_PRIVATE_QUEUE_STATS(dev); + int i; + + /* + * This register is not cleared on read. Furthermore, the register wraps + * around back to 0x00000000 on the next increment when reaching a value + * of 0xFFFFFFFF and then continues normal count operation. + */ + for (i = 0; i < IGC_QUEUE_PAIRS_NUM; i++) { + union { + u64 ddword; + u32 dword[2]; + } value; + u32 tmp; + + /* + * Read the register first, if the value is smaller than that + * previous read, that mean the register has been overflowed, + * then we add the high 4 bytes by 1 and replace the low 4 + * bytes by the new value. + */ + tmp = IGC_READ_REG(hw, IGC_PQGPRC(i)); + value.ddword = queue_stats->pqgprc[i]; + if (value.dword[U32_0_IN_U64] > tmp) + value.dword[U32_1_IN_U64]++; + value.dword[U32_0_IN_U64] = tmp; + queue_stats->pqgprc[i] = value.ddword; + + tmp = IGC_READ_REG(hw, IGC_PQGPTC(i)); + value.ddword = queue_stats->pqgptc[i]; + if (value.dword[U32_0_IN_U64] > tmp) + value.dword[U32_1_IN_U64]++; + value.dword[U32_0_IN_U64] = tmp; + queue_stats->pqgptc[i] = value.ddword; + + tmp = IGC_READ_REG(hw, IGC_PQGORC(i)); + value.ddword = queue_stats->pqgorc[i]; + if (value.dword[U32_0_IN_U64] > tmp) + value.dword[U32_1_IN_U64]++; + value.dword[U32_0_IN_U64] = tmp; + queue_stats->pqgorc[i] = value.ddword; + + tmp = IGC_READ_REG(hw, IGC_PQGOTC(i)); + value.ddword = queue_stats->pqgotc[i]; + if (value.dword[U32_0_IN_U64] > tmp) + value.dword[U32_1_IN_U64]++; + value.dword[U32_0_IN_U64] = tmp; + queue_stats->pqgotc[i] = value.ddword; + + tmp = IGC_READ_REG(hw, IGC_PQMPRC(i)); + value.ddword = queue_stats->pqmprc[i]; + if (value.dword[U32_0_IN_U64] > tmp) + value.dword[U32_1_IN_U64]++; + value.dword[U32_0_IN_U64] = tmp; + queue_stats->pqmprc[i] = value.ddword; + + tmp = IGC_READ_REG(hw, IGC_RQDPC(i)); + value.ddword = queue_stats->rqdpc[i]; + if (value.dword[U32_0_IN_U64] > tmp) + value.dword[U32_1_IN_U64]++; + value.dword[U32_0_IN_U64] = tmp; + queue_stats->rqdpc[i] = value.ddword; + + tmp = IGC_READ_REG(hw, IGC_TQDPC(i)); + value.ddword = queue_stats->tqdpc[i]; + if (value.dword[U32_0_IN_U64] > tmp) + value.dword[U32_1_IN_U64]++; + value.dword[U32_0_IN_U64] = tmp; + queue_stats->tqdpc[i] = value.ddword; + } +} + +static int +eth_igc_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats) +{ + struct igc_adapter *igc = IGC_DEV_PRIVATE(dev); + struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); + struct igc_hw_stats *stats = IGC_DEV_PRIVATE_STATS(dev); + struct igc_hw_queue_stats *queue_stats = + IGC_DEV_PRIVATE_QUEUE_STATS(dev); + int i; + + /* + * Cancel status handler since it will read the queue status registers + */ + rte_eal_alarm_cancel(igc_update_queue_stats_handler, dev); + + /* Read status register */ + igc_read_queue_stats_register(dev); + 
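+	/*
+	 * Note: the periodic alarm was cancelled above, so this read of the
+	 * queue accumulators cannot race with
+	 * igc_update_queue_stats_handler(); the alarm is re-armed before
+	 * this function returns.
+	 */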
igc_read_stats_registers(hw, stats); + + if (rte_stats == NULL) { + /* Restart queue status handler */ + rte_eal_alarm_set(IGC_ALARM_INTERVAL, + igc_update_queue_stats_handler, dev); + return -EINVAL; + } + + /* Rx Errors */ + rte_stats->imissed = stats->mpc; + rte_stats->ierrors = stats->crcerrs + + stats->rlec + stats->ruc + stats->roc + + stats->rxerrc + stats->algnerrc; + + /* Tx Errors */ + rte_stats->oerrors = stats->ecol + stats->latecol; + + rte_stats->ipackets = stats->gprc; + rte_stats->opackets = stats->gptc; + rte_stats->ibytes = stats->gorc; + rte_stats->obytes = stats->gotc; + + /* Get per-queue statuses */ + for (i = 0; i < IGC_QUEUE_PAIRS_NUM; i++) { + /* GET TX queue statuses */ + int map_id = igc->txq_stats_map[i]; + if (map_id >= 0) { + rte_stats->q_opackets[map_id] += queue_stats->pqgptc[i]; + rte_stats->q_obytes[map_id] += queue_stats->pqgotc[i]; + } + /* Get RX queue statuses */ + map_id = igc->rxq_stats_map[i]; + if (map_id >= 0) { + rte_stats->q_ipackets[map_id] += queue_stats->pqgprc[i]; + rte_stats->q_ibytes[map_id] += queue_stats->pqgorc[i]; + rte_stats->q_errors[map_id] += queue_stats->rqdpc[i]; + } + } + + /* Restart queue status handler */ + rte_eal_alarm_set(IGC_ALARM_INTERVAL, + igc_update_queue_stats_handler, dev); + return 0; +} + +static int +eth_igc_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, + unsigned int n) +{ + struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); + struct igc_hw_stats *hw_stats = + IGC_DEV_PRIVATE_STATS(dev); + unsigned int i; + + igc_read_stats_registers(hw, hw_stats); + + if (n < IGC_NB_XSTATS) + return IGC_NB_XSTATS; + + /* If this is a reset xstats is NULL, and we have cleared the + * registers by reading them. + */ + if (!xstats) + return 0; + + /* Extended stats */ + for (i = 0; i < IGC_NB_XSTATS; i++) { + xstats[i].id = i; + xstats[i].value = *(uint64_t *)(((char *)hw_stats) + + rte_igc_stats_strings[i].offset); + } + + return IGC_NB_XSTATS; +} + +static int +eth_igc_xstats_reset(struct rte_eth_dev *dev) +{ + struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); + struct igc_hw_stats *hw_stats = IGC_DEV_PRIVATE_STATS(dev); + struct igc_hw_queue_stats *queue_stats = + IGC_DEV_PRIVATE_QUEUE_STATS(dev); + + /* Cancel queue status handler for avoid conflict */ + rte_eal_alarm_cancel(igc_update_queue_stats_handler, dev); + + /* HW registers are cleared on read */ + igc_reset_queue_stats_register(hw); + igc_read_stats_registers(hw, hw_stats); + + /* Reset software totals */ + memset(hw_stats, 0, sizeof(*hw_stats)); + memset(queue_stats, 0, sizeof(*queue_stats)); + + /* Restart the queue status handler */ + rte_eal_alarm_set(IGC_ALARM_INTERVAL, igc_update_queue_stats_handler, + dev); + + return 0; +} + +static int +eth_igc_xstats_get_names(__rte_unused struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, unsigned int size) +{ + unsigned int i; + + if (xstats_names == NULL) + return IGC_NB_XSTATS; + + if (size < IGC_NB_XSTATS) { + PMD_DRV_LOG(ERR, "not enough buffers!"); + return IGC_NB_XSTATS; + } + + for (i = 0; i < IGC_NB_XSTATS; i++) + strlcpy(xstats_names[i].name, rte_igc_stats_strings[i].name, + sizeof(xstats_names[i].name)); + + return IGC_NB_XSTATS; +} + +static int +eth_igc_xstats_get_names_by_id(struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, const uint64_t *ids, + unsigned int limit) +{ + unsigned int i; + + if (!ids) + return eth_igc_xstats_get_names(dev, xstats_names, limit); + + for (i = 0; i < limit; i++) { + if (ids[i] >= IGC_NB_XSTATS) { + PMD_DRV_LOG(ERR, "id value isn't 
valid"); + return -EINVAL; + } + strlcpy(xstats_names[i].name, + rte_igc_stats_strings[ids[i]].name, + sizeof(xstats_names[i].name)); + } + return limit; +} + +static int +eth_igc_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids, + uint64_t *values, unsigned int n) +{ + struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); + struct igc_hw_stats *hw_stats = IGC_DEV_PRIVATE_STATS(dev); + unsigned int i; + + igc_read_stats_registers(hw, hw_stats); + + if (!ids) { + if (n < IGC_NB_XSTATS) + return IGC_NB_XSTATS; + + /* If this is a reset xstats is NULL, and we have cleared the + * registers by reading them. + */ + if (!values) + return 0; + + /* Extended stats */ + for (i = 0; i < IGC_NB_XSTATS; i++) + values[i] = *(uint64_t *)(((char *)hw_stats) + + rte_igc_stats_strings[i].offset); + + return IGC_NB_XSTATS; + + } else { + for (i = 0; i < n; i++) { + if (ids[i] >= IGC_NB_XSTATS) { + PMD_DRV_LOG(ERR, "id value isn't valid"); + return -EINVAL; + } + values[i] = *(uint64_t *)(((char *)hw_stats) + + rte_igc_stats_strings[ids[i]].offset); + } + return n; + } +} + +static int +eth_igc_queue_stats_mapping_set(struct rte_eth_dev *dev, + uint16_t queue_id, uint8_t stat_idx, uint8_t is_rx) +{ + struct igc_adapter *igc = IGC_DEV_PRIVATE(dev); + + /* check queue id is valid */ + if (queue_id >= IGC_QUEUE_PAIRS_NUM) { + PMD_DRV_LOG(ERR, "queue id(%u) error, max is %u", + queue_id, IGC_QUEUE_PAIRS_NUM - 1); + return -EINVAL; + } + + /* store the mapping status id */ + if (is_rx) + igc->rxq_stats_map[queue_id] = stat_idx; + else + igc->txq_stats_map[queue_id] = stat_idx; + + return 0; +} + +static int +eth_igc_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) +{ + struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + uint32_t vec = IGC_MISC_VEC_ID; + + if (rte_intr_allow_others(intr_handle)) + vec = IGC_RX_VEC_START; + + uint32_t mask = 1u << (queue_id + vec); + + IGC_WRITE_REG(hw, IGC_EIMC, mask); + IGC_WRITE_FLUSH(hw); + + return 0; +} + +static int +eth_igc_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) +{ + struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + uint32_t vec = IGC_MISC_VEC_ID; + + if (rte_intr_allow_others(intr_handle)) + vec = IGC_RX_VEC_START; + + uint32_t mask = 1u << (queue_id + vec); + + IGC_WRITE_REG(hw, IGC_EIMS, mask); + IGC_WRITE_FLUSH(hw); + + rte_intr_enable(intr_handle); + + return 0; +} + +static int +eth_igc_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) +{ + struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); + uint32_t ctrl; + int tx_pause; + int rx_pause; + + fc_conf->pause_time = hw->fc.pause_time; + fc_conf->high_water = hw->fc.high_water; + fc_conf->low_water = hw->fc.low_water; + fc_conf->send_xon = hw->fc.send_xon; + fc_conf->autoneg = hw->mac.autoneg; + + /* + * Return rx_pause and tx_pause status according to actual setting of + * the TFCE and RFCE bits in the CTRL register. 
+ */ + ctrl = IGC_READ_REG(hw, IGC_CTRL); + if (ctrl & IGC_CTRL_TFCE) + tx_pause = 1; + else + tx_pause = 0; + + if (ctrl & IGC_CTRL_RFCE) + rx_pause = 1; + else + rx_pause = 0; + + if (rx_pause && tx_pause) + fc_conf->mode = RTE_FC_FULL; + else if (rx_pause) + fc_conf->mode = RTE_FC_RX_PAUSE; + else if (tx_pause) + fc_conf->mode = RTE_FC_TX_PAUSE; + else + fc_conf->mode = RTE_FC_NONE; + + return 0; +} + +static int +eth_igc_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) +{ + struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); + uint32_t rx_buf_size; + uint32_t max_high_water; + uint32_t rctl; + int err; + + if (fc_conf->autoneg != hw->mac.autoneg) + return -ENOTSUP; + + rx_buf_size = igc_get_rx_buffer_size(hw); + PMD_DRV_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size); + + /* At least reserve one Ethernet frame for watermark */ + max_high_water = rx_buf_size - RTE_ETHER_MAX_LEN; + if (fc_conf->high_water > max_high_water || + fc_conf->high_water < fc_conf->low_water) { + PMD_DRV_LOG(ERR, + "Incorrect high(%u)/low(%u) water value, max is %u", + fc_conf->high_water, fc_conf->low_water, + max_high_water); + return -EINVAL; + } + + switch (fc_conf->mode) { + case RTE_FC_NONE: + hw->fc.requested_mode = igc_fc_none; + break; + case RTE_FC_RX_PAUSE: + hw->fc.requested_mode = igc_fc_rx_pause; + break; + case RTE_FC_TX_PAUSE: + hw->fc.requested_mode = igc_fc_tx_pause; + break; + case RTE_FC_FULL: + hw->fc.requested_mode = igc_fc_full; + break; + default: + PMD_DRV_LOG(ERR, "unsupported fc mode: %u", fc_conf->mode); + return -EINVAL; + } + + hw->fc.pause_time = fc_conf->pause_time; + hw->fc.high_water = fc_conf->high_water; + hw->fc.low_water = fc_conf->low_water; + hw->fc.send_xon = fc_conf->send_xon; + + err = igc_setup_link_generic(hw); + if (err == IGC_SUCCESS) { + /** + * check if we want to forward MAC frames - driver doesn't have + * native capability to do that, so we'll write the registers + * ourselves + **/ + rctl = IGC_READ_REG(hw, IGC_RCTL); + + /* set or clear MFLCN.PMCF bit depending on configuration */ + if (fc_conf->mac_ctrl_frame_fwd != 0) + rctl |= IGC_RCTL_PMCF; + else + rctl &= ~IGC_RCTL_PMCF; + + IGC_WRITE_REG(hw, IGC_RCTL, rctl); + IGC_WRITE_FLUSH(hw); + + return 0; + } + + PMD_DRV_LOG(ERR, "igc_setup_link_generic = 0x%x", err); + return -EIO; +} + +static int +eth_igc_rss_reta_update(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size) +{ + struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); + uint16_t i; + + if (reta_size != ETH_RSS_RETA_SIZE_128) { + PMD_DRV_LOG(ERR, + "The size of RSS redirection table configured(%d) doesn't match the number hardware can supported(%d)", + reta_size, ETH_RSS_RETA_SIZE_128); + return -EINVAL; + } + + RTE_BUILD_BUG_ON(ETH_RSS_RETA_SIZE_128 % IGC_RSS_RDT_REG_SIZE); + + /* set redirection table */ + for (i = 0; i < ETH_RSS_RETA_SIZE_128; i += IGC_RSS_RDT_REG_SIZE) { + union igc_rss_reta_reg reta, reg; + uint16_t idx, shift; + uint8_t j, mask; + + idx = i / RTE_RETA_GROUP_SIZE; + shift = i % RTE_RETA_GROUP_SIZE; + mask = (uint8_t)((reta_conf[idx].mask >> shift) & + IGC_RSS_RDT_REG_SIZE_MASK); + + /* if no need to update the register */ + if (!mask || + shift > (RTE_RETA_GROUP_SIZE - IGC_RSS_RDT_REG_SIZE)) + continue; + + /* check mask whether need to read the register value first */ + if (mask == IGC_RSS_RDT_REG_SIZE_MASK) + reg.dword = 0; + else + reg.dword = IGC_READ_REG_LE_VALUE(hw, + IGC_RETA(i / IGC_RSS_RDT_REG_SIZE)); + + /* update the register */ + 
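+		/*
+		 * Note: each RETA register packs IGC_RSS_RDT_REG_SIZE (4)
+		 * one-byte queue indexes; bytes selected by 'mask' are taken
+		 * from reta_conf, the remaining bytes keep the value read
+		 * back from the register above.
+		 */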
RTE_BUILD_BUG_ON(sizeof(reta.bytes) != IGC_RSS_RDT_REG_SIZE); + for (j = 0; j < IGC_RSS_RDT_REG_SIZE; j++) { + if (mask & (1u << j)) + reta.bytes[j] = + (uint8_t)reta_conf[idx].reta[shift + j]; + else + reta.bytes[j] = reg.bytes[j]; + } + IGC_WRITE_REG_LE_VALUE(hw, + IGC_RETA(i / IGC_RSS_RDT_REG_SIZE), reta.dword); + } + + return 0; +} + +static int +eth_igc_rss_reta_query(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size) +{ + struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); + uint16_t i; + + if (reta_size != ETH_RSS_RETA_SIZE_128) { + PMD_DRV_LOG(ERR, + "The size of RSS redirection table configured(%d) doesn't match the number hardware can supported(%d)", + reta_size, ETH_RSS_RETA_SIZE_128); + return -EINVAL; + } + + RTE_BUILD_BUG_ON(ETH_RSS_RETA_SIZE_128 % IGC_RSS_RDT_REG_SIZE); + + /* read redirection table */ + for (i = 0; i < ETH_RSS_RETA_SIZE_128; i += IGC_RSS_RDT_REG_SIZE) { + union igc_rss_reta_reg reta; + uint16_t idx, shift; + uint8_t j, mask; + + idx = i / RTE_RETA_GROUP_SIZE; + shift = i % RTE_RETA_GROUP_SIZE; + mask = (uint8_t)((reta_conf[idx].mask >> shift) & + IGC_RSS_RDT_REG_SIZE_MASK); + + /* if no need to read register */ + if (!mask || + shift > (RTE_RETA_GROUP_SIZE - IGC_RSS_RDT_REG_SIZE)) + continue; + + /* read register and get the queue index */ + RTE_BUILD_BUG_ON(sizeof(reta.bytes) != IGC_RSS_RDT_REG_SIZE); + reta.dword = IGC_READ_REG_LE_VALUE(hw, + IGC_RETA(i / IGC_RSS_RDT_REG_SIZE)); + for (j = 0; j < IGC_RSS_RDT_REG_SIZE; j++) { + if (mask & (1u << j)) + reta_conf[idx].reta[shift + j] = reta.bytes[j]; + } + } + + return 0; +} + +static int +eth_igc_rss_hash_update(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf) +{ + struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); + igc_hw_rss_hash_set(hw, rss_conf); + return 0; +} + +static int +eth_igc_rss_hash_conf_get(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf) +{ + struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); + uint32_t *hash_key = (uint32_t *)rss_conf->rss_key; + uint32_t mrqc; + uint64_t rss_hf; + + if (hash_key != NULL) { + int i; + + /* if not enough space for store hash key */ + if (rss_conf->rss_key_len != IGC_HKEY_SIZE) { + PMD_DRV_LOG(ERR, + "RSS hash key size %u in parameter doesn't match the hardware hash key size %u", + rss_conf->rss_key_len, IGC_HKEY_SIZE); + return -EINVAL; + } + + /* read RSS key from register */ + for (i = 0; i < IGC_HKEY_MAX_INDEX; i++) + hash_key[i] = IGC_READ_REG_LE_VALUE(hw, IGC_RSSRK(i)); + } + + /* get RSS functions configured in MRQC register */ + mrqc = IGC_READ_REG(hw, IGC_MRQC); + if ((mrqc & IGC_MRQC_ENABLE_RSS_4Q) == 0) + return 0; + + rss_hf = 0; + if (mrqc & IGC_MRQC_RSS_FIELD_IPV4) + rss_hf |= ETH_RSS_IPV4; + if (mrqc & IGC_MRQC_RSS_FIELD_IPV4_TCP) + rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP; + if (mrqc & IGC_MRQC_RSS_FIELD_IPV6) + rss_hf |= ETH_RSS_IPV6; + if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_EX) + rss_hf |= ETH_RSS_IPV6_EX; + if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_TCP) + rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP; + if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_TCP_EX) + rss_hf |= ETH_RSS_IPV6_TCP_EX; + if (mrqc & IGC_MRQC_RSS_FIELD_IPV4_UDP) + rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP; + if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_UDP) + rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP; + if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_UDP_EX) + rss_hf |= ETH_RSS_IPV6_UDP_EX; + + rss_conf->rss_hf |= rss_hf; + return 0; +} + +static int +eth_igc_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) +{ + struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); + struct igc_vfta 
*shadow_vfta = IGC_DEV_PRIVATE_VFTA(dev); + uint32_t vfta; + uint32_t vid_idx; + uint32_t vid_bit; + + vid_idx = (vlan_id >> IGC_VFTA_ENTRY_SHIFT) & IGC_VFTA_ENTRY_MASK; + vid_bit = 1u << (vlan_id & IGC_VFTA_ENTRY_BIT_SHIFT_MASK); + vfta = shadow_vfta->vfta[vid_idx]; + if (on) + vfta |= vid_bit; + else + vfta &= ~vid_bit; + IGC_WRITE_REG_ARRAY(hw, IGC_VFTA, vid_idx, vfta); + + /* update local VFTA copy */ + shadow_vfta->vfta[vid_idx] = vfta; + + return 0; +} + +static void +igc_vlan_hw_filter_disable(struct rte_eth_dev *dev) +{ + struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); + igc_read_reg_check_clear_bits(hw, IGC_RCTL, + IGC_RCTL_CFIEN | IGC_RCTL_VFE); +} + +static void +igc_vlan_hw_filter_enable(struct rte_eth_dev *dev) +{ + struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); + struct igc_vfta *shadow_vfta = IGC_DEV_PRIVATE_VFTA(dev); + uint32_t reg_val; + int i; + + /* Filter Table Enable, CFI not used for packet acceptance */ + reg_val = IGC_READ_REG(hw, IGC_RCTL); + reg_val &= ~IGC_RCTL_CFIEN; + reg_val |= IGC_RCTL_VFE; + IGC_WRITE_REG(hw, IGC_RCTL, reg_val); + + /* restore VFTA table */ + for (i = 0; i < IGC_VFTA_SIZE; i++) + IGC_WRITE_REG_ARRAY(hw, IGC_VFTA, i, shadow_vfta->vfta[i]); +} + +static void +igc_vlan_hw_strip_disable(struct rte_eth_dev *dev) +{ + struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); + + igc_read_reg_check_clear_bits(hw, IGC_CTRL, IGC_CTRL_VME); +} + +static void +igc_vlan_hw_strip_enable(struct rte_eth_dev *dev) +{ + struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); + + igc_read_reg_check_set_bits(hw, IGC_CTRL, IGC_CTRL_VME); +} + +static int +igc_vlan_hw_extend_disable(struct rte_eth_dev *dev) +{ + struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); + uint32_t ctrl_ext; + + ctrl_ext = IGC_READ_REG(hw, IGC_CTRL_EXT); + + /* if extend vlan hasn't been enabled */ + if ((ctrl_ext & IGC_CTRL_EXT_EXT_VLAN) == 0) + return 0; + + if ((dev->data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_JUMBO_FRAME) == 0) + goto write_ext_vlan; + + /* Update maximum packet length */ + if (dev->data->dev_conf.rxmode.max_rx_pkt_len < + RTE_ETHER_MIN_MTU + VLAN_TAG_SIZE) { + PMD_DRV_LOG(ERR, "Maximum packet length %u error, min is %u", + dev->data->dev_conf.rxmode.max_rx_pkt_len, + VLAN_TAG_SIZE + RTE_ETHER_MIN_MTU); + return -EINVAL; + } + dev->data->dev_conf.rxmode.max_rx_pkt_len -= VLAN_TAG_SIZE; + IGC_WRITE_REG(hw, IGC_RLPML, + dev->data->dev_conf.rxmode.max_rx_pkt_len); + +write_ext_vlan: + IGC_WRITE_REG(hw, IGC_CTRL_EXT, ctrl_ext & ~IGC_CTRL_EXT_EXT_VLAN); + return 0; +} + +static int +igc_vlan_hw_extend_enable(struct rte_eth_dev *dev) +{ + struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); + uint32_t ctrl_ext; + + ctrl_ext = IGC_READ_REG(hw, IGC_CTRL_EXT); + + /* if extend vlan has been enabled */ + if (ctrl_ext & IGC_CTRL_EXT_EXT_VLAN) + return 0; + + if ((dev->data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_JUMBO_FRAME) == 0) + goto write_ext_vlan; + + /* Update maximum packet length */ + if (dev->data->dev_conf.rxmode.max_rx_pkt_len > + MAX_RX_JUMBO_FRAME_SIZE - VLAN_TAG_SIZE) { + PMD_DRV_LOG(ERR, "Maximum packet length %u error, max is %u", + dev->data->dev_conf.rxmode.max_rx_pkt_len + + VLAN_TAG_SIZE, MAX_RX_JUMBO_FRAME_SIZE); + return -EINVAL; + } + dev->data->dev_conf.rxmode.max_rx_pkt_len += VLAN_TAG_SIZE; + IGC_WRITE_REG(hw, IGC_RLPML, + dev->data->dev_conf.rxmode.max_rx_pkt_len); + +write_ext_vlan: + IGC_WRITE_REG(hw, IGC_CTRL_EXT, ctrl_ext | IGC_CTRL_EXT_EXT_VLAN); + return 0; +} + +static int +eth_igc_vlan_offload_set(struct rte_eth_dev *dev, int mask) +{ + struct rte_eth_rxmode *rxmode; + + 
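The VLAN helpers above (VFTA shadow update, RCTL.VFE/CFIEN and CTRL.VME toggling) are reached through the generic ethdev VLAN calls, which also update the port's rxmode.offloads before invoking the PMD callback. A hypothetical sketch, not part of this patch, that enables stripping and filtering and then admits VLAN ID 100:

    #include <rte_ethdev.h>

    static int
    example_vlan_setup(uint16_t port_id)
    {
        int ret;

        /* enable stripping and filtering; offload bits left out are cleared */
        ret = rte_eth_dev_set_vlan_offload(port_id,
                        ETH_VLAN_STRIP_OFFLOAD | ETH_VLAN_FILTER_OFFLOAD);
        if (ret != 0)
            return ret;

        /* admit VLAN ID 100 through the VFTA */
        return rte_eth_dev_vlan_filter(port_id, 100, 1);
    }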
rxmode = &dev->data->dev_conf.rxmode; + if (mask & ETH_VLAN_STRIP_MASK) { + if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) + igc_vlan_hw_strip_enable(dev); + else + igc_vlan_hw_strip_disable(dev); + } + + if (mask & ETH_VLAN_FILTER_MASK) { + if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) + igc_vlan_hw_filter_enable(dev); + else + igc_vlan_hw_filter_disable(dev); + } + + if (mask & ETH_VLAN_EXTEND_MASK) { + if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) + return igc_vlan_hw_extend_enable(dev); + else + return igc_vlan_hw_extend_disable(dev); + } + + return 0; +} + +static int +eth_igc_vlan_tpid_set(struct rte_eth_dev *dev, + enum rte_vlan_type vlan_type, + uint16_t tpid) +{ + struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); + uint32_t reg_val; + + /* only outer TPID of double VLAN can be configured*/ + if (vlan_type == ETH_VLAN_TYPE_OUTER) { + reg_val = IGC_READ_REG(hw, IGC_VET); + reg_val = (reg_val & (~IGC_VET_EXT)) | + ((uint32_t)tpid << IGC_VET_EXT_SHIFT); + IGC_WRITE_REG(hw, IGC_VET, reg_val); + + return 0; + } + + /* all other TPID values are read-only*/ + PMD_DRV_LOG(ERR, "Not supported"); + return -ENOTSUP; +} + +static int +eth_igc_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, + struct rte_pci_device *pci_dev) +{ + PMD_INIT_FUNC_TRACE(); + return rte_eth_dev_pci_generic_probe(pci_dev, + sizeof(struct igc_adapter), eth_igc_dev_init); +} + +static int +eth_igc_pci_remove(struct rte_pci_device *pci_dev) +{ + PMD_INIT_FUNC_TRACE(); + return rte_eth_dev_pci_generic_remove(pci_dev, eth_igc_dev_uninit); +} + +static struct rte_pci_driver rte_igc_pmd = { + .id_table = pci_id_igc_map, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, + .probe = eth_igc_pci_probe, + .remove = eth_igc_pci_remove, +}; + +RTE_PMD_REGISTER_PCI(net_igc, rte_igc_pmd); +RTE_PMD_REGISTER_PCI_TABLE(net_igc, pci_id_igc_map); +RTE_PMD_REGISTER_KMOD_DEP(net_igc, "* igb_uio | uio_pci_generic | vfio-pci"); diff --git a/src/spdk/dpdk/drivers/net/igc/igc_ethdev.h b/src/spdk/dpdk/drivers/net/igc/igc_ethdev.h new file mode 100644 index 000000000..a09debfb4 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/igc/igc_ethdev.h @@ -0,0 +1,286 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019-2020 Intel Corporation + */ + +#ifndef _IGC_ETHDEV_H_ +#define _IGC_ETHDEV_H_ + +#include + +#include "base/igc_osdep.h" +#include "base/igc_hw.h" +#include "base/igc_i225.h" +#include "base/igc_api.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define IGC_RSS_RDT_SIZD 128 + +/* VLAN filter table size */ +#define IGC_VFTA_SIZE 128 + +#define IGC_QUEUE_PAIRS_NUM 4 + +#define IGC_HKEY_MAX_INDEX 10 +#define IGC_RSS_RDT_SIZD 128 + +#define IGC_DEFAULT_REG_SIZE 4 +#define IGC_DEFAULT_REG_SIZE_MASK 0xf + +#define IGC_RSS_RDT_REG_SIZE IGC_DEFAULT_REG_SIZE +#define IGC_RSS_RDT_REG_SIZE_MASK IGC_DEFAULT_REG_SIZE_MASK +#define IGC_HKEY_REG_SIZE IGC_DEFAULT_REG_SIZE +#define IGC_HKEY_SIZE (IGC_HKEY_REG_SIZE * IGC_HKEY_MAX_INDEX) + +/* + * TDBA/RDBA should be aligned on 16 byte boundary. But TDLEN/RDLEN should be + * multiple of 128 bytes. So we align TDBA/RDBA on 128 byte boundary. + * This will also optimize cache line size effect. + * H/W supports up to cache line size 128. 
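eth_igc_vlan_tpid_set() above only allows the outer TPID of a double VLAN to be changed (it rewrites the extended field of IGC_VET). A hypothetical sketch, not part of this patch, selecting the IEEE 802.1ad S-TAG TPID through the generic API:

    #include <rte_ethdev.h>

    static int
    example_set_outer_tpid(uint16_t port_id)
    {
        /* 0x88A8 is the IEEE 802.1ad S-TAG TPID */
        return rte_eth_dev_set_vlan_ether_type(port_id,
                        ETH_VLAN_TYPE_OUTER, 0x88A8);
    }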
+ */ +#define IGC_ALIGN 128 + +#define IGC_TX_DESCRIPTOR_MULTIPLE 8 +#define IGC_RX_DESCRIPTOR_MULTIPLE 8 + +#define IGC_RXD_ALIGN ((uint16_t)(IGC_ALIGN / \ + sizeof(union igc_adv_rx_desc))) +#define IGC_TXD_ALIGN ((uint16_t)(IGC_ALIGN / \ + sizeof(union igc_adv_tx_desc))) +#define IGC_MIN_TXD IGC_TX_DESCRIPTOR_MULTIPLE +#define IGC_MAX_TXD ((uint16_t)(0x80000 / sizeof(union igc_adv_tx_desc))) +#define IGC_MIN_RXD IGC_RX_DESCRIPTOR_MULTIPLE +#define IGC_MAX_RXD ((uint16_t)(0x80000 / sizeof(union igc_adv_rx_desc))) + +#define IGC_TX_MAX_SEG UINT8_MAX +#define IGC_TX_MAX_MTU_SEG UINT8_MAX + +#define IGC_RX_OFFLOAD_ALL ( \ + DEV_RX_OFFLOAD_VLAN_STRIP | \ + DEV_RX_OFFLOAD_VLAN_FILTER | \ + DEV_RX_OFFLOAD_VLAN_EXTEND | \ + DEV_RX_OFFLOAD_IPV4_CKSUM | \ + DEV_RX_OFFLOAD_UDP_CKSUM | \ + DEV_RX_OFFLOAD_TCP_CKSUM | \ + DEV_RX_OFFLOAD_SCTP_CKSUM | \ + DEV_RX_OFFLOAD_JUMBO_FRAME | \ + DEV_RX_OFFLOAD_KEEP_CRC | \ + DEV_RX_OFFLOAD_SCATTER) + +#define IGC_TX_OFFLOAD_ALL ( \ + DEV_TX_OFFLOAD_VLAN_INSERT | \ + DEV_TX_OFFLOAD_IPV4_CKSUM | \ + DEV_TX_OFFLOAD_UDP_CKSUM | \ + DEV_TX_OFFLOAD_TCP_CKSUM | \ + DEV_TX_OFFLOAD_SCTP_CKSUM | \ + DEV_TX_OFFLOAD_TCP_TSO | \ + DEV_TX_OFFLOAD_UDP_TSO | \ + DEV_TX_OFFLOAD_MULTI_SEGS) + +#define IGC_RSS_OFFLOAD_ALL ( \ + ETH_RSS_IPV4 | \ + ETH_RSS_NONFRAG_IPV4_TCP | \ + ETH_RSS_NONFRAG_IPV4_UDP | \ + ETH_RSS_IPV6 | \ + ETH_RSS_NONFRAG_IPV6_TCP | \ + ETH_RSS_NONFRAG_IPV6_UDP | \ + ETH_RSS_IPV6_EX | \ + ETH_RSS_IPV6_TCP_EX | \ + ETH_RSS_IPV6_UDP_EX) + +#define IGC_MAX_ETQF_FILTERS 3 /* etqf(3) is used for 1588 */ +#define IGC_ETQF_FILTER_1588 3 +#define IGC_ETQF_QUEUE_SHIFT 16 +#define IGC_ETQF_QUEUE_MASK (7u << IGC_ETQF_QUEUE_SHIFT) + +#define IGC_MAX_NTUPLE_FILTERS 8 +#define IGC_NTUPLE_MAX_PRI 7 + +#define IGC_SYN_FILTER_ENABLE 0x01 /* syn filter enable field */ +#define IGC_SYN_FILTER_QUEUE_SHIFT 1 /* syn filter queue field */ +#define IGC_SYN_FILTER_QUEUE 0x0000000E /* syn filter queue field */ +#define IGC_RFCTL_SYNQFP 0x00080000 /* SYNQFP in RFCTL register */ + +/* structure for interrupt relative data */ +struct igc_interrupt { + uint32_t flags; + uint32_t mask; +}; + +/* Union of RSS redirect table register */ +union igc_rss_reta_reg { + uint32_t dword; + uint8_t bytes[4]; +}; + +/* Structure to per-queue statics */ +struct igc_hw_queue_stats { + u64 pqgprc[IGC_QUEUE_PAIRS_NUM]; + /* per queue good packets received count */ + u64 pqgptc[IGC_QUEUE_PAIRS_NUM]; + /* per queue good packets transmitted count */ + u64 pqgorc[IGC_QUEUE_PAIRS_NUM]; + /* per queue good octets received count */ + u64 pqgotc[IGC_QUEUE_PAIRS_NUM]; + /* per queue good octets transmitted count */ + u64 pqmprc[IGC_QUEUE_PAIRS_NUM]; + /* per queue multicast packets received count */ + u64 rqdpc[IGC_QUEUE_PAIRS_NUM]; + /* per receive queue drop packet count */ + u64 tqdpc[IGC_QUEUE_PAIRS_NUM]; + /* per transmit queue drop packet count */ +}; + +/* local vfta copy */ +struct igc_vfta { + uint32_t vfta[IGC_VFTA_SIZE]; +}; + +/* ethertype filter structure */ +struct igc_ethertype_filter { + uint16_t ether_type; + uint16_t queue; +}; + +/* Structure of ntuple filter info. */ +struct igc_ntuple_info { + uint16_t dst_port; + uint8_t proto; /* l4 protocol. */ + + /* + * the packet matched above 2tuple and contain any set bit will hit + * this filter. + */ + uint8_t tcp_flags; + + /* + * seven levels (001b-111b), 111b is highest, used when more than one + * filter matches. + */ + uint8_t priority; + uint8_t dst_port_mask:1, /* if mask is 1b, do compare dst port. 
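The descriptor sizing macros earlier in this header encode simple arithmetic: assuming the 16-byte advanced descriptor layout of the base code, the 128-byte ring alignment becomes an 8-descriptor multiple and the 0x80000-byte cap becomes 32768 descriptors per ring. A compile-time sketch under that assumption, illustrative only and not part of the patch:

    #include <rte_common.h>
    #include "igc_ethdev.h"    /* the macros being checked */

    static inline void
    igc_desc_macro_sanity(void)
    {
        /* 128-byte alignment over 16-byte descriptors */
        RTE_BUILD_BUG_ON(IGC_RXD_ALIGN != 8);
        RTE_BUILD_BUG_ON(IGC_TXD_ALIGN != 8);
        /* 0x80000-byte ring cap */
        RTE_BUILD_BUG_ON(IGC_MAX_RXD != 32768);
        RTE_BUILD_BUG_ON(IGC_MAX_TXD != 32768);
    }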
*/ + proto_mask:1; /* if mask is 1b, do compare protocol. */ +}; + +/* Structure of n-tuple filter */ +struct igc_ntuple_filter { + RTE_STD_C11 + union { + uint64_t hash_val; + struct igc_ntuple_info tuple_info; + }; + + uint8_t queue; +}; + +/* Structure of TCP SYN filter */ +struct igc_syn_filter { + uint8_t queue; + + uint8_t hig_pri:1, /* 1 - higher priority than other filters, */ + /* 0 - lower priority. */ + enable:1; /* 1-enable; 0-disable */ +}; + +/* Structure to store RTE flow RSS configure. */ +struct igc_rss_filter { + struct rte_flow_action_rss conf; /* RSS parameters. */ + uint8_t key[IGC_HKEY_MAX_INDEX * sizeof(uint32_t)]; /* Hash key. */ + uint16_t queue[IGC_RSS_RDT_SIZD];/* Queues indices to use. */ + uint8_t enable; /* 1-enabled, 0-disabled */ +}; + +/* Feature filter types */ +enum igc_filter_type { + IGC_FILTER_TYPE_ETHERTYPE, + IGC_FILTER_TYPE_NTUPLE, + IGC_FILTER_TYPE_SYN, + IGC_FILTER_TYPE_HASH +}; + +/* Structure to store flow */ +struct rte_flow { + TAILQ_ENTRY(rte_flow) node; + enum igc_filter_type filter_type; + RTE_STD_C11 + char filter[0]; /* filter data */ +}; + +/* Flow list header */ +TAILQ_HEAD(igc_flow_list, rte_flow); + +/* + * Structure to store private data for each driver instance (for each port). + */ +struct igc_adapter { + struct igc_hw hw; + struct igc_hw_stats stats; + struct igc_hw_queue_stats queue_stats; + int16_t txq_stats_map[IGC_QUEUE_PAIRS_NUM]; + int16_t rxq_stats_map[IGC_QUEUE_PAIRS_NUM]; + + struct igc_interrupt intr; + struct igc_vfta shadow_vfta; + bool stopped; + + struct igc_ethertype_filter ethertype_filters[IGC_MAX_ETQF_FILTERS]; + struct igc_ntuple_filter ntuple_filters[IGC_MAX_NTUPLE_FILTERS]; + struct igc_syn_filter syn_filter; + struct igc_rss_filter rss_filter; + struct igc_flow_list flow_list; +}; + +#define IGC_DEV_PRIVATE(_dev) ((_dev)->data->dev_private) + +#define IGC_DEV_PRIVATE_HW(_dev) \ + (&((struct igc_adapter *)(_dev)->data->dev_private)->hw) + +#define IGC_DEV_PRIVATE_STATS(_dev) \ + (&((struct igc_adapter *)(_dev)->data->dev_private)->stats) + +#define IGC_DEV_PRIVATE_QUEUE_STATS(_dev) \ + (&((struct igc_adapter *)(_dev)->data->dev_private)->queue_stats) + +#define IGC_DEV_PRIVATE_INTR(_dev) \ + (&((struct igc_adapter *)(_dev)->data->dev_private)->intr) + +#define IGC_DEV_PRIVATE_VFTA(_dev) \ + (&((struct igc_adapter *)(_dev)->data->dev_private)->shadow_vfta) + +#define IGC_DEV_PRIVATE_RSS_FILTER(_dev) \ + (&((struct igc_adapter *)(_dev)->data->dev_private)->rss_filter) + +#define IGC_DEV_PRIVATE_FLOW_LIST(_dev) \ + (&((struct igc_adapter *)(_dev)->data->dev_private)->flow_list) + +static inline void +igc_read_reg_check_set_bits(struct igc_hw *hw, uint32_t reg, uint32_t bits) +{ + uint32_t reg_val = IGC_READ_REG(hw, reg); + + bits |= reg_val; + if (bits == reg_val) + return; /* no need to write back */ + + IGC_WRITE_REG(hw, reg, bits); +} + +static inline void +igc_read_reg_check_clear_bits(struct igc_hw *hw, uint32_t reg, uint32_t bits) +{ + uint32_t reg_val = IGC_READ_REG(hw, reg); + + bits = reg_val & ~bits; + if (bits == reg_val) + return; /* no need to write back */ + + IGC_WRITE_REG(hw, reg, bits); +} + +#ifdef __cplusplus +} +#endif + +#endif /* _IGC_ETHDEV_H_ */ diff --git a/src/spdk/dpdk/drivers/net/igc/igc_filter.c b/src/spdk/dpdk/drivers/net/igc/igc_filter.c new file mode 100644 index 000000000..836621d4c --- /dev/null +++ b/src/spdk/dpdk/drivers/net/igc/igc_filter.c @@ -0,0 +1,392 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2020 Intel Corporation + */ + +#include "rte_malloc.h" 
+#include "igc_logs.h" +#include "igc_txrx.h" +#include "igc_filter.h" +#include "igc_flow.h" + +/* + * igc_ethertype_filter_lookup - lookup ether-type filter + * + * @igc, IGC filter pointer + * @ethertype, ethernet type + * @empty, a place to store the index of empty entry if the item not found + * it's not smaller than 0 if valid, otherwise -1 for no empty entry. + * empty parameter is only valid if the return value of the function is -1 + * + * Return value + * >= 0, item index of the ether-type filter + * -1, the item not been found + */ +static inline int +igc_ethertype_filter_lookup(const struct igc_adapter *igc, + uint16_t ethertype, int *empty) +{ + int i = 0; + + if (empty) { + /* set to invalid valid */ + *empty = -1; + + /* search the filters array */ + for (; i < IGC_MAX_ETQF_FILTERS; i++) { + if (igc->ethertype_filters[i].ether_type == ethertype) + return i; + if (igc->ethertype_filters[i].ether_type == 0) { + /* get empty entry */ + *empty = i; + i++; + break; + } + } + } + + /* search the rest of filters */ + for (; i < IGC_MAX_ETQF_FILTERS; i++) { + if (igc->ethertype_filters[i].ether_type == ethertype) + return i; /* filter be found, return index */ + } + + return -1; +} + +int +igc_del_ethertype_filter(struct rte_eth_dev *dev, + const struct igc_ethertype_filter *filter) +{ + struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); + struct igc_adapter *igc = IGC_DEV_PRIVATE(dev); + int ret; + + if (filter->ether_type == 0) { + PMD_DRV_LOG(ERR, "Ethertype 0 is not been supported"); + return -EINVAL; + } + + ret = igc_ethertype_filter_lookup(igc, filter->ether_type, NULL); + if (ret < 0) { + /* not found */ + PMD_DRV_LOG(ERR, + "Ethertype (0x%04x) filter doesn't exist", + filter->ether_type); + return -ENOENT; + } + + igc->ethertype_filters[ret].ether_type = 0; + + IGC_WRITE_REG(hw, IGC_ETQF(ret), 0); + IGC_WRITE_FLUSH(hw); + return 0; +} + +int +igc_add_ethertype_filter(struct rte_eth_dev *dev, + const struct igc_ethertype_filter *filter) +{ + struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); + struct igc_adapter *igc = IGC_DEV_PRIVATE(dev); + uint32_t etqf; + int ret, empty; + + if (filter->ether_type == RTE_ETHER_TYPE_IPV4 || + filter->ether_type == RTE_ETHER_TYPE_IPV6 || + filter->ether_type == 0) { + PMD_DRV_LOG(ERR, + "Unsupported ether_type(0x%04x) in ethertype filter", + filter->ether_type); + return -EINVAL; + } + + ret = igc_ethertype_filter_lookup(igc, filter->ether_type, &empty); + if (ret >= 0) { + PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.", + filter->ether_type); + return -EEXIST; + } + + if (empty < 0) { + PMD_DRV_LOG(ERR, "no ethertype filter entry."); + return -ENOSPC; + } + ret = empty; + + etqf = filter->ether_type; + etqf |= IGC_ETQF_FILTER_ENABLE | IGC_ETQF_QUEUE_ENABLE; + etqf |= (uint32_t)filter->queue << IGC_ETQF_QUEUE_SHIFT; + + memcpy(&igc->ethertype_filters[ret], filter, sizeof(*filter)); + + IGC_WRITE_REG(hw, IGC_ETQF(ret), etqf); + IGC_WRITE_FLUSH(hw); + return 0; +} + +/* clear all the ether type filters */ +static void +igc_clear_all_ethertype_filter(struct rte_eth_dev *dev) +{ + struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); + struct igc_adapter *igc = IGC_DEV_PRIVATE(dev); + int i; + + for (i = 0; i < IGC_MAX_ETQF_FILTERS; i++) + IGC_WRITE_REG(hw, IGC_ETQF(i), 0); + IGC_WRITE_FLUSH(hw); + + memset(&igc->ethertype_filters, 0, sizeof(igc->ethertype_filters)); +} + +/* + * igc_tuple_filter_lookup - lookup n-tuple filter + * + * @igc, igc filter pointer + * @ntuple, n-tuple filter pointer + * @empty, a place to store the index of empty entry if the 
item not found + * it's not smaller than 0 if valid, otherwise -1 for no empty entry. + * The value of empty is uncertain if the return value of the function is + * not -1. + * + * Return value + * >= 0, item index of the filter + * -1, the item not been found + */ +static int +igc_tuple_filter_lookup(const struct igc_adapter *igc, + const struct igc_ntuple_filter *ntuple, + int *empty) +{ + int i = 0; + + if (empty) { + /* set initial value */ + *empty = -1; + + /* search the filter array */ + for (; i < IGC_MAX_NTUPLE_FILTERS; i++) { + if (igc->ntuple_filters[i].hash_val) { + /* compare the hase value */ + if (ntuple->hash_val == + igc->ntuple_filters[i].hash_val) + /* filter be found, return index */ + return i; + } else { + /* get the empty entry */ + *empty = i; + i++; + break; + } + } + } + + /* search the rest of filters */ + for (; i < IGC_MAX_NTUPLE_FILTERS; i++) { + if (ntuple->hash_val == igc->ntuple_filters[i].hash_val) + /* filter be found, return index */ + return i; + } + + return -1; +} + +/* Set hardware register values */ +static void +igc_enable_tuple_filter(struct rte_eth_dev *dev, + const struct igc_adapter *igc, uint8_t index) +{ + struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); + const struct igc_ntuple_filter *filter = &igc->ntuple_filters[index]; + const struct igc_ntuple_info *info = &filter->tuple_info; + uint32_t ttqf, imir, imir_ext = IGC_IMIREXT_SIZE_BP; + + imir = info->dst_port; + imir |= (uint32_t)info->priority << IGC_IMIR_PRIORITY_SHIFT; + + /* 0b means not compare. */ + if (info->dst_port_mask == 0) + imir |= IGC_IMIR_PORT_BP; + + ttqf = IGC_TTQF_DISABLE_MASK | IGC_TTQF_QUEUE_ENABLE; + ttqf |= (uint32_t)filter->queue << IGC_TTQF_QUEUE_SHIFT; + ttqf |= info->proto; + + if (info->proto_mask) + ttqf &= ~IGC_TTQF_MASK_ENABLE; + + /* TCP flags bits setting. 
*/ + if (info->tcp_flags & RTE_NTUPLE_TCP_FLAGS_MASK) { + if (info->tcp_flags & RTE_TCP_URG_FLAG) + imir_ext |= IGC_IMIREXT_CTRL_URG; + if (info->tcp_flags & RTE_TCP_ACK_FLAG) + imir_ext |= IGC_IMIREXT_CTRL_ACK; + if (info->tcp_flags & RTE_TCP_PSH_FLAG) + imir_ext |= IGC_IMIREXT_CTRL_PSH; + if (info->tcp_flags & RTE_TCP_RST_FLAG) + imir_ext |= IGC_IMIREXT_CTRL_RST; + if (info->tcp_flags & RTE_TCP_SYN_FLAG) + imir_ext |= IGC_IMIREXT_CTRL_SYN; + if (info->tcp_flags & RTE_TCP_FIN_FLAG) + imir_ext |= IGC_IMIREXT_CTRL_FIN; + } else { + imir_ext |= IGC_IMIREXT_CTRL_BP; + } + + IGC_WRITE_REG(hw, IGC_IMIR(index), imir); + IGC_WRITE_REG(hw, IGC_TTQF(index), ttqf); + IGC_WRITE_REG(hw, IGC_IMIREXT(index), imir_ext); + IGC_WRITE_FLUSH(hw); +} + +/* Reset hardware register values */ +static void +igc_disable_tuple_filter(struct rte_eth_dev *dev, uint8_t index) +{ + struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); + + IGC_WRITE_REG(hw, IGC_TTQF(index), IGC_TTQF_DISABLE_MASK); + IGC_WRITE_REG(hw, IGC_IMIR(index), 0); + IGC_WRITE_REG(hw, IGC_IMIREXT(index), 0); + IGC_WRITE_FLUSH(hw); +} + +int +igc_add_ntuple_filter(struct rte_eth_dev *dev, + const struct igc_ntuple_filter *ntuple) +{ + struct igc_adapter *igc = IGC_DEV_PRIVATE(dev); + int ret, empty; + + ret = igc_tuple_filter_lookup(igc, ntuple, &empty); + if (ret >= 0) { + PMD_DRV_LOG(ERR, "filter exists."); + return -EEXIST; + } + + if (empty < 0) { + PMD_DRV_LOG(ERR, "filter no entry."); + return -ENOSPC; + } + + ret = empty; + memcpy(&igc->ntuple_filters[ret], ntuple, sizeof(*ntuple)); + igc_enable_tuple_filter(dev, igc, (uint8_t)ret); + return 0; +} + +int +igc_del_ntuple_filter(struct rte_eth_dev *dev, + const struct igc_ntuple_filter *ntuple) +{ + struct igc_adapter *igc = IGC_DEV_PRIVATE(dev); + int ret; + + ret = igc_tuple_filter_lookup(igc, ntuple, NULL); + if (ret < 0) { + PMD_DRV_LOG(ERR, "filter not exists."); + return -ENOENT; + } + + memset(&igc->ntuple_filters[ret], 0, sizeof(*ntuple)); + igc_disable_tuple_filter(dev, (uint8_t)ret); + return 0; +} + +/* Clear all the n-tuple filters */ +static void +igc_clear_all_ntuple_filter(struct rte_eth_dev *dev) +{ + struct igc_adapter *igc = IGC_DEV_PRIVATE(dev); + int i; + + for (i = 0; i < IGC_MAX_NTUPLE_FILTERS; i++) + igc_disable_tuple_filter(dev, i); + + memset(&igc->ntuple_filters, 0, sizeof(igc->ntuple_filters)); +} + +int +igc_set_syn_filter(struct rte_eth_dev *dev, + const struct igc_syn_filter *filter) +{ + struct igc_hw *hw; + struct igc_adapter *igc; + uint32_t synqf, rfctl; + + if (filter->queue >= IGC_QUEUE_PAIRS_NUM) { + PMD_DRV_LOG(ERR, "out of range queue %u(max is %u)", + filter->queue, IGC_QUEUE_PAIRS_NUM); + return -EINVAL; + } + + igc = IGC_DEV_PRIVATE(dev); + + if (igc->syn_filter.enable) { + PMD_DRV_LOG(ERR, "SYN filter has been enabled before!"); + return -EEXIST; + } + + hw = IGC_DEV_PRIVATE_HW(dev); + synqf = (uint32_t)filter->queue << IGC_SYN_FILTER_QUEUE_SHIFT; + synqf |= IGC_SYN_FILTER_ENABLE; + + rfctl = IGC_READ_REG(hw, IGC_RFCTL); + if (filter->hig_pri) + rfctl |= IGC_RFCTL_SYNQFP; + else + rfctl &= ~IGC_RFCTL_SYNQFP; + + memcpy(&igc->syn_filter, filter, sizeof(igc->syn_filter)); + igc->syn_filter.enable = 1; + + IGC_WRITE_REG(hw, IGC_RFCTL, rfctl); + IGC_WRITE_REG(hw, IGC_SYNQF(0), synqf); + IGC_WRITE_FLUSH(hw); + return 0; +} + +/* clear the SYN filter */ +void +igc_clear_syn_filter(struct rte_eth_dev *dev) +{ + struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); + struct igc_adapter *igc = IGC_DEV_PRIVATE(dev); + + IGC_WRITE_REG(hw, IGC_SYNQF(0), 0); + IGC_WRITE_FLUSH(hw); 
+ + memset(&igc->syn_filter, 0, sizeof(igc->syn_filter)); +} + +void +igc_clear_all_filter(struct rte_eth_dev *dev) +{ + igc_clear_all_ethertype_filter(dev); + igc_clear_all_ntuple_filter(dev); + igc_clear_syn_filter(dev); + igc_clear_rss_filter(dev); +} + +int +eth_igc_filter_ctrl(struct rte_eth_dev *dev, enum rte_filter_type filter_type, + enum rte_filter_op filter_op, void *arg) +{ + int ret = 0; + + RTE_SET_USED(dev); + + switch (filter_type) { + case RTE_ETH_FILTER_GENERIC: + if (filter_op != RTE_ETH_FILTER_GET) + return -EINVAL; + *(const void **)arg = &igc_flow_ops; + break; + default: + PMD_DRV_LOG(WARNING, "Filter type (%d) not supported", + filter_type); + ret = -EINVAL; + } + + return ret; +} diff --git a/src/spdk/dpdk/drivers/net/igc/igc_filter.h b/src/spdk/dpdk/drivers/net/igc/igc_filter.h new file mode 100644 index 000000000..79951504f --- /dev/null +++ b/src/spdk/dpdk/drivers/net/igc/igc_filter.h @@ -0,0 +1,39 @@ +/* + * SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2020 Intel Corporation + */ + +#ifndef _IGC_FILTER_H_ +#define _IGC_FILTER_H_ + +#include +#include +#include + +#include "igc_ethdev.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int igc_add_ethertype_filter(struct rte_eth_dev *dev, + const struct igc_ethertype_filter *filter); +int igc_del_ethertype_filter(struct rte_eth_dev *dev, + const struct igc_ethertype_filter *filter); +int igc_add_ntuple_filter(struct rte_eth_dev *dev, + const struct igc_ntuple_filter *tuple); +int igc_del_ntuple_filter(struct rte_eth_dev *dev, + const struct igc_ntuple_filter *tuple); +int igc_set_syn_filter(struct rte_eth_dev *dev, + const struct igc_syn_filter *filter); +void igc_clear_syn_filter(struct rte_eth_dev *dev); +void igc_clear_all_filter(struct rte_eth_dev *dev); +int +eth_igc_filter_ctrl(struct rte_eth_dev *dev, enum rte_filter_type filter_type, + enum rte_filter_op filter_op, void *arg); + +#ifdef __cplusplus +} +#endif + +#endif /* IGC_FILTER_H_ */ diff --git a/src/spdk/dpdk/drivers/net/igc/igc_flow.c b/src/spdk/dpdk/drivers/net/igc/igc_flow.c new file mode 100644 index 000000000..1bb64d323 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/igc/igc_flow.c @@ -0,0 +1,917 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2020 Intel Corporation + */ + +#include "rte_malloc.h" +#include "igc_logs.h" +#include "igc_txrx.h" +#include "igc_filter.h" +#include "igc_flow.h" + +/******************************************************************************* + * All Supported Rule Type + * + * Notes: + * `para` or `(para)`, the para must been set + * `[para]`, the para is optional + * `([para1][para2]...)`, all paras is optional, but must one of them been set + * `para1 | para2 | ...`, only one of the paras can be set + * + * ether-type filter + * pattern: ETH(type)/END + * action: QUEUE/END + * attribute: + * + * n-tuple filter + * pattern: [ETH/]([IPv4(protocol)|IPv6(protocol)/][UDP(dst_port)| + * TCP([dst_port],[flags])|SCTP(dst_port)/])END + * action: QUEUE/END + * attribute: [priority(0-7)] + * + * SYN filter + * pattern: [ETH/][IPv4|IPv6/]TCP(flags=SYN)/END + * action: QUEUE/END + * attribute: [priority(0,1)] + * + * RSS filter + * pattern: + * action: RSS/END + * attribute: + ******************************************************************************/ + +/* Structure to store all filters */ +struct igc_all_filter { + struct igc_ethertype_filter ethertype; + struct igc_ntuple_filter ntuple; + struct igc_syn_filter syn; + struct igc_rss_filter rss; + uint32_t mask; /* see IGC_FILTER_MASK_* 
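The rule shapes documented at the top of igc_flow.c map directly onto rte_flow calls. A hypothetical application-side sketch, not part of this patch, of the ether-type form: steer EtherType 0x888E (802.1X/EAPOL) to RX queue 1. The mask must cover the whole type field and the MAC addresses must stay unmasked, as the pattern parser enforces further below:

    #include <rte_byteorder.h>
    #include <rte_flow.h>

    static struct rte_flow *
    example_ethertype_rule(uint16_t port_id)
    {
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_item_eth eth_spec = { .type = RTE_BE16(0x888E) };
        struct rte_flow_item_eth eth_mask = { .type = RTE_BE16(0xFFFF) };
        struct rte_flow_item pattern[] = {
            { .type = RTE_FLOW_ITEM_TYPE_ETH,
              .spec = &eth_spec, .mask = &eth_mask },
            { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action_queue queue = { .index = 1 };
        struct rte_flow_action actions[] = {
            { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
            { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        struct rte_flow_error err;

        return rte_flow_create(port_id, &attr, pattern, actions, &err);
    }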
definition */ +}; + +#define IGC_FILTER_MASK_ETHER (1u << IGC_FILTER_TYPE_ETHERTYPE) +#define IGC_FILTER_MASK_NTUPLE (1u << IGC_FILTER_TYPE_NTUPLE) +#define IGC_FILTER_MASK_TCP_SYN (1u << IGC_FILTER_TYPE_SYN) +#define IGC_FILTER_MASK_RSS (1u << IGC_FILTER_TYPE_HASH) +#define IGC_FILTER_MASK_ALL (IGC_FILTER_MASK_ETHER | \ + IGC_FILTER_MASK_NTUPLE | \ + IGC_FILTER_MASK_TCP_SYN | \ + IGC_FILTER_MASK_RSS) + +#define IGC_SET_FILTER_MASK(_filter, _mask_bits) \ + ((_filter)->mask &= (_mask_bits)) + +#define IGC_IS_ALL_BITS_SET(_val) ((_val) == (typeof(_val))~0) +#define IGC_NOT_ALL_BITS_SET(_val) ((_val) != (typeof(_val))~0) + +/* Parse rule attribute */ +static int +igc_parse_attribute(const struct rte_flow_attr *attr, + struct igc_all_filter *filter, struct rte_flow_error *error) +{ + if (!attr) + return 0; + + if (attr->group) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_GROUP, attr, + "Not support"); + + if (attr->egress) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attr, + "Not support"); + + if (attr->transfer) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, attr, + "Not support"); + + if (!attr->ingress) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr, + "A rule must apply to ingress traffic"); + + if (attr->priority == 0) + return 0; + + /* only n-tuple and SYN filter have priority level */ + IGC_SET_FILTER_MASK(filter, + IGC_FILTER_MASK_NTUPLE | IGC_FILTER_MASK_TCP_SYN); + + if (IGC_IS_ALL_BITS_SET(attr->priority)) { + /* only SYN filter match this value */ + IGC_SET_FILTER_MASK(filter, IGC_FILTER_MASK_TCP_SYN); + filter->syn.hig_pri = 1; + return 0; + } + + if (attr->priority > IGC_NTUPLE_MAX_PRI) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, attr, + "Priority value is invalid."); + + if (attr->priority > 1) { + /* only n-tuple filter match this value */ + IGC_SET_FILTER_MASK(filter, IGC_FILTER_MASK_NTUPLE); + + /* get priority */ + filter->ntuple.tuple_info.priority = (uint8_t)attr->priority; + return 0; + } + + /* get priority */ + filter->ntuple.tuple_info.priority = (uint8_t)attr->priority; + filter->syn.hig_pri = (uint8_t)attr->priority; + + return 0; +} + +/* function type of parse pattern */ +typedef int (*igc_pattern_parse)(const struct rte_flow_item *, + struct igc_all_filter *, struct rte_flow_error *); + +static int igc_parse_pattern_void(__rte_unused const struct rte_flow_item *item, + __rte_unused struct igc_all_filter *filter, + __rte_unused struct rte_flow_error *error); +static int igc_parse_pattern_ether(const struct rte_flow_item *item, + struct igc_all_filter *filter, struct rte_flow_error *error); +static int igc_parse_pattern_ip(const struct rte_flow_item *item, + struct igc_all_filter *filter, struct rte_flow_error *error); +static int igc_parse_pattern_ipv6(const struct rte_flow_item *item, + struct igc_all_filter *filter, struct rte_flow_error *error); +static int igc_parse_pattern_udp(const struct rte_flow_item *item, + struct igc_all_filter *filter, struct rte_flow_error *error); +static int igc_parse_pattern_tcp(const struct rte_flow_item *item, + struct igc_all_filter *filter, struct rte_flow_error *error); + +static igc_pattern_parse pattern_parse_list[] = { + [RTE_FLOW_ITEM_TYPE_VOID] = igc_parse_pattern_void, + [RTE_FLOW_ITEM_TYPE_ETH] = igc_parse_pattern_ether, + [RTE_FLOW_ITEM_TYPE_IPV4] = igc_parse_pattern_ip, + [RTE_FLOW_ITEM_TYPE_IPV6] = igc_parse_pattern_ipv6, + [RTE_FLOW_ITEM_TYPE_UDP] = 
igc_parse_pattern_udp, + [RTE_FLOW_ITEM_TYPE_TCP] = igc_parse_pattern_tcp, +}; + +/* Parse rule patterns */ +static int +igc_parse_patterns(const struct rte_flow_item patterns[], + struct igc_all_filter *filter, struct rte_flow_error *error) +{ + const struct rte_flow_item *item = patterns; + + if (item == NULL) { + /* only RSS filter match this pattern */ + IGC_SET_FILTER_MASK(filter, IGC_FILTER_MASK_RSS); + return 0; + } + + for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { + int ret; + + if (item->type >= RTE_DIM(pattern_parse_list)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Not been supported"); + + if (item->last) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM_LAST, item, + "Range not been supported"); + + /* check pattern format is valid */ + if (!!item->spec ^ !!item->mask) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Format error"); + + /* get the pattern type callback */ + igc_pattern_parse parse_func = + pattern_parse_list[item->type]; + if (!parse_func) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Not been supported"); + + /* call the pattern type function */ + ret = parse_func(item, filter, error); + if (ret) + return ret; + + /* if no filter match the pattern */ + if (filter->mask == 0) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Not been supported"); + } + + return 0; +} + +static int igc_parse_action_queue(struct rte_eth_dev *dev, + const struct rte_flow_action *act, + struct igc_all_filter *filter, struct rte_flow_error *error); +static int igc_parse_action_rss(struct rte_eth_dev *dev, + const struct rte_flow_action *act, + struct igc_all_filter *filter, struct rte_flow_error *error); + +/* Parse flow actions */ +static int +igc_parse_actions(struct rte_eth_dev *dev, + const struct rte_flow_action actions[], + struct igc_all_filter *filter, + struct rte_flow_error *error) +{ + const struct rte_flow_action *act = actions; + int ret; + + if (act == NULL) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_NUM, act, + "Action is needed"); + + for (; act->type != RTE_FLOW_ACTION_TYPE_END; act++) { + switch (act->type) { + case RTE_FLOW_ACTION_TYPE_QUEUE: + ret = igc_parse_action_queue(dev, act, filter, error); + if (ret) + return ret; + break; + case RTE_FLOW_ACTION_TYPE_RSS: + ret = igc_parse_action_rss(dev, act, filter, error); + if (ret) + return ret; + break; + case RTE_FLOW_ACTION_TYPE_VOID: + break; + default: + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, act, + "Not been supported"); + } + + /* if no filter match the action */ + if (filter->mask == 0) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, act, + "Not been supported"); + } + + return 0; +} + +/* Parse a flow rule */ +static int +igc_parse_flow(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item patterns[], + const struct rte_flow_action actions[], + struct rte_flow_error *error, + struct igc_all_filter *filter) +{ + int ret; + + /* clear all filters */ + memset(filter, 0, sizeof(*filter)); + + /* set default filter mask */ + filter->mask = IGC_FILTER_MASK_ALL; + + ret = igc_parse_attribute(attr, filter, error); + if (ret) + return ret; + + ret = igc_parse_patterns(patterns, filter, error); + if (ret) + return ret; + + ret = igc_parse_actions(dev, actions, filter, error); + if (ret) + return ret; + + /* if no or more than one filter 
matched this flow */ + if (filter->mask == 0 || (filter->mask & (filter->mask - 1))) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, NULL, + "Flow can't be recognized"); + return 0; +} + +/* Parse pattern type of void */ +static int +igc_parse_pattern_void(__rte_unused const struct rte_flow_item *item, + __rte_unused struct igc_all_filter *filter, + __rte_unused struct rte_flow_error *error) +{ + return 0; +} + +/* Parse pattern type of ethernet header */ +static int +igc_parse_pattern_ether(const struct rte_flow_item *item, + struct igc_all_filter *filter, + struct rte_flow_error *error) +{ + const struct rte_flow_item_eth *spec = item->spec; + const struct rte_flow_item_eth *mask = item->mask; + struct igc_ethertype_filter *ether; + + if (mask == NULL) { + /* only n-tuple and SYN filter match the pattern */ + IGC_SET_FILTER_MASK(filter, IGC_FILTER_MASK_NTUPLE | + IGC_FILTER_MASK_TCP_SYN); + return 0; + } + + /* only ether-type filter match the pattern*/ + IGC_SET_FILTER_MASK(filter, IGC_FILTER_MASK_ETHER); + + /* destination and source MAC address are not supported */ + if (!rte_is_zero_ether_addr(&mask->src) || + !rte_is_zero_ether_addr(&mask->dst)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM_MASK, item, + "Only support ether-type"); + + /* ether-type mask bits must be all 1 */ + if (IGC_NOT_ALL_BITS_SET(mask->type)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM_MASK, item, + "Ethernet type mask bits must be all 1"); + + ether = &filter->ethertype; + + /* get ether-type */ + ether->ether_type = rte_be_to_cpu_16(spec->type); + + /* ether-type should not be IPv4 and IPv6 */ + if (ether->ether_type == RTE_ETHER_TYPE_IPV4 || + ether->ether_type == RTE_ETHER_TYPE_IPV6 || + ether->ether_type == 0) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, NULL, + "IPv4/IPv6/0 not supported by ethertype filter"); + return 0; +} + +/* Parse pattern type of IP */ +static int +igc_parse_pattern_ip(const struct rte_flow_item *item, + struct igc_all_filter *filter, + struct rte_flow_error *error) +{ + const struct rte_flow_item_ipv4 *spec = item->spec; + const struct rte_flow_item_ipv4 *mask = item->mask; + + if (mask == NULL) { + /* only n-tuple and SYN filter match this pattern */ + IGC_SET_FILTER_MASK(filter, + IGC_FILTER_MASK_NTUPLE | IGC_FILTER_MASK_TCP_SYN); + return 0; + } + + /* only n-tuple filter match this pattern */ + IGC_SET_FILTER_MASK(filter, IGC_FILTER_MASK_NTUPLE); + + /* only protocol is used */ + if (mask->hdr.version_ihl || + mask->hdr.type_of_service || + mask->hdr.total_length || + mask->hdr.packet_id || + mask->hdr.fragment_offset || + mask->hdr.time_to_live || + mask->hdr.hdr_checksum || + mask->hdr.dst_addr || + mask->hdr.src_addr) + return rte_flow_error_set(error, + EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_MASK, item, + "IPv4 only support protocol"); + + if (mask->hdr.next_proto_id == 0) + return 0; + + if (IGC_NOT_ALL_BITS_SET(mask->hdr.next_proto_id)) + return rte_flow_error_set(error, + EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_MASK, item, + "IPv4 protocol mask bits must be all 0 or 1"); + + /* get protocol type */ + filter->ntuple.tuple_info.proto_mask = 1; + filter->ntuple.tuple_info.proto = spec->hdr.next_proto_id; + return 0; +} + +/* + * Check ipv6 address is 0 + * Return 1 if true, 0 for false. 
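The acceptance test near the start of this chunk, (filter->mask == 0 || (filter->mask & (filter->mask - 1))), is the usual single-bit check: a rule is accepted only when exactly one candidate filter type survived parsing. For example, a mask of IGC_FILTER_MASK_NTUPLE | IGC_FILTER_MASK_TCP_SYN (0b0110) gives 0b0110 & 0b0101 = 0b0100, which is non-zero, so the rule is rejected as ambiguous; a mask of IGC_FILTER_MASK_TCP_SYN alone (0b0100) gives 0b0100 & 0b0011 = 0 and is accepted.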
+ */ +static inline bool +igc_is_zero_ipv6_addr(const void *ipv6_addr) +{ + const uint64_t *ddw = ipv6_addr; + return ddw[0] == 0 && ddw[1] == 0; +} + +/* Parse pattern type of IPv6 */ +static int +igc_parse_pattern_ipv6(const struct rte_flow_item *item, + struct igc_all_filter *filter, + struct rte_flow_error *error) +{ + const struct rte_flow_item_ipv6 *spec = item->spec; + const struct rte_flow_item_ipv6 *mask = item->mask; + + if (mask == NULL) { + /* only n-tuple and syn filter match this pattern */ + IGC_SET_FILTER_MASK(filter, + IGC_FILTER_MASK_NTUPLE | IGC_FILTER_MASK_TCP_SYN); + return 0; + } + + /* only n-tuple filter match this pattern */ + IGC_SET_FILTER_MASK(filter, IGC_FILTER_MASK_NTUPLE); + + /* only protocol is used */ + if (mask->hdr.vtc_flow || + mask->hdr.payload_len || + mask->hdr.hop_limits || + !igc_is_zero_ipv6_addr(mask->hdr.src_addr) || + !igc_is_zero_ipv6_addr(mask->hdr.dst_addr)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "IPv6 only support protocol"); + + if (mask->hdr.proto == 0) + return 0; + + if (IGC_NOT_ALL_BITS_SET(mask->hdr.proto)) + return rte_flow_error_set(error, + EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_MASK, item, + "IPv6 protocol mask bits must be all 0 or 1"); + + /* get protocol type */ + filter->ntuple.tuple_info.proto_mask = 1; + filter->ntuple.tuple_info.proto = spec->hdr.proto; + + return 0; +} + +/* Parse pattern type of UDP */ +static int +igc_parse_pattern_udp(const struct rte_flow_item *item, + struct igc_all_filter *filter, + struct rte_flow_error *error) +{ + const struct rte_flow_item_udp *spec = item->spec; + const struct rte_flow_item_udp *mask = item->mask; + + /* only n-tuple filter match this pattern */ + IGC_SET_FILTER_MASK(filter, IGC_FILTER_MASK_NTUPLE); + + if (mask == NULL) + return 0; + + /* only destination port is used */ + if (mask->hdr.dgram_len || mask->hdr.dgram_cksum || mask->hdr.src_port) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM_MASK, item, + "UDP only support destination port"); + + if (mask->hdr.dst_port == 0) + return 0; + + if (IGC_NOT_ALL_BITS_SET(mask->hdr.dst_port)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM_MASK, item, + "UDP port mask bits must be all 0 or 1"); + + /* get destination port info. 
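A hypothetical application-side sketch, not part of this patch, of the n-tuple form parsed here: steer UDP packets with destination port 4789 (VXLAN) to RX queue 3. Only dst_port may be masked in, per igc_parse_pattern_udp(), and the example assumes at least four RX queues are configured:

    #include <rte_byteorder.h>
    #include <rte_flow.h>

    static struct rte_flow *
    example_ntuple_udp_rule(uint16_t port_id)
    {
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_item_udp udp_spec = { .hdr.dst_port = RTE_BE16(4789) };
        struct rte_flow_item_udp udp_mask = { .hdr.dst_port = RTE_BE16(0xFFFF) };
        struct rte_flow_item pattern[] = {
            { .type = RTE_FLOW_ITEM_TYPE_UDP,
              .spec = &udp_spec, .mask = &udp_mask },
            { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action_queue queue = { .index = 3 };
        struct rte_flow_action actions[] = {
            { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
            { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        struct rte_flow_error err;

        return rte_flow_create(port_id, &attr, pattern, actions, &err);
    }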
*/ + filter->ntuple.tuple_info.dst_port_mask = 1; + filter->ntuple.tuple_info.dst_port = spec->hdr.dst_port; + + return 0; +} + +/* Parse pattern type of TCP */ +static int +igc_parse_pattern_tcp(const struct rte_flow_item *item, + struct igc_all_filter *filter, + struct rte_flow_error *error) +{ + const struct rte_flow_item_tcp *spec = item->spec; + const struct rte_flow_item_tcp *mask = item->mask; + struct igc_ntuple_info *tuple_info = &filter->ntuple.tuple_info; + + if (mask == NULL) { + /* only n-tuple filter match this pattern */ + IGC_SET_FILTER_MASK(filter, IGC_FILTER_MASK_NTUPLE); + return 0; + } + + /* only n-tuple and SYN filter match this pattern */ + IGC_SET_FILTER_MASK(filter, + IGC_FILTER_MASK_NTUPLE | IGC_FILTER_MASK_TCP_SYN); + + /* only destination port and TCP flags are used */ + if (mask->hdr.sent_seq || + mask->hdr.recv_ack || + mask->hdr.data_off || + mask->hdr.rx_win || + mask->hdr.cksum || + mask->hdr.tcp_urp || + mask->hdr.src_port) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM_MASK, item, + "TCP only support destination port and flags"); + + /* if destination port is used */ + if (mask->hdr.dst_port) { + /* only n-tuple match this pattern */ + IGC_SET_FILTER_MASK(filter, IGC_FILTER_MASK_NTUPLE); + + if (IGC_NOT_ALL_BITS_SET(mask->hdr.dst_port)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM_MASK, item, + "TCP port mask bits must be all 1"); + + /* get destination port info. */ + tuple_info->dst_port = spec->hdr.dst_port; + tuple_info->dst_port_mask = 1; + } + + /* if TCP flags are used */ + if (mask->hdr.tcp_flags) { + if (IGC_IS_ALL_BITS_SET(mask->hdr.tcp_flags)) { + /* only n-tuple match this pattern */ + IGC_SET_FILTER_MASK(filter, IGC_FILTER_MASK_NTUPLE); + + /* get TCP flags */ + tuple_info->tcp_flags = spec->hdr.tcp_flags; + } else if (mask->hdr.tcp_flags == RTE_TCP_SYN_FLAG) { + /* only TCP SYN filter match this pattern */ + IGC_SET_FILTER_MASK(filter, IGC_FILTER_MASK_TCP_SYN); + } else { + /* no filter match this pattern */ + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM_MASK, item, + "TCP flags can't match"); + } + } else { + /* only n-tuple match this pattern */ + IGC_SET_FILTER_MASK(filter, IGC_FILTER_MASK_NTUPLE); + } + + return 0; +} + +static int +igc_parse_action_queue(struct rte_eth_dev *dev, + const struct rte_flow_action *act, + struct igc_all_filter *filter, + struct rte_flow_error *error) +{ + uint16_t queue_idx; + + if (act->conf == NULL) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, act, + "NULL pointer"); + + /* only ether-type, n-tuple, SYN filter match the action */ + IGC_SET_FILTER_MASK(filter, IGC_FILTER_MASK_ETHER | + IGC_FILTER_MASK_NTUPLE | IGC_FILTER_MASK_TCP_SYN); + + /* get queue index */ + queue_idx = ((const struct rte_flow_action_queue *)act->conf)->index; + + /* check the queue index is valid */ + if (queue_idx >= dev->data->nb_rx_queues) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, act, + "Queue id is invalid"); + + /* get queue info. 
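A hypothetical sketch, not part of this patch, of the SYN-filter form: when the TCP mask selects exactly the SYN bit, igc_parse_pattern_tcp() above narrows the candidate set to the SYN filter. This steers TCP SYN packets to RX queue 2 and assumes at least three RX queues are configured:

    #include <rte_flow.h>
    #include <rte_tcp.h>

    static struct rte_flow *
    example_syn_rule(uint16_t port_id)
    {
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_item_tcp tcp_spec = { .hdr.tcp_flags = RTE_TCP_SYN_FLAG };
        struct rte_flow_item_tcp tcp_mask = { .hdr.tcp_flags = RTE_TCP_SYN_FLAG };
        struct rte_flow_item pattern[] = {
            { .type = RTE_FLOW_ITEM_TYPE_TCP,
              .spec = &tcp_spec, .mask = &tcp_mask },
            { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action_queue queue = { .index = 2 };
        struct rte_flow_action actions[] = {
            { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
            { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        struct rte_flow_error err;

        return rte_flow_create(port_id, &attr, pattern, actions, &err);
    }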
*/ + filter->ethertype.queue = queue_idx; + filter->ntuple.queue = queue_idx; + filter->syn.queue = queue_idx; + return 0; +} + +/* Parse action of RSS */ +static int +igc_parse_action_rss(struct rte_eth_dev *dev, + const struct rte_flow_action *act, + struct igc_all_filter *filter, + struct rte_flow_error *error) +{ + const struct rte_flow_action_rss *rss = act->conf; + uint32_t i; + + if (act->conf == NULL) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, act, + "NULL pointer"); + + /* only RSS match the action */ + IGC_SET_FILTER_MASK(filter, IGC_FILTER_MASK_RSS); + + /* RSS redirect table can't be zero and can't exceed 128 */ + if (!rss || !rss->queue_num || rss->queue_num > IGC_RSS_RDT_SIZD) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, act, + "No valid queues"); + + /* queue index can't exceed max queue index */ + for (i = 0; i < rss->queue_num; i++) { + if (rss->queue[i] >= dev->data->nb_rx_queues) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, act, + "Queue id is invalid"); + } + + /* only default RSS hash function is supported */ + if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, act, + "Only default RSS hash functions is supported"); + + if (rss->level) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, act, + "Only 0 RSS encapsulation level is supported"); + + /* check key length is valid */ + if (rss->key_len && rss->key_len != sizeof(filter->rss.key)) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, act, + "RSS hash key must be exactly 40 bytes"); + + /* get RSS info. */ + igc_rss_conf_set(&filter->rss, rss); + return 0; +} + +/** + * Allocate a rte_flow from the heap + * Return the pointer of the flow, or NULL for failed + **/ +static inline struct rte_flow * +igc_alloc_flow(const void *filter, enum igc_filter_type type, uint inbytes) +{ + /* allocate memory, 8 bytes boundary aligned */ + struct rte_flow *flow = rte_malloc("igc flow filter", + sizeof(struct rte_flow) + inbytes, 8); + if (flow == NULL) { + PMD_DRV_LOG(ERR, "failed to allocate memory"); + return NULL; + } + + flow->filter_type = type; + + /* copy filter data */ + memcpy(flow->filter, filter, inbytes); + return flow; +} + +/* Append a rte_flow to the list */ +static inline void +igc_append_flow(struct igc_flow_list *list, struct rte_flow *flow) +{ + TAILQ_INSERT_TAIL(list, flow, node); +} + +/** + * Remove the flow and free the flow buffer + * The caller should make sure the flow is really exist in the list + **/ +static inline void +igc_remove_flow(struct igc_flow_list *list, struct rte_flow *flow) +{ + TAILQ_REMOVE(list, flow, node); + rte_free(flow); +} + +/* Check whether the flow is really in the list or not */ +static inline bool +igc_is_flow_in_list(struct igc_flow_list *list, struct rte_flow *flow) +{ + struct rte_flow *it; + + TAILQ_FOREACH(it, list, node) { + if (it == flow) + return true; + } + + return false; +} + +/** + * Create a flow rule. + * Theoretically one rule can match more than one filters. + * We will let it use the filter which it hit first. + * So, the sequence matters. 
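A hypothetical sketch, not part of this patch, of the RSS form checked by igc_parse_action_rss() above: an empty pattern plus an RSS action with the default hash function, encapsulation level 0 and no key override (key_len 0 keeps the current key). Queue indices must stay below the configured RX queue count; four RX queues are assumed here:

    #include <rte_ethdev.h>
    #include <rte_flow.h>

    static struct rte_flow *
    example_rss_rule(uint16_t port_id)
    {
        static const uint16_t queues[4] = { 0, 1, 2, 3 };
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_item pattern[] = {
            { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action_rss rss = {
            .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
            .types = ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_TCP,
            .queue_num = 4,
            .queue = queues,
        };
        struct rte_flow_action actions[] = {
            { .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss },
            { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        struct rte_flow_error err;

        return rte_flow_create(port_id, &attr, pattern, actions, &err);
    }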
+ **/ +static struct rte_flow * +igc_flow_create(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item patterns[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + struct rte_flow *flow = NULL; + struct igc_all_filter filter; + int ret; + + ret = igc_parse_flow(dev, attr, patterns, actions, error, &filter); + if (ret) + return NULL; + ret = -ENOMEM; + + switch (filter.mask) { + case IGC_FILTER_MASK_ETHER: + flow = igc_alloc_flow(&filter.ethertype, + IGC_FILTER_TYPE_ETHERTYPE, + sizeof(filter.ethertype)); + if (flow) + ret = igc_add_ethertype_filter(dev, &filter.ethertype); + break; + case IGC_FILTER_MASK_NTUPLE: + /* Check n-tuple filter is valid */ + if (filter.ntuple.tuple_info.dst_port_mask == 0 && + filter.ntuple.tuple_info.proto_mask == 0) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_NONE, NULL, + "Flow can't be recognized"); + return NULL; + } + + flow = igc_alloc_flow(&filter.ntuple, IGC_FILTER_TYPE_NTUPLE, + sizeof(filter.ntuple)); + if (flow) + ret = igc_add_ntuple_filter(dev, &filter.ntuple); + break; + case IGC_FILTER_MASK_TCP_SYN: + flow = igc_alloc_flow(&filter.syn, IGC_FILTER_TYPE_SYN, + sizeof(filter.syn)); + if (flow) + ret = igc_set_syn_filter(dev, &filter.syn); + break; + case IGC_FILTER_MASK_RSS: + flow = igc_alloc_flow(&filter.rss, IGC_FILTER_TYPE_HASH, + sizeof(filter.rss)); + if (flow) { + struct igc_rss_filter *rss = + (struct igc_rss_filter *)flow->filter; + rss->conf.key = rss->key; + rss->conf.queue = rss->queue; + ret = igc_add_rss_filter(dev, &filter.rss); + } + break; + default: + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_NONE, NULL, + "Flow can't be recognized"); + return NULL; + } + + if (ret) { + /* check and free the memory */ + if (flow) + rte_free(flow); + + rte_flow_error_set(error, -ret, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Failed to create flow."); + return NULL; + } + + /* append the flow to the tail of the list */ + igc_append_flow(IGC_DEV_PRIVATE_FLOW_LIST(dev), flow); + return flow; +} + +/** + * Check if the flow rule is supported by the device. + * It only checks the format. Don't guarantee the rule can be programmed into + * the HW. Because there can be no enough room for the rule. + **/ +static int +igc_flow_validate(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item patterns[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + struct igc_all_filter filter; + int ret; + + ret = igc_parse_flow(dev, attr, patterns, actions, error, &filter); + if (ret) + return ret; + + switch (filter.mask) { + case IGC_FILTER_MASK_NTUPLE: + /* Check n-tuple filter is valid */ + if (filter.ntuple.tuple_info.dst_port_mask == 0 && + filter.ntuple.tuple_info.proto_mask == 0) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_NONE, NULL, + "Flow can't be recognized"); + break; + } + + return 0; +} + +/** + * Disable a valid flow, the flow must be not NULL and + * chained in the device flow list. 
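For completeness, the teardown helpers defined below (igc_disable_flow(), igc_flow_destroy(), igc_flow_flush()) are reached through the generic rte_flow API. A hypothetical application-side sketch, not part of this patch:

    #include <stdio.h>
    #include <rte_flow.h>

    static void
    example_flow_teardown(uint16_t port_id, struct rte_flow *flow)
    {
        struct rte_flow_error err;

        /* remove one rule previously returned by rte_flow_create() */
        if (rte_flow_destroy(port_id, flow, &err) != 0)
            printf("destroy failed: %s\n", err.message ? err.message : "?");

        /* or drop every rule installed on the port */
        rte_flow_flush(port_id, &err);
    }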
+ **/ +static int +igc_disable_flow(struct rte_eth_dev *dev, struct rte_flow *flow) +{ + int ret = 0; + + switch (flow->filter_type) { + case IGC_FILTER_TYPE_ETHERTYPE: + ret = igc_del_ethertype_filter(dev, + (struct igc_ethertype_filter *)&flow->filter); + break; + case IGC_FILTER_TYPE_NTUPLE: + ret = igc_del_ntuple_filter(dev, + (struct igc_ntuple_filter *)&flow->filter); + break; + case IGC_FILTER_TYPE_SYN: + igc_clear_syn_filter(dev); + break; + case IGC_FILTER_TYPE_HASH: + ret = igc_del_rss_filter(dev); + break; + default: + PMD_DRV_LOG(ERR, "Filter type (%d) not supported", + flow->filter_type); + ret = -EINVAL; + } + + return ret; +} + +/* Destroy a flow rule */ +static int +igc_flow_destroy(struct rte_eth_dev *dev, + struct rte_flow *flow, + struct rte_flow_error *error) +{ + struct igc_flow_list *list = IGC_DEV_PRIVATE_FLOW_LIST(dev); + int ret; + + if (!flow) { + PMD_DRV_LOG(ERR, "NULL flow!"); + return -EINVAL; + } + + /* check the flow is create by IGC PMD */ + if (!igc_is_flow_in_list(list, flow)) { + PMD_DRV_LOG(ERR, "Flow(%p) not been found!", flow); + return -ENOENT; + } + + ret = igc_disable_flow(dev, flow); + if (ret) + rte_flow_error_set(error, -ret, + RTE_FLOW_ERROR_TYPE_HANDLE, + NULL, "Failed to destroy flow"); + + igc_remove_flow(list, flow); + return ret; +} + +/* Initiate device flow list header */ +void +igc_flow_init(struct rte_eth_dev *dev) +{ + TAILQ_INIT(IGC_DEV_PRIVATE_FLOW_LIST(dev)); +} + +/* Destroy all flow in the list and free memory */ +int +igc_flow_flush(struct rte_eth_dev *dev, + __rte_unused struct rte_flow_error *error) +{ + struct igc_flow_list *list = IGC_DEV_PRIVATE_FLOW_LIST(dev); + struct rte_flow *flow; + + while ((flow = TAILQ_FIRST(list)) != NULL) { + igc_disable_flow(dev, flow); + igc_remove_flow(list, flow); + } + + return 0; +} + +const struct rte_flow_ops igc_flow_ops = { + .validate = igc_flow_validate, + .create = igc_flow_create, + .destroy = igc_flow_destroy, + .flush = igc_flow_flush, +}; diff --git a/src/spdk/dpdk/drivers/net/igc/igc_flow.h b/src/spdk/dpdk/drivers/net/igc/igc_flow.h new file mode 100644 index 000000000..310b4bd5a --- /dev/null +++ b/src/spdk/dpdk/drivers/net/igc/igc_flow.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2020 Intel Corporation + */ + +#ifndef _IGC_FLOW_H_ +#define _IGC_FLOW_H_ + +#include +#include "igc_ethdev.h" + +#ifdef __cplusplus +extern "C" { +#endif + +extern const struct rte_flow_ops igc_flow_ops; + +void igc_flow_init(struct rte_eth_dev *dev); +int igc_flow_flush(struct rte_eth_dev *dev, + __rte_unused struct rte_flow_error *error); + +#ifdef __cplusplus +} +#endif + +#endif /* _IGC_FLOW_H_ */ diff --git a/src/spdk/dpdk/drivers/net/igc/igc_logs.c b/src/spdk/dpdk/drivers/net/igc/igc_logs.c new file mode 100644 index 000000000..eff7640b1 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/igc/igc_logs.c @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019-2020 Intel Corporation + */ + +#include + +#include "igc_logs.h" + +/* declared as extern in igc_logs.h */ +int igc_logtype_init; +int igc_logtype_driver; + +RTE_INIT(igc_init_log) +{ + igc_logtype_init = rte_log_register("pmd.net.igc.init"); + if (igc_logtype_init >= 0) + rte_log_set_level(igc_logtype_init, RTE_LOG_INFO); + + igc_logtype_driver = rte_log_register("pmd.net.igc.driver"); + if (igc_logtype_driver >= 0) + rte_log_set_level(igc_logtype_driver, RTE_LOG_INFO); +} diff --git a/src/spdk/dpdk/drivers/net/igc/igc_logs.h b/src/spdk/dpdk/drivers/net/igc/igc_logs.h new file mode 
100644 index 000000000..6457c4d18 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/igc/igc_logs.h @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019-2020 Intel Corporation + */ + +#ifndef _IGC_LOGS_H_ +#define _IGC_LOGS_H_ + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +extern int igc_logtype_init; +extern int igc_logtype_driver; + +#define PMD_INIT_LOG(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, igc_logtype_init, \ + "%s(): " fmt "\n", __func__, ##args) + +#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>") + +#ifdef RTE_LIBRTE_IGC_DEBUG_RX +#define PMD_RX_LOG(level, fmt, args...) \ + RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args) +#else +#define PMD_RX_LOG(level, fmt, args...) do { } while (0) +#endif + +#ifdef RTE_LIBRTE_IGC_DEBUG_TX +#define PMD_TX_LOG(level, fmt, args...) \ + RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args) +#else +#define PMD_TX_LOG(level, fmt, args...) do { } while (0) +#endif + +#define PMD_DRV_LOG_RAW(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, igc_logtype_driver, "%s(): " fmt, \ + __func__, ## args) + +#define PMD_DRV_LOG(level, fmt, args...) \ + PMD_DRV_LOG_RAW(level, fmt "\n", ## args) + +#ifdef __cplusplus +} +#endif + +#endif /* _IGC_LOGS_H_ */ diff --git a/src/spdk/dpdk/drivers/net/igc/igc_txrx.c b/src/spdk/dpdk/drivers/net/igc/igc_txrx.c new file mode 100644 index 000000000..4654ec41f --- /dev/null +++ b/src/spdk/dpdk/drivers/net/igc/igc_txrx.c @@ -0,0 +1,2279 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2020 Intel Corporation + */ + +#include +#include +#include +#include + +#include "igc_logs.h" +#include "igc_txrx.h" + +#ifdef RTE_PMD_USE_PREFETCH +#define rte_igc_prefetch(p) rte_prefetch0(p) +#else +#define rte_igc_prefetch(p) do {} while (0) +#endif + +#ifdef RTE_PMD_PACKET_PREFETCH +#define rte_packet_prefetch(p) rte_prefetch1(p) +#else +#define rte_packet_prefetch(p) do {} while (0) +#endif + +/* Multicast / Unicast table offset mask. */ +#define IGC_RCTL_MO_MSK (3u << IGC_RCTL_MO_SHIFT) + +/* Loopback mode. 
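The two log types registered in igc_logs.c default to INFO, and the RX/TX data-path macros above additionally require RTE_LIBRTE_IGC_DEBUG_RX / RTE_LIBRTE_IGC_DEBUG_TX at build time. A hypothetical sketch, not part of this patch, raising both driver log types to DEBUG at run time (the EAL --log-level option can typically achieve the same):

    #include <rte_log.h>

    static void
    example_enable_igc_debug_logs(void)
    {
        /* matches "pmd.net.igc.init" and "pmd.net.igc.driver" */
        rte_log_set_level_pattern("pmd.net.igc.*", RTE_LOG_DEBUG);
    }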
*/ +#define IGC_RCTL_LBM_SHIFT 6 +#define IGC_RCTL_LBM_MSK (3u << IGC_RCTL_LBM_SHIFT) + +/* Hash select for MTA */ +#define IGC_RCTL_HSEL_SHIFT 8 +#define IGC_RCTL_HSEL_MSK (3u << IGC_RCTL_HSEL_SHIFT) +#define IGC_RCTL_PSP (1u << 21) + +/* Receive buffer size for header buffer */ +#define IGC_SRRCTL_BSIZEHEADER_SHIFT 8 + +/* RX descriptor status and error flags */ +#define IGC_RXD_STAT_L4CS (1u << 5) +#define IGC_RXD_STAT_VEXT (1u << 9) +#define IGC_RXD_STAT_LLINT (1u << 11) +#define IGC_RXD_STAT_SCRC (1u << 12) +#define IGC_RXD_STAT_SMDT_MASK (3u << 13) +#define IGC_RXD_STAT_MC (1u << 19) +#define IGC_RXD_EXT_ERR_L4E (1u << 29) +#define IGC_RXD_EXT_ERR_IPE (1u << 30) +#define IGC_RXD_EXT_ERR_RXE (1u << 31) +#define IGC_RXD_RSS_TYPE_MASK 0xfu +#define IGC_RXD_PCTYPE_MASK (0x7fu << 4) +#define IGC_RXD_ETQF_SHIFT 12 +#define IGC_RXD_ETQF_MSK (0xfu << IGC_RXD_ETQF_SHIFT) +#define IGC_RXD_VPKT (1u << 16) + +/* TXD control bits */ +#define IGC_TXDCTL_PTHRESH_SHIFT 0 +#define IGC_TXDCTL_HTHRESH_SHIFT 8 +#define IGC_TXDCTL_WTHRESH_SHIFT 16 +#define IGC_TXDCTL_PTHRESH_MSK (0x1fu << IGC_TXDCTL_PTHRESH_SHIFT) +#define IGC_TXDCTL_HTHRESH_MSK (0x1fu << IGC_TXDCTL_HTHRESH_SHIFT) +#define IGC_TXDCTL_WTHRESH_MSK (0x1fu << IGC_TXDCTL_WTHRESH_SHIFT) + +/* RXD control bits */ +#define IGC_RXDCTL_PTHRESH_SHIFT 0 +#define IGC_RXDCTL_HTHRESH_SHIFT 8 +#define IGC_RXDCTL_WTHRESH_SHIFT 16 +#define IGC_RXDCTL_PTHRESH_MSK (0x1fu << IGC_RXDCTL_PTHRESH_SHIFT) +#define IGC_RXDCTL_HTHRESH_MSK (0x1fu << IGC_RXDCTL_HTHRESH_SHIFT) +#define IGC_RXDCTL_WTHRESH_MSK (0x1fu << IGC_RXDCTL_WTHRESH_SHIFT) + +#define IGC_TSO_MAX_HDRLEN 512 +#define IGC_TSO_MAX_MSS 9216 + +/* Bit Mask to indicate what bits required for building TX context */ +#define IGC_TX_OFFLOAD_MASK ( \ + PKT_TX_OUTER_IPV4 | \ + PKT_TX_IPV6 | \ + PKT_TX_IPV4 | \ + PKT_TX_VLAN_PKT | \ + PKT_TX_IP_CKSUM | \ + PKT_TX_L4_MASK | \ + PKT_TX_TCP_SEG | \ + PKT_TX_UDP_SEG) + +#define IGC_TX_OFFLOAD_SEG (PKT_TX_TCP_SEG | PKT_TX_UDP_SEG) + +#define IGC_ADVTXD_POPTS_TXSM 0x00000200 /* L4 Checksum offload request */ +#define IGC_ADVTXD_POPTS_IXSM 0x00000100 /* IP Checksum offload request */ + +/* L4 Packet TYPE of Reserved */ +#define IGC_ADVTXD_TUCMD_L4T_RSV 0x00001800 + +#define IGC_TX_OFFLOAD_NOTSUP_MASK (PKT_TX_OFFLOAD_MASK ^ IGC_TX_OFFLOAD_MASK) + +/** + * Structure associated with each descriptor of the RX ring of a RX queue. + */ +struct igc_rx_entry { + struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */ +}; + +/** + * Structure associated with each RX queue. + */ +struct igc_rx_queue { + struct rte_mempool *mb_pool; /**< mbuf pool to populate RX ring. */ + volatile union igc_adv_rx_desc *rx_ring; + /**< RX ring virtual address. */ + uint64_t rx_ring_phys_addr; /**< RX ring DMA address. */ + volatile uint32_t *rdt_reg_addr; /**< RDT register address. */ + volatile uint32_t *rdh_reg_addr; /**< RDH register address. */ + struct igc_rx_entry *sw_ring; /**< address of RX software ring. */ + struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */ + struct rte_mbuf *pkt_last_seg; /**< Last segment of current packet. */ + uint16_t nb_rx_desc; /**< number of RX descriptors. */ + uint16_t rx_tail; /**< current value of RDT register. */ + uint16_t nb_rx_hold; /**< number of held free RX desc. */ + uint16_t rx_free_thresh; /**< max free RX desc to hold. */ + uint16_t queue_id; /**< RX queue index. */ + uint16_t reg_idx; /**< RX queue register index. */ + uint16_t port_id; /**< Device port identifier. 
*/ + uint8_t pthresh; /**< Prefetch threshold register. */ + uint8_t hthresh; /**< Host threshold register. */ + uint8_t wthresh; /**< Write-back threshold register. */ + uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise. */ + uint8_t drop_en; /**< If not 0, set SRRCTL.Drop_En. */ + uint32_t flags; /**< RX flags. */ + uint64_t offloads; /**< offloads of DEV_RX_OFFLOAD_* */ +}; + +/** Offload features */ +union igc_tx_offload { + uint64_t data; + struct { + uint64_t l3_len:9; /**< L3 (IP) Header Length. */ + uint64_t l2_len:7; /**< L2 (MAC) Header Length. */ + uint64_t vlan_tci:16; + /**< VLAN Tag Control Identifier(CPU order). */ + uint64_t l4_len:8; /**< L4 (TCP/UDP) Header Length. */ + uint64_t tso_segsz:16; /**< TCP TSO segment size. */ + /* uint64_t unused:8; */ + }; +}; + +/* + * Compare mask for igc_tx_offload.data, + * should be in sync with igc_tx_offload layout. + */ +#define TX_MACIP_LEN_CMP_MASK 0x000000000000FFFFULL /**< L2L3 header mask. */ +#define TX_VLAN_CMP_MASK 0x00000000FFFF0000ULL /**< Vlan mask. */ +#define TX_TCP_LEN_CMP_MASK 0x000000FF00000000ULL /**< TCP header mask. */ +#define TX_TSO_MSS_CMP_MASK 0x00FFFF0000000000ULL /**< TSO segsz mask. */ +/** Mac + IP + TCP + Mss mask. */ +#define TX_TSO_CMP_MASK \ + (TX_MACIP_LEN_CMP_MASK | TX_TCP_LEN_CMP_MASK | TX_TSO_MSS_CMP_MASK) + +/** + * Structure to check if new context need be built + */ +struct igc_advctx_info { + uint64_t flags; /**< ol_flags related to context build. */ + /** tx offload: vlan, tso, l2-l3-l4 lengths. */ + union igc_tx_offload tx_offload; + /** compare mask for tx offload. */ + union igc_tx_offload tx_offload_mask; +}; + +/** + * Hardware context number + */ +enum { + IGC_CTX_0 = 0, /**< CTX0 */ + IGC_CTX_1 = 1, /**< CTX1 */ + IGC_CTX_NUM = 2, /**< CTX_NUM */ +}; + +/** + * Structure associated with each descriptor of the TX ring of a TX queue. + */ +struct igc_tx_entry { + struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */ + uint16_t next_id; /**< Index of next descriptor in ring. */ + uint16_t last_id; /**< Index of last scattered descriptor. */ +}; + +/** + * Structure associated with each TX queue. + */ +struct igc_tx_queue { + volatile union igc_adv_tx_desc *tx_ring; /**< TX ring address */ + uint64_t tx_ring_phys_addr; /**< TX ring DMA address. */ + struct igc_tx_entry *sw_ring; /**< virtual address of SW ring. */ + volatile uint32_t *tdt_reg_addr; /**< Address of TDT register. */ + uint32_t txd_type; /**< Device-specific TXD type */ + uint16_t nb_tx_desc; /**< number of TX descriptors. */ + uint16_t tx_tail; /**< Current value of TDT register. */ + uint16_t tx_head; + /**< Index of first used TX descriptor. */ + uint16_t queue_id; /**< TX queue index. */ + uint16_t reg_idx; /**< TX queue register index. */ + uint16_t port_id; /**< Device port identifier. */ + uint8_t pthresh; /**< Prefetch threshold register. */ + uint8_t hthresh; /**< Host threshold register. */ + uint8_t wthresh; /**< Write-back threshold register. */ + uint8_t ctx_curr; + + /**< Start context position for transmit queue. 
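Toggles between IGC_CTX_0 and IGC_CTX_1 each time a new context descriptor is written (see igc_set_xmit_ctx()).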
*/ + struct igc_advctx_info ctx_cache[IGC_CTX_NUM]; + /**< Hardware context history.*/ + uint64_t offloads; /**< offloads of DEV_TX_OFFLOAD_* */ +}; + +static inline uint64_t +rx_desc_statuserr_to_pkt_flags(uint32_t statuserr) +{ + static uint64_t l4_chksum_flags[] = {0, 0, PKT_RX_L4_CKSUM_GOOD, + PKT_RX_L4_CKSUM_BAD}; + + static uint64_t l3_chksum_flags[] = {0, 0, PKT_RX_IP_CKSUM_GOOD, + PKT_RX_IP_CKSUM_BAD}; + uint64_t pkt_flags = 0; + uint32_t tmp; + + if (statuserr & IGC_RXD_STAT_VP) + pkt_flags |= PKT_RX_VLAN_STRIPPED; + + tmp = !!(statuserr & (IGC_RXD_STAT_L4CS | IGC_RXD_STAT_UDPCS)); + tmp = (tmp << 1) | (uint32_t)!!(statuserr & IGC_RXD_EXT_ERR_L4E); + pkt_flags |= l4_chksum_flags[tmp]; + + tmp = !!(statuserr & IGC_RXD_STAT_IPCS); + tmp = (tmp << 1) | (uint32_t)!!(statuserr & IGC_RXD_EXT_ERR_IPE); + pkt_flags |= l3_chksum_flags[tmp]; + + return pkt_flags; +} + +#define IGC_PACKET_TYPE_IPV4 0X01 +#define IGC_PACKET_TYPE_IPV4_TCP 0X11 +#define IGC_PACKET_TYPE_IPV4_UDP 0X21 +#define IGC_PACKET_TYPE_IPV4_SCTP 0X41 +#define IGC_PACKET_TYPE_IPV4_EXT 0X03 +#define IGC_PACKET_TYPE_IPV4_EXT_SCTP 0X43 +#define IGC_PACKET_TYPE_IPV6 0X04 +#define IGC_PACKET_TYPE_IPV6_TCP 0X14 +#define IGC_PACKET_TYPE_IPV6_UDP 0X24 +#define IGC_PACKET_TYPE_IPV6_EXT 0X0C +#define IGC_PACKET_TYPE_IPV6_EXT_TCP 0X1C +#define IGC_PACKET_TYPE_IPV6_EXT_UDP 0X2C +#define IGC_PACKET_TYPE_IPV4_IPV6 0X05 +#define IGC_PACKET_TYPE_IPV4_IPV6_TCP 0X15 +#define IGC_PACKET_TYPE_IPV4_IPV6_UDP 0X25 +#define IGC_PACKET_TYPE_IPV4_IPV6_EXT 0X0D +#define IGC_PACKET_TYPE_IPV4_IPV6_EXT_TCP 0X1D +#define IGC_PACKET_TYPE_IPV4_IPV6_EXT_UDP 0X2D +#define IGC_PACKET_TYPE_MAX 0X80 +#define IGC_PACKET_TYPE_MASK 0X7F +#define IGC_PACKET_TYPE_SHIFT 0X04 + +static inline uint32_t +rx_desc_pkt_info_to_pkt_type(uint32_t pkt_info) +{ + static const uint32_t + ptype_table[IGC_PACKET_TYPE_MAX] __rte_cache_aligned = { + [IGC_PACKET_TYPE_IPV4] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4, + [IGC_PACKET_TYPE_IPV4_EXT] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT, + [IGC_PACKET_TYPE_IPV6] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6, + [IGC_PACKET_TYPE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6, + [IGC_PACKET_TYPE_IPV6_EXT] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6_EXT, + [IGC_PACKET_TYPE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT, + [IGC_PACKET_TYPE_IPV4_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP, + [IGC_PACKET_TYPE_IPV6_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP, + [IGC_PACKET_TYPE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP, + [IGC_PACKET_TYPE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP, + [IGC_PACKET_TYPE_IPV4_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP, + [IGC_PACKET_TYPE_IPV4_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP, + [IGC_PACKET_TYPE_IPV6_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP, + [IGC_PACKET_TYPE_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP, + [IGC_PACKET_TYPE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP, + [IGC_PACKET_TYPE_IPV4_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | 
RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP, + [IGC_PACKET_TYPE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP, + [IGC_PACKET_TYPE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_SCTP, + }; + if (unlikely(pkt_info & IGC_RXDADV_PKTTYPE_ETQF)) + return RTE_PTYPE_UNKNOWN; + + pkt_info = (pkt_info >> IGC_PACKET_TYPE_SHIFT) & IGC_PACKET_TYPE_MASK; + + return ptype_table[pkt_info]; +} + +static inline void +rx_desc_get_pkt_info(struct igc_rx_queue *rxq, struct rte_mbuf *rxm, + union igc_adv_rx_desc *rxd, uint32_t staterr) +{ + uint64_t pkt_flags; + uint32_t hlen_type_rss; + uint16_t pkt_info; + + /* Prefetch data of first segment, if configured to do so. */ + rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off); + + rxm->port = rxq->port_id; + hlen_type_rss = rte_le_to_cpu_32(rxd->wb.lower.lo_dword.data); + rxm->hash.rss = rte_le_to_cpu_32(rxd->wb.lower.hi_dword.rss); + rxm->vlan_tci = rte_le_to_cpu_16(rxd->wb.upper.vlan); + + pkt_flags = (hlen_type_rss & IGC_RXD_RSS_TYPE_MASK) ? + PKT_RX_RSS_HASH : 0; + + if (hlen_type_rss & IGC_RXD_VPKT) + pkt_flags |= PKT_RX_VLAN; + + pkt_flags |= rx_desc_statuserr_to_pkt_flags(staterr); + + rxm->ol_flags = pkt_flags; + pkt_info = rte_le_to_cpu_16(rxd->wb.lower.lo_dword.hs_rss.pkt_info); + rxm->packet_type = rx_desc_pkt_info_to_pkt_type(pkt_info); +} + +static uint16_t +igc_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) +{ + struct igc_rx_queue * const rxq = rx_queue; + volatile union igc_adv_rx_desc * const rx_ring = rxq->rx_ring; + struct igc_rx_entry * const sw_ring = rxq->sw_ring; + uint16_t rx_id = rxq->rx_tail; + uint16_t nb_rx = 0; + uint16_t nb_hold = 0; + + while (nb_rx < nb_pkts) { + volatile union igc_adv_rx_desc *rxdp; + struct igc_rx_entry *rxe; + struct rte_mbuf *rxm; + struct rte_mbuf *nmb; + union igc_adv_rx_desc rxd; + uint32_t staterr; + uint16_t data_len; + + /* + * The order of operations here is important as the DD status + * bit must not be read after any other descriptor fields. + * rx_ring and rxdp are pointing to volatile data so the order + * of accesses cannot be reordered by the compiler. If they were + * not volatile, they could be reordered which could lead to + * using invalid descriptor fields when read from rxd. + */ + rxdp = &rx_ring[rx_id]; + staterr = rte_cpu_to_le_32(rxdp->wb.upper.status_error); + if (!(staterr & IGC_RXD_STAT_DD)) + break; + rxd = *rxdp; + + /* + * End of packet. + * + * If the IGC_RXD_STAT_EOP flag is not set, the RX packet is + * likely to be invalid and to be dropped by the various + * validation checks performed by the network stack. + * + * Allocate a new mbuf to replenish the RX ring descriptor. + * If the allocation fails: + * - arrange for that RX descriptor to be the first one + * being parsed the next time the receive function is + * invoked [on the same queue]. + * + * - Stop parsing the RX ring and return immediately. + * + * This policy does not drop the packet received in the RX + * descriptor for which the allocation of a new mbuf failed. + * Thus, it allows that packet to be later retrieved if + * mbuf have been freed in the mean time. + * As a side effect, holding RX descriptors instead of + * systematically giving them back to the NIC may lead to + * RX ring exhaustion situations. + * However, the NIC can gracefully prevent such situations + * to happen by sending specific "back-pressure" flow control + * frames to its peer(s). 
+ */ + PMD_RX_LOG(DEBUG, + "port_id=%u queue_id=%u rx_id=%u staterr=0x%x data_len=%u", + rxq->port_id, rxq->queue_id, rx_id, staterr, + rte_le_to_cpu_16(rxd.wb.upper.length)); + + nmb = rte_mbuf_raw_alloc(rxq->mb_pool); + if (nmb == NULL) { + unsigned int id; + PMD_RX_LOG(DEBUG, + "RX mbuf alloc failed, port_id=%u queue_id=%u", + rxq->port_id, rxq->queue_id); + id = rxq->port_id; + rte_eth_devices[id].data->rx_mbuf_alloc_failed++; + break; + } + + nb_hold++; + rxe = &sw_ring[rx_id]; + rx_id++; + if (rx_id >= rxq->nb_rx_desc) + rx_id = 0; + + /* Prefetch next mbuf while processing current one. */ + rte_igc_prefetch(sw_ring[rx_id].mbuf); + + /* + * When next RX descriptor is on a cache-line boundary, + * prefetch the next 4 RX descriptors and the next 8 pointers + * to mbufs. + */ + if ((rx_id & 0x3) == 0) { + rte_igc_prefetch(&rx_ring[rx_id]); + rte_igc_prefetch(&sw_ring[rx_id]); + } + + /* + * Update RX descriptor with the physical address of the new + * data buffer of the new allocated mbuf. + */ + rxm = rxe->mbuf; + rxe->mbuf = nmb; + rxdp->read.hdr_addr = 0; + rxdp->read.pkt_addr = + rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb)); + rxm->next = NULL; + + rxm->data_off = RTE_PKTMBUF_HEADROOM; + data_len = rte_le_to_cpu_16(rxd.wb.upper.length) - rxq->crc_len; + rxm->data_len = data_len; + rxm->pkt_len = data_len; + rxm->nb_segs = 1; + + rx_desc_get_pkt_info(rxq, rxm, &rxd, staterr); + + /* + * Store the mbuf address into the next entry of the array + * of returned packets. + */ + rx_pkts[nb_rx++] = rxm; + } + rxq->rx_tail = rx_id; + + /* + * If the number of free RX descriptors is greater than the RX free + * threshold of the queue, advance the Receive Descriptor Tail (RDT) + * register. + * Update the RDT with the value of the last processed RX descriptor + * minus 1, to guarantee that the RDT register is never equal to the + * RDH register, which creates a "full" ring situation from the + * hardware point of view... + */ + nb_hold = nb_hold + rxq->nb_rx_hold; + if (nb_hold > rxq->rx_free_thresh) { + PMD_RX_LOG(DEBUG, + "port_id=%u queue_id=%u rx_tail=%u nb_hold=%u nb_rx=%u", + rxq->port_id, rxq->queue_id, rx_id, nb_hold, nb_rx); + rx_id = (rx_id == 0) ? (rxq->nb_rx_desc - 1) : (rx_id - 1); + IGC_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id); + nb_hold = 0; + } + rxq->nb_rx_hold = nb_hold; + return nb_rx; +} + +static uint16_t +igc_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + struct igc_rx_queue * const rxq = rx_queue; + volatile union igc_adv_rx_desc * const rx_ring = rxq->rx_ring; + struct igc_rx_entry * const sw_ring = rxq->sw_ring; + struct rte_mbuf *first_seg = rxq->pkt_first_seg; + struct rte_mbuf *last_seg = rxq->pkt_last_seg; + + uint16_t rx_id = rxq->rx_tail; + uint16_t nb_rx = 0; + uint16_t nb_hold = 0; + + while (nb_rx < nb_pkts) { + volatile union igc_adv_rx_desc *rxdp; + struct igc_rx_entry *rxe; + struct rte_mbuf *rxm; + struct rte_mbuf *nmb; + union igc_adv_rx_desc rxd; + uint32_t staterr; + uint16_t data_len; + +next_desc: + /* + * The order of operations here is important as the DD status + * bit must not be read after any other descriptor fields. + * rx_ring and rxdp are pointing to volatile data so the order + * of accesses cannot be reordered by the compiler. If they were + * not volatile, they could be reordered which could lead to + * using invalid descriptor fields when read from rxd. 
+ */ + rxdp = &rx_ring[rx_id]; + staterr = rte_cpu_to_le_32(rxdp->wb.upper.status_error); + if (!(staterr & IGC_RXD_STAT_DD)) + break; + rxd = *rxdp; + + /* + * Descriptor done. + * + * Allocate a new mbuf to replenish the RX ring descriptor. + * If the allocation fails: + * - arrange for that RX descriptor to be the first one + * being parsed the next time the receive function is + * invoked [on the same queue]. + * + * - Stop parsing the RX ring and return immediately. + * + * This policy does not drop the packet received in the RX + * descriptor for which the allocation of a new mbuf failed. + * Thus, it allows that packet to be later retrieved if + * mbuf have been freed in the mean time. + * As a side effect, holding RX descriptors instead of + * systematically giving them back to the NIC may lead to + * RX ring exhaustion situations. + * However, the NIC can gracefully prevent such situations + * to happen by sending specific "back-pressure" flow control + * frames to its peer(s). + */ + PMD_RX_LOG(DEBUG, + "port_id=%u queue_id=%u rx_id=%u staterr=0x%x data_len=%u", + rxq->port_id, rxq->queue_id, rx_id, staterr, + rte_le_to_cpu_16(rxd.wb.upper.length)); + + nmb = rte_mbuf_raw_alloc(rxq->mb_pool); + if (nmb == NULL) { + unsigned int id; + PMD_RX_LOG(DEBUG, + "RX mbuf alloc failed, port_id=%u queue_id=%u", + rxq->port_id, rxq->queue_id); + id = rxq->port_id; + rte_eth_devices[id].data->rx_mbuf_alloc_failed++; + break; + } + + nb_hold++; + rxe = &sw_ring[rx_id]; + rx_id++; + if (rx_id >= rxq->nb_rx_desc) + rx_id = 0; + + /* Prefetch next mbuf while processing current one. */ + rte_igc_prefetch(sw_ring[rx_id].mbuf); + + /* + * When next RX descriptor is on a cache-line boundary, + * prefetch the next 4 RX descriptors and the next 8 pointers + * to mbufs. + */ + if ((rx_id & 0x3) == 0) { + rte_igc_prefetch(&rx_ring[rx_id]); + rte_igc_prefetch(&sw_ring[rx_id]); + } + + /* + * Update RX descriptor with the physical address of the new + * data buffer of the new allocated mbuf. + */ + rxm = rxe->mbuf; + rxe->mbuf = nmb; + rxdp->read.hdr_addr = 0; + rxdp->read.pkt_addr = + rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb)); + rxm->next = NULL; + + /* + * Set data length & data buffer address of mbuf. + */ + rxm->data_off = RTE_PKTMBUF_HEADROOM; + data_len = rte_le_to_cpu_16(rxd.wb.upper.length); + rxm->data_len = data_len; + + /* + * If this is the first buffer of the received packet, + * set the pointer to the first mbuf of the packet and + * initialize its context. + * Otherwise, update the total length and the number of segments + * of the current scattered packet, and update the pointer to + * the last mbuf of the current packet. + */ + if (first_seg == NULL) { + first_seg = rxm; + first_seg->pkt_len = data_len; + first_seg->nb_segs = 1; + } else { + first_seg->pkt_len += data_len; + first_seg->nb_segs++; + last_seg->next = rxm; + } + + /* + * If this is not the last buffer of the received packet, + * update the pointer to the last mbuf of the current scattered + * packet and continue to parse the RX ring. + */ + if (!(staterr & IGC_RXD_STAT_EOP)) { + last_seg = rxm; + goto next_desc; + } + + /* + * This is the last buffer of the received packet. + * If the CRC is not stripped by the hardware: + * - Subtract the CRC length from the total packet length. + * - If the last buffer only contains the whole CRC or a part + * of it, free the mbuf associated to the last buffer. 
+ * If part of the CRC is also contained in the previous + * mbuf, subtract the length of that CRC part from the + * data length of the previous mbuf. + */ + if (unlikely(rxq->crc_len > 0)) { + first_seg->pkt_len -= RTE_ETHER_CRC_LEN; + if (data_len <= RTE_ETHER_CRC_LEN) { + rte_pktmbuf_free_seg(rxm); + first_seg->nb_segs--; + last_seg->data_len = last_seg->data_len - + (RTE_ETHER_CRC_LEN - data_len); + last_seg->next = NULL; + } else { + rxm->data_len = (uint16_t) + (data_len - RTE_ETHER_CRC_LEN); + } + } + + rx_desc_get_pkt_info(rxq, first_seg, &rxd, staterr); + + /* + * Store the mbuf address into the next entry of the array + * of returned packets. + */ + rx_pkts[nb_rx++] = first_seg; + + /* Setup receipt context for a new packet. */ + first_seg = NULL; + } + rxq->rx_tail = rx_id; + + /* + * Save receive context. + */ + rxq->pkt_first_seg = first_seg; + rxq->pkt_last_seg = last_seg; + + /* + * If the number of free RX descriptors is greater than the RX free + * threshold of the queue, advance the Receive Descriptor Tail (RDT) + * register. + * Update the RDT with the value of the last processed RX descriptor + * minus 1, to guarantee that the RDT register is never equal to the + * RDH register, which creates a "full" ring situation from the + * hardware point of view... + */ + nb_hold = nb_hold + rxq->nb_rx_hold; + if (nb_hold > rxq->rx_free_thresh) { + PMD_RX_LOG(DEBUG, + "port_id=%u queue_id=%u rx_tail=%u nb_hold=%u nb_rx=%u", + rxq->port_id, rxq->queue_id, rx_id, nb_hold, nb_rx); + rx_id = (rx_id == 0) ? (rxq->nb_rx_desc - 1) : (rx_id - 1); + IGC_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id); + nb_hold = 0; + } + rxq->nb_rx_hold = nb_hold; + return nb_rx; +} + +static void +igc_rx_queue_release_mbufs(struct igc_rx_queue *rxq) +{ + unsigned int i; + + if (rxq->sw_ring != NULL) { + for (i = 0; i < rxq->nb_rx_desc; i++) { + if (rxq->sw_ring[i].mbuf != NULL) { + rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf); + rxq->sw_ring[i].mbuf = NULL; + } + } + } +} + +static void +igc_rx_queue_release(struct igc_rx_queue *rxq) +{ + igc_rx_queue_release_mbufs(rxq); + rte_free(rxq->sw_ring); + rte_free(rxq); +} + +void eth_igc_rx_queue_release(void *rxq) +{ + if (rxq) + igc_rx_queue_release(rxq); +} + +uint32_t eth_igc_rx_queue_count(struct rte_eth_dev *dev, + uint16_t rx_queue_id) +{ + /** + * Check the DD bit of a rx descriptor of each 4 in a group, + * to avoid checking too frequently and downgrading performance + * too much. 
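+ * The returned count is therefore a multiple of IGC_RXQ_SCAN_INTERVAL rather than an exact number of completed descriptors.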
+ */ +#define IGC_RXQ_SCAN_INTERVAL 4 + + volatile union igc_adv_rx_desc *rxdp; + struct igc_rx_queue *rxq; + uint16_t desc = 0; + + rxq = dev->data->rx_queues[rx_queue_id]; + rxdp = &rxq->rx_ring[rxq->rx_tail]; + + while (desc < rxq->nb_rx_desc - rxq->rx_tail) { + if (unlikely(!(rxdp->wb.upper.status_error & + IGC_RXD_STAT_DD))) + return desc; + desc += IGC_RXQ_SCAN_INTERVAL; + rxdp += IGC_RXQ_SCAN_INTERVAL; + } + rxdp = &rxq->rx_ring[rxq->rx_tail + desc - rxq->nb_rx_desc]; + + while (desc < rxq->nb_rx_desc && + (rxdp->wb.upper.status_error & IGC_RXD_STAT_DD)) { + desc += IGC_RXQ_SCAN_INTERVAL; + rxdp += IGC_RXQ_SCAN_INTERVAL; + } + + return desc; +} + +int eth_igc_rx_descriptor_done(void *rx_queue, uint16_t offset) +{ + volatile union igc_adv_rx_desc *rxdp; + struct igc_rx_queue *rxq = rx_queue; + uint32_t desc; + + if (unlikely(!rxq || offset >= rxq->nb_rx_desc)) + return 0; + + desc = rxq->rx_tail + offset; + if (desc >= rxq->nb_rx_desc) + desc -= rxq->nb_rx_desc; + + rxdp = &rxq->rx_ring[desc]; + return !!(rxdp->wb.upper.status_error & + rte_cpu_to_le_32(IGC_RXD_STAT_DD)); +} + +int eth_igc_rx_descriptor_status(void *rx_queue, uint16_t offset) +{ + struct igc_rx_queue *rxq = rx_queue; + volatile uint32_t *status; + uint32_t desc; + + if (unlikely(!rxq || offset >= rxq->nb_rx_desc)) + return -EINVAL; + + if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold) + return RTE_ETH_RX_DESC_UNAVAIL; + + desc = rxq->rx_tail + offset; + if (desc >= rxq->nb_rx_desc) + desc -= rxq->nb_rx_desc; + + status = &rxq->rx_ring[desc].wb.upper.status_error; + if (*status & rte_cpu_to_le_32(IGC_RXD_STAT_DD)) + return RTE_ETH_RX_DESC_DONE; + + return RTE_ETH_RX_DESC_AVAIL; +} + +static int +igc_alloc_rx_queue_mbufs(struct igc_rx_queue *rxq) +{ + struct igc_rx_entry *rxe = rxq->sw_ring; + uint64_t dma_addr; + unsigned int i; + + /* Initialize software ring entries. */ + for (i = 0; i < rxq->nb_rx_desc; i++) { + volatile union igc_adv_rx_desc *rxd; + struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool); + + if (mbuf == NULL) { + PMD_DRV_LOG(ERR, "RX mbuf alloc failed, queue_id=%hu", + rxq->queue_id); + return -ENOMEM; + } + dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf)); + rxd = &rxq->rx_ring[i]; + rxd->read.hdr_addr = 0; + rxd->read.pkt_addr = dma_addr; + rxe[i].mbuf = mbuf; + } + + return 0; +} + +/* + * RSS random key supplied in section 7.1.2.9.3 of the Intel I225 datasheet. + * Used as the default key. + */ +static uint8_t default_rss_key[40] = { + 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, + 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0, + 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4, + 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, + 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA, +}; + +void +igc_rss_disable(struct rte_eth_dev *dev) +{ + struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); + uint32_t mrqc; + + mrqc = IGC_READ_REG(hw, IGC_MRQC); + mrqc &= ~IGC_MRQC_ENABLE_MASK; + IGC_WRITE_REG(hw, IGC_MRQC, mrqc); +} + +void +igc_hw_rss_hash_set(struct igc_hw *hw, struct rte_eth_rss_conf *rss_conf) +{ + uint32_t *hash_key = (uint32_t *)rss_conf->rss_key; + uint32_t mrqc; + uint64_t rss_hf; + + if (hash_key != NULL) { + uint8_t i; + + /* Fill in RSS hash key */ + for (i = 0; i < IGC_HKEY_MAX_INDEX; i++) + IGC_WRITE_REG_LE_VALUE(hw, IGC_RSSRK(i), hash_key[i]); + } + + /* Set configured hashing protocols in MRQC register */ + rss_hf = rss_conf->rss_hf; + mrqc = IGC_MRQC_ENABLE_RSS_4Q; /* RSS enabled. 
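Per-protocol hash fields from rss_hf are OR'ed in below.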
*/ + if (rss_hf & ETH_RSS_IPV4) + mrqc |= IGC_MRQC_RSS_FIELD_IPV4; + if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) + mrqc |= IGC_MRQC_RSS_FIELD_IPV4_TCP; + if (rss_hf & ETH_RSS_IPV6) + mrqc |= IGC_MRQC_RSS_FIELD_IPV6; + if (rss_hf & ETH_RSS_IPV6_EX) + mrqc |= IGC_MRQC_RSS_FIELD_IPV6_EX; + if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) + mrqc |= IGC_MRQC_RSS_FIELD_IPV6_TCP; + if (rss_hf & ETH_RSS_IPV6_TCP_EX) + mrqc |= IGC_MRQC_RSS_FIELD_IPV6_TCP_EX; + if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) + mrqc |= IGC_MRQC_RSS_FIELD_IPV4_UDP; + if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) + mrqc |= IGC_MRQC_RSS_FIELD_IPV6_UDP; + if (rss_hf & ETH_RSS_IPV6_UDP_EX) + mrqc |= IGC_MRQC_RSS_FIELD_IPV6_UDP_EX; + IGC_WRITE_REG(hw, IGC_MRQC, mrqc); +} + +static void +igc_rss_configure(struct rte_eth_dev *dev) +{ + struct rte_eth_rss_conf rss_conf; + struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); + uint16_t i; + + /* Fill in redirection table. */ + for (i = 0; i < IGC_RSS_RDT_SIZD; i++) { + union igc_rss_reta_reg reta; + uint16_t q_idx, reta_idx; + + q_idx = (uint8_t)((dev->data->nb_rx_queues > 1) ? + i % dev->data->nb_rx_queues : 0); + reta_idx = i % sizeof(reta); + reta.bytes[reta_idx] = q_idx; + if (reta_idx == sizeof(reta) - 1) + IGC_WRITE_REG_LE_VALUE(hw, + IGC_RETA(i / sizeof(reta)), reta.dword); + } + + /* + * Configure the RSS key and the RSS protocols used to compute + * the RSS hash of input packets. + */ + rss_conf = dev->data->dev_conf.rx_adv_conf.rss_conf; + if (rss_conf.rss_key == NULL) + rss_conf.rss_key = default_rss_key; + igc_hw_rss_hash_set(hw, &rss_conf); +} + +int +igc_del_rss_filter(struct rte_eth_dev *dev) +{ + struct igc_rss_filter *rss_filter = IGC_DEV_PRIVATE_RSS_FILTER(dev); + + if (rss_filter->enable) { + /* recover default RSS configuration */ + igc_rss_configure(dev); + + /* disable RSS logic and clear filter data */ + igc_rss_disable(dev); + memset(rss_filter, 0, sizeof(*rss_filter)); + return 0; + } + PMD_DRV_LOG(ERR, "filter not exist!"); + return -ENOENT; +} + +/* Initiate the filter structure by the structure of rte_flow_action_rss */ +void +igc_rss_conf_set(struct igc_rss_filter *out, + const struct rte_flow_action_rss *rss) +{ + out->conf.func = rss->func; + out->conf.level = rss->level; + out->conf.types = rss->types; + + if (rss->key_len == sizeof(out->key)) { + memcpy(out->key, rss->key, rss->key_len); + out->conf.key = out->key; + out->conf.key_len = rss->key_len; + } else { + out->conf.key = NULL; + out->conf.key_len = 0; + } + + if (rss->queue_num <= IGC_RSS_RDT_SIZD) { + memcpy(out->queue, rss->queue, + sizeof(*out->queue) * rss->queue_num); + out->conf.queue = out->queue; + out->conf.queue_num = rss->queue_num; + } else { + out->conf.queue = NULL; + out->conf.queue_num = 0; + } +} + +int +igc_add_rss_filter(struct rte_eth_dev *dev, struct igc_rss_filter *rss) +{ + struct rte_eth_rss_conf rss_conf = { + .rss_key = rss->conf.key_len ? 
+ (void *)(uintptr_t)rss->conf.key : NULL, + .rss_key_len = rss->conf.key_len, + .rss_hf = rss->conf.types, + }; + struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); + struct igc_rss_filter *rss_filter = IGC_DEV_PRIVATE_RSS_FILTER(dev); + uint32_t i, j; + + /* check RSS type is valid */ + if ((rss_conf.rss_hf & IGC_RSS_OFFLOAD_ALL) == 0) { + PMD_DRV_LOG(ERR, + "RSS type(0x%" PRIx64 ") error!, only 0x%" PRIx64 + " been supported", rss_conf.rss_hf, + (uint64_t)IGC_RSS_OFFLOAD_ALL); + return -EINVAL; + } + + /* check queue count is not zero */ + if (!rss->conf.queue_num) { + PMD_DRV_LOG(ERR, "Queue number should not be 0!"); + return -EINVAL; + } + + /* check queue id is valid */ + for (i = 0; i < rss->conf.queue_num; i++) + if (rss->conf.queue[i] >= dev->data->nb_rx_queues) { + PMD_DRV_LOG(ERR, "Queue id %u is invalid!", + rss->conf.queue[i]); + return -EINVAL; + } + + /* only support one filter */ + if (rss_filter->enable) { + PMD_DRV_LOG(ERR, "Only support one RSS filter!"); + return -ENOTSUP; + } + rss_filter->enable = 1; + + igc_rss_conf_set(rss_filter, &rss->conf); + + /* Fill in redirection table. */ + for (i = 0, j = 0; i < IGC_RSS_RDT_SIZD; i++, j++) { + union igc_rss_reta_reg reta; + uint16_t q_idx, reta_idx; + + if (j == rss->conf.queue_num) + j = 0; + q_idx = rss->conf.queue[j]; + reta_idx = i % sizeof(reta); + reta.bytes[reta_idx] = q_idx; + if (reta_idx == sizeof(reta) - 1) + IGC_WRITE_REG_LE_VALUE(hw, + IGC_RETA(i / sizeof(reta)), reta.dword); + } + + if (rss_conf.rss_key == NULL) + rss_conf.rss_key = default_rss_key; + igc_hw_rss_hash_set(hw, &rss_conf); + return 0; +} + +void +igc_clear_rss_filter(struct rte_eth_dev *dev) +{ + struct igc_rss_filter *rss_filter = IGC_DEV_PRIVATE_RSS_FILTER(dev); + + if (!rss_filter->enable) + return; + + /* recover default RSS configuration */ + igc_rss_configure(dev); + + /* disable RSS logic and clear filter data */ + igc_rss_disable(dev); + memset(rss_filter, 0, sizeof(*rss_filter)); +} + +static int +igc_dev_mq_rx_configure(struct rte_eth_dev *dev) +{ + if (RTE_ETH_DEV_SRIOV(dev).active) { + PMD_DRV_LOG(ERR, "SRIOV unsupported!"); + return -EINVAL; + } + + switch (dev->data->dev_conf.rxmode.mq_mode) { + case ETH_MQ_RX_RSS: + igc_rss_configure(dev); + break; + case ETH_MQ_RX_NONE: + /* + * configure RSS register for following, + * then disable the RSS logic + */ + igc_rss_configure(dev); + igc_rss_disable(dev); + break; + default: + PMD_DRV_LOG(ERR, "rx mode(%d) not supported!", + dev->data->dev_conf.rxmode.mq_mode); + return -EINVAL; + } + return 0; +} + +int +igc_rx_init(struct rte_eth_dev *dev) +{ + struct igc_rx_queue *rxq; + struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); + uint64_t offloads = dev->data->dev_conf.rxmode.offloads; + uint32_t max_rx_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len; + uint32_t rctl; + uint32_t rxcsum; + uint16_t buf_size; + uint16_t rctl_bsize; + uint16_t i; + int ret; + + dev->rx_pkt_burst = igc_recv_pkts; + + /* + * Make sure receives are disabled while setting + * up the descriptor ring. + */ + rctl = IGC_READ_REG(hw, IGC_RCTL); + IGC_WRITE_REG(hw, IGC_RCTL, rctl & ~IGC_RCTL_EN); + + /* Configure support of jumbo frames, if any. */ + if (offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) { + rctl |= IGC_RCTL_LPE; + + /* + * Set maximum packet length by default, and might be updated + * together with enabling/disabling dual VLAN. + */ + IGC_WRITE_REG(hw, IGC_RLPML, max_rx_pkt_len); + } else { + rctl &= ~IGC_RCTL_LPE; + } + + /* Configure and enable each RX queue. 
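For each queue: replenish the mbuf ring, program the descriptor ring base/length, and set the SRRCTL buffer sizes and RXDCTL thresholds.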
*/ + rctl_bsize = 0; + for (i = 0; i < dev->data->nb_rx_queues; i++) { + uint64_t bus_addr; + uint32_t rxdctl; + uint32_t srrctl; + + rxq = dev->data->rx_queues[i]; + rxq->flags = 0; + + /* Allocate buffers for descriptor rings and set up queue */ + ret = igc_alloc_rx_queue_mbufs(rxq); + if (ret) + return ret; + + /* + * Reset crc_len in case it was changed after queue setup by a + * call to configure + */ + rxq->crc_len = (offloads & DEV_RX_OFFLOAD_KEEP_CRC) ? + RTE_ETHER_CRC_LEN : 0; + + bus_addr = rxq->rx_ring_phys_addr; + IGC_WRITE_REG(hw, IGC_RDLEN(rxq->reg_idx), + rxq->nb_rx_desc * + sizeof(union igc_adv_rx_desc)); + IGC_WRITE_REG(hw, IGC_RDBAH(rxq->reg_idx), + (uint32_t)(bus_addr >> 32)); + IGC_WRITE_REG(hw, IGC_RDBAL(rxq->reg_idx), + (uint32_t)bus_addr); + + /* set descriptor configuration */ + srrctl = IGC_SRRCTL_DESCTYPE_ADV_ONEBUF; + + srrctl |= (uint32_t)(RTE_PKTMBUF_HEADROOM / 64) << + IGC_SRRCTL_BSIZEHEADER_SHIFT; + /* + * Configure RX buffer size. + */ + buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) - + RTE_PKTMBUF_HEADROOM); + if (buf_size >= 1024) { + /* + * Configure the BSIZEPACKET field of the SRRCTL + * register of the queue. + * Value is in 1 KB resolution, from 1 KB to 16 KB. + * If this field is equal to 0b, then RCTL.BSIZE + * determines the RX packet buffer size. + */ + + srrctl |= ((buf_size >> IGC_SRRCTL_BSIZEPKT_SHIFT) & + IGC_SRRCTL_BSIZEPKT_MASK); + buf_size = (uint16_t)((srrctl & + IGC_SRRCTL_BSIZEPKT_MASK) << + IGC_SRRCTL_BSIZEPKT_SHIFT); + + /* It adds dual VLAN length for supporting dual VLAN */ + if (max_rx_pkt_len + 2 * VLAN_TAG_SIZE > buf_size) + dev->data->scattered_rx = 1; + } else { + /* + * Use BSIZE field of the device RCTL register. + */ + if (rctl_bsize == 0 || rctl_bsize > buf_size) + rctl_bsize = buf_size; + dev->data->scattered_rx = 1; + } + + /* Set if packets are dropped when no descriptors available */ + if (rxq->drop_en) + srrctl |= IGC_SRRCTL_DROP_EN; + + IGC_WRITE_REG(hw, IGC_SRRCTL(rxq->reg_idx), srrctl); + + /* Enable this RX queue. */ + rxdctl = IGC_RXDCTL_QUEUE_ENABLE; + rxdctl |= ((uint32_t)rxq->pthresh << IGC_RXDCTL_PTHRESH_SHIFT) & + IGC_RXDCTL_PTHRESH_MSK; + rxdctl |= ((uint32_t)rxq->hthresh << IGC_RXDCTL_HTHRESH_SHIFT) & + IGC_RXDCTL_HTHRESH_MSK; + rxdctl |= ((uint32_t)rxq->wthresh << IGC_RXDCTL_WTHRESH_SHIFT) & + IGC_RXDCTL_WTHRESH_MSK; + IGC_WRITE_REG(hw, IGC_RXDCTL(rxq->reg_idx), rxdctl); + } + + if (offloads & DEV_RX_OFFLOAD_SCATTER) + dev->data->scattered_rx = 1; + + if (dev->data->scattered_rx) { + PMD_DRV_LOG(DEBUG, "forcing scatter mode"); + dev->rx_pkt_burst = igc_recv_scattered_pkts; + } + /* + * Setup BSIZE field of RCTL register, if needed. + * Buffer sizes >= 1024 are not [supposed to be] setup in the RCTL + * register, since the code above configures the SRRCTL register of + * the RX queue in such a case. + * All configurable sizes are: + * 16384: rctl |= (IGC_RCTL_SZ_16384 | IGC_RCTL_BSEX); + * 8192: rctl |= (IGC_RCTL_SZ_8192 | IGC_RCTL_BSEX); + * 4096: rctl |= (IGC_RCTL_SZ_4096 | IGC_RCTL_BSEX); + * 2048: rctl |= IGC_RCTL_SZ_2048; + * 1024: rctl |= IGC_RCTL_SZ_1024; + * 512: rctl |= IGC_RCTL_SZ_512; + * 256: rctl |= IGC_RCTL_SZ_256; + */ + if (rctl_bsize > 0) { + if (rctl_bsize >= 512) /* 512 <= buf_size < 1024 - use 512 */ + rctl |= IGC_RCTL_SZ_512; + else /* 256 <= buf_size < 512 - use 256 */ + rctl |= IGC_RCTL_SZ_256; + } + + /* + * Configure RSS if device configured with multiple RX queues. 
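+ * For ETH_MQ_RX_NONE, igc_dev_mq_rx_configure() still programs the RSS registers and then disables the RSS logic.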
+ */ + igc_dev_mq_rx_configure(dev); + + /* Update the rctl since igc_dev_mq_rx_configure may change its value */ + rctl |= IGC_READ_REG(hw, IGC_RCTL); + + /* + * Setup the Checksum Register. + * Receive Full-Packet Checksum Offload is mutually exclusive with RSS. + */ + rxcsum = IGC_READ_REG(hw, IGC_RXCSUM); + rxcsum |= IGC_RXCSUM_PCSD; + + /* Enable both L3/L4 rx checksum offload */ + if (offloads & DEV_RX_OFFLOAD_IPV4_CKSUM) + rxcsum |= IGC_RXCSUM_IPOFL; + else + rxcsum &= ~IGC_RXCSUM_IPOFL; + + if (offloads & + (DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM)) { + rxcsum |= IGC_RXCSUM_TUOFL; + offloads |= DEV_RX_OFFLOAD_SCTP_CKSUM; + } else { + rxcsum &= ~IGC_RXCSUM_TUOFL; + } + + if (offloads & DEV_RX_OFFLOAD_SCTP_CKSUM) + rxcsum |= IGC_RXCSUM_CRCOFL; + else + rxcsum &= ~IGC_RXCSUM_CRCOFL; + + IGC_WRITE_REG(hw, IGC_RXCSUM, rxcsum); + + /* Setup the Receive Control Register. */ + if (offloads & DEV_RX_OFFLOAD_KEEP_CRC) + rctl &= ~IGC_RCTL_SECRC; /* Do not Strip Ethernet CRC. */ + else + rctl |= IGC_RCTL_SECRC; /* Strip Ethernet CRC. */ + + rctl &= ~IGC_RCTL_MO_MSK; + rctl &= ~IGC_RCTL_LBM_MSK; + rctl |= IGC_RCTL_EN | IGC_RCTL_BAM | IGC_RCTL_LBM_NO | + IGC_RCTL_DPF | + (hw->mac.mc_filter_type << IGC_RCTL_MO_SHIFT); + + if (dev->data->dev_conf.lpbk_mode == 1) + rctl |= IGC_RCTL_LBM_MAC; + + rctl &= ~(IGC_RCTL_HSEL_MSK | IGC_RCTL_CFIEN | IGC_RCTL_CFI | + IGC_RCTL_PSP | IGC_RCTL_PMCF); + + /* Make sure VLAN Filters are off. */ + rctl &= ~IGC_RCTL_VFE; + /* Don't store bad packets. */ + rctl &= ~IGC_RCTL_SBP; + + /* Enable Receives. */ + IGC_WRITE_REG(hw, IGC_RCTL, rctl); + + /* + * Setup the HW Rx Head and Tail Descriptor Pointers. + * This needs to be done after enable. + */ + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = dev->data->rx_queues[i]; + IGC_WRITE_REG(hw, IGC_RDH(rxq->reg_idx), 0); + IGC_WRITE_REG(hw, IGC_RDT(rxq->reg_idx), + rxq->nb_rx_desc - 1); + + /* strip queue vlan offload */ + if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) { + uint32_t dvmolr; + dvmolr = IGC_READ_REG(hw, IGC_DVMOLR(rxq->queue_id)); + + /* If vlan been stripped off, the CRC is meaningless. */ + dvmolr |= IGC_DVMOLR_STRVLAN | IGC_DVMOLR_STRCRC; + IGC_WRITE_REG(hw, IGC_DVMOLR(rxq->reg_idx), dvmolr); + } + } + + return 0; +} + +static void +igc_reset_rx_queue(struct igc_rx_queue *rxq) +{ + static const union igc_adv_rx_desc zeroed_desc = { {0} }; + unsigned int i; + + /* Zero out HW ring memory */ + for (i = 0; i < rxq->nb_rx_desc; i++) + rxq->rx_ring[i] = zeroed_desc; + + rxq->rx_tail = 0; + rxq->pkt_first_seg = NULL; + rxq->pkt_last_seg = NULL; +} + +int +eth_igc_rx_queue_setup(struct rte_eth_dev *dev, + uint16_t queue_idx, + uint16_t nb_desc, + unsigned int socket_id, + const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *mp) +{ + struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); + const struct rte_memzone *rz; + struct igc_rx_queue *rxq; + unsigned int size; + + /* + * Validate number of receive descriptors. + * It must not exceed hardware maximum, and must be multiple + * of IGC_RX_DESCRIPTOR_MULTIPLE. 
+ */ + if (nb_desc % IGC_RX_DESCRIPTOR_MULTIPLE != 0 || + nb_desc > IGC_MAX_RXD || nb_desc < IGC_MIN_RXD) { + PMD_DRV_LOG(ERR, + "RX descriptor must be multiple of %u(cur: %u) and between %u and %u", + IGC_RX_DESCRIPTOR_MULTIPLE, nb_desc, + IGC_MIN_RXD, IGC_MAX_RXD); + return -EINVAL; + } + + /* Free memory prior to re-allocation if needed */ + if (dev->data->rx_queues[queue_idx] != NULL) { + igc_rx_queue_release(dev->data->rx_queues[queue_idx]); + dev->data->rx_queues[queue_idx] = NULL; + } + + /* First allocate the RX queue data structure. */ + rxq = rte_zmalloc("ethdev RX queue", sizeof(struct igc_rx_queue), + RTE_CACHE_LINE_SIZE); + if (rxq == NULL) + return -ENOMEM; + rxq->offloads = rx_conf->offloads; + rxq->mb_pool = mp; + rxq->nb_rx_desc = nb_desc; + rxq->pthresh = rx_conf->rx_thresh.pthresh; + rxq->hthresh = rx_conf->rx_thresh.hthresh; + rxq->wthresh = rx_conf->rx_thresh.wthresh; + rxq->drop_en = rx_conf->rx_drop_en; + rxq->rx_free_thresh = rx_conf->rx_free_thresh; + rxq->queue_id = queue_idx; + rxq->reg_idx = queue_idx; + rxq->port_id = dev->data->port_id; + + /* + * Allocate RX ring hardware descriptors. A memzone large enough to + * handle the maximum ring size is allocated in order to allow for + * resizing in later calls to the queue setup function. + */ + size = sizeof(union igc_adv_rx_desc) * IGC_MAX_RXD; + rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx, size, + IGC_ALIGN, socket_id); + if (rz == NULL) { + igc_rx_queue_release(rxq); + return -ENOMEM; + } + rxq->rdt_reg_addr = IGC_PCI_REG_ADDR(hw, IGC_RDT(rxq->reg_idx)); + rxq->rdh_reg_addr = IGC_PCI_REG_ADDR(hw, IGC_RDH(rxq->reg_idx)); + rxq->rx_ring_phys_addr = rz->iova; + rxq->rx_ring = (union igc_adv_rx_desc *)rz->addr; + + /* Allocate software ring. */ + rxq->sw_ring = rte_zmalloc("rxq->sw_ring", + sizeof(struct igc_rx_entry) * nb_desc, + RTE_CACHE_LINE_SIZE); + if (rxq->sw_ring == NULL) { + igc_rx_queue_release(rxq); + return -ENOMEM; + } + + PMD_DRV_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%" PRIx64, + rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr); + + dev->data->rx_queues[queue_idx] = rxq; + igc_reset_rx_queue(rxq); + + return 0; +} + +/* prepare packets for transmit */ +static uint16_t +eth_igc_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + int i, ret; + struct rte_mbuf *m; + + for (i = 0; i < nb_pkts; i++) { + m = tx_pkts[i]; + + /* Check some limitations for TSO in hardware */ + if (m->ol_flags & IGC_TX_OFFLOAD_SEG) + if (m->tso_segsz > IGC_TSO_MAX_MSS || + m->l2_len + m->l3_len + m->l4_len > + IGC_TSO_MAX_HDRLEN) { + rte_errno = EINVAL; + return i; + } + + if (m->ol_flags & IGC_TX_OFFLOAD_NOTSUP_MASK) { + rte_errno = ENOTSUP; + return i; + } + +#ifdef RTE_LIBRTE_ETHDEV_DEBUG + ret = rte_validate_tx_offload(m); + if (ret != 0) { + rte_errno = -ret; + return i; + } +#endif + ret = rte_net_intel_cksum_prepare(m); + if (ret != 0) { + rte_errno = -ret; + return i; + } + } + + return i; +} + +/* + *There're some limitations in hardware for TCP segmentation offload. We + *should check whether the parameters are valid. + */ +static inline uint64_t +check_tso_para(uint64_t ol_req, union igc_tx_offload ol_para) +{ + if (!(ol_req & IGC_TX_OFFLOAD_SEG)) + return ol_req; + if (ol_para.tso_segsz > IGC_TSO_MAX_MSS || ol_para.l2_len + + ol_para.l3_len + ol_para.l4_len > IGC_TSO_MAX_HDRLEN) { + ol_req &= ~IGC_TX_OFFLOAD_SEG; + ol_req |= PKT_TX_TCP_CKSUM; + } + return ol_req; +} + +/* + * Check which hardware context can be used. 
Use the existing match + * or create a new context descriptor. + */ +static inline uint32_t +what_advctx_update(struct igc_tx_queue *txq, uint64_t flags, + union igc_tx_offload tx_offload) +{ + uint32_t curr = txq->ctx_curr; + + /* If match with the current context */ + if (likely(txq->ctx_cache[curr].flags == flags && + txq->ctx_cache[curr].tx_offload.data == + (txq->ctx_cache[curr].tx_offload_mask.data & + tx_offload.data))) { + return curr; + } + + /* Total two context, if match with the second context */ + curr ^= 1; + if (likely(txq->ctx_cache[curr].flags == flags && + txq->ctx_cache[curr].tx_offload.data == + (txq->ctx_cache[curr].tx_offload_mask.data & + tx_offload.data))) { + txq->ctx_curr = curr; + return curr; + } + + /* Mismatch, create new one */ + return IGC_CTX_NUM; +} + +/* + * This is a separate function, looking for optimization opportunity here + * Rework required to go with the pre-defined values. + */ +static inline void +igc_set_xmit_ctx(struct igc_tx_queue *txq, + volatile struct igc_adv_tx_context_desc *ctx_txd, + uint64_t ol_flags, union igc_tx_offload tx_offload) +{ + uint32_t type_tucmd_mlhl; + uint32_t mss_l4len_idx; + uint32_t ctx_curr; + uint32_t vlan_macip_lens; + union igc_tx_offload tx_offload_mask; + + /* Use the previous context */ + txq->ctx_curr ^= 1; + ctx_curr = txq->ctx_curr; + + tx_offload_mask.data = 0; + type_tucmd_mlhl = 0; + + /* Specify which HW CTX to upload. */ + mss_l4len_idx = (ctx_curr << IGC_ADVTXD_IDX_SHIFT); + + if (ol_flags & PKT_TX_VLAN_PKT) + tx_offload_mask.vlan_tci = 0xffff; + + /* check if TCP segmentation required for this packet */ + if (ol_flags & IGC_TX_OFFLOAD_SEG) { + /* implies IP cksum in IPv4 */ + if (ol_flags & PKT_TX_IP_CKSUM) + type_tucmd_mlhl = IGC_ADVTXD_TUCMD_IPV4 | + IGC_ADVTXD_DTYP_CTXT | IGC_ADVTXD_DCMD_DEXT; + else + type_tucmd_mlhl = IGC_ADVTXD_TUCMD_IPV6 | + IGC_ADVTXD_DTYP_CTXT | IGC_ADVTXD_DCMD_DEXT; + + if (ol_flags & PKT_TX_TCP_SEG) + type_tucmd_mlhl |= IGC_ADVTXD_TUCMD_L4T_TCP; + else + type_tucmd_mlhl |= IGC_ADVTXD_TUCMD_L4T_UDP; + + tx_offload_mask.data |= TX_TSO_CMP_MASK; + mss_l4len_idx |= (uint32_t)tx_offload.tso_segsz << + IGC_ADVTXD_MSS_SHIFT; + mss_l4len_idx |= (uint32_t)tx_offload.l4_len << + IGC_ADVTXD_L4LEN_SHIFT; + } else { /* no TSO, check if hardware checksum is needed */ + if (ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK)) + tx_offload_mask.data |= TX_MACIP_LEN_CMP_MASK; + + if (ol_flags & PKT_TX_IP_CKSUM) + type_tucmd_mlhl = IGC_ADVTXD_TUCMD_IPV4; + + switch (ol_flags & PKT_TX_L4_MASK) { + case PKT_TX_TCP_CKSUM: + type_tucmd_mlhl |= IGC_ADVTXD_TUCMD_L4T_TCP | + IGC_ADVTXD_DTYP_CTXT | IGC_ADVTXD_DCMD_DEXT; + mss_l4len_idx |= (uint32_t)sizeof(struct rte_tcp_hdr) + << IGC_ADVTXD_L4LEN_SHIFT; + break; + case PKT_TX_UDP_CKSUM: + type_tucmd_mlhl |= IGC_ADVTXD_TUCMD_L4T_UDP | + IGC_ADVTXD_DTYP_CTXT | IGC_ADVTXD_DCMD_DEXT; + mss_l4len_idx |= (uint32_t)sizeof(struct rte_udp_hdr) + << IGC_ADVTXD_L4LEN_SHIFT; + break; + case PKT_TX_SCTP_CKSUM: + type_tucmd_mlhl |= IGC_ADVTXD_TUCMD_L4T_SCTP | + IGC_ADVTXD_DTYP_CTXT | IGC_ADVTXD_DCMD_DEXT; + mss_l4len_idx |= (uint32_t)sizeof(struct rte_sctp_hdr) + << IGC_ADVTXD_L4LEN_SHIFT; + break; + default: + type_tucmd_mlhl |= IGC_ADVTXD_TUCMD_L4T_RSV | + IGC_ADVTXD_DTYP_CTXT | IGC_ADVTXD_DCMD_DEXT; + break; + } + } + + txq->ctx_cache[ctx_curr].flags = ol_flags; + txq->ctx_cache[ctx_curr].tx_offload.data = + tx_offload_mask.data & tx_offload.data; + txq->ctx_cache[ctx_curr].tx_offload_mask = tx_offload_mask; + + ctx_txd->type_tucmd_mlhl = 
rte_cpu_to_le_32(type_tucmd_mlhl); + vlan_macip_lens = (uint32_t)tx_offload.data; + ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens); + ctx_txd->mss_l4len_idx = rte_cpu_to_le_32(mss_l4len_idx); + ctx_txd->u.launch_time = 0; +} + +static inline uint32_t +tx_desc_vlan_flags_to_cmdtype(uint64_t ol_flags) +{ + uint32_t cmdtype; + static uint32_t vlan_cmd[2] = {0, IGC_ADVTXD_DCMD_VLE}; + static uint32_t tso_cmd[2] = {0, IGC_ADVTXD_DCMD_TSE}; + cmdtype = vlan_cmd[(ol_flags & PKT_TX_VLAN_PKT) != 0]; + cmdtype |= tso_cmd[(ol_flags & IGC_TX_OFFLOAD_SEG) != 0]; + return cmdtype; +} + +static inline uint32_t +tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags) +{ + static const uint32_t l4_olinfo[2] = {0, IGC_ADVTXD_POPTS_TXSM}; + static const uint32_t l3_olinfo[2] = {0, IGC_ADVTXD_POPTS_IXSM}; + uint32_t tmp; + + tmp = l4_olinfo[(ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM]; + tmp |= l3_olinfo[(ol_flags & PKT_TX_IP_CKSUM) != 0]; + tmp |= l4_olinfo[(ol_flags & IGC_TX_OFFLOAD_SEG) != 0]; + return tmp; +} + +static uint16_t +igc_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) +{ + struct igc_tx_queue * const txq = tx_queue; + struct igc_tx_entry * const sw_ring = txq->sw_ring; + struct igc_tx_entry *txe, *txn; + volatile union igc_adv_tx_desc * const txr = txq->tx_ring; + volatile union igc_adv_tx_desc *txd; + struct rte_mbuf *tx_pkt; + struct rte_mbuf *m_seg; + uint64_t buf_dma_addr; + uint32_t olinfo_status; + uint32_t cmd_type_len; + uint32_t pkt_len; + uint16_t slen; + uint64_t ol_flags; + uint16_t tx_end; + uint16_t tx_id; + uint16_t tx_last; + uint16_t nb_tx; + uint64_t tx_ol_req; + uint32_t new_ctx = 0; + union igc_tx_offload tx_offload = {0}; + + tx_id = txq->tx_tail; + txe = &sw_ring[tx_id]; + + for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) { + tx_pkt = *tx_pkts++; + pkt_len = tx_pkt->pkt_len; + + RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf); + + /* + * The number of descriptors that must be allocated for a + * packet is the number of segments of that packet, plus 1 + * Context Descriptor for the VLAN Tag Identifier, if any. + * Determine the last TX descriptor to allocate in the TX ring + * for the packet, starting from the current position (tx_id) + * in the ring. + */ + tx_last = (uint16_t)(tx_id + tx_pkt->nb_segs - 1); + + ol_flags = tx_pkt->ol_flags; + tx_ol_req = ol_flags & IGC_TX_OFFLOAD_MASK; + + /* If a Context Descriptor need be built . */ + if (tx_ol_req) { + tx_offload.l2_len = tx_pkt->l2_len; + tx_offload.l3_len = tx_pkt->l3_len; + tx_offload.l4_len = tx_pkt->l4_len; + tx_offload.vlan_tci = tx_pkt->vlan_tci; + tx_offload.tso_segsz = tx_pkt->tso_segsz; + tx_ol_req = check_tso_para(tx_ol_req, tx_offload); + + new_ctx = what_advctx_update(txq, tx_ol_req, + tx_offload); + /* Only allocate context descriptor if required*/ + new_ctx = (new_ctx >= IGC_CTX_NUM); + tx_last = (uint16_t)(tx_last + new_ctx); + } + if (tx_last >= txq->nb_tx_desc) + tx_last = (uint16_t)(tx_last - txq->nb_tx_desc); + + PMD_TX_LOG(DEBUG, + "port_id=%u queue_id=%u pktlen=%u tx_first=%u tx_last=%u", + txq->port_id, txq->queue_id, pkt_len, tx_id, tx_last); + + /* + * Check if there are enough free descriptors in the TX ring + * to transmit the next packet. + * This operation is based on the two following rules: + * + * 1- Only check that the last needed TX descriptor can be + * allocated (by construction, if that descriptor is free, + * all intermediate ones are also free). 
+ * + * For this purpose, the index of the last TX descriptor + * used for a packet (the "last descriptor" of a packet) + * is recorded in the TX entries (the last one included) + * that are associated with all TX descriptors allocated + * for that packet. + * + * 2- Avoid to allocate the last free TX descriptor of the + * ring, in order to never set the TDT register with the + * same value stored in parallel by the NIC in the TDH + * register, which makes the TX engine of the NIC enter + * in a deadlock situation. + * + * By extension, avoid to allocate a free descriptor that + * belongs to the last set of free descriptors allocated + * to the same packet previously transmitted. + */ + + /* + * The "last descriptor" of the previously sent packet, if any, + * which used the last descriptor to allocate. + */ + tx_end = sw_ring[tx_last].last_id; + + /* + * The next descriptor following that "last descriptor" in the + * ring. + */ + tx_end = sw_ring[tx_end].next_id; + + /* + * The "last descriptor" associated with that next descriptor. + */ + tx_end = sw_ring[tx_end].last_id; + + /* + * Check that this descriptor is free. + */ + if (!(txr[tx_end].wb.status & IGC_TXD_STAT_DD)) { + if (nb_tx == 0) + return 0; + goto end_of_tx; + } + + /* + * Set common flags of all TX Data Descriptors. + * + * The following bits must be set in all Data Descriptors: + * - IGC_ADVTXD_DTYP_DATA + * - IGC_ADVTXD_DCMD_DEXT + * + * The following bits must be set in the first Data Descriptor + * and are ignored in the other ones: + * - IGC_ADVTXD_DCMD_IFCS + * - IGC_ADVTXD_MAC_1588 + * - IGC_ADVTXD_DCMD_VLE + * + * The following bits must only be set in the last Data + * Descriptor: + * - IGC_TXD_CMD_EOP + * + * The following bits can be set in any Data Descriptor, but + * are only set in the last Data Descriptor: + * - IGC_TXD_CMD_RS + */ + cmd_type_len = txq->txd_type | + IGC_ADVTXD_DCMD_IFCS | IGC_ADVTXD_DCMD_DEXT; + if (tx_ol_req & IGC_TX_OFFLOAD_SEG) + pkt_len -= (tx_pkt->l2_len + tx_pkt->l3_len + + tx_pkt->l4_len); + olinfo_status = (pkt_len << IGC_ADVTXD_PAYLEN_SHIFT); + + /* + * Timer 0 should be used to for packet timestamping, + * sample the packet timestamp to reg 0 + */ + if (ol_flags & PKT_TX_IEEE1588_TMST) + cmd_type_len |= IGC_ADVTXD_MAC_TSTAMP; + + if (tx_ol_req) { + /* Setup TX Advanced context descriptor if required */ + if (new_ctx) { + volatile struct igc_adv_tx_context_desc * + ctx_txd = (volatile struct + igc_adv_tx_context_desc *)&txr[tx_id]; + + txn = &sw_ring[txe->next_id]; + RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf); + + if (txe->mbuf != NULL) { + rte_pktmbuf_free_seg(txe->mbuf); + txe->mbuf = NULL; + } + + igc_set_xmit_ctx(txq, ctx_txd, tx_ol_req, + tx_offload); + + txe->last_id = tx_last; + tx_id = txe->next_id; + txe = txn; + } + + /* Setup the TX Advanced Data Descriptor */ + cmd_type_len |= + tx_desc_vlan_flags_to_cmdtype(tx_ol_req); + olinfo_status |= + tx_desc_cksum_flags_to_olinfo(tx_ol_req); + olinfo_status |= (uint32_t)txq->ctx_curr << + IGC_ADVTXD_IDX_SHIFT; + } + + m_seg = tx_pkt; + do { + txn = &sw_ring[txe->next_id]; + RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf); + + txd = &txr[tx_id]; + + if (txe->mbuf != NULL) + rte_pktmbuf_free_seg(txe->mbuf); + txe->mbuf = m_seg; + + /* Set up transmit descriptor */ + slen = (uint16_t)m_seg->data_len; + buf_dma_addr = rte_mbuf_data_iova(m_seg); + txd->read.buffer_addr = + rte_cpu_to_le_64(buf_dma_addr); + txd->read.cmd_type_len = + rte_cpu_to_le_32(cmd_type_len | slen); + txd->read.olinfo_status = + rte_cpu_to_le_32(olinfo_status); + txe->last_id = 
tx_last; + tx_id = txe->next_id; + txe = txn; + m_seg = m_seg->next; + } while (m_seg != NULL); + + /* + * The last packet data descriptor needs End Of Packet (EOP) + * and Report Status (RS). + */ + txd->read.cmd_type_len |= + rte_cpu_to_le_32(IGC_TXD_CMD_EOP | IGC_TXD_CMD_RS); + } +end_of_tx: + rte_wmb(); + + /* + * Set the Transmit Descriptor Tail (TDT). + */ + IGC_PCI_REG_WRITE_RELAXED(txq->tdt_reg_addr, tx_id); + PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u", + txq->port_id, txq->queue_id, tx_id, nb_tx); + txq->tx_tail = tx_id; + + return nb_tx; +} + +int eth_igc_tx_descriptor_status(void *tx_queue, uint16_t offset) +{ + struct igc_tx_queue *txq = tx_queue; + volatile uint32_t *status; + uint32_t desc; + + if (unlikely(!txq || offset >= txq->nb_tx_desc)) + return -EINVAL; + + desc = txq->tx_tail + offset; + if (desc >= txq->nb_tx_desc) + desc -= txq->nb_tx_desc; + + status = &txq->tx_ring[desc].wb.status; + if (*status & rte_cpu_to_le_32(IGC_TXD_STAT_DD)) + return RTE_ETH_TX_DESC_DONE; + + return RTE_ETH_TX_DESC_FULL; +} + +static void +igc_tx_queue_release_mbufs(struct igc_tx_queue *txq) +{ + unsigned int i; + + if (txq->sw_ring != NULL) { + for (i = 0; i < txq->nb_tx_desc; i++) { + if (txq->sw_ring[i].mbuf != NULL) { + rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf); + txq->sw_ring[i].mbuf = NULL; + } + } + } +} + +static void +igc_tx_queue_release(struct igc_tx_queue *txq) +{ + igc_tx_queue_release_mbufs(txq); + rte_free(txq->sw_ring); + rte_free(txq); +} + +void eth_igc_tx_queue_release(void *txq) +{ + if (txq) + igc_tx_queue_release(txq); +} + +static void +igc_reset_tx_queue_stat(struct igc_tx_queue *txq) +{ + txq->tx_head = 0; + txq->tx_tail = 0; + txq->ctx_curr = 0; + memset((void *)&txq->ctx_cache, 0, + IGC_CTX_NUM * sizeof(struct igc_advctx_info)); +} + +static void +igc_reset_tx_queue(struct igc_tx_queue *txq) +{ + struct igc_tx_entry *txe = txq->sw_ring; + uint16_t i, prev; + + /* Initialize ring entries */ + prev = (uint16_t)(txq->nb_tx_desc - 1); + for (i = 0; i < txq->nb_tx_desc; i++) { + volatile union igc_adv_tx_desc *txd = &txq->tx_ring[i]; + + txd->wb.status = IGC_TXD_STAT_DD; + txe[i].mbuf = NULL; + txe[i].last_id = i; + txe[prev].next_id = i; + prev = i; + } + + txq->txd_type = IGC_ADVTXD_DTYP_DATA; + igc_reset_tx_queue_stat(txq); +} + +/* + * clear all rx/tx queue + */ +void +igc_dev_clear_queues(struct rte_eth_dev *dev) +{ + uint16_t i; + struct igc_tx_queue *txq; + struct igc_rx_queue *rxq; + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + txq = dev->data->tx_queues[i]; + if (txq != NULL) { + igc_tx_queue_release_mbufs(txq); + igc_reset_tx_queue(txq); + } + } + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = dev->data->rx_queues[i]; + if (rxq != NULL) { + igc_rx_queue_release_mbufs(rxq); + igc_reset_rx_queue(rxq); + } + } +} + +int eth_igc_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, + uint16_t nb_desc, unsigned int socket_id, + const struct rte_eth_txconf *tx_conf) +{ + const struct rte_memzone *tz; + struct igc_tx_queue *txq; + struct igc_hw *hw; + uint32_t size; + + if (nb_desc % IGC_TX_DESCRIPTOR_MULTIPLE != 0 || + nb_desc > IGC_MAX_TXD || nb_desc < IGC_MIN_TXD) { + PMD_DRV_LOG(ERR, + "TX-descriptor must be a multiple of %u and between %u and %u, cur: %u", + IGC_TX_DESCRIPTOR_MULTIPLE, + IGC_MAX_TXD, IGC_MIN_TXD, nb_desc); + return -EINVAL; + } + + hw = IGC_DEV_PRIVATE_HW(dev); + + /* + * The tx_free_thresh and tx_rs_thresh values are not used in the 2.5G + * driver. 
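+ * Non-zero values are accepted for compatibility and only reported via the INFO logs below.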
+ */ + if (tx_conf->tx_free_thresh != 0) + PMD_DRV_LOG(INFO, + "The tx_free_thresh parameter is not used for the 2.5G driver"); + if (tx_conf->tx_rs_thresh != 0) + PMD_DRV_LOG(INFO, + "The tx_rs_thresh parameter is not used for the 2.5G driver"); + if (tx_conf->tx_thresh.wthresh == 0) + PMD_DRV_LOG(INFO, + "To improve 2.5G driver performance, consider setting the TX WTHRESH value to 4, 8, or 16."); + + /* Free memory prior to re-allocation if needed */ + if (dev->data->tx_queues[queue_idx] != NULL) { + igc_tx_queue_release(dev->data->tx_queues[queue_idx]); + dev->data->tx_queues[queue_idx] = NULL; + } + + /* First allocate the tx queue data structure */ + txq = rte_zmalloc("ethdev TX queue", sizeof(struct igc_tx_queue), + RTE_CACHE_LINE_SIZE); + if (txq == NULL) + return -ENOMEM; + + /* + * Allocate TX ring hardware descriptors. A memzone large enough to + * handle the maximum ring size is allocated in order to allow for + * resizing in later calls to the queue setup function. + */ + size = sizeof(union igc_adv_tx_desc) * IGC_MAX_TXD; + tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx, size, + IGC_ALIGN, socket_id); + if (tz == NULL) { + igc_tx_queue_release(txq); + return -ENOMEM; + } + + txq->nb_tx_desc = nb_desc; + txq->pthresh = tx_conf->tx_thresh.pthresh; + txq->hthresh = tx_conf->tx_thresh.hthresh; + txq->wthresh = tx_conf->tx_thresh.wthresh; + + txq->queue_id = queue_idx; + txq->reg_idx = queue_idx; + txq->port_id = dev->data->port_id; + + txq->tdt_reg_addr = IGC_PCI_REG_ADDR(hw, IGC_TDT(txq->reg_idx)); + txq->tx_ring_phys_addr = tz->iova; + + txq->tx_ring = (union igc_adv_tx_desc *)tz->addr; + /* Allocate software ring */ + txq->sw_ring = rte_zmalloc("txq->sw_ring", + sizeof(struct igc_tx_entry) * nb_desc, + RTE_CACHE_LINE_SIZE); + if (txq->sw_ring == NULL) { + igc_tx_queue_release(txq); + return -ENOMEM; + } + PMD_DRV_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%" PRIx64, + txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr); + + igc_reset_tx_queue(txq); + dev->tx_pkt_burst = igc_xmit_pkts; + dev->tx_pkt_prepare = &eth_igc_prep_pkts; + dev->data->tx_queues[queue_idx] = txq; + txq->offloads = tx_conf->offloads; + + return 0; +} + +int +eth_igc_tx_done_cleanup(void *txqueue, uint32_t free_cnt) +{ + struct igc_tx_queue *txq = txqueue; + struct igc_tx_entry *sw_ring; + volatile union igc_adv_tx_desc *txr; + uint16_t tx_first; /* First segment analyzed. */ + uint16_t tx_id; /* Current segment being processed. */ + uint16_t tx_last; /* Last segment in the current packet. */ + uint16_t tx_next; /* First segment of the next packet. */ + uint32_t count; + + if (txq == NULL) + return -ENODEV; + + count = 0; + sw_ring = txq->sw_ring; + txr = txq->tx_ring; + + /* + * tx_tail is the last sent packet on the sw_ring. Goto the end + * of that packet (the last segment in the packet chain) and + * then the next segment will be the start of the oldest segment + * in the sw_ring. This is the first packet that will be + * attempted to be freed. + */ + + /* Get last segment in most recently added packet. */ + tx_first = sw_ring[txq->tx_tail].last_id; + + /* Get the next segment, which is the oldest segment in ring. */ + tx_first = sw_ring[tx_first].next_id; + + /* Set the current index to the first. */ + tx_id = tx_first; + + /* + * Loop through each packet. For each packet, verify that an + * mbuf exists and that the last segment is free. If so, free + * it and move on.
+ */ + while (1) { + tx_last = sw_ring[tx_id].last_id; + + if (sw_ring[tx_last].mbuf) { + if (!(txr[tx_last].wb.status & + rte_cpu_to_le_32(IGC_TXD_STAT_DD))) + break; + + /* Get the start of the next packet. */ + tx_next = sw_ring[tx_last].next_id; + + /* + * Loop through all segments in a + * packet. + */ + do { + rte_pktmbuf_free_seg(sw_ring[tx_id].mbuf); + sw_ring[tx_id].mbuf = NULL; + sw_ring[tx_id].last_id = tx_id; + + /* Move to next segemnt. */ + tx_id = sw_ring[tx_id].next_id; + } while (tx_id != tx_next); + + /* + * Increment the number of packets + * freed. + */ + count++; + if (unlikely(count == free_cnt)) + break; + } else { + /* + * There are multiple reasons to be here: + * 1) All the packets on the ring have been + * freed - tx_id is equal to tx_first + * and some packets have been freed. + * - Done, exit + * 2) Interfaces has not sent a rings worth of + * packets yet, so the segment after tail is + * still empty. Or a previous call to this + * function freed some of the segments but + * not all so there is a hole in the list. + * Hopefully this is a rare case. + * - Walk the list and find the next mbuf. If + * there isn't one, then done. + */ + if (likely(tx_id == tx_first && count != 0)) + break; + + /* + * Walk the list and find the next mbuf, if any. + */ + do { + /* Move to next segemnt. */ + tx_id = sw_ring[tx_id].next_id; + + if (sw_ring[tx_id].mbuf) + break; + + } while (tx_id != tx_first); + + /* + * Determine why previous loop bailed. If there + * is not an mbuf, done. + */ + if (sw_ring[tx_id].mbuf == NULL) + break; + } + } + + return count; +} + +void +igc_tx_init(struct rte_eth_dev *dev) +{ + struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); + uint32_t tctl; + uint32_t txdctl; + uint16_t i; + + /* Setup the Base and Length of the Tx Descriptor Rings. */ + for (i = 0; i < dev->data->nb_tx_queues; i++) { + struct igc_tx_queue *txq = dev->data->tx_queues[i]; + uint64_t bus_addr = txq->tx_ring_phys_addr; + + IGC_WRITE_REG(hw, IGC_TDLEN(txq->reg_idx), + txq->nb_tx_desc * + sizeof(union igc_adv_tx_desc)); + IGC_WRITE_REG(hw, IGC_TDBAH(txq->reg_idx), + (uint32_t)(bus_addr >> 32)); + IGC_WRITE_REG(hw, IGC_TDBAL(txq->reg_idx), + (uint32_t)bus_addr); + + /* Setup the HW Tx Head and Tail descriptor pointers. */ + IGC_WRITE_REG(hw, IGC_TDT(txq->reg_idx), 0); + IGC_WRITE_REG(hw, IGC_TDH(txq->reg_idx), 0); + + /* Setup Transmit threshold registers. */ + txdctl = ((uint32_t)txq->pthresh << IGC_TXDCTL_PTHRESH_SHIFT) & + IGC_TXDCTL_PTHRESH_MSK; + txdctl |= ((uint32_t)txq->hthresh << IGC_TXDCTL_HTHRESH_SHIFT) & + IGC_TXDCTL_HTHRESH_MSK; + txdctl |= ((uint32_t)txq->wthresh << IGC_TXDCTL_WTHRESH_SHIFT) & + IGC_TXDCTL_WTHRESH_MSK; + txdctl |= IGC_TXDCTL_QUEUE_ENABLE; + IGC_WRITE_REG(hw, IGC_TXDCTL(txq->reg_idx), txdctl); + } + + igc_config_collision_dist(hw); + + /* Program the Transmit Control Register. */ + tctl = IGC_READ_REG(hw, IGC_TCTL); + tctl &= ~IGC_TCTL_CT; + tctl |= (IGC_TCTL_PSP | IGC_TCTL_RTLC | IGC_TCTL_EN | + ((uint32_t)IGC_COLLISION_THRESHOLD << IGC_CT_SHIFT)); + + /* This write will effectively turn on the transmit unit. 
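IGC_TCTL_EN was OR'ed into tctl above.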
*/ + IGC_WRITE_REG(hw, IGC_TCTL, tctl); +} + +void +eth_igc_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, + struct rte_eth_rxq_info *qinfo) +{ + struct igc_rx_queue *rxq; + + rxq = dev->data->rx_queues[queue_id]; + + qinfo->mp = rxq->mb_pool; + qinfo->scattered_rx = dev->data->scattered_rx; + qinfo->nb_desc = rxq->nb_rx_desc; + + qinfo->conf.rx_free_thresh = rxq->rx_free_thresh; + qinfo->conf.rx_drop_en = rxq->drop_en; + qinfo->conf.offloads = rxq->offloads; + qinfo->conf.rx_thresh.hthresh = rxq->hthresh; + qinfo->conf.rx_thresh.pthresh = rxq->pthresh; + qinfo->conf.rx_thresh.wthresh = rxq->wthresh; +} + +void +eth_igc_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, + struct rte_eth_txq_info *qinfo) +{ + struct igc_tx_queue *txq; + + txq = dev->data->tx_queues[queue_id]; + + qinfo->nb_desc = txq->nb_tx_desc; + + qinfo->conf.tx_thresh.pthresh = txq->pthresh; + qinfo->conf.tx_thresh.hthresh = txq->hthresh; + qinfo->conf.tx_thresh.wthresh = txq->wthresh; + qinfo->conf.offloads = txq->offloads; +} + +void +eth_igc_vlan_strip_queue_set(struct rte_eth_dev *dev, + uint16_t rx_queue_id, int on) +{ + struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); + struct igc_rx_queue *rxq = dev->data->rx_queues[rx_queue_id]; + uint32_t reg_val; + + if (rx_queue_id >= IGC_QUEUE_PAIRS_NUM) { + PMD_DRV_LOG(ERR, "Queue index(%u) illegal, max is %u", + rx_queue_id, IGC_QUEUE_PAIRS_NUM - 1); + return; + } + + reg_val = IGC_READ_REG(hw, IGC_DVMOLR(rx_queue_id)); + if (on) { + /* If vlan been stripped off, the CRC is meaningless. */ + reg_val |= IGC_DVMOLR_STRVLAN | IGC_DVMOLR_STRCRC; + rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP; + } else { + reg_val &= ~(IGC_DVMOLR_STRVLAN | IGC_DVMOLR_HIDVLAN | + IGC_DVMOLR_STRCRC); + rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP; + } + + IGC_WRITE_REG(hw, IGC_DVMOLR(rx_queue_id), reg_val); +} diff --git a/src/spdk/dpdk/drivers/net/igc/igc_txrx.h b/src/spdk/dpdk/drivers/net/igc/igc_txrx.h new file mode 100644 index 000000000..f2b2d75bb --- /dev/null +++ b/src/spdk/dpdk/drivers/net/igc/igc_txrx.h @@ -0,0 +1,59 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2020 Intel Corporation + */ + +#ifndef _IGC_TXRX_H_ +#define _IGC_TXRX_H_ + +#include "igc_ethdev.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * RX/TX function prototypes + */ +void eth_igc_tx_queue_release(void *txq); +void eth_igc_rx_queue_release(void *rxq); +void igc_dev_clear_queues(struct rte_eth_dev *dev); +int eth_igc_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id, + uint16_t nb_rx_desc, unsigned int socket_id, + const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *mb_pool); + +uint32_t eth_igc_rx_queue_count(struct rte_eth_dev *dev, + uint16_t rx_queue_id); + +int eth_igc_rx_descriptor_done(void *rx_queue, uint16_t offset); + +int eth_igc_rx_descriptor_status(void *rx_queue, uint16_t offset); + +int eth_igc_tx_descriptor_status(void *tx_queue, uint16_t offset); + +int eth_igc_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, + uint16_t nb_desc, unsigned int socket_id, + const struct rte_eth_txconf *tx_conf); +int eth_igc_tx_done_cleanup(void *txqueue, uint32_t free_cnt); + +int igc_rx_init(struct rte_eth_dev *dev); +void igc_tx_init(struct rte_eth_dev *dev); +void igc_rss_disable(struct rte_eth_dev *dev); +void +igc_hw_rss_hash_set(struct igc_hw *hw, struct rte_eth_rss_conf *rss_conf); +int igc_del_rss_filter(struct rte_eth_dev *dev); +void igc_rss_conf_set(struct igc_rss_filter *out, + const struct rte_flow_action_rss *rss); +int igc_add_rss_filter(struct 
rte_eth_dev *dev, struct igc_rss_filter *rss); +void igc_clear_rss_filter(struct rte_eth_dev *dev); +void eth_igc_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, + struct rte_eth_rxq_info *qinfo); +void eth_igc_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, + struct rte_eth_txq_info *qinfo); +void eth_igc_vlan_strip_queue_set(struct rte_eth_dev *dev, + uint16_t rx_queue_id, int on); +#ifdef __cplusplus +} +#endif + +#endif /* _IGC_TXRX_H_ */ diff --git a/src/spdk/dpdk/drivers/net/igc/meson.build b/src/spdk/dpdk/drivers/net/igc/meson.build new file mode 100644 index 000000000..fba119c98 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/igc/meson.build @@ -0,0 +1,15 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2019-2020 Intel Corporation + +subdir('base') +objs = [base_objs] + +sources = files( + 'igc_logs.c', + 'igc_ethdev.c', + 'igc_txrx.c', + 'igc_filter.c', + 'igc_flow.c' +) + +includes += include_directories('base') diff --git a/src/spdk/dpdk/drivers/net/igc/rte_pmd_igc_version.map b/src/spdk/dpdk/drivers/net/igc/rte_pmd_igc_version.map new file mode 100644 index 000000000..4a76d1d52 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/igc/rte_pmd_igc_version.map @@ -0,0 +1,3 @@ +DPDK_21 { + local: *; +}; diff --git a/src/spdk/dpdk/drivers/net/ionic/Makefile b/src/spdk/dpdk/drivers/net/ionic/Makefile new file mode 100644 index 000000000..7442e2c5c --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ionic/Makefile @@ -0,0 +1,31 @@ +# SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) +# Copyright(c) 2018-2019 Pensando Systems, Inc. All rights reserved. + +include $(RTE_SDK)/mk/rte.vars.mk + +# +# library name +# +LIB = librte_pmd_ionic.a + +CFLAGS += -O3 +CFLAGS += $(WERROR_FLAGS) + +EXPORT_MAP := rte_pmd_ionic_version.map + +LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring +LDLIBS += -lrte_ethdev -lrte_net +LDLIBS += -lrte_bus_pci + +# +# all source are stored in SRCS-y +# +SRCS-$(CONFIG_RTE_LIBRTE_IONIC_PMD) += ionic_mac_api.c +SRCS-$(CONFIG_RTE_LIBRTE_IONIC_PMD) += ionic_rx_filter.c +SRCS-$(CONFIG_RTE_LIBRTE_IONIC_PMD) += ionic_rxtx.c +SRCS-$(CONFIG_RTE_LIBRTE_IONIC_PMD) += ionic_dev.c +SRCS-$(CONFIG_RTE_LIBRTE_IONIC_PMD) += ionic_ethdev.c +SRCS-$(CONFIG_RTE_LIBRTE_IONIC_PMD) += ionic_lif.c +SRCS-$(CONFIG_RTE_LIBRTE_IONIC_PMD) += ionic_main.c + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/src/spdk/dpdk/drivers/net/ionic/ionic.h b/src/spdk/dpdk/drivers/net/ionic/ionic.h new file mode 100644 index 000000000..1538df309 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ionic/ionic.h @@ -0,0 +1,82 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) + * Copyright(c) 2018-2019 Pensando Systems, Inc. All rights reserved. + */ + +#ifndef _IONIC_H_ +#define _IONIC_H_ + +#include +#include + +#include + +#include "ionic_dev.h" +#include "ionic_if.h" +#include "ionic_osdep.h" + +#define IONIC_DRV_NAME "ionic" +#define IONIC_DRV_DESCRIPTION "Pensando Ethernet NIC Driver" +#define IONIC_DRV_VERSION "0.11.0-49" + +/* Vendor ID */ +#define IONIC_PENSANDO_VENDOR_ID 0x1dd8 + +/* Device IDs */ +#define IONIC_DEV_ID_ETH_PF 0x1002 +#define IONIC_DEV_ID_ETH_VF 0x1003 +#define IONIC_DEV_ID_ETH_MGMT 0x1004 + +enum ionic_mac_type { + IONIC_MAC_UNKNOWN = 0, + IONIC_MAC_CAPRI, + IONIC_NUM_MACS +}; + +struct ionic_mac_info { + enum ionic_mac_type type; +}; + +struct ionic_hw { + struct ionic_mac_info mac; + uint16_t device_id; + uint16_t vendor_id; +}; + +/* + * Structure to store private data for each driver instance (for each adapter). 
+ */ +struct ionic_adapter { + struct ionic_hw hw; + struct ionic_dev idev; + struct ionic_dev_bar bars[IONIC_BARS_MAX]; + struct ionic_identity ident; + struct ionic_lif *lifs[IONIC_LIFS_MAX]; + uint32_t num_bars; + uint32_t nlifs; + uint32_t max_ntxqs_per_lif; + uint32_t max_nrxqs_per_lif; + uint32_t max_mac_addrs; + uint32_t link_speed; + uint32_t nintrs; + bool intrs[IONIC_INTR_CTRL_REGS_MAX]; + bool is_mgmt_nic; + bool link_up; + char fw_version[IONIC_DEVINFO_FWVERS_BUFLEN]; + struct rte_pci_device *pci_dev; + LIST_ENTRY(ionic_adapter) pci_adapters; +}; + +int ionic_adminq_check_err(struct ionic_admin_ctx *ctx, bool timeout); +int ionic_adminq_post_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx); +int ionic_dev_cmd_wait_check(struct ionic_dev *idev, unsigned long max_wait); +int ionic_setup(struct ionic_adapter *adapter); + +int ionic_identify(struct ionic_adapter *adapter); +int ionic_init(struct ionic_adapter *adapter); +int ionic_reset(struct ionic_adapter *adapter); + +int ionic_port_identify(struct ionic_adapter *adapter); +int ionic_port_init(struct ionic_adapter *adapter); +int ionic_port_reset(struct ionic_adapter *adapter); + +#endif /* _IONIC_H_ */ diff --git a/src/spdk/dpdk/drivers/net/ionic/ionic_dev.c b/src/spdk/dpdk/drivers/net/ionic/ionic_dev.c new file mode 100644 index 000000000..5c2820b7a --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ionic/ionic_dev.c @@ -0,0 +1,579 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) + * Copyright(c) 2018-2019 Pensando Systems, Inc. All rights reserved. + */ + +#include + +#include + +#include "ionic_dev.h" +#include "ionic_lif.h" +#include "ionic.h" + +int +ionic_dev_setup(struct ionic_adapter *adapter) +{ + struct ionic_dev_bar *bar = adapter->bars; + unsigned int num_bars = adapter->num_bars; + struct ionic_dev *idev = &adapter->idev; + uint32_t sig; + u_char *bar0_base; + unsigned int i; + + /* BAR0: dev_cmd and interrupts */ + if (num_bars < 1) { + IONIC_PRINT(ERR, "No bars found, aborting"); + return -EFAULT; + } + + if (bar->len < IONIC_BAR0_SIZE) { + IONIC_PRINT(ERR, + "Resource bar size %lu too small, aborting", + bar->len); + return -EFAULT; + } + + bar0_base = bar->vaddr; + idev->dev_info = (union ionic_dev_info_regs *) + &bar0_base[IONIC_BAR0_DEV_INFO_REGS_OFFSET]; + idev->dev_cmd = (union ionic_dev_cmd_regs *) + &bar0_base[IONIC_BAR0_DEV_CMD_REGS_OFFSET]; + idev->intr_status = (struct ionic_intr_status *) + &bar0_base[IONIC_BAR0_INTR_STATUS_OFFSET]; + idev->intr_ctrl = (struct ionic_intr *) + &bar0_base[IONIC_BAR0_INTR_CTRL_OFFSET]; + + sig = ioread32(&idev->dev_info->signature); + if (sig != IONIC_DEV_INFO_SIGNATURE) { + IONIC_PRINT(ERR, "Incompatible firmware signature %" PRIx32 "", + sig); + return -EFAULT; + } + + for (i = 0; i < IONIC_DEVINFO_FWVERS_BUFLEN; i++) + adapter->fw_version[i] = + ioread8(&idev->dev_info->fw_version[i]); + adapter->fw_version[IONIC_DEVINFO_FWVERS_BUFLEN - 1] = '\0'; + + IONIC_PRINT(DEBUG, "Firmware version: %s", adapter->fw_version); + + /* BAR1: doorbells */ + bar++; + if (num_bars < 2) { + IONIC_PRINT(ERR, "Doorbell bar missing, aborting"); + return -EFAULT; + } + + idev->db_pages = bar->vaddr; + idev->phy_db_pages = bar->bus_addr; + + return 0; +} + +/* Devcmd Interface */ + +uint8_t +ionic_dev_cmd_status(struct ionic_dev *idev) +{ + return ioread8(&idev->dev_cmd->comp.comp.status); +} + +bool +ionic_dev_cmd_done(struct ionic_dev *idev) +{ + return ioread32(&idev->dev_cmd->done) & IONIC_DEV_CMD_DONE; +} + +void +ionic_dev_cmd_comp(struct ionic_dev *idev, void *mem) +{ + 
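+	/*
+	 * Copy the device command completion out of the dev_cmd register
+	 * window (mapped from BAR0) into the caller's buffer, one 32-bit
+	 * word at a time.
+	 */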
union ionic_dev_cmd_comp *comp = mem; + unsigned int i; + uint32_t comp_size = sizeof(comp->words) / + sizeof(comp->words[0]); + + for (i = 0; i < comp_size; i++) + comp->words[i] = ioread32(&idev->dev_cmd->comp.words[i]); +} + +void +ionic_dev_cmd_go(struct ionic_dev *idev, union ionic_dev_cmd *cmd) +{ + unsigned int i; + uint32_t cmd_size = sizeof(cmd->words) / + sizeof(cmd->words[0]); + + for (i = 0; i < cmd_size; i++) + iowrite32(cmd->words[i], &idev->dev_cmd->cmd.words[i]); + + iowrite32(0, &idev->dev_cmd->done); + iowrite32(1, &idev->dev_cmd->doorbell); +} + +/* Device commands */ + +void +ionic_dev_cmd_identify(struct ionic_dev *idev, uint8_t ver) +{ + union ionic_dev_cmd cmd = { + .identify.opcode = IONIC_CMD_IDENTIFY, + .identify.ver = ver, + }; + + ionic_dev_cmd_go(idev, &cmd); +} + +void +ionic_dev_cmd_init(struct ionic_dev *idev) +{ + union ionic_dev_cmd cmd = { + .init.opcode = IONIC_CMD_INIT, + .init.type = 0, + }; + + ionic_dev_cmd_go(idev, &cmd); +} + +void +ionic_dev_cmd_reset(struct ionic_dev *idev) +{ + union ionic_dev_cmd cmd = { + .reset.opcode = IONIC_CMD_RESET, + }; + + ionic_dev_cmd_go(idev, &cmd); +} + +/* Port commands */ + +void +ionic_dev_cmd_port_identify(struct ionic_dev *idev) +{ + union ionic_dev_cmd cmd = { + .port_init.opcode = IONIC_CMD_PORT_IDENTIFY, + .port_init.index = 0, + }; + + ionic_dev_cmd_go(idev, &cmd); +} + +void +ionic_dev_cmd_port_init(struct ionic_dev *idev) +{ + union ionic_dev_cmd cmd = { + .port_init.opcode = IONIC_CMD_PORT_INIT, + .port_init.index = 0, + .port_init.info_pa = idev->port_info_pa, + }; + + ionic_dev_cmd_go(idev, &cmd); +} + +void +ionic_dev_cmd_port_reset(struct ionic_dev *idev) +{ + union ionic_dev_cmd cmd = { + .port_reset.opcode = IONIC_CMD_PORT_RESET, + .port_reset.index = 0, + }; + + ionic_dev_cmd_go(idev, &cmd); +} + +void +ionic_dev_cmd_port_state(struct ionic_dev *idev, uint8_t state) +{ + union ionic_dev_cmd cmd = { + .port_setattr.opcode = IONIC_CMD_PORT_SETATTR, + .port_setattr.index = 0, + .port_setattr.attr = IONIC_PORT_ATTR_STATE, + .port_setattr.state = state, + }; + + ionic_dev_cmd_go(idev, &cmd); +} + +void +ionic_dev_cmd_port_speed(struct ionic_dev *idev, uint32_t speed) +{ + union ionic_dev_cmd cmd = { + .port_setattr.opcode = IONIC_CMD_PORT_SETATTR, + .port_setattr.index = 0, + .port_setattr.attr = IONIC_PORT_ATTR_SPEED, + .port_setattr.speed = speed, + }; + + ionic_dev_cmd_go(idev, &cmd); +} + +void +ionic_dev_cmd_port_mtu(struct ionic_dev *idev, uint32_t mtu) +{ + union ionic_dev_cmd cmd = { + .port_setattr.opcode = IONIC_CMD_PORT_SETATTR, + .port_setattr.index = 0, + .port_setattr.attr = IONIC_PORT_ATTR_MTU, + .port_setattr.mtu = mtu, + }; + + ionic_dev_cmd_go(idev, &cmd); +} + +void +ionic_dev_cmd_port_autoneg(struct ionic_dev *idev, uint8_t an_enable) +{ + union ionic_dev_cmd cmd = { + .port_setattr.opcode = IONIC_CMD_PORT_SETATTR, + .port_setattr.index = 0, + .port_setattr.attr = IONIC_PORT_ATTR_AUTONEG, + .port_setattr.an_enable = an_enable, + }; + + ionic_dev_cmd_go(idev, &cmd); +} + +void +ionic_dev_cmd_port_fec(struct ionic_dev *idev, uint8_t fec_type) +{ + union ionic_dev_cmd cmd = { + .port_setattr.opcode = IONIC_CMD_PORT_SETATTR, + .port_setattr.index = 0, + .port_setattr.attr = IONIC_PORT_ATTR_FEC, + .port_setattr.fec_type = fec_type, + }; + + ionic_dev_cmd_go(idev, &cmd); +} + +void +ionic_dev_cmd_port_pause(struct ionic_dev *idev, uint8_t pause_type) +{ + union ionic_dev_cmd cmd = { + .port_setattr.opcode = IONIC_CMD_PORT_SETATTR, + .port_setattr.index = 0, + .port_setattr.attr = 
IONIC_PORT_ATTR_PAUSE, + .port_setattr.pause_type = pause_type, + }; + + ionic_dev_cmd_go(idev, &cmd); +} + +void +ionic_dev_cmd_port_loopback(struct ionic_dev *idev, uint8_t loopback_mode) +{ + union ionic_dev_cmd cmd = { + .port_setattr.opcode = IONIC_CMD_PORT_SETATTR, + .port_setattr.index = 0, + .port_setattr.attr = IONIC_PORT_ATTR_LOOPBACK, + .port_setattr.loopback_mode = loopback_mode, + }; + + ionic_dev_cmd_go(idev, &cmd); +} + +/* LIF commands */ + +void +ionic_dev_cmd_lif_identify(struct ionic_dev *idev, uint8_t type, uint8_t ver) +{ + union ionic_dev_cmd cmd = { + .lif_identify.opcode = IONIC_CMD_LIF_IDENTIFY, + .lif_identify.type = type, + .lif_identify.ver = ver, + }; + + ionic_dev_cmd_go(idev, &cmd); +} + +void +ionic_dev_cmd_lif_init(struct ionic_dev *idev, uint16_t lif_index, + rte_iova_t info_pa) +{ + union ionic_dev_cmd cmd = { + .lif_init.opcode = IONIC_CMD_LIF_INIT, + .lif_init.index = lif_index, + .lif_init.info_pa = info_pa, + }; + + ionic_dev_cmd_go(idev, &cmd); +} + +void +ionic_dev_cmd_lif_reset(struct ionic_dev *idev, uint16_t lif_index) +{ + union ionic_dev_cmd cmd = { + .lif_init.opcode = IONIC_CMD_LIF_RESET, + .lif_init.index = lif_index, + }; + + ionic_dev_cmd_go(idev, &cmd); +} + +struct ionic_doorbell * +ionic_db_map(struct ionic_lif *lif, struct ionic_queue *q) +{ + return lif->kern_dbpage + q->hw_type; +} + +int +ionic_db_page_num(struct ionic_lif *lif, int pid) +{ + return (lif->index * 0) + pid; +} + +void +ionic_intr_init(struct ionic_dev *idev, struct ionic_intr_info *intr, + unsigned long index) +{ + ionic_intr_clean(idev->intr_ctrl, index); + intr->index = index; +} + +void +ionic_dev_cmd_adminq_init(struct ionic_dev *idev, + struct ionic_qcq *qcq, + uint16_t lif_index, uint16_t intr_index) +{ + struct ionic_queue *q = &qcq->q; + struct ionic_cq *cq = &qcq->cq; + + union ionic_dev_cmd cmd = { + .q_init.opcode = IONIC_CMD_Q_INIT, + .q_init.lif_index = lif_index, + .q_init.type = q->type, + .q_init.index = q->index, + .q_init.flags = IONIC_QINIT_F_ENA, + .q_init.pid = q->pid, + .q_init.intr_index = intr_index, + .q_init.ring_size = rte_log2_u32(q->num_descs), + .q_init.ring_base = q->base_pa, + .q_init.cq_ring_base = cq->base_pa, + }; + + ionic_dev_cmd_go(idev, &cmd); +} + +int +ionic_cq_init(struct ionic_lif *lif, struct ionic_cq *cq, + struct ionic_intr_info *intr, + uint32_t num_descs, size_t desc_size) +{ + if (desc_size == 0) { + IONIC_PRINT(ERR, "Descriptor size is %zu", desc_size); + return -EINVAL; + } + + if (!rte_is_power_of_2(num_descs) || + num_descs < IONIC_MIN_RING_DESC || + num_descs > IONIC_MAX_RING_DESC) { + IONIC_PRINT(ERR, "%u descriptors (min: %u max: %u)", + num_descs, IONIC_MIN_RING_DESC, IONIC_MAX_RING_DESC); + return -EINVAL; + } + + cq->lif = lif; + cq->bound_intr = intr; + cq->num_descs = num_descs; + cq->desc_size = desc_size; + cq->tail_idx = 0; + cq->done_color = 1; + + return 0; +} + +void +ionic_cq_map(struct ionic_cq *cq, void *base, rte_iova_t base_pa) +{ + cq->base = base; + cq->base_pa = base_pa; +} + +void +ionic_cq_bind(struct ionic_cq *cq, struct ionic_queue *q) +{ + cq->bound_q = q; + q->bound_cq = cq; +} + +uint32_t +ionic_cq_service(struct ionic_cq *cq, uint32_t work_to_do, + ionic_cq_cb cb, void *cb_arg) +{ + uint32_t work_done = 0; + + if (work_to_do == 0) + return 0; + + while (cb(cq, cq->tail_idx, cb_arg)) { + cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1); + if (cq->tail_idx == 0) + cq->done_color = !cq->done_color; + + if (++work_done == work_to_do) + break; + } + + return work_done; +} + +int 
+ionic_q_init(struct ionic_lif *lif, struct ionic_dev *idev, + struct ionic_queue *q, uint32_t index, uint32_t num_descs, + size_t desc_size, size_t sg_desc_size, uint32_t pid) +{ + uint32_t ring_size; + + if (desc_size == 0 || !rte_is_power_of_2(num_descs)) + return -EINVAL; + + ring_size = rte_log2_u32(num_descs); + + if (ring_size < 2 || ring_size > 16) + return -EINVAL; + + q->lif = lif; + q->idev = idev; + q->index = index; + q->num_descs = num_descs; + q->desc_size = desc_size; + q->sg_desc_size = sg_desc_size; + q->head_idx = 0; + q->tail_idx = 0; + q->pid = pid; + + return 0; +} + +void +ionic_q_map(struct ionic_queue *q, void *base, rte_iova_t base_pa) +{ + q->base = base; + q->base_pa = base_pa; +} + +void +ionic_q_sg_map(struct ionic_queue *q, void *base, rte_iova_t base_pa) +{ + q->sg_base = base; + q->sg_base_pa = base_pa; +} + +void +ionic_q_flush(struct ionic_queue *q) +{ + writeq(IONIC_DBELL_QID(q->hw_index) | q->head_idx, q->db); +} + +void +ionic_q_post(struct ionic_queue *q, bool ring_doorbell, desc_cb cb, + void *cb_arg) +{ + struct ionic_desc_info *head = &q->info[q->head_idx]; + + head->cb = cb; + head->cb_arg = cb_arg; + + q->head_idx = (q->head_idx + 1) & (q->num_descs - 1); + + if (ring_doorbell) + ionic_q_flush(q); +} + +uint32_t +ionic_q_space_avail(struct ionic_queue *q) +{ + uint32_t avail = q->tail_idx; + + if (q->head_idx >= avail) + avail += q->num_descs - q->head_idx - 1; + else + avail -= q->head_idx + 1; + + return avail; +} + +bool +ionic_q_has_space(struct ionic_queue *q, uint32_t want) +{ + return ionic_q_space_avail(q) >= want; +} + +void +ionic_q_service(struct ionic_queue *q, uint32_t cq_desc_index, + uint32_t stop_index, void *service_cb_arg) +{ + struct ionic_desc_info *desc_info; + uint32_t curr_q_tail_idx; + + do { + desc_info = &q->info[q->tail_idx]; + + if (desc_info->cb) + desc_info->cb(q, q->tail_idx, cq_desc_index, + desc_info->cb_arg, service_cb_arg); + + desc_info->cb = NULL; + desc_info->cb_arg = NULL; + + curr_q_tail_idx = q->tail_idx; + q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1); + + } while (curr_q_tail_idx != stop_index); +} + +static void +ionic_adminq_cb(struct ionic_queue *q, + uint32_t q_desc_index, uint32_t cq_desc_index, + void *cb_arg, void *service_cb_arg __rte_unused) +{ + struct ionic_admin_ctx *ctx = cb_arg; + struct ionic_admin_comp *cq_desc_base = q->bound_cq->base; + struct ionic_admin_comp *cq_desc = &cq_desc_base[cq_desc_index]; + + if (unlikely(cq_desc->comp_index != q_desc_index)) { + IONIC_WARN_ON(cq_desc->comp_index != q_desc_index); + return; + } + + memcpy(&ctx->comp, cq_desc, sizeof(*cq_desc)); + + ctx->pending_work = false; /* done */ +} + +/** ionic_adminq_post - Post an admin command. + * @lif: Handle to lif. + * @cmd_ctx: Api admin command context. + * + * Post the command to an admin queue in the ethernet driver. If this command + * succeeds, then the command has been posted, but that does not indicate a + * completion. If this command returns success, then the completion callback + * will eventually be called. + * + * Return: zero or negative error status. 
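+ *
+ * Illustrative usage sketch only (command field names are those defined
+ * in ionic_if.h and new_mtu is a placeholder); callers normally go
+ * through ionic_adminq_post_wait(), declared in ionic.h, which posts
+ * the command and then services the admin queue until the completion
+ * callback clears pending_work:
+ *
+ *	struct ionic_admin_ctx ctx = {
+ *		.pending_work = true,
+ *		.cmd.lif_setattr = {
+ *			.opcode = IONIC_CMD_LIF_SETATTR,
+ *			.attr = IONIC_LIF_ATTR_MTU,
+ *			.mtu = new_mtu,
+ *		},
+ *	};
+ *	int err = ionic_adminq_post_wait(lif, &ctx);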
+ */ +int +ionic_adminq_post(struct ionic_lif *lif, struct ionic_admin_ctx *ctx) +{ + struct ionic_queue *adminq = &lif->adminqcq->q; + struct ionic_admin_cmd *q_desc_base = adminq->base; + struct ionic_admin_cmd *q_desc; + int err = 0; + + rte_spinlock_lock(&lif->adminq_lock); + + if (!ionic_q_has_space(adminq, 1)) { + err = -ENOSPC; + goto err_out; + } + + q_desc = &q_desc_base[adminq->head_idx]; + + memcpy(q_desc, &ctx->cmd, sizeof(ctx->cmd)); + + ionic_q_post(adminq, true, ionic_adminq_cb, ctx); + +err_out: + rte_spinlock_unlock(&lif->adminq_lock); + + return err; +} diff --git a/src/spdk/dpdk/drivers/net/ionic/ionic_dev.h b/src/spdk/dpdk/drivers/net/ionic/ionic_dev.h new file mode 100644 index 000000000..532255a60 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ionic/ionic_dev.h @@ -0,0 +1,271 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) + * Copyright(c) 2018-2019 Pensando Systems, Inc. All rights reserved. + */ + +#ifndef _IONIC_DEV_H_ +#define _IONIC_DEV_H_ + +#include + +#include "ionic_osdep.h" +#include "ionic_if.h" +#include "ionic_regs.h" + +#define IONIC_MIN_MTU RTE_ETHER_MIN_MTU +#define IONIC_MAX_MTU 9194 + +#define IONIC_MAX_RING_DESC 32768 +#define IONIC_MIN_RING_DESC 16 +#define IONIC_DEF_TXRX_DESC 4096 + +#define IONIC_LIFS_MAX 1024 + +#define IONIC_DEVCMD_TIMEOUT 30 /* devcmd_timeout */ +#define IONIC_ALIGN 4096 + +struct ionic_adapter; + +struct ionic_dev_bar { + void __iomem *vaddr; + rte_iova_t bus_addr; + unsigned long len; +}; + +static inline void ionic_struct_size_checks(void) +{ + RTE_BUILD_BUG_ON(sizeof(struct ionic_doorbell) != 8); + RTE_BUILD_BUG_ON(sizeof(struct ionic_intr) != 32); + RTE_BUILD_BUG_ON(sizeof(struct ionic_intr_status) != 8); + + RTE_BUILD_BUG_ON(sizeof(union ionic_dev_regs) != 4096); + RTE_BUILD_BUG_ON(sizeof(union ionic_dev_info_regs) != 2048); + RTE_BUILD_BUG_ON(sizeof(union ionic_dev_cmd_regs) != 2048); + + RTE_BUILD_BUG_ON(sizeof(struct ionic_lif_stats) != 1024); + + RTE_BUILD_BUG_ON(sizeof(struct ionic_admin_cmd) != 64); + RTE_BUILD_BUG_ON(sizeof(struct ionic_admin_comp) != 16); + RTE_BUILD_BUG_ON(sizeof(struct ionic_nop_cmd) != 64); + RTE_BUILD_BUG_ON(sizeof(struct ionic_nop_comp) != 16); + + /* Device commands */ + RTE_BUILD_BUG_ON(sizeof(struct ionic_dev_identify_cmd) != 64); + RTE_BUILD_BUG_ON(sizeof(struct ionic_dev_identify_comp) != 16); + RTE_BUILD_BUG_ON(sizeof(struct ionic_dev_init_cmd) != 64); + RTE_BUILD_BUG_ON(sizeof(struct ionic_dev_init_comp) != 16); + RTE_BUILD_BUG_ON(sizeof(struct ionic_dev_reset_cmd) != 64); + RTE_BUILD_BUG_ON(sizeof(struct ionic_dev_reset_comp) != 16); + RTE_BUILD_BUG_ON(sizeof(struct ionic_dev_getattr_cmd) != 64); + RTE_BUILD_BUG_ON(sizeof(struct ionic_dev_getattr_comp) != 16); + RTE_BUILD_BUG_ON(sizeof(struct ionic_dev_setattr_cmd) != 64); + RTE_BUILD_BUG_ON(sizeof(struct ionic_dev_setattr_comp) != 16); + + /* Port commands */ + RTE_BUILD_BUG_ON(sizeof(struct ionic_port_identify_cmd) != 64); + RTE_BUILD_BUG_ON(sizeof(struct ionic_port_identify_comp) != 16); + RTE_BUILD_BUG_ON(sizeof(struct ionic_port_init_cmd) != 64); + RTE_BUILD_BUG_ON(sizeof(struct ionic_port_init_comp) != 16); + RTE_BUILD_BUG_ON(sizeof(struct ionic_port_reset_cmd) != 64); + RTE_BUILD_BUG_ON(sizeof(struct ionic_port_reset_comp) != 16); + RTE_BUILD_BUG_ON(sizeof(struct ionic_port_getattr_cmd) != 64); + RTE_BUILD_BUG_ON(sizeof(struct ionic_port_getattr_comp) != 16); + RTE_BUILD_BUG_ON(sizeof(struct ionic_port_setattr_cmd) != 64); + RTE_BUILD_BUG_ON(sizeof(struct ionic_port_setattr_comp) != 16); + + /* LIF commands */ + 
RTE_BUILD_BUG_ON(sizeof(struct ionic_lif_init_cmd) != 64); + RTE_BUILD_BUG_ON(sizeof(struct ionic_lif_init_comp) != 16); + RTE_BUILD_BUG_ON(sizeof(struct ionic_lif_reset_cmd) != 64); + RTE_BUILD_BUG_ON(sizeof(struct ionic_lif_getattr_cmd) != 64); + RTE_BUILD_BUG_ON(sizeof(struct ionic_lif_getattr_comp) != 16); + RTE_BUILD_BUG_ON(sizeof(struct ionic_lif_setattr_cmd) != 64); + RTE_BUILD_BUG_ON(sizeof(struct ionic_lif_setattr_comp) != 16); + + RTE_BUILD_BUG_ON(sizeof(struct ionic_q_init_cmd) != 64); + RTE_BUILD_BUG_ON(sizeof(struct ionic_q_init_comp) != 16); + RTE_BUILD_BUG_ON(sizeof(struct ionic_q_control_cmd) != 64); + + RTE_BUILD_BUG_ON(sizeof(struct ionic_rx_mode_set_cmd) != 64); + RTE_BUILD_BUG_ON(sizeof(struct ionic_rx_filter_add_cmd) != 64); + RTE_BUILD_BUG_ON(sizeof(struct ionic_rx_filter_add_comp) != 16); + RTE_BUILD_BUG_ON(sizeof(struct ionic_rx_filter_del_cmd) != 64); + + /* RDMA commands */ + RTE_BUILD_BUG_ON(sizeof(struct ionic_rdma_reset_cmd) != 64); + RTE_BUILD_BUG_ON(sizeof(struct ionic_rdma_queue_cmd) != 64); + + /* Events */ + RTE_BUILD_BUG_ON(sizeof(struct ionic_notifyq_cmd) != 4); + RTE_BUILD_BUG_ON(sizeof(union ionic_notifyq_comp) != 64); + RTE_BUILD_BUG_ON(sizeof(struct ionic_notifyq_event) != 64); + RTE_BUILD_BUG_ON(sizeof(struct ionic_link_change_event) != 64); + RTE_BUILD_BUG_ON(sizeof(struct ionic_reset_event) != 64); + RTE_BUILD_BUG_ON(sizeof(struct ionic_heartbeat_event) != 64); + RTE_BUILD_BUG_ON(sizeof(struct ionic_log_event) != 64); + + /* I/O */ + RTE_BUILD_BUG_ON(sizeof(struct ionic_txq_desc) != 16); + RTE_BUILD_BUG_ON(sizeof(struct ionic_txq_sg_desc) != 128); + RTE_BUILD_BUG_ON(sizeof(struct ionic_txq_comp) != 16); + + RTE_BUILD_BUG_ON(sizeof(struct ionic_rxq_desc) != 16); + RTE_BUILD_BUG_ON(sizeof(struct ionic_rxq_sg_desc) != 128); + RTE_BUILD_BUG_ON(sizeof(struct ionic_rxq_comp) != 16); +} + +struct ionic_dev { + union ionic_dev_info_regs __iomem *dev_info; + union ionic_dev_cmd_regs __iomem *dev_cmd; + + struct ionic_doorbell __iomem *db_pages; + rte_iova_t phy_db_pages; + + struct ionic_intr __iomem *intr_ctrl; + + struct ionic_intr_status __iomem *intr_status; + + struct ionic_port_info *port_info; + const struct rte_memzone *port_info_z; + rte_iova_t port_info_pa; + uint32_t port_info_sz; +}; + +struct ionic_queue; +struct ionic_desc_info; + +typedef void (*desc_cb)(struct ionic_queue *q, + uint32_t q_desc_index, + uint32_t cq_desc_index, + void *cb_arg, void *service_cb_arg); + +struct ionic_desc_info { + desc_cb cb; + void *cb_arg; +}; + +struct ionic_queue { + struct ionic_dev *idev; + struct ionic_lif *lif; + struct ionic_cq *bound_cq; + uint32_t index; + uint32_t type; + uint32_t hw_index; + uint32_t hw_type; + void *base; + void *sg_base; + rte_iova_t base_pa; + rte_iova_t sg_base_pa; + struct ionic_desc_info *info; + uint32_t tail_idx; + uint32_t head_idx; + uint32_t num_descs; + uint32_t desc_size; + uint32_t sg_desc_size; + uint32_t pid; + uint32_t qid; + uint32_t qtype; + struct ionic_doorbell __iomem *db; + void *nop_desc; +}; + +#define IONIC_INTR_INDEX_NOT_ASSIGNED (-1) +#define IONIC_INTR_NAME_MAX_SZ (32) + +struct ionic_intr_info { + char name[IONIC_INTR_NAME_MAX_SZ]; + int index; + uint32_t vector; + struct ionic_intr __iomem *ctrl; +}; + +struct ionic_cq { + struct ionic_lif *lif; + struct ionic_queue *bound_q; + uint32_t tail_idx; + uint32_t num_descs; + uint32_t desc_size; + bool done_color; + void *base; + rte_iova_t base_pa; + struct ionic_intr_info *bound_intr; +}; + +/** ionic_admin_ctx - Admin command context. 
+ * @pending_work: Flag that indicates a completion. + * @cmd: Admin command (64B) to be copied to the queue. + * @comp: Admin completion (16B) copied from the queue. + */ +struct ionic_admin_ctx { + bool pending_work; + union ionic_adminq_cmd cmd; + union ionic_adminq_comp comp; +}; + +struct ionic_lif; +struct ionic_adapter; +struct ionic_qcq; + +void ionic_intr_init(struct ionic_dev *idev, struct ionic_intr_info *intr, + unsigned long index); + +int ionic_dev_setup(struct ionic_adapter *adapter); + +void ionic_dev_cmd_go(struct ionic_dev *idev, union ionic_dev_cmd *cmd); +uint8_t ionic_dev_cmd_status(struct ionic_dev *idev); +bool ionic_dev_cmd_done(struct ionic_dev *idev); +void ionic_dev_cmd_comp(struct ionic_dev *idev, void *mem); + +void ionic_dev_cmd_identify(struct ionic_dev *idev, uint8_t ver); +void ionic_dev_cmd_init(struct ionic_dev *idev); +void ionic_dev_cmd_reset(struct ionic_dev *idev); + +void ionic_dev_cmd_port_identify(struct ionic_dev *idev); +void ionic_dev_cmd_port_init(struct ionic_dev *idev); +void ionic_dev_cmd_port_reset(struct ionic_dev *idev); +void ionic_dev_cmd_port_state(struct ionic_dev *idev, uint8_t state); +void ionic_dev_cmd_port_speed(struct ionic_dev *idev, uint32_t speed); +void ionic_dev_cmd_port_mtu(struct ionic_dev *idev, uint32_t mtu); +void ionic_dev_cmd_port_autoneg(struct ionic_dev *idev, uint8_t an_enable); +void ionic_dev_cmd_port_fec(struct ionic_dev *idev, uint8_t fec_type); +void ionic_dev_cmd_port_pause(struct ionic_dev *idev, uint8_t pause_type); +void ionic_dev_cmd_port_loopback(struct ionic_dev *idev, + uint8_t loopback_mode); + +void ionic_dev_cmd_lif_identify(struct ionic_dev *idev, uint8_t type, + uint8_t ver); +void ionic_dev_cmd_lif_init(struct ionic_dev *idev, uint16_t lif_index, + rte_iova_t addr); +void ionic_dev_cmd_lif_reset(struct ionic_dev *idev, uint16_t lif_index); +void ionic_dev_cmd_adminq_init(struct ionic_dev *idev, struct ionic_qcq *qcq, + uint16_t lif_index, uint16_t intr_index); + +struct ionic_doorbell __iomem *ionic_db_map(struct ionic_lif *lif, + struct ionic_queue *q); +int ionic_db_page_num(struct ionic_lif *lif, int pid); + +int ionic_cq_init(struct ionic_lif *lif, struct ionic_cq *cq, + struct ionic_intr_info *intr, uint32_t num_descs, + size_t desc_size); +void ionic_cq_map(struct ionic_cq *cq, void *base, rte_iova_t base_pa); +void ionic_cq_bind(struct ionic_cq *cq, struct ionic_queue *q); +typedef bool (*ionic_cq_cb)(struct ionic_cq *cq, uint32_t cq_desc_index, + void *cb_arg); +uint32_t ionic_cq_service(struct ionic_cq *cq, uint32_t work_to_do, + ionic_cq_cb cb, void *cb_arg); + +int ionic_q_init(struct ionic_lif *lif, struct ionic_dev *idev, + struct ionic_queue *q, uint32_t index, uint32_t num_descs, + size_t desc_size, size_t sg_desc_size, uint32_t pid); +void ionic_q_map(struct ionic_queue *q, void *base, rte_iova_t base_pa); +void ionic_q_sg_map(struct ionic_queue *q, void *base, rte_iova_t base_pa); +void ionic_q_flush(struct ionic_queue *q); +void ionic_q_post(struct ionic_queue *q, bool ring_doorbell, desc_cb cb, + void *cb_arg); +uint32_t ionic_q_space_avail(struct ionic_queue *q); +bool ionic_q_has_space(struct ionic_queue *q, uint32_t want); +void ionic_q_service(struct ionic_queue *q, uint32_t cq_desc_index, + uint32_t stop_index, void *service_cb_arg); + +int ionic_adminq_post(struct ionic_lif *lif, struct ionic_admin_ctx *ctx); + +#endif /* _IONIC_DEV_H_ */ diff --git a/src/spdk/dpdk/drivers/net/ionic/ionic_ethdev.c b/src/spdk/dpdk/drivers/net/ionic/ionic_ethdev.c new file mode 100644 
index 000000000..363f0cf00 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ionic/ionic_ethdev.c @@ -0,0 +1,1327 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) + * Copyright(c) 2018-2019 Pensando Systems, Inc. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include + +#include "ionic_logs.h" +#include "ionic.h" +#include "ionic_dev.h" +#include "ionic_mac_api.h" +#include "ionic_lif.h" +#include "ionic_ethdev.h" +#include "ionic_rxtx.h" + +static int eth_ionic_dev_init(struct rte_eth_dev *eth_dev, void *init_params); +static int eth_ionic_dev_uninit(struct rte_eth_dev *eth_dev); +static int ionic_dev_info_get(struct rte_eth_dev *eth_dev, + struct rte_eth_dev_info *dev_info); +static int ionic_dev_configure(struct rte_eth_dev *dev); +static int ionic_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu); +static int ionic_dev_start(struct rte_eth_dev *dev); +static void ionic_dev_stop(struct rte_eth_dev *dev); +static void ionic_dev_close(struct rte_eth_dev *dev); +static int ionic_dev_set_link_up(struct rte_eth_dev *dev); +static int ionic_dev_set_link_down(struct rte_eth_dev *dev); +static int ionic_dev_link_update(struct rte_eth_dev *eth_dev, + int wait_to_complete); +static int ionic_flow_ctrl_get(struct rte_eth_dev *eth_dev, + struct rte_eth_fc_conf *fc_conf); +static int ionic_flow_ctrl_set(struct rte_eth_dev *eth_dev, + struct rte_eth_fc_conf *fc_conf); +static int ionic_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask); +static int ionic_dev_rss_reta_update(struct rte_eth_dev *eth_dev, + struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size); +static int ionic_dev_rss_reta_query(struct rte_eth_dev *eth_dev, + struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size); +static int ionic_dev_rss_hash_conf_get(struct rte_eth_dev *eth_dev, + struct rte_eth_rss_conf *rss_conf); +static int ionic_dev_rss_hash_update(struct rte_eth_dev *eth_dev, + struct rte_eth_rss_conf *rss_conf); +static int ionic_dev_stats_get(struct rte_eth_dev *eth_dev, + struct rte_eth_stats *stats); +static int ionic_dev_stats_reset(struct rte_eth_dev *eth_dev); +static int ionic_dev_xstats_get(struct rte_eth_dev *dev, + struct rte_eth_xstat *xstats, unsigned int n); +static int ionic_dev_xstats_get_by_id(struct rte_eth_dev *dev, + const uint64_t *ids, uint64_t *values, unsigned int n); +static int ionic_dev_xstats_reset(struct rte_eth_dev *dev); +static int ionic_dev_xstats_get_names(struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, unsigned int size); +static int ionic_dev_xstats_get_names_by_id(struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, const uint64_t *ids, + unsigned int limit); +static int ionic_dev_fw_version_get(struct rte_eth_dev *eth_dev, + char *fw_version, size_t fw_size); + +int ionic_logtype; + +static const struct rte_pci_id pci_id_ionic_map[] = { + { RTE_PCI_DEVICE(IONIC_PENSANDO_VENDOR_ID, IONIC_DEV_ID_ETH_PF) }, + { RTE_PCI_DEVICE(IONIC_PENSANDO_VENDOR_ID, IONIC_DEV_ID_ETH_VF) }, + { RTE_PCI_DEVICE(IONIC_PENSANDO_VENDOR_ID, IONIC_DEV_ID_ETH_MGMT) }, + { .vendor_id = 0, /* sentinel */ }, +}; + +static const struct rte_eth_desc_lim rx_desc_lim = { + .nb_max = IONIC_MAX_RING_DESC, + .nb_min = IONIC_MIN_RING_DESC, + .nb_align = 1, +}; + +static const struct rte_eth_desc_lim tx_desc_lim = { + .nb_max = IONIC_MAX_RING_DESC, + .nb_min = IONIC_MIN_RING_DESC, + .nb_align = 1, + .nb_seg_max = IONIC_TX_MAX_SG_ELEMS, + .nb_mtu_seg_max = IONIC_TX_MAX_SG_ELEMS, +}; + +static const struct eth_dev_ops 
ionic_eth_dev_ops = { + .dev_infos_get = ionic_dev_info_get, + .dev_configure = ionic_dev_configure, + .mtu_set = ionic_dev_mtu_set, + .dev_start = ionic_dev_start, + .dev_stop = ionic_dev_stop, + .dev_close = ionic_dev_close, + .link_update = ionic_dev_link_update, + .dev_set_link_up = ionic_dev_set_link_up, + .dev_set_link_down = ionic_dev_set_link_down, + .mac_addr_add = ionic_dev_add_mac, + .mac_addr_remove = ionic_dev_remove_mac, + .mac_addr_set = ionic_dev_set_mac, + .vlan_filter_set = ionic_dev_vlan_filter_set, + .promiscuous_enable = ionic_dev_promiscuous_enable, + .promiscuous_disable = ionic_dev_promiscuous_disable, + .allmulticast_enable = ionic_dev_allmulticast_enable, + .allmulticast_disable = ionic_dev_allmulticast_disable, + .flow_ctrl_get = ionic_flow_ctrl_get, + .flow_ctrl_set = ionic_flow_ctrl_set, + .rxq_info_get = ionic_rxq_info_get, + .txq_info_get = ionic_txq_info_get, + .rx_queue_setup = ionic_dev_rx_queue_setup, + .rx_queue_release = ionic_dev_rx_queue_release, + .rx_queue_start = ionic_dev_rx_queue_start, + .rx_queue_stop = ionic_dev_rx_queue_stop, + .tx_queue_setup = ionic_dev_tx_queue_setup, + .tx_queue_release = ionic_dev_tx_queue_release, + .tx_queue_start = ionic_dev_tx_queue_start, + .tx_queue_stop = ionic_dev_tx_queue_stop, + .vlan_offload_set = ionic_vlan_offload_set, + .reta_update = ionic_dev_rss_reta_update, + .reta_query = ionic_dev_rss_reta_query, + .rss_hash_conf_get = ionic_dev_rss_hash_conf_get, + .rss_hash_update = ionic_dev_rss_hash_update, + .stats_get = ionic_dev_stats_get, + .stats_reset = ionic_dev_stats_reset, + .xstats_get = ionic_dev_xstats_get, + .xstats_get_by_id = ionic_dev_xstats_get_by_id, + .xstats_reset = ionic_dev_xstats_reset, + .xstats_get_names = ionic_dev_xstats_get_names, + .xstats_get_names_by_id = ionic_dev_xstats_get_names_by_id, + .fw_version_get = ionic_dev_fw_version_get, +}; + +struct rte_ionic_xstats_name_off { + char name[RTE_ETH_XSTATS_NAME_SIZE]; + unsigned int offset; +}; + +static const struct rte_ionic_xstats_name_off rte_ionic_xstats_strings[] = { + /* RX */ + {"rx_ucast_bytes", offsetof(struct ionic_lif_stats, + rx_ucast_bytes)}, + {"rx_ucast_packets", offsetof(struct ionic_lif_stats, + rx_ucast_packets)}, + {"rx_mcast_bytes", offsetof(struct ionic_lif_stats, + rx_mcast_bytes)}, + {"rx_mcast_packets", offsetof(struct ionic_lif_stats, + rx_mcast_packets)}, + {"rx_bcast_bytes", offsetof(struct ionic_lif_stats, + rx_bcast_bytes)}, + {"rx_bcast_packets", offsetof(struct ionic_lif_stats, + rx_bcast_packets)}, + /* RX drops */ + {"rx_ucast_drop_bytes", offsetof(struct ionic_lif_stats, + rx_ucast_drop_bytes)}, + {"rx_ucast_drop_packets", offsetof(struct ionic_lif_stats, + rx_ucast_drop_packets)}, + {"rx_mcast_drop_bytes", offsetof(struct ionic_lif_stats, + rx_mcast_drop_bytes)}, + {"rx_mcast_drop_packets", offsetof(struct ionic_lif_stats, + rx_mcast_drop_packets)}, + {"rx_bcast_drop_bytes", offsetof(struct ionic_lif_stats, + rx_bcast_drop_bytes)}, + {"rx_bcast_drop_packets", offsetof(struct ionic_lif_stats, + rx_bcast_drop_packets)}, + {"rx_dma_error", offsetof(struct ionic_lif_stats, + rx_dma_error)}, + /* TX */ + {"tx_ucast_bytes", offsetof(struct ionic_lif_stats, + tx_ucast_bytes)}, + {"tx_ucast_packets", offsetof(struct ionic_lif_stats, + tx_ucast_packets)}, + {"tx_mcast_bytes", offsetof(struct ionic_lif_stats, + tx_mcast_bytes)}, + {"tx_mcast_packets", offsetof(struct ionic_lif_stats, + tx_mcast_packets)}, + {"tx_bcast_bytes", offsetof(struct ionic_lif_stats, + tx_bcast_bytes)}, + {"tx_bcast_packets", 
offsetof(struct ionic_lif_stats, + tx_bcast_packets)}, + /* TX drops */ + {"tx_ucast_drop_bytes", offsetof(struct ionic_lif_stats, + tx_ucast_drop_bytes)}, + {"tx_ucast_drop_packets", offsetof(struct ionic_lif_stats, + tx_ucast_drop_packets)}, + {"tx_mcast_drop_bytes", offsetof(struct ionic_lif_stats, + tx_mcast_drop_bytes)}, + {"tx_mcast_drop_packets", offsetof(struct ionic_lif_stats, + tx_mcast_drop_packets)}, + {"tx_bcast_drop_bytes", offsetof(struct ionic_lif_stats, + tx_bcast_drop_bytes)}, + {"tx_bcast_drop_packets", offsetof(struct ionic_lif_stats, + tx_bcast_drop_packets)}, + {"tx_dma_error", offsetof(struct ionic_lif_stats, + tx_dma_error)}, + /* Rx Queue/Ring drops */ + {"rx_queue_disabled", offsetof(struct ionic_lif_stats, + rx_queue_disabled)}, + {"rx_queue_empty", offsetof(struct ionic_lif_stats, + rx_queue_empty)}, + {"rx_queue_error", offsetof(struct ionic_lif_stats, + rx_queue_error)}, + {"rx_desc_fetch_error", offsetof(struct ionic_lif_stats, + rx_desc_fetch_error)}, + {"rx_desc_data_error", offsetof(struct ionic_lif_stats, + rx_desc_data_error)}, + /* Tx Queue/Ring drops */ + {"tx_queue_disabled", offsetof(struct ionic_lif_stats, + tx_queue_disabled)}, + {"tx_queue_error", offsetof(struct ionic_lif_stats, + tx_queue_error)}, + {"tx_desc_fetch_error", offsetof(struct ionic_lif_stats, + tx_desc_fetch_error)}, + {"tx_desc_data_error", offsetof(struct ionic_lif_stats, + tx_desc_data_error)}, +}; + +#define IONIC_NB_HW_STATS (sizeof(rte_ionic_xstats_strings) / \ + sizeof(rte_ionic_xstats_strings[0])) + +static int +ionic_dev_fw_version_get(struct rte_eth_dev *eth_dev, + char *fw_version, size_t fw_size) +{ + struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev); + struct ionic_adapter *adapter = lif->adapter; + + if (fw_version == NULL || fw_size <= 0) + return -EINVAL; + + snprintf(fw_version, fw_size, "%s", + adapter->fw_version); + fw_version[fw_size - 1] = '\0'; + + return 0; +} + +/* + * Set device link up, enable tx. + */ +static int +ionic_dev_set_link_up(struct rte_eth_dev *eth_dev) +{ + struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev); + struct ionic_adapter *adapter = lif->adapter; + struct ionic_dev *idev = &adapter->idev; + int err; + + IONIC_PRINT_CALL(); + + ionic_dev_cmd_port_state(idev, IONIC_PORT_ADMIN_STATE_UP); + + err = ionic_dev_cmd_wait_check(idev, IONIC_DEVCMD_TIMEOUT); + if (err) { + IONIC_PRINT(WARNING, "Failed to bring port UP"); + return err; + } + + return 0; +} + +/* + * Set device link down, disable tx. 
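+ * This issues a PORT_SETATTR devcmd (IONIC_PORT_ATTR_STATE set to
+ * IONIC_PORT_ADMIN_STATE_DOWN) and waits up to IONIC_DEVCMD_TIMEOUT
+ * for the firmware to acknowledge it.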
+ */ +static int +ionic_dev_set_link_down(struct rte_eth_dev *eth_dev) +{ + struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev); + struct ionic_adapter *adapter = lif->adapter; + struct ionic_dev *idev = &adapter->idev; + int err; + + IONIC_PRINT_CALL(); + + ionic_dev_cmd_port_state(idev, IONIC_PORT_ADMIN_STATE_DOWN); + + err = ionic_dev_cmd_wait_check(idev, IONIC_DEVCMD_TIMEOUT); + if (err) { + IONIC_PRINT(WARNING, "Failed to bring port DOWN"); + return err; + } + + return 0; +} + +static int +ionic_dev_link_update(struct rte_eth_dev *eth_dev, + int wait_to_complete __rte_unused) +{ + struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev); + struct ionic_adapter *adapter = lif->adapter; + struct rte_eth_link link; + + IONIC_PRINT_CALL(); + + /* Initialize */ + memset(&link, 0, sizeof(link)); + link.link_autoneg = ETH_LINK_AUTONEG; + + if (!adapter->link_up) { + /* Interface is down */ + link.link_status = ETH_LINK_DOWN; + link.link_duplex = ETH_LINK_HALF_DUPLEX; + link.link_speed = ETH_SPEED_NUM_NONE; + } else { + /* Interface is up */ + link.link_status = ETH_LINK_UP; + link.link_duplex = ETH_LINK_FULL_DUPLEX; + switch (adapter->link_speed) { + case 10000: + link.link_speed = ETH_SPEED_NUM_10G; + break; + case 25000: + link.link_speed = ETH_SPEED_NUM_25G; + break; + case 40000: + link.link_speed = ETH_SPEED_NUM_40G; + break; + case 50000: + link.link_speed = ETH_SPEED_NUM_50G; + break; + case 100000: + link.link_speed = ETH_SPEED_NUM_100G; + break; + default: + link.link_speed = ETH_SPEED_NUM_NONE; + break; + } + } + + return rte_eth_linkstatus_set(eth_dev, &link); +} + +/** + * Interrupt handler triggered by NIC for handling + * specific interrupt. + * + * @param param + * The address of parameter registered before. + * + * @return + * void + */ +static void +ionic_dev_interrupt_handler(void *param) +{ + struct ionic_adapter *adapter = (struct ionic_adapter *)param; + uint32_t i; + + IONIC_PRINT(DEBUG, "->"); + + for (i = 0; i < adapter->nlifs; i++) { + if (adapter->lifs[i]) + ionic_notifyq_handler(adapter->lifs[i], -1); + } +} + +static int +ionic_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu) +{ + struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev); + uint32_t max_frame_size; + int err; + + IONIC_PRINT_CALL(); + + /* + * Note: mtu check against IONIC_MIN_MTU, IONIC_MAX_MTU + * is done by the the API. 
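+ * The frame size derived below must also fit: for example, an MTU of
+ * 1500 yields 1500 + 14 (Ethernet header) + 4 (VLAN) + 4 (QinQ) =
+ * 1522 bytes, which must not exceed the configured
+ * rxmode.max_rx_pkt_len.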
+ */ + + /* + * Max frame size is MTU + Ethernet header + VLAN + QinQ + * (plus ETHER_CRC_LEN if the adapter is able to keep CRC) + */ + max_frame_size = mtu + RTE_ETHER_HDR_LEN + 4 + 4; + + if (eth_dev->data->dev_conf.rxmode.max_rx_pkt_len < max_frame_size) + return -EINVAL; + + err = ionic_lif_change_mtu(lif, mtu); + if (err) + return err; + + return 0; +} + +static int +ionic_dev_info_get(struct rte_eth_dev *eth_dev, + struct rte_eth_dev_info *dev_info) +{ + struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev); + struct ionic_adapter *adapter = lif->adapter; + struct ionic_identity *ident = &adapter->ident; + + IONIC_PRINT_CALL(); + + dev_info->max_rx_queues = (uint16_t) + ident->lif.eth.config.queue_count[IONIC_QTYPE_RXQ]; + dev_info->max_tx_queues = (uint16_t) + ident->lif.eth.config.queue_count[IONIC_QTYPE_TXQ]; + /* Also add ETHER_CRC_LEN if the adapter is able to keep CRC */ + dev_info->min_rx_bufsize = IONIC_MIN_MTU + RTE_ETHER_HDR_LEN; + dev_info->max_rx_pktlen = IONIC_MAX_MTU + RTE_ETHER_HDR_LEN; + dev_info->max_mac_addrs = adapter->max_mac_addrs; + dev_info->min_mtu = IONIC_MIN_MTU; + dev_info->max_mtu = IONIC_MAX_MTU; + + dev_info->hash_key_size = IONIC_RSS_HASH_KEY_SIZE; + dev_info->reta_size = ident->lif.eth.rss_ind_tbl_sz; + dev_info->flow_type_rss_offloads = IONIC_ETH_RSS_OFFLOAD_ALL; + + dev_info->speed_capa = + ETH_LINK_SPEED_10G | + ETH_LINK_SPEED_25G | + ETH_LINK_SPEED_40G | + ETH_LINK_SPEED_50G | + ETH_LINK_SPEED_100G; + + /* + * Per-queue capabilities. Actually most of the offloads are enabled + * by default on the port and can be used on selected queues (by adding + * packet flags at runtime when required) + */ + + dev_info->rx_queue_offload_capa = + DEV_RX_OFFLOAD_IPV4_CKSUM | + DEV_RX_OFFLOAD_UDP_CKSUM | + DEV_RX_OFFLOAD_TCP_CKSUM | + 0; + + dev_info->tx_queue_offload_capa = + DEV_TX_OFFLOAD_IPV4_CKSUM | + DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_TCP_CKSUM | + DEV_TX_OFFLOAD_VLAN_INSERT | + DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | + DEV_TX_OFFLOAD_OUTER_UDP_CKSUM | + 0; + + /* + * Per-port capabilities + * See ionic_set_features to request and check supported features + */ + + dev_info->rx_offload_capa = dev_info->rx_queue_offload_capa | + DEV_RX_OFFLOAD_JUMBO_FRAME | + DEV_RX_OFFLOAD_VLAN_FILTER | + DEV_RX_OFFLOAD_VLAN_STRIP | + DEV_RX_OFFLOAD_SCATTER | + 0; + + dev_info->tx_offload_capa = dev_info->tx_queue_offload_capa | + DEV_TX_OFFLOAD_MULTI_SEGS | + DEV_TX_OFFLOAD_TCP_TSO | + 0; + + dev_info->rx_desc_lim = rx_desc_lim; + dev_info->tx_desc_lim = tx_desc_lim; + + /* Driver-preferred Rx/Tx parameters */ + dev_info->default_rxportconf.burst_size = 32; + dev_info->default_txportconf.burst_size = 32; + dev_info->default_rxportconf.nb_queues = 1; + dev_info->default_txportconf.nb_queues = 1; + dev_info->default_rxportconf.ring_size = IONIC_DEF_TXRX_DESC; + dev_info->default_txportconf.ring_size = IONIC_DEF_TXRX_DESC; + + return 0; +} + +static int +ionic_flow_ctrl_get(struct rte_eth_dev *eth_dev, + struct rte_eth_fc_conf *fc_conf) +{ + struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev); + struct ionic_adapter *adapter = lif->adapter; + struct ionic_dev *idev = &adapter->idev; + + if (idev->port_info) { + fc_conf->autoneg = idev->port_info->config.an_enable; + + if (idev->port_info->config.pause_type) + fc_conf->mode = RTE_FC_FULL; + else + fc_conf->mode = RTE_FC_NONE; + } + + return 0; +} + +static int +ionic_flow_ctrl_set(struct rte_eth_dev *eth_dev, + struct rte_eth_fc_conf *fc_conf) +{ + struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev); + struct 
ionic_adapter *adapter = lif->adapter;
+	struct ionic_dev *idev = &adapter->idev;
+	uint8_t pause_type = IONIC_PORT_PAUSE_TYPE_NONE;
+	uint8_t an_enable;
+
+	switch (fc_conf->mode) {
+	case RTE_FC_NONE:
+		pause_type = IONIC_PORT_PAUSE_TYPE_NONE;
+		break;
+	case RTE_FC_FULL:
+		pause_type = IONIC_PORT_PAUSE_TYPE_LINK;
+		break;
+	case RTE_FC_RX_PAUSE:
+	case RTE_FC_TX_PAUSE:
+		return -ENOTSUP;
+	}
+
+	an_enable = fc_conf->autoneg;
+
+	ionic_dev_cmd_port_pause(idev, pause_type);
+	ionic_dev_cmd_port_autoneg(idev, an_enable);
+
+	return 0;
+}
+
+static int
+ionic_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
+{
+	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
+	struct rte_eth_rxmode *rxmode;
+	rxmode = &eth_dev->data->dev_conf.rxmode;
+	int i;
+
+	if (mask & ETH_VLAN_STRIP_MASK) {
+		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
+			for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+				struct ionic_qcq *rxq =
+					eth_dev->data->rx_queues[i];
+				rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+			}
+			lif->features |= IONIC_ETH_HW_VLAN_RX_STRIP;
+		} else {
+			for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+				struct ionic_qcq *rxq =
+					eth_dev->data->rx_queues[i];
+				rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+			}
+			lif->features &= ~IONIC_ETH_HW_VLAN_RX_STRIP;
+		}
+	}
+
+	if (mask & ETH_VLAN_FILTER_MASK) {
+		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+			lif->features |= IONIC_ETH_HW_VLAN_RX_FILTER;
+		else
+			lif->features &= ~IONIC_ETH_HW_VLAN_RX_FILTER;
+	}
+
+	ionic_lif_set_features(lif);
+
+	return 0;
+}
+
+static int
+ionic_dev_rss_reta_update(struct rte_eth_dev *eth_dev,
+		struct rte_eth_rss_reta_entry64 *reta_conf,
+		uint16_t reta_size)
+{
+	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
+	struct ionic_adapter *adapter = lif->adapter;
+	struct ionic_identity *ident = &adapter->ident;
+	uint32_t i, j, index, num;
+
+	IONIC_PRINT_CALL();
+
+	if (!lif->rss_ind_tbl) {
+		IONIC_PRINT(ERR, "RSS RETA not initialized, "
+			"can't update the table");
+		return -EINVAL;
+	}
+
+	if (reta_size != ident->lif.eth.rss_ind_tbl_sz) {
+		IONIC_PRINT(ERR, "The size of hash lookup table configured "
+			"(%d) doesn't match the number the hardware supports "
+			"(%d)",
+			reta_size, ident->lif.eth.rss_ind_tbl_sz);
+		return -EINVAL;
+	}
+
+	num = lif->adapter->ident.lif.eth.rss_ind_tbl_sz / RTE_RETA_GROUP_SIZE;
+
+	for (i = 0; i < num; i++) {
+		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++) {
+			if (reta_conf[i].mask & ((uint64_t)1 << j)) {
+				index = (i * RTE_RETA_GROUP_SIZE) + j;
+				lif->rss_ind_tbl[index] = reta_conf[i].reta[j];
+			}
+		}
+	}
+
+	return ionic_lif_rss_config(lif, lif->rss_types, NULL, NULL);
+}
+
+static int
+ionic_dev_rss_reta_query(struct rte_eth_dev *eth_dev,
+		struct rte_eth_rss_reta_entry64 *reta_conf,
+		uint16_t reta_size)
+{
+	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
+	struct ionic_adapter *adapter = lif->adapter;
+	struct ionic_identity *ident = &adapter->ident;
+	int i, num;
+
+	IONIC_PRINT_CALL();
+
+	if (reta_size != ident->lif.eth.rss_ind_tbl_sz) {
+		IONIC_PRINT(ERR, "The size of hash lookup table configured "
+			"(%d) doesn't match the number the hardware supports "
+			"(%d)",
+			reta_size, ident->lif.eth.rss_ind_tbl_sz);
+		return -EINVAL;
+	}
+
+	if (!lif->rss_ind_tbl) {
+		IONIC_PRINT(ERR, "RSS RETA has not been built yet");
+		return -EINVAL;
+	}
+
+	num = reta_size / RTE_RETA_GROUP_SIZE;
+
+	for (i = 0; i < num; i++) {
+		memcpy(reta_conf->reta,
+			&lif->rss_ind_tbl[i * RTE_RETA_GROUP_SIZE],
+			RTE_RETA_GROUP_SIZE);
+		reta_conf++;
+	}
+
+	return 0;
+}
+
+static
int +ionic_dev_rss_hash_conf_get(struct rte_eth_dev *eth_dev, + struct rte_eth_rss_conf *rss_conf) +{ + struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev); + uint64_t rss_hf = 0; + + IONIC_PRINT_CALL(); + + if (!lif->rss_ind_tbl) { + IONIC_PRINT(NOTICE, "RSS not enabled"); + return 0; + } + + /* Get key value (if not null, rss_key is 40-byte) */ + if (rss_conf->rss_key != NULL && + rss_conf->rss_key_len >= IONIC_RSS_HASH_KEY_SIZE) + memcpy(rss_conf->rss_key, lif->rss_hash_key, + IONIC_RSS_HASH_KEY_SIZE); + + if (lif->rss_types & IONIC_RSS_TYPE_IPV4) + rss_hf |= ETH_RSS_IPV4; + if (lif->rss_types & IONIC_RSS_TYPE_IPV4_TCP) + rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP; + if (lif->rss_types & IONIC_RSS_TYPE_IPV4_UDP) + rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP; + if (lif->rss_types & IONIC_RSS_TYPE_IPV6) + rss_hf |= ETH_RSS_IPV6; + if (lif->rss_types & IONIC_RSS_TYPE_IPV6_TCP) + rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP; + if (lif->rss_types & IONIC_RSS_TYPE_IPV6_UDP) + rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP; + + rss_conf->rss_hf = rss_hf; + + return 0; +} + +static int +ionic_dev_rss_hash_update(struct rte_eth_dev *eth_dev, + struct rte_eth_rss_conf *rss_conf) +{ + struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev); + uint32_t rss_types = 0; + uint8_t *key = NULL; + + IONIC_PRINT_CALL(); + + if (rss_conf->rss_key) + key = rss_conf->rss_key; + + if ((rss_conf->rss_hf & IONIC_ETH_RSS_OFFLOAD_ALL) == 0) { + /* + * Can't disable rss through hash flags, + * if it is enabled by default during init + */ + if (lif->rss_ind_tbl) + return -EINVAL; + } else { + /* Can't enable rss if disabled by default during init */ + if (!lif->rss_ind_tbl) + return -EINVAL; + + if (rss_conf->rss_hf & ETH_RSS_IPV4) + rss_types |= IONIC_RSS_TYPE_IPV4; + if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) + rss_types |= IONIC_RSS_TYPE_IPV4_TCP; + if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) + rss_types |= IONIC_RSS_TYPE_IPV4_UDP; + if (rss_conf->rss_hf & ETH_RSS_IPV6) + rss_types |= IONIC_RSS_TYPE_IPV6; + if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) + rss_types |= IONIC_RSS_TYPE_IPV6_TCP; + if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) + rss_types |= IONIC_RSS_TYPE_IPV6_UDP; + + ionic_lif_rss_config(lif, rss_types, key, NULL); + } + + return 0; +} + +static int +ionic_dev_stats_get(struct rte_eth_dev *eth_dev, + struct rte_eth_stats *stats) +{ + struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev); + + ionic_lif_get_stats(lif, stats); + + return 0; +} + +static int +ionic_dev_stats_reset(struct rte_eth_dev *eth_dev) +{ + struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev); + + IONIC_PRINT_CALL(); + + ionic_lif_reset_stats(lif); + + return 0; +} + +static int +ionic_dev_xstats_get_names(__rte_unused struct rte_eth_dev *eth_dev, + struct rte_eth_xstat_name *xstats_names, + __rte_unused unsigned int size) +{ + unsigned int i; + + if (xstats_names != NULL) { + for (i = 0; i < IONIC_NB_HW_STATS; i++) { + snprintf(xstats_names[i].name, + sizeof(xstats_names[i].name), + "%s", rte_ionic_xstats_strings[i].name); + } + } + + return IONIC_NB_HW_STATS; +} + +static int +ionic_dev_xstats_get_names_by_id(struct rte_eth_dev *eth_dev, + struct rte_eth_xstat_name *xstats_names, const uint64_t *ids, + unsigned int limit) +{ + struct rte_eth_xstat_name xstats_names_copy[IONIC_NB_HW_STATS]; + uint16_t i; + + if (!ids) { + if (xstats_names != NULL) { + for (i = 0; i < IONIC_NB_HW_STATS; i++) { + snprintf(xstats_names[i].name, + sizeof(xstats_names[i].name), + "%s", rte_ionic_xstats_strings[i].name); + } + } + + return IONIC_NB_HW_STATS; + } + + 
ionic_dev_xstats_get_names_by_id(eth_dev, xstats_names_copy, NULL, + IONIC_NB_HW_STATS); + + for (i = 0; i < limit; i++) { + if (ids[i] >= IONIC_NB_HW_STATS) { + IONIC_PRINT(ERR, "id value isn't valid"); + return -1; + } + + strcpy(xstats_names[i].name, xstats_names_copy[ids[i]].name); + } + + return limit; +} + +static int +ionic_dev_xstats_get(struct rte_eth_dev *eth_dev, struct rte_eth_xstat *xstats, + unsigned int n) +{ + struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev); + struct ionic_lif_stats hw_stats; + uint16_t i; + + if (n < IONIC_NB_HW_STATS) + return IONIC_NB_HW_STATS; + + ionic_lif_get_hw_stats(lif, &hw_stats); + + for (i = 0; i < IONIC_NB_HW_STATS; i++) { + xstats[i].value = *(uint64_t *)(((char *)&hw_stats) + + rte_ionic_xstats_strings[i].offset); + xstats[i].id = i; + } + + return IONIC_NB_HW_STATS; +} + +static int +ionic_dev_xstats_get_by_id(struct rte_eth_dev *eth_dev, const uint64_t *ids, + uint64_t *values, unsigned int n) +{ + struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev); + struct ionic_lif_stats hw_stats; + uint64_t values_copy[IONIC_NB_HW_STATS]; + uint16_t i; + + if (!ids) { + if (!ids && n < IONIC_NB_HW_STATS) + return IONIC_NB_HW_STATS; + + ionic_lif_get_hw_stats(lif, &hw_stats); + + for (i = 0; i < IONIC_NB_HW_STATS; i++) { + values[i] = *(uint64_t *)(((char *)&hw_stats) + + rte_ionic_xstats_strings[i].offset); + } + + return IONIC_NB_HW_STATS; + } + + ionic_dev_xstats_get_by_id(eth_dev, NULL, values_copy, + IONIC_NB_HW_STATS); + + for (i = 0; i < n; i++) { + if (ids[i] >= IONIC_NB_HW_STATS) { + IONIC_PRINT(ERR, "id value isn't valid"); + return -1; + } + + values[i] = values_copy[ids[i]]; + } + + return n; +} + +static int +ionic_dev_xstats_reset(struct rte_eth_dev *eth_dev) +{ + struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev); + + ionic_lif_reset_hw_stats(lif); + + return 0; +} + +static int +ionic_dev_configure(struct rte_eth_dev *eth_dev) +{ + struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev); + int err; + + IONIC_PRINT_CALL(); + + err = ionic_lif_configure(lif); + if (err) { + IONIC_PRINT(ERR, "Cannot configure LIF: %d", err); + return err; + } + + return 0; +} + +static inline uint32_t +ionic_parse_link_speeds(uint16_t link_speeds) +{ + if (link_speeds & ETH_LINK_SPEED_100G) + return 100000; + else if (link_speeds & ETH_LINK_SPEED_50G) + return 50000; + else if (link_speeds & ETH_LINK_SPEED_40G) + return 40000; + else if (link_speeds & ETH_LINK_SPEED_25G) + return 25000; + else if (link_speeds & ETH_LINK_SPEED_10G) + return 10000; + else + return 0; +} + +/* + * Configure device link speed and setup link. + * It returns 0 on success. 
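+ * If ETH_LINK_SPEED_FIXED is set in dev_conf.link_speeds, the highest
+ * speed flag also present (e.g. ETH_LINK_SPEED_25G for a fixed 25G
+ * link) is translated by ionic_parse_link_speeds() and programmed via
+ * ionic_dev_cmd_port_speed().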
+ */
+static int
+ionic_dev_start(struct rte_eth_dev *eth_dev)
+{
+	struct rte_eth_conf *dev_conf = &eth_dev->data->dev_conf;
+	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
+	struct ionic_adapter *adapter = lif->adapter;
+	struct ionic_dev *idev = &adapter->idev;
+	uint32_t allowed_speeds;
+	int err;
+
+	IONIC_PRINT_CALL();
+
+	allowed_speeds =
+		ETH_LINK_SPEED_FIXED |
+		ETH_LINK_SPEED_10G |
+		ETH_LINK_SPEED_25G |
+		ETH_LINK_SPEED_40G |
+		ETH_LINK_SPEED_50G |
+		ETH_LINK_SPEED_100G;
+
+	if (dev_conf->link_speeds & ~allowed_speeds) {
+		IONIC_PRINT(ERR, "Invalid link setting");
+		return -EINVAL;
+	}
+
+	err = ionic_lif_start(lif);
+	if (err) {
+		IONIC_PRINT(ERR, "Cannot start LIF: %d", err);
+		return err;
+	}
+
+	if (eth_dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
+		uint32_t speed = ionic_parse_link_speeds(dev_conf->link_speeds);
+
+		if (speed)
+			ionic_dev_cmd_port_speed(idev, speed);
+	}
+
+	ionic_dev_link_update(eth_dev, 0);
+
+	return 0;
+}
+
+/*
+ * Stop device: disable rx and tx functions to allow for reconfiguring.
+ */
+static void
+ionic_dev_stop(struct rte_eth_dev *eth_dev)
+{
+	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
+	int err;
+
+	IONIC_PRINT_CALL();
+
+	err = ionic_lif_stop(lif);
+	if (err)
+		IONIC_PRINT(ERR, "Cannot stop LIF: %d", err);
+}
+
+/*
+ * Reset and stop device.
+ */
+static void
+ionic_dev_close(struct rte_eth_dev *eth_dev)
+{
+	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
+	int err;
+
+	IONIC_PRINT_CALL();
+
+	err = ionic_lif_stop(lif);
+	if (err) {
+		IONIC_PRINT(ERR, "Cannot stop LIF: %d", err);
+		return;
+	}
+
+	err = eth_ionic_dev_uninit(eth_dev);
+	if (err) {
+		IONIC_PRINT(ERR, "Cannot destroy LIF: %d", err);
+		return;
+	}
+}
+
+static int
+eth_ionic_dev_init(struct rte_eth_dev *eth_dev, void *init_params)
+{
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
+	struct ionic_adapter *adapter = (struct ionic_adapter *)init_params;
+	int err;
+
+	IONIC_PRINT_CALL();
+
+	eth_dev->dev_ops = &ionic_eth_dev_ops;
+	eth_dev->rx_pkt_burst = &ionic_recv_pkts;
+	eth_dev->tx_pkt_burst = &ionic_xmit_pkts;
+	eth_dev->tx_pkt_prepare = &ionic_prep_pkts;
+
+	/* Multi-process not supported, primary does initialization anyway */
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return 0;
+
+	rte_eth_copy_pci_info(eth_dev, pci_dev);
+
+	lif->index = adapter->nlifs;
+	lif->eth_dev = eth_dev;
+	lif->adapter = adapter;
+	adapter->lifs[adapter->nlifs] = lif;
+
+	IONIC_PRINT(DEBUG, "Up to %u MAC addresses supported",
+		adapter->max_mac_addrs);
+
+	/* Allocate memory for storing MAC addresses */
+	eth_dev->data->mac_addrs = rte_zmalloc("ionic",
+		RTE_ETHER_ADDR_LEN * adapter->max_mac_addrs, 0);
+
+	if (eth_dev->data->mac_addrs == NULL) {
+		IONIC_PRINT(ERR, "Failed to allocate %u bytes needed to "
+			"store MAC addresses",
+			RTE_ETHER_ADDR_LEN * adapter->max_mac_addrs);
+		err = -ENOMEM;
+		goto err;
+	}
+
+	err = ionic_lif_alloc(lif);
+	if (err) {
+		IONIC_PRINT(ERR, "Cannot allocate LIFs: %d, aborting",
+			err);
+		goto err;
+	}
+
+	err = ionic_lif_init(lif);
+	if (err) {
+		IONIC_PRINT(ERR, "Cannot init LIFs: %d, aborting", err);
+		goto err_free_lif;
+	}
+
+	/* Copy the MAC address */
+	rte_ether_addr_copy((struct rte_ether_addr *)lif->mac_addr,
+		&eth_dev->data->mac_addrs[0]);
+
+	IONIC_PRINT(DEBUG, "Port %u initialized", eth_dev->data->port_id);
+
+	return 0;
+
+err_free_lif:
+	ionic_lif_free(lif);
+err:
+	return err;
+}
+
+static int
+eth_ionic_dev_uninit(struct rte_eth_dev
*eth_dev) +{ + struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev); + struct ionic_adapter *adapter = lif->adapter; + + IONIC_PRINT_CALL(); + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; + + adapter->lifs[lif->index] = NULL; + + ionic_lif_deinit(lif); + ionic_lif_free(lif); + + eth_dev->dev_ops = NULL; + eth_dev->rx_pkt_burst = NULL; + eth_dev->tx_pkt_burst = NULL; + eth_dev->tx_pkt_prepare = NULL; + + return 0; +} + +static int +ionic_configure_intr(struct ionic_adapter *adapter) +{ + struct rte_pci_device *pci_dev = adapter->pci_dev; + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + int err; + + IONIC_PRINT(DEBUG, "Configuring %u intrs", adapter->nintrs); + + if (rte_intr_efd_enable(intr_handle, adapter->nintrs)) { + IONIC_PRINT(ERR, "Fail to create eventfd"); + return -1; + } + + if (rte_intr_dp_is_en(intr_handle)) + IONIC_PRINT(DEBUG, + "Packet I/O interrupt on datapath is enabled"); + + if (!intr_handle->intr_vec) { + intr_handle->intr_vec = rte_zmalloc("intr_vec", + adapter->nintrs * sizeof(int), 0); + + if (!intr_handle->intr_vec) { + IONIC_PRINT(ERR, "Failed to allocate %u vectors", + adapter->nintrs); + return -ENOMEM; + } + } + + err = rte_intr_callback_register(intr_handle, + ionic_dev_interrupt_handler, + adapter); + + if (err) { + IONIC_PRINT(ERR, + "Failure registering interrupts handler (%d)", + err); + return err; + } + + /* enable intr mapping */ + err = rte_intr_enable(intr_handle); + + if (err) { + IONIC_PRINT(ERR, "Failure enabling interrupts (%d)", err); + return err; + } + + return 0; +} + +static void +ionic_unconfigure_intr(struct ionic_adapter *adapter) +{ + struct rte_pci_device *pci_dev = adapter->pci_dev; + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + + rte_intr_disable(intr_handle); + + rte_intr_callback_unregister(intr_handle, + ionic_dev_interrupt_handler, + adapter); +} + +static int +eth_ionic_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, + struct rte_pci_device *pci_dev) +{ + char name[RTE_ETH_NAME_MAX_LEN]; + struct rte_mem_resource *resource; + struct ionic_adapter *adapter; + struct ionic_hw *hw; + unsigned long i; + int err; + + /* Check structs (trigger error at compilation time) */ + ionic_struct_size_checks(); + + /* Multi-process not supported */ + if (rte_eal_process_type() != RTE_PROC_PRIMARY) { + err = -EPERM; + goto err; + } + + IONIC_PRINT(DEBUG, "Initializing device %s", + pci_dev->device.name); + + adapter = rte_zmalloc("ionic", sizeof(*adapter), 0); + if (!adapter) { + IONIC_PRINT(ERR, "OOM"); + err = -ENOMEM; + goto err; + } + + adapter->pci_dev = pci_dev; + hw = &adapter->hw; + + hw->device_id = pci_dev->id.device_id; + hw->vendor_id = pci_dev->id.vendor_id; + + err = ionic_init_mac(hw); + if (err != 0) { + IONIC_PRINT(ERR, "Mac init failed: %d", err); + err = -EIO; + goto err_free_adapter; + } + + adapter->is_mgmt_nic = (pci_dev->id.device_id == IONIC_DEV_ID_ETH_MGMT); + + adapter->num_bars = 0; + for (i = 0; i < PCI_MAX_RESOURCE && i < IONIC_BARS_MAX; i++) { + resource = &pci_dev->mem_resource[i]; + if (resource->phys_addr == 0 || resource->len == 0) + continue; + adapter->bars[adapter->num_bars].vaddr = resource->addr; + adapter->bars[adapter->num_bars].bus_addr = resource->phys_addr; + adapter->bars[adapter->num_bars].len = resource->len; + adapter->num_bars++; + } + + /* Discover ionic dev resources */ + + err = ionic_setup(adapter); + if (err) { + IONIC_PRINT(ERR, "Cannot setup device: %d, aborting", err); + goto err_free_adapter; + } + + err = ionic_identify(adapter); + 
if (err) { + IONIC_PRINT(ERR, "Cannot identify device: %d, aborting", + err); + goto err_free_adapter; + } + + err = ionic_init(adapter); + if (err) { + IONIC_PRINT(ERR, "Cannot init device: %d, aborting", err); + goto err_free_adapter; + } + + /* Configure the ports */ + err = ionic_port_identify(adapter); + if (err) { + IONIC_PRINT(ERR, "Cannot identify port: %d, aborting", + err); + goto err_free_adapter; + } + + err = ionic_port_init(adapter); + if (err) { + IONIC_PRINT(ERR, "Cannot init port: %d, aborting", err); + goto err_free_adapter; + } + + /* Configure LIFs */ + err = ionic_lif_identify(adapter); + if (err) { + IONIC_PRINT(ERR, "Cannot identify lif: %d, aborting", err); + goto err_free_adapter; + } + + /* Allocate and init LIFs */ + err = ionic_lifs_size(adapter); + if (err) { + IONIC_PRINT(ERR, "Cannot size LIFs: %d, aborting", err); + goto err_free_adapter; + } + + adapter->max_mac_addrs = adapter->ident.lif.eth.max_ucast_filters; + + adapter->nlifs = 0; + for (i = 0; i < adapter->ident.dev.nlifs; i++) { + snprintf(name, sizeof(name), "net_%s_lif_%lu", + pci_dev->device.name, i); + + err = rte_eth_dev_create(&pci_dev->device, name, + sizeof(struct ionic_lif), + NULL, NULL, + eth_ionic_dev_init, adapter); + if (err) { + IONIC_PRINT(ERR, "Cannot create eth device for " + "ionic lif %s", name); + break; + } + + adapter->nlifs++; + } + + err = ionic_configure_intr(adapter); + + if (err) { + IONIC_PRINT(ERR, "Failed to configure interrupts"); + goto err_free_adapter; + } + + return 0; + +err_free_adapter: + rte_free(adapter); +err: + return err; +} + +static int +eth_ionic_pci_remove(struct rte_pci_device *pci_dev __rte_unused) +{ + char name[RTE_ETH_NAME_MAX_LEN]; + struct ionic_adapter *adapter = NULL; + struct rte_eth_dev *eth_dev; + struct ionic_lif *lif; + uint32_t i; + + /* Adapter lookup is using (the first) eth_dev name */ + snprintf(name, sizeof(name), "net_%s_lif_0", + pci_dev->device.name); + + eth_dev = rte_eth_dev_allocated(name); + if (eth_dev) { + lif = IONIC_ETH_DEV_TO_LIF(eth_dev); + adapter = lif->adapter; + } + + if (adapter) { + ionic_unconfigure_intr(adapter); + + for (i = 0; i < adapter->nlifs; i++) { + lif = adapter->lifs[i]; + rte_eth_dev_destroy(lif->eth_dev, eth_ionic_dev_uninit); + } + + rte_free(adapter); + } + + return 0; +} + +static struct rte_pci_driver rte_ionic_pmd = { + .id_table = pci_id_ionic_map, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, + .probe = eth_ionic_pci_probe, + .remove = eth_ionic_pci_remove, +}; + +RTE_PMD_REGISTER_PCI(net_ionic, rte_ionic_pmd); +RTE_PMD_REGISTER_PCI_TABLE(net_ionic, pci_id_ionic_map); +RTE_PMD_REGISTER_KMOD_DEP(net_ionic, "* igb_uio | uio_pci_generic | vfio-pci"); + +RTE_INIT(ionic_init_log) +{ + ionic_logtype = rte_log_register("pmd.net.ionic"); + if (ionic_logtype >= 0) + rte_log_set_level(ionic_logtype, RTE_LOG_NOTICE); +} diff --git a/src/spdk/dpdk/drivers/net/ionic/ionic_ethdev.h b/src/spdk/dpdk/drivers/net/ionic/ionic_ethdev.h new file mode 100644 index 000000000..578e2301f --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ionic/ionic_ethdev.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) + * Copyright(c) 2018-2019 Pensando Systems, Inc. All rights reserved. 
+ */ + +#ifndef _IONIC_ETHDEV_H_ +#define _IONIC_ETHDEV_H_ + +#define IONIC_ETH_RSS_OFFLOAD_ALL ( \ + ETH_RSS_IPV4 | \ + ETH_RSS_NONFRAG_IPV4_TCP | \ + ETH_RSS_NONFRAG_IPV4_UDP | \ + ETH_RSS_IPV6 | \ + ETH_RSS_NONFRAG_IPV6_TCP | \ + ETH_RSS_NONFRAG_IPV6_UDP) + +#define IONIC_ETH_DEV_TO_LIF(eth_dev) ((struct ionic_lif *) \ + (eth_dev)->data->dev_private) +#define IONIC_ETH_DEV_TO_ADAPTER(eth_dev) \ + (IONIC_ETH_DEV_TO_LIF(eth_dev)->adapter) + +#endif /* _IONIC_ETHDEV_H_ */ + diff --git a/src/spdk/dpdk/drivers/net/ionic/ionic_if.h b/src/spdk/dpdk/drivers/net/ionic/ionic_if.h new file mode 100644 index 000000000..f83c8711b --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ionic/ionic_if.h @@ -0,0 +1,2491 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB OR BSD-3-Clause */ +/* Copyright (c) 2017-2019 Pensando Systems, Inc. All rights reserved. */ + +#ifndef _IONIC_IF_H_ +#define _IONIC_IF_H_ + +#pragma pack(push, 1) + +#define IONIC_DEV_INFO_SIGNATURE 0x44455649 /* 'DEVI' */ +#define IONIC_DEV_INFO_VERSION 1 +#define IONIC_IFNAMSIZ 16 + +/** + * Commands + */ +enum ionic_cmd_opcode { + IONIC_CMD_NOP = 0, + + /* Device commands */ + IONIC_CMD_IDENTIFY = 1, + IONIC_CMD_INIT = 2, + IONIC_CMD_RESET = 3, + IONIC_CMD_GETATTR = 4, + IONIC_CMD_SETATTR = 5, + + /* Port commands */ + IONIC_CMD_PORT_IDENTIFY = 10, + IONIC_CMD_PORT_INIT = 11, + IONIC_CMD_PORT_RESET = 12, + IONIC_CMD_PORT_GETATTR = 13, + IONIC_CMD_PORT_SETATTR = 14, + + /* LIF commands */ + IONIC_CMD_LIF_IDENTIFY = 20, + IONIC_CMD_LIF_INIT = 21, + IONIC_CMD_LIF_RESET = 22, + IONIC_CMD_LIF_GETATTR = 23, + IONIC_CMD_LIF_SETATTR = 24, + + IONIC_CMD_RX_MODE_SET = 30, + IONIC_CMD_RX_FILTER_ADD = 31, + IONIC_CMD_RX_FILTER_DEL = 32, + + /* Queue commands */ + IONIC_CMD_Q_INIT = 40, + IONIC_CMD_Q_CONTROL = 41, + + /* RDMA commands */ + IONIC_CMD_RDMA_RESET_LIF = 50, + IONIC_CMD_RDMA_CREATE_EQ = 51, + IONIC_CMD_RDMA_CREATE_CQ = 52, + IONIC_CMD_RDMA_CREATE_ADMINQ = 53, + + /* QoS commands */ + IONIC_CMD_QOS_CLASS_IDENTIFY = 240, + IONIC_CMD_QOS_CLASS_INIT = 241, + IONIC_CMD_QOS_CLASS_RESET = 242, + + /* Firmware commands */ + IONIC_CMD_FW_DOWNLOAD = 254, + IONIC_CMD_FW_CONTROL = 255, +}; + +/** + * Command Return codes + */ +enum ionic_status_code { + IONIC_RC_SUCCESS = 0, /* Success */ + IONIC_RC_EVERSION = 1, /* Incorrect version for request */ + IONIC_RC_EOPCODE = 2, /* Invalid cmd opcode */ + IONIC_RC_EIO = 3, /* I/O error */ + IONIC_RC_EPERM = 4, /* Permission denied */ + IONIC_RC_EQID = 5, /* Bad qid */ + IONIC_RC_EQTYPE = 6, /* Bad qtype */ + IONIC_RC_ENOENT = 7, /* No such element */ + IONIC_RC_EINTR = 8, /* operation interrupted */ + IONIC_RC_EAGAIN = 9, /* Try again */ + IONIC_RC_ENOMEM = 10, /* Out of memory */ + IONIC_RC_EFAULT = 11, /* Bad address */ + IONIC_RC_EBUSY = 12, /* Device or resource busy */ + IONIC_RC_EEXIST = 13, /* object already exists */ + IONIC_RC_EINVAL = 14, /* Invalid argument */ + IONIC_RC_ENOSPC = 15, /* No space left or alloc failure */ + IONIC_RC_ERANGE = 16, /* Parameter out of range */ + IONIC_RC_BAD_ADDR = 17, /* Descriptor contains a bad ptr */ + IONIC_RC_DEV_CMD = 18, /* Device cmd attempted on AdminQ */ + IONIC_RC_ENOSUPP = 19, /* Operation not supported */ + IONIC_RC_ERROR = 29, /* Generic error */ + + IONIC_RC_ERDMA = 30, /* Generic RDMA error */ +}; + +enum ionic_notifyq_opcode { + IONIC_EVENT_LINK_CHANGE = 1, + IONIC_EVENT_RESET = 2, + IONIC_EVENT_HEARTBEAT = 3, + IONIC_EVENT_LOG = 4, +}; + +/** + * struct cmd - General admin command format + * @opcode: Opcode for the command + * @lif_index: LIF index 
+ * @cmd_data: Opcode-specific command bytes + */ +struct ionic_admin_cmd { + u8 opcode; + u8 rsvd; + __le16 lif_index; + u8 cmd_data[60]; +}; + +/** + * struct ionic_admin_comp - General admin command completion format + * @status: The status of the command (enum status_code) + * @comp_index: The index in the descriptor ring for which this + * is the completion. + * @cmd_data: Command-specific bytes. + * @color: Color bit. (Always 0 for commands issued to the + * Device Cmd Registers.) + */ +struct ionic_admin_comp { + u8 status; + u8 rsvd; + __le16 comp_index; + u8 cmd_data[11]; + u8 color; +#define IONIC_COMP_COLOR_MASK 0x80 +}; + +static inline u8 color_match(u8 color, u8 done_color) +{ + return (!!(color & IONIC_COMP_COLOR_MASK)) == done_color; +} + +/** + * struct ionic_nop_cmd - NOP command + * @opcode: opcode + */ +struct ionic_nop_cmd { + u8 opcode; + u8 rsvd[63]; +}; + +/** + * struct ionic_nop_comp - NOP command completion + * @status: The status of the command (enum status_code) + */ +struct ionic_nop_comp { + u8 status; + u8 rsvd[15]; +}; + +/** + * struct ionic_dev_init_cmd - Device init command + * @opcode: opcode + * @type: device type + */ +struct ionic_dev_init_cmd { + u8 opcode; + u8 type; + u8 rsvd[62]; +}; + +/** + * struct init_comp - Device init command completion + * @status: The status of the command (enum status_code) + */ +struct ionic_dev_init_comp { + u8 status; + u8 rsvd[15]; +}; + +/** + * struct ionic_dev_reset_cmd - Device reset command + * @opcode: opcode + */ +struct ionic_dev_reset_cmd { + u8 opcode; + u8 rsvd[63]; +}; + +/** + * struct reset_comp - Reset command completion + * @status: The status of the command (enum status_code) + */ +struct ionic_dev_reset_comp { + u8 status; + u8 rsvd[15]; +}; + +#define IONIC_IDENTITY_VERSION_1 1 + +/** + * struct ionic_dev_identify_cmd - Driver/device identify command + * @opcode: opcode + * @ver: Highest version of identify supported by driver + */ +struct ionic_dev_identify_cmd { + u8 opcode; + u8 ver; + u8 rsvd[62]; +}; + +/** + * struct dev_identify_comp - Driver/device identify command completion + * @status: The status of the command (enum status_code) + * @ver: Version of identify returned by device + */ +struct ionic_dev_identify_comp { + u8 status; + u8 ver; + u8 rsvd[14]; +}; + +enum ionic_os_type { + IONIC_OS_TYPE_LINUX = 1, + IONIC_OS_TYPE_WIN = 2, + IONIC_OS_TYPE_DPDK = 3, + IONIC_OS_TYPE_FREEBSD = 4, + IONIC_OS_TYPE_IPXE = 5, + IONIC_OS_TYPE_ESXI = 6, +}; + +/** + * union drv_identity - driver identity information + * @os_type: OS type (see enum os_type) + * @os_dist: OS distribution, numeric format + * @os_dist_str: OS distribution, string format + * @kernel_ver: Kernel version, numeric format + * @kernel_ver_str: Kernel version, string format + * @driver_ver_str: Driver version, string format + */ +union ionic_drv_identity { + struct { + __le32 os_type; + __le32 os_dist; + char os_dist_str[128]; + __le32 kernel_ver; + char kernel_ver_str[32]; + char driver_ver_str[32]; + }; + __le32 words[512]; +}; + +/** + * union dev_identity - device identity information + * @version: Version of device identify + * @type: Identify type (0 for now) + * @nports: Number of ports provisioned + * @nlifs: Number of LIFs provisioned + * @nintrs: Number of interrupts provisioned + * @ndbpgs_per_lif: Number of doorbell pages per LIF + * @intr_coal_mult: Interrupt coalescing multiplication factor. 
+ * Scale user-supplied interrupt coalescing + * value in usecs to device units using: + * device units = usecs * mult / div + * @intr_coal_div: Interrupt coalescing division factor. + * Scale user-supplied interrupt coalescing + * value in usecs to device units using: + * device units = usecs * mult / div + * + */ +union ionic_dev_identity { + struct { + u8 version; + u8 type; + u8 rsvd[2]; + u8 nports; + u8 rsvd2[3]; + __le32 nlifs; + __le32 nintrs; + __le32 ndbpgs_per_lif; + __le32 intr_coal_mult; + __le32 intr_coal_div; + }; + __le32 words[512]; +}; + +enum ionic_lif_type { + IONIC_LIF_TYPE_CLASSIC = 0, + IONIC_LIF_TYPE_MACVLAN = 1, + IONIC_LIF_TYPE_NETQUEUE = 2, +}; + +/** + * struct ionic_lif_identify_cmd - lif identify command + * @opcode: opcode + * @type: lif type (enum lif_type) + * @ver: version of identify returned by device + */ +struct ionic_lif_identify_cmd { + u8 opcode; + u8 type; + u8 ver; + u8 rsvd[61]; +}; + +/** + * struct ionic_lif_identify_comp - lif identify command completion + * @status: status of the command (enum status_code) + * @ver: version of identify returned by device + */ +struct ionic_lif_identify_comp { + u8 status; + u8 ver; + u8 rsvd2[14]; +}; + +enum ionic_lif_capability { + IONIC_LIF_CAP_ETH = BIT(0), + IONIC_LIF_CAP_RDMA = BIT(1), +}; + +/** + * Logical Queue Types + */ +enum ionic_logical_qtype { + IONIC_QTYPE_ADMINQ = 0, + IONIC_QTYPE_NOTIFYQ = 1, + IONIC_QTYPE_RXQ = 2, + IONIC_QTYPE_TXQ = 3, + IONIC_QTYPE_EQ = 4, + IONIC_QTYPE_MAX = 16, +}; + +/** + * struct ionic_lif_logical_qtype - Descriptor of logical to hardware queue + * type. + * @qtype: Hardware Queue Type. + * @qid_count: Number of Queue IDs of the logical type. + * @qid_base: Minimum Queue ID of the logical type. + */ +struct ionic_lif_logical_qtype { + u8 qtype; + u8 rsvd[3]; + __le32 qid_count; + __le32 qid_base; +}; + +enum ionic_lif_state { + IONIC_LIF_DISABLE = 0, + IONIC_LIF_ENABLE = 1, + IONIC_LIF_HANG_RESET = 2, +}; + +/** + * LIF configuration + * @state: lif state (enum lif_state) + * @name: lif name + * @mtu: mtu + * @mac: station mac address + * @features: features (enum ionic_eth_hw_features) + * @queue_count: queue counts per queue-type + */ +union ionic_lif_config { + struct { + u8 state; + u8 rsvd[3]; + char name[IONIC_IFNAMSIZ]; + __le32 mtu; + u8 mac[6]; + u8 rsvd2[2]; + __le64 features; + __le32 queue_count[IONIC_QTYPE_MAX]; + }; + __le32 words[64]; +}; + +/** + * struct ionic_lif_identity - lif identity information (type-specific) + * + * @capabilities LIF capabilities + * + * Ethernet: + * @version: Ethernet identify structure version. + * @features: Ethernet features supported on this lif type. + * @max_ucast_filters: Number of perfect unicast addresses supported. + * @max_mcast_filters: Number of perfect multicast addresses supported. + * @min_frame_size: Minimum size of frames to be sent + * @max_frame_size: Maximum size of frames to be sent + * @config: LIF config struct with features, mtu, mac, q counts + * + * RDMA: + * @version: RDMA version of opcodes and queue descriptors. + * @qp_opcodes: Number of rdma queue pair opcodes supported. + * @admin_opcodes: Number of rdma admin opcodes supported. + * @npts_per_lif: Page table size per lif + * @nmrs_per_lif: Number of memory regions per lif + * @nahs_per_lif: Number of address handles per lif + * @max_stride: Max work request stride. + * @cl_stride: Cache line stride. + * @pte_stride: Page table entry stride. + * @rrq_stride: Remote RQ work request stride. + * @rsq_stride: Remote SQ work request stride. 
+ * @dcqcn_profiles: Number of DCQCN profiles + * @aq_qtype: RDMA Admin Qtype. + * @sq_qtype: RDMA Send Qtype. + * @rq_qtype: RDMA Receive Qtype. + * @cq_qtype: RDMA Completion Qtype. + * @eq_qtype: RDMA Event Qtype. + */ +union ionic_lif_identity { + struct { + __le64 capabilities; + + struct { + u8 version; + u8 rsvd[3]; + __le32 max_ucast_filters; + __le32 max_mcast_filters; + __le16 rss_ind_tbl_sz; + __le32 min_frame_size; + __le32 max_frame_size; + u8 rsvd2[106]; + union ionic_lif_config config; + } eth; + + struct { + u8 version; + u8 qp_opcodes; + u8 admin_opcodes; + u8 rsvd; + __le32 npts_per_lif; + __le32 nmrs_per_lif; + __le32 nahs_per_lif; + u8 max_stride; + u8 cl_stride; + u8 pte_stride; + u8 rrq_stride; + u8 rsq_stride; + u8 dcqcn_profiles; + u8 rsvd_dimensions[10]; + struct ionic_lif_logical_qtype aq_qtype; + struct ionic_lif_logical_qtype sq_qtype; + struct ionic_lif_logical_qtype rq_qtype; + struct ionic_lif_logical_qtype cq_qtype; + struct ionic_lif_logical_qtype eq_qtype; + } rdma; + }; + __le32 words[512]; +}; + +/** + * struct ionic_lif_init_cmd - LIF init command + * @opcode: opcode + * @type: LIF type (enum lif_type) + * @index: LIF index + * @info_pa: destination address for lif info (struct ionic_lif_info) + */ +struct ionic_lif_init_cmd { + u8 opcode; + u8 type; + __le16 index; + __le32 rsvd; + __le64 info_pa; + u8 rsvd2[48]; +}; + +/** + * struct ionic_lif_init_comp - LIF init command completion + * @status: The status of the command (enum status_code) + */ +struct ionic_lif_init_comp { + u8 status; + u8 rsvd; + __le16 hw_index; + u8 rsvd2[12]; +}; + +/** + * struct ionic_q_init_cmd - Queue init command + * @opcode: opcode + * @type: Logical queue type + * @ver: Queue version (defines opcode/descriptor scope) + * @lif_index: LIF index + * @index: (lif, qtype) relative admin queue index + * @intr_index: Interrupt control register index + * @pid: Process ID + * @flags: + * IRQ: Interrupt requested on completion + * ENA: Enable the queue. If ENA=0 the queue is initialized + * but remains disabled, to be later enabled with the + * Queue Enable command. If ENA=1, then queue is + * initialized and then enabled. + * SG: Enable Scatter-Gather on the queue. + * in number of descs. The actual ring size is + * (1 << ring_size). For example, to + * select a ring size of 64 descriptors write + * ring_size = 6. The minimum ring_size value is 2 + * for a ring size of 4 descriptors. The maximum + * ring_size value is 16 for a ring size of 64k + * descriptors. Values of ring_size <2 and >16 are + * reserved. + * EQ: Enable the Event Queue + * @cos: Class of service for this queue. 
+ * @ring_size: Queue ring size, encoded as a log2(size) + * @ring_base: Queue ring base address + * @cq_ring_base: Completion queue ring base address + * @sg_ring_base: Scatter/Gather ring base address + * @eq_index: Event queue index + */ +struct ionic_q_init_cmd { + u8 opcode; + u8 rsvd; + __le16 lif_index; + u8 type; + u8 ver; + u8 rsvd1[2]; + __le32 index; + __le16 pid; + __le16 intr_index; + __le16 flags; +#define IONIC_QINIT_F_IRQ 0x01 /* Request interrupt on completion */ +#define IONIC_QINIT_F_ENA 0x02 /* Enable the queue */ +#define IONIC_QINIT_F_SG 0x04 /* Enable scatter/gather on the queue */ +#define IONIC_QINIT_F_EQ 0x08 /* Enable event queue */ +#define IONIC_QINIT_F_DEBUG 0x80 /* Enable queue debugging */ + u8 cos; + u8 ring_size; + __le64 ring_base; + __le64 cq_ring_base; + __le64 sg_ring_base; + __le32 eq_index; + u8 rsvd2[16]; +}; + +/** + * struct ionic_q_init_comp - Queue init command completion + * @status: The status of the command (enum status_code) + * @ver: Queue version (defines opcode/descriptor scope) + * @comp_index: The index in the descriptor ring for which this + * is the completion. + * @hw_index: Hardware Queue ID + * @hw_type: Hardware Queue type + * @color: Color + */ +struct ionic_q_init_comp { + u8 status; + u8 ver; + __le16 comp_index; + __le32 hw_index; + u8 hw_type; + u8 rsvd2[6]; + u8 color; +}; + +/* the device's internal addressing uses up to 52 bits */ +#define IONIC_ADDR_LEN 52 +#define IONIC_ADDR_MASK (BIT_ULL(IONIC_ADDR_LEN) - 1) + +enum ionic_txq_desc_opcode { + IONIC_TXQ_DESC_OPCODE_CSUM_NONE = 0, + IONIC_TXQ_DESC_OPCODE_CSUM_PARTIAL = 1, + IONIC_TXQ_DESC_OPCODE_CSUM_HW = 2, + IONIC_TXQ_DESC_OPCODE_TSO = 3, +}; + +/** + * struct ionic_txq_desc - Ethernet Tx queue descriptor format + * @opcode: Tx operation, see TXQ_DESC_OPCODE_*: + * + * IONIC_TXQ_DESC_OPCODE_CSUM_NONE: + * + * Non-offload send. No segmentation, + * fragmentation or checksum calc/insertion is + * performed by device; packet is prepared + * to send by software stack and requires + * no further manipulation from device. + * + * IONIC_TXQ_DESC_OPCODE_CSUM_PARTIAL: + * + * Offload 16-bit L4 checksum + * calculation/insertion. The device will + * calculate the L4 checksum value and + * insert the result in the packet's L4 + * header checksum field. The L4 checksum + * is calculated starting at @csum_start bytes + * into the packet to the end of the packet. + * The checksum insertion position is given + * in @csum_offset. This feature is only + * applicable to protocols such as TCP, UDP + * and ICMP where a standard (i.e. the + * 'IP-style' checksum) one's complement + * 16-bit checksum is used, using an IP + * pseudo-header to seed the calculation. + * Software will preload the L4 checksum + * field with the IP pseudo-header checksum. + * + * For tunnel encapsulation, @csum_start and + * @csum_offset refer to the inner L4 + * header. Supported tunnels encapsulations + * are: IPIP, GRE, and UDP. If the @encap + * is clear, no further processing by the + * device is required; software will + * calculate the outer header checksums. If + * the @encap is set, the device will + * offload the outer header checksums using + * LCO (local checksum offload) (see + * Documentation/networking/checksum- + * offloads.txt for more info). + * + * IONIC_TXQ_DESC_OPCODE_CSUM_HW: + * + * Offload 16-bit checksum computation to hardware. + * If @csum_l3 is set then the packet's L3 checksum is + * updated. Similarly, if @csum_l4 is set the the L4 + * checksum is updated. 
If @encap is set then encap header + * checksums are also updated. + * + * IONIC_TXQ_DESC_OPCODE_TSO: + * + * Device performs TCP segmentation offload + * (TSO). @hdr_len is the number of bytes + * to the end of TCP header (the offset to + * the TCP payload). @mss is the desired + * MSS, the TCP payload length for each + * segment. The device will calculate/ + * insert IP (IPv4 only) and TCP checksums + * for each segment. In the first data + * buffer containing the header template, + * the driver will set IPv4 checksum to 0 + * and preload TCP checksum with the IP + * pseudo header calculated with IP length = 0. + * + * Supported tunnel encapsulations are IPIP, + * layer-3 GRE, and UDP. @hdr_len includes + * both outer and inner headers. The driver + * will set IPv4 checksum to zero and + * preload TCP checksum with IP pseudo + * header on the inner header. + * + * TCP ECN offload is supported. The device + * will set CWR flag in the first segment if + * CWR is set in the template header, and + * clear CWR in remaining segments. + * @flags: + * vlan: + * Insert an L2 VLAN header using @vlan_tci. + * encap: + * Calculate encap header checksum. + * csum_l3: + * Compute L3 header checksum. + * csum_l4: + * Compute L4 header checksum. + * tso_sot: + * TSO start + * tso_eot: + * TSO end + * @num_sg_elems: Number of scatter-gather elements in SG + * descriptor + * @addr: First data buffer's DMA address. + * (Subsequent data buffers are on txq_sg_desc). + * @len: First data buffer's length, in bytes + * @vlan_tci: VLAN tag to insert in the packet (if requested + * by @V-bit). Includes .1p and .1q tags + * @hdr_len: Length of packet headers, including + * encapsulating outer header, if applicable. + * Valid for opcodes TXQ_DESC_OPCODE_CALC_CSUM and + * TXQ_DESC_OPCODE_TSO. Should be set to zero for + * all other modes. For + * TXQ_DESC_OPCODE_CALC_CSUM, @hdr_len is length + * of headers up to inner-most L4 header. For + * TXQ_DESC_OPCODE_TSO, @hdr_len is up to + * inner-most L4 payload, so inclusive of + * inner-most L4 header. + * @mss: Desired MSS value for TSO. Only applicable for + * TXQ_DESC_OPCODE_TSO. 
+ * @csum_start: Offset into inner-most L3 header of checksum + * @csum_offset: Offset into inner-most L4 header of checksum + */ + +#define IONIC_TXQ_DESC_OPCODE_MASK 0xf +#define IONIC_TXQ_DESC_OPCODE_SHIFT 4 +#define IONIC_TXQ_DESC_FLAGS_MASK 0xf +#define IONIC_TXQ_DESC_FLAGS_SHIFT 0 +#define IONIC_TXQ_DESC_NSGE_MASK 0xf +#define IONIC_TXQ_DESC_NSGE_SHIFT 8 +#define IONIC_TXQ_DESC_ADDR_MASK (BIT_ULL(IONIC_ADDR_LEN) - 1) +#define IONIC_TXQ_DESC_ADDR_SHIFT 12 + +/* common flags */ +#define IONIC_TXQ_DESC_FLAG_VLAN 0x1 +#define IONIC_TXQ_DESC_FLAG_ENCAP 0x2 + +/* flags for csum_hw opcode */ +#define IONIC_TXQ_DESC_FLAG_CSUM_L3 0x4 +#define IONIC_TXQ_DESC_FLAG_CSUM_L4 0x8 + +/* flags for tso opcode */ +#define IONIC_TXQ_DESC_FLAG_TSO_SOT 0x4 +#define IONIC_TXQ_DESC_FLAG_TSO_EOT 0x8 + +struct ionic_txq_desc { + __le64 cmd; + __le16 len; + union { + __le16 vlan_tci; + __le16 hword0; + }; + union { + __le16 csum_start; + __le16 hdr_len; + __le16 hword1; + }; + union { + __le16 csum_offset; + __le16 mss; + __le16 hword2; + }; +}; + +static inline u64 encode_txq_desc_cmd(u8 opcode, u8 flags, + u8 nsge, u64 addr) +{ + u64 cmd; + + cmd = (opcode & IONIC_TXQ_DESC_OPCODE_MASK) << + IONIC_TXQ_DESC_OPCODE_SHIFT; + cmd |= (flags & IONIC_TXQ_DESC_FLAGS_MASK) << + IONIC_TXQ_DESC_FLAGS_SHIFT; + cmd |= (nsge & IONIC_TXQ_DESC_NSGE_MASK) << IONIC_TXQ_DESC_NSGE_SHIFT; + cmd |= (addr & IONIC_TXQ_DESC_ADDR_MASK) << IONIC_TXQ_DESC_ADDR_SHIFT; + + return cmd; +}; + +static inline void decode_txq_desc_cmd(u64 cmd, u8 *opcode, u8 *flags, + u8 *nsge, u64 *addr) +{ + *opcode = (cmd >> IONIC_TXQ_DESC_OPCODE_SHIFT) & + IONIC_TXQ_DESC_OPCODE_MASK; + *flags = (cmd >> IONIC_TXQ_DESC_FLAGS_SHIFT) & + IONIC_TXQ_DESC_FLAGS_MASK; + *nsge = (cmd >> IONIC_TXQ_DESC_NSGE_SHIFT) & IONIC_TXQ_DESC_NSGE_MASK; + *addr = (cmd >> IONIC_TXQ_DESC_ADDR_SHIFT) & IONIC_TXQ_DESC_ADDR_MASK; +}; + +#define IONIC_TX_MAX_SG_ELEMS 8 +#define IONIC_RX_MAX_SG_ELEMS 8 + +/** + * struct ionic_txq_sg_desc - Transmit scatter-gather (SG) list + * @addr: DMA address of SG element data buffer + * @len: Length of SG element data buffer, in bytes + */ +struct ionic_txq_sg_desc { + struct ionic_txq_sg_elem { + __le64 addr; + __le16 len; + __le16 rsvd[3]; + } elems[IONIC_TX_MAX_SG_ELEMS]; +}; + +/** + * struct ionic_txq_comp - Ethernet transmit queue completion descriptor + * @status: The status of the command (enum status_code) + * @comp_index: The index in the descriptor ring for which this + * is the completion. + * @color: Color bit. + */ +struct ionic_txq_comp { + u8 status; + u8 rsvd; + __le16 comp_index; + u8 rsvd2[11]; + u8 color; +}; + +enum ionic_rxq_desc_opcode { + IONIC_RXQ_DESC_OPCODE_SIMPLE = 0, + IONIC_RXQ_DESC_OPCODE_SG = 1, +}; + +/** + * struct ionic_rxq_desc - Ethernet Rx queue descriptor format + * @opcode: Rx operation, see RXQ_DESC_OPCODE_*: + * + * RXQ_DESC_OPCODE_SIMPLE: + * + * Receive full packet into data buffer + * starting at @addr. Results of + * receive, including actual bytes received, + * are recorded in Rx completion descriptor. + * + * @len: Data buffer's length, in bytes. 
+ * @addr: Data buffer's DMA address + */ +struct ionic_rxq_desc { + u8 opcode; + u8 rsvd[5]; + __le16 len; + __le64 addr; +}; + +/** + * struct ionic_rxq_sg_desc - Receive scatter-gather (SG) list + * @addr: DMA address of SG element data buffer + * @len: Length of SG element data buffer, in bytes + */ +struct ionic_rxq_sg_desc { + struct ionic_rxq_sg_elem { + __le64 addr; + __le16 len; + __le16 rsvd[3]; + } elems[IONIC_RX_MAX_SG_ELEMS]; +}; + +/** + * struct ionic_rxq_comp - Ethernet receive queue completion descriptor + * @status: The status of the command (enum status_code) + * @num_sg_elems: Number of SG elements used by this descriptor + * @comp_index: The index in the descriptor ring for which this + * is the completion. + * @rss_hash: 32-bit RSS hash + * @csum: 16-bit sum of the packet's L2 payload. + * If the packet's L2 payload is odd length, an extra + * zero-value byte is included in the @csum calculation but + * not included in @len. + * @vlan_tci: VLAN tag stripped from the packet. Valid if @VLAN is + * set. Includes .1p and .1q tags. + * @len: Received packet length, in bytes. Excludes FCS. + * @csum_calc L2 payload checksum is computed or not + * @csum_tcp_ok: The TCP checksum calculated by the device + * matched the checksum in the receive packet's + * TCP header + * @csum_tcp_bad: The TCP checksum calculated by the device did + * not match the checksum in the receive packet's + * TCP header. + * @csum_udp_ok: The UDP checksum calculated by the device + * matched the checksum in the receive packet's + * UDP header + * @csum_udp_bad: The UDP checksum calculated by the device did + * not match the checksum in the receive packet's + * UDP header. + * @csum_ip_ok: The IPv4 checksum calculated by the device + * matched the checksum in the receive packet's + * first IPv4 header. If the receive packet + * contains both a tunnel IPv4 header and a + * transport IPv4 header, the device validates the + * checksum for the both IPv4 headers. + * @csum_ip_bad: The IPv4 checksum calculated by the device did + * not match the checksum in the receive packet's + * first IPv4 header. If the receive packet + * contains both a tunnel IPv4 header and a + * transport IPv4 header, the device validates the + * checksum for both IP headers. + * @VLAN: VLAN header was stripped and placed in @vlan_tci. + * @pkt_type: Packet type + * @color: Color bit. 
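A rough sketch (not part of the upstream file) of how a receive routine might translate the completion's checksum flags into mbuf offload flags; the function name and the cq_desc/m parameters are illustrative, and the IONIC_RXQ_COMP_CSUM_F_* values are the ones defined just below.

#include <rte_mbuf.h>

/* Sketch only: map completion checksum flags to mbuf ol_flags. */
static void
example_set_csum_flags(const struct ionic_rxq_comp *cq_desc,
		struct rte_mbuf *m)
{
	if ((cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC) == 0)
		return;

	if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_OK)
		m->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
	else if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_BAD)
		m->ol_flags |= PKT_RX_IP_CKSUM_BAD;

	if (cq_desc->csum_flags & (IONIC_RXQ_COMP_CSUM_F_TCP_OK |
				IONIC_RXQ_COMP_CSUM_F_UDP_OK))
		m->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
	else if (cq_desc->csum_flags & (IONIC_RXQ_COMP_CSUM_F_TCP_BAD |
				IONIC_RXQ_COMP_CSUM_F_UDP_BAD))
		m->ol_flags |= PKT_RX_L4_CKSUM_BAD;
}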
+ */ +struct ionic_rxq_comp { + u8 status; + u8 num_sg_elems; + __le16 comp_index; + __le32 rss_hash; + __le16 csum; + __le16 vlan_tci; + __le16 len; + u8 csum_flags; +#define IONIC_RXQ_COMP_CSUM_F_TCP_OK 0x01 +#define IONIC_RXQ_COMP_CSUM_F_TCP_BAD 0x02 +#define IONIC_RXQ_COMP_CSUM_F_UDP_OK 0x04 +#define IONIC_RXQ_COMP_CSUM_F_UDP_BAD 0x08 +#define IONIC_RXQ_COMP_CSUM_F_IP_OK 0x10 +#define IONIC_RXQ_COMP_CSUM_F_IP_BAD 0x20 +#define IONIC_RXQ_COMP_CSUM_F_VLAN 0x40 +#define IONIC_RXQ_COMP_CSUM_F_CALC 0x80 + u8 pkt_type_color; +#define IONIC_RXQ_COMP_PKT_TYPE_MASK 0x7f +}; + +enum ionic_pkt_type { + IONIC_PKT_TYPE_NON_IP = 0x000, + IONIC_PKT_TYPE_IPV4 = 0x001, + IONIC_PKT_TYPE_IPV4_TCP = 0x003, + IONIC_PKT_TYPE_IPV4_UDP = 0x005, + IONIC_PKT_TYPE_IPV6 = 0x008, + IONIC_PKT_TYPE_IPV6_TCP = 0x018, + IONIC_PKT_TYPE_IPV6_UDP = 0x028, +}; + +enum ionic_eth_hw_features { + IONIC_ETH_HW_VLAN_TX_TAG = BIT(0), + IONIC_ETH_HW_VLAN_RX_STRIP = BIT(1), + IONIC_ETH_HW_VLAN_RX_FILTER = BIT(2), + IONIC_ETH_HW_RX_HASH = BIT(3), + IONIC_ETH_HW_RX_CSUM = BIT(4), + IONIC_ETH_HW_TX_SG = BIT(5), + IONIC_ETH_HW_RX_SG = BIT(6), + IONIC_ETH_HW_TX_CSUM = BIT(7), + IONIC_ETH_HW_TSO = BIT(8), + IONIC_ETH_HW_TSO_IPV6 = BIT(9), + IONIC_ETH_HW_TSO_ECN = BIT(10), + IONIC_ETH_HW_TSO_GRE = BIT(11), + IONIC_ETH_HW_TSO_GRE_CSUM = BIT(12), + IONIC_ETH_HW_TSO_IPXIP4 = BIT(13), + IONIC_ETH_HW_TSO_IPXIP6 = BIT(14), + IONIC_ETH_HW_TSO_UDP = BIT(15), + IONIC_ETH_HW_TSO_UDP_CSUM = BIT(16), +}; + +/** + * struct ionic_q_control_cmd - Queue control command + * @opcode: opcode + * @type: Queue type + * @lif_index: LIF index + * @index: Queue index + * @oper: Operation (enum q_control_oper) + */ +struct ionic_q_control_cmd { + u8 opcode; + u8 type; + __le16 lif_index; + __le32 index; + u8 oper; + u8 rsvd[55]; +}; + +typedef struct ionic_admin_comp ionic_q_control_comp; + +enum q_control_oper { + IONIC_Q_DISABLE = 0, + IONIC_Q_ENABLE = 1, + IONIC_Q_HANG_RESET = 2, +}; + +/** + * Physical connection type + */ +enum ionic_phy_type { + IONIC_PHY_TYPE_NONE = 0, + IONIC_PHY_TYPE_COPPER = 1, + IONIC_PHY_TYPE_FIBER = 2, +}; + +/** + * Transceiver status + */ +enum ionic_xcvr_state { + IONIC_XCVR_STATE_REMOVED = 0, + IONIC_XCVR_STATE_INSERTED = 1, + IONIC_XCVR_STATE_PENDING = 2, + IONIC_XCVR_STATE_SPROM_READ = 3, + IONIC_XCVR_STATE_SPROM_READ_ERR = 4, +}; + +/** + * Supported link modes + */ +enum ionic_xcvr_pid { + IONIC_XCVR_PID_UNKNOWN = 0, + + /* CU */ + IONIC_XCVR_PID_QSFP_100G_CR4 = 1, + IONIC_XCVR_PID_QSFP_40GBASE_CR4 = 2, + IONIC_XCVR_PID_SFP_25GBASE_CR_S = 3, + IONIC_XCVR_PID_SFP_25GBASE_CR_L = 4, + IONIC_XCVR_PID_SFP_25GBASE_CR_N = 5, + + /* Fiber */ + IONIC_XCVR_PID_QSFP_100G_AOC = 50, + IONIC_XCVR_PID_QSFP_100G_ACC = 51, + IONIC_XCVR_PID_QSFP_100G_SR4 = 52, + IONIC_XCVR_PID_QSFP_100G_LR4 = 53, + IONIC_XCVR_PID_QSFP_100G_ER4 = 54, + IONIC_XCVR_PID_QSFP_40GBASE_ER4 = 55, + IONIC_XCVR_PID_QSFP_40GBASE_SR4 = 56, + IONIC_XCVR_PID_QSFP_40GBASE_LR4 = 57, + IONIC_XCVR_PID_QSFP_40GBASE_AOC = 58, + IONIC_XCVR_PID_SFP_25GBASE_SR = 59, + IONIC_XCVR_PID_SFP_25GBASE_LR = 60, + IONIC_XCVR_PID_SFP_25GBASE_ER = 61, + IONIC_XCVR_PID_SFP_25GBASE_AOC = 62, + IONIC_XCVR_PID_SFP_10GBASE_SR = 63, + IONIC_XCVR_PID_SFP_10GBASE_LR = 64, + IONIC_XCVR_PID_SFP_10GBASE_LRM = 65, + IONIC_XCVR_PID_SFP_10GBASE_ER = 66, + IONIC_XCVR_PID_SFP_10GBASE_AOC = 67, + IONIC_XCVR_PID_SFP_10GBASE_CU = 68, + IONIC_XCVR_PID_QSFP_100G_CWDM4 = 69, + IONIC_XCVR_PID_QSFP_100G_PSM4 = 70, +}; + +/** + * Port types + */ +enum ionic_port_type { + IONIC_PORT_TYPE_NONE = 0, /* port type not 
configured */ + IONIC_PORT_TYPE_ETH = 1, /* port carries ethernet traffic (inband) */ + IONIC_PORT_TYPE_MGMT = 2, /* port carries mgmt traffic (out-of-band) */ +}; + +/** + * Port config state + */ +enum ionic_port_admin_state { + IONIC_PORT_ADMIN_STATE_NONE = 0, /* port admin state not configured */ + IONIC_PORT_ADMIN_STATE_DOWN = 1, /* port is admin disabled */ + IONIC_PORT_ADMIN_STATE_UP = 2, /* port is admin enabled */ +}; + +/** + * Port operational status + */ +enum ionic_port_oper_status { + IONIC_PORT_OPER_STATUS_NONE = 0, /* port is disabled */ + IONIC_PORT_OPER_STATUS_UP = 1, /* port is linked up */ + IONIC_PORT_OPER_STATUS_DOWN = 2, /* port link status is down */ +}; + +/** + * Ethernet Forward error correction (fec) modes + */ +enum ionic_port_fec_type { + IONIC_PORT_FEC_TYPE_NONE = 0, /* Disabled */ + IONIC_PORT_FEC_TYPE_FC = 1, /* FireCode */ + IONIC_PORT_FEC_TYPE_RS = 2, /* ReedSolomon */ +}; + +/** + * Ethernet pause (flow control) modes + */ +enum ionic_port_pause_type { + IONIC_PORT_PAUSE_TYPE_NONE = 0, /* Disable Pause */ + IONIC_PORT_PAUSE_TYPE_LINK = 1, /* Link level pause */ + IONIC_PORT_PAUSE_TYPE_PFC = 2, /* Priority-Flow control */ +}; + +/** + * Loopback modes + */ +enum ionic_port_loopback_mode { + IONIC_PORT_LOOPBACK_MODE_NONE = 0, /* Disable loopback */ + IONIC_PORT_LOOPBACK_MODE_MAC = 1, /* MAC loopback */ + IONIC_PORT_LOOPBACK_MODE_PHY = 2, /* PHY/Serdes loopback */ +}; + +/** + * Transceiver Status information + * @state: Transceiver status (enum ionic_xcvr_state) + * @phy: Physical connection type (enum ionic_phy_type) + * @pid: Transceiver link mode (enum pid) + * @sprom: Transceiver sprom contents + */ +struct ionic_xcvr_status { + u8 state; + u8 phy; + __le16 pid; + u8 sprom[256]; +}; + +/** + * Port configuration + * @speed: port speed (in Mbps) + * @mtu: mtu + * @state: port admin state (enum port_admin_state) + * @an_enable: autoneg enable + * @fec_type: fec type (enum ionic_port_fec_type) + * @pause_type: pause type (enum ionic_port_pause_type) + * @loopback_mode: loopback mode (enum ionic_port_loopback_mode) + */ +union ionic_port_config { + struct { +#define IONIC_SPEED_100G 100000 /* 100G in Mbps */ +#define IONIC_SPEED_50G 50000 /* 50G in Mbps */ +#define IONIC_SPEED_40G 40000 /* 40G in Mbps */ +#define IONIC_SPEED_25G 25000 /* 25G in Mbps */ +#define IONIC_SPEED_10G 10000 /* 10G in Mbps */ +#define IONIC_SPEED_1G 1000 /* 1G in Mbps */ + __le32 speed; + __le32 mtu; + u8 state; + u8 an_enable; + u8 fec_type; +#define IONIC_PAUSE_TYPE_MASK 0x0f +#define IONIC_PAUSE_FLAGS_MASK 0xf0 +#define IONIC_PAUSE_F_TX 0x10 +#define IONIC_PAUSE_F_RX 0x20 + u8 pause_type; + u8 loopback_mode; + }; + __le32 words[64]; +}; + +/** + * Port Status information + * @status: link status (enum ionic_port_oper_status) + * @id: port id + * @speed: link speed (in Mbps) + * @xcvr: transceiver status + */ +struct ionic_port_status { + __le32 id; + __le32 speed; + u8 status; + u8 rsvd[51]; + struct ionic_xcvr_status xcvr; +}; + +/** + * struct ionic_port_identify_cmd - Port identify command + * @opcode: opcode + * @index: port index + * @ver: Highest version of identify supported by driver + */ +struct ionic_port_identify_cmd { + u8 opcode; + u8 index; + u8 ver; + u8 rsvd[61]; +}; + +/** + * struct ionic_port_identify_comp - Port identify command completion + * @status: The status of the command (enum status_code) + * @ver: Version of identify returned by device + */ +struct ionic_port_identify_comp { + u8 status; + u8 ver; + u8 rsvd[14]; +}; + +/** + * struct 
ionic_port_init_cmd - Port initialization command + * @opcode: opcode + * @index: port index + * @info_pa: destination address for port info (struct ionic_port_info) + */ +struct ionic_port_init_cmd { + u8 opcode; + u8 index; + u8 rsvd[6]; + __le64 info_pa; + u8 rsvd2[48]; +}; + +/** + * struct ionic_port_init_comp - Port initialization command completion + * @status: The status of the command (enum status_code) + */ +struct ionic_port_init_comp { + u8 status; + u8 rsvd[15]; +}; + +/** + * struct ionic_port_reset_cmd - Port reset command + * @opcode: opcode + * @index: port index + */ +struct ionic_port_reset_cmd { + u8 opcode; + u8 index; + u8 rsvd[62]; +}; + +/** + * struct ionic_port_reset_comp - Port reset command completion + * @status: The status of the command (enum status_code) + */ +struct ionic_port_reset_comp { + u8 status; + u8 rsvd[15]; +}; + +/** + * enum stats_ctl_cmd - List of commands for stats control + */ +enum ionic_stats_ctl_cmd { + IONIC_STATS_CTL_RESET = 0, +}; + + +/** + * enum ionic_port_attr - List of device attributes + */ +enum ionic_port_attr { + IONIC_PORT_ATTR_STATE = 0, + IONIC_PORT_ATTR_SPEED = 1, + IONIC_PORT_ATTR_MTU = 2, + IONIC_PORT_ATTR_AUTONEG = 3, + IONIC_PORT_ATTR_FEC = 4, + IONIC_PORT_ATTR_PAUSE = 5, + IONIC_PORT_ATTR_LOOPBACK = 6, + IONIC_PORT_ATTR_STATS_CTRL = 7, +}; + +/** + * struct ionic_port_setattr_cmd - Set port attributes on the NIC + * @opcode: Opcode + * @index: port index + * @attr: Attribute type (enum ionic_port_attr) + */ +struct ionic_port_setattr_cmd { + u8 opcode; + u8 index; + u8 attr; + u8 rsvd; + union { + u8 state; + __le32 speed; + __le32 mtu; + u8 an_enable; + u8 fec_type; + u8 pause_type; + u8 loopback_mode; + u8 stats_ctl; + u8 rsvd2[60]; + }; +}; + +/** + * struct ionic_port_setattr_comp - Port set attr command completion + * @status: The status of the command (enum status_code) + * @color: Color bit + */ +struct ionic_port_setattr_comp { + u8 status; + u8 rsvd[14]; + u8 color; +}; + +/** + * struct ionic_port_getattr_cmd - Get port attributes from the NIC + * @opcode: Opcode + * @index: port index + * @attr: Attribute type (enum ionic_port_attr) + */ +struct ionic_port_getattr_cmd { + u8 opcode; + u8 index; + u8 attr; + u8 rsvd[61]; +}; + +/** + * struct ionic_port_getattr_comp - Port get attr command completion + * @status: The status of the command (enum status_code) + * @color: Color bit + */ +struct ionic_port_getattr_comp { + u8 status; + u8 rsvd[3]; + union { + u8 state; + __le32 speed; + __le32 mtu; + u8 an_enable; + u8 fec_type; + u8 pause_type; + u8 loopback_mode; + u8 rsvd2[11]; + }; + u8 color; +}; + +/** + * struct ionic_lif_status - Lif status register + * @eid: most recent NotifyQ event id + * @port_num: port the lif is connected to + * @link_status: port status (enum ionic_port_oper_status) + * @link_speed: speed of link in Mbps + * @link_down_count: number of times link status changes + */ +struct ionic_lif_status { + __le64 eid; + u8 port_num; + u8 rsvd; + __le16 link_status; + __le32 link_speed; /* units of 1Mbps: eg 10000 = 10Gbps */ + __le16 link_down_count; + u8 rsvd2[46]; +}; + +/** + * struct ionic_lif_reset_cmd - LIF reset command + * @opcode: opcode + * @index: LIF index + */ +struct ionic_lif_reset_cmd { + u8 opcode; + u8 rsvd; + __le16 index; + __le32 rsvd2[15]; +}; + +typedef struct ionic_admin_comp ionic_lif_reset_comp; + +enum ionic_dev_state { + IONIC_DEV_DISABLE = 0, + IONIC_DEV_ENABLE = 1, + IONIC_DEV_HANG_RESET = 2, +}; + +/** + * enum ionic_dev_attr - List of device attributes + */ 
+enum ionic_dev_attr { + IONIC_DEV_ATTR_STATE = 0, + IONIC_DEV_ATTR_NAME = 1, + IONIC_DEV_ATTR_FEATURES = 2, +}; + +/** + * struct ionic_dev_setattr_cmd - Set Device attributes on the NIC + * @opcode: Opcode + * @attr: Attribute type (enum ionic_dev_attr) + * @state: Device state (enum ionic_dev_state) + * @name: The bus info, e.g. PCI slot-device-function, 0 terminated + * @features: Device features + */ +struct ionic_dev_setattr_cmd { + u8 opcode; + u8 attr; + __le16 rsvd; + union { + u8 state; + char name[IONIC_IFNAMSIZ]; + __le64 features; + u8 rsvd2[60]; + }; +}; + +/** + * struct ionic_dev_setattr_comp - Device set attr command completion + * @status: The status of the command (enum status_code) + * @features: Device features + * @color: Color bit + */ +struct ionic_dev_setattr_comp { + u8 status; + u8 rsvd[3]; + union { + __le64 features; + u8 rsvd2[11]; + }; + u8 color; +}; + +/** + * struct ionic_dev_getattr_cmd - Get Device attributes from the NIC + * @opcode: opcode + * @attr: Attribute type (enum ionic_dev_attr) + */ +struct ionic_dev_getattr_cmd { + u8 opcode; + u8 attr; + u8 rsvd[62]; +}; + +/** + * struct ionic_dev_setattr_comp - Device set attr command completion + * @status: The status of the command (enum status_code) + * @features: Device features + * @color: Color bit + */ +struct ionic_dev_getattr_comp { + u8 status; + u8 rsvd[3]; + union { + __le64 features; + u8 rsvd2[11]; + }; + u8 color; +}; + +/** + * RSS parameters + */ +#define IONIC_RSS_HASH_KEY_SIZE 40 + +enum ionic_rss_hash_types { + IONIC_RSS_TYPE_IPV4 = BIT(0), + IONIC_RSS_TYPE_IPV4_TCP = BIT(1), + IONIC_RSS_TYPE_IPV4_UDP = BIT(2), + IONIC_RSS_TYPE_IPV6 = BIT(3), + IONIC_RSS_TYPE_IPV6_TCP = BIT(4), + IONIC_RSS_TYPE_IPV6_UDP = BIT(5), +}; + +/** + * enum ionic_lif_attr - List of LIF attributes + */ +enum ionic_lif_attr { + IONIC_LIF_ATTR_STATE = 0, + IONIC_LIF_ATTR_NAME = 1, + IONIC_LIF_ATTR_MTU = 2, + IONIC_LIF_ATTR_MAC = 3, + IONIC_LIF_ATTR_FEATURES = 4, + IONIC_LIF_ATTR_RSS = 5, + IONIC_LIF_ATTR_STATS_CTRL = 6, +}; + +/** + * struct ionic_lif_setattr_cmd - Set LIF attributes on the NIC + * @opcode: Opcode + * @type: Attribute type (enum ionic_lif_attr) + * @index: LIF index + * @state: lif state (enum lif_state) + * @name: The netdev name string, 0 terminated + * @mtu: Mtu + * @mac: Station mac + * @features: Features (enum ionic_eth_hw_features) + * @rss: RSS properties + * @types: The hash types to enable (see rss_hash_types). + * @key: The hash secret key. + * @addr: Address for the indirection table shared memory. + * @stats_ctl: stats control commands (enum stats_ctl_cmd) + */ +struct ionic_lif_setattr_cmd { + u8 opcode; + u8 attr; + __le16 index; + union { + u8 state; + char name[IONIC_IFNAMSIZ]; + __le32 mtu; + u8 mac[6]; + __le64 features; + struct { + __le16 types; + u8 key[IONIC_RSS_HASH_KEY_SIZE]; + u8 rsvd[6]; + __le64 addr; + } rss; + u8 stats_ctl; + u8 rsvd[60]; + }; +}; + +/** + * struct ionic_lif_setattr_comp - LIF set attr command completion + * @status: The status of the command (enum status_code) + * @comp_index: The index in the descriptor ring for which this + * is the completion. 
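A rough sketch (not part of the upstream file) of filling an IONIC_LIF_ATTR_RSS setattr command as described above; the helper name and the key/ind_tbl_pa parameters are illustrative, and the indirection table is assumed to already sit in DMA-able memory at ind_tbl_pa.

#include <string.h>
#include <rte_byteorder.h>

/* Sketch only: program the RSS key, hash types and indirection table. */
static void
example_fill_rss_setattr(struct ionic_lif_setattr_cmd *cmd,
		uint16_t lif_index, const uint8_t *key, uint64_t ind_tbl_pa)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->opcode = IONIC_CMD_LIF_SETATTR;
	cmd->attr = IONIC_LIF_ATTR_RSS;
	cmd->index = rte_cpu_to_le_16(lif_index);
	cmd->rss.types = rte_cpu_to_le_16(IONIC_RSS_TYPE_IPV4 |
				IONIC_RSS_TYPE_IPV4_TCP);
	memcpy(cmd->rss.key, key, IONIC_RSS_HASH_KEY_SIZE);
	cmd->rss.addr = rte_cpu_to_le_64(ind_tbl_pa);
}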
+ * @features: features (enum ionic_eth_hw_features) + * @color: Color bit + */ +struct ionic_lif_setattr_comp { + u8 status; + u8 rsvd; + __le16 comp_index; + union { + __le64 features; + u8 rsvd2[11]; + }; + u8 color; +}; + +/** + * struct ionic_lif_getattr_cmd - Get LIF attributes from the NIC + * @opcode: Opcode + * @attr: Attribute type (enum ionic_lif_attr) + * @index: LIF index + */ +struct ionic_lif_getattr_cmd { + u8 opcode; + u8 attr; + __le16 index; + u8 rsvd[60]; +}; + +/** + * struct ionic_lif_getattr_comp - LIF get attr command completion + * @status: The status of the command (enum status_code) + * @comp_index: The index in the descriptor ring for which this + * is the completion. + * @state: lif state (enum lif_state) + * @name: The netdev name string, 0 terminated + * @mtu: Mtu + * @mac: Station mac + * @features: Features (enum ionic_eth_hw_features) + * @color: Color bit + */ +struct ionic_lif_getattr_comp { + u8 status; + u8 rsvd; + __le16 comp_index; + union { + u8 state; + __le32 mtu; + u8 mac[6]; + __le64 features; + u8 rsvd2[11]; + }; + u8 color; +}; + +enum ionic_rx_mode { + IONIC_RX_MODE_F_UNICAST = BIT(0), + IONIC_RX_MODE_F_MULTICAST = BIT(1), + IONIC_RX_MODE_F_BROADCAST = BIT(2), + IONIC_RX_MODE_F_PROMISC = BIT(3), + IONIC_RX_MODE_F_ALLMULTI = BIT(4), +}; + +/** + * struct ionic_rx_mode_set_cmd - Set LIF's Rx mode command + * @opcode: opcode + * @lif_index: LIF index + * @rx_mode: Rx mode flags: + * IONIC_RX_MODE_F_UNICAST: Accept known unicast packets. + * IONIC_RX_MODE_F_MULTICAST: Accept known multicast packets. + * IONIC_RX_MODE_F_BROADCAST: Accept broadcast packets. + * IONIC_RX_MODE_F_PROMISC: Accept any packets. + * IONIC_RX_MODE_F_ALLMULTI: Accept any multicast packets. + */ +struct ionic_rx_mode_set_cmd { + u8 opcode; + u8 rsvd; + __le16 lif_index; + __le16 rx_mode; + __le16 rsvd2[29]; +}; + +typedef struct ionic_admin_comp ionic_rx_mode_set_comp; + +enum ionic_rx_filter_match_type { + IONIC_RX_FILTER_MATCH_VLAN = 0, + IONIC_RX_FILTER_MATCH_MAC, + IONIC_RX_FILTER_MATCH_MAC_VLAN, +}; + +/** + * struct ionic_rx_filter_add_cmd - Add LIF Rx filter command + * @opcode: opcode + * @qtype: Queue type + * @lif_index: LIF index + * @qid: Queue ID + * @match: Rx filter match type. (See IONIC_RX_FILTER_MATCH_xxx) + * @vlan: VLAN ID + * @addr: MAC address (network-byte order) + */ +struct ionic_rx_filter_add_cmd { + u8 opcode; + u8 qtype; + __le16 lif_index; + __le32 qid; + __le16 match; + union { + struct { + __le16 vlan; + } vlan; + struct { + u8 addr[6]; + } mac; + struct { + __le16 vlan; + u8 addr[6]; + } mac_vlan; + u8 rsvd[54]; + }; +}; + +/** + * struct ionic_rx_filter_add_comp - Add LIF Rx filter command completion + * @status: The status of the command (enum status_code) + * @comp_index: The index in the descriptor ring for which this + * is the completion. + * @filter_id: Filter ID + * @color: Color bit. 
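A rough sketch (not part of the upstream file) of populating a unicast MAC filter add command; the helper name is illustrative and qtype/qid are left at zero. The @filter_id returned in the completion below is what a later IONIC_CMD_RX_FILTER_DEL must carry.

#include <string.h>
#include <rte_byteorder.h>
#include <rte_ether.h>

/* Sketch only: request a perfect-match unicast MAC filter. */
static void
example_fill_mac_filter(struct ionic_rx_filter_add_cmd *cmd,
		uint16_t lif_index, const struct rte_ether_addr *mac)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->opcode = IONIC_CMD_RX_FILTER_ADD;
	cmd->lif_index = rte_cpu_to_le_16(lif_index);
	cmd->match = rte_cpu_to_le_16(IONIC_RX_FILTER_MATCH_MAC);
	memcpy(cmd->mac.addr, mac->addr_bytes, RTE_ETHER_ADDR_LEN);
}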
+ */ +struct ionic_rx_filter_add_comp { + u8 status; + u8 rsvd; + __le16 comp_index; + __le32 filter_id; + u8 rsvd2[7]; + u8 color; +}; + +/** + * struct ionic_rx_filter_del_cmd - Delete LIF Rx filter command + * @opcode: opcode + * @lif_index: LIF index + * @filter_id: Filter ID + */ +struct ionic_rx_filter_del_cmd { + u8 opcode; + u8 rsvd; + __le16 lif_index; + __le32 filter_id; + u8 rsvd2[56]; +}; + +typedef struct ionic_admin_comp ionic_rx_filter_del_comp; + +/** + * struct ionic_qos_identify_cmd - QoS identify command + * @opcode: opcode + * @ver: Highest version of identify supported by driver + * + */ +struct ionic_qos_identify_cmd { + u8 opcode; + u8 ver; + u8 rsvd[62]; +}; + +/** + * struct ionic_qos_identify_comp - QoS identify command completion + * @status: The status of the command (enum status_code) + * @ver: Version of identify returned by device + */ +struct ionic_qos_identify_comp { + u8 status; + u8 ver; + u8 rsvd[14]; +}; + +#define IONIC_QOS_CLASS_MAX 7 +#define IONIC_QOS_CLASS_NAME_SZ 32 +#define IONIC_QOS_DSCP_MAX_VALUES 64 + +/** + * enum ionic_qos_class + */ +enum ionic_qos_class { + IONIC_QOS_CLASS_DEFAULT = 0, + IONIC_QOS_CLASS_USER_DEFINED_1 = 1, + IONIC_QOS_CLASS_USER_DEFINED_2 = 2, + IONIC_QOS_CLASS_USER_DEFINED_3 = 3, + IONIC_QOS_CLASS_USER_DEFINED_4 = 4, + IONIC_QOS_CLASS_USER_DEFINED_5 = 5, + IONIC_QOS_CLASS_USER_DEFINED_6 = 6, +}; + +/** + * enum ionic_qos_class_type - Traffic classification criteria + */ +enum ionic_qos_class_type { + IONIC_QOS_CLASS_TYPE_NONE = 0, + IONIC_QOS_CLASS_TYPE_PCP = 1, /* Dot1Q pcp */ + IONIC_QOS_CLASS_TYPE_DSCP = 2, /* IP dscp */ +}; + +/** + * enum ionic_qos_sched_type - Qos class scheduling type + */ +enum ionic_qos_sched_type { + /* Strict priority */ + IONIC_QOS_SCHED_TYPE_STRICT = 0, + /* Deficit weighted round-robin */ + IONIC_QOS_SCHED_TYPE_DWRR = 1, +}; + +/** + * union ionic_qos_config - Qos configuration structure + * @flags: Configuration flags + * IONIC_QOS_CONFIG_F_ENABLE enable + * IONIC_QOS_CONFIG_F_DROP drop/nodrop + * IONIC_QOS_CONFIG_F_RW_DOT1Q_PCP enable dot1q pcp rewrite + * IONIC_QOS_CONFIG_F_RW_IP_DSCP enable ip dscp rewrite + * @sched_type: Qos class scheduling type (enum ionic_qos_sched_type) + * @class_type: Qos class type (enum ionic_qos_class_type) + * @pause_type: Qos pause type (enum qos_pause_type) + * @name: Qos class name + * @mtu: MTU of the class + * @pfc_dot1q_pcp: Pcp value for pause frames (valid iff F_NODROP) + * @dwrr_weight: Qos class scheduling weight + * @strict_rlmt: Rate limit for strict priority scheduling + * @rw_dot1q_pcp: Rewrite dot1q pcp to this value + * (valid iff F_RW_DOT1Q_PCP) + * @rw_ip_dscp: Rewrite ip dscp to this value + * (valid iff F_RW_IP_DSCP) + * @dot1q_pcp: Dot1q pcp value + * @ndscp: Number of valid dscp values in the ip_dscp field + * @ip_dscp: IP dscp values + */ +union ionic_qos_config { + struct { +#define IONIC_QOS_CONFIG_F_ENABLE BIT(0) +#define IONIC_QOS_CONFIG_F_DROP BIT(1) +#define IONIC_QOS_CONFIG_F_RW_DOT1Q_PCP BIT(2) +#define IONIC_QOS_CONFIG_F_RW_IP_DSCP BIT(3) + u8 flags; + u8 sched_type; + u8 class_type; + u8 pause_type; + char name[IONIC_QOS_CLASS_NAME_SZ]; + __le32 mtu; + /* flow control */ + u8 pfc_cos; + /* scheduler */ + union { + u8 dwrr_weight; + __le64 strict_rlmt; + }; + /* marking */ + union { + u8 rw_dot1q_pcp; + u8 rw_ip_dscp; + }; + /* classification */ + union { + u8 dot1q_pcp; + struct { + u8 ndscp; + u8 ip_dscp[IONIC_QOS_DSCP_MAX_VALUES]; + }; + }; + }; + __le32 words[64]; +}; + +/** + * union ionic_qos_identity - QoS identity 
structure + * @version: Version of the identify structure + * @type: QoS system type + * @nclasses: Number of usable QoS classes + * @config: Current configuration of classes + */ +union ionic_qos_identity { + struct { + u8 version; + u8 type; + u8 rsvd[62]; + union ionic_qos_config config[IONIC_QOS_CLASS_MAX]; + }; + __le32 words[512]; +}; + +/** + * struct qos_init_cmd - QoS config init command + * @opcode: Opcode + * @group: Qos class id + * @info_pa: destination address for qos info + */ +struct ionic_qos_init_cmd { + u8 opcode; + u8 group; + u8 rsvd[6]; + __le64 info_pa; + u8 rsvd1[48]; +}; + +typedef struct ionic_admin_comp ionic_qos_init_comp; + +/** + * struct ionic_qos_reset_cmd - Qos config reset command + * @opcode: Opcode + */ +struct ionic_qos_reset_cmd { + u8 opcode; + u8 group; + u8 rsvd[62]; +}; + +typedef struct ionic_admin_comp ionic_qos_reset_comp; + +/** + * struct ionic_fw_download_cmd - Firmware download command + * @opcode: opcode + * @addr: dma address of the firmware buffer + * @offset: offset of the firmware buffer within the full image + * @length: number of valid bytes in the firmware buffer + */ +struct ionic_fw_download_cmd { + u8 opcode; + u8 rsvd[3]; + __le32 offset; + __le64 addr; + __le32 length; +}; + +typedef struct ionic_admin_comp ionic_fw_download_comp; + +enum ionic_fw_control_oper { + IONIC_FW_RESET = 0, /* Reset firmware */ + IONIC_FW_INSTALL = 1, /* Install firmware */ + IONIC_FW_ACTIVATE = 2, /* Activate firmware */ +}; + +/** + * struct ionic_fw_control_cmd - Firmware control command + * @opcode: opcode + * @oper: firmware control operation (enum ionic_fw_control_oper) + * @slot: slot to activate + */ +struct ionic_fw_control_cmd { + u8 opcode; + u8 rsvd[3]; + u8 oper; + u8 slot; + u8 rsvd1[58]; +}; + +/** + * struct ionic_fw_control_comp - Firmware control copletion + * @opcode: opcode + * @slot: slot where the firmware was installed + */ +struct ionic_fw_control_comp { + u8 status; + u8 rsvd; + __le16 comp_index; + u8 slot; + u8 rsvd1[10]; + u8 color; +}; + +/****************************************************************** + ******************* RDMA Commands ******************************** + ******************************************************************/ + +/** + * struct ionic_rdma_reset_cmd - Reset RDMA LIF cmd + * @opcode: opcode + * @lif_index: lif index + * + * There is no rdma specific dev command completion struct. Completion uses + * the common struct ionic_admin_comp. Only the status is indicated. + * Nonzero status means the LIF does not support rdma. + **/ +struct ionic_rdma_reset_cmd { + u8 opcode; + u8 rsvd; + __le16 lif_index; + u8 rsvd2[60]; +}; + +/** + * struct ionic_rdma_queue_cmd - Create RDMA Queue command + * @opcode: opcode, 52, 53 + * @lif_index lif index + * @qid_ver: (qid | (rdma version << 24)) + * @cid: intr, eq_id, or cq_id + * @dbid: doorbell page id + * @depth_log2: log base two of queue depth + * @stride_log2: log base two of queue stride + * @dma_addr: address of the queue memory + * @xxx_table_index: temporary, but should not need pgtbl for contig. queues. + * + * The same command struct is used to create an rdma event queue, completion + * queue, or rdma admin queue. The cid is an interrupt number for an event + * queue, an event queue id for a completion queue, or a completion queue id + * for an rdma admin queue. + * + * The queue created via a dev command must be contiguous in dma space. 
+ * + * The dev commands are intended only to be used during driver initialization, + * to create queues supporting the rdma admin queue. Other queues, and other + * types of rdma resources like memory regions, will be created and registered + * via the rdma admin queue, and will support a more complete interface + * providing scatter gather lists for larger, scattered queue buffers and + * memory registration. + * + * There is no rdma specific dev command completion struct. Completion uses + * the common struct ionic_admin_comp. Only the status is indicated. + **/ +struct ionic_rdma_queue_cmd { + u8 opcode; + u8 rsvd; + __le16 lif_index; + __le32 qid_ver; + __le32 cid; + __le16 dbid; + u8 depth_log2; + u8 stride_log2; + __le64 dma_addr; + u8 rsvd2[36]; + __le32 xxx_table_index; +}; + +/****************************************************************** + ******************* Notify Events ******************************** + ******************************************************************/ + +/** + * struct ionic_notifyq_event + * @eid: event number + * @ecode: event code + * @data: unspecified data about the event + * + * This is the generic event report struct from which the other + * actual events will be formed. + */ +struct ionic_notifyq_event { + __le64 eid; + __le16 ecode; + u8 data[54]; +}; + +/** + * struct ionic_link_change_event + * @eid: event number + * @ecode: event code = EVENT_OPCODE_LINK_CHANGE + * @link_status: link up or down, with error bits (enum port_status) + * @link_speed: speed of the network link + * + * Sent when the network link state changes between UP and DOWN + */ +struct ionic_link_change_event { + __le64 eid; + __le16 ecode; + __le16 link_status; + __le32 link_speed; /* units of 1Mbps: e.g. 10000 = 10Gbps */ + u8 rsvd[48]; +}; + +/** + * struct ionic_reset_event + * @eid: event number + * @ecode: event code = EVENT_OPCODE_RESET + * @reset_code: reset type + * @state: 0=pending, 1=complete, 2=error + * + * Sent when the NIC or some subsystem is going to be or + * has been reset. + */ +struct ionic_reset_event { + __le64 eid; + __le16 ecode; + u8 reset_code; + u8 state; + u8 rsvd[52]; +}; + +/** + * struct ionic_heartbeat_event + * @eid: event number + * @ecode: event code = EVENT_OPCODE_HEARTBEAT + * + * Sent periodically by the NIC to indicate continued health + */ +struct ionic_heartbeat_event { + __le64 eid; + __le16 ecode; + u8 rsvd[54]; +}; + +/** + * struct ionic_log_event + * @eid: event number + * @ecode: event code = EVENT_OPCODE_LOG + * @data: log data + * + * Sent to notify the driver of an internal error. 
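A rough sketch (not part of the upstream file) of dispatching NotifyQ events on their @ecode; the handler name is illustrative and only a couple of the event codes are shown.

#include <rte_byteorder.h>

/* Sketch only: a NotifyQ poller would dispatch each event by ecode.
 * A real handler would also track eid to discard stale events. */
static void
example_handle_notifyq_event(const struct ionic_notifyq_event *event)
{
	switch (rte_le_to_cpu_16(event->ecode)) {
	case IONIC_EVENT_LINK_CHANGE: {
		const struct ionic_link_change_event *lce =
			(const struct ionic_link_change_event *)event;
		/* e.g. refresh the cached link state from lce->link_status */
		(void)lce;
		break;
	}
	case IONIC_EVENT_HEARTBEAT:
		/* device is alive; nothing to do */
		break;
	default:
		/* IONIC_EVENT_RESET, IONIC_EVENT_LOG, unknown codes */
		break;
	}
}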
+ */ +struct ionic_log_event { + __le64 eid; + __le16 ecode; + u8 data[54]; +}; + +/** + * struct ionic_port_stats + */ +struct ionic_port_stats { + __le64 frames_rx_ok; + __le64 frames_rx_all; + __le64 frames_rx_bad_fcs; + __le64 frames_rx_bad_all; + __le64 octets_rx_ok; + __le64 octets_rx_all; + __le64 frames_rx_unicast; + __le64 frames_rx_multicast; + __le64 frames_rx_broadcast; + __le64 frames_rx_pause; + __le64 frames_rx_bad_length; + __le64 frames_rx_undersized; + __le64 frames_rx_oversized; + __le64 frames_rx_fragments; + __le64 frames_rx_jabber; + __le64 frames_rx_pripause; + __le64 frames_rx_stomped_crc; + __le64 frames_rx_too_long; + __le64 frames_rx_vlan_good; + __le64 frames_rx_dropped; + __le64 frames_rx_less_than_64b; + __le64 frames_rx_64b; + __le64 frames_rx_65b_127b; + __le64 frames_rx_128b_255b; + __le64 frames_rx_256b_511b; + __le64 frames_rx_512b_1023b; + __le64 frames_rx_1024b_1518b; + __le64 frames_rx_1519b_2047b; + __le64 frames_rx_2048b_4095b; + __le64 frames_rx_4096b_8191b; + __le64 frames_rx_8192b_9215b; + __le64 frames_rx_other; + __le64 frames_tx_ok; + __le64 frames_tx_all; + __le64 frames_tx_bad; + __le64 octets_tx_ok; + __le64 octets_tx_total; + __le64 frames_tx_unicast; + __le64 frames_tx_multicast; + __le64 frames_tx_broadcast; + __le64 frames_tx_pause; + __le64 frames_tx_pripause; + __le64 frames_tx_vlan; + __le64 frames_tx_less_than_64b; + __le64 frames_tx_64b; + __le64 frames_tx_65b_127b; + __le64 frames_tx_128b_255b; + __le64 frames_tx_256b_511b; + __le64 frames_tx_512b_1023b; + __le64 frames_tx_1024b_1518b; + __le64 frames_tx_1519b_2047b; + __le64 frames_tx_2048b_4095b; + __le64 frames_tx_4096b_8191b; + __le64 frames_tx_8192b_9215b; + __le64 frames_tx_other; + __le64 frames_tx_pri_0; + __le64 frames_tx_pri_1; + __le64 frames_tx_pri_2; + __le64 frames_tx_pri_3; + __le64 frames_tx_pri_4; + __le64 frames_tx_pri_5; + __le64 frames_tx_pri_6; + __le64 frames_tx_pri_7; + __le64 frames_rx_pri_0; + __le64 frames_rx_pri_1; + __le64 frames_rx_pri_2; + __le64 frames_rx_pri_3; + __le64 frames_rx_pri_4; + __le64 frames_rx_pri_5; + __le64 frames_rx_pri_6; + __le64 frames_rx_pri_7; + __le64 tx_pripause_0_1us_count; + __le64 tx_pripause_1_1us_count; + __le64 tx_pripause_2_1us_count; + __le64 tx_pripause_3_1us_count; + __le64 tx_pripause_4_1us_count; + __le64 tx_pripause_5_1us_count; + __le64 tx_pripause_6_1us_count; + __le64 tx_pripause_7_1us_count; + __le64 rx_pripause_0_1us_count; + __le64 rx_pripause_1_1us_count; + __le64 rx_pripause_2_1us_count; + __le64 rx_pripause_3_1us_count; + __le64 rx_pripause_4_1us_count; + __le64 rx_pripause_5_1us_count; + __le64 rx_pripause_6_1us_count; + __le64 rx_pripause_7_1us_count; + __le64 rx_pause_1us_count; + __le64 frames_tx_truncated; +}; + +struct ionic_mgmt_port_stats { + __le64 frames_rx_ok; + __le64 frames_rx_all; + __le64 frames_rx_bad_fcs; + __le64 frames_rx_bad_all; + __le64 octets_rx_ok; + __le64 octets_rx_all; + __le64 frames_rx_unicast; + __le64 frames_rx_multicast; + __le64 frames_rx_broadcast; + __le64 frames_rx_pause; + __le64 frames_rx_bad_length0; + __le64 frames_rx_undersized1; + __le64 frames_rx_oversized2; + __le64 frames_rx_fragments3; + __le64 frames_rx_jabber4; + __le64 frames_rx_64b5; + __le64 frames_rx_65b_127b6; + __le64 frames_rx_128b_255b7; + __le64 frames_rx_256b_511b8; + __le64 frames_rx_512b_1023b9; + __le64 frames_rx_1024b_1518b0; + __le64 frames_rx_gt_1518b1; + __le64 frames_rx_fifo_full2; + __le64 frames_tx_ok3; + __le64 frames_tx_all4; + __le64 frames_tx_bad5; + __le64 octets_tx_ok6; + __le64 
octets_tx_total7; + __le64 frames_tx_unicast8; + __le64 frames_tx_multicast9; + __le64 frames_tx_broadcast0; + __le64 frames_tx_pause1; +}; + +/** + * struct ionic_port_identity - port identity structure + * @version: identity structure version + * @type: type of port (enum port_type) + * @num_lanes: number of lanes for the port + * @autoneg: autoneg supported + * @min_frame_size: minimum frame size supported + * @max_frame_size: maximum frame size supported + * @fec_type: supported fec types + * @pause_type: supported pause types + * @loopback_mode: supported loopback mode + * @speeds: supported speeds + * @config: current port configuration + */ +union ionic_port_identity { + struct { + u8 version; + u8 type; + u8 num_lanes; + u8 autoneg; + __le32 min_frame_size; + __le32 max_frame_size; + u8 fec_type[4]; + u8 pause_type[2]; + u8 loopback_mode[2]; + __le32 speeds[16]; + u8 rsvd2[44]; + union ionic_port_config config; + }; + __le32 words[512]; +}; + +/** + * struct ionic_port_info - port info structure + * @port_status: port status + * @port_stats: port stats + */ +struct ionic_port_info { + union ionic_port_config config; + struct ionic_port_status status; + struct ionic_port_stats stats; +}; + +/** + * struct ionic_lif_stats + */ +struct ionic_lif_stats { + /* RX */ + __le64 rx_ucast_bytes; + __le64 rx_ucast_packets; + __le64 rx_mcast_bytes; + __le64 rx_mcast_packets; + __le64 rx_bcast_bytes; + __le64 rx_bcast_packets; + __le64 rsvd0; + __le64 rsvd1; + /* RX drops */ + __le64 rx_ucast_drop_bytes; + __le64 rx_ucast_drop_packets; + __le64 rx_mcast_drop_bytes; + __le64 rx_mcast_drop_packets; + __le64 rx_bcast_drop_bytes; + __le64 rx_bcast_drop_packets; + __le64 rx_dma_error; + __le64 rsvd2; + /* TX */ + __le64 tx_ucast_bytes; + __le64 tx_ucast_packets; + __le64 tx_mcast_bytes; + __le64 tx_mcast_packets; + __le64 tx_bcast_bytes; + __le64 tx_bcast_packets; + __le64 rsvd3; + __le64 rsvd4; + /* TX drops */ + __le64 tx_ucast_drop_bytes; + __le64 tx_ucast_drop_packets; + __le64 tx_mcast_drop_bytes; + __le64 tx_mcast_drop_packets; + __le64 tx_bcast_drop_bytes; + __le64 tx_bcast_drop_packets; + __le64 tx_dma_error; + __le64 rsvd5; + /* Rx Queue/Ring drops */ + __le64 rx_queue_disabled; + __le64 rx_queue_empty; + __le64 rx_queue_error; + __le64 rx_desc_fetch_error; + __le64 rx_desc_data_error; + __le64 rsvd6; + __le64 rsvd7; + __le64 rsvd8; + /* Tx Queue/Ring drops */ + __le64 tx_queue_disabled; + __le64 tx_queue_error; + __le64 tx_desc_fetch_error; + __le64 tx_desc_data_error; + __le64 rsvd9; + __le64 rsvd10; + __le64 rsvd11; + __le64 rsvd12; + + /* RDMA/ROCE TX */ + __le64 tx_rdma_ucast_bytes; + __le64 tx_rdma_ucast_packets; + __le64 tx_rdma_mcast_bytes; + __le64 tx_rdma_mcast_packets; + __le64 tx_rdma_cnp_packets; + __le64 rsvd13; + __le64 rsvd14; + __le64 rsvd15; + + /* RDMA/ROCE RX */ + __le64 rx_rdma_ucast_bytes; + __le64 rx_rdma_ucast_packets; + __le64 rx_rdma_mcast_bytes; + __le64 rx_rdma_mcast_packets; + __le64 rx_rdma_cnp_packets; + __le64 rx_rdma_ecn_packets; + __le64 rsvd16; + __le64 rsvd17; + + __le64 rsvd18; + __le64 rsvd19; + __le64 rsvd20; + __le64 rsvd21; + __le64 rsvd22; + __le64 rsvd23; + __le64 rsvd24; + __le64 rsvd25; + + __le64 rsvd26; + __le64 rsvd27; + __le64 rsvd28; + __le64 rsvd29; + __le64 rsvd30; + __le64 rsvd31; + __le64 rsvd32; + __le64 rsvd33; + + __le64 rsvd34; + __le64 rsvd35; + __le64 rsvd36; + __le64 rsvd37; + __le64 rsvd38; + __le64 rsvd39; + __le64 rsvd40; + __le64 rsvd41; + + __le64 rsvd42; + __le64 rsvd43; + __le64 rsvd44; + __le64 rsvd45; + __le64 rsvd46; + 
__le64 rsvd47; + __le64 rsvd48; + __le64 rsvd49; + + /* RDMA/ROCE REQ Error/Debugs (768 - 895) */ + __le64 rdma_req_rx_pkt_seq_err; + __le64 rdma_req_rx_rnr_retry_err; + __le64 rdma_req_rx_remote_access_err; + __le64 rdma_req_rx_remote_inv_req_err; + __le64 rdma_req_rx_remote_oper_err; + __le64 rdma_req_rx_implied_nak_seq_err; + __le64 rdma_req_rx_cqe_err; + __le64 rdma_req_rx_cqe_flush_err; + + __le64 rdma_req_rx_dup_responses; + __le64 rdma_req_rx_invalid_packets; + __le64 rdma_req_tx_local_access_err; + __le64 rdma_req_tx_local_oper_err; + __le64 rdma_req_tx_memory_mgmt_err; + __le64 rsvd52; + __le64 rsvd53; + __le64 rsvd54; + + /* RDMA/ROCE RESP Error/Debugs (896 - 1023) */ + __le64 rdma_resp_rx_dup_requests; + __le64 rdma_resp_rx_out_of_buffer; + __le64 rdma_resp_rx_out_of_seq_pkts; + __le64 rdma_resp_rx_cqe_err; + __le64 rdma_resp_rx_cqe_flush_err; + __le64 rdma_resp_rx_local_len_err; + __le64 rdma_resp_rx_inv_request_err; + __le64 rdma_resp_rx_local_qp_oper_err; + + __le64 rdma_resp_rx_out_of_atomic_resource; + __le64 rdma_resp_tx_pkt_seq_err; + __le64 rdma_resp_tx_remote_inv_req_err; + __le64 rdma_resp_tx_remote_access_err; + __le64 rdma_resp_tx_remote_oper_err; + __le64 rdma_resp_tx_rnr_retry_err; + __le64 rsvd57; + __le64 rsvd58; +}; + +/** + * struct ionic_lif_info - lif info structure + */ +struct ionic_lif_info { + union ionic_lif_config config; + struct ionic_lif_status status; + struct ionic_lif_stats stats; +}; + +union ionic_dev_cmd { + u32 words[16]; + struct ionic_admin_cmd cmd; + struct ionic_nop_cmd nop; + + struct ionic_dev_identify_cmd identify; + struct ionic_dev_init_cmd init; + struct ionic_dev_reset_cmd reset; + struct ionic_dev_getattr_cmd getattr; + struct ionic_dev_setattr_cmd setattr; + + struct ionic_port_identify_cmd port_identify; + struct ionic_port_init_cmd port_init; + struct ionic_port_reset_cmd port_reset; + struct ionic_port_getattr_cmd port_getattr; + struct ionic_port_setattr_cmd port_setattr; + + struct ionic_lif_identify_cmd lif_identify; + struct ionic_lif_init_cmd lif_init; + struct ionic_lif_reset_cmd lif_reset; + + struct ionic_qos_identify_cmd qos_identify; + struct ionic_qos_init_cmd qos_init; + struct ionic_qos_reset_cmd qos_reset; + + struct ionic_q_init_cmd q_init; +}; + +union ionic_dev_cmd_comp { + u32 words[4]; + u8 status; + struct ionic_admin_comp comp; + struct ionic_nop_comp nop; + + struct ionic_dev_identify_comp identify; + struct ionic_dev_init_comp init; + struct ionic_dev_reset_comp reset; + struct ionic_dev_getattr_comp getattr; + struct ionic_dev_setattr_comp setattr; + + struct ionic_port_identify_comp port_identify; + struct ionic_port_init_comp port_init; + struct ionic_port_reset_comp port_reset; + struct ionic_port_getattr_comp port_getattr; + struct ionic_port_setattr_comp port_setattr; + + struct ionic_lif_identify_comp lif_identify; + struct ionic_lif_init_comp lif_init; + ionic_lif_reset_comp lif_reset; + + struct ionic_qos_identify_comp qos_identify; + ionic_qos_init_comp qos_init; + ionic_qos_reset_comp qos_reset; + + struct ionic_q_init_comp q_init; +}; + +/** + * union dev_info - Device info register format (read-only) + * @signature: Signature value of 0x44455649 ('DEVI'). + * @version: Current version of info. + * @asic_type: Asic type. + * @asic_rev: Asic revision. + * @fw_status: Firmware status. + * @fw_heartbeat: Firmware heartbeat counter. + * @serial_num: Serial number. + * @fw_version: Firmware version. 
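+ *
+ * Sizing note (annotation, not part of the upstream comment): the named
+ * fields are overlaid on the u32 words[512] array declared below, so the
+ * device info register window occupies 512 * 4 = 2048 bytes of BAR space.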
+ */ +union ionic_dev_info_regs { +#define IONIC_DEVINFO_FWVERS_BUFLEN 32 +#define IONIC_DEVINFO_SERIAL_BUFLEN 32 + struct { + u32 signature; + u8 version; + u8 asic_type; + u8 asic_rev; + u8 fw_status; + u32 fw_heartbeat; + char fw_version[IONIC_DEVINFO_FWVERS_BUFLEN]; + char serial_num[IONIC_DEVINFO_SERIAL_BUFLEN]; + }; + u32 words[512]; +}; + +/** + * union ionic_dev_cmd_regs - Device command register format (read-write) + * @doorbell: Device Cmd Doorbell, write-only. + * Write a 1 to signal device to process cmd, + * poll done for completion. + * @done: Done indicator, bit 0 == 1 when command is complete. + * @cmd: Opcode-specific command bytes + * @comp: Opcode-specific response bytes + * @data: Opcode-specific side-data + */ +union ionic_dev_cmd_regs { + struct { + u32 doorbell; + u32 done; + union ionic_dev_cmd cmd; + union ionic_dev_cmd_comp comp; + u8 rsvd[48]; + u32 data[478]; + }; + u32 words[512]; +}; + +/** + * union ionic_dev_regs - Device register format in for bar 0 page 0 + * @info: Device info registers + * @devcmd: Device command registers + */ +union ionic_dev_regs { + struct { + union ionic_dev_info_regs info; + union ionic_dev_cmd_regs devcmd; + }; + __le32 words[1024]; +}; + +union ionic_adminq_cmd { + struct ionic_admin_cmd cmd; + struct ionic_nop_cmd nop; + struct ionic_q_init_cmd q_init; + struct ionic_q_control_cmd q_control; + struct ionic_lif_setattr_cmd lif_setattr; + struct ionic_lif_getattr_cmd lif_getattr; + struct ionic_rx_mode_set_cmd rx_mode_set; + struct ionic_rx_filter_add_cmd rx_filter_add; + struct ionic_rx_filter_del_cmd rx_filter_del; + struct ionic_rdma_reset_cmd rdma_reset; + struct ionic_rdma_queue_cmd rdma_queue; + struct ionic_fw_download_cmd fw_download; + struct ionic_fw_control_cmd fw_control; +}; + +union ionic_adminq_comp { + struct ionic_admin_comp comp; + struct ionic_nop_comp nop; + struct ionic_q_init_comp q_init; + struct ionic_lif_setattr_comp lif_setattr; + struct ionic_lif_getattr_comp lif_getattr; + struct ionic_rx_filter_add_comp rx_filter_add; + struct ionic_fw_control_comp fw_control; +}; + +#define IONIC_BARS_MAX 6 +#define IONIC_PCI_BAR_DBELL 1 + +/* BAR0 */ +#define IONIC_BAR0_SIZE 0x8000 + +#define IONIC_BAR0_DEV_INFO_REGS_OFFSET 0x0000 +#define IONIC_BAR0_DEV_CMD_REGS_OFFSET 0x0800 +#define IONIC_BAR0_DEV_CMD_DATA_REGS_OFFSET 0x0c00 +#define IONIC_BAR0_INTR_STATUS_OFFSET 0x1000 +#define IONIC_BAR0_INTR_CTRL_OFFSET 0x2000 +#define IONIC_DEV_CMD_DONE 0x00000001 + +#define IONIC_ASIC_TYPE_CAPRI 0 + +/** + * struct ionic_doorbell - Doorbell register layout + * @p_index: Producer index + * @ring: Selects the specific ring of the queue to update. + * Type-specific meaning: + * ring=0: Default producer/consumer queue. + * ring=1: (CQ, EQ) Re-Arm queue. RDMA CQs + * send events to EQs when armed. EQs send + * interrupts when armed. + * @qid: The queue id selects the queue destination for the + * producer index and flags. 
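+ *
+ * Illustrative sketch (annotation, not part of the upstream comment) of
+ * how the fields below could be filled for a 24-bit queue id, assuming
+ * the low byte lands in qid_lo and the upper 16 bits in qid_hi:
+ *
+ *   db.p_index = rte_cpu_to_le_16(p_index);
+ *   db.ring    = ring;
+ *   db.qid_lo  = qid & 0xff;
+ *   db.qid_hi  = rte_cpu_to_le_16(qid >> 8);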
+ */ +struct ionic_doorbell { + __le16 p_index; + u8 ring; + u8 qid_lo; + __le16 qid_hi; + u16 rsvd2; +}; + +struct ionic_intr_status { + u32 status[2]; +}; + +struct ionic_notifyq_cmd { + __le32 data; /* Not used but needed for qcq structure */ +}; + +union ionic_notifyq_comp { + struct ionic_notifyq_event event; + struct ionic_link_change_event link_change; + struct ionic_reset_event reset; + struct ionic_heartbeat_event heartbeat; + struct ionic_log_event log; +}; + +/* Deprecate */ +struct ionic_identity { + union ionic_drv_identity drv; + union ionic_dev_identity dev; + union ionic_lif_identity lif; + union ionic_port_identity port; + union ionic_qos_identity qos; +}; + +#pragma pack(pop) + +#endif /* _IONIC_IF_H_ */ diff --git a/src/spdk/dpdk/drivers/net/ionic/ionic_lif.c b/src/spdk/dpdk/drivers/net/ionic/ionic_lif.c new file mode 100644 index 000000000..60a5f3d53 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ionic/ionic_lif.c @@ -0,0 +1,1696 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) + * Copyright(c) 2018-2019 Pensando Systems, Inc. All rights reserved. + */ + +#include +#include + +#include "ionic.h" +#include "ionic_logs.h" +#include "ionic_lif.h" +#include "ionic_ethdev.h" +#include "ionic_rx_filter.h" +#include "ionic_rxtx.h" + +static int ionic_lif_addr_add(struct ionic_lif *lif, const uint8_t *addr); +static int ionic_lif_addr_del(struct ionic_lif *lif, const uint8_t *addr); + +int +ionic_qcq_enable(struct ionic_qcq *qcq) +{ + struct ionic_queue *q = &qcq->q; + struct ionic_lif *lif = q->lif; + struct ionic_dev *idev = &lif->adapter->idev; + struct ionic_admin_ctx ctx = { + .pending_work = true, + .cmd.q_control = { + .opcode = IONIC_CMD_Q_CONTROL, + .lif_index = lif->index, + .type = q->type, + .index = q->index, + .oper = IONIC_Q_ENABLE, + }, + }; + + if (qcq->flags & IONIC_QCQ_F_INTR) { + ionic_intr_mask(idev->intr_ctrl, qcq->intr.index, + IONIC_INTR_MASK_CLEAR); + } + + return ionic_adminq_post_wait(lif, &ctx); +} + +int +ionic_qcq_disable(struct ionic_qcq *qcq) +{ + struct ionic_queue *q = &qcq->q; + struct ionic_lif *lif = q->lif; + struct ionic_dev *idev = &lif->adapter->idev; + struct ionic_admin_ctx ctx = { + .pending_work = true, + .cmd.q_control = { + .opcode = IONIC_CMD_Q_CONTROL, + .lif_index = lif->index, + .type = q->type, + .index = q->index, + .oper = IONIC_Q_DISABLE, + }, + }; + + if (qcq->flags & IONIC_QCQ_F_INTR) { + ionic_intr_mask(idev->intr_ctrl, qcq->intr.index, + IONIC_INTR_MASK_SET); + } + + return ionic_adminq_post_wait(lif, &ctx); +} + +int +ionic_lif_stop(struct ionic_lif *lif __rte_unused) +{ + /* Carrier OFF here */ + + return 0; +} + +void +ionic_lif_reset(struct ionic_lif *lif) +{ + struct ionic_dev *idev = &lif->adapter->idev; + + IONIC_PRINT_CALL(); + + ionic_dev_cmd_lif_reset(idev, lif->index); + ionic_dev_cmd_wait_check(idev, IONIC_DEVCMD_TIMEOUT); +} + +static void +ionic_lif_get_abs_stats(const struct ionic_lif *lif, struct rte_eth_stats *stats) +{ + struct ionic_lif_stats *ls = &lif->info->stats; + uint32_t i; + uint32_t num_rx_q_counters = RTE_MIN(lif->nrxqcqs, (uint32_t) + RTE_ETHDEV_QUEUE_STAT_CNTRS); + uint32_t num_tx_q_counters = RTE_MIN(lif->ntxqcqs, (uint32_t) + RTE_ETHDEV_QUEUE_STAT_CNTRS); + + memset(stats, 0, sizeof(*stats)); + + if (ls == NULL) { + IONIC_PRINT(DEBUG, "Stats on port %u not yet initialized", + lif->port_id); + return; + } + + /* RX */ + + stats->ipackets = ls->rx_ucast_packets + + ls->rx_mcast_packets + + ls->rx_bcast_packets; + + stats->ibytes = ls->rx_ucast_bytes + + ls->rx_mcast_bytes + + 
ls->rx_bcast_bytes; + + for (i = 0; i < lif->nrxqcqs; i++) { + struct ionic_rx_stats *rx_stats = &lif->rxqcqs[i]->stats.rx; + stats->imissed += + rx_stats->no_cb_arg + + rx_stats->bad_cq_status + + rx_stats->no_room + + rx_stats->bad_len; + } + + stats->imissed += + ls->rx_ucast_drop_packets + + ls->rx_mcast_drop_packets + + ls->rx_bcast_drop_packets; + + stats->imissed += + ls->rx_queue_empty + + ls->rx_dma_error + + ls->rx_queue_disabled + + ls->rx_desc_fetch_error + + ls->rx_desc_data_error; + + for (i = 0; i < num_rx_q_counters; i++) { + struct ionic_rx_stats *rx_stats = &lif->rxqcqs[i]->stats.rx; + stats->q_ipackets[i] = rx_stats->packets; + stats->q_ibytes[i] = rx_stats->bytes; + stats->q_errors[i] = + rx_stats->no_cb_arg + + rx_stats->bad_cq_status + + rx_stats->no_room + + rx_stats->bad_len; + } + + /* TX */ + + stats->opackets = ls->tx_ucast_packets + + ls->tx_mcast_packets + + ls->tx_bcast_packets; + + stats->obytes = ls->tx_ucast_bytes + + ls->tx_mcast_bytes + + ls->tx_bcast_bytes; + + for (i = 0; i < lif->ntxqcqs; i++) { + struct ionic_tx_stats *tx_stats = &lif->txqcqs[i]->stats.tx; + stats->oerrors += tx_stats->drop; + } + + stats->oerrors += + ls->tx_ucast_drop_packets + + ls->tx_mcast_drop_packets + + ls->tx_bcast_drop_packets; + + stats->oerrors += + ls->tx_dma_error + + ls->tx_queue_disabled + + ls->tx_desc_fetch_error + + ls->tx_desc_data_error; + + for (i = 0; i < num_tx_q_counters; i++) { + struct ionic_tx_stats *tx_stats = &lif->txqcqs[i]->stats.tx; + stats->q_opackets[i] = tx_stats->packets; + stats->q_obytes[i] = tx_stats->bytes; + } +} + +void +ionic_lif_get_stats(const struct ionic_lif *lif, + struct rte_eth_stats *stats) +{ + ionic_lif_get_abs_stats(lif, stats); + + stats->ipackets -= lif->stats_base.ipackets; + stats->opackets -= lif->stats_base.opackets; + stats->ibytes -= lif->stats_base.ibytes; + stats->obytes -= lif->stats_base.obytes; + stats->imissed -= lif->stats_base.imissed; + stats->ierrors -= lif->stats_base.ierrors; + stats->oerrors -= lif->stats_base.oerrors; + stats->rx_nombuf -= lif->stats_base.rx_nombuf; +} + +void +ionic_lif_reset_stats(struct ionic_lif *lif) +{ + uint32_t i; + + for (i = 0; i < lif->nrxqcqs; i++) { + memset(&lif->rxqcqs[i]->stats.rx, 0, + sizeof(struct ionic_rx_stats)); + memset(&lif->txqcqs[i]->stats.tx, 0, + sizeof(struct ionic_tx_stats)); + } + + ionic_lif_get_abs_stats(lif, &lif->stats_base); +} + +void +ionic_lif_get_hw_stats(struct ionic_lif *lif, struct ionic_lif_stats *stats) +{ + uint16_t i, count = sizeof(struct ionic_lif_stats) / sizeof(uint64_t); + uint64_t *stats64 = (uint64_t *)stats; + uint64_t *lif_stats64 = (uint64_t *)&lif->info->stats; + uint64_t *lif_stats64_base = (uint64_t *)&lif->lif_stats_base; + + for (i = 0; i < count; i++) + stats64[i] = lif_stats64[i] - lif_stats64_base[i]; +} + +void +ionic_lif_reset_hw_stats(struct ionic_lif *lif) +{ + uint16_t i, count = sizeof(struct ionic_lif_stats) / sizeof(uint64_t); + uint64_t *lif_stats64 = (uint64_t *)&lif->info->stats; + uint64_t *lif_stats64_base = (uint64_t *)&lif->lif_stats_base; + + for (i = 0; i < count; i++) + lif_stats64_base[i] = lif_stats64[i]; +} + +static int +ionic_lif_addr_add(struct ionic_lif *lif, const uint8_t *addr) +{ + struct ionic_admin_ctx ctx = { + .pending_work = true, + .cmd.rx_filter_add = { + .opcode = IONIC_CMD_RX_FILTER_ADD, + .match = IONIC_RX_FILTER_MATCH_MAC, + }, + }; + int err; + + memcpy(ctx.cmd.rx_filter_add.mac.addr, addr, RTE_ETHER_ADDR_LEN); + + err = ionic_adminq_post_wait(lif, &ctx); + if (err) + return err; + + 
IONIC_PRINT(INFO, "rx_filter add (id %d)", + ctx.comp.rx_filter_add.filter_id); + + return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, &ctx); +} + +static int +ionic_lif_addr_del(struct ionic_lif *lif, const uint8_t *addr) +{ + struct ionic_admin_ctx ctx = { + .pending_work = true, + .cmd.rx_filter_del = { + .opcode = IONIC_CMD_RX_FILTER_DEL, + }, + }; + struct ionic_rx_filter *f; + int err; + + IONIC_PRINT_CALL(); + + rte_spinlock_lock(&lif->rx_filters.lock); + + f = ionic_rx_filter_by_addr(lif, addr); + if (!f) { + rte_spinlock_unlock(&lif->rx_filters.lock); + return -ENOENT; + } + + ctx.cmd.rx_filter_del.filter_id = f->filter_id; + ionic_rx_filter_free(f); + + rte_spinlock_unlock(&lif->rx_filters.lock); + + err = ionic_adminq_post_wait(lif, &ctx); + if (err) + return err; + + IONIC_PRINT(INFO, "rx_filter del (id %d)", + ctx.cmd.rx_filter_del.filter_id); + + return 0; +} + +int +ionic_dev_add_mac(struct rte_eth_dev *eth_dev, + struct rte_ether_addr *mac_addr, + uint32_t index __rte_unused, uint32_t pool __rte_unused) +{ + struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev); + + IONIC_PRINT_CALL(); + + return ionic_lif_addr_add(lif, (const uint8_t *)mac_addr); +} + +void +ionic_dev_remove_mac(struct rte_eth_dev *eth_dev, uint32_t index __rte_unused) +{ + struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev); + struct ionic_adapter *adapter = lif->adapter; + + IONIC_PRINT_CALL(); + + if (index >= adapter->max_mac_addrs) { + IONIC_PRINT(WARNING, + "Index %u is above MAC filter limit %u", + index, adapter->max_mac_addrs); + return; + } + + if (!rte_is_valid_assigned_ether_addr(ð_dev->data->mac_addrs[index])) + return; + + ionic_lif_addr_del(lif, (const uint8_t *) + ð_dev->data->mac_addrs[index]); +} + +int +ionic_dev_set_mac(struct rte_eth_dev *eth_dev, struct rte_ether_addr *mac_addr) +{ + struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev); + + IONIC_PRINT_CALL(); + + if (mac_addr == NULL) { + IONIC_PRINT(NOTICE, "New mac is null"); + return -1; + } + + if (!rte_is_zero_ether_addr((struct rte_ether_addr *)lif->mac_addr)) { + IONIC_PRINT(INFO, "Deleting mac addr %pM", + lif->mac_addr); + ionic_lif_addr_del(lif, lif->mac_addr); + memset(lif->mac_addr, 0, RTE_ETHER_ADDR_LEN); + } + + IONIC_PRINT(INFO, "Updating mac addr"); + + rte_ether_addr_copy(mac_addr, (struct rte_ether_addr *)lif->mac_addr); + + return ionic_lif_addr_add(lif, (const uint8_t *)mac_addr); +} + +static int +ionic_vlan_rx_add_vid(struct ionic_lif *lif, uint16_t vid) +{ + struct ionic_admin_ctx ctx = { + .pending_work = true, + .cmd.rx_filter_add = { + .opcode = IONIC_CMD_RX_FILTER_ADD, + .match = IONIC_RX_FILTER_MATCH_VLAN, + .vlan.vlan = vid, + }, + }; + int err; + + err = ionic_adminq_post_wait(lif, &ctx); + if (err) + return err; + + IONIC_PRINT(INFO, "rx_filter add VLAN %d (id %d)", vid, + ctx.comp.rx_filter_add.filter_id); + + return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, &ctx); +} + +static int +ionic_vlan_rx_kill_vid(struct ionic_lif *lif, uint16_t vid) +{ + struct ionic_admin_ctx ctx = { + .pending_work = true, + .cmd.rx_filter_del = { + .opcode = IONIC_CMD_RX_FILTER_DEL, + }, + }; + struct ionic_rx_filter *f; + int err; + + IONIC_PRINT_CALL(); + + rte_spinlock_lock(&lif->rx_filters.lock); + + f = ionic_rx_filter_by_vlan(lif, vid); + if (!f) { + rte_spinlock_unlock(&lif->rx_filters.lock); + return -ENOENT; + } + + ctx.cmd.rx_filter_del.filter_id = f->filter_id; + ionic_rx_filter_free(f); + rte_spinlock_unlock(&lif->rx_filters.lock); + + err = ionic_adminq_post_wait(lif, &ctx); + if (err) + return 
err; + + IONIC_PRINT(INFO, "rx_filter del VLAN %d (id %d)", vid, + ctx.cmd.rx_filter_del.filter_id); + + return 0; +} + +int +ionic_dev_vlan_filter_set(struct rte_eth_dev *eth_dev, uint16_t vlan_id, + int on) +{ + struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev); + int err; + + if (on) + err = ionic_vlan_rx_add_vid(lif, vlan_id); + else + err = ionic_vlan_rx_kill_vid(lif, vlan_id); + + return err; +} + +static void +ionic_lif_rx_mode(struct ionic_lif *lif, uint32_t rx_mode) +{ + struct ionic_admin_ctx ctx = { + .pending_work = true, + .cmd.rx_mode_set = { + .opcode = IONIC_CMD_RX_MODE_SET, + .lif_index = lif->index, + .rx_mode = rx_mode, + }, + }; + int err; + + if (rx_mode & IONIC_RX_MODE_F_UNICAST) + IONIC_PRINT(DEBUG, "rx_mode IONIC_RX_MODE_F_UNICAST"); + if (rx_mode & IONIC_RX_MODE_F_MULTICAST) + IONIC_PRINT(DEBUG, "rx_mode IONIC_RX_MODE_F_MULTICAST"); + if (rx_mode & IONIC_RX_MODE_F_BROADCAST) + IONIC_PRINT(DEBUG, "rx_mode IONIC_RX_MODE_F_BROADCAST"); + if (rx_mode & IONIC_RX_MODE_F_PROMISC) + IONIC_PRINT(DEBUG, "rx_mode IONIC_RX_MODE_F_PROMISC"); + if (rx_mode & IONIC_RX_MODE_F_ALLMULTI) + IONIC_PRINT(DEBUG, "rx_mode IONIC_RX_MODE_F_ALLMULTI"); + + err = ionic_adminq_post_wait(lif, &ctx); + if (err) + IONIC_PRINT(ERR, "Failure setting RX mode"); +} + +static void +ionic_set_rx_mode(struct ionic_lif *lif, uint32_t rx_mode) +{ + if (lif->rx_mode != rx_mode) { + lif->rx_mode = rx_mode; + ionic_lif_rx_mode(lif, rx_mode); + } +} + +int +ionic_dev_promiscuous_enable(struct rte_eth_dev *eth_dev) +{ + struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev); + uint32_t rx_mode = lif->rx_mode; + + IONIC_PRINT_CALL(); + + rx_mode |= IONIC_RX_MODE_F_PROMISC; + + ionic_set_rx_mode(lif, rx_mode); + + return 0; +} + +int +ionic_dev_promiscuous_disable(struct rte_eth_dev *eth_dev) +{ + struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev); + uint32_t rx_mode = lif->rx_mode; + + rx_mode &= ~IONIC_RX_MODE_F_PROMISC; + + ionic_set_rx_mode(lif, rx_mode); + + return 0; +} + +int +ionic_dev_allmulticast_enable(struct rte_eth_dev *eth_dev) +{ + struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev); + uint32_t rx_mode = lif->rx_mode; + + rx_mode |= IONIC_RX_MODE_F_ALLMULTI; + + ionic_set_rx_mode(lif, rx_mode); + + return 0; +} + +int +ionic_dev_allmulticast_disable(struct rte_eth_dev *eth_dev) +{ + struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev); + uint32_t rx_mode = lif->rx_mode; + + rx_mode &= ~IONIC_RX_MODE_F_ALLMULTI; + + ionic_set_rx_mode(lif, rx_mode); + + return 0; +} + +int +ionic_lif_change_mtu(struct ionic_lif *lif, int new_mtu) +{ + struct ionic_admin_ctx ctx = { + .pending_work = true, + .cmd.lif_setattr = { + .opcode = IONIC_CMD_LIF_SETATTR, + .index = lif->index, + .attr = IONIC_LIF_ATTR_MTU, + .mtu = new_mtu, + }, + }; + int err; + + err = ionic_adminq_post_wait(lif, &ctx); + if (err) + return err; + + lif->mtu = new_mtu; + + return 0; +} + +int +ionic_intr_alloc(struct ionic_lif *lif, struct ionic_intr_info *intr) +{ + struct ionic_adapter *adapter = lif->adapter; + struct ionic_dev *idev = &adapter->idev; + unsigned long index; + + /* + * Note: interrupt handler is called for index = 0 only + * (we use interrupts for the notifyq only anyway, + * which hash index = 0) + */ + + for (index = 0; index < adapter->nintrs; index++) + if (!adapter->intrs[index]) + break; + + if (index == adapter->nintrs) + return -ENOSPC; + + adapter->intrs[index] = true; + + ionic_intr_init(idev, intr, index); + + return 0; +} + +void +ionic_intr_free(struct ionic_lif *lif, struct ionic_intr_info *intr) 
+{ + if (intr->index != IONIC_INTR_INDEX_NOT_ASSIGNED) + lif->adapter->intrs[intr->index] = false; +} + +static int +ionic_qcq_alloc(struct ionic_lif *lif, uint8_t type, + uint32_t index, + const char *base, uint32_t flags, + uint32_t num_descs, + uint32_t desc_size, + uint32_t cq_desc_size, + uint32_t sg_desc_size, + uint32_t pid, struct ionic_qcq **qcq) +{ + struct ionic_dev *idev = &lif->adapter->idev; + struct ionic_qcq *new; + uint32_t q_size, cq_size, sg_size, total_size; + void *q_base, *cq_base, *sg_base; + rte_iova_t q_base_pa = 0; + rte_iova_t cq_base_pa = 0; + rte_iova_t sg_base_pa = 0; + uint32_t socket_id = rte_socket_id(); + int err; + + *qcq = NULL; + + q_size = num_descs * desc_size; + cq_size = num_descs * cq_desc_size; + sg_size = num_descs * sg_desc_size; + + total_size = RTE_ALIGN(q_size, PAGE_SIZE) + + RTE_ALIGN(cq_size, PAGE_SIZE); + /* + * Note: aligning q_size/cq_size is not enough due to cq_base address + * aligning as q_base could be not aligned to the page. + * Adding PAGE_SIZE. + */ + total_size += PAGE_SIZE; + + if (flags & IONIC_QCQ_F_SG) { + total_size += RTE_ALIGN(sg_size, PAGE_SIZE); + total_size += PAGE_SIZE; + } + + new = rte_zmalloc("ionic", sizeof(*new), 0); + if (!new) { + IONIC_PRINT(ERR, "Cannot allocate queue structure"); + return -ENOMEM; + } + + new->lif = lif; + new->flags = flags; + + new->q.info = rte_zmalloc("ionic", sizeof(*new->q.info) * num_descs, 0); + if (!new->q.info) { + IONIC_PRINT(ERR, "Cannot allocate queue info"); + return -ENOMEM; + } + + new->q.type = type; + + err = ionic_q_init(lif, idev, &new->q, index, num_descs, + desc_size, sg_desc_size, pid); + if (err) { + IONIC_PRINT(ERR, "Queue initialization failed"); + return err; + } + + if (flags & IONIC_QCQ_F_INTR) { + err = ionic_intr_alloc(lif, &new->intr); + if (err) + return err; + + ionic_intr_mask_assert(idev->intr_ctrl, new->intr.index, + IONIC_INTR_MASK_SET); + } else { + new->intr.index = IONIC_INTR_INDEX_NOT_ASSIGNED; + } + + err = ionic_cq_init(lif, &new->cq, &new->intr, + num_descs, cq_desc_size); + if (err) { + IONIC_PRINT(ERR, "Completion queue initialization failed"); + goto err_out_free_intr; + } + + new->base_z = rte_eth_dma_zone_reserve(lif->eth_dev, + base /* name */, index /* queue_idx */, + total_size, IONIC_ALIGN, socket_id); + + if (!new->base_z) { + IONIC_PRINT(ERR, "Cannot reserve queue DMA memory"); + err = -ENOMEM; + goto err_out_free_intr; + } + + new->base = new->base_z->addr; + new->base_pa = new->base_z->iova; + new->total_size = total_size; + + q_base = new->base; + q_base_pa = new->base_pa; + + cq_base = (void *)RTE_ALIGN((uintptr_t)q_base + q_size, PAGE_SIZE); + cq_base_pa = RTE_ALIGN(q_base_pa + q_size, PAGE_SIZE); + + if (flags & IONIC_QCQ_F_SG) { + sg_base = (void *)RTE_ALIGN((uintptr_t)cq_base + cq_size, + PAGE_SIZE); + sg_base_pa = RTE_ALIGN(cq_base_pa + cq_size, PAGE_SIZE); + ionic_q_sg_map(&new->q, sg_base, sg_base_pa); + } + + IONIC_PRINT(DEBUG, "Q-Base-PA = %ju CQ-Base-PA = %ju " + "SG-base-PA = %ju", + q_base_pa, cq_base_pa, sg_base_pa); + + ionic_q_map(&new->q, q_base, q_base_pa); + ionic_cq_map(&new->cq, cq_base, cq_base_pa); + ionic_cq_bind(&new->cq, &new->q); + + *qcq = new; + + return 0; + +err_out_free_intr: + if (flags & IONIC_QCQ_F_INTR) + ionic_intr_free(lif, &new->intr); + + return err; +} + +void +ionic_qcq_free(struct ionic_qcq *qcq) +{ + if (qcq->base_z) { + qcq->base = NULL; + qcq->base_pa = 0; + rte_memzone_free(qcq->base_z); + qcq->base_z = NULL; + } + + if (qcq->q.info) { + rte_free(qcq->q.info); + qcq->q.info = NULL; + } 
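+	/* Annotation (not in the upstream source): the qcq container itself
+	 * was allocated with rte_zmalloc() in ionic_qcq_alloc() and is
+	 * released with rte_free() below.
+	 */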
+ + rte_free(qcq); +} + +int +ionic_rx_qcq_alloc(struct ionic_lif *lif, uint32_t index, uint16_t nrxq_descs, + struct ionic_qcq **qcq) +{ + uint32_t flags; + int err = -ENOMEM; + + flags = IONIC_QCQ_F_SG; + err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, index, "rx", flags, + nrxq_descs, + sizeof(struct ionic_rxq_desc), + sizeof(struct ionic_rxq_comp), + sizeof(struct ionic_rxq_sg_desc), + lif->kern_pid, &lif->rxqcqs[index]); + if (err) + return err; + + *qcq = lif->rxqcqs[index]; + + return 0; +} + +int +ionic_tx_qcq_alloc(struct ionic_lif *lif, uint32_t index, uint16_t ntxq_descs, + struct ionic_qcq **qcq) +{ + uint32_t flags; + int err = -ENOMEM; + + flags = IONIC_QCQ_F_SG; + err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, index, "tx", flags, + ntxq_descs, + sizeof(struct ionic_txq_desc), + sizeof(struct ionic_txq_comp), + sizeof(struct ionic_txq_sg_desc), + lif->kern_pid, &lif->txqcqs[index]); + if (err) + return err; + + *qcq = lif->txqcqs[index]; + + return 0; +} + +static int +ionic_admin_qcq_alloc(struct ionic_lif *lif) +{ + uint32_t flags; + int err = -ENOMEM; + + flags = 0; + err = ionic_qcq_alloc(lif, IONIC_QTYPE_ADMINQ, 0, "admin", flags, + IONIC_ADMINQ_LENGTH, + sizeof(struct ionic_admin_cmd), + sizeof(struct ionic_admin_comp), + 0, + lif->kern_pid, &lif->adminqcq); + if (err) + return err; + + return 0; +} + +static int +ionic_notify_qcq_alloc(struct ionic_lif *lif) +{ + uint32_t flags; + int err = -ENOMEM; + + flags = IONIC_QCQ_F_NOTIFYQ | IONIC_QCQ_F_INTR; + + err = ionic_qcq_alloc(lif, IONIC_QTYPE_NOTIFYQ, 0, "notify", + flags, + IONIC_NOTIFYQ_LENGTH, + sizeof(struct ionic_notifyq_cmd), + sizeof(union ionic_notifyq_comp), + 0, + lif->kern_pid, &lif->notifyqcq); + if (err) + return err; + + return 0; +} + +static void * +ionic_bus_map_dbpage(struct ionic_adapter *adapter, int page_num) +{ + char *vaddr = adapter->bars[IONIC_PCI_BAR_DBELL].vaddr; + + if (adapter->num_bars <= IONIC_PCI_BAR_DBELL) + return NULL; + + return (void *)&vaddr[page_num << PAGE_SHIFT]; +} + +int +ionic_lif_alloc(struct ionic_lif *lif) +{ + struct ionic_adapter *adapter = lif->adapter; + uint32_t socket_id = rte_socket_id(); + int dbpage_num; + int err; + + snprintf(lif->name, sizeof(lif->name), "lif%u", lif->index); + + IONIC_PRINT(DEBUG, "Allocating Lif Info"); + + rte_spinlock_init(&lif->adminq_lock); + rte_spinlock_init(&lif->adminq_service_lock); + + lif->kern_pid = 0; + + dbpage_num = ionic_db_page_num(lif, 0); + + lif->kern_dbpage = ionic_bus_map_dbpage(adapter, dbpage_num); + if (!lif->kern_dbpage) { + IONIC_PRINT(ERR, "Cannot map dbpage, aborting"); + return -ENOMEM; + } + + lif->txqcqs = rte_zmalloc("ionic", sizeof(*lif->txqcqs) * + adapter->max_ntxqs_per_lif, 0); + + if (!lif->txqcqs) { + IONIC_PRINT(ERR, "Cannot allocate tx queues array"); + return -ENOMEM; + } + + lif->rxqcqs = rte_zmalloc("ionic", sizeof(*lif->rxqcqs) * + adapter->max_nrxqs_per_lif, 0); + + if (!lif->rxqcqs) { + IONIC_PRINT(ERR, "Cannot allocate rx queues array"); + return -ENOMEM; + } + + IONIC_PRINT(DEBUG, "Allocating Notify Queue"); + + err = ionic_notify_qcq_alloc(lif); + if (err) { + IONIC_PRINT(ERR, "Cannot allocate notify queue"); + return err; + } + + IONIC_PRINT(DEBUG, "Allocating Admin Queue"); + + IONIC_PRINT(DEBUG, "Allocating Admin Queue"); + + err = ionic_admin_qcq_alloc(lif); + if (err) { + IONIC_PRINT(ERR, "Cannot allocate admin queue"); + return err; + } + + IONIC_PRINT(DEBUG, "Allocating Lif Info"); + + lif->info_sz = RTE_ALIGN(sizeof(*lif->info), PAGE_SIZE); + + lif->info_z = 
rte_eth_dma_zone_reserve(lif->eth_dev, + "lif_info", 0 /* queue_idx*/, + lif->info_sz, IONIC_ALIGN, socket_id); + if (!lif->info_z) { + IONIC_PRINT(ERR, "Cannot allocate lif info memory"); + return -ENOMEM; + } + + lif->info = lif->info_z->addr; + lif->info_pa = lif->info_z->iova; + + return 0; +} + +void +ionic_lif_free(struct ionic_lif *lif) +{ + if (lif->notifyqcq) { + ionic_qcq_free(lif->notifyqcq); + lif->notifyqcq = NULL; + } + + if (lif->adminqcq) { + ionic_qcq_free(lif->adminqcq); + lif->adminqcq = NULL; + } + + if (lif->txqcqs) { + rte_free(lif->txqcqs); + lif->txqcqs = NULL; + } + + if (lif->rxqcqs) { + rte_free(lif->rxqcqs); + lif->rxqcqs = NULL; + } + + if (lif->info) { + rte_memzone_free(lif->info_z); + lif->info = NULL; + } +} + +int +ionic_lif_rss_config(struct ionic_lif *lif, + const uint16_t types, const uint8_t *key, const uint32_t *indir) +{ + struct ionic_admin_ctx ctx = { + .pending_work = true, + .cmd.lif_setattr = { + .opcode = IONIC_CMD_LIF_SETATTR, + .attr = IONIC_LIF_ATTR_RSS, + .rss.types = types, + .rss.addr = lif->rss_ind_tbl_pa, + }, + }; + unsigned int i; + + IONIC_PRINT_CALL(); + + lif->rss_types = types; + + if (key) + memcpy(lif->rss_hash_key, key, IONIC_RSS_HASH_KEY_SIZE); + + if (indir) + for (i = 0; i < lif->adapter->ident.lif.eth.rss_ind_tbl_sz; i++) + lif->rss_ind_tbl[i] = indir[i]; + + memcpy(ctx.cmd.lif_setattr.rss.key, lif->rss_hash_key, + IONIC_RSS_HASH_KEY_SIZE); + + return ionic_adminq_post_wait(lif, &ctx); +} + +static int +ionic_lif_rss_setup(struct ionic_lif *lif) +{ + size_t tbl_size = sizeof(*lif->rss_ind_tbl) * + lif->adapter->ident.lif.eth.rss_ind_tbl_sz; + static const uint8_t toeplitz_symmetric_key[] = { + 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, + 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, + 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, + 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, + 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, + }; + uint32_t socket_id = rte_socket_id(); + uint32_t i; + int err; + + IONIC_PRINT_CALL(); + + lif->rss_ind_tbl_z = rte_eth_dma_zone_reserve(lif->eth_dev, + "rss_ind_tbl", + 0 /* queue_idx*/, tbl_size, IONIC_ALIGN, socket_id); + + if (!lif->rss_ind_tbl_z) { + IONIC_PRINT(ERR, "OOM"); + return -ENOMEM; + } + + lif->rss_ind_tbl = lif->rss_ind_tbl_z->addr; + lif->rss_ind_tbl_pa = lif->rss_ind_tbl_z->iova; + + /* Fill indirection table with 'default' values */ + for (i = 0; i < lif->adapter->ident.lif.eth.rss_ind_tbl_sz; i++) + lif->rss_ind_tbl[i] = i % lif->nrxqcqs; + + err = ionic_lif_rss_config(lif, IONIC_RSS_OFFLOAD_ALL, + toeplitz_symmetric_key, NULL); + if (err) + return err; + + return 0; +} + +static void +ionic_lif_rss_teardown(struct ionic_lif *lif) +{ + if (!lif->rss_ind_tbl) + return; + + if (lif->rss_ind_tbl_z) { + /* Disable RSS on the NIC */ + ionic_lif_rss_config(lif, 0x0, NULL, NULL); + + lif->rss_ind_tbl = NULL; + lif->rss_ind_tbl_pa = 0; + rte_memzone_free(lif->rss_ind_tbl_z); + lif->rss_ind_tbl_z = NULL; + } +} + +static void +ionic_lif_qcq_deinit(struct ionic_lif *lif, struct ionic_qcq *qcq) +{ + struct ionic_dev *idev = &lif->adapter->idev; + + if (!(qcq->flags & IONIC_QCQ_F_INITED)) + return; + + if (qcq->flags & IONIC_QCQ_F_INTR) + ionic_intr_mask(idev->intr_ctrl, qcq->intr.index, + IONIC_INTR_MASK_SET); + + qcq->flags &= ~IONIC_QCQ_F_INITED; +} + +void +ionic_lif_txq_deinit(struct ionic_qcq *qcq) +{ + ionic_lif_qcq_deinit(qcq->lif, qcq); +} + +void +ionic_lif_rxq_deinit(struct ionic_qcq *qcq) +{ + ionic_lif_qcq_deinit(qcq->lif, qcq); +} + +bool 
+ionic_adminq_service(struct ionic_cq *cq, uint32_t cq_desc_index, + void *cb_arg __rte_unused) +{ + struct ionic_admin_comp *cq_desc_base = cq->base; + struct ionic_admin_comp *cq_desc = &cq_desc_base[cq_desc_index]; + + if (!color_match(cq_desc->color, cq->done_color)) + return false; + + ionic_q_service(cq->bound_q, cq_desc_index, cq_desc->comp_index, NULL); + + return true; +} + +/* This acts like ionic_napi */ +int +ionic_qcq_service(struct ionic_qcq *qcq, int budget, ionic_cq_cb cb, + void *cb_arg) +{ + struct ionic_cq *cq = &qcq->cq; + uint32_t work_done; + + work_done = ionic_cq_service(cq, budget, cb, cb_arg); + + return work_done; +} + +static void +ionic_link_status_check(struct ionic_lif *lif) +{ + struct ionic_adapter *adapter = lif->adapter; + bool link_up; + + lif->state &= ~IONIC_LIF_F_LINK_CHECK_NEEDED; + + if (!lif->info) + return; + + link_up = (lif->info->status.link_status == IONIC_PORT_OPER_STATUS_UP); + + if ((link_up && adapter->link_up) || + (!link_up && !adapter->link_up)) + return; + + if (link_up) { + IONIC_PRINT(DEBUG, "Link up - %d Gbps", + lif->info->status.link_speed); + adapter->link_speed = lif->info->status.link_speed; + } else { + IONIC_PRINT(DEBUG, "Link down"); + } + + adapter->link_up = link_up; +} + +static bool +ionic_notifyq_cb(struct ionic_cq *cq, uint32_t cq_desc_index, void *cb_arg) +{ + union ionic_notifyq_comp *cq_desc_base = cq->base; + union ionic_notifyq_comp *cq_desc = &cq_desc_base[cq_desc_index]; + struct ionic_lif *lif = cb_arg; + + IONIC_PRINT(DEBUG, "Notifyq callback eid = %jd ecode = %d", + cq_desc->event.eid, cq_desc->event.ecode); + + /* Have we run out of new completions to process? */ + if (!(cq_desc->event.eid > lif->last_eid)) + return false; + + lif->last_eid = cq_desc->event.eid; + + switch (cq_desc->event.ecode) { + case IONIC_EVENT_LINK_CHANGE: + IONIC_PRINT(DEBUG, + "Notifyq IONIC_EVENT_LINK_CHANGE eid=%jd link_status=%d link_speed=%d", + cq_desc->event.eid, + cq_desc->link_change.link_status, + cq_desc->link_change.link_speed); + + lif->state |= IONIC_LIF_F_LINK_CHECK_NEEDED; + + break; + default: + IONIC_PRINT(WARNING, "Notifyq bad event ecode=%d eid=%jd", + cq_desc->event.ecode, cq_desc->event.eid); + break; + } + + return true; +} + +int +ionic_notifyq_handler(struct ionic_lif *lif, int budget) +{ + struct ionic_dev *idev = &lif->adapter->idev; + struct ionic_qcq *qcq = lif->notifyqcq; + uint32_t work_done; + + if (!(qcq->flags & IONIC_QCQ_F_INITED)) { + IONIC_PRINT(DEBUG, "Notifyq not yet initialized"); + return -1; + } + + ionic_intr_mask(idev->intr_ctrl, qcq->intr.index, + IONIC_INTR_MASK_SET); + + work_done = ionic_qcq_service(qcq, budget, ionic_notifyq_cb, lif); + + if (lif->state & IONIC_LIF_F_LINK_CHECK_NEEDED) + ionic_link_status_check(lif); + + ionic_intr_credits(idev->intr_ctrl, qcq->intr.index, + work_done, IONIC_INTR_CRED_RESET_COALESCE); + + ionic_intr_mask(idev->intr_ctrl, qcq->intr.index, + IONIC_INTR_MASK_CLEAR); + + return 0; +} + +static int +ionic_lif_adminq_init(struct ionic_lif *lif) +{ + struct ionic_dev *idev = &lif->adapter->idev; + struct ionic_qcq *qcq = lif->adminqcq; + struct ionic_queue *q = &qcq->q; + struct ionic_q_init_comp comp; + int err; + + ionic_dev_cmd_adminq_init(idev, qcq, lif->index, qcq->intr.index); + err = ionic_dev_cmd_wait_check(idev, IONIC_DEVCMD_TIMEOUT); + if (err) + return err; + + ionic_dev_cmd_comp(idev, &comp); + + q->hw_type = comp.hw_type; + q->hw_index = comp.hw_index; + q->db = ionic_db_map(lif, q); + + IONIC_PRINT(DEBUG, "adminq->hw_type %d", q->hw_type); + 
IONIC_PRINT(DEBUG, "adminq->hw_index %d", q->hw_index); + IONIC_PRINT(DEBUG, "adminq->db %p", q->db); + + if (qcq->flags & IONIC_QCQ_F_INTR) + ionic_intr_mask(idev->intr_ctrl, qcq->intr.index, + IONIC_INTR_MASK_CLEAR); + + qcq->flags |= IONIC_QCQ_F_INITED; + + return 0; +} + +static int +ionic_lif_notifyq_init(struct ionic_lif *lif) +{ + struct ionic_dev *idev = &lif->adapter->idev; + struct ionic_qcq *qcq = lif->notifyqcq; + struct ionic_queue *q = &qcq->q; + int err; + + struct ionic_admin_ctx ctx = { + .pending_work = true, + .cmd.q_init = { + .opcode = IONIC_CMD_Q_INIT, + .lif_index = lif->index, + .type = q->type, + .index = q->index, + .flags = (IONIC_QINIT_F_IRQ | IONIC_QINIT_F_ENA), + .intr_index = qcq->intr.index, + .pid = q->pid, + .ring_size = rte_log2_u32(q->num_descs), + .ring_base = q->base_pa, + } + }; + + IONIC_PRINT(DEBUG, "notifyq_init.pid %d", ctx.cmd.q_init.pid); + IONIC_PRINT(DEBUG, "notifyq_init.index %d", + ctx.cmd.q_init.index); + IONIC_PRINT(DEBUG, "notifyq_init.ring_base 0x%" PRIx64 "", + ctx.cmd.q_init.ring_base); + IONIC_PRINT(DEBUG, "notifyq_init.ring_size %d", + ctx.cmd.q_init.ring_size); + + err = ionic_adminq_post_wait(lif, &ctx); + if (err) + return err; + + q->hw_type = ctx.comp.q_init.hw_type; + q->hw_index = ctx.comp.q_init.hw_index; + q->db = NULL; + + IONIC_PRINT(DEBUG, "notifyq->hw_type %d", q->hw_type); + IONIC_PRINT(DEBUG, "notifyq->hw_index %d", q->hw_index); + IONIC_PRINT(DEBUG, "notifyq->db %p", q->db); + + if (qcq->flags & IONIC_QCQ_F_INTR) + ionic_intr_mask(idev->intr_ctrl, qcq->intr.index, + IONIC_INTR_MASK_CLEAR); + + qcq->flags |= IONIC_QCQ_F_INITED; + + return 0; +} + +int +ionic_lif_set_features(struct ionic_lif *lif) +{ + struct ionic_admin_ctx ctx = { + .pending_work = true, + .cmd.lif_setattr = { + .opcode = IONIC_CMD_LIF_SETATTR, + .index = lif->index, + .attr = IONIC_LIF_ATTR_FEATURES, + .features = lif->features, + }, + }; + int err; + + err = ionic_adminq_post_wait(lif, &ctx); + if (err) + return err; + + lif->hw_features = (ctx.cmd.lif_setattr.features & + ctx.comp.lif_setattr.features); + + if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG) + IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_VLAN_TX_TAG"); + if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP) + IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_VLAN_RX_STRIP"); + if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER) + IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_VLAN_RX_FILTER"); + if (lif->hw_features & IONIC_ETH_HW_RX_HASH) + IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_RX_HASH"); + if (lif->hw_features & IONIC_ETH_HW_TX_SG) + IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TX_SG"); + if (lif->hw_features & IONIC_ETH_HW_RX_SG) + IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_RX_SG"); + if (lif->hw_features & IONIC_ETH_HW_TX_CSUM) + IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TX_CSUM"); + if (lif->hw_features & IONIC_ETH_HW_RX_CSUM) + IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_RX_CSUM"); + if (lif->hw_features & IONIC_ETH_HW_TSO) + IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO"); + if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6) + IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO_IPV6"); + if (lif->hw_features & IONIC_ETH_HW_TSO_ECN) + IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO_ECN"); + if (lif->hw_features & IONIC_ETH_HW_TSO_GRE) + IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO_GRE"); + if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM) + IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO_GRE_CSUM"); + if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4) + IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO_IPXIP4"); + if 
(lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6) + IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO_IPXIP6"); + if (lif->hw_features & IONIC_ETH_HW_TSO_UDP) + IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO_UDP"); + if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM) + IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO_UDP_CSUM"); + + return 0; +} + +int +ionic_lif_txq_init(struct ionic_qcq *qcq) +{ + struct ionic_queue *q = &qcq->q; + struct ionic_lif *lif = qcq->lif; + struct ionic_cq *cq = &qcq->cq; + struct ionic_admin_ctx ctx = { + .pending_work = true, + .cmd.q_init = { + .opcode = IONIC_CMD_Q_INIT, + .lif_index = lif->index, + .type = q->type, + .index = q->index, + .flags = IONIC_QINIT_F_SG, + .intr_index = cq->bound_intr->index, + .pid = q->pid, + .ring_size = rte_log2_u32(q->num_descs), + .ring_base = q->base_pa, + .cq_ring_base = cq->base_pa, + .sg_ring_base = q->sg_base_pa, + }, + }; + int err; + + IONIC_PRINT(DEBUG, "txq_init.pid %d", ctx.cmd.q_init.pid); + IONIC_PRINT(DEBUG, "txq_init.index %d", ctx.cmd.q_init.index); + IONIC_PRINT(DEBUG, "txq_init.ring_base 0x%" PRIx64 "", + ctx.cmd.q_init.ring_base); + IONIC_PRINT(DEBUG, "txq_init.ring_size %d", + ctx.cmd.q_init.ring_size); + + err = ionic_adminq_post_wait(qcq->lif, &ctx); + if (err) + return err; + + q->hw_type = ctx.comp.q_init.hw_type; + q->hw_index = ctx.comp.q_init.hw_index; + q->db = ionic_db_map(lif, q); + + IONIC_PRINT(DEBUG, "txq->hw_type %d", q->hw_type); + IONIC_PRINT(DEBUG, "txq->hw_index %d", q->hw_index); + IONIC_PRINT(DEBUG, "txq->db %p", q->db); + + qcq->flags |= IONIC_QCQ_F_INITED; + + return 0; +} + +int +ionic_lif_rxq_init(struct ionic_qcq *qcq) +{ + struct ionic_queue *q = &qcq->q; + struct ionic_lif *lif = qcq->lif; + struct ionic_cq *cq = &qcq->cq; + struct ionic_admin_ctx ctx = { + .pending_work = true, + .cmd.q_init = { + .opcode = IONIC_CMD_Q_INIT, + .lif_index = lif->index, + .type = q->type, + .index = q->index, + .flags = IONIC_QINIT_F_SG, + .intr_index = cq->bound_intr->index, + .pid = q->pid, + .ring_size = rte_log2_u32(q->num_descs), + .ring_base = q->base_pa, + .cq_ring_base = cq->base_pa, + .sg_ring_base = q->sg_base_pa, + }, + }; + int err; + + IONIC_PRINT(DEBUG, "rxq_init.pid %d", ctx.cmd.q_init.pid); + IONIC_PRINT(DEBUG, "rxq_init.index %d", ctx.cmd.q_init.index); + IONIC_PRINT(DEBUG, "rxq_init.ring_base 0x%" PRIx64 "", + ctx.cmd.q_init.ring_base); + IONIC_PRINT(DEBUG, "rxq_init.ring_size %d", + ctx.cmd.q_init.ring_size); + + err = ionic_adminq_post_wait(qcq->lif, &ctx); + if (err) + return err; + + q->hw_type = ctx.comp.q_init.hw_type; + q->hw_index = ctx.comp.q_init.hw_index; + q->db = ionic_db_map(lif, q); + + qcq->flags |= IONIC_QCQ_F_INITED; + + IONIC_PRINT(DEBUG, "rxq->hw_type %d", q->hw_type); + IONIC_PRINT(DEBUG, "rxq->hw_index %d", q->hw_index); + IONIC_PRINT(DEBUG, "rxq->db %p", q->db); + + return 0; +} + +static int +ionic_station_set(struct ionic_lif *lif) +{ + struct ionic_admin_ctx ctx = { + .pending_work = true, + .cmd.lif_getattr = { + .opcode = IONIC_CMD_LIF_GETATTR, + .index = lif->index, + .attr = IONIC_LIF_ATTR_MAC, + }, + }; + int err; + + IONIC_PRINT_CALL(); + + err = ionic_adminq_post_wait(lif, &ctx); + if (err) + return err; + + if (!rte_is_zero_ether_addr((struct rte_ether_addr *) + lif->mac_addr)) { + IONIC_PRINT(INFO, "deleting station MAC addr"); + + ionic_lif_addr_del(lif, lif->mac_addr); + } + + memcpy(lif->mac_addr, ctx.comp.lif_getattr.mac, RTE_ETHER_ADDR_LEN); + + if (rte_is_zero_ether_addr((struct rte_ether_addr *)lif->mac_addr)) { + IONIC_PRINT(NOTICE, "empty MAC addr 
(VF?)"); + return 0; + } + + IONIC_PRINT(DEBUG, "adding station MAC addr"); + + ionic_lif_addr_add(lif, lif->mac_addr); + + return 0; +} + +static void +ionic_lif_set_name(struct ionic_lif *lif) +{ + struct ionic_admin_ctx ctx = { + .pending_work = true, + .cmd.lif_setattr = { + .opcode = IONIC_CMD_LIF_SETATTR, + .index = lif->index, + .attr = IONIC_LIF_ATTR_NAME, + }, + }; + + snprintf(ctx.cmd.lif_setattr.name, sizeof(ctx.cmd.lif_setattr.name), + "%d", lif->port_id); + + ionic_adminq_post_wait(lif, &ctx); +} + +int +ionic_lif_init(struct ionic_lif *lif) +{ + struct ionic_dev *idev = &lif->adapter->idev; + struct ionic_q_init_comp comp; + int err; + + memset(&lif->stats_base, 0, sizeof(lif->stats_base)); + + ionic_dev_cmd_lif_init(idev, lif->index, lif->info_pa); + err = ionic_dev_cmd_wait_check(idev, IONIC_DEVCMD_TIMEOUT); + ionic_dev_cmd_comp(idev, &comp); + if (err) + return err; + + lif->hw_index = comp.hw_index; + + err = ionic_lif_adminq_init(lif); + if (err) + return err; + + err = ionic_lif_notifyq_init(lif); + if (err) + goto err_out_adminq_deinit; + + lif->features = + IONIC_ETH_HW_VLAN_TX_TAG + | IONIC_ETH_HW_VLAN_RX_STRIP + | IONIC_ETH_HW_VLAN_RX_FILTER + | IONIC_ETH_HW_RX_HASH + | IONIC_ETH_HW_TX_SG + | IONIC_ETH_HW_RX_SG + | IONIC_ETH_HW_TX_CSUM + | IONIC_ETH_HW_RX_CSUM + | IONIC_ETH_HW_TSO + | IONIC_ETH_HW_TSO_IPV6 + | IONIC_ETH_HW_TSO_ECN; + + err = ionic_lif_set_features(lif); + if (err) + goto err_out_notifyq_deinit; + + err = ionic_rx_filters_init(lif); + if (err) + goto err_out_notifyq_deinit; + + err = ionic_station_set(lif); + if (err) + goto err_out_rx_filter_deinit; + + ionic_lif_set_name(lif); + + lif->state |= IONIC_LIF_F_INITED; + + return 0; + +err_out_rx_filter_deinit: + ionic_rx_filters_deinit(lif); + +err_out_notifyq_deinit: + ionic_lif_qcq_deinit(lif, lif->notifyqcq); + +err_out_adminq_deinit: + ionic_lif_qcq_deinit(lif, lif->adminqcq); + + return err; +} + +void +ionic_lif_deinit(struct ionic_lif *lif) +{ + if (!(lif->state & IONIC_LIF_F_INITED)) + return; + + ionic_rx_filters_deinit(lif); + ionic_lif_rss_teardown(lif); + ionic_lif_qcq_deinit(lif, lif->notifyqcq); + ionic_lif_qcq_deinit(lif, lif->adminqcq); + + lif->state &= ~IONIC_LIF_F_INITED; +} + +int +ionic_lif_configure(struct ionic_lif *lif) +{ + struct ionic_identity *ident = &lif->adapter->ident; + uint32_t ntxqs_per_lif = + ident->lif.eth.config.queue_count[IONIC_QTYPE_TXQ]; + uint32_t nrxqs_per_lif = + ident->lif.eth.config.queue_count[IONIC_QTYPE_RXQ]; + uint32_t nrxqs = lif->eth_dev->data->nb_rx_queues; + uint32_t ntxqs = lif->eth_dev->data->nb_tx_queues; + + lif->port_id = lif->eth_dev->data->port_id; + + IONIC_PRINT(DEBUG, "Configuring LIF on port %u", + lif->port_id); + + if (nrxqs > 0) + nrxqs_per_lif = RTE_MIN(nrxqs_per_lif, nrxqs); + + if (ntxqs > 0) + ntxqs_per_lif = RTE_MIN(ntxqs_per_lif, ntxqs); + + lif->nrxqcqs = nrxqs_per_lif; + lif->ntxqcqs = ntxqs_per_lif; + + return 0; +} + +int +ionic_lif_start(struct ionic_lif *lif) +{ + uint32_t rx_mode = 0; + uint32_t i; + int err; + + IONIC_PRINT(DEBUG, "Setting RSS configuration on port %u", + lif->port_id); + + err = ionic_lif_rss_setup(lif); + if (err) + return err; + + IONIC_PRINT(DEBUG, "Setting RX mode on port %u", + lif->port_id); + + rx_mode |= IONIC_RX_MODE_F_UNICAST; + rx_mode |= IONIC_RX_MODE_F_MULTICAST; + rx_mode |= IONIC_RX_MODE_F_BROADCAST; + + lif->rx_mode = 0; /* set by ionic_set_rx_mode */ + + ionic_set_rx_mode(lif, rx_mode); + + IONIC_PRINT(DEBUG, "Starting %u RX queues and %u TX queues " + "on port %u", + lif->nrxqcqs, 
lif->ntxqcqs, lif->port_id); + + for (i = 0; i < lif->nrxqcqs; i++) { + struct ionic_qcq *rxq = lif->rxqcqs[i]; + if (!rxq->deferred_start) { + err = ionic_dev_rx_queue_start(lif->eth_dev, i); + + if (err) + return err; + } + } + + for (i = 0; i < lif->ntxqcqs; i++) { + struct ionic_qcq *txq = lif->txqcqs[i]; + if (!txq->deferred_start) { + err = ionic_dev_tx_queue_start(lif->eth_dev, i); + + if (err) + return err; + } + } + + ionic_link_status_check(lif); + + /* Carrier ON here */ + + return 0; +} + +int +ionic_lif_identify(struct ionic_adapter *adapter) +{ + struct ionic_dev *idev = &adapter->idev; + struct ionic_identity *ident = &adapter->ident; + int err; + unsigned int i; + unsigned int lif_words = sizeof(ident->lif.words) / + sizeof(ident->lif.words[0]); + unsigned int cmd_words = sizeof(idev->dev_cmd->data) / + sizeof(idev->dev_cmd->data[0]); + unsigned int nwords; + + ionic_dev_cmd_lif_identify(idev, IONIC_LIF_TYPE_CLASSIC, + IONIC_IDENTITY_VERSION_1); + err = ionic_dev_cmd_wait_check(idev, IONIC_DEVCMD_TIMEOUT); + if (err) + return (err); + + nwords = RTE_MIN(lif_words, cmd_words); + for (i = 0; i < nwords; i++) + ident->lif.words[i] = ioread32(&idev->dev_cmd->data[i]); + + IONIC_PRINT(INFO, "capabilities 0x%" PRIx64 " ", + ident->lif.capabilities); + + IONIC_PRINT(INFO, "eth.max_ucast_filters 0x%" PRIx32 " ", + ident->lif.eth.max_ucast_filters); + IONIC_PRINT(INFO, "eth.max_mcast_filters 0x%" PRIx32 " ", + ident->lif.eth.max_mcast_filters); + + IONIC_PRINT(INFO, "eth.features 0x%" PRIx64 " ", + ident->lif.eth.config.features); + IONIC_PRINT(INFO, "eth.queue_count[IONIC_QTYPE_ADMINQ] 0x%" PRIx32 " ", + ident->lif.eth.config.queue_count[IONIC_QTYPE_ADMINQ]); + IONIC_PRINT(INFO, "eth.queue_count[IONIC_QTYPE_NOTIFYQ] 0x%" PRIx32 " ", + ident->lif.eth.config.queue_count[IONIC_QTYPE_NOTIFYQ]); + IONIC_PRINT(INFO, "eth.queue_count[IONIC_QTYPE_RXQ] 0x%" PRIx32 " ", + ident->lif.eth.config.queue_count[IONIC_QTYPE_RXQ]); + IONIC_PRINT(INFO, "eth.queue_count[IONIC_QTYPE_TXQ] 0x%" PRIx32 " ", + ident->lif.eth.config.queue_count[IONIC_QTYPE_TXQ]); + + return 0; +} + +int +ionic_lifs_size(struct ionic_adapter *adapter) +{ + struct ionic_identity *ident = &adapter->ident; + uint32_t nlifs = ident->dev.nlifs; + uint32_t nintrs, dev_nintrs = ident->dev.nintrs; + + adapter->max_ntxqs_per_lif = + ident->lif.eth.config.queue_count[IONIC_QTYPE_TXQ]; + adapter->max_nrxqs_per_lif = + ident->lif.eth.config.queue_count[IONIC_QTYPE_RXQ]; + + nintrs = nlifs * 1 /* notifyq */; + + if (nintrs > dev_nintrs) { + IONIC_PRINT(ERR, "At most %d intr queues supported, minimum required is %u", + dev_nintrs, nintrs); + return -ENOSPC; + } + + adapter->nintrs = nintrs; + + return 0; +} diff --git a/src/spdk/dpdk/drivers/net/ionic/ionic_lif.h b/src/spdk/dpdk/drivers/net/ionic/ionic_lif.h new file mode 100644 index 000000000..425762d65 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ionic/ionic_lif.h @@ -0,0 +1,190 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) + * Copyright(c) 2018-2019 Pensando Systems, Inc. All rights reserved. 
+ */ + +#ifndef _IONIC_LIF_H_ +#define _IONIC_LIF_H_ + +#include + +#include +#include + +#include "ionic_osdep.h" +#include "ionic_dev.h" +#include "ionic_rx_filter.h" + +#define IONIC_ADMINQ_LENGTH 16 /* must be a power of two */ +#define IONIC_NOTIFYQ_LENGTH 64 /* must be a power of two */ + +#define IONIC_RSS_OFFLOAD_ALL ( \ + IONIC_RSS_TYPE_IPV4 | \ + IONIC_RSS_TYPE_IPV4_TCP | \ + IONIC_RSS_TYPE_IPV4_UDP | \ + IONIC_RSS_TYPE_IPV6 | \ + IONIC_RSS_TYPE_IPV6_TCP | \ + IONIC_RSS_TYPE_IPV6_UDP) + +#define IONIC_GET_SG_CNTR_IDX(num_sg_elems) (num_sg_elems) + +struct ionic_tx_stats { + uint64_t packets; + uint64_t bytes; + uint64_t drop; + uint64_t stop; + uint64_t no_csum; + uint64_t tso; + uint64_t frags; +}; + +struct ionic_rx_stats { + uint64_t packets; + uint64_t bytes; + uint64_t no_cb_arg; + uint64_t bad_cq_status; + uint64_t no_room; + uint64_t bad_len; +}; + +#define IONIC_QCQ_F_INITED BIT(0) +#define IONIC_QCQ_F_SG BIT(1) +#define IONIC_QCQ_F_INTR BIT(2) +#define IONIC_QCQ_F_NOTIFYQ BIT(3) + +/* Queue / Completion Queue */ +struct ionic_qcq { + uint64_t offloads; + struct ionic_queue q; /**< Queue */ + struct ionic_cq cq; /**< Completion Queue */ + struct ionic_lif *lif; /**< LIF */ + struct rte_mempool *mb_pool; /**< mbuf pool to populate the RX ring */ + union { + struct ionic_tx_stats tx; + struct ionic_rx_stats rx; + } stats; + const struct rte_memzone *base_z; + void *base; + rte_iova_t base_pa; + uint32_t total_size; + uint32_t flags; + struct ionic_intr_info intr; + bool deferred_start; +}; + +#define IONIC_Q_TO_QCQ(q) container_of(q, struct ionic_qcq, q) +#define IONIC_Q_TO_TX_STATS(q) (&IONIC_Q_TO_QCQ(q)->stats.tx) +#define IONIC_Q_TO_RX_STATS(q) (&IONIC_Q_TO_QCQ(q)->stats.rx) + +#define IONIC_LIF_F_INITED BIT(0) +#define IONIC_LIF_F_LINK_CHECK_NEEDED BIT(1) + +#define IONIC_LIF_NAME_MAX_SZ (32) + +struct ionic_lif { + struct ionic_adapter *adapter; + struct rte_eth_dev *eth_dev; + uint16_t port_id; /**< Device port identifier */ + uint16_t mtu; + uint32_t index; + uint32_t hw_index; + uint32_t state; + uint32_t ntxqcqs; + uint32_t nrxqcqs; + uint32_t kern_pid; + rte_spinlock_t adminq_lock; + rte_spinlock_t adminq_service_lock; + struct ionic_qcq *adminqcq; + struct ionic_qcq *notifyqcq; + struct ionic_qcq **txqcqs; + struct ionic_qcq **rxqcqs; + struct ionic_rx_filters rx_filters; + struct ionic_doorbell __iomem *kern_dbpage; + uint64_t last_eid; + uint64_t features; + uint32_t hw_features; + uint32_t rx_mode; + char name[IONIC_LIF_NAME_MAX_SZ]; + uint8_t mac_addr[RTE_ETHER_ADDR_LEN]; + uint16_t rss_types; + uint8_t rss_hash_key[IONIC_RSS_HASH_KEY_SIZE]; + uint8_t *rss_ind_tbl; + rte_iova_t rss_ind_tbl_pa; + const struct rte_memzone *rss_ind_tbl_z; + uint32_t info_sz; + struct ionic_lif_info *info; + rte_iova_t info_pa; + const struct rte_memzone *info_z; + struct rte_eth_stats stats_base; + struct ionic_lif_stats lif_stats_base; +}; + +int ionic_lif_identify(struct ionic_adapter *adapter); +int ionic_lifs_size(struct ionic_adapter *ionic); + +int ionic_lif_alloc(struct ionic_lif *lif); +void ionic_lif_free(struct ionic_lif *lif); + +int ionic_lif_init(struct ionic_lif *lif); +void ionic_lif_deinit(struct ionic_lif *lif); + +int ionic_lif_start(struct ionic_lif *lif); +int ionic_lif_stop(struct ionic_lif *lif); + +int ionic_lif_configure(struct ionic_lif *lif); +void ionic_lif_reset(struct ionic_lif *lif); + +int ionic_intr_alloc(struct ionic_lif *lif, struct ionic_intr_info *intr); +void ionic_intr_free(struct ionic_lif *lif, struct ionic_intr_info *intr); + +bool 
ionic_adminq_service(struct ionic_cq *cq, uint32_t cq_desc_index, + void *cb_arg); +int ionic_qcq_service(struct ionic_qcq *qcq, int budget, ionic_cq_cb cb, + void *cb_arg); + +int ionic_lif_change_mtu(struct ionic_lif *lif, int new_mtu); + +int ionic_dev_add_mac(struct rte_eth_dev *eth_dev, + struct rte_ether_addr *mac_addr, + uint32_t index __rte_unused, uint32_t pool __rte_unused); +void ionic_dev_remove_mac(struct rte_eth_dev *eth_dev, + uint32_t index __rte_unused); +int ionic_dev_set_mac(struct rte_eth_dev *eth_dev, + struct rte_ether_addr *mac_addr); +int ionic_dev_vlan_filter_set(struct rte_eth_dev *eth_dev, uint16_t vlan_id, + int on); +int ionic_dev_promiscuous_enable(struct rte_eth_dev *dev); +int ionic_dev_promiscuous_disable(struct rte_eth_dev *dev); +int ionic_dev_allmulticast_enable(struct rte_eth_dev *dev); +int ionic_dev_allmulticast_disable(struct rte_eth_dev *dev); + +int ionic_rx_qcq_alloc(struct ionic_lif *lif, uint32_t index, + uint16_t nrxq_descs, struct ionic_qcq **qcq); +int ionic_tx_qcq_alloc(struct ionic_lif *lif, uint32_t index, + uint16_t ntxq_descs, struct ionic_qcq **qcq); +void ionic_qcq_free(struct ionic_qcq *qcq); + +int ionic_qcq_enable(struct ionic_qcq *qcq); +int ionic_qcq_disable(struct ionic_qcq *qcq); + +int ionic_lif_rxq_init(struct ionic_qcq *qcq); +void ionic_lif_rxq_deinit(struct ionic_qcq *qcq); + +int ionic_lif_txq_init(struct ionic_qcq *qcq); +void ionic_lif_txq_deinit(struct ionic_qcq *qcq); + +int ionic_lif_rss_config(struct ionic_lif *lif, const uint16_t types, + const uint8_t *key, const uint32_t *indir); + +int ionic_lif_set_features(struct ionic_lif *lif); + +void ionic_lif_get_stats(const struct ionic_lif *lif, + struct rte_eth_stats *stats); +void ionic_lif_reset_stats(struct ionic_lif *lif); + +void ionic_lif_get_hw_stats(struct ionic_lif *lif, + struct ionic_lif_stats *stats); +void ionic_lif_reset_hw_stats(struct ionic_lif *lif); + +int ionic_notifyq_handler(struct ionic_lif *lif, int budget); + +#endif /* _IONIC_LIF_H_ */ diff --git a/src/spdk/dpdk/drivers/net/ionic/ionic_logs.h b/src/spdk/dpdk/drivers/net/ionic/ionic_logs.h new file mode 100644 index 000000000..bc10ad174 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ionic/ionic_logs.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) + * Copyright(c) 2018-2019 Pensando Systems, Inc. All rights reserved. + */ + +#ifndef _IONIC_LOGS_H_ +#define _IONIC_LOGS_H_ + +#include + +extern int ionic_logtype; + +#define IONIC_PRINT(level, fmt, args...) rte_log(RTE_LOG_ ## level, \ + ionic_logtype, "%s(): " fmt "\n", __func__, ##args) + +#define IONIC_PRINT_CALL() IONIC_PRINT(DEBUG, " >>") + +#ifndef IONIC_WARN_ON +#define IONIC_WARN_ON(x) do { \ + int ret = !!(x); \ + if (unlikely(ret)) \ + IONIC_PRINT(WARNING, "WARN_ON: \"" #x "\" at %s:%d\n", \ + __func__, __LINE__); \ +} while (0) +#endif + +#endif /* _IONIC_LOGS_H_ */ diff --git a/src/spdk/dpdk/drivers/net/ionic/ionic_mac_api.c b/src/spdk/dpdk/drivers/net/ionic/ionic_mac_api.c new file mode 100644 index 000000000..c0ea042bc --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ionic/ionic_mac_api.c @@ -0,0 +1,63 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) + * Copyright(c) 2018-2019 Pensando Systems, Inc. All rights reserved. 
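+ *
+ * Usage sketch for the logging helpers defined in ionic_logs.h above
+ * (illustrative call sites only; the variables shown are hypothetical):
+ *   IONIC_PRINT(DEBUG, "txq %u started", qid);
+ *   IONIC_WARN_ON(qid >= lif->ntxqcqs);
+ * IONIC_PRINT prefixes the calling function name and appends a newline;
+ * IONIC_WARN_ON logs a WARNING with the function name and line number
+ * whenever the condition evaluates true.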
+ */ + +#include + +#include "ionic_mac_api.h" + +int32_t +ionic_init_mac(struct ionic_hw *hw) +{ + int err = 0; + + IONIC_PRINT_CALL(); + + /* + * Set the mac type + */ + ionic_set_mac_type(hw); + + switch (hw->mac.type) { + case IONIC_MAC_CAPRI: + break; + default: + err = -EINVAL; + break; + } + + return err; +} + +int32_t +ionic_set_mac_type(struct ionic_hw *hw) +{ + int err = 0; + + IONIC_PRINT_CALL(); + + if (hw->vendor_id != IONIC_PENSANDO_VENDOR_ID) { + IONIC_PRINT(ERR, "Unsupported vendor id: %" PRIx32 "", + hw->vendor_id); + return -EINVAL; + } + + switch (hw->device_id) { + case IONIC_DEV_ID_ETH_PF: + case IONIC_DEV_ID_ETH_VF: + case IONIC_DEV_ID_ETH_MGMT: + hw->mac.type = IONIC_MAC_CAPRI; + break; + default: + err = -EINVAL; + IONIC_PRINT(ERR, "Unsupported device id: %" PRIx32 "", + hw->device_id); + break; + } + + IONIC_PRINT(INFO, "Mac: %d (%d)", + hw->mac.type, err); + + return err; +} + diff --git a/src/spdk/dpdk/drivers/net/ionic/ionic_mac_api.h b/src/spdk/dpdk/drivers/net/ionic/ionic_mac_api.h new file mode 100644 index 000000000..ed9e059a6 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ionic/ionic_mac_api.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) + * Copyright(c) 2018-2019 Pensando Systems, Inc. All rights reserved. + */ + +#ifndef _IONIC_API_H_ +#define _IONIC_API_H_ + +#include "ionic.h" + +int32_t ionic_init_mac(struct ionic_hw *hw); +int32_t ionic_set_mac_type(struct ionic_hw *hw); + +#endif /* _IONIC_API_H_ */ diff --git a/src/spdk/dpdk/drivers/net/ionic/ionic_main.c b/src/spdk/dpdk/drivers/net/ionic/ionic_main.c new file mode 100644 index 000000000..2ade213d2 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ionic/ionic_main.c @@ -0,0 +1,443 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) + * Copyright(c) 2018-2019 Pensando Systems, Inc. All rights reserved. 
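+ *
+ * The MAC-type probe in ionic_mac_api.c above is a straight ID match: the
+ * Pensando Ethernet PF, VF and management device IDs all map to
+ * IONIC_MAC_CAPRI, anything else fails with -EINVAL. Illustrative flow,
+ * assuming the PCI probe has already filled in the IDs:
+ *   hw->vendor_id = IONIC_PENSANDO_VENDOR_ID;
+ *   hw->device_id = IONIC_DEV_ID_ETH_PF;
+ *   ionic_init_mac(hw);   -- sets hw->mac.type to IONIC_MAC_CAPRI, returns 0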
+ */ + +#include + +#include + +#include "ionic.h" +#include "ionic_ethdev.h" +#include "ionic_lif.h" + +static const char * +ionic_error_to_str(enum ionic_status_code code) +{ + switch (code) { + case IONIC_RC_SUCCESS: + return "IONIC_RC_SUCCESS"; + case IONIC_RC_EVERSION: + return "IONIC_RC_EVERSION"; + case IONIC_RC_EOPCODE: + return "IONIC_RC_EOPCODE"; + case IONIC_RC_EIO: + return "IONIC_RC_EIO"; + case IONIC_RC_EPERM: + return "IONIC_RC_EPERM"; + case IONIC_RC_EQID: + return "IONIC_RC_EQID"; + case IONIC_RC_EQTYPE: + return "IONIC_RC_EQTYPE"; + case IONIC_RC_ENOENT: + return "IONIC_RC_ENOENT"; + case IONIC_RC_EINTR: + return "IONIC_RC_EINTR"; + case IONIC_RC_EAGAIN: + return "IONIC_RC_EAGAIN"; + case IONIC_RC_ENOMEM: + return "IONIC_RC_ENOMEM"; + case IONIC_RC_EFAULT: + return "IONIC_RC_EFAULT"; + case IONIC_RC_EBUSY: + return "IONIC_RC_EBUSY"; + case IONIC_RC_EEXIST: + return "IONIC_RC_EEXIST"; + case IONIC_RC_EINVAL: + return "IONIC_RC_EINVAL"; + case IONIC_RC_ENOSPC: + return "IONIC_RC_ENOSPC"; + case IONIC_RC_ERANGE: + return "IONIC_RC_ERANGE"; + case IONIC_RC_BAD_ADDR: + return "IONIC_RC_BAD_ADDR"; + case IONIC_RC_DEV_CMD: + return "IONIC_RC_DEV_CMD"; + case IONIC_RC_ERROR: + return "IONIC_RC_ERROR"; + case IONIC_RC_ERDMA: + return "IONIC_RC_ERDMA"; + default: + return "IONIC_RC_UNKNOWN"; + } +} + +static const char * +ionic_opcode_to_str(enum ionic_cmd_opcode opcode) +{ + switch (opcode) { + case IONIC_CMD_NOP: + return "IONIC_CMD_NOP"; + case IONIC_CMD_INIT: + return "IONIC_CMD_INIT"; + case IONIC_CMD_RESET: + return "IONIC_CMD_RESET"; + case IONIC_CMD_IDENTIFY: + return "IONIC_CMD_IDENTIFY"; + case IONIC_CMD_GETATTR: + return "IONIC_CMD_GETATTR"; + case IONIC_CMD_SETATTR: + return "IONIC_CMD_SETATTR"; + case IONIC_CMD_PORT_IDENTIFY: + return "IONIC_CMD_PORT_IDENTIFY"; + case IONIC_CMD_PORT_INIT: + return "IONIC_CMD_PORT_INIT"; + case IONIC_CMD_PORT_RESET: + return "IONIC_CMD_PORT_RESET"; + case IONIC_CMD_PORT_GETATTR: + return "IONIC_CMD_PORT_GETATTR"; + case IONIC_CMD_PORT_SETATTR: + return "IONIC_CMD_PORT_SETATTR"; + case IONIC_CMD_LIF_INIT: + return "IONIC_CMD_LIF_INIT"; + case IONIC_CMD_LIF_RESET: + return "IONIC_CMD_LIF_RESET"; + case IONIC_CMD_LIF_IDENTIFY: + return "IONIC_CMD_LIF_IDENTIFY"; + case IONIC_CMD_LIF_SETATTR: + return "IONIC_CMD_LIF_SETATTR"; + case IONIC_CMD_LIF_GETATTR: + return "IONIC_CMD_LIF_GETATTR"; + case IONIC_CMD_RX_MODE_SET: + return "IONIC_CMD_RX_MODE_SET"; + case IONIC_CMD_RX_FILTER_ADD: + return "IONIC_CMD_RX_FILTER_ADD"; + case IONIC_CMD_RX_FILTER_DEL: + return "IONIC_CMD_RX_FILTER_DEL"; + case IONIC_CMD_Q_INIT: + return "IONIC_CMD_Q_INIT"; + case IONIC_CMD_Q_CONTROL: + return "IONIC_CMD_Q_CONTROL"; + case IONIC_CMD_RDMA_RESET_LIF: + return "IONIC_CMD_RDMA_RESET_LIF"; + case IONIC_CMD_RDMA_CREATE_EQ: + return "IONIC_CMD_RDMA_CREATE_EQ"; + case IONIC_CMD_RDMA_CREATE_CQ: + return "IONIC_CMD_RDMA_CREATE_CQ"; + case IONIC_CMD_RDMA_CREATE_ADMINQ: + return "IONIC_CMD_RDMA_CREATE_ADMINQ"; + default: + return "DEVCMD_UNKNOWN"; + } +} + +int +ionic_adminq_check_err(struct ionic_admin_ctx *ctx, bool timeout) +{ + const char *name; + const char *status; + + if (ctx->comp.comp.status || timeout) { + name = ionic_opcode_to_str(ctx->cmd.cmd.opcode); + status = ionic_error_to_str(ctx->comp.comp.status); + IONIC_PRINT(ERR, "%s (%d) failed: %s (%d)", + name, + ctx->cmd.cmd.opcode, + timeout ? "TIMEOUT" : status, + timeout ? 
-1 : ctx->comp.comp.status); + return -EIO; + } + + return 0; +} + +static int +ionic_wait_ctx_for_completion(struct ionic_lif *lif, struct ionic_qcq *qcq, + struct ionic_admin_ctx *ctx, unsigned long max_wait) +{ + unsigned long step_msec = 1; + unsigned int max_wait_msec = max_wait * 1000; + unsigned long elapsed_msec = 0; + int budget = 8; + + while (ctx->pending_work && elapsed_msec < max_wait_msec) { + /* + * Locking here as adminq is served inline (this could be called + * from multiple places) + */ + rte_spinlock_lock(&lif->adminq_service_lock); + + ionic_qcq_service(qcq, budget, ionic_adminq_service, NULL); + + rte_spinlock_unlock(&lif->adminq_service_lock); + + msec_delay(step_msec); + elapsed_msec += step_msec; + } + + return (!ctx->pending_work); +} + +int +ionic_adminq_post_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx) +{ + struct ionic_qcq *qcq = lif->adminqcq; + bool done; + int err; + + IONIC_PRINT(DEBUG, "Sending %s to the admin queue", + ionic_opcode_to_str(ctx->cmd.cmd.opcode)); + + err = ionic_adminq_post(lif, ctx); + if (err) { + IONIC_PRINT(ERR, "Failure posting to the admin queue %d (%d)", + ctx->cmd.cmd.opcode, err); + + return err; + } + + done = ionic_wait_ctx_for_completion(lif, qcq, ctx, + IONIC_DEVCMD_TIMEOUT); + + err = ionic_adminq_check_err(ctx, !done /* timed out */); + return err; +} + +static int +ionic_dev_cmd_wait(struct ionic_dev *idev, unsigned long max_wait) +{ + unsigned long step_msec = 100; + unsigned int max_wait_msec = max_wait * 1000; + unsigned long elapsed_msec = 0; + int done; + + /* Wait for dev cmd to complete.. but no more than max_wait sec */ + + do { + done = ionic_dev_cmd_done(idev); + if (done) { + IONIC_PRINT(DEBUG, "DEVCMD %d done took %ld msecs", + idev->dev_cmd->cmd.cmd.opcode, + elapsed_msec); + return 0; + } + + msec_delay(step_msec); + + elapsed_msec += step_msec; + } while (elapsed_msec < max_wait_msec); + + IONIC_PRINT(DEBUG, "DEVCMD %d timeout after %ld msecs", + idev->dev_cmd->cmd.cmd.opcode, + elapsed_msec); + + return -ETIMEDOUT; +} + +static int +ionic_dev_cmd_check_error(struct ionic_dev *idev) +{ + uint8_t status; + + status = ionic_dev_cmd_status(idev); + if (status == 0) + return 0; + + return -EIO; +} + +int +ionic_dev_cmd_wait_check(struct ionic_dev *idev, unsigned long max_wait) +{ + int err; + + err = ionic_dev_cmd_wait(idev, max_wait); + if (err) + return err; + + return ionic_dev_cmd_check_error(idev); +} + +int +ionic_setup(struct ionic_adapter *adapter) +{ + return ionic_dev_setup(adapter); +} + +int +ionic_identify(struct ionic_adapter *adapter) +{ + struct ionic_dev *idev = &adapter->idev; + struct ionic_identity *ident = &adapter->ident; + int err = 0; + uint32_t i; + unsigned int nwords; + uint32_t drv_size = sizeof(ident->drv.words) / + sizeof(ident->drv.words[0]); + uint32_t cmd_size = sizeof(idev->dev_cmd->data) / + sizeof(idev->dev_cmd->data[0]); + uint32_t dev_size = sizeof(ident->dev.words) / + sizeof(ident->dev.words[0]); + + memset(ident, 0, sizeof(*ident)); + + ident->drv.os_type = IONIC_OS_TYPE_LINUX; + ident->drv.os_dist = 0; + snprintf(ident->drv.os_dist_str, + sizeof(ident->drv.os_dist_str), "Unknown"); + ident->drv.kernel_ver = 0; + snprintf(ident->drv.kernel_ver_str, + sizeof(ident->drv.kernel_ver_str), "DPDK"); + strncpy(ident->drv.driver_ver_str, IONIC_DRV_VERSION, + sizeof(ident->drv.driver_ver_str) - 1); + + nwords = RTE_MIN(drv_size, cmd_size); + for (i = 0; i < nwords; i++) + iowrite32(ident->drv.words[i], &idev->dev_cmd->data[i]); + + ionic_dev_cmd_identify(idev, 
IONIC_IDENTITY_VERSION_1); + err = ionic_dev_cmd_wait_check(idev, IONIC_DEVCMD_TIMEOUT); + if (!err) { + nwords = RTE_MIN(dev_size, cmd_size); + for (i = 0; i < nwords; i++) + ident->dev.words[i] = ioread32(&idev->dev_cmd->data[i]); + } + + return err; +} + +int +ionic_init(struct ionic_adapter *adapter) +{ + struct ionic_dev *idev = &adapter->idev; + int err; + + ionic_dev_cmd_init(idev); + err = ionic_dev_cmd_wait_check(idev, IONIC_DEVCMD_TIMEOUT); + return err; +} + +int +ionic_reset(struct ionic_adapter *adapter) +{ + struct ionic_dev *idev = &adapter->idev; + int err; + + ionic_dev_cmd_reset(idev); + err = ionic_dev_cmd_wait_check(idev, IONIC_DEVCMD_TIMEOUT); + return err; +} + +int +ionic_port_identify(struct ionic_adapter *adapter) +{ + struct ionic_dev *idev = &adapter->idev; + struct ionic_identity *ident = &adapter->ident; + unsigned int port_words = sizeof(ident->port.words) / + sizeof(ident->port.words[0]); + unsigned int cmd_words = sizeof(idev->dev_cmd->data) / + sizeof(idev->dev_cmd->data[0]); + unsigned int i; + unsigned int nwords; + int err; + + ionic_dev_cmd_port_identify(idev); + err = ionic_dev_cmd_wait_check(idev, IONIC_DEVCMD_TIMEOUT); + if (!err) { + nwords = RTE_MIN(port_words, cmd_words); + for (i = 0; i < nwords; i++) + ident->port.words[i] = + ioread32(&idev->dev_cmd->data[i]); + } + + IONIC_PRINT(INFO, "speed %d ", ident->port.config.speed); + IONIC_PRINT(INFO, "mtu %d ", ident->port.config.mtu); + IONIC_PRINT(INFO, "state %d ", ident->port.config.state); + IONIC_PRINT(INFO, "an_enable %d ", ident->port.config.an_enable); + IONIC_PRINT(INFO, "fec_type %d ", ident->port.config.fec_type); + IONIC_PRINT(INFO, "pause_type %d ", ident->port.config.pause_type); + IONIC_PRINT(INFO, "loopback_mode %d", + ident->port.config.loopback_mode); + + return err; +} + +static const struct rte_memzone * +ionic_memzone_reserve(const char *name, uint32_t len, int socket_id) +{ + const struct rte_memzone *mz; + + mz = rte_memzone_lookup(name); + if (mz) + return mz; + + mz = rte_memzone_reserve_aligned(name, len, socket_id, + RTE_MEMZONE_IOVA_CONTIG, IONIC_ALIGN); + return mz; +} + +int +ionic_port_init(struct ionic_adapter *adapter) +{ + struct ionic_dev *idev = &adapter->idev; + struct ionic_identity *ident = &adapter->ident; + char z_name[RTE_MEMZONE_NAMESIZE]; + unsigned int config_words = sizeof(ident->port.config.words) / + sizeof(ident->port.config.words[0]); + unsigned int cmd_words = sizeof(idev->dev_cmd->data) / + sizeof(idev->dev_cmd->data[0]); + unsigned int nwords; + unsigned int i; + int err; + + if (idev->port_info) + return 0; + + idev->port_info_sz = RTE_ALIGN(sizeof(*idev->port_info), PAGE_SIZE); + + snprintf(z_name, sizeof(z_name), "%s_port_%s_info", + IONIC_DRV_NAME, + adapter->pci_dev->device.name); + + idev->port_info_z = ionic_memzone_reserve(z_name, idev->port_info_sz, + SOCKET_ID_ANY); + if (!idev->port_info_z) { + IONIC_PRINT(ERR, "Cannot reserve port info DMA memory"); + return -ENOMEM; + } + + idev->port_info = idev->port_info_z->addr; + idev->port_info_pa = idev->port_info_z->iova; + + nwords = RTE_MIN(config_words, cmd_words); + + for (i = 0; i < nwords; i++) + iowrite32(ident->port.config.words[i], &idev->dev_cmd->data[i]); + + ionic_dev_cmd_port_init(idev); + err = ionic_dev_cmd_wait_check(idev, IONIC_DEVCMD_TIMEOUT); + if (err) { + IONIC_PRINT(ERR, "Failed to init port"); + return err; + } + + ionic_dev_cmd_port_state(idev, IONIC_PORT_ADMIN_STATE_UP); + err = ionic_dev_cmd_wait_check(idev, IONIC_DEVCMD_TIMEOUT); + if (err) { + IONIC_PRINT(WARNING, 
"Failed to bring port UP"); + return err; + } + + return 0; +} + +int +ionic_port_reset(struct ionic_adapter *adapter) +{ + struct ionic_dev *idev = &adapter->idev; + int err; + + if (!idev->port_info) + return 0; + + ionic_dev_cmd_port_reset(idev); + err = ionic_dev_cmd_wait_check(idev, IONIC_DEVCMD_TIMEOUT); + if (err) { + IONIC_PRINT(ERR, "Failed to reset port"); + return err; + } + + idev->port_info = NULL; + idev->port_info_pa = 0; + + return 0; +} diff --git a/src/spdk/dpdk/drivers/net/ionic/ionic_osdep.h b/src/spdk/dpdk/drivers/net/ionic/ionic_osdep.h new file mode 100644 index 000000000..e04bb8f65 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ionic/ionic_osdep.h @@ -0,0 +1,58 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) + * Copyright(c) 2018-2019 Pensando Systems, Inc. All rights reserved. + */ + +#ifndef _IONIC_OSDEP_ +#define _IONIC_OSDEP_ + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "ionic_logs.h" + +#define DELAY(x) rte_delay_us(x) +#define usec_delay(x) DELAY(x) +#define msec_delay(x) DELAY(1000 * (x)) + +#define BIT(nr) (1UL << (nr)) +#define BIT_ULL(nr) (1ULL << (nr)) +#define BITS_TO_LONGS(nr) div_round_up(nr, 8 * sizeof(long)) + +#ifndef PAGE_SHIFT +#define PAGE_SHIFT 12 +#define PAGE_SIZE (1 << PAGE_SHIFT) +#endif + +#define __iomem + +typedef uint8_t u8; +typedef uint16_t u16; +typedef uint32_t u32; +typedef uint64_t u64; + +typedef uint16_t __le16; +typedef uint32_t __le32; +typedef uint64_t __le64; + +static inline uint32_t div_round_up(uint32_t n, uint32_t d) +{ + return (n + d - 1) / d; +} + +#define ioread8(reg) rte_read8(reg) +#define ioread32(reg) rte_read32(reg) +#define iowrite8(value, reg) rte_write8(value, reg) +#define iowrite32(value, reg) rte_write32(value, reg) +#define writeq(value, reg) rte_write64(value, reg) + +#endif diff --git a/src/spdk/dpdk/drivers/net/ionic/ionic_regs.h b/src/spdk/dpdk/drivers/net/ionic/ionic_regs.h new file mode 100644 index 000000000..3adc2bc7c --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ionic/ionic_regs.h @@ -0,0 +1,142 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) + * Copyright(c) 2018-2019 Pensando Systems, Inc. All rights reserved. + */ + +#ifndef _IONIC_REGS_H_ +#define _IONIC_REGS_H_ + +/** struct ionic_intr - interrupt control register set. + * @coal_init: coalesce timer initial value. + * @mask: interrupt mask value. + * @credits: interrupt credit count and return. + * @mask_assert: interrupt mask value on assert. + * @coal: coalesce timer time remaining. + */ +struct ionic_intr { + uint32_t coal_init; + uint32_t mask; + uint32_t credits; + uint32_t mask_assert; + uint32_t coal; + uint32_t rsvd[3]; +}; + +#define IONIC_INTR_CTRL_REGS_MAX 2048 +#define IONIC_INTR_CTRL_COAL_MAX 0x3F + +/** enum ionic_intr_mask_vals - valid values for mask and mask_assert. + * @IONIC_INTR_MASK_CLEAR: unmask interrupt. + * @IONIC_INTR_MASK_SET: mask interrupt. + */ +enum ionic_intr_mask_vals { + IONIC_INTR_MASK_CLEAR = 0, + IONIC_INTR_MASK_SET = 1, +}; + +/** enum ionic_intr_credits_bits - bitwise composition of credits values. + * @IONIC_INTR_CRED_COUNT: bit mask of credit count, no shift needed. + * @IONIC_INTR_CRED_COUNT_SIGNED: bit mask of credit count, including sign bit. + * @IONIC_INTR_CRED_UNMASK: unmask the interrupt. + * @IONIC_INTR_CRED_RESET_COALESCE: reset the coalesce timer. + * @IONIC_INTR_CRED_REARM: unmask the and reset the timer. 
+ */ +enum ionic_intr_credits_bits { + IONIC_INTR_CRED_COUNT = 0x7fffu, + IONIC_INTR_CRED_COUNT_SIGNED = 0xffffu, + IONIC_INTR_CRED_UNMASK = 0x10000u, + IONIC_INTR_CRED_RESET_COALESCE = 0x20000u, + IONIC_INTR_CRED_REARM = (IONIC_INTR_CRED_UNMASK | + IONIC_INTR_CRED_RESET_COALESCE), +}; + +static inline void +ionic_intr_coal_init(struct ionic_intr __iomem *intr_ctrl, + int intr_idx, uint32_t coal) +{ + iowrite32(coal, &intr_ctrl[intr_idx].coal_init); +} + +static inline void +ionic_intr_mask(struct ionic_intr __iomem *intr_ctrl, + int intr_idx, uint32_t mask) +{ + iowrite32(mask, &intr_ctrl[intr_idx].mask); +} + +static inline void +ionic_intr_credits(struct ionic_intr __iomem *intr_ctrl, + int intr_idx, uint32_t cred, uint32_t flags) +{ + if (cred > IONIC_INTR_CRED_COUNT) { + IONIC_WARN_ON(cred > IONIC_INTR_CRED_COUNT); + cred = ioread32(&intr_ctrl[intr_idx].credits); + cred &= IONIC_INTR_CRED_COUNT_SIGNED; + } + + iowrite32(cred | flags, &intr_ctrl[intr_idx].credits); +} + +static inline void +ionic_intr_clean(struct ionic_intr __iomem *intr_ctrl, + int intr_idx) +{ + uint32_t cred; + + cred = ioread32(&intr_ctrl[intr_idx].credits); + cred &= IONIC_INTR_CRED_COUNT_SIGNED; + cred |= IONIC_INTR_CRED_RESET_COALESCE; + iowrite32(cred, &intr_ctrl[intr_idx].credits); +} + +static inline void +ionic_intr_mask_assert(struct ionic_intr __iomem *intr_ctrl, + int intr_idx, uint32_t mask) +{ + iowrite32(mask, &intr_ctrl[intr_idx].mask_assert); +} + +/** enum ionic_dbell_bits - bitwise composition of dbell values. + * + * @IONIC_DBELL_QID_MASK: unshifted mask of valid queue id bits. + * @IONIC_DBELL_QID_SHIFT: queue id shift amount in dbell value. + * @IONIC_DBELL_QID: macro to build QID component of dbell value. + * + * @IONIC_DBELL_RING_MASK: unshifted mask of valid ring bits. + * @IONIC_DBELL_RING_SHIFT: ring shift amount in dbell value. + * @IONIC_DBELL_RING: macro to build ring component of dbell value. + * + * @IONIC_DBELL_RING_0: ring zero dbell component value. + * @IONIC_DBELL_RING_1: ring one dbell component value. + * @IONIC_DBELL_RING_2: ring two dbell component value. + * @IONIC_DBELL_RING_3: ring three dbell component value. + * + * @IONIC_DBELL_INDEX_MASK: bit mask of valid index bits, no shift needed. + */ +enum ionic_dbell_bits { + IONIC_DBELL_QID_MASK = 0xffffff, + IONIC_DBELL_QID_SHIFT = 24, + +#define IONIC_DBELL_QID(n) \ + (((u64)(n) & IONIC_DBELL_QID_MASK) << IONIC_DBELL_QID_SHIFT) + + IONIC_DBELL_RING_MASK = 0x7, + IONIC_DBELL_RING_SHIFT = 16, + +#define IONIC_DBELL_RING(n) \ + (((u64)(n) & IONIC_DBELL_RING_MASK) << IONIC_DBELL_RING_SHIFT) + + IONIC_DBELL_RING_0 = 0, + IONIC_DBELL_RING_1 = IONIC_DBELL_RING(1), + IONIC_DBELL_RING_2 = IONIC_DBELL_RING(2), + IONIC_DBELL_RING_3 = IONIC_DBELL_RING(3), + + IONIC_DBELL_INDEX_MASK = 0xffff, +}; + +static inline void +ionic_dbell_ring(u64 __iomem *db_page, int qtype, u64 val) +{ + writeq(val, &db_page[qtype]); +} + +#endif /* _IONIC_REGS_H_ */ diff --git a/src/spdk/dpdk/drivers/net/ionic/ionic_rx_filter.c b/src/spdk/dpdk/drivers/net/ionic/ionic_rx_filter.c new file mode 100644 index 000000000..fe624538d --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ionic/ionic_rx_filter.c @@ -0,0 +1,140 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) + * Copyright(c) 2018-2019 Pensando Systems, Inc. All rights reserved. 
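+ *
+ * Composition sketches for the register helpers in ionic_regs.h above
+ * (illustrative values; qid, prod_index, db_page and qtype are placeholders):
+ *   credits:  write (ndescs & IONIC_INTR_CRED_COUNT) | IONIC_INTR_CRED_REARM
+ *             to return ndescs credits, unmask the interrupt and restart the
+ *             coalesce timer -- this is the value ionic_intr_credits() builds.
+ *   doorbell: val = IONIC_DBELL_QID(qid) | IONIC_DBELL_RING_1 |
+ *                   (prod_index & IONIC_DBELL_INDEX_MASK);
+ *             ionic_dbell_ring(db_page, qtype, val);
+ *             i.e. queue id in bits 47:24, ring select in bits 18:16 and the
+ *             producer index in bits 15:0.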
+ */ + +#include +#include + +#include + +#include "ionic_lif.h" +#include "ionic_rx_filter.h" + +void +ionic_rx_filter_free(struct ionic_rx_filter *f) +{ + LIST_REMOVE(f, by_id); + LIST_REMOVE(f, by_hash); + rte_free(f); +} + +int +ionic_rx_filter_del(struct ionic_lif *lif, struct ionic_rx_filter *f) +{ + struct ionic_admin_ctx ctx = { + .pending_work = true, + .cmd.rx_filter_del = { + .opcode = IONIC_CMD_RX_FILTER_DEL, + .filter_id = f->filter_id, + }, + }; + + return ionic_adminq_post(lif, &ctx); +} + +int +ionic_rx_filters_init(struct ionic_lif *lif) +{ + uint32_t i; + + rte_spinlock_init(&lif->rx_filters.lock); + + for (i = 0; i < IONIC_RX_FILTER_HLISTS; i++) { + LIST_INIT(&lif->rx_filters.by_hash[i]); + LIST_INIT(&lif->rx_filters.by_id[i]); + } + + return 0; +} + +void +ionic_rx_filters_deinit(struct ionic_lif *lif) +{ + struct ionic_rx_filter *f; + uint32_t i; + + for (i = 0; i < IONIC_RX_FILTER_HLISTS; i++) { + while (!LIST_EMPTY(&lif->rx_filters.by_id[i])) { + f = LIST_FIRST(&lif->rx_filters.by_id[i]); + ionic_rx_filter_free(f); + } + } +} + +int +ionic_rx_filter_save(struct ionic_lif *lif, uint32_t flow_id, + uint16_t rxq_index, struct ionic_admin_ctx *ctx) +{ + struct ionic_rx_filter *f; + uint32_t key; + + f = rte_zmalloc("ionic", sizeof(*f), 0); + + if (!f) + return -ENOMEM; + + f->flow_id = flow_id; + f->filter_id = ctx->comp.rx_filter_add.filter_id; + f->rxq_index = rxq_index; + memcpy(&f->cmd, &ctx->cmd, sizeof(f->cmd)); + + switch (f->cmd.match) { + case IONIC_RX_FILTER_MATCH_VLAN: + key = f->cmd.vlan.vlan & IONIC_RX_FILTER_HLISTS_MASK; + break; + case IONIC_RX_FILTER_MATCH_MAC: + memcpy(&key, f->cmd.mac.addr, sizeof(key)); + key &= IONIC_RX_FILTER_HLISTS_MASK; + break; + case IONIC_RX_FILTER_MATCH_MAC_VLAN: + key = f->cmd.mac_vlan.vlan & IONIC_RX_FILTER_HLISTS_MASK; + break; + default: + return -EINVAL; + } + + rte_spinlock_lock(&lif->rx_filters.lock); + + LIST_INSERT_HEAD(&lif->rx_filters.by_hash[key], f, by_hash); + + key = f->filter_id & IONIC_RX_FILTER_HLISTS_MASK; + + LIST_INSERT_HEAD(&lif->rx_filters.by_id[key], f, by_id); + + rte_spinlock_unlock(&lif->rx_filters.lock); + + return 0; +} + +struct ionic_rx_filter * +ionic_rx_filter_by_vlan(struct ionic_lif *lif, uint16_t vid) +{ + uint32_t key = vid & IONIC_RX_FILTER_HLISTS_MASK; + struct ionic_rx_filter *f; + + LIST_FOREACH(f, &lif->rx_filters.by_hash[key], by_hash) { + if (f->cmd.match != IONIC_RX_FILTER_MATCH_VLAN) + continue; + if (f->cmd.vlan.vlan == vid) + return f; + } + + return NULL; +} + +struct ionic_rx_filter * +ionic_rx_filter_by_addr(struct ionic_lif *lif, const uint8_t *addr) +{ + const uint32_t key = *(const uint32_t *)addr & + IONIC_RX_FILTER_HLISTS_MASK; + struct ionic_rx_filter *f; + + LIST_FOREACH(f, &lif->rx_filters.by_hash[key], by_hash) { + if (f->cmd.match != IONIC_RX_FILTER_MATCH_MAC) + continue; + if (memcmp(addr, f->cmd.mac.addr, RTE_ETHER_ADDR_LEN) == 0) + return f; + } + + return NULL; +} diff --git a/src/spdk/dpdk/drivers/net/ionic/ionic_rx_filter.h b/src/spdk/dpdk/drivers/net/ionic/ionic_rx_filter.h new file mode 100644 index 000000000..6204a7b53 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ionic/ionic_rx_filter.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) + * Copyright(c) 2018-2019 Pensando Systems, Inc. All rights reserved. 
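+ *
+ * Bucket selection in ionic_rx_filter.c above, for illustration: with
+ * IONIC_RX_FILTER_HLISTS equal to 1024 the mask is 0x3FF, so a VLAN filter
+ * for VID 100 lands in by_hash[100], a MAC filter hashes the first four
+ * bytes of the address with the same mask, and the by_id table is indexed
+ * by filter_id & 0x3FF.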
+ */ + +#ifndef _IONIC_RX_FILTER_H_ +#define _IONIC_RX_FILTER_H_ + +#include + +#include "ionic_osdep.h" +#include "ionic_if.h" + +#define IONIC_RXQ_INDEX_ANY (0xFFFF) +struct ionic_rx_filter { + uint32_t flow_id; + uint32_t filter_id; + uint16_t rxq_index; + struct ionic_rx_filter_add_cmd cmd; + LIST_ENTRY(ionic_rx_filter) by_hash; + LIST_ENTRY(ionic_rx_filter) by_id; +}; + +#define IONIC_RX_FILTER_HLISTS (1 << 10) +#define IONIC_RX_FILTER_HLISTS_MASK (IONIC_RX_FILTER_HLISTS - 1) +struct ionic_rx_filters { + rte_spinlock_t lock; + LIST_HEAD(rx_filters_by_hash, ionic_rx_filter) + by_hash[IONIC_RX_FILTER_HLISTS]; /* by pkt hash */ + LIST_HEAD(rx_filters_by_id, ionic_rx_filter) + by_id[IONIC_RX_FILTER_HLISTS]; /* by filter_id */ +}; + +struct ionic_admin_ctx; +struct ionic_lif; + +void ionic_rx_filter_free(struct ionic_rx_filter *f); +int ionic_rx_filter_del(struct ionic_lif *lif, struct ionic_rx_filter *f); +int ionic_rx_filters_init(struct ionic_lif *lif); +void ionic_rx_filters_deinit(struct ionic_lif *lif); +int ionic_rx_filter_save(struct ionic_lif *lif, uint32_t flow_id, + uint16_t rxq_index, struct ionic_admin_ctx *ctx); +struct ionic_rx_filter *ionic_rx_filter_by_vlan(struct ionic_lif *lif, + uint16_t vid); +struct ionic_rx_filter *ionic_rx_filter_by_addr(struct ionic_lif *lif, + const uint8_t *addr); + +#endif /* _IONIC_RX_FILTER_H_ */ diff --git a/src/spdk/dpdk/drivers/net/ionic/ionic_rxtx.c b/src/spdk/dpdk/drivers/net/ionic/ionic_rxtx.c new file mode 100644 index 000000000..2592f5cab --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ionic/ionic_rxtx.c @@ -0,0 +1,1082 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) + * Copyright(c) 2018-2019 Pensando Systems, Inc. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ionic_logs.h" +#include "ionic_mac_api.h" +#include "ionic_ethdev.h" +#include "ionic_lif.h" +#include "ionic_rxtx.h" + +#define IONIC_RX_RING_DOORBELL_STRIDE (32 - 1) + +/********************************************************************* + * + * TX functions + * + **********************************************************************/ + +void +ionic_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, + struct rte_eth_txq_info *qinfo) +{ + struct ionic_qcq *txq = dev->data->tx_queues[queue_id]; + struct ionic_queue *q = &txq->q; + + qinfo->nb_desc = q->num_descs; + qinfo->conf.offloads = txq->offloads; + qinfo->conf.tx_deferred_start = txq->deferred_start; +} + +static inline void __rte_cold +ionic_tx_flush(struct ionic_cq *cq) +{ + struct ionic_queue *q = cq->bound_q; + struct ionic_desc_info *q_desc_info; + struct rte_mbuf *txm, *next; + struct ionic_txq_comp *cq_desc_base = cq->base; + struct ionic_txq_comp *cq_desc; + u_int32_t comp_index = (u_int32_t)-1; + + cq_desc = &cq_desc_base[cq->tail_idx]; + while (color_match(cq_desc->color, cq->done_color)) { + cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1); + + /* Prefetch the next 4 descriptors (not really useful here) */ + if ((cq->tail_idx & 0x3) == 0) + rte_prefetch0(&cq_desc_base[cq->tail_idx]); + + if (cq->tail_idx == 0) + cq->done_color = !cq->done_color; + + comp_index = cq_desc->comp_index; + + cq_desc = &cq_desc_base[cq->tail_idx]; + } 
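+
+	/*
+	 * The scan above only records the newest completed descriptor index;
+	 * the block below then walks the queue from its tail up to that
+	 * index and frees every transmitted mbuf chain in one pass, so a
+	 * single completion can retire a whole batch of descriptors.
+	 */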
+
+	if (comp_index != (u_int32_t)-1) {
+		while (q->tail_idx != comp_index) {
+			q_desc_info = &q->info[q->tail_idx];
+
+			q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
+
+			/* Prefetch the next 4 descriptors */
+			if ((q->tail_idx & 0x3) == 0)
+				/* q desc info */
+				rte_prefetch0(&q->info[q->tail_idx]);
+
+			/*
+			 * Note: you can just use rte_pktmbuf_free,
+			 * but this loop is faster
+			 */
+			txm = q_desc_info->cb_arg;
+			while (txm != NULL) {
+				next = txm->next;
+				rte_pktmbuf_free_seg(txm);
+				txm = next;
+			}
+		}
+	}
+}
+
+void __rte_cold
+ionic_dev_tx_queue_release(void *tx_queue)
+{
+	struct ionic_qcq *txq = (struct ionic_qcq *)tx_queue;
+
+	IONIC_PRINT_CALL();
+
+	ionic_qcq_free(txq);
+}
+
+int __rte_cold
+ionic_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
+{
+	struct ionic_qcq *txq;
+
+	IONIC_PRINT_CALL();
+
+	txq = eth_dev->data->tx_queues[tx_queue_id];
+
+	/*
+	 * Note: ideally we would post a NOP Tx descriptor and wait for its
+	 * completion before disabling the Tx queue
+	 */
+
+	ionic_qcq_disable(txq);
+
+	ionic_tx_flush(&txq->cq);
+
+	ionic_lif_txq_deinit(txq);
+
+	eth_dev->data->tx_queue_state[tx_queue_id] =
+		RTE_ETH_QUEUE_STATE_STOPPED;
+
+	return 0;
+}
+
+int __rte_cold
+ionic_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id,
+		uint16_t nb_desc, uint32_t socket_id __rte_unused,
+		const struct rte_eth_txconf *tx_conf)
+{
+	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
+	struct ionic_qcq *txq;
+	uint64_t offloads;
+	int err;
+
+	IONIC_PRINT_CALL();
+
+	IONIC_PRINT(DEBUG, "Configuring TX queue %u with %u buffers",
+		tx_queue_id, nb_desc);
+
+	if (tx_queue_id >= lif->ntxqcqs) {
+		IONIC_PRINT(DEBUG, "Queue index %u not available "
+			"(max %u queues)",
+			tx_queue_id, lif->ntxqcqs);
+		return -EINVAL;
+	}
+
+	offloads = tx_conf->offloads | eth_dev->data->dev_conf.txmode.offloads;
+
+	/* Validate number of transmit descriptors */
+	if (!rte_is_power_of_2(nb_desc) || nb_desc < IONIC_MIN_RING_DESC)
+		return -EINVAL; /* or use IONIC_DEFAULT_RING_DESC */
+
+	/* Free memory prior to re-allocation if needed... */
+	if (eth_dev->data->tx_queues[tx_queue_id] != NULL) {
+		void *tx_queue = eth_dev->data->tx_queues[tx_queue_id];
+		ionic_dev_tx_queue_release(tx_queue);
+		eth_dev->data->tx_queues[tx_queue_id] = NULL;
+	}
+
+	err = ionic_tx_qcq_alloc(lif, tx_queue_id, nb_desc, &txq);
+	if (err) {
+		IONIC_PRINT(DEBUG, "Queue allocation failure");
+		return -EINVAL;
+	}
+
+	/* Do not start queue with rte_eth_dev_start() */
+	txq->deferred_start = tx_conf->tx_deferred_start;
+
+	txq->offloads = offloads;
+
+	eth_dev->data->tx_queues[tx_queue_id] = txq;
+
+	return 0;
+}
+
+/*
+ * Start Transmit Units for specified queue.
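+ *
+ * Deferred start, as wired up in ionic_dev_tx_queue_setup() above: a queue
+ * configured with tx_deferred_start is skipped by rte_eth_dev_start() and
+ * has to be started explicitly. Illustrative application-side sketch using
+ * standard ethdev calls (port_id and the descriptor count are hypothetical):
+ *   struct rte_eth_txconf txconf = { .tx_deferred_start = 1 };
+ *   rte_eth_tx_queue_setup(port_id, 0, 512, SOCKET_ID_ANY, &txconf);
+ *   rte_eth_dev_start(port_id);              -- queue 0 stays stopped
+ *   rte_eth_dev_tx_queue_start(port_id, 0);  -- enters the function below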
+ */ +int __rte_cold +ionic_dev_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id) +{ + struct ionic_qcq *txq; + int err; + + IONIC_PRINT_CALL(); + + txq = eth_dev->data->tx_queues[tx_queue_id]; + + err = ionic_lif_txq_init(txq); + if (err) + return err; + + ionic_qcq_enable(txq); + + eth_dev->data->tx_queue_state[tx_queue_id] = + RTE_ETH_QUEUE_STATE_STARTED; + + return 0; +} + +static void +ionic_tx_tcp_pseudo_csum(struct rte_mbuf *txm) +{ + struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(txm, struct ether_hdr *); + char *l3_hdr = ((char *)eth_hdr) + txm->l2_len; + struct rte_tcp_hdr *tcp_hdr = (struct rte_tcp_hdr *) + (l3_hdr + txm->l3_len); + + if (txm->ol_flags & PKT_TX_IP_CKSUM) { + struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)l3_hdr; + ipv4_hdr->hdr_checksum = 0; + tcp_hdr->cksum = 0; + tcp_hdr->cksum = rte_ipv4_udptcp_cksum(ipv4_hdr, tcp_hdr); + } else { + struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)l3_hdr; + tcp_hdr->cksum = 0; + tcp_hdr->cksum = rte_ipv6_udptcp_cksum(ipv6_hdr, tcp_hdr); + } +} + +static void +ionic_tx_tcp_inner_pseudo_csum(struct rte_mbuf *txm) +{ + struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(txm, struct ether_hdr *); + char *l3_hdr = ((char *)eth_hdr) + txm->outer_l2_len + + txm->outer_l3_len + txm->l2_len; + struct rte_tcp_hdr *tcp_hdr = (struct rte_tcp_hdr *) + (l3_hdr + txm->l3_len); + + if (txm->ol_flags & PKT_TX_IPV4) { + struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)l3_hdr; + ipv4_hdr->hdr_checksum = 0; + tcp_hdr->cksum = 0; + tcp_hdr->cksum = rte_ipv4_udptcp_cksum(ipv4_hdr, tcp_hdr); + } else { + struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)l3_hdr; + tcp_hdr->cksum = 0; + tcp_hdr->cksum = rte_ipv6_udptcp_cksum(ipv6_hdr, tcp_hdr); + } +} + +static void +ionic_tx_tso_post(struct ionic_queue *q, struct ionic_txq_desc *desc, + struct rte_mbuf *txm, + rte_iova_t addr, uint8_t nsge, uint16_t len, + uint32_t hdrlen, uint32_t mss, + bool encap, + uint16_t vlan_tci, bool has_vlan, + bool start, bool done) +{ + uint8_t flags = 0; + flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0; + flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0; + flags |= start ? IONIC_TXQ_DESC_FLAG_TSO_SOT : 0; + flags |= done ? IONIC_TXQ_DESC_FLAG_TSO_EOT : 0; + + desc->cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_TSO, + flags, nsge, addr); + desc->len = len; + desc->vlan_tci = vlan_tci; + desc->hdr_len = hdrlen; + desc->mss = mss; + + ionic_q_post(q, done, NULL, done ? 
txm : NULL); +} + +static struct ionic_txq_desc * +ionic_tx_tso_next(struct ionic_queue *q, struct ionic_txq_sg_elem **elem) +{ + struct ionic_txq_desc *desc_base = q->base; + struct ionic_txq_sg_desc *sg_desc_base = q->sg_base; + struct ionic_txq_desc *desc = &desc_base[q->head_idx]; + struct ionic_txq_sg_desc *sg_desc = &sg_desc_base[q->head_idx]; + + *elem = sg_desc->elems; + return desc; +} + +static int +ionic_tx_tso(struct ionic_queue *q, struct rte_mbuf *txm, + uint64_t offloads __rte_unused, bool not_xmit_more) +{ + struct ionic_tx_stats *stats = IONIC_Q_TO_TX_STATS(q); + struct ionic_txq_desc *desc; + struct ionic_txq_sg_elem *elem; + struct rte_mbuf *txm_seg; + uint64_t desc_addr = 0; + uint16_t desc_len = 0; + uint8_t desc_nsge; + uint32_t hdrlen; + uint32_t mss = txm->tso_segsz; + uint32_t frag_left = 0; + uint32_t left; + uint32_t seglen; + uint32_t len; + uint32_t offset = 0; + bool start, done; + bool encap; + bool has_vlan = !!(txm->ol_flags & PKT_TX_VLAN_PKT); + uint16_t vlan_tci = txm->vlan_tci; + uint64_t ol_flags = txm->ol_flags; + + encap = ((ol_flags & PKT_TX_OUTER_IP_CKSUM) || + (ol_flags & PKT_TX_OUTER_UDP_CKSUM)) && + ((ol_flags & PKT_TX_OUTER_IPV4) || + (ol_flags & PKT_TX_OUTER_IPV6)); + + /* Preload inner-most TCP csum field with IP pseudo hdr + * calculated with IP length set to zero. HW will later + * add in length to each TCP segment resulting from the TSO. + */ + + if (encap) { + ionic_tx_tcp_inner_pseudo_csum(txm); + hdrlen = txm->outer_l2_len + txm->outer_l3_len + + txm->l2_len + txm->l3_len + txm->l4_len; + } else { + ionic_tx_tcp_pseudo_csum(txm); + hdrlen = txm->l2_len + txm->l3_len + txm->l4_len; + } + + seglen = hdrlen + mss; + left = txm->data_len; + + desc = ionic_tx_tso_next(q, &elem); + start = true; + + /* Chop data up into desc segments */ + + while (left > 0) { + len = RTE_MIN(seglen, left); + frag_left = seglen - len; + desc_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(txm)); + desc_len = len; + desc_nsge = 0; + left -= len; + offset += len; + if (txm->nb_segs > 1 && frag_left > 0) + continue; + done = (txm->nb_segs == 1 && left == 0); + ionic_tx_tso_post(q, desc, txm, + desc_addr, desc_nsge, desc_len, + hdrlen, mss, + encap, + vlan_tci, has_vlan, + start, done && not_xmit_more); + desc = ionic_tx_tso_next(q, &elem); + start = false; + seglen = mss; + } + + /* Chop frags into desc segments */ + + txm_seg = txm->next; + while (txm_seg != NULL) { + offset = 0; + left = txm_seg->data_len; + stats->frags++; + + while (left > 0) { + rte_iova_t data_iova; + data_iova = rte_mbuf_data_iova(txm_seg); + elem->addr = rte_cpu_to_le_64(data_iova) + offset; + if (frag_left > 0) { + len = RTE_MIN(frag_left, left); + frag_left -= len; + elem->len = len; + elem++; + desc_nsge++; + } else { + len = RTE_MIN(mss, left); + frag_left = mss - len; + data_iova = rte_mbuf_data_iova(txm_seg); + desc_addr = rte_cpu_to_le_64(data_iova); + desc_len = len; + desc_nsge = 0; + } + left -= len; + offset += len; + if (txm_seg->next != NULL && frag_left > 0) + continue; + done = (txm_seg->next == NULL && left == 0); + ionic_tx_tso_post(q, desc, txm_seg, + desc_addr, desc_nsge, desc_len, + hdrlen, mss, + encap, + vlan_tci, has_vlan, + start, done && not_xmit_more); + desc = ionic_tx_tso_next(q, &elem); + start = false; + } + + txm_seg = txm_seg->next; + } + + stats->tso++; + + return 0; +} + +static int +ionic_tx(struct ionic_queue *q, struct rte_mbuf *txm, + uint64_t offloads, bool not_xmit_more) +{ + struct ionic_txq_desc *desc_base = q->base; + struct ionic_txq_sg_desc 
*sg_desc_base = q->sg_base; + struct ionic_txq_desc *desc = &desc_base[q->head_idx]; + struct ionic_txq_sg_desc *sg_desc = &sg_desc_base[q->head_idx]; + struct ionic_txq_sg_elem *elem = sg_desc->elems; + struct ionic_tx_stats *stats = IONIC_Q_TO_TX_STATS(q); + struct rte_mbuf *txm_seg; + bool encap; + bool has_vlan; + uint64_t ol_flags = txm->ol_flags; + uint64_t addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(txm)); + uint8_t opcode = IONIC_TXQ_DESC_OPCODE_CSUM_NONE; + uint8_t flags = 0; + + if ((ol_flags & PKT_TX_IP_CKSUM) && + (offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)) { + opcode = IONIC_TXQ_DESC_OPCODE_CSUM_HW; + flags |= IONIC_TXQ_DESC_FLAG_CSUM_L3; + if (((ol_flags & PKT_TX_TCP_CKSUM) && + (offloads & DEV_TX_OFFLOAD_TCP_CKSUM)) || + ((ol_flags & PKT_TX_UDP_CKSUM) && + (offloads & DEV_TX_OFFLOAD_UDP_CKSUM))) + flags |= IONIC_TXQ_DESC_FLAG_CSUM_L4; + } else { + stats->no_csum++; + } + + has_vlan = (ol_flags & PKT_TX_VLAN_PKT); + encap = ((ol_flags & PKT_TX_OUTER_IP_CKSUM) || + (ol_flags & PKT_TX_OUTER_UDP_CKSUM)) && + ((ol_flags & PKT_TX_OUTER_IPV4) || + (ol_flags & PKT_TX_OUTER_IPV6)); + + flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0; + flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0; + + desc->cmd = encode_txq_desc_cmd(opcode, flags, txm->nb_segs - 1, addr); + desc->len = txm->data_len; + desc->vlan_tci = txm->vlan_tci; + + txm_seg = txm->next; + while (txm_seg != NULL) { + elem->len = txm_seg->data_len; + elem->addr = rte_cpu_to_le_64(rte_mbuf_data_iova(txm_seg)); + stats->frags++; + elem++; + txm_seg = txm_seg->next; + } + + ionic_q_post(q, not_xmit_more, NULL, txm); + + return 0; +} + +uint16_t +ionic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + struct ionic_qcq *txq = (struct ionic_qcq *)tx_queue; + struct ionic_queue *q = &txq->q; + struct ionic_cq *cq = &txq->cq; + struct ionic_tx_stats *stats = IONIC_Q_TO_TX_STATS(q); + uint32_t next_q_head_idx; + uint32_t bytes_tx = 0; + uint16_t nb_tx = 0; + int err; + bool last; + + /* Cleaning old buffers */ + ionic_tx_flush(cq); + + if (unlikely(ionic_q_space_avail(q) < nb_pkts)) { + stats->stop += nb_pkts; + return 0; + } + + while (nb_tx < nb_pkts) { + last = (nb_tx == (nb_pkts - 1)); + + next_q_head_idx = (q->head_idx + 1) & (q->num_descs - 1); + if ((next_q_head_idx & 0x3) == 0) { + struct ionic_txq_desc *desc_base = q->base; + rte_prefetch0(&desc_base[next_q_head_idx]); + rte_prefetch0(&q->info[next_q_head_idx]); + } + + if (tx_pkts[nb_tx]->ol_flags & PKT_TX_TCP_SEG) + err = ionic_tx_tso(q, tx_pkts[nb_tx], txq->offloads, + last); + else + err = ionic_tx(q, tx_pkts[nb_tx], txq->offloads, last); + if (err) { + stats->drop += nb_pkts - nb_tx; + if (nb_tx > 0) + ionic_q_flush(q); + break; + } + + bytes_tx += tx_pkts[nb_tx]->pkt_len; + nb_tx++; + } + + stats->packets += nb_tx; + stats->bytes += bytes_tx; + + return nb_tx; +} + +/********************************************************************* + * + * TX prep functions + * + **********************************************************************/ + +#define IONIC_TX_OFFLOAD_MASK ( \ + PKT_TX_IPV4 | \ + PKT_TX_IPV6 | \ + PKT_TX_VLAN | \ + PKT_TX_IP_CKSUM | \ + PKT_TX_TCP_SEG | \ + PKT_TX_L4_MASK) + +#define IONIC_TX_OFFLOAD_NOTSUP_MASK \ + (PKT_TX_OFFLOAD_MASK ^ IONIC_TX_OFFLOAD_MASK) + +uint16_t +ionic_prep_pkts(void *tx_queue __rte_unused, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + struct rte_mbuf *txm; + uint64_t offloads; + int i = 0; + + for (i = 0; i < nb_pkts; i++) { + txm = tx_pkts[i]; + + if (txm->nb_segs > IONIC_TX_MAX_SG_ELEMS) { + 
rte_errno = -EINVAL; + break; + } + + offloads = txm->ol_flags; + + if (offloads & IONIC_TX_OFFLOAD_NOTSUP_MASK) { + rte_errno = -ENOTSUP; + break; + } + } + + return i; +} + +/********************************************************************* + * + * RX functions + * + **********************************************************************/ + +static void ionic_rx_recycle(struct ionic_queue *q, uint32_t q_desc_index, + struct rte_mbuf *mbuf); + +void +ionic_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, + struct rte_eth_rxq_info *qinfo) +{ + struct ionic_qcq *rxq = dev->data->rx_queues[queue_id]; + struct ionic_queue *q = &rxq->q; + + qinfo->mp = rxq->mb_pool; + qinfo->scattered_rx = dev->data->scattered_rx; + qinfo->nb_desc = q->num_descs; + qinfo->conf.rx_deferred_start = rxq->deferred_start; + qinfo->conf.offloads = rxq->offloads; +} + +static void __rte_cold +ionic_rx_empty(struct ionic_queue *q) +{ + struct ionic_qcq *rxq = IONIC_Q_TO_QCQ(q); + struct ionic_desc_info *cur; + struct rte_mbuf *mbuf; + + while (q->tail_idx != q->head_idx) { + cur = &q->info[q->tail_idx]; + mbuf = cur->cb_arg; + rte_mempool_put(rxq->mb_pool, mbuf); + + q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1); + } +} + +void __rte_cold +ionic_dev_rx_queue_release(void *rx_queue) +{ + struct ionic_qcq *rxq = (struct ionic_qcq *)rx_queue; + + IONIC_PRINT_CALL(); + + ionic_rx_empty(&rxq->q); + + ionic_qcq_free(rxq); +} + +int __rte_cold +ionic_dev_rx_queue_setup(struct rte_eth_dev *eth_dev, + uint16_t rx_queue_id, + uint16_t nb_desc, + uint32_t socket_id __rte_unused, + const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *mp) +{ + struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev); + struct ionic_qcq *rxq; + uint64_t offloads; + int err; + + IONIC_PRINT_CALL(); + + IONIC_PRINT(DEBUG, "Configuring RX queue %u with %u buffers", + rx_queue_id, nb_desc); + + if (rx_queue_id >= lif->nrxqcqs) { + IONIC_PRINT(ERR, + "Queue index %u not available (max %u queues)", + rx_queue_id, lif->nrxqcqs); + return -EINVAL; + } + + offloads = rx_conf->offloads | eth_dev->data->dev_conf.rxmode.offloads; + + /* Validate number of receive descriptors */ + if (!rte_is_power_of_2(nb_desc) || + nb_desc < IONIC_MIN_RING_DESC || + nb_desc > IONIC_MAX_RING_DESC) { + IONIC_PRINT(ERR, + "Bad number of descriptors (%u) for queue %u (min: %u)", + nb_desc, rx_queue_id, IONIC_MIN_RING_DESC); + return -EINVAL; /* or use IONIC_DEFAULT_RING_DESC */ + } + + if (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER) + eth_dev->data->scattered_rx = 1; + + /* Free memory prior to re-allocation if needed... 
*/ + if (eth_dev->data->rx_queues[rx_queue_id] != NULL) { + void *rx_queue = eth_dev->data->rx_queues[rx_queue_id]; + ionic_dev_rx_queue_release(rx_queue); + eth_dev->data->rx_queues[rx_queue_id] = NULL; + } + + err = ionic_rx_qcq_alloc(lif, rx_queue_id, nb_desc, &rxq); + if (err) { + IONIC_PRINT(ERR, "Queue allocation failure"); + return -EINVAL; + } + + rxq->mb_pool = mp; + + /* + * Note: the interface does not currently support + * DEV_RX_OFFLOAD_KEEP_CRC, please also consider ETHER_CRC_LEN + * when the adapter will be able to keep the CRC and subtract + * it to the length for all received packets: + * if (eth_dev->data->dev_conf.rxmode.offloads & + * DEV_RX_OFFLOAD_KEEP_CRC) + * rxq->crc_len = ETHER_CRC_LEN; + */ + + /* Do not start queue with rte_eth_dev_start() */ + rxq->deferred_start = rx_conf->rx_deferred_start; + + rxq->offloads = offloads; + + eth_dev->data->rx_queues[rx_queue_id] = rxq; + + return 0; +} + +static void +ionic_rx_clean(struct ionic_queue *q, + uint32_t q_desc_index, uint32_t cq_desc_index, + void *cb_arg, void *service_cb_arg) +{ + struct ionic_rxq_comp *cq_desc_base = q->bound_cq->base; + struct ionic_rxq_comp *cq_desc = &cq_desc_base[cq_desc_index]; + struct rte_mbuf *rxm = cb_arg; + struct rte_mbuf *rxm_seg; + struct ionic_qcq *rxq = IONIC_Q_TO_QCQ(q); + uint32_t max_frame_size = + rxq->lif->eth_dev->data->dev_conf.rxmode.max_rx_pkt_len; + uint64_t pkt_flags = 0; + uint32_t pkt_type; + struct ionic_rx_stats *stats = IONIC_Q_TO_RX_STATS(q); + struct ionic_rx_service *recv_args = (struct ionic_rx_service *) + service_cb_arg; + uint32_t buf_size = (uint16_t) + (rte_pktmbuf_data_room_size(rxq->mb_pool) - + RTE_PKTMBUF_HEADROOM); + uint32_t left; + + if (!recv_args) { + stats->no_cb_arg++; + /* Flush */ + rte_pktmbuf_free(rxm); + /* + * Note: rte_mempool_put is faster with no segs + * rte_mempool_put(rxq->mb_pool, rxm); + */ + return; + } + + if (cq_desc->status) { + stats->bad_cq_status++; + ionic_rx_recycle(q, q_desc_index, rxm); + return; + } + + if (recv_args->nb_rx >= recv_args->nb_pkts) { + stats->no_room++; + ionic_rx_recycle(q, q_desc_index, rxm); + return; + } + + if (cq_desc->len > max_frame_size || + cq_desc->len == 0) { + stats->bad_len++; + ionic_rx_recycle(q, q_desc_index, rxm); + return; + } + + rxm->data_off = RTE_PKTMBUF_HEADROOM; + rte_prefetch1((char *)rxm->buf_addr + rxm->data_off); + rxm->nb_segs = 1; /* cq_desc->num_sg_elems */ + rxm->pkt_len = cq_desc->len; + rxm->port = rxq->lif->port_id; + + left = cq_desc->len; + + rxm->data_len = RTE_MIN(buf_size, left); + left -= rxm->data_len; + + rxm_seg = rxm->next; + while (rxm_seg && left) { + rxm_seg->data_len = RTE_MIN(buf_size, left); + left -= rxm_seg->data_len; + + rxm_seg = rxm_seg->next; + rxm->nb_segs++; + } + + /* RSS */ + pkt_flags |= PKT_RX_RSS_HASH; + rxm->hash.rss = cq_desc->rss_hash; + + /* Vlan Strip */ + if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_VLAN) { + pkt_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED; + rxm->vlan_tci = cq_desc->vlan_tci; + } + + /* Checksum */ + if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC) { + if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_OK) + pkt_flags |= PKT_RX_IP_CKSUM_GOOD; + else if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_BAD) + pkt_flags |= PKT_RX_IP_CKSUM_BAD; + + if ((cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_TCP_OK) || + (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_UDP_OK)) + pkt_flags |= PKT_RX_L4_CKSUM_GOOD; + else if ((cq_desc->csum_flags & + IONIC_RXQ_COMP_CSUM_F_TCP_BAD) || + (cq_desc->csum_flags & + 
IONIC_RXQ_COMP_CSUM_F_UDP_BAD)) + pkt_flags |= PKT_RX_L4_CKSUM_BAD; + } + + rxm->ol_flags = pkt_flags; + + /* Packet Type */ + switch (cq_desc->pkt_type_color & IONIC_RXQ_COMP_PKT_TYPE_MASK) { + case IONIC_PKT_TYPE_IPV4: + pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4; + break; + case IONIC_PKT_TYPE_IPV6: + pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6; + break; + case IONIC_PKT_TYPE_IPV4_TCP: + pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 | + RTE_PTYPE_L4_TCP; + break; + case IONIC_PKT_TYPE_IPV6_TCP: + pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 | + RTE_PTYPE_L4_TCP; + break; + case IONIC_PKT_TYPE_IPV4_UDP: + pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 | + RTE_PTYPE_L4_UDP; + break; + case IONIC_PKT_TYPE_IPV6_UDP: + pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 | + RTE_PTYPE_L4_UDP; + break; + default: + { + struct rte_ether_hdr *eth_h = rte_pktmbuf_mtod(rxm, + struct rte_ether_hdr *); + uint16_t ether_type = eth_h->ether_type; + if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP)) + pkt_type = RTE_PTYPE_L2_ETHER_ARP; + else + pkt_type = RTE_PTYPE_UNKNOWN; + break; + } + } + + rxm->packet_type = pkt_type; + + recv_args->rx_pkts[recv_args->nb_rx] = rxm; + recv_args->nb_rx++; + + stats->packets++; + stats->bytes += rxm->pkt_len; +} + +static void +ionic_rx_recycle(struct ionic_queue *q, uint32_t q_desc_index, + struct rte_mbuf *mbuf) +{ + struct ionic_rxq_desc *desc_base = q->base; + struct ionic_rxq_desc *old = &desc_base[q_desc_index]; + struct ionic_rxq_desc *new = &desc_base[q->head_idx]; + + new->addr = old->addr; + new->len = old->len; + + ionic_q_post(q, true, ionic_rx_clean, mbuf); +} + +static int __rte_cold +ionic_rx_fill(struct ionic_qcq *rxq, uint32_t len) +{ + struct ionic_queue *q = &rxq->q; + struct ionic_rxq_desc *desc_base = q->base; + struct ionic_rxq_sg_desc *sg_desc_base = q->sg_base; + struct ionic_rxq_desc *desc; + struct ionic_rxq_sg_desc *sg_desc; + struct ionic_rxq_sg_elem *elem; + rte_iova_t dma_addr; + uint32_t i, j, nsegs, buf_size, size; + bool ring_doorbell; + + buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) - + RTE_PKTMBUF_HEADROOM); + + /* Initialize software ring entries */ + for (i = ionic_q_space_avail(q); i; i--) { + struct rte_mbuf *rxm = rte_mbuf_raw_alloc(rxq->mb_pool); + struct rte_mbuf *prev_rxm_seg; + + if (rxm == NULL) { + IONIC_PRINT(ERR, "RX mbuf alloc failed"); + return -ENOMEM; + } + + nsegs = (len + buf_size - 1) / buf_size; + + desc = &desc_base[q->head_idx]; + dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(rxm)); + desc->addr = dma_addr; + desc->len = buf_size; + size = buf_size; + desc->opcode = (nsegs > 1) ? 
IONIC_RXQ_DESC_OPCODE_SG : + IONIC_RXQ_DESC_OPCODE_SIMPLE; + rxm->next = NULL; + + prev_rxm_seg = rxm; + sg_desc = &sg_desc_base[q->head_idx]; + elem = sg_desc->elems; + for (j = 0; j < nsegs - 1 && j < IONIC_RX_MAX_SG_ELEMS; j++) { + struct rte_mbuf *rxm_seg; + rte_iova_t data_iova; + + rxm_seg = rte_mbuf_raw_alloc(rxq->mb_pool); + if (rxm_seg == NULL) { + IONIC_PRINT(ERR, "RX mbuf alloc failed"); + return -ENOMEM; + } + + data_iova = rte_mbuf_data_iova(rxm_seg); + dma_addr = rte_cpu_to_le_64(data_iova); + elem->addr = dma_addr; + elem->len = buf_size; + size += buf_size; + elem++; + rxm_seg->next = NULL; + prev_rxm_seg->next = rxm_seg; + prev_rxm_seg = rxm_seg; + } + + if (size < len) + IONIC_PRINT(ERR, "Rx SG size is not sufficient (%d < %d)", + size, len); + + ring_doorbell = ((q->head_idx + 1) & + IONIC_RX_RING_DOORBELL_STRIDE) == 0; + + ionic_q_post(q, ring_doorbell, ionic_rx_clean, rxm); + } + + return 0; +} + +/* + * Start Receive Units for specified queue. + */ +int __rte_cold +ionic_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id) +{ + uint32_t frame_size = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len; + struct ionic_qcq *rxq; + int err; + + IONIC_PRINT_CALL(); + + IONIC_PRINT(DEBUG, "Allocating RX queue buffers (size: %u)", + frame_size); + + rxq = eth_dev->data->rx_queues[rx_queue_id]; + + err = ionic_lif_rxq_init(rxq); + if (err) + return err; + + /* Allocate buffers for descriptor rings */ + if (ionic_rx_fill(rxq, frame_size) != 0) { + IONIC_PRINT(ERR, "Could not alloc mbuf for queue:%d", + rx_queue_id); + return -1; + } + + ionic_qcq_enable(rxq); + + eth_dev->data->rx_queue_state[rx_queue_id] = + RTE_ETH_QUEUE_STATE_STARTED; + + return 0; +} + +static inline void __rte_cold +ionic_rxq_service(struct ionic_cq *cq, uint32_t work_to_do, + void *service_cb_arg) +{ + struct ionic_queue *q = cq->bound_q; + struct ionic_desc_info *q_desc_info; + struct ionic_rxq_comp *cq_desc_base = cq->base; + struct ionic_rxq_comp *cq_desc; + bool more; + uint32_t curr_q_tail_idx, curr_cq_tail_idx; + uint32_t work_done = 0; + + if (work_to_do == 0) + return; + + cq_desc = &cq_desc_base[cq->tail_idx]; + while (color_match(cq_desc->pkt_type_color, cq->done_color)) { + curr_cq_tail_idx = cq->tail_idx; + cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1); + + if (cq->tail_idx == 0) + cq->done_color = !cq->done_color; + + /* Prefetch the next 4 descriptors */ + if ((cq->tail_idx & 0x3) == 0) + rte_prefetch0(&cq_desc_base[cq->tail_idx]); + + do { + more = (q->tail_idx != cq_desc->comp_index); + + q_desc_info = &q->info[q->tail_idx]; + + curr_q_tail_idx = q->tail_idx; + q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1); + + /* Prefetch the next 4 descriptors */ + if ((q->tail_idx & 0x3) == 0) + /* q desc info */ + rte_prefetch0(&q->info[q->tail_idx]); + + ionic_rx_clean(q, curr_q_tail_idx, curr_cq_tail_idx, + q_desc_info->cb_arg, service_cb_arg); + + } while (more); + + if (++work_done == work_to_do) + break; + + cq_desc = &cq_desc_base[cq->tail_idx]; + } +} + +/* + * Stop Receive Units for specified queue. 
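+ *
+ * Note on the refill path above: ionic_rx_fill() posts one descriptor per
+ * free slot and, with IONIC_RX_RING_DOORBELL_STRIDE set to 31, only rings
+ * the doorbell when ((head_idx + 1) & 31) == 0, i.e. once for every 32
+ * posted buffers, batching doorbell writes. A scattered receive chains up
+ * to IONIC_RX_MAX_SG_ELEMS additional mbufs behind the head descriptor.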
+ */ +int __rte_cold +ionic_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id) +{ + struct ionic_qcq *rxq; + + IONIC_PRINT_CALL(); + + rxq = eth_dev->data->rx_queues[rx_queue_id]; + + ionic_qcq_disable(rxq); + + /* Flush */ + ionic_rxq_service(&rxq->cq, -1, NULL); + + ionic_lif_rxq_deinit(rxq); + + eth_dev->data->rx_queue_state[rx_queue_id] = + RTE_ETH_QUEUE_STATE_STOPPED; + + return 0; +} + +uint16_t +ionic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + struct ionic_qcq *rxq = (struct ionic_qcq *)rx_queue; + uint32_t frame_size = + rxq->lif->eth_dev->data->dev_conf.rxmode.max_rx_pkt_len; + struct ionic_cq *cq = &rxq->cq; + struct ionic_rx_service service_cb_arg; + + service_cb_arg.rx_pkts = rx_pkts; + service_cb_arg.nb_pkts = nb_pkts; + service_cb_arg.nb_rx = 0; + + ionic_rxq_service(cq, nb_pkts, &service_cb_arg); + + ionic_rx_fill(rxq, frame_size); + + return service_cb_arg.nb_rx; +} diff --git a/src/spdk/dpdk/drivers/net/ionic/ionic_rxtx.h b/src/spdk/dpdk/drivers/net/ionic/ionic_rxtx.h new file mode 100644 index 000000000..5c85b9c49 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ionic/ionic_rxtx.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) + * Copyright(c) 2018-2019 Pensando Systems, Inc. All rights reserved. + */ + +#ifndef _IONIC_RXTX_H_ +#define _IONIC_RXTX_H_ + +#include + +struct ionic_rx_service { + /* cb in */ + struct rte_mbuf **rx_pkts; + uint16_t nb_pkts; + /* cb out */ + uint16_t nb_rx; +}; + +uint16_t ionic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); +uint16_t ionic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); +uint16_t ionic_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); + +int ionic_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id, + uint16_t nb_desc, uint32_t socket_id, + const struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp); +void ionic_dev_rx_queue_release(void *rxq); +int ionic_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id); +int ionic_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id); + +int ionic_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id, + uint16_t nb_desc, uint32_t socket_id, + const struct rte_eth_txconf *tx_conf); +void ionic_dev_tx_queue_release(void *tx_queue); +int ionic_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id); +int ionic_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id); + +void ionic_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, + struct rte_eth_rxq_info *qinfo); +void ionic_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, + struct rte_eth_txq_info *qinfo); + +#endif /* _IONIC_RXTX_H_ */ diff --git a/src/spdk/dpdk/drivers/net/ionic/meson.build b/src/spdk/dpdk/drivers/net/ionic/meson.build new file mode 100644 index 000000000..1c6362d27 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ionic/meson.build @@ -0,0 +1,12 @@ +# SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) +# Copyright(c) 2019 Pensando + +sources = files( + 'ionic_mac_api.c', + 'ionic_rx_filter.c', + 'ionic_rxtx.c', + 'ionic_dev.c', + 'ionic_ethdev.c', + 'ionic_lif.c', + 'ionic_main.c' +) diff --git a/src/spdk/dpdk/drivers/net/ionic/rte_pmd_ionic_version.map b/src/spdk/dpdk/drivers/net/ionic/rte_pmd_ionic_version.map new file mode 100644 index 000000000..acdaf587d --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ionic/rte_pmd_ionic_version.map @@ -0,0 +1,4 @@ +DPDK_21 { + + local: *; +}; diff 
--git a/src/spdk/dpdk/drivers/net/ipn3ke/Makefile b/src/spdk/dpdk/drivers/net/ipn3ke/Makefile new file mode 100644 index 000000000..40696dbde --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ipn3ke/Makefile @@ -0,0 +1,38 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2019 Intel Corporation + +include $(RTE_SDK)/mk/rte.vars.mk + +# +# library name +# +LIB = librte_pmd_ipn3ke.a + +# +# Add the experimenatal APIs called from this PMD +# rte_eth_switch_domain_alloc() +# rte_eth_dev_create() +# rte_eth_dev_destroy() +# rte_eth_switch_domain_free() +# +CFLAGS += -O3 +CFLAGS += $(WERROR_FLAGS) +CFLAGS += -I$(RTE_SDK)/drivers/bus/ifpga +CFLAGS += -I$(RTE_SDK)/drivers/raw/ifpga +LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring +LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs +LDLIBS += -lrte_bus_ifpga +LDLIBS += -lrte_bus_vdev +LDLIBS += -lpthread + +EXPORT_MAP := rte_pmd_ipn3ke_version.map + +# +# all source are stored in SRCS-y +# +SRCS-$(CONFIG_RTE_LIBRTE_IPN3KE_PMD) += ipn3ke_ethdev.c +SRCS-$(CONFIG_RTE_LIBRTE_IPN3KE_PMD) += ipn3ke_representor.c +SRCS-$(CONFIG_RTE_LIBRTE_IPN3KE_PMD) += ipn3ke_tm.c +SRCS-$(CONFIG_RTE_LIBRTE_IPN3KE_PMD) += ipn3ke_flow.c + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/src/spdk/dpdk/drivers/net/ipn3ke/ipn3ke_ethdev.c b/src/spdk/dpdk/drivers/net/ipn3ke/ipn3ke_ethdev.c new file mode 100644 index 000000000..5b5510f08 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ipn3ke/ipn3ke_ethdev.c @@ -0,0 +1,596 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019 Intel Corporation + */ + +#include + +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "ipn3ke_rawdev_api.h" +#include "ipn3ke_flow.h" +#include "ipn3ke_logs.h" +#include "ipn3ke_ethdev.h" + +int ipn3ke_afu_logtype; + +static const struct rte_afu_uuid afu_uuid_ipn3ke_map[] = { + { MAP_UUID_10G_LOW, MAP_UUID_10G_HIGH }, + { IPN3KE_UUID_10G_LOW, IPN3KE_UUID_10G_HIGH }, + { IPN3KE_UUID_VBNG_LOW, IPN3KE_UUID_VBNG_HIGH}, + { IPN3KE_UUID_25G_LOW, IPN3KE_UUID_25G_HIGH }, + { 0, 0 /* sentinel */ }, +}; + +struct ipn3ke_pub_func ipn3ke_bridge_func; + +static int +ipn3ke_indirect_read(struct ipn3ke_hw *hw, uint32_t *rd_data, + uint32_t addr, uint32_t dev_sel, uint32_t eth_group_sel) +{ + uint32_t i, try_cnt; + uint64_t indirect_value; + volatile void *indirect_addrs; + uint64_t target_addr; + uint64_t read_data = 0; + + if (eth_group_sel != 0 && eth_group_sel != 1) + return -1; + + target_addr = addr | dev_sel << 17; + + indirect_value = RCMD | target_addr << 32; + indirect_addrs = hw->eth_group_bar[eth_group_sel] + 0x10; + + rte_delay_us(10); + + rte_write64((rte_cpu_to_le_64(indirect_value)), indirect_addrs); + + i = 0; + try_cnt = 10; + indirect_addrs = hw->eth_group_bar[eth_group_sel] + + 0x18; + do { + read_data = rte_read64(indirect_addrs); + if ((read_data >> 32) == 1) + break; + i++; + } while (i <= try_cnt); + if (i > try_cnt) + return -1; + + *rd_data = rte_le_to_cpu_32(read_data); + return 0; +} + +static int +ipn3ke_indirect_write(struct ipn3ke_hw *hw, uint32_t wr_data, + uint32_t addr, uint32_t dev_sel, uint32_t eth_group_sel) +{ + volatile void *indirect_addrs; + uint64_t indirect_value; + uint64_t target_addr; + + if (eth_group_sel != 0 && eth_group_sel != 1) + return -1; + + target_addr = addr | dev_sel << 17; + + indirect_value = WCMD | target_addr << 32 | wr_data; + indirect_addrs = hw->eth_group_bar[eth_group_sel] + 0x10; + + rte_write64((rte_cpu_to_le_64(indirect_value)), 
indirect_addrs); + return 0; +} + +static int +ipn3ke_indirect_mac_read(struct ipn3ke_hw *hw, uint32_t *rd_data, + uint32_t addr, uint32_t mac_num, uint32_t eth_group_sel) +{ + uint32_t dev_sel; + + if (mac_num >= hw->port_num) + return -1; + + mac_num &= 0x7; + dev_sel = mac_num * 2 + 3; + + return ipn3ke_indirect_read(hw, rd_data, addr, dev_sel, eth_group_sel); +} + +static int +ipn3ke_indirect_mac_write(struct ipn3ke_hw *hw, uint32_t wr_data, + uint32_t addr, uint32_t mac_num, uint32_t eth_group_sel) +{ + uint32_t dev_sel; + + if (mac_num >= hw->port_num) + return -1; + + mac_num &= 0x7; + dev_sel = mac_num * 2 + 3; + + return ipn3ke_indirect_write(hw, wr_data, addr, dev_sel, eth_group_sel); +} + +static void +ipn3ke_hw_cap_init(struct ipn3ke_hw *hw) +{ + hw->hw_cap.version_number = IPN3KE_MASK_READ_REG(hw, + (IPN3KE_HW_BASE + 0), 0, 0xFFFF); + hw->hw_cap.capability_registers_block_offset = IPN3KE_MASK_READ_REG(hw, + (IPN3KE_HW_BASE + 0x8), 0, 0xFFFFFFFF); + hw->hw_cap.status_registers_block_offset = IPN3KE_MASK_READ_REG(hw, + (IPN3KE_HW_BASE + 0x10), 0, 0xFFFFFFFF); + hw->hw_cap.control_registers_block_offset = IPN3KE_MASK_READ_REG(hw, + (IPN3KE_HW_BASE + 0x18), 0, 0xFFFFFFFF); + hw->hw_cap.classify_offset = IPN3KE_MASK_READ_REG(hw, + (IPN3KE_HW_BASE + 0x20), 0, 0xFFFFFFFF); + hw->hw_cap.classy_size = IPN3KE_MASK_READ_REG(hw, + (IPN3KE_HW_BASE + 0x24), 0, 0xFFFF); + hw->hw_cap.policer_offset = IPN3KE_MASK_READ_REG(hw, + (IPN3KE_HW_BASE + 0x28), 0, 0xFFFFFFFF); + hw->hw_cap.policer_entry_size = IPN3KE_MASK_READ_REG(hw, + (IPN3KE_HW_BASE + 0x2C), 0, 0xFFFF); + hw->hw_cap.rss_key_array_offset = IPN3KE_MASK_READ_REG(hw, + (IPN3KE_HW_BASE + 0x30), 0, 0xFFFFFFFF); + hw->hw_cap.rss_key_entry_size = IPN3KE_MASK_READ_REG(hw, + (IPN3KE_HW_BASE + 0x34), 0, 0xFFFF); + hw->hw_cap.rss_indirection_table_array_offset = IPN3KE_MASK_READ_REG(hw, + (IPN3KE_HW_BASE + 0x38), 0, 0xFFFFFFFF); + hw->hw_cap.rss_indirection_table_entry_size = IPN3KE_MASK_READ_REG(hw, + (IPN3KE_HW_BASE + 0x3C), 0, 0xFFFF); + hw->hw_cap.dmac_map_offset = IPN3KE_MASK_READ_REG(hw, + (IPN3KE_HW_BASE + 0x40), 0, 0xFFFFFFFF); + hw->hw_cap.dmac_map_size = IPN3KE_MASK_READ_REG(hw, + (IPN3KE_HW_BASE + 0x44), 0, 0xFFFF); + hw->hw_cap.qm_offset = IPN3KE_MASK_READ_REG(hw, + (IPN3KE_HW_BASE + 0x48), 0, 0xFFFFFFFF); + hw->hw_cap.qm_size = IPN3KE_MASK_READ_REG(hw, + (IPN3KE_HW_BASE + 0x4C), 0, 0xFFFF); + hw->hw_cap.ccb_offset = IPN3KE_MASK_READ_REG(hw, + (IPN3KE_HW_BASE + 0x50), 0, 0xFFFFFFFF); + hw->hw_cap.ccb_entry_size = IPN3KE_MASK_READ_REG(hw, + (IPN3KE_HW_BASE + 0x54), 0, 0xFFFF); + hw->hw_cap.qos_offset = IPN3KE_MASK_READ_REG(hw, + (IPN3KE_HW_BASE + 0x58), 0, 0xFFFFFFFF); + hw->hw_cap.qos_size = IPN3KE_MASK_READ_REG(hw, + (IPN3KE_HW_BASE + 0x5C), 0, 0xFFFF); + + hw->hw_cap.num_rx_flow = IPN3KE_MASK_READ_REG(hw, + IPN3KE_CAPABILITY_REGISTERS_BLOCK_OFFSET, + 0, 0xFFFF); + hw->hw_cap.num_rss_blocks = IPN3KE_MASK_READ_REG(hw, + IPN3KE_CAPABILITY_REGISTERS_BLOCK_OFFSET, + 4, 0xFFFF); + hw->hw_cap.num_dmac_map = IPN3KE_MASK_READ_REG(hw, + IPN3KE_CAPABILITY_REGISTERS_BLOCK_OFFSET, + 8, 0xFFFF); + hw->hw_cap.num_tx_flow = IPN3KE_MASK_READ_REG(hw, + IPN3KE_CAPABILITY_REGISTERS_BLOCK_OFFSET, + 0xC, 0xFFFF); + hw->hw_cap.num_smac_map = IPN3KE_MASK_READ_REG(hw, + IPN3KE_CAPABILITY_REGISTERS_BLOCK_OFFSET, + 0x10, 0xFFFF); + + hw->hw_cap.link_speed_mbps = IPN3KE_MASK_READ_REG(hw, + IPN3KE_STATUS_REGISTERS_BLOCK_OFFSET, + 0, 0xFFFFF); +} + +static int +ipn3ke_vbng_init_done(struct ipn3ke_hw *hw) +{ + uint32_t timeout = 10000; + while (timeout > 0) { + 
if (IPN3KE_READ_REG(hw, IPN3KE_VBNG_INIT_STS) + == IPN3KE_VBNG_INIT_DONE) + break; + rte_delay_us(1000); + timeout--; + } + + if (!timeout) { + IPN3KE_AFU_PMD_ERR("IPN3KE vBNG INIT timeout.\n"); + return -1; + } + + return 0; +} + +static uint32_t +ipn3ke_mtu_cal(uint32_t tx, uint32_t rx) +{ + uint32_t tmp; + tmp = RTE_MIN(tx, rx); + tmp = RTE_MAX(tmp, (uint32_t)RTE_ETHER_MIN_MTU); + tmp = RTE_MIN(tmp, (uint32_t)(IPN3KE_MAC_FRAME_SIZE_MAX - + IPN3KE_ETH_OVERHEAD)); + return tmp; +} + +static void +ipn3ke_mtu_set(struct ipn3ke_hw *hw, uint32_t mac_num, + uint32_t eth_group_sel, uint32_t txaddr, uint32_t rxaddr) +{ + uint32_t tx; + uint32_t rx; + uint32_t tmp; + + if (!(*hw->f_mac_read) || !(*hw->f_mac_write)) + return; + + (*hw->f_mac_read)(hw, + &tx, + txaddr, + mac_num, + eth_group_sel); + + (*hw->f_mac_read)(hw, + &rx, + rxaddr, + mac_num, + eth_group_sel); + + tmp = ipn3ke_mtu_cal(tx, rx); + + (*hw->f_mac_write)(hw, + tmp, + txaddr, + mac_num, + eth_group_sel); + + (*hw->f_mac_write)(hw, + tmp, + rxaddr, + mac_num, + eth_group_sel); +} + +static void +ipn3ke_10G_mtu_setup(struct ipn3ke_hw *hw, uint32_t mac_num, + uint32_t eth_group_sel) +{ + ipn3ke_mtu_set(hw, mac_num, eth_group_sel, + IPN3KE_10G_TX_FRAME_MAXLENGTH, IPN3KE_10G_RX_FRAME_MAXLENGTH); +} + +static void +ipn3ke_25G_mtu_setup(struct ipn3ke_hw *hw, uint32_t mac_num, + uint32_t eth_group_sel) +{ + ipn3ke_mtu_set(hw, mac_num, eth_group_sel, + IPN3KE_25G_MAX_TX_SIZE_CONFIG, IPN3KE_25G_MAX_RX_SIZE_CONFIG); +} + +static void +ipn3ke_mtu_setup(struct ipn3ke_hw *hw) +{ + int i; + if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) { + for (i = 0; i < hw->port_num; i++) { + ipn3ke_10G_mtu_setup(hw, i, 0); + ipn3ke_10G_mtu_setup(hw, i, 1); + } + } else if (hw->retimer.mac_type == + IFPGA_RAWDEV_RETIMER_MAC_TYPE_25GE_25GAUI) { + for (i = 0; i < hw->port_num; i++) { + ipn3ke_25G_mtu_setup(hw, i, 0); + ipn3ke_25G_mtu_setup(hw, i, 1); + } + } +} + +static int +ipn3ke_hw_init(struct rte_afu_device *afu_dev, + struct ipn3ke_hw *hw) +{ + struct rte_rawdev *rawdev; + int ret; + int i; + uint64_t port_num, mac_type, index; + + rawdev = afu_dev->rawdev; + + hw->afu_id.uuid.uuid_low = afu_dev->id.uuid.uuid_low; + hw->afu_id.uuid.uuid_high = afu_dev->id.uuid.uuid_high; + hw->afu_id.port = afu_dev->id.port; + hw->hw_addr = (uint8_t *)(afu_dev->mem_resource[0].addr); + hw->f_mac_read = ipn3ke_indirect_mac_read; + hw->f_mac_write = ipn3ke_indirect_mac_write; + hw->rawdev = rawdev; + rawdev->dev_ops->attr_get(rawdev, + "LineSideBARIndex", &index); + hw->eth_group_bar[0] = (uint8_t *)(afu_dev->mem_resource[index].addr); + rawdev->dev_ops->attr_get(rawdev, + "NICSideBARIndex", &index); + hw->eth_group_bar[1] = (uint8_t *)(afu_dev->mem_resource[index].addr); + rawdev->dev_ops->attr_get(rawdev, + "LineSideLinkPortNum", &port_num); + hw->retimer.port_num = (int)port_num; + hw->port_num = hw->retimer.port_num; + rawdev->dev_ops->attr_get(rawdev, + "LineSideMACType", &mac_type); + hw->retimer.mac_type = (int)mac_type; + + hw->acc_tm = 0; + hw->acc_flow = 0; + + if (afu_dev->id.uuid.uuid_low == IPN3KE_UUID_VBNG_LOW && + afu_dev->id.uuid.uuid_high == IPN3KE_UUID_VBNG_HIGH) { + /* After power on, wait until init done */ + if (ipn3ke_vbng_init_done(hw)) + return -1; + + ipn3ke_hw_cap_init(hw); + + /* Reset vBNG IP */ + IPN3KE_WRITE_REG(hw, IPN3KE_CTRL_RESET, 1); + rte_delay_us(10); + IPN3KE_WRITE_REG(hw, IPN3KE_CTRL_RESET, 0); + + /* After reset, wait until init done */ + if (ipn3ke_vbng_init_done(hw)) + return -1; + + hw->acc_tm = 1; + 
hw->acc_flow = 1; + + IPN3KE_AFU_PMD_DEBUG("UPL_version is 0x%x\n", + IPN3KE_READ_REG(hw, 0)); + } + + if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) { + /* Enable inter connect channel */ + for (i = 0; i < hw->port_num; i++) { + /* Enable the TX path */ + ipn3ke_xmac_tx_enable(hw, i, 1); + + /* Disables source address override */ + ipn3ke_xmac_smac_ovd_dis(hw, i, 1); + + /* Enable the RX path */ + ipn3ke_xmac_rx_enable(hw, i, 1); + + /* Clear NIC side TX statistics counters */ + ipn3ke_xmac_tx_clr_10G_stcs(hw, i, 1); + + /* Clear NIC side RX statistics counters */ + ipn3ke_xmac_rx_clr_10G_stcs(hw, i, 1); + + /* Clear line side TX statistics counters */ + ipn3ke_xmac_tx_clr_10G_stcs(hw, i, 0); + + /* Clear line RX statistics counters */ + ipn3ke_xmac_rx_clr_10G_stcs(hw, i, 0); + } + } else if (hw->retimer.mac_type == + IFPGA_RAWDEV_RETIMER_MAC_TYPE_25GE_25GAUI) { + /* Enable inter connect channel */ + for (i = 0; i < hw->port_num; i++) { + /* Clear NIC side TX statistics counters */ + ipn3ke_xmac_tx_clr_25G_stcs(hw, i, 1); + + /* Clear NIC side RX statistics counters */ + ipn3ke_xmac_rx_clr_25G_stcs(hw, i, 1); + + /* Clear line side TX statistics counters */ + ipn3ke_xmac_tx_clr_25G_stcs(hw, i, 0); + + /* Clear line side RX statistics counters */ + ipn3ke_xmac_rx_clr_25G_stcs(hw, i, 0); + } + } + + /* init mtu */ + ipn3ke_mtu_setup(hw); + + ret = rte_eth_switch_domain_alloc(&hw->switch_domain_id); + if (ret) + IPN3KE_AFU_PMD_WARN("failed to allocate switch domain for device %d", + ret); + + hw->tm_hw_enable = 0; + hw->flow_hw_enable = 0; + if (afu_dev->id.uuid.uuid_low == IPN3KE_UUID_VBNG_LOW && + afu_dev->id.uuid.uuid_high == IPN3KE_UUID_VBNG_HIGH) { + ret = ipn3ke_hw_tm_init(hw); + if (ret) + return ret; + hw->tm_hw_enable = 1; + + ret = ipn3ke_flow_init(hw); + if (ret) + return ret; + hw->flow_hw_enable = 1; + } + + return 0; +} + +static void +ipn3ke_hw_uninit(struct ipn3ke_hw *hw) +{ + int i; + + if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) { + for (i = 0; i < hw->port_num; i++) { + /* Disable the TX path */ + ipn3ke_xmac_tx_disable(hw, i, 1); + + /* Disable the RX path */ + ipn3ke_xmac_rx_disable(hw, i, 1); + + /* Clear NIC side TX statistics counters */ + ipn3ke_xmac_tx_clr_10G_stcs(hw, i, 1); + + /* Clear NIC side RX statistics counters */ + ipn3ke_xmac_rx_clr_10G_stcs(hw, i, 1); + + /* Clear line side TX statistics counters */ + ipn3ke_xmac_tx_clr_10G_stcs(hw, i, 0); + + /* Clear line side RX statistics counters */ + ipn3ke_xmac_rx_clr_10G_stcs(hw, i, 0); + } + } else if (hw->retimer.mac_type == + IFPGA_RAWDEV_RETIMER_MAC_TYPE_25GE_25GAUI) { + for (i = 0; i < hw->port_num; i++) { + /* Clear NIC side TX statistics counters */ + ipn3ke_xmac_tx_clr_25G_stcs(hw, i, 1); + + /* Clear NIC side RX statistics counters */ + ipn3ke_xmac_rx_clr_25G_stcs(hw, i, 1); + + /* Clear line side TX statistics counters */ + ipn3ke_xmac_tx_clr_25G_stcs(hw, i, 0); + + /* Clear line side RX statistics counters */ + ipn3ke_xmac_rx_clr_25G_stcs(hw, i, 0); + } + } +} + +static int ipn3ke_vswitch_probe(struct rte_afu_device *afu_dev) +{ + char name[RTE_ETH_NAME_MAX_LEN]; + struct ipn3ke_hw *hw; + struct rte_eth_dev *i40e_eth; + struct ifpga_rawdev *ifpga_dev; + uint16_t port_id; + int i, j, retval; + char *fvl_bdf; + + /* check if the AFU device has been probed already */ + /* allocate shared mcp_vswitch structure */ + if (!afu_dev->shared.data) { + snprintf(name, sizeof(name), "net_%s_hw", + afu_dev->device.name); + hw = rte_zmalloc_socket(name, + sizeof(struct 
ipn3ke_hw), + RTE_CACHE_LINE_SIZE, + afu_dev->device.numa_node); + if (!hw) { + IPN3KE_AFU_PMD_ERR("failed to allocate hardwart data"); + retval = -ENOMEM; + return -ENOMEM; + } + afu_dev->shared.data = hw; + + rte_spinlock_init(&afu_dev->shared.lock); + } else { + hw = afu_dev->shared.data; + } + + retval = ipn3ke_hw_init(afu_dev, hw); + if (retval) + return retval; + + if (ipn3ke_bridge_func.get_ifpga_rawdev == NULL) + return -ENOMEM; + ifpga_dev = ipn3ke_bridge_func.get_ifpga_rawdev(hw->rawdev); + if (!ifpga_dev) + IPN3KE_AFU_PMD_ERR("failed to find ifpga_device."); + + /* probe representor ports */ + j = 0; + for (i = 0; i < hw->port_num; i++) { + struct ipn3ke_rpst rpst = { + .port_id = i, + .switch_domain_id = hw->switch_domain_id, + .hw = hw + }; + + /* representor port net_bdf_port */ + snprintf(name, sizeof(name), "net_%s_representor_%d", + afu_dev->device.name, i); + + for (; j < 8; j++) { + fvl_bdf = ifpga_dev->fvl_bdf[j]; + retval = rte_eth_dev_get_port_by_name(fvl_bdf, + &port_id); + if (retval) { + continue; + } else { + i40e_eth = &rte_eth_devices[port_id]; + rpst.i40e_pf_eth = i40e_eth; + rpst.i40e_pf_eth_port_id = port_id; + + j++; + break; + } + } + + retval = rte_eth_dev_create(&afu_dev->device, name, + sizeof(struct ipn3ke_rpst), NULL, NULL, + ipn3ke_rpst_init, &rpst); + + if (retval) + IPN3KE_AFU_PMD_ERR("failed to create ipn3ke representor %s.", + name); + + } + + return 0; +} + +static int ipn3ke_vswitch_remove(struct rte_afu_device *afu_dev) +{ + char name[RTE_ETH_NAME_MAX_LEN]; + struct ipn3ke_hw *hw; + struct rte_eth_dev *ethdev; + int i, ret; + + hw = afu_dev->shared.data; + + /* remove representor ports */ + for (i = 0; i < hw->port_num; i++) { + /* representor port net_bdf_port */ + snprintf(name, sizeof(name), "net_%s_representor_%d", + afu_dev->device.name, i); + + ethdev = rte_eth_dev_allocated(afu_dev->device.name); + if (!ethdev) + return -ENODEV; + + rte_eth_dev_destroy(ethdev, ipn3ke_rpst_uninit); + } + + ret = rte_eth_switch_domain_free(hw->switch_domain_id); + if (ret) + IPN3KE_AFU_PMD_WARN("failed to free switch domain: %d", ret); + + /* hw uninit*/ + ipn3ke_hw_uninit(hw); + + return 0; +} + +static struct rte_afu_driver afu_ipn3ke_driver = { + .id_table = afu_uuid_ipn3ke_map, + .probe = ipn3ke_vswitch_probe, + .remove = ipn3ke_vswitch_remove, +}; + +RTE_PMD_REGISTER_AFU(net_ipn3ke_afu, afu_ipn3ke_driver); + +RTE_INIT(ipn3ke_afu_init_log) +{ + ipn3ke_afu_logtype = rte_log_register("pmd.afu.ipn3ke"); + if (ipn3ke_afu_logtype >= 0) + rte_log_set_level(ipn3ke_afu_logtype, RTE_LOG_NOTICE); +} diff --git a/src/spdk/dpdk/drivers/net/ipn3ke/ipn3ke_ethdev.h b/src/spdk/dpdk/drivers/net/ipn3ke/ipn3ke_ethdev.h new file mode 100644 index 000000000..9b0cf309c --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ipn3ke/ipn3ke_ethdev.h @@ -0,0 +1,1078 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019 Intel Corporation + */ + +#ifndef _IPN3KE_ETHDEV_H_ +#define _IPN3KE_ETHDEV_H_ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#define IPN3KE_TM_SCRATCH_RW 0 + +/* TM Levels */ +enum ipn3ke_tm_node_level { + IPN3KE_TM_NODE_LEVEL_PORT, + IPN3KE_TM_NODE_LEVEL_VT, + IPN3KE_TM_NODE_LEVEL_COS, + IPN3KE_TM_NODE_LEVEL_MAX, +}; + +/* TM Shaper Profile */ +struct ipn3ke_tm_shaper_profile { + uint32_t valid; + uint32_t m; + uint32_t e; + uint64_t rate; + struct rte_tm_shaper_params params; +}; + 
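/*
 * The shaper profile above carries the generic rte_tm shaper parameters,
 * and ipn3ke_tm_ops_get() declared further below exposes this traffic
 * manager through the standard rte_tm API.  A minimal sketch of how an
 * application might create such a profile, assuming the usual
 * rte_tm_shaper_profile_add() entry point and byte-per-second token-bucket
 * rates (function and profile id below are illustrative only):
 */
#include <rte_tm.h>

static int
example_add_shaper_profile(uint16_t port_id)
{
	struct rte_tm_shaper_params params = { 0 };
	struct rte_tm_error error;

	/* 1 Gbit/s committed and peak rate; rte_tm rates are bytes/second. */
	params.committed.rate = 1000000000ULL / 8;
	params.committed.size = 4096;	/* token bucket size in bytes */
	params.peak.rate = params.committed.rate;
	params.peak.size = params.committed.size;
	params.pkt_length_adjust = 0;	/* no per-packet framing adjustment */

	/* Shaper profile id 1 is an arbitrary choice for this sketch. */
	return rte_tm_shaper_profile_add(port_id, 1, &params, &error);
}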
+TAILQ_HEAD(ipn3ke_tm_shaper_profile_list, ipn3ke_tm_shaper_profile); + + +#define IPN3KE_TDROP_TH1_MASK 0x1ffffff +#define IPN3KE_TDROP_TH1_SHIFT (25) +#define IPN3KE_TDROP_TH2_MASK 0x1ffffff + +/* TM TDROP Profile */ +struct ipn3ke_tm_tdrop_profile { + uint32_t tdrop_profile_id; + uint32_t th1; + uint32_t th2; + uint32_t n_users; + uint32_t valid; + struct rte_tm_wred_params params; +}; + +/* TM node priority */ +enum ipn3ke_tm_node_state { + IPN3KE_TM_NODE_STATE_IDLE = 0, + IPN3KE_TM_NODE_STATE_CONFIGURED_ADD, + IPN3KE_TM_NODE_STATE_CONFIGURED_DEL, + IPN3KE_TM_NODE_STATE_COMMITTED, + IPN3KE_TM_NODE_STATE_MAX, +}; + +TAILQ_HEAD(ipn3ke_tm_node_list, ipn3ke_tm_node); + +/* IPN3KE TM Node */ +struct ipn3ke_tm_node { + TAILQ_ENTRY(ipn3ke_tm_node) node; + uint32_t node_index; + uint32_t level; + uint32_t tm_id; + enum ipn3ke_tm_node_state node_state; + uint32_t parent_node_id; + uint32_t priority; + uint32_t weight; + struct ipn3ke_tm_node *parent_node; + struct ipn3ke_tm_shaper_profile shaper_profile; + struct ipn3ke_tm_tdrop_profile *tdrop_profile; + struct rte_tm_node_params params; + struct rte_tm_node_stats stats; + uint32_t n_children; + struct ipn3ke_tm_node_list children_node_list; +}; + +/* IPN3KE TM Hierarchy Specification */ +struct ipn3ke_tm_hierarchy { + struct ipn3ke_tm_node *port_node; + uint32_t n_shaper_profiles; + uint32_t n_tdrop_profiles; + uint32_t n_vt_nodes; + uint32_t n_cos_nodes; + struct ipn3ke_tm_node *port_commit_node; + struct ipn3ke_tm_node_list vt_commit_node_list; + struct ipn3ke_tm_node_list cos_commit_node_list; +}; + +struct ipn3ke_tm_internals { + /** Hierarchy specification + * + * -Hierarchy is unfrozen at init and when port is stopped. + * -Hierarchy is frozen on successful hierarchy commit. + * -Run-time hierarchy changes are not allowed, therefore it makes + * sense to keep the hierarchy frozen after the port is started. + */ + struct ipn3ke_tm_hierarchy h; + int hierarchy_frozen; + int tm_started; + uint32_t tm_id; +}; + +#define IPN3KE_TM_COS_NODE_NUM (64 * 1024) +#define IPN3KE_TM_VT_NODE_NUM (IPN3KE_TM_COS_NODE_NUM / 8) +#define IPN3KE_TM_10G_PORT_NODE_NUM (8) +#define IPN3KE_TM_25G_PORT_NODE_NUM (4) + +#define IPN3KE_TM_NODE_LEVEL_MOD (100000) +#define IPN3KE_TM_NODE_MOUNT_MAX (8) + +#define IPN3KE_TM_TDROP_PROFILE_NUM (2 * 1024) + +/* TM node priority */ +enum ipn3ke_tm_node_priority { + IPN3KE_TM_NODE_PRIORITY_NORMAL0 = 0, + IPN3KE_TM_NODE_PRIORITY_LOW, + IPN3KE_TM_NODE_PRIORITY_NORMAL1, + IPN3KE_TM_NODE_PRIORITY_HIGHEST, +}; + +#define IPN3KE_TM_NODE_WEIGHT_MAX UINT8_MAX + +/** Set a bit in the uint32 variable */ +#define IPN3KE_BIT_SET(var, pos) \ + ((var) |= ((uint32_t)1 << ((pos)))) + +/** Reset the bit in the variable */ +#define IPN3KE_BIT_RESET(var, pos) \ + ((var) &= ~((uint32_t)1 << ((pos)))) + +/** Check the bit is set in the variable */ +#define IPN3KE_BIT_ISSET(var, pos) \ + (((var) & ((uint32_t)1 << ((pos)))) ? 
1 : 0) + +struct ipn3ke_hw; + +#define IPN3KE_HW_BASE 0x4000000 + +#define IPN3KE_CAPABILITY_REGISTERS_BLOCK_OFFSET \ + (IPN3KE_HW_BASE + hw->hw_cap.capability_registers_block_offset) + +#define IPN3KE_STATUS_REGISTERS_BLOCK_OFFSET \ + (IPN3KE_HW_BASE + hw->hw_cap.status_registers_block_offset) + +#define IPN3KE_CTRL_RESET \ + (IPN3KE_HW_BASE + hw->hw_cap.control_registers_block_offset) + +#define IPN3KE_CTRL_MTU \ + (IPN3KE_HW_BASE + hw->hw_cap.control_registers_block_offset + 4) + +#define IPN3KE_CLASSIFY_OFFSET \ + (IPN3KE_HW_BASE + hw->hw_cap.classify_offset) + +#define IPN3KE_POLICER_OFFSET \ + (IPN3KE_HW_BASE + hw->hw_cap.policer_offset) + +#define IPN3KE_RSS_KEY_ARRAY_OFFSET \ + (IPN3KE_HW_BASE + hw->hw_cap.rss_key_array_offset) + +#define IPN3KE_RSS_INDIRECTION_TABLE_ARRAY_OFFSET \ + (IPN3KE_HW_BASE + hw->hw_cap.rss_indirection_table_array_offset) + +#define IPN3KE_DMAC_MAP_OFFSET \ + (IPN3KE_HW_BASE + hw->hw_cap.dmac_map_offset) + +#define IPN3KE_QM_OFFSET \ + (IPN3KE_HW_BASE + hw->hw_cap.qm_offset) + +#define IPN3KE_CCB_OFFSET \ + (IPN3KE_HW_BASE + hw->hw_cap.ccb_offset) + +#define IPN3KE_QOS_OFFSET \ + (IPN3KE_HW_BASE + hw->hw_cap.qos_offset) + +struct ipn3ke_hw_cap { + uint32_t version_number; + uint32_t capability_registers_block_offset; + uint32_t status_registers_block_offset; + uint32_t control_registers_block_offset; + uint32_t classify_offset; + uint32_t classy_size; + uint32_t policer_offset; + uint32_t policer_entry_size; + uint32_t rss_key_array_offset; + uint32_t rss_key_entry_size; + uint32_t rss_indirection_table_array_offset; + uint32_t rss_indirection_table_entry_size; + uint32_t dmac_map_offset; + uint32_t dmac_map_size; + uint32_t qm_offset; + uint32_t qm_size; + uint32_t ccb_offset; + uint32_t ccb_entry_size; + uint32_t qos_offset; + uint32_t qos_size; + + uint32_t num_rx_flow; /* Default: 64K */ + uint32_t num_rss_blocks; /* Default: 512 */ + uint32_t num_dmac_map; /* Default: 1K */ + uint32_t num_tx_flow; /* Default: 64K */ + uint32_t num_smac_map; /* Default: 1K */ + + uint32_t link_speed_mbps; +}; + +/** + * Strucute to store private data for each representor instance + */ +struct ipn3ke_rpst { + TAILQ_ENTRY(ipn3ke_rpst) next; /**< Next in device list. 
*/ + uint16_t switch_domain_id; + /**< Switch ID */ + uint16_t port_id; + struct rte_eth_dev *ethdev; + /**< Port ID */ + struct ipn3ke_hw *hw; + struct rte_eth_dev *i40e_pf_eth; + uint16_t i40e_pf_eth_port_id; + struct rte_eth_link ori_linfo; + struct ipn3ke_tm_internals tm; + /**< Private data store of assocaiated physical function */ + struct rte_ether_addr mac_addr; +}; + +/* UUID IDs */ +#define MAP_UUID_10G_LOW 0xffffffffffffffff +#define MAP_UUID_10G_HIGH 0xffffffffffffffff +#define IPN3KE_UUID_10G_LOW 0xc000c9660d824272 +#define IPN3KE_UUID_10G_HIGH 0x9aeffe5f84570612 +#define IPN3KE_UUID_VBNG_LOW 0x8991165349d23ff9 +#define IPN3KE_UUID_VBNG_HIGH 0xb74cf419d15a481f +#define IPN3KE_UUID_25G_LOW 0xb7d9bac566bfbc80 +#define IPN3KE_UUID_25G_HIGH 0xb07bac1aeef54d67 + +#define IPN3KE_AFU_BUF_SIZE_MIN 1024 +#define IPN3KE_AFU_FRAME_SIZE_MAX 9728 + +#define IPN3KE_RAWDEV_ATTR_LEN_MAX (64) + +typedef int (*ipn3ke_indirect_mac_read_t)(struct ipn3ke_hw *hw, + uint32_t *rd_data, uint32_t addr, uint32_t mac_num, + uint32_t eth_wrapper_sel); + +typedef int (*ipn3ke_indirect_mac_write_t)(struct ipn3ke_hw *hw, + uint32_t wr_data, uint32_t addr, uint32_t mac_num, + uint32_t eth_wrapper_sel); + +struct ipn3ke_hw { + struct rte_eth_dev *eth_dev; + + /* afu info */ + struct rte_afu_id afu_id; + struct rte_rawdev *rawdev; + + struct ipn3ke_hw_cap hw_cap; + + struct ifpga_rawdevg_retimer_info retimer; + + uint16_t switch_domain_id; + uint16_t port_num; + + uint32_t tm_hw_enable; + uint32_t flow_hw_enable; + + uint32_t acc_tm; + uint32_t acc_flow; + + struct ipn3ke_flow_list flow_list; + uint32_t flow_max_entries; + uint32_t flow_num_entries; + + struct ipn3ke_tm_node *nodes; + struct ipn3ke_tm_node *port_nodes; + struct ipn3ke_tm_node *vt_nodes; + struct ipn3ke_tm_node *cos_nodes; + + struct ipn3ke_tm_tdrop_profile *tdrop_profile; + uint32_t tdrop_profile_num; + + uint32_t ccb_status; + uint32_t ccb_seg_free; + uint32_t ccb_seg_num; + uint32_t ccb_seg_k; + + uint8_t *eth_group_bar[2]; + /**< MAC Register read */ + ipn3ke_indirect_mac_read_t f_mac_read; + /**< MAC Register write */ + ipn3ke_indirect_mac_write_t f_mac_write; + + uint8_t *hw_addr; +}; + +/** + * @internal + * Helper macro for drivers that need to convert to struct rte_afu_device. 
+ */ +#define RTE_DEV_TO_AFU(ptr) \ + container_of(ptr, struct rte_afu_device, device) + +#define RTE_DEV_TO_AFU_CONST(ptr) \ + container_of(ptr, const struct rte_afu_device, device) + +#define RTE_ETH_DEV_TO_AFU(eth_dev) \ + RTE_DEV_TO_AFU((eth_dev)->device) + +/** + * PCIe MMIO Access + */ + +#define IPN3KE_PCI_REG(reg) rte_read32(reg) +#define IPN3KE_PCI_REG_ADDR(a, reg) \ + ((volatile uint32_t *)((char *)(a)->hw_addr + (reg))) +static inline uint32_t ipn3ke_read_addr(volatile void *addr) +{ + return rte_le_to_cpu_32(IPN3KE_PCI_REG(addr)); +} + +#define WCMD 0x8000000000000000 +#define RCMD 0x4000000000000000 +#define INDRCT_CTRL 0x30 +#define INDRCT_STS 0x38 +static inline uint32_t _ipn3ke_indrct_read(struct ipn3ke_hw *hw, + uint32_t addr) +{ + uint64_t word_offset; + uint64_t read_data = 0; + uint64_t indirect_value; + volatile void *indirect_addrs; + + word_offset = (addr & 0x1FFFFFF) >> 2; + indirect_value = RCMD | word_offset << 32; + indirect_addrs = hw->hw_addr + (uint32_t)(INDRCT_CTRL); + + rte_delay_us(10); + + rte_write64((rte_cpu_to_le_64(indirect_value)), indirect_addrs); + + indirect_addrs = hw->hw_addr + (uint32_t)(INDRCT_STS); + while ((read_data >> 32) != 1) + read_data = rte_read64(indirect_addrs); + + return rte_le_to_cpu_32(read_data); +} + +static inline void _ipn3ke_indrct_write(struct ipn3ke_hw *hw, + uint32_t addr, uint32_t value) +{ + uint64_t word_offset; + uint64_t indirect_value; + volatile void *indirect_addrs = 0; + + word_offset = (addr & 0x1FFFFFF) >> 2; + indirect_value = WCMD | word_offset << 32 | value; + indirect_addrs = hw->hw_addr + (uint32_t)(INDRCT_CTRL); + + rte_write64((rte_cpu_to_le_64(indirect_value)), indirect_addrs); + rte_delay_us(10); +} + +#define IPN3KE_PCI_REG_WRITE(reg, value) \ + rte_write32((rte_cpu_to_le_32(value)), reg) + +#define IPN3KE_PCI_REG_WRITE_RELAXED(reg, value) \ + rte_write32_relaxed((rte_cpu_to_le_32(value)), reg) + +#define IPN3KE_READ_REG(hw, reg) \ + _ipn3ke_indrct_read((hw), (reg)) + +#define IPN3KE_WRITE_REG(hw, reg, value) \ + _ipn3ke_indrct_write((hw), (reg), (value)) + +#define IPN3KE_MASK_READ_REG(hw, reg, x, mask) \ + ((mask) & IPN3KE_READ_REG((hw), ((reg) + (0x4 * (x))))) + +#define IPN3KE_MASK_WRITE_REG(hw, reg, x, value, mask) \ + IPN3KE_WRITE_REG((hw), ((reg) + (0x4 * (x))), ((mask) & (value))) + +#define IPN3KE_DEV_PRIVATE_TO_HW(dev) \ + (((struct ipn3ke_rpst *)(dev)->data->dev_private)->hw) + +#define IPN3KE_DEV_PRIVATE_TO_RPST(dev) \ + ((struct ipn3ke_rpst *)(dev)->data->dev_private) + +#define IPN3KE_DEV_PRIVATE_TO_TM(dev) \ + (&(((struct ipn3ke_rpst *)(dev)->data->dev_private)->tm)) + +#define IPN3KE_VBNG_INIT_DONE (0x3) +#define IPN3KE_VBNG_INIT_STS (0x204) + +/* Byte address of IPN3KE internal module */ +#define IPN3KE_TM_VERSION (IPN3KE_QM_OFFSET + 0x0000) +#define IPN3KE_TM_SCRATCH (IPN3KE_QM_OFFSET + 0x0004) +#define IPN3KE_TM_STATUS (IPN3KE_QM_OFFSET + 0x0008) +#define IPN3KE_TM_MISC_STATUS (IPN3KE_QM_OFFSET + 0x0010) +#define IPN3KE_TM_MISC_WARNING_0 (IPN3KE_QM_OFFSET + 0x0040) +#define IPN3KE_TM_MISC_MON_0 (IPN3KE_QM_OFFSET + 0x0048) +#define IPN3KE_TM_MISC_FATAL_0 (IPN3KE_QM_OFFSET + 0x0050) +#define IPN3KE_TM_BW_MON_CTRL_1 (IPN3KE_QM_OFFSET + 0x0080) +#define IPN3KE_TM_BW_MON_CTRL_2 (IPN3KE_QM_OFFSET + 0x0084) +#define IPN3KE_TM_BW_MON_RATE (IPN3KE_QM_OFFSET + 0x0088) +#define IPN3KE_TM_STATS_CTRL (IPN3KE_QM_OFFSET + 0x0100) +#define IPN3KE_TM_STATS_DATA_0 (IPN3KE_QM_OFFSET + 0x0110) +#define IPN3KE_TM_STATS_DATA_1 (IPN3KE_QM_OFFSET + 0x0114) +#define IPN3KE_QM_UID_CONFIG_CTRL 
(IPN3KE_QM_OFFSET + 0x0200) +#define IPN3KE_QM_UID_CONFIG_DATA (IPN3KE_QM_OFFSET + 0x0204) + +#define IPN3KE_BM_VERSION (IPN3KE_QM_OFFSET + 0x4000) +#define IPN3KE_BM_STATUS (IPN3KE_QM_OFFSET + 0x4008) +#define IPN3KE_BM_STORE_CTRL (IPN3KE_QM_OFFSET + 0x4010) +#define IPN3KE_BM_STORE_STATUS (IPN3KE_QM_OFFSET + 0x4018) +#define IPN3KE_BM_STORE_MON (IPN3KE_QM_OFFSET + 0x4028) +#define IPN3KE_BM_WARNING_0 (IPN3KE_QM_OFFSET + 0x4040) +#define IPN3KE_BM_MON_0 (IPN3KE_QM_OFFSET + 0x4048) +#define IPN3KE_BM_FATAL_0 (IPN3KE_QM_OFFSET + 0x4050) +#define IPN3KE_BM_DRAM_ACCESS_CTRL (IPN3KE_QM_OFFSET + 0x4100) +#define IPN3KE_BM_DRAM_ACCESS_DATA_0 (IPN3KE_QM_OFFSET + 0x4120) +#define IPN3KE_BM_DRAM_ACCESS_DATA_1 (IPN3KE_QM_OFFSET + 0x4124) +#define IPN3KE_BM_DRAM_ACCESS_DATA_2 (IPN3KE_QM_OFFSET + 0x4128) +#define IPN3KE_BM_DRAM_ACCESS_DATA_3 (IPN3KE_QM_OFFSET + 0x412C) +#define IPN3KE_BM_DRAM_ACCESS_DATA_4 (IPN3KE_QM_OFFSET + 0x4130) +#define IPN3KE_BM_DRAM_ACCESS_DATA_5 (IPN3KE_QM_OFFSET + 0x4134) +#define IPN3KE_BM_DRAM_ACCESS_DATA_6 (IPN3KE_QM_OFFSET + 0x4138) + +#define IPN3KE_QM_VERSION (IPN3KE_QM_OFFSET + 0x8000) +#define IPN3KE_QM_STATUS (IPN3KE_QM_OFFSET + 0x8008) +#define IPN3KE_QM_LL_TABLE_MON (IPN3KE_QM_OFFSET + 0x8018) +#define IPN3KE_QM_WARNING_0 (IPN3KE_QM_OFFSET + 0x8040) +#define IPN3KE_QM_MON_0 (IPN3KE_QM_OFFSET + 0x8048) +#define IPN3KE_QM_FATAL_0 (IPN3KE_QM_OFFSET + 0x8050) +#define IPN3KE_QM_FATAL_1 (IPN3KE_QM_OFFSET + 0x8054) +#define IPN3KE_LL_TABLE_ACCESS_CTRL (IPN3KE_QM_OFFSET + 0x8100) +#define IPN3KE_LL_TABLE_ACCESS_DATA_0 (IPN3KE_QM_OFFSET + 0x8110) +#define IPN3KE_LL_TABLE_ACCESS_DATA_1 (IPN3KE_QM_OFFSET + 0x8114) + +#define IPN3KE_CCB_ERROR (IPN3KE_CCB_OFFSET + 0x0008) +#define IPN3KE_CCB_NSEGFREE (IPN3KE_CCB_OFFSET + 0x200000) +#define IPN3KE_CCB_NSEGFREE_MASK 0x3FFFFF +#define IPN3KE_CCB_PSEGMAX_COEF (IPN3KE_CCB_OFFSET + 0x200008) +#define IPN3KE_CCB_PSEGMAX_COEF_MASK 0xFFFFF +#define IPN3KE_CCB_NSEG_P (IPN3KE_CCB_OFFSET + 0x200080) +#define IPN3KE_CCB_NSEG_MASK 0x3FFFFF +#define IPN3KE_CCB_QPROFILE_Q (IPN3KE_CCB_OFFSET + 0x240000) +#define IPN3KE_CCB_QPROFILE_MASK 0x7FF +#define IPN3KE_CCB_PROFILE_P (IPN3KE_CCB_OFFSET + 0x280000) +#define IPN3KE_CCB_PROFILE_MASK 0x1FFFFFF +#define IPN3KE_CCB_PROFILE_MS (IPN3KE_CCB_OFFSET + 0xC) +#define IPN3KE_CCB_PROFILE_MS_MASK 0x1FFFFFF +#define IPN3KE_CCB_LR_LB_DBG_CTRL (IPN3KE_CCB_OFFSET + 0x2C0000) +#define IPN3KE_CCB_LR_LB_DBG_DONE (IPN3KE_CCB_OFFSET + 0x2C0004) +#define IPN3KE_CCB_LR_LB_DBG_RDATA (IPN3KE_CCB_OFFSET + 0x2C000C) + +#define IPN3KE_QOS_MAP_L1_X (IPN3KE_QOS_OFFSET + 0x000000) +#define IPN3KE_QOS_MAP_L1_MASK 0x1FFF +#define IPN3KE_QOS_MAP_L2_X (IPN3KE_QOS_OFFSET + 0x040000) +#define IPN3KE_QOS_MAP_L2_MASK 0x7 +#define IPN3KE_QOS_TYPE_MASK 0x3 +#define IPN3KE_QOS_TYPE_L1_X (IPN3KE_QOS_OFFSET + 0x200000) +#define IPN3KE_QOS_TYPE_L2_X (IPN3KE_QOS_OFFSET + 0x240000) +#define IPN3KE_QOS_TYPE_L3_X (IPN3KE_QOS_OFFSET + 0x280000) +#define IPN3KE_QOS_SCH_WT_MASK 0xFF +#define IPN3KE_QOS_SCH_WT_L1_X (IPN3KE_QOS_OFFSET + 0x400000) +#define IPN3KE_QOS_SCH_WT_L2_X (IPN3KE_QOS_OFFSET + 0x440000) +#define IPN3KE_QOS_SCH_WT_L3_X (IPN3KE_QOS_OFFSET + 0x480000) +#define IPN3KE_QOS_SHAP_WT_MASK 0x3FFF +#define IPN3KE_QOS_SHAP_WT_L1_X (IPN3KE_QOS_OFFSET + 0x600000) +#define IPN3KE_QOS_SHAP_WT_L2_X (IPN3KE_QOS_OFFSET + 0x640000) +#define IPN3KE_QOS_SHAP_WT_L3_X (IPN3KE_QOS_OFFSET + 0x680000) + +#define IPN3KE_CLF_BASE_DST_MAC_ADDR_HI (IPN3KE_CLASSIFY_OFFSET + 0x0000) +#define IPN3KE_CLF_BASE_DST_MAC_ADDR_LOW (IPN3KE_CLASSIFY_OFFSET + 
0x0004) +#define IPN3KE_CLF_QINQ_STAG (IPN3KE_CLASSIFY_OFFSET + 0x0008) +#define IPN3KE_CLF_LKUP_ENABLE (IPN3KE_CLASSIFY_OFFSET + 0x000C) +#define IPN3KE_CLF_DFT_FLOW_ID (IPN3KE_CLASSIFY_OFFSET + 0x0040) +#define IPN3KE_CLF_RX_PARSE_CFG (IPN3KE_CLASSIFY_OFFSET + 0x0080) +#define IPN3KE_CLF_RX_STATS_CFG (IPN3KE_CLASSIFY_OFFSET + 0x00C0) +#define IPN3KE_CLF_RX_STATS_RPT (IPN3KE_CLASSIFY_OFFSET + 0x00C4) +#define IPN3KE_CLF_RX_TEST (IPN3KE_CLASSIFY_OFFSET + 0x0400) + +#define IPN3KE_CLF_EM_VERSION (IPN3KE_CLASSIFY_OFFSET + 0x40000 + 0x0000) +#define IPN3KE_CLF_EM_NUM (IPN3KE_CLASSIFY_OFFSET + 0x40000 + 0x0008) +#define IPN3KE_CLF_EM_KEY_WDTH (IPN3KE_CLASSIFY_OFFSET + 0x40000 + 0x000C) +#define IPN3KE_CLF_EM_RES_WDTH (IPN3KE_CLASSIFY_OFFSET + 0x40000 + 0x0010) +#define IPN3KE_CLF_EM_ALARMS (IPN3KE_CLASSIFY_OFFSET + 0x40000 + 0x0014) +#define IPN3KE_CLF_EM_DRC_RLAT (IPN3KE_CLASSIFY_OFFSET + 0x40000 + 0x0018) + +#define IPN3KE_CLF_MHL_VERSION (IPN3KE_CLASSIFY_OFFSET + 0x50000 + 0x0000) +#define IPN3KE_CLF_MHL_GEN_CTRL (IPN3KE_CLASSIFY_OFFSET + 0x50000 + 0x0018) +#define IPN3KE_CLF_MHL_MGMT_CTRL (IPN3KE_CLASSIFY_OFFSET + 0x50000 + 0x0020) +#define IPN3KE_CLF_MHL_MGMT_CTRL_BIT_BUSY 31 +#define IPN3KE_CLF_MHL_MGMT_CTRL_FLUSH 0x0 +#define IPN3KE_CLF_MHL_MGMT_CTRL_INSERT 0x1 +#define IPN3KE_CLF_MHL_MGMT_CTRL_DELETE 0x2 +#define IPN3KE_CLF_MHL_MGMT_CTRL_SEARCH 0x3 +#define IPN3KE_CLF_MHL_FATAL_0 (IPN3KE_CLASSIFY_OFFSET + 0x50000 + 0x0050) +#define IPN3KE_CLF_MHL_MON_0 (IPN3KE_CLASSIFY_OFFSET + 0x50000 + 0x0060) +#define IPN3KE_CLF_MHL_TOTAL_ENTRIES (IPN3KE_CLASSIFY_OFFSET + \ + 0x50000 + 0x0080) +#define IPN3KE_CLF_MHL_ONEHIT_BUCKETS (IPN3KE_CLASSIFY_OFFSET + \ + 0x50000 + 0x0084) +#define IPN3KE_CLF_MHL_KEY_MASK 0xFFFFFFFF +#define IPN3KE_CLF_MHL_KEY_0 (IPN3KE_CLASSIFY_OFFSET + 0x50000 + 0x1000) +#define IPN3KE_CLF_MHL_KEY_1 (IPN3KE_CLASSIFY_OFFSET + 0x50000 + 0x1004) +#define IPN3KE_CLF_MHL_KEY_2 (IPN3KE_CLASSIFY_OFFSET + 0x50000 + 0x1008) +#define IPN3KE_CLF_MHL_KEY_3 (IPN3KE_CLASSIFY_OFFSET + 0x50000 + 0x100C) +#define IPN3KE_CLF_MHL_RES_MASK 0xFFFFFFFF +#define IPN3KE_CLF_MHL_RES (IPN3KE_CLASSIFY_OFFSET + 0x50000 + 0x2000) + +int +ipn3ke_rpst_dev_set_link_up(struct rte_eth_dev *dev); +int +ipn3ke_rpst_dev_set_link_down(struct rte_eth_dev *dev); +int +ipn3ke_rpst_link_update(struct rte_eth_dev *ethdev, + __rte_unused int wait_to_complete); +int +ipn3ke_rpst_promiscuous_enable(struct rte_eth_dev *ethdev); +int +ipn3ke_rpst_promiscuous_disable(struct rte_eth_dev *ethdev); +int +ipn3ke_rpst_allmulticast_enable(struct rte_eth_dev *ethdev); +int +ipn3ke_rpst_allmulticast_disable(struct rte_eth_dev *ethdev); +int +ipn3ke_rpst_mac_addr_set(struct rte_eth_dev *ethdev, + struct rte_ether_addr *mac_addr); +int +ipn3ke_rpst_mtu_set(struct rte_eth_dev *ethdev, uint16_t mtu); + +int +ipn3ke_rpst_init(struct rte_eth_dev *ethdev, void *init_params); +int +ipn3ke_rpst_uninit(struct rte_eth_dev *ethdev); +int +ipn3ke_hw_tm_init(struct ipn3ke_hw *hw); +void +ipn3ke_tm_init(struct ipn3ke_rpst *rpst); +int +ipn3ke_tm_ops_get(struct rte_eth_dev *ethdev, + void *arg); + + +/* IPN3KE_MASK is a macro used on 32 bit registers */ +#define IPN3KE_MASK(mask, shift) ((mask) << (shift)) + +#define IPN3KE_MAC_CTRL_BASE_0 0x00000000 +#define IPN3KE_MAC_CTRL_BASE_1 0x00008000 + +#define IPN3KE_MAC_STATS_MASK 0xFFFFFFFFF + +/* All the address are in 4Bytes*/ +#define IPN3KE_MAC_PRIMARY_MAC_ADDR0 0x0010 +#define IPN3KE_MAC_PRIMARY_MAC_ADDR1 0x0011 + +#define IPN3KE_MAC_MAC_RESET_CONTROL 0x001F +#define 
IPN3KE_MAC_MAC_RESET_CONTROL_TX_SHIFT 0 +#define IPN3KE_MAC_MAC_RESET_CONTROL_TX_MASK \ + IPN3KE_MASK(0x1, IPN3KE_MAC_MAC_RESET_CONTROL_TX_SHIFT) + +#define IPN3KE_MAC_MAC_RESET_CONTROL_RX_SHIFT 8 +#define IPN3KE_MAC_MAC_RESET_CONTROL_RX_MASK \ + IPN3KE_MASK(0x1, IPN3KE_MAC_MAC_RESET_CONTROL_RX_SHIFT) + +#define IPN3KE_MAC_TX_PACKET_CONTROL 0x0020 +#define IPN3KE_MAC_TX_PACKET_CONTROL_SHIFT 0 +#define IPN3KE_MAC_TX_PACKET_CONTROL_MASK \ + IPN3KE_MASK(0x1, IPN3KE_MAC_TX_PACKET_CONTROL_SHIFT) + +#define IPN3KE_MAC_TX_SRC_ADDR_OVERRIDE 0x002A +#define IPN3KE_MAC_TX_SRC_ADDR_OVERRIDE_SHIFT 0 +#define IPN3KE_MAC_TX_SRC_ADDR_OVERRIDE_MASK \ + IPN3KE_MASK(0x1, IPN3KE_MAC_TX_SRC_ADDR_OVERRIDE_SHIFT) + +#define IPN3KE_MAC_TX_FRAME_MAXLENGTH 0x002C +#define IPN3KE_MAC_TX_FRAME_MAXLENGTH_SHIFT 0 +#define IPN3KE_MAC_TX_FRAME_MAXLENGTH_MASK \ + IPN3KE_MASK(0xFFFF, IPN3KE_MAC_TX_FRAME_MAXLENGTH_SHIFT) + +#define IPN3KE_MAC_TX_PAUSEFRAME_CONTROL 0x0040 +#define IPN3KE_MAC_TX_PAUSEFRAME_CONTROL_SHIFT 0 +#define IPN3KE_MAC_TX_PAUSEFRAME_CONTROL_MASK \ + IPN3KE_MASK(0x3, IPN3KE_MAC_TX_PAUSEFRAME_CONTROL_SHIFT) + +#define IPN3KE_MAC_TX_PAUSEFRAME_QUANTA 0x0042 +#define IPN3KE_MAC_TX_PAUSEFRAME_QUANTA_SHIFT 0 +#define IPN3KE_MAC_TX_PAUSEFRAME_QUANTA_MASK \ + IPN3KE_MASK(0xFFFF, IPN3KE_MAC_TX_PAUSEFRAME_QUANTA_SHIFT) + +#define IPN3KE_MAC_TX_PAUSEFRAME_HOLDOFF_QUANTA 0x0043 +#define IPN3KE_MAC_TX_PAUSEFRAME_HOLDOFF_QUANTA_SHIFT 0 +#define IPN3KE_MAC_TX_PAUSEFRAME_HOLDOFF_QUANTA_MASK \ + IPN3KE_MASK(0xFFFF, IPN3KE_MAC_TX_PAUSEFRAME_HOLDOFF_QUANTA_SHIFT) + +#define IPN3KE_MAC_TX_PAUSEFRAME_ENABLE 0x0044 +#define IPN3KE_MAC_TX_PAUSEFRAME_ENABLE_CFG_SHIFT 0 +#define IPN3KE_MAC_TX_PAUSEFRAME_ENABLE_CFG_MASK \ + IPN3KE_MASK(0x1, IPN3KE_MAC_TX_PAUSEFRAME_ENABLE_CFG_SHIFT) + +#define IPN3KE_MAC_TX_PAUSEFRAME_ENABLE_TYPE_SHIFT 1 +#define IPN3KE_MAC_TX_PAUSEFRAME_ENABLE_TYPE_MASK \ + IPN3KE_MASK(0x3, IPN3KE_MAC_TX_PAUSEFRAME_ENABLE_TYPE_SHIFT) + +#define IPN3KE_MAC_RX_TRANSFER_CONTROL 0x00A0 +#define IPN3KE_MAC_RX_TRANSFER_CONTROL_SHIFT 0x0 +#define IPN3KE_MAC_RX_TRANSFER_CONTROL_MASK \ + IPN3KE_MASK(0x1, IPN3KE_MAC_RX_TRANSFER_CONTROL_SHIFT) + +#define IPN3KE_MAC_RX_FRAME_CONTROL 0x00AC +#define IPN3KE_MAC_RX_FRAME_CONTROL_EN_ALLUCAST_SHIFT 0x0 +#define IPN3KE_MAC_RX_FRAME_CONTROL_EN_ALLUCAST_MASK \ + IPN3KE_MASK(0x1, IPN3KE_MAC_RX_FRAME_CONTROL_EN_ALLUCAST_SHIFT) + +#define IPN3KE_MAC_RX_FRAME_CONTROL_EN_ALLMCAST_SHIFT 0x1 +#define IPN3KE_MAC_RX_FRAME_CONTROL_EN_ALLMCAST_MASK \ + IPN3KE_MASK(0x1, IPN3KE_MAC_RX_FRAME_CONTROL_EN_ALLMCAST_SHIFT) + +#define IPN3KE_VLAN_TAG_SIZE 4 +/** + * The overhead from MTU to max frame size. + * Considering QinQ packet, the VLAN tag needs to be counted twice. + */ +#define IPN3KE_ETH_OVERHEAD \ + (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + IPN3KE_VLAN_TAG_SIZE * 2) + +#define IPN3KE_MAC_FRAME_SIZE_MAX 9728 +#define IPN3KE_MAC_RX_FRAME_MAXLENGTH 0x00AE +#define IPN3KE_MAC_RX_FRAME_MAXLENGTH_SHIFT 0 +#define IPN3KE_MAC_RX_FRAME_MAXLENGTH_MASK \ + IPN3KE_MASK(0xFFFF, IPN3KE_MAC_RX_FRAME_MAXLENGTH_SHIFT) + +#define IPN3KE_25G_MAX_TX_SIZE_CONFIG 0x407 +#define IPN3KE_25G_MAX_RX_SIZE_CONFIG 0x506 + +#define IPN3KE_10G_TX_FRAME_MAXLENGTH 0x002C +#define IPN3KE_10G_RX_FRAME_MAXLENGTH 0x00AE + +#define IPN3KE_REGISTER_WIDTH 32 + +/*Bits[2:0]: Configuration of TX statistics counters: + *Bit[2]: Shadow request (active high): When set to the value of 1, + *TX statistics collection is paused. 
The underlying counters + *continue to operate, but the readable values reflect a snapshot at + *the time the pause flag was activated. Write a 0 to release. + *Bit[1]: Parity-error clear. When software sets this bit, the IP core + *clears the parity bit CNTR_TX_STATUS[0]. This bit + *(CNTR_TX_CONFIG[1]) is self-clearing. + *Bit[0]: Software can set this bit to the value of 1 to reset all of + *the TX statistics registers at the same time. This bit is selfclearing. + *Bits[31:3] are Reserved + */ +#define IPN3KE_25G_TX_STATISTICS_CONFIG 0x845 +#define IPN3KE_25G_TX_STATISTICS_CONFIG_SHADOW_REQUEST_MASK 0x00000004 + +/*Bit[1]: Indicates that the TX statistics registers are paused (while + *CNTR_TX_CONFIG[2] is asserted). + *Bit[0]: Indicates the presence of at least one parity error in the + *TX statistics counters. + *Bits[31:2] are Reserved. + */ +#define IPN3KE_25G_TX_STATISTICS_STATUS 0x846 +#define IPN3KE_25G_TX_STATISTICS_STATUS_SHADOW_REQUEST_MASK 0x00000002 + +#define IPN3KE_25G_CNTR_TX_FRAGMENTS_LO 0x800 +#define IPN3KE_25G_CNTR_TX_FRAGMENTS_HI 0x801 +#define IPN3KE_25G_CNTR_TX_JABBERS_LO 0x802 +#define IPN3KE_25G_CNTR_TX_JABBERS_HI 0x803 +#define IPN3KE_25G_CNTR_TX_FCS_LO 0x804 +#define IPN3KE_25G_CNTR_TX_FCS_HI 0x805 +#define IPN3KE_25G_CNTR_TX_CRCERR_LO 0x806 +#define IPN3KE_25G_CNTR_TX_CRCERR_HI 0x807 +#define IPN3KE_25G_CNTR_TX_MCAST_DATA_ERR_LO 0x808 +#define IPN3KE_25G_CNTR_TX_MCAST_DATA_ERR_HI 0x809 +#define IPN3KE_25G_CNTR_TX_BCAST_DATA_ERR_LO 0x80A +#define IPN3KE_25G_CNTR_TX_BCAST_DATA_ERR_HI 0x80B +#define IPN3KE_25G_CNTR_TX_UCAST_DATA_ERR_LO 0x80C +#define IPN3KE_25G_CNTR_TX_UCAST_DATA_ERR_HI 0x80D +#define IPN3KE_25G_CNTR_TX_MCAST_CTRL_ERR_LO 0x80E +#define IPN3KE_25G_CNTR_TX_MCAST_CTRL_ERR_HI 0x80F +#define IPN3KE_25G_CNTR_TX_BCAST_CTRL_ERR_LO 0x810 +#define IPN3KE_25G_CNTR_TX_BCAST_CTRL_ERR_HI 0x811 +#define IPN3KE_25G_CNTR_TX_UCAST_CTRL_ERR_LO 0x812 +#define IPN3KE_25G_CNTR_TX_UCAST_CTRL_ERR_HI 0x813 +#define IPN3KE_25G_CNTR_TX_PAUSE_ERR_LO 0x814 +#define IPN3KE_25G_CNTR_TX_PAUSE_ERR_HI 0x815 +#define IPN3KE_25G_CNTR_TX_64B_LO 0x816 +#define IPN3KE_25G_CNTR_TX_64B_HI 0x817 +#define IPN3KE_25G_CNTR_TX_65_127B_LO 0x818 +#define IPN3KE_25G_CNTR_TX_65_127B_HI 0x819 +#define IPN3KE_25G_CNTR_TX_128_255B_LO 0x81A +#define IPN3KE_25G_CNTR_TX_128_255B_HI 0x81B +#define IPN3KE_25G_CNTR_TX_256_511B_LO 0x81C +#define IPN3KE_25G_CNTR_TX_256_511B_HI 0x81D +#define IPN3KE_25G_CNTR_TX_512_1023B_LO 0x81E +#define IPN3KE_25G_CNTR_TX_512_1023B_HI 0x81F +#define IPN3KE_25G_CNTR_TX_1024_1518B_LO 0x820 +#define IPN3KE_25G_CNTR_TX_1024_1518B_HI 0x821 +#define IPN3KE_25G_CNTR_TX_1519_MAXB_LO 0x822 +#define IPN3KE_25G_CNTR_TX_1519_MAXB_HI 0x823 +#define IPN3KE_25G_CNTR_TX_OVERSIZE_LO 0x824 +#define IPN3KE_25G_CNTR_TX_OVERSIZE_HI 0x825 +#define IPN3KE_25G_CNTR_TX_MCAST_DATA_OK_LO 0x826 +#define IPN3KE_25G_CNTR_TX_MCAST_DATA_OK_HI 0x827 +#define IPN3KE_25G_CNTR_TX_BCAST_DATA_OK_LO 0x828 +#define IPN3KE_25G_CNTR_TX_BCAST_DATA_OK_HI 0x829 +#define IPN3KE_25G_CNTR_TX_UCAST_DATA_OK_LO 0x82A +#define IPN3KE_25G_CNTR_TX_UCAST_DATA_OK_HI 0x82B +#define IPN3KE_25G_CNTR_TX_MCAST_CTRL_LO 0x82C +#define IPN3KE_25G_CNTR_TX_MCAST_CTRL_HI 0x82D +#define IPN3KE_25G_CNTR_TX_BCAST_CTRL_LO 0x82E +#define IPN3KE_25G_CNTR_TX_BCAST_CTRL_HI 0x82F +#define IPN3KE_25G_CNTR_TX_UCAST_CTRL_LO 0x830 +#define IPN3KE_25G_CNTR_TX_UCAST_CTRL_HI 0x831 +#define IPN3KE_25G_CNTR_TX_PAUSE_LO 0x832 +#define IPN3KE_25G_CNTR_TX_PAUSE_HI 0x833 +#define IPN3KE_25G_CNTR_TX_RUNT_LO 0x834 +#define IPN3KE_25G_CNTR_TX_RUNT_HI 0x835 
+#define IPN3KE_25G_TX_PAYLOAD_OCTETS_OK_LO 0x860 +#define IPN3KE_25G_TX_PAYLOAD_OCTETS_OK_HI 0x861 +#define IPN3KE_25G_TX_FRAME_OCTETS_OK_LO 0x862 +#define IPN3KE_25G_TX_FRAME_OCTETS_OK_HI 0x863 + +/*Bits[2:0]: Configuration of RX statistics counters: + *Bit[2]: Shadow request (active high): When set to the value of 1, + *RX statistics collection is paused. The underlying counters + *continue to operate, but the readable values reflect a snapshot + *at the time the pause flag was activated. Write a 0 to release. + *Bit[1]: Parity-error clear. When software sets this bit, the IP + *core clears the parity bit CNTR_RX_STATUS[0]. This bit + *(CNTR_RX_CONFIG[1]) is self-clearing. + *Bit[0]: Software can set this bit to the value of 1 to reset all of + *the RX statistics registers at the same time. This bit is selfclearing. + *Bits[31:3] are Reserved. + */ +#define IPN3KE_25G_RX_STATISTICS_CONFIG 0x945 +#define IPN3KE_25G_RX_STATISTICS_CONFIG_SHADOW_REQUEST_MASK 0x00000004 + +/*Bit[1]: Indicates that the RX statistics registers are paused + *(while CNTR_RX_CONFIG[2] is asserted). + *Bit[0]: Indicates the presence of at least one parity error in the + *RX statistics counters. + *Bits [31:2] are Reserved + */ +#define IPN3KE_25G_RX_STATISTICS_STATUS 0x946 +#define IPN3KE_25G_RX_STATISTICS_STATUS_SHADOW_REQUEST_MASK 0x00000002 + +#define IPN3KE_25G_CNTR_RX_FRAGMENTS_LO 0x900 +#define IPN3KE_25G_CNTR_RX_FRAGMENTS_HI 0x901 +#define IPN3KE_25G_CNTR_RX_JABBERS_LO 0x902 +#define IPN3KE_25G_CNTR_RX_JABBERS_HI 0x903 +#define IPN3KE_25G_CNTR_RX_FCS_LO 0x904 +#define IPN3KE_25G_CNTR_RX_FCS_HI 0x905 +#define IPN3KE_25G_CNTR_RX_CRCERR_LO 0x906 +#define IPN3KE_25G_CNTR_RX_CRCERR_HI 0x907 +#define IPN3KE_25G_CNTR_RX_MCAST_DATA_ERR_LO 0x908 +#define IPN3KE_25G_CNTR_RX_MCAST_DATA_ERR_HI 0x909 +#define IPN3KE_25G_CNTR_RX_BCAST_DATA_ERR_LO 0x90A +#define IPN3KE_25G_CNTR_RX_BCAST_DATA_ERR_HI 0x90B +#define IPN3KE_25G_CNTR_RX_UCAST_DATA_ERR_LO 0x90C +#define IPN3KE_25G_CNTR_RX_UCAST_DATA_ERR_HI 0x90D +#define IPN3KE_25G_CNTR_RX_MCAST_CTRL_ERR_LO 0x90E +#define IPN3KE_25G_CNTR_RX_MCAST_CTRL_ERR_HI 0x90F +#define IPN3KE_25G_CNTR_RX_BCAST_CTRL_ERR_LO 0x910 +#define IPN3KE_25G_CNTR_RX_BCAST_CTRL_ERR_HI 0x911 +#define IPN3KE_25G_CNTR_RX_UCAST_CTRL_ERR_LO 0x912 +#define IPN3KE_25G_CNTR_RX_UCAST_CTRL_ERR_HI 0x913 +#define IPN3KE_25G_CNTR_RX_PAUSE_ERR_LO 0x914 +#define IPN3KE_25G_CNTR_RX_PAUSE_ERR_HI 0x915 +#define IPN3KE_25G_CNTR_RX_64B_LO 0x916 +#define IPN3KE_25G_CNTR_RX_64B_HI 0x917 +#define IPN3KE_25G_CNTR_RX_65_127B_LO 0x918 +#define IPN3KE_25G_CNTR_RX_65_127B_HI 0x919 +#define IPN3KE_25G_CNTR_RX_128_255B_LO 0x91A +#define IPN3KE_25G_CNTR_RX_128_255B_HI 0x91B +#define IPN3KE_25G_CNTR_RX_256_511B_LO 0x91C +#define IPN3KE_25G_CNTR_RX_256_511B_HI 0x91D +#define IPN3KE_25G_CNTR_RX_512_1023B_LO 0x91E +#define IPN3KE_25G_CNTR_RX_512_1023B_HI 0x91F +#define IPN3KE_25G_CNTR_RX_1024_1518B_LO 0x920 +#define IPN3KE_25G_CNTR_RX_1024_1518B_HI 0x921 +#define IPN3KE_25G_CNTR_RX_1519_MAXB_LO 0x922 +#define IPN3KE_25G_CNTR_RX_1519_MAXB_HI 0x923 +#define IPN3KE_25G_CNTR_RX_OVERSIZE_LO 0x924 +#define IPN3KE_25G_CNTR_RX_OVERSIZE_HI 0x925 +#define IPN3KE_25G_CNTR_RX_MCAST_DATA_OK_LO 0x926 +#define IPN3KE_25G_CNTR_RX_MCAST_DATA_OK_HI 0x927 +#define IPN3KE_25G_CNTR_RX_BCAST_DATA_OK_LO 0x928 +#define IPN3KE_25G_CNTR_RX_BCAST_DATA_OK_HI 0x929 +#define IPN3KE_25G_CNTR_RX_UCAST_DATA_OK_LO 0x92A +#define IPN3KE_25G_CNTR_RX_UCAST_DATA_OK_HI 0x92B +#define IPN3KE_25G_CNTR_RX_MCAST_CTRL_LO 0x92C +#define IPN3KE_25G_CNTR_RX_MCAST_CTRL_HI 0x92D 
+#define IPN3KE_25G_CNTR_RX_BCAST_CTRL_LO 0x92E +#define IPN3KE_25G_CNTR_RX_BCAST_CTRL_HI 0x92F +#define IPN3KE_25G_CNTR_RX_UCAST_CTRL_LO 0x930 +#define IPN3KE_25G_CNTR_RX_UCAST_CTRL_HI 0x931 +#define IPN3KE_25G_CNTR_RX_PAUSE_LO 0x932 +#define IPN3KE_25G_CNTR_RX_PAUSE_HI 0x933 +#define IPN3KE_25G_CNTR_RX_RUNT_LO 0x934 +#define IPN3KE_25G_CNTR_RX_RUNT_HI 0x935 +#define IPN3KE_25G_RX_PAYLOAD_OCTETS_OK_LO 0x960 +#define IPN3KE_25G_RX_PAYLOAD_OCTETS_OK_HI 0x961 +#define IPN3KE_25G_RX_FRAME_OCTETS_OK_LO 0x962 +#define IPN3KE_25G_RX_FRAME_OCTETS_OK_HI 0x963 + +#define IPN3KE_10G_STATS_HI_VALID_MASK 0x0000000F + +#define IPN3KE_10G_TX_STATS_CLR 0x0140 +#define IPN3KE_10G_TX_STATS_CLR_CLEAR_SHIFT 0 +#define IPN3KE_10G_TX_STATS_CLR_CLEAR_MASK \ + IPN3KE_MASK(0x1, IPN3KE_10G_TX_STATS_CLR_CLEAR_SHIFT) + +#define IPN3KE_10G_RX_STATS_CLR 0x01C0 +#define IPN3KE_10G_RX_STATS_CLR_CLEAR_SHIFT 0 +#define IPN3KE_10G_RX_STATS_CLR_CLEAR_MASK \ + IPN3KE_MASK(0x1, IPN3KE_10G_RX_STATS_CLR_CLEAR_SHIFT) + +#define IPN3KE_10G_TX_STATS_FRAME_OK_LO 0x0142 +#define IPN3KE_10G_TX_STATS_FRAME_OK_HI 0x0143 +#define IPN3KE_10G_RX_STATS_FRAME_OK_LO 0x01C2 +#define IPN3KE_10G_RX_STATS_FRAME_OK_HI 0x01C3 +#define IPN3KE_10G_TX_STATS_FRAME_ERR_LO 0x0144 +#define IPN3KE_10G_TX_STATS_FRAME_ERR_HI 0x0145 +#define IPN3KE_10G_RX_STATS_FRAME_ERR_LO 0x01C4 +#define IPN3KE_10G_RX_STATS_FRAME_ERR_HI 0x01C5 +#define IPN3KE_10G_RX_STATS_FRAME_CRC_ERR_LO 0x01C6 +#define IPN3KE_10G_RX_STATS_FRAME_CRC_ERR_HI 0x01C7 +#define IPN3KE_10G_TX_STATS_OCTETS_OK_LO 0x0148 +#define IPN3KE_10G_TX_STATS_OCTETS_OK_HI 0x0149 +#define IPN3KE_10G_RX_STATS_OCTETS_OK_LO 0x01C8 +#define IPN3KE_10G_RX_STATS_OCTETS_OK_HI 0x01C9 +#define IPN3KE_10G_TX_STATS_PAUSE_MAC_CTRL_FRAMES_LO 0x014A +#define IPN3KE_10G_TX_STATS_PAUSE_MAC_CTRL_FRAMES_HI 0x014B +#define IPN3KE_10G_RX_STATS_PAUSE_MAC_CTRL_FRAMES_LO 0x01CA +#define IPN3KE_10G_RX_STATS_PAUSE_MAC_CTRL_FRAMES_HI 0x01CB +#define IPN3KE_10G_TX_STATS_IF_ERRORS_LO 0x014C +#define IPN3KE_10G_TX_STATS_IF_ERRORS_HI 0x014D +#define IPN3KE_10G_RX_STATS_IF_ERRORS_LO 0x01CC +#define IPN3KE_10G_RX_STATS_IF_ERRORS_HI 0x01CD +#define IPN3KE_10G_TX_STATS_UNICAST_FRAME_OK_LO 0x014E +#define IPN3KE_10G_TX_STATS_UNICAST_FRAME_OK_HI 0x014F +#define IPN3KE_10G_RX_STATS_UNICAST_FRAME_OK_LO 0x01CE +#define IPN3KE_10G_RX_STATS_UNICAST_FRAME_OK_HI 0x01CF +#define IPN3KE_10G_TX_STATS_UNICAST_FRAME_ERR_LO 0x0150 +#define IPN3KE_10G_TX_STATS_UNICAST_FRAME_ERR_HI 0x0151 +#define IPN3KE_10G_RX_STATS_UNICAST_FRAME_ERR_LO 0x01D0 +#define IPN3KE_10G_RX_STATS_UNICAST_FRAME_ERR_HI 0x01D1 +#define IPN3KE_10G_TX_STATS_MULTICAST_FRAME_OK_LO 0x0152 +#define IPN3KE_10G_TX_STATS_MULTICAST_FRAME_OK_HI 0x0153 +#define IPN3KE_10G_RX_STATS_MULTICAST_FRAME_OK_LO 0x01D2 +#define IPN3KE_10G_RX_STATS_MULTICAST_FRAME_OK_HI 0x01D3 +#define IPN3KE_10G_TX_STATS_MULTICAST_FRAME_ERR_LO 0x0154 +#define IPN3KE_10G_TX_STATS_MULTICAST_FRAME_ERR_HI 0x0155 +#define IPN3KE_10G_RX_STATS_MULTICAST_FRAME_ERR_LO 0x01D4 +#define IPN3KE_10G_RX_STATS_MULTICAST_FRAME_ERR_HI 0x01D5 +#define IPN3KE_10G_TX_STATS_BROADCAST_FRAME_OK_LO 0x0156 +#define IPN3KE_10G_TX_STATS_BROADCAST_FRAME_OK_HI 0x0157 +#define IPN3KE_10G_RX_STATS_BROADCAST_FRAME_OK_LO 0x01D6 +#define IPN3KE_10G_RX_STATS_BROADCAST_FRAME_OK_HI 0x01D7 +#define IPN3KE_10G_TX_STATS_BROADCAST_FRAME_ERR_LO 0x0158 +#define IPN3KE_10G_TX_STATS_BROADCAST_FRAME_ERR_HI 0x0159 +#define IPN3KE_10G_RX_STATS_BROADCAST_FRAME_ERR_LO 0x01D8 +#define IPN3KE_10G_RX_STATS_BROADCAST_FRAME_ERR_HI 0x01D9 +#define 
IPN3KE_10G_TX_STATS_ETHER_STATS_OCTETS_LO 0x015A +#define IPN3KE_10G_TX_STATS_ETHER_STATS_OCTETS_HI 0x015B +#define IPN3KE_10G_RX_STATS_ETHER_STATS_OCTETS_LO 0x01DA +#define IPN3KE_10G_RX_STATS_ETHER_STATS_OCTETS_HI 0x01DB +#define IPN3KE_10G_TX_STATS_ETHER_STATS_PKTS_LO 0x015C +#define IPN3KE_10G_TX_STATS_ETHER_STATS_PKTS_HI 0x015D +#define IPN3KE_10G_RX_STATS_ETHER_STATS_PKTS_LO 0x01DC +#define IPN3KE_10G_RX_STATS_ETHER_STATS_PKTS_HI 0x01DD +#define IPN3KE_10G_TX_STATS_ETHER_STATS_UNDER_SIZE_PKTS_LO 0x015E +#define IPN3KE_10G_TX_STATS_ETHER_STATS_UNDER_SIZE_PKTS_HI 0x015F +#define IPN3KE_10G_RX_STATS_ETHER_STATS_UNDER_SIZE_PKTS_LO 0x01DE +#define IPN3KE_10G_RX_STATS_ETHER_STATS_UNDER_SIZE_PKTS_HI 0x01DF +#define IPN3KE_10G_TX_STATS_ETHER_STATS_OVER_SIZE_PKTS_LO 0x0160 +#define IPN3KE_10G_TX_STATS_ETHER_STATS_OVER_SIZE_PKTS_HI 0x0161 +#define IPN3KE_10G_RX_STATS_ETHER_STATS_OVER_SIZE_PKTS_LO 0x01E0 +#define IPN3KE_10G_RX_STATS_ETHER_STATS_OVER_SIZE_PKTS_HI 0x01E1 +#define IPN3KE_10G_TX_STATS_ETHER_STATS_PKTS_64_OCTETS_LO 0x0162 +#define IPN3KE_10G_TX_STATS_ETHER_STATS_PKTS_64_OCTETS_HI 0x0163 +#define IPN3KE_10G_RX_STATS_ETHER_STATS_PKTS_64_OCTETS_LO 0x01E2 +#define IPN3KE_10G_RX_STATS_ETHER_STATS_PKTS_64_OCTETS_HI 0x01E3 +#define IPN3KE_10G_TX_STATS_ETHER_STATS_PKTS_65_127_OCTETS_LO 0x0164 +#define IPN3KE_10G_TX_STATS_ETHER_STATS_PKTS_65_127_OCTETS_HI 0x0165 +#define IPN3KE_10G_RX_STATS_ETHER_STATS_PKTS_65_127_OCTETS_LO 0x01E4 +#define IPN3KE_10G_RX_STATS_ETHER_STATS_PKTS_65_127_OCTETS_HI 0x01E5 +#define IPN3KE_10G_TX_STATS_ETHER_STATS_PKTS_128_255_OCTETS_LO 0x0166 +#define IPN3KE_10G_TX_STATS_ETHER_STATS_PKTS_128_255_OCTETS_HI 0x0167 +#define IPN3KE_10G_RX_STATS_ETHER_STATS_PKTS_128_255_OCTETS_LO 0x01E6 +#define IPN3KE_10G_RX_STATS_ETHER_STATS_PKTS_128_255_OCTETS_HI 0x01E7 +#define IPN3KE_10G_TX_STATS_ETHER_STATS_PKTS_256_511_OCTETS_LO 0x0168 +#define IPN3KE_10G_TX_STATS_ETHER_STATS_PKTS_256_511_OCTETS_HI 0x0169 +#define IPN3KE_10G_RX_STATS_ETHER_STATS_PKTS_256_511_OCTETS_LO 0x01E8 +#define IPN3KE_10G_RX_STATS_ETHER_STATS_PKTS_256_511_OCTETS_HI 0x01E9 +#define IPN3KE_10G_TX_STATS_ETHER_STATS_PKTS_512_1023_OCTETS_LO 0x016A +#define IPN3KE_10G_TX_STATS_ETHER_STATS_PKTS_512_1023_OCTETS_HI 0x016B +#define IPN3KE_10G_RX_STATS_ETHER_STATS_PKTS_512_1023_OCTETS_LO 0x01EA +#define IPN3KE_10G_RX_STATS_ETHER_STATS_PKTS_512_1023_OCTETS_HI 0x01EB +#define IPN3KE_10G_TX_STATS_ETHER_STATS_PKTS_1024_1518_OCTETS_LO 0x016C +#define IPN3KE_10G_TX_STATS_ETHER_STATS_PKTS_1024_1518_OCTETS_HI 0x016D +#define IPN3KE_10G_RX_STATS_ETHER_STATS_PKTS_1024_1518_OCTETS_LO 0x01EC +#define IPN3KE_10G_RX_STATS_ETHER_STATS_PKTS_1024_1518_OCTETS_HI 0x01ED +#define IPN3KE_10G_TX_STATS_ETHER_STATS_PKTS_1519_X_OCTETS_LO 0x016E +#define IPN3KE_10G_TX_STATS_ETHER_STATS_PKTS_1519_X_OCTETS_HI 0x016F +#define IPN3KE_10G_RX_STATS_ETHER_STATS_PKTS_1519_X_OCTETS_LO 0x01EE +#define IPN3KE_10G_RX_STATS_ETHER_STATS_PKTS_1519_X_OCTETS_HI 0x01EF +#define IPN3KE_10G_RX_STATS_ETHER_STATS_FRAGMENTS_LO 0x01E0 +#define IPN3KE_10G_RX_STATS_ETHER_STATS_FRAGMENTS_HI 0x01F1 +#define IPN3KE_10G_RX_STATS_ETHER_STATS_JABBERS_LO 0x01E2 +#define IPN3KE_10G_RX_STATS_ETHER_STATS_JABBERS_HI 0x01F3 +#define IPN3KE_10G_RX_STATS_ETHER_STATS_CRC_ERR_LO 0x01E4 +#define IPN3KE_10G_RX_STATS_ETHER_STATS_CRC_ERR_HI 0x01F5 +#define IPN3KE_10G_TX_STATS_UNICAST_MAC_CTRL_FRAMES_LO 0x0176 +#define IPN3KE_10G_TX_STATS_UNICAST_MAC_CTRL_FRAMES_HI 0x0177 +#define IPN3KE_10G_RX_STATS_UNICAST_MAC_CTRL_FRAMES_LO 0x01F6 +#define IPN3KE_10G_RX_STATS_UNICAST_MAC_CTRL_FRAMES_HI 
0x01F7 +#define IPN3KE_10G_TX_STATS_MULTICAST_MAC_CTRL_FRAMES_LO 0x0178 +#define IPN3KE_10G_TX_STATS_MULTICAST_MAC_CTRL_FRAMES_HI 0x0179 +#define IPN3KE_10G_RX_STATS_MULTICAST_MAC_CTRL_FRAMES_LO 0x01F8 +#define IPN3KE_10G_RX_STATS_MULTICAST_MAC_CTRL_FRAMES_HI 0x01F9 +#define IPN3KE_10G_TX_STATS_BROADCAST_MAC_CTRL_FRAMES_LO 0x017A +#define IPN3KE_10G_TX_STATS_BROADCAST_MAC_CTRL_FRAMES_HI 0x017B +#define IPN3KE_10G_RX_STATS_BROADCAST_MAC_CTRL_FRAMES_LO 0x01FA +#define IPN3KE_10G_RX_STATS_BROADCAST_MAC_CTRL_FRAMES_HI 0x01FB +#define IPN3KE_10G_TX_STATS_PFC_MAC_CTRL_FRAMES_LO 0x017C +#define IPN3KE_10G_TX_STATS_PFC_MAC_CTRL_FRAMES_HI 0x017D +#define IPN3KE_10G_RX_STATS_PFC_MAC_CTRL_FRAMES_LO 0x01FC +#define IPN3KE_10G_RX_STATS_PFC_MAC_CTRL_FRAMES_HI 0x01FD + +static inline void ipn3ke_xmac_tx_enable(struct ipn3ke_hw *hw, + uint32_t mac_num, uint32_t eth_group_sel) +{ +#define IPN3KE_XMAC_TX_ENABLE (0 & (IPN3KE_MAC_TX_PACKET_CONTROL_MASK)) + + (*hw->f_mac_write)(hw, + IPN3KE_XMAC_TX_ENABLE, + IPN3KE_MAC_TX_PACKET_CONTROL, + mac_num, + eth_group_sel); +} + +static inline void ipn3ke_xmac_tx_disable(struct ipn3ke_hw *hw, + uint32_t mac_num, uint32_t eth_group_sel) +{ +#define IPN3KE_XMAC_TX_DISABLE (1 & (IPN3KE_MAC_TX_PACKET_CONTROL_MASK)) + + (*hw->f_mac_write)(hw, + IPN3KE_XMAC_TX_DISABLE, + IPN3KE_MAC_TX_PACKET_CONTROL, + mac_num, + eth_group_sel); +} + +static inline void ipn3ke_xmac_rx_enable(struct ipn3ke_hw *hw, + uint32_t mac_num, uint32_t eth_group_sel) +{ +#define IPN3KE_XMAC_RX_ENABLE (0 & (IPN3KE_MAC_RX_TRANSFER_CONTROL_MASK)) + + (*hw->f_mac_write)(hw, + IPN3KE_XMAC_RX_ENABLE, + IPN3KE_MAC_RX_TRANSFER_CONTROL, + mac_num, + eth_group_sel); +} + +static inline void ipn3ke_xmac_rx_disable(struct ipn3ke_hw *hw, + uint32_t mac_num, uint32_t eth_group_sel) +{ +#define IPN3KE_XMAC_RX_DISABLE (1 & (IPN3KE_MAC_RX_TRANSFER_CONTROL_MASK)) + + (*hw->f_mac_write)(hw, + IPN3KE_XMAC_RX_DISABLE, + IPN3KE_MAC_RX_TRANSFER_CONTROL, + mac_num, + eth_group_sel); +} + +static inline void ipn3ke_xmac_smac_ovd_dis(struct ipn3ke_hw *hw, + uint32_t mac_num, uint32_t eth_group_sel) +{ +#define IPN3KE_XMAC_SMAC_OVERRIDE_DISABLE (0 & \ + (IPN3KE_MAC_TX_SRC_ADDR_OVERRIDE_MASK)) + + (*hw->f_mac_write)(hw, + IPN3KE_XMAC_SMAC_OVERRIDE_DISABLE, + IPN3KE_MAC_TX_SRC_ADDR_OVERRIDE, + mac_num, + eth_group_sel); +} + +static inline void ipn3ke_xmac_tx_clr_10G_stcs +(struct ipn3ke_hw *hw, uint32_t mac_num, uint32_t eth_group_sel) +{ + uint32_t tmp; + tmp = 0x00000000; + (*hw->f_mac_read)(hw, + &tmp, + IPN3KE_10G_TX_STATS_CLR, + mac_num, + eth_group_sel); + tmp |= 0x00000001; + (*hw->f_mac_write)(hw, + tmp, + IPN3KE_10G_TX_STATS_CLR, + mac_num, + eth_group_sel); +} + +static inline void ipn3ke_xmac_rx_clr_10G_stcs +(struct ipn3ke_hw *hw, uint32_t mac_num, uint32_t eth_group_sel) +{ + uint32_t tmp; + tmp = 0x00000000; + (*hw->f_mac_read)(hw, + &tmp, + IPN3KE_10G_RX_STATS_CLR, + mac_num, + eth_group_sel); + tmp |= 0x00000001; + (*hw->f_mac_write)(hw, + tmp, + IPN3KE_10G_RX_STATS_CLR, + mac_num, + eth_group_sel); +} + +static inline void ipn3ke_xmac_tx_clr_25G_stcs +(struct ipn3ke_hw *hw, uint32_t mac_num, uint32_t eth_group_sel) +{ + uint32_t tmp = 0x00000001; + + /* Bit[0]: Software can set this bit to the value of 1 + * to reset all of the TX statistics registers at the same time. + * This bit is selfclearing. 
+ */ + (*hw->f_mac_write)(hw, + tmp, + IPN3KE_25G_TX_STATISTICS_CONFIG, + mac_num, + eth_group_sel); +} + +static inline void ipn3ke_xmac_rx_clr_25G_stcs +(struct ipn3ke_hw *hw, uint32_t mac_num, uint32_t eth_group_sel) +{ + uint32_t tmp = 0x00000001; + + /* Bit[0]: Software can set this bit to the value of 1 + * to reset all of the RX statistics registers at the same time. + * This bit is selfclearing. + */ + (*hw->f_mac_write)(hw, + tmp, + IPN3KE_25G_RX_STATISTICS_CONFIG, + mac_num, + eth_group_sel); +} + +#endif /* _IPN3KE_ETHDEV_H_ */ diff --git a/src/spdk/dpdk/drivers/net/ipn3ke/ipn3ke_flow.c b/src/spdk/dpdk/drivers/net/ipn3ke/ipn3ke_flow.c new file mode 100644 index 000000000..f857e64af --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ipn3ke/ipn3ke_flow.c @@ -0,0 +1,1380 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019 Intel Corporation + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ipn3ke_rawdev_api.h" +#include "ipn3ke_flow.h" +#include "ipn3ke_logs.h" +#include "ipn3ke_ethdev.h" + +/** Static initializer for items. */ +#define FLOW_PATTERNS(...) \ + ((const enum rte_flow_item_type []) { \ + __VA_ARGS__, RTE_FLOW_ITEM_TYPE_END, \ + }) + +enum IPN3KE_HASH_KEY_TYPE { + IPN3KE_HASH_KEY_VXLAN, + IPN3KE_HASH_KEY_MAC, + IPN3KE_HASH_KEY_QINQ, + IPN3KE_HASH_KEY_MPLS, + IPN3KE_HASH_KEY_IP_TCP, + IPN3KE_HASH_KEY_IP_UDP, + IPN3KE_HASH_KEY_IP_NVGRE, + IPN3KE_HASH_KEY_VXLAN_IP_UDP, +}; + +struct ipn3ke_flow_parse { + uint32_t mark:1; /**< Set if the flow is marked. */ + uint32_t drop:1; /**< ACL drop. */ + uint32_t key_type:IPN3KE_FLOW_KEY_ID_BITS; + uint32_t mark_id:IPN3KE_FLOW_RESULT_UID_BITS; /**< Mark identifier. */ + uint8_t key_len; /**< Length in bit. 
*/ + uint8_t key[BITS_TO_BYTES(IPN3KE_FLOW_KEY_DATA_BITS)]; + /**< key1, key2 */ +}; + +typedef int (*pattern_filter_t)(const struct rte_flow_item patterns[], + struct rte_flow_error *error, struct ipn3ke_flow_parse *parser); + + +struct ipn3ke_flow_pattern { + const enum rte_flow_item_type *const items; + + pattern_filter_t filter; +}; + +/* + * @ RTL definition: + * typedef struct packed { + * logic [47:0] vxlan_inner_mac; + * logic [23:0] vxlan_vni; + * } Hash_Key_Vxlan_t; + * + * @ flow items: + * RTE_FLOW_ITEM_TYPE_VXLAN + * RTE_FLOW_ITEM_TYPE_ETH + */ +static int +ipn3ke_pattern_vxlan(const struct rte_flow_item patterns[], + struct rte_flow_error *error, struct ipn3ke_flow_parse *parser) +{ + const struct rte_flow_item_vxlan *vxlan = NULL; + const struct rte_flow_item_eth *eth = NULL; + const struct rte_flow_item *item; + + for (item = patterns; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { + if (/*!item->spec || item->mask || */item->last) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Only support item with 'spec'"); + return -rte_errno; + } + + switch (item->type) { + case RTE_FLOW_ITEM_TYPE_ETH: + eth = item->spec; + + rte_memcpy(&parser->key[0], + eth->src.addr_bytes, + RTE_ETHER_ADDR_LEN); + break; + + case RTE_FLOW_ITEM_TYPE_VXLAN: + vxlan = item->spec; + + rte_memcpy(&parser->key[6], vxlan->vni, 3); + break; + + default: + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Not support item type"); + return -rte_errno; + } + } + + if (vxlan != NULL && eth != NULL) { + parser->key_len = 48 + 24; + return 0; + } + + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + patterns, + "Missed some patterns"); + return -rte_errno; +} + +/* + * @ RTL definition: + * typedef struct packed { + * logic [47:0] eth_smac; + * } Hash_Key_Mac_t; + * + * @ flow items: + * RTE_FLOW_ITEM_TYPE_ETH + */ +static int +ipn3ke_pattern_mac(const struct rte_flow_item patterns[], + struct rte_flow_error *error, struct ipn3ke_flow_parse *parser) +{ + const struct rte_flow_item_eth *eth = NULL; + const struct rte_flow_item *item; + + for (item = patterns; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { + if (!item->spec || item->mask || item->last) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Only support item with 'spec'"); + return -rte_errno; + } + + switch (item->type) { + case RTE_FLOW_ITEM_TYPE_ETH: + eth = item->spec; + + rte_memcpy(parser->key, + eth->src.addr_bytes, + RTE_ETHER_ADDR_LEN); + break; + + default: + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Not support item type"); + return -rte_errno; + } + } + + if (eth != NULL) { + parser->key_len = 48; + return 0; + } + + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + patterns, + "Missed some patterns"); + return -rte_errno; +} + +/* + * @ RTL definition: + * typedef struct packed { + * logic [11:0] outer_vlan_id; + * logic [11:0] inner_vlan_id; + * } Hash_Key_QinQ_t; + * + * @ flow items: + * RTE_FLOW_ITEM_TYPE_VLAN + * RTE_FLOW_ITEM_TYPE_VLAN + */ +static int +ipn3ke_pattern_qinq(const struct rte_flow_item patterns[], + struct rte_flow_error *error, struct ipn3ke_flow_parse *parser) +{ + const struct rte_flow_item_vlan *outer_vlan = NULL; + const struct rte_flow_item_vlan *inner_vlan = NULL; + const struct rte_flow_item *item; + uint16_t tci; + + for (item = patterns; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { + if (!item->spec || item->mask || item->last) { + rte_flow_error_set(error, + 
EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Only support item with 'spec'"); + return -rte_errno; + } + + switch (item->type) { + case RTE_FLOW_ITEM_TYPE_VLAN: + if (!outer_vlan) { + outer_vlan = item->spec; + + tci = rte_be_to_cpu_16(outer_vlan->tci); + parser->key[0] = (tci & 0xff0) >> 4; + parser->key[1] |= (tci & 0x00f) << 4; + } else { + inner_vlan = item->spec; + + tci = rte_be_to_cpu_16(inner_vlan->tci); + parser->key[1] |= (tci & 0xf00) >> 8; + parser->key[2] = (tci & 0x0ff); + } + break; + + default: + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Not support item type"); + return -rte_errno; + } + } + + if (outer_vlan != NULL && inner_vlan != NULL) { + parser->key_len = 12 + 12; + return 0; + } + + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + patterns, + "Missed some patterns"); + return -rte_errno; +} + +/* + * @ RTL definition: + * typedef struct packed { + * logic [19:0] mpls_label1; + * logic [19:0] mpls_label2; + * } Hash_Key_Mpls_t; + * + * @ flow items: + * RTE_FLOW_ITEM_TYPE_MPLS + * RTE_FLOW_ITEM_TYPE_MPLS + */ +static int +ipn3ke_pattern_mpls(const struct rte_flow_item patterns[], + struct rte_flow_error *error, struct ipn3ke_flow_parse *parser) +{ + const struct rte_flow_item_mpls *mpls1 = NULL; + const struct rte_flow_item_mpls *mpls2 = NULL; + const struct rte_flow_item *item; + + for (item = patterns; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { + if (!item->spec || item->mask || item->last) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Only support item with 'spec'"); + return -rte_errno; + } + + switch (item->type) { + case RTE_FLOW_ITEM_TYPE_MPLS: + if (!mpls1) { + mpls1 = item->spec; + + parser->key[0] = mpls1->label_tc_s[0]; + parser->key[1] = mpls1->label_tc_s[1]; + parser->key[2] = mpls1->label_tc_s[2] & 0xf0; + } else { + mpls2 = item->spec; + + parser->key[2] |= + ((mpls2->label_tc_s[0] & 0xf0) >> 4); + parser->key[3] = + ((mpls2->label_tc_s[0] & 0xf) << 4) | + ((mpls2->label_tc_s[1] & 0xf0) >> 4); + parser->key[4] = + ((mpls2->label_tc_s[1] & 0xf) << 4) | + ((mpls2->label_tc_s[2] & 0xf0) >> 4); + } + break; + + default: + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Not support item type"); + return -rte_errno; + } + } + + if (mpls1 != NULL && mpls2 != NULL) { + parser->key_len = 20 + 20; + return 0; + } + + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + patterns, + "Missed some patterns"); + return -rte_errno; +} + +/* + * @ RTL definition: + * typedef struct packed { + * logic [31:0] ip_sa; + * logic [15:0] tcp_sport; + * } Hash_Key_Ip_Tcp_t; + * + * @ flow items: + * RTE_FLOW_ITEM_TYPE_IPV4 + * RTE_FLOW_ITEM_TYPE_TCP + */ +static int +ipn3ke_pattern_ip_tcp(const struct rte_flow_item patterns[], + struct rte_flow_error *error, struct ipn3ke_flow_parse *parser) +{ + const struct rte_flow_item_ipv4 *ipv4 = NULL; + const struct rte_flow_item_tcp *tcp = NULL; + const struct rte_flow_item *item; + + for (item = patterns; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { + if (!item->spec || item->mask || item->last) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Only support item with 'spec'"); + return -rte_errno; + } + + switch (item->type) { + case RTE_FLOW_ITEM_TYPE_IPV4: + ipv4 = item->spec; + + rte_memcpy(&parser->key[0], &ipv4->hdr.src_addr, 4); + break; + + case RTE_FLOW_ITEM_TYPE_TCP: + tcp = item->spec; + + rte_memcpy(&parser->key[4], &tcp->hdr.src_port, 2); + break; + + default: + 
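+			/* Hash_Key_Ip_Tcp_t carries only the 32-bit IPv4
+			 * source address plus the 16-bit TCP source port
+			 * (both copied above in network byte order), so any
+			 * other item type cannot be encoded and the whole
+			 * pattern is rejected.
+			 */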
rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Not support item type"); + return -rte_errno; + } + } + + if (ipv4 != NULL && tcp != NULL) { + parser->key_len = 32 + 16; + return 0; + } + + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + patterns, + "Missed some patterns"); + return -rte_errno; +} + +/* + * @ RTL definition: + * typedef struct packed { + * logic [31:0] ip_sa; + * logic [15:0] udp_sport; + * } Hash_Key_Ip_Udp_t; + * + * @ flow items: + * RTE_FLOW_ITEM_TYPE_IPV4 + * RTE_FLOW_ITEM_TYPE_UDP + */ +static int +ipn3ke_pattern_ip_udp(const struct rte_flow_item patterns[], + struct rte_flow_error *error, struct ipn3ke_flow_parse *parser) +{ + const struct rte_flow_item_ipv4 *ipv4 = NULL; + const struct rte_flow_item_udp *udp = NULL; + const struct rte_flow_item *item; + + for (item = patterns; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { + if (!item->spec || item->mask || item->last) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Only support item with 'spec'"); + return -rte_errno; + } + + switch (item->type) { + case RTE_FLOW_ITEM_TYPE_IPV4: + ipv4 = item->spec; + + rte_memcpy(&parser->key[0], &ipv4->hdr.src_addr, 4); + break; + + case RTE_FLOW_ITEM_TYPE_UDP: + udp = item->spec; + + rte_memcpy(&parser->key[4], &udp->hdr.src_port, 2); + break; + + default: + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Not support item type"); + return -rte_errno; + } + } + + if (ipv4 != NULL && udp != NULL) { + parser->key_len = 32 + 16; + return 0; + } + + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + patterns, + "Missed some patterns"); + return -rte_errno; +} + +/* + * @ RTL definition: + * typedef struct packed { + * logic [31:0] ip_sa; + * logic [15:0] udp_sport; + * logic [23:0] vsid; + * } Hash_Key_Ip_Nvgre_t; + * + * @ flow items: + * RTE_FLOW_ITEM_TYPE_IPV4 + * RTE_FLOW_ITEM_TYPE_UDP + * RTE_FLOW_ITEM_TYPE_NVGRE + */ +static int +ipn3ke_pattern_ip_nvgre(const struct rte_flow_item patterns[], + struct rte_flow_error *error, struct ipn3ke_flow_parse *parser) +{ + const struct rte_flow_item_nvgre *nvgre = NULL; + const struct rte_flow_item_ipv4 *ipv4 = NULL; + const struct rte_flow_item_udp *udp = NULL; + const struct rte_flow_item *item; + + for (item = patterns; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { + if (!item->spec || item->mask || item->last) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Only support item with 'spec'"); + return -rte_errno; + } + + switch (item->type) { + case RTE_FLOW_ITEM_TYPE_IPV4: + ipv4 = item->spec; + + rte_memcpy(&parser->key[0], &ipv4->hdr.src_addr, 4); + break; + + case RTE_FLOW_ITEM_TYPE_UDP: + udp = item->spec; + + rte_memcpy(&parser->key[4], &udp->hdr.src_port, 2); + break; + + case RTE_FLOW_ITEM_TYPE_NVGRE: + nvgre = item->spec; + + rte_memcpy(&parser->key[6], nvgre->tni, 3); + break; + + default: + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Not support item type"); + return -rte_errno; + } + } + + if (ipv4 != NULL && udp != NULL && nvgre != NULL) { + parser->key_len = 32 + 16 + 24; + return 0; + } + + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + patterns, + "Missed some patterns"); + return -rte_errno; +} + +/* + * @ RTL definition: + * typedef struct packed{ + * logic [23:0] vxlan_vni; + * logic [31:0] ip_sa; + * logic [15:0] udp_sport; + * } Hash_Key_Vxlan_Ip_Udp_t; + * + * @ flow items: + * RTE_FLOW_ITEM_TYPE_VXLAN + * 
RTE_FLOW_ITEM_TYPE_IPV4 + * RTE_FLOW_ITEM_TYPE_UDP + */ +static int +ipn3ke_pattern_vxlan_ip_udp(const struct rte_flow_item patterns[], + struct rte_flow_error *error, struct ipn3ke_flow_parse *parser) +{ + const struct rte_flow_item_vxlan *vxlan = NULL; + const struct rte_flow_item_ipv4 *ipv4 = NULL; + const struct rte_flow_item_udp *udp = NULL; + const struct rte_flow_item *item; + + for (item = patterns; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { + if (!item->spec || item->mask || item->last) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Only support item with 'spec'"); + return -rte_errno; + } + + switch (item->type) { + case RTE_FLOW_ITEM_TYPE_VXLAN: + vxlan = item->spec; + + rte_memcpy(&parser->key[0], vxlan->vni, 3); + break; + + case RTE_FLOW_ITEM_TYPE_IPV4: + ipv4 = item->spec; + + rte_memcpy(&parser->key[3], &ipv4->hdr.src_addr, 4); + break; + + case RTE_FLOW_ITEM_TYPE_UDP: + udp = item->spec; + + rte_memcpy(&parser->key[7], &udp->hdr.src_port, 2); + break; + + default: + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Not support item type"); + return -rte_errno; + } + } + + if (vxlan != NULL && ipv4 != NULL && udp != NULL) { + parser->key_len = 24 + 32 + 16; + return 0; + } + + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + patterns, + "Missed some patterns"); + return -rte_errno; +} + +static const struct ipn3ke_flow_pattern ipn3ke_supported_patterns[] = { + [IPN3KE_HASH_KEY_VXLAN] = { + .items = FLOW_PATTERNS(RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_ETH), + .filter = ipn3ke_pattern_vxlan, + }, + + [IPN3KE_HASH_KEY_MAC] = { + .items = FLOW_PATTERNS(RTE_FLOW_ITEM_TYPE_ETH), + .filter = ipn3ke_pattern_mac, + }, + + [IPN3KE_HASH_KEY_QINQ] = { + .items = FLOW_PATTERNS(RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_VLAN), + .filter = ipn3ke_pattern_qinq, + }, + + [IPN3KE_HASH_KEY_MPLS] = { + .items = FLOW_PATTERNS(RTE_FLOW_ITEM_TYPE_MPLS, + RTE_FLOW_ITEM_TYPE_MPLS), + .filter = ipn3ke_pattern_mpls, + }, + + [IPN3KE_HASH_KEY_IP_TCP] = { + .items = FLOW_PATTERNS(RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_TCP), + .filter = ipn3ke_pattern_ip_tcp, + }, + + [IPN3KE_HASH_KEY_IP_UDP] = { + .items = FLOW_PATTERNS(RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP), + .filter = ipn3ke_pattern_ip_udp, + }, + + [IPN3KE_HASH_KEY_IP_NVGRE] = { + .items = FLOW_PATTERNS(RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_NVGRE), + .filter = ipn3ke_pattern_ip_nvgre, + }, + + [IPN3KE_HASH_KEY_VXLAN_IP_UDP] = { + .items = FLOW_PATTERNS(RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP), + .filter = ipn3ke_pattern_vxlan_ip_udp, + }, +}; + +static int +ipn3ke_flow_convert_attributes(const struct rte_flow_attr *attr, + struct rte_flow_error *error) +{ + if (!attr) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR, + NULL, + "NULL attribute."); + return -rte_errno; + } + + if (attr->group) { + rte_flow_error_set(error, + ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_GROUP, + NULL, + "groups are not supported"); + return -rte_errno; + } + + if (attr->egress) { + rte_flow_error_set(error, + ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, + NULL, + "egress is not supported"); + return -rte_errno; + } + + if (attr->transfer) { + rte_flow_error_set(error, + ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, + NULL, + "transfer is not supported"); + return -rte_errno; + } + + if (!attr->ingress) { + rte_flow_error_set(error, + ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, + 
NULL, + "only ingress is supported"); + return -rte_errno; + } + + return 0; +} + +static int +ipn3ke_flow_convert_actions(const struct rte_flow_action actions[], + struct rte_flow_error *error, struct ipn3ke_flow_parse *parser) +{ + const struct rte_flow_action_mark *mark = NULL; + + if (!actions) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_NUM, + NULL, + "NULL action."); + return -rte_errno; + } + + for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) { + switch (actions->type) { + case RTE_FLOW_ACTION_TYPE_VOID: + break; + + case RTE_FLOW_ACTION_TYPE_MARK: + if (mark) { + rte_flow_error_set(error, + ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + actions, + "duplicated mark"); + return -rte_errno; + } + + mark = actions->conf; + if (!mark) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + actions, + "mark must be defined"); + return -rte_errno; + } else if (mark->id > IPN3KE_FLOW_RESULT_UID_MAX) { + rte_flow_error_set(error, + ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + actions, + "mark id is out of range"); + return -rte_errno; + } + + parser->mark = 1; + parser->mark_id = mark->id; + break; + + case RTE_FLOW_ACTION_TYPE_DROP: + parser->drop = 1; + break; + + default: + rte_flow_error_set(error, + ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + actions, + "invalid action"); + return -rte_errno; + } + } + + if (!parser->drop && !parser->mark) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + actions, + "no valid actions"); + return -rte_errno; + } + + return 0; +} + +static bool +ipn3ke_match_pattern(const enum rte_flow_item_type *patterns, + const struct rte_flow_item *input) +{ + const struct rte_flow_item *item = input; + + while ((*patterns == item->type) && + (*patterns != RTE_FLOW_ITEM_TYPE_END)) { + patterns++; + item++; + } + + return (*patterns == RTE_FLOW_ITEM_TYPE_END && + item->type == RTE_FLOW_ITEM_TYPE_END); +} + +static pattern_filter_t +ipn3ke_find_filter_func(const struct rte_flow_item *input, + uint32_t *idx) +{ + pattern_filter_t filter = NULL; + uint32_t i; + + for (i = 0; i < RTE_DIM(ipn3ke_supported_patterns); i++) { + if (ipn3ke_match_pattern(ipn3ke_supported_patterns[i].items, + input)) { + filter = ipn3ke_supported_patterns[i].filter; + *idx = i; + break; + } + } + + return filter; +} + +static int +ipn3ke_flow_convert_items(const struct rte_flow_item items[], + struct rte_flow_error *error, struct ipn3ke_flow_parse *parser) +{ + pattern_filter_t filter = NULL; + uint32_t idx; + + if (!items) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM_NUM, + NULL, + "NULL pattern."); + return -rte_errno; + } + + filter = ipn3ke_find_filter_func(items, &idx); + + if (!filter) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + items, + "Unsupported pattern"); + return -rte_errno; + } + + parser->key_type = idx; + + return filter(items, error, parser); +} + +/* Put the least @nbits of @data into @offset of @dst bits stream, and + * the @offset starts from MSB to LSB in each byte. 
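+ * The diagram below shows the general layout; as a worked example,
+ * copy_data_bits(dst, 0x3, 6, 4) spreads the 4-bit value 0b0011 across
+ * a byte boundary: bits 6-7 of dst[0] (the two low-order bits of that
+ * byte) are cleared to 00 and the two most-significant bits of dst[1]
+ * are set to 11, i.e. dst[1] |= 0xC0.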
+ * + * MSB LSB + * +------+------+------+------+ + * | | | | | + * +------+------+------+------+ + * ^ ^ + * |<- data: nbits ->| + * | + * offset + */ +static void +copy_data_bits(uint8_t *dst, uint64_t data, + uint32_t offset, uint8_t nbits) +{ + uint8_t set, *p = &dst[offset / BITS_PER_BYTE]; + uint8_t bits_to_set = BITS_PER_BYTE - (offset % BITS_PER_BYTE); + uint8_t mask_to_set = 0xff >> (offset % BITS_PER_BYTE); + uint32_t size = offset + nbits; + + if (nbits > (sizeof(data) * BITS_PER_BYTE)) { + IPN3KE_AFU_PMD_ERR("nbits is out of range"); + return; + } + + while (nbits - bits_to_set >= 0) { + set = data >> (nbits - bits_to_set); + + *p &= ~mask_to_set; + *p |= (set & mask_to_set); + + nbits -= bits_to_set; + bits_to_set = BITS_PER_BYTE; + mask_to_set = 0xff; + p++; + } + + if (nbits) { + uint8_t shift = BITS_PER_BYTE - (size % BITS_PER_BYTE); + + set = data << shift; + mask_to_set = 0xff << shift; + + *p &= ~mask_to_set; + *p |= (set & mask_to_set); + } +} + +static void +ipn3ke_flow_key_generation(struct ipn3ke_flow_parse *parser, + struct rte_flow *flow) +{ + uint32_t i, shift_bytes, len_in_bytes, offset; + uint64_t key; + uint8_t *dst; + + dst = flow->rule.key; + + copy_data_bits(dst, + parser->key_type, + IPN3KE_FLOW_KEY_ID_OFFSET, + IPN3KE_FLOW_KEY_ID_BITS); + + /* The MSb of key is filled to 0 when it is less than + * IPN3KE_FLOW_KEY_DATA_BITS bit. And the parsed key data is + * save as MSB byte first in the array, it needs to move + * the bits before formatting them. + */ + key = 0; + shift_bytes = 0; + len_in_bytes = BITS_TO_BYTES(parser->key_len); + offset = (IPN3KE_FLOW_KEY_DATA_OFFSET + + IPN3KE_FLOW_KEY_DATA_BITS - + parser->key_len); + + for (i = 0; i < len_in_bytes; i++) { + key = (key << 8) | parser->key[i]; + + if (++shift_bytes == sizeof(key)) { + shift_bytes = 0; + + copy_data_bits(dst, key, offset, + sizeof(key) * BITS_PER_BYTE); + offset += sizeof(key) * BITS_PER_BYTE; + key = 0; + } + } + + if (shift_bytes != 0) { + uint32_t rem_bits; + + rem_bits = parser->key_len % (sizeof(key) * BITS_PER_BYTE); + key >>= (shift_bytes * 8 - rem_bits); + copy_data_bits(dst, key, offset, rem_bits); + } +} + +static void +ipn3ke_flow_result_generation(struct ipn3ke_flow_parse *parser, + struct rte_flow *flow) +{ + uint8_t *dst; + + if (parser->drop) + return; + + dst = flow->rule.result; + + copy_data_bits(dst, + 1, + IPN3KE_FLOW_RESULT_ACL_OFFSET, + IPN3KE_FLOW_RESULT_ACL_BITS); + + copy_data_bits(dst, + parser->mark_id, + IPN3KE_FLOW_RESULT_UID_OFFSET, + IPN3KE_FLOW_RESULT_UID_BITS); +} + +#define MHL_COMMAND_TIME_COUNT 0xFFFF +#define MHL_COMMAND_TIME_INTERVAL_US 10 + +static int +ipn3ke_flow_hw_update(struct ipn3ke_hw *hw, + struct rte_flow *flow, uint32_t is_add) +{ + uint32_t *pdata = NULL; + uint32_t data; + uint32_t time_out = MHL_COMMAND_TIME_COUNT; + uint32_t i; + + IPN3KE_AFU_PMD_DEBUG("IPN3KE flow dump start\n"); + + pdata = (uint32_t *)flow->rule.key; + IPN3KE_AFU_PMD_DEBUG(" - key :"); + + for (i = 0; i < RTE_DIM(flow->rule.key); i++) + IPN3KE_AFU_PMD_DEBUG(" %02x", flow->rule.key[i]); + + for (i = 0; i < 4; i++) + IPN3KE_AFU_PMD_DEBUG(" %02x", ipn3ke_swap32(pdata[3 - i])); + IPN3KE_AFU_PMD_DEBUG("\n"); + + pdata = (uint32_t *)flow->rule.result; + IPN3KE_AFU_PMD_DEBUG(" - result:"); + + for (i = 0; i < RTE_DIM(flow->rule.result); i++) + IPN3KE_AFU_PMD_DEBUG(" %02x", flow->rule.result[i]); + + for (i = 0; i < 1; i++) + IPN3KE_AFU_PMD_DEBUG(" %02x", pdata[i]); + IPN3KE_AFU_PMD_DEBUG("IPN3KE flow dump end\n"); + + pdata = (uint32_t *)flow->rule.key; + + 
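+	/* Program the 128-bit lookup key into the four MHL key registers.
+	 * flow->rule.key[] holds the key with byte 0 as its most-significant
+	 * byte; loading it as uint32_t words reverses each word on a
+	 * little-endian CPU, so ipn3ke_swap32() restores the byte order and
+	 * the words are written in reverse index order
+	 * (pdata[3] -> KEY_0, ..., pdata[0] -> KEY_3).
+	 */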
IPN3KE_MASK_WRITE_REG(hw, + IPN3KE_CLF_MHL_KEY_0, + 0, + ipn3ke_swap32(pdata[3]), + IPN3KE_CLF_MHL_KEY_MASK); + + IPN3KE_MASK_WRITE_REG(hw, + IPN3KE_CLF_MHL_KEY_1, + 0, + ipn3ke_swap32(pdata[2]), + IPN3KE_CLF_MHL_KEY_MASK); + + IPN3KE_MASK_WRITE_REG(hw, + IPN3KE_CLF_MHL_KEY_2, + 0, + ipn3ke_swap32(pdata[1]), + IPN3KE_CLF_MHL_KEY_MASK); + + IPN3KE_MASK_WRITE_REG(hw, + IPN3KE_CLF_MHL_KEY_3, + 0, + ipn3ke_swap32(pdata[0]), + IPN3KE_CLF_MHL_KEY_MASK); + + pdata = (uint32_t *)flow->rule.result; + IPN3KE_MASK_WRITE_REG(hw, + IPN3KE_CLF_MHL_RES, + 0, + ipn3ke_swap32(pdata[0]), + IPN3KE_CLF_MHL_RES_MASK); + + /* insert/delete the key and result */ + data = 0; + data = IPN3KE_MASK_READ_REG(hw, + IPN3KE_CLF_MHL_MGMT_CTRL, + 0, + 0x80000000); + time_out = MHL_COMMAND_TIME_COUNT; + while (IPN3KE_BIT_ISSET(data, IPN3KE_CLF_MHL_MGMT_CTRL_BIT_BUSY) && + (time_out > 0)) { + data = IPN3KE_MASK_READ_REG(hw, + IPN3KE_CLF_MHL_MGMT_CTRL, + 0, + 0x80000000); + time_out--; + rte_delay_us(MHL_COMMAND_TIME_INTERVAL_US); + } + if (!time_out) + return -1; + if (is_add) + IPN3KE_MASK_WRITE_REG(hw, + IPN3KE_CLF_MHL_MGMT_CTRL, + 0, + IPN3KE_CLF_MHL_MGMT_CTRL_INSERT, + 0x3); + else + IPN3KE_MASK_WRITE_REG(hw, + IPN3KE_CLF_MHL_MGMT_CTRL, + 0, + IPN3KE_CLF_MHL_MGMT_CTRL_DELETE, + 0x3); + + return 0; +} + +static int +ipn3ke_flow_hw_flush(struct ipn3ke_hw *hw) +{ + uint32_t data; + uint32_t time_out = MHL_COMMAND_TIME_COUNT; + + /* flush the MHL lookup table */ + data = 0; + data = IPN3KE_MASK_READ_REG(hw, + IPN3KE_CLF_MHL_MGMT_CTRL, + 0, + 0x80000000); + time_out = MHL_COMMAND_TIME_COUNT; + while (IPN3KE_BIT_ISSET(data, IPN3KE_CLF_MHL_MGMT_CTRL_BIT_BUSY) && + (time_out > 0)) { + data = IPN3KE_MASK_READ_REG(hw, + IPN3KE_CLF_MHL_MGMT_CTRL, + 0, + 0x80000000); + time_out--; + rte_delay_us(MHL_COMMAND_TIME_INTERVAL_US); + } + if (!time_out) + return -1; + IPN3KE_MASK_WRITE_REG(hw, + IPN3KE_CLF_MHL_MGMT_CTRL, + 0, + IPN3KE_CLF_MHL_MGMT_CTRL_FLUSH, + 0x3); + + return 0; +} + +static void +ipn3ke_flow_convert_finalise(struct ipn3ke_hw *hw, + struct ipn3ke_flow_parse *parser, struct rte_flow *flow) +{ + ipn3ke_flow_key_generation(parser, flow); + ipn3ke_flow_result_generation(parser, flow); + ipn3ke_flow_hw_update(hw, flow, 1); +} + +static int +ipn3ke_flow_convert(const struct rte_flow_attr *attr, + const struct rte_flow_item items[], + const struct rte_flow_action actions[], struct rte_flow_error *error, + struct ipn3ke_flow_parse *parser) +{ + int ret; + + ret = ipn3ke_flow_convert_attributes(attr, error); + if (ret) + return ret; + + ret = ipn3ke_flow_convert_actions(actions, error, parser); + if (ret) + return ret; + + ret = ipn3ke_flow_convert_items(items, error, parser); + if (ret) + return ret; + + return 0; +} + +static int +ipn3ke_flow_validate(__rte_unused struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], struct rte_flow_error *error) +{ + struct ipn3ke_flow_parse parser = {0}; + return ipn3ke_flow_convert(attr, pattern, actions, error, &parser); +} + +static struct rte_flow * +ipn3ke_flow_create(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], struct rte_flow_error *error) +{ + struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev); + struct ipn3ke_flow_parse parser = {0}; + struct rte_flow *flow; + int ret; + + if (hw->flow_num_entries == hw->flow_max_entries) { + rte_flow_error_set(error, + ENOBUFS, + RTE_FLOW_ERROR_TYPE_HANDLE, + NULL, + 
"The flow table is full."); + return NULL; + } + + ret = ipn3ke_flow_convert(attr, pattern, actions, error, &parser); + if (ret < 0) { + rte_flow_error_set(error, + -ret, + RTE_FLOW_ERROR_TYPE_HANDLE, + NULL, + "Failed to create flow."); + return NULL; + } + + flow = rte_zmalloc("ipn3ke_flow", sizeof(struct rte_flow), 0); + if (!flow) { + rte_flow_error_set(error, + ENOMEM, + RTE_FLOW_ERROR_TYPE_HANDLE, + NULL, + "Failed to allocate memory"); + return flow; + } + + ipn3ke_flow_convert_finalise(hw, &parser, flow); + + TAILQ_INSERT_TAIL(&hw->flow_list, flow, next); + + return flow; +} + +static int +ipn3ke_flow_destroy(struct rte_eth_dev *dev, + struct rte_flow *flow, struct rte_flow_error *error) +{ + struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev); + int ret = 0; + + ret = ipn3ke_flow_hw_update(hw, flow, 0); + if (!ret) { + TAILQ_REMOVE(&hw->flow_list, flow, next); + rte_free(flow); + } else { + rte_flow_error_set(error, + -ret, + RTE_FLOW_ERROR_TYPE_HANDLE, + NULL, + "Failed to destroy flow."); + } + + return ret; +} + +static int +ipn3ke_flow_flush(struct rte_eth_dev *dev, + __rte_unused struct rte_flow_error *error) +{ + struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev); + struct rte_flow *flow, *temp; + + TAILQ_FOREACH_SAFE(flow, &hw->flow_list, next, temp) { + TAILQ_REMOVE(&hw->flow_list, flow, next); + rte_free(flow); + } + + return ipn3ke_flow_hw_flush(hw); +} + +int ipn3ke_flow_init(void *dev) +{ + struct ipn3ke_hw *hw = (struct ipn3ke_hw *)dev; + uint32_t data; + + /* disable rx classifier bypass */ + IPN3KE_MASK_WRITE_REG(hw, + IPN3KE_CLF_RX_TEST, + 0, 0, 0x1); + + data = 0; + data = IPN3KE_MASK_READ_REG(hw, + IPN3KE_CLF_RX_TEST, + 0, + 0x1); + IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_RX_TEST: %x\n", data); + + /* configure base mac address */ + IPN3KE_MASK_WRITE_REG(hw, + IPN3KE_CLF_BASE_DST_MAC_ADDR_HI, + 0, + 0x2457, + 0xFFFF); + + data = 0; + data = IPN3KE_MASK_READ_REG(hw, + IPN3KE_CLF_BASE_DST_MAC_ADDR_HI, + 0, + 0xFFFF); + IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_BASE_DST_MAC_ADDR_HI: %x\n", data); + + IPN3KE_MASK_WRITE_REG(hw, + IPN3KE_CLF_BASE_DST_MAC_ADDR_LOW, + 0, + 0x9bdf1000, + 0xFFFFFFFF); + + data = 0; + data = IPN3KE_MASK_READ_REG(hw, + IPN3KE_CLF_BASE_DST_MAC_ADDR_LOW, + 0, + 0xFFFFFFFF); + IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_BASE_DST_MAC_ADDR_LOW: %x\n", data); + + + /* configure hash lookup rules enable */ + IPN3KE_MASK_WRITE_REG(hw, + IPN3KE_CLF_LKUP_ENABLE, + 0, + 0xFD, + 0xFF); + + data = 0; + data = IPN3KE_MASK_READ_REG(hw, + IPN3KE_CLF_LKUP_ENABLE, + 0, + 0xFF); + IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_LKUP_ENABLE: %x\n", data); + + + /* configure rx parse config, settings associatied with VxLAN */ + IPN3KE_MASK_WRITE_REG(hw, + IPN3KE_CLF_RX_PARSE_CFG, + 0, + 0x212b5, + 0x3FFFF); + + data = 0; + data = IPN3KE_MASK_READ_REG(hw, + IPN3KE_CLF_RX_PARSE_CFG, + 0, + 0x3FFFF); + IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_RX_PARSE_CFG: %x\n", data); + + + /* configure QinQ S-Tag */ + IPN3KE_MASK_WRITE_REG(hw, + IPN3KE_CLF_QINQ_STAG, + 0, + 0x88a8, + 0xFFFF); + + data = 0; + data = IPN3KE_MASK_READ_REG(hw, + IPN3KE_CLF_QINQ_STAG, + 0, + 0xFFFF); + IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_QINQ_STAG: %x\n", data); + + + /* configure gen ctrl */ + IPN3KE_MASK_WRITE_REG(hw, + IPN3KE_CLF_MHL_GEN_CTRL, + 0, + 0x3, + 0x3); + + data = 0; + data = IPN3KE_MASK_READ_REG(hw, + IPN3KE_CLF_MHL_GEN_CTRL, + 0, + 0x1F); + IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_MHL_GEN_CTRL: %x\n", data); + + + /* clear monitoring register */ + IPN3KE_MASK_WRITE_REG(hw, + IPN3KE_CLF_MHL_MON_0, + 0, + 0xFFFFFFFF, + 0xFFFFFFFF); + + data = 
0; + data = IPN3KE_MASK_READ_REG(hw, + IPN3KE_CLF_MHL_MON_0, + 0, + 0xFFFFFFFF); + IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_MHL_MON_0: %x\n", data); + + + ipn3ke_flow_hw_flush(hw); + + TAILQ_INIT(&hw->flow_list); + hw->flow_max_entries = IPN3KE_MASK_READ_REG(hw, + IPN3KE_CLF_EM_NUM, + 0, + 0xFFFFFFFF); + IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_EN_NUM: %x\n", hw->flow_max_entries); + hw->flow_num_entries = 0; + + return 0; +} + +const struct rte_flow_ops ipn3ke_flow_ops = { + .validate = ipn3ke_flow_validate, + .create = ipn3ke_flow_create, + .destroy = ipn3ke_flow_destroy, + .flush = ipn3ke_flow_flush, +}; diff --git a/src/spdk/dpdk/drivers/net/ipn3ke/ipn3ke_flow.h b/src/spdk/dpdk/drivers/net/ipn3ke/ipn3ke_flow.h new file mode 100644 index 000000000..ef1a61f60 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ipn3ke/ipn3ke_flow.h @@ -0,0 +1,106 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019 Intel Corporation + */ + +#ifndef _IPN3KE_FLOW_H_ +#define _IPN3KE_FLOW_H_ + +/** + * Expand the length to DWORD alignment with 'Unused' field. + * + * FLOW KEY: + * | Unused |Ruler id (id) | Key1 Key2 … (data) | + * |--------+---------------+--------------------| + * | 17bits | 3 bits | Total 108 bits | + * MSB ---> LSB + * + * Note: And the MSb of key data is filled to 0 when it is less + * than 108 bit. + */ +#define IPN3KE_FLOW_KEY_UNUSED_BITS 17 +#define IPN3KE_FLOW_KEY_ID_BITS 3 +#define IPN3KE_FLOW_KEY_DATA_BITS 108 + +#define IPN3KE_FLOW_KEY_TOTAL_BITS \ + (IPN3KE_FLOW_KEY_UNUSED_BITS + \ + IPN3KE_FLOW_KEY_ID_BITS + \ + IPN3KE_FLOW_KEY_DATA_BITS) + +#define IPN3KE_FLOW_KEY_ID_OFFSET \ + (IPN3KE_FLOW_KEY_UNUSED_BITS) + +#define IPN3KE_FLOW_KEY_DATA_OFFSET \ + (IPN3KE_FLOW_KEY_ID_OFFSET + IPN3KE_FLOW_KEY_ID_BITS) + +/** + * Expand the length to DWORD alignment with 'Unused' field. + * + * FLOW RESULT: + * | Unused | enable (acl) | uid | + * |---------+--------------+--------------| + * | 15 bits | 1 bit | 16 bits | + * MSB ---> LSB + */ + +#define IPN3KE_FLOW_RESULT_UNUSED_BITS 15 +#define IPN3KE_FLOW_RESULT_ACL_BITS 1 +#define IPN3KE_FLOW_RESULT_UID_BITS 16 + +#define IPN3KE_FLOW_RESULT_TOTAL_BITS \ + (IPN3KE_FLOW_RESULT_UNUSED_BITS + \ + IPN3KE_FLOW_RESULT_ACL_BITS + \ + IPN3KE_FLOW_RESULT_UID_BITS) + +#define IPN3KE_FLOW_RESULT_ACL_OFFSET \ + (IPN3KE_FLOW_RESULT_UNUSED_BITS) + +#define IPN3KE_FLOW_RESULT_UID_OFFSET \ + (IPN3KE_FLOW_RESULT_ACL_OFFSET + IPN3KE_FLOW_RESULT_ACL_BITS) + +#define IPN3KE_FLOW_RESULT_UID_MAX \ + ((1UL << IPN3KE_FLOW_RESULT_UID_BITS) - 1) + +#ifndef BITS_PER_BYTE +#define BITS_PER_BYTE 8 +#endif +#define BITS_TO_BYTES(bits) \ + (((bits) + BITS_PER_BYTE - 1) / BITS_PER_BYTE) + +struct ipn3ke_flow_rule { + uint8_t key[BITS_TO_BYTES(IPN3KE_FLOW_KEY_TOTAL_BITS)]; + uint8_t result[BITS_TO_BYTES(IPN3KE_FLOW_RESULT_TOTAL_BITS)]; +}; + +struct rte_flow { + TAILQ_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. 
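+				      * Flows are chained on the per-device
+				      * hw->flow_list TAILQ by
+				      * ipn3ke_flow_create().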
*/ + + struct ipn3ke_flow_rule rule; +}; + +TAILQ_HEAD(ipn3ke_flow_list, rte_flow); + +static inline uint16_t ipn3ke_swap16(uint16_t x) +{ + return ((x & 0xff) << 8) | ((x >> 8) & 0xff); +} + +static inline uint32_t ipn3ke_swap32(uint32_t x) +{ + uint32_t high, low; + uint32_t high1, low1; + + high = (x >> 16) & 0xffff; + low = x & 0xffff; + high1 = ipn3ke_swap16(low); + high1 = high1 << 16; + low1 = ipn3ke_swap16(high); + low1 = low1 & 0xffff; + + return high1 | low1; +} + +extern const struct rte_flow_ops ipn3ke_flow_ops; + +int ipn3ke_flow_init(void *dev); + +#endif /* _IPN3KE_FLOW_H_ */ diff --git a/src/spdk/dpdk/drivers/net/ipn3ke/ipn3ke_logs.h b/src/spdk/dpdk/drivers/net/ipn3ke/ipn3ke_logs.h new file mode 100644 index 000000000..147fd8039 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ipn3ke/ipn3ke_logs.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019 Intel Corporation + */ + +#ifndef _IPN3KE_LOGS_H_ +#define _IPN3KE_LOGS_H_ + +#include + +extern int ipn3ke_afu_logtype; + +#define IPN3KE_AFU_PMD_LOG(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, ipn3ke_afu_logtype, "%s(): " fmt "\n", \ + __func__, ##args) + +#define IPN3KE_AFU_PMD_FUNC_TRACE() IPN3KE_AFU_PMD_LOG(DEBUG, ">>") + +#define IPN3KE_AFU_PMD_DEBUG(fmt, args...) \ + IPN3KE_AFU_PMD_LOG(DEBUG, fmt, ## args) + +#define IPN3KE_AFU_PMD_INFO(fmt, args...) \ + IPN3KE_AFU_PMD_LOG(INFO, fmt, ## args) + +#define IPN3KE_AFU_PMD_ERR(fmt, args...) \ + IPN3KE_AFU_PMD_LOG(ERR, fmt, ## args) + +#define IPN3KE_AFU_PMD_WARN(fmt, args...) \ + IPN3KE_AFU_PMD_LOG(WARNING, fmt, ## args) + +#endif /* _IPN3KE_LOGS_H_ */ diff --git a/src/spdk/dpdk/drivers/net/ipn3ke/ipn3ke_rawdev_api.h b/src/spdk/dpdk/drivers/net/ipn3ke/ipn3ke_rawdev_api.h new file mode 100644 index 000000000..fd2393fe6 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ipn3ke/ipn3ke_rawdev_api.h @@ -0,0 +1,74 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2018 Intel Corporation + */ + +#ifndef _IFPGA_RAWDEV_API_H_ +#define _IFPGA_RAWDEV_API_H_ + +#include + +enum ifpga_rawdev_retimer_media_type { + IFPGA_RAWDEV_RETIMER_MEDIA_TYPE_UNKNOWN = 0, + IFPGA_RAWDEV_RETIMER_MEDIA_TYPE_100GBASE_LR4, + IFPGA_RAWDEV_RETIMER_MEDIA_TYPE_100GBASE_SR4, + IFPGA_RAWDEV_RETIMER_MEDIA_TYPE_100GBASE_CR4, + IFPGA_RAWDEV_RETIMER_MEDIA_TYPE_40GBASE_LR4, + IFPGA_RAWDEV_RETIMER_MEDIA_TYPE_400GBASE_SR4, + IFPGA_RAWDEV_RETIMER_MEDIA_TYPE_40GBASE_CR4, + IFPGA_RAWDEV_RETIMER_MEDIA_TYPE_25GBASE_SR, + IFPGA_RAWDEV_RETIMER_MEDIA_TYPE_25GBASE_CR, + IFPGA_RAWDEV_RETIMER_MEDIA_TYPE_10GBASE_LR, + IFPGA_RAWDEV_RETIMER_MEDIA_TYPE_10GBASE_SR, + IFPGA_RAWDEV_RETIMER_MEDIA_TYPE_10GBASE_DAC, + IFPGA_RAWDEV_RETIMER_MEDIA_TYPE_DEFAULT +}; + +enum ifpga_rawdev_retimer_mac_type { + IFPGA_RAWDEV_RETIMER_MAC_TYPE_UNKNOWN = 0, + IFPGA_RAWDEV_RETIMER_MAC_TYPE_100GE_CAUI, + IFPGA_RAWDEVG_RETIMER_MAC_TYPE_40GE_XLAUI, + IFPGA_RAWDEV_RETIMER_MAC_TYPE_25GE_25GAUI, + IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI, + IFPGA_RAWDEV_RETIMER_MAC_TYPE_DEFAULT +}; + +#define IFPGA_RAWDEV_LINK_SPEED_10GB_SHIFT 0x0 +#define IFPGA_RAWDEV_LINK_SPEED_40GB_SHIFT 0x1 +#define IFPGA_RAWDEV_LINK_SPEED_25GB_SHIFT 0x2 + +enum ifpga_rawdev_link_speed { + IFPGA_RAWDEV_LINK_SPEED_UNKNOWN = 0, + IFPGA_RAWDEV_LINK_SPEED_10GB = + (1 << IFPGA_RAWDEV_LINK_SPEED_10GB_SHIFT), + IFPGA_RAWDEV_LINK_SPEED_40GB = + (1 << IFPGA_RAWDEV_LINK_SPEED_40GB_SHIFT), + IFPGA_RAWDEV_LINK_SPEED_25GB = + (1 << IFPGA_RAWDEV_LINK_SPEED_25GB_SHIFT), +}; + +struct ifpga_rawdevg_retimer_info { + int retimer_num; + int port_num; + enum 
ifpga_rawdev_retimer_media_type media_type; + enum ifpga_rawdev_retimer_mac_type mac_type; +}; + +struct ifpga_rawdevg_link_info { + int port; + int link_up; + enum ifpga_rawdev_link_speed link_speed; +}; + +struct ipn3ke_pub_func { + struct ifpga_rawdev *(*get_ifpga_rawdev)(const struct rte_rawdev *rdv); + int (*set_i40e_sw_dev)(uint16_t port_id, struct rte_eth_dev *sw_dev); +}; + +/** + * @internal + * The publid functions of bridge PAC N3000 FPGA and I40e. + */ +extern struct ipn3ke_pub_func ipn3ke_bridge_func; + + +#endif /* _IFPGA_RAWDEV_H_ */ diff --git a/src/spdk/dpdk/drivers/net/ipn3ke/ipn3ke_representor.c b/src/spdk/dpdk/drivers/net/ipn3ke/ipn3ke_representor.c new file mode 100644 index 000000000..b673c4914 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ipn3ke/ipn3ke_representor.c @@ -0,0 +1,2985 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019 Intel Corporation + */ + +#include +#include + +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "ipn3ke_rawdev_api.h" +#include "ipn3ke_flow.h" +#include "ipn3ke_logs.h" +#include "ipn3ke_ethdev.h" + +static int ipn3ke_rpst_scan_num; +static pthread_t ipn3ke_rpst_scan_thread; + +/** Double linked list of representor port. */ +TAILQ_HEAD(ipn3ke_rpst_list, ipn3ke_rpst); + +static struct ipn3ke_rpst_list ipn3ke_rpst_list = + TAILQ_HEAD_INITIALIZER(ipn3ke_rpst_list); + +static rte_spinlock_t ipn3ke_link_notify_list_lk = RTE_SPINLOCK_INITIALIZER; + +static int +ipn3ke_rpst_link_check(struct ipn3ke_rpst *rpst); + +static int +ipn3ke_rpst_dev_infos_get(struct rte_eth_dev *ethdev, + struct rte_eth_dev_info *dev_info) +{ + struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(ethdev); + struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(ethdev); + + dev_info->speed_capa = + (hw->retimer.mac_type == + IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) ? + ETH_LINK_SPEED_10G : + ((hw->retimer.mac_type == + IFPGA_RAWDEV_RETIMER_MAC_TYPE_25GE_25GAUI) ? 
+ ETH_LINK_SPEED_25G : + ETH_LINK_SPEED_AUTONEG); + + dev_info->max_rx_queues = 1; + dev_info->max_tx_queues = 1; + dev_info->min_rx_bufsize = IPN3KE_AFU_BUF_SIZE_MIN; + dev_info->max_rx_pktlen = IPN3KE_AFU_FRAME_SIZE_MAX; + dev_info->max_mac_addrs = hw->port_num; + dev_info->max_vfs = 0; + dev_info->default_txconf = (struct rte_eth_txconf) { + .offloads = 0, + }; + dev_info->rx_queue_offload_capa = 0; + dev_info->rx_offload_capa = + DEV_RX_OFFLOAD_VLAN_STRIP | + DEV_RX_OFFLOAD_QINQ_STRIP | + DEV_RX_OFFLOAD_IPV4_CKSUM | + DEV_RX_OFFLOAD_UDP_CKSUM | + DEV_RX_OFFLOAD_TCP_CKSUM | + DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | + DEV_RX_OFFLOAD_VLAN_EXTEND | + DEV_RX_OFFLOAD_VLAN_FILTER | + DEV_RX_OFFLOAD_JUMBO_FRAME; + + dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE; + dev_info->tx_offload_capa = + DEV_TX_OFFLOAD_VLAN_INSERT | + DEV_TX_OFFLOAD_QINQ_INSERT | + DEV_TX_OFFLOAD_IPV4_CKSUM | + DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_TCP_CKSUM | + DEV_TX_OFFLOAD_SCTP_CKSUM | + DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | + DEV_TX_OFFLOAD_TCP_TSO | + DEV_TX_OFFLOAD_VXLAN_TNL_TSO | + DEV_TX_OFFLOAD_GRE_TNL_TSO | + DEV_TX_OFFLOAD_IPIP_TNL_TSO | + DEV_TX_OFFLOAD_GENEVE_TNL_TSO | + DEV_TX_OFFLOAD_MULTI_SEGS | + dev_info->tx_queue_offload_capa; + + dev_info->dev_capa = + RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP | + RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP; + + dev_info->switch_info.name = ethdev->device->name; + dev_info->switch_info.domain_id = rpst->switch_domain_id; + dev_info->switch_info.port_id = rpst->port_id; + + return 0; +} + +static int +ipn3ke_rpst_dev_configure(__rte_unused struct rte_eth_dev *dev) +{ + return 0; +} + +static int +ipn3ke_rpst_dev_start(struct rte_eth_dev *dev) +{ + struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev); + struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(dev); + struct rte_rawdev *rawdev; + uint64_t base_mac; + uint32_t val; + char attr_name[IPN3KE_RAWDEV_ATTR_LEN_MAX]; + + rawdev = hw->rawdev; + + memset(attr_name, 0, sizeof(attr_name)); + snprintf(attr_name, IPN3KE_RAWDEV_ATTR_LEN_MAX, "%s", + "LineSideBaseMAC"); + rawdev->dev_ops->attr_get(rawdev, attr_name, &base_mac); + rte_ether_addr_copy((struct rte_ether_addr *)&base_mac, + &rpst->mac_addr); + + rte_ether_addr_copy(&rpst->mac_addr, &dev->data->mac_addrs[0]); + dev->data->mac_addrs->addr_bytes[RTE_ETHER_ADDR_LEN - 1] = + (uint8_t)rpst->port_id + 1; + + if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) { + /* Set mac address */ + rte_memcpy(((char *)(&val)), + (char *)&dev->data->mac_addrs->addr_bytes[0], + sizeof(uint32_t)); + (*hw->f_mac_write)(hw, + val, + IPN3KE_MAC_PRIMARY_MAC_ADDR0, + rpst->port_id, + 0); + rte_memcpy(((char *)(&val)), + (char *)&dev->data->mac_addrs->addr_bytes[4], + sizeof(uint16_t)); + (*hw->f_mac_write)(hw, + val, + IPN3KE_MAC_PRIMARY_MAC_ADDR1, + rpst->port_id, + 0); + + /* Enable the TX path */ + ipn3ke_xmac_tx_enable(hw, rpst->port_id, 0); + + /* Disables source address override */ + ipn3ke_xmac_smac_ovd_dis(hw, rpst->port_id, 0); + + /* Enable the RX path */ + ipn3ke_xmac_rx_enable(hw, rpst->port_id, 0); + + /* Clear line side TX statistics counters */ + ipn3ke_xmac_tx_clr_10G_stcs(hw, rpst->port_id, 0); + + /* Clear line side RX statistics counters */ + ipn3ke_xmac_rx_clr_10G_stcs(hw, rpst->port_id, 0); + + /* Clear NIC side TX statistics counters */ + ipn3ke_xmac_tx_clr_10G_stcs(hw, rpst->port_id, 1); + + /* Clear NIC side RX statistics counters */ + ipn3ke_xmac_rx_clr_10G_stcs(hw, rpst->port_id, 1); + } else if (hw->retimer.mac_type == + 
IFPGA_RAWDEV_RETIMER_MAC_TYPE_25GE_25GAUI) { + /* Clear line side TX statistics counters */ + ipn3ke_xmac_tx_clr_25G_stcs(hw, rpst->port_id, 0); + + /* Clear line side RX statistics counters */ + ipn3ke_xmac_rx_clr_25G_stcs(hw, rpst->port_id, 0); + + /* Clear NIC side TX statistics counters */ + ipn3ke_xmac_tx_clr_25G_stcs(hw, rpst->port_id, 1); + + /* Clear NIC side RX statistics counters */ + ipn3ke_xmac_rx_clr_25G_stcs(hw, rpst->port_id, 1); + } + + ipn3ke_rpst_link_update(dev, 0); + + return 0; +} + +static void +ipn3ke_rpst_dev_stop(struct rte_eth_dev *dev) +{ + struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev); + struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(dev); + + if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) { + /* Disable the TX path */ + ipn3ke_xmac_tx_disable(hw, rpst->port_id, 0); + + /* Disable the RX path */ + ipn3ke_xmac_rx_disable(hw, rpst->port_id, 0); + } +} + +static void +ipn3ke_rpst_dev_close(struct rte_eth_dev *dev) +{ + struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev); + struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(dev); + + if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) { + /* Disable the TX path */ + ipn3ke_xmac_tx_disable(hw, rpst->port_id, 0); + + /* Disable the RX path */ + ipn3ke_xmac_rx_disable(hw, rpst->port_id, 0); + } +} + +/* + * Reset PF device only to re-initialize resources in PMD layer + */ +static int +ipn3ke_rpst_dev_reset(struct rte_eth_dev *dev) +{ + struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev); + struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(dev); + + if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) { + /* Disable the TX path */ + ipn3ke_xmac_tx_disable(hw, rpst->port_id, 0); + + /* Disable the RX path */ + ipn3ke_xmac_rx_disable(hw, rpst->port_id, 0); + } + + return 0; +} + +static int +ipn3ke_rpst_rx_queue_start(__rte_unused struct rte_eth_dev *dev, + __rte_unused uint16_t rx_queue_id) +{ + return 0; +} + +static int +ipn3ke_rpst_rx_queue_stop(__rte_unused struct rte_eth_dev *dev, + __rte_unused uint16_t rx_queue_id) +{ + return 0; +} + +static int +ipn3ke_rpst_tx_queue_start(__rte_unused struct rte_eth_dev *dev, + __rte_unused uint16_t tx_queue_id) +{ + return 0; +} + +static int +ipn3ke_rpst_tx_queue_stop(__rte_unused struct rte_eth_dev *dev, + __rte_unused uint16_t tx_queue_id) +{ + return 0; +} + +static int +ipn3ke_rpst_rx_queue_setup(__rte_unused struct rte_eth_dev *dev, + __rte_unused uint16_t queue_idx, __rte_unused uint16_t nb_desc, + __rte_unused unsigned int socket_id, + __rte_unused const struct rte_eth_rxconf *rx_conf, + __rte_unused struct rte_mempool *mp) +{ + return 0; +} + +static void +ipn3ke_rpst_rx_queue_release(__rte_unused void *rxq) +{ +} + +static int +ipn3ke_rpst_tx_queue_setup(__rte_unused struct rte_eth_dev *dev, + __rte_unused uint16_t queue_idx, __rte_unused uint16_t nb_desc, + __rte_unused unsigned int socket_id, + __rte_unused const struct rte_eth_txconf *tx_conf) +{ + return 0; +} + +static void +ipn3ke_rpst_tx_queue_release(__rte_unused void *txq) +{ +} + +/* Statistics collected by each port, VSI, VEB, and S-channel */ +struct ipn3ke_rpst_eth_stats { + uint64_t tx_bytes; /* gotc */ + uint64_t tx_multicast; /* mptc */ + uint64_t tx_broadcast; /* bptc */ + uint64_t tx_unicast; /* uptc */ + uint64_t tx_discards; /* tdpc */ + uint64_t tx_errors; /* tepc */ + uint64_t rx_bytes; /* gorc */ + uint64_t rx_multicast; /* mprc */ + uint64_t rx_broadcast; /* bprc */ + uint64_t rx_unicast; /* uprc */ + uint64_t 
rx_discards; /* rdpc */ + uint64_t rx_unknown_protocol; /* rupp */ +}; + +/* store statistics names and its offset in stats structure */ +struct ipn3ke_rpst_xstats_name_offset { + char name[RTE_ETH_XSTATS_NAME_SIZE]; + unsigned int offset; +}; + +static const struct ipn3ke_rpst_xstats_name_offset +ipn3ke_rpst_stats_strings[] = { + {"tx_multicast_packets", offsetof(struct ipn3ke_rpst_eth_stats, + tx_multicast)}, + {"tx_broadcast_packets", offsetof(struct ipn3ke_rpst_eth_stats, + tx_broadcast)}, + {"tx_unicast_packets", offsetof(struct ipn3ke_rpst_eth_stats, + tx_unicast)}, + {"tx_dropped_packets", offsetof(struct ipn3ke_rpst_eth_stats, + tx_discards)}, + {"rx_multicast_packets", offsetof(struct ipn3ke_rpst_eth_stats, + rx_multicast)}, + {"rx_broadcast_packets", offsetof(struct ipn3ke_rpst_eth_stats, + rx_broadcast)}, + {"rx_unicast_packets", offsetof(struct ipn3ke_rpst_eth_stats, + rx_unicast)}, + {"rx_dropped_packets", offsetof(struct ipn3ke_rpst_eth_stats, + rx_discards)}, + {"rx_unknown_protocol_packets", offsetof(struct ipn3ke_rpst_eth_stats, + rx_unknown_protocol)}, +}; + +#define IPN3KE_RPST_ETH_XSTATS_CNT (sizeof(ipn3ke_rpst_stats_strings) / \ + sizeof(ipn3ke_rpst_stats_strings[0])) + +#define IPN3KE_RPST_PRIO_XSTATS_CNT 8 + +/* Statistics collected by the MAC */ +struct ipn3ke_rpst_hw_port_stats { + /* eth stats collected by the port */ + struct ipn3ke_rpst_eth_stats eth; + + /* additional port specific stats */ + uint64_t tx_dropped_link_down; + uint64_t crc_errors; + uint64_t illegal_bytes; + uint64_t error_bytes; + uint64_t mac_local_faults; + uint64_t mac_remote_faults; + uint64_t rx_length_errors; + uint64_t link_xon_rx; + uint64_t link_xoff_rx; + uint64_t priority_xon_rx[IPN3KE_RPST_PRIO_XSTATS_CNT]; + uint64_t priority_xoff_rx[IPN3KE_RPST_PRIO_XSTATS_CNT]; + uint64_t link_xon_tx; + uint64_t link_xoff_tx; + uint64_t priority_xon_tx[IPN3KE_RPST_PRIO_XSTATS_CNT]; + uint64_t priority_xoff_tx[IPN3KE_RPST_PRIO_XSTATS_CNT]; + uint64_t priority_xon_2_xoff[IPN3KE_RPST_PRIO_XSTATS_CNT]; + uint64_t rx_size_64; + uint64_t rx_size_65_127; + uint64_t rx_size_128_255; + uint64_t rx_size_256_511; + uint64_t rx_size_512_1023; + uint64_t rx_size_1024_1518; + uint64_t rx_size_big; + uint64_t rx_undersize; + uint64_t rx_fragments; + uint64_t rx_oversize; + uint64_t rx_jabber; + uint64_t tx_size_64; + uint64_t tx_size_65_127; + uint64_t tx_size_128_255; + uint64_t tx_size_256_511; + uint64_t tx_size_512_1023; + uint64_t tx_size_1024_1518; + uint64_t tx_size_1519_to_max; + uint64_t mac_short_packet_dropped; + uint64_t checksum_error; + /* flow director stats */ + uint64_t fd_atr_match; + uint64_t fd_sb_match; + uint64_t fd_atr_tunnel_match; + uint32_t fd_atr_status; + uint32_t fd_sb_status; + /* EEE LPI */ + uint32_t tx_lpi_status; + uint32_t rx_lpi_status; + uint64_t tx_lpi_count; + uint64_t rx_lpi_count; +}; + +static const struct ipn3ke_rpst_xstats_name_offset +ipn3ke_rpst_hw_port_strings[] = { + {"tx_link_down_dropped", offsetof(struct ipn3ke_rpst_hw_port_stats, + tx_dropped_link_down)}, + {"rx_crc_errors", offsetof(struct ipn3ke_rpst_hw_port_stats, + crc_errors)}, + {"rx_illegal_byte_errors", offsetof(struct ipn3ke_rpst_hw_port_stats, + illegal_bytes)}, + {"rx_error_bytes", offsetof(struct ipn3ke_rpst_hw_port_stats, + error_bytes)}, + {"mac_local_errors", offsetof(struct ipn3ke_rpst_hw_port_stats, + mac_local_faults)}, + {"mac_remote_errors", offsetof(struct ipn3ke_rpst_hw_port_stats, + mac_remote_faults)}, + {"rx_length_errors", offsetof(struct ipn3ke_rpst_hw_port_stats, + 
rx_length_errors)}, + {"tx_xon_packets", offsetof(struct ipn3ke_rpst_hw_port_stats, + link_xon_tx)}, + {"rx_xon_packets", offsetof(struct ipn3ke_rpst_hw_port_stats, + link_xon_rx)}, + {"tx_xoff_packets", offsetof(struct ipn3ke_rpst_hw_port_stats, + link_xoff_tx)}, + {"rx_xoff_packets", offsetof(struct ipn3ke_rpst_hw_port_stats, + link_xoff_rx)}, + {"rx_size_64_packets", offsetof(struct ipn3ke_rpst_hw_port_stats, + rx_size_64)}, + {"rx_size_65_to_127_packets", offsetof(struct ipn3ke_rpst_hw_port_stats, + rx_size_65_127)}, + {"rx_size_128_to_255_packets", + offsetof(struct ipn3ke_rpst_hw_port_stats, + rx_size_128_255)}, + {"rx_size_256_to_511_packets", + offsetof(struct ipn3ke_rpst_hw_port_stats, + rx_size_256_511)}, + {"rx_size_512_to_1023_packets", + offsetof(struct ipn3ke_rpst_hw_port_stats, + rx_size_512_1023)}, + {"rx_size_1024_to_1518_packets", + offsetof(struct ipn3ke_rpst_hw_port_stats, + rx_size_1024_1518)}, + {"rx_size_1519_to_max_packets", + offsetof(struct ipn3ke_rpst_hw_port_stats, + rx_size_big)}, + {"rx_undersized_errors", offsetof(struct ipn3ke_rpst_hw_port_stats, + rx_undersize)}, + {"rx_oversize_errors", offsetof(struct ipn3ke_rpst_hw_port_stats, + rx_oversize)}, + {"rx_mac_short_dropped", offsetof(struct ipn3ke_rpst_hw_port_stats, + mac_short_packet_dropped)}, + {"rx_fragmented_errors", offsetof(struct ipn3ke_rpst_hw_port_stats, + rx_fragments)}, + {"rx_jabber_errors", offsetof(struct ipn3ke_rpst_hw_port_stats, + rx_jabber)}, + {"tx_size_64_packets", offsetof(struct ipn3ke_rpst_hw_port_stats, + tx_size_64)}, + {"tx_size_65_to_127_packets", + offsetof(struct ipn3ke_rpst_hw_port_stats, + tx_size_65_127)}, + {"tx_size_128_to_255_packets", + offsetof(struct ipn3ke_rpst_hw_port_stats, + tx_size_128_255)}, + {"tx_size_256_to_511_packets", + offsetof(struct ipn3ke_rpst_hw_port_stats, + tx_size_256_511)}, + {"tx_size_512_to_1023_packets", + offsetof(struct ipn3ke_rpst_hw_port_stats, + tx_size_512_1023)}, + {"tx_size_1024_to_1518_packets", + offsetof(struct ipn3ke_rpst_hw_port_stats, + tx_size_1024_1518)}, + {"tx_size_1519_to_max_packets", + offsetof(struct ipn3ke_rpst_hw_port_stats, + tx_size_1519_to_max)}, + {"rx_flow_director_atr_match_packets", + offsetof(struct ipn3ke_rpst_hw_port_stats, + fd_atr_match)}, + {"rx_flow_director_sb_match_packets", + offsetof(struct ipn3ke_rpst_hw_port_stats, + fd_sb_match)}, + {"tx_low_power_idle_status", offsetof(struct ipn3ke_rpst_hw_port_stats, + tx_lpi_status)}, + {"rx_low_power_idle_status", offsetof(struct ipn3ke_rpst_hw_port_stats, + rx_lpi_status)}, + {"tx_low_power_idle_count", offsetof(struct ipn3ke_rpst_hw_port_stats, + tx_lpi_count)}, + {"rx_low_power_idle_count", offsetof(struct ipn3ke_rpst_hw_port_stats, + rx_lpi_count)}, +}; + +#define IPN3KE_RPST_HW_PORT_XSTATS_CNT (sizeof(ipn3ke_rpst_hw_port_strings) \ + / sizeof(ipn3ke_rpst_hw_port_strings[0])) + +static const struct ipn3ke_rpst_xstats_name_offset +ipn3ke_rpst_rxq_prio_strings[] = { + {"xon_packets", offsetof(struct ipn3ke_rpst_hw_port_stats, + priority_xon_rx)}, + {"xoff_packets", offsetof(struct ipn3ke_rpst_hw_port_stats, + priority_xoff_rx)}, +}; + +#define IPN3KE_RPST_RXQ_PRIO_XSTATS_CNT (sizeof(ipn3ke_rpst_rxq_prio_strings) \ + / sizeof(ipn3ke_rpst_rxq_prio_strings[0])) + +static const struct ipn3ke_rpst_xstats_name_offset +ipn3ke_rpst_txq_prio_strings[] = { + {"xon_packets", offsetof(struct ipn3ke_rpst_hw_port_stats, + priority_xon_tx)}, + {"xoff_packets", offsetof(struct ipn3ke_rpst_hw_port_stats, + priority_xoff_tx)}, + {"xon_to_xoff_packets", offsetof(struct 
ipn3ke_rpst_hw_port_stats, + priority_xon_2_xoff)}, +}; + +#define IPN3KE_RPST_TXQ_PRIO_XSTATS_CNT (sizeof(ipn3ke_rpst_txq_prio_strings) \ + / sizeof(ipn3ke_rpst_txq_prio_strings[0])) + +static uint32_t +ipn3ke_rpst_xstats_calc_num(void) +{ + return IPN3KE_RPST_ETH_XSTATS_CNT + + IPN3KE_RPST_HW_PORT_XSTATS_CNT + + (IPN3KE_RPST_RXQ_PRIO_XSTATS_CNT + * IPN3KE_RPST_PRIO_XSTATS_CNT) + + (IPN3KE_RPST_TXQ_PRIO_XSTATS_CNT + * IPN3KE_RPST_PRIO_XSTATS_CNT); +} + +static void +ipn3ke_rpst_25g_nic_side_tx_stats_reset(struct ipn3ke_hw *hw, +uint16_t port_id) +{ + uint32_t tmp = 0x00000001; + /* Bit[0]: Software can set this bit to the value of 1 + * to reset all of the TX statistics registers at the same time. + * This bit is selfclearing. + */ + (*hw->f_mac_write)(hw, + tmp, + IPN3KE_25G_TX_STATISTICS_CONFIG, + port_id, + 1); + + while (tmp & 0x00000001) { + tmp = 0x00000000; + (*hw->f_mac_read)(hw, + &tmp, + IPN3KE_25G_TX_STATISTICS_CONFIG, + port_id, + 1); + if (tmp & 0x00000001) + usleep(5); + else + return; + } +} + +static void +ipn3ke_rpst_25g_nic_side_rx_stats_reset(struct ipn3ke_hw *hw, +uint16_t port_id) +{ + uint32_t tmp = 0x00000001; + /* Bit[0]: Software can set this bit to the value of 1 + * to reset all of the RX statistics registers at the same time. + * This bit is selfclearing. + */ + (*hw->f_mac_write)(hw, + tmp, + IPN3KE_25G_RX_STATISTICS_CONFIG, + port_id, + 1); + + while (tmp & 0x00000001) { + tmp = 0x00000000; + (*hw->f_mac_read)(hw, + &tmp, + IPN3KE_25G_RX_STATISTICS_CONFIG, + port_id, + 1); + if (tmp & 0x00000001) + usleep(5); + else + return; + } +} + +static void +ipn3ke_rpst_10g_nic_side_tx_stats_reset(struct ipn3ke_hw *hw, +uint16_t port_id) +{ + uint32_t tmp; + + /*Bit [0]: Set this register to 1 to clear all TX statistics + *counters. + *The IP core clears this bit when all counters are cleared. + *Bits [31:1]: Reserved. + */ + tmp = 0x00000000; + (*hw->f_mac_read)(hw, + &tmp, + IPN3KE_10G_TX_STATS_CLR, + port_id, + 1); + tmp |= 0x00000001; + (*hw->f_mac_write)(hw, + tmp, + IPN3KE_10G_TX_STATS_CLR, + port_id, + 1); +} + +static void +ipn3ke_rpst_10g_nic_side_rx_stats_reset(struct ipn3ke_hw *hw, +uint16_t port_id) +{ + uint32_t tmp; + + /*Bit [0]: Set this register to 1 to clear all RX statistics + *counters. + *The IP core clears this bit when all counters are cleared. + *Bits [31:1]: Reserved + */ + tmp = 0x00000000; + (*hw->f_mac_read)(hw, + &tmp, + IPN3KE_10G_RX_STATS_CLR, + port_id, + 1); + tmp |= 0x00000001; + (*hw->f_mac_write)(hw, + tmp, + IPN3KE_10G_RX_STATS_CLR, + port_id, + 1); +} + +static uint64_t +ipn3ke_rpst_read_64bits_statistics_register(uint32_t addr_lo, +uint32_t addr_hi, struct ipn3ke_hw *hw, uint16_t port_id) +{ + uint32_t statistics_lo = 0x00000000; + uint32_t statistics_hi = 0x00000000; + uint64_t statistics = 0x0000000000000000; + + (*hw->f_mac_read)(hw, + &statistics_lo, + addr_lo, + port_id, + 0); + + (*hw->f_mac_read)(hw, + &statistics_hi, + addr_hi, + port_id, + 0); + + statistics += statistics_hi; + statistics = statistics << IPN3KE_REGISTER_WIDTH; + statistics += statistics_lo; + return statistics; + +} + +static int +ipn3ke_rpst_read_25g_lineside_stats_registers +(struct ipn3ke_hw *hw, +uint16_t port_id, +struct ipn3ke_rpst_hw_port_stats *hw_stats) +{ + uint32_t tmp; + uint64_t statistics; + + memset(hw_stats, 0, sizeof(*hw_stats)); + + /*check Tx statistics is real time. + *if statistics has been paused, make it real time. 
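+	 *("Real time" here means the shadow-request bit of the 25G
+	 *_STATISTICS_CONFIG register is cleared so the counters run live;
+	 *the same bit is set again further below ("pause Tx/Rx counter")
+	 *before the counter registers are read, presumably to obtain a
+	 *stable snapshot.)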
+ */ + tmp = 0x00000000; + (*hw->f_mac_read)(hw, + &tmp, + IPN3KE_25G_TX_STATISTICS_CONFIG, + port_id, + 0); + + if (tmp & IPN3KE_25G_TX_STATISTICS_CONFIG_SHADOW_REQUEST_MASK) { + tmp &= 0xfffffffb; + (*hw->f_mac_write)(hw, + tmp, + IPN3KE_25G_TX_STATISTICS_CONFIG, + port_id, + 0); + } + + tmp = 0x00000000; + (*hw->f_mac_read)(hw, + &tmp, + IPN3KE_25G_TX_STATISTICS_STATUS, + port_id, + 0); + if (tmp & IPN3KE_25G_TX_STATISTICS_STATUS_SHADOW_REQUEST_MASK) { + tmp = 0x00000000; + (*hw->f_mac_read)(hw, + &tmp, + IPN3KE_25G_TX_STATISTICS_CONFIG, + port_id, + 0); + tmp &= 0xfffffffb; + (*hw->f_mac_write)(hw, + tmp, + IPN3KE_25G_TX_STATISTICS_CONFIG, + port_id, + 0); + } + + /*check Rx statistics is real time. + *if statistics has been paused, make it real time. + */ + tmp = 0x00000000; + (*hw->f_mac_read)(hw, + &tmp, + IPN3KE_25G_RX_STATISTICS_CONFIG, + port_id, + 0); + if (tmp & IPN3KE_25G_RX_STATISTICS_CONFIG_SHADOW_REQUEST_MASK) { + tmp &= 0xfffffffb; + (*hw->f_mac_write)(hw, + tmp, + IPN3KE_25G_RX_STATISTICS_CONFIG, + port_id, + 0); + } + + tmp = 0x00000000; + (*hw->f_mac_read)(hw, + &tmp, + IPN3KE_25G_RX_STATISTICS_STATUS, + port_id, + 0); + + if (tmp & IPN3KE_25G_RX_STATISTICS_STATUS_SHADOW_REQUEST_MASK) { + tmp = 0x00000000; + (*hw->f_mac_read)(hw, + &tmp, + IPN3KE_25G_RX_STATISTICS_CONFIG, + port_id, + 0); + tmp &= 0xfffffffb; + (*hw->f_mac_write)(hw, + tmp, + IPN3KE_25G_RX_STATISTICS_CONFIG, + port_id, + 0); + } + + /* pause Tx counter to read the statistics */ + tmp = 0x00000000; + (*hw->f_mac_read)(hw, + &tmp, + IPN3KE_25G_TX_STATISTICS_CONFIG, + port_id, + 0); + tmp |= 0x00000004; + (*hw->f_mac_write)(hw, + tmp, + IPN3KE_25G_TX_STATISTICS_CONFIG, + port_id, + 0); + + /* pause Rx counter to read the statistics */ + tmp = 0x00000000; + (*hw->f_mac_read)(hw, + &tmp, + IPN3KE_25G_RX_STATISTICS_CONFIG, + port_id, + 0); + tmp |= 0x00000004; + (*hw->f_mac_write)(hw, + tmp, + IPN3KE_25G_RX_STATISTICS_CONFIG, + port_id, + 0); + + /*Number of transmitted frames less than 64 bytes + *and reporting a CRC error + */ + statistics = ipn3ke_rpst_read_64bits_statistics_register( + IPN3KE_25G_CNTR_TX_FRAGMENTS_LO, + IPN3KE_25G_CNTR_TX_FRAGMENTS_HI, + hw, port_id); + hw_stats->eth.tx_errors += statistics; + hw_stats->crc_errors += statistics; + + /*Number of transmitted oversized frames reporting a CRC error*/ + statistics = ipn3ke_rpst_read_64bits_statistics_register( + IPN3KE_25G_CNTR_TX_JABBERS_LO, + IPN3KE_25G_CNTR_TX_JABBERS_HI, + hw, port_id); + hw_stats->eth.tx_errors += statistics; + hw_stats->crc_errors += statistics; + + /* Number of transmitted packets with FCS errors */ + statistics = ipn3ke_rpst_read_64bits_statistics_register( + IPN3KE_25G_CNTR_TX_FCS_LO, + IPN3KE_25G_CNTR_TX_FCS_HI, + hw, port_id); + hw_stats->eth.tx_errors += statistics; + hw_stats->checksum_error += statistics; + + /*Number of transmitted frames with a frame of length at + *least 64 reporting a CRC error + */ + statistics = ipn3ke_rpst_read_64bits_statistics_register( + IPN3KE_25G_CNTR_TX_CRCERR_LO, + IPN3KE_25G_CNTR_TX_CRCERR_HI, + hw, port_id); + hw_stats->eth.tx_errors += statistics; + hw_stats->crc_errors += statistics; + + /*Number of errored multicast frames transmitted, + *excluding control frames + */ + statistics = ipn3ke_rpst_read_64bits_statistics_register( + IPN3KE_25G_CNTR_TX_MCAST_DATA_ERR_LO, + IPN3KE_25G_CNTR_TX_MCAST_DATA_ERR_HI, + hw, port_id); + hw_stats->eth.tx_errors += statistics; + + /*Number of errored broadcast frames transmitted, + *excluding control frames + */ + statistics = 
ipn3ke_rpst_read_64bits_statistics_register( + IPN3KE_25G_CNTR_TX_BCAST_DATA_ERR_LO, + IPN3KE_25G_CNTR_TX_BCAST_DATA_ERR_HI, + hw, port_id); + hw_stats->eth.tx_errors += statistics; + + /*Number of errored unicast frames transmitted, + *excluding control frames + */ + statistics = ipn3ke_rpst_read_64bits_statistics_register( + IPN3KE_25G_CNTR_TX_UCAST_DATA_ERR_LO, + IPN3KE_25G_CNTR_TX_UCAST_DATA_ERR_HI, + hw, port_id); + hw_stats->eth.tx_errors += statistics; + + /* Number of errored multicast control frames transmitted */ + statistics = ipn3ke_rpst_read_64bits_statistics_register( + IPN3KE_25G_CNTR_TX_MCAST_CTRL_ERR_LO, + IPN3KE_25G_CNTR_TX_MCAST_CTRL_ERR_HI, + hw, port_id); + hw_stats->eth.tx_errors += statistics; + + /* Number of errored broadcast control frames transmitted */ + statistics = ipn3ke_rpst_read_64bits_statistics_register( + IPN3KE_25G_CNTR_TX_BCAST_CTRL_ERR_LO, + IPN3KE_25G_CNTR_TX_BCAST_CTRL_ERR_HI, + hw, port_id); + hw_stats->eth.tx_errors += statistics; + + /* Number of errored unicast control frames transmitted */ + statistics = ipn3ke_rpst_read_64bits_statistics_register( + IPN3KE_25G_CNTR_TX_UCAST_CTRL_ERR_LO, + IPN3KE_25G_CNTR_TX_UCAST_CTRL_ERR_HI, + hw, port_id); + hw_stats->eth.tx_errors += statistics; + + /* Number of errored pause frames transmitted */ + statistics = ipn3ke_rpst_read_64bits_statistics_register( + IPN3KE_25G_CNTR_TX_PAUSE_ERR_LO, + IPN3KE_25G_CNTR_TX_PAUSE_ERR_HI, + hw, port_id); + hw_stats->eth.tx_errors += statistics; + + /*Number of 64-byte transmitted frames, + *including the CRC field but excluding the preamble + *and SFD bytes + */ + statistics = ipn3ke_rpst_read_64bits_statistics_register( + IPN3KE_25G_CNTR_TX_64B_LO, + IPN3KE_25G_CNTR_TX_64B_HI, + hw, port_id); + hw_stats->tx_size_64 += statistics; + + /* Number of transmitted frames between 65 and 127 bytes */ + statistics = ipn3ke_rpst_read_64bits_statistics_register( + IPN3KE_25G_CNTR_TX_65_127B_LO, + IPN3KE_25G_CNTR_TX_65_127B_HI, + hw, port_id); + hw_stats->tx_size_65_127 += statistics; + + /* Number of transmitted frames between 128 and 255 bytes */ + statistics = ipn3ke_rpst_read_64bits_statistics_register( + IPN3KE_25G_CNTR_TX_128_255B_LO, + IPN3KE_25G_CNTR_TX_128_255B_HI, + hw, port_id); + hw_stats->tx_size_128_255 += statistics; + + /* Number of transmitted frames between 256 and 511 bytes */ + statistics = ipn3ke_rpst_read_64bits_statistics_register( + IPN3KE_25G_CNTR_TX_256_511B_LO, + IPN3KE_25G_CNTR_TX_256_511B_HI, + hw, port_id); + hw_stats->tx_size_256_511 += statistics; + + /* Number of transmitted frames between 512 and 1023 bytes */ + statistics = ipn3ke_rpst_read_64bits_statistics_register( + IPN3KE_25G_CNTR_TX_512_1023B_LO, + IPN3KE_25G_CNTR_TX_512_1023B_HI, + hw, port_id); + hw_stats->tx_size_512_1023 += statistics; + + /* Number of transmitted frames between 1024 and 1518 bytes */ + statistics = ipn3ke_rpst_read_64bits_statistics_register( + IPN3KE_25G_CNTR_TX_1024_1518B_LO, + IPN3KE_25G_CNTR_TX_1024_1518B_HI, + hw, port_id); + hw_stats->tx_size_1024_1518 += statistics; + + /*Number of transmitted frames of size between 1519 bytes + *and the number of bytes specified in the MAX_TX_SIZE_CONFIG + *register + */ + statistics = ipn3ke_rpst_read_64bits_statistics_register( + IPN3KE_25G_CNTR_TX_1519_MAXB_LO, + IPN3KE_25G_CNTR_TX_1519_MAXB_HI, + hw, port_id); + hw_stats->tx_size_1519_to_max += statistics; + + /*Number of oversized frames (frames with more bytes than the + *number specified in the MAX_TX_SIZE_CONFIG register) + *transmitted + */ + statistics = 
ipn3ke_rpst_read_64bits_statistics_register( + IPN3KE_25G_CNTR_TX_OVERSIZE_LO, + IPN3KE_25G_CNTR_TX_OVERSIZE_HI, + hw, port_id); + + /*Number of valid multicast frames transmitted, + *excluding control frames + */ + statistics = ipn3ke_rpst_read_64bits_statistics_register( + IPN3KE_25G_CNTR_TX_MCAST_DATA_OK_LO, + IPN3KE_25G_CNTR_TX_MCAST_DATA_OK_HI, + hw, port_id); + hw_stats->eth.tx_multicast += statistics; + + /*Number of valid broadcast frames transmitted, + *excluding control frames + */ + statistics = ipn3ke_rpst_read_64bits_statistics_register( + IPN3KE_25G_CNTR_TX_BCAST_DATA_OK_LO, + IPN3KE_25G_CNTR_TX_BCAST_DATA_OK_HI, + hw, port_id); + hw_stats->eth.tx_broadcast += statistics; + + /*Number of valid unicast frames transmitted, + *excluding control frames + */ + statistics = ipn3ke_rpst_read_64bits_statistics_register( + IPN3KE_25G_CNTR_TX_UCAST_DATA_OK_LO, + IPN3KE_25G_CNTR_TX_UCAST_DATA_OK_HI, + hw, port_id); + hw_stats->eth.tx_unicast += statistics; + + /*Number of valid multicast frames transmitted, + *excluding data frames + */ + statistics = ipn3ke_rpst_read_64bits_statistics_register( + IPN3KE_25G_CNTR_TX_MCAST_CTRL_LO, + IPN3KE_25G_CNTR_TX_MCAST_CTRL_HI, + hw, port_id); + hw_stats->eth.tx_multicast += statistics; + + /*Number of valid broadcast frames transmitted, + *excluding data frames + */ + statistics = ipn3ke_rpst_read_64bits_statistics_register( + IPN3KE_25G_CNTR_TX_BCAST_CTRL_LO, + IPN3KE_25G_CNTR_TX_BCAST_CTRL_HI, + hw, port_id); + hw_stats->eth.tx_broadcast += statistics; + + /*Number of valid unicast frames transmitted, + *excluding data frames + */ + statistics = ipn3ke_rpst_read_64bits_statistics_register( + IPN3KE_25G_CNTR_TX_UCAST_CTRL_LO, + IPN3KE_25G_CNTR_TX_UCAST_CTRL_HI, + hw, port_id); + hw_stats->eth.tx_unicast += statistics; + + /* Number of valid pause frames transmitted */ + statistics = ipn3ke_rpst_read_64bits_statistics_register( + IPN3KE_25G_CNTR_TX_PAUSE_LO, + IPN3KE_25G_CNTR_TX_PAUSE_HI, + hw, port_id); + + /*Number of transmitted runt packets. The IP core does not + *transmit frames of length less than nine bytes. + *The IP core pads frames of length nine bytes to 64 bytes to + *extend them to 64 bytes. Therefore, this counter does not + *increment in normal operating conditions. + */ + statistics = ipn3ke_rpst_read_64bits_statistics_register( + IPN3KE_25G_CNTR_TX_RUNT_LO, + IPN3KE_25G_CNTR_TX_RUNT_HI, + hw, port_id); + + /*Number of transmitted payload bytes in frames with no FCS, + *undersized, oversized, or payload length errors. + *If VLAN detection is turned off for the TX MAC (bit[1] + *of the TX_MAC_CONTROL register at offset 0x40A has + *the value of 1), the IP core counts the VLAN header bytes + *(4 bytes for VLAN and 8 bytes for stacked VLAN) + *as payload bytes. This register is compliant with + *the requirements for aOctetsTransmittedOK in section + *5.2.2.1.8 of the IEEE Standard 802.3-2008. + */ + statistics = ipn3ke_rpst_read_64bits_statistics_register( + IPN3KE_25G_TX_PAYLOAD_OCTETS_OK_LO, + IPN3KE_25G_TX_PAYLOAD_OCTETS_OK_HI, + hw, port_id); + hw_stats->eth.tx_bytes += statistics; + + /*Number of transmitted bytes in frames with no FCS, undersized, + *oversized, or payload length errors. This register is + *compliant with the requirements for ifOutOctets in RFC3635 + *(Managed Objects for Ethernet-like Interface Types) + *and TX etherStatsOctets in RFC2819(Remote Network Monitoring + *Management Information Base (RMON)). 
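+ *This frame-octet count is read below but not folded into hw_stats;
+ *eth.tx_bytes is taken from the payload-octet counter above.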
+ */ + statistics = ipn3ke_rpst_read_64bits_statistics_register( + IPN3KE_25G_TX_FRAME_OCTETS_OK_LO, + IPN3KE_25G_TX_FRAME_OCTETS_OK_HI, + hw, port_id); + + /*Number of received frames less than 64 bytes + *and reporting a CRC error + */ + statistics = ipn3ke_rpst_read_64bits_statistics_register( + IPN3KE_25G_CNTR_RX_FRAGMENTS_LO, + IPN3KE_25G_CNTR_RX_FRAGMENTS_HI, + hw, port_id); + hw_stats->eth.rx_discards += statistics; + hw_stats->crc_errors += statistics; + hw_stats->rx_length_errors += statistics; + + /* Number of received oversized frames reporting a CRC error */ + statistics = ipn3ke_rpst_read_64bits_statistics_register( + IPN3KE_25G_CNTR_RX_JABBERS_LO, + IPN3KE_25G_CNTR_RX_JABBERS_HI, + hw, port_id); + hw_stats->eth.rx_discards += statistics; + hw_stats->crc_errors += statistics; + hw_stats->rx_length_errors += statistics; + + /*Number of received packets with FCS errors. + *This register maintains a count of the number of pulses + *on the "l_rx_fcs_error" or "rx_fcs_error" output signal + */ + statistics = ipn3ke_rpst_read_64bits_statistics_register( + IPN3KE_25G_CNTR_RX_FCS_LO, + IPN3KE_25G_CNTR_RX_FCS_HI, + hw, port_id); + hw_stats->eth.rx_discards += statistics; + hw_stats->checksum_error += statistics; + + /*Number of received frames with a frame of length at least 64 + *with CRC error + */ + statistics = ipn3ke_rpst_read_64bits_statistics_register( + IPN3KE_25G_CNTR_RX_CRCERR_LO, + IPN3KE_25G_CNTR_RX_CRCERR_HI, + hw, port_id); + hw_stats->eth.rx_discards += statistics; + hw_stats->crc_errors += statistics; + + /*Number of errored multicast frames received, + *excluding control frames + */ + statistics = ipn3ke_rpst_read_64bits_statistics_register( + IPN3KE_25G_CNTR_RX_MCAST_DATA_ERR_LO, + IPN3KE_25G_CNTR_RX_MCAST_DATA_ERR_HI, + hw, port_id); + hw_stats->eth.rx_discards += statistics; + + /*Number of errored broadcast frames received, + *excluding control frames + */ + statistics = ipn3ke_rpst_read_64bits_statistics_register( + IPN3KE_25G_CNTR_RX_BCAST_DATA_ERR_LO, + IPN3KE_25G_CNTR_RX_BCAST_DATA_ERR_HI, + hw, port_id); + hw_stats->eth.rx_discards += statistics; + + /*Number of errored unicast frames received, + *excluding control frames + */ + statistics = ipn3ke_rpst_read_64bits_statistics_register( + IPN3KE_25G_CNTR_RX_UCAST_DATA_ERR_LO, + IPN3KE_25G_CNTR_RX_UCAST_DATA_ERR_HI, + hw, port_id); + hw_stats->eth.rx_discards += statistics; + + /* Number of errored multicast control frames received */ + statistics = ipn3ke_rpst_read_64bits_statistics_register( + IPN3KE_25G_CNTR_RX_MCAST_CTRL_ERR_LO, + IPN3KE_25G_CNTR_RX_MCAST_CTRL_ERR_HI, + hw, port_id); + hw_stats->eth.rx_discards += statistics; + + /* Number of errored broadcast control frames received */ + statistics = ipn3ke_rpst_read_64bits_statistics_register( + IPN3KE_25G_CNTR_RX_BCAST_CTRL_ERR_LO, + IPN3KE_25G_CNTR_RX_BCAST_CTRL_ERR_HI, + hw, port_id); + hw_stats->eth.rx_discards += statistics; + + /* Number of errored unicast control frames received */ + statistics = ipn3ke_rpst_read_64bits_statistics_register( + IPN3KE_25G_CNTR_RX_UCAST_CTRL_ERR_LO, + IPN3KE_25G_CNTR_RX_UCAST_CTRL_ERR_HI, + hw, port_id); + hw_stats->eth.rx_discards += statistics; + + /* Number of errored pause frames received */ + statistics = ipn3ke_rpst_read_64bits_statistics_register( + IPN3KE_25G_CNTR_RX_PAUSE_ERR_LO, + IPN3KE_25G_CNTR_RX_PAUSE_ERR_HI, + hw, port_id); + hw_stats->eth.rx_discards += statistics; + + /*Number of 64-byte received frames, + *including the CRC field but excluding the preamble + *and SFD bytes + */ + statistics = 
ipn3ke_rpst_read_64bits_statistics_register( + IPN3KE_25G_CNTR_RX_64B_LO, + IPN3KE_25G_CNTR_RX_64B_HI, + hw, port_id); + hw_stats->rx_size_64 += statistics; + + /*Number of received frames between 65 and 127 bytes */ + statistics = ipn3ke_rpst_read_64bits_statistics_register( + IPN3KE_25G_CNTR_RX_65_127B_LO, + IPN3KE_25G_CNTR_RX_65_127B_HI, + hw, port_id); + hw_stats->rx_size_65_127 += statistics; + + /*Number of received frames between 128 and 255 bytes + */ + statistics = ipn3ke_rpst_read_64bits_statistics_register( + IPN3KE_25G_CNTR_RX_128_255B_LO, + IPN3KE_25G_CNTR_RX_128_255B_HI, + hw, port_id); + hw_stats->rx_size_128_255 += statistics; + + /*Number of received frames between 256 and 511 bytes + */ + statistics = ipn3ke_rpst_read_64bits_statistics_register( + IPN3KE_25G_CNTR_RX_256_511B_LO, + IPN3KE_25G_CNTR_RX_256_511B_HI, + hw, port_id); + hw_stats->rx_size_256_511 += statistics; + + /*Number of received frames between 512 and 1023 bytes + */ + statistics = ipn3ke_rpst_read_64bits_statistics_register( + IPN3KE_25G_CNTR_RX_512_1023B_LO, + IPN3KE_25G_CNTR_RX_512_1023B_HI, + hw, port_id); + hw_stats->rx_size_512_1023 += statistics; + + /*Number of received frames between 1024 and 1518 bytes + */ + statistics = ipn3ke_rpst_read_64bits_statistics_register( + IPN3KE_25G_CNTR_RX_1024_1518B_LO, + IPN3KE_25G_CNTR_RX_1024_1518B_HI, + hw, port_id); + hw_stats->rx_size_1024_1518 += statistics; + + /*Number of received frames of size between 1519 bytes + *and the number of bytes specified in the MAX_TX_SIZE_CONFIG + *register + */ + statistics = ipn3ke_rpst_read_64bits_statistics_register( + IPN3KE_25G_CNTR_RX_1519_MAXB_LO, + IPN3KE_25G_CNTR_RX_1519_MAXB_HI, + hw, port_id); + hw_stats->rx_size_big += statistics; + + /*Number of oversized frames (frames with more bytes + *than the number specified in the MAX_TX_SIZE_CONFIG register) + *received + */ + statistics = ipn3ke_rpst_read_64bits_statistics_register( + IPN3KE_25G_CNTR_RX_OVERSIZE_LO, + IPN3KE_25G_CNTR_RX_OVERSIZE_HI, + hw, port_id); + hw_stats->rx_jabber += statistics; + + /*Number of valid multicast frames received, + *excluding control frames + */ + statistics = ipn3ke_rpst_read_64bits_statistics_register( + IPN3KE_25G_CNTR_RX_MCAST_DATA_OK_LO, + IPN3KE_25G_CNTR_RX_MCAST_DATA_OK_HI, + hw, port_id); + hw_stats->eth.rx_multicast += statistics; + + /*Number of valid broadcast frames received, + *excluding control frames + */ + statistics = ipn3ke_rpst_read_64bits_statistics_register( + IPN3KE_25G_CNTR_RX_BCAST_DATA_OK_LO, + IPN3KE_25G_CNTR_RX_BCAST_DATA_OK_HI, + hw, port_id); + hw_stats->eth.rx_broadcast += statistics; + + /*Number of valid unicast frames received, + *excluding control frames + */ + statistics = ipn3ke_rpst_read_64bits_statistics_register( + IPN3KE_25G_CNTR_RX_UCAST_DATA_OK_LO, + IPN3KE_25G_CNTR_RX_UCAST_DATA_OK_HI, + hw, port_id); + hw_stats->eth.rx_unicast += statistics; + + /*Number of valid multicast frames received, + *excluding data frames + */ + statistics = ipn3ke_rpst_read_64bits_statistics_register( + IPN3KE_25G_CNTR_RX_MCAST_CTRL_LO, + IPN3KE_25G_CNTR_RX_MCAST_CTRL_HI, + hw, port_id); + hw_stats->eth.rx_multicast += statistics; + + /*Number of valid broadcast frames received, + *excluding data frames + */ + statistics = ipn3ke_rpst_read_64bits_statistics_register( + IPN3KE_25G_CNTR_RX_BCAST_CTRL_LO, + IPN3KE_25G_CNTR_RX_BCAST_CTRL_HI, + hw, port_id); + hw_stats->eth.rx_broadcast += statistics; + + /*Number of valid unicast frames received, + *excluding data frames + */ + statistics = 
ipn3ke_rpst_read_64bits_statistics_register( + IPN3KE_25G_CNTR_RX_UCAST_CTRL_LO, + IPN3KE_25G_CNTR_RX_UCAST_CTRL_HI, + hw, port_id); + hw_stats->eth.rx_unicast += statistics; + + /*Number of received pause frames, with or without error + */ + statistics = ipn3ke_rpst_read_64bits_statistics_register( + IPN3KE_25G_CNTR_RX_PAUSE_LO, + IPN3KE_25G_CNTR_RX_PAUSE_HI, + hw, port_id); + + /*Number of received runt packets. A runt is a packet of size + *less than 64 bytes but greater than eight bytes. + *If a packet is eight bytes or smaller, it is considered + *a decoding error and not a runt frame, and the IP core + *does not flag it nor count it as a runt. + */ + statistics = ipn3ke_rpst_read_64bits_statistics_register( + IPN3KE_25G_CNTR_RX_RUNT_LO, + IPN3KE_25G_CNTR_RX_RUNT_HI, + hw, port_id); + + /*Number of received payload bytes in frames with no FCS, + *undersized, oversized, or payload length errors. + *If VLAN detection is turned off for the RX MAC (bit [1] of the + *"RXMAC_CONTROL" register at offset 0x50A has the value of 1), + *the IP core counts the VLAN header bytes (4 bytes for VLAN and + *8 bytes for stacked VLAN) as payload bytes. + *This register is compliant with the requirements for + *aOctetsReceivedOK in section 5.2.2.1.14 of the IEEE Standard + *802.3-2008 + */ + statistics = ipn3ke_rpst_read_64bits_statistics_register( + IPN3KE_25G_RX_PAYLOAD_OCTETS_OK_LO, + IPN3KE_25G_RX_PAYLOAD_OCTETS_OK_HI, + hw, port_id); + hw_stats->eth.rx_bytes += statistics; + + /*Number of received bytes in frames with no FCS, undersized, + *oversized, or payload length errors. + *This register is compliant with the requirements for + *ifInOctets in RFC3635 (Managed Objects for Ethernet-like + *Interface Types) and RX etherStatsOctets in RFC2819 + *(Remote Network Monitoring Management Information Base + *(RMON)). + */ + statistics = ipn3ke_rpst_read_64bits_statistics_register( + IPN3KE_25G_RX_FRAME_OCTETS_OK_LO, + IPN3KE_25G_RX_FRAME_OCTETS_OK_HI, + hw, port_id); + + /*resume Tx counter to real time + */ + tmp = 0x00000000; + (*hw->f_mac_read)(hw, + &tmp, + IPN3KE_25G_TX_STATISTICS_CONFIG, + port_id, + 0); + tmp &= 0xfffffffb; + (*hw->f_mac_write)(hw, + tmp, + IPN3KE_25G_TX_STATISTICS_CONFIG, + port_id, + 0); + + /*resume Rx counter to real time + */ + tmp = 0x00000000; + (*hw->f_mac_read)(hw, + &tmp, + IPN3KE_25G_RX_STATISTICS_CONFIG, + port_id, + 0); + tmp &= 0xfffffffb; + (*hw->f_mac_write)(hw, + tmp, + IPN3KE_25G_RX_STATISTICS_CONFIG, + port_id, + 0); + + return 0; +} + +static void +ipn3ke_rpst_25g_lineside_tx_stats_reset(struct ipn3ke_hw *hw, +uint16_t port_id) +{ + uint32_t tmp = 0x00000001; + /* Bit[0]: Software can set this bit to the value of 1 + * to reset all of the TX statistics registers at the same time. + * This bit is selfclearing. + */ + (*hw->f_mac_write)(hw, + tmp, + IPN3KE_25G_TX_STATISTICS_CONFIG, + port_id, + 0); + + while (tmp & 0x00000001) { + tmp = 0x00000000; + (*hw->f_mac_read)(hw, + &tmp, + IPN3KE_25G_TX_STATISTICS_CONFIG, + port_id, + 0); + if (tmp & 0x00000001) + usleep(5); + else + return; + } +} + +static void +ipn3ke_rpst_25g_lineside_rx_stats_reset(struct ipn3ke_hw *hw, +uint16_t port_id) +{ + uint32_t tmp = 0x00000001; + /* Bit[0]: Software can set this bit to the value of 1 + * to reset all of the RX statistics registers at the same time. + * This bit is selfclearing. 
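+ * The loop below polls the register, sleeping 5 microseconds between
+ * reads, until the hardware clears the bit.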
+ */ + (*hw->f_mac_write)(hw, + tmp, + IPN3KE_25G_RX_STATISTICS_CONFIG, + port_id, + 0); + + while (tmp & 0x00000001) { + tmp = 0x00000000; + (*hw->f_mac_read)(hw, + &tmp, + IPN3KE_25G_RX_STATISTICS_CONFIG, + port_id, + 0); + if (tmp & 0x00000001) + usleep(5); + else + return; + } +} + +static uint64_t +ipn3ke_rpst_read_36bits_statistics_register(uint32_t addr_lo, +uint32_t addr_hi, struct ipn3ke_hw *hw, uint16_t port_id) +{ + uint32_t statistics_lo = 0x00000000; + uint32_t statistics_hi = 0x00000000; + uint64_t statistics = 0x0000000000000000; + + (*hw->f_mac_read)(hw, + &statistics_lo, + addr_lo, + port_id, + 0); + (*hw->f_mac_read)(hw, + &statistics_hi, + addr_hi, + port_id, + 0); + statistics_hi &= IPN3KE_10G_STATS_HI_VALID_MASK; + statistics += statistics_hi; + statistics = statistics << IPN3KE_REGISTER_WIDTH; + statistics += statistics_lo; + return statistics; +} + +static int +ipn3ke_rpst_read_10g_lineside_stats_registers +(struct ipn3ke_hw *hw, +uint16_t port_id, +struct ipn3ke_rpst_hw_port_stats *hw_stats, +struct rte_eth_stats *stats) +{ + uint64_t statistics = 0; + + memset(hw_stats, 0, sizeof(*hw_stats)); + memset(stats, 0, sizeof(*stats)); + + /*36-bit statistics counter that collects the number of frames + *that are successfully transmitted, including control frames. + */ + statistics = ipn3ke_rpst_read_36bits_statistics_register( + IPN3KE_10G_TX_STATS_FRAME_OK_LO, + IPN3KE_10G_TX_STATS_FRAME_OK_HI, + hw, port_id); + stats->opackets = statistics; + + /*36-bit statistics counter that collects the number of frames + *that are successfully received, including control frames. + */ + statistics = ipn3ke_rpst_read_36bits_statistics_register( + IPN3KE_10G_RX_STATS_FRAME_OK_LO, + IPN3KE_10G_RX_STATS_FRAME_OK_HI, + hw, port_id); + stats->ipackets = statistics; + + /*36-bit statistics counter that collects the number of frames + *transmitted with error, including control frames. + */ + statistics = ipn3ke_rpst_read_36bits_statistics_register( + IPN3KE_10G_TX_STATS_FRAME_ERR_LO, + IPN3KE_10G_TX_STATS_FRAME_ERR_HI, + hw, port_id); + stats->oerrors = statistics; + hw_stats->eth.tx_errors = statistics; + + /*36-bit statistics counter that collects the number of frames + *received with error, including control frames. + */ + statistics = ipn3ke_rpst_read_36bits_statistics_register( + IPN3KE_10G_RX_STATS_FRAME_ERR_LO, + IPN3KE_10G_RX_STATS_FRAME_ERR_HI, + hw, port_id); + stats->ierrors = statistics; + hw_stats->eth.rx_discards = statistics; + + /*36-bit statistics counter that collects the number + *of RX frames with CRC error. + */ + statistics = ipn3ke_rpst_read_36bits_statistics_register( + IPN3KE_10G_RX_STATS_FRAME_CRC_ERR_LO, + IPN3KE_10G_RX_STATS_FRAME_CRC_ERR_HI, + hw, port_id); + hw_stats->crc_errors = statistics; + + /*64-bit statistics counter that collects the payload length, + *including the bytes in control frames. + *The payload length is the number of data and padding bytes + *transmitted. + *If the tx_vlan_detection[0] register bit is set to 1, + *the VLAN and stacked VLAN tags are counted as part of + *the TX payload. + */ + statistics = ipn3ke_rpst_read_36bits_statistics_register( + IPN3KE_10G_TX_STATS_OCTETS_OK_LO, + IPN3KE_10G_TX_STATS_OCTETS_OK_HI, + hw, port_id); + stats->obytes = statistics; + hw_stats->eth.tx_bytes = statistics; + + /*64-bit statistics counter that collects the payload length, + *including the bytes in control frames. + *The payload length is the number of data and padding bytes + *received. 
+ *If the rx_vlan_detection[0] register bit is set to 1, + *the VLAN and stacked VLAN tags are counted as part of + *the RX payload. + */ + statistics = ipn3ke_rpst_read_36bits_statistics_register( + IPN3KE_10G_RX_STATS_OCTETS_OK_LO, + IPN3KE_10G_RX_STATS_OCTETS_OK_HI, + hw, port_id); + stats->ibytes = statistics; + hw_stats->eth.rx_bytes = statistics; + + /*36-bit statistics counter that collects the number of + *valid pause frames transmitted. + */ + statistics = ipn3ke_rpst_read_36bits_statistics_register( + IPN3KE_10G_TX_STATS_PAUSE_MAC_CTRL_FRAMES_LO, + IPN3KE_10G_TX_STATS_PAUSE_MAC_CTRL_FRAMES_HI, + hw, port_id); + + /*36-bit statistics counter that collects the number of + *valid pause frames received. + */ + statistics = ipn3ke_rpst_read_36bits_statistics_register( + IPN3KE_10G_RX_STATS_PAUSE_MAC_CTRL_FRAMES_LO, + IPN3KE_10G_RX_STATS_PAUSE_MAC_CTRL_FRAMES_HI, + hw, port_id); + + /*36-bit statistics counter that collects the number of frames + *transmitted that are invalid and with error. + */ + statistics = ipn3ke_rpst_read_36bits_statistics_register( + IPN3KE_10G_TX_STATS_IF_ERRORS_LO, + IPN3KE_10G_TX_STATS_IF_ERRORS_HI, + hw, port_id); + + /*36-bit statistics counter that collects the number of frames + *received that are invalid and with error. + */ + statistics = ipn3ke_rpst_read_36bits_statistics_register( + IPN3KE_10G_RX_STATS_IF_ERRORS_LO, + IPN3KE_10G_RX_STATS_IF_ERRORS_HI, + hw, port_id); + + /*36-bit statistics counter that collects the number of + *good unicast frames transmitted, + *excluding control frames. + */ + statistics = ipn3ke_rpst_read_36bits_statistics_register( + IPN3KE_10G_TX_STATS_UNICAST_FRAME_OK_LO, + IPN3KE_10G_TX_STATS_UNICAST_FRAME_OK_HI, + hw, port_id); + hw_stats->eth.tx_unicast = statistics; + + /*36-bit statistics counter that collects the number of + *good unicast frames received, + *excluding control frames. + */ + statistics = ipn3ke_rpst_read_36bits_statistics_register( + IPN3KE_10G_RX_STATS_UNICAST_FRAME_OK_LO, + IPN3KE_10G_RX_STATS_UNICAST_FRAME_OK_HI, + hw, port_id); + hw_stats->eth.rx_unicast = statistics; + + /*36-bit statistics counter that collects the number of + *unicast frames transmitted with error, + *excluding control frames. + */ + statistics = ipn3ke_rpst_read_36bits_statistics_register( + IPN3KE_10G_TX_STATS_UNICAST_FRAME_ERR_LO, + IPN3KE_10G_TX_STATS_UNICAST_FRAME_ERR_HI, + hw, port_id); + + /*36-bit statistics counter that collects the number of + *unicast frames received with error, + *excluding control frames. + */ + statistics = ipn3ke_rpst_read_36bits_statistics_register( + IPN3KE_10G_RX_STATS_UNICAST_FRAME_ERR_LO, + IPN3KE_10G_RX_STATS_UNICAST_FRAME_ERR_HI, + hw, port_id); + + /*36-bit statistics counter that collects the number of + *good multicast frames transmitted, + *excluding control frames. + */ + statistics = ipn3ke_rpst_read_36bits_statistics_register( + IPN3KE_10G_TX_STATS_MULTICAST_FRAME_OK_LO, + IPN3KE_10G_TX_STATS_MULTICAST_FRAME_OK_HI, + hw, port_id); + hw_stats->eth.tx_multicast = statistics; + + /*36-bit statistics counter that collects the number of + *good multicast frames received, + *excluding control frames. + */ + statistics = ipn3ke_rpst_read_36bits_statistics_register( + IPN3KE_10G_RX_STATS_MULTICAST_FRAME_OK_LO, + IPN3KE_10G_RX_STATS_MULTICAST_FRAME_OK_HI, + hw, port_id); + hw_stats->eth.rx_multicast = statistics; + + /*36-bit statistics counter that collects the number of + *multicast frames transmitted with error, + *excluding control frames. 
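+ *This errored-frame count is read but not accumulated into hw_stats.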
+ */ + statistics = ipn3ke_rpst_read_36bits_statistics_register( + IPN3KE_10G_TX_STATS_MULTICAST_FRAME_ERR_LO, + IPN3KE_10G_TX_STATS_MULTICAST_FRAME_ERR_HI, + hw, port_id); + + /*36-bit statistics counter that collects the number + *of multicast frames received with error, + *excluding control frames. + */ + statistics = ipn3ke_rpst_read_36bits_statistics_register( + IPN3KE_10G_RX_STATS_MULTICAST_FRAME_ERR_LO, + IPN3KE_10G_RX_STATS_MULTICAST_FRAME_ERR_HI, + hw, port_id); + + /*36-bit statistics counter that collects the number of + *good broadcast frames transmitted, + *excluding control frames. + */ + statistics = ipn3ke_rpst_read_36bits_statistics_register( + IPN3KE_10G_TX_STATS_BROADCAST_FRAME_OK_LO, + IPN3KE_10G_TX_STATS_BROADCAST_FRAME_OK_HI, + hw, port_id); + hw_stats->eth.tx_broadcast = statistics; + + /*36-bit statistics counter that collects the number of + *good broadcast frames received, + *excluding control frames. + */ + statistics = ipn3ke_rpst_read_36bits_statistics_register( + IPN3KE_10G_RX_STATS_BROADCAST_FRAME_OK_LO, + IPN3KE_10G_RX_STATS_BROADCAST_FRAME_OK_HI, + hw, port_id); + hw_stats->eth.rx_broadcast = statistics; + + /*36-bit statistics counter that collects the number + *of broadcast frames transmitted with error, + *excluding control frames. + */ + statistics = ipn3ke_rpst_read_36bits_statistics_register( + IPN3KE_10G_TX_STATS_BROADCAST_FRAME_ERR_LO, + IPN3KE_10G_TX_STATS_BROADCAST_FRAME_ERR_HI, + hw, port_id); + + /*36-bit statistics counter that collects the number of + *broadcast frames received with error, + *excluding control frames. + */ + statistics = ipn3ke_rpst_read_36bits_statistics_register( + IPN3KE_10G_RX_STATS_BROADCAST_FRAME_ERR_LO, + IPN3KE_10G_RX_STATS_BROADCAST_FRAME_ERR_HI, + hw, port_id); + + /*64-bit statistics counter that collects the total number of + *octets transmitted. + *This count includes good, errored, and invalid frames. + */ + statistics = ipn3ke_rpst_read_64bits_statistics_register( + IPN3KE_10G_TX_STATS_ETHER_STATS_OCTETS_LO, + IPN3KE_10G_TX_STATS_ETHER_STATS_OCTETS_HI, + hw, port_id); + + /*64-bit statistics counter that collects the total number of + *octets received. + *This count includes good, errored, and invalid frames. + */ + statistics = ipn3ke_rpst_read_64bits_statistics_register( + IPN3KE_10G_RX_STATS_ETHER_STATS_OCTETS_LO, + IPN3KE_10G_RX_STATS_ETHER_STATS_OCTETS_HI, + hw, port_id); + + /*36-bit statistics counter that collects the total number of + *good, errored, and invalid frames transmitted. + */ + statistics = ipn3ke_rpst_read_36bits_statistics_register( + IPN3KE_10G_TX_STATS_ETHER_STATS_PKTS_LO, + IPN3KE_10G_TX_STATS_ETHER_STATS_PKTS_HI, + hw, port_id); + + /*36-bit statistics counter that collects the total number of + *good, errored, and invalid frames received. + */ + statistics = ipn3ke_rpst_read_36bits_statistics_register( + IPN3KE_10G_RX_STATS_ETHER_STATS_PKTS_LO, + IPN3KE_10G_RX_STATS_ETHER_STATS_PKTS_HI, + hw, port_id); + + /*36-bit statistics counter that collects the number of + *undersized TX frames. + */ + statistics = ipn3ke_rpst_read_36bits_statistics_register( + IPN3KE_10G_TX_STATS_ETHER_STATS_UNDER_SIZE_PKTS_LO, + IPN3KE_10G_TX_STATS_ETHER_STATS_UNDER_SIZE_PKTS_HI, + hw, port_id); + + /*36-bit statistics counter that collects the number of + *undersized RX frames. 
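+ *Unlike the TX undersize count read just above, this value is kept
+ *in hw_stats->rx_undersize.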
+ */ + statistics = ipn3ke_rpst_read_36bits_statistics_register( + IPN3KE_10G_RX_STATS_ETHER_STATS_UNDER_SIZE_PKTS_LO, + IPN3KE_10G_RX_STATS_ETHER_STATS_UNDER_SIZE_PKTS_HI, + hw, port_id); + hw_stats->rx_undersize = statistics; + + /*36-bit statistics counter that collects the number of + *TX frames whose length exceeds the maximum frame length + *specified. + */ + statistics = ipn3ke_rpst_read_36bits_statistics_register( + IPN3KE_10G_TX_STATS_ETHER_STATS_OVER_SIZE_PKTS_LO, + IPN3KE_10G_TX_STATS_ETHER_STATS_OVER_SIZE_PKTS_HI, + hw, port_id); + + /*36-bit statistics counter that collects the number of + *RX frames whose length exceeds the maximum frame length + *specified. + */ + statistics = ipn3ke_rpst_read_36bits_statistics_register( + IPN3KE_10G_RX_STATS_ETHER_STATS_OVER_SIZE_PKTS_LO, + IPN3KE_10G_RX_STATS_ETHER_STATS_OVER_SIZE_PKTS_HI, + hw, port_id); + hw_stats->rx_oversize = statistics; + + /*36-bit statistics counter that collects the number of + *64-byte TX frames, + *including the CRC field + *but excluding the preamble and SFD bytes. + *This count includes good, errored, and invalid frames. + */ + statistics = ipn3ke_rpst_read_36bits_statistics_register( + IPN3KE_10G_TX_STATS_ETHER_STATS_PKTS_64_OCTETS_LO, + IPN3KE_10G_TX_STATS_ETHER_STATS_PKTS_64_OCTETS_HI, + hw, port_id); + hw_stats->tx_size_64 = statistics; + + /*36-bit statistics counter that collects the number of + *64-byte RX frames, + *including the CRC field + *but excluding the preamble and SFD bytes. + *This count includes good, errored, and invalid frames. + */ + statistics = ipn3ke_rpst_read_36bits_statistics_register( + IPN3KE_10G_RX_STATS_ETHER_STATS_PKTS_64_OCTETS_LO, + IPN3KE_10G_RX_STATS_ETHER_STATS_PKTS_64_OCTETS_HI, + hw, port_id); + hw_stats->rx_size_64 = statistics; + + /*36-bit statistics counter that collects the number of + *TX frames between the length of 65 and 127 bytes, + *including the CRC field + *but excluding the preamble and SFD bytes. + *This count includes good, errored, and invalid frames. + */ + statistics = ipn3ke_rpst_read_36bits_statistics_register( + IPN3KE_10G_TX_STATS_ETHER_STATS_PKTS_65_127_OCTETS_LO, + IPN3KE_10G_TX_STATS_ETHER_STATS_PKTS_65_127_OCTETS_HI, + hw, port_id); + hw_stats->tx_size_65_127 = statistics; + + /*36-bit statistics counter that collects the number of + *RX frames between the length of 65 and 127 bytes, + *including the CRC field + *but excluding the preamble and SFD bytes. + *This count includes good, errored, and invalid frames. + */ + statistics = ipn3ke_rpst_read_36bits_statistics_register( + IPN3KE_10G_RX_STATS_ETHER_STATS_PKTS_65_127_OCTETS_LO, + IPN3KE_10G_RX_STATS_ETHER_STATS_PKTS_65_127_OCTETS_HI, + hw, port_id); + hw_stats->rx_size_65_127 = statistics; + + /*36-bit statistics counter that collects the number of + *TX frames between the length of 128 and 255 bytes, + *including the CRC field + *but excluding the preamble and SFD bytes. + *This count includes good, errored, and invalid frames. + */ + statistics = ipn3ke_rpst_read_36bits_statistics_register( + IPN3KE_10G_TX_STATS_ETHER_STATS_PKTS_128_255_OCTETS_LO, + IPN3KE_10G_TX_STATS_ETHER_STATS_PKTS_128_255_OCTETS_HI, + hw, port_id); + hw_stats->tx_size_128_255 = statistics; + + /*36-bit statistics counter that collects the number of + *RX frames between the length of 128 and 255 bytes, + *including the CRC field + *but excluding the preamble and SFD bytes. + *This count includes good, errored, and invalid frames. 
+ */ + statistics = ipn3ke_rpst_read_36bits_statistics_register( + IPN3KE_10G_RX_STATS_ETHER_STATS_PKTS_128_255_OCTETS_LO, + IPN3KE_10G_RX_STATS_ETHER_STATS_PKTS_128_255_OCTETS_HI, + hw, port_id); + hw_stats->rx_size_128_255 = statistics; + + /*36-bit statistics counter that collects the number of + *TX frames between the length of 256 and 511 bytes, + *including the CRC field + *but excluding the preamble and SFD bytes. + *This count includes good, errored, and invalid frames. + */ + statistics = ipn3ke_rpst_read_36bits_statistics_register( + IPN3KE_10G_TX_STATS_ETHER_STATS_PKTS_256_511_OCTETS_LO, + IPN3KE_10G_TX_STATS_ETHER_STATS_PKTS_256_511_OCTETS_HI, + hw, port_id); + hw_stats->tx_size_256_511 = statistics; + + /*36-bit statistics counter that collects the number of + *RX frames between the length of 256 and 511 bytes, + *including the CRC field + *but excluding the preamble and SFD bytes. + *This count includes good, errored, and invalid frames. + */ + statistics = ipn3ke_rpst_read_36bits_statistics_register( + IPN3KE_10G_RX_STATS_ETHER_STATS_PKTS_256_511_OCTETS_LO, + IPN3KE_10G_RX_STATS_ETHER_STATS_PKTS_256_511_OCTETS_HI, + hw, port_id); + hw_stats->rx_size_256_511 = statistics; + + /*36-bit statistics counter that collects the number of + *TX frames between the length of 512 and 1023 bytes, + *including the CRC field + *but excluding the preamble and SFD bytes. + *This count includes good, errored, and invalid frames. + */ + statistics = ipn3ke_rpst_read_36bits_statistics_register( + IPN3KE_10G_TX_STATS_ETHER_STATS_PKTS_512_1023_OCTETS_LO, + IPN3KE_10G_TX_STATS_ETHER_STATS_PKTS_512_1023_OCTETS_HI, + hw, port_id); + hw_stats->tx_size_512_1023 = statistics; + + /*36-bit statistics counter that collects the number of + *RX frames between the length of 512 and 1023 bytes, + *including the CRC field + *but excluding the preamble and SFD bytes. + *This count includes good, errored, and invalid frames. + */ + statistics = ipn3ke_rpst_read_36bits_statistics_register( + IPN3KE_10G_RX_STATS_ETHER_STATS_PKTS_512_1023_OCTETS_LO, + IPN3KE_10G_RX_STATS_ETHER_STATS_PKTS_512_1023_OCTETS_HI, + hw, port_id); + hw_stats->rx_size_512_1023 = statistics; + + /*36-bit statistics counter that collects the number of + *TX frames between the length of 1024 and 1518 bytes, + *including the CRC field but + *excluding the preamble and SFD bytes. + *This count includes good, errored, and invalid frames. + */ + statistics = ipn3ke_rpst_read_36bits_statistics_register( + IPN3KE_10G_TX_STATS_ETHER_STATS_PKTS_1024_1518_OCTETS_LO, + IPN3KE_10G_TX_STATS_ETHER_STATS_PKTS_1024_1518_OCTETS_HI, + hw, port_id); + hw_stats->tx_size_1024_1518 = statistics; + + /*36-bit statistics counter that collects the number of + *RX frames between the length of 1024 and 1518 bytes, + *including the CRC field + *but excluding the preamble and SFD bytes. + *This count includes good, errored, and invalid frames. + */ + statistics = ipn3ke_rpst_read_36bits_statistics_register( + IPN3KE_10G_RX_STATS_ETHER_STATS_PKTS_1024_1518_OCTETS_LO, + IPN3KE_10G_RX_STATS_ETHER_STATS_PKTS_1024_1518_OCTETS_HI, + hw, port_id); + hw_stats->rx_size_1024_1518 = statistics; + + /*36-bit statistics counter that collects the number of + *TX frames equal or more than the length of 1,519 bytes, + *including the CRC field + *but excluding the preamble and SFD bytes. + *This count includes good, errored, and invalid frames. 
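+ *This value feeds hw_stats->tx_size_1519_to_max; the RX counterpart
+ *read below feeds hw_stats->rx_size_big.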
+ */ + statistics = ipn3ke_rpst_read_36bits_statistics_register( + IPN3KE_10G_TX_STATS_ETHER_STATS_PKTS_1519_X_OCTETS_LO, + IPN3KE_10G_TX_STATS_ETHER_STATS_PKTS_1519_X_OCTETS_HI, + hw, port_id); + hw_stats->tx_size_1519_to_max = statistics; + + /*36-bit statistics counter that collects the number of + *RX frames equal or more than the length of 1,519 bytes, + *including the CRC field + *but excluding the preamble and SFD bytes. + *This count includes good, + *errored, and invalid frames. + */ + statistics = ipn3ke_rpst_read_36bits_statistics_register( + IPN3KE_10G_RX_STATS_ETHER_STATS_PKTS_1519_X_OCTETS_LO, + IPN3KE_10G_RX_STATS_ETHER_STATS_PKTS_1519_X_OCTETS_HI, + hw, port_id); + hw_stats->rx_size_big = statistics; + + /*36-bit statistics counter that collects the total number of + *RX frames with length less than 64 bytes and CRC error. + *The MAC does not drop these frames. + */ + statistics = ipn3ke_rpst_read_36bits_statistics_register( + IPN3KE_10G_RX_STATS_ETHER_STATS_FRAGMENTS_LO, + IPN3KE_10G_RX_STATS_ETHER_STATS_FRAGMENTS_HI, + hw, port_id); + + /*36-bit statistics counter that collects the number of + *oversized RX frames with CRC error. + *The MAC does not drop these frames. + */ + statistics = ipn3ke_rpst_read_36bits_statistics_register( + IPN3KE_10G_RX_STATS_ETHER_STATS_JABBERS_LO, + IPN3KE_10G_RX_STATS_ETHER_STATS_JABBERS_HI, + hw, port_id); + + /*36-bit statistics counter that collects the number of + *RX frames with CRC error, + *whose length is between 64 and the maximum frame length + *specified in the register. + *The MAC does not drop these frames. + */ + statistics = ipn3ke_rpst_read_36bits_statistics_register( + IPN3KE_10G_RX_STATS_ETHER_STATS_CRC_ERR_LO, + IPN3KE_10G_RX_STATS_ETHER_STATS_CRC_ERR_HI, + hw, port_id); + + /*36-bit statistics counter that collects the number of + *valid TX unicast control frames. + */ + statistics = ipn3ke_rpst_read_36bits_statistics_register( + IPN3KE_10G_TX_STATS_UNICAST_MAC_CTRL_FRAMES_LO, + IPN3KE_10G_TX_STATS_UNICAST_MAC_CTRL_FRAMES_HI, + hw, port_id); + hw_stats->eth.tx_unicast += statistics; + + /*36-bit statistics counter that collects the number of + *valid RX unicast control frames. + */ + statistics = ipn3ke_rpst_read_36bits_statistics_register( + IPN3KE_10G_RX_STATS_UNICAST_MAC_CTRL_FRAMES_LO, + IPN3KE_10G_RX_STATS_UNICAST_MAC_CTRL_FRAMES_HI, + hw, port_id); + hw_stats->eth.rx_unicast += statistics; + + /*36-bit statistics counter that collects the number of + *valid TX multicast control frames. + */ + statistics = ipn3ke_rpst_read_36bits_statistics_register( + IPN3KE_10G_TX_STATS_MULTICAST_MAC_CTRL_FRAMES_LO, + IPN3KE_10G_TX_STATS_MULTICAST_MAC_CTRL_FRAMES_HI, + hw, port_id); + hw_stats->eth.tx_multicast += statistics; + + /*36-bit statistics counter that collects the number of + *valid RX multicast control frames. + */ + statistics = ipn3ke_rpst_read_36bits_statistics_register( + IPN3KE_10G_RX_STATS_MULTICAST_MAC_CTRL_FRAMES_LO, + IPN3KE_10G_RX_STATS_MULTICAST_MAC_CTRL_FRAMES_HI, + hw, port_id); + hw_stats->eth.rx_multicast += statistics; + + /*36-bit statistics counter that collects the number of + *valid TX broadcast control frames. + */ + statistics = ipn3ke_rpst_read_36bits_statistics_register( + IPN3KE_10G_TX_STATS_BROADCAST_MAC_CTRL_FRAMES_LO, + IPN3KE_10G_TX_STATS_BROADCAST_MAC_CTRL_FRAMES_HI, + hw, port_id); + hw_stats->eth.tx_broadcast += statistics; + + /*36-bit statistics counter that collects the number of + *valid RX broadcast control frames. 
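+ *As on the TX side, these control-frame counts are added on top of
+ *the data-frame counts captured earlier, so the unicast, multicast
+ *and broadcast totals in hw_stats cover both frame types.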
+ */ + statistics = ipn3ke_rpst_read_36bits_statistics_register( + IPN3KE_10G_RX_STATS_BROADCAST_MAC_CTRL_FRAMES_LO, + IPN3KE_10G_RX_STATS_BROADCAST_MAC_CTRL_FRAMES_HI, + hw, port_id); + hw_stats->eth.rx_broadcast += statistics; + + /*36-bit statistics counter that collects the number of + *valid TX PFC frames. + */ + statistics = ipn3ke_rpst_read_36bits_statistics_register( + IPN3KE_10G_TX_STATS_PFC_MAC_CTRL_FRAMES_LO, + IPN3KE_10G_TX_STATS_PFC_MAC_CTRL_FRAMES_HI, + hw, port_id); + + /*36-bit statistics counter that collects the number of + *valid RX PFC frames. + */ + statistics = ipn3ke_rpst_read_36bits_statistics_register( + IPN3KE_10G_RX_STATS_PFC_MAC_CTRL_FRAMES_LO, + IPN3KE_10G_RX_STATS_PFC_MAC_CTRL_FRAMES_HI, + hw, port_id); + + return 0; +} + +static void +ipn3ke_rpst_10g_lineside_tx_stats_reset(struct ipn3ke_hw *hw, +uint16_t port_id) +{ + uint32_t tmp; + + /*Bit [0]: Set this register to 1 to clear all TX statistics + *counters. + *The IP core clears this bit when all counters are cleared. + *Bits [31:1]: Reserved. + */ + tmp = 0x00000000; + (*hw->f_mac_read)(hw, + &tmp, + IPN3KE_10G_TX_STATS_CLR, + port_id, + 0); + tmp |= 0x00000001; + (*hw->f_mac_write)(hw, + tmp, + IPN3KE_10G_TX_STATS_CLR, + port_id, + 0); +} + +static void +ipn3ke_rpst_10g_lineside_rx_stats_reset(struct ipn3ke_hw *hw, +uint16_t port_id) +{ + uint32_t tmp; + + /*Bit [0]: Set this register to 1 to clear all RX statistics + *counters. + *The IP core clears this bit when all counters are cleared. + *Bits [31:1]: Reserved + */ + tmp = 0x00000000; + (*hw->f_mac_read)(hw, + &tmp, + IPN3KE_10G_RX_STATS_CLR, + port_id, + 0); + tmp |= 0x00000001; + (*hw->f_mac_write)(hw, + tmp, + IPN3KE_10G_RX_STATS_CLR, + port_id, + 0); +} + +static int +ipn3ke_rpst_stats_reset(struct rte_eth_dev *ethdev) +{ + uint16_t port_id = 0; + char *ch; + int cnt = 0; + struct rte_afu_device *afu_dev = NULL; + struct ipn3ke_hw *hw = NULL; + + if (!ethdev) { + IPN3KE_AFU_PMD_ERR("ethernet device to reset is NULL!"); + return -EINVAL; + } + + afu_dev = RTE_ETH_DEV_TO_AFU(ethdev); + if (!afu_dev) { + IPN3KE_AFU_PMD_ERR("afu device to reset is NULL!"); + return -EINVAL; + } + + if (!afu_dev->shared.data) { + IPN3KE_AFU_PMD_ERR("hardware data to reset is NULL!"); + return -EINVAL; + } + + hw = afu_dev->shared.data; + + ch = ethdev->data->name; + if (!ch) { + IPN3KE_AFU_PMD_ERR("ethdev name is NULL!"); + return -EINVAL; + } + while (ch) { + if (*ch == '_') + cnt++; + ch++; + if (cnt == 3) + break; + } + if (!ch) { + IPN3KE_AFU_PMD_ERR("Can not get port_id from ethdev name!"); + return -EINVAL; + } + port_id = atoi(ch); + + if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_25GE_25GAUI) { + ipn3ke_rpst_25g_nic_side_tx_stats_reset(hw, port_id); + ipn3ke_rpst_25g_nic_side_rx_stats_reset(hw, port_id); + ipn3ke_rpst_25g_lineside_tx_stats_reset(hw, port_id); + ipn3ke_rpst_25g_lineside_rx_stats_reset(hw, port_id); + } else if (hw->retimer.mac_type == + IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) { + ipn3ke_rpst_10g_nic_side_tx_stats_reset(hw, port_id); + ipn3ke_rpst_10g_nic_side_rx_stats_reset(hw, port_id); + ipn3ke_rpst_10g_lineside_tx_stats_reset(hw, port_id); + ipn3ke_rpst_10g_lineside_rx_stats_reset(hw, port_id); + } + + return 0; +} + +static int +ipn3ke_rpst_stats_get +(struct rte_eth_dev *ethdev, struct rte_eth_stats *stats) +{ + uint16_t port_id = 0; + char *ch; + int cnt = 0; + int i = 0; + struct rte_afu_device *afu_dev = NULL; + struct ipn3ke_hw *hw = NULL; + struct ipn3ke_rpst_hw_port_stats hw_stats; + + if (!ethdev) { + 
IPN3KE_AFU_PMD_ERR("ethernet device to get statistics is NULL"); + return -EINVAL; + } + if (!stats) { + IPN3KE_AFU_PMD_ERR("Address to return statistics is NULL!"); + return -EINVAL; + } + + afu_dev = RTE_ETH_DEV_TO_AFU(ethdev); + if (!afu_dev) { + IPN3KE_AFU_PMD_ERR("afu device to get statistics is NULL!"); + return -EINVAL; + } + + if (!afu_dev->shared.data) { + IPN3KE_AFU_PMD_ERR("hardware data to get statistics is NULL!"); + return -EINVAL; + } + + hw = afu_dev->shared.data; + + ch = ethdev->data->name; + if (!ch) { + IPN3KE_AFU_PMD_ERR("ethdev name is NULL!"); + return -EINVAL; + } + while (ch) { + if (*ch == '_') + cnt++; + ch++; + if (cnt == 3) + break; + } + if (!ch) { + IPN3KE_AFU_PMD_ERR("Can not get port_id from ethdev name!"); + return -EINVAL; + } + port_id = atoi(ch); + + if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_25GE_25GAUI) { + ipn3ke_rpst_read_25g_lineside_stats_registers(hw, + port_id, + &hw_stats); + + stats->ipackets = hw_stats.rx_size_64 + + hw_stats.rx_size_65_127 + + hw_stats.rx_size_128_255 + + hw_stats.rx_size_256_511 + + hw_stats.rx_size_512_1023 + + hw_stats.rx_size_1024_1518 + + hw_stats.rx_size_big + + hw_stats.rx_undersize + + hw_stats.rx_fragments + + hw_stats.rx_oversize + + hw_stats.rx_jabber; + stats->opackets = hw_stats.tx_size_64 + + hw_stats.tx_size_65_127 + + hw_stats.tx_size_128_255 + + hw_stats.tx_size_256_511 + + hw_stats.tx_size_512_1023 + + hw_stats.tx_size_1024_1518 + + hw_stats.tx_size_1519_to_max; + stats->ibytes = hw_stats.eth.rx_bytes; + stats->obytes = hw_stats.eth.tx_bytes; + stats->imissed = 0; + stats->ierrors = hw_stats.eth.rx_discards + + hw_stats.eth.rx_unknown_protocol; + stats->oerrors = hw_stats.eth.tx_discards + + hw_stats.eth.tx_errors; + stats->rx_nombuf = 0; + for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { + stats->q_ipackets[i] = 0; + stats->q_opackets[i] = 0; + stats->q_ibytes[i] = 0; + stats->q_obytes[i] = 0; + stats->q_errors[i] = 0; + } + } else { + ipn3ke_rpst_read_10g_lineside_stats_registers(hw, + port_id, + &hw_stats, + stats); + } + + return 0; +} + +static int +ipn3ke_rpst_xstats_get +(struct rte_eth_dev *ethdev, struct rte_eth_xstat *xstats, unsigned int n) +{ + uint16_t port_id = 0; + char *ch = NULL; + int cnt = 0; + unsigned int i, count, prio; + struct rte_afu_device *afu_dev = NULL; + struct ipn3ke_hw *hw = NULL; + struct ipn3ke_rpst_hw_port_stats hw_stats; + struct rte_eth_stats stats; + + if (!xstats) + return 0; + + if (!ethdev) { + IPN3KE_AFU_PMD_ERR("ethernet device to get statistics is NULL"); + return -EINVAL; + } + + afu_dev = RTE_ETH_DEV_TO_AFU(ethdev); + if (!afu_dev) { + IPN3KE_AFU_PMD_ERR("afu device to get statistics is NULL!"); + return -EINVAL; + } + + if (!afu_dev->shared.data) { + IPN3KE_AFU_PMD_ERR("hardware data to get statistics is NULL!"); + return -EINVAL; + } + + hw = afu_dev->shared.data; + + ch = ethdev->data->name; + if (!ch) { + IPN3KE_AFU_PMD_ERR("ethdev name is NULL!"); + return -EINVAL; + } + while (ch) { + if (*ch == '_') + cnt++; + ch++; + if (cnt == 3) + break; + } + if (!ch) { + IPN3KE_AFU_PMD_ERR("Can not get port_id from ethdev name!"); + return -EINVAL; + } + port_id = atoi(ch); + + count = ipn3ke_rpst_xstats_calc_num(); + if (n < count) + return count; + + if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_25GE_25GAUI) { + ipn3ke_rpst_read_25g_lineside_stats_registers(hw, + port_id, + &hw_stats); + } else { + ipn3ke_rpst_read_10g_lineside_stats_registers(hw, + port_id, + &hw_stats, + &stats); + } + + count = 0; + + /* Get stats from 
ipn3ke_rpst_stats */ + for (i = 0; i < IPN3KE_RPST_ETH_XSTATS_CNT; i++) { + xstats[count].value = *(uint64_t *)(((char *)&hw_stats.eth) + + ipn3ke_rpst_stats_strings[i].offset); + xstats[count].id = count; + count++; + } + + /* Get individiual stats from ipn3ke_rpst_hw_port */ + for (i = 0; i < IPN3KE_RPST_HW_PORT_XSTATS_CNT; i++) { + xstats[count].value = *(uint64_t *)(((char *)(&hw_stats)) + + ipn3ke_rpst_hw_port_strings[i].offset); + xstats[count].id = count; + count++; + } + + /* Get individiual stats from ipn3ke_rpst_rxq_pri */ + for (i = 0; i < IPN3KE_RPST_RXQ_PRIO_XSTATS_CNT; i++) { + for (prio = 0; prio < IPN3KE_RPST_PRIO_XSTATS_CNT; prio++) { + xstats[count].value = + *(uint64_t *)(((char *)(&hw_stats)) + + ipn3ke_rpst_rxq_prio_strings[i].offset + + (sizeof(uint64_t) * prio)); + xstats[count].id = count; + count++; + } + } + + /* Get individiual stats from ipn3ke_rpst_txq_prio */ + for (i = 0; i < IPN3KE_RPST_TXQ_PRIO_XSTATS_CNT; i++) { + for (prio = 0; prio < IPN3KE_RPST_PRIO_XSTATS_CNT; prio++) { + xstats[count].value = + *(uint64_t *)(((char *)(&hw_stats)) + + ipn3ke_rpst_txq_prio_strings[i].offset + + (sizeof(uint64_t) * prio)); + xstats[count].id = count; + count++; + } + } + + return count; +} + +static int +ipn3ke_rpst_xstats_get_names +(__rte_unused struct rte_eth_dev *dev, +struct rte_eth_xstat_name *xstats_names, +__rte_unused unsigned int limit) +{ + unsigned int count = 0; + unsigned int i, prio; + + if (!xstats_names) + return ipn3ke_rpst_xstats_calc_num(); + + /* Note: limit checked in rte_eth_xstats_names() */ + + /* Get stats from ipn3ke_rpst_stats */ + for (i = 0; i < IPN3KE_RPST_ETH_XSTATS_CNT; i++) { + snprintf(xstats_names[count].name, + sizeof(xstats_names[count].name), + "%s", + ipn3ke_rpst_stats_strings[i].name); + count++; + } + + /* Get individiual stats from ipn3ke_rpst_hw_port */ + for (i = 0; i < IPN3KE_RPST_HW_PORT_XSTATS_CNT; i++) { + snprintf(xstats_names[count].name, + sizeof(xstats_names[count].name), + "%s", + ipn3ke_rpst_hw_port_strings[i].name); + count++; + } + + /* Get individiual stats from ipn3ke_rpst_rxq_pri */ + for (i = 0; i < IPN3KE_RPST_RXQ_PRIO_XSTATS_CNT; i++) { + for (prio = 0; prio < 8; prio++) { + snprintf(xstats_names[count].name, + sizeof(xstats_names[count].name), + "rx_priority%u_%s", + prio, + ipn3ke_rpst_rxq_prio_strings[i].name); + count++; + } + } + + /* Get individiual stats from ipn3ke_rpst_txq_prio */ + for (i = 0; i < IPN3KE_RPST_TXQ_PRIO_XSTATS_CNT; i++) { + for (prio = 0; prio < 8; prio++) { + snprintf(xstats_names[count].name, + sizeof(xstats_names[count].name), + "tx_priority%u_%s", + prio, + ipn3ke_rpst_txq_prio_strings[i].name); + count++; + } + } + return count; +} + +static void +ipn3ke_update_link(struct rte_rawdev *rawdev, + uint16_t port, struct rte_eth_link *link) +{ + uint64_t line_link_bitmap = 0; + enum ifpga_rawdev_link_speed link_speed; + + rawdev->dev_ops->attr_get(rawdev, + "LineSideLinkStatus", + (uint64_t *)&line_link_bitmap); + + /* Parse the link status */ + if ((1 << port) & line_link_bitmap) + link->link_status = 1; + else + link->link_status = 0; + + IPN3KE_AFU_PMD_DEBUG("port is %d\n", port); + IPN3KE_AFU_PMD_DEBUG("link->link_status is %d\n", link->link_status); + + rawdev->dev_ops->attr_get(rawdev, + "LineSideLinkSpeed", + (uint64_t *)&link_speed); + switch (link_speed) { + case IFPGA_RAWDEV_LINK_SPEED_10GB: + link->link_speed = ETH_SPEED_NUM_10G; + break; + case IFPGA_RAWDEV_LINK_SPEED_25GB: + link->link_speed = ETH_SPEED_NUM_25G; + break; + default: + IPN3KE_AFU_PMD_ERR("Unknown link speed 
info %u", link_speed); + break; + } +} + +/* + * Set device link up. + */ +int +ipn3ke_rpst_dev_set_link_up(struct rte_eth_dev *dev) +{ + struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(dev); + struct rte_eth_dev *pf; + int ret = 0; + + if (rpst->i40e_pf_eth) { + ret = rte_eth_dev_set_link_up(rpst->i40e_pf_eth_port_id); + pf = rpst->i40e_pf_eth; + (*rpst->i40e_pf_eth->dev_ops->link_update)(pf, 1); + } + + return ret; +} + +/* + * Set device link down. + */ +int +ipn3ke_rpst_dev_set_link_down(struct rte_eth_dev *dev) +{ + struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(dev); + struct rte_eth_dev *pf; + int ret = 0; + + if (rpst->i40e_pf_eth) { + ret = rte_eth_dev_set_link_down(rpst->i40e_pf_eth_port_id); + pf = rpst->i40e_pf_eth; + (*rpst->i40e_pf_eth->dev_ops->link_update)(pf, 1); + } + + return ret; +} + +int +ipn3ke_rpst_link_update(struct rte_eth_dev *ethdev, + __rte_unused int wait_to_complete) +{ + struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(ethdev); + struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(ethdev); + struct rte_rawdev *rawdev; + struct rte_eth_link link; + struct rte_eth_dev *pf; + + memset(&link, 0, sizeof(link)); + + link.link_duplex = ETH_LINK_FULL_DUPLEX; + link.link_autoneg = !(ethdev->data->dev_conf.link_speeds & + ETH_LINK_SPEED_FIXED); + + rawdev = hw->rawdev; + ipn3ke_update_link(rawdev, rpst->port_id, &link); + + if (!rpst->ori_linfo.link_status && + link.link_status) { + IPN3KE_AFU_PMD_DEBUG("Update Rpst %d Up\n", rpst->port_id); + rpst->ori_linfo.link_status = link.link_status; + rpst->ori_linfo.link_speed = link.link_speed; + + rte_eth_linkstatus_set(ethdev, &link); + + if (rpst->i40e_pf_eth) { + IPN3KE_AFU_PMD_DEBUG("Update FVL PF %d Up\n", + rpst->i40e_pf_eth_port_id); + rte_eth_dev_set_link_up(rpst->i40e_pf_eth_port_id); + pf = rpst->i40e_pf_eth; + (*rpst->i40e_pf_eth->dev_ops->link_update)(pf, 1); + } + } else if (rpst->ori_linfo.link_status && + !link.link_status) { + IPN3KE_AFU_PMD_DEBUG("Update Rpst %d Down\n", + rpst->port_id); + rpst->ori_linfo.link_status = link.link_status; + rpst->ori_linfo.link_speed = link.link_speed; + + rte_eth_linkstatus_set(ethdev, &link); + + if (rpst->i40e_pf_eth) { + IPN3KE_AFU_PMD_DEBUG("Update FVL PF %d Down\n", + rpst->i40e_pf_eth_port_id); + rte_eth_dev_set_link_down(rpst->i40e_pf_eth_port_id); + pf = rpst->i40e_pf_eth; + (*rpst->i40e_pf_eth->dev_ops->link_update)(pf, 1); + } + } + + return 0; +} + +static int +ipn3ke_rpst_link_check(struct ipn3ke_rpst *rpst) +{ + struct ipn3ke_hw *hw; + struct rte_rawdev *rawdev; + struct rte_eth_link link; + struct rte_eth_dev *pf; + + if (rpst == NULL) + return -1; + + hw = rpst->hw; + + memset(&link, 0, sizeof(link)); + + link.link_duplex = ETH_LINK_FULL_DUPLEX; + link.link_autoneg = !(rpst->ethdev->data->dev_conf.link_speeds & + ETH_LINK_SPEED_FIXED); + + rawdev = hw->rawdev; + ipn3ke_update_link(rawdev, rpst->port_id, &link); + + if (!rpst->ori_linfo.link_status && + link.link_status) { + IPN3KE_AFU_PMD_DEBUG("Check Rpst %d Up\n", rpst->port_id); + rpst->ori_linfo.link_status = link.link_status; + rpst->ori_linfo.link_speed = link.link_speed; + + rte_eth_linkstatus_set(rpst->ethdev, &link); + + if (rpst->i40e_pf_eth) { + IPN3KE_AFU_PMD_DEBUG("Check FVL PF %d Up\n", + rpst->i40e_pf_eth_port_id); + rte_eth_dev_set_link_up(rpst->i40e_pf_eth_port_id); + pf = rpst->i40e_pf_eth; + (*rpst->i40e_pf_eth->dev_ops->link_update)(pf, 1); + } + } else if (rpst->ori_linfo.link_status && + !link.link_status) { + IPN3KE_AFU_PMD_DEBUG("Check Rpst %d Down\n", rpst->port_id); + 
rpst->ori_linfo.link_status = link.link_status; + rpst->ori_linfo.link_speed = link.link_speed; + + rte_eth_linkstatus_set(rpst->ethdev, &link); + + if (rpst->i40e_pf_eth) { + IPN3KE_AFU_PMD_DEBUG("Check FVL PF %d Down\n", + rpst->i40e_pf_eth_port_id); + rte_eth_dev_set_link_down(rpst->i40e_pf_eth_port_id); + pf = rpst->i40e_pf_eth; + (*rpst->i40e_pf_eth->dev_ops->link_update)(pf, 1); + } + } + + return 0; +} + +static void * +ipn3ke_rpst_scan_handle_request(__rte_unused void *param) +{ + struct ipn3ke_rpst *rpst; + int num = 0; +#define MS 1000 +#define SCAN_NUM 32 + + for (;;) { + num = 0; + TAILQ_FOREACH(rpst, &ipn3ke_rpst_list, next) { + if (rpst->i40e_pf_eth && + rpst->ethdev->data->dev_started && + rpst->i40e_pf_eth->data->dev_started) + ipn3ke_rpst_link_check(rpst); + + if (++num > SCAN_NUM) + rte_delay_us(1 * MS); + } + rte_delay_us(50 * MS); + + if (num == 0xffffff) + return NULL; + } + + return NULL; +} + +static int +ipn3ke_rpst_scan_check(void) +{ + int ret; + + if (ipn3ke_rpst_scan_num == 1) { + ret = rte_ctrl_thread_create(&ipn3ke_rpst_scan_thread, + "ipn3ke scanner", + NULL, + ipn3ke_rpst_scan_handle_request, NULL); + if (ret) { + IPN3KE_AFU_PMD_ERR("Fail to create ipn3ke rpst scan thread"); + return -1; + } + } else if (ipn3ke_rpst_scan_num == 0) { + ret = pthread_cancel(ipn3ke_rpst_scan_thread); + if (ret) + IPN3KE_AFU_PMD_ERR("Can't cancel the thread"); + + ret = pthread_join(ipn3ke_rpst_scan_thread, NULL); + if (ret) + IPN3KE_AFU_PMD_ERR("Can't join the thread"); + + return ret; + } + + return 0; +} + +int +ipn3ke_rpst_promiscuous_enable(struct rte_eth_dev *ethdev) +{ + struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(ethdev); + struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(ethdev); + uint32_t rddata, val; + + if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) { + /* Enable all unicast */ + (*hw->f_mac_read)(hw, + &rddata, + IPN3KE_MAC_RX_FRAME_CONTROL, + rpst->port_id, + 0); + val = 1; + val &= IPN3KE_MAC_RX_FRAME_CONTROL_EN_ALLUCAST_MASK; + val |= rddata; + (*hw->f_mac_write)(hw, + val, + IPN3KE_MAC_RX_FRAME_CONTROL, + rpst->port_id, + 0); + } + + return 0; +} + +int +ipn3ke_rpst_promiscuous_disable(struct rte_eth_dev *ethdev) +{ + struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(ethdev); + struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(ethdev); + uint32_t rddata, val; + + if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) { + /* Disable all unicast */ + (*hw->f_mac_read)(hw, + &rddata, + IPN3KE_MAC_RX_FRAME_CONTROL, + rpst->port_id, + 0); + val = 0; + val &= IPN3KE_MAC_RX_FRAME_CONTROL_EN_ALLUCAST_MASK; + val |= rddata; + (*hw->f_mac_write)(hw, + val, + IPN3KE_MAC_RX_FRAME_CONTROL, + rpst->port_id, + 0); + } + + return 0; +} + +int +ipn3ke_rpst_allmulticast_enable(struct rte_eth_dev *ethdev) +{ + struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(ethdev); + struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(ethdev); + uint32_t rddata, val; + + if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) { + /* Enable all unicast */ + (*hw->f_mac_read)(hw, + &rddata, + IPN3KE_MAC_RX_FRAME_CONTROL, + rpst->port_id, + 0); + val = 1; + val <<= IPN3KE_MAC_RX_FRAME_CONTROL_EN_ALLMCAST_SHIFT; + val &= IPN3KE_MAC_RX_FRAME_CONTROL_EN_ALLMCAST_MASK; + val |= rddata; + (*hw->f_mac_write)(hw, + val, + IPN3KE_MAC_RX_FRAME_CONTROL, + rpst->port_id, + 0); + } + + return 0; +} + +int +ipn3ke_rpst_allmulticast_disable(struct rte_eth_dev *ethdev) +{ + struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(ethdev); + struct ipn3ke_rpst 
*rpst = IPN3KE_DEV_PRIVATE_TO_RPST(ethdev); + uint32_t rddata, val; + + if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) { + /* Disable all unicast */ + (*hw->f_mac_read)(hw, + &rddata, + IPN3KE_MAC_RX_FRAME_CONTROL, + rpst->port_id, + 0); + val = 0; + val <<= IPN3KE_MAC_RX_FRAME_CONTROL_EN_ALLMCAST_SHIFT; + val &= IPN3KE_MAC_RX_FRAME_CONTROL_EN_ALLMCAST_MASK; + val |= rddata; + (*hw->f_mac_write)(hw, + val, + IPN3KE_MAC_RX_FRAME_CONTROL, + rpst->port_id, + 0); + } + + return 0; +} + +int +ipn3ke_rpst_mac_addr_set(struct rte_eth_dev *ethdev, + struct rte_ether_addr *mac_addr) +{ + struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(ethdev); + struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(ethdev); + uint32_t val; + + if (!rte_is_valid_assigned_ether_addr(mac_addr)) { + IPN3KE_AFU_PMD_ERR("Tried to set invalid MAC address."); + return -EINVAL; + } + + if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) { + rte_ether_addr_copy(&mac_addr[0], &rpst->mac_addr); + + /* Set mac address */ + rte_memcpy(((char *)(&val)), &mac_addr[0], sizeof(uint32_t)); + (*hw->f_mac_write)(hw, + val, + IPN3KE_MAC_PRIMARY_MAC_ADDR0, + rpst->port_id, + 0); + rte_memcpy(((char *)(&val)), &mac_addr[4], sizeof(uint16_t)); + (*hw->f_mac_write)(hw, + val, + IPN3KE_MAC_PRIMARY_MAC_ADDR0, + rpst->port_id, + 0); + } + + return 0; +} + +int +ipn3ke_rpst_mtu_set(struct rte_eth_dev *ethdev, uint16_t mtu) +{ + int ret = 0; + struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(ethdev); + struct rte_eth_dev_data *dev_data = ethdev->data; + uint32_t frame_size = mtu + IPN3KE_ETH_OVERHEAD; + + /* check if mtu is within the allowed range */ + if (mtu < RTE_ETHER_MIN_MTU || + frame_size > IPN3KE_MAC_FRAME_SIZE_MAX) + return -EINVAL; + + /* mtu setting is forbidden if port is start */ + /* make sure NIC port is stopped */ + if (rpst->i40e_pf_eth && rpst->i40e_pf_eth->data->dev_started) { + IPN3KE_AFU_PMD_ERR("NIC port %d must " + "be stopped before configuration", + rpst->i40e_pf_eth->data->port_id); + return -EBUSY; + } + /* mtu setting is forbidden if port is start */ + if (dev_data->dev_started) { + IPN3KE_AFU_PMD_ERR("FPGA port %d must " + "be stopped before configuration", + dev_data->port_id); + return -EBUSY; + } + + if (frame_size > RTE_ETHER_MAX_LEN) + dev_data->dev_conf.rxmode.offloads |= + (uint64_t)(DEV_RX_OFFLOAD_JUMBO_FRAME); + else + dev_data->dev_conf.rxmode.offloads &= + (uint64_t)(~DEV_RX_OFFLOAD_JUMBO_FRAME); + + dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size; + + if (rpst->i40e_pf_eth) { + ret = rpst->i40e_pf_eth->dev_ops->mtu_set(rpst->i40e_pf_eth, + mtu); + if (!ret) + rpst->i40e_pf_eth->data->mtu = mtu; + } + + return ret; +} + +static int +ipn3ke_afu_filter_ctrl(struct rte_eth_dev *ethdev, + enum rte_filter_type filter_type, enum rte_filter_op filter_op, + void *arg) +{ + int ret = 0; + struct ipn3ke_hw *hw; + struct ipn3ke_rpst *rpst; + + if (ethdev == NULL) + return -EINVAL; + + hw = IPN3KE_DEV_PRIVATE_TO_HW(ethdev); + rpst = IPN3KE_DEV_PRIVATE_TO_RPST(ethdev); + + if (hw->acc_flow) + switch (filter_type) { + case RTE_ETH_FILTER_GENERIC: + if (filter_op != RTE_ETH_FILTER_GET) + return -EINVAL; + *(const void **)arg = &ipn3ke_flow_ops; + break; + default: + IPN3KE_AFU_PMD_WARN("Filter type (%d) not supported", + filter_type); + ret = -EINVAL; + break; + } + else if (rpst->i40e_pf_eth) + (*rpst->i40e_pf_eth->dev_ops->filter_ctrl)(ethdev, + filter_type, + filter_op, + arg); + else + return -EINVAL; + + return ret; +} + +static const struct eth_dev_ops 
ipn3ke_rpst_dev_ops = { + .dev_infos_get = ipn3ke_rpst_dev_infos_get, + + .dev_configure = ipn3ke_rpst_dev_configure, + .dev_start = ipn3ke_rpst_dev_start, + .dev_stop = ipn3ke_rpst_dev_stop, + .dev_close = ipn3ke_rpst_dev_close, + .dev_reset = ipn3ke_rpst_dev_reset, + + .stats_get = ipn3ke_rpst_stats_get, + .xstats_get = ipn3ke_rpst_xstats_get, + .xstats_get_names = ipn3ke_rpst_xstats_get_names, + .stats_reset = ipn3ke_rpst_stats_reset, + .xstats_reset = ipn3ke_rpst_stats_reset, + + .filter_ctrl = ipn3ke_afu_filter_ctrl, + + .rx_queue_start = ipn3ke_rpst_rx_queue_start, + .rx_queue_stop = ipn3ke_rpst_rx_queue_stop, + .tx_queue_start = ipn3ke_rpst_tx_queue_start, + .tx_queue_stop = ipn3ke_rpst_tx_queue_stop, + .rx_queue_setup = ipn3ke_rpst_rx_queue_setup, + .rx_queue_release = ipn3ke_rpst_rx_queue_release, + .tx_queue_setup = ipn3ke_rpst_tx_queue_setup, + .tx_queue_release = ipn3ke_rpst_tx_queue_release, + + .dev_set_link_up = ipn3ke_rpst_dev_set_link_up, + .dev_set_link_down = ipn3ke_rpst_dev_set_link_down, + .link_update = ipn3ke_rpst_link_update, + + .promiscuous_enable = ipn3ke_rpst_promiscuous_enable, + .promiscuous_disable = ipn3ke_rpst_promiscuous_disable, + .allmulticast_enable = ipn3ke_rpst_allmulticast_enable, + .allmulticast_disable = ipn3ke_rpst_allmulticast_disable, + .mac_addr_set = ipn3ke_rpst_mac_addr_set, + .mtu_set = ipn3ke_rpst_mtu_set, + + .tm_ops_get = ipn3ke_tm_ops_get, +}; + +static uint16_t ipn3ke_rpst_recv_pkts(__rte_unused void *rx_q, + __rte_unused struct rte_mbuf **rx_pkts, __rte_unused uint16_t nb_pkts) +{ + return 0; +} + +static uint16_t +ipn3ke_rpst_xmit_pkts(__rte_unused void *tx_queue, + __rte_unused struct rte_mbuf **tx_pkts, __rte_unused uint16_t nb_pkts) +{ + return 0; +} + +int +ipn3ke_rpst_init(struct rte_eth_dev *ethdev, void *init_params) +{ + struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(ethdev); + struct ipn3ke_rpst *representor_param = + (struct ipn3ke_rpst *)init_params; + + if (representor_param->port_id >= representor_param->hw->port_num) + return -ENODEV; + + if (ipn3ke_bridge_func.set_i40e_sw_dev == NULL) + return -ENOMEM; + + rpst->ethdev = ethdev; + rpst->switch_domain_id = representor_param->switch_domain_id; + rpst->port_id = representor_param->port_id; + rpst->hw = representor_param->hw; + rpst->i40e_pf_eth = representor_param->i40e_pf_eth; + rpst->i40e_pf_eth_port_id = representor_param->i40e_pf_eth_port_id; + if (rpst->i40e_pf_eth) + ipn3ke_bridge_func.set_i40e_sw_dev(rpst->i40e_pf_eth_port_id, + rpst->ethdev); + + ethdev->data->mac_addrs = rte_zmalloc("ipn3ke", RTE_ETHER_ADDR_LEN, 0); + if (!ethdev->data->mac_addrs) { + IPN3KE_AFU_PMD_ERR("Failed to " + "allocated memory for storing mac address"); + return -ENODEV; + } + + if (rpst->hw->tm_hw_enable) + ipn3ke_tm_init(rpst); + + /* Set representor device ops */ + ethdev->dev_ops = &ipn3ke_rpst_dev_ops; + + /* No data-path, but need stub Rx/Tx functions to avoid crash + * when testing with the likes of testpmd. 
+ */ + ethdev->rx_pkt_burst = ipn3ke_rpst_recv_pkts; + ethdev->tx_pkt_burst = ipn3ke_rpst_xmit_pkts; + + ethdev->data->nb_rx_queues = 1; + ethdev->data->nb_tx_queues = 1; + + ethdev->data->mac_addrs = rte_zmalloc("ipn3ke_afu_representor", + RTE_ETHER_ADDR_LEN, + 0); + if (!ethdev->data->mac_addrs) { + IPN3KE_AFU_PMD_ERR("Failed to " + "allocated memory for storing mac address"); + return -ENODEV; + } + + ethdev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR; + + rte_spinlock_lock(&ipn3ke_link_notify_list_lk); + TAILQ_INSERT_TAIL(&ipn3ke_rpst_list, rpst, next); + ipn3ke_rpst_scan_num++; + ipn3ke_rpst_scan_check(); + rte_spinlock_unlock(&ipn3ke_link_notify_list_lk); + + return 0; +} + +int +ipn3ke_rpst_uninit(struct rte_eth_dev *ethdev) +{ + struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(ethdev); + + rte_spinlock_lock(&ipn3ke_link_notify_list_lk); + TAILQ_REMOVE(&ipn3ke_rpst_list, rpst, next); + ipn3ke_rpst_scan_num--; + ipn3ke_rpst_scan_check(); + rte_spinlock_unlock(&ipn3ke_link_notify_list_lk); + + return 0; +} diff --git a/src/spdk/dpdk/drivers/net/ipn3ke/ipn3ke_tm.c b/src/spdk/dpdk/drivers/net/ipn3ke/ipn3ke_tm.c new file mode 100644 index 000000000..5a16c5f96 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ipn3ke/ipn3ke_tm.c @@ -0,0 +1,2055 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019 Intel Corporation + */ + +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "ipn3ke_rawdev_api.h" +#include "ipn3ke_flow.h" +#include "ipn3ke_logs.h" +#include "ipn3ke_ethdev.h" + +#define BYTES_IN_MBPS (1000 * 1000 / 8) +#define SUBPORT_TC_PERIOD 10 +#define PIPE_TC_PERIOD 40 + +struct ipn3ke_tm_shaper_params_range_type { + uint32_t m1; + uint32_t m2; + uint32_t exp; + uint32_t exp2; + uint32_t low; + uint32_t high; +}; +struct ipn3ke_tm_shaper_params_range_type ipn3ke_tm_shaper_params_rang[] = { + { 0, 1, 0, 1, 0, 4}, + { 2, 3, 0, 1, 8, 12}, + { 4, 7, 0, 1, 16, 28}, + { 8, 15, 0, 1, 32, 60}, + { 16, 31, 0, 1, 64, 124}, + { 32, 63, 0, 1, 128, 252}, + { 64, 127, 0, 1, 256, 508}, + {128, 255, 0, 1, 512, 1020}, + {256, 511, 0, 1, 1024, 2044}, + {512, 1023, 0, 1, 2048, 4092}, + {512, 1023, 1, 2, 4096, 8184}, + {512, 1023, 2, 4, 8192, 16368}, + {512, 1023, 3, 8, 16384, 32736}, + {512, 1023, 4, 16, 32768, 65472}, + {512, 1023, 5, 32, 65536, 130944}, + {512, 1023, 6, 64, 131072, 261888}, + {512, 1023, 7, 128, 262144, 523776}, + {512, 1023, 8, 256, 524288, 1047552}, + {512, 1023, 9, 512, 1048576, 2095104}, + {512, 1023, 10, 1024, 2097152, 4190208}, + {512, 1023, 11, 2048, 4194304, 8380416}, + {512, 1023, 12, 4096, 8388608, 16760832}, + {512, 1023, 13, 8192, 16777216, 33521664}, + {512, 1023, 14, 16384, 33554432, 67043328}, + {512, 1023, 15, 32768, 67108864, 134086656}, +}; + +#define IPN3KE_TM_SHAPER_RANGE_NUM (sizeof(ipn3ke_tm_shaper_params_rang) / \ + sizeof(struct ipn3ke_tm_shaper_params_range_type)) + +#define IPN3KE_TM_SHAPER_COMMITTED_RATE_MAX \ + (ipn3ke_tm_shaper_params_rang[IPN3KE_TM_SHAPER_RANGE_NUM - 1].high) + +#define IPN3KE_TM_SHAPER_PEAK_RATE_MAX \ + (ipn3ke_tm_shaper_params_rang[IPN3KE_TM_SHAPER_RANGE_NUM - 1].high) + +int +ipn3ke_hw_tm_init(struct ipn3ke_hw *hw) +{ +#define SCRATCH_DATA 0xABCDEF + struct ipn3ke_tm_node *nodes; + struct ipn3ke_tm_tdrop_profile *tdrop_profile; + int node_num; + int i; + + if (hw == NULL) + return -EINVAL; +#if IPN3KE_TM_SCRATCH_RW + uint32_t scratch_data; + IPN3KE_MASK_WRITE_REG(hw, + IPN3KE_TM_SCRATCH, + 0, + 
SCRATCH_DATA, + 0xFFFFFFFF); + scratch_data = IPN3KE_MASK_READ_REG(hw, + IPN3KE_TM_SCRATCH, + 0, + 0xFFFFFFFF); + if (scratch_data != SCRATCH_DATA) + return -EINVAL; +#endif + /* alloc memory for all hierarchy nodes */ + node_num = hw->port_num + + IPN3KE_TM_VT_NODE_NUM + + IPN3KE_TM_COS_NODE_NUM; + + nodes = rte_zmalloc("ipn3ke_tm_nodes", + sizeof(struct ipn3ke_tm_node) * node_num, + 0); + if (!nodes) + return -ENOMEM; + + /* alloc memory for Tail Drop Profile */ + tdrop_profile = rte_zmalloc("ipn3ke_tm_tdrop_profile", + sizeof(struct ipn3ke_tm_tdrop_profile) * + IPN3KE_TM_TDROP_PROFILE_NUM, + 0); + if (!tdrop_profile) { + rte_free(nodes); + return -ENOMEM; + } + + hw->nodes = nodes; + hw->port_nodes = nodes; + hw->vt_nodes = hw->port_nodes + hw->port_num; + hw->cos_nodes = hw->vt_nodes + IPN3KE_TM_VT_NODE_NUM; + hw->tdrop_profile = tdrop_profile; + hw->tdrop_profile_num = IPN3KE_TM_TDROP_PROFILE_NUM; + + for (i = 0, nodes = hw->port_nodes; + i < hw->port_num; + i++, nodes++) { + nodes->node_index = i; + nodes->level = IPN3KE_TM_NODE_LEVEL_PORT; + nodes->tm_id = RTE_TM_NODE_ID_NULL; + nodes->node_state = IPN3KE_TM_NODE_STATE_IDLE; + nodes->parent_node_id = RTE_TM_NODE_ID_NULL; + nodes->priority = IPN3KE_TM_NODE_PRIORITY_NORMAL0; + nodes->weight = 0; + nodes->parent_node = NULL; + nodes->shaper_profile.valid = 0; + nodes->tdrop_profile = NULL; + nodes->n_children = 0; + TAILQ_INIT(&nodes->children_node_list); + } + + for (i = 0, nodes = hw->vt_nodes; + i < IPN3KE_TM_VT_NODE_NUM; + i++, nodes++) { + nodes->node_index = i; + nodes->level = IPN3KE_TM_NODE_LEVEL_VT; + nodes->tm_id = RTE_TM_NODE_ID_NULL; + nodes->node_state = IPN3KE_TM_NODE_STATE_IDLE; + nodes->parent_node_id = RTE_TM_NODE_ID_NULL; + nodes->priority = IPN3KE_TM_NODE_PRIORITY_NORMAL0; + nodes->weight = 0; + nodes->parent_node = NULL; + nodes->shaper_profile.valid = 0; + nodes->tdrop_profile = NULL; + nodes->n_children = 0; + TAILQ_INIT(&nodes->children_node_list); + } + + for (i = 0, nodes = hw->cos_nodes; + i < IPN3KE_TM_COS_NODE_NUM; + i++, nodes++) { + nodes->node_index = i; + nodes->level = IPN3KE_TM_NODE_LEVEL_COS; + nodes->tm_id = RTE_TM_NODE_ID_NULL; + nodes->node_state = IPN3KE_TM_NODE_STATE_IDLE; + nodes->parent_node_id = RTE_TM_NODE_ID_NULL; + nodes->priority = IPN3KE_TM_NODE_PRIORITY_NORMAL0; + nodes->weight = 0; + nodes->parent_node = NULL; + nodes->shaper_profile.valid = 0; + nodes->tdrop_profile = NULL; + nodes->n_children = 0; + TAILQ_INIT(&nodes->children_node_list); + } + + for (i = 0, tdrop_profile = hw->tdrop_profile; + i < IPN3KE_TM_TDROP_PROFILE_NUM; + i++, tdrop_profile++) { + tdrop_profile->tdrop_profile_id = i; + tdrop_profile->n_users = 0; + tdrop_profile->valid = 0; + } + + return 0; +} + +void +ipn3ke_tm_init(struct ipn3ke_rpst *rpst) +{ + struct ipn3ke_tm_internals *tm; + struct ipn3ke_tm_node *port_node; + + tm = &rpst->tm; + + port_node = &rpst->hw->port_nodes[rpst->port_id]; + tm->h.port_node = port_node; + + tm->h.n_shaper_profiles = 0; + tm->h.n_tdrop_profiles = 0; + tm->h.n_vt_nodes = 0; + tm->h.n_cos_nodes = 0; + + tm->h.port_commit_node = NULL; + TAILQ_INIT(&tm->h.vt_commit_node_list); + TAILQ_INIT(&tm->h.cos_commit_node_list); + + tm->hierarchy_frozen = 0; + tm->tm_started = 1; + tm->tm_id = rpst->port_id; +} + +static struct ipn3ke_tm_shaper_profile * +ipn3ke_hw_tm_shaper_profile_search(struct ipn3ke_hw *hw, + uint32_t shaper_profile_id, struct rte_tm_error *error) +{ + struct ipn3ke_tm_shaper_profile *sp = NULL; + uint32_t level_of_node_id; + uint32_t node_index; + + /* Shaper profile ID 
must not be NONE. */ + if (shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE) { + rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID, + NULL, + rte_strerror(EINVAL)); + + return NULL; + } + + level_of_node_id = shaper_profile_id / IPN3KE_TM_NODE_LEVEL_MOD; + node_index = shaper_profile_id % IPN3KE_TM_NODE_LEVEL_MOD; + + switch (level_of_node_id) { + case IPN3KE_TM_NODE_LEVEL_PORT: + if (node_index >= hw->port_num) + rte_tm_error_set(error, + EEXIST, + RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID, + NULL, + rte_strerror(EEXIST)); + else + sp = &hw->port_nodes[node_index].shaper_profile; + + break; + + case IPN3KE_TM_NODE_LEVEL_VT: + if (node_index >= IPN3KE_TM_VT_NODE_NUM) + rte_tm_error_set(error, + EEXIST, + RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID, + NULL, + rte_strerror(EEXIST)); + else + sp = &hw->vt_nodes[node_index].shaper_profile; + + break; + + case IPN3KE_TM_NODE_LEVEL_COS: + if (node_index >= IPN3KE_TM_COS_NODE_NUM) + rte_tm_error_set(error, + EEXIST, + RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID, + NULL, + rte_strerror(EEXIST)); + else + sp = &hw->cos_nodes[node_index].shaper_profile; + + break; + default: + rte_tm_error_set(error, + EEXIST, + RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID, + NULL, + rte_strerror(EEXIST)); + } + + return sp; +} + +static struct ipn3ke_tm_tdrop_profile * +ipn3ke_hw_tm_tdrop_profile_search(struct ipn3ke_hw *hw, + uint32_t tdrop_profile_id) +{ + struct ipn3ke_tm_tdrop_profile *tdrop_profile; + + if (tdrop_profile_id >= hw->tdrop_profile_num) + return NULL; + + tdrop_profile = &hw->tdrop_profile[tdrop_profile_id]; + if (tdrop_profile->valid) + return tdrop_profile; + + return NULL; +} + +static struct ipn3ke_tm_node * +ipn3ke_hw_tm_node_search(struct ipn3ke_hw *hw, uint32_t tm_id, + uint32_t node_id, uint32_t state_mask) +{ + uint32_t level_of_node_id; + uint32_t node_index; + struct ipn3ke_tm_node *n; + + level_of_node_id = node_id / IPN3KE_TM_NODE_LEVEL_MOD; + node_index = node_id % IPN3KE_TM_NODE_LEVEL_MOD; + + switch (level_of_node_id) { + case IPN3KE_TM_NODE_LEVEL_PORT: + if (node_index >= hw->port_num) + return NULL; + n = &hw->port_nodes[node_index]; + + break; + case IPN3KE_TM_NODE_LEVEL_VT: + if (node_index >= IPN3KE_TM_VT_NODE_NUM) + return NULL; + n = &hw->vt_nodes[node_index]; + + break; + case IPN3KE_TM_NODE_LEVEL_COS: + if (node_index >= IPN3KE_TM_COS_NODE_NUM) + return NULL; + n = &hw->cos_nodes[node_index]; + + break; + default: + return NULL; + } + + /* Check tm node status */ + if (n->node_state == IPN3KE_TM_NODE_STATE_IDLE) { + if (n->tm_id != RTE_TM_NODE_ID_NULL || + n->parent_node_id != RTE_TM_NODE_ID_NULL || + n->parent_node != NULL || + n->n_children > 0) { + IPN3KE_AFU_PMD_ERR("tm node check error %d", 1); + } + } else if (n->node_state < IPN3KE_TM_NODE_STATE_MAX) { + if (n->tm_id == RTE_TM_NODE_ID_NULL || + (level_of_node_id != IPN3KE_TM_NODE_LEVEL_PORT && + n->parent_node_id == RTE_TM_NODE_ID_NULL) || + (level_of_node_id != IPN3KE_TM_NODE_LEVEL_PORT && + n->parent_node == NULL)) { + IPN3KE_AFU_PMD_ERR("tm node check error %d", 1); + } + } else { + IPN3KE_AFU_PMD_ERR("tm node check error %d", 1); + } + + if (IPN3KE_BIT_ISSET(state_mask, n->node_state)) { + if (n->node_state == IPN3KE_TM_NODE_STATE_IDLE) + return n; + else if (n->tm_id == tm_id) + return n; + else + return NULL; + } else { + return NULL; + } +} + +/* Traffic manager node type get */ +static int +ipn3ke_pmd_tm_node_type_get(struct rte_eth_dev *dev, + uint32_t node_id, int *is_leaf, struct rte_tm_error *error) +{ + struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev); + 
struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev); + uint32_t tm_id; + struct ipn3ke_tm_node *node; + uint32_t state_mask; + + if (is_leaf == NULL) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_UNSPECIFIED, + NULL, + rte_strerror(EINVAL)); + + tm_id = tm->tm_id; + + state_mask = 0; + IPN3KE_BIT_SET(state_mask, IPN3KE_TM_NODE_STATE_COMMITTED); + node = ipn3ke_hw_tm_node_search(hw, tm_id, node_id, state_mask); + if (node_id == RTE_TM_NODE_ID_NULL || + node == NULL) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_NODE_ID, + NULL, + rte_strerror(EINVAL)); + + *is_leaf = (node->level == IPN3KE_TM_NODE_LEVEL_COS) ? 1 : 0; + + return 0; +} + +#define WRED_SUPPORTED 0 + +#define STATS_MASK_DEFAULT \ + (RTE_TM_STATS_N_PKTS | \ + RTE_TM_STATS_N_BYTES | \ + RTE_TM_STATS_N_PKTS_GREEN_DROPPED | \ + RTE_TM_STATS_N_BYTES_GREEN_DROPPED) + +#define STATS_MASK_QUEUE \ + (STATS_MASK_DEFAULT | RTE_TM_STATS_N_PKTS_QUEUED) + +/* Traffic manager capabilities get */ +static int +ipn3ke_tm_capabilities_get(__rte_unused struct rte_eth_dev *dev, + struct rte_tm_capabilities *cap, struct rte_tm_error *error) +{ + if (cap == NULL) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_CAPABILITIES, + NULL, + rte_strerror(EINVAL)); + + /* set all the parameters to 0 first. */ + memset(cap, 0, sizeof(*cap)); + + cap->n_nodes_max = 1 + IPN3KE_TM_COS_NODE_NUM + IPN3KE_TM_VT_NODE_NUM; + cap->n_levels_max = IPN3KE_TM_NODE_LEVEL_MAX; + + cap->non_leaf_nodes_identical = 0; + cap->leaf_nodes_identical = 1; + + cap->shaper_n_max = 1 + IPN3KE_TM_VT_NODE_NUM; + cap->shaper_private_n_max = 1 + IPN3KE_TM_VT_NODE_NUM; + cap->shaper_private_dual_rate_n_max = 0; + cap->shaper_private_rate_min = 1; + cap->shaper_private_rate_max = 1 + IPN3KE_TM_VT_NODE_NUM; + + cap->shaper_shared_n_max = 0; + cap->shaper_shared_n_nodes_per_shaper_max = 0; + cap->shaper_shared_n_shapers_per_node_max = 0; + cap->shaper_shared_dual_rate_n_max = 0; + cap->shaper_shared_rate_min = 0; + cap->shaper_shared_rate_max = 0; + + cap->shaper_pkt_length_adjust_min = RTE_TM_ETH_FRAMING_OVERHEAD_FCS; + cap->shaper_pkt_length_adjust_max = RTE_TM_ETH_FRAMING_OVERHEAD_FCS; + + cap->sched_n_children_max = IPN3KE_TM_COS_NODE_NUM; + cap->sched_sp_n_priorities_max = 3; + cap->sched_wfq_n_children_per_group_max = UINT32_MAX; + cap->sched_wfq_n_groups_max = 1; + cap->sched_wfq_weight_max = UINT32_MAX; + + cap->cman_wred_packet_mode_supported = 0; + cap->cman_wred_byte_mode_supported = 0; + cap->cman_head_drop_supported = 0; + cap->cman_wred_context_n_max = 0; + cap->cman_wred_context_private_n_max = 0; + cap->cman_wred_context_shared_n_max = 0; + cap->cman_wred_context_shared_n_nodes_per_context_max = 0; + cap->cman_wred_context_shared_n_contexts_per_node_max = 0; + + /** + * cap->mark_vlan_dei_supported = {0, 0, 0}; + * cap->mark_ip_ecn_tcp_supported = {0, 0, 0}; + * cap->mark_ip_ecn_sctp_supported = {0, 0, 0}; + * cap->mark_ip_dscp_supported = {0, 0, 0}; + */ + + cap->dynamic_update_mask = 0; + + cap->stats_mask = 0; + + return 0; +} + +/* Traffic manager level capabilities get */ +static int +ipn3ke_tm_level_capabilities_get(struct rte_eth_dev *dev, + uint32_t level_id, struct rte_tm_level_capabilities *cap, + struct rte_tm_error *error) +{ + struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev); + + if (cap == NULL) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_CAPABILITIES, + NULL, + rte_strerror(EINVAL)); + + if (level_id >= IPN3KE_TM_NODE_LEVEL_MAX) + return -rte_tm_error_set(error, + EINVAL, + 
RTE_TM_ERROR_TYPE_LEVEL_ID, + NULL, + rte_strerror(EINVAL)); + + /* set all the parameters to 0 first. */ + memset(cap, 0, sizeof(*cap)); + + switch (level_id) { + case IPN3KE_TM_NODE_LEVEL_PORT: + cap->n_nodes_max = hw->port_num; + cap->n_nodes_nonleaf_max = IPN3KE_TM_VT_NODE_NUM; + cap->n_nodes_leaf_max = 0; + cap->non_leaf_nodes_identical = 0; + cap->leaf_nodes_identical = 0; + + cap->nonleaf.shaper_private_supported = 0; + cap->nonleaf.shaper_private_dual_rate_supported = 0; + cap->nonleaf.shaper_private_rate_min = 1; + cap->nonleaf.shaper_private_rate_max = UINT32_MAX; + cap->nonleaf.shaper_shared_n_max = 0; + + cap->nonleaf.sched_n_children_max = IPN3KE_TM_VT_NODE_NUM; + cap->nonleaf.sched_sp_n_priorities_max = 1; + cap->nonleaf.sched_wfq_n_children_per_group_max = 0; + cap->nonleaf.sched_wfq_n_groups_max = 0; + cap->nonleaf.sched_wfq_weight_max = 0; + + cap->nonleaf.stats_mask = STATS_MASK_DEFAULT; + break; + + case IPN3KE_TM_NODE_LEVEL_VT: + cap->n_nodes_max = IPN3KE_TM_VT_NODE_NUM; + cap->n_nodes_nonleaf_max = IPN3KE_TM_COS_NODE_NUM; + cap->n_nodes_leaf_max = 0; + cap->non_leaf_nodes_identical = 0; + cap->leaf_nodes_identical = 0; + + cap->nonleaf.shaper_private_supported = 0; + cap->nonleaf.shaper_private_dual_rate_supported = 0; + cap->nonleaf.shaper_private_rate_min = 1; + cap->nonleaf.shaper_private_rate_max = UINT32_MAX; + cap->nonleaf.shaper_shared_n_max = 0; + + cap->nonleaf.sched_n_children_max = IPN3KE_TM_COS_NODE_NUM; + cap->nonleaf.sched_sp_n_priorities_max = 1; + cap->nonleaf.sched_wfq_n_children_per_group_max = 0; + cap->nonleaf.sched_wfq_n_groups_max = 0; + cap->nonleaf.sched_wfq_weight_max = 0; + + cap->nonleaf.stats_mask = STATS_MASK_DEFAULT; + break; + + case IPN3KE_TM_NODE_LEVEL_COS: + cap->n_nodes_max = IPN3KE_TM_COS_NODE_NUM; + cap->n_nodes_nonleaf_max = 0; + cap->n_nodes_leaf_max = IPN3KE_TM_COS_NODE_NUM; + cap->non_leaf_nodes_identical = 0; + cap->leaf_nodes_identical = 0; + + cap->leaf.shaper_private_supported = 0; + cap->leaf.shaper_private_dual_rate_supported = 0; + cap->leaf.shaper_private_rate_min = 0; + cap->leaf.shaper_private_rate_max = 0; + cap->leaf.shaper_shared_n_max = 0; + + cap->leaf.cman_head_drop_supported = 0; + cap->leaf.cman_wred_packet_mode_supported = WRED_SUPPORTED; + cap->leaf.cman_wred_byte_mode_supported = 0; + cap->leaf.cman_wred_context_private_supported = WRED_SUPPORTED; + cap->leaf.cman_wred_context_shared_n_max = 0; + + cap->leaf.stats_mask = STATS_MASK_QUEUE; + break; + + default: + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_LEVEL_ID, + NULL, + rte_strerror(EINVAL)); + break; + } + + return 0; +} + +/* Traffic manager node capabilities get */ +static int +ipn3ke_tm_node_capabilities_get(struct rte_eth_dev *dev, + uint32_t node_id, struct rte_tm_node_capabilities *cap, + struct rte_tm_error *error) +{ + struct ipn3ke_rpst *representor = IPN3KE_DEV_PRIVATE_TO_RPST(dev); + struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev); + struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev); + uint32_t tm_id; + struct ipn3ke_tm_node *tm_node; + uint32_t state_mask; + + if (cap == NULL) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_CAPABILITIES, + NULL, + rte_strerror(EINVAL)); + + tm_id = tm->tm_id; + + state_mask = 0; + IPN3KE_BIT_SET(state_mask, IPN3KE_TM_NODE_STATE_COMMITTED); + tm_node = ipn3ke_hw_tm_node_search(hw, tm_id, node_id, state_mask); + if (tm_node == NULL) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_NODE_ID, + NULL, + rte_strerror(EINVAL)); + + if 
(tm_node->tm_id != representor->port_id) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_NODE_ID, + NULL, + rte_strerror(EINVAL)); + + /* set all the parameters to 0 first. */ + memset(cap, 0, sizeof(*cap)); + + switch (tm_node->level) { + case IPN3KE_TM_NODE_LEVEL_PORT: + cap->shaper_private_supported = 1; + cap->shaper_private_dual_rate_supported = 0; + cap->shaper_private_rate_min = 1; + cap->shaper_private_rate_max = UINT32_MAX; + cap->shaper_shared_n_max = 0; + + cap->nonleaf.sched_n_children_max = IPN3KE_TM_VT_NODE_NUM; + cap->nonleaf.sched_sp_n_priorities_max = 1; + cap->nonleaf.sched_wfq_n_children_per_group_max = + IPN3KE_TM_VT_NODE_NUM; + cap->nonleaf.sched_wfq_n_groups_max = 1; + cap->nonleaf.sched_wfq_weight_max = 1; + + cap->stats_mask = STATS_MASK_DEFAULT; + break; + + case IPN3KE_TM_NODE_LEVEL_VT: + cap->shaper_private_supported = 1; + cap->shaper_private_dual_rate_supported = 0; + cap->shaper_private_rate_min = 1; + cap->shaper_private_rate_max = UINT32_MAX; + cap->shaper_shared_n_max = 0; + + cap->nonleaf.sched_n_children_max = IPN3KE_TM_COS_NODE_NUM; + cap->nonleaf.sched_sp_n_priorities_max = 1; + cap->nonleaf.sched_wfq_n_children_per_group_max = + IPN3KE_TM_COS_NODE_NUM; + cap->nonleaf.sched_wfq_n_groups_max = 1; + cap->nonleaf.sched_wfq_weight_max = 1; + + cap->stats_mask = STATS_MASK_DEFAULT; + break; + + case IPN3KE_TM_NODE_LEVEL_COS: + cap->shaper_private_supported = 0; + cap->shaper_private_dual_rate_supported = 0; + cap->shaper_private_rate_min = 0; + cap->shaper_private_rate_max = 0; + cap->shaper_shared_n_max = 0; + + cap->leaf.cman_head_drop_supported = 0; + cap->leaf.cman_wred_packet_mode_supported = WRED_SUPPORTED; + cap->leaf.cman_wred_byte_mode_supported = 0; + cap->leaf.cman_wred_context_private_supported = WRED_SUPPORTED; + cap->leaf.cman_wred_context_shared_n_max = 0; + + cap->stats_mask = STATS_MASK_QUEUE; + break; + default: + break; + } + + return 0; +} + +static int +ipn3ke_tm_shaper_parame_trans(struct rte_tm_shaper_params *profile, + struct ipn3ke_tm_shaper_profile *local_profile, + const struct ipn3ke_tm_shaper_params_range_type *ref_data) +{ + uint32_t i; + const struct ipn3ke_tm_shaper_params_range_type *r; + uint64_t rate; + + rate = profile->peak.rate; + for (i = 0, r = ref_data; i < IPN3KE_TM_SHAPER_RANGE_NUM; i++, r++) { + if (rate >= r->low && + rate <= r->high) { + local_profile->m = (rate / 4) / r->exp2; + local_profile->e = r->exp; + local_profile->rate = rate; + + return 0; + } + } + + return -1; +} + +static int +ipn3ke_tm_shaper_profile_add(struct rte_eth_dev *dev, + uint32_t shaper_profile_id, struct rte_tm_shaper_params *profile, + struct rte_tm_error *error) +{ + struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev); + struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev); + struct ipn3ke_tm_shaper_profile *sp; + + /* Shaper profile must not exist. */ + sp = ipn3ke_hw_tm_shaper_profile_search(hw, shaper_profile_id, error); + if (!sp || (sp && sp->valid)) + return -rte_tm_error_set(error, + EEXIST, + RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID, + NULL, + rte_strerror(EEXIST)); + + /* Profile must not be NULL. 
*/ + if (profile == NULL) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_SHAPER_PROFILE, + NULL, + rte_strerror(EINVAL)); + + /* Peak rate: non-zero, 32-bit */ + if (profile->peak.rate == 0 || + profile->peak.rate > IPN3KE_TM_SHAPER_PEAK_RATE_MAX) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_RATE, + NULL, + rte_strerror(EINVAL)); + + /* Peak size: non-zero, 32-bit */ + if (profile->peak.size != 0) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE, + NULL, + rte_strerror(EINVAL)); + + /* Dual-rate profiles are not supported. */ + if (profile->committed.rate > IPN3KE_TM_SHAPER_COMMITTED_RATE_MAX) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE, + NULL, + rte_strerror(EINVAL)); + + /* Packet length adjust: 24 bytes */ + if (profile->pkt_length_adjust != 0) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN, + NULL, + rte_strerror(EINVAL)); + + if (ipn3ke_tm_shaper_parame_trans(profile, + sp, + ipn3ke_tm_shaper_params_rang)) { + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_RATE, + NULL, + rte_strerror(EINVAL)); + } else { + sp->valid = 1; + rte_memcpy(&sp->params, profile, sizeof(sp->params)); + } + + tm->h.n_shaper_profiles++; + + return 0; +} + +/* Traffic manager shaper profile delete */ +static int +ipn3ke_tm_shaper_profile_delete(struct rte_eth_dev *dev, + uint32_t shaper_profile_id, struct rte_tm_error *error) +{ + struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev); + struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev); + struct ipn3ke_tm_shaper_profile *sp; + + /* Check existing */ + sp = ipn3ke_hw_tm_shaper_profile_search(hw, shaper_profile_id, error); + if (!sp || (sp && !sp->valid)) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID, + NULL, + rte_strerror(EINVAL)); + + sp->valid = 0; + tm->h.n_shaper_profiles--; + + return 0; +} + +static int +ipn3ke_tm_tdrop_profile_check(__rte_unused struct rte_eth_dev *dev, + uint32_t tdrop_profile_id, struct rte_tm_wred_params *profile, + struct rte_tm_error *error) +{ + enum rte_color color; + + /* TDROP profile ID must not be NONE. */ + if (tdrop_profile_id == RTE_TM_WRED_PROFILE_ID_NONE) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_WRED_PROFILE_ID, + NULL, + rte_strerror(EINVAL)); + + /* Profile must not be NULL. 
*/ + if (profile == NULL) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_WRED_PROFILE, + NULL, + rte_strerror(EINVAL)); + + /* TDROP profile should be in packet mode */ + if (profile->packet_mode != 0) + return -rte_tm_error_set(error, + ENOTSUP, + RTE_TM_ERROR_TYPE_WRED_PROFILE, + NULL, + rte_strerror(ENOTSUP)); + + /* min_th <= max_th, max_th > 0 */ + for (color = RTE_COLOR_GREEN; color <= RTE_COLOR_GREEN; color++) { + uint64_t min_th = profile->red_params[color].min_th; + uint64_t max_th = profile->red_params[color].max_th; + + if (((min_th >> IPN3KE_TDROP_TH1_SHIFT) >> + IPN3KE_TDROP_TH1_SHIFT) || + max_th != 0) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_WRED_PROFILE, + NULL, + rte_strerror(EINVAL)); + } + + return 0; +} + +static int +ipn3ke_hw_tm_tdrop_wr(struct ipn3ke_hw *hw, + struct ipn3ke_tm_tdrop_profile *tp) +{ + if (tp->valid) { + IPN3KE_MASK_WRITE_REG(hw, + IPN3KE_CCB_PROFILE_MS, + 0, + tp->th2, + IPN3KE_CCB_PROFILE_MS_MASK); + + IPN3KE_MASK_WRITE_REG(hw, + IPN3KE_CCB_PROFILE_P, + tp->tdrop_profile_id, + tp->th1, + IPN3KE_CCB_PROFILE_MASK); + } else { + IPN3KE_MASK_WRITE_REG(hw, + IPN3KE_CCB_PROFILE_MS, + 0, + 0, + IPN3KE_CCB_PROFILE_MS_MASK); + + IPN3KE_MASK_WRITE_REG(hw, + IPN3KE_CCB_PROFILE_P, + tp->tdrop_profile_id, + 0, + IPN3KE_CCB_PROFILE_MASK); + } + + return 0; +} + +/* Traffic manager TDROP profile add */ +static int +ipn3ke_tm_tdrop_profile_add(struct rte_eth_dev *dev, + uint32_t tdrop_profile_id, struct rte_tm_wred_params *profile, + struct rte_tm_error *error) +{ + struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev); + struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev); + struct ipn3ke_tm_tdrop_profile *tp; + int status; + uint64_t min_th; + uint32_t th1, th2; + + /* Check input params */ + status = ipn3ke_tm_tdrop_profile_check(dev, + tdrop_profile_id, + profile, + error); + if (status) + return status; + + /* Memory allocation */ + tp = &hw->tdrop_profile[tdrop_profile_id]; + + /* Fill in */ + tp->valid = 1; + min_th = profile->red_params[RTE_COLOR_GREEN].min_th; + th1 = (uint32_t)(min_th & IPN3KE_TDROP_TH1_MASK); + th2 = (uint32_t)((min_th >> IPN3KE_TDROP_TH1_SHIFT) & + IPN3KE_TDROP_TH2_MASK); + tp->th1 = th1; + tp->th2 = th2; + rte_memcpy(&tp->params, profile, sizeof(tp->params)); + + /* Add to list */ + tm->h.n_tdrop_profiles++; + + /* Write FPGA */ + ipn3ke_hw_tm_tdrop_wr(hw, tp); + + return 0; +} + +/* Traffic manager TDROP profile delete */ +static int +ipn3ke_tm_tdrop_profile_delete(struct rte_eth_dev *dev, + uint32_t tdrop_profile_id, struct rte_tm_error *error) +{ + struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev); + struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev); + struct ipn3ke_tm_tdrop_profile *tp; + + /* Check existing */ + tp = ipn3ke_hw_tm_tdrop_profile_search(hw, tdrop_profile_id); + if (tp == NULL) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_WRED_PROFILE_ID, + NULL, + rte_strerror(EINVAL)); + + /* Check unused */ + if (tp->n_users) + return -rte_tm_error_set(error, + EBUSY, + RTE_TM_ERROR_TYPE_WRED_PROFILE_ID, + NULL, + rte_strerror(EBUSY)); + + /* Set free */ + tp->valid = 0; + tm->h.n_tdrop_profiles--; + + /* Write FPGA */ + ipn3ke_hw_tm_tdrop_wr(hw, tp); + + return 0; +} + +static int +ipn3ke_tm_node_add_check_parameter(uint32_t tm_id, + uint32_t node_id, uint32_t parent_node_id, uint32_t priority, + uint32_t weight, uint32_t level_id, struct rte_tm_node_params *params, + struct rte_tm_error *error) +{ + uint32_t level_of_node_id; + uint32_t 
node_index; + uint32_t parent_level_id; + + if (node_id == RTE_TM_NODE_ID_NULL) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_NODE_ID, + NULL, + rte_strerror(EINVAL)); + + /* priority: must be 0, 1, 2, 3 */ + if (priority > IPN3KE_TM_NODE_PRIORITY_HIGHEST) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_NODE_PRIORITY, + NULL, + rte_strerror(EINVAL)); + + /* weight: must be 1 .. 255 */ + if (weight > IPN3KE_TM_NODE_WEIGHT_MAX) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_NODE_WEIGHT, + NULL, + rte_strerror(EINVAL)); + + /* check node id and parent id*/ + level_of_node_id = node_id / IPN3KE_TM_NODE_LEVEL_MOD; + if (level_of_node_id != level_id) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_NODE_ID, + NULL, + rte_strerror(EINVAL)); + node_index = node_id % IPN3KE_TM_NODE_LEVEL_MOD; + parent_level_id = parent_node_id / IPN3KE_TM_NODE_LEVEL_MOD; + switch (level_id) { + case IPN3KE_TM_NODE_LEVEL_PORT: + if (node_index != tm_id) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_NODE_ID, + NULL, + rte_strerror(EINVAL)); + if (parent_node_id != RTE_TM_NODE_ID_NULL) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID, + NULL, + rte_strerror(EINVAL)); + break; + + case IPN3KE_TM_NODE_LEVEL_VT: + if (node_index >= IPN3KE_TM_VT_NODE_NUM) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_NODE_ID, + NULL, + rte_strerror(EINVAL)); + if (parent_level_id != IPN3KE_TM_NODE_LEVEL_PORT) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID, + NULL, + rte_strerror(EINVAL)); + break; + + case IPN3KE_TM_NODE_LEVEL_COS: + if (node_index >= IPN3KE_TM_COS_NODE_NUM) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_NODE_ID, + NULL, + rte_strerror(EINVAL)); + if (parent_level_id != IPN3KE_TM_NODE_LEVEL_VT) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID, + NULL, + rte_strerror(EINVAL)); + break; + default: + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_LEVEL_ID, + NULL, + rte_strerror(EINVAL)); + } + + /* params: must not be NULL */ + if (params == NULL) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_NODE_PARAMS, + NULL, + rte_strerror(EINVAL)); + /* No shared shapers */ + if (params->n_shared_shapers != 0) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS, + NULL, + rte_strerror(EINVAL)); + return 0; +} + +static int +ipn3ke_tm_node_add_check_mount(uint32_t tm_id, + uint32_t node_id, uint32_t parent_node_id, uint32_t level_id, + struct rte_tm_error *error) +{ + uint32_t node_index; + uint32_t parent_index; + uint32_t parent_index1; + + node_index = node_id % IPN3KE_TM_NODE_LEVEL_MOD; + parent_index = parent_node_id % IPN3KE_TM_NODE_LEVEL_MOD; + parent_index1 = node_index / IPN3KE_TM_NODE_MOUNT_MAX; + switch (level_id) { + case IPN3KE_TM_NODE_LEVEL_PORT: + break; + + case IPN3KE_TM_NODE_LEVEL_VT: + if (parent_index != tm_id) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID, + NULL, + rte_strerror(EINVAL)); + break; + + case IPN3KE_TM_NODE_LEVEL_COS: + if (parent_index != parent_index1) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID, + NULL, + rte_strerror(EINVAL)); + break; + default: + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_LEVEL_ID, + NULL, + rte_strerror(EINVAL)); + } + + return 0; +} + +/* Traffic manager node add */ +static int 
+ipn3ke_tm_node_add(struct rte_eth_dev *dev, + uint32_t node_id, uint32_t parent_node_id, uint32_t priority, + uint32_t weight, uint32_t level_id, struct rte_tm_node_params *params, + struct rte_tm_error *error) +{ + struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev); + struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev); + uint32_t tm_id; + struct ipn3ke_tm_node *n, *parent_node; + uint32_t node_state, state_mask; + int status; + + /* Checks */ + if (tm->hierarchy_frozen) + return -rte_tm_error_set(error, + EBUSY, + RTE_TM_ERROR_TYPE_UNSPECIFIED, + NULL, + rte_strerror(EBUSY)); + + tm_id = tm->tm_id; + + status = ipn3ke_tm_node_add_check_parameter(tm_id, + node_id, + parent_node_id, + priority, + weight, + level_id, + params, + error); + if (status) + return status; + + status = ipn3ke_tm_node_add_check_mount(tm_id, + node_id, + parent_node_id, + level_id, + error); + if (status) + return status; + + /* Shaper profile ID must not be NONE. */ + if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE && + params->shaper_profile_id != node_id) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID, + NULL, + rte_strerror(EINVAL)); + + /* Memory allocation */ + state_mask = 0; + IPN3KE_BIT_SET(state_mask, IPN3KE_TM_NODE_STATE_IDLE); + IPN3KE_BIT_SET(state_mask, IPN3KE_TM_NODE_STATE_CONFIGURED_DEL); + n = ipn3ke_hw_tm_node_search(hw, tm_id, node_id, state_mask); + if (!n) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_UNSPECIFIED, + NULL, + rte_strerror(EINVAL)); + node_state = n->node_state; + + /* Check parent node */ + state_mask = 0; + IPN3KE_BIT_SET(state_mask, IPN3KE_TM_NODE_STATE_CONFIGURED_ADD); + IPN3KE_BIT_SET(state_mask, IPN3KE_TM_NODE_STATE_COMMITTED); + if (parent_node_id != RTE_TM_NODE_ID_NULL) { + parent_node = ipn3ke_hw_tm_node_search(hw, + tm_id, + parent_node_id, + state_mask); + if (!parent_node) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID, + NULL, + rte_strerror(EINVAL)); + } else { + parent_node = NULL; + } + + switch (level_id) { + case IPN3KE_TM_NODE_LEVEL_PORT: + n->node_state = IPN3KE_TM_NODE_STATE_CONFIGURED_ADD; + n->tm_id = tm_id; + tm->h.port_commit_node = n; + break; + + case IPN3KE_TM_NODE_LEVEL_VT: + if (node_state == IPN3KE_TM_NODE_STATE_IDLE) { + TAILQ_INSERT_TAIL(&tm->h.vt_commit_node_list, n, node); + if (parent_node) + parent_node->n_children++; + tm->h.n_vt_nodes++; + } else if (node_state == IPN3KE_TM_NODE_STATE_CONFIGURED_DEL) { + if (parent_node) + parent_node->n_children++; + tm->h.n_vt_nodes++; + } + n->node_state = IPN3KE_TM_NODE_STATE_CONFIGURED_ADD; + n->parent_node_id = parent_node_id; + n->tm_id = tm_id; + n->parent_node = parent_node; + + break; + + case IPN3KE_TM_NODE_LEVEL_COS: + if (node_state == IPN3KE_TM_NODE_STATE_IDLE) { + TAILQ_INSERT_TAIL(&tm->h.cos_commit_node_list, + n, node); + if (parent_node) + parent_node->n_children++; + tm->h.n_cos_nodes++; + } else if (node_state == IPN3KE_TM_NODE_STATE_CONFIGURED_DEL) { + if (parent_node) + parent_node->n_children++; + tm->h.n_cos_nodes++; + } + n->node_state = IPN3KE_TM_NODE_STATE_CONFIGURED_ADD; + n->parent_node_id = parent_node_id; + n->tm_id = tm_id; + n->parent_node = parent_node; + + break; + default: + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_LEVEL_ID, + NULL, + rte_strerror(EINVAL)); + } + + /* Fill in */ + n->priority = priority; + n->weight = weight; + + if (n->level == IPN3KE_TM_NODE_LEVEL_COS && + params->leaf.cman == RTE_TM_CMAN_TAIL_DROP) + n->tdrop_profile 
= ipn3ke_hw_tm_tdrop_profile_search(hw, + params->leaf.wred.wred_profile_id); + + rte_memcpy(&n->params, params, sizeof(n->params)); + + return 0; +} + +static int +ipn3ke_tm_node_del_check_parameter(uint32_t tm_id, + uint32_t node_id, struct rte_tm_error *error) +{ + uint32_t level_of_node_id; + uint32_t node_index; + + if (node_id == RTE_TM_NODE_ID_NULL) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_NODE_ID, + NULL, + rte_strerror(EINVAL)); + + /* check node id and parent id*/ + level_of_node_id = node_id / IPN3KE_TM_NODE_LEVEL_MOD; + node_index = node_id % IPN3KE_TM_NODE_LEVEL_MOD; + switch (level_of_node_id) { + case IPN3KE_TM_NODE_LEVEL_PORT: + if (node_index != tm_id) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_NODE_ID, + NULL, + rte_strerror(EINVAL)); + break; + + case IPN3KE_TM_NODE_LEVEL_VT: + if (node_index >= IPN3KE_TM_VT_NODE_NUM) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_NODE_ID, + NULL, + rte_strerror(EINVAL)); + break; + + case IPN3KE_TM_NODE_LEVEL_COS: + if (node_index >= IPN3KE_TM_COS_NODE_NUM) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_NODE_ID, + NULL, + rte_strerror(EINVAL)); + break; + default: + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_LEVEL_ID, + NULL, + rte_strerror(EINVAL)); + } + + return 0; +} + +/* Traffic manager node delete */ +static int +ipn3ke_pmd_tm_node_delete(struct rte_eth_dev *dev, + uint32_t node_id, struct rte_tm_error *error) +{ + struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev); + struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev); + struct ipn3ke_tm_node *n, *parent_node; + uint32_t tm_id; + int status; + uint32_t level_of_node_id; + uint32_t node_state; + uint32_t state_mask; + + /* Check hierarchy changes are currently allowed */ + if (tm->hierarchy_frozen) + return -rte_tm_error_set(error, + EBUSY, + RTE_TM_ERROR_TYPE_UNSPECIFIED, + NULL, + rte_strerror(EBUSY)); + + tm_id = tm->tm_id; + + status = ipn3ke_tm_node_del_check_parameter(tm_id, + node_id, + error); + if (status) + return status; + + /* Check existing */ + state_mask = 0; + IPN3KE_BIT_SET(state_mask, IPN3KE_TM_NODE_STATE_CONFIGURED_ADD); + IPN3KE_BIT_SET(state_mask, IPN3KE_TM_NODE_STATE_COMMITTED); + n = ipn3ke_hw_tm_node_search(hw, tm_id, node_id, state_mask); + if (n == NULL) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_NODE_ID, + NULL, + rte_strerror(EINVAL)); + + if (n->n_children > 0) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_NODE_ID, + NULL, + rte_strerror(EINVAL)); + + node_state = n->node_state; + + level_of_node_id = node_id / IPN3KE_TM_NODE_LEVEL_MOD; + + /* Check parent node */ + if (n->parent_node_id != RTE_TM_NODE_ID_NULL) { + state_mask = 0; + IPN3KE_BIT_SET(state_mask, IPN3KE_TM_NODE_STATE_CONFIGURED_ADD); + IPN3KE_BIT_SET(state_mask, IPN3KE_TM_NODE_STATE_COMMITTED); + parent_node = ipn3ke_hw_tm_node_search(hw, + tm_id, + n->parent_node_id, + state_mask); + if (!parent_node) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID, + NULL, + rte_strerror(EINVAL)); + if (n->parent_node != parent_node) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_NODE_ID, + NULL, + rte_strerror(EINVAL)); + } else { + parent_node = NULL; + } + + switch (level_of_node_id) { + case IPN3KE_TM_NODE_LEVEL_PORT: + if (tm->h.port_node != n) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_NODE_ID, + NULL, + rte_strerror(EINVAL)); + n->node_state = 
IPN3KE_TM_NODE_STATE_CONFIGURED_DEL; + tm->h.port_commit_node = n; + + break; + + case IPN3KE_TM_NODE_LEVEL_VT: + if (node_state == IPN3KE_TM_NODE_STATE_COMMITTED) { + if (parent_node) + TAILQ_REMOVE(&parent_node->children_node_list, + n, node); + TAILQ_INSERT_TAIL(&tm->h.vt_commit_node_list, n, node); + if (parent_node) + parent_node->n_children--; + tm->h.n_vt_nodes--; + } else if (node_state == IPN3KE_TM_NODE_STATE_CONFIGURED_ADD) { + if (parent_node) + parent_node->n_children--; + tm->h.n_vt_nodes--; + } + n->node_state = IPN3KE_TM_NODE_STATE_CONFIGURED_DEL; + + break; + + case IPN3KE_TM_NODE_LEVEL_COS: + if (node_state == IPN3KE_TM_NODE_STATE_COMMITTED) { + if (parent_node) + TAILQ_REMOVE(&parent_node->children_node_list, + n, node); + TAILQ_INSERT_TAIL(&tm->h.cos_commit_node_list, + n, node); + if (parent_node) + parent_node->n_children--; + tm->h.n_cos_nodes--; + } else if (node_state == IPN3KE_TM_NODE_STATE_CONFIGURED_ADD) { + if (parent_node) + parent_node->n_children--; + tm->h.n_cos_nodes--; + } + n->node_state = IPN3KE_TM_NODE_STATE_CONFIGURED_DEL; + + break; + default: + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_LEVEL_ID, + NULL, + rte_strerror(EINVAL)); + } + + return 0; +} + +static int +ipn3ke_tm_hierarchy_commit_check(struct rte_eth_dev *dev, + struct rte_tm_error *error) +{ + struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev); + uint32_t tm_id; + struct ipn3ke_tm_node_list *nl; + struct ipn3ke_tm_node *n, *parent_node; + + tm_id = tm->tm_id; + + nl = &tm->h.cos_commit_node_list; + TAILQ_FOREACH(n, nl, node) { + parent_node = n->parent_node; + if (n->node_state == IPN3KE_TM_NODE_STATE_CONFIGURED_ADD) { + if (n->parent_node_id == RTE_TM_NODE_ID_NULL || + n->level != IPN3KE_TM_NODE_LEVEL_COS || + n->tm_id != tm_id || + parent_node == NULL || + (parent_node && + parent_node->node_state == + IPN3KE_TM_NODE_STATE_CONFIGURED_DEL) || + (parent_node && + parent_node->node_state == + IPN3KE_TM_NODE_STATE_IDLE) || + n->shaper_profile.valid == 0) { + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_UNSPECIFIED, + NULL, + rte_strerror(EINVAL)); + } + } else if (n->node_state == + IPN3KE_TM_NODE_STATE_CONFIGURED_DEL) { + if (n->level != IPN3KE_TM_NODE_LEVEL_COS || + n->n_children != 0) { + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_UNSPECIFIED, + NULL, + rte_strerror(EINVAL)); + } + } + } + + nl = &tm->h.vt_commit_node_list; + TAILQ_FOREACH(n, nl, node) { + parent_node = n->parent_node; + if (n->node_state == IPN3KE_TM_NODE_STATE_CONFIGURED_ADD) { + if (n->parent_node_id == RTE_TM_NODE_ID_NULL || + n->level != IPN3KE_TM_NODE_LEVEL_VT || + n->tm_id != tm_id || + parent_node == NULL || + (parent_node && + parent_node->node_state == + IPN3KE_TM_NODE_STATE_CONFIGURED_DEL) || + (parent_node && + parent_node->node_state == + IPN3KE_TM_NODE_STATE_IDLE) || + n->shaper_profile.valid == 0) { + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_UNSPECIFIED, + NULL, + rte_strerror(EINVAL)); + } + } else if (n->node_state == IPN3KE_TM_NODE_STATE_CONFIGURED_DEL) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_UNSPECIFIED, + NULL, + rte_strerror(EINVAL)); + } + + n = tm->h.port_commit_node; + if (n && + (n->parent_node_id != RTE_TM_NODE_ID_NULL || + n->level != IPN3KE_TM_NODE_LEVEL_PORT || + n->tm_id != tm_id || + n->parent_node != NULL || + n->shaper_profile.valid == 0)) { + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_UNSPECIFIED, + NULL, + rte_strerror(EINVAL)); + } + + return 0; +} + 
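
The commit check above only validates what earlier rte_tm calls queued onto the port/VT/COS commit lists; nothing is written to the FPGA until the hierarchy is committed. As a rough, non-authoritative sketch of how an application might exercise these checks through the generic rte_tm API (assuming the AFU exposes the TM block so tm_ops_get returns ipn3ke_tm_ops), the example below builds a minimal port -> VT -> COS chain. The EX_* level numbers and EX_LEVEL_MOD are stand-ins for IPN3KE_TM_NODE_LEVEL_* and IPN3KE_TM_NODE_LEVEL_MOD, whose real values live in ipn3ke_ethdev.h and are not part of this patch; the rate, priority and weight values are likewise chosen only to satisfy the parameter checks shown above, not taken from the driver.

/*
 * Illustrative sketch only; EX_LEVEL_* and EX_LEVEL_MOD are assumed
 * stand-ins for the driver's internal node-ID encoding
 * (node_id = level * IPN3KE_TM_NODE_LEVEL_MOD + index).
 */
#include <string.h>
#include <rte_tm.h>

#define EX_LEVEL_PORT 0
#define EX_LEVEL_VT   1
#define EX_LEVEL_COS  2
#define EX_LEVEL_MOD  100000 /* assumed, not the real IPN3KE_TM_NODE_LEVEL_MOD */

static int
ex_add_node_with_shaper(uint16_t port_id, uint32_t node_id,
			uint32_t parent_id, uint32_t level,
			uint64_t peak_rate, struct rte_tm_error *err)
{
	struct rte_tm_shaper_params sp;
	struct rte_tm_node_params np;
	int ret;

	/* Peak-rate-only profile: the driver rejects non-zero peak.size,
	 * dual-rate configs and pkt_length_adjust.
	 */
	memset(&sp, 0, sizeof(sp));
	sp.peak.rate = peak_rate;

	/* ipn3ke_tm_node_add() only accepts a shaper profile ID equal to
	 * the node ID (or NONE), and the commit check requires every added
	 * node to carry a valid shaper profile.
	 */
	ret = rte_tm_shaper_profile_add(port_id, node_id, &sp, err);
	if (ret)
		return ret;

	memset(&np, 0, sizeof(np));
	np.shaper_profile_id = node_id;
	np.n_shared_shapers = 0;

	/* Priority 0 and weight 1 stay within the parameter checks above. */
	return rte_tm_node_add(port_id, node_id, parent_id, 0, 1, level,
			       &np, err);
}

static int
ex_build_minimal_hierarchy(uint16_t port_id, uint32_t tm_id)
{
	struct rte_tm_error err;
	uint32_t port_node = EX_LEVEL_PORT * EX_LEVEL_MOD + tm_id;
	uint32_t vt_node   = EX_LEVEL_VT   * EX_LEVEL_MOD + 0;
	uint32_t cos_node  = EX_LEVEL_COS  * EX_LEVEL_MOD + 0;
	int ret;

	/* Port node: its index must equal tm_id and it has no parent. */
	ret = ex_add_node_with_shaper(port_id, port_node, RTE_TM_NODE_ID_NULL,
				      EX_LEVEL_PORT, 1000, &err);
	if (ret)
		return ret;

	/* VT node: its parent must be the port node of the same tm_id. */
	ret = ex_add_node_with_shaper(port_id, vt_node, port_node,
				      EX_LEVEL_VT, 1000, &err);
	if (ret)
		return ret;

	/* COS (leaf) node: its parent VT index is checked against
	 * node_index / IPN3KE_TM_NODE_MOUNT_MAX, which is 0 here.
	 */
	ret = ex_add_node_with_shaper(port_id, cos_node, vt_node,
				      EX_LEVEL_COS, 1000, &err);
	if (ret)
		return ret;

	/* Only now does the driver run the commit check and, on success,
	 * push the queued nodes to the FPGA registers.
	 */
	return rte_tm_hierarchy_commit(port_id, 1 /* clear_on_fail */, &err);
}

The helper adds one shaper profile per node rather than sharing one because the driver stores the profile inside the node that shares its ID; with clear_on_fail set, a failed commit drops the queued configuration instead of leaving the commit lists half-populated.
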
+static int +ipn3ke_hw_tm_node_wr(struct ipn3ke_hw *hw, + struct ipn3ke_tm_node *n, + struct ipn3ke_tm_node *parent_node) +{ + uint32_t level; + + level = n->level; + + switch (level) { + case IPN3KE_TM_NODE_LEVEL_PORT: + /** + * Configure Type + */ + IPN3KE_MASK_WRITE_REG(hw, + IPN3KE_QOS_TYPE_L3_X, + n->node_index, + n->priority, + IPN3KE_QOS_TYPE_MASK); + + /** + * Configure Sch_wt + */ + IPN3KE_MASK_WRITE_REG(hw, + IPN3KE_QOS_SCH_WT_L3_X, + n->node_index, + n->weight, + IPN3KE_QOS_SCH_WT_MASK); + + /** + * Configure Shap_wt + */ + if (n->shaper_profile.valid) + IPN3KE_MASK_WRITE_REG(hw, + IPN3KE_QOS_SHAP_WT_L3_X, + n->node_index, + ((n->shaper_profile.e << 10) | + n->shaper_profile.m), + IPN3KE_QOS_SHAP_WT_MASK); + + break; + case IPN3KE_TM_NODE_LEVEL_VT: + /** + * Configure Type + */ + IPN3KE_MASK_WRITE_REG(hw, + IPN3KE_QOS_TYPE_L2_X, + n->node_index, + n->priority, + IPN3KE_QOS_TYPE_MASK); + + /** + * Configure Sch_wt + */ + IPN3KE_MASK_WRITE_REG(hw, + IPN3KE_QOS_SCH_WT_L2_X, + n->node_index, + n->weight, + IPN3KE_QOS_SCH_WT_MASK); + + /** + * Configure Shap_wt + */ + if (n->shaper_profile.valid) + IPN3KE_MASK_WRITE_REG(hw, + IPN3KE_QOS_SHAP_WT_L2_X, + n->node_index, + ((n->shaper_profile.e << 10) | + n->shaper_profile.m), + IPN3KE_QOS_SHAP_WT_MASK); + + /** + * Configure Map + */ + if (parent_node) + IPN3KE_MASK_WRITE_REG(hw, + IPN3KE_QOS_MAP_L2_X, + n->node_index, + parent_node->node_index, + IPN3KE_QOS_MAP_L2_MASK); + + break; + case IPN3KE_TM_NODE_LEVEL_COS: + /** + * Configure Tail Drop mapping + */ + if (n->tdrop_profile && n->tdrop_profile->valid) { + IPN3KE_MASK_WRITE_REG(hw, + IPN3KE_CCB_QPROFILE_Q, + n->node_index, + n->tdrop_profile->tdrop_profile_id, + IPN3KE_CCB_QPROFILE_MASK); + } + + /** + * Configure Type + */ + IPN3KE_MASK_WRITE_REG(hw, + IPN3KE_QOS_TYPE_L1_X, + n->node_index, + n->priority, + IPN3KE_QOS_TYPE_MASK); + + /** + * Configure Sch_wt + */ + IPN3KE_MASK_WRITE_REG(hw, + IPN3KE_QOS_SCH_WT_L1_X, + n->node_index, + n->weight, + IPN3KE_QOS_SCH_WT_MASK); + + /** + * Configure Shap_wt + */ + if (n->shaper_profile.valid) + IPN3KE_MASK_WRITE_REG(hw, + IPN3KE_QOS_SHAP_WT_L1_X, + n->node_index, + ((n->shaper_profile.e << 10) | + n->shaper_profile.m), + IPN3KE_QOS_SHAP_WT_MASK); + + /** + * Configure COS queue to port + */ + while (IPN3KE_MASK_READ_REG(hw, + IPN3KE_QM_UID_CONFIG_CTRL, + 0, + 0x80000000)) + ; + + if (parent_node && parent_node->parent_node) + IPN3KE_MASK_WRITE_REG(hw, + IPN3KE_QM_UID_CONFIG_DATA, + 0, + (1 << 8 | parent_node->parent_node->node_index), + 0x1FF); + + IPN3KE_MASK_WRITE_REG(hw, + IPN3KE_QM_UID_CONFIG_CTRL, + 0, + n->node_index, + 0xFFFFF); + + while (IPN3KE_MASK_READ_REG(hw, + IPN3KE_QM_UID_CONFIG_CTRL, + 0, + 0x80000000)) + ; + + /** + * Configure Map + */ + if (parent_node) + IPN3KE_MASK_WRITE_REG(hw, + IPN3KE_QOS_MAP_L1_X, + n->node_index, + parent_node->node_index, + IPN3KE_QOS_MAP_L1_MASK); + + break; + default: + return -1; + } + + return 0; +} + +static int +ipn3ke_tm_hierarchy_hw_commit(struct rte_eth_dev *dev, + struct rte_tm_error *error) +{ + struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev); + struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev); + struct ipn3ke_tm_node_list *nl; + struct ipn3ke_tm_node *n, *nn, *parent_node; + + n = tm->h.port_commit_node; + if (n) { + if (n->node_state == IPN3KE_TM_NODE_STATE_CONFIGURED_ADD) { + tm->h.port_commit_node = NULL; + + n->node_state = IPN3KE_TM_NODE_STATE_COMMITTED; + } else if (n->node_state == + IPN3KE_TM_NODE_STATE_CONFIGURED_DEL) { + tm->h.port_commit_node = NULL; + 
+ n->node_state = IPN3KE_TM_NODE_STATE_IDLE; + n->priority = IPN3KE_TM_NODE_PRIORITY_NORMAL0; + n->weight = 0; + n->tm_id = RTE_TM_NODE_ID_NULL; + } else { + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_UNSPECIFIED, + NULL, + rte_strerror(EINVAL)); + } + parent_node = n->parent_node; + ipn3ke_hw_tm_node_wr(hw, n, parent_node); + } + + nl = &tm->h.vt_commit_node_list; + for (n = TAILQ_FIRST(nl); n != NULL; n = nn) { + nn = TAILQ_NEXT(n, node); + if (n->node_state == IPN3KE_TM_NODE_STATE_CONFIGURED_ADD) { + n->node_state = IPN3KE_TM_NODE_STATE_COMMITTED; + parent_node = n->parent_node; + TAILQ_REMOVE(nl, n, node); + TAILQ_INSERT_TAIL(&parent_node->children_node_list, + n, node); + } else if (n->node_state == + IPN3KE_TM_NODE_STATE_CONFIGURED_DEL) { + parent_node = n->parent_node; + TAILQ_REMOVE(nl, n, node); + + n->node_state = IPN3KE_TM_NODE_STATE_IDLE; + n->parent_node_id = RTE_TM_NODE_ID_NULL; + n->priority = IPN3KE_TM_NODE_PRIORITY_NORMAL0; + n->weight = 0; + n->tm_id = RTE_TM_NODE_ID_NULL; + n->parent_node = NULL; + } else { + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_UNSPECIFIED, + NULL, + rte_strerror(EINVAL)); + } + ipn3ke_hw_tm_node_wr(hw, n, parent_node); + } + + nl = &tm->h.cos_commit_node_list; + for (n = TAILQ_FIRST(nl); n != NULL; n = nn) { + nn = TAILQ_NEXT(n, node); + if (n->node_state == IPN3KE_TM_NODE_STATE_CONFIGURED_ADD) { + n->node_state = IPN3KE_TM_NODE_STATE_COMMITTED; + parent_node = n->parent_node; + TAILQ_REMOVE(nl, n, node); + TAILQ_INSERT_TAIL(&parent_node->children_node_list, + n, node); + } else if (n->node_state == + IPN3KE_TM_NODE_STATE_CONFIGURED_DEL) { + n->node_state = IPN3KE_TM_NODE_STATE_IDLE; + parent_node = n->parent_node; + TAILQ_REMOVE(nl, n, node); + + n->node_state = IPN3KE_TM_NODE_STATE_IDLE; + n->parent_node_id = RTE_TM_NODE_ID_NULL; + n->priority = IPN3KE_TM_NODE_PRIORITY_NORMAL0; + n->weight = 0; + n->tm_id = RTE_TM_NODE_ID_NULL; + n->parent_node = NULL; + + if (n->tdrop_profile) + n->tdrop_profile->n_users--; + } else { + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_UNSPECIFIED, + NULL, + rte_strerror(EINVAL)); + } + ipn3ke_hw_tm_node_wr(hw, n, parent_node); + } + + return 0; +} + +static int +ipn3ke_tm_hierarchy_commit_clear(struct rte_eth_dev *dev) +{ + struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev); + struct ipn3ke_tm_node_list *nl; + struct ipn3ke_tm_node *n; + struct ipn3ke_tm_node *nn; + + n = tm->h.port_commit_node; + if (n) { + n->node_state = IPN3KE_TM_NODE_STATE_IDLE; + n->priority = IPN3KE_TM_NODE_PRIORITY_NORMAL0; + n->weight = 0; + n->tm_id = RTE_TM_NODE_ID_NULL; + n->n_children = 0; + + tm->h.port_commit_node = NULL; + } + + nl = &tm->h.vt_commit_node_list; + for (n = TAILQ_FIRST(nl); n != NULL; n = nn) { + nn = TAILQ_NEXT(n, node); + + n->node_state = IPN3KE_TM_NODE_STATE_IDLE; + n->parent_node_id = RTE_TM_NODE_ID_NULL; + n->priority = IPN3KE_TM_NODE_PRIORITY_NORMAL0; + n->weight = 0; + n->tm_id = RTE_TM_NODE_ID_NULL; + n->parent_node = NULL; + n->n_children = 0; + tm->h.n_vt_nodes--; + + TAILQ_REMOVE(nl, n, node); + } + + nl = &tm->h.cos_commit_node_list; + for (n = TAILQ_FIRST(nl); n != NULL; n = nn) { + nn = TAILQ_NEXT(n, node); + + n->node_state = IPN3KE_TM_NODE_STATE_IDLE; + n->parent_node_id = RTE_TM_NODE_ID_NULL; + n->priority = IPN3KE_TM_NODE_PRIORITY_NORMAL0; + n->weight = 0; + n->tm_id = RTE_TM_NODE_ID_NULL; + n->parent_node = NULL; + tm->h.n_cos_nodes--; + + TAILQ_REMOVE(nl, n, node); + } + + return 0; +} + +static void +ipn3ke_tm_show(struct 
rte_eth_dev *dev) +{ + struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev); + uint32_t tm_id; + struct ipn3ke_tm_node_list *vt_nl, *cos_nl; + struct ipn3ke_tm_node *port_n, *vt_n, *cos_n; + const char *str_state[IPN3KE_TM_NODE_STATE_MAX] = {"Idle", + "CfgAdd", + "CfgDel", + "Committed"}; + + tm_id = tm->tm_id; + + IPN3KE_AFU_PMD_DEBUG("***HQoS Tree(%d)***\n", tm_id); + + port_n = tm->h.port_node; + IPN3KE_AFU_PMD_DEBUG("Port: (%d|%s)\n", port_n->node_index, + str_state[port_n->node_state]); + + vt_nl = &tm->h.port_node->children_node_list; + TAILQ_FOREACH(vt_n, vt_nl, node) { + cos_nl = &vt_n->children_node_list; + IPN3KE_AFU_PMD_DEBUG(" VT%d: ", vt_n->node_index); + TAILQ_FOREACH(cos_n, cos_nl, node) { + if (cos_n->parent_node_id != + (vt_n->node_index + IPN3KE_TM_NODE_LEVEL_MOD)) + IPN3KE_AFU_PMD_ERR("(%d|%s), ", + cos_n->node_index, + str_state[cos_n->node_state]); + } + IPN3KE_AFU_PMD_DEBUG("\n"); + } +} + +static void +ipn3ke_tm_show_commmit(struct rte_eth_dev *dev) +{ + struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev); + uint32_t tm_id; + struct ipn3ke_tm_node_list *nl; + struct ipn3ke_tm_node *n; + const char *str_state[IPN3KE_TM_NODE_STATE_MAX] = {"Idle", + "CfgAdd", + "CfgDel", + "Committed"}; + + tm_id = tm->tm_id; + + IPN3KE_AFU_PMD_DEBUG("***Commit Tree(%d)***\n", tm_id); + n = tm->h.port_commit_node; + IPN3KE_AFU_PMD_DEBUG("Port: "); + if (n) + IPN3KE_AFU_PMD_DEBUG("(%d|%s)", + n->node_index, + str_state[n->node_state]); + IPN3KE_AFU_PMD_DEBUG("\n"); + + nl = &tm->h.vt_commit_node_list; + IPN3KE_AFU_PMD_DEBUG("VT : "); + TAILQ_FOREACH(n, nl, node) { + IPN3KE_AFU_PMD_DEBUG("(%d|%s), ", + n->node_index, + str_state[n->node_state]); + } + IPN3KE_AFU_PMD_DEBUG("\n"); + + nl = &tm->h.cos_commit_node_list; + IPN3KE_AFU_PMD_DEBUG("COS : "); + TAILQ_FOREACH(n, nl, node) { + IPN3KE_AFU_PMD_DEBUG("(%d|%s), ", + n->node_index, + str_state[n->node_state]); + } + IPN3KE_AFU_PMD_DEBUG("\n"); +} + +/* Traffic manager hierarchy commit */ +static int +ipn3ke_tm_hierarchy_commit(struct rte_eth_dev *dev, + int clear_on_fail, struct rte_tm_error *error) +{ + struct ipn3ke_tm_internals *tm = IPN3KE_DEV_PRIVATE_TO_TM(dev); + int status; + + /* Checks */ + if (tm->hierarchy_frozen) + return -rte_tm_error_set(error, + EBUSY, + RTE_TM_ERROR_TYPE_UNSPECIFIED, + NULL, + rte_strerror(EBUSY)); + + ipn3ke_tm_show_commmit(dev); + + status = ipn3ke_tm_hierarchy_commit_check(dev, error); + if (status) { + if (clear_on_fail) + ipn3ke_tm_hierarchy_commit_clear(dev); + return status; + } + + ipn3ke_tm_hierarchy_hw_commit(dev, error); + ipn3ke_tm_show(dev); + + return 0; +} + +const struct rte_tm_ops ipn3ke_tm_ops = { + .node_type_get = ipn3ke_pmd_tm_node_type_get, + .capabilities_get = ipn3ke_tm_capabilities_get, + .level_capabilities_get = ipn3ke_tm_level_capabilities_get, + .node_capabilities_get = ipn3ke_tm_node_capabilities_get, + + .wred_profile_add = ipn3ke_tm_tdrop_profile_add, + .wred_profile_delete = ipn3ke_tm_tdrop_profile_delete, + .shared_wred_context_add_update = NULL, + .shared_wred_context_delete = NULL, + + .shaper_profile_add = ipn3ke_tm_shaper_profile_add, + .shaper_profile_delete = ipn3ke_tm_shaper_profile_delete, + .shared_shaper_add_update = NULL, + .shared_shaper_delete = NULL, + + .node_add = ipn3ke_tm_node_add, + .node_delete = ipn3ke_pmd_tm_node_delete, + .node_suspend = NULL, + .node_resume = NULL, + .hierarchy_commit = ipn3ke_tm_hierarchy_commit, + + .node_parent_update = NULL, + .node_shaper_update = NULL, + .node_shared_shaper_update = NULL, + 
.node_stats_update = NULL, + .node_wfq_weight_mode_update = NULL, + .node_cman_update = NULL, + .node_wred_context_update = NULL, + .node_shared_wred_context_update = NULL, + + .node_stats_read = NULL, +}; + +int +ipn3ke_tm_ops_get(struct rte_eth_dev *ethdev, + void *arg) +{ + struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(ethdev); + struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(ethdev); + struct rte_eth_dev *i40e_pf_eth; + const struct rte_tm_ops *ops; + + if (!arg) + return -EINVAL; + + if (hw->acc_tm) { + *(const void **)arg = &ipn3ke_tm_ops; + } else if (rpst->i40e_pf_eth) { + i40e_pf_eth = rpst->i40e_pf_eth; + if (i40e_pf_eth->dev_ops->tm_ops_get == NULL || + i40e_pf_eth->dev_ops->tm_ops_get(i40e_pf_eth, + &ops) != 0 || + ops == NULL) { + return -EINVAL; + } + *(const void **)arg = ops; + } else { + return -EINVAL; + } + + return 0; +} diff --git a/src/spdk/dpdk/drivers/net/ipn3ke/meson.build b/src/spdk/dpdk/drivers/net/ipn3ke/meson.build new file mode 100644 index 000000000..ec9cb7daf --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ipn3ke/meson.build @@ -0,0 +1,28 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2019 Intel Corporation + +# +# Add the experimenatal APIs called from this PMD +# rte_eth_switch_domain_alloc() +# rte_eth_dev_create() +# rte_eth_dev_destroy() +# rte_eth_switch_domain_free() +# + +dep = dependency('libfdt', required: false) +if not dep.found() + dep = cc.find_library('libfdt', required: false) +endif +if not dep.found() + build = false + reason = 'missing dependency, "libfdt"' + subdir_done() +endif + +includes += include_directories('../../raw/ifpga') + +sources += files('ipn3ke_ethdev.c', + 'ipn3ke_representor.c', + 'ipn3ke_tm.c', + 'ipn3ke_flow.c') +deps += ['bus_ifpga', 'ethdev', 'sched'] diff --git a/src/spdk/dpdk/drivers/net/ipn3ke/rte_pmd_ipn3ke_version.map b/src/spdk/dpdk/drivers/net/ipn3ke/rte_pmd_ipn3ke_version.map new file mode 100644 index 000000000..7e348e99b --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ipn3ke/rte_pmd_ipn3ke_version.map @@ -0,0 +1,9 @@ +DPDK_20.0 { + local: *; +}; + +EXPERIMENTAL { + global: + + ipn3ke_bridge_func; +}; diff --git a/src/spdk/dpdk/drivers/net/ixgbe/Makefile b/src/spdk/dpdk/drivers/net/ixgbe/Makefile new file mode 100644 index 000000000..0f0bcf1d6 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ixgbe/Makefile @@ -0,0 +1,110 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2010-2016 Intel Corporation + +include $(RTE_SDK)/mk/rte.vars.mk + +# +# library name +# +LIB = librte_pmd_ixgbe.a + +CFLAGS += -O3 +CFLAGS += $(WERROR_FLAGS) + +EXPORT_MAP := rte_pmd_ixgbe_version.map + +ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y) +# +# CFLAGS for icc +# +CFLAGS_BASE_DRIVER = -diag-disable 174 -diag-disable 593 -diag-disable 869 +CFLAGS_BASE_DRIVER += -diag-disable 981 -diag-disable 2259 + +CFLAGS_ixgbe_rxtx.o += -diag-disable 3656 + +else ifeq ($(CONFIG_RTE_TOOLCHAIN_CLANG),y) +# +# CFLAGS for clang +# +CFLAGS_BASE_DRIVER = -Wno-unused-parameter -Wno-unused-value +CFLAGS_BASE_DRIVER += -Wno-strict-aliasing -Wno-format-extra-args + +else +# +# CFLAGS for gcc +# +ifeq ($(shell test $(GCC_VERSION) -ge 44 && echo 1), 1) +CFLAGS += -Wno-deprecated +CFLAGS_ixgbe_common.o += -Wno-unused-but-set-variable +CFLAGS_ixgbe_x550.o += -Wno-unused-but-set-variable +endif +CFLAGS_BASE_DRIVER = -Wno-unused-parameter -Wno-unused-value +CFLAGS_BASE_DRIVER += -Wno-strict-aliasing -Wno-format-extra-args + +ifeq ($(shell test $(GCC_VERSION) -ge 46 && echo 1), 1) +CFLAGS_ixgbe_x550.o += -Wno-maybe-uninitialized +endif + +ifeq 
($(shell test $(GCC_VERSION) -ge 50 && echo 1), 1) +CFLAGS_ixgbe_common.o += -Wno-logical-not-parentheses +ifeq ($(shell test $(GCC_VERSION) -ge 70 && echo 1), 1) +CFLAGS_BASE_DRIVER += -Wno-implicit-fallthrough +endif +endif + +endif +LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring +LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs -lrte_hash +LDLIBS += -lrte_bus_pci +LDLIBS += -lpthread + +# +# Add extra flags for base driver files (also known as shared code) +# to disable warnings in them +# +BASE_DRIVER_OBJS=$(sort $(patsubst %.c,%.o,$(notdir $(wildcard $(SRCDIR)/base/*.c)))) +$(foreach obj, $(BASE_DRIVER_OBJS), $(eval CFLAGS_$(obj)+=$(CFLAGS_BASE_DRIVER))) + +VPATH += $(SRCDIR)/base + +# +# all source are stored in SRCS-y +# +SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_common.c +SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_82598.c +SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_82599.c +SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_x540.c +SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_x550.c +SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_phy.c +SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_api.c +SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_vf.c +SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_hv_vf.c +SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_dcb.c +SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_dcb_82599.c +SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_dcb_82598.c +SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_mbx.c +SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_rxtx.c +SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_ethdev.c +SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_fdir.c +SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_pf.c +SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_flow.c +ifeq ($(CONFIG_RTE_ARCH_ARM64),y) +SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_rxtx_vec_neon.c +else ifeq ($(CONFIG_RTE_ARCH_X86),y) +SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_rxtx_vec_sse.c +endif +ifeq ($(CONFIG_RTE_LIBRTE_IXGBE_BYPASS),y) +SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_bypass.c +SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_82599_bypass.c +endif +ifeq ($(CONFIG_RTE_LIBRTE_SECURITY),y) +SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_ipsec.c +endif +SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += rte_pmd_ixgbe.c +SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_tm.c +SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_vf_representor.c + +# install this header file +SYMLINK-$(CONFIG_RTE_LIBRTE_IXGBE_PMD)-include := rte_pmd_ixgbe.h + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/src/spdk/dpdk/drivers/net/ixgbe/base/README b/src/spdk/dpdk/drivers/net/ixgbe/base/README new file mode 100644 index 000000000..a48b14ed2 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ixgbe/base/README @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2020 Intel Corporation + */ + +Intel® IXGBE driver +=================== + +This directory contains source code of FreeBSD ixgbe driver of version +cid-ixgbe.2018.08.28.tar.gz released by the team which develop +basic drivers for any ixgbe NIC. The sub-directory of base/ +contains the original source package. 
+This driver is valid for the product(s) listed below + +* Intel® 10 Gigabit AF DA Dual Port Server Adapter +* Intel® 10 Gigabit AT Server Adapter +* Intel® 10 Gigabit AT2 Server Adapter +* Intel® 10 Gigabit CX4 Dual Port Server Adapter +* Intel® 10 Gigabit XF LR Server Adapter +* Intel® 10 Gigabit XF SR Dual Port Server Adapter +* Intel® 10 Gigabit XF SR Server Adapter +* Intel® 82598 10 Gigabit Ethernet Controller +* Intel® 82599 10 Gigabit Ethernet Controller +* Intel® Ethernet Controller X540-AT2 +* Intel® Ethernet Server Adapter X520 Series +* Intel® Ethernet Server Adapter X520-T2 +* Intel® Ethernet Controller X550 Series + +Updating the driver +=================== + +NOTE: The source code in this directory should not be modified apart from +the following file(s): + + ixgbe_osdep.h diff --git a/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_82598.c b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_82598.c new file mode 100644 index 000000000..c83e1c6b3 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_82598.c @@ -0,0 +1,1411 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + +#include "ixgbe_type.h" +#include "ixgbe_82598.h" +#include "ixgbe_api.h" +#include "ixgbe_common.h" +#include "ixgbe_phy.h" + +#define IXGBE_82598_MAX_TX_QUEUES 32 +#define IXGBE_82598_MAX_RX_QUEUES 64 +#define IXGBE_82598_RAR_ENTRIES 16 +#define IXGBE_82598_MC_TBL_SIZE 128 +#define IXGBE_82598_VFT_TBL_SIZE 128 +#define IXGBE_82598_RX_PB_SIZE 512 + +STATIC s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *autoneg); +STATIC enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw); +STATIC s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw, + bool autoneg_wait_to_complete); +STATIC s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, bool *link_up, + bool link_up_wait_to_complete); +STATIC s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete); +STATIC s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete); +STATIC s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw); +STATIC s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq); +STATIC s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw); +STATIC void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb, + u32 headroom, int strategy); +STATIC s32 ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset, + u8 *sff8472_data); +/** + * ixgbe_set_pcie_completion_timeout - set pci-e completion timeout + * @hw: pointer to the HW structure + * + * The defaults for 82598 should be in the range of 50us to 50ms, + * however the hardware default for these parts is 500us to 1ms which is less + * than the 10ms recommended by the pci-e spec. To address this we need to + * increase the value to either 10ms to 250ms for capability version 1 config, + * or 16ms to 55ms for version 2. 
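Editor's aside: the completion-timeout policy described in the comment above reduces to a small branch on the PCIe capability version. The sketch below restates just that decision in plain, self-contained C; the register-layout constants and the helper name are illustrative assumptions, not the driver's definitions, and the real code operates on the GCR register and PCIe config space rather than a return value.

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

#define GCR_CMPL_TMOUT_MASK   0x0000F000u /* assumed field layout */
#define GCR_CAP_VER2          0x00040000u
#define GCR_CMPL_TMOUT_10ms   0x00001000u

/* Decide how the completion timeout should be raised for this part. */
static uint32_t pick_completion_timeout(uint32_t gcr, bool *write_cfg_space)
{
    *write_cfg_space = false;

    /* Leave the timeout alone if one is already programmed (non-zero). */
    if (gcr & GCR_CMPL_TMOUT_MASK)
        return gcr;

    if (!(gcr & GCR_CAP_VER2)) {
        /* Capability version 1: 10 ms-250 ms range, set via GCR. */
        return gcr | GCR_CMPL_TMOUT_10ms;
    }

    /* Capability version 2: 16 ms-55 ms range, set via PCI config space. */
    *write_cfg_space = true;
    return gcr;
}

int main(void)
{
    bool cfg;
    uint32_t gcr = pick_completion_timeout(0x00040000u, &cfg);
    printf("gcr=0x%08x write_cfg_space=%d\n", (unsigned)gcr, cfg);
    return 0;
}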
+ **/ +void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw) +{ + u32 gcr = IXGBE_READ_REG(hw, IXGBE_GCR); + u16 pcie_devctl2; + + /* only take action if timeout value is defaulted to 0 */ + if (gcr & IXGBE_GCR_CMPL_TMOUT_MASK) + goto out; + + /* + * if capababilities version is type 1 we can write the + * timeout of 10ms to 250ms through the GCR register + */ + if (!(gcr & IXGBE_GCR_CAP_VER2)) { + gcr |= IXGBE_GCR_CMPL_TMOUT_10ms; + goto out; + } + + /* + * for version 2 capabilities we need to write the config space + * directly in order to set the completion timeout value for + * 16ms to 55ms + */ + pcie_devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2); + pcie_devctl2 |= IXGBE_PCI_DEVICE_CONTROL2_16ms; + IXGBE_WRITE_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2); +out: + /* disable completion timeout resend */ + gcr &= ~IXGBE_GCR_CMPL_TMOUT_RESEND; + IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr); +} + +/** + * ixgbe_init_ops_82598 - Inits func ptrs and MAC type + * @hw: pointer to hardware structure + * + * Initialize the function pointers and assign the MAC type for 82598. + * Does not touch the hardware. + **/ +s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw) +{ + struct ixgbe_mac_info *mac = &hw->mac; + struct ixgbe_phy_info *phy = &hw->phy; + s32 ret_val; + + DEBUGFUNC("ixgbe_init_ops_82598"); + + ret_val = ixgbe_init_phy_ops_generic(hw); + ret_val = ixgbe_init_ops_generic(hw); + + /* PHY */ + phy->ops.init = ixgbe_init_phy_ops_82598; + + /* MAC */ + mac->ops.start_hw = ixgbe_start_hw_82598; + mac->ops.enable_relaxed_ordering = ixgbe_enable_relaxed_ordering_82598; + mac->ops.reset_hw = ixgbe_reset_hw_82598; + mac->ops.get_media_type = ixgbe_get_media_type_82598; + mac->ops.get_supported_physical_layer = + ixgbe_get_supported_physical_layer_82598; + mac->ops.read_analog_reg8 = ixgbe_read_analog_reg8_82598; + mac->ops.write_analog_reg8 = ixgbe_write_analog_reg8_82598; + mac->ops.set_lan_id = ixgbe_set_lan_id_multi_port_pcie_82598; + mac->ops.enable_rx_dma = ixgbe_enable_rx_dma_82598; + + /* RAR, Multicast, VLAN */ + mac->ops.set_vmdq = ixgbe_set_vmdq_82598; + mac->ops.clear_vmdq = ixgbe_clear_vmdq_82598; + mac->ops.set_vfta = ixgbe_set_vfta_82598; + mac->ops.set_vlvf = NULL; + mac->ops.clear_vfta = ixgbe_clear_vfta_82598; + + /* Flow Control */ + mac->ops.fc_enable = ixgbe_fc_enable_82598; + + mac->mcft_size = IXGBE_82598_MC_TBL_SIZE; + mac->vft_size = IXGBE_82598_VFT_TBL_SIZE; + mac->num_rar_entries = IXGBE_82598_RAR_ENTRIES; + mac->rx_pb_size = IXGBE_82598_RX_PB_SIZE; + mac->max_rx_queues = IXGBE_82598_MAX_RX_QUEUES; + mac->max_tx_queues = IXGBE_82598_MAX_TX_QUEUES; + mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw); + + /* SFP+ Module */ + phy->ops.read_i2c_eeprom = ixgbe_read_i2c_eeprom_82598; + phy->ops.read_i2c_sff8472 = ixgbe_read_i2c_sff8472_82598; + + /* Link */ + mac->ops.check_link = ixgbe_check_mac_link_82598; + mac->ops.setup_link = ixgbe_setup_mac_link_82598; + mac->ops.flap_tx_laser = NULL; + mac->ops.get_link_capabilities = ixgbe_get_link_capabilities_82598; + mac->ops.setup_rxpba = ixgbe_set_rxpba_82598; + + /* Manageability interface */ + mac->ops.set_fw_drv_ver = NULL; + + mac->ops.get_rtrup2tc = NULL; + + return ret_val; +} + +/** + * ixgbe_init_phy_ops_82598 - PHY/SFP specific init + * @hw: pointer to hardware structure + * + * Initialize any function pointers that were not able to be + * set during init_shared_code because the PHY/SFP type was + * not known. Perform the SFP init if necessary. 
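Editor's aside: the init_ops/init_phy_ops routines above are essentially "fill an ops table with generic defaults, then overwrite the media-specific pointers". The following self-contained sketch shows that wiring pattern with toy types; the structures and function names are hypothetical stand-ins, not the ixgbe API.

#include <stdio.h>

struct toy_hw;
struct toy_mac_ops {
    int (*setup_link)(struct toy_hw *hw);
    int (*check_link)(struct toy_hw *hw);
};
struct toy_hw {
    int is_copper;              /* stands in for get_media_type() */
    struct toy_mac_ops ops;
};

static int setup_link_mac(struct toy_hw *hw)    { (void)hw; return puts("MAC link setup"); }
static int setup_link_copper(struct toy_hw *hw) { (void)hw; return puts("copper PHY link setup"); }
static int check_link_generic(struct toy_hw *hw){ (void)hw; return puts("generic link check"); }

static void toy_init_ops(struct toy_hw *hw)
{
    /* Generic defaults first... */
    hw->ops.setup_link = setup_link_mac;
    hw->ops.check_link = check_link_generic;

    /* ...then overwrite only the pointers that depend on the detected
     * media, the same way the 82598 code swaps setup_link for copper. */
    if (hw->is_copper)
        hw->ops.setup_link = setup_link_copper;
}

int main(void)
{
    struct toy_hw hw = { .is_copper = 1 };
    toy_init_ops(&hw);
    hw.ops.setup_link(&hw);
    hw.ops.check_link(&hw);
    return 0;
}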
+ * + **/ +s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw) +{ + struct ixgbe_mac_info *mac = &hw->mac; + struct ixgbe_phy_info *phy = &hw->phy; + s32 ret_val = IXGBE_SUCCESS; + u16 list_offset, data_offset; + + DEBUGFUNC("ixgbe_init_phy_ops_82598"); + + /* Identify the PHY */ + phy->ops.identify(hw); + + /* Overwrite the link function pointers if copper PHY */ + if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) { + mac->ops.setup_link = ixgbe_setup_copper_link_82598; + mac->ops.get_link_capabilities = + ixgbe_get_copper_link_capabilities_generic; + } + + switch (hw->phy.type) { + case ixgbe_phy_tn: + phy->ops.setup_link = ixgbe_setup_phy_link_tnx; + phy->ops.check_link = ixgbe_check_phy_link_tnx; + phy->ops.get_firmware_version = + ixgbe_get_phy_firmware_version_tnx; + break; + case ixgbe_phy_nl: + phy->ops.reset = ixgbe_reset_phy_nl; + + /* Call SFP+ identify routine to get the SFP+ module type */ + ret_val = phy->ops.identify_sfp(hw); + if (ret_val != IXGBE_SUCCESS) + goto out; + else if (hw->phy.sfp_type == ixgbe_sfp_type_unknown) { + ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED; + goto out; + } + + /* Check to see if SFP+ module is supported */ + ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, + &list_offset, + &data_offset); + if (ret_val != IXGBE_SUCCESS) { + ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED; + goto out; + } + break; + default: + break; + } + +out: + return ret_val; +} + +/** + * ixgbe_start_hw_82598 - Prepare hardware for Tx/Rx + * @hw: pointer to hardware structure + * + * Starts the hardware using the generic start_hw function. + * Disables relaxed ordering Then set pcie completion timeout + * + **/ +s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw) +{ + u32 regval; + u32 i; + s32 ret_val = IXGBE_SUCCESS; + + DEBUGFUNC("ixgbe_start_hw_82598"); + + ret_val = ixgbe_start_hw_generic(hw); + if (ret_val) + return ret_val; + + /* Disable relaxed ordering */ + for (i = 0; ((i < hw->mac.max_tx_queues) && + (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) { + regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i)); + regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN; + IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval); + } + + for (i = 0; ((i < hw->mac.max_rx_queues) && + (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) { + regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); + regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN | + IXGBE_DCA_RXCTRL_HEAD_WRO_EN); + IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval); + } + + /* set the completion timeout for interface */ + ixgbe_set_pcie_completion_timeout(hw); + + return ret_val; +} + +/** + * ixgbe_get_link_capabilities_82598 - Determines link capabilities + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @autoneg: boolean auto-negotiation value + * + * Determines the link capabilities by reading the AUTOC register. + **/ +STATIC s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *autoneg) +{ + s32 status = IXGBE_SUCCESS; + u32 autoc = 0; + + DEBUGFUNC("ixgbe_get_link_capabilities_82598"); + + /* + * Determine link capabilities based on the stored value of AUTOC, + * which represents EEPROM defaults. If AUTOC value has not been + * stored, use the current register value. 
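Editor's aside: start_hw_82598 above disables relaxed ordering by walking the per-queue control registers with a read-modify-write that clears one bit. Here is that idiom on a plain array instead of MMIO; the bit position is illustrative, not the hardware definition.

#include <stdint.h>
#include <stdio.h>

#define DESC_WRO_EN (1u << 11)   /* illustrative bit position */
#define NUM_QUEUES  4

int main(void)
{
    uint32_t txctrl[NUM_QUEUES] = { 0xFFFFFFFFu, 0x800u, 0x0u, 0x801u };

    for (unsigned i = 0; i < NUM_QUEUES; i++) {
        uint32_t v = txctrl[i];      /* read                      */
        v &= ~DESC_WRO_EN;           /* clear the one bit of interest */
        txctrl[i] = v;               /* write back                */
        printf("queue %u: 0x%08x\n", i, (unsigned)txctrl[i]);
    }
    return 0;
}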
+ */ + if (hw->mac.orig_link_settings_stored) + autoc = hw->mac.orig_autoc; + else + autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); + + switch (autoc & IXGBE_AUTOC_LMS_MASK) { + case IXGBE_AUTOC_LMS_1G_LINK_NO_AN: + *speed = IXGBE_LINK_SPEED_1GB_FULL; + *autoneg = false; + break; + + case IXGBE_AUTOC_LMS_10G_LINK_NO_AN: + *speed = IXGBE_LINK_SPEED_10GB_FULL; + *autoneg = false; + break; + + case IXGBE_AUTOC_LMS_1G_AN: + *speed = IXGBE_LINK_SPEED_1GB_FULL; + *autoneg = true; + break; + + case IXGBE_AUTOC_LMS_KX4_AN: + case IXGBE_AUTOC_LMS_KX4_AN_1G_AN: + *speed = IXGBE_LINK_SPEED_UNKNOWN; + if (autoc & IXGBE_AUTOC_KX4_SUPP) + *speed |= IXGBE_LINK_SPEED_10GB_FULL; + if (autoc & IXGBE_AUTOC_KX_SUPP) + *speed |= IXGBE_LINK_SPEED_1GB_FULL; + *autoneg = true; + break; + + default: + status = IXGBE_ERR_LINK_SETUP; + break; + } + + return status; +} + +/** + * ixgbe_get_media_type_82598 - Determines media type + * @hw: pointer to hardware structure + * + * Returns the media type (fiber, copper, backplane) + **/ +STATIC enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw) +{ + enum ixgbe_media_type media_type; + + DEBUGFUNC("ixgbe_get_media_type_82598"); + + /* Detect if there is a copper PHY attached. */ + switch (hw->phy.type) { + case ixgbe_phy_cu_unknown: + case ixgbe_phy_tn: + media_type = ixgbe_media_type_copper; + goto out; + default: + break; + } + + /* Media type for I82598 is based on device ID */ + switch (hw->device_id) { + case IXGBE_DEV_ID_82598: + case IXGBE_DEV_ID_82598_BX: + /* Default device ID is mezzanine card KX/KX4 */ + media_type = ixgbe_media_type_backplane; + break; + case IXGBE_DEV_ID_82598AF_DUAL_PORT: + case IXGBE_DEV_ID_82598AF_SINGLE_PORT: + case IXGBE_DEV_ID_82598_DA_DUAL_PORT: + case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM: + case IXGBE_DEV_ID_82598EB_XF_LR: + case IXGBE_DEV_ID_82598EB_SFP_LOM: + media_type = ixgbe_media_type_fiber; + break; + case IXGBE_DEV_ID_82598EB_CX4: + case IXGBE_DEV_ID_82598_CX4_DUAL_PORT: + media_type = ixgbe_media_type_cx4; + break; + case IXGBE_DEV_ID_82598AT: + case IXGBE_DEV_ID_82598AT2: + media_type = ixgbe_media_type_copper; + break; + default: + media_type = ixgbe_media_type_unknown; + break; + } +out: + return media_type; +} + +/** + * ixgbe_fc_enable_82598 - Enable flow control + * @hw: pointer to hardware structure + * + * Enable flow control according to the current settings. + **/ +s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw) +{ + s32 ret_val = IXGBE_SUCCESS; + u32 fctrl_reg; + u32 rmcs_reg; + u32 reg; + u32 fcrtl, fcrth; + u32 link_speed = 0; + int i; + bool link_up; + + DEBUGFUNC("ixgbe_fc_enable_82598"); + + /* Validate the water mark configuration */ + if (!hw->fc.pause_time) { + ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; + goto out; + } + + /* Low water mark of zero causes XOFF floods */ + for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + if ((hw->fc.current_mode & ixgbe_fc_tx_pause) && + hw->fc.high_water[i]) { + if (!hw->fc.low_water[i] || + hw->fc.low_water[i] >= hw->fc.high_water[i]) { + DEBUGOUT("Invalid water mark configuration\n"); + ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; + goto out; + } + } + } + + /* + * On 82598 having Rx FC on causes resets while doing 1G + * so if it's on turn it off once we know link_speed. For + * more details see 82598 Specification update. 
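Editor's aside: the flow-control enable path that follows applies two sanity rules, a water-mark check and a 1G downgrade of the requested mode. This standalone sketch restates both rules; the enum and values are illustrative, only the logic mirrors the code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

/* Rule 1: when Tx pause is enabled for a traffic class, the low water mark
 * must be non-zero and strictly below the high water mark; a zero low mark
 * floods the link with XOFF frames. */
static bool watermarks_valid(uint32_t low, uint32_t high, bool tx_pause)
{
    if (!tx_pause || !high)
        return true;
    return low != 0 && low < high;
}

/* Rule 2: on 82598, receiving pause frames at 1G can cause resets, so the
 * requested mode is downgraded once the negotiated speed is known. */
static enum fc_mode downgrade_at_1g(enum fc_mode requested)
{
    switch (requested) {
    case FC_FULL:     return FC_TX_PAUSE;
    case FC_RX_PAUSE: return FC_NONE;
    default:          return requested;
    }
}

int main(void)
{
    printf("low=0 high=0x200 tx_pause -> %s\n",
           watermarks_valid(0, 0x200, true) ? "ok" : "invalid");
    printf("FC_FULL at 1G -> mode %d\n", (int)downgrade_at_1g(FC_FULL));
    return 0;
}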
+ */ + hw->mac.ops.check_link(hw, &link_speed, &link_up, false); + if (link_up && link_speed == IXGBE_LINK_SPEED_1GB_FULL) { + switch (hw->fc.requested_mode) { + case ixgbe_fc_full: + hw->fc.requested_mode = ixgbe_fc_tx_pause; + break; + case ixgbe_fc_rx_pause: + hw->fc.requested_mode = ixgbe_fc_none; + break; + default: + /* no change */ + break; + } + } + + /* Negotiate the fc mode to use */ + ixgbe_fc_autoneg(hw); + + /* Disable any previous flow control settings */ + fctrl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL); + fctrl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE); + + rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS); + rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X); + + /* + * The possible values of fc.current_mode are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames, + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames but + * we do not support receiving pause frames). + * 3: Both Rx and Tx flow control (symmetric) are enabled. + * other: Invalid. + */ + switch (hw->fc.current_mode) { + case ixgbe_fc_none: + /* + * Flow control is disabled by software override or autoneg. + * The code below will actually disable it in the HW. + */ + break; + case ixgbe_fc_rx_pause: + /* + * Rx Flow control is enabled and Tx Flow control is + * disabled by software override. Since there really + * isn't a way to advertise that we are capable of RX + * Pause ONLY, we will advertise that we support both + * symmetric and asymmetric Rx PAUSE. Later, we will + * disable the adapter's ability to send PAUSE frames. + */ + fctrl_reg |= IXGBE_FCTRL_RFCE; + break; + case ixgbe_fc_tx_pause: + /* + * Tx Flow control is enabled, and Rx Flow control is + * disabled by software override. + */ + rmcs_reg |= IXGBE_RMCS_TFCE_802_3X; + break; + case ixgbe_fc_full: + /* Flow control (both Rx and Tx) is enabled by SW override. */ + fctrl_reg |= IXGBE_FCTRL_RFCE; + rmcs_reg |= IXGBE_RMCS_TFCE_802_3X; + break; + default: + DEBUGOUT("Flow control param set incorrectly\n"); + ret_val = IXGBE_ERR_CONFIG; + goto out; + break; + } + + /* Set 802.3x based flow control settings. */ + fctrl_reg |= IXGBE_FCTRL_DPF; + IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg); + IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg); + + /* Set up and enable Rx high/low water mark thresholds, enable XON. */ + for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + if ((hw->fc.current_mode & ixgbe_fc_tx_pause) && + hw->fc.high_water[i]) { + fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE; + fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN; + IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl); + IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), fcrth); + } else { + IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), 0); + IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), 0); + } + + } + + /* Configure pause time (2 TCs per register) */ + reg = hw->fc.pause_time * 0x00010001; + for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++) + IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg); + + /* Configure flow control refresh threshold value */ + IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2); + +out: + return ret_val; +} + +/** + * ixgbe_start_mac_link_82598 - Configures MAC link settings + * @hw: pointer to hardware structure + * @autoneg_wait_to_complete: true when waiting for completion is needed + * + * Configures link settings based on values in the ixgbe_hw struct. + * Restarts the link. Performs autonegotiation if needed. 
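Editor's aside: the register packing used by the flow-control code above is worth spelling out. Multiplying a 16-bit pause time by 0x00010001 replicates it into both halves of a 32-bit FCTTV word (two traffic classes per register), and the water marks are shifted left by 10 before being OR'd with their enable bits. The enable-bit values below are placeholders; only the arithmetic is taken from the code.

#include <stdint.h>
#include <stdio.h>

#define XONE_BIT 0x80000000u   /* illustrative enable-XON bit */
#define FCEN_BIT 0x80000000u   /* illustrative enable-FC bit  */

int main(void)
{
    uint16_t pause_time = 0x0680;
    uint32_t low_water  = 0x40;
    uint32_t high_water = 0x80;

    uint32_t fcttv = (uint32_t)pause_time * 0x00010001u; /* -> 0x06800680 */
    uint32_t fcrtl = (low_water  << 10) | XONE_BIT;      /* low mark + XON  */
    uint32_t fcrth = (high_water << 10) | FCEN_BIT;      /* high mark + en  */
    uint32_t fcrtv = pause_time / 2;                     /* refresh thresh  */

    printf("FCTTV=0x%08x FCRTL=0x%08x FCRTH=0x%08x FCRTV=0x%08x\n",
           (unsigned)fcttv, (unsigned)fcrtl, (unsigned)fcrth, (unsigned)fcrtv);
    return 0;
}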
+ **/ +STATIC s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw, + bool autoneg_wait_to_complete) +{ + u32 autoc_reg; + u32 links_reg; + u32 i; + s32 status = IXGBE_SUCCESS; + + DEBUGFUNC("ixgbe_start_mac_link_82598"); + + /* Restart link */ + autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); + autoc_reg |= IXGBE_AUTOC_AN_RESTART; + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); + + /* Only poll for autoneg to complete if specified to do so */ + if (autoneg_wait_to_complete) { + if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) == + IXGBE_AUTOC_LMS_KX4_AN || + (autoc_reg & IXGBE_AUTOC_LMS_MASK) == + IXGBE_AUTOC_LMS_KX4_AN_1G_AN) { + links_reg = 0; /* Just in case Autoneg time = 0 */ + for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) { + links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); + if (links_reg & IXGBE_LINKS_KX_AN_COMP) + break; + msec_delay(100); + } + if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) { + status = IXGBE_ERR_AUTONEG_NOT_COMPLETE; + DEBUGOUT("Autonegotiation did not complete.\n"); + } + } + } + + /* Add delay to filter out noises during initial link setup */ + msec_delay(50); + + return status; +} + +/** + * ixgbe_validate_link_ready - Function looks for phy link + * @hw: pointer to hardware structure + * + * Function indicates success when phy link is available. If phy is not ready + * within 5 seconds of MAC indicating link, the function returns error. + **/ +STATIC s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw) +{ + u32 timeout; + u16 an_reg; + + if (hw->device_id != IXGBE_DEV_ID_82598AT2) + return IXGBE_SUCCESS; + + for (timeout = 0; + timeout < IXGBE_VALIDATE_LINK_READY_TIMEOUT; timeout++) { + hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &an_reg); + + if ((an_reg & IXGBE_MII_AUTONEG_COMPLETE) && + (an_reg & IXGBE_MII_AUTONEG_LINK_UP)) + break; + + msec_delay(100); + } + + if (timeout == IXGBE_VALIDATE_LINK_READY_TIMEOUT) { + DEBUGOUT("Link was indicated but link is down\n"); + return IXGBE_ERR_LINK_SETUP; + } + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_check_mac_link_82598 - Get link/speed status + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @link_up: true is link is up, false otherwise + * @link_up_wait_to_complete: bool used to wait for link up or not + * + * Reads the links register to determine if link is up and the current speed + **/ +STATIC s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, bool *link_up, + bool link_up_wait_to_complete) +{ + u32 links_reg; + u32 i; + u16 link_reg, adapt_comp_reg; + + DEBUGFUNC("ixgbe_check_mac_link_82598"); + + /* + * SERDES PHY requires us to read link status from undocumented + * register 0xC79F. Bit 0 set indicates link is up/ready; clear + * indicates link down. OxC00C is read to check that the XAUI lanes + * are active. Bit 0 clear indicates active; set indicates inactive. 
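Editor's aside: the link bring-up paths above repeat the same "poll a status register until a bit sets, give up after N tries" idiom. The sketch below isolates it with the MMIO read replaced by a callback; the mask value and call count are made up for the demo.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t (*read_reg_fn)(void *ctx);

/* Returns true if (reg & mask) became non-zero within max_iters polls. */
static bool poll_for_bit(read_reg_fn read_reg, void *ctx,
                         uint32_t mask, unsigned max_iters)
{
    for (unsigned i = 0; i < max_iters; i++) {
        if (read_reg(ctx) & mask)
            return true;
        /* the real code sleeps between polls, e.g. msec_delay(100) */
    }
    return false;
}

static uint32_t fake_links_reg(void *ctx)
{
    unsigned *calls = ctx;
    /* pretend autonegotiation completes on the third read */
    return (++*calls >= 3) ? 0x40000000u : 0x0u;
}

int main(void)
{
    unsigned calls = 0;
    bool done = poll_for_bit(fake_links_reg, &calls, 0x40000000u, 45);
    printf("autoneg complete: %s after %u reads\n", done ? "yes" : "no", calls);
    return 0;
}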
+ */ + if (hw->phy.type == ixgbe_phy_nl) { + hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg); + hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg); + hw->phy.ops.read_reg(hw, 0xC00C, IXGBE_TWINAX_DEV, + &adapt_comp_reg); + if (link_up_wait_to_complete) { + for (i = 0; i < hw->mac.max_link_up_time; i++) { + if ((link_reg & 1) && + ((adapt_comp_reg & 1) == 0)) { + *link_up = true; + break; + } else { + *link_up = false; + } + msec_delay(100); + hw->phy.ops.read_reg(hw, 0xC79F, + IXGBE_TWINAX_DEV, + &link_reg); + hw->phy.ops.read_reg(hw, 0xC00C, + IXGBE_TWINAX_DEV, + &adapt_comp_reg); + } + } else { + if ((link_reg & 1) && ((adapt_comp_reg & 1) == 0)) + *link_up = true; + else + *link_up = false; + } + + if (*link_up == false) + goto out; + } + + links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); + if (link_up_wait_to_complete) { + for (i = 0; i < hw->mac.max_link_up_time; i++) { + if (links_reg & IXGBE_LINKS_UP) { + *link_up = true; + break; + } else { + *link_up = false; + } + msec_delay(100); + links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); + } + } else { + if (links_reg & IXGBE_LINKS_UP) + *link_up = true; + else + *link_up = false; + } + + if (links_reg & IXGBE_LINKS_SPEED) + *speed = IXGBE_LINK_SPEED_10GB_FULL; + else + *speed = IXGBE_LINK_SPEED_1GB_FULL; + + if ((hw->device_id == IXGBE_DEV_ID_82598AT2) && (*link_up == true) && + (ixgbe_validate_link_ready(hw) != IXGBE_SUCCESS)) + *link_up = false; + +out: + return IXGBE_SUCCESS; +} + +/** + * ixgbe_setup_mac_link_82598 - Set MAC link speed + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg_wait_to_complete: true when waiting for completion is needed + * + * Set the link speed in the AUTOC register and restarts link. + **/ +STATIC s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete) +{ + bool autoneg = false; + s32 status = IXGBE_SUCCESS; + ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN; + u32 curr_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); + u32 autoc = curr_autoc; + u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK; + + DEBUGFUNC("ixgbe_setup_mac_link_82598"); + + /* Check to see if speed passed in is supported. */ + ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg); + speed &= link_capabilities; + + if (speed == IXGBE_LINK_SPEED_UNKNOWN) + status = IXGBE_ERR_LINK_SETUP; + + /* Set KX4/KX support according to speed requested */ + else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN || + link_mode == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) { + autoc &= ~IXGBE_AUTOC_KX4_KX_SUPP_MASK; + if (speed & IXGBE_LINK_SPEED_10GB_FULL) + autoc |= IXGBE_AUTOC_KX4_SUPP; + if (speed & IXGBE_LINK_SPEED_1GB_FULL) + autoc |= IXGBE_AUTOC_KX_SUPP; + if (autoc != curr_autoc) + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc); + } + + if (status == IXGBE_SUCCESS) { + /* + * Setup and restart the link based on the new values in + * ixgbe_hw This will write the AUTOC register based on the new + * stored values + */ + status = ixgbe_start_mac_link_82598(hw, + autoneg_wait_to_complete); + } + + return status; +} + + +/** + * ixgbe_setup_copper_link_82598 - Set the PHY autoneg advertised field + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg_wait_to_complete: true if waiting is needed to complete + * + * Sets the link speed in the AUTOC register in the MAC and restarts link. 
+ **/ +STATIC s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete) +{ + s32 status; + + DEBUGFUNC("ixgbe_setup_copper_link_82598"); + + /* Setup the PHY according to input speed */ + status = hw->phy.ops.setup_link_speed(hw, speed, + autoneg_wait_to_complete); + /* Set up MAC */ + ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete); + + return status; +} + +/** + * ixgbe_reset_hw_82598 - Performs hardware reset + * @hw: pointer to hardware structure + * + * Resets the hardware by resetting the transmit and receive units, masks and + * clears all interrupts, performing a PHY reset, and performing a link (MAC) + * reset. + **/ +STATIC s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw) +{ + s32 status = IXGBE_SUCCESS; + s32 phy_status = IXGBE_SUCCESS; + u32 ctrl; + u32 gheccr; + u32 i; + u32 autoc; + u8 analog_val; + + DEBUGFUNC("ixgbe_reset_hw_82598"); + + /* Call adapter stop to disable tx/rx and clear interrupts */ + status = hw->mac.ops.stop_adapter(hw); + if (status != IXGBE_SUCCESS) + goto reset_hw_out; + + /* + * Power up the Atlas Tx lanes if they are currently powered down. + * Atlas Tx lanes are powered down for MAC loopback tests, but + * they are not automatically restored on reset. + */ + hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val); + if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) { + /* Enable Tx Atlas so packets can be transmitted again */ + hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, + &analog_val); + analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN; + hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, + analog_val); + + hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, + &analog_val); + analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL; + hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, + analog_val); + + hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, + &analog_val); + analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL; + hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, + analog_val); + + hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, + &analog_val); + analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL; + hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, + analog_val); + } + + /* Reset PHY */ + if (hw->phy.reset_disable == false) { + /* PHY ops must be identified and initialized prior to reset */ + + /* Init PHY and function pointers, perform SFP setup */ + phy_status = hw->phy.ops.init(hw); + if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED) + goto reset_hw_out; + if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT) + goto mac_reset_top; + + hw->phy.ops.reset(hw); + } + +mac_reset_top: + /* + * Issue global reset to the MAC. This needs to be a SW reset. + * If link reset is used, it might reset the MAC when mng is using it + */ + ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL) | IXGBE_CTRL_RST; + IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); + IXGBE_WRITE_FLUSH(hw); + + /* Poll for reset bit to self-clear indicating reset is complete */ + for (i = 0; i < 10; i++) { + usec_delay(1); + ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); + if (!(ctrl & IXGBE_CTRL_RST)) + break; + } + if (ctrl & IXGBE_CTRL_RST) { + status = IXGBE_ERR_RESET_FAILED; + DEBUGOUT("Reset polling failed to complete.\n"); + } + + msec_delay(50); + + /* + * Double resets are required for recovery from certain error + * conditions. Between resets, it is necessary to stall to allow time + * for any pending HW events to complete. 
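Editor's aside: the reset flow above is "request a software reset, poll for the bit to self-clear, and repeat once more if the double-reset flag is set". The following sketch plays that flow out against a fake device; constants and the fake structure are illustrative only.

#include <stdbool.h>
#include <stdio.h>
#include <stdint.h>

#define CTRL_RST              0x04000000u /* illustrative reset bit */
#define FLAG_DOUBLE_RESET_REQ 0x01u

struct fake_dev {
    uint32_t ctrl;
    uint32_t flags;
    int clears_after;          /* reads until RST "self-clears" */
};

static bool do_one_reset(struct fake_dev *d)
{
    d->ctrl |= CTRL_RST;                          /* request SW reset */
    for (int i = 0; i < 10; i++) {
        if (--d->clears_after <= 0)
            d->ctrl &= ~CTRL_RST;                 /* hardware clears the bit */
        if (!(d->ctrl & CTRL_RST))
            return true;
    }
    return false;                                 /* reset polling failed */
}

int main(void)
{
    struct fake_dev dev = { 0, FLAG_DOUBLE_RESET_REQ, 3 };
    int resets = 0;

again:
    if (!do_one_reset(&dev)) {
        puts("reset failed");
        return 1;
    }
    resets++;

    /* Some error states need a second pass, mirroring the goto back to
     * mac_reset_top in the code above. */
    if (dev.flags & FLAG_DOUBLE_RESET_REQ) {
        dev.flags &= ~FLAG_DOUBLE_RESET_REQ;
        goto again;
    }

    printf("device reset %d time(s)\n", resets);
    return 0;
}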
+ */ + if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) { + hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED; + goto mac_reset_top; + } + + gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR); + gheccr &= ~((1 << 21) | (1 << 18) | (1 << 9) | (1 << 6)); + IXGBE_WRITE_REG(hw, IXGBE_GHECCR, gheccr); + + /* + * Store the original AUTOC value if it has not been + * stored off yet. Otherwise restore the stored original + * AUTOC value since the reset operation sets back to deaults. + */ + autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); + if (hw->mac.orig_link_settings_stored == false) { + hw->mac.orig_autoc = autoc; + hw->mac.orig_link_settings_stored = true; + } else if (autoc != hw->mac.orig_autoc) { + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc); + } + + /* Store the permanent mac address */ + hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr); + + /* + * Store MAC address from RAR0, clear receive address registers, and + * clear the multicast table + */ + hw->mac.ops.init_rx_addrs(hw); + +reset_hw_out: + if (phy_status != IXGBE_SUCCESS) + status = phy_status; + + return status; +} + +/** + * ixgbe_set_vmdq_82598 - Associate a VMDq set index with a rx address + * @hw: pointer to hardware struct + * @rar: receive address register index to associate with a VMDq index + * @vmdq: VMDq set index + **/ +s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq) +{ + u32 rar_high; + u32 rar_entries = hw->mac.num_rar_entries; + + DEBUGFUNC("ixgbe_set_vmdq_82598"); + + /* Make sure we are using a valid rar index range */ + if (rar >= rar_entries) { + DEBUGOUT1("RAR index %d is out of range.\n", rar); + return IXGBE_ERR_INVALID_ARGUMENT; + } + + rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar)); + rar_high &= ~IXGBE_RAH_VIND_MASK; + rar_high |= ((vmdq << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK); + IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high); + return IXGBE_SUCCESS; +} + +/** + * ixgbe_clear_vmdq_82598 - Disassociate a VMDq set index from an rx address + * @hw: pointer to hardware struct + * @rar: receive address register index to associate with a VMDq index + * @vmdq: VMDq clear index (not used in 82598, but elsewhere) + **/ +STATIC s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq) +{ + u32 rar_high; + u32 rar_entries = hw->mac.num_rar_entries; + + UNREFERENCED_1PARAMETER(vmdq); + + /* Make sure we are using a valid rar index range */ + if (rar >= rar_entries) { + DEBUGOUT1("RAR index %d is out of range.\n", rar); + return IXGBE_ERR_INVALID_ARGUMENT; + } + + rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar)); + if (rar_high & IXGBE_RAH_VIND_MASK) { + rar_high &= ~IXGBE_RAH_VIND_MASK; + IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high); + } + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_set_vfta_82598 - Set VLAN filter table + * @hw: pointer to hardware structure + * @vlan: VLAN id to write to VLAN filter + * @vind: VMDq output index that maps queue to VLAN id in VFTA + * @vlan_on: boolean flag to turn on/off VLAN in VFTA + * @vlvf_bypass: boolean flag - unused + * + * Turn on/off specified VLAN in the VLAN filter table. 
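Editor's aside: the VLAN filter update that follows packs two lookups out of one VLAN ID: a 32-bit present bit in VFTA and a 4-bit VMDq nibble in one of four VFTAVIND byte arrays. This sketch prints the index arithmetic for a couple of VLAN IDs so the bit layout is easy to follow; it performs no hardware access and uses only the shifts and masks visible in the code.

#include <stdio.h>

static void explain_vfta_index(unsigned vlan, unsigned vind)
{
    unsigned regindex = (vlan >> 5) & 0x7F;  /* which 32-bit VFTA word    */
    unsigned bitindex = vlan & 0x1F;         /* which bit in that word    */
    unsigned vftabyte = (vlan >> 3) & 0x03;  /* which VFTAVIND byte array */
    unsigned nibble   = (vlan & 0x7) << 2;   /* bit offset of the nibble  */

    unsigned vfta_bit   = 1u << bitindex;
    unsigned vind_field = (vind & 0xF) << nibble;

    printf("vlan %4u: VFTA[%u] |= 0x%08x, VFTAVIND[%u][%u] gets 0x%08x\n",
           vlan, regindex, vfta_bit, vftabyte, regindex, vind_field);
}

int main(void)
{
    explain_vfta_index(100, 5);   /* -> VFTA[3], bit 4   */
    explain_vfta_index(4095, 2);  /* -> VFTA[127], bit 31 */
    return 0;
}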
+ **/ +s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind, + bool vlan_on, bool vlvf_bypass) +{ + u32 regindex; + u32 bitindex; + u32 bits; + u32 vftabyte; + + UNREFERENCED_1PARAMETER(vlvf_bypass); + + DEBUGFUNC("ixgbe_set_vfta_82598"); + + if (vlan > 4095) + return IXGBE_ERR_PARAM; + + /* Determine 32-bit word position in array */ + regindex = (vlan >> 5) & 0x7F; /* upper seven bits */ + + /* Determine the location of the (VMD) queue index */ + vftabyte = ((vlan >> 3) & 0x03); /* bits (4:3) indicating byte array */ + bitindex = (vlan & 0x7) << 2; /* lower 3 bits indicate nibble */ + + /* Set the nibble for VMD queue index */ + bits = IXGBE_READ_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex)); + bits &= (~(0x0F << bitindex)); + bits |= (vind << bitindex); + IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex), bits); + + /* Determine the location of the bit for this VLAN id */ + bitindex = vlan & 0x1F; /* lower five bits */ + + bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex)); + if (vlan_on) + /* Turn on this VLAN id */ + bits |= (1 << bitindex); + else + /* Turn off this VLAN id */ + bits &= ~(1 << bitindex); + IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_clear_vfta_82598 - Clear VLAN filter table + * @hw: pointer to hardware structure + * + * Clears the VLAN filer table, and the VMDq index associated with the filter + **/ +STATIC s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw) +{ + u32 offset; + u32 vlanbyte; + + DEBUGFUNC("ixgbe_clear_vfta_82598"); + + for (offset = 0; offset < hw->mac.vft_size; offset++) + IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0); + + for (vlanbyte = 0; vlanbyte < 4; vlanbyte++) + for (offset = 0; offset < hw->mac.vft_size; offset++) + IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset), + 0); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_read_analog_reg8_82598 - Reads 8 bit Atlas analog register + * @hw: pointer to hardware structure + * @reg: analog register to read + * @val: read value + * + * Performs read operation to Atlas analog register specified. + **/ +s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val) +{ + u32 atlas_ctl; + + DEBUGFUNC("ixgbe_read_analog_reg8_82598"); + + IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, + IXGBE_ATLASCTL_WRITE_CMD | (reg << 8)); + IXGBE_WRITE_FLUSH(hw); + usec_delay(10); + atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL); + *val = (u8)atlas_ctl; + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_write_analog_reg8_82598 - Writes 8 bit Atlas analog register + * @hw: pointer to hardware structure + * @reg: atlas register to write + * @val: value to write + * + * Performs write operation to Atlas analog register specified. + **/ +s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val) +{ + u32 atlas_ctl; + + DEBUGFUNC("ixgbe_write_analog_reg8_82598"); + + atlas_ctl = (reg << 8) | val; + IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, atlas_ctl); + IXGBE_WRITE_FLUSH(hw); + usec_delay(10); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_read_i2c_phy_82598 - Reads 8 bit word over I2C interface. + * @hw: pointer to hardware structure + * @dev_addr: address to read from + * @byte_offset: byte offset to read from dev_addr + * @eeprom_data: value read + * + * Performs 8 byte read operation to SFP module's EEPROM over I2C interface. 
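Editor's aside: the I2C-over-MDIO tunnel used by the routine that follows composes a single address word from the device address and byte offset, then returns the EEPROM byte in the high half of the data register. The sketch below shows that composition and extraction; the read-mask value is an assumed placeholder, while 0xA0 is simply the conventional SFP EEPROM I2C address.

#include <stdint.h>
#include <stdio.h>

#define I2C_EEPROM_READ_MASK 0x0100u  /* assumed flag bit */

int main(void)
{
    uint8_t dev_addr    = 0xA0;       /* SFP EEPROM I2C address */
    uint8_t byte_offset = 0x03;

    /* Address word written to the PHY's SDA/SCL address register. */
    uint16_t sfp_addr = (uint16_t)((dev_addr << 8) + byte_offset);
    sfp_addr |= I2C_EEPROM_READ_MASK;

    /* The PHY returns the EEPROM byte in the high half of the data reg. */
    uint16_t sfp_data = 0x4D00;       /* pretend the module answered 0x4D */
    uint8_t  eeprom_data = (uint8_t)(sfp_data >> 8);

    printf("addr word=0x%04x, eeprom byte=0x%02x\n",
           (unsigned)sfp_addr, (unsigned)eeprom_data);
    return 0;
}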
+ **/ +STATIC s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr, + u8 byte_offset, u8 *eeprom_data) +{ + s32 status = IXGBE_SUCCESS; + u16 sfp_addr = 0; + u16 sfp_data = 0; + u16 sfp_stat = 0; + u16 gssr; + u32 i; + + DEBUGFUNC("ixgbe_read_i2c_phy_82598"); + + if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1) + gssr = IXGBE_GSSR_PHY1_SM; + else + gssr = IXGBE_GSSR_PHY0_SM; + + if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != IXGBE_SUCCESS) + return IXGBE_ERR_SWFW_SYNC; + + if (hw->phy.type == ixgbe_phy_nl) { + /* + * NetLogic phy SDA/SCL registers are at addresses 0xC30A to + * 0xC30D. These registers are used to talk to the SFP+ + * module's EEPROM through the SDA/SCL (I2C) interface. + */ + sfp_addr = (dev_addr << 8) + byte_offset; + sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK); + hw->phy.ops.write_reg_mdi(hw, + IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, + sfp_addr); + + /* Poll status */ + for (i = 0; i < 100; i++) { + hw->phy.ops.read_reg_mdi(hw, + IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, + &sfp_stat); + sfp_stat = sfp_stat & IXGBE_I2C_EEPROM_STATUS_MASK; + if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS) + break; + msec_delay(10); + } + + if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_PASS) { + DEBUGOUT("EEPROM read did not pass.\n"); + status = IXGBE_ERR_SFP_NOT_PRESENT; + goto out; + } + + /* Read data */ + hw->phy.ops.read_reg_mdi(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, &sfp_data); + + *eeprom_data = (u8)(sfp_data >> 8); + } else { + status = IXGBE_ERR_PHY; + } + +out: + hw->mac.ops.release_swfw_sync(hw, gssr); + return status; +} + +/** + * ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface. + * @hw: pointer to hardware structure + * @byte_offset: EEPROM byte offset to read + * @eeprom_data: value read + * + * Performs 8 byte read operation to SFP module's EEPROM over I2C interface. + **/ +s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset, + u8 *eeprom_data) +{ + return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR, + byte_offset, eeprom_data); +} + +/** + * ixgbe_read_i2c_sff8472_82598 - Reads 8 bit word over I2C interface. + * @hw: pointer to hardware structure + * @byte_offset: byte offset at address 0xA2 + * @sff8472_data: value read + * + * Performs 8 byte read operation to SFP module's SFF-8472 data over I2C + **/ +STATIC s32 ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset, + u8 *sff8472_data) +{ + return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR2, + byte_offset, sff8472_data); +} + +/** + * ixgbe_get_supported_physical_layer_82598 - Returns physical layer type + * @hw: pointer to hardware structure + * + * Determines physical layer capabilities of the current configuration. 
+ **/ +u64 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw) +{ + u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; + u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); + u32 pma_pmd_10g = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK; + u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK; + u16 ext_ability = 0; + + DEBUGFUNC("ixgbe_get_supported_physical_layer_82598"); + + hw->phy.ops.identify(hw); + + /* Copper PHY must be checked before AUTOC LMS to determine correct + * physical layer because 10GBase-T PHYs use LMS = KX4/KX */ + switch (hw->phy.type) { + case ixgbe_phy_tn: + case ixgbe_phy_cu_unknown: + hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability); + if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY) + physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T; + if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY) + physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T; + if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY) + physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX; + goto out; + default: + break; + } + + switch (autoc & IXGBE_AUTOC_LMS_MASK) { + case IXGBE_AUTOC_LMS_1G_AN: + case IXGBE_AUTOC_LMS_1G_LINK_NO_AN: + if (pma_pmd_1g == IXGBE_AUTOC_1G_KX) + physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX; + else + physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_BX; + break; + case IXGBE_AUTOC_LMS_10G_LINK_NO_AN: + if (pma_pmd_10g == IXGBE_AUTOC_10G_CX4) + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4; + else if (pma_pmd_10g == IXGBE_AUTOC_10G_KX4) + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4; + else /* XAUI */ + physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; + break; + case IXGBE_AUTOC_LMS_KX4_AN: + case IXGBE_AUTOC_LMS_KX4_AN_1G_AN: + if (autoc & IXGBE_AUTOC_KX_SUPP) + physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX; + if (autoc & IXGBE_AUTOC_KX4_SUPP) + physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4; + break; + default: + break; + } + + if (hw->phy.type == ixgbe_phy_nl) { + hw->phy.ops.identify_sfp(hw); + + switch (hw->phy.sfp_type) { + case ixgbe_sfp_type_da_cu: + physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU; + break; + case ixgbe_sfp_type_sr: + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR; + break; + case ixgbe_sfp_type_lr: + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR; + break; + default: + physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; + break; + } + } + + switch (hw->device_id) { + case IXGBE_DEV_ID_82598_DA_DUAL_PORT: + physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU; + break; + case IXGBE_DEV_ID_82598AF_DUAL_PORT: + case IXGBE_DEV_ID_82598AF_SINGLE_PORT: + case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM: + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR; + break; + case IXGBE_DEV_ID_82598EB_XF_LR: + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR; + break; + default: + break; + } + +out: + return physical_layer; +} + +/** + * ixgbe_set_lan_id_multi_port_pcie_82598 - Set LAN id for PCIe multiple + * port devices. + * @hw: pointer to the HW structure + * + * Calls common function and corrects issue with some single port devices + * that enable LAN1 but not LAN0. 
+ **/ +void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw) +{ + struct ixgbe_bus_info *bus = &hw->bus; + u16 pci_gen = 0; + u16 pci_ctrl2 = 0; + + DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie_82598"); + + ixgbe_set_lan_id_multi_port_pcie(hw); + + /* check if LAN0 is disabled */ + hw->eeprom.ops.read(hw, IXGBE_PCIE_GENERAL_PTR, &pci_gen); + if ((pci_gen != 0) && (pci_gen != 0xFFFF)) { + + hw->eeprom.ops.read(hw, pci_gen + IXGBE_PCIE_CTRL2, &pci_ctrl2); + + /* if LAN0 is completely disabled force function to 0 */ + if ((pci_ctrl2 & IXGBE_PCIE_CTRL2_LAN_DISABLE) && + !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DISABLE_SELECT) && + !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DUMMY_ENABLE)) { + + bus->func = 0; + } + } +} + +/** + * ixgbe_enable_relaxed_ordering_82598 - enable relaxed ordering + * @hw: pointer to hardware structure + * + **/ +void ixgbe_enable_relaxed_ordering_82598(struct ixgbe_hw *hw) +{ + u32 regval; + u32 i; + + DEBUGFUNC("ixgbe_enable_relaxed_ordering_82598"); + + /* Enable relaxed ordering */ + for (i = 0; ((i < hw->mac.max_tx_queues) && + (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) { + regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i)); + regval |= IXGBE_DCA_TXCTRL_DESC_WRO_EN; + IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval); + } + + for (i = 0; ((i < hw->mac.max_rx_queues) && + (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) { + regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); + regval |= IXGBE_DCA_RXCTRL_DATA_WRO_EN | + IXGBE_DCA_RXCTRL_HEAD_WRO_EN; + IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval); + } + +} + +/** + * ixgbe_set_rxpba_82598 - Initialize RX packet buffer + * @hw: pointer to hardware structure + * @num_pb: number of packet buffers to allocate + * @headroom: reserve n KB of headroom + * @strategy: packet buffer allocation strategy + **/ +STATIC void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb, + u32 headroom, int strategy) +{ + u32 rxpktsize = IXGBE_RXPBSIZE_64KB; + u8 i = 0; + UNREFERENCED_1PARAMETER(headroom); + + if (!num_pb) + return; + + /* Setup Rx packet buffer sizes */ + switch (strategy) { + case PBA_STRATEGY_WEIGHTED: + /* Setup the first four at 80KB */ + rxpktsize = IXGBE_RXPBSIZE_80KB; + for (; i < 4; i++) + IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize); + /* Setup the last four at 48KB...don't re-init i */ + rxpktsize = IXGBE_RXPBSIZE_48KB; + /* Fall Through */ + case PBA_STRATEGY_EQUAL: + default: + /* Divide the remaining Rx packet buffer evenly among the TCs */ + for (; i < IXGBE_MAX_PACKET_BUFFERS; i++) + IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize); + break; + } + + /* Setup Tx packet buffer sizes */ + for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) + IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), IXGBE_TXPBSIZE_40KB); +} + +/** + * ixgbe_enable_rx_dma_82598 - Enable the Rx DMA unit + * @hw: pointer to hardware structure + * @regval: register value to write to RXCTRL + * + * Enables the Rx DMA unit + **/ +s32 ixgbe_enable_rx_dma_82598(struct ixgbe_hw *hw, u32 regval) +{ + DEBUGFUNC("ixgbe_enable_rx_dma_82598"); + + IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval); + + return IXGBE_SUCCESS; +} diff --git a/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_82598.h b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_82598.h new file mode 100644 index 000000000..7bad5e12d --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_82598.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + +#ifndef _IXGBE_82598_H_ +#define _IXGBE_82598_H_ + +u32 ixgbe_get_pcie_msix_count_82598(struct ixgbe_hw *hw); +s32 
ixgbe_fc_enable_82598(struct ixgbe_hw *hw); +s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw); +void ixgbe_enable_relaxed_ordering_82598(struct ixgbe_hw *hw); +s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq); +s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on, + bool vlvf_bypass); +s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val); +s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val); +s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset, + u8 *eeprom_data); +u64 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw); +s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw); +void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw); +void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw); +s32 ixgbe_enable_rx_dma_82598(struct ixgbe_hw *hw, u32 regval); +#endif /* _IXGBE_82598_H_ */ diff --git a/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_82599.c b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_82599.c new file mode 100644 index 000000000..9cd0b1428 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_82599.c @@ -0,0 +1,2603 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + +#include "ixgbe_type.h" +#include "ixgbe_82599.h" +#include "ixgbe_api.h" +#include "ixgbe_common.h" +#include "ixgbe_phy.h" + +#define IXGBE_82599_MAX_TX_QUEUES 128 +#define IXGBE_82599_MAX_RX_QUEUES 128 +#define IXGBE_82599_RAR_ENTRIES 128 +#define IXGBE_82599_MC_TBL_SIZE 128 +#define IXGBE_82599_VFT_TBL_SIZE 128 +#define IXGBE_82599_RX_PB_SIZE 512 + +STATIC s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete); +STATIC s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw); +STATIC s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw, + u16 offset, u16 *data); +STATIC s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data); +STATIC s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data); +STATIC s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 data); + +void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw) +{ + struct ixgbe_mac_info *mac = &hw->mac; + + DEBUGFUNC("ixgbe_init_mac_link_ops_82599"); + + /* + * enable the laser control functions for SFP+ fiber + * and MNG not enabled + */ + if ((mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) && + !ixgbe_mng_enabled(hw)) { + mac->ops.disable_tx_laser = + ixgbe_disable_tx_laser_multispeed_fiber; + mac->ops.enable_tx_laser = + ixgbe_enable_tx_laser_multispeed_fiber; + mac->ops.flap_tx_laser = ixgbe_flap_tx_laser_multispeed_fiber; + + } else { + mac->ops.disable_tx_laser = NULL; + mac->ops.enable_tx_laser = NULL; + mac->ops.flap_tx_laser = NULL; + } + + if (hw->phy.multispeed_fiber) { + /* Set up dual speed SFP+ support */ + mac->ops.setup_link = ixgbe_setup_mac_link_multispeed_fiber; + mac->ops.setup_mac_link = ixgbe_setup_mac_link_82599; + mac->ops.set_rate_select_speed = + ixgbe_set_hard_rate_select_speed; + } else { + if ((ixgbe_get_media_type(hw) == ixgbe_media_type_backplane) && + (hw->phy.smart_speed == ixgbe_smart_speed_auto || + hw->phy.smart_speed == ixgbe_smart_speed_on) && + !ixgbe_verify_lesm_fw_enabled_82599(hw)) { + mac->ops.setup_link = ixgbe_setup_mac_link_smartspeed; + } else { + mac->ops.setup_link = ixgbe_setup_mac_link_82599; + } + } +} + +/** + * ixgbe_init_phy_ops_82599 - PHY/SFP specific init + * @hw: pointer to 
hardware structure + * + * Initialize any function pointers that were not able to be + * set during init_shared_code because the PHY/SFP type was + * not known. Perform the SFP init if necessary. + * + **/ +s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw) +{ + struct ixgbe_mac_info *mac = &hw->mac; + struct ixgbe_phy_info *phy = &hw->phy; + s32 ret_val = IXGBE_SUCCESS; + u32 esdp; + + DEBUGFUNC("ixgbe_init_phy_ops_82599"); + + if (hw->device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP) { + /* Store flag indicating I2C bus access control unit. */ + hw->phy.qsfp_shared_i2c_bus = TRUE; + + /* Initialize access to QSFP+ I2C bus */ + esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); + esdp |= IXGBE_ESDP_SDP0_DIR; + esdp &= ~IXGBE_ESDP_SDP1_DIR; + esdp &= ~IXGBE_ESDP_SDP0; + esdp &= ~IXGBE_ESDP_SDP0_NATIVE; + esdp &= ~IXGBE_ESDP_SDP1_NATIVE; + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); + IXGBE_WRITE_FLUSH(hw); + + phy->ops.read_i2c_byte = ixgbe_read_i2c_byte_82599; + phy->ops.write_i2c_byte = ixgbe_write_i2c_byte_82599; + } + /* Identify the PHY or SFP module */ + ret_val = phy->ops.identify(hw); + if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED) + goto init_phy_ops_out; + + /* Setup function pointers based on detected SFP module and speeds */ + ixgbe_init_mac_link_ops_82599(hw); + if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) + hw->phy.ops.reset = NULL; + + /* If copper media, overwrite with copper function pointers */ + if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) { + mac->ops.setup_link = ixgbe_setup_copper_link_82599; + mac->ops.get_link_capabilities = + ixgbe_get_copper_link_capabilities_generic; + } + + /* Set necessary function pointers based on PHY type */ + switch (hw->phy.type) { + case ixgbe_phy_tn: + phy->ops.setup_link = ixgbe_setup_phy_link_tnx; + phy->ops.check_link = ixgbe_check_phy_link_tnx; + phy->ops.get_firmware_version = + ixgbe_get_phy_firmware_version_tnx; + break; + default: + break; + } +init_phy_ops_out: + return ret_val; +} + +s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw) +{ + s32 ret_val = IXGBE_SUCCESS; + u16 list_offset, data_offset, data_value; + + DEBUGFUNC("ixgbe_setup_sfp_modules_82599"); + + if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) { + ixgbe_init_mac_link_ops_82599(hw); + + hw->phy.ops.reset = NULL; + + ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset, + &data_offset); + if (ret_val != IXGBE_SUCCESS) + goto setup_sfp_out; + + /* PHY config will finish before releasing the semaphore */ + ret_val = hw->mac.ops.acquire_swfw_sync(hw, + IXGBE_GSSR_MAC_CSR_SM); + if (ret_val != IXGBE_SUCCESS) { + ret_val = IXGBE_ERR_SWFW_SYNC; + goto setup_sfp_out; + } + + if (hw->eeprom.ops.read(hw, ++data_offset, &data_value)) + goto setup_sfp_err; + while (data_value != 0xffff) { + IXGBE_WRITE_REG(hw, IXGBE_CORECTL, data_value); + IXGBE_WRITE_FLUSH(hw); + if (hw->eeprom.ops.read(hw, ++data_offset, &data_value)) + goto setup_sfp_err; + } + + /* Release the semaphore */ + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); + /* Delay obtaining semaphore again to allow FW access + * prot_autoc_write uses the semaphore too. 
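Editor's aside: the SFP setup routine above consumes a sentinel-terminated list of configuration words from the EEPROM, writing each one to a core control register until it reads 0xFFFF. Here is that loop with the EEPROM faked as an array; the values are invented for the demo and the register write is a print statement.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* data_offset points at the first value; 0xFFFF terminates the list. */
    const uint16_t fake_eeprom[] = { 0x1234, 0x00C0, 0x8001, 0xFFFF };
    uint32_t corectl = 0;

    for (unsigned off = 0; fake_eeprom[off] != 0xFFFF; off++) {
        corectl = fake_eeprom[off];            /* stand-in for CORECTL write */
        printf("write CORECTL <- 0x%04x\n", (unsigned)fake_eeprom[off]);
    }
    (void)corectl;
    return 0;
}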
+ */ + msec_delay(hw->eeprom.semaphore_delay); + + /* Restart DSP and set SFI mode */ + ret_val = hw->mac.ops.prot_autoc_write(hw, + hw->mac.orig_autoc | IXGBE_AUTOC_LMS_10G_SERIAL, + false); + + if (ret_val) { + DEBUGOUT("sfp module setup not complete\n"); + ret_val = IXGBE_ERR_SFP_SETUP_NOT_COMPLETE; + goto setup_sfp_out; + } + + } + +setup_sfp_out: + return ret_val; + +setup_sfp_err: + /* Release the semaphore */ + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); + /* Delay obtaining semaphore again to allow FW access */ + msec_delay(hw->eeprom.semaphore_delay); + ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, + "eeprom read at offset %d failed", data_offset); + return IXGBE_ERR_PHY; +} + +/** + * prot_autoc_read_82599 - Hides MAC differences needed for AUTOC read + * @hw: pointer to hardware structure + * @locked: Return the if we locked for this read. + * @reg_val: Value we read from AUTOC + * + * For this part (82599) we need to wrap read-modify-writes with a possible + * FW/SW lock. It is assumed this lock will be freed with the next + * prot_autoc_write_82599(). + */ +s32 prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked, u32 *reg_val) +{ + s32 ret_val; + + *locked = false; + /* If LESM is on then we need to hold the SW/FW semaphore. */ + if (ixgbe_verify_lesm_fw_enabled_82599(hw)) { + ret_val = hw->mac.ops.acquire_swfw_sync(hw, + IXGBE_GSSR_MAC_CSR_SM); + if (ret_val != IXGBE_SUCCESS) + return IXGBE_ERR_SWFW_SYNC; + + *locked = true; + } + + *reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC); + return IXGBE_SUCCESS; +} + +/** + * prot_autoc_write_82599 - Hides MAC differences needed for AUTOC write + * @hw: pointer to hardware structure + * @autoc: value to write to AUTOC + * @locked: bool to indicate whether the SW/FW lock was already taken by + * previous proc_autoc_read_82599. + * + * This part (82599) may need to hold the SW/FW lock around all writes to + * AUTOC. Likewise after a write we need to do a pipeline reset. + */ +s32 prot_autoc_write_82599(struct ixgbe_hw *hw, u32 autoc, bool locked) +{ + s32 ret_val = IXGBE_SUCCESS; + + /* Blocked by MNG FW so bail */ + if (ixgbe_check_reset_blocked(hw)) + goto out; + + /* We only need to get the lock if: + * - We didn't do it already (in the read part of a read-modify-write) + * - LESM is enabled. + */ + if (!locked && ixgbe_verify_lesm_fw_enabled_82599(hw)) { + ret_val = hw->mac.ops.acquire_swfw_sync(hw, + IXGBE_GSSR_MAC_CSR_SM); + if (ret_val != IXGBE_SUCCESS) + return IXGBE_ERR_SWFW_SYNC; + + locked = true; + } + + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc); + ret_val = ixgbe_reset_pipeline_82599(hw); + +out: + /* Free the SW/FW semaphore as we either grabbed it here or + * already had it when this function was called. + */ + if (locked) + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); + + return ret_val; +} + +/** + * ixgbe_init_ops_82599 - Inits func ptrs and MAC type + * @hw: pointer to hardware structure + * + * Initialize the function pointers and assign the MAC type for 82599. + * Does not touch the hardware. 
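Editor's aside: the prot_autoc_read/prot_autoc_write pair above implements a read-modify-write under an optional firmware lock, where the "locked" flag produced by the read must be handed back to the write so the lock is released exactly once. The sketch below captures that contract with stand-in lock functions and a fake register; it is not the driver's locking code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool lesm_enabled = true;          /* pretend LESM firmware is active */
static uint32_t autoc_reg = 0x0000C000u;  /* fake AUTOC contents             */

static void fw_lock(void)   { puts("  take SW/FW semaphore"); }
static void fw_unlock(void) { puts("  release SW/FW semaphore"); }

static uint32_t prot_read(bool *locked)
{
    *locked = false;
    if (lesm_enabled) {              /* lock only when firmware shares the reg */
        fw_lock();
        *locked = true;
    }
    return autoc_reg;
}

static void prot_write(uint32_t val, bool locked)
{
    if (!locked && lesm_enabled) {   /* a standalone write still needs the lock */
        fw_lock();
        locked = true;
    }
    autoc_reg = val;
    if (locked)
        fw_unlock();                 /* free it whether we or the read took it */
}

int main(void)
{
    bool locked;
    uint32_t v = prot_read(&locked);   /* read half of the read-modify-write */
    prot_write(v | 0x1u, locked);      /* modify + write, reusing the lock   */
    printf("AUTOC now 0x%08x\n", (unsigned)autoc_reg);
    return 0;
}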
+ **/ + +s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw) +{ + struct ixgbe_mac_info *mac = &hw->mac; + struct ixgbe_phy_info *phy = &hw->phy; + struct ixgbe_eeprom_info *eeprom = &hw->eeprom; + s32 ret_val; + + DEBUGFUNC("ixgbe_init_ops_82599"); + + ixgbe_init_phy_ops_generic(hw); + ret_val = ixgbe_init_ops_generic(hw); + + /* PHY */ + phy->ops.identify = ixgbe_identify_phy_82599; + phy->ops.init = ixgbe_init_phy_ops_82599; + + /* MAC */ + mac->ops.reset_hw = ixgbe_reset_hw_82599; + mac->ops.enable_relaxed_ordering = ixgbe_enable_relaxed_ordering_gen2; + mac->ops.get_media_type = ixgbe_get_media_type_82599; + mac->ops.get_supported_physical_layer = + ixgbe_get_supported_physical_layer_82599; + mac->ops.disable_sec_rx_path = ixgbe_disable_sec_rx_path_generic; + mac->ops.enable_sec_rx_path = ixgbe_enable_sec_rx_path_generic; + mac->ops.enable_rx_dma = ixgbe_enable_rx_dma_82599; + mac->ops.read_analog_reg8 = ixgbe_read_analog_reg8_82599; + mac->ops.write_analog_reg8 = ixgbe_write_analog_reg8_82599; + mac->ops.start_hw = ixgbe_start_hw_82599; + mac->ops.get_san_mac_addr = ixgbe_get_san_mac_addr_generic; + mac->ops.set_san_mac_addr = ixgbe_set_san_mac_addr_generic; + mac->ops.get_device_caps = ixgbe_get_device_caps_generic; + mac->ops.get_wwn_prefix = ixgbe_get_wwn_prefix_generic; + mac->ops.get_fcoe_boot_status = ixgbe_get_fcoe_boot_status_generic; + mac->ops.prot_autoc_read = prot_autoc_read_82599; + mac->ops.prot_autoc_write = prot_autoc_write_82599; + + /* RAR, Multicast, VLAN */ + mac->ops.set_vmdq = ixgbe_set_vmdq_generic; + mac->ops.set_vmdq_san_mac = ixgbe_set_vmdq_san_mac_generic; + mac->ops.clear_vmdq = ixgbe_clear_vmdq_generic; + mac->ops.insert_mac_addr = ixgbe_insert_mac_addr_generic; + mac->rar_highwater = 1; + mac->ops.set_vfta = ixgbe_set_vfta_generic; + mac->ops.set_vlvf = ixgbe_set_vlvf_generic; + mac->ops.clear_vfta = ixgbe_clear_vfta_generic; + mac->ops.init_uta_tables = ixgbe_init_uta_tables_generic; + mac->ops.setup_sfp = ixgbe_setup_sfp_modules_82599; + mac->ops.set_mac_anti_spoofing = ixgbe_set_mac_anti_spoofing; + mac->ops.set_vlan_anti_spoofing = ixgbe_set_vlan_anti_spoofing; + + /* Link */ + mac->ops.get_link_capabilities = ixgbe_get_link_capabilities_82599; + mac->ops.check_link = ixgbe_check_mac_link_generic; + mac->ops.setup_rxpba = ixgbe_set_rxpba_generic; + ixgbe_init_mac_link_ops_82599(hw); + + mac->mcft_size = IXGBE_82599_MC_TBL_SIZE; + mac->vft_size = IXGBE_82599_VFT_TBL_SIZE; + mac->num_rar_entries = IXGBE_82599_RAR_ENTRIES; + mac->rx_pb_size = IXGBE_82599_RX_PB_SIZE; + mac->max_rx_queues = IXGBE_82599_MAX_RX_QUEUES; + mac->max_tx_queues = IXGBE_82599_MAX_TX_QUEUES; + mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw); + + mac->arc_subsystem_valid = !!(IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw)) + & IXGBE_FWSM_MODE_MASK); + + hw->mbx.ops.init_params = ixgbe_init_mbx_params_pf; + + /* EEPROM */ + eeprom->ops.read = ixgbe_read_eeprom_82599; + eeprom->ops.read_buffer = ixgbe_read_eeprom_buffer_82599; + + /* Manageability interface */ + mac->ops.set_fw_drv_ver = ixgbe_set_fw_drv_ver_generic; + + mac->ops.get_thermal_sensor_data = + ixgbe_get_thermal_sensor_data_generic; + mac->ops.init_thermal_sensor_thresh = + ixgbe_init_thermal_sensor_thresh_generic; + + mac->ops.get_rtrup2tc = ixgbe_dcb_get_rtrup2tc_generic; + + return ret_val; +} + +/** + * ixgbe_get_link_capabilities_82599 - Determines link capabilities + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @autoneg: true when autoneg or autotry is enabled + * + * Determines the 
link capabilities by reading the AUTOC register. + **/ +s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *autoneg) +{ + s32 status = IXGBE_SUCCESS; + u32 autoc = 0; + + DEBUGFUNC("ixgbe_get_link_capabilities_82599"); + + + /* Check if 1G SFP module. */ + if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_lha_core0 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_lha_core1 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) { + *speed = IXGBE_LINK_SPEED_1GB_FULL; + *autoneg = true; + goto out; + } + + /* + * Determine link capabilities based on the stored value of AUTOC, + * which represents EEPROM defaults. If AUTOC value has not + * been stored, use the current register values. + */ + if (hw->mac.orig_link_settings_stored) + autoc = hw->mac.orig_autoc; + else + autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); + + switch (autoc & IXGBE_AUTOC_LMS_MASK) { + case IXGBE_AUTOC_LMS_1G_LINK_NO_AN: + *speed = IXGBE_LINK_SPEED_1GB_FULL; + *autoneg = false; + break; + + case IXGBE_AUTOC_LMS_10G_LINK_NO_AN: + *speed = IXGBE_LINK_SPEED_10GB_FULL; + *autoneg = false; + break; + + case IXGBE_AUTOC_LMS_1G_AN: + *speed = IXGBE_LINK_SPEED_1GB_FULL; + *autoneg = true; + break; + + case IXGBE_AUTOC_LMS_10G_SERIAL: + *speed = IXGBE_LINK_SPEED_10GB_FULL; + *autoneg = false; + break; + + case IXGBE_AUTOC_LMS_KX4_KX_KR: + case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN: + *speed = IXGBE_LINK_SPEED_UNKNOWN; + if (autoc & IXGBE_AUTOC_KR_SUPP) + *speed |= IXGBE_LINK_SPEED_10GB_FULL; + if (autoc & IXGBE_AUTOC_KX4_SUPP) + *speed |= IXGBE_LINK_SPEED_10GB_FULL; + if (autoc & IXGBE_AUTOC_KX_SUPP) + *speed |= IXGBE_LINK_SPEED_1GB_FULL; + *autoneg = true; + break; + + case IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII: + *speed = IXGBE_LINK_SPEED_100_FULL; + if (autoc & IXGBE_AUTOC_KR_SUPP) + *speed |= IXGBE_LINK_SPEED_10GB_FULL; + if (autoc & IXGBE_AUTOC_KX4_SUPP) + *speed |= IXGBE_LINK_SPEED_10GB_FULL; + if (autoc & IXGBE_AUTOC_KX_SUPP) + *speed |= IXGBE_LINK_SPEED_1GB_FULL; + *autoneg = true; + break; + + case IXGBE_AUTOC_LMS_SGMII_1G_100M: + *speed = IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_100_FULL; + *autoneg = false; + break; + + default: + status = IXGBE_ERR_LINK_SETUP; + goto out; + break; + } + + if (hw->phy.multispeed_fiber) { + *speed |= IXGBE_LINK_SPEED_10GB_FULL | + IXGBE_LINK_SPEED_1GB_FULL; + + /* QSFP must not enable full auto-negotiation + * Limited autoneg is enabled at 1G + */ + if (hw->phy.media_type == ixgbe_media_type_fiber_qsfp) + *autoneg = false; + else + *autoneg = true; + } + +out: + return status; +} + +/** + * ixgbe_get_media_type_82599 - Get media type + * @hw: pointer to hardware structure + * + * Returns the media type (fiber, copper, backplane) + **/ +enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw) +{ + enum ixgbe_media_type media_type; + + DEBUGFUNC("ixgbe_get_media_type_82599"); + + /* Detect if there is a copper PHY attached. 
*/ + switch (hw->phy.type) { + case ixgbe_phy_cu_unknown: + case ixgbe_phy_tn: + media_type = ixgbe_media_type_copper; + goto out; + default: + break; + } + + switch (hw->device_id) { + case IXGBE_DEV_ID_82599_KX4: + case IXGBE_DEV_ID_82599_KX4_MEZZ: + case IXGBE_DEV_ID_82599_COMBO_BACKPLANE: + case IXGBE_DEV_ID_82599_KR: + case IXGBE_DEV_ID_82599_BACKPLANE_FCOE: + case IXGBE_DEV_ID_82599_XAUI_LOM: + /* Default device ID is mezzanine card KX/KX4 */ + media_type = ixgbe_media_type_backplane; + break; + case IXGBE_DEV_ID_82599_SFP: + case IXGBE_DEV_ID_82599_SFP_FCOE: + case IXGBE_DEV_ID_82599_SFP_EM: + case IXGBE_DEV_ID_82599_SFP_SF2: + case IXGBE_DEV_ID_82599_SFP_SF_QP: + case IXGBE_DEV_ID_82599EN_SFP: + media_type = ixgbe_media_type_fiber; + break; + case IXGBE_DEV_ID_82599_CX4: + media_type = ixgbe_media_type_cx4; + break; + case IXGBE_DEV_ID_82599_T3_LOM: + media_type = ixgbe_media_type_copper; + break; + case IXGBE_DEV_ID_82599_QSFP_SF_QP: + media_type = ixgbe_media_type_fiber_qsfp; + break; + default: + media_type = ixgbe_media_type_unknown; + break; + } +out: + return media_type; +} + +/** + * ixgbe_stop_mac_link_on_d3_82599 - Disables link on D3 + * @hw: pointer to hardware structure + * + * Disables link during D3 power down sequence. + * + **/ +void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw) +{ + u32 autoc2_reg; + u16 ee_ctrl_2 = 0; + + DEBUGFUNC("ixgbe_stop_mac_link_on_d3_82599"); + ixgbe_read_eeprom(hw, IXGBE_EEPROM_CTRL_2, &ee_ctrl_2); + + if (!ixgbe_mng_present(hw) && !hw->wol_enabled && + ee_ctrl_2 & IXGBE_EEPROM_CCD_BIT) { + autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2); + autoc2_reg |= IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK; + IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg); + } +} + +/** + * ixgbe_start_mac_link_82599 - Setup MAC link settings + * @hw: pointer to hardware structure + * @autoneg_wait_to_complete: true when waiting for completion is needed + * + * Configures link settings based on values in the ixgbe_hw struct. + * Restarts the link. Performs autonegotiation if needed. + **/ +s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw, + bool autoneg_wait_to_complete) +{ + u32 autoc_reg; + u32 links_reg; + u32 i; + s32 status = IXGBE_SUCCESS; + bool got_lock = false; + + DEBUGFUNC("ixgbe_start_mac_link_82599"); + + + /* reset_pipeline requires us to hold this lock as it writes to + * AUTOC. 
+ */ + if (ixgbe_verify_lesm_fw_enabled_82599(hw)) { + status = hw->mac.ops.acquire_swfw_sync(hw, + IXGBE_GSSR_MAC_CSR_SM); + if (status != IXGBE_SUCCESS) + goto out; + + got_lock = true; + } + + /* Restart link */ + ixgbe_reset_pipeline_82599(hw); + + if (got_lock) + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); + + /* Only poll for autoneg to complete if specified to do so */ + if (autoneg_wait_to_complete) { + autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); + if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) == + IXGBE_AUTOC_LMS_KX4_KX_KR || + (autoc_reg & IXGBE_AUTOC_LMS_MASK) == + IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN || + (autoc_reg & IXGBE_AUTOC_LMS_MASK) == + IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) { + links_reg = 0; /* Just in case Autoneg time = 0 */ + for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) { + links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); + if (links_reg & IXGBE_LINKS_KX_AN_COMP) + break; + msec_delay(100); + } + if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) { + status = IXGBE_ERR_AUTONEG_NOT_COMPLETE; + DEBUGOUT("Autoneg did not complete.\n"); + } + } + } + + /* Add delay to filter out noises during initial link setup */ + msec_delay(50); + +out: + return status; +} + +/** + * ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser + * @hw: pointer to hardware structure + * + * The base drivers may require better control over SFP+ module + * PHY states. This includes selectively shutting down the Tx + * laser on the PHY, effectively halting physical link. + **/ +void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) +{ + u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); + + /* Blocked by MNG FW so bail */ + if (ixgbe_check_reset_blocked(hw)) + return; + + /* Disable Tx laser; allow 100us to go dark per spec */ + esdp_reg |= IXGBE_ESDP_SDP3; + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); + IXGBE_WRITE_FLUSH(hw); + usec_delay(100); +} + +/** + * ixgbe_enable_tx_laser_multispeed_fiber - Enable Tx laser + * @hw: pointer to hardware structure + * + * The base drivers may require better control over SFP+ module + * PHY states. This includes selectively turning on the Tx + * laser on the PHY, effectively starting physical link. + **/ +void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) +{ + u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); + + /* Enable Tx laser; allow 100ms to light up */ + esdp_reg &= ~IXGBE_ESDP_SDP3; + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); + IXGBE_WRITE_FLUSH(hw); + msec_delay(100); +} + +/** + * ixgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser + * @hw: pointer to hardware structure + * + * When the driver changes the link speeds that it can support, + * it sets autotry_restart to true to indicate that we need to + * initiate a new autotry session with the link partner. To do + * so, we set the speed then disable and re-enable the Tx laser, to + * alert the link partner that it also needs to restart autotry on its + * end. This is consistent with true clause 37 autoneg, which also + * involves a loss of signal. 
+ **/ +void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) +{ + DEBUGFUNC("ixgbe_flap_tx_laser_multispeed_fiber"); + + /* Blocked by MNG FW so bail */ + if (ixgbe_check_reset_blocked(hw)) + return; + + if (hw->mac.autotry_restart) { + ixgbe_disable_tx_laser_multispeed_fiber(hw); + ixgbe_enable_tx_laser_multispeed_fiber(hw); + hw->mac.autotry_restart = false; + } +} + +/** + * ixgbe_set_hard_rate_select_speed - Set module link speed + * @hw: pointer to hardware structure + * @speed: link speed to set + * + * Set module link speed via RS0/RS1 rate select pins. + */ +void ixgbe_set_hard_rate_select_speed(struct ixgbe_hw *hw, + ixgbe_link_speed speed) +{ + u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); + + switch (speed) { + case IXGBE_LINK_SPEED_10GB_FULL: + esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5); + break; + case IXGBE_LINK_SPEED_1GB_FULL: + esdp_reg &= ~IXGBE_ESDP_SDP5; + esdp_reg |= IXGBE_ESDP_SDP5_DIR; + break; + default: + DEBUGOUT("Invalid fixed module speed\n"); + return; + } + + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); + IXGBE_WRITE_FLUSH(hw); +} + +/** + * ixgbe_setup_mac_link_smartspeed - Set MAC link speed using SmartSpeed + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg_wait_to_complete: true when waiting for completion is needed + * + * Implements the Intel SmartSpeed algorithm. + **/ +s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete) +{ + s32 status = IXGBE_SUCCESS; + ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN; + s32 i, j; + bool link_up = false; + u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); + + DEBUGFUNC("ixgbe_setup_mac_link_smartspeed"); + + /* Set autoneg_advertised value based on input link speed */ + hw->phy.autoneg_advertised = 0; + + if (speed & IXGBE_LINK_SPEED_10GB_FULL) + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL; + + if (speed & IXGBE_LINK_SPEED_1GB_FULL) + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL; + + if (speed & IXGBE_LINK_SPEED_100_FULL) + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL; + + /* + * Implement Intel SmartSpeed algorithm. SmartSpeed will reduce the + * autoneg advertisement if link is unable to be established at the + * highest negotiated rate. This can sometimes happen due to integrity + * issues with the physical media connection. + */ + + /* First, try to get link with full advertisement */ + hw->phy.smart_speed_active = false; + for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) { + status = ixgbe_setup_mac_link_82599(hw, speed, + autoneg_wait_to_complete); + if (status != IXGBE_SUCCESS) + goto out; + + /* + * Wait for the controller to acquire link. Per IEEE 802.3ap, + * Section 73.10.2, we may have to wait up to 500ms if KR is + * attempted, or 200ms if KX/KX4/BX/BX4 is attempted, per + * Table 9 in the AN MAS. + */ + for (i = 0; i < 5; i++) { + msec_delay(100); + + /* If we have link, just jump out */ + status = ixgbe_check_link(hw, &link_speed, &link_up, + false); + if (status != IXGBE_SUCCESS) + goto out; + + if (link_up) + goto out; + } + } + + /* + * We didn't get link. If we advertised KR plus one of KX4/KX + * (or BX4/BX), then disable KR and try again. 
+ */ + if (((autoc_reg & IXGBE_AUTOC_KR_SUPP) == 0) || + ((autoc_reg & IXGBE_AUTOC_KX4_KX_SUPP_MASK) == 0)) + goto out; + + /* Turn SmartSpeed on to disable KR support */ + hw->phy.smart_speed_active = true; + status = ixgbe_setup_mac_link_82599(hw, speed, + autoneg_wait_to_complete); + if (status != IXGBE_SUCCESS) + goto out; + + /* + * Wait for the controller to acquire link. 600ms will allow for + * the AN link_fail_inhibit_timer as well for multiple cycles of + * parallel detect, both 10g and 1g. This allows for the maximum + * connect attempts as defined in the AN MAS table 73-7. + */ + for (i = 0; i < 6; i++) { + msec_delay(100); + + /* If we have link, just jump out */ + status = ixgbe_check_link(hw, &link_speed, &link_up, false); + if (status != IXGBE_SUCCESS) + goto out; + + if (link_up) + goto out; + } + + /* We didn't get link. Turn SmartSpeed back off. */ + hw->phy.smart_speed_active = false; + status = ixgbe_setup_mac_link_82599(hw, speed, + autoneg_wait_to_complete); + +out: + if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL)) + DEBUGOUT("Smartspeed has downgraded the link speed " + "from the maximum advertised\n"); + return status; +} + +/** + * ixgbe_setup_mac_link_82599 - Set MAC link speed + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg_wait_to_complete: true when waiting for completion is needed + * + * Set the link speed in the AUTOC register and restarts link. + **/ +s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete) +{ + bool autoneg = false; + s32 status = IXGBE_SUCCESS; + u32 pma_pmd_1g, link_mode; + u32 current_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); /* holds the value of AUTOC register at this current point in time */ + u32 orig_autoc = 0; /* holds the cached value of AUTOC register */ + u32 autoc = current_autoc; /* Temporary variable used for comparison purposes */ + u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2); + u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK; + u32 links_reg; + u32 i; + ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN; + + DEBUGFUNC("ixgbe_setup_mac_link_82599"); + + /* Check to see if speed passed in is supported. 
*/ + status = ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg); + if (status) + goto out; + + speed &= link_capabilities; + + if (speed == IXGBE_LINK_SPEED_UNKNOWN) { + status = IXGBE_ERR_LINK_SETUP; + goto out; + } + + /* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/ + if (hw->mac.orig_link_settings_stored) + orig_autoc = hw->mac.orig_autoc; + else + orig_autoc = autoc; + + link_mode = autoc & IXGBE_AUTOC_LMS_MASK; + pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK; + + if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR || + link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN || + link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) { + /* Set KX4/KX/KR support according to speed requested */ + autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP); + if (speed & IXGBE_LINK_SPEED_10GB_FULL) { + if (orig_autoc & IXGBE_AUTOC_KX4_SUPP) + autoc |= IXGBE_AUTOC_KX4_SUPP; + if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) && + (hw->phy.smart_speed_active == false)) + autoc |= IXGBE_AUTOC_KR_SUPP; + } + if (speed & IXGBE_LINK_SPEED_1GB_FULL) + autoc |= IXGBE_AUTOC_KX_SUPP; + } else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) && + (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN || + link_mode == IXGBE_AUTOC_LMS_1G_AN)) { + /* Switch from 1G SFI to 10G SFI if requested */ + if ((speed == IXGBE_LINK_SPEED_10GB_FULL) && + (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)) { + autoc &= ~IXGBE_AUTOC_LMS_MASK; + autoc |= IXGBE_AUTOC_LMS_10G_SERIAL; + } + } else if ((pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) && + (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) { + /* Switch from 10G SFI to 1G SFI if requested */ + if ((speed == IXGBE_LINK_SPEED_1GB_FULL) && + (pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) { + autoc &= ~IXGBE_AUTOC_LMS_MASK; + if (autoneg || hw->phy.type == ixgbe_phy_qsfp_intel) + autoc |= IXGBE_AUTOC_LMS_1G_AN; + else + autoc |= IXGBE_AUTOC_LMS_1G_LINK_NO_AN; + } + } + + if (autoc != current_autoc) { + /* Restart link */ + status = hw->mac.ops.prot_autoc_write(hw, autoc, false); + if (status != IXGBE_SUCCESS) + goto out; + + /* Only poll for autoneg to complete if specified to do so */ + if (autoneg_wait_to_complete) { + if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR || + link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN || + link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) { + links_reg = 0; /*Just in case Autoneg time=0*/ + for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) { + links_reg = + IXGBE_READ_REG(hw, IXGBE_LINKS); + if (links_reg & IXGBE_LINKS_KX_AN_COMP) + break; + msec_delay(100); + } + if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) { + status = + IXGBE_ERR_AUTONEG_NOT_COMPLETE; + DEBUGOUT("Autoneg did not complete.\n"); + } + } + } + + /* Add delay to filter out noises during initial link setup */ + msec_delay(50); + } + +out: + return status; +} + +/** + * ixgbe_setup_copper_link_82599 - Set the PHY autoneg advertised field + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg_wait_to_complete: true if waiting is needed to complete + * + * Restarts link on PHY and MAC based on settings passed in. 
+ **/ +STATIC s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete) +{ + s32 status; + + DEBUGFUNC("ixgbe_setup_copper_link_82599"); + + /* Setup the PHY according to input speed */ + status = hw->phy.ops.setup_link_speed(hw, speed, + autoneg_wait_to_complete); + /* Set up MAC */ + ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete); + + return status; +} + +/** + * ixgbe_reset_hw_82599 - Perform hardware reset + * @hw: pointer to hardware structure + * + * Resets the hardware by resetting the transmit and receive units, masks + * and clears all interrupts, perform a PHY reset, and perform a link (MAC) + * reset. + **/ +s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw) +{ + ixgbe_link_speed link_speed; + s32 status; + u32 ctrl = 0; + u32 i, autoc, autoc2; + u32 curr_lms; + bool link_up = false; + + DEBUGFUNC("ixgbe_reset_hw_82599"); + + /* Call adapter stop to disable tx/rx and clear interrupts */ + status = hw->mac.ops.stop_adapter(hw); + if (status != IXGBE_SUCCESS) + goto reset_hw_out; + + /* flush pending Tx transactions */ + ixgbe_clear_tx_pending(hw); + + /* PHY ops must be identified and initialized prior to reset */ + + /* Identify PHY and related function pointers */ + status = hw->phy.ops.init(hw); + + if (status == IXGBE_ERR_SFP_NOT_SUPPORTED) + goto reset_hw_out; + + /* Setup SFP module if there is one present. */ + if (hw->phy.sfp_setup_needed) { + status = hw->mac.ops.setup_sfp(hw); + hw->phy.sfp_setup_needed = false; + } + + if (status == IXGBE_ERR_SFP_NOT_SUPPORTED) + goto reset_hw_out; + + /* Reset PHY */ + if (hw->phy.reset_disable == false && hw->phy.ops.reset != NULL) + hw->phy.ops.reset(hw); + + /* remember AUTOC from before we reset */ + curr_lms = IXGBE_READ_REG(hw, IXGBE_AUTOC) & IXGBE_AUTOC_LMS_MASK; + +mac_reset_top: + /* + * Issue global reset to the MAC. Needs to be SW reset if link is up. + * If link reset is used when link is up, it might reset the PHY when + * mng is using it. If link is down or the flag to force full link + * reset is set, then perform link reset. + */ + ctrl = IXGBE_CTRL_LNK_RST; + if (!hw->force_full_reset) { + hw->mac.ops.check_link(hw, &link_speed, &link_up, false); + if (link_up) + ctrl = IXGBE_CTRL_RST; + } + + ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL); + IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); + IXGBE_WRITE_FLUSH(hw); + + /* Poll for reset bit to self-clear meaning reset is complete */ + for (i = 0; i < 10; i++) { + usec_delay(1); + ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); + if (!(ctrl & IXGBE_CTRL_RST_MASK)) + break; + } + + if (ctrl & IXGBE_CTRL_RST_MASK) { + status = IXGBE_ERR_RESET_FAILED; + DEBUGOUT("Reset polling failed to complete.\n"); + } + + msec_delay(50); + + /* + * Double resets are required for recovery from certain error + * conditions. Between resets, it is necessary to stall to + * allow time for any pending HW events to complete. + */ + if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) { + hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED; + goto mac_reset_top; + } + + /* + * Store the original AUTOC/AUTOC2 values if they have not been + * stored off yet. Otherwise restore the stored original + * values since the reset operation sets back to defaults. 
+ */ + autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); + autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2); + + /* Enable link if disabled in NVM */ + if (autoc2 & IXGBE_AUTOC2_LINK_DISABLE_MASK) { + autoc2 &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK; + IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2); + IXGBE_WRITE_FLUSH(hw); + } + + if (hw->mac.orig_link_settings_stored == false) { + hw->mac.orig_autoc = autoc; + hw->mac.orig_autoc2 = autoc2; + hw->mac.orig_link_settings_stored = true; + } else { + + /* If MNG FW is running on a multi-speed device that + * doesn't autoneg with out driver support we need to + * leave LMS in the state it was before we MAC reset. + * Likewise if we support WoL we don't want change the + * LMS state. + */ + if ((hw->phy.multispeed_fiber && ixgbe_mng_enabled(hw)) || + hw->wol_enabled) + hw->mac.orig_autoc = + (hw->mac.orig_autoc & ~IXGBE_AUTOC_LMS_MASK) | + curr_lms; + + if (autoc != hw->mac.orig_autoc) { + status = hw->mac.ops.prot_autoc_write(hw, + hw->mac.orig_autoc, + false); + if (status != IXGBE_SUCCESS) + goto reset_hw_out; + } + + if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) != + (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) { + autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK; + autoc2 |= (hw->mac.orig_autoc2 & + IXGBE_AUTOC2_UPPER_MASK); + IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2); + } + } + + /* Store the permanent mac address */ + hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr); + + /* + * Store MAC address from RAR0, clear receive address registers, and + * clear the multicast table. Also reset num_rar_entries to 128, + * since we modify this value when programming the SAN MAC address. + */ + hw->mac.num_rar_entries = 128; + hw->mac.ops.init_rx_addrs(hw); + + /* Store the permanent SAN mac address */ + hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr); + + /* Add the SAN MAC address to the RAR only if it's a valid address */ + if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) { + /* Save the SAN MAC RAR index */ + hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1; + + hw->mac.ops.set_rar(hw, hw->mac.san_mac_rar_index, + hw->mac.san_addr, 0, IXGBE_RAH_AV); + + /* clear VMDq pool/queue selection for this RAR */ + hw->mac.ops.clear_vmdq(hw, hw->mac.san_mac_rar_index, + IXGBE_CLEAR_VMDQ_ALL); + + /* Reserve the last RAR for the SAN MAC address */ + hw->mac.num_rar_entries--; + } + + /* Store the alternative WWNN/WWPN prefix */ + hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix, + &hw->mac.wwpn_prefix); + +reset_hw_out: + return status; +} + +/** + * ixgbe_fdir_check_cmd_complete - poll to check whether FDIRCMD is complete + * @hw: pointer to hardware structure + * @fdircmd: current value of FDIRCMD register + */ +STATIC s32 ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw, u32 *fdircmd) +{ + int i; + + for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) { + *fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD); + if (!(*fdircmd & IXGBE_FDIRCMD_CMD_MASK)) + return IXGBE_SUCCESS; + usec_delay(10); + } + + return IXGBE_ERR_FDIR_CMD_INCOMPLETE; +} + +/** + * ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables. + * @hw: pointer to hardware structure + **/ +s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw) +{ + s32 err; + int i; + u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL); + u32 fdircmd; + fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE; + + DEBUGFUNC("ixgbe_reinit_fdir_tables_82599"); + + /* + * Before starting reinitialization process, + * FDIRCMD.CMD must be zero. 
+ */ + err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd); + if (err) { + DEBUGOUT("Flow Director previous command did not complete, aborting table re-initialization.\n"); + return err; + } + + IXGBE_WRITE_REG(hw, IXGBE_FDIRFREE, 0); + IXGBE_WRITE_FLUSH(hw); + /* + * 82599 adapters flow director init flow cannot be restarted, + * Workaround 82599 silicon errata by performing the following steps + * before re-writing the FDIRCTRL control register with the same value. + * - write 1 to bit 8 of FDIRCMD register & + * - write 0 to bit 8 of FDIRCMD register + */ + IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, + (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) | + IXGBE_FDIRCMD_CLEARHT)); + IXGBE_WRITE_FLUSH(hw); + IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, + (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) & + ~IXGBE_FDIRCMD_CLEARHT)); + IXGBE_WRITE_FLUSH(hw); + /* + * Clear FDIR Hash register to clear any leftover hashes + * waiting to be programmed. + */ + IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, 0x00); + IXGBE_WRITE_FLUSH(hw); + + IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl); + IXGBE_WRITE_FLUSH(hw); + + /* Poll init-done after we write FDIRCTRL register */ + for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) { + if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) & + IXGBE_FDIRCTRL_INIT_DONE) + break; + msec_delay(1); + } + if (i >= IXGBE_FDIR_INIT_DONE_POLL) { + DEBUGOUT("Flow Director Signature poll time exceeded!\n"); + return IXGBE_ERR_FDIR_REINIT_FAILED; + } + + /* Clear FDIR statistics registers (read to clear) */ + IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT); + IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT); + IXGBE_READ_REG(hw, IXGBE_FDIRMATCH); + IXGBE_READ_REG(hw, IXGBE_FDIRMISS); + IXGBE_READ_REG(hw, IXGBE_FDIRLEN); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_fdir_enable_82599 - Initialize Flow Director control registers + * @hw: pointer to hardware structure + * @fdirctrl: value to write to flow director control register + **/ +STATIC void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl) +{ + int i; + + DEBUGFUNC("ixgbe_fdir_enable_82599"); + + /* Prime the keys for hashing */ + IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY); + IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY); + + /* + * Poll init-done after we write the register. Estimated times: + * 10G: PBALLOC = 11b, timing is 60us + * 1G: PBALLOC = 11b, timing is 600us + * 100M: PBALLOC = 11b, timing is 6ms + * + * Multiple these timings by 4 if under full Rx load + * + * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for + * 1 msec per poll time. If we're at line rate and drop to 100M, then + * this might not finish in our poll time, but we can live with that + * for now. 
+ */ + IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl); + IXGBE_WRITE_FLUSH(hw); + for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) { + if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) & + IXGBE_FDIRCTRL_INIT_DONE) + break; + msec_delay(1); + } + + if (i >= IXGBE_FDIR_INIT_DONE_POLL) + DEBUGOUT("Flow Director poll time exceeded!\n"); +} + +/** + * ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters + * @hw: pointer to hardware structure + * @fdirctrl: value to write to flow director control register, initially + * contains just the value of the Rx packet buffer allocation + **/ +s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl) +{ + DEBUGFUNC("ixgbe_init_fdir_signature_82599"); + + /* + * Continue setup of fdirctrl register bits: + * Move the flexible bytes to use the ethertype - shift 6 words + * Set the maximum length per hash bucket to 0xA filters + * Send interrupt when 64 filters are left + */ + fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) | + (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) | + (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT); + + /* write hashes and fdirctrl register, poll for completion */ + ixgbe_fdir_enable_82599(hw, fdirctrl); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters + * @hw: pointer to hardware structure + * @fdirctrl: value to write to flow director control register, initially + * contains just the value of the Rx packet buffer allocation + * @cloud_mode: true - cloud mode, false - other mode + **/ +s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl, + bool cloud_mode) +{ + UNREFERENCED_1PARAMETER(cloud_mode); + DEBUGFUNC("ixgbe_init_fdir_perfect_82599"); + + /* + * Continue setup of fdirctrl register bits: + * Turn perfect match filtering on + * Report hash in RSS field of Rx wb descriptor + * Initialize the drop queue to queue 127 + * Move the flexible bytes to use the ethertype - shift 6 words + * Set the maximum length per hash bucket to 0xA filters + * Send interrupt when 64 (0x4 * 16) filters are left + */ + fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH | + IXGBE_FDIRCTRL_REPORT_STATUS | + (IXGBE_FDIR_DROP_QUEUE << IXGBE_FDIRCTRL_DROP_Q_SHIFT) | + (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) | + (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) | + (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT); + + if (cloud_mode) + fdirctrl |=(IXGBE_FDIRCTRL_FILTERMODE_CLOUD << + IXGBE_FDIRCTRL_FILTERMODE_SHIFT); + + /* write hashes and fdirctrl register, poll for completion */ + ixgbe_fdir_enable_82599(hw, fdirctrl); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_set_fdir_drop_queue_82599 - Set Flow Director drop queue + * @hw: pointer to hardware structure + * @dropqueue: Rx queue index used for the dropped packets + **/ +void ixgbe_set_fdir_drop_queue_82599(struct ixgbe_hw *hw, u8 dropqueue) +{ + u32 fdirctrl; + + DEBUGFUNC("ixgbe_set_fdir_drop_queue_82599"); + /* Clear init done bit and drop queue field */ + fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL); + fdirctrl &= ~(IXGBE_FDIRCTRL_DROP_Q_MASK | IXGBE_FDIRCTRL_INIT_DONE); + + /* Set drop queue */ + fdirctrl |= (dropqueue << IXGBE_FDIRCTRL_DROP_Q_SHIFT); + if ((hw->mac.type == ixgbe_mac_X550) || + (hw->mac.type == ixgbe_mac_X550EM_x) || + (hw->mac.type == ixgbe_mac_X550EM_a)) + fdirctrl |= IXGBE_FDIRCTRL_DROP_NO_MATCH; + + IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, + (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) | + IXGBE_FDIRCMD_CLEARHT)); + IXGBE_WRITE_FLUSH(hw); + IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, + (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) & + 
~IXGBE_FDIRCMD_CLEARHT)); + IXGBE_WRITE_FLUSH(hw); + + /* write hashes and fdirctrl register, poll for completion */ + ixgbe_fdir_enable_82599(hw, fdirctrl); +} + +/* + * These defines allow us to quickly generate all of the necessary instructions + * in the function below by simply calling out IXGBE_COMPUTE_SIG_HASH_ITERATION + * for values 0 through 15 + */ +#define IXGBE_ATR_COMMON_HASH_KEY \ + (IXGBE_ATR_BUCKET_HASH_KEY & IXGBE_ATR_SIGNATURE_HASH_KEY) +#define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \ +do { \ + u32 n = (_n); \ + if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \ + common_hash ^= lo_hash_dword >> n; \ + else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \ + bucket_hash ^= lo_hash_dword >> n; \ + else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \ + sig_hash ^= lo_hash_dword << (16 - n); \ + if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \ + common_hash ^= hi_hash_dword >> n; \ + else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \ + bucket_hash ^= hi_hash_dword >> n; \ + else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \ + sig_hash ^= hi_hash_dword << (16 - n); \ +} while (0) + +/** + * ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash + * @input: input bitstream to compute the hash on + * @common: compressed common input dword + * + * This function is almost identical to the function above but contains + * several optimizations such as unwinding all of the loops, letting the + * compiler work out all of the conditional ifs since the keys are static + * defines, and computing two keys at once since the hashed dword stream + * will be the same for both keys. + **/ +u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input, + union ixgbe_atr_hash_dword common) +{ + u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan; + u32 sig_hash = 0, bucket_hash = 0, common_hash = 0; + + /* record the flow_vm_vlan bits as they are a key part to the hash */ + flow_vm_vlan = IXGBE_NTOHL(input.dword); + + /* generate common hash dword */ + hi_hash_dword = IXGBE_NTOHL(common.dword); + + /* low dword is word swapped version of common */ + lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16); + + /* apply flow ID/VM pool/VLAN ID bits to hash words */ + hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16); + + /* Process bits 0 and 16 */ + IXGBE_COMPUTE_SIG_HASH_ITERATION(0); + + /* + * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to + * delay this because bit 0 of the stream should not be processed + * so we do not add the VLAN until after bit 0 was processed + */ + lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16); + + /* Process remaining 30 bit of the key */ + IXGBE_COMPUTE_SIG_HASH_ITERATION(1); + IXGBE_COMPUTE_SIG_HASH_ITERATION(2); + IXGBE_COMPUTE_SIG_HASH_ITERATION(3); + IXGBE_COMPUTE_SIG_HASH_ITERATION(4); + IXGBE_COMPUTE_SIG_HASH_ITERATION(5); + IXGBE_COMPUTE_SIG_HASH_ITERATION(6); + IXGBE_COMPUTE_SIG_HASH_ITERATION(7); + IXGBE_COMPUTE_SIG_HASH_ITERATION(8); + IXGBE_COMPUTE_SIG_HASH_ITERATION(9); + IXGBE_COMPUTE_SIG_HASH_ITERATION(10); + IXGBE_COMPUTE_SIG_HASH_ITERATION(11); + IXGBE_COMPUTE_SIG_HASH_ITERATION(12); + IXGBE_COMPUTE_SIG_HASH_ITERATION(13); + IXGBE_COMPUTE_SIG_HASH_ITERATION(14); + IXGBE_COMPUTE_SIG_HASH_ITERATION(15); + + /* combine common_hash result with signature and bucket hashes */ + bucket_hash ^= common_hash; + bucket_hash &= IXGBE_ATR_HASH_MASK; + + sig_hash ^= common_hash << 16; + sig_hash &= IXGBE_ATR_HASH_MASK << 16; + + /* return completed signature hash */ + return sig_hash ^ bucket_hash; +} + 
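
As a reading aid: the unrolled IXGBE_COMPUTE_SIG_HASH_ITERATION macros above (and the IXGBE_COMPUTE_BKT_HASH_ITERATION macro that follows) implement a bit-serial hash in which each set bit n of a 32-bit key XORs a shifted copy of the low hash dword (key bits 0-15) or the high hash dword (key bits 16-31) into an accumulator. The standalone sketch below shows the equivalent loop form for a single key. It is illustrative only and not part of the driver source: the helper name atr_bucket_hash_model, the placeholder key and dword values, and the assumption that IXGBE_ATR_HASH_MASK is 15 bits wide are all inventions of this example, and the driver's ordering detail of processing bit 0 before folding the VLAN/flow bits into the low dword is omitted for brevity.

#include <stdint.h>
#include <stdio.h>

/*
 * Loop-form model of the bit-serial hash that the driver's
 * IXGBE_COMPUTE_*_HASH_ITERATION macros unroll: key bit n selects
 * lo_hash_dword, key bit (n + 16) selects hi_hash_dword, and each
 * selected dword is XORed in after a right shift by n.
 */
static uint32_t atr_bucket_hash_model(uint32_t key, uint32_t lo_hash_dword,
                                      uint32_t hi_hash_dword)
{
    uint32_t hash = 0;
    int n;

    for (n = 0; n < 16; n++) {
        if (key & (1u << n))
            hash ^= lo_hash_dword >> n;
        if (key & (1u << (n + 16)))
            hash ^= hi_hash_dword >> n;
    }

    /* the driver masks with IXGBE_ATR_HASH_MASK; 15 bits assumed here */
    return hash & 0x7FFF;
}

int main(void)
{
    uint32_t key = 0xA5A5C3C3;             /* arbitrary placeholder key */
    uint32_t hi = 0x12345678;              /* stands in for the common dword */
    uint32_t lo = (hi >> 16) | (hi << 16); /* word-swapped copy, as in the driver */

    printf("bucket hash model: 0x%04x\n",
           (unsigned)atr_bucket_hash_model(key, lo, hi));
    return 0;
}

The driver unrolls this loop by hand so that, with the keys being compile-time constants, the compiler can discard the untaken branches and compute the signature and bucket hashes in a single pass over the shared dword stream, as the function comment above notes.
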
+/** + * ixgbe_atr_add_signature_filter_82599 - Adds a signature hash filter + * @hw: pointer to hardware structure + * @input: unique input dword + * @common: compressed common input dword + * @queue: queue index to direct traffic to + * + * Note that the tunnel bit in input must not be set when the hardware + * tunneling support does not exist. + **/ +void ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw, + union ixgbe_atr_hash_dword input, + union ixgbe_atr_hash_dword common, + u8 queue) +{ + u64 fdirhashcmd; + u8 flow_type; + bool tunnel; + u32 fdircmd; + + DEBUGFUNC("ixgbe_fdir_add_signature_filter_82599"); + + /* + * Get the flow_type in order to program FDIRCMD properly + * lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6 + * fifth is FDIRCMD.TUNNEL_FILTER + */ + tunnel = !!(input.formatted.flow_type & IXGBE_ATR_L4TYPE_TUNNEL_MASK); + flow_type = input.formatted.flow_type & + (IXGBE_ATR_L4TYPE_TUNNEL_MASK - 1); + switch (flow_type) { + case IXGBE_ATR_FLOW_TYPE_TCPV4: + case IXGBE_ATR_FLOW_TYPE_UDPV4: + case IXGBE_ATR_FLOW_TYPE_SCTPV4: + case IXGBE_ATR_FLOW_TYPE_TCPV6: + case IXGBE_ATR_FLOW_TYPE_UDPV6: + case IXGBE_ATR_FLOW_TYPE_SCTPV6: + break; + default: + DEBUGOUT(" Error on flow type input\n"); + return; + } + + /* configure FDIRCMD register */ + fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE | + IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN; + fdircmd |= (u32)flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT; + fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT; + if (tunnel) + fdircmd |= IXGBE_FDIRCMD_TUNNEL_FILTER; + + /* + * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits + * is for FDIRCMD. Then do a 64-bit register write from FDIRHASH. + */ + fdirhashcmd = (u64)fdircmd << 32; + fdirhashcmd |= ixgbe_atr_compute_sig_hash_82599(input, common); + IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd); + + DEBUGOUT2("Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd); + + return; +} + +#define IXGBE_COMPUTE_BKT_HASH_ITERATION(_n) \ +do { \ + u32 n = (_n); \ + if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \ + bucket_hash ^= lo_hash_dword >> n; \ + if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \ + bucket_hash ^= hi_hash_dword >> n; \ +} while (0) + +/** + * ixgbe_atr_compute_perfect_hash_82599 - Compute the perfect filter hash + * @input: input bitstream to compute the hash on + * @input_mask: mask for the input bitstream + * + * This function serves two main purposes. First it applies the input_mask + * to the atr_input resulting in a cleaned up atr_input data stream. + * Secondly it computes the hash and stores it in the bkt_hash field at + * the end of the input byte stream. This way it will be available for + * future use without needing to recompute the hash. 
+ **/ +void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input, + union ixgbe_atr_input *input_mask) +{ + + u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan; + u32 bucket_hash = 0; + u32 hi_dword = 0; + u32 i = 0; + + /* Apply masks to input data */ + for (i = 0; i < 14; i++) + input->dword_stream[i] &= input_mask->dword_stream[i]; + + /* record the flow_vm_vlan bits as they are a key part to the hash */ + flow_vm_vlan = IXGBE_NTOHL(input->dword_stream[0]); + + /* generate common hash dword */ + for (i = 1; i <= 13; i++) + hi_dword ^= input->dword_stream[i]; + hi_hash_dword = IXGBE_NTOHL(hi_dword); + + /* low dword is word swapped version of common */ + lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16); + + /* apply flow ID/VM pool/VLAN ID bits to hash words */ + hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16); + + /* Process bits 0 and 16 */ + IXGBE_COMPUTE_BKT_HASH_ITERATION(0); + + /* + * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to + * delay this because bit 0 of the stream should not be processed + * so we do not add the VLAN until after bit 0 was processed + */ + lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16); + + /* Process remaining 30 bit of the key */ + for (i = 1; i <= 15; i++) + IXGBE_COMPUTE_BKT_HASH_ITERATION(i); + + /* + * Limit hash to 13 bits since max bucket count is 8K. + * Store result at the end of the input stream. + */ + input->formatted.bkt_hash = bucket_hash & 0x1FFF; +} + +/** + * ixgbe_get_fdirtcpm_82599 - generate a TCP port from atr_input_masks + * @input_mask: mask to be bit swapped + * + * The source and destination port masks for flow director are bit swapped + * in that bit 15 effects bit 0, 14 effects 1, 13, 2 etc. In order to + * generate a correctly swapped value we need to bit swap the mask and that + * is what is accomplished by this function. + **/ +STATIC u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask) +{ + u32 mask = IXGBE_NTOHS(input_mask->formatted.dst_port); + mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT; + mask |= IXGBE_NTOHS(input_mask->formatted.src_port); + mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1); + mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2); + mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4); + return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8); +} + +/* + * These two macros are meant to address the fact that we have registers + * that are either all or in part big-endian. As a result on big-endian + * systems we will end up byte swapping the value to little-endian before + * it is byte swapped again and written to the hardware in the original + * big-endian format. + */ +#define IXGBE_STORE_AS_BE32(_value) \ + (((u32)(_value) >> 24) | (((u32)(_value) & 0x00FF0000) >> 8) | \ + (((u32)(_value) & 0x0000FF00) << 8) | ((u32)(_value) << 24)) + +#define IXGBE_WRITE_REG_BE32(a, reg, value) \ + IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(IXGBE_NTOHL(value))) + +#define IXGBE_STORE_AS_BE16(_value) \ + IXGBE_NTOHS(((u16)(_value) >> 8) | ((u16)(_value) << 8)) + +s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw, + union ixgbe_atr_input *input_mask, bool cloud_mode) +{ + /* mask IPv6 since it is currently not supported */ + u32 fdirm = IXGBE_FDIRM_DIPv6; + u32 fdirtcpm; + u32 fdirip6m; + UNREFERENCED_1PARAMETER(cloud_mode); + DEBUGFUNC("ixgbe_fdir_set_atr_input_mask_82599"); + + /* + * Program the relevant mask registers. If src/dst_port or src/dst_addr + * are zero, then assume a full mask for that field. 
Also assume that + * a VLAN of 0 is unspecified, so mask that out as well. L4type + * cannot be masked out in this implementation. + * + * This also assumes IPv4 only. IPv6 masking isn't supported at this + * point in time. + */ + + /* verify bucket hash is cleared on hash generation */ + if (input_mask->formatted.bkt_hash) + DEBUGOUT(" bucket hash should always be 0 in mask\n"); + + /* Program FDIRM and verify partial masks */ + switch (input_mask->formatted.vm_pool & 0x7F) { + case 0x0: + fdirm |= IXGBE_FDIRM_POOL; + case 0x7F: + break; + default: + DEBUGOUT(" Error on vm pool mask\n"); + return IXGBE_ERR_CONFIG; + } + + switch (input_mask->formatted.flow_type & IXGBE_ATR_L4TYPE_MASK) { + case 0x0: + fdirm |= IXGBE_FDIRM_L4P; + if (input_mask->formatted.dst_port || + input_mask->formatted.src_port) { + DEBUGOUT(" Error on src/dst port mask\n"); + return IXGBE_ERR_CONFIG; + } + case IXGBE_ATR_L4TYPE_MASK: + break; + default: + DEBUGOUT(" Error on flow type mask\n"); + return IXGBE_ERR_CONFIG; + } + + switch (IXGBE_NTOHS(input_mask->formatted.vlan_id) & 0xEFFF) { + case 0x0000: + /* mask VLAN ID */ + fdirm |= IXGBE_FDIRM_VLANID; + /* fall through */ + case 0x0FFF: + /* mask VLAN priority */ + fdirm |= IXGBE_FDIRM_VLANP; + break; + case 0xE000: + /* mask VLAN ID only */ + fdirm |= IXGBE_FDIRM_VLANID; + /* fall through */ + case 0xEFFF: + /* no VLAN fields masked */ + break; + default: + DEBUGOUT(" Error on VLAN mask\n"); + return IXGBE_ERR_CONFIG; + } + + switch (input_mask->formatted.flex_bytes & 0xFFFF) { + case 0x0000: + /* Mask Flex Bytes */ + fdirm |= IXGBE_FDIRM_FLEX; + /* fall through */ + case 0xFFFF: + break; + default: + DEBUGOUT(" Error on flexible byte mask\n"); + return IXGBE_ERR_CONFIG; + } + + if (cloud_mode) { + fdirm |= IXGBE_FDIRM_L3P; + fdirip6m = ((u32) 0xFFFFU << IXGBE_FDIRIP6M_DIPM_SHIFT); + fdirip6m |= IXGBE_FDIRIP6M_ALWAYS_MASK; + + switch (input_mask->formatted.inner_mac[0] & 0xFF) { + case 0x00: + /* Mask inner MAC, fall through */ + fdirip6m |= IXGBE_FDIRIP6M_INNER_MAC; + case 0xFF: + break; + default: + DEBUGOUT(" Error on inner_mac byte mask\n"); + return IXGBE_ERR_CONFIG; + } + + switch (input_mask->formatted.tni_vni & 0xFFFFFFFF) { + case 0x0: + /* Mask vxlan id */ + fdirip6m |= IXGBE_FDIRIP6M_TNI_VNI; + break; + case 0x00FFFFFF: + fdirip6m |= IXGBE_FDIRIP6M_TNI_VNI_24; + break; + case 0xFFFFFFFF: + break; + default: + DEBUGOUT(" Error on TNI/VNI byte mask\n"); + return IXGBE_ERR_CONFIG; + } + + switch (input_mask->formatted.tunnel_type & 0xFFFF) { + case 0x0: + /* Mask turnnel type, fall through */ + fdirip6m |= IXGBE_FDIRIP6M_TUNNEL_TYPE; + case 0xFFFF: + break; + default: + DEBUGOUT(" Error on tunnel type byte mask\n"); + return IXGBE_ERR_CONFIG; + } + IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIP6M, fdirip6m); + + /* Set all bits in FDIRTCPM, FDIRUDPM, FDIRSCTPM, + * FDIRSIP4M and FDIRDIP4M in cloud mode to allow + * L3/L3 packets to tunnel. 
+ */ + IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, 0xFFFFFFFF); + IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, 0xFFFFFFFF); + IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M, 0xFFFFFFFF); + IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M, 0xFFFFFFFF); + switch (hw->mac.type) { + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, 0xFFFFFFFF); + break; + default: + break; + } + } + + /* Now mask VM pool and destination IPv6 - bits 5 and 2 */ + IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm); + + if (!cloud_mode) { + /* store the TCP/UDP port masks, bit reversed from port + * layout */ + fdirtcpm = ixgbe_get_fdirtcpm_82599(input_mask); + + /* write both the same so that UDP and TCP use the same mask */ + IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm); + IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm); + /* also use it for SCTP */ + switch (hw->mac.type) { + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm); + break; + default: + break; + } + + /* store source and destination IP masks (big-enian) */ + IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M, + ~input_mask->formatted.src_ip[0]); + IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M, + ~input_mask->formatted.dst_ip[0]); + } + return IXGBE_SUCCESS; +} + +s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw, + union ixgbe_atr_input *input, + u16 soft_id, u8 queue, bool cloud_mode) +{ + u32 fdirport, fdirvlan, fdirhash, fdircmd; + u32 addr_low, addr_high; + u32 cloud_type = 0; + s32 err; + UNREFERENCED_1PARAMETER(cloud_mode); + + DEBUGFUNC("ixgbe_fdir_write_perfect_filter_82599"); + if (!cloud_mode) { + /* currently IPv6 is not supported, must be programmed with 0 */ + IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0), + input->formatted.src_ip[0]); + IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1), + input->formatted.src_ip[1]); + IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2), + input->formatted.src_ip[2]); + + /* record the source address (big-endian) */ + IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, + input->formatted.src_ip[0]); + + /* record the first 32 bits of the destination address + * (big-endian) */ + IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, + input->formatted.dst_ip[0]); + + /* record source and destination port (little-endian)*/ + fdirport = IXGBE_NTOHS(input->formatted.dst_port); + fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT; + fdirport |= IXGBE_NTOHS(input->formatted.src_port); + IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport); + } + + /* record VLAN (little-endian) and flex_bytes(big-endian) */ + fdirvlan = IXGBE_STORE_AS_BE16(input->formatted.flex_bytes); + fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT; + fdirvlan |= IXGBE_NTOHS(input->formatted.vlan_id); + IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan); + + if (cloud_mode) { + if (input->formatted.tunnel_type != 0) + cloud_type = 0x80000000; + + addr_low = ((u32)input->formatted.inner_mac[0] | + ((u32)input->formatted.inner_mac[1] << 8) | + ((u32)input->formatted.inner_mac[2] << 16) | + ((u32)input->formatted.inner_mac[3] << 24)); + addr_high = ((u32)input->formatted.inner_mac[4] | + ((u32)input->formatted.inner_mac[5] << 8)); + cloud_type |= addr_high; + IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0), addr_low); + IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1), cloud_type); + IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2), input->formatted.tni_vni); + } + + /* configure FDIRHASH register */ + fdirhash = input->formatted.bkt_hash; + fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT; + IXGBE_WRITE_REG(hw, 
IXGBE_FDIRHASH, fdirhash); + + /* + * flush all previous writes to make certain registers are + * programmed prior to issuing the command + */ + IXGBE_WRITE_FLUSH(hw); + + /* configure FDIRCMD register */ + fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE | + IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN; + if (queue == IXGBE_FDIR_DROP_QUEUE) + fdircmd |= IXGBE_FDIRCMD_DROP; + if (input->formatted.flow_type & IXGBE_ATR_L4TYPE_TUNNEL_MASK) + fdircmd |= IXGBE_FDIRCMD_TUNNEL_FILTER; + fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT; + fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT; + fdircmd |= (u32)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT; + + IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd); + err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd); + if (err) { + DEBUGOUT("Flow Director command did not complete!\n"); + return err; + } + + return IXGBE_SUCCESS; +} + +s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw, + union ixgbe_atr_input *input, + u16 soft_id) +{ + u32 fdirhash; + u32 fdircmd; + s32 err; + + /* configure FDIRHASH register */ + fdirhash = input->formatted.bkt_hash; + fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT; + IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash); + + /* flush hash to HW */ + IXGBE_WRITE_FLUSH(hw); + + /* Query if filter is present */ + IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT); + + err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd); + if (err) { + DEBUGOUT("Flow Director command did not complete!\n"); + return err; + } + + /* if filter exists in hardware then remove it */ + if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) { + IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash); + IXGBE_WRITE_FLUSH(hw); + IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, + IXGBE_FDIRCMD_CMD_REMOVE_FLOW); + } + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter + * @hw: pointer to hardware structure + * @input: input bitstream + * @input_mask: mask for the input bitstream + * @soft_id: software index for the filters + * @queue: queue index to direct traffic to + * @cloud_mode: unused + * + * Note that the caller to this function must lock before calling, since the + * hardware writes must be protected from one another. 
+ **/ +s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw, + union ixgbe_atr_input *input, + union ixgbe_atr_input *input_mask, + u16 soft_id, u8 queue, bool cloud_mode) +{ + s32 err = IXGBE_ERR_CONFIG; + UNREFERENCED_1PARAMETER(cloud_mode); + + DEBUGFUNC("ixgbe_fdir_add_perfect_filter_82599"); + + /* + * Check flow_type formatting, and bail out before we touch the hardware + * if there's a configuration issue + */ + switch (input->formatted.flow_type) { + case IXGBE_ATR_FLOW_TYPE_IPV4: + case IXGBE_ATR_FLOW_TYPE_TUNNELED_IPV4: + input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK; + if (input->formatted.dst_port || input->formatted.src_port) { + DEBUGOUT(" Error on src/dst port\n"); + return IXGBE_ERR_CONFIG; + } + break; + case IXGBE_ATR_FLOW_TYPE_SCTPV4: + case IXGBE_ATR_FLOW_TYPE_TUNNELED_SCTPV4: + if (input->formatted.dst_port || input->formatted.src_port) { + DEBUGOUT(" Error on src/dst port\n"); + return IXGBE_ERR_CONFIG; + } + /* fall through */ + case IXGBE_ATR_FLOW_TYPE_TCPV4: + case IXGBE_ATR_FLOW_TYPE_TUNNELED_TCPV4: + case IXGBE_ATR_FLOW_TYPE_UDPV4: + case IXGBE_ATR_FLOW_TYPE_TUNNELED_UDPV4: + input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK | + IXGBE_ATR_L4TYPE_MASK; + break; + default: + DEBUGOUT(" Error on flow type input\n"); + return err; + } + + /* program input mask into the HW */ + err = ixgbe_fdir_set_input_mask_82599(hw, input_mask, cloud_mode); + if (err) + return err; + + /* apply mask and compute/store hash */ + ixgbe_atr_compute_perfect_hash_82599(input, input_mask); + + /* program filters to filter memory */ + return ixgbe_fdir_write_perfect_filter_82599(hw, input, + soft_id, queue, cloud_mode); +} + +/** + * ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register + * @hw: pointer to hardware structure + * @reg: analog register to read + * @val: read value + * + * Performs read operation to Omer analog register specified. + **/ +s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val) +{ + u32 core_ctl; + + DEBUGFUNC("ixgbe_read_analog_reg8_82599"); + + IXGBE_WRITE_REG(hw, IXGBE_CORECTL, IXGBE_CORECTL_WRITE_CMD | + (reg << 8)); + IXGBE_WRITE_FLUSH(hw); + usec_delay(10); + core_ctl = IXGBE_READ_REG(hw, IXGBE_CORECTL); + *val = (u8)core_ctl; + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_write_analog_reg8_82599 - Writes 8 bit Omer analog register + * @hw: pointer to hardware structure + * @reg: atlas register to write + * @val: value to write + * + * Performs write operation to Omer analog register specified. + **/ +s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val) +{ + u32 core_ctl; + + DEBUGFUNC("ixgbe_write_analog_reg8_82599"); + + core_ctl = (reg << 8) | val; + IXGBE_WRITE_REG(hw, IXGBE_CORECTL, core_ctl); + IXGBE_WRITE_FLUSH(hw); + usec_delay(10); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_start_hw_82599 - Prepare hardware for Tx/Rx + * @hw: pointer to hardware structure + * + * Starts the hardware using the generic start_hw function + * and the generation start_hw function. + * Then performs revision-specific operations, if any. 
+ **/ +s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw) +{ + s32 ret_val = IXGBE_SUCCESS; + + DEBUGFUNC("ixgbe_start_hw_82599"); + + ret_val = ixgbe_start_hw_generic(hw); + if (ret_val != IXGBE_SUCCESS) + goto out; + + ret_val = ixgbe_start_hw_gen2(hw); + if (ret_val != IXGBE_SUCCESS) + goto out; + + /* We need to run link autotry after the driver loads */ + hw->mac.autotry_restart = true; + + if (ret_val == IXGBE_SUCCESS) + ret_val = ixgbe_verify_fw_version_82599(hw); +out: + return ret_val; +} + +/** + * ixgbe_identify_phy_82599 - Get physical layer module + * @hw: pointer to hardware structure + * + * Determines the physical layer module found on the current adapter. + * If PHY already detected, maintains current PHY type in hw struct, + * otherwise executes the PHY detection routine. + **/ +s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw) +{ + s32 status; + + DEBUGFUNC("ixgbe_identify_phy_82599"); + + /* Detect PHY if not unknown - returns success if already detected. */ + status = ixgbe_identify_phy_generic(hw); + if (status != IXGBE_SUCCESS) { + /* 82599 10GBASE-T requires an external PHY */ + if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) + return status; + else + status = ixgbe_identify_module_generic(hw); + } + + /* Set PHY type none if no PHY detected */ + if (hw->phy.type == ixgbe_phy_unknown) { + hw->phy.type = ixgbe_phy_none; + return IXGBE_SUCCESS; + } + + /* Return error if SFP module has been detected but is not supported */ + if (hw->phy.type == ixgbe_phy_sfp_unsupported) + return IXGBE_ERR_SFP_NOT_SUPPORTED; + + return status; +} + +/** + * ixgbe_get_supported_physical_layer_82599 - Returns physical layer type + * @hw: pointer to hardware structure + * + * Determines physical layer capabilities of the current configuration. 
+ **/ +u64 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw) +{ + u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; + u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); + u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2); + u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK; + u32 pma_pmd_10g_parallel = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK; + u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK; + u16 ext_ability = 0; + + DEBUGFUNC("ixgbe_get_support_physical_layer_82599"); + + hw->phy.ops.identify(hw); + + switch (hw->phy.type) { + case ixgbe_phy_tn: + case ixgbe_phy_cu_unknown: + hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability); + if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY) + physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T; + if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY) + physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T; + if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY) + physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX; + goto out; + default: + break; + } + + switch (autoc & IXGBE_AUTOC_LMS_MASK) { + case IXGBE_AUTOC_LMS_1G_AN: + case IXGBE_AUTOC_LMS_1G_LINK_NO_AN: + if (pma_pmd_1g == IXGBE_AUTOC_1G_KX_BX) { + physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX | + IXGBE_PHYSICAL_LAYER_1000BASE_BX; + goto out; + } else + /* SFI mode so read SFP module */ + goto sfp_check; + break; + case IXGBE_AUTOC_LMS_10G_LINK_NO_AN: + if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_CX4) + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4; + else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_KX4) + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4; + else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_XAUI) + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_XAUI; + goto out; + break; + case IXGBE_AUTOC_LMS_10G_SERIAL: + if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_KR) { + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR; + goto out; + } else if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) + goto sfp_check; + break; + case IXGBE_AUTOC_LMS_KX4_KX_KR: + case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN: + if (autoc & IXGBE_AUTOC_KX_SUPP) + physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX; + if (autoc & IXGBE_AUTOC_KX4_SUPP) + physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4; + if (autoc & IXGBE_AUTOC_KR_SUPP) + physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KR; + goto out; + break; + default: + goto out; + break; + } + +sfp_check: + /* SFP check must be done last since DA modules are sometimes used to + * test KR mode - we need to id KR mode correctly before SFP module. + * Call identify_sfp because the pluggable module may have changed */ + physical_layer = ixgbe_get_supported_phy_sfp_layer_generic(hw); +out: + return physical_layer; +} + +/** + * ixgbe_enable_rx_dma_82599 - Enable the Rx DMA unit on 82599 + * @hw: pointer to hardware structure + * @regval: register value to write to RXCTRL + * + * Enables the Rx DMA unit for 82599 + **/ +s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval) +{ + + DEBUGFUNC("ixgbe_enable_rx_dma_82599"); + + /* + * Workaround for 82599 silicon errata when enabling the Rx datapath. + * If traffic is incoming before we enable the Rx unit, it could hang + * the Rx DMA unit. Therefore, make sure the security engine is + * completely disabled prior to enabling the Rx unit. 
+ */ + + hw->mac.ops.disable_sec_rx_path(hw); + + if (regval & IXGBE_RXCTRL_RXEN) + ixgbe_enable_rx(hw); + else + ixgbe_disable_rx(hw); + + hw->mac.ops.enable_sec_rx_path(hw); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_verify_fw_version_82599 - verify FW version for 82599 + * @hw: pointer to hardware structure + * + * Verifies that installed the firmware version is 0.6 or higher + * for SFI devices. All 82599 SFI devices should have version 0.6 or higher. + * + * Returns IXGBE_ERR_EEPROM_VERSION if the FW is not present or + * if the FW version is not supported. + **/ +STATIC s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw) +{ + s32 status = IXGBE_ERR_EEPROM_VERSION; + u16 fw_offset, fw_ptp_cfg_offset; + u16 fw_version; + + DEBUGFUNC("ixgbe_verify_fw_version_82599"); + + /* firmware check is only necessary for SFI devices */ + if (hw->phy.media_type != ixgbe_media_type_fiber) { + status = IXGBE_SUCCESS; + goto fw_version_out; + } + + /* get the offset to the Firmware Module block */ + if (hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset)) { + ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, + "eeprom read at offset %d failed", IXGBE_FW_PTR); + return IXGBE_ERR_EEPROM_VERSION; + } + + if ((fw_offset == 0) || (fw_offset == 0xFFFF)) + goto fw_version_out; + + /* get the offset to the Pass Through Patch Configuration block */ + if (hw->eeprom.ops.read(hw, (fw_offset + + IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR), + &fw_ptp_cfg_offset)) { + ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, + "eeprom read at offset %d failed", + fw_offset + + IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR); + return IXGBE_ERR_EEPROM_VERSION; + } + + if ((fw_ptp_cfg_offset == 0) || (fw_ptp_cfg_offset == 0xFFFF)) + goto fw_version_out; + + /* get the firmware version */ + if (hw->eeprom.ops.read(hw, (fw_ptp_cfg_offset + + IXGBE_FW_PATCH_VERSION_4), &fw_version)) { + ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, + "eeprom read at offset %d failed", + fw_ptp_cfg_offset + IXGBE_FW_PATCH_VERSION_4); + return IXGBE_ERR_EEPROM_VERSION; + } + + if (fw_version > 0x5) + status = IXGBE_SUCCESS; + +fw_version_out: + return status; +} + +/** + * ixgbe_verify_lesm_fw_enabled_82599 - Checks LESM FW module state. + * @hw: pointer to hardware structure + * + * Returns true if the LESM FW module is present and enabled. Otherwise + * returns false. Smart Speed must be disabled if LESM FW module is enabled. 
+ **/ +bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw) +{ + bool lesm_enabled = false; + u16 fw_offset, fw_lesm_param_offset, fw_lesm_state; + s32 status; + + DEBUGFUNC("ixgbe_verify_lesm_fw_enabled_82599"); + + /* get the offset to the Firmware Module block */ + status = hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset); + + if ((status != IXGBE_SUCCESS) || + (fw_offset == 0) || (fw_offset == 0xFFFF)) + goto out; + + /* get the offset to the LESM Parameters block */ + status = hw->eeprom.ops.read(hw, (fw_offset + + IXGBE_FW_LESM_PARAMETERS_PTR), + &fw_lesm_param_offset); + + if ((status != IXGBE_SUCCESS) || + (fw_lesm_param_offset == 0) || (fw_lesm_param_offset == 0xFFFF)) + goto out; + + /* get the LESM state word */ + status = hw->eeprom.ops.read(hw, (fw_lesm_param_offset + + IXGBE_FW_LESM_STATE_1), + &fw_lesm_state); + + if ((status == IXGBE_SUCCESS) && + (fw_lesm_state & IXGBE_FW_LESM_STATE_ENABLED)) + lesm_enabled = true; + +out: + return lesm_enabled; +} + +/** + * ixgbe_read_eeprom_buffer_82599 - Read EEPROM word(s) using + * fastest available method + * + * @hw: pointer to hardware structure + * @offset: offset of word in EEPROM to read + * @words: number of words + * @data: word(s) read from the EEPROM + * + * Retrieves 16 bit word(s) read from EEPROM + **/ +STATIC s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data) +{ + struct ixgbe_eeprom_info *eeprom = &hw->eeprom; + s32 ret_val = IXGBE_ERR_CONFIG; + + DEBUGFUNC("ixgbe_read_eeprom_buffer_82599"); + + /* + * If EEPROM is detected and can be addressed using 14 bits, + * use EERD otherwise use bit bang + */ + if ((eeprom->type == ixgbe_eeprom_spi) && + (offset + (words - 1) <= IXGBE_EERD_MAX_ADDR)) + ret_val = ixgbe_read_eerd_buffer_generic(hw, offset, words, + data); + else + ret_val = ixgbe_read_eeprom_buffer_bit_bang_generic(hw, offset, + words, + data); + + return ret_val; +} + +/** + * ixgbe_read_eeprom_82599 - Read EEPROM word using + * fastest available method + * + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM + **/ +STATIC s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw, + u16 offset, u16 *data) +{ + struct ixgbe_eeprom_info *eeprom = &hw->eeprom; + s32 ret_val = IXGBE_ERR_CONFIG; + + DEBUGFUNC("ixgbe_read_eeprom_82599"); + + /* + * If EEPROM is detected and can be addressed using 14 bits, + * use EERD otherwise use bit bang + */ + if ((eeprom->type == ixgbe_eeprom_spi) && + (offset <= IXGBE_EERD_MAX_ADDR)) + ret_val = ixgbe_read_eerd_generic(hw, offset, data); + else + ret_val = ixgbe_read_eeprom_bit_bang_generic(hw, offset, data); + + return ret_val; +} + +/** + * ixgbe_reset_pipeline_82599 - perform pipeline reset + * + * @hw: pointer to hardware structure + * + * Reset pipeline by asserting Restart_AN together with LMS change to ensure + * full pipeline reset. This function assumes the SW/FW lock is held. 
+ **/ +s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw) +{ + s32 ret_val; + u32 anlp1_reg = 0; + u32 i, autoc_reg, autoc2_reg; + + /* Enable link if disabled in NVM */ + autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2); + if (autoc2_reg & IXGBE_AUTOC2_LINK_DISABLE_MASK) { + autoc2_reg &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK; + IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg); + IXGBE_WRITE_FLUSH(hw); + } + + autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); + autoc_reg |= IXGBE_AUTOC_AN_RESTART; + /* Write AUTOC register with toggled LMS[2] bit and Restart_AN */ + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, + autoc_reg ^ (0x4 << IXGBE_AUTOC_LMS_SHIFT)); + /* Wait for AN to leave state 0 */ + for (i = 0; i < 10; i++) { + msec_delay(4); + anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1); + if (anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK) + break; + } + + if (!(anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)) { + DEBUGOUT("auto negotiation not completed\n"); + ret_val = IXGBE_ERR_RESET_FAILED; + goto reset_pipeline_out; + } + + ret_val = IXGBE_SUCCESS; + +reset_pipeline_out: + /* Write AUTOC register with original LMS field and Restart_AN */ + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); + IXGBE_WRITE_FLUSH(hw); + + return ret_val; +} + +/** + * ixgbe_read_i2c_byte_82599 - Reads 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to read + * @dev_addr: address to read from + * @data: value read + * + * Performs byte read operation to SFP module's EEPROM over I2C interface at + * a specified device address. + **/ +STATIC s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data) +{ + u32 esdp; + s32 status; + s32 timeout = 200; + + DEBUGFUNC("ixgbe_read_i2c_byte_82599"); + + if (hw->phy.qsfp_shared_i2c_bus == TRUE) { + /* Acquire I2C bus ownership. */ + esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); + esdp |= IXGBE_ESDP_SDP0; + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); + IXGBE_WRITE_FLUSH(hw); + + while (timeout) { + esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); + if (esdp & IXGBE_ESDP_SDP1) + break; + + msec_delay(5); + timeout--; + } + + if (!timeout) { + DEBUGOUT("Driver can't access resource," + " acquiring I2C bus timeout.\n"); + status = IXGBE_ERR_I2C; + goto release_i2c_access; + } + } + + status = ixgbe_read_i2c_byte_generic(hw, byte_offset, dev_addr, data); + +release_i2c_access: + + if (hw->phy.qsfp_shared_i2c_bus == TRUE) { + /* Release I2C bus ownership. */ + esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); + esdp &= ~IXGBE_ESDP_SDP0; + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); + IXGBE_WRITE_FLUSH(hw); + } + + return status; +} + +/** + * ixgbe_write_i2c_byte_82599 - Writes 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to write + * @dev_addr: address to read from + * @data: value to write + * + * Performs byte write operation to SFP module's EEPROM over I2C interface at + * a specified device address. + **/ +STATIC s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 data) +{ + u32 esdp; + s32 status; + s32 timeout = 200; + + DEBUGFUNC("ixgbe_write_i2c_byte_82599"); + + if (hw->phy.qsfp_shared_i2c_bus == TRUE) { + /* Acquire I2C bus ownership. 
*/ + esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); + esdp |= IXGBE_ESDP_SDP0; + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); + IXGBE_WRITE_FLUSH(hw); + + while (timeout) { + esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); + if (esdp & IXGBE_ESDP_SDP1) + break; + + msec_delay(5); + timeout--; + } + + if (!timeout) { + DEBUGOUT("Driver can't access resource," + " acquiring I2C bus timeout.\n"); + status = IXGBE_ERR_I2C; + goto release_i2c_access; + } + } + + status = ixgbe_write_i2c_byte_generic(hw, byte_offset, dev_addr, data); + +release_i2c_access: + + if (hw->phy.qsfp_shared_i2c_bus == TRUE) { + /* Release I2C bus ownership. */ + esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); + esdp &= ~IXGBE_ESDP_SDP0; + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); + IXGBE_WRITE_FLUSH(hw); + } + + return status; +} diff --git a/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_82599.h b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_82599.h new file mode 100644 index 000000000..238481983 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_82599.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + +#ifndef _IXGBE_82599_H_ +#define _IXGBE_82599_H_ + +s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, bool *autoneg); +enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw); +void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); +void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); +void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); +void ixgbe_set_hard_rate_select_speed(struct ixgbe_hw *hw, + ixgbe_link_speed speed); +s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete); +s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw, + bool autoneg_wait_to_complete); +s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, ixgbe_link_speed speed, + bool autoneg_wait_to_complete); +s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw); +void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw); +s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw); +s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val); +s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val); +s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw); +s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw); +s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw); +u64 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw); +s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval); +s32 prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked, u32 *reg_val); +s32 prot_autoc_write_82599(struct ixgbe_hw *hw, u32 reg_val, bool locked); +#endif /* _IXGBE_82599_H_ */ diff --git a/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_api.c b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_api.c new file mode 100644 index 000000000..0a22df3d0 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_api.c @@ -0,0 +1,1688 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + +#include "ixgbe_api.h" +#include "ixgbe_common.h" + +#define IXGBE_EMPTY_PARAM + +static const u32 ixgbe_mvals_base[IXGBE_MVALS_IDX_LIMIT] = { + IXGBE_MVALS_INIT(IXGBE_EMPTY_PARAM) +}; + +static const u32 ixgbe_mvals_X540[IXGBE_MVALS_IDX_LIMIT] = { + IXGBE_MVALS_INIT(_X540) +}; + +static const u32 ixgbe_mvals_X550[IXGBE_MVALS_IDX_LIMIT] = { + IXGBE_MVALS_INIT(_X550) +}; + +static const u32 ixgbe_mvals_X550EM_x[IXGBE_MVALS_IDX_LIMIT] = { + IXGBE_MVALS_INIT(_X550EM_x) +}; + 
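These per-family mvals tables, together with ixgbe_set_mac_type() and ixgbe_init_shared_code() further down, are the binding step every caller of this file goes through: the ixgbe_hw structure is zeroed, its PCI identity fields are filled in, and only then does the dispatch layer select the register map and install the per-family ops. A minimal bring-up sketch of that order follows; example_ixgbe_probe(), the u8 * type of the bar0 mapping, and stopping at a non-blocking link query are illustrative assumptions, while the ixgbe_* calls and the required hw fields are taken from the function descriptions below.

#include <string.h>

#include "ixgbe_api.h"
#include "ixgbe_type.h"

/*
 * Hypothetical probe helper: zero the hw struct, fill in the identity
 * fields listed in the ixgbe_init_shared_code() description, then walk
 * the documented bring-up sequence through the dispatch API.
 */
static s32 example_ixgbe_probe(struct ixgbe_hw *hw, u8 *bar0,
			       u16 vendor_id, u16 device_id)
{
	ixgbe_link_speed speed;
	bool link_up;
	u8 mac_addr[6];
	s32 status;

	memset(hw, 0, sizeof(*hw));
	hw->hw_addr = bar0;
	hw->vendor_id = vendor_id;
	hw->device_id = device_id;
	/* subsystem IDs and revision_id are filled in the same way. */

	/* Resolves the MAC family, selects the matching mvals table and
	 * installs the per-family function pointers. */
	status = ixgbe_init_shared_code(hw);
	if (status != IXGBE_SUCCESS)
		return status;

	/* Reset and start the MAC through the installed ops. */
	status = ixgbe_init_hw(hw);
	if (status != IXGBE_SUCCESS)
		return status;

	/* RAR0 holds the permanent MAC address after reset. */
	status = ixgbe_get_mac_addr(hw, mac_addr);
	if (status != IXGBE_SUCCESS)
		return status;

	/* Non-blocking link query. */
	return ixgbe_check_link(hw, &speed, &link_up, false);
}

Passing false for link_up_wait_to_complete keeps this probe path from blocking; a caller that needs a settled link state would poll ixgbe_check_link() again later.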
+static const u32 ixgbe_mvals_X550EM_a[IXGBE_MVALS_IDX_LIMIT] = { + IXGBE_MVALS_INIT(_X550EM_a) +}; + +/** + * ixgbe_dcb_get_rtrup2tc - read rtrup2tc reg + * @hw: pointer to hardware structure + * @map: pointer to u8 arr for returning map + * + * Read the rtrup2tc HW register and resolve its content into map + **/ +void ixgbe_dcb_get_rtrup2tc(struct ixgbe_hw *hw, u8 *map) +{ + if (hw->mac.ops.get_rtrup2tc) + hw->mac.ops.get_rtrup2tc(hw, map); +} + +/** + * ixgbe_init_shared_code - Initialize the shared code + * @hw: pointer to hardware structure + * + * This will assign function pointers and assign the MAC type and PHY code. + * Does not touch the hardware. This function must be called prior to any + * other function in the shared code. The ixgbe_hw structure should be + * memset to 0 prior to calling this function. The following fields in + * hw structure should be filled in prior to calling this function: + * hw_addr, back, device_id, vendor_id, subsystem_device_id, + * subsystem_vendor_id, and revision_id + **/ +s32 ixgbe_init_shared_code(struct ixgbe_hw *hw) +{ + s32 status; + + DEBUGFUNC("ixgbe_init_shared_code"); + + /* + * Set the mac type + */ + ixgbe_set_mac_type(hw); + + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + status = ixgbe_init_ops_82598(hw); + break; + case ixgbe_mac_82599EB: + status = ixgbe_init_ops_82599(hw); + break; + case ixgbe_mac_X540: + status = ixgbe_init_ops_X540(hw); + break; + case ixgbe_mac_X550: + status = ixgbe_init_ops_X550(hw); + break; + case ixgbe_mac_X550EM_x: + status = ixgbe_init_ops_X550EM_x(hw); + break; + case ixgbe_mac_X550EM_a: + status = ixgbe_init_ops_X550EM_a(hw); + break; + case ixgbe_mac_82599_vf: + case ixgbe_mac_X540_vf: + case ixgbe_mac_X550_vf: + case ixgbe_mac_X550EM_x_vf: + case ixgbe_mac_X550EM_a_vf: + status = ixgbe_init_ops_vf(hw); + break; + default: + status = IXGBE_ERR_DEVICE_NOT_SUPPORTED; + break; + } + hw->mac.max_link_up_time = IXGBE_LINK_UP_TIME; + + return status; +} + +/** + * ixgbe_set_mac_type - Sets MAC type + * @hw: pointer to the HW structure + * + * This function sets the mac type of the adapter based on the + * vendor ID and device ID stored in the hw structure. 
+ **/ +s32 ixgbe_set_mac_type(struct ixgbe_hw *hw) +{ + s32 ret_val = IXGBE_SUCCESS; + + DEBUGFUNC("ixgbe_set_mac_type\n"); + + if (hw->vendor_id != IXGBE_INTEL_VENDOR_ID) { + ERROR_REPORT2(IXGBE_ERROR_UNSUPPORTED, + "Unsupported vendor id: %x", hw->vendor_id); + return IXGBE_ERR_DEVICE_NOT_SUPPORTED; + } + + hw->mvals = ixgbe_mvals_base; + + switch (hw->device_id) { + case IXGBE_DEV_ID_82598: + case IXGBE_DEV_ID_82598_BX: + case IXGBE_DEV_ID_82598AF_SINGLE_PORT: + case IXGBE_DEV_ID_82598AF_DUAL_PORT: + case IXGBE_DEV_ID_82598AT: + case IXGBE_DEV_ID_82598AT2: + case IXGBE_DEV_ID_82598EB_CX4: + case IXGBE_DEV_ID_82598_CX4_DUAL_PORT: + case IXGBE_DEV_ID_82598_DA_DUAL_PORT: + case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM: + case IXGBE_DEV_ID_82598EB_XF_LR: + case IXGBE_DEV_ID_82598EB_SFP_LOM: + hw->mac.type = ixgbe_mac_82598EB; + break; + case IXGBE_DEV_ID_82599_KX4: + case IXGBE_DEV_ID_82599_KX4_MEZZ: + case IXGBE_DEV_ID_82599_XAUI_LOM: + case IXGBE_DEV_ID_82599_COMBO_BACKPLANE: + case IXGBE_DEV_ID_82599_KR: + case IXGBE_DEV_ID_82599_SFP: + case IXGBE_DEV_ID_82599_BACKPLANE_FCOE: + case IXGBE_DEV_ID_82599_SFP_FCOE: + case IXGBE_DEV_ID_82599_SFP_EM: + case IXGBE_DEV_ID_82599_SFP_SF2: + case IXGBE_DEV_ID_82599_SFP_SF_QP: + case IXGBE_DEV_ID_82599_QSFP_SF_QP: + case IXGBE_DEV_ID_82599EN_SFP: + case IXGBE_DEV_ID_82599_CX4: + case IXGBE_DEV_ID_82599_T3_LOM: + hw->mac.type = ixgbe_mac_82599EB; + break; + case IXGBE_DEV_ID_82599_VF: + case IXGBE_DEV_ID_82599_VF_HV: + hw->mac.type = ixgbe_mac_82599_vf; + break; + case IXGBE_DEV_ID_X540_VF: + case IXGBE_DEV_ID_X540_VF_HV: + hw->mac.type = ixgbe_mac_X540_vf; + hw->mvals = ixgbe_mvals_X540; + break; + case IXGBE_DEV_ID_X540T: + case IXGBE_DEV_ID_X540T1: + hw->mac.type = ixgbe_mac_X540; + hw->mvals = ixgbe_mvals_X540; + break; + case IXGBE_DEV_ID_X550T: + case IXGBE_DEV_ID_X550T1: + hw->mac.type = ixgbe_mac_X550; + hw->mvals = ixgbe_mvals_X550; + break; + case IXGBE_DEV_ID_X550EM_X_KX4: + case IXGBE_DEV_ID_X550EM_X_KR: + case IXGBE_DEV_ID_X550EM_X_10G_T: + case IXGBE_DEV_ID_X550EM_X_1G_T: + case IXGBE_DEV_ID_X550EM_X_SFP: + case IXGBE_DEV_ID_X550EM_X_XFI: + hw->mac.type = ixgbe_mac_X550EM_x; + hw->mvals = ixgbe_mvals_X550EM_x; + break; + case IXGBE_DEV_ID_X550EM_A_KR: + case IXGBE_DEV_ID_X550EM_A_KR_L: + case IXGBE_DEV_ID_X550EM_A_SFP_N: + case IXGBE_DEV_ID_X550EM_A_SGMII: + case IXGBE_DEV_ID_X550EM_A_SGMII_L: + case IXGBE_DEV_ID_X550EM_A_1G_T: + case IXGBE_DEV_ID_X550EM_A_1G_T_L: + case IXGBE_DEV_ID_X550EM_A_10G_T: + case IXGBE_DEV_ID_X550EM_A_QSFP: + case IXGBE_DEV_ID_X550EM_A_QSFP_N: + case IXGBE_DEV_ID_X550EM_A_SFP: + hw->mac.type = ixgbe_mac_X550EM_a; + hw->mvals = ixgbe_mvals_X550EM_a; + break; + case IXGBE_DEV_ID_X550_VF: + case IXGBE_DEV_ID_X550_VF_HV: + hw->mac.type = ixgbe_mac_X550_vf; + hw->mvals = ixgbe_mvals_X550; + break; + case IXGBE_DEV_ID_X550EM_X_VF: + case IXGBE_DEV_ID_X550EM_X_VF_HV: + hw->mac.type = ixgbe_mac_X550EM_x_vf; + hw->mvals = ixgbe_mvals_X550EM_x; + break; + case IXGBE_DEV_ID_X550EM_A_VF: + case IXGBE_DEV_ID_X550EM_A_VF_HV: + hw->mac.type = ixgbe_mac_X550EM_a_vf; + hw->mvals = ixgbe_mvals_X550EM_a; + break; + default: + ret_val = IXGBE_ERR_DEVICE_NOT_SUPPORTED; + ERROR_REPORT2(IXGBE_ERROR_UNSUPPORTED, + "Unsupported device id: %x", + hw->device_id); + break; + } + + DEBUGOUT2("ixgbe_set_mac_type found mac: %d, returns: %d\n", + hw->mac.type, ret_val); + return ret_val; +} + +/** + * ixgbe_init_hw - Initialize the hardware + * @hw: pointer to hardware structure + * + * Initialize the hardware by resetting and then starting the 
hardware + **/ +s32 ixgbe_init_hw(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->mac.ops.init_hw, (hw), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_reset_hw - Performs a hardware reset + * @hw: pointer to hardware structure + * + * Resets the hardware by resetting the transmit and receive units, masks and + * clears all interrupts, performs a PHY reset, and performs a MAC reset + **/ +s32 ixgbe_reset_hw(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->mac.ops.reset_hw, (hw), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_start_hw - Prepares hardware for Rx/Tx + * @hw: pointer to hardware structure + * + * Starts the hardware by filling the bus info structure and media type, + * clears all on chip counters, initializes receive address registers, + * multicast table, VLAN filter table, calls routine to setup link and + * flow control settings, and leaves transmit and receive units disabled + * and uninitialized. + **/ +s32 ixgbe_start_hw(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->mac.ops.start_hw, (hw), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_enable_relaxed_ordering - Enables tx relaxed ordering, + * which is disabled by default in ixgbe_start_hw(); + * + * @hw: pointer to hardware structure + * + * Enable relaxed ordering; + **/ +void ixgbe_enable_relaxed_ordering(struct ixgbe_hw *hw) +{ + if (hw->mac.ops.enable_relaxed_ordering) + hw->mac.ops.enable_relaxed_ordering(hw); +} + +/** + * ixgbe_clear_hw_cntrs - Clear hardware counters + * @hw: pointer to hardware structure + * + * Clears all hardware statistics counters by reading them from the hardware + * Statistics counters are clear on read. + **/ +s32 ixgbe_clear_hw_cntrs(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->mac.ops.clear_hw_cntrs, (hw), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_get_media_type - Get media type + * @hw: pointer to hardware structure + * + * Returns the media type (fiber, copper, backplane) + **/ +enum ixgbe_media_type ixgbe_get_media_type(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->mac.ops.get_media_type, (hw), + ixgbe_media_type_unknown); +} + +/** + * ixgbe_get_mac_addr - Get MAC address + * @hw: pointer to hardware structure + * @mac_addr: Adapter MAC address + * + * Reads the adapter's MAC address from the first Receive Address Register + * (RAR0) A reset of the adapter must have been performed prior to calling + * this function in order for the MAC address to have been loaded from the + * EEPROM into RAR0 + **/ +s32 ixgbe_get_mac_addr(struct ixgbe_hw *hw, u8 *mac_addr) +{ + return ixgbe_call_func(hw, hw->mac.ops.get_mac_addr, + (hw, mac_addr), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_get_san_mac_addr - Get SAN MAC address + * @hw: pointer to hardware structure + * @san_mac_addr: SAN MAC address + * + * Reads the SAN MAC address from the EEPROM, if it's available. This is + * per-port, so set_lan_id() must be called before reading the addresses. + **/ +s32 ixgbe_get_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr) +{ + return ixgbe_call_func(hw, hw->mac.ops.get_san_mac_addr, + (hw, san_mac_addr), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_set_san_mac_addr - Write a SAN MAC address + * @hw: pointer to hardware structure + * @san_mac_addr: SAN MAC address + * + * Writes A SAN MAC address to the EEPROM. 
+ **/ +s32 ixgbe_set_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr) +{ + return ixgbe_call_func(hw, hw->mac.ops.set_san_mac_addr, + (hw, san_mac_addr), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_get_device_caps - Get additional device capabilities + * @hw: pointer to hardware structure + * @device_caps: the EEPROM word for device capabilities + * + * Reads the extra device capabilities from the EEPROM + **/ +s32 ixgbe_get_device_caps(struct ixgbe_hw *hw, u16 *device_caps) +{ + return ixgbe_call_func(hw, hw->mac.ops.get_device_caps, + (hw, device_caps), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_get_wwn_prefix - Get alternative WWNN/WWPN prefix from the EEPROM + * @hw: pointer to hardware structure + * @wwnn_prefix: the alternative WWNN prefix + * @wwpn_prefix: the alternative WWPN prefix + * + * This function will read the EEPROM from the alternative SAN MAC address + * block to check the support for the alternative WWNN/WWPN prefix support. + **/ +s32 ixgbe_get_wwn_prefix(struct ixgbe_hw *hw, u16 *wwnn_prefix, + u16 *wwpn_prefix) +{ + return ixgbe_call_func(hw, hw->mac.ops.get_wwn_prefix, + (hw, wwnn_prefix, wwpn_prefix), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_get_fcoe_boot_status - Get FCOE boot status from EEPROM + * @hw: pointer to hardware structure + * @bs: the fcoe boot status + * + * This function will read the FCOE boot status from the iSCSI FCOE block + **/ +s32 ixgbe_get_fcoe_boot_status(struct ixgbe_hw *hw, u16 *bs) +{ + return ixgbe_call_func(hw, hw->mac.ops.get_fcoe_boot_status, + (hw, bs), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_get_bus_info - Set PCI bus info + * @hw: pointer to hardware structure + * + * Sets the PCI bus info (speed, width, type) within the ixgbe_hw structure + **/ +s32 ixgbe_get_bus_info(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->mac.ops.get_bus_info, (hw), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_get_num_of_tx_queues - Get Tx queues + * @hw: pointer to hardware structure + * + * Returns the number of transmit queues for the given adapter. + **/ +u32 ixgbe_get_num_of_tx_queues(struct ixgbe_hw *hw) +{ + return hw->mac.max_tx_queues; +} + +/** + * ixgbe_get_num_of_rx_queues - Get Rx queues + * @hw: pointer to hardware structure + * + * Returns the number of receive queues for the given adapter. + **/ +u32 ixgbe_get_num_of_rx_queues(struct ixgbe_hw *hw) +{ + return hw->mac.max_rx_queues; +} + +/** + * ixgbe_stop_adapter - Disable Rx/Tx units + * @hw: pointer to hardware structure + * + * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts, + * disables transmit and receive units. The adapter_stopped flag is used by + * the shared code and drivers to determine if the adapter is in a stopped + * state and should not touch the hardware. + **/ +s32 ixgbe_stop_adapter(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->mac.ops.stop_adapter, (hw), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_read_pba_string - Reads part number string from EEPROM + * @hw: pointer to hardware structure + * @pba_num: stores the part number string from the EEPROM + * @pba_num_size: part number string buffer length + * + * Reads the part number string from the EEPROM. + **/ +s32 ixgbe_read_pba_string(struct ixgbe_hw *hw, u8 *pba_num, u32 pba_num_size) +{ + return ixgbe_read_pba_string_generic(hw, pba_num, pba_num_size); +} + +/** + * ixgbe_read_pba_num - Reads part number from EEPROM + * @hw: pointer to hardware structure + * @pba_num: stores the part number from the EEPROM + * + * Reads the part number from the EEPROM. 
+ **/ +s32 ixgbe_read_pba_num(struct ixgbe_hw *hw, u32 *pba_num) +{ + return ixgbe_read_pba_num_generic(hw, pba_num); +} + +/** + * ixgbe_identify_phy - Get PHY type + * @hw: pointer to hardware structure + * + * Determines the physical layer module found on the current adapter. + **/ +s32 ixgbe_identify_phy(struct ixgbe_hw *hw) +{ + s32 status = IXGBE_SUCCESS; + + if (hw->phy.type == ixgbe_phy_unknown) { + status = ixgbe_call_func(hw, hw->phy.ops.identify, (hw), + IXGBE_NOT_IMPLEMENTED); + } + + return status; +} + +/** + * ixgbe_reset_phy - Perform a PHY reset + * @hw: pointer to hardware structure + **/ +s32 ixgbe_reset_phy(struct ixgbe_hw *hw) +{ + s32 status = IXGBE_SUCCESS; + + if (hw->phy.type == ixgbe_phy_unknown) { + if (ixgbe_identify_phy(hw) != IXGBE_SUCCESS) + status = IXGBE_ERR_PHY; + } + + if (status == IXGBE_SUCCESS) { + status = ixgbe_call_func(hw, hw->phy.ops.reset, (hw), + IXGBE_NOT_IMPLEMENTED); + } + return status; +} + +/** + * ixgbe_get_phy_firmware_version - + * @hw: pointer to hardware structure + * @firmware_version: pointer to firmware version + **/ +s32 ixgbe_get_phy_firmware_version(struct ixgbe_hw *hw, u16 *firmware_version) +{ + s32 status = IXGBE_SUCCESS; + + status = ixgbe_call_func(hw, hw->phy.ops.get_firmware_version, + (hw, firmware_version), + IXGBE_NOT_IMPLEMENTED); + return status; +} + +/** + * ixgbe_read_phy_reg - Read PHY register + * @hw: pointer to hardware structure + * @reg_addr: 32 bit address of PHY register to read + * @device_type: type of device you want to communicate with + * @phy_data: Pointer to read data from PHY register + * + * Reads a value from a specified PHY register + **/ +s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, + u16 *phy_data) +{ + if (hw->phy.id == 0) + ixgbe_identify_phy(hw); + + return ixgbe_call_func(hw, hw->phy.ops.read_reg, (hw, reg_addr, + device_type, phy_data), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_write_phy_reg - Write PHY register + * @hw: pointer to hardware structure + * @reg_addr: 32 bit PHY register to write + * @device_type: type of device you want to communicate with + * @phy_data: Data to write to the PHY register + * + * Writes a value to specified PHY register + **/ +s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, + u16 phy_data) +{ + if (hw->phy.id == 0) + ixgbe_identify_phy(hw); + + return ixgbe_call_func(hw, hw->phy.ops.write_reg, (hw, reg_addr, + device_type, phy_data), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_setup_phy_link - Restart PHY autoneg + * @hw: pointer to hardware structure + * + * Restart autonegotiation and PHY and waits for completion. + **/ +s32 ixgbe_setup_phy_link(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->phy.ops.setup_link, (hw), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_setup_internal_phy - Configure integrated PHY + * @hw: pointer to hardware structure + * + * Reconfigure the integrated PHY in order to enable talk to the external PHY. + * Returns success if not implemented, since nothing needs to be done in this + * case. + */ +s32 ixgbe_setup_internal_phy(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->phy.ops.setup_internal_link, (hw), + IXGBE_SUCCESS); +} + +/** + * ixgbe_check_phy_link - Determine link and speed status + * @hw: pointer to hardware structure + * @speed: link speed + * @link_up: true when link is up + * + * Reads a PHY register to determine if link is up and the current speed for + * the PHY. 
+ **/ +s32 ixgbe_check_phy_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed, + bool *link_up) +{ + return ixgbe_call_func(hw, hw->phy.ops.check_link, (hw, speed, + link_up), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_setup_phy_link_speed - Set auto advertise + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg_wait_to_complete: true when waiting for completion is needed + * + * Sets the auto advertised capabilities + **/ +s32 ixgbe_setup_phy_link_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed, + bool autoneg_wait_to_complete) +{ + return ixgbe_call_func(hw, hw->phy.ops.setup_link_speed, (hw, speed, + autoneg_wait_to_complete), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_set_phy_power - Control the phy power state + * @hw: pointer to hardware structure + * @on: true for on, false for off + */ +s32 ixgbe_set_phy_power(struct ixgbe_hw *hw, bool on) +{ + return ixgbe_call_func(hw, hw->phy.ops.set_phy_power, (hw, on), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_check_link - Get link and speed status + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @link_up: true when link is up + * @link_up_wait_to_complete: bool used to wait for link up or not + * + * Reads the links register to determine if link is up and the current speed + **/ +s32 ixgbe_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed, + bool *link_up, bool link_up_wait_to_complete) +{ + return ixgbe_call_func(hw, hw->mac.ops.check_link, (hw, speed, + link_up, link_up_wait_to_complete), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_disable_tx_laser - Disable Tx laser + * @hw: pointer to hardware structure + * + * If the driver needs to disable the laser on SFI optics. + **/ +void ixgbe_disable_tx_laser(struct ixgbe_hw *hw) +{ + if (hw->mac.ops.disable_tx_laser) + hw->mac.ops.disable_tx_laser(hw); +} + +/** + * ixgbe_enable_tx_laser - Enable Tx laser + * @hw: pointer to hardware structure + * + * If the driver needs to enable the laser on SFI optics. + **/ +void ixgbe_enable_tx_laser(struct ixgbe_hw *hw) +{ + if (hw->mac.ops.enable_tx_laser) + hw->mac.ops.enable_tx_laser(hw); +} + +/** + * ixgbe_flap_tx_laser - flap Tx laser to start autotry process + * @hw: pointer to hardware structure + * + * When the driver changes the link speeds that it can support then + * flap the tx laser to alert the link partner to start autotry + * process on its end. + **/ +void ixgbe_flap_tx_laser(struct ixgbe_hw *hw) +{ + if (hw->mac.ops.flap_tx_laser) + hw->mac.ops.flap_tx_laser(hw); +} + +/** + * ixgbe_setup_link - Set link speed + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg_wait_to_complete: true when waiting for completion is needed + * + * Configures link settings. Restarts the link. + * Performs autonegotiation if needed. + **/ +s32 ixgbe_setup_link(struct ixgbe_hw *hw, ixgbe_link_speed speed, + bool autoneg_wait_to_complete) +{ + return ixgbe_call_func(hw, hw->mac.ops.setup_link, (hw, speed, + autoneg_wait_to_complete), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_setup_mac_link - Set link speed + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg_wait_to_complete: true when waiting for completion is needed + * + * Configures link settings. Restarts the link. + * Performs autonegotiation if needed. 
+ **/ +s32 ixgbe_setup_mac_link(struct ixgbe_hw *hw, ixgbe_link_speed speed, + bool autoneg_wait_to_complete) +{ + return ixgbe_call_func(hw, hw->mac.ops.setup_mac_link, (hw, speed, + autoneg_wait_to_complete), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_get_link_capabilities - Returns link capabilities + * @hw: pointer to hardware structure + * @speed: link speed capabilities + * @autoneg: true when autoneg or autotry is enabled + * + * Determines the link capabilities of the current configuration. + **/ +s32 ixgbe_get_link_capabilities(struct ixgbe_hw *hw, ixgbe_link_speed *speed, + bool *autoneg) +{ + return ixgbe_call_func(hw, hw->mac.ops.get_link_capabilities, (hw, + speed, autoneg), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_led_on - Turn on LEDs + * @hw: pointer to hardware structure + * @index: led number to turn on + * + * Turns on the software controllable LEDs. + **/ +s32 ixgbe_led_on(struct ixgbe_hw *hw, u32 index) +{ + return ixgbe_call_func(hw, hw->mac.ops.led_on, (hw, index), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_led_off - Turn off LEDs + * @hw: pointer to hardware structure + * @index: led number to turn off + * + * Turns off the software controllable LEDs. + **/ +s32 ixgbe_led_off(struct ixgbe_hw *hw, u32 index) +{ + return ixgbe_call_func(hw, hw->mac.ops.led_off, (hw, index), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_blink_led_start - Blink LEDs + * @hw: pointer to hardware structure + * @index: led number to blink + * + * Blink LED based on index. + **/ +s32 ixgbe_blink_led_start(struct ixgbe_hw *hw, u32 index) +{ + return ixgbe_call_func(hw, hw->mac.ops.blink_led_start, (hw, index), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_blink_led_stop - Stop blinking LEDs + * @hw: pointer to hardware structure + * @index: led number to stop + * + * Stop blinking LED based on index. + **/ +s32 ixgbe_blink_led_stop(struct ixgbe_hw *hw, u32 index) +{ + return ixgbe_call_func(hw, hw->mac.ops.blink_led_stop, (hw, index), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_init_eeprom_params - Initialize EEPROM parameters + * @hw: pointer to hardware structure + * + * Initializes the EEPROM parameters ixgbe_eeprom_info within the + * ixgbe_hw struct in order to set up EEPROM access. + **/ +s32 ixgbe_init_eeprom_params(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->eeprom.ops.init_params, (hw), + IXGBE_NOT_IMPLEMENTED); +} + + +/** + * ixgbe_write_eeprom - Write word to EEPROM + * @hw: pointer to hardware structure + * @offset: offset within the EEPROM to be written to + * @data: 16 bit word to be written to the EEPROM + * + * Writes 16 bit value to EEPROM. If ixgbe_eeprom_update_checksum is not + * called after this function, the EEPROM will most likely contain an + * invalid checksum. + **/ +s32 ixgbe_write_eeprom(struct ixgbe_hw *hw, u16 offset, u16 data) +{ + return ixgbe_call_func(hw, hw->eeprom.ops.write, (hw, offset, data), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_write_eeprom_buffer - Write word(s) to EEPROM + * @hw: pointer to hardware structure + * @offset: offset within the EEPROM to be written to + * @data: 16 bit word(s) to be written to the EEPROM + * @words: number of words + * + * Writes 16 bit word(s) to EEPROM. If ixgbe_eeprom_update_checksum is not + * called after this function, the EEPROM will most likely contain an + * invalid checksum. 
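+ *
+ * A minimal usage sketch (the 0x10 offset, the four-word buffer and the
+ * bit being set are purely illustrative): read a block, modify it, write
+ * it back, then refresh the checksum so the image stays valid:
+ *
+ *     u16 buf[4];
+ *
+ *     if (ixgbe_read_eeprom_buffer(hw, 0x10, 4, buf) == IXGBE_SUCCESS) {
+ *         buf[0] |= 0x1;
+ *         if (ixgbe_write_eeprom_buffer(hw, 0x10, 4, buf) == IXGBE_SUCCESS)
+ *             ixgbe_update_eeprom_checksum(hw);
+ *     }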
+ **/ +s32 ixgbe_write_eeprom_buffer(struct ixgbe_hw *hw, u16 offset, u16 words, + u16 *data) +{ + return ixgbe_call_func(hw, hw->eeprom.ops.write_buffer, + (hw, offset, words, data), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_read_eeprom - Read word from EEPROM + * @hw: pointer to hardware structure + * @offset: offset within the EEPROM to be read + * @data: read 16 bit value from EEPROM + * + * Reads 16 bit value from EEPROM + **/ +s32 ixgbe_read_eeprom(struct ixgbe_hw *hw, u16 offset, u16 *data) +{ + return ixgbe_call_func(hw, hw->eeprom.ops.read, (hw, offset, data), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_read_eeprom_buffer - Read word(s) from EEPROM + * @hw: pointer to hardware structure + * @offset: offset within the EEPROM to be read + * @data: read 16 bit word(s) from EEPROM + * @words: number of words + * + * Reads 16 bit word(s) from EEPROM + **/ +s32 ixgbe_read_eeprom_buffer(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data) +{ + return ixgbe_call_func(hw, hw->eeprom.ops.read_buffer, + (hw, offset, words, data), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_validate_eeprom_checksum - Validate EEPROM checksum + * @hw: pointer to hardware structure + * @checksum_val: calculated checksum + * + * Performs checksum calculation and validates the EEPROM checksum + **/ +s32 ixgbe_validate_eeprom_checksum(struct ixgbe_hw *hw, u16 *checksum_val) +{ + return ixgbe_call_func(hw, hw->eeprom.ops.validate_checksum, + (hw, checksum_val), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_eeprom_update_checksum - Updates the EEPROM checksum + * @hw: pointer to hardware structure + **/ +s32 ixgbe_update_eeprom_checksum(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->eeprom.ops.update_checksum, (hw), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_insert_mac_addr - Find a RAR for this mac address + * @hw: pointer to hardware structure + * @addr: Address to put into receive address register + * @vmdq: VMDq pool to assign + * + * Puts an ethernet address into a receive address register, or + * finds the rar that it is aleady in; adds to the pool list + **/ +s32 ixgbe_insert_mac_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq) +{ + return ixgbe_call_func(hw, hw->mac.ops.insert_mac_addr, + (hw, addr, vmdq), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_set_rar - Set Rx address register + * @hw: pointer to hardware structure + * @index: Receive address register to write + * @addr: Address to put into receive address register + * @vmdq: VMDq "set" + * @enable_addr: set flag that address is active + * + * Puts an ethernet address into a receive address register. + **/ +s32 ixgbe_set_rar(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, + u32 enable_addr) +{ + return ixgbe_call_func(hw, hw->mac.ops.set_rar, (hw, index, addr, vmdq, + enable_addr), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_clear_rar - Clear Rx address register + * @hw: pointer to hardware structure + * @index: Receive address register to write + * + * Puts an ethernet address into a receive address register. 
+ **/ +s32 ixgbe_clear_rar(struct ixgbe_hw *hw, u32 index) +{ + return ixgbe_call_func(hw, hw->mac.ops.clear_rar, (hw, index), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_set_vmdq - Associate a VMDq index with a receive address + * @hw: pointer to hardware structure + * @rar: receive address register index to associate with VMDq index + * @vmdq: VMDq set or pool index + **/ +s32 ixgbe_set_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq) +{ + return ixgbe_call_func(hw, hw->mac.ops.set_vmdq, (hw, rar, vmdq), + IXGBE_NOT_IMPLEMENTED); + +} + +/** + * ixgbe_set_vmdq_san_mac - Associate VMDq index 127 with a receive address + * @hw: pointer to hardware structure + * @vmdq: VMDq default pool index + **/ +s32 ixgbe_set_vmdq_san_mac(struct ixgbe_hw *hw, u32 vmdq) +{ + return ixgbe_call_func(hw, hw->mac.ops.set_vmdq_san_mac, + (hw, vmdq), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_clear_vmdq - Disassociate a VMDq index from a receive address + * @hw: pointer to hardware structure + * @rar: receive address register index to disassociate with VMDq index + * @vmdq: VMDq set or pool index + **/ +s32 ixgbe_clear_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq) +{ + return ixgbe_call_func(hw, hw->mac.ops.clear_vmdq, (hw, rar, vmdq), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_init_rx_addrs - Initializes receive address filters. + * @hw: pointer to hardware structure + * + * Places the MAC address in receive address register 0 and clears the rest + * of the receive address registers. Clears the multicast table. Assumes + * the receiver is in reset when the routine is called. + **/ +s32 ixgbe_init_rx_addrs(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->mac.ops.init_rx_addrs, (hw), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_get_num_rx_addrs - Returns the number of RAR entries. + * @hw: pointer to hardware structure + **/ +u32 ixgbe_get_num_rx_addrs(struct ixgbe_hw *hw) +{ + return hw->mac.num_rar_entries; +} + +/** + * ixgbe_update_uc_addr_list - Updates the MAC's list of secondary addresses + * @hw: pointer to hardware structure + * @addr_list: the list of new multicast addresses + * @addr_count: number of addresses + * @func: iterator function to walk the multicast address list + * + * The given list replaces any existing list. Clears the secondary addrs from + * receive address registers. Uses unused receive address registers for the + * first secondary addresses, and falls back to promiscuous mode as needed. + **/ +s32 ixgbe_update_uc_addr_list(struct ixgbe_hw *hw, u8 *addr_list, + u32 addr_count, ixgbe_mc_addr_itr func) +{ + return ixgbe_call_func(hw, hw->mac.ops.update_uc_addr_list, (hw, + addr_list, addr_count, func), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_update_mc_addr_list - Updates the MAC's list of multicast addresses + * @hw: pointer to hardware structure + * @mc_addr_list: the list of new multicast addresses + * @mc_addr_count: number of addresses + * @func: iterator function to walk the multicast address list + * @clear: flag, when set clears the table beforehand + * + * The given list replaces any existing list. Clears the MC addrs from receive + * address registers and the multicast table. Uses unused receive address + * registers for the first multicast addresses, and hashes the rest into the + * multicast table. 
+ **/ +s32 ixgbe_update_mc_addr_list(struct ixgbe_hw *hw, u8 *mc_addr_list, + u32 mc_addr_count, ixgbe_mc_addr_itr func, + bool clear) +{ + return ixgbe_call_func(hw, hw->mac.ops.update_mc_addr_list, (hw, + mc_addr_list, mc_addr_count, func, clear), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_enable_mc - Enable multicast address in RAR + * @hw: pointer to hardware structure + * + * Enables multicast address in RAR and the use of the multicast hash table. + **/ +s32 ixgbe_enable_mc(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->mac.ops.enable_mc, (hw), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_disable_mc - Disable multicast address in RAR + * @hw: pointer to hardware structure + * + * Disables multicast address in RAR and the use of the multicast hash table. + **/ +s32 ixgbe_disable_mc(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->mac.ops.disable_mc, (hw), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_clear_vfta - Clear VLAN filter table + * @hw: pointer to hardware structure + * + * Clears the VLAN filer table, and the VMDq index associated with the filter + **/ +s32 ixgbe_clear_vfta(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->mac.ops.clear_vfta, (hw), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_set_vfta - Set VLAN filter table + * @hw: pointer to hardware structure + * @vlan: VLAN id to write to VLAN filter + * @vind: VMDq output index that maps queue to VLAN id in VLVFB + * @vlan_on: boolean flag to turn on/off VLAN + * @vlvf_bypass: boolean flag indicating updating the default pool is okay + * + * Turn on/off specified VLAN in the VLAN filter table. + **/ +s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on, + bool vlvf_bypass) +{ + return ixgbe_call_func(hw, hw->mac.ops.set_vfta, (hw, vlan, vind, + vlan_on, vlvf_bypass), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_set_vlvf - Set VLAN Pool Filter + * @hw: pointer to hardware structure + * @vlan: VLAN id to write to VLAN filter + * @vind: VMDq output index that maps queue to VLAN id in VLVFB + * @vlan_on: boolean flag to turn on/off VLAN in VLVF + * @vfta_delta: pointer to the difference between the current value of VFTA + * and the desired value + * @vfta: the desired value of the VFTA + * @vlvf_bypass: boolean flag indicating updating the default pool is okay + * + * Turn on/off specified bit in VLVF table. + **/ +s32 ixgbe_set_vlvf(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on, + u32 *vfta_delta, u32 vfta, bool vlvf_bypass) +{ + return ixgbe_call_func(hw, hw->mac.ops.set_vlvf, (hw, vlan, vind, + vlan_on, vfta_delta, vfta, vlvf_bypass), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_fc_enable - Enable flow control + * @hw: pointer to hardware structure + * + * Configures the flow control settings based on SW configuration. + **/ +s32 ixgbe_fc_enable(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->mac.ops.fc_enable, (hw), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_setup_fc - Set up flow control + * @hw: pointer to hardware structure + * + * Called at init time to set up flow control. 
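+ *
+ * A minimal ordering sketch (assumes the ixgbe_fc_full and
+ * IXGBE_LINK_SPEED_10GB_FULL definitions from ixgbe_type.h): request a
+ * mode, set up flow control at init time, bring the link up, then enable
+ * flow control:
+ *
+ *     hw->fc.requested_mode = ixgbe_fc_full;
+ *     if (ixgbe_setup_fc(hw) == IXGBE_SUCCESS &&
+ *         ixgbe_setup_link(hw, IXGBE_LINK_SPEED_10GB_FULL, true) ==
+ *         IXGBE_SUCCESS)
+ *         ixgbe_fc_enable(hw);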
+ **/ +s32 ixgbe_setup_fc(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->mac.ops.setup_fc, (hw), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_set_fw_drv_ver - Try to send the driver version number FW + * @hw: pointer to hardware structure + * @maj: driver major number to be sent to firmware + * @min: driver minor number to be sent to firmware + * @build: driver build number to be sent to firmware + * @ver: driver version number to be sent to firmware + * @len: length of driver_ver string + * @driver_ver: driver string + **/ +s32 ixgbe_set_fw_drv_ver(struct ixgbe_hw *hw, u8 maj, u8 min, u8 build, + u8 ver, u16 len, char *driver_ver) +{ + return ixgbe_call_func(hw, hw->mac.ops.set_fw_drv_ver, (hw, maj, min, + build, ver, len, driver_ver), + IXGBE_NOT_IMPLEMENTED); +} + + +/** + * ixgbe_get_thermal_sensor_data - Gathers thermal sensor data + * @hw: pointer to hardware structure + * + * Updates the temperatures in mac.thermal_sensor_data + **/ +s32 ixgbe_get_thermal_sensor_data(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->mac.ops.get_thermal_sensor_data, (hw), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_init_thermal_sensor_thresh - Inits thermal sensor thresholds + * @hw: pointer to hardware structure + * + * Inits the thermal sensor thresholds according to the NVM map + **/ +s32 ixgbe_init_thermal_sensor_thresh(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->mac.ops.init_thermal_sensor_thresh, (hw), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_dmac_config - Configure DMA Coalescing registers. + * @hw: pointer to hardware structure + * + * Configure DMA coalescing. If enabling dmac, dmac is activated. + * When disabling dmac, dmac enable dmac bit is cleared. + **/ +s32 ixgbe_dmac_config(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->mac.ops.dmac_config, (hw), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_dmac_update_tcs - Configure DMA Coalescing registers. + * @hw: pointer to hardware structure + * + * Disables dmac, updates per TC settings, and then enable dmac. + **/ +s32 ixgbe_dmac_update_tcs(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->mac.ops.dmac_update_tcs, (hw), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_dmac_config_tcs - Configure DMA Coalescing registers. + * @hw: pointer to hardware structure + * + * Configure DMA coalescing threshold per TC and set high priority bit for + * FCOE TC. The dmac enable bit must be cleared before configuring. + **/ +s32 ixgbe_dmac_config_tcs(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->mac.ops.dmac_config_tcs, (hw), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_setup_eee - Enable/disable EEE support + * @hw: pointer to the HW structure + * @enable_eee: boolean flag to enable EEE + * + * Enable/disable EEE based on enable_ee flag. + * Auto-negotiation must be started after BASE-T EEE bits in PHY register 7.3C + * are modified. 
+ * + **/ +s32 ixgbe_setup_eee(struct ixgbe_hw *hw, bool enable_eee) +{ + return ixgbe_call_func(hw, hw->mac.ops.setup_eee, (hw, enable_eee), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_set_source_address_pruning - Enable/Disable source address pruning + * @hw: pointer to hardware structure + * @enable: enable or disable source address pruning + * @pool: Rx pool - Rx pool to toggle source address pruning + **/ +void ixgbe_set_source_address_pruning(struct ixgbe_hw *hw, bool enable, + unsigned int pool) +{ + if (hw->mac.ops.set_source_address_pruning) + hw->mac.ops.set_source_address_pruning(hw, enable, pool); +} + +/** + * ixgbe_set_ethertype_anti_spoofing - Enable/Disable Ethertype anti-spoofing + * @hw: pointer to hardware structure + * @enable: enable or disable switch for Ethertype anti-spoofing + * @vf: Virtual Function pool - VF Pool to set for Ethertype anti-spoofing + * + **/ +void ixgbe_set_ethertype_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf) +{ + if (hw->mac.ops.set_ethertype_anti_spoofing) + hw->mac.ops.set_ethertype_anti_spoofing(hw, enable, vf); +} + +/** + * ixgbe_read_iosf_sb_reg - Read 32 bit PHY register + * @hw: pointer to hardware structure + * @reg_addr: 32 bit address of PHY register to read + * @device_type: type of device you want to communicate with + * @phy_data: Pointer to read data from PHY register + * + * Reads a value from a specified PHY register + **/ +s32 ixgbe_read_iosf_sb_reg(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u32 *phy_data) +{ + return ixgbe_call_func(hw, hw->mac.ops.read_iosf_sb_reg, (hw, reg_addr, + device_type, phy_data), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_write_iosf_sb_reg - Write 32 bit register through IOSF Sideband + * @hw: pointer to hardware structure + * @reg_addr: 32 bit PHY register to write + * @device_type: type of device you want to communicate with + * @phy_data: Data to write to the PHY register + * + * Writes a value to specified PHY register + **/ +s32 ixgbe_write_iosf_sb_reg(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u32 phy_data) +{ + return ixgbe_call_func(hw, hw->mac.ops.write_iosf_sb_reg, (hw, reg_addr, + device_type, phy_data), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_disable_mdd - Disable malicious driver detection + * @hw: pointer to hardware structure + * + **/ +void ixgbe_disable_mdd(struct ixgbe_hw *hw) +{ + if (hw->mac.ops.disable_mdd) + hw->mac.ops.disable_mdd(hw); +} + +/** + * ixgbe_enable_mdd - Enable malicious driver detection + * @hw: pointer to hardware structure + * + **/ +void ixgbe_enable_mdd(struct ixgbe_hw *hw) +{ + if (hw->mac.ops.enable_mdd) + hw->mac.ops.enable_mdd(hw); +} + +/** + * ixgbe_mdd_event - Handle malicious driver detection event + * @hw: pointer to hardware structure + * @vf_bitmap: vf bitmap of malicious vfs + * + **/ +void ixgbe_mdd_event(struct ixgbe_hw *hw, u32 *vf_bitmap) +{ + if (hw->mac.ops.mdd_event) + hw->mac.ops.mdd_event(hw, vf_bitmap); +} + +/** + * ixgbe_restore_mdd_vf - Restore VF that was disabled during malicious driver + * detection event + * @hw: pointer to hardware structure + * @vf: vf index + * + **/ +void ixgbe_restore_mdd_vf(struct ixgbe_hw *hw, u32 vf) +{ + if (hw->mac.ops.restore_mdd_vf) + hw->mac.ops.restore_mdd_vf(hw, vf); +} + +/** + * ixgbe_fw_recovery_mode - Check if in FW NVM recovery mode + * @hw: pointer to hardware structure + * + **/ +bool ixgbe_fw_recovery_mode(struct ixgbe_hw *hw) +{ + if (hw->mac.ops.fw_recovery_mode) + return hw->mac.ops.fw_recovery_mode(hw); + return false; +} + +/** + * 
ixgbe_enter_lplu - Transition to low power states + * @hw: pointer to hardware structure + * + * Configures Low Power Link Up on transition to low power states + * (from D0 to non-D0). + **/ +s32 ixgbe_enter_lplu(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->phy.ops.enter_lplu, (hw), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_handle_lasi - Handle external Base T PHY interrupt + * @hw: pointer to hardware structure + * + * Handle external Base T PHY interrupt. If high temperature + * failure alarm then return error, else if link status change + * then setup internal/external PHY link + * + * Return IXGBE_ERR_OVERTEMP if interrupt is high temperature + * failure alarm, else return PHY access status. + */ +s32 ixgbe_handle_lasi(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->phy.ops.handle_lasi, (hw), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_read_analog_reg8 - Reads 8 bit analog register + * @hw: pointer to hardware structure + * @reg: analog register to read + * @val: read value + * + * Performs write operation to analog register specified. + **/ +s32 ixgbe_read_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 *val) +{ + return ixgbe_call_func(hw, hw->mac.ops.read_analog_reg8, (hw, reg, + val), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_write_analog_reg8 - Writes 8 bit analog register + * @hw: pointer to hardware structure + * @reg: analog register to write + * @val: value to write + * + * Performs write operation to Atlas analog register specified. + **/ +s32 ixgbe_write_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 val) +{ + return ixgbe_call_func(hw, hw->mac.ops.write_analog_reg8, (hw, reg, + val), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_init_uta_tables - Initializes Unicast Table Arrays. + * @hw: pointer to hardware structure + * + * Initializes the Unicast Table Arrays to zero on device load. This + * is part of the Rx init addr execution path. + **/ +s32 ixgbe_init_uta_tables(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->mac.ops.init_uta_tables, (hw), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_read_i2c_byte - Reads 8 bit word over I2C at specified device address + * @hw: pointer to hardware structure + * @byte_offset: byte offset to read + * @dev_addr: I2C bus address to read from + * @data: value read + * + * Performs byte read operation to SFP module's EEPROM over I2C interface. + **/ +s32 ixgbe_read_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, + u8 *data) +{ + return ixgbe_call_func(hw, hw->phy.ops.read_i2c_byte, (hw, byte_offset, + dev_addr, data), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_read_i2c_byte_unlocked - Reads 8 bit word via I2C from device address + * @hw: pointer to hardware structure + * @byte_offset: byte offset to read + * @dev_addr: I2C bus address to read from + * @data: value read + * + * Performs byte read operation to SFP module's EEPROM over I2C interface. + **/ +s32 ixgbe_read_i2c_byte_unlocked(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data) +{ + return ixgbe_call_func(hw, hw->phy.ops.read_i2c_byte_unlocked, + (hw, byte_offset, dev_addr, data), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_read_link - Perform read operation on link device + * @hw: pointer to the hardware structure + * @addr: bus address to read from + * @reg: device register to read from + * @val: pointer to location to receive read value + * + * Returns an error code on error. 
+ */ +s32 ixgbe_read_link(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 *val) +{ + return ixgbe_call_func(hw, hw->link.ops.read_link, (hw, addr, + reg, val), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_read_link_unlocked - Perform read operation on link device + * @hw: pointer to the hardware structure + * @addr: bus address to read from + * @reg: device register to read from + * @val: pointer to location to receive read value + * + * Returns an error code on error. + **/ +s32 ixgbe_read_link_unlocked(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 *val) +{ + return ixgbe_call_func(hw, hw->link.ops.read_link_unlocked, + (hw, addr, reg, val), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_write_i2c_byte - Writes 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to write + * @dev_addr: I2C bus address to write to + * @data: value to write + * + * Performs byte write operation to SFP module's EEPROM over I2C interface + * at a specified device address. + **/ +s32 ixgbe_write_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, + u8 data) +{ + return ixgbe_call_func(hw, hw->phy.ops.write_i2c_byte, (hw, byte_offset, + dev_addr, data), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_write_i2c_byte_unlocked - Writes 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to write + * @dev_addr: I2C bus address to write to + * @data: value to write + * + * Performs byte write operation to SFP module's EEPROM over I2C interface + * at a specified device address. + **/ +s32 ixgbe_write_i2c_byte_unlocked(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 data) +{ + return ixgbe_call_func(hw, hw->phy.ops.write_i2c_byte_unlocked, + (hw, byte_offset, dev_addr, data), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_write_link - Perform write operation on link device + * @hw: pointer to the hardware structure + * @addr: bus address to write to + * @reg: device register to write to + * @val: value to write + * + * Returns an error code on error. + */ +s32 ixgbe_write_link(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 val) +{ + return ixgbe_call_func(hw, hw->link.ops.write_link, + (hw, addr, reg, val), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_write_link_unlocked - Perform write operation on link device + * @hw: pointer to the hardware structure + * @addr: bus address to write to + * @reg: device register to write to + * @val: value to write + * + * Returns an error code on error. + **/ +s32 ixgbe_write_link_unlocked(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 val) +{ + return ixgbe_call_func(hw, hw->link.ops.write_link_unlocked, + (hw, addr, reg, val), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_write_i2c_eeprom - Writes 8 bit EEPROM word over I2C interface + * @hw: pointer to hardware structure + * @byte_offset: EEPROM byte offset to write + * @eeprom_data: value to write + * + * Performs byte write operation to SFP module's EEPROM over I2C interface. + **/ +s32 ixgbe_write_i2c_eeprom(struct ixgbe_hw *hw, + u8 byte_offset, u8 eeprom_data) +{ + return ixgbe_call_func(hw, hw->phy.ops.write_i2c_eeprom, + (hw, byte_offset, eeprom_data), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_read_i2c_eeprom - Reads 8 bit EEPROM word over I2C interface + * @hw: pointer to hardware structure + * @byte_offset: EEPROM byte offset to read + * @eeprom_data: value read + * + * Performs byte read operation to SFP module's EEPROM over I2C interface. 
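+ *
+ * A minimal usage sketch (byte 0 of the module EEPROM is read because it
+ * holds the SFF-8024 identifier; the 0x3 value for SFP/SFP+ is quoted
+ * here only as an illustration):
+ *
+ *     u8 id = 0;
+ *
+ *     if (ixgbe_read_i2c_eeprom(hw, 0, &id) == IXGBE_SUCCESS && id == 0x3)
+ *         DEBUGOUT("SFP/SFP+ module detected\n");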
+ **/ +s32 ixgbe_read_i2c_eeprom(struct ixgbe_hw *hw, u8 byte_offset, u8 *eeprom_data) +{ + return ixgbe_call_func(hw, hw->phy.ops.read_i2c_eeprom, + (hw, byte_offset, eeprom_data), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_get_supported_physical_layer - Returns physical layer type + * @hw: pointer to hardware structure + * + * Determines physical layer capabilities of the current configuration. + **/ +u64 ixgbe_get_supported_physical_layer(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->mac.ops.get_supported_physical_layer, + (hw), IXGBE_PHYSICAL_LAYER_UNKNOWN); +} + +/** + * ixgbe_enable_rx_dma - Enables Rx DMA unit, dependent on device specifics + * @hw: pointer to hardware structure + * @regval: bitfield to write to the Rx DMA register + * + * Enables the Rx DMA unit of the device. + **/ +s32 ixgbe_enable_rx_dma(struct ixgbe_hw *hw, u32 regval) +{ + return ixgbe_call_func(hw, hw->mac.ops.enable_rx_dma, + (hw, regval), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_disable_sec_rx_path - Stops the receive data path + * @hw: pointer to hardware structure + * + * Stops the receive data path. + **/ +s32 ixgbe_disable_sec_rx_path(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->mac.ops.disable_sec_rx_path, + (hw), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_enable_sec_rx_path - Enables the receive data path + * @hw: pointer to hardware structure + * + * Enables the receive data path. + **/ +s32 ixgbe_enable_sec_rx_path(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->mac.ops.enable_sec_rx_path, + (hw), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_acquire_swfw_semaphore - Acquire SWFW semaphore + * @hw: pointer to hardware structure + * @mask: Mask to specify which semaphore to acquire + * + * Acquires the SWFW semaphore through SW_FW_SYNC register for the specified + * function (CSR, PHY0, PHY1, EEPROM, Flash) + **/ +s32 ixgbe_acquire_swfw_semaphore(struct ixgbe_hw *hw, u32 mask) +{ + return ixgbe_call_func(hw, hw->mac.ops.acquire_swfw_sync, + (hw, mask), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_release_swfw_semaphore - Release SWFW semaphore + * @hw: pointer to hardware structure + * @mask: Mask to specify which semaphore to release + * + * Releases the SWFW semaphore through SW_FW_SYNC register for the specified + * function (CSR, PHY0, PHY1, EEPROM, Flash) + **/ +void ixgbe_release_swfw_semaphore(struct ixgbe_hw *hw, u32 mask) +{ + if (hw->mac.ops.release_swfw_sync) + hw->mac.ops.release_swfw_sync(hw, mask); +} + +/** + * ixgbe_init_swfw_semaphore - Clean up SWFW semaphore + * @hw: pointer to hardware structure + * + * Attempts to acquire the SWFW semaphore through SW_FW_SYNC register. + * Regardless of whether is succeeds or not it then release the semaphore. + * This is function is called to recover from catastrophic failures that + * may have left the semaphore locked. + **/ +void ixgbe_init_swfw_semaphore(struct ixgbe_hw *hw) +{ + if (hw->mac.ops.init_swfw_sync) + hw->mac.ops.init_swfw_sync(hw); +} + + +void ixgbe_disable_rx(struct ixgbe_hw *hw) +{ + if (hw->mac.ops.disable_rx) + hw->mac.ops.disable_rx(hw); +} + +void ixgbe_enable_rx(struct ixgbe_hw *hw) +{ + if (hw->mac.ops.enable_rx) + hw->mac.ops.enable_rx(hw); +} + +/** + * ixgbe_set_rate_select_speed - Set module link speed + * @hw: pointer to hardware structure + * @speed: link speed to set + * + * Set module link speed via the rate select. 
+ */ +void ixgbe_set_rate_select_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed) +{ + if (hw->mac.ops.set_rate_select_speed) + hw->mac.ops.set_rate_select_speed(hw, speed); +} diff --git a/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_api.h b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_api.h new file mode 100644 index 000000000..33e7c3c21 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_api.h @@ -0,0 +1,197 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + +#ifndef _IXGBE_API_H_ +#define _IXGBE_API_H_ + +#include "ixgbe_type.h" + +void ixgbe_dcb_get_rtrup2tc(struct ixgbe_hw *hw, u8 *map); + +s32 ixgbe_init_shared_code(struct ixgbe_hw *hw); + +extern s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw); +extern s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw); +extern s32 ixgbe_init_ops_X540(struct ixgbe_hw *hw); +extern s32 ixgbe_init_ops_X550(struct ixgbe_hw *hw); +extern s32 ixgbe_init_ops_X550EM(struct ixgbe_hw *hw); +extern s32 ixgbe_init_ops_X550EM_x(struct ixgbe_hw *hw); +extern s32 ixgbe_init_ops_X550EM_a(struct ixgbe_hw *hw); +extern s32 ixgbe_init_ops_vf(struct ixgbe_hw *hw); + +s32 ixgbe_set_mac_type(struct ixgbe_hw *hw); +s32 ixgbe_init_hw(struct ixgbe_hw *hw); +s32 ixgbe_reset_hw(struct ixgbe_hw *hw); +s32 ixgbe_start_hw(struct ixgbe_hw *hw); +void ixgbe_enable_relaxed_ordering(struct ixgbe_hw *hw); +s32 ixgbe_clear_hw_cntrs(struct ixgbe_hw *hw); +enum ixgbe_media_type ixgbe_get_media_type(struct ixgbe_hw *hw); +s32 ixgbe_get_mac_addr(struct ixgbe_hw *hw, u8 *mac_addr); +s32 ixgbe_get_bus_info(struct ixgbe_hw *hw); +u32 ixgbe_get_num_of_tx_queues(struct ixgbe_hw *hw); +u32 ixgbe_get_num_of_rx_queues(struct ixgbe_hw *hw); +s32 ixgbe_stop_adapter(struct ixgbe_hw *hw); +s32 ixgbe_read_pba_num(struct ixgbe_hw *hw, u32 *pba_num); +s32 ixgbe_read_pba_string(struct ixgbe_hw *hw, u8 *pba_num, u32 pba_num_size); + +s32 ixgbe_identify_phy(struct ixgbe_hw *hw); +s32 ixgbe_reset_phy(struct ixgbe_hw *hw); +s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, + u16 *phy_data); +s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, + u16 phy_data); + +s32 ixgbe_setup_phy_link(struct ixgbe_hw *hw); +s32 ixgbe_setup_internal_phy(struct ixgbe_hw *hw); +s32 ixgbe_check_phy_link(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *link_up); +s32 ixgbe_setup_phy_link_speed(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete); +s32 ixgbe_set_phy_power(struct ixgbe_hw *, bool on); +void ixgbe_disable_tx_laser(struct ixgbe_hw *hw); +void ixgbe_enable_tx_laser(struct ixgbe_hw *hw); +void ixgbe_flap_tx_laser(struct ixgbe_hw *hw); +s32 ixgbe_setup_link(struct ixgbe_hw *hw, ixgbe_link_speed speed, + bool autoneg_wait_to_complete); +s32 ixgbe_setup_mac_link(struct ixgbe_hw *hw, ixgbe_link_speed speed, + bool autoneg_wait_to_complete); +s32 ixgbe_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed, + bool *link_up, bool link_up_wait_to_complete); +s32 ixgbe_get_link_capabilities(struct ixgbe_hw *hw, ixgbe_link_speed *speed, + bool *autoneg); +s32 ixgbe_led_on(struct ixgbe_hw *hw, u32 index); +s32 ixgbe_led_off(struct ixgbe_hw *hw, u32 index); +s32 ixgbe_blink_led_start(struct ixgbe_hw *hw, u32 index); +s32 ixgbe_blink_led_stop(struct ixgbe_hw *hw, u32 index); + +s32 ixgbe_init_eeprom_params(struct ixgbe_hw *hw); +s32 ixgbe_write_eeprom(struct ixgbe_hw *hw, u16 offset, u16 data); +s32 ixgbe_write_eeprom_buffer(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data); +s32 
ixgbe_read_eeprom(struct ixgbe_hw *hw, u16 offset, u16 *data); +s32 ixgbe_read_eeprom_buffer(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data); + +s32 ixgbe_validate_eeprom_checksum(struct ixgbe_hw *hw, u16 *checksum_val); +s32 ixgbe_update_eeprom_checksum(struct ixgbe_hw *hw); + +s32 ixgbe_insert_mac_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq); +s32 ixgbe_set_rar(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, + u32 enable_addr); +s32 ixgbe_clear_rar(struct ixgbe_hw *hw, u32 index); +s32 ixgbe_set_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq); +s32 ixgbe_set_vmdq_san_mac(struct ixgbe_hw *hw, u32 vmdq); +s32 ixgbe_clear_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq); +s32 ixgbe_init_rx_addrs(struct ixgbe_hw *hw); +u32 ixgbe_get_num_rx_addrs(struct ixgbe_hw *hw); +s32 ixgbe_update_uc_addr_list(struct ixgbe_hw *hw, u8 *addr_list, + u32 addr_count, ixgbe_mc_addr_itr func); +s32 ixgbe_update_mc_addr_list(struct ixgbe_hw *hw, u8 *mc_addr_list, + u32 mc_addr_count, ixgbe_mc_addr_itr func, + bool clear); +void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr_list, u32 vmdq); +s32 ixgbe_enable_mc(struct ixgbe_hw *hw); +s32 ixgbe_disable_mc(struct ixgbe_hw *hw); +s32 ixgbe_clear_vfta(struct ixgbe_hw *hw); +s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan, + u32 vind, bool vlan_on, bool vlvf_bypass); +s32 ixgbe_set_vlvf(struct ixgbe_hw *hw, u32 vlan, u32 vind, + bool vlan_on, u32 *vfta_delta, u32 vfta, + bool vlvf_bypass); +s32 ixgbe_fc_enable(struct ixgbe_hw *hw); +s32 ixgbe_setup_fc(struct ixgbe_hw *hw); +s32 ixgbe_set_fw_drv_ver(struct ixgbe_hw *hw, u8 maj, u8 min, u8 build, + u8 ver, u16 len, char *driver_ver); +s32 ixgbe_get_thermal_sensor_data(struct ixgbe_hw *hw); +s32 ixgbe_init_thermal_sensor_thresh(struct ixgbe_hw *hw); +void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr); +s32 ixgbe_get_phy_firmware_version(struct ixgbe_hw *hw, + u16 *firmware_version); +s32 ixgbe_read_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 *val); +s32 ixgbe_write_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 val); +s32 ixgbe_init_uta_tables(struct ixgbe_hw *hw); +s32 ixgbe_read_i2c_eeprom(struct ixgbe_hw *hw, u8 byte_offset, u8 *eeprom_data); +u64 ixgbe_get_supported_physical_layer(struct ixgbe_hw *hw); +s32 ixgbe_enable_rx_dma(struct ixgbe_hw *hw, u32 regval); +s32 ixgbe_disable_sec_rx_path(struct ixgbe_hw *hw); +s32 ixgbe_enable_sec_rx_path(struct ixgbe_hw *hw); +s32 ixgbe_mng_fw_enabled(struct ixgbe_hw *hw); +s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw); +s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl); +s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl, + bool cloud_mode); +void ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw, + union ixgbe_atr_hash_dword input, + union ixgbe_atr_hash_dword common, + u8 queue); +s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw, + union ixgbe_atr_input *input_mask, bool cloud_mode); +s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw, + union ixgbe_atr_input *input, + u16 soft_id, u8 queue, bool cloud_mode); +s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw, + union ixgbe_atr_input *input, + u16 soft_id); +s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw, + union ixgbe_atr_input *input, + union ixgbe_atr_input *mask, + u16 soft_id, + u8 queue, + bool cloud_mode); +void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input, + union ixgbe_atr_input *mask); +u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input, + union ixgbe_atr_hash_dword 
common); +bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw); +s32 ixgbe_read_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, + u8 *data); +s32 ixgbe_read_i2c_byte_unlocked(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data); +s32 ixgbe_read_link(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 *val); +s32 ixgbe_read_link_unlocked(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 *val); +s32 ixgbe_write_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, + u8 data); +void ixgbe_set_fdir_drop_queue_82599(struct ixgbe_hw *hw, u8 dropqueue); +s32 ixgbe_write_i2c_byte_unlocked(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 data); +s32 ixgbe_write_link(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 val); +s32 ixgbe_write_link_unlocked(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 val); +s32 ixgbe_write_i2c_eeprom(struct ixgbe_hw *hw, u8 byte_offset, u8 eeprom_data); +s32 ixgbe_get_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr); +s32 ixgbe_set_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr); +s32 ixgbe_get_device_caps(struct ixgbe_hw *hw, u16 *device_caps); +s32 ixgbe_acquire_swfw_semaphore(struct ixgbe_hw *hw, u32 mask); +void ixgbe_release_swfw_semaphore(struct ixgbe_hw *hw, u32 mask); +void ixgbe_init_swfw_semaphore(struct ixgbe_hw *hw); +s32 ixgbe_get_wwn_prefix(struct ixgbe_hw *hw, u16 *wwnn_prefix, + u16 *wwpn_prefix); +s32 ixgbe_get_fcoe_boot_status(struct ixgbe_hw *hw, u16 *bs); +s32 ixgbe_dmac_config(struct ixgbe_hw *hw); +s32 ixgbe_dmac_update_tcs(struct ixgbe_hw *hw); +s32 ixgbe_dmac_config_tcs(struct ixgbe_hw *hw); +s32 ixgbe_setup_eee(struct ixgbe_hw *hw, bool enable_eee); +void ixgbe_set_source_address_pruning(struct ixgbe_hw *hw, bool enable, + unsigned int vf); +void ixgbe_set_ethertype_anti_spoofing(struct ixgbe_hw *hw, bool enable, + int vf); +s32 ixgbe_read_iosf_sb_reg(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u32 *phy_data); +s32 ixgbe_write_iosf_sb_reg(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u32 phy_data); +void ixgbe_disable_mdd(struct ixgbe_hw *hw); +void ixgbe_enable_mdd(struct ixgbe_hw *hw); +void ixgbe_mdd_event(struct ixgbe_hw *hw, u32 *vf_bitmap); +void ixgbe_restore_mdd_vf(struct ixgbe_hw *hw, u32 vf); +bool ixgbe_fw_recovery_mode(struct ixgbe_hw *hw); +s32 ixgbe_enter_lplu(struct ixgbe_hw *hw); +s32 ixgbe_handle_lasi(struct ixgbe_hw *hw); +void ixgbe_set_rate_select_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed); +void ixgbe_disable_rx(struct ixgbe_hw *hw); +void ixgbe_enable_rx(struct ixgbe_hw *hw); +s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg, + u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm); + +#endif /* _IXGBE_API_H_ */ diff --git a/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_common.c b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_common.c new file mode 100644 index 000000000..4eb98dc19 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_common.c @@ -0,0 +1,5410 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + +#include "ixgbe_common.h" +#include "ixgbe_phy.h" +#include "ixgbe_dcb.h" +#include "ixgbe_dcb_82599.h" +#include "ixgbe_api.h" + +STATIC s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw); +STATIC s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw); +STATIC void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw); +STATIC s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw); +STATIC void ixgbe_standby_eeprom(struct ixgbe_hw *hw); +STATIC void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data, + u16 count); +STATIC 
u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count); +STATIC void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec); +STATIC void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec); +STATIC void ixgbe_release_eeprom(struct ixgbe_hw *hw); + +STATIC s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr); +STATIC s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw, + u16 *san_mac_offset); +STATIC s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data); +STATIC s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data); +STATIC s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw, + u16 offset); + +/** + * ixgbe_init_ops_generic - Inits function ptrs + * @hw: pointer to the hardware structure + * + * Initialize the function pointers. + **/ +s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw) +{ + struct ixgbe_eeprom_info *eeprom = &hw->eeprom; + struct ixgbe_mac_info *mac = &hw->mac; + u32 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); + + DEBUGFUNC("ixgbe_init_ops_generic"); + + /* EEPROM */ + eeprom->ops.init_params = ixgbe_init_eeprom_params_generic; + /* If EEPROM is valid (bit 8 = 1), use EERD otherwise use bit bang */ + if (eec & IXGBE_EEC_PRES) { + eeprom->ops.read = ixgbe_read_eerd_generic; + eeprom->ops.read_buffer = ixgbe_read_eerd_buffer_generic; + } else { + eeprom->ops.read = ixgbe_read_eeprom_bit_bang_generic; + eeprom->ops.read_buffer = + ixgbe_read_eeprom_buffer_bit_bang_generic; + } + eeprom->ops.write = ixgbe_write_eeprom_generic; + eeprom->ops.write_buffer = ixgbe_write_eeprom_buffer_bit_bang_generic; + eeprom->ops.validate_checksum = + ixgbe_validate_eeprom_checksum_generic; + eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_generic; + eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_generic; + + /* MAC */ + mac->ops.init_hw = ixgbe_init_hw_generic; + mac->ops.reset_hw = NULL; + mac->ops.start_hw = ixgbe_start_hw_generic; + mac->ops.clear_hw_cntrs = ixgbe_clear_hw_cntrs_generic; + mac->ops.get_media_type = NULL; + mac->ops.get_supported_physical_layer = NULL; + mac->ops.enable_rx_dma = ixgbe_enable_rx_dma_generic; + mac->ops.get_mac_addr = ixgbe_get_mac_addr_generic; + mac->ops.stop_adapter = ixgbe_stop_adapter_generic; + mac->ops.get_bus_info = ixgbe_get_bus_info_generic; + mac->ops.set_lan_id = ixgbe_set_lan_id_multi_port_pcie; + mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync; + mac->ops.release_swfw_sync = ixgbe_release_swfw_sync; + mac->ops.prot_autoc_read = prot_autoc_read_generic; + mac->ops.prot_autoc_write = prot_autoc_write_generic; + + /* LEDs */ + mac->ops.led_on = ixgbe_led_on_generic; + mac->ops.led_off = ixgbe_led_off_generic; + mac->ops.blink_led_start = ixgbe_blink_led_start_generic; + mac->ops.blink_led_stop = ixgbe_blink_led_stop_generic; + mac->ops.init_led_link_act = ixgbe_init_led_link_act_generic; + + /* RAR, Multicast, VLAN */ + mac->ops.set_rar = ixgbe_set_rar_generic; + mac->ops.clear_rar = ixgbe_clear_rar_generic; + mac->ops.insert_mac_addr = NULL; + mac->ops.set_vmdq = NULL; + mac->ops.clear_vmdq = NULL; + mac->ops.init_rx_addrs = ixgbe_init_rx_addrs_generic; + mac->ops.update_uc_addr_list = ixgbe_update_uc_addr_list_generic; + mac->ops.update_mc_addr_list = ixgbe_update_mc_addr_list_generic; + mac->ops.enable_mc = ixgbe_enable_mc_generic; + mac->ops.disable_mc = ixgbe_disable_mc_generic; + mac->ops.clear_vfta = NULL; + mac->ops.set_vfta = NULL; + mac->ops.set_vlvf = NULL; + mac->ops.init_uta_tables = NULL; + 
mac->ops.enable_rx = ixgbe_enable_rx_generic; + mac->ops.disable_rx = ixgbe_disable_rx_generic; + + /* Flow Control */ + mac->ops.fc_enable = ixgbe_fc_enable_generic; + mac->ops.setup_fc = ixgbe_setup_fc_generic; + mac->ops.fc_autoneg = ixgbe_fc_autoneg; + + /* Link */ + mac->ops.get_link_capabilities = NULL; + mac->ops.setup_link = NULL; + mac->ops.check_link = NULL; + mac->ops.dmac_config = NULL; + mac->ops.dmac_update_tcs = NULL; + mac->ops.dmac_config_tcs = NULL; + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_device_supports_autoneg_fc - Check if device supports autonegotiation + * of flow control + * @hw: pointer to hardware structure + * + * This function returns true if the device supports flow control + * autonegotiation, and false if it does not. + * + **/ +bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw) +{ + bool supported = false; + ixgbe_link_speed speed; + bool link_up; + + DEBUGFUNC("ixgbe_device_supports_autoneg_fc"); + + switch (hw->phy.media_type) { + case ixgbe_media_type_fiber_qsfp: + case ixgbe_media_type_fiber: + /* flow control autoneg black list */ + switch (hw->device_id) { + case IXGBE_DEV_ID_X550EM_A_SFP: + case IXGBE_DEV_ID_X550EM_A_SFP_N: + case IXGBE_DEV_ID_X550EM_A_QSFP: + case IXGBE_DEV_ID_X550EM_A_QSFP_N: + supported = false; + break; + default: + hw->mac.ops.check_link(hw, &speed, &link_up, false); + /* if link is down, assume supported */ + if (link_up) + supported = speed == IXGBE_LINK_SPEED_1GB_FULL ? + true : false; + else + supported = true; + } + + break; + case ixgbe_media_type_backplane: + if (hw->device_id == IXGBE_DEV_ID_X550EM_X_XFI) + supported = false; + else + supported = true; + break; + case ixgbe_media_type_copper: + /* only some copper devices support flow control autoneg */ + switch (hw->device_id) { + case IXGBE_DEV_ID_82599_T3_LOM: + case IXGBE_DEV_ID_X540T: + case IXGBE_DEV_ID_X540T1: + case IXGBE_DEV_ID_X550T: + case IXGBE_DEV_ID_X550T1: + case IXGBE_DEV_ID_X550EM_X_10G_T: + case IXGBE_DEV_ID_X550EM_A_10G_T: + case IXGBE_DEV_ID_X550EM_A_1G_T: + case IXGBE_DEV_ID_X550EM_A_1G_T_L: + supported = true; + break; + default: + supported = false; + } + default: + break; + } + + if (!supported) + ERROR_REPORT2(IXGBE_ERROR_UNSUPPORTED, + "Device %x does not support flow control autoneg", + hw->device_id); + return supported; +} + +/** + * ixgbe_setup_fc_generic - Set up flow control + * @hw: pointer to hardware structure + * + * Called at init time to set up flow control. + **/ +s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw) +{ + s32 ret_val = IXGBE_SUCCESS; + u32 reg = 0, reg_bp = 0; + u16 reg_cu = 0; + bool locked = false; + + DEBUGFUNC("ixgbe_setup_fc_generic"); + + /* Validate the requested mode */ + if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) { + ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED, + "ixgbe_fc_rx_pause not valid in strict IEEE mode\n"); + ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; + goto out; + } + + /* + * 10gig parts do not have a word in the EEPROM to determine the + * default flow control setting, so we explicitly set it to full. + */ + if (hw->fc.requested_mode == ixgbe_fc_default) + hw->fc.requested_mode = ixgbe_fc_full; + + /* + * Set up the 1G and 10G flow control advertisement registers so the + * HW will be able to do fc autoneg once the cable is plugged in. If + * we link at 10G, the 1G advertisement is harmless and vice versa. 
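+ *
+ * Which register carries that advertisement depends on the media type
+ * handled in the switch below: fiber parts use PCS1GANA, backplane
+ * parts additionally use AUTOC (read here under read-modify-write
+ * protection), and copper PHYs use the MDIO auto-negotiation
+ * advertisement register.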
+ */ + switch (hw->phy.media_type) { + case ixgbe_media_type_backplane: + /* some MAC's need RMW protection on AUTOC */ + ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, ®_bp); + if (ret_val != IXGBE_SUCCESS) + goto out; + + /* fall through - only backplane uses autoc */ + case ixgbe_media_type_fiber_qsfp: + case ixgbe_media_type_fiber: + reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); + + break; + case ixgbe_media_type_copper: + hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ®_cu); + break; + default: + break; + } + + /* + * The possible values of fc.requested_mode are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames, + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames but + * we do not support receiving pause frames). + * 3: Both Rx and Tx flow control (symmetric) are enabled. + * other: Invalid. + */ + switch (hw->fc.requested_mode) { + case ixgbe_fc_none: + /* Flow control completely disabled by software override. */ + reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE); + if (hw->phy.media_type == ixgbe_media_type_backplane) + reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE | + IXGBE_AUTOC_ASM_PAUSE); + else if (hw->phy.media_type == ixgbe_media_type_copper) + reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE); + break; + case ixgbe_fc_tx_pause: + /* + * Tx Flow control is enabled, and Rx Flow control is + * disabled by software override. + */ + reg |= IXGBE_PCS1GANA_ASM_PAUSE; + reg &= ~IXGBE_PCS1GANA_SYM_PAUSE; + if (hw->phy.media_type == ixgbe_media_type_backplane) { + reg_bp |= IXGBE_AUTOC_ASM_PAUSE; + reg_bp &= ~IXGBE_AUTOC_SYM_PAUSE; + } else if (hw->phy.media_type == ixgbe_media_type_copper) { + reg_cu |= IXGBE_TAF_ASM_PAUSE; + reg_cu &= ~IXGBE_TAF_SYM_PAUSE; + } + break; + case ixgbe_fc_rx_pause: + /* + * Rx Flow control is enabled and Tx Flow control is + * disabled by software override. Since there really + * isn't a way to advertise that we are capable of RX + * Pause ONLY, we will advertise that we support both + * symmetric and asymmetric Rx PAUSE, as such we fall + * through to the fc_full statement. Later, we will + * disable the adapter's ability to send PAUSE frames. + */ + case ixgbe_fc_full: + /* Flow control (both Rx and Tx) is enabled by SW override. */ + reg |= IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE; + if (hw->phy.media_type == ixgbe_media_type_backplane) + reg_bp |= IXGBE_AUTOC_SYM_PAUSE | + IXGBE_AUTOC_ASM_PAUSE; + else if (hw->phy.media_type == ixgbe_media_type_copper) + reg_cu |= IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE; + break; + default: + ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, + "Flow control param set incorrectly\n"); + ret_val = IXGBE_ERR_CONFIG; + goto out; + break; + } + + if (hw->mac.type < ixgbe_mac_X540) { + /* + * Enable auto-negotiation between the MAC & PHY; + * the MAC will advertise clause 37 flow control. + */ + IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg); + reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL); + + /* Disable AN timeout */ + if (hw->fc.strict_ieee) + reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN; + + IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg); + DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg); + } + + /* + * AUTOC restart handles negotiation of 1G and 10G on backplane + * and copper. There is no need to set the PCS1GCTL register. 
+ * + */ + if (hw->phy.media_type == ixgbe_media_type_backplane) { + reg_bp |= IXGBE_AUTOC_AN_RESTART; + ret_val = hw->mac.ops.prot_autoc_write(hw, reg_bp, locked); + if (ret_val) + goto out; + } else if ((hw->phy.media_type == ixgbe_media_type_copper) && + (ixgbe_device_supports_autoneg_fc(hw))) { + hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg_cu); + } + + DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg); +out: + return ret_val; +} + +/** + * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx + * @hw: pointer to hardware structure + * + * Starts the hardware by filling the bus info structure and media type, clears + * all on chip counters, initializes receive address registers, multicast + * table, VLAN filter table, calls routine to set up link and flow control + * settings, and leaves transmit and receive units disabled and uninitialized + **/ +s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw) +{ + s32 ret_val; + u32 ctrl_ext; + u16 device_caps; + + DEBUGFUNC("ixgbe_start_hw_generic"); + + /* Set the media type */ + hw->phy.media_type = hw->mac.ops.get_media_type(hw); + + /* PHY ops initialization must be done in reset_hw() */ + + /* Clear the VLAN filter table */ + hw->mac.ops.clear_vfta(hw); + + /* Clear statistics registers */ + hw->mac.ops.clear_hw_cntrs(hw); + + /* Set No Snoop Disable */ + ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); + ctrl_ext |= IXGBE_CTRL_EXT_NS_DIS; + IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); + IXGBE_WRITE_FLUSH(hw); + + /* Setup flow control */ + ret_val = ixgbe_setup_fc(hw); + if (ret_val != IXGBE_SUCCESS && ret_val != IXGBE_NOT_IMPLEMENTED) { + DEBUGOUT1("Flow control setup failed, returning %d\n", ret_val); + return ret_val; + } + + /* Cache bit indicating need for crosstalk fix */ + switch (hw->mac.type) { + case ixgbe_mac_82599EB: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + hw->mac.ops.get_device_caps(hw, &device_caps); + if (device_caps & IXGBE_DEVICE_CAPS_NO_CROSSTALK_WR) + hw->need_crosstalk_fix = false; + else + hw->need_crosstalk_fix = true; + break; + default: + hw->need_crosstalk_fix = false; + break; + } + + /* Clear adapter stopped flag */ + hw->adapter_stopped = false; + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_start_hw_gen2 - Init sequence for common device family + * @hw: pointer to hw structure + * + * Performs the init sequence common to the second generation + * of 10 GbE devices. 
+ * Devices in the second generation: + * 82599 + * X540 + **/ +s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw) +{ + u32 i; + u32 regval; + + /* Clear the rate limiters */ + for (i = 0; i < hw->mac.max_tx_queues; i++) { + IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i); + IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0); + } + IXGBE_WRITE_FLUSH(hw); + + /* Disable relaxed ordering */ + for (i = 0; i < hw->mac.max_tx_queues; i++) { + regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i)); + regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN; + IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval); + } + + for (i = 0; i < hw->mac.max_rx_queues; i++) { + regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); + regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN | + IXGBE_DCA_RXCTRL_HEAD_WRO_EN); + IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval); + } + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_init_hw_generic - Generic hardware initialization + * @hw: pointer to hardware structure + * + * Initialize the hardware by resetting the hardware, filling the bus info + * structure and media type, clears all on chip counters, initializes receive + * address registers, multicast table, VLAN filter table, calls routine to set + * up link and flow control settings, and leaves transmit and receive units + * disabled and uninitialized + **/ +s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw) +{ + s32 status; + + DEBUGFUNC("ixgbe_init_hw_generic"); + + /* Reset the hardware */ + status = hw->mac.ops.reset_hw(hw); + + if (status == IXGBE_SUCCESS || status == IXGBE_ERR_SFP_NOT_PRESENT) { + /* Start the HW */ + status = hw->mac.ops.start_hw(hw); + } + + /* Initialize the LED link active for LED blink support */ + if (hw->mac.ops.init_led_link_act) + hw->mac.ops.init_led_link_act(hw); + + if (status != IXGBE_SUCCESS) + DEBUGOUT1("Failed to initialize HW, STATUS = %d\n", status); + + return status; +} + +/** + * ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters + * @hw: pointer to hardware structure + * + * Clears all hardware statistics counters by reading them from the hardware + * Statistics counters are clear on read. 
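+ * The register reads below therefore deliberately discard their return
+ * values; reading each counter is what resets it to zero.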
+ **/ +s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw) +{ + u16 i = 0; + + DEBUGFUNC("ixgbe_clear_hw_cntrs_generic"); + + IXGBE_READ_REG(hw, IXGBE_CRCERRS); + IXGBE_READ_REG(hw, IXGBE_ILLERRC); + IXGBE_READ_REG(hw, IXGBE_ERRBC); + IXGBE_READ_REG(hw, IXGBE_MSPDC); + for (i = 0; i < 8; i++) + IXGBE_READ_REG(hw, IXGBE_MPC(i)); + + IXGBE_READ_REG(hw, IXGBE_MLFC); + IXGBE_READ_REG(hw, IXGBE_MRFC); + IXGBE_READ_REG(hw, IXGBE_RLEC); + IXGBE_READ_REG(hw, IXGBE_LXONTXC); + IXGBE_READ_REG(hw, IXGBE_LXOFFTXC); + if (hw->mac.type >= ixgbe_mac_82599EB) { + IXGBE_READ_REG(hw, IXGBE_LXONRXCNT); + IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT); + } else { + IXGBE_READ_REG(hw, IXGBE_LXONRXC); + IXGBE_READ_REG(hw, IXGBE_LXOFFRXC); + } + + for (i = 0; i < 8; i++) { + IXGBE_READ_REG(hw, IXGBE_PXONTXC(i)); + IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i)); + if (hw->mac.type >= ixgbe_mac_82599EB) { + IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i)); + IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i)); + } else { + IXGBE_READ_REG(hw, IXGBE_PXONRXC(i)); + IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i)); + } + } + if (hw->mac.type >= ixgbe_mac_82599EB) + for (i = 0; i < 8; i++) + IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i)); + IXGBE_READ_REG(hw, IXGBE_PRC64); + IXGBE_READ_REG(hw, IXGBE_PRC127); + IXGBE_READ_REG(hw, IXGBE_PRC255); + IXGBE_READ_REG(hw, IXGBE_PRC511); + IXGBE_READ_REG(hw, IXGBE_PRC1023); + IXGBE_READ_REG(hw, IXGBE_PRC1522); + IXGBE_READ_REG(hw, IXGBE_GPRC); + IXGBE_READ_REG(hw, IXGBE_BPRC); + IXGBE_READ_REG(hw, IXGBE_MPRC); + IXGBE_READ_REG(hw, IXGBE_GPTC); + IXGBE_READ_REG(hw, IXGBE_GORCL); + IXGBE_READ_REG(hw, IXGBE_GORCH); + IXGBE_READ_REG(hw, IXGBE_GOTCL); + IXGBE_READ_REG(hw, IXGBE_GOTCH); + if (hw->mac.type == ixgbe_mac_82598EB) + for (i = 0; i < 8; i++) + IXGBE_READ_REG(hw, IXGBE_RNBC(i)); + IXGBE_READ_REG(hw, IXGBE_RUC); + IXGBE_READ_REG(hw, IXGBE_RFC); + IXGBE_READ_REG(hw, IXGBE_ROC); + IXGBE_READ_REG(hw, IXGBE_RJC); + IXGBE_READ_REG(hw, IXGBE_MNGPRC); + IXGBE_READ_REG(hw, IXGBE_MNGPDC); + IXGBE_READ_REG(hw, IXGBE_MNGPTC); + IXGBE_READ_REG(hw, IXGBE_TORL); + IXGBE_READ_REG(hw, IXGBE_TORH); + IXGBE_READ_REG(hw, IXGBE_TPR); + IXGBE_READ_REG(hw, IXGBE_TPT); + IXGBE_READ_REG(hw, IXGBE_PTC64); + IXGBE_READ_REG(hw, IXGBE_PTC127); + IXGBE_READ_REG(hw, IXGBE_PTC255); + IXGBE_READ_REG(hw, IXGBE_PTC511); + IXGBE_READ_REG(hw, IXGBE_PTC1023); + IXGBE_READ_REG(hw, IXGBE_PTC1522); + IXGBE_READ_REG(hw, IXGBE_MPTC); + IXGBE_READ_REG(hw, IXGBE_BPTC); + for (i = 0; i < 16; i++) { + IXGBE_READ_REG(hw, IXGBE_QPRC(i)); + IXGBE_READ_REG(hw, IXGBE_QPTC(i)); + if (hw->mac.type >= ixgbe_mac_82599EB) { + IXGBE_READ_REG(hw, IXGBE_QBRC_L(i)); + IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)); + IXGBE_READ_REG(hw, IXGBE_QBTC_L(i)); + IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)); + IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); + } else { + IXGBE_READ_REG(hw, IXGBE_QBRC(i)); + IXGBE_READ_REG(hw, IXGBE_QBTC(i)); + } + } + + if (hw->mac.type == ixgbe_mac_X550 || hw->mac.type == ixgbe_mac_X540) { + if (hw->phy.id == 0) + ixgbe_identify_phy(hw); + hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL, + IXGBE_MDIO_PCS_DEV_TYPE, &i); + hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH, + IXGBE_MDIO_PCS_DEV_TYPE, &i); + hw->phy.ops.read_reg(hw, IXGBE_LDPCECL, + IXGBE_MDIO_PCS_DEV_TYPE, &i); + hw->phy.ops.read_reg(hw, IXGBE_LDPCECH, + IXGBE_MDIO_PCS_DEV_TYPE, &i); + } + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_read_pba_string_generic - Reads part number string from EEPROM + * @hw: pointer to hardware structure + * @pba_num: stores the part number string from the EEPROM + * @pba_num_size: part number string buffer length + * + 
* Reads the part number string from the EEPROM. + **/ +s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num, + u32 pba_num_size) +{ + s32 ret_val; + u16 data; + u16 pba_ptr; + u16 offset; + u16 length; + + DEBUGFUNC("ixgbe_read_pba_string_generic"); + + if (pba_num == NULL) { + DEBUGOUT("PBA string buffer was null\n"); + return IXGBE_ERR_INVALID_ARGUMENT; + } + + ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + + ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pba_ptr); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + + /* + * if data is not ptr guard the PBA must be in legacy format which + * means pba_ptr is actually our second data word for the PBA number + * and we can decode it into an ascii string + */ + if (data != IXGBE_PBANUM_PTR_GUARD) { + DEBUGOUT("NVM PBA number is not stored as string\n"); + + /* we will need 11 characters to store the PBA */ + if (pba_num_size < 11) { + DEBUGOUT("PBA string buffer too small\n"); + return IXGBE_ERR_NO_SPACE; + } + + /* extract hex string from data and pba_ptr */ + pba_num[0] = (data >> 12) & 0xF; + pba_num[1] = (data >> 8) & 0xF; + pba_num[2] = (data >> 4) & 0xF; + pba_num[3] = data & 0xF; + pba_num[4] = (pba_ptr >> 12) & 0xF; + pba_num[5] = (pba_ptr >> 8) & 0xF; + pba_num[6] = '-'; + pba_num[7] = 0; + pba_num[8] = (pba_ptr >> 4) & 0xF; + pba_num[9] = pba_ptr & 0xF; + + /* put a null character on the end of our string */ + pba_num[10] = '\0'; + + /* switch all the data but the '-' to hex char */ + for (offset = 0; offset < 10; offset++) { + if (pba_num[offset] < 0xA) + pba_num[offset] += '0'; + else if (pba_num[offset] < 0x10) + pba_num[offset] += 'A' - 0xA; + } + + return IXGBE_SUCCESS; + } + + ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + + if (length == 0xFFFF || length == 0) { + DEBUGOUT("NVM PBA number section invalid length\n"); + return IXGBE_ERR_PBA_SECTION; + } + + /* check if pba_num buffer is big enough */ + if (pba_num_size < (((u32)length * 2) - 1)) { + DEBUGOUT("PBA string buffer too small\n"); + return IXGBE_ERR_NO_SPACE; + } + + /* trim pba length from start of string */ + pba_ptr++; + length--; + + for (offset = 0; offset < length; offset++) { + ret_val = hw->eeprom.ops.read(hw, pba_ptr + offset, &data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + pba_num[offset * 2] = (u8)(data >> 8); + pba_num[(offset * 2) + 1] = (u8)(data & 0xFF); + } + pba_num[offset * 2] = '\0'; + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_read_pba_num_generic - Reads part number from EEPROM + * @hw: pointer to hardware structure + * @pba_num: stores the part number from the EEPROM + * + * Reads the part number from the EEPROM. 
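+ *
+ * Usage sketch (the local variable is illustrative only):
+ *
+ *   u32 pba;
+ *
+ *   if (ixgbe_read_pba_num_generic(hw, &pba) == IXGBE_SUCCESS)
+ *           DEBUGOUT1("PBA number: 0x%08x\n", pba);
+ *
+ * The upper 16 bits come from the word at IXGBE_PBANUM0_PTR and the
+ * lower 16 bits from the word at IXGBE_PBANUM1_PTR.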
+ **/ +s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num) +{ + s32 ret_val; + u16 data; + + DEBUGFUNC("ixgbe_read_pba_num_generic"); + + ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } else if (data == IXGBE_PBANUM_PTR_GUARD) { + DEBUGOUT("NVM Not supported\n"); + return IXGBE_NOT_IMPLEMENTED; + } + *pba_num = (u32)(data << 16); + + ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + *pba_num |= data; + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_read_pba_raw + * @hw: pointer to the HW structure + * @eeprom_buf: optional pointer to EEPROM image + * @eeprom_buf_size: size of EEPROM image in words + * @max_pba_block_size: PBA block size limit + * @pba: pointer to output PBA structure + * + * Reads PBA from EEPROM image when eeprom_buf is not NULL. + * Reads PBA from physical EEPROM device when eeprom_buf is NULL. + * + **/ +s32 ixgbe_read_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf, + u32 eeprom_buf_size, u16 max_pba_block_size, + struct ixgbe_pba *pba) +{ + s32 ret_val; + u16 pba_block_size; + + if (pba == NULL) + return IXGBE_ERR_PARAM; + + if (eeprom_buf == NULL) { + ret_val = hw->eeprom.ops.read_buffer(hw, IXGBE_PBANUM0_PTR, 2, + &pba->word[0]); + if (ret_val) + return ret_val; + } else { + if (eeprom_buf_size > IXGBE_PBANUM1_PTR) { + pba->word[0] = eeprom_buf[IXGBE_PBANUM0_PTR]; + pba->word[1] = eeprom_buf[IXGBE_PBANUM1_PTR]; + } else { + return IXGBE_ERR_PARAM; + } + } + + if (pba->word[0] == IXGBE_PBANUM_PTR_GUARD) { + if (pba->pba_block == NULL) + return IXGBE_ERR_PARAM; + + ret_val = ixgbe_get_pba_block_size(hw, eeprom_buf, + eeprom_buf_size, + &pba_block_size); + if (ret_val) + return ret_val; + + if (pba_block_size > max_pba_block_size) + return IXGBE_ERR_PARAM; + + if (eeprom_buf == NULL) { + ret_val = hw->eeprom.ops.read_buffer(hw, pba->word[1], + pba_block_size, + pba->pba_block); + if (ret_val) + return ret_val; + } else { + if (eeprom_buf_size > (u32)(pba->word[1] + + pba_block_size)) { + memcpy(pba->pba_block, + &eeprom_buf[pba->word[1]], + pba_block_size * sizeof(u16)); + } else { + return IXGBE_ERR_PARAM; + } + } + } + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_write_pba_raw + * @hw: pointer to the HW structure + * @eeprom_buf: optional pointer to EEPROM image + * @eeprom_buf_size: size of EEPROM image in words + * @pba: pointer to PBA structure + * + * Writes PBA to EEPROM image when eeprom_buf is not NULL. + * Writes PBA to physical EEPROM device when eeprom_buf is NULL. 
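+ *
+ * A round-trip sketch (the block buffer and its size are illustrative
+ * assumptions, not requirements of this API):
+ *
+ *   struct ixgbe_pba pba;
+ *   u16 block[32];
+ *
+ *   pba.pba_block = block;
+ *   if (ixgbe_read_pba_raw(hw, NULL, 0, 32, &pba) == IXGBE_SUCCESS)
+ *           ixgbe_write_pba_raw(hw, NULL, 0, &pba);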
+ * + **/ +s32 ixgbe_write_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf, + u32 eeprom_buf_size, struct ixgbe_pba *pba) +{ + s32 ret_val; + + if (pba == NULL) + return IXGBE_ERR_PARAM; + + if (eeprom_buf == NULL) { + ret_val = hw->eeprom.ops.write_buffer(hw, IXGBE_PBANUM0_PTR, 2, + &pba->word[0]); + if (ret_val) + return ret_val; + } else { + if (eeprom_buf_size > IXGBE_PBANUM1_PTR) { + eeprom_buf[IXGBE_PBANUM0_PTR] = pba->word[0]; + eeprom_buf[IXGBE_PBANUM1_PTR] = pba->word[1]; + } else { + return IXGBE_ERR_PARAM; + } + } + + if (pba->word[0] == IXGBE_PBANUM_PTR_GUARD) { + if (pba->pba_block == NULL) + return IXGBE_ERR_PARAM; + + if (eeprom_buf == NULL) { + ret_val = hw->eeprom.ops.write_buffer(hw, pba->word[1], + pba->pba_block[0], + pba->pba_block); + if (ret_val) + return ret_val; + } else { + if (eeprom_buf_size > (u32)(pba->word[1] + + pba->pba_block[0])) { + memcpy(&eeprom_buf[pba->word[1]], + pba->pba_block, + pba->pba_block[0] * sizeof(u16)); + } else { + return IXGBE_ERR_PARAM; + } + } + } + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_get_pba_block_size + * @hw: pointer to the HW structure + * @eeprom_buf: optional pointer to EEPROM image + * @eeprom_buf_size: size of EEPROM image in words + * @pba_data_size: pointer to output variable + * + * Returns the size of the PBA block in words. Function operates on EEPROM + * image if the eeprom_buf pointer is not NULL otherwise it accesses physical + * EEPROM device. + * + **/ +s32 ixgbe_get_pba_block_size(struct ixgbe_hw *hw, u16 *eeprom_buf, + u32 eeprom_buf_size, u16 *pba_block_size) +{ + s32 ret_val; + u16 pba_word[2]; + u16 length; + + DEBUGFUNC("ixgbe_get_pba_block_size"); + + if (eeprom_buf == NULL) { + ret_val = hw->eeprom.ops.read_buffer(hw, IXGBE_PBANUM0_PTR, 2, + &pba_word[0]); + if (ret_val) + return ret_val; + } else { + if (eeprom_buf_size > IXGBE_PBANUM1_PTR) { + pba_word[0] = eeprom_buf[IXGBE_PBANUM0_PTR]; + pba_word[1] = eeprom_buf[IXGBE_PBANUM1_PTR]; + } else { + return IXGBE_ERR_PARAM; + } + } + + if (pba_word[0] == IXGBE_PBANUM_PTR_GUARD) { + if (eeprom_buf == NULL) { + ret_val = hw->eeprom.ops.read(hw, pba_word[1] + 0, + &length); + if (ret_val) + return ret_val; + } else { + if (eeprom_buf_size > pba_word[1]) + length = eeprom_buf[pba_word[1] + 0]; + else + return IXGBE_ERR_PARAM; + } + + if (length == 0xFFFF || length == 0) + return IXGBE_ERR_PBA_SECTION; + } else { + /* PBA number in legacy format, there is no PBA Block. 
*/ + length = 0; + } + + if (pba_block_size != NULL) + *pba_block_size = length; + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_get_mac_addr_generic - Generic get MAC address + * @hw: pointer to hardware structure + * @mac_addr: Adapter MAC address + * + * Reads the adapter's MAC address from first Receive Address Register (RAR0) + * A reset of the adapter must be performed prior to calling this function + * in order for the MAC address to have been loaded from the EEPROM into RAR0 + **/ +s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr) +{ + u32 rar_high; + u32 rar_low; + u16 i; + + DEBUGFUNC("ixgbe_get_mac_addr_generic"); + + rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(0)); + rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(0)); + + for (i = 0; i < 4; i++) + mac_addr[i] = (u8)(rar_low >> (i*8)); + + for (i = 0; i < 2; i++) + mac_addr[i+4] = (u8)(rar_high >> (i*8)); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_set_pci_config_data_generic - Generic store PCI bus info + * @hw: pointer to hardware structure + * @link_status: the link status returned by the PCI config space + * + * Stores the PCI bus info (speed, width, type) within the ixgbe_hw structure + **/ +void ixgbe_set_pci_config_data_generic(struct ixgbe_hw *hw, u16 link_status) +{ + struct ixgbe_mac_info *mac = &hw->mac; + + if (hw->bus.type == ixgbe_bus_type_unknown) + hw->bus.type = ixgbe_bus_type_pci_express; + + switch (link_status & IXGBE_PCI_LINK_WIDTH) { + case IXGBE_PCI_LINK_WIDTH_1: + hw->bus.width = ixgbe_bus_width_pcie_x1; + break; + case IXGBE_PCI_LINK_WIDTH_2: + hw->bus.width = ixgbe_bus_width_pcie_x2; + break; + case IXGBE_PCI_LINK_WIDTH_4: + hw->bus.width = ixgbe_bus_width_pcie_x4; + break; + case IXGBE_PCI_LINK_WIDTH_8: + hw->bus.width = ixgbe_bus_width_pcie_x8; + break; + default: + hw->bus.width = ixgbe_bus_width_unknown; + break; + } + + switch (link_status & IXGBE_PCI_LINK_SPEED) { + case IXGBE_PCI_LINK_SPEED_2500: + hw->bus.speed = ixgbe_bus_speed_2500; + break; + case IXGBE_PCI_LINK_SPEED_5000: + hw->bus.speed = ixgbe_bus_speed_5000; + break; + case IXGBE_PCI_LINK_SPEED_8000: + hw->bus.speed = ixgbe_bus_speed_8000; + break; + default: + hw->bus.speed = ixgbe_bus_speed_unknown; + break; + } + + mac->ops.set_lan_id(hw); +} + +/** + * ixgbe_get_bus_info_generic - Generic set PCI bus info + * @hw: pointer to hardware structure + * + * Gets the PCI bus info (speed, width, type) then calls helper function to + * store this data within the ixgbe_hw structure. + **/ +s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw) +{ + u16 link_status; + + DEBUGFUNC("ixgbe_get_bus_info_generic"); + + /* Get the negotiated link width and speed from PCI config space */ + link_status = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_LINK_STATUS); + + ixgbe_set_pci_config_data_generic(hw, link_status); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices + * @hw: pointer to the HW structure + * + * Determines the LAN function id by reading memory-mapped registers and swaps + * the port value if requested, and set MAC instance for devices that share + * CS4227. 
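+ *
+ * bus->lan_id always holds the LAN ID reported by the STATUS register,
+ * while bus->func is additionally XORed with 1 when the LAN function
+ * select bit in FACTPS indicates that the ports are swapped.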
+ **/ +void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw) +{ + struct ixgbe_bus_info *bus = &hw->bus; + u32 reg; + u16 ee_ctrl_4; + + DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie"); + + reg = IXGBE_READ_REG(hw, IXGBE_STATUS); + bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT; + bus->lan_id = (u8)bus->func; + + /* check for a port swap */ + reg = IXGBE_READ_REG(hw, IXGBE_FACTPS_BY_MAC(hw)); + if (reg & IXGBE_FACTPS_LFS) + bus->func ^= 0x1; + + /* Get MAC instance from EEPROM for configuring CS4227 */ + if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP) { + hw->eeprom.ops.read(hw, IXGBE_EEPROM_CTRL_4, &ee_ctrl_4); + bus->instance_id = (ee_ctrl_4 & IXGBE_EE_CTRL_4_INST_ID) >> + IXGBE_EE_CTRL_4_INST_ID_SHIFT; + } +} + +/** + * ixgbe_stop_adapter_generic - Generic stop Tx/Rx units + * @hw: pointer to hardware structure + * + * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts, + * disables transmit and receive units. The adapter_stopped flag is used by + * the shared code and drivers to determine if the adapter is in a stopped + * state and should not touch the hardware. + **/ +s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw) +{ + u32 reg_val; + u16 i; + + DEBUGFUNC("ixgbe_stop_adapter_generic"); + + /* + * Set the adapter_stopped flag so other driver functions stop touching + * the hardware + */ + hw->adapter_stopped = true; + + /* Disable the receive unit */ + ixgbe_disable_rx(hw); + + /* Clear interrupt mask to stop interrupts from being generated */ + IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK); + + /* Clear any pending interrupts, flush previous writes */ + IXGBE_READ_REG(hw, IXGBE_EICR); + + /* Disable the transmit unit. Each queue must be disabled. */ + for (i = 0; i < hw->mac.max_tx_queues; i++) + IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), IXGBE_TXDCTL_SWFLSH); + + /* Disable the receive unit by stopping each queue */ + for (i = 0; i < hw->mac.max_rx_queues; i++) { + reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)); + reg_val &= ~IXGBE_RXDCTL_ENABLE; + reg_val |= IXGBE_RXDCTL_SWFLSH; + IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val); + } + + /* flush all queues disables */ + IXGBE_WRITE_FLUSH(hw); + msec_delay(2); + + /* + * Prevent the PCI-E bus from hanging by disabling PCI-E master + * access and verify no pending requests + */ + return ixgbe_disable_pcie_master(hw); +} + +/** + * ixgbe_init_led_link_act_generic - Store the LED index link/activity. + * @hw: pointer to hardware structure + * + * Store the index for the link active LED. This will be used to support + * blinking the LED. + **/ +s32 ixgbe_init_led_link_act_generic(struct ixgbe_hw *hw) +{ + struct ixgbe_mac_info *mac = &hw->mac; + u32 led_reg, led_mode; + u8 i; + + led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); + + /* Get LED link active from the LEDCTL register */ + for (i = 0; i < 4; i++) { + led_mode = led_reg >> IXGBE_LED_MODE_SHIFT(i); + + if ((led_mode & IXGBE_LED_MODE_MASK_BASE) == + IXGBE_LED_LINK_ACTIVE) { + mac->led_link_act = i; + return IXGBE_SUCCESS; + } + } + + /* + * If LEDCTL register does not have the LED link active set, then use + * known MAC defaults. + */ + switch (hw->mac.type) { + case ixgbe_mac_X550EM_a: + case ixgbe_mac_X550EM_x: + mac->led_link_act = 1; + break; + default: + mac->led_link_act = 2; + } + return IXGBE_SUCCESS; +} + +/** + * ixgbe_led_on_generic - Turns on the software controllable LEDs. 
+ * @hw: pointer to hardware structure + * @index: led number to turn on + **/ +s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index) +{ + u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); + + DEBUGFUNC("ixgbe_led_on_generic"); + + if (index > 3) + return IXGBE_ERR_PARAM; + + /* To turn on the LED, set mode to ON. */ + led_reg &= ~IXGBE_LED_MODE_MASK(index); + led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index); + IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); + IXGBE_WRITE_FLUSH(hw); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_led_off_generic - Turns off the software controllable LEDs. + * @hw: pointer to hardware structure + * @index: led number to turn off + **/ +s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index) +{ + u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); + + DEBUGFUNC("ixgbe_led_off_generic"); + + if (index > 3) + return IXGBE_ERR_PARAM; + + /* To turn off the LED, set mode to OFF. */ + led_reg &= ~IXGBE_LED_MODE_MASK(index); + led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index); + IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); + IXGBE_WRITE_FLUSH(hw); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_init_eeprom_params_generic - Initialize EEPROM params + * @hw: pointer to hardware structure + * + * Initializes the EEPROM parameters ixgbe_eeprom_info within the + * ixgbe_hw struct in order to set up EEPROM access. + **/ +s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw) +{ + struct ixgbe_eeprom_info *eeprom = &hw->eeprom; + u32 eec; + u16 eeprom_size; + + DEBUGFUNC("ixgbe_init_eeprom_params_generic"); + + if (eeprom->type == ixgbe_eeprom_uninitialized) { + eeprom->type = ixgbe_eeprom_none; + /* Set default semaphore delay to 10ms which is a well + * tested value */ + eeprom->semaphore_delay = 10; + /* Clear EEPROM page size, it will be initialized as needed */ + eeprom->word_page_size = 0; + + /* + * Check for EEPROM present first. + * If not present leave as none + */ + eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); + if (eec & IXGBE_EEC_PRES) { + eeprom->type = ixgbe_eeprom_spi; + + /* + * SPI EEPROM is assumed here. This code would need to + * change if a future EEPROM is not SPI. + */ + eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >> + IXGBE_EEC_SIZE_SHIFT); + eeprom->word_size = 1 << (eeprom_size + + IXGBE_EEPROM_WORD_SIZE_SHIFT); + } + + if (eec & IXGBE_EEC_ADDR_SIZE) + eeprom->address_bits = 16; + else + eeprom->address_bits = 8; + DEBUGOUT3("Eeprom params: type = %d, size = %d, address bits: " + "%d\n", eeprom->type, eeprom->word_size, + eeprom->address_bits); + } + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_write_eeprom_buffer_bit_bang_generic - Write EEPROM using bit-bang + * @hw: pointer to hardware structure + * @offset: offset within the EEPROM to write + * @words: number of word(s) + * @data: 16 bit word(s) to write to EEPROM + * + * Reads 16 bit word(s) from EEPROM through bit-bang method + **/ +s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data) +{ + s32 status = IXGBE_SUCCESS; + u16 i, count; + + DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang_generic"); + + hw->eeprom.ops.init_params(hw); + + if (words == 0) { + status = IXGBE_ERR_INVALID_ARGUMENT; + goto out; + } + + if (offset + words > hw->eeprom.word_size) { + status = IXGBE_ERR_EEPROM; + goto out; + } + + /* + * The EEPROM page size cannot be queried from the chip. We do lazy + * initialization. It is worth to do that when we write large buffer. 
+ */ + if ((hw->eeprom.word_page_size == 0) && + (words > IXGBE_EEPROM_PAGE_SIZE_MAX)) + ixgbe_detect_eeprom_page_size_generic(hw, offset); + + /* + * We cannot hold synchronization semaphores for too long + * to avoid other entity starvation. However it is more efficient + * to read in bursts than synchronizing access for each word. + */ + for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) { + count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ? + IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i); + status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset + i, + count, &data[i]); + + if (status != IXGBE_SUCCESS) + break; + } + +out: + return status; +} + +/** + * ixgbe_write_eeprom_buffer_bit_bang - Writes 16 bit word(s) to EEPROM + * @hw: pointer to hardware structure + * @offset: offset within the EEPROM to be written to + * @words: number of word(s) + * @data: 16 bit word(s) to be written to the EEPROM + * + * If ixgbe_eeprom_update_checksum is not called after this function, the + * EEPROM will most likely contain an invalid checksum. + **/ +STATIC s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data) +{ + s32 status; + u16 word; + u16 page_size; + u16 i; + u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI; + + DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang"); + + /* Prepare the EEPROM for writing */ + status = ixgbe_acquire_eeprom(hw); + + if (status == IXGBE_SUCCESS) { + if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) { + ixgbe_release_eeprom(hw); + status = IXGBE_ERR_EEPROM; + } + } + + if (status == IXGBE_SUCCESS) { + for (i = 0; i < words; i++) { + ixgbe_standby_eeprom(hw); + + /* Send the WRITE ENABLE command (8 bit opcode ) */ + ixgbe_shift_out_eeprom_bits(hw, + IXGBE_EEPROM_WREN_OPCODE_SPI, + IXGBE_EEPROM_OPCODE_BITS); + + ixgbe_standby_eeprom(hw); + + /* + * Some SPI eeproms use the 8th address bit embedded + * in the opcode + */ + if ((hw->eeprom.address_bits == 8) && + ((offset + i) >= 128)) + write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI; + + /* Send the Write command (8-bit opcode + addr) */ + ixgbe_shift_out_eeprom_bits(hw, write_opcode, + IXGBE_EEPROM_OPCODE_BITS); + ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2), + hw->eeprom.address_bits); + + page_size = hw->eeprom.word_page_size; + + /* Send the data in burst via SPI*/ + do { + word = data[i]; + word = (word >> 8) | (word << 8); + ixgbe_shift_out_eeprom_bits(hw, word, 16); + + if (page_size == 0) + break; + + /* do not wrap around page */ + if (((offset + i) & (page_size - 1)) == + (page_size - 1)) + break; + } while (++i < words); + + ixgbe_standby_eeprom(hw); + msec_delay(10); + } + /* Done with writing - release the EEPROM */ + ixgbe_release_eeprom(hw); + } + + return status; +} + +/** + * ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM + * @hw: pointer to hardware structure + * @offset: offset within the EEPROM to be written to + * @data: 16 bit word to be written to the EEPROM + * + * If ixgbe_eeprom_update_checksum is not called after this function, the + * EEPROM will most likely contain an invalid checksum. 
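+ *
+ * Callers therefore normally refresh the checksum afterwards, for
+ * example (offset and data are caller supplied):
+ *
+ *   if (ixgbe_write_eeprom_generic(hw, offset, data) == IXGBE_SUCCESS)
+ *           hw->eeprom.ops.update_checksum(hw);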
+ **/ +s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data) +{ + s32 status; + + DEBUGFUNC("ixgbe_write_eeprom_generic"); + + hw->eeprom.ops.init_params(hw); + + if (offset >= hw->eeprom.word_size) { + status = IXGBE_ERR_EEPROM; + goto out; + } + + status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1, &data); + +out: + return status; +} + +/** + * ixgbe_read_eeprom_buffer_bit_bang_generic - Read EEPROM using bit-bang + * @hw: pointer to hardware structure + * @offset: offset within the EEPROM to be read + * @data: read 16 bit words(s) from EEPROM + * @words: number of word(s) + * + * Reads 16 bit word(s) from EEPROM through bit-bang method + **/ +s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data) +{ + s32 status = IXGBE_SUCCESS; + u16 i, count; + + DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang_generic"); + + hw->eeprom.ops.init_params(hw); + + if (words == 0) { + status = IXGBE_ERR_INVALID_ARGUMENT; + goto out; + } + + if (offset + words > hw->eeprom.word_size) { + status = IXGBE_ERR_EEPROM; + goto out; + } + + /* + * We cannot hold synchronization semaphores for too long + * to avoid other entity starvation. However it is more efficient + * to read in bursts than synchronizing access for each word. + */ + for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) { + count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ? + IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i); + + status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset + i, + count, &data[i]); + + if (status != IXGBE_SUCCESS) + break; + } + +out: + return status; +} + +/** + * ixgbe_read_eeprom_buffer_bit_bang - Read EEPROM using bit-bang + * @hw: pointer to hardware structure + * @offset: offset within the EEPROM to be read + * @words: number of word(s) + * @data: read 16 bit word(s) from EEPROM + * + * Reads 16 bit word(s) from EEPROM through bit-bang method + **/ +STATIC s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data) +{ + s32 status; + u16 word_in; + u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI; + u16 i; + + DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang"); + + /* Prepare the EEPROM for reading */ + status = ixgbe_acquire_eeprom(hw); + + if (status == IXGBE_SUCCESS) { + if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) { + ixgbe_release_eeprom(hw); + status = IXGBE_ERR_EEPROM; + } + } + + if (status == IXGBE_SUCCESS) { + for (i = 0; i < words; i++) { + ixgbe_standby_eeprom(hw); + /* + * Some SPI eeproms use the 8th address bit embedded + * in the opcode + */ + if ((hw->eeprom.address_bits == 8) && + ((offset + i) >= 128)) + read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI; + + /* Send the READ command (opcode + addr) */ + ixgbe_shift_out_eeprom_bits(hw, read_opcode, + IXGBE_EEPROM_OPCODE_BITS); + ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2), + hw->eeprom.address_bits); + + /* Read the data. 
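+ * The value shifted in is byte swapped relative to the word layout the
+ * driver uses, so the two bytes are exchanged below; the write path
+ * applies the mirror swap before shifting a word out.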
*/ + word_in = ixgbe_shift_in_eeprom_bits(hw, 16); + data[i] = (word_in >> 8) | (word_in << 8); + } + + /* End this read operation */ + ixgbe_release_eeprom(hw); + } + + return status; +} + +/** + * ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang + * @hw: pointer to hardware structure + * @offset: offset within the EEPROM to be read + * @data: read 16 bit value from EEPROM + * + * Reads 16 bit value from EEPROM through bit-bang method + **/ +s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, + u16 *data) +{ + s32 status; + + DEBUGFUNC("ixgbe_read_eeprom_bit_bang_generic"); + + hw->eeprom.ops.init_params(hw); + + if (offset >= hw->eeprom.word_size) { + status = IXGBE_ERR_EEPROM; + goto out; + } + + status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data); + +out: + return status; +} + +/** + * ixgbe_read_eerd_buffer_generic - Read EEPROM word(s) using EERD + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to read + * @words: number of word(s) + * @data: 16 bit word(s) from the EEPROM + * + * Reads a 16 bit word(s) from the EEPROM using the EERD register. + **/ +s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data) +{ + u32 eerd; + s32 status = IXGBE_SUCCESS; + u32 i; + + DEBUGFUNC("ixgbe_read_eerd_buffer_generic"); + + hw->eeprom.ops.init_params(hw); + + if (words == 0) { + status = IXGBE_ERR_INVALID_ARGUMENT; + ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM words"); + goto out; + } + + if (offset >= hw->eeprom.word_size) { + status = IXGBE_ERR_EEPROM; + ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM offset"); + goto out; + } + + for (i = 0; i < words; i++) { + eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) | + IXGBE_EEPROM_RW_REG_START; + + IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd); + status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ); + + if (status == IXGBE_SUCCESS) { + data[i] = (IXGBE_READ_REG(hw, IXGBE_EERD) >> + IXGBE_EEPROM_RW_REG_DATA); + } else { + DEBUGOUT("Eeprom read timed out\n"); + goto out; + } + } +out: + return status; +} + +/** + * ixgbe_detect_eeprom_page_size_generic - Detect EEPROM page size + * @hw: pointer to hardware structure + * @offset: offset within the EEPROM to be used as a scratch pad + * + * Discover EEPROM page size by writing marching data at given offset. + * This function is called only when we are writing a new large buffer + * at given offset so the data would be overwritten anyway. + **/ +STATIC s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw, + u16 offset) +{ + u16 data[IXGBE_EEPROM_PAGE_SIZE_MAX]; + s32 status = IXGBE_SUCCESS; + u16 i; + + DEBUGFUNC("ixgbe_detect_eeprom_page_size_generic"); + + for (i = 0; i < IXGBE_EEPROM_PAGE_SIZE_MAX; i++) + data[i] = i; + + hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX; + status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset, + IXGBE_EEPROM_PAGE_SIZE_MAX, data); + hw->eeprom.word_page_size = 0; + if (status != IXGBE_SUCCESS) + goto out; + + status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data); + if (status != IXGBE_SUCCESS) + goto out; + + /* + * When writing in burst more than the actual page size + * EEPROM address wraps around current page. 
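+ * For example, if IXGBE_EEPROM_PAGE_SIZE_MAX is 128 and the real page
+ * size is 16 words, position 0 of the page is overwritten on every
+ * wrap and ends up holding 112, so the computation below recovers
+ * 128 - 112 = 16.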
+ */ + hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX - data[0]; + + DEBUGOUT1("Detected EEPROM page size = %d words.", + hw->eeprom.word_page_size); +out: + return status; +} + +/** + * ixgbe_read_eerd_generic - Read EEPROM word using EERD + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM using the EERD register. + **/ +s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data) +{ + return ixgbe_read_eerd_buffer_generic(hw, offset, 1, data); +} + +/** + * ixgbe_write_eewr_buffer_generic - Write EEPROM word(s) using EEWR + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to write + * @words: number of word(s) + * @data: word(s) write to the EEPROM + * + * Write a 16 bit word(s) to the EEPROM using the EEWR register. + **/ +s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data) +{ + u32 eewr; + s32 status = IXGBE_SUCCESS; + u16 i; + + DEBUGFUNC("ixgbe_write_eewr_generic"); + + hw->eeprom.ops.init_params(hw); + + if (words == 0) { + status = IXGBE_ERR_INVALID_ARGUMENT; + ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM words"); + goto out; + } + + if (offset >= hw->eeprom.word_size) { + status = IXGBE_ERR_EEPROM; + ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM offset"); + goto out; + } + + for (i = 0; i < words; i++) { + eewr = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) | + (data[i] << IXGBE_EEPROM_RW_REG_DATA) | + IXGBE_EEPROM_RW_REG_START; + + status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE); + if (status != IXGBE_SUCCESS) { + DEBUGOUT("Eeprom write EEWR timed out\n"); + goto out; + } + + IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr); + + status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE); + if (status != IXGBE_SUCCESS) { + DEBUGOUT("Eeprom write EEWR timed out\n"); + goto out; + } + } + +out: + return status; +} + +/** + * ixgbe_write_eewr_generic - Write EEPROM word using EEWR + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to write + * @data: word write to the EEPROM + * + * Write a 16 bit word to the EEPROM using the EEWR register. + **/ +s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data) +{ + return ixgbe_write_eewr_buffer_generic(hw, offset, 1, &data); +} + +/** + * ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status + * @hw: pointer to hardware structure + * @ee_reg: EEPROM flag for polling + * + * Polls the status bit (bit 1) of the EERD or EEWR to determine when the + * read or write is done respectively. + **/ +s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg) +{ + u32 i; + u32 reg; + s32 status = IXGBE_ERR_EEPROM; + + DEBUGFUNC("ixgbe_poll_eerd_eewr_done"); + + for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) { + if (ee_reg == IXGBE_NVM_POLL_READ) + reg = IXGBE_READ_REG(hw, IXGBE_EERD); + else + reg = IXGBE_READ_REG(hw, IXGBE_EEWR); + + if (reg & IXGBE_EEPROM_RW_REG_DONE) { + status = IXGBE_SUCCESS; + break; + } + usec_delay(5); + } + + if (i == IXGBE_EERD_EEWR_ATTEMPTS) + ERROR_REPORT1(IXGBE_ERROR_POLLING, + "EEPROM read/write done polling timed out"); + + return status; +} + +/** + * ixgbe_acquire_eeprom - Acquire EEPROM using bit-bang + * @hw: pointer to hardware structure + * + * Prepares EEPROM for access using bit-bang method. This function should + * be called before issuing a command to the EEPROM. 
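+ * Every successful call must be balanced by ixgbe_release_eeprom()
+ * once the SPI transaction is finished, as the callers in this file
+ * do, e.g.:
+ *
+ *   if (ixgbe_acquire_eeprom(hw) == IXGBE_SUCCESS) {
+ *           issue the SPI opcodes here
+ *           ixgbe_release_eeprom(hw);
+ *   }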
+ **/ +STATIC s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw) +{ + s32 status = IXGBE_SUCCESS; + u32 eec; + u32 i; + + DEBUGFUNC("ixgbe_acquire_eeprom"); + + if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) + != IXGBE_SUCCESS) + status = IXGBE_ERR_SWFW_SYNC; + + if (status == IXGBE_SUCCESS) { + eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); + + /* Request EEPROM Access */ + eec |= IXGBE_EEC_REQ; + IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec); + + for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) { + eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); + if (eec & IXGBE_EEC_GNT) + break; + usec_delay(5); + } + + /* Release if grant not acquired */ + if (!(eec & IXGBE_EEC_GNT)) { + eec &= ~IXGBE_EEC_REQ; + IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec); + DEBUGOUT("Could not acquire EEPROM grant\n"); + + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + status = IXGBE_ERR_EEPROM; + } + + /* Setup EEPROM for Read/Write */ + if (status == IXGBE_SUCCESS) { + /* Clear CS and SK */ + eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK); + IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec); + IXGBE_WRITE_FLUSH(hw); + usec_delay(1); + } + } + return status; +} + +/** + * ixgbe_get_eeprom_semaphore - Get hardware semaphore + * @hw: pointer to hardware structure + * + * Sets the hardware semaphores so EEPROM access can occur for bit-bang method + **/ +STATIC s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw) +{ + s32 status = IXGBE_ERR_EEPROM; + u32 timeout = 2000; + u32 i; + u32 swsm; + + DEBUGFUNC("ixgbe_get_eeprom_semaphore"); + + + /* Get SMBI software semaphore between device drivers first */ + for (i = 0; i < timeout; i++) { + /* + * If the SMBI bit is 0 when we read it, then the bit will be + * set and we have the semaphore + */ + swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw)); + if (!(swsm & IXGBE_SWSM_SMBI)) { + status = IXGBE_SUCCESS; + break; + } + usec_delay(50); + } + + if (i == timeout) { + DEBUGOUT("Driver can't access the Eeprom - SMBI Semaphore " + "not granted.\n"); + /* + * this release is particularly important because our attempts + * above to get the semaphore may have succeeded, and if there + * was a timeout, we should unconditionally clear the semaphore + * bits to free the driver to make progress + */ + ixgbe_release_eeprom_semaphore(hw); + + usec_delay(50); + /* + * one last try + * If the SMBI bit is 0 when we read it, then the bit will be + * set and we have the semaphore + */ + swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw)); + if (!(swsm & IXGBE_SWSM_SMBI)) + status = IXGBE_SUCCESS; + } + + /* Now get the semaphore between SW/FW through the SWESMBI bit */ + if (status == IXGBE_SUCCESS) { + for (i = 0; i < timeout; i++) { + swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw)); + + /* Set the SW EEPROM semaphore bit to request access */ + swsm |= IXGBE_SWSM_SWESMBI; + IXGBE_WRITE_REG(hw, IXGBE_SWSM_BY_MAC(hw), swsm); + + /* + * If we set the bit successfully then we got the + * semaphore. 
+ */ + swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw)); + if (swsm & IXGBE_SWSM_SWESMBI) + break; + + usec_delay(50); + } + + /* + * Release semaphores and return error if SW EEPROM semaphore + * was not granted because we don't have access to the EEPROM + */ + if (i >= timeout) { + ERROR_REPORT1(IXGBE_ERROR_POLLING, + "SWESMBI Software EEPROM semaphore not granted.\n"); + ixgbe_release_eeprom_semaphore(hw); + status = IXGBE_ERR_EEPROM; + } + } else { + ERROR_REPORT1(IXGBE_ERROR_POLLING, + "Software semaphore SMBI between device drivers " + "not granted.\n"); + } + + return status; +} + +/** + * ixgbe_release_eeprom_semaphore - Release hardware semaphore + * @hw: pointer to hardware structure + * + * This function clears hardware semaphore bits. + **/ +STATIC void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw) +{ + u32 swsm; + + DEBUGFUNC("ixgbe_release_eeprom_semaphore"); + + swsm = IXGBE_READ_REG(hw, IXGBE_SWSM); + + /* Release both semaphores by writing 0 to the bits SWESMBI and SMBI */ + swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI); + IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm); + IXGBE_WRITE_FLUSH(hw); +} + +/** + * ixgbe_ready_eeprom - Polls for EEPROM ready + * @hw: pointer to hardware structure + **/ +STATIC s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw) +{ + s32 status = IXGBE_SUCCESS; + u16 i; + u8 spi_stat_reg; + + DEBUGFUNC("ixgbe_ready_eeprom"); + + /* + * Read "Status Register" repeatedly until the LSB is cleared. The + * EEPROM will signal that the command has been completed by clearing + * bit 0 of the internal status register. If it's not cleared within + * 5 milliseconds, then error out. + */ + for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) { + ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI, + IXGBE_EEPROM_OPCODE_BITS); + spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8); + if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI)) + break; + + usec_delay(5); + ixgbe_standby_eeprom(hw); + }; + + /* + * On some parts, SPI write time could vary from 0-20mSec on 3.3V + * devices (and only 0-5mSec on 5V devices) + */ + if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) { + DEBUGOUT("SPI EEPROM Status error\n"); + status = IXGBE_ERR_EEPROM; + } + + return status; +} + +/** + * ixgbe_standby_eeprom - Returns EEPROM to a "standby" state + * @hw: pointer to hardware structure + **/ +STATIC void ixgbe_standby_eeprom(struct ixgbe_hw *hw) +{ + u32 eec; + + DEBUGFUNC("ixgbe_standby_eeprom"); + + eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); + + /* Toggle CS to flush commands */ + eec |= IXGBE_EEC_CS; + IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec); + IXGBE_WRITE_FLUSH(hw); + usec_delay(1); + eec &= ~IXGBE_EEC_CS; + IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec); + IXGBE_WRITE_FLUSH(hw); + usec_delay(1); +} + +/** + * ixgbe_shift_out_eeprom_bits - Shift data bits out to the EEPROM. + * @hw: pointer to hardware structure + * @data: data to send to the EEPROM + * @count: number of bits to shift out + **/ +STATIC void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data, + u16 count) +{ + u32 eec; + u32 mask; + u32 i; + + DEBUGFUNC("ixgbe_shift_out_eeprom_bits"); + + eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); + + /* + * Mask is used to shift "count" bits of "data" out to the EEPROM + * one bit at a time. 
Determine the starting bit based on count + */ + mask = 0x01 << (count - 1); + + for (i = 0; i < count; i++) { + /* + * A "1" is shifted out to the EEPROM by setting bit "DI" to a + * "1", and then raising and then lowering the clock (the SK + * bit controls the clock input to the EEPROM). A "0" is + * shifted out to the EEPROM by setting "DI" to "0" and then + * raising and then lowering the clock. + */ + if (data & mask) + eec |= IXGBE_EEC_DI; + else + eec &= ~IXGBE_EEC_DI; + + IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec); + IXGBE_WRITE_FLUSH(hw); + + usec_delay(1); + + ixgbe_raise_eeprom_clk(hw, &eec); + ixgbe_lower_eeprom_clk(hw, &eec); + + /* + * Shift mask to signify next bit of data to shift in to the + * EEPROM + */ + mask = mask >> 1; + }; + + /* We leave the "DI" bit set to "0" when we leave this routine. */ + eec &= ~IXGBE_EEC_DI; + IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec); + IXGBE_WRITE_FLUSH(hw); +} + +/** + * ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM + * @hw: pointer to hardware structure + * @count: number of bits to shift + **/ +STATIC u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count) +{ + u32 eec; + u32 i; + u16 data = 0; + + DEBUGFUNC("ixgbe_shift_in_eeprom_bits"); + + /* + * In order to read a register from the EEPROM, we need to shift + * 'count' bits in from the EEPROM. Bits are "shifted in" by raising + * the clock input to the EEPROM (setting the SK bit), and then reading + * the value of the "DO" bit. During this "shifting in" process the + * "DI" bit should always be clear. + */ + eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); + + eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI); + + for (i = 0; i < count; i++) { + data = data << 1; + ixgbe_raise_eeprom_clk(hw, &eec); + + eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); + + eec &= ~(IXGBE_EEC_DI); + if (eec & IXGBE_EEC_DO) + data |= 1; + + ixgbe_lower_eeprom_clk(hw, &eec); + } + + return data; +} + +/** + * ixgbe_raise_eeprom_clk - Raises the EEPROM's clock input. + * @hw: pointer to hardware structure + * @eec: EEC register's current value + **/ +STATIC void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec) +{ + DEBUGFUNC("ixgbe_raise_eeprom_clk"); + + /* + * Raise the clock input to the EEPROM + * (setting the SK bit), then delay + */ + *eec = *eec | IXGBE_EEC_SK; + IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), *eec); + IXGBE_WRITE_FLUSH(hw); + usec_delay(1); +} + +/** + * ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input. 
+ * @hw: pointer to hardware structure + * @eec: EEC's current value + **/ +STATIC void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec) +{ + DEBUGFUNC("ixgbe_lower_eeprom_clk"); + + /* + * Lower the clock input to the EEPROM (clearing the SK bit), then + * delay + */ + *eec = *eec & ~IXGBE_EEC_SK; + IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), *eec); + IXGBE_WRITE_FLUSH(hw); + usec_delay(1); +} + +/** + * ixgbe_release_eeprom - Release EEPROM, release semaphores + * @hw: pointer to hardware structure + **/ +STATIC void ixgbe_release_eeprom(struct ixgbe_hw *hw) +{ + u32 eec; + + DEBUGFUNC("ixgbe_release_eeprom"); + + eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); + + eec |= IXGBE_EEC_CS; /* Pull CS high */ + eec &= ~IXGBE_EEC_SK; /* Lower SCK */ + + IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec); + IXGBE_WRITE_FLUSH(hw); + + usec_delay(1); + + /* Stop requesting EEPROM access */ + eec &= ~IXGBE_EEC_REQ; + IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec); + + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + + /* Delay before attempt to obtain semaphore again to allow FW access */ + msec_delay(hw->eeprom.semaphore_delay); +} + +/** + * ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum + * @hw: pointer to hardware structure + * + * Returns a negative error code on error, or the 16-bit checksum + **/ +s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw) +{ + u16 i; + u16 j; + u16 checksum = 0; + u16 length = 0; + u16 pointer = 0; + u16 word = 0; + + DEBUGFUNC("ixgbe_calc_eeprom_checksum_generic"); + + /* Include 0x0-0x3F in the checksum */ + for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) { + if (hw->eeprom.ops.read(hw, i, &word)) { + DEBUGOUT("EEPROM read failed\n"); + return IXGBE_ERR_EEPROM; + } + checksum += word; + } + + /* Include all data from pointers except for the fw pointer */ + for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) { + if (hw->eeprom.ops.read(hw, i, &pointer)) { + DEBUGOUT("EEPROM read failed\n"); + return IXGBE_ERR_EEPROM; + } + + /* If the pointer seems invalid */ + if (pointer == 0xFFFF || pointer == 0) + continue; + + if (hw->eeprom.ops.read(hw, pointer, &length)) { + DEBUGOUT("EEPROM read failed\n"); + return IXGBE_ERR_EEPROM; + } + + if (length == 0xFFFF || length == 0) + continue; + + for (j = pointer + 1; j <= pointer + length; j++) { + if (hw->eeprom.ops.read(hw, j, &word)) { + DEBUGOUT("EEPROM read failed\n"); + return IXGBE_ERR_EEPROM; + } + checksum += word; + } + } + + checksum = (u16)IXGBE_EEPROM_SUM - checksum; + + return (s32)checksum; +} + +/** + * ixgbe_validate_eeprom_checksum_generic - Validate EEPROM checksum + * @hw: pointer to hardware structure + * @checksum_val: calculated checksum + * + * Performs checksum calculation and validates the EEPROM checksum. If the + * caller does not need checksum_val, the value can be NULL. + **/ +s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw, + u16 *checksum_val) +{ + s32 status; + u16 checksum; + u16 read_checksum = 0; + + DEBUGFUNC("ixgbe_validate_eeprom_checksum_generic"); + + /* Read the first word from the EEPROM. 
If this times out or fails, do + * not continue or we could be in for a very long wait while every + * EEPROM read fails + */ + status = hw->eeprom.ops.read(hw, 0, &checksum); + if (status) { + DEBUGOUT("EEPROM read failed\n"); + return status; + } + + status = hw->eeprom.ops.calc_checksum(hw); + if (status < 0) + return status; + + checksum = (u16)(status & 0xffff); + + status = hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum); + if (status) { + DEBUGOUT("EEPROM read failed\n"); + return status; + } + + /* Verify read checksum from EEPROM is the same as + * calculated checksum + */ + if (read_checksum != checksum) + status = IXGBE_ERR_EEPROM_CHECKSUM; + + /* If the user cares, return the calculated checksum */ + if (checksum_val) + *checksum_val = checksum; + + return status; +} + +/** + * ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum + * @hw: pointer to hardware structure + **/ +s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw) +{ + s32 status; + u16 checksum; + + DEBUGFUNC("ixgbe_update_eeprom_checksum_generic"); + + /* Read the first word from the EEPROM. If this times out or fails, do + * not continue or we could be in for a very long wait while every + * EEPROM read fails + */ + status = hw->eeprom.ops.read(hw, 0, &checksum); + if (status) { + DEBUGOUT("EEPROM read failed\n"); + return status; + } + + status = hw->eeprom.ops.calc_checksum(hw); + if (status < 0) + return status; + + checksum = (u16)(status & 0xffff); + + status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM, checksum); + + return status; +} + +/** + * ixgbe_validate_mac_addr - Validate MAC address + * @mac_addr: pointer to MAC address. + * + * Tests a MAC address to ensure it is a valid Individual Address. + **/ +s32 ixgbe_validate_mac_addr(u8 *mac_addr) +{ + s32 status = IXGBE_SUCCESS; + + DEBUGFUNC("ixgbe_validate_mac_addr"); + + /* Make sure it is not a multicast address */ + if (IXGBE_IS_MULTICAST(mac_addr)) { + status = IXGBE_ERR_INVALID_MAC_ADDR; + /* Not a broadcast address */ + } else if (IXGBE_IS_BROADCAST(mac_addr)) { + status = IXGBE_ERR_INVALID_MAC_ADDR; + /* Reject the zero address */ + } else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 && + mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) { + status = IXGBE_ERR_INVALID_MAC_ADDR; + } + return status; +} + +/** + * ixgbe_set_rar_generic - Set Rx address register + * @hw: pointer to hardware structure + * @index: Receive address register to write + * @addr: Address to put into receive address register + * @vmdq: VMDq "set" or "pool" index + * @enable_addr: set flag that address is active + * + * Puts an ethernet address into a receive address register. 
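+ * The address is stored little endian: e.g. for aa:bb:cc:dd:ee:ff, RAL is
+ * written with 0xddccbbaa and the low 16 bits of RAH with 0xffee (a worked
+ * example of the byte swapping done below).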
+ **/ +s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, + u32 enable_addr) +{ + u32 rar_low, rar_high; + u32 rar_entries = hw->mac.num_rar_entries; + + DEBUGFUNC("ixgbe_set_rar_generic"); + + /* Make sure we are using a valid rar index range */ + if (index >= rar_entries) { + ERROR_REPORT2(IXGBE_ERROR_ARGUMENT, + "RAR index %d is out of range.\n", index); + return IXGBE_ERR_INVALID_ARGUMENT; + } + + /* setup VMDq pool selection before this RAR gets enabled */ + hw->mac.ops.set_vmdq(hw, index, vmdq); + + /* + * HW expects these in little endian so we reverse the byte + * order from network order (big endian) to little endian + */ + rar_low = ((u32)addr[0] | + ((u32)addr[1] << 8) | + ((u32)addr[2] << 16) | + ((u32)addr[3] << 24)); + /* + * Some parts put the VMDq setting in the extra RAH bits, + * so save everything except the lower 16 bits that hold part + * of the address and the address valid bit. + */ + rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index)); + rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV); + rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8)); + + if (enable_addr != 0) + rar_high |= IXGBE_RAH_AV; + + IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low); + IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_clear_rar_generic - Remove Rx address register + * @hw: pointer to hardware structure + * @index: Receive address register to write + * + * Clears an ethernet address from a receive address register. + **/ +s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index) +{ + u32 rar_high; + u32 rar_entries = hw->mac.num_rar_entries; + + DEBUGFUNC("ixgbe_clear_rar_generic"); + + /* Make sure we are using a valid rar index range */ + if (index >= rar_entries) { + ERROR_REPORT2(IXGBE_ERROR_ARGUMENT, + "RAR index %d is out of range.\n", index); + return IXGBE_ERR_INVALID_ARGUMENT; + } + + /* + * Some parts put the VMDq setting in the extra RAH bits, + * so save everything except the lower 16 bits that hold part + * of the address and the address valid bit. + */ + rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index)); + rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV); + + IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0); + IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high); + + /* clear VMDq pool/queue selection for this RAR */ + hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_init_rx_addrs_generic - Initializes receive address filters. + * @hw: pointer to hardware structure + * + * Places the MAC address in receive address register 0 and clears the rest + * of the receive address registers. Clears the multicast table. Assumes + * the receiver is in reset when the routine is called. + **/ +s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw) +{ + u32 i; + u32 rar_entries = hw->mac.num_rar_entries; + + DEBUGFUNC("ixgbe_init_rx_addrs_generic"); + + /* + * If the current mac address is valid, assume it is a software override + * to the permanent address. + * Otherwise, use the permanent address from the eeprom. + */ + if (ixgbe_validate_mac_addr(hw->mac.addr) == + IXGBE_ERR_INVALID_MAC_ADDR) { + /* Get the MAC address from the RAR0 for later reference */ + hw->mac.ops.get_mac_addr(hw, hw->mac.addr); + + DEBUGOUT3(" Keeping Current RAR0 Addr =%.2X %.2X %.2X ", + hw->mac.addr[0], hw->mac.addr[1], + hw->mac.addr[2]); + DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3], + hw->mac.addr[4], hw->mac.addr[5]); + } else { + /* Setup the receive address. 
*/ + DEBUGOUT("Overriding MAC Address in RAR[0]\n"); + DEBUGOUT3(" New MAC Addr =%.2X %.2X %.2X ", + hw->mac.addr[0], hw->mac.addr[1], + hw->mac.addr[2]); + DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3], + hw->mac.addr[4], hw->mac.addr[5]); + + hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); + } + + /* clear VMDq pool/queue selection for RAR 0 */ + hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL); + + hw->addr_ctrl.overflow_promisc = 0; + + hw->addr_ctrl.rar_used_count = 1; + + /* Zero out the other receive addresses. */ + DEBUGOUT1("Clearing RAR[1-%d]\n", rar_entries - 1); + for (i = 1; i < rar_entries; i++) { + IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0); + IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0); + } + + /* Clear the MTA */ + hw->addr_ctrl.mta_in_use = 0; + IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type); + + DEBUGOUT(" Clearing MTA\n"); + for (i = 0; i < hw->mac.mcft_size; i++) + IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0); + + ixgbe_init_uta_tables(hw); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_add_uc_addr - Adds a secondary unicast address. + * @hw: pointer to hardware structure + * @addr: new address + * @vmdq: VMDq "set" or "pool" index + * + * Adds it to unused receive address register or goes into promiscuous mode. + **/ +void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq) +{ + u32 rar_entries = hw->mac.num_rar_entries; + u32 rar; + + DEBUGFUNC("ixgbe_add_uc_addr"); + + DEBUGOUT6(" UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n", + addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]); + + /* + * Place this address in the RAR if there is room, + * else put the controller into promiscuous mode + */ + if (hw->addr_ctrl.rar_used_count < rar_entries) { + rar = hw->addr_ctrl.rar_used_count; + hw->mac.ops.set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV); + DEBUGOUT1("Added a secondary address to RAR[%d]\n", rar); + hw->addr_ctrl.rar_used_count++; + } else { + hw->addr_ctrl.overflow_promisc++; + } + + DEBUGOUT("ixgbe_add_uc_addr Complete\n"); +} + +/** + * ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses + * @hw: pointer to hardware structure + * @addr_list: the list of new addresses + * @addr_count: number of addresses + * @next: iterator function to walk the address list + * + * The given list replaces any existing list. Clears the secondary addrs from + * receive address registers. Uses unused receive address registers for the + * first secondary addresses, and falls back to promiscuous mode as needed. + * + * Drivers using secondary unicast addresses must set user_set_promisc when + * manually putting the device into promiscuous mode. 
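+ * RAR[0] always holds the primary MAC address, so at most
+ * num_rar_entries - 1 secondary addresses fit before the overflow
+ * promiscuous path is taken.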
+ **/ +s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list, + u32 addr_count, ixgbe_mc_addr_itr next) +{ + u8 *addr; + u32 i; + u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc; + u32 uc_addr_in_use; + u32 fctrl; + u32 vmdq; + + DEBUGFUNC("ixgbe_update_uc_addr_list_generic"); + + /* + * Clear accounting of old secondary address list, + * don't count RAR[0] + */ + uc_addr_in_use = hw->addr_ctrl.rar_used_count - 1; + hw->addr_ctrl.rar_used_count -= uc_addr_in_use; + hw->addr_ctrl.overflow_promisc = 0; + + /* Zero out the other receive addresses */ + DEBUGOUT1("Clearing RAR[1-%d]\n", uc_addr_in_use+1); + for (i = 0; i < uc_addr_in_use; i++) { + IXGBE_WRITE_REG(hw, IXGBE_RAL(1+i), 0); + IXGBE_WRITE_REG(hw, IXGBE_RAH(1+i), 0); + } + + /* Add the new addresses */ + for (i = 0; i < addr_count; i++) { + DEBUGOUT(" Adding the secondary addresses:\n"); + addr = next(hw, &addr_list, &vmdq); + ixgbe_add_uc_addr(hw, addr, vmdq); + } + + if (hw->addr_ctrl.overflow_promisc) { + /* enable promisc if not already in overflow or set by user */ + if (!old_promisc_setting && !hw->addr_ctrl.user_set_promisc) { + DEBUGOUT(" Entering address overflow promisc mode\n"); + fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); + fctrl |= IXGBE_FCTRL_UPE; + IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); + } + } else { + /* only disable if set by overflow, not by user */ + if (old_promisc_setting && !hw->addr_ctrl.user_set_promisc) { + DEBUGOUT(" Leaving address overflow promisc mode\n"); + fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); + fctrl &= ~IXGBE_FCTRL_UPE; + IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); + } + } + + DEBUGOUT("ixgbe_update_uc_addr_list_generic Complete\n"); + return IXGBE_SUCCESS; +} + +/** + * ixgbe_mta_vector - Determines bit-vector in multicast table to set + * @hw: pointer to hardware structure + * @mc_addr: the multicast address + * + * Extracts the 12 bits, from a multicast address, to determine which + * bit-vector to set in the multicast table. The hardware uses 12 bits, from + * incoming rx multicast addresses, to determine the bit-vector to check in + * the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set + * by the MO field of the MCSTCTRL. The MO field is set during initialization + * to mc_filter_type. + **/ +STATIC s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr) +{ + u32 vector = 0; + + DEBUGFUNC("ixgbe_mta_vector"); + + switch (hw->mac.mc_filter_type) { + case 0: /* use bits [47:36] of the address */ + vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4)); + break; + case 1: /* use bits [46:35] of the address */ + vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5)); + break; + case 2: /* use bits [45:34] of the address */ + vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6)); + break; + case 3: /* use bits [43:32] of the address */ + vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8)); + break; + default: /* Invalid mc_filter_type */ + DEBUGOUT("MC filter type param set incorrectly\n"); + ASSERT(0); + break; + } + + /* vector can only be 12-bits or boundary will be exceeded */ + vector &= 0xFFF; + return vector; +} + +/** + * ixgbe_set_mta - Set bit-vector in multicast table + * @hw: pointer to hardware structure + * @mc_addr: Multicast address + * + * Sets the bit-vector in the multicast table. 
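+ * As a worked example, vector 0x123 selects bit 3 (0x123 & 0x1F) of MTA
+ * shadow register 9 (0x123 >> 5).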
+ **/ +void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr) +{ + u32 vector; + u32 vector_bit; + u32 vector_reg; + + DEBUGFUNC("ixgbe_set_mta"); + + hw->addr_ctrl.mta_in_use++; + + vector = ixgbe_mta_vector(hw, mc_addr); + DEBUGOUT1(" bit-vector = 0x%03X\n", vector); + + /* + * The MTA is a register array of 128 32-bit registers. It is treated + * like an array of 4096 bits. We want to set bit + * BitArray[vector_value]. So we figure out what register the bit is + * in, read it, OR in the new bit, then write back the new value. The + * register is determined by the upper 7 bits of the vector value and + * the bit within that register are determined by the lower 5 bits of + * the value. + */ + vector_reg = (vector >> 5) & 0x7F; + vector_bit = vector & 0x1F; + hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit); +} + +/** + * ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses + * @hw: pointer to hardware structure + * @mc_addr_list: the list of new multicast addresses + * @mc_addr_count: number of addresses + * @next: iterator function to walk the multicast address list + * @clear: flag, when set clears the table beforehand + * + * When the clear flag is set, the given list replaces any existing list. + * Hashes the given addresses into the multicast table. + **/ +s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list, + u32 mc_addr_count, ixgbe_mc_addr_itr next, + bool clear) +{ + u32 i; + u32 vmdq; + + DEBUGFUNC("ixgbe_update_mc_addr_list_generic"); + + /* + * Set the new number of MC addresses that we are being requested to + * use. + */ + hw->addr_ctrl.num_mc_addrs = mc_addr_count; + hw->addr_ctrl.mta_in_use = 0; + + /* Clear mta_shadow */ + if (clear) { + DEBUGOUT(" Clearing MTA\n"); + memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow)); + } + + /* Update mta_shadow */ + for (i = 0; i < mc_addr_count; i++) { + DEBUGOUT(" Adding the multicast addresses:\n"); + ixgbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq)); + } + + /* Enable mta */ + for (i = 0; i < hw->mac.mcft_size; i++) + IXGBE_WRITE_REG_ARRAY(hw, IXGBE_MTA(0), i, + hw->mac.mta_shadow[i]); + + if (hw->addr_ctrl.mta_in_use > 0) + IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, + IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type); + + DEBUGOUT("ixgbe_update_mc_addr_list_generic Complete\n"); + return IXGBE_SUCCESS; +} + +/** + * ixgbe_enable_mc_generic - Enable multicast address in RAR + * @hw: pointer to hardware structure + * + * Enables multicast address in RAR and the use of the multicast hash table. + **/ +s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw) +{ + struct ixgbe_addr_filter_info *a = &hw->addr_ctrl; + + DEBUGFUNC("ixgbe_enable_mc_generic"); + + if (a->mta_in_use > 0) + IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE | + hw->mac.mc_filter_type); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_disable_mc_generic - Disable multicast address in RAR + * @hw: pointer to hardware structure + * + * Disables multicast address in RAR and the use of the multicast hash table. + **/ +s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw) +{ + struct ixgbe_addr_filter_info *a = &hw->addr_ctrl; + + DEBUGFUNC("ixgbe_disable_mc_generic"); + + if (a->mta_in_use > 0) + IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_fc_enable_generic - Enable flow control + * @hw: pointer to hardware structure + * + * Enable flow control according to the current settings. 
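+ * The resolved fc.current_mode is applied as MFLCN.RFCE for Rx pause and
+ * FCCFG.TFCE (802.3x) for Tx pause; ixgbe_fc_full sets both.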
+ **/ +s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw) +{ + s32 ret_val = IXGBE_SUCCESS; + u32 mflcn_reg, fccfg_reg; + u32 reg; + u32 fcrtl, fcrth; + int i; + + DEBUGFUNC("ixgbe_fc_enable_generic"); + + /* Validate the water mark configuration */ + if (!hw->fc.pause_time) { + ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; + goto out; + } + + /* Low water mark of zero causes XOFF floods */ + for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + if ((hw->fc.current_mode & ixgbe_fc_tx_pause) && + hw->fc.high_water[i]) { + if (!hw->fc.low_water[i] || + hw->fc.low_water[i] >= hw->fc.high_water[i]) { + DEBUGOUT("Invalid water mark configuration\n"); + ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; + goto out; + } + } + } + + /* Negotiate the fc mode to use */ + hw->mac.ops.fc_autoneg(hw); + + /* Disable any previous flow control settings */ + mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN); + mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE); + + fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG); + fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY); + + /* + * The possible values of fc.current_mode are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames, + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames but + * we do not support receiving pause frames). + * 3: Both Rx and Tx flow control (symmetric) are enabled. + * other: Invalid. + */ + switch (hw->fc.current_mode) { + case ixgbe_fc_none: + /* + * Flow control is disabled by software override or autoneg. + * The code below will actually disable it in the HW. + */ + break; + case ixgbe_fc_rx_pause: + /* + * Rx Flow control is enabled and Tx Flow control is + * disabled by software override. Since there really + * isn't a way to advertise that we are capable of RX + * Pause ONLY, we will advertise that we support both + * symmetric and asymmetric Rx PAUSE. Later, we will + * disable the adapter's ability to send PAUSE frames. + */ + mflcn_reg |= IXGBE_MFLCN_RFCE; + break; + case ixgbe_fc_tx_pause: + /* + * Tx Flow control is enabled, and Rx Flow control is + * disabled by software override. + */ + fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X; + break; + case ixgbe_fc_full: + /* Flow control (both Rx and Tx) is enabled by SW override. */ + mflcn_reg |= IXGBE_MFLCN_RFCE; + fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X; + break; + default: + ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, + "Flow control param set incorrectly\n"); + ret_val = IXGBE_ERR_CONFIG; + goto out; + break; + } + + /* Set 802.3x based flow control settings. */ + mflcn_reg |= IXGBE_MFLCN_DPF; + IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg); + IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg); + + + /* Set up and enable Rx high/low water mark thresholds, enable XON. */ + for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + if ((hw->fc.current_mode & ixgbe_fc_tx_pause) && + hw->fc.high_water[i]) { + fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE; + IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl); + fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN; + } else { + IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0); + /* + * In order to prevent Tx hangs when the internal Tx + * switch is enabled we must set the high water mark + * to the Rx packet buffer size - 24KB. This allows + * the Tx switch to function even under heavy Rx + * workloads. 
+			 */
+			fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 24576;
+		}
+
+		IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth);
+	}
+
+	/* Configure pause time (2 TCs per register) */
+	reg = hw->fc.pause_time * 0x00010001;
+	for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
+		IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
+
+	/* Configure flow control refresh threshold value */
+	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
+
+out:
+	return ret_val;
+}
+
+/**
+ * ixgbe_negotiate_fc - Negotiate flow control
+ * @hw: pointer to hardware structure
+ * @adv_reg: flow control advertised settings
+ * @lp_reg: link partner's flow control settings
+ * @adv_sym: symmetric pause bit in advertisement
+ * @adv_asm: asymmetric pause bit in advertisement
+ * @lp_sym: symmetric pause bit in link partner advertisement
+ * @lp_asm: asymmetric pause bit in link partner advertisement
+ *
+ * Find the intersection between advertised settings and link partner's
+ * advertised settings
+ **/
+s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
+		       u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
+{
+	if ((!(adv_reg)) || (!(lp_reg))) {
+		ERROR_REPORT3(IXGBE_ERROR_UNSUPPORTED,
+			      "Local or link partner's advertised flow control "
+			      "settings are NULL. Local: %x, link partner: %x\n",
+			      adv_reg, lp_reg);
+		return IXGBE_ERR_FC_NOT_NEGOTIATED;
+	}
+
+	if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
+		/*
+		 * Now we need to check if the user selected Rx ONLY
+		 * pause frames.  In this case, we had to advertise
+		 * FULL flow control because we could not advertise RX
+		 * ONLY. Hence, we must now check to see if we need to
+		 * turn OFF the TRANSMISSION of PAUSE frames.
+		 */
+		if (hw->fc.requested_mode == ixgbe_fc_full) {
+			hw->fc.current_mode = ixgbe_fc_full;
+			DEBUGOUT("Flow Control = FULL.\n");
+		} else {
+			hw->fc.current_mode = ixgbe_fc_rx_pause;
+			DEBUGOUT("Flow Control=RX PAUSE frames only\n");
+		}
+	} else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
+		   (lp_reg & lp_sym) && (lp_reg & lp_asm)) {
+		hw->fc.current_mode = ixgbe_fc_tx_pause;
+		DEBUGOUT("Flow Control = TX PAUSE frames only.\n");
+	} else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
+		   !(lp_reg & lp_sym) && (lp_reg & lp_asm)) {
+		hw->fc.current_mode = ixgbe_fc_rx_pause;
+		DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
+	} else {
+		hw->fc.current_mode = ixgbe_fc_none;
+		DEBUGOUT("Flow Control = NONE.\n");
+	}
+	return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber
+ * @hw: pointer to hardware structure
+ *
+ * Enable flow control on 1 gig fiber.
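+ * Resolution intersects the local PCS1GANA and link partner PCS1GANLP
+ * pause bits via ixgbe_negotiate_fc().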
+ **/ +STATIC s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw) +{ + u32 pcs_anadv_reg, pcs_lpab_reg, linkstat; + s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED; + + /* + * On multispeed fiber at 1g, bail out if + * - link is up but AN did not complete, or if + * - link is up and AN completed but timed out + */ + + linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA); + if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) || + (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) { + DEBUGOUT("Auto-Negotiation did not complete or timed out\n"); + goto out; + } + + pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); + pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP); + + ret_val = ixgbe_negotiate_fc(hw, pcs_anadv_reg, + pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE, + IXGBE_PCS1GANA_ASM_PAUSE, + IXGBE_PCS1GANA_SYM_PAUSE, + IXGBE_PCS1GANA_ASM_PAUSE); + +out: + return ret_val; +} + +/** + * ixgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37 + * @hw: pointer to hardware structure + * + * Enable flow control according to IEEE clause 37. + **/ +STATIC s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw) +{ + u32 links2, anlp1_reg, autoc_reg, links; + s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED; + + /* + * On backplane, bail out if + * - backplane autoneg was not completed, or if + * - we are 82599 and link partner is not AN enabled + */ + links = IXGBE_READ_REG(hw, IXGBE_LINKS); + if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) { + DEBUGOUT("Auto-Negotiation did not complete\n"); + goto out; + } + + if (hw->mac.type == ixgbe_mac_82599EB) { + links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2); + if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) { + DEBUGOUT("Link partner is not AN enabled\n"); + goto out; + } + } + /* + * Read the 10g AN autoc and LP ability registers and resolve + * local flow control settings accordingly + */ + autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); + anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1); + + ret_val = ixgbe_negotiate_fc(hw, autoc_reg, + anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE, + IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE); + +out: + return ret_val; +} + +/** + * ixgbe_fc_autoneg_copper - Enable flow control IEEE clause 37 + * @hw: pointer to hardware structure + * + * Enable flow control according to IEEE clause 37. + **/ +STATIC s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw) +{ + u16 technology_ability_reg = 0; + u16 lp_technology_ability_reg = 0; + + hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &technology_ability_reg); + hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_LP, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &lp_technology_ability_reg); + + return ixgbe_negotiate_fc(hw, (u32)technology_ability_reg, + (u32)lp_technology_ability_reg, + IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE, + IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE); +} + +/** + * ixgbe_fc_autoneg - Configure flow control + * @hw: pointer to hardware structure + * + * Compares our advertised flow control capabilities to those advertised by + * our link partner, and determines the proper flow control mode to use. + **/ +void ixgbe_fc_autoneg(struct ixgbe_hw *hw) +{ + s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED; + ixgbe_link_speed speed; + bool link_up; + + DEBUGFUNC("ixgbe_fc_autoneg"); + + /* + * AN should have completed when the cable was plugged in. + * Look for reasons to bail out. Bail out if: + * - FC autoneg is disabled, or if + * - link is not up. 
+ */ + if (hw->fc.disable_fc_autoneg) { + ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED, + "Flow control autoneg is disabled"); + goto out; + } + + hw->mac.ops.check_link(hw, &speed, &link_up, false); + if (!link_up) { + ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down"); + goto out; + } + + switch (hw->phy.media_type) { + /* Autoneg flow control on fiber adapters */ + case ixgbe_media_type_fiber_qsfp: + case ixgbe_media_type_fiber: + if (speed == IXGBE_LINK_SPEED_1GB_FULL) + ret_val = ixgbe_fc_autoneg_fiber(hw); + break; + + /* Autoneg flow control on backplane adapters */ + case ixgbe_media_type_backplane: + ret_val = ixgbe_fc_autoneg_backplane(hw); + break; + + /* Autoneg flow control on copper adapters */ + case ixgbe_media_type_copper: + if (ixgbe_device_supports_autoneg_fc(hw)) + ret_val = ixgbe_fc_autoneg_copper(hw); + break; + + default: + break; + } + +out: + if (ret_val == IXGBE_SUCCESS) { + hw->fc.fc_was_autonegged = true; + } else { + hw->fc.fc_was_autonegged = false; + hw->fc.current_mode = hw->fc.requested_mode; + } +} + +/* + * ixgbe_pcie_timeout_poll - Return number of times to poll for completion + * @hw: pointer to hardware structure + * + * System-wide timeout range is encoded in PCIe Device Control2 register. + * + * Add 10% to specified maximum and return the number of times to poll for + * completion timeout, in units of 100 microsec. Never return less than + * 800 = 80 millisec. + */ +STATIC u32 ixgbe_pcie_timeout_poll(struct ixgbe_hw *hw) +{ + s16 devctl2; + u32 pollcnt; + + devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2); + devctl2 &= IXGBE_PCIDEVCTRL2_TIMEO_MASK; + + switch (devctl2) { + case IXGBE_PCIDEVCTRL2_65_130ms: + pollcnt = 1300; /* 130 millisec */ + break; + case IXGBE_PCIDEVCTRL2_260_520ms: + pollcnt = 5200; /* 520 millisec */ + break; + case IXGBE_PCIDEVCTRL2_1_2s: + pollcnt = 20000; /* 2 sec */ + break; + case IXGBE_PCIDEVCTRL2_4_8s: + pollcnt = 80000; /* 8 sec */ + break; + case IXGBE_PCIDEVCTRL2_17_34s: + pollcnt = 34000; /* 34 sec */ + break; + case IXGBE_PCIDEVCTRL2_50_100us: /* 100 microsecs */ + case IXGBE_PCIDEVCTRL2_1_2ms: /* 2 millisecs */ + case IXGBE_PCIDEVCTRL2_16_32ms: /* 32 millisec */ + case IXGBE_PCIDEVCTRL2_16_32ms_def: /* 32 millisec default */ + default: + pollcnt = 800; /* 80 millisec minimum */ + break; + } + + /* add 10% to spec maximum */ + return (pollcnt * 11) / 10; +} + +/** + * ixgbe_disable_pcie_master - Disable PCI-express master access + * @hw: pointer to hardware structure + * + * Disables PCI-Express master access and verifies there are no pending + * requests. IXGBE_ERR_MASTER_REQUESTS_PENDING is returned if master disable + * bit hasn't caused the master requests to be disabled, else IXGBE_SUCCESS + * is returned signifying master requests disabled. + **/ +s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw) +{ + s32 status = IXGBE_SUCCESS; + u32 i, poll; + u16 value; + + DEBUGFUNC("ixgbe_disable_pcie_master"); + + /* Always set this bit to ensure any future transactions are blocked */ + IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS); + + /* Exit if master requests are blocked */ + if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO) || + IXGBE_REMOVED(hw->hw_addr)) + goto out; + + /* Poll for master request bit to clear */ + for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) { + usec_delay(100); + if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO)) + goto out; + } + + /* + * Two consecutive resets are required via CTRL.RST per datasheet + * 5.2.5.3.2 Master Disable. 
We set a flag to inform the reset routine + * of this need. The first reset prevents new master requests from + * being issued by our device. We then must wait 1usec or more for any + * remaining completions from the PCIe bus to trickle in, and then reset + * again to clear out any effects they may have had on our device. + */ + DEBUGOUT("GIO Master Disable bit didn't clear - requesting resets\n"); + hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED; + + if (hw->mac.type >= ixgbe_mac_X550) + goto out; + + /* + * Before proceeding, make sure that the PCIe block does not have + * transactions pending. + */ + poll = ixgbe_pcie_timeout_poll(hw); + for (i = 0; i < poll; i++) { + usec_delay(100); + value = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS); + if (IXGBE_REMOVED(hw->hw_addr)) + goto out; + if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING)) + goto out; + } + + ERROR_REPORT1(IXGBE_ERROR_POLLING, + "PCIe transaction pending bit also did not clear.\n"); + status = IXGBE_ERR_MASTER_REQUESTS_PENDING; + +out: + return status; +} + +/** + * ixgbe_acquire_swfw_sync - Acquire SWFW semaphore + * @hw: pointer to hardware structure + * @mask: Mask to specify which semaphore to acquire + * + * Acquires the SWFW semaphore through the GSSR register for the specified + * function (CSR, PHY0, PHY1, EEPROM, Flash) + **/ +s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask) +{ + u32 gssr = 0; + u32 swmask = mask; + u32 fwmask = mask << 5; + u32 timeout = 200; + u32 i; + + DEBUGFUNC("ixgbe_acquire_swfw_sync"); + + for (i = 0; i < timeout; i++) { + /* + * SW NVM semaphore bit is used for access to all + * SW_FW_SYNC bits (not just NVM) + */ + if (ixgbe_get_eeprom_semaphore(hw)) + return IXGBE_ERR_SWFW_SYNC; + + gssr = IXGBE_READ_REG(hw, IXGBE_GSSR); + if (!(gssr & (fwmask | swmask))) { + gssr |= swmask; + IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr); + ixgbe_release_eeprom_semaphore(hw); + return IXGBE_SUCCESS; + } else { + /* Resource is currently in use by FW or SW */ + ixgbe_release_eeprom_semaphore(hw); + msec_delay(5); + } + } + + /* If time expired clear the bits holding the lock and retry */ + if (gssr & (fwmask | swmask)) + ixgbe_release_swfw_sync(hw, gssr & (fwmask | swmask)); + + msec_delay(5); + return IXGBE_ERR_SWFW_SYNC; +} + +/** + * ixgbe_release_swfw_sync - Release SWFW semaphore + * @hw: pointer to hardware structure + * @mask: Mask to specify which semaphore to release + * + * Releases the SWFW semaphore through the GSSR register for the specified + * function (CSR, PHY0, PHY1, EEPROM, Flash) + **/ +void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u32 mask) +{ + u32 gssr; + u32 swmask = mask; + + DEBUGFUNC("ixgbe_release_swfw_sync"); + + ixgbe_get_eeprom_semaphore(hw); + + gssr = IXGBE_READ_REG(hw, IXGBE_GSSR); + gssr &= ~swmask; + IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr); + + ixgbe_release_eeprom_semaphore(hw); +} + +/** + * ixgbe_disable_sec_rx_path_generic - Stops the receive data path + * @hw: pointer to hardware structure + * + * Stops the receive data path and waits for the HW to internally empty + * the Rx security block + **/ +s32 ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw) +{ +#define IXGBE_MAX_SECRX_POLL 4000 + + int i; + int secrxreg; + + DEBUGFUNC("ixgbe_disable_sec_rx_path_generic"); + + + secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL); + secrxreg |= IXGBE_SECRXCTRL_RX_DIS; + IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg); + for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) { + secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT); + if (secrxreg & 
IXGBE_SECRXSTAT_SECRX_RDY) + break; + else + /* Use interrupt-safe sleep just in case */ + usec_delay(10); + } + + /* For informational purposes only */ + if (i >= IXGBE_MAX_SECRX_POLL) + DEBUGOUT("Rx unit being enabled before security " + "path fully disabled. Continuing with init.\n"); + + return IXGBE_SUCCESS; +} + +/** + * prot_autoc_read_generic - Hides MAC differences needed for AUTOC read + * @hw: pointer to hardware structure + * @locked: bool to indicate whether the SW/FW lock was taken + * @reg_val: Value we read from AUTOC + * + * The default case requires no protection so just to the register read. + */ +s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *locked, u32 *reg_val) +{ + *locked = false; + *reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC); + return IXGBE_SUCCESS; +} + +/** + * prot_autoc_write_generic - Hides MAC differences needed for AUTOC write + * @hw: pointer to hardware structure + * @reg_val: value to write to AUTOC + * @locked: bool to indicate whether the SW/FW lock was already taken by + * previous read. + * + * The default case requires no protection so just to the register write. + */ +s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked) +{ + UNREFERENCED_1PARAMETER(locked); + + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_val); + return IXGBE_SUCCESS; +} + +/** + * ixgbe_enable_sec_rx_path_generic - Enables the receive data path + * @hw: pointer to hardware structure + * + * Enables the receive data path. + **/ +s32 ixgbe_enable_sec_rx_path_generic(struct ixgbe_hw *hw) +{ + u32 secrxreg; + + DEBUGFUNC("ixgbe_enable_sec_rx_path_generic"); + + secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL); + secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS; + IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg); + IXGBE_WRITE_FLUSH(hw); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_enable_rx_dma_generic - Enable the Rx DMA unit + * @hw: pointer to hardware structure + * @regval: register value to write to RXCTRL + * + * Enables the Rx DMA unit + **/ +s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval) +{ + DEBUGFUNC("ixgbe_enable_rx_dma_generic"); + + if (regval & IXGBE_RXCTRL_RXEN) + ixgbe_enable_rx(hw); + else + ixgbe_disable_rx(hw); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_blink_led_start_generic - Blink LED based on index. + * @hw: pointer to hardware structure + * @index: led number to blink + **/ +s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index) +{ + ixgbe_link_speed speed = 0; + bool link_up = 0; + u32 autoc_reg = 0; + u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); + s32 ret_val = IXGBE_SUCCESS; + bool locked = false; + + DEBUGFUNC("ixgbe_blink_led_start_generic"); + + if (index > 3) + return IXGBE_ERR_PARAM; + + /* + * Link must be up to auto-blink the LEDs; + * Force it if link is down. + */ + hw->mac.ops.check_link(hw, &speed, &link_up, false); + + if (!link_up) { + ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg); + if (ret_val != IXGBE_SUCCESS) + goto out; + + autoc_reg |= IXGBE_AUTOC_AN_RESTART; + autoc_reg |= IXGBE_AUTOC_FLU; + + ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked); + if (ret_val != IXGBE_SUCCESS) + goto out; + + IXGBE_WRITE_FLUSH(hw); + msec_delay(10); + } + + led_reg &= ~IXGBE_LED_MODE_MASK(index); + led_reg |= IXGBE_LED_BLINK(index); + IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); + IXGBE_WRITE_FLUSH(hw); + +out: + return ret_val; +} + +/** + * ixgbe_blink_led_stop_generic - Stop blinking LED based on index. 
+ * @hw: pointer to hardware structure + * @index: led number to stop blinking + **/ +s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index) +{ + u32 autoc_reg = 0; + u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); + s32 ret_val = IXGBE_SUCCESS; + bool locked = false; + + DEBUGFUNC("ixgbe_blink_led_stop_generic"); + + if (index > 3) + return IXGBE_ERR_PARAM; + + + ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg); + if (ret_val != IXGBE_SUCCESS) + goto out; + + autoc_reg &= ~IXGBE_AUTOC_FLU; + autoc_reg |= IXGBE_AUTOC_AN_RESTART; + + ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked); + if (ret_val != IXGBE_SUCCESS) + goto out; + + led_reg &= ~IXGBE_LED_MODE_MASK(index); + led_reg &= ~IXGBE_LED_BLINK(index); + led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index); + IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); + IXGBE_WRITE_FLUSH(hw); + +out: + return ret_val; +} + +/** + * ixgbe_get_san_mac_addr_offset - Get SAN MAC address offset from the EEPROM + * @hw: pointer to hardware structure + * @san_mac_offset: SAN MAC address offset + * + * This function will read the EEPROM location for the SAN MAC address + * pointer, and returns the value at that location. This is used in both + * get and set mac_addr routines. + **/ +STATIC s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw, + u16 *san_mac_offset) +{ + s32 ret_val; + + DEBUGFUNC("ixgbe_get_san_mac_addr_offset"); + + /* + * First read the EEPROM pointer to see if the MAC addresses are + * available. + */ + ret_val = hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR, + san_mac_offset); + if (ret_val) { + ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, + "eeprom at offset %d failed", + IXGBE_SAN_MAC_ADDR_PTR); + } + + return ret_val; +} + +/** + * ixgbe_get_san_mac_addr_generic - SAN MAC address retrieval from the EEPROM + * @hw: pointer to hardware structure + * @san_mac_addr: SAN MAC address + * + * Reads the SAN MAC address from the EEPROM, if it's available. This is + * per-port, so set_lan_id() must be called before reading the addresses. + * set_lan_id() is called by identify_sfp(), but this cannot be relied + * upon for non-SFP connections, so we must call it here. + **/ +s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr) +{ + u16 san_mac_data, san_mac_offset; + u8 i; + s32 ret_val; + + DEBUGFUNC("ixgbe_get_san_mac_addr_generic"); + + /* + * First read the EEPROM pointer to see if the MAC addresses are + * available. If they're not, no point in calling set_lan_id() here. + */ + ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset); + if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF) + goto san_mac_addr_out; + + /* make sure we know which port we need to program */ + hw->mac.ops.set_lan_id(hw); + /* apply the port offset to the address offset */ + (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) : + (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET); + for (i = 0; i < 3; i++) { + ret_val = hw->eeprom.ops.read(hw, san_mac_offset, + &san_mac_data); + if (ret_val) { + ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, + "eeprom read at offset %d failed", + san_mac_offset); + goto san_mac_addr_out; + } + san_mac_addr[i * 2] = (u8)(san_mac_data); + san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8); + san_mac_offset++; + } + return IXGBE_SUCCESS; + +san_mac_addr_out: + /* + * No addresses available in this EEPROM. It's not an + * error though, so just wipe the local address and return. 
+ */ + for (i = 0; i < 6; i++) + san_mac_addr[i] = 0xFF; + return IXGBE_SUCCESS; +} + +/** + * ixgbe_set_san_mac_addr_generic - Write the SAN MAC address to the EEPROM + * @hw: pointer to hardware structure + * @san_mac_addr: SAN MAC address + * + * Write a SAN MAC address to the EEPROM. + **/ +s32 ixgbe_set_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr) +{ + s32 ret_val; + u16 san_mac_data, san_mac_offset; + u8 i; + + DEBUGFUNC("ixgbe_set_san_mac_addr_generic"); + + /* Look for SAN mac address pointer. If not defined, return */ + ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset); + if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF) + return IXGBE_ERR_NO_SAN_ADDR_PTR; + + /* Make sure we know which port we need to write */ + hw->mac.ops.set_lan_id(hw); + /* Apply the port offset to the address offset */ + (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) : + (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET); + + for (i = 0; i < 3; i++) { + san_mac_data = (u16)((u16)(san_mac_addr[i * 2 + 1]) << 8); + san_mac_data |= (u16)(san_mac_addr[i * 2]); + hw->eeprom.ops.write(hw, san_mac_offset, san_mac_data); + san_mac_offset++; + } + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_get_pcie_msix_count_generic - Gets MSI-X vector count + * @hw: pointer to hardware structure + * + * Read PCIe configuration space, and get the MSI-X vector count from + * the capabilities table. + **/ +u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw) +{ + u16 msix_count = 1; + u16 max_msix_count; + u16 pcie_offset; + + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + pcie_offset = IXGBE_PCIE_MSIX_82598_CAPS; + max_msix_count = IXGBE_MAX_MSIX_VECTORS_82598; + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS; + max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599; + break; + default: + return msix_count; + } + + DEBUGFUNC("ixgbe_get_pcie_msix_count_generic"); + msix_count = IXGBE_READ_PCIE_WORD(hw, pcie_offset); + if (IXGBE_REMOVED(hw->hw_addr)) + msix_count = 0; + msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK; + + /* MSI-X count is zero-based in HW */ + msix_count++; + + if (msix_count > max_msix_count) + msix_count = max_msix_count; + + return msix_count; +} + +/** + * ixgbe_insert_mac_addr_generic - Find a RAR for this mac address + * @hw: pointer to hardware structure + * @addr: Address to put into receive address register + * @vmdq: VMDq pool to assign + * + * Puts an ethernet address into a receive address register, or + * finds the rar that it is aleady in; adds to the pool list + **/ +s32 ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, u8 *addr, u32 vmdq) +{ + static const u32 NO_EMPTY_RAR_FOUND = 0xFFFFFFFF; + u32 first_empty_rar = NO_EMPTY_RAR_FOUND; + u32 rar; + u32 rar_low, rar_high; + u32 addr_low, addr_high; + + DEBUGFUNC("ixgbe_insert_mac_addr_generic"); + + /* swap bytes for HW little endian */ + addr_low = addr[0] | (addr[1] << 8) + | (addr[2] << 16) + | (addr[3] << 24); + addr_high = addr[4] | (addr[5] << 8); + + /* + * Either find the mac_id in rar or find the first empty space. + * rar_highwater points to just after the highest currently used + * rar in order to shorten the search. It grows when we add a new + * rar to the top. 
+ */ + for (rar = 0; rar < hw->mac.rar_highwater; rar++) { + rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar)); + + if (((IXGBE_RAH_AV & rar_high) == 0) + && first_empty_rar == NO_EMPTY_RAR_FOUND) { + first_empty_rar = rar; + } else if ((rar_high & 0xFFFF) == addr_high) { + rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(rar)); + if (rar_low == addr_low) + break; /* found it already in the rars */ + } + } + + if (rar < hw->mac.rar_highwater) { + /* already there so just add to the pool bits */ + ixgbe_set_vmdq(hw, rar, vmdq); + } else if (first_empty_rar != NO_EMPTY_RAR_FOUND) { + /* stick it into first empty RAR slot we found */ + rar = first_empty_rar; + ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV); + } else if (rar == hw->mac.rar_highwater) { + /* add it to the top of the list and inc the highwater mark */ + ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV); + hw->mac.rar_highwater++; + } else if (rar >= hw->mac.num_rar_entries) { + return IXGBE_ERR_INVALID_MAC_ADDR; + } + + /* + * If we found rar[0], make sure the default pool bit (we use pool 0) + * remains cleared to be sure default pool packets will get delivered + */ + if (rar == 0) + ixgbe_clear_vmdq(hw, rar, 0); + + return rar; +} + +/** + * ixgbe_clear_vmdq_generic - Disassociate a VMDq pool index from a rx address + * @hw: pointer to hardware struct + * @rar: receive address register index to disassociate + * @vmdq: VMDq pool index to remove from the rar + **/ +s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq) +{ + u32 mpsar_lo, mpsar_hi; + u32 rar_entries = hw->mac.num_rar_entries; + + DEBUGFUNC("ixgbe_clear_vmdq_generic"); + + /* Make sure we are using a valid rar index range */ + if (rar >= rar_entries) { + ERROR_REPORT2(IXGBE_ERROR_ARGUMENT, + "RAR index %d is out of range.\n", rar); + return IXGBE_ERR_INVALID_ARGUMENT; + } + + mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar)); + mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar)); + + if (IXGBE_REMOVED(hw->hw_addr)) + goto done; + + if (!mpsar_lo && !mpsar_hi) + goto done; + + if (vmdq == IXGBE_CLEAR_VMDQ_ALL) { + if (mpsar_lo) { + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0); + mpsar_lo = 0; + } + if (mpsar_hi) { + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0); + mpsar_hi = 0; + } + } else if (vmdq < 32) { + mpsar_lo &= ~(1 << vmdq); + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo); + } else { + mpsar_hi &= ~(1 << (vmdq - 32)); + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi); + } + + /* was that the last pool using this rar? 
*/ + if (mpsar_lo == 0 && mpsar_hi == 0 && + rar != 0 && rar != hw->mac.san_mac_rar_index) + hw->mac.ops.clear_rar(hw, rar); +done: + return IXGBE_SUCCESS; +} + +/** + * ixgbe_set_vmdq_generic - Associate a VMDq pool index with a rx address + * @hw: pointer to hardware struct + * @rar: receive address register index to associate with a VMDq index + * @vmdq: VMDq pool index + **/ +s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq) +{ + u32 mpsar; + u32 rar_entries = hw->mac.num_rar_entries; + + DEBUGFUNC("ixgbe_set_vmdq_generic"); + + /* Make sure we are using a valid rar index range */ + if (rar >= rar_entries) { + ERROR_REPORT2(IXGBE_ERROR_ARGUMENT, + "RAR index %d is out of range.\n", rar); + return IXGBE_ERR_INVALID_ARGUMENT; + } + + if (vmdq < 32) { + mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar)); + mpsar |= 1 << vmdq; + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar); + } else { + mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar)); + mpsar |= 1 << (vmdq - 32); + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar); + } + return IXGBE_SUCCESS; +} + +/** + * This function should only be involved in the IOV mode. + * In IOV mode, Default pool is next pool after the number of + * VFs advertized and not 0. + * MPSAR table needs to be updated for SAN_MAC RAR [hw->mac.san_mac_rar_index] + * + * ixgbe_set_vmdq_san_mac - Associate default VMDq pool index with a rx address + * @hw: pointer to hardware struct + * @vmdq: VMDq pool index + **/ +s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq) +{ + u32 rar = hw->mac.san_mac_rar_index; + + DEBUGFUNC("ixgbe_set_vmdq_san_mac"); + + if (vmdq < 32) { + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 1 << vmdq); + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0); + } else { + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0); + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 1 << (vmdq - 32)); + } + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array + * @hw: pointer to hardware structure + **/ +s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw) +{ + int i; + + DEBUGFUNC("ixgbe_init_uta_tables_generic"); + DEBUGOUT(" Clearing UTA\n"); + + for (i = 0; i < 128; i++) + IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_find_vlvf_slot - find the vlanid or the first empty slot + * @hw: pointer to hardware structure + * @vlan: VLAN id to write to VLAN filter + * @vlvf_bypass: true to find vlanid only, false returns first empty slot if + * vlanid not found + * + * + * return the VLVF index where this VLAN id should be placed + * + **/ +s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan, bool vlvf_bypass) +{ + s32 regindex, first_empty_slot; + u32 bits; + + /* short cut the special case */ + if (vlan == 0) + return 0; + + /* if vlvf_bypass is set we don't want to use an empty slot, we + * will simply bypass the VLVF if there are no entries present in the + * VLVF that contain our VLAN + */ + first_empty_slot = vlvf_bypass ? IXGBE_ERR_NO_SPACE : 0; + + /* add VLAN enable bit for comparison */ + vlan |= IXGBE_VLVF_VIEN; + + /* Search for the vlan id in the VLVF entries. Save off the first empty + * slot found along the way. + * + * pre-decrement loop covering (IXGBE_VLVF_ENTRIES - 1) .. 1 + */ + for (regindex = IXGBE_VLVF_ENTRIES; --regindex;) { + bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex)); + if (bits == vlan) + return regindex; + if (!first_empty_slot && !bits) + first_empty_slot = regindex; + } + + /* If we are here then we didn't find the VLAN. 
Return first empty + * slot we found during our search, else error. + */ + if (!first_empty_slot) + ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "No space in VLVF.\n"); + + return first_empty_slot ? first_empty_slot : IXGBE_ERR_NO_SPACE; +} + +/** + * ixgbe_set_vfta_generic - Set VLAN filter table + * @hw: pointer to hardware structure + * @vlan: VLAN id to write to VLAN filter + * @vind: VMDq output index that maps queue to VLAN id in VLVFB + * @vlan_on: boolean flag to turn on/off VLAN + * @vlvf_bypass: boolean flag indicating updating default pool is okay + * + * Turn on/off specified VLAN in the VLAN filter table. + **/ +s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind, + bool vlan_on, bool vlvf_bypass) +{ + u32 regidx, vfta_delta, vfta; + s32 ret_val; + + DEBUGFUNC("ixgbe_set_vfta_generic"); + + if (vlan > 4095 || vind > 63) + return IXGBE_ERR_PARAM; + + /* + * this is a 2 part operation - first the VFTA, then the + * VLVF and VLVFB if VT Mode is set + * We don't write the VFTA until we know the VLVF part succeeded. + */ + + /* Part 1 + * The VFTA is a bitstring made up of 128 32-bit registers + * that enable the particular VLAN id, much like the MTA: + * bits[11-5]: which register + * bits[4-0]: which bit in the register + */ + regidx = vlan / 32; + vfta_delta = 1 << (vlan % 32); + vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regidx)); + + /* + * vfta_delta represents the difference between the current value + * of vfta and the value we want in the register. Since the diff + * is an XOR mask we can just update the vfta using an XOR + */ + vfta_delta &= vlan_on ? ~vfta : vfta; + vfta ^= vfta_delta; + + /* Part 2 + * Call ixgbe_set_vlvf_generic to set VLVFB and VLVF + */ + ret_val = ixgbe_set_vlvf_generic(hw, vlan, vind, vlan_on, &vfta_delta, + vfta, vlvf_bypass); + if (ret_val != IXGBE_SUCCESS) { + if (vlvf_bypass) + goto vfta_update; + return ret_val; + } + +vfta_update: + /* Update VFTA now that we are ready for traffic */ + if (vfta_delta) + IXGBE_WRITE_REG(hw, IXGBE_VFTA(regidx), vfta); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_set_vlvf_generic - Set VLAN Pool Filter + * @hw: pointer to hardware structure + * @vlan: VLAN id to write to VLAN filter + * @vind: VMDq output index that maps queue to VLAN id in VLVFB + * @vlan_on: boolean flag to turn on/off VLAN in VLVF + * @vfta_delta: pointer to the difference between the current value of VFTA + * and the desired value + * @vfta: the desired value of the VFTA + * @vlvf_bypass: boolean flag indicating updating default pool is okay + * + * Turn on/off specified bit in VLVF table. 
+ **/ +s32 ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind, + bool vlan_on, u32 *vfta_delta, u32 vfta, + bool vlvf_bypass) +{ + u32 bits; + s32 vlvf_index; + + DEBUGFUNC("ixgbe_set_vlvf_generic"); + + if (vlan > 4095 || vind > 63) + return IXGBE_ERR_PARAM; + + /* If VT Mode is set + * Either vlan_on + * make sure the vlan is in VLVF + * set the vind bit in the matching VLVFB + * Or !vlan_on + * clear the pool bit and possibly the vind + */ + if (!(IXGBE_READ_REG(hw, IXGBE_VT_CTL) & IXGBE_VT_CTL_VT_ENABLE)) + return IXGBE_SUCCESS; + + vlvf_index = ixgbe_find_vlvf_slot(hw, vlan, vlvf_bypass); + if (vlvf_index < 0) + return vlvf_index; + + bits = IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32)); + + /* set the pool bit */ + bits |= 1 << (vind % 32); + if (vlan_on) + goto vlvf_update; + + /* clear the pool bit */ + bits ^= 1 << (vind % 32); + + if (!bits && + !IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + 1 - vind / 32))) { + /* Clear VFTA first, then disable VLVF. Otherwise + * we run the risk of stray packets leaking into + * the PF via the default pool + */ + if (*vfta_delta) + IXGBE_WRITE_REG(hw, IXGBE_VFTA(vlan / 32), vfta); + + /* disable VLVF and clear remaining bit from pool */ + IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0); + IXGBE_WRITE_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32), 0); + + return IXGBE_SUCCESS; + } + + /* If there are still bits set in the VLVFB registers + * for the VLAN ID indicated we need to see if the + * caller is requesting that we clear the VFTA entry bit. + * If the caller has requested that we clear the VFTA + * entry bit but there are still pools/VFs using this VLAN + * ID entry then ignore the request. We're not worried + * about the case where we're turning the VFTA VLAN ID + * entry bit on, only when requested to turn it off as + * there may be multiple pools and/or VFs using the + * VLAN ID entry. In that case we cannot clear the + * VFTA bit until all pools/VFs using that VLAN ID have also + * been cleared. This will be indicated by "bits" being + * zero. + */ + *vfta_delta = 0; + +vlvf_update: + /* record pool change and enable VLAN ID if not already enabled */ + IXGBE_WRITE_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32), bits); + IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), IXGBE_VLVF_VIEN | vlan); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_clear_vfta_generic - Clear VLAN filter table + * @hw: pointer to hardware structure + * + * Clears the VLAN filer table, and the VMDq index associated with the filter + **/ +s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw) +{ + u32 offset; + + DEBUGFUNC("ixgbe_clear_vfta_generic"); + + for (offset = 0; offset < hw->mac.vft_size; offset++) + IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0); + + for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) { + IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0); + IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2), 0); + IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2 + 1), 0); + } + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_need_crosstalk_fix - Determine if we need to do cross talk fix + * @hw: pointer to hardware structure + * + * Contains the logic to identify if we need to verify link for the + * crosstalk fix + **/ +static bool ixgbe_need_crosstalk_fix(struct ixgbe_hw *hw) +{ + + /* Does FW say we need the fix */ + if (!hw->need_crosstalk_fix) + return false; + + /* Only consider SFP+ PHYs i.e. 
media type fiber */ + switch (hw->mac.ops.get_media_type(hw)) { + case ixgbe_media_type_fiber: + case ixgbe_media_type_fiber_qsfp: + break; + default: + return false; + } + + return true; +} + +/** + * ixgbe_check_mac_link_generic - Determine link and speed status + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @link_up: true when link is up + * @link_up_wait_to_complete: bool used to wait for link up or not + * + * Reads the links register to determine if link is up and the current speed + **/ +s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed, + bool *link_up, bool link_up_wait_to_complete) +{ + u32 links_reg, links_orig; + u32 i; + + DEBUGFUNC("ixgbe_check_mac_link_generic"); + + /* If Crosstalk fix enabled do the sanity check of making sure + * the SFP+ cage is full. + */ + if (ixgbe_need_crosstalk_fix(hw)) { + u32 sfp_cage_full; + + switch (hw->mac.type) { + case ixgbe_mac_82599EB: + sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) & + IXGBE_ESDP_SDP2; + break; + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) & + IXGBE_ESDP_SDP0; + break; + default: + /* sanity check - No SFP+ devices here */ + sfp_cage_full = false; + break; + } + + if (!sfp_cage_full) { + *link_up = false; + *speed = IXGBE_LINK_SPEED_UNKNOWN; + return IXGBE_SUCCESS; + } + } + + /* clear the old state */ + links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS); + + links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); + + if (links_orig != links_reg) { + DEBUGOUT2("LINKS changed from %08X to %08X\n", + links_orig, links_reg); + } + + if (link_up_wait_to_complete) { + for (i = 0; i < hw->mac.max_link_up_time; i++) { + if (links_reg & IXGBE_LINKS_UP) { + *link_up = true; + break; + } else { + *link_up = false; + } + msec_delay(100); + links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); + } + } else { + if (links_reg & IXGBE_LINKS_UP) + *link_up = true; + else + *link_up = false; + } + + switch (links_reg & IXGBE_LINKS_SPEED_82599) { + case IXGBE_LINKS_SPEED_10G_82599: + *speed = IXGBE_LINK_SPEED_10GB_FULL; + if (hw->mac.type >= ixgbe_mac_X550) { + if (links_reg & IXGBE_LINKS_SPEED_NON_STD) + *speed = IXGBE_LINK_SPEED_2_5GB_FULL; + } + break; + case IXGBE_LINKS_SPEED_1G_82599: + *speed = IXGBE_LINK_SPEED_1GB_FULL; + break; + case IXGBE_LINKS_SPEED_100_82599: + *speed = IXGBE_LINK_SPEED_100_FULL; + if (hw->mac.type == ixgbe_mac_X550) { + if (links_reg & IXGBE_LINKS_SPEED_NON_STD) + *speed = IXGBE_LINK_SPEED_5GB_FULL; + } + break; + case IXGBE_LINKS_SPEED_10_X550EM_A: + *speed = IXGBE_LINK_SPEED_UNKNOWN; +#ifdef PREBOOT_SUPPORT + if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T || + hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L || + hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII || + hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII_L) + *speed = IXGBE_LINK_SPEED_10_FULL; +#else + if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T || + hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L) + *speed = IXGBE_LINK_SPEED_10_FULL; +#endif /* PREBOOT_SUPPORT */ + break; + default: + *speed = IXGBE_LINK_SPEED_UNKNOWN; + } + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from + * the EEPROM + * @hw: pointer to hardware structure + * @wwnn_prefix: the alternative WWNN prefix + * @wwpn_prefix: the alternative WWPN prefix + * + * This function will read the EEPROM from the alternative SAN MAC address + * block to check the support for the alternative WWNN/WWPN prefix support. 
+ **/ +s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix, + u16 *wwpn_prefix) +{ + u16 offset, caps; + u16 alt_san_mac_blk_offset; + + DEBUGFUNC("ixgbe_get_wwn_prefix_generic"); + + /* clear output first */ + *wwnn_prefix = 0xFFFF; + *wwpn_prefix = 0xFFFF; + + /* check if alternative SAN MAC is supported */ + offset = IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR; + if (hw->eeprom.ops.read(hw, offset, &alt_san_mac_blk_offset)) + goto wwn_prefix_err; + + if ((alt_san_mac_blk_offset == 0) || + (alt_san_mac_blk_offset == 0xFFFF)) + goto wwn_prefix_out; + + /* check capability in alternative san mac address block */ + offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET; + if (hw->eeprom.ops.read(hw, offset, &caps)) + goto wwn_prefix_err; + if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN)) + goto wwn_prefix_out; + + /* get the corresponding prefix for WWNN/WWPN */ + offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET; + if (hw->eeprom.ops.read(hw, offset, wwnn_prefix)) { + ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, + "eeprom read at offset %d failed", offset); + } + + offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET; + if (hw->eeprom.ops.read(hw, offset, wwpn_prefix)) + goto wwn_prefix_err; + +wwn_prefix_out: + return IXGBE_SUCCESS; + +wwn_prefix_err: + ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, + "eeprom read at offset %d failed", offset); + return IXGBE_SUCCESS; +} + +/** + * ixgbe_get_fcoe_boot_status_generic - Get FCOE boot status from EEPROM + * @hw: pointer to hardware structure + * @bs: the fcoe boot status + * + * This function will read the FCOE boot status from the iSCSI FCOE block + **/ +s32 ixgbe_get_fcoe_boot_status_generic(struct ixgbe_hw *hw, u16 *bs) +{ + u16 offset, caps, flags; + s32 status; + + DEBUGFUNC("ixgbe_get_fcoe_boot_status_generic"); + + /* clear output first */ + *bs = ixgbe_fcoe_bootstatus_unavailable; + + /* check if FCOE IBA block is present */ + offset = IXGBE_FCOE_IBA_CAPS_BLK_PTR; + status = hw->eeprom.ops.read(hw, offset, &caps); + if (status != IXGBE_SUCCESS) + goto out; + + if (!(caps & IXGBE_FCOE_IBA_CAPS_FCOE)) + goto out; + + /* check if iSCSI FCOE block is populated */ + status = hw->eeprom.ops.read(hw, IXGBE_ISCSI_FCOE_BLK_PTR, &offset); + if (status != IXGBE_SUCCESS) + goto out; + + if ((offset == 0) || (offset == 0xFFFF)) + goto out; + + /* read fcoe flags in iSCSI FCOE block */ + offset = offset + IXGBE_ISCSI_FCOE_FLAGS_OFFSET; + status = hw->eeprom.ops.read(hw, offset, &flags); + if (status != IXGBE_SUCCESS) + goto out; + + if (flags & IXGBE_ISCSI_FCOE_FLAGS_ENABLE) + *bs = ixgbe_fcoe_bootstatus_enabled; + else + *bs = ixgbe_fcoe_bootstatus_disabled; + +out: + return status; +} + +/** + * ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing + * @hw: pointer to hardware structure + * @enable: enable or disable switch for MAC anti-spoofing + * @vf: Virtual Function pool - VF Pool to set for MAC anti-spoofing + * + **/ +void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf) +{ + int vf_target_reg = vf >> 3; + int vf_target_shift = vf % 8; + u32 pfvfspoof; + + if (hw->mac.type == ixgbe_mac_82598EB) + return; + + pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg)); + if (enable) + pfvfspoof |= (1 << vf_target_shift); + else + pfvfspoof &= ~(1 << vf_target_shift); + IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof); +} + +/** + * ixgbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing + * @hw: pointer to hardware structure + * @enable: 
enable or disable switch for VLAN anti-spoofing + * @vf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing + * + **/ +void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf) +{ + int vf_target_reg = vf >> 3; + int vf_target_shift = vf % 8 + IXGBE_SPOOF_VLANAS_SHIFT; + u32 pfvfspoof; + + if (hw->mac.type == ixgbe_mac_82598EB) + return; + + pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg)); + if (enable) + pfvfspoof |= (1 << vf_target_shift); + else + pfvfspoof &= ~(1 << vf_target_shift); + IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof); +} + +/** + * ixgbe_get_device_caps_generic - Get additional device capabilities + * @hw: pointer to hardware structure + * @device_caps: the EEPROM word with the extra device capabilities + * + * This function will read the EEPROM location for the device capabilities, + * and return the word through device_caps. + **/ +s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps) +{ + DEBUGFUNC("ixgbe_get_device_caps_generic"); + + hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_enable_relaxed_ordering_gen2 - Enable relaxed ordering + * @hw: pointer to hardware structure + * + **/ +void ixgbe_enable_relaxed_ordering_gen2(struct ixgbe_hw *hw) +{ + u32 regval; + u32 i; + + DEBUGFUNC("ixgbe_enable_relaxed_ordering_gen2"); + + /* Enable relaxed ordering */ + for (i = 0; i < hw->mac.max_tx_queues; i++) { + regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i)); + regval |= IXGBE_DCA_TXCTRL_DESC_WRO_EN; + IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval); + } + + for (i = 0; i < hw->mac.max_rx_queues; i++) { + regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); + regval |= IXGBE_DCA_RXCTRL_DATA_WRO_EN | + IXGBE_DCA_RXCTRL_HEAD_WRO_EN; + IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval); + } + +} + +/** + * ixgbe_calculate_checksum - Calculate checksum for buffer + * @buffer: pointer to EEPROM + * @length: size of EEPROM to calculate a checksum for + * Calculates the checksum for some buffer on a specified length. The + * checksum calculated is returned. + **/ +u8 ixgbe_calculate_checksum(u8 *buffer, u32 length) +{ + u32 i; + u8 sum = 0; + + DEBUGFUNC("ixgbe_calculate_checksum"); + + if (!buffer) + return 0; + + for (i = 0; i < length; i++) + sum += buffer[i]; + + return (u8) (0 - sum); +} + +/** + * ixgbe_hic_unlocked - Issue command to manageability block unlocked + * @hw: pointer to the HW structure + * @buffer: command to write and where the return status will be placed + * @length: length of buffer, must be multiple of 4 bytes + * @timeout: time in ms to wait for command completion + * + * Communicates with the manageability block. On success return IXGBE_SUCCESS + * else returns semaphore error when encountering an error acquiring + * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails. + * + * This function assumes that the IXGBE_GSSR_SW_MNG_SM semaphore is held + * by the caller. 
+ **/ +s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length, + u32 timeout) +{ + u32 hicr, i, fwsts; + u16 dword_len; + + DEBUGFUNC("ixgbe_hic_unlocked"); + + if (!length || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) { + DEBUGOUT1("Buffer length failure buffersize=%d.\n", length); + return IXGBE_ERR_HOST_INTERFACE_COMMAND; + } + + /* Set bit 9 of FWSTS clearing FW reset indication */ + fwsts = IXGBE_READ_REG(hw, IXGBE_FWSTS); + IXGBE_WRITE_REG(hw, IXGBE_FWSTS, fwsts | IXGBE_FWSTS_FWRI); + + /* Check that the host interface is enabled. */ + hicr = IXGBE_READ_REG(hw, IXGBE_HICR); + if (!(hicr & IXGBE_HICR_EN)) { + DEBUGOUT("IXGBE_HOST_EN bit disabled.\n"); + return IXGBE_ERR_HOST_INTERFACE_COMMAND; + } + + /* Calculate length in DWORDs. We must be DWORD aligned */ + if (length % sizeof(u32)) { + DEBUGOUT("Buffer length failure, not aligned to dword"); + return IXGBE_ERR_INVALID_ARGUMENT; + } + + dword_len = length >> 2; + + /* The device driver writes the relevant command block + * into the ram area. + */ + for (i = 0; i < dword_len; i++) + IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG, + i, IXGBE_CPU_TO_LE32(buffer[i])); + + /* Setting this bit tells the ARC that a new command is pending. */ + IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C); + + for (i = 0; i < timeout; i++) { + hicr = IXGBE_READ_REG(hw, IXGBE_HICR); + if (!(hicr & IXGBE_HICR_C)) + break; + msec_delay(1); + } + + /* Check command completion */ + if ((timeout && i == timeout) || + !(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV)) { + ERROR_REPORT1(IXGBE_ERROR_CAUTION, + "Command has failed with no status valid.\n"); + return IXGBE_ERR_HOST_INTERFACE_COMMAND; + } + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_host_interface_command - Issue command to manageability block + * @hw: pointer to the HW structure + * @buffer: contains the command to write and where the return status will + * be placed + * @length: length of buffer, must be multiple of 4 bytes + * @timeout: time in ms to wait for command completion + * @return_data: read and return data from the buffer (true) or not (false) + * Needed because FW structures are big endian and decoding of + * these fields can be 8 bit or 16 bit based on command. Decoding + * is not easily understood without making a table of commands. + * So we will leave this up to the caller to read back the data + * in these cases. + * + * Communicates with the manageability block. On success return IXGBE_SUCCESS + * else returns semaphore error when encountering an error acquiring + * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails. 
+ **/ +s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer, + u32 length, u32 timeout, bool return_data) +{ + u32 hdr_size = sizeof(struct ixgbe_hic_hdr); + struct ixgbe_hic_hdr *resp = (struct ixgbe_hic_hdr *)buffer; + u16 buf_len; + s32 status; + u32 bi; + u32 dword_len; + + DEBUGFUNC("ixgbe_host_interface_command"); + + if (length == 0 || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) { + DEBUGOUT1("Buffer length failure buffersize=%d.\n", length); + return IXGBE_ERR_HOST_INTERFACE_COMMAND; + } + + /* Take management host interface semaphore */ + status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM); + if (status) + return status; + + status = ixgbe_hic_unlocked(hw, buffer, length, timeout); + if (status) + goto rel_out; + + if (!return_data) + goto rel_out; + + /* Calculate length in DWORDs */ + dword_len = hdr_size >> 2; + + /* first pull in the header so we know the buffer length */ + for (bi = 0; bi < dword_len; bi++) { + buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi); + IXGBE_LE32_TO_CPUS((uintptr_t)&buffer[bi]); + } + + /* + * If there is any thing in data position pull it in + * Read Flash command requires reading buffer length from + * two byes instead of one byte + */ + if (resp->cmd == 0x30) { + for (; bi < dword_len + 2; bi++) { + buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, + bi); + IXGBE_LE32_TO_CPUS(&buffer[bi]); + } + buf_len = (((u16)(resp->cmd_or_resp.ret_status) << 3) + & 0xF00) | resp->buf_len; + hdr_size += (2 << 2); + } else { + buf_len = resp->buf_len; + } + if (!buf_len) + goto rel_out; + + if (length < buf_len + hdr_size) { + DEBUGOUT("Buffer not large enough for reply message.\n"); + status = IXGBE_ERR_HOST_INTERFACE_COMMAND; + goto rel_out; + } + + /* Calculate length in DWORDs, add 3 for odd lengths */ + dword_len = (buf_len + 3) >> 2; + + /* Pull in the rest of the buffer (bi is where we left off) */ + for (; bi <= dword_len; bi++) { + buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi); + IXGBE_LE32_TO_CPUS((uintptr_t)&buffer[bi]); + } + +rel_out: + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM); + + return status; +} + +/** + * ixgbe_set_fw_drv_ver_generic - Sends driver version to firmware + * @hw: pointer to the HW structure + * @maj: driver version major number + * @min: driver version minor number + * @build: driver version build number + * @sub: driver version sub build number + * @len: unused + * @driver_ver: unused + * + * Sends driver version number to firmware through the manageability + * block. On success return IXGBE_SUCCESS + * else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring + * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails. 
+ **/ +s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min, + u8 build, u8 sub, u16 len, + const char *driver_ver) +{ + struct ixgbe_hic_drv_info fw_cmd; + int i; + s32 ret_val = IXGBE_SUCCESS; + + DEBUGFUNC("ixgbe_set_fw_drv_ver_generic"); + UNREFERENCED_2PARAMETER(len, driver_ver); + + fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO; + fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN; + fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; + fw_cmd.port_num = (u8)hw->bus.func; + fw_cmd.ver_maj = maj; + fw_cmd.ver_min = min; + fw_cmd.ver_build = build; + fw_cmd.ver_sub = sub; + fw_cmd.hdr.checksum = 0; + fw_cmd.pad = 0; + fw_cmd.pad2 = 0; + fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd, + (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len)); + + for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) { + ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd, + sizeof(fw_cmd), + IXGBE_HI_COMMAND_TIMEOUT, + true); + if (ret_val != IXGBE_SUCCESS) + continue; + + if (fw_cmd.hdr.cmd_or_resp.ret_status == + FW_CEM_RESP_STATUS_SUCCESS) + ret_val = IXGBE_SUCCESS; + else + ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND; + + break; + } + + return ret_val; +} + +/** + * ixgbe_set_rxpba_generic - Initialize Rx packet buffer + * @hw: pointer to hardware structure + * @num_pb: number of packet buffers to allocate + * @headroom: reserve n KB of headroom + * @strategy: packet buffer allocation strategy + **/ +void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom, + int strategy) +{ + u32 pbsize = hw->mac.rx_pb_size; + int i = 0; + u32 rxpktsize, txpktsize, txpbthresh; + + /* Reserve headroom */ + pbsize -= headroom; + + if (!num_pb) + num_pb = 1; + + /* Divide remaining packet buffer space amongst the number of packet + * buffers requested using supplied strategy. + */ + switch (strategy) { + case PBA_STRATEGY_WEIGHTED: + /* ixgbe_dcb_pba_80_48 strategy weight first half of packet + * buffer with 5/8 of the packet buffer space. + */ + rxpktsize = (pbsize * 5) / (num_pb * 4); + pbsize -= rxpktsize * (num_pb / 2); + rxpktsize <<= IXGBE_RXPBSIZE_SHIFT; + for (; i < (num_pb / 2); i++) + IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize); + /* fall through - configure remaining packet buffers */ + case PBA_STRATEGY_EQUAL: + rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT; + for (; i < num_pb; i++) + IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize); + break; + default: + break; + } + + /* Only support an equally distributed Tx packet buffer strategy. */ + txpktsize = IXGBE_TXPBSIZE_MAX / num_pb; + txpbthresh = (txpktsize / 1024) - IXGBE_TXPKT_SIZE_MAX; + for (i = 0; i < num_pb; i++) { + IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize); + IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh); + } + + /* Clear unused TCs, if any, to zero buffer size*/ + for (; i < IXGBE_MAX_PB; i++) { + IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0); + IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0); + IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0); + } +} + +/** + * ixgbe_clear_tx_pending - Clear pending TX work from the PCIe fifo + * @hw: pointer to the hardware structure + * + * The 82599 and x540 MACs can experience issues if TX work is still pending + * when a reset occurs. This function prevents this by flushing the PCIe + * buffers on the system. 
+ **/ +void ixgbe_clear_tx_pending(struct ixgbe_hw *hw) +{ + u32 gcr_ext, hlreg0, i, poll; + u16 value; + + /* + * If double reset is not requested then all transactions should + * already be clear and as such there is no work to do + */ + if (!(hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED)) + return; + + /* + * Set loopback enable to prevent any transmits from being sent + * should the link come up. This assumes that the RXCTRL.RXEN bit + * has already been cleared. + */ + hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); + IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0 | IXGBE_HLREG0_LPBK); + + /* Wait for a last completion before clearing buffers */ + IXGBE_WRITE_FLUSH(hw); + msec_delay(3); + + /* + * Before proceeding, make sure that the PCIe block does not have + * transactions pending. + */ + poll = ixgbe_pcie_timeout_poll(hw); + for (i = 0; i < poll; i++) { + usec_delay(100); + value = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS); + if (IXGBE_REMOVED(hw->hw_addr)) + goto out; + if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING)) + goto out; + } + +out: + /* initiate cleaning flow for buffers in the PCIe transaction layer */ + gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT); + IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, + gcr_ext | IXGBE_GCR_EXT_BUFFERS_CLEAR); + + /* Flush all writes and allow 20usec for all transactions to clear */ + IXGBE_WRITE_FLUSH(hw); + usec_delay(20); + + /* restore previous register values */ + IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext); + IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); +} + +STATIC const u8 ixgbe_emc_temp_data[4] = { + IXGBE_EMC_INTERNAL_DATA, + IXGBE_EMC_DIODE1_DATA, + IXGBE_EMC_DIODE2_DATA, + IXGBE_EMC_DIODE3_DATA +}; +STATIC const u8 ixgbe_emc_therm_limit[4] = { + IXGBE_EMC_INTERNAL_THERM_LIMIT, + IXGBE_EMC_DIODE1_THERM_LIMIT, + IXGBE_EMC_DIODE2_THERM_LIMIT, + IXGBE_EMC_DIODE3_THERM_LIMIT +}; + +/** + * ixgbe_get_thermal_sensor_data - Gathers thermal sensor data + * @hw: pointer to hardware structure + * + * Returns the thermal sensor data structure + **/ +s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw) +{ + s32 status = IXGBE_SUCCESS; + u16 ets_offset; + u16 ets_cfg; + u16 ets_sensor; + u8 num_sensors; + u8 sensor_index; + u8 sensor_location; + u8 i; + struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data; + + DEBUGFUNC("ixgbe_get_thermal_sensor_data_generic"); + + /* Only support thermal sensors attached to 82599 physical port 0 */ + if ((hw->mac.type != ixgbe_mac_82599EB) || + (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)) { + status = IXGBE_NOT_IMPLEMENTED; + goto out; + } + + status = hw->eeprom.ops.read(hw, IXGBE_ETS_CFG, &ets_offset); + if (status) + goto out; + + if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF)) { + status = IXGBE_NOT_IMPLEMENTED; + goto out; + } + + status = hw->eeprom.ops.read(hw, ets_offset, &ets_cfg); + if (status) + goto out; + + if (((ets_cfg & IXGBE_ETS_TYPE_MASK) >> IXGBE_ETS_TYPE_SHIFT) + != IXGBE_ETS_TYPE_EMC) { + status = IXGBE_NOT_IMPLEMENTED; + goto out; + } + + num_sensors = (ets_cfg & IXGBE_ETS_NUM_SENSORS_MASK); + if (num_sensors > IXGBE_MAX_SENSORS) + num_sensors = IXGBE_MAX_SENSORS; + + for (i = 0; i < num_sensors; i++) { + status = hw->eeprom.ops.read(hw, (ets_offset + 1 + i), + &ets_sensor); + if (status) + goto out; + + sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >> + IXGBE_ETS_DATA_INDEX_SHIFT); + sensor_location = ((ets_sensor & IXGBE_ETS_DATA_LOC_MASK) >> + IXGBE_ETS_DATA_LOC_SHIFT); + + if (sensor_location != 0) { + status = 
hw->phy.ops.read_i2c_byte(hw, + ixgbe_emc_temp_data[sensor_index], + IXGBE_I2C_THERMAL_SENSOR_ADDR, + &data->sensor[i].temp); + if (status) + goto out; + } + } +out: + return status; +} + +/** + * ixgbe_init_thermal_sensor_thresh_generic - Inits thermal sensor thresholds + * @hw: pointer to hardware structure + * + * Inits the thermal sensor thresholds according to the NVM map + * and save off the threshold and location values into mac.thermal_sensor_data + **/ +s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw) +{ + s32 status = IXGBE_SUCCESS; + u16 offset; + u16 ets_offset; + u16 ets_cfg; + u16 ets_sensor; + u8 low_thresh_delta; + u8 num_sensors; + u8 sensor_index; + u8 sensor_location; + u8 therm_limit; + u8 i; + struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data; + + DEBUGFUNC("ixgbe_init_thermal_sensor_thresh_generic"); + + memset(data, 0, sizeof(struct ixgbe_thermal_sensor_data)); + + /* Only support thermal sensors attached to 82599 physical port 0 */ + if ((hw->mac.type != ixgbe_mac_82599EB) || + (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)) + return IXGBE_NOT_IMPLEMENTED; + + offset = IXGBE_ETS_CFG; + if (hw->eeprom.ops.read(hw, offset, &ets_offset)) + goto eeprom_err; + if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF)) + return IXGBE_NOT_IMPLEMENTED; + + offset = ets_offset; + if (hw->eeprom.ops.read(hw, offset, &ets_cfg)) + goto eeprom_err; + if (((ets_cfg & IXGBE_ETS_TYPE_MASK) >> IXGBE_ETS_TYPE_SHIFT) + != IXGBE_ETS_TYPE_EMC) + return IXGBE_NOT_IMPLEMENTED; + + low_thresh_delta = ((ets_cfg & IXGBE_ETS_LTHRES_DELTA_MASK) >> + IXGBE_ETS_LTHRES_DELTA_SHIFT); + num_sensors = (ets_cfg & IXGBE_ETS_NUM_SENSORS_MASK); + + for (i = 0; i < num_sensors; i++) { + offset = ets_offset + 1 + i; + if (hw->eeprom.ops.read(hw, offset, &ets_sensor)) { + ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, + "eeprom read at offset %d failed", + offset); + continue; + } + sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >> + IXGBE_ETS_DATA_INDEX_SHIFT); + sensor_location = ((ets_sensor & IXGBE_ETS_DATA_LOC_MASK) >> + IXGBE_ETS_DATA_LOC_SHIFT); + therm_limit = ets_sensor & IXGBE_ETS_DATA_HTHRESH_MASK; + + hw->phy.ops.write_i2c_byte(hw, + ixgbe_emc_therm_limit[sensor_index], + IXGBE_I2C_THERMAL_SENSOR_ADDR, therm_limit); + + if ((i < IXGBE_MAX_SENSORS) && (sensor_location != 0)) { + data->sensor[i].location = sensor_location; + data->sensor[i].caution_thresh = therm_limit; + data->sensor[i].max_op_thresh = therm_limit - + low_thresh_delta; + } + } + return status; + +eeprom_err: + ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, + "eeprom read at offset %d failed", offset); + return IXGBE_NOT_IMPLEMENTED; +} + +/** + * ixgbe_get_orom_version - Return option ROM from EEPROM + * + * @hw: pointer to hardware structure + * @nvm_ver: pointer to output structure + * + * if valid option ROM version, nvm_ver->or_valid set to true + * else nvm_ver->or_valid is false. + **/ +void ixgbe_get_orom_version(struct ixgbe_hw *hw, + struct ixgbe_nvm_version *nvm_ver) +{ + u16 offset, eeprom_cfg_blkh, eeprom_cfg_blkl; + + nvm_ver->or_valid = false; + /* Option Rom may or may not be present. 
Start with pointer */ + hw->eeprom.ops.read(hw, NVM_OROM_OFFSET, &offset); + + /* make sure offset is valid */ + if ((offset == 0x0) || (offset == NVM_INVALID_PTR)) + return; + + hw->eeprom.ops.read(hw, offset + NVM_OROM_BLK_HI, &eeprom_cfg_blkh); + hw->eeprom.ops.read(hw, offset + NVM_OROM_BLK_LOW, &eeprom_cfg_blkl); + + /* option rom exists and is valid */ + if ((eeprom_cfg_blkl | eeprom_cfg_blkh) == 0x0 || + eeprom_cfg_blkl == NVM_VER_INVALID || + eeprom_cfg_blkh == NVM_VER_INVALID) + return; + + nvm_ver->or_valid = true; + nvm_ver->or_major = eeprom_cfg_blkl >> NVM_OROM_SHIFT; + nvm_ver->or_build = (eeprom_cfg_blkl << NVM_OROM_SHIFT) | + (eeprom_cfg_blkh >> NVM_OROM_SHIFT); + nvm_ver->or_patch = eeprom_cfg_blkh & NVM_OROM_PATCH_MASK; +} + +/** + * ixgbe_get_oem_prod_version - Return OEM Product version + * + * @hw: pointer to hardware structure + * @nvm_ver: pointer to output structure + * + * if valid OEM product version, nvm_ver->oem_valid set to true + * else nvm_ver->oem_valid is false. + **/ +void ixgbe_get_oem_prod_version(struct ixgbe_hw *hw, + struct ixgbe_nvm_version *nvm_ver) +{ + u16 rel_num, prod_ver, mod_len, cap, offset; + + nvm_ver->oem_valid = false; + hw->eeprom.ops.read(hw, NVM_OEM_PROD_VER_PTR, &offset); + + /* Return is offset to OEM Product Version block is invalid */ + if (offset == 0x0 || offset == NVM_INVALID_PTR) + return; + + /* Read product version block */ + hw->eeprom.ops.read(hw, offset, &mod_len); + hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_CAP_OFF, &cap); + + /* Return if OEM product version block is invalid */ + if (mod_len != NVM_OEM_PROD_VER_MOD_LEN || + (cap & NVM_OEM_PROD_VER_CAP_MASK) != 0x0) + return; + + hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_OFF_L, &prod_ver); + hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_OFF_H, &rel_num); + + /* Return if version is invalid */ + if ((rel_num | prod_ver) == 0x0 || + rel_num == NVM_VER_INVALID || prod_ver == NVM_VER_INVALID) + return; + + nvm_ver->oem_major = prod_ver >> NVM_VER_SHIFT; + nvm_ver->oem_minor = prod_ver & NVM_VER_MASK; + nvm_ver->oem_release = rel_num; + nvm_ver->oem_valid = true; +} + +/** + * ixgbe_get_etk_id - Return Etrack ID from EEPROM + * + * @hw: pointer to hardware structure + * @nvm_ver: pointer to output structure + * + * word read errors will return 0xFFFF + **/ +void ixgbe_get_etk_id(struct ixgbe_hw *hw, struct ixgbe_nvm_version *nvm_ver) +{ + u16 etk_id_l, etk_id_h; + + if (hw->eeprom.ops.read(hw, NVM_ETK_OFF_LOW, &etk_id_l)) + etk_id_l = NVM_VER_INVALID; + if (hw->eeprom.ops.read(hw, NVM_ETK_OFF_HI, &etk_id_h)) + etk_id_h = NVM_VER_INVALID; + + /* The word order for the version format is determined by high order + * word bit 15. 
+ */ + if ((etk_id_h & NVM_ETK_VALID) == 0) { + nvm_ver->etk_id = etk_id_h; + nvm_ver->etk_id |= (etk_id_l << NVM_ETK_SHIFT); + } else { + nvm_ver->etk_id = etk_id_l; + nvm_ver->etk_id |= (etk_id_h << NVM_ETK_SHIFT); + } +} + + +/** + * ixgbe_dcb_get_rtrup2tc_generic - read rtrup2tc reg + * @hw: pointer to hardware structure + * @map: pointer to u8 arr for returning map + * + * Read the rtrup2tc HW register and resolve its content into map + **/ +void ixgbe_dcb_get_rtrup2tc_generic(struct ixgbe_hw *hw, u8 *map) +{ + u32 reg, i; + + reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC); + for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++) + map[i] = IXGBE_RTRUP2TC_UP_MASK & + (reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT)); + return; +} + +void ixgbe_disable_rx_generic(struct ixgbe_hw *hw) +{ + u32 pfdtxgswc; + u32 rxctrl; + + rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); + if (rxctrl & IXGBE_RXCTRL_RXEN) { + if (hw->mac.type != ixgbe_mac_82598EB) { + pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC); + if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) { + pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN; + IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc); + hw->mac.set_lben = true; + } else { + hw->mac.set_lben = false; + } + } + rxctrl &= ~IXGBE_RXCTRL_RXEN; + IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl); + } +} + +void ixgbe_enable_rx_generic(struct ixgbe_hw *hw) +{ + u32 pfdtxgswc; + u32 rxctrl; + + rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); + IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, (rxctrl | IXGBE_RXCTRL_RXEN)); + + if (hw->mac.type != ixgbe_mac_82598EB) { + if (hw->mac.set_lben) { + pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC); + pfdtxgswc |= IXGBE_PFDTXGSWC_VT_LBEN; + IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc); + hw->mac.set_lben = false; + } + } +} + +/** + * ixgbe_mng_present - returns true when management capability is present + * @hw: pointer to hardware structure + */ +bool ixgbe_mng_present(struct ixgbe_hw *hw) +{ + u32 fwsm; + + if (hw->mac.type < ixgbe_mac_82599EB) + return false; + + fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw)); + + return !!(fwsm & IXGBE_FWSM_FW_MODE_PT); +} + +/** + * ixgbe_mng_enabled - Is the manageability engine enabled? + * @hw: pointer to hardware structure + * + * Returns true if the manageability engine is enabled. + **/ +bool ixgbe_mng_enabled(struct ixgbe_hw *hw) +{ + u32 fwsm, manc, factps; + + fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw)); + if ((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT) + return false; + + manc = IXGBE_READ_REG(hw, IXGBE_MANC); + if (!(manc & IXGBE_MANC_RCV_TCO_EN)) + return false; + + if (hw->mac.type <= ixgbe_mac_X540) { + factps = IXGBE_READ_REG(hw, IXGBE_FACTPS_BY_MAC(hw)); + if (factps & IXGBE_FACTPS_MNGCG) + return false; + } + + return true; +} + +/** + * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg_wait_to_complete: true when waiting for completion is needed + * + * Set the link speed in the MAC and/or PHY register and restarts link. 
+ **/ +s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete) +{ + ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN; + ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN; + s32 status = IXGBE_SUCCESS; + u32 speedcnt = 0; + u32 i = 0; + bool autoneg, link_up = false; + + DEBUGFUNC("ixgbe_setup_mac_link_multispeed_fiber"); + + /* Mask off requested but non-supported speeds */ + status = ixgbe_get_link_capabilities(hw, &link_speed, &autoneg); + if (status != IXGBE_SUCCESS) + return status; + + speed &= link_speed; + + /* Try each speed one by one, highest priority first. We do this in + * software because 10Gb fiber doesn't support speed autonegotiation. + */ + if (speed & IXGBE_LINK_SPEED_10GB_FULL) { + speedcnt++; + highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL; + + /* Set the module link speed */ + switch (hw->phy.media_type) { + case ixgbe_media_type_fiber: + ixgbe_set_rate_select_speed(hw, + IXGBE_LINK_SPEED_10GB_FULL); + break; + case ixgbe_media_type_fiber_qsfp: + /* QSFP module automatically detects MAC link speed */ + break; + default: + DEBUGOUT("Unexpected media type.\n"); + break; + } + + /* Allow module to change analog characteristics (1G->10G) */ + msec_delay(40); + + status = ixgbe_setup_mac_link(hw, + IXGBE_LINK_SPEED_10GB_FULL, + autoneg_wait_to_complete); + if (status != IXGBE_SUCCESS) + return status; + + /* Flap the Tx laser if it has not already been done */ + ixgbe_flap_tx_laser(hw); + + /* Wait for the controller to acquire link. Per IEEE 802.3ap, + * Section 73.10.2, we may have to wait up to 1000ms if KR is + * attempted. 82599 uses the same timing for 10g SFI. + */ + for (i = 0; i < 10; i++) { + /* Wait for the link partner to also set speed */ + msec_delay(100); + + /* If we have link, just jump out */ + status = ixgbe_check_link(hw, &link_speed, + &link_up, false); + if (status != IXGBE_SUCCESS) + return status; + + if (link_up) + goto out; + } + } + + if (speed & IXGBE_LINK_SPEED_1GB_FULL) { + speedcnt++; + if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN) + highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL; + + /* Set the module link speed */ + switch (hw->phy.media_type) { + case ixgbe_media_type_fiber: + ixgbe_set_rate_select_speed(hw, + IXGBE_LINK_SPEED_1GB_FULL); + break; + case ixgbe_media_type_fiber_qsfp: + /* QSFP module automatically detects link speed */ + break; + default: + DEBUGOUT("Unexpected media type.\n"); + break; + } + + /* Allow module to change analog characteristics (10G->1G) */ + msec_delay(40); + + status = ixgbe_setup_mac_link(hw, + IXGBE_LINK_SPEED_1GB_FULL, + autoneg_wait_to_complete); + if (status != IXGBE_SUCCESS) + return status; + + /* Flap the Tx laser if it has not already been done */ + ixgbe_flap_tx_laser(hw); + + /* Wait for the link partner to also set speed */ + msec_delay(100); + + /* If we have link, just jump out */ + status = ixgbe_check_link(hw, &link_speed, &link_up, false); + if (status != IXGBE_SUCCESS) + return status; + + if (link_up) + goto out; + } + + /* We didn't get link. Configure back to the highest speed we tried, + * (if there was more than one). We call ourselves back with just the + * single highest speed that the user requested. 
+ */ + if (speedcnt > 1) + status = ixgbe_setup_mac_link_multispeed_fiber(hw, + highest_link_speed, + autoneg_wait_to_complete); + +out: + /* Set autoneg_advertised value based on input link speed */ + hw->phy.autoneg_advertised = 0; + + if (speed & IXGBE_LINK_SPEED_10GB_FULL) + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL; + + if (speed & IXGBE_LINK_SPEED_1GB_FULL) + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL; + + return status; +} + +/** + * ixgbe_set_soft_rate_select_speed - Set module link speed + * @hw: pointer to hardware structure + * @speed: link speed to set + * + * Set module link speed via the soft rate select. + */ +void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw, + ixgbe_link_speed speed) +{ + s32 status; + u8 rs, eeprom_data; + + switch (speed) { + case IXGBE_LINK_SPEED_10GB_FULL: + /* one bit mask same as setting on */ + rs = IXGBE_SFF_SOFT_RS_SELECT_10G; + break; + case IXGBE_LINK_SPEED_1GB_FULL: + rs = IXGBE_SFF_SOFT_RS_SELECT_1G; + break; + default: + DEBUGOUT("Invalid fixed module speed\n"); + return; + } + + /* Set RS0 */ + status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB, + IXGBE_I2C_EEPROM_DEV_ADDR2, + &eeprom_data); + if (status) { + DEBUGOUT("Failed to read Rx Rate Select RS0\n"); + goto out; + } + + eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs; + + status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB, + IXGBE_I2C_EEPROM_DEV_ADDR2, + eeprom_data); + if (status) { + DEBUGOUT("Failed to write Rx Rate Select RS0\n"); + goto out; + } + + /* Set RS1 */ + status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB, + IXGBE_I2C_EEPROM_DEV_ADDR2, + &eeprom_data); + if (status) { + DEBUGOUT("Failed to read Rx Rate Select RS1\n"); + goto out; + } + + eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs; + + status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB, + IXGBE_I2C_EEPROM_DEV_ADDR2, + eeprom_data); + if (status) { + DEBUGOUT("Failed to write Rx Rate Select RS1\n"); + goto out; + } +out: + return; +} diff --git a/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_common.h b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_common.h new file mode 100644 index 000000000..7a31f088c --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_common.h @@ -0,0 +1,170 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + +#ifndef _IXGBE_COMMON_H_ +#define _IXGBE_COMMON_H_ + +#include "ixgbe_type.h" +#define IXGBE_WRITE_REG64(hw, reg, value) \ + do { \ + IXGBE_WRITE_REG(hw, reg, (u32) value); \ + IXGBE_WRITE_REG(hw, reg + 4, (u32) (value >> 32)); \ + } while (0) +#define IXGBE_REMOVED(a) (0) +struct ixgbe_pba { + u16 word[2]; + u16 *pba_block; +}; + +void ixgbe_dcb_get_rtrup2tc_generic(struct ixgbe_hw *hw, u8 *map); + +u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw); +s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw); +s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw); +s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw); +s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw); +s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw); +s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num); +s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num, + u32 pba_num_size); +s32 ixgbe_read_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf, + u32 eeprom_buf_size, u16 max_pba_block_size, + struct ixgbe_pba *pba); +s32 ixgbe_write_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf, + u32 eeprom_buf_size, struct ixgbe_pba *pba); +s32 
ixgbe_get_pba_block_size(struct ixgbe_hw *hw, u16 *eeprom_buf, + u32 eeprom_buf_size, u16 *pba_block_size); +s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr); +s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw); +void ixgbe_set_pci_config_data_generic(struct ixgbe_hw *hw, u16 link_status); +void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw); +s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw); + +s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index); +s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index); +s32 ixgbe_init_led_link_act_generic(struct ixgbe_hw *hw); + +s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw); +s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data); +s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data); +s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data); +s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data); +s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data); +s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data); +s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, + u16 *data); +s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data); +s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw); +s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw, + u16 *checksum_val); +s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw); +s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg); + +s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, + u32 enable_addr); +s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index); +s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw); +s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list, + u32 mc_addr_count, + ixgbe_mc_addr_itr func, bool clear); +s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list, + u32 addr_count, ixgbe_mc_addr_itr func); +s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw); +s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw); +s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval); +s32 ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw); +s32 ixgbe_enable_sec_rx_path_generic(struct ixgbe_hw *hw); + +s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw); +bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw); +void ixgbe_fc_autoneg(struct ixgbe_hw *hw); +s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw); + +s32 ixgbe_validate_mac_addr(u8 *mac_addr); +s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask); +void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u32 mask); +s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw); + +s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *, u32 *reg_val); +s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked); + +s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index); +s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index); + +s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr); +s32 ixgbe_set_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr); + +s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq); +s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq); +s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq); +s32 ixgbe_insert_mac_addr_generic(struct 
ixgbe_hw *hw, u8 *addr, u32 vmdq); +s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw); +s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, + u32 vind, bool vlan_on, bool vlvf_bypass); +s32 ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind, + bool vlan_on, u32 *vfta_delta, u32 vfta, + bool vlvf_bypass); +s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw); +s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan, bool vlvf_bypass); + +s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *link_up, bool link_up_wait_to_complete); + +s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix, + u16 *wwpn_prefix); + +s32 ixgbe_get_fcoe_boot_status_generic(struct ixgbe_hw *hw, u16 *bs); +void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf); +void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf); +s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps); +void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom, + int strategy); +void ixgbe_enable_relaxed_ordering_gen2(struct ixgbe_hw *hw); +s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min, + u8 build, u8 ver, u16 len, const char *str); +u8 ixgbe_calculate_checksum(u8 *buffer, u32 length); +s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer, + u32 length, u32 timeout, bool return_data); +s32 ixgbe_hic_unlocked(struct ixgbe_hw *, u32 *buffer, u32 length, u32 timeout); +s32 ixgbe_shutdown_fw_phy(struct ixgbe_hw *); +s32 ixgbe_fw_phy_activity(struct ixgbe_hw *, u16 activity, + u32 (*data)[FW_PHY_ACT_DATA_COUNT]); +void ixgbe_clear_tx_pending(struct ixgbe_hw *hw); + +extern s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw); +extern void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw); +bool ixgbe_mng_present(struct ixgbe_hw *hw); +bool ixgbe_mng_enabled(struct ixgbe_hw *hw); + +#define IXGBE_I2C_THERMAL_SENSOR_ADDR 0xF8 +#define IXGBE_EMC_INTERNAL_DATA 0x00 +#define IXGBE_EMC_INTERNAL_THERM_LIMIT 0x20 +#define IXGBE_EMC_DIODE1_DATA 0x01 +#define IXGBE_EMC_DIODE1_THERM_LIMIT 0x19 +#define IXGBE_EMC_DIODE2_DATA 0x23 +#define IXGBE_EMC_DIODE2_THERM_LIMIT 0x1A +#define IXGBE_EMC_DIODE3_DATA 0x2A +#define IXGBE_EMC_DIODE3_THERM_LIMIT 0x30 + +s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw); +s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw); + +void ixgbe_get_etk_id(struct ixgbe_hw *hw, struct ixgbe_nvm_version *nvm_ver); +void ixgbe_get_oem_prod_version(struct ixgbe_hw *hw, + struct ixgbe_nvm_version *nvm_ver); +void ixgbe_get_orom_version(struct ixgbe_hw *hw, + struct ixgbe_nvm_version *nvm_ver); +void ixgbe_disable_rx_generic(struct ixgbe_hw *hw); +void ixgbe_enable_rx_generic(struct ixgbe_hw *hw); +s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete); +void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw, + ixgbe_link_speed speed); +#endif /* IXGBE_COMMON */ diff --git a/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_dcb.c b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_dcb.c new file mode 100644 index 000000000..53def2146 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_dcb.c @@ -0,0 +1,704 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + + +#include "ixgbe_type.h" +#include "ixgbe_dcb.h" +#include "ixgbe_dcb_82598.h" +#include "ixgbe_dcb_82599.h" + +/** + * ixgbe_dcb_calculate_tc_credits - This calculates 
the ieee traffic class + * credits from the configured bandwidth percentages. Credits + * are the smallest unit programmable into the underlying + * hardware. The IEEE 802.1Qaz specification do not use bandwidth + * groups so this is much simplified from the CEE case. + * @bw: bandwidth index by traffic class + * @refill: refill credits index by traffic class + * @max: max credits by traffic class + * @max_frame_size: maximum frame size + */ +s32 ixgbe_dcb_calculate_tc_credits(u8 *bw, u16 *refill, u16 *max, + int max_frame_size) +{ + int min_percent = 100; + int min_credit, multiplier; + int i; + + min_credit = ((max_frame_size / 2) + IXGBE_DCB_CREDIT_QUANTUM - 1) / + IXGBE_DCB_CREDIT_QUANTUM; + + for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + if (bw[i] < min_percent && bw[i]) + min_percent = bw[i]; + } + + multiplier = (min_credit / min_percent) + 1; + + /* Find out the hw credits for each TC */ + for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + int val = min(bw[i] * multiplier, IXGBE_DCB_MAX_CREDIT_REFILL); + + if (val < min_credit) + val = min_credit; + refill[i] = (u16)val; + + max[i] = bw[i] ? (bw[i]*IXGBE_DCB_MAX_CREDIT)/100 : min_credit; + } + + return 0; +} + +/** + * ixgbe_dcb_calculate_tc_credits_cee - Calculates traffic class credits + * @hw: pointer to hardware structure + * @dcb_config: Struct containing DCB settings + * @max_frame_size: Maximum frame size + * @direction: Configuring either Tx or Rx + * + * This function calculates the credits allocated to each traffic class. + * It should be called only after the rules are checked by + * ixgbe_dcb_check_config_cee(). + */ +s32 ixgbe_dcb_calculate_tc_credits_cee(struct ixgbe_hw *hw, + struct ixgbe_dcb_config *dcb_config, + u32 max_frame_size, u8 direction) +{ + struct ixgbe_dcb_tc_path *p; + u32 min_multiplier = 0; + u16 min_percent = 100; + s32 ret_val = IXGBE_SUCCESS; + /* Initialization values default for Tx settings */ + u32 min_credit = 0; + u32 credit_refill = 0; + u32 credit_max = 0; + u16 link_percentage = 0; + u8 bw_percent = 0; + u8 i; + + if (dcb_config == NULL) { + ret_val = IXGBE_ERR_CONFIG; + goto out; + } + + min_credit = ((max_frame_size / 2) + IXGBE_DCB_CREDIT_QUANTUM - 1) / + IXGBE_DCB_CREDIT_QUANTUM; + + /* Find smallest link percentage */ + for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + p = &dcb_config->tc_config[i].path[direction]; + bw_percent = dcb_config->bw_percentage[direction][p->bwg_id]; + link_percentage = p->bwg_percent; + + link_percentage = (link_percentage * bw_percent) / 100; + + if (link_percentage && link_percentage < min_percent) + min_percent = link_percentage; + } + + /* + * The ratio between traffic classes will control the bandwidth + * percentages seen on the wire. To calculate this ratio we use + * a multiplier. It is required that the refill credits must be + * larger than the max frame size so here we find the smallest + * multiplier that will allow all bandwidth percentages to be + * greater than the max frame size. 
+ */ + min_multiplier = (min_credit / min_percent) + 1; + + /* Find out the link percentage for each TC first */ + for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + p = &dcb_config->tc_config[i].path[direction]; + bw_percent = dcb_config->bw_percentage[direction][p->bwg_id]; + + link_percentage = p->bwg_percent; + /* Must be careful of integer division for very small nums */ + link_percentage = (link_percentage * bw_percent) / 100; + if (p->bwg_percent > 0 && link_percentage == 0) + link_percentage = 1; + + /* Save link_percentage for reference */ + p->link_percent = (u8)link_percentage; + + /* Calculate credit refill ratio using multiplier */ + credit_refill = min(link_percentage * min_multiplier, + (u32)IXGBE_DCB_MAX_CREDIT_REFILL); + + /* Refill at least minimum credit */ + if (credit_refill < min_credit) + credit_refill = min_credit; + + p->data_credits_refill = (u16)credit_refill; + + /* Calculate maximum credit for the TC */ + credit_max = (link_percentage * IXGBE_DCB_MAX_CREDIT) / 100; + + /* + * Adjustment based on rule checking, if the percentage + * of a TC is too small, the maximum credit may not be + * enough to send out a jumbo frame in data plane arbitration. + */ + if (credit_max < min_credit) + credit_max = min_credit; + + if (direction == IXGBE_DCB_TX_CONFIG) { + /* + * Adjustment based on rule checking, if the + * percentage of a TC is too small, the maximum + * credit may not be enough to send out a TSO + * packet in descriptor plane arbitration. + */ + if (credit_max && (credit_max < + IXGBE_DCB_MIN_TSO_CREDIT) + && (hw->mac.type == ixgbe_mac_82598EB)) + credit_max = IXGBE_DCB_MIN_TSO_CREDIT; + + dcb_config->tc_config[i].desc_credits_max = + (u16)credit_max; + } + + p->data_credits_max = (u16)credit_max; + } + +out: + return ret_val; +} + +/** + * ixgbe_dcb_unpack_pfc_cee - Unpack dcb_config PFC info + * @cfg: dcb configuration to unpack into hardware consumable fields + * @map: user priority to traffic class map + * @pfc_up: u8 to store user priority PFC bitmask + * + * This unpacks the dcb configuration PFC info which is stored per + * traffic class into a 8bit user priority bitmask that can be + * consumed by hardware routines. The priority to tc map must be + * updated before calling this routine to use current up-to maps. + */ +void ixgbe_dcb_unpack_pfc_cee(struct ixgbe_dcb_config *cfg, u8 *map, u8 *pfc_up) +{ + struct ixgbe_dcb_tc_config *tc_config = &cfg->tc_config[0]; + int up; + + /* + * If the TC for this user priority has PFC enabled then set the + * matching bit in 'pfc_up' to reflect that PFC is enabled. 
+ */ + for (*pfc_up = 0, up = 0; up < IXGBE_DCB_MAX_USER_PRIORITY; up++) { + if (tc_config[map[up]].pfc != ixgbe_dcb_pfc_disabled) + *pfc_up |= 1 << up; + } +} + +void ixgbe_dcb_unpack_refill_cee(struct ixgbe_dcb_config *cfg, int direction, + u16 *refill) +{ + struct ixgbe_dcb_tc_config *tc_config = &cfg->tc_config[0]; + int tc; + + for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++) + refill[tc] = tc_config[tc].path[direction].data_credits_refill; +} + +void ixgbe_dcb_unpack_max_cee(struct ixgbe_dcb_config *cfg, u16 *max) +{ + struct ixgbe_dcb_tc_config *tc_config = &cfg->tc_config[0]; + int tc; + + for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++) + max[tc] = tc_config[tc].desc_credits_max; +} + +void ixgbe_dcb_unpack_bwgid_cee(struct ixgbe_dcb_config *cfg, int direction, + u8 *bwgid) +{ + struct ixgbe_dcb_tc_config *tc_config = &cfg->tc_config[0]; + int tc; + + for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++) + bwgid[tc] = tc_config[tc].path[direction].bwg_id; +} + +void ixgbe_dcb_unpack_tsa_cee(struct ixgbe_dcb_config *cfg, int direction, + u8 *tsa) +{ + struct ixgbe_dcb_tc_config *tc_config = &cfg->tc_config[0]; + int tc; + + for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++) + tsa[tc] = tc_config[tc].path[direction].tsa; +} + +u8 ixgbe_dcb_get_tc_from_up(struct ixgbe_dcb_config *cfg, int direction, u8 up) +{ + struct ixgbe_dcb_tc_config *tc_config = &cfg->tc_config[0]; + u8 prio_mask = 1 << up; + u8 tc = cfg->num_tcs.pg_tcs; + + /* If tc is 0 then DCB is likely not enabled or supported */ + if (!tc) + goto out; + + /* + * Test from maximum TC to 1 and report the first match we find. If + * we find no match we can assume that the TC is 0 since the TC must + * be set for all user priorities + */ + for (tc--; tc; tc--) { + if (prio_mask & tc_config[tc].path[direction].up_to_tc_bitmap) + break; + } +out: + return tc; +} + +void ixgbe_dcb_unpack_map_cee(struct ixgbe_dcb_config *cfg, int direction, + u8 *map) +{ + u8 up; + + for (up = 0; up < IXGBE_DCB_MAX_USER_PRIORITY; up++) + map[up] = ixgbe_dcb_get_tc_from_up(cfg, direction, up); +} + +/** + * ixgbe_dcb_config - Struct containing DCB settings. + * @dcb_config: Pointer to DCB config structure + * + * This function checks DCB rules for DCB settings. + * The following rules are checked: + * 1. The sum of bandwidth percentages of all Bandwidth Groups must total 100%. + * 2. The sum of bandwidth percentages of all Traffic Classes within a Bandwidth + * Group must total 100. + * 3. A Traffic Class should not be set to both Link Strict Priority + * and Group Strict Priority. + * 4. Link strict Bandwidth Groups can only have link strict traffic classes + * with zero bandwidth. 
+ */ +s32 ixgbe_dcb_check_config_cee(struct ixgbe_dcb_config *dcb_config) +{ + struct ixgbe_dcb_tc_path *p; + s32 ret_val = IXGBE_SUCCESS; + u8 i, j, bw = 0, bw_id; + u8 bw_sum[2][IXGBE_DCB_MAX_BW_GROUP]; + bool link_strict[2][IXGBE_DCB_MAX_BW_GROUP]; + + memset(bw_sum, 0, sizeof(bw_sum)); + memset(link_strict, 0, sizeof(link_strict)); + + /* First Tx, then Rx */ + for (i = 0; i < 2; i++) { + /* Check each traffic class for rule violation */ + for (j = 0; j < IXGBE_DCB_MAX_TRAFFIC_CLASS; j++) { + p = &dcb_config->tc_config[j].path[i]; + + bw = p->bwg_percent; + bw_id = p->bwg_id; + + if (bw_id >= IXGBE_DCB_MAX_BW_GROUP) { + ret_val = IXGBE_ERR_CONFIG; + goto err_config; + } + if (p->tsa == ixgbe_dcb_tsa_strict) { + link_strict[i][bw_id] = true; + /* Link strict should have zero bandwidth */ + if (bw) { + ret_val = IXGBE_ERR_CONFIG; + goto err_config; + } + } else if (!bw) { + /* + * Traffic classes without link strict + * should have non-zero bandwidth. + */ + ret_val = IXGBE_ERR_CONFIG; + goto err_config; + } + bw_sum[i][bw_id] += bw; + } + + bw = 0; + + /* Check each bandwidth group for rule violation */ + for (j = 0; j < IXGBE_DCB_MAX_BW_GROUP; j++) { + bw += dcb_config->bw_percentage[i][j]; + /* + * Sum of bandwidth percentages of all traffic classes + * within a Bandwidth Group must total 100 except for + * link strict group (zero bandwidth). + */ + if (link_strict[i][j]) { + if (bw_sum[i][j]) { + /* + * Link strict group should have zero + * bandwidth. + */ + ret_val = IXGBE_ERR_CONFIG; + goto err_config; + } + } else if (bw_sum[i][j] != IXGBE_DCB_BW_PERCENT && + bw_sum[i][j] != 0) { + ret_val = IXGBE_ERR_CONFIG; + goto err_config; + } + } + + if (bw != IXGBE_DCB_BW_PERCENT) { + ret_val = IXGBE_ERR_CONFIG; + goto err_config; + } + } + +err_config: + + return ret_val; +} + +/** + * ixgbe_dcb_get_tc_stats - Returns status of each traffic class + * @hw: pointer to hardware structure + * @stats: pointer to statistics structure + * @tc_count: Number of elements in bwg_array. + * + * This function returns the status data for each of the Traffic Classes in use. + */ +s32 ixgbe_dcb_get_tc_stats(struct ixgbe_hw *hw, struct ixgbe_hw_stats *stats, + u8 tc_count) +{ + s32 ret = IXGBE_NOT_IMPLEMENTED; + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + ret = ixgbe_dcb_get_tc_stats_82598(hw, stats, tc_count); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + ret = ixgbe_dcb_get_tc_stats_82599(hw, stats, tc_count); + break; + default: + break; + } + return ret; +} + +/** + * ixgbe_dcb_get_pfc_stats - Returns CBFC status of each traffic class + * @hw: pointer to hardware structure + * @stats: pointer to statistics structure + * @tc_count: Number of elements in bwg_array. + * + * This function returns the CBFC status data for each of the Traffic Classes. 
+ */ +s32 ixgbe_dcb_get_pfc_stats(struct ixgbe_hw *hw, struct ixgbe_hw_stats *stats, + u8 tc_count) +{ + s32 ret = IXGBE_NOT_IMPLEMENTED; + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + ret = ixgbe_dcb_get_pfc_stats_82598(hw, stats, tc_count); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + ret = ixgbe_dcb_get_pfc_stats_82599(hw, stats, tc_count); + break; + default: + break; + } + return ret; +} + +/** + * ixgbe_dcb_config_rx_arbiter_cee - Config Rx arbiter + * @hw: pointer to hardware structure + * @dcb_config: pointer to ixgbe_dcb_config structure + * + * Configure Rx Data Arbiter and credits for each traffic class. + */ +s32 ixgbe_dcb_config_rx_arbiter_cee(struct ixgbe_hw *hw, + struct ixgbe_dcb_config *dcb_config) +{ + s32 ret = IXGBE_NOT_IMPLEMENTED; + u8 tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS] = { 0 }; + u8 bwgid[IXGBE_DCB_MAX_TRAFFIC_CLASS] = { 0 }; + u8 map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 }; + u16 refill[IXGBE_DCB_MAX_TRAFFIC_CLASS] = { 0 }; + u16 max[IXGBE_DCB_MAX_TRAFFIC_CLASS] = { 0 }; + + ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_TX_CONFIG, refill); + ixgbe_dcb_unpack_max_cee(dcb_config, max); + ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_TX_CONFIG, bwgid); + ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_TX_CONFIG, tsa); + ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_TX_CONFIG, map); + + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + ret = ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, tsa); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + ret = ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwgid, + tsa, map); + break; + default: + break; + } + return ret; +} + +/** + * ixgbe_dcb_config_tx_desc_arbiter_cee - Config Tx Desc arbiter + * @hw: pointer to hardware structure + * @dcb_config: pointer to ixgbe_dcb_config structure + * + * Configure Tx Descriptor Arbiter and credits for each traffic class. + */ +s32 ixgbe_dcb_config_tx_desc_arbiter_cee(struct ixgbe_hw *hw, + struct ixgbe_dcb_config *dcb_config) +{ + s32 ret = IXGBE_NOT_IMPLEMENTED; + u8 tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS]; + u8 bwgid[IXGBE_DCB_MAX_TRAFFIC_CLASS]; + u16 refill[IXGBE_DCB_MAX_TRAFFIC_CLASS]; + u16 max[IXGBE_DCB_MAX_TRAFFIC_CLASS]; + + ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_TX_CONFIG, refill); + ixgbe_dcb_unpack_max_cee(dcb_config, max); + ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_TX_CONFIG, bwgid); + ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_TX_CONFIG, tsa); + + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + ret = ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, + bwgid, tsa); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + ret = ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, + bwgid, tsa); + break; + default: + break; + } + return ret; +} + +/** + * ixgbe_dcb_config_tx_data_arbiter_cee - Config Tx data arbiter + * @hw: pointer to hardware structure + * @dcb_config: pointer to ixgbe_dcb_config structure + * + * Configure Tx Data Arbiter and credits for each traffic class. 
+ */ +s32 ixgbe_dcb_config_tx_data_arbiter_cee(struct ixgbe_hw *hw, + struct ixgbe_dcb_config *dcb_config) +{ + s32 ret = IXGBE_NOT_IMPLEMENTED; + u8 tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS]; + u8 bwgid[IXGBE_DCB_MAX_TRAFFIC_CLASS]; + u8 map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 }; + u16 refill[IXGBE_DCB_MAX_TRAFFIC_CLASS]; + u16 max[IXGBE_DCB_MAX_TRAFFIC_CLASS]; + + ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_TX_CONFIG, refill); + ixgbe_dcb_unpack_max_cee(dcb_config, max); + ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_TX_CONFIG, bwgid); + ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_TX_CONFIG, tsa); + ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_TX_CONFIG, map); + + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + ret = ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, + bwgid, tsa); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + ret = ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, + bwgid, tsa, + map); + break; + default: + break; + } + return ret; +} + +/** + * ixgbe_dcb_config_pfc_cee - Config priority flow control + * @hw: pointer to hardware structure + * @dcb_config: pointer to ixgbe_dcb_config structure + * + * Configure Priority Flow Control for each traffic class. + */ +s32 ixgbe_dcb_config_pfc_cee(struct ixgbe_hw *hw, + struct ixgbe_dcb_config *dcb_config) +{ + s32 ret = IXGBE_NOT_IMPLEMENTED; + u8 pfc_en; + u8 map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 }; + + ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_TX_CONFIG, map); + ixgbe_dcb_unpack_pfc_cee(dcb_config, map, &pfc_en); + + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + ret = ixgbe_dcb_config_pfc_82598(hw, pfc_en); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + ret = ixgbe_dcb_config_pfc_82599(hw, pfc_en, map); + break; + default: + break; + } + return ret; +} + +/** + * ixgbe_dcb_config_tc_stats - Config traffic class statistics + * @hw: pointer to hardware structure + * + * Configure queue statistics registers, all queues belonging to same traffic + * class uses a single set of queue statistics counters. + */ +s32 ixgbe_dcb_config_tc_stats(struct ixgbe_hw *hw) +{ + s32 ret = IXGBE_NOT_IMPLEMENTED; + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + ret = ixgbe_dcb_config_tc_stats_82598(hw); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + ret = ixgbe_dcb_config_tc_stats_82599(hw, NULL); + break; + default: + break; + } + return ret; +} + +/** + * ixgbe_dcb_hw_config_cee - Config and enable DCB + * @hw: pointer to hardware structure + * @dcb_config: pointer to ixgbe_dcb_config structure + * + * Configure dcb settings and enable dcb mode. 
+ */ +s32 ixgbe_dcb_hw_config_cee(struct ixgbe_hw *hw, + struct ixgbe_dcb_config *dcb_config) +{ + s32 ret = IXGBE_NOT_IMPLEMENTED; + u8 pfc_en; + u8 tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS]; + u8 bwgid[IXGBE_DCB_MAX_TRAFFIC_CLASS]; + u8 map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 }; + u16 refill[IXGBE_DCB_MAX_TRAFFIC_CLASS]; + u16 max[IXGBE_DCB_MAX_TRAFFIC_CLASS]; + + /* Unpack CEE standard containers */ + ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_TX_CONFIG, refill); + ixgbe_dcb_unpack_max_cee(dcb_config, max); + ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_TX_CONFIG, bwgid); + ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_TX_CONFIG, tsa); + ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_TX_CONFIG, map); + + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + ret = ixgbe_dcb_hw_config_82598(hw, dcb_config->link_speed, + refill, max, bwgid, tsa); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + ixgbe_dcb_config_82599(hw, dcb_config); + ret = ixgbe_dcb_hw_config_82599(hw, dcb_config->link_speed, + refill, max, bwgid, + tsa, map); + + ixgbe_dcb_config_tc_stats_82599(hw, dcb_config); + break; + default: + break; + } + + if (!ret && dcb_config->pfc_mode_enable) { + ixgbe_dcb_unpack_pfc_cee(dcb_config, map, &pfc_en); + ret = ixgbe_dcb_config_pfc(hw, pfc_en, map); + } + + return ret; +} + +/* Helper routines to abstract HW specifics from DCB netlink ops */ +s32 ixgbe_dcb_config_pfc(struct ixgbe_hw *hw, u8 pfc_en, u8 *map) +{ + int ret = IXGBE_ERR_PARAM; + + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + ret = ixgbe_dcb_config_pfc_82598(hw, pfc_en); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + ret = ixgbe_dcb_config_pfc_82599(hw, pfc_en, map); + break; + default: + break; + } + return ret; +} + +s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw, u16 *refill, u16 *max, + u8 *bwg_id, u8 *tsa, u8 *map) +{ + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, tsa); + ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, bwg_id, + tsa); + ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, bwg_id, + tsa); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id, + tsa, map); + ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, bwg_id, + tsa); + ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id, + tsa, map); + break; + default: + break; + } + return 0; +} diff --git a/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_dcb.h b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_dcb.h new file mode 100644 index 000000000..c2a1013ac --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_dcb.h @@ -0,0 +1,145 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + +#ifndef _IXGBE_DCB_H_ +#define _IXGBE_DCB_H_ + +#include "ixgbe_type.h" + +/* DCB defines */ +/* DCB credit calculation defines */ +#define IXGBE_DCB_CREDIT_QUANTUM 64 +#define IXGBE_DCB_MAX_CREDIT_REFILL 200 /* 200 * 64B = 12800B */ +#define IXGBE_DCB_MAX_TSO_SIZE (32 * 1024) /* Max TSO pkt size in DCB*/ +#define IXGBE_DCB_MAX_CREDIT (2 * IXGBE_DCB_MAX_CREDIT_REFILL) + +/* 513 for 32KB TSO packet */ +#define IXGBE_DCB_MIN_TSO_CREDIT \ + ((IXGBE_DCB_MAX_TSO_SIZE / IXGBE_DCB_CREDIT_QUANTUM) + 1) + +/* DCB configuration defines */ 
+#define IXGBE_DCB_MAX_USER_PRIORITY 8 +#define IXGBE_DCB_MAX_BW_GROUP 8 +#define IXGBE_DCB_BW_PERCENT 100 + +#define IXGBE_DCB_TX_CONFIG 0 +#define IXGBE_DCB_RX_CONFIG 1 + +/* DCB capability defines */ +#define IXGBE_DCB_PG_SUPPORT 0x00000001 +#define IXGBE_DCB_PFC_SUPPORT 0x00000002 +#define IXGBE_DCB_BCN_SUPPORT 0x00000004 +#define IXGBE_DCB_UP2TC_SUPPORT 0x00000008 +#define IXGBE_DCB_GSP_SUPPORT 0x00000010 + +struct ixgbe_dcb_support { + u32 capabilities; /* DCB capabilities */ + + /* Each bit represents a number of TCs configurable in the hw. + * If 8 traffic classes can be configured, the value is 0x80. */ + u8 traffic_classes; + u8 pfc_traffic_classes; +}; + +enum ixgbe_dcb_tsa { + ixgbe_dcb_tsa_ets = 0, + ixgbe_dcb_tsa_group_strict_cee, + ixgbe_dcb_tsa_strict +}; + +/* Traffic class bandwidth allocation per direction */ +struct ixgbe_dcb_tc_path { + u8 bwg_id; /* Bandwidth Group (BWG) ID */ + u8 bwg_percent; /* % of BWG's bandwidth */ + u8 link_percent; /* % of link bandwidth */ + u8 up_to_tc_bitmap; /* User Priority to Traffic Class mapping */ + u16 data_credits_refill; /* Credit refill amount in 64B granularity */ + u16 data_credits_max; /* Max credits for a configured packet buffer + * in 64B granularity.*/ + enum ixgbe_dcb_tsa tsa; /* Link or Group Strict Priority */ +}; + +enum ixgbe_dcb_pfc { + ixgbe_dcb_pfc_disabled = 0, + ixgbe_dcb_pfc_enabled, + ixgbe_dcb_pfc_enabled_txonly, + ixgbe_dcb_pfc_enabled_rxonly +}; + +/* Traffic class configuration */ +struct ixgbe_dcb_tc_config { + struct ixgbe_dcb_tc_path path[2]; /* One each for Tx/Rx */ + enum ixgbe_dcb_pfc pfc; /* Class based flow control setting */ + + u16 desc_credits_max; /* For Tx Descriptor arbitration */ + u8 tc; /* Traffic class (TC) */ +}; + +enum ixgbe_dcb_pba { + /* PBA[0-7] each use 64KB FIFO */ + ixgbe_dcb_pba_equal = PBA_STRATEGY_EQUAL, + /* PBA[0-3] each use 80KB, PBA[4-7] each use 48KB */ + ixgbe_dcb_pba_80_48 = PBA_STRATEGY_WEIGHTED +}; + +struct ixgbe_dcb_num_tcs { + u8 pg_tcs; + u8 pfc_tcs; +}; + +struct ixgbe_dcb_config { + struct ixgbe_dcb_tc_config tc_config[IXGBE_DCB_MAX_TRAFFIC_CLASS]; + struct ixgbe_dcb_support support; + struct ixgbe_dcb_num_tcs num_tcs; + u8 bw_percentage[2][IXGBE_DCB_MAX_BW_GROUP]; /* One each for Tx/Rx */ + bool pfc_mode_enable; + bool round_robin_enable; + + enum ixgbe_dcb_pba rx_pba_cfg; + + u32 dcb_cfg_version; /* Not used...OS-specific? 
*/ + u32 link_speed; /* For bandwidth allocation validation purpose */ + bool vt_mode; +}; + +/* DCB driver APIs */ + +/* DCB rule checking */ +s32 ixgbe_dcb_check_config_cee(struct ixgbe_dcb_config *); + +/* DCB credits calculation */ +s32 ixgbe_dcb_calculate_tc_credits(u8 *, u16 *, u16 *, int); +s32 ixgbe_dcb_calculate_tc_credits_cee(struct ixgbe_hw *, + struct ixgbe_dcb_config *, u32, u8); + +/* DCB PFC */ +s32 ixgbe_dcb_config_pfc(struct ixgbe_hw *, u8, u8 *); +s32 ixgbe_dcb_config_pfc_cee(struct ixgbe_hw *, struct ixgbe_dcb_config *); + +/* DCB stats */ +s32 ixgbe_dcb_config_tc_stats(struct ixgbe_hw *); +s32 ixgbe_dcb_get_tc_stats(struct ixgbe_hw *, struct ixgbe_hw_stats *, u8); +s32 ixgbe_dcb_get_pfc_stats(struct ixgbe_hw *, struct ixgbe_hw_stats *, u8); + +/* DCB config arbiters */ +s32 ixgbe_dcb_config_tx_desc_arbiter_cee(struct ixgbe_hw *, + struct ixgbe_dcb_config *); +s32 ixgbe_dcb_config_tx_data_arbiter_cee(struct ixgbe_hw *, + struct ixgbe_dcb_config *); +s32 ixgbe_dcb_config_rx_arbiter_cee(struct ixgbe_hw *, + struct ixgbe_dcb_config *); + +/* DCB unpack routines */ +void ixgbe_dcb_unpack_pfc_cee(struct ixgbe_dcb_config *, u8 *, u8 *); +void ixgbe_dcb_unpack_refill_cee(struct ixgbe_dcb_config *, int, u16 *); +void ixgbe_dcb_unpack_max_cee(struct ixgbe_dcb_config *, u16 *); +void ixgbe_dcb_unpack_bwgid_cee(struct ixgbe_dcb_config *, int, u8 *); +void ixgbe_dcb_unpack_tsa_cee(struct ixgbe_dcb_config *, int, u8 *); +void ixgbe_dcb_unpack_map_cee(struct ixgbe_dcb_config *, int, u8 *); +u8 ixgbe_dcb_get_tc_from_up(struct ixgbe_dcb_config *, int, u8); + +/* DCB initialization */ +s32 ixgbe_dcb_hw_config(struct ixgbe_hw *, u16 *, u16 *, u8 *, u8 *, u8 *); +s32 ixgbe_dcb_hw_config_cee(struct ixgbe_hw *, struct ixgbe_dcb_config *); +#endif /* _IXGBE_DCB_H_ */ diff --git a/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_dcb_82598.c b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_dcb_82598.c new file mode 100644 index 000000000..bb309e28f --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_dcb_82598.c @@ -0,0 +1,343 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + + +#include "ixgbe_type.h" +#include "ixgbe_dcb.h" +#include "ixgbe_dcb_82598.h" + +/** + * ixgbe_dcb_get_tc_stats_82598 - Return status data for each traffic class + * @hw: pointer to hardware structure + * @stats: pointer to statistics structure + * @tc_count: Number of elements in bwg_array. + * + * This function returns the status data for each of the Traffic Classes in use. + */ +s32 ixgbe_dcb_get_tc_stats_82598(struct ixgbe_hw *hw, + struct ixgbe_hw_stats *stats, + u8 tc_count) +{ + int tc; + + DEBUGFUNC("dcb_get_tc_stats"); + + if (tc_count > IXGBE_DCB_MAX_TRAFFIC_CLASS) + return IXGBE_ERR_PARAM; + + /* Statistics pertaining to each traffic class */ + for (tc = 0; tc < tc_count; tc++) { + /* Transmitted Packets */ + stats->qptc[tc] += IXGBE_READ_REG(hw, IXGBE_QPTC(tc)); + /* Transmitted Bytes */ + stats->qbtc[tc] += IXGBE_READ_REG(hw, IXGBE_QBTC(tc)); + /* Received Packets */ + stats->qprc[tc] += IXGBE_READ_REG(hw, IXGBE_QPRC(tc)); + /* Received Bytes */ + stats->qbrc[tc] += IXGBE_READ_REG(hw, IXGBE_QBRC(tc)); + +#if 0 + /* Can we get rid of these?? Consequently, getting rid + * of the tc_stats structure. 
+ */ + tc_stats_array[up]->in_overflow_discards = 0; + tc_stats_array[up]->out_overflow_discards = 0; +#endif + } + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_dcb_get_pfc_stats_82598 - Returns CBFC status data + * @hw: pointer to hardware structure + * @stats: pointer to statistics structure + * @tc_count: Number of elements in bwg_array. + * + * This function returns the CBFC status data for each of the Traffic Classes. + */ +s32 ixgbe_dcb_get_pfc_stats_82598(struct ixgbe_hw *hw, + struct ixgbe_hw_stats *stats, + u8 tc_count) +{ + int tc; + + DEBUGFUNC("dcb_get_pfc_stats"); + + if (tc_count > IXGBE_DCB_MAX_TRAFFIC_CLASS) + return IXGBE_ERR_PARAM; + + for (tc = 0; tc < tc_count; tc++) { + /* Priority XOFF Transmitted */ + stats->pxofftxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(tc)); + /* Priority XOFF Received */ + stats->pxoffrxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(tc)); + } + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_dcb_config_rx_arbiter_82598 - Config Rx data arbiter + * @hw: pointer to hardware structure + * @refill: refill credits index by traffic class + * @max: max credits index by traffic class + * @tsa: transmission selection algorithm indexed by traffic class + * + * Configure Rx Data Arbiter and credits for each traffic class. + */ +s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw, u16 *refill, + u16 *max, u8 *tsa) +{ + u32 reg = 0; + u32 credit_refill = 0; + u32 credit_max = 0; + u8 i = 0; + + reg = IXGBE_READ_REG(hw, IXGBE_RUPPBMR) | IXGBE_RUPPBMR_MQA; + IXGBE_WRITE_REG(hw, IXGBE_RUPPBMR, reg); + + reg = IXGBE_READ_REG(hw, IXGBE_RMCS); + /* Enable Arbiter */ + reg &= ~IXGBE_RMCS_ARBDIS; + /* Enable Receive Recycle within the BWG */ + reg |= IXGBE_RMCS_RRM; + /* Enable Deficit Fixed Priority arbitration*/ + reg |= IXGBE_RMCS_DFP; + + IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg); + + /* Configure traffic class credits and priority */ + for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + credit_refill = refill[i]; + credit_max = max[i]; + + reg = credit_refill | (credit_max << IXGBE_RT2CR_MCL_SHIFT); + + if (tsa[i] == ixgbe_dcb_tsa_strict) + reg |= IXGBE_RT2CR_LSP; + + IXGBE_WRITE_REG(hw, IXGBE_RT2CR(i), reg); + } + + reg = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); + reg |= IXGBE_RDRXCTL_RDMTS_1_2; + reg |= IXGBE_RDRXCTL_MPBEN; + reg |= IXGBE_RDRXCTL_MCEN; + IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg); + + reg = IXGBE_READ_REG(hw, IXGBE_RXCTRL); + /* Make sure there is enough descriptors before arbitration */ + reg &= ~IXGBE_RXCTRL_DMBYPS; + IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_dcb_config_tx_desc_arbiter_82598 - Config Tx Desc. arbiter + * @hw: pointer to hardware structure + * @refill: refill credits index by traffic class + * @max: max credits index by traffic class + * @bwg_id: bandwidth grouping indexed by traffic class + * @tsa: transmission selection algorithm indexed by traffic class + * + * Configure Tx Descriptor Arbiter and credits for each traffic class. 
+ */ +s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw, + u16 *refill, u16 *max, u8 *bwg_id, + u8 *tsa) +{ + u32 reg, max_credits; + u8 i; + + reg = IXGBE_READ_REG(hw, IXGBE_DPMCS); + + /* Enable arbiter */ + reg &= ~IXGBE_DPMCS_ARBDIS; + reg |= IXGBE_DPMCS_TSOEF; + + /* Configure Max TSO packet size 34KB including payload and headers */ + reg |= (0x4 << IXGBE_DPMCS_MTSOS_SHIFT); + + IXGBE_WRITE_REG(hw, IXGBE_DPMCS, reg); + + /* Configure traffic class credits and priority */ + for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + max_credits = max[i]; + reg = max_credits << IXGBE_TDTQ2TCCR_MCL_SHIFT; + reg |= refill[i]; + reg |= (u32)(bwg_id[i]) << IXGBE_TDTQ2TCCR_BWG_SHIFT; + + if (tsa[i] == ixgbe_dcb_tsa_group_strict_cee) + reg |= IXGBE_TDTQ2TCCR_GSP; + + if (tsa[i] == ixgbe_dcb_tsa_strict) + reg |= IXGBE_TDTQ2TCCR_LSP; + + IXGBE_WRITE_REG(hw, IXGBE_TDTQ2TCCR(i), reg); + } + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_dcb_config_tx_data_arbiter_82598 - Config Tx data arbiter + * @hw: pointer to hardware structure + * @refill: refill credits index by traffic class + * @max: max credits index by traffic class + * @bwg_id: bandwidth grouping indexed by traffic class + * @tsa: transmission selection algorithm indexed by traffic class + * + * Configure Tx Data Arbiter and credits for each traffic class. + */ +s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw, + u16 *refill, u16 *max, u8 *bwg_id, + u8 *tsa) +{ + u32 reg; + u8 i; + + reg = IXGBE_READ_REG(hw, IXGBE_PDPMCS); + /* Enable Data Plane Arbiter */ + reg &= ~IXGBE_PDPMCS_ARBDIS; + /* Enable DFP and Transmit Recycle Mode */ + reg |= (IXGBE_PDPMCS_TPPAC | IXGBE_PDPMCS_TRM); + + IXGBE_WRITE_REG(hw, IXGBE_PDPMCS, reg); + + /* Configure traffic class credits and priority */ + for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + reg = refill[i]; + reg |= (u32)(max[i]) << IXGBE_TDPT2TCCR_MCL_SHIFT; + reg |= (u32)(bwg_id[i]) << IXGBE_TDPT2TCCR_BWG_SHIFT; + + if (tsa[i] == ixgbe_dcb_tsa_group_strict_cee) + reg |= IXGBE_TDPT2TCCR_GSP; + + if (tsa[i] == ixgbe_dcb_tsa_strict) + reg |= IXGBE_TDPT2TCCR_LSP; + + IXGBE_WRITE_REG(hw, IXGBE_TDPT2TCCR(i), reg); + } + + /* Enable Tx packet buffer division */ + reg = IXGBE_READ_REG(hw, IXGBE_DTXCTL); + reg |= IXGBE_DTXCTL_ENDBUBD; + IXGBE_WRITE_REG(hw, IXGBE_DTXCTL, reg); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_dcb_config_pfc_82598 - Config priority flow control + * @hw: pointer to hardware structure + * @pfc_en: enabled pfc bitmask + * + * Configure Priority Flow Control for each traffic class. 
+ */ +s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en) +{ + u32 fcrtl, reg; + u8 i; + + /* Enable Transmit Priority Flow Control */ + reg = IXGBE_READ_REG(hw, IXGBE_RMCS); + reg &= ~IXGBE_RMCS_TFCE_802_3X; + reg |= IXGBE_RMCS_TFCE_PRIORITY; + IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg); + + /* Enable Receive Priority Flow Control */ + reg = IXGBE_READ_REG(hw, IXGBE_FCTRL); + reg &= ~(IXGBE_FCTRL_RPFCE | IXGBE_FCTRL_RFCE); + + if (pfc_en) + reg |= IXGBE_FCTRL_RPFCE; + + IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg); + + /* Configure PFC Tx thresholds per TC */ + for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + if (!(pfc_en & (1 << i))) { + IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), 0); + IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), 0); + continue; + } + + fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE; + reg = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN; + IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl); + IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), reg); + } + + /* Configure pause time */ + reg = hw->fc.pause_time | (hw->fc.pause_time << 16); + for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++) + IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg); + + /* Configure flow control refresh threshold value */ + IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_dcb_config_tc_stats_82598 - Configure traffic class statistics + * @hw: pointer to hardware structure + * + * Configure queue statistics registers, all queues belonging to same traffic + * class uses a single set of queue statistics counters. + */ +s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw) +{ + u32 reg = 0; + u8 i = 0; + u8 j = 0; + + /* Receive Queues stats setting - 8 queues per statistics reg */ + for (i = 0, j = 0; i < 15 && j < 8; i = i + 2, j++) { + reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(i)); + reg |= ((0x1010101) * j); + IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg); + reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(i + 1)); + reg |= ((0x1010101) * j); + IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i + 1), reg); + } + /* Transmit Queues stats setting - 4 queues per statistics reg*/ + for (i = 0; i < 8; i++) { + reg = IXGBE_READ_REG(hw, IXGBE_TQSMR(i)); + reg |= ((0x1010101) * i); + IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i), reg); + } + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_dcb_hw_config_82598 - Config and enable DCB + * @hw: pointer to hardware structure + * @link_speed: unused + * @refill: refill credits index by traffic class + * @max: max credits index by traffic class + * @bwg_id: bandwidth grouping indexed by traffic class + * @tsa: transmission selection algorithm indexed by traffic class + * + * Configure dcb settings and enable dcb mode. 
+ */ +s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw, int link_speed, + u16 *refill, u16 *max, u8 *bwg_id, + u8 *tsa) +{ + UNREFERENCED_1PARAMETER(link_speed); + + ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, tsa); + ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, bwg_id, + tsa); + ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, bwg_id, + tsa); + ixgbe_dcb_config_tc_stats_82598(hw); + + + return IXGBE_SUCCESS; +} diff --git a/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_dcb_82598.h b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_dcb_82598.h new file mode 100644 index 000000000..8f3688137 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_dcb_82598.h @@ -0,0 +1,70 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + +#ifndef _IXGBE_DCB_82598_H_ +#define _IXGBE_DCB_82598_H_ + +/* DCB register definitions */ + +#define IXGBE_DPMCS_MTSOS_SHIFT 16 +#define IXGBE_DPMCS_TDPAC 0x00000001 /* 0 Round Robin, + * 1 DFP - Deficit Fixed Priority */ +#define IXGBE_DPMCS_TRM 0x00000010 /* Transmit Recycle Mode */ +#define IXGBE_DPMCS_ARBDIS 0x00000040 /* DCB arbiter disable */ +#define IXGBE_DPMCS_TSOEF 0x00080000 /* TSO Expand Factor: 0=x4, 1=x2 */ + +#define IXGBE_RUPPBMR_MQA 0x80000000 /* Enable UP to queue mapping */ + +#define IXGBE_RT2CR_MCL_SHIFT 12 /* Offset to Max Credit Limit setting */ +#define IXGBE_RT2CR_LSP 0x80000000 /* LSP enable bit */ + +#define IXGBE_RDRXCTL_MPBEN 0x00000010 /* DMA config for multiple packet + * buffers enable */ +#define IXGBE_RDRXCTL_MCEN 0x00000040 /* DMA config for multiple cores + * (RSS) enable */ + +#define IXGBE_TDTQ2TCCR_MCL_SHIFT 12 +#define IXGBE_TDTQ2TCCR_BWG_SHIFT 9 +#define IXGBE_TDTQ2TCCR_GSP 0x40000000 +#define IXGBE_TDTQ2TCCR_LSP 0x80000000 + +#define IXGBE_TDPT2TCCR_MCL_SHIFT 12 +#define IXGBE_TDPT2TCCR_BWG_SHIFT 9 +#define IXGBE_TDPT2TCCR_GSP 0x40000000 +#define IXGBE_TDPT2TCCR_LSP 0x80000000 + +#define IXGBE_PDPMCS_TPPAC 0x00000020 /* 0 Round Robin, + * 1 DFP - Deficit Fixed Priority */ +#define IXGBE_PDPMCS_ARBDIS 0x00000040 /* Arbiter disable */ +#define IXGBE_PDPMCS_TRM 0x00000100 /* Transmit Recycle Mode enable */ + +#define IXGBE_DTXCTL_ENDBUBD 0x00000004 /* Enable DBU buffer division */ + +#define IXGBE_TXPBSIZE_40KB 0x0000A000 /* 40KB Packet Buffer */ +#define IXGBE_RXPBSIZE_48KB 0x0000C000 /* 48KB Packet Buffer */ +#define IXGBE_RXPBSIZE_64KB 0x00010000 /* 64KB Packet Buffer */ +#define IXGBE_RXPBSIZE_80KB 0x00014000 /* 80KB Packet Buffer */ + +/* DCB driver APIs */ + +/* DCB PFC */ +s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *, u8); + +/* DCB stats */ +s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *); +s32 ixgbe_dcb_get_tc_stats_82598(struct ixgbe_hw *, + struct ixgbe_hw_stats *, u8); +s32 ixgbe_dcb_get_pfc_stats_82598(struct ixgbe_hw *, + struct ixgbe_hw_stats *, u8); + +/* DCB config arbiters */ +s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *, u16 *, u16 *, + u8 *, u8 *); +s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *, u16 *, u16 *, + u8 *, u8 *); +s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *, u16 *, u16 *, u8 *); + +/* DCB initialization */ +s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *, int, u16 *, u16 *, u8 *, u8 *); +#endif /* _IXGBE_DCB_82958_H_ */ diff --git a/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_dcb_82599.c b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_dcb_82599.c new file mode 100644 index 000000000..04e0d1fb7 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_dcb_82599.c @@ -0,0 +1,581 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + + +#include "ixgbe_type.h" +#include "ixgbe_dcb.h" +#include "ixgbe_dcb_82599.h" + +/** + * ixgbe_dcb_get_tc_stats_82599 - Returns status for each traffic class + * @hw: pointer to hardware structure + * @stats: pointer to statistics structure + * @tc_count: Number of elements in bwg_array. + * + * This function returns the status data for each of the Traffic Classes in use. + */ +s32 ixgbe_dcb_get_tc_stats_82599(struct ixgbe_hw *hw, + struct ixgbe_hw_stats *stats, + u8 tc_count) +{ + int tc; + + DEBUGFUNC("dcb_get_tc_stats"); + + if (tc_count > IXGBE_DCB_MAX_TRAFFIC_CLASS) + return IXGBE_ERR_PARAM; + + /* Statistics pertaining to each traffic class */ + for (tc = 0; tc < tc_count; tc++) { + /* Transmitted Packets */ + stats->qptc[tc] += IXGBE_READ_REG(hw, IXGBE_QPTC(tc)); + /* Transmitted Bytes (read low first to prevent missed carry) */ + stats->qbtc[tc] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(tc)); + stats->qbtc[tc] += + (((u64)(IXGBE_READ_REG(hw, IXGBE_QBTC_H(tc)))) << 32); + /* Received Packets */ + stats->qprc[tc] += IXGBE_READ_REG(hw, IXGBE_QPRC(tc)); + /* Received Bytes (read low first to prevent missed carry) */ + stats->qbrc[tc] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(tc)); + stats->qbrc[tc] += + (((u64)(IXGBE_READ_REG(hw, IXGBE_QBRC_H(tc)))) << 32); + + /* Received Dropped Packet */ + stats->qprdc[tc] += IXGBE_READ_REG(hw, IXGBE_QPRDC(tc)); + } + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_dcb_get_pfc_stats_82599 - Return CBFC status data + * @hw: pointer to hardware structure + * @stats: pointer to statistics structure + * @tc_count: Number of elements in bwg_array. + * + * This function returns the CBFC status data for each of the Traffic Classes. + */ +s32 ixgbe_dcb_get_pfc_stats_82599(struct ixgbe_hw *hw, + struct ixgbe_hw_stats *stats, + u8 tc_count) +{ + int tc; + + DEBUGFUNC("dcb_get_pfc_stats"); + + if (tc_count > IXGBE_DCB_MAX_TRAFFIC_CLASS) + return IXGBE_ERR_PARAM; + + for (tc = 0; tc < tc_count; tc++) { + /* Priority XOFF Transmitted */ + stats->pxofftxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(tc)); + /* Priority XOFF Received */ + stats->pxoffrxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(tc)); + } + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_dcb_config_rx_arbiter_82599 - Config Rx Data arbiter + * @hw: pointer to hardware structure + * @refill: refill credits index by traffic class + * @max: max credits index by traffic class + * @bwg_id: bandwidth grouping indexed by traffic class + * @tsa: transmission selection algorithm indexed by traffic class + * @map: priority to tc assignments indexed by priority + * + * Configure Rx Packet Arbiter and credits for each traffic class. + */ +s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw, u16 *refill, + u16 *max, u8 *bwg_id, u8 *tsa, + u8 *map) +{ + u32 reg = 0; + u32 credit_refill = 0; + u32 credit_max = 0; + u8 i = 0; + + /* + * Disable the arbiter before changing parameters + * (always enable recycle mode; WSP) + */ + reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC | IXGBE_RTRPCS_ARBDIS; + IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg); + + /* + * map all UPs to TCs. up_to_tc_bitmap for each TC has corresponding + * bits sets for the UPs that needs to be mappped to that TC. + * e.g if priorities 6 and 7 are to be mapped to a TC then the + * up_to_tc_bitmap value for that TC will be 11000000 in binary. 
+ */ + reg = 0; + for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++) + reg |= (map[i] << (i * IXGBE_RTRUP2TC_UP_SHIFT)); + + IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg); + + /* Configure traffic class credits and priority */ + for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + credit_refill = refill[i]; + credit_max = max[i]; + reg = credit_refill | (credit_max << IXGBE_RTRPT4C_MCL_SHIFT); + + reg |= (u32)(bwg_id[i]) << IXGBE_RTRPT4C_BWG_SHIFT; + + if (tsa[i] == ixgbe_dcb_tsa_strict) + reg |= IXGBE_RTRPT4C_LSP; + + IXGBE_WRITE_REG(hw, IXGBE_RTRPT4C(i), reg); + } + + /* + * Configure Rx packet plane (recycle mode; WSP) and + * enable arbiter + */ + reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC; + IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_dcb_config_tx_desc_arbiter_82599 - Config Tx Desc. arbiter + * @hw: pointer to hardware structure + * @refill: refill credits index by traffic class + * @max: max credits index by traffic class + * @bwg_id: bandwidth grouping indexed by traffic class + * @tsa: transmission selection algorithm indexed by traffic class + * + * Configure Tx Descriptor Arbiter and credits for each traffic class. + */ +s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw, u16 *refill, + u16 *max, u8 *bwg_id, u8 *tsa) +{ + u32 reg, max_credits; + u8 i; + + /* Clear the per-Tx queue credits; we use per-TC instead */ + for (i = 0; i < 128; i++) { + IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i); + IXGBE_WRITE_REG(hw, IXGBE_RTTDT1C, 0); + } + + /* Configure traffic class credits and priority */ + for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + max_credits = max[i]; + reg = max_credits << IXGBE_RTTDT2C_MCL_SHIFT; + reg |= refill[i]; + reg |= (u32)(bwg_id[i]) << IXGBE_RTTDT2C_BWG_SHIFT; + + if (tsa[i] == ixgbe_dcb_tsa_group_strict_cee) + reg |= IXGBE_RTTDT2C_GSP; + + if (tsa[i] == ixgbe_dcb_tsa_strict) + reg |= IXGBE_RTTDT2C_LSP; + + IXGBE_WRITE_REG(hw, IXGBE_RTTDT2C(i), reg); + } + + /* + * Configure Tx descriptor plane (recycle mode; WSP) and + * enable arbiter + */ + reg = IXGBE_RTTDCS_TDPAC | IXGBE_RTTDCS_TDRM; + IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_dcb_config_tx_data_arbiter_82599 - Config Tx Data arbiter + * @hw: pointer to hardware structure + * @refill: refill credits index by traffic class + * @max: max credits index by traffic class + * @bwg_id: bandwidth grouping indexed by traffic class + * @tsa: transmission selection algorithm indexed by traffic class + * @map: priority to tc assignments indexed by priority + * + * Configure Tx Packet Arbiter and credits for each traffic class. + */ +s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw, u16 *refill, + u16 *max, u8 *bwg_id, u8 *tsa, + u8 *map) +{ + u32 reg; + u8 i; + + /* + * Disable the arbiter before changing parameters + * (always enable recycle mode; SP; arb delay) + */ + reg = IXGBE_RTTPCS_TPPAC | IXGBE_RTTPCS_TPRM | + (IXGBE_RTTPCS_ARBD_DCB << IXGBE_RTTPCS_ARBD_SHIFT) | + IXGBE_RTTPCS_ARBDIS; + IXGBE_WRITE_REG(hw, IXGBE_RTTPCS, reg); + + /* + * map all UPs to TCs. up_to_tc_bitmap for each TC has corresponding + * bits sets for the UPs that needs to be mappped to that TC. + * e.g if priorities 6 and 7 are to be mapped to a TC then the + * up_to_tc_bitmap value for that TC will be 11000000 in binary. 
+ */ + reg = 0; + for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++) + reg |= (map[i] << (i * IXGBE_RTTUP2TC_UP_SHIFT)); + + IXGBE_WRITE_REG(hw, IXGBE_RTTUP2TC, reg); + + /* Configure traffic class credits and priority */ + for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + reg = refill[i]; + reg |= (u32)(max[i]) << IXGBE_RTTPT2C_MCL_SHIFT; + reg |= (u32)(bwg_id[i]) << IXGBE_RTTPT2C_BWG_SHIFT; + + if (tsa[i] == ixgbe_dcb_tsa_group_strict_cee) + reg |= IXGBE_RTTPT2C_GSP; + + if (tsa[i] == ixgbe_dcb_tsa_strict) + reg |= IXGBE_RTTPT2C_LSP; + + IXGBE_WRITE_REG(hw, IXGBE_RTTPT2C(i), reg); + } + + /* + * Configure Tx packet plane (recycle mode; SP; arb delay) and + * enable arbiter + */ + reg = IXGBE_RTTPCS_TPPAC | IXGBE_RTTPCS_TPRM | + (IXGBE_RTTPCS_ARBD_DCB << IXGBE_RTTPCS_ARBD_SHIFT); + IXGBE_WRITE_REG(hw, IXGBE_RTTPCS, reg); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_dcb_config_pfc_82599 - Configure priority flow control + * @hw: pointer to hardware structure + * @pfc_en: enabled pfc bitmask + * @map: priority to tc assignments indexed by priority + * + * Configure Priority Flow Control (PFC) for each traffic class. + */ +s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *map) +{ + u32 i, j, fcrtl, reg; + u8 max_tc = 0; + + /* Enable Transmit Priority Flow Control */ + IXGBE_WRITE_REG(hw, IXGBE_FCCFG, IXGBE_FCCFG_TFCE_PRIORITY); + + /* Enable Receive Priority Flow Control */ + reg = IXGBE_READ_REG(hw, IXGBE_MFLCN); + reg |= IXGBE_MFLCN_DPF; + + /* + * X540 supports per TC Rx priority flow control. So + * clear all TCs and only enable those that should be + * enabled. + */ + reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE); + + if (hw->mac.type >= ixgbe_mac_X540) + reg |= pfc_en << IXGBE_MFLCN_RPFCE_SHIFT; + + if (pfc_en) + reg |= IXGBE_MFLCN_RPFCE; + + IXGBE_WRITE_REG(hw, IXGBE_MFLCN, reg); + + for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++) { + if (map[i] > max_tc) + max_tc = map[i]; + } + + + /* Configure PFC Tx thresholds per TC */ + for (i = 0; i <= max_tc; i++) { + int enabled = 0; + + for (j = 0; j < IXGBE_DCB_MAX_USER_PRIORITY; j++) { + if ((map[j] == i) && (pfc_en & (1 << j))) { + enabled = 1; + break; + } + } + + if (enabled) { + reg = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN; + fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE; + IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl); + } else { + /* + * In order to prevent Tx hangs when the internal Tx + * switch is enabled we must set the high water mark + * to the Rx packet buffer size - 24KB. This allows + * the Tx switch to function even under heavy Rx + * workloads. 
+ */ + reg = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 24576; + IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0); + } + + IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), reg); + } + + for (; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0); + IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), 0); + } + + /* Configure pause time (2 TCs per register) */ + reg = hw->fc.pause_time | (hw->fc.pause_time << 16); + for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++) + IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg); + + /* Configure flow control refresh threshold value */ + IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_dcb_config_tc_stats_82599 - Config traffic class statistics + * @hw: pointer to hardware structure + * @dcb_config: pointer to ixgbe_dcb_config structure + * + * Configure queue statistics registers, all queues belonging to same traffic + * class uses a single set of queue statistics counters. + */ +s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw, + struct ixgbe_dcb_config *dcb_config) +{ + u32 reg = 0; + u8 i = 0; + u8 tc_count = 8; + bool vt_mode = false; + + if (dcb_config != NULL) { + tc_count = dcb_config->num_tcs.pg_tcs; + vt_mode = dcb_config->vt_mode; + } + + if (!((tc_count == 8 && vt_mode == false) || tc_count == 4)) + return IXGBE_ERR_PARAM; + + if (tc_count == 8 && vt_mode == false) { + /* + * Receive Queues stats setting + * 32 RQSMR registers, each configuring 4 queues. + * + * Set all 16 queues of each TC to the same stat + * with TC 'n' going to stat 'n'. + */ + for (i = 0; i < 32; i++) { + reg = 0x01010101 * (i / 4); + IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg); + } + /* + * Transmit Queues stats setting + * 32 TQSM registers, each controlling 4 queues. + * + * Set all queues of each TC to the same stat + * with TC 'n' going to stat 'n'. + * Tx queues are allocated non-uniformly to TCs: + * 32, 32, 16, 16, 8, 8, 8, 8. + */ + for (i = 0; i < 32; i++) { + if (i < 8) + reg = 0x00000000; + else if (i < 16) + reg = 0x01010101; + else if (i < 20) + reg = 0x02020202; + else if (i < 24) + reg = 0x03030303; + else if (i < 26) + reg = 0x04040404; + else if (i < 28) + reg = 0x05050505; + else if (i < 30) + reg = 0x06060606; + else + reg = 0x07070707; + IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), reg); + } + } else if (tc_count == 4 && vt_mode == false) { + /* + * Receive Queues stats setting + * 32 RQSMR registers, each configuring 4 queues. + * + * Set all 16 queues of each TC to the same stat + * with TC 'n' going to stat 'n'. + */ + for (i = 0; i < 32; i++) { + if (i % 8 > 3) + /* In 4 TC mode, odd 16-queue ranges are + * not used. + */ + continue; + reg = 0x01010101 * (i / 8); + IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg); + } + /* + * Transmit Queues stats setting + * 32 TQSM registers, each controlling 4 queues. + * + * Set all queues of each TC to the same stat + * with TC 'n' going to stat 'n'. + * Tx queues are allocated non-uniformly to TCs: + * 64, 32, 16, 16. + */ + for (i = 0; i < 32; i++) { + if (i < 16) + reg = 0x00000000; + else if (i < 24) + reg = 0x01010101; + else if (i < 28) + reg = 0x02020202; + else + reg = 0x03030303; + IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), reg); + } + } else if (tc_count == 4 && vt_mode == true) { + /* + * Receive Queues stats setting + * 32 RQSMR registers, each configuring 4 queues. + * + * Queue Indexing in 32 VF with DCB mode maps 4 TC's to each + * pool. Set all 32 queues of each TC across pools to the same + * stat with TC 'n' going to stat 'n'. 
+ */ + for (i = 0; i < 32; i++) + IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), 0x03020100); + /* + * Transmit Queues stats setting + * 32 TQSM registers, each controlling 4 queues. + * + * Queue Indexing in 32 VF with DCB mode maps 4 TC's to each + * pool. Set all 32 queues of each TC across pools to the same + * stat with TC 'n' going to stat 'n'. + */ + for (i = 0; i < 32; i++) + IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), 0x03020100); + } + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_dcb_config_82599 - Configure general DCB parameters + * @hw: pointer to hardware structure + * @dcb_config: pointer to ixgbe_dcb_config structure + * + * Configure general DCB parameters. + */ +s32 ixgbe_dcb_config_82599(struct ixgbe_hw *hw, + struct ixgbe_dcb_config *dcb_config) +{ + u32 reg; + u32 q; + + /* Disable the Tx desc arbiter so that MTQC can be changed */ + reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS); + reg |= IXGBE_RTTDCS_ARBDIS; + IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg); + + reg = IXGBE_READ_REG(hw, IXGBE_MRQC); + if (dcb_config->num_tcs.pg_tcs == 8) { + /* Enable DCB for Rx with 8 TCs */ + switch (reg & IXGBE_MRQC_MRQE_MASK) { + case 0: + case IXGBE_MRQC_RT4TCEN: + /* RSS disabled cases */ + reg = (reg & ~IXGBE_MRQC_MRQE_MASK) | + IXGBE_MRQC_RT8TCEN; + break; + case IXGBE_MRQC_RSSEN: + case IXGBE_MRQC_RTRSS4TCEN: + /* RSS enabled cases */ + reg = (reg & ~IXGBE_MRQC_MRQE_MASK) | + IXGBE_MRQC_RTRSS8TCEN; + break; + default: + /* + * Unsupported value, assume stale data, + * overwrite no RSS + */ + ASSERT(0); + reg = (reg & ~IXGBE_MRQC_MRQE_MASK) | + IXGBE_MRQC_RT8TCEN; + } + } + if (dcb_config->num_tcs.pg_tcs == 4) { + /* We support both VT-on and VT-off with 4 TCs. */ + if (dcb_config->vt_mode) + reg = (reg & ~IXGBE_MRQC_MRQE_MASK) | + IXGBE_MRQC_VMDQRT4TCEN; + else + reg = (reg & ~IXGBE_MRQC_MRQE_MASK) | + IXGBE_MRQC_RTRSS4TCEN; + } + IXGBE_WRITE_REG(hw, IXGBE_MRQC, reg); + + /* Enable DCB for Tx with 8 TCs */ + if (dcb_config->num_tcs.pg_tcs == 8) + reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ; + else { + /* We support both VT-on and VT-off with 4 TCs. */ + reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ; + if (dcb_config->vt_mode) + reg |= IXGBE_MTQC_VT_ENA; + } + IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg); + + /* Disable drop for all queues */ + for (q = 0; q < 128; q++) + IXGBE_WRITE_REG(hw, IXGBE_QDE, + (IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT))); + + /* Enable the Tx desc arbiter */ + reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS); + reg &= ~IXGBE_RTTDCS_ARBDIS; + IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg); + + /* Enable Security TX Buffer IFG for DCB */ + reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG); + reg |= IXGBE_SECTX_DCB; + IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_dcb_hw_config_82599 - Configure and enable DCB + * @hw: pointer to hardware structure + * @link_speed: unused + * @refill: refill credits index by traffic class + * @max: max credits index by traffic class + * @bwg_id: bandwidth grouping indexed by traffic class + * @tsa: transmission selection algorithm indexed by traffic class + * @map: priority to tc assignments indexed by priority + * + * Configure dcb settings and enable dcb mode. 
+ */ +s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, int link_speed, + u16 *refill, u16 *max, u8 *bwg_id, u8 *tsa, + u8 *map) +{ + UNREFERENCED_1PARAMETER(link_speed); + + ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id, tsa, + map); + ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, bwg_id, + tsa); + ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id, + tsa, map); + + return IXGBE_SUCCESS; +} + diff --git a/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_dcb_82599.h b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_dcb_82599.h new file mode 100644 index 000000000..7bd1d6a32 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_dcb_82599.h @@ -0,0 +1,124 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + +#ifndef _IXGBE_DCB_82599_H_ +#define _IXGBE_DCB_82599_H_ + +/* DCB register definitions */ +#define IXGBE_RTTDCS_TDPAC 0x00000001 /* 0 Round Robin, + * 1 WSP - Weighted Strict Priority + */ +#define IXGBE_RTTDCS_VMPAC 0x00000002 /* 0 Round Robin, + * 1 WRR - Weighted Round Robin + */ +#define IXGBE_RTTDCS_TDRM 0x00000010 /* Transmit Recycle Mode */ +#define IXGBE_RTTDCS_BDPM 0x00400000 /* Bypass Data Pipe - must clear! */ +#define IXGBE_RTTDCS_BPBFSM 0x00800000 /* Bypass PB Free Space - must + * clear! + */ +#define IXGBE_RTTDCS_SPEED_CHG 0x80000000 /* Link speed change */ + +/* Receive UP2TC mapping */ +#define IXGBE_RTRUP2TC_UP_SHIFT 3 +#define IXGBE_RTRUP2TC_UP_MASK 7 +/* Transmit UP2TC mapping */ +#define IXGBE_RTTUP2TC_UP_SHIFT 3 + +#define IXGBE_RTRPT4C_MCL_SHIFT 12 /* Offset to Max Credit Limit setting */ +#define IXGBE_RTRPT4C_BWG_SHIFT 9 /* Offset to BWG index */ +#define IXGBE_RTRPT4C_GSP 0x40000000 /* GSP enable bit */ +#define IXGBE_RTRPT4C_LSP 0x80000000 /* LSP enable bit */ + +#define IXGBE_RDRXCTL_MPBEN 0x00000010 /* DMA config for multiple packet + * buffers enable + */ +#define IXGBE_RDRXCTL_MCEN 0x00000040 /* DMA config for multiple cores + * (RSS) enable + */ + +/* RTRPCS Bit Masks */ +#define IXGBE_RTRPCS_RRM 0x00000002 /* Receive Recycle Mode enable */ +/* Receive Arbitration Control: 0 Round Robin, 1 DFP */ +#define IXGBE_RTRPCS_RAC 0x00000004 +#define IXGBE_RTRPCS_ARBDIS 0x00000040 /* Arbitration disable bit */ + +/* RTTDT2C Bit Masks */ +#define IXGBE_RTTDT2C_MCL_SHIFT 12 +#define IXGBE_RTTDT2C_BWG_SHIFT 9 +#define IXGBE_RTTDT2C_GSP 0x40000000 +#define IXGBE_RTTDT2C_LSP 0x80000000 + +#define IXGBE_RTTPT2C_MCL_SHIFT 12 +#define IXGBE_RTTPT2C_BWG_SHIFT 9 +#define IXGBE_RTTPT2C_GSP 0x40000000 +#define IXGBE_RTTPT2C_LSP 0x80000000 + +/* RTTPCS Bit Masks */ +#define IXGBE_RTTPCS_TPPAC 0x00000020 /* 0 Round Robin, + * 1 SP - Strict Priority + */ +#define IXGBE_RTTPCS_ARBDIS 0x00000040 /* Arbiter disable */ +#define IXGBE_RTTPCS_TPRM 0x00000100 /* Transmit Recycle Mode enable */ +#define IXGBE_RTTPCS_ARBD_SHIFT 22 +#define IXGBE_RTTPCS_ARBD_DCB 0x4 /* Arbitration delay in DCB mode */ + +#define IXGBE_TXPBTHRESH_DCB 0xA /* THRESH value for DCB mode */ + +/* SECTXMINIFG DCB */ +#define IXGBE_SECTX_DCB 0x00001F00 /* DCB TX Buffer SEC IFG */ + +/* BCN register definitions */ +#define IXGBE_RTTBCNRC_RF_INT_SHIFT 14 +#define IXGBE_RTTBCNRC_RS_ENA 0x80000000 + +#define IXGBE_RTTBCNCR_MNG_CMTGI 0x00000001 +#define IXGBE_RTTBCNCR_MGN_BCNA_MODE 0x00000002 +#define IXGBE_RTTBCNCR_RSV7_11_SHIFT 5 +#define IXGBE_RTTBCNCR_G 0x00000400 +#define IXGBE_RTTBCNCR_I 0x00000800 +#define IXGBE_RTTBCNCR_H 0x00001000 +#define IXGBE_RTTBCNCR_VER_SHIFT 14 +#define IXGBE_RTTBCNCR_CMT_ETH_SHIFT 16 + +#define 
IXGBE_RTTBCNACL_SMAC_L_SHIFT 16 + +#define IXGBE_RTTBCNTG_BCNA_MODE 0x80000000 + +#define IXGBE_RTTBCNRTT_TS_SHIFT 3 +#define IXGBE_RTTBCNRTT_TXQ_IDX_SHIFT 16 + +#define IXGBE_RTTBCNRD_BCN_CLEAR_ALL 0x00000002 +#define IXGBE_RTTBCNRD_DRIFT_FAC_SHIFT 2 +#define IXGBE_RTTBCNRD_DRIFT_INT_SHIFT 16 +#define IXGBE_RTTBCNRD_DRIFT_ENA 0x80000000 + + +/* DCB driver APIs */ + +/* DCB PFC */ +s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *, u8, u8 *); + +/* DCB stats */ +s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *, + struct ixgbe_dcb_config *); +s32 ixgbe_dcb_get_tc_stats_82599(struct ixgbe_hw *, + struct ixgbe_hw_stats *, u8); +s32 ixgbe_dcb_get_pfc_stats_82599(struct ixgbe_hw *, + struct ixgbe_hw_stats *, u8); + +/* DCB config arbiters */ +s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *, u16 *, u16 *, + u8 *, u8 *); +s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *, u16 *, u16 *, + u8 *, u8 *, u8 *); +s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *, u16 *, u16 *, u8 *, + u8 *, u8 *); + +/* DCB initialization */ +s32 ixgbe_dcb_config_82599(struct ixgbe_hw *, + struct ixgbe_dcb_config *); + +s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *, int, u16 *, u16 *, u8 *, + u8 *, u8 *); +#endif /* _IXGBE_DCB_82959_H_ */ diff --git a/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_hv_vf.c b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_hv_vf.c new file mode 100644 index 000000000..6005c4ac9 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_hv_vf.c @@ -0,0 +1,228 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + +#include "ixgbe_vf.h" +#include "ixgbe_hv_vf.h" + +/** + * Hyper-V variant - just a stub. + * @hw: unused + * @mc_addr_list: unused + * @mc_addr_count: unused + * @next: unused + * @clear: unused + */ +static s32 ixgbevf_hv_update_mc_addr_list_vf(struct ixgbe_hw *hw, u8 *mc_addr_list, + u32 mc_addr_count, ixgbe_mc_addr_itr next, + bool clear) +{ + UNREFERENCED_5PARAMETER(hw, mc_addr_list, mc_addr_count, next, clear); + + return IXGBE_ERR_FEATURE_NOT_SUPPORTED; +} + +/** + * Hyper-V variant - just a stub. + * @hw: unused + * @xcast_mode: unused + */ +static s32 ixgbevf_hv_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode) +{ + UNREFERENCED_2PARAMETER(hw, xcast_mode); + + return IXGBE_ERR_FEATURE_NOT_SUPPORTED; +} + +/** + * Hyper-V variant - just a stub. + * @hw: unused + * @vlan: unused + * @vind: unused + * @vlan_on: unused + * @vlvf_bypass: unused + */ +static s32 ixgbevf_hv_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind, + bool vlan_on, bool vlvf_bypass) +{ + UNREFERENCED_5PARAMETER(hw, vlan, vind, vlan_on, vlvf_bypass); + + return IXGBE_ERR_FEATURE_NOT_SUPPORTED; +} + +static s32 ixgbevf_hv_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr) +{ + UNREFERENCED_3PARAMETER(hw, index, addr); + + return IXGBE_ERR_FEATURE_NOT_SUPPORTED; +} + +/** + * Hyper-V variant - just a stub. + */ +static s32 ixgbevf_hv_reset_hw_vf(struct ixgbe_hw *hw) +{ + UNREFERENCED_PARAMETER(hw); + + return IXGBE_ERR_FEATURE_NOT_SUPPORTED; +} + +/** + * Hyper-V variant - just a stub. + */ +static s32 ixgbevf_hv_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vlan, u32 vind) +{ + UNREFERENCED_5PARAMETER(hw, index, addr, vlan, vind); + + return IXGBE_ERR_FEATURE_NOT_SUPPORTED; +} + +/** + * Hyper-V variant; there is no mailbox communication. 
+ * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @link_up: true is link is up, false otherwise + * @autoneg_wait_to_complete: unused + * + */ +static s32 ixgbevf_hv_check_mac_link_vf(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *link_up, + bool autoneg_wait_to_complete) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + struct ixgbe_mac_info *mac = &hw->mac; + u32 links_reg; + UNREFERENCED_1PARAMETER(autoneg_wait_to_complete); + + /* If we were hit with a reset drop the link */ + if (!mbx->ops.check_for_rst(hw, 0) || !mbx->timeout) + mac->get_link_status = true; + + if (!mac->get_link_status) + goto out; + + /* if link status is down no point in checking to see if pf is up */ + links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS); + if (!(links_reg & IXGBE_LINKS_UP)) + goto out; + + /* for SFP+ modules and DA cables on 82599 it can take up to 500usecs + * before the link status is correct + */ + if (mac->type == ixgbe_mac_82599_vf) { + int i; + + for (i = 0; i < 5; i++) { + DELAY(100); + links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS); + + if (!(links_reg & IXGBE_LINKS_UP)) + goto out; + } + } + + switch (links_reg & IXGBE_LINKS_SPEED_82599) { + case IXGBE_LINKS_SPEED_10G_82599: + *speed = IXGBE_LINK_SPEED_10GB_FULL; + if (hw->mac.type >= ixgbe_mac_X550) { + if (links_reg & IXGBE_LINKS_SPEED_NON_STD) + *speed = IXGBE_LINK_SPEED_2_5GB_FULL; + } + break; + case IXGBE_LINKS_SPEED_1G_82599: + *speed = IXGBE_LINK_SPEED_1GB_FULL; + break; + case IXGBE_LINKS_SPEED_100_82599: + *speed = IXGBE_LINK_SPEED_100_FULL; + if (hw->mac.type == ixgbe_mac_X550) { + if (links_reg & IXGBE_LINKS_SPEED_NON_STD) + *speed = IXGBE_LINK_SPEED_5GB_FULL; + } + break; + case IXGBE_LINKS_SPEED_10_X550EM_A: + *speed = IXGBE_LINK_SPEED_UNKNOWN; + /* Reserved for pre-x550 devices */ + if (hw->mac.type >= ixgbe_mac_X550) + *speed = IXGBE_LINK_SPEED_10_FULL; + break; + default: + *speed = IXGBE_LINK_SPEED_UNKNOWN; + } + + /* if we passed all the tests above then the link is up and we no + * longer need to check for link + */ + mac->get_link_status = false; + +out: + *link_up = !mac->get_link_status; + return IXGBE_SUCCESS; +} + +/** + * ixgbevf_hv_set_rlpml_vf - Set the maximum receive packet length + * @hw: pointer to the HW structure + * @max_size: value to assign to max frame size + * Hyper-V variant. + **/ +static s32 ixgbevf_hv_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size) +{ + u32 reg; + + /* If we are on Hyper-V, we implement this functionality + * differently. + */ + reg = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(0)); + /* CRC == 4 */ + reg |= ((max_size + 4) | IXGBE_RXDCTL_RLPML_EN); + IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(0), reg); + + return IXGBE_SUCCESS; +} + +/** + * ixgbevf_hv_negotiate_api_version_vf - Negotiate supported API version + * @hw: pointer to the HW structure + * @api: integer containing requested API version + * Hyper-V version - only ixgbe_mbox_api_10 supported. + **/ +static int ixgbevf_hv_negotiate_api_version_vf(struct ixgbe_hw *hw, int api) +{ + UNREFERENCED_1PARAMETER(hw); + + /* Hyper-V only supports api version ixgbe_mbox_api_10 */ + if (api != ixgbe_mbox_api_10) + return IXGBE_ERR_INVALID_ARGUMENT; + + return IXGBE_SUCCESS; +} + +/** + * ixgbevf_hv_init_ops_vf - Initialize the pointers for vf + * @hw: pointer to hardware structure + * + * This will assign function pointers, adapter-specific functions can + * override the assignment of generic function pointers by assigning + * their own adapter-specific function pointers. + * Does not touch the hardware. 
+ **/ +s32 ixgbevf_hv_init_ops_vf(struct ixgbe_hw *hw) +{ + /* Set defaults for VF then override applicable Hyper-V + * specific functions + */ + ixgbe_init_ops_vf(hw); + + hw->mac.ops.reset_hw = ixgbevf_hv_reset_hw_vf; + hw->mac.ops.check_link = ixgbevf_hv_check_mac_link_vf; + hw->mac.ops.negotiate_api_version = ixgbevf_hv_negotiate_api_version_vf; + hw->mac.ops.set_rar = ixgbevf_hv_set_rar_vf; + hw->mac.ops.update_mc_addr_list = ixgbevf_hv_update_mc_addr_list_vf; + hw->mac.ops.update_xcast_mode = ixgbevf_hv_update_xcast_mode; + hw->mac.ops.set_uc_addr = ixgbevf_hv_set_uc_addr_vf; + hw->mac.ops.set_vfta = ixgbevf_hv_set_vfta_vf; + hw->mac.ops.set_rlpml = ixgbevf_hv_set_rlpml_vf; + + return IXGBE_SUCCESS; +} diff --git a/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_hv_vf.h b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_hv_vf.h new file mode 100644 index 000000000..dd2e1eee4 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_hv_vf.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + +#ifndef _IXGBE_HV_VF_H_ +#define _IXGBE_HV_VF_H_ + +#include "ixgbe_type.h" + +s32 ixgbevf_hv_init_ops_vf(struct ixgbe_hw *hw); + +#endif /* _IXGBE_HV_VF_H_ */ diff --git a/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_mbx.c b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_mbx.c new file mode 100644 index 000000000..13bdb5f68 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_mbx.c @@ -0,0 +1,740 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + +#include "ixgbe_type.h" +#include "ixgbe_mbx.h" + +/** + * ixgbe_read_mbx - Reads a message from the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to read + * + * returns SUCCESS if it successfully read message from buffer + **/ +s32 ixgbe_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + s32 ret_val = IXGBE_ERR_MBX; + + DEBUGFUNC("ixgbe_read_mbx"); + + /* limit read to size of mailbox */ + if (size > mbx->size) + size = mbx->size; + + if (mbx->ops.read) + ret_val = mbx->ops.read(hw, msg, size, mbx_id); + + return ret_val; +} + +/** + * ixgbe_write_mbx - Write a message to the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully copied message into the buffer + **/ +s32 ixgbe_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + s32 ret_val = IXGBE_SUCCESS; + + DEBUGFUNC("ixgbe_write_mbx"); + + if (size > mbx->size) { + ret_val = IXGBE_ERR_MBX; + ERROR_REPORT2(IXGBE_ERROR_ARGUMENT, + "Invalid mailbox message size %d", size); + } else if (mbx->ops.write) + ret_val = mbx->ops.write(hw, msg, size, mbx_id); + + return ret_val; +} + +/** + * ixgbe_check_for_msg - checks to see if someone sent us mail + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the Status bit was found or else ERR_MBX + **/ +s32 ixgbe_check_for_msg(struct ixgbe_hw *hw, u16 mbx_id) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + s32 ret_val = IXGBE_ERR_MBX; + + DEBUGFUNC("ixgbe_check_for_msg"); + + if (mbx->ops.check_for_msg) + ret_val = mbx->ops.check_for_msg(hw, mbx_id); + + return ret_val; +} + +/** + * ixgbe_check_for_ack - checks to see if someone sent us ACK + * @hw: pointer to the HW structure + * @mbx_id: id of 
mailbox to check + * + * returns SUCCESS if the Status bit was found or else ERR_MBX + **/ +s32 ixgbe_check_for_ack(struct ixgbe_hw *hw, u16 mbx_id) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + s32 ret_val = IXGBE_ERR_MBX; + + DEBUGFUNC("ixgbe_check_for_ack"); + + if (mbx->ops.check_for_ack) + ret_val = mbx->ops.check_for_ack(hw, mbx_id); + + return ret_val; +} + +/** + * ixgbe_check_for_rst - checks to see if other side has reset + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the Status bit was found or else ERR_MBX + **/ +s32 ixgbe_check_for_rst(struct ixgbe_hw *hw, u16 mbx_id) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + s32 ret_val = IXGBE_ERR_MBX; + + DEBUGFUNC("ixgbe_check_for_rst"); + + if (mbx->ops.check_for_rst) + ret_val = mbx->ops.check_for_rst(hw, mbx_id); + + return ret_val; +} + +/** + * ixgbe_poll_for_msg - Wait for message notification + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully received a message notification + **/ +STATIC s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + int countdown = mbx->timeout; + + DEBUGFUNC("ixgbe_poll_for_msg"); + + if (!countdown || !mbx->ops.check_for_msg) + goto out; + + while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) { + countdown--; + if (!countdown) + break; + usec_delay(mbx->usec_delay); + } + + if (countdown == 0) + ERROR_REPORT2(IXGBE_ERROR_POLLING, + "Polling for VF%d mailbox message timedout", mbx_id); + +out: + return countdown ? IXGBE_SUCCESS : IXGBE_ERR_MBX; +} + +/** + * ixgbe_poll_for_ack - Wait for message acknowledgement + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully received a message acknowledgement + **/ +STATIC s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + int countdown = mbx->timeout; + + DEBUGFUNC("ixgbe_poll_for_ack"); + + if (!countdown || !mbx->ops.check_for_ack) + goto out; + + while (countdown && mbx->ops.check_for_ack(hw, mbx_id)) { + countdown--; + if (!countdown) + break; + usec_delay(mbx->usec_delay); + } + + if (countdown == 0) + ERROR_REPORT2(IXGBE_ERROR_POLLING, + "Polling for VF%d mailbox ack timedout", mbx_id); + +out: + return countdown ? IXGBE_SUCCESS : IXGBE_ERR_MBX; +} + +/** + * ixgbe_read_posted_mbx - Wait for message notification and receive message + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully received a message notification and + * copied it into the receive buffer. 
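+ * + * The wait for the notification is bounded by mbx->timeout polls of + * mbx->usec_delay microseconds each (see ixgbe_poll_for_msg()). A typical + * VF caller (illustrative only) does: + * + *   u32 msg[IXGBE_VFMAILBOX_SIZE]; + *   ret_val = ixgbe_read_posted_mbx(hw, msg, IXGBE_VFMAILBOX_SIZE, 0);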
+ **/ +s32 ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + s32 ret_val = IXGBE_ERR_MBX; + + DEBUGFUNC("ixgbe_read_posted_mbx"); + + if (!mbx->ops.read) + goto out; + + ret_val = ixgbe_poll_for_msg(hw, mbx_id); + + /* if ack received read message, otherwise we timed out */ + if (!ret_val) + ret_val = mbx->ops.read(hw, msg, size, mbx_id); +out: + return ret_val; +} + +/** + * ixgbe_write_posted_mbx - Write a message to the mailbox, wait for ack + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully copied message into the buffer and + * received an ack to that message within delay * timeout period + **/ +s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, + u16 mbx_id) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + s32 ret_val = IXGBE_ERR_MBX; + + DEBUGFUNC("ixgbe_write_posted_mbx"); + + /* exit if either we can't write or there isn't a defined timeout */ + if (!mbx->ops.write || !mbx->timeout) + goto out; + + /* send msg */ + ret_val = mbx->ops.write(hw, msg, size, mbx_id); + + /* if msg sent wait until we receive an ack */ + if (!ret_val) + ret_val = ixgbe_poll_for_ack(hw, mbx_id); +out: + return ret_val; +} + +/** + * ixgbe_init_mbx_ops_generic - Initialize MB function pointers + * @hw: pointer to the HW structure + * + * Setups up the mailbox read and write message function pointers + **/ +void ixgbe_init_mbx_ops_generic(struct ixgbe_hw *hw) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + + mbx->ops.read_posted = ixgbe_read_posted_mbx; + mbx->ops.write_posted = ixgbe_write_posted_mbx; +} + +/** + * ixgbe_read_v2p_mailbox - read v2p mailbox + * @hw: pointer to the HW structure + * + * This function is used to read the v2p mailbox without losing the read to + * clear status bits. + **/ +STATIC u32 ixgbe_read_v2p_mailbox(struct ixgbe_hw *hw) +{ + u32 v2p_mailbox = IXGBE_READ_REG(hw, IXGBE_VFMAILBOX); + + v2p_mailbox |= hw->mbx.v2p_mailbox; + hw->mbx.v2p_mailbox |= v2p_mailbox & IXGBE_VFMAILBOX_R2C_BITS; + + return v2p_mailbox; +} + +/** + * ixgbe_check_for_bit_vf - Determine if a status bit was set + * @hw: pointer to the HW structure + * @mask: bitmask for bits to be tested and cleared + * + * This function is used to check for the read to clear bits within + * the V2P mailbox. 
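+ * + * Bits latched into hw->mbx.v2p_mailbox by ixgbe_read_v2p_mailbox() are + * cleared from that software copy once tested here, so each read-to-clear + * event is reported at most once.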
+ **/ +STATIC s32 ixgbe_check_for_bit_vf(struct ixgbe_hw *hw, u32 mask) +{ + u32 v2p_mailbox = ixgbe_read_v2p_mailbox(hw); + s32 ret_val = IXGBE_ERR_MBX; + + if (v2p_mailbox & mask) + ret_val = IXGBE_SUCCESS; + + hw->mbx.v2p_mailbox &= ~mask; + + return ret_val; +} + +/** + * ixgbe_check_for_msg_vf - checks to see if the PF has sent mail + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the PF has set the Status bit or else ERR_MBX + **/ +STATIC s32 ixgbe_check_for_msg_vf(struct ixgbe_hw *hw, u16 mbx_id) +{ + s32 ret_val = IXGBE_ERR_MBX; + + UNREFERENCED_1PARAMETER(mbx_id); + DEBUGFUNC("ixgbe_check_for_msg_vf"); + + if (!ixgbe_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFSTS)) { + ret_val = IXGBE_SUCCESS; + hw->mbx.stats.reqs++; + } + + return ret_val; +} + +/** + * ixgbe_check_for_ack_vf - checks to see if the PF has ACK'd + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the PF has set the ACK bit or else ERR_MBX + **/ +STATIC s32 ixgbe_check_for_ack_vf(struct ixgbe_hw *hw, u16 mbx_id) +{ + s32 ret_val = IXGBE_ERR_MBX; + + UNREFERENCED_1PARAMETER(mbx_id); + DEBUGFUNC("ixgbe_check_for_ack_vf"); + + if (!ixgbe_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFACK)) { + ret_val = IXGBE_SUCCESS; + hw->mbx.stats.acks++; + } + + return ret_val; +} + +/** + * ixgbe_check_for_rst_vf - checks to see if the PF has reset + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns true if the PF has set the reset done bit or else false + **/ +STATIC s32 ixgbe_check_for_rst_vf(struct ixgbe_hw *hw, u16 mbx_id) +{ + s32 ret_val = IXGBE_ERR_MBX; + + UNREFERENCED_1PARAMETER(mbx_id); + DEBUGFUNC("ixgbe_check_for_rst_vf"); + + if (!ixgbe_check_for_bit_vf(hw, (IXGBE_VFMAILBOX_RSTD | + IXGBE_VFMAILBOX_RSTI))) { + ret_val = IXGBE_SUCCESS; + hw->mbx.stats.rsts++; + } + + return ret_val; +} + +/** + * ixgbe_obtain_mbx_lock_vf - obtain mailbox lock + * @hw: pointer to the HW structure + * + * return SUCCESS if we obtained the mailbox lock + **/ +STATIC s32 ixgbe_obtain_mbx_lock_vf(struct ixgbe_hw *hw) +{ + s32 ret_val = IXGBE_ERR_MBX; + + DEBUGFUNC("ixgbe_obtain_mbx_lock_vf"); + + /* Take ownership of the buffer */ + IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_VFU); + + /* reserve mailbox for vf use */ + if (ixgbe_read_v2p_mailbox(hw) & IXGBE_VFMAILBOX_VFU) + ret_val = IXGBE_SUCCESS; + + return ret_val; +} + +/** + * ixgbe_write_mbx_vf - Write a message to the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully copied message into the buffer + **/ +STATIC s32 ixgbe_write_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size, + u16 mbx_id) +{ + s32 ret_val; + u16 i; + + UNREFERENCED_1PARAMETER(mbx_id); + + DEBUGFUNC("ixgbe_write_mbx_vf"); + + /* lock the mailbox to prevent pf/vf race condition */ + ret_val = ixgbe_obtain_mbx_lock_vf(hw); + if (ret_val) + goto out_no_write; + + /* flush msg and acks as we are overwriting the message buffer */ + ixgbe_check_for_msg_vf(hw, 0); + ixgbe_check_for_ack_vf(hw, 0); + + /* copy the caller specified message to the mailbox memory buffer */ + for (i = 0; i < size; i++) + IXGBE_WRITE_REG_ARRAY(hw, IXGBE_VFMBMEM, i, msg[i]); + + /* update stats */ + hw->mbx.stats.msgs_tx++; + + /* Drop VFU and interrupt the PF to tell it a message has been sent */ + IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_REQ); + +out_no_write: + return 
ret_val; +} + +/** + * ixgbe_read_mbx_vf - Reads a message from the inbox intended for vf + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to read + * + * returns SUCCESS if it successfully read message from buffer + **/ +STATIC s32 ixgbe_read_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size, + u16 mbx_id) +{ + s32 ret_val = IXGBE_SUCCESS; + u16 i; + + DEBUGFUNC("ixgbe_read_mbx_vf"); + UNREFERENCED_1PARAMETER(mbx_id); + + /* lock the mailbox to prevent pf/vf race condition */ + ret_val = ixgbe_obtain_mbx_lock_vf(hw); + if (ret_val) + goto out_no_read; + + /* copy the message from the mailbox memory buffer */ + for (i = 0; i < size; i++) + msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_VFMBMEM, i); + + /* Acknowledge receipt and release mailbox, then we're done */ + IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_ACK); + + /* update stats */ + hw->mbx.stats.msgs_rx++; + +out_no_read: + return ret_val; +} + +/** + * ixgbe_init_mbx_params_vf - set initial values for vf mailbox + * @hw: pointer to the HW structure + * + * Initializes the hw->mbx struct to correct values for vf mailbox + */ +void ixgbe_init_mbx_params_vf(struct ixgbe_hw *hw) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + + /* start mailbox as timed out and let the reset_hw call set the timeout + * value to begin communications */ + mbx->timeout = 0; + mbx->usec_delay = IXGBE_VF_MBX_INIT_DELAY; + + mbx->size = IXGBE_VFMAILBOX_SIZE; + + mbx->ops.read = ixgbe_read_mbx_vf; + mbx->ops.write = ixgbe_write_mbx_vf; + mbx->ops.read_posted = ixgbe_read_posted_mbx; + mbx->ops.write_posted = ixgbe_write_posted_mbx; + mbx->ops.check_for_msg = ixgbe_check_for_msg_vf; + mbx->ops.check_for_ack = ixgbe_check_for_ack_vf; + mbx->ops.check_for_rst = ixgbe_check_for_rst_vf; + + mbx->stats.msgs_tx = 0; + mbx->stats.msgs_rx = 0; + mbx->stats.reqs = 0; + mbx->stats.acks = 0; + mbx->stats.rsts = 0; +} + +STATIC s32 ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, u32 mask, s32 index) +{ + u32 mbvficr = IXGBE_READ_REG(hw, IXGBE_MBVFICR(index)); + s32 ret_val = IXGBE_ERR_MBX; + + if (mbvficr & mask) { + ret_val = IXGBE_SUCCESS; + IXGBE_WRITE_REG(hw, IXGBE_MBVFICR(index), mask); + } + + return ret_val; +} + +/** + * ixgbe_check_for_msg_pf - checks to see if the VF has sent mail + * @hw: pointer to the HW structure + * @vf_number: the VF index + * + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX + **/ +STATIC s32 ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_number) +{ + s32 ret_val = IXGBE_ERR_MBX; + s32 index = IXGBE_MBVFICR_INDEX(vf_number); + u32 vf_bit = vf_number % 16; + + DEBUGFUNC("ixgbe_check_for_msg_pf"); + + if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFREQ_VF1 << vf_bit, + index)) { + ret_val = IXGBE_SUCCESS; + hw->mbx.stats.reqs++; + } + + return ret_val; +} + +/** + * ixgbe_check_for_ack_pf - checks to see if the VF has ACKed + * @hw: pointer to the HW structure + * @vf_number: the VF index + * + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX + **/ +STATIC s32 ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, u16 vf_number) +{ + s32 ret_val = IXGBE_ERR_MBX; + s32 index = IXGBE_MBVFICR_INDEX(vf_number); + u32 vf_bit = vf_number % 16; + + DEBUGFUNC("ixgbe_check_for_ack_pf"); + + if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFACK_VF1 << vf_bit, + index)) { + ret_val = IXGBE_SUCCESS; + hw->mbx.stats.acks++; + } + + return ret_val; +} + +/** + * ixgbe_check_for_rst_pf - checks to see if the VF has reset + * @hw: pointer to the HW structure + * 
@vf_number: the VF index + * + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX + **/ +STATIC s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number) +{ + u32 reg_offset = (vf_number < 32) ? 0 : 1; + u32 vf_shift = vf_number % 32; + u32 vflre = 0; + s32 ret_val = IXGBE_ERR_MBX; + + DEBUGFUNC("ixgbe_check_for_rst_pf"); + + switch (hw->mac.type) { + case ixgbe_mac_82599EB: + vflre = IXGBE_READ_REG(hw, IXGBE_VFLRE(reg_offset)); + break; + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + case ixgbe_mac_X540: + vflre = IXGBE_READ_REG(hw, IXGBE_VFLREC(reg_offset)); + break; + default: + break; + } + + if (vflre & (1 << vf_shift)) { + ret_val = IXGBE_SUCCESS; + IXGBE_WRITE_REG(hw, IXGBE_VFLREC(reg_offset), (1 << vf_shift)); + hw->mbx.stats.rsts++; + } + + return ret_val; +} + +/** + * ixgbe_obtain_mbx_lock_pf - obtain mailbox lock + * @hw: pointer to the HW structure + * @vf_number: the VF index + * + * return SUCCESS if we obtained the mailbox lock + **/ +STATIC s32 ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_number) +{ + s32 ret_val = IXGBE_ERR_MBX; + u32 p2v_mailbox; + + DEBUGFUNC("ixgbe_obtain_mbx_lock_pf"); + + /* Take ownership of the buffer */ + IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_PFU); + + /* reserve mailbox for vf use */ + p2v_mailbox = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_number)); + if (p2v_mailbox & IXGBE_PFMAILBOX_PFU) + ret_val = IXGBE_SUCCESS; + else + ERROR_REPORT2(IXGBE_ERROR_POLLING, + "Failed to obtain mailbox lock for VF%d", vf_number); + + + return ret_val; +} + +/** + * ixgbe_write_mbx_pf - Places a message in the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @vf_number: the VF index + * + * returns SUCCESS if it successfully copied message into the buffer + **/ +STATIC s32 ixgbe_write_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size, + u16 vf_number) +{ + s32 ret_val; + u16 i; + + DEBUGFUNC("ixgbe_write_mbx_pf"); + + /* lock the mailbox to prevent pf/vf race condition */ + ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number); + if (ret_val) + goto out_no_write; + + /* flush msg and acks as we are overwriting the message buffer */ + ixgbe_check_for_msg_pf(hw, vf_number); + ixgbe_check_for_ack_pf(hw, vf_number); + + /* copy the caller specified message to the mailbox memory buffer */ + for (i = 0; i < size; i++) + IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i, msg[i]); + + /* Interrupt VF to tell it a message has been sent and release buffer*/ + IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_STS); + + /* update stats */ + hw->mbx.stats.msgs_tx++; + +out_no_write: + return ret_val; + +} + +/** + * ixgbe_read_mbx_pf - Read a message from the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @vf_number: the VF index + * + * This function copies a message from the mailbox buffer to the caller's + * memory buffer. The presumption is that the caller knows that there was + * a message due to a VF request so no polling for message is needed. 
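+ * + * Writing IXGBE_PFMAILBOX_ACK at the end both acknowledges the message to + * the VF and releases the buffer ownership taken via + * ixgbe_obtain_mbx_lock_pf().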
+ **/ +STATIC s32 ixgbe_read_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size, + u16 vf_number) +{ + s32 ret_val; + u16 i; + + DEBUGFUNC("ixgbe_read_mbx_pf"); + + /* lock the mailbox to prevent pf/vf race condition */ + ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number); + if (ret_val) + goto out_no_read; + + /* copy the message to the mailbox memory buffer */ + for (i = 0; i < size; i++) + msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i); + + /* Acknowledge the message and release buffer */ + IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_ACK); + + /* update stats */ + hw->mbx.stats.msgs_rx++; + +out_no_read: + return ret_val; +} + +/** + * ixgbe_init_mbx_params_pf - set initial values for pf mailbox + * @hw: pointer to the HW structure + * + * Initializes the hw->mbx struct to correct values for pf mailbox + */ +void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + + if (hw->mac.type != ixgbe_mac_82599EB && + hw->mac.type != ixgbe_mac_X550 && + hw->mac.type != ixgbe_mac_X550EM_x && + hw->mac.type != ixgbe_mac_X550EM_a && + hw->mac.type != ixgbe_mac_X540) + return; + + mbx->timeout = 0; + mbx->usec_delay = 0; + + mbx->size = IXGBE_VFMAILBOX_SIZE; + + mbx->ops.read = ixgbe_read_mbx_pf; + mbx->ops.write = ixgbe_write_mbx_pf; + mbx->ops.read_posted = ixgbe_read_posted_mbx; + mbx->ops.write_posted = ixgbe_write_posted_mbx; + mbx->ops.check_for_msg = ixgbe_check_for_msg_pf; + mbx->ops.check_for_ack = ixgbe_check_for_ack_pf; + mbx->ops.check_for_rst = ixgbe_check_for_rst_pf; + + mbx->stats.msgs_tx = 0; + mbx->stats.msgs_rx = 0; + mbx->stats.reqs = 0; + mbx->stats.acks = 0; + mbx->stats.rsts = 0; +} diff --git a/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_mbx.h b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_mbx.h new file mode 100644 index 000000000..1a45e49c2 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_mbx.h @@ -0,0 +1,136 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + +#ifndef _IXGBE_MBX_H_ +#define _IXGBE_MBX_H_ + +#include "ixgbe_type.h" + +#define IXGBE_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */ +#define IXGBE_ERR_MBX -100 + +#define IXGBE_VFMAILBOX 0x002FC +#define IXGBE_VFMBMEM 0x00200 + +/* Define mailbox register bits */ +#define IXGBE_VFMAILBOX_REQ 0x00000001 /* Request for PF Ready bit */ +#define IXGBE_VFMAILBOX_ACK 0x00000002 /* Ack PF message received */ +#define IXGBE_VFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ +#define IXGBE_VFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ +#define IXGBE_VFMAILBOX_PFSTS 0x00000010 /* PF wrote a message in the MB */ +#define IXGBE_VFMAILBOX_PFACK 0x00000020 /* PF ack the previous VF msg */ +#define IXGBE_VFMAILBOX_RSTI 0x00000040 /* PF has reset indication */ +#define IXGBE_VFMAILBOX_RSTD 0x00000080 /* PF has indicated reset done */ +#define IXGBE_VFMAILBOX_R2C_BITS 0x000000B0 /* All read to clear bits */ + +#define IXGBE_PFMAILBOX_STS 0x00000001 /* Initiate message send to VF */ +#define IXGBE_PFMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */ +#define IXGBE_PFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ +#define IXGBE_PFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ +#define IXGBE_PFMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */ + +#define IXGBE_MBVFICR_VFREQ_MASK 0x0000FFFF /* bits for VF messages */ +#define IXGBE_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */ +#define IXGBE_MBVFICR_VFACK_MASK 0xFFFF0000 /* bits for VF acks */ 
+#define IXGBE_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */ + + +/* If it's a IXGBE_VF_* msg then it originates in the VF and is sent to the + * PF. The reverse is true if it is IXGBE_PF_*. + * Message ACK's are the value or'd with 0xF0000000 + */ +#define IXGBE_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with + * this are the ACK */ +#define IXGBE_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with + * this are the NACK */ +#define IXGBE_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still + * clear to send requests */ +#define IXGBE_VT_MSGINFO_SHIFT 16 +/* bits 23:16 are used for extra info for certain messages */ +#define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT) + +/* definitions to support mailbox API version negotiation */ + +/* + * each element denotes a version of the API; existing numbers may not + * change; any additions must go at the end + */ +enum ixgbe_pfvf_api_rev { + ixgbe_mbox_api_10, /* API version 1.0, linux/freebsd VF driver */ + ixgbe_mbox_api_20, /* API version 2.0, solaris Phase1 VF driver */ + ixgbe_mbox_api_11, /* API version 1.1, linux/freebsd VF driver */ + ixgbe_mbox_api_12, /* API version 1.2, linux/freebsd VF driver */ + ixgbe_mbox_api_13, /* API version 1.3, linux/freebsd VF driver */ + /* This value should always be last */ + ixgbe_mbox_api_unknown, /* indicates that API version is not known */ +}; + +/* mailbox API, legacy requests */ +#define IXGBE_VF_RESET 0x01 /* VF requests reset */ +#define IXGBE_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */ +#define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */ +#define IXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */ + +/* mailbox API, version 1.0 VF requests */ +#define IXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */ +#define IXGBE_VF_SET_MACVLAN 0x06 /* VF requests PF for unicast filter */ +#define IXGBE_VF_API_NEGOTIATE 0x08 /* negotiate API version */ + +/* mailbox API, version 1.1 VF requests */ +#define IXGBE_VF_GET_QUEUES 0x09 /* get queue configuration */ + +/* mailbox API, version 1.2 VF requests */ +#define IXGBE_VF_GET_RETA 0x0a /* VF request for RETA */ +#define IXGBE_VF_GET_RSS_KEY 0x0b /* get RSS key */ +#define IXGBE_VF_UPDATE_XCAST_MODE 0x0c + +/* mode choices for IXGBE_VF_UPDATE_XCAST_MODE */ +enum ixgbevf_xcast_modes { + IXGBEVF_XCAST_MODE_NONE = 0, + IXGBEVF_XCAST_MODE_MULTI, + IXGBEVF_XCAST_MODE_ALLMULTI, + IXGBEVF_XCAST_MODE_PROMISC, +}; + +/* GET_QUEUES return data indices within the mailbox */ +#define IXGBE_VF_TX_QUEUES 1 /* number of Tx queues supported */ +#define IXGBE_VF_RX_QUEUES 2 /* number of Rx queues supported */ +#define IXGBE_VF_TRANS_VLAN 3 /* Indication of port vlan */ +#define IXGBE_VF_DEF_QUEUE 4 /* Default queue offset */ + +/* length of permanent address message returned from PF */ +#define IXGBE_VF_PERMADDR_MSG_LEN 4 +/* word in permanent address message with the current multicast type */ +#define IXGBE_VF_MC_TYPE_WORD 3 + +#define IXGBE_PF_CONTROL_MSG 0x0100 /* PF control message */ + +/* mailbox API, version 2.0 VF requests */ +#define IXGBE_VF_API_NEGOTIATE 0x08 /* negotiate API version */ +#define IXGBE_VF_GET_QUEUES 0x09 /* get queue configuration */ +#define IXGBE_VF_ENABLE_MACADDR 0x0A /* enable MAC address */ +#define IXGBE_VF_DISABLE_MACADDR 0x0B /* disable MAC address */ +#define IXGBE_VF_GET_MACADDRS 0x0C /* get all configured MAC addrs */ +#define IXGBE_VF_SET_MCAST_PROMISC 0x0D /* enable multicast promiscuous */ +#define IXGBE_VF_GET_MTU 0x0E /* get bounds on MTU */ +#define 
IXGBE_VF_SET_MTU 0x0F /* set a specific MTU */ + +/* mailbox API, version 2.0 PF requests */ +#define IXGBE_PF_TRANSPARENT_VLAN 0x0101 /* enable transparent vlan */ + +#define IXGBE_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */ +#define IXGBE_VF_MBX_INIT_DELAY 500 /* microseconds between retries */ + +s32 ixgbe_read_mbx(struct ixgbe_hw *, u32 *, u16, u16); +s32 ixgbe_write_mbx(struct ixgbe_hw *, u32 *, u16, u16); +s32 ixgbe_read_posted_mbx(struct ixgbe_hw *, u32 *, u16, u16); +s32 ixgbe_write_posted_mbx(struct ixgbe_hw *, u32 *, u16, u16); +s32 ixgbe_check_for_msg(struct ixgbe_hw *, u16); +s32 ixgbe_check_for_ack(struct ixgbe_hw *, u16); +s32 ixgbe_check_for_rst(struct ixgbe_hw *, u16); +void ixgbe_init_mbx_ops_generic(struct ixgbe_hw *hw); +void ixgbe_init_mbx_params_vf(struct ixgbe_hw *); +void ixgbe_init_mbx_params_pf(struct ixgbe_hw *); + +#endif /* _IXGBE_MBX_H_ */ diff --git a/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_osdep.h b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_osdep.h new file mode 100644 index 000000000..dc712b7c0 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_osdep.h @@ -0,0 +1,140 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + +#ifndef _IXGBE_OS_H_ +#define _IXGBE_OS_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "../ixgbe_logs.h" +#include "../ixgbe_bypass_defines.h" + +#define ASSERT(x) if(!(x)) rte_panic("IXGBE: x") + +#define DELAY(x) rte_delay_us_sleep(x) +#define usec_delay(x) DELAY(x) +#define msec_delay(x) DELAY(1000*(x)) + +#define DEBUGFUNC(F) DEBUGOUT(F "\n"); +#define DEBUGOUT(S, args...) PMD_DRV_LOG_RAW(DEBUG, S, ##args) +#define DEBUGOUT1(S, args...) DEBUGOUT(S, ##args) +#define DEBUGOUT2(S, args...) DEBUGOUT(S, ##args) +#define DEBUGOUT3(S, args...) DEBUGOUT(S, ##args) +#define DEBUGOUT6(S, args...) DEBUGOUT(S, ##args) +#define DEBUGOUT7(S, args...) DEBUGOUT(S, ##args) + +#define ERROR_REPORT1(e, S, args...) DEBUGOUT(S, ##args) +#define ERROR_REPORT2(e, S, args...) DEBUGOUT(S, ##args) +#define ERROR_REPORT3(e, S, args...) DEBUGOUT(S, ##args) + +#define FALSE 0 +#define TRUE 1 + +#define false 0 +#define true 1 +#define min(a,b) RTE_MIN(a,b) + +#define EWARN(hw, S, args...) 
DEBUGOUT1(S, ##args) + +/* Bunch of defines for shared code bogosity */ +#define UNREFERENCED_PARAMETER(_p) +#define UNREFERENCED_1PARAMETER(_p) +#define UNREFERENCED_2PARAMETER(_p, _q) +#define UNREFERENCED_3PARAMETER(_p, _q, _r) +#define UNREFERENCED_4PARAMETER(_p, _q, _r, _s) +#define UNREFERENCED_5PARAMETER(_p, _q, _r, _s, _t) + +/* Shared code error reporting */ +enum { + IXGBE_ERROR_SOFTWARE, + IXGBE_ERROR_POLLING, + IXGBE_ERROR_INVALID_STATE, + IXGBE_ERROR_UNSUPPORTED, + IXGBE_ERROR_ARGUMENT, + IXGBE_ERROR_CAUTION, +}; + +#define STATIC static +#define IXGBE_NTOHL(_i) rte_be_to_cpu_32(_i) +#define IXGBE_NTOHS(_i) rte_be_to_cpu_16(_i) +#define IXGBE_CPU_TO_LE16(_i) rte_cpu_to_le_16(_i) +#define IXGBE_CPU_TO_LE32(_i) rte_cpu_to_le_32(_i) +#define IXGBE_LE32_TO_CPU(_i) rte_le_to_cpu_32(_i) +#define IXGBE_LE32_TO_CPUS(_i) rte_le_to_cpu_32(_i) +#define IXGBE_CPU_TO_BE16(_i) rte_cpu_to_be_16(_i) +#define IXGBE_CPU_TO_BE32(_i) rte_cpu_to_be_32(_i) +#define IXGBE_BE32_TO_CPU(_i) rte_be_to_cpu_32(_i) + +typedef uint8_t u8; +typedef int8_t s8; +typedef uint16_t u16; +typedef int16_t s16; +typedef uint32_t u32; +typedef int32_t s32; +typedef uint64_t u64; + +#define mb() rte_mb() +#define wmb() rte_wmb() +#define rmb() rte_rmb() + +#define IOMEM + +#define prefetch(x) rte_prefetch0(x) + +#define IXGBE_PCI_REG(reg) rte_read32(reg) + +static inline uint32_t ixgbe_read_addr(volatile void* addr) +{ + return rte_le_to_cpu_32(IXGBE_PCI_REG(addr)); +} + +#define IXGBE_PCI_REG_WRITE(reg, value) \ + rte_write32((rte_cpu_to_le_32(value)), reg) + +#define IXGBE_PCI_REG_WRITE_RELAXED(reg, value) \ + rte_write32_relaxed((rte_cpu_to_le_32(value)), reg) + +#define IXGBE_PCI_REG_ADDR(hw, reg) \ + ((volatile uint32_t *)((char *)(hw)->hw_addr + (reg))) + +#define IXGBE_PCI_REG_ARRAY_ADDR(hw, reg, index) \ + IXGBE_PCI_REG_ADDR((hw), (reg) + ((index) << 2)) + +/* Not implemented !! 
*/ +#define IXGBE_READ_PCIE_WORD(hw, reg) 0 +#define IXGBE_WRITE_PCIE_WORD(hw, reg, value) do { } while(0) + +#define IXGBE_WRITE_FLUSH(a) IXGBE_READ_REG(a, IXGBE_STATUS) + +#define IXGBE_READ_REG(hw, reg) \ + ixgbe_read_addr(IXGBE_PCI_REG_ADDR((hw), (reg))) + +#define IXGBE_WRITE_REG(hw, reg, value) \ + IXGBE_PCI_REG_WRITE(IXGBE_PCI_REG_ADDR((hw), (reg)), (value)) + +#define IXGBE_READ_REG_ARRAY(hw, reg, index) \ + IXGBE_PCI_REG(IXGBE_PCI_REG_ARRAY_ADDR((hw), (reg), (index))) + +#define IXGBE_WRITE_REG_ARRAY(hw, reg, index, value) \ + IXGBE_PCI_REG_WRITE(IXGBE_PCI_REG_ARRAY_ADDR((hw), (reg), (index)), (value)) + +#define IXGBE_WRITE_REG_THEN_POLL_MASK(hw, reg, val, mask, poll_ms) \ +do { \ + uint32_t cnt = poll_ms; \ + IXGBE_WRITE_REG(hw, (reg), (val)); \ + while (((IXGBE_READ_REG(hw, (reg))) & (mask)) && (cnt--)) \ + rte_delay_ms(1); \ +} while (0) + +#endif /* _IXGBE_OS_H_ */ diff --git a/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_phy.c b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_phy.c new file mode 100644 index 000000000..a8243fa97 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_phy.c @@ -0,0 +1,2696 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + +#include "ixgbe_api.h" +#include "ixgbe_common.h" +#include "ixgbe_phy.h" + +STATIC void ixgbe_i2c_start(struct ixgbe_hw *hw); +STATIC void ixgbe_i2c_stop(struct ixgbe_hw *hw); +STATIC s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data); +STATIC s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data); +STATIC s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw); +STATIC s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data); +STATIC s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data); +STATIC void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl); +STATIC void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl); +STATIC s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data); +STATIC bool ixgbe_get_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl); +STATIC s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset, + u8 *sff8472_data); + +/** + * ixgbe_out_i2c_byte_ack - Send I2C byte with ack + * @hw: pointer to the hardware structure + * @byte: byte to send + * + * Returns an error code on error. + */ +STATIC s32 ixgbe_out_i2c_byte_ack(struct ixgbe_hw *hw, u8 byte) +{ + s32 status; + + status = ixgbe_clock_out_i2c_byte(hw, byte); + if (status) + return status; + return ixgbe_get_i2c_ack(hw); +} + +/** + * ixgbe_in_i2c_byte_ack - Receive an I2C byte and send ack + * @hw: pointer to the hardware structure + * @byte: pointer to a u8 to receive the byte + * + * Returns an error code on error. + */ +STATIC s32 ixgbe_in_i2c_byte_ack(struct ixgbe_hw *hw, u8 *byte) +{ + s32 status; + + status = ixgbe_clock_in_i2c_byte(hw, byte); + if (status) + return status; + /* ACK */ + return ixgbe_clock_out_i2c_bit(hw, false); +} + +/** + * ixgbe_ones_comp_byte_add - Perform one's complement addition + * @add1: addend 1 + * @add2: addend 2 + * + * Returns one's complement 8-bit sum. 
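+ * + * For example, 0xFF + 0x01 overflows to 0x100; folding the carry back in + * (0x00 + 0x01) yields 0x01.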
+ */ +STATIC u8 ixgbe_ones_comp_byte_add(u8 add1, u8 add2) +{ + u16 sum = add1 + add2; + + sum = (sum & 0xFF) + (sum >> 8); + return sum & 0xFF; +} + +/** + * ixgbe_read_i2c_combined_generic_int - Perform I2C read combined operation + * @hw: pointer to the hardware structure + * @addr: I2C bus address to read from + * @reg: I2C device register to read from + * @val: pointer to location to receive read value + * @lock: true if to take and release semaphore + * + * Returns an error code on error. + */ +s32 ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr, u16 reg, + u16 *val, bool lock) +{ + u32 swfw_mask = hw->phy.phy_semaphore_mask; + int max_retry = 3; + int retry = 0; + u8 csum_byte; + u8 high_bits; + u8 low_bits; + u8 reg_high; + u8 csum; + + reg_high = ((reg >> 7) & 0xFE) | 1; /* Indicate read combined */ + csum = ixgbe_ones_comp_byte_add(reg_high, reg & 0xFF); + csum = ~csum; + do { + if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)) + return IXGBE_ERR_SWFW_SYNC; + ixgbe_i2c_start(hw); + /* Device Address and write indication */ + if (ixgbe_out_i2c_byte_ack(hw, addr)) + goto fail; + /* Write bits 14:8 */ + if (ixgbe_out_i2c_byte_ack(hw, reg_high)) + goto fail; + /* Write bits 7:0 */ + if (ixgbe_out_i2c_byte_ack(hw, reg & 0xFF)) + goto fail; + /* Write csum */ + if (ixgbe_out_i2c_byte_ack(hw, csum)) + goto fail; + /* Re-start condition */ + ixgbe_i2c_start(hw); + /* Device Address and read indication */ + if (ixgbe_out_i2c_byte_ack(hw, addr | 1)) + goto fail; + /* Get upper bits */ + if (ixgbe_in_i2c_byte_ack(hw, &high_bits)) + goto fail; + /* Get low bits */ + if (ixgbe_in_i2c_byte_ack(hw, &low_bits)) + goto fail; + /* Get csum */ + if (ixgbe_clock_in_i2c_byte(hw, &csum_byte)) + goto fail; + /* NACK */ + if (ixgbe_clock_out_i2c_bit(hw, false)) + goto fail; + ixgbe_i2c_stop(hw); + if (lock) + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + *val = (high_bits << 8) | low_bits; + return 0; + +fail: + ixgbe_i2c_bus_clear(hw); + if (lock) + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + retry++; + if (retry < max_retry) + DEBUGOUT("I2C byte read combined error - Retrying.\n"); + else + DEBUGOUT("I2C byte read combined error.\n"); + } while (retry < max_retry); + + return IXGBE_ERR_I2C; +} + +/** + * ixgbe_write_i2c_combined_generic_int - Perform I2C write combined operation + * @hw: pointer to the hardware structure + * @addr: I2C bus address to write to + * @reg: I2C device register to write to + * @val: value to write + * @lock: true if to take and release semaphore + * + * Returns an error code on error. 
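+ * + * The byte sequence driven on the bus is: device address, register bits 14:8 + * (carrying the write-combined indication), register bits 7:0, data bits + * 15:8, data bits 7:0, and the one's complement checksum computed over the + * register and data bytes.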
+ */ +s32 ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr, u16 reg, + u16 val, bool lock) +{ + u32 swfw_mask = hw->phy.phy_semaphore_mask; + int max_retry = 1; + int retry = 0; + u8 reg_high; + u8 csum; + + reg_high = (reg >> 7) & 0xFE; /* Indicate write combined */ + csum = ixgbe_ones_comp_byte_add(reg_high, reg & 0xFF); + csum = ixgbe_ones_comp_byte_add(csum, val >> 8); + csum = ixgbe_ones_comp_byte_add(csum, val & 0xFF); + csum = ~csum; + do { + if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)) + return IXGBE_ERR_SWFW_SYNC; + ixgbe_i2c_start(hw); + /* Device Address and write indication */ + if (ixgbe_out_i2c_byte_ack(hw, addr)) + goto fail; + /* Write bits 14:8 */ + if (ixgbe_out_i2c_byte_ack(hw, reg_high)) + goto fail; + /* Write bits 7:0 */ + if (ixgbe_out_i2c_byte_ack(hw, reg & 0xFF)) + goto fail; + /* Write data 15:8 */ + if (ixgbe_out_i2c_byte_ack(hw, val >> 8)) + goto fail; + /* Write data 7:0 */ + if (ixgbe_out_i2c_byte_ack(hw, val & 0xFF)) + goto fail; + /* Write csum */ + if (ixgbe_out_i2c_byte_ack(hw, csum)) + goto fail; + ixgbe_i2c_stop(hw); + if (lock) + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + return 0; + +fail: + ixgbe_i2c_bus_clear(hw); + if (lock) + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + retry++; + if (retry < max_retry) + DEBUGOUT("I2C byte write combined error - Retrying.\n"); + else + DEBUGOUT("I2C byte write combined error.\n"); + } while (retry < max_retry); + + return IXGBE_ERR_I2C; +} + +/** + * ixgbe_init_phy_ops_generic - Inits PHY function ptrs + * @hw: pointer to the hardware structure + * + * Initialize the function pointers. + **/ +s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw) +{ + struct ixgbe_phy_info *phy = &hw->phy; + + DEBUGFUNC("ixgbe_init_phy_ops_generic"); + + /* PHY */ + phy->ops.identify = ixgbe_identify_phy_generic; + phy->ops.reset = ixgbe_reset_phy_generic; + phy->ops.read_reg = ixgbe_read_phy_reg_generic; + phy->ops.write_reg = ixgbe_write_phy_reg_generic; + phy->ops.read_reg_mdi = ixgbe_read_phy_reg_mdi; + phy->ops.write_reg_mdi = ixgbe_write_phy_reg_mdi; + phy->ops.setup_link = ixgbe_setup_phy_link_generic; + phy->ops.setup_link_speed = ixgbe_setup_phy_link_speed_generic; + phy->ops.check_link = NULL; + phy->ops.get_firmware_version = ixgbe_get_phy_firmware_version_generic; + phy->ops.read_i2c_byte = ixgbe_read_i2c_byte_generic; + phy->ops.write_i2c_byte = ixgbe_write_i2c_byte_generic; + phy->ops.read_i2c_sff8472 = ixgbe_read_i2c_sff8472_generic; + phy->ops.read_i2c_eeprom = ixgbe_read_i2c_eeprom_generic; + phy->ops.write_i2c_eeprom = ixgbe_write_i2c_eeprom_generic; + phy->ops.i2c_bus_clear = ixgbe_i2c_bus_clear; + phy->ops.identify_sfp = ixgbe_identify_module_generic; + phy->sfp_type = ixgbe_sfp_type_unknown; + phy->ops.read_i2c_byte_unlocked = ixgbe_read_i2c_byte_generic_unlocked; + phy->ops.write_i2c_byte_unlocked = + ixgbe_write_i2c_byte_generic_unlocked; + phy->ops.check_overtemp = ixgbe_tn_check_overtemp; + return IXGBE_SUCCESS; +} + +/** + * ixgbe_probe_phy - Probe a single address for a PHY + * @hw: pointer to hardware structure + * @phy_addr: PHY address to probe + * + * Returns true if PHY found + */ +static bool ixgbe_probe_phy(struct ixgbe_hw *hw, u16 phy_addr) +{ + u16 ext_ability = 0; + + if (!ixgbe_validate_phy_addr(hw, phy_addr)) { + DEBUGOUT1("Unable to validate PHY address 0x%04X\n", + phy_addr); + return false; + } + + if (ixgbe_get_phy_id(hw)) + return false; + + hw->phy.type = ixgbe_get_phy_type_from_id(hw->phy.id); + + if (hw->phy.type == ixgbe_phy_unknown) { + 
hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability); + if (ext_ability & + (IXGBE_MDIO_PHY_10GBASET_ABILITY | + IXGBE_MDIO_PHY_1000BASET_ABILITY)) + hw->phy.type = ixgbe_phy_cu_unknown; + else + hw->phy.type = ixgbe_phy_generic; + } + + return true; +} + +/** + * ixgbe_identify_phy_generic - Get physical layer module + * @hw: pointer to hardware structure + * + * Determines the physical layer module found on the current adapter. + **/ +s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw) +{ + s32 status = IXGBE_ERR_PHY_ADDR_INVALID; + u16 phy_addr; + + DEBUGFUNC("ixgbe_identify_phy_generic"); + + if (!hw->phy.phy_semaphore_mask) { + if (hw->bus.lan_id) + hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM; + else + hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM; + } + + if (hw->phy.type != ixgbe_phy_unknown) + return IXGBE_SUCCESS; + + if (hw->phy.nw_mng_if_sel) { + phy_addr = (hw->phy.nw_mng_if_sel & + IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD) >> + IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT; + if (ixgbe_probe_phy(hw, phy_addr)) + return IXGBE_SUCCESS; + else + return IXGBE_ERR_PHY_ADDR_INVALID; + } + + for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) { + if (ixgbe_probe_phy(hw, phy_addr)) { + status = IXGBE_SUCCESS; + break; + } + } + + /* Certain media types do not have a phy so an address will not + * be found and the code will take this path. Caller has to + * decide if it is an error or not. + */ + if (status != IXGBE_SUCCESS) + hw->phy.addr = 0; + + return status; +} + +/** + * ixgbe_check_reset_blocked - check status of MNG FW veto bit + * @hw: pointer to the hardware structure + * + * This function checks the MMNGC.MNG_VETO bit to see if there are + * any constraints on link from manageability. For MAC's that don't + * have this bit just return faluse since the link can not be blocked + * via this method. 
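+ * + * Callers invoke this before resetting the PHY or restarting + * auto-negotiation; when the veto bit is set those operations are skipped + * and reported as successful rather than failed.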
+ **/ +s32 ixgbe_check_reset_blocked(struct ixgbe_hw *hw) +{ + u32 mmngc; + + DEBUGFUNC("ixgbe_check_reset_blocked"); + + /* If we don't have this bit, it can't be blocking */ + if (hw->mac.type == ixgbe_mac_82598EB) + return false; + + mmngc = IXGBE_READ_REG(hw, IXGBE_MMNGC); + if (mmngc & IXGBE_MMNGC_MNG_VETO) { + ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, + "MNG_VETO bit detected.\n"); + return true; + } + + return false; +} + +/** + * ixgbe_validate_phy_addr - Determines phy address is valid + * @hw: pointer to hardware structure + * @phy_addr: PHY address + * + **/ +bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr) +{ + u16 phy_id = 0; + bool valid = false; + + DEBUGFUNC("ixgbe_validate_phy_addr"); + + hw->phy.addr = phy_addr; + hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_HIGH, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, &phy_id); + + if (phy_id != 0xFFFF && phy_id != 0x0) + valid = true; + + DEBUGOUT1("PHY ID HIGH is 0x%04X\n", phy_id); + + return valid; +} + +/** + * ixgbe_get_phy_id - Get the phy type + * @hw: pointer to hardware structure + * + **/ +s32 ixgbe_get_phy_id(struct ixgbe_hw *hw) +{ + u32 status; + u16 phy_id_high = 0; + u16 phy_id_low = 0; + + DEBUGFUNC("ixgbe_get_phy_id"); + + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_HIGH, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, + &phy_id_high); + + if (status == IXGBE_SUCCESS) { + hw->phy.id = (u32)(phy_id_high << 16); + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_LOW, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, + &phy_id_low); + hw->phy.id |= (u32)(phy_id_low & IXGBE_PHY_REVISION_MASK); + hw->phy.revision = (u32)(phy_id_low & ~IXGBE_PHY_REVISION_MASK); + } + DEBUGOUT2("PHY_ID_HIGH 0x%04X, PHY_ID_LOW 0x%04X\n", + phy_id_high, phy_id_low); + + return status; +} + +/** + * ixgbe_get_phy_type_from_id - Get the phy type + * @phy_id: PHY ID information + * + **/ +enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id) +{ + enum ixgbe_phy_type phy_type; + + DEBUGFUNC("ixgbe_get_phy_type_from_id"); + + switch (phy_id) { + case TN1010_PHY_ID: + phy_type = ixgbe_phy_tn; + break; + case X550_PHY_ID2: + case X550_PHY_ID3: + case X540_PHY_ID: + phy_type = ixgbe_phy_aq; + break; + case QT2022_PHY_ID: + phy_type = ixgbe_phy_qt; + break; + case ATH_PHY_ID: + phy_type = ixgbe_phy_nl; + break; + case X557_PHY_ID: + case X557_PHY_ID2: + phy_type = ixgbe_phy_x550em_ext_t; + break; + case IXGBE_M88E1500_E_PHY_ID: + case IXGBE_M88E1543_E_PHY_ID: + phy_type = ixgbe_phy_ext_1g_t; + break; + default: + phy_type = ixgbe_phy_unknown; + break; + } + return phy_type; +} + +/** + * ixgbe_reset_phy_generic - Performs a PHY reset + * @hw: pointer to hardware structure + **/ +s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw) +{ + u32 i; + u16 ctrl = 0; + s32 status = IXGBE_SUCCESS; + + DEBUGFUNC("ixgbe_reset_phy_generic"); + + if (hw->phy.type == ixgbe_phy_unknown) + status = ixgbe_identify_phy_generic(hw); + + if (status != IXGBE_SUCCESS || hw->phy.type == ixgbe_phy_none) + goto out; + + /* Don't reset PHY if it's shut down due to overtemp. */ + if (!hw->phy.reset_if_overtemp && + (IXGBE_ERR_OVERTEMP == hw->phy.ops.check_overtemp(hw))) + goto out; + + /* Blocked by MNG FW so bail */ + if (ixgbe_check_reset_blocked(hw)) + goto out; + + /* + * Perform soft PHY reset to the PHY_XS. + * This will cause a soft reset to the PHY + */ + hw->phy.ops.write_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL, + IXGBE_MDIO_PHY_XS_DEV_TYPE, + IXGBE_MDIO_PHY_XS_RESET); + + /* + * Poll for reset bit to self-clear indicating reset is complete. 
+ * Some PHYs could take up to 3 seconds to complete and need about + * 1.7 usec delay after the reset is complete. + */ + for (i = 0; i < 30; i++) { + msec_delay(100); + if (hw->phy.type == ixgbe_phy_x550em_ext_t) { + status = hw->phy.ops.read_reg(hw, + IXGBE_MDIO_TX_VENDOR_ALARMS_3, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, + &ctrl); + if (status != IXGBE_SUCCESS) + return status; + + if (ctrl & IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK) { + usec_delay(2); + break; + } + } else { + status = hw->phy.ops.read_reg(hw, + IXGBE_MDIO_PHY_XS_CONTROL, + IXGBE_MDIO_PHY_XS_DEV_TYPE, + &ctrl); + if (status != IXGBE_SUCCESS) + return status; + + if (!(ctrl & IXGBE_MDIO_PHY_XS_RESET)) { + usec_delay(2); + break; + } + } + } + + if (ctrl & IXGBE_MDIO_PHY_XS_RESET) { + status = IXGBE_ERR_RESET_FAILED; + ERROR_REPORT1(IXGBE_ERROR_POLLING, + "PHY reset polling failed to complete.\n"); + } + +out: + return status; +} + +/** + * ixgbe_read_phy_mdi - Reads a value from a specified PHY register without + * the SWFW lock + * @hw: pointer to hardware structure + * @reg_addr: 32 bit address of PHY register to read + * @device_type: 5 bit device type + * @phy_data: Pointer to read data from PHY register + **/ +s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, + u16 *phy_data) +{ + u32 i, data, command; + + /* Setup and write the address cycle command */ + command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | + (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | + (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | + (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND)); + + IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); + + /* + * Check every 10 usec to see if the address cycle completed. + * The MDI Command bit will clear when the operation is + * complete + */ + for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { + usec_delay(10); + + command = IXGBE_READ_REG(hw, IXGBE_MSCA); + if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) + break; + } + + + if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { + ERROR_REPORT1(IXGBE_ERROR_POLLING, "PHY address command did not complete.\n"); + DEBUGOUT("PHY address command did not complete, returning IXGBE_ERR_PHY\n"); + return IXGBE_ERR_PHY; + } + + /* + * Address cycle complete, setup and write the read + * command + */ + command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | + (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | + (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | + (IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND)); + + IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); + + /* + * Check every 10 usec to see if the address cycle + * completed. The MDI Command bit will clear when the + * operation is complete + */ + for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { + usec_delay(10); + + command = IXGBE_READ_REG(hw, IXGBE_MSCA); + if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) + break; + } + + if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { + ERROR_REPORT1(IXGBE_ERROR_POLLING, "PHY read command didn't complete\n"); + DEBUGOUT("PHY read command didn't complete, returning IXGBE_ERR_PHY\n"); + return IXGBE_ERR_PHY; + } + + /* + * Read operation is complete. 
Get the data + * from MSRWD + */ + data = IXGBE_READ_REG(hw, IXGBE_MSRWD); + data >>= IXGBE_MSRWD_READ_DATA_SHIFT; + *phy_data = (u16)(data); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_read_phy_reg_generic - Reads a value from a specified PHY register + * using the SWFW lock - this function is needed in most cases + * @hw: pointer to hardware structure + * @reg_addr: 32 bit address of PHY register to read + * @device_type: 5 bit device type + * @phy_data: Pointer to read data from PHY register + **/ +s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 *phy_data) +{ + s32 status; + u32 gssr = hw->phy.phy_semaphore_mask; + + DEBUGFUNC("ixgbe_read_phy_reg_generic"); + + if (hw->mac.ops.acquire_swfw_sync(hw, gssr)) + return IXGBE_ERR_SWFW_SYNC; + + status = hw->phy.ops.read_reg_mdi(hw, reg_addr, device_type, phy_data); + + hw->mac.ops.release_swfw_sync(hw, gssr); + + return status; +} + +/** + * ixgbe_write_phy_reg_mdi - Writes a value to specified PHY register + * without SWFW lock + * @hw: pointer to hardware structure + * @reg_addr: 32 bit PHY register to write + * @device_type: 5 bit device type + * @phy_data: Data to write to the PHY register + **/ +s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 phy_data) +{ + u32 i, command; + + /* Put the data in the MDI single read and write data register*/ + IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)phy_data); + + /* Setup and write the address cycle command */ + command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | + (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | + (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | + (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND)); + + IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); + + /* + * Check every 10 usec to see if the address cycle completed. + * The MDI Command bit will clear when the operation is + * complete + */ + for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { + usec_delay(10); + + command = IXGBE_READ_REG(hw, IXGBE_MSCA); + if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) + break; + } + + if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { + ERROR_REPORT1(IXGBE_ERROR_POLLING, "PHY address cmd didn't complete\n"); + return IXGBE_ERR_PHY; + } + + /* + * Address cycle complete, setup and write the write + * command + */ + command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | + (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | + (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | + (IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND)); + + IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); + + /* + * Check every 10 usec to see if the address cycle + * completed. 
The MDI Command bit will clear when the + * operation is complete + */ + for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { + usec_delay(10); + + command = IXGBE_READ_REG(hw, IXGBE_MSCA); + if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) + break; + } + + if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { + ERROR_REPORT1(IXGBE_ERROR_POLLING, "PHY write cmd didn't complete\n"); + return IXGBE_ERR_PHY; + } + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_write_phy_reg_generic - Writes a value to specified PHY register + * using SWFW lock- this function is needed in most cases + * @hw: pointer to hardware structure + * @reg_addr: 32 bit PHY register to write + * @device_type: 5 bit device type + * @phy_data: Data to write to the PHY register + **/ +s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 phy_data) +{ + s32 status; + u32 gssr = hw->phy.phy_semaphore_mask; + + DEBUGFUNC("ixgbe_write_phy_reg_generic"); + + if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == IXGBE_SUCCESS) { + status = hw->phy.ops.write_reg_mdi(hw, reg_addr, device_type, + phy_data); + hw->mac.ops.release_swfw_sync(hw, gssr); + } else { + status = IXGBE_ERR_SWFW_SYNC; + } + + return status; +} + +/** + * ixgbe_setup_phy_link_generic - Set and restart auto-neg + * @hw: pointer to hardware structure + * + * Restart auto-negotiation and PHY and waits for completion. + **/ +s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw) +{ + s32 status = IXGBE_SUCCESS; + u16 autoneg_reg = IXGBE_MII_AUTONEG_REG; + bool autoneg = false; + ixgbe_link_speed speed; + + DEBUGFUNC("ixgbe_setup_phy_link_generic"); + + ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg); + + /* Set or unset auto-negotiation 10G advertisement */ + hw->phy.ops.read_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &autoneg_reg); + + autoneg_reg &= ~IXGBE_MII_10GBASE_T_ADVERTISE; + if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) && + (speed & IXGBE_LINK_SPEED_10GB_FULL)) + autoneg_reg |= IXGBE_MII_10GBASE_T_ADVERTISE; + + hw->phy.ops.write_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + autoneg_reg); + + hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &autoneg_reg); + + if (hw->mac.type == ixgbe_mac_X550) { + /* Set or unset auto-negotiation 5G advertisement */ + autoneg_reg &= ~IXGBE_MII_5GBASE_T_ADVERTISE; + if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_5GB_FULL) && + (speed & IXGBE_LINK_SPEED_5GB_FULL)) + autoneg_reg |= IXGBE_MII_5GBASE_T_ADVERTISE; + + /* Set or unset auto-negotiation 2.5G advertisement */ + autoneg_reg &= ~IXGBE_MII_2_5GBASE_T_ADVERTISE; + if ((hw->phy.autoneg_advertised & + IXGBE_LINK_SPEED_2_5GB_FULL) && + (speed & IXGBE_LINK_SPEED_2_5GB_FULL)) + autoneg_reg |= IXGBE_MII_2_5GBASE_T_ADVERTISE; + } + + /* Set or unset auto-negotiation 1G advertisement */ + autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE; + if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) && + (speed & IXGBE_LINK_SPEED_1GB_FULL)) + autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE; + + hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + autoneg_reg); + + /* Set or unset auto-negotiation 100M advertisement */ + hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &autoneg_reg); + + autoneg_reg &= ~(IXGBE_MII_100BASE_T_ADVERTISE | + IXGBE_MII_100BASE_T_ADVERTISE_HALF); + if ((hw->phy.autoneg_advertised & 
IXGBE_LINK_SPEED_100_FULL) && + (speed & IXGBE_LINK_SPEED_100_FULL)) + autoneg_reg |= IXGBE_MII_100BASE_T_ADVERTISE; + + hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + autoneg_reg); + + /* Blocked by MNG FW so don't reset PHY */ + if (ixgbe_check_reset_blocked(hw)) + return status; + + /* Restart PHY auto-negotiation. */ + hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg); + + autoneg_reg |= IXGBE_MII_RESTART; + + hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg); + + return status; +} + +/** + * ixgbe_setup_phy_link_speed_generic - Sets the auto advertised capabilities + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg_wait_to_complete: unused + **/ +s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete) +{ + UNREFERENCED_1PARAMETER(autoneg_wait_to_complete); + + DEBUGFUNC("ixgbe_setup_phy_link_speed_generic"); + + /* + * Clear autoneg_advertised and set new values based on input link + * speed. + */ + hw->phy.autoneg_advertised = 0; + + if (speed & IXGBE_LINK_SPEED_10GB_FULL) + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL; + + if (speed & IXGBE_LINK_SPEED_5GB_FULL) + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_5GB_FULL; + + if (speed & IXGBE_LINK_SPEED_2_5GB_FULL) + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_2_5GB_FULL; + + if (speed & IXGBE_LINK_SPEED_1GB_FULL) + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL; + + if (speed & IXGBE_LINK_SPEED_100_FULL) + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL; + + if (speed & IXGBE_LINK_SPEED_10_FULL) + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10_FULL; + + /* Setup link based on the new speed settings */ + ixgbe_setup_phy_link(hw); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_get_copper_speeds_supported - Get copper link speeds from phy + * @hw: pointer to hardware structure + * + * Determines the supported link capabilities by reading the PHY auto + * negotiation register. 
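+ * + * The result is cached in hw->phy.speeds_supported; X550 additionally + * reports the 2.5G and 5G rates, while X550EM_x/X550EM_a mask off 100M.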
+ **/ +static s32 ixgbe_get_copper_speeds_supported(struct ixgbe_hw *hw) +{ + s32 status; + u16 speed_ability; + + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_SPEED_ABILITY, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, + &speed_ability); + if (status) + return status; + + if (speed_ability & IXGBE_MDIO_PHY_SPEED_10G) + hw->phy.speeds_supported |= IXGBE_LINK_SPEED_10GB_FULL; + if (speed_ability & IXGBE_MDIO_PHY_SPEED_1G) + hw->phy.speeds_supported |= IXGBE_LINK_SPEED_1GB_FULL; + if (speed_ability & IXGBE_MDIO_PHY_SPEED_100M) + hw->phy.speeds_supported |= IXGBE_LINK_SPEED_100_FULL; + + switch (hw->mac.type) { + case ixgbe_mac_X550: + hw->phy.speeds_supported |= IXGBE_LINK_SPEED_2_5GB_FULL; + hw->phy.speeds_supported |= IXGBE_LINK_SPEED_5GB_FULL; + break; + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + hw->phy.speeds_supported &= ~IXGBE_LINK_SPEED_100_FULL; + break; + default: + break; + } + + return status; +} + +/** + * ixgbe_get_copper_link_capabilities_generic - Determines link capabilities + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @autoneg: boolean auto-negotiation value + **/ +s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *autoneg) +{ + s32 status = IXGBE_SUCCESS; + + DEBUGFUNC("ixgbe_get_copper_link_capabilities_generic"); + + *autoneg = true; + if (!hw->phy.speeds_supported) + status = ixgbe_get_copper_speeds_supported(hw); + + *speed = hw->phy.speeds_supported; + return status; +} + +/** + * ixgbe_check_phy_link_tnx - Determine link and speed status + * @hw: pointer to hardware structure + * @speed: current link speed + * @link_up: true is link is up, false otherwise + * + * Reads the VS1 register to determine if link is up and the current speed for + * the PHY. + **/ +s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed, + bool *link_up) +{ + s32 status = IXGBE_SUCCESS; + u32 time_out; + u32 max_time_out = 10; + u16 phy_link = 0; + u16 phy_speed = 0; + u16 phy_data = 0; + + DEBUGFUNC("ixgbe_check_phy_link_tnx"); + + /* Initialize speed and link to default case */ + *link_up = false; + *speed = IXGBE_LINK_SPEED_10GB_FULL; + + /* + * Check current speed and link status of the PHY register. + * This is a vendor specific register and may have to + * be changed for other copper PHYs. + */ + for (time_out = 0; time_out < max_time_out; time_out++) { + usec_delay(10); + status = hw->phy.ops.read_reg(hw, + IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + &phy_data); + phy_link = phy_data & IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS; + phy_speed = phy_data & + IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS; + if (phy_link == IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS) { + *link_up = true; + if (phy_speed == + IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS) + *speed = IXGBE_LINK_SPEED_1GB_FULL; + break; + } + } + + return status; +} + +/** + * ixgbe_setup_phy_link_tnx - Set and restart auto-neg + * @hw: pointer to hardware structure + * + * Restart auto-negotiation and PHY and waits for completion. 
+ **/ +s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw) +{ + s32 status = IXGBE_SUCCESS; + u16 autoneg_reg = IXGBE_MII_AUTONEG_REG; + bool autoneg = false; + ixgbe_link_speed speed; + + DEBUGFUNC("ixgbe_setup_phy_link_tnx"); + + ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg); + + if (speed & IXGBE_LINK_SPEED_10GB_FULL) { + /* Set or unset auto-negotiation 10G advertisement */ + hw->phy.ops.read_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &autoneg_reg); + + autoneg_reg &= ~IXGBE_MII_10GBASE_T_ADVERTISE; + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) + autoneg_reg |= IXGBE_MII_10GBASE_T_ADVERTISE; + + hw->phy.ops.write_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + autoneg_reg); + } + + if (speed & IXGBE_LINK_SPEED_1GB_FULL) { + /* Set or unset auto-negotiation 1G advertisement */ + hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_XNP_TX_REG, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &autoneg_reg); + + autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX; + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) + autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX; + + hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_XNP_TX_REG, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + autoneg_reg); + } + + if (speed & IXGBE_LINK_SPEED_100_FULL) { + /* Set or unset auto-negotiation 100M advertisement */ + hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &autoneg_reg); + + autoneg_reg &= ~IXGBE_MII_100BASE_T_ADVERTISE; + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) + autoneg_reg |= IXGBE_MII_100BASE_T_ADVERTISE; + + hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + autoneg_reg); + } + + /* Blocked by MNG FW so don't reset PHY */ + if (ixgbe_check_reset_blocked(hw)) + return status; + + /* Restart PHY auto-negotiation. 
*/ + hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg); + + autoneg_reg |= IXGBE_MII_RESTART; + + hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg); + + return status; +} + +/** + * ixgbe_get_phy_firmware_version_tnx - Gets the PHY Firmware Version + * @hw: pointer to hardware structure + * @firmware_version: pointer to the PHY Firmware Version + **/ +s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw, + u16 *firmware_version) +{ + s32 status; + + DEBUGFUNC("ixgbe_get_phy_firmware_version_tnx"); + + status = hw->phy.ops.read_reg(hw, TNX_FW_REV, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + firmware_version); + + return status; +} + +/** + * ixgbe_get_phy_firmware_version_generic - Gets the PHY Firmware Version + * @hw: pointer to hardware structure + * @firmware_version: pointer to the PHY Firmware Version + **/ +s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw, + u16 *firmware_version) +{ + s32 status; + + DEBUGFUNC("ixgbe_get_phy_firmware_version_generic"); + + status = hw->phy.ops.read_reg(hw, AQ_FW_REV, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + firmware_version); + + return status; +} + +/** + * ixgbe_reset_phy_nl - Performs a PHY reset + * @hw: pointer to hardware structure + **/ +s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw) +{ + u16 phy_offset, control, eword, edata, block_crc; + bool end_data = false; + u16 list_offset, data_offset; + u16 phy_data = 0; + s32 ret_val = IXGBE_SUCCESS; + u32 i; + + DEBUGFUNC("ixgbe_reset_phy_nl"); + + /* Blocked by MNG FW so bail */ + if (ixgbe_check_reset_blocked(hw)) + goto out; + + hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL, + IXGBE_MDIO_PHY_XS_DEV_TYPE, &phy_data); + + /* reset the PHY and poll for completion */ + hw->phy.ops.write_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL, + IXGBE_MDIO_PHY_XS_DEV_TYPE, + (phy_data | IXGBE_MDIO_PHY_XS_RESET)); + + for (i = 0; i < 100; i++) { + hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL, + IXGBE_MDIO_PHY_XS_DEV_TYPE, &phy_data); + if ((phy_data & IXGBE_MDIO_PHY_XS_RESET) == 0) + break; + msec_delay(10); + } + + if ((phy_data & IXGBE_MDIO_PHY_XS_RESET) != 0) { + DEBUGOUT("PHY reset did not complete.\n"); + ret_val = IXGBE_ERR_PHY; + goto out; + } + + /* Get init offsets */ + ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset, + &data_offset); + if (ret_val != IXGBE_SUCCESS) + goto out; + + ret_val = hw->eeprom.ops.read(hw, data_offset, &block_crc); + data_offset++; + while (!end_data) { + /* + * Read control word from PHY init contents offset + */ + ret_val = hw->eeprom.ops.read(hw, data_offset, &eword); + if (ret_val) + goto err_eeprom; + control = (eword & IXGBE_CONTROL_MASK_NL) >> + IXGBE_CONTROL_SHIFT_NL; + edata = eword & IXGBE_DATA_MASK_NL; + switch (control) { + case IXGBE_DELAY_NL: + data_offset++; + DEBUGOUT1("DELAY: %d MS\n", edata); + msec_delay(edata); + break; + case IXGBE_DATA_NL: + DEBUGOUT("DATA:\n"); + data_offset++; + ret_val = hw->eeprom.ops.read(hw, data_offset, + &phy_offset); + if (ret_val) + goto err_eeprom; + data_offset++; + for (i = 0; i < edata; i++) { + ret_val = hw->eeprom.ops.read(hw, data_offset, + &eword); + if (ret_val) + goto err_eeprom; + hw->phy.ops.write_reg(hw, phy_offset, + IXGBE_TWINAX_DEV, eword); + DEBUGOUT2("Wrote %4.4x to %4.4x\n", eword, + phy_offset); + data_offset++; + phy_offset++; + } + break; + case IXGBE_CONTROL_NL: + data_offset++; + DEBUGOUT("CONTROL:\n"); + if (edata == IXGBE_CONTROL_EOL_NL) { + DEBUGOUT("EOL\n"); + end_data = 
true; + } else if (edata == IXGBE_CONTROL_SOL_NL) { + DEBUGOUT("SOL\n"); + } else { + DEBUGOUT("Bad control value\n"); + ret_val = IXGBE_ERR_PHY; + goto out; + } + break; + default: + DEBUGOUT("Bad control type\n"); + ret_val = IXGBE_ERR_PHY; + goto out; + } + } + +out: + return ret_val; + +err_eeprom: + ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, + "eeprom read at offset %d failed", data_offset); + return IXGBE_ERR_PHY; +} + +/** + * ixgbe_identify_module_generic - Identifies module type + * @hw: pointer to hardware structure + * + * Determines HW type and calls appropriate function. + **/ +s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw) +{ + s32 status = IXGBE_ERR_SFP_NOT_PRESENT; + + DEBUGFUNC("ixgbe_identify_module_generic"); + + switch (hw->mac.ops.get_media_type(hw)) { + case ixgbe_media_type_fiber: + status = ixgbe_identify_sfp_module_generic(hw); + break; + + case ixgbe_media_type_fiber_qsfp: + status = ixgbe_identify_qsfp_module_generic(hw); + break; + + default: + hw->phy.sfp_type = ixgbe_sfp_type_not_present; + status = IXGBE_ERR_SFP_NOT_PRESENT; + break; + } + + return status; +} + +/** + * ixgbe_identify_sfp_module_generic - Identifies SFP modules + * @hw: pointer to hardware structure + * + * Searches for and identifies the SFP module and assigns appropriate PHY type. + **/ +s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) +{ + s32 status = IXGBE_ERR_PHY_ADDR_INVALID; + u32 vendor_oui = 0; + enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type; + u8 identifier = 0; + u8 comp_codes_1g = 0; + u8 comp_codes_10g = 0; + u8 oui_bytes[3] = {0, 0, 0}; + u8 cable_tech = 0; + u8 cable_spec = 0; + u16 enforce_sfp = 0; + + DEBUGFUNC("ixgbe_identify_sfp_module_generic"); + + if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber) { + hw->phy.sfp_type = ixgbe_sfp_type_not_present; + status = IXGBE_ERR_SFP_NOT_PRESENT; + goto out; + } + + /* LAN ID is needed for I2C access */ + hw->mac.ops.set_lan_id(hw); + + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_IDENTIFIER, + &identifier); + + if (status != IXGBE_SUCCESS) + goto err_read_i2c_eeprom; + + if (identifier != IXGBE_SFF_IDENTIFIER_SFP) { + hw->phy.type = ixgbe_phy_sfp_unsupported; + status = IXGBE_ERR_SFP_NOT_SUPPORTED; + } else { + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_1GBE_COMP_CODES, + &comp_codes_1g); + + if (status != IXGBE_SUCCESS) + goto err_read_i2c_eeprom; + + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_10GBE_COMP_CODES, + &comp_codes_10g); + + if (status != IXGBE_SUCCESS) + goto err_read_i2c_eeprom; + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_CABLE_TECHNOLOGY, + &cable_tech); + + if (status != IXGBE_SUCCESS) + goto err_read_i2c_eeprom; + + /* ID Module + * ========= + * 0 SFP_DA_CU + * 1 SFP_SR + * 2 SFP_LR + * 3 SFP_DA_CORE0 - 82599-specific + * 4 SFP_DA_CORE1 - 82599-specific + * 5 SFP_SR/LR_CORE0 - 82599-specific + * 6 SFP_SR/LR_CORE1 - 82599-specific + * 7 SFP_act_lmt_DA_CORE0 - 82599-specific + * 8 SFP_act_lmt_DA_CORE1 - 82599-specific + * 9 SFP_1g_cu_CORE0 - 82599-specific + * 10 SFP_1g_cu_CORE1 - 82599-specific + * 11 SFP_1g_sx_CORE0 - 82599-specific + * 12 SFP_1g_sx_CORE1 - 82599-specific + */ + if (hw->mac.type == ixgbe_mac_82598EB) { + if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) + hw->phy.sfp_type = ixgbe_sfp_type_da_cu; + else if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE) + hw->phy.sfp_type = ixgbe_sfp_type_sr; + else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE) + hw->phy.sfp_type = ixgbe_sfp_type_lr; + else + hw->phy.sfp_type = 
ixgbe_sfp_type_unknown; + } else { + if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = + ixgbe_sfp_type_da_cu_core0; + else + hw->phy.sfp_type = + ixgbe_sfp_type_da_cu_core1; + } else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) { + hw->phy.ops.read_i2c_eeprom( + hw, IXGBE_SFF_CABLE_SPEC_COMP, + &cable_spec); + if (cable_spec & + IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = + ixgbe_sfp_type_da_act_lmt_core0; + else + hw->phy.sfp_type = + ixgbe_sfp_type_da_act_lmt_core1; + } else { + hw->phy.sfp_type = + ixgbe_sfp_type_unknown; + } + } else if (comp_codes_10g & + (IXGBE_SFF_10GBASESR_CAPABLE | + IXGBE_SFF_10GBASELR_CAPABLE)) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = + ixgbe_sfp_type_srlr_core0; + else + hw->phy.sfp_type = + ixgbe_sfp_type_srlr_core1; + } else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = + ixgbe_sfp_type_1g_cu_core0; + else + hw->phy.sfp_type = + ixgbe_sfp_type_1g_cu_core1; + } else if (comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = + ixgbe_sfp_type_1g_sx_core0; + else + hw->phy.sfp_type = + ixgbe_sfp_type_1g_sx_core1; + } else if (comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = + ixgbe_sfp_type_1g_lx_core0; + else + hw->phy.sfp_type = + ixgbe_sfp_type_1g_lx_core1; + } else if (comp_codes_1g & IXGBE_SFF_1GBASELHA_CAPABLE) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = + ixgbe_sfp_type_1g_lha_core0; + else + hw->phy.sfp_type = + ixgbe_sfp_type_1g_lha_core1; + } else { + hw->phy.sfp_type = ixgbe_sfp_type_unknown; + } + } + + if (hw->phy.sfp_type != stored_sfp_type) + hw->phy.sfp_setup_needed = true; + + /* Determine if the SFP+ PHY is dual speed or not. 
*/ + hw->phy.multispeed_fiber = false; + if (((comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) && + (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)) || + ((comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) && + (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE))) + hw->phy.multispeed_fiber = true; + + /* Determine PHY vendor */ + if (hw->phy.type != ixgbe_phy_nl) { + hw->phy.id = identifier; + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_VENDOR_OUI_BYTE0, + &oui_bytes[0]); + + if (status != IXGBE_SUCCESS) + goto err_read_i2c_eeprom; + + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_VENDOR_OUI_BYTE1, + &oui_bytes[1]); + + if (status != IXGBE_SUCCESS) + goto err_read_i2c_eeprom; + + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_VENDOR_OUI_BYTE2, + &oui_bytes[2]); + + if (status != IXGBE_SUCCESS) + goto err_read_i2c_eeprom; + + vendor_oui = + ((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) | + (oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) | + (oui_bytes[2] << IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT)); + + switch (vendor_oui) { + case IXGBE_SFF_VENDOR_OUI_TYCO: + if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) + hw->phy.type = + ixgbe_phy_sfp_passive_tyco; + break; + case IXGBE_SFF_VENDOR_OUI_FTL: + if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) + hw->phy.type = ixgbe_phy_sfp_ftl_active; + else + hw->phy.type = ixgbe_phy_sfp_ftl; + break; + case IXGBE_SFF_VENDOR_OUI_AVAGO: + hw->phy.type = ixgbe_phy_sfp_avago; + break; + case IXGBE_SFF_VENDOR_OUI_INTEL: + hw->phy.type = ixgbe_phy_sfp_intel; + break; + default: + if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) + hw->phy.type = + ixgbe_phy_sfp_passive_unknown; + else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) + hw->phy.type = + ixgbe_phy_sfp_active_unknown; + else + hw->phy.type = ixgbe_phy_sfp_unknown; + break; + } + } + + /* Allow any DA cable vendor */ + if (cable_tech & (IXGBE_SFF_DA_PASSIVE_CABLE | + IXGBE_SFF_DA_ACTIVE_CABLE)) { + status = IXGBE_SUCCESS; + goto out; + } + + /* Verify supported 1G SFP modules */ + if (comp_codes_10g == 0 && + !(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_lha_core0 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_lha_core1 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) { + hw->phy.type = ixgbe_phy_sfp_unsupported; + status = IXGBE_ERR_SFP_NOT_SUPPORTED; + goto out; + } + + /* Anything else 82598-based is supported */ + if (hw->mac.type == ixgbe_mac_82598EB) { + status = IXGBE_SUCCESS; + goto out; + } + + ixgbe_get_device_caps(hw, &enforce_sfp); + if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP) && + !(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_lha_core0 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_lha_core1 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) { + /* Make sure we're a supported PHY type */ + if (hw->phy.type == ixgbe_phy_sfp_intel) { + status = IXGBE_SUCCESS; + } else { + if (hw->allow_unsupported_sfp == true) { + EWARN(hw, + "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics. 
" + "Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. " + "Intel Corporation is not responsible for any harm caused by using untested modules.\n"); + status = IXGBE_SUCCESS; + } else { + DEBUGOUT("SFP+ module not supported\n"); + hw->phy.type = + ixgbe_phy_sfp_unsupported; + status = IXGBE_ERR_SFP_NOT_SUPPORTED; + } + } + } else { + status = IXGBE_SUCCESS; + } + } + +out: + return status; + +err_read_i2c_eeprom: + hw->phy.sfp_type = ixgbe_sfp_type_not_present; + if (hw->phy.type != ixgbe_phy_nl) { + hw->phy.id = 0; + hw->phy.type = ixgbe_phy_unknown; + } + return IXGBE_ERR_SFP_NOT_PRESENT; +} + +/** + * ixgbe_get_supported_phy_sfp_layer_generic - Returns physical layer type + * @hw: pointer to hardware structure + * + * Determines physical layer capabilities of the current SFP. + */ +u64 ixgbe_get_supported_phy_sfp_layer_generic(struct ixgbe_hw *hw) +{ + u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; + u8 comp_codes_10g = 0; + u8 comp_codes_1g = 0; + + DEBUGFUNC("ixgbe_get_supported_phy_sfp_layer_generic"); + + hw->phy.ops.identify_sfp(hw); + if (hw->phy.sfp_type == ixgbe_sfp_type_not_present) + return physical_layer; + + switch (hw->phy.type) { + case ixgbe_phy_sfp_passive_tyco: + case ixgbe_phy_sfp_passive_unknown: + case ixgbe_phy_qsfp_passive_unknown: + physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU; + break; + case ixgbe_phy_sfp_ftl_active: + case ixgbe_phy_sfp_active_unknown: + case ixgbe_phy_qsfp_active_unknown: + physical_layer = IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA; + break; + case ixgbe_phy_sfp_avago: + case ixgbe_phy_sfp_ftl: + case ixgbe_phy_sfp_intel: + case ixgbe_phy_sfp_unknown: + hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_1GBE_COMP_CODES, &comp_codes_1g); + hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_10GBE_COMP_CODES, &comp_codes_10g); + if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE) + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR; + else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE) + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR; + else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE) + physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_T; + else if (comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) + physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_SX; + break; + case ixgbe_phy_qsfp_intel: + case ixgbe_phy_qsfp_unknown: + hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_QSFP_10GBE_COMP, &comp_codes_10g); + if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE) + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR; + else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE) + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR; + break; + default: + break; + } + + return physical_layer; +} + +/** + * ixgbe_identify_qsfp_module_generic - Identifies QSFP modules + * @hw: pointer to hardware structure + * + * Searches for and identifies the QSFP module and assigns appropriate PHY type + **/ +s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw) +{ + s32 status = IXGBE_ERR_PHY_ADDR_INVALID; + u32 vendor_oui = 0; + enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type; + u8 identifier = 0; + u8 comp_codes_1g = 0; + u8 comp_codes_10g = 0; + u8 oui_bytes[3] = {0, 0, 0}; + u16 enforce_sfp = 0; + u8 connector = 0; + u8 cable_length = 0; + u8 device_tech = 0; + bool active_cable = false; + + DEBUGFUNC("ixgbe_identify_qsfp_module_generic"); + + if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber_qsfp) { + hw->phy.sfp_type = ixgbe_sfp_type_not_present; + status = IXGBE_ERR_SFP_NOT_PRESENT; + goto out; + } + + /* LAN ID 
is needed for I2C access */ + hw->mac.ops.set_lan_id(hw); + + status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_IDENTIFIER, + &identifier); + + if (status != IXGBE_SUCCESS) + goto err_read_i2c_eeprom; + + if (identifier != IXGBE_SFF_IDENTIFIER_QSFP_PLUS) { + hw->phy.type = ixgbe_phy_sfp_unsupported; + status = IXGBE_ERR_SFP_NOT_SUPPORTED; + goto out; + } + + hw->phy.id = identifier; + + status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_QSFP_10GBE_COMP, + &comp_codes_10g); + + if (status != IXGBE_SUCCESS) + goto err_read_i2c_eeprom; + + status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_QSFP_1GBE_COMP, + &comp_codes_1g); + + if (status != IXGBE_SUCCESS) + goto err_read_i2c_eeprom; + + if (comp_codes_10g & IXGBE_SFF_QSFP_DA_PASSIVE_CABLE) { + hw->phy.type = ixgbe_phy_qsfp_passive_unknown; + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = ixgbe_sfp_type_da_cu_core0; + else + hw->phy.sfp_type = ixgbe_sfp_type_da_cu_core1; + } else if (comp_codes_10g & (IXGBE_SFF_10GBASESR_CAPABLE | + IXGBE_SFF_10GBASELR_CAPABLE)) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = ixgbe_sfp_type_srlr_core0; + else + hw->phy.sfp_type = ixgbe_sfp_type_srlr_core1; + } else { + if (comp_codes_10g & IXGBE_SFF_QSFP_DA_ACTIVE_CABLE) + active_cable = true; + + if (!active_cable) { + /* check for active DA cables that pre-date + * SFF-8436 v3.6 */ + hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_QSFP_CONNECTOR, + &connector); + + hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_QSFP_CABLE_LENGTH, + &cable_length); + + hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_QSFP_DEVICE_TECH, + &device_tech); + + if ((connector == + IXGBE_SFF_QSFP_CONNECTOR_NOT_SEPARABLE) && + (cable_length > 0) && + ((device_tech >> 4) == + IXGBE_SFF_QSFP_TRANSMITER_850NM_VCSEL)) + active_cable = true; + } + + if (active_cable) { + hw->phy.type = ixgbe_phy_qsfp_active_unknown; + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = + ixgbe_sfp_type_da_act_lmt_core0; + else + hw->phy.sfp_type = + ixgbe_sfp_type_da_act_lmt_core1; + } else { + /* unsupported module type */ + hw->phy.type = ixgbe_phy_sfp_unsupported; + status = IXGBE_ERR_SFP_NOT_SUPPORTED; + goto out; + } + } + + if (hw->phy.sfp_type != stored_sfp_type) + hw->phy.sfp_setup_needed = true; + + /* Determine if the QSFP+ PHY is dual speed or not. 
*/ + hw->phy.multispeed_fiber = false; + if (((comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) && + (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)) || + ((comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) && + (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE))) + hw->phy.multispeed_fiber = true; + + /* Determine PHY vendor for optical modules */ + if (comp_codes_10g & (IXGBE_SFF_10GBASESR_CAPABLE | + IXGBE_SFF_10GBASELR_CAPABLE)) { + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_QSFP_VENDOR_OUI_BYTE0, + &oui_bytes[0]); + + if (status != IXGBE_SUCCESS) + goto err_read_i2c_eeprom; + + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_QSFP_VENDOR_OUI_BYTE1, + &oui_bytes[1]); + + if (status != IXGBE_SUCCESS) + goto err_read_i2c_eeprom; + + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_QSFP_VENDOR_OUI_BYTE2, + &oui_bytes[2]); + + if (status != IXGBE_SUCCESS) + goto err_read_i2c_eeprom; + + vendor_oui = + ((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) | + (oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) | + (oui_bytes[2] << IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT)); + + if (vendor_oui == IXGBE_SFF_VENDOR_OUI_INTEL) + hw->phy.type = ixgbe_phy_qsfp_intel; + else + hw->phy.type = ixgbe_phy_qsfp_unknown; + + ixgbe_get_device_caps(hw, &enforce_sfp); + if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP)) { + /* Make sure we're a supported PHY type */ + if (hw->phy.type == ixgbe_phy_qsfp_intel) { + status = IXGBE_SUCCESS; + } else { + if (hw->allow_unsupported_sfp == true) { + EWARN(hw, + "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics. " + "Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. " + "Intel Corporation is not responsible for any harm caused by using untested modules.\n"); + status = IXGBE_SUCCESS; + } else { + DEBUGOUT("QSFP module not supported\n"); + hw->phy.type = + ixgbe_phy_sfp_unsupported; + status = IXGBE_ERR_SFP_NOT_SUPPORTED; + } + } + } else { + status = IXGBE_SUCCESS; + } + } + +out: + return status; + +err_read_i2c_eeprom: + hw->phy.sfp_type = ixgbe_sfp_type_not_present; + hw->phy.id = 0; + hw->phy.type = ixgbe_phy_unknown; + + return IXGBE_ERR_SFP_NOT_PRESENT; +} + +/** + * ixgbe_get_sfp_init_sequence_offsets - Provides offset of PHY init sequence + * @hw: pointer to hardware structure + * @list_offset: offset to the SFP ID list + * @data_offset: offset to the SFP data block + * + * Checks the MAC's EEPROM to see if it supports a given SFP+ module type, if + * so it returns the offsets to the phy init sequence block. 
+ **/ +s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, + u16 *list_offset, + u16 *data_offset) +{ + u16 sfp_id; + u16 sfp_type = hw->phy.sfp_type; + + DEBUGFUNC("ixgbe_get_sfp_init_sequence_offsets"); + + if (hw->phy.sfp_type == ixgbe_sfp_type_unknown) + return IXGBE_ERR_SFP_NOT_SUPPORTED; + + if (hw->phy.sfp_type == ixgbe_sfp_type_not_present) + return IXGBE_ERR_SFP_NOT_PRESENT; + + if ((hw->device_id == IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM) && + (hw->phy.sfp_type == ixgbe_sfp_type_da_cu)) + return IXGBE_ERR_SFP_NOT_SUPPORTED; + + /* + * Limiting active cables and 1G Phys must be initialized as + * SR modules + */ + if (sfp_type == ixgbe_sfp_type_da_act_lmt_core0 || + sfp_type == ixgbe_sfp_type_1g_lx_core0 || + sfp_type == ixgbe_sfp_type_1g_lha_core0 || + sfp_type == ixgbe_sfp_type_1g_cu_core0 || + sfp_type == ixgbe_sfp_type_1g_sx_core0) + sfp_type = ixgbe_sfp_type_srlr_core0; + else if (sfp_type == ixgbe_sfp_type_da_act_lmt_core1 || + sfp_type == ixgbe_sfp_type_1g_lx_core1 || + sfp_type == ixgbe_sfp_type_1g_lha_core1 || + sfp_type == ixgbe_sfp_type_1g_cu_core1 || + sfp_type == ixgbe_sfp_type_1g_sx_core1) + sfp_type = ixgbe_sfp_type_srlr_core1; + + /* Read offset to PHY init contents */ + if (hw->eeprom.ops.read(hw, IXGBE_PHY_INIT_OFFSET_NL, list_offset)) { + ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, + "eeprom read at offset %d failed", + IXGBE_PHY_INIT_OFFSET_NL); + return IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT; + } + + if ((!*list_offset) || (*list_offset == 0xFFFF)) + return IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT; + + /* Shift offset to first ID word */ + (*list_offset)++; + + /* + * Find the matching SFP ID in the EEPROM + * and program the init sequence + */ + if (hw->eeprom.ops.read(hw, *list_offset, &sfp_id)) + goto err_phy; + + while (sfp_id != IXGBE_PHY_INIT_END_NL) { + if (sfp_id == sfp_type) { + (*list_offset)++; + if (hw->eeprom.ops.read(hw, *list_offset, data_offset)) + goto err_phy; + if ((!*data_offset) || (*data_offset == 0xFFFF)) { + DEBUGOUT("SFP+ module not supported\n"); + return IXGBE_ERR_SFP_NOT_SUPPORTED; + } else { + break; + } + } else { + (*list_offset) += 2; + if (hw->eeprom.ops.read(hw, *list_offset, &sfp_id)) + goto err_phy; + } + } + + if (sfp_id == IXGBE_PHY_INIT_END_NL) { + DEBUGOUT("No matching SFP+ module found\n"); + return IXGBE_ERR_SFP_NOT_SUPPORTED; + } + + return IXGBE_SUCCESS; + +err_phy: + ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, + "eeprom read at offset %d failed", *list_offset); + return IXGBE_ERR_PHY; +} + +/** + * ixgbe_read_i2c_eeprom_generic - Reads 8 bit EEPROM word over I2C interface + * @hw: pointer to hardware structure + * @byte_offset: EEPROM byte offset to read + * @eeprom_data: value read + * + * Performs byte read operation to SFP module's EEPROM over I2C interface. 
+ **/ +s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, + u8 *eeprom_data) +{ + DEBUGFUNC("ixgbe_read_i2c_eeprom_generic"); + + return hw->phy.ops.read_i2c_byte(hw, byte_offset, + IXGBE_I2C_EEPROM_DEV_ADDR, + eeprom_data); +} + +/** + * ixgbe_read_i2c_sff8472_generic - Reads 8 bit word over I2C interface + * @hw: pointer to hardware structure + * @byte_offset: byte offset at address 0xA2 + * @sff8472_data: value read + * + * Performs byte read operation to SFP module's SFF-8472 data over I2C + **/ +STATIC s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset, + u8 *sff8472_data) +{ + return hw->phy.ops.read_i2c_byte(hw, byte_offset, + IXGBE_I2C_EEPROM_DEV_ADDR2, + sff8472_data); +} + +/** + * ixgbe_write_i2c_eeprom_generic - Writes 8 bit EEPROM word over I2C interface + * @hw: pointer to hardware structure + * @byte_offset: EEPROM byte offset to write + * @eeprom_data: value to write + * + * Performs byte write operation to SFP module's EEPROM over I2C interface. + **/ +s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, + u8 eeprom_data) +{ + DEBUGFUNC("ixgbe_write_i2c_eeprom_generic"); + + return hw->phy.ops.write_i2c_byte(hw, byte_offset, + IXGBE_I2C_EEPROM_DEV_ADDR, + eeprom_data); +} + +/** + * ixgbe_is_sfp_probe - Returns true if SFP is being detected + * @hw: pointer to hardware structure + * @offset: eeprom offset to be read + * @addr: I2C address to be read + */ +STATIC bool ixgbe_is_sfp_probe(struct ixgbe_hw *hw, u8 offset, u8 addr) +{ + if (addr == IXGBE_I2C_EEPROM_DEV_ADDR && + offset == IXGBE_SFF_IDENTIFIER && + hw->phy.sfp_type == ixgbe_sfp_type_not_present) + return true; + return false; +} + +/** + * ixgbe_read_i2c_byte_generic_int - Reads 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to read + * @dev_addr: address to read from + * @data: value read + * @lock: true if to take and release semaphore + * + * Performs byte read operation to SFP module's EEPROM over I2C interface at + * a specified device address. 
+ **/ +STATIC s32 ixgbe_read_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data, bool lock) +{ + s32 status; + u32 max_retry = 10; + u32 retry = 0; + u32 swfw_mask = hw->phy.phy_semaphore_mask; + bool nack = 1; + *data = 0; + + DEBUGFUNC("ixgbe_read_i2c_byte_generic"); + + if (hw->mac.type >= ixgbe_mac_X550) + max_retry = 3; + if (ixgbe_is_sfp_probe(hw, byte_offset, dev_addr)) + max_retry = IXGBE_SFP_DETECT_RETRIES; + + do { + if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)) + return IXGBE_ERR_SWFW_SYNC; + + ixgbe_i2c_start(hw); + + /* Device Address and write indication */ + status = ixgbe_clock_out_i2c_byte(hw, dev_addr); + if (status != IXGBE_SUCCESS) + goto fail; + + status = ixgbe_get_i2c_ack(hw); + if (status != IXGBE_SUCCESS) + goto fail; + + status = ixgbe_clock_out_i2c_byte(hw, byte_offset); + if (status != IXGBE_SUCCESS) + goto fail; + + status = ixgbe_get_i2c_ack(hw); + if (status != IXGBE_SUCCESS) + goto fail; + + ixgbe_i2c_start(hw); + + /* Device Address and read indication */ + status = ixgbe_clock_out_i2c_byte(hw, (dev_addr | 0x1)); + if (status != IXGBE_SUCCESS) + goto fail; + + status = ixgbe_get_i2c_ack(hw); + if (status != IXGBE_SUCCESS) + goto fail; + + status = ixgbe_clock_in_i2c_byte(hw, data); + if (status != IXGBE_SUCCESS) + goto fail; + + status = ixgbe_clock_out_i2c_bit(hw, nack); + if (status != IXGBE_SUCCESS) + goto fail; + + ixgbe_i2c_stop(hw); + if (lock) + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + return IXGBE_SUCCESS; + +fail: + ixgbe_i2c_bus_clear(hw); + if (lock) { + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + msec_delay(100); + } + retry++; + if (retry < max_retry) + DEBUGOUT("I2C byte read error - Retrying.\n"); + else + DEBUGOUT("I2C byte read error.\n"); + + } while (retry < max_retry); + + return status; +} + +/** + * ixgbe_read_i2c_byte_generic - Reads 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to read + * @dev_addr: address to read from + * @data: value read + * + * Performs byte read operation to SFP module's EEPROM over I2C interface at + * a specified device address. + **/ +s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data) +{ + return ixgbe_read_i2c_byte_generic_int(hw, byte_offset, dev_addr, + data, true); +} + +/** + * ixgbe_read_i2c_byte_generic_unlocked - Reads 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to read + * @dev_addr: address to read from + * @data: value read + * + * Performs byte read operation to SFP module's EEPROM over I2C interface at + * a specified device address. + **/ +s32 ixgbe_read_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data) +{ + return ixgbe_read_i2c_byte_generic_int(hw, byte_offset, dev_addr, + data, false); +} + +/** + * ixgbe_write_i2c_byte_generic_int - Writes 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to write + * @dev_addr: address to write to + * @data: value to write + * @lock: true if to take and release semaphore + * + * Performs byte write operation to SFP module's EEPROM over I2C interface at + * a specified device address. 
+ **/ +STATIC s32 ixgbe_write_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 data, bool lock) +{ + s32 status; + u32 max_retry = 1; + u32 retry = 0; + u32 swfw_mask = hw->phy.phy_semaphore_mask; + + DEBUGFUNC("ixgbe_write_i2c_byte_generic"); + + if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != + IXGBE_SUCCESS) + return IXGBE_ERR_SWFW_SYNC; + + do { + ixgbe_i2c_start(hw); + + status = ixgbe_clock_out_i2c_byte(hw, dev_addr); + if (status != IXGBE_SUCCESS) + goto fail; + + status = ixgbe_get_i2c_ack(hw); + if (status != IXGBE_SUCCESS) + goto fail; + + status = ixgbe_clock_out_i2c_byte(hw, byte_offset); + if (status != IXGBE_SUCCESS) + goto fail; + + status = ixgbe_get_i2c_ack(hw); + if (status != IXGBE_SUCCESS) + goto fail; + + status = ixgbe_clock_out_i2c_byte(hw, data); + if (status != IXGBE_SUCCESS) + goto fail; + + status = ixgbe_get_i2c_ack(hw); + if (status != IXGBE_SUCCESS) + goto fail; + + ixgbe_i2c_stop(hw); + if (lock) + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + return IXGBE_SUCCESS; + +fail: + ixgbe_i2c_bus_clear(hw); + retry++; + if (retry < max_retry) + DEBUGOUT("I2C byte write error - Retrying.\n"); + else + DEBUGOUT("I2C byte write error.\n"); + } while (retry < max_retry); + + if (lock) + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + + return status; +} + +/** + * ixgbe_write_i2c_byte_generic - Writes 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to write + * @dev_addr: address to write to + * @data: value to write + * + * Performs byte write operation to SFP module's EEPROM over I2C interface at + * a specified device address. + **/ +s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 data) +{ + return ixgbe_write_i2c_byte_generic_int(hw, byte_offset, dev_addr, + data, true); +} + +/** + * ixgbe_write_i2c_byte_generic_unlocked - Writes 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to write + * @dev_addr: address to write to + * @data: value to write + * + * Performs byte write operation to SFP module's EEPROM over I2C interface at + * a specified device address. + **/ +s32 ixgbe_write_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 data) +{ + return ixgbe_write_i2c_byte_generic_int(hw, byte_offset, dev_addr, + data, false); +} + +/** + * ixgbe_i2c_start - Sets I2C start condition + * @hw: pointer to hardware structure + * + * Sets I2C start condition (High -> Low on SDA while SCL is High) + * Set bit-bang mode on X550 hardware. + **/ +STATIC void ixgbe_i2c_start(struct ixgbe_hw *hw) +{ + u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); + + DEBUGFUNC("ixgbe_i2c_start"); + + i2cctl |= IXGBE_I2C_BB_EN_BY_MAC(hw); + + /* Start condition must begin with data and clock high */ + ixgbe_set_i2c_data(hw, &i2cctl, 1); + ixgbe_raise_i2c_clk(hw, &i2cctl); + + /* Setup time for start condition (4.7us) */ + usec_delay(IXGBE_I2C_T_SU_STA); + + ixgbe_set_i2c_data(hw, &i2cctl, 0); + + /* Hold time for start condition (4us) */ + usec_delay(IXGBE_I2C_T_HD_STA); + + ixgbe_lower_i2c_clk(hw, &i2cctl); + + /* Minimum low period of clock is 4.7 us */ + usec_delay(IXGBE_I2C_T_LOW); + +} + +/** + * ixgbe_i2c_stop - Sets I2C stop condition + * @hw: pointer to hardware structure + * + * Sets I2C stop condition (Low -> High on SDA while SCL is High) + * Disables bit-bang mode and negates data output enable on X550 + * hardware. 
+ **/ +STATIC void ixgbe_i2c_stop(struct ixgbe_hw *hw) +{ + u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); + u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw); + u32 clk_oe_bit = IXGBE_I2C_CLK_OE_N_EN_BY_MAC(hw); + u32 bb_en_bit = IXGBE_I2C_BB_EN_BY_MAC(hw); + + DEBUGFUNC("ixgbe_i2c_stop"); + + /* Stop condition must begin with data low and clock high */ + ixgbe_set_i2c_data(hw, &i2cctl, 0); + ixgbe_raise_i2c_clk(hw, &i2cctl); + + /* Setup time for stop condition (4us) */ + usec_delay(IXGBE_I2C_T_SU_STO); + + ixgbe_set_i2c_data(hw, &i2cctl, 1); + + /* bus free time between stop and start (4.7us)*/ + usec_delay(IXGBE_I2C_T_BUF); + + if (bb_en_bit || data_oe_bit || clk_oe_bit) { + i2cctl &= ~bb_en_bit; + i2cctl |= data_oe_bit | clk_oe_bit; + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), i2cctl); + IXGBE_WRITE_FLUSH(hw); + } +} + +/** + * ixgbe_clock_in_i2c_byte - Clocks in one byte via I2C + * @hw: pointer to hardware structure + * @data: data byte to clock in + * + * Clocks in one byte data via I2C data/clock + **/ +STATIC s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data) +{ + s32 i; + bool bit = 0; + + DEBUGFUNC("ixgbe_clock_in_i2c_byte"); + + *data = 0; + for (i = 7; i >= 0; i--) { + ixgbe_clock_in_i2c_bit(hw, &bit); + *data |= bit << i; + } + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_clock_out_i2c_byte - Clocks out one byte via I2C + * @hw: pointer to hardware structure + * @data: data byte clocked out + * + * Clocks out one byte data via I2C data/clock + **/ +STATIC s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data) +{ + s32 status = IXGBE_SUCCESS; + s32 i; + u32 i2cctl; + bool bit; + + DEBUGFUNC("ixgbe_clock_out_i2c_byte"); + + for (i = 7; i >= 0; i--) { + bit = (data >> i) & 0x1; + status = ixgbe_clock_out_i2c_bit(hw, bit); + + if (status != IXGBE_SUCCESS) + break; + } + + /* Release SDA line (set high) */ + i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); + i2cctl |= IXGBE_I2C_DATA_OUT_BY_MAC(hw); + i2cctl |= IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw); + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), i2cctl); + IXGBE_WRITE_FLUSH(hw); + + return status; +} + +/** + * ixgbe_get_i2c_ack - Polls for I2C ACK + * @hw: pointer to hardware structure + * + * Clocks in/out one bit via I2C data/clock + **/ +STATIC s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw) +{ + u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw); + s32 status = IXGBE_SUCCESS; + u32 i = 0; + u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); + u32 timeout = 10; + bool ack = 1; + + DEBUGFUNC("ixgbe_get_i2c_ack"); + + if (data_oe_bit) { + i2cctl |= IXGBE_I2C_DATA_OUT_BY_MAC(hw); + i2cctl |= data_oe_bit; + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), i2cctl); + IXGBE_WRITE_FLUSH(hw); + } + ixgbe_raise_i2c_clk(hw, &i2cctl); + + /* Minimum high period of clock is 4us */ + usec_delay(IXGBE_I2C_T_HIGH); + + /* Poll for ACK. 
Note that ACK in I2C spec is + * transition from 1 to 0 */ + for (i = 0; i < timeout; i++) { + i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); + ack = ixgbe_get_i2c_data(hw, &i2cctl); + + usec_delay(1); + if (!ack) + break; + } + + if (ack) { + DEBUGOUT("I2C ack was not received.\n"); + status = IXGBE_ERR_I2C; + } + + ixgbe_lower_i2c_clk(hw, &i2cctl); + + /* Minimum low period of clock is 4.7 us */ + usec_delay(IXGBE_I2C_T_LOW); + + return status; +} + +/** + * ixgbe_clock_in_i2c_bit - Clocks in one bit via I2C data/clock + * @hw: pointer to hardware structure + * @data: read data value + * + * Clocks in one bit via I2C data/clock + **/ +STATIC s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data) +{ + u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); + u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw); + + DEBUGFUNC("ixgbe_clock_in_i2c_bit"); + + if (data_oe_bit) { + i2cctl |= IXGBE_I2C_DATA_OUT_BY_MAC(hw); + i2cctl |= data_oe_bit; + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), i2cctl); + IXGBE_WRITE_FLUSH(hw); + } + ixgbe_raise_i2c_clk(hw, &i2cctl); + + /* Minimum high period of clock is 4us */ + usec_delay(IXGBE_I2C_T_HIGH); + + i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); + *data = ixgbe_get_i2c_data(hw, &i2cctl); + + ixgbe_lower_i2c_clk(hw, &i2cctl); + + /* Minimum low period of clock is 4.7 us */ + usec_delay(IXGBE_I2C_T_LOW); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_clock_out_i2c_bit - Clocks in/out one bit via I2C data/clock + * @hw: pointer to hardware structure + * @data: data value to write + * + * Clocks out one bit via I2C data/clock + **/ +STATIC s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data) +{ + s32 status; + u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); + + DEBUGFUNC("ixgbe_clock_out_i2c_bit"); + + status = ixgbe_set_i2c_data(hw, &i2cctl, data); + if (status == IXGBE_SUCCESS) { + ixgbe_raise_i2c_clk(hw, &i2cctl); + + /* Minimum high period of clock is 4us */ + usec_delay(IXGBE_I2C_T_HIGH); + + ixgbe_lower_i2c_clk(hw, &i2cctl); + + /* Minimum low period of clock is 4.7 us. + * This also takes care of the data hold time. + */ + usec_delay(IXGBE_I2C_T_LOW); + } else { + status = IXGBE_ERR_I2C; + ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, + "I2C data was not set to %X\n", data); + } + + return status; +} + +/** + * ixgbe_raise_i2c_clk - Raises the I2C SCL clock + * @hw: pointer to hardware structure + * @i2cctl: Current value of I2CCTL register + * + * Raises the I2C clock line '0'->'1' + * Negates the I2C clock output enable on X550 hardware. + **/ +STATIC void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl) +{ + u32 clk_oe_bit = IXGBE_I2C_CLK_OE_N_EN_BY_MAC(hw); + u32 i = 0; + u32 timeout = IXGBE_I2C_CLOCK_STRETCHING_TIMEOUT; + u32 i2cctl_r = 0; + + DEBUGFUNC("ixgbe_raise_i2c_clk"); + + if (clk_oe_bit) { + *i2cctl |= clk_oe_bit; + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl); + } + + for (i = 0; i < timeout; i++) { + *i2cctl |= IXGBE_I2C_CLK_OUT_BY_MAC(hw); + + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl); + IXGBE_WRITE_FLUSH(hw); + /* SCL rise time (1000ns) */ + usec_delay(IXGBE_I2C_T_RISE); + + i2cctl_r = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); + if (i2cctl_r & IXGBE_I2C_CLK_IN_BY_MAC(hw)) + break; + } +} + +/** + * ixgbe_lower_i2c_clk - Lowers the I2C SCL clock + * @hw: pointer to hardware structure + * @i2cctl: Current value of I2CCTL register + * + * Lowers the I2C clock line '1'->'0' + * Asserts the I2C clock output enable on X550 hardware. 
+ **/ +STATIC void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl) +{ + DEBUGFUNC("ixgbe_lower_i2c_clk"); + + *i2cctl &= ~(IXGBE_I2C_CLK_OUT_BY_MAC(hw)); + *i2cctl &= ~IXGBE_I2C_CLK_OE_N_EN_BY_MAC(hw); + + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl); + IXGBE_WRITE_FLUSH(hw); + + /* SCL fall time (300ns) */ + usec_delay(IXGBE_I2C_T_FALL); +} + +/** + * ixgbe_set_i2c_data - Sets the I2C data bit + * @hw: pointer to hardware structure + * @i2cctl: Current value of I2CCTL register + * @data: I2C data value (0 or 1) to set + * + * Sets the I2C data bit + * Asserts the I2C data output enable on X550 hardware. + **/ +STATIC s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data) +{ + u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw); + s32 status = IXGBE_SUCCESS; + + DEBUGFUNC("ixgbe_set_i2c_data"); + + if (data) + *i2cctl |= IXGBE_I2C_DATA_OUT_BY_MAC(hw); + else + *i2cctl &= ~(IXGBE_I2C_DATA_OUT_BY_MAC(hw)); + *i2cctl &= ~data_oe_bit; + + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl); + IXGBE_WRITE_FLUSH(hw); + + /* Data rise/fall (1000ns/300ns) and set-up time (250ns) */ + usec_delay(IXGBE_I2C_T_RISE + IXGBE_I2C_T_FALL + IXGBE_I2C_T_SU_DATA); + + if (!data) /* Can't verify data in this case */ + return IXGBE_SUCCESS; + if (data_oe_bit) { + *i2cctl |= data_oe_bit; + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl); + IXGBE_WRITE_FLUSH(hw); + } + + /* Verify data was set correctly */ + *i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); + if (data != ixgbe_get_i2c_data(hw, i2cctl)) { + status = IXGBE_ERR_I2C; + ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, + "Error - I2C data was not set to %X.\n", + data); + } + + return status; +} + +/** + * ixgbe_get_i2c_data - Reads the I2C SDA data bit + * @hw: pointer to hardware structure + * @i2cctl: Current value of I2CCTL register + * + * Returns the I2C data bit value + * Negates the I2C data output enable on X550 hardware. + **/ +STATIC bool ixgbe_get_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl) +{ + u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw); + bool data; + + DEBUGFUNC("ixgbe_get_i2c_data"); + + if (data_oe_bit) { + *i2cctl |= data_oe_bit; + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl); + IXGBE_WRITE_FLUSH(hw); + usec_delay(IXGBE_I2C_T_FALL); + } + + if (*i2cctl & IXGBE_I2C_DATA_IN_BY_MAC(hw)) + data = 1; + else + data = 0; + + return data; +} + +/** + * ixgbe_i2c_bus_clear - Clears the I2C bus + * @hw: pointer to hardware structure + * + * Clears the I2C bus by sending nine clock pulses. + * Used when data line is stuck low. + **/ +void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw) +{ + u32 i2cctl; + u32 i; + + DEBUGFUNC("ixgbe_i2c_bus_clear"); + + ixgbe_i2c_start(hw); + i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); + + ixgbe_set_i2c_data(hw, &i2cctl, 1); + + for (i = 0; i < 9; i++) { + ixgbe_raise_i2c_clk(hw, &i2cctl); + + /* Min high period of clock is 4us */ + usec_delay(IXGBE_I2C_T_HIGH); + + ixgbe_lower_i2c_clk(hw, &i2cctl); + + /* Min low period of clock is 4.7us*/ + usec_delay(IXGBE_I2C_T_LOW); + } + + ixgbe_i2c_start(hw); + + /* Put the i2c bus back to default state */ + ixgbe_i2c_stop(hw); +} + +/** + * ixgbe_tn_check_overtemp - Checks if an overtemp occurred. 
+ * @hw: pointer to hardware structure + * + * Checks if the LASI temp alarm status was triggered due to overtemp + **/ +s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw) +{ + s32 status = IXGBE_SUCCESS; + u16 phy_data = 0; + + DEBUGFUNC("ixgbe_tn_check_overtemp"); + + if (hw->device_id != IXGBE_DEV_ID_82599_T3_LOM) + goto out; + + /* Check that the LASI temp alarm status was triggered */ + hw->phy.ops.read_reg(hw, IXGBE_TN_LASI_STATUS_REG, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, &phy_data); + + if (!(phy_data & IXGBE_TN_LASI_STATUS_TEMP_ALARM)) + goto out; + + status = IXGBE_ERR_OVERTEMP; + ERROR_REPORT1(IXGBE_ERROR_CAUTION, "Device over temperature"); +out: + return status; +} + +/** + * ixgbe_set_copper_phy_power - Control power for copper phy + * @hw: pointer to hardware structure + * @on: true for on, false for off + */ +s32 ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on) +{ + u32 status; + u16 reg; + + if (!on && ixgbe_mng_present(hw)) + return 0; + + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + ®); + if (status) + return status; + + if (on) { + reg &= ~IXGBE_MDIO_PHY_SET_LOW_POWER_MODE; + } else { + if (ixgbe_check_reset_blocked(hw)) + return 0; + reg |= IXGBE_MDIO_PHY_SET_LOW_POWER_MODE; + } + + status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + reg); + return status; +} diff --git a/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_phy.h b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_phy.h new file mode 100644 index 000000000..a06c3be17 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_phy.h @@ -0,0 +1,190 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + +#ifndef _IXGBE_PHY_H_ +#define _IXGBE_PHY_H_ + +#include "ixgbe_type.h" +#define IXGBE_I2C_EEPROM_DEV_ADDR 0xA0 +#define IXGBE_I2C_EEPROM_DEV_ADDR2 0xA2 +#define IXGBE_I2C_EEPROM_BANK_LEN 0xFF + +/* EEPROM byte offsets */ +#define IXGBE_SFF_IDENTIFIER 0x0 +#define IXGBE_SFF_IDENTIFIER_SFP 0x3 +#define IXGBE_SFF_VENDOR_OUI_BYTE0 0x25 +#define IXGBE_SFF_VENDOR_OUI_BYTE1 0x26 +#define IXGBE_SFF_VENDOR_OUI_BYTE2 0x27 +#define IXGBE_SFF_1GBE_COMP_CODES 0x6 +#define IXGBE_SFF_10GBE_COMP_CODES 0x3 +#define IXGBE_SFF_CABLE_TECHNOLOGY 0x8 +#define IXGBE_SFF_CABLE_SPEC_COMP 0x3C +#define IXGBE_SFF_SFF_8472_SWAP 0x5C +#define IXGBE_SFF_SFF_8472_COMP 0x5E +#define IXGBE_SFF_SFF_8472_OSCB 0x6E +#define IXGBE_SFF_SFF_8472_ESCB 0x76 +#define IXGBE_SFF_IDENTIFIER_QSFP_PLUS 0xD +#define IXGBE_SFF_QSFP_VENDOR_OUI_BYTE0 0xA5 +#define IXGBE_SFF_QSFP_VENDOR_OUI_BYTE1 0xA6 +#define IXGBE_SFF_QSFP_VENDOR_OUI_BYTE2 0xA7 +#define IXGBE_SFF_QSFP_CONNECTOR 0x82 +#define IXGBE_SFF_QSFP_10GBE_COMP 0x83 +#define IXGBE_SFF_QSFP_1GBE_COMP 0x86 +#define IXGBE_SFF_QSFP_CABLE_LENGTH 0x92 +#define IXGBE_SFF_QSFP_DEVICE_TECH 0x93 + +/* Bitmasks */ +#define IXGBE_SFF_DA_PASSIVE_CABLE 0x4 +#define IXGBE_SFF_DA_ACTIVE_CABLE 0x8 +#define IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING 0x4 +#define IXGBE_SFF_1GBASESX_CAPABLE 0x1 +#define IXGBE_SFF_1GBASELX_CAPABLE 0x2 +#define IXGBE_SFF_1GBASET_CAPABLE 0x8 +#define IXGBE_SFF_1GBASELHA_CAPABLE 0x10 +#define IXGBE_SFF_10GBASESR_CAPABLE 0x10 +#define IXGBE_SFF_10GBASELR_CAPABLE 0x20 +#define IXGBE_SFF_SOFT_RS_SELECT_MASK 0x8 +#define IXGBE_SFF_SOFT_RS_SELECT_10G 0x8 +#define IXGBE_SFF_SOFT_RS_SELECT_1G 0x0 +#define IXGBE_SFF_ADDRESSING_MODE 0x4 +#define IXGBE_SFF_QSFP_DA_ACTIVE_CABLE 0x1 +#define IXGBE_SFF_QSFP_DA_PASSIVE_CABLE 0x8 +#define 
IXGBE_SFF_QSFP_CONNECTOR_NOT_SEPARABLE 0x23 +#define IXGBE_SFF_QSFP_TRANSMITER_850NM_VCSEL 0x0 +#define IXGBE_I2C_EEPROM_READ_MASK 0x100 +#define IXGBE_I2C_EEPROM_STATUS_MASK 0x3 +#define IXGBE_I2C_EEPROM_STATUS_NO_OPERATION 0x0 +#define IXGBE_I2C_EEPROM_STATUS_PASS 0x1 +#define IXGBE_I2C_EEPROM_STATUS_FAIL 0x2 +#define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS 0x3 + +#define IXGBE_CS4227 0xBE /* CS4227 address */ +#define IXGBE_CS4227_GLOBAL_ID_LSB 0 +#define IXGBE_CS4227_GLOBAL_ID_MSB 1 +#define IXGBE_CS4227_SCRATCH 2 +#define IXGBE_CS4227_GLOBAL_ID_VALUE 0x03E5 +#define IXGBE_CS4227_EFUSE_PDF_SKU 0x19F +#define IXGBE_CS4223_SKU_ID 0x0010 /* Quad port */ +#define IXGBE_CS4227_SKU_ID 0x0014 /* Dual port */ +#define IXGBE_CS4227_RESET_PENDING 0x1357 +#define IXGBE_CS4227_RESET_COMPLETE 0x5AA5 +#define IXGBE_CS4227_RETRIES 15 +#define IXGBE_CS4227_EFUSE_STATUS 0x0181 +#define IXGBE_CS4227_LINE_SPARE22_MSB 0x12AD /* Reg to program speed */ +#define IXGBE_CS4227_LINE_SPARE24_LSB 0x12B0 /* Reg to program EDC */ +#define IXGBE_CS4227_HOST_SPARE22_MSB 0x1AAD /* Reg to program speed */ +#define IXGBE_CS4227_HOST_SPARE24_LSB 0x1AB0 /* Reg to program EDC */ +#define IXGBE_CS4227_EEPROM_STATUS 0x5001 +#define IXGBE_CS4227_EEPROM_LOAD_OK 0x0001 +#define IXGBE_CS4227_SPEED_1G 0x8000 +#define IXGBE_CS4227_SPEED_10G 0 +#define IXGBE_CS4227_EDC_MODE_CX1 0x0002 +#define IXGBE_CS4227_EDC_MODE_SR 0x0004 +#define IXGBE_CS4227_EDC_MODE_DIAG 0x0008 +#define IXGBE_CS4227_RESET_HOLD 500 /* microseconds */ +#define IXGBE_CS4227_RESET_DELAY 450 /* milliseconds */ +#define IXGBE_CS4227_CHECK_DELAY 30 /* milliseconds */ +#define IXGBE_PE 0xE0 /* Port expander address */ +#define IXGBE_PE_OUTPUT 1 /* Output register offset */ +#define IXGBE_PE_CONFIG 3 /* Config register offset */ +#define IXGBE_PE_BIT1 (1 << 1) + +/* Flow control defines */ +#define IXGBE_TAF_SYM_PAUSE 0x400 +#define IXGBE_TAF_ASM_PAUSE 0x800 + +/* Bit-shift macros */ +#define IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT 24 +#define IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT 16 +#define IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT 8 + +/* Vendor OUIs: format of OUI is 0x[byte0][byte1][byte2][00] */ +#define IXGBE_SFF_VENDOR_OUI_TYCO 0x00407600 +#define IXGBE_SFF_VENDOR_OUI_FTL 0x00906500 +#define IXGBE_SFF_VENDOR_OUI_AVAGO 0x00176A00 +#define IXGBE_SFF_VENDOR_OUI_INTEL 0x001B2100 + +/* I2C SDA and SCL timing parameters for standard mode */ +#define IXGBE_I2C_T_HD_STA 4 +#define IXGBE_I2C_T_LOW 5 +#define IXGBE_I2C_T_HIGH 4 +#define IXGBE_I2C_T_SU_STA 5 +#define IXGBE_I2C_T_HD_DATA 5 +#define IXGBE_I2C_T_SU_DATA 1 +#define IXGBE_I2C_T_RISE 1 +#define IXGBE_I2C_T_FALL 1 +#define IXGBE_I2C_T_SU_STO 4 +#define IXGBE_I2C_T_BUF 5 + +#ifndef IXGBE_SFP_DETECT_RETRIES +#define IXGBE_SFP_DETECT_RETRIES 10 + +#endif /* IXGBE_SFP_DETECT_RETRIES */ +#define IXGBE_TN_LASI_STATUS_REG 0x9005 +#define IXGBE_TN_LASI_STATUS_TEMP_ALARM 0x0008 + +/* SFP+ SFF-8472 Compliance */ +#define IXGBE_SFF_SFF_8472_UNSUP 0x00 + +s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw); +bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr); +enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id); +s32 ixgbe_get_phy_id(struct ixgbe_hw *hw); +s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw); +s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw); +s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, + u16 *phy_data); +s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, + u16 phy_data); +s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, + u32 
device_type, u16 *phy_data); +s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 phy_data); +s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw); +s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete); +s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *autoneg); +s32 ixgbe_check_reset_blocked(struct ixgbe_hw *hw); + +/* PHY specific */ +s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *link_up); +s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw); +s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw, + u16 *firmware_version); +s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw, + u16 *firmware_version); + +s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw); +s32 ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on); +s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw); +s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw); +u64 ixgbe_get_supported_phy_sfp_layer_generic(struct ixgbe_hw *hw); +s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw); +s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, + u16 *list_offset, + u16 *data_offset); +s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw); +s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data); +s32 ixgbe_read_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data); +s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 data); +s32 ixgbe_write_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 data); +s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, + u8 *eeprom_data); +s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, + u8 eeprom_data); +void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw); +s32 ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *, u8 addr, u16 reg, + u16 *val, bool lock); +s32 ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *, u8 addr, u16 reg, + u16 val, bool lock); +#endif /* _IXGBE_PHY_H_ */ diff --git a/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_type.h b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_type.h new file mode 100644 index 000000000..15e937010 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_type.h @@ -0,0 +1,4367 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + +#ifndef _IXGBE_TYPE_H_ +#define _IXGBE_TYPE_H_ + +/* + * The following is a brief description of the error categories used by the + * ERROR_REPORT* macros. + * + * - IXGBE_ERROR_INVALID_STATE + * This category is for errors which represent a serious failure state that is + * unexpected, and could be potentially harmful to device operation. It should + * not be used for errors relating to issues that can be worked around or + * ignored. + * + * - IXGBE_ERROR_POLLING + * This category is for errors related to polling/timeout issues and should be + * used in any case where the timeout occurred, or a failure to obtain a lock, + * or failure to receive data within the time limit. + * + * - IXGBE_ERROR_CAUTION + * This category should be used for reporting issues that may be the cause of + * other errors, such as temperature warnings. It should indicate an event which + * could be serious, but hasn't necessarily caused problems yet. 
+ * + * - IXGBE_ERROR_SOFTWARE + * This category is intended for errors due to software state preventing + * something. The category is not intended for errors due to bad arguments, or + * due to unsupported features. It should be used when a state occurs which + * prevents action but is not a serious issue. + * + * - IXGBE_ERROR_ARGUMENT + * This category is for when a bad or invalid argument is passed. It should be + * used whenever a function is called and error checking has detected the + * argument is wrong or incorrect. + * + * - IXGBE_ERROR_UNSUPPORTED + * This category is for errors which are due to unsupported circumstances or + * configuration issues. It should not be used when the issue is due to an + * invalid argument, but for when something has occurred that is unsupported + * (Ex: Flow control autonegotiation or an unsupported SFP+ module.) + */ + +#include "ixgbe_osdep.h" + +/* Override this by setting IOMEM in your ixgbe_osdep.h header */ + +/* Vendor ID */ +#define IXGBE_INTEL_VENDOR_ID 0x8086 + +/* Device IDs */ +#define IXGBE_DEV_ID_82598 0x10B6 +#define IXGBE_DEV_ID_82598_BX 0x1508 +#define IXGBE_DEV_ID_82598AF_DUAL_PORT 0x10C6 +#define IXGBE_DEV_ID_82598AF_SINGLE_PORT 0x10C7 +#define IXGBE_DEV_ID_82598AT 0x10C8 +#define IXGBE_DEV_ID_82598AT2 0x150B +#define IXGBE_DEV_ID_82598EB_SFP_LOM 0x10DB +#define IXGBE_DEV_ID_82598EB_CX4 0x10DD +#define IXGBE_DEV_ID_82598_CX4_DUAL_PORT 0x10EC +#define IXGBE_DEV_ID_82598_DA_DUAL_PORT 0x10F1 +#define IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM 0x10E1 +#define IXGBE_DEV_ID_82598EB_XF_LR 0x10F4 +#define IXGBE_DEV_ID_82599_KX4 0x10F7 +#define IXGBE_DEV_ID_82599_KX4_MEZZ 0x1514 +#define IXGBE_DEV_ID_82599_KR 0x1517 +#define IXGBE_DEV_ID_82599_COMBO_BACKPLANE 0x10F8 +#define IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ 0x000C +#define IXGBE_DEV_ID_82599_CX4 0x10F9 +#define IXGBE_DEV_ID_82599_SFP 0x10FB +#define IXGBE_SUBDEV_ID_82599_SFP 0x11A9 +#define IXGBE_SUBDEV_ID_82599_SFP_WOL0 0x1071 +#define IXGBE_SUBDEV_ID_82599_RNDC 0x1F72 +#define IXGBE_SUBDEV_ID_82599_560FLR 0x17D0 +#define IXGBE_SUBDEV_ID_82599_ECNA_DP 0x0470 +#define IXGBE_SUBDEV_ID_82599_SP_560FLR 0x211B +#define IXGBE_SUBDEV_ID_82599_LOM_SNAP6 0x2159 +#define IXGBE_SUBDEV_ID_82599_SFP_1OCP 0x000D +#define IXGBE_SUBDEV_ID_82599_SFP_2OCP 0x0008 +#define IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM1 0x8976 +#define IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM2 0x06EE +#define IXGBE_DEV_ID_82599_BACKPLANE_FCOE 0x152A +#define IXGBE_DEV_ID_82599_SFP_FCOE 0x1529 +#define IXGBE_DEV_ID_82599_SFP_EM 0x1507 +#define IXGBE_DEV_ID_82599_SFP_SF2 0x154D +#define IXGBE_DEV_ID_82599_SFP_SF_QP 0x154A +#define IXGBE_DEV_ID_82599_QSFP_SF_QP 0x1558 +#define IXGBE_DEV_ID_82599EN_SFP 0x1557 +#define IXGBE_SUBDEV_ID_82599EN_SFP_OCP1 0x0001 +#define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC +#define IXGBE_DEV_ID_82599_T3_LOM 0x151C +#define IXGBE_DEV_ID_82599_VF 0x10ED +#define IXGBE_DEV_ID_82599_VF_HV 0x152E +#define IXGBE_DEV_ID_X540T 0x1528 +#define IXGBE_DEV_ID_X540_VF 0x1515 +#define IXGBE_DEV_ID_X540_VF_HV 0x1530 +#define IXGBE_DEV_ID_X540T1 0x1560 +#define IXGBE_DEV_ID_X550T 0x1563 +#define IXGBE_DEV_ID_X550T1 0x15D1 +/* Placeholder value, pending official value. 
*/ +#define IXGBE_DEV_ID_X550EM_A_KR 0x15C2 +#define IXGBE_DEV_ID_X550EM_A_KR_L 0x15C3 +#define IXGBE_DEV_ID_X550EM_A_SFP_N 0x15C4 +#define IXGBE_DEV_ID_X550EM_A_SGMII 0x15C6 +#define IXGBE_DEV_ID_X550EM_A_SGMII_L 0x15C7 +#define IXGBE_DEV_ID_X550EM_A_10G_T 0x15C8 +#define IXGBE_DEV_ID_X550EM_A_QSFP 0x15CA +#define IXGBE_DEV_ID_X550EM_A_QSFP_N 0x15CC +#define IXGBE_DEV_ID_X550EM_A_SFP 0x15CE +#define IXGBE_DEV_ID_X550EM_A_1G_T 0x15E4 +#define IXGBE_DEV_ID_X550EM_A_1G_T_L 0x15E5 +#define IXGBE_DEV_ID_X550EM_X_KX4 0x15AA +#define IXGBE_DEV_ID_X550EM_X_KR 0x15AB +#define IXGBE_DEV_ID_X550EM_X_SFP 0x15AC +#define IXGBE_DEV_ID_X550EM_X_10G_T 0x15AD +#define IXGBE_DEV_ID_X550EM_X_1G_T 0x15AE +#define IXGBE_DEV_ID_X550EM_X_XFI 0x15B0 +#define IXGBE_DEV_ID_X550_VF_HV 0x1564 +#define IXGBE_DEV_ID_X550_VF 0x1565 +#define IXGBE_DEV_ID_X550EM_A_VF 0x15C5 +#define IXGBE_DEV_ID_X550EM_A_VF_HV 0x15B4 +#define IXGBE_DEV_ID_X550EM_X_VF 0x15A8 +#define IXGBE_DEV_ID_X550EM_X_VF_HV 0x15A9 + +#define IXGBE_CAT(r, m) IXGBE_##r##m + +#define IXGBE_BY_MAC(_hw, r) ((_hw)->mvals[IXGBE_CAT(r, _IDX)]) + +/* General Registers */ +#define IXGBE_CTRL 0x00000 +#define IXGBE_STATUS 0x00008 +#define IXGBE_CTRL_EXT 0x00018 +#define IXGBE_ESDP 0x00020 +#define IXGBE_EODSDP 0x00028 +#define IXGBE_I2CCTL_82599 0x00028 +#define IXGBE_I2CCTL IXGBE_I2CCTL_82599 +#define IXGBE_I2CCTL_X540 IXGBE_I2CCTL_82599 +#define IXGBE_I2CCTL_X550 0x15F5C +#define IXGBE_I2CCTL_X550EM_x IXGBE_I2CCTL_X550 +#define IXGBE_I2CCTL_X550EM_a IXGBE_I2CCTL_X550 +#define IXGBE_I2CCTL_BY_MAC(_hw) IXGBE_BY_MAC((_hw), I2CCTL) +#define IXGBE_PHY_GPIO 0x00028 +#define IXGBE_MAC_GPIO 0x00030 +#define IXGBE_PHYINT_STATUS0 0x00100 +#define IXGBE_PHYINT_STATUS1 0x00104 +#define IXGBE_PHYINT_STATUS2 0x00108 +#define IXGBE_LEDCTL 0x00200 +#define IXGBE_FRTIMER 0x00048 +#define IXGBE_TCPTIMER 0x0004C +#define IXGBE_CORESPARE 0x00600 +#define IXGBE_EXVET 0x05078 + +/* NVM Registers */ +#define IXGBE_EEC 0x10010 +#define IXGBE_EEC_X540 IXGBE_EEC +#define IXGBE_EEC_X550 IXGBE_EEC +#define IXGBE_EEC_X550EM_x IXGBE_EEC +#define IXGBE_EEC_X550EM_a 0x15FF8 +#define IXGBE_EEC_BY_MAC(_hw) IXGBE_BY_MAC((_hw), EEC) + +#define IXGBE_EERD 0x10014 +#define IXGBE_EEWR 0x10018 + +#define IXGBE_FLA 0x1001C +#define IXGBE_FLA_X540 IXGBE_FLA +#define IXGBE_FLA_X550 IXGBE_FLA +#define IXGBE_FLA_X550EM_x IXGBE_FLA +#define IXGBE_FLA_X550EM_a 0x15F68 +#define IXGBE_FLA_BY_MAC(_hw) IXGBE_BY_MAC((_hw), FLA) + +#define IXGBE_EEMNGCTL 0x10110 +#define IXGBE_EEMNGDATA 0x10114 +#define IXGBE_FLMNGCTL 0x10118 +#define IXGBE_FLMNGDATA 0x1011C +#define IXGBE_FLMNGCNT 0x10120 +#define IXGBE_FLOP 0x1013C + +#define IXGBE_GRC 0x10200 +#define IXGBE_GRC_X540 IXGBE_GRC +#define IXGBE_GRC_X550 IXGBE_GRC +#define IXGBE_GRC_X550EM_x IXGBE_GRC +#define IXGBE_GRC_X550EM_a 0x15F64 +#define IXGBE_GRC_BY_MAC(_hw) IXGBE_BY_MAC((_hw), GRC) + +#define IXGBE_SRAMREL 0x10210 +#define IXGBE_SRAMREL_X540 IXGBE_SRAMREL +#define IXGBE_SRAMREL_X550 IXGBE_SRAMREL +#define IXGBE_SRAMREL_X550EM_x IXGBE_SRAMREL +#define IXGBE_SRAMREL_X550EM_a 0x15F6C +#define IXGBE_SRAMREL_BY_MAC(_hw) IXGBE_BY_MAC((_hw), SRAMREL) + +#define IXGBE_PHYDBG 0x10218 + +/* General Receive Control */ +#define IXGBE_GRC_MNG 0x00000001 /* Manageability Enable */ +#define IXGBE_GRC_APME 0x00000002 /* APM enabled in EEPROM */ + +#define IXGBE_VPDDIAG0 0x10204 +#define IXGBE_VPDDIAG1 0x10208 + +/* I2CCTL Bit Masks */ +#define IXGBE_I2C_CLK_IN 0x00000001 +#define IXGBE_I2C_CLK_IN_X540 IXGBE_I2C_CLK_IN +#define IXGBE_I2C_CLK_IN_X550 0x00004000 +#define 
IXGBE_I2C_CLK_IN_X550EM_x IXGBE_I2C_CLK_IN_X550 +#define IXGBE_I2C_CLK_IN_X550EM_a IXGBE_I2C_CLK_IN_X550 +#define IXGBE_I2C_CLK_IN_BY_MAC(_hw) IXGBE_BY_MAC((_hw), I2C_CLK_IN) + +#define IXGBE_I2C_CLK_OUT 0x00000002 +#define IXGBE_I2C_CLK_OUT_X540 IXGBE_I2C_CLK_OUT +#define IXGBE_I2C_CLK_OUT_X550 0x00000200 +#define IXGBE_I2C_CLK_OUT_X550EM_x IXGBE_I2C_CLK_OUT_X550 +#define IXGBE_I2C_CLK_OUT_X550EM_a IXGBE_I2C_CLK_OUT_X550 +#define IXGBE_I2C_CLK_OUT_BY_MAC(_hw) IXGBE_BY_MAC((_hw), I2C_CLK_OUT) + +#define IXGBE_I2C_DATA_IN 0x00000004 +#define IXGBE_I2C_DATA_IN_X540 IXGBE_I2C_DATA_IN +#define IXGBE_I2C_DATA_IN_X550 0x00001000 +#define IXGBE_I2C_DATA_IN_X550EM_x IXGBE_I2C_DATA_IN_X550 +#define IXGBE_I2C_DATA_IN_X550EM_a IXGBE_I2C_DATA_IN_X550 +#define IXGBE_I2C_DATA_IN_BY_MAC(_hw) IXGBE_BY_MAC((_hw), I2C_DATA_IN) + +#define IXGBE_I2C_DATA_OUT 0x00000008 +#define IXGBE_I2C_DATA_OUT_X540 IXGBE_I2C_DATA_OUT +#define IXGBE_I2C_DATA_OUT_X550 0x00000400 +#define IXGBE_I2C_DATA_OUT_X550EM_x IXGBE_I2C_DATA_OUT_X550 +#define IXGBE_I2C_DATA_OUT_X550EM_a IXGBE_I2C_DATA_OUT_X550 +#define IXGBE_I2C_DATA_OUT_BY_MAC(_hw) IXGBE_BY_MAC((_hw), I2C_DATA_OUT) + +#define IXGBE_I2C_DATA_OE_N_EN 0 +#define IXGBE_I2C_DATA_OE_N_EN_X540 IXGBE_I2C_DATA_OE_N_EN +#define IXGBE_I2C_DATA_OE_N_EN_X550 0x00000800 +#define IXGBE_I2C_DATA_OE_N_EN_X550EM_x IXGBE_I2C_DATA_OE_N_EN_X550 +#define IXGBE_I2C_DATA_OE_N_EN_X550EM_a IXGBE_I2C_DATA_OE_N_EN_X550 +#define IXGBE_I2C_DATA_OE_N_EN_BY_MAC(_hw) IXGBE_BY_MAC((_hw), I2C_DATA_OE_N_EN) + +#define IXGBE_I2C_BB_EN 0 +#define IXGBE_I2C_BB_EN_X540 IXGBE_I2C_BB_EN +#define IXGBE_I2C_BB_EN_X550 0x00000100 +#define IXGBE_I2C_BB_EN_X550EM_x IXGBE_I2C_BB_EN_X550 +#define IXGBE_I2C_BB_EN_X550EM_a IXGBE_I2C_BB_EN_X550 +#define IXGBE_I2C_BB_EN_BY_MAC(_hw) IXGBE_BY_MAC((_hw), I2C_BB_EN) + +#define IXGBE_I2C_CLK_OE_N_EN 0 +#define IXGBE_I2C_CLK_OE_N_EN_X540 IXGBE_I2C_CLK_OE_N_EN +#define IXGBE_I2C_CLK_OE_N_EN_X550 0x00002000 +#define IXGBE_I2C_CLK_OE_N_EN_X550EM_x IXGBE_I2C_CLK_OE_N_EN_X550 +#define IXGBE_I2C_CLK_OE_N_EN_X550EM_a IXGBE_I2C_CLK_OE_N_EN_X550 +#define IXGBE_I2C_CLK_OE_N_EN_BY_MAC(_hw) IXGBE_BY_MAC((_hw), I2C_CLK_OE_N_EN) +#define IXGBE_I2C_CLOCK_STRETCHING_TIMEOUT 500 + +#define IXGBE_I2C_THERMAL_SENSOR_ADDR 0xF8 +#define IXGBE_EMC_INTERNAL_DATA 0x00 +#define IXGBE_EMC_INTERNAL_THERM_LIMIT 0x20 +#define IXGBE_EMC_DIODE1_DATA 0x01 +#define IXGBE_EMC_DIODE1_THERM_LIMIT 0x19 +#define IXGBE_EMC_DIODE2_DATA 0x23 +#define IXGBE_EMC_DIODE2_THERM_LIMIT 0x1A + +#define IXGBE_MAX_SENSORS 3 + +struct ixgbe_thermal_diode_data { + u8 location; + u8 temp; + u8 caution_thresh; + u8 max_op_thresh; +}; + +struct ixgbe_thermal_sensor_data { + struct ixgbe_thermal_diode_data sensor[IXGBE_MAX_SENSORS]; +}; + + +#define NVM_OROM_OFFSET 0x17 +#define NVM_OROM_BLK_LOW 0x83 +#define NVM_OROM_BLK_HI 0x84 +#define NVM_OROM_PATCH_MASK 0xFF +#define NVM_OROM_SHIFT 8 + +#define NVM_VER_MASK 0x00FF /* version mask */ +#define NVM_VER_SHIFT 8 /* version bit shift */ +#define NVM_OEM_PROD_VER_PTR 0x1B /* OEM Product version block pointer */ +#define NVM_OEM_PROD_VER_CAP_OFF 0x1 /* OEM Product version format offset */ +#define NVM_OEM_PROD_VER_OFF_L 0x2 /* OEM Product version offset low */ +#define NVM_OEM_PROD_VER_OFF_H 0x3 /* OEM Product version offset high */ +#define NVM_OEM_PROD_VER_CAP_MASK 0xF /* OEM Product version cap mask */ +#define NVM_OEM_PROD_VER_MOD_LEN 0x3 /* OEM Product version module length */ +#define NVM_ETK_OFF_LOW 0x2D /* version low order word */ +#define NVM_ETK_OFF_HI 0x2E /* version high 
order word */ +#define NVM_ETK_SHIFT 16 /* high version word shift */ +#define NVM_VER_INVALID 0xFFFF +#define NVM_ETK_VALID 0x8000 +#define NVM_INVALID_PTR 0xFFFF +#define NVM_VER_SIZE 32 /* version sting size */ + +struct ixgbe_nvm_version { + u32 etk_id; + u8 nvm_major; + u16 nvm_minor; + u8 nvm_id; + + bool oem_valid; + u8 oem_major; + u8 oem_minor; + u16 oem_release; + + bool or_valid; + u8 or_major; + u16 or_build; + u8 or_patch; + +}; + +/* Interrupt Registers */ +#define IXGBE_EICR 0x00800 +#define IXGBE_EICS 0x00808 +#define IXGBE_EIMS 0x00880 +#define IXGBE_EIMC 0x00888 +#define IXGBE_EIAC 0x00810 +#define IXGBE_EIAM 0x00890 +#define IXGBE_EICS_EX(_i) (0x00A90 + (_i) * 4) +#define IXGBE_EIMS_EX(_i) (0x00AA0 + (_i) * 4) +#define IXGBE_EIMC_EX(_i) (0x00AB0 + (_i) * 4) +#define IXGBE_EIAM_EX(_i) (0x00AD0 + (_i) * 4) +/* 82599 EITR is only 12 bits, with the lower 3 always zero */ +/* + * 82598 EITR is 16 bits but set the limits based on the max + * supported by all ixgbe hardware + */ +#define IXGBE_MAX_INT_RATE 488281 +#define IXGBE_MIN_INT_RATE 956 +#define IXGBE_MAX_EITR 0x00000FF8 +#define IXGBE_MIN_EITR 8 +#define IXGBE_EITR(_i) (((_i) <= 23) ? (0x00820 + ((_i) * 4)) : \ + (0x012300 + (((_i) - 24) * 4))) +#define IXGBE_EITR_ITR_INT_MASK 0x00000FF8 +#define IXGBE_EITR_LLI_MOD 0x00008000 +#define IXGBE_EITR_CNT_WDIS 0x80000000 +#define IXGBE_IVAR(_i) (0x00900 + ((_i) * 4)) /* 24 at 0x900-0x960 */ +#define IXGBE_IVAR_MISC 0x00A00 /* misc MSI-X interrupt causes */ +#define IXGBE_EITRSEL 0x00894 +#define IXGBE_MSIXT 0x00000 /* MSI-X Table. 0x0000 - 0x01C */ +#define IXGBE_MSIXPBA 0x02000 /* MSI-X Pending bit array */ +#define IXGBE_PBACL(_i) (((_i) == 0) ? (0x11068) : (0x110C0 + ((_i) * 4))) +#define IXGBE_GPIE 0x00898 + +/* Flow Control Registers */ +#define IXGBE_FCADBUL 0x03210 +#define IXGBE_FCADBUH 0x03214 +#define IXGBE_FCAMACL 0x04328 +#define IXGBE_FCAMACH 0x0432C +#define IXGBE_FCRTH_82599(_i) (0x03260 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_FCRTL_82599(_i) (0x03220 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_PFCTOP 0x03008 +#define IXGBE_FCTTV(_i) (0x03200 + ((_i) * 4)) /* 4 of these (0-3) */ +#define IXGBE_FCRTL(_i) (0x03220 + ((_i) * 8)) /* 8 of these (0-7) */ +#define IXGBE_FCRTH(_i) (0x03260 + ((_i) * 8)) /* 8 of these (0-7) */ +#define IXGBE_FCRTV 0x032A0 +#define IXGBE_FCCFG 0x03D00 +#define IXGBE_TFCS 0x0CE00 + +/* Receive DMA Registers */ +#define IXGBE_RDBAL(_i) (((_i) < 64) ? (0x01000 + ((_i) * 0x40)) : \ + (0x0D000 + (((_i) - 64) * 0x40))) +#define IXGBE_RDBAH(_i) (((_i) < 64) ? (0x01004 + ((_i) * 0x40)) : \ + (0x0D004 + (((_i) - 64) * 0x40))) +#define IXGBE_RDLEN(_i) (((_i) < 64) ? (0x01008 + ((_i) * 0x40)) : \ + (0x0D008 + (((_i) - 64) * 0x40))) +#define IXGBE_RDH(_i) (((_i) < 64) ? (0x01010 + ((_i) * 0x40)) : \ + (0x0D010 + (((_i) - 64) * 0x40))) +#define IXGBE_RDT(_i) (((_i) < 64) ? (0x01018 + ((_i) * 0x40)) : \ + (0x0D018 + (((_i) - 64) * 0x40))) +#define IXGBE_RXDCTL(_i) (((_i) < 64) ? (0x01028 + ((_i) * 0x40)) : \ + (0x0D028 + (((_i) - 64) * 0x40))) +#define IXGBE_RSCCTL(_i) (((_i) < 64) ? (0x0102C + ((_i) * 0x40)) : \ + (0x0D02C + (((_i) - 64) * 0x40))) +#define IXGBE_RSCDBU 0x03028 +#define IXGBE_RDDCC 0x02F20 +#define IXGBE_RXMEMWRAP 0x03190 +#define IXGBE_STARCTRL 0x03024 +/* + * Split and Replication Receive Control Registers + * 00-15 : 0x02100 + n*4 + * 16-64 : 0x01014 + n*0x40 + * 64-127: 0x0D014 + (n-64)*0x40 + */ +#define IXGBE_SRRCTL(_i) (((_i) <= 15) ? (0x02100 + ((_i) * 4)) : \ + (((_i) < 64) ? 
(0x01014 + ((_i) * 0x40)) : \ + (0x0D014 + (((_i) - 64) * 0x40)))) +/* + * Rx DCA Control Register: + * 00-15 : 0x02200 + n*4 + * 16-64 : 0x0100C + n*0x40 + * 64-127: 0x0D00C + (n-64)*0x40 + */ +#define IXGBE_DCA_RXCTRL(_i) (((_i) <= 15) ? (0x02200 + ((_i) * 4)) : \ + (((_i) < 64) ? (0x0100C + ((_i) * 0x40)) : \ + (0x0D00C + (((_i) - 64) * 0x40)))) +#define IXGBE_RDRXCTL 0x02F00 +/* 8 of these 0x03C00 - 0x03C1C */ +#define IXGBE_RXPBSIZE(_i) (0x03C00 + ((_i) * 4)) +#define IXGBE_RXCTRL 0x03000 +#define IXGBE_DROPEN 0x03D04 +#define IXGBE_RXPBSIZE_SHIFT 10 +#define IXGBE_RXPBSIZE_MASK 0x000FFC00 + +/* Receive Registers */ +#define IXGBE_RXCSUM 0x05000 +#define IXGBE_RFCTL 0x05008 +#define IXGBE_DRECCCTL 0x02F08 +#define IXGBE_DRECCCTL_DISABLE 0 +#define IXGBE_DRECCCTL2 0x02F8C + +/* Multicast Table Array - 128 entries */ +#define IXGBE_MTA(_i) (0x05200 + ((_i) * 4)) +#define IXGBE_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \ + (0x0A200 + ((_i) * 8))) +#define IXGBE_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \ + (0x0A204 + ((_i) * 8))) +#define IXGBE_MPSAR_LO(_i) (0x0A600 + ((_i) * 8)) +#define IXGBE_MPSAR_HI(_i) (0x0A604 + ((_i) * 8)) +/* Packet split receive type */ +#define IXGBE_PSRTYPE(_i) (((_i) <= 15) ? (0x05480 + ((_i) * 4)) : \ + (0x0EA00 + ((_i) * 4))) +/* array of 4096 1-bit vlan filters */ +#define IXGBE_VFTA(_i) (0x0A000 + ((_i) * 4)) +/*array of 4096 4-bit vlan vmdq indices */ +#define IXGBE_VFTAVIND(_j, _i) (0x0A200 + ((_j) * 0x200) + ((_i) * 4)) +#define IXGBE_FCTRL 0x05080 +#define IXGBE_VLNCTRL 0x05088 +#define IXGBE_MCSTCTRL 0x05090 +#define IXGBE_MRQC 0x05818 +#define IXGBE_SAQF(_i) (0x0E000 + ((_i) * 4)) /* Source Address Queue Filter */ +#define IXGBE_DAQF(_i) (0x0E200 + ((_i) * 4)) /* Dest. Address Queue Filter */ +#define IXGBE_SDPQF(_i) (0x0E400 + ((_i) * 4)) /* Src Dest. 
Addr Queue Filter */ +#define IXGBE_FTQF(_i) (0x0E600 + ((_i) * 4)) /* Five Tuple Queue Filter */ +#define IXGBE_ETQF(_i) (0x05128 + ((_i) * 4)) /* EType Queue Filter */ +#define IXGBE_ETQS(_i) (0x0EC00 + ((_i) * 4)) /* EType Queue Select */ +#define IXGBE_SYNQF 0x0EC30 /* SYN Packet Queue Filter */ +#define IXGBE_RQTC 0x0EC70 +#define IXGBE_MTQC 0x08120 +#define IXGBE_VLVF(_i) (0x0F100 + ((_i) * 4)) /* 64 of these (0-63) */ +#define IXGBE_VLVFB(_i) (0x0F200 + ((_i) * 4)) /* 128 of these (0-127) */ +#define IXGBE_VMVIR(_i) (0x08000 + ((_i) * 4)) /* 64 of these (0-63) */ +#define IXGBE_PFFLPL 0x050B0 +#define IXGBE_PFFLPH 0x050B4 +#define IXGBE_VT_CTL 0x051B0 +#define IXGBE_PFMAILBOX(_i) (0x04B00 + (4 * (_i))) /* 64 total */ +/* 64 Mailboxes, 16 DW each */ +#define IXGBE_PFMBMEM(_i) (0x13000 + (64 * (_i))) +#define IXGBE_PFMBICR(_i) (0x00710 + (4 * (_i))) /* 4 total */ +#define IXGBE_PFMBIMR(_i) (0x00720 + (4 * (_i))) /* 4 total */ +#define IXGBE_VFRE(_i) (0x051E0 + ((_i) * 4)) +#define IXGBE_VFTE(_i) (0x08110 + ((_i) * 4)) +#define IXGBE_VMECM(_i) (0x08790 + ((_i) * 4)) +#define IXGBE_QDE 0x2F04 +#define IXGBE_VMTXSW(_i) (0x05180 + ((_i) * 4)) /* 2 total */ +#define IXGBE_VMOLR(_i) (0x0F000 + ((_i) * 4)) /* 64 total */ +#define IXGBE_UTA(_i) (0x0F400 + ((_i) * 4)) +#define IXGBE_MRCTL(_i) (0x0F600 + ((_i) * 4)) +#define IXGBE_VMRVLAN(_i) (0x0F610 + ((_i) * 4)) +#define IXGBE_VMRVM(_i) (0x0F630 + ((_i) * 4)) +#define IXGBE_LVMMC_RX 0x2FA8 +#define IXGBE_LVMMC_TX 0x8108 +#define IXGBE_LMVM_RX 0x2FA4 +#define IXGBE_LMVM_TX 0x8124 +#define IXGBE_WQBR_RX(_i) (0x2FB0 + ((_i) * 4)) /* 4 total */ +#define IXGBE_WQBR_TX(_i) (0x8130 + ((_i) * 4)) /* 4 total */ +#define IXGBE_L34T_IMIR(_i) (0x0E800 + ((_i) * 4)) /*128 of these (0-127)*/ +#define IXGBE_RXFECCERR0 0x051B8 +#define IXGBE_LLITHRESH 0x0EC90 +#define IXGBE_IMIR(_i) (0x05A80 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_IMIRVP 0x05AC0 +#define IXGBE_VMD_CTL 0x0581C +#define IXGBE_RETA(_i) (0x05C00 + ((_i) * 4)) /* 32 of these (0-31) */ +#define IXGBE_ERETA(_i) (0x0EE80 + ((_i) * 4)) /* 96 of these (0-95) */ +#define IXGBE_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* 10 of these (0-9) */ + +/* Registers for setting up RSS on X550 with SRIOV + * _p - pool number (0..63) + * _i - index (0..10 for PFVFRSSRK, 0..15 for PFVFRETA) + */ +#define IXGBE_PFVFMRQC(_p) (0x03400 + ((_p) * 4)) +#define IXGBE_PFVFRSSRK(_i, _p) (0x018000 + ((_i) * 4) + ((_p) * 0x40)) +#define IXGBE_PFVFRETA(_i, _p) (0x019000 + ((_i) * 4) + ((_p) * 0x40)) + +/* Flow Director registers */ +#define IXGBE_FDIRCTRL 0x0EE00 +#define IXGBE_FDIRHKEY 0x0EE68 +#define IXGBE_FDIRSKEY 0x0EE6C +#define IXGBE_FDIRDIP4M 0x0EE3C +#define IXGBE_FDIRSIP4M 0x0EE40 +#define IXGBE_FDIRTCPM 0x0EE44 +#define IXGBE_FDIRUDPM 0x0EE48 +#define IXGBE_FDIRSCTPM 0x0EE78 +#define IXGBE_FDIRIP6M 0x0EE74 +#define IXGBE_FDIRM 0x0EE70 + +/* Flow Director Stats registers */ +#define IXGBE_FDIRFREE 0x0EE38 +#define IXGBE_FDIRLEN 0x0EE4C +#define IXGBE_FDIRUSTAT 0x0EE50 +#define IXGBE_FDIRFSTAT 0x0EE54 +#define IXGBE_FDIRMATCH 0x0EE58 +#define IXGBE_FDIRMISS 0x0EE5C + +/* Flow Director Programming registers */ +#define IXGBE_FDIRSIPv6(_i) (0x0EE0C + ((_i) * 4)) /* 3 of these (0-2) */ +#define IXGBE_FDIRIPSA 0x0EE18 +#define IXGBE_FDIRIPDA 0x0EE1C +#define IXGBE_FDIRPORT 0x0EE20 +#define IXGBE_FDIRVLAN 0x0EE24 +#define IXGBE_FDIRHASH 0x0EE28 +#define IXGBE_FDIRCMD 0x0EE2C + +/* Transmit DMA registers */ +#define IXGBE_TDBAL(_i) (0x06000 + 
((_i) * 0x40)) /* 32 of them (0-31)*/ +#define IXGBE_TDBAH(_i) (0x06004 + ((_i) * 0x40)) +#define IXGBE_TDLEN(_i) (0x06008 + ((_i) * 0x40)) +#define IXGBE_TDH(_i) (0x06010 + ((_i) * 0x40)) +#define IXGBE_TDT(_i) (0x06018 + ((_i) * 0x40)) +#define IXGBE_TXDCTL(_i) (0x06028 + ((_i) * 0x40)) +#define IXGBE_TDWBAL(_i) (0x06038 + ((_i) * 0x40)) +#define IXGBE_TDWBAH(_i) (0x0603C + ((_i) * 0x40)) +#define IXGBE_DTXCTL 0x07E00 + +#define IXGBE_DMATXCTL 0x04A80 +#define IXGBE_PFVFSPOOF(_i) (0x08200 + ((_i) * 4)) /* 8 of these 0 - 7 */ +#define IXGBE_PFDTXGSWC 0x08220 +#define IXGBE_DTXMXSZRQ 0x08100 +#define IXGBE_DTXTCPFLGL 0x04A88 +#define IXGBE_DTXTCPFLGH 0x04A8C +#define IXGBE_LBDRPEN 0x0CA00 +#define IXGBE_TXPBTHRESH(_i) (0x04950 + ((_i) * 4)) /* 8 of these 0 - 7 */ + +#define IXGBE_DMATXCTL_TE 0x1 /* Transmit Enable */ +#define IXGBE_DMATXCTL_NS 0x2 /* No Snoop LSO hdr buffer */ +#define IXGBE_DMATXCTL_GDV 0x8 /* Global Double VLAN */ +#define IXGBE_DMATXCTL_MDP_EN 0x20 /* Bit 5 */ +#define IXGBE_DMATXCTL_MBINTEN 0x40 /* Bit 6 */ +#define IXGBE_DMATXCTL_VT_SHIFT 16 /* VLAN EtherType */ + +#define IXGBE_PFDTXGSWC_VT_LBEN 0x1 /* Local L2 VT switch enable */ + +/* Anti-spoofing defines */ +#define IXGBE_SPOOF_MACAS_MASK 0xFF +#define IXGBE_SPOOF_VLANAS_MASK 0xFF00 +#define IXGBE_SPOOF_VLANAS_SHIFT 8 +#define IXGBE_SPOOF_ETHERTYPEAS 0xFF000000 +#define IXGBE_SPOOF_ETHERTYPEAS_SHIFT 16 +#define IXGBE_PFVFSPOOF_REG_COUNT 8 +/* 16 of these (0-15) */ +#define IXGBE_DCA_TXCTRL(_i) (0x07200 + ((_i) * 4)) +/* Tx DCA Control register : 128 of these (0-127) */ +#define IXGBE_DCA_TXCTRL_82599(_i) (0x0600C + ((_i) * 0x40)) +#define IXGBE_TIPG 0x0CB00 +#define IXGBE_TXPBSIZE(_i) (0x0CC00 + ((_i) * 4)) /* 8 of these */ +#define IXGBE_MNGTXMAP 0x0CD10 +#define IXGBE_TIPG_FIBER_DEFAULT 3 +#define IXGBE_TXPBSIZE_SHIFT 10 + +/* Wake up registers */ +#define IXGBE_WUC 0x05800 +#define IXGBE_WUFC 0x05808 +#define IXGBE_WUS 0x05810 +#define IXGBE_IPAV 0x05838 +#define IXGBE_IP4AT 0x05840 /* IPv4 table 0x5840-0x5858 */ +#define IXGBE_IP6AT 0x05880 /* IPv6 table 0x5880-0x588F */ + +#define IXGBE_WUPL 0x05900 +#define IXGBE_WUPM 0x05A00 /* wake up pkt memory 0x5A00-0x5A7C */ +#define IXGBE_PROXYS 0x05F60 /* Proxying Status Register */ +#define IXGBE_PROXYFC 0x05F64 /* Proxying Filter Control Register */ +#define IXGBE_VXLANCTRL 0x0000507C /* Rx filter VXLAN UDPPORT Register */ + +/* masks for accessing VXLAN and GENEVE UDP ports */ +#define IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK 0x0000ffff /* VXLAN port */ +#define IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK 0xffff0000 /* GENEVE port */ +#define IXGBE_VXLANCTRL_ALL_UDPPORT_MASK 0xffffffff /* GENEVE/VXLAN */ +#define IXGBE_VXLANCTRL_GENEVE_UDPPORT_SHIFT 16 + +#define IXGBE_FHFT(_n) (0x09000 + ((_n) * 0x100)) /* Flex host filter table */ +/* Ext Flexible Host Filter Table */ +#define IXGBE_FHFT_EXT(_n) (0x09800 + ((_n) * 0x100)) +#define IXGBE_FHFT_EXT_X550(_n) (0x09600 + ((_n) * 0x100)) + +/* Four Flexible Filters are supported */ +#define IXGBE_FLEXIBLE_FILTER_COUNT_MAX 4 +/* Six Flexible Filters are supported */ +#define IXGBE_FLEXIBLE_FILTER_COUNT_MAX_6 6 +/* Eight Flexible Filters are supported */ +#define IXGBE_FLEXIBLE_FILTER_COUNT_MAX_8 8 +#define IXGBE_EXT_FLEXIBLE_FILTER_COUNT_MAX 2 + +/* Each Flexible Filter is at most 128 (0x80) bytes in length */ +#define IXGBE_FLEXIBLE_FILTER_SIZE_MAX 128 +#define IXGBE_FHFT_LENGTH_OFFSET 0xFC /* Length byte in FHFT */ +#define IXGBE_FHFT_LENGTH_MASK 0x0FF /* Length in lower byte */ + +/* Definitions for power management and wakeup 
registers */ +/* Wake Up Control */ +#define IXGBE_WUC_PME_EN 0x00000002 /* PME Enable */ +#define IXGBE_WUC_PME_STATUS 0x00000004 /* PME Status */ +#define IXGBE_WUC_WKEN 0x00000010 /* Enable PE_WAKE_N pin assertion */ + +/* Wake Up Filter Control */ +#define IXGBE_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ +#define IXGBE_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */ +#define IXGBE_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ +#define IXGBE_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */ +#define IXGBE_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ +#define IXGBE_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */ +#define IXGBE_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */ +#define IXGBE_WUFC_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Enable */ +#define IXGBE_WUFC_MNG 0x00000100 /* Directed Mgmt Packet Wakeup Enable */ + +#define IXGBE_WUFC_IGNORE_TCO 0x00008000 /* Ignore WakeOn TCO packets */ +#define IXGBE_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */ +#define IXGBE_WUFC_FLX1 0x00020000 /* Flexible Filter 1 Enable */ +#define IXGBE_WUFC_FLX2 0x00040000 /* Flexible Filter 2 Enable */ +#define IXGBE_WUFC_FLX3 0x00080000 /* Flexible Filter 3 Enable */ +#define IXGBE_WUFC_FLX4 0x00100000 /* Flexible Filter 4 Enable */ +#define IXGBE_WUFC_FLX5 0x00200000 /* Flexible Filter 5 Enable */ +#define IXGBE_WUFC_FLX_FILTERS 0x000F0000 /* Mask for 4 flex filters */ +#define IXGBE_WUFC_FLX_FILTERS_6 0x003F0000 /* Mask for 6 flex filters */ +#define IXGBE_WUFC_FLX_FILTERS_8 0x00FF0000 /* Mask for 8 flex filters */ +#define IXGBE_WUFC_FW_RST_WK 0x80000000 /* Ena wake on FW reset assertion */ +/* Mask for Ext. flex filters */ +#define IXGBE_WUFC_EXT_FLX_FILTERS 0x00300000 +#define IXGBE_WUFC_ALL_FILTERS 0x000F00FF /* Mask all 4 flex filters */ +#define IXGBE_WUFC_ALL_FILTERS_6 0x003F00FF /* Mask all 6 flex filters */ +#define IXGBE_WUFC_ALL_FILTERS_8 0x00FF00FF /* Mask all 8 flex filters */ +#define IXGBE_WUFC_FLX_OFFSET 16 /* Offset to the Flexible Filters bits */ + +/* Wake Up Status */ +#define IXGBE_WUS_LNKC IXGBE_WUFC_LNKC +#define IXGBE_WUS_MAG IXGBE_WUFC_MAG +#define IXGBE_WUS_EX IXGBE_WUFC_EX +#define IXGBE_WUS_MC IXGBE_WUFC_MC +#define IXGBE_WUS_BC IXGBE_WUFC_BC +#define IXGBE_WUS_ARP IXGBE_WUFC_ARP +#define IXGBE_WUS_IPV4 IXGBE_WUFC_IPV4 +#define IXGBE_WUS_IPV6 IXGBE_WUFC_IPV6 +#define IXGBE_WUS_MNG IXGBE_WUFC_MNG +#define IXGBE_WUS_FLX0 IXGBE_WUFC_FLX0 +#define IXGBE_WUS_FLX1 IXGBE_WUFC_FLX1 +#define IXGBE_WUS_FLX2 IXGBE_WUFC_FLX2 +#define IXGBE_WUS_FLX3 IXGBE_WUFC_FLX3 +#define IXGBE_WUS_FLX4 IXGBE_WUFC_FLX4 +#define IXGBE_WUS_FLX5 IXGBE_WUFC_FLX5 +#define IXGBE_WUS_FLX_FILTERS IXGBE_WUFC_FLX_FILTERS +#define IXGBE_WUS_FW_RST_WK IXGBE_WUFC_FW_RST_WK +/* Proxy Status */ +#define IXGBE_PROXYS_EX 0x00000004 /* Exact packet received */ +#define IXGBE_PROXYS_ARP_DIR 0x00000020 /* ARP w/filter match received */ +#define IXGBE_PROXYS_NS 0x00000200 /* IPV6 NS received */ +#define IXGBE_PROXYS_NS_DIR 0x00000400 /* IPV6 NS w/DA match received */ +#define IXGBE_PROXYS_ARP 0x00000800 /* ARP request packet received */ +#define IXGBE_PROXYS_MLD 0x00001000 /* IPv6 MLD packet received */ + +/* Proxying Filter Control */ +#define IXGBE_PROXYFC_ENABLE 0x00000001 /* Port Proxying Enable */ +#define IXGBE_PROXYFC_EX 0x00000004 /* Directed Exact Proxy Enable */ +#define IXGBE_PROXYFC_ARP_DIR 0x00000020 /* Directed ARP Proxy Enable */ +#define IXGBE_PROXYFC_NS 0x00000200 /* IPv6 Neighbor Solicitation */ +#define IXGBE_PROXYFC_ARP 
0x00000800 /* ARP Request Proxy Enable */ +#define IXGBE_PROXYFC_MLD 0x00000800 /* IPv6 MLD Proxy Enable */ +#define IXGBE_PROXYFC_NO_TCO 0x00008000 /* Ignore TCO packets */ + +#define IXGBE_WUPL_LENGTH_MASK 0xFFFF + +/* DCB registers */ +#define IXGBE_DCB_MAX_TRAFFIC_CLASS 8 +#define IXGBE_RMCS 0x03D00 +#define IXGBE_DPMCS 0x07F40 +#define IXGBE_PDPMCS 0x0CD00 +#define IXGBE_RUPPBMR 0x050A0 +#define IXGBE_RT2CR(_i) (0x03C20 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_RT2SR(_i) (0x03C40 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_TDTQ2TCCR(_i) (0x0602C + ((_i) * 0x40)) /* 8 of these (0-7) */ +#define IXGBE_TDTQ2TCSR(_i) (0x0622C + ((_i) * 0x40)) /* 8 of these (0-7) */ +#define IXGBE_TDPT2TCCR(_i) (0x0CD20 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_TDPT2TCSR(_i) (0x0CD40 + ((_i) * 4)) /* 8 of these (0-7) */ + +/* Power Management */ +/* DMA Coalescing configuration */ +struct ixgbe_dmac_config { + u16 watchdog_timer; /* usec units */ + bool fcoe_en; + u32 link_speed; + u8 fcoe_tc; + u8 num_tcs; +}; + +/* + * DMA Coalescing threshold Rx PB TC[n] value in Kilobyte by link speed. + * DMACRXT = 10Gbps = 10,000 bits / usec = 1250 bytes / usec 70 * 1250 == + * 87500 bytes [85KB] + */ +#define IXGBE_DMACRXT_10G 0x55 +#define IXGBE_DMACRXT_1G 0x09 +#define IXGBE_DMACRXT_100M 0x01 + +/* DMA Coalescing registers */ +#define IXGBE_DMCMNGTH 0x15F20 /* Management Threshold */ +#define IXGBE_DMACR 0x02400 /* Control register */ +#define IXGBE_DMCTH(_i) (0x03300 + ((_i) * 4)) /* 8 of these */ +#define IXGBE_DMCTLX 0x02404 /* Time to Lx request */ +/* DMA Coalescing register fields */ +#define IXGBE_DMCMNGTH_DMCMNGTH_MASK 0x000FFFF0 /* Mng Threshold mask */ +#define IXGBE_DMCMNGTH_DMCMNGTH_SHIFT 4 /* Management Threshold shift */ +#define IXGBE_DMACR_DMACWT_MASK 0x0000FFFF /* Watchdog Timer mask */ +#define IXGBE_DMACR_HIGH_PRI_TC_MASK 0x00FF0000 +#define IXGBE_DMACR_HIGH_PRI_TC_SHIFT 16 +#define IXGBE_DMACR_EN_MNG_IND 0x10000000 /* Enable Mng Indications */ +#define IXGBE_DMACR_LX_COAL_IND 0x40000000 /* Lx Coalescing indicate */ +#define IXGBE_DMACR_DMAC_EN 0x80000000 /* DMA Coalescing Enable */ +#define IXGBE_DMCTH_DMACRXT_MASK 0x000001FF /* Receive Threshold mask */ +#define IXGBE_DMCTLX_TTLX_MASK 0x00000FFF /* Time to Lx request mask */ + +/* EEE registers */ +#define IXGBE_EEER 0x043A0 /* EEE register */ +#define IXGBE_EEE_STAT 0x04398 /* EEE Status */ +#define IXGBE_EEE_SU 0x04380 /* EEE Set up */ +#define IXGBE_EEE_SU_TEEE_DLY_SHIFT 26 +#define IXGBE_TLPIC 0x041F4 /* EEE Tx LPI count */ +#define IXGBE_RLPIC 0x041F8 /* EEE Rx LPI count */ + +/* EEE register fields */ +#define IXGBE_EEER_TX_LPI_EN 0x00010000 /* Enable EEE LPI TX path */ +#define IXGBE_EEER_RX_LPI_EN 0x00020000 /* Enable EEE LPI RX path */ +#define IXGBE_EEE_STAT_NEG 0x20000000 /* EEE support neg on link */ +#define IXGBE_EEE_RX_LPI_STATUS 0x40000000 /* RX Link in LPI status */ +#define IXGBE_EEE_TX_LPI_STATUS 0x80000000 /* TX Link in LPI status */ + +/* Security Control Registers */ +#define IXGBE_SECTXCTRL 0x08800 +#define IXGBE_SECTXSTAT 0x08804 +#define IXGBE_SECTXBUFFAF 0x08808 +#define IXGBE_SECTXMINIFG 0x08810 +#define IXGBE_SECRXCTRL 0x08D00 +#define IXGBE_SECRXSTAT 0x08D04 + +/* Security Bit Fields and Masks */ +#define IXGBE_SECTXCTRL_SECTX_DIS 0x00000001 +#define IXGBE_SECTXCTRL_TX_DIS 0x00000002 +#define IXGBE_SECTXCTRL_STORE_FORWARD 0x00000004 + +#define IXGBE_SECTXSTAT_SECTX_RDY 0x00000001 +#define IXGBE_SECTXSTAT_ECC_TXERR 0x00000002 + +#define IXGBE_SECRXCTRL_SECRX_DIS 0x00000001 +#define 
IXGBE_SECRXCTRL_RX_DIS 0x00000002 + +#define IXGBE_SECRXSTAT_SECRX_RDY 0x00000001 +#define IXGBE_SECRXSTAT_ECC_RXERR 0x00000002 + +/* LinkSec (MacSec) Registers */ +#define IXGBE_LSECTXCAP 0x08A00 +#define IXGBE_LSECRXCAP 0x08F00 +#define IXGBE_LSECTXCTRL 0x08A04 +#define IXGBE_LSECTXSCL 0x08A08 /* SCI Low */ +#define IXGBE_LSECTXSCH 0x08A0C /* SCI High */ +#define IXGBE_LSECTXSA 0x08A10 +#define IXGBE_LSECTXPN0 0x08A14 +#define IXGBE_LSECTXPN1 0x08A18 +#define IXGBE_LSECTXKEY0(_n) (0x08A1C + (4 * (_n))) /* 4 of these (0-3) */ +#define IXGBE_LSECTXKEY1(_n) (0x08A2C + (4 * (_n))) /* 4 of these (0-3) */ +#define IXGBE_LSECRXCTRL 0x08F04 +#define IXGBE_LSECRXSCL 0x08F08 +#define IXGBE_LSECRXSCH 0x08F0C +#define IXGBE_LSECRXSA(_i) (0x08F10 + (4 * (_i))) /* 2 of these (0-1) */ +#define IXGBE_LSECRXPN(_i) (0x08F18 + (4 * (_i))) /* 2 of these (0-1) */ +#define IXGBE_LSECRXKEY(_n, _m) (0x08F20 + ((0x10 * (_n)) + (4 * (_m)))) +#define IXGBE_LSECTXUT 0x08A3C /* OutPktsUntagged */ +#define IXGBE_LSECTXPKTE 0x08A40 /* OutPktsEncrypted */ +#define IXGBE_LSECTXPKTP 0x08A44 /* OutPktsProtected */ +#define IXGBE_LSECTXOCTE 0x08A48 /* OutOctetsEncrypted */ +#define IXGBE_LSECTXOCTP 0x08A4C /* OutOctetsProtected */ +#define IXGBE_LSECRXUT 0x08F40 /* InPktsUntagged/InPktsNoTag */ +#define IXGBE_LSECRXOCTD 0x08F44 /* InOctetsDecrypted */ +#define IXGBE_LSECRXOCTV 0x08F48 /* InOctetsValidated */ +#define IXGBE_LSECRXBAD 0x08F4C /* InPktsBadTag */ +#define IXGBE_LSECRXNOSCI 0x08F50 /* InPktsNoSci */ +#define IXGBE_LSECRXUNSCI 0x08F54 /* InPktsUnknownSci */ +#define IXGBE_LSECRXUNCH 0x08F58 /* InPktsUnchecked */ +#define IXGBE_LSECRXDELAY 0x08F5C /* InPktsDelayed */ +#define IXGBE_LSECRXLATE 0x08F60 /* InPktsLate */ +#define IXGBE_LSECRXOK(_n) (0x08F64 + (0x04 * (_n))) /* InPktsOk */ +#define IXGBE_LSECRXINV(_n) (0x08F6C + (0x04 * (_n))) /* InPktsInvalid */ +#define IXGBE_LSECRXNV(_n) (0x08F74 + (0x04 * (_n))) /* InPktsNotValid */ +#define IXGBE_LSECRXUNSA 0x08F7C /* InPktsUnusedSa */ +#define IXGBE_LSECRXNUSA 0x08F80 /* InPktsNotUsingSa */ + +/* LinkSec (MacSec) Bit Fields and Masks */ +#define IXGBE_LSECTXCAP_SUM_MASK 0x00FF0000 +#define IXGBE_LSECTXCAP_SUM_SHIFT 16 +#define IXGBE_LSECRXCAP_SUM_MASK 0x00FF0000 +#define IXGBE_LSECRXCAP_SUM_SHIFT 16 + +#define IXGBE_LSECTXCTRL_EN_MASK 0x00000003 +#define IXGBE_LSECTXCTRL_DISABLE 0x0 +#define IXGBE_LSECTXCTRL_AUTH 0x1 +#define IXGBE_LSECTXCTRL_AUTH_ENCRYPT 0x2 +#define IXGBE_LSECTXCTRL_AISCI 0x00000020 +#define IXGBE_LSECTXCTRL_PNTHRSH_MASK 0xFFFFFF00 +#define IXGBE_LSECTXCTRL_RSV_MASK 0x000000D8 + +#define IXGBE_LSECRXCTRL_EN_MASK 0x0000000C +#define IXGBE_LSECRXCTRL_EN_SHIFT 2 +#define IXGBE_LSECRXCTRL_DISABLE 0x0 +#define IXGBE_LSECRXCTRL_CHECK 0x1 +#define IXGBE_LSECRXCTRL_STRICT 0x2 +#define IXGBE_LSECRXCTRL_DROP 0x3 +#define IXGBE_LSECRXCTRL_PLSH 0x00000040 +#define IXGBE_LSECRXCTRL_RP 0x00000080 +#define IXGBE_LSECRXCTRL_RSV_MASK 0xFFFFFF33 + +/* IpSec Registers */ +#define IXGBE_IPSTXIDX 0x08900 +#define IXGBE_IPSTXSALT 0x08904 +#define IXGBE_IPSTXKEY(_i) (0x08908 + (4 * (_i))) /* 4 of these (0-3) */ +#define IXGBE_IPSRXIDX 0x08E00 +#define IXGBE_IPSRXIPADDR(_i) (0x08E04 + (4 * (_i))) /* 4 of these (0-3) */ +#define IXGBE_IPSRXSPI 0x08E14 +#define IXGBE_IPSRXIPIDX 0x08E18 +#define IXGBE_IPSRXKEY(_i) (0x08E1C + (4 * (_i))) /* 4 of these (0-3) */ +#define IXGBE_IPSRXSALT 0x08E2C +#define IXGBE_IPSRXMOD 0x08E30 + +#define IXGBE_SECTXCTRL_STORE_FORWARD_ENABLE 0x4 + +/* DCB registers */ +#define IXGBE_RTRPCS 0x02430 +#define IXGBE_RTTDCS 0x04900 +#define 
IXGBE_RTTDCS_ARBDIS 0x00000040 /* DCB arbiter disable */ +#define IXGBE_RTTPCS 0x0CD00 +#define IXGBE_RTRUP2TC 0x03020 +#define IXGBE_RTTUP2TC 0x0C800 +#define IXGBE_RTRPT4C(_i) (0x02140 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_TXLLQ(_i) (0x082E0 + ((_i) * 4)) /* 4 of these (0-3) */ +#define IXGBE_RTRPT4S(_i) (0x02160 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_RTTDT2C(_i) (0x04910 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_RTTDT2S(_i) (0x04930 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_RTTPT2C(_i) (0x0CD20 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_RTTPT2S(_i) (0x0CD40 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_RTTDQSEL 0x04904 +#define IXGBE_RTTDT1C 0x04908 +#define IXGBE_RTTDT1S 0x0490C +#define IXGBE_RTTQCNCR 0x08B00 +#define IXGBE_RTTQCNTG 0x04A90 +#define IXGBE_RTTBCNRD 0x0498C +#define IXGBE_RTTQCNRR 0x0498C +#define IXGBE_RTTDTECC 0x04990 +#define IXGBE_RTTDTECC_NO_BCN 0x00000100 + +#define IXGBE_RTTBCNRC 0x04984 +#define IXGBE_RTTBCNRC_RS_ENA 0x80000000 +#define IXGBE_RTTBCNRC_RF_DEC_MASK 0x00003FFF +#define IXGBE_RTTBCNRC_RF_INT_SHIFT 14 +#define IXGBE_RTTBCNRC_RF_INT_MASK \ + (IXGBE_RTTBCNRC_RF_DEC_MASK << IXGBE_RTTBCNRC_RF_INT_SHIFT) +#define IXGBE_RTTBCNRM 0x04980 +#define IXGBE_RTTQCNRM 0x04980 + +/* BCN (for DCB) Registers */ +#define IXGBE_RTTBCNRS 0x04988 +#define IXGBE_RTTBCNCR 0x08B00 +#define IXGBE_RTTBCNACH 0x08B04 +#define IXGBE_RTTBCNACL 0x08B08 +#define IXGBE_RTTBCNTG 0x04A90 +#define IXGBE_RTTBCNIDX 0x08B0C +#define IXGBE_RTTBCNCP 0x08B10 +#define IXGBE_RTFRTIMER 0x08B14 +#define IXGBE_RTTBCNRTT 0x05150 +#define IXGBE_RTTBCNRD 0x0498C + +/* FCoE DMA Context Registers */ +/* FCoE Direct DMA Context */ +#define IXGBE_FCDDC(_i, _j) (0x20000 + ((_i) * 0x4) + ((_j) * 0x10)) +#define IXGBE_FCPTRL 0x02410 /* FC User Desc. PTR Low */ +#define IXGBE_FCPTRH 0x02414 /* FC USer Desc. 
PTR High */ +#define IXGBE_FCBUFF 0x02418 /* FC Buffer Control */ +#define IXGBE_FCDMARW 0x02420 /* FC Receive DMA RW */ +#define IXGBE_FCBUFF_VALID (1 << 0) /* DMA Context Valid */ +#define IXGBE_FCBUFF_BUFFSIZE (3 << 3) /* User Buffer Size */ +#define IXGBE_FCBUFF_WRCONTX (1 << 7) /* 0: Initiator, 1: Target */ +#define IXGBE_FCBUFF_BUFFCNT 0x0000ff00 /* Number of User Buffers */ +#define IXGBE_FCBUFF_OFFSET 0xffff0000 /* User Buffer Offset */ +#define IXGBE_FCBUFF_BUFFSIZE_SHIFT 3 +#define IXGBE_FCBUFF_BUFFCNT_SHIFT 8 +#define IXGBE_FCBUFF_OFFSET_SHIFT 16 +#define IXGBE_FCDMARW_WE (1 << 14) /* Write enable */ +#define IXGBE_FCDMARW_RE (1 << 15) /* Read enable */ +#define IXGBE_FCDMARW_FCOESEL 0x000001ff /* FC X_ID: 11 bits */ +#define IXGBE_FCDMARW_LASTSIZE 0xffff0000 /* Last User Buffer Size */ +#define IXGBE_FCDMARW_LASTSIZE_SHIFT 16 +/* FCoE SOF/EOF */ +#define IXGBE_TEOFF 0x04A94 /* Tx FC EOF */ +#define IXGBE_TSOFF 0x04A98 /* Tx FC SOF */ +#define IXGBE_REOFF 0x05158 /* Rx FC EOF */ +#define IXGBE_RSOFF 0x051F8 /* Rx FC SOF */ +/* FCoE Filter Context Registers */ +#define IXGBE_FCD_ID 0x05114 /* FCoE D_ID */ +#define IXGBE_FCSMAC 0x0510C /* FCoE Source MAC */ +#define IXGBE_FCFLTRW_SMAC_HIGH_SHIFT 16 +/* FCoE Direct Filter Context */ +#define IXGBE_FCDFC(_i, _j) (0x28000 + ((_i) * 0x4) + ((_j) * 0x10)) +#define IXGBE_FCDFCD(_i) (0x30000 + ((_i) * 0x4)) +#define IXGBE_FCFLT 0x05108 /* FC FLT Context */ +#define IXGBE_FCFLTRW 0x05110 /* FC Filter RW Control */ +#define IXGBE_FCPARAM 0x051d8 /* FC Offset Parameter */ +#define IXGBE_FCFLT_VALID (1 << 0) /* Filter Context Valid */ +#define IXGBE_FCFLT_FIRST (1 << 1) /* Filter First */ +#define IXGBE_FCFLT_SEQID 0x00ff0000 /* Sequence ID */ +#define IXGBE_FCFLT_SEQCNT 0xff000000 /* Sequence Count */ +#define IXGBE_FCFLTRW_RVALDT (1 << 13) /* Fast Re-Validation */ +#define IXGBE_FCFLTRW_WE (1 << 14) /* Write Enable */ +#define IXGBE_FCFLTRW_RE (1 << 15) /* Read Enable */ +/* FCoE Receive Control */ +#define IXGBE_FCRXCTRL 0x05100 /* FC Receive Control */ +#define IXGBE_FCRXCTRL_FCOELLI (1 << 0) /* Low latency interrupt */ +#define IXGBE_FCRXCTRL_SAVBAD (1 << 1) /* Save Bad Frames */ +#define IXGBE_FCRXCTRL_FRSTRDH (1 << 2) /* EN 1st Read Header */ +#define IXGBE_FCRXCTRL_LASTSEQH (1 << 3) /* EN Last Header in Seq */ +#define IXGBE_FCRXCTRL_ALLH (1 << 4) /* EN All Headers */ +#define IXGBE_FCRXCTRL_FRSTSEQH (1 << 5) /* EN 1st Seq. 
Header */ +#define IXGBE_FCRXCTRL_ICRC (1 << 6) /* Ignore Bad FC CRC */ +#define IXGBE_FCRXCTRL_FCCRCBO (1 << 7) /* FC CRC Byte Ordering */ +#define IXGBE_FCRXCTRL_FCOEVER 0x00000f00 /* FCoE Version: 4 bits */ +#define IXGBE_FCRXCTRL_FCOEVER_SHIFT 8 +/* FCoE Redirection */ +#define IXGBE_FCRECTL 0x0ED00 /* FC Redirection Control */ +#define IXGBE_FCRETA0 0x0ED10 /* FC Redirection Table 0 */ +#define IXGBE_FCRETA(_i) (IXGBE_FCRETA0 + ((_i) * 4)) /* FCoE Redir */ +#define IXGBE_FCRECTL_ENA 0x1 /* FCoE Redir Table Enable */ +#define IXGBE_FCRETASEL_ENA 0x2 /* FCoE FCRETASEL bit */ +#define IXGBE_FCRETA_SIZE 8 /* Max entries in FCRETA */ +#define IXGBE_FCRETA_ENTRY_MASK 0x0000007f /* 7 bits for the queue index */ +#define IXGBE_FCRETA_SIZE_X550 32 /* Max entries in FCRETA */ +/* Higher 7 bits for the queue index */ +#define IXGBE_FCRETA_ENTRY_HIGH_MASK 0x007F0000 +#define IXGBE_FCRETA_ENTRY_HIGH_SHIFT 16 + +/* Stats registers */ +#define IXGBE_CRCERRS 0x04000 +#define IXGBE_ILLERRC 0x04004 +#define IXGBE_ERRBC 0x04008 +#define IXGBE_MSPDC 0x04010 +#define IXGBE_MPC(_i) (0x03FA0 + ((_i) * 4)) /* 8 of these 3FA0-3FBC*/ +#define IXGBE_MLFC 0x04034 +#define IXGBE_MRFC 0x04038 +#define IXGBE_RLEC 0x04040 +#define IXGBE_LXONTXC 0x03F60 +#define IXGBE_LXONRXC 0x0CF60 +#define IXGBE_LXOFFTXC 0x03F68 +#define IXGBE_LXOFFRXC 0x0CF68 +#define IXGBE_LXONRXCNT 0x041A4 +#define IXGBE_LXOFFRXCNT 0x041A8 +#define IXGBE_PXONRXCNT(_i) (0x04140 + ((_i) * 4)) /* 8 of these */ +#define IXGBE_PXOFFRXCNT(_i) (0x04160 + ((_i) * 4)) /* 8 of these */ +#define IXGBE_PXON2OFFCNT(_i) (0x03240 + ((_i) * 4)) /* 8 of these */ +#define IXGBE_PXONTXC(_i) (0x03F00 + ((_i) * 4)) /* 8 of these 3F00-3F1C*/ +#define IXGBE_PXONRXC(_i) (0x0CF00 + ((_i) * 4)) /* 8 of these CF00-CF1C*/ +#define IXGBE_PXOFFTXC(_i) (0x03F20 + ((_i) * 4)) /* 8 of these 3F20-3F3C*/ +#define IXGBE_PXOFFRXC(_i) (0x0CF20 + ((_i) * 4)) /* 8 of these CF20-CF3C*/ +#define IXGBE_PRC64 0x0405C +#define IXGBE_PRC127 0x04060 +#define IXGBE_PRC255 0x04064 +#define IXGBE_PRC511 0x04068 +#define IXGBE_PRC1023 0x0406C +#define IXGBE_PRC1522 0x04070 +#define IXGBE_GPRC 0x04074 +#define IXGBE_BPRC 0x04078 +#define IXGBE_MPRC 0x0407C +#define IXGBE_GPTC 0x04080 +#define IXGBE_GORCL 0x04088 +#define IXGBE_GORCH 0x0408C +#define IXGBE_GOTCL 0x04090 +#define IXGBE_GOTCH 0x04094 +#define IXGBE_RNBC(_i) (0x03FC0 + ((_i) * 4)) /* 8 of these 3FC0-3FDC*/ +#define IXGBE_RUC 0x040A4 +#define IXGBE_RFC 0x040A8 +#define IXGBE_ROC 0x040AC +#define IXGBE_RJC 0x040B0 +#define IXGBE_MNGPRC 0x040B4 +#define IXGBE_MNGPDC 0x040B8 +#define IXGBE_MNGPTC 0x0CF90 +#define IXGBE_TORL 0x040C0 +#define IXGBE_TORH 0x040C4 +#define IXGBE_TPR 0x040D0 +#define IXGBE_TPT 0x040D4 +#define IXGBE_PTC64 0x040D8 +#define IXGBE_PTC127 0x040DC +#define IXGBE_PTC255 0x040E0 +#define IXGBE_PTC511 0x040E4 +#define IXGBE_PTC1023 0x040E8 +#define IXGBE_PTC1522 0x040EC +#define IXGBE_MPTC 0x040F0 +#define IXGBE_BPTC 0x040F4 +#define IXGBE_XEC 0x04120 +#define IXGBE_SSVPC 0x08780 + +#define IXGBE_RQSMR(_i) (0x02300 + ((_i) * 4)) +#define IXGBE_TQSMR(_i) (((_i) <= 7) ? 
(0x07300 + ((_i) * 4)) : \ + (0x08600 + ((_i) * 4))) +#define IXGBE_TQSM(_i) (0x08600 + ((_i) * 4)) + +#define IXGBE_QPRC(_i) (0x01030 + ((_i) * 0x40)) /* 16 of these */ +#define IXGBE_QPTC(_i) (0x06030 + ((_i) * 0x40)) /* 16 of these */ +#define IXGBE_QBRC(_i) (0x01034 + ((_i) * 0x40)) /* 16 of these */ +#define IXGBE_QBTC(_i) (0x06034 + ((_i) * 0x40)) /* 16 of these */ +#define IXGBE_QBRC_L(_i) (0x01034 + ((_i) * 0x40)) /* 16 of these */ +#define IXGBE_QBRC_H(_i) (0x01038 + ((_i) * 0x40)) /* 16 of these */ +#define IXGBE_QPRDC(_i) (0x01430 + ((_i) * 0x40)) /* 16 of these */ +#define IXGBE_QBTC_L(_i) (0x08700 + ((_i) * 0x8)) /* 16 of these */ +#define IXGBE_QBTC_H(_i) (0x08704 + ((_i) * 0x8)) /* 16 of these */ +#define IXGBE_FCCRC 0x05118 /* Num of Good Eth CRC w/ Bad FC CRC */ +#define IXGBE_FCOERPDC 0x0241C /* FCoE Rx Packets Dropped Count */ +#define IXGBE_FCLAST 0x02424 /* FCoE Last Error Count */ +#define IXGBE_FCOEPRC 0x02428 /* Number of FCoE Packets Received */ +#define IXGBE_FCOEDWRC 0x0242C /* Number of FCoE DWords Received */ +#define IXGBE_FCOEPTC 0x08784 /* Number of FCoE Packets Transmitted */ +#define IXGBE_FCOEDWTC 0x08788 /* Number of FCoE DWords Transmitted */ +#define IXGBE_FCCRC_CNT_MASK 0x0000FFFF /* CRC_CNT: bit 0 - 15 */ +#define IXGBE_FCLAST_CNT_MASK 0x0000FFFF /* Last_CNT: bit 0 - 15 */ +#define IXGBE_O2BGPTC 0x041C4 +#define IXGBE_O2BSPC 0x087B0 +#define IXGBE_B2OSPC 0x041C0 +#define IXGBE_B2OGPRC 0x02F90 +#define IXGBE_BUPRC 0x04180 +#define IXGBE_BMPRC 0x04184 +#define IXGBE_BBPRC 0x04188 +#define IXGBE_BUPTC 0x0418C +#define IXGBE_BMPTC 0x04190 +#define IXGBE_BBPTC 0x04194 +#define IXGBE_BCRCERRS 0x04198 +#define IXGBE_BXONRXC 0x0419C +#define IXGBE_BXOFFRXC 0x041E0 +#define IXGBE_BXONTXC 0x041E4 +#define IXGBE_BXOFFTXC 0x041E8 + +/* Management */ +#define IXGBE_MAVTV(_i) (0x05010 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_MFUTP(_i) (0x05030 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_MANC 0x05820 +#define IXGBE_MFVAL 0x05824 +#define IXGBE_MANC2H 0x05860 +#define IXGBE_MDEF(_i) (0x05890 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_MIPAF 0x058B0 +#define IXGBE_MMAL(_i) (0x05910 + ((_i) * 8)) /* 4 of these (0-3) */ +#define IXGBE_MMAH(_i) (0x05914 + ((_i) * 8)) /* 4 of these (0-3) */ +#define IXGBE_FTFT 0x09400 /* 0x9400-0x97FC */ +#define IXGBE_METF(_i) (0x05190 + ((_i) * 4)) /* 4 of these (0-3) */ +#define IXGBE_MDEF_EXT(_i) (0x05160 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_LSWFW 0x15F14 +#define IXGBE_BMCIP(_i) (0x05050 + ((_i) * 4)) /* 0x5050-0x505C */ +#define IXGBE_BMCIPVAL 0x05060 +#define IXGBE_BMCIP_IPADDR_TYPE 0x00000001 +#define IXGBE_BMCIP_IPADDR_VALID 0x00000002 + +/* Management Bit Fields and Masks */ +#define IXGBE_MANC_MPROXYE 0x40000000 /* Management Proxy Enable */ +#define IXGBE_MANC_RCV_TCO_EN 0x00020000 /* Rcv TCO packet enable */ +#define IXGBE_MANC_EN_BMC2OS 0x10000000 /* Ena BMC2OS and OS2BMC traffic */ +#define IXGBE_MANC_EN_BMC2OS_SHIFT 28 + +/* Firmware Semaphore Register */ +#define IXGBE_FWSM_MODE_MASK 0xE +#define IXGBE_FWSM_TS_ENABLED 0x1 +#define IXGBE_FWSM_FW_MODE_PT 0x4 +#define IXGBE_FWSM_FW_NVM_RECOVERY_MODE (1 << 5) +#define IXGBE_FWSM_EXT_ERR_IND_MASK 0x01F80000 +#define IXGBE_FWSM_FW_VAL_BIT (1 << 15) + +/* ARC Subsystem registers */ +#define IXGBE_HICR 0x15F00 +#define IXGBE_FWSTS 0x15F0C +#define IXGBE_HSMC0R 0x15F04 +#define IXGBE_HSMC1R 0x15F08 +#define IXGBE_SWSR 0x15F10 +#define IXGBE_HFDR 0x15FE8 +#define IXGBE_FLEX_MNG 0x15800 /* 0x15800 - 0x15EFC */ + +#define IXGBE_HICR_EN 0x01 
/* Enable bit - RO */ +/* Driver sets this bit when done to put command in RAM */ +#define IXGBE_HICR_C 0x02 +#define IXGBE_HICR_SV 0x04 /* Status Validity */ +#define IXGBE_HICR_FW_RESET_ENABLE 0x40 +#define IXGBE_HICR_FW_RESET 0x80 + +/* PCI-E registers */ +#define IXGBE_GCR 0x11000 +#define IXGBE_GTV 0x11004 +#define IXGBE_FUNCTAG 0x11008 +#define IXGBE_GLT 0x1100C +#define IXGBE_PCIEPIPEADR 0x11004 +#define IXGBE_PCIEPIPEDAT 0x11008 +#define IXGBE_GSCL_1 0x11010 +#define IXGBE_GSCL_2 0x11014 +#define IXGBE_GSCL_1_X540 IXGBE_GSCL_1 +#define IXGBE_GSCL_2_X540 IXGBE_GSCL_2 +#define IXGBE_GSCL_3 0x11018 +#define IXGBE_GSCL_4 0x1101C +#define IXGBE_GSCN_0 0x11020 +#define IXGBE_GSCN_1 0x11024 +#define IXGBE_GSCN_2 0x11028 +#define IXGBE_GSCN_3 0x1102C +#define IXGBE_GSCN_0_X540 IXGBE_GSCN_0 +#define IXGBE_GSCN_1_X540 IXGBE_GSCN_1 +#define IXGBE_GSCN_2_X540 IXGBE_GSCN_2 +#define IXGBE_GSCN_3_X540 IXGBE_GSCN_3 +#define IXGBE_FACTPS 0x10150 +#define IXGBE_FACTPS_X540 IXGBE_FACTPS +#define IXGBE_GSCL_1_X550 0x11800 +#define IXGBE_GSCL_2_X550 0x11804 +#define IXGBE_GSCL_1_X550EM_x IXGBE_GSCL_1_X550 +#define IXGBE_GSCL_2_X550EM_x IXGBE_GSCL_2_X550 +#define IXGBE_GSCN_0_X550 0x11820 +#define IXGBE_GSCN_1_X550 0x11824 +#define IXGBE_GSCN_2_X550 0x11828 +#define IXGBE_GSCN_3_X550 0x1182C +#define IXGBE_GSCN_0_X550EM_x IXGBE_GSCN_0_X550 +#define IXGBE_GSCN_1_X550EM_x IXGBE_GSCN_1_X550 +#define IXGBE_GSCN_2_X550EM_x IXGBE_GSCN_2_X550 +#define IXGBE_GSCN_3_X550EM_x IXGBE_GSCN_3_X550 +#define IXGBE_FACTPS_X550 IXGBE_FACTPS +#define IXGBE_FACTPS_X550EM_x IXGBE_FACTPS +#define IXGBE_GSCL_1_X550EM_a IXGBE_GSCL_1_X550 +#define IXGBE_GSCL_2_X550EM_a IXGBE_GSCL_2_X550 +#define IXGBE_GSCN_0_X550EM_a IXGBE_GSCN_0_X550 +#define IXGBE_GSCN_1_X550EM_a IXGBE_GSCN_1_X550 +#define IXGBE_GSCN_2_X550EM_a IXGBE_GSCN_2_X550 +#define IXGBE_GSCN_3_X550EM_a IXGBE_GSCN_3_X550 +#define IXGBE_FACTPS_X550EM_a 0x15FEC +#define IXGBE_FACTPS_BY_MAC(_hw) IXGBE_BY_MAC((_hw), FACTPS) + +#define IXGBE_PCIEANACTL 0x11040 +#define IXGBE_SWSM 0x10140 +#define IXGBE_SWSM_X540 IXGBE_SWSM +#define IXGBE_SWSM_X550 IXGBE_SWSM +#define IXGBE_SWSM_X550EM_x IXGBE_SWSM +#define IXGBE_SWSM_X550EM_a 0x15F70 +#define IXGBE_SWSM_BY_MAC(_hw) IXGBE_BY_MAC((_hw), SWSM) + +#define IXGBE_FWSM 0x10148 +#define IXGBE_FWSM_X540 IXGBE_FWSM +#define IXGBE_FWSM_X550 IXGBE_FWSM +#define IXGBE_FWSM_X550EM_x IXGBE_FWSM +#define IXGBE_FWSM_X550EM_a 0x15F74 +#define IXGBE_FWSM_BY_MAC(_hw) IXGBE_BY_MAC((_hw), FWSM) + +#define IXGBE_SWFW_SYNC IXGBE_GSSR +#define IXGBE_SWFW_SYNC_X540 IXGBE_SWFW_SYNC +#define IXGBE_SWFW_SYNC_X550 IXGBE_SWFW_SYNC +#define IXGBE_SWFW_SYNC_X550EM_x IXGBE_SWFW_SYNC +#define IXGBE_SWFW_SYNC_X550EM_a 0x15F78 +#define IXGBE_SWFW_SYNC_BY_MAC(_hw) IXGBE_BY_MAC((_hw), SWFW_SYNC) + +#define IXGBE_GSSR 0x10160 +#define IXGBE_MREVID 0x11064 +#define IXGBE_DCA_ID 0x11070 +#define IXGBE_DCA_CTRL 0x11074 + +/* PCI-E registers 82599-Specific */ +#define IXGBE_GCR_EXT 0x11050 +#define IXGBE_GSCL_5_82599 0x11030 +#define IXGBE_GSCL_6_82599 0x11034 +#define IXGBE_GSCL_7_82599 0x11038 +#define IXGBE_GSCL_8_82599 0x1103C +#define IXGBE_GSCL_5_X540 IXGBE_GSCL_5_82599 +#define IXGBE_GSCL_6_X540 IXGBE_GSCL_6_82599 +#define IXGBE_GSCL_7_X540 IXGBE_GSCL_7_82599 +#define IXGBE_GSCL_8_X540 IXGBE_GSCL_8_82599 +#define IXGBE_PHYADR_82599 0x11040 +#define IXGBE_PHYDAT_82599 0x11044 +#define IXGBE_PHYCTL_82599 0x11048 +#define IXGBE_PBACLR_82599 0x11068 +#define IXGBE_CIAA 0x11088 +#define IXGBE_CIAD 0x1108C +#define IXGBE_CIAA_82599 IXGBE_CIAA +#define 
IXGBE_CIAD_82599 IXGBE_CIAD +#define IXGBE_CIAA_X540 IXGBE_CIAA +#define IXGBE_CIAD_X540 IXGBE_CIAD +#define IXGBE_GSCL_5_X550 0x11810 +#define IXGBE_GSCL_6_X550 0x11814 +#define IXGBE_GSCL_7_X550 0x11818 +#define IXGBE_GSCL_8_X550 0x1181C +#define IXGBE_GSCL_5_X550EM_x IXGBE_GSCL_5_X550 +#define IXGBE_GSCL_6_X550EM_x IXGBE_GSCL_6_X550 +#define IXGBE_GSCL_7_X550EM_x IXGBE_GSCL_7_X550 +#define IXGBE_GSCL_8_X550EM_x IXGBE_GSCL_8_X550 +#define IXGBE_CIAA_X550 0x11508 +#define IXGBE_CIAD_X550 0x11510 +#define IXGBE_CIAA_X550EM_x IXGBE_CIAA_X550 +#define IXGBE_CIAD_X550EM_x IXGBE_CIAD_X550 +#define IXGBE_GSCL_5_X550EM_a IXGBE_GSCL_5_X550 +#define IXGBE_GSCL_6_X550EM_a IXGBE_GSCL_6_X550 +#define IXGBE_GSCL_7_X550EM_a IXGBE_GSCL_7_X550 +#define IXGBE_GSCL_8_X550EM_a IXGBE_GSCL_8_X550 +#define IXGBE_CIAA_X550EM_a IXGBE_CIAA_X550 +#define IXGBE_CIAD_X550EM_a IXGBE_CIAD_X550 +#define IXGBE_CIAA_BY_MAC(_hw) IXGBE_BY_MAC((_hw), CIAA) +#define IXGBE_CIAD_BY_MAC(_hw) IXGBE_BY_MAC((_hw), CIAD) +#define IXGBE_PICAUSE 0x110B0 +#define IXGBE_PIENA 0x110B8 +#define IXGBE_CDQ_MBR_82599 0x110B4 +#define IXGBE_PCIESPARE 0x110BC +#define IXGBE_MISC_REG_82599 0x110F0 +#define IXGBE_ECC_CTRL_0_82599 0x11100 +#define IXGBE_ECC_CTRL_1_82599 0x11104 +#define IXGBE_ECC_STATUS_82599 0x110E0 +#define IXGBE_BAR_CTRL_82599 0x110F4 + +/* PCI Express Control */ +#define IXGBE_GCR_CMPL_TMOUT_MASK 0x0000F000 +#define IXGBE_GCR_CMPL_TMOUT_10ms 0x00001000 +#define IXGBE_GCR_CMPL_TMOUT_RESEND 0x00010000 +#define IXGBE_GCR_CAP_VER2 0x00040000 + +#define IXGBE_GCR_EXT_MSIX_EN 0x80000000 +#define IXGBE_GCR_EXT_BUFFERS_CLEAR 0x40000000 +#define IXGBE_GCR_EXT_VT_MODE_16 0x00000001 +#define IXGBE_GCR_EXT_VT_MODE_32 0x00000002 +#define IXGBE_GCR_EXT_VT_MODE_64 0x00000003 +#define IXGBE_GCR_EXT_SRIOV (IXGBE_GCR_EXT_MSIX_EN | \ + IXGBE_GCR_EXT_VT_MODE_64) +#define IXGBE_GCR_EXT_VT_MODE_MASK 0x00000003 +/* Time Sync Registers */ +#define IXGBE_TSYNCRXCTL 0x05188 /* Rx Time Sync Control register - RW */ +#define IXGBE_TSYNCTXCTL 0x08C00 /* Tx Time Sync Control register - RW */ +#define IXGBE_RXSTMPL 0x051E8 /* Rx timestamp Low - RO */ +#define IXGBE_RXSTMPH 0x051A4 /* Rx timestamp High - RO */ +#define IXGBE_RXSATRL 0x051A0 /* Rx timestamp attribute low - RO */ +#define IXGBE_RXSATRH 0x051A8 /* Rx timestamp attribute high - RO */ +#define IXGBE_RXMTRL 0x05120 /* RX message type register low - RW */ +#define IXGBE_TXSTMPL 0x08C04 /* Tx timestamp value Low - RO */ +#define IXGBE_TXSTMPH 0x08C08 /* Tx timestamp value High - RO */ +#define IXGBE_SYSTIML 0x08C0C /* System time register Low - RO */ +#define IXGBE_SYSTIMH 0x08C10 /* System time register High - RO */ +#define IXGBE_SYSTIMR 0x08C58 /* System time register Residue - RO */ +#define IXGBE_TIMINCA 0x08C14 /* Increment attributes register - RW */ +#define IXGBE_TIMADJL 0x08C18 /* Time Adjustment Offset register Low - RW */ +#define IXGBE_TIMADJH 0x08C1C /* Time Adjustment Offset register High - RW */ +#define IXGBE_TSAUXC 0x08C20 /* TimeSync Auxiliary Control register - RW */ +#define IXGBE_TRGTTIML0 0x08C24 /* Target Time Register 0 Low - RW */ +#define IXGBE_TRGTTIMH0 0x08C28 /* Target Time Register 0 High - RW */ +#define IXGBE_TRGTTIML1 0x08C2C /* Target Time Register 1 Low - RW */ +#define IXGBE_TRGTTIMH1 0x08C30 /* Target Time Register 1 High - RW */ +#define IXGBE_CLKTIML 0x08C34 /* Clock Out Time Register Low - RW */ +#define IXGBE_CLKTIMH 0x08C38 /* Clock Out Time Register High - RW */ +#define IXGBE_FREQOUT0 0x08C34 /* Frequency Out 0 Control register - RW */ +#define 
IXGBE_FREQOUT1 0x08C38 /* Frequency Out 1 Control register - RW */ +#define IXGBE_AUXSTMPL0 0x08C3C /* Auxiliary Time Stamp 0 register Low - RO */ +#define IXGBE_AUXSTMPH0 0x08C40 /* Auxiliary Time Stamp 0 register High - RO */ +#define IXGBE_AUXSTMPL1 0x08C44 /* Auxiliary Time Stamp 1 register Low - RO */ +#define IXGBE_AUXSTMPH1 0x08C48 /* Auxiliary Time Stamp 1 register High - RO */ +#define IXGBE_TSIM 0x08C68 /* TimeSync Interrupt Mask Register - RW */ +#define IXGBE_TSICR 0x08C60 /* TimeSync Interrupt Cause Register - WO */ +#define IXGBE_TSSDP 0x0003C /* TimeSync SDP Configuration Register - RW */ + +/* Diagnostic Registers */ +#define IXGBE_RDSTATCTL 0x02C20 +#define IXGBE_RDSTAT(_i) (0x02C00 + ((_i) * 4)) /* 0x02C00-0x02C1C */ +#define IXGBE_RDHMPN 0x02F08 +#define IXGBE_RIC_DW(_i) (0x02F10 + ((_i) * 4)) +#define IXGBE_RDPROBE 0x02F20 +#define IXGBE_RDMAM 0x02F30 +#define IXGBE_RDMAD 0x02F34 +#define IXGBE_TDHMPN 0x07F08 +#define IXGBE_TDHMPN2 0x082FC +#define IXGBE_TXDESCIC 0x082CC +#define IXGBE_TIC_DW(_i) (0x07F10 + ((_i) * 4)) +#define IXGBE_TIC_DW2(_i) (0x082B0 + ((_i) * 4)) +#define IXGBE_TDPROBE 0x07F20 +#define IXGBE_TXBUFCTRL 0x0C600 +#define IXGBE_TXBUFDATA0 0x0C610 +#define IXGBE_TXBUFDATA1 0x0C614 +#define IXGBE_TXBUFDATA2 0x0C618 +#define IXGBE_TXBUFDATA3 0x0C61C +#define IXGBE_RXBUFCTRL 0x03600 +#define IXGBE_RXBUFDATA0 0x03610 +#define IXGBE_RXBUFDATA1 0x03614 +#define IXGBE_RXBUFDATA2 0x03618 +#define IXGBE_RXBUFDATA3 0x0361C +#define IXGBE_PCIE_DIAG(_i) (0x11090 + ((_i) * 4)) /* 8 of these */ +#define IXGBE_RFVAL 0x050A4 +#define IXGBE_MDFTC1 0x042B8 +#define IXGBE_MDFTC2 0x042C0 +#define IXGBE_MDFTFIFO1 0x042C4 +#define IXGBE_MDFTFIFO2 0x042C8 +#define IXGBE_MDFTS 0x042CC +#define IXGBE_RXDATAWRPTR(_i) (0x03700 + ((_i) * 4)) /* 8 of these 3700-370C*/ +#define IXGBE_RXDESCWRPTR(_i) (0x03710 + ((_i) * 4)) /* 8 of these 3710-371C*/ +#define IXGBE_RXDATARDPTR(_i) (0x03720 + ((_i) * 4)) /* 8 of these 3720-372C*/ +#define IXGBE_RXDESCRDPTR(_i) (0x03730 + ((_i) * 4)) /* 8 of these 3730-373C*/ +#define IXGBE_TXDATAWRPTR(_i) (0x0C700 + ((_i) * 4)) /* 8 of these C700-C70C*/ +#define IXGBE_TXDESCWRPTR(_i) (0x0C710 + ((_i) * 4)) /* 8 of these C710-C71C*/ +#define IXGBE_TXDATARDPTR(_i) (0x0C720 + ((_i) * 4)) /* 8 of these C720-C72C*/ +#define IXGBE_TXDESCRDPTR(_i) (0x0C730 + ((_i) * 4)) /* 8 of these C730-C73C*/ +#define IXGBE_PCIEECCCTL 0x1106C +#define IXGBE_RXWRPTR(_i) (0x03100 + ((_i) * 4)) /* 8 of these 3100-310C*/ +#define IXGBE_RXUSED(_i) (0x03120 + ((_i) * 4)) /* 8 of these 3120-312C*/ +#define IXGBE_RXRDPTR(_i) (0x03140 + ((_i) * 4)) /* 8 of these 3140-314C*/ +#define IXGBE_RXRDWRPTR(_i) (0x03160 + ((_i) * 4)) /* 8 of these 3160-310C*/ +#define IXGBE_TXWRPTR(_i) (0x0C100 + ((_i) * 4)) /* 8 of these C100-C10C*/ +#define IXGBE_TXUSED(_i) (0x0C120 + ((_i) * 4)) /* 8 of these C120-C12C*/ +#define IXGBE_TXRDPTR(_i) (0x0C140 + ((_i) * 4)) /* 8 of these C140-C14C*/ +#define IXGBE_TXRDWRPTR(_i) (0x0C160 + ((_i) * 4)) /* 8 of these C160-C10C*/ +#define IXGBE_PCIEECCCTL0 0x11100 +#define IXGBE_PCIEECCCTL1 0x11104 +#define IXGBE_RXDBUECC 0x03F70 +#define IXGBE_TXDBUECC 0x0CF70 +#define IXGBE_RXDBUEST 0x03F74 +#define IXGBE_TXDBUEST 0x0CF74 +#define IXGBE_PBTXECC 0x0C300 +#define IXGBE_PBRXECC 0x03300 +#define IXGBE_GHECCR 0x110B0 + +/* MAC Registers */ +#define IXGBE_PCS1GCFIG 0x04200 +#define IXGBE_PCS1GLCTL 0x04208 +#define IXGBE_PCS1GLSTA 0x0420C +#define IXGBE_PCS1GDBG0 0x04210 +#define IXGBE_PCS1GDBG1 0x04214 +#define IXGBE_PCS1GANA 0x04218 +#define IXGBE_PCS1GANLP 0x0421C 
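The per-MAC accessor macros defined earlier in this header (IXGBE_FWSM_BY_MAC, IXGBE_SWSM_BY_MAC, IXGBE_CIAA_BY_MAC, and so on) resolve a register's offset through hw->mvals rather than a fixed constant, so parts whose manageability registers moved, such as X550EM_a, need no per-MAC branching at the call site. A minimal usage sketch follows; it is an editor's illustration rather than upstream code, and it assumes the IXGBE_READ_REG() helper from ixgbe_osdep.h, the struct ixgbe_hw definition and *_IDX mvals enum that appear later in this header, and a hypothetical wrapper name.

/*
 * Illustrative sketch (not part of the upstream header): query FW/NVM
 * recovery mode through the per-MAC dispatch.  IXGBE_FWSM_BY_MAC(hw)
 * expands to hw->mvals[IXGBE_FWSM_IDX], which the MAC-specific init code
 * is expected to point at either IXGBE_FWSM (0x10148) or, on X550EM_a,
 * IXGBE_FWSM_X550EM_a (0x15F74).
 */
static inline bool ixgbe_example_fw_recovery_mode(struct ixgbe_hw *hw)
{
	u32 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw));

	/* Bit 5 of FWSM, defined above as IXGBE_FWSM_FW_NVM_RECOVERY_MODE */
	return !!(fwsm & IXGBE_FWSM_FW_NVM_RECOVERY_MODE);
}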
+#define IXGBE_PCS1GANNP 0x04220 +#define IXGBE_PCS1GANLPNP 0x04224 +#define IXGBE_HLREG0 0x04240 +#define IXGBE_HLREG1 0x04244 +#define IXGBE_PAP 0x04248 +#define IXGBE_MACA 0x0424C +#define IXGBE_APAE 0x04250 +#define IXGBE_ARD 0x04254 +#define IXGBE_AIS 0x04258 +#define IXGBE_MSCA 0x0425C +#define IXGBE_MSRWD 0x04260 +#define IXGBE_MLADD 0x04264 +#define IXGBE_MHADD 0x04268 +#define IXGBE_MAXFRS 0x04268 +#define IXGBE_TREG 0x0426C +#define IXGBE_PCSS1 0x04288 +#define IXGBE_PCSS2 0x0428C +#define IXGBE_XPCSS 0x04290 +#define IXGBE_MFLCN 0x04294 +#define IXGBE_SERDESC 0x04298 +#define IXGBE_MAC_SGMII_BUSY 0x04298 +#define IXGBE_MACS 0x0429C +#define IXGBE_AUTOC 0x042A0 +#define IXGBE_LINKS 0x042A4 +#define IXGBE_LINKS2 0x04324 +#define IXGBE_AUTOC2 0x042A8 +#define IXGBE_AUTOC3 0x042AC +#define IXGBE_ANLP1 0x042B0 +#define IXGBE_ANLP2 0x042B4 +#define IXGBE_MACC 0x04330 +#define IXGBE_ATLASCTL 0x04800 +#define IXGBE_MMNGC 0x042D0 +#define IXGBE_ANLPNP1 0x042D4 +#define IXGBE_ANLPNP2 0x042D8 +#define IXGBE_KRPCSFC 0x042E0 +#define IXGBE_KRPCSS 0x042E4 +#define IXGBE_FECS1 0x042E8 +#define IXGBE_FECS2 0x042EC +#define IXGBE_SMADARCTL 0x14F10 +#define IXGBE_MPVC 0x04318 +#define IXGBE_SGMIIC 0x04314 + +/* Statistics Registers */ +#define IXGBE_RXNFGPC 0x041B0 +#define IXGBE_RXNFGBCL 0x041B4 +#define IXGBE_RXNFGBCH 0x041B8 +#define IXGBE_RXDGPC 0x02F50 +#define IXGBE_RXDGBCL 0x02F54 +#define IXGBE_RXDGBCH 0x02F58 +#define IXGBE_RXDDGPC 0x02F5C +#define IXGBE_RXDDGBCL 0x02F60 +#define IXGBE_RXDDGBCH 0x02F64 +#define IXGBE_RXLPBKGPC 0x02F68 +#define IXGBE_RXLPBKGBCL 0x02F6C +#define IXGBE_RXLPBKGBCH 0x02F70 +#define IXGBE_RXDLPBKGPC 0x02F74 +#define IXGBE_RXDLPBKGBCL 0x02F78 +#define IXGBE_RXDLPBKGBCH 0x02F7C +#define IXGBE_TXDGPC 0x087A0 +#define IXGBE_TXDGBCL 0x087A4 +#define IXGBE_TXDGBCH 0x087A8 + +#define IXGBE_RXDSTATCTRL 0x02F40 + +/* Copper Pond 2 link timeout */ +#define IXGBE_VALIDATE_LINK_READY_TIMEOUT 50 + +/* Omer CORECTL */ +#define IXGBE_CORECTL 0x014F00 +/* BARCTRL */ +#define IXGBE_BARCTRL 0x110F4 +#define IXGBE_BARCTRL_FLSIZE 0x0700 +#define IXGBE_BARCTRL_FLSIZE_SHIFT 8 +#define IXGBE_BARCTRL_CSRSIZE 0x2000 + +/* RSCCTL Bit Masks */ +#define IXGBE_RSCCTL_RSCEN 0x01 +#define IXGBE_RSCCTL_MAXDESC_1 0x00 +#define IXGBE_RSCCTL_MAXDESC_4 0x04 +#define IXGBE_RSCCTL_MAXDESC_8 0x08 +#define IXGBE_RSCCTL_MAXDESC_16 0x0C +#define IXGBE_RSCCTL_TS_DIS 0x02 + +/* RSCDBU Bit Masks */ +#define IXGBE_RSCDBU_RSCSMALDIS_MASK 0x0000007F +#define IXGBE_RSCDBU_RSCACKDIS 0x00000080 + +/* RDRXCTL Bit Masks */ +#define IXGBE_RDRXCTL_RDMTS_1_2 0x00000000 /* Rx Desc Min THLD Size */ +#define IXGBE_RDRXCTL_CRCSTRIP 0x00000002 /* CRC Strip */ +#define IXGBE_RDRXCTL_PSP 0x00000004 /* Pad Small Packet */ +#define IXGBE_RDRXCTL_MVMEN 0x00000020 +#define IXGBE_RDRXCTL_RSC_PUSH_DIS 0x00000020 +#define IXGBE_RDRXCTL_DMAIDONE 0x00000008 /* DMA init cycle done */ +#define IXGBE_RDRXCTL_RSC_PUSH 0x00000080 +#define IXGBE_RDRXCTL_AGGDIS 0x00010000 /* Aggregation disable */ +#define IXGBE_RDRXCTL_RSCFRSTSIZE 0x003E0000 /* RSC First packet size */ +#define IXGBE_RDRXCTL_RSCLLIDIS 0x00800000 /* Disable RSC compl on LLI*/ +#define IXGBE_RDRXCTL_RSCACKC 0x02000000 /* must set 1 when RSC ena */ +#define IXGBE_RDRXCTL_FCOE_WRFIX 0x04000000 /* must set 1 when RSC ena */ +#define IXGBE_RDRXCTL_MBINTEN 0x10000000 +#define IXGBE_RDRXCTL_MDP_EN 0x20000000 + +/* RQTC Bit Masks and Shifts */ +#define IXGBE_RQTC_SHIFT_TC(_i) ((_i) * 4) +#define IXGBE_RQTC_TC0_MASK (0x7 << 0) +#define IXGBE_RQTC_TC1_MASK (0x7 << 4) +#define 
IXGBE_RQTC_TC2_MASK (0x7 << 8) +#define IXGBE_RQTC_TC3_MASK (0x7 << 12) +#define IXGBE_RQTC_TC4_MASK (0x7 << 16) +#define IXGBE_RQTC_TC5_MASK (0x7 << 20) +#define IXGBE_RQTC_TC6_MASK (0x7 << 24) +#define IXGBE_RQTC_TC7_MASK (0x7 << 28) + +/* PSRTYPE.RQPL Bit masks and shift */ +#define IXGBE_PSRTYPE_RQPL_MASK 0x7 +#define IXGBE_PSRTYPE_RQPL_SHIFT 29 + +/* CTRL Bit Masks */ +#define IXGBE_CTRL_GIO_DIS 0x00000004 /* Global IO Master Disable bit */ +#define IXGBE_CTRL_LNK_RST 0x00000008 /* Link Reset. Resets everything. */ +#define IXGBE_CTRL_RST 0x04000000 /* Reset (SW) */ +#define IXGBE_CTRL_RST_MASK (IXGBE_CTRL_LNK_RST | IXGBE_CTRL_RST) + +/* FACTPS */ +#define IXGBE_FACTPS_MNGCG 0x20000000 /* Manageblility Clock Gated */ +#define IXGBE_FACTPS_LFS 0x40000000 /* LAN Function Select */ + +/* MHADD Bit Masks */ +#define IXGBE_MHADD_MFS_MASK 0xFFFF0000 +#define IXGBE_MHADD_MFS_SHIFT 16 + +/* Extended Device Control */ +#define IXGBE_CTRL_EXT_PFRSTD 0x00004000 /* Physical Function Reset Done */ +#define IXGBE_CTRL_EXT_NS_DIS 0x00010000 /* No Snoop disable */ +#define IXGBE_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */ +#define IXGBE_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */ + +/* Direct Cache Access (DCA) definitions */ +#define IXGBE_DCA_CTRL_DCA_ENABLE 0x00000000 /* DCA Enable */ +#define IXGBE_DCA_CTRL_DCA_DISABLE 0x00000001 /* DCA Disable */ + +#define IXGBE_DCA_CTRL_DCA_MODE_CB1 0x00 /* DCA Mode CB1 */ +#define IXGBE_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */ + +#define IXGBE_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */ +#define IXGBE_DCA_RXCTRL_CPUID_MASK_82599 0xFF000000 /* Rx CPUID Mask */ +#define IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599 24 /* Rx CPUID Shift */ +#define IXGBE_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* Rx Desc enable */ +#define IXGBE_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* Rx Desc header ena */ +#define IXGBE_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* Rx Desc payload ena */ +#define IXGBE_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* Rx rd Desc Relax Order */ +#define IXGBE_DCA_RXCTRL_DATA_WRO_EN (1 << 13) /* Rx wr data Relax Order */ +#define IXGBE_DCA_RXCTRL_HEAD_WRO_EN (1 << 15) /* Rx wr header RO */ + +#define IXGBE_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */ +#define IXGBE_DCA_TXCTRL_CPUID_MASK_82599 0xFF000000 /* Tx CPUID Mask */ +#define IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599 24 /* Tx CPUID Shift */ +#define IXGBE_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */ +#define IXGBE_DCA_TXCTRL_DESC_RRO_EN (1 << 9) /* Tx rd Desc Relax Order */ +#define IXGBE_DCA_TXCTRL_DESC_WRO_EN (1 << 11) /* Tx Desc writeback RO bit */ +#define IXGBE_DCA_TXCTRL_DATA_RRO_EN (1 << 13) /* Tx rd data Relax Order */ +#define IXGBE_DCA_MAX_QUEUES_82598 16 /* DCA regs only on 16 queues */ + +/* MSCA Bit Masks */ +#define IXGBE_MSCA_NP_ADDR_MASK 0x0000FFFF /* MDI Addr (new prot) */ +#define IXGBE_MSCA_NP_ADDR_SHIFT 0 +#define IXGBE_MSCA_DEV_TYPE_MASK 0x001F0000 /* Dev Type (new prot) */ +#define IXGBE_MSCA_DEV_TYPE_SHIFT 16 /* Register Address (old prot */ +#define IXGBE_MSCA_PHY_ADDR_MASK 0x03E00000 /* PHY Address mask */ +#define IXGBE_MSCA_PHY_ADDR_SHIFT 21 /* PHY Address shift*/ +#define IXGBE_MSCA_OP_CODE_MASK 0x0C000000 /* OP CODE mask */ +#define IXGBE_MSCA_OP_CODE_SHIFT 26 /* OP CODE shift */ +#define IXGBE_MSCA_ADDR_CYCLE 0x00000000 /* OP CODE 00 (addr cycle) */ +#define IXGBE_MSCA_WRITE 0x04000000 /* OP CODE 01 (wr) */ +#define IXGBE_MSCA_READ 0x0C000000 /* OP CODE 11 (rd) */ +#define IXGBE_MSCA_READ_AUTOINC 0x08000000 /* OP CODE 10 (rd auto inc)*/ +#define 
IXGBE_MSCA_ST_CODE_MASK 0x30000000 /* ST Code mask */ +#define IXGBE_MSCA_ST_CODE_SHIFT 28 /* ST Code shift */ +#define IXGBE_MSCA_NEW_PROTOCOL 0x00000000 /* ST CODE 00 (new prot) */ +#define IXGBE_MSCA_OLD_PROTOCOL 0x10000000 /* ST CODE 01 (old prot) */ +#define IXGBE_MSCA_MDI_COMMAND 0x40000000 /* Initiate MDI command */ +#define IXGBE_MSCA_MDI_IN_PROG_EN 0x80000000 /* MDI in progress ena */ + +/* MSRWD bit masks */ +#define IXGBE_MSRWD_WRITE_DATA_MASK 0x0000FFFF +#define IXGBE_MSRWD_WRITE_DATA_SHIFT 0 +#define IXGBE_MSRWD_READ_DATA_MASK 0xFFFF0000 +#define IXGBE_MSRWD_READ_DATA_SHIFT 16 + +/* Atlas registers */ +#define IXGBE_ATLAS_PDN_LPBK 0x24 +#define IXGBE_ATLAS_PDN_10G 0xB +#define IXGBE_ATLAS_PDN_1G 0xC +#define IXGBE_ATLAS_PDN_AN 0xD + +/* Atlas bit masks */ +#define IXGBE_ATLASCTL_WRITE_CMD 0x00010000 +#define IXGBE_ATLAS_PDN_TX_REG_EN 0x10 +#define IXGBE_ATLAS_PDN_TX_10G_QL_ALL 0xF0 +#define IXGBE_ATLAS_PDN_TX_1G_QL_ALL 0xF0 +#define IXGBE_ATLAS_PDN_TX_AN_QL_ALL 0xF0 + +/* Omer bit masks */ +#define IXGBE_CORECTL_WRITE_CMD 0x00010000 + +/* Device Type definitions for new protocol MDIO commands */ +#define IXGBE_MDIO_ZERO_DEV_TYPE 0x0 +#define IXGBE_MDIO_PMA_PMD_DEV_TYPE 0x1 +#define IXGBE_MDIO_PCS_DEV_TYPE 0x3 +#define IXGBE_MDIO_PHY_XS_DEV_TYPE 0x4 +#define IXGBE_MDIO_AUTO_NEG_DEV_TYPE 0x7 +#define IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE 0x1E /* Device 30 */ +#define IXGBE_TWINAX_DEV 1 + +#define IXGBE_MDIO_COMMAND_TIMEOUT 100 /* PHY Timeout for 1 GB mode */ + +#define IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL 0x0 /* VS1 Ctrl Reg */ +#define IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS 0x1 /* VS1 Status Reg */ +#define IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS 0x0008 /* 1 = Link Up */ +#define IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS 0x0010 /* 0-10G, 1-1G */ +#define IXGBE_MDIO_VENDOR_SPECIFIC_1_10G_SPEED 0x0018 +#define IXGBE_MDIO_VENDOR_SPECIFIC_1_1G_SPEED 0x0010 + +#define IXGBE_MDIO_AUTO_NEG_CONTROL 0x0 /* AUTO_NEG Control Reg */ +#define IXGBE_MDIO_AUTO_NEG_STATUS 0x1 /* AUTO_NEG Status Reg */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STAT 0xC800 /* AUTO_NEG Vendor Status Reg */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM 0xCC00 /* AUTO_NEG Vendor TX Reg */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM2 0xCC01 /* AUTO_NEG Vendor Tx Reg */ +#define IXGBE_MDIO_AUTO_NEG_VEN_LSC 0x1 /* AUTO_NEG Vendor Tx LSC */ +#define IXGBE_MDIO_AUTO_NEG_ADVT 0x10 /* AUTO_NEG Advt Reg */ +#define IXGBE_MDIO_AUTO_NEG_LP 0x13 /* AUTO_NEG LP Status Reg */ +#define IXGBE_MDIO_AUTO_NEG_EEE_ADVT 0x3C /* AUTO_NEG EEE Advt Reg */ +#define IXGBE_AUTO_NEG_10GBASE_EEE_ADVT 0x8 /* AUTO NEG EEE 10GBaseT Advt */ +#define IXGBE_AUTO_NEG_1000BASE_EEE_ADVT 0x4 /* AUTO NEG EEE 1000BaseT Advt */ +#define IXGBE_AUTO_NEG_100BASE_EEE_ADVT 0x2 /* AUTO NEG EEE 100BaseT Advt */ +#define IXGBE_MDIO_PHY_XS_CONTROL 0x0 /* PHY_XS Control Reg */ +#define IXGBE_MDIO_PHY_XS_RESET 0x8000 /* PHY_XS Reset */ +#define IXGBE_MDIO_PHY_ID_HIGH 0x2 /* PHY ID High Reg*/ +#define IXGBE_MDIO_PHY_ID_LOW 0x3 /* PHY ID Low Reg*/ +#define IXGBE_MDIO_PHY_SPEED_ABILITY 0x4 /* Speed Ability Reg */ +#define IXGBE_MDIO_PHY_SPEED_10G 0x0001 /* 10G capable */ +#define IXGBE_MDIO_PHY_SPEED_1G 0x0010 /* 1G capable */ +#define IXGBE_MDIO_PHY_SPEED_100M 0x0020 /* 100M capable */ +#define IXGBE_MDIO_PHY_EXT_ABILITY 0xB /* Ext Ability Reg */ +#define IXGBE_MDIO_PHY_10GBASET_ABILITY 0x0004 /* 10GBaseT capable */ +#define IXGBE_MDIO_PHY_1000BASET_ABILITY 0x0020 /* 1000BaseT capable */ +#define IXGBE_MDIO_PHY_100BASETX_ABILITY 0x0080 /* 100BaseTX capable */ +#define 
IXGBE_MDIO_PHY_SET_LOW_POWER_MODE 0x0800 /* Set low power mode */ +#define IXGBE_AUTO_NEG_LP_STATUS 0xE820 /* AUTO NEG Rx LP Status Reg */ +#define IXGBE_AUTO_NEG_LP_1000BASE_CAP 0x8000 /* AUTO NEG Rx LP 1000BaseT Cap */ +#define IXGBE_AUTO_NEG_LP_10GBASE_CAP 0x0800 /* AUTO NEG Rx LP 10GBaseT Cap */ +#define IXGBE_AUTO_NEG_10GBASET_STAT 0x0021 /* AUTO NEG 10G BaseT Stat */ + +#define IXGBE_MDIO_TX_VENDOR_ALARMS_3 0xCC02 /* Vendor Alarms 3 Reg */ +#define IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK 0x3 /* PHY Reset Complete Mask */ +#define IXGBE_MDIO_GLOBAL_RES_PR_10 0xC479 /* Global Resv Provisioning 10 Reg */ +#define IXGBE_MDIO_POWER_UP_STALL 0x8000 /* Power Up Stall */ +#define IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK 0xFF00 /* int std mask */ +#define IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG 0xFC00 /* chip std int flag */ +#define IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK 0xFF01 /* int chip-wide mask */ +#define IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_FLAG 0xFC01 /* int chip-wide mask */ +#define IXGBE_MDIO_GLOBAL_ALARM_1 0xCC00 /* Global alarm 1 */ +#define IXGBE_MDIO_GLOBAL_ALM_1_DEV_FAULT 0x0010 /* device fault */ +#define IXGBE_MDIO_GLOBAL_ALM_1_HI_TMP_FAIL 0x4000 /* high temp failure */ +#define IXGBE_MDIO_GLOBAL_FAULT_MSG 0xC850 /* Global Fault Message */ +#define IXGBE_MDIO_GLOBAL_FAULT_MSG_HI_TMP 0x8007 /* high temp failure */ +#define IXGBE_MDIO_GLOBAL_INT_MASK 0xD400 /* Global int mask */ +#define IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN 0x1000 /* autoneg vendor alarm int enable */ +#define IXGBE_MDIO_GLOBAL_ALARM_1_INT 0x4 /* int in Global alarm 1 */ +#define IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN 0x1 /* vendor alarm int enable */ +#define IXGBE_MDIO_GLOBAL_STD_ALM2_INT 0x200 /* vendor alarm2 int mask */ +#define IXGBE_MDIO_GLOBAL_INT_HI_TEMP_EN 0x4000 /* int high temp enable */ +#define IXGBE_MDIO_GLOBAL_INT_DEV_FAULT_EN 0x0010 /* int dev fault enable */ +#define IXGBE_MDIO_PMA_PMD_CONTROL_ADDR 0x0000 /* PMA/PMD Control Reg */ +#define IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR 0xC30A /* PHY_XS SDA/SCL Addr Reg */ +#define IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA 0xC30B /* PHY_XS SDA/SCL Data Reg */ +#define IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT 0xC30C /* PHY_XS SDA/SCL Status Reg */ +#define IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK 0xD401 /* PHY TX Vendor LASI */ +#define IXGBE_MDIO_PMA_TX_VEN_LASI_INT_EN 0x1 /* PHY TX Vendor LASI enable */ +#define IXGBE_MDIO_PMD_STD_TX_DISABLE_CNTR 0x9 /* Standard Transmit Dis Reg */ +#define IXGBE_MDIO_PMD_GLOBAL_TX_DISABLE 0x0001 /* PMD Global Transmit Dis */ + +#define IXGBE_PCRC8ECL 0x0E810 /* PCR CRC-8 Error Count Lo */ +#define IXGBE_PCRC8ECH 0x0E811 /* PCR CRC-8 Error Count Hi */ +#define IXGBE_PCRC8ECH_MASK 0x1F +#define IXGBE_LDPCECL 0x0E820 /* PCR Uncorrected Error Count Lo */ +#define IXGBE_LDPCECH 0x0E821 /* PCR Uncorrected Error Count Hi */ + +/* MII clause 22/28 definitions */ +#define IXGBE_MDIO_PHY_LOW_POWER_MODE 0x0800 + +#define IXGBE_MDIO_XENPAK_LASI_STATUS 0x9005 /* XENPAK LASI Status register*/ +#define IXGBE_XENPAK_LASI_LINK_STATUS_ALARM 0x1 /* Link Status Alarm change */ + +#define IXGBE_MDIO_AUTO_NEG_LINK_STATUS 0x4 /* Indicates if link is up */ + +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_MASK 0x7 /* Speed/Duplex Mask */ +#define IXGBE_MDIO_AUTO_NEG_VEN_STAT_SPEED_MASK 0x6 /* Speed Mask */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10M_HALF 0x0 /* 10Mb/s Half Duplex */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10M_FULL 0x1 /* 10Mb/s Full Duplex */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_100M_HALF 0x2 /* 100Mb/s Half Duplex */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_100M_FULL 
0x3 /* 100Mb/s Full Duplex */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_HALF 0x4 /* 1Gb/s Half Duplex */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_FULL 0x5 /* 1Gb/s Full Duplex */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_HALF 0x6 /* 10Gb/s Half Duplex */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_FULL 0x7 /* 10Gb/s Full Duplex */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB 0x4 /* 1Gb/s */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB 0x6 /* 10Gb/s */ + +#define IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG 0x20 /* 10G Control Reg */ +#define IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG 0xC400 /* 1G Provisioning 1 */ +#define IXGBE_MII_AUTONEG_XNP_TX_REG 0x17 /* 1G XNP Transmit */ +#define IXGBE_MII_AUTONEG_ADVERTISE_REG 0x10 /* 100M Advertisement */ +#define IXGBE_MII_10GBASE_T_ADVERTISE 0x1000 /* full duplex, bit:12*/ +#define IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX 0x4000 /* full duplex, bit:14*/ +#define IXGBE_MII_1GBASE_T_ADVERTISE 0x8000 /* full duplex, bit:15*/ +#define IXGBE_MII_2_5GBASE_T_ADVERTISE 0x0400 +#define IXGBE_MII_5GBASE_T_ADVERTISE 0x0800 +#define IXGBE_MII_100BASE_T_ADVERTISE 0x0100 /* full duplex, bit:8 */ +#define IXGBE_MII_100BASE_T_ADVERTISE_HALF 0x0080 /* half duplex, bit:7 */ +#define IXGBE_MII_RESTART 0x200 +#define IXGBE_MII_AUTONEG_COMPLETE 0x20 +#define IXGBE_MII_AUTONEG_LINK_UP 0x04 +#define IXGBE_MII_AUTONEG_REG 0x0 + +#define IXGBE_PHY_REVISION_MASK 0xFFFFFFF0 +#define IXGBE_MAX_PHY_ADDR 32 + +/* PHY IDs*/ +#define TN1010_PHY_ID 0x00A19410 +#define TNX_FW_REV 0xB +#define X540_PHY_ID 0x01540200 +#define X550_PHY_ID2 0x01540223 +#define X550_PHY_ID3 0x01540221 +#define X557_PHY_ID 0x01540240 +#define X557_PHY_ID2 0x01540250 +#define AQ_FW_REV 0x20 +#define QT2022_PHY_ID 0x0043A400 +#define ATH_PHY_ID 0x03429050 + +/* PHY Types */ +#define IXGBE_M88E1500_E_PHY_ID 0x01410DD0 +#define IXGBE_M88E1543_E_PHY_ID 0x01410EA0 + +/* Special PHY Init Routine */ +#define IXGBE_PHY_INIT_OFFSET_NL 0x002B +#define IXGBE_PHY_INIT_END_NL 0xFFFF +#define IXGBE_CONTROL_MASK_NL 0xF000 +#define IXGBE_DATA_MASK_NL 0x0FFF +#define IXGBE_CONTROL_SHIFT_NL 12 +#define IXGBE_DELAY_NL 0 +#define IXGBE_DATA_NL 1 +#define IXGBE_CONTROL_NL 0x000F +#define IXGBE_CONTROL_EOL_NL 0x0FFF +#define IXGBE_CONTROL_SOL_NL 0x0000 + +/* General purpose Interrupt Enable */ +#define IXGBE_SDP0_GPIEN 0x00000001 /* SDP0 */ +#define IXGBE_SDP1_GPIEN 0x00000002 /* SDP1 */ +#define IXGBE_SDP2_GPIEN 0x00000004 /* SDP2 */ +#define IXGBE_SDP0_GPIEN_X540 0x00000002 /* SDP0 on X540 and X550 */ +#define IXGBE_SDP1_GPIEN_X540 0x00000004 /* SDP1 on X540 and X550 */ +#define IXGBE_SDP2_GPIEN_X540 0x00000008 /* SDP2 on X540 and X550 */ +#define IXGBE_SDP0_GPIEN_X550 IXGBE_SDP0_GPIEN_X540 +#define IXGBE_SDP1_GPIEN_X550 IXGBE_SDP1_GPIEN_X540 +#define IXGBE_SDP2_GPIEN_X550 IXGBE_SDP2_GPIEN_X540 +#define IXGBE_SDP0_GPIEN_X550EM_x IXGBE_SDP0_GPIEN_X540 +#define IXGBE_SDP1_GPIEN_X550EM_x IXGBE_SDP1_GPIEN_X540 +#define IXGBE_SDP2_GPIEN_X550EM_x IXGBE_SDP2_GPIEN_X540 +#define IXGBE_SDP0_GPIEN_X550EM_a IXGBE_SDP0_GPIEN_X540 +#define IXGBE_SDP1_GPIEN_X550EM_a IXGBE_SDP1_GPIEN_X540 +#define IXGBE_SDP2_GPIEN_X550EM_a IXGBE_SDP2_GPIEN_X540 +#define IXGBE_SDP0_GPIEN_BY_MAC(_hw) IXGBE_BY_MAC((_hw), SDP0_GPIEN) +#define IXGBE_SDP1_GPIEN_BY_MAC(_hw) IXGBE_BY_MAC((_hw), SDP1_GPIEN) +#define IXGBE_SDP2_GPIEN_BY_MAC(_hw) IXGBE_BY_MAC((_hw), SDP2_GPIEN) + +#define IXGBE_GPIE_MSIX_MODE 0x00000010 /* MSI-X mode */ +#define IXGBE_GPIE_OCD 0x00000020 /* Other Clear Disable */ +#define IXGBE_GPIE_EIMEN 0x00000040 /* Immediate 
Interrupt Enable */ +#define IXGBE_GPIE_EIAME 0x40000000 +#define IXGBE_GPIE_PBA_SUPPORT 0x80000000 +#define IXGBE_GPIE_RSC_DELAY_SHIFT 11 +#define IXGBE_GPIE_VTMODE_MASK 0x0000C000 /* VT Mode Mask */ +#define IXGBE_GPIE_VTMODE_16 0x00004000 /* 16 VFs 8 queues per VF */ +#define IXGBE_GPIE_VTMODE_32 0x00008000 /* 32 VFs 4 queues per VF */ +#define IXGBE_GPIE_VTMODE_64 0x0000C000 /* 64 VFs 2 queues per VF */ + +/* Packet Buffer Initialization */ +#define IXGBE_MAX_PACKET_BUFFERS 8 + +#define IXGBE_TXPBSIZE_20KB 0x00005000 /* 20KB Packet Buffer */ +#define IXGBE_TXPBSIZE_40KB 0x0000A000 /* 40KB Packet Buffer */ +#define IXGBE_RXPBSIZE_48KB 0x0000C000 /* 48KB Packet Buffer */ +#define IXGBE_RXPBSIZE_64KB 0x00010000 /* 64KB Packet Buffer */ +#define IXGBE_RXPBSIZE_80KB 0x00014000 /* 80KB Packet Buffer */ +#define IXGBE_RXPBSIZE_128KB 0x00020000 /* 128KB Packet Buffer */ +#define IXGBE_RXPBSIZE_MAX 0x00080000 /* 512KB Packet Buffer */ +#define IXGBE_TXPBSIZE_MAX 0x00028000 /* 160KB Packet Buffer */ + +#define IXGBE_TXPKT_SIZE_MAX 0xA /* Max Tx Packet size */ +#define IXGBE_MAX_PB 8 + +/* Packet buffer allocation strategies */ +enum { + PBA_STRATEGY_EQUAL = 0, /* Distribute PB space equally */ +#define PBA_STRATEGY_EQUAL PBA_STRATEGY_EQUAL + PBA_STRATEGY_WEIGHTED = 1, /* Weight front half of TCs */ +#define PBA_STRATEGY_WEIGHTED PBA_STRATEGY_WEIGHTED +}; + +/* Transmit Flow Control status */ +#define IXGBE_TFCS_TXOFF 0x00000001 +#define IXGBE_TFCS_TXOFF0 0x00000100 +#define IXGBE_TFCS_TXOFF1 0x00000200 +#define IXGBE_TFCS_TXOFF2 0x00000400 +#define IXGBE_TFCS_TXOFF3 0x00000800 +#define IXGBE_TFCS_TXOFF4 0x00001000 +#define IXGBE_TFCS_TXOFF5 0x00002000 +#define IXGBE_TFCS_TXOFF6 0x00004000 +#define IXGBE_TFCS_TXOFF7 0x00008000 + +/* TCP Timer */ +#define IXGBE_TCPTIMER_KS 0x00000100 +#define IXGBE_TCPTIMER_COUNT_ENABLE 0x00000200 +#define IXGBE_TCPTIMER_COUNT_FINISH 0x00000400 +#define IXGBE_TCPTIMER_LOOP 0x00000800 +#define IXGBE_TCPTIMER_DURATION_MASK 0x000000FF + +/* HLREG0 Bit Masks */ +#define IXGBE_HLREG0_TXCRCEN 0x00000001 /* bit 0 */ +#define IXGBE_HLREG0_RXCRCSTRP 0x00000002 /* bit 1 */ +#define IXGBE_HLREG0_JUMBOEN 0x00000004 /* bit 2 */ +#define IXGBE_HLREG0_TXPADEN 0x00000400 /* bit 10 */ +#define IXGBE_HLREG0_TXPAUSEEN 0x00001000 /* bit 12 */ +#define IXGBE_HLREG0_RXPAUSEEN 0x00004000 /* bit 14 */ +#define IXGBE_HLREG0_LPBK 0x00008000 /* bit 15 */ +#define IXGBE_HLREG0_MDCSPD 0x00010000 /* bit 16 */ +#define IXGBE_HLREG0_CONTMDC 0x00020000 /* bit 17 */ +#define IXGBE_HLREG0_CTRLFLTR 0x00040000 /* bit 18 */ +#define IXGBE_HLREG0_PREPEND 0x00F00000 /* bits 20-23 */ +#define IXGBE_HLREG0_PRIPAUSEEN 0x01000000 /* bit 24 */ +#define IXGBE_HLREG0_RXPAUSERECDA 0x06000000 /* bits 25-26 */ +#define IXGBE_HLREG0_RXLNGTHERREN 0x08000000 /* bit 27 */ +#define IXGBE_HLREG0_RXPADSTRIPEN 0x10000000 /* bit 28 */ + +/* VMD_CTL bitmasks */ +#define IXGBE_VMD_CTL_VMDQ_EN 0x00000001 +#define IXGBE_VMD_CTL_VMDQ_FILTER 0x00000002 + +/* VT_CTL bitmasks */ +#define IXGBE_VT_CTL_DIS_DEFPL 0x20000000 /* disable default pool */ +#define IXGBE_VT_CTL_REPLEN 0x40000000 /* replication enabled */ +#define IXGBE_VT_CTL_VT_ENABLE 0x00000001 /* Enable VT Mode */ +#define IXGBE_VT_CTL_POOL_SHIFT 7 +#define IXGBE_VT_CTL_POOL_MASK (0x3F << IXGBE_VT_CTL_POOL_SHIFT) + +/* VMOLR bitmasks */ +#define IXGBE_VMOLR_UPE 0x00400000 /* unicast promiscuous */ +#define IXGBE_VMOLR_VPE 0x00800000 /* VLAN promiscuous */ +#define IXGBE_VMOLR_AUPE 0x01000000 /* accept untagged packets */ +#define IXGBE_VMOLR_ROMPE 0x02000000 /* accept 
packets in MTA tbl */ +#define IXGBE_VMOLR_ROPE 0x04000000 /* accept packets in UC tbl */ +#define IXGBE_VMOLR_BAM 0x08000000 /* accept broadcast packets */ +#define IXGBE_VMOLR_MPE 0x10000000 /* multicast promiscuous */ + +/* VFRE bitmask */ +#define IXGBE_VFRE_ENABLE_ALL 0xFFFFFFFF + +#define IXGBE_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */ + +/* RDHMPN and TDHMPN bitmasks */ +#define IXGBE_RDHMPN_RDICADDR 0x007FF800 +#define IXGBE_RDHMPN_RDICRDREQ 0x00800000 +#define IXGBE_RDHMPN_RDICADDR_SHIFT 11 +#define IXGBE_TDHMPN_TDICADDR 0x003FF800 +#define IXGBE_TDHMPN_TDICRDREQ 0x00800000 +#define IXGBE_TDHMPN_TDICADDR_SHIFT 11 + +#define IXGBE_RDMAM_MEM_SEL_SHIFT 13 +#define IXGBE_RDMAM_DWORD_SHIFT 9 +#define IXGBE_RDMAM_DESC_COMP_FIFO 1 +#define IXGBE_RDMAM_DFC_CMD_FIFO 2 +#define IXGBE_RDMAM_RSC_HEADER_ADDR 3 +#define IXGBE_RDMAM_TCN_STATUS_RAM 4 +#define IXGBE_RDMAM_WB_COLL_FIFO 5 +#define IXGBE_RDMAM_QSC_CNT_RAM 6 +#define IXGBE_RDMAM_QSC_FCOE_RAM 7 +#define IXGBE_RDMAM_QSC_QUEUE_CNT 8 +#define IXGBE_RDMAM_QSC_QUEUE_RAM 0xA +#define IXGBE_RDMAM_QSC_RSC_RAM 0xB +#define IXGBE_RDMAM_DESC_COM_FIFO_RANGE 135 +#define IXGBE_RDMAM_DESC_COM_FIFO_COUNT 4 +#define IXGBE_RDMAM_DFC_CMD_FIFO_RANGE 48 +#define IXGBE_RDMAM_DFC_CMD_FIFO_COUNT 7 +#define IXGBE_RDMAM_RSC_HEADER_ADDR_RANGE 32 +#define IXGBE_RDMAM_RSC_HEADER_ADDR_COUNT 4 +#define IXGBE_RDMAM_TCN_STATUS_RAM_RANGE 256 +#define IXGBE_RDMAM_TCN_STATUS_RAM_COUNT 9 +#define IXGBE_RDMAM_WB_COLL_FIFO_RANGE 8 +#define IXGBE_RDMAM_WB_COLL_FIFO_COUNT 4 +#define IXGBE_RDMAM_QSC_CNT_RAM_RANGE 64 +#define IXGBE_RDMAM_QSC_CNT_RAM_COUNT 4 +#define IXGBE_RDMAM_QSC_FCOE_RAM_RANGE 512 +#define IXGBE_RDMAM_QSC_FCOE_RAM_COUNT 5 +#define IXGBE_RDMAM_QSC_QUEUE_CNT_RANGE 32 +#define IXGBE_RDMAM_QSC_QUEUE_CNT_COUNT 4 +#define IXGBE_RDMAM_QSC_QUEUE_RAM_RANGE 128 +#define IXGBE_RDMAM_QSC_QUEUE_RAM_COUNT 8 +#define IXGBE_RDMAM_QSC_RSC_RAM_RANGE 32 +#define IXGBE_RDMAM_QSC_RSC_RAM_COUNT 8 + +#define IXGBE_TXDESCIC_READY 0x80000000 + +/* Receive Checksum Control */ +#define IXGBE_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */ +#define IXGBE_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */ + +/* FCRTL Bit Masks */ +#define IXGBE_FCRTL_XONE 0x80000000 /* XON enable */ +#define IXGBE_FCRTH_FCEN 0x80000000 /* Packet buffer fc enable */ + +/* PAP bit masks*/ +#define IXGBE_PAP_TXPAUSECNT_MASK 0x0000FFFF /* Pause counter mask */ + +/* RMCS Bit Masks */ +#define IXGBE_RMCS_RRM 0x00000002 /* Rx Recycle Mode enable */ +/* Receive Arbitration Control: 0 Round Robin, 1 DFP */ +#define IXGBE_RMCS_RAC 0x00000004 +/* Deficit Fixed Prio ena */ +#define IXGBE_RMCS_DFP IXGBE_RMCS_RAC +#define IXGBE_RMCS_TFCE_802_3X 0x00000008 /* Tx Priority FC ena */ +#define IXGBE_RMCS_TFCE_PRIORITY 0x00000010 /* Tx Priority FC ena */ +#define IXGBE_RMCS_ARBDIS 0x00000040 /* Arbitration disable bit */ + +/* FCCFG Bit Masks */ +#define IXGBE_FCCFG_TFCE_802_3X 0x00000008 /* Tx link FC enable */ +#define IXGBE_FCCFG_TFCE_PRIORITY 0x00000010 /* Tx priority FC enable */ + +/* Interrupt register bitmasks */ + +/* Extended Interrupt Cause Read */ +#define IXGBE_EICR_RTX_QUEUE 0x0000FFFF /* RTx Queue Interrupt */ +#define IXGBE_EICR_FLOW_DIR 0x00010000 /* FDir Exception */ +#define IXGBE_EICR_RX_MISS 0x00020000 /* Packet Buffer Overrun */ +#define IXGBE_EICR_PCI 0x00040000 /* PCI Exception */ +#define IXGBE_EICR_MAILBOX 0x00080000 /* VF to PF Mailbox Interrupt */ +#define IXGBE_EICR_LSC 0x00100000 /* Link Status Change */ +#define IXGBE_EICR_LINKSEC 0x00200000 /* PN Threshold */ 
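+
+/*
+ * Illustrative sketch, not part of the hardware definition: EICR is a
+ * read-to-clear cause register, and the EICS/EIMS/EIMC registers defined
+ * below reuse the same bit layout, so a driver normally reads EICR once and
+ * then tests individual cause bits.  The helper name is an example only; the
+ * u32 typedef is assumed to come from the accompanying osdep header.
+ */
+static inline int ixgbe_example_eicr_misc_cause(u32 eicr)
+{
+	/* Non-queue causes this sketch cares about: link status change,
+	 * PF/VF mailbox traffic and Flow Director exceptions.
+	 */
+	const u32 misc = IXGBE_EICR_LSC | IXGBE_EICR_MAILBOX |
+			 IXGBE_EICR_FLOW_DIR;
+
+	return (eicr & misc) != 0;
+}
+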
+#define IXGBE_EICR_MNG 0x00400000 /* Manageability Event Interrupt */ +#define IXGBE_EICR_TS 0x00800000 /* Thermal Sensor Event */ +#define IXGBE_EICR_TIMESYNC 0x01000000 /* Timesync Event */ +#define IXGBE_EICR_GPI_SDP0 0x01000000 /* Gen Purpose Interrupt on SDP0 */ +#define IXGBE_EICR_GPI_SDP1 0x02000000 /* Gen Purpose Interrupt on SDP1 */ +#define IXGBE_EICR_GPI_SDP2 0x04000000 /* Gen Purpose Interrupt on SDP2 */ +#define IXGBE_EICR_ECC 0x10000000 /* ECC Error */ +#define IXGBE_EICR_GPI_SDP0_X540 0x02000000 /* Gen Purpose Interrupt on SDP0 */ +#define IXGBE_EICR_GPI_SDP1_X540 0x04000000 /* Gen Purpose Interrupt on SDP1 */ +#define IXGBE_EICR_GPI_SDP2_X540 0x08000000 /* Gen Purpose Interrupt on SDP2 */ +#define IXGBE_EICR_GPI_SDP0_X550 IXGBE_EICR_GPI_SDP0_X540 +#define IXGBE_EICR_GPI_SDP1_X550 IXGBE_EICR_GPI_SDP1_X540 +#define IXGBE_EICR_GPI_SDP2_X550 IXGBE_EICR_GPI_SDP2_X540 +#define IXGBE_EICR_GPI_SDP0_X550EM_x IXGBE_EICR_GPI_SDP0_X540 +#define IXGBE_EICR_GPI_SDP1_X550EM_x IXGBE_EICR_GPI_SDP1_X540 +#define IXGBE_EICR_GPI_SDP2_X550EM_x IXGBE_EICR_GPI_SDP2_X540 +#define IXGBE_EICR_GPI_SDP0_X550EM_a IXGBE_EICR_GPI_SDP0_X540 +#define IXGBE_EICR_GPI_SDP1_X550EM_a IXGBE_EICR_GPI_SDP1_X540 +#define IXGBE_EICR_GPI_SDP2_X550EM_a IXGBE_EICR_GPI_SDP2_X540 +#define IXGBE_EICR_GPI_SDP0_BY_MAC(_hw) IXGBE_BY_MAC((_hw), EICR_GPI_SDP0) +#define IXGBE_EICR_GPI_SDP1_BY_MAC(_hw) IXGBE_BY_MAC((_hw), EICR_GPI_SDP1) +#define IXGBE_EICR_GPI_SDP2_BY_MAC(_hw) IXGBE_BY_MAC((_hw), EICR_GPI_SDP2) + +#define IXGBE_EICR_PBUR 0x10000000 /* Packet Buffer Handler Error */ +#define IXGBE_EICR_DHER 0x20000000 /* Descriptor Handler Error */ +#define IXGBE_EICR_TCP_TIMER 0x40000000 /* TCP Timer */ +#define IXGBE_EICR_OTHER 0x80000000 /* Interrupt Cause Active */ + +/* Extended Interrupt Cause Set */ +#define IXGBE_EICS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */ +#define IXGBE_EICS_FLOW_DIR IXGBE_EICR_FLOW_DIR /* FDir Exception */ +#define IXGBE_EICS_RX_MISS IXGBE_EICR_RX_MISS /* Pkt Buffer Overrun */ +#define IXGBE_EICS_PCI IXGBE_EICR_PCI /* PCI Exception */ +#define IXGBE_EICS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */ +#define IXGBE_EICS_LSC IXGBE_EICR_LSC /* Link Status Change */ +#define IXGBE_EICS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ +#define IXGBE_EICS_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */ +#define IXGBE_EICS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */ +#define IXGBE_EICS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */ +#define IXGBE_EICS_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */ +#define IXGBE_EICS_ECC IXGBE_EICR_ECC /* ECC Error */ +#define IXGBE_EICS_GPI_SDP0_BY_MAC(_hw) IXGBE_EICR_GPI_SDP0_BY_MAC(_hw) +#define IXGBE_EICS_GPI_SDP1_BY_MAC(_hw) IXGBE_EICR_GPI_SDP1_BY_MAC(_hw) +#define IXGBE_EICS_GPI_SDP2_BY_MAC(_hw) IXGBE_EICR_GPI_SDP2_BY_MAC(_hw) +#define IXGBE_EICS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */ +#define IXGBE_EICS_DHER IXGBE_EICR_DHER /* Desc Handler Error */ +#define IXGBE_EICS_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */ +#define IXGBE_EICS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */ + +/* Extended Interrupt Mask Set */ +#define IXGBE_EIMS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */ +#define IXGBE_EIMS_FLOW_DIR IXGBE_EICR_FLOW_DIR /* FDir Exception */ +#define IXGBE_EIMS_RX_MISS IXGBE_EICR_RX_MISS /* Packet Buffer Overrun */ +#define IXGBE_EIMS_PCI IXGBE_EICR_PCI /* PCI Exception */ +#define IXGBE_EIMS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */ +#define IXGBE_EIMS_LSC IXGBE_EICR_LSC /* 
Link Status Change */ +#define IXGBE_EIMS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ +#define IXGBE_EIMS_TS IXGBE_EICR_TS /* Thermal Sensor Event */ +#define IXGBE_EIMS_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */ +#define IXGBE_EIMS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */ +#define IXGBE_EIMS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */ +#define IXGBE_EIMS_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */ +#define IXGBE_EIMS_ECC IXGBE_EICR_ECC /* ECC Error */ +#define IXGBE_EIMS_GPI_SDP0_BY_MAC(_hw) IXGBE_EICR_GPI_SDP0_BY_MAC(_hw) +#define IXGBE_EIMS_GPI_SDP1_BY_MAC(_hw) IXGBE_EICR_GPI_SDP1_BY_MAC(_hw) +#define IXGBE_EIMS_GPI_SDP2_BY_MAC(_hw) IXGBE_EICR_GPI_SDP2_BY_MAC(_hw) +#define IXGBE_EIMS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */ +#define IXGBE_EIMS_DHER IXGBE_EICR_DHER /* Descr Handler Error */ +#define IXGBE_EIMS_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */ +#define IXGBE_EIMS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */ + +/* Extended Interrupt Mask Clear */ +#define IXGBE_EIMC_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */ +#define IXGBE_EIMC_FLOW_DIR IXGBE_EICR_FLOW_DIR /* FDir Exception */ +#define IXGBE_EIMC_RX_MISS IXGBE_EICR_RX_MISS /* Packet Buffer Overrun */ +#define IXGBE_EIMC_PCI IXGBE_EICR_PCI /* PCI Exception */ +#define IXGBE_EIMC_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */ +#define IXGBE_EIMC_LSC IXGBE_EICR_LSC /* Link Status Change */ +#define IXGBE_EIMC_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ +#define IXGBE_EIMC_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */ +#define IXGBE_EIMC_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */ +#define IXGBE_EIMC_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */ +#define IXGBE_EIMC_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */ +#define IXGBE_EIMC_ECC IXGBE_EICR_ECC /* ECC Error */ +#define IXGBE_EIMC_GPI_SDP0_BY_MAC(_hw) IXGBE_EICR_GPI_SDP0_BY_MAC(_hw) +#define IXGBE_EIMC_GPI_SDP1_BY_MAC(_hw) IXGBE_EICR_GPI_SDP1_BY_MAC(_hw) +#define IXGBE_EIMC_GPI_SDP2_BY_MAC(_hw) IXGBE_EICR_GPI_SDP2_BY_MAC(_hw) +#define IXGBE_EIMC_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */ +#define IXGBE_EIMC_DHER IXGBE_EICR_DHER /* Desc Handler Err */ +#define IXGBE_EIMC_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */ +#define IXGBE_EIMC_OTHER IXGBE_EICR_OTHER /* INT Cause Active */ + +#define IXGBE_EIMS_ENABLE_MASK ( \ + IXGBE_EIMS_RTX_QUEUE | \ + IXGBE_EIMS_LSC | \ + IXGBE_EIMS_TCP_TIMER | \ + IXGBE_EIMS_OTHER) + +/* Immediate Interrupt Rx (A.K.A. 
Low Latency Interrupt) */ +#define IXGBE_IMIR_PORT_IM_EN 0x00010000 /* TCP port enable */ +#define IXGBE_IMIR_PORT_BP 0x00020000 /* TCP port check bypass */ +#define IXGBE_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */ +#define IXGBE_IMIREXT_CTRL_URG 0x00002000 /* Check URG bit in header */ +#define IXGBE_IMIREXT_CTRL_ACK 0x00004000 /* Check ACK bit in header */ +#define IXGBE_IMIREXT_CTRL_PSH 0x00008000 /* Check PSH bit in header */ +#define IXGBE_IMIREXT_CTRL_RST 0x00010000 /* Check RST bit in header */ +#define IXGBE_IMIREXT_CTRL_SYN 0x00020000 /* Check SYN bit in header */ +#define IXGBE_IMIREXT_CTRL_FIN 0x00040000 /* Check FIN bit in header */ +#define IXGBE_IMIREXT_CTRL_BP 0x00080000 /* Bypass check of control bits */ +#define IXGBE_IMIR_SIZE_BP_82599 0x00001000 /* Packet size bypass */ +#define IXGBE_IMIR_CTRL_URG_82599 0x00002000 /* Check URG bit in header */ +#define IXGBE_IMIR_CTRL_ACK_82599 0x00004000 /* Check ACK bit in header */ +#define IXGBE_IMIR_CTRL_PSH_82599 0x00008000 /* Check PSH bit in header */ +#define IXGBE_IMIR_CTRL_RST_82599 0x00010000 /* Check RST bit in header */ +#define IXGBE_IMIR_CTRL_SYN_82599 0x00020000 /* Check SYN bit in header */ +#define IXGBE_IMIR_CTRL_FIN_82599 0x00040000 /* Check FIN bit in header */ +#define IXGBE_IMIR_CTRL_BP_82599 0x00080000 /* Bypass chk of ctrl bits */ +#define IXGBE_IMIR_LLI_EN_82599 0x00100000 /* Enables low latency Int */ +#define IXGBE_IMIR_RX_QUEUE_MASK_82599 0x0000007F /* Rx Queue Mask */ +#define IXGBE_IMIR_RX_QUEUE_SHIFT_82599 21 /* Rx Queue Shift */ +#define IXGBE_IMIRVP_PRIORITY_MASK 0x00000007 /* VLAN priority mask */ +#define IXGBE_IMIRVP_PRIORITY_EN 0x00000008 /* VLAN priority enable */ + +#define IXGBE_MAX_FTQF_FILTERS 128 +#define IXGBE_FTQF_PROTOCOL_MASK 0x00000003 +#define IXGBE_FTQF_PROTOCOL_TCP 0x00000000 +#define IXGBE_FTQF_PROTOCOL_UDP 0x00000001 +#define IXGBE_FTQF_PROTOCOL_SCTP 2 +#define IXGBE_FTQF_PRIORITY_MASK 0x00000007 +#define IXGBE_FTQF_PRIORITY_SHIFT 2 +#define IXGBE_FTQF_POOL_MASK 0x0000003F +#define IXGBE_FTQF_POOL_SHIFT 8 +#define IXGBE_FTQF_5TUPLE_MASK_MASK 0x0000001F +#define IXGBE_FTQF_5TUPLE_MASK_SHIFT 25 +#define IXGBE_FTQF_SOURCE_ADDR_MASK 0x1E +#define IXGBE_FTQF_DEST_ADDR_MASK 0x1D +#define IXGBE_FTQF_SOURCE_PORT_MASK 0x1B +#define IXGBE_FTQF_DEST_PORT_MASK 0x17 +#define IXGBE_FTQF_PROTOCOL_COMP_MASK 0x0F +#define IXGBE_FTQF_POOL_MASK_EN 0x40000000 +#define IXGBE_FTQF_QUEUE_ENABLE 0x80000000 + +/* Interrupt clear mask */ +#define IXGBE_IRQ_CLEAR_MASK 0xFFFFFFFF + +/* Interrupt Vector Allocation Registers */ +#define IXGBE_IVAR_REG_NUM 25 +#define IXGBE_IVAR_REG_NUM_82599 64 +#define IXGBE_IVAR_TXRX_ENTRY 96 +#define IXGBE_IVAR_RX_ENTRY 64 +#define IXGBE_IVAR_RX_QUEUE(_i) (0 + (_i)) +#define IXGBE_IVAR_TX_QUEUE(_i) (64 + (_i)) +#define IXGBE_IVAR_TX_ENTRY 32 + +#define IXGBE_IVAR_TCP_TIMER_INDEX 96 /* 0 based index */ +#define IXGBE_IVAR_OTHER_CAUSES_INDEX 97 /* 0 based index */ + +#define IXGBE_MSIX_VECTOR(_i) (0 + (_i)) + +#define IXGBE_IVAR_ALLOC_VAL 0x80 /* Interrupt Allocation valid */ + +/* ETYPE Queue Filter/Select Bit Masks */ +#define IXGBE_MAX_ETQF_FILTERS 8 +#define IXGBE_ETQF_FCOE 0x08000000 /* bit 27 */ +#define IXGBE_ETQF_BCN 0x10000000 /* bit 28 */ +#define IXGBE_ETQF_TX_ANTISPOOF 0x20000000 /* bit 29 */ +#define IXGBE_ETQF_1588 0x40000000 /* bit 30 */ +#define IXGBE_ETQF_FILTER_EN 0x80000000 /* bit 31 */ +#define IXGBE_ETQF_POOL_ENABLE (1 << 26) /* bit 26 */ +#define IXGBE_ETQF_POOL_SHIFT 20 + +#define IXGBE_ETQS_RX_QUEUE 0x007F0000 /* bits 22:16 */ +#define 
IXGBE_ETQS_RX_QUEUE_SHIFT 16 +#define IXGBE_ETQS_LLI 0x20000000 /* bit 29 */ +#define IXGBE_ETQS_QUEUE_EN 0x80000000 /* bit 31 */ + +/* + * ETQF filter list: one static filter per filter consumer. This is + * to avoid filter collisions later. Add new filters + * here!! + * + * Current filters: + * EAPOL 802.1x (0x888e): Filter 0 + * FCoE (0x8906): Filter 2 + * 1588 (0x88f7): Filter 3 + * FIP (0x8914): Filter 4 + * LLDP (0x88CC): Filter 5 + * LACP (0x8809): Filter 6 + * FC (0x8808): Filter 7 + */ +#define IXGBE_ETQF_FILTER_EAPOL 0 +#define IXGBE_ETQF_FILTER_FCOE 2 +#define IXGBE_ETQF_FILTER_1588 3 +#define IXGBE_ETQF_FILTER_FIP 4 +#define IXGBE_ETQF_FILTER_LLDP 5 +#define IXGBE_ETQF_FILTER_LACP 6 +#define IXGBE_ETQF_FILTER_FC 7 +/* VLAN Control Bit Masks */ +#define IXGBE_VLNCTRL_VET 0x0000FFFF /* bits 0-15 */ +#define IXGBE_VLNCTRL_CFI 0x10000000 /* bit 28 */ +#define IXGBE_VLNCTRL_CFIEN 0x20000000 /* bit 29 */ +#define IXGBE_VLNCTRL_VFE 0x40000000 /* bit 30 */ +#define IXGBE_VLNCTRL_VME 0x80000000 /* bit 31 */ + +/* VLAN pool filtering masks */ +#define IXGBE_VLVF_VIEN 0x80000000 /* filter is valid */ +#define IXGBE_VLVF_ENTRIES 64 +#define IXGBE_VLVF_VLANID_MASK 0x00000FFF +/* Per VF Port VLAN insertion rules */ +#define IXGBE_VMVIR_VLANA_DEFAULT 0x40000000 /* Always use default VLAN */ +#define IXGBE_VMVIR_VLANA_NEVER 0x80000000 /* Never insert VLAN tag */ + +#define IXGBE_ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.1q protocol */ + +/* STATUS Bit Masks */ +#define IXGBE_STATUS_LAN_ID 0x0000000C /* LAN ID */ +#define IXGBE_STATUS_LAN_ID_SHIFT 2 /* LAN ID Shift*/ +#define IXGBE_STATUS_GIO 0x00080000 /* GIO Master Ena Status */ + +#define IXGBE_STATUS_LAN_ID_0 0x00000000 /* LAN ID 0 */ +#define IXGBE_STATUS_LAN_ID_1 0x00000004 /* LAN ID 1 */ + +/* ESDP Bit Masks */ +#define IXGBE_ESDP_SDP0 0x00000001 /* SDP0 Data Value */ +#define IXGBE_ESDP_SDP1 0x00000002 /* SDP1 Data Value */ +#define IXGBE_ESDP_SDP2 0x00000004 /* SDP2 Data Value */ +#define IXGBE_ESDP_SDP3 0x00000008 /* SDP3 Data Value */ +#define IXGBE_ESDP_SDP4 0x00000010 /* SDP4 Data Value */ +#define IXGBE_ESDP_SDP5 0x00000020 /* SDP5 Data Value */ +#define IXGBE_ESDP_SDP6 0x00000040 /* SDP6 Data Value */ +#define IXGBE_ESDP_SDP7 0x00000080 /* SDP7 Data Value */ +#define IXGBE_ESDP_SDP0_DIR 0x00000100 /* SDP0 IO direction */ +#define IXGBE_ESDP_SDP1_DIR 0x00000200 /* SDP1 IO direction */ +#define IXGBE_ESDP_SDP2_DIR 0x00000400 /* SDP1 IO direction */ +#define IXGBE_ESDP_SDP3_DIR 0x00000800 /* SDP3 IO direction */ +#define IXGBE_ESDP_SDP4_DIR 0x00001000 /* SDP4 IO direction */ +#define IXGBE_ESDP_SDP5_DIR 0x00002000 /* SDP5 IO direction */ +#define IXGBE_ESDP_SDP6_DIR 0x00004000 /* SDP6 IO direction */ +#define IXGBE_ESDP_SDP7_DIR 0x00008000 /* SDP7 IO direction */ +#define IXGBE_ESDP_SDP0_NATIVE 0x00010000 /* SDP0 IO mode */ +#define IXGBE_ESDP_SDP1_NATIVE 0x00020000 /* SDP1 IO mode */ + + +/* LEDCTL Bit Masks */ +#define IXGBE_LED_IVRT_BASE 0x00000040 +#define IXGBE_LED_BLINK_BASE 0x00000080 +#define IXGBE_LED_MODE_MASK_BASE 0x0000000F +#define IXGBE_LED_OFFSET(_base, _i) (_base << (8 * (_i))) +#define IXGBE_LED_MODE_SHIFT(_i) (8*(_i)) +#define IXGBE_LED_IVRT(_i) IXGBE_LED_OFFSET(IXGBE_LED_IVRT_BASE, _i) +#define IXGBE_LED_BLINK(_i) IXGBE_LED_OFFSET(IXGBE_LED_BLINK_BASE, _i) +#define IXGBE_LED_MODE_MASK(_i) IXGBE_LED_OFFSET(IXGBE_LED_MODE_MASK_BASE, _i) +#define IXGBE_X557_LED_MANUAL_SET_MASK (1 << 8) +#define IXGBE_X557_MAX_LED_INDEX 3 +#define IXGBE_X557_LED_PROVISIONING 0xC430 + +/* LED modes */ +#define IXGBE_LED_LINK_UP 0x0 
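+
+/*
+ * Illustrative sketch, not part of the hardware definition: LEDCTL packs one
+ * 8-bit control field per LED index, which is why the IXGBE_LED_* helpers
+ * above shift by 8 * index.  The function below shows the usual
+ * read-modify-write pattern for programming one LED's mode, e.g.
+ * ledctl = ixgbe_example_ledctl_set_mode(ledctl, 2, IXGBE_LED_LINK_UP);
+ * The helper name is an example only; the u32 typedef is assumed to come
+ * from the accompanying osdep header.
+ */
+static inline u32 ixgbe_example_ledctl_set_mode(u32 ledctl, u32 index, u32 mode)
+{
+	ledctl &= ~IXGBE_LED_MODE_MASK(index);		/* clear the 4-bit mode field */
+	ledctl |= mode << IXGBE_LED_MODE_SHIFT(index);	/* program the new mode */
+	ledctl &= ~IXGBE_LED_BLINK(index);		/* and stop any blinking */
+	return ledctl;
+}
+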
+#define IXGBE_LED_LINK_10G 0x1 +#define IXGBE_LED_MAC 0x2 +#define IXGBE_LED_FILTER 0x3 +#define IXGBE_LED_LINK_ACTIVE 0x4 +#define IXGBE_LED_LINK_1G 0x5 +#define IXGBE_LED_ON 0xE +#define IXGBE_LED_OFF 0xF + +/* AUTOC Bit Masks */ +#define IXGBE_AUTOC_KX4_KX_SUPP_MASK 0xC0000000 +#define IXGBE_AUTOC_KX4_SUPP 0x80000000 +#define IXGBE_AUTOC_KX_SUPP 0x40000000 +#define IXGBE_AUTOC_PAUSE 0x30000000 +#define IXGBE_AUTOC_ASM_PAUSE 0x20000000 +#define IXGBE_AUTOC_SYM_PAUSE 0x10000000 +#define IXGBE_AUTOC_RF 0x08000000 +#define IXGBE_AUTOC_PD_TMR 0x06000000 +#define IXGBE_AUTOC_AN_RX_LOOSE 0x01000000 +#define IXGBE_AUTOC_AN_RX_DRIFT 0x00800000 +#define IXGBE_AUTOC_AN_RX_ALIGN 0x007C0000 +#define IXGBE_AUTOC_FECA 0x00040000 +#define IXGBE_AUTOC_FECR 0x00020000 +#define IXGBE_AUTOC_KR_SUPP 0x00010000 +#define IXGBE_AUTOC_AN_RESTART 0x00001000 +#define IXGBE_AUTOC_FLU 0x00000001 +#define IXGBE_AUTOC_LMS_SHIFT 13 +#define IXGBE_AUTOC_LMS_10G_SERIAL (0x3 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_KX4_KX_KR (0x4 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_SGMII_1G_100M (0x5 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN (0x6 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII (0x7 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_MASK (0x7 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_1G_LINK_NO_AN (0x0 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_10G_LINK_NO_AN (0x1 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_1G_AN (0x2 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_KX4_AN (0x4 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_KX4_AN_1G_AN (0x6 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_ATTACH_TYPE (0x7 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) + +#define IXGBE_AUTOC_1G_PMA_PMD_MASK 0x00000200 +#define IXGBE_AUTOC_1G_PMA_PMD_SHIFT 9 +#define IXGBE_AUTOC_10G_PMA_PMD_MASK 0x00000180 +#define IXGBE_AUTOC_10G_PMA_PMD_SHIFT 7 +#define IXGBE_AUTOC_10G_XAUI (0x0 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) +#define IXGBE_AUTOC_10G_KX4 (0x1 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) +#define IXGBE_AUTOC_10G_CX4 (0x2 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) +#define IXGBE_AUTOC_1G_BX (0x0 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT) +#define IXGBE_AUTOC_1G_KX (0x1 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT) +#define IXGBE_AUTOC_1G_SFI (0x0 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT) +#define IXGBE_AUTOC_1G_KX_BX (0x1 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT) + +#define IXGBE_AUTOC2_UPPER_MASK 0xFFFF0000 +#define IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK 0x00030000 +#define IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT 16 +#define IXGBE_AUTOC2_10G_KR (0x0 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT) +#define IXGBE_AUTOC2_10G_XFI (0x1 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT) +#define IXGBE_AUTOC2_10G_SFI (0x2 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT) +#define IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK 0x50000000 +#define IXGBE_AUTOC2_LINK_DISABLE_MASK 0x70000000 + +#define IXGBE_MACC_FLU 0x00000001 +#define IXGBE_MACC_FSV_10G 0x00030000 +#define IXGBE_MACC_FS 0x00040000 +#define IXGBE_MAC_RX2TX_LPBK 0x00000002 + +/* Veto Bit definiton */ +#define IXGBE_MMNGC_MNG_VETO 0x00000001 + +/* LINKS Bit Masks */ +#define IXGBE_LINKS_KX_AN_COMP 0x80000000 +#define IXGBE_LINKS_UP 0x40000000 +#define IXGBE_LINKS_SPEED 0x20000000 +#define IXGBE_LINKS_MODE 0x18000000 +#define IXGBE_LINKS_RX_MODE 0x06000000 +#define IXGBE_LINKS_TX_MODE 0x01800000 +#define IXGBE_LINKS_XGXS_EN 0x00400000 +#define IXGBE_LINKS_SGMII_EN 0x02000000 +#define IXGBE_LINKS_PCS_1G_EN 0x00200000 +#define IXGBE_LINKS_1G_AN_EN 0x00100000 +#define IXGBE_LINKS_KX_AN_IDLE 
0x00080000 +#define IXGBE_LINKS_1G_SYNC 0x00040000 +#define IXGBE_LINKS_10G_ALIGN 0x00020000 +#define IXGBE_LINKS_10G_LANE_SYNC 0x00017000 +#define IXGBE_LINKS_TL_FAULT 0x00001000 +#define IXGBE_LINKS_SIGNAL 0x00000F00 + +#define IXGBE_LINKS_SPEED_NON_STD 0x08000000 +#define IXGBE_LINKS_SPEED_82599 0x30000000 +#define IXGBE_LINKS_SPEED_10G_82599 0x30000000 +#define IXGBE_LINKS_SPEED_1G_82599 0x20000000 +#define IXGBE_LINKS_SPEED_100_82599 0x10000000 +#define IXGBE_LINKS_SPEED_10_X550EM_A 0x00000000 +#define IXGBE_LINK_UP_TIME 90 /* 9.0 Seconds */ +#define IXGBE_AUTO_NEG_TIME 45 /* 4.5 Seconds */ + +#define IXGBE_LINKS2_AN_SUPPORTED 0x00000040 + +/* PCS1GLSTA Bit Masks */ +#define IXGBE_PCS1GLSTA_LINK_OK 1 +#define IXGBE_PCS1GLSTA_SYNK_OK 0x10 +#define IXGBE_PCS1GLSTA_AN_COMPLETE 0x10000 +#define IXGBE_PCS1GLSTA_AN_PAGE_RX 0x20000 +#define IXGBE_PCS1GLSTA_AN_TIMED_OUT 0x40000 +#define IXGBE_PCS1GLSTA_AN_REMOTE_FAULT 0x80000 +#define IXGBE_PCS1GLSTA_AN_ERROR_RWS 0x100000 + +#define IXGBE_PCS1GANA_SYM_PAUSE 0x80 +#define IXGBE_PCS1GANA_ASM_PAUSE 0x100 + +/* PCS1GLCTL Bit Masks */ +#define IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN 0x00040000 /* PCS 1G autoneg to en */ +#define IXGBE_PCS1GLCTL_FLV_LINK_UP 1 +#define IXGBE_PCS1GLCTL_FORCE_LINK 0x20 +#define IXGBE_PCS1GLCTL_LOW_LINK_LATCH 0x40 +#define IXGBE_PCS1GLCTL_AN_ENABLE 0x10000 +#define IXGBE_PCS1GLCTL_AN_RESTART 0x20000 + +/* ANLP1 Bit Masks */ +#define IXGBE_ANLP1_PAUSE 0x0C00 +#define IXGBE_ANLP1_SYM_PAUSE 0x0400 +#define IXGBE_ANLP1_ASM_PAUSE 0x0800 +#define IXGBE_ANLP1_AN_STATE_MASK 0x000f0000 + +/* SW Semaphore Register bitmasks */ +#define IXGBE_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ +#define IXGBE_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ +#define IXGBE_SWSM_WMNG 0x00000004 /* Wake MNG Clock */ +#define IXGBE_SWFW_REGSMP 0x80000000 /* Register Semaphore bit 31 */ + +/* SW_FW_SYNC/GSSR definitions */ +#define IXGBE_GSSR_EEP_SM 0x0001 +#define IXGBE_GSSR_PHY0_SM 0x0002 +#define IXGBE_GSSR_PHY1_SM 0x0004 +#define IXGBE_GSSR_MAC_CSR_SM 0x0008 +#define IXGBE_GSSR_FLASH_SM 0x0010 +#define IXGBE_GSSR_NVM_UPDATE_SM 0x0200 +#define IXGBE_GSSR_SW_MNG_SM 0x0400 +#define IXGBE_GSSR_TOKEN_SM 0x40000000 /* SW bit for shared access */ +#define IXGBE_GSSR_SHARED_I2C_SM 0x1806 /* Wait for both phys and both I2Cs */ +#define IXGBE_GSSR_I2C_MASK 0x1800 +#define IXGBE_GSSR_NVM_PHY_MASK 0xF + +/* FW Status register bitmask */ +#define IXGBE_FWSTS_FWRI 0x00000200 /* Firmware Reset Indication */ + +/* EEC Register */ +#define IXGBE_EEC_SK 0x00000001 /* EEPROM Clock */ +#define IXGBE_EEC_CS 0x00000002 /* EEPROM Chip Select */ +#define IXGBE_EEC_DI 0x00000004 /* EEPROM Data In */ +#define IXGBE_EEC_DO 0x00000008 /* EEPROM Data Out */ +#define IXGBE_EEC_FWE_MASK 0x00000030 /* FLASH Write Enable */ +#define IXGBE_EEC_FWE_DIS 0x00000010 /* Disable FLASH writes */ +#define IXGBE_EEC_FWE_EN 0x00000020 /* Enable FLASH writes */ +#define IXGBE_EEC_FWE_SHIFT 4 +#define IXGBE_EEC_REQ 0x00000040 /* EEPROM Access Request */ +#define IXGBE_EEC_GNT 0x00000080 /* EEPROM Access Grant */ +#define IXGBE_EEC_PRES 0x00000100 /* EEPROM Present */ +#define IXGBE_EEC_ARD 0x00000200 /* EEPROM Auto Read Done */ +#define IXGBE_EEC_FLUP 0x00800000 /* Flash update command */ +#define IXGBE_EEC_SEC1VAL 0x02000000 /* Sector 1 Valid */ +#define IXGBE_EEC_FLUDONE 0x04000000 /* Flash update done */ +/* EEPROM Addressing bits based on type (0-small, 1-large) */ +#define IXGBE_EEC_ADDR_SIZE 0x00000400 +#define IXGBE_EEC_SIZE 0x00007800 /* EEPROM Size */ +#define IXGBE_EERD_MAX_ADDR 
0x00003FFF /* EERD alows 14 bits for addr. */ + +#define IXGBE_EEC_SIZE_SHIFT 11 +#define IXGBE_EEPROM_WORD_SIZE_SHIFT 6 +#define IXGBE_EEPROM_OPCODE_BITS 8 + +/* FLA Register */ +#define IXGBE_FLA_LOCKED 0x00000040 + +/* Part Number String Length */ +#define IXGBE_PBANUM_LENGTH 11 + +/* Checksum and EEPROM pointers */ +#define IXGBE_PBANUM_PTR_GUARD 0xFAFA +#define IXGBE_EEPROM_CHECKSUM 0x3F +#define IXGBE_EEPROM_SUM 0xBABA +#define IXGBE_EEPROM_CTRL_4 0x45 +#define IXGBE_EE_CTRL_4_INST_ID 0x10 +#define IXGBE_EE_CTRL_4_INST_ID_SHIFT 4 +#define IXGBE_PCIE_ANALOG_PTR 0x03 +#define IXGBE_ATLAS0_CONFIG_PTR 0x04 +#define IXGBE_PHY_PTR 0x04 +#define IXGBE_ATLAS1_CONFIG_PTR 0x05 +#define IXGBE_OPTION_ROM_PTR 0x05 +#define IXGBE_PCIE_GENERAL_PTR 0x06 +#define IXGBE_PCIE_CONFIG0_PTR 0x07 +#define IXGBE_PCIE_CONFIG1_PTR 0x08 +#define IXGBE_CORE0_PTR 0x09 +#define IXGBE_CORE1_PTR 0x0A +#define IXGBE_MAC0_PTR 0x0B +#define IXGBE_MAC1_PTR 0x0C +#define IXGBE_CSR0_CONFIG_PTR 0x0D +#define IXGBE_CSR1_CONFIG_PTR 0x0E +#define IXGBE_PCIE_ANALOG_PTR_X550 0x02 +#define IXGBE_SHADOW_RAM_SIZE_X550 0x4000 +#define IXGBE_IXGBE_PCIE_GENERAL_SIZE 0x24 +#define IXGBE_PCIE_CONFIG_SIZE 0x08 +#define IXGBE_EEPROM_LAST_WORD 0x41 +#define IXGBE_FW_PTR 0x0F +#define IXGBE_PBANUM0_PTR 0x15 +#define IXGBE_PBANUM1_PTR 0x16 +#define IXGBE_ALT_MAC_ADDR_PTR 0x37 +#define IXGBE_FREE_SPACE_PTR 0X3E + +/* External Thermal Sensor Config */ +#define IXGBE_ETS_CFG 0x26 +#define IXGBE_ETS_LTHRES_DELTA_MASK 0x07C0 +#define IXGBE_ETS_LTHRES_DELTA_SHIFT 6 +#define IXGBE_ETS_TYPE_MASK 0x0038 +#define IXGBE_ETS_TYPE_SHIFT 3 +#define IXGBE_ETS_TYPE_EMC 0x000 +#define IXGBE_ETS_NUM_SENSORS_MASK 0x0007 +#define IXGBE_ETS_DATA_LOC_MASK 0x3C00 +#define IXGBE_ETS_DATA_LOC_SHIFT 10 +#define IXGBE_ETS_DATA_INDEX_MASK 0x0300 +#define IXGBE_ETS_DATA_INDEX_SHIFT 8 +#define IXGBE_ETS_DATA_HTHRESH_MASK 0x00FF + +#define IXGBE_SAN_MAC_ADDR_PTR 0x28 +#define IXGBE_DEVICE_CAPS 0x2C +#define IXGBE_82599_SERIAL_NUMBER_MAC_ADDR 0x11 +#define IXGBE_X550_SERIAL_NUMBER_MAC_ADDR 0x04 + +#define IXGBE_PCIE_MSIX_82599_CAPS 0x72 +#define IXGBE_MAX_MSIX_VECTORS_82599 0x40 +#define IXGBE_PCIE_MSIX_82598_CAPS 0x62 +#define IXGBE_MAX_MSIX_VECTORS_82598 0x13 + +/* MSI-X capability fields masks */ +#define IXGBE_PCIE_MSIX_TBL_SZ_MASK 0x7FF + +/* Legacy EEPROM word offsets */ +#define IXGBE_ISCSI_BOOT_CAPS 0x0033 +#define IXGBE_ISCSI_SETUP_PORT_0 0x0030 +#define IXGBE_ISCSI_SETUP_PORT_1 0x0034 + +/* EEPROM Commands - SPI */ +#define IXGBE_EEPROM_MAX_RETRY_SPI 5000 /* Max wait 5ms for RDY signal */ +#define IXGBE_EEPROM_STATUS_RDY_SPI 0x01 +#define IXGBE_EEPROM_READ_OPCODE_SPI 0x03 /* EEPROM read opcode */ +#define IXGBE_EEPROM_WRITE_OPCODE_SPI 0x02 /* EEPROM write opcode */ +#define IXGBE_EEPROM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = addr bit-8 */ +#define IXGBE_EEPROM_WREN_OPCODE_SPI 0x06 /* EEPROM set Write Ena latch */ +/* EEPROM reset Write Enable latch */ +#define IXGBE_EEPROM_WRDI_OPCODE_SPI 0x04 +#define IXGBE_EEPROM_RDSR_OPCODE_SPI 0x05 /* EEPROM read Status reg */ +#define IXGBE_EEPROM_WRSR_OPCODE_SPI 0x01 /* EEPROM write Status reg */ +#define IXGBE_EEPROM_ERASE4K_OPCODE_SPI 0x20 /* EEPROM ERASE 4KB */ +#define IXGBE_EEPROM_ERASE64K_OPCODE_SPI 0xD8 /* EEPROM ERASE 64KB */ +#define IXGBE_EEPROM_ERASE256_OPCODE_SPI 0xDB /* EEPROM ERASE 256B */ + +/* EEPROM Read Register */ +#define IXGBE_EEPROM_RW_REG_DATA 16 /* data offset in EEPROM read reg */ +#define IXGBE_EEPROM_RW_REG_DONE 2 /* Offset to READ done bit */ +#define IXGBE_EEPROM_RW_REG_START 1 /* First bit to 
start operation */ +#define IXGBE_EEPROM_RW_ADDR_SHIFT 2 /* Shift to the address bits */ +#define IXGBE_NVM_POLL_WRITE 1 /* Flag for polling for wr complete */ +#define IXGBE_NVM_POLL_READ 0 /* Flag for polling for rd complete */ + +#define NVM_INIT_CTRL_3 0x38 +#define NVM_INIT_CTRL_3_LPLU 0x8 +#define NVM_INIT_CTRL_3_D10GMP_PORT0 0x40 +#define NVM_INIT_CTRL_3_D10GMP_PORT1 0x100 + +#define IXGBE_ETH_LENGTH_OF_ADDRESS 6 + +#define IXGBE_EEPROM_PAGE_SIZE_MAX 128 +#define IXGBE_EEPROM_RD_BUFFER_MAX_COUNT 256 /* words rd in burst */ +#define IXGBE_EEPROM_WR_BUFFER_MAX_COUNT 256 /* words wr in burst */ +#define IXGBE_EEPROM_CTRL_2 1 /* EEPROM CTRL word 2 */ +#define IXGBE_EEPROM_CCD_BIT 2 + +#ifndef IXGBE_EEPROM_GRANT_ATTEMPTS +#define IXGBE_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM attempts to gain grant */ +#endif + +/* Number of 5 microseconds we wait for EERD read and + * EERW write to complete */ +#define IXGBE_EERD_EEWR_ATTEMPTS 100000 + +/* # attempts we wait for flush update to complete */ +#define IXGBE_FLUDONE_ATTEMPTS 20000 + +#define IXGBE_PCIE_CTRL2 0x5 /* PCIe Control 2 Offset */ +#define IXGBE_PCIE_CTRL2_DUMMY_ENABLE 0x8 /* Dummy Function Enable */ +#define IXGBE_PCIE_CTRL2_LAN_DISABLE 0x2 /* LAN PCI Disable */ +#define IXGBE_PCIE_CTRL2_DISABLE_SELECT 0x1 /* LAN Disable Select */ + +#define IXGBE_SAN_MAC_ADDR_PORT0_OFFSET 0x0 +#define IXGBE_SAN_MAC_ADDR_PORT1_OFFSET 0x3 +#define IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP 0x1 +#define IXGBE_DEVICE_CAPS_FCOE_OFFLOADS 0x2 +#define IXGBE_DEVICE_CAPS_NO_CROSSTALK_WR (1 << 7) +#define IXGBE_FW_LESM_PARAMETERS_PTR 0x2 +#define IXGBE_FW_LESM_STATE_1 0x1 +#define IXGBE_FW_LESM_STATE_ENABLED 0x8000 /* LESM Enable bit */ +#define IXGBE_FW_LESM_2_STATES_ENABLED_MASK 0x1F +#define IXGBE_FW_LESM_2_STATES_ENABLED 0x12 +#define IXGBE_FW_LESM_STATE0_10G_ENABLED 0x6FFF +#define IXGBE_FW_LESM_STATE1_10G_ENABLED 0x4FFF +#define IXGBE_FW_LESM_STATE0_10G_DISABLED 0x0FFF +#define IXGBE_FW_LESM_STATE1_10G_DISABLED 0x2FFF +#define IXGBE_FW_LESM_PORT0_STATE0_OFFSET 0x2 +#define IXGBE_FW_LESM_PORT0_STATE1_OFFSET 0x3 +#define IXGBE_FW_LESM_PORT1_STATE0_OFFSET 0x6 +#define IXGBE_FW_LESM_PORT1_STATE1_OFFSET 0x7 +#define IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR 0x4 +#define IXGBE_FW_PATCH_VERSION_4 0x7 +#define IXGBE_FCOE_IBA_CAPS_BLK_PTR 0x33 /* iSCSI/FCOE block */ +#define IXGBE_FCOE_IBA_CAPS_FCOE 0x20 /* FCOE flags */ +#define IXGBE_ISCSI_FCOE_BLK_PTR 0x17 /* iSCSI/FCOE block */ +#define IXGBE_ISCSI_FCOE_FLAGS_OFFSET 0x0 /* FCOE flags */ +#define IXGBE_ISCSI_FCOE_FLAGS_ENABLE 0x1 /* FCOE flags enable bit */ +#define IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR 0x27 /* Alt. 
SAN MAC block */ +#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET 0x0 /* Alt SAN MAC capability */ +#define IXGBE_ALT_SAN_MAC_ADDR_PORT0_OFFSET 0x1 /* Alt SAN MAC 0 offset */ +#define IXGBE_ALT_SAN_MAC_ADDR_PORT1_OFFSET 0x4 /* Alt SAN MAC 1 offset */ +#define IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET 0x7 /* Alt WWNN prefix offset */ +#define IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET 0x8 /* Alt WWPN prefix offset */ +#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_SANMAC 0x0 /* Alt SAN MAC exists */ +#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN 0x1 /* Alt WWN base exists */ + +/* FW header offset */ +#define IXGBE_X540_FW_PASSTHROUGH_PATCH_CONFIG_PTR 0x4 +#define IXGBE_X540_FW_MODULE_MASK 0x7FFF +/* 4KB multiplier */ +#define IXGBE_X540_FW_MODULE_LENGTH 0x1000 +/* version word 2 (month & day) */ +#define IXGBE_X540_FW_PATCH_VERSION_2 0x5 +/* version word 3 (silicon compatibility & year) */ +#define IXGBE_X540_FW_PATCH_VERSION_3 0x6 +/* version word 4 (major & minor numbers) */ +#define IXGBE_X540_FW_PATCH_VERSION_4 0x7 + +#define IXGBE_DEVICE_CAPS_WOL_PORT0_1 0x4 /* WoL supported on ports 0 & 1 */ +#define IXGBE_DEVICE_CAPS_WOL_PORT0 0x8 /* WoL supported on port 0 */ +#define IXGBE_DEVICE_CAPS_WOL_MASK 0xC /* Mask for WoL capabilities */ + +/* PCI Bus Info */ +#define IXGBE_PCI_DEVICE_STATUS 0xAA +#define IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING 0x0020 +#define IXGBE_PCI_LINK_STATUS 0xB2 +#define IXGBE_PCI_DEVICE_CONTROL2 0xC8 +#define IXGBE_PCI_LINK_WIDTH 0x3F0 +#define IXGBE_PCI_LINK_WIDTH_1 0x10 +#define IXGBE_PCI_LINK_WIDTH_2 0x20 +#define IXGBE_PCI_LINK_WIDTH_4 0x40 +#define IXGBE_PCI_LINK_WIDTH_8 0x80 +#define IXGBE_PCI_LINK_SPEED 0xF +#define IXGBE_PCI_LINK_SPEED_2500 0x1 +#define IXGBE_PCI_LINK_SPEED_5000 0x2 +#define IXGBE_PCI_LINK_SPEED_8000 0x3 +#define IXGBE_PCI_HEADER_TYPE_REGISTER 0x0E +#define IXGBE_PCI_HEADER_TYPE_MULTIFUNC 0x80 +#define IXGBE_PCI_DEVICE_CONTROL2_16ms 0x0005 + +#define IXGBE_PCIDEVCTRL2_TIMEO_MASK 0xf +#define IXGBE_PCIDEVCTRL2_16_32ms_def 0x0 +#define IXGBE_PCIDEVCTRL2_50_100us 0x1 +#define IXGBE_PCIDEVCTRL2_1_2ms 0x2 +#define IXGBE_PCIDEVCTRL2_16_32ms 0x5 +#define IXGBE_PCIDEVCTRL2_65_130ms 0x6 +#define IXGBE_PCIDEVCTRL2_260_520ms 0x9 +#define IXGBE_PCIDEVCTRL2_1_2s 0xa +#define IXGBE_PCIDEVCTRL2_4_8s 0xd +#define IXGBE_PCIDEVCTRL2_17_34s 0xe + +/* Number of 100 microseconds we wait for PCI Express master disable */ +#define IXGBE_PCI_MASTER_DISABLE_TIMEOUT 800 + +/* Check whether address is multicast. This is little-endian specific check.*/ +#define IXGBE_IS_MULTICAST(Address) \ + (bool)(((u8 *)(Address))[0] & ((u8)0x01)) + +/* Check whether an address is broadcast. 
*/ +#define IXGBE_IS_BROADCAST(Address) \ + ((((u8 *)(Address))[0] == ((u8)0xff)) && \ + (((u8 *)(Address))[1] == ((u8)0xff))) + +/* RAH */ +#define IXGBE_RAH_VIND_MASK 0x003C0000 +#define IXGBE_RAH_VIND_SHIFT 18 +#define IXGBE_RAH_AV 0x80000000 +#define IXGBE_CLEAR_VMDQ_ALL 0xFFFFFFFF + +/* Header split receive */ +#define IXGBE_RFCTL_ISCSI_DIS 0x00000001 +#define IXGBE_RFCTL_ISCSI_DWC_MASK 0x0000003E +#define IXGBE_RFCTL_ISCSI_DWC_SHIFT 1 +#define IXGBE_RFCTL_RSC_DIS 0x00000020 +#define IXGBE_RFCTL_NFSW_DIS 0x00000040 +#define IXGBE_RFCTL_NFSR_DIS 0x00000080 +#define IXGBE_RFCTL_NFS_VER_MASK 0x00000300 +#define IXGBE_RFCTL_NFS_VER_SHIFT 8 +#define IXGBE_RFCTL_NFS_VER_2 0 +#define IXGBE_RFCTL_NFS_VER_3 1 +#define IXGBE_RFCTL_NFS_VER_4 2 +#define IXGBE_RFCTL_IPV6_DIS 0x00000400 +#define IXGBE_RFCTL_IPV6_XSUM_DIS 0x00000800 +#define IXGBE_RFCTL_IPFRSP_DIS 0x00004000 +#define IXGBE_RFCTL_IPV6_EX_DIS 0x00010000 +#define IXGBE_RFCTL_NEW_IPV6_EXT_DIS 0x00020000 + +/* Transmit Config masks */ +#define IXGBE_TXDCTL_ENABLE 0x02000000 /* Ena specific Tx Queue */ +#define IXGBE_TXDCTL_SWFLSH 0x04000000 /* Tx Desc. wr-bk flushing */ +#define IXGBE_TXDCTL_WTHRESH_SHIFT 16 /* shift to WTHRESH bits */ +/* Enable short packet padding to 64 bytes */ +#define IXGBE_TX_PAD_ENABLE 0x00000400 +#define IXGBE_JUMBO_FRAME_ENABLE 0x00000004 /* Allow jumbo frames */ +/* This allows for 16K packets + 4k for vlan */ +#define IXGBE_MAX_FRAME_SZ 0x40040000 + +#define IXGBE_TDWBAL_HEAD_WB_ENABLE 0x1 /* Tx head write-back enable */ +#define IXGBE_TDWBAL_SEQNUM_WB_ENABLE 0x2 /* Tx seq# write-back enable */ + +/* Receive Config masks */ +#define IXGBE_RXCTRL_RXEN 0x00000001 /* Enable Receiver */ +#define IXGBE_RXCTRL_DMBYPS 0x00000002 /* Desc Monitor Bypass */ +#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Ena specific Rx Queue */ +#define IXGBE_RXDCTL_SWFLSH 0x04000000 /* Rx Desc wr-bk flushing */ +#define IXGBE_RXDCTL_RLPMLMASK 0x00003FFF /* X540 supported only */ +#define IXGBE_RXDCTL_RLPML_EN 0x00008000 +#define IXGBE_RXDCTL_VME 0x40000000 /* VLAN mode enable */ + +#define IXGBE_TSAUXC_EN_CLK 0x00000004 +#define IXGBE_TSAUXC_SYNCLK 0x00000008 +#define IXGBE_TSAUXC_SDP0_INT 0x00000040 +#define IXGBE_TSAUXC_EN_TT0 0x00000001 +#define IXGBE_TSAUXC_EN_TT1 0x00000002 +#define IXGBE_TSAUXC_ST0 0x00000010 +#define IXGBE_TSAUXC_DISABLE_SYSTIME 0x80000000 + +#define IXGBE_TSSDP_TS_SDP0_SEL_MASK 0x000000C0 +#define IXGBE_TSSDP_TS_SDP0_CLK0 0x00000080 +#define IXGBE_TSSDP_TS_SDP0_EN 0x00000100 + +#define IXGBE_TSYNCTXCTL_VALID 0x00000001 /* Tx timestamp valid */ +#define IXGBE_TSYNCTXCTL_ENABLED 0x00000010 /* Tx timestamping enabled */ + +#define IXGBE_TSYNCRXCTL_VALID 0x00000001 /* Rx timestamp valid */ +#define IXGBE_TSYNCRXCTL_TYPE_MASK 0x0000000E /* Rx type mask */ +#define IXGBE_TSYNCRXCTL_TYPE_L2_V2 0x00 +#define IXGBE_TSYNCRXCTL_TYPE_L4_V1 0x02 +#define IXGBE_TSYNCRXCTL_TYPE_L2_L4_V2 0x04 +#define IXGBE_TSYNCRXCTL_TYPE_ALL 0x08 +#define IXGBE_TSYNCRXCTL_TYPE_EVENT_V2 0x0A +#define IXGBE_TSYNCRXCTL_ENABLED 0x00000010 /* Rx Timestamping enabled */ +#define IXGBE_TSYNCRXCTL_TSIP_UT_EN 0x00800000 /* Rx Timestamp in Packet */ +#define IXGBE_TSYNCRXCTL_TSIP_UP_MASK 0xFF000000 /* Rx Timestamp UP Mask */ + +#define IXGBE_TSIM_SYS_WRAP 0x00000001 +#define IXGBE_TSIM_TXTS 0x00000002 +#define IXGBE_TSIM_TADJ 0x00000080 + +#define IXGBE_TSICR_SYS_WRAP IXGBE_TSIM_SYS_WRAP +#define IXGBE_TSICR_TXTS IXGBE_TSIM_TXTS +#define IXGBE_TSICR_TADJ IXGBE_TSIM_TADJ + +#define IXGBE_RXMTRL_V1_CTRLT_MASK 0x000000FF +#define IXGBE_RXMTRL_V1_SYNC_MSG 
0x00 +#define IXGBE_RXMTRL_V1_DELAY_REQ_MSG 0x01 +#define IXGBE_RXMTRL_V1_FOLLOWUP_MSG 0x02 +#define IXGBE_RXMTRL_V1_DELAY_RESP_MSG 0x03 +#define IXGBE_RXMTRL_V1_MGMT_MSG 0x04 + +#define IXGBE_RXMTRL_V2_MSGID_MASK 0x0000FF00 +#define IXGBE_RXMTRL_V2_SYNC_MSG 0x0000 +#define IXGBE_RXMTRL_V2_DELAY_REQ_MSG 0x0100 +#define IXGBE_RXMTRL_V2_PDELAY_REQ_MSG 0x0200 +#define IXGBE_RXMTRL_V2_PDELAY_RESP_MSG 0x0300 +#define IXGBE_RXMTRL_V2_FOLLOWUP_MSG 0x0800 +#define IXGBE_RXMTRL_V2_DELAY_RESP_MSG 0x0900 +#define IXGBE_RXMTRL_V2_PDELAY_FOLLOWUP_MSG 0x0A00 +#define IXGBE_RXMTRL_V2_ANNOUNCE_MSG 0x0B00 +#define IXGBE_RXMTRL_V2_SIGNALLING_MSG 0x0C00 +#define IXGBE_RXMTRL_V2_MGMT_MSG 0x0D00 + +#define IXGBE_FCTRL_SBP 0x00000002 /* Store Bad Packet */ +#define IXGBE_FCTRL_MPE 0x00000100 /* Multicast Promiscuous Ena*/ +#define IXGBE_FCTRL_UPE 0x00000200 /* Unicast Promiscuous Ena */ +#define IXGBE_FCTRL_BAM 0x00000400 /* Broadcast Accept Mode */ +#define IXGBE_FCTRL_PMCF 0x00001000 /* Pass MAC Control Frames */ +#define IXGBE_FCTRL_DPF 0x00002000 /* Discard Pause Frame */ +/* Receive Priority Flow Control Enable */ +#define IXGBE_FCTRL_RPFCE 0x00004000 +#define IXGBE_FCTRL_RFCE 0x00008000 /* Receive Flow Control Ena */ +#define IXGBE_MFLCN_PMCF 0x00000001 /* Pass MAC Control Frames */ +#define IXGBE_MFLCN_DPF 0x00000002 /* Discard Pause Frame */ +#define IXGBE_MFLCN_RPFCE 0x00000004 /* Receive Priority FC Enable */ +#define IXGBE_MFLCN_RFCE 0x00000008 /* Receive FC Enable */ +#define IXGBE_MFLCN_RPFCE_MASK 0x00000FF4 /* Rx Priority FC bitmap mask */ +#define IXGBE_MFLCN_RPFCE_SHIFT 4 /* Rx Priority FC bitmap shift */ + +/* Multiple Receive Queue Control */ +#define IXGBE_MRQC_RSSEN 0x00000001 /* RSS Enable */ +#define IXGBE_MRQC_MRQE_MASK 0xF /* Bits 3:0 */ +#define IXGBE_MRQC_RT8TCEN 0x00000002 /* 8 TC no RSS */ +#define IXGBE_MRQC_RT4TCEN 0x00000003 /* 4 TC no RSS */ +#define IXGBE_MRQC_RTRSS8TCEN 0x00000004 /* 8 TC w/ RSS */ +#define IXGBE_MRQC_RTRSS4TCEN 0x00000005 /* 4 TC w/ RSS */ +#define IXGBE_MRQC_VMDQEN 0x00000008 /* VMDq2 64 pools no RSS */ +#define IXGBE_MRQC_VMDQRSS32EN 0x0000000A /* VMDq2 32 pools w/ RSS */ +#define IXGBE_MRQC_VMDQRSS64EN 0x0000000B /* VMDq2 64 pools w/ RSS */ +#define IXGBE_MRQC_VMDQRT8TCEN 0x0000000C /* VMDq2/RT 16 pool 8 TC */ +#define IXGBE_MRQC_VMDQRT4TCEN 0x0000000D /* VMDq2/RT 32 pool 4 TC */ +#define IXGBE_MRQC_L3L4TXSWEN 0x00008000 /* Enable L3/L4 Tx switch */ +#define IXGBE_MRQC_RSS_FIELD_MASK 0xFFFF0000 +#define IXGBE_MRQC_RSS_FIELD_IPV4_TCP 0x00010000 +#define IXGBE_MRQC_RSS_FIELD_IPV4 0x00020000 +#define IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP 0x00040000 +#define IXGBE_MRQC_RSS_FIELD_IPV6_EX 0x00080000 +#define IXGBE_MRQC_RSS_FIELD_IPV6 0x00100000 +#define IXGBE_MRQC_RSS_FIELD_IPV6_TCP 0x00200000 +#define IXGBE_MRQC_RSS_FIELD_IPV4_UDP 0x00400000 +#define IXGBE_MRQC_RSS_FIELD_IPV6_UDP 0x00800000 +#define IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP 0x01000000 +#define IXGBE_MRQC_MULTIPLE_RSS 0x00002000 +#define IXGBE_MRQC_L3L4TXSWEN 0x00008000 + +/* Queue Drop Enable */ +#define IXGBE_QDE_ENABLE 0x00000001 +#define IXGBE_QDE_HIDE_VLAN 0x00000002 +#define IXGBE_QDE_IDX_MASK 0x00007F00 +#define IXGBE_QDE_IDX_SHIFT 8 +#define IXGBE_QDE_WRITE 0x00010000 +#define IXGBE_QDE_READ 0x00020000 + +#define IXGBE_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ +#define IXGBE_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ +#define IXGBE_TXD_CMD_EOP 0x01000000 /* End of Packet */ +#define IXGBE_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define IXGBE_TXD_CMD_IC 0x04000000 /* Insert 
Checksum */ +#define IXGBE_TXD_CMD_RS 0x08000000 /* Report Status */ +#define IXGBE_TXD_CMD_DEXT 0x20000000 /* Desc extension (0 = legacy) */ +#define IXGBE_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */ +#define IXGBE_TXD_STAT_DD 0x00000001 /* Descriptor Done */ + +#define IXGBE_RXDADV_IPSEC_STATUS_SECP 0x00020000 +#define IXGBE_RXDADV_IPSEC_ERROR_INVALID_PROTOCOL 0x08000000 +#define IXGBE_RXDADV_IPSEC_ERROR_INVALID_LENGTH 0x10000000 +#define IXGBE_RXDADV_IPSEC_ERROR_AUTH_FAILED 0x18000000 +#define IXGBE_RXDADV_IPSEC_ERROR_BIT_MASK 0x18000000 +/* Multiple Transmit Queue Command Register */ +#define IXGBE_MTQC_RT_ENA 0x1 /* DCB Enable */ +#define IXGBE_MTQC_VT_ENA 0x2 /* VMDQ2 Enable */ +#define IXGBE_MTQC_64Q_1PB 0x0 /* 64 queues 1 pack buffer */ +#define IXGBE_MTQC_32VF 0x8 /* 4 TX Queues per pool w/32VF's */ +#define IXGBE_MTQC_64VF 0x4 /* 2 TX Queues per pool w/64VF's */ +#define IXGBE_MTQC_4TC_4TQ 0x8 /* 4 TC if RT_ENA and VT_ENA */ +#define IXGBE_MTQC_8TC_8TQ 0xC /* 8 TC if RT_ENA or 8 TQ if VT_ENA */ + +/* Receive Descriptor bit definitions */ +#define IXGBE_RXD_STAT_DD 0x01 /* Descriptor Done */ +#define IXGBE_RXD_STAT_EOP 0x02 /* End of Packet */ +#define IXGBE_RXD_STAT_FLM 0x04 /* FDir Match */ +#define IXGBE_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ +#define IXGBE_RXDADV_NEXTP_MASK 0x000FFFF0 /* Next Descriptor Index */ +#define IXGBE_RXDADV_NEXTP_SHIFT 0x00000004 +#define IXGBE_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ +#define IXGBE_RXD_STAT_L4CS 0x20 /* L4 xsum calculated */ +#define IXGBE_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ +#define IXGBE_RXD_STAT_PIF 0x80 /* passed in-exact filter */ +#define IXGBE_RXD_STAT_CRCV 0x100 /* Speculative CRC Valid */ +#define IXGBE_RXD_STAT_OUTERIPCS 0x100 /* Cloud IP xsum calculated */ +#define IXGBE_RXD_STAT_VEXT 0x200 /* 1st VLAN found */ +#define IXGBE_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */ +#define IXGBE_RXD_STAT_DYNINT 0x800 /* Pkt caused INT via DYNINT */ +#define IXGBE_RXD_STAT_LLINT 0x800 /* Pkt caused Low Latency Interrupt */ +#define IXGBE_RXD_STAT_TSIP 0x08000 /* Time Stamp in packet buffer */ +#define IXGBE_RXD_STAT_TS 0x10000 /* Time Stamp */ +#define IXGBE_RXD_STAT_SECP 0x20000 /* Security Processing */ +#define IXGBE_RXD_STAT_LB 0x40000 /* Loopback Status */ +#define IXGBE_RXD_STAT_ACK 0x8000 /* ACK Packet indication */ +#define IXGBE_RXD_ERR_CE 0x01 /* CRC Error */ +#define IXGBE_RXD_ERR_LE 0x02 /* Length Error */ +#define IXGBE_RXD_ERR_PE 0x08 /* Packet Error */ +#define IXGBE_RXD_ERR_OSE 0x10 /* Oversize Error */ +#define IXGBE_RXD_ERR_USE 0x20 /* Undersize Error */ +#define IXGBE_RXD_ERR_TCPE 0x40 /* TCP/UDP Checksum Error */ +#define IXGBE_RXD_ERR_IPE 0x80 /* IP Checksum Error */ +#define IXGBE_RXDADV_ERR_MASK 0xfff00000 /* RDESC.ERRORS mask */ +#define IXGBE_RXDADV_ERR_SHIFT 20 /* RDESC.ERRORS shift */ +#define IXGBE_RXDADV_ERR_OUTERIPER 0x04000000 /* CRC IP Header error */ +#define IXGBE_RXDADV_ERR_RXE 0x20000000 /* Any MAC Error */ +#define IXGBE_RXDADV_ERR_FCEOFE 0x80000000 /* FCEOFe/IPE */ +#define IXGBE_RXDADV_ERR_FCERR 0x00700000 /* FCERR/FDIRERR */ +#define IXGBE_RXDADV_ERR_FDIR_LEN 0x00100000 /* FDIR Length error */ +#define IXGBE_RXDADV_ERR_FDIR_DROP 0x00200000 /* FDIR Drop error */ +#define IXGBE_RXDADV_ERR_FDIR_COLL 0x00400000 /* FDIR Collision error */ +#define IXGBE_RXDADV_ERR_HBO 0x00800000 /*Header Buffer Overflow */ +#define IXGBE_RXDADV_ERR_CE 0x01000000 /* CRC Error */ +#define IXGBE_RXDADV_ERR_LE 0x02000000 /* Length Error */ +#define IXGBE_RXDADV_ERR_PE 0x08000000 /* Packet Error */ +#define 
IXGBE_RXDADV_ERR_OSE 0x10000000 /* Oversize Error */ +#define IXGBE_RXDADV_ERR_USE 0x20000000 /* Undersize Error */ +#define IXGBE_RXDADV_ERR_TCPE 0x40000000 /* TCP/UDP Checksum Error */ +#define IXGBE_RXDADV_ERR_IPE 0x80000000 /* IP Checksum Error */ +#define IXGBE_RXD_VLAN_ID_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ +#define IXGBE_RXD_PRI_MASK 0xE000 /* Priority is in upper 3 bits */ +#define IXGBE_RXD_PRI_SHIFT 13 +#define IXGBE_RXD_CFI_MASK 0x1000 /* CFI is bit 12 */ +#define IXGBE_RXD_CFI_SHIFT 12 + +#define IXGBE_RXDADV_STAT_DD IXGBE_RXD_STAT_DD /* Done */ +#define IXGBE_RXDADV_STAT_EOP IXGBE_RXD_STAT_EOP /* End of Packet */ +#define IXGBE_RXDADV_STAT_FLM IXGBE_RXD_STAT_FLM /* FDir Match */ +#define IXGBE_RXDADV_STAT_VP IXGBE_RXD_STAT_VP /* IEEE VLAN Pkt */ +#define IXGBE_RXDADV_STAT_MASK 0x000fffff /* Stat/NEXTP: bit 0-19 */ +#define IXGBE_RXDADV_STAT_FCEOFS 0x00000040 /* FCoE EOF/SOF Stat */ +#define IXGBE_RXDADV_STAT_FCSTAT 0x00000030 /* FCoE Pkt Stat */ +#define IXGBE_RXDADV_STAT_FCSTAT_NOMTCH 0x00000000 /* 00: No Ctxt Match */ +#define IXGBE_RXDADV_STAT_FCSTAT_NODDP 0x00000010 /* 01: Ctxt w/o DDP */ +#define IXGBE_RXDADV_STAT_FCSTAT_FCPRSP 0x00000020 /* 10: Recv. FCP_RSP */ +#define IXGBE_RXDADV_STAT_FCSTAT_DDP 0x00000030 /* 11: Ctxt w/ DDP */ +#define IXGBE_RXDADV_STAT_TS 0x00010000 /* IEEE1588 Time Stamp */ +#define IXGBE_RXDADV_STAT_TSIP 0x00008000 /* Time Stamp in packet buffer */ + +/* PSRTYPE bit definitions */ +#define IXGBE_PSRTYPE_TCPHDR 0x00000010 +#define IXGBE_PSRTYPE_UDPHDR 0x00000020 +#define IXGBE_PSRTYPE_IPV4HDR 0x00000100 +#define IXGBE_PSRTYPE_IPV6HDR 0x00000200 +#define IXGBE_PSRTYPE_L2HDR 0x00001000 + +/* SRRCTL bit definitions */ +#define IXGBE_SRRCTL_BSIZEPKT_SHIFT 10 /* so many KBs */ +#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* 64byte resolution (>> 6) + * + at bit 8 offset (<< 8) + * = (<< 2) + */ +#define IXGBE_SRRCTL_RDMTS_SHIFT 22 +#define IXGBE_SRRCTL_RDMTS_MASK 0x01C00000 +#define IXGBE_SRRCTL_DROP_EN 0x10000000 +#define IXGBE_SRRCTL_BSIZEPKT_MASK 0x0000007F +#define IXGBE_SRRCTL_BSIZEHDR_MASK 0x00003F00 +#define IXGBE_SRRCTL_DESCTYPE_LEGACY 0x00000000 +#define IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 +#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000 +#define IXGBE_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000 +#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000 +#define IXGBE_SRRCTL_DESCTYPE_MASK 0x0E000000 + +#define IXGBE_RXDPS_HDRSTAT_HDRSP 0x00008000 +#define IXGBE_RXDPS_HDRSTAT_HDRLEN_MASK 0x000003FF + +#define IXGBE_RXDADV_RSSTYPE_MASK 0x0000000F +#define IXGBE_RXDADV_PKTTYPE_MASK 0x0000FFF0 +#define IXGBE_RXDADV_PKTTYPE_MASK_EX 0x0001FFF0 +#define IXGBE_RXDADV_HDRBUFLEN_MASK 0x00007FE0 +#define IXGBE_RXDADV_RSCCNT_MASK 0x001E0000 +#define IXGBE_RXDADV_RSCCNT_SHIFT 17 +#define IXGBE_RXDADV_HDRBUFLEN_SHIFT 5 +#define IXGBE_RXDADV_SPLITHEADER_EN 0x00001000 +#define IXGBE_RXDADV_SPH 0x8000 + +/* RSS Hash results */ +#define IXGBE_RXDADV_RSSTYPE_NONE 0x00000000 +#define IXGBE_RXDADV_RSSTYPE_IPV4_TCP 0x00000001 +#define IXGBE_RXDADV_RSSTYPE_IPV4 0x00000002 +#define IXGBE_RXDADV_RSSTYPE_IPV6_TCP 0x00000003 +#define IXGBE_RXDADV_RSSTYPE_IPV6_EX 0x00000004 +#define IXGBE_RXDADV_RSSTYPE_IPV6 0x00000005 +#define IXGBE_RXDADV_RSSTYPE_IPV6_TCP_EX 0x00000006 +#define IXGBE_RXDADV_RSSTYPE_IPV4_UDP 0x00000007 +#define IXGBE_RXDADV_RSSTYPE_IPV6_UDP 0x00000008 +#define IXGBE_RXDADV_RSSTYPE_IPV6_UDP_EX 0x00000009 + +/* RSS Packet Types as indicated in the receive descriptor. 
*/ +#define IXGBE_RXDADV_PKTTYPE_NONE 0x00000000 +#define IXGBE_RXDADV_PKTTYPE_IPV4 0x00000010 /* IPv4 hdr present */ +#define IXGBE_RXDADV_PKTTYPE_IPV4_EX 0x00000020 /* IPv4 hdr + extensions */ +#define IXGBE_RXDADV_PKTTYPE_IPV6 0x00000040 /* IPv6 hdr present */ +#define IXGBE_RXDADV_PKTTYPE_IPV6_EX 0x00000080 /* IPv6 hdr + extensions */ +#define IXGBE_RXDADV_PKTTYPE_TCP 0x00000100 /* TCP hdr present */ +#define IXGBE_RXDADV_PKTTYPE_UDP 0x00000200 /* UDP hdr present */ +#define IXGBE_RXDADV_PKTTYPE_SCTP 0x00000400 /* SCTP hdr present */ +#define IXGBE_RXDADV_PKTTYPE_NFS 0x00000800 /* NFS hdr present */ +#define IXGBE_RXDADV_PKTTYPE_GENEVE 0x00000800 /* GENEVE hdr present */ +#define IXGBE_RXDADV_PKTTYPE_VXLAN 0x00000800 /* VXLAN hdr present */ +#define IXGBE_RXDADV_PKTTYPE_TUNNEL 0x00010000 /* Tunnel type */ +#define IXGBE_RXDADV_PKTTYPE_IPSEC_ESP 0x00001000 /* IPSec ESP */ +#define IXGBE_RXDADV_PKTTYPE_IPSEC_AH 0x00002000 /* IPSec AH */ +#define IXGBE_RXDADV_PKTTYPE_LINKSEC 0x00004000 /* LinkSec Encap */ +#define IXGBE_RXDADV_PKTTYPE_ETQF 0x00008000 /* PKTTYPE is ETQF index */ +#define IXGBE_RXDADV_PKTTYPE_ETQF_MASK 0x00000070 /* ETQF has 8 indices */ +#define IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT 4 /* Right-shift 4 bits */ + +/* Security Processing bit Indication */ +#define IXGBE_RXDADV_LNKSEC_STATUS_SECP 0x00020000 +#define IXGBE_RXDADV_LNKSEC_ERROR_NO_SA_MATCH 0x08000000 +#define IXGBE_RXDADV_LNKSEC_ERROR_REPLAY_ERROR 0x10000000 +#define IXGBE_RXDADV_LNKSEC_ERROR_BIT_MASK 0x18000000 +#define IXGBE_RXDADV_LNKSEC_ERROR_BAD_SIG 0x18000000 + +/* Masks to determine if packets should be dropped due to frame errors */ +#define IXGBE_RXD_ERR_FRAME_ERR_MASK ( \ + IXGBE_RXD_ERR_CE | \ + IXGBE_RXD_ERR_LE | \ + IXGBE_RXD_ERR_PE | \ + IXGBE_RXD_ERR_OSE | \ + IXGBE_RXD_ERR_USE) + +#define IXGBE_RXDADV_ERR_FRAME_ERR_MASK ( \ + IXGBE_RXDADV_ERR_CE | \ + IXGBE_RXDADV_ERR_LE | \ + IXGBE_RXDADV_ERR_PE | \ + IXGBE_RXDADV_ERR_OSE | \ + IXGBE_RXDADV_ERR_USE) + +#define IXGBE_RXDADV_ERR_FRAME_ERR_MASK_82599 IXGBE_RXDADV_ERR_RXE + +/* Multicast bit mask */ +#define IXGBE_MCSTCTRL_MFE 0x4 + +/* Number of Transmit and Receive Descriptors must be a multiple of 8 */ +#define IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE 8 +#define IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE 8 +#define IXGBE_REQ_TX_BUFFER_GRANULARITY 1024 + +/* Vlan-specific macros */ +#define IXGBE_RX_DESC_SPECIAL_VLAN_MASK 0x0FFF /* VLAN ID in lower 12 bits */ +#define IXGBE_RX_DESC_SPECIAL_PRI_MASK 0xE000 /* Priority in upper 3 bits */ +#define IXGBE_RX_DESC_SPECIAL_PRI_SHIFT 0x000D /* Priority in upper 3 of 16 */ +#define IXGBE_TX_DESC_SPECIAL_PRI_SHIFT IXGBE_RX_DESC_SPECIAL_PRI_SHIFT + +/* SR-IOV specific macros */ +#define IXGBE_MBVFICR_INDEX(vf_number) (vf_number >> 4) +#define IXGBE_MBVFICR(_i) (0x00710 + ((_i) * 4)) +#define IXGBE_VFLRE(_i) (((_i & 1) ? 
0x001C0 : 0x00600)) +#define IXGBE_VFLREC(_i) (0x00700 + ((_i) * 4)) +/* Translated register #defines */ +#define IXGBE_PVFCTRL(P) (0x00300 + (4 * (P))) +#define IXGBE_PVFSTATUS(P) (0x00008 + (0 * (P))) +#define IXGBE_PVFLINKS(P) (0x042A4 + (0 * (P))) +#define IXGBE_PVFRTIMER(P) (0x00048 + (0 * (P))) +#define IXGBE_PVFMAILBOX(P) (0x04C00 + (4 * (P))) +#define IXGBE_PVFRXMEMWRAP(P) (0x03190 + (0 * (P))) +#define IXGBE_PVTEICR(P) (0x00B00 + (4 * (P))) +#define IXGBE_PVTEICS(P) (0x00C00 + (4 * (P))) +#define IXGBE_PVTEIMS(P) (0x00D00 + (4 * (P))) +#define IXGBE_PVTEIMC(P) (0x00E00 + (4 * (P))) +#define IXGBE_PVTEIAC(P) (0x00F00 + (4 * (P))) +#define IXGBE_PVTEIAM(P) (0x04D00 + (4 * (P))) +#define IXGBE_PVTEITR(P) (((P) < 24) ? (0x00820 + ((P) * 4)) : \ + (0x012300 + (((P) - 24) * 4))) +#define IXGBE_PVTIVAR(P) (0x12500 + (4 * (P))) +#define IXGBE_PVTIVAR_MISC(P) (0x04E00 + (4 * (P))) +#define IXGBE_PVTRSCINT(P) (0x12000 + (4 * (P))) +#define IXGBE_VFPBACL(P) (0x110C8 + (4 * (P))) +#define IXGBE_PVFRDBAL(P) ((P < 64) ? (0x01000 + (0x40 * (P))) \ + : (0x0D000 + (0x40 * ((P) - 64)))) +#define IXGBE_PVFRDBAH(P) ((P < 64) ? (0x01004 + (0x40 * (P))) \ + : (0x0D004 + (0x40 * ((P) - 64)))) +#define IXGBE_PVFRDLEN(P) ((P < 64) ? (0x01008 + (0x40 * (P))) \ + : (0x0D008 + (0x40 * ((P) - 64)))) +#define IXGBE_PVFRDH(P) ((P < 64) ? (0x01010 + (0x40 * (P))) \ + : (0x0D010 + (0x40 * ((P) - 64)))) +#define IXGBE_PVFRDT(P) ((P < 64) ? (0x01018 + (0x40 * (P))) \ + : (0x0D018 + (0x40 * ((P) - 64)))) +#define IXGBE_PVFRXDCTL(P) ((P < 64) ? (0x01028 + (0x40 * (P))) \ + : (0x0D028 + (0x40 * ((P) - 64)))) +#define IXGBE_PVFSRRCTL(P) ((P < 64) ? (0x01014 + (0x40 * (P))) \ + : (0x0D014 + (0x40 * ((P) - 64)))) +#define IXGBE_PVFPSRTYPE(P) (0x0EA00 + (4 * (P))) +#define IXGBE_PVFTDBAL(P) (0x06000 + (0x40 * (P))) +#define IXGBE_PVFTDBAH(P) (0x06004 + (0x40 * (P))) +#define IXGBE_PVFTDLEN(P) (0x06008 + (0x40 * (P))) +#define IXGBE_PVFTDH(P) (0x06010 + (0x40 * (P))) +#define IXGBE_PVFTDT(P) (0x06018 + (0x40 * (P))) +#define IXGBE_PVFTXDCTL(P) (0x06028 + (0x40 * (P))) +#define IXGBE_PVFTDWBAL(P) (0x06038 + (0x40 * (P))) +#define IXGBE_PVFTDWBAH(P) (0x0603C + (0x40 * (P))) +#define IXGBE_PVFDCA_RXCTRL(P) (((P) < 64) ? 
(0x0100C + (0x40 * (P))) \ + : (0x0D00C + (0x40 * ((P) - 64)))) +#define IXGBE_PVFDCA_TXCTRL(P) (0x0600C + (0x40 * (P))) +#define IXGBE_PVFGPRC(x) (0x0101C + (0x40 * (x))) +#define IXGBE_PVFGPTC(x) (0x08300 + (0x04 * (x))) +#define IXGBE_PVFGORC_LSB(x) (0x01020 + (0x40 * (x))) +#define IXGBE_PVFGORC_MSB(x) (0x0D020 + (0x40 * (x))) +#define IXGBE_PVFGOTC_LSB(x) (0x08400 + (0x08 * (x))) +#define IXGBE_PVFGOTC_MSB(x) (0x08404 + (0x08 * (x))) +#define IXGBE_PVFMPRC(x) (0x0D01C + (0x40 * (x))) + +#define IXGBE_PVFTDWBALn(q_per_pool, vf_number, vf_q_index) \ + (IXGBE_PVFTDWBAL((q_per_pool)*(vf_number) + (vf_q_index))) +#define IXGBE_PVFTDWBAHn(q_per_pool, vf_number, vf_q_index) \ + (IXGBE_PVFTDWBAH((q_per_pool)*(vf_number) + (vf_q_index))) + +#define IXGBE_PVFTDHn(q_per_pool, vf_number, vf_q_index) \ + (IXGBE_PVFTDH((q_per_pool)*(vf_number) + (vf_q_index))) +#define IXGBE_PVFTDTn(q_per_pool, vf_number, vf_q_index) \ + (IXGBE_PVFTDT((q_per_pool)*(vf_number) + (vf_q_index))) + +/* Little Endian defines */ +#ifndef __le16 +#define __le16 u16 +#endif +#ifndef __le32 +#define __le32 u32 +#endif +#ifndef __le64 +#define __le64 u64 + +#endif +#ifndef __be16 +/* Big Endian defines */ +#define __be16 u16 +#define __be32 u32 +#define __be64 u64 + +#endif +enum ixgbe_fdir_pballoc_type { + IXGBE_FDIR_PBALLOC_NONE = 0, + IXGBE_FDIR_PBALLOC_64K = 1, + IXGBE_FDIR_PBALLOC_128K = 2, + IXGBE_FDIR_PBALLOC_256K = 3, +}; + +/* Flow Director register values */ +#define IXGBE_FDIRCTRL_PBALLOC_64K 0x00000001 +#define IXGBE_FDIRCTRL_PBALLOC_128K 0x00000002 +#define IXGBE_FDIRCTRL_PBALLOC_256K 0x00000003 +#define IXGBE_FDIRCTRL_INIT_DONE 0x00000008 +#define IXGBE_FDIRCTRL_PERFECT_MATCH 0x00000010 +#define IXGBE_FDIRCTRL_REPORT_STATUS 0x00000020 +#define IXGBE_FDIRCTRL_REPORT_STATUS_ALWAYS 0x00000080 +#define IXGBE_FDIRCTRL_DROP_Q_SHIFT 8 +#define IXGBE_FDIRCTRL_DROP_Q_MASK 0x00007F00 +#define IXGBE_FDIRCTRL_FLEX_SHIFT 16 +#define IXGBE_FDIRCTRL_DROP_NO_MATCH 0x00008000 +#define IXGBE_FDIRCTRL_FILTERMODE_SHIFT 21 +#define IXGBE_FDIRCTRL_FILTERMODE_MACVLAN 0x0001 /* bit 23:21, 001b */ +#define IXGBE_FDIRCTRL_FILTERMODE_CLOUD 0x0002 /* bit 23:21, 010b */ +#define IXGBE_FDIRCTRL_SEARCHLIM 0x00800000 +#define IXGBE_FDIRCTRL_FILTERMODE_MASK 0x00E00000 +#define IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT 24 +#define IXGBE_FDIRCTRL_FULL_THRESH_MASK 0xF0000000 +#define IXGBE_FDIRCTRL_FULL_THRESH_SHIFT 28 + +#define IXGBE_FDIRTCPM_DPORTM_SHIFT 16 +#define IXGBE_FDIRUDPM_DPORTM_SHIFT 16 +#define IXGBE_FDIRIP6M_DIPM_SHIFT 16 +#define IXGBE_FDIRM_VLANID 0x00000001 +#define IXGBE_FDIRM_VLANP 0x00000002 +#define IXGBE_FDIRM_POOL 0x00000004 +#define IXGBE_FDIRM_L4P 0x00000008 +#define IXGBE_FDIRM_FLEX 0x00000010 +#define IXGBE_FDIRM_DIPv6 0x00000020 +#define IXGBE_FDIRM_L3P 0x00000040 + +#define IXGBE_FDIRIP6M_INNER_MAC 0x03F0 /* bit 9:4 */ +#define IXGBE_FDIRIP6M_TUNNEL_TYPE 0x0800 /* bit 11 */ +#define IXGBE_FDIRIP6M_TNI_VNI 0xF000 /* bit 15:12 */ +#define IXGBE_FDIRIP6M_TNI_VNI_24 0x1000 /* bit 12 */ +#define IXGBE_FDIRIP6M_ALWAYS_MASK 0x040F /* bit 10, 3:0 */ + +#define IXGBE_FDIRFREE_FREE_MASK 0xFFFF +#define IXGBE_FDIRFREE_FREE_SHIFT 0 +#define IXGBE_FDIRFREE_COLL_MASK 0x7FFF0000 +#define IXGBE_FDIRFREE_COLL_SHIFT 16 +#define IXGBE_FDIRLEN_MAXLEN_MASK 0x3F +#define IXGBE_FDIRLEN_MAXLEN_SHIFT 0 +#define IXGBE_FDIRLEN_MAXHASH_MASK 0x7FFF0000 +#define IXGBE_FDIRLEN_MAXHASH_SHIFT 16 +#define IXGBE_FDIRUSTAT_ADD_MASK 0xFFFF +#define IXGBE_FDIRUSTAT_ADD_SHIFT 0 +#define IXGBE_FDIRUSTAT_REMOVE_MASK 0xFFFF0000 +#define IXGBE_FDIRUSTAT_REMOVE_SHIFT 16 
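+/*
+ * Illustrative sketch, not part of the upstream Intel header: a minimal
+ * example of how the FDIRFREE and FDIRUSTAT mask/shift pairs defined just
+ * above are meant to be combined to decode the raw 32-bit register words.
+ * The helper names are hypothetical; the fixed-width types (u16/u32) are
+ * assumed to come from the OS-dep layer used throughout this header, and
+ * the caller is assumed to have already read the register value.
+ */
+static inline u16 ixgbe_fdirfree_free_filters(u32 fdirfree)
+{
+	/* FDIRFREE[15:0]: number of unused flow director filters */
+	return (u16)((fdirfree & IXGBE_FDIRFREE_FREE_MASK) >>
+		     IXGBE_FDIRFREE_FREE_SHIFT);
+}
+
+static inline u16 ixgbe_fdirustat_added(u32 fdirustat)
+{
+	/* FDIRUSTAT[15:0]: count of filters added since the last read */
+	return (u16)((fdirustat & IXGBE_FDIRUSTAT_ADD_MASK) >>
+		     IXGBE_FDIRUSTAT_ADD_SHIFT);
+}
+
+static inline u16 ixgbe_fdirustat_removed(u32 fdirustat)
+{
+	/* FDIRUSTAT[31:16]: count of filters removed since the last read */
+	return (u16)((fdirustat & IXGBE_FDIRUSTAT_REMOVE_MASK) >>
+		     IXGBE_FDIRUSTAT_REMOVE_SHIFT);
+}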
+#define IXGBE_FDIRFSTAT_FADD_MASK 0x00FF +#define IXGBE_FDIRFSTAT_FADD_SHIFT 0 +#define IXGBE_FDIRFSTAT_FREMOVE_MASK 0xFF00 +#define IXGBE_FDIRFSTAT_FREMOVE_SHIFT 8 +#define IXGBE_FDIRPORT_DESTINATION_SHIFT 16 +#define IXGBE_FDIRVLAN_FLEX_SHIFT 16 +#define IXGBE_FDIRHASH_BUCKET_VALID_SHIFT 15 +#define IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT 16 + +#define IXGBE_FDIRCMD_CMD_MASK 0x00000003 +#define IXGBE_FDIRCMD_CMD_ADD_FLOW 0x00000001 +#define IXGBE_FDIRCMD_CMD_REMOVE_FLOW 0x00000002 +#define IXGBE_FDIRCMD_CMD_QUERY_REM_FILT 0x00000003 +#define IXGBE_FDIRCMD_FILTER_VALID 0x00000004 +#define IXGBE_FDIRCMD_FILTER_UPDATE 0x00000008 +#define IXGBE_FDIRCMD_IPv6DMATCH 0x00000010 +#define IXGBE_FDIRCMD_L4TYPE_UDP 0x00000020 +#define IXGBE_FDIRCMD_L4TYPE_TCP 0x00000040 +#define IXGBE_FDIRCMD_L4TYPE_SCTP 0x00000060 +#define IXGBE_FDIRCMD_IPV6 0x00000080 +#define IXGBE_FDIRCMD_CLEARHT 0x00000100 +#define IXGBE_FDIRCMD_DROP 0x00000200 +#define IXGBE_FDIRCMD_INT 0x00000400 +#define IXGBE_FDIRCMD_LAST 0x00000800 +#define IXGBE_FDIRCMD_COLLISION 0x00001000 +#define IXGBE_FDIRCMD_QUEUE_EN 0x00008000 +#define IXGBE_FDIRCMD_FLOW_TYPE_SHIFT 5 +#define IXGBE_FDIRCMD_RX_QUEUE_SHIFT 16 +#define IXGBE_FDIRCMD_TUNNEL_FILTER_SHIFT 23 +#define IXGBE_FDIRCMD_VT_POOL_SHIFT 24 +#define IXGBE_FDIR_INIT_DONE_POLL 10 +#define IXGBE_FDIRCMD_CMD_POLL 10 +#define IXGBE_FDIRCMD_TUNNEL_FILTER 0x00800000 +#define IXGBE_FDIR_DROP_QUEUE 127 + + +/* Manageablility Host Interface defines */ +#define IXGBE_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Num of bytes in range */ +#define IXGBE_HI_MAX_BLOCK_DWORD_LENGTH 448 /* Num of dwords in range */ +#define IXGBE_HI_COMMAND_TIMEOUT 500 /* Process HI command limit */ +#define IXGBE_HI_FLASH_ERASE_TIMEOUT 1000 /* Process Erase command limit */ +#define IXGBE_HI_FLASH_UPDATE_TIMEOUT 5000 /* Process Update command limit */ +#define IXGBE_HI_FLASH_APPLY_TIMEOUT 0 /* Process Apply command limit */ +#define IXGBE_HI_PHY_MGMT_REQ_TIMEOUT 2000 /* Wait up to 2 seconds */ + +/* CEM Support */ +#define FW_CEM_HDR_LEN 0x4 +#define FW_CEM_CMD_DRIVER_INFO 0xDD +#define FW_CEM_CMD_DRIVER_INFO_LEN 0x5 +#define FW_CEM_CMD_RESERVED 0X0 +#define FW_CEM_UNUSED_VER 0x0 +#define FW_CEM_MAX_RETRIES 3 +#define FW_CEM_RESP_STATUS_SUCCESS 0x1 +#define FW_CEM_DRIVER_VERSION_SIZE 39 /* +9 would send 48 bytes to fw */ +#define FW_READ_SHADOW_RAM_CMD 0x31 +#define FW_READ_SHADOW_RAM_LEN 0x6 +#define FW_WRITE_SHADOW_RAM_CMD 0x33 +#define FW_WRITE_SHADOW_RAM_LEN 0xA /* 8 plus 1 WORD to write */ +#define FW_SHADOW_RAM_DUMP_CMD 0x36 +#define FW_SHADOW_RAM_DUMP_LEN 0 +#define FW_DEFAULT_CHECKSUM 0xFF /* checksum always 0xFF */ +#define FW_NVM_DATA_OFFSET 3 +#define FW_MAX_READ_BUFFER_SIZE 1024 +#define FW_DISABLE_RXEN_CMD 0xDE +#define FW_DISABLE_RXEN_LEN 0x1 +#define FW_PHY_MGMT_REQ_CMD 0x20 +#define FW_PHY_TOKEN_REQ_CMD 0xA +#define FW_PHY_TOKEN_REQ_LEN 2 +#define FW_PHY_TOKEN_REQ 0 +#define FW_PHY_TOKEN_REL 1 +#define FW_PHY_TOKEN_OK 1 +#define FW_PHY_TOKEN_RETRY 0x80 +#define FW_PHY_TOKEN_DELAY 5 /* milliseconds */ +#define FW_PHY_TOKEN_WAIT 5 /* seconds */ +#define FW_PHY_TOKEN_RETRIES ((FW_PHY_TOKEN_WAIT * 1000) / FW_PHY_TOKEN_DELAY) +#define FW_INT_PHY_REQ_CMD 0xB +#define FW_INT_PHY_REQ_LEN 10 +#define FW_INT_PHY_REQ_READ 0 +#define FW_INT_PHY_REQ_WRITE 1 +#define FW_PHY_ACT_REQ_CMD 5 +#define FW_PHY_ACT_DATA_COUNT 4 +#define FW_PHY_ACT_REQ_LEN (4 + 4 * FW_PHY_ACT_DATA_COUNT) +#define FW_PHY_ACT_INIT_PHY 1 +#define FW_PHY_ACT_SETUP_LINK 2 +#define FW_PHY_ACT_LINK_SPEED_10 (1u << 0) +#define FW_PHY_ACT_LINK_SPEED_100 (1u << 1) 
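+/*
+ * Illustrative sketch, not part of the upstream Intel header: a minimal
+ * example of how the IXGBE_FDIRCMD_* bits defined earlier in this section
+ * are typically OR'ed into a single Flow Director command word for a
+ * TCP filter. The helper name and the exact bit combination are only an
+ * example of using these definitions, not the driver's actual programming
+ * sequence; rx_queue is assumed to be a valid queue index for the field
+ * at IXGBE_FDIRCMD_RX_QUEUE_SHIFT.
+ */
+static inline u32 ixgbe_fdircmd_add_tcp_filter(u8 rx_queue)
+{
+	/* mark the filter valid, select TCP as the L4 type and enable
+	 * steering to a specific receive queue
+	 */
+	u32 fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW |
+		      IXGBE_FDIRCMD_FILTER_UPDATE |
+		      IXGBE_FDIRCMD_FILTER_VALID |
+		      IXGBE_FDIRCMD_L4TYPE_TCP |
+		      IXGBE_FDIRCMD_QUEUE_EN;
+
+	/* place the destination queue index into the RX queue field */
+	fdircmd |= (u32)rx_queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
+
+	return fdircmd;
+}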
+#define FW_PHY_ACT_LINK_SPEED_1G (1u << 2) +#define FW_PHY_ACT_LINK_SPEED_2_5G (1u << 3) +#define FW_PHY_ACT_LINK_SPEED_5G (1u << 4) +#define FW_PHY_ACT_LINK_SPEED_10G (1u << 5) +#define FW_PHY_ACT_LINK_SPEED_20G (1u << 6) +#define FW_PHY_ACT_LINK_SPEED_25G (1u << 7) +#define FW_PHY_ACT_LINK_SPEED_40G (1u << 8) +#define FW_PHY_ACT_LINK_SPEED_50G (1u << 9) +#define FW_PHY_ACT_LINK_SPEED_100G (1u << 10) +#define FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT 16 +#define FW_PHY_ACT_SETUP_LINK_PAUSE_MASK (3u << \ + FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT) +#define FW_PHY_ACT_SETUP_LINK_PAUSE_NONE 0u +#define FW_PHY_ACT_SETUP_LINK_PAUSE_TX 1u +#define FW_PHY_ACT_SETUP_LINK_PAUSE_RX 2u +#define FW_PHY_ACT_SETUP_LINK_PAUSE_RXTX 3u +#define FW_PHY_ACT_SETUP_LINK_LP (1u << 18) +#define FW_PHY_ACT_SETUP_LINK_HP (1u << 19) +#define FW_PHY_ACT_SETUP_LINK_EEE (1u << 20) +#define FW_PHY_ACT_SETUP_LINK_AN (1u << 22) +#define FW_PHY_ACT_SETUP_LINK_RSP_DOWN (1u << 0) +#define FW_PHY_ACT_GET_LINK_INFO 3 +#define FW_PHY_ACT_GET_LINK_INFO_EEE (1u << 19) +#define FW_PHY_ACT_GET_LINK_INFO_FC_TX (1u << 20) +#define FW_PHY_ACT_GET_LINK_INFO_FC_RX (1u << 21) +#define FW_PHY_ACT_GET_LINK_INFO_POWER (1u << 22) +#define FW_PHY_ACT_GET_LINK_INFO_AN_COMPLETE (1u << 24) +#define FW_PHY_ACT_GET_LINK_INFO_TEMP (1u << 25) +#define FW_PHY_ACT_GET_LINK_INFO_LP_FC_TX (1u << 28) +#define FW_PHY_ACT_GET_LINK_INFO_LP_FC_RX (1u << 29) +#define FW_PHY_ACT_FORCE_LINK_DOWN 4 +#define FW_PHY_ACT_FORCE_LINK_DOWN_OFF (1u << 0) +#define FW_PHY_ACT_PHY_SW_RESET 5 +#define FW_PHY_ACT_PHY_HW_RESET 6 +#define FW_PHY_ACT_GET_PHY_INFO 7 +#define FW_PHY_ACT_UD_2 0x1002 +#define FW_PHY_ACT_UD_2_10G_KR_EEE (1u << 6) +#define FW_PHY_ACT_UD_2_10G_KX4_EEE (1u << 5) +#define FW_PHY_ACT_UD_2_1G_KX_EEE (1u << 4) +#define FW_PHY_ACT_UD_2_10G_T_EEE (1u << 3) +#define FW_PHY_ACT_UD_2_1G_T_EEE (1u << 2) +#define FW_PHY_ACT_UD_2_100M_TX_EEE (1u << 1) +#define FW_PHY_ACT_RETRIES 50 +#define FW_PHY_INFO_SPEED_MASK 0xFFFu +#define FW_PHY_INFO_ID_HI_MASK 0xFFFF0000u +#define FW_PHY_INFO_ID_LO_MASK 0x0000FFFFu + +/* Host Interface Command Structures */ + +#ifdef C99 +#pragma pack(push, 1) +#else +#pragma pack (1) +#endif /* C99 */ + +struct ixgbe_hic_hdr { + u8 cmd; + u8 buf_len; + union { + u8 cmd_resv; + u8 ret_status; + } cmd_or_resp; + u8 checksum; +}; + +struct ixgbe_hic_hdr2_req { + u8 cmd; + u8 buf_lenh; + u8 buf_lenl; + u8 checksum; +}; + +struct ixgbe_hic_hdr2_rsp { + u8 cmd; + u8 buf_lenl; + u8 buf_lenh_status; /* 7-5: high bits of buf_len, 4-0: status */ + u8 checksum; +}; + +union ixgbe_hic_hdr2 { + struct ixgbe_hic_hdr2_req req; + struct ixgbe_hic_hdr2_rsp rsp; +}; + +struct ixgbe_hic_drv_info { + struct ixgbe_hic_hdr hdr; + u8 port_num; + u8 ver_sub; + u8 ver_build; + u8 ver_min; + u8 ver_maj; + u8 pad; /* end spacing to ensure length is mult. of dword */ + u16 pad2; /* end spacing to ensure length is mult. 
of dword2 */ +}; + +struct ixgbe_hic_drv_info2 { + struct ixgbe_hic_hdr hdr; + u8 port_num; + u8 ver_sub; + u8 ver_build; + u8 ver_min; + u8 ver_maj; + char driver_string[FW_CEM_DRIVER_VERSION_SIZE]; +}; + +/* These need to be dword aligned */ +struct ixgbe_hic_read_shadow_ram { + union ixgbe_hic_hdr2 hdr; + u32 address; + u16 length; + u16 pad2; + u16 data; + u16 pad3; +}; + +struct ixgbe_hic_write_shadow_ram { + union ixgbe_hic_hdr2 hdr; + u32 address; + u16 length; + u16 pad2; + u16 data; + u16 pad3; +}; + +struct ixgbe_hic_disable_rxen { + struct ixgbe_hic_hdr hdr; + u8 port_number; + u8 pad2; + u16 pad3; +}; + +struct ixgbe_hic_phy_token_req { + struct ixgbe_hic_hdr hdr; + u8 port_number; + u8 command_type; + u16 pad; +}; + +struct ixgbe_hic_internal_phy_req { + struct ixgbe_hic_hdr hdr; + u8 port_number; + u8 command_type; + __be16 address; + u16 rsv1; + __be32 write_data; + u16 pad; +}; + +struct ixgbe_hic_internal_phy_resp { + struct ixgbe_hic_hdr hdr; + __be32 read_data; +}; + +struct ixgbe_hic_phy_activity_req { + struct ixgbe_hic_hdr hdr; + u8 port_number; + u8 pad; + __le16 activity_id; + __be32 data[FW_PHY_ACT_DATA_COUNT]; +}; + +struct ixgbe_hic_phy_activity_resp { + struct ixgbe_hic_hdr hdr; + __be32 data[FW_PHY_ACT_DATA_COUNT]; +}; + +#ifdef C99 +#pragma pack(pop) +#else +#pragma pack() +#endif /* C99 */ + +/* Transmit Descriptor - Legacy */ +struct ixgbe_legacy_tx_desc { + u64 buffer_addr; /* Address of the descriptor's data buffer */ + union { + __le32 data; + struct { + __le16 length; /* Data buffer length */ + u8 cso; /* Checksum offset */ + u8 cmd; /* Descriptor control */ + } flags; + } lower; + union { + __le32 data; + struct { + u8 status; /* Descriptor status */ + u8 css; /* Checksum start */ + __le16 vlan; + } fields; + } upper; +}; + +/* Transmit Descriptor - Advanced */ +union ixgbe_adv_tx_desc { + struct { + __le64 buffer_addr; /* Address of descriptor's data buf */ + __le32 cmd_type_len; + __le32 olinfo_status; + } read; + struct { + __le64 rsvd; /* Reserved */ + __le32 nxtseq_seed; + __le32 status; + } wb; +}; + +/* Receive Descriptor - Legacy */ +struct ixgbe_legacy_rx_desc { + __le64 buffer_addr; /* Address of the descriptor's data buffer */ + __le16 length; /* Length of data DMAed into data buffer */ + __le16 csum; /* Packet checksum */ + u8 status; /* Descriptor status */ + u8 errors; /* Descriptor Errors */ + __le16 vlan; +}; + +/* Receive Descriptor - Advanced */ +union ixgbe_adv_rx_desc { + struct { + __le64 pkt_addr; /* Packet buffer address */ + __le64 hdr_addr; /* Header buffer address */ + } read; + struct { + struct { + union { + __le32 data; + struct { + __le16 pkt_info; /* RSS, Pkt type */ + __le16 hdr_info; /* Splithdr, hdrlen */ + } hs_rss; + } lo_dword; + union { + __le32 rss; /* RSS Hash */ + struct { + __le16 ip_id; /* IP id */ + __le16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; /* ext status/error */ + __le16 length; /* Packet length */ + __le16 vlan; /* VLAN tag */ + } upper; + } wb; /* writeback */ +}; + +/* Context descriptors */ +struct ixgbe_adv_tx_context_desc { + __le32 vlan_macip_lens; + __le32 seqnum_seed; + __le32 type_tucmd_mlhl; + __le32 mss_l4len_idx; +}; + +/* Adv Transmit Descriptor Config Masks */ +#define IXGBE_ADVTXD_DTALEN_MASK 0x0000FFFF /* Data buf length(bytes) */ +#define IXGBE_ADVTXD_MAC_LINKSEC 0x00040000 /* Insert LinkSec */ +#define IXGBE_ADVTXD_MAC_TSTAMP 0x00080000 /* IEEE1588 time stamp */ +#define IXGBE_ADVTXD_IPSEC_SA_INDEX_MASK 0x000003FF /* IPSec SA 
index */ +#define IXGBE_ADVTXD_IPSEC_ESP_LEN_MASK 0x000001FF /* IPSec ESP length */ +#define IXGBE_ADVTXD_DTYP_MASK 0x00F00000 /* DTYP mask */ +#define IXGBE_ADVTXD_DTYP_CTXT 0x00200000 /* Adv Context Desc */ +#define IXGBE_ADVTXD_DTYP_DATA 0x00300000 /* Adv Data Descriptor */ +#define IXGBE_ADVTXD_DCMD_EOP IXGBE_TXD_CMD_EOP /* End of Packet */ +#define IXGBE_ADVTXD_DCMD_IFCS IXGBE_TXD_CMD_IFCS /* Insert FCS */ +#define IXGBE_ADVTXD_DCMD_RS IXGBE_TXD_CMD_RS /* Report Status */ +#define IXGBE_ADVTXD_DCMD_DDTYP_ISCSI 0x10000000 /* DDP hdr type or iSCSI */ +#define IXGBE_ADVTXD_DCMD_DEXT IXGBE_TXD_CMD_DEXT /* Desc ext 1=Adv */ +#define IXGBE_ADVTXD_DCMD_VLE IXGBE_TXD_CMD_VLE /* VLAN pkt enable */ +#define IXGBE_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ +#define IXGBE_ADVTXD_STAT_DD IXGBE_TXD_STAT_DD /* Descriptor Done */ +#define IXGBE_ADVTXD_STAT_SN_CRC 0x00000002 /* NXTSEQ/SEED pres in WB */ +#define IXGBE_ADVTXD_STAT_RSV 0x0000000C /* STA Reserved */ +#define IXGBE_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */ +#define IXGBE_ADVTXD_CC 0x00000080 /* Check Context */ +#define IXGBE_ADVTXD_POPTS_SHIFT 8 /* Adv desc POPTS shift */ +#define IXGBE_ADVTXD_POPTS_IXSM (IXGBE_TXD_POPTS_IXSM << \ + IXGBE_ADVTXD_POPTS_SHIFT) +#define IXGBE_ADVTXD_POPTS_TXSM (IXGBE_TXD_POPTS_TXSM << \ + IXGBE_ADVTXD_POPTS_SHIFT) +#define IXGBE_ADVTXD_POPTS_ISCO_1ST 0x00000000 /* 1st TSO of iSCSI PDU */ +#define IXGBE_ADVTXD_POPTS_ISCO_MDL 0x00000800 /* Middle TSO of iSCSI PDU */ +#define IXGBE_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */ +/* 1st&Last TSO-full iSCSI PDU */ +#define IXGBE_ADVTXD_POPTS_ISCO_FULL 0x00001800 +#define IXGBE_ADVTXD_POPTS_RSV 0x00002000 /* POPTS Reserved */ +#define IXGBE_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ +#define IXGBE_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ +#define IXGBE_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */ +#define IXGBE_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */ +#define IXGBE_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */ +#define IXGBE_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */ +#define IXGBE_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ +#define IXGBE_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */ +#define IXGBE_ADVTXD_TUCMD_L4T_RSV 0x00001800 /* RSV L4 Packet TYPE */ +#define IXGBE_ADVTXD_TUCMD_MKRREQ 0x00002000 /* req Markers and CRC */ +#define IXGBE_ADVTXD_POPTS_IPSEC 0x00000400 /* IPSec offload request */ +#define IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP 0x00002000 /* IPSec Type ESP */ +#define IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN 0x00004000/* ESP Encrypt Enable */ +#define IXGBE_ADVTXT_TUCMD_FCOE 0x00008000 /* FCoE Frame Type */ +#define IXGBE_ADVTXD_FCOEF_EOF_MASK (0x3 << 10) /* FC EOF index */ +#define IXGBE_ADVTXD_FCOEF_SOF ((1 << 2) << 10) /* FC SOF index */ +#define IXGBE_ADVTXD_FCOEF_PARINC ((1 << 3) << 10) /* Rel_Off in F_CTL */ +#define IXGBE_ADVTXD_FCOEF_ORIE ((1 << 4) << 10) /* Orientation End */ +#define IXGBE_ADVTXD_FCOEF_ORIS ((1 << 5) << 10) /* Orientation Start */ +#define IXGBE_ADVTXD_FCOEF_EOF_N (0x0 << 10) /* 00: EOFn */ +#define IXGBE_ADVTXD_FCOEF_EOF_T (0x1 << 10) /* 01: EOFt */ +#define IXGBE_ADVTXD_FCOEF_EOF_NI (0x2 << 10) /* 10: EOFni */ +#define IXGBE_ADVTXD_FCOEF_EOF_A (0x3 << 10) /* 11: EOFa */ +#define IXGBE_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ +#define IXGBE_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ + +#define IXGBE_ADVTXD_OUTER_IPLEN 16 /* Adv ctxt OUTERIPLEN shift */ +#define IXGBE_ADVTXD_TUNNEL_LEN 24 /* 
Adv ctxt TUNNELLEN shift */ +#define IXGBE_ADVTXD_TUNNEL_TYPE_SHIFT 16 /* Adv Tx Desc Tunnel Type shift */ +#define IXGBE_ADVTXD_OUTERIPCS_SHIFT 17 /* Adv Tx Desc OUTERIPCS Shift */ +#define IXGBE_ADVTXD_TUNNEL_TYPE_NVGRE 1 /* Adv Tx Desc Tunnel Type NVGRE */ +/* Adv Tx Desc OUTERIPCS Shift for X550EM_a */ +#define IXGBE_ADVTXD_OUTERIPCS_SHIFT_X550EM_a 26 +/* Autonegotiation advertised speeds */ +typedef u32 ixgbe_autoneg_advertised; +/* Link speed */ +typedef u32 ixgbe_link_speed; +#define IXGBE_LINK_SPEED_UNKNOWN 0 +#define IXGBE_LINK_SPEED_10_FULL 0x0002 +#define IXGBE_LINK_SPEED_100_FULL 0x0008 +#define IXGBE_LINK_SPEED_1GB_FULL 0x0020 +#define IXGBE_LINK_SPEED_2_5GB_FULL 0x0400 +#define IXGBE_LINK_SPEED_5GB_FULL 0x0800 +#define IXGBE_LINK_SPEED_10GB_FULL 0x0080 +#define IXGBE_LINK_SPEED_82598_AUTONEG (IXGBE_LINK_SPEED_1GB_FULL | \ + IXGBE_LINK_SPEED_10GB_FULL) +#define IXGBE_LINK_SPEED_82599_AUTONEG (IXGBE_LINK_SPEED_100_FULL | \ + IXGBE_LINK_SPEED_1GB_FULL | \ + IXGBE_LINK_SPEED_10GB_FULL) + +/* Physical layer type */ +typedef u64 ixgbe_physical_layer; +#define IXGBE_PHYSICAL_LAYER_UNKNOWN 0 +#define IXGBE_PHYSICAL_LAYER_10GBASE_T 0x00001 +#define IXGBE_PHYSICAL_LAYER_1000BASE_T 0x00002 +#define IXGBE_PHYSICAL_LAYER_100BASE_TX 0x00004 +#define IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU 0x00008 +#define IXGBE_PHYSICAL_LAYER_10GBASE_LR 0x00010 +#define IXGBE_PHYSICAL_LAYER_10GBASE_LRM 0x00020 +#define IXGBE_PHYSICAL_LAYER_10GBASE_SR 0x00040 +#define IXGBE_PHYSICAL_LAYER_10GBASE_KX4 0x00080 +#define IXGBE_PHYSICAL_LAYER_10GBASE_CX4 0x00100 +#define IXGBE_PHYSICAL_LAYER_1000BASE_KX 0x00200 +#define IXGBE_PHYSICAL_LAYER_1000BASE_BX 0x00400 +#define IXGBE_PHYSICAL_LAYER_10GBASE_KR 0x00800 +#define IXGBE_PHYSICAL_LAYER_10GBASE_XAUI 0x01000 +#define IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA 0x02000 +#define IXGBE_PHYSICAL_LAYER_1000BASE_SX 0x04000 +#define IXGBE_PHYSICAL_LAYER_10BASE_T 0x08000 +#define IXGBE_PHYSICAL_LAYER_2500BASE_KX 0x10000 + +/* Flow Control Data Sheet defined values + * Calculation and defines taken from 802.1bb Annex O + */ + +/* BitTimes (BT) conversion */ +#define IXGBE_BT2KB(BT) ((BT + (8 * 1024 - 1)) / (8 * 1024)) +#define IXGBE_B2BT(BT) (BT * 8) + +/* Calculate Delay to respond to PFC */ +#define IXGBE_PFC_D 672 + +/* Calculate Cable Delay */ +#define IXGBE_CABLE_DC 5556 /* Delay Copper */ +#define IXGBE_CABLE_DO 5000 /* Delay Optical */ + +/* Calculate Interface Delay X540 */ +#define IXGBE_PHY_DC 25600 /* Delay 10G BASET */ +#define IXGBE_MAC_DC 8192 /* Delay Copper XAUI interface */ +#define IXGBE_XAUI_DC (2 * 2048) /* Delay Copper Phy */ + +#define IXGBE_ID_X540 (IXGBE_MAC_DC + IXGBE_XAUI_DC + IXGBE_PHY_DC) + +/* Calculate Interface Delay 82598, 82599 */ +#define IXGBE_PHY_D 12800 +#define IXGBE_MAC_D 4096 +#define IXGBE_XAUI_D (2 * 1024) + +#define IXGBE_ID (IXGBE_MAC_D + IXGBE_XAUI_D + IXGBE_PHY_D) + +/* Calculate Delay incurred from higher layer */ +#define IXGBE_HD 6144 + +/* Calculate PCI Bus delay for low thresholds */ +#define IXGBE_PCI_DELAY 10000 + +/* Calculate X540 delay value in bit times */ +#define IXGBE_DV_X540(_max_frame_link, _max_frame_tc) \ + ((36 * \ + (IXGBE_B2BT(_max_frame_link) + \ + IXGBE_PFC_D + \ + (2 * IXGBE_CABLE_DC) + \ + (2 * IXGBE_ID_X540) + \ + IXGBE_HD) / 25 + 1) + \ + 2 * IXGBE_B2BT(_max_frame_tc)) + +/* Calculate 82599, 82598 delay value in bit times */ +#define IXGBE_DV(_max_frame_link, _max_frame_tc) \ + ((36 * \ + (IXGBE_B2BT(_max_frame_link) + \ + IXGBE_PFC_D + \ + (2 * IXGBE_CABLE_DC) + \ + (2 * IXGBE_ID) + \ + IXGBE_HD) / 25 + 1) + \ + 2 
* IXGBE_B2BT(_max_frame_tc)) + +/* Calculate low threshold delay values */ +#define IXGBE_LOW_DV_X540(_max_frame_tc) \ + (2 * IXGBE_B2BT(_max_frame_tc) + \ + (36 * IXGBE_PCI_DELAY / 25) + 1) +#define IXGBE_LOW_DV(_max_frame_tc) \ + (2 * IXGBE_LOW_DV_X540(_max_frame_tc)) + +/* Software ATR hash keys */ +#define IXGBE_ATR_BUCKET_HASH_KEY 0x3DAD14E2 +#define IXGBE_ATR_SIGNATURE_HASH_KEY 0x174D3614 + +/* Software ATR input stream values and masks */ +#define IXGBE_ATR_HASH_MASK 0x7fff +#define IXGBE_ATR_L4TYPE_MASK 0x3 +#define IXGBE_ATR_L4TYPE_UDP 0x1 +#define IXGBE_ATR_L4TYPE_TCP 0x2 +#define IXGBE_ATR_L4TYPE_SCTP 0x3 +#define IXGBE_ATR_L4TYPE_IPV6_MASK 0x4 +#define IXGBE_ATR_L4TYPE_TUNNEL_MASK 0x10 +enum ixgbe_atr_flow_type { + IXGBE_ATR_FLOW_TYPE_IPV4 = 0x0, + IXGBE_ATR_FLOW_TYPE_UDPV4 = 0x1, + IXGBE_ATR_FLOW_TYPE_TCPV4 = 0x2, + IXGBE_ATR_FLOW_TYPE_SCTPV4 = 0x3, + IXGBE_ATR_FLOW_TYPE_IPV6 = 0x4, + IXGBE_ATR_FLOW_TYPE_UDPV6 = 0x5, + IXGBE_ATR_FLOW_TYPE_TCPV6 = 0x6, + IXGBE_ATR_FLOW_TYPE_SCTPV6 = 0x7, + IXGBE_ATR_FLOW_TYPE_TUNNELED_IPV4 = 0x10, + IXGBE_ATR_FLOW_TYPE_TUNNELED_UDPV4 = 0x11, + IXGBE_ATR_FLOW_TYPE_TUNNELED_TCPV4 = 0x12, + IXGBE_ATR_FLOW_TYPE_TUNNELED_SCTPV4 = 0x13, + IXGBE_ATR_FLOW_TYPE_TUNNELED_IPV6 = 0x14, + IXGBE_ATR_FLOW_TYPE_TUNNELED_UDPV6 = 0x15, + IXGBE_ATR_FLOW_TYPE_TUNNELED_TCPV6 = 0x16, + IXGBE_ATR_FLOW_TYPE_TUNNELED_SCTPV6 = 0x17, +}; + +/* Flow Director ATR input struct. */ +union ixgbe_atr_input { + /* + * Byte layout in order, all values with MSB first: + * + * vm_pool - 1 byte + * flow_type - 1 byte + * vlan_id - 2 bytes + * src_ip - 16 bytes + * inner_mac - 6 bytes + * cloud_mode - 2 bytes + * tni_vni - 4 bytes + * dst_ip - 16 bytes + * src_port - 2 bytes + * dst_port - 2 bytes + * flex_bytes - 2 bytes + * bkt_hash - 2 bytes + */ + struct { + u8 vm_pool; + u8 flow_type; + __be16 vlan_id; + __be32 dst_ip[4]; + __be32 src_ip[4]; + u8 inner_mac[6]; + __be16 tunnel_type; + __be32 tni_vni; + __be16 src_port; + __be16 dst_port; + __be16 flex_bytes; + __be16 bkt_hash; + } formatted; + __be32 dword_stream[14]; +}; + +/* Flow Director compressed ATR hash input struct */ +union ixgbe_atr_hash_dword { + struct { + u8 vm_pool; + u8 flow_type; + __be16 vlan_id; + } formatted; + __be32 ip; + struct { + __be16 src; + __be16 dst; + } port; + __be16 flex_bytes; + __be32 dword; +}; + + +#define IXGBE_MVALS_INIT(m) \ + IXGBE_CAT(EEC, m), \ + IXGBE_CAT(FLA, m), \ + IXGBE_CAT(GRC, m), \ + IXGBE_CAT(SRAMREL, m), \ + IXGBE_CAT(FACTPS, m), \ + IXGBE_CAT(SWSM, m), \ + IXGBE_CAT(SWFW_SYNC, m), \ + IXGBE_CAT(FWSM, m), \ + IXGBE_CAT(SDP0_GPIEN, m), \ + IXGBE_CAT(SDP1_GPIEN, m), \ + IXGBE_CAT(SDP2_GPIEN, m), \ + IXGBE_CAT(EICR_GPI_SDP0, m), \ + IXGBE_CAT(EICR_GPI_SDP1, m), \ + IXGBE_CAT(EICR_GPI_SDP2, m), \ + IXGBE_CAT(CIAA, m), \ + IXGBE_CAT(CIAD, m), \ + IXGBE_CAT(I2C_CLK_IN, m), \ + IXGBE_CAT(I2C_CLK_OUT, m), \ + IXGBE_CAT(I2C_DATA_IN, m), \ + IXGBE_CAT(I2C_DATA_OUT, m), \ + IXGBE_CAT(I2C_DATA_OE_N_EN, m), \ + IXGBE_CAT(I2C_BB_EN, m), \ + IXGBE_CAT(I2C_CLK_OE_N_EN, m), \ + IXGBE_CAT(I2CCTL, m) + +enum ixgbe_mvals { + IXGBE_MVALS_INIT(_IDX), + IXGBE_MVALS_IDX_LIMIT +}; + +/* + * Unavailable: The FCoE Boot Option ROM is not present in the flash. + * Disabled: Present; boot order is not set for any targets on the port. + * Enabled: Present; boot order is set for at least one target on the port. 
+ */ +enum ixgbe_fcoe_boot_status { + ixgbe_fcoe_bootstatus_disabled = 0, + ixgbe_fcoe_bootstatus_enabled = 1, + ixgbe_fcoe_bootstatus_unavailable = 0xFFFF +}; + +enum ixgbe_eeprom_type { + ixgbe_eeprom_uninitialized = 0, + ixgbe_eeprom_spi, + ixgbe_flash, + ixgbe_eeprom_none /* No NVM support */ +}; + +enum ixgbe_mac_type { + ixgbe_mac_unknown = 0, + ixgbe_mac_82598EB, + ixgbe_mac_82599EB, + ixgbe_mac_82599_vf, + ixgbe_mac_X540, + ixgbe_mac_X540_vf, + ixgbe_mac_X550, + ixgbe_mac_X550EM_x, + ixgbe_mac_X550EM_a, + ixgbe_mac_X550_vf, + ixgbe_mac_X550EM_x_vf, + ixgbe_mac_X550EM_a_vf, + ixgbe_num_macs +}; + +enum ixgbe_phy_type { + ixgbe_phy_unknown = 0, + ixgbe_phy_none, + ixgbe_phy_tn, + ixgbe_phy_aq, + ixgbe_phy_x550em_kr, + ixgbe_phy_x550em_kx4, + ixgbe_phy_x550em_xfi, + ixgbe_phy_x550em_ext_t, + ixgbe_phy_ext_1g_t, + ixgbe_phy_cu_unknown, + ixgbe_phy_qt, + ixgbe_phy_xaui, + ixgbe_phy_nl, + ixgbe_phy_sfp_passive_tyco, + ixgbe_phy_sfp_passive_unknown, + ixgbe_phy_sfp_active_unknown, + ixgbe_phy_sfp_avago, + ixgbe_phy_sfp_ftl, + ixgbe_phy_sfp_ftl_active, + ixgbe_phy_sfp_unknown, + ixgbe_phy_sfp_intel, + ixgbe_phy_qsfp_passive_unknown, + ixgbe_phy_qsfp_active_unknown, + ixgbe_phy_qsfp_intel, + ixgbe_phy_qsfp_unknown, + ixgbe_phy_sfp_unsupported, /*Enforce bit set with unsupported module*/ + ixgbe_phy_sgmii, + ixgbe_phy_fw, + ixgbe_phy_generic +}; + +/* + * SFP+ module type IDs: + * + * ID Module Type + * ============= + * 0 SFP_DA_CU + * 1 SFP_SR + * 2 SFP_LR + * 3 SFP_DA_CU_CORE0 - 82599-specific + * 4 SFP_DA_CU_CORE1 - 82599-specific + * 5 SFP_SR/LR_CORE0 - 82599-specific + * 6 SFP_SR/LR_CORE1 - 82599-specific + */ +enum ixgbe_sfp_type { + ixgbe_sfp_type_da_cu = 0, + ixgbe_sfp_type_sr = 1, + ixgbe_sfp_type_lr = 2, + ixgbe_sfp_type_da_cu_core0 = 3, + ixgbe_sfp_type_da_cu_core1 = 4, + ixgbe_sfp_type_srlr_core0 = 5, + ixgbe_sfp_type_srlr_core1 = 6, + ixgbe_sfp_type_da_act_lmt_core0 = 7, + ixgbe_sfp_type_da_act_lmt_core1 = 8, + ixgbe_sfp_type_1g_cu_core0 = 9, + ixgbe_sfp_type_1g_cu_core1 = 10, + ixgbe_sfp_type_1g_sx_core0 = 11, + ixgbe_sfp_type_1g_sx_core1 = 12, + ixgbe_sfp_type_1g_lx_core0 = 13, + ixgbe_sfp_type_1g_lx_core1 = 14, + ixgbe_sfp_type_1g_lha_core0 = 15, + ixgbe_sfp_type_1g_lha_core1 = 16, + ixgbe_sfp_type_not_present = 0xFFFE, + ixgbe_sfp_type_unknown = 0xFFFF +}; + +enum ixgbe_media_type { + ixgbe_media_type_unknown = 0, + ixgbe_media_type_fiber, + ixgbe_media_type_fiber_qsfp, + ixgbe_media_type_copper, + ixgbe_media_type_backplane, + ixgbe_media_type_cx4, + ixgbe_media_type_virtual +}; + +/* Flow Control Settings */ +enum ixgbe_fc_mode { + ixgbe_fc_none = 0, + ixgbe_fc_rx_pause, + ixgbe_fc_tx_pause, + ixgbe_fc_full, + ixgbe_fc_default +}; + +/* Smart Speed Settings */ +#define IXGBE_SMARTSPEED_MAX_RETRIES 3 +enum ixgbe_smart_speed { + ixgbe_smart_speed_auto = 0, + ixgbe_smart_speed_on, + ixgbe_smart_speed_off +}; + +/* PCI bus types */ +enum ixgbe_bus_type { + ixgbe_bus_type_unknown = 0, + ixgbe_bus_type_pci, + ixgbe_bus_type_pcix, + ixgbe_bus_type_pci_express, + ixgbe_bus_type_internal, + ixgbe_bus_type_reserved +}; + +/* PCI bus speeds */ +enum ixgbe_bus_speed { + ixgbe_bus_speed_unknown = 0, + ixgbe_bus_speed_33 = 33, + ixgbe_bus_speed_66 = 66, + ixgbe_bus_speed_100 = 100, + ixgbe_bus_speed_120 = 120, + ixgbe_bus_speed_133 = 133, + ixgbe_bus_speed_2500 = 2500, + ixgbe_bus_speed_5000 = 5000, + ixgbe_bus_speed_8000 = 8000, + ixgbe_bus_speed_reserved +}; + +/* PCI bus widths */ +enum ixgbe_bus_width { + ixgbe_bus_width_unknown = 0, + ixgbe_bus_width_pcie_x1 = 1, + 
ixgbe_bus_width_pcie_x2 = 2, + ixgbe_bus_width_pcie_x4 = 4, + ixgbe_bus_width_pcie_x8 = 8, + ixgbe_bus_width_32 = 32, + ixgbe_bus_width_64 = 64, + ixgbe_bus_width_reserved +}; + +struct ixgbe_addr_filter_info { + u32 num_mc_addrs; + u32 rar_used_count; + u32 mta_in_use; + u32 overflow_promisc; + bool user_set_promisc; +}; + +/* Bus parameters */ +struct ixgbe_bus_info { + enum ixgbe_bus_speed speed; + enum ixgbe_bus_width width; + enum ixgbe_bus_type type; + + u16 func; + u8 lan_id; + u16 instance_id; +}; + +/* Flow control parameters */ +struct ixgbe_fc_info { + u32 high_water[IXGBE_DCB_MAX_TRAFFIC_CLASS]; /* Flow Ctrl High-water */ + u32 low_water[IXGBE_DCB_MAX_TRAFFIC_CLASS]; /* Flow Ctrl Low-water */ + u16 pause_time; /* Flow Control Pause timer */ + bool send_xon; /* Flow control send XON */ + bool strict_ieee; /* Strict IEEE mode */ + bool disable_fc_autoneg; /* Do not autonegotiate FC */ + bool fc_was_autonegged; /* Is current_mode the result of autonegging? */ + enum ixgbe_fc_mode current_mode; /* FC mode in effect */ + enum ixgbe_fc_mode requested_mode; /* FC mode requested by caller */ +}; + +/* Statistics counters collected by the MAC */ +struct ixgbe_hw_stats { + u64 crcerrs; + u64 illerrc; + u64 errbc; + u64 mspdc; + u64 mpctotal; + u64 mpc[8]; + u64 mlfc; + u64 mrfc; + u64 rlec; + u64 lxontxc; + u64 lxonrxc; + u64 lxofftxc; + u64 lxoffrxc; + u64 pxontxc[8]; + u64 pxonrxc[8]; + u64 pxofftxc[8]; + u64 pxoffrxc[8]; + u64 prc64; + u64 prc127; + u64 prc255; + u64 prc511; + u64 prc1023; + u64 prc1522; + u64 gprc; + u64 bprc; + u64 mprc; + u64 gptc; + u64 gorc; + u64 gotc; + u64 rnbc[8]; + u64 ruc; + u64 rfc; + u64 roc; + u64 rjc; + u64 mngprc; + u64 mngpdc; + u64 mngptc; + u64 tor; + u64 tpr; + u64 tpt; + u64 ptc64; + u64 ptc127; + u64 ptc255; + u64 ptc511; + u64 ptc1023; + u64 ptc1522; + u64 mptc; + u64 bptc; + u64 xec; + u64 qprc[16]; + u64 qptc[16]; + u64 qbrc[16]; + u64 qbtc[16]; + u64 qprdc[16]; + u64 pxon2offc[8]; + u64 fdirustat_add; + u64 fdirustat_remove; + u64 fdirfstat_fadd; + u64 fdirfstat_fremove; + u64 fdirmatch; + u64 fdirmiss; + u64 fccrc; + u64 fclast; + u64 fcoerpdc; + u64 fcoeprc; + u64 fcoeptc; + u64 fcoedwrc; + u64 fcoedwtc; + u64 fcoe_noddp; + u64 fcoe_noddp_ext_buff; + u64 ldpcec; + u64 pcrc8ec; + u64 b2ospc; + u64 b2ogprc; + u64 o2bgptc; + u64 o2bspc; +}; + +/* forward declaration */ +struct ixgbe_hw; + +/* iterator type for walking multicast address lists */ +typedef u8* (*ixgbe_mc_addr_itr) (struct ixgbe_hw *hw, u8 **mc_addr_ptr, + u32 *vmdq); + +/* Function pointer table */ +struct ixgbe_eeprom_operations { + s32 (*init_params)(struct ixgbe_hw *); + s32 (*read)(struct ixgbe_hw *, u16, u16 *); + s32 (*read_buffer)(struct ixgbe_hw *, u16, u16, u16 *); + s32 (*write)(struct ixgbe_hw *, u16, u16); + s32 (*write_buffer)(struct ixgbe_hw *, u16, u16, u16 *); + s32 (*validate_checksum)(struct ixgbe_hw *, u16 *); + s32 (*update_checksum)(struct ixgbe_hw *); + s32 (*calc_checksum)(struct ixgbe_hw *); +}; + +struct ixgbe_mac_operations { + s32 (*init_hw)(struct ixgbe_hw *); + s32 (*reset_hw)(struct ixgbe_hw *); + s32 (*start_hw)(struct ixgbe_hw *); + s32 (*clear_hw_cntrs)(struct ixgbe_hw *); + void (*enable_relaxed_ordering)(struct ixgbe_hw *); + enum ixgbe_media_type (*get_media_type)(struct ixgbe_hw *); + u64 (*get_supported_physical_layer)(struct ixgbe_hw *); + s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *); + s32 (*get_san_mac_addr)(struct ixgbe_hw *, u8 *); + s32 (*set_san_mac_addr)(struct ixgbe_hw *, u8 *); + s32 (*get_device_caps)(struct ixgbe_hw *, u16 *); + 
s32 (*get_wwn_prefix)(struct ixgbe_hw *, u16 *, u16 *); + s32 (*get_fcoe_boot_status)(struct ixgbe_hw *, u16 *); + s32 (*stop_adapter)(struct ixgbe_hw *); + s32 (*get_bus_info)(struct ixgbe_hw *); + void (*set_lan_id)(struct ixgbe_hw *); + s32 (*read_analog_reg8)(struct ixgbe_hw*, u32, u8*); + s32 (*write_analog_reg8)(struct ixgbe_hw*, u32, u8); + s32 (*setup_sfp)(struct ixgbe_hw *); + s32 (*enable_rx_dma)(struct ixgbe_hw *, u32); + s32 (*disable_sec_rx_path)(struct ixgbe_hw *); + s32 (*enable_sec_rx_path)(struct ixgbe_hw *); + s32 (*acquire_swfw_sync)(struct ixgbe_hw *, u32); + void (*release_swfw_sync)(struct ixgbe_hw *, u32); + void (*init_swfw_sync)(struct ixgbe_hw *); + s32 (*prot_autoc_read)(struct ixgbe_hw *, bool *, u32 *); + s32 (*prot_autoc_write)(struct ixgbe_hw *, u32, bool); + s32 (*negotiate_api_version)(struct ixgbe_hw *hw, int api); + + /* Link */ + void (*disable_tx_laser)(struct ixgbe_hw *); + void (*enable_tx_laser)(struct ixgbe_hw *); + void (*flap_tx_laser)(struct ixgbe_hw *); + s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool); + s32 (*setup_mac_link)(struct ixgbe_hw *, ixgbe_link_speed, bool); + s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool); + s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *, + bool *); + void (*set_rate_select_speed)(struct ixgbe_hw *, ixgbe_link_speed); + + /* Packet Buffer manipulation */ + void (*setup_rxpba)(struct ixgbe_hw *, int, u32, int); + + /* LED */ + s32 (*led_on)(struct ixgbe_hw *, u32); + s32 (*led_off)(struct ixgbe_hw *, u32); + s32 (*blink_led_start)(struct ixgbe_hw *, u32); + s32 (*blink_led_stop)(struct ixgbe_hw *, u32); + s32 (*init_led_link_act)(struct ixgbe_hw *); + + /* RAR, Multicast, VLAN */ + s32 (*set_rar)(struct ixgbe_hw *, u32, u8 *, u32, u32); + s32 (*set_uc_addr)(struct ixgbe_hw *, u32, u8 *); + s32 (*clear_rar)(struct ixgbe_hw *, u32); + s32 (*insert_mac_addr)(struct ixgbe_hw *, u8 *, u32); + s32 (*set_vmdq)(struct ixgbe_hw *, u32, u32); + s32 (*set_vmdq_san_mac)(struct ixgbe_hw *, u32); + s32 (*clear_vmdq)(struct ixgbe_hw *, u32, u32); + s32 (*init_rx_addrs)(struct ixgbe_hw *); + s32 (*update_uc_addr_list)(struct ixgbe_hw *, u8 *, u32, + ixgbe_mc_addr_itr); + s32 (*update_mc_addr_list)(struct ixgbe_hw *, u8 *, u32, + ixgbe_mc_addr_itr, bool clear); + s32 (*enable_mc)(struct ixgbe_hw *); + s32 (*disable_mc)(struct ixgbe_hw *); + s32 (*clear_vfta)(struct ixgbe_hw *); + s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool, bool); + s32 (*set_vlvf)(struct ixgbe_hw *, u32, u32, bool, u32 *, u32, + bool); + s32 (*init_uta_tables)(struct ixgbe_hw *); + void (*set_mac_anti_spoofing)(struct ixgbe_hw *, bool, int); + void (*set_vlan_anti_spoofing)(struct ixgbe_hw *, bool, int); + s32 (*update_xcast_mode)(struct ixgbe_hw *, int); + s32 (*set_rlpml)(struct ixgbe_hw *, u16); + + /* Flow Control */ + s32 (*fc_enable)(struct ixgbe_hw *); + s32 (*setup_fc)(struct ixgbe_hw *); + void (*fc_autoneg)(struct ixgbe_hw *); + + /* Manageability interface */ + s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8, u16, + const char *); + s32 (*get_thermal_sensor_data)(struct ixgbe_hw *); + s32 (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw); + void (*get_rtrup2tc)(struct ixgbe_hw *hw, u8 *map); + void (*disable_rx)(struct ixgbe_hw *hw); + void (*enable_rx)(struct ixgbe_hw *hw); + void (*set_source_address_pruning)(struct ixgbe_hw *, bool, + unsigned int); + void (*set_ethertype_anti_spoofing)(struct ixgbe_hw *, bool, int); + s32 (*dmac_update_tcs)(struct ixgbe_hw *hw); + s32 
(*dmac_config_tcs)(struct ixgbe_hw *hw); + s32 (*dmac_config)(struct ixgbe_hw *hw); + s32 (*setup_eee)(struct ixgbe_hw *hw, bool enable_eee); + s32 (*read_iosf_sb_reg)(struct ixgbe_hw *, u32, u32, u32 *); + s32 (*write_iosf_sb_reg)(struct ixgbe_hw *, u32, u32, u32); + void (*disable_mdd)(struct ixgbe_hw *hw); + void (*enable_mdd)(struct ixgbe_hw *hw); + void (*mdd_event)(struct ixgbe_hw *hw, u32 *vf_bitmap); + void (*restore_mdd_vf)(struct ixgbe_hw *hw, u32 vf); + bool (*fw_recovery_mode)(struct ixgbe_hw *hw); +}; + +struct ixgbe_phy_operations { + s32 (*identify)(struct ixgbe_hw *); + s32 (*identify_sfp)(struct ixgbe_hw *); + s32 (*init)(struct ixgbe_hw *); + s32 (*reset)(struct ixgbe_hw *); + s32 (*read_reg)(struct ixgbe_hw *, u32, u32, u16 *); + s32 (*write_reg)(struct ixgbe_hw *, u32, u32, u16); + s32 (*read_reg_mdi)(struct ixgbe_hw *, u32, u32, u16 *); + s32 (*write_reg_mdi)(struct ixgbe_hw *, u32, u32, u16); + s32 (*setup_link)(struct ixgbe_hw *); + s32 (*setup_internal_link)(struct ixgbe_hw *); + s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool); + s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *); + s32 (*get_firmware_version)(struct ixgbe_hw *, u16 *); + s32 (*read_i2c_byte)(struct ixgbe_hw *, u8, u8, u8 *); + s32 (*write_i2c_byte)(struct ixgbe_hw *, u8, u8, u8); + s32 (*read_i2c_sff8472)(struct ixgbe_hw *, u8 , u8 *); + s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *); + s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8); + void (*i2c_bus_clear)(struct ixgbe_hw *); + s32 (*check_overtemp)(struct ixgbe_hw *); + s32 (*set_phy_power)(struct ixgbe_hw *, bool on); + s32 (*enter_lplu)(struct ixgbe_hw *); + s32 (*handle_lasi)(struct ixgbe_hw *hw); + s32 (*read_i2c_byte_unlocked)(struct ixgbe_hw *, u8 offset, u8 addr, + u8 *value); + s32 (*write_i2c_byte_unlocked)(struct ixgbe_hw *, u8 offset, u8 addr, + u8 value); +}; + +struct ixgbe_link_operations { + s32 (*read_link)(struct ixgbe_hw *, u8 addr, u16 reg, u16 *val); + s32 (*read_link_unlocked)(struct ixgbe_hw *, u8 addr, u16 reg, + u16 *val); + s32 (*write_link)(struct ixgbe_hw *, u8 addr, u16 reg, u16 val); + s32 (*write_link_unlocked)(struct ixgbe_hw *, u8 addr, u16 reg, + u16 val); +}; + +struct ixgbe_link_info { + struct ixgbe_link_operations ops; + u8 addr; +}; + +struct ixgbe_eeprom_info { + struct ixgbe_eeprom_operations ops; + enum ixgbe_eeprom_type type; + u32 semaphore_delay; + u16 word_size; + u16 address_bits; + u16 word_page_size; + u16 ctrl_word_3; +}; + +#define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01 +struct ixgbe_mac_info { + struct ixgbe_mac_operations ops; + enum ixgbe_mac_type type; + u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS]; + u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS]; + u8 san_addr[IXGBE_ETH_LENGTH_OF_ADDRESS]; + /* prefix for World Wide Node Name (WWNN) */ + u16 wwnn_prefix; + /* prefix for World Wide Port Name (WWPN) */ + u16 wwpn_prefix; +#define IXGBE_MAX_MTA 128 + u32 mta_shadow[IXGBE_MAX_MTA]; + s32 mc_filter_type; + u32 mcft_size; + u32 vft_size; + u32 num_rar_entries; + u32 rar_highwater; + u32 rx_pb_size; + u32 max_tx_queues; + u32 max_rx_queues; + u32 orig_autoc; + u8 san_mac_rar_index; + bool get_link_status; + u32 orig_autoc2; + u16 max_msix_vectors; + bool arc_subsystem_valid; + bool orig_link_settings_stored; + bool autotry_restart; + u8 flags; + struct ixgbe_thermal_sensor_data thermal_sensor_data; + bool thermal_sensor_enabled; + struct ixgbe_dmac_config dmac_config; + bool set_lben; + u32 max_link_up_time; + u8 led_link_act; +}; + +struct ixgbe_phy_info { + struct 
ixgbe_phy_operations ops; + enum ixgbe_phy_type type; + u32 addr; + u32 id; + enum ixgbe_sfp_type sfp_type; + bool sfp_setup_needed; + u32 revision; + enum ixgbe_media_type media_type; + u32 phy_semaphore_mask; + bool reset_disable; + ixgbe_autoneg_advertised autoneg_advertised; + ixgbe_link_speed speeds_supported; + ixgbe_link_speed eee_speeds_supported; + ixgbe_link_speed eee_speeds_advertised; + enum ixgbe_smart_speed smart_speed; + bool smart_speed_active; + bool multispeed_fiber; + bool reset_if_overtemp; + bool qsfp_shared_i2c_bus; + u32 nw_mng_if_sel; +}; + +#include "ixgbe_mbx.h" + +struct ixgbe_mbx_operations { + void (*init_params)(struct ixgbe_hw *hw); + s32 (*read)(struct ixgbe_hw *, u32 *, u16, u16); + s32 (*write)(struct ixgbe_hw *, u32 *, u16, u16); + s32 (*read_posted)(struct ixgbe_hw *, u32 *, u16, u16); + s32 (*write_posted)(struct ixgbe_hw *, u32 *, u16, u16); + s32 (*check_for_msg)(struct ixgbe_hw *, u16); + s32 (*check_for_ack)(struct ixgbe_hw *, u16); + s32 (*check_for_rst)(struct ixgbe_hw *, u16); +}; + +struct ixgbe_mbx_stats { + u32 msgs_tx; + u32 msgs_rx; + + u32 acks; + u32 reqs; + u32 rsts; +}; + +struct ixgbe_mbx_info { + struct ixgbe_mbx_operations ops; + struct ixgbe_mbx_stats stats; + u32 timeout; + u32 usec_delay; + u32 v2p_mailbox; + u16 size; +}; + +struct ixgbe_hw { + u8 IOMEM *hw_addr; + void *back; + struct ixgbe_mac_info mac; + struct ixgbe_addr_filter_info addr_ctrl; + struct ixgbe_fc_info fc; + struct ixgbe_phy_info phy; + struct ixgbe_link_info link; + struct ixgbe_eeprom_info eeprom; + struct ixgbe_bus_info bus; + struct ixgbe_mbx_info mbx; + const u32 *mvals; + u16 device_id; + u16 vendor_id; + u16 subsystem_device_id; + u16 subsystem_vendor_id; + u8 revision_id; + bool adapter_stopped; + int api_version; + bool force_full_reset; + bool allow_unsupported_sfp; + bool wol_enabled; + bool need_crosstalk_fix; +}; + +#define ixgbe_call_func(hw, func, params, error) \ + (func != NULL) ? 
func params : error + + +/* Error Codes */ +#define IXGBE_SUCCESS 0 +#define IXGBE_ERR_EEPROM -1 +#define IXGBE_ERR_EEPROM_CHECKSUM -2 +#define IXGBE_ERR_PHY -3 +#define IXGBE_ERR_CONFIG -4 +#define IXGBE_ERR_PARAM -5 +#define IXGBE_ERR_MAC_TYPE -6 +#define IXGBE_ERR_UNKNOWN_PHY -7 +#define IXGBE_ERR_LINK_SETUP -8 +#define IXGBE_ERR_ADAPTER_STOPPED -9 +#define IXGBE_ERR_INVALID_MAC_ADDR -10 +#define IXGBE_ERR_DEVICE_NOT_SUPPORTED -11 +#define IXGBE_ERR_MASTER_REQUESTS_PENDING -12 +#define IXGBE_ERR_INVALID_LINK_SETTINGS -13 +#define IXGBE_ERR_AUTONEG_NOT_COMPLETE -14 +#define IXGBE_ERR_RESET_FAILED -15 +#define IXGBE_ERR_SWFW_SYNC -16 +#define IXGBE_ERR_PHY_ADDR_INVALID -17 +#define IXGBE_ERR_I2C -18 +#define IXGBE_ERR_SFP_NOT_SUPPORTED -19 +#define IXGBE_ERR_SFP_NOT_PRESENT -20 +#define IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT -21 +#define IXGBE_ERR_NO_SAN_ADDR_PTR -22 +#define IXGBE_ERR_FDIR_REINIT_FAILED -23 +#define IXGBE_ERR_EEPROM_VERSION -24 +#define IXGBE_ERR_NO_SPACE -25 +#define IXGBE_ERR_OVERTEMP -26 +#define IXGBE_ERR_FC_NOT_NEGOTIATED -27 +#define IXGBE_ERR_FC_NOT_SUPPORTED -28 +#define IXGBE_ERR_SFP_SETUP_NOT_COMPLETE -30 +#define IXGBE_ERR_PBA_SECTION -31 +#define IXGBE_ERR_INVALID_ARGUMENT -32 +#define IXGBE_ERR_HOST_INTERFACE_COMMAND -33 +#define IXGBE_ERR_OUT_OF_MEM -34 +#define IXGBE_ERR_FEATURE_NOT_SUPPORTED -36 +#define IXGBE_ERR_EEPROM_PROTECTED_REGION -37 +#define IXGBE_ERR_FDIR_CMD_INCOMPLETE -38 +#define IXGBE_ERR_FW_RESP_INVALID -39 +#define IXGBE_ERR_TOKEN_RETRY -40 + +#define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF + +#define IXGBE_FUSES0_GROUP(_i) (0x11158 + ((_i) * 4)) +#define IXGBE_FUSES0_300MHZ (1 << 5) +#define IXGBE_FUSES0_REV_MASK (3 << 6) + +#define IXGBE_KRM_PORT_CAR_GEN_CTRL(P) ((P) ? 0x8010 : 0x4010) +#define IXGBE_KRM_LINK_S1(P) ((P) ? 0x8200 : 0x4200) +#define IXGBE_KRM_LINK_CTRL_1(P) ((P) ? 0x820C : 0x420C) +#define IXGBE_KRM_AN_CNTL_1(P) ((P) ? 0x822C : 0x422C) +#define IXGBE_KRM_AN_CNTL_4(P) ((P) ? 0x8238 : 0x4238) +#define IXGBE_KRM_AN_CNTL_8(P) ((P) ? 0x8248 : 0x4248) +#define IXGBE_KRM_PCS_KX_AN(P) ((P) ? 0x9918 : 0x5918) +#define IXGBE_KRM_PCS_KX_AN_LP(P) ((P) ? 0x991C : 0x591C) +#define IXGBE_KRM_SGMII_CTRL(P) ((P) ? 0x82A0 : 0x42A0) +#define IXGBE_KRM_LP_BASE_PAGE_HIGH(P) ((P) ? 0x836C : 0x436C) +#define IXGBE_KRM_DSP_TXFFE_STATE_4(P) ((P) ? 0x8634 : 0x4634) +#define IXGBE_KRM_DSP_TXFFE_STATE_5(P) ((P) ? 0x8638 : 0x4638) +#define IXGBE_KRM_RX_TRN_LINKUP_CTRL(P) ((P) ? 0x8B00 : 0x4B00) +#define IXGBE_KRM_PMD_DFX_BURNIN(P) ((P) ? 0x8E00 : 0x4E00) +#define IXGBE_KRM_PMD_FLX_MASK_ST20(P) ((P) ? 0x9054 : 0x5054) +#define IXGBE_KRM_TX_COEFF_CTRL_1(P) ((P) ? 0x9520 : 0x5520) +#define IXGBE_KRM_RX_ANA_CTL(P) ((P) ? 
0x9A00 : 0x5A00) + +#define IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_DA ~(0x3 << 20) +#define IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_SR (1u << 20) +#define IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_LR (0x2 << 20) +#define IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN (1u << 25) +#define IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN (1u << 26) +#define IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN (1u << 27) +#define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_10M ~(0x7 << 28) +#define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_100M (1u << 28) +#define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_1G (0x2 << 28) +#define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_10G (0x3 << 28) +#define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_AN (0x4 << 28) +#define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_2_5G (0x7 << 28) +#define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK (0x7 << 28) +#define IXGBE_KRM_PMD_FLX_MASK_ST20_FW_AN_RESTART (1u << 31) + +#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B (1 << 9) +#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS (1 << 11) + +#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK (0x7 << 8) +#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G (2 << 8) +#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G (4 << 8) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN (1 << 12) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN (1 << 13) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_FEC_REQ (1 << 14) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC (1 << 15) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX (1 << 16) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR (1 << 18) +#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KX (1 << 24) +#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KR (1 << 26) +#define IXGBE_KRM_LINK_S1_MAC_AN_COMPLETE (1 << 28) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE (1 << 29) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART (1 << 31) + +#define IXGBE_KRM_AN_CNTL_1_SYM_PAUSE (1 << 28) +#define IXGBE_KRM_AN_CNTL_1_ASM_PAUSE (1 << 29) +#define IXGBE_KRM_PCS_KX_AN_SYM_PAUSE (1 << 1) +#define IXGBE_KRM_PCS_KX_AN_ASM_PAUSE (1 << 2) +#define IXGBE_KRM_PCS_KX_AN_LP_SYM_PAUSE (1 << 2) +#define IXGBE_KRM_PCS_KX_AN_LP_ASM_PAUSE (1 << 3) +#define IXGBE_KRM_AN_CNTL_4_ECSR_AN37_OVER_73 (1 << 29) +#define IXGBE_KRM_AN_CNTL_8_LINEAR (1 << 0) +#define IXGBE_KRM_AN_CNTL_8_LIMITING (1 << 1) + +#define IXGBE_KRM_LP_BASE_PAGE_HIGH_SYM_PAUSE (1 << 10) +#define IXGBE_KRM_LP_BASE_PAGE_HIGH_ASM_PAUSE (1 << 11) + +#define IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D (1 << 12) +#define IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D (1 << 19) + +#define IXGBE_KRM_DSP_TXFFE_STATE_C0_EN (1 << 6) +#define IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN (1 << 15) +#define IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN (1 << 16) + +#define IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL (1 << 4) +#define IXGBE_KRM_RX_TRN_LINKUP_CTRL_PROTOCOL_BYPASS (1 << 2) + +#define IXGBE_KRM_PMD_DFX_BURNIN_TX_RX_KR_LB_MASK (0x3 << 16) + +#define IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN (1 << 1) +#define IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN (1 << 2) +#define IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN (1 << 3) +#define IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN (1 << 31) + +#define IXGBE_SB_IOSF_INDIRECT_CTRL 0x00011144 +#define IXGBE_SB_IOSF_INDIRECT_DATA 0x00011148 + +#define IXGBE_SB_IOSF_CTRL_ADDR_SHIFT 0 +#define IXGBE_SB_IOSF_CTRL_ADDR_MASK 0xFF +#define IXGBE_SB_IOSF_CTRL_RESP_STAT_SHIFT 18 +#define IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK \ + (0x3 << IXGBE_SB_IOSF_CTRL_RESP_STAT_SHIFT) +#define IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT 20 +#define IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK \ + (0xFF << IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT) +#define IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT 28 
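+/*
+ * Illustrative sketch, not part of the upstream Intel header: an example of
+ * combining the per-port IXGBE_KRM_LINK_CTRL_1 bit definitions above when
+ * restarting backplane auto-negotiation. Only the bit arithmetic is shown;
+ * in the driver the register itself would be accessed through the MAC's
+ * read_iosf_sb_reg/write_iosf_sb_reg callbacks declared earlier. The helper
+ * name is hypothetical.
+ */
+static inline u32 ixgbe_krm_an_restart_bits(u32 link_ctrl_1)
+{
+	/* advertise KX/KR and kick off a new auto-negotiation cycle */
+	return link_ctrl_1 |
+	       IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX |
+	       IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR |
+	       IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE |
+	       IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
+}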
+#define IXGBE_SB_IOSF_CTRL_TARGET_SELECT_MASK 0x7 +#define IXGBE_SB_IOSF_CTRL_BUSY_SHIFT 31 +#define IXGBE_SB_IOSF_CTRL_BUSY (1 << IXGBE_SB_IOSF_CTRL_BUSY_SHIFT) +#define IXGBE_SB_IOSF_TARGET_KR_PHY 0 + +#define IXGBE_NW_MNG_IF_SEL 0x00011178 +#define IXGBE_NW_MNG_IF_SEL_MDIO_ACT (1u << 1) +#define IXGBE_NW_MNG_IF_SEL_MDIO_IF_MODE (1u << 2) +#define IXGBE_NW_MNG_IF_SEL_EN_SHARED_MDIO (1u << 13) +#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_10M (1u << 17) +#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_100M (1u << 18) +#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_1G (1u << 19) +#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_2_5G (1u << 20) +#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_10G (1u << 21) +#define IXGBE_NW_MNG_IF_SEL_SGMII_ENABLE (1u << 25) +#define IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE (1 << 24) /* X552 reg field only */ +#define IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT 3 +#define IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD \ + (0x1F << IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT) + +#endif /* _IXGBE_TYPE_H_ */ diff --git a/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_vf.c b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_vf.c new file mode 100644 index 000000000..7f69ece10 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_vf.c @@ -0,0 +1,755 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + + +#include "ixgbe_api.h" +#include "ixgbe_type.h" +#include "ixgbe_vf.h" + +#ifndef IXGBE_VFWRITE_REG +#define IXGBE_VFWRITE_REG IXGBE_WRITE_REG +#endif +#ifndef IXGBE_VFREAD_REG +#define IXGBE_VFREAD_REG IXGBE_READ_REG +#endif + +/** + * ixgbe_init_ops_vf - Initialize the pointers for vf + * @hw: pointer to hardware structure + * + * This will assign function pointers, adapter-specific functions can + * override the assignment of generic function pointers by assigning + * their own adapter-specific function pointers. + * Does not touch the hardware. + **/ +s32 ixgbe_init_ops_vf(struct ixgbe_hw *hw) +{ + /* MAC */ + hw->mac.ops.init_hw = ixgbe_init_hw_vf; + hw->mac.ops.reset_hw = ixgbe_reset_hw_vf; + hw->mac.ops.start_hw = ixgbe_start_hw_vf; + /* Cannot clear stats on VF */ + hw->mac.ops.clear_hw_cntrs = NULL; + hw->mac.ops.get_media_type = NULL; + hw->mac.ops.get_mac_addr = ixgbe_get_mac_addr_vf; + hw->mac.ops.stop_adapter = ixgbe_stop_adapter_vf; + hw->mac.ops.get_bus_info = NULL; + hw->mac.ops.negotiate_api_version = ixgbevf_negotiate_api_version; + + /* Link */ + hw->mac.ops.setup_link = ixgbe_setup_mac_link_vf; + hw->mac.ops.check_link = ixgbe_check_mac_link_vf; + hw->mac.ops.get_link_capabilities = NULL; + + /* RAR, Multicast, VLAN */ + hw->mac.ops.set_rar = ixgbe_set_rar_vf; + hw->mac.ops.set_uc_addr = ixgbevf_set_uc_addr_vf; + hw->mac.ops.init_rx_addrs = NULL; + hw->mac.ops.update_mc_addr_list = ixgbe_update_mc_addr_list_vf; + hw->mac.ops.update_xcast_mode = ixgbevf_update_xcast_mode; + hw->mac.ops.enable_mc = NULL; + hw->mac.ops.disable_mc = NULL; + hw->mac.ops.clear_vfta = NULL; + hw->mac.ops.set_vfta = ixgbe_set_vfta_vf; + hw->mac.ops.set_rlpml = ixgbevf_rlpml_set_vf; + + hw->mac.max_tx_queues = 1; + hw->mac.max_rx_queues = 1; + + hw->mbx.ops.init_params = ixgbe_init_mbx_params_vf; + + return IXGBE_SUCCESS; +} + +/* ixgbe_virt_clr_reg - Set register to default (power on) state. 
+ * @hw: pointer to hardware structure + */ +static void ixgbe_virt_clr_reg(struct ixgbe_hw *hw) +{ + int i; + u32 vfsrrctl; + u32 vfdca_rxctrl; + u32 vfdca_txctrl; + + /* VRSRRCTL default values (BSIZEPACKET = 2048, BSIZEHEADER = 256) */ + vfsrrctl = 0x100 << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT; + vfsrrctl |= 0x800 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; + + /* DCA_RXCTRL default value */ + vfdca_rxctrl = IXGBE_DCA_RXCTRL_DESC_RRO_EN | + IXGBE_DCA_RXCTRL_DATA_WRO_EN | + IXGBE_DCA_RXCTRL_HEAD_WRO_EN; + + /* DCA_TXCTRL default value */ + vfdca_txctrl = IXGBE_DCA_TXCTRL_DESC_RRO_EN | + IXGBE_DCA_TXCTRL_DESC_WRO_EN | + IXGBE_DCA_TXCTRL_DATA_RRO_EN; + + IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0); + + for (i = 0; i < 7; i++) { + IXGBE_WRITE_REG(hw, IXGBE_VFRDH(i), 0); + IXGBE_WRITE_REG(hw, IXGBE_VFRDT(i), 0); + IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), 0); + IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), vfsrrctl); + IXGBE_WRITE_REG(hw, IXGBE_VFTDH(i), 0); + IXGBE_WRITE_REG(hw, IXGBE_VFTDT(i), 0); + IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), 0); + IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAH(i), 0); + IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAL(i), 0); + IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(i), vfdca_rxctrl); + IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), vfdca_txctrl); + } + + IXGBE_WRITE_FLUSH(hw); +} + +/** + * ixgbe_start_hw_vf - Prepare hardware for Tx/Rx + * @hw: pointer to hardware structure + * + * Starts the hardware by filling the bus info structure and media type, clears + * all on chip counters, initializes receive address registers, multicast + * table, VLAN filter table, calls routine to set up link and flow control + * settings, and leaves transmit and receive units disabled and uninitialized + **/ +s32 ixgbe_start_hw_vf(struct ixgbe_hw *hw) +{ + /* Clear adapter stopped flag */ + hw->adapter_stopped = false; + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_init_hw_vf - virtual function hardware initialization + * @hw: pointer to hardware structure + * + * Initialize the hardware by resetting the hardware and then starting + * the hardware + **/ +s32 ixgbe_init_hw_vf(struct ixgbe_hw *hw) +{ + s32 status = hw->mac.ops.start_hw(hw); + + hw->mac.ops.get_mac_addr(hw, hw->mac.addr); + + return status; +} + +/** + * ixgbe_reset_hw_vf - Performs hardware reset + * @hw: pointer to hardware structure + * + * Resets the hardware by reseting the transmit and receive units, masks and + * clears all interrupts. 
+ **/ +s32 ixgbe_reset_hw_vf(struct ixgbe_hw *hw) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + u32 timeout = IXGBE_VF_INIT_TIMEOUT; + s32 ret_val = IXGBE_ERR_INVALID_MAC_ADDR; + u32 msgbuf[IXGBE_VF_PERMADDR_MSG_LEN]; + u8 *addr = (u8 *)(&msgbuf[1]); + + DEBUGFUNC("ixgbevf_reset_hw_vf"); + + /* Call adapter stop to disable tx/rx and clear interrupts */ + hw->mac.ops.stop_adapter(hw); + + /* reset the api version */ + hw->api_version = ixgbe_mbox_api_10; + + DEBUGOUT("Issuing a function level reset to MAC\n"); + + IXGBE_VFWRITE_REG(hw, IXGBE_VFCTRL, IXGBE_CTRL_RST); + IXGBE_WRITE_FLUSH(hw); + + msec_delay(50); + + /* we cannot reset while the RSTI / RSTD bits are asserted */ + while (!mbx->ops.check_for_rst(hw, 0) && timeout) { + timeout--; + usec_delay(5); + } + + if (!timeout) + return IXGBE_ERR_RESET_FAILED; + + /* Reset VF registers to initial values */ + ixgbe_virt_clr_reg(hw); + + /* mailbox timeout can now become active */ + mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT; + + msgbuf[0] = IXGBE_VF_RESET; + mbx->ops.write_posted(hw, msgbuf, 1, 0); + + msec_delay(10); + + /* + * set our "perm_addr" based on info provided by PF + * also set up the mc_filter_type which is piggy backed + * on the mac address in word 3 + */ + ret_val = mbx->ops.read_posted(hw, msgbuf, + IXGBE_VF_PERMADDR_MSG_LEN, 0); + if (ret_val) + return ret_val; + + if (msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK) && + msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_NACK)) + return IXGBE_ERR_INVALID_MAC_ADDR; + + if (msgbuf[0] == (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK)) + memcpy(hw->mac.perm_addr, addr, IXGBE_ETH_LENGTH_OF_ADDRESS); + + hw->mac.mc_filter_type = msgbuf[IXGBE_VF_MC_TYPE_WORD]; + + return ret_val; +} + +/** + * ixgbe_stop_adapter_vf - Generic stop Tx/Rx units + * @hw: pointer to hardware structure + * + * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts, + * disables transmit and receive units. The adapter_stopped flag is used by + * the shared code and drivers to determine if the adapter is in a stopped + * state and should not touch the hardware. + **/ +s32 ixgbe_stop_adapter_vf(struct ixgbe_hw *hw) +{ + u32 reg_val; + u16 i; + + /* + * Set the adapter_stopped flag so other driver functions stop touching + * the hardware + */ + hw->adapter_stopped = true; + + /* Clear interrupt mask to stop from interrupts being generated */ + IXGBE_VFWRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK); + + /* Clear any pending interrupts, flush previous writes */ + IXGBE_VFREAD_REG(hw, IXGBE_VTEICR); + + /* Disable the transmit unit. Each queue must be disabled. */ + for (i = 0; i < hw->mac.max_tx_queues; i++) + IXGBE_VFWRITE_REG(hw, IXGBE_VFTXDCTL(i), IXGBE_TXDCTL_SWFLSH); + + /* Disable the receive unit by stopping each queue */ + for (i = 0; i < hw->mac.max_rx_queues; i++) { + reg_val = IXGBE_VFREAD_REG(hw, IXGBE_VFRXDCTL(i)); + reg_val &= ~IXGBE_RXDCTL_ENABLE; + IXGBE_VFWRITE_REG(hw, IXGBE_VFRXDCTL(i), reg_val); + } + /* Clear packet split and pool config */ + IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0); + + /* flush all queues disables */ + IXGBE_WRITE_FLUSH(hw); + msec_delay(2); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_mta_vector - Determines bit-vector in multicast table to set + * @hw: pointer to hardware structure + * @mc_addr: the multicast address + * + * Extracts the 12 bits, from a multicast address, to determine which + * bit-vector to set in the multicast table. 
The hardware uses 12 bits, from + * incoming rx multicast addresses, to determine the bit-vector to check in + * the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set + * by the MO field of the MCSTCTRL. The MO field is set during initialization + * to mc_filter_type. + **/ +STATIC s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr) +{ + u32 vector = 0; + + switch (hw->mac.mc_filter_type) { + case 0: /* use bits [47:36] of the address */ + vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4)); + break; + case 1: /* use bits [46:35] of the address */ + vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5)); + break; + case 2: /* use bits [45:34] of the address */ + vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6)); + break; + case 3: /* use bits [43:32] of the address */ + vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8)); + break; + default: /* Invalid mc_filter_type */ + DEBUGOUT("MC filter type param set incorrectly\n"); + ASSERT(0); + break; + } + + /* vector can only be 12-bits or boundary will be exceeded */ + vector &= 0xFFF; + return vector; +} + +STATIC s32 ixgbevf_write_msg_read_ack(struct ixgbe_hw *hw, u32 *msg, + u32 *retmsg, u16 size) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + s32 retval = mbx->ops.write_posted(hw, msg, size, 0); + + if (retval) + return retval; + + return mbx->ops.read_posted(hw, retmsg, size, 0); +} + +/** + * ixgbe_set_rar_vf - set device MAC address + * @hw: pointer to hardware structure + * @index: Receive address register to write + * @addr: Address to put into receive address register + * @vmdq: VMDq "set" or "pool" index + * @enable_addr: set flag that address is active + **/ +s32 ixgbe_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, + u32 enable_addr) +{ + u32 msgbuf[3]; + u8 *msg_addr = (u8 *)(&msgbuf[1]); + s32 ret_val; + UNREFERENCED_3PARAMETER(vmdq, enable_addr, index); + + memset(msgbuf, 0, 12); + msgbuf[0] = IXGBE_VF_SET_MAC_ADDR; + memcpy(msg_addr, addr, 6); + ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 3); + + msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS; + + /* if nacked the address was rejected, use "perm_addr" */ + if (!ret_val && + (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_NACK))) { + ixgbe_get_mac_addr_vf(hw, hw->mac.addr); + return IXGBE_ERR_MBX; + } + + return ret_val; +} + +/** + * ixgbe_update_mc_addr_list_vf - Update Multicast addresses + * @hw: pointer to the HW structure + * @mc_addr_list: array of multicast addresses to program + * @mc_addr_count: number of multicast addresses to program + * @next: caller supplied function to return next address in list + * @clear: unused + * + * Updates the Multicast Table Array. + **/ +s32 ixgbe_update_mc_addr_list_vf(struct ixgbe_hw *hw, u8 *mc_addr_list, + u32 mc_addr_count, ixgbe_mc_addr_itr next, + bool clear) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + u32 msgbuf[IXGBE_VFMAILBOX_SIZE]; + u16 *vector_list = (u16 *)&msgbuf[1]; + u32 vector; + u32 cnt, i; + u32 vmdq; + + UNREFERENCED_1PARAMETER(clear); + + DEBUGFUNC("ixgbe_update_mc_addr_list_vf"); + + /* Each entry in the list uses 1 16 bit word. We have 30 + * 16 bit words available in our HW msg buffer (minus 1 for the + * msg type). That's 30 hash values if we pack 'em right. If + * there are more than 30 MC addresses to add then punt the + * extras for now and then add code to handle more than 30 later. + * It would be unusual for a server to request that many multi-cast + * addresses except for in large enterprise network environments. 
+ */ + + DEBUGOUT1("MC Addr Count = %d\n", mc_addr_count); + + cnt = (mc_addr_count > 30) ? 30 : mc_addr_count; + msgbuf[0] = IXGBE_VF_SET_MULTICAST; + msgbuf[0] |= cnt << IXGBE_VT_MSGINFO_SHIFT; + + for (i = 0; i < cnt; i++) { + vector = ixgbe_mta_vector(hw, next(hw, &mc_addr_list, &vmdq)); + DEBUGOUT1("Hash value = 0x%03X\n", vector); + vector_list[i] = (u16)vector; + } + + return mbx->ops.write_posted(hw, msgbuf, IXGBE_VFMAILBOX_SIZE, 0); +} + +/** + * ixgbevf_update_xcast_mode - Update Multicast mode + * @hw: pointer to the HW structure + * @xcast_mode: new multicast mode + * + * Updates the Multicast Mode of VF. + **/ +s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode) +{ + u32 msgbuf[2]; + s32 err; + + switch (hw->api_version) { + case ixgbe_mbox_api_12: + /* New modes were introduced in 1.3 version */ + if (xcast_mode > IXGBEVF_XCAST_MODE_ALLMULTI) + return IXGBE_ERR_FEATURE_NOT_SUPPORTED; + /* Fall through */ + case ixgbe_mbox_api_13: + break; + default: + return IXGBE_ERR_FEATURE_NOT_SUPPORTED; + } + + msgbuf[0] = IXGBE_VF_UPDATE_XCAST_MODE; + msgbuf[1] = xcast_mode; + + err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 2); + if (err) + return err; + + msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS; + if (msgbuf[0] == (IXGBE_VF_UPDATE_XCAST_MODE | IXGBE_VT_MSGTYPE_NACK)) + return IXGBE_ERR_FEATURE_NOT_SUPPORTED; + return IXGBE_SUCCESS; +} + +/** + * ixgbe_set_vfta_vf - Set/Unset vlan filter table address + * @hw: pointer to the HW structure + * @vlan: 12 bit VLAN ID + * @vind: unused by VF drivers + * @vlan_on: if true then set bit, else clear bit + * @vlvf_bypass: boolean flag indicating updating default pool is okay + * + * Turn on/off specified VLAN in the VLAN filter table. + **/ +s32 ixgbe_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind, + bool vlan_on, bool vlvf_bypass) +{ + u32 msgbuf[2]; + s32 ret_val; + UNREFERENCED_2PARAMETER(vind, vlvf_bypass); + + msgbuf[0] = IXGBE_VF_SET_VLAN; + msgbuf[1] = vlan; + /* Setting the 8 bit field MSG INFO to TRUE indicates "add" */ + msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT; + + ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 2); + if (!ret_val && (msgbuf[0] & IXGBE_VT_MSGTYPE_ACK)) + return IXGBE_SUCCESS; + + return ret_val | (msgbuf[0] & IXGBE_VT_MSGTYPE_NACK); +} + +/** + * ixgbe_get_num_of_tx_queues_vf - Get number of TX queues + * @hw: pointer to hardware structure + * + * Returns the number of transmit queues for the given adapter. + **/ +u32 ixgbe_get_num_of_tx_queues_vf(struct ixgbe_hw *hw) +{ + UNREFERENCED_1PARAMETER(hw); + return IXGBE_VF_MAX_TX_QUEUES; +} + +/** + * ixgbe_get_num_of_rx_queues_vf - Get number of RX queues + * @hw: pointer to hardware structure + * + * Returns the number of receive queues for the given adapter. + **/ +u32 ixgbe_get_num_of_rx_queues_vf(struct ixgbe_hw *hw) +{ + UNREFERENCED_1PARAMETER(hw); + return IXGBE_VF_MAX_RX_QUEUES; +} + +/** + * ixgbe_get_mac_addr_vf - Read device MAC address + * @hw: pointer to the HW structure + * @mac_addr: the MAC address + **/ +s32 ixgbe_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr) +{ + int i; + + for (i = 0; i < IXGBE_ETH_LENGTH_OF_ADDRESS; i++) + mac_addr[i] = hw->mac.perm_addr[i]; + + return IXGBE_SUCCESS; +} + +s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr) +{ + u32 msgbuf[3], msgbuf_chk; + u8 *msg_addr = (u8 *)(&msgbuf[1]); + s32 ret_val; + + memset(msgbuf, 0, sizeof(msgbuf)); + /* + * If index is one then this is the start of a new list and needs + * indication to the PF so it can do it's own list management. 
+ * If it is zero then that tells the PF to just clear all of + * this VF's macvlans and there is no new list. + */ + msgbuf[0] |= index << IXGBE_VT_MSGINFO_SHIFT; + msgbuf[0] |= IXGBE_VF_SET_MACVLAN; + msgbuf_chk = msgbuf[0]; + if (addr) + memcpy(msg_addr, addr, 6); + + ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 3); + if (!ret_val) { + msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS; + + if (msgbuf[0] == (msgbuf_chk | IXGBE_VT_MSGTYPE_NACK)) + return IXGBE_ERR_OUT_OF_MEM; + } + + return ret_val; +} + +/** + * ixgbe_setup_mac_link_vf - Setup MAC link settings + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg_wait_to_complete: true when waiting for completion is needed + * + * Set the link speed in the AUTOC register and restarts link. + **/ +s32 ixgbe_setup_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed speed, + bool autoneg_wait_to_complete) +{ + UNREFERENCED_3PARAMETER(hw, speed, autoneg_wait_to_complete); + return IXGBE_SUCCESS; +} + +/** + * ixgbe_check_mac_link_vf - Get link/speed status + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @link_up: true is link is up, false otherwise + * @autoneg_wait_to_complete: true when waiting for completion is needed + * + * Reads the links register to determine if link is up and the current speed + **/ +s32 ixgbe_check_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed *speed, + bool *link_up, bool autoneg_wait_to_complete) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + struct ixgbe_mac_info *mac = &hw->mac; + s32 ret_val = IXGBE_SUCCESS; + u32 links_reg; + u32 in_msg = 0; + UNREFERENCED_1PARAMETER(autoneg_wait_to_complete); + + /* If we were hit with a reset drop the link */ + if (!mbx->ops.check_for_rst(hw, 0) || !mbx->timeout) + mac->get_link_status = true; + + if (!mac->get_link_status) + goto out; + + /* if link status is down no point in checking to see if pf is up */ + links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS); + if (!(links_reg & IXGBE_LINKS_UP)) + goto out; + + /* for SFP+ modules and DA cables on 82599 it can take up to 500usecs + * before the link status is correct + */ + if (mac->type == ixgbe_mac_82599_vf) { + int i; + + for (i = 0; i < 5; i++) { + usec_delay(100); + links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS); + + if (!(links_reg & IXGBE_LINKS_UP)) + goto out; + } + } + + switch (links_reg & IXGBE_LINKS_SPEED_82599) { + case IXGBE_LINKS_SPEED_10G_82599: + *speed = IXGBE_LINK_SPEED_10GB_FULL; + if (hw->mac.type >= ixgbe_mac_X550) { + if (links_reg & IXGBE_LINKS_SPEED_NON_STD) + *speed = IXGBE_LINK_SPEED_2_5GB_FULL; + } + break; + case IXGBE_LINKS_SPEED_1G_82599: + *speed = IXGBE_LINK_SPEED_1GB_FULL; + break; + case IXGBE_LINKS_SPEED_100_82599: + *speed = IXGBE_LINK_SPEED_100_FULL; + if (hw->mac.type == ixgbe_mac_X550) { + if (links_reg & IXGBE_LINKS_SPEED_NON_STD) + *speed = IXGBE_LINK_SPEED_5GB_FULL; + } + break; + case IXGBE_LINKS_SPEED_10_X550EM_A: + *speed = IXGBE_LINK_SPEED_UNKNOWN; + /* Since Reserved in older MAC's */ + if (hw->mac.type >= ixgbe_mac_X550) + *speed = IXGBE_LINK_SPEED_10_FULL; + break; + default: + *speed = IXGBE_LINK_SPEED_UNKNOWN; + } + + /* if the read failed it could just be a mailbox collision, best wait + * until we are called again and don't report an error + */ + if (mbx->ops.read(hw, &in_msg, 1, 0)) + goto out; + + if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) { + /* msg is not CTS and is NACK we must have lost CTS status */ + if (in_msg & IXGBE_VT_MSGTYPE_NACK) + ret_val = -1; + goto out; + } + + /* the pf is talking, if we timed out in the past 
we reinit */ + if (!mbx->timeout) { + ret_val = -1; + goto out; + } + + /* if we passed all the tests above then the link is up and we no + * longer need to check for link + */ + mac->get_link_status = false; + +out: + *link_up = !mac->get_link_status; + return ret_val; +} + +/** + * ixgbevf_rlpml_set_vf - Set the maximum receive packet length + * @hw: pointer to the HW structure + * @max_size: value to assign to max frame size + **/ +s32 ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size) +{ + u32 msgbuf[2]; + s32 retval; + + msgbuf[0] = IXGBE_VF_SET_LPE; + msgbuf[1] = max_size; + + retval = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 2); + if (retval) + return retval; + if ((msgbuf[0] & IXGBE_VF_SET_LPE) && + (msgbuf[0] & IXGBE_VT_MSGTYPE_NACK)) + return IXGBE_ERR_MBX; + + return 0; +} + +/** + * ixgbevf_negotiate_api_version - Negotiate supported API version + * @hw: pointer to the HW structure + * @api: integer containing requested API version + **/ +int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api) +{ + int err; + u32 msg[3]; + + /* Negotiate the mailbox API version */ + msg[0] = IXGBE_VF_API_NEGOTIATE; + msg[1] = api; + msg[2] = 0; + + err = ixgbevf_write_msg_read_ack(hw, msg, msg, 3); + if (!err) { + msg[0] &= ~IXGBE_VT_MSGTYPE_CTS; + + /* Store value and return 0 on success */ + if (msg[0] == (IXGBE_VF_API_NEGOTIATE | IXGBE_VT_MSGTYPE_ACK)) { + hw->api_version = api; + return 0; + } + + err = IXGBE_ERR_INVALID_ARGUMENT; + } + + return err; +} + +int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs, + unsigned int *default_tc) +{ + int err; + u32 msg[5]; + + /* do nothing if API doesn't support ixgbevf_get_queues */ + switch (hw->api_version) { + case ixgbe_mbox_api_11: + case ixgbe_mbox_api_12: + case ixgbe_mbox_api_13: + break; + default: + return 0; + } + + /* Fetch queue configuration from the PF */ + msg[0] = IXGBE_VF_GET_QUEUES; + msg[1] = msg[2] = msg[3] = msg[4] = 0; + + err = ixgbevf_write_msg_read_ack(hw, msg, msg, 5); + if (!err) { + msg[0] &= ~IXGBE_VT_MSGTYPE_CTS; + + /* + * if we we didn't get an ACK there must have been + * some sort of mailbox error so we should treat it + * as such + */ + if (msg[0] != (IXGBE_VF_GET_QUEUES | IXGBE_VT_MSGTYPE_ACK)) + return IXGBE_ERR_MBX; + + /* record and validate values from message */ + hw->mac.max_tx_queues = msg[IXGBE_VF_TX_QUEUES]; + if (hw->mac.max_tx_queues == 0 || + hw->mac.max_tx_queues > IXGBE_VF_MAX_TX_QUEUES) + hw->mac.max_tx_queues = IXGBE_VF_MAX_TX_QUEUES; + + hw->mac.max_rx_queues = msg[IXGBE_VF_RX_QUEUES]; + if (hw->mac.max_rx_queues == 0 || + hw->mac.max_rx_queues > IXGBE_VF_MAX_RX_QUEUES) + hw->mac.max_rx_queues = IXGBE_VF_MAX_RX_QUEUES; + + *num_tcs = msg[IXGBE_VF_TRANS_VLAN]; + /* in case of unknown state assume we cannot tag frames */ + if (*num_tcs > hw->mac.max_rx_queues) + *num_tcs = 1; + + *default_tc = msg[IXGBE_VF_DEF_QUEUE]; + /* default to queue 0 on out-of-bounds queue number */ + if (*default_tc >= hw->mac.max_tx_queues) + *default_tc = 0; + } + + return err; +} diff --git a/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_vf.h b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_vf.h new file mode 100644 index 000000000..be58b4f76 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_vf.h @@ -0,0 +1,116 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + +#ifndef _IXGBE_VF_H_ +#define _IXGBE_VF_H_ + +#include "ixgbe_type.h" + +#define IXGBE_VF_IRQ_CLEAR_MASK 7 +#define IXGBE_VF_MAX_TX_QUEUES 8 +#define IXGBE_VF_MAX_RX_QUEUES 8 + 
+/* DCB define */ +#define IXGBE_VF_MAX_TRAFFIC_CLASS 8 + +#define IXGBE_VFCTRL 0x00000 +#define IXGBE_VFSTATUS 0x00008 +#define IXGBE_VFLINKS 0x00010 +#define IXGBE_VFFRTIMER 0x00048 +#define IXGBE_VFRXMEMWRAP 0x03190 +#define IXGBE_VTEICR 0x00100 +#define IXGBE_VTEICS 0x00104 +#define IXGBE_VTEIMS 0x00108 +#define IXGBE_VTEIMC 0x0010C +#define IXGBE_VTEIAC 0x00110 +#define IXGBE_VTEIAM 0x00114 +#define IXGBE_VTEITR(x) (0x00820 + (4 * (x))) +#define IXGBE_VTIVAR(x) (0x00120 + (4 * (x))) +#define IXGBE_VTIVAR_MISC 0x00140 +#define IXGBE_VTRSCINT(x) (0x00180 + (4 * (x))) +/* define IXGBE_VFPBACL still says TBD in EAS */ +#define IXGBE_VFRDBAL(x) (0x01000 + (0x40 * (x))) +#define IXGBE_VFRDBAH(x) (0x01004 + (0x40 * (x))) +#define IXGBE_VFRDLEN(x) (0x01008 + (0x40 * (x))) +#define IXGBE_VFRDH(x) (0x01010 + (0x40 * (x))) +#define IXGBE_VFRDT(x) (0x01018 + (0x40 * (x))) +#define IXGBE_VFRXDCTL(x) (0x01028 + (0x40 * (x))) +#define IXGBE_VFSRRCTL(x) (0x01014 + (0x40 * (x))) +#define IXGBE_VFRSCCTL(x) (0x0102C + (0x40 * (x))) +#define IXGBE_VFPSRTYPE 0x00300 +#define IXGBE_VFTDBAL(x) (0x02000 + (0x40 * (x))) +#define IXGBE_VFTDBAH(x) (0x02004 + (0x40 * (x))) +#define IXGBE_VFTDLEN(x) (0x02008 + (0x40 * (x))) +#define IXGBE_VFTDH(x) (0x02010 + (0x40 * (x))) +#define IXGBE_VFTDT(x) (0x02018 + (0x40 * (x))) +#define IXGBE_VFTXDCTL(x) (0x02028 + (0x40 * (x))) +#define IXGBE_VFTDWBAL(x) (0x02038 + (0x40 * (x))) +#define IXGBE_VFTDWBAH(x) (0x0203C + (0x40 * (x))) +#define IXGBE_VFDCA_RXCTRL(x) (0x0100C + (0x40 * (x))) +#define IXGBE_VFDCA_TXCTRL(x) (0x0200c + (0x40 * (x))) +#define IXGBE_VFGPRC 0x0101C +#define IXGBE_VFGPTC 0x0201C +#define IXGBE_VFGORC_LSB 0x01020 +#define IXGBE_VFGORC_MSB 0x01024 +#define IXGBE_VFGOTC_LSB 0x02020 +#define IXGBE_VFGOTC_MSB 0x02024 +#define IXGBE_VFMPRC 0x01034 +#define IXGBE_VFMRQC 0x3000 +#define IXGBE_VFRSSRK(x) (0x3100 + ((x) * 4)) +#define IXGBE_VFRETA(x) (0x3200 + ((x) * 4)) + + +struct ixgbevf_hw_stats { + u64 base_vfgprc; + u64 base_vfgptc; + u64 base_vfgorc; + u64 base_vfgotc; + u64 base_vfmprc; + + u64 last_vfgprc; + u64 last_vfgptc; + u64 last_vfgorc; + u64 last_vfgotc; + u64 last_vfmprc; + + u64 vfgprc; + u64 vfgptc; + u64 vfgorc; + u64 vfgotc; + u64 vfmprc; + + u64 saved_reset_vfgprc; + u64 saved_reset_vfgptc; + u64 saved_reset_vfgorc; + u64 saved_reset_vfgotc; + u64 saved_reset_vfmprc; +}; + +s32 ixgbe_init_ops_vf(struct ixgbe_hw *hw); +s32 ixgbe_init_hw_vf(struct ixgbe_hw *hw); +s32 ixgbe_start_hw_vf(struct ixgbe_hw *hw); +s32 ixgbe_reset_hw_vf(struct ixgbe_hw *hw); +s32 ixgbe_stop_adapter_vf(struct ixgbe_hw *hw); +u32 ixgbe_get_num_of_tx_queues_vf(struct ixgbe_hw *hw); +u32 ixgbe_get_num_of_rx_queues_vf(struct ixgbe_hw *hw); +s32 ixgbe_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr); +s32 ixgbe_setup_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed speed, + bool autoneg_wait_to_complete); +s32 ixgbe_check_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed *speed, + bool *link_up, bool autoneg_wait_to_complete); +s32 ixgbe_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, + u32 enable_addr); +s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr); +s32 ixgbe_update_mc_addr_list_vf(struct ixgbe_hw *hw, u8 *mc_addr_list, + u32 mc_addr_count, ixgbe_mc_addr_itr, + bool clear); +s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode); +s32 ixgbe_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind, + bool vlan_on, bool vlvf_bypass); +s32 ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size); +int 
ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api); +int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs, + unsigned int *default_tc); + +#endif /* __IXGBE_VF_H__ */ diff --git a/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_x540.c b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_x540.c new file mode 100644 index 000000000..d65f47c18 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_x540.c @@ -0,0 +1,1034 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + +#include "ixgbe_x540.h" +#include "ixgbe_type.h" +#include "ixgbe_api.h" +#include "ixgbe_common.h" +#include "ixgbe_phy.h" + +#define IXGBE_X540_MAX_TX_QUEUES 128 +#define IXGBE_X540_MAX_RX_QUEUES 128 +#define IXGBE_X540_RAR_ENTRIES 128 +#define IXGBE_X540_MC_TBL_SIZE 128 +#define IXGBE_X540_VFT_TBL_SIZE 128 +#define IXGBE_X540_RX_PB_SIZE 384 + +STATIC s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw); +STATIC s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw); +STATIC void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw); + +/** + * ixgbe_init_ops_X540 - Inits func ptrs and MAC type + * @hw: pointer to hardware structure + * + * Initialize the function pointers and assign the MAC type for X540. + * Does not touch the hardware. + **/ +s32 ixgbe_init_ops_X540(struct ixgbe_hw *hw) +{ + struct ixgbe_mac_info *mac = &hw->mac; + struct ixgbe_phy_info *phy = &hw->phy; + struct ixgbe_eeprom_info *eeprom = &hw->eeprom; + s32 ret_val; + + DEBUGFUNC("ixgbe_init_ops_X540"); + + ret_val = ixgbe_init_phy_ops_generic(hw); + ret_val = ixgbe_init_ops_generic(hw); + + + /* EEPROM */ + eeprom->ops.init_params = ixgbe_init_eeprom_params_X540; + eeprom->ops.read = ixgbe_read_eerd_X540; + eeprom->ops.read_buffer = ixgbe_read_eerd_buffer_X540; + eeprom->ops.write = ixgbe_write_eewr_X540; + eeprom->ops.write_buffer = ixgbe_write_eewr_buffer_X540; + eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_X540; + eeprom->ops.validate_checksum = ixgbe_validate_eeprom_checksum_X540; + eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_X540; + + /* PHY */ + phy->ops.init = ixgbe_init_phy_ops_generic; + phy->ops.reset = NULL; + phy->ops.set_phy_power = ixgbe_set_copper_phy_power; + + /* MAC */ + mac->ops.reset_hw = ixgbe_reset_hw_X540; + mac->ops.enable_relaxed_ordering = ixgbe_enable_relaxed_ordering_gen2; + mac->ops.get_media_type = ixgbe_get_media_type_X540; + mac->ops.get_supported_physical_layer = + ixgbe_get_supported_physical_layer_X540; + mac->ops.read_analog_reg8 = NULL; + mac->ops.write_analog_reg8 = NULL; + mac->ops.start_hw = ixgbe_start_hw_X540; + mac->ops.get_san_mac_addr = ixgbe_get_san_mac_addr_generic; + mac->ops.set_san_mac_addr = ixgbe_set_san_mac_addr_generic; + mac->ops.get_device_caps = ixgbe_get_device_caps_generic; + mac->ops.get_wwn_prefix = ixgbe_get_wwn_prefix_generic; + mac->ops.get_fcoe_boot_status = ixgbe_get_fcoe_boot_status_generic; + mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync_X540; + mac->ops.release_swfw_sync = ixgbe_release_swfw_sync_X540; + mac->ops.init_swfw_sync = ixgbe_init_swfw_sync_X540; + mac->ops.disable_sec_rx_path = ixgbe_disable_sec_rx_path_generic; + mac->ops.enable_sec_rx_path = ixgbe_enable_sec_rx_path_generic; + + /* RAR, Multicast, VLAN */ + mac->ops.set_vmdq = ixgbe_set_vmdq_generic; + mac->ops.set_vmdq_san_mac = ixgbe_set_vmdq_san_mac_generic; + mac->ops.clear_vmdq = ixgbe_clear_vmdq_generic; + mac->ops.insert_mac_addr = ixgbe_insert_mac_addr_generic; + mac->rar_highwater = 1; + mac->ops.set_vfta = 
ixgbe_set_vfta_generic; + mac->ops.set_vlvf = ixgbe_set_vlvf_generic; + mac->ops.clear_vfta = ixgbe_clear_vfta_generic; + mac->ops.init_uta_tables = ixgbe_init_uta_tables_generic; + mac->ops.set_mac_anti_spoofing = ixgbe_set_mac_anti_spoofing; + mac->ops.set_vlan_anti_spoofing = ixgbe_set_vlan_anti_spoofing; + + /* Link */ + mac->ops.get_link_capabilities = + ixgbe_get_copper_link_capabilities_generic; + mac->ops.setup_link = ixgbe_setup_mac_link_X540; + mac->ops.setup_rxpba = ixgbe_set_rxpba_generic; + mac->ops.check_link = ixgbe_check_mac_link_generic; + + + mac->mcft_size = IXGBE_X540_MC_TBL_SIZE; + mac->vft_size = IXGBE_X540_VFT_TBL_SIZE; + mac->num_rar_entries = IXGBE_X540_RAR_ENTRIES; + mac->rx_pb_size = IXGBE_X540_RX_PB_SIZE; + mac->max_rx_queues = IXGBE_X540_MAX_RX_QUEUES; + mac->max_tx_queues = IXGBE_X540_MAX_TX_QUEUES; + mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw); + + /* + * FWSM register + * ARC supported; valid only if manageability features are + * enabled. + */ + mac->arc_subsystem_valid = !!(IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw)) + & IXGBE_FWSM_MODE_MASK); + + hw->mbx.ops.init_params = ixgbe_init_mbx_params_pf; + + /* LEDs */ + mac->ops.blink_led_start = ixgbe_blink_led_start_X540; + mac->ops.blink_led_stop = ixgbe_blink_led_stop_X540; + + /* Manageability interface */ + mac->ops.set_fw_drv_ver = ixgbe_set_fw_drv_ver_generic; + + mac->ops.get_rtrup2tc = ixgbe_dcb_get_rtrup2tc_generic; + + return ret_val; +} + +/** + * ixgbe_get_link_capabilities_X540 - Determines link capabilities + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @autoneg: true when autoneg or autotry is enabled + * + * Determines the link capabilities by reading the AUTOC register. + **/ +s32 ixgbe_get_link_capabilities_X540(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *autoneg) +{ + ixgbe_get_copper_link_capabilities_generic(hw, speed, autoneg); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_get_media_type_X540 - Get media type + * @hw: pointer to hardware structure + * + * Returns the media type (fiber, copper, backplane) + **/ +enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw) +{ + UNREFERENCED_1PARAMETER(hw); + return ixgbe_media_type_copper; +} + +/** + * ixgbe_setup_mac_link_X540 - Sets the auto advertised capabilities + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg_wait_to_complete: true when waiting for completion is needed + **/ +s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete) +{ + DEBUGFUNC("ixgbe_setup_mac_link_X540"); + return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait_to_complete); +} + +/** + * ixgbe_reset_hw_X540 - Perform hardware reset + * @hw: pointer to hardware structure + * + * Resets the hardware by resetting the transmit and receive units, masks + * and clears all interrupts, and perform a reset. 
+ **/ +s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw) +{ + s32 status; + u32 ctrl, i; + u32 swfw_mask = hw->phy.phy_semaphore_mask; + + DEBUGFUNC("ixgbe_reset_hw_X540"); + + /* Call adapter stop to disable tx/rx and clear interrupts */ + status = hw->mac.ops.stop_adapter(hw); + if (status != IXGBE_SUCCESS) + goto reset_hw_out; + + /* flush pending Tx transactions */ + ixgbe_clear_tx_pending(hw); + +mac_reset_top: + status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask); + if (status != IXGBE_SUCCESS) { + ERROR_REPORT2(IXGBE_ERROR_CAUTION, + "semaphore failed with %d", status); + return IXGBE_ERR_SWFW_SYNC; + } + ctrl = IXGBE_CTRL_RST; + ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL); + IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); + IXGBE_WRITE_FLUSH(hw); + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + + /* Poll for reset bit to self-clear indicating reset is complete */ + for (i = 0; i < 10; i++) { + usec_delay(1); + ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); + if (!(ctrl & IXGBE_CTRL_RST_MASK)) + break; + } + + if (ctrl & IXGBE_CTRL_RST_MASK) { + status = IXGBE_ERR_RESET_FAILED; + ERROR_REPORT1(IXGBE_ERROR_POLLING, + "Reset polling failed to complete.\n"); + } + msec_delay(100); + + /* + * Double resets are required for recovery from certain error + * conditions. Between resets, it is necessary to stall to allow time + * for any pending HW events to complete. + */ + if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) { + hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED; + goto mac_reset_top; + } + + /* Set the Rx packet buffer size. */ + IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), 384 << IXGBE_RXPBSIZE_SHIFT); + + /* Store the permanent mac address */ + hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr); + + /* + * Store MAC address from RAR0, clear receive address registers, and + * clear the multicast table. Also reset num_rar_entries to 128, + * since we modify this value when programming the SAN MAC address. + */ + hw->mac.num_rar_entries = 128; + hw->mac.ops.init_rx_addrs(hw); + + /* Store the permanent SAN mac address */ + hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr); + + /* Add the SAN MAC address to the RAR only if it's a valid address */ + if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) { + /* Save the SAN MAC RAR index */ + hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1; + + hw->mac.ops.set_rar(hw, hw->mac.san_mac_rar_index, + hw->mac.san_addr, 0, IXGBE_RAH_AV); + + /* clear VMDq pool/queue selection for this RAR */ + hw->mac.ops.clear_vmdq(hw, hw->mac.san_mac_rar_index, + IXGBE_CLEAR_VMDQ_ALL); + + /* Reserve the last RAR for the SAN MAC address */ + hw->mac.num_rar_entries--; + } + + /* Store the alternative WWNN/WWPN prefix */ + hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix, + &hw->mac.wwpn_prefix); + +reset_hw_out: + return status; +} + +/** + * ixgbe_start_hw_X540 - Prepare hardware for Tx/Rx + * @hw: pointer to hardware structure + * + * Starts the hardware using the generic start_hw function + * and the generation start_hw function. + * Then performs revision-specific operations, if any. + **/ +s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw) +{ + s32 ret_val = IXGBE_SUCCESS; + + DEBUGFUNC("ixgbe_start_hw_X540"); + + ret_val = ixgbe_start_hw_generic(hw); + if (ret_val != IXGBE_SUCCESS) + goto out; + + ret_val = ixgbe_start_hw_gen2(hw); + +out: + return ret_val; +} + +/** + * ixgbe_get_supported_physical_layer_X540 - Returns physical layer type + * @hw: pointer to hardware structure + * + * Determines physical layer capabilities of the current configuration. 
+ **/ +u64 ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw) +{ + u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; + u16 ext_ability = 0; + + DEBUGFUNC("ixgbe_get_supported_physical_layer_X540"); + + hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability); + if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY) + physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T; + if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY) + physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T; + if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY) + physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX; + + return physical_layer; +} + +/** + * ixgbe_init_eeprom_params_X540 - Initialize EEPROM params + * @hw: pointer to hardware structure + * + * Initializes the EEPROM parameters ixgbe_eeprom_info within the + * ixgbe_hw struct in order to set up EEPROM access. + **/ +s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw) +{ + struct ixgbe_eeprom_info *eeprom = &hw->eeprom; + u32 eec; + u16 eeprom_size; + + DEBUGFUNC("ixgbe_init_eeprom_params_X540"); + + if (eeprom->type == ixgbe_eeprom_uninitialized) { + eeprom->semaphore_delay = 10; + eeprom->type = ixgbe_flash; + + eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); + eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >> + IXGBE_EEC_SIZE_SHIFT); + eeprom->word_size = 1 << (eeprom_size + + IXGBE_EEPROM_WORD_SIZE_SHIFT); + + DEBUGOUT2("Eeprom params: type = %d, size = %d\n", + eeprom->type, eeprom->word_size); + } + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_read_eerd_X540- Read EEPROM word using EERD + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM using the EERD register. + **/ +s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data) +{ + s32 status = IXGBE_SUCCESS; + + DEBUGFUNC("ixgbe_read_eerd_X540"); + if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == + IXGBE_SUCCESS) { + status = ixgbe_read_eerd_generic(hw, offset, data); + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + } else { + status = IXGBE_ERR_SWFW_SYNC; + } + + return status; +} + +/** + * ixgbe_read_eerd_buffer_X540- Read EEPROM word(s) using EERD + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to read + * @words: number of words + * @data: word(s) read from the EEPROM + * + * Reads a 16 bit word(s) from the EEPROM using the EERD register. + **/ +s32 ixgbe_read_eerd_buffer_X540(struct ixgbe_hw *hw, + u16 offset, u16 words, u16 *data) +{ + s32 status = IXGBE_SUCCESS; + + DEBUGFUNC("ixgbe_read_eerd_buffer_X540"); + if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == + IXGBE_SUCCESS) { + status = ixgbe_read_eerd_buffer_generic(hw, offset, + words, data); + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + } else { + status = IXGBE_ERR_SWFW_SYNC; + } + + return status; +} + +/** + * ixgbe_write_eewr_X540 - Write EEPROM word using EEWR + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to write + * @data: word write to the EEPROM + * + * Write a 16 bit word to the EEPROM using the EEWR register. 
+ **/ +s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data) +{ + s32 status = IXGBE_SUCCESS; + + DEBUGFUNC("ixgbe_write_eewr_X540"); + if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == + IXGBE_SUCCESS) { + status = ixgbe_write_eewr_generic(hw, offset, data); + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + } else { + status = IXGBE_ERR_SWFW_SYNC; + } + + return status; +} + +/** + * ixgbe_write_eewr_buffer_X540 - Write EEPROM word(s) using EEWR + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to write + * @words: number of words + * @data: word(s) write to the EEPROM + * + * Write a 16 bit word(s) to the EEPROM using the EEWR register. + **/ +s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw, + u16 offset, u16 words, u16 *data) +{ + s32 status = IXGBE_SUCCESS; + + DEBUGFUNC("ixgbe_write_eewr_buffer_X540"); + if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == + IXGBE_SUCCESS) { + status = ixgbe_write_eewr_buffer_generic(hw, offset, + words, data); + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + } else { + status = IXGBE_ERR_SWFW_SYNC; + } + + return status; +} + +/** + * ixgbe_calc_eeprom_checksum_X540 - Calculates and returns the checksum + * + * This function does not use synchronization for EERD and EEWR. It can + * be used internally by function which utilize ixgbe_acquire_swfw_sync_X540. + * + * @hw: pointer to hardware structure + * + * Returns a negative error code on error, or the 16-bit checksum + **/ +s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw) +{ + u16 i, j; + u16 checksum = 0; + u16 length = 0; + u16 pointer = 0; + u16 word = 0; + u16 ptr_start = IXGBE_PCIE_ANALOG_PTR; + + /* Do not use hw->eeprom.ops.read because we do not want to take + * the synchronization semaphores here. Instead use + * ixgbe_read_eerd_generic + */ + + DEBUGFUNC("ixgbe_calc_eeprom_checksum_X540"); + + /* Include 0x0 up to IXGBE_EEPROM_CHECKSUM; do not include the + * checksum itself + */ + for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) { + if (ixgbe_read_eerd_generic(hw, i, &word)) { + DEBUGOUT("EEPROM read failed\n"); + return IXGBE_ERR_EEPROM; + } + checksum += word; + } + + /* Include all data from pointers 0x3, 0x6-0xE. This excludes the + * FW, PHY module, and PCIe Expansion/Option ROM pointers. + */ + for (i = ptr_start; i < IXGBE_FW_PTR; i++) { + if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR) + continue; + + if (ixgbe_read_eerd_generic(hw, i, &pointer)) { + DEBUGOUT("EEPROM read failed\n"); + return IXGBE_ERR_EEPROM; + } + + /* Skip pointer section if the pointer is invalid. */ + if (pointer == 0xFFFF || pointer == 0 || + pointer >= hw->eeprom.word_size) + continue; + + if (ixgbe_read_eerd_generic(hw, pointer, &length)) { + DEBUGOUT("EEPROM read failed\n"); + return IXGBE_ERR_EEPROM; + } + + /* Skip pointer section if length is invalid. */ + if (length == 0xFFFF || length == 0 || + (pointer + length) >= hw->eeprom.word_size) + continue; + + for (j = pointer + 1; j <= pointer + length; j++) { + if (ixgbe_read_eerd_generic(hw, j, &word)) { + DEBUGOUT("EEPROM read failed\n"); + return IXGBE_ERR_EEPROM; + } + checksum += word; + } + } + + checksum = (u16)IXGBE_EEPROM_SUM - checksum; + + return (s32)checksum; +} + +/** + * ixgbe_validate_eeprom_checksum_X540 - Validate EEPROM checksum + * @hw: pointer to hardware structure + * @checksum_val: calculated checksum + * + * Performs checksum calculation and validates the EEPROM checksum. If the + * caller does not need checksum_val, the value can be NULL. 
+ **/ +s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw, + u16 *checksum_val) +{ + s32 status; + u16 checksum; + u16 read_checksum = 0; + + DEBUGFUNC("ixgbe_validate_eeprom_checksum_X540"); + + /* Read the first word from the EEPROM. If this times out or fails, do + * not continue or we could be in for a very long wait while every + * EEPROM read fails + */ + status = hw->eeprom.ops.read(hw, 0, &checksum); + if (status) { + DEBUGOUT("EEPROM read failed\n"); + return status; + } + + if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)) + return IXGBE_ERR_SWFW_SYNC; + + status = hw->eeprom.ops.calc_checksum(hw); + if (status < 0) + goto out; + + checksum = (u16)(status & 0xffff); + + /* Do not use hw->eeprom.ops.read because we do not want to take + * the synchronization semaphores twice here. + */ + status = ixgbe_read_eerd_generic(hw, IXGBE_EEPROM_CHECKSUM, + &read_checksum); + if (status) + goto out; + + /* Verify read checksum from EEPROM is the same as + * calculated checksum + */ + if (read_checksum != checksum) { + ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE, + "Invalid EEPROM checksum"); + status = IXGBE_ERR_EEPROM_CHECKSUM; + } + + /* If the user cares, return the calculated checksum */ + if (checksum_val) + *checksum_val = checksum; + +out: + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + + return status; +} + +/** + * ixgbe_update_eeprom_checksum_X540 - Updates the EEPROM checksum and flash + * @hw: pointer to hardware structure + * + * After writing EEPROM to shadow RAM using EEWR register, software calculates + * checksum and updates the EEPROM and instructs the hardware to update + * the flash. + **/ +s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw) +{ + s32 status; + u16 checksum; + + DEBUGFUNC("ixgbe_update_eeprom_checksum_X540"); + + /* Read the first word from the EEPROM. If this times out or fails, do + * not continue or we could be in for a very long wait while every + * EEPROM read fails + */ + status = hw->eeprom.ops.read(hw, 0, &checksum); + if (status) { + DEBUGOUT("EEPROM read failed\n"); + return status; + } + + if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)) + return IXGBE_ERR_SWFW_SYNC; + + status = hw->eeprom.ops.calc_checksum(hw); + if (status < 0) + goto out; + + checksum = (u16)(status & 0xffff); + + /* Do not use hw->eeprom.ops.write because we do not want to + * take the synchronization semaphores twice here. + */ + status = ixgbe_write_eewr_generic(hw, IXGBE_EEPROM_CHECKSUM, checksum); + if (status) + goto out; + + status = ixgbe_update_flash_X540(hw); + +out: + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + + return status; +} + +/** + * ixgbe_update_flash_X540 - Instruct HW to copy EEPROM to Flash device + * @hw: pointer to hardware structure + * + * Set FLUP (bit 23) of the EEC register to instruct Hardware to copy + * EEPROM from shadow RAM to the flash device. 
+ **/ +s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw) +{ + u32 flup; + s32 status; + + DEBUGFUNC("ixgbe_update_flash_X540"); + + status = ixgbe_poll_flash_update_done_X540(hw); + if (status == IXGBE_ERR_EEPROM) { + DEBUGOUT("Flash update time out\n"); + goto out; + } + + flup = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)) | IXGBE_EEC_FLUP; + IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), flup); + + status = ixgbe_poll_flash_update_done_X540(hw); + if (status == IXGBE_SUCCESS) + DEBUGOUT("Flash update complete\n"); + else + DEBUGOUT("Flash update time out\n"); + + if (hw->mac.type == ixgbe_mac_X540 && hw->revision_id == 0) { + flup = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); + + if (flup & IXGBE_EEC_SEC1VAL) { + flup |= IXGBE_EEC_FLUP; + IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), flup); + } + + status = ixgbe_poll_flash_update_done_X540(hw); + if (status == IXGBE_SUCCESS) + DEBUGOUT("Flash update complete\n"); + else + DEBUGOUT("Flash update time out\n"); + } +out: + return status; +} + +/** + * ixgbe_poll_flash_update_done_X540 - Poll flash update status + * @hw: pointer to hardware structure + * + * Polls the FLUDONE (bit 26) of the EEC Register to determine when the + * flash update is done. + **/ +STATIC s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw) +{ + u32 i; + u32 reg; + s32 status = IXGBE_ERR_EEPROM; + + DEBUGFUNC("ixgbe_poll_flash_update_done_X540"); + + for (i = 0; i < IXGBE_FLUDONE_ATTEMPTS; i++) { + reg = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); + if (reg & IXGBE_EEC_FLUDONE) { + status = IXGBE_SUCCESS; + break; + } + msec_delay(5); + } + + if (i == IXGBE_FLUDONE_ATTEMPTS) + ERROR_REPORT1(IXGBE_ERROR_POLLING, + "Flash update status polling timed out"); + + return status; +} + +/** + * ixgbe_acquire_swfw_sync_X540 - Acquire SWFW semaphore + * @hw: pointer to hardware structure + * @mask: Mask to specify which semaphore to acquire + * + * Acquires the SWFW semaphore thought the SW_FW_SYNC register for + * the specified function (CSR, PHY0, PHY1, NVM, Flash) + **/ +s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask) +{ + u32 swmask = mask & IXGBE_GSSR_NVM_PHY_MASK; + u32 fwmask = swmask << 5; + u32 swi2c_mask = mask & IXGBE_GSSR_I2C_MASK; + u32 timeout = 200; + u32 hwmask = 0; + u32 swfw_sync; + u32 i; + + DEBUGFUNC("ixgbe_acquire_swfw_sync_X540"); + + if (swmask & IXGBE_GSSR_EEP_SM) + hwmask |= IXGBE_GSSR_FLASH_SM; + + /* SW only mask doesn't have FW bit pair */ + if (mask & IXGBE_GSSR_SW_MNG_SM) + swmask |= IXGBE_GSSR_SW_MNG_SM; + + swmask |= swi2c_mask; + fwmask |= swi2c_mask << 2; + if (hw->mac.type >= ixgbe_mac_X550) + timeout = 1000; + + for (i = 0; i < timeout; i++) { + /* SW NVM semaphore bit is used for access to all + * SW_FW_SYNC bits (not just NVM) + */ + if (ixgbe_get_swfw_sync_semaphore(hw)) { + DEBUGOUT("Failed to get NVM access and register semaphore, returning IXGBE_ERR_SWFW_SYNC\n"); + return IXGBE_ERR_SWFW_SYNC; + } + + swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw)); + if (!(swfw_sync & (fwmask | swmask | hwmask))) { + swfw_sync |= swmask; + IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw), + swfw_sync); + ixgbe_release_swfw_sync_semaphore(hw); + return IXGBE_SUCCESS; + } + /* Firmware currently using resource (fwmask), hardware + * currently using resource (hwmask), or other software + * thread currently using resource (swmask) + */ + ixgbe_release_swfw_sync_semaphore(hw); + msec_delay(5); + } + + /* If the resource is not released by the FW/HW the SW can assume that + * the FW/HW malfunctions. 
In that case the SW should set the SW bit(s) + * of the requested resource(s) while ignoring the corresponding FW/HW + * bits in the SW_FW_SYNC register. + */ + if (ixgbe_get_swfw_sync_semaphore(hw)) { + DEBUGOUT("Failed to get NVM sempahore and register semaphore while forcefully ignoring FW sempahore bit(s) and setting SW semaphore bit(s), returning IXGBE_ERR_SWFW_SYNC\n"); + return IXGBE_ERR_SWFW_SYNC; + } + swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw)); + if (swfw_sync & (fwmask | hwmask)) { + swfw_sync |= swmask; + IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw), swfw_sync); + ixgbe_release_swfw_sync_semaphore(hw); + msec_delay(5); + return IXGBE_SUCCESS; + } + /* If the resource is not released by other SW the SW can assume that + * the other SW malfunctions. In that case the SW should clear all SW + * flags that it does not own and then repeat the whole process once + * again. + */ + if (swfw_sync & swmask) { + u32 rmask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_PHY0_SM | + IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_MAC_CSR_SM | + IXGBE_GSSR_SW_MNG_SM; + + if (swi2c_mask) + rmask |= IXGBE_GSSR_I2C_MASK; + ixgbe_release_swfw_sync_X540(hw, rmask); + ixgbe_release_swfw_sync_semaphore(hw); + DEBUGOUT("Resource not released by other SW, returning IXGBE_ERR_SWFW_SYNC\n"); + return IXGBE_ERR_SWFW_SYNC; + } + ixgbe_release_swfw_sync_semaphore(hw); + DEBUGOUT("Returning error IXGBE_ERR_SWFW_SYNC\n"); + + return IXGBE_ERR_SWFW_SYNC; +} + +/** + * ixgbe_release_swfw_sync_X540 - Release SWFW semaphore + * @hw: pointer to hardware structure + * @mask: Mask to specify which semaphore to release + * + * Releases the SWFW semaphore through the SW_FW_SYNC register + * for the specified function (CSR, PHY0, PHY1, EVM, Flash) + **/ +void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask) +{ + u32 swmask = mask & (IXGBE_GSSR_NVM_PHY_MASK | IXGBE_GSSR_SW_MNG_SM); + u32 swfw_sync; + + DEBUGFUNC("ixgbe_release_swfw_sync_X540"); + + if (mask & IXGBE_GSSR_I2C_MASK) + swmask |= mask & IXGBE_GSSR_I2C_MASK; + ixgbe_get_swfw_sync_semaphore(hw); + + swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw)); + swfw_sync &= ~swmask; + IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw), swfw_sync); + + ixgbe_release_swfw_sync_semaphore(hw); + msec_delay(2); +} + +/** + * ixgbe_get_swfw_sync_semaphore - Get hardware semaphore + * @hw: pointer to hardware structure + * + * Sets the hardware semaphores so SW/FW can gain control of shared resources + **/ +STATIC s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw) +{ + s32 status = IXGBE_ERR_EEPROM; + u32 timeout = 2000; + u32 i; + u32 swsm; + + DEBUGFUNC("ixgbe_get_swfw_sync_semaphore"); + + /* Get SMBI software semaphore between device drivers first */ + for (i = 0; i < timeout; i++) { + /* + * If the SMBI bit is 0 when we read it, then the bit will be + * set and we have the semaphore + */ + swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw)); + if (!(swsm & IXGBE_SWSM_SMBI)) { + status = IXGBE_SUCCESS; + break; + } + usec_delay(50); + } + + /* Now get the semaphore between SW/FW through the REGSMP bit */ + if (status == IXGBE_SUCCESS) { + for (i = 0; i < timeout; i++) { + swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw)); + if (!(swsm & IXGBE_SWFW_REGSMP)) + break; + + usec_delay(50); + } + + /* + * Release semaphores and return error if SW NVM semaphore + * was not granted because we don't have access to the EEPROM + */ + if (i >= timeout) { + ERROR_REPORT1(IXGBE_ERROR_POLLING, + "REGSMP Software NVM semaphore not granted.\n"); + 
ixgbe_release_swfw_sync_semaphore(hw); + status = IXGBE_ERR_EEPROM; + } + } else { + ERROR_REPORT1(IXGBE_ERROR_POLLING, + "Software semaphore SMBI between device drivers " + "not granted.\n"); + } + + return status; +} + +/** + * ixgbe_release_swfw_sync_semaphore - Release hardware semaphore + * @hw: pointer to hardware structure + * + * This function clears hardware semaphore bits. + **/ +STATIC void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw) +{ + u32 swsm; + + DEBUGFUNC("ixgbe_release_swfw_sync_semaphore"); + + /* Release both semaphores by writing 0 to the bits REGSMP and SMBI */ + + swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw)); + swsm &= ~IXGBE_SWFW_REGSMP; + IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw), swsm); + + swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw)); + swsm &= ~IXGBE_SWSM_SMBI; + IXGBE_WRITE_REG(hw, IXGBE_SWSM_BY_MAC(hw), swsm); + + IXGBE_WRITE_FLUSH(hw); +} + +/** + * ixgbe_init_swfw_sync_X540 - Release hardware semaphore + * @hw: pointer to hardware structure + * + * This function reset hardware semaphore bits for a semaphore that may + * have be left locked due to a catastrophic failure. + **/ +void ixgbe_init_swfw_sync_X540(struct ixgbe_hw *hw) +{ + u32 rmask; + + /* First try to grab the semaphore but we don't need to bother + * looking to see whether we got the lock or not since we do + * the same thing regardless of whether we got the lock or not. + * We got the lock - we release it. + * We timeout trying to get the lock - we force its release. + */ + ixgbe_get_swfw_sync_semaphore(hw); + ixgbe_release_swfw_sync_semaphore(hw); + + /* Acquire and release all software resources. */ + rmask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_PHY0_SM | + IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_MAC_CSR_SM | + IXGBE_GSSR_SW_MNG_SM; + + rmask |= IXGBE_GSSR_I2C_MASK; + ixgbe_acquire_swfw_sync_X540(hw, rmask); + ixgbe_release_swfw_sync_X540(hw, rmask); +} + +/** + * ixgbe_blink_led_start_X540 - Blink LED based on index. + * @hw: pointer to hardware structure + * @index: led number to blink + * + * Devices that implement the version 2 interface: + * X540 + **/ +s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index) +{ + u32 macc_reg; + u32 ledctl_reg; + ixgbe_link_speed speed; + bool link_up; + + DEBUGFUNC("ixgbe_blink_led_start_X540"); + + if (index > 3) + return IXGBE_ERR_PARAM; + + /* + * Link should be up in order for the blink bit in the LED control + * register to work. Force link and speed in the MAC if link is down. + * This will be reversed when we stop the blinking. + */ + hw->mac.ops.check_link(hw, &speed, &link_up, false); + if (link_up == false) { + macc_reg = IXGBE_READ_REG(hw, IXGBE_MACC); + macc_reg |= IXGBE_MACC_FLU | IXGBE_MACC_FSV_10G | IXGBE_MACC_FS; + IXGBE_WRITE_REG(hw, IXGBE_MACC, macc_reg); + } + /* Set the LED to LINK_UP + BLINK. */ + ledctl_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); + ledctl_reg &= ~IXGBE_LED_MODE_MASK(index); + ledctl_reg |= IXGBE_LED_BLINK(index); + IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, ledctl_reg); + IXGBE_WRITE_FLUSH(hw); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_blink_led_stop_X540 - Stop blinking LED based on index. + * @hw: pointer to hardware structure + * @index: led number to stop blinking + * + * Devices that implement the version 2 interface: + * X540 + **/ +s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index) +{ + u32 macc_reg; + u32 ledctl_reg; + + if (index > 3) + return IXGBE_ERR_PARAM; + + DEBUGFUNC("ixgbe_blink_led_stop_X540"); + + /* Restore the LED to its default value. 
*/ + ledctl_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); + ledctl_reg &= ~IXGBE_LED_MODE_MASK(index); + ledctl_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index); + ledctl_reg &= ~IXGBE_LED_BLINK(index); + IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, ledctl_reg); + + /* Unforce link and speed in the MAC. */ + macc_reg = IXGBE_READ_REG(hw, IXGBE_MACC); + macc_reg &= ~(IXGBE_MACC_FLU | IXGBE_MACC_FSV_10G | IXGBE_MACC_FS); + IXGBE_WRITE_REG(hw, IXGBE_MACC, macc_reg); + IXGBE_WRITE_FLUSH(hw); + + return IXGBE_SUCCESS; +} diff --git a/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_x540.h b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_x540.h new file mode 100644 index 000000000..ba79847d1 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_x540.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + +#ifndef _IXGBE_X540_H_ +#define _IXGBE_X540_H_ + +#include "ixgbe_type.h" + +s32 ixgbe_get_link_capabilities_X540(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, bool *autoneg); +enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw); +s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed, + bool link_up_wait_to_complete); +s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw); +s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw); +u64 ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw); + +s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw); +s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data); +s32 ixgbe_read_eerd_buffer_X540(struct ixgbe_hw *hw, u16 offset, u16 words, + u16 *data); +s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data); +s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw, u16 offset, u16 words, + u16 *data); +s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw); +s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw, u16 *checksum_val); +s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw); +s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw); + +s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask); +void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask); +void ixgbe_init_swfw_sync_X540(struct ixgbe_hw *hw); + +s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index); +s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index); +#endif /* _IXGBE_X540_H_ */ + diff --git a/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_x550.c b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_x550.c new file mode 100644 index 000000000..3de406fd3 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_x550.c @@ -0,0 +1,4669 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + +#include "ixgbe_x550.h" +#include "ixgbe_x540.h" +#include "ixgbe_type.h" +#include "ixgbe_api.h" +#include "ixgbe_common.h" +#include "ixgbe_phy.h" + +STATIC s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed); +STATIC s32 ixgbe_acquire_swfw_sync_X550a(struct ixgbe_hw *, u32 mask); +STATIC void ixgbe_release_swfw_sync_X550a(struct ixgbe_hw *, u32 mask); +STATIC s32 ixgbe_read_mng_if_sel_x550em(struct ixgbe_hw *hw); + +/** + * ixgbe_init_ops_X550 - Inits func ptrs and MAC type + * @hw: pointer to hardware structure + * + * Initialize the function pointers and assign the MAC type for X550. + * Does not touch the hardware. 
+ **/ +s32 ixgbe_init_ops_X550(struct ixgbe_hw *hw) +{ + struct ixgbe_mac_info *mac = &hw->mac; + struct ixgbe_eeprom_info *eeprom = &hw->eeprom; + s32 ret_val; + + DEBUGFUNC("ixgbe_init_ops_X550"); + + ret_val = ixgbe_init_ops_X540(hw); + mac->ops.dmac_config = ixgbe_dmac_config_X550; + mac->ops.dmac_config_tcs = ixgbe_dmac_config_tcs_X550; + mac->ops.dmac_update_tcs = ixgbe_dmac_update_tcs_X550; + mac->ops.setup_eee = NULL; + mac->ops.set_source_address_pruning = + ixgbe_set_source_address_pruning_X550; + mac->ops.set_ethertype_anti_spoofing = + ixgbe_set_ethertype_anti_spoofing_X550; + + mac->ops.get_rtrup2tc = ixgbe_dcb_get_rtrup2tc_generic; + eeprom->ops.init_params = ixgbe_init_eeprom_params_X550; + eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_X550; + eeprom->ops.read = ixgbe_read_ee_hostif_X550; + eeprom->ops.read_buffer = ixgbe_read_ee_hostif_buffer_X550; + eeprom->ops.write = ixgbe_write_ee_hostif_X550; + eeprom->ops.write_buffer = ixgbe_write_ee_hostif_buffer_X550; + eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_X550; + eeprom->ops.validate_checksum = ixgbe_validate_eeprom_checksum_X550; + + mac->ops.disable_mdd = ixgbe_disable_mdd_X550; + mac->ops.enable_mdd = ixgbe_enable_mdd_X550; + mac->ops.mdd_event = ixgbe_mdd_event_X550; + mac->ops.restore_mdd_vf = ixgbe_restore_mdd_vf_X550; + mac->ops.fw_recovery_mode = ixgbe_fw_recovery_mode_X550; + mac->ops.disable_rx = ixgbe_disable_rx_x550; + /* Manageability interface */ + mac->ops.set_fw_drv_ver = ixgbe_set_fw_drv_ver_x550; + switch (hw->device_id) { + case IXGBE_DEV_ID_X550EM_X_1G_T: + hw->mac.ops.led_on = NULL; + hw->mac.ops.led_off = NULL; + break; + case IXGBE_DEV_ID_X550EM_X_10G_T: + case IXGBE_DEV_ID_X550EM_A_10G_T: + hw->mac.ops.led_on = ixgbe_led_on_t_X550em; + hw->mac.ops.led_off = ixgbe_led_off_t_X550em; + break; + default: + break; + } + return ret_val; +} + +/** + * ixgbe_read_cs4227 - Read CS4227 register + * @hw: pointer to hardware structure + * @reg: register number to write + * @value: pointer to receive value read + * + * Returns status code + **/ +STATIC s32 ixgbe_read_cs4227(struct ixgbe_hw *hw, u16 reg, u16 *value) +{ + return hw->link.ops.read_link_unlocked(hw, hw->link.addr, reg, value); +} + +/** + * ixgbe_write_cs4227 - Write CS4227 register + * @hw: pointer to hardware structure + * @reg: register number to write + * @value: value to write to register + * + * Returns status code + **/ +STATIC s32 ixgbe_write_cs4227(struct ixgbe_hw *hw, u16 reg, u16 value) +{ + return hw->link.ops.write_link_unlocked(hw, hw->link.addr, reg, value); +} + +/** + * ixgbe_read_pe - Read register from port expander + * @hw: pointer to hardware structure + * @reg: register number to read + * @value: pointer to receive read value + * + * Returns status code + **/ +STATIC s32 ixgbe_read_pe(struct ixgbe_hw *hw, u8 reg, u8 *value) +{ + s32 status; + + status = ixgbe_read_i2c_byte_unlocked(hw, reg, IXGBE_PE, value); + if (status != IXGBE_SUCCESS) + ERROR_REPORT2(IXGBE_ERROR_CAUTION, + "port expander access failed with %d\n", status); + return status; +} + +/** + * ixgbe_write_pe - Write register to port expander + * @hw: pointer to hardware structure + * @reg: register number to write + * @value: value to write + * + * Returns status code + **/ +STATIC s32 ixgbe_write_pe(struct ixgbe_hw *hw, u8 reg, u8 value) +{ + s32 status; + + status = ixgbe_write_i2c_byte_unlocked(hw, reg, IXGBE_PE, value); + if (status != IXGBE_SUCCESS) + ERROR_REPORT2(IXGBE_ERROR_CAUTION, + "port expander access failed with %d\n", status); 
+	return status;
+}
+
+/**
+ * ixgbe_reset_cs4227 - Reset CS4227 using port expander
+ * @hw: pointer to hardware structure
+ *
+ * This function assumes that the caller has acquired the proper semaphore.
+ * Returns error code
+ **/
+STATIC s32 ixgbe_reset_cs4227(struct ixgbe_hw *hw)
+{
+	s32 status;
+	u32 retry;
+	u16 value;
+	u8 reg;
+
+	/* Trigger hard reset. */
+	status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, &reg);
+	if (status != IXGBE_SUCCESS)
+		return status;
+	reg |= IXGBE_PE_BIT1;
+	status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg);
+	if (status != IXGBE_SUCCESS)
+		return status;
+
+	status = ixgbe_read_pe(hw, IXGBE_PE_CONFIG, &reg);
+	if (status != IXGBE_SUCCESS)
+		return status;
+	reg &= ~IXGBE_PE_BIT1;
+	status = ixgbe_write_pe(hw, IXGBE_PE_CONFIG, reg);
+	if (status != IXGBE_SUCCESS)
+		return status;
+
+	status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, &reg);
+	if (status != IXGBE_SUCCESS)
+		return status;
+	reg &= ~IXGBE_PE_BIT1;
+	status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg);
+	if (status != IXGBE_SUCCESS)
+		return status;
+
+	usec_delay(IXGBE_CS4227_RESET_HOLD);
+
+	status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, &reg);
+	if (status != IXGBE_SUCCESS)
+		return status;
+	reg |= IXGBE_PE_BIT1;
+	status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg);
+	if (status != IXGBE_SUCCESS)
+		return status;
+
+	/* Wait for the reset to complete. */
+	msec_delay(IXGBE_CS4227_RESET_DELAY);
+	for (retry = 0; retry < IXGBE_CS4227_RETRIES; retry++) {
+		status = ixgbe_read_cs4227(hw, IXGBE_CS4227_EFUSE_STATUS,
+					   &value);
+		if (status == IXGBE_SUCCESS &&
+		    value == IXGBE_CS4227_EEPROM_LOAD_OK)
+			break;
+		msec_delay(IXGBE_CS4227_CHECK_DELAY);
+	}
+	if (retry == IXGBE_CS4227_RETRIES) {
+		ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
+			      "CS4227 reset did not complete.");
+		return IXGBE_ERR_PHY;
+	}
+
+	status = ixgbe_read_cs4227(hw, IXGBE_CS4227_EEPROM_STATUS, &value);
+	if (status != IXGBE_SUCCESS ||
+	    !(value & IXGBE_CS4227_EEPROM_LOAD_OK)) {
+		ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
+			      "CS4227 EEPROM did not load successfully.");
+		return IXGBE_ERR_PHY;
+	}
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_check_cs4227 - Check CS4227 and reset as needed
+ * @hw: pointer to hardware structure
+ **/
+STATIC void ixgbe_check_cs4227(struct ixgbe_hw *hw)
+{
+	s32 status = IXGBE_SUCCESS;
+	u32 swfw_mask = hw->phy.phy_semaphore_mask;
+	u16 value = 0;
+	u8 retry;
+
+	for (retry = 0; retry < IXGBE_CS4227_RETRIES; retry++) {
+		status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
+		if (status != IXGBE_SUCCESS) {
+			ERROR_REPORT2(IXGBE_ERROR_CAUTION,
+				      "semaphore failed with %d", status);
+			msec_delay(IXGBE_CS4227_CHECK_DELAY);
+			continue;
+		}
+
+		/* Get status of reset flow. */
+		status = ixgbe_read_cs4227(hw, IXGBE_CS4227_SCRATCH, &value);
+
+		if (status == IXGBE_SUCCESS &&
+		    value == IXGBE_CS4227_RESET_COMPLETE)
+			goto out;
+
+		if (status != IXGBE_SUCCESS ||
+		    value != IXGBE_CS4227_RESET_PENDING)
+			break;
+
+		/* Reset is pending. Wait and check again. */
+		hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+		msec_delay(IXGBE_CS4227_CHECK_DELAY);
+	}
+
+	/* If still pending, assume other instance failed. */
+	if (retry == IXGBE_CS4227_RETRIES) {
+		status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
+		if (status != IXGBE_SUCCESS) {
+			ERROR_REPORT2(IXGBE_ERROR_CAUTION,
+				      "semaphore failed with %d", status);
+			return;
+		}
+	}
+
+	/* Reset the CS4227.
*/ + status = ixgbe_reset_cs4227(hw); + if (status != IXGBE_SUCCESS) { + ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, + "CS4227 reset failed: %d", status); + goto out; + } + + /* Reset takes so long, temporarily release semaphore in case the + * other driver instance is waiting for the reset indication. + */ + ixgbe_write_cs4227(hw, IXGBE_CS4227_SCRATCH, + IXGBE_CS4227_RESET_PENDING); + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + msec_delay(10); + status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask); + if (status != IXGBE_SUCCESS) { + ERROR_REPORT2(IXGBE_ERROR_CAUTION, + "semaphore failed with %d", status); + return; + } + + /* Record completion for next time. */ + status = ixgbe_write_cs4227(hw, IXGBE_CS4227_SCRATCH, + IXGBE_CS4227_RESET_COMPLETE); + +out: + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + msec_delay(hw->eeprom.semaphore_delay); +} + +/** + * ixgbe_setup_mux_ctl - Setup ESDP register for I2C mux control + * @hw: pointer to hardware structure + **/ +STATIC void ixgbe_setup_mux_ctl(struct ixgbe_hw *hw) +{ + u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); + + if (hw->bus.lan_id) { + esdp &= ~(IXGBE_ESDP_SDP1_NATIVE | IXGBE_ESDP_SDP1); + esdp |= IXGBE_ESDP_SDP1_DIR; + } + esdp &= ~(IXGBE_ESDP_SDP0_NATIVE | IXGBE_ESDP_SDP0_DIR); + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); + IXGBE_WRITE_FLUSH(hw); +} + +/** + * ixgbe_identify_phy_x550em - Get PHY type based on device id + * @hw: pointer to hardware structure + * + * Returns error code + */ +STATIC s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw) +{ + hw->mac.ops.set_lan_id(hw); + + ixgbe_read_mng_if_sel_x550em(hw); + + switch (hw->device_id) { + case IXGBE_DEV_ID_X550EM_A_SFP: + return ixgbe_identify_sfp_module_X550em(hw); + case IXGBE_DEV_ID_X550EM_X_SFP: + /* set up for CS4227 usage */ + ixgbe_setup_mux_ctl(hw); + ixgbe_check_cs4227(hw); + /* Fallthrough */ + + case IXGBE_DEV_ID_X550EM_A_SFP_N: + return ixgbe_identify_sfp_module_X550em(hw); + break; + case IXGBE_DEV_ID_X550EM_X_KX4: + hw->phy.type = ixgbe_phy_x550em_kx4; + break; + case IXGBE_DEV_ID_X550EM_X_XFI: + hw->phy.type = ixgbe_phy_x550em_xfi; + break; + case IXGBE_DEV_ID_X550EM_X_KR: + case IXGBE_DEV_ID_X550EM_A_KR: + case IXGBE_DEV_ID_X550EM_A_KR_L: + hw->phy.type = ixgbe_phy_x550em_kr; + break; + case IXGBE_DEV_ID_X550EM_A_10G_T: + case IXGBE_DEV_ID_X550EM_X_10G_T: + return ixgbe_identify_phy_generic(hw); + case IXGBE_DEV_ID_X550EM_X_1G_T: + hw->phy.type = ixgbe_phy_ext_1g_t; + break; + case IXGBE_DEV_ID_X550EM_A_1G_T: + case IXGBE_DEV_ID_X550EM_A_1G_T_L: + hw->phy.type = ixgbe_phy_fw; + if (hw->bus.lan_id) + hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM; + else + hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY0_SM; + break; + default: + break; + } + return IXGBE_SUCCESS; +} + +/** + * ixgbe_fw_phy_activity - Perform an activity on a PHY + * @hw: pointer to hardware structure + * @activity: activity to perform + * @data: Pointer to 4 32-bit words of data + */ +s32 ixgbe_fw_phy_activity(struct ixgbe_hw *hw, u16 activity, + u32 (*data)[FW_PHY_ACT_DATA_COUNT]) +{ + union { + struct ixgbe_hic_phy_activity_req cmd; + struct ixgbe_hic_phy_activity_resp rsp; + } hic; + u16 retries = FW_PHY_ACT_RETRIES; + s32 rc; + u16 i; + + do { + memset(&hic, 0, sizeof(hic)); + hic.cmd.hdr.cmd = FW_PHY_ACT_REQ_CMD; + hic.cmd.hdr.buf_len = FW_PHY_ACT_REQ_LEN; + hic.cmd.hdr.checksum = FW_DEFAULT_CHECKSUM; + hic.cmd.port_number = hw->bus.lan_id; + hic.cmd.activity_id = IXGBE_CPU_TO_LE16(activity); + for (i = 0; i < FW_PHY_ACT_DATA_COUNT; ++i) + hic.cmd.data[i] = IXGBE_CPU_TO_BE32((*data)[i]); + 
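+		/* The request and the response share this buffer: the hic
+		 * union is reused by the firmware reply, so the big-endian
+		 * data words written above are copied back out below only
+		 * when the response status reports success.
+		 */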
+ rc = ixgbe_host_interface_command(hw, (u32 *)&hic.cmd, + sizeof(hic.cmd), + IXGBE_HI_COMMAND_TIMEOUT, + true); + if (rc != IXGBE_SUCCESS) + return rc; + if (hic.rsp.hdr.cmd_or_resp.ret_status == + FW_CEM_RESP_STATUS_SUCCESS) { + for (i = 0; i < FW_PHY_ACT_DATA_COUNT; ++i) + (*data)[i] = IXGBE_BE32_TO_CPU(hic.rsp.data[i]); + return IXGBE_SUCCESS; + } + usec_delay(20); + --retries; + } while (retries > 0); + + return IXGBE_ERR_HOST_INTERFACE_COMMAND; +} + +static const struct { + u16 fw_speed; + ixgbe_link_speed phy_speed; +} ixgbe_fw_map[] = { + { FW_PHY_ACT_LINK_SPEED_10, IXGBE_LINK_SPEED_10_FULL }, + { FW_PHY_ACT_LINK_SPEED_100, IXGBE_LINK_SPEED_100_FULL }, + { FW_PHY_ACT_LINK_SPEED_1G, IXGBE_LINK_SPEED_1GB_FULL }, + { FW_PHY_ACT_LINK_SPEED_2_5G, IXGBE_LINK_SPEED_2_5GB_FULL }, + { FW_PHY_ACT_LINK_SPEED_5G, IXGBE_LINK_SPEED_5GB_FULL }, + { FW_PHY_ACT_LINK_SPEED_10G, IXGBE_LINK_SPEED_10GB_FULL }, +}; + +/** + * ixgbe_get_phy_id_fw - Get the phy ID via firmware command + * @hw: pointer to hardware structure + * + * Returns error code + */ +static s32 ixgbe_get_phy_id_fw(struct ixgbe_hw *hw) +{ + u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 }; + u16 phy_speeds; + u16 phy_id_lo; + s32 rc; + u16 i; + + rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_PHY_INFO, &info); + if (rc) + return rc; + + hw->phy.speeds_supported = 0; + phy_speeds = info[0] & FW_PHY_INFO_SPEED_MASK; + for (i = 0; i < sizeof(ixgbe_fw_map) / sizeof(ixgbe_fw_map[0]); ++i) { + if (phy_speeds & ixgbe_fw_map[i].fw_speed) + hw->phy.speeds_supported |= ixgbe_fw_map[i].phy_speed; + } + if (!hw->phy.autoneg_advertised) + hw->phy.autoneg_advertised = hw->phy.speeds_supported; + + hw->phy.id = info[0] & FW_PHY_INFO_ID_HI_MASK; + phy_id_lo = info[1] & FW_PHY_INFO_ID_LO_MASK; + hw->phy.id |= phy_id_lo & IXGBE_PHY_REVISION_MASK; + hw->phy.revision = phy_id_lo & ~IXGBE_PHY_REVISION_MASK; + if (!hw->phy.id || hw->phy.id == IXGBE_PHY_REVISION_MASK) + return IXGBE_ERR_PHY_ADDR_INVALID; + return IXGBE_SUCCESS; +} + +/** + * ixgbe_identify_phy_fw - Get PHY type based on firmware command + * @hw: pointer to hardware structure + * + * Returns error code + */ +static s32 ixgbe_identify_phy_fw(struct ixgbe_hw *hw) +{ + if (hw->bus.lan_id) + hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM; + else + hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM; + + hw->phy.type = ixgbe_phy_fw; + hw->phy.ops.read_reg = NULL; + hw->phy.ops.write_reg = NULL; + return ixgbe_get_phy_id_fw(hw); +} + +/** + * ixgbe_shutdown_fw_phy - Shutdown a firmware-controlled PHY + * @hw: pointer to hardware structure + * + * Returns error code + */ +s32 ixgbe_shutdown_fw_phy(struct ixgbe_hw *hw) +{ + u32 setup[FW_PHY_ACT_DATA_COUNT] = { 0 }; + + setup[0] = FW_PHY_ACT_FORCE_LINK_DOWN_OFF; + return ixgbe_fw_phy_activity(hw, FW_PHY_ACT_FORCE_LINK_DOWN, &setup); +} + +STATIC s32 ixgbe_read_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 *phy_data) +{ + UNREFERENCED_4PARAMETER(*hw, reg_addr, device_type, *phy_data); + return IXGBE_NOT_IMPLEMENTED; +} + +STATIC s32 ixgbe_write_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 phy_data) +{ + UNREFERENCED_4PARAMETER(*hw, reg_addr, device_type, phy_data); + return IXGBE_NOT_IMPLEMENTED; +} + +/** + * ixgbe_read_i2c_combined_generic - Perform I2C read combined operation + * @hw: pointer to the hardware structure + * @addr: I2C bus address to read from + * @reg: I2C device register to read from + * @val: pointer to location to receive read value + * + * Returns an error code on error. 
+ **/ +STATIC s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr, + u16 reg, u16 *val) +{ + return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, true); +} + +/** + * ixgbe_read_i2c_combined_generic_unlocked - Do I2C read combined operation + * @hw: pointer to the hardware structure + * @addr: I2C bus address to read from + * @reg: I2C device register to read from + * @val: pointer to location to receive read value + * + * Returns an error code on error. + **/ +STATIC s32 +ixgbe_read_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, u8 addr, + u16 reg, u16 *val) +{ + return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, false); +} + +/** + * ixgbe_write_i2c_combined_generic - Perform I2C write combined operation + * @hw: pointer to the hardware structure + * @addr: I2C bus address to write to + * @reg: I2C device register to write to + * @val: value to write + * + * Returns an error code on error. + **/ +STATIC s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw, + u8 addr, u16 reg, u16 val) +{ + return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, true); +} + +/** + * ixgbe_write_i2c_combined_generic_unlocked - Do I2C write combined operation + * @hw: pointer to the hardware structure + * @addr: I2C bus address to write to + * @reg: I2C device register to write to + * @val: value to write + * + * Returns an error code on error. + **/ +STATIC s32 +ixgbe_write_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, + u8 addr, u16 reg, u16 val) +{ + return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, false); +} + +/** +* ixgbe_init_ops_X550EM - Inits func ptrs and MAC type +* @hw: pointer to hardware structure +* +* Initialize the function pointers and for MAC type X550EM. +* Does not touch the hardware. +**/ +s32 ixgbe_init_ops_X550EM(struct ixgbe_hw *hw) +{ + struct ixgbe_mac_info *mac = &hw->mac; + struct ixgbe_eeprom_info *eeprom = &hw->eeprom; + struct ixgbe_phy_info *phy = &hw->phy; + s32 ret_val; + + DEBUGFUNC("ixgbe_init_ops_X550EM"); + + /* Similar to X550 so start there. */ + ret_val = ixgbe_init_ops_X550(hw); + + /* Since this function eventually calls + * ixgbe_init_ops_540 by design, we are setting + * the pointers to NULL explicitly here to overwrite + * the values being set in the x540 function. + */ + /* Thermal sensor not supported in x550EM */ + mac->ops.get_thermal_sensor_data = NULL; + mac->ops.init_thermal_sensor_thresh = NULL; + mac->thermal_sensor_enabled = false; + + /* FCOE not supported in x550EM */ + mac->ops.get_san_mac_addr = NULL; + mac->ops.set_san_mac_addr = NULL; + mac->ops.get_wwn_prefix = NULL; + mac->ops.get_fcoe_boot_status = NULL; + + /* IPsec not supported in x550EM */ + mac->ops.disable_sec_rx_path = NULL; + mac->ops.enable_sec_rx_path = NULL; + + /* AUTOC register is not present in x550EM. 
*/ + mac->ops.prot_autoc_read = NULL; + mac->ops.prot_autoc_write = NULL; + + /* X550EM bus type is internal*/ + hw->bus.type = ixgbe_bus_type_internal; + mac->ops.get_bus_info = ixgbe_get_bus_info_X550em; + + + mac->ops.get_media_type = ixgbe_get_media_type_X550em; + mac->ops.setup_sfp = ixgbe_setup_sfp_modules_X550em; + mac->ops.get_link_capabilities = ixgbe_get_link_capabilities_X550em; + mac->ops.reset_hw = ixgbe_reset_hw_X550em; + mac->ops.get_supported_physical_layer = + ixgbe_get_supported_physical_layer_X550em; + + if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) + mac->ops.setup_fc = ixgbe_setup_fc_generic; + else + mac->ops.setup_fc = ixgbe_setup_fc_X550em; + + /* PHY */ + phy->ops.init = ixgbe_init_phy_ops_X550em; + switch (hw->device_id) { + case IXGBE_DEV_ID_X550EM_A_1G_T: + case IXGBE_DEV_ID_X550EM_A_1G_T_L: + mac->ops.setup_fc = NULL; + phy->ops.identify = ixgbe_identify_phy_fw; + phy->ops.set_phy_power = NULL; + phy->ops.get_firmware_version = NULL; + break; + case IXGBE_DEV_ID_X550EM_X_1G_T: + mac->ops.setup_fc = NULL; + phy->ops.identify = ixgbe_identify_phy_x550em; + phy->ops.set_phy_power = NULL; + break; + default: + phy->ops.identify = ixgbe_identify_phy_x550em; + } + + if (mac->ops.get_media_type(hw) != ixgbe_media_type_copper) + phy->ops.set_phy_power = NULL; + + + /* EEPROM */ + eeprom->ops.init_params = ixgbe_init_eeprom_params_X540; + eeprom->ops.read = ixgbe_read_ee_hostif_X550; + eeprom->ops.read_buffer = ixgbe_read_ee_hostif_buffer_X550; + eeprom->ops.write = ixgbe_write_ee_hostif_X550; + eeprom->ops.write_buffer = ixgbe_write_ee_hostif_buffer_X550; + eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_X550; + eeprom->ops.validate_checksum = ixgbe_validate_eeprom_checksum_X550; + eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_X550; + + return ret_val; +} + +/** + * ixgbe_setup_fw_link - Setup firmware-controlled PHYs + * @hw: pointer to hardware structure + */ +static s32 ixgbe_setup_fw_link(struct ixgbe_hw *hw) +{ + u32 setup[FW_PHY_ACT_DATA_COUNT] = { 0 }; + s32 rc; + u16 i; + + if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw)) + return 0; + + if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) { + ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED, + "ixgbe_fc_rx_pause not valid in strict IEEE mode\n"); + return IXGBE_ERR_INVALID_LINK_SETTINGS; + } + + switch (hw->fc.requested_mode) { + case ixgbe_fc_full: + setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_RXTX << + FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT; + break; + case ixgbe_fc_rx_pause: + setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_RX << + FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT; + break; + case ixgbe_fc_tx_pause: + setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_TX << + FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT; + break; + default: + break; + } + + for (i = 0; i < sizeof(ixgbe_fw_map) / sizeof(ixgbe_fw_map[0]); ++i) { + if (hw->phy.autoneg_advertised & ixgbe_fw_map[i].phy_speed) + setup[0] |= ixgbe_fw_map[i].fw_speed; + } + setup[0] |= FW_PHY_ACT_SETUP_LINK_HP | FW_PHY_ACT_SETUP_LINK_AN; + + if (hw->phy.eee_speeds_advertised) + setup[0] |= FW_PHY_ACT_SETUP_LINK_EEE; + + rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_SETUP_LINK, &setup); + if (rc) + return rc; + if (setup[0] == FW_PHY_ACT_SETUP_LINK_RSP_DOWN) + return IXGBE_ERR_OVERTEMP; + return IXGBE_SUCCESS; +} + +/** + * ixgbe_fc_autoneg_fw _ Set up flow control for FW-controlled PHYs + * @hw: pointer to hardware structure + * + * Called at init time to set up flow control. 
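+ * A requested mode of ixgbe_fc_default is promoted to full flow control
+ * before the firmware link setup is issued.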
+ */ +static s32 ixgbe_fc_autoneg_fw(struct ixgbe_hw *hw) +{ + if (hw->fc.requested_mode == ixgbe_fc_default) + hw->fc.requested_mode = ixgbe_fc_full; + + return ixgbe_setup_fw_link(hw); +} + +/** + * ixgbe_setup_eee_fw - Enable/disable EEE support + * @hw: pointer to the HW structure + * @enable_eee: boolean flag to enable EEE + * + * Enable/disable EEE based on enable_eee flag. + * This function controls EEE for firmware-based PHY implementations. + */ +static s32 ixgbe_setup_eee_fw(struct ixgbe_hw *hw, bool enable_eee) +{ + if (!!hw->phy.eee_speeds_advertised == enable_eee) + return IXGBE_SUCCESS; + if (enable_eee) + hw->phy.eee_speeds_advertised = hw->phy.eee_speeds_supported; + else + hw->phy.eee_speeds_advertised = 0; + return hw->phy.ops.setup_link(hw); +} + +/** +* ixgbe_init_ops_X550EM_a - Inits func ptrs and MAC type +* @hw: pointer to hardware structure +* +* Initialize the function pointers and for MAC type X550EM_a. +* Does not touch the hardware. +**/ +s32 ixgbe_init_ops_X550EM_a(struct ixgbe_hw *hw) +{ + struct ixgbe_mac_info *mac = &hw->mac; + s32 ret_val; + + DEBUGFUNC("ixgbe_init_ops_X550EM_a"); + + /* Start with generic X550EM init */ + ret_val = ixgbe_init_ops_X550EM(hw); + + if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII || + hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII_L) { + mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550; + mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550; + } else { + mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550a; + mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550a; + } + mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync_X550a; + mac->ops.release_swfw_sync = ixgbe_release_swfw_sync_X550a; + + switch (mac->ops.get_media_type(hw)) { + case ixgbe_media_type_fiber: + mac->ops.setup_fc = NULL; + mac->ops.fc_autoneg = ixgbe_fc_autoneg_fiber_x550em_a; + break; + case ixgbe_media_type_backplane: + mac->ops.fc_autoneg = ixgbe_fc_autoneg_backplane_x550em_a; + mac->ops.setup_fc = ixgbe_setup_fc_backplane_x550em_a; + break; + default: + break; + } + + switch (hw->device_id) { + case IXGBE_DEV_ID_X550EM_A_1G_T: + case IXGBE_DEV_ID_X550EM_A_1G_T_L: + mac->ops.fc_autoneg = ixgbe_fc_autoneg_sgmii_x550em_a; + mac->ops.setup_fc = ixgbe_fc_autoneg_fw; + mac->ops.setup_eee = ixgbe_setup_eee_fw; + hw->phy.eee_speeds_supported = IXGBE_LINK_SPEED_100_FULL | + IXGBE_LINK_SPEED_1GB_FULL; + hw->phy.eee_speeds_advertised = hw->phy.eee_speeds_supported; + break; + default: + break; + } + + return ret_val; +} + +/** +* ixgbe_init_ops_X550EM_x - Inits func ptrs and MAC type +* @hw: pointer to hardware structure +* +* Initialize the function pointers and for MAC type X550EM_x. +* Does not touch the hardware. 
+**/ +s32 ixgbe_init_ops_X550EM_x(struct ixgbe_hw *hw) +{ + struct ixgbe_mac_info *mac = &hw->mac; + struct ixgbe_link_info *link = &hw->link; + s32 ret_val; + + DEBUGFUNC("ixgbe_init_ops_X550EM_x"); + + /* Start with generic X550EM init */ + ret_val = ixgbe_init_ops_X550EM(hw); + + mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550; + mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550; + mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync_X550em; + mac->ops.release_swfw_sync = ixgbe_release_swfw_sync_X550em; + link->ops.read_link = ixgbe_read_i2c_combined_generic; + link->ops.read_link_unlocked = ixgbe_read_i2c_combined_generic_unlocked; + link->ops.write_link = ixgbe_write_i2c_combined_generic; + link->ops.write_link_unlocked = + ixgbe_write_i2c_combined_generic_unlocked; + link->addr = IXGBE_CS4227; + + if (hw->device_id == IXGBE_DEV_ID_X550EM_X_1G_T) { + mac->ops.setup_fc = NULL; + mac->ops.setup_eee = NULL; + mac->ops.init_led_link_act = NULL; + } + + return ret_val; +} + +/** + * ixgbe_dmac_config_X550 + * @hw: pointer to hardware structure + * + * Configure DMA coalescing. If enabling dmac, dmac is activated. + * When disabling dmac, dmac enable dmac bit is cleared. + **/ +s32 ixgbe_dmac_config_X550(struct ixgbe_hw *hw) +{ + u32 reg, high_pri_tc; + + DEBUGFUNC("ixgbe_dmac_config_X550"); + + /* Disable DMA coalescing before configuring */ + reg = IXGBE_READ_REG(hw, IXGBE_DMACR); + reg &= ~IXGBE_DMACR_DMAC_EN; + IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg); + + /* Disable DMA Coalescing if the watchdog timer is 0 */ + if (!hw->mac.dmac_config.watchdog_timer) + goto out; + + ixgbe_dmac_config_tcs_X550(hw); + + /* Configure DMA Coalescing Control Register */ + reg = IXGBE_READ_REG(hw, IXGBE_DMACR); + + /* Set the watchdog timer in units of 40.96 usec */ + reg &= ~IXGBE_DMACR_DMACWT_MASK; + reg |= (hw->mac.dmac_config.watchdog_timer * 100) / 4096; + + reg &= ~IXGBE_DMACR_HIGH_PRI_TC_MASK; + /* If fcoe is enabled, set high priority traffic class */ + if (hw->mac.dmac_config.fcoe_en) { + high_pri_tc = 1 << hw->mac.dmac_config.fcoe_tc; + reg |= ((high_pri_tc << IXGBE_DMACR_HIGH_PRI_TC_SHIFT) & + IXGBE_DMACR_HIGH_PRI_TC_MASK); + } + reg |= IXGBE_DMACR_EN_MNG_IND; + + /* Enable DMA coalescing after configuration */ + reg |= IXGBE_DMACR_DMAC_EN; + IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg); + +out: + return IXGBE_SUCCESS; +} + +/** + * ixgbe_dmac_config_tcs_X550 + * @hw: pointer to hardware structure + * + * Configure DMA coalescing threshold per TC. The dmac enable bit must + * be cleared before configuring. 
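+ * The per-TC threshold is taken from the Rx packet buffer size less a
+ * link-speed dependent headroom and is never set below the current
+ * maximum frame size read from MAXFRS.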
+ **/ +s32 ixgbe_dmac_config_tcs_X550(struct ixgbe_hw *hw) +{ + u32 tc, reg, pb_headroom, rx_pb_size, maxframe_size_kb; + + DEBUGFUNC("ixgbe_dmac_config_tcs_X550"); + + /* Configure DMA coalescing enabled */ + switch (hw->mac.dmac_config.link_speed) { + case IXGBE_LINK_SPEED_10_FULL: + case IXGBE_LINK_SPEED_100_FULL: + pb_headroom = IXGBE_DMACRXT_100M; + break; + case IXGBE_LINK_SPEED_1GB_FULL: + pb_headroom = IXGBE_DMACRXT_1G; + break; + default: + pb_headroom = IXGBE_DMACRXT_10G; + break; + } + + maxframe_size_kb = ((IXGBE_READ_REG(hw, IXGBE_MAXFRS) >> + IXGBE_MHADD_MFS_SHIFT) / 1024); + + /* Set the per Rx packet buffer receive threshold */ + for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++) { + reg = IXGBE_READ_REG(hw, IXGBE_DMCTH(tc)); + reg &= ~IXGBE_DMCTH_DMACRXT_MASK; + + if (tc < hw->mac.dmac_config.num_tcs) { + /* Get Rx PB size */ + rx_pb_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc)); + rx_pb_size = (rx_pb_size & IXGBE_RXPBSIZE_MASK) >> + IXGBE_RXPBSIZE_SHIFT; + + /* Calculate receive buffer threshold in kilobytes */ + if (rx_pb_size > pb_headroom) + rx_pb_size = rx_pb_size - pb_headroom; + else + rx_pb_size = 0; + + /* Minimum of MFS shall be set for DMCTH */ + reg |= (rx_pb_size > maxframe_size_kb) ? + rx_pb_size : maxframe_size_kb; + } + IXGBE_WRITE_REG(hw, IXGBE_DMCTH(tc), reg); + } + return IXGBE_SUCCESS; +} + +/** + * ixgbe_dmac_update_tcs_X550 + * @hw: pointer to hardware structure + * + * Disables dmac, updates per TC settings, and then enables dmac. + **/ +s32 ixgbe_dmac_update_tcs_X550(struct ixgbe_hw *hw) +{ + u32 reg; + + DEBUGFUNC("ixgbe_dmac_update_tcs_X550"); + + /* Disable DMA coalescing before configuring */ + reg = IXGBE_READ_REG(hw, IXGBE_DMACR); + reg &= ~IXGBE_DMACR_DMAC_EN; + IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg); + + ixgbe_dmac_config_tcs_X550(hw); + + /* Enable DMA coalescing after configuration */ + reg = IXGBE_READ_REG(hw, IXGBE_DMACR); + reg |= IXGBE_DMACR_DMAC_EN; + IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_init_eeprom_params_X550 - Initialize EEPROM params + * @hw: pointer to hardware structure + * + * Initializes the EEPROM parameters ixgbe_eeprom_info within the + * ixgbe_hw struct in order to set up EEPROM access. 
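+ * The word size is derived from the EEC.SIZE field reported by the
+ * flash controller rather than being assumed.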
+ **/ +s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw) +{ + struct ixgbe_eeprom_info *eeprom = &hw->eeprom; + u32 eec; + u16 eeprom_size; + + DEBUGFUNC("ixgbe_init_eeprom_params_X550"); + + if (eeprom->type == ixgbe_eeprom_uninitialized) { + eeprom->semaphore_delay = 10; + eeprom->type = ixgbe_flash; + + eec = IXGBE_READ_REG(hw, IXGBE_EEC); + eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >> + IXGBE_EEC_SIZE_SHIFT); + eeprom->word_size = 1 << (eeprom_size + + IXGBE_EEPROM_WORD_SIZE_SHIFT); + + DEBUGOUT2("Eeprom params: type = %d, size = %d\n", + eeprom->type, eeprom->word_size); + } + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_set_source_address_pruning_X550 - Enable/Disbale source address pruning + * @hw: pointer to hardware structure + * @enable: enable or disable source address pruning + * @pool: Rx pool to set source address pruning for + **/ +void ixgbe_set_source_address_pruning_X550(struct ixgbe_hw *hw, bool enable, + unsigned int pool) +{ + u64 pfflp; + + /* max rx pool is 63 */ + if (pool > 63) + return; + + pfflp = (u64)IXGBE_READ_REG(hw, IXGBE_PFFLPL); + pfflp |= (u64)IXGBE_READ_REG(hw, IXGBE_PFFLPH) << 32; + + if (enable) + pfflp |= (1ULL << pool); + else + pfflp &= ~(1ULL << pool); + + IXGBE_WRITE_REG(hw, IXGBE_PFFLPL, (u32)pfflp); + IXGBE_WRITE_REG(hw, IXGBE_PFFLPH, (u32)(pfflp >> 32)); +} + +/** + * ixgbe_set_ethertype_anti_spoofing_X550 - Enable/Disable Ethertype anti-spoofing + * @hw: pointer to hardware structure + * @enable: enable or disable switch for Ethertype anti-spoofing + * @vf: Virtual Function pool - VF Pool to set for Ethertype anti-spoofing + * + **/ +void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw, + bool enable, int vf) +{ + int vf_target_reg = vf >> 3; + int vf_target_shift = vf % 8 + IXGBE_SPOOF_ETHERTYPEAS_SHIFT; + u32 pfvfspoof; + + DEBUGFUNC("ixgbe_set_ethertype_anti_spoofing_X550"); + + pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg)); + if (enable) + pfvfspoof |= (1 << vf_target_shift); + else + pfvfspoof &= ~(1 << vf_target_shift); + + IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof); +} + +/** + * ixgbe_iosf_wait - Wait for IOSF command completion + * @hw: pointer to hardware structure + * @ctrl: pointer to location to receive final IOSF control value + * + * Returns failing status on timeout + * + * Note: ctrl can be NULL if the IOSF control register value is not needed + **/ +STATIC s32 ixgbe_iosf_wait(struct ixgbe_hw *hw, u32 *ctrl) +{ + u32 i, command = 0; + + /* Check every 10 usec to see if the address cycle completed. 
+ * The SB IOSF BUSY bit will clear when the operation is + * complete + */ + for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { + command = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL); + if ((command & IXGBE_SB_IOSF_CTRL_BUSY) == 0) + break; + usec_delay(10); + } + if (ctrl) + *ctrl = command; + if (i == IXGBE_MDIO_COMMAND_TIMEOUT) { + ERROR_REPORT1(IXGBE_ERROR_POLLING, "Wait timed out\n"); + return IXGBE_ERR_PHY; + } + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_write_iosf_sb_reg_x550 - Writes a value to specified register + * of the IOSF device + * @hw: pointer to hardware structure + * @reg_addr: 32 bit PHY register to write + * @device_type: 3 bit device type + * @data: Data to write to the register + **/ +s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u32 data) +{ + u32 gssr = IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_PHY0_SM; + u32 command, error; + s32 ret; + + ret = ixgbe_acquire_swfw_semaphore(hw, gssr); + if (ret != IXGBE_SUCCESS) + return ret; + + ret = ixgbe_iosf_wait(hw, NULL); + if (ret != IXGBE_SUCCESS) + goto out; + + command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) | + (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT)); + + /* Write IOSF control register */ + IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command); + + /* Write IOSF data register */ + IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA, data); + + ret = ixgbe_iosf_wait(hw, &command); + + if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) { + error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >> + IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT; + ERROR_REPORT2(IXGBE_ERROR_POLLING, + "Failed to write, error %x\n", error); + ret = IXGBE_ERR_PHY; + } + +out: + ixgbe_release_swfw_semaphore(hw, gssr); + return ret; +} + +/** + * ixgbe_read_iosf_sb_reg_x550 - Reads specified register of the IOSF device + * @hw: pointer to hardware structure + * @reg_addr: 32 bit PHY register to write + * @device_type: 3 bit device type + * @data: Pointer to read data from the register + **/ +s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u32 *data) +{ + u32 gssr = IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_PHY0_SM; + u32 command, error; + s32 ret; + + ret = ixgbe_acquire_swfw_semaphore(hw, gssr); + if (ret != IXGBE_SUCCESS) + return ret; + + ret = ixgbe_iosf_wait(hw, NULL); + if (ret != IXGBE_SUCCESS) + goto out; + + command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) | + (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT)); + + /* Write IOSF control register */ + IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command); + + ret = ixgbe_iosf_wait(hw, &command); + + if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) { + error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >> + IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT; + ERROR_REPORT2(IXGBE_ERROR_POLLING, + "Failed to read, error %x\n", error); + ret = IXGBE_ERR_PHY; + } + + if (ret == IXGBE_SUCCESS) + *data = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA); + +out: + ixgbe_release_swfw_semaphore(hw, gssr); + return ret; +} + +/** + * ixgbe_get_phy_token - Get the token for shared phy access + * @hw: Pointer to hardware structure + */ + +s32 ixgbe_get_phy_token(struct ixgbe_hw *hw) +{ + struct ixgbe_hic_phy_token_req token_cmd; + s32 status; + + token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD; + token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN; + token_cmd.hdr.cmd_or_resp.cmd_resv = 0; + token_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM; + token_cmd.port_number = hw->bus.lan_id; + token_cmd.command_type = FW_PHY_TOKEN_REQ; + 
token_cmd.pad = 0; + status = ixgbe_host_interface_command(hw, (u32 *)&token_cmd, + sizeof(token_cmd), + IXGBE_HI_COMMAND_TIMEOUT, + true); + if (status) { + DEBUGOUT1("Issuing host interface command failed with Status = %d\n", + status); + return status; + } + if (token_cmd.hdr.cmd_or_resp.ret_status == FW_PHY_TOKEN_OK) + return IXGBE_SUCCESS; + if (token_cmd.hdr.cmd_or_resp.ret_status != FW_PHY_TOKEN_RETRY) { + DEBUGOUT1("Host interface command returned 0x%08x , returning IXGBE_ERR_FW_RESP_INVALID\n", + token_cmd.hdr.cmd_or_resp.ret_status); + return IXGBE_ERR_FW_RESP_INVALID; + } + + DEBUGOUT("Returning IXGBE_ERR_TOKEN_RETRY\n"); + return IXGBE_ERR_TOKEN_RETRY; +} + +/** + * ixgbe_put_phy_token - Put the token for shared phy access + * @hw: Pointer to hardware structure + */ + +s32 ixgbe_put_phy_token(struct ixgbe_hw *hw) +{ + struct ixgbe_hic_phy_token_req token_cmd; + s32 status; + + token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD; + token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN; + token_cmd.hdr.cmd_or_resp.cmd_resv = 0; + token_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM; + token_cmd.port_number = hw->bus.lan_id; + token_cmd.command_type = FW_PHY_TOKEN_REL; + token_cmd.pad = 0; + status = ixgbe_host_interface_command(hw, (u32 *)&token_cmd, + sizeof(token_cmd), + IXGBE_HI_COMMAND_TIMEOUT, + true); + if (status) + return status; + if (token_cmd.hdr.cmd_or_resp.ret_status == FW_PHY_TOKEN_OK) + return IXGBE_SUCCESS; + + DEBUGOUT("Put PHY Token host interface command failed"); + return IXGBE_ERR_FW_RESP_INVALID; +} + +/** + * ixgbe_write_iosf_sb_reg_x550a - Writes a value to specified register + * of the IOSF device + * @hw: pointer to hardware structure + * @reg_addr: 32 bit PHY register to write + * @device_type: 3 bit device type + * @data: Data to write to the register + **/ +s32 ixgbe_write_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u32 data) +{ + struct ixgbe_hic_internal_phy_req write_cmd; + s32 status; + UNREFERENCED_1PARAMETER(device_type); + + memset(&write_cmd, 0, sizeof(write_cmd)); + write_cmd.hdr.cmd = FW_INT_PHY_REQ_CMD; + write_cmd.hdr.buf_len = FW_INT_PHY_REQ_LEN; + write_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM; + write_cmd.port_number = hw->bus.lan_id; + write_cmd.command_type = FW_INT_PHY_REQ_WRITE; + write_cmd.address = IXGBE_CPU_TO_BE16(reg_addr); + write_cmd.write_data = IXGBE_CPU_TO_BE32(data); + + status = ixgbe_host_interface_command(hw, (u32 *)&write_cmd, + sizeof(write_cmd), + IXGBE_HI_COMMAND_TIMEOUT, false); + + return status; +} + +/** + * ixgbe_read_iosf_sb_reg_x550a - Reads specified register of the IOSF device + * @hw: pointer to hardware structure + * @reg_addr: 32 bit PHY register to write + * @device_type: 3 bit device type + * @data: Pointer to read data from the register + **/ +s32 ixgbe_read_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u32 *data) +{ + union { + struct ixgbe_hic_internal_phy_req cmd; + struct ixgbe_hic_internal_phy_resp rsp; + } hic; + s32 status; + UNREFERENCED_1PARAMETER(device_type); + + memset(&hic, 0, sizeof(hic)); + hic.cmd.hdr.cmd = FW_INT_PHY_REQ_CMD; + hic.cmd.hdr.buf_len = FW_INT_PHY_REQ_LEN; + hic.cmd.hdr.checksum = FW_DEFAULT_CHECKSUM; + hic.cmd.port_number = hw->bus.lan_id; + hic.cmd.command_type = FW_INT_PHY_REQ_READ; + hic.cmd.address = IXGBE_CPU_TO_BE16(reg_addr); + + status = ixgbe_host_interface_command(hw, (u32 *)&hic.cmd, + sizeof(hic.cmd), + IXGBE_HI_COMMAND_TIMEOUT, true); + + /* Extract the register value from the response. 
*/ + *data = IXGBE_BE32_TO_CPU(hic.rsp.read_data); + + return status; +} + +/** + * ixgbe_disable_mdd_X550 + * @hw: pointer to hardware structure + * + * Disable malicious driver detection + **/ +void ixgbe_disable_mdd_X550(struct ixgbe_hw *hw) +{ + u32 reg; + + DEBUGFUNC("ixgbe_disable_mdd_X550"); + + /* Disable MDD for TX DMA and interrupt */ + reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); + reg &= ~(IXGBE_DMATXCTL_MDP_EN | IXGBE_DMATXCTL_MBINTEN); + IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg); + + /* Disable MDD for RX and interrupt */ + reg = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); + reg &= ~(IXGBE_RDRXCTL_MDP_EN | IXGBE_RDRXCTL_MBINTEN); + IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg); +} + +/** + * ixgbe_enable_mdd_X550 + * @hw: pointer to hardware structure + * + * Enable malicious driver detection + **/ +void ixgbe_enable_mdd_X550(struct ixgbe_hw *hw) +{ + u32 reg; + + DEBUGFUNC("ixgbe_enable_mdd_X550"); + + /* Enable MDD for TX DMA and interrupt */ + reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); + reg |= (IXGBE_DMATXCTL_MDP_EN | IXGBE_DMATXCTL_MBINTEN); + IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg); + + /* Enable MDD for RX and interrupt */ + reg = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); + reg |= (IXGBE_RDRXCTL_MDP_EN | IXGBE_RDRXCTL_MBINTEN); + IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg); +} + +/** + * ixgbe_restore_mdd_vf_X550 + * @hw: pointer to hardware structure + * @vf: vf index + * + * Restore VF that was disabled during malicious driver detection event + **/ +void ixgbe_restore_mdd_vf_X550(struct ixgbe_hw *hw, u32 vf) +{ + u32 idx, reg, num_qs, start_q, bitmask; + + DEBUGFUNC("ixgbe_restore_mdd_vf_X550"); + + /* Map VF to queues */ + reg = IXGBE_READ_REG(hw, IXGBE_MRQC); + switch (reg & IXGBE_MRQC_MRQE_MASK) { + case IXGBE_MRQC_VMDQRT8TCEN: + num_qs = 8; /* 16 VFs / pools */ + bitmask = 0x000000FF; + break; + case IXGBE_MRQC_VMDQRSS32EN: + case IXGBE_MRQC_VMDQRT4TCEN: + num_qs = 4; /* 32 VFs / pools */ + bitmask = 0x0000000F; + break; + default: /* 64 VFs / pools */ + num_qs = 2; + bitmask = 0x00000003; + break; + } + start_q = vf * num_qs; + + /* Release vf's queues by clearing WQBR_TX and WQBR_RX (RW1C) */ + idx = start_q / 32; + reg = 0; + reg |= (bitmask << (start_q % 32)); + IXGBE_WRITE_REG(hw, IXGBE_WQBR_TX(idx), reg); + IXGBE_WRITE_REG(hw, IXGBE_WQBR_RX(idx), reg); +} + +/** + * ixgbe_mdd_event_X550 + * @hw: pointer to hardware structure + * @vf_bitmap: vf bitmap of malicious vfs + * + * Handle malicious driver detection event. 
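+ * Scans WQBR_TX and WQBR_RX for queues flagged by hardware, maps each
+ * flagged queue back to its owning VF using the current pool layout,
+ * and sets that VF's bit in @vf_bitmap.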
+ **/ +void ixgbe_mdd_event_X550(struct ixgbe_hw *hw, u32 *vf_bitmap) +{ + u32 wqbr; + u32 i, j, reg, q, shift, vf, idx; + + DEBUGFUNC("ixgbe_mdd_event_X550"); + + /* figure out pool size for mapping to vf's */ + reg = IXGBE_READ_REG(hw, IXGBE_MRQC); + switch (reg & IXGBE_MRQC_MRQE_MASK) { + case IXGBE_MRQC_VMDQRT8TCEN: + shift = 3; /* 16 VFs / pools */ + break; + case IXGBE_MRQC_VMDQRSS32EN: + case IXGBE_MRQC_VMDQRT4TCEN: + shift = 2; /* 32 VFs / pools */ + break; + default: + shift = 1; /* 64 VFs / pools */ + break; + } + + /* Read WQBR_TX and WQBR_RX and check for malicious queues */ + for (i = 0; i < 4; i++) { + wqbr = IXGBE_READ_REG(hw, IXGBE_WQBR_TX(i)); + wqbr |= IXGBE_READ_REG(hw, IXGBE_WQBR_RX(i)); + + if (!wqbr) + continue; + + /* Get malicious queue */ + for (j = 0; j < 32 && wqbr; j++) { + + if (!(wqbr & (1 << j))) + continue; + + /* Get queue from bitmask */ + q = j + (i * 32); + + /* Map queue to vf */ + vf = (q >> shift); + + /* Set vf bit in vf_bitmap */ + idx = vf / 32; + vf_bitmap[idx] |= (1 << (vf % 32)); + wqbr &= ~(1 << j); + } + } +} + +/** + * ixgbe_get_media_type_X550em - Get media type + * @hw: pointer to hardware structure + * + * Returns the media type (fiber, copper, backplane) + */ +enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw) +{ + enum ixgbe_media_type media_type; + + DEBUGFUNC("ixgbe_get_media_type_X550em"); + + /* Detect if there is a copper PHY attached. */ + switch (hw->device_id) { + case IXGBE_DEV_ID_X550EM_X_KR: + case IXGBE_DEV_ID_X550EM_X_KX4: + case IXGBE_DEV_ID_X550EM_X_XFI: + case IXGBE_DEV_ID_X550EM_A_KR: + case IXGBE_DEV_ID_X550EM_A_KR_L: + media_type = ixgbe_media_type_backplane; + break; + case IXGBE_DEV_ID_X550EM_X_SFP: + case IXGBE_DEV_ID_X550EM_A_SFP: + case IXGBE_DEV_ID_X550EM_A_SFP_N: + case IXGBE_DEV_ID_X550EM_A_QSFP: + case IXGBE_DEV_ID_X550EM_A_QSFP_N: + media_type = ixgbe_media_type_fiber; + break; + case IXGBE_DEV_ID_X550EM_X_1G_T: + case IXGBE_DEV_ID_X550EM_X_10G_T: + case IXGBE_DEV_ID_X550EM_A_10G_T: + media_type = ixgbe_media_type_copper; + break; + case IXGBE_DEV_ID_X550EM_A_SGMII: + case IXGBE_DEV_ID_X550EM_A_SGMII_L: + media_type = ixgbe_media_type_backplane; + hw->phy.type = ixgbe_phy_sgmii; + break; + case IXGBE_DEV_ID_X550EM_A_1G_T: + case IXGBE_DEV_ID_X550EM_A_1G_T_L: + media_type = ixgbe_media_type_copper; + break; + default: + media_type = ixgbe_media_type_unknown; + break; + } + return media_type; +} + +/** + * ixgbe_supported_sfp_modules_X550em - Check if SFP module type is supported + * @hw: pointer to hardware structure + * @linear: true if SFP module is linear + */ +STATIC s32 ixgbe_supported_sfp_modules_X550em(struct ixgbe_hw *hw, bool *linear) +{ + DEBUGFUNC("ixgbe_supported_sfp_modules_X550em"); + + switch (hw->phy.sfp_type) { + case ixgbe_sfp_type_not_present: + return IXGBE_ERR_SFP_NOT_PRESENT; + case ixgbe_sfp_type_da_cu_core0: + case ixgbe_sfp_type_da_cu_core1: + *linear = true; + break; + case ixgbe_sfp_type_srlr_core0: + case ixgbe_sfp_type_srlr_core1: + case ixgbe_sfp_type_da_act_lmt_core0: + case ixgbe_sfp_type_da_act_lmt_core1: + case ixgbe_sfp_type_1g_sx_core0: + case ixgbe_sfp_type_1g_sx_core1: + case ixgbe_sfp_type_1g_lx_core0: + case ixgbe_sfp_type_1g_lx_core1: + case ixgbe_sfp_type_1g_lha_core0: + case ixgbe_sfp_type_1g_lha_core1: + *linear = false; + break; + case ixgbe_sfp_type_unknown: + case ixgbe_sfp_type_1g_cu_core0: + case ixgbe_sfp_type_1g_cu_core1: + default: + return IXGBE_ERR_SFP_NOT_SUPPORTED; + } + + return IXGBE_SUCCESS; +} + +/** + * 
ixgbe_identify_sfp_module_X550em - Identifies SFP modules + * @hw: pointer to hardware structure + * + * Searches for and identifies the SFP module and assigns appropriate PHY type. + **/ +s32 ixgbe_identify_sfp_module_X550em(struct ixgbe_hw *hw) +{ + s32 status; + bool linear; + + DEBUGFUNC("ixgbe_identify_sfp_module_X550em"); + + status = ixgbe_identify_module_generic(hw); + + if (status != IXGBE_SUCCESS) + return status; + + /* Check if SFP module is supported */ + status = ixgbe_supported_sfp_modules_X550em(hw, &linear); + + return status; +} + +/** + * ixgbe_setup_sfp_modules_X550em - Setup MAC link ops + * @hw: pointer to hardware structure + */ +s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw) +{ + s32 status; + bool linear; + + DEBUGFUNC("ixgbe_setup_sfp_modules_X550em"); + + /* Check if SFP module is supported */ + status = ixgbe_supported_sfp_modules_X550em(hw, &linear); + + if (status != IXGBE_SUCCESS) + return status; + + ixgbe_init_mac_link_ops_X550em(hw); + hw->phy.ops.reset = NULL; + + return IXGBE_SUCCESS; +} + +/** +* ixgbe_restart_an_internal_phy_x550em - restart autonegotiation for the +* internal PHY +* @hw: pointer to hardware structure +**/ +STATIC s32 ixgbe_restart_an_internal_phy_x550em(struct ixgbe_hw *hw) +{ + s32 status; + u32 link_ctrl; + + /* Restart auto-negotiation. */ + status = hw->mac.ops.read_iosf_sb_reg(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &link_ctrl); + + if (status) { + DEBUGOUT("Auto-negotiation did not complete\n"); + return status; + } + + link_ctrl |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART; + status = hw->mac.ops.write_iosf_sb_reg(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, link_ctrl); + + if (hw->mac.type == ixgbe_mac_X550EM_a) { + u32 flx_mask_st20; + + /* Indicate to FW that AN restart has been asserted */ + status = hw->mac.ops.read_iosf_sb_reg(hw, + IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_mask_st20); + + if (status) { + DEBUGOUT("Auto-negotiation did not complete\n"); + return status; + } + + flx_mask_st20 |= IXGBE_KRM_PMD_FLX_MASK_ST20_FW_AN_RESTART; + status = hw->mac.ops.write_iosf_sb_reg(hw, + IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, flx_mask_st20); + } + + return status; +} + +#ifndef PREBOOT_SUPPORT +/** + * ixgbe_setup_sgmii - Set up link for sgmii + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg_wait: true when waiting for completion is needed + */ +STATIC s32 ixgbe_setup_sgmii(struct ixgbe_hw *hw, ixgbe_link_speed speed, + bool autoneg_wait) +{ + struct ixgbe_mac_info *mac = &hw->mac; + u32 lval, sval, flx_val; + s32 rc; + + rc = mac->ops.read_iosf_sb_reg(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &lval); + if (rc) + return rc; + + lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE; + lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK; + lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN; + lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN; + lval |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G; + rc = mac->ops.write_iosf_sb_reg(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, lval); + if (rc) + return rc; + + rc = mac->ops.read_iosf_sb_reg(hw, + IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &sval); + if (rc) + return rc; + + sval |= IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D; + sval |= IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D; + rc = mac->ops.write_iosf_sb_reg(hw, + 
IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, sval); + if (rc) + return rc; + + rc = mac->ops.read_iosf_sb_reg(hw, + IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_val); + if (rc) + return rc; + + flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK; + flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_1G; + flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN; + flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN; + flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN; + + rc = mac->ops.write_iosf_sb_reg(hw, + IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, flx_val); + if (rc) + return rc; + + rc = ixgbe_restart_an_internal_phy_x550em(hw); + if (rc) + return rc; + + return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait); +} + +#endif /* PREBOOT_SUPPORT */ +/** + * ixgbe_setup_sgmii_fw - Set up link for internal PHY SGMII auto-negotiation + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg_wait: true when waiting for completion is needed + */ +STATIC s32 ixgbe_setup_sgmii_fw(struct ixgbe_hw *hw, ixgbe_link_speed speed, + bool autoneg_wait) +{ + struct ixgbe_mac_info *mac = &hw->mac; + u32 lval, sval, flx_val; + s32 rc; + + rc = mac->ops.read_iosf_sb_reg(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &lval); + if (rc) + return rc; + + lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE; + lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK; + lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN; + lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN; + lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G; + rc = mac->ops.write_iosf_sb_reg(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, lval); + if (rc) + return rc; + + rc = mac->ops.read_iosf_sb_reg(hw, + IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &sval); + if (rc) + return rc; + + sval &= ~IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D; + sval &= ~IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D; + rc = mac->ops.write_iosf_sb_reg(hw, + IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, sval); + if (rc) + return rc; + + rc = mac->ops.write_iosf_sb_reg(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, lval); + if (rc) + return rc; + + rc = mac->ops.read_iosf_sb_reg(hw, + IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_val); + if (rc) + return rc; + + flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK; + flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_AN; + flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN; + flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN; + flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN; + + rc = mac->ops.write_iosf_sb_reg(hw, + IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, flx_val); + if (rc) + return rc; + + rc = ixgbe_restart_an_internal_phy_x550em(hw); + + return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait); +} + +/** + * ixgbe_init_mac_link_ops_X550em - init mac link function pointers + * @hw: pointer to hardware structure + */ +void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw) +{ + struct ixgbe_mac_info *mac = &hw->mac; + + DEBUGFUNC("ixgbe_init_mac_link_ops_X550em"); + + switch (hw->mac.ops.get_media_type(hw)) { + case ixgbe_media_type_fiber: + /* CS4227 does not support autoneg, so disable the laser control + * functions for SFP+ fiber + */ + mac->ops.disable_tx_laser = NULL; + mac->ops.enable_tx_laser = NULL; + mac->ops.flap_tx_laser = NULL; + 
mac->ops.setup_link = ixgbe_setup_mac_link_multispeed_fiber; + mac->ops.set_rate_select_speed = + ixgbe_set_soft_rate_select_speed; + + if ((hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N) || + (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP)) + mac->ops.setup_mac_link = + ixgbe_setup_mac_link_sfp_x550a; + else + mac->ops.setup_mac_link = + ixgbe_setup_mac_link_sfp_x550em; + break; + case ixgbe_media_type_copper: + if (hw->device_id == IXGBE_DEV_ID_X550EM_X_1G_T) + break; + if (hw->mac.type == ixgbe_mac_X550EM_a) { + if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T || + hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L) { + mac->ops.setup_link = ixgbe_setup_sgmii_fw; + mac->ops.check_link = + ixgbe_check_mac_link_generic; + } else { + mac->ops.setup_link = + ixgbe_setup_mac_link_t_X550em; + } + } else { + mac->ops.setup_link = ixgbe_setup_mac_link_t_X550em; + mac->ops.check_link = ixgbe_check_link_t_X550em; + } + break; + case ixgbe_media_type_backplane: + if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII || + hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII_L) +#ifdef PREBOOT_SUPPORT + mac->ops.setup_link = ixgbe_setup_sgmii_fw; +#else + mac->ops.setup_link = ixgbe_setup_sgmii; +#endif /* PREBOOT_SUPPORT */ + break; + default: + break; + } +} + +/** + * ixgbe_get_link_capabilities_x550em - Determines link capabilities + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @autoneg: true when autoneg or autotry is enabled + */ +s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *autoneg) +{ + DEBUGFUNC("ixgbe_get_link_capabilities_X550em"); + + + if (hw->phy.type == ixgbe_phy_fw) { + *autoneg = true; + *speed = hw->phy.speeds_supported; + return 0; + } + + /* SFP */ + if (hw->phy.media_type == ixgbe_media_type_fiber) { + + /* CS4227 SFP must not enable auto-negotiation */ + *autoneg = false; + + /* Check if 1G SFP module. 
*/ + if (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1 + || hw->phy.sfp_type == ixgbe_sfp_type_1g_lha_core0 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_lha_core1 + || hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1) { + *speed = IXGBE_LINK_SPEED_1GB_FULL; + return IXGBE_SUCCESS; + } + + /* Link capabilities are based on SFP */ + if (hw->phy.multispeed_fiber) + *speed = IXGBE_LINK_SPEED_10GB_FULL | + IXGBE_LINK_SPEED_1GB_FULL; + else + *speed = IXGBE_LINK_SPEED_10GB_FULL; + } else { + switch (hw->phy.type) { + case ixgbe_phy_ext_1g_t: +#ifdef PREBOOT_SUPPORT + *speed = IXGBE_LINK_SPEED_1GB_FULL; + break; +#endif /* PREBOOT_SUPPORT */ + case ixgbe_phy_sgmii: +#ifdef PREBOOT_SUPPORT + *speed = IXGBE_LINK_SPEED_1GB_FULL | + IXGBE_LINK_SPEED_100_FULL | + IXGBE_LINK_SPEED_10_FULL; +#else + *speed = IXGBE_LINK_SPEED_1GB_FULL; +#endif /* PREBOOT_SUPPORT */ + break; + case ixgbe_phy_x550em_kr: + if (hw->mac.type == ixgbe_mac_X550EM_a) { + /* check different backplane modes */ + if (hw->phy.nw_mng_if_sel & + IXGBE_NW_MNG_IF_SEL_PHY_SPEED_2_5G) { + *speed = IXGBE_LINK_SPEED_2_5GB_FULL; + break; + } else if (hw->device_id == + IXGBE_DEV_ID_X550EM_A_KR_L) { + *speed = IXGBE_LINK_SPEED_1GB_FULL; + break; + } + } + /* fall through */ + default: + *speed = IXGBE_LINK_SPEED_10GB_FULL | + IXGBE_LINK_SPEED_1GB_FULL; + break; + } + *autoneg = true; + } + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_get_lasi_ext_t_x550em - Determime external Base T PHY interrupt cause + * @hw: pointer to hardware structure + * @lsc: pointer to boolean flag which indicates whether external Base T + * PHY interrupt is lsc + * + * Determime if external Base T PHY interrupt cause is high temperature + * failure alarm or link status change. + * + * Return IXGBE_ERR_OVERTEMP if interrupt is high temperature + * failure alarm, else return PHY access status. 
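+ * The @lsc flag is cleared on entry and set only when the vendor LSC
+ * bit is reported in the auto-negotiation vendor TX alarm register.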
+ */
+STATIC s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc)
+{
+	u32 status;
+	u16 reg;
+
+	*lsc = false;
+
+	/* Vendor alarm triggered */
+	status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG,
+				      IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+				      &reg);
+
+	if (status != IXGBE_SUCCESS ||
+	    !(reg & IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN))
+		return status;
+
+	/* Vendor Auto-Neg alarm triggered or Global alarm 1 triggered */
+	status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_FLAG,
+				      IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+				      &reg);
+
+	if (status != IXGBE_SUCCESS ||
+	    !(reg & (IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN |
+	    IXGBE_MDIO_GLOBAL_ALARM_1_INT)))
+		return status;
+
+	/* Global alarm triggered */
+	status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_ALARM_1,
+				      IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+				      &reg);
+
+	if (status != IXGBE_SUCCESS)
+		return status;
+
+	/* If high temperature failure, then return over temp error and exit */
+	if (reg & IXGBE_MDIO_GLOBAL_ALM_1_HI_TMP_FAIL) {
+		/* power down the PHY in case the PHY FW didn't already */
+		ixgbe_set_copper_phy_power(hw, false);
+		return IXGBE_ERR_OVERTEMP;
+	} else if (reg & IXGBE_MDIO_GLOBAL_ALM_1_DEV_FAULT) {
+		/* device fault alarm triggered */
+		status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_FAULT_MSG,
+					      IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+					      &reg);
+
+		if (status != IXGBE_SUCCESS)
+			return status;
+
+		/* if device fault was due to high temp alarm handle and exit */
+		if (reg == IXGBE_MDIO_GLOBAL_FAULT_MSG_HI_TMP) {
+			/* power down the PHY in case the PHY FW didn't */
+			ixgbe_set_copper_phy_power(hw, false);
+			return IXGBE_ERR_OVERTEMP;
+		}
+	}
+
+	/* Vendor alarm 2 triggered */
+	status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG,
+				      IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg);
+
+	if (status != IXGBE_SUCCESS ||
+	    !(reg & IXGBE_MDIO_GLOBAL_STD_ALM2_INT))
+		return status;
+
+	/* link connect/disconnect event occurred */
+	status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM2,
+				      IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg);
+
+	if (status != IXGBE_SUCCESS)
+		return status;
+
+	/* Indicate LSC */
+	if (reg & IXGBE_MDIO_AUTO_NEG_VEN_LSC)
+		*lsc = true;
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_enable_lasi_ext_t_x550em - Enable external Base T PHY interrupts
+ * @hw: pointer to hardware structure
+ *
+ * Enable link status change and temperature failure alarm for the external
+ * Base T PHY
+ *
+ * Returns PHY access status
+ */
+STATIC s32 ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw)
+{
+	u32 status;
+	u16 reg;
+	bool lsc;
+
+	/* Clear interrupt flags */
+	status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc);
+
+	/* Enable link status change alarm */
+
+	/* Enable the LASI interrupts on X552 devices to receive notifications
+	 * of the link configurations of the external PHY and correspondingly
+	 * support the configuration of the internal iXFI link, since iXFI does
+	 * not support auto-negotiation. This is not required for X553 devices
+	 * having KR support, which performs auto-negotiations and which is used
+	 * as the internal link to the external PHY. Hence adding a check here
+	 * to avoid enabling LASI interrupts for X553 devices.
+	 */
+	if (hw->mac.type != ixgbe_mac_X550EM_a) {
+		status = hw->phy.ops.read_reg(hw,
+					IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK,
+					IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg);
+
+		if (status != IXGBE_SUCCESS)
+			return status;
+
+		reg |= IXGBE_MDIO_PMA_TX_VEN_LASI_INT_EN;
+
+		status = hw->phy.ops.write_reg(hw,
+					IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK,
+					IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg);
+
+		if (status != IXGBE_SUCCESS)
+			return status;
+	}
+
+	/* Enable high temperature failure and global fault alarms */
+	status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK,
+				      IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+				      &reg);
+
+	if (status != IXGBE_SUCCESS)
+		return status;
+
+	reg |= (IXGBE_MDIO_GLOBAL_INT_HI_TEMP_EN |
+		IXGBE_MDIO_GLOBAL_INT_DEV_FAULT_EN);
+
+	status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK,
+				       IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+				       reg);
+
+	if (status != IXGBE_SUCCESS)
+		return status;
+
+	/* Enable vendor Auto-Neg alarm and Global Interrupt Mask 1 alarm */
+	status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK,
+				      IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+				      &reg);
+
+	if (status != IXGBE_SUCCESS)
+		return status;
+
+	reg |= (IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN |
+		IXGBE_MDIO_GLOBAL_ALARM_1_INT);
+
+	status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK,
+				       IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+				       reg);
+
+	if (status != IXGBE_SUCCESS)
+		return status;
+
+	/* Enable chip-wide vendor alarm */
+	status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK,
+				      IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+				      &reg);
+
+	if (status != IXGBE_SUCCESS)
+		return status;
+
+	reg |= IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN;
+
+	status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK,
+				       IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+				       reg);
+
+	return status;
+}
+
+/**
+ * ixgbe_setup_kr_speed_x550em - Configure the KR PHY for link speed.
+ * @hw: pointer to hardware structure
+ * @speed: link speed
+ *
+ * Configures the integrated KR PHY.
+ **/
+STATIC s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw,
+				       ixgbe_link_speed speed)
+{
+	s32 status;
+	u32 reg_val;
+
+	status = hw->mac.ops.read_iosf_sb_reg(hw,
+		IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+		IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+	if (status)
+		return status;
+
+	reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
+	reg_val &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR |
+		     IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX);
+
+	/* Advertise 10G support. */
+	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
+		reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR;
+
+	/* Advertise 1G support.
 */
+	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
+		reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX;
+
+	status = hw->mac.ops.write_iosf_sb_reg(hw,
+		IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+		IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+
+	if (hw->mac.type == ixgbe_mac_X550EM_a) {
+		/* Set lane mode to KR auto negotiation */
+		status = hw->mac.ops.read_iosf_sb_reg(hw,
+			IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
+			IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+
+		if (status)
+			return status;
+
+		reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK;
+		reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_AN;
+		reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN;
+		reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN;
+		reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN;
+
+		status = hw->mac.ops.write_iosf_sb_reg(hw,
+			IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
+			IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+	}
+
+	return ixgbe_restart_an_internal_phy_x550em(hw);
+}
+
+/**
+ * ixgbe_reset_phy_fw - Reset firmware-controlled PHYs
+ * @hw: pointer to hardware structure
+ */
+static s32 ixgbe_reset_phy_fw(struct ixgbe_hw *hw)
+{
+	u32 store[FW_PHY_ACT_DATA_COUNT] = { 0 };
+	s32 rc;
+
+	if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw))
+		return IXGBE_SUCCESS;
+
+	rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_PHY_SW_RESET, &store);
+	if (rc)
+		return rc;
+	memset(store, 0, sizeof(store));
+
+	rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_INIT_PHY, &store);
+	if (rc)
+		return rc;
+
+	return ixgbe_setup_fw_link(hw);
+}
+
+/**
+ * ixgbe_check_overtemp_fw - Check firmware-controlled PHYs for overtemp
+ * @hw: pointer to hardware structure
+ */
+static s32 ixgbe_check_overtemp_fw(struct ixgbe_hw *hw)
+{
+	u32 store[FW_PHY_ACT_DATA_COUNT] = { 0 };
+	s32 rc;
+
+	rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &store);
+	if (rc)
+		return rc;
+
+	if (store[0] & FW_PHY_ACT_GET_LINK_INFO_TEMP) {
+		ixgbe_shutdown_fw_phy(hw);
+		return IXGBE_ERR_OVERTEMP;
+	}
+	return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_read_mng_if_sel_x550em - Read NW_MNG_IF_SEL register
+ * @hw: pointer to hardware structure
+ *
+ * Read NW_MNG_IF_SEL register and save field values, and check for valid field
+ * values.
+ **/
+STATIC s32 ixgbe_read_mng_if_sel_x550em(struct ixgbe_hw *hw)
+{
+	/* Save NW management interface connected on board. This is used
+	 * to determine internal PHY mode.
+	 */
+	hw->phy.nw_mng_if_sel = IXGBE_READ_REG(hw, IXGBE_NW_MNG_IF_SEL);
+
+	/* If X552 (X550EM_a) and MDIO is connected to external PHY, then set
+	 * PHY address. This register field has only been used for X552.
+	 */
+	if (hw->mac.type == ixgbe_mac_X550EM_a &&
+	    hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_MDIO_ACT) {
+		hw->phy.addr = (hw->phy.nw_mng_if_sel &
+				IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD) >>
+			       IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT;
+	}
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_init_phy_ops_X550em - PHY/SFP specific init
+ * @hw: pointer to hardware structure
+ *
+ * Initialize any function pointers that were not able to be
+ * set during init_shared_code because the PHY/SFP type was
+ * not known. Perform the SFP init if necessary.
+ */ +s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw) +{ + struct ixgbe_phy_info *phy = &hw->phy; + s32 ret_val; + + DEBUGFUNC("ixgbe_init_phy_ops_X550em"); + + hw->mac.ops.set_lan_id(hw); + ixgbe_read_mng_if_sel_x550em(hw); + + if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) { + phy->phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM; + ixgbe_setup_mux_ctl(hw); + phy->ops.identify_sfp = ixgbe_identify_sfp_module_X550em; + } + + switch (hw->device_id) { + case IXGBE_DEV_ID_X550EM_A_1G_T: + case IXGBE_DEV_ID_X550EM_A_1G_T_L: + phy->ops.read_reg_mdi = NULL; + phy->ops.write_reg_mdi = NULL; + hw->phy.ops.read_reg = NULL; + hw->phy.ops.write_reg = NULL; + phy->ops.check_overtemp = ixgbe_check_overtemp_fw; + if (hw->bus.lan_id) + hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM; + else + hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY0_SM; + + break; + case IXGBE_DEV_ID_X550EM_A_10G_T: + case IXGBE_DEV_ID_X550EM_A_SFP: + hw->phy.ops.read_reg = ixgbe_read_phy_reg_x550a; + hw->phy.ops.write_reg = ixgbe_write_phy_reg_x550a; + if (hw->bus.lan_id) + hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM; + else + hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY0_SM; + break; + case IXGBE_DEV_ID_X550EM_X_SFP: + /* set up for CS4227 usage */ + hw->phy.phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM; + break; + case IXGBE_DEV_ID_X550EM_X_1G_T: + phy->ops.read_reg_mdi = NULL; + phy->ops.write_reg_mdi = NULL; + default: + break; + } + + /* Identify the PHY or SFP module */ + ret_val = phy->ops.identify(hw); + if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED || + ret_val == IXGBE_ERR_PHY_ADDR_INVALID) + return ret_val; + + /* Setup function pointers based on detected hardware */ + ixgbe_init_mac_link_ops_X550em(hw); + if (phy->sfp_type != ixgbe_sfp_type_unknown) + phy->ops.reset = NULL; + + /* Set functions pointers based on phy type */ + switch (hw->phy.type) { + case ixgbe_phy_x550em_kx4: + phy->ops.setup_link = NULL; + phy->ops.read_reg = ixgbe_read_phy_reg_x550em; + phy->ops.write_reg = ixgbe_write_phy_reg_x550em; + break; + case ixgbe_phy_x550em_kr: + phy->ops.setup_link = ixgbe_setup_kr_x550em; + phy->ops.read_reg = ixgbe_read_phy_reg_x550em; + phy->ops.write_reg = ixgbe_write_phy_reg_x550em; + break; + case ixgbe_phy_ext_1g_t: + /* link is managed by FW */ + phy->ops.setup_link = NULL; + phy->ops.reset = NULL; + break; + case ixgbe_phy_x550em_xfi: + /* link is managed by HW */ + phy->ops.setup_link = NULL; + phy->ops.read_reg = ixgbe_read_phy_reg_x550em; + phy->ops.write_reg = ixgbe_write_phy_reg_x550em; + break; + case ixgbe_phy_x550em_ext_t: + /* If internal link mode is XFI, then setup iXFI internal link, + * else setup KR now. 
+ */ + phy->ops.setup_internal_link = + ixgbe_setup_internal_phy_t_x550em; + + /* setup SW LPLU only for first revision of X550EM_x */ + if ((hw->mac.type == ixgbe_mac_X550EM_x) && + !(IXGBE_FUSES0_REV_MASK & + IXGBE_READ_REG(hw, IXGBE_FUSES0_GROUP(0)))) + phy->ops.enter_lplu = ixgbe_enter_lplu_t_x550em; + + phy->ops.handle_lasi = ixgbe_handle_lasi_ext_t_x550em; + phy->ops.reset = ixgbe_reset_phy_t_X550em; + break; + case ixgbe_phy_sgmii: + phy->ops.setup_link = NULL; + break; + case ixgbe_phy_fw: + phy->ops.setup_link = ixgbe_setup_fw_link; + phy->ops.reset = ixgbe_reset_phy_fw; + break; + default: + break; + } + return ret_val; +} + +/** + * ixgbe_set_mdio_speed - Set MDIO clock speed + * @hw: pointer to hardware structure + */ +STATIC void ixgbe_set_mdio_speed(struct ixgbe_hw *hw) +{ + u32 hlreg0; + + switch (hw->device_id) { + case IXGBE_DEV_ID_X550EM_X_10G_T: + case IXGBE_DEV_ID_X550EM_A_SGMII: + case IXGBE_DEV_ID_X550EM_A_SGMII_L: + case IXGBE_DEV_ID_X550EM_A_10G_T: + case IXGBE_DEV_ID_X550EM_A_SFP: + case IXGBE_DEV_ID_X550EM_A_QSFP: + /* Config MDIO clock speed before the first MDIO PHY access */ + hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); + hlreg0 &= ~IXGBE_HLREG0_MDCSPD; + IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); + break; + case IXGBE_DEV_ID_X550EM_A_1G_T: + case IXGBE_DEV_ID_X550EM_A_1G_T_L: + /* Select fast MDIO clock speed for these devices */ + hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); + hlreg0 |= IXGBE_HLREG0_MDCSPD; + IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); + break; + default: + break; + } +} + +/** + * ixgbe_reset_hw_X550em - Perform hardware reset + * @hw: pointer to hardware structure + * + * Resets the hardware by resetting the transmit and receive units, masks + * and clears all interrupts, perform a PHY reset, and perform a link (MAC) + * reset. + */ +s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw) +{ + ixgbe_link_speed link_speed; + s32 status; + u32 ctrl = 0; + u32 i; + bool link_up = false; + u32 swfw_mask = hw->phy.phy_semaphore_mask; + + DEBUGFUNC("ixgbe_reset_hw_X550em"); + + /* Call adapter stop to disable Tx/Rx and clear interrupts */ + status = hw->mac.ops.stop_adapter(hw); + if (status != IXGBE_SUCCESS) { + DEBUGOUT1("Failed to stop adapter, STATUS = %d\n", status); + return status; + } + /* flush pending Tx transactions */ + ixgbe_clear_tx_pending(hw); + + ixgbe_set_mdio_speed(hw); + + /* PHY ops must be identified and initialized prior to reset */ + status = hw->phy.ops.init(hw); + + if (status) + DEBUGOUT1("Failed to initialize PHY ops, STATUS = %d\n", + status); + + if (status == IXGBE_ERR_SFP_NOT_SUPPORTED || + status == IXGBE_ERR_PHY_ADDR_INVALID) { + DEBUGOUT("Returning from reset HW due to PHY init failure\n"); + return status; + } + + /* start the external PHY */ + if (hw->phy.type == ixgbe_phy_x550em_ext_t) { + status = ixgbe_init_ext_t_x550em(hw); + if (status) { + DEBUGOUT1("Failed to start the external PHY, STATUS = %d\n", + status); + return status; + } + } + + /* Setup SFP module if there is one present. */ + if (hw->phy.sfp_setup_needed) { + status = hw->mac.ops.setup_sfp(hw); + hw->phy.sfp_setup_needed = false; + } + + if (status == IXGBE_ERR_SFP_NOT_SUPPORTED) + return status; + + /* Reset PHY */ + if (!hw->phy.reset_disable && hw->phy.ops.reset) { + if (hw->phy.ops.reset(hw) == IXGBE_ERR_OVERTEMP) + return IXGBE_ERR_OVERTEMP; + } + +mac_reset_top: + /* Issue global reset to the MAC. Needs to be SW reset if link is up. + * If link reset is used when link is up, it might reset the PHY when + * mng is using it. 
If link is down or the flag to force full link + * reset is set, then perform link reset. + */ + ctrl = IXGBE_CTRL_LNK_RST; + if (!hw->force_full_reset) { + hw->mac.ops.check_link(hw, &link_speed, &link_up, false); + if (link_up) + ctrl = IXGBE_CTRL_RST; + } + + status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask); + if (status != IXGBE_SUCCESS) { + ERROR_REPORT2(IXGBE_ERROR_CAUTION, + "semaphore failed with %d", status); + return IXGBE_ERR_SWFW_SYNC; + } + ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL); + IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); + IXGBE_WRITE_FLUSH(hw); + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + + /* Poll for reset bit to self-clear meaning reset is complete */ + for (i = 0; i < 10; i++) { + usec_delay(1); + ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); + if (!(ctrl & IXGBE_CTRL_RST_MASK)) + break; + } + + if (ctrl & IXGBE_CTRL_RST_MASK) { + status = IXGBE_ERR_RESET_FAILED; + DEBUGOUT("Reset polling failed to complete.\n"); + } + + msec_delay(50); + + /* Double resets are required for recovery from certain error + * conditions. Between resets, it is necessary to stall to + * allow time for any pending HW events to complete. + */ + if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) { + hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED; + goto mac_reset_top; + } + + /* Store the permanent mac address */ + hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr); + + /* Store MAC address from RAR0, clear receive address registers, and + * clear the multicast table. Also reset num_rar_entries to 128, + * since we modify this value when programming the SAN MAC address. + */ + hw->mac.num_rar_entries = 128; + hw->mac.ops.init_rx_addrs(hw); + + ixgbe_set_mdio_speed(hw); + + if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP) + ixgbe_setup_mux_ctl(hw); + + if (status != IXGBE_SUCCESS) + DEBUGOUT1("Reset HW failed, STATUS = %d\n", status); + + return status; +} + +/** + * ixgbe_init_ext_t_x550em - Start (unstall) the external Base T PHY. + * @hw: pointer to hardware structure + */ +s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw) +{ + u32 status; + u16 reg; + + status = hw->phy.ops.read_reg(hw, + IXGBE_MDIO_TX_VENDOR_ALARMS_3, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, + ®); + + if (status != IXGBE_SUCCESS) + return status; + + /* If PHY FW reset completed bit is set then this is the first + * SW instance after a power on so the PHY FW must be un-stalled. + */ + if (reg & IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK) { + status = hw->phy.ops.read_reg(hw, + IXGBE_MDIO_GLOBAL_RES_PR_10, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + ®); + + if (status != IXGBE_SUCCESS) + return status; + + reg &= ~IXGBE_MDIO_POWER_UP_STALL; + + status = hw->phy.ops.write_reg(hw, + IXGBE_MDIO_GLOBAL_RES_PR_10, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + reg); + + if (status != IXGBE_SUCCESS) + return status; + } + + return status; +} + +/** + * ixgbe_setup_kr_x550em - Configure the KR PHY. + * @hw: pointer to hardware structure + **/ +s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw) +{ + /* leave link alone for 2.5G */ + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL) + return IXGBE_SUCCESS; + + if (ixgbe_check_reset_blocked(hw)) + return 0; + + return ixgbe_setup_kr_speed_x550em(hw, hw->phy.autoneg_advertised); +} + +/** + * ixgbe_setup_mac_link_sfp_x550em - Setup internal/external the PHY for SFP + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg_wait_to_complete: unused + * + * Configure the external PHY and the integrated KR PHY for SFP support. 
+ **/ +s32 ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete) +{ + s32 ret_val; + u16 reg_slice, reg_val; + bool setup_linear = false; + UNREFERENCED_1PARAMETER(autoneg_wait_to_complete); + + /* Check if SFP module is supported and linear */ + ret_val = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear); + + /* If no SFP module present, then return success. Return success since + * there is no reason to configure CS4227 and SFP not present error is + * not expected in the setup MAC link flow. + */ + if (ret_val == IXGBE_ERR_SFP_NOT_PRESENT) + return IXGBE_SUCCESS; + + if (ret_val != IXGBE_SUCCESS) + return ret_val; + + /* Configure internal PHY for KR/KX. */ + ixgbe_setup_kr_speed_x550em(hw, speed); + + /* Configure CS4227 LINE side to proper mode. */ + reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB + + (hw->bus.lan_id << 12); + if (setup_linear) + reg_val = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1; + else + reg_val = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1; + ret_val = hw->link.ops.write_link(hw, hw->link.addr, reg_slice, + reg_val); + return ret_val; +} + +/** + * ixgbe_setup_sfi_x550a - Configure the internal PHY for native SFI mode + * @hw: pointer to hardware structure + * @speed: the link speed to force + * + * Configures the integrated PHY for native SFI mode. Used to connect the + * internal PHY directly to an SFP cage, without autonegotiation. + **/ +STATIC s32 ixgbe_setup_sfi_x550a(struct ixgbe_hw *hw, ixgbe_link_speed *speed) +{ + struct ixgbe_mac_info *mac = &hw->mac; + s32 status; + u32 reg_val; + + /* Disable all AN and force speed to 10G Serial. */ + status = mac->ops.read_iosf_sb_reg(hw, + IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val); + if (status != IXGBE_SUCCESS) + return status; + + reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN; + reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN; + reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN; + reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK; + + /* Select forced link speed for internal PHY. */ + switch (*speed) { + case IXGBE_LINK_SPEED_10GB_FULL: + reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_10G; + break; + case IXGBE_LINK_SPEED_1GB_FULL: + reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_1G; + break; + default: + /* Other link speeds are not supported by internal PHY. */ + return IXGBE_ERR_LINK_SETUP; + } + + status = mac->ops.write_iosf_sb_reg(hw, + IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + + /* Toggle port SW reset by AN reset. */ + status = ixgbe_restart_an_internal_phy_x550em(hw); + + return status; +} + +/** + * ixgbe_setup_mac_link_sfp_x550a - Setup internal PHY for SFP + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg_wait_to_complete: unused + * + * Configure the integrated PHY for SFP support. + **/ +s32 ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete) +{ + s32 ret_val; + u16 reg_phy_ext; + bool setup_linear = false; + u32 reg_slice, reg_phy_int, slice_offset; + + UNREFERENCED_1PARAMETER(autoneg_wait_to_complete); + + /* Check if SFP module is supported and linear */ + ret_val = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear); + + /* If no SFP module present, then return success. Return success since + * SFP not present error is not expected in the setup MAC link flow.
+ */ + if (ret_val == IXGBE_ERR_SFP_NOT_PRESENT) + return IXGBE_SUCCESS; + + if (ret_val != IXGBE_SUCCESS) + return ret_val; + + if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N) { + /* Configure internal PHY for native SFI based on module type */ + ret_val = hw->mac.ops.read_iosf_sb_reg(hw, + IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_phy_int); + + if (ret_val != IXGBE_SUCCESS) + return ret_val; + + reg_phy_int &= IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_DA; + if (!setup_linear) + reg_phy_int |= IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_SR; + + ret_val = hw->mac.ops.write_iosf_sb_reg(hw, + IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_phy_int); + + if (ret_val != IXGBE_SUCCESS) + return ret_val; + + /* Setup SFI internal link. */ + ret_val = ixgbe_setup_sfi_x550a(hw, &speed); + } else { + /* Configure internal PHY for KR/KX. */ + ixgbe_setup_kr_speed_x550em(hw, speed); + + if (hw->phy.addr == 0x0 || hw->phy.addr == 0xFFFF) { + /* Find Address */ + DEBUGOUT("Invalid NW_MNG_IF_SEL.MDIO_PHY_ADD value\n"); + return IXGBE_ERR_PHY_ADDR_INVALID; + } + + /* Get external PHY SKU id */ + ret_val = hw->phy.ops.read_reg(hw, IXGBE_CS4227_EFUSE_PDF_SKU, + IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext); + + if (ret_val != IXGBE_SUCCESS) + return ret_val; + + /* When configuring quad port CS4223, the MAC instance is part + * of the slice offset. + */ + if (reg_phy_ext == IXGBE_CS4223_SKU_ID) + slice_offset = (hw->bus.lan_id + + (hw->bus.instance_id << 1)) << 12; + else + slice_offset = hw->bus.lan_id << 12; + + /* Configure CS4227/CS4223 LINE side to proper mode. */ + reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB + slice_offset; + + ret_val = hw->phy.ops.read_reg(hw, reg_slice, + IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext); + + if (ret_val != IXGBE_SUCCESS) + return ret_val; + + reg_phy_ext &= ~((IXGBE_CS4227_EDC_MODE_CX1 << 1) | + (IXGBE_CS4227_EDC_MODE_SR << 1)); + + if (setup_linear) + reg_phy_ext |= (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1; + else + reg_phy_ext |= (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1; + ret_val = hw->phy.ops.write_reg(hw, reg_slice, + IXGBE_MDIO_ZERO_DEV_TYPE, reg_phy_ext); + + /* Flush previous write with a read */ + ret_val = hw->phy.ops.read_reg(hw, reg_slice, + IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext); + } + return ret_val; +} + +/** + * ixgbe_setup_ixfi_x550em_x - MAC specific iXFI configuration + * @hw: pointer to hardware structure + * + * iXFI configuration needed for ixgbe_mac_X550EM_x devices. + **/ +STATIC s32 ixgbe_setup_ixfi_x550em_x(struct ixgbe_hw *hw) +{ + struct ixgbe_mac_info *mac = &hw->mac; + s32 status; + u32 reg_val; + + /* Disable training protocol FSM. */ + status = mac->ops.read_iosf_sb_reg(hw, + IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val); + if (status != IXGBE_SUCCESS) + return status; + reg_val |= IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL; + status = mac->ops.write_iosf_sb_reg(hw, + IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + if (status != IXGBE_SUCCESS) + return status; + + /* Disable Flex from training TXFFE.
*/ + status = mac->ops.read_iosf_sb_reg(hw, + IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val); + if (status != IXGBE_SUCCESS) + return status; + reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN; + reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN; + reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN; + status = mac->ops.write_iosf_sb_reg(hw, + IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + if (status != IXGBE_SUCCESS) + return status; + status = mac->ops.read_iosf_sb_reg(hw, + IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val); + if (status != IXGBE_SUCCESS) + return status; + reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN; + reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN; + reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN; + status = mac->ops.write_iosf_sb_reg(hw, + IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + if (status != IXGBE_SUCCESS) + return status; + + /* Enable override for coefficients. */ + status = mac->ops.read_iosf_sb_reg(hw, + IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val); + if (status != IXGBE_SUCCESS) + return status; + reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN; + reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN; + reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN; + reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN; + status = mac->ops.write_iosf_sb_reg(hw, + IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + return status; +} + +/** + * ixgbe_setup_ixfi_x550em - Configure the KR PHY for iXFI mode. + * @hw: pointer to hardware structure + * @speed: the link speed to force + * + * Configures the integrated KR PHY to use iXFI mode. Used to connect an + * internal and external PHY at a specific speed, without autonegotiation. + **/ +STATIC s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed) +{ + struct ixgbe_mac_info *mac = &hw->mac; + s32 status; + u32 reg_val; + + /* iXFI is only supported with X552 */ + if (mac->type != ixgbe_mac_X550EM_x) + return IXGBE_ERR_LINK_SETUP; + + /* Disable AN and force speed to 10G Serial. */ + status = mac->ops.read_iosf_sb_reg(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val); + if (status != IXGBE_SUCCESS) + return status; + + reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE; + reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK; + + /* Select forced link speed for internal PHY. */ + switch (*speed) { + case IXGBE_LINK_SPEED_10GB_FULL: + reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G; + break; + case IXGBE_LINK_SPEED_1GB_FULL: + reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G; + break; + default: + /* Other link speeds are not supported by internal KR PHY. */ + return IXGBE_ERR_LINK_SETUP; + } + + status = mac->ops.write_iosf_sb_reg(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + if (status != IXGBE_SUCCESS) + return status; + + /* Additional configuration needed for x550em_x */ + if (hw->mac.type == ixgbe_mac_X550EM_x) { + status = ixgbe_setup_ixfi_x550em_x(hw); + if (status != IXGBE_SUCCESS) + return status; + } + + /* Toggle port SW reset by AN reset.
*/ + status = ixgbe_restart_an_internal_phy_x550em(hw); + + return status; +} + +/** + * ixgbe_ext_phy_t_x550em_get_link - Get ext phy link status + * @hw: address of hardware structure + * @link_up: address of boolean to indicate link status + * + * Returns error code if unable to get link status. + */ +STATIC s32 ixgbe_ext_phy_t_x550em_get_link(struct ixgbe_hw *hw, bool *link_up) +{ + u32 ret; + u16 autoneg_status; + + *link_up = false; + + /* read this twice back to back to indicate current status */ + ret = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &autoneg_status); + if (ret != IXGBE_SUCCESS) + return ret; + + ret = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &autoneg_status); + if (ret != IXGBE_SUCCESS) + return ret; + + *link_up = !!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_setup_internal_phy_t_x550em - Configure KR PHY to X557 link + * @hw: point to hardware structure + * + * Configures the link between the integrated KR PHY and the external X557 PHY + * The driver will call this function when it gets a link status change + * interrupt from the X557 PHY. This function configures the link speed + * between the PHYs to match the link speed of the BASE-T link. + * + * A return of a non-zero value indicates an error, and the base driver should + * not report link up. + */ +s32 ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw) +{ + ixgbe_link_speed force_speed; + bool link_up; + u32 status; + u16 speed; + + if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper) + return IXGBE_ERR_CONFIG; + + if (hw->mac.type == ixgbe_mac_X550EM_x && + !(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) { + /* If link is down, there is no setup necessary so return */ + status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up); + if (status != IXGBE_SUCCESS) + return status; + + if (!link_up) + return IXGBE_SUCCESS; + + status = hw->phy.ops.read_reg(hw, + IXGBE_MDIO_AUTO_NEG_VENDOR_STAT, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &speed); + if (status != IXGBE_SUCCESS) + return status; + + /* If link is still down - no setup is required so return */ + status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up); + if (status != IXGBE_SUCCESS) + return status; + if (!link_up) + return IXGBE_SUCCESS; + + /* clear everything but the speed and duplex bits */ + speed &= IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_MASK; + + switch (speed) { + case IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_FULL: + force_speed = IXGBE_LINK_SPEED_10GB_FULL; + break; + case IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_FULL: + force_speed = IXGBE_LINK_SPEED_1GB_FULL; + break; + default: + /* Internal PHY does not support anything else */ + return IXGBE_ERR_INVALID_LINK_SETTINGS; + } + + return ixgbe_setup_ixfi_x550em(hw, &force_speed); + } else { + speed = IXGBE_LINK_SPEED_10GB_FULL | + IXGBE_LINK_SPEED_1GB_FULL; + return ixgbe_setup_kr_speed_x550em(hw, speed); + } +} + +/** + * ixgbe_setup_phy_loopback_x550em - Configure the KR PHY for loopback. + * @hw: pointer to hardware structure + * + * Configures the integrated KR PHY to use internal loopback mode. + **/ +s32 ixgbe_setup_phy_loopback_x550em(struct ixgbe_hw *hw) +{ + s32 status; + u32 reg_val; + + /* Disable AN and force speed to 10G Serial. 
*/ + status = hw->mac.ops.read_iosf_sb_reg(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); + if (status != IXGBE_SUCCESS) + return status; + reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE; + reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK; + reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G; + status = hw->mac.ops.write_iosf_sb_reg(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + if (status != IXGBE_SUCCESS) + return status; + + /* Set near-end loopback clocks. */ + status = hw->mac.ops.read_iosf_sb_reg(hw, + IXGBE_KRM_PORT_CAR_GEN_CTRL(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); + if (status != IXGBE_SUCCESS) + return status; + reg_val |= IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B; + reg_val |= IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS; + status = hw->mac.ops.write_iosf_sb_reg(hw, + IXGBE_KRM_PORT_CAR_GEN_CTRL(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + if (status != IXGBE_SUCCESS) + return status; + + /* Set loopback enable. */ + status = hw->mac.ops.read_iosf_sb_reg(hw, + IXGBE_KRM_PMD_DFX_BURNIN(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); + if (status != IXGBE_SUCCESS) + return status; + reg_val |= IXGBE_KRM_PMD_DFX_BURNIN_TX_RX_KR_LB_MASK; + status = hw->mac.ops.write_iosf_sb_reg(hw, + IXGBE_KRM_PMD_DFX_BURNIN(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + if (status != IXGBE_SUCCESS) + return status; + + /* Training bypass. */ + status = hw->mac.ops.read_iosf_sb_reg(hw, + IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); + if (status != IXGBE_SUCCESS) + return status; + reg_val |= IXGBE_KRM_RX_TRN_LINKUP_CTRL_PROTOCOL_BYPASS; + status = hw->mac.ops.write_iosf_sb_reg(hw, + IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + + return status; +} + +/** + * ixgbe_read_ee_hostif_X550 - Read EEPROM word using a host interface command + * assuming that the semaphore is already obtained. + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM using the hostif. + **/ +s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 *data) +{ + const u32 mask = IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_EEP_SM; + struct ixgbe_hic_read_shadow_ram buffer; + s32 status; + + DEBUGFUNC("ixgbe_read_ee_hostif_X550"); + buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD; + buffer.hdr.req.buf_lenh = 0; + buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN; + buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM; + + /* convert offset from words to bytes */ + buffer.address = IXGBE_CPU_TO_BE32(offset * 2); + /* one word */ + buffer.length = IXGBE_CPU_TO_BE16(sizeof(u16)); + buffer.pad2 = 0; + buffer.pad3 = 0; + + status = hw->mac.ops.acquire_swfw_sync(hw, mask); + if (status) + return status; + + status = ixgbe_hic_unlocked(hw, (u32 *)&buffer, sizeof(buffer), + IXGBE_HI_COMMAND_TIMEOUT); + if (!status) { + *data = (u16)IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, + FW_NVM_DATA_OFFSET); + } + + hw->mac.ops.release_swfw_sync(hw, mask); + return status; +} + +/** + * ixgbe_read_ee_hostif_buffer_X550- Read EEPROM word(s) using hostif + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to read + * @words: number of words + * @data: word(s) read from the EEPROM + * + * Reads a 16 bit word(s) from the EEPROM using the hostif. 
+ **/ +s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw, + u16 offset, u16 words, u16 *data) +{ + const u32 mask = IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_EEP_SM; + struct ixgbe_hic_read_shadow_ram buffer; + u32 current_word = 0; + u16 words_to_read; + s32 status; + u32 i; + + DEBUGFUNC("ixgbe_read_ee_hostif_buffer_X550"); + + /* Take semaphore for the entire operation. */ + status = hw->mac.ops.acquire_swfw_sync(hw, mask); + if (status) { + DEBUGOUT("EEPROM read buffer - semaphore failed\n"); + return status; + } + + while (words) { + if (words > FW_MAX_READ_BUFFER_SIZE / 2) + words_to_read = FW_MAX_READ_BUFFER_SIZE / 2; + else + words_to_read = words; + + buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD; + buffer.hdr.req.buf_lenh = 0; + buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN; + buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM; + + /* convert offset from words to bytes */ + buffer.address = IXGBE_CPU_TO_BE32((offset + current_word) * 2); + buffer.length = IXGBE_CPU_TO_BE16(words_to_read * 2); + buffer.pad2 = 0; + buffer.pad3 = 0; + + status = ixgbe_hic_unlocked(hw, (u32 *)&buffer, sizeof(buffer), + IXGBE_HI_COMMAND_TIMEOUT); + + if (status) { + DEBUGOUT("Host interface command failed\n"); + goto out; + } + + for (i = 0; i < words_to_read; i++) { + u32 reg = IXGBE_FLEX_MNG + (FW_NVM_DATA_OFFSET << 2) + + 2 * i; + u32 value = IXGBE_READ_REG(hw, reg); + + data[current_word] = (u16)(value & 0xffff); + current_word++; + i++; + if (i < words_to_read) { + value >>= 16; + data[current_word] = (u16)(value & 0xffff); + current_word++; + } + } + words -= words_to_read; + } + +out: + hw->mac.ops.release_swfw_sync(hw, mask); + return status; +} + +/** + * ixgbe_write_ee_hostif_X550 - Write EEPROM word using hostif + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to write + * @data: word write to the EEPROM + * + * Write a 16 bit word to the EEPROM using the hostif. + **/ +s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset, + u16 data) +{ + s32 status; + struct ixgbe_hic_write_shadow_ram buffer; + + DEBUGFUNC("ixgbe_write_ee_hostif_data_X550"); + + buffer.hdr.req.cmd = FW_WRITE_SHADOW_RAM_CMD; + buffer.hdr.req.buf_lenh = 0; + buffer.hdr.req.buf_lenl = FW_WRITE_SHADOW_RAM_LEN; + buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM; + + /* one word */ + buffer.length = IXGBE_CPU_TO_BE16(sizeof(u16)); + buffer.data = data; + buffer.address = IXGBE_CPU_TO_BE32(offset * 2); + + status = ixgbe_host_interface_command(hw, (u32 *)&buffer, + sizeof(buffer), + IXGBE_HI_COMMAND_TIMEOUT, false); + + return status; +} + +/** + * ixgbe_write_ee_hostif_X550 - Write EEPROM word using hostif + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to write + * @data: word write to the EEPROM + * + * Write a 16 bit word to the EEPROM using the hostif. 
+ **/ +s32 ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, + u16 data) +{ + s32 status = IXGBE_SUCCESS; + + DEBUGFUNC("ixgbe_write_ee_hostif_X550"); + + if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == + IXGBE_SUCCESS) { + status = ixgbe_write_ee_hostif_data_X550(hw, offset, data); + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + } else { + DEBUGOUT("write ee hostif failed to get semaphore"); + status = IXGBE_ERR_SWFW_SYNC; + } + + return status; +} + +/** + * ixgbe_write_ee_hostif_buffer_X550 - Write EEPROM word(s) using hostif + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to write + * @words: number of words + * @data: word(s) write to the EEPROM + * + * Write a 16 bit word(s) to the EEPROM using the hostif. + **/ +s32 ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw, + u16 offset, u16 words, u16 *data) +{ + s32 status = IXGBE_SUCCESS; + u32 i = 0; + + DEBUGFUNC("ixgbe_write_ee_hostif_buffer_X550"); + + /* Take semaphore for the entire operation. */ + status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + if (status != IXGBE_SUCCESS) { + DEBUGOUT("EEPROM write buffer - semaphore failed\n"); + goto out; + } + + for (i = 0; i < words; i++) { + status = ixgbe_write_ee_hostif_data_X550(hw, offset + i, + data[i]); + + if (status != IXGBE_SUCCESS) { + DEBUGOUT("Eeprom buffered write failed\n"); + break; + } + } + + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); +out: + + return status; +} + +/** + * ixgbe_checksum_ptr_x550 - Checksum one pointer region + * @hw: pointer to hardware structure + * @ptr: pointer offset in eeprom + * @size: size of section pointed by ptr, if 0 first word will be used as size + * @csum: address of checksum to update + * @buffer: pointer to buffer containing calculated checksum + * @buffer_size: size of buffer + * + * Returns error status for any failure + */ +STATIC s32 ixgbe_checksum_ptr_x550(struct ixgbe_hw *hw, u16 ptr, + u16 size, u16 *csum, u16 *buffer, + u32 buffer_size) +{ + u16 buf[256]; + s32 status; + u16 length, bufsz, i, start; + u16 *local_buffer; + + bufsz = sizeof(buf) / sizeof(buf[0]); + + /* Read a chunk at the pointer location */ + if (!buffer) { + status = ixgbe_read_ee_hostif_buffer_X550(hw, ptr, bufsz, buf); + if (status) { + DEBUGOUT("Failed to read EEPROM image\n"); + return status; + } + local_buffer = buf; + } else { + if (buffer_size < ptr) + return IXGBE_ERR_PARAM; + local_buffer = &buffer[ptr]; + } + + if (size) { + start = 0; + length = size; + } else { + start = 1; + length = local_buffer[0]; + + /* Skip pointer section if length is invalid. 
*/ + if (length == 0xFFFF || length == 0 || + (ptr + length) >= hw->eeprom.word_size) + return IXGBE_SUCCESS; + } + + if (buffer && ((u32)start + (u32)length > buffer_size)) + return IXGBE_ERR_PARAM; + + for (i = start; length; i++, length--) { + if (i == bufsz && !buffer) { + ptr += bufsz; + i = 0; + if (length < bufsz) + bufsz = length; + + /* Read a chunk at the pointer location */ + status = ixgbe_read_ee_hostif_buffer_X550(hw, ptr, + bufsz, buf); + if (status) { + DEBUGOUT("Failed to read EEPROM image\n"); + return status; + } + } + *csum += local_buffer[i]; + } + return IXGBE_SUCCESS; +} + +/** + * ixgbe_calc_checksum_X550 - Calculates and returns the checksum + * @hw: pointer to hardware structure + * @buffer: pointer to buffer containing calculated checksum + * @buffer_size: size of buffer + * + * Returns a negative error code on error, or the 16-bit checksum + **/ +s32 ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer, u32 buffer_size) +{ + u16 eeprom_ptrs[IXGBE_EEPROM_LAST_WORD + 1]; + u16 *local_buffer; + s32 status; + u16 checksum = 0; + u16 pointer, i, size; + + DEBUGFUNC("ixgbe_calc_eeprom_checksum_X550"); + + hw->eeprom.ops.init_params(hw); + + if (!buffer) { + /* Read pointer area */ + status = ixgbe_read_ee_hostif_buffer_X550(hw, 0, + IXGBE_EEPROM_LAST_WORD + 1, + eeprom_ptrs); + if (status) { + DEBUGOUT("Failed to read EEPROM image\n"); + return status; + } + local_buffer = eeprom_ptrs; + } else { + if (buffer_size < IXGBE_EEPROM_LAST_WORD) + return IXGBE_ERR_PARAM; + local_buffer = buffer; + } + + /* + * For X550 hardware include 0x0-0x41 in the checksum, skip the + * checksum word itself + */ + for (i = 0; i <= IXGBE_EEPROM_LAST_WORD; i++) + if (i != IXGBE_EEPROM_CHECKSUM) + checksum += local_buffer[i]; + + /* + * Include all data from pointers 0x3, 0x6-0xE. This excludes the + * FW, PHY module, and PCIe Expansion/Option ROM pointers. + */ + for (i = IXGBE_PCIE_ANALOG_PTR_X550; i < IXGBE_FW_PTR; i++) { + if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR) + continue; + + pointer = local_buffer[i]; + + /* Skip pointer section if the pointer is invalid. */ + if (pointer == 0xFFFF || pointer == 0 || + pointer >= hw->eeprom.word_size) + continue; + + switch (i) { + case IXGBE_PCIE_GENERAL_PTR: + size = IXGBE_IXGBE_PCIE_GENERAL_SIZE; + break; + case IXGBE_PCIE_CONFIG0_PTR: + case IXGBE_PCIE_CONFIG1_PTR: + size = IXGBE_PCIE_CONFIG_SIZE; + break; + default: + size = 0; + break; + } + + status = ixgbe_checksum_ptr_x550(hw, pointer, size, &checksum, + buffer, buffer_size); + if (status) + return status; + } + + checksum = (u16)IXGBE_EEPROM_SUM - checksum; + + return (s32)checksum; +} + +/** + * ixgbe_calc_eeprom_checksum_X550 - Calculates and returns the checksum + * @hw: pointer to hardware structure + * + * Returns a negative error code on error, or the 16-bit checksum + **/ +s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw) +{ + return ixgbe_calc_checksum_X550(hw, NULL, 0); +} + +/** + * ixgbe_validate_eeprom_checksum_X550 - Validate EEPROM checksum + * @hw: pointer to hardware structure + * @checksum_val: calculated checksum + * + * Performs checksum calculation and validates the EEPROM checksum. If the + * caller does not need checksum_val, the value can be NULL. + **/ +s32 ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw, u16 *checksum_val) +{ + s32 status; + u16 checksum; + u16 read_checksum = 0; + + DEBUGFUNC("ixgbe_validate_eeprom_checksum_X550"); + + /* Read the first word from the EEPROM. 
If this times out or fails, do + * not continue or we could be in for a very long wait while every + * EEPROM read fails + */ + status = hw->eeprom.ops.read(hw, 0, &checksum); + if (status) { + DEBUGOUT("EEPROM read failed\n"); + return status; + } + + status = hw->eeprom.ops.calc_checksum(hw); + if (status < 0) + return status; + + checksum = (u16)(status & 0xffff); + + status = ixgbe_read_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM, + &read_checksum); + if (status) + return status; + + /* Verify read checksum from EEPROM is the same as + * calculated checksum + */ + if (read_checksum != checksum) { + status = IXGBE_ERR_EEPROM_CHECKSUM; + ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE, + "Invalid EEPROM checksum"); + } + + /* If the user cares, return the calculated checksum */ + if (checksum_val) + *checksum_val = checksum; + + return status; +} + +/** + * ixgbe_update_eeprom_checksum_X550 - Updates the EEPROM checksum and flash + * @hw: pointer to hardware structure + * + * After writing EEPROM to shadow RAM using EEWR register, software calculates + * checksum and updates the EEPROM and instructs the hardware to update + * the flash. + **/ +s32 ixgbe_update_eeprom_checksum_X550(struct ixgbe_hw *hw) +{ + s32 status; + u16 checksum = 0; + + DEBUGFUNC("ixgbe_update_eeprom_checksum_X550"); + + /* Read the first word from the EEPROM. If this times out or fails, do + * not continue or we could be in for a very long wait while every + * EEPROM read fails + */ + status = ixgbe_read_ee_hostif_X550(hw, 0, &checksum); + if (status) { + DEBUGOUT("EEPROM read failed\n"); + return status; + } + + status = ixgbe_calc_eeprom_checksum_X550(hw); + if (status < 0) + return status; + + checksum = (u16)(status & 0xffff); + + status = ixgbe_write_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM, + checksum); + if (status) + return status; + + status = ixgbe_update_flash_X550(hw); + + return status; +} + +/** + * ixgbe_update_flash_X550 - Instruct HW to copy EEPROM to Flash device + * @hw: pointer to hardware structure + * + * Issue a shadow RAM dump to FW to copy EEPROM from shadow RAM to the flash. + **/ +s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw) +{ + s32 status = IXGBE_SUCCESS; + union ixgbe_hic_hdr2 buffer; + + DEBUGFUNC("ixgbe_update_flash_X550"); + + buffer.req.cmd = FW_SHADOW_RAM_DUMP_CMD; + buffer.req.buf_lenh = 0; + buffer.req.buf_lenl = FW_SHADOW_RAM_DUMP_LEN; + buffer.req.checksum = FW_DEFAULT_CHECKSUM; + + status = ixgbe_host_interface_command(hw, (u32 *)&buffer, + sizeof(buffer), + IXGBE_HI_COMMAND_TIMEOUT, false); + + return status; +} + +/** + * ixgbe_get_supported_physical_layer_X550em - Returns physical layer type + * @hw: pointer to hardware structure + * + * Determines physical layer capabilities of the current configuration. 
+ **/ +u64 ixgbe_get_supported_physical_layer_X550em(struct ixgbe_hw *hw) +{ + u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; + u16 ext_ability = 0; + + DEBUGFUNC("ixgbe_get_supported_physical_layer_X550em"); + + hw->phy.ops.identify(hw); + + switch (hw->phy.type) { + case ixgbe_phy_x550em_kr: + if (hw->mac.type == ixgbe_mac_X550EM_a) { + if (hw->phy.nw_mng_if_sel & + IXGBE_NW_MNG_IF_SEL_PHY_SPEED_2_5G) { + physical_layer = + IXGBE_PHYSICAL_LAYER_2500BASE_KX; + break; + } else if (hw->device_id == + IXGBE_DEV_ID_X550EM_A_KR_L) { + physical_layer = + IXGBE_PHYSICAL_LAYER_1000BASE_KX; + break; + } + } + /* fall through */ + case ixgbe_phy_x550em_xfi: + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR | + IXGBE_PHYSICAL_LAYER_1000BASE_KX; + break; + case ixgbe_phy_x550em_kx4: + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4 | + IXGBE_PHYSICAL_LAYER_1000BASE_KX; + break; + case ixgbe_phy_x550em_ext_t: + hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, + &ext_ability); + if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY) + physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T; + if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY) + physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T; + break; + case ixgbe_phy_fw: + if (hw->phy.speeds_supported & IXGBE_LINK_SPEED_1GB_FULL) + physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T; + if (hw->phy.speeds_supported & IXGBE_LINK_SPEED_100_FULL) + physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX; + if (hw->phy.speeds_supported & IXGBE_LINK_SPEED_10_FULL) + physical_layer |= IXGBE_PHYSICAL_LAYER_10BASE_T; + break; + case ixgbe_phy_sgmii: +#ifdef PREBOOT_SUPPORT + physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX | + IXGBE_PHYSICAL_LAYER_100BASE_TX | + IXGBE_PHYSICAL_LAYER_10BASE_T; +#else + physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX; +#endif /* PREBOOT_SUPPORT */ + break; + case ixgbe_phy_ext_1g_t: + physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T; + break; + default: + break; + } + + if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) + physical_layer = ixgbe_get_supported_phy_sfp_layer_generic(hw); + + return physical_layer; +} + +/** + * ixgbe_get_bus_info_x550em - Set PCI bus info + * @hw: pointer to hardware structure + * + * Sets bus link width and speed to unknown because X550em is + * not a PCI device. 
+ **/ +s32 ixgbe_get_bus_info_X550em(struct ixgbe_hw *hw) +{ + + DEBUGFUNC("ixgbe_get_bus_info_x550em"); + + hw->bus.width = ixgbe_bus_width_unknown; + hw->bus.speed = ixgbe_bus_speed_unknown; + + hw->mac.ops.set_lan_id(hw); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_disable_rx_x550 - Disable RX unit + * @hw: pointer to hardware structure + * + * Disables the Rx unit for x550 + **/ +void ixgbe_disable_rx_x550(struct ixgbe_hw *hw) +{ + u32 rxctrl, pfdtxgswc; + s32 status; + struct ixgbe_hic_disable_rxen fw_cmd; + + DEBUGFUNC("ixgbe_disable_rx_x550"); + + rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); + if (rxctrl & IXGBE_RXCTRL_RXEN) { + pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC); + if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) { + pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN; + IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc); + hw->mac.set_lben = true; + } else { + hw->mac.set_lben = false; + } + + fw_cmd.hdr.cmd = FW_DISABLE_RXEN_CMD; + fw_cmd.hdr.buf_len = FW_DISABLE_RXEN_LEN; + fw_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM; + fw_cmd.port_number = (u8)hw->bus.lan_id; + + status = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd, + sizeof(struct ixgbe_hic_disable_rxen), + IXGBE_HI_COMMAND_TIMEOUT, true); + + /* If we fail - disable RX using register write */ + if (status) { + rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); + if (rxctrl & IXGBE_RXCTRL_RXEN) { + rxctrl &= ~IXGBE_RXCTRL_RXEN; + IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl); + } + } + } +} + +/** + * ixgbe_enter_lplu_t_x550em - Transition to low power states + * @hw: pointer to hardware structure + * + * Configures Low Power Link Up on transition to low power states + * (from D0 to non-D0). Link is required to enter LPLU so avoid resetting the + * X557 PHY immediately prior to entering LPLU. + **/ +s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw) +{ + u16 an_10g_cntl_reg, autoneg_reg, speed; + s32 status; + ixgbe_link_speed lcd_speed; + u32 save_autoneg; + bool link_up; + + /* SW LPLU not required on later HW revisions. */ + if ((hw->mac.type == ixgbe_mac_X550EM_x) && + (IXGBE_FUSES0_REV_MASK & + IXGBE_READ_REG(hw, IXGBE_FUSES0_GROUP(0)))) + return IXGBE_SUCCESS; + + /* If blocked by MNG FW, then don't restart AN */ + if (ixgbe_check_reset_blocked(hw)) + return IXGBE_SUCCESS; + + status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up); + if (status != IXGBE_SUCCESS) + return status; + + status = ixgbe_read_eeprom(hw, NVM_INIT_CTRL_3, &hw->eeprom.ctrl_word_3); + + if (status != IXGBE_SUCCESS) + return status; + + /* If link is down, LPLU disabled in NVM, WoL disabled, or manageability + * disabled, then force link down by entering low power mode. + */ + if (!link_up || !(hw->eeprom.ctrl_word_3 & NVM_INIT_CTRL_3_LPLU) || + !(hw->wol_enabled || ixgbe_mng_present(hw))) + return ixgbe_set_copper_phy_power(hw, FALSE); + + /* Determine LCD */ + status = ixgbe_get_lcd_t_x550em(hw, &lcd_speed); + + if (status != IXGBE_SUCCESS) + return status; + + /* If no valid LCD link speed, then force link down and exit.
*/ + if (lcd_speed == IXGBE_LINK_SPEED_UNKNOWN) + return ixgbe_set_copper_phy_power(hw, FALSE); + + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_STAT, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &speed); + + if (status != IXGBE_SUCCESS) + return status; + + /* If no link now, speed is invalid so take link down */ + status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up); + if (status != IXGBE_SUCCESS) + return ixgbe_set_copper_phy_power(hw, false); + + /* clear everything but the speed bits */ + speed &= IXGBE_MDIO_AUTO_NEG_VEN_STAT_SPEED_MASK; + + /* If current speed is already LCD, then exit. */ + if (((speed == IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB) && + (lcd_speed == IXGBE_LINK_SPEED_1GB_FULL)) || + ((speed == IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB) && + (lcd_speed == IXGBE_LINK_SPEED_10GB_FULL))) + return status; + + /* Clear AN completed indication */ + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &autoneg_reg); + + if (status != IXGBE_SUCCESS) + return status; + + status = hw->phy.ops.read_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &an_10g_cntl_reg); + + if (status != IXGBE_SUCCESS) + return status; + + status = hw->phy.ops.read_reg(hw, + IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &autoneg_reg); + + if (status != IXGBE_SUCCESS) + return status; + + save_autoneg = hw->phy.autoneg_advertised; + + /* Setup link at least common link speed */ + status = hw->mac.ops.setup_link(hw, lcd_speed, false); + + /* restore autoneg from before setting lplu speed */ + hw->phy.autoneg_advertised = save_autoneg; + + return status; +} + +/** + * ixgbe_get_lcd_x550em - Determine lowest common denominator + * @hw: pointer to hardware structure + * @lcd_speed: pointer to lowest common link speed + * + * Determine lowest common link speed with link partner. + **/ +s32 ixgbe_get_lcd_t_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *lcd_speed) +{ + u16 an_lp_status; + s32 status; + u16 word = hw->eeprom.ctrl_word_3; + + *lcd_speed = IXGBE_LINK_SPEED_UNKNOWN; + + status = hw->phy.ops.read_reg(hw, IXGBE_AUTO_NEG_LP_STATUS, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &an_lp_status); + + if (status != IXGBE_SUCCESS) + return status; + + /* If link partner advertised 1G, return 1G */ + if (an_lp_status & IXGBE_AUTO_NEG_LP_1000BASE_CAP) { + *lcd_speed = IXGBE_LINK_SPEED_1GB_FULL; + return status; + } + + /* If 10G disabled for LPLU via NVM D10GMP, then return no valid LCD */ + if ((hw->bus.lan_id && (word & NVM_INIT_CTRL_3_D10GMP_PORT1)) || + (word & NVM_INIT_CTRL_3_D10GMP_PORT0)) + return status; + + /* Link partner not capable of lower speeds, return 10G */ + *lcd_speed = IXGBE_LINK_SPEED_10GB_FULL; + return status; +} + +/** + * ixgbe_setup_fc_X550em - Set up flow control + * @hw: pointer to hardware structure + * + * Called at init time to set up flow control. + **/ +s32 ixgbe_setup_fc_X550em(struct ixgbe_hw *hw) +{ + s32 ret_val = IXGBE_SUCCESS; + u32 pause, asm_dir, reg_val; + + DEBUGFUNC("ixgbe_setup_fc_X550em"); + + /* Validate the requested mode */ + if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) { + ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED, + "ixgbe_fc_rx_pause not valid in strict IEEE mode\n"); + ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; + goto out; + } + + /* 10gig parts do not have a word in the EEPROM to determine the + * default flow control setting, so we explicitly set it to full. 
+ */ + if (hw->fc.requested_mode == ixgbe_fc_default) + hw->fc.requested_mode = ixgbe_fc_full; + + /* Determine PAUSE and ASM_DIR bits. */ + switch (hw->fc.requested_mode) { + case ixgbe_fc_none: + pause = 0; + asm_dir = 0; + break; + case ixgbe_fc_tx_pause: + pause = 0; + asm_dir = 1; + break; + case ixgbe_fc_rx_pause: + /* Rx Flow control is enabled and Tx Flow control is + * disabled by software override. Since there really + * isn't a way to advertise that we are capable of RX + * Pause ONLY, we will advertise that we support both + * symmetric and asymmetric Rx PAUSE, as such we fall + * through to the fc_full statement. Later, we will + * disable the adapter's ability to send PAUSE frames. + */ + case ixgbe_fc_full: + pause = 1; + asm_dir = 1; + break; + default: + ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, + "Flow control param set incorrectly\n"); + ret_val = IXGBE_ERR_CONFIG; + goto out; + } + + switch (hw->device_id) { + case IXGBE_DEV_ID_X550EM_X_KR: + case IXGBE_DEV_ID_X550EM_A_KR: + case IXGBE_DEV_ID_X550EM_A_KR_L: + ret_val = hw->mac.ops.read_iosf_sb_reg(hw, + IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); + if (ret_val != IXGBE_SUCCESS) + goto out; + reg_val &= ~(IXGBE_KRM_AN_CNTL_1_SYM_PAUSE | + IXGBE_KRM_AN_CNTL_1_ASM_PAUSE); + if (pause) + reg_val |= IXGBE_KRM_AN_CNTL_1_SYM_PAUSE; + if (asm_dir) + reg_val |= IXGBE_KRM_AN_CNTL_1_ASM_PAUSE; + ret_val = hw->mac.ops.write_iosf_sb_reg(hw, + IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + + /* This device does not fully support AN. */ + hw->fc.disable_fc_autoneg = true; + break; + case IXGBE_DEV_ID_X550EM_X_XFI: + hw->fc.disable_fc_autoneg = true; + break; + default: + break; + } + +out: + return ret_val; +} + +/** + * ixgbe_fc_autoneg_backplane_x550em_a - Enable flow control IEEE clause 37 + * @hw: pointer to hardware structure + * + * Enable flow control according to IEEE clause 37. + **/ +void ixgbe_fc_autoneg_backplane_x550em_a(struct ixgbe_hw *hw) +{ + u32 link_s1, lp_an_page_low, an_cntl_1; + s32 status = IXGBE_ERR_FC_NOT_NEGOTIATED; + ixgbe_link_speed speed; + bool link_up; + + /* AN should have completed when the cable was plugged in. + * Look for reasons to bail out. Bail out if: + * - FC autoneg is disabled, or if + * - link is not up. 
+ */ + if (hw->fc.disable_fc_autoneg) { + ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED, + "Flow control autoneg is disabled"); + goto out; + } + + hw->mac.ops.check_link(hw, &speed, &link_up, false); + if (!link_up) { + ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down"); + goto out; + } + + /* Check at auto-negotiation has completed */ + status = hw->mac.ops.read_iosf_sb_reg(hw, + IXGBE_KRM_LINK_S1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &link_s1); + + if (status != IXGBE_SUCCESS || + (link_s1 & IXGBE_KRM_LINK_S1_MAC_AN_COMPLETE) == 0) { + DEBUGOUT("Auto-Negotiation did not complete\n"); + status = IXGBE_ERR_FC_NOT_NEGOTIATED; + goto out; + } + + /* Read the 10g AN autoc and LP ability registers and resolve + * local flow control settings accordingly + */ + status = hw->mac.ops.read_iosf_sb_reg(hw, + IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &an_cntl_1); + + if (status != IXGBE_SUCCESS) { + DEBUGOUT("Auto-Negotiation did not complete\n"); + goto out; + } + + status = hw->mac.ops.read_iosf_sb_reg(hw, + IXGBE_KRM_LP_BASE_PAGE_HIGH(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &lp_an_page_low); + + if (status != IXGBE_SUCCESS) { + DEBUGOUT("Auto-Negotiation did not complete\n"); + goto out; + } + + status = ixgbe_negotiate_fc(hw, an_cntl_1, lp_an_page_low, + IXGBE_KRM_AN_CNTL_1_SYM_PAUSE, + IXGBE_KRM_AN_CNTL_1_ASM_PAUSE, + IXGBE_KRM_LP_BASE_PAGE_HIGH_SYM_PAUSE, + IXGBE_KRM_LP_BASE_PAGE_HIGH_ASM_PAUSE); + +out: + if (status == IXGBE_SUCCESS) { + hw->fc.fc_was_autonegged = true; + } else { + hw->fc.fc_was_autonegged = false; + hw->fc.current_mode = hw->fc.requested_mode; + } +} + +/** + * ixgbe_fc_autoneg_fiber_x550em_a - passthrough FC settings + * @hw: pointer to hardware structure + * + **/ +void ixgbe_fc_autoneg_fiber_x550em_a(struct ixgbe_hw *hw) +{ + hw->fc.fc_was_autonegged = false; + hw->fc.current_mode = hw->fc.requested_mode; +} + +/** + * ixgbe_fc_autoneg_sgmii_x550em_a - Enable flow control IEEE clause 37 + * @hw: pointer to hardware structure + * + * Enable flow control according to IEEE clause 37. + **/ +void ixgbe_fc_autoneg_sgmii_x550em_a(struct ixgbe_hw *hw) +{ + s32 status = IXGBE_ERR_FC_NOT_NEGOTIATED; + u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 }; + ixgbe_link_speed speed; + bool link_up; + + /* AN should have completed when the cable was plugged in. + * Look for reasons to bail out. Bail out if: + * - FC autoneg is disabled, or if + * - link is not up. 
+ */ + if (hw->fc.disable_fc_autoneg) { + ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED, + "Flow control autoneg is disabled"); + goto out; + } + + hw->mac.ops.check_link(hw, &speed, &link_up, false); + if (!link_up) { + ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down"); + goto out; + } + + /* Check if auto-negotiation has completed */ + status = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &info); + if (status != IXGBE_SUCCESS || + !(info[0] & FW_PHY_ACT_GET_LINK_INFO_AN_COMPLETE)) { + DEBUGOUT("Auto-Negotiation did not complete\n"); + status = IXGBE_ERR_FC_NOT_NEGOTIATED; + goto out; + } + + /* Negotiate the flow control */ + status = ixgbe_negotiate_fc(hw, info[0], info[0], + FW_PHY_ACT_GET_LINK_INFO_FC_RX, + FW_PHY_ACT_GET_LINK_INFO_FC_TX, + FW_PHY_ACT_GET_LINK_INFO_LP_FC_RX, + FW_PHY_ACT_GET_LINK_INFO_LP_FC_TX); + +out: + if (status == IXGBE_SUCCESS) { + hw->fc.fc_was_autonegged = true; + } else { + hw->fc.fc_was_autonegged = false; + hw->fc.current_mode = hw->fc.requested_mode; + } +} + +/** + * ixgbe_setup_fc_backplane_x550em_a - Set up flow control + * @hw: pointer to hardware structure + * + * Called at init time to set up flow control. + **/ +s32 ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *hw) +{ + s32 status = IXGBE_SUCCESS; + u32 an_cntl = 0; + + DEBUGFUNC("ixgbe_setup_fc_backplane_x550em_a"); + + /* Validate the requested mode */ + if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) { + ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED, + "ixgbe_fc_rx_pause not valid in strict IEEE mode\n"); + return IXGBE_ERR_INVALID_LINK_SETTINGS; + } + + if (hw->fc.requested_mode == ixgbe_fc_default) + hw->fc.requested_mode = ixgbe_fc_full; + + /* Set up the 1G and 10G flow control advertisement registers so the + * HW will be able to do FC autoneg once the cable is plugged in. If + * we link at 10G, the 1G advertisement is harmless and vice versa. + */ + status = hw->mac.ops.read_iosf_sb_reg(hw, + IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &an_cntl); + + if (status != IXGBE_SUCCESS) { + DEBUGOUT("Auto-Negotiation did not complete\n"); + return status; + } + + /* The possible values of fc.requested_mode are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames, + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames but + * we do not support receiving pause frames). + * 3: Both Rx and Tx flow control (symmetric) are enabled. + * other: Invalid. + */ + switch (hw->fc.requested_mode) { + case ixgbe_fc_none: + /* Flow control completely disabled by software override. */ + an_cntl &= ~(IXGBE_KRM_AN_CNTL_1_SYM_PAUSE | + IXGBE_KRM_AN_CNTL_1_ASM_PAUSE); + break; + case ixgbe_fc_tx_pause: + /* Tx Flow control is enabled, and Rx Flow control is + * disabled by software override. + */ + an_cntl |= IXGBE_KRM_AN_CNTL_1_ASM_PAUSE; + an_cntl &= ~IXGBE_KRM_AN_CNTL_1_SYM_PAUSE; + break; + case ixgbe_fc_rx_pause: + /* Rx Flow control is enabled and Tx Flow control is + * disabled by software override. Since there really + * isn't a way to advertise that we are capable of RX + * Pause ONLY, we will advertise that we support both + * symmetric and asymmetric Rx PAUSE, as such we fall + * through to the fc_full statement. Later, we will + * disable the adapter's ability to send PAUSE frames. + */ + case ixgbe_fc_full: + /* Flow control (both Rx and Tx) is enabled by SW override. 
*/ + an_cntl |= IXGBE_KRM_AN_CNTL_1_SYM_PAUSE | + IXGBE_KRM_AN_CNTL_1_ASM_PAUSE; + break; + default: + ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, + "Flow control param set incorrectly\n"); + return IXGBE_ERR_CONFIG; + } + + status = hw->mac.ops.write_iosf_sb_reg(hw, + IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, an_cntl); + + /* Restart auto-negotiation. */ + status = ixgbe_restart_an_internal_phy_x550em(hw); + + return status; +} + +/** + * ixgbe_set_mux - Set mux for port 1 access with CS4227 + * @hw: pointer to hardware structure + * @state: set mux if 1, clear if 0 + */ +STATIC void ixgbe_set_mux(struct ixgbe_hw *hw, u8 state) +{ + u32 esdp; + + if (!hw->bus.lan_id) + return; + esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); + if (state) + esdp |= IXGBE_ESDP_SDP1; + else + esdp &= ~IXGBE_ESDP_SDP1; + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); + IXGBE_WRITE_FLUSH(hw); +} + +/** + * ixgbe_acquire_swfw_sync_X550em - Acquire SWFW semaphore + * @hw: pointer to hardware structure + * @mask: Mask to specify which semaphore to acquire + * + * Acquires the SWFW semaphore and sets the I2C MUX + **/ +s32 ixgbe_acquire_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask) +{ + s32 status; + + DEBUGFUNC("ixgbe_acquire_swfw_sync_X550em"); + + status = ixgbe_acquire_swfw_sync_X540(hw, mask); + if (status) + return status; + + if (mask & IXGBE_GSSR_I2C_MASK) + ixgbe_set_mux(hw, 1); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_release_swfw_sync_X550em - Release SWFW semaphore + * @hw: pointer to hardware structure + * @mask: Mask to specify which semaphore to release + * + * Releases the SWFW semaphore and sets the I2C MUX + **/ +void ixgbe_release_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask) +{ + DEBUGFUNC("ixgbe_release_swfw_sync_X550em"); + + if (mask & IXGBE_GSSR_I2C_MASK) + ixgbe_set_mux(hw, 0); + + ixgbe_release_swfw_sync_X540(hw, mask); +} + +/** + * ixgbe_acquire_swfw_sync_X550a - Acquire SWFW semaphore + * @hw: pointer to hardware structure + * @mask: Mask to specify which semaphore to acquire + * + * Acquires the SWFW semaphore and get the shared phy token as needed + */ +STATIC s32 ixgbe_acquire_swfw_sync_X550a(struct ixgbe_hw *hw, u32 mask) +{ + u32 hmask = mask & ~IXGBE_GSSR_TOKEN_SM; + int retries = FW_PHY_TOKEN_RETRIES; + s32 status = IXGBE_SUCCESS; + + DEBUGFUNC("ixgbe_acquire_swfw_sync_X550a"); + + while (--retries) { + status = IXGBE_SUCCESS; + if (hmask) + status = ixgbe_acquire_swfw_sync_X540(hw, hmask); + if (status) { + DEBUGOUT1("Could not acquire SWFW semaphore, Status = %d\n", + status); + return status; + } + if (!(mask & IXGBE_GSSR_TOKEN_SM)) + return IXGBE_SUCCESS; + + status = ixgbe_get_phy_token(hw); + if (status == IXGBE_ERR_TOKEN_RETRY) + DEBUGOUT1("Could not acquire PHY token, Status = %d\n", + status); + + if (status == IXGBE_SUCCESS) + return IXGBE_SUCCESS; + + if (hmask) + ixgbe_release_swfw_sync_X540(hw, hmask); + + if (status != IXGBE_ERR_TOKEN_RETRY) { + DEBUGOUT1("Unable to retry acquiring the PHY token, Status = %d\n", + status); + return status; + } + } + + DEBUGOUT1("Semaphore acquisition retries failed!: PHY ID = 0x%08X\n", + hw->phy.id); + return status; +} + +/** + * ixgbe_release_swfw_sync_X550a - Release SWFW semaphore + * @hw: pointer to hardware structure + * @mask: Mask to specify which semaphore to release + * + * Releases the SWFW semaphore and puts the shared phy token as needed + */ +STATIC void ixgbe_release_swfw_sync_X550a(struct ixgbe_hw *hw, u32 mask) +{ + u32 hmask = mask & ~IXGBE_GSSR_TOKEN_SM; + + DEBUGFUNC("ixgbe_release_swfw_sync_X550a"); 
+ + if (mask & IXGBE_GSSR_TOKEN_SM) + ixgbe_put_phy_token(hw); + + if (hmask) + ixgbe_release_swfw_sync_X540(hw, hmask); +} + +/** + * ixgbe_read_phy_reg_x550a - Reads specified PHY register + * @hw: pointer to hardware structure + * @reg_addr: 32 bit address of PHY register to read + * @device_type: 5 bit device type + * @phy_data: Pointer to read data from PHY register + * + * Reads a value from a specified PHY register using the SWFW lock and PHY + * Token. The PHY Token is needed since the MDIO is shared between to MAC + * instances. + **/ +s32 ixgbe_read_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 *phy_data) +{ + s32 status; + u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM; + + DEBUGFUNC("ixgbe_read_phy_reg_x550a"); + + if (hw->mac.ops.acquire_swfw_sync(hw, mask)) + return IXGBE_ERR_SWFW_SYNC; + + status = hw->phy.ops.read_reg_mdi(hw, reg_addr, device_type, phy_data); + + hw->mac.ops.release_swfw_sync(hw, mask); + + return status; +} + +/** + * ixgbe_write_phy_reg_x550a - Writes specified PHY register + * @hw: pointer to hardware structure + * @reg_addr: 32 bit PHY register to write + * @device_type: 5 bit device type + * @phy_data: Data to write to the PHY register + * + * Writes a value to specified PHY register using the SWFW lock and PHY Token. + * The PHY Token is needed since the MDIO is shared between to MAC instances. + **/ +s32 ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 phy_data) +{ + s32 status; + u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM; + + DEBUGFUNC("ixgbe_write_phy_reg_x550a"); + + if (hw->mac.ops.acquire_swfw_sync(hw, mask) == IXGBE_SUCCESS) { + status = hw->phy.ops.write_reg_mdi(hw, reg_addr, device_type, + phy_data); + hw->mac.ops.release_swfw_sync(hw, mask); + } else { + status = IXGBE_ERR_SWFW_SYNC; + } + + return status; +} + +/** + * ixgbe_handle_lasi_ext_t_x550em - Handle external Base T PHY interrupt + * @hw: pointer to hardware structure + * + * Handle external Base T PHY interrupt. If high temperature + * failure alarm then return error, else if link status change + * then setup internal/external PHY link + * + * Return IXGBE_ERR_OVERTEMP if interrupt is high temperature + * failure alarm, else return PHY access status. + */ +s32 ixgbe_handle_lasi_ext_t_x550em(struct ixgbe_hw *hw) +{ + bool lsc; + u32 status; + + status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc); + + if (status != IXGBE_SUCCESS) + return status; + + if (lsc) + return ixgbe_setup_internal_phy(hw); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_setup_mac_link_t_X550em - Sets the auto advertised link speed + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg_wait_to_complete: true when waiting for completion is needed + * + * Setup internal/external PHY link speed based on link speed, then set + * external PHY auto advertised link speed. + * + * Returns error status for any failure + **/ +s32 ixgbe_setup_mac_link_t_X550em(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete) +{ + s32 status; + ixgbe_link_speed force_speed; + u32 i; + bool link_up = false; + + DEBUGFUNC("ixgbe_setup_mac_link_t_X550em"); + + /* Setup internal/external PHY link speed to iXFI (10G), unless + * only 1G is auto advertised then setup KX link. + */ + if (speed & IXGBE_LINK_SPEED_10GB_FULL) + force_speed = IXGBE_LINK_SPEED_10GB_FULL; + else + force_speed = IXGBE_LINK_SPEED_1GB_FULL; + + /* If X552 and internal link mode is XFI, then setup XFI internal link. 
+ */ + if (hw->mac.type == ixgbe_mac_X550EM_x && + !(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) { + status = ixgbe_setup_ixfi_x550em(hw, &force_speed); + + if (status != IXGBE_SUCCESS) + return status; + + /* Wait for the controller to acquire link */ + for (i = 0; i < 10; i++) { + msec_delay(100); + + status = ixgbe_check_link(hw, &force_speed, &link_up, + false); + if (status != IXGBE_SUCCESS) + return status; + + if (link_up) + break; + } + } + + return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait_to_complete); +} + +/** + * ixgbe_check_link_t_X550em - Determine link and speed status + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @link_up: true when link is up + * @link_up_wait_to_complete: bool used to wait for link up or not + * + * Check that both the MAC and X557 external PHY have link. + **/ +s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed, + bool *link_up, bool link_up_wait_to_complete) +{ + u32 status; + u16 i, autoneg_status = 0; + + if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper) + return IXGBE_ERR_CONFIG; + + status = ixgbe_check_mac_link_generic(hw, speed, link_up, + link_up_wait_to_complete); + + /* If check link fails or MAC link is not up, then return */ + if (status != IXGBE_SUCCESS || !(*link_up)) + return status; + + /* MAC link is up, so check external PHY link. + * X557 PHY. Link status is latching low, and can only be used to detect + * link drop, and not the current status of the link without performing + * back-to-back reads. + */ + for (i = 0; i < 2; i++) { + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &autoneg_status); + + if (status != IXGBE_SUCCESS) + return status; + } + + /* If external PHY link is not up, then indicate link not up */ + if (!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS)) + *link_up = false; + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_reset_phy_t_X550em - Performs X557 PHY reset and enables LASI + * @hw: pointer to hardware structure + **/ +s32 ixgbe_reset_phy_t_X550em(struct ixgbe_hw *hw) +{ + s32 status; + + status = ixgbe_reset_phy_generic(hw); + + if (status != IXGBE_SUCCESS) + return status; + + /* Configure Link Status Alarm and Temperature Threshold interrupts */ + return ixgbe_enable_lasi_ext_t_x550em(hw); +} + +/** + * ixgbe_led_on_t_X550em - Turns on the software controllable LEDs. + * @hw: pointer to hardware structure + * @led_idx: led number to turn on + **/ +s32 ixgbe_led_on_t_X550em(struct ixgbe_hw *hw, u32 led_idx) +{ + u16 phy_data; + + DEBUGFUNC("ixgbe_led_on_t_X550em"); + + if (led_idx >= IXGBE_X557_MAX_LED_INDEX) + return IXGBE_ERR_PARAM; + + /* To turn on the LED, set mode to ON. */ + ixgbe_read_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &phy_data); + phy_data |= IXGBE_X557_LED_MANUAL_SET_MASK; + ixgbe_write_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, phy_data); + + /* Some designs have the LEDs wired to the MAC */ + return ixgbe_led_on_generic(hw, led_idx); +} + +/** + * ixgbe_led_off_t_X550em - Turns off the software controllable LEDs. + * @hw: pointer to hardware structure + * @led_idx: led number to turn off + **/ +s32 ixgbe_led_off_t_X550em(struct ixgbe_hw *hw, u32 led_idx) +{ + u16 phy_data; + + DEBUGFUNC("ixgbe_led_off_t_X550em"); + + if (led_idx >= IXGBE_X557_MAX_LED_INDEX) + return IXGBE_ERR_PARAM; + + /* To turn on the LED, set mode to ON. 
*/ + ixgbe_read_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &phy_data); + phy_data &= ~IXGBE_X557_LED_MANUAL_SET_MASK; + ixgbe_write_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, phy_data); + + /* Some designs have the LEDs wired to the MAC */ + return ixgbe_led_off_generic(hw, led_idx); +} + +/** + * ixgbe_set_fw_drv_ver_x550 - Sends driver version to firmware + * @hw: pointer to the HW structure + * @maj: driver version major number + * @min: driver version minor number + * @build: driver version build number + * @sub: driver version sub build number + * @len: length of driver_ver string + * @driver_ver: driver string + * + * Sends driver version number to firmware through the manageability + * block. On success return IXGBE_SUCCESS + * else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring + * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails. + **/ +s32 ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min, + u8 build, u8 sub, u16 len, const char *driver_ver) +{ + struct ixgbe_hic_drv_info2 fw_cmd; + s32 ret_val = IXGBE_SUCCESS; + int i; + + DEBUGFUNC("ixgbe_set_fw_drv_ver_x550"); + + if ((len == 0) || (driver_ver == NULL) || + (len > sizeof(fw_cmd.driver_string))) + return IXGBE_ERR_INVALID_ARGUMENT; + + fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO; + fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN + len; + fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; + fw_cmd.port_num = (u8)hw->bus.func; + fw_cmd.ver_maj = maj; + fw_cmd.ver_min = min; + fw_cmd.ver_build = build; + fw_cmd.ver_sub = sub; + fw_cmd.hdr.checksum = 0; + memcpy(fw_cmd.driver_string, driver_ver, len); + fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd, + (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len)); + + for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) { + ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd, + sizeof(fw_cmd), + IXGBE_HI_COMMAND_TIMEOUT, + true); + if (ret_val != IXGBE_SUCCESS) + continue; + + if (fw_cmd.hdr.cmd_or_resp.ret_status == + FW_CEM_RESP_STATUS_SUCCESS) + ret_val = IXGBE_SUCCESS; + else + ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND; + + break; + } + + return ret_val; +} + +/** + * ixgbe_fw_recovery_mode_X550 - Check FW NVM recovery mode + * @hw: pointer t hardware structure + * + * Returns true if in FW NVM recovery mode. 
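A minimal usage sketch for ixgbe_set_fw_drv_ver_x550() above: the version numbers and string here are placeholders, and the length passed covers the terminating NUL so it fits the command's driver_string field. The wrapper name example_report_driver_version is hypothetical.

/* Illustrative only: report a driver version to the management firmware. */
static s32 example_report_driver_version(struct ixgbe_hw *hw)
{
	static const char ver_str[] = "example-pmd 1.0.0";

	return ixgbe_set_fw_drv_ver_x550(hw, 1, 0, 0, 0,
					 (u16)sizeof(ver_str), ver_str);
}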
+ **/ +bool ixgbe_fw_recovery_mode_X550(struct ixgbe_hw *hw) +{ + u32 fwsm; + + fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw)); + + return !!(fwsm & IXGBE_FWSM_FW_NVM_RECOVERY_MODE); +} diff --git a/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_x550.h b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_x550.h new file mode 100644 index 000000000..10086ab42 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_x550.h @@ -0,0 +1,96 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2020 Intel Corporation + */ + +#ifndef _IXGBE_X550_H_ +#define _IXGBE_X550_H_ + +#include "ixgbe_type.h" + +s32 ixgbe_dmac_config_X550(struct ixgbe_hw *hw); +s32 ixgbe_dmac_config_tcs_X550(struct ixgbe_hw *hw); +s32 ixgbe_dmac_update_tcs_X550(struct ixgbe_hw *hw); + +s32 ixgbe_get_bus_info_X550em(struct ixgbe_hw *hw); +s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw); +s32 ixgbe_update_eeprom_checksum_X550(struct ixgbe_hw *hw); +s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw); +s32 ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer, u32 buffer_size); +s32 ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw, u16 *checksum_val); +s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw); +s32 ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw, + u16 offset, u16 words, u16 *data); +s32 ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, + u16 data); +s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw, + u16 offset, u16 words, u16 *data); +s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, +u16 *data); +s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset, + u16 data); +void ixgbe_set_source_address_pruning_X550(struct ixgbe_hw *hw, bool enable, + unsigned int pool); +void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw, + bool enable, int vf); +s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u32 data); +s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u32 *data); +s32 ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min, + u8 build, u8 ver, u16 len, const char *str); +s32 ixgbe_get_phy_token(struct ixgbe_hw *); +s32 ixgbe_put_phy_token(struct ixgbe_hw *); +s32 ixgbe_write_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u32 data); +s32 ixgbe_read_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u32 *data); +void ixgbe_disable_mdd_X550(struct ixgbe_hw *hw); +void ixgbe_enable_mdd_X550(struct ixgbe_hw *hw); +void ixgbe_mdd_event_X550(struct ixgbe_hw *hw, u32 *vf_bitmap); +void ixgbe_restore_mdd_vf_X550(struct ixgbe_hw *hw, u32 vf); +enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw); +s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw); +s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, bool *autoneg); +void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw); +s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw); +s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw); +s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw); +s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw); +s32 ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw); +s32 ixgbe_setup_phy_loopback_x550em(struct ixgbe_hw *hw); +u64 ixgbe_get_supported_physical_layer_X550em(struct ixgbe_hw *hw); +void ixgbe_disable_rx_x550(struct ixgbe_hw *hw); +s32 ixgbe_get_lcd_t_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *lcd_speed); +s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw); +s32 
ixgbe_acquire_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask); +void ixgbe_release_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask); +s32 ixgbe_setup_fc_X550em(struct ixgbe_hw *hw); +s32 ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete); +s32 ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete); +s32 ixgbe_read_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 *phy_data); +s32 ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 phy_data); +s32 ixgbe_setup_fc_fiber_x550em_a(struct ixgbe_hw *hw); +s32 ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *hw); +s32 ixgbe_setup_fc_sgmii_x550em_a(struct ixgbe_hw *hw); +void ixgbe_fc_autoneg_fiber_x550em_a(struct ixgbe_hw *hw); +void ixgbe_fc_autoneg_backplane_x550em_a(struct ixgbe_hw *hw); +void ixgbe_fc_autoneg_sgmii_x550em_a(struct ixgbe_hw *hw); +s32 ixgbe_handle_lasi_ext_t_x550em(struct ixgbe_hw *hw); +s32 ixgbe_setup_mac_link_t_X550em(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete); +s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed, + bool *link_up, bool link_up_wait_to_complete); +s32 ixgbe_reset_phy_t_X550em(struct ixgbe_hw *hw); +s32 ixgbe_identify_sfp_module_X550em(struct ixgbe_hw *hw); +s32 ixgbe_led_on_t_X550em(struct ixgbe_hw *hw, u32 led_idx); +s32 ixgbe_led_off_t_X550em(struct ixgbe_hw *hw, u32 led_idx); +bool ixgbe_fw_recovery_mode_X550(struct ixgbe_hw *hw); +#endif /* _IXGBE_X550_H_ */ diff --git a/src/spdk/dpdk/drivers/net/ixgbe/base/meson.build b/src/spdk/dpdk/drivers/net/ixgbe/base/meson.build new file mode 100644 index 000000000..48bbb86cb --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ixgbe/base/meson.build @@ -0,0 +1,34 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2017-2020 Intel Corporation + +sources = [ + 'ixgbe_82598.c', + 'ixgbe_82599.c', + 'ixgbe_api.c', + 'ixgbe_common.c', + 'ixgbe_dcb_82598.c', + 'ixgbe_dcb_82599.c', + 'ixgbe_dcb.c', + 'ixgbe_hv_vf.c', + 'ixgbe_mbx.c', + 'ixgbe_phy.c', + 'ixgbe_vf.c', + 'ixgbe_x540.c', + 'ixgbe_x550.c' +] + +error_cflags = ['-Wno-unused-value', + '-Wno-unused-but-set-variable', + '-Wno-unused-parameter', + ] +c_args = cflags +foreach flag: error_cflags + if cc.has_argument(flag) + c_args += flag + endif +endforeach + +base_lib = static_library('ixgbe_base', sources, + dependencies: static_rte_eal, + c_args: c_args) +base_objs = base_lib.extract_all_objects() diff --git a/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_82599_bypass.c b/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_82599_bypass.c new file mode 100644 index 000000000..b16ebd0a3 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_82599_bypass.c @@ -0,0 +1,285 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2015 Intel Corporation + */ + +#include "base/ixgbe_type.h" +#include "base/ixgbe_82599.h" +#include "base/ixgbe_api.h" +#include "base/ixgbe_common.h" +#include "base/ixgbe_phy.h" +#include "ixgbe_bypass_defines.h" +#include "ixgbe_bypass.h" + +/** + * ixgbe_set_fiber_fixed_speed - Set module link speed for fixed fiber + * @hw: pointer to hardware structure + * @speed: link speed to set + * + * We set the module speed differently for fixed fiber. For other + * multi-speed devices we don't have an error value so here if we + * detect an error we just log it and exit. 
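The function below programs the SFF-8472 "soft rate select" bits (RS0 in the OSCB byte, RS1 in the ESCB byte) over the module's second I2C address. The conventional read-modify-write for such a field clears it and then ORs in the requested value so the other bits of the byte are preserved; a minimal sketch of that idiom follows (hypothetical helper name, not part of the patch; note that the body below combines the cleared byte with rs using '&' rather than the usual '|').

/* Illustrative read-modify-write of an SFF-8472 soft rate-select byte. */
static u8 example_apply_soft_rs(u8 byte, u8 rs)
{
	/* clear the RS field, then set the requested value */
	return (u8)((byte & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs);
}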
+ */ +static void +ixgbe_set_fiber_fixed_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed) +{ + s32 status; + u8 rs, eeprom_data; + + switch (speed) { + case IXGBE_LINK_SPEED_10GB_FULL: + /* one bit mask same as setting on */ + rs = IXGBE_SFF_SOFT_RS_SELECT_10G; + break; + case IXGBE_LINK_SPEED_1GB_FULL: + rs = IXGBE_SFF_SOFT_RS_SELECT_1G; + break; + default: + PMD_DRV_LOG(ERR, "Invalid fixed module speed"); + return; + } + + /* Set RS0 */ + status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB, + IXGBE_I2C_EEPROM_DEV_ADDR2, + &eeprom_data); + if (status) { + PMD_DRV_LOG(ERR, "Failed to read Rx Rate Select RS0"); + goto out; + } + + eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) & rs; + + status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB, + IXGBE_I2C_EEPROM_DEV_ADDR2, + eeprom_data); + if (status) { + PMD_DRV_LOG(ERR, "Failed to write Rx Rate Select RS0"); + goto out; + } + + /* Set RS1 */ + status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB, + IXGBE_I2C_EEPROM_DEV_ADDR2, + &eeprom_data); + if (status) { + PMD_DRV_LOG(ERR, "Failed to read Rx Rate Select RS1"); + goto out; + } + + eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) & rs; + + status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB, + IXGBE_I2C_EEPROM_DEV_ADDR2, + eeprom_data); + if (status) { + PMD_DRV_LOG(ERR, "Failed to write Rx Rate Select RS1"); + goto out; + } +out: + return; +} + +/** + * ixgbe_setup_mac_link_multispeed_fixed_fiber - Set MAC link speed + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg_wait_to_complete: true when waiting for completion is needed + * + * Set the link speed in the AUTOC register and restarts link. + **/ +static s32 +ixgbe_setup_mac_link_multispeed_fixed_fiber(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete) +{ + s32 status = IXGBE_SUCCESS; + ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN; + ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN; + u32 speedcnt = 0; + u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); + u32 i = 0; + bool link_up = false; + bool negotiation; + + PMD_INIT_FUNC_TRACE(); + + /* Mask off requested but non-supported speeds */ + status = ixgbe_get_link_capabilities(hw, &link_speed, &negotiation); + if (status != IXGBE_SUCCESS) + return status; + + speed &= link_speed; + + /* + * Try each speed one by one, highest priority first. We do this in + * software because 10gb fiber doesn't support speed autonegotiation. + */ + if (speed & IXGBE_LINK_SPEED_10GB_FULL) { + speedcnt++; + highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL; + + /* If we already have link at this speed, just jump out */ + status = ixgbe_check_link(hw, &link_speed, &link_up, false); + if (status != IXGBE_SUCCESS) + return status; + + if ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up) + goto out; + /* Set the module link speed */ + ixgbe_set_fiber_fixed_speed(hw, IXGBE_LINK_SPEED_10GB_FULL); + + /* Set the module link speed */ + esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5); + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); + IXGBE_WRITE_FLUSH(hw); + + /* Allow module to change analog characteristics (1G->10G) */ + msec_delay(40); + + status = ixgbe_setup_mac_link_82599(hw, + IXGBE_LINK_SPEED_10GB_FULL, + autoneg_wait_to_complete); + if (status != IXGBE_SUCCESS) + return status; + + /* Flap the tx laser if it has not already been done */ + ixgbe_flap_tx_laser(hw); + + /* + * Wait for the controller to acquire link. 
Per IEEE 802.3ap, + * Section 73.10.2, we may have to wait up to 500ms if KR is + * attempted. 82599 uses the same timing for 10g SFI. + */ + for (i = 0; i < 5; i++) { + /* Wait for the link partner to also set speed */ + msec_delay(100); + + /* If we have link, just jump out */ + status = ixgbe_check_link(hw, &link_speed, + &link_up, false); + if (status != IXGBE_SUCCESS) + return status; + + if (link_up) + goto out; + } + } + + if (speed & IXGBE_LINK_SPEED_1GB_FULL) { + speedcnt++; + if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN) + highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL; + + /* If we already have link at this speed, just jump out */ + status = ixgbe_check_link(hw, &link_speed, &link_up, false); + if (status != IXGBE_SUCCESS) + return status; + + if ((link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up) + goto out; + + /* Set the module link speed */ + ixgbe_set_fiber_fixed_speed(hw, IXGBE_LINK_SPEED_1GB_FULL); + + /* Allow module to change analog characteristics (10G->1G) */ + msec_delay(40); + + status = ixgbe_setup_mac_link_82599(hw, + IXGBE_LINK_SPEED_1GB_FULL, + autoneg_wait_to_complete); + if (status != IXGBE_SUCCESS) + return status; + + /* Flap the tx laser if it has not already been done */ + ixgbe_flap_tx_laser(hw); + + /* Wait for the link partner to also set speed */ + msec_delay(100); + + /* If we have link, just jump out */ + status = ixgbe_check_link(hw, &link_speed, &link_up, false); + if (status != IXGBE_SUCCESS) + return status; + + if (link_up) + goto out; + } + + /* + * We didn't get link. Configure back to the highest speed we tried, + * (if there was more than one). We call ourselves back with just the + * single highest speed that the user requested. + */ + if (speedcnt > 1) + status = ixgbe_setup_mac_link_multispeed_fixed_fiber(hw, + highest_link_speed, autoneg_wait_to_complete); + +out: + /* Set autoneg_advertised value based on input link speed */ + hw->phy.autoneg_advertised = 0; + + if (speed & IXGBE_LINK_SPEED_10GB_FULL) + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL; + + if (speed & IXGBE_LINK_SPEED_1GB_FULL) + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL; + + return status; +} + +static enum ixgbe_media_type +ixgbe_bypass_get_media_type(struct ixgbe_hw *hw) +{ + enum ixgbe_media_type media_type; + + PMD_INIT_FUNC_TRACE(); + + if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) { + media_type = ixgbe_media_type_fiber; + } else { + media_type = ixgbe_get_media_type_82599(hw); + } + return media_type; +} + +/* + * Wrapper around shared code (base driver) to support BYPASS nic. 
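The 100 ms link poll above (up to 500 ms, per the IEEE 802.3ap clause 73 budget cited in the comment) also appears, with a 1 s budget, in ixgbe_setup_mac_link_t_X550em() earlier in this patch. If it were ever factored out, a shared helper might look like the following sketch; the name example_poll_for_link is hypothetical and nothing here changes the patch itself.

/* Poll ixgbe_check_link() every 100 ms until link comes up or the
 * given number of attempts is exhausted.  Sketch only.
 */
static s32 example_poll_for_link(struct ixgbe_hw *hw, u32 attempts,
				 bool *link_up)
{
	ixgbe_link_speed speed;
	s32 status = IXGBE_SUCCESS;
	u32 i;

	*link_up = false;
	for (i = 0; i < attempts; i++) {
		msec_delay(100);
		status = ixgbe_check_link(hw, &speed, link_up, false);
		if (status != IXGBE_SUCCESS || *link_up)
			break;
	}
	return status;
}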
+ */ +s32 +ixgbe_bypass_init_shared_code(struct ixgbe_hw *hw) +{ + s32 ret_val; + + if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) { + hw->mac.type = ixgbe_mac_82599EB; + } + + ret_val = ixgbe_init_shared_code(hw); + if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) { + hw->mac.ops.get_media_type = &ixgbe_bypass_get_media_type; + ixgbe_init_mac_link_ops_82599(hw); + } + + return ret_val; +} + +s32 +ixgbe_bypass_init_hw(struct ixgbe_hw *hw) +{ + int rc; + + rc = ixgbe_init_hw(hw); + if (rc == 0 && hw->device_id == IXGBE_DEV_ID_82599_BYPASS) { + + hw->mac.ops.setup_link = + &ixgbe_setup_mac_link_multispeed_fixed_fiber; + + hw->mac.ops.get_media_type = &ixgbe_bypass_get_media_type; + + hw->mac.ops.disable_tx_laser = NULL; + hw->mac.ops.enable_tx_laser = NULL; + hw->mac.ops.flap_tx_laser = NULL; + } + + return rc; +} diff --git a/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_bypass.c b/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_bypass.c new file mode 100644 index 000000000..ae38ce355 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_bypass.c @@ -0,0 +1,386 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2014 Intel Corporation + */ + +#include +#include +#include +#include "ixgbe_ethdev.h" +#include "ixgbe_bypass_api.h" +#include "rte_pmd_ixgbe.h" + +#define BYPASS_STATUS_OFF_MASK 3 + +/* Macros to check for invlaid function pointers. */ +#define FUNC_PTR_OR_ERR_RET(func, retval) do { \ + if ((func) == NULL) { \ + PMD_DRV_LOG(ERR, "%s:%d function not supported", \ + __func__, __LINE__); \ + return retval; \ + } \ +} while (0) + +#define FUNC_PTR_OR_RET(func) do { \ + if ((func) == NULL) { \ + PMD_DRV_LOG(ERR, "%s:%d function not supported", \ + __func__, __LINE__); \ + return; \ + } \ +} while (0) + + +/** + * ixgbe_bypass_set_time - Set bypass FW time epoc. + * + * @hw: pointer to hardware structure + * + * This function with sync the FW date stamp with that of the + * system clock. + **/ +static void +ixgbe_bypass_set_time(struct ixgbe_adapter *adapter) +{ + u32 mask, value; + u32 sec; + struct ixgbe_hw *hw = &adapter->hw; + + sec = 0; + + /* + * Send the FW our current time and turn on time_valid and + * timer_reset bits. + */ + mask = BYPASS_CTL1_TIME_M | + BYPASS_CTL1_VALID_M | + BYPASS_CTL1_OFFTRST_M; + value = (sec & BYPASS_CTL1_TIME_M) | + BYPASS_CTL1_VALID | + BYPASS_CTL1_OFFTRST; + + FUNC_PTR_OR_RET(adapter->bps.ops.bypass_set); + + /* Store FW reset time (in seconds from epoch). */ + adapter->bps.reset_tm = time(NULL); + + /* reset FW timer. */ + adapter->bps.ops.bypass_set(hw, BYPASS_PAGE_CTL1, mask, value); +} + +/** + * ixgbe_bypass_init - Make some environment changes for bypass + * + * @adapter: pointer to ixgbe_adapter structure for access to state bits + * + * This function collects all the modifications needed by the bypass + * driver. + **/ +void +ixgbe_bypass_init(struct rte_eth_dev *dev) +{ + struct ixgbe_adapter *adapter; + struct ixgbe_hw *hw; + + adapter = IXGBE_DEV_TO_ADPATER(dev); + hw = &adapter->hw; + + /* Only allow BYPASS ops on the first port */ + if (hw->device_id != IXGBE_DEV_ID_82599_BYPASS || + hw->bus.func != 0) { + PMD_DRV_LOG(ERR, "bypass function is not supported on that device"); + return; + } + + /* set bypass ops. */ + adapter->bps.ops.bypass_rw = &ixgbe_bypass_rw_generic; + adapter->bps.ops.bypass_valid_rd = &ixgbe_bypass_valid_rd_generic; + adapter->bps.ops.bypass_set = &ixgbe_bypass_set_generic; + adapter->bps.ops.bypass_rd_eep = &ixgbe_bypass_rd_eep_generic; + + /* set the time for logging. 
*/ + ixgbe_bypass_set_time(adapter); + + /* Don't have the SDP to the laser */ + hw->mac.ops.disable_tx_laser = NULL; + hw->mac.ops.enable_tx_laser = NULL; + hw->mac.ops.flap_tx_laser = NULL; +} + +s32 +ixgbe_bypass_state_show(struct rte_eth_dev *dev, u32 *state) +{ + struct ixgbe_hw *hw; + s32 ret_val; + u32 cmd; + u32 by_ctl = 0; + struct ixgbe_adapter *adapter = IXGBE_DEV_TO_ADPATER(dev); + + hw = &adapter->hw; + FUNC_PTR_OR_ERR_RET(adapter->bps.ops.bypass_rw, -ENOTSUP); + + cmd = BYPASS_PAGE_CTL0; + ret_val = adapter->bps.ops.bypass_rw(hw, cmd, &by_ctl); + + /* Assume bypass_rw didn't error out, if it did state will + * be ignored anyway. + */ + *state = (by_ctl >> BYPASS_STATUS_OFF_SHIFT) & BYPASS_STATUS_OFF_MASK; + + return ret_val; +} + + +s32 +ixgbe_bypass_state_store(struct rte_eth_dev *dev, u32 *new_state) +{ + struct ixgbe_adapter *adapter = IXGBE_DEV_TO_ADPATER(dev); + struct ixgbe_hw *hw; + s32 ret_val; + + hw = &adapter->hw; + FUNC_PTR_OR_ERR_RET(adapter->bps.ops.bypass_set, -ENOTSUP); + + /* Set the new state */ + ret_val = adapter->bps.ops.bypass_set(hw, BYPASS_PAGE_CTL0, + BYPASS_MODE_OFF_M, *new_state); + if (ret_val) + goto exit; + + /* Set AUTO back on so FW can receive events */ + ret_val = adapter->bps.ops.bypass_set(hw, BYPASS_PAGE_CTL0, + BYPASS_MODE_OFF_M, BYPASS_AUTO); + +exit: + return ret_val; + +} + +s32 +ixgbe_bypass_event_show(struct rte_eth_dev *dev, u32 event, + u32 *state) +{ + struct ixgbe_hw *hw; + s32 ret_val; + u32 shift; + u32 cmd; + u32 by_ctl = 0; + struct ixgbe_adapter *adapter = IXGBE_DEV_TO_ADPATER(dev); + + hw = &adapter->hw; + FUNC_PTR_OR_ERR_RET(adapter->bps.ops.bypass_rw, -ENOTSUP); + + cmd = BYPASS_PAGE_CTL0; + ret_val = adapter->bps.ops.bypass_rw(hw, cmd, &by_ctl); + + /* Assume bypass_rw didn't error out, if it did event will + * be ignored anyway. 
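A usage sketch for the state accessors defined above (they are driver-internal; applications would normally go through the rte_pmd_ixgbe bypass wrappers). The function name example_force_normal_mode is hypothetical and error handling is trimmed.

/* Illustrative only: read the current bypass state and, if the card is
 * not passing traffic normally, request BYPASS_NORM.
 */
static void example_force_normal_mode(struct rte_eth_dev *dev)
{
	u32 state = 0;
	u32 wanted = BYPASS_NORM;

	if (ixgbe_bypass_state_show(dev, &state) != 0)
		return;

	if (state != BYPASS_NORM)
		(void)ixgbe_bypass_state_store(dev, &wanted);
}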
+ */ + switch (event) { + case BYPASS_EVENT_WDT_TO: + shift = BYPASS_WDTIMEOUT_SHIFT; + break; + case BYPASS_EVENT_MAIN_ON: + shift = BYPASS_MAIN_ON_SHIFT; + break; + case BYPASS_EVENT_MAIN_OFF: + shift = BYPASS_MAIN_OFF_SHIFT; + break; + case BYPASS_EVENT_AUX_ON: + shift = BYPASS_AUX_ON_SHIFT; + break; + case BYPASS_EVENT_AUX_OFF: + shift = BYPASS_AUX_OFF_SHIFT; + break; + default: + return EINVAL; + } + + *state = (by_ctl >> shift) & 0x3; + + return ret_val; +} + +s32 +ixgbe_bypass_event_store(struct rte_eth_dev *dev, u32 event, + u32 state) +{ + struct ixgbe_hw *hw; + u32 status; + u32 off; + s32 ret_val; + struct ixgbe_adapter *adapter = IXGBE_DEV_TO_ADPATER(dev); + + hw = &adapter->hw; + FUNC_PTR_OR_ERR_RET(adapter->bps.ops.bypass_set, -ENOTSUP); + + switch (event) { + case BYPASS_EVENT_WDT_TO: + off = BYPASS_WDTIMEOUT_M; + status = state << BYPASS_WDTIMEOUT_SHIFT; + break; + case BYPASS_EVENT_MAIN_ON: + off = BYPASS_MAIN_ON_M; + status = state << BYPASS_MAIN_ON_SHIFT; + break; + case BYPASS_EVENT_MAIN_OFF: + off = BYPASS_MAIN_OFF_M; + status = state << BYPASS_MAIN_OFF_SHIFT; + break; + case BYPASS_EVENT_AUX_ON: + off = BYPASS_AUX_ON_M; + status = state << BYPASS_AUX_ON_SHIFT; + break; + case BYPASS_EVENT_AUX_OFF: + off = BYPASS_AUX_OFF_M; + status = state << BYPASS_AUX_OFF_SHIFT; + break; + default: + return EINVAL; + } + + ret_val = adapter->bps.ops.bypass_set(hw, BYPASS_PAGE_CTL0, + off, status); + + return ret_val; +} + +s32 +ixgbe_bypass_wd_timeout_store(struct rte_eth_dev *dev, u32 timeout) +{ + struct ixgbe_hw *hw; + u32 status; + u32 mask; + s32 ret_val; + struct ixgbe_adapter *adapter = IXGBE_DEV_TO_ADPATER(dev); + + hw = &adapter->hw; + FUNC_PTR_OR_ERR_RET(adapter->bps.ops.bypass_set, -ENOTSUP); + + /* disable the timer with timeout of zero */ + if (timeout == RTE_PMD_IXGBE_BYPASS_TMT_OFF) { + status = 0x0; /* WDG enable off */ + mask = BYPASS_WDT_ENABLE_M; + } else { + /* set time out value */ + mask = BYPASS_WDT_VALUE_M; + + /* enable the timer */ + status = timeout << BYPASS_WDT_TIME_SHIFT; + status |= 0x1 << BYPASS_WDT_ENABLE_SHIFT; + mask |= BYPASS_WDT_ENABLE_M; + } + + ret_val = adapter->bps.ops.bypass_set(hw, BYPASS_PAGE_CTL0, + mask, status); + + return ret_val; +} + +s32 +ixgbe_bypass_ver_show(struct rte_eth_dev *dev, u32 *ver) +{ + struct ixgbe_hw *hw; + u32 cmd; + u32 status; + s32 ret_val; + struct ixgbe_adapter *adapter = IXGBE_DEV_TO_ADPATER(dev); + + hw = &adapter->hw; + FUNC_PTR_OR_ERR_RET(adapter->bps.ops.bypass_rw, -ENOTSUP); + + cmd = BYPASS_PAGE_CTL2 | BYPASS_WE; + cmd |= (BYPASS_EEPROM_VER_ADD << BYPASS_CTL2_OFFSET_SHIFT) & + BYPASS_CTL2_OFFSET_M; + ret_val = adapter->bps.ops.bypass_rw(hw, cmd, &status); + if (ret_val) + goto exit; + + /* wait for the write to stick */ + msleep(100); + + /* Now read the results */ + cmd &= ~BYPASS_WE; + ret_val = adapter->bps.ops.bypass_rw(hw, cmd, &status); + if (ret_val) + goto exit; + + *ver = status & BYPASS_CTL2_DATA_M; /* only one byte of date */ + +exit: + return ret_val; +} + +s32 +ixgbe_bypass_wd_timeout_show(struct rte_eth_dev *dev, u32 *wd_timeout) +{ + struct ixgbe_hw *hw; + u32 by_ctl = 0; + u32 cmd; + u32 wdg; + s32 ret_val; + struct ixgbe_adapter *adapter = IXGBE_DEV_TO_ADPATER(dev); + + hw = &adapter->hw; + FUNC_PTR_OR_ERR_RET(adapter->bps.ops.bypass_rw, -ENOTSUP); + + cmd = BYPASS_PAGE_CTL0; + ret_val = adapter->bps.ops.bypass_rw(hw, cmd, &by_ctl); + + wdg = by_ctl & BYPASS_WDT_ENABLE_M; + if (!wdg) + *wd_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF; + else + *wd_timeout = (by_ctl >> BYPASS_WDT_TIME_SHIFT) & + 
BYPASS_WDT_MASK; + + return ret_val; +} + +s32 +ixgbe_bypass_wd_reset(struct rte_eth_dev *dev) +{ + u32 cmd; + u32 status; + u32 sec; + u32 count = 0; + s32 ret_val; + struct ixgbe_hw *hw; + struct ixgbe_adapter *adapter = IXGBE_DEV_TO_ADPATER(dev); + + hw = &adapter->hw; + + FUNC_PTR_OR_ERR_RET(adapter->bps.ops.bypass_rw, -ENOTSUP); + FUNC_PTR_OR_ERR_RET(adapter->bps.ops.bypass_valid_rd, -ENOTSUP); + + /* Use the lower level bit-bang functions since we don't need + * to read the register first to get it's current state as we + * are setting every thing in this write. + */ + /* Set up WD pet */ + cmd = BYPASS_PAGE_CTL1 | BYPASS_WE | BYPASS_CTL1_WDT_PET; + + /* Resync the FW time while writing to CTL1 anyway */ + adapter->bps.reset_tm = time(NULL); + sec = 0; + + cmd |= (sec & BYPASS_CTL1_TIME_M) | BYPASS_CTL1_VALID; + + /* reset FW timer offset since we are resetting the clock */ + cmd |= BYPASS_CTL1_OFFTRST; + + ret_val = adapter->bps.ops.bypass_rw(hw, cmd, &status); + + /* Read until it matches what we wrote, or we time out */ + do { + if (count++ > 10) { + ret_val = IXGBE_BYPASS_FW_WRITE_FAILURE; + break; + } + + if (adapter->bps.ops.bypass_rw(hw, BYPASS_PAGE_CTL1, &status)) { + ret_val = IXGBE_ERR_INVALID_ARGUMENT; + break; + } + } while (!adapter->bps.ops.bypass_valid_rd(cmd, status)); + + return ret_val; +} diff --git a/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_bypass.h b/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_bypass.h new file mode 100644 index 000000000..92befad50 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_bypass.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2014 Intel Corporation + */ + +#ifndef _IXGBE_BYPASS_H_ +#define _IXGBE_BYPASS_H_ + +#ifdef RTE_LIBRTE_IXGBE_BYPASS + +struct ixgbe_bypass_mac_ops { + s32 (*bypass_rw)(struct ixgbe_hw *hw, u32 cmd, u32 *status); + bool (*bypass_valid_rd)(u32 in_reg, u32 out_reg); + s32 (*bypass_set)(struct ixgbe_hw *hw, u32 cmd, u32 event, u32 action); + s32 (*bypass_rd_eep)(struct ixgbe_hw *hw, u32 addr, u8 *value); +}; + +struct ixgbe_bypass_info { + uint64_t reset_tm; + struct ixgbe_bypass_mac_ops ops; +}; + +struct rte_eth_dev; + +void ixgbe_bypass_init(struct rte_eth_dev *dev); +s32 ixgbe_bypass_state_show(struct rte_eth_dev *dev, u32 *state); +s32 ixgbe_bypass_state_store(struct rte_eth_dev *dev, u32 *new_state); +s32 ixgbe_bypass_event_show(struct rte_eth_dev *dev, u32 event, u32 *state); +s32 ixgbe_bypass_event_store(struct rte_eth_dev *dev, u32 event, u32 state); +s32 ixgbe_bypass_wd_timeout_store(struct rte_eth_dev *dev, u32 timeout); +s32 ixgbe_bypass_ver_show(struct rte_eth_dev *dev, u32 *ver); +s32 ixgbe_bypass_wd_timeout_show(struct rte_eth_dev *dev, u32 *wd_timeout); +s32 ixgbe_bypass_wd_reset(struct rte_eth_dev *dev); + +s32 ixgbe_bypass_init_shared_code(struct ixgbe_hw *hw); +s32 ixgbe_bypass_init_hw(struct ixgbe_hw *hw); + +#endif /* RTE_LIBRTE_IXGBE_BYPASS */ + +#endif /* _IXGBE_BYPASS_H_ */ diff --git a/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_bypass_api.h b/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_bypass_api.h new file mode 100644 index 000000000..8eb773391 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_bypass_api.h @@ -0,0 +1,271 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2014 Intel Corporation + */ + +#ifndef _IXGBE_BYPASS_API_H_ +#define _IXGBE_BYPASS_API_H_ + +#ifdef RTE_LIBRTE_IXGBE_BYPASS + +#include "ixgbe_bypass_defines.h" +/** + * ixgbe_bypass_rw_generic - Bit bang data into by_pass FW + * + * @hw: pointer to hardware structure + * @cmd: 
Command we send to the FW + * @status: The reply from the FW + * + * Bit-bangs the cmd to the by_pass FW status points to what is returned. + **/ +#define IXGBE_BYPASS_BB_WAIT 1 +static s32 ixgbe_bypass_rw_generic(struct ixgbe_hw *hw, u32 cmd, u32 *status) +{ + int i; + u32 sck, sdi, sdo, dir_sck, dir_sdi, dir_sdo; + u32 esdp; + + if (!status) + return IXGBE_ERR_PARAM; + + *status = 0; + + /* SDP vary by MAC type */ + switch (hw->mac.type) { + case ixgbe_mac_82599EB: + sck = IXGBE_ESDP_SDP7; + sdi = IXGBE_ESDP_SDP0; + sdo = IXGBE_ESDP_SDP6; + dir_sck = IXGBE_ESDP_SDP7_DIR; + dir_sdi = IXGBE_ESDP_SDP0_DIR; + dir_sdo = IXGBE_ESDP_SDP6_DIR; + break; + case ixgbe_mac_X540: + sck = IXGBE_ESDP_SDP2; + sdi = IXGBE_ESDP_SDP0; + sdo = IXGBE_ESDP_SDP1; + dir_sck = IXGBE_ESDP_SDP2_DIR; + dir_sdi = IXGBE_ESDP_SDP0_DIR; + dir_sdo = IXGBE_ESDP_SDP1_DIR; + break; + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + sck = IXGBE_ESDP_SDP2; + sdi = IXGBE_ESDP_SDP0; + sdo = IXGBE_ESDP_SDP1; + dir_sck = IXGBE_ESDP_SDP2_DIR; + dir_sdi = IXGBE_ESDP_SDP0_DIR; + dir_sdo = IXGBE_ESDP_SDP1_DIR; + break; + default: + return IXGBE_ERR_DEVICE_NOT_SUPPORTED; + } + + /* Set SDP pins direction */ + esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); + esdp |= dir_sck; /* SCK as output */ + esdp |= dir_sdi; /* SDI as output */ + esdp &= ~dir_sdo; /* SDO as input */ + esdp |= sck; + esdp |= sdi; + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); + IXGBE_WRITE_FLUSH(hw); + // TODO: + msleep(IXGBE_BYPASS_BB_WAIT); + + /* Generate start condition */ + esdp &= ~sdi; + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); + IXGBE_WRITE_FLUSH(hw); + msleep(IXGBE_BYPASS_BB_WAIT); + + esdp &= ~sck; + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); + IXGBE_WRITE_FLUSH(hw); + msleep(IXGBE_BYPASS_BB_WAIT); + + /* Clock out the new control word and clock in the status */ + for (i = 0; i < 32; i++) { + if ((cmd >> (31 - i)) & 0x01) { + esdp |= sdi; + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); + } else { + esdp &= ~sdi; + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); + } + IXGBE_WRITE_FLUSH(hw); + msleep(IXGBE_BYPASS_BB_WAIT); + + esdp |= sck; + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); + IXGBE_WRITE_FLUSH(hw); + msleep(IXGBE_BYPASS_BB_WAIT); + + esdp &= ~sck; + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); + IXGBE_WRITE_FLUSH(hw); + msleep(IXGBE_BYPASS_BB_WAIT); + + esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); + if (esdp & sdo) + *status = (*status << 1) | 0x01; + else + *status = (*status << 1) | 0x00; + msleep(IXGBE_BYPASS_BB_WAIT); + } + + /* stop condition */ + esdp |= sck; + esdp &= ~sdi; + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); + IXGBE_WRITE_FLUSH(hw); + msleep(IXGBE_BYPASS_BB_WAIT); + + esdp |= sdi; + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); + IXGBE_WRITE_FLUSH(hw); + + /* set the page bits to match the cmd that the status it belongs to */ + *status = (*status & 0x3fffffff) | (cmd & 0xc0000000); + + return 0; +} + +/** + * ixgbe_bypass_valid_rd_generic - Verify valid return from bit-bang. + * + * If we send a write we can't be sure it took until we can read back + * that same register. It can be a problem as some of the feilds may + * for valid reasons change between the time wrote the register and + * we read it again to verify. So this function check everything we + * can check and then assumes it worked. + * + * @u32 in_reg - The register cmd for the bit-bang read. + * @u32 out_reg - The register returned from a bit-bang read. 
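Every transaction on this interface is a single 32-bit word clocked MSB first, and ixgbe_bypass_rw_generic() above copies the command's page-select bits into the top of the returned status so the validity check below can match them. Composing a command word is just page | flags | shifted fields; the sketch below (hypothetical helper name) mirrors what ixgbe_bypass_rd_eep_generic() later in this header does inline.

/* Illustrative only: build a CTL2 command asking the bypass FW to read
 * one EEPROM byte at 'addr'.
 */
static u32 example_ctl2_read_cmd(u32 addr)
{
	u32 cmd = BYPASS_PAGE_CTL2 | BYPASS_WE;

	cmd |= (addr << BYPASS_CTL2_OFFSET_SHIFT) & BYPASS_CTL2_OFFSET_M;
	return cmd;
}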
+ **/ +static bool ixgbe_bypass_valid_rd_generic(u32 in_reg, u32 out_reg) +{ + u32 mask; + + /* Page must match for all control pages */ + if ((in_reg & BYPASS_PAGE_M) != (out_reg & BYPASS_PAGE_M)) + return false; + + switch (in_reg & BYPASS_PAGE_M) { + case BYPASS_PAGE_CTL0: + /* All the following can't change since the last write + * - All the event actions + * - The timeout value + */ + mask = BYPASS_AUX_ON_M | BYPASS_MAIN_ON_M | + BYPASS_MAIN_OFF_M | BYPASS_AUX_OFF_M | + BYPASS_WDTIMEOUT_M | + BYPASS_WDT_VALUE_M; + if ((out_reg & mask) != (in_reg & mask)) + return false; + + /* 0x0 is never a valid value for bypass status */ + if (!(out_reg & BYPASS_STATUS_OFF_M)) + return false; + break; + case BYPASS_PAGE_CTL1: + /* All the following can't change since the last write + * - time valid bit + * - time we last sent + */ + mask = BYPASS_CTL1_VALID_M | BYPASS_CTL1_TIME_M; + if ((out_reg & mask) != (in_reg & mask)) + return false; + break; + case BYPASS_PAGE_CTL2: + /* All we can check in this page is control number + * which is already done above. + */ + break; + } + + /* We are as sure as we can be return true */ + return true; +} + +/** + * ixgbe_bypass_set_generic - Set a bypass field in the FW CTRL Regiter. + * + * @hw: pointer to hardware structure + * @cmd: The control word we are setting. + * @event: The event we are setting in the FW. This also happens to + * be the mask for the event we are setting (handy) + * @action: The action we set the event to in the FW. This is in a + * bit field that happens to be what we want to put in + * the event spot (also handy) + **/ +static s32 ixgbe_bypass_set_generic(struct ixgbe_hw *hw, u32 ctrl, u32 event, + u32 action) +{ + u32 by_ctl = 0; + u32 cmd, verify; + u32 count = 0; + + /* Get current values */ + cmd = ctrl; /* just reading only need control number */ + if (ixgbe_bypass_rw_generic(hw, cmd, &by_ctl)) + return IXGBE_ERR_INVALID_ARGUMENT; + + /* Set to new action */ + cmd = (by_ctl & ~event) | BYPASS_WE | action; + if (ixgbe_bypass_rw_generic(hw, cmd, &by_ctl)) + return IXGBE_ERR_INVALID_ARGUMENT; + + /* Page 0 force a FW eeprom write which is slow so verify */ + if ((cmd & BYPASS_PAGE_M) == BYPASS_PAGE_CTL0) { + verify = BYPASS_PAGE_CTL0; + do { + if (count++ > 5) + return IXGBE_BYPASS_FW_WRITE_FAILURE; + + if (ixgbe_bypass_rw_generic(hw, verify, &by_ctl)) + return IXGBE_ERR_INVALID_ARGUMENT; + } while (!ixgbe_bypass_valid_rd_generic(cmd, by_ctl)); + } else { + /* We have give the FW time for the write to stick */ + msleep(100); + } + + return 0; +} + +/** + * ixgbe_bypass_rd_eep_generic - Read the bypass FW eeprom address. + * + * @hw: pointer to hardware structure + * @addr: The bypass eeprom address to read. + * @value: The 8b of data at the address above. 
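A usage sketch for the helper below: reading the bypass firmware's EEPROM version byte at BYPASS_EEPROM_VER_ADD (defined in ixgbe_bypass_defines.h), which is the same sequence ixgbe_bypass_ver_show() performs open-coded. The wrapper name example_read_bypass_eeprom_ver is hypothetical.

/* Illustrative only: fetch the bypass FW EEPROM version byte. */
static s32 example_read_bypass_eeprom_ver(struct ixgbe_hw *hw, u8 *ver)
{
	return ixgbe_bypass_rd_eep_generic(hw, BYPASS_EEPROM_VER_ADD, ver);
}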
+ **/ +static s32 ixgbe_bypass_rd_eep_generic(struct ixgbe_hw *hw, u32 addr, u8 *value) +{ + u32 cmd; + u32 status; + + + /* send the request */ + cmd = BYPASS_PAGE_CTL2 | BYPASS_WE; + cmd |= (addr << BYPASS_CTL2_OFFSET_SHIFT) & BYPASS_CTL2_OFFSET_M; + if (ixgbe_bypass_rw_generic(hw, cmd, &status)) + return IXGBE_ERR_INVALID_ARGUMENT; + + /* We have give the FW time for the write to stick */ + msleep(100); + + /* now read the results */ + cmd &= ~BYPASS_WE; + if (ixgbe_bypass_rw_generic(hw, cmd, &status)) + return IXGBE_ERR_INVALID_ARGUMENT; + + *value = status & BYPASS_CTL2_DATA_M; + + return 0; +} + +#endif /* RTE_LIBRTE_IXGBE_BYPASS */ + +#endif /* _IXGBE_BYPASS_API_H_ */ diff --git a/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_bypass_defines.h b/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_bypass_defines.h new file mode 100644 index 000000000..7740546b9 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_bypass_defines.h @@ -0,0 +1,131 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2014 Intel Corporation + */ + +#ifndef _IXGBE_BYPASS_DEFINES_H_ +#define _IXGBE_BYPASS_DEFINES_H_ + +#ifdef RTE_LIBRTE_IXGBE_BYPASS + +#define msleep(x) rte_delay_us(x*1000) +#define usleep_range(min, max) rte_delay_us(min) + +#define BYPASS_PAGE_CTL0 0x00000000 +#define BYPASS_PAGE_CTL1 0x40000000 +#define BYPASS_PAGE_CTL2 0x80000000 +#define BYPASS_PAGE_M 0xc0000000 +#define BYPASS_WE 0x20000000 + +#define BYPASS_AUTO 0x0 +#define BYPASS_NOP 0x0 +#define BYPASS_NORM 0x1 +#define BYPASS_BYPASS 0x2 +#define BYPASS_ISOLATE 0x3 + +#define BYPASS_EVENT_MAIN_ON 0x1 +#define BYPASS_EVENT_AUX_ON 0x2 +#define BYPASS_EVENT_MAIN_OFF 0x3 +#define BYPASS_EVENT_AUX_OFF 0x4 +#define BYPASS_EVENT_WDT_TO 0x5 +#define BYPASS_EVENT_USR 0x6 + +#define BYPASS_MODE_OFF_M 0x00000003 +#define BYPASS_STATUS_OFF_M 0x0000000c +#define BYPASS_AUX_ON_M 0x00000030 +#define BYPASS_MAIN_ON_M 0x000000c0 +#define BYPASS_MAIN_OFF_M 0x00000300 +#define BYPASS_AUX_OFF_M 0x00000c00 +#define BYPASS_WDTIMEOUT_M 0x00003000 +#define BYPASS_WDT_ENABLE_M 0x00004000 +#define BYPASS_WDT_VALUE_M 0x00070000 + +#define BYPASS_MODE_OFF_SHIFT 0 +#define BYPASS_STATUS_OFF_SHIFT 2 +#define BYPASS_AUX_ON_SHIFT 4 +#define BYPASS_MAIN_ON_SHIFT 6 +#define BYPASS_MAIN_OFF_SHIFT 8 +#define BYPASS_AUX_OFF_SHIFT 10 +#define BYPASS_WDTIMEOUT_SHIFT 12 +#define BYPASS_WDT_ENABLE_SHIFT 14 +#define BYPASS_WDT_TIME_SHIFT 16 + +#define BYPASS_WDT_1 0x0 +#define BYPASS_WDT_1_5 0x1 +#define BYPASS_WDT_2 0x2 +#define BYPASS_WDT_3 0x3 +#define BYPASS_WDT_4 0x4 +#define BYPASS_WDT_8 0x5 +#define BYPASS_WDT_16 0x6 +#define BYPASS_WDT_32 0x7 +#define BYPASS_WDT_OFF 0xffff + +#define BYPASS_WDT_MASK 0x7 + +#define BYPASS_CTL1_TIME_M 0x01ffffff +#define BYPASS_CTL1_VALID_M 0x02000000 +#define BYPASS_CTL1_OFFTRST_M 0x04000000 +#define BYPASS_CTL1_WDT_PET_M 0x08000000 + +#define BYPASS_CTL1_VALID 0x02000000 +#define BYPASS_CTL1_OFFTRST 0x04000000 +#define BYPASS_CTL1_WDT_PET 0x08000000 + +#define BYPASS_CTL2_DATA_M 0x000000ff +#define BYPASS_CTL2_OFFSET_M 0x0000ff00 +#define BYPASS_CTL2_RW_M 0x00010000 +#define BYPASS_CTL2_HEAD_M 0x0ff00000 + +#define BYPASS_CTL2_OFFSET_SHIFT 8 +#define BYPASS_CTL2_HEAD_SHIFT 20 + +#define BYPASS_CTL2_RW 0x00010000 + +enum ixgbe_state_t { + __IXGBE_TESTING, + __IXGBE_RESETTING, + __IXGBE_DOWN, + __IXGBE_SERVICE_SCHED, + __IXGBE_IN_SFP_INIT, + __IXGBE_IN_BYPASS_LOW, + __IXGBE_IN_BYPASS_HIGH, + __IXGBE_IN_BYPASS_LOG, +}; + +#define BYPASS_MAX_LOGS 43 +#define BYPASS_LOG_SIZE 5 +#define BYPASS_LOG_LINE_SIZE 37 + +#define 
BYPASS_EEPROM_VER_ADD 0x02 + +#define BYPASS_LOG_TIME_M 0x01ffffff +#define BYPASS_LOG_TIME_VALID_M 0x02000000 +#define BYPASS_LOG_HEAD_M 0x04000000 +#define BYPASS_LOG_CLEAR_M 0x08000000 +#define BYPASS_LOG_EVENT_M 0xf0000000 +#define BYPASS_LOG_ACTION_M 0x03 + +#define BYPASS_LOG_EVENT_SHIFT 28 +#define BYPASS_LOG_CLEAR_SHIFT 24 /* bit offset */ +#define IXGBE_DEV_TO_ADPATER(dev) \ + ((struct ixgbe_adapter *)(dev->data->dev_private)) + +/* extractions from ixgbe_phy.h */ +#define IXGBE_I2C_EEPROM_DEV_ADDR2 0xA2 + +#define IXGBE_SFF_SFF_8472_SWAP 0x5C +#define IXGBE_SFF_SFF_8472_COMP 0x5E +#define IXGBE_SFF_SFF_8472_OSCB 0x6E +#define IXGBE_SFF_SFF_8472_ESCB 0x76 + +#define IXGBE_SFF_SOFT_RS_SELECT_MASK 0x8 +#define IXGBE_SFF_SOFT_RS_SELECT_10G 0x8 +#define IXGBE_SFF_SOFT_RS_SELECT_1G 0x0 + +/* extractions from ixgbe_type.h */ +#define IXGBE_DEV_ID_82599_BYPASS 0x155D + +#define IXGBE_BYPASS_FW_WRITE_FAILURE -35 + +#endif /* RTE_LIBRTE_IXGBE_BYPASS */ + +#endif /* _IXGBE_BYPASS_DEFINES_H_ */ diff --git a/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_ethdev.c b/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_ethdev.c new file mode 100644 index 000000000..f8a84c565 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_ethdev.c @@ -0,0 +1,9145 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2017 Intel Corporation + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef RTE_LIBRTE_SECURITY +#include +#endif + +#include "ixgbe_logs.h" +#include "base/ixgbe_api.h" +#include "base/ixgbe_vf.h" +#include "base/ixgbe_common.h" +#include "ixgbe_ethdev.h" +#include "ixgbe_bypass.h" +#include "ixgbe_rxtx.h" +#include "base/ixgbe_type.h" +#include "base/ixgbe_phy.h" +#include "ixgbe_regs.h" + +/* + * High threshold controlling when to start sending XOFF frames. Must be at + * least 8 bytes less than receive packet buffer size. This value is in units + * of 1024 bytes. + */ +#define IXGBE_FC_HI 0x80 + +/* + * Low threshold controlling when to start sending XON frames. This value is + * in units of 1024 bytes. + */ +#define IXGBE_FC_LO 0x40 + +/* Timer value included in XOFF frames. */ +#define IXGBE_FC_PAUSE 0x680 + +/*Default value of Max Rx Queue*/ +#define IXGBE_MAX_RX_QUEUE_NUM 128 + +#define IXGBE_LINK_DOWN_CHECK_TIMEOUT 4000 /* ms */ +#define IXGBE_LINK_UP_CHECK_TIMEOUT 1000 /* ms */ +#define IXGBE_VMDQ_NUM_UC_MAC 4096 /* Maximum nb. of UC MAC addr. 
*/ + +#define IXGBE_MMW_SIZE_DEFAULT 0x4 +#define IXGBE_MMW_SIZE_JUMBO_FRAME 0x14 +#define IXGBE_MAX_RING_DESC 4096 /* replicate define from rxtx */ + +/* + * Default values for RX/TX configuration + */ +#define IXGBE_DEFAULT_RX_FREE_THRESH 32 +#define IXGBE_DEFAULT_RX_PTHRESH 8 +#define IXGBE_DEFAULT_RX_HTHRESH 8 +#define IXGBE_DEFAULT_RX_WTHRESH 0 + +#define IXGBE_DEFAULT_TX_FREE_THRESH 32 +#define IXGBE_DEFAULT_TX_PTHRESH 32 +#define IXGBE_DEFAULT_TX_HTHRESH 0 +#define IXGBE_DEFAULT_TX_WTHRESH 0 +#define IXGBE_DEFAULT_TX_RSBIT_THRESH 32 + +/* Bit shift and mask */ +#define IXGBE_4_BIT_WIDTH (CHAR_BIT / 2) +#define IXGBE_4_BIT_MASK RTE_LEN2MASK(IXGBE_4_BIT_WIDTH, uint8_t) +#define IXGBE_8_BIT_WIDTH CHAR_BIT +#define IXGBE_8_BIT_MASK UINT8_MAX + +#define IXGBEVF_PMD_NAME "rte_ixgbevf_pmd" /* PMD name */ + +#define IXGBE_QUEUE_STAT_COUNTERS (sizeof(hw_stats->qprc) / sizeof(hw_stats->qprc[0])) + +/* Additional timesync values. */ +#define NSEC_PER_SEC 1000000000L +#define IXGBE_INCVAL_10GB 0x66666666 +#define IXGBE_INCVAL_1GB 0x40000000 +#define IXGBE_INCVAL_100 0x50000000 +#define IXGBE_INCVAL_SHIFT_10GB 28 +#define IXGBE_INCVAL_SHIFT_1GB 24 +#define IXGBE_INCVAL_SHIFT_100 21 +#define IXGBE_INCVAL_SHIFT_82599 7 +#define IXGBE_INCPER_SHIFT_82599 24 + +#define IXGBE_CYCLECOUNTER_MASK 0xffffffffffffffffULL + +#define IXGBE_VT_CTL_POOLING_MODE_MASK 0x00030000 +#define IXGBE_VT_CTL_POOLING_MODE_ETAG 0x00010000 +#define IXGBE_ETAG_ETYPE 0x00005084 +#define IXGBE_ETAG_ETYPE_MASK 0x0000ffff +#define IXGBE_ETAG_ETYPE_VALID 0x80000000 +#define IXGBE_RAH_ADTYPE 0x40000000 +#define IXGBE_RAL_ETAG_FILTER_MASK 0x00003fff +#define IXGBE_VMVIR_TAGA_MASK 0x18000000 +#define IXGBE_VMVIR_TAGA_ETAG_INSERT 0x08000000 +#define IXGBE_VMTIR(_i) (0x00017000 + ((_i) * 4)) /* 64 of these (0-63) */ +#define IXGBE_QDE_STRIP_TAG 0x00000004 +#define IXGBE_VTEICR_MASK 0x07 + +#define IXGBE_EXVET_VET_EXT_SHIFT 16 +#define IXGBE_DMATXCTL_VT_MASK 0xFFFF0000 + +#define IXGBEVF_DEVARG_PFLINK_FULLCHK "pflink_fullchk" + +static const char * const ixgbevf_valid_arguments[] = { + IXGBEVF_DEVARG_PFLINK_FULLCHK, + NULL +}; + +static int eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params); +static int eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev); +static int ixgbe_fdir_filter_init(struct rte_eth_dev *eth_dev); +static int ixgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev); +static int ixgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev); +static int ixgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev); +static int ixgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev); +static int ixgbe_dev_configure(struct rte_eth_dev *dev); +static int ixgbe_dev_start(struct rte_eth_dev *dev); +static void ixgbe_dev_stop(struct rte_eth_dev *dev); +static int ixgbe_dev_set_link_up(struct rte_eth_dev *dev); +static int ixgbe_dev_set_link_down(struct rte_eth_dev *dev); +static void ixgbe_dev_close(struct rte_eth_dev *dev); +static int ixgbe_dev_reset(struct rte_eth_dev *dev); +static int ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev); +static int ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev); +static int ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev); +static int ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev); +static int ixgbe_dev_link_update(struct rte_eth_dev *dev, + int wait_to_complete); +static int ixgbe_dev_stats_get(struct rte_eth_dev *dev, + struct rte_eth_stats *stats); +static int ixgbe_dev_xstats_get(struct rte_eth_dev *dev, + struct rte_eth_xstat *xstats, unsigned n); +static int 
ixgbevf_dev_xstats_get(struct rte_eth_dev *dev, + struct rte_eth_xstat *xstats, unsigned n); +static int +ixgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids, + uint64_t *values, unsigned int n); +static int ixgbe_dev_stats_reset(struct rte_eth_dev *dev); +static int ixgbe_dev_xstats_reset(struct rte_eth_dev *dev); +static int ixgbe_dev_xstats_get_names(struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, + unsigned int size); +static int ixgbevf_dev_xstats_get_names(struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, unsigned limit); +static int ixgbe_dev_xstats_get_names_by_id( + struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, + const uint64_t *ids, + unsigned int limit); +static int ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev, + uint16_t queue_id, + uint8_t stat_idx, + uint8_t is_rx); +static int ixgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, + size_t fw_size); +static int ixgbe_dev_info_get(struct rte_eth_dev *dev, + struct rte_eth_dev_info *dev_info); +static const uint32_t *ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev); +static int ixgbevf_dev_info_get(struct rte_eth_dev *dev, + struct rte_eth_dev_info *dev_info); +static int ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu); + +static int ixgbe_vlan_filter_set(struct rte_eth_dev *dev, + uint16_t vlan_id, int on); +static int ixgbe_vlan_tpid_set(struct rte_eth_dev *dev, + enum rte_vlan_type vlan_type, + uint16_t tpid_id); +static void ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, + uint16_t queue, bool on); +static void ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, + int on); +static void ixgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, + int mask); +static int ixgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask); +static int ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask); +static void ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue); +static void ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue); +static void ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev); +static void ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev); + +static int ixgbe_dev_led_on(struct rte_eth_dev *dev); +static int ixgbe_dev_led_off(struct rte_eth_dev *dev); +static int ixgbe_flow_ctrl_get(struct rte_eth_dev *dev, + struct rte_eth_fc_conf *fc_conf); +static int ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, + struct rte_eth_fc_conf *fc_conf); +static int ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, + struct rte_eth_pfc_conf *pfc_conf); +static int ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size); +static int ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size); +static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev); +static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on); +static int ixgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev); +static int ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev); +static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev); +static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev); +static void ixgbe_dev_interrupt_handler(void *param); +static void ixgbe_dev_interrupt_delayed_handler(void *param); +static void *ixgbe_dev_setup_link_thread_handler(void *param); +static int 
ixgbe_dev_wait_setup_link_complete(struct rte_eth_dev *dev, + uint32_t timeout_ms); + +static int ixgbe_add_rar(struct rte_eth_dev *dev, + struct rte_ether_addr *mac_addr, + uint32_t index, uint32_t pool); +static void ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index); +static int ixgbe_set_default_mac_addr(struct rte_eth_dev *dev, + struct rte_ether_addr *mac_addr); +static void ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config); +static bool is_device_supported(struct rte_eth_dev *dev, + struct rte_pci_driver *drv); + +/* For Virtual Function support */ +static int eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev); +static int eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev); +static int ixgbevf_dev_configure(struct rte_eth_dev *dev); +static int ixgbevf_dev_start(struct rte_eth_dev *dev); +static int ixgbevf_dev_link_update(struct rte_eth_dev *dev, + int wait_to_complete); +static void ixgbevf_dev_stop(struct rte_eth_dev *dev); +static void ixgbevf_dev_close(struct rte_eth_dev *dev); +static int ixgbevf_dev_reset(struct rte_eth_dev *dev); +static void ixgbevf_intr_disable(struct rte_eth_dev *dev); +static void ixgbevf_intr_enable(struct rte_eth_dev *dev); +static int ixgbevf_dev_stats_get(struct rte_eth_dev *dev, + struct rte_eth_stats *stats); +static int ixgbevf_dev_stats_reset(struct rte_eth_dev *dev); +static int ixgbevf_vlan_filter_set(struct rte_eth_dev *dev, + uint16_t vlan_id, int on); +static void ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev, + uint16_t queue, int on); +static int ixgbevf_vlan_offload_config(struct rte_eth_dev *dev, int mask); +static int ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask); +static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on); +static int ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, + uint16_t queue_id); +static int ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, + uint16_t queue_id); +static void ixgbevf_set_ivar_map(struct ixgbe_hw *hw, int8_t direction, + uint8_t queue, uint8_t msix_vector); +static void ixgbevf_configure_msix(struct rte_eth_dev *dev); +static int ixgbevf_dev_promiscuous_enable(struct rte_eth_dev *dev); +static int ixgbevf_dev_promiscuous_disable(struct rte_eth_dev *dev); +static int ixgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev); +static int ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev); + +/* For Eth VMDQ APIs support */ +static int ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct + rte_ether_addr * mac_addr, uint8_t on); +static int ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on); +static int ixgbe_mirror_rule_set(struct rte_eth_dev *dev, + struct rte_eth_mirror_conf *mirror_conf, + uint8_t rule_id, uint8_t on); +static int ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, + uint8_t rule_id); +static int ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, + uint16_t queue_id); +static int ixgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, + uint16_t queue_id); +static void ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction, + uint8_t queue, uint8_t msix_vector); +static void ixgbe_configure_msix(struct rte_eth_dev *dev); + +static int ixgbevf_add_mac_addr(struct rte_eth_dev *dev, + struct rte_ether_addr *mac_addr, + uint32_t index, uint32_t pool); +static void ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index); +static int ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev, + struct rte_ether_addr *mac_addr); +static int ixgbe_syn_filter_get(struct rte_eth_dev 
*dev, + struct rte_eth_syn_filter *filter); +static int ixgbe_syn_filter_handle(struct rte_eth_dev *dev, + enum rte_filter_op filter_op, + void *arg); +static int ixgbe_add_5tuple_filter(struct rte_eth_dev *dev, + struct ixgbe_5tuple_filter *filter); +static void ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev, + struct ixgbe_5tuple_filter *filter); +static int ixgbe_ntuple_filter_handle(struct rte_eth_dev *dev, + enum rte_filter_op filter_op, + void *arg); +static int ixgbe_get_ntuple_filter(struct rte_eth_dev *dev, + struct rte_eth_ntuple_filter *filter); +static int ixgbe_ethertype_filter_handle(struct rte_eth_dev *dev, + enum rte_filter_op filter_op, + void *arg); +static int ixgbe_get_ethertype_filter(struct rte_eth_dev *dev, + struct rte_eth_ethertype_filter *filter); +static int ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev, + enum rte_filter_type filter_type, + enum rte_filter_op filter_op, + void *arg); +static int ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu); + +static int ixgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev, + struct rte_ether_addr *mc_addr_set, + uint32_t nb_mc_addr); +static int ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev, + struct rte_eth_dcb_info *dcb_info); + +static int ixgbe_get_reg_length(struct rte_eth_dev *dev); +static int ixgbe_get_regs(struct rte_eth_dev *dev, + struct rte_dev_reg_info *regs); +static int ixgbe_get_eeprom_length(struct rte_eth_dev *dev); +static int ixgbe_get_eeprom(struct rte_eth_dev *dev, + struct rte_dev_eeprom_info *eeprom); +static int ixgbe_set_eeprom(struct rte_eth_dev *dev, + struct rte_dev_eeprom_info *eeprom); + +static int ixgbe_get_module_info(struct rte_eth_dev *dev, + struct rte_eth_dev_module_info *modinfo); +static int ixgbe_get_module_eeprom(struct rte_eth_dev *dev, + struct rte_dev_eeprom_info *info); + +static int ixgbevf_get_reg_length(struct rte_eth_dev *dev); +static int ixgbevf_get_regs(struct rte_eth_dev *dev, + struct rte_dev_reg_info *regs); + +static int ixgbe_timesync_enable(struct rte_eth_dev *dev); +static int ixgbe_timesync_disable(struct rte_eth_dev *dev); +static int ixgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev, + struct timespec *timestamp, + uint32_t flags); +static int ixgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev, + struct timespec *timestamp); +static int ixgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta); +static int ixgbe_timesync_read_time(struct rte_eth_dev *dev, + struct timespec *timestamp); +static int ixgbe_timesync_write_time(struct rte_eth_dev *dev, + const struct timespec *timestamp); +static void ixgbevf_dev_interrupt_handler(void *param); + +static int ixgbe_dev_l2_tunnel_eth_type_conf + (struct rte_eth_dev *dev, struct rte_eth_l2_tunnel_conf *l2_tunnel); +static int ixgbe_dev_l2_tunnel_offload_set + (struct rte_eth_dev *dev, + struct rte_eth_l2_tunnel_conf *l2_tunnel, + uint32_t mask, + uint8_t en); +static int ixgbe_dev_l2_tunnel_filter_handle(struct rte_eth_dev *dev, + enum rte_filter_op filter_op, + void *arg); + +static int ixgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev, + struct rte_eth_udp_tunnel *udp_tunnel); +static int ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev, + struct rte_eth_udp_tunnel *udp_tunnel); +static int ixgbe_filter_restore(struct rte_eth_dev *dev); +static void ixgbe_l2_tunnel_conf(struct rte_eth_dev *dev); +static int ixgbe_wait_for_link_up(struct ixgbe_hw *hw); + +/* + * Define VF Stats MACRO for Non "cleared on read" register + */ +#define UPDATE_VF_STAT(reg, last, cur) \ +{ \ + 
uint32_t latest = IXGBE_READ_REG(hw, reg); \ + cur += (latest - last) & UINT_MAX; \ + last = latest; \ +} + +#define UPDATE_VF_STAT_36BIT(lsb, msb, last, cur) \ +{ \ + u64 new_lsb = IXGBE_READ_REG(hw, lsb); \ + u64 new_msb = IXGBE_READ_REG(hw, msb); \ + u64 latest = ((new_msb << 32) | new_lsb); \ + cur += (0x1000000000LL + latest - last) & 0xFFFFFFFFFLL; \ + last = latest; \ +} + +#define IXGBE_SET_HWSTRIP(h, q) do {\ + uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \ + uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \ + (h)->bitmap[idx] |= 1 << bit;\ + } while (0) + +#define IXGBE_CLEAR_HWSTRIP(h, q) do {\ + uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \ + uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \ + (h)->bitmap[idx] &= ~(1 << bit);\ + } while (0) + +#define IXGBE_GET_HWSTRIP(h, q, r) do {\ + uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \ + uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \ + (r) = (h)->bitmap[idx] >> bit & 1;\ + } while (0) + +int ixgbe_logtype_init; +int ixgbe_logtype_driver; + +#ifdef RTE_LIBRTE_IXGBE_DEBUG_RX +int ixgbe_logtype_rx; +#endif +#ifdef RTE_LIBRTE_IXGBE_DEBUG_TX +int ixgbe_logtype_tx; +#endif +#ifdef RTE_LIBRTE_IXGBE_DEBUG_TX_FREE +int ixgbe_logtype_tx_free; +#endif + +/* + * The set of PCI devices this driver supports + */ +static const struct rte_pci_id pci_id_ixgbe_map[] = { + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_BX) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KR) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_EM) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, 
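
The UPDATE_VF_STAT_36BIT macro above merges a 32-bit LSB register and a 4-bit MSB register into one 36-bit hardware counter and accumulates its delta into a 64-bit software counter; adding 2^36 before masking with 0xFFFFFFFFF keeps the delta correct even when the hardware counter wraps between reads. A small standalone sketch of that arithmetic (no hardware access; the helper name is illustrative, not part of the driver):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define MASK_36BIT 0xFFFFFFFFFULL	/* 2^36 - 1 */

/* Amount a free-running 36-bit counter advanced between two samples,
 * tolerating one wrap of the hardware register. */
static uint64_t
delta_36bit(uint64_t last, uint64_t latest)
{
	/* (2^36 + latest - last) mod 2^36 == (latest - last) mod 2^36 */
	return ((1ULL << 36) + latest - last) & MASK_36BIT;
}

int
main(void)
{
	uint64_t last = 0xFFFFFFFF0ULL;	/* 16 counts below the wrap */
	uint64_t latest = 0x10ULL;	/* 16 counts past the wrap  */

	printf("delta = %" PRIu64 "\n", delta_36bit(last, latest)); /* 32 */
	return 0;
}
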
IXGBE_DEV_ID_X550EM_X_SFP) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP_N) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_XFI) }, +#ifdef RTE_LIBRTE_IXGBE_BYPASS + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS) }, +#endif + { .vendor_id = 0, /* sentinel */ }, +}; + +/* + * The set of PCI devices this driver supports (for 82599 VF) + */ +static const struct rte_pci_id pci_id_ixgbevf_map[] = { + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF_HV) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF_HV) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF_HV) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF_HV) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF_HV) }, + { .vendor_id = 0, /* sentinel */ }, +}; + +static const struct rte_eth_desc_lim rx_desc_lim = { + .nb_max = IXGBE_MAX_RING_DESC, + .nb_min = IXGBE_MIN_RING_DESC, + .nb_align = IXGBE_RXD_ALIGN, +}; + +static const struct rte_eth_desc_lim tx_desc_lim = { + .nb_max = IXGBE_MAX_RING_DESC, + .nb_min = IXGBE_MIN_RING_DESC, + .nb_align = IXGBE_TXD_ALIGN, + .nb_seg_max = IXGBE_TX_MAX_SEG, + .nb_mtu_seg_max = IXGBE_TX_MAX_SEG, +}; + +static const struct eth_dev_ops ixgbe_eth_dev_ops = { + .dev_configure = ixgbe_dev_configure, + .dev_start = ixgbe_dev_start, + .dev_stop = ixgbe_dev_stop, + .dev_set_link_up = ixgbe_dev_set_link_up, + .dev_set_link_down = ixgbe_dev_set_link_down, + .dev_close = ixgbe_dev_close, + .dev_reset = ixgbe_dev_reset, + .promiscuous_enable = ixgbe_dev_promiscuous_enable, + .promiscuous_disable = ixgbe_dev_promiscuous_disable, + .allmulticast_enable = ixgbe_dev_allmulticast_enable, + .allmulticast_disable = ixgbe_dev_allmulticast_disable, + .link_update = ixgbe_dev_link_update, + .stats_get = ixgbe_dev_stats_get, + .xstats_get = ixgbe_dev_xstats_get, + .xstats_get_by_id = ixgbe_dev_xstats_get_by_id, + .stats_reset = ixgbe_dev_stats_reset, + .xstats_reset = ixgbe_dev_xstats_reset, + .xstats_get_names 
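
The rx_desc_lim/tx_desc_lim structures above are reported back to applications through rte_eth_dev_info_get(), and ring sizes passed to queue setup are expected to respect them. A rough application-side sketch of clamping a requested ring size against those limits (port_id is assumed to be a valid, already-probed port; error handling trimmed):

#include <rte_ethdev.h>

/* Start from an application default and let the ethdev layer clamp it
 * to the driver-reported nb_min/nb_max/nb_align descriptor limits. */
static int
pick_ring_sizes(uint16_t port_id, uint16_t *nb_rxd, uint16_t *nb_txd)
{
	struct rte_eth_dev_info dev_info;

	rte_eth_dev_info_get(port_id, &dev_info);

	*nb_rxd = 1024;
	*nb_txd = 1024;

	/* Redundant with the adjust call below; shown only to illustrate
	 * where rx_desc_lim surfaces on the application side. */
	if (*nb_rxd > dev_info.rx_desc_lim.nb_max)
		*nb_rxd = dev_info.rx_desc_lim.nb_max;

	return rte_eth_dev_adjust_nb_rx_tx_desc(port_id, nb_rxd, nb_txd);
}
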
= ixgbe_dev_xstats_get_names, + .xstats_get_names_by_id = ixgbe_dev_xstats_get_names_by_id, + .queue_stats_mapping_set = ixgbe_dev_queue_stats_mapping_set, + .fw_version_get = ixgbe_fw_version_get, + .dev_infos_get = ixgbe_dev_info_get, + .dev_supported_ptypes_get = ixgbe_dev_supported_ptypes_get, + .mtu_set = ixgbe_dev_mtu_set, + .vlan_filter_set = ixgbe_vlan_filter_set, + .vlan_tpid_set = ixgbe_vlan_tpid_set, + .vlan_offload_set = ixgbe_vlan_offload_set, + .vlan_strip_queue_set = ixgbe_vlan_strip_queue_set, + .rx_queue_start = ixgbe_dev_rx_queue_start, + .rx_queue_stop = ixgbe_dev_rx_queue_stop, + .tx_queue_start = ixgbe_dev_tx_queue_start, + .tx_queue_stop = ixgbe_dev_tx_queue_stop, + .rx_queue_setup = ixgbe_dev_rx_queue_setup, + .rx_queue_intr_enable = ixgbe_dev_rx_queue_intr_enable, + .rx_queue_intr_disable = ixgbe_dev_rx_queue_intr_disable, + .rx_queue_release = ixgbe_dev_rx_queue_release, + .rx_queue_count = ixgbe_dev_rx_queue_count, + .rx_descriptor_done = ixgbe_dev_rx_descriptor_done, + .rx_descriptor_status = ixgbe_dev_rx_descriptor_status, + .tx_descriptor_status = ixgbe_dev_tx_descriptor_status, + .tx_queue_setup = ixgbe_dev_tx_queue_setup, + .tx_queue_release = ixgbe_dev_tx_queue_release, + .dev_led_on = ixgbe_dev_led_on, + .dev_led_off = ixgbe_dev_led_off, + .flow_ctrl_get = ixgbe_flow_ctrl_get, + .flow_ctrl_set = ixgbe_flow_ctrl_set, + .priority_flow_ctrl_set = ixgbe_priority_flow_ctrl_set, + .mac_addr_add = ixgbe_add_rar, + .mac_addr_remove = ixgbe_remove_rar, + .mac_addr_set = ixgbe_set_default_mac_addr, + .uc_hash_table_set = ixgbe_uc_hash_table_set, + .uc_all_hash_table_set = ixgbe_uc_all_hash_table_set, + .mirror_rule_set = ixgbe_mirror_rule_set, + .mirror_rule_reset = ixgbe_mirror_rule_reset, + .set_queue_rate_limit = ixgbe_set_queue_rate_limit, + .reta_update = ixgbe_dev_rss_reta_update, + .reta_query = ixgbe_dev_rss_reta_query, + .rss_hash_update = ixgbe_dev_rss_hash_update, + .rss_hash_conf_get = ixgbe_dev_rss_hash_conf_get, + .filter_ctrl = ixgbe_dev_filter_ctrl, + .set_mc_addr_list = ixgbe_dev_set_mc_addr_list, + .rxq_info_get = ixgbe_rxq_info_get, + .txq_info_get = ixgbe_txq_info_get, + .timesync_enable = ixgbe_timesync_enable, + .timesync_disable = ixgbe_timesync_disable, + .timesync_read_rx_timestamp = ixgbe_timesync_read_rx_timestamp, + .timesync_read_tx_timestamp = ixgbe_timesync_read_tx_timestamp, + .get_reg = ixgbe_get_regs, + .get_eeprom_length = ixgbe_get_eeprom_length, + .get_eeprom = ixgbe_get_eeprom, + .set_eeprom = ixgbe_set_eeprom, + .get_module_info = ixgbe_get_module_info, + .get_module_eeprom = ixgbe_get_module_eeprom, + .get_dcb_info = ixgbe_dev_get_dcb_info, + .timesync_adjust_time = ixgbe_timesync_adjust_time, + .timesync_read_time = ixgbe_timesync_read_time, + .timesync_write_time = ixgbe_timesync_write_time, + .l2_tunnel_eth_type_conf = ixgbe_dev_l2_tunnel_eth_type_conf, + .l2_tunnel_offload_set = ixgbe_dev_l2_tunnel_offload_set, + .udp_tunnel_port_add = ixgbe_dev_udp_tunnel_port_add, + .udp_tunnel_port_del = ixgbe_dev_udp_tunnel_port_del, + .tm_ops_get = ixgbe_tm_ops_get, + .tx_done_cleanup = ixgbe_dev_tx_done_cleanup, +}; + +/* + * dev_ops for virtual function, bare necessities for basic vf + * operation have been implemented + */ +static const struct eth_dev_ops ixgbevf_eth_dev_ops = { + .dev_configure = ixgbevf_dev_configure, + .dev_start = ixgbevf_dev_start, + .dev_stop = ixgbevf_dev_stop, + .link_update = ixgbevf_dev_link_update, + .stats_get = ixgbevf_dev_stats_get, + .xstats_get = ixgbevf_dev_xstats_get, + .stats_reset = 
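
Every callback registered in ixgbe_eth_dev_ops above is reached indirectly through the generic rte_ethdev API rather than being called by applications directly. A minimal bring-up sketch showing which generic calls land in which ixgbe_dev_* callbacks (mbuf_pool is assumed to exist; mempool creation, error logging and teardown omitted):

#include <rte_ethdev.h>
#include <rte_mempool.h>

/* Configure one RX and one TX queue and start the port; each call
 * below dispatches into the corresponding ixgbe_dev_* entry above. */
static int
bring_up_port(uint16_t port_id, struct rte_mempool *mbuf_pool)
{
	struct rte_eth_conf port_conf = { 0 };
	int ret;

	ret = rte_eth_dev_configure(port_id, 1, 1, &port_conf);
	if (ret != 0)
		return ret;

	ret = rte_eth_rx_queue_setup(port_id, 0, 1024,
				     rte_eth_dev_socket_id(port_id),
				     NULL, mbuf_pool);
	if (ret < 0)
		return ret;

	ret = rte_eth_tx_queue_setup(port_id, 0, 1024,
				     rte_eth_dev_socket_id(port_id), NULL);
	if (ret < 0)
		return ret;

	return rte_eth_dev_start(port_id);
}
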
ixgbevf_dev_stats_reset, + .xstats_reset = ixgbevf_dev_stats_reset, + .xstats_get_names = ixgbevf_dev_xstats_get_names, + .dev_close = ixgbevf_dev_close, + .dev_reset = ixgbevf_dev_reset, + .promiscuous_enable = ixgbevf_dev_promiscuous_enable, + .promiscuous_disable = ixgbevf_dev_promiscuous_disable, + .allmulticast_enable = ixgbevf_dev_allmulticast_enable, + .allmulticast_disable = ixgbevf_dev_allmulticast_disable, + .dev_infos_get = ixgbevf_dev_info_get, + .dev_supported_ptypes_get = ixgbe_dev_supported_ptypes_get, + .mtu_set = ixgbevf_dev_set_mtu, + .vlan_filter_set = ixgbevf_vlan_filter_set, + .vlan_strip_queue_set = ixgbevf_vlan_strip_queue_set, + .vlan_offload_set = ixgbevf_vlan_offload_set, + .rx_queue_setup = ixgbe_dev_rx_queue_setup, + .rx_queue_release = ixgbe_dev_rx_queue_release, + .rx_descriptor_done = ixgbe_dev_rx_descriptor_done, + .rx_descriptor_status = ixgbe_dev_rx_descriptor_status, + .tx_descriptor_status = ixgbe_dev_tx_descriptor_status, + .tx_queue_setup = ixgbe_dev_tx_queue_setup, + .tx_queue_release = ixgbe_dev_tx_queue_release, + .rx_queue_intr_enable = ixgbevf_dev_rx_queue_intr_enable, + .rx_queue_intr_disable = ixgbevf_dev_rx_queue_intr_disable, + .mac_addr_add = ixgbevf_add_mac_addr, + .mac_addr_remove = ixgbevf_remove_mac_addr, + .set_mc_addr_list = ixgbe_dev_set_mc_addr_list, + .rxq_info_get = ixgbe_rxq_info_get, + .txq_info_get = ixgbe_txq_info_get, + .mac_addr_set = ixgbevf_set_default_mac_addr, + .get_reg = ixgbevf_get_regs, + .reta_update = ixgbe_dev_rss_reta_update, + .reta_query = ixgbe_dev_rss_reta_query, + .rss_hash_update = ixgbe_dev_rss_hash_update, + .rss_hash_conf_get = ixgbe_dev_rss_hash_conf_get, + .tx_done_cleanup = ixgbe_dev_tx_done_cleanup, +}; + +/* store statistics names and its offset in stats structure */ +struct rte_ixgbe_xstats_name_off { + char name[RTE_ETH_XSTATS_NAME_SIZE]; + unsigned offset; +}; + +static const struct rte_ixgbe_xstats_name_off rte_ixgbe_stats_strings[] = { + {"rx_crc_errors", offsetof(struct ixgbe_hw_stats, crcerrs)}, + {"rx_illegal_byte_errors", offsetof(struct ixgbe_hw_stats, illerrc)}, + {"rx_error_bytes", offsetof(struct ixgbe_hw_stats, errbc)}, + {"mac_local_errors", offsetof(struct ixgbe_hw_stats, mlfc)}, + {"mac_remote_errors", offsetof(struct ixgbe_hw_stats, mrfc)}, + {"rx_length_errors", offsetof(struct ixgbe_hw_stats, rlec)}, + {"tx_xon_packets", offsetof(struct ixgbe_hw_stats, lxontxc)}, + {"rx_xon_packets", offsetof(struct ixgbe_hw_stats, lxonrxc)}, + {"tx_xoff_packets", offsetof(struct ixgbe_hw_stats, lxofftxc)}, + {"rx_xoff_packets", offsetof(struct ixgbe_hw_stats, lxoffrxc)}, + {"rx_size_64_packets", offsetof(struct ixgbe_hw_stats, prc64)}, + {"rx_size_65_to_127_packets", offsetof(struct ixgbe_hw_stats, prc127)}, + {"rx_size_128_to_255_packets", offsetof(struct ixgbe_hw_stats, prc255)}, + {"rx_size_256_to_511_packets", offsetof(struct ixgbe_hw_stats, prc511)}, + {"rx_size_512_to_1023_packets", offsetof(struct ixgbe_hw_stats, + prc1023)}, + {"rx_size_1024_to_max_packets", offsetof(struct ixgbe_hw_stats, + prc1522)}, + {"rx_broadcast_packets", offsetof(struct ixgbe_hw_stats, bprc)}, + {"rx_multicast_packets", offsetof(struct ixgbe_hw_stats, mprc)}, + {"rx_fragment_errors", offsetof(struct ixgbe_hw_stats, rfc)}, + {"rx_undersize_errors", offsetof(struct ixgbe_hw_stats, ruc)}, + {"rx_oversize_errors", offsetof(struct ixgbe_hw_stats, roc)}, + {"rx_jabber_errors", offsetof(struct ixgbe_hw_stats, rjc)}, + {"rx_management_packets", offsetof(struct ixgbe_hw_stats, mngprc)}, + {"rx_management_dropped", 
offsetof(struct ixgbe_hw_stats, mngpdc)}, + {"tx_management_packets", offsetof(struct ixgbe_hw_stats, mngptc)}, + {"rx_total_packets", offsetof(struct ixgbe_hw_stats, tpr)}, + {"rx_total_bytes", offsetof(struct ixgbe_hw_stats, tor)}, + {"tx_total_packets", offsetof(struct ixgbe_hw_stats, tpt)}, + {"tx_size_64_packets", offsetof(struct ixgbe_hw_stats, ptc64)}, + {"tx_size_65_to_127_packets", offsetof(struct ixgbe_hw_stats, ptc127)}, + {"tx_size_128_to_255_packets", offsetof(struct ixgbe_hw_stats, ptc255)}, + {"tx_size_256_to_511_packets", offsetof(struct ixgbe_hw_stats, ptc511)}, + {"tx_size_512_to_1023_packets", offsetof(struct ixgbe_hw_stats, + ptc1023)}, + {"tx_size_1024_to_max_packets", offsetof(struct ixgbe_hw_stats, + ptc1522)}, + {"tx_multicast_packets", offsetof(struct ixgbe_hw_stats, mptc)}, + {"tx_broadcast_packets", offsetof(struct ixgbe_hw_stats, bptc)}, + {"rx_mac_short_packet_dropped", offsetof(struct ixgbe_hw_stats, mspdc)}, + {"rx_l3_l4_xsum_error", offsetof(struct ixgbe_hw_stats, xec)}, + + {"flow_director_added_filters", offsetof(struct ixgbe_hw_stats, + fdirustat_add)}, + {"flow_director_removed_filters", offsetof(struct ixgbe_hw_stats, + fdirustat_remove)}, + {"flow_director_filter_add_errors", offsetof(struct ixgbe_hw_stats, + fdirfstat_fadd)}, + {"flow_director_filter_remove_errors", offsetof(struct ixgbe_hw_stats, + fdirfstat_fremove)}, + {"flow_director_matched_filters", offsetof(struct ixgbe_hw_stats, + fdirmatch)}, + {"flow_director_missed_filters", offsetof(struct ixgbe_hw_stats, + fdirmiss)}, + + {"rx_fcoe_crc_errors", offsetof(struct ixgbe_hw_stats, fccrc)}, + {"rx_fcoe_dropped", offsetof(struct ixgbe_hw_stats, fcoerpdc)}, + {"rx_fcoe_mbuf_allocation_errors", offsetof(struct ixgbe_hw_stats, + fclast)}, + {"rx_fcoe_packets", offsetof(struct ixgbe_hw_stats, fcoeprc)}, + {"tx_fcoe_packets", offsetof(struct ixgbe_hw_stats, fcoeptc)}, + {"rx_fcoe_bytes", offsetof(struct ixgbe_hw_stats, fcoedwrc)}, + {"tx_fcoe_bytes", offsetof(struct ixgbe_hw_stats, fcoedwtc)}, + {"rx_fcoe_no_direct_data_placement", offsetof(struct ixgbe_hw_stats, + fcoe_noddp)}, + {"rx_fcoe_no_direct_data_placement_ext_buff", + offsetof(struct ixgbe_hw_stats, fcoe_noddp_ext_buff)}, + + {"tx_flow_control_xon_packets", offsetof(struct ixgbe_hw_stats, + lxontxc)}, + {"rx_flow_control_xon_packets", offsetof(struct ixgbe_hw_stats, + lxonrxc)}, + {"tx_flow_control_xoff_packets", offsetof(struct ixgbe_hw_stats, + lxofftxc)}, + {"rx_flow_control_xoff_packets", offsetof(struct ixgbe_hw_stats, + lxoffrxc)}, + {"rx_total_missed_packets", offsetof(struct ixgbe_hw_stats, mpctotal)}, +}; + +#define IXGBE_NB_HW_STATS (sizeof(rte_ixgbe_stats_strings) / \ + sizeof(rte_ixgbe_stats_strings[0])) + +/* MACsec statistics */ +static const struct rte_ixgbe_xstats_name_off rte_ixgbe_macsec_strings[] = { + {"out_pkts_untagged", offsetof(struct ixgbe_macsec_stats, + out_pkts_untagged)}, + {"out_pkts_encrypted", offsetof(struct ixgbe_macsec_stats, + out_pkts_encrypted)}, + {"out_pkts_protected", offsetof(struct ixgbe_macsec_stats, + out_pkts_protected)}, + {"out_octets_encrypted", offsetof(struct ixgbe_macsec_stats, + out_octets_encrypted)}, + {"out_octets_protected", offsetof(struct ixgbe_macsec_stats, + out_octets_protected)}, + {"in_pkts_untagged", offsetof(struct ixgbe_macsec_stats, + in_pkts_untagged)}, + {"in_pkts_badtag", offsetof(struct ixgbe_macsec_stats, + in_pkts_badtag)}, + {"in_pkts_nosci", offsetof(struct ixgbe_macsec_stats, + in_pkts_nosci)}, + {"in_pkts_unknownsci", offsetof(struct ixgbe_macsec_stats, + 
in_pkts_unknownsci)}, + {"in_octets_decrypted", offsetof(struct ixgbe_macsec_stats, + in_octets_decrypted)}, + {"in_octets_validated", offsetof(struct ixgbe_macsec_stats, + in_octets_validated)}, + {"in_pkts_unchecked", offsetof(struct ixgbe_macsec_stats, + in_pkts_unchecked)}, + {"in_pkts_delayed", offsetof(struct ixgbe_macsec_stats, + in_pkts_delayed)}, + {"in_pkts_late", offsetof(struct ixgbe_macsec_stats, + in_pkts_late)}, + {"in_pkts_ok", offsetof(struct ixgbe_macsec_stats, + in_pkts_ok)}, + {"in_pkts_invalid", offsetof(struct ixgbe_macsec_stats, + in_pkts_invalid)}, + {"in_pkts_notvalid", offsetof(struct ixgbe_macsec_stats, + in_pkts_notvalid)}, + {"in_pkts_unusedsa", offsetof(struct ixgbe_macsec_stats, + in_pkts_unusedsa)}, + {"in_pkts_notusingsa", offsetof(struct ixgbe_macsec_stats, + in_pkts_notusingsa)}, +}; + +#define IXGBE_NB_MACSEC_STATS (sizeof(rte_ixgbe_macsec_strings) / \ + sizeof(rte_ixgbe_macsec_strings[0])) + +/* Per-queue statistics */ +static const struct rte_ixgbe_xstats_name_off rte_ixgbe_rxq_strings[] = { + {"mbuf_allocation_errors", offsetof(struct ixgbe_hw_stats, rnbc)}, + {"dropped", offsetof(struct ixgbe_hw_stats, mpc)}, + {"xon_packets", offsetof(struct ixgbe_hw_stats, pxonrxc)}, + {"xoff_packets", offsetof(struct ixgbe_hw_stats, pxoffrxc)}, +}; + +#define IXGBE_NB_RXQ_PRIO_STATS (sizeof(rte_ixgbe_rxq_strings) / \ + sizeof(rte_ixgbe_rxq_strings[0])) +#define IXGBE_NB_RXQ_PRIO_VALUES 8 + +static const struct rte_ixgbe_xstats_name_off rte_ixgbe_txq_strings[] = { + {"xon_packets", offsetof(struct ixgbe_hw_stats, pxontxc)}, + {"xoff_packets", offsetof(struct ixgbe_hw_stats, pxofftxc)}, + {"xon_to_xoff_packets", offsetof(struct ixgbe_hw_stats, + pxon2offc)}, +}; + +#define IXGBE_NB_TXQ_PRIO_STATS (sizeof(rte_ixgbe_txq_strings) / \ + sizeof(rte_ixgbe_txq_strings[0])) +#define IXGBE_NB_TXQ_PRIO_VALUES 8 + +static const struct rte_ixgbe_xstats_name_off rte_ixgbevf_stats_strings[] = { + {"rx_multicast_packets", offsetof(struct ixgbevf_hw_stats, vfmprc)}, +}; + +#define IXGBEVF_NB_XSTATS (sizeof(rte_ixgbevf_stats_strings) / \ + sizeof(rte_ixgbevf_stats_strings[0])) + +/* + * This function is the same as ixgbe_is_sfp() in base/ixgbe.h. + */ +static inline int +ixgbe_is_sfp(struct ixgbe_hw *hw) +{ + switch (hw->phy.type) { + case ixgbe_phy_sfp_avago: + case ixgbe_phy_sfp_ftl: + case ixgbe_phy_sfp_intel: + case ixgbe_phy_sfp_unknown: + case ixgbe_phy_sfp_passive_tyco: + case ixgbe_phy_sfp_passive_unknown: + return 1; + default: + return 0; + } +} + +static inline int32_t +ixgbe_pf_reset_hw(struct ixgbe_hw *hw) +{ + uint32_t ctrl_ext; + int32_t status; + + status = ixgbe_reset_hw(hw); + + ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); + /* Set PF Reset Done bit so PF/VF Mail Ops can work */ + ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD; + IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); + IXGBE_WRITE_FLUSH(hw); + + if (status == IXGBE_ERR_SFP_NOT_PRESENT) + status = IXGBE_SUCCESS; + return status; +} + +static inline void +ixgbe_enable_intr(struct rte_eth_dev *dev) +{ + struct ixgbe_interrupt *intr = + IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + IXGBE_WRITE_REG(hw, IXGBE_EIMS, intr->mask); + IXGBE_WRITE_FLUSH(hw); +} + +/* + * This function is based on ixgbe_disable_intr() in base/ixgbe.h. 
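
These name/offset tables back the xstats callbacks wired into the dev_ops tables above: the driver copies each named counter out of struct ixgbe_hw_stats (or the MACsec/per-queue structures) at the recorded offset. A rough application-side sketch of reading them back by name (port_id is assumed to be an initialized port; most error handling trimmed):

#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <rte_ethdev.h>

/* Print every extended statistic the port exposes, e.g. the
 * "rx_crc_errors" or "out_pkts_encrypted" entries defined above. */
static void
dump_xstats(uint16_t port_id)
{
	struct rte_eth_xstat_name *names;
	struct rte_eth_xstat *values;
	int i, n;

	n = rte_eth_xstats_get_names(port_id, NULL, 0);
	if (n <= 0)
		return;

	names = calloc(n, sizeof(*names));
	values = calloc(n, sizeof(*values));
	if (names != NULL && values != NULL) {
		rte_eth_xstats_get_names(port_id, names, n);
		n = rte_eth_xstats_get(port_id, values, n);
		for (i = 0; i < n; i++)
			printf("%-40s %" PRIu64 "\n",
			       names[values[i].id].name, values[i].value);
	}
	free(names);
	free(values);
}
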
+ */ +static void +ixgbe_disable_intr(struct ixgbe_hw *hw) +{ + PMD_INIT_FUNC_TRACE(); + + if (hw->mac.type == ixgbe_mac_82598EB) { + IXGBE_WRITE_REG(hw, IXGBE_EIMC, ~0); + } else { + IXGBE_WRITE_REG(hw, IXGBE_EIMC, 0xFFFF0000); + IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), ~0); + IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), ~0); + } + IXGBE_WRITE_FLUSH(hw); +} + +/* + * This function resets queue statistics mapping registers. + * From Niantic datasheet, Initialization of Statistics section: + * "...if software requires the queue counters, the RQSMR and TQSM registers + * must be re-programmed following a device reset. + */ +static void +ixgbe_reset_qstat_mappings(struct ixgbe_hw *hw) +{ + uint32_t i; + + for (i = 0; i != IXGBE_NB_STAT_MAPPING_REGS; i++) { + IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), 0); + IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), 0); + } +} + + +static int +ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev, + uint16_t queue_id, + uint8_t stat_idx, + uint8_t is_rx) +{ +#define QSM_REG_NB_BITS_PER_QMAP_FIELD 8 +#define NB_QMAP_FIELDS_PER_QSM_REG 4 +#define QMAP_FIELD_RESERVED_BITS_MASK 0x0f + + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); + struct ixgbe_stat_mapping_registers *stat_mappings = + IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(eth_dev->data->dev_private); + uint32_t qsmr_mask = 0; + uint32_t clearing_mask = QMAP_FIELD_RESERVED_BITS_MASK; + uint32_t q_map; + uint8_t n, offset; + + if ((hw->mac.type != ixgbe_mac_82599EB) && + (hw->mac.type != ixgbe_mac_X540) && + (hw->mac.type != ixgbe_mac_X550) && + (hw->mac.type != ixgbe_mac_X550EM_x) && + (hw->mac.type != ixgbe_mac_X550EM_a)) + return -ENOSYS; + + PMD_INIT_LOG(DEBUG, "Setting port %d, %s queue_id %d to stat index %d", + (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX", + queue_id, stat_idx); + + n = (uint8_t)(queue_id / NB_QMAP_FIELDS_PER_QSM_REG); + if (n >= IXGBE_NB_STAT_MAPPING_REGS) { + PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded"); + return -EIO; + } + offset = (uint8_t)(queue_id % NB_QMAP_FIELDS_PER_QSM_REG); + + /* Now clear any previous stat_idx set */ + clearing_mask <<= (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset); + if (!is_rx) + stat_mappings->tqsm[n] &= ~clearing_mask; + else + stat_mappings->rqsmr[n] &= ~clearing_mask; + + q_map = (uint32_t)stat_idx; + q_map &= QMAP_FIELD_RESERVED_BITS_MASK; + qsmr_mask = q_map << (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset); + if (!is_rx) + stat_mappings->tqsm[n] |= qsmr_mask; + else + stat_mappings->rqsmr[n] |= qsmr_mask; + + PMD_INIT_LOG(DEBUG, "Set port %d, %s queue_id %d to stat index %d", + (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX", + queue_id, stat_idx); + PMD_INIT_LOG(DEBUG, "%s[%d] = 0x%08x", is_rx ? "RQSMR" : "TQSM", n, + is_rx ? 
stat_mappings->rqsmr[n] : stat_mappings->tqsm[n]); + + /* Now write the mapping in the appropriate register */ + if (is_rx) { + PMD_INIT_LOG(DEBUG, "Write 0x%x to RX IXGBE stat mapping reg:%d", + stat_mappings->rqsmr[n], n); + IXGBE_WRITE_REG(hw, IXGBE_RQSMR(n), stat_mappings->rqsmr[n]); + } else { + PMD_INIT_LOG(DEBUG, "Write 0x%x to TX IXGBE stat mapping reg:%d", + stat_mappings->tqsm[n], n); + IXGBE_WRITE_REG(hw, IXGBE_TQSM(n), stat_mappings->tqsm[n]); + } + return 0; +} + +static void +ixgbe_restore_statistics_mapping(struct rte_eth_dev *dev) +{ + struct ixgbe_stat_mapping_registers *stat_mappings = + IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(dev->data->dev_private); + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int i; + + /* write whatever was in stat mapping table to the NIC */ + for (i = 0; i < IXGBE_NB_STAT_MAPPING_REGS; i++) { + /* rx */ + IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), stat_mappings->rqsmr[i]); + + /* tx */ + IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), stat_mappings->tqsm[i]); + } +} + +static void +ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config) +{ + uint8_t i; + struct ixgbe_dcb_tc_config *tc; + uint8_t dcb_max_tc = IXGBE_DCB_MAX_TRAFFIC_CLASS; + + dcb_config->num_tcs.pg_tcs = dcb_max_tc; + dcb_config->num_tcs.pfc_tcs = dcb_max_tc; + for (i = 0; i < dcb_max_tc; i++) { + tc = &dcb_config->tc_config[i]; + tc->path[IXGBE_DCB_TX_CONFIG].bwg_id = i; + tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = + (uint8_t)(100/dcb_max_tc + (i & 1)); + tc->path[IXGBE_DCB_RX_CONFIG].bwg_id = i; + tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent = + (uint8_t)(100/dcb_max_tc + (i & 1)); + tc->pfc = ixgbe_dcb_pfc_disabled; + } + + /* Initialize default user to priority mapping, UPx->TC0 */ + tc = &dcb_config->tc_config[0]; + tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF; + tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF; + for (i = 0; i < IXGBE_DCB_MAX_BW_GROUP; i++) { + dcb_config->bw_percentage[IXGBE_DCB_TX_CONFIG][i] = 100; + dcb_config->bw_percentage[IXGBE_DCB_RX_CONFIG][i] = 100; + } + dcb_config->rx_pba_cfg = ixgbe_dcb_pba_equal; + dcb_config->pfc_mode_enable = false; + dcb_config->vt_mode = true; + dcb_config->round_robin_enable = false; + /* support all DCB capabilities in 82599 */ + dcb_config->support.capabilities = 0xFF; + + /*we only support 4 Tcs for X540, X550 */ + if (hw->mac.type == ixgbe_mac_X540 || + hw->mac.type == ixgbe_mac_X550 || + hw->mac.type == ixgbe_mac_X550EM_x || + hw->mac.type == ixgbe_mac_X550EM_a) { + dcb_config->num_tcs.pg_tcs = 4; + dcb_config->num_tcs.pfc_tcs = 4; + } +} + +/* + * Ensure that all locks are released before first NVM or PHY access + */ +static void +ixgbe_swfw_lock_reset(struct ixgbe_hw *hw) +{ + uint16_t mask; + + /* + * Phy lock should not fail in this early stage. If this is the case, + * it is due to an improper exit of the application. + * So force the release of the faulty lock. Release of common lock + * is done automatically by swfw_sync function. + */ + mask = IXGBE_GSSR_PHY0_SM << hw->bus.func; + if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) { + PMD_DRV_LOG(DEBUG, "SWFW phy%d lock released", hw->bus.func); + } + ixgbe_release_swfw_semaphore(hw, mask); + + /* + * These ones are more tricky since they are common to all ports; but + * swfw_sync retries last long enough (1s) to be almost sure that if + * lock can not be taken it is due to an improper lock of the + * semaphore. 
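
ixgbe_dev_queue_stats_mapping_set() above packs one 8-bit stat index per queue, four fields per RQSMR/TQSM register, and ixgbe_restore_statistics_mapping() replays the cached values after a reset. A hedged sketch of the generic calls that end up in that callback (port and queue numbers are illustrative):

#include <rte_ethdev.h>

/* Map RX queue 5 and TX queue 5 onto per-queue stats slot 2.
 * Internally: register n = 5 / 4 = 1, field = 5 % 4 = 1,
 * shifted by 8 bits per field into RQSMR(1)/TQSM(1). */
static int
map_queue_stats(uint16_t port_id)
{
	int ret;

	ret = rte_eth_dev_set_rx_queue_stats_mapping(port_id, 5, 2);
	if (ret != 0)
		return ret;

	return rte_eth_dev_set_tx_queue_stats_mapping(port_id, 5, 2);
}
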
+ */ + mask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_MAC_CSR_SM | IXGBE_GSSR_SW_MNG_SM; + if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) { + PMD_DRV_LOG(DEBUG, "SWFW common locks released"); + } + ixgbe_release_swfw_semaphore(hw, mask); +} + +/* + * This function is based on code in ixgbe_attach() in base/ixgbe.c. + * It returns 0 on success. + */ +static int +eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused) +{ + struct ixgbe_adapter *ad = eth_dev->data->dev_private; + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); + struct ixgbe_vfta *shadow_vfta = + IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private); + struct ixgbe_hwstrip *hwstrip = + IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private); + struct ixgbe_dcb_config *dcb_config = + IXGBE_DEV_PRIVATE_TO_DCB_CFG(eth_dev->data->dev_private); + struct ixgbe_filter_info *filter_info = + IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private); + struct ixgbe_bw_conf *bw_conf = + IXGBE_DEV_PRIVATE_TO_BW_CONF(eth_dev->data->dev_private); + uint32_t ctrl_ext; + uint16_t csum; + int diag, i; + + PMD_INIT_FUNC_TRACE(); + + ixgbe_dev_macsec_setting_reset(eth_dev); + + eth_dev->dev_ops = &ixgbe_eth_dev_ops; + eth_dev->rx_pkt_burst = &ixgbe_recv_pkts; + eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts; + eth_dev->tx_pkt_prepare = &ixgbe_prep_pkts; + + /* + * For secondary processes, we don't initialise any further as primary + * has already done this work. Only check we don't need a different + * RX and TX function. + */ + if (rte_eal_process_type() != RTE_PROC_PRIMARY) { + struct ixgbe_tx_queue *txq; + /* TX queue function in primary, set by last queue initialized + * Tx queue may not initialized by primary process + */ + if (eth_dev->data->tx_queues) { + txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues-1]; + ixgbe_set_tx_function(eth_dev, txq); + } else { + /* Use default TX function if we get here */ + PMD_INIT_LOG(NOTICE, "No TX queues configured yet. " + "Using default TX function."); + } + + ixgbe_set_rx_function(eth_dev); + + return 0; + } + + rte_atomic32_clear(&ad->link_thread_running); + rte_eth_copy_pci_info(eth_dev, pci_dev); + + /* Vendor and Device ID need to be set before init of shared code */ + hw->device_id = pci_dev->id.device_id; + hw->vendor_id = pci_dev->id.vendor_id; + hw->hw_addr = (void *)pci_dev->mem_resource[0].addr; + hw->allow_unsupported_sfp = 1; + + /* Initialize the shared code (base driver) */ +#ifdef RTE_LIBRTE_IXGBE_BYPASS + diag = ixgbe_bypass_init_shared_code(hw); +#else + diag = ixgbe_init_shared_code(hw); +#endif /* RTE_LIBRTE_IXGBE_BYPASS */ + + if (diag != IXGBE_SUCCESS) { + PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag); + return -EIO; + } + + if (hw->mac.ops.fw_recovery_mode && hw->mac.ops.fw_recovery_mode(hw)) { + PMD_INIT_LOG(ERR, "\nERROR: " + "Firmware recovery mode detected. 
Limiting functionality.\n" + "Refer to the Intel(R) Ethernet Adapters and Devices " + "User Guide for details on firmware recovery mode."); + return -EIO; + } + + /* pick up the PCI bus settings for reporting later */ + ixgbe_get_bus_info(hw); + + /* Unlock any pending hardware semaphore */ + ixgbe_swfw_lock_reset(hw); + +#ifdef RTE_LIBRTE_SECURITY + /* Initialize security_ctx only for primary process*/ + if (ixgbe_ipsec_ctx_create(eth_dev)) + return -ENOMEM; +#endif + + /* Initialize DCB configuration*/ + memset(dcb_config, 0, sizeof(struct ixgbe_dcb_config)); + ixgbe_dcb_init(hw, dcb_config); + /* Get Hardware Flow Control setting */ + hw->fc.requested_mode = ixgbe_fc_none; + hw->fc.current_mode = ixgbe_fc_none; + hw->fc.pause_time = IXGBE_FC_PAUSE; + for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + hw->fc.low_water[i] = IXGBE_FC_LO; + hw->fc.high_water[i] = IXGBE_FC_HI; + } + hw->fc.send_xon = 1; + + /* Make sure we have a good EEPROM before we read from it */ + diag = ixgbe_validate_eeprom_checksum(hw, &csum); + if (diag != IXGBE_SUCCESS) { + PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", diag); + return -EIO; + } + +#ifdef RTE_LIBRTE_IXGBE_BYPASS + diag = ixgbe_bypass_init_hw(hw); +#else + diag = ixgbe_init_hw(hw); +#endif /* RTE_LIBRTE_IXGBE_BYPASS */ + + /* + * Devices with copper phys will fail to initialise if ixgbe_init_hw() + * is called too soon after the kernel driver unbinding/binding occurs. + * The failure occurs in ixgbe_identify_phy_generic() for all devices, + * but for non-copper devies, ixgbe_identify_sfp_module_generic() is + * also called. See ixgbe_identify_phy_82599(). The reason for the + * failure is not known, and only occuts when virtualisation features + * are disabled in the bios. A delay of 100ms was found to be enough by + * trial-and-error, and is doubled to be safe. + */ + if (diag && (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)) { + rte_delay_ms(200); + diag = ixgbe_init_hw(hw); + } + + if (diag == IXGBE_ERR_SFP_NOT_PRESENT) + diag = IXGBE_SUCCESS; + + if (diag == IXGBE_ERR_EEPROM_VERSION) { + PMD_INIT_LOG(ERR, "This device is a pre-production adapter/" + "LOM. 
Please be aware there may be issues associated " + "with your hardware."); + PMD_INIT_LOG(ERR, "If you are experiencing problems " + "please contact your Intel or hardware representative " + "who provided you with this hardware."); + } else if (diag == IXGBE_ERR_SFP_NOT_SUPPORTED) + PMD_INIT_LOG(ERR, "Unsupported SFP+ Module"); + if (diag) { + PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", diag); + return -EIO; + } + + /* Reset the hw statistics */ + ixgbe_dev_stats_reset(eth_dev); + + /* disable interrupt */ + ixgbe_disable_intr(hw); + + /* reset mappings for queue statistics hw counters*/ + ixgbe_reset_qstat_mappings(hw); + + /* Allocate memory for storing MAC addresses */ + eth_dev->data->mac_addrs = rte_zmalloc("ixgbe", RTE_ETHER_ADDR_LEN * + hw->mac.num_rar_entries, 0); + if (eth_dev->data->mac_addrs == NULL) { + PMD_INIT_LOG(ERR, + "Failed to allocate %u bytes needed to store " + "MAC addresses", + RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries); + return -ENOMEM; + } + /* Copy the permanent MAC address */ + rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr, + ð_dev->data->mac_addrs[0]); + + /* Allocate memory for storing hash filter MAC addresses */ + eth_dev->data->hash_mac_addrs = rte_zmalloc( + "ixgbe", RTE_ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC, 0); + if (eth_dev->data->hash_mac_addrs == NULL) { + PMD_INIT_LOG(ERR, + "Failed to allocate %d bytes needed to store MAC addresses", + RTE_ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC); + return -ENOMEM; + } + + /* Pass the information to the rte_eth_dev_close() that it should also + * release the private port resources. + */ + eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE; + + /* initialize the vfta */ + memset(shadow_vfta, 0, sizeof(*shadow_vfta)); + + /* initialize the hw strip bitmap*/ + memset(hwstrip, 0, sizeof(*hwstrip)); + + /* initialize PF if max_vfs not zero */ + ixgbe_pf_host_init(eth_dev); + + ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); + /* let hardware know driver is loaded */ + ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD; + /* Set PF Reset Done bit so PF/VF Mail Ops can work */ + ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD; + IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); + IXGBE_WRITE_FLUSH(hw); + + if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present) + PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d, SFP+: %d", + (int) hw->mac.type, (int) hw->phy.type, + (int) hw->phy.sfp_type); + else + PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d", + (int) hw->mac.type, (int) hw->phy.type); + + PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x", + eth_dev->data->port_id, pci_dev->id.vendor_id, + pci_dev->id.device_id); + + rte_intr_callback_register(intr_handle, + ixgbe_dev_interrupt_handler, eth_dev); + + /* enable uio/vfio intr/eventfd mapping */ + rte_intr_enable(intr_handle); + + /* enable support intr */ + ixgbe_enable_intr(eth_dev); + + /* initialize filter info */ + memset(filter_info, 0, + sizeof(struct ixgbe_filter_info)); + + /* initialize 5tuple filter list */ + TAILQ_INIT(&filter_info->fivetuple_list); + + /* initialize flow director filter list & hash */ + ixgbe_fdir_filter_init(eth_dev); + + /* initialize l2 tunnel filter list & hash */ + ixgbe_l2_tn_filter_init(eth_dev); + + /* initialize flow filter lists */ + ixgbe_filterlist_init(); + + /* initialize bandwidth configuration info */ + memset(bw_conf, 0, sizeof(struct ixgbe_bw_conf)); + + /* initialize Traffic Manager configuration */ + ixgbe_tm_conf_init(eth_dev); + + return 0; +} + +static int +eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev) +{ + 
PMD_INIT_FUNC_TRACE(); + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; + + ixgbe_dev_close(eth_dev); + + return 0; +} + +static int ixgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev) +{ + struct ixgbe_filter_info *filter_info = + IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private); + struct ixgbe_5tuple_filter *p_5tuple; + + while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list))) { + TAILQ_REMOVE(&filter_info->fivetuple_list, + p_5tuple, + entries); + rte_free(p_5tuple); + } + memset(filter_info->fivetuple_mask, 0, + sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE); + + return 0; +} + +static int ixgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev) +{ + struct ixgbe_hw_fdir_info *fdir_info = + IXGBE_DEV_PRIVATE_TO_FDIR_INFO(eth_dev->data->dev_private); + struct ixgbe_fdir_filter *fdir_filter; + + if (fdir_info->hash_map) + rte_free(fdir_info->hash_map); + if (fdir_info->hash_handle) + rte_hash_free(fdir_info->hash_handle); + + while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) { + TAILQ_REMOVE(&fdir_info->fdir_list, + fdir_filter, + entries); + rte_free(fdir_filter); + } + + return 0; +} + +static int ixgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev) +{ + struct ixgbe_l2_tn_info *l2_tn_info = + IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(eth_dev->data->dev_private); + struct ixgbe_l2_tn_filter *l2_tn_filter; + + if (l2_tn_info->hash_map) + rte_free(l2_tn_info->hash_map); + if (l2_tn_info->hash_handle) + rte_hash_free(l2_tn_info->hash_handle); + + while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) { + TAILQ_REMOVE(&l2_tn_info->l2_tn_list, + l2_tn_filter, + entries); + rte_free(l2_tn_filter); + } + + return 0; +} + +static int ixgbe_fdir_filter_init(struct rte_eth_dev *eth_dev) +{ + struct ixgbe_hw_fdir_info *fdir_info = + IXGBE_DEV_PRIVATE_TO_FDIR_INFO(eth_dev->data->dev_private); + char fdir_hash_name[RTE_HASH_NAMESIZE]; + struct rte_hash_parameters fdir_hash_params = { + .name = fdir_hash_name, + .entries = IXGBE_MAX_FDIR_FILTER_NUM, + .key_len = sizeof(union ixgbe_atr_input), + .hash_func = rte_hash_crc, + .hash_func_init_val = 0, + .socket_id = rte_socket_id(), + }; + + TAILQ_INIT(&fdir_info->fdir_list); + snprintf(fdir_hash_name, RTE_HASH_NAMESIZE, + "fdir_%s", eth_dev->device->name); + fdir_info->hash_handle = rte_hash_create(&fdir_hash_params); + if (!fdir_info->hash_handle) { + PMD_INIT_LOG(ERR, "Failed to create fdir hash table!"); + return -EINVAL; + } + fdir_info->hash_map = rte_zmalloc("ixgbe", + sizeof(struct ixgbe_fdir_filter *) * + IXGBE_MAX_FDIR_FILTER_NUM, + 0); + if (!fdir_info->hash_map) { + PMD_INIT_LOG(ERR, + "Failed to allocate memory for fdir hash map!"); + return -ENOMEM; + } + fdir_info->mask_added = FALSE; + + return 0; +} + +static int ixgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev) +{ + struct ixgbe_l2_tn_info *l2_tn_info = + IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(eth_dev->data->dev_private); + char l2_tn_hash_name[RTE_HASH_NAMESIZE]; + struct rte_hash_parameters l2_tn_hash_params = { + .name = l2_tn_hash_name, + .entries = IXGBE_MAX_L2_TN_FILTER_NUM, + .key_len = sizeof(struct ixgbe_l2_tn_key), + .hash_func = rte_hash_crc, + .hash_func_init_val = 0, + .socket_id = rte_socket_id(), + }; + + TAILQ_INIT(&l2_tn_info->l2_tn_list); + snprintf(l2_tn_hash_name, RTE_HASH_NAMESIZE, + "l2_tn_%s", eth_dev->device->name); + l2_tn_info->hash_handle = rte_hash_create(&l2_tn_hash_params); + if (!l2_tn_info->hash_handle) { + PMD_INIT_LOG(ERR, "Failed to create L2 TN hash table!"); + return -EINVAL; + } + l2_tn_info->hash_map = 
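
The flow-director and L2-tunnel filter code above keeps each filter both on a TAILQ (for ordered restore after reset) and in an rte_hash table keyed on the filter's match fields (for constant-time lookup). A self-contained sketch of that pattern with a made-up key type rather than the driver's union ixgbe_atr_input:

#include <stdint.h>
#include <rte_hash.h>
#include <rte_hash_crc.h>
#include <rte_lcore.h>

struct demo_key {
	uint32_t dst_ip;
	uint16_t dst_port;
	uint16_t pad;
};

static struct rte_hash *
demo_hash_create(void)
{
	struct rte_hash_parameters params = {
		.name = "demo_filter_hash",
		.entries = 1024,
		.key_len = sizeof(struct demo_key),
		.hash_func = rte_hash_crc,
		.hash_func_init_val = 0,
		.socket_id = rte_socket_id(),
	};

	return rte_hash_create(&params);
}

static int
demo_hash_use(struct rte_hash *h)
{
	struct demo_key key = { .dst_ip = 0x0a000001, .dst_port = 80 };
	int pos;

	pos = rte_hash_add_key(h, &key);	/* slot to store the filter at */
	if (pos < 0)
		return pos;

	return rte_hash_lookup(h, &key);	/* returns the same slot */
}
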
rte_zmalloc("ixgbe", + sizeof(struct ixgbe_l2_tn_filter *) * + IXGBE_MAX_L2_TN_FILTER_NUM, + 0); + if (!l2_tn_info->hash_map) { + PMD_INIT_LOG(ERR, + "Failed to allocate memory for L2 TN hash map!"); + return -ENOMEM; + } + l2_tn_info->e_tag_en = FALSE; + l2_tn_info->e_tag_fwd_en = FALSE; + l2_tn_info->e_tag_ether_type = RTE_ETHER_TYPE_ETAG; + + return 0; +} +/* + * Negotiate mailbox API version with the PF. + * After reset API version is always set to the basic one (ixgbe_mbox_api_10). + * Then we try to negotiate starting with the most recent one. + * If all negotiation attempts fail, then we will proceed with + * the default one (ixgbe_mbox_api_10). + */ +static void +ixgbevf_negotiate_api(struct ixgbe_hw *hw) +{ + int32_t i; + + /* start with highest supported, proceed down */ + static const enum ixgbe_pfvf_api_rev sup_ver[] = { + ixgbe_mbox_api_13, + ixgbe_mbox_api_12, + ixgbe_mbox_api_11, + ixgbe_mbox_api_10, + }; + + for (i = 0; + i != RTE_DIM(sup_ver) && + ixgbevf_negotiate_api_version(hw, sup_ver[i]) != 0; + i++) + ; +} + +static void +generate_random_mac_addr(struct rte_ether_addr *mac_addr) +{ + uint64_t random; + + /* Set Organizationally Unique Identifier (OUI) prefix. */ + mac_addr->addr_bytes[0] = 0x00; + mac_addr->addr_bytes[1] = 0x09; + mac_addr->addr_bytes[2] = 0xC0; + /* Force indication of locally assigned MAC address. */ + mac_addr->addr_bytes[0] |= RTE_ETHER_LOCAL_ADMIN_ADDR; + /* Generate the last 3 bytes of the MAC address with a random number. */ + random = rte_rand(); + memcpy(&mac_addr->addr_bytes[3], &random, 3); +} + +static int +devarg_handle_int(__rte_unused const char *key, const char *value, + void *extra_args) +{ + uint16_t *n = extra_args; + + if (value == NULL || extra_args == NULL) + return -EINVAL; + + *n = (uint16_t)strtoul(value, NULL, 0); + if (*n == USHRT_MAX && errno == ERANGE) + return -1; + + return 0; +} + +static void +ixgbevf_parse_devargs(struct ixgbe_adapter *adapter, + struct rte_devargs *devargs) +{ + struct rte_kvargs *kvlist; + uint16_t pflink_fullchk; + + if (devargs == NULL) + return; + + kvlist = rte_kvargs_parse(devargs->args, ixgbevf_valid_arguments); + if (kvlist == NULL) + return; + + if (rte_kvargs_count(kvlist, IXGBEVF_DEVARG_PFLINK_FULLCHK) == 1 && + rte_kvargs_process(kvlist, IXGBEVF_DEVARG_PFLINK_FULLCHK, + devarg_handle_int, &pflink_fullchk) == 0 && + pflink_fullchk == 1) + adapter->pflink_fullchk = 1; + + rte_kvargs_free(kvlist); +} + +/* + * Virtual Function device init + */ +static int +eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev) +{ + int diag; + uint32_t tc, tcs; + struct ixgbe_adapter *ad = eth_dev->data->dev_private; + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); + struct ixgbe_vfta *shadow_vfta = + IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private); + struct ixgbe_hwstrip *hwstrip = + IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private); + struct rte_ether_addr *perm_addr = + (struct rte_ether_addr *)hw->mac.perm_addr; + + PMD_INIT_FUNC_TRACE(); + + eth_dev->dev_ops = &ixgbevf_eth_dev_ops; + eth_dev->rx_pkt_burst = &ixgbe_recv_pkts; + eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts; + + /* for secondary processes, we don't initialise any further as primary + * has already done this work. 
Only check we don't need a different + * RX function + */ + if (rte_eal_process_type() != RTE_PROC_PRIMARY) { + struct ixgbe_tx_queue *txq; + /* TX queue function in primary, set by last queue initialized + * Tx queue may not initialized by primary process + */ + if (eth_dev->data->tx_queues) { + txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues - 1]; + ixgbe_set_tx_function(eth_dev, txq); + } else { + /* Use default TX function if we get here */ + PMD_INIT_LOG(NOTICE, + "No TX queues configured yet. Using default TX function."); + } + + ixgbe_set_rx_function(eth_dev); + + return 0; + } + + rte_atomic32_clear(&ad->link_thread_running); + ixgbevf_parse_devargs(eth_dev->data->dev_private, + pci_dev->device.devargs); + + rte_eth_copy_pci_info(eth_dev, pci_dev); + + hw->device_id = pci_dev->id.device_id; + hw->vendor_id = pci_dev->id.vendor_id; + hw->hw_addr = (void *)pci_dev->mem_resource[0].addr; + + /* initialize the vfta */ + memset(shadow_vfta, 0, sizeof(*shadow_vfta)); + + /* initialize the hw strip bitmap*/ + memset(hwstrip, 0, sizeof(*hwstrip)); + + /* Initialize the shared code (base driver) */ + diag = ixgbe_init_shared_code(hw); + if (diag != IXGBE_SUCCESS) { + PMD_INIT_LOG(ERR, "Shared code init failed for ixgbevf: %d", diag); + return -EIO; + } + + /* init_mailbox_params */ + hw->mbx.ops.init_params(hw); + + /* Reset the hw statistics */ + ixgbevf_dev_stats_reset(eth_dev); + + /* Disable the interrupts for VF */ + ixgbevf_intr_disable(eth_dev); + + hw->mac.num_rar_entries = 128; /* The MAX of the underlying PF */ + diag = hw->mac.ops.reset_hw(hw); + + /* + * The VF reset operation returns the IXGBE_ERR_INVALID_MAC_ADDR when + * the underlying PF driver has not assigned a MAC address to the VF. + * In this case, assign a random MAC address. + */ + if ((diag != IXGBE_SUCCESS) && (diag != IXGBE_ERR_INVALID_MAC_ADDR)) { + PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag); + /* + * This error code will be propagated to the app by + * rte_eth_dev_reset, so use a public error code rather than + * the internal-only IXGBE_ERR_RESET_FAILED + */ + return -EAGAIN; + } + + /* negotiate mailbox API version to use with the PF. */ + ixgbevf_negotiate_api(hw); + + /* Get Rx/Tx queue count via mailbox, which is ready after reset_hw */ + ixgbevf_get_queues(hw, &tcs, &tc); + + /* Allocate memory for storing MAC addresses */ + eth_dev->data->mac_addrs = rte_zmalloc("ixgbevf", RTE_ETHER_ADDR_LEN * + hw->mac.num_rar_entries, 0); + if (eth_dev->data->mac_addrs == NULL) { + PMD_INIT_LOG(ERR, + "Failed to allocate %u bytes needed to store " + "MAC addresses", + RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries); + return -ENOMEM; + } + + /* Pass the information to the rte_eth_dev_close() that it should also + * release the private port resources. + */ + eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE; + + /* Generate a random MAC address, if none was assigned by PF. 
*/ + if (rte_is_zero_ether_addr(perm_addr)) { + generate_random_mac_addr(perm_addr); + diag = ixgbe_set_rar_vf(hw, 1, perm_addr->addr_bytes, 0, 1); + if (diag) { + rte_free(eth_dev->data->mac_addrs); + eth_dev->data->mac_addrs = NULL; + return diag; + } + PMD_INIT_LOG(INFO, "\tVF MAC address not assigned by Host PF"); + PMD_INIT_LOG(INFO, "\tAssign randomly generated MAC address " + "%02x:%02x:%02x:%02x:%02x:%02x", + perm_addr->addr_bytes[0], + perm_addr->addr_bytes[1], + perm_addr->addr_bytes[2], + perm_addr->addr_bytes[3], + perm_addr->addr_bytes[4], + perm_addr->addr_bytes[5]); + } + + /* Copy the permanent MAC address */ + rte_ether_addr_copy(perm_addr, ð_dev->data->mac_addrs[0]); + + /* reset the hardware with the new settings */ + diag = hw->mac.ops.start_hw(hw); + switch (diag) { + case 0: + break; + + default: + PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag); + return -EIO; + } + + rte_intr_callback_register(intr_handle, + ixgbevf_dev_interrupt_handler, eth_dev); + rte_intr_enable(intr_handle); + ixgbevf_intr_enable(eth_dev); + + PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x mac.type=%s", + eth_dev->data->port_id, pci_dev->id.vendor_id, + pci_dev->id.device_id, "ixgbe_mac_82599_vf"); + + return 0; +} + +/* Virtual Function device uninit */ + +static int +eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev) +{ + PMD_INIT_FUNC_TRACE(); + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; + + ixgbevf_dev_close(eth_dev); + + return 0; +} + +static int +eth_ixgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, + struct rte_pci_device *pci_dev) +{ + char name[RTE_ETH_NAME_MAX_LEN]; + struct rte_eth_dev *pf_ethdev; + struct rte_eth_devargs eth_da; + int i, retval; + + if (pci_dev->device.devargs) { + retval = rte_eth_devargs_parse(pci_dev->device.devargs->args, + ð_da); + if (retval) + return retval; + } else + memset(ð_da, 0, sizeof(eth_da)); + + retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name, + sizeof(struct ixgbe_adapter), + eth_dev_pci_specific_init, pci_dev, + eth_ixgbe_dev_init, NULL); + + if (retval || eth_da.nb_representor_ports < 1) + return retval; + + pf_ethdev = rte_eth_dev_allocated(pci_dev->device.name); + if (pf_ethdev == NULL) + return -ENODEV; + + /* probe VF representor ports */ + for (i = 0; i < eth_da.nb_representor_ports; i++) { + struct ixgbe_vf_info *vfinfo; + struct ixgbe_vf_representor representor; + + vfinfo = *IXGBE_DEV_PRIVATE_TO_P_VFDATA( + pf_ethdev->data->dev_private); + if (vfinfo == NULL) { + PMD_DRV_LOG(ERR, + "no virtual functions supported by PF"); + break; + } + + representor.vf_id = eth_da.representor_ports[i]; + representor.switch_domain_id = vfinfo->switch_domain_id; + representor.pf_ethdev = pf_ethdev; + + /* representor port net_bdf_port */ + snprintf(name, sizeof(name), "net_%s_representor_%d", + pci_dev->device.name, + eth_da.representor_ports[i]); + + retval = rte_eth_dev_create(&pci_dev->device, name, + sizeof(struct ixgbe_vf_representor), NULL, NULL, + ixgbe_vf_representor_init, &representor); + + if (retval) + PMD_DRV_LOG(ERR, "failed to create ixgbe vf " + "representor %s.", name); + } + + return 0; +} + +static int eth_ixgbe_pci_remove(struct rte_pci_device *pci_dev) +{ + struct rte_eth_dev *ethdev; + + ethdev = rte_eth_dev_allocated(pci_dev->device.name); + if (!ethdev) + return 0; + + if (ethdev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR) + return rte_eth_dev_pci_generic_remove(pci_dev, + ixgbe_vf_representor_uninit); + else + return rte_eth_dev_pci_generic_remove(pci_dev, + 
eth_ixgbe_dev_uninit); +} + +static struct rte_pci_driver rte_ixgbe_pmd = { + .id_table = pci_id_ixgbe_map, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, + .probe = eth_ixgbe_pci_probe, + .remove = eth_ixgbe_pci_remove, +}; + +static int eth_ixgbevf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, + struct rte_pci_device *pci_dev) +{ + return rte_eth_dev_pci_generic_probe(pci_dev, + sizeof(struct ixgbe_adapter), eth_ixgbevf_dev_init); +} + +static int eth_ixgbevf_pci_remove(struct rte_pci_device *pci_dev) +{ + return rte_eth_dev_pci_generic_remove(pci_dev, eth_ixgbevf_dev_uninit); +} + +/* + * virtual function driver struct + */ +static struct rte_pci_driver rte_ixgbevf_pmd = { + .id_table = pci_id_ixgbevf_map, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING, + .probe = eth_ixgbevf_pci_probe, + .remove = eth_ixgbevf_pci_remove, +}; + +static int +ixgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) +{ + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_vfta *shadow_vfta = + IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); + uint32_t vfta; + uint32_t vid_idx; + uint32_t vid_bit; + + vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F); + vid_bit = (uint32_t) (1 << (vlan_id & 0x1F)); + vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(vid_idx)); + if (on) + vfta |= vid_bit; + else + vfta &= ~vid_bit; + IXGBE_WRITE_REG(hw, IXGBE_VFTA(vid_idx), vfta); + + /* update local VFTA copy */ + shadow_vfta->vfta[vid_idx] = vfta; + + return 0; +} + +static void +ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on) +{ + if (on) + ixgbe_vlan_hw_strip_enable(dev, queue); + else + ixgbe_vlan_hw_strip_disable(dev, queue); +} + +static int +ixgbe_vlan_tpid_set(struct rte_eth_dev *dev, + enum rte_vlan_type vlan_type, + uint16_t tpid) +{ + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int ret = 0; + uint32_t reg; + uint32_t qinq; + + qinq = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); + qinq &= IXGBE_DMATXCTL_GDV; + + switch (vlan_type) { + case ETH_VLAN_TYPE_INNER: + if (qinq) { + reg = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); + reg = (reg & (~IXGBE_VLNCTRL_VET)) | (uint32_t)tpid; + IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, reg); + reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); + reg = (reg & (~IXGBE_DMATXCTL_VT_MASK)) + | ((uint32_t)tpid << IXGBE_DMATXCTL_VT_SHIFT); + IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg); + } else { + ret = -ENOTSUP; + PMD_DRV_LOG(ERR, "Inner type is not supported" + " by single VLAN"); + } + break; + case ETH_VLAN_TYPE_OUTER: + if (qinq) { + /* Only the high 16-bits is valid */ + IXGBE_WRITE_REG(hw, IXGBE_EXVET, (uint32_t)tpid << + IXGBE_EXVET_VET_EXT_SHIFT); + } else { + reg = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); + reg = (reg & (~IXGBE_VLNCTRL_VET)) | (uint32_t)tpid; + IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, reg); + reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); + reg = (reg & (~IXGBE_DMATXCTL_VT_MASK)) + | ((uint32_t)tpid << IXGBE_DMATXCTL_VT_SHIFT); + IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg); + } + + break; + default: + ret = -EINVAL; + PMD_DRV_LOG(ERR, "Unsupported VLAN type %d", vlan_type); + break; + } + + return ret; +} + +void +ixgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t vlnctrl; + + PMD_INIT_FUNC_TRACE(); + + /* Filter Table Disable */ + vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); + vlnctrl &= ~IXGBE_VLNCTRL_VFE; + + IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); +} + +void +ixgbe_vlan_hw_filter_enable(struct 
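
ixgbe_vlan_filter_set() above addresses the 4096-entry VLAN filter table as 128 32-bit VFTA registers: bits 11:5 of the VLAN ID select the register, bits 4:0 select the bit inside it. A tiny standalone illustration of that indexing (no hardware access; names are illustrative):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* VFTA: 128 registers x 32 bits = 4096 VLAN IDs. */
static void
vfta_position(uint16_t vlan_id, uint32_t *reg_idx, uint32_t *bit_mask)
{
	*reg_idx = (vlan_id >> 5) & 0x7F;	/* which VFTA register  */
	*bit_mask = 1u << (vlan_id & 0x1F);	/* which bit inside it  */
}

int
main(void)
{
	uint32_t idx, mask;

	vfta_position(1234, &idx, &mask);
	/* VLAN 1234 -> register 38, bit 18 (mask 0x00040000). */
	printf("idx=%" PRIu32 " mask=0x%08" PRIx32 "\n", idx, mask);
	return 0;
}
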
rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_vfta *shadow_vfta = + IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); + uint32_t vlnctrl; + uint16_t i; + + PMD_INIT_FUNC_TRACE(); + + /* Filter Table Enable */ + vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); + vlnctrl &= ~IXGBE_VLNCTRL_CFIEN; + vlnctrl |= IXGBE_VLNCTRL_VFE; + + IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); + + /* write whatever is in local vfta copy */ + for (i = 0; i < IXGBE_VFTA_SIZE; i++) + IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), shadow_vfta->vfta[i]); +} + +static void +ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on) +{ + struct ixgbe_hwstrip *hwstrip = + IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(dev->data->dev_private); + struct ixgbe_rx_queue *rxq; + + if (queue >= IXGBE_MAX_RX_QUEUE_NUM) + return; + + if (on) + IXGBE_SET_HWSTRIP(hwstrip, queue); + else + IXGBE_CLEAR_HWSTRIP(hwstrip, queue); + + if (queue >= dev->data->nb_rx_queues) + return; + + rxq = dev->data->rx_queues[queue]; + + if (on) { + rxq->vlan_flags = PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED; + rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP; + } else { + rxq->vlan_flags = PKT_RX_VLAN; + rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP; + } +} + +static void +ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue) +{ + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t ctrl; + + PMD_INIT_FUNC_TRACE(); + + if (hw->mac.type == ixgbe_mac_82598EB) { + /* No queue level support */ + PMD_INIT_LOG(NOTICE, "82598EB not support queue level hw strip"); + return; + } + + /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */ + ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue)); + ctrl &= ~IXGBE_RXDCTL_VME; + IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl); + + /* record those setting for HW strip per queue */ + ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 0); +} + +static void +ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue) +{ + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t ctrl; + + PMD_INIT_FUNC_TRACE(); + + if (hw->mac.type == ixgbe_mac_82598EB) { + /* No queue level supported */ + PMD_INIT_LOG(NOTICE, "82598EB not support queue level hw strip"); + return; + } + + /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */ + ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue)); + ctrl |= IXGBE_RXDCTL_VME; + IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl); + + /* record those setting for HW strip per queue */ + ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 1); +} + +static void +ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t ctrl; + + PMD_INIT_FUNC_TRACE(); + + /* DMATXCTRL: Geric Double VLAN Disable */ + ctrl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); + ctrl &= ~IXGBE_DMATXCTL_GDV; + IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, ctrl); + + /* CTRL_EXT: Global Double VLAN Disable */ + ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); + ctrl &= ~IXGBE_EXTENDED_VLAN; + IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl); + +} + +static void +ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t ctrl; + + PMD_INIT_FUNC_TRACE(); + + /* DMATXCTRL: Geric Double VLAN Enable */ + ctrl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); + ctrl |= IXGBE_DMATXCTL_GDV; + IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, ctrl); + + /* CTRL_EXT: Global Double VLAN Enable */ + 
ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); + ctrl |= IXGBE_EXTENDED_VLAN; + IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl); + + /* Clear pooling mode of PFVTCTL. It's required by X550. */ + if (hw->mac.type == ixgbe_mac_X550 || + hw->mac.type == ixgbe_mac_X550EM_x || + hw->mac.type == ixgbe_mac_X550EM_a) { + ctrl = IXGBE_READ_REG(hw, IXGBE_VT_CTL); + ctrl &= ~IXGBE_VT_CTL_POOLING_MODE_MASK; + IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, ctrl); + } + + /* + * VET EXT field in the EXVET register = 0x8100 by default + * So no need to change. Same to VT field of DMATXCTL register + */ +} + +void +ixgbe_vlan_hw_strip_config(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode; + uint32_t ctrl; + uint16_t i; + struct ixgbe_rx_queue *rxq; + bool on; + + PMD_INIT_FUNC_TRACE(); + + if (hw->mac.type == ixgbe_mac_82598EB) { + if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) { + ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); + ctrl |= IXGBE_VLNCTRL_VME; + IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl); + } else { + ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); + ctrl &= ~IXGBE_VLNCTRL_VME; + IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl); + } + } else { + /* + * Other 10G NIC, the VLAN strip can be setup + * per queue in RXDCTL + */ + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = dev->data->rx_queues[i]; + ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx)); + if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) { + ctrl |= IXGBE_RXDCTL_VME; + on = TRUE; + } else { + ctrl &= ~IXGBE_RXDCTL_VME; + on = FALSE; + } + IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), ctrl); + + /* record those setting for HW strip per queue */ + ixgbe_vlan_hw_strip_bitmap_set(dev, i, on); + } + } +} + +static void +ixgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask) +{ + uint16_t i; + struct rte_eth_rxmode *rxmode; + struct ixgbe_rx_queue *rxq; + + if (mask & ETH_VLAN_STRIP_MASK) { + rxmode = &dev->data->dev_conf.rxmode; + if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = dev->data->rx_queues[i]; + rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP; + } + else + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = dev->data->rx_queues[i]; + rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP; + } + } +} + +static int +ixgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask) +{ + struct rte_eth_rxmode *rxmode; + rxmode = &dev->data->dev_conf.rxmode; + + if (mask & ETH_VLAN_STRIP_MASK) { + ixgbe_vlan_hw_strip_config(dev); + } + + if (mask & ETH_VLAN_FILTER_MASK) { + if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) + ixgbe_vlan_hw_filter_enable(dev); + else + ixgbe_vlan_hw_filter_disable(dev); + } + + if (mask & ETH_VLAN_EXTEND_MASK) { + if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) + ixgbe_vlan_hw_extend_enable(dev); + else + ixgbe_vlan_hw_extend_disable(dev); + } + + return 0; +} + +static int +ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask) +{ + ixgbe_config_vlan_strip_on_all_queues(dev, mask); + + ixgbe_vlan_offload_config(dev, mask); + + return 0; +} + +static void +ixgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + /* VLNCTRL: enable vlan filtering and allow all vlan tags through */ + uint32_t vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); + + vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */ + IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl); +} + +static int 
+ixgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q) +{ + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + + switch (nb_rx_q) { + case 1: + case 2: + RTE_ETH_DEV_SRIOV(dev).active = ETH_64_POOLS; + break; + case 4: + RTE_ETH_DEV_SRIOV(dev).active = ETH_32_POOLS; + break; + default: + return -EINVAL; + } + + RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = + IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active; + RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx = + pci_dev->max_vfs * RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool; + return 0; +} + +static int +ixgbe_check_mq_mode(struct rte_eth_dev *dev) +{ + struct rte_eth_conf *dev_conf = &dev->data->dev_conf; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint16_t nb_rx_q = dev->data->nb_rx_queues; + uint16_t nb_tx_q = dev->data->nb_tx_queues; + + if (RTE_ETH_DEV_SRIOV(dev).active != 0) { + /* check multi-queue mode */ + switch (dev_conf->rxmode.mq_mode) { + case ETH_MQ_RX_VMDQ_DCB: + PMD_INIT_LOG(INFO, "ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV"); + break; + case ETH_MQ_RX_VMDQ_DCB_RSS: + /* DCB/RSS VMDQ in SRIOV mode, not implement yet */ + PMD_INIT_LOG(ERR, "SRIOV active," + " unsupported mq_mode rx %d.", + dev_conf->rxmode.mq_mode); + return -EINVAL; + case ETH_MQ_RX_RSS: + case ETH_MQ_RX_VMDQ_RSS: + dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS; + if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) + if (ixgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) { + PMD_INIT_LOG(ERR, "SRIOV is active," + " invalid queue number" + " for VMDQ RSS, allowed" + " value are 1, 2 or 4."); + return -EINVAL; + } + break; + case ETH_MQ_RX_VMDQ_ONLY: + case ETH_MQ_RX_NONE: + /* if nothing mq mode configure, use default scheme */ + dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY; + break; + default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB*/ + /* SRIOV only works in VMDq enable mode */ + PMD_INIT_LOG(ERR, "SRIOV is active," + " wrong mq_mode rx %d.", + dev_conf->rxmode.mq_mode); + return -EINVAL; + } + + switch (dev_conf->txmode.mq_mode) { + case ETH_MQ_TX_VMDQ_DCB: + PMD_INIT_LOG(INFO, "ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV"); + dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB; + break; + default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */ + dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_ONLY; + break; + } + + /* check valid queue number */ + if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) || + (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) { + PMD_INIT_LOG(ERR, "SRIOV is active," + " nb_rx_q=%d nb_tx_q=%d queue number" + " must be less than or equal to %d.", + nb_rx_q, nb_tx_q, + RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool); + return -EINVAL; + } + } else { + if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB_RSS) { + PMD_INIT_LOG(ERR, "VMDQ+DCB+RSS mq_mode is" + " not supported."); + return -EINVAL; + } + /* check configuration for vmdb+dcb mode */ + if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) { + const struct rte_eth_vmdq_dcb_conf *conf; + + if (nb_rx_q != IXGBE_VMDQ_DCB_NB_QUEUES) { + PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_rx_q != %d.", + IXGBE_VMDQ_DCB_NB_QUEUES); + return -EINVAL; + } + conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf; + if (!(conf->nb_queue_pools == ETH_16_POOLS || + conf->nb_queue_pools == ETH_32_POOLS)) { + PMD_INIT_LOG(ERR, "VMDQ+DCB selected," + " nb_queue_pools must be %d or %d.", + ETH_16_POOLS, ETH_32_POOLS); + return -EINVAL; + } + } + if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) { + const struct rte_eth_vmdq_dcb_tx_conf *conf; + + if (nb_tx_q != 
IXGBE_VMDQ_DCB_NB_QUEUES) { + PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_tx_q != %d", + IXGBE_VMDQ_DCB_NB_QUEUES); + return -EINVAL; + } + conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf; + if (!(conf->nb_queue_pools == ETH_16_POOLS || + conf->nb_queue_pools == ETH_32_POOLS)) { + PMD_INIT_LOG(ERR, "VMDQ+DCB selected," + " nb_queue_pools != %d and" + " nb_queue_pools != %d.", + ETH_16_POOLS, ETH_32_POOLS); + return -EINVAL; + } + } + + /* For DCB mode check our configuration before we go further */ + if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) { + const struct rte_eth_dcb_rx_conf *conf; + + conf = &dev_conf->rx_adv_conf.dcb_rx_conf; + if (!(conf->nb_tcs == ETH_4_TCS || + conf->nb_tcs == ETH_8_TCS)) { + PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d" + " and nb_tcs != %d.", + ETH_4_TCS, ETH_8_TCS); + return -EINVAL; + } + } + + if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) { + const struct rte_eth_dcb_tx_conf *conf; + + conf = &dev_conf->tx_adv_conf.dcb_tx_conf; + if (!(conf->nb_tcs == ETH_4_TCS || + conf->nb_tcs == ETH_8_TCS)) { + PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d" + " and nb_tcs != %d.", + ETH_4_TCS, ETH_8_TCS); + return -EINVAL; + } + } + + /* + * When DCB/VT is off, maximum number of queues changes, + * except for 82598EB, which remains constant. + */ + if (dev_conf->txmode.mq_mode == ETH_MQ_TX_NONE && + hw->mac.type != ixgbe_mac_82598EB) { + if (nb_tx_q > IXGBE_NONE_MODE_TX_NB_QUEUES) { + PMD_INIT_LOG(ERR, + "Neither VT nor DCB are enabled, " + "nb_tx_q > %d.", + IXGBE_NONE_MODE_TX_NB_QUEUES); + return -EINVAL; + } + } + } + return 0; +} + +static int +ixgbe_dev_configure(struct rte_eth_dev *dev) +{ + struct ixgbe_interrupt *intr = + IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + struct ixgbe_adapter *adapter = dev->data->dev_private; + int ret; + + PMD_INIT_FUNC_TRACE(); + + if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) + dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH; + + /* multipe queue mode checking */ + ret = ixgbe_check_mq_mode(dev); + if (ret != 0) { + PMD_DRV_LOG(ERR, "ixgbe_check_mq_mode fails with %d.", + ret); + return ret; + } + + /* set flag to update link status after init */ + intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; + + /* + * Initialize to TRUE. If any of Rx queues doesn't meet the bulk + * allocation or vector Rx preconditions we will reset it. 
+ */ + adapter->rx_bulk_alloc_allowed = true; + adapter->rx_vec_allowed = true; + + return 0; +} + +static void +ixgbe_dev_phy_intr_setup(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_interrupt *intr = + IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + uint32_t gpie; + + /* only set up it on X550EM_X */ + if (hw->mac.type == ixgbe_mac_X550EM_x) { + gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); + gpie |= IXGBE_SDP0_GPIEN_X550EM_x; + IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); + if (hw->phy.type == ixgbe_phy_x550em_ext_t) + intr->mask |= IXGBE_EICR_GPI_SDP0_X550EM_x; + } +} + +int +ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf, + uint16_t tx_rate, uint64_t q_msk) +{ + struct ixgbe_hw *hw; + struct ixgbe_vf_info *vfinfo; + struct rte_eth_link link; + uint8_t nb_q_per_pool; + uint32_t queue_stride; + uint32_t queue_idx, idx = 0, vf_idx; + uint32_t queue_end; + uint16_t total_rate = 0; + struct rte_pci_device *pci_dev; + int ret; + + pci_dev = RTE_ETH_DEV_TO_PCI(dev); + ret = rte_eth_link_get_nowait(dev->data->port_id, &link); + if (ret < 0) + return ret; + + if (vf >= pci_dev->max_vfs) + return -EINVAL; + + if (tx_rate > link.link_speed) + return -EINVAL; + + if (q_msk == 0) + return 0; + + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + vfinfo = *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private)); + nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool; + queue_stride = IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active; + queue_idx = vf * queue_stride; + queue_end = queue_idx + nb_q_per_pool - 1; + if (queue_end >= hw->mac.max_tx_queues) + return -EINVAL; + + if (vfinfo) { + for (vf_idx = 0; vf_idx < pci_dev->max_vfs; vf_idx++) { + if (vf_idx == vf) + continue; + for (idx = 0; idx < RTE_DIM(vfinfo[vf_idx].tx_rate); + idx++) + total_rate += vfinfo[vf_idx].tx_rate[idx]; + } + } else { + return -EINVAL; + } + + /* Store tx_rate for this vf. */ + for (idx = 0; idx < nb_q_per_pool; idx++) { + if (((uint64_t)0x1 << idx) & q_msk) { + if (vfinfo[vf].tx_rate[idx] != tx_rate) + vfinfo[vf].tx_rate[idx] = tx_rate; + total_rate += tx_rate; + } + } + + if (total_rate > dev->data->dev_link.link_speed) { + /* Reset stored TX rate of the VF if it causes exceed + * link speed. + */ + memset(vfinfo[vf].tx_rate, 0, sizeof(vfinfo[vf].tx_rate)); + return -EINVAL; + } + + /* Set RTTBCNRC of each queue/pool for vf X */ + for (; queue_idx <= queue_end; queue_idx++) { + if (0x1 & q_msk) + ixgbe_set_queue_rate_limit(dev, queue_idx, tx_rate); + q_msk = q_msk >> 1; + } + + return 0; +} + +static int +ixgbe_flow_ctrl_enable(struct rte_eth_dev *dev, struct ixgbe_hw *hw) +{ + struct ixgbe_adapter *adapter = dev->data->dev_private; + int err; + uint32_t mflcn; + + ixgbe_setup_fc(hw); + + err = ixgbe_fc_enable(hw); + + /* Not negotiated is not an error case */ + if (err == IXGBE_SUCCESS || err == IXGBE_ERR_FC_NOT_NEGOTIATED) { + /* + *check if we want to forward MAC frames - driver doesn't + *have native capability to do that, + *so we'll write the registers ourselves + */ + + mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN); + + /* set or clear MFLCN.PMCF bit depending on configuration */ + if (adapter->mac_ctrl_frame_fwd != 0) + mflcn |= IXGBE_MFLCN_PMCF; + else + mflcn &= ~IXGBE_MFLCN_PMCF; + + IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn); + IXGBE_WRITE_FLUSH(hw); + + return 0; + } + return err; +} + +/* + * Configure device link speed and setup link. + * It returns 0 on success. 
+ */ +static int +ixgbe_dev_start(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_vf_info *vfinfo = + *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + uint32_t intr_vector = 0; + int err; + bool link_up = false, negotiate = 0; + uint32_t speed = 0; + uint32_t allowed_speeds = 0; + int mask = 0; + int status; + uint16_t vf, idx; + uint32_t *link_speeds; + struct ixgbe_tm_conf *tm_conf = + IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private); + struct ixgbe_macsec_setting *macsec_setting = + IXGBE_DEV_PRIVATE_TO_MACSEC_SETTING(dev->data->dev_private); + + PMD_INIT_FUNC_TRACE(); + + /* Stop the link setup handler before resetting the HW. */ + ixgbe_dev_wait_setup_link_complete(dev, 0); + + /* disable uio/vfio intr/eventfd mapping */ + rte_intr_disable(intr_handle); + + /* stop adapter */ + hw->adapter_stopped = 0; + ixgbe_stop_adapter(hw); + + /* reinitialize adapter + * this calls reset and start + */ + status = ixgbe_pf_reset_hw(hw); + if (status != 0) + return -1; + hw->mac.ops.start_hw(hw); + hw->mac.get_link_status = true; + + /* configure PF module if SRIOV enabled */ + ixgbe_pf_host_configure(dev); + + ixgbe_dev_phy_intr_setup(dev); + + /* check and configure queue intr-vector mapping */ + if ((rte_intr_cap_multiple(intr_handle) || + !RTE_ETH_DEV_SRIOV(dev).active) && + dev->data->dev_conf.intr_conf.rxq != 0) { + intr_vector = dev->data->nb_rx_queues; + if (intr_vector > IXGBE_MAX_INTR_QUEUE_NUM) { + PMD_INIT_LOG(ERR, "At most %d intr queues supported", + IXGBE_MAX_INTR_QUEUE_NUM); + return -ENOTSUP; + } + if (rte_intr_efd_enable(intr_handle, intr_vector)) + return -1; + } + + if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) { + intr_handle->intr_vec = + rte_zmalloc("intr_vec", + dev->data->nb_rx_queues * sizeof(int), 0); + if (intr_handle->intr_vec == NULL) { + PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues" + " intr_vec", dev->data->nb_rx_queues); + return -ENOMEM; + } + } + + /* confiugre msix for sleep until rx interrupt */ + ixgbe_configure_msix(dev); + + /* initialize transmission unit */ + ixgbe_dev_tx_init(dev); + + /* This can fail when allocating mbufs for descriptor rings */ + err = ixgbe_dev_rx_init(dev); + if (err) { + PMD_INIT_LOG(ERR, "Unable to initialize RX hardware"); + goto error; + } + + mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | + ETH_VLAN_EXTEND_MASK; + err = ixgbe_vlan_offload_config(dev, mask); + if (err) { + PMD_INIT_LOG(ERR, "Unable to set VLAN offload"); + goto error; + } + + if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) { + /* Enable vlan filtering for VMDq */ + ixgbe_vmdq_vlan_hw_filter_enable(dev); + } + + /* Configure DCB hw */ + ixgbe_configure_dcb(dev); + + if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE) { + err = ixgbe_fdir_configure(dev); + if (err) + goto error; + } + + /* Restore vf rate limit */ + if (vfinfo != NULL) { + for (vf = 0; vf < pci_dev->max_vfs; vf++) + for (idx = 0; idx < IXGBE_MAX_QUEUE_NUM_PER_VF; idx++) + if (vfinfo[vf].tx_rate[idx] != 0) + ixgbe_set_vf_rate_limit( + dev, vf, + vfinfo[vf].tx_rate[idx], + 1 << idx); + } + + ixgbe_restore_statistics_mapping(dev); + + err = ixgbe_flow_ctrl_enable(dev, hw); + if (err < 0) { + PMD_INIT_LOG(ERR, "enable flow ctrl err"); + goto error; + } + + err = ixgbe_dev_rxtx_start(dev); + if (err < 0) { + PMD_INIT_LOG(ERR, "Unable to start rxtx 
queues"); + goto error; + } + + /* Skip link setup if loopback mode is enabled. */ + if (dev->data->dev_conf.lpbk_mode != 0) { + err = ixgbe_check_supported_loopback_mode(dev); + if (err < 0) { + PMD_INIT_LOG(ERR, "Unsupported loopback mode"); + goto error; + } else { + goto skip_link_setup; + } + } + + if (ixgbe_is_sfp(hw) && hw->phy.multispeed_fiber) { + err = hw->mac.ops.setup_sfp(hw); + if (err) + goto error; + } + + if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) { + /* Turn on the copper */ + ixgbe_set_phy_power(hw, true); + } else { + /* Turn on the laser */ + ixgbe_enable_tx_laser(hw); + } + + err = ixgbe_check_link(hw, &speed, &link_up, 0); + if (err) + goto error; + dev->data->dev_link.link_status = link_up; + + err = ixgbe_get_link_capabilities(hw, &speed, &negotiate); + if (err) + goto error; + + switch (hw->mac.type) { + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G | + ETH_LINK_SPEED_2_5G | ETH_LINK_SPEED_5G | + ETH_LINK_SPEED_10G; + if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T || + hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L) + allowed_speeds = ETH_LINK_SPEED_10M | + ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G; + break; + default: + allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G | + ETH_LINK_SPEED_10G; + } + + link_speeds = &dev->data->dev_conf.link_speeds; + + /* Ignore autoneg flag bit and check the validity of  + * link_speed  + */ + if (((*link_speeds) >> 1) & ~(allowed_speeds >> 1)) { + PMD_INIT_LOG(ERR, "Invalid link setting"); + goto error; + } + + speed = 0x0; + if (*link_speeds == ETH_LINK_SPEED_AUTONEG) { + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + speed = IXGBE_LINK_SPEED_82598_AUTONEG; + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + speed = IXGBE_LINK_SPEED_82599_AUTONEG; + break; + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + speed = IXGBE_LINK_SPEED_X550_AUTONEG; + break; + default: + speed = IXGBE_LINK_SPEED_82599_AUTONEG; + } + } else { + if (*link_speeds & ETH_LINK_SPEED_10G) + speed |= IXGBE_LINK_SPEED_10GB_FULL; + if (*link_speeds & ETH_LINK_SPEED_5G) + speed |= IXGBE_LINK_SPEED_5GB_FULL; + if (*link_speeds & ETH_LINK_SPEED_2_5G) + speed |= IXGBE_LINK_SPEED_2_5GB_FULL; + if (*link_speeds & ETH_LINK_SPEED_1G) + speed |= IXGBE_LINK_SPEED_1GB_FULL; + if (*link_speeds & ETH_LINK_SPEED_100M) + speed |= IXGBE_LINK_SPEED_100_FULL; + if (*link_speeds & ETH_LINK_SPEED_10M) + speed |= IXGBE_LINK_SPEED_10_FULL; + } + + err = ixgbe_setup_link(hw, speed, link_up); + if (err) + goto error; + +skip_link_setup: + + if (rte_intr_allow_others(intr_handle)) { + /* check if lsc interrupt is enabled */ + if (dev->data->dev_conf.intr_conf.lsc != 0) + ixgbe_dev_lsc_interrupt_setup(dev, TRUE); + else + ixgbe_dev_lsc_interrupt_setup(dev, FALSE); + ixgbe_dev_macsec_interrupt_setup(dev); + } else { + rte_intr_callback_unregister(intr_handle, + ixgbe_dev_interrupt_handler, dev); + if (dev->data->dev_conf.intr_conf.lsc != 0) + PMD_INIT_LOG(INFO, "lsc won't enable because of" + " no intr multiplex"); + } + + /* check if rxq interrupt is enabled */ + if (dev->data->dev_conf.intr_conf.rxq != 0 && + rte_intr_dp_is_en(intr_handle)) + ixgbe_dev_rxq_interrupt_setup(dev); + + /* enable uio/vfio intr/eventfd mapping */ + rte_intr_enable(intr_handle); + + /* resume enabled intr since hw reset */ + ixgbe_enable_intr(dev); + ixgbe_l2_tunnel_conf(dev); + ixgbe_filter_restore(dev); + + if (tm_conf->root && !tm_conf->committed) + 
PMD_DRV_LOG(WARNING, + "please call hierarchy_commit() " + "before starting the port"); + + /* wait for the controller to acquire link */ + err = ixgbe_wait_for_link_up(hw); + if (err) + goto error; + + /* + * Update link status right before return, because it may + * start link configuration process in a separate thread. + */ + ixgbe_dev_link_update(dev, 0); + + /* setup the macsec setting register */ + if (macsec_setting->offload_en) + ixgbe_dev_macsec_register_enable(dev, macsec_setting); + + return 0; + +error: + PMD_INIT_LOG(ERR, "failure in ixgbe_dev_start(): %d", err); + ixgbe_dev_clear_queues(dev); + return -EIO; +} + +/* + * Stop device: disable rx and tx functions to allow for reconfiguring. + */ +static void +ixgbe_dev_stop(struct rte_eth_dev *dev) +{ + struct rte_eth_link link; + struct ixgbe_adapter *adapter = dev->data->dev_private; + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_vf_info *vfinfo = + *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + int vf; + struct ixgbe_tm_conf *tm_conf = + IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private); + + if (hw->adapter_stopped) + return; + + PMD_INIT_FUNC_TRACE(); + + ixgbe_dev_wait_setup_link_complete(dev, 0); + + /* disable interrupts */ + ixgbe_disable_intr(hw); + + /* reset the NIC */ + ixgbe_pf_reset_hw(hw); + hw->adapter_stopped = 0; + + /* stop adapter */ + ixgbe_stop_adapter(hw); + + for (vf = 0; vfinfo != NULL && vf < pci_dev->max_vfs; vf++) + vfinfo[vf].clear_to_send = false; + + if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) { + /* Turn off the copper */ + ixgbe_set_phy_power(hw, false); + } else { + /* Turn off the laser */ + ixgbe_disable_tx_laser(hw); + } + + ixgbe_dev_clear_queues(dev); + + /* Clear stored conf */ + dev->data->scattered_rx = 0; + dev->data->lro = 0; + + /* Clear recorded link status */ + memset(&link, 0, sizeof(link)); + rte_eth_linkstatus_set(dev, &link); + + if (!rte_intr_allow_others(intr_handle)) + /* resume to the default handler */ + rte_intr_callback_register(intr_handle, + ixgbe_dev_interrupt_handler, + (void *)dev); + + /* Clean datapath event and queue/vec mapping */ + rte_intr_efd_disable(intr_handle); + if (intr_handle->intr_vec != NULL) { + rte_free(intr_handle->intr_vec); + intr_handle->intr_vec = NULL; + } + + /* reset hierarchy commit */ + tm_conf->committed = false; + + adapter->rss_reta_updated = 0; + + adapter->mac_ctrl_frame_fwd = 0; + + hw->adapter_stopped = true; +} + +/* + * Set device link up: enable tx. + */ +static int +ixgbe_dev_set_link_up(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + if (hw->mac.type == ixgbe_mac_82599EB) { +#ifdef RTE_LIBRTE_IXGBE_BYPASS + if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) { + /* Not suported in bypass mode */ + PMD_INIT_LOG(ERR, "Set link up is not supported " + "by device id 0x%x", hw->device_id); + return -ENOTSUP; + } +#endif + } + + if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) { + /* Turn on the copper */ + ixgbe_set_phy_power(hw, true); + } else { + /* Turn on the laser */ + ixgbe_enable_tx_laser(hw); + ixgbe_dev_link_update(dev, 0); + } + + return 0; +} + +/* + * Set device link down: disable tx. 
+ */ +static int +ixgbe_dev_set_link_down(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + if (hw->mac.type == ixgbe_mac_82599EB) { +#ifdef RTE_LIBRTE_IXGBE_BYPASS + if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) { + /* Not suported in bypass mode */ + PMD_INIT_LOG(ERR, "Set link down is not supported " + "by device id 0x%x", hw->device_id); + return -ENOTSUP; + } +#endif + } + + if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) { + /* Turn off the copper */ + ixgbe_set_phy_power(hw, false); + } else { + /* Turn off the laser */ + ixgbe_disable_tx_laser(hw); + ixgbe_dev_link_update(dev, 0); + } + + return 0; +} + +/* + * Reset and stop device. + */ +static void +ixgbe_dev_close(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + int retries = 0; + int ret; + + PMD_INIT_FUNC_TRACE(); + + ixgbe_pf_reset_hw(hw); + + ixgbe_dev_stop(dev); + + ixgbe_dev_free_queues(dev); + + ixgbe_disable_pcie_master(hw); + + /* reprogram the RAR[0] in case user changed it. */ + ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); + + dev->dev_ops = NULL; + dev->rx_pkt_burst = NULL; + dev->tx_pkt_burst = NULL; + + /* Unlock any pending hardware semaphore */ + ixgbe_swfw_lock_reset(hw); + + /* disable uio intr before callback unregister */ + rte_intr_disable(intr_handle); + + do { + ret = rte_intr_callback_unregister(intr_handle, + ixgbe_dev_interrupt_handler, dev); + if (ret >= 0 || ret == -ENOENT) { + break; + } else if (ret != -EAGAIN) { + PMD_INIT_LOG(ERR, + "intr callback unregister failed: %d", + ret); + } + rte_delay_ms(100); + } while (retries++ < (10 + IXGBE_LINK_UP_TIME)); + + /* cancel the delay handler before remove dev */ + rte_eal_alarm_cancel(ixgbe_dev_interrupt_delayed_handler, dev); + + /* uninitialize PF if max_vfs not zero */ + ixgbe_pf_host_uninit(dev); + + /* remove all the fdir filters & hash */ + ixgbe_fdir_filter_uninit(dev); + + /* remove all the L2 tunnel filters & hash */ + ixgbe_l2_tn_filter_uninit(dev); + + /* Remove all ntuple filters of the device */ + ixgbe_ntuple_filter_uninit(dev); + + /* clear all the filters list */ + ixgbe_filterlist_flush(); + + /* Remove all Traffic Manager configuration */ + ixgbe_tm_conf_uninit(dev); + +#ifdef RTE_LIBRTE_SECURITY + rte_free(dev->security_ctx); +#endif + +} + +/* + * Reset PF device. + */ +static int +ixgbe_dev_reset(struct rte_eth_dev *dev) +{ + int ret; + + /* When a DPDK PMD PF begin to reset PF port, it should notify all + * its VF to make them align with it. The detailed notification + * mechanism is PMD specific. As to ixgbe PF, it is rather complex. + * To avoid unexpected behavior in VF, currently reset of PF with + * SR-IOV activation is not supported. It might be supported later. + */ + if (dev->data->sriov.active) + return -ENOTSUP; + + ret = eth_ixgbe_dev_uninit(dev); + if (ret) + return ret; + + ret = eth_ixgbe_dev_init(dev, NULL); + + return ret; +} + +static void +ixgbe_read_stats_registers(struct ixgbe_hw *hw, + struct ixgbe_hw_stats *hw_stats, + struct ixgbe_macsec_stats *macsec_stats, + uint64_t *total_missed_rx, uint64_t *total_qbrc, + uint64_t *total_qprc, uint64_t *total_qprdc) +{ + uint32_t bprc, lxon, lxoff, total; + uint32_t delta_gprc = 0; + unsigned i; + /* Workaround for RX byte count not including CRC bytes when CRC + * strip is enabled. 
CRC bytes are removed from counters when crc_strip + * is disabled. + */ + int crc_strip = (IXGBE_READ_REG(hw, IXGBE_HLREG0) & + IXGBE_HLREG0_RXCRCSTRP); + + hw_stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS); + hw_stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC); + hw_stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC); + hw_stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC); + + for (i = 0; i < 8; i++) { + uint32_t mp = IXGBE_READ_REG(hw, IXGBE_MPC(i)); + + /* global total per queue */ + hw_stats->mpc[i] += mp; + /* Running comprehensive total for stats display */ + *total_missed_rx += hw_stats->mpc[i]; + if (hw->mac.type == ixgbe_mac_82598EB) { + hw_stats->rnbc[i] += + IXGBE_READ_REG(hw, IXGBE_RNBC(i)); + hw_stats->pxonrxc[i] += + IXGBE_READ_REG(hw, IXGBE_PXONRXC(i)); + hw_stats->pxoffrxc[i] += + IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i)); + } else { + hw_stats->pxonrxc[i] += + IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i)); + hw_stats->pxoffrxc[i] += + IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i)); + hw_stats->pxon2offc[i] += + IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i)); + } + hw_stats->pxontxc[i] += + IXGBE_READ_REG(hw, IXGBE_PXONTXC(i)); + hw_stats->pxofftxc[i] += + IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i)); + } + for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) { + uint32_t delta_qprc = IXGBE_READ_REG(hw, IXGBE_QPRC(i)); + uint32_t delta_qptc = IXGBE_READ_REG(hw, IXGBE_QPTC(i)); + uint32_t delta_qprdc = IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); + + delta_gprc += delta_qprc; + + hw_stats->qprc[i] += delta_qprc; + hw_stats->qptc[i] += delta_qptc; + + hw_stats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i)); + hw_stats->qbrc[i] += + ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)) << 32); + if (crc_strip == 0) + hw_stats->qbrc[i] -= delta_qprc * RTE_ETHER_CRC_LEN; + + hw_stats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i)); + hw_stats->qbtc[i] += + ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)) << 32); + + hw_stats->qprdc[i] += delta_qprdc; + *total_qprdc += hw_stats->qprdc[i]; + + *total_qprc += hw_stats->qprc[i]; + *total_qbrc += hw_stats->qbrc[i]; + } + hw_stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC); + hw_stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC); + hw_stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC); + + /* + * An errata states that gprc actually counts good + missed packets: + * Workaround to set gprc to summated queue packet receives + */ + hw_stats->gprc = *total_qprc; + + if (hw->mac.type != ixgbe_mac_82598EB) { + hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL); + hw_stats->gorc += ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32); + hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL); + hw_stats->gotc += ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32); + hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL); + hw_stats->tor += ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32); + hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT); + hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT); + } else { + hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC); + hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC); + /* 82598 only has a counter in the high register */ + hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH); + hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH); + hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH); + } + uint64_t old_tpr = hw_stats->tpr; + + hw_stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR); + hw_stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT); + + if (crc_strip == 0) + hw_stats->gorc -= delta_gprc * RTE_ETHER_CRC_LEN; + + uint64_t delta_gptc = IXGBE_READ_REG(hw, IXGBE_GPTC); + hw_stats->gptc += 
delta_gptc; + hw_stats->gotc -= delta_gptc * RTE_ETHER_CRC_LEN; + hw_stats->tor -= (hw_stats->tpr - old_tpr) * RTE_ETHER_CRC_LEN; + + /* + * Workaround: mprc hardware is incorrectly counting + * broadcasts, so for now we subtract those. + */ + bprc = IXGBE_READ_REG(hw, IXGBE_BPRC); + hw_stats->bprc += bprc; + hw_stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC); + if (hw->mac.type == ixgbe_mac_82598EB) + hw_stats->mprc -= bprc; + + hw_stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64); + hw_stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127); + hw_stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255); + hw_stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511); + hw_stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023); + hw_stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522); + + lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC); + hw_stats->lxontxc += lxon; + lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC); + hw_stats->lxofftxc += lxoff; + total = lxon + lxoff; + + hw_stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC); + hw_stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64); + hw_stats->gptc -= total; + hw_stats->mptc -= total; + hw_stats->ptc64 -= total; + hw_stats->gotc -= total * RTE_ETHER_MIN_LEN; + + hw_stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC); + hw_stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC); + hw_stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC); + hw_stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC); + hw_stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC); + hw_stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC); + hw_stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC); + hw_stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127); + hw_stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255); + hw_stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511); + hw_stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023); + hw_stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522); + hw_stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC); + hw_stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC); + hw_stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC); + hw_stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST); + /* Only read FCOE on 82599 */ + if (hw->mac.type != ixgbe_mac_82598EB) { + hw_stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC); + hw_stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC); + hw_stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC); + hw_stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC); + hw_stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC); + } + + /* Flow Director Stats registers */ + if (hw->mac.type != ixgbe_mac_82598EB) { + hw_stats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH); + hw_stats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS); + hw_stats->fdirustat_add += IXGBE_READ_REG(hw, + IXGBE_FDIRUSTAT) & 0xFFFF; + hw_stats->fdirustat_remove += (IXGBE_READ_REG(hw, + IXGBE_FDIRUSTAT) >> 16) & 0xFFFF; + hw_stats->fdirfstat_fadd += IXGBE_READ_REG(hw, + IXGBE_FDIRFSTAT) & 0xFFFF; + hw_stats->fdirfstat_fremove += (IXGBE_READ_REG(hw, + IXGBE_FDIRFSTAT) >> 16) & 0xFFFF; + } + /* MACsec Stats registers */ + macsec_stats->out_pkts_untagged += IXGBE_READ_REG(hw, IXGBE_LSECTXUT); + macsec_stats->out_pkts_encrypted += + IXGBE_READ_REG(hw, IXGBE_LSECTXPKTE); + macsec_stats->out_pkts_protected += + IXGBE_READ_REG(hw, IXGBE_LSECTXPKTP); + macsec_stats->out_octets_encrypted += + IXGBE_READ_REG(hw, IXGBE_LSECTXOCTE); + macsec_stats->out_octets_protected += + IXGBE_READ_REG(hw, IXGBE_LSECTXOCTP); + macsec_stats->in_pkts_untagged += IXGBE_READ_REG(hw, IXGBE_LSECRXUT); + macsec_stats->in_pkts_badtag += IXGBE_READ_REG(hw, IXGBE_LSECRXBAD); + 
macsec_stats->in_pkts_nosci += IXGBE_READ_REG(hw, IXGBE_LSECRXNOSCI); + macsec_stats->in_pkts_unknownsci += + IXGBE_READ_REG(hw, IXGBE_LSECRXUNSCI); + macsec_stats->in_octets_decrypted += + IXGBE_READ_REG(hw, IXGBE_LSECRXOCTD); + macsec_stats->in_octets_validated += + IXGBE_READ_REG(hw, IXGBE_LSECRXOCTV); + macsec_stats->in_pkts_unchecked += IXGBE_READ_REG(hw, IXGBE_LSECRXUNCH); + macsec_stats->in_pkts_delayed += IXGBE_READ_REG(hw, IXGBE_LSECRXDELAY); + macsec_stats->in_pkts_late += IXGBE_READ_REG(hw, IXGBE_LSECRXLATE); + for (i = 0; i < 2; i++) { + macsec_stats->in_pkts_ok += + IXGBE_READ_REG(hw, IXGBE_LSECRXOK(i)); + macsec_stats->in_pkts_invalid += + IXGBE_READ_REG(hw, IXGBE_LSECRXINV(i)); + macsec_stats->in_pkts_notvalid += + IXGBE_READ_REG(hw, IXGBE_LSECRXNV(i)); + } + macsec_stats->in_pkts_unusedsa += IXGBE_READ_REG(hw, IXGBE_LSECRXUNSA); + macsec_stats->in_pkts_notusingsa += + IXGBE_READ_REG(hw, IXGBE_LSECRXNUSA); +} + +/* + * This function is based on ixgbe_update_stats_counters() in ixgbe/ixgbe.c + */ +static int +ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) +{ + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_hw_stats *hw_stats = + IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); + struct ixgbe_macsec_stats *macsec_stats = + IXGBE_DEV_PRIVATE_TO_MACSEC_STATS( + dev->data->dev_private); + uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc; + unsigned i; + + total_missed_rx = 0; + total_qbrc = 0; + total_qprc = 0; + total_qprdc = 0; + + ixgbe_read_stats_registers(hw, hw_stats, macsec_stats, &total_missed_rx, + &total_qbrc, &total_qprc, &total_qprdc); + + if (stats == NULL) + return -EINVAL; + + /* Fill out the rte_eth_stats statistics structure */ + stats->ipackets = total_qprc; + stats->ibytes = total_qbrc; + stats->opackets = hw_stats->gptc; + stats->obytes = hw_stats->gotc; + + for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) { + stats->q_ipackets[i] = hw_stats->qprc[i]; + stats->q_opackets[i] = hw_stats->qptc[i]; + stats->q_ibytes[i] = hw_stats->qbrc[i]; + stats->q_obytes[i] = hw_stats->qbtc[i]; + stats->q_errors[i] = hw_stats->qprdc[i]; + } + + /* Rx Errors */ + stats->imissed = total_missed_rx; + stats->ierrors = hw_stats->crcerrs + + hw_stats->mspdc + + hw_stats->rlec + + hw_stats->ruc + + hw_stats->roc + + hw_stats->illerrc + + hw_stats->errbc + + hw_stats->rfc + + hw_stats->fccrc + + hw_stats->fclast; + + /* Tx Errors */ + stats->oerrors = 0; + return 0; +} + +static int +ixgbe_dev_stats_reset(struct rte_eth_dev *dev) +{ + struct ixgbe_hw_stats *stats = + IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); + + /* HW registers are cleared on read */ + ixgbe_dev_stats_get(dev, NULL); + + /* Reset software totals */ + memset(stats, 0, sizeof(*stats)); + + return 0; +} + +/* This function calculates the number of xstats based on the current config */ +static unsigned +ixgbe_xstats_calc_num(void) { + return IXGBE_NB_HW_STATS + IXGBE_NB_MACSEC_STATS + + (IXGBE_NB_RXQ_PRIO_STATS * IXGBE_NB_RXQ_PRIO_VALUES) + + (IXGBE_NB_TXQ_PRIO_STATS * IXGBE_NB_TXQ_PRIO_VALUES); +} + +static int ixgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned int size) +{ + const unsigned cnt_stats = ixgbe_xstats_calc_num(); + unsigned stat, i, count; + + if (xstats_names != NULL) { + count = 0; + + /* Note: limit >= cnt_stats checked upstream + * in rte_eth_xstats_names() + */ + + /* Extended stats from ixgbe_hw_stats */ + for (i = 0; i < 
IXGBE_NB_HW_STATS; i++) { + strlcpy(xstats_names[count].name, + rte_ixgbe_stats_strings[i].name, + sizeof(xstats_names[count].name)); + count++; + } + + /* MACsec Stats */ + for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) { + strlcpy(xstats_names[count].name, + rte_ixgbe_macsec_strings[i].name, + sizeof(xstats_names[count].name)); + count++; + } + + /* RX Priority Stats */ + for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) { + for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) { + snprintf(xstats_names[count].name, + sizeof(xstats_names[count].name), + "rx_priority%u_%s", i, + rte_ixgbe_rxq_strings[stat].name); + count++; + } + } + + /* TX Priority Stats */ + for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) { + for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) { + snprintf(xstats_names[count].name, + sizeof(xstats_names[count].name), + "tx_priority%u_%s", i, + rte_ixgbe_txq_strings[stat].name); + count++; + } + } + } + return cnt_stats; +} + +static int ixgbe_dev_xstats_get_names_by_id( + struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, + const uint64_t *ids, + unsigned int limit) +{ + if (!ids) { + const unsigned int cnt_stats = ixgbe_xstats_calc_num(); + unsigned int stat, i, count; + + if (xstats_names != NULL) { + count = 0; + + /* Note: limit >= cnt_stats checked upstream + * in rte_eth_xstats_names() + */ + + /* Extended stats from ixgbe_hw_stats */ + for (i = 0; i < IXGBE_NB_HW_STATS; i++) { + strlcpy(xstats_names[count].name, + rte_ixgbe_stats_strings[i].name, + sizeof(xstats_names[count].name)); + count++; + } + + /* MACsec Stats */ + for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) { + strlcpy(xstats_names[count].name, + rte_ixgbe_macsec_strings[i].name, + sizeof(xstats_names[count].name)); + count++; + } + + /* RX Priority Stats */ + for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) { + for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) { + snprintf(xstats_names[count].name, + sizeof(xstats_names[count].name), + "rx_priority%u_%s", i, + rte_ixgbe_rxq_strings[stat].name); + count++; + } + } + + /* TX Priority Stats */ + for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) { + for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) { + snprintf(xstats_names[count].name, + sizeof(xstats_names[count].name), + "tx_priority%u_%s", i, + rte_ixgbe_txq_strings[stat].name); + count++; + } + } + } + return cnt_stats; + } + + uint16_t i; + uint16_t size = ixgbe_xstats_calc_num(); + struct rte_eth_xstat_name xstats_names_copy[size]; + + ixgbe_dev_xstats_get_names_by_id(dev, xstats_names_copy, NULL, + size); + + for (i = 0; i < limit; i++) { + if (ids[i] >= size) { + PMD_INIT_LOG(ERR, "id value isn't valid"); + return -1; + } + strcpy(xstats_names[i].name, + xstats_names_copy[ids[i]].name); + } + return limit; +} + +static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, unsigned limit) +{ + unsigned i; + + if (limit < IXGBEVF_NB_XSTATS && xstats_names != NULL) + return -ENOMEM; + + if (xstats_names != NULL) + for (i = 0; i < IXGBEVF_NB_XSTATS; i++) + strlcpy(xstats_names[i].name, + rte_ixgbevf_stats_strings[i].name, + sizeof(xstats_names[i].name)); + return IXGBEVF_NB_XSTATS; +} + +static int +ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, + unsigned n) +{ + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_hw_stats *hw_stats = + IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); + struct ixgbe_macsec_stats *macsec_stats = + 
IXGBE_DEV_PRIVATE_TO_MACSEC_STATS( + dev->data->dev_private); + uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc; + unsigned i, stat, count = 0; + + count = ixgbe_xstats_calc_num(); + + if (n < count) + return count; + + total_missed_rx = 0; + total_qbrc = 0; + total_qprc = 0; + total_qprdc = 0; + + ixgbe_read_stats_registers(hw, hw_stats, macsec_stats, &total_missed_rx, + &total_qbrc, &total_qprc, &total_qprdc); + + /* If this is a reset xstats is NULL, and we have cleared the + * registers by reading them. + */ + if (!xstats) + return 0; + + /* Extended stats from ixgbe_hw_stats */ + count = 0; + for (i = 0; i < IXGBE_NB_HW_STATS; i++) { + xstats[count].value = *(uint64_t *)(((char *)hw_stats) + + rte_ixgbe_stats_strings[i].offset); + xstats[count].id = count; + count++; + } + + /* MACsec Stats */ + for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) { + xstats[count].value = *(uint64_t *)(((char *)macsec_stats) + + rte_ixgbe_macsec_strings[i].offset); + xstats[count].id = count; + count++; + } + + /* RX Priority Stats */ + for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) { + for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) { + xstats[count].value = *(uint64_t *)(((char *)hw_stats) + + rte_ixgbe_rxq_strings[stat].offset + + (sizeof(uint64_t) * i)); + xstats[count].id = count; + count++; + } + } + + /* TX Priority Stats */ + for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) { + for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) { + xstats[count].value = *(uint64_t *)(((char *)hw_stats) + + rte_ixgbe_txq_strings[stat].offset + + (sizeof(uint64_t) * i)); + xstats[count].id = count; + count++; + } + } + return count; +} + +static int +ixgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids, + uint64_t *values, unsigned int n) +{ + if (!ids) { + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_hw_stats *hw_stats = + IXGBE_DEV_PRIVATE_TO_STATS( + dev->data->dev_private); + struct ixgbe_macsec_stats *macsec_stats = + IXGBE_DEV_PRIVATE_TO_MACSEC_STATS( + dev->data->dev_private); + uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc; + unsigned int i, stat, count = 0; + + count = ixgbe_xstats_calc_num(); + + if (!ids && n < count) + return count; + + total_missed_rx = 0; + total_qbrc = 0; + total_qprc = 0; + total_qprdc = 0; + + ixgbe_read_stats_registers(hw, hw_stats, macsec_stats, + &total_missed_rx, &total_qbrc, &total_qprc, + &total_qprdc); + + /* If this is a reset xstats is NULL, and we have cleared the + * registers by reading them. 
+ */ + if (!ids && !values) + return 0; + + /* Extended stats from ixgbe_hw_stats */ + count = 0; + for (i = 0; i < IXGBE_NB_HW_STATS; i++) { + values[count] = *(uint64_t *)(((char *)hw_stats) + + rte_ixgbe_stats_strings[i].offset); + count++; + } + + /* MACsec Stats */ + for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) { + values[count] = *(uint64_t *)(((char *)macsec_stats) + + rte_ixgbe_macsec_strings[i].offset); + count++; + } + + /* RX Priority Stats */ + for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) { + for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) { + values[count] = + *(uint64_t *)(((char *)hw_stats) + + rte_ixgbe_rxq_strings[stat].offset + + (sizeof(uint64_t) * i)); + count++; + } + } + + /* TX Priority Stats */ + for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) { + for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) { + values[count] = + *(uint64_t *)(((char *)hw_stats) + + rte_ixgbe_txq_strings[stat].offset + + (sizeof(uint64_t) * i)); + count++; + } + } + return count; + } + + uint16_t i; + uint16_t size = ixgbe_xstats_calc_num(); + uint64_t values_copy[size]; + + ixgbe_dev_xstats_get_by_id(dev, NULL, values_copy, size); + + for (i = 0; i < n; i++) { + if (ids[i] >= size) { + PMD_INIT_LOG(ERR, "id value isn't valid"); + return -1; + } + values[i] = values_copy[ids[i]]; + } + return n; +} + +static int +ixgbe_dev_xstats_reset(struct rte_eth_dev *dev) +{ + struct ixgbe_hw_stats *stats = + IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); + struct ixgbe_macsec_stats *macsec_stats = + IXGBE_DEV_PRIVATE_TO_MACSEC_STATS( + dev->data->dev_private); + + unsigned count = ixgbe_xstats_calc_num(); + + /* HW registers are cleared on read */ + ixgbe_dev_xstats_get(dev, NULL, count); + + /* Reset software totals */ + memset(stats, 0, sizeof(*stats)); + memset(macsec_stats, 0, sizeof(*macsec_stats)); + + return 0; +} + +static void +ixgbevf_update_stats(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *) + IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); + + /* Good Rx packet, include VF loopback */ + UPDATE_VF_STAT(IXGBE_VFGPRC, + hw_stats->last_vfgprc, hw_stats->vfgprc); + + /* Good Rx octets, include VF loopback */ + UPDATE_VF_STAT_36BIT(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB, + hw_stats->last_vfgorc, hw_stats->vfgorc); + + /* Good Tx packet, include VF loopback */ + UPDATE_VF_STAT(IXGBE_VFGPTC, + hw_stats->last_vfgptc, hw_stats->vfgptc); + + /* Good Tx octets, include VF loopback */ + UPDATE_VF_STAT_36BIT(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB, + hw_stats->last_vfgotc, hw_stats->vfgotc); + + /* Rx Multicst Packet */ + UPDATE_VF_STAT(IXGBE_VFMPRC, + hw_stats->last_vfmprc, hw_stats->vfmprc); +} + +static int +ixgbevf_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, + unsigned n) +{ + struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *) + IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); + unsigned i; + + if (n < IXGBEVF_NB_XSTATS) + return IXGBEVF_NB_XSTATS; + + ixgbevf_update_stats(dev); + + if (!xstats) + return 0; + + /* Extended stats */ + for (i = 0; i < IXGBEVF_NB_XSTATS; i++) { + xstats[i].id = i; + xstats[i].value = *(uint64_t *)(((char *)hw_stats) + + rte_ixgbevf_stats_strings[i].offset); + } + + return IXGBEVF_NB_XSTATS; +} + +static int +ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) +{ + struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *) + 
IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); + + ixgbevf_update_stats(dev); + + if (stats == NULL) + return -EINVAL; + + stats->ipackets = hw_stats->vfgprc; + stats->ibytes = hw_stats->vfgorc; + stats->opackets = hw_stats->vfgptc; + stats->obytes = hw_stats->vfgotc; + return 0; +} + +static int +ixgbevf_dev_stats_reset(struct rte_eth_dev *dev) +{ + struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *) + IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); + + /* Sync HW register to the last stats */ + ixgbevf_dev_stats_get(dev, NULL); + + /* reset HW current stats*/ + hw_stats->vfgprc = 0; + hw_stats->vfgorc = 0; + hw_stats->vfgptc = 0; + hw_stats->vfgotc = 0; + + return 0; +} + +static int +ixgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + u16 eeprom_verh, eeprom_verl; + u32 etrack_id; + int ret; + + ixgbe_read_eeprom(hw, 0x2e, &eeprom_verh); + ixgbe_read_eeprom(hw, 0x2d, &eeprom_verl); + + etrack_id = (eeprom_verh << 16) | eeprom_verl; + ret = snprintf(fw_version, fw_size, "0x%08x", etrack_id); + + ret += 1; /* add the size of '\0' */ + if (fw_size < (u32)ret) + return ret; + else + return 0; +} + +static int +ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) +{ + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_eth_conf *dev_conf = &dev->data->dev_conf; + + dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues; + dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues; + if (RTE_ETH_DEV_SRIOV(dev).active == 0) { + /* + * When DCB/VT is off, maximum number of queues changes, + * except for 82598EB, which remains constant. 
+ */ + if (dev_conf->txmode.mq_mode == ETH_MQ_TX_NONE && + hw->mac.type != ixgbe_mac_82598EB) + dev_info->max_tx_queues = IXGBE_NONE_MODE_TX_NB_QUEUES; + } + dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL register */ + dev_info->max_rx_pktlen = 15872; /* includes CRC, cf MAXFRS register */ + dev_info->max_mac_addrs = hw->mac.num_rar_entries; + dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC; + dev_info->max_vfs = pci_dev->max_vfs; + if (hw->mac.type == ixgbe_mac_82598EB) + dev_info->max_vmdq_pools = ETH_16_POOLS; + else + dev_info->max_vmdq_pools = ETH_64_POOLS; + dev_info->max_mtu = dev_info->max_rx_pktlen - IXGBE_ETH_OVERHEAD; + dev_info->min_mtu = RTE_ETHER_MIN_MTU; + dev_info->vmdq_queue_num = dev_info->max_rx_queues; + dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev); + dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) | + dev_info->rx_queue_offload_capa); + dev_info->tx_queue_offload_capa = ixgbe_get_tx_queue_offloads(dev); + dev_info->tx_offload_capa = ixgbe_get_tx_port_offloads(dev); + + dev_info->default_rxconf = (struct rte_eth_rxconf) { + .rx_thresh = { + .pthresh = IXGBE_DEFAULT_RX_PTHRESH, + .hthresh = IXGBE_DEFAULT_RX_HTHRESH, + .wthresh = IXGBE_DEFAULT_RX_WTHRESH, + }, + .rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH, + .rx_drop_en = 0, + .offloads = 0, + }; + + dev_info->default_txconf = (struct rte_eth_txconf) { + .tx_thresh = { + .pthresh = IXGBE_DEFAULT_TX_PTHRESH, + .hthresh = IXGBE_DEFAULT_TX_HTHRESH, + .wthresh = IXGBE_DEFAULT_TX_WTHRESH, + }, + .tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH, + .tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH, + .offloads = 0, + }; + + dev_info->rx_desc_lim = rx_desc_lim; + dev_info->tx_desc_lim = tx_desc_lim; + + dev_info->hash_key_size = IXGBE_HKEY_MAX_INDEX * sizeof(uint32_t); + dev_info->reta_size = ixgbe_reta_size_get(hw->mac.type); + dev_info->flow_type_rss_offloads = IXGBE_RSS_OFFLOAD_ALL; + + dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G; + if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T || + hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L) + dev_info->speed_capa = ETH_LINK_SPEED_10M | + ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G; + + if (hw->mac.type == ixgbe_mac_X540 || + hw->mac.type == ixgbe_mac_X540_vf || + hw->mac.type == ixgbe_mac_X550 || + hw->mac.type == ixgbe_mac_X550_vf) { + dev_info->speed_capa |= ETH_LINK_SPEED_100M; + } + if (hw->mac.type == ixgbe_mac_X550) { + dev_info->speed_capa |= ETH_LINK_SPEED_2_5G; + dev_info->speed_capa |= ETH_LINK_SPEED_5G; + } + + /* Driver-preferred Rx/Tx parameters */ + dev_info->default_rxportconf.burst_size = 32; + dev_info->default_txportconf.burst_size = 32; + dev_info->default_rxportconf.nb_queues = 1; + dev_info->default_txportconf.nb_queues = 1; + dev_info->default_rxportconf.ring_size = 256; + dev_info->default_txportconf.ring_size = 256; + + return 0; +} + +static const uint32_t * +ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev) +{ + static const uint32_t ptypes[] = { + /* For non-vec functions, + * refers to ixgbe_rxd_pkt_info_to_pkt_type(); + * for vec functions, + * refers to _recv_raw_pkts_vec(). 
+ */ + RTE_PTYPE_L2_ETHER, + RTE_PTYPE_L3_IPV4, + RTE_PTYPE_L3_IPV4_EXT, + RTE_PTYPE_L3_IPV6, + RTE_PTYPE_L3_IPV6_EXT, + RTE_PTYPE_L4_SCTP, + RTE_PTYPE_L4_TCP, + RTE_PTYPE_L4_UDP, + RTE_PTYPE_TUNNEL_IP, + RTE_PTYPE_INNER_L3_IPV6, + RTE_PTYPE_INNER_L3_IPV6_EXT, + RTE_PTYPE_INNER_L4_TCP, + RTE_PTYPE_INNER_L4_UDP, + RTE_PTYPE_UNKNOWN + }; + + if (dev->rx_pkt_burst == ixgbe_recv_pkts || + dev->rx_pkt_burst == ixgbe_recv_pkts_lro_single_alloc || + dev->rx_pkt_burst == ixgbe_recv_pkts_lro_bulk_alloc || + dev->rx_pkt_burst == ixgbe_recv_pkts_bulk_alloc) + return ptypes; + +#if defined(RTE_ARCH_X86) || defined(RTE_MACHINE_CPUFLAG_NEON) + if (dev->rx_pkt_burst == ixgbe_recv_pkts_vec || + dev->rx_pkt_burst == ixgbe_recv_scattered_pkts_vec) + return ptypes; +#endif + return NULL; +} + +static int +ixgbevf_dev_info_get(struct rte_eth_dev *dev, + struct rte_eth_dev_info *dev_info) +{ + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues; + dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues; + dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL reg */ + dev_info->max_rx_pktlen = 9728; /* includes CRC, cf MAXFRS reg */ + dev_info->max_mtu = dev_info->max_rx_pktlen - IXGBE_ETH_OVERHEAD; + dev_info->max_mac_addrs = hw->mac.num_rar_entries; + dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC; + dev_info->max_vfs = pci_dev->max_vfs; + if (hw->mac.type == ixgbe_mac_82598EB) + dev_info->max_vmdq_pools = ETH_16_POOLS; + else + dev_info->max_vmdq_pools = ETH_64_POOLS; + dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev); + dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) | + dev_info->rx_queue_offload_capa); + dev_info->tx_queue_offload_capa = ixgbe_get_tx_queue_offloads(dev); + dev_info->tx_offload_capa = ixgbe_get_tx_port_offloads(dev); + dev_info->hash_key_size = IXGBE_HKEY_MAX_INDEX * sizeof(uint32_t); + dev_info->reta_size = ixgbe_reta_size_get(hw->mac.type); + dev_info->flow_type_rss_offloads = IXGBE_RSS_OFFLOAD_ALL; + + dev_info->default_rxconf = (struct rte_eth_rxconf) { + .rx_thresh = { + .pthresh = IXGBE_DEFAULT_RX_PTHRESH, + .hthresh = IXGBE_DEFAULT_RX_HTHRESH, + .wthresh = IXGBE_DEFAULT_RX_WTHRESH, + }, + .rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH, + .rx_drop_en = 0, + .offloads = 0, + }; + + dev_info->default_txconf = (struct rte_eth_txconf) { + .tx_thresh = { + .pthresh = IXGBE_DEFAULT_TX_PTHRESH, + .hthresh = IXGBE_DEFAULT_TX_HTHRESH, + .wthresh = IXGBE_DEFAULT_TX_WTHRESH, + }, + .tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH, + .tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH, + .offloads = 0, + }; + + dev_info->rx_desc_lim = rx_desc_lim; + dev_info->tx_desc_lim = tx_desc_lim; + + return 0; +} + +static int +ixgbevf_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed, + bool *link_up, int wait_to_complete) +{ + struct ixgbe_adapter *adapter = container_of(hw, + struct ixgbe_adapter, hw); + struct ixgbe_mbx_info *mbx = &hw->mbx; + struct ixgbe_mac_info *mac = &hw->mac; + uint32_t links_reg, in_msg; + int ret_val = 0; + + /* If we were hit with a reset drop the link */ + if (!mbx->ops.check_for_rst(hw, 0) || !mbx->timeout) + mac->get_link_status = true; + + if (!mac->get_link_status) + goto out; + + /* if link status is down no point in checking to see if pf is up */ + links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS); + if (!(links_reg & IXGBE_LINKS_UP)) + goto out; + + /* for SFP+ modules and DA cables on 82599 
it can take up to 500usecs + * before the link status is correct + */ + if (mac->type == ixgbe_mac_82599_vf && wait_to_complete) { + int i; + + for (i = 0; i < 5; i++) { + rte_delay_us(100); + links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS); + + if (!(links_reg & IXGBE_LINKS_UP)) + goto out; + } + } + + switch (links_reg & IXGBE_LINKS_SPEED_82599) { + case IXGBE_LINKS_SPEED_10G_82599: + *speed = IXGBE_LINK_SPEED_10GB_FULL; + if (hw->mac.type >= ixgbe_mac_X550) { + if (links_reg & IXGBE_LINKS_SPEED_NON_STD) + *speed = IXGBE_LINK_SPEED_2_5GB_FULL; + } + break; + case IXGBE_LINKS_SPEED_1G_82599: + *speed = IXGBE_LINK_SPEED_1GB_FULL; + break; + case IXGBE_LINKS_SPEED_100_82599: + *speed = IXGBE_LINK_SPEED_100_FULL; + if (hw->mac.type == ixgbe_mac_X550) { + if (links_reg & IXGBE_LINKS_SPEED_NON_STD) + *speed = IXGBE_LINK_SPEED_5GB_FULL; + } + break; + case IXGBE_LINKS_SPEED_10_X550EM_A: + *speed = IXGBE_LINK_SPEED_UNKNOWN; + /* Since Reserved in older MAC's */ + if (hw->mac.type >= ixgbe_mac_X550) + *speed = IXGBE_LINK_SPEED_10_FULL; + break; + default: + *speed = IXGBE_LINK_SPEED_UNKNOWN; + } + + if (wait_to_complete == 0 && adapter->pflink_fullchk == 0) { + if (*speed == IXGBE_LINK_SPEED_UNKNOWN) + mac->get_link_status = true; + else + mac->get_link_status = false; + + goto out; + } + + /* if the read failed it could just be a mailbox collision, best wait + * until we are called again and don't report an error + */ + if (mbx->ops.read(hw, &in_msg, 1, 0)) + goto out; + + if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) { + /* msg is not CTS and is NACK we must have lost CTS status */ + if (in_msg & IXGBE_VT_MSGTYPE_NACK) + mac->get_link_status = false; + goto out; + } + + /* the pf is talking, if we timed out in the past we reinit */ + if (!mbx->timeout) { + ret_val = -1; + goto out; + } + + /* if we passed all the tests above then the link is up and we no + * longer need to check for link + */ + mac->get_link_status = false; + +out: + *link_up = !mac->get_link_status; + return ret_val; +} + +/* + * If @timeout_ms was 0, it means that it will not return until link complete. + * It returns 1 on complete, return 0 on timeout. + */ +static int +ixgbe_dev_wait_setup_link_complete(struct rte_eth_dev *dev, uint32_t timeout_ms) +{ +#define WARNING_TIMEOUT 9000 /* 9s in total */ + struct ixgbe_adapter *ad = dev->data->dev_private; + uint32_t timeout = timeout_ms ? 
timeout_ms : WARNING_TIMEOUT; + + while (rte_atomic32_read(&ad->link_thread_running)) { + msec_delay(1); + timeout--; + + if (timeout_ms) { + if (!timeout) + return 0; + } else if (!timeout) { + /* It will not return until link complete */ + timeout = WARNING_TIMEOUT; + PMD_DRV_LOG(ERR, "IXGBE link thread not complete too long time!"); + } + } + + return 1; +} + +static void * +ixgbe_dev_setup_link_thread_handler(void *param) +{ + struct rte_eth_dev *dev = (struct rte_eth_dev *)param; + struct ixgbe_adapter *ad = dev->data->dev_private; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_interrupt *intr = + IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + u32 speed; + bool autoneg = false; + + pthread_detach(pthread_self()); + speed = hw->phy.autoneg_advertised; + if (!speed) + ixgbe_get_link_capabilities(hw, &speed, &autoneg); + + ixgbe_setup_link(hw, speed, true); + + intr->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG; + rte_atomic32_clear(&ad->link_thread_running); + return NULL; +} + +/* + * In freebsd environment, nic_uio drivers do not support interrupts, + * rte_intr_callback_register() will fail to register interrupts. + * We can not make link status to change from down to up by interrupt + * callback. So we need to wait for the controller to acquire link + * when ports start. + * It returns 0 on link up. + */ +static int +ixgbe_wait_for_link_up(struct ixgbe_hw *hw) +{ +#ifdef RTE_EXEC_ENV_FREEBSD + int err, i; + bool link_up = false; + uint32_t speed = 0; + const int nb_iter = 25; + + for (i = 0; i < nb_iter; i++) { + err = ixgbe_check_link(hw, &speed, &link_up, 0); + if (err) + return err; + if (link_up) + return 0; + msec_delay(200); + } + + return 0; +#else + RTE_SET_USED(hw); + return 0; +#endif +} + +/* return 0 means link status changed, -1 means not changed */ +int +ixgbe_dev_link_update_share(struct rte_eth_dev *dev, + int wait_to_complete, int vf) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_adapter *ad = dev->data->dev_private; + struct rte_eth_link link; + ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN; + struct ixgbe_interrupt *intr = + IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + bool link_up; + int diag; + int wait = 1; + u32 esdp_reg; + + memset(&link, 0, sizeof(link)); + link.link_status = ETH_LINK_DOWN; + link.link_speed = ETH_SPEED_NUM_NONE; + link.link_duplex = ETH_LINK_HALF_DUPLEX; + link.link_autoneg = !(dev->data->dev_conf.link_speeds & + ETH_LINK_SPEED_FIXED); + + hw->mac.get_link_status = true; + + if (intr->flags & IXGBE_FLAG_NEED_LINK_CONFIG) + return rte_eth_linkstatus_set(dev, &link); + + /* check if it needs to wait to complete, if lsc interrupt is enabled */ + if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0) + wait = 0; + +/* BSD has no interrupt mechanism, so force NIC status synchronization. 
*/ +#ifdef RTE_EXEC_ENV_FREEBSD + wait = 1; +#endif + + if (vf) + diag = ixgbevf_check_link(hw, &link_speed, &link_up, wait); + else + diag = ixgbe_check_link(hw, &link_speed, &link_up, wait); + + if (diag != 0) { + link.link_speed = ETH_SPEED_NUM_100M; + link.link_duplex = ETH_LINK_FULL_DUPLEX; + return rte_eth_linkstatus_set(dev, &link); + } + + if (ixgbe_get_media_type(hw) == ixgbe_media_type_fiber) { + esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); + if ((esdp_reg & IXGBE_ESDP_SDP3)) + link_up = 0; + } + + if (link_up == 0) { + if (ixgbe_get_media_type(hw) == ixgbe_media_type_fiber) { + ixgbe_dev_wait_setup_link_complete(dev, 0); + if (rte_atomic32_test_and_set(&ad->link_thread_running)) { + /* To avoid race condition between threads, set + * the IXGBE_FLAG_NEED_LINK_CONFIG flag only + * when there is no link thread running. + */ + intr->flags |= IXGBE_FLAG_NEED_LINK_CONFIG; + if (rte_ctrl_thread_create(&ad->link_thread_tid, + "ixgbe-link-handler", + NULL, + ixgbe_dev_setup_link_thread_handler, + dev) < 0) { + PMD_DRV_LOG(ERR, + "Create link thread failed!"); + rte_atomic32_clear(&ad->link_thread_running); + } + } else { + PMD_DRV_LOG(ERR, + "Other link thread is running now!"); + } + } + return rte_eth_linkstatus_set(dev, &link); + } + + link.link_status = ETH_LINK_UP; + link.link_duplex = ETH_LINK_FULL_DUPLEX; + + switch (link_speed) { + default: + case IXGBE_LINK_SPEED_UNKNOWN: + if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T || + hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L) + link.link_speed = ETH_SPEED_NUM_10M; + else + link.link_speed = ETH_SPEED_NUM_100M; + break; + + case IXGBE_LINK_SPEED_100_FULL: + link.link_speed = ETH_SPEED_NUM_100M; + break; + + case IXGBE_LINK_SPEED_1GB_FULL: + link.link_speed = ETH_SPEED_NUM_1G; + break; + + case IXGBE_LINK_SPEED_2_5GB_FULL: + link.link_speed = ETH_SPEED_NUM_2_5G; + break; + + case IXGBE_LINK_SPEED_5GB_FULL: + link.link_speed = ETH_SPEED_NUM_5G; + break; + + case IXGBE_LINK_SPEED_10GB_FULL: + link.link_speed = ETH_SPEED_NUM_10G; + break; + } + + return rte_eth_linkstatus_set(dev, &link); +} + +static int +ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete) +{ + return ixgbe_dev_link_update_share(dev, wait_to_complete, 0); +} + +static int +ixgbevf_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete) +{ + return ixgbe_dev_link_update_share(dev, wait_to_complete, 1); +} + +static int +ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t fctrl; + + fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); + fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); + IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); + + return 0; +} + +static int +ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t fctrl; + + fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); + fctrl &= (~IXGBE_FCTRL_UPE); + if (dev->data->all_multicast == 1) + fctrl |= IXGBE_FCTRL_MPE; + else + fctrl &= (~IXGBE_FCTRL_MPE); + IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); + + return 0; +} + +static int +ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t fctrl; + + fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); + fctrl |= IXGBE_FCTRL_MPE; + IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); + + return 0; +} + +static int +ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + 
uint32_t fctrl; + + if (dev->data->promiscuous == 1) + return 0; /* must remain in all_multicast mode */ + + fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); + fctrl &= (~IXGBE_FCTRL_MPE); + IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); + + return 0; +} + +/** + * It clears the interrupt causes and enables the interrupt. + * It will be called once only during nic initialized. + * + * @param dev + * Pointer to struct rte_eth_dev. + * @param on + * Enable or Disable. + * + * @return + * - On success, zero. + * - On failure, a negative value. + */ +static int +ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on) +{ + struct ixgbe_interrupt *intr = + IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + + ixgbe_dev_link_status_print(dev); + if (on) + intr->mask |= IXGBE_EICR_LSC; + else + intr->mask &= ~IXGBE_EICR_LSC; + + return 0; +} + +/** + * It clears the interrupt causes and enables the interrupt. + * It will be called once only during nic initialized. + * + * @param dev + * Pointer to struct rte_eth_dev. + * + * @return + * - On success, zero. + * - On failure, a negative value. + */ +static int +ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev) +{ + struct ixgbe_interrupt *intr = + IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + + intr->mask |= IXGBE_EICR_RTX_QUEUE; + + return 0; +} + +/** + * It clears the interrupt causes and enables the interrupt. + * It will be called once only during nic initialized. + * + * @param dev + * Pointer to struct rte_eth_dev. + * + * @return + * - On success, zero. + * - On failure, a negative value. + */ +static int +ixgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev) +{ + struct ixgbe_interrupt *intr = + IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + + intr->mask |= IXGBE_EICR_LINKSEC; + + return 0; +} + +/* + * It reads ICR and sets flag (IXGBE_EICR_LSC) for the link_update. + * + * @param dev + * Pointer to struct rte_eth_dev. + * + * @return + * - On success, zero. + * - On failure, a negative value. + */ +static int +ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev) +{ + uint32_t eicr; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_interrupt *intr = + IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + + /* clear all cause mask */ + ixgbe_disable_intr(hw); + + /* read-on-clear nic registers here */ + eicr = IXGBE_READ_REG(hw, IXGBE_EICR); + PMD_DRV_LOG(DEBUG, "eicr %x", eicr); + + intr->flags = 0; + + /* set flag for async link update */ + if (eicr & IXGBE_EICR_LSC) + intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; + + if (eicr & IXGBE_EICR_MAILBOX) + intr->flags |= IXGBE_FLAG_MAILBOX; + + if (eicr & IXGBE_EICR_LINKSEC) + intr->flags |= IXGBE_FLAG_MACSEC; + + if (hw->mac.type == ixgbe_mac_X550EM_x && + hw->phy.type == ixgbe_phy_x550em_ext_t && + (eicr & IXGBE_EICR_GPI_SDP0_X550EM_x)) + intr->flags |= IXGBE_FLAG_PHY_INTERRUPT; + + return 0; +} + +/** + * It gets and then prints the link status. + * + * @param dev + * Pointer to struct rte_eth_dev. + * + * @return + * - On success, zero. + * - On failure, a negative value. + */ +static void +ixgbe_dev_link_status_print(struct rte_eth_dev *dev) +{ + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_eth_link link; + + rte_eth_linkstatus_get(dev, &link); + + if (link.link_status) { + PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s", + (int)(dev->data->port_id), + (unsigned)link.link_speed, + link.link_duplex == ETH_LINK_FULL_DUPLEX ? 
+ "full-duplex" : "half-duplex"); + } else { + PMD_INIT_LOG(INFO, " Port %d: Link Down", + (int)(dev->data->port_id)); + } + PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT, + pci_dev->addr.domain, + pci_dev->addr.bus, + pci_dev->addr.devid, + pci_dev->addr.function); +} + +/* + * It executes link_update after knowing an interrupt occurred. + * + * @param dev + * Pointer to struct rte_eth_dev. + * + * @return + * - On success, zero. + * - On failure, a negative value. + */ +static int +ixgbe_dev_interrupt_action(struct rte_eth_dev *dev) +{ + struct ixgbe_interrupt *intr = + IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + int64_t timeout; + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags); + + if (intr->flags & IXGBE_FLAG_MAILBOX) { + ixgbe_pf_mbx_process(dev); + intr->flags &= ~IXGBE_FLAG_MAILBOX; + } + + if (intr->flags & IXGBE_FLAG_PHY_INTERRUPT) { + ixgbe_handle_lasi(hw); + intr->flags &= ~IXGBE_FLAG_PHY_INTERRUPT; + } + + if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) { + struct rte_eth_link link; + + /* get the link status before link update, for predicting later */ + rte_eth_linkstatus_get(dev, &link); + + ixgbe_dev_link_update(dev, 0); + + /* likely to up */ + if (!link.link_status) + /* handle it 1 sec later, wait it being stable */ + timeout = IXGBE_LINK_UP_CHECK_TIMEOUT; + /* likely to down */ + else + /* handle it 4 sec later, wait it being stable */ + timeout = IXGBE_LINK_DOWN_CHECK_TIMEOUT; + + ixgbe_dev_link_status_print(dev); + if (rte_eal_alarm_set(timeout * 1000, + ixgbe_dev_interrupt_delayed_handler, (void *)dev) < 0) + PMD_DRV_LOG(ERR, "Error setting alarm"); + else { + /* remember original mask */ + intr->mask_original = intr->mask; + /* only disable lsc interrupt */ + intr->mask &= ~IXGBE_EIMS_LSC; + } + } + + PMD_DRV_LOG(DEBUG, "enable intr immediately"); + ixgbe_enable_intr(dev); + + return 0; +} + +/** + * Interrupt handler which shall be registered for alarm callback for delayed + * handling specific interrupt to wait for the stable nic state. As the + * NIC interrupt state is not stable for ixgbe after link is just down, + * it needs to wait 4 seconds to get the stable status. + * + * @param handle + * Pointer to interrupt handle. + * @param param + * The address of parameter (struct rte_eth_dev *) regsitered before. 
+ * + * @return + * void + */ +static void +ixgbe_dev_interrupt_delayed_handler(void *param) +{ + struct rte_eth_dev *dev = (struct rte_eth_dev *)param; + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + struct ixgbe_interrupt *intr = + IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t eicr; + + ixgbe_disable_intr(hw); + + eicr = IXGBE_READ_REG(hw, IXGBE_EICR); + if (eicr & IXGBE_EICR_MAILBOX) + ixgbe_pf_mbx_process(dev); + + if (intr->flags & IXGBE_FLAG_PHY_INTERRUPT) { + ixgbe_handle_lasi(hw); + intr->flags &= ~IXGBE_FLAG_PHY_INTERRUPT; + } + + if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) { + ixgbe_dev_link_update(dev, 0); + intr->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE; + ixgbe_dev_link_status_print(dev); + _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, + NULL); + } + + if (intr->flags & IXGBE_FLAG_MACSEC) { + _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC, + NULL); + intr->flags &= ~IXGBE_FLAG_MACSEC; + } + + /* restore original mask */ + intr->mask = intr->mask_original; + intr->mask_original = 0; + + PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr); + ixgbe_enable_intr(dev); + rte_intr_ack(intr_handle); +} + +/** + * Interrupt handler triggered by NIC for handling + * specific interrupt. + * + * @param handle + * Pointer to interrupt handle. + * @param param + * The address of parameter (struct rte_eth_dev *) regsitered before. + * + * @return + * void + */ +static void +ixgbe_dev_interrupt_handler(void *param) +{ + struct rte_eth_dev *dev = (struct rte_eth_dev *)param; + + ixgbe_dev_interrupt_get_status(dev); + ixgbe_dev_interrupt_action(dev); +} + +static int +ixgbe_dev_led_on(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw; + + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + return ixgbe_led_on(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP; +} + +static int +ixgbe_dev_led_off(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw; + + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + return ixgbe_led_off(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP; +} + +static int +ixgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) +{ + struct ixgbe_hw *hw; + uint32_t mflcn_reg; + uint32_t fccfg_reg; + int rx_pause; + int tx_pause; + + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + fc_conf->pause_time = hw->fc.pause_time; + fc_conf->high_water = hw->fc.high_water[0]; + fc_conf->low_water = hw->fc.low_water[0]; + fc_conf->send_xon = hw->fc.send_xon; + fc_conf->autoneg = !hw->fc.disable_fc_autoneg; + + /* + * Return rx_pause status according to actual setting of + * MFLCN register. + */ + mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN); + if (mflcn_reg & (IXGBE_MFLCN_RPFCE | IXGBE_MFLCN_RFCE)) + rx_pause = 1; + else + rx_pause = 0; + + /* + * Return tx_pause status according to actual setting of + * FCCFG register. 
+ */ + fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG); + if (fccfg_reg & (IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY)) + tx_pause = 1; + else + tx_pause = 0; + + if (rx_pause && tx_pause) + fc_conf->mode = RTE_FC_FULL; + else if (rx_pause) + fc_conf->mode = RTE_FC_RX_PAUSE; + else if (tx_pause) + fc_conf->mode = RTE_FC_TX_PAUSE; + else + fc_conf->mode = RTE_FC_NONE; + + return 0; +} + +static int +ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) +{ + struct ixgbe_hw *hw; + struct ixgbe_adapter *adapter = dev->data->dev_private; + int err; + uint32_t rx_buf_size; + uint32_t max_high_water; + enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = { + ixgbe_fc_none, + ixgbe_fc_rx_pause, + ixgbe_fc_tx_pause, + ixgbe_fc_full + }; + + PMD_INIT_FUNC_TRACE(); + + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)); + PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size); + + /* + * At least reserve one Ethernet frame for watermark + * high_water/low_water in kilo bytes for ixgbe + */ + max_high_water = (rx_buf_size - + RTE_ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT; + if ((fc_conf->high_water > max_high_water) || + (fc_conf->high_water < fc_conf->low_water)) { + PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB"); + PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water); + return -EINVAL; + } + + hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[fc_conf->mode]; + hw->fc.pause_time = fc_conf->pause_time; + hw->fc.high_water[0] = fc_conf->high_water; + hw->fc.low_water[0] = fc_conf->low_water; + hw->fc.send_xon = fc_conf->send_xon; + hw->fc.disable_fc_autoneg = !fc_conf->autoneg; + adapter->mac_ctrl_frame_fwd = fc_conf->mac_ctrl_frame_fwd; + + err = ixgbe_flow_ctrl_enable(dev, hw); + if (err < 0) { + PMD_INIT_LOG(ERR, "ixgbe_flow_ctrl_enable = 0x%x", err); + return -EIO; + } + return err; +} + +/** + * ixgbe_pfc_enable_generic - Enable flow control + * @hw: pointer to hardware structure + * @tc_num: traffic class number + * Enable flow control according to the current settings. 
+ */ +static int +ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw, uint8_t tc_num) +{ + int ret_val = 0; + uint32_t mflcn_reg, fccfg_reg; + uint32_t reg; + uint32_t fcrtl, fcrth; + uint8_t i; + uint8_t nb_rx_en; + + /* Validate the water mark configuration */ + if (!hw->fc.pause_time) { + ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; + goto out; + } + + /* Low water mark of zero causes XOFF floods */ + if (hw->fc.current_mode & ixgbe_fc_tx_pause) { + /* High/Low water can not be 0 */ + if ((!hw->fc.high_water[tc_num]) || (!hw->fc.low_water[tc_num])) { + PMD_INIT_LOG(ERR, "Invalid water mark configuration"); + ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; + goto out; + } + + if (hw->fc.low_water[tc_num] >= hw->fc.high_water[tc_num]) { + PMD_INIT_LOG(ERR, "Invalid water mark configuration"); + ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; + goto out; + } + } + /* Negotiate the fc mode to use */ + ixgbe_fc_autoneg(hw); + + /* Disable any previous flow control settings */ + mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN); + mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_SHIFT | IXGBE_MFLCN_RFCE|IXGBE_MFLCN_RPFCE); + + fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG); + fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY); + + switch (hw->fc.current_mode) { + case ixgbe_fc_none: + /* + * If the count of enabled RX Priority Flow control >1, + * and the TX pause can not be disabled + */ + nb_rx_en = 0; + for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i)); + if (reg & IXGBE_FCRTH_FCEN) + nb_rx_en++; + } + if (nb_rx_en > 1) + fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY; + break; + case ixgbe_fc_rx_pause: + /* + * Rx Flow control is enabled and Tx Flow control is + * disabled by software override. Since there really + * isn't a way to advertise that we are capable of RX + * Pause ONLY, we will advertise that we support both + * symmetric and asymmetric Rx PAUSE. Later, we will + * disable the adapter's ability to send PAUSE frames. + */ + mflcn_reg |= IXGBE_MFLCN_RPFCE; + /* + * If the count of enabled RX Priority Flow control >1, + * and the TX pause can not be disabled + */ + nb_rx_en = 0; + for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i)); + if (reg & IXGBE_FCRTH_FCEN) + nb_rx_en++; + } + if (nb_rx_en > 1) + fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY; + break; + case ixgbe_fc_tx_pause: + /* + * Tx Flow control is enabled, and Rx Flow control is + * disabled by software override. + */ + fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY; + break; + case ixgbe_fc_full: + /* Flow control (both Rx and Tx) is enabled by SW override. */ + mflcn_reg |= IXGBE_MFLCN_RPFCE; + fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY; + break; + default: + PMD_DRV_LOG(DEBUG, "Flow control param set incorrectly"); + ret_val = IXGBE_ERR_CONFIG; + goto out; + } + + /* Set 802.3x based flow control settings. */ + mflcn_reg |= IXGBE_MFLCN_DPF; + IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg); + IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg); + + /* Set up and enable Rx high/low water mark thresholds, enable XON. 
*/ + if ((hw->fc.current_mode & ixgbe_fc_tx_pause) && + hw->fc.high_water[tc_num]) { + fcrtl = (hw->fc.low_water[tc_num] << 10) | IXGBE_FCRTL_XONE; + IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(tc_num), fcrtl); + fcrth = (hw->fc.high_water[tc_num] << 10) | IXGBE_FCRTH_FCEN; + } else { + IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(tc_num), 0); + /* + * In order to prevent Tx hangs when the internal Tx + * switch is enabled we must set the high water mark + * to the maximum FCRTH value. This allows the Tx + * switch to function even under heavy Rx workloads. + */ + fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc_num)) - 32; + } + IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(tc_num), fcrth); + + /* Configure pause time (2 TCs per register) */ + reg = hw->fc.pause_time * 0x00010001; + for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++) + IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg); + + /* Configure flow control refresh threshold value */ + IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2); + +out: + return ret_val; +} + +static int +ixgbe_dcb_pfc_enable(struct rte_eth_dev *dev, uint8_t tc_num) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int32_t ret_val = IXGBE_NOT_IMPLEMENTED; + + if (hw->mac.type != ixgbe_mac_82598EB) { + ret_val = ixgbe_dcb_pfc_enable_generic(hw, tc_num); + } + return ret_val; +} + +static int +ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *pfc_conf) +{ + int err; + uint32_t rx_buf_size; + uint32_t max_high_water; + uint8_t tc_num; + uint8_t map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 }; + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_dcb_config *dcb_config = + IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private); + + enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = { + ixgbe_fc_none, + ixgbe_fc_rx_pause, + ixgbe_fc_tx_pause, + ixgbe_fc_full + }; + + PMD_INIT_FUNC_TRACE(); + + ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_RX_CONFIG, map); + tc_num = map[pfc_conf->priority]; + rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc_num)); + PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size); + /* + * At least reserve one Ethernet frame for watermark + * high_water/low_water in kilo bytes for ixgbe + */ + max_high_water = (rx_buf_size - + RTE_ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT; + if ((pfc_conf->fc.high_water > max_high_water) || + (pfc_conf->fc.high_water <= pfc_conf->fc.low_water)) { + PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB"); + PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water); + return -EINVAL; + } + + hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[pfc_conf->fc.mode]; + hw->fc.pause_time = pfc_conf->fc.pause_time; + hw->fc.send_xon = pfc_conf->fc.send_xon; + hw->fc.low_water[tc_num] = pfc_conf->fc.low_water; + hw->fc.high_water[tc_num] = pfc_conf->fc.high_water; + + err = ixgbe_dcb_pfc_enable(dev, tc_num); + + /* Not negotiated is not an error case */ + if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED)) + return 0; + + PMD_INIT_LOG(ERR, "ixgbe_dcb_pfc_enable = 0x%x", err); + return -EIO; +} + +static int +ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size) +{ + uint16_t i, sp_reta_size; + uint8_t j, mask; + uint32_t reta, r; + uint16_t idx, shift; + struct ixgbe_adapter *adapter = dev->data->dev_private; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t reta_reg; + + PMD_INIT_FUNC_TRACE(); + + if 
(!ixgbe_rss_update_sp(hw->mac.type)) { + PMD_DRV_LOG(ERR, "RSS reta update is not supported on this " + "NIC."); + return -ENOTSUP; + } + + sp_reta_size = ixgbe_reta_size_get(hw->mac.type); + if (reta_size != sp_reta_size) { + PMD_DRV_LOG(ERR, "The size of hash lookup table configured " + "(%d) doesn't match the number hardware can supported " + "(%d)", reta_size, sp_reta_size); + return -EINVAL; + } + + for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) { + idx = i / RTE_RETA_GROUP_SIZE; + shift = i % RTE_RETA_GROUP_SIZE; + mask = (uint8_t)((reta_conf[idx].mask >> shift) & + IXGBE_4_BIT_MASK); + if (!mask) + continue; + reta_reg = ixgbe_reta_reg_get(hw->mac.type, i); + if (mask == IXGBE_4_BIT_MASK) + r = 0; + else + r = IXGBE_READ_REG(hw, reta_reg); + for (j = 0, reta = 0; j < IXGBE_4_BIT_WIDTH; j++) { + if (mask & (0x1 << j)) + reta |= reta_conf[idx].reta[shift + j] << + (CHAR_BIT * j); + else + reta |= r & (IXGBE_8_BIT_MASK << + (CHAR_BIT * j)); + } + IXGBE_WRITE_REG(hw, reta_reg, reta); + } + adapter->rss_reta_updated = 1; + + return 0; +} + +static int +ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size) +{ + uint16_t i, sp_reta_size; + uint8_t j, mask; + uint32_t reta; + uint16_t idx, shift; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t reta_reg; + + PMD_INIT_FUNC_TRACE(); + sp_reta_size = ixgbe_reta_size_get(hw->mac.type); + if (reta_size != sp_reta_size) { + PMD_DRV_LOG(ERR, "The size of hash lookup table configured " + "(%d) doesn't match the number hardware can supported " + "(%d)", reta_size, sp_reta_size); + return -EINVAL; + } + + for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) { + idx = i / RTE_RETA_GROUP_SIZE; + shift = i % RTE_RETA_GROUP_SIZE; + mask = (uint8_t)((reta_conf[idx].mask >> shift) & + IXGBE_4_BIT_MASK); + if (!mask) + continue; + + reta_reg = ixgbe_reta_reg_get(hw->mac.type, i); + reta = IXGBE_READ_REG(hw, reta_reg); + for (j = 0; j < IXGBE_4_BIT_WIDTH; j++) { + if (mask & (0x1 << j)) + reta_conf[idx].reta[shift + j] = + ((reta >> (CHAR_BIT * j)) & + IXGBE_8_BIT_MASK); + } + } + + return 0; +} + +static int +ixgbe_add_rar(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr, + uint32_t index, uint32_t pool) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t enable_addr = 1; + + return ixgbe_set_rar(hw, index, mac_addr->addr_bytes, + pool, enable_addr); +} + +static void +ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + ixgbe_clear_rar(hw, index); +} + +static int +ixgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr) +{ + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + + ixgbe_remove_rar(dev, 0); + ixgbe_add_rar(dev, addr, 0, pci_dev->max_vfs); + + return 0; +} + +static bool +is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv) +{ + if (strcmp(dev->device->driver->name, drv->driver.name)) + return false; + + return true; +} + +bool +is_ixgbe_supported(struct rte_eth_dev *dev) +{ + return is_device_supported(dev, &rte_ixgbe_pmd); +} + +static int +ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) +{ + uint32_t hlreg0; + uint32_t maxfrs; + struct ixgbe_hw *hw; + struct rte_eth_dev_info dev_info; + uint32_t frame_size = mtu + IXGBE_ETH_OVERHEAD; + struct rte_eth_dev_data *dev_data = dev->data; + int ret; + + ret = ixgbe_dev_info_get(dev, &dev_info); + if (ret != 0) + 
return ret; + + /* check that mtu is within the allowed range */ + if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen) + return -EINVAL; + + /* If device is started, refuse mtu that requires the support of + * scattered packets when this feature has not been enabled before. + */ + if (dev_data->dev_started && !dev_data->scattered_rx && + (frame_size + 2 * IXGBE_VLAN_TAG_SIZE > + dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) { + PMD_INIT_LOG(ERR, "Stop port first."); + return -EINVAL; + } + + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); + + /* switch to jumbo mode if needed */ + if (frame_size > RTE_ETHER_MAX_LEN) { + dev->data->dev_conf.rxmode.offloads |= + DEV_RX_OFFLOAD_JUMBO_FRAME; + hlreg0 |= IXGBE_HLREG0_JUMBOEN; + } else { + dev->data->dev_conf.rxmode.offloads &= + ~DEV_RX_OFFLOAD_JUMBO_FRAME; + hlreg0 &= ~IXGBE_HLREG0_JUMBOEN; + } + IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); + + /* update max frame size */ + dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size; + + maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS); + maxfrs &= 0x0000FFFF; + maxfrs |= (dev->data->dev_conf.rxmode.max_rx_pkt_len << 16); + IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, maxfrs); + + return 0; +} + +/* + * Virtual Function operations + */ +static void +ixgbevf_intr_disable(struct rte_eth_dev *dev) +{ + struct ixgbe_interrupt *intr = + IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + PMD_INIT_FUNC_TRACE(); + + /* Clear interrupt mask to stop from interrupts being generated */ + IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK); + + IXGBE_WRITE_FLUSH(hw); + + /* Clear mask value. */ + intr->mask = 0; +} + +static void +ixgbevf_intr_enable(struct rte_eth_dev *dev) +{ + struct ixgbe_interrupt *intr = + IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + PMD_INIT_FUNC_TRACE(); + + /* VF enable interrupt autoclean */ + IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_VF_IRQ_ENABLE_MASK); + IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, IXGBE_VF_IRQ_ENABLE_MASK); + IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_VF_IRQ_ENABLE_MASK); + + IXGBE_WRITE_FLUSH(hw); + + /* Save IXGBE_VTEIMS value to mask. */ + intr->mask = IXGBE_VF_IRQ_ENABLE_MASK; +} + +static int +ixgbevf_dev_configure(struct rte_eth_dev *dev) +{ + struct rte_eth_conf *conf = &dev->data->dev_conf; + struct ixgbe_adapter *adapter = dev->data->dev_private; + + PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d", + dev->data->port_id); + + if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) + dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH; + + /* + * VF has no ability to enable/disable HW CRC + * Keep the persistent behavior the same as Host PF + */ +#ifndef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC + if (conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) { + PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip"); + conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_KEEP_CRC; + } +#else + if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)) { + PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip"); + conf->rxmode.offloads |= DEV_RX_OFFLOAD_KEEP_CRC; + } +#endif + + /* + * Initialize to TRUE. If any of Rx queues doesn't meet the bulk + * allocation or vector Rx preconditions we will reset it. 
+ */ + adapter->rx_bulk_alloc_allowed = true; + adapter->rx_vec_allowed = true; + + return 0; +} + +static int +ixgbevf_dev_start(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t intr_vector = 0; + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + + int err, mask = 0; + + PMD_INIT_FUNC_TRACE(); + + /* Stop the link setup handler before resetting the HW. */ + ixgbe_dev_wait_setup_link_complete(dev, 0); + + err = hw->mac.ops.reset_hw(hw); + if (err) { + PMD_INIT_LOG(ERR, "Unable to reset vf hardware (%d)", err); + return err; + } + hw->mac.get_link_status = true; + + /* negotiate mailbox API version to use with the PF. */ + ixgbevf_negotiate_api(hw); + + ixgbevf_dev_tx_init(dev); + + /* This can fail when allocating mbufs for descriptor rings */ + err = ixgbevf_dev_rx_init(dev); + if (err) { + PMD_INIT_LOG(ERR, "Unable to initialize RX hardware (%d)", err); + ixgbe_dev_clear_queues(dev); + return err; + } + + /* Set vfta */ + ixgbevf_set_vfta_all(dev, 1); + + /* Set HW strip */ + mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | + ETH_VLAN_EXTEND_MASK; + err = ixgbevf_vlan_offload_config(dev, mask); + if (err) { + PMD_INIT_LOG(ERR, "Unable to set VLAN offload (%d)", err); + ixgbe_dev_clear_queues(dev); + return err; + } + + ixgbevf_dev_rxtx_start(dev); + + /* check and configure queue intr-vector mapping */ + if (rte_intr_cap_multiple(intr_handle) && + dev->data->dev_conf.intr_conf.rxq) { + /* According to datasheet, only vector 0/1/2 can be used, + * now only one vector is used for Rx queue + */ + intr_vector = 1; + if (rte_intr_efd_enable(intr_handle, intr_vector)) + return -1; + } + + if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) { + intr_handle->intr_vec = + rte_zmalloc("intr_vec", + dev->data->nb_rx_queues * sizeof(int), 0); + if (intr_handle->intr_vec == NULL) { + PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues" + " intr_vec", dev->data->nb_rx_queues); + return -ENOMEM; + } + } + ixgbevf_configure_msix(dev); + + /* When a VF port is bound to VFIO-PCI, only miscellaneous interrupt + * is mapped to VFIO vector 0 in eth_ixgbevf_dev_init( ). + * If previous VFIO interrupt mapping setting in eth_ixgbevf_dev_init( ) + * is not cleared, it will fail when following rte_intr_enable( ) tries + * to map Rx queue interrupt to other VFIO vectors. + * So clear uio/vfio intr/evevnfd first to avoid failure. + */ + rte_intr_disable(intr_handle); + + rte_intr_enable(intr_handle); + + /* Re-enable interrupt for VF */ + ixgbevf_intr_enable(dev); + + /* + * Update link status right before return, because it may + * start link configuration process in a separate thread. 
+ */ + ixgbevf_dev_link_update(dev, 0); + + hw->adapter_stopped = false; + + return 0; +} + +static void +ixgbevf_dev_stop(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_adapter *adapter = dev->data->dev_private; + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + + if (hw->adapter_stopped) + return; + + PMD_INIT_FUNC_TRACE(); + + ixgbe_dev_wait_setup_link_complete(dev, 0); + + ixgbevf_intr_disable(dev); + + hw->adapter_stopped = 1; + ixgbe_stop_adapter(hw); + + /* + * Clear what we set, but we still keep shadow_vfta to + * restore after device starts + */ + ixgbevf_set_vfta_all(dev, 0); + + /* Clear stored conf */ + dev->data->scattered_rx = 0; + + ixgbe_dev_clear_queues(dev); + + /* Clean datapath event and queue/vec mapping */ + rte_intr_efd_disable(intr_handle); + if (intr_handle->intr_vec != NULL) { + rte_free(intr_handle->intr_vec); + intr_handle->intr_vec = NULL; + } + + adapter->rss_reta_updated = 0; +} + +static void +ixgbevf_dev_close(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + + PMD_INIT_FUNC_TRACE(); + + ixgbe_reset_hw(hw); + + ixgbevf_dev_stop(dev); + + ixgbe_dev_free_queues(dev); + + /** + * Remove the VF MAC address ro ensure + * that the VF traffic goes to the PF + * after stop, close and detach of the VF + **/ + ixgbevf_remove_mac_addr(dev, 0); + + dev->dev_ops = NULL; + dev->rx_pkt_burst = NULL; + dev->tx_pkt_burst = NULL; + + rte_intr_disable(intr_handle); + rte_intr_callback_unregister(intr_handle, + ixgbevf_dev_interrupt_handler, dev); +} + +/* + * Reset VF device + */ +static int +ixgbevf_dev_reset(struct rte_eth_dev *dev) +{ + int ret; + + ret = eth_ixgbevf_dev_uninit(dev); + if (ret) + return ret; + + ret = eth_ixgbevf_dev_init(dev); + + return ret; +} + +static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_vfta *shadow_vfta = + IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); + int i = 0, j = 0, vfta = 0, mask = 1; + + for (i = 0; i < IXGBE_VFTA_SIZE; i++) { + vfta = shadow_vfta->vfta[i]; + if (vfta) { + mask = 1; + for (j = 0; j < 32; j++) { + if (vfta & mask) + ixgbe_set_vfta(hw, (i<<5)+j, 0, + on, false); + mask <<= 1; + } + } + } + +} + +static int +ixgbevf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) +{ + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_vfta *shadow_vfta = + IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); + uint32_t vid_idx = 0; + uint32_t vid_bit = 0; + int ret = 0; + + PMD_INIT_FUNC_TRACE(); + + /* vind is not used in VF driver, set to 0, check ixgbe_set_vfta_vf */ + ret = ixgbe_set_vfta(hw, vlan_id, 0, !!on, false); + if (ret) { + PMD_INIT_LOG(ERR, "Unable to set VF vlan"); + return ret; + } + vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F); + vid_bit = (uint32_t) (1 << (vlan_id & 0x1F)); + + /* Save what we set and retore it after device reset */ + if (on) + shadow_vfta->vfta[vid_idx] |= vid_bit; + else + shadow_vfta->vfta[vid_idx] &= ~vid_bit; + + return 0; +} + +static void +ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on) +{ + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t ctrl; + + 
PMD_INIT_FUNC_TRACE(); + + if (queue >= hw->mac.max_rx_queues) + return; + + ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue)); + if (on) + ctrl |= IXGBE_RXDCTL_VME; + else + ctrl &= ~IXGBE_RXDCTL_VME; + IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl); + + ixgbe_vlan_hw_strip_bitmap_set(dev, queue, on); +} + +static int +ixgbevf_vlan_offload_config(struct rte_eth_dev *dev, int mask) +{ + struct ixgbe_rx_queue *rxq; + uint16_t i; + int on = 0; + + /* VF function only support hw strip feature, others are not support */ + if (mask & ETH_VLAN_STRIP_MASK) { + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = dev->data->rx_queues[i]; + on = !!(rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP); + ixgbevf_vlan_strip_queue_set(dev, i, on); + } + } + + return 0; +} + +static int +ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask) +{ + ixgbe_config_vlan_strip_on_all_queues(dev, mask); + + ixgbevf_vlan_offload_config(dev, mask); + + return 0; +} + +int +ixgbe_vt_check(struct ixgbe_hw *hw) +{ + uint32_t reg_val; + + /* if Virtualization Technology is enabled */ + reg_val = IXGBE_READ_REG(hw, IXGBE_VT_CTL); + if (!(reg_val & IXGBE_VT_CTL_VT_ENABLE)) { + PMD_INIT_LOG(ERR, "VT must be enabled for this setting"); + return -1; + } + + return 0; +} + +static uint32_t +ixgbe_uta_vector(struct ixgbe_hw *hw, struct rte_ether_addr *uc_addr) +{ + uint32_t vector = 0; + + switch (hw->mac.mc_filter_type) { + case 0: /* use bits [47:36] of the address */ + vector = ((uc_addr->addr_bytes[4] >> 4) | + (((uint16_t)uc_addr->addr_bytes[5]) << 4)); + break; + case 1: /* use bits [46:35] of the address */ + vector = ((uc_addr->addr_bytes[4] >> 3) | + (((uint16_t)uc_addr->addr_bytes[5]) << 5)); + break; + case 2: /* use bits [45:34] of the address */ + vector = ((uc_addr->addr_bytes[4] >> 2) | + (((uint16_t)uc_addr->addr_bytes[5]) << 6)); + break; + case 3: /* use bits [43:32] of the address */ + vector = ((uc_addr->addr_bytes[4]) | + (((uint16_t)uc_addr->addr_bytes[5]) << 8)); + break; + default: /* Invalid mc_filter_type */ + break; + } + + /* vector can only be 12-bits or boundary will be exceeded */ + vector &= 0xFFF; + return vector; +} + +static int +ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, + struct rte_ether_addr *mac_addr, uint8_t on) +{ + uint32_t vector; + uint32_t uta_idx; + uint32_t reg_val; + uint32_t uta_shift; + uint32_t rc; + const uint32_t ixgbe_uta_idx_mask = 0x7F; + const uint32_t ixgbe_uta_bit_shift = 5; + const uint32_t ixgbe_uta_bit_mask = (0x1 << ixgbe_uta_bit_shift) - 1; + const uint32_t bit1 = 0x1; + + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_uta_info *uta_info = + IXGBE_DEV_PRIVATE_TO_UTA(dev->data->dev_private); + + /* The UTA table only exists on 82599 hardware and newer */ + if (hw->mac.type < ixgbe_mac_82599EB) + return -ENOTSUP; + + vector = ixgbe_uta_vector(hw, mac_addr); + uta_idx = (vector >> ixgbe_uta_bit_shift) & ixgbe_uta_idx_mask; + uta_shift = vector & ixgbe_uta_bit_mask; + + rc = ((uta_info->uta_shadow[uta_idx] >> uta_shift & bit1) != 0); + if (rc == on) + return 0; + + reg_val = IXGBE_READ_REG(hw, IXGBE_UTA(uta_idx)); + if (on) { + uta_info->uta_in_use++; + reg_val |= (bit1 << uta_shift); + uta_info->uta_shadow[uta_idx] |= (bit1 << uta_shift); + } else { + uta_info->uta_in_use--; + reg_val &= ~(bit1 << uta_shift); + uta_info->uta_shadow[uta_idx] &= ~(bit1 << uta_shift); + } + + IXGBE_WRITE_REG(hw, IXGBE_UTA(uta_idx), reg_val); + + if (uta_info->uta_in_use > 0) + IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, + IXGBE_MCSTCTRL_MFE | 
hw->mac.mc_filter_type); + else + IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type); + + return 0; +} + +static int +ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on) +{ + int i; + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_uta_info *uta_info = + IXGBE_DEV_PRIVATE_TO_UTA(dev->data->dev_private); + + /* The UTA table only exists on 82599 hardware and newer */ + if (hw->mac.type < ixgbe_mac_82599EB) + return -ENOTSUP; + + if (on) { + for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) { + uta_info->uta_shadow[i] = ~0; + IXGBE_WRITE_REG(hw, IXGBE_UTA(i), ~0); + } + } else { + for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) { + uta_info->uta_shadow[i] = 0; + IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0); + } + } + return 0; + +} + +uint32_t +ixgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val) +{ + uint32_t new_val = orig_val; + + if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG) + new_val |= IXGBE_VMOLR_AUPE; + if (rx_mask & ETH_VMDQ_ACCEPT_HASH_MC) + new_val |= IXGBE_VMOLR_ROMPE; + if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC) + new_val |= IXGBE_VMOLR_ROPE; + if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST) + new_val |= IXGBE_VMOLR_BAM; + if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST) + new_val |= IXGBE_VMOLR_MPE; + + return new_val; +} + +#define IXGBE_MRCTL_VPME 0x01 /* Virtual Pool Mirroring. */ +#define IXGBE_MRCTL_UPME 0x02 /* Uplink Port Mirroring. */ +#define IXGBE_MRCTL_DPME 0x04 /* Downlink Port Mirroring. */ +#define IXGBE_MRCTL_VLME 0x08 /* VLAN Mirroring. */ +#define IXGBE_INVALID_MIRROR_TYPE(mirror_type) \ + ((mirror_type) & ~(uint8_t)(ETH_MIRROR_VIRTUAL_POOL_UP | \ + ETH_MIRROR_UPLINK_PORT | ETH_MIRROR_DOWNLINK_PORT | ETH_MIRROR_VLAN)) + +static int +ixgbe_mirror_rule_set(struct rte_eth_dev *dev, + struct rte_eth_mirror_conf *mirror_conf, + uint8_t rule_id, uint8_t on) +{ + uint32_t mr_ctl, vlvf; + uint32_t mp_lsb = 0; + uint32_t mv_msb = 0; + uint32_t mv_lsb = 0; + uint32_t mp_msb = 0; + uint8_t i = 0; + int reg_index = 0; + uint64_t vlan_mask = 0; + + const uint8_t pool_mask_offset = 32; + const uint8_t vlan_mask_offset = 32; + const uint8_t dst_pool_offset = 8; + const uint8_t rule_mr_offset = 4; + const uint8_t mirror_rule_mask = 0x0F; + + struct ixgbe_mirror_info *mr_info = + (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private)); + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint8_t mirror_type = 0; + + if (ixgbe_vt_check(hw) < 0) + return -ENOTSUP; + + if (rule_id >= IXGBE_MAX_MIRROR_RULES) + return -EINVAL; + + if (IXGBE_INVALID_MIRROR_TYPE(mirror_conf->rule_type)) { + PMD_DRV_LOG(ERR, "unsupported mirror type 0x%x.", + mirror_conf->rule_type); + return -EINVAL; + } + + if (mirror_conf->rule_type & ETH_MIRROR_VLAN) { + mirror_type |= IXGBE_MRCTL_VLME; + /* Check if vlan id is valid and find conresponding VLAN ID + * index in VLVF + */ + for (i = 0; i < IXGBE_VLVF_ENTRIES; i++) { + if (mirror_conf->vlan.vlan_mask & (1ULL << i)) { + /* search vlan id related pool vlan filter + * index + */ + reg_index = ixgbe_find_vlvf_slot( + hw, + mirror_conf->vlan.vlan_id[i], + false); + if (reg_index < 0) + return -EINVAL; + vlvf = IXGBE_READ_REG(hw, + IXGBE_VLVF(reg_index)); + if ((vlvf & IXGBE_VLVF_VIEN) && + ((vlvf & IXGBE_VLVF_VLANID_MASK) == + mirror_conf->vlan.vlan_id[i])) + vlan_mask |= (1ULL << reg_index); + else + return -EINVAL; + } + } + + if (on) { + mv_lsb = vlan_mask & 0xFFFFFFFF; + mv_msb = vlan_mask >> vlan_mask_offset; + + mr_info->mr_conf[rule_id].vlan.vlan_mask = + mirror_conf->vlan.vlan_mask; + 
for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++) { + if (mirror_conf->vlan.vlan_mask & (1ULL << i)) + mr_info->mr_conf[rule_id].vlan.vlan_id[i] = + mirror_conf->vlan.vlan_id[i]; + } + } else { + mv_lsb = 0; + mv_msb = 0; + mr_info->mr_conf[rule_id].vlan.vlan_mask = 0; + for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++) + mr_info->mr_conf[rule_id].vlan.vlan_id[i] = 0; + } + } + + /** + * if enable pool mirror, write related pool mask register,if disable + * pool mirror, clear PFMRVM register + */ + if (mirror_conf->rule_type & ETH_MIRROR_VIRTUAL_POOL_UP) { + mirror_type |= IXGBE_MRCTL_VPME; + if (on) { + mp_lsb = mirror_conf->pool_mask & 0xFFFFFFFF; + mp_msb = mirror_conf->pool_mask >> pool_mask_offset; + mr_info->mr_conf[rule_id].pool_mask = + mirror_conf->pool_mask; + + } else { + mp_lsb = 0; + mp_msb = 0; + mr_info->mr_conf[rule_id].pool_mask = 0; + } + } + if (mirror_conf->rule_type & ETH_MIRROR_UPLINK_PORT) + mirror_type |= IXGBE_MRCTL_UPME; + if (mirror_conf->rule_type & ETH_MIRROR_DOWNLINK_PORT) + mirror_type |= IXGBE_MRCTL_DPME; + + /* read mirror control register and recalculate it */ + mr_ctl = IXGBE_READ_REG(hw, IXGBE_MRCTL(rule_id)); + + if (on) { + mr_ctl |= mirror_type; + mr_ctl &= mirror_rule_mask; + mr_ctl |= mirror_conf->dst_pool << dst_pool_offset; + } else { + mr_ctl &= ~(mirror_conf->rule_type & mirror_rule_mask); + } + + mr_info->mr_conf[rule_id].rule_type = mirror_conf->rule_type; + mr_info->mr_conf[rule_id].dst_pool = mirror_conf->dst_pool; + + /* write mirrror control register */ + IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl); + + /* write pool mirrror control register */ + if (mirror_conf->rule_type & ETH_MIRROR_VIRTUAL_POOL_UP) { + IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), mp_lsb); + IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset), + mp_msb); + } + /* write VLAN mirrror control register */ + if (mirror_conf->rule_type & ETH_MIRROR_VLAN) { + IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), mv_lsb); + IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset), + mv_msb); + } + + return 0; +} + +static int +ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id) +{ + int mr_ctl = 0; + uint32_t lsb_val = 0; + uint32_t msb_val = 0; + const uint8_t rule_mr_offset = 4; + + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_mirror_info *mr_info = + (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private)); + + if (ixgbe_vt_check(hw) < 0) + return -ENOTSUP; + + if (rule_id >= IXGBE_MAX_MIRROR_RULES) + return -EINVAL; + + memset(&mr_info->mr_conf[rule_id], 0, + sizeof(struct rte_eth_mirror_conf)); + + /* clear PFVMCTL register */ + IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl); + + /* clear pool mask register */ + IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), lsb_val); + IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset), msb_val); + + /* clear vlan mask register */ + IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), lsb_val); + IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset), msb_val); + + return 0; +} + +static int +ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) +{ + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + struct ixgbe_interrupt *intr = + IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t vec = IXGBE_MISC_VEC_ID; + + if (rte_intr_allow_others(intr_handle)) + vec = IXGBE_RX_VEC_START; + intr->mask |= (1 << vec); + 
RTE_SET_USED(queue_id); + IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, intr->mask); + + rte_intr_ack(intr_handle); + + return 0; +} + +static int +ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) +{ + struct ixgbe_interrupt *intr = + IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + uint32_t vec = IXGBE_MISC_VEC_ID; + + if (rte_intr_allow_others(intr_handle)) + vec = IXGBE_RX_VEC_START; + intr->mask &= ~(1 << vec); + RTE_SET_USED(queue_id); + IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, intr->mask); + + return 0; +} + +static int +ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) +{ + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + uint32_t mask; + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_interrupt *intr = + IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + + if (queue_id < 16) { + ixgbe_disable_intr(hw); + intr->mask |= (1 << queue_id); + ixgbe_enable_intr(dev); + } else if (queue_id < 32) { + mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0)); + mask &= (1 << queue_id); + IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask); + } else if (queue_id < 64) { + mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1)); + mask &= (1 << (queue_id - 32)); + IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask); + } + rte_intr_ack(intr_handle); + + return 0; +} + +static int +ixgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) +{ + uint32_t mask; + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_interrupt *intr = + IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + + if (queue_id < 16) { + ixgbe_disable_intr(hw); + intr->mask &= ~(1 << queue_id); + ixgbe_enable_intr(dev); + } else if (queue_id < 32) { + mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0)); + mask &= ~(1 << queue_id); + IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask); + } else if (queue_id < 64) { + mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1)); + mask &= ~(1 << (queue_id - 32)); + IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask); + } + + return 0; +} + +static void +ixgbevf_set_ivar_map(struct ixgbe_hw *hw, int8_t direction, + uint8_t queue, uint8_t msix_vector) +{ + uint32_t tmp, idx; + + if (direction == -1) { + /* other causes */ + msix_vector |= IXGBE_IVAR_ALLOC_VAL; + tmp = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC); + tmp &= ~0xFF; + tmp |= msix_vector; + IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, tmp); + } else { + /* rx or tx cause */ + msix_vector |= IXGBE_IVAR_ALLOC_VAL; + idx = ((16 * (queue & 1)) + (8 * direction)); + tmp = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1)); + tmp &= ~(0xFF << idx); + tmp |= (msix_vector << idx); + IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), tmp); + } +} + +/** + * set the IVAR registers, mapping interrupt causes to vectors + * @param hw + * pointer to ixgbe_hw struct + * @direction + * 0 for Rx, 1 for Tx, -1 for other causes + * @queue + * queue to map the corresponding interrupt to + * @msix_vector + * the vector to map to the corresponding queue + */ +static void +ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction, + uint8_t queue, uint8_t msix_vector) +{ + uint32_t tmp, idx; + + msix_vector |= IXGBE_IVAR_ALLOC_VAL; + if (hw->mac.type == ixgbe_mac_82598EB) { + if (direction == -1) + direction = 0; + idx = (((direction * 64) + 
queue) >> 2) & 0x1F; + tmp = IXGBE_READ_REG(hw, IXGBE_IVAR(idx)); + tmp &= ~(0xFF << (8 * (queue & 0x3))); + tmp |= (msix_vector << (8 * (queue & 0x3))); + IXGBE_WRITE_REG(hw, IXGBE_IVAR(idx), tmp); + } else if ((hw->mac.type == ixgbe_mac_82599EB) || + (hw->mac.type == ixgbe_mac_X540) || + (hw->mac.type == ixgbe_mac_X550) || + (hw->mac.type == ixgbe_mac_X550EM_x)) { + if (direction == -1) { + /* other causes */ + idx = ((queue & 1) * 8); + tmp = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC); + tmp &= ~(0xFF << idx); + tmp |= (msix_vector << idx); + IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, tmp); + } else { + /* rx or tx causes */ + idx = ((16 * (queue & 1)) + (8 * direction)); + tmp = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1)); + tmp &= ~(0xFF << idx); + tmp |= (msix_vector << idx); + IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), tmp); + } + } +} + +static void +ixgbevf_configure_msix(struct rte_eth_dev *dev) +{ + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t q_idx; + uint32_t vector_idx = IXGBE_MISC_VEC_ID; + uint32_t base = IXGBE_MISC_VEC_ID; + + /* Configure VF other cause ivar */ + ixgbevf_set_ivar_map(hw, -1, 1, vector_idx); + + /* won't configure msix register if no mapping is done + * between intr vector and event fd. + */ + if (!rte_intr_dp_is_en(intr_handle)) + return; + + if (rte_intr_allow_others(intr_handle)) { + base = IXGBE_RX_VEC_START; + vector_idx = IXGBE_RX_VEC_START; + } + + /* Configure all RX queues of VF */ + for (q_idx = 0; q_idx < dev->data->nb_rx_queues; q_idx++) { + /* Force all queue use vector 0, + * as IXGBE_VF_MAXMSIVECOTR = 1 + */ + ixgbevf_set_ivar_map(hw, 0, q_idx, vector_idx); + intr_handle->intr_vec[q_idx] = vector_idx; + if (vector_idx < base + intr_handle->nb_efd - 1) + vector_idx++; + } + + /* As RX queue setting above show, all queues use the vector 0. + * Set only the ITR value of IXGBE_MISC_VEC_ID. + */ + IXGBE_WRITE_REG(hw, IXGBE_VTEITR(IXGBE_MISC_VEC_ID), + IXGBE_EITR_INTERVAL_US(IXGBE_QUEUE_ITR_INTERVAL_DEFAULT) + | IXGBE_EITR_CNT_WDIS); +} + +/** + * Sets up the hardware to properly generate MSI-X interrupts + * @hw + * board private structure + */ +static void +ixgbe_configure_msix(struct rte_eth_dev *dev) +{ + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t queue_id, base = IXGBE_MISC_VEC_ID; + uint32_t vec = IXGBE_MISC_VEC_ID; + uint32_t mask; + uint32_t gpie; + + /* won't configure msix register if no mapping is done + * between intr vector and event fd + * but if misx has been enabled already, need to configure + * auto clean, auto mask and throttling. 
+ */ + gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); + if (!rte_intr_dp_is_en(intr_handle) && + !(gpie & (IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT))) + return; + + if (rte_intr_allow_others(intr_handle)) + vec = base = IXGBE_RX_VEC_START; + + /* setup GPIE for MSI-x mode */ + gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); + gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT | + IXGBE_GPIE_OCD | IXGBE_GPIE_EIAME; + /* auto clearing and auto setting corresponding bits in EIMS + * when MSI-X interrupt is triggered + */ + if (hw->mac.type == ixgbe_mac_82598EB) { + IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); + } else { + IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF); + IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF); + } + IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); + + /* Populate the IVAR table and set the ITR values to the + * corresponding register. + */ + if (rte_intr_dp_is_en(intr_handle)) { + for (queue_id = 0; queue_id < dev->data->nb_rx_queues; + queue_id++) { + /* by default, 1:1 mapping */ + ixgbe_set_ivar_map(hw, 0, queue_id, vec); + intr_handle->intr_vec[queue_id] = vec; + if (vec < base + intr_handle->nb_efd - 1) + vec++; + } + + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + ixgbe_set_ivar_map(hw, -1, + IXGBE_IVAR_OTHER_CAUSES_INDEX, + IXGBE_MISC_VEC_ID); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + ixgbe_set_ivar_map(hw, -1, 1, IXGBE_MISC_VEC_ID); + break; + default: + break; + } + } + IXGBE_WRITE_REG(hw, IXGBE_EITR(IXGBE_MISC_VEC_ID), + IXGBE_EITR_INTERVAL_US(IXGBE_QUEUE_ITR_INTERVAL_DEFAULT) + | IXGBE_EITR_CNT_WDIS); + + /* set up to autoclear timer, and the vectors */ + mask = IXGBE_EIMS_ENABLE_MASK; + mask &= ~(IXGBE_EIMS_OTHER | + IXGBE_EIMS_MAILBOX | + IXGBE_EIMS_LSC); + + IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask); +} + +int +ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev, + uint16_t queue_idx, uint16_t tx_rate) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_eth_rxmode *rxmode; + uint32_t rf_dec, rf_int; + uint32_t bcnrc_val; + uint16_t link_speed = dev->data->dev_link.link_speed; + + if (queue_idx >= hw->mac.max_tx_queues) + return -EINVAL; + + if (tx_rate != 0) { + /* Calculate the rate factor values to set */ + rf_int = (uint32_t)link_speed / (uint32_t)tx_rate; + rf_dec = (uint32_t)link_speed % (uint32_t)tx_rate; + rf_dec = (rf_dec << IXGBE_RTTBCNRC_RF_INT_SHIFT) / tx_rate; + + bcnrc_val = IXGBE_RTTBCNRC_RS_ENA; + bcnrc_val |= ((rf_int << IXGBE_RTTBCNRC_RF_INT_SHIFT) & + IXGBE_RTTBCNRC_RF_INT_MASK_M); + bcnrc_val |= (rf_dec & IXGBE_RTTBCNRC_RF_DEC_MASK); + } else { + bcnrc_val = 0; + } + + rxmode = &dev->data->dev_conf.rxmode; + /* + * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM + * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported, otherwise + * set as 0x4. 
+ */ + if ((rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) && + (rxmode->max_rx_pkt_len >= IXGBE_MAX_JUMBO_FRAME_SIZE)) + IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, + IXGBE_MMW_SIZE_JUMBO_FRAME); + else + IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, + IXGBE_MMW_SIZE_DEFAULT); + + /* Set RTTBCNRC of queue X */ + IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, queue_idx); + IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val); + IXGBE_WRITE_FLUSH(hw); + + return 0; +} + +static int +ixgbevf_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr, + __rte_unused uint32_t index, + __rte_unused uint32_t pool) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int diag; + + /* + * On a 82599 VF, adding again the same MAC addr is not an idempotent + * operation. Trap this case to avoid exhausting the [very limited] + * set of PF resources used to store VF MAC addresses. + */ + if (memcmp(hw->mac.perm_addr, mac_addr, + sizeof(struct rte_ether_addr)) == 0) + return -1; + diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes); + if (diag != 0) + PMD_DRV_LOG(ERR, "Unable to add MAC address " + "%02x:%02x:%02x:%02x:%02x:%02x - diag=%d", + mac_addr->addr_bytes[0], + mac_addr->addr_bytes[1], + mac_addr->addr_bytes[2], + mac_addr->addr_bytes[3], + mac_addr->addr_bytes[4], + mac_addr->addr_bytes[5], + diag); + return diag; +} + +static void +ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_ether_addr *perm_addr = + (struct rte_ether_addr *)hw->mac.perm_addr; + struct rte_ether_addr *mac_addr; + uint32_t i; + int diag; + + /* + * The IXGBE_VF_SET_MACVLAN command of the ixgbe-pf driver does + * not support the deletion of a given MAC address. + * Instead, it imposes to delete all MAC addresses, then to add again + * all MAC addresses with the exception of the one to be deleted. + */ + (void) ixgbevf_set_uc_addr_vf(hw, 0, NULL); + + /* + * Add again all MAC addresses, with the exception of the deleted one + * and of the permanent MAC address. 
+ */ + for (i = 0, mac_addr = dev->data->mac_addrs; + i < hw->mac.num_rar_entries; i++, mac_addr++) { + /* Skip the deleted MAC address */ + if (i == index) + continue; + /* Skip NULL MAC addresses */ + if (rte_is_zero_ether_addr(mac_addr)) + continue; + /* Skip the permanent MAC address */ + if (memcmp(perm_addr, mac_addr, + sizeof(struct rte_ether_addr)) == 0) + continue; + diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes); + if (diag != 0) + PMD_DRV_LOG(ERR, + "Adding again MAC address " + "%02x:%02x:%02x:%02x:%02x:%02x failed " + "diag=%d", + mac_addr->addr_bytes[0], + mac_addr->addr_bytes[1], + mac_addr->addr_bytes[2], + mac_addr->addr_bytes[3], + mac_addr->addr_bytes[4], + mac_addr->addr_bytes[5], + diag); + } +} + +static int +ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev, + struct rte_ether_addr *addr) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + hw->mac.ops.set_rar(hw, 0, (void *)addr, 0, 0); + + return 0; +} + +int +ixgbe_syn_filter_set(struct rte_eth_dev *dev, + struct rte_eth_syn_filter *filter, + bool add) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_filter_info *filter_info = + IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + uint32_t syn_info; + uint32_t synqf; + + if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) + return -EINVAL; + + syn_info = filter_info->syn_info; + + if (add) { + if (syn_info & IXGBE_SYN_FILTER_ENABLE) + return -EINVAL; + synqf = (uint32_t)(((filter->queue << IXGBE_SYN_FILTER_QUEUE_SHIFT) & + IXGBE_SYN_FILTER_QUEUE) | IXGBE_SYN_FILTER_ENABLE); + + if (filter->hig_pri) + synqf |= IXGBE_SYN_FILTER_SYNQFP; + else + synqf &= ~IXGBE_SYN_FILTER_SYNQFP; + } else { + synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF); + if (!(syn_info & IXGBE_SYN_FILTER_ENABLE)) + return -ENOENT; + synqf &= ~(IXGBE_SYN_FILTER_QUEUE | IXGBE_SYN_FILTER_ENABLE); + } + + filter_info->syn_info = synqf; + IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf); + IXGBE_WRITE_FLUSH(hw); + return 0; +} + +static int +ixgbe_syn_filter_get(struct rte_eth_dev *dev, + struct rte_eth_syn_filter *filter) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF); + + if (synqf & IXGBE_SYN_FILTER_ENABLE) { + filter->hig_pri = (synqf & IXGBE_SYN_FILTER_SYNQFP) ? 
1 : 0; + filter->queue = (uint16_t)((synqf & IXGBE_SYN_FILTER_QUEUE) >> 1); + return 0; + } + return -ENOENT; +} + +static int +ixgbe_syn_filter_handle(struct rte_eth_dev *dev, + enum rte_filter_op filter_op, + void *arg) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int ret; + + MAC_TYPE_FILTER_SUP(hw->mac.type); + + if (filter_op == RTE_ETH_FILTER_NOP) + return 0; + + if (arg == NULL) { + PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u", + filter_op); + return -EINVAL; + } + + switch (filter_op) { + case RTE_ETH_FILTER_ADD: + ret = ixgbe_syn_filter_set(dev, + (struct rte_eth_syn_filter *)arg, + TRUE); + break; + case RTE_ETH_FILTER_DELETE: + ret = ixgbe_syn_filter_set(dev, + (struct rte_eth_syn_filter *)arg, + FALSE); + break; + case RTE_ETH_FILTER_GET: + ret = ixgbe_syn_filter_get(dev, + (struct rte_eth_syn_filter *)arg); + break; + default: + PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op); + ret = -EINVAL; + break; + } + + return ret; +} + + +static inline enum ixgbe_5tuple_protocol +convert_protocol_type(uint8_t protocol_value) +{ + if (protocol_value == IPPROTO_TCP) + return IXGBE_FILTER_PROTOCOL_TCP; + else if (protocol_value == IPPROTO_UDP) + return IXGBE_FILTER_PROTOCOL_UDP; + else if (protocol_value == IPPROTO_SCTP) + return IXGBE_FILTER_PROTOCOL_SCTP; + else + return IXGBE_FILTER_PROTOCOL_NONE; +} + +/* inject a 5-tuple filter to HW */ +static inline void +ixgbe_inject_5tuple_filter(struct rte_eth_dev *dev, + struct ixgbe_5tuple_filter *filter) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int i; + uint32_t ftqf, sdpqf; + uint32_t l34timir = 0; + uint8_t mask = 0xff; + + i = filter->index; + + sdpqf = (uint32_t)(filter->filter_info.dst_port << + IXGBE_SDPQF_DSTPORT_SHIFT); + sdpqf = sdpqf | (filter->filter_info.src_port & IXGBE_SDPQF_SRCPORT); + + ftqf = (uint32_t)(filter->filter_info.proto & + IXGBE_FTQF_PROTOCOL_MASK); + ftqf |= (uint32_t)((filter->filter_info.priority & + IXGBE_FTQF_PRIORITY_MASK) << IXGBE_FTQF_PRIORITY_SHIFT); + if (filter->filter_info.src_ip_mask == 0) /* 0 means compare. */ + mask &= IXGBE_FTQF_SOURCE_ADDR_MASK; + if (filter->filter_info.dst_ip_mask == 0) + mask &= IXGBE_FTQF_DEST_ADDR_MASK; + if (filter->filter_info.src_port_mask == 0) + mask &= IXGBE_FTQF_SOURCE_PORT_MASK; + if (filter->filter_info.dst_port_mask == 0) + mask &= IXGBE_FTQF_DEST_PORT_MASK; + if (filter->filter_info.proto_mask == 0) + mask &= IXGBE_FTQF_PROTOCOL_COMP_MASK; + ftqf |= mask << IXGBE_FTQF_5TUPLE_MASK_SHIFT; + ftqf |= IXGBE_FTQF_POOL_MASK_EN; + ftqf |= IXGBE_FTQF_QUEUE_ENABLE; + + IXGBE_WRITE_REG(hw, IXGBE_DAQF(i), filter->filter_info.dst_ip); + IXGBE_WRITE_REG(hw, IXGBE_SAQF(i), filter->filter_info.src_ip); + IXGBE_WRITE_REG(hw, IXGBE_SDPQF(i), sdpqf); + IXGBE_WRITE_REG(hw, IXGBE_FTQF(i), ftqf); + + l34timir |= IXGBE_L34T_IMIR_RESERVE; + l34timir |= (uint32_t)(filter->queue << + IXGBE_L34T_IMIR_QUEUE_SHIFT); + IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(i), l34timir); +} + +/* + * add a 5tuple filter + * + * @param + * dev: Pointer to struct rte_eth_dev. + * index: the index the filter allocates. + * filter: ponter to the filter that will be added. + * rx_queue: the queue id the filter assigned to. + * + * @return + * - On success, zero. + * - On failure, a negative value. 
+ */ +static int +ixgbe_add_5tuple_filter(struct rte_eth_dev *dev, + struct ixgbe_5tuple_filter *filter) +{ + struct ixgbe_filter_info *filter_info = + IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + int i, idx, shift; + + /* + * look for an unused 5tuple filter index, + * and insert the filter to list. + */ + for (i = 0; i < IXGBE_MAX_FTQF_FILTERS; i++) { + idx = i / (sizeof(uint32_t) * NBBY); + shift = i % (sizeof(uint32_t) * NBBY); + if (!(filter_info->fivetuple_mask[idx] & (1 << shift))) { + filter_info->fivetuple_mask[idx] |= 1 << shift; + filter->index = i; + TAILQ_INSERT_TAIL(&filter_info->fivetuple_list, + filter, + entries); + break; + } + } + if (i >= IXGBE_MAX_FTQF_FILTERS) { + PMD_DRV_LOG(ERR, "5tuple filters are full."); + return -ENOSYS; + } + + ixgbe_inject_5tuple_filter(dev, filter); + + return 0; +} + +/* + * remove a 5tuple filter + * + * @param + * dev: Pointer to struct rte_eth_dev. + * filter: the pointer of the filter will be removed. + */ +static void +ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev, + struct ixgbe_5tuple_filter *filter) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_filter_info *filter_info = + IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + uint16_t index = filter->index; + + filter_info->fivetuple_mask[index / (sizeof(uint32_t) * NBBY)] &= + ~(1 << (index % (sizeof(uint32_t) * NBBY))); + TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries); + rte_free(filter); + + IXGBE_WRITE_REG(hw, IXGBE_DAQF(index), 0); + IXGBE_WRITE_REG(hw, IXGBE_SAQF(index), 0); + IXGBE_WRITE_REG(hw, IXGBE_SDPQF(index), 0); + IXGBE_WRITE_REG(hw, IXGBE_FTQF(index), 0); + IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(index), 0); +} + +static int +ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) +{ + struct ixgbe_hw *hw; + uint32_t max_frame = mtu + IXGBE_ETH_OVERHEAD; + struct rte_eth_dev_data *dev_data = dev->data; + + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (mtu < RTE_ETHER_MIN_MTU || + max_frame > RTE_ETHER_MAX_JUMBO_FRAME_LEN) + return -EINVAL; + + /* If device is started, refuse mtu that requires the support of + * scattered packets when this feature has not been enabled before. + */ + if (dev_data->dev_started && !dev_data->scattered_rx && + (max_frame + 2 * IXGBE_VLAN_TAG_SIZE > + dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) { + PMD_INIT_LOG(ERR, "Stop port first."); + return -EINVAL; + } + + /* + * When supported by the underlying PF driver, use the IXGBE_VF_SET_MTU + * request of the version 2.0 of the mailbox API. + * For now, use the IXGBE_VF_SET_LPE request of the version 1.0 + * of the mailbox API. 
+ * This call to IXGBE_SET_LPE action won't work with ixgbe pf drivers + * prior to 3.11.33 which contains the following change: + * "ixgbe: Enable jumbo frames support w/ SR-IOV" + */ + ixgbevf_rlpml_set_vf(hw, max_frame); + + /* update max frame size */ + dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame; + return 0; +} + +static inline struct ixgbe_5tuple_filter * +ixgbe_5tuple_filter_lookup(struct ixgbe_5tuple_filter_list *filter_list, + struct ixgbe_5tuple_filter_info *key) +{ + struct ixgbe_5tuple_filter *it; + + TAILQ_FOREACH(it, filter_list, entries) { + if (memcmp(key, &it->filter_info, + sizeof(struct ixgbe_5tuple_filter_info)) == 0) { + return it; + } + } + return NULL; +} + +/* translate elements in struct rte_eth_ntuple_filter to struct ixgbe_5tuple_filter_info*/ +static inline int +ntuple_filter_to_5tuple(struct rte_eth_ntuple_filter *filter, + struct ixgbe_5tuple_filter_info *filter_info) +{ + if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM || + filter->priority > IXGBE_5TUPLE_MAX_PRI || + filter->priority < IXGBE_5TUPLE_MIN_PRI) + return -EINVAL; + + switch (filter->dst_ip_mask) { + case UINT32_MAX: + filter_info->dst_ip_mask = 0; + filter_info->dst_ip = filter->dst_ip; + break; + case 0: + filter_info->dst_ip_mask = 1; + break; + default: + PMD_DRV_LOG(ERR, "invalid dst_ip mask."); + return -EINVAL; + } + + switch (filter->src_ip_mask) { + case UINT32_MAX: + filter_info->src_ip_mask = 0; + filter_info->src_ip = filter->src_ip; + break; + case 0: + filter_info->src_ip_mask = 1; + break; + default: + PMD_DRV_LOG(ERR, "invalid src_ip mask."); + return -EINVAL; + } + + switch (filter->dst_port_mask) { + case UINT16_MAX: + filter_info->dst_port_mask = 0; + filter_info->dst_port = filter->dst_port; + break; + case 0: + filter_info->dst_port_mask = 1; + break; + default: + PMD_DRV_LOG(ERR, "invalid dst_port mask."); + return -EINVAL; + } + + switch (filter->src_port_mask) { + case UINT16_MAX: + filter_info->src_port_mask = 0; + filter_info->src_port = filter->src_port; + break; + case 0: + filter_info->src_port_mask = 1; + break; + default: + PMD_DRV_LOG(ERR, "invalid src_port mask."); + return -EINVAL; + } + + switch (filter->proto_mask) { + case UINT8_MAX: + filter_info->proto_mask = 0; + filter_info->proto = + convert_protocol_type(filter->proto); + break; + case 0: + filter_info->proto_mask = 1; + break; + default: + PMD_DRV_LOG(ERR, "invalid protocol mask."); + return -EINVAL; + } + + filter_info->priority = (uint8_t)filter->priority; + return 0; +} + +/* + * add or delete a ntuple filter + * + * @param + * dev: Pointer to struct rte_eth_dev. + * ntuple_filter: Pointer to struct rte_eth_ntuple_filter + * add: if true, add filter, if false, remove filter + * + * @return + * - On success, zero. + * - On failure, a negative value. 
+ */ +int +ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev, + struct rte_eth_ntuple_filter *ntuple_filter, + bool add) +{ + struct ixgbe_filter_info *filter_info = + IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + struct ixgbe_5tuple_filter_info filter_5tuple; + struct ixgbe_5tuple_filter *filter; + int ret; + + if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) { + PMD_DRV_LOG(ERR, "only 5tuple is supported."); + return -EINVAL; + } + + memset(&filter_5tuple, 0, sizeof(struct ixgbe_5tuple_filter_info)); + ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple); + if (ret < 0) + return ret; + + filter = ixgbe_5tuple_filter_lookup(&filter_info->fivetuple_list, + &filter_5tuple); + if (filter != NULL && add) { + PMD_DRV_LOG(ERR, "filter exists."); + return -EEXIST; + } + if (filter == NULL && !add) { + PMD_DRV_LOG(ERR, "filter doesn't exist."); + return -ENOENT; + } + + if (add) { + filter = rte_zmalloc("ixgbe_5tuple_filter", + sizeof(struct ixgbe_5tuple_filter), 0); + if (filter == NULL) + return -ENOMEM; + rte_memcpy(&filter->filter_info, + &filter_5tuple, + sizeof(struct ixgbe_5tuple_filter_info)); + filter->queue = ntuple_filter->queue; + ret = ixgbe_add_5tuple_filter(dev, filter); + if (ret < 0) { + rte_free(filter); + return ret; + } + } else + ixgbe_remove_5tuple_filter(dev, filter); + + return 0; +} + +/* + * get a ntuple filter + * + * @param + * dev: Pointer to struct rte_eth_dev. + * ntuple_filter: Pointer to struct rte_eth_ntuple_filter + * + * @return + * - On success, zero. + * - On failure, a negative value. + */ +static int +ixgbe_get_ntuple_filter(struct rte_eth_dev *dev, + struct rte_eth_ntuple_filter *ntuple_filter) +{ + struct ixgbe_filter_info *filter_info = + IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + struct ixgbe_5tuple_filter_info filter_5tuple; + struct ixgbe_5tuple_filter *filter; + int ret; + + if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) { + PMD_DRV_LOG(ERR, "only 5tuple is supported."); + return -EINVAL; + } + + memset(&filter_5tuple, 0, sizeof(struct ixgbe_5tuple_filter_info)); + ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple); + if (ret < 0) + return ret; + + filter = ixgbe_5tuple_filter_lookup(&filter_info->fivetuple_list, + &filter_5tuple); + if (filter == NULL) { + PMD_DRV_LOG(ERR, "filter doesn't exist."); + return -ENOENT; + } + ntuple_filter->queue = filter->queue; + return 0; +} + +/* + * ixgbe_ntuple_filter_handle - Handle operations for ntuple filter. + * @dev: pointer to rte_eth_dev structure + * @filter_op:operation will be taken. + * @arg: a pointer to specific structure corresponding to the filter_op + * + * @return + * - On success, zero. + * - On failure, a negative value. 
+ */ +static int +ixgbe_ntuple_filter_handle(struct rte_eth_dev *dev, + enum rte_filter_op filter_op, + void *arg) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int ret; + + MAC_TYPE_FILTER_SUP_EXT(hw->mac.type); + + if (filter_op == RTE_ETH_FILTER_NOP) + return 0; + + if (arg == NULL) { + PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.", + filter_op); + return -EINVAL; + } + + switch (filter_op) { + case RTE_ETH_FILTER_ADD: + ret = ixgbe_add_del_ntuple_filter(dev, + (struct rte_eth_ntuple_filter *)arg, + TRUE); + break; + case RTE_ETH_FILTER_DELETE: + ret = ixgbe_add_del_ntuple_filter(dev, + (struct rte_eth_ntuple_filter *)arg, + FALSE); + break; + case RTE_ETH_FILTER_GET: + ret = ixgbe_get_ntuple_filter(dev, + (struct rte_eth_ntuple_filter *)arg); + break; + default: + PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op); + ret = -EINVAL; + break; + } + return ret; +} + +int +ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev, + struct rte_eth_ethertype_filter *filter, + bool add) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_filter_info *filter_info = + IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + uint32_t etqf = 0; + uint32_t etqs = 0; + int ret; + struct ixgbe_ethertype_filter ethertype_filter; + + if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) + return -EINVAL; + + if (filter->ether_type == RTE_ETHER_TYPE_IPV4 || + filter->ether_type == RTE_ETHER_TYPE_IPV6) { + PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in" + " ethertype filter.", filter->ether_type); + return -EINVAL; + } + + if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) { + PMD_DRV_LOG(ERR, "mac compare is unsupported."); + return -EINVAL; + } + if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) { + PMD_DRV_LOG(ERR, "drop option is unsupported."); + return -EINVAL; + } + + ret = ixgbe_ethertype_filter_lookup(filter_info, filter->ether_type); + if (ret >= 0 && add) { + PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.", + filter->ether_type); + return -EEXIST; + } + if (ret < 0 && !add) { + PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.", + filter->ether_type); + return -ENOENT; + } + + if (add) { + etqf = IXGBE_ETQF_FILTER_EN; + etqf |= (uint32_t)filter->ether_type; + etqs |= (uint32_t)((filter->queue << + IXGBE_ETQS_RX_QUEUE_SHIFT) & + IXGBE_ETQS_RX_QUEUE); + etqs |= IXGBE_ETQS_QUEUE_EN; + + ethertype_filter.ethertype = filter->ether_type; + ethertype_filter.etqf = etqf; + ethertype_filter.etqs = etqs; + ethertype_filter.conf = FALSE; + ret = ixgbe_ethertype_filter_insert(filter_info, + ðertype_filter); + if (ret < 0) { + PMD_DRV_LOG(ERR, "ethertype filters are full."); + return -ENOSPC; + } + } else { + ret = ixgbe_ethertype_filter_remove(filter_info, (uint8_t)ret); + if (ret < 0) + return -ENOSYS; + } + IXGBE_WRITE_REG(hw, IXGBE_ETQF(ret), etqf); + IXGBE_WRITE_REG(hw, IXGBE_ETQS(ret), etqs); + IXGBE_WRITE_FLUSH(hw); + + return 0; +} + +static int +ixgbe_get_ethertype_filter(struct rte_eth_dev *dev, + struct rte_eth_ethertype_filter *filter) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_filter_info *filter_info = + IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + uint32_t etqf, etqs; + int ret; + + ret = ixgbe_ethertype_filter_lookup(filter_info, filter->ether_type); + if (ret < 0) { + PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.", + filter->ether_type); + return -ENOENT; + } + + etqf = IXGBE_READ_REG(hw, IXGBE_ETQF(ret)); + if (etqf & 
IXGBE_ETQF_FILTER_EN) { + etqs = IXGBE_READ_REG(hw, IXGBE_ETQS(ret)); + filter->ether_type = etqf & IXGBE_ETQF_ETHERTYPE; + filter->flags = 0; + filter->queue = (etqs & IXGBE_ETQS_RX_QUEUE) >> + IXGBE_ETQS_RX_QUEUE_SHIFT; + return 0; + } + return -ENOENT; +} + +/* + * ixgbe_ethertype_filter_handle - Handle operations for ethertype filter. + * @dev: pointer to rte_eth_dev structure + * @filter_op:operation will be taken. + * @arg: a pointer to specific structure corresponding to the filter_op + */ +static int +ixgbe_ethertype_filter_handle(struct rte_eth_dev *dev, + enum rte_filter_op filter_op, + void *arg) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int ret; + + MAC_TYPE_FILTER_SUP(hw->mac.type); + + if (filter_op == RTE_ETH_FILTER_NOP) + return 0; + + if (arg == NULL) { + PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.", + filter_op); + return -EINVAL; + } + + switch (filter_op) { + case RTE_ETH_FILTER_ADD: + ret = ixgbe_add_del_ethertype_filter(dev, + (struct rte_eth_ethertype_filter *)arg, + TRUE); + break; + case RTE_ETH_FILTER_DELETE: + ret = ixgbe_add_del_ethertype_filter(dev, + (struct rte_eth_ethertype_filter *)arg, + FALSE); + break; + case RTE_ETH_FILTER_GET: + ret = ixgbe_get_ethertype_filter(dev, + (struct rte_eth_ethertype_filter *)arg); + break; + default: + PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op); + ret = -EINVAL; + break; + } + return ret; +} + +static int +ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev, + enum rte_filter_type filter_type, + enum rte_filter_op filter_op, + void *arg) +{ + int ret = 0; + + switch (filter_type) { + case RTE_ETH_FILTER_NTUPLE: + ret = ixgbe_ntuple_filter_handle(dev, filter_op, arg); + break; + case RTE_ETH_FILTER_ETHERTYPE: + ret = ixgbe_ethertype_filter_handle(dev, filter_op, arg); + break; + case RTE_ETH_FILTER_SYN: + ret = ixgbe_syn_filter_handle(dev, filter_op, arg); + break; + case RTE_ETH_FILTER_FDIR: + ret = ixgbe_fdir_ctrl_func(dev, filter_op, arg); + break; + case RTE_ETH_FILTER_L2_TUNNEL: + ret = ixgbe_dev_l2_tunnel_filter_handle(dev, filter_op, arg); + break; + case RTE_ETH_FILTER_GENERIC: + if (filter_op != RTE_ETH_FILTER_GET) + return -EINVAL; + *(const void **)arg = &ixgbe_flow_ops; + break; + default: + PMD_DRV_LOG(WARNING, "Filter type (%d) not supported", + filter_type); + ret = -EINVAL; + break; + } + + return ret; +} + +static u8 * +ixgbe_dev_addr_list_itr(__rte_unused struct ixgbe_hw *hw, + u8 **mc_addr_ptr, u32 *vmdq) +{ + u8 *mc_addr; + + *vmdq = 0; + mc_addr = *mc_addr_ptr; + *mc_addr_ptr = (mc_addr + sizeof(struct rte_ether_addr)); + return mc_addr; +} + +static int +ixgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev, + struct rte_ether_addr *mc_addr_set, + uint32_t nb_mc_addr) +{ + struct ixgbe_hw *hw; + u8 *mc_addr_list; + + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + mc_addr_list = (u8 *)mc_addr_set; + return ixgbe_update_mc_addr_list(hw, mc_addr_list, nb_mc_addr, + ixgbe_dev_addr_list_itr, TRUE); +} + +static uint64_t +ixgbe_read_systime_cyclecounter(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint64_t systime_cycles; + + switch (hw->mac.type) { + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + /* SYSTIMEL stores ns and SYSTIMEH stores seconds. 
*/ + systime_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIML); + systime_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIMH) + * NSEC_PER_SEC; + break; + default: + systime_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIML); + systime_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIMH) + << 32; + } + + return systime_cycles; +} + +static uint64_t +ixgbe_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint64_t rx_tstamp_cycles; + + switch (hw->mac.type) { + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + /* RXSTMPL stores ns and RXSTMPH stores seconds. */ + rx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPL); + rx_tstamp_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPH) + * NSEC_PER_SEC; + break; + default: + /* RXSTMPL stores ns and RXSTMPH stores seconds. */ + rx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPL); + rx_tstamp_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPH) + << 32; + } + + return rx_tstamp_cycles; +} + +static uint64_t +ixgbe_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint64_t tx_tstamp_cycles; + + switch (hw->mac.type) { + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + /* TXSTMPL stores ns and TXSTMPH stores seconds. */ + tx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPL); + tx_tstamp_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPH) + * NSEC_PER_SEC; + break; + default: + /* TXSTMPL stores ns and TXSTMPH stores seconds. */ + tx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPL); + tx_tstamp_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPH) + << 32; + } + + return tx_tstamp_cycles; +} + +static void +ixgbe_start_timecounters(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_adapter *adapter = dev->data->dev_private; + struct rte_eth_link link; + uint32_t incval = 0; + uint32_t shift = 0; + + /* Get current link speed. */ + ixgbe_dev_link_update(dev, 1); + rte_eth_linkstatus_get(dev, &link); + + switch (link.link_speed) { + case ETH_SPEED_NUM_100M: + incval = IXGBE_INCVAL_100; + shift = IXGBE_INCVAL_SHIFT_100; + break; + case ETH_SPEED_NUM_1G: + incval = IXGBE_INCVAL_1GB; + shift = IXGBE_INCVAL_SHIFT_1GB; + break; + case ETH_SPEED_NUM_10G: + default: + incval = IXGBE_INCVAL_10GB; + shift = IXGBE_INCVAL_SHIFT_10GB; + break; + } + + switch (hw->mac.type) { + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + /* Independent of link speed. */ + incval = 1; + /* Cycles read will be interpreted as ns. */ + shift = 0; + /* Fall-through */ + case ixgbe_mac_X540: + IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, incval); + break; + case ixgbe_mac_82599EB: + incval >>= IXGBE_INCVAL_SHIFT_82599; + shift -= IXGBE_INCVAL_SHIFT_82599; + IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, + (1 << IXGBE_INCPER_SHIFT_82599) | incval); + break; + default: + /* Not supported. 
*/ + return; + } + + memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter)); + memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter)); + memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter)); + + adapter->systime_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK; + adapter->systime_tc.cc_shift = shift; + adapter->systime_tc.nsec_mask = (1ULL << shift) - 1; + + adapter->rx_tstamp_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK; + adapter->rx_tstamp_tc.cc_shift = shift; + adapter->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1; + + adapter->tx_tstamp_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK; + adapter->tx_tstamp_tc.cc_shift = shift; + adapter->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1; +} + +static int +ixgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta) +{ + struct ixgbe_adapter *adapter = dev->data->dev_private; + + adapter->systime_tc.nsec += delta; + adapter->rx_tstamp_tc.nsec += delta; + adapter->tx_tstamp_tc.nsec += delta; + + return 0; +} + +static int +ixgbe_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts) +{ + uint64_t ns; + struct ixgbe_adapter *adapter = dev->data->dev_private; + + ns = rte_timespec_to_ns(ts); + /* Set the timecounters to a new value. */ + adapter->systime_tc.nsec = ns; + adapter->rx_tstamp_tc.nsec = ns; + adapter->tx_tstamp_tc.nsec = ns; + + return 0; +} + +static int +ixgbe_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts) +{ + uint64_t ns, systime_cycles; + struct ixgbe_adapter *adapter = dev->data->dev_private; + + systime_cycles = ixgbe_read_systime_cyclecounter(dev); + ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles); + *ts = rte_ns_to_timespec(ns); + + return 0; +} + +static int +ixgbe_timesync_enable(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t tsync_ctl; + uint32_t tsauxc; + + /* Stop the timesync system time. */ + IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 0x0); + /* Reset the timesync system time value. */ + IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0x0); + IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0x0); + + /* Enable system time for platforms where it isn't on by default. */ + tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC); + tsauxc &= ~IXGBE_TSAUXC_DISABLE_SYSTIME; + IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, tsauxc); + + ixgbe_start_timecounters(dev); + + /* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */ + IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), + (RTE_ETHER_TYPE_1588 | + IXGBE_ETQF_FILTER_EN | + IXGBE_ETQF_1588)); + + /* Enable timestamping of received PTP packets. */ + tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL); + tsync_ctl |= IXGBE_TSYNCRXCTL_ENABLED; + IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, tsync_ctl); + + /* Enable timestamping of transmitted PTP packets. */ + tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL); + tsync_ctl |= IXGBE_TSYNCTXCTL_ENABLED; + IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, tsync_ctl); + + IXGBE_WRITE_FLUSH(hw); + + return 0; +} + +static int +ixgbe_timesync_disable(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t tsync_ctl; + + /* Disable timestamping of transmitted PTP packets. */ + tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL); + tsync_ctl &= ~IXGBE_TSYNCTXCTL_ENABLED; + IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, tsync_ctl); + + /* Disable timestamping of received PTP packets. 
*/ + tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL); + tsync_ctl &= ~IXGBE_TSYNCRXCTL_ENABLED; + IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, tsync_ctl); + + /* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */ + IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), 0); + + /* Stop incrementating the System Time registers. */ + IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 0); + + return 0; +} + +static int +ixgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev, + struct timespec *timestamp, + uint32_t flags __rte_unused) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_adapter *adapter = dev->data->dev_private; + uint32_t tsync_rxctl; + uint64_t rx_tstamp_cycles; + uint64_t ns; + + tsync_rxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL); + if ((tsync_rxctl & IXGBE_TSYNCRXCTL_VALID) == 0) + return -EINVAL; + + rx_tstamp_cycles = ixgbe_read_rx_tstamp_cyclecounter(dev); + ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles); + *timestamp = rte_ns_to_timespec(ns); + + return 0; +} + +static int +ixgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev, + struct timespec *timestamp) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_adapter *adapter = dev->data->dev_private; + uint32_t tsync_txctl; + uint64_t tx_tstamp_cycles; + uint64_t ns; + + tsync_txctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL); + if ((tsync_txctl & IXGBE_TSYNCTXCTL_VALID) == 0) + return -EINVAL; + + tx_tstamp_cycles = ixgbe_read_tx_tstamp_cyclecounter(dev); + ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles); + *timestamp = rte_ns_to_timespec(ns); + + return 0; +} + +static int +ixgbe_get_reg_length(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int count = 0; + int g_ind = 0; + const struct reg_info *reg_group; + const struct reg_info **reg_set = (hw->mac.type == ixgbe_mac_82598EB) ? + ixgbe_regs_mac_82598EB : ixgbe_regs_others; + + while ((reg_group = reg_set[g_ind++])) + count += ixgbe_regs_group_count(reg_group); + + return count; +} + +static int +ixgbevf_get_reg_length(struct rte_eth_dev *dev __rte_unused) +{ + int count = 0; + int g_ind = 0; + const struct reg_info *reg_group; + + while ((reg_group = ixgbevf_regs[g_ind++])) + count += ixgbe_regs_group_count(reg_group); + + return count; +} + +static int +ixgbe_get_regs(struct rte_eth_dev *dev, + struct rte_dev_reg_info *regs) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t *data = regs->data; + int g_ind = 0; + int count = 0; + const struct reg_info *reg_group; + const struct reg_info **reg_set = (hw->mac.type == ixgbe_mac_82598EB) ? 
+ ixgbe_regs_mac_82598EB : ixgbe_regs_others; + + if (data == NULL) { + regs->length = ixgbe_get_reg_length(dev); + regs->width = sizeof(uint32_t); + return 0; + } + + /* Support only full register dump */ + if ((regs->length == 0) || + (regs->length == (uint32_t)ixgbe_get_reg_length(dev))) { + regs->version = hw->mac.type << 24 | hw->revision_id << 16 | + hw->device_id; + while ((reg_group = reg_set[g_ind++])) + count += ixgbe_read_regs_group(dev, &data[count], + reg_group); + return 0; + } + + return -ENOTSUP; +} + +static int +ixgbevf_get_regs(struct rte_eth_dev *dev, + struct rte_dev_reg_info *regs) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t *data = regs->data; + int g_ind = 0; + int count = 0; + const struct reg_info *reg_group; + + if (data == NULL) { + regs->length = ixgbevf_get_reg_length(dev); + regs->width = sizeof(uint32_t); + return 0; + } + + /* Support only full register dump */ + if ((regs->length == 0) || + (regs->length == (uint32_t)ixgbevf_get_reg_length(dev))) { + regs->version = hw->mac.type << 24 | hw->revision_id << 16 | + hw->device_id; + while ((reg_group = ixgbevf_regs[g_ind++])) + count += ixgbe_read_regs_group(dev, &data[count], + reg_group); + return 0; + } + + return -ENOTSUP; +} + +static int +ixgbe_get_eeprom_length(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* Return unit is byte count */ + return hw->eeprom.word_size * 2; +} + +static int +ixgbe_get_eeprom(struct rte_eth_dev *dev, + struct rte_dev_eeprom_info *in_eeprom) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_eeprom_info *eeprom = &hw->eeprom; + uint16_t *data = in_eeprom->data; + int first, length; + + first = in_eeprom->offset >> 1; + length = in_eeprom->length >> 1; + if ((first > hw->eeprom.word_size) || + ((first + length) > hw->eeprom.word_size)) + return -EINVAL; + + in_eeprom->magic = hw->vendor_id | (hw->device_id << 16); + + return eeprom->ops.read_buffer(hw, first, length, data); +} + +static int +ixgbe_set_eeprom(struct rte_eth_dev *dev, + struct rte_dev_eeprom_info *in_eeprom) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_eeprom_info *eeprom = &hw->eeprom; + uint16_t *data = in_eeprom->data; + int first, length; + + first = in_eeprom->offset >> 1; + length = in_eeprom->length >> 1; + if ((first > hw->eeprom.word_size) || + ((first + length) > hw->eeprom.word_size)) + return -EINVAL; + + in_eeprom->magic = hw->vendor_id | (hw->device_id << 16); + + return eeprom->ops.write_buffer(hw, first, length, data); +} + +static int +ixgbe_get_module_info(struct rte_eth_dev *dev, + struct rte_eth_dev_module_info *modinfo) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t status; + uint8_t sff8472_rev, addr_mode; + bool page_swap = false; + + /* Check whether we support SFF-8472 or not */ + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_SFF_8472_COMP, + &sff8472_rev); + if (status != 0) + return -EIO; + + /* addressing mode is not supported */ + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_SFF_8472_SWAP, + &addr_mode); + if (status != 0) + return -EIO; + + if (addr_mode & IXGBE_SFF_ADDRESSING_MODE) { + PMD_DRV_LOG(ERR, + "Address change required to access page 0xA2, " + "but not supported. 
Please report the module " + "type to the driver maintainers."); + page_swap = true; + } + + if (sff8472_rev == IXGBE_SFF_SFF_8472_UNSUP || page_swap) { + /* We have a SFP, but it does not support SFF-8472 */ + modinfo->type = RTE_ETH_MODULE_SFF_8079; + modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN; + } else { + /* We have a SFP which supports a revision of SFF-8472. */ + modinfo->type = RTE_ETH_MODULE_SFF_8472; + modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN; + } + + return 0; +} + +static int +ixgbe_get_module_eeprom(struct rte_eth_dev *dev, + struct rte_dev_eeprom_info *info) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t status = IXGBE_ERR_PHY_ADDR_INVALID; + uint8_t databyte = 0xFF; + uint8_t *data = info->data; + uint32_t i = 0; + + if (info->length == 0) + return -EINVAL; + + for (i = info->offset; i < info->offset + info->length; i++) { + if (i < RTE_ETH_MODULE_SFF_8079_LEN) + status = hw->phy.ops.read_i2c_eeprom(hw, i, &databyte); + else + status = hw->phy.ops.read_i2c_sff8472(hw, i, &databyte); + + if (status != 0) + return -EIO; + + data[i - info->offset] = databyte; + } + + return 0; +} + +uint16_t +ixgbe_reta_size_get(enum ixgbe_mac_type mac_type) { + switch (mac_type) { + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + return ETH_RSS_RETA_SIZE_512; + case ixgbe_mac_X550_vf: + case ixgbe_mac_X550EM_x_vf: + case ixgbe_mac_X550EM_a_vf: + return ETH_RSS_RETA_SIZE_64; + case ixgbe_mac_X540_vf: + case ixgbe_mac_82599_vf: + return 0; + default: + return ETH_RSS_RETA_SIZE_128; + } +} + +uint32_t +ixgbe_reta_reg_get(enum ixgbe_mac_type mac_type, uint16_t reta_idx) { + switch (mac_type) { + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + if (reta_idx < ETH_RSS_RETA_SIZE_128) + return IXGBE_RETA(reta_idx >> 2); + else + return IXGBE_ERETA((reta_idx - ETH_RSS_RETA_SIZE_128) >> 2); + case ixgbe_mac_X550_vf: + case ixgbe_mac_X550EM_x_vf: + case ixgbe_mac_X550EM_a_vf: + return IXGBE_VFRETA(reta_idx >> 2); + default: + return IXGBE_RETA(reta_idx >> 2); + } +} + +uint32_t +ixgbe_mrqc_reg_get(enum ixgbe_mac_type mac_type) { + switch (mac_type) { + case ixgbe_mac_X550_vf: + case ixgbe_mac_X550EM_x_vf: + case ixgbe_mac_X550EM_a_vf: + return IXGBE_VFMRQC; + default: + return IXGBE_MRQC; + } +} + +uint32_t +ixgbe_rssrk_reg_get(enum ixgbe_mac_type mac_type, uint8_t i) { + switch (mac_type) { + case ixgbe_mac_X550_vf: + case ixgbe_mac_X550EM_x_vf: + case ixgbe_mac_X550EM_a_vf: + return IXGBE_VFRSSRK(i); + default: + return IXGBE_RSSRK(i); + } +} + +bool +ixgbe_rss_update_sp(enum ixgbe_mac_type mac_type) { + switch (mac_type) { + case ixgbe_mac_82599_vf: + case ixgbe_mac_X540_vf: + return 0; + default: + return 1; + } +} + +static int +ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev, + struct rte_eth_dcb_info *dcb_info) +{ + struct ixgbe_dcb_config *dcb_config = + IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private); + struct ixgbe_dcb_tc_config *tc; + struct rte_eth_dcb_tc_queue_mapping *tc_queue; + uint8_t nb_tcs; + uint8_t i, j; + + if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG) + dcb_info->nb_tcs = dcb_config->num_tcs.pg_tcs; + else + dcb_info->nb_tcs = 1; + + tc_queue = &dcb_info->tc_queue; + nb_tcs = dcb_info->nb_tcs; + + if (dcb_config->vt_mode) { /* vt is enabled*/ + struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf = + &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf; + for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) + dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i]; + if 
(RTE_ETH_DEV_SRIOV(dev).active > 0) { + for (j = 0; j < nb_tcs; j++) { + tc_queue->tc_rxq[0][j].base = j; + tc_queue->tc_rxq[0][j].nb_queue = 1; + tc_queue->tc_txq[0][j].base = j; + tc_queue->tc_txq[0][j].nb_queue = 1; + } + } else { + for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) { + for (j = 0; j < nb_tcs; j++) { + tc_queue->tc_rxq[i][j].base = + i * nb_tcs + j; + tc_queue->tc_rxq[i][j].nb_queue = 1; + tc_queue->tc_txq[i][j].base = + i * nb_tcs + j; + tc_queue->tc_txq[i][j].nb_queue = 1; + } + } + } + } else { /* vt is disabled*/ + struct rte_eth_dcb_rx_conf *rx_conf = + &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf; + for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) + dcb_info->prio_tc[i] = rx_conf->dcb_tc[i]; + if (dcb_info->nb_tcs == ETH_4_TCS) { + for (i = 0; i < dcb_info->nb_tcs; i++) { + dcb_info->tc_queue.tc_rxq[0][i].base = i * 32; + dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16; + } + dcb_info->tc_queue.tc_txq[0][0].base = 0; + dcb_info->tc_queue.tc_txq[0][1].base = 64; + dcb_info->tc_queue.tc_txq[0][2].base = 96; + dcb_info->tc_queue.tc_txq[0][3].base = 112; + dcb_info->tc_queue.tc_txq[0][0].nb_queue = 64; + dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32; + dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16; + dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16; + } else if (dcb_info->nb_tcs == ETH_8_TCS) { + for (i = 0; i < dcb_info->nb_tcs; i++) { + dcb_info->tc_queue.tc_rxq[0][i].base = i * 16; + dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16; + } + dcb_info->tc_queue.tc_txq[0][0].base = 0; + dcb_info->tc_queue.tc_txq[0][1].base = 32; + dcb_info->tc_queue.tc_txq[0][2].base = 64; + dcb_info->tc_queue.tc_txq[0][3].base = 80; + dcb_info->tc_queue.tc_txq[0][4].base = 96; + dcb_info->tc_queue.tc_txq[0][5].base = 104; + dcb_info->tc_queue.tc_txq[0][6].base = 112; + dcb_info->tc_queue.tc_txq[0][7].base = 120; + dcb_info->tc_queue.tc_txq[0][0].nb_queue = 32; + dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32; + dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16; + dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16; + dcb_info->tc_queue.tc_txq[0][4].nb_queue = 8; + dcb_info->tc_queue.tc_txq[0][5].nb_queue = 8; + dcb_info->tc_queue.tc_txq[0][6].nb_queue = 8; + dcb_info->tc_queue.tc_txq[0][7].nb_queue = 8; + } + } + for (i = 0; i < dcb_info->nb_tcs; i++) { + tc = &dcb_config->tc_config[i]; + dcb_info->tc_bws[i] = tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent; + } + return 0; +} + +/* Update e-tag ether type */ +static int +ixgbe_update_e_tag_eth_type(struct ixgbe_hw *hw, + uint16_t ether_type) +{ + uint32_t etag_etype; + + if (hw->mac.type != ixgbe_mac_X550 && + hw->mac.type != ixgbe_mac_X550EM_x && + hw->mac.type != ixgbe_mac_X550EM_a) { + return -ENOTSUP; + } + + etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE); + etag_etype &= ~IXGBE_ETAG_ETYPE_MASK; + etag_etype |= ether_type; + IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype); + IXGBE_WRITE_FLUSH(hw); + + return 0; +} + +/* Config l2 tunnel ether type */ +static int +ixgbe_dev_l2_tunnel_eth_type_conf(struct rte_eth_dev *dev, + struct rte_eth_l2_tunnel_conf *l2_tunnel) +{ + int ret = 0; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_l2_tn_info *l2_tn_info = + IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); + + if (l2_tunnel == NULL) + return -EINVAL; + + switch (l2_tunnel->l2_tunnel_type) { + case RTE_L2_TUNNEL_TYPE_E_TAG: + l2_tn_info->e_tag_ether_type = l2_tunnel->ether_type; + ret = ixgbe_update_e_tag_eth_type(hw, l2_tunnel->ether_type); + break; + default: + PMD_DRV_LOG(ERR, "Invalid tunnel type"); + 
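+ /* Only RTE_L2_TUNNEL_TYPE_E_TAG is handled above; every other tunnel type lands in this default case and is rejected. */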
ret = -EINVAL; + break; + } + + return ret; +} + +/* Enable e-tag tunnel */ +static int +ixgbe_e_tag_enable(struct ixgbe_hw *hw) +{ + uint32_t etag_etype; + + if (hw->mac.type != ixgbe_mac_X550 && + hw->mac.type != ixgbe_mac_X550EM_x && + hw->mac.type != ixgbe_mac_X550EM_a) { + return -ENOTSUP; + } + + etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE); + etag_etype |= IXGBE_ETAG_ETYPE_VALID; + IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype); + IXGBE_WRITE_FLUSH(hw); + + return 0; +} + +/* Enable l2 tunnel */ +static int +ixgbe_dev_l2_tunnel_enable(struct rte_eth_dev *dev, + enum rte_eth_tunnel_type l2_tunnel_type) +{ + int ret = 0; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_l2_tn_info *l2_tn_info = + IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); + + switch (l2_tunnel_type) { + case RTE_L2_TUNNEL_TYPE_E_TAG: + l2_tn_info->e_tag_en = TRUE; + ret = ixgbe_e_tag_enable(hw); + break; + default: + PMD_DRV_LOG(ERR, "Invalid tunnel type"); + ret = -EINVAL; + break; + } + + return ret; +} + +/* Disable e-tag tunnel */ +static int +ixgbe_e_tag_disable(struct ixgbe_hw *hw) +{ + uint32_t etag_etype; + + if (hw->mac.type != ixgbe_mac_X550 && + hw->mac.type != ixgbe_mac_X550EM_x && + hw->mac.type != ixgbe_mac_X550EM_a) { + return -ENOTSUP; + } + + etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE); + etag_etype &= ~IXGBE_ETAG_ETYPE_VALID; + IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype); + IXGBE_WRITE_FLUSH(hw); + + return 0; +} + +/* Disable l2 tunnel */ +static int +ixgbe_dev_l2_tunnel_disable(struct rte_eth_dev *dev, + enum rte_eth_tunnel_type l2_tunnel_type) +{ + int ret = 0; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_l2_tn_info *l2_tn_info = + IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); + + switch (l2_tunnel_type) { + case RTE_L2_TUNNEL_TYPE_E_TAG: + l2_tn_info->e_tag_en = FALSE; + ret = ixgbe_e_tag_disable(hw); + break; + default: + PMD_DRV_LOG(ERR, "Invalid tunnel type"); + ret = -EINVAL; + break; + } + + return ret; +} + +static int +ixgbe_e_tag_filter_del(struct rte_eth_dev *dev, + struct rte_eth_l2_tunnel_conf *l2_tunnel) +{ + int ret = 0; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t i, rar_entries; + uint32_t rar_low, rar_high; + + if (hw->mac.type != ixgbe_mac_X550 && + hw->mac.type != ixgbe_mac_X550EM_x && + hw->mac.type != ixgbe_mac_X550EM_a) { + return -ENOTSUP; + } + + rar_entries = ixgbe_get_num_rx_addrs(hw); + + for (i = 1; i < rar_entries; i++) { + rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(i)); + rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(i)); + if ((rar_high & IXGBE_RAH_AV) && + (rar_high & IXGBE_RAH_ADTYPE) && + ((rar_low & IXGBE_RAL_ETAG_FILTER_MASK) == + l2_tunnel->tunnel_id)) { + IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0); + IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0); + + ixgbe_clear_vmdq(hw, i, IXGBE_CLEAR_VMDQ_ALL); + + return ret; + } + } + + return ret; +} + +static int +ixgbe_e_tag_filter_add(struct rte_eth_dev *dev, + struct rte_eth_l2_tunnel_conf *l2_tunnel) +{ + int ret = 0; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t i, rar_entries; + uint32_t rar_low, rar_high; + + if (hw->mac.type != ixgbe_mac_X550 && + hw->mac.type != ixgbe_mac_X550EM_x && + hw->mac.type != ixgbe_mac_X550EM_a) { + return -ENOTSUP; + } + + /* One entry for one tunnel. Try to remove potential existing entry. 
*/ + ixgbe_e_tag_filter_del(dev, l2_tunnel); + + rar_entries = ixgbe_get_num_rx_addrs(hw); + + for (i = 1; i < rar_entries; i++) { + rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(i)); + if (rar_high & IXGBE_RAH_AV) { + continue; + } else { + ixgbe_set_vmdq(hw, i, l2_tunnel->pool); + rar_high = IXGBE_RAH_AV | IXGBE_RAH_ADTYPE; + rar_low = l2_tunnel->tunnel_id; + + IXGBE_WRITE_REG(hw, IXGBE_RAL(i), rar_low); + IXGBE_WRITE_REG(hw, IXGBE_RAH(i), rar_high); + + return ret; + } + } + + PMD_INIT_LOG(NOTICE, "The table of E-tag forwarding rule is full." + " Please remove a rule before adding a new one."); + return -EINVAL; +} + +static inline struct ixgbe_l2_tn_filter * +ixgbe_l2_tn_filter_lookup(struct ixgbe_l2_tn_info *l2_tn_info, + struct ixgbe_l2_tn_key *key) +{ + int ret; + + ret = rte_hash_lookup(l2_tn_info->hash_handle, (const void *)key); + if (ret < 0) + return NULL; + + return l2_tn_info->hash_map[ret]; +} + +static inline int +ixgbe_insert_l2_tn_filter(struct ixgbe_l2_tn_info *l2_tn_info, + struct ixgbe_l2_tn_filter *l2_tn_filter) +{ + int ret; + + ret = rte_hash_add_key(l2_tn_info->hash_handle, + &l2_tn_filter->key); + + if (ret < 0) { + PMD_DRV_LOG(ERR, + "Failed to insert L2 tunnel filter" + " to hash table %d!", + ret); + return ret; + } + + l2_tn_info->hash_map[ret] = l2_tn_filter; + + TAILQ_INSERT_TAIL(&l2_tn_info->l2_tn_list, l2_tn_filter, entries); + + return 0; +} + +static inline int +ixgbe_remove_l2_tn_filter(struct ixgbe_l2_tn_info *l2_tn_info, + struct ixgbe_l2_tn_key *key) +{ + int ret; + struct ixgbe_l2_tn_filter *l2_tn_filter; + + ret = rte_hash_del_key(l2_tn_info->hash_handle, key); + + if (ret < 0) { + PMD_DRV_LOG(ERR, + "No such L2 tunnel filter to delete %d!", + ret); + return ret; + } + + l2_tn_filter = l2_tn_info->hash_map[ret]; + l2_tn_info->hash_map[ret] = NULL; + + TAILQ_REMOVE(&l2_tn_info->l2_tn_list, l2_tn_filter, entries); + rte_free(l2_tn_filter); + + return 0; +} + +/* Add l2 tunnel filter */ +int +ixgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev, + struct rte_eth_l2_tunnel_conf *l2_tunnel, + bool restore) +{ + int ret; + struct ixgbe_l2_tn_info *l2_tn_info = + IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); + struct ixgbe_l2_tn_key key; + struct ixgbe_l2_tn_filter *node; + + if (!restore) { + key.l2_tn_type = l2_tunnel->l2_tunnel_type; + key.tn_id = l2_tunnel->tunnel_id; + + node = ixgbe_l2_tn_filter_lookup(l2_tn_info, &key); + + if (node) { + PMD_DRV_LOG(ERR, + "The L2 tunnel filter already exists!"); + return -EINVAL; + } + + node = rte_zmalloc("ixgbe_l2_tn", + sizeof(struct ixgbe_l2_tn_filter), + 0); + if (!node) + return -ENOMEM; + + rte_memcpy(&node->key, + &key, + sizeof(struct ixgbe_l2_tn_key)); + node->pool = l2_tunnel->pool; + ret = ixgbe_insert_l2_tn_filter(l2_tn_info, node); + if (ret < 0) { + rte_free(node); + return ret; + } + } + + switch (l2_tunnel->l2_tunnel_type) { + case RTE_L2_TUNNEL_TYPE_E_TAG: + ret = ixgbe_e_tag_filter_add(dev, l2_tunnel); + break; + default: + PMD_DRV_LOG(ERR, "Invalid tunnel type"); + ret = -EINVAL; + break; + } + + if ((!restore) && (ret < 0)) + (void)ixgbe_remove_l2_tn_filter(l2_tn_info, &key); + + return ret; +} + +/* Delete l2 tunnel filter */ +int +ixgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev, + struct rte_eth_l2_tunnel_conf *l2_tunnel) +{ + int ret; + struct ixgbe_l2_tn_info *l2_tn_info = + IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); + struct ixgbe_l2_tn_key key; + + key.l2_tn_type = l2_tunnel->l2_tunnel_type; + key.tn_id = l2_tunnel->tunnel_id; + ret = 
ixgbe_remove_l2_tn_filter(l2_tn_info, &key); + if (ret < 0) + return ret; + + switch (l2_tunnel->l2_tunnel_type) { + case RTE_L2_TUNNEL_TYPE_E_TAG: + ret = ixgbe_e_tag_filter_del(dev, l2_tunnel); + break; + default: + PMD_DRV_LOG(ERR, "Invalid tunnel type"); + ret = -EINVAL; + break; + } + + return ret; +} + +/** + * ixgbe_dev_l2_tunnel_filter_handle - Handle operations for l2 tunnel filter. + * @dev: pointer to rte_eth_dev structure + * @filter_op:operation will be taken. + * @arg: a pointer to specific structure corresponding to the filter_op + */ +static int +ixgbe_dev_l2_tunnel_filter_handle(struct rte_eth_dev *dev, + enum rte_filter_op filter_op, + void *arg) +{ + int ret; + + if (filter_op == RTE_ETH_FILTER_NOP) + return 0; + + if (arg == NULL) { + PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.", + filter_op); + return -EINVAL; + } + + switch (filter_op) { + case RTE_ETH_FILTER_ADD: + ret = ixgbe_dev_l2_tunnel_filter_add + (dev, + (struct rte_eth_l2_tunnel_conf *)arg, + FALSE); + break; + case RTE_ETH_FILTER_DELETE: + ret = ixgbe_dev_l2_tunnel_filter_del + (dev, + (struct rte_eth_l2_tunnel_conf *)arg); + break; + default: + PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op); + ret = -EINVAL; + break; + } + return ret; +} + +static int +ixgbe_e_tag_forwarding_en_dis(struct rte_eth_dev *dev, bool en) +{ + int ret = 0; + uint32_t ctrl; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (hw->mac.type != ixgbe_mac_X550 && + hw->mac.type != ixgbe_mac_X550EM_x && + hw->mac.type != ixgbe_mac_X550EM_a) { + return -ENOTSUP; + } + + ctrl = IXGBE_READ_REG(hw, IXGBE_VT_CTL); + ctrl &= ~IXGBE_VT_CTL_POOLING_MODE_MASK; + if (en) + ctrl |= IXGBE_VT_CTL_POOLING_MODE_ETAG; + IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, ctrl); + + return ret; +} + +/* Enable l2 tunnel forwarding */ +static int +ixgbe_dev_l2_tunnel_forwarding_enable + (struct rte_eth_dev *dev, + enum rte_eth_tunnel_type l2_tunnel_type) +{ + struct ixgbe_l2_tn_info *l2_tn_info = + IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); + int ret = 0; + + switch (l2_tunnel_type) { + case RTE_L2_TUNNEL_TYPE_E_TAG: + l2_tn_info->e_tag_fwd_en = TRUE; + ret = ixgbe_e_tag_forwarding_en_dis(dev, 1); + break; + default: + PMD_DRV_LOG(ERR, "Invalid tunnel type"); + ret = -EINVAL; + break; + } + + return ret; +} + +/* Disable l2 tunnel forwarding */ +static int +ixgbe_dev_l2_tunnel_forwarding_disable + (struct rte_eth_dev *dev, + enum rte_eth_tunnel_type l2_tunnel_type) +{ + struct ixgbe_l2_tn_info *l2_tn_info = + IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); + int ret = 0; + + switch (l2_tunnel_type) { + case RTE_L2_TUNNEL_TYPE_E_TAG: + l2_tn_info->e_tag_fwd_en = FALSE; + ret = ixgbe_e_tag_forwarding_en_dis(dev, 0); + break; + default: + PMD_DRV_LOG(ERR, "Invalid tunnel type"); + ret = -EINVAL; + break; + } + + return ret; +} + +static int +ixgbe_e_tag_insertion_en_dis(struct rte_eth_dev *dev, + struct rte_eth_l2_tunnel_conf *l2_tunnel, + bool en) +{ + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + int ret = 0; + uint32_t vmtir, vmvir; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (l2_tunnel->vf_id >= pci_dev->max_vfs) { + PMD_DRV_LOG(ERR, + "VF id %u should be less than %u", + l2_tunnel->vf_id, + pci_dev->max_vfs); + return -EINVAL; + } + + if (hw->mac.type != ixgbe_mac_X550 && + hw->mac.type != ixgbe_mac_X550EM_x && + hw->mac.type != ixgbe_mac_X550EM_a) { + return -ENOTSUP; + } + + if (en) + vmtir = l2_tunnel->tunnel_id; + else + vmtir = 0; + + 
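+ /* Per-VF programming: VMTIR takes the E-tag value to insert (0 when disabling), then the TAGA field of VMVIR is switched to E-tag insert mode only in the enable case. */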
IXGBE_WRITE_REG(hw, IXGBE_VMTIR(l2_tunnel->vf_id), vmtir); + + vmvir = IXGBE_READ_REG(hw, IXGBE_VMVIR(l2_tunnel->vf_id)); + vmvir &= ~IXGBE_VMVIR_TAGA_MASK; + if (en) + vmvir |= IXGBE_VMVIR_TAGA_ETAG_INSERT; + IXGBE_WRITE_REG(hw, IXGBE_VMVIR(l2_tunnel->vf_id), vmvir); + + return ret; +} + +/* Enable l2 tunnel tag insertion */ +static int +ixgbe_dev_l2_tunnel_insertion_enable(struct rte_eth_dev *dev, + struct rte_eth_l2_tunnel_conf *l2_tunnel) +{ + int ret = 0; + + switch (l2_tunnel->l2_tunnel_type) { + case RTE_L2_TUNNEL_TYPE_E_TAG: + ret = ixgbe_e_tag_insertion_en_dis(dev, l2_tunnel, 1); + break; + default: + PMD_DRV_LOG(ERR, "Invalid tunnel type"); + ret = -EINVAL; + break; + } + + return ret; +} + +/* Disable l2 tunnel tag insertion */ +static int +ixgbe_dev_l2_tunnel_insertion_disable + (struct rte_eth_dev *dev, + struct rte_eth_l2_tunnel_conf *l2_tunnel) +{ + int ret = 0; + + switch (l2_tunnel->l2_tunnel_type) { + case RTE_L2_TUNNEL_TYPE_E_TAG: + ret = ixgbe_e_tag_insertion_en_dis(dev, l2_tunnel, 0); + break; + default: + PMD_DRV_LOG(ERR, "Invalid tunnel type"); + ret = -EINVAL; + break; + } + + return ret; +} + +static int +ixgbe_e_tag_stripping_en_dis(struct rte_eth_dev *dev, + bool en) +{ + int ret = 0; + uint32_t qde; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (hw->mac.type != ixgbe_mac_X550 && + hw->mac.type != ixgbe_mac_X550EM_x && + hw->mac.type != ixgbe_mac_X550EM_a) { + return -ENOTSUP; + } + + qde = IXGBE_READ_REG(hw, IXGBE_QDE); + if (en) + qde |= IXGBE_QDE_STRIP_TAG; + else + qde &= ~IXGBE_QDE_STRIP_TAG; + qde &= ~IXGBE_QDE_READ; + qde |= IXGBE_QDE_WRITE; + IXGBE_WRITE_REG(hw, IXGBE_QDE, qde); + + return ret; +} + +/* Enable l2 tunnel tag stripping */ +static int +ixgbe_dev_l2_tunnel_stripping_enable + (struct rte_eth_dev *dev, + enum rte_eth_tunnel_type l2_tunnel_type) +{ + int ret = 0; + + switch (l2_tunnel_type) { + case RTE_L2_TUNNEL_TYPE_E_TAG: + ret = ixgbe_e_tag_stripping_en_dis(dev, 1); + break; + default: + PMD_DRV_LOG(ERR, "Invalid tunnel type"); + ret = -EINVAL; + break; + } + + return ret; +} + +/* Disable l2 tunnel tag stripping */ +static int +ixgbe_dev_l2_tunnel_stripping_disable + (struct rte_eth_dev *dev, + enum rte_eth_tunnel_type l2_tunnel_type) +{ + int ret = 0; + + switch (l2_tunnel_type) { + case RTE_L2_TUNNEL_TYPE_E_TAG: + ret = ixgbe_e_tag_stripping_en_dis(dev, 0); + break; + default: + PMD_DRV_LOG(ERR, "Invalid tunnel type"); + ret = -EINVAL; + break; + } + + return ret; +} + +/* Enable/disable l2 tunnel offload functions */ +static int +ixgbe_dev_l2_tunnel_offload_set + (struct rte_eth_dev *dev, + struct rte_eth_l2_tunnel_conf *l2_tunnel, + uint32_t mask, + uint8_t en) +{ + int ret = 0; + + if (l2_tunnel == NULL) + return -EINVAL; + + ret = -EINVAL; + if (mask & ETH_L2_TUNNEL_ENABLE_MASK) { + if (en) + ret = ixgbe_dev_l2_tunnel_enable( + dev, + l2_tunnel->l2_tunnel_type); + else + ret = ixgbe_dev_l2_tunnel_disable( + dev, + l2_tunnel->l2_tunnel_type); + } + + if (mask & ETH_L2_TUNNEL_INSERTION_MASK) { + if (en) + ret = ixgbe_dev_l2_tunnel_insertion_enable( + dev, + l2_tunnel); + else + ret = ixgbe_dev_l2_tunnel_insertion_disable( + dev, + l2_tunnel); + } + + if (mask & ETH_L2_TUNNEL_STRIPPING_MASK) { + if (en) + ret = ixgbe_dev_l2_tunnel_stripping_enable( + dev, + l2_tunnel->l2_tunnel_type); + else + ret = ixgbe_dev_l2_tunnel_stripping_disable( + dev, + l2_tunnel->l2_tunnel_type); + } + + if (mask & ETH_L2_TUNNEL_FORWARDING_MASK) { + if (en) + ret = ixgbe_dev_l2_tunnel_forwarding_enable( + dev, + 
l2_tunnel->l2_tunnel_type); + else + ret = ixgbe_dev_l2_tunnel_forwarding_disable( + dev, + l2_tunnel->l2_tunnel_type); + } + + return ret; +} + +static int +ixgbe_update_vxlan_port(struct ixgbe_hw *hw, + uint16_t port) +{ + IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, port); + IXGBE_WRITE_FLUSH(hw); + + return 0; +} + +/* There's only one register for VxLAN UDP port. + * So, we cannot add several ports. Will update it. + */ +static int +ixgbe_add_vxlan_port(struct ixgbe_hw *hw, + uint16_t port) +{ + if (port == 0) { + PMD_DRV_LOG(ERR, "Add VxLAN port 0 is not allowed."); + return -EINVAL; + } + + return ixgbe_update_vxlan_port(hw, port); +} + +/* We cannot delete the VxLAN port. For there's a register for VxLAN + * UDP port, it must have a value. + * So, will reset it to the original value 0. + */ +static int +ixgbe_del_vxlan_port(struct ixgbe_hw *hw, + uint16_t port) +{ + uint16_t cur_port; + + cur_port = (uint16_t)IXGBE_READ_REG(hw, IXGBE_VXLANCTRL); + + if (cur_port != port) { + PMD_DRV_LOG(ERR, "Port %u does not exist.", port); + return -EINVAL; + } + + return ixgbe_update_vxlan_port(hw, 0); +} + +/* Add UDP tunneling port */ +static int +ixgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev, + struct rte_eth_udp_tunnel *udp_tunnel) +{ + int ret = 0; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (hw->mac.type != ixgbe_mac_X550 && + hw->mac.type != ixgbe_mac_X550EM_x && + hw->mac.type != ixgbe_mac_X550EM_a) { + return -ENOTSUP; + } + + if (udp_tunnel == NULL) + return -EINVAL; + + switch (udp_tunnel->prot_type) { + case RTE_TUNNEL_TYPE_VXLAN: + ret = ixgbe_add_vxlan_port(hw, udp_tunnel->udp_port); + break; + + case RTE_TUNNEL_TYPE_GENEVE: + case RTE_TUNNEL_TYPE_TEREDO: + PMD_DRV_LOG(ERR, "Tunnel type is not supported now."); + ret = -EINVAL; + break; + + default: + PMD_DRV_LOG(ERR, "Invalid tunnel type"); + ret = -EINVAL; + break; + } + + return ret; +} + +/* Remove UDP tunneling port */ +static int +ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev, + struct rte_eth_udp_tunnel *udp_tunnel) +{ + int ret = 0; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (hw->mac.type != ixgbe_mac_X550 && + hw->mac.type != ixgbe_mac_X550EM_x && + hw->mac.type != ixgbe_mac_X550EM_a) { + return -ENOTSUP; + } + + if (udp_tunnel == NULL) + return -EINVAL; + + switch (udp_tunnel->prot_type) { + case RTE_TUNNEL_TYPE_VXLAN: + ret = ixgbe_del_vxlan_port(hw, udp_tunnel->udp_port); + break; + case RTE_TUNNEL_TYPE_GENEVE: + case RTE_TUNNEL_TYPE_TEREDO: + PMD_DRV_LOG(ERR, "Tunnel type is not supported now."); + ret = -EINVAL; + break; + default: + PMD_DRV_LOG(ERR, "Invalid tunnel type"); + ret = -EINVAL; + break; + } + + return ret; +} + +static int +ixgbevf_dev_promiscuous_enable(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int ret; + + switch (hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_PROMISC)) { + case IXGBE_SUCCESS: + ret = 0; + break; + case IXGBE_ERR_FEATURE_NOT_SUPPORTED: + ret = -ENOTSUP; + break; + default: + ret = -EAGAIN; + break; + } + + return ret; +} + +static int +ixgbevf_dev_promiscuous_disable(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int ret; + + switch (hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_NONE)) { + case IXGBE_SUCCESS: + ret = 0; + break; + case IXGBE_ERR_FEATURE_NOT_SUPPORTED: + ret = -ENOTSUP; + break; + default: + ret = -EAGAIN; + break; + } + + return ret; +} + +static int 
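+ /* Request IXGBEVF_XCAST_MODE_ALLMULTI via update_xcast_mode() and map the result to 0, -ENOTSUP or -EAGAIN, mirroring the promiscuous helpers above. */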
+ixgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int ret; + int mode = IXGBEVF_XCAST_MODE_ALLMULTI; + + switch (hw->mac.ops.update_xcast_mode(hw, mode)) { + case IXGBE_SUCCESS: + ret = 0; + break; + case IXGBE_ERR_FEATURE_NOT_SUPPORTED: + ret = -ENOTSUP; + break; + default: + ret = -EAGAIN; + break; + } + + return ret; +} + +static int +ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int ret; + + switch (hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_MULTI)) { + case IXGBE_SUCCESS: + ret = 0; + break; + case IXGBE_ERR_FEATURE_NOT_SUPPORTED: + ret = -ENOTSUP; + break; + default: + ret = -EAGAIN; + break; + } + + return ret; +} + +static void ixgbevf_mbx_process(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + u32 in_msg = 0; + + /* peek the message first */ + in_msg = IXGBE_READ_REG(hw, IXGBE_VFMBMEM); + + /* PF reset VF event */ + if (in_msg == IXGBE_PF_CONTROL_MSG) { + /* dummy mbx read to ack pf */ + if (ixgbe_read_mbx(hw, &in_msg, 1, 0)) + return; + _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET, + NULL); + } +} + +static int +ixgbevf_dev_interrupt_get_status(struct rte_eth_dev *dev) +{ + uint32_t eicr; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_interrupt *intr = + IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + ixgbevf_intr_disable(dev); + + /* read-on-clear nic registers here */ + eicr = IXGBE_READ_REG(hw, IXGBE_VTEICR); + intr->flags = 0; + + /* only one misc vector supported - mailbox */ + eicr &= IXGBE_VTEICR_MASK; + if (eicr == IXGBE_MISC_VEC_ID) + intr->flags |= IXGBE_FLAG_MAILBOX; + + return 0; +} + +static int +ixgbevf_dev_interrupt_action(struct rte_eth_dev *dev) +{ + struct ixgbe_interrupt *intr = + IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + + if (intr->flags & IXGBE_FLAG_MAILBOX) { + ixgbevf_mbx_process(dev); + intr->flags &= ~IXGBE_FLAG_MAILBOX; + } + + ixgbevf_intr_enable(dev); + + return 0; +} + +static void +ixgbevf_dev_interrupt_handler(void *param) +{ + struct rte_eth_dev *dev = (struct rte_eth_dev *)param; + + ixgbevf_dev_interrupt_get_status(dev); + ixgbevf_dev_interrupt_action(dev); +} + +/** + * ixgbe_disable_sec_tx_path_generic - Stops the transmit data path + * @hw: pointer to hardware structure + * + * Stops the transmit data path and waits for the HW to internally empty + * the Tx security block + **/ +int ixgbe_disable_sec_tx_path_generic(struct ixgbe_hw *hw) +{ +#define IXGBE_MAX_SECTX_POLL 40 + + int i; + int sectxreg; + + sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL); + sectxreg |= IXGBE_SECTXCTRL_TX_DIS; + IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, sectxreg); + for (i = 0; i < IXGBE_MAX_SECTX_POLL; i++) { + sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT); + if (sectxreg & IXGBE_SECTXSTAT_SECTX_RDY) + break; + /* Use interrupt-safe sleep just in case */ + usec_delay(1000); + } + + /* For informational purposes only */ + if (i >= IXGBE_MAX_SECTX_POLL) + PMD_DRV_LOG(DEBUG, "Tx unit being enabled before security " + "path fully disabled. Continuing with init."); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_enable_sec_tx_path_generic - Enables the transmit data path + * @hw: pointer to hardware structure + * + * Enables the transmit data path. 
+ **/ +int ixgbe_enable_sec_tx_path_generic(struct ixgbe_hw *hw) +{ + uint32_t sectxreg; + + sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL); + sectxreg &= ~IXGBE_SECTXCTRL_TX_DIS; + IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, sectxreg); + IXGBE_WRITE_FLUSH(hw); + + return IXGBE_SUCCESS; +} + +/* restore n-tuple filter */ +static inline void +ixgbe_ntuple_filter_restore(struct rte_eth_dev *dev) +{ + struct ixgbe_filter_info *filter_info = + IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + struct ixgbe_5tuple_filter *node; + + TAILQ_FOREACH(node, &filter_info->fivetuple_list, entries) { + ixgbe_inject_5tuple_filter(dev, node); + } +} + +/* restore ethernet type filter */ +static inline void +ixgbe_ethertype_filter_restore(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_filter_info *filter_info = + IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + int i; + + for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) { + if (filter_info->ethertype_mask & (1 << i)) { + IXGBE_WRITE_REG(hw, IXGBE_ETQF(i), + filter_info->ethertype_filters[i].etqf); + IXGBE_WRITE_REG(hw, IXGBE_ETQS(i), + filter_info->ethertype_filters[i].etqs); + IXGBE_WRITE_FLUSH(hw); + } + } +} + +/* restore SYN filter */ +static inline void +ixgbe_syn_filter_restore(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_filter_info *filter_info = + IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + uint32_t synqf; + + synqf = filter_info->syn_info; + + if (synqf & IXGBE_SYN_FILTER_ENABLE) { + IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf); + IXGBE_WRITE_FLUSH(hw); + } +} + +/* restore L2 tunnel filter */ +static inline void +ixgbe_l2_tn_filter_restore(struct rte_eth_dev *dev) +{ + struct ixgbe_l2_tn_info *l2_tn_info = + IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); + struct ixgbe_l2_tn_filter *node; + struct rte_eth_l2_tunnel_conf l2_tn_conf; + + TAILQ_FOREACH(node, &l2_tn_info->l2_tn_list, entries) { + l2_tn_conf.l2_tunnel_type = node->key.l2_tn_type; + l2_tn_conf.tunnel_id = node->key.tn_id; + l2_tn_conf.pool = node->pool; + (void)ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_conf, TRUE); + } +} + +/* restore rss filter */ +static inline void +ixgbe_rss_filter_restore(struct rte_eth_dev *dev) +{ + struct ixgbe_filter_info *filter_info = + IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + + if (filter_info->rss_info.conf.queue_num) + ixgbe_config_rss_filter(dev, + &filter_info->rss_info, TRUE); +} + +static int +ixgbe_filter_restore(struct rte_eth_dev *dev) +{ + ixgbe_ntuple_filter_restore(dev); + ixgbe_ethertype_filter_restore(dev); + ixgbe_syn_filter_restore(dev); + ixgbe_fdir_filter_restore(dev); + ixgbe_l2_tn_filter_restore(dev); + ixgbe_rss_filter_restore(dev); + + return 0; +} + +static void +ixgbe_l2_tunnel_conf(struct rte_eth_dev *dev) +{ + struct ixgbe_l2_tn_info *l2_tn_info = + IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (l2_tn_info->e_tag_en) + (void)ixgbe_e_tag_enable(hw); + + if (l2_tn_info->e_tag_fwd_en) + (void)ixgbe_e_tag_forwarding_en_dis(dev, 1); + + (void)ixgbe_update_e_tag_eth_type(hw, l2_tn_info->e_tag_ether_type); +} + +/* remove all the n-tuple filters */ +void +ixgbe_clear_all_ntuple_filter(struct rte_eth_dev *dev) +{ + struct ixgbe_filter_info *filter_info = + IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + struct ixgbe_5tuple_filter *p_5tuple; + + while 
((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list))) + ixgbe_remove_5tuple_filter(dev, p_5tuple); +} + +/* remove all the ether type filters */ +void +ixgbe_clear_all_ethertype_filter(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_filter_info *filter_info = + IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + int i; + + for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) { + if (filter_info->ethertype_mask & (1 << i) && + !filter_info->ethertype_filters[i].conf) { + (void)ixgbe_ethertype_filter_remove(filter_info, + (uint8_t)i); + IXGBE_WRITE_REG(hw, IXGBE_ETQF(i), 0); + IXGBE_WRITE_REG(hw, IXGBE_ETQS(i), 0); + IXGBE_WRITE_FLUSH(hw); + } + } +} + +/* remove the SYN filter */ +void +ixgbe_clear_syn_filter(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_filter_info *filter_info = + IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + + if (filter_info->syn_info & IXGBE_SYN_FILTER_ENABLE) { + filter_info->syn_info = 0; + + IXGBE_WRITE_REG(hw, IXGBE_SYNQF, 0); + IXGBE_WRITE_FLUSH(hw); + } +} + +/* remove all the L2 tunnel filters */ +int +ixgbe_clear_all_l2_tn_filter(struct rte_eth_dev *dev) +{ + struct ixgbe_l2_tn_info *l2_tn_info = + IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); + struct ixgbe_l2_tn_filter *l2_tn_filter; + struct rte_eth_l2_tunnel_conf l2_tn_conf; + int ret = 0; + + while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) { + l2_tn_conf.l2_tunnel_type = l2_tn_filter->key.l2_tn_type; + l2_tn_conf.tunnel_id = l2_tn_filter->key.tn_id; + l2_tn_conf.pool = l2_tn_filter->pool; + ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_conf); + if (ret < 0) + return ret; + } + + return 0; +} + +void +ixgbe_dev_macsec_setting_save(struct rte_eth_dev *dev, + struct ixgbe_macsec_setting *macsec_setting) +{ + struct ixgbe_macsec_setting *macsec = + IXGBE_DEV_PRIVATE_TO_MACSEC_SETTING(dev->data->dev_private); + + macsec->offload_en = macsec_setting->offload_en; + macsec->encrypt_en = macsec_setting->encrypt_en; + macsec->replayprotect_en = macsec_setting->replayprotect_en; +} + +void +ixgbe_dev_macsec_setting_reset(struct rte_eth_dev *dev) +{ + struct ixgbe_macsec_setting *macsec = + IXGBE_DEV_PRIVATE_TO_MACSEC_SETTING(dev->data->dev_private); + + macsec->offload_en = 0; + macsec->encrypt_en = 0; + macsec->replayprotect_en = 0; +} + +void +ixgbe_dev_macsec_register_enable(struct rte_eth_dev *dev, + struct ixgbe_macsec_setting *macsec_setting) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t ctrl; + uint8_t en = macsec_setting->encrypt_en; + uint8_t rp = macsec_setting->replayprotect_en; + + /** + * Workaround: + * As no ixgbe_disable_sec_rx_path equivalent is + * implemented for tx in the base code, and we are + * not allowed to modify the base code in DPDK, so + * just call the hand-written one directly for now. + * The hardware support has been checked by + * ixgbe_disable_sec_rx_path(). 
+ */ + ixgbe_disable_sec_tx_path_generic(hw); + + /* Enable Ethernet CRC (required by MACsec offload) */ + ctrl = IXGBE_READ_REG(hw, IXGBE_HLREG0); + ctrl |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_RXCRCSTRP; + IXGBE_WRITE_REG(hw, IXGBE_HLREG0, ctrl); + + /* Enable the TX and RX crypto engines */ + ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL); + ctrl &= ~IXGBE_SECTXCTRL_SECTX_DIS; + IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, ctrl); + + ctrl = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL); + ctrl &= ~IXGBE_SECRXCTRL_SECRX_DIS; + IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, ctrl); + + ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG); + ctrl &= ~IXGBE_SECTX_MINSECIFG_MASK; + ctrl |= 0x3; + IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, ctrl); + + /* Enable SA lookup */ + ctrl = IXGBE_READ_REG(hw, IXGBE_LSECTXCTRL); + ctrl &= ~IXGBE_LSECTXCTRL_EN_MASK; + ctrl |= en ? IXGBE_LSECTXCTRL_AUTH_ENCRYPT : + IXGBE_LSECTXCTRL_AUTH; + ctrl |= IXGBE_LSECTXCTRL_AISCI; + ctrl &= ~IXGBE_LSECTXCTRL_PNTHRSH_MASK; + ctrl |= IXGBE_MACSEC_PNTHRSH & IXGBE_LSECTXCTRL_PNTHRSH_MASK; + IXGBE_WRITE_REG(hw, IXGBE_LSECTXCTRL, ctrl); + + ctrl = IXGBE_READ_REG(hw, IXGBE_LSECRXCTRL); + ctrl &= ~IXGBE_LSECRXCTRL_EN_MASK; + ctrl |= IXGBE_LSECRXCTRL_STRICT << IXGBE_LSECRXCTRL_EN_SHIFT; + ctrl &= ~IXGBE_LSECRXCTRL_PLSH; + if (rp) + ctrl |= IXGBE_LSECRXCTRL_RP; + else + ctrl &= ~IXGBE_LSECRXCTRL_RP; + IXGBE_WRITE_REG(hw, IXGBE_LSECRXCTRL, ctrl); + + /* Start the data paths */ + ixgbe_enable_sec_rx_path(hw); + /** + * Workaround: + * As no ixgbe_enable_sec_rx_path equivalent is + * implemented for tx in the base code, and we are + * not allowed to modify the base code in DPDK, so + * just call the hand-written one directly for now. + */ + ixgbe_enable_sec_tx_path_generic(hw); +} + +void +ixgbe_dev_macsec_register_disable(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t ctrl; + + /** + * Workaround: + * As no ixgbe_disable_sec_rx_path equivalent is + * implemented for tx in the base code, and we are + * not allowed to modify the base code in DPDK, so + * just call the hand-written one directly for now. + * The hardware support has been checked by + * ixgbe_disable_sec_rx_path(). + */ + ixgbe_disable_sec_tx_path_generic(hw); + + /* Disable the TX and RX crypto engines */ + ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL); + ctrl |= IXGBE_SECTXCTRL_SECTX_DIS; + IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, ctrl); + + ctrl = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL); + ctrl |= IXGBE_SECRXCTRL_SECRX_DIS; + IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, ctrl); + + /* Disable SA lookup */ + ctrl = IXGBE_READ_REG(hw, IXGBE_LSECTXCTRL); + ctrl &= ~IXGBE_LSECTXCTRL_EN_MASK; + ctrl |= IXGBE_LSECTXCTRL_DISABLE; + IXGBE_WRITE_REG(hw, IXGBE_LSECTXCTRL, ctrl); + + ctrl = IXGBE_READ_REG(hw, IXGBE_LSECRXCTRL); + ctrl &= ~IXGBE_LSECRXCTRL_EN_MASK; + ctrl |= IXGBE_LSECRXCTRL_DISABLE << IXGBE_LSECRXCTRL_EN_SHIFT; + IXGBE_WRITE_REG(hw, IXGBE_LSECRXCTRL, ctrl); + + /* Start the data paths */ + ixgbe_enable_sec_rx_path(hw); + /** + * Workaround: + * As no ixgbe_enable_sec_rx_path equivalent is + * implemented for tx in the base code, and we are + * not allowed to modify the base code in DPDK, so + * just call the hand-written one directly for now. 
+ */ + ixgbe_enable_sec_tx_path_generic(hw); +} + +RTE_PMD_REGISTER_PCI(net_ixgbe, rte_ixgbe_pmd); +RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe, pci_id_ixgbe_map); +RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe, "* igb_uio | uio_pci_generic | vfio-pci"); +RTE_PMD_REGISTER_PCI(net_ixgbe_vf, rte_ixgbevf_pmd); +RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe_vf, pci_id_ixgbevf_map); +RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe_vf, "* igb_uio | vfio-pci"); +RTE_PMD_REGISTER_PARAM_STRING(net_ixgbe_vf, + IXGBEVF_DEVARG_PFLINK_FULLCHK "=<0|1>"); + +RTE_INIT(ixgbe_init_log) +{ + ixgbe_logtype_init = rte_log_register("pmd.net.ixgbe.init"); + if (ixgbe_logtype_init >= 0) + rte_log_set_level(ixgbe_logtype_init, RTE_LOG_NOTICE); + ixgbe_logtype_driver = rte_log_register("pmd.net.ixgbe.driver"); + if (ixgbe_logtype_driver >= 0) + rte_log_set_level(ixgbe_logtype_driver, RTE_LOG_NOTICE); +#ifdef RTE_LIBRTE_IXGBE_DEBUG_RX + ixgbe_logtype_rx = rte_log_register("pmd.net.ixgbe.rx"); + if (ixgbe_logtype_rx >= 0) + rte_log_set_level(ixgbe_logtype_rx, RTE_LOG_DEBUG); +#endif + +#ifdef RTE_LIBRTE_IXGBE_DEBUG_TX + ixgbe_logtype_tx = rte_log_register("pmd.net.ixgbe.tx"); + if (ixgbe_logtype_tx >= 0) + rte_log_set_level(ixgbe_logtype_tx, RTE_LOG_DEBUG); +#endif + +#ifdef RTE_LIBRTE_IXGBE_DEBUG_TX_FREE + ixgbe_logtype_tx_free = rte_log_register("pmd.net.ixgbe.tx_free"); + if (ixgbe_logtype_tx_free >= 0) + rte_log_set_level(ixgbe_logtype_tx_free, RTE_LOG_DEBUG); +#endif +} diff --git a/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_ethdev.h b/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_ethdev.h new file mode 100644 index 000000000..3d78b2ee2 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_ethdev.h @@ -0,0 +1,819 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2016 Intel Corporation + */ + +#ifndef _IXGBE_ETHDEV_H_ +#define _IXGBE_ETHDEV_H_ + +#include + +#include "base/ixgbe_type.h" +#include "base/ixgbe_dcb.h" +#include "base/ixgbe_dcb_82599.h" +#include "base/ixgbe_dcb_82598.h" +#include "ixgbe_bypass.h" +#ifdef RTE_LIBRTE_SECURITY +#include "ixgbe_ipsec.h" +#endif +#include +#include +#include +#include +#include +#include + +/* need update link, bit flag */ +#define IXGBE_FLAG_NEED_LINK_UPDATE (uint32_t)(1 << 0) +#define IXGBE_FLAG_MAILBOX (uint32_t)(1 << 1) +#define IXGBE_FLAG_PHY_INTERRUPT (uint32_t)(1 << 2) +#define IXGBE_FLAG_MACSEC (uint32_t)(1 << 3) +#define IXGBE_FLAG_NEED_LINK_CONFIG (uint32_t)(1 << 4) + +/* + * Defines that were not part of ixgbe_type.h as they are not used by the + * FreeBSD driver. 
+ */ +#define IXGBE_ADVTXD_MAC_1588 0x00080000 /* IEEE1588 Timestamp packet */ +#define IXGBE_RXD_STAT_TMST 0x10000 /* Timestamped Packet indication */ +#define IXGBE_ADVTXD_TUCMD_L4T_RSV 0x00001800 /* L4 Packet TYPE, resvd */ +#define IXGBE_RXDADV_ERR_CKSUM_BIT 30 +#define IXGBE_RXDADV_ERR_CKSUM_MSK 3 +#define IXGBE_ADVTXD_MACLEN_SHIFT 9 /* Bit shift for l2_len */ +#define IXGBE_NB_STAT_MAPPING_REGS 32 +#define IXGBE_EXTENDED_VLAN (uint32_t)(1 << 26) /* EXTENDED VLAN ENABLE */ +#define IXGBE_VFTA_SIZE 128 +#define IXGBE_VLAN_TAG_SIZE 4 +#define IXGBE_HKEY_MAX_INDEX 10 +#define IXGBE_MAX_RX_QUEUE_NUM 128 +#define IXGBE_MAX_INTR_QUEUE_NUM 15 +#define IXGBE_VMDQ_DCB_NB_QUEUES IXGBE_MAX_RX_QUEUE_NUM +#define IXGBE_DCB_NB_QUEUES IXGBE_MAX_RX_QUEUE_NUM +#define IXGBE_NONE_MODE_TX_NB_QUEUES 64 + +#ifndef NBBY +#define NBBY 8 /* number of bits in a byte */ +#endif +#define IXGBE_HWSTRIP_BITMAP_SIZE (IXGBE_MAX_RX_QUEUE_NUM / (sizeof(uint32_t) * NBBY)) + +/* EITR Interval is in 2048ns units for 1G and 10G link */ +#define IXGBE_EITR_INTERVAL_UNIT_NS 2048 +#define IXGBE_EITR_ITR_INT_SHIFT 3 +#define IXGBE_EITR_INTERVAL_US(us) \ + (((us) * 1000 / IXGBE_EITR_INTERVAL_UNIT_NS << IXGBE_EITR_ITR_INT_SHIFT) & \ + IXGBE_EITR_ITR_INT_MASK) + +#define IXGBE_QUEUE_ITR_INTERVAL_DEFAULT 500 /* 500us */ + +/* Loopback operation modes */ +#define IXGBE_LPBK_NONE 0x0 /* Default value. Loopback is disabled. */ +#define IXGBE_LPBK_TX_RX 0x1 /* Tx->Rx loopback operation is enabled. */ +/* X540-X550 specific loopback operations */ +#define IXGBE_MII_AUTONEG_ENABLE 0x1000 /* Auto-negotiation enable (default = 1) */ + +#define IXGBE_MAX_JUMBO_FRAME_SIZE 0x2600 /* Maximum Jumbo frame size. */ + +#define IXGBE_RTTBCNRC_RF_INT_MASK_BASE 0x000003FF +#define IXGBE_RTTBCNRC_RF_INT_MASK_M \ + (IXGBE_RTTBCNRC_RF_INT_MASK_BASE << IXGBE_RTTBCNRC_RF_INT_SHIFT) + +#define IXGBE_MAX_QUEUE_NUM_PER_VF 8 + +#define IXGBE_SYN_FILTER_ENABLE 0x00000001 /* syn filter enable field */ +#define IXGBE_SYN_FILTER_QUEUE 0x000000FE /* syn filter queue field */ +#define IXGBE_SYN_FILTER_QUEUE_SHIFT 1 /* syn filter queue field shift */ +#define IXGBE_SYN_FILTER_SYNQFP 0x80000000 /* syn filter SYNQFP */ + +#define IXGBE_ETQF_UP 0x00070000 /* ethertype filter priority field */ +#define IXGBE_ETQF_SHIFT 16 +#define IXGBE_ETQF_UP_EN 0x00080000 +#define IXGBE_ETQF_ETHERTYPE 0x0000FFFF /* ethertype filter ethertype field */ +#define IXGBE_ETQF_MAX_PRI 7 + +#define IXGBE_SDPQF_DSTPORT 0xFFFF0000 /* dst port field */ +#define IXGBE_SDPQF_DSTPORT_SHIFT 16 /* dst port field shift */ +#define IXGBE_SDPQF_SRCPORT 0x0000FFFF /* src port field */ + +#define IXGBE_L34T_IMIR_SIZE_BP 0x00001000 +#define IXGBE_L34T_IMIR_RESERVE 0x00080000 /* bit 13 to 19 must be set to 1000000b. */ +#define IXGBE_L34T_IMIR_LLI 0x00100000 +#define IXGBE_L34T_IMIR_QUEUE 0x0FE00000 +#define IXGBE_L34T_IMIR_QUEUE_SHIFT 21 +#define IXGBE_5TUPLE_MAX_PRI 7 +#define IXGBE_5TUPLE_MIN_PRI 1 + +/* The overhead from MTU to max frame size.
*/ +#define IXGBE_ETH_OVERHEAD (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN) + +/* bit of VXLAN tunnel type | 7 bits of zeros | 8 bits of zeros*/ +#define IXGBE_FDIR_VXLAN_TUNNEL_TYPE 0x8000 +/* bit of NVGRE tunnel type | 7 bits of zeros | 8 bits of zeros*/ +#define IXGBE_FDIR_NVGRE_TUNNEL_TYPE 0x0 + +#define IXGBE_RSS_OFFLOAD_ALL ( \ + ETH_RSS_IPV4 | \ + ETH_RSS_NONFRAG_IPV4_TCP | \ + ETH_RSS_NONFRAG_IPV4_UDP | \ + ETH_RSS_IPV6 | \ + ETH_RSS_NONFRAG_IPV6_TCP | \ + ETH_RSS_NONFRAG_IPV6_UDP | \ + ETH_RSS_IPV6_EX | \ + ETH_RSS_IPV6_TCP_EX | \ + ETH_RSS_IPV6_UDP_EX) + +#define IXGBE_VF_IRQ_ENABLE_MASK 3 /* vf irq enable mask */ +#define IXGBE_VF_MAXMSIVECTOR 1 + +#define IXGBE_MISC_VEC_ID RTE_INTR_VEC_ZERO_OFFSET +#define IXGBE_RX_VEC_START RTE_INTR_VEC_RXTX_OFFSET + +#define IXGBE_SECTX_MINSECIFG_MASK 0x0000000F + +#define IXGBE_MACSEC_PNTHRSH 0xFFFFFE00 + +#define IXGBE_MAX_FDIR_FILTER_NUM (1024 * 32) +#define IXGBE_MAX_L2_TN_FILTER_NUM 128 + +#define MAC_TYPE_FILTER_SUP_EXT(type) do {\ + if ((type) != ixgbe_mac_82599EB && (type) != ixgbe_mac_X540)\ + return -ENOTSUP;\ +} while (0) + +#define MAC_TYPE_FILTER_SUP(type) do {\ + if ((type) != ixgbe_mac_82599EB && (type) != ixgbe_mac_X540 &&\ + (type) != ixgbe_mac_X550 && (type) != ixgbe_mac_X550EM_x &&\ + (type) != ixgbe_mac_X550EM_a)\ + return -ENOTSUP;\ +} while (0) + +/* Link speed for X550 auto negotiation */ +#define IXGBE_LINK_SPEED_X550_AUTONEG (IXGBE_LINK_SPEED_100_FULL | \ + IXGBE_LINK_SPEED_1GB_FULL | \ + IXGBE_LINK_SPEED_2_5GB_FULL | \ + IXGBE_LINK_SPEED_5GB_FULL | \ + IXGBE_LINK_SPEED_10GB_FULL) + +/* + * Information about the fdir mode. + */ +struct ixgbe_hw_fdir_mask { + uint16_t vlan_tci_mask; + uint32_t src_ipv4_mask; + uint32_t dst_ipv4_mask; + uint16_t src_ipv6_mask; + uint16_t dst_ipv6_mask; + uint16_t src_port_mask; + uint16_t dst_port_mask; + uint16_t flex_bytes_mask; + uint8_t mac_addr_byte_mask; + uint32_t tunnel_id_mask; + uint8_t tunnel_type_mask; +}; + +struct ixgbe_fdir_filter { + TAILQ_ENTRY(ixgbe_fdir_filter) entries; + union ixgbe_atr_input ixgbe_fdir; /* key of fdir filter*/ + uint32_t fdirflags; /* drop or forward */ + uint32_t fdirhash; /* hash value for fdir */ + uint8_t queue; /* assigned rx queue */ +}; + +/* list of fdir filters */ +TAILQ_HEAD(ixgbe_fdir_filter_list, ixgbe_fdir_filter); + +struct ixgbe_fdir_rule { + struct ixgbe_hw_fdir_mask mask; + union ixgbe_atr_input ixgbe_fdir; /* key of fdir filter*/ + bool b_spec; /* If TRUE, ixgbe_fdir, fdirflags, queue have meaning. */ + bool b_mask; /* If TRUE, mask has meaning. */ + enum rte_fdir_mode mode; /* IP, MAC VLAN, Tunnel */ + uint32_t fdirflags; /* drop or forward */ + uint32_t soft_id; /* an unique value for this rule */ + uint8_t queue; /* assigned rx queue */ + uint8_t flex_bytes_offset; +}; + +struct ixgbe_hw_fdir_info { + struct ixgbe_hw_fdir_mask mask; + uint8_t flex_bytes_offset; + uint16_t collision; + uint16_t free; + uint16_t maxhash; + uint8_t maxlen; + uint64_t add; + uint64_t remove; + uint64_t f_add; + uint64_t f_remove; + struct ixgbe_fdir_filter_list fdir_list; /* filter list*/ + /* store the pointers of the filters, index is the hash value. */ + struct ixgbe_fdir_filter **hash_map; + struct rte_hash *hash_handle; /* cuckoo hash handler */ + bool mask_added; /* If already got mask from consistent filter */ +}; + +struct ixgbe_rte_flow_rss_conf { + struct rte_flow_action_rss conf; /**< RSS parameters. */ + uint8_t key[IXGBE_HKEY_MAX_INDEX * sizeof(uint32_t)]; /* Hash key. */ + uint16_t queue[IXGBE_MAX_RX_QUEUE_NUM]; /**< Queues indices to use. 
*/ +}; + +/* structure for interrupt relative data */ +struct ixgbe_interrupt { + uint32_t flags; + uint32_t mask; + /*to save original mask during delayed handler */ + uint32_t mask_original; +}; + +struct ixgbe_stat_mapping_registers { + uint32_t tqsm[IXGBE_NB_STAT_MAPPING_REGS]; + uint32_t rqsmr[IXGBE_NB_STAT_MAPPING_REGS]; +}; + +struct ixgbe_vfta { + uint32_t vfta[IXGBE_VFTA_SIZE]; +}; + +struct ixgbe_hwstrip { + uint32_t bitmap[IXGBE_HWSTRIP_BITMAP_SIZE]; +}; + +/* + * VF data which used by PF host only + */ +#define IXGBE_MAX_VF_MC_ENTRIES 30 +#define IXGBE_MAX_MR_RULE_ENTRIES 4 /* number of mirroring rules supported */ +#define IXGBE_MAX_UTA 128 + +struct ixgbe_uta_info { + uint8_t uc_filter_type; + uint16_t uta_in_use; + uint32_t uta_shadow[IXGBE_MAX_UTA]; +}; + +#define IXGBE_MAX_MIRROR_RULES 4 /* Maximum nb. of mirror rules. */ + +struct ixgbe_mirror_info { + struct rte_eth_mirror_conf mr_conf[IXGBE_MAX_MIRROR_RULES]; + /**< store PF mirror rules configuration*/ +}; + +struct ixgbe_vf_info { + uint8_t vf_mac_addresses[RTE_ETHER_ADDR_LEN]; + uint16_t vf_mc_hashes[IXGBE_MAX_VF_MC_ENTRIES]; + uint16_t num_vf_mc_hashes; + uint16_t default_vf_vlan_id; + uint16_t vlans_enabled; + bool clear_to_send; + uint16_t tx_rate[IXGBE_MAX_QUEUE_NUM_PER_VF]; + uint16_t vlan_count; + uint8_t spoofchk_enabled; + uint8_t api_version; + uint16_t switch_domain_id; + uint16_t xcast_mode; + uint16_t mac_count; +}; + +/* + * Possible l4type of 5tuple filters. + */ +enum ixgbe_5tuple_protocol { + IXGBE_FILTER_PROTOCOL_TCP = 0, + IXGBE_FILTER_PROTOCOL_UDP, + IXGBE_FILTER_PROTOCOL_SCTP, + IXGBE_FILTER_PROTOCOL_NONE, +}; + +TAILQ_HEAD(ixgbe_5tuple_filter_list, ixgbe_5tuple_filter); + +struct ixgbe_5tuple_filter_info { + uint32_t dst_ip; + uint32_t src_ip; + uint16_t dst_port; + uint16_t src_port; + enum ixgbe_5tuple_protocol proto; /* l4 protocol. */ + uint8_t priority; /* seven levels (001b-111b), 111b is highest, + used when more than one filter matches. */ + uint8_t dst_ip_mask:1, /* if mask is 1b, do not compare dst ip. */ + src_ip_mask:1, /* if mask is 1b, do not compare src ip. */ + dst_port_mask:1, /* if mask is 1b, do not compare dst port. */ + src_port_mask:1, /* if mask is 1b, do not compare src port. */ + proto_mask:1; /* if mask is 1b, do not compare protocol. */ +}; + +/* 5tuple filter structure */ +struct ixgbe_5tuple_filter { + TAILQ_ENTRY(ixgbe_5tuple_filter) entries; + uint16_t index; /* the index of 5tuple filter */ + struct ixgbe_5tuple_filter_info filter_info; + uint16_t queue; /* rx queue assigned to */ +}; + +#define IXGBE_5TUPLE_ARRAY_SIZE \ + (RTE_ALIGN(IXGBE_MAX_FTQF_FILTERS, (sizeof(uint32_t) * NBBY)) / \ + (sizeof(uint32_t) * NBBY)) + +struct ixgbe_ethertype_filter { + uint16_t ethertype; + uint32_t etqf; + uint32_t etqs; + /** + * If this filter is added by configuration, + * it should not be removed. + */ + bool conf; +}; + +/* + * Structure to store filters' info. 
+ */ +struct ixgbe_filter_info { + uint8_t ethertype_mask; /* Bit mask for every used ethertype filter */ + /* store used ethertype filters*/ + struct ixgbe_ethertype_filter ethertype_filters[IXGBE_MAX_ETQF_FILTERS]; + /* Bit mask for every used 5tuple filter */ + uint32_t fivetuple_mask[IXGBE_5TUPLE_ARRAY_SIZE]; + struct ixgbe_5tuple_filter_list fivetuple_list; + /* store the SYN filter info */ + uint32_t syn_info; + /* store the rss filter info */ + struct ixgbe_rte_flow_rss_conf rss_info; +}; + +struct ixgbe_l2_tn_key { + enum rte_eth_tunnel_type l2_tn_type; + uint32_t tn_id; +}; + +struct ixgbe_l2_tn_filter { + TAILQ_ENTRY(ixgbe_l2_tn_filter) entries; + struct ixgbe_l2_tn_key key; + uint32_t pool; +}; + +TAILQ_HEAD(ixgbe_l2_tn_filter_list, ixgbe_l2_tn_filter); + +struct ixgbe_l2_tn_info { + struct ixgbe_l2_tn_filter_list l2_tn_list; + struct ixgbe_l2_tn_filter **hash_map; + struct rte_hash *hash_handle; + bool e_tag_en; /* e-tag enabled */ + bool e_tag_fwd_en; /* e-tag based forwarding enabled */ + uint16_t e_tag_ether_type; /* ether type for e-tag */ +}; + +struct rte_flow { + enum rte_filter_type filter_type; + void *rule; +}; + +struct ixgbe_macsec_setting { + uint8_t offload_en; + uint8_t encrypt_en; + uint8_t replayprotect_en; +}; + +/* + * Statistics counters collected by the MACsec + */ +struct ixgbe_macsec_stats { + /* TX port statistics */ + uint64_t out_pkts_untagged; + uint64_t out_pkts_encrypted; + uint64_t out_pkts_protected; + uint64_t out_octets_encrypted; + uint64_t out_octets_protected; + + /* RX port statistics */ + uint64_t in_pkts_untagged; + uint64_t in_pkts_badtag; + uint64_t in_pkts_nosci; + uint64_t in_pkts_unknownsci; + uint64_t in_octets_decrypted; + uint64_t in_octets_validated; + + /* RX SC statistics */ + uint64_t in_pkts_unchecked; + uint64_t in_pkts_delayed; + uint64_t in_pkts_late; + + /* RX SA statistics */ + uint64_t in_pkts_ok; + uint64_t in_pkts_invalid; + uint64_t in_pkts_notvalid; + uint64_t in_pkts_unusedsa; + uint64_t in_pkts_notusingsa; +}; + +/* The configuration of bandwidth */ +struct ixgbe_bw_conf { + uint8_t tc_num; /* Number of TCs. */ +}; + +/* Struct to store Traffic Manager shaper profile. */ +struct ixgbe_tm_shaper_profile { + TAILQ_ENTRY(ixgbe_tm_shaper_profile) node; + uint32_t shaper_profile_id; + uint32_t reference_count; + struct rte_tm_shaper_params profile; +}; + +TAILQ_HEAD(ixgbe_shaper_profile_list, ixgbe_tm_shaper_profile); + +/* node type of Traffic Manager */ +enum ixgbe_tm_node_type { + IXGBE_TM_NODE_TYPE_PORT, + IXGBE_TM_NODE_TYPE_TC, + IXGBE_TM_NODE_TYPE_QUEUE, + IXGBE_TM_NODE_TYPE_MAX, +}; + +/* Struct to store Traffic Manager node configuration. */ +struct ixgbe_tm_node { + TAILQ_ENTRY(ixgbe_tm_node) node; + uint32_t id; + uint32_t priority; + uint32_t weight; + uint32_t reference_count; + uint16_t no; + struct ixgbe_tm_node *parent; + struct ixgbe_tm_shaper_profile *shaper_profile; + struct rte_tm_node_params params; +}; + +TAILQ_HEAD(ixgbe_tm_node_list, ixgbe_tm_node); + +/* The configuration of Traffic Manager */ +struct ixgbe_tm_conf { + struct ixgbe_shaper_profile_list shaper_profile_list; + struct ixgbe_tm_node *root; /* root node - port */ + struct ixgbe_tm_node_list tc_list; /* node list for all the TCs */ + struct ixgbe_tm_node_list queue_list; /* node list for all the queues */ + /** + * The number of added TC nodes. + * It should be no more than the TC number of this port. + */ + uint32_t nb_tc_node; + /** + * The number of added queue nodes. + * It should be no more than the queue number of this port. 
+ */ + uint32_t nb_queue_node; + /** + * This flag is used to check if APP can change the TM node + * configuration. + * When it's true, means the configuration is applied to HW, + * APP should not change the configuration. + * As we don't support on-the-fly configuration, when starting + * the port, APP should call the hierarchy_commit API to set this + * flag to true. When stopping the port, this flag should be set + * to false. + */ + bool committed; +}; + +/* + * Structure to store private data for each driver instance (for each port). + */ +struct ixgbe_adapter { + struct ixgbe_hw hw; + struct ixgbe_hw_stats stats; + struct ixgbe_macsec_stats macsec_stats; + struct ixgbe_macsec_setting macsec_setting; + struct ixgbe_hw_fdir_info fdir; + struct ixgbe_interrupt intr; + struct ixgbe_stat_mapping_registers stat_mappings; + struct ixgbe_vfta shadow_vfta; + struct ixgbe_hwstrip hwstrip; + struct ixgbe_dcb_config dcb_config; + struct ixgbe_mirror_info mr_data; + struct ixgbe_vf_info *vfdata; + struct ixgbe_uta_info uta_info; +#ifdef RTE_LIBRTE_IXGBE_BYPASS + struct ixgbe_bypass_info bps; +#endif /* RTE_LIBRTE_IXGBE_BYPASS */ + struct ixgbe_filter_info filter; + struct ixgbe_l2_tn_info l2_tn; + struct ixgbe_bw_conf bw_conf; +#ifdef RTE_LIBRTE_SECURITY + struct ixgbe_ipsec ipsec; +#endif + bool rx_bulk_alloc_allowed; + bool rx_vec_allowed; + struct rte_timecounter systime_tc; + struct rte_timecounter rx_tstamp_tc; + struct rte_timecounter tx_tstamp_tc; + struct ixgbe_tm_conf tm_conf; + + /* For RSS reta table update */ + uint8_t rss_reta_updated; + + /* Used for VF link sync with PF's physical and logical (by checking + * mailbox status) link status. + */ + uint8_t pflink_fullchk; + uint8_t mac_ctrl_frame_fwd; + rte_atomic32_t link_thread_running; + pthread_t link_thread_tid; +}; + +struct ixgbe_vf_representor { + uint16_t vf_id; + uint16_t switch_domain_id; + struct rte_eth_dev *pf_ethdev; +}; + +int ixgbe_vf_representor_init(struct rte_eth_dev *ethdev, void *init_params); +int ixgbe_vf_representor_uninit(struct rte_eth_dev *ethdev); + +#define IXGBE_DEV_PRIVATE_TO_HW(adapter)\ + (&((struct ixgbe_adapter *)adapter)->hw) + +#define IXGBE_DEV_PRIVATE_TO_STATS(adapter) \ + (&((struct ixgbe_adapter *)adapter)->stats) + +#define IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(adapter) \ + (&((struct ixgbe_adapter *)adapter)->macsec_stats) + +#define IXGBE_DEV_PRIVATE_TO_MACSEC_SETTING(adapter) \ + (&((struct ixgbe_adapter *)adapter)->macsec_setting) + +#define IXGBE_DEV_PRIVATE_TO_INTR(adapter) \ + (&((struct ixgbe_adapter *)adapter)->intr) + +#define IXGBE_DEV_PRIVATE_TO_FDIR_INFO(adapter) \ + (&((struct ixgbe_adapter *)adapter)->fdir) + +#define IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(adapter) \ + (&((struct ixgbe_adapter *)adapter)->stat_mappings) + +#define IXGBE_DEV_PRIVATE_TO_VFTA(adapter) \ + (&((struct ixgbe_adapter *)adapter)->shadow_vfta) + +#define IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(adapter) \ + (&((struct ixgbe_adapter *)adapter)->hwstrip) + +#define IXGBE_DEV_PRIVATE_TO_DCB_CFG(adapter) \ + (&((struct ixgbe_adapter *)adapter)->dcb_config) + +#define IXGBE_DEV_PRIVATE_TO_P_VFDATA(adapter) \ + (&((struct ixgbe_adapter *)adapter)->vfdata) + +#define IXGBE_DEV_PRIVATE_TO_PFDATA(adapter) \ + (&((struct ixgbe_adapter *)adapter)->mr_data) + +#define IXGBE_DEV_PRIVATE_TO_UTA(adapter) \ + (&((struct ixgbe_adapter *)adapter)->uta_info) + +#define IXGBE_DEV_PRIVATE_TO_FILTER_INFO(adapter) \ + (&((struct ixgbe_adapter *)adapter)->filter) + +#define IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(adapter) \ + (&((struct 
ixgbe_adapter *)adapter)->l2_tn) + +#define IXGBE_DEV_PRIVATE_TO_BW_CONF(adapter) \ + (&((struct ixgbe_adapter *)adapter)->bw_conf) + +#define IXGBE_DEV_PRIVATE_TO_TM_CONF(adapter) \ + (&((struct ixgbe_adapter *)adapter)->tm_conf) + +#define IXGBE_DEV_PRIVATE_TO_IPSEC(adapter)\ + (&((struct ixgbe_adapter *)adapter)->ipsec) + +/* + * RX/TX function prototypes + */ +void ixgbe_dev_clear_queues(struct rte_eth_dev *dev); + +void ixgbe_dev_free_queues(struct rte_eth_dev *dev); + +void ixgbe_dev_rx_queue_release(void *rxq); + +void ixgbe_dev_tx_queue_release(void *txq); + +int ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id, + uint16_t nb_rx_desc, unsigned int socket_id, + const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *mb_pool); + +int ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id, + uint16_t nb_tx_desc, unsigned int socket_id, + const struct rte_eth_txconf *tx_conf); + +uint32_t ixgbe_dev_rx_queue_count(struct rte_eth_dev *dev, + uint16_t rx_queue_id); + +int ixgbe_dev_rx_descriptor_done(void *rx_queue, uint16_t offset); + +int ixgbe_dev_rx_descriptor_status(void *rx_queue, uint16_t offset); +int ixgbe_dev_tx_descriptor_status(void *tx_queue, uint16_t offset); + +int ixgbe_dev_rx_init(struct rte_eth_dev *dev); + +void ixgbe_dev_tx_init(struct rte_eth_dev *dev); + +int ixgbe_dev_rxtx_start(struct rte_eth_dev *dev); + +int ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id); + +int ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id); + +int ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id); + +int ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id); + +void ixgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, + struct rte_eth_rxq_info *qinfo); + +void ixgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, + struct rte_eth_txq_info *qinfo); + +int ixgbevf_dev_rx_init(struct rte_eth_dev *dev); + +void ixgbevf_dev_tx_init(struct rte_eth_dev *dev); + +void ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev); + +uint16_t ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); + +uint16_t ixgbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); + +uint16_t ixgbe_recv_pkts_lro_single_alloc(void *rx_queue, + struct rte_mbuf **rx_pkts, uint16_t nb_pkts); +uint16_t ixgbe_recv_pkts_lro_bulk_alloc(void *rx_queue, + struct rte_mbuf **rx_pkts, uint16_t nb_pkts); + +uint16_t ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); + +uint16_t ixgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); + +uint16_t ixgbe_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); + +int ixgbe_dev_rss_hash_update(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf); + +int ixgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf); + +uint16_t ixgbe_reta_size_get(enum ixgbe_mac_type mac_type); + +uint32_t ixgbe_reta_reg_get(enum ixgbe_mac_type mac_type, uint16_t reta_idx); + +uint32_t ixgbe_mrqc_reg_get(enum ixgbe_mac_type mac_type); + +uint32_t ixgbe_rssrk_reg_get(enum ixgbe_mac_type mac_type, uint8_t i); + +bool ixgbe_rss_update_sp(enum ixgbe_mac_type mac_type); + +int ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev, + struct rte_eth_ntuple_filter *filter, + bool add); +int ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev, + struct rte_eth_ethertype_filter *filter, + bool add); +int 
ixgbe_syn_filter_set(struct rte_eth_dev *dev, + struct rte_eth_syn_filter *filter, + bool add); +int +ixgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev, + struct rte_eth_l2_tunnel_conf *l2_tunnel, + bool restore); +int +ixgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev, + struct rte_eth_l2_tunnel_conf *l2_tunnel); +void ixgbe_filterlist_init(void); +void ixgbe_filterlist_flush(void); +/* + * Flow director function prototypes + */ +int ixgbe_fdir_configure(struct rte_eth_dev *dev); +int ixgbe_fdir_set_input_mask(struct rte_eth_dev *dev); +int ixgbe_fdir_set_flexbytes_offset(struct rte_eth_dev *dev, + uint16_t offset); +int ixgbe_fdir_filter_program(struct rte_eth_dev *dev, + struct ixgbe_fdir_rule *rule, + bool del, bool update); + +void ixgbe_configure_dcb(struct rte_eth_dev *dev); + +int +ixgbe_dev_link_update_share(struct rte_eth_dev *dev, + int wait_to_complete, int vf); + +/* + * misc function prototypes + */ +void ixgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev); + +void ixgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev); + +void ixgbe_vlan_hw_strip_config(struct rte_eth_dev *dev); + +void ixgbe_pf_host_init(struct rte_eth_dev *eth_dev); + +void ixgbe_pf_host_uninit(struct rte_eth_dev *eth_dev); + +void ixgbe_pf_mbx_process(struct rte_eth_dev *eth_dev); + +int ixgbe_pf_host_configure(struct rte_eth_dev *eth_dev); + +uint32_t ixgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val); + +int ixgbe_fdir_ctrl_func(struct rte_eth_dev *dev, + enum rte_filter_op filter_op, void *arg); +void ixgbe_fdir_filter_restore(struct rte_eth_dev *dev); +int ixgbe_clear_all_fdir_filter(struct rte_eth_dev *dev); + +extern const struct rte_flow_ops ixgbe_flow_ops; + +void ixgbe_clear_all_ethertype_filter(struct rte_eth_dev *dev); +void ixgbe_clear_all_ntuple_filter(struct rte_eth_dev *dev); +void ixgbe_clear_syn_filter(struct rte_eth_dev *dev); +int ixgbe_clear_all_l2_tn_filter(struct rte_eth_dev *dev); + +int ixgbe_disable_sec_tx_path_generic(struct ixgbe_hw *hw); + +int ixgbe_enable_sec_tx_path_generic(struct ixgbe_hw *hw); + +int ixgbe_vt_check(struct ixgbe_hw *hw); +int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf, + uint16_t tx_rate, uint64_t q_msk); +bool is_ixgbe_supported(struct rte_eth_dev *dev); +int ixgbe_tm_ops_get(struct rte_eth_dev *dev, void *ops); +void ixgbe_tm_conf_init(struct rte_eth_dev *dev); +void ixgbe_tm_conf_uninit(struct rte_eth_dev *dev); +int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev, uint16_t queue_idx, + uint16_t tx_rate); +int ixgbe_rss_conf_init(struct ixgbe_rte_flow_rss_conf *out, + const struct rte_flow_action_rss *in); +int ixgbe_action_rss_same(const struct rte_flow_action_rss *comp, + const struct rte_flow_action_rss *with); +int ixgbe_config_rss_filter(struct rte_eth_dev *dev, + struct ixgbe_rte_flow_rss_conf *conf, bool add); + +void ixgbe_dev_macsec_register_enable(struct rte_eth_dev *dev, + struct ixgbe_macsec_setting *macsec_setting); + +void ixgbe_dev_macsec_register_disable(struct rte_eth_dev *dev); + +void ixgbe_dev_macsec_setting_save(struct rte_eth_dev *dev, + struct ixgbe_macsec_setting *macsec_setting); + +void ixgbe_dev_macsec_setting_reset(struct rte_eth_dev *dev); + +static inline int +ixgbe_ethertype_filter_lookup(struct ixgbe_filter_info *filter_info, + uint16_t ethertype) +{ + int i; + + for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) { + if (filter_info->ethertype_filters[i].ethertype == ethertype && + (filter_info->ethertype_mask & (1 << i))) + return i; + } + return -1; +} + +static inline int 
+ixgbe_ethertype_filter_insert(struct ixgbe_filter_info *filter_info, + struct ixgbe_ethertype_filter *ethertype_filter) +{ + int i; + + for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) { + if (!(filter_info->ethertype_mask & (1 << i))) { + filter_info->ethertype_mask |= 1 << i; + filter_info->ethertype_filters[i].ethertype = + ethertype_filter->ethertype; + filter_info->ethertype_filters[i].etqf = + ethertype_filter->etqf; + filter_info->ethertype_filters[i].etqs = + ethertype_filter->etqs; + filter_info->ethertype_filters[i].conf = + ethertype_filter->conf; + return i; + } + } + return -1; +} + +static int +ixgbe_ethertype_filter_remove(struct ixgbe_filter_info *filter_info, + uint8_t idx) +{ + if (idx >= IXGBE_MAX_ETQF_FILTERS) + return -1; + filter_info->ethertype_mask &= ~(1 << idx); + filter_info->ethertype_filters[idx].ethertype = 0; + filter_info->ethertype_filters[idx].etqf = 0; + filter_info->ethertype_filters[idx].etqs = 0; + filter_info->ethertype_filters[idx].conf = FALSE; + return idx; +} + +#endif /* _IXGBE_ETHDEV_H_ */ diff --git a/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_fdir.c b/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_fdir.c new file mode 100644 index 000000000..166dae1e0 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_fdir.c @@ -0,0 +1,1648 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2015 Intel Corporation + */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "ixgbe_logs.h" +#include "base/ixgbe_api.h" +#include "base/ixgbe_common.h" +#include "ixgbe_ethdev.h" + +/* To get PBALLOC (Packet Buffer Allocation) bits from FDIRCTRL value */ +#define FDIRCTRL_PBALLOC_MASK 0x03 + +/* For calculating memory required for FDIR filters */ +#define PBALLOC_SIZE_SHIFT 15 + +/* Number of bits used to mask bucket hash for different pballoc sizes */ +#define PERFECT_BUCKET_64KB_HASH_MASK 0x07FF /* 11 bits */ +#define PERFECT_BUCKET_128KB_HASH_MASK 0x0FFF /* 12 bits */ +#define PERFECT_BUCKET_256KB_HASH_MASK 0x1FFF /* 13 bits */ +#define SIG_BUCKET_64KB_HASH_MASK 0x1FFF /* 13 bits */ +#define SIG_BUCKET_128KB_HASH_MASK 0x3FFF /* 14 bits */ +#define SIG_BUCKET_256KB_HASH_MASK 0x7FFF /* 15 bits */ +#define IXGBE_DEFAULT_FLEXBYTES_OFFSET 12 /* default flexbytes offset in bytes */ +#define IXGBE_FDIR_MAX_FLEX_LEN 2 /* len in bytes of flexbytes */ +#define IXGBE_MAX_FLX_SOURCE_OFF 62 +#define IXGBE_FDIRCTRL_FLEX_MASK (0x1F << IXGBE_FDIRCTRL_FLEX_SHIFT) +#define IXGBE_FDIRCMD_CMD_INTERVAL_US 10 + +#define IXGBE_FDIR_FLOW_TYPES ( \ + (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \ + (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \ + (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \ + (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \ + (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \ + (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \ + (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \ + (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER)) + +#define IPV6_ADDR_TO_MASK(ipaddr, ipv6m) do { \ + uint8_t ipv6_addr[16]; \ + uint8_t i; \ + rte_memcpy(ipv6_addr, (ipaddr), sizeof(ipv6_addr));\ + (ipv6m) = 0; \ + for (i = 0; i < sizeof(ipv6_addr); i++) { \ + if (ipv6_addr[i] == UINT8_MAX) \ + (ipv6m) |= 1 << i; \ + else if (ipv6_addr[i] != 0) { \ + PMD_DRV_LOG(ERR, " invalid IPv6 address mask."); \ + return -EINVAL; \ + } \ + } \ +} while (0) + +#define IPV6_MASK_TO_ADDR(ipv6m, ipaddr) do { \ + uint8_t ipv6_addr[16]; \ + uint8_t i; \ + for (i = 0; i < sizeof(ipv6_addr); i++) { \ + if ((ipv6m) & (1 << i)) \ + ipv6_addr[i] = UINT8_MAX;
\ + else \ + ipv6_addr[i] = 0; \ + } \ + rte_memcpy((ipaddr), ipv6_addr, sizeof(ipv6_addr));\ +} while (0) + +#define IXGBE_FDIRIP6M_INNER_MAC_SHIFT 4 + +static int fdir_erase_filter_82599(struct ixgbe_hw *hw, uint32_t fdirhash); +static int fdir_set_input_mask(struct rte_eth_dev *dev, + const struct rte_eth_fdir_masks *input_mask); +static int fdir_set_input_mask_82599(struct rte_eth_dev *dev); +static int fdir_set_input_mask_x550(struct rte_eth_dev *dev); +static int ixgbe_set_fdir_flex_conf(struct rte_eth_dev *dev, + const struct rte_eth_fdir_flex_conf *conf, uint32_t *fdirctrl); +static int fdir_enable_82599(struct ixgbe_hw *hw, uint32_t fdirctrl); +static int ixgbe_fdir_filter_to_atr_input( + const struct rte_eth_fdir_filter *fdir_filter, + union ixgbe_atr_input *input, + enum rte_fdir_mode mode); +static uint32_t ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input, + uint32_t key); +static uint32_t atr_compute_sig_hash_82599(union ixgbe_atr_input *input, + enum rte_fdir_pballoc_type pballoc); +static uint32_t atr_compute_perfect_hash_82599(union ixgbe_atr_input *input, + enum rte_fdir_pballoc_type pballoc); +static int fdir_write_perfect_filter_82599(struct ixgbe_hw *hw, + union ixgbe_atr_input *input, uint8_t queue, + uint32_t fdircmd, uint32_t fdirhash, + enum rte_fdir_mode mode); +static int fdir_add_signature_filter_82599(struct ixgbe_hw *hw, + union ixgbe_atr_input *input, u8 queue, uint32_t fdircmd, + uint32_t fdirhash); +static int ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev, + const struct rte_eth_fdir_filter *fdir_filter, + bool del, + bool update); +static int ixgbe_fdir_flush(struct rte_eth_dev *dev); +static void ixgbe_fdir_info_get(struct rte_eth_dev *dev, + struct rte_eth_fdir_info *fdir_info); +static void ixgbe_fdir_stats_get(struct rte_eth_dev *dev, + struct rte_eth_fdir_stats *fdir_stats); + +/** + * This function is based on ixgbe_fdir_enable_82599() in base/ixgbe_82599.c. + * It adds extra configuration of fdirctrl that is common for all filter types. + * + * Initialize Flow Director control registers + * @hw: pointer to hardware structure + * @fdirctrl: value to write to flow director control register + **/ +static int +fdir_enable_82599(struct ixgbe_hw *hw, uint32_t fdirctrl) +{ + int i; + + PMD_INIT_FUNC_TRACE(); + + /* Prime the keys for hashing */ + IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY); + IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY); + + /* + * Continue setup of fdirctrl register bits: + * Set the maximum length per hash bucket to 0xA filters + * Send interrupt when 64 filters are left + */ + fdirctrl |= (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) | + (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT); + + /* + * Poll init-done after we write the register. Estimated times: + * 10G: PBALLOC = 11b, timing is 60us + * 1G: PBALLOC = 11b, timing is 600us + * 100M: PBALLOC = 11b, timing is 6ms + * + * Multiple these timings by 4 if under full Rx load + * + * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for + * 1 msec per poll time. If we're at line rate and drop to 100M, then + * this might not finish in our poll time, but we can live with that + * for now. 
+ */ + IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl); + IXGBE_WRITE_FLUSH(hw); + for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) { + if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) & + IXGBE_FDIRCTRL_INIT_DONE) + break; + msec_delay(1); + } + + if (i >= IXGBE_FDIR_INIT_DONE_POLL) { + PMD_INIT_LOG(ERR, "Flow Director poll time exceeded during enabling!"); + return -ETIMEDOUT; + } + return 0; +} + +/* + * Set appropriate bits in fdirctrl for: variable reporting levels, moving + * flexbytes matching field, and drop queue (only for perfect matching mode). + */ +static inline int +configure_fdir_flags(const struct rte_fdir_conf *conf, uint32_t *fdirctrl) +{ + *fdirctrl = 0; + + switch (conf->pballoc) { + case RTE_FDIR_PBALLOC_64K: + /* 8k - 1 signature filters */ + *fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_64K; + break; + case RTE_FDIR_PBALLOC_128K: + /* 16k - 1 signature filters */ + *fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_128K; + break; + case RTE_FDIR_PBALLOC_256K: + /* 32k - 1 signature filters */ + *fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_256K; + break; + default: + /* bad value */ + PMD_INIT_LOG(ERR, "Invalid fdir_conf->pballoc value"); + return -EINVAL; + }; + + /* status flags: write hash & swindex in the rx descriptor */ + switch (conf->status) { + case RTE_FDIR_NO_REPORT_STATUS: + /* do nothing, default mode */ + break; + case RTE_FDIR_REPORT_STATUS: + /* report status when the packet matches a fdir rule */ + *fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS; + break; + case RTE_FDIR_REPORT_STATUS_ALWAYS: + /* always report status */ + *fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS_ALWAYS; + break; + default: + /* bad value */ + PMD_INIT_LOG(ERR, "Invalid fdir_conf->status value"); + return -EINVAL; + }; + + *fdirctrl |= (IXGBE_DEFAULT_FLEXBYTES_OFFSET / sizeof(uint16_t)) << + IXGBE_FDIRCTRL_FLEX_SHIFT; + + if (conf->mode >= RTE_FDIR_MODE_PERFECT && + conf->mode <= RTE_FDIR_MODE_PERFECT_TUNNEL) { + *fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH; + *fdirctrl |= (conf->drop_queue << IXGBE_FDIRCTRL_DROP_Q_SHIFT); + if (conf->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) + *fdirctrl |= (IXGBE_FDIRCTRL_FILTERMODE_MACVLAN + << IXGBE_FDIRCTRL_FILTERMODE_SHIFT); + else if (conf->mode == RTE_FDIR_MODE_PERFECT_TUNNEL) + *fdirctrl |= (IXGBE_FDIRCTRL_FILTERMODE_CLOUD + << IXGBE_FDIRCTRL_FILTERMODE_SHIFT); + } + + return 0; +} + +/** + * Reverse the bits in FDIR registers that store 2 x 16 bit masks. + * + * @hi_dword: Bits 31:16 mask to be bit swapped. + * @lo_dword: Bits 15:0 mask to be bit swapped. + * + * Flow director uses several registers to store 2 x 16 bit masks with the + * bits reversed such as FDIRTCPM, FDIRUDPM. The LS bit of the + * mask affects the MS bit/byte of the target. This function reverses the + * bits in these masks. + * **/ +static inline uint32_t +reverse_fdir_bitmasks(uint16_t hi_dword, uint16_t lo_dword) +{ + uint32_t mask = hi_dword << 16; + + mask |= lo_dword; + mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1); + mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2); + mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4); + return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8); +} + +/* + * This references ixgbe_fdir_set_input_mask_82599() in base/ixgbe_82599.c, + * but makes use of the rte_fdir_masks structure to see which bits to set. 
+ */ +static int +fdir_set_input_mask_82599(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_hw_fdir_info *info = + IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private); + /* + * mask VM pool and DIPv6 since there are currently not supported + * mask FLEX byte, it will be set in flex_conf + */ + uint32_t fdirm = IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6; + uint32_t fdirtcpm; /* TCP source and destination port masks. */ + uint32_t fdiripv6m; /* IPv6 source and destination masks. */ + volatile uint32_t *reg; + + PMD_INIT_FUNC_TRACE(); + + /* + * Program the relevant mask registers. If src/dst_port or src/dst_addr + * are zero, then assume a full mask for that field. Also assume that + * a VLAN of 0 is unspecified, so mask that out as well. L4type + * cannot be masked out in this implementation. + */ + if (info->mask.dst_port_mask == 0 && info->mask.src_port_mask == 0) + /* use the L4 protocol mask for raw IPv4/IPv6 traffic */ + fdirm |= IXGBE_FDIRM_L4P; + + if (info->mask.vlan_tci_mask == rte_cpu_to_be_16(0x0FFF)) + /* mask VLAN Priority */ + fdirm |= IXGBE_FDIRM_VLANP; + else if (info->mask.vlan_tci_mask == rte_cpu_to_be_16(0xE000)) + /* mask VLAN ID */ + fdirm |= IXGBE_FDIRM_VLANID; + else if (info->mask.vlan_tci_mask == 0) + /* mask VLAN ID and Priority */ + fdirm |= IXGBE_FDIRM_VLANID | IXGBE_FDIRM_VLANP; + else if (info->mask.vlan_tci_mask != rte_cpu_to_be_16(0xEFFF)) { + PMD_INIT_LOG(ERR, "invalid vlan_tci_mask"); + return -EINVAL; + } + + /* flex byte mask */ + if (info->mask.flex_bytes_mask == 0) + fdirm |= IXGBE_FDIRM_FLEX; + + IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm); + + /* store the TCP/UDP port masks, bit reversed from port layout */ + fdirtcpm = reverse_fdir_bitmasks( + rte_be_to_cpu_16(info->mask.dst_port_mask), + rte_be_to_cpu_16(info->mask.src_port_mask)); + + /* write all the same so that UDP, TCP and SCTP use the same mask + * (little-endian) + */ + IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm); + IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm); + IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm); + + /* Store source and destination IPv4 masks (big-endian), + * can not use IXGBE_WRITE_REG. + */ + reg = IXGBE_PCI_REG_ADDR(hw, IXGBE_FDIRSIP4M); + *reg = ~(info->mask.src_ipv4_mask); + reg = IXGBE_PCI_REG_ADDR(hw, IXGBE_FDIRDIP4M); + *reg = ~(info->mask.dst_ipv4_mask); + + if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_SIGNATURE) { + /* + * Store source and destination IPv6 masks (bit reversed) + */ + fdiripv6m = (info->mask.dst_ipv6_mask << 16) | + info->mask.src_ipv6_mask; + + IXGBE_WRITE_REG(hw, IXGBE_FDIRIP6M, ~fdiripv6m); + } + + return IXGBE_SUCCESS; +} + +/* + * This references ixgbe_fdir_set_input_mask_82599() in base/ixgbe_82599.c, + * but makes use of the rte_fdir_masks structure to see which bits to set. 
+ */ +static int +fdir_set_input_mask_x550(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_hw_fdir_info *info = + IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private); + /* mask VM pool and DIPv6 since they are currently not supported + * mask FLEX byte, it will be set in flex_conf + */ + uint32_t fdirm = IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6 | + IXGBE_FDIRM_FLEX; + uint32_t fdiripv6m; + enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode; + uint16_t mac_mask; + + PMD_INIT_FUNC_TRACE(); + + /* set the default UDP port for VxLAN */ + if (mode == RTE_FDIR_MODE_PERFECT_TUNNEL) + IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, RTE_VXLAN_DEFAULT_PORT); + + /* some bits must be set for mac vlan or tunnel mode */ + fdirm |= IXGBE_FDIRM_L4P | IXGBE_FDIRM_L3P; + + if (info->mask.vlan_tci_mask == rte_cpu_to_be_16(0x0FFF)) + /* mask VLAN Priority */ + fdirm |= IXGBE_FDIRM_VLANP; + else if (info->mask.vlan_tci_mask == rte_cpu_to_be_16(0xE000)) + /* mask VLAN ID */ + fdirm |= IXGBE_FDIRM_VLANID; + else if (info->mask.vlan_tci_mask == 0) + /* mask VLAN ID and Priority */ + fdirm |= IXGBE_FDIRM_VLANID | IXGBE_FDIRM_VLANP; + else if (info->mask.vlan_tci_mask != rte_cpu_to_be_16(0xEFFF)) { + PMD_INIT_LOG(ERR, "invalid vlan_tci_mask"); + return -EINVAL; + } + + IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm); + + fdiripv6m = ((u32)0xFFFFU << IXGBE_FDIRIP6M_DIPM_SHIFT); + fdiripv6m |= IXGBE_FDIRIP6M_ALWAYS_MASK; + if (mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) + fdiripv6m |= IXGBE_FDIRIP6M_TUNNEL_TYPE | + IXGBE_FDIRIP6M_TNI_VNI; + + if (mode == RTE_FDIR_MODE_PERFECT_TUNNEL) { + fdiripv6m |= IXGBE_FDIRIP6M_INNER_MAC; + mac_mask = info->mask.mac_addr_byte_mask & + (IXGBE_FDIRIP6M_INNER_MAC >> + IXGBE_FDIRIP6M_INNER_MAC_SHIFT); + fdiripv6m &= ~((mac_mask << IXGBE_FDIRIP6M_INNER_MAC_SHIFT) & + IXGBE_FDIRIP6M_INNER_MAC); + + switch (info->mask.tunnel_type_mask) { + case 0: + /* Mask tunnel type */ + fdiripv6m |= IXGBE_FDIRIP6M_TUNNEL_TYPE; + break; + case 1: + break; + default: + PMD_INIT_LOG(ERR, "invalid tunnel_type_mask"); + return -EINVAL; + } + + switch (rte_be_to_cpu_32(info->mask.tunnel_id_mask)) { + case 0x0: + /* Mask vxlan id */ + fdiripv6m |= IXGBE_FDIRIP6M_TNI_VNI; + break; + case 0x00FFFFFF: + fdiripv6m |= IXGBE_FDIRIP6M_TNI_VNI_24; + break; + case 0xFFFFFFFF: + break; + default: + PMD_INIT_LOG(ERR, "invalid tunnel_id_mask"); + return -EINVAL; + } + } + + IXGBE_WRITE_REG(hw, IXGBE_FDIRIP6M, fdiripv6m); + IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, 0xFFFFFFFF); + IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, 0xFFFFFFFF); + IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, 0xFFFFFFFF); + IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M, 0xFFFFFFFF); + IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M, 0xFFFFFFFF); + + return IXGBE_SUCCESS; +} + +static int +ixgbe_fdir_store_input_mask_82599(struct rte_eth_dev *dev, + const struct rte_eth_fdir_masks *input_mask) +{ + struct ixgbe_hw_fdir_info *info = + IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private); + uint16_t dst_ipv6m = 0; + uint16_t src_ipv6m = 0; + + memset(&info->mask, 0, sizeof(struct ixgbe_hw_fdir_mask)); + info->mask.vlan_tci_mask = input_mask->vlan_tci_mask; + info->mask.src_port_mask = input_mask->src_port_mask; + info->mask.dst_port_mask = input_mask->dst_port_mask; + info->mask.src_ipv4_mask = input_mask->ipv4_mask.src_ip; + info->mask.dst_ipv4_mask = input_mask->ipv4_mask.dst_ip; + IPV6_ADDR_TO_MASK(input_mask->ipv6_mask.src_ip, src_ipv6m); + IPV6_ADDR_TO_MASK(input_mask->ipv6_mask.dst_ip, dst_ipv6m); + info->mask.src_ipv6_mask =
src_ipv6m; + info->mask.dst_ipv6_mask = dst_ipv6m; + + return IXGBE_SUCCESS; +} + +static int +ixgbe_fdir_store_input_mask_x550(struct rte_eth_dev *dev, + const struct rte_eth_fdir_masks *input_mask) +{ + struct ixgbe_hw_fdir_info *info = + IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private); + + memset(&info->mask, 0, sizeof(struct ixgbe_hw_fdir_mask)); + info->mask.vlan_tci_mask = input_mask->vlan_tci_mask; + info->mask.mac_addr_byte_mask = input_mask->mac_addr_byte_mask; + info->mask.tunnel_type_mask = input_mask->tunnel_type_mask; + info->mask.tunnel_id_mask = input_mask->tunnel_id_mask; + + return IXGBE_SUCCESS; +} + +static int +ixgbe_fdir_store_input_mask(struct rte_eth_dev *dev, + const struct rte_eth_fdir_masks *input_mask) +{ + enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode; + + if (mode >= RTE_FDIR_MODE_SIGNATURE && + mode <= RTE_FDIR_MODE_PERFECT) + return ixgbe_fdir_store_input_mask_82599(dev, input_mask); + else if (mode >= RTE_FDIR_MODE_PERFECT_MAC_VLAN && + mode <= RTE_FDIR_MODE_PERFECT_TUNNEL) + return ixgbe_fdir_store_input_mask_x550(dev, input_mask); + + PMD_DRV_LOG(ERR, "Not supported fdir mode - %d!", mode); + return -ENOTSUP; +} + +int +ixgbe_fdir_set_input_mask(struct rte_eth_dev *dev) +{ + enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode; + + if (mode >= RTE_FDIR_MODE_SIGNATURE && + mode <= RTE_FDIR_MODE_PERFECT) + return fdir_set_input_mask_82599(dev); + else if (mode >= RTE_FDIR_MODE_PERFECT_MAC_VLAN && + mode <= RTE_FDIR_MODE_PERFECT_TUNNEL) + return fdir_set_input_mask_x550(dev); + + PMD_DRV_LOG(ERR, "Not supported fdir mode - %d!", mode); + return -ENOTSUP; +} + +int +ixgbe_fdir_set_flexbytes_offset(struct rte_eth_dev *dev, + uint16_t offset) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t fdirctrl; + int i; + + fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL); + + fdirctrl &= ~IXGBE_FDIRCTRL_FLEX_MASK; + fdirctrl |= ((offset >> 1) /* convert to word offset */ + << IXGBE_FDIRCTRL_FLEX_SHIFT); + + IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl); + IXGBE_WRITE_FLUSH(hw); + for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) { + if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) & + IXGBE_FDIRCTRL_INIT_DONE) + break; + msec_delay(1); + } + return 0; +} + +static int +fdir_set_input_mask(struct rte_eth_dev *dev, + const struct rte_eth_fdir_masks *input_mask) +{ + int ret; + + ret = ixgbe_fdir_store_input_mask(dev, input_mask); + if (ret) + return ret; + + return ixgbe_fdir_set_input_mask(dev); +} + +/* + * ixgbe_check_fdir_flex_conf -check if the flex payload and mask configuration + * arguments are valid + */ +static int +ixgbe_set_fdir_flex_conf(struct rte_eth_dev *dev, + const struct rte_eth_fdir_flex_conf *conf, uint32_t *fdirctrl) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_hw_fdir_info *info = + IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private); + const struct rte_eth_flex_payload_cfg *flex_cfg; + const struct rte_eth_fdir_flex_mask *flex_mask; + uint32_t fdirm; + uint16_t flexbytes = 0; + uint16_t i; + + fdirm = IXGBE_READ_REG(hw, IXGBE_FDIRM); + + if (conf == NULL) { + PMD_DRV_LOG(ERR, "NULL pointer."); + return -EINVAL; + } + + for (i = 0; i < conf->nb_payloads; i++) { + flex_cfg = &conf->flex_set[i]; + if (flex_cfg->type != RTE_ETH_RAW_PAYLOAD) { + PMD_DRV_LOG(ERR, "unsupported payload type."); + return -EINVAL; + } + if (((flex_cfg->src_offset[0] & 0x1) == 0) && + (flex_cfg->src_offset[1] == flex_cfg->src_offset[0] + 1) && + (flex_cfg->src_offset[0] <= 
IXGBE_MAX_FLX_SOURCE_OFF)) { + *fdirctrl &= ~IXGBE_FDIRCTRL_FLEX_MASK; + *fdirctrl |= + (flex_cfg->src_offset[0] / sizeof(uint16_t)) << + IXGBE_FDIRCTRL_FLEX_SHIFT; + } else { + PMD_DRV_LOG(ERR, "invalid flexbytes arguments."); + return -EINVAL; + } + } + + for (i = 0; i < conf->nb_flexmasks; i++) { + flex_mask = &conf->flex_mask[i]; + if (flex_mask->flow_type != RTE_ETH_FLOW_UNKNOWN) { + PMD_DRV_LOG(ERR, "flexmask should be set globally."); + return -EINVAL; + } + flexbytes = (uint16_t)(((flex_mask->mask[0] << 8) & 0xFF00) | + ((flex_mask->mask[1]) & 0xFF)); + if (flexbytes == UINT16_MAX) + fdirm &= ~IXGBE_FDIRM_FLEX; + else if (flexbytes != 0) { + /* IXGBE_FDIRM_FLEX is set by default when set mask */ + PMD_DRV_LOG(ERR, " invalid flexbytes mask arguments."); + return -EINVAL; + } + } + IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm); + info->mask.flex_bytes_mask = flexbytes ? UINT16_MAX : 0; + info->flex_bytes_offset = (uint8_t)((*fdirctrl & + IXGBE_FDIRCTRL_FLEX_MASK) >> + IXGBE_FDIRCTRL_FLEX_SHIFT); + return 0; +} + +int +ixgbe_fdir_configure(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int err; + uint32_t fdirctrl, pbsize; + int i; + enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode; + + PMD_INIT_FUNC_TRACE(); + + if (hw->mac.type != ixgbe_mac_82599EB && + hw->mac.type != ixgbe_mac_X540 && + hw->mac.type != ixgbe_mac_X550 && + hw->mac.type != ixgbe_mac_X550EM_x && + hw->mac.type != ixgbe_mac_X550EM_a) + return -ENOSYS; + + /* x550 supports mac-vlan and tunnel mode but other NICs not */ + if (hw->mac.type != ixgbe_mac_X550 && + hw->mac.type != ixgbe_mac_X550EM_x && + hw->mac.type != ixgbe_mac_X550EM_a && + mode != RTE_FDIR_MODE_SIGNATURE && + mode != RTE_FDIR_MODE_PERFECT) + return -ENOSYS; + + err = configure_fdir_flags(&dev->data->dev_conf.fdir_conf, &fdirctrl); + if (err) + return err; + + /* + * Before enabling Flow Director, the Rx Packet Buffer size + * must be reduced. The new value is the current size minus + * flow director memory usage size. + */ + pbsize = (1 << (PBALLOC_SIZE_SHIFT + (fdirctrl & FDIRCTRL_PBALLOC_MASK))); + IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), + (IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) - pbsize)); + + /* + * The defaults in the HW for RX PB 1-7 are not zero and so should be + * initialized to zero for non DCB mode otherwise actual total RX PB + * would be bigger than programmed and filter space would run into + * the PB 0 region. + */ + for (i = 1; i < 8; i++) + IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0); + + err = fdir_set_input_mask(dev, &dev->data->dev_conf.fdir_conf.mask); + if (err < 0) { + PMD_INIT_LOG(ERR, " Error on setting FD mask"); + return err; + } + err = ixgbe_set_fdir_flex_conf(dev, + &dev->data->dev_conf.fdir_conf.flex_conf, &fdirctrl); + if (err < 0) { + PMD_INIT_LOG(ERR, " Error on setting FD flexible arguments."); + return err; + } + + err = fdir_enable_82599(hw, fdirctrl); + if (err < 0) { + PMD_INIT_LOG(ERR, " Error on enabling FD."); + return err; + } + return 0; +} + +/* + * Convert DPDK rte_eth_fdir_filter struct to ixgbe_atr_input union that is used + * by the IXGBE driver code. 
+ */ +static int +ixgbe_fdir_filter_to_atr_input(const struct rte_eth_fdir_filter *fdir_filter, + union ixgbe_atr_input *input, enum rte_fdir_mode mode) +{ + input->formatted.vlan_id = fdir_filter->input.flow_ext.vlan_tci; + input->formatted.flex_bytes = (uint16_t)( + (fdir_filter->input.flow_ext.flexbytes[1] << 8 & 0xFF00) | + (fdir_filter->input.flow_ext.flexbytes[0] & 0xFF)); + + switch (fdir_filter->input.flow_type) { + case RTE_ETH_FLOW_NONFRAG_IPV4_UDP: + input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4; + break; + case RTE_ETH_FLOW_NONFRAG_IPV4_TCP: + input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4; + break; + case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP: + input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4; + break; + case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER: + input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_IPV4; + break; + case RTE_ETH_FLOW_NONFRAG_IPV6_UDP: + input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_UDPV6; + break; + case RTE_ETH_FLOW_NONFRAG_IPV6_TCP: + input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV6; + break; + case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP: + input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV6; + break; + case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER: + input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_IPV6; + break; + default: + break; + } + + switch (fdir_filter->input.flow_type) { + case RTE_ETH_FLOW_NONFRAG_IPV4_UDP: + case RTE_ETH_FLOW_NONFRAG_IPV4_TCP: + input->formatted.src_port = + fdir_filter->input.flow.udp4_flow.src_port; + input->formatted.dst_port = + fdir_filter->input.flow.udp4_flow.dst_port; + /* fall-through */ + /*for SCTP flow type, port and verify_tag are meaningless in ixgbe.*/ + case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP: + case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER: + input->formatted.src_ip[0] = + fdir_filter->input.flow.ip4_flow.src_ip; + input->formatted.dst_ip[0] = + fdir_filter->input.flow.ip4_flow.dst_ip; + break; + + case RTE_ETH_FLOW_NONFRAG_IPV6_UDP: + case RTE_ETH_FLOW_NONFRAG_IPV6_TCP: + input->formatted.src_port = + fdir_filter->input.flow.udp6_flow.src_port; + input->formatted.dst_port = + fdir_filter->input.flow.udp6_flow.dst_port; + /* fall-through */ + /*for SCTP flow type, port and verify_tag are meaningless in ixgbe.*/ + case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP: + case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER: + rte_memcpy(input->formatted.src_ip, + fdir_filter->input.flow.ipv6_flow.src_ip, + sizeof(input->formatted.src_ip)); + rte_memcpy(input->formatted.dst_ip, + fdir_filter->input.flow.ipv6_flow.dst_ip, + sizeof(input->formatted.dst_ip)); + break; + default: + break; + } + + if (mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) { + rte_memcpy( + input->formatted.inner_mac, + fdir_filter->input.flow.mac_vlan_flow.mac_addr.addr_bytes, + sizeof(input->formatted.inner_mac)); + } else if (mode == RTE_FDIR_MODE_PERFECT_TUNNEL) { + rte_memcpy( + input->formatted.inner_mac, + fdir_filter->input.flow.tunnel_flow.mac_addr.addr_bytes, + sizeof(input->formatted.inner_mac)); + if (fdir_filter->input.flow.tunnel_flow.tunnel_type == + RTE_FDIR_TUNNEL_TYPE_VXLAN) + input->formatted.tunnel_type = + IXGBE_FDIR_VXLAN_TUNNEL_TYPE; + else if (fdir_filter->input.flow.tunnel_flow.tunnel_type == + RTE_FDIR_TUNNEL_TYPE_NVGRE) + input->formatted.tunnel_type = + IXGBE_FDIR_NVGRE_TUNNEL_TYPE; + else + PMD_DRV_LOG(ERR, " invalid tunnel type arguments."); + + input->formatted.tni_vni = + fdir_filter->input.flow.tunnel_flow.tunnel_id >> 8; + } + + return 0; +} + +/* + * The below function is taken from the FreeBSD IXGBE drivers release + * 2.3.8. 
The only change is not to mask hash_result with IXGBE_ATR_HASH_MASK + * before returning, as the signature hash can use 16bits. + * + * The newer driver has optimised functions for calculating bucket and + * signature hashes. However they don't support IPv6 type packets for signature + * filters so are not used here. + * + * Note that the bkt_hash field in the ixgbe_atr_input structure is also never + * set. + * + * Compute the hashes for SW ATR + * @stream: input bitstream to compute the hash on + * @key: 32-bit hash key + **/ +static uint32_t +ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input, + uint32_t key) +{ + /* + * The algorithm is as follows: + * Hash[15:0] = Sum { S[n] x K[n+16] }, n = 0...350 + * where Sum {A[n]}, n = 0...n is bitwise XOR of A[0], A[1]...A[n] + * and A[n] x B[n] is bitwise AND between same length strings + * + * K[n] is 16 bits, defined as: + * for n modulo 32 >= 15, K[n] = K[n % 32 : (n % 32) - 15] + * for n modulo 32 < 15, K[n] = + * K[(n % 32:0) | (31:31 - (14 - (n % 32)))] + * + * S[n] is 16 bits, defined as: + * for n >= 15, S[n] = S[n:n - 15] + * for n < 15, S[n] = S[(n:0) | (350:350 - (14 - n))] + * + * To simplify for programming, the algorithm is implemented + * in software this way: + * + * key[31:0], hi_hash_dword[31:0], lo_hash_dword[31:0], hash[15:0] + * + * for (i = 0; i < 352; i+=32) + * hi_hash_dword[31:0] ^= Stream[(i+31):i]; + * + * lo_hash_dword[15:0] ^= Stream[15:0]; + * lo_hash_dword[15:0] ^= hi_hash_dword[31:16]; + * lo_hash_dword[31:16] ^= hi_hash_dword[15:0]; + * + * hi_hash_dword[31:0] ^= Stream[351:320]; + * + * if (key[0]) + * hash[15:0] ^= Stream[15:0]; + * + * for (i = 0; i < 16; i++) { + * if (key[i]) + * hash[15:0] ^= lo_hash_dword[(i+15):i]; + * if (key[i + 16]) + * hash[15:0] ^= hi_hash_dword[(i+15):i]; + * } + * + */ + __be32 common_hash_dword = 0; + u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan; + u32 hash_result = 0; + u8 i; + + /* record the flow_vm_vlan bits as they are a key part to the hash */ + flow_vm_vlan = IXGBE_NTOHL(atr_input->dword_stream[0]); + + /* generate common hash dword */ + for (i = 1; i <= 13; i++) + common_hash_dword ^= atr_input->dword_stream[i]; + + hi_hash_dword = IXGBE_NTOHL(common_hash_dword); + + /* low dword is word swapped version of common */ + lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16); + + /* apply flow ID/VM pool/VLAN ID bits to hash words */ + hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16); + + /* Process bits 0 and 16 */ + if (key & 0x0001) + hash_result ^= lo_hash_dword; + if (key & 0x00010000) + hash_result ^= hi_hash_dword; + + /* + * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to + * delay this because bit 0 of the stream should not be processed + * so we do not add the vlan until after bit 0 was processed + */ + lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16); + + + /* process the remaining 30 bits in the key 2 bits at a time */ + for (i = 15; i; i--) { + if (key & (0x0001 << i)) + hash_result ^= lo_hash_dword >> i; + if (key & (0x00010000 << i)) + hash_result ^= hi_hash_dword >> i; + } + + return hash_result; +} + +static uint32_t +atr_compute_perfect_hash_82599(union ixgbe_atr_input *input, + enum rte_fdir_pballoc_type pballoc) +{ + if (pballoc == RTE_FDIR_PBALLOC_256K) + return ixgbe_atr_compute_hash_82599(input, + IXGBE_ATR_BUCKET_HASH_KEY) & + PERFECT_BUCKET_256KB_HASH_MASK; + else if (pballoc == RTE_FDIR_PBALLOC_128K) + return ixgbe_atr_compute_hash_82599(input, + IXGBE_ATR_BUCKET_HASH_KEY) & + 
PERFECT_BUCKET_128KB_HASH_MASK; + else + return ixgbe_atr_compute_hash_82599(input, + IXGBE_ATR_BUCKET_HASH_KEY) & + PERFECT_BUCKET_64KB_HASH_MASK; +} + +/** + * ixgbe_fdir_check_cmd_complete - poll to check whether FDIRCMD is complete + * @hw: pointer to hardware structure + */ +static inline int +ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw, uint32_t *fdircmd) +{ + int i; + + for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) { + *fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD); + if (!(*fdircmd & IXGBE_FDIRCMD_CMD_MASK)) + return 0; + rte_delay_us(IXGBE_FDIRCMD_CMD_INTERVAL_US); + } + + return -ETIMEDOUT; +} + +/* + * Calculate the hash value needed for signature-match filters. In the FreeBSD + * driver, this is done by the optimised function + * ixgbe_atr_compute_sig_hash_82599(). However that can't be used here as it + * doesn't support calculating a hash for an IPv6 filter. + */ +static uint32_t +atr_compute_sig_hash_82599(union ixgbe_atr_input *input, + enum rte_fdir_pballoc_type pballoc) +{ + uint32_t bucket_hash, sig_hash; + + if (pballoc == RTE_FDIR_PBALLOC_256K) + bucket_hash = ixgbe_atr_compute_hash_82599(input, + IXGBE_ATR_BUCKET_HASH_KEY) & + SIG_BUCKET_256KB_HASH_MASK; + else if (pballoc == RTE_FDIR_PBALLOC_128K) + bucket_hash = ixgbe_atr_compute_hash_82599(input, + IXGBE_ATR_BUCKET_HASH_KEY) & + SIG_BUCKET_128KB_HASH_MASK; + else + bucket_hash = ixgbe_atr_compute_hash_82599(input, + IXGBE_ATR_BUCKET_HASH_KEY) & + SIG_BUCKET_64KB_HASH_MASK; + + sig_hash = ixgbe_atr_compute_hash_82599(input, + IXGBE_ATR_SIGNATURE_HASH_KEY); + + return (sig_hash << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT) | bucket_hash; +} + +/* + * This is based on ixgbe_fdir_write_perfect_filter_82599() in + * base/ixgbe_82599.c, with the ability to set extra flags in FDIRCMD register + * added, and IPv6 support also added. The hash value is also pre-calculated + * as the pballoc value is needed to do it. + */ +static int +fdir_write_perfect_filter_82599(struct ixgbe_hw *hw, + union ixgbe_atr_input *input, uint8_t queue, + uint32_t fdircmd, uint32_t fdirhash, + enum rte_fdir_mode mode) +{ + uint32_t fdirport, fdirvlan; + u32 addr_low, addr_high; + u32 tunnel_type = 0; + int err = 0; + volatile uint32_t *reg; + + if (mode == RTE_FDIR_MODE_PERFECT) { + /* record the IPv4 address (big-endian) + * can not use IXGBE_WRITE_REG. 
+ */ + reg = IXGBE_PCI_REG_ADDR(hw, IXGBE_FDIRIPSA); + *reg = input->formatted.src_ip[0]; + reg = IXGBE_PCI_REG_ADDR(hw, IXGBE_FDIRIPDA); + *reg = input->formatted.dst_ip[0]; + + /* record source and destination port (little-endian)*/ + fdirport = IXGBE_NTOHS(input->formatted.dst_port); + fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT; + fdirport |= IXGBE_NTOHS(input->formatted.src_port); + IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport); + } else if (mode >= RTE_FDIR_MODE_PERFECT_MAC_VLAN && + mode <= RTE_FDIR_MODE_PERFECT_TUNNEL) { + /* for mac vlan and tunnel modes */ + addr_low = ((u32)input->formatted.inner_mac[0] | + ((u32)input->formatted.inner_mac[1] << 8) | + ((u32)input->formatted.inner_mac[2] << 16) | + ((u32)input->formatted.inner_mac[3] << 24)); + addr_high = ((u32)input->formatted.inner_mac[4] | + ((u32)input->formatted.inner_mac[5] << 8)); + + if (mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) { + IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(0), addr_low); + IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(1), addr_high); + IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(2), 0); + } else { + /* tunnel mode */ + if (input->formatted.tunnel_type) + tunnel_type = 0x80000000; + tunnel_type |= addr_high; + IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(0), addr_low); + IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(1), tunnel_type); + IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(2), + input->formatted.tni_vni); + } + IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, 0); + IXGBE_WRITE_REG(hw, IXGBE_FDIRIPDA, 0); + IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, 0); + } + + /* record vlan (little-endian) and flex_bytes(big-endian) */ + fdirvlan = input->formatted.flex_bytes; + fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT; + fdirvlan |= IXGBE_NTOHS(input->formatted.vlan_id); + IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan); + + /* configure FDIRHASH register */ + IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash); + + /* + * flush all previous writes to make certain registers are + * programmed prior to issuing the command + */ + IXGBE_WRITE_FLUSH(hw); + + /* configure FDIRCMD register */ + fdircmd |= IXGBE_FDIRCMD_CMD_ADD_FLOW | + IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN; + fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT; + fdircmd |= (uint32_t)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT; + fdircmd |= (uint32_t)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT; + + IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd); + + PMD_DRV_LOG(DEBUG, "Rx Queue=%x hash=%x", queue, fdirhash); + + err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd); + if (err < 0) + PMD_DRV_LOG(ERR, "Timeout writing flow director filter."); + + return err; +} + +/** + * This function is based on ixgbe_atr_add_signature_filter_82599() in + * base/ixgbe_82599.c, but uses a pre-calculated hash value. It also supports + * setting extra fields in the FDIRCMD register, and removes the code that was + * verifying the flow_type field. According to the documentation, a flow type of + * 00 (i.e. not TCP, UDP, or SCTP) is not supported, however it appears to + * work ok... 
+ * + * Adds a signature hash filter + * @hw: pointer to hardware structure + * @input: unique input dword + * @queue: queue index to direct traffic to + * @fdircmd: any extra flags to set in fdircmd register + * @fdirhash: pre-calculated hash value for the filter + **/ +static int +fdir_add_signature_filter_82599(struct ixgbe_hw *hw, + union ixgbe_atr_input *input, u8 queue, uint32_t fdircmd, + uint32_t fdirhash) +{ + int err = 0; + + PMD_INIT_FUNC_TRACE(); + + /* configure FDIRCMD register */ + fdircmd |= IXGBE_FDIRCMD_CMD_ADD_FLOW | + IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN; + fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT; + fdircmd |= (uint32_t)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT; + + IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash); + IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd); + + PMD_DRV_LOG(DEBUG, "Rx Queue=%x hash=%x", queue, fdirhash); + + err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd); + if (err < 0) + PMD_DRV_LOG(ERR, "Timeout writing flow director filter."); + + return err; +} + +/* + * This is based on ixgbe_fdir_erase_perfect_filter_82599() in + * base/ixgbe_82599.c. It is modified to take in the hash as a parameter so + * that it can be used for removing signature and perfect filters. + */ +static int +fdir_erase_filter_82599(struct ixgbe_hw *hw, uint32_t fdirhash) +{ + uint32_t fdircmd = 0; + int err = 0; + + IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash); + + /* flush hash to HW */ + IXGBE_WRITE_FLUSH(hw); + + /* Query if filter is present */ + IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT); + + err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd); + if (err < 0) { + PMD_INIT_LOG(ERR, "Timeout querying for flow director filter."); + return err; + } + + /* if filter exists in hardware then remove it */ + if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) { + IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash); + IXGBE_WRITE_FLUSH(hw); + IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, + IXGBE_FDIRCMD_CMD_REMOVE_FLOW); + } + err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd); + if (err < 0) + PMD_INIT_LOG(ERR, "Timeout erasing flow director filter."); + return err; + +} + +static inline struct ixgbe_fdir_filter * +ixgbe_fdir_filter_lookup(struct ixgbe_hw_fdir_info *fdir_info, + union ixgbe_atr_input *key) +{ + int ret; + + ret = rte_hash_lookup(fdir_info->hash_handle, (const void *)key); + if (ret < 0) + return NULL; + + return fdir_info->hash_map[ret]; +} + +static inline int +ixgbe_insert_fdir_filter(struct ixgbe_hw_fdir_info *fdir_info, + struct ixgbe_fdir_filter *fdir_filter) +{ + int ret; + + ret = rte_hash_add_key(fdir_info->hash_handle, + &fdir_filter->ixgbe_fdir); + + if (ret < 0) { + PMD_DRV_LOG(ERR, + "Failed to insert fdir filter to hash table %d!", + ret); + return ret; + } + + fdir_info->hash_map[ret] = fdir_filter; + + TAILQ_INSERT_TAIL(&fdir_info->fdir_list, fdir_filter, entries); + + return 0; +} + +static inline int +ixgbe_remove_fdir_filter(struct ixgbe_hw_fdir_info *fdir_info, + union ixgbe_atr_input *key) +{ + int ret; + struct ixgbe_fdir_filter *fdir_filter; + + ret = rte_hash_del_key(fdir_info->hash_handle, key); + + if (ret < 0) { + PMD_DRV_LOG(ERR, "No such fdir filter to delete %d!", ret); + return ret; + } + + fdir_filter = fdir_info->hash_map[ret]; + fdir_info->hash_map[ret] = NULL; + + TAILQ_REMOVE(&fdir_info->fdir_list, fdir_filter, entries); + rte_free(fdir_filter); + + return 0; +} + +static int +ixgbe_interpret_fdir_filter(struct rte_eth_dev *dev, + const struct rte_eth_fdir_filter *fdir_filter, + struct ixgbe_fdir_rule 
*rule) +{ + enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode; + int err; + + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + + err = ixgbe_fdir_filter_to_atr_input(fdir_filter, + &rule->ixgbe_fdir, + fdir_mode); + if (err) + return err; + + rule->mode = fdir_mode; + if (fdir_filter->action.behavior == RTE_ETH_FDIR_REJECT) + rule->fdirflags = IXGBE_FDIRCMD_DROP; + rule->queue = fdir_filter->action.rx_queue; + rule->soft_id = fdir_filter->soft_id; + + return 0; +} + +int +ixgbe_fdir_filter_program(struct rte_eth_dev *dev, + struct ixgbe_fdir_rule *rule, + bool del, + bool update) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t fdircmd_flags; + uint32_t fdirhash; + uint8_t queue; + bool is_perfect = FALSE; + int err; + struct ixgbe_hw_fdir_info *info = + IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private); + enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode; + struct ixgbe_fdir_filter *node; + bool add_node = FALSE; + + if (fdir_mode == RTE_FDIR_MODE_NONE || + fdir_mode != rule->mode) + return -ENOTSUP; + + /* + * Sanity check for x550. + * When adding a new filter with flow type set to IPv4, + * the flow director mask should be configured beforehand, + * and the L4 protocol and ports must be masked. + */ + if ((!del) && + (hw->mac.type == ixgbe_mac_X550 || + hw->mac.type == ixgbe_mac_X550EM_x || + hw->mac.type == ixgbe_mac_X550EM_a) && + (rule->ixgbe_fdir.formatted.flow_type == + IXGBE_ATR_FLOW_TYPE_IPV4 || + rule->ixgbe_fdir.formatted.flow_type == + IXGBE_ATR_FLOW_TYPE_IPV6) && + (info->mask.src_port_mask != 0 || + info->mask.dst_port_mask != 0) && + (rule->mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN && + rule->mode != RTE_FDIR_MODE_PERFECT_TUNNEL)) { + PMD_DRV_LOG(ERR, "By this device," + " IPv4 is not supported without" + " L4 protocol and ports masked!"); + return -ENOTSUP; + } + + if (fdir_mode >= RTE_FDIR_MODE_PERFECT && + fdir_mode <= RTE_FDIR_MODE_PERFECT_TUNNEL) + is_perfect = TRUE; + + if (is_perfect) { + if (rule->ixgbe_fdir.formatted.flow_type & + IXGBE_ATR_L4TYPE_IPV6_MASK) { + PMD_DRV_LOG(ERR, "IPv6 is not supported in" + " perfect mode!"); + return -ENOTSUP; + } + fdirhash = atr_compute_perfect_hash_82599(&rule->ixgbe_fdir, + dev->data->dev_conf.fdir_conf.pballoc); + fdirhash |= rule->soft_id << + IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT; + } else + fdirhash = atr_compute_sig_hash_82599(&rule->ixgbe_fdir, + dev->data->dev_conf.fdir_conf.pballoc); + + if (del) { + err = ixgbe_remove_fdir_filter(info, &rule->ixgbe_fdir); + if (err < 0) + return err; + + err = fdir_erase_filter_82599(hw, fdirhash); + if (err < 0) + PMD_DRV_LOG(ERR, "Fail to delete FDIR filter!"); + else + PMD_DRV_LOG(DEBUG, "Success to delete FDIR filter!"); + return err; + } + /* add or update an fdir filter */ + fdircmd_flags = (update) ?
IXGBE_FDIRCMD_FILTER_UPDATE : 0; + if (rule->fdirflags & IXGBE_FDIRCMD_DROP) { + if (is_perfect) { + queue = dev->data->dev_conf.fdir_conf.drop_queue; + fdircmd_flags |= IXGBE_FDIRCMD_DROP; + } else { + PMD_DRV_LOG(ERR, "Drop option is not supported in" + " signature mode."); + return -EINVAL; + } + } else if (rule->queue < IXGBE_MAX_RX_QUEUE_NUM) + queue = (uint8_t)rule->queue; + else + return -EINVAL; + + node = ixgbe_fdir_filter_lookup(info, &rule->ixgbe_fdir); + if (node) { + if (update) { + node->fdirflags = fdircmd_flags; + node->fdirhash = fdirhash; + node->queue = queue; + } else { + PMD_DRV_LOG(ERR, "Conflict with existing fdir filter!"); + return -EINVAL; + } + } else { + add_node = TRUE; + node = rte_zmalloc("ixgbe_fdir", + sizeof(struct ixgbe_fdir_filter), + 0); + if (!node) + return -ENOMEM; + rte_memcpy(&node->ixgbe_fdir, + &rule->ixgbe_fdir, + sizeof(union ixgbe_atr_input)); + node->fdirflags = fdircmd_flags; + node->fdirhash = fdirhash; + node->queue = queue; + + err = ixgbe_insert_fdir_filter(info, node); + if (err < 0) { + rte_free(node); + return err; + } + } + + if (is_perfect) { + err = fdir_write_perfect_filter_82599(hw, &rule->ixgbe_fdir, + queue, fdircmd_flags, + fdirhash, fdir_mode); + } else { + err = fdir_add_signature_filter_82599(hw, &rule->ixgbe_fdir, + queue, fdircmd_flags, + fdirhash); + } + if (err < 0) { + PMD_DRV_LOG(ERR, "Fail to add FDIR filter!"); + + if (add_node) + (void)ixgbe_remove_fdir_filter(info, &rule->ixgbe_fdir); + } else { + PMD_DRV_LOG(DEBUG, "Success to add FDIR filter"); + } + + return err; +} + +/* ixgbe_add_del_fdir_filter - add or remove a flow director filter. + * @dev: pointer to the structure rte_eth_dev + * @fdir_filter: fdir filter entry + * @del: 1 - delete, 0 - add + * @update: 1 - update + */ +static int +ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev, + const struct rte_eth_fdir_filter *fdir_filter, + bool del, + bool update) +{ + struct ixgbe_fdir_rule rule; + int err; + + err = ixgbe_interpret_fdir_filter(dev, fdir_filter, &rule); + + if (err) + return err; + + return ixgbe_fdir_filter_program(dev, &rule, del, update); +} + +static int +ixgbe_fdir_flush(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_hw_fdir_info *info = + IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private); + int ret; + + ret = ixgbe_reinit_fdir_tables_82599(hw); + if (ret < 0) { + PMD_INIT_LOG(ERR, "Failed to re-initialize FD table."); + return ret; + } + + info->f_add = 0; + info->f_remove = 0; + info->add = 0; + info->remove = 0; + + return ret; +} + +#define FDIRENTRIES_NUM_SHIFT 10 +static void +ixgbe_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_hw_fdir_info *info = + IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private); + uint32_t fdirctrl, max_num, i; + uint8_t offset; + + fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL); + offset = ((fdirctrl & IXGBE_FDIRCTRL_FLEX_MASK) >> + IXGBE_FDIRCTRL_FLEX_SHIFT) * sizeof(uint16_t); + + fdir_info->mode = dev->data->dev_conf.fdir_conf.mode; + max_num = (1 << (FDIRENTRIES_NUM_SHIFT + + (fdirctrl & FDIRCTRL_PBALLOC_MASK))); + if (fdir_info->mode >= RTE_FDIR_MODE_PERFECT && + fdir_info->mode <= RTE_FDIR_MODE_PERFECT_TUNNEL) + fdir_info->guarant_spc = max_num; + else if (fdir_info->mode == RTE_FDIR_MODE_SIGNATURE) + fdir_info->guarant_spc = max_num * 4; + + fdir_info->mask.vlan_tci_mask = info->mask.vlan_tci_mask; +
fdir_info->mask.ipv4_mask.src_ip = info->mask.src_ipv4_mask; + fdir_info->mask.ipv4_mask.dst_ip = info->mask.dst_ipv4_mask; + IPV6_MASK_TO_ADDR(info->mask.src_ipv6_mask, + fdir_info->mask.ipv6_mask.src_ip); + IPV6_MASK_TO_ADDR(info->mask.dst_ipv6_mask, + fdir_info->mask.ipv6_mask.dst_ip); + fdir_info->mask.src_port_mask = info->mask.src_port_mask; + fdir_info->mask.dst_port_mask = info->mask.dst_port_mask; + fdir_info->mask.mac_addr_byte_mask = info->mask.mac_addr_byte_mask; + fdir_info->mask.tunnel_id_mask = info->mask.tunnel_id_mask; + fdir_info->mask.tunnel_type_mask = info->mask.tunnel_type_mask; + fdir_info->max_flexpayload = IXGBE_FDIR_MAX_FLEX_LEN; + + if (fdir_info->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN || + fdir_info->mode == RTE_FDIR_MODE_PERFECT_TUNNEL) + fdir_info->flow_types_mask[0] = 0ULL; + else + fdir_info->flow_types_mask[0] = IXGBE_FDIR_FLOW_TYPES; + for (i = 1; i < RTE_FLOW_MASK_ARRAY_SIZE; i++) + fdir_info->flow_types_mask[i] = 0ULL; + + fdir_info->flex_payload_unit = sizeof(uint16_t); + fdir_info->max_flex_payload_segment_num = 1; + fdir_info->flex_payload_limit = IXGBE_MAX_FLX_SOURCE_OFF; + fdir_info->flex_conf.nb_payloads = 1; + fdir_info->flex_conf.flex_set[0].type = RTE_ETH_RAW_PAYLOAD; + fdir_info->flex_conf.flex_set[0].src_offset[0] = offset; + fdir_info->flex_conf.flex_set[0].src_offset[1] = offset + 1; + fdir_info->flex_conf.nb_flexmasks = 1; + fdir_info->flex_conf.flex_mask[0].flow_type = RTE_ETH_FLOW_UNKNOWN; + fdir_info->flex_conf.flex_mask[0].mask[0] = + (uint8_t)(info->mask.flex_bytes_mask & 0x00FF); + fdir_info->flex_conf.flex_mask[0].mask[1] = + (uint8_t)((info->mask.flex_bytes_mask & 0xFF00) >> 8); +} + +static void +ixgbe_fdir_stats_get(struct rte_eth_dev *dev, struct rte_eth_fdir_stats *fdir_stats) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_hw_fdir_info *info = + IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private); + uint32_t reg, max_num; + enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode; + + /* Get the information from registers */ + reg = IXGBE_READ_REG(hw, IXGBE_FDIRFREE); + info->collision = (uint16_t)((reg & IXGBE_FDIRFREE_COLL_MASK) >> + IXGBE_FDIRFREE_COLL_SHIFT); + info->free = (uint16_t)((reg & IXGBE_FDIRFREE_FREE_MASK) >> + IXGBE_FDIRFREE_FREE_SHIFT); + + reg = IXGBE_READ_REG(hw, IXGBE_FDIRLEN); + info->maxhash = (uint16_t)((reg & IXGBE_FDIRLEN_MAXHASH_MASK) >> + IXGBE_FDIRLEN_MAXHASH_SHIFT); + info->maxlen = (uint8_t)((reg & IXGBE_FDIRLEN_MAXLEN_MASK) >> + IXGBE_FDIRLEN_MAXLEN_SHIFT); + + reg = IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT); + info->remove += (reg & IXGBE_FDIRUSTAT_REMOVE_MASK) >> + IXGBE_FDIRUSTAT_REMOVE_SHIFT; + info->add += (reg & IXGBE_FDIRUSTAT_ADD_MASK) >> + IXGBE_FDIRUSTAT_ADD_SHIFT; + + reg = IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT) & 0xFFFF; + info->f_remove += (reg & IXGBE_FDIRFSTAT_FREMOVE_MASK) >> + IXGBE_FDIRFSTAT_FREMOVE_SHIFT; + info->f_add += (reg & IXGBE_FDIRFSTAT_FADD_MASK) >> + IXGBE_FDIRFSTAT_FADD_SHIFT; + + /* Copy the new information in the fdir parameter */ + fdir_stats->collision = info->collision; + fdir_stats->free = info->free; + fdir_stats->maxhash = info->maxhash; + fdir_stats->maxlen = info->maxlen; + fdir_stats->remove = info->remove; + fdir_stats->add = info->add; + fdir_stats->f_remove = info->f_remove; + fdir_stats->f_add = info->f_add; + + reg = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL); + max_num = (1 << (FDIRENTRIES_NUM_SHIFT + + (reg & FDIRCTRL_PBALLOC_MASK))); + if (fdir_mode >= RTE_FDIR_MODE_PERFECT && + fdir_mode <= 
RTE_FDIR_MODE_PERFECT_TUNNEL) + fdir_stats->guarant_cnt = max_num - fdir_stats->free; + else if (fdir_mode == RTE_FDIR_MODE_SIGNATURE) + fdir_stats->guarant_cnt = max_num * 4 - fdir_stats->free; + +} + +/* + * ixgbe_fdir_ctrl_func - deal with all operations on flow director. + * @dev: pointer to the structure rte_eth_dev + * @filter_op:operation will be taken + * @arg: a pointer to specific structure corresponding to the filter_op + */ +int +ixgbe_fdir_ctrl_func(struct rte_eth_dev *dev, + enum rte_filter_op filter_op, void *arg) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int ret = 0; + + if (hw->mac.type != ixgbe_mac_82599EB && + hw->mac.type != ixgbe_mac_X540 && + hw->mac.type != ixgbe_mac_X550 && + hw->mac.type != ixgbe_mac_X550EM_x && + hw->mac.type != ixgbe_mac_X550EM_a) + return -ENOTSUP; + + if (filter_op == RTE_ETH_FILTER_NOP) + return 0; + + if (arg == NULL && filter_op != RTE_ETH_FILTER_FLUSH) + return -EINVAL; + + switch (filter_op) { + case RTE_ETH_FILTER_ADD: + ret = ixgbe_add_del_fdir_filter(dev, + (struct rte_eth_fdir_filter *)arg, FALSE, FALSE); + break; + case RTE_ETH_FILTER_UPDATE: + ret = ixgbe_add_del_fdir_filter(dev, + (struct rte_eth_fdir_filter *)arg, FALSE, TRUE); + break; + case RTE_ETH_FILTER_DELETE: + ret = ixgbe_add_del_fdir_filter(dev, + (struct rte_eth_fdir_filter *)arg, TRUE, FALSE); + break; + case RTE_ETH_FILTER_FLUSH: + ret = ixgbe_fdir_flush(dev); + break; + case RTE_ETH_FILTER_INFO: + ixgbe_fdir_info_get(dev, (struct rte_eth_fdir_info *)arg); + break; + case RTE_ETH_FILTER_STATS: + ixgbe_fdir_stats_get(dev, (struct rte_eth_fdir_stats *)arg); + break; + default: + PMD_DRV_LOG(ERR, "unknown operation %u", filter_op); + ret = -EINVAL; + break; + } + return ret; +} + +/* restore flow director filter */ +void +ixgbe_fdir_filter_restore(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_hw_fdir_info *fdir_info = + IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private); + struct ixgbe_fdir_filter *node; + bool is_perfect = FALSE; + enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode; + + if (fdir_mode >= RTE_FDIR_MODE_PERFECT && + fdir_mode <= RTE_FDIR_MODE_PERFECT_TUNNEL) + is_perfect = TRUE; + + if (is_perfect) { + TAILQ_FOREACH(node, &fdir_info->fdir_list, entries) { + (void)fdir_write_perfect_filter_82599(hw, + &node->ixgbe_fdir, + node->queue, + node->fdirflags, + node->fdirhash, + fdir_mode); + } + } else { + TAILQ_FOREACH(node, &fdir_info->fdir_list, entries) { + (void)fdir_add_signature_filter_82599(hw, + &node->ixgbe_fdir, + node->queue, + node->fdirflags, + node->fdirhash); + } + } +} + +/* remove all the flow director filters */ +int +ixgbe_clear_all_fdir_filter(struct rte_eth_dev *dev) +{ + struct ixgbe_hw_fdir_info *fdir_info = + IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private); + struct ixgbe_fdir_filter *fdir_filter; + struct ixgbe_fdir_filter *filter_flag; + int ret = 0; + + /* flush flow director */ + rte_hash_reset(fdir_info->hash_handle); + memset(fdir_info->hash_map, 0, + sizeof(struct ixgbe_fdir_filter *) * IXGBE_MAX_FDIR_FILTER_NUM); + filter_flag = TAILQ_FIRST(&fdir_info->fdir_list); + while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) { + TAILQ_REMOVE(&fdir_info->fdir_list, + fdir_filter, + entries); + rte_free(fdir_filter); + } + + if (filter_flag != NULL) + ret = ixgbe_fdir_flush(dev); + + return ret; +} diff --git a/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_flow.c b/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_flow.c new 
file mode 100644 index 000000000..b2a2bfc02 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_flow.c @@ -0,0 +1,3492 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2016 Intel Corporation + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ixgbe_logs.h" +#include "base/ixgbe_api.h" +#include "base/ixgbe_vf.h" +#include "base/ixgbe_common.h" +#include "ixgbe_ethdev.h" +#include "ixgbe_bypass.h" +#include "ixgbe_rxtx.h" +#include "base/ixgbe_type.h" +#include "base/ixgbe_phy.h" +#include "rte_pmd_ixgbe.h" + + +#define IXGBE_MIN_N_TUPLE_PRIO 1 +#define IXGBE_MAX_N_TUPLE_PRIO 7 +#define IXGBE_MAX_FLX_SOURCE_OFF 62 + +/* ntuple filter list structure */ +struct ixgbe_ntuple_filter_ele { + TAILQ_ENTRY(ixgbe_ntuple_filter_ele) entries; + struct rte_eth_ntuple_filter filter_info; +}; +/* ethertype filter list structure */ +struct ixgbe_ethertype_filter_ele { + TAILQ_ENTRY(ixgbe_ethertype_filter_ele) entries; + struct rte_eth_ethertype_filter filter_info; +}; +/* syn filter list structure */ +struct ixgbe_eth_syn_filter_ele { + TAILQ_ENTRY(ixgbe_eth_syn_filter_ele) entries; + struct rte_eth_syn_filter filter_info; +}; +/* fdir filter list structure */ +struct ixgbe_fdir_rule_ele { + TAILQ_ENTRY(ixgbe_fdir_rule_ele) entries; + struct ixgbe_fdir_rule filter_info; +}; +/* l2_tunnel filter list structure */ +struct ixgbe_eth_l2_tunnel_conf_ele { + TAILQ_ENTRY(ixgbe_eth_l2_tunnel_conf_ele) entries; + struct rte_eth_l2_tunnel_conf filter_info; +}; +/* rss filter list structure */ +struct ixgbe_rss_conf_ele { + TAILQ_ENTRY(ixgbe_rss_conf_ele) entries; + struct ixgbe_rte_flow_rss_conf filter_info; +}; +/* ixgbe_flow memory list structure */ +struct ixgbe_flow_mem { + TAILQ_ENTRY(ixgbe_flow_mem) entries; + struct rte_flow *flow; +}; + +TAILQ_HEAD(ixgbe_ntuple_filter_list, ixgbe_ntuple_filter_ele); +TAILQ_HEAD(ixgbe_ethertype_filter_list, ixgbe_ethertype_filter_ele); +TAILQ_HEAD(ixgbe_syn_filter_list, ixgbe_eth_syn_filter_ele); +TAILQ_HEAD(ixgbe_fdir_rule_filter_list, ixgbe_fdir_rule_ele); +TAILQ_HEAD(ixgbe_l2_tunnel_filter_list, ixgbe_eth_l2_tunnel_conf_ele); +TAILQ_HEAD(ixgbe_rss_filter_list, ixgbe_rss_conf_ele); +TAILQ_HEAD(ixgbe_flow_mem_list, ixgbe_flow_mem); + +static struct ixgbe_ntuple_filter_list filter_ntuple_list; +static struct ixgbe_ethertype_filter_list filter_ethertype_list; +static struct ixgbe_syn_filter_list filter_syn_list; +static struct ixgbe_fdir_rule_filter_list filter_fdir_list; +static struct ixgbe_l2_tunnel_filter_list filter_l2_tunnel_list; +static struct ixgbe_rss_filter_list filter_rss_list; +static struct ixgbe_flow_mem_list ixgbe_flow_list; + +/** + * Endless loop will never happen with below assumption + * 1. there is at least one no-void item(END) + * 2. cur is before END. + */ +static inline +const struct rte_flow_item *next_no_void_pattern( + const struct rte_flow_item pattern[], + const struct rte_flow_item *cur) +{ + const struct rte_flow_item *next = + cur ? cur + 1 : &pattern[0]; + while (1) { + if (next->type != RTE_FLOW_ITEM_TYPE_VOID) + return next; + next++; + } +} + +static inline +const struct rte_flow_action *next_no_void_action( + const struct rte_flow_action actions[], + const struct rte_flow_action *cur) +{ + const struct rte_flow_action *next = + cur ? 
cur + 1 : &actions[0]; + while (1) { + if (next->type != RTE_FLOW_ACTION_TYPE_VOID) + return next; + next++; + } +} + +/** + * Please be aware there's an assumption for all the parsers. + * rte_flow_item is using big endian, rte_flow_attr and + * rte_flow_action are using CPU order. + * Because the pattern is used to describe the packets, + * normally the packets should use network order. + */ + +/** + * Parse the rule to see if it is an n-tuple rule. + * And get the n-tuple filter info BTW. + * pattern: + * The first not void item can be ETH or IPV4. + * The second not void item must be IPV4 if the first one is ETH. + * The third not void item must be UDP or TCP. + * The next not void item must be END. + * action: + * The first not void action should be QUEUE. + * The next not void action should be END. + * pattern example: + * ITEM Spec Mask + * ETH NULL NULL + * IPV4 src_addr 192.168.1.20 0xFFFFFFFF + * dst_addr 192.167.3.50 0xFFFFFFFF + * next_proto_id 17 0xFF + * UDP/TCP/ src_port 80 0xFFFF + * SCTP dst_port 80 0xFFFF + * END + * other members in mask and spec should set to 0x00. + * item->last should be NULL. + * + * Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY. + * + */ +static int +cons_parse_ntuple_filter(const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_eth_ntuple_filter *filter, + struct rte_flow_error *error) +{ + const struct rte_flow_item *item; + const struct rte_flow_action *act; + const struct rte_flow_item_ipv4 *ipv4_spec; + const struct rte_flow_item_ipv4 *ipv4_mask; + const struct rte_flow_item_tcp *tcp_spec; + const struct rte_flow_item_tcp *tcp_mask; + const struct rte_flow_item_udp *udp_spec; + const struct rte_flow_item_udp *udp_mask; + const struct rte_flow_item_sctp *sctp_spec; + const struct rte_flow_item_sctp *sctp_mask; + const struct rte_flow_item_eth *eth_spec; + const struct rte_flow_item_eth *eth_mask; + const struct rte_flow_item_vlan *vlan_spec; + const struct rte_flow_item_vlan *vlan_mask; + struct rte_flow_item_eth eth_null; + struct rte_flow_item_vlan vlan_null; + + if (!pattern) { + rte_flow_error_set(error, + EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM, + NULL, "NULL pattern."); + return -rte_errno; + } + + if (!actions) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_NUM, + NULL, "NULL action."); + return -rte_errno; + } + if (!attr) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR, + NULL, "NULL attribute."); + return -rte_errno; + } + + memset(&eth_null, 0, sizeof(struct rte_flow_item_eth)); + memset(&vlan_null, 0, sizeof(struct rte_flow_item_vlan)); + +#ifdef RTE_LIBRTE_SECURITY + /** + * Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY + */ + act = next_no_void_action(actions, NULL); + if (act->type == RTE_FLOW_ACTION_TYPE_SECURITY) { + const void *conf = act->conf; + /* check if the next not void item is END */ + act = next_no_void_action(actions, act); + if (act->type != RTE_FLOW_ACTION_TYPE_END) { + memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, "Not supported action."); + return -rte_errno; + } + + /* get the IP pattern*/ + item = next_no_void_pattern(pattern, NULL); + while (item->type != RTE_FLOW_ITEM_TYPE_IPV4 && + item->type != RTE_FLOW_ITEM_TYPE_IPV6) { + if (item->last || + item->type == RTE_FLOW_ITEM_TYPE_END) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "IP pattern missing."); + return
-rte_errno; + } + item = next_no_void_pattern(pattern, item); + } + + filter->proto = IPPROTO_ESP; + return ixgbe_crypto_add_ingress_sa_from_flow(conf, item->spec, + item->type == RTE_FLOW_ITEM_TYPE_IPV6); + } +#endif + + /* the first not void item can be MAC or IPv4 */ + item = next_no_void_pattern(pattern, NULL); + + if (item->type != RTE_FLOW_ITEM_TYPE_ETH && + item->type != RTE_FLOW_ITEM_TYPE_IPV4) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by ntuple filter"); + return -rte_errno; + } + /* Skip Ethernet */ + if (item->type == RTE_FLOW_ITEM_TYPE_ETH) { + eth_spec = item->spec; + eth_mask = item->mask; + /*Not supported last point for range*/ + if (item->last) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + item, "Not supported last point for range"); + return -rte_errno; + + } + /* if the first item is MAC, the content should be NULL */ + if ((item->spec || item->mask) && + (memcmp(eth_spec, &eth_null, + sizeof(struct rte_flow_item_eth)) || + memcmp(eth_mask, &eth_null, + sizeof(struct rte_flow_item_eth)))) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by ntuple filter"); + return -rte_errno; + } + /* check if the next not void item is IPv4 or Vlan */ + item = next_no_void_pattern(pattern, item); + if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 && + item->type != RTE_FLOW_ITEM_TYPE_VLAN) { + rte_flow_error_set(error, + EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by ntuple filter"); + return -rte_errno; + } + } + + if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) { + vlan_spec = item->spec; + vlan_mask = item->mask; + /*Not supported last point for range*/ + if (item->last) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + item, "Not supported last point for range"); + return -rte_errno; + } + /* the content should be NULL */ + if ((item->spec || item->mask) && + (memcmp(vlan_spec, &vlan_null, + sizeof(struct rte_flow_item_vlan)) || + memcmp(vlan_mask, &vlan_null, + sizeof(struct rte_flow_item_vlan)))) { + + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by ntuple filter"); + return -rte_errno; + } + /* check if the next not void item is IPv4 */ + item = next_no_void_pattern(pattern, item); + if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) { + rte_flow_error_set(error, + EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by ntuple filter"); + return -rte_errno; + } + } + + if (item->mask) { + /* get the IPv4 info */ + if (!item->spec || !item->mask) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Invalid ntuple mask"); + return -rte_errno; + } + /*Not supported last point for range*/ + if (item->last) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + item, "Not supported last point for range"); + return -rte_errno; + } + + ipv4_mask = item->mask; + /** + * Only support src & dst addresses, protocol, + * others should be masked.
+ */ + if (ipv4_mask->hdr.version_ihl || + ipv4_mask->hdr.type_of_service || + ipv4_mask->hdr.total_length || + ipv4_mask->hdr.packet_id || + ipv4_mask->hdr.fragment_offset || + ipv4_mask->hdr.time_to_live || + ipv4_mask->hdr.hdr_checksum) { + rte_flow_error_set(error, + EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by ntuple filter"); + return -rte_errno; + } + if ((ipv4_mask->hdr.src_addr != 0 && + ipv4_mask->hdr.src_addr != UINT32_MAX) || + (ipv4_mask->hdr.dst_addr != 0 && + ipv4_mask->hdr.dst_addr != UINT32_MAX) || + (ipv4_mask->hdr.next_proto_id != UINT8_MAX && + ipv4_mask->hdr.next_proto_id != 0)) { + rte_flow_error_set(error, + EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by ntuple filter"); + return -rte_errno; + } + + filter->dst_ip_mask = ipv4_mask->hdr.dst_addr; + filter->src_ip_mask = ipv4_mask->hdr.src_addr; + filter->proto_mask = ipv4_mask->hdr.next_proto_id; + + ipv4_spec = item->spec; + filter->dst_ip = ipv4_spec->hdr.dst_addr; + filter->src_ip = ipv4_spec->hdr.src_addr; + filter->proto = ipv4_spec->hdr.next_proto_id; + } + + /* check if the next not void item is TCP or UDP */ + item = next_no_void_pattern(pattern, item); + if (item->type != RTE_FLOW_ITEM_TYPE_TCP && + item->type != RTE_FLOW_ITEM_TYPE_UDP && + item->type != RTE_FLOW_ITEM_TYPE_SCTP && + item->type != RTE_FLOW_ITEM_TYPE_END) { + memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by ntuple filter"); + return -rte_errno; + } + + if ((item->type != RTE_FLOW_ITEM_TYPE_END) && + (!item->spec && !item->mask)) { + goto action; + } + + /* get the TCP/UDP/SCTP info */ + if (item->type != RTE_FLOW_ITEM_TYPE_END && + (!item->spec || !item->mask)) { + memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Invalid ntuple mask"); + return -rte_errno; + } + + /*Not supported last point for range*/ + if (item->last) { + memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + item, "Not supported last point for range"); + return -rte_errno; + + } + + if (item->type == RTE_FLOW_ITEM_TYPE_TCP) { + tcp_mask = item->mask; + + /** + * Only support src & dst ports, tcp flags, + * others should be masked. 
+ */ + if (tcp_mask->hdr.sent_seq || + tcp_mask->hdr.recv_ack || + tcp_mask->hdr.data_off || + tcp_mask->hdr.rx_win || + tcp_mask->hdr.cksum || + tcp_mask->hdr.tcp_urp) { + memset(filter, 0, + sizeof(struct rte_eth_ntuple_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by ntuple filter"); + return -rte_errno; + } + if ((tcp_mask->hdr.src_port != 0 && + tcp_mask->hdr.src_port != UINT16_MAX) || + (tcp_mask->hdr.dst_port != 0 && + tcp_mask->hdr.dst_port != UINT16_MAX)) { + rte_flow_error_set(error, + EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by ntuple filter"); + return -rte_errno; + } + + filter->dst_port_mask = tcp_mask->hdr.dst_port; + filter->src_port_mask = tcp_mask->hdr.src_port; + if (tcp_mask->hdr.tcp_flags == 0xFF) { + filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG; + } else if (!tcp_mask->hdr.tcp_flags) { + filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG; + } else { + memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by ntuple filter"); + return -rte_errno; + } + + tcp_spec = item->spec; + filter->dst_port = tcp_spec->hdr.dst_port; + filter->src_port = tcp_spec->hdr.src_port; + filter->tcp_flags = tcp_spec->hdr.tcp_flags; + } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) { + udp_mask = item->mask; + + /** + * Only support src & dst ports, + * others should be masked. + */ + if (udp_mask->hdr.dgram_len || + udp_mask->hdr.dgram_cksum) { + memset(filter, 0, + sizeof(struct rte_eth_ntuple_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by ntuple filter"); + return -rte_errno; + } + if ((udp_mask->hdr.src_port != 0 && + udp_mask->hdr.src_port != UINT16_MAX) || + (udp_mask->hdr.dst_port != 0 && + udp_mask->hdr.dst_port != UINT16_MAX)) { + rte_flow_error_set(error, + EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by ntuple filter"); + return -rte_errno; + } + + filter->dst_port_mask = udp_mask->hdr.dst_port; + filter->src_port_mask = udp_mask->hdr.src_port; + + udp_spec = item->spec; + filter->dst_port = udp_spec->hdr.dst_port; + filter->src_port = udp_spec->hdr.src_port; + } else if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) { + sctp_mask = item->mask; + + /** + * Only support src & dst ports, + * others should be masked. + */ + if (sctp_mask->hdr.tag || + sctp_mask->hdr.cksum) { + memset(filter, 0, + sizeof(struct rte_eth_ntuple_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by ntuple filter"); + return -rte_errno; + } + + filter->dst_port_mask = sctp_mask->hdr.dst_port; + filter->src_port_mask = sctp_mask->hdr.src_port; + + sctp_spec = item->spec; + filter->dst_port = sctp_spec->hdr.dst_port; + filter->src_port = sctp_spec->hdr.src_port; + } else { + goto action; + } + + /* check if the next not void item is END */ + item = next_no_void_pattern(pattern, item); + if (item->type != RTE_FLOW_ITEM_TYPE_END) { + memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by ntuple filter"); + return -rte_errno; + } + +action: + + /** + * n-tuple only supports forwarding, + * check if the first not void action is QUEUE. 
+ */ + act = next_no_void_action(actions, NULL); + if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) { + memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + item, "Not supported action."); + return -rte_errno; + } + filter->queue = + ((const struct rte_flow_action_queue *)act->conf)->index; + + /* check if the next not void item is END */ + act = next_no_void_action(actions, act); + if (act->type != RTE_FLOW_ACTION_TYPE_END) { + memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, "Not supported action."); + return -rte_errno; + } + + /* parse attr */ + /* must be input direction */ + if (!attr->ingress) { + memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, + attr, "Only support ingress."); + return -rte_errno; + } + + /* not supported */ + if (attr->egress) { + memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, + attr, "Not support egress."); + return -rte_errno; + } + + /* not supported */ + if (attr->transfer) { + memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, + attr, "No support for transfer."); + return -rte_errno; + } + + if (attr->priority > 0xFFFF) { + memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, + attr, "Error priority."); + return -rte_errno; + } + filter->priority = (uint16_t)attr->priority; + if (attr->priority < IXGBE_MIN_N_TUPLE_PRIO || + attr->priority > IXGBE_MAX_N_TUPLE_PRIO) + filter->priority = 1; + + return 0; +} + +/* a specific function for ixgbe because the flags is specific */ +static int +ixgbe_parse_ntuple_filter(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_eth_ntuple_filter *filter, + struct rte_flow_error *error) +{ + int ret; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + MAC_TYPE_FILTER_SUP_EXT(hw->mac.type); + + ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error); + + if (ret) + return ret; + +#ifdef RTE_LIBRTE_SECURITY + /* ESP flow not really a flow*/ + if (filter->proto == IPPROTO_ESP) + return 0; +#endif + + /* Ixgbe doesn't support tcp flags. */ + if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) { + memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + NULL, "Not supported by ntuple filter"); + return -rte_errno; + } + + /* Ixgbe doesn't support many priorities. */ + if (filter->priority < IXGBE_MIN_N_TUPLE_PRIO || + filter->priority > IXGBE_MAX_N_TUPLE_PRIO) { + memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + NULL, "Priority not supported by ntuple filter"); + return -rte_errno; + } + + if (filter->queue >= dev->data->nb_rx_queues) + return -rte_errno; + + /* fixed value for ixgbe */ + filter->flags = RTE_5TUPLE_FLAGS; + return 0; +} + +/** + * Parse the rule to see if it is a ethertype rule. + * And get the ethertype filter info BTW. + * pattern: + * The first not void item can be ETH. + * The next not void item must be END. + * action: + * The first not void action should be QUEUE. 
+ * The next not void action should be END. + * pattern example: + * ITEM Spec Mask + * ETH type 0x0807 0xFFFF + * END + * other members in mask and spec should set to 0x00. + * item->last should be NULL. + */ +static int +cons_parse_ethertype_filter(const struct rte_flow_attr *attr, + const struct rte_flow_item *pattern, + const struct rte_flow_action *actions, + struct rte_eth_ethertype_filter *filter, + struct rte_flow_error *error) +{ + const struct rte_flow_item *item; + const struct rte_flow_action *act; + const struct rte_flow_item_eth *eth_spec; + const struct rte_flow_item_eth *eth_mask; + const struct rte_flow_action_queue *act_q; + + if (!pattern) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM_NUM, + NULL, "NULL pattern."); + return -rte_errno; + } + + if (!actions) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_NUM, + NULL, "NULL action."); + return -rte_errno; + } + + if (!attr) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR, + NULL, "NULL attribute."); + return -rte_errno; + } + + item = next_no_void_pattern(pattern, NULL); + /* The first non-void item should be MAC. */ + if (item->type != RTE_FLOW_ITEM_TYPE_ETH) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by ethertype filter"); + return -rte_errno; + } + + /*Not supported last point for range*/ + if (item->last) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + item, "Not supported last point for range"); + return -rte_errno; + } + + /* Get the MAC info. */ + if (!item->spec || !item->mask) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by ethertype filter"); + return -rte_errno; + } + + eth_spec = item->spec; + eth_mask = item->mask; + + /* Mask bits of source MAC address must be full of 0. + * Mask bits of destination MAC address must be full + * of 1 or full of 0. + */ + if (!rte_is_zero_ether_addr(&eth_mask->src) || + (!rte_is_zero_ether_addr(&eth_mask->dst) && + !rte_is_broadcast_ether_addr(&eth_mask->dst))) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Invalid ether address mask"); + return -rte_errno; + } + + if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Invalid ethertype mask"); + return -rte_errno; + } + + /* If mask bits of destination MAC address + * are full of 1, set RTE_ETHTYPE_FLAGS_MAC. + */ + if (rte_is_broadcast_ether_addr(&eth_mask->dst)) { + filter->mac_addr = eth_spec->dst; + filter->flags |= RTE_ETHTYPE_FLAGS_MAC; + } else { + filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC; + } + filter->ether_type = rte_be_to_cpu_16(eth_spec->type); + + /* Check if the next non-void item is END.
*/ + item = next_no_void_pattern(pattern, item); + if (item->type != RTE_FLOW_ITEM_TYPE_END) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by ethertype filter."); + return -rte_errno; + } + + /* Parse action */ + + act = next_no_void_action(actions, NULL); + if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE && + act->type != RTE_FLOW_ACTION_TYPE_DROP) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, "Not supported action."); + return -rte_errno; + } + + if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) { + act_q = (const struct rte_flow_action_queue *)act->conf; + filter->queue = act_q->index; + } else { + filter->flags |= RTE_ETHTYPE_FLAGS_DROP; + } + + /* Check if the next non-void item is END */ + act = next_no_void_action(actions, act); + if (act->type != RTE_FLOW_ACTION_TYPE_END) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, "Not supported action."); + return -rte_errno; + } + + /* Parse attr */ + /* Must be input direction */ + if (!attr->ingress) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, + attr, "Only support ingress."); + return -rte_errno; + } + + /* Not supported */ + if (attr->egress) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, + attr, "Not support egress."); + return -rte_errno; + } + + /* Not supported */ + if (attr->transfer) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, + attr, "No support for transfer."); + return -rte_errno; + } + + /* Not supported */ + if (attr->priority) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, + attr, "Not support priority."); + return -rte_errno; + } + + /* Not supported */ + if (attr->group) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_GROUP, + attr, "Not support group."); + return -rte_errno; + } + + return 0; +} + +static int +ixgbe_parse_ethertype_filter(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_eth_ethertype_filter *filter, + struct rte_flow_error *error) +{ + int ret; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + MAC_TYPE_FILTER_SUP(hw->mac.type); + + ret = cons_parse_ethertype_filter(attr, pattern, + actions, filter, error); + + if (ret) + return ret; + + /* Ixgbe doesn't support MAC address. 
*/ + if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) { + memset(filter, 0, sizeof(struct rte_eth_ethertype_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + NULL, "Not supported by ethertype filter"); + return -rte_errno; + } + + if (filter->queue >= dev->data->nb_rx_queues) { + memset(filter, 0, sizeof(struct rte_eth_ethertype_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + NULL, "queue index much too big"); + return -rte_errno; + } + + if (filter->ether_type == RTE_ETHER_TYPE_IPV4 || + filter->ether_type == RTE_ETHER_TYPE_IPV6) { + memset(filter, 0, sizeof(struct rte_eth_ethertype_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + NULL, "IPv4/IPv6 not supported by ethertype filter"); + return -rte_errno; + } + + if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) { + memset(filter, 0, sizeof(struct rte_eth_ethertype_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + NULL, "mac compare is unsupported"); + return -rte_errno; + } + + if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) { + memset(filter, 0, sizeof(struct rte_eth_ethertype_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + NULL, "drop option is unsupported"); + return -rte_errno; + } + + return 0; +} + +/** + * Parse the rule to see if it is a TCP SYN rule. + * And get the TCP SYN filter info BTW. + * pattern: + * The first not void item must be ETH. + * The second not void item must be IPV4 or IPV6. + * The third not void item must be TCP. + * The next not void item must be END. + * action: + * The first not void action should be QUEUE. + * The next not void action should be END. + * pattern example: + * ITEM Spec Mask + * ETH NULL NULL + * IPV4/IPV6 NULL NULL + * TCP tcp_flags 0x02 0xFF + * END + * other members in mask and spec should set to 0x00. + * item->last should be NULL. 
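+ * A matching pattern could be built by an application roughly as below.
+ * Illustrative sketch only; note that the parser requires the tcp_flags
+ * mask to be exactly RTE_TCP_SYN_FLAG and every other TCP header mask
+ * field to be zero.
+ *
+ *   struct rte_flow_item_tcp tcp_spec = {
+ *       .hdr.tcp_flags = RTE_TCP_SYN_FLAG,
+ *   };
+ *   struct rte_flow_item_tcp tcp_mask = {
+ *       .hdr.tcp_flags = RTE_TCP_SYN_FLAG,
+ *   };
+ *   struct rte_flow_item pattern[] = {
+ *       { .type = RTE_FLOW_ITEM_TYPE_ETH },
+ *       { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
+ *       { .type = RTE_FLOW_ITEM_TYPE_TCP,
+ *         .spec = &tcp_spec, .mask = &tcp_mask },
+ *       { .type = RTE_FLOW_ITEM_TYPE_END },
+ *   };
+ *
+ * combined with a single QUEUE action, as in the ethertype sketch above.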
+ */ +static int +cons_parse_syn_filter(const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_eth_syn_filter *filter, + struct rte_flow_error *error) +{ + const struct rte_flow_item *item; + const struct rte_flow_action *act; + const struct rte_flow_item_tcp *tcp_spec; + const struct rte_flow_item_tcp *tcp_mask; + const struct rte_flow_action_queue *act_q; + + if (!pattern) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM_NUM, + NULL, "NULL pattern."); + return -rte_errno; + } + + if (!actions) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_NUM, + NULL, "NULL action."); + return -rte_errno; + } + + if (!attr) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR, + NULL, "NULL attribute."); + return -rte_errno; + } + + + /* the first not void item should be MAC or IPv4 or IPv6 or TCP */ + item = next_no_void_pattern(pattern, NULL); + if (item->type != RTE_FLOW_ITEM_TYPE_ETH && + item->type != RTE_FLOW_ITEM_TYPE_IPV4 && + item->type != RTE_FLOW_ITEM_TYPE_IPV6 && + item->type != RTE_FLOW_ITEM_TYPE_TCP) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by syn filter"); + return -rte_errno; + } + /*Not supported last point for range*/ + if (item->last) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + item, "Not supported last point for range"); + return -rte_errno; + } + + /* Skip Ethernet */ + if (item->type == RTE_FLOW_ITEM_TYPE_ETH) { + /* if the item is MAC, the content should be NULL */ + if (item->spec || item->mask) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Invalid SYN address mask"); + return -rte_errno; + } + + /* check if the next not void item is IPv4 or IPv6 */ + item = next_no_void_pattern(pattern, item); + if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 && + item->type != RTE_FLOW_ITEM_TYPE_IPV6) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by syn filter"); + return -rte_errno; + } + } + + /* Skip IP */ + if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 || + item->type == RTE_FLOW_ITEM_TYPE_IPV6) { + /* if the item is IP, the content should be NULL */ + if (item->spec || item->mask) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Invalid SYN mask"); + return -rte_errno; + } + + /* check if the next not void item is TCP */ + item = next_no_void_pattern(pattern, item); + if (item->type != RTE_FLOW_ITEM_TYPE_TCP) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by syn filter"); + return -rte_errno; + } + } + + /* Get the TCP info. Only support SYN. 
*/ + if (!item->spec || !item->mask) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Invalid SYN mask"); + return -rte_errno; + } + /*Not supported last point for range*/ + if (item->last) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + item, "Not supported last point for range"); + return -rte_errno; + } + + tcp_spec = item->spec; + tcp_mask = item->mask; + if (!(tcp_spec->hdr.tcp_flags & RTE_TCP_SYN_FLAG) || + tcp_mask->hdr.src_port || + tcp_mask->hdr.dst_port || + tcp_mask->hdr.sent_seq || + tcp_mask->hdr.recv_ack || + tcp_mask->hdr.data_off || + tcp_mask->hdr.tcp_flags != RTE_TCP_SYN_FLAG || + tcp_mask->hdr.rx_win || + tcp_mask->hdr.cksum || + tcp_mask->hdr.tcp_urp) { + memset(filter, 0, sizeof(struct rte_eth_syn_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by syn filter"); + return -rte_errno; + } + + /* check if the next not void item is END */ + item = next_no_void_pattern(pattern, item); + if (item->type != RTE_FLOW_ITEM_TYPE_END) { + memset(filter, 0, sizeof(struct rte_eth_syn_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by syn filter"); + return -rte_errno; + } + + /* check if the first not void action is QUEUE. */ + act = next_no_void_action(actions, NULL); + if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) { + memset(filter, 0, sizeof(struct rte_eth_syn_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, "Not supported action."); + return -rte_errno; + } + + act_q = (const struct rte_flow_action_queue *)act->conf; + filter->queue = act_q->index; + if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) { + memset(filter, 0, sizeof(struct rte_eth_syn_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, "Not supported action."); + return -rte_errno; + } + + /* check if the next not void item is END */ + act = next_no_void_action(actions, act); + if (act->type != RTE_FLOW_ACTION_TYPE_END) { + memset(filter, 0, sizeof(struct rte_eth_syn_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, "Not supported action."); + return -rte_errno; + } + + /* parse attr */ + /* must be input direction */ + if (!attr->ingress) { + memset(filter, 0, sizeof(struct rte_eth_syn_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, + attr, "Only support ingress."); + return -rte_errno; + } + + /* not supported */ + if (attr->egress) { + memset(filter, 0, sizeof(struct rte_eth_syn_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, + attr, "Not support egress."); + return -rte_errno; + } + + /* not supported */ + if (attr->transfer) { + memset(filter, 0, sizeof(struct rte_eth_syn_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, + attr, "No support for transfer."); + return -rte_errno; + } + + /* Support 2 priorities, the lowest or highest. 
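+ * An application selects between them through rte_flow_attr::priority:
+ * 0 requests the lowest priority and (uint32_t)~0U (UINT32_MAX) the
+ * highest, e.g. (illustrative sketch):
+ *
+ *   struct rte_flow_attr attr = { .ingress = 1, .priority = UINT32_MAX };
+ *
+ * Any other priority value is rejected by the check that follows.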
*/ + if (!attr->priority) { + filter->hig_pri = 0; + } else if (attr->priority == (uint32_t)~0U) { + filter->hig_pri = 1; + } else { + memset(filter, 0, sizeof(struct rte_eth_syn_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, + attr, "Not support priority."); + return -rte_errno; + } + + return 0; +} + +static int +ixgbe_parse_syn_filter(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_eth_syn_filter *filter, + struct rte_flow_error *error) +{ + int ret; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + MAC_TYPE_FILTER_SUP(hw->mac.type); + + ret = cons_parse_syn_filter(attr, pattern, + actions, filter, error); + + if (filter->queue >= dev->data->nb_rx_queues) + return -rte_errno; + + if (ret) + return ret; + + return 0; +} + +/** + * Parse the rule to see if it is a L2 tunnel rule. + * And get the L2 tunnel filter info BTW. + * Only support E-tag now. + * pattern: + * The first not void item can be E_TAG. + * The next not void item must be END. + * action: + * The first not void action should be VF or PF. + * The next not void action should be END. + * pattern example: + * ITEM Spec Mask + * E_TAG grp 0x1 0x3 + e_cid_base 0x309 0xFFF + * END + * other members in mask and spec should set to 0x00. + * item->last should be NULL. + */ +static int +cons_parse_l2_tn_filter(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_eth_l2_tunnel_conf *filter, + struct rte_flow_error *error) +{ + const struct rte_flow_item *item; + const struct rte_flow_item_e_tag *e_tag_spec; + const struct rte_flow_item_e_tag *e_tag_mask; + const struct rte_flow_action *act; + const struct rte_flow_action_vf *act_vf; + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + + if (!pattern) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM_NUM, + NULL, "NULL pattern."); + return -rte_errno; + } + + if (!actions) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_NUM, + NULL, "NULL action."); + return -rte_errno; + } + + if (!attr) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR, + NULL, "NULL attribute."); + return -rte_errno; + } + + /* The first not void item should be e-tag. */ + item = next_no_void_pattern(pattern, NULL); + if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) { + memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by L2 tunnel filter"); + return -rte_errno; + } + + if (!item->spec || !item->mask) { + memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf)); + rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by L2 tunnel filter"); + return -rte_errno; + } + + /*Not supported last point for range*/ + if (item->last) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + item, "Not supported last point for range"); + return -rte_errno; + } + + e_tag_spec = item->spec; + e_tag_mask = item->mask; + + /* Only care about GRP and E cid base. 
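+ * GRP and the E-CID base are carried together in the 14 low bits of
+ * rsvd_grp_ecid_b, so the pattern example from the comment above
+ * (grp 0x1, e_cid_base 0x309) could be expressed roughly as the
+ * following illustrative sketch:
+ *
+ *   struct rte_flow_item_e_tag e_tag_spec = {
+ *       .rsvd_grp_ecid_b = RTE_BE16((0x1 << 12) | 0x309),
+ *   };
+ *   struct rte_flow_item_e_tag e_tag_mask = {
+ *       .rsvd_grp_ecid_b = RTE_BE16(0x3FFF),
+ *   };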
*/ + if (e_tag_mask->epcp_edei_in_ecid_b || + e_tag_mask->in_ecid_e || + e_tag_mask->ecid_e || + e_tag_mask->rsvd_grp_ecid_b != rte_cpu_to_be_16(0x3FFF)) { + memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by L2 tunnel filter"); + return -rte_errno; + } + + filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG; + /** + * grp and e_cid_base are bit fields and only use 14 bits. + * e-tag id is taken as little endian by HW. + */ + filter->tunnel_id = rte_be_to_cpu_16(e_tag_spec->rsvd_grp_ecid_b); + + /* check if the next not void item is END */ + item = next_no_void_pattern(pattern, item); + if (item->type != RTE_FLOW_ITEM_TYPE_END) { + memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by L2 tunnel filter"); + return -rte_errno; + } + + /* parse attr */ + /* must be input direction */ + if (!attr->ingress) { + memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, + attr, "Only support ingress."); + return -rte_errno; + } + + /* not supported */ + if (attr->egress) { + memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, + attr, "Not support egress."); + return -rte_errno; + } + + /* not supported */ + if (attr->transfer) { + memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, + attr, "No support for transfer."); + return -rte_errno; + } + + /* not supported */ + if (attr->priority) { + memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, + attr, "Not support priority."); + return -rte_errno; + } + + /* check if the first not void action is VF or PF. 
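+ * For example (illustrative sketch; the VF id 1 is a placeholder, and a
+ * PF action is given with no conf at all):
+ *
+ *   struct rte_flow_action_vf vf = { .id = 1 };
+ *   struct rte_flow_action actions[] = {
+ *       { .type = RTE_FLOW_ACTION_TYPE_VF, .conf = &vf },
+ *       { .type = RTE_FLOW_ACTION_TYPE_END },
+ *   };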
*/ + act = next_no_void_action(actions, NULL); + if (act->type != RTE_FLOW_ACTION_TYPE_VF && + act->type != RTE_FLOW_ACTION_TYPE_PF) { + memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, "Not supported action."); + return -rte_errno; + } + + if (act->type == RTE_FLOW_ACTION_TYPE_VF) { + act_vf = (const struct rte_flow_action_vf *)act->conf; + filter->pool = act_vf->id; + } else { + filter->pool = pci_dev->max_vfs; + } + + /* check if the next not void item is END */ + act = next_no_void_action(actions, act); + if (act->type != RTE_FLOW_ACTION_TYPE_END) { + memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, "Not supported action."); + return -rte_errno; + } + + return 0; +} + +static int +ixgbe_parse_l2_tn_filter(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_eth_l2_tunnel_conf *l2_tn_filter, + struct rte_flow_error *error) +{ + int ret = 0; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + uint16_t vf_num; + + ret = cons_parse_l2_tn_filter(dev, attr, pattern, + actions, l2_tn_filter, error); + + if (hw->mac.type != ixgbe_mac_X550 && + hw->mac.type != ixgbe_mac_X550EM_x && + hw->mac.type != ixgbe_mac_X550EM_a) { + memset(l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + NULL, "Not supported by L2 tunnel filter"); + return -rte_errno; + } + + vf_num = pci_dev->max_vfs; + + if (l2_tn_filter->pool > vf_num) + return -rte_errno; + + return ret; +} + +/* Parse to get the attr and action info of flow director rule. */ +static int +ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr, + const struct rte_flow_action actions[], + struct ixgbe_fdir_rule *rule, + struct rte_flow_error *error) +{ + const struct rte_flow_action *act; + const struct rte_flow_action_queue *act_q; + const struct rte_flow_action_mark *mark; + + /* parse attr */ + /* must be input direction */ + if (!attr->ingress) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, + attr, "Only support ingress."); + return -rte_errno; + } + + /* not supported */ + if (attr->egress) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, + attr, "Not support egress."); + return -rte_errno; + } + + /* not supported */ + if (attr->transfer) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, + attr, "No support for transfer."); + return -rte_errno; + } + + /* not supported */ + if (attr->priority) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, + attr, "Not support priority."); + return -rte_errno; + } + + /* check if the first not void action is QUEUE or DROP. 
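+ * A typical flow director action list therefore looks roughly like the
+ * following illustrative sketch; the queue index and mark id are
+ * placeholders, and MARK may be omitted:
+ *
+ *   struct rte_flow_action_queue queue = { .index = 1 };
+ *   struct rte_flow_action_mark mark = { .id = 0x1234 };
+ *   struct rte_flow_action actions[] = {
+ *       { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
+ *       { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
+ *       { .type = RTE_FLOW_ACTION_TYPE_END },
+ *   };
+ *
+ * RTE_FLOW_ACTION_TYPE_DROP (no conf) may replace QUEUE, except in
+ * signature mode, which rejects the drop action below.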
*/ + act = next_no_void_action(actions, NULL); + if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE && + act->type != RTE_FLOW_ACTION_TYPE_DROP) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, "Not supported action."); + return -rte_errno; + } + + if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) { + act_q = (const struct rte_flow_action_queue *)act->conf; + rule->queue = act_q->index; + } else { /* drop */ + /* signature mode does not support drop action. */ + if (rule->mode == RTE_FDIR_MODE_SIGNATURE) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, "Not supported action."); + return -rte_errno; + } + rule->fdirflags = IXGBE_FDIRCMD_DROP; + } + + /* check if the next not void item is MARK */ + act = next_no_void_action(actions, act); + if ((act->type != RTE_FLOW_ACTION_TYPE_MARK) && + (act->type != RTE_FLOW_ACTION_TYPE_END)) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, "Not supported action."); + return -rte_errno; + } + + rule->soft_id = 0; + + if (act->type == RTE_FLOW_ACTION_TYPE_MARK) { + mark = (const struct rte_flow_action_mark *)act->conf; + rule->soft_id = mark->id; + act = next_no_void_action(actions, act); + } + + /* check if the next not void item is END */ + if (act->type != RTE_FLOW_ACTION_TYPE_END) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, "Not supported action."); + return -rte_errno; + } + + return 0; +} + +/* search next no void pattern and skip fuzzy */ +static inline +const struct rte_flow_item *next_no_fuzzy_pattern( + const struct rte_flow_item pattern[], + const struct rte_flow_item *cur) +{ + const struct rte_flow_item *next = + next_no_void_pattern(pattern, cur); + while (1) { + if (next->type != RTE_FLOW_ITEM_TYPE_FUZZY) + return next; + next = next_no_void_pattern(pattern, next); + } +} + +static inline uint8_t signature_match(const struct rte_flow_item pattern[]) +{ + const struct rte_flow_item_fuzzy *spec, *last, *mask; + const struct rte_flow_item *item; + uint32_t sh, lh, mh; + int i = 0; + + while (1) { + item = pattern + i; + if (item->type == RTE_FLOW_ITEM_TYPE_END) + break; + + if (item->type == RTE_FLOW_ITEM_TYPE_FUZZY) { + spec = item->spec; + last = item->last; + mask = item->mask; + + if (!spec || !mask) + return 0; + + sh = spec->thresh; + + if (!last) + lh = sh; + else + lh = last->thresh; + + mh = mask->thresh; + sh = sh & mh; + lh = lh & mh; + + if (!sh || sh > lh) + return 0; + + return 1; + } + + i++; + } + + return 0; +} + +/** + * Parse the rule to see if it is a IP or MAC VLAN flow director rule. + * And get the flow director filter info BTW. + * UDP/TCP/SCTP PATTERN: + * The first not void item can be ETH or IPV4 or IPV6 + * The second not void item must be IPV4 or IPV6 if the first one is ETH. + * The next not void item could be UDP or TCP or SCTP (optional) + * The next not void item could be RAW (for flexbyte, optional) + * The next not void item must be END. + * A Fuzzy Match pattern can appear at any place before END. + * Fuzzy Match is optional for IPV4 but is required for IPV6 + * MAC VLAN PATTERN: + * The first not void item must be ETH. + * The second not void item must be MAC VLAN. + * The next not void item must be END. + * ACTION: + * The first not void action should be QUEUE or DROP. 
+ * The second not void optional action should be MARK, + * mark_id is a uint32_t number. + * The next not void action should be END. + * UDP/TCP/SCTP pattern example: + * ITEM Spec Mask + * ETH NULL NULL + * IPV4 src_addr 192.168.1.20 0xFFFFFFFF + * dst_addr 192.167.3.50 0xFFFFFFFF + * UDP/TCP/SCTP src_port 80 0xFFFF + * dst_port 80 0xFFFF + * FLEX relative 0 0x1 + * search 0 0x1 + * reserved 0 0 + * offset 12 0xFFFFFFFF + * limit 0 0xFFFF + * length 2 0xFFFF + * pattern[0] 0x86 0xFF + * pattern[1] 0xDD 0xFF + * END + * MAC VLAN pattern example: + * ITEM Spec Mask + * ETH dst_addr + {0xAC, 0x7B, 0xA1, {0xFF, 0xFF, 0xFF, + 0x2C, 0x6D, 0x36} 0xFF, 0xFF, 0xFF} + * MAC VLAN tci 0x2016 0xEFFF + * END + * Other members in mask and spec should set to 0x00. + * Item->last should be NULL. + */ +static int +ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct ixgbe_fdir_rule *rule, + struct rte_flow_error *error) +{ + const struct rte_flow_item *item; + const struct rte_flow_item_eth *eth_spec; + const struct rte_flow_item_eth *eth_mask; + const struct rte_flow_item_ipv4 *ipv4_spec; + const struct rte_flow_item_ipv4 *ipv4_mask; + const struct rte_flow_item_ipv6 *ipv6_spec; + const struct rte_flow_item_ipv6 *ipv6_mask; + const struct rte_flow_item_tcp *tcp_spec; + const struct rte_flow_item_tcp *tcp_mask; + const struct rte_flow_item_udp *udp_spec; + const struct rte_flow_item_udp *udp_mask; + const struct rte_flow_item_sctp *sctp_spec; + const struct rte_flow_item_sctp *sctp_mask; + const struct rte_flow_item_vlan *vlan_spec; + const struct rte_flow_item_vlan *vlan_mask; + const struct rte_flow_item_raw *raw_mask; + const struct rte_flow_item_raw *raw_spec; + uint8_t j; + + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (!pattern) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM_NUM, + NULL, "NULL pattern."); + return -rte_errno; + } + + if (!actions) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_NUM, + NULL, "NULL action."); + return -rte_errno; + } + + if (!attr) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR, + NULL, "NULL attribute."); + return -rte_errno; + } + + /** + * Some fields may not be provided. Set spec to 0 and mask to default + * value. So, we need not do anything for the not provided fields later. + */ + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask)); + rule->mask.vlan_tci_mask = 0; + rule->mask.flex_bytes_mask = 0; + + /** + * The first not void item should be + * MAC or IPv4 or TCP or UDP or SCTP. 
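+ * For the UDP/TCP/SCTP case documented above, using the addresses and
+ * ports from the example table (192.168.1.20 to 192.167.3.50, port 80
+ * to 80), the pattern could be built roughly as this illustrative
+ * sketch:
+ *
+ *   struct rte_flow_item_ipv4 ip_spec = {
+ *       .hdr.src_addr = RTE_BE32(0xC0A80114),
+ *       .hdr.dst_addr = RTE_BE32(0xC0A70332),
+ *   };
+ *   struct rte_flow_item_ipv4 ip_mask = {
+ *       .hdr.src_addr = RTE_BE32(0xFFFFFFFF),
+ *       .hdr.dst_addr = RTE_BE32(0xFFFFFFFF),
+ *   };
+ *   struct rte_flow_item_udp udp_spec = {
+ *       .hdr.src_port = RTE_BE16(80), .hdr.dst_port = RTE_BE16(80),
+ *   };
+ *   struct rte_flow_item_udp udp_mask = {
+ *       .hdr.src_port = RTE_BE16(0xFFFF), .hdr.dst_port = RTE_BE16(0xFFFF),
+ *   };
+ *   struct rte_flow_item pattern[] = {
+ *       { .type = RTE_FLOW_ITEM_TYPE_ETH },
+ *       { .type = RTE_FLOW_ITEM_TYPE_IPV4,
+ *         .spec = &ip_spec, .mask = &ip_mask },
+ *       { .type = RTE_FLOW_ITEM_TYPE_UDP,
+ *         .spec = &udp_spec, .mask = &udp_mask },
+ *       { .type = RTE_FLOW_ITEM_TYPE_END },
+ *   };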
+ */ + item = next_no_fuzzy_pattern(pattern, NULL); + if (item->type != RTE_FLOW_ITEM_TYPE_ETH && + item->type != RTE_FLOW_ITEM_TYPE_IPV4 && + item->type != RTE_FLOW_ITEM_TYPE_IPV6 && + item->type != RTE_FLOW_ITEM_TYPE_TCP && + item->type != RTE_FLOW_ITEM_TYPE_UDP && + item->type != RTE_FLOW_ITEM_TYPE_SCTP) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + + if (signature_match(pattern)) + rule->mode = RTE_FDIR_MODE_SIGNATURE; + else + rule->mode = RTE_FDIR_MODE_PERFECT; + + /*Not supported last point for range*/ + if (item->last) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + item, "Not supported last point for range"); + return -rte_errno; + } + + /* Get the MAC info. */ + if (item->type == RTE_FLOW_ITEM_TYPE_ETH) { + /** + * Only support vlan and dst MAC address, + * others should be masked. + */ + if (item->spec && !item->mask) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + + if (item->spec) { + rule->b_spec = TRUE; + eth_spec = item->spec; + + /* Get the dst MAC. */ + for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) { + rule->ixgbe_fdir.formatted.inner_mac[j] = + eth_spec->dst.addr_bytes[j]; + } + } + + + if (item->mask) { + + rule->b_mask = TRUE; + eth_mask = item->mask; + + /* Ether type should be masked. */ + if (eth_mask->type || + rule->mode == RTE_FDIR_MODE_SIGNATURE) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + + /* If ethernet has meaning, it means MAC VLAN mode. */ + rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN; + + /** + * src MAC address must be masked, + * and don't support dst MAC address mask. + */ + for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) { + if (eth_mask->src.addr_bytes[j] || + eth_mask->dst.addr_bytes[j] != 0xFF) { + memset(rule, 0, + sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + } + + /* When no VLAN, considered as full mask. */ + rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF); + } + /*** If both spec and mask are item, + * it means don't care about ETH. + * Do nothing. + */ + + /** + * Check if the next not void item is vlan or ipv4. + * IPv6 is not supported. 
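+ * In MAC VLAN mode the VLAN item from the example table (tci 0x2016,
+ * mask 0xEFFF) could look roughly like this illustrative sketch:
+ *
+ *   struct rte_flow_item_vlan vlan_spec = { .tci = RTE_BE16(0x2016) };
+ *   struct rte_flow_item_vlan vlan_mask = { .tci = RTE_BE16(0xEFFF) };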
+ */ + item = next_no_fuzzy_pattern(pattern, item); + if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) { + if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + } else { + if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 && + item->type != RTE_FLOW_ITEM_TYPE_VLAN) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + } + } + + if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) { + if (!(item->spec && item->mask)) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + + /*Not supported last point for range*/ + if (item->last) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + item, "Not supported last point for range"); + return -rte_errno; + } + + vlan_spec = item->spec; + vlan_mask = item->mask; + + rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci; + + rule->mask.vlan_tci_mask = vlan_mask->tci; + rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF); + /* More than one tags are not supported. */ + + /* Next not void item must be END */ + item = next_no_fuzzy_pattern(pattern, item); + if (item->type != RTE_FLOW_ITEM_TYPE_END) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + } + + /* Get the IPV4 info. */ + if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) { + /** + * Set the flow type even if there's no content + * as we must have a flow type. + */ + rule->ixgbe_fdir.formatted.flow_type = + IXGBE_ATR_FLOW_TYPE_IPV4; + /*Not supported last point for range*/ + if (item->last) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + item, "Not supported last point for range"); + return -rte_errno; + } + /** + * Only care about src & dst addresses, + * others should be masked. + */ + if (!item->mask) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + rule->b_mask = TRUE; + ipv4_mask = item->mask; + if (ipv4_mask->hdr.version_ihl || + ipv4_mask->hdr.type_of_service || + ipv4_mask->hdr.total_length || + ipv4_mask->hdr.packet_id || + ipv4_mask->hdr.fragment_offset || + ipv4_mask->hdr.time_to_live || + ipv4_mask->hdr.next_proto_id || + ipv4_mask->hdr.hdr_checksum) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr; + rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr; + + if (item->spec) { + rule->b_spec = TRUE; + ipv4_spec = item->spec; + rule->ixgbe_fdir.formatted.dst_ip[0] = + ipv4_spec->hdr.dst_addr; + rule->ixgbe_fdir.formatted.src_ip[0] = + ipv4_spec->hdr.src_addr; + } + + /** + * Check if the next not void item is + * TCP or UDP or SCTP or END. 
+ */ + item = next_no_fuzzy_pattern(pattern, item); + if (item->type != RTE_FLOW_ITEM_TYPE_TCP && + item->type != RTE_FLOW_ITEM_TYPE_UDP && + item->type != RTE_FLOW_ITEM_TYPE_SCTP && + item->type != RTE_FLOW_ITEM_TYPE_END && + item->type != RTE_FLOW_ITEM_TYPE_RAW) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + } + + /* Get the IPV6 info. */ + if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) { + /** + * Set the flow type even if there's no content + * as we must have a flow type. + */ + rule->ixgbe_fdir.formatted.flow_type = + IXGBE_ATR_FLOW_TYPE_IPV6; + + /** + * 1. must signature match + * 2. not support last + * 3. mask must not null + */ + if (rule->mode != RTE_FDIR_MODE_SIGNATURE || + item->last || + !item->mask) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + item, "Not supported last point for range"); + return -rte_errno; + } + + rule->b_mask = TRUE; + ipv6_mask = item->mask; + if (ipv6_mask->hdr.vtc_flow || + ipv6_mask->hdr.payload_len || + ipv6_mask->hdr.proto || + ipv6_mask->hdr.hop_limits) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + + /* check src addr mask */ + for (j = 0; j < 16; j++) { + if (ipv6_mask->hdr.src_addr[j] == UINT8_MAX) { + rule->mask.src_ipv6_mask |= 1 << j; + } else if (ipv6_mask->hdr.src_addr[j] != 0) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + } + + /* check dst addr mask */ + for (j = 0; j < 16; j++) { + if (ipv6_mask->hdr.dst_addr[j] == UINT8_MAX) { + rule->mask.dst_ipv6_mask |= 1 << j; + } else if (ipv6_mask->hdr.dst_addr[j] != 0) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + } + + if (item->spec) { + rule->b_spec = TRUE; + ipv6_spec = item->spec; + rte_memcpy(rule->ixgbe_fdir.formatted.src_ip, + ipv6_spec->hdr.src_addr, 16); + rte_memcpy(rule->ixgbe_fdir.formatted.dst_ip, + ipv6_spec->hdr.dst_addr, 16); + } + + /** + * Check if the next not void item is + * TCP or UDP or SCTP or END. + */ + item = next_no_fuzzy_pattern(pattern, item); + if (item->type != RTE_FLOW_ITEM_TYPE_TCP && + item->type != RTE_FLOW_ITEM_TYPE_UDP && + item->type != RTE_FLOW_ITEM_TYPE_SCTP && + item->type != RTE_FLOW_ITEM_TYPE_END && + item->type != RTE_FLOW_ITEM_TYPE_RAW) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + } + + /* Get the TCP info. */ + if (item->type == RTE_FLOW_ITEM_TYPE_TCP) { + /** + * Set the flow type even if there's no content + * as we must have a flow type. + */ + rule->ixgbe_fdir.formatted.flow_type |= + IXGBE_ATR_L4TYPE_TCP; + /*Not supported last point for range*/ + if (item->last) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + item, "Not supported last point for range"); + return -rte_errno; + } + /** + * Only care about src & dst ports, + * others should be masked. 
+ */ + if (!item->mask) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + rule->b_mask = TRUE; + tcp_mask = item->mask; + if (tcp_mask->hdr.sent_seq || + tcp_mask->hdr.recv_ack || + tcp_mask->hdr.data_off || + tcp_mask->hdr.tcp_flags || + tcp_mask->hdr.rx_win || + tcp_mask->hdr.cksum || + tcp_mask->hdr.tcp_urp) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + rule->mask.src_port_mask = tcp_mask->hdr.src_port; + rule->mask.dst_port_mask = tcp_mask->hdr.dst_port; + + if (item->spec) { + rule->b_spec = TRUE; + tcp_spec = item->spec; + rule->ixgbe_fdir.formatted.src_port = + tcp_spec->hdr.src_port; + rule->ixgbe_fdir.formatted.dst_port = + tcp_spec->hdr.dst_port; + } + + item = next_no_fuzzy_pattern(pattern, item); + if (item->type != RTE_FLOW_ITEM_TYPE_RAW && + item->type != RTE_FLOW_ITEM_TYPE_END) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + + } + + /* Get the UDP info */ + if (item->type == RTE_FLOW_ITEM_TYPE_UDP) { + /** + * Set the flow type even if there's no content + * as we must have a flow type. + */ + rule->ixgbe_fdir.formatted.flow_type |= + IXGBE_ATR_L4TYPE_UDP; + /*Not supported last point for range*/ + if (item->last) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + item, "Not supported last point for range"); + return -rte_errno; + } + /** + * Only care about src & dst ports, + * others should be masked. + */ + if (!item->mask) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + rule->b_mask = TRUE; + udp_mask = item->mask; + if (udp_mask->hdr.dgram_len || + udp_mask->hdr.dgram_cksum) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + rule->mask.src_port_mask = udp_mask->hdr.src_port; + rule->mask.dst_port_mask = udp_mask->hdr.dst_port; + + if (item->spec) { + rule->b_spec = TRUE; + udp_spec = item->spec; + rule->ixgbe_fdir.formatted.src_port = + udp_spec->hdr.src_port; + rule->ixgbe_fdir.formatted.dst_port = + udp_spec->hdr.dst_port; + } + + item = next_no_fuzzy_pattern(pattern, item); + if (item->type != RTE_FLOW_ITEM_TYPE_RAW && + item->type != RTE_FLOW_ITEM_TYPE_END) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + + } + + /* Get the SCTP info */ + if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) { + /** + * Set the flow type even if there's no content + * as we must have a flow type. 
+ */ + rule->ixgbe_fdir.formatted.flow_type |= + IXGBE_ATR_L4TYPE_SCTP; + /*Not supported last point for range*/ + if (item->last) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + item, "Not supported last point for range"); + return -rte_errno; + } + + /* only x550 family only support sctp port */ + if (hw->mac.type == ixgbe_mac_X550 || + hw->mac.type == ixgbe_mac_X550EM_x || + hw->mac.type == ixgbe_mac_X550EM_a) { + /** + * Only care about src & dst ports, + * others should be masked. + */ + if (!item->mask) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + rule->b_mask = TRUE; + sctp_mask = item->mask; + if (sctp_mask->hdr.tag || + sctp_mask->hdr.cksum) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + rule->mask.src_port_mask = sctp_mask->hdr.src_port; + rule->mask.dst_port_mask = sctp_mask->hdr.dst_port; + + if (item->spec) { + rule->b_spec = TRUE; + sctp_spec = item->spec; + rule->ixgbe_fdir.formatted.src_port = + sctp_spec->hdr.src_port; + rule->ixgbe_fdir.formatted.dst_port = + sctp_spec->hdr.dst_port; + } + /* others even sctp port is not supported */ + } else { + sctp_mask = item->mask; + if (sctp_mask && + (sctp_mask->hdr.src_port || + sctp_mask->hdr.dst_port || + sctp_mask->hdr.tag || + sctp_mask->hdr.cksum)) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + } + + item = next_no_fuzzy_pattern(pattern, item); + if (item->type != RTE_FLOW_ITEM_TYPE_RAW && + item->type != RTE_FLOW_ITEM_TYPE_END) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + } + + /* Get the flex byte info */ + if (item->type == RTE_FLOW_ITEM_TYPE_RAW) { + /* Not supported last point for range*/ + if (item->last) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + item, "Not supported last point for range"); + return -rte_errno; + } + /* mask should not be null */ + if (!item->mask || !item->spec) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + + raw_mask = item->mask; + + /* check mask */ + if (raw_mask->relative != 0x1 || + raw_mask->search != 0x1 || + raw_mask->reserved != 0x0 || + (uint32_t)raw_mask->offset != 0xffffffff || + raw_mask->limit != 0xffff || + raw_mask->length != 0xffff) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + + raw_spec = item->spec; + + /* check spec */ + if (raw_spec->relative != 0 || + raw_spec->search != 0 || + raw_spec->reserved != 0 || + raw_spec->offset > IXGBE_MAX_FLX_SOURCE_OFF || + raw_spec->offset % 2 || + raw_spec->limit != 0 || + raw_spec->length != 2 || + /* pattern can't be 0xffff */ + (raw_spec->pattern[0] == 0xff && + raw_spec->pattern[1] == 0xff)) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return 
-rte_errno; + } + + /* check pattern mask */ + if (raw_mask->pattern[0] != 0xff || + raw_mask->pattern[1] != 0xff) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + + rule->mask.flex_bytes_mask = 0xffff; + rule->ixgbe_fdir.formatted.flex_bytes = + (((uint16_t)raw_spec->pattern[1]) << 8) | + raw_spec->pattern[0]; + rule->flex_bytes_offset = raw_spec->offset; + } + + if (item->type != RTE_FLOW_ITEM_TYPE_END) { + /* check if the next not void item is END */ + item = next_no_fuzzy_pattern(pattern, item); + if (item->type != RTE_FLOW_ITEM_TYPE_END) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + } + + return ixgbe_parse_fdir_act_attr(attr, actions, rule, error); +} + +#define NVGRE_PROTOCOL 0x6558 + +/** + * Parse the rule to see if it is a VxLAN or NVGRE flow director rule. + * And get the flow director filter info BTW. + * VxLAN PATTERN: + * The first not void item must be ETH. + * The second not void item must be IPV4/ IPV6. + * The third not void item must be NVGRE. + * The next not void item must be END. + * NVGRE PATTERN: + * The first not void item must be ETH. + * The second not void item must be IPV4/ IPV6. + * The third not void item must be NVGRE. + * The next not void item must be END. + * ACTION: + * The first not void action should be QUEUE or DROP. + * The second not void optional action should be MARK, + * mark_id is a uint32_t number. + * The next not void action should be END. + * VxLAN pattern example: + * ITEM Spec Mask + * ETH NULL NULL + * IPV4/IPV6 NULL NULL + * UDP NULL NULL + * VxLAN vni{0x00, 0x32, 0x54} {0xFF, 0xFF, 0xFF} + * MAC VLAN tci 0x2016 0xEFFF + * END + * NEGRV pattern example: + * ITEM Spec Mask + * ETH NULL NULL + * IPV4/IPV6 NULL NULL + * NVGRE protocol 0x6558 0xFFFF + * tni{0x00, 0x32, 0x54} {0xFF, 0xFF, 0xFF} + * MAC VLAN tci 0x2016 0xEFFF + * END + * other members in mask and spec should set to 0x00. + * item->last should be NULL. + */ +static int +ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct ixgbe_fdir_rule *rule, + struct rte_flow_error *error) +{ + const struct rte_flow_item *item; + const struct rte_flow_item_vxlan *vxlan_spec; + const struct rte_flow_item_vxlan *vxlan_mask; + const struct rte_flow_item_nvgre *nvgre_spec; + const struct rte_flow_item_nvgre *nvgre_mask; + const struct rte_flow_item_eth *eth_spec; + const struct rte_flow_item_eth *eth_mask; + const struct rte_flow_item_vlan *vlan_spec; + const struct rte_flow_item_vlan *vlan_mask; + uint32_t j; + + if (!pattern) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM_NUM, + NULL, "NULL pattern."); + return -rte_errno; + } + + if (!actions) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_NUM, + NULL, "NULL action."); + return -rte_errno; + } + + if (!attr) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR, + NULL, "NULL attribute."); + return -rte_errno; + } + + /** + * Some fields may not be provided. Set spec to 0 and mask to default + * value. So, we need not do anything for the not provided fields later. 
+ */ + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask)); + rule->mask.vlan_tci_mask = 0; + + /** + * The first not void item should be + * MAC or IPv4 or IPv6 or UDP or VxLAN. + */ + item = next_no_void_pattern(pattern, NULL); + if (item->type != RTE_FLOW_ITEM_TYPE_ETH && + item->type != RTE_FLOW_ITEM_TYPE_IPV4 && + item->type != RTE_FLOW_ITEM_TYPE_IPV6 && + item->type != RTE_FLOW_ITEM_TYPE_UDP && + item->type != RTE_FLOW_ITEM_TYPE_VXLAN && + item->type != RTE_FLOW_ITEM_TYPE_NVGRE) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + + rule->mode = RTE_FDIR_MODE_PERFECT_TUNNEL; + + /* Skip MAC. */ + if (item->type == RTE_FLOW_ITEM_TYPE_ETH) { + /* Only used to describe the protocol stack. */ + if (item->spec || item->mask) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + /* Not supported last point for range*/ + if (item->last) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + item, "Not supported last point for range"); + return -rte_errno; + } + + /* Check if the next not void item is IPv4 or IPv6. */ + item = next_no_void_pattern(pattern, item); + if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 && + item->type != RTE_FLOW_ITEM_TYPE_IPV6) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + } + + /* Skip IP. */ + if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 || + item->type == RTE_FLOW_ITEM_TYPE_IPV6) { + /* Only used to describe the protocol stack. */ + if (item->spec || item->mask) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + /*Not supported last point for range*/ + if (item->last) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + item, "Not supported last point for range"); + return -rte_errno; + } + + /* Check if the next not void item is UDP or NVGRE. */ + item = next_no_void_pattern(pattern, item); + if (item->type != RTE_FLOW_ITEM_TYPE_UDP && + item->type != RTE_FLOW_ITEM_TYPE_NVGRE) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + } + + /* Skip UDP. */ + if (item->type == RTE_FLOW_ITEM_TYPE_UDP) { + /* Only used to describe the protocol stack. */ + if (item->spec || item->mask) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + /*Not supported last point for range*/ + if (item->last) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + item, "Not supported last point for range"); + return -rte_errno; + } + + /* Check if the next not void item is VxLAN. 
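+ * A VNI match such as the one in the VxLAN example table could be
+ * expressed roughly as follows (illustrative sketch; only the 24-bit
+ * VNI may be matched and it must be fully masked or left unmasked):
+ *
+ *   struct rte_flow_item_vxlan vxlan_spec = {
+ *       .vni = { 0x00, 0x32, 0x54 },
+ *   };
+ *   struct rte_flow_item_vxlan vxlan_mask = {
+ *       .vni = { 0xFF, 0xFF, 0xFF },
+ *   };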
*/ + item = next_no_void_pattern(pattern, item); + if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + } + + /* Get the VxLAN info */ + if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) { + rule->ixgbe_fdir.formatted.tunnel_type = + IXGBE_FDIR_VXLAN_TUNNEL_TYPE; + + /* Only care about VNI, others should be masked. */ + if (!item->mask) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + /*Not supported last point for range*/ + if (item->last) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + item, "Not supported last point for range"); + return -rte_errno; + } + rule->b_mask = TRUE; + + /* Tunnel type is always meaningful. */ + rule->mask.tunnel_type_mask = 1; + + vxlan_mask = item->mask; + if (vxlan_mask->flags) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + /* VNI must be totally masked or not. */ + if ((vxlan_mask->vni[0] || vxlan_mask->vni[1] || + vxlan_mask->vni[2]) && + ((vxlan_mask->vni[0] != 0xFF) || + (vxlan_mask->vni[1] != 0xFF) || + (vxlan_mask->vni[2] != 0xFF))) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + + rte_memcpy(&rule->mask.tunnel_id_mask, vxlan_mask->vni, + RTE_DIM(vxlan_mask->vni)); + + if (item->spec) { + rule->b_spec = TRUE; + vxlan_spec = item->spec; + rte_memcpy(((uint8_t *) + &rule->ixgbe_fdir.formatted.tni_vni), + vxlan_spec->vni, RTE_DIM(vxlan_spec->vni)); + } + } + + /* Get the NVGRE info */ + if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE) { + rule->ixgbe_fdir.formatted.tunnel_type = + IXGBE_FDIR_NVGRE_TUNNEL_TYPE; + + /** + * Only care about flags0, flags1, protocol and TNI, + * others should be masked. + */ + if (!item->mask) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + /*Not supported last point for range*/ + if (item->last) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + item, "Not supported last point for range"); + return -rte_errno; + } + rule->b_mask = TRUE; + + /* Tunnel type is always meaningful. */ + rule->mask.tunnel_type_mask = 1; + + nvgre_mask = item->mask; + if (nvgre_mask->flow_id) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + if (nvgre_mask->protocol && + nvgre_mask->protocol != 0xFFFF) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + if (nvgre_mask->c_k_s_rsvd0_ver && + nvgre_mask->c_k_s_rsvd0_ver != + rte_cpu_to_be_16(0xFFFF)) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + /* TNI must be totally masked or not. 
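+ * For example (illustrative sketch mirroring the NVGRE example table;
+ * the spec values are the ones accepted by the checks in this block):
+ *
+ *   struct rte_flow_item_nvgre nvgre_spec = {
+ *       .c_k_s_rsvd0_ver = RTE_BE16(0x2000),
+ *       .protocol = RTE_BE16(NVGRE_PROTOCOL),
+ *       .tni = { 0x00, 0x32, 0x54 },
+ *   };
+ *   struct rte_flow_item_nvgre nvgre_mask = {
+ *       .c_k_s_rsvd0_ver = RTE_BE16(0xFFFF),
+ *       .protocol = RTE_BE16(0xFFFF),
+ *       .tni = { 0xFF, 0xFF, 0xFF },
+ *   };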
*/ + if (nvgre_mask->tni[0] && + ((nvgre_mask->tni[0] != 0xFF) || + (nvgre_mask->tni[1] != 0xFF) || + (nvgre_mask->tni[2] != 0xFF))) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + /* tni is a 24-bits bit field */ + rte_memcpy(&rule->mask.tunnel_id_mask, nvgre_mask->tni, + RTE_DIM(nvgre_mask->tni)); + rule->mask.tunnel_id_mask <<= 8; + + if (item->spec) { + rule->b_spec = TRUE; + nvgre_spec = item->spec; + if (nvgre_spec->c_k_s_rsvd0_ver != + rte_cpu_to_be_16(0x2000) && + nvgre_mask->c_k_s_rsvd0_ver) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + if (nvgre_mask->protocol && + nvgre_spec->protocol != + rte_cpu_to_be_16(NVGRE_PROTOCOL)) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + /* tni is a 24-bits bit field */ + rte_memcpy(&rule->ixgbe_fdir.formatted.tni_vni, + nvgre_spec->tni, RTE_DIM(nvgre_spec->tni)); + } + } + + /* check if the next not void item is MAC */ + item = next_no_void_pattern(pattern, item); + if (item->type != RTE_FLOW_ITEM_TYPE_ETH) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + + /** + * Only support vlan and dst MAC address, + * others should be masked. + */ + + if (!item->mask) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + /*Not supported last point for range*/ + if (item->last) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + item, "Not supported last point for range"); + return -rte_errno; + } + rule->b_mask = TRUE; + eth_mask = item->mask; + + /* Ether type should be masked. */ + if (eth_mask->type) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + + /* src MAC address should be masked. */ + for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) { + if (eth_mask->src.addr_bytes[j]) { + memset(rule, 0, + sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + } + rule->mask.mac_addr_byte_mask = 0; + for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) { + /* It's a per byte mask. */ + if (eth_mask->dst.addr_bytes[j] == 0xFF) { + rule->mask.mac_addr_byte_mask |= 0x1 << j; + } else if (eth_mask->dst.addr_bytes[j]) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + } + + /* When no vlan, considered as full mask. */ + rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF); + + if (item->spec) { + rule->b_spec = TRUE; + eth_spec = item->spec; + + /* Get the dst MAC. */ + for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) { + rule->ixgbe_fdir.formatted.inner_mac[j] = + eth_spec->dst.addr_bytes[j]; + } + } + + /** + * Check if the next not void item is vlan or ipv4. + * IPv6 is not supported. 
+ */ + item = next_no_void_pattern(pattern, item); + if ((item->type != RTE_FLOW_ITEM_TYPE_VLAN) && + (item->type != RTE_FLOW_ITEM_TYPE_IPV4)) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + /*Not supported last point for range*/ + if (item->last) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + item, "Not supported last point for range"); + return -rte_errno; + } + + if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) { + if (!(item->spec && item->mask)) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + + vlan_spec = item->spec; + vlan_mask = item->mask; + + rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci; + + rule->mask.vlan_tci_mask = vlan_mask->tci; + rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF); + /* More than one tags are not supported. */ + + /* check if the next not void item is END */ + item = next_no_void_pattern(pattern, item); + + if (item->type != RTE_FLOW_ITEM_TYPE_END) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by fdir filter"); + return -rte_errno; + } + } + + /** + * If the tags is 0, it means don't care about the VLAN. + * Do nothing. + */ + + return ixgbe_parse_fdir_act_attr(attr, actions, rule, error); +} + +static int +ixgbe_parse_fdir_filter(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct ixgbe_fdir_rule *rule, + struct rte_flow_error *error) +{ + int ret; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode; + + if (hw->mac.type != ixgbe_mac_82599EB && + hw->mac.type != ixgbe_mac_X540 && + hw->mac.type != ixgbe_mac_X550 && + hw->mac.type != ixgbe_mac_X550EM_x && + hw->mac.type != ixgbe_mac_X550EM_a) + return -ENOTSUP; + + ret = ixgbe_parse_fdir_filter_normal(dev, attr, pattern, + actions, rule, error); + + if (!ret) + goto step_next; + + ret = ixgbe_parse_fdir_filter_tunnel(attr, pattern, + actions, rule, error); + + if (ret) + return ret; + +step_next: + + if (hw->mac.type == ixgbe_mac_82599EB && + rule->fdirflags == IXGBE_FDIRCMD_DROP && + (rule->ixgbe_fdir.formatted.src_port != 0 || + rule->ixgbe_fdir.formatted.dst_port != 0)) + return -ENOTSUP; + + if (fdir_mode == RTE_FDIR_MODE_NONE || + fdir_mode != rule->mode) + return -ENOTSUP; + + if (rule->queue >= dev->data->nb_rx_queues) + return -ENOTSUP; + + return ret; +} + +static int +ixgbe_parse_rss_filter(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_action actions[], + struct ixgbe_rte_flow_rss_conf *rss_conf, + struct rte_flow_error *error) +{ + const struct rte_flow_action *act; + const struct rte_flow_action_rss *rss; + uint16_t n; + + /** + * rss only supports forwarding, + * check if the first not void action is RSS. 
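+ * An RSS action spreading traffic over a few queues could be passed
+ * roughly as follows (illustrative sketch; the queue ids are
+ * placeholders and must be below the number of configured Rx queues,
+ * and key_len 0 leaves the hash key unspecified):
+ *
+ *   uint16_t queues[] = { 0, 1, 2, 3 };
+ *   struct rte_flow_action_rss rss = {
+ *       .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
+ *       .level = 0,
+ *       .types = ETH_RSS_IP,
+ *       .key_len = 0,
+ *       .queue_num = RTE_DIM(queues),
+ *       .queue = queues,
+ *   };
+ *   struct rte_flow_action actions[] = {
+ *       { .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss },
+ *       { .type = RTE_FLOW_ACTION_TYPE_END },
+ *   };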
+ */ + act = next_no_void_action(actions, NULL); + if (act->type != RTE_FLOW_ACTION_TYPE_RSS) { + memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, "Not supported action."); + return -rte_errno; + } + + rss = (const struct rte_flow_action_rss *)act->conf; + + if (!rss || !rss->queue_num) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, + "no valid queues"); + return -rte_errno; + } + + for (n = 0; n < rss->queue_num; n++) { + if (rss->queue[n] >= dev->data->nb_rx_queues) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, + "queue id > max number of queues"); + return -rte_errno; + } + } + + if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act, + "non-default RSS hash functions are not supported"); + if (rss->level) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act, + "a nonzero RSS encapsulation level is not supported"); + if (rss->key_len && rss->key_len != RTE_DIM(rss_conf->key)) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act, + "RSS hash key must be exactly 40 bytes"); + if (rss->queue_num > RTE_DIM(rss_conf->queue)) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act, + "too many queues for RSS context"); + if (ixgbe_rss_conf_init(rss_conf, rss)) + return rte_flow_error_set + (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act, + "RSS context initialization failure"); + + /* check if the next not void item is END */ + act = next_no_void_action(actions, act); + if (act->type != RTE_FLOW_ACTION_TYPE_END) { + memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, "Not supported action."); + return -rte_errno; + } + + /* parse attr */ + /* must be input direction */ + if (!attr->ingress) { + memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, + attr, "Only support ingress."); + return -rte_errno; + } + + /* not supported */ + if (attr->egress) { + memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, + attr, "Not support egress."); + return -rte_errno; + } + + /* not supported */ + if (attr->transfer) { + memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, + attr, "No support for transfer."); + return -rte_errno; + } + + if (attr->priority > 0xFFFF) { + memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, + attr, "Error priority."); + return -rte_errno; + } + + return 0; +} + +/* remove the rss filter */ +static void +ixgbe_clear_rss_filter(struct rte_eth_dev *dev) +{ + struct ixgbe_filter_info *filter_info = + IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + + if (filter_info->rss_info.conf.queue_num) + ixgbe_config_rss_filter(dev, &filter_info->rss_info, FALSE); +} + +void +ixgbe_filterlist_init(void) +{ + TAILQ_INIT(&filter_ntuple_list); + TAILQ_INIT(&filter_ethertype_list); + TAILQ_INIT(&filter_syn_list); + TAILQ_INIT(&filter_fdir_list); + TAILQ_INIT(&filter_l2_tunnel_list); + TAILQ_INIT(&filter_rss_list); + TAILQ_INIT(&ixgbe_flow_list); +} + +void +ixgbe_filterlist_flush(void) +{ + struct 
ixgbe_ntuple_filter_ele *ntuple_filter_ptr; + struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr; + struct ixgbe_eth_syn_filter_ele *syn_filter_ptr; + struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr; + struct ixgbe_fdir_rule_ele *fdir_rule_ptr; + struct ixgbe_flow_mem *ixgbe_flow_mem_ptr; + struct ixgbe_rss_conf_ele *rss_filter_ptr; + + while ((ntuple_filter_ptr = TAILQ_FIRST(&filter_ntuple_list))) { + TAILQ_REMOVE(&filter_ntuple_list, + ntuple_filter_ptr, + entries); + rte_free(ntuple_filter_ptr); + } + + while ((ethertype_filter_ptr = TAILQ_FIRST(&filter_ethertype_list))) { + TAILQ_REMOVE(&filter_ethertype_list, + ethertype_filter_ptr, + entries); + rte_free(ethertype_filter_ptr); + } + + while ((syn_filter_ptr = TAILQ_FIRST(&filter_syn_list))) { + TAILQ_REMOVE(&filter_syn_list, + syn_filter_ptr, + entries); + rte_free(syn_filter_ptr); + } + + while ((l2_tn_filter_ptr = TAILQ_FIRST(&filter_l2_tunnel_list))) { + TAILQ_REMOVE(&filter_l2_tunnel_list, + l2_tn_filter_ptr, + entries); + rte_free(l2_tn_filter_ptr); + } + + while ((fdir_rule_ptr = TAILQ_FIRST(&filter_fdir_list))) { + TAILQ_REMOVE(&filter_fdir_list, + fdir_rule_ptr, + entries); + rte_free(fdir_rule_ptr); + } + + while ((rss_filter_ptr = TAILQ_FIRST(&filter_rss_list))) { + TAILQ_REMOVE(&filter_rss_list, + rss_filter_ptr, + entries); + rte_free(rss_filter_ptr); + } + + while ((ixgbe_flow_mem_ptr = TAILQ_FIRST(&ixgbe_flow_list))) { + TAILQ_REMOVE(&ixgbe_flow_list, + ixgbe_flow_mem_ptr, + entries); + rte_free(ixgbe_flow_mem_ptr->flow); + rte_free(ixgbe_flow_mem_ptr); + } +} + +/** + * Create or destroy a flow rule. + * Theorically one rule can match more than one filters. + * We will let it use the filter which it hitt first. + * So, the sequence matters. + */ +static struct rte_flow * +ixgbe_flow_create(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + int ret; + struct rte_eth_ntuple_filter ntuple_filter; + struct rte_eth_ethertype_filter ethertype_filter; + struct rte_eth_syn_filter syn_filter; + struct ixgbe_fdir_rule fdir_rule; + struct rte_eth_l2_tunnel_conf l2_tn_filter; + struct ixgbe_hw_fdir_info *fdir_info = + IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private); + struct ixgbe_rte_flow_rss_conf rss_conf; + struct rte_flow *flow = NULL; + struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr; + struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr; + struct ixgbe_eth_syn_filter_ele *syn_filter_ptr; + struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr; + struct ixgbe_fdir_rule_ele *fdir_rule_ptr; + struct ixgbe_rss_conf_ele *rss_filter_ptr; + struct ixgbe_flow_mem *ixgbe_flow_mem_ptr; + uint8_t first_mask = FALSE; + + flow = rte_zmalloc("ixgbe_rte_flow", sizeof(struct rte_flow), 0); + if (!flow) { + PMD_DRV_LOG(ERR, "failed to allocate memory"); + return (struct rte_flow *)flow; + } + ixgbe_flow_mem_ptr = rte_zmalloc("ixgbe_flow_mem", + sizeof(struct ixgbe_flow_mem), 0); + if (!ixgbe_flow_mem_ptr) { + PMD_DRV_LOG(ERR, "failed to allocate memory"); + rte_free(flow); + return NULL; + } + ixgbe_flow_mem_ptr->flow = flow; + TAILQ_INSERT_TAIL(&ixgbe_flow_list, + ixgbe_flow_mem_ptr, entries); + + memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter)); + ret = ixgbe_parse_ntuple_filter(dev, attr, pattern, + actions, &ntuple_filter, error); + +#ifdef RTE_LIBRTE_SECURITY + /* ESP flow not really a flow*/ + if (ntuple_filter.proto == IPPROTO_ESP) + return flow; +#endif + + if (!ret) { 
+		ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
+		if (!ret) {
+			ntuple_filter_ptr = rte_zmalloc("ixgbe_ntuple_filter",
+				sizeof(struct ixgbe_ntuple_filter_ele), 0);
+			if (!ntuple_filter_ptr) {
+				PMD_DRV_LOG(ERR, "failed to allocate memory");
+				goto out;
+			}
+			rte_memcpy(&ntuple_filter_ptr->filter_info,
+				&ntuple_filter,
+				sizeof(struct rte_eth_ntuple_filter));
+			TAILQ_INSERT_TAIL(&filter_ntuple_list,
+				ntuple_filter_ptr, entries);
+			flow->rule = ntuple_filter_ptr;
+			flow->filter_type = RTE_ETH_FILTER_NTUPLE;
+			return flow;
+		}
+		goto out;
+	}
+
+	memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
+	ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
+				actions, &ethertype_filter, error);
+	if (!ret) {
+		ret = ixgbe_add_del_ethertype_filter(dev,
+				&ethertype_filter, TRUE);
+		if (!ret) {
+			ethertype_filter_ptr = rte_zmalloc(
+				"ixgbe_ethertype_filter",
+				sizeof(struct ixgbe_ethertype_filter_ele), 0);
+			if (!ethertype_filter_ptr) {
+				PMD_DRV_LOG(ERR, "failed to allocate memory");
+				goto out;
+			}
+			rte_memcpy(&ethertype_filter_ptr->filter_info,
+				&ethertype_filter,
+				sizeof(struct rte_eth_ethertype_filter));
+			TAILQ_INSERT_TAIL(&filter_ethertype_list,
+				ethertype_filter_ptr, entries);
+			flow->rule = ethertype_filter_ptr;
+			flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
+			return flow;
+		}
+		goto out;
+	}
+
+	memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
+	ret = ixgbe_parse_syn_filter(dev, attr, pattern,
+				actions, &syn_filter, error);
+	if (!ret) {
+		ret = ixgbe_syn_filter_set(dev, &syn_filter, TRUE);
+		if (!ret) {
+			syn_filter_ptr = rte_zmalloc("ixgbe_syn_filter",
+				sizeof(struct ixgbe_eth_syn_filter_ele), 0);
+			if (!syn_filter_ptr) {
+				PMD_DRV_LOG(ERR, "failed to allocate memory");
+				goto out;
+			}
+			rte_memcpy(&syn_filter_ptr->filter_info,
+				&syn_filter,
+				sizeof(struct rte_eth_syn_filter));
+			TAILQ_INSERT_TAIL(&filter_syn_list,
+				syn_filter_ptr,
+				entries);
+			flow->rule = syn_filter_ptr;
+			flow->filter_type = RTE_ETH_FILTER_SYN;
+			return flow;
+		}
+		goto out;
+	}
+
+	memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
+	ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
+				actions, &fdir_rule, error);
+	if (!ret) {
+		/* A mask cannot be deleted. */
+		if (fdir_rule.b_mask) {
+			if (!fdir_info->mask_added) {
+				/* It's the first time the mask is set. */
+				rte_memcpy(&fdir_info->mask,
+					&fdir_rule.mask,
+					sizeof(struct ixgbe_hw_fdir_mask));
+				fdir_info->flex_bytes_offset =
+					fdir_rule.flex_bytes_offset;
+
+				if (fdir_rule.mask.flex_bytes_mask)
+					ixgbe_fdir_set_flexbytes_offset(dev,
+						fdir_rule.flex_bytes_offset);
+
+				ret = ixgbe_fdir_set_input_mask(dev);
+				if (ret)
+					goto out;
+
+				fdir_info->mask_added = TRUE;
+				first_mask = TRUE;
+			} else {
+				/**
+				 * Only support one global mask,
+				 * all the masks should be the same.
+ */ + ret = memcmp(&fdir_info->mask, + &fdir_rule.mask, + sizeof(struct ixgbe_hw_fdir_mask)); + if (ret) + goto out; + + if (fdir_info->flex_bytes_offset != + fdir_rule.flex_bytes_offset) + goto out; + } + } + + if (fdir_rule.b_spec) { + ret = ixgbe_fdir_filter_program(dev, &fdir_rule, + FALSE, FALSE); + if (!ret) { + fdir_rule_ptr = rte_zmalloc("ixgbe_fdir_filter", + sizeof(struct ixgbe_fdir_rule_ele), 0); + if (!fdir_rule_ptr) { + PMD_DRV_LOG(ERR, "failed to allocate memory"); + goto out; + } + rte_memcpy(&fdir_rule_ptr->filter_info, + &fdir_rule, + sizeof(struct ixgbe_fdir_rule)); + TAILQ_INSERT_TAIL(&filter_fdir_list, + fdir_rule_ptr, entries); + flow->rule = fdir_rule_ptr; + flow->filter_type = RTE_ETH_FILTER_FDIR; + + return flow; + } + + if (ret) { + /** + * clean the mask_added flag if fail to + * program + **/ + if (first_mask) + fdir_info->mask_added = FALSE; + goto out; + } + } + + goto out; + } + + memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf)); + ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern, + actions, &l2_tn_filter, error); + if (!ret) { + ret = ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_filter, FALSE); + if (!ret) { + l2_tn_filter_ptr = rte_zmalloc("ixgbe_l2_tn_filter", + sizeof(struct ixgbe_eth_l2_tunnel_conf_ele), 0); + if (!l2_tn_filter_ptr) { + PMD_DRV_LOG(ERR, "failed to allocate memory"); + goto out; + } + rte_memcpy(&l2_tn_filter_ptr->filter_info, + &l2_tn_filter, + sizeof(struct rte_eth_l2_tunnel_conf)); + TAILQ_INSERT_TAIL(&filter_l2_tunnel_list, + l2_tn_filter_ptr, entries); + flow->rule = l2_tn_filter_ptr; + flow->filter_type = RTE_ETH_FILTER_L2_TUNNEL; + return flow; + } + } + + memset(&rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf)); + ret = ixgbe_parse_rss_filter(dev, attr, + actions, &rss_conf, error); + if (!ret) { + ret = ixgbe_config_rss_filter(dev, &rss_conf, TRUE); + if (!ret) { + rss_filter_ptr = rte_zmalloc("ixgbe_rss_filter", + sizeof(struct ixgbe_rss_conf_ele), 0); + if (!rss_filter_ptr) { + PMD_DRV_LOG(ERR, "failed to allocate memory"); + goto out; + } + ixgbe_rss_conf_init(&rss_filter_ptr->filter_info, + &rss_conf.conf); + TAILQ_INSERT_TAIL(&filter_rss_list, + rss_filter_ptr, entries); + flow->rule = rss_filter_ptr; + flow->filter_type = RTE_ETH_FILTER_HASH; + return flow; + } + } + +out: + TAILQ_REMOVE(&ixgbe_flow_list, + ixgbe_flow_mem_ptr, entries); + rte_flow_error_set(error, -ret, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Failed to create flow."); + rte_free(ixgbe_flow_mem_ptr); + rte_free(flow); + return NULL; +} + +/** + * Check if the flow rule is supported by ixgbe. + * It only checkes the format. Don't guarantee the rule can be programmed into + * the HW. Because there can be no enough room for the rule. 
+ */
+static int
+ixgbe_flow_validate(struct rte_eth_dev *dev,
+		const struct rte_flow_attr *attr,
+		const struct rte_flow_item pattern[],
+		const struct rte_flow_action actions[],
+		struct rte_flow_error *error)
+{
+	struct rte_eth_ntuple_filter ntuple_filter;
+	struct rte_eth_ethertype_filter ethertype_filter;
+	struct rte_eth_syn_filter syn_filter;
+	struct rte_eth_l2_tunnel_conf l2_tn_filter;
+	struct ixgbe_fdir_rule fdir_rule;
+	struct ixgbe_rte_flow_rss_conf rss_conf;
+	int ret;
+
+	memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
+	ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
+				actions, &ntuple_filter, error);
+	if (!ret)
+		return 0;
+
+	memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
+	ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
+				actions, &ethertype_filter, error);
+	if (!ret)
+		return 0;
+
+	memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
+	ret = ixgbe_parse_syn_filter(dev, attr, pattern,
+				actions, &syn_filter, error);
+	if (!ret)
+		return 0;
+
+	memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
+	ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
+				actions, &fdir_rule, error);
+	if (!ret)
+		return 0;
+
+	memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+	ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
+				actions, &l2_tn_filter, error);
+	if (!ret)
+		return 0;
+
+	memset(&rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
+	ret = ixgbe_parse_rss_filter(dev, attr,
+				actions, &rss_conf, error);
+
+	return ret;
+}
+
+/* Destroy a flow rule on ixgbe. */
+static int
+ixgbe_flow_destroy(struct rte_eth_dev *dev,
+		struct rte_flow *flow,
+		struct rte_flow_error *error)
+{
+	int ret;
+	struct rte_flow *pmd_flow = flow;
+	enum rte_filter_type filter_type = pmd_flow->filter_type;
+	struct rte_eth_ntuple_filter ntuple_filter;
+	struct rte_eth_ethertype_filter ethertype_filter;
+	struct rte_eth_syn_filter syn_filter;
+	struct ixgbe_fdir_rule fdir_rule;
+	struct rte_eth_l2_tunnel_conf l2_tn_filter;
+	struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
+	struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
+	struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
+	struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
+	struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
+	struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
+	struct ixgbe_hw_fdir_info *fdir_info =
+		IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
+	struct ixgbe_rss_conf_ele *rss_filter_ptr;
+
+	switch (filter_type) {
+	case RTE_ETH_FILTER_NTUPLE:
+		ntuple_filter_ptr = (struct ixgbe_ntuple_filter_ele *)
+			pmd_flow->rule;
+		rte_memcpy(&ntuple_filter,
+			&ntuple_filter_ptr->filter_info,
+			sizeof(struct rte_eth_ntuple_filter));
+		ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
+		if (!ret) {
+			TAILQ_REMOVE(&filter_ntuple_list,
+				ntuple_filter_ptr, entries);
+			rte_free(ntuple_filter_ptr);
+		}
+		break;
+	case RTE_ETH_FILTER_ETHERTYPE:
+		ethertype_filter_ptr = (struct ixgbe_ethertype_filter_ele *)
+			pmd_flow->rule;
+		rte_memcpy(&ethertype_filter,
+			&ethertype_filter_ptr->filter_info,
+			sizeof(struct rte_eth_ethertype_filter));
+		ret = ixgbe_add_del_ethertype_filter(dev,
+			&ethertype_filter, FALSE);
+		if (!ret) {
+			TAILQ_REMOVE(&filter_ethertype_list,
+				ethertype_filter_ptr, entries);
+			rte_free(ethertype_filter_ptr);
+		}
+		break;
+	case RTE_ETH_FILTER_SYN:
+		syn_filter_ptr = (struct ixgbe_eth_syn_filter_ele *)
+			pmd_flow->rule;
+		rte_memcpy(&syn_filter,
+			&syn_filter_ptr->filter_info,
+			sizeof(struct rte_eth_syn_filter));
+		ret = ixgbe_syn_filter_set(dev,
&syn_filter, FALSE); + if (!ret) { + TAILQ_REMOVE(&filter_syn_list, + syn_filter_ptr, entries); + rte_free(syn_filter_ptr); + } + break; + case RTE_ETH_FILTER_FDIR: + fdir_rule_ptr = (struct ixgbe_fdir_rule_ele *)pmd_flow->rule; + rte_memcpy(&fdir_rule, + &fdir_rule_ptr->filter_info, + sizeof(struct ixgbe_fdir_rule)); + ret = ixgbe_fdir_filter_program(dev, &fdir_rule, TRUE, FALSE); + if (!ret) { + TAILQ_REMOVE(&filter_fdir_list, + fdir_rule_ptr, entries); + rte_free(fdir_rule_ptr); + if (TAILQ_EMPTY(&filter_fdir_list)) + fdir_info->mask_added = false; + } + break; + case RTE_ETH_FILTER_L2_TUNNEL: + l2_tn_filter_ptr = (struct ixgbe_eth_l2_tunnel_conf_ele *) + pmd_flow->rule; + rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info, + sizeof(struct rte_eth_l2_tunnel_conf)); + ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_filter); + if (!ret) { + TAILQ_REMOVE(&filter_l2_tunnel_list, + l2_tn_filter_ptr, entries); + rte_free(l2_tn_filter_ptr); + } + break; + case RTE_ETH_FILTER_HASH: + rss_filter_ptr = (struct ixgbe_rss_conf_ele *) + pmd_flow->rule; + ret = ixgbe_config_rss_filter(dev, + &rss_filter_ptr->filter_info, FALSE); + if (!ret) { + TAILQ_REMOVE(&filter_rss_list, + rss_filter_ptr, entries); + rte_free(rss_filter_ptr); + } + break; + default: + PMD_DRV_LOG(WARNING, "Filter type (%d) not supported", + filter_type); + ret = -EINVAL; + break; + } + + if (ret) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_HANDLE, + NULL, "Failed to destroy flow"); + return ret; + } + + TAILQ_FOREACH(ixgbe_flow_mem_ptr, &ixgbe_flow_list, entries) { + if (ixgbe_flow_mem_ptr->flow == pmd_flow) { + TAILQ_REMOVE(&ixgbe_flow_list, + ixgbe_flow_mem_ptr, entries); + rte_free(ixgbe_flow_mem_ptr); + } + } + rte_free(flow); + + return ret; +} + +/* Destroy all flow rules associated with a port on ixgbe. 
*/ +static int +ixgbe_flow_flush(struct rte_eth_dev *dev, + struct rte_flow_error *error) +{ + int ret = 0; + + ixgbe_clear_all_ntuple_filter(dev); + ixgbe_clear_all_ethertype_filter(dev); + ixgbe_clear_syn_filter(dev); + + ret = ixgbe_clear_all_fdir_filter(dev); + if (ret < 0) { + rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, + NULL, "Failed to flush rule"); + return ret; + } + + ret = ixgbe_clear_all_l2_tn_filter(dev); + if (ret < 0) { + rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, + NULL, "Failed to flush rule"); + return ret; + } + + ixgbe_clear_rss_filter(dev); + + ixgbe_filterlist_flush(); + + return 0; +} + +const struct rte_flow_ops ixgbe_flow_ops = { + .validate = ixgbe_flow_validate, + .create = ixgbe_flow_create, + .destroy = ixgbe_flow_destroy, + .flush = ixgbe_flow_flush, +}; diff --git a/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_ipsec.c b/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_ipsec.c new file mode 100644 index 000000000..48f5082d4 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_ipsec.c @@ -0,0 +1,755 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2017 Intel Corporation + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "base/ixgbe_type.h" +#include "base/ixgbe_api.h" +#include "ixgbe_ethdev.h" +#include "ixgbe_ipsec.h" + +#define RTE_IXGBE_REGISTER_POLL_WAIT_5_MS 5 + +#define IXGBE_WAIT_RREAD \ + IXGBE_WRITE_REG_THEN_POLL_MASK(hw, IXGBE_IPSRXIDX, reg_val, \ + IPSRXIDX_READ, RTE_IXGBE_REGISTER_POLL_WAIT_5_MS) +#define IXGBE_WAIT_RWRITE \ + IXGBE_WRITE_REG_THEN_POLL_MASK(hw, IXGBE_IPSRXIDX, reg_val, \ + IPSRXIDX_WRITE, RTE_IXGBE_REGISTER_POLL_WAIT_5_MS) +#define IXGBE_WAIT_TREAD \ + IXGBE_WRITE_REG_THEN_POLL_MASK(hw, IXGBE_IPSTXIDX, reg_val, \ + IPSRXIDX_READ, RTE_IXGBE_REGISTER_POLL_WAIT_5_MS) +#define IXGBE_WAIT_TWRITE \ + IXGBE_WRITE_REG_THEN_POLL_MASK(hw, IXGBE_IPSTXIDX, reg_val, \ + IPSRXIDX_WRITE, RTE_IXGBE_REGISTER_POLL_WAIT_5_MS) + +#define CMP_IP(a, b) (\ + (a).ipv6[0] == (b).ipv6[0] && \ + (a).ipv6[1] == (b).ipv6[1] && \ + (a).ipv6[2] == (b).ipv6[2] && \ + (a).ipv6[3] == (b).ipv6[3]) + + +static void +ixgbe_crypto_clear_ipsec_tables(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_ipsec *priv = IXGBE_DEV_PRIVATE_TO_IPSEC( + dev->data->dev_private); + int i = 0; + + /* clear Rx IP table*/ + for (i = 0; i < IPSEC_MAX_RX_IP_COUNT; i++) { + uint16_t index = i << 3; + uint32_t reg_val = IPSRXIDX_WRITE | IPSRXIDX_TABLE_IP | index; + IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(0), 0); + IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(1), 0); + IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(2), 0); + IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(3), 0); + IXGBE_WAIT_RWRITE; + } + + /* clear Rx SPI and Rx/Tx SA tables*/ + for (i = 0; i < IPSEC_MAX_SA_COUNT; i++) { + uint32_t index = i << 3; + uint32_t reg_val = IPSRXIDX_WRITE | IPSRXIDX_TABLE_SPI | index; + IXGBE_WRITE_REG(hw, IXGBE_IPSRXSPI, 0); + IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPIDX, 0); + IXGBE_WAIT_RWRITE; + reg_val = IPSRXIDX_WRITE | IPSRXIDX_TABLE_KEY | index; + IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(0), 0); + IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(1), 0); + IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(2), 0); + IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(3), 0); + IXGBE_WRITE_REG(hw, IXGBE_IPSRXSALT, 0); + IXGBE_WRITE_REG(hw, IXGBE_IPSRXMOD, 0); + IXGBE_WAIT_RWRITE; + reg_val = IPSRXIDX_WRITE | index; + IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(0), 0); + IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(1), 0); + IXGBE_WRITE_REG(hw, 
IXGBE_IPSTXKEY(2), 0); + IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(3), 0); + IXGBE_WRITE_REG(hw, IXGBE_IPSTXSALT, 0); + IXGBE_WAIT_TWRITE; + } + + memset(priv->rx_ip_tbl, 0, sizeof(priv->rx_ip_tbl)); + memset(priv->rx_sa_tbl, 0, sizeof(priv->rx_sa_tbl)); + memset(priv->tx_sa_tbl, 0, sizeof(priv->tx_sa_tbl)); +} + +static int +ixgbe_crypto_add_sa(struct ixgbe_crypto_session *ic_session) +{ + struct rte_eth_dev *dev = ic_session->dev; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_ipsec *priv = IXGBE_DEV_PRIVATE_TO_IPSEC( + dev->data->dev_private); + uint32_t reg_val; + int sa_index = -1; + + if (ic_session->op == IXGBE_OP_AUTHENTICATED_DECRYPTION) { + int i, ip_index = -1; + uint8_t *key; + + /* Find a match in the IP table*/ + for (i = 0; i < IPSEC_MAX_RX_IP_COUNT; i++) { + if (CMP_IP(priv->rx_ip_tbl[i].ip, + ic_session->dst_ip)) { + ip_index = i; + break; + } + } + /* If no match, find a free entry in the IP table*/ + if (ip_index < 0) { + for (i = 0; i < IPSEC_MAX_RX_IP_COUNT; i++) { + if (priv->rx_ip_tbl[i].ref_count == 0) { + ip_index = i; + break; + } + } + } + + /* Fail if no match and no free entries*/ + if (ip_index < 0) { + PMD_DRV_LOG(ERR, + "No free entry left in the Rx IP table\n"); + return -1; + } + + /* Find a free entry in the SA table*/ + for (i = 0; i < IPSEC_MAX_SA_COUNT; i++) { + if (priv->rx_sa_tbl[i].used == 0) { + sa_index = i; + break; + } + } + /* Fail if no free entries*/ + if (sa_index < 0) { + PMD_DRV_LOG(ERR, + "No free entry left in the Rx SA table\n"); + return -1; + } + + priv->rx_ip_tbl[ip_index].ip.ipv6[0] = + ic_session->dst_ip.ipv6[0]; + priv->rx_ip_tbl[ip_index].ip.ipv6[1] = + ic_session->dst_ip.ipv6[1]; + priv->rx_ip_tbl[ip_index].ip.ipv6[2] = + ic_session->dst_ip.ipv6[2]; + priv->rx_ip_tbl[ip_index].ip.ipv6[3] = + ic_session->dst_ip.ipv6[3]; + priv->rx_ip_tbl[ip_index].ref_count++; + + priv->rx_sa_tbl[sa_index].spi = + rte_cpu_to_be_32(ic_session->spi); + priv->rx_sa_tbl[sa_index].ip_index = ip_index; + priv->rx_sa_tbl[sa_index].mode = IPSRXMOD_VALID; + if (ic_session->op == IXGBE_OP_AUTHENTICATED_DECRYPTION) + priv->rx_sa_tbl[sa_index].mode |= + (IPSRXMOD_PROTO | IPSRXMOD_DECRYPT); + if (ic_session->dst_ip.type == IPv6) { + priv->rx_sa_tbl[sa_index].mode |= IPSRXMOD_IPV6; + priv->rx_ip_tbl[ip_index].ip.type = IPv6; + } else if (ic_session->dst_ip.type == IPv4) + priv->rx_ip_tbl[ip_index].ip.type = IPv4; + + priv->rx_sa_tbl[sa_index].used = 1; + + /* write IP table entry*/ + reg_val = IPSRXIDX_RX_EN | IPSRXIDX_WRITE | + IPSRXIDX_TABLE_IP | (ip_index << 3); + if (priv->rx_ip_tbl[ip_index].ip.type == IPv4) { + IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(0), 0); + IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(1), 0); + IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(2), 0); + IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(3), + priv->rx_ip_tbl[ip_index].ip.ipv4); + } else { + IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(0), + priv->rx_ip_tbl[ip_index].ip.ipv6[0]); + IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(1), + priv->rx_ip_tbl[ip_index].ip.ipv6[1]); + IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(2), + priv->rx_ip_tbl[ip_index].ip.ipv6[2]); + IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(3), + priv->rx_ip_tbl[ip_index].ip.ipv6[3]); + } + IXGBE_WAIT_RWRITE; + + /* write SPI table entry*/ + reg_val = IPSRXIDX_RX_EN | IPSRXIDX_WRITE | + IPSRXIDX_TABLE_SPI | (sa_index << 3); + IXGBE_WRITE_REG(hw, IXGBE_IPSRXSPI, + priv->rx_sa_tbl[sa_index].spi); + IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPIDX, + priv->rx_sa_tbl[sa_index].ip_index); + IXGBE_WAIT_RWRITE; + + /* write Key table entry*/ + key = 
malloc(ic_session->key_len); + if (!key) + return -ENOMEM; + + memcpy(key, ic_session->key, ic_session->key_len); + + reg_val = IPSRXIDX_RX_EN | IPSRXIDX_WRITE | + IPSRXIDX_TABLE_KEY | (sa_index << 3); + IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(0), + rte_cpu_to_be_32(*(uint32_t *)&key[12])); + IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(1), + rte_cpu_to_be_32(*(uint32_t *)&key[8])); + IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(2), + rte_cpu_to_be_32(*(uint32_t *)&key[4])); + IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(3), + rte_cpu_to_be_32(*(uint32_t *)&key[0])); + IXGBE_WRITE_REG(hw, IXGBE_IPSRXSALT, + rte_cpu_to_be_32(ic_session->salt)); + IXGBE_WRITE_REG(hw, IXGBE_IPSRXMOD, + priv->rx_sa_tbl[sa_index].mode); + IXGBE_WAIT_RWRITE; + + free(key); + + } else { /* sess->dir == RTE_CRYPTO_OUTBOUND */ + uint8_t *key; + int i; + + /* Find a free entry in the SA table*/ + for (i = 0; i < IPSEC_MAX_SA_COUNT; i++) { + if (priv->tx_sa_tbl[i].used == 0) { + sa_index = i; + break; + } + } + /* Fail if no free entries*/ + if (sa_index < 0) { + PMD_DRV_LOG(ERR, + "No free entry left in the Tx SA table\n"); + return -1; + } + + priv->tx_sa_tbl[sa_index].spi = + rte_cpu_to_be_32(ic_session->spi); + priv->tx_sa_tbl[i].used = 1; + ic_session->sa_index = sa_index; + + key = malloc(ic_session->key_len); + if (!key) + return -ENOMEM; + + memcpy(key, ic_session->key, ic_session->key_len); + + /* write Key table entry*/ + reg_val = IPSRXIDX_RX_EN | IPSRXIDX_WRITE | (sa_index << 3); + IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(0), + rte_cpu_to_be_32(*(uint32_t *)&key[12])); + IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(1), + rte_cpu_to_be_32(*(uint32_t *)&key[8])); + IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(2), + rte_cpu_to_be_32(*(uint32_t *)&key[4])); + IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(3), + rte_cpu_to_be_32(*(uint32_t *)&key[0])); + IXGBE_WRITE_REG(hw, IXGBE_IPSTXSALT, + rte_cpu_to_be_32(ic_session->salt)); + IXGBE_WAIT_TWRITE; + + free(key); + } + + return 0; +} + +static int +ixgbe_crypto_remove_sa(struct rte_eth_dev *dev, + struct ixgbe_crypto_session *ic_session) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_ipsec *priv = + IXGBE_DEV_PRIVATE_TO_IPSEC(dev->data->dev_private); + uint32_t reg_val; + int sa_index = -1; + + if (ic_session->op == IXGBE_OP_AUTHENTICATED_DECRYPTION) { + int i, ip_index = -1; + + /* Find a match in the IP table*/ + for (i = 0; i < IPSEC_MAX_RX_IP_COUNT; i++) { + if (CMP_IP(priv->rx_ip_tbl[i].ip, ic_session->dst_ip)) { + ip_index = i; + break; + } + } + + /* Fail if no match*/ + if (ip_index < 0) { + PMD_DRV_LOG(ERR, + "Entry not found in the Rx IP table\n"); + return -1; + } + + /* Find a free entry in the SA table*/ + for (i = 0; i < IPSEC_MAX_SA_COUNT; i++) { + if (priv->rx_sa_tbl[i].spi == + rte_cpu_to_be_32(ic_session->spi)) { + sa_index = i; + break; + } + } + /* Fail if no match*/ + if (sa_index < 0) { + PMD_DRV_LOG(ERR, + "Entry not found in the Rx SA table\n"); + return -1; + } + + /* Disable and clear Rx SPI and key table table entryes*/ + reg_val = IPSRXIDX_WRITE | IPSRXIDX_TABLE_SPI | (sa_index << 3); + IXGBE_WRITE_REG(hw, IXGBE_IPSRXSPI, 0); + IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPIDX, 0); + IXGBE_WAIT_RWRITE; + reg_val = IPSRXIDX_WRITE | IPSRXIDX_TABLE_KEY | (sa_index << 3); + IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(0), 0); + IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(1), 0); + IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(2), 0); + IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(3), 0); + IXGBE_WRITE_REG(hw, IXGBE_IPSRXSALT, 0); + IXGBE_WRITE_REG(hw, IXGBE_IPSRXMOD, 0); + IXGBE_WAIT_RWRITE; + 
priv->rx_sa_tbl[sa_index].used = 0;
+
+		/* If last used then clear the IP table entry*/
+		priv->rx_ip_tbl[ip_index].ref_count--;
+		if (priv->rx_ip_tbl[ip_index].ref_count == 0) {
+			reg_val = IPSRXIDX_WRITE | IPSRXIDX_TABLE_IP |
+				(ip_index << 3);
+			IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(0), 0);
+			IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(1), 0);
+			IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(2), 0);
+			IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(3), 0);
+		}
+	} else { /* session->dir == RTE_CRYPTO_OUTBOUND */
+		int i;
+
+		/* Find a match in the SA table*/
+		for (i = 0; i < IPSEC_MAX_SA_COUNT; i++) {
+			if (priv->tx_sa_tbl[i].spi ==
+				rte_cpu_to_be_32(ic_session->spi)) {
+				sa_index = i;
+				break;
+			}
+		}
+		/* Fail if no matching entry*/
+		if (sa_index < 0) {
+			PMD_DRV_LOG(ERR,
+				"Entry not found in the Tx SA table\n");
+			return -1;
+		}
+		reg_val = IPSRXIDX_WRITE | (sa_index << 3);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(0), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(1), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(2), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(3), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSTXSALT, 0);
+		IXGBE_WAIT_TWRITE;
+
+		priv->tx_sa_tbl[sa_index].used = 0;
+	}
+
+	return 0;
+}
+
+static int
+ixgbe_crypto_create_session(void *device,
+		struct rte_security_session_conf *conf,
+		struct rte_security_session *session,
+		struct rte_mempool *mempool)
+{
+	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
+	struct ixgbe_crypto_session *ic_session = NULL;
+	struct rte_crypto_aead_xform *aead_xform;
+	struct rte_eth_conf *dev_conf = &eth_dev->data->dev_conf;
+
+	if (rte_mempool_get(mempool, (void **)&ic_session)) {
+		PMD_DRV_LOG(ERR, "Cannot get object from ic_session mempool");
+		return -ENOMEM;
+	}
+
+	if (conf->crypto_xform->type != RTE_CRYPTO_SYM_XFORM_AEAD ||
+			conf->crypto_xform->aead.algo !=
+				RTE_CRYPTO_AEAD_AES_GCM) {
+		PMD_DRV_LOG(ERR, "Unsupported crypto transformation mode\n");
+		rte_mempool_put(mempool, (void *)ic_session);
+		return -ENOTSUP;
+	}
+	aead_xform = &conf->crypto_xform->aead;
+
+	if (conf->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
+		if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_SECURITY) {
+			ic_session->op = IXGBE_OP_AUTHENTICATED_DECRYPTION;
+		} else {
+			PMD_DRV_LOG(ERR, "IPsec decryption not enabled\n");
+			rte_mempool_put(mempool, (void *)ic_session);
+			return -ENOTSUP;
+		}
+	} else {
+		if (dev_conf->txmode.offloads & DEV_TX_OFFLOAD_SECURITY) {
+			ic_session->op = IXGBE_OP_AUTHENTICATED_ENCRYPTION;
+		} else {
+			PMD_DRV_LOG(ERR, "IPsec encryption not enabled\n");
+			rte_mempool_put(mempool, (void *)ic_session);
+			return -ENOTSUP;
+		}
+	}
+
+	ic_session->key = aead_xform->key.data;
+	ic_session->key_len = aead_xform->key.length;
+	memcpy(&ic_session->salt,
+		&aead_xform->key.data[aead_xform->key.length], 4);
+	ic_session->spi = conf->ipsec.spi;
+	ic_session->dev = eth_dev;
+
+	set_sec_session_private_data(session, ic_session);
+
+	if (ic_session->op == IXGBE_OP_AUTHENTICATED_ENCRYPTION) {
+		if (ixgbe_crypto_add_sa(ic_session)) {
+			PMD_DRV_LOG(ERR, "Failed to add SA\n");
+			rte_mempool_put(mempool, (void *)ic_session);
+			return -EPERM;
+		}
+	}
+
+	return 0;
+}
+
+static unsigned int
+ixgbe_crypto_session_get_size(__rte_unused void *device)
+{
+	return sizeof(struct ixgbe_crypto_session);
+}
+
+static int
+ixgbe_crypto_remove_session(void *device,
+		struct rte_security_session *session)
+{
+	struct rte_eth_dev *eth_dev = device;
+	struct ixgbe_crypto_session *ic_session =
+		(struct ixgbe_crypto_session *)
+		get_sec_session_private_data(session);
+	struct rte_mempool *mempool =
rte_mempool_from_obj(ic_session); + + if (eth_dev != ic_session->dev) { + PMD_DRV_LOG(ERR, "Session not bound to this device\n"); + return -ENODEV; + } + + if (ixgbe_crypto_remove_sa(eth_dev, ic_session)) { + PMD_DRV_LOG(ERR, "Failed to remove session\n"); + return -EFAULT; + } + + rte_mempool_put(mempool, (void *)ic_session); + + return 0; +} + +static inline uint8_t +ixgbe_crypto_compute_pad_len(struct rte_mbuf *m) +{ + if (m->nb_segs == 1) { + /* 16 bytes ICV + 2 bytes ESP trailer + payload padding size + * payload padding size is stored at + */ + uint8_t *esp_pad_len = rte_pktmbuf_mtod_offset(m, uint8_t *, + rte_pktmbuf_pkt_len(m) - + (ESP_TRAILER_SIZE + ESP_ICV_SIZE)); + return *esp_pad_len + ESP_TRAILER_SIZE + ESP_ICV_SIZE; + } + return 0; +} + +static int +ixgbe_crypto_update_mb(void *device __rte_unused, + struct rte_security_session *session, + struct rte_mbuf *m, void *params __rte_unused) +{ + struct ixgbe_crypto_session *ic_session = + get_sec_session_private_data(session); + if (ic_session->op == IXGBE_OP_AUTHENTICATED_ENCRYPTION) { + union ixgbe_crypto_tx_desc_md *mdata = + (union ixgbe_crypto_tx_desc_md *)&m->udata64; + mdata->enc = 1; + mdata->sa_idx = ic_session->sa_index; + mdata->pad_len = ixgbe_crypto_compute_pad_len(m); + } + return 0; +} + + +static const struct rte_security_capability * +ixgbe_crypto_capabilities_get(void *device __rte_unused) +{ + static const struct rte_cryptodev_capabilities + aes_gcm_gmac_crypto_capabilities[] = { + { /* AES GMAC (128-bit) */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, + {.auth = { + .algo = RTE_CRYPTO_AUTH_AES_GMAC, + .block_size = 16, + .key_size = { + .min = 16, + .max = 16, + .increment = 0 + }, + .digest_size = { + .min = 16, + .max = 16, + .increment = 0 + }, + .iv_size = { + .min = 12, + .max = 12, + .increment = 0 + } + }, } + }, } + }, + { /* AES GCM (128-bit) */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD, + {.aead = { + .algo = RTE_CRYPTO_AEAD_AES_GCM, + .block_size = 16, + .key_size = { + .min = 16, + .max = 16, + .increment = 0 + }, + .digest_size = { + .min = 16, + .max = 16, + .increment = 0 + }, + .aad_size = { + .min = 0, + .max = 65535, + .increment = 1 + }, + .iv_size = { + .min = 12, + .max = 12, + .increment = 0 + } + }, } + }, } + }, + { + .op = RTE_CRYPTO_OP_TYPE_UNDEFINED, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED + }, } + }, + }; + + static const struct rte_security_capability + ixgbe_security_capabilities[] = { + { /* IPsec Inline Crypto ESP Transport Egress */ + .action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO, + .protocol = RTE_SECURITY_PROTOCOL_IPSEC, + {.ipsec = { + .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP, + .mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT, + .direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS, + .options = { 0 } + } }, + .crypto_capabilities = aes_gcm_gmac_crypto_capabilities, + .ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA + }, + { /* IPsec Inline Crypto ESP Transport Ingress */ + .action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO, + .protocol = RTE_SECURITY_PROTOCOL_IPSEC, + {.ipsec = { + .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP, + .mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT, + .direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS, + .options = { 0 } + } }, + .crypto_capabilities = aes_gcm_gmac_crypto_capabilities, + .ol_flags = 0 + }, + { /* IPsec Inline Crypto ESP Tunnel Egress */ + .action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO, + .protocol = RTE_SECURITY_PROTOCOL_IPSEC, + 
{.ipsec = { + .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP, + .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL, + .direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS, + .options = { 0 } + } }, + .crypto_capabilities = aes_gcm_gmac_crypto_capabilities, + .ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA + }, + { /* IPsec Inline Crypto ESP Tunnel Ingress */ + .action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO, + .protocol = RTE_SECURITY_PROTOCOL_IPSEC, + {.ipsec = { + .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP, + .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL, + .direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS, + .options = { 0 } + } }, + .crypto_capabilities = aes_gcm_gmac_crypto_capabilities, + .ol_flags = 0 + }, + { + .action = RTE_SECURITY_ACTION_TYPE_NONE + } + }; + + return ixgbe_security_capabilities; +} + + +int +ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t reg; + uint64_t rx_offloads; + uint64_t tx_offloads; + + rx_offloads = dev->data->dev_conf.rxmode.offloads; + tx_offloads = dev->data->dev_conf.txmode.offloads; + + /* sanity checks */ + if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) { + PMD_DRV_LOG(ERR, "RSC and IPsec not supported"); + return -1; + } + if (rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC) { + PMD_DRV_LOG(ERR, "HW CRC strip needs to be enabled for IPsec"); + return -1; + } + + + /* Set IXGBE_SECTXBUFFAF to 0x15 as required in the datasheet*/ + IXGBE_WRITE_REG(hw, IXGBE_SECTXBUFFAF, 0x15); + + /* IFG needs to be set to 3 when we are using security. Otherwise a Tx + * hang will occur with heavy traffic. + */ + reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG); + reg = (reg & 0xFFFFFFF0) | 0x3; + IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg); + + reg = IXGBE_READ_REG(hw, IXGBE_HLREG0); + reg |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_RXCRCSTRP; + IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg); + + if (rx_offloads & DEV_RX_OFFLOAD_SECURITY) { + IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, 0); + reg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL); + if (reg != 0) { + PMD_DRV_LOG(ERR, "Error enabling Rx Crypto"); + return -1; + } + } + if (tx_offloads & DEV_TX_OFFLOAD_SECURITY) { + IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, + IXGBE_SECTXCTRL_STORE_FORWARD); + reg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL); + if (reg != IXGBE_SECTXCTRL_STORE_FORWARD) { + PMD_DRV_LOG(ERR, "Error enabling Rx Crypto"); + return -1; + } + } + + ixgbe_crypto_clear_ipsec_tables(dev); + + return 0; +} + +int +ixgbe_crypto_add_ingress_sa_from_flow(const void *sess, + const void *ip_spec, + uint8_t is_ipv6) +{ + struct ixgbe_crypto_session *ic_session + = get_sec_session_private_data(sess); + + if (ic_session->op == IXGBE_OP_AUTHENTICATED_DECRYPTION) { + if (is_ipv6) { + const struct rte_flow_item_ipv6 *ipv6 = ip_spec; + ic_session->src_ip.type = IPv6; + ic_session->dst_ip.type = IPv6; + rte_memcpy(ic_session->src_ip.ipv6, + ipv6->hdr.src_addr, 16); + rte_memcpy(ic_session->dst_ip.ipv6, + ipv6->hdr.dst_addr, 16); + } else { + const struct rte_flow_item_ipv4 *ipv4 = ip_spec; + ic_session->src_ip.type = IPv4; + ic_session->dst_ip.type = IPv4; + ic_session->src_ip.ipv4 = ipv4->hdr.src_addr; + ic_session->dst_ip.ipv4 = ipv4->hdr.dst_addr; + } + return ixgbe_crypto_add_sa(ic_session); + } + + return 0; +} + +static struct rte_security_ops ixgbe_security_ops = { + .session_create = ixgbe_crypto_create_session, + .session_update = NULL, + .session_get_size = ixgbe_crypto_session_get_size, + .session_stats_get = NULL, + .session_destroy = ixgbe_crypto_remove_session, + .set_pkt_metadata = 
ixgbe_crypto_update_mb, + .capabilities_get = ixgbe_crypto_capabilities_get +}; + +static int +ixgbe_crypto_capable(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t reg_i, reg, capable = 1; + /* test if rx crypto can be enabled and then write back initial value*/ + reg_i = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL); + IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, 0); + reg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL); + if (reg != 0) + capable = 0; + IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, reg_i); + return capable; +} + +int +ixgbe_ipsec_ctx_create(struct rte_eth_dev *dev) +{ + struct rte_security_ctx *ctx = NULL; + + if (ixgbe_crypto_capable(dev)) { + ctx = rte_malloc("rte_security_instances_ops", + sizeof(struct rte_security_ctx), 0); + if (ctx) { + ctx->device = (void *)dev; + ctx->ops = &ixgbe_security_ops; + ctx->sess_cnt = 0; + dev->security_ctx = ctx; + } else { + return -ENOMEM; + } + } + return 0; +} diff --git a/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_ipsec.h b/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_ipsec.h new file mode 100644 index 000000000..e218c0a4a --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_ipsec.h @@ -0,0 +1,118 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2017 Intel Corporation + */ + +#ifndef IXGBE_IPSEC_H_ +#define IXGBE_IPSEC_H_ + +#include + +#define IPSRXIDX_RX_EN 0x00000001 +#define IPSRXIDX_TABLE_IP 0x00000002 +#define IPSRXIDX_TABLE_SPI 0x00000004 +#define IPSRXIDX_TABLE_KEY 0x00000006 +#define IPSRXIDX_WRITE 0x80000000 +#define IPSRXIDX_READ 0x40000000 +#define IPSRXMOD_VALID 0x00000001 +#define IPSRXMOD_PROTO 0x00000004 +#define IPSRXMOD_DECRYPT 0x00000008 +#define IPSRXMOD_IPV6 0x00000010 +#define IXGBE_ADVTXD_POPTS_IPSEC 0x00000400 +#define IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP 0x00002000 +#define IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN 0x00004000 +#define IXGBE_RXDADV_IPSEC_STATUS_SECP 0x00020000 +#define IXGBE_RXDADV_IPSEC_ERROR_BIT_MASK 0x18000000 +#define IXGBE_RXDADV_IPSEC_ERROR_INVALID_PROTOCOL 0x08000000 +#define IXGBE_RXDADV_IPSEC_ERROR_INVALID_LENGTH 0x10000000 +#define IXGBE_RXDADV_IPSEC_ERROR_AUTHENTICATION_FAILED 0x18000000 + +#define IPSEC_MAX_RX_IP_COUNT 128 +#define IPSEC_MAX_SA_COUNT 1024 + +#define ESP_ICV_SIZE 16 +#define ESP_TRAILER_SIZE 2 + +enum ixgbe_operation { + IXGBE_OP_AUTHENTICATED_ENCRYPTION, + IXGBE_OP_AUTHENTICATED_DECRYPTION +}; + +enum ixgbe_gcm_key { + IXGBE_GCM_KEY_128, + IXGBE_GCM_KEY_256 +}; + +/** + * Generic IP address structure + * TODO: Find better location for this rte_net.h possibly. 
+ **/ +struct ipaddr { + enum ipaddr_type { + IPv4, + IPv6 + } type; + /**< IP Address Type - IPv4/IPv6 */ + + union { + uint32_t ipv4; + uint32_t ipv6[4]; + }; +}; + +/** inline crypto crypto private session structure */ +struct ixgbe_crypto_session { + enum ixgbe_operation op; + const uint8_t *key; + uint32_t key_len; + uint32_t salt; + uint32_t sa_index; + uint32_t spi; + struct ipaddr src_ip; + struct ipaddr dst_ip; + struct rte_eth_dev *dev; +} __rte_cache_aligned; + +struct ixgbe_crypto_rx_ip_table { + struct ipaddr ip; + uint16_t ref_count; +}; +struct ixgbe_crypto_rx_sa_table { + uint32_t spi; + uint32_t ip_index; + uint8_t mode; + uint8_t used; +}; + +struct ixgbe_crypto_tx_sa_table { + uint32_t spi; + uint8_t used; +}; + +union ixgbe_crypto_tx_desc_md { + uint64_t data; + struct { + /**< SA table index */ + uint32_t sa_idx; + /**< ICV and ESP trailer length */ + uint8_t pad_len; + /**< enable encryption */ + uint8_t enc; + }; +}; + +struct ixgbe_ipsec { + struct ixgbe_crypto_rx_ip_table rx_ip_tbl[IPSEC_MAX_RX_IP_COUNT]; + struct ixgbe_crypto_rx_sa_table rx_sa_tbl[IPSEC_MAX_SA_COUNT]; + struct ixgbe_crypto_tx_sa_table tx_sa_tbl[IPSEC_MAX_SA_COUNT]; +}; + + +int ixgbe_ipsec_ctx_create(struct rte_eth_dev *dev); +int ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev); +int ixgbe_crypto_add_ingress_sa_from_flow(const void *sess, + const void *ip_spec, + uint8_t is_ipv6); + + + +#endif /*IXGBE_IPSEC_H_*/ diff --git a/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_logs.h b/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_logs.h new file mode 100644 index 000000000..2a279d109 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_logs.h @@ -0,0 +1,50 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2014 Intel Corporation + */ + +#ifndef _IXGBE_LOGS_H_ +#define _IXGBE_LOGS_H_ + +extern int ixgbe_logtype_init; +#define PMD_INIT_LOG(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, ixgbe_logtype_init, \ + "%s(): " fmt "\n", __func__, ##args) + +#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>") + +#ifdef RTE_LIBRTE_IXGBE_DEBUG_RX +extern int ixgbe_logtype_rx; +#define PMD_RX_LOG(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, ixgbe_logtype_rx, \ + "%s(): " fmt "\n", __func__, ## args) +#else +#define PMD_RX_LOG(level, fmt, args...) do { } while (0) +#endif + +#ifdef RTE_LIBRTE_IXGBE_DEBUG_TX +extern int ixgbe_logtype_tx; +#define PMD_TX_LOG(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, ixgbe_logtype_tx, \ + "%s(): " fmt "\n", __func__, ## args) +#else +#define PMD_TX_LOG(level, fmt, args...) do { } while (0) +#endif + +#ifdef RTE_LIBRTE_IXGBE_DEBUG_TX_FREE +extern int ixgbe_logtype_tx_free; +#define PMD_TX_FREE_LOG(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, ixgbe_logtype_tx_free, \ + "%s(): " fmt "\n", __func__, ## args) +#else +#define PMD_TX_FREE_LOG(level, fmt, args...) do { } while (0) +#endif + +extern int ixgbe_logtype_driver; +#define PMD_DRV_LOG_RAW(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, ixgbe_logtype_driver, "%s(): " fmt, \ + __func__, ## args) + +#define PMD_DRV_LOG(level, fmt, args...) 
\ + PMD_DRV_LOG_RAW(level, fmt "\n", ## args) + +#endif /* _IXGBE_LOGS_H_ */ diff --git a/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_pf.c b/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_pf.c new file mode 100644 index 000000000..67b5bef44 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_pf.c @@ -0,0 +1,936 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2016 Intel Corporation + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "base/ixgbe_common.h" +#include "ixgbe_ethdev.h" +#include "rte_pmd_ixgbe.h" + +#define IXGBE_MAX_VFTA (128) +#define IXGBE_VF_MSG_SIZE_DEFAULT 1 +#define IXGBE_VF_GET_QUEUE_MSG_SIZE 5 +#define IXGBE_ETHERTYPE_FLOW_CTRL 0x8808 + +static inline uint16_t +dev_num_vf(struct rte_eth_dev *eth_dev) +{ + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); + + return pci_dev->max_vfs; +} + +static inline +int ixgbe_vf_perm_addr_gen(struct rte_eth_dev *dev, uint16_t vf_num) +{ + unsigned char vf_mac_addr[RTE_ETHER_ADDR_LEN]; + struct ixgbe_vf_info *vfinfo = + *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private); + uint16_t vfn; + + for (vfn = 0; vfn < vf_num; vfn++) { + rte_eth_random_addr(vf_mac_addr); + /* keep the random address as default */ + memcpy(vfinfo[vfn].vf_mac_addresses, vf_mac_addr, + RTE_ETHER_ADDR_LEN); + } + + return 0; +} + +static inline int +ixgbe_mb_intr_setup(struct rte_eth_dev *dev) +{ + struct ixgbe_interrupt *intr = + IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + + intr->mask |= IXGBE_EICR_MAILBOX; + + return 0; +} + +void ixgbe_pf_host_init(struct rte_eth_dev *eth_dev) +{ + struct ixgbe_vf_info **vfinfo = + IXGBE_DEV_PRIVATE_TO_P_VFDATA(eth_dev->data->dev_private); + struct ixgbe_mirror_info *mirror_info = + IXGBE_DEV_PRIVATE_TO_PFDATA(eth_dev->data->dev_private); + struct ixgbe_uta_info *uta_info = + IXGBE_DEV_PRIVATE_TO_UTA(eth_dev->data->dev_private); + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); + uint16_t vf_num; + uint8_t nb_queue; + + PMD_INIT_FUNC_TRACE(); + + RTE_ETH_DEV_SRIOV(eth_dev).active = 0; + vf_num = dev_num_vf(eth_dev); + if (vf_num == 0) + return; + + *vfinfo = rte_zmalloc("vf_info", sizeof(struct ixgbe_vf_info) * vf_num, 0); + if (*vfinfo == NULL) + rte_panic("Cannot allocate memory for private VF data\n"); + + rte_eth_switch_domain_alloc(&(*vfinfo)->switch_domain_id); + + memset(mirror_info, 0, sizeof(struct ixgbe_mirror_info)); + memset(uta_info, 0, sizeof(struct ixgbe_uta_info)); + hw->mac.mc_filter_type = 0; + + if (vf_num >= ETH_32_POOLS) { + nb_queue = 2; + RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_64_POOLS; + } else if (vf_num >= ETH_16_POOLS) { + nb_queue = 4; + RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_32_POOLS; + } else { + nb_queue = 8; + RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_16_POOLS; + } + + RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool = nb_queue; + RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx = vf_num; + RTE_ETH_DEV_SRIOV(eth_dev).def_pool_q_idx = (uint16_t)(vf_num * nb_queue); + + ixgbe_vf_perm_addr_gen(eth_dev, vf_num); + + /* init_mailbox_params */ + hw->mbx.ops.init_params(hw); + + /* set mb interrupt mask */ + ixgbe_mb_intr_setup(eth_dev); +} + +void ixgbe_pf_host_uninit(struct rte_eth_dev *eth_dev) +{ + struct ixgbe_vf_info **vfinfo; + uint16_t vf_num; + int ret; + + PMD_INIT_FUNC_TRACE(); + + RTE_ETH_DEV_SRIOV(eth_dev).active = 0; + RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool = 0; + RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx = 0; + 
RTE_ETH_DEV_SRIOV(eth_dev).def_pool_q_idx = 0;
+
+	vf_num = dev_num_vf(eth_dev);
+	if (vf_num == 0)
+		return;
+
+	vfinfo = IXGBE_DEV_PRIVATE_TO_P_VFDATA(eth_dev->data->dev_private);
+	if (*vfinfo == NULL)
+		return;
+
+	ret = rte_eth_switch_domain_free((*vfinfo)->switch_domain_id);
+	if (ret)
+		PMD_INIT_LOG(WARNING, "failed to free switch domain: %d", ret);
+
+	rte_free(*vfinfo);
+	*vfinfo = NULL;
+}
+
+static void
+ixgbe_add_tx_flow_control_drop_filter(struct rte_eth_dev *eth_dev)
+{
+	struct ixgbe_hw *hw =
+		IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+	struct ixgbe_filter_info *filter_info =
+		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
+	uint16_t vf_num;
+	int i;
+	struct ixgbe_ethertype_filter ethertype_filter;
+
+	if (!hw->mac.ops.set_ethertype_anti_spoofing) {
+		PMD_DRV_LOG(INFO, "ether type anti-spoofing is not supported.\n");
+		return;
+	}
+
+	i = ixgbe_ethertype_filter_lookup(filter_info,
+		IXGBE_ETHERTYPE_FLOW_CTRL);
+	if (i >= 0) {
+		PMD_DRV_LOG(ERR, "An ether type filter entity for flow control already exists!\n");
+		return;
+	}
+
+	ethertype_filter.ethertype = IXGBE_ETHERTYPE_FLOW_CTRL;
+	ethertype_filter.etqf = IXGBE_ETQF_FILTER_EN |
+				IXGBE_ETQF_TX_ANTISPOOF |
+				IXGBE_ETHERTYPE_FLOW_CTRL;
+	ethertype_filter.etqs = 0;
+	ethertype_filter.conf = TRUE;
+	i = ixgbe_ethertype_filter_insert(filter_info,
+					  &ethertype_filter);
+	if (i < 0) {
+		PMD_DRV_LOG(ERR, "Cannot find an unused ether type filter entity for flow control.\n");
+		return;
+	}
+
+	IXGBE_WRITE_REG(hw, IXGBE_ETQF(i),
+			(IXGBE_ETQF_FILTER_EN |
+			IXGBE_ETQF_TX_ANTISPOOF |
+			IXGBE_ETHERTYPE_FLOW_CTRL));
+
+	vf_num = dev_num_vf(eth_dev);
+	for (i = 0; i < vf_num; i++)
+		hw->mac.ops.set_ethertype_anti_spoofing(hw, true, i);
+}
+
+int ixgbe_pf_host_configure(struct rte_eth_dev *eth_dev)
+{
+	uint32_t vtctl, fcrth;
+	uint32_t vfre_slot, vfre_offset;
+	uint16_t vf_num;
+	const uint8_t VFRE_SHIFT = 5;  /* VFRE 32 bits per slot */
+	const uint8_t VFRE_MASK = (uint8_t)((1U << VFRE_SHIFT) - 1);
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+	uint32_t gpie, gcr_ext;
+	uint32_t vlanctrl;
+	int i;
+
+	vf_num = dev_num_vf(eth_dev);
+	if (vf_num == 0)
+		return -1;
+
+	/* enable VMDq and set the default pool for PF */
+	vtctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
+	vtctl |= IXGBE_VMD_CTL_VMDQ_EN;
+	vtctl &= ~IXGBE_VT_CTL_POOL_MASK;
+	vtctl |= RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx
+		<< IXGBE_VT_CTL_POOL_SHIFT;
+	vtctl |= IXGBE_VT_CTL_REPLEN;
+	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl);
+
+	vfre_offset = vf_num & VFRE_MASK;
+	vfre_slot = (vf_num >> VFRE_SHIFT) > 0 ?
1 : 0; + + /* Enable pools reserved to PF only */ + IXGBE_WRITE_REG(hw, IXGBE_VFRE(vfre_slot), (~0U) << vfre_offset); + IXGBE_WRITE_REG(hw, IXGBE_VFRE(vfre_slot ^ 1), vfre_slot - 1); + IXGBE_WRITE_REG(hw, IXGBE_VFTE(vfre_slot), (~0U) << vfre_offset); + IXGBE_WRITE_REG(hw, IXGBE_VFTE(vfre_slot ^ 1), vfre_slot - 1); + + /* PFDMA Tx General Switch Control Enables VMDQ loopback */ + IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN); + + /* clear VMDq map to perment rar 0 */ + hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL); + + /* clear VMDq map to scan rar 127 */ + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(hw->mac.num_rar_entries), 0); + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(hw->mac.num_rar_entries), 0); + + /* set VMDq map to default PF pool */ + hw->mac.ops.set_vmdq(hw, 0, RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx); + + /* + * SW msut set GCR_EXT.VT_Mode the same as GPIE.VT_Mode + */ + gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT); + gcr_ext &= ~IXGBE_GCR_EXT_VT_MODE_MASK; + + gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); + gpie &= ~IXGBE_GPIE_VTMODE_MASK; + gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT; + + switch (RTE_ETH_DEV_SRIOV(eth_dev).active) { + case ETH_64_POOLS: + gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64; + gpie |= IXGBE_GPIE_VTMODE_64; + break; + case ETH_32_POOLS: + gcr_ext |= IXGBE_GCR_EXT_VT_MODE_32; + gpie |= IXGBE_GPIE_VTMODE_32; + break; + case ETH_16_POOLS: + gcr_ext |= IXGBE_GCR_EXT_VT_MODE_16; + gpie |= IXGBE_GPIE_VTMODE_16; + break; + } + + IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext); + IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); + + /* + * enable vlan filtering and allow all vlan tags through + */ + vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); + vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */ + IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl); + + /* VFTA - enable all vlan filters */ + for (i = 0; i < IXGBE_MAX_VFTA; i++) + IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF); + + /* Enable MAC Anti-Spoofing */ + hw->mac.ops.set_mac_anti_spoofing(hw, FALSE, vf_num); + + /* set flow control threshold to max to avoid tx switch hang */ + for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0); + fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 32; + IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth); + } + + ixgbe_add_tx_flow_control_drop_filter(eth_dev); + + return 0; +} + +static void +set_rx_mode(struct rte_eth_dev *dev) +{ + struct rte_eth_dev_data *dev_data = dev->data; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE; + uint16_t vfn = dev_num_vf(dev); + + /* Check for Promiscuous and All Multicast modes */ + fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); + + /* set all bits that we expect to always be set */ + fctrl &= ~IXGBE_FCTRL_SBP; /* disable store-bad-packets */ + fctrl |= IXGBE_FCTRL_BAM; + + /* clear the bits we are changing the status of */ + fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); + + if (dev_data->promiscuous) { + fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); + vmolr |= (IXGBE_VMOLR_ROPE | IXGBE_VMOLR_MPE); + } else { + if (dev_data->all_multicast) { + fctrl |= IXGBE_FCTRL_MPE; + vmolr |= IXGBE_VMOLR_MPE; + } else { + vmolr |= IXGBE_VMOLR_ROMPE; + } + } + + if (hw->mac.type != ixgbe_mac_82598EB) { + vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(vfn)) & + ~(IXGBE_VMOLR_MPE | IXGBE_VMOLR_ROMPE | + IXGBE_VMOLR_ROPE); + IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vfn), vmolr); + } + + IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); + + ixgbe_vlan_hw_strip_config(dev); +} 
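+
+/*
+ * Editorial note (illustrative sketch, not part of the upstream driver):
+ * the per-VF receive/transmit enables programmed in ixgbe_pf_host_configure()
+ * above and in ixgbe_vf_reset_msg() below live in two 32-bit VFRE/VFTE
+ * registers, so a VF (or pool) index is split into a register slot and a
+ * bit offset roughly as:
+ *
+ *	reg_offset = (vf >> 5) > 0 ? 1 : 0;	(which 32-bit register)
+ *	vf_shift   = vf & 0x1f;			(bit within that register)
+ *
+ * e.g. VF 37 maps to slot 1, bit 5, while VF 3 maps to slot 0, bit 3.
+ */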
+ +static inline void +ixgbe_vf_reset_event(struct rte_eth_dev *dev, uint16_t vf) +{ + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_vf_info *vfinfo = + *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private)); + int rar_entry = hw->mac.num_rar_entries - (vf + 1); + uint32_t vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf)); + + vmolr |= (IXGBE_VMOLR_ROPE | + IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE); + IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr); + + IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), 0); + + /* reset multicast table array for vf */ + vfinfo[vf].num_vf_mc_hashes = 0; + + /* reset rx mode */ + set_rx_mode(dev); + + hw->mac.ops.clear_rar(hw, rar_entry); +} + +static inline void +ixgbe_vf_reset_msg(struct rte_eth_dev *dev, uint16_t vf) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t reg; + uint32_t reg_offset, vf_shift; + const uint8_t VFRE_SHIFT = 5; /* VFRE 32 bits per slot */ + const uint8_t VFRE_MASK = (uint8_t)((1U << VFRE_SHIFT) - 1); + uint8_t nb_q_per_pool; + int i; + + vf_shift = vf & VFRE_MASK; + reg_offset = (vf >> VFRE_SHIFT) > 0 ? 1 : 0; + + /* enable transmit for vf */ + reg = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset)); + reg |= (reg | (1 << vf_shift)); + IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg); + + /* enable all queue drop for IOV */ + nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool; + for (i = vf * nb_q_per_pool; i < (vf + 1) * nb_q_per_pool; i++) { + IXGBE_WRITE_FLUSH(hw); + reg = IXGBE_QDE_ENABLE | IXGBE_QDE_WRITE; + reg |= i << IXGBE_QDE_IDX_SHIFT; + IXGBE_WRITE_REG(hw, IXGBE_QDE, reg); + } + + /* enable receive for vf */ + reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset)); + reg |= (reg | (1 << vf_shift)); + IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg); + + /* Enable counting of spoofed packets in the SSVPC register */ + reg = IXGBE_READ_REG(hw, IXGBE_VMECM(reg_offset)); + reg |= (1 << vf_shift); + IXGBE_WRITE_REG(hw, IXGBE_VMECM(reg_offset), reg); + + ixgbe_vf_reset_event(dev, vf); +} + +static int +ixgbe_disable_vf_mc_promisc(struct rte_eth_dev *dev, uint32_t vf) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t vmolr; + + vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf)); + + PMD_DRV_LOG(INFO, "VF %u: disabling multicast promiscuous\n", vf); + + vmolr &= ~IXGBE_VMOLR_MPE; + + IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr); + + return 0; +} + +static int +ixgbe_vf_reset(struct rte_eth_dev *dev, uint16_t vf, uint32_t *msgbuf) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_vf_info *vfinfo = + *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private)); + unsigned char *vf_mac = vfinfo[vf].vf_mac_addresses; + int rar_entry = hw->mac.num_rar_entries - (vf + 1); + uint8_t *new_mac = (uint8_t *)(&msgbuf[1]); + + ixgbe_vf_reset_msg(dev, vf); + + hw->mac.ops.set_rar(hw, rar_entry, vf_mac, vf, IXGBE_RAH_AV); + + /* Disable multicast promiscuous at reset */ + ixgbe_disable_vf_mc_promisc(dev, vf); + + /* reply to reset with ack and vf mac address */ + msgbuf[0] = IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK; + rte_memcpy(new_mac, vf_mac, RTE_ETHER_ADDR_LEN); + /* + * Piggyback the multicast filter type so VF can compute the + * correct vectors + */ + msgbuf[3] = hw->mac.mc_filter_type; + ixgbe_write_mbx(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN, vf); + + return 0; +} + +static int +ixgbe_vf_set_mac_addr(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + 
struct ixgbe_vf_info *vfinfo = + *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private)); + int rar_entry = hw->mac.num_rar_entries - (vf + 1); + uint8_t *new_mac = (uint8_t *)(&msgbuf[1]); + + if (rte_is_valid_assigned_ether_addr( + (struct rte_ether_addr *)new_mac)) { + rte_memcpy(vfinfo[vf].vf_mac_addresses, new_mac, 6); + return hw->mac.ops.set_rar(hw, rar_entry, new_mac, vf, IXGBE_RAH_AV); + } + return -1; +} + +static int +ixgbe_vf_set_multicast(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_vf_info *vfinfo = + *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private)); + int nb_entries = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> + IXGBE_VT_MSGINFO_SHIFT; + uint16_t *hash_list = (uint16_t *)&msgbuf[1]; + uint32_t mta_idx; + uint32_t mta_shift; + const uint32_t IXGBE_MTA_INDEX_MASK = 0x7F; + const uint32_t IXGBE_MTA_BIT_SHIFT = 5; + const uint32_t IXGBE_MTA_BIT_MASK = (0x1 << IXGBE_MTA_BIT_SHIFT) - 1; + uint32_t reg_val; + int i; + u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf)); + + /* Disable multicast promiscuous first */ + ixgbe_disable_vf_mc_promisc(dev, vf); + + /* only so many hash values supported */ + nb_entries = RTE_MIN(nb_entries, IXGBE_MAX_VF_MC_ENTRIES); + + /* store the mc entries */ + vfinfo->num_vf_mc_hashes = (uint16_t)nb_entries; + for (i = 0; i < nb_entries; i++) { + vfinfo->vf_mc_hashes[i] = hash_list[i]; + } + + if (nb_entries == 0) { + vmolr &= ~IXGBE_VMOLR_ROMPE; + IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr); + return 0; + } + + for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) { + mta_idx = (vfinfo->vf_mc_hashes[i] >> IXGBE_MTA_BIT_SHIFT) + & IXGBE_MTA_INDEX_MASK; + mta_shift = vfinfo->vf_mc_hashes[i] & IXGBE_MTA_BIT_MASK; + reg_val = IXGBE_READ_REG(hw, IXGBE_MTA(mta_idx)); + reg_val |= (1 << mta_shift); + IXGBE_WRITE_REG(hw, IXGBE_MTA(mta_idx), reg_val); + } + + vmolr |= IXGBE_VMOLR_ROMPE; + IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr); + + return 0; +} + +static int +ixgbe_vf_set_vlan(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf) +{ + int add, vid; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_vf_info *vfinfo = + *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private)); + + add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) + >> IXGBE_VT_MSGINFO_SHIFT; + vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK); + + if (add) + vfinfo[vf].vlan_count++; + else if (vfinfo[vf].vlan_count) + vfinfo[vf].vlan_count--; + return hw->mac.ops.set_vfta(hw, vid, vf, (bool)add, false); +} + +static int +ixgbe_set_vf_lpe(struct rte_eth_dev *dev, __rte_unused uint32_t vf, uint32_t *msgbuf) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t new_mtu = msgbuf[1]; + uint32_t max_frs; + uint32_t hlreg0; + int max_frame = new_mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN; + + /* X540 and X550 support jumbo frames in IOV mode */ + if (hw->mac.type != ixgbe_mac_X540 && + hw->mac.type != ixgbe_mac_X550 && + hw->mac.type != ixgbe_mac_X550EM_x && + hw->mac.type != ixgbe_mac_X550EM_a) + return -1; + + if (max_frame < RTE_ETHER_MIN_LEN || + max_frame > RTE_ETHER_MAX_JUMBO_FRAME_LEN) + return -1; + + max_frs = (IXGBE_READ_REG(hw, IXGBE_MAXFRS) & + IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT; + if (max_frs < new_mtu) { + hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); + if (new_mtu > RTE_ETHER_MAX_LEN) { + dev->data->dev_conf.rxmode.offloads |= + DEV_RX_OFFLOAD_JUMBO_FRAME; + hlreg0 |= IXGBE_HLREG0_JUMBOEN; + } else { + 
dev->data->dev_conf.rxmode.offloads &= + ~DEV_RX_OFFLOAD_JUMBO_FRAME; + hlreg0 &= ~IXGBE_HLREG0_JUMBOEN; + } + IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); + + max_frs = new_mtu << IXGBE_MHADD_MFS_SHIFT; + IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, max_frs); + } + + return 0; +} + +static int +ixgbe_negotiate_vf_api(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf) +{ + uint32_t api_version = msgbuf[1]; + struct ixgbe_vf_info *vfinfo = + *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private); + + switch (api_version) { + case ixgbe_mbox_api_10: + case ixgbe_mbox_api_11: + case ixgbe_mbox_api_12: + case ixgbe_mbox_api_13: + vfinfo[vf].api_version = (uint8_t)api_version; + return 0; + default: + break; + } + + PMD_DRV_LOG(ERR, "Negotiate invalid api version %u from VF %d\n", + api_version, vf); + + return -1; +} + +static int +ixgbe_get_vf_queues(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf) +{ + struct ixgbe_vf_info *vfinfo = + *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private); + uint32_t default_q = vf * RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool; + struct rte_eth_conf *eth_conf; + struct rte_eth_vmdq_dcb_tx_conf *vmdq_dcb_tx_conf; + u8 num_tcs; + struct ixgbe_hw *hw; + u32 vmvir; +#define IXGBE_VMVIR_VLANA_MASK 0xC0000000 +#define IXGBE_VMVIR_VLAN_VID_MASK 0x00000FFF +#define IXGBE_VMVIR_VLAN_UP_MASK 0x0000E000 +#define VLAN_PRIO_SHIFT 13 + u32 vlana; + u32 vid; + u32 user_priority; + + /* Verify if the PF supports the mbox APIs version or not */ + switch (vfinfo[vf].api_version) { + case ixgbe_mbox_api_20: + case ixgbe_mbox_api_11: + case ixgbe_mbox_api_12: + case ixgbe_mbox_api_13: + break; + default: + return -1; + } + + /* Notify VF of Rx and Tx queue number */ + msgbuf[IXGBE_VF_RX_QUEUES] = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool; + msgbuf[IXGBE_VF_TX_QUEUES] = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool; + + /* Notify VF of default queue */ + msgbuf[IXGBE_VF_DEF_QUEUE] = default_q; + + /* Notify VF of number of DCB traffic classes */ + eth_conf = &dev->data->dev_conf; + switch (eth_conf->txmode.mq_mode) { + case ETH_MQ_TX_NONE: + case ETH_MQ_TX_DCB: + PMD_DRV_LOG(ERR, "PF must work with virtualization for VF %u" + ", but its tx mode = %d\n", vf, + eth_conf->txmode.mq_mode); + return -1; + + case ETH_MQ_TX_VMDQ_DCB: + vmdq_dcb_tx_conf = ð_conf->tx_adv_conf.vmdq_dcb_tx_conf; + switch (vmdq_dcb_tx_conf->nb_queue_pools) { + case ETH_16_POOLS: + num_tcs = ETH_8_TCS; + break; + case ETH_32_POOLS: + num_tcs = ETH_4_TCS; + break; + default: + return -1; + } + break; + + /* ETH_MQ_TX_VMDQ_ONLY, DCB not enabled */ + case ETH_MQ_TX_VMDQ_ONLY: + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + vmvir = IXGBE_READ_REG(hw, IXGBE_VMVIR(vf)); + vlana = vmvir & IXGBE_VMVIR_VLANA_MASK; + vid = vmvir & IXGBE_VMVIR_VLAN_VID_MASK; + user_priority = + (vmvir & IXGBE_VMVIR_VLAN_UP_MASK) >> VLAN_PRIO_SHIFT; + if ((vlana == IXGBE_VMVIR_VLANA_DEFAULT) && + ((vid != 0) || (user_priority != 0))) + num_tcs = 1; + else + num_tcs = 0; + break; + + default: + PMD_DRV_LOG(ERR, "PF work with invalid mode = %d\n", + eth_conf->txmode.mq_mode); + return -1; + } + msgbuf[IXGBE_VF_TRANS_VLAN] = num_tcs; + + return 0; +} + +static int +ixgbe_set_vf_mc_promisc(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf) +{ + struct ixgbe_vf_info *vfinfo = + *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private)); + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int xcast_mode = msgbuf[1]; /* msgbuf contains the flag to enable */ + u32 vmolr, fctrl, disable, enable; + + switch (vfinfo[vf].api_version) 
{ + case ixgbe_mbox_api_12: + /* promisc introduced in 1.3 version */ + if (xcast_mode == IXGBEVF_XCAST_MODE_PROMISC) + return -EOPNOTSUPP; + break; + /* Fall threw */ + case ixgbe_mbox_api_13: + break; + default: + return -1; + } + + if (vfinfo[vf].xcast_mode == xcast_mode) + goto out; + + switch (xcast_mode) { + case IXGBEVF_XCAST_MODE_NONE: + disable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE | + IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE; + enable = 0; + break; + case IXGBEVF_XCAST_MODE_MULTI: + disable = IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE; + enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE; + break; + case IXGBEVF_XCAST_MODE_ALLMULTI: + disable = IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE; + enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_MPE; + break; + case IXGBEVF_XCAST_MODE_PROMISC: + if (hw->mac.type <= ixgbe_mac_82599EB) + return -1; + + fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); + if (!(fctrl & IXGBE_FCTRL_UPE)) { + /* VF promisc requires PF in promisc */ + PMD_DRV_LOG(ERR, + "Enabling VF promisc requires PF in promisc\n"); + return -1; + } + + disable = 0; + enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE | + IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE; + break; + default: + return -1; + } + + vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf)); + vmolr &= ~disable; + vmolr |= enable; + IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr); + vfinfo[vf].xcast_mode = xcast_mode; + +out: + msgbuf[1] = xcast_mode; + + return 0; +} + +static int +ixgbe_set_vf_macvlan_msg(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_vf_info *vf_info = + *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private)); + uint8_t *new_mac = (uint8_t *)(&msgbuf[1]); + int index = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> + IXGBE_VT_MSGINFO_SHIFT; + + if (index) { + if (!rte_is_valid_assigned_ether_addr( + (struct rte_ether_addr *)new_mac)) { + PMD_DRV_LOG(ERR, "set invalid mac vf:%d\n", vf); + return -1; + } + + vf_info[vf].mac_count++; + + hw->mac.ops.set_rar(hw, vf_info[vf].mac_count, + new_mac, vf, IXGBE_RAH_AV); + } else { + if (vf_info[vf].mac_count) { + hw->mac.ops.clear_rar(hw, vf_info[vf].mac_count); + vf_info[vf].mac_count = 0; + } + } + return 0; +} + +static int +ixgbe_rcv_msg_from_vf(struct rte_eth_dev *dev, uint16_t vf) +{ + uint16_t mbx_size = IXGBE_VFMAILBOX_SIZE; + uint16_t msg_size = IXGBE_VF_MSG_SIZE_DEFAULT; + uint32_t msgbuf[IXGBE_VFMAILBOX_SIZE]; + int32_t retval; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_vf_info *vfinfo = + *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private); + struct rte_pmd_ixgbe_mb_event_param ret_param; + + retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf); + if (retval) { + PMD_DRV_LOG(ERR, "Error mbx recv msg from VF %d", vf); + return retval; + } + + /* do nothing with the message already been processed */ + if (msgbuf[0] & (IXGBE_VT_MSGTYPE_ACK | IXGBE_VT_MSGTYPE_NACK)) + return retval; + + /* flush the ack before we write any messages back */ + IXGBE_WRITE_FLUSH(hw); + + /** + * initialise structure to send to user application + * will return response from user in retval field + */ + ret_param.retval = RTE_PMD_IXGBE_MB_EVENT_PROCEED; + ret_param.vfid = vf; + ret_param.msg_type = msgbuf[0] & 0xFFFF; + ret_param.msg = (void *)msgbuf; + + /* perform VF reset */ + if (msgbuf[0] == IXGBE_VF_RESET) { + int ret = ixgbe_vf_reset(dev, vf, msgbuf); + + vfinfo[vf].clear_to_send = true; + + /* notify application about VF 
reset */ + _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_VF_MBOX, + &ret_param); + return ret; + } + + /** + * ask user application if we allowed to perform those functions + * if we get ret_param.retval == RTE_PMD_IXGBE_MB_EVENT_PROCEED + * then business as usual, + * if 0, do nothing and send ACK to VF + * if ret_param.retval > 1, do nothing and send NAK to VF + */ + _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_VF_MBOX, + &ret_param); + + retval = ret_param.retval; + + /* check & process VF to PF mailbox message */ + switch ((msgbuf[0] & 0xFFFF)) { + case IXGBE_VF_SET_MAC_ADDR: + if (retval == RTE_PMD_IXGBE_MB_EVENT_PROCEED) + retval = ixgbe_vf_set_mac_addr(dev, vf, msgbuf); + break; + case IXGBE_VF_SET_MULTICAST: + if (retval == RTE_PMD_IXGBE_MB_EVENT_PROCEED) + retval = ixgbe_vf_set_multicast(dev, vf, msgbuf); + break; + case IXGBE_VF_SET_LPE: + if (retval == RTE_PMD_IXGBE_MB_EVENT_PROCEED) + retval = ixgbe_set_vf_lpe(dev, vf, msgbuf); + break; + case IXGBE_VF_SET_VLAN: + if (retval == RTE_PMD_IXGBE_MB_EVENT_PROCEED) + retval = ixgbe_vf_set_vlan(dev, vf, msgbuf); + break; + case IXGBE_VF_API_NEGOTIATE: + retval = ixgbe_negotiate_vf_api(dev, vf, msgbuf); + break; + case IXGBE_VF_GET_QUEUES: + retval = ixgbe_get_vf_queues(dev, vf, msgbuf); + msg_size = IXGBE_VF_GET_QUEUE_MSG_SIZE; + break; + case IXGBE_VF_UPDATE_XCAST_MODE: + if (retval == RTE_PMD_IXGBE_MB_EVENT_PROCEED) + retval = ixgbe_set_vf_mc_promisc(dev, vf, msgbuf); + break; + case IXGBE_VF_SET_MACVLAN: + if (retval == RTE_PMD_IXGBE_MB_EVENT_PROCEED) + retval = ixgbe_set_vf_macvlan_msg(dev, vf, msgbuf); + break; + default: + PMD_DRV_LOG(DEBUG, "Unhandled Msg %8.8x", (unsigned)msgbuf[0]); + retval = IXGBE_ERR_MBX; + break; + } + + /* response the VF according to the message process result */ + if (retval) + msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK; + else + msgbuf[0] |= IXGBE_VT_MSGTYPE_ACK; + + msgbuf[0] |= IXGBE_VT_MSGTYPE_CTS; + + ixgbe_write_mbx(hw, msgbuf, msg_size, vf); + + return retval; +} + +static inline void +ixgbe_rcv_ack_from_vf(struct rte_eth_dev *dev, uint16_t vf) +{ + uint32_t msg = IXGBE_VT_MSGTYPE_NACK; + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_vf_info *vfinfo = + *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private); + + if (!vfinfo[vf].clear_to_send) + ixgbe_write_mbx(hw, &msg, 1, vf); +} + +void ixgbe_pf_mbx_process(struct rte_eth_dev *eth_dev) +{ + uint16_t vf; + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); + + for (vf = 0; vf < dev_num_vf(eth_dev); vf++) { + /* check & process vf function level reset */ + if (!ixgbe_check_for_rst(hw, vf)) + ixgbe_vf_reset_event(eth_dev, vf); + + /* check & process vf mailbox messages */ + if (!ixgbe_check_for_msg(hw, vf)) + ixgbe_rcv_msg_from_vf(eth_dev, vf); + + /* check & process acks from vf */ + if (!ixgbe_check_for_ack(hw, vf)) + ixgbe_rcv_ack_from_vf(eth_dev, vf); + } +} diff --git a/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_regs.h b/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_regs.h new file mode 100644 index 000000000..9c9533708 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_regs.h @@ -0,0 +1,347 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2015 Intel Corporation + */ +#ifndef _IXGBE_REGS_H_ +#define _IXGBE_REGS_H_ + +#include "ixgbe_ethdev.h" + +struct ixgbe_hw; +struct reg_info { + uint32_t base_addr; + uint32_t count; + uint32_t stride; + const char *name; +}; + +static const struct reg_info ixgbe_regs_general[] = { + {IXGBE_CTRL, 1, 1, "IXGBE_CTRL"}, + 
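/*
+ * Each reg_info entry describes a block of 'count' registers that
+ * starts at 'base_addr' and is spaced 'stride' bytes apart; an
+ * all-zero entry terminates a group.  For example, the interrupt
+ * group entry {IXGBE_EITR(0), 24, 4, "IXGBE_EITR"} dumps 24
+ * consecutive EITR registers 4 bytes apart.  ixgbe_read_regs() and
+ * ixgbe_regs_group_count() below walk these tables until they hit
+ * the terminator.
+ */
+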
{IXGBE_STATUS, 1, 1, "IXGBE_STATUS"}, + {IXGBE_CTRL_EXT, 1, 1, "IXGBE_CTRL_EXT"}, + {IXGBE_ESDP, 1, 1, "IXGBE_ESDP"}, + {IXGBE_EODSDP, 1, 1, "IXGBE_EODSDP"}, + {IXGBE_LEDCTL, 1, 1, "IXGBE_LEDCTL"}, + {IXGBE_FRTIMER, 1, 1, "IXGBE_FRTIMER"}, + {IXGBE_TCPTIMER, 1, 1, "IXGBE_TCPTIMER"}, + {0, 0, 0, ""} +}; + +static const struct reg_info ixgbevf_regs_general[] = { + {IXGBE_VFCTRL, 1, 1, "IXGBE_VFCTRL"}, + {IXGBE_VFSTATUS, 1, 1, "IXGBE_VFSTATUS"}, + {IXGBE_VFLINKS, 1, 1, "IXGBE_VFLINKS"}, + {IXGBE_VFFRTIMER, 1, 1, "IXGBE_VFFRTIMER"}, + {IXGBE_VFMAILBOX, 1, 1, "IXGBE_VFMAILBOX"}, + {IXGBE_VFMBMEM, 16, 4, "IXGBE_VFMBMEM"}, + {IXGBE_VFRXMEMWRAP, 1, 1, "IXGBE_VFRXMEMWRAP"}, + {0, 0, 0, ""} +}; + +static const struct reg_info ixgbe_regs_nvm[] = { + {IXGBE_EEC, 1, 1, "IXGBE_EEC"}, + {IXGBE_EERD, 1, 1, "IXGBE_EERD"}, + {IXGBE_FLA, 1, 1, "IXGBE_FLA"}, + {IXGBE_EEMNGCTL, 1, 1, "IXGBE_EEMNGCTL"}, + {IXGBE_EEMNGDATA, 1, 1, "IXGBE_EEMNGDATA"}, + {IXGBE_FLMNGCTL, 1, 1, "IXGBE_FLMNGCTL"}, + {IXGBE_FLMNGDATA, 1, 1, "IXGBE_FLMNGDATA"}, + {IXGBE_FLMNGCNT, 1, 1, "IXGBE_FLMNGCNT"}, + {IXGBE_FLOP, 1, 1, "IXGBE_FLOP"}, + {IXGBE_GRC, 1, 1, "IXGBE_GRC"}, + {0, 0, 0, ""} +}; + +static const struct reg_info ixgbe_regs_interrupt[] = { + {IXGBE_EICS, 1, 1, "IXGBE_EICS"}, + {IXGBE_EIMS, 1, 1, "IXGBE_EIMS"}, + {IXGBE_EIMC, 1, 1, "IXGBE_EIMC"}, + {IXGBE_EIAC, 1, 1, "IXGBE_EIAC"}, + {IXGBE_EIAM, 1, 1, "IXGBE_EIAM"}, + {IXGBE_EITR(0), 24, 4, "IXGBE_EITR"}, + {IXGBE_IVAR(0), 24, 4, "IXGBE_IVAR"}, + {IXGBE_MSIXT, 1, 1, "IXGBE_MSIXT"}, + {IXGBE_MSIXPBA, 1, 1, "IXGBE_MSIXPBA"}, + {IXGBE_PBACL(0), 1, 4, "IXGBE_PBACL"}, + {IXGBE_GPIE, 1, 1, ""}, + {0, 0, 0, ""} +}; + +static const struct reg_info ixgbevf_regs_interrupt[] = { + {IXGBE_VTEICR, 1, 1, "IXGBE_VTEICR"}, + {IXGBE_VTEICS, 1, 1, "IXGBE_VTEICS"}, + {IXGBE_VTEIMS, 1, 1, "IXGBE_VTEIMS"}, + {IXGBE_VTEIMC, 1, 1, "IXGBE_VTEIMC"}, + {IXGBE_VTEIAM, 1, 1, "IXGBE_VTEIAM"}, + {IXGBE_VTEITR(0), 2, 4, "IXGBE_VTEITR"}, + {IXGBE_VTIVAR(0), 4, 4, "IXGBE_VTIVAR"}, + {IXGBE_VTIVAR_MISC, 1, 1, "IXGBE_VTIVAR_MISC"}, + {IXGBE_VTRSCINT(0), 2, 4, "IXGBE_VTRSCINT"}, + {0, 0, 0, ""} +}; + +static const struct reg_info ixgbe_regs_fctl_mac_82598EB[] = { + {IXGBE_PFCTOP, 1, 1, ""}, + {IXGBE_FCTTV(0), 4, 4, ""}, + {IXGBE_FCRTV, 1, 1, ""}, + {IXGBE_TFCS, 1, 1, ""}, + {IXGBE_FCRTL(0), 8, 8, "IXGBE_FCRTL"}, + {IXGBE_FCRTH(0), 8, 8, "IXGBE_FCRTH"}, + {0, 0, 0, ""} +}; + +static const struct reg_info ixgbe_regs_fctl_others[] = { + {IXGBE_PFCTOP, 1, 1, ""}, + {IXGBE_FCTTV(0), 4, 4, ""}, + {IXGBE_FCRTV, 1, 1, ""}, + {IXGBE_TFCS, 1, 1, ""}, + {IXGBE_FCRTL_82599(0), 8, 4, "IXGBE_FCRTL"}, + {IXGBE_FCRTH_82599(0), 8, 4, "IXGBE_FCRTH"}, + {0, 0, 0, ""} +}; + +static const struct reg_info ixgbe_regs_rxdma[] = { + {IXGBE_RDBAL(0), 64, 0x40, "IXGBE_RDBAL"}, + {IXGBE_RDBAH(0), 64, 0x40, "IXGBE_RDBAH"}, + {IXGBE_RDLEN(0), 64, 0x40, "IXGBE_RDLEN"}, + {IXGBE_RDH(0), 64, 0x40, "IXGBE_RDH"}, + {IXGBE_RDT(0), 64, 0x40, "IXGBE_RDT"}, + {IXGBE_RXDCTL(0), 64, 0x40, "IXGBE_RXDCTL"}, + {IXGBE_SRRCTL(0), 16, 0x4, "IXGBE_SRRCTL"}, + {IXGBE_DCA_RXCTRL(0), 16, 4, "IXGBE_DCA_RXCTRL"}, + {IXGBE_RDRXCTL, 1, 1, "IXGBE_RDRXCTL"}, + {IXGBE_RXPBSIZE(0), 8, 4, "IXGBE_RXPBSIZE"}, + {IXGBE_RXCTRL, 1, 1, "IXGBE_RXCTRL"}, + {IXGBE_DROPEN, 1, 1, "IXGBE_DROPEN"}, + {0, 0, 0, ""} +}; + +static const struct reg_info ixgbevf_regs_rxdma[] = { + {IXGBE_VFRDBAL(0), 8, 0x40, "IXGBE_VFRDBAL"}, + {IXGBE_VFRDBAH(0), 8, 0x40, "IXGBE_VFRDBAH"}, + {IXGBE_VFRDLEN(0), 8, 0x40, "IXGBE_VFRDLEN"}, + {IXGBE_VFRDH(0), 8, 0x40, "IXGBE_VFRDH"}, + {IXGBE_VFRDT(0), 8, 
0x40, "IXGBE_VFRDT"}, + {IXGBE_VFRXDCTL(0), 8, 0x40, "IXGBE_VFRXDCTL"}, + {IXGBE_VFSRRCTL(0), 8, 0x40, "IXGBE_VFSRRCTL"}, + {IXGBE_VFPSRTYPE, 1, 1, "IXGBE_VFPSRTYPE"}, + {IXGBE_VFRSCCTL(0), 8, 0x40, "IXGBE_VFRSCCTL"}, + {IXGBE_VFDCA_RXCTRL(0), 8, 0x40, "IXGBE_VFDCA_RXCTRL"}, + {IXGBE_VFDCA_TXCTRL(0), 8, 0x40, "IXGBE_VFDCA_TXCTRL"}, + {0, 0, 0, ""} +}; + +static const struct reg_info ixgbe_regs_rx[] = { + {IXGBE_RXCSUM, 1, 1, "IXGBE_RXCSUM"}, + {IXGBE_RFCTL, 1, 1, "IXGBE_RFCTL"}, + {IXGBE_RAL(0), 16, 8, "IXGBE_RAL"}, + {IXGBE_RAH(0), 16, 8, "IXGBE_RAH"}, + {IXGBE_PSRTYPE(0), 1, 4, "IXGBE_PSRTYPE"}, + {IXGBE_FCTRL, 1, 1, "IXGBE_FCTRL"}, + {IXGBE_VLNCTRL, 1, 1, "IXGBE_VLNCTRL"}, + {IXGBE_MCSTCTRL, 1, 1, "IXGBE_MCSTCTRL"}, + {IXGBE_MRQC, 1, 1, "IXGBE_MRQC"}, + {IXGBE_VMD_CTL, 1, 1, "IXGBE_VMD_CTL"}, + {IXGBE_IMIR(0), 8, 4, "IXGBE_IMIR"}, + {IXGBE_IMIREXT(0), 8, 4, "IXGBE_IMIREXT"}, + {IXGBE_IMIRVP, 1, 1, "IXGBE_IMIRVP"}, + {0, 0, 0, ""} +}; + +static struct reg_info ixgbe_regs_tx[] = { + {IXGBE_TDBAL(0), 32, 0x40, "IXGBE_TDBAL"}, + {IXGBE_TDBAH(0), 32, 0x40, "IXGBE_TDBAH"}, + {IXGBE_TDLEN(0), 32, 0x40, "IXGBE_TDLEN"}, + {IXGBE_TDH(0), 32, 0x40, "IXGBE_TDH"}, + {IXGBE_TDT(0), 32, 0x40, "IXGBE_TDT"}, + {IXGBE_TXDCTL(0), 32, 0x40, "IXGBE_TXDCTL"}, + {IXGBE_TDWBAL(0), 32, 0x40, "IXGBE_TDWBAL"}, + {IXGBE_TDWBAH(0), 32, 0x40, "IXGBE_TDWBAH"}, + {IXGBE_DTXCTL, 1, 1, "IXGBE_DTXCTL"}, + {IXGBE_DCA_TXCTRL(0), 16, 4, "IXGBE_DCA_TXCTRL"}, + {IXGBE_TXPBSIZE(0), 8, 4, "IXGBE_TXPBSIZE"}, + {IXGBE_MNGTXMAP, 1, 1, "IXGBE_MNGTXMAP"}, + {0, 0, 0, ""} +}; + +static const struct reg_info ixgbevf_regs_tx[] = { + {IXGBE_VFTDBAL(0), 4, 0x40, "IXGBE_VFTDBAL"}, + {IXGBE_VFTDBAH(0), 4, 0x40, "IXGBE_VFTDBAH"}, + {IXGBE_VFTDLEN(0), 4, 0x40, "IXGBE_VFTDLEN"}, + {IXGBE_VFTDH(0), 4, 0x40, "IXGBE_VFTDH"}, + {IXGBE_VFTDT(0), 4, 0x40, "IXGBE_VFTDT"}, + {IXGBE_VFTXDCTL(0), 4, 0x40, "IXGBE_VFTXDCTL"}, + {IXGBE_VFTDWBAL(0), 4, 0x40, "IXGBE_VFTDWBAL"}, + {IXGBE_VFTDWBAH(0), 4, 0x40, "IXGBE_VFTDWBAH"}, + {0, 0, 0, ""} +}; + +static const struct reg_info ixgbe_regs_wakeup[] = { + {IXGBE_WUC, 1, 1, "IXGBE_WUC"}, + {IXGBE_WUFC, 1, 1, "IXGBE_WUFC"}, + {IXGBE_WUS, 1, 1, "IXGBE_WUS"}, + {IXGBE_IPAV, 1, 1, "IXGBE_IPAV"}, + {IXGBE_IP4AT, 1, 1, "IXGBE_IP4AT"}, + {IXGBE_IP6AT, 1, 1, "IXGBE_IP6AT"}, + {IXGBE_WUPL, 1, 1, "IXGBE_WUPL"}, + {IXGBE_WUPM, 1, 1, "IXGBE_WUPM"}, + {IXGBE_FHFT(0), 1, 1, "IXGBE_FHFT"}, + {0, 0, 0, ""} +}; + +static const struct reg_info ixgbe_regs_dcb[] = { + {IXGBE_RMCS, 1, 1, "IXGBE_RMCS"}, + {IXGBE_DPMCS, 1, 1, "IXGBE_DPMCS"}, + {IXGBE_PDPMCS, 1, 1, "IXGBE_PDPMCS"}, + {IXGBE_RUPPBMR, 1, 1, "IXGBE_RUPPBMR"}, + {IXGBE_RT2CR(0), 8, 4, "IXGBE_RT2CR"}, + {IXGBE_RT2SR(0), 8, 4, "IXGBE_RT2SR"}, + {IXGBE_TDTQ2TCCR(0), 8, 0x40, "IXGBE_TDTQ2TCCR"}, + {IXGBE_TDTQ2TCSR(0), 8, 0x40, "IXGBE_TDTQ2TCSR"}, + {IXGBE_TDPT2TCCR(0), 8, 4, "IXGBE_TDPT2TCCR"}, + {IXGBE_TDPT2TCSR(0), 8, 4, "IXGBE_TDPT2TCSR"}, + {0, 0, 0, ""} +}; + +static const struct reg_info ixgbe_regs_mac[] = { + {IXGBE_PCS1GCFIG, 1, 1, "IXGBE_PCS1GCFIG"}, + {IXGBE_PCS1GLCTL, 1, 1, "IXGBE_PCS1GLCTL"}, + {IXGBE_PCS1GLSTA, 1, 1, "IXGBE_PCS1GLSTA"}, + {IXGBE_PCS1GDBG0, 1, 1, "IXGBE_PCS1GDBG0"}, + {IXGBE_PCS1GDBG1, 1, 1, "IXGBE_PCS1GDBG1"}, + {IXGBE_PCS1GANA, 1, 1, "IXGBE_PCS1GANA"}, + {IXGBE_PCS1GANLP, 1, 1, "IXGBE_PCS1GANLP"}, + {IXGBE_PCS1GANNP, 1, 1, "IXGBE_PCS1GANNP"}, + {IXGBE_PCS1GANLPNP, 1, 1, "IXGBE_PCS1GANLPNP"}, + {IXGBE_HLREG0, 1, 1, "IXGBE_HLREG0"}, + {IXGBE_HLREG1, 1, 1, "IXGBE_HLREG1"}, + {IXGBE_PAP, 1, 1, "IXGBE_PAP"}, + {IXGBE_MACA, 1, 1, "IXGBE_MACA"}, + 
{IXGBE_APAE, 1, 1, "IXGBE_APAE"}, + {IXGBE_ARD, 1, 1, "IXGBE_ARD"}, + {IXGBE_AIS, 1, 1, "IXGBE_AIS"}, + {IXGBE_MSCA, 1, 1, "IXGBE_MSCA"}, + {IXGBE_MSRWD, 1, 1, "IXGBE_MSRWD"}, + {IXGBE_MLADD, 1, 1, "IXGBE_MLADD"}, + {IXGBE_MHADD, 1, 1, "IXGBE_MHADD"}, + {IXGBE_TREG, 1, 1, "IXGBE_TREG"}, + {IXGBE_PCSS1, 1, 1, "IXGBE_PCSS1"}, + {IXGBE_PCSS2, 1, 1, "IXGBE_PCSS2"}, + {IXGBE_XPCSS, 1, 1, "IXGBE_XPCSS"}, + {IXGBE_SERDESC, 1, 1, "IXGBE_SERDESC"}, + {IXGBE_MACS, 1, 1, "IXGBE_MACS"}, + {IXGBE_AUTOC, 1, 1, "IXGBE_AUTOC"}, + {IXGBE_LINKS, 1, 1, "IXGBE_LINKS"}, + {IXGBE_AUTOC2, 1, 1, "IXGBE_AUTOC2"}, + {IXGBE_AUTOC3, 1, 1, "IXGBE_AUTOC3"}, + {IXGBE_ANLP1, 1, 1, "IXGBE_ANLP1"}, + {IXGBE_ANLP2, 1, 1, "IXGBE_ANLP2"}, + {IXGBE_ATLASCTL, 1, 1, "IXGBE_ATLASCTL"}, + {0, 0, 0, ""} +}; + +static const struct reg_info ixgbe_regs_diagnostic[] = { + {IXGBE_RDSTATCTL, 1, 1, "IXGBE_RDSTATCTL"}, + {IXGBE_RDSTAT(0), 8, 4, "IXGBE_RDSTAT"}, + {IXGBE_RDHMPN, 1, 1, "IXGBE_RDHMPN"}, + {IXGBE_RIC_DW(0), 4, 4, "IXGBE_RIC_DW"}, + {IXGBE_RDPROBE, 1, 1, "IXGBE_RDPROBE"}, + {IXGBE_TDHMPN, 1, 1, "IXGBE_TDHMPN"}, + {IXGBE_TIC_DW(0), 4, 4, "IXGBE_TIC_DW"}, + {IXGBE_TDPROBE, 1, 1, "IXGBE_TDPROBE"}, + {IXGBE_TXBUFCTRL, 1, 1, "IXGBE_TXBUFCTRL"}, + {IXGBE_TXBUFDATA0, 1, 1, "IXGBE_TXBUFDATA0"}, + {IXGBE_TXBUFDATA1, 1, 1, "IXGBE_TXBUFDATA1"}, + {IXGBE_TXBUFDATA2, 1, 1, "IXGBE_TXBUFDATA2"}, + {IXGBE_TXBUFDATA3, 1, 1, "IXGBE_TXBUFDATA3"}, + {IXGBE_RXBUFCTRL, 1, 1, "IXGBE_RXBUFCTRL"}, + {IXGBE_RXBUFDATA0, 1, 1, "IXGBE_RXBUFDATA0"}, + {IXGBE_RXBUFDATA1, 1, 1, "IXGBE_RXBUFDATA1"}, + {IXGBE_RXBUFDATA2, 1, 1, "IXGBE_RXBUFDATA2"}, + {IXGBE_RXBUFDATA3, 1, 1, "IXGBE_RXBUFDATA3"}, + {IXGBE_PCIE_DIAG(0), 8, 4, ""}, + {IXGBE_RFVAL, 1, 1, "IXGBE_RFVAL"}, + {IXGBE_MDFTC1, 1, 1, "IXGBE_MDFTC1"}, + {IXGBE_MDFTC2, 1, 1, "IXGBE_MDFTC2"}, + {IXGBE_MDFTFIFO1, 1, 1, "IXGBE_MDFTFIFO1"}, + {IXGBE_MDFTFIFO2, 1, 1, "IXGBE_MDFTFIFO2"}, + {IXGBE_MDFTS, 1, 1, "IXGBE_MDFTS"}, + {IXGBE_PCIEECCCTL, 1, 1, "IXGBE_PCIEECCCTL"}, + {IXGBE_PBTXECC, 1, 1, "IXGBE_PBTXECC"}, + {IXGBE_PBRXECC, 1, 1, "IXGBE_PBRXECC"}, + {IXGBE_MFLCN, 1, 1, "IXGBE_MFLCN"}, + {0, 0, 0, ""}, +}; + +/* PF registers */ +static const struct reg_info *ixgbe_regs_others[] = { + ixgbe_regs_general, + ixgbe_regs_nvm, ixgbe_regs_interrupt, + ixgbe_regs_fctl_others, + ixgbe_regs_rxdma, + ixgbe_regs_rx, + ixgbe_regs_tx, + ixgbe_regs_wakeup, + ixgbe_regs_dcb, + ixgbe_regs_mac, + ixgbe_regs_diagnostic, + NULL}; + +static const struct reg_info *ixgbe_regs_mac_82598EB[] = { + ixgbe_regs_general, + ixgbe_regs_nvm, + ixgbe_regs_interrupt, + ixgbe_regs_fctl_mac_82598EB, + ixgbe_regs_rxdma, + ixgbe_regs_rx, + ixgbe_regs_tx, + ixgbe_regs_wakeup, + ixgbe_regs_dcb, + ixgbe_regs_mac, + ixgbe_regs_diagnostic, + NULL}; + +/* VF registers */ +static const struct reg_info *ixgbevf_regs[] = { + ixgbevf_regs_general, + ixgbevf_regs_interrupt, + ixgbevf_regs_rxdma, + ixgbevf_regs_tx, + NULL}; + +static inline int +ixgbe_read_regs(struct ixgbe_hw *hw, const struct reg_info *reg, + uint32_t *reg_buf) +{ + unsigned int i; + + for (i = 0; i < reg->count; i++) + reg_buf[i] = IXGBE_READ_REG(hw, + reg->base_addr + i * reg->stride); + return reg->count; +}; + +static inline int +ixgbe_regs_group_count(const struct reg_info *regs) +{ + int count = 0; + int i = 0; + + while (regs[i].count) + count += regs[i++].count; + return count; +}; + +static inline int +ixgbe_read_regs_group(struct rte_eth_dev *dev, uint32_t *reg_buf, + const struct reg_info *regs) +{ + int count = 0; + int i = 0; + struct ixgbe_hw *hw = 
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + while (regs[i].count) + count += ixgbe_read_regs(hw, ®s[i++], ®_buf[count]); + return count; +}; + +#endif /* _IXGBE_REGS_H_ */ diff --git a/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_rxtx.c b/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_rxtx.c new file mode 100644 index 000000000..2e20e18c7 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_rxtx.c @@ -0,0 +1,5967 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2016 Intel Corporation. + * Copyright 2014 6WIND S.A. + */ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ixgbe_logs.h" +#include "base/ixgbe_api.h" +#include "base/ixgbe_vf.h" +#include "ixgbe_ethdev.h" +#include "base/ixgbe_dcb.h" +#include "base/ixgbe_common.h" +#include "ixgbe_rxtx.h" + +#ifdef RTE_LIBRTE_IEEE1588 +#define IXGBE_TX_IEEE1588_TMST PKT_TX_IEEE1588_TMST +#else +#define IXGBE_TX_IEEE1588_TMST 0 +#endif +/* Bit Mask to indicate what bits required for building TX context */ +#define IXGBE_TX_OFFLOAD_MASK ( \ + PKT_TX_OUTER_IPV6 | \ + PKT_TX_OUTER_IPV4 | \ + PKT_TX_IPV6 | \ + PKT_TX_IPV4 | \ + PKT_TX_VLAN_PKT | \ + PKT_TX_IP_CKSUM | \ + PKT_TX_L4_MASK | \ + PKT_TX_TCP_SEG | \ + PKT_TX_MACSEC | \ + PKT_TX_OUTER_IP_CKSUM | \ + PKT_TX_SEC_OFFLOAD | \ + IXGBE_TX_IEEE1588_TMST) + +#define IXGBE_TX_OFFLOAD_NOTSUP_MASK \ + (PKT_TX_OFFLOAD_MASK ^ IXGBE_TX_OFFLOAD_MASK) + +#if 1 +#define RTE_PMD_USE_PREFETCH +#endif + +#ifdef RTE_PMD_USE_PREFETCH +/* + * Prefetch a cache line into all cache levels. + */ +#define rte_ixgbe_prefetch(p) rte_prefetch0(p) +#else +#define rte_ixgbe_prefetch(p) do {} while (0) +#endif + +/********************************************************************* + * + * TX functions + * + **********************************************************************/ + +/* + * Check for descriptors with their DD bit set and free mbufs. + * Return the total number of buffers freed. 
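+ * Nothing is reclaimed until the descriptor at tx_next_dd reports its
+ * DD bit; a full tx_rs_thresh worth of entries is then freed in one
+ * pass, with mbufs from the same mempool batched into
+ * rte_mempool_put_bulk() calls.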
+ */ +static __rte_always_inline int +ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq) +{ + struct ixgbe_tx_entry *txep; + uint32_t status; + int i, nb_free = 0; + struct rte_mbuf *m, *free[RTE_IXGBE_TX_MAX_FREE_BUF_SZ]; + + /* check DD bit on threshold descriptor */ + status = txq->tx_ring[txq->tx_next_dd].wb.status; + if (!(status & rte_cpu_to_le_32(IXGBE_ADVTXD_STAT_DD))) + return 0; + + /* + * first buffer to free from S/W ring is at index + * tx_next_dd - (tx_rs_thresh-1) + */ + txep = &(txq->sw_ring[txq->tx_next_dd - (txq->tx_rs_thresh - 1)]); + + for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) { + /* free buffers one at a time */ + m = rte_pktmbuf_prefree_seg(txep->mbuf); + txep->mbuf = NULL; + + if (unlikely(m == NULL)) + continue; + + if (nb_free >= RTE_IXGBE_TX_MAX_FREE_BUF_SZ || + (nb_free > 0 && m->pool != free[0]->pool)) { + rte_mempool_put_bulk(free[0]->pool, + (void **)free, nb_free); + nb_free = 0; + } + + free[nb_free++] = m; + } + + if (nb_free > 0) + rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free); + + /* buffers were freed, update counters */ + txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh); + txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh); + if (txq->tx_next_dd >= txq->nb_tx_desc) + txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1); + + return txq->tx_rs_thresh; +} + +/* Populate 4 descriptors with data from 4 mbufs */ +static inline void +tx4(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts) +{ + uint64_t buf_dma_addr; + uint32_t pkt_len; + int i; + + for (i = 0; i < 4; ++i, ++txdp, ++pkts) { + buf_dma_addr = rte_mbuf_data_iova(*pkts); + pkt_len = (*pkts)->data_len; + + /* write data to descriptor */ + txdp->read.buffer_addr = rte_cpu_to_le_64(buf_dma_addr); + + txdp->read.cmd_type_len = + rte_cpu_to_le_32((uint32_t)DCMD_DTYP_FLAGS | pkt_len); + + txdp->read.olinfo_status = + rte_cpu_to_le_32(pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT); + + rte_prefetch0(&(*pkts)->pool); + } +} + +/* Populate 1 descriptor with data from 1 mbuf */ +static inline void +tx1(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts) +{ + uint64_t buf_dma_addr; + uint32_t pkt_len; + + buf_dma_addr = rte_mbuf_data_iova(*pkts); + pkt_len = (*pkts)->data_len; + + /* write data to descriptor */ + txdp->read.buffer_addr = rte_cpu_to_le_64(buf_dma_addr); + txdp->read.cmd_type_len = + rte_cpu_to_le_32((uint32_t)DCMD_DTYP_FLAGS | pkt_len); + txdp->read.olinfo_status = + rte_cpu_to_le_32(pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT); + rte_prefetch0(&(*pkts)->pool); +} + +/* + * Fill H/W descriptor ring with mbuf data. + * Copy mbuf pointers to the S/W ring. + */ +static inline void +ixgbe_tx_fill_hw_ring(struct ixgbe_tx_queue *txq, struct rte_mbuf **pkts, + uint16_t nb_pkts) +{ + volatile union ixgbe_adv_tx_desc *txdp = &(txq->tx_ring[txq->tx_tail]); + struct ixgbe_tx_entry *txep = &(txq->sw_ring[txq->tx_tail]); + const int N_PER_LOOP = 4; + const int N_PER_LOOP_MASK = N_PER_LOOP-1; + int mainpart, leftover; + int i, j; + + /* + * Process most of the packets in chunks of N pkts. Any + * leftover packets will get processed one at a time. 
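+ * With N_PER_LOOP == 4, 'mainpart' is nb_pkts rounded down to a
+ * multiple of four (filled by tx4()) and 'leftover' is the remaining
+ * zero to three packets (filled by tx1()); e.g. nb_pkts == 22 gives
+ * mainpart 20 and leftover 2.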
+ */ + mainpart = (nb_pkts & ((uint32_t) ~N_PER_LOOP_MASK)); + leftover = (nb_pkts & ((uint32_t) N_PER_LOOP_MASK)); + for (i = 0; i < mainpart; i += N_PER_LOOP) { + /* Copy N mbuf pointers to the S/W ring */ + for (j = 0; j < N_PER_LOOP; ++j) { + (txep + i + j)->mbuf = *(pkts + i + j); + } + tx4(txdp + i, pkts + i); + } + + if (unlikely(leftover > 0)) { + for (i = 0; i < leftover; ++i) { + (txep + mainpart + i)->mbuf = *(pkts + mainpart + i); + tx1(txdp + mainpart + i, pkts + mainpart + i); + } + } +} + +static inline uint16_t +tx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue; + volatile union ixgbe_adv_tx_desc *tx_r = txq->tx_ring; + uint16_t n = 0; + + /* + * Begin scanning the H/W ring for done descriptors when the + * number of available descriptors drops below tx_free_thresh. For + * each done descriptor, free the associated buffer. + */ + if (txq->nb_tx_free < txq->tx_free_thresh) + ixgbe_tx_free_bufs(txq); + + /* Only use descriptors that are available */ + nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts); + if (unlikely(nb_pkts == 0)) + return 0; + + /* Use exactly nb_pkts descriptors */ + txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts); + + /* + * At this point, we know there are enough descriptors in the + * ring to transmit all the packets. This assumes that each + * mbuf contains a single segment, and that no new offloads + * are expected, which would require a new context descriptor. + */ + + /* + * See if we're going to wrap-around. If so, handle the top + * of the descriptor ring first, then do the bottom. If not, + * the processing looks just like the "bottom" part anyway... + */ + if ((txq->tx_tail + nb_pkts) > txq->nb_tx_desc) { + n = (uint16_t)(txq->nb_tx_desc - txq->tx_tail); + ixgbe_tx_fill_hw_ring(txq, tx_pkts, n); + + /* + * We know that the last descriptor in the ring will need to + * have its RS bit set because tx_rs_thresh has to be + * a divisor of the ring size + */ + tx_r[txq->tx_next_rs].read.cmd_type_len |= + rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS); + txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1); + + txq->tx_tail = 0; + } + + /* Fill H/W descriptor ring with mbuf data */ + ixgbe_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n)); + txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n)); + + /* + * Determine if RS bit should be set + * This is what we actually want: + * if ((txq->tx_tail - 1) >= txq->tx_next_rs) + * but instead of subtracting 1 and doing >=, we can just do + * greater than without subtracting. + */ + if (txq->tx_tail > txq->tx_next_rs) { + tx_r[txq->tx_next_rs].read.cmd_type_len |= + rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS); + txq->tx_next_rs = (uint16_t)(txq->tx_next_rs + + txq->tx_rs_thresh); + if (txq->tx_next_rs >= txq->nb_tx_desc) + txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1); + } + + /* + * Check for wrap-around. This would only happen if we used + * up to the last descriptor in the ring, no more, no less. 
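+ * In that case tx_tail is simply reset to the start of the ring
+ * before the tail register is written below.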
+ */ + if (txq->tx_tail >= txq->nb_tx_desc) + txq->tx_tail = 0; + + /* update tail pointer */ + rte_wmb(); + IXGBE_PCI_REG_WRITE_RELAXED(txq->tdt_reg_addr, txq->tx_tail); + + return nb_pkts; +} + +uint16_t +ixgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + uint16_t nb_tx; + + /* Try to transmit at least chunks of TX_MAX_BURST pkts */ + if (likely(nb_pkts <= RTE_PMD_IXGBE_TX_MAX_BURST)) + return tx_xmit_pkts(tx_queue, tx_pkts, nb_pkts); + + /* transmit more than the max burst, in chunks of TX_MAX_BURST */ + nb_tx = 0; + while (nb_pkts) { + uint16_t ret, n; + + n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_IXGBE_TX_MAX_BURST); + ret = tx_xmit_pkts(tx_queue, &(tx_pkts[nb_tx]), n); + nb_tx = (uint16_t)(nb_tx + ret); + nb_pkts = (uint16_t)(nb_pkts - ret); + if (ret < n) + break; + } + + return nb_tx; +} + +static uint16_t +ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + uint16_t nb_tx = 0; + struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue; + + while (nb_pkts) { + uint16_t ret, num; + + num = (uint16_t)RTE_MIN(nb_pkts, txq->tx_rs_thresh); + ret = ixgbe_xmit_fixed_burst_vec(tx_queue, &tx_pkts[nb_tx], + num); + nb_tx += ret; + nb_pkts -= ret; + if (ret < num) + break; + } + + return nb_tx; +} + +static inline void +ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq, + volatile struct ixgbe_adv_tx_context_desc *ctx_txd, + uint64_t ol_flags, union ixgbe_tx_offload tx_offload, + __rte_unused uint64_t *mdata) +{ + uint32_t type_tucmd_mlhl; + uint32_t mss_l4len_idx = 0; + uint32_t ctx_idx; + uint32_t vlan_macip_lens; + union ixgbe_tx_offload tx_offload_mask; + uint32_t seqnum_seed = 0; + + ctx_idx = txq->ctx_curr; + tx_offload_mask.data[0] = 0; + tx_offload_mask.data[1] = 0; + type_tucmd_mlhl = 0; + + /* Specify which HW CTX to upload. 
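+ * The queue caches two context descriptors (what_advctx_update()
+ * toggles ctx_curr between them), and the IDX field written here
+ * selects which of the two on-chip context slots is loaded.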
*/ + mss_l4len_idx |= (ctx_idx << IXGBE_ADVTXD_IDX_SHIFT); + + if (ol_flags & PKT_TX_VLAN_PKT) { + tx_offload_mask.vlan_tci |= ~0; + } + + /* check if TCP segmentation required for this packet */ + if (ol_flags & PKT_TX_TCP_SEG) { + /* implies IP cksum in IPv4 */ + if (ol_flags & PKT_TX_IP_CKSUM) + type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV4 | + IXGBE_ADVTXD_TUCMD_L4T_TCP | + IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT; + else + type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV6 | + IXGBE_ADVTXD_TUCMD_L4T_TCP | + IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT; + + tx_offload_mask.l2_len |= ~0; + tx_offload_mask.l3_len |= ~0; + tx_offload_mask.l4_len |= ~0; + tx_offload_mask.tso_segsz |= ~0; + mss_l4len_idx |= tx_offload.tso_segsz << IXGBE_ADVTXD_MSS_SHIFT; + mss_l4len_idx |= tx_offload.l4_len << IXGBE_ADVTXD_L4LEN_SHIFT; + } else { /* no TSO, check if hardware checksum is needed */ + if (ol_flags & PKT_TX_IP_CKSUM) { + type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV4; + tx_offload_mask.l2_len |= ~0; + tx_offload_mask.l3_len |= ~0; + } + + switch (ol_flags & PKT_TX_L4_MASK) { + case PKT_TX_UDP_CKSUM: + type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP | + IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT; + mss_l4len_idx |= sizeof(struct rte_udp_hdr) + << IXGBE_ADVTXD_L4LEN_SHIFT; + tx_offload_mask.l2_len |= ~0; + tx_offload_mask.l3_len |= ~0; + break; + case PKT_TX_TCP_CKSUM: + type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP | + IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT; + mss_l4len_idx |= sizeof(struct rte_tcp_hdr) + << IXGBE_ADVTXD_L4LEN_SHIFT; + tx_offload_mask.l2_len |= ~0; + tx_offload_mask.l3_len |= ~0; + break; + case PKT_TX_SCTP_CKSUM: + type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP | + IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT; + mss_l4len_idx |= sizeof(struct rte_sctp_hdr) + << IXGBE_ADVTXD_L4LEN_SHIFT; + tx_offload_mask.l2_len |= ~0; + tx_offload_mask.l3_len |= ~0; + break; + default: + type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_RSV | + IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT; + break; + } + } + + if (ol_flags & PKT_TX_OUTER_IP_CKSUM) { + tx_offload_mask.outer_l2_len |= ~0; + tx_offload_mask.outer_l3_len |= ~0; + tx_offload_mask.l2_len |= ~0; + seqnum_seed |= tx_offload.outer_l3_len + << IXGBE_ADVTXD_OUTER_IPLEN; + seqnum_seed |= tx_offload.l2_len + << IXGBE_ADVTXD_TUNNEL_LEN; + } +#ifdef RTE_LIBRTE_SECURITY + if (ol_flags & PKT_TX_SEC_OFFLOAD) { + union ixgbe_crypto_tx_desc_md *md = + (union ixgbe_crypto_tx_desc_md *)mdata; + seqnum_seed |= + (IXGBE_ADVTXD_IPSEC_SA_INDEX_MASK & md->sa_idx); + type_tucmd_mlhl |= md->enc ? 
+ (IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP | + IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN) : 0; + type_tucmd_mlhl |= + (md->pad_len & IXGBE_ADVTXD_IPSEC_ESP_LEN_MASK); + tx_offload_mask.sa_idx |= ~0; + tx_offload_mask.sec_pad_len |= ~0; + } +#endif + + txq->ctx_cache[ctx_idx].flags = ol_flags; + txq->ctx_cache[ctx_idx].tx_offload.data[0] = + tx_offload_mask.data[0] & tx_offload.data[0]; + txq->ctx_cache[ctx_idx].tx_offload.data[1] = + tx_offload_mask.data[1] & tx_offload.data[1]; + txq->ctx_cache[ctx_idx].tx_offload_mask = tx_offload_mask; + + ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl); + vlan_macip_lens = tx_offload.l3_len; + if (ol_flags & PKT_TX_OUTER_IP_CKSUM) + vlan_macip_lens |= (tx_offload.outer_l2_len << + IXGBE_ADVTXD_MACLEN_SHIFT); + else + vlan_macip_lens |= (tx_offload.l2_len << + IXGBE_ADVTXD_MACLEN_SHIFT); + vlan_macip_lens |= ((uint32_t)tx_offload.vlan_tci << IXGBE_ADVTXD_VLAN_SHIFT); + ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens); + ctx_txd->mss_l4len_idx = rte_cpu_to_le_32(mss_l4len_idx); + ctx_txd->seqnum_seed = seqnum_seed; +} + +/* + * Check which hardware context can be used. Use the existing match + * or create a new context descriptor. + */ +static inline uint32_t +what_advctx_update(struct ixgbe_tx_queue *txq, uint64_t flags, + union ixgbe_tx_offload tx_offload) +{ + /* If match with the current used context */ + if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) && + (txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] == + (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0] + & tx_offload.data[0])) && + (txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] == + (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1] + & tx_offload.data[1])))) + return txq->ctx_curr; + + /* What if match with the next context */ + txq->ctx_curr ^= 1; + if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) && + (txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] == + (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0] + & tx_offload.data[0])) && + (txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] == + (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1] + & tx_offload.data[1])))) + return txq->ctx_curr; + + /* Mismatch, use the previous context */ + return IXGBE_CTX_NUM; +} + +static inline uint32_t +tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags) +{ + uint32_t tmp = 0; + + if ((ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM) + tmp |= IXGBE_ADVTXD_POPTS_TXSM; + if (ol_flags & PKT_TX_IP_CKSUM) + tmp |= IXGBE_ADVTXD_POPTS_IXSM; + if (ol_flags & PKT_TX_TCP_SEG) + tmp |= IXGBE_ADVTXD_POPTS_TXSM; + return tmp; +} + +static inline uint32_t +tx_desc_ol_flags_to_cmdtype(uint64_t ol_flags) +{ + uint32_t cmdtype = 0; + + if (ol_flags & PKT_TX_VLAN_PKT) + cmdtype |= IXGBE_ADVTXD_DCMD_VLE; + if (ol_flags & PKT_TX_TCP_SEG) + cmdtype |= IXGBE_ADVTXD_DCMD_TSE; + if (ol_flags & PKT_TX_OUTER_IP_CKSUM) + cmdtype |= (1 << IXGBE_ADVTXD_OUTERIPCS_SHIFT); + if (ol_flags & PKT_TX_MACSEC) + cmdtype |= IXGBE_ADVTXD_MAC_LINKSEC; + return cmdtype; +} + +/* Default RS bit threshold values */ +#ifndef DEFAULT_TX_RS_THRESH +#define DEFAULT_TX_RS_THRESH 32 +#endif +#ifndef DEFAULT_TX_FREE_THRESH +#define DEFAULT_TX_FREE_THRESH 32 +#endif + +/* Reset transmit descriptors after they have been used */ +static inline int +ixgbe_xmit_cleanup(struct ixgbe_tx_queue *txq) +{ + struct ixgbe_tx_entry *sw_ring = txq->sw_ring; + volatile union ixgbe_adv_tx_desc *txr = txq->tx_ring; + uint16_t last_desc_cleaned = txq->last_desc_cleaned; + uint16_t nb_tx_desc = txq->nb_tx_desc; + uint16_t 
desc_to_clean_to; + uint16_t nb_tx_to_clean; + uint32_t status; + + /* Determine the last descriptor needing to be cleaned */ + desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh); + if (desc_to_clean_to >= nb_tx_desc) + desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc); + + /* Check to make sure the last descriptor to clean is done */ + desc_to_clean_to = sw_ring[desc_to_clean_to].last_id; + status = txr[desc_to_clean_to].wb.status; + if (!(status & rte_cpu_to_le_32(IXGBE_TXD_STAT_DD))) { + PMD_TX_FREE_LOG(DEBUG, + "TX descriptor %4u is not done" + "(port=%d queue=%d)", + desc_to_clean_to, + txq->port_id, txq->queue_id); + /* Failed to clean any descriptors, better luck next time */ + return -(1); + } + + /* Figure out how many descriptors will be cleaned */ + if (last_desc_cleaned > desc_to_clean_to) + nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) + + desc_to_clean_to); + else + nb_tx_to_clean = (uint16_t)(desc_to_clean_to - + last_desc_cleaned); + + PMD_TX_FREE_LOG(DEBUG, + "Cleaning %4u TX descriptors: %4u to %4u " + "(port=%d queue=%d)", + nb_tx_to_clean, last_desc_cleaned, desc_to_clean_to, + txq->port_id, txq->queue_id); + + /* + * The last descriptor to clean is done, so that means all the + * descriptors from the last descriptor that was cleaned + * up to the last descriptor with the RS bit set + * are done. Only reset the threshold descriptor. + */ + txr[desc_to_clean_to].wb.status = 0; + + /* Update the txq to reflect the last descriptor that was cleaned */ + txq->last_desc_cleaned = desc_to_clean_to; + txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean); + + /* No Error */ + return 0; +} + +uint16_t +ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + struct ixgbe_tx_queue *txq; + struct ixgbe_tx_entry *sw_ring; + struct ixgbe_tx_entry *txe, *txn; + volatile union ixgbe_adv_tx_desc *txr; + volatile union ixgbe_adv_tx_desc *txd, *txp; + struct rte_mbuf *tx_pkt; + struct rte_mbuf *m_seg; + uint64_t buf_dma_addr; + uint32_t olinfo_status; + uint32_t cmd_type_len; + uint32_t pkt_len; + uint16_t slen; + uint64_t ol_flags; + uint16_t tx_id; + uint16_t tx_last; + uint16_t nb_tx; + uint16_t nb_used; + uint64_t tx_ol_req; + uint32_t ctx = 0; + uint32_t new_ctx; + union ixgbe_tx_offload tx_offload; +#ifdef RTE_LIBRTE_SECURITY + uint8_t use_ipsec; +#endif + + tx_offload.data[0] = 0; + tx_offload.data[1] = 0; + txq = tx_queue; + sw_ring = txq->sw_ring; + txr = txq->tx_ring; + tx_id = txq->tx_tail; + txe = &sw_ring[tx_id]; + txp = NULL; + + /* Determine if the descriptor ring needs to be cleaned. */ + if (txq->nb_tx_free < txq->tx_free_thresh) + ixgbe_xmit_cleanup(txq); + + rte_prefetch0(&txe->mbuf->pool); + + /* TX loop */ + for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) { + new_ctx = 0; + tx_pkt = *tx_pkts++; + pkt_len = tx_pkt->pkt_len; + + /* + * Determine how many (if any) context descriptors + * are needed for offload functionality. 
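+ * At most one context descriptor is added per packet: new_ctx is set
+ * only when what_advctx_update() finds no cached context matching the
+ * requested offloads, so nb_used below is nb_segs plus that optional
+ * extra descriptor.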
+ */ + ol_flags = tx_pkt->ol_flags; +#ifdef RTE_LIBRTE_SECURITY + use_ipsec = txq->using_ipsec && (ol_flags & PKT_TX_SEC_OFFLOAD); +#endif + + /* If hardware offload required */ + tx_ol_req = ol_flags & IXGBE_TX_OFFLOAD_MASK; + if (tx_ol_req) { + tx_offload.l2_len = tx_pkt->l2_len; + tx_offload.l3_len = tx_pkt->l3_len; + tx_offload.l4_len = tx_pkt->l4_len; + tx_offload.vlan_tci = tx_pkt->vlan_tci; + tx_offload.tso_segsz = tx_pkt->tso_segsz; + tx_offload.outer_l2_len = tx_pkt->outer_l2_len; + tx_offload.outer_l3_len = tx_pkt->outer_l3_len; +#ifdef RTE_LIBRTE_SECURITY + if (use_ipsec) { + union ixgbe_crypto_tx_desc_md *ipsec_mdata = + (union ixgbe_crypto_tx_desc_md *) + &tx_pkt->udata64; + tx_offload.sa_idx = ipsec_mdata->sa_idx; + tx_offload.sec_pad_len = ipsec_mdata->pad_len; + } +#endif + + /* If new context need be built or reuse the exist ctx. */ + ctx = what_advctx_update(txq, tx_ol_req, + tx_offload); + /* Only allocate context descriptor if required*/ + new_ctx = (ctx == IXGBE_CTX_NUM); + ctx = txq->ctx_curr; + } + + /* + * Keep track of how many descriptors are used this loop + * This will always be the number of segments + the number of + * Context descriptors required to transmit the packet + */ + nb_used = (uint16_t)(tx_pkt->nb_segs + new_ctx); + + if (txp != NULL && + nb_used + txq->nb_tx_used >= txq->tx_rs_thresh) + /* set RS on the previous packet in the burst */ + txp->read.cmd_type_len |= + rte_cpu_to_le_32(IXGBE_TXD_CMD_RS); + + /* + * The number of descriptors that must be allocated for a + * packet is the number of segments of that packet, plus 1 + * Context Descriptor for the hardware offload, if any. + * Determine the last TX descriptor to allocate in the TX ring + * for the packet, starting from the current position (tx_id) + * in the ring. + */ + tx_last = (uint16_t) (tx_id + nb_used - 1); + + /* Circular ring */ + if (tx_last >= txq->nb_tx_desc) + tx_last = (uint16_t) (tx_last - txq->nb_tx_desc); + + PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u" + " tx_first=%u tx_last=%u", + (unsigned) txq->port_id, + (unsigned) txq->queue_id, + (unsigned) pkt_len, + (unsigned) tx_id, + (unsigned) tx_last); + + /* + * Make sure there are enough TX descriptors available to + * transmit the entire packet. + * nb_used better be less than or equal to txq->tx_rs_thresh + */ + if (nb_used > txq->nb_tx_free) { + PMD_TX_FREE_LOG(DEBUG, + "Not enough free TX descriptors " + "nb_used=%4u nb_free=%4u " + "(port=%d queue=%d)", + nb_used, txq->nb_tx_free, + txq->port_id, txq->queue_id); + + if (ixgbe_xmit_cleanup(txq) != 0) { + /* Could not clean any descriptors */ + if (nb_tx == 0) + return 0; + goto end_of_tx; + } + + /* nb_used better be <= txq->tx_rs_thresh */ + if (unlikely(nb_used > txq->tx_rs_thresh)) { + PMD_TX_FREE_LOG(DEBUG, + "The number of descriptors needed to " + "transmit the packet exceeds the " + "RS bit threshold. This will impact " + "performance." + "nb_used=%4u nb_free=%4u " + "tx_rs_thresh=%4u. " + "(port=%d queue=%d)", + nb_used, txq->nb_tx_free, + txq->tx_rs_thresh, + txq->port_id, txq->queue_id); + /* + * Loop here until there are enough TX + * descriptors or until the ring cannot be + * cleaned. + */ + while (nb_used > txq->nb_tx_free) { + if (ixgbe_xmit_cleanup(txq) != 0) { + /* + * Could not clean any + * descriptors + */ + if (nb_tx == 0) + return 0; + goto end_of_tx; + } + } + } + } + + /* + * By now there are enough free TX descriptors to transmit + * the packet. + */ + + /* + * Set common flags of all TX Data Descriptors. 
+ * + * The following bits must be set in all Data Descriptors: + * - IXGBE_ADVTXD_DTYP_DATA + * - IXGBE_ADVTXD_DCMD_DEXT + * + * The following bits must be set in the first Data Descriptor + * and are ignored in the other ones: + * - IXGBE_ADVTXD_DCMD_IFCS + * - IXGBE_ADVTXD_MAC_1588 + * - IXGBE_ADVTXD_DCMD_VLE + * + * The following bits must only be set in the last Data + * Descriptor: + * - IXGBE_TXD_CMD_EOP + * + * The following bits can be set in any Data Descriptor, but + * are only set in the last Data Descriptor: + * - IXGBE_TXD_CMD_RS + */ + cmd_type_len = IXGBE_ADVTXD_DTYP_DATA | + IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT; + +#ifdef RTE_LIBRTE_IEEE1588 + if (ol_flags & PKT_TX_IEEE1588_TMST) + cmd_type_len |= IXGBE_ADVTXD_MAC_1588; +#endif + + olinfo_status = 0; + if (tx_ol_req) { + + if (ol_flags & PKT_TX_TCP_SEG) { + /* when TSO is on, paylen in descriptor is the + * not the packet len but the tcp payload len */ + pkt_len -= (tx_offload.l2_len + + tx_offload.l3_len + tx_offload.l4_len); + } + + /* + * Setup the TX Advanced Context Descriptor if required + */ + if (new_ctx) { + volatile struct ixgbe_adv_tx_context_desc * + ctx_txd; + + ctx_txd = (volatile struct + ixgbe_adv_tx_context_desc *) + &txr[tx_id]; + + txn = &sw_ring[txe->next_id]; + rte_prefetch0(&txn->mbuf->pool); + + if (txe->mbuf != NULL) { + rte_pktmbuf_free_seg(txe->mbuf); + txe->mbuf = NULL; + } + + ixgbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req, + tx_offload, &tx_pkt->udata64); + + txe->last_id = tx_last; + tx_id = txe->next_id; + txe = txn; + } + + /* + * Setup the TX Advanced Data Descriptor, + * This path will go through + * whatever new/reuse the context descriptor + */ + cmd_type_len |= tx_desc_ol_flags_to_cmdtype(ol_flags); + olinfo_status |= tx_desc_cksum_flags_to_olinfo(ol_flags); + olinfo_status |= ctx << IXGBE_ADVTXD_IDX_SHIFT; + } + + olinfo_status |= (pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT); +#ifdef RTE_LIBRTE_SECURITY + if (use_ipsec) + olinfo_status |= IXGBE_ADVTXD_POPTS_IPSEC; +#endif + + m_seg = tx_pkt; + do { + txd = &txr[tx_id]; + txn = &sw_ring[txe->next_id]; + rte_prefetch0(&txn->mbuf->pool); + + if (txe->mbuf != NULL) + rte_pktmbuf_free_seg(txe->mbuf); + txe->mbuf = m_seg; + + /* + * Set up Transmit Data Descriptor. 
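+ * One data descriptor is written per mbuf segment, carrying that
+ * segment's DMA address and length together with the cmd_type_len
+ * and olinfo_status flags computed above.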
+ */ + slen = m_seg->data_len; + buf_dma_addr = rte_mbuf_data_iova(m_seg); + txd->read.buffer_addr = + rte_cpu_to_le_64(buf_dma_addr); + txd->read.cmd_type_len = + rte_cpu_to_le_32(cmd_type_len | slen); + txd->read.olinfo_status = + rte_cpu_to_le_32(olinfo_status); + txe->last_id = tx_last; + tx_id = txe->next_id; + txe = txn; + m_seg = m_seg->next; + } while (m_seg != NULL); + + /* + * The last packet data descriptor needs End Of Packet (EOP) + */ + cmd_type_len |= IXGBE_TXD_CMD_EOP; + txq->nb_tx_used = (uint16_t)(txq->nb_tx_used + nb_used); + txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used); + + /* Set RS bit only on threshold packets' last descriptor */ + if (txq->nb_tx_used >= txq->tx_rs_thresh) { + PMD_TX_FREE_LOG(DEBUG, + "Setting RS bit on TXD id=" + "%4u (port=%d queue=%d)", + tx_last, txq->port_id, txq->queue_id); + + cmd_type_len |= IXGBE_TXD_CMD_RS; + + /* Update txq RS bit counters */ + txq->nb_tx_used = 0; + txp = NULL; + } else + txp = txd; + + txd->read.cmd_type_len |= rte_cpu_to_le_32(cmd_type_len); + } + +end_of_tx: + /* set RS on last packet in the burst */ + if (txp != NULL) + txp->read.cmd_type_len |= rte_cpu_to_le_32(IXGBE_TXD_CMD_RS); + + rte_wmb(); + + /* + * Set the Transmit Descriptor Tail (TDT) + */ + PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u", + (unsigned) txq->port_id, (unsigned) txq->queue_id, + (unsigned) tx_id, (unsigned) nb_tx); + IXGBE_PCI_REG_WRITE_RELAXED(txq->tdt_reg_addr, tx_id); + txq->tx_tail = tx_id; + + return nb_tx; +} + +/********************************************************************* + * + * TX prep functions + * + **********************************************************************/ +uint16_t +ixgbe_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) +{ + int i, ret; + uint64_t ol_flags; + struct rte_mbuf *m; + struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue; + + for (i = 0; i < nb_pkts; i++) { + m = tx_pkts[i]; + ol_flags = m->ol_flags; + + /** + * Check if packet meets requirements for number of segments + * + * NOTE: for ixgbe it's always (40 - WTHRESH) for both TSO and + * non-TSO + */ + + if (m->nb_segs > IXGBE_TX_MAX_SEG - txq->wthresh) { + rte_errno = EINVAL; + return i; + } + + if (ol_flags & IXGBE_TX_OFFLOAD_NOTSUP_MASK) { + rte_errno = ENOTSUP; + return i; + } + + /* check the size of packet */ + if (m->pkt_len < IXGBE_TX_MIN_PKT_LEN) { + rte_errno = EINVAL; + return i; + } + +#ifdef RTE_LIBRTE_ETHDEV_DEBUG + ret = rte_validate_tx_offload(m); + if (ret != 0) { + rte_errno = -ret; + return i; + } +#endif + ret = rte_net_intel_cksum_prepare(m); + if (ret != 0) { + rte_errno = -ret; + return i; + } + } + + return i; +} + +/********************************************************************* + * + * RX functions + * + **********************************************************************/ + +#define IXGBE_PACKET_TYPE_ETHER 0X00 +#define IXGBE_PACKET_TYPE_IPV4 0X01 +#define IXGBE_PACKET_TYPE_IPV4_TCP 0X11 +#define IXGBE_PACKET_TYPE_IPV4_UDP 0X21 +#define IXGBE_PACKET_TYPE_IPV4_SCTP 0X41 +#define IXGBE_PACKET_TYPE_IPV4_EXT 0X03 +#define IXGBE_PACKET_TYPE_IPV4_EXT_TCP 0X13 +#define IXGBE_PACKET_TYPE_IPV4_EXT_UDP 0X23 +#define IXGBE_PACKET_TYPE_IPV4_EXT_SCTP 0X43 +#define IXGBE_PACKET_TYPE_IPV6 0X04 +#define IXGBE_PACKET_TYPE_IPV6_TCP 0X14 +#define IXGBE_PACKET_TYPE_IPV6_UDP 0X24 +#define IXGBE_PACKET_TYPE_IPV6_SCTP 0X44 +#define IXGBE_PACKET_TYPE_IPV6_EXT 0X0C +#define IXGBE_PACKET_TYPE_IPV6_EXT_TCP 0X1C +#define IXGBE_PACKET_TYPE_IPV6_EXT_UDP 0X2C +#define 
IXGBE_PACKET_TYPE_IPV6_EXT_SCTP 0X4C +#define IXGBE_PACKET_TYPE_IPV4_IPV6 0X05 +#define IXGBE_PACKET_TYPE_IPV4_IPV6_TCP 0X15 +#define IXGBE_PACKET_TYPE_IPV4_IPV6_UDP 0X25 +#define IXGBE_PACKET_TYPE_IPV4_IPV6_SCTP 0X45 +#define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6 0X07 +#define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_TCP 0X17 +#define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_UDP 0X27 +#define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_SCTP 0X47 +#define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT 0X0D +#define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_TCP 0X1D +#define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_UDP 0X2D +#define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_SCTP 0X4D +#define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT 0X0F +#define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_TCP 0X1F +#define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_UDP 0X2F +#define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_SCTP 0X4F + +#define IXGBE_PACKET_TYPE_NVGRE 0X00 +#define IXGBE_PACKET_TYPE_NVGRE_IPV4 0X01 +#define IXGBE_PACKET_TYPE_NVGRE_IPV4_TCP 0X11 +#define IXGBE_PACKET_TYPE_NVGRE_IPV4_UDP 0X21 +#define IXGBE_PACKET_TYPE_NVGRE_IPV4_SCTP 0X41 +#define IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT 0X03 +#define IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_TCP 0X13 +#define IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_UDP 0X23 +#define IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_SCTP 0X43 +#define IXGBE_PACKET_TYPE_NVGRE_IPV6 0X04 +#define IXGBE_PACKET_TYPE_NVGRE_IPV6_TCP 0X14 +#define IXGBE_PACKET_TYPE_NVGRE_IPV6_UDP 0X24 +#define IXGBE_PACKET_TYPE_NVGRE_IPV6_SCTP 0X44 +#define IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT 0X0C +#define IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_TCP 0X1C +#define IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_UDP 0X2C +#define IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_SCTP 0X4C +#define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6 0X05 +#define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_TCP 0X15 +#define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_UDP 0X25 +#define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT 0X0D +#define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT_TCP 0X1D +#define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT_UDP 0X2D + +#define IXGBE_PACKET_TYPE_VXLAN 0X80 +#define IXGBE_PACKET_TYPE_VXLAN_IPV4 0X81 +#define IXGBE_PACKET_TYPE_VXLAN_IPV4_TCP 0x91 +#define IXGBE_PACKET_TYPE_VXLAN_IPV4_UDP 0xA1 +#define IXGBE_PACKET_TYPE_VXLAN_IPV4_SCTP 0xC1 +#define IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT 0x83 +#define IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_TCP 0X93 +#define IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_UDP 0XA3 +#define IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_SCTP 0XC3 +#define IXGBE_PACKET_TYPE_VXLAN_IPV6 0X84 +#define IXGBE_PACKET_TYPE_VXLAN_IPV6_TCP 0X94 +#define IXGBE_PACKET_TYPE_VXLAN_IPV6_UDP 0XA4 +#define IXGBE_PACKET_TYPE_VXLAN_IPV6_SCTP 0XC4 +#define IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT 0X8C +#define IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_TCP 0X9C +#define IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_UDP 0XAC +#define IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_SCTP 0XCC +#define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6 0X85 +#define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_TCP 0X95 +#define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_UDP 0XA5 +#define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT 0X8D +#define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_TCP 0X9D +#define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_UDP 0XAD + +/** + * Use 2 different table for normal packet and tunnel packet + * to save the space. 
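+ * Both tables are indexed with the packet type bits taken from the RX
+ * descriptor: ixgbe_rxd_pkt_info_to_pkt_type() masks out the tunnel
+ * bit and looks up ptype_table_tn for tunnelled packets, falling back
+ * to this table otherwise.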
+ */ +const uint32_t + ptype_table[IXGBE_PACKET_TYPE_MAX] __rte_cache_aligned = { + [IXGBE_PACKET_TYPE_ETHER] = RTE_PTYPE_L2_ETHER, + [IXGBE_PACKET_TYPE_IPV4] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4, + [IXGBE_PACKET_TYPE_IPV4_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP, + [IXGBE_PACKET_TYPE_IPV4_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP, + [IXGBE_PACKET_TYPE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP, + [IXGBE_PACKET_TYPE_IPV4_EXT] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT, + [IXGBE_PACKET_TYPE_IPV4_EXT_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_TCP, + [IXGBE_PACKET_TYPE_IPV4_EXT_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_UDP, + [IXGBE_PACKET_TYPE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_SCTP, + [IXGBE_PACKET_TYPE_IPV6] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6, + [IXGBE_PACKET_TYPE_IPV6_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP, + [IXGBE_PACKET_TYPE_IPV6_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP, + [IXGBE_PACKET_TYPE_IPV6_SCTP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_SCTP, + [IXGBE_PACKET_TYPE_IPV6_EXT] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6_EXT, + [IXGBE_PACKET_TYPE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP, + [IXGBE_PACKET_TYPE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP, + [IXGBE_PACKET_TYPE_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_SCTP, + [IXGBE_PACKET_TYPE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6, + [IXGBE_PACKET_TYPE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP, + [IXGBE_PACKET_TYPE_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP, + [IXGBE_PACKET_TYPE_IPV4_IPV6_SCTP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_SCTP, + [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6, + [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP, + [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP, + [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_SCTP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_SCTP, + [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT, + [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP, + [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP, + [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_SCTP, + [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT | 
RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT, + [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP, + [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP, + [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_SCTP] = + RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_SCTP, +}; + +const uint32_t + ptype_table_tn[IXGBE_PACKET_TYPE_TN_MAX] __rte_cache_aligned = { + [IXGBE_PACKET_TYPE_NVGRE] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER, + [IXGBE_PACKET_TYPE_NVGRE_IPV4] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4, + [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT, + [IXGBE_PACKET_TYPE_NVGRE_IPV6] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6, + [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4, + [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT, + [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4, + [IXGBE_PACKET_TYPE_NVGRE_IPV4_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4 | + RTE_PTYPE_INNER_L4_TCP, + [IXGBE_PACKET_TYPE_NVGRE_IPV6_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6 | + RTE_PTYPE_INNER_L4_TCP, + [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4, + [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT | + RTE_PTYPE_INNER_L4_TCP, + [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT_TCP] = + RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4, + [IXGBE_PACKET_TYPE_NVGRE_IPV4_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4 | + RTE_PTYPE_INNER_L4_UDP, + [IXGBE_PACKET_TYPE_NVGRE_IPV6_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6 | + RTE_PTYPE_INNER_L4_UDP, + [IXGBE_PACKET_TYPE_NVGRE_IPV6_SCTP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6 | + RTE_PTYPE_INNER_L4_SCTP, + [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | 
RTE_PTYPE_INNER_L3_IPV4, + [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT | + RTE_PTYPE_INNER_L4_UDP, + [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT | + RTE_PTYPE_INNER_L4_SCTP, + [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT_UDP] = + RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4, + [IXGBE_PACKET_TYPE_NVGRE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4 | + RTE_PTYPE_INNER_L4_SCTP, + [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT | + RTE_PTYPE_INNER_L4_SCTP, + [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT | + RTE_PTYPE_INNER_L4_TCP, + [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT | + RTE_PTYPE_INNER_L4_UDP, + + [IXGBE_PACKET_TYPE_VXLAN] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER, + [IXGBE_PACKET_TYPE_VXLAN_IPV4] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4, + [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT, + [IXGBE_PACKET_TYPE_VXLAN_IPV6] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6, + [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4, + [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT, + [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4, + [IXGBE_PACKET_TYPE_VXLAN_IPV4_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4 | RTE_PTYPE_INNER_L4_TCP, + [IXGBE_PACKET_TYPE_VXLAN_IPV6_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP, + [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4, + [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | 
RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP, + [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_TCP] = + RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_UDP | RTE_PTYPE_TUNNEL_VXLAN | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4, + [IXGBE_PACKET_TYPE_VXLAN_IPV4_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4 | RTE_PTYPE_INNER_L4_UDP, + [IXGBE_PACKET_TYPE_VXLAN_IPV6_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP, + [IXGBE_PACKET_TYPE_VXLAN_IPV6_SCTP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_SCTP, + [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4, + [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP, + [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_SCTP, + [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_UDP] = + RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_UDP | RTE_PTYPE_TUNNEL_VXLAN | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4, + [IXGBE_PACKET_TYPE_VXLAN_IPV4_SCTP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4 | RTE_PTYPE_INNER_L4_SCTP, + [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_SCTP, + [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_TCP, + [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_UDP, +}; + +/* @note: fix ixgbe_dev_supported_ptypes_get() if any change here. */ +static inline uint32_t +ixgbe_rxd_pkt_info_to_pkt_type(uint32_t pkt_info, uint16_t ptype_mask) +{ + + if (unlikely(pkt_info & IXGBE_RXDADV_PKTTYPE_ETQF)) + return RTE_PTYPE_UNKNOWN; + + pkt_info = (pkt_info >> IXGBE_PACKET_TYPE_SHIFT) & ptype_mask; + + /* For tunnel packet */ + if (pkt_info & IXGBE_PACKET_TYPE_TUNNEL_BIT) { + /* Remove the tunnel bit to save the space. */ + pkt_info &= IXGBE_PACKET_TYPE_MASK_TUNNEL; + return ptype_table_tn[pkt_info]; + } + + /** + * For x550, if it's not tunnel, + * tunnel type bit should be set to 0. + * Reuse 82599's mask. 
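The two ptype tables above collapse the Rx descriptor's packed packet-type code into a ready-made RTE_PTYPE_* bit set with one array lookup, using designated initializers so that unlisted codes fall through as zero (unknown). Below is a minimal standalone sketch of that table-lookup translation; the EX_* codes, flag bits and the 4-bit field position are invented for the example and are not the ixgbe encodings.

/* Lookup-table translation of a packed hardware type code (sketch). */
#include <stdint.h>
#include <stdio.h>

#define EX_FLAG_L2    0x1u
#define EX_FLAG_IPV4  0x2u
#define EX_FLAG_IPV6  0x4u
#define EX_FLAG_TCP   0x8u

enum { EX_CODE_ETHER, EX_CODE_IPV4, EX_CODE_IPV4_TCP, EX_CODE_IPV6, EX_CODE_MAX };

/* Designated initializers keep the table readable and leave
 * unlisted codes as 0, i.e. "unknown". */
static const uint32_t ex_ptype_table[EX_CODE_MAX] = {
	[EX_CODE_ETHER]    = EX_FLAG_L2,
	[EX_CODE_IPV4]     = EX_FLAG_L2 | EX_FLAG_IPV4,
	[EX_CODE_IPV4_TCP] = EX_FLAG_L2 | EX_FLAG_IPV4 | EX_FLAG_TCP,
	[EX_CODE_IPV6]     = EX_FLAG_L2 | EX_FLAG_IPV6,
};

/* Assume the type code sits in bits [7:4] of a descriptor word. */
static uint32_t ex_code_to_ptype(uint32_t desc_word)
{
	uint32_t code = (desc_word >> 4) & 0xF;

	return code < EX_CODE_MAX ? ex_ptype_table[code] : 0;
}

int main(void)
{
	printf("ptype=0x%x\n", (unsigned)ex_code_to_ptype(EX_CODE_IPV4_TCP << 4));
	return 0;
}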
+ */ + pkt_info &= IXGBE_PACKET_TYPE_MASK_82599; + + return ptype_table[pkt_info]; +} + +static inline uint64_t +ixgbe_rxd_pkt_info_to_pkt_flags(uint16_t pkt_info) +{ + static uint64_t ip_rss_types_map[16] __rte_cache_aligned = { + 0, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, + 0, PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH, + PKT_RX_RSS_HASH, 0, 0, 0, + 0, 0, 0, PKT_RX_FDIR, + }; +#ifdef RTE_LIBRTE_IEEE1588 + static uint64_t ip_pkt_etqf_map[8] = { + 0, 0, 0, PKT_RX_IEEE1588_PTP, + 0, 0, 0, 0, + }; + + if (likely(pkt_info & IXGBE_RXDADV_PKTTYPE_ETQF)) + return ip_pkt_etqf_map[(pkt_info >> 4) & 0X07] | + ip_rss_types_map[pkt_info & 0XF]; + else + return ip_rss_types_map[pkt_info & 0XF]; +#else + return ip_rss_types_map[pkt_info & 0XF]; +#endif +} + +static inline uint64_t +rx_desc_status_to_pkt_flags(uint32_t rx_status, uint64_t vlan_flags) +{ + uint64_t pkt_flags; + + /* + * Check if VLAN present only. + * Do not check whether L3/L4 rx checksum done by NIC or not, + * That can be found from rte_eth_rxmode.offloads flag + */ + pkt_flags = (rx_status & IXGBE_RXD_STAT_VP) ? vlan_flags : 0; + +#ifdef RTE_LIBRTE_IEEE1588 + if (rx_status & IXGBE_RXD_STAT_TMST) + pkt_flags = pkt_flags | PKT_RX_IEEE1588_TMST; +#endif + return pkt_flags; +} + +static inline uint64_t +rx_desc_error_to_pkt_flags(uint32_t rx_status) +{ + uint64_t pkt_flags; + + /* + * Bit 31: IPE, IPv4 checksum error + * Bit 30: L4I, L4I integrity error + */ + static uint64_t error_to_pkt_flags_map[4] = { + PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD, + PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD, + PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD, + PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD + }; + pkt_flags = error_to_pkt_flags_map[(rx_status >> + IXGBE_RXDADV_ERR_CKSUM_BIT) & IXGBE_RXDADV_ERR_CKSUM_MSK]; + + if ((rx_status & IXGBE_RXD_STAT_OUTERIPCS) && + (rx_status & IXGBE_RXDADV_ERR_OUTERIPER)) { + pkt_flags |= PKT_RX_EIP_CKSUM_BAD; + } + +#ifdef RTE_LIBRTE_SECURITY + if (rx_status & IXGBE_RXD_STAT_SECP) { + pkt_flags |= PKT_RX_SEC_OFFLOAD; + if (rx_status & IXGBE_RXDADV_LNKSEC_ERROR_BAD_SIG) + pkt_flags |= PKT_RX_SEC_OFFLOAD_FAILED; + } +#endif + + return pkt_flags; +} + +/* + * LOOK_AHEAD defines how many desc statuses to check beyond the + * current descriptor. + * It must be a pound define for optimal performance. + * Do not change the value of LOOK_AHEAD, as the ixgbe_rx_scan_hw_ring + * function only works with LOOK_AHEAD=8. + */ +#define LOOK_AHEAD 8 +#if (LOOK_AHEAD != 8) +#error "PMD IXGBE: LOOK_AHEAD must be 8\n" +#endif +static inline int +ixgbe_rx_scan_hw_ring(struct ixgbe_rx_queue *rxq) +{ + volatile union ixgbe_adv_rx_desc *rxdp; + struct ixgbe_rx_entry *rxep; + struct rte_mbuf *mb; + uint16_t pkt_len; + uint64_t pkt_flags; + int nb_dd; + uint32_t s[LOOK_AHEAD]; + uint32_t pkt_info[LOOK_AHEAD]; + int i, j, nb_rx = 0; + uint32_t status; + uint64_t vlan_flags = rxq->vlan_flags; + + /* get references to current descriptor and S/W ring entry */ + rxdp = &rxq->rx_ring[rxq->rx_tail]; + rxep = &rxq->sw_ring[rxq->rx_tail]; + + status = rxdp->wb.upper.status_error; + /* check to make sure there is at least 1 packet to receive */ + if (!(status & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD))) + return 0; + + /* + * Scan LOOK_AHEAD descriptors at a time to determine which descriptors + * reference packets that are ready to be received. 
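rx_desc_error_to_pkt_flags() above maps the two hardware checksum-error bits (IP and L4) straight to mbuf offload flags through a four-entry table, avoiding branches in the hot path. A minimal sketch of the same idea follows; the EX_* flag values and the bit positions are assumptions made up for the example, not the real IXGBE_RXDADV_ERR_* layout.

/* 2-bit checksum status -> offload flags via a small table (sketch). */
#include <stdint.h>
#include <stdio.h>

#define EX_IP_CKSUM_GOOD 0x1u
#define EX_IP_CKSUM_BAD  0x2u
#define EX_L4_CKSUM_GOOD 0x4u
#define EX_L4_CKSUM_BAD  0x8u

/* Index bit 0 is the L4 error, bit 1 the IP error (invented layout). */
static const uint32_t ex_err_to_flags[4] = {
	[0] = EX_IP_CKSUM_GOOD | EX_L4_CKSUM_GOOD, /* 00: both checksums OK */
	[1] = EX_IP_CKSUM_GOOD | EX_L4_CKSUM_BAD,  /* 01: L4 error          */
	[2] = EX_IP_CKSUM_BAD  | EX_L4_CKSUM_GOOD, /* 10: IP error          */
	[3] = EX_IP_CKSUM_BAD  | EX_L4_CKSUM_BAD,  /* 11: both bad          */
};

/* Assume the two error bits live at bits [25:24] of the status word. */
static uint32_t ex_status_to_flags(uint32_t status)
{
	return ex_err_to_flags[(status >> 24) & 0x3];
}

int main(void)
{
	printf("flags=0x%x\n", (unsigned)ex_status_to_flags(1u << 24));
	return 0;
}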
+ */ + for (i = 0; i < RTE_PMD_IXGBE_RX_MAX_BURST; + i += LOOK_AHEAD, rxdp += LOOK_AHEAD, rxep += LOOK_AHEAD) { + /* Read desc statuses backwards to avoid race condition */ + for (j = 0; j < LOOK_AHEAD; j++) + s[j] = rte_le_to_cpu_32(rxdp[j].wb.upper.status_error); + + rte_smp_rmb(); + + /* Compute how many status bits were set */ + for (nb_dd = 0; nb_dd < LOOK_AHEAD && + (s[nb_dd] & IXGBE_RXDADV_STAT_DD); nb_dd++) + ; + + for (j = 0; j < nb_dd; j++) + pkt_info[j] = rte_le_to_cpu_32(rxdp[j].wb.lower. + lo_dword.data); + + nb_rx += nb_dd; + + /* Translate descriptor info to mbuf format */ + for (j = 0; j < nb_dd; ++j) { + mb = rxep[j].mbuf; + pkt_len = rte_le_to_cpu_16(rxdp[j].wb.upper.length) - + rxq->crc_len; + mb->data_len = pkt_len; + mb->pkt_len = pkt_len; + mb->vlan_tci = rte_le_to_cpu_16(rxdp[j].wb.upper.vlan); + + /* convert descriptor fields to rte mbuf flags */ + pkt_flags = rx_desc_status_to_pkt_flags(s[j], + vlan_flags); + pkt_flags |= rx_desc_error_to_pkt_flags(s[j]); + pkt_flags |= ixgbe_rxd_pkt_info_to_pkt_flags + ((uint16_t)pkt_info[j]); + mb->ol_flags = pkt_flags; + mb->packet_type = + ixgbe_rxd_pkt_info_to_pkt_type + (pkt_info[j], rxq->pkt_type_mask); + + if (likely(pkt_flags & PKT_RX_RSS_HASH)) + mb->hash.rss = rte_le_to_cpu_32( + rxdp[j].wb.lower.hi_dword.rss); + else if (pkt_flags & PKT_RX_FDIR) { + mb->hash.fdir.hash = rte_le_to_cpu_16( + rxdp[j].wb.lower.hi_dword.csum_ip.csum) & + IXGBE_ATR_HASH_MASK; + mb->hash.fdir.id = rte_le_to_cpu_16( + rxdp[j].wb.lower.hi_dword.csum_ip.ip_id); + } + } + + /* Move mbuf pointers from the S/W ring to the stage */ + for (j = 0; j < LOOK_AHEAD; ++j) { + rxq->rx_stage[i + j] = rxep[j].mbuf; + } + + /* stop if all requested packets could not be received */ + if (nb_dd != LOOK_AHEAD) + break; + } + + /* clear software ring entries so we can cleanup correctly */ + for (i = 0; i < nb_rx; ++i) { + rxq->sw_ring[rxq->rx_tail + i].mbuf = NULL; + } + + + return nb_rx; +} + +static inline int +ixgbe_rx_alloc_bufs(struct ixgbe_rx_queue *rxq, bool reset_mbuf) +{ + volatile union ixgbe_adv_rx_desc *rxdp; + struct ixgbe_rx_entry *rxep; + struct rte_mbuf *mb; + uint16_t alloc_idx; + __le64 dma_addr; + int diag, i; + + /* allocate buffers in bulk directly into the S/W ring */ + alloc_idx = rxq->rx_free_trigger - (rxq->rx_free_thresh - 1); + rxep = &rxq->sw_ring[alloc_idx]; + diag = rte_mempool_get_bulk(rxq->mb_pool, (void *)rxep, + rxq->rx_free_thresh); + if (unlikely(diag != 0)) + return -ENOMEM; + + rxdp = &rxq->rx_ring[alloc_idx]; + for (i = 0; i < rxq->rx_free_thresh; ++i) { + /* populate the static rte mbuf fields */ + mb = rxep[i].mbuf; + if (reset_mbuf) { + mb->port = rxq->port_id; + } + + rte_mbuf_refcnt_set(mb, 1); + mb->data_off = RTE_PKTMBUF_HEADROOM; + + /* populate the descriptors */ + dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb)); + rxdp[i].read.hdr_addr = 0; + rxdp[i].read.pkt_addr = dma_addr; + } + + /* update state of internal queue structure */ + rxq->rx_free_trigger = rxq->rx_free_trigger + rxq->rx_free_thresh; + if (rxq->rx_free_trigger >= rxq->nb_rx_desc) + rxq->rx_free_trigger = rxq->rx_free_thresh - 1; + + /* no errors */ + return 0; +} + +static inline uint16_t +ixgbe_rx_fill_from_stage(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail]; + int i; + + /* how many packets are ready to return? 
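ixgbe_rx_scan_hw_ring() above snapshots descriptor statuses in fixed groups of LOOK_AHEAD and only consumes the leading run whose DD (descriptor-done) bit is set, so a not-yet-written descriptor in the middle of a group ends the batch. The sketch below reproduces just that counting pattern on a plain array; the ring size, group size and EX_DD bit are illustrative stand-ins.

/* Batched "done" scan in groups of 8, stopping at the first hole (sketch). */
#include <stdint.h>
#include <stdio.h>

#define EX_DD        0x1u
#define EX_LOOKAHEAD 8
#define EX_RING_SZ   32

static int ex_scan(const uint32_t *ring, int tail, int max_groups)
{
	int done = 0;

	for (int g = 0; g < max_groups; g++) {
		uint32_t s[EX_LOOKAHEAD];
		int nb_dd;

		/* snapshot one group of statuses */
		for (int j = 0; j < EX_LOOKAHEAD; j++)
			s[j] = ring[(tail + g * EX_LOOKAHEAD + j) % EX_RING_SZ];

		/* count the contiguous leading descriptors that are done */
		for (nb_dd = 0; nb_dd < EX_LOOKAHEAD && (s[nb_dd] & EX_DD); nb_dd++)
			;

		done += nb_dd;
		if (nb_dd != EX_LOOKAHEAD)	/* partial group: stop here */
			break;
	}
	return done;
}

int main(void)
{
	uint32_t ring[EX_RING_SZ] = { [0] = EX_DD, [1] = EX_DD, [2] = EX_DD };

	printf("done=%d\n", ex_scan(ring, 0, 2));	/* prints 3 */
	return 0;
}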
*/ + nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail); + + /* copy mbuf pointers to the application's packet list */ + for (i = 0; i < nb_pkts; ++i) + rx_pkts[i] = stage[i]; + + /* update internal queue state */ + rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts); + rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts); + + return nb_pkts; +} + +static inline uint16_t +rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + struct ixgbe_rx_queue *rxq = (struct ixgbe_rx_queue *)rx_queue; + uint16_t nb_rx = 0; + + /* Any previously recv'd pkts will be returned from the Rx stage */ + if (rxq->rx_nb_avail) + return ixgbe_rx_fill_from_stage(rxq, rx_pkts, nb_pkts); + + /* Scan the H/W ring for packets to receive */ + nb_rx = (uint16_t)ixgbe_rx_scan_hw_ring(rxq); + + /* update internal queue state */ + rxq->rx_next_avail = 0; + rxq->rx_nb_avail = nb_rx; + rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx); + + /* if required, allocate new buffers to replenish descriptors */ + if (rxq->rx_tail > rxq->rx_free_trigger) { + uint16_t cur_free_trigger = rxq->rx_free_trigger; + + if (ixgbe_rx_alloc_bufs(rxq, true) != 0) { + int i, j; + + PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u " + "queue_id=%u", (unsigned) rxq->port_id, + (unsigned) rxq->queue_id); + + rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed += + rxq->rx_free_thresh; + + /* + * Need to rewind any previous receives if we cannot + * allocate new buffers to replenish the old ones. + */ + rxq->rx_nb_avail = 0; + rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx); + for (i = 0, j = rxq->rx_tail; i < nb_rx; ++i, ++j) + rxq->sw_ring[j].mbuf = rxq->rx_stage[i]; + + return 0; + } + + /* update tail pointer */ + rte_wmb(); + IXGBE_PCI_REG_WRITE_RELAXED(rxq->rdt_reg_addr, + cur_free_trigger); + } + + if (rxq->rx_tail >= rxq->nb_rx_desc) + rxq->rx_tail = 0; + + /* received any packets this loop? 
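ixgbe_rx_alloc_bufs() and rx_recv_pkts() above replenish the ring in whole blocks of rx_free_thresh buffers: nothing is refilled until the tail moves past rx_free_trigger, and the trigger then advances one block, wrapping back to rx_free_thresh - 1 at the end of the ring. That bookkeeping is easy to lose in the surrounding code, so here is a small standalone sketch of just the trigger arithmetic, with invented sizes and a printf in place of the real descriptor writes.

/* Refill-trigger bookkeeping for block-wise buffer replenishment (sketch). */
#include <stdint.h>
#include <stdio.h>

struct ex_rxq {
	uint16_t nb_desc;
	uint16_t free_thresh;
	uint16_t free_trigger;	/* last index of the next block to refill */
	uint16_t tail;
};

static void ex_maybe_refill(struct ex_rxq *q)
{
	if (q->tail <= q->free_trigger)
		return;

	/* first index of the block being refilled */
	uint16_t start = (uint16_t)(q->free_trigger - (q->free_thresh - 1));

	printf("refill descriptors [%u..%u]\n",
	       (unsigned)start, (unsigned)q->free_trigger);

	q->free_trigger = (uint16_t)(q->free_trigger + q->free_thresh);
	if (q->free_trigger >= q->nb_desc)
		q->free_trigger = (uint16_t)(q->free_thresh - 1);
}

int main(void)
{
	struct ex_rxq q = { .nb_desc = 128, .free_thresh = 32,
			    .free_trigger = 31, .tail = 0 };

	q.tail = 40; ex_maybe_refill(&q);	/* refills [0..31]              */
	q.tail = 70; ex_maybe_refill(&q);	/* refills [32..63]             */
	q.tail = 80; ex_maybe_refill(&q);	/* trigger is 95: no refill yet */
	return 0;
}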
*/ + if (rxq->rx_nb_avail) + return ixgbe_rx_fill_from_stage(rxq, rx_pkts, nb_pkts); + + return 0; +} + +/* split requests into chunks of size RTE_PMD_IXGBE_RX_MAX_BURST */ +uint16_t +ixgbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + uint16_t nb_rx; + + if (unlikely(nb_pkts == 0)) + return 0; + + if (likely(nb_pkts <= RTE_PMD_IXGBE_RX_MAX_BURST)) + return rx_recv_pkts(rx_queue, rx_pkts, nb_pkts); + + /* request is relatively large, chunk it up */ + nb_rx = 0; + while (nb_pkts) { + uint16_t ret, n; + + n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_IXGBE_RX_MAX_BURST); + ret = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n); + nb_rx = (uint16_t)(nb_rx + ret); + nb_pkts = (uint16_t)(nb_pkts - ret); + if (ret < n) + break; + } + + return nb_rx; +} + +uint16_t +ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + struct ixgbe_rx_queue *rxq; + volatile union ixgbe_adv_rx_desc *rx_ring; + volatile union ixgbe_adv_rx_desc *rxdp; + struct ixgbe_rx_entry *sw_ring; + struct ixgbe_rx_entry *rxe; + struct rte_mbuf *rxm; + struct rte_mbuf *nmb; + union ixgbe_adv_rx_desc rxd; + uint64_t dma_addr; + uint32_t staterr; + uint32_t pkt_info; + uint16_t pkt_len; + uint16_t rx_id; + uint16_t nb_rx; + uint16_t nb_hold; + uint64_t pkt_flags; + uint64_t vlan_flags; + + nb_rx = 0; + nb_hold = 0; + rxq = rx_queue; + rx_id = rxq->rx_tail; + rx_ring = rxq->rx_ring; + sw_ring = rxq->sw_ring; + vlan_flags = rxq->vlan_flags; + while (nb_rx < nb_pkts) { + /* + * The order of operations here is important as the DD status + * bit must not be read after any other descriptor fields. + * rx_ring and rxdp are pointing to volatile data so the order + * of accesses cannot be reordered by the compiler. If they were + * not volatile, they could be reordered which could lead to + * using invalid descriptor fields when read from rxd. + */ + rxdp = &rx_ring[rx_id]; + staterr = rxdp->wb.upper.status_error; + if (!(staterr & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD))) + break; + rxd = *rxdp; + + /* + * End of packet. + * + * If the IXGBE_RXDADV_STAT_EOP flag is not set, the RX packet + * is likely to be invalid and to be dropped by the various + * validation checks performed by the network stack. + * + * Allocate a new mbuf to replenish the RX ring descriptor. + * If the allocation fails: + * - arrange for that RX descriptor to be the first one + * being parsed the next time the receive function is + * invoked [on the same queue]. + * + * - Stop parsing the RX ring and return immediately. + * + * This policy do not drop the packet received in the RX + * descriptor for which the allocation of a new mbuf failed. + * Thus, it allows that packet to be later retrieved if + * mbuf have been freed in the mean time. + * As a side effect, holding RX descriptors instead of + * systematically giving them back to the NIC may lead to + * RX ring exhaustion situations. + * However, the NIC can gracefully prevent such situations + * to happen by sending specific "back-pressure" flow control + * frames to its peer(s). 
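ixgbe_recv_pkts_bulk_alloc() above caps each internal call at RTE_PMD_IXGBE_RX_MAX_BURST and stops as soon as a chunk returns fewer packets than requested, which means the ring is drained. A compact sketch of that chunking loop follows, with a stub standing in for rx_recv_pkts() and an invented "budget" instead of real hardware state.

/* Split a large burst into fixed-size chunks, stop on a short chunk (sketch). */
#include <stdint.h>
#include <stdio.h>

#define EX_MAX_BURST 32

/* stand-in for the per-chunk receive: pretend only *budget packets exist */
static uint16_t ex_recv_chunk(uint16_t *budget, uint16_t n)
{
	uint16_t got = n < *budget ? n : *budget;

	*budget = (uint16_t)(*budget - got);
	return got;
}

static uint16_t ex_recv_burst(uint16_t *budget, uint16_t nb_pkts)
{
	uint16_t nb_rx = 0;

	while (nb_pkts) {
		uint16_t n = nb_pkts < EX_MAX_BURST ? nb_pkts : EX_MAX_BURST;
		uint16_t ret = ex_recv_chunk(budget, n);

		nb_rx = (uint16_t)(nb_rx + ret);
		nb_pkts = (uint16_t)(nb_pkts - ret);
		if (ret < n)	/* short chunk: nothing left, stop early */
			break;
	}
	return nb_rx;
}

int main(void)
{
	uint16_t ready = 50;

	printf("received %u packets\n", (unsigned)ex_recv_burst(&ready, 128)); /* 50 */
	return 0;
}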
+ */ + PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u " + "ext_err_stat=0x%08x pkt_len=%u", + (unsigned) rxq->port_id, (unsigned) rxq->queue_id, + (unsigned) rx_id, (unsigned) staterr, + (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length)); + + nmb = rte_mbuf_raw_alloc(rxq->mb_pool); + if (nmb == NULL) { + PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u " + "queue_id=%u", (unsigned) rxq->port_id, + (unsigned) rxq->queue_id); + rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++; + break; + } + + nb_hold++; + rxe = &sw_ring[rx_id]; + rx_id++; + if (rx_id == rxq->nb_rx_desc) + rx_id = 0; + + /* Prefetch next mbuf while processing current one. */ + rte_ixgbe_prefetch(sw_ring[rx_id].mbuf); + + /* + * When next RX descriptor is on a cache-line boundary, + * prefetch the next 4 RX descriptors and the next 8 pointers + * to mbufs. + */ + if ((rx_id & 0x3) == 0) { + rte_ixgbe_prefetch(&rx_ring[rx_id]); + rte_ixgbe_prefetch(&sw_ring[rx_id]); + } + + rxm = rxe->mbuf; + rxe->mbuf = nmb; + dma_addr = + rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb)); + rxdp->read.hdr_addr = 0; + rxdp->read.pkt_addr = dma_addr; + + /* + * Initialize the returned mbuf. + * 1) setup generic mbuf fields: + * - number of segments, + * - next segment, + * - packet length, + * - RX port identifier. + * 2) integrate hardware offload data, if any: + * - RSS flag & hash, + * - IP checksum flag, + * - VLAN TCI, if any, + * - error flags. + */ + pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.wb.upper.length) - + rxq->crc_len); + rxm->data_off = RTE_PKTMBUF_HEADROOM; + rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off); + rxm->nb_segs = 1; + rxm->next = NULL; + rxm->pkt_len = pkt_len; + rxm->data_len = pkt_len; + rxm->port = rxq->port_id; + + pkt_info = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data); + /* Only valid if PKT_RX_VLAN set in pkt_flags */ + rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan); + + pkt_flags = rx_desc_status_to_pkt_flags(staterr, vlan_flags); + pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr); + pkt_flags = pkt_flags | + ixgbe_rxd_pkt_info_to_pkt_flags((uint16_t)pkt_info); + rxm->ol_flags = pkt_flags; + rxm->packet_type = + ixgbe_rxd_pkt_info_to_pkt_type(pkt_info, + rxq->pkt_type_mask); + + if (likely(pkt_flags & PKT_RX_RSS_HASH)) + rxm->hash.rss = rte_le_to_cpu_32( + rxd.wb.lower.hi_dword.rss); + else if (pkt_flags & PKT_RX_FDIR) { + rxm->hash.fdir.hash = rte_le_to_cpu_16( + rxd.wb.lower.hi_dword.csum_ip.csum) & + IXGBE_ATR_HASH_MASK; + rxm->hash.fdir.id = rte_le_to_cpu_16( + rxd.wb.lower.hi_dword.csum_ip.ip_id); + } + /* + * Store the mbuf address into the next entry of the array + * of returned packets. + */ + rx_pkts[nb_rx++] = rxm; + } + rxq->rx_tail = rx_id; + + /* + * If the number of free RX descriptors is greater than the RX free + * threshold of the queue, advance the Receive Descriptor Tail (RDT) + * register. + * Update the RDT with the value of the last processed RX descriptor + * minus 1, to guarantee that the RDT register is never equal to the + * RDH register, which creates a "full" ring situtation from the + * hardware point of view... + */ + nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold); + if (nb_hold > rxq->rx_free_thresh) { + PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u " + "nb_hold=%u nb_rx=%u", + (unsigned) rxq->port_id, (unsigned) rxq->queue_id, + (unsigned) rx_id, (unsigned) nb_hold, + (unsigned) nb_rx); + rx_id = (uint16_t) ((rx_id == 0) ? 
+ (rxq->nb_rx_desc - 1) : (rx_id - 1)); + IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id); + nb_hold = 0; + } + rxq->nb_rx_hold = nb_hold; + return nb_rx; +} + +/** + * Detect an RSC descriptor. + */ +static inline uint32_t +ixgbe_rsc_count(union ixgbe_adv_rx_desc *rx) +{ + return (rte_le_to_cpu_32(rx->wb.lower.lo_dword.data) & + IXGBE_RXDADV_RSCCNT_MASK) >> IXGBE_RXDADV_RSCCNT_SHIFT; +} + +/** + * ixgbe_fill_cluster_head_buf - fill the first mbuf of the returned packet + * + * Fill the following info in the HEAD buffer of the Rx cluster: + * - RX port identifier + * - hardware offload data, if any: + * - RSS flag & hash + * - IP checksum flag + * - VLAN TCI, if any + * - error flags + * @head HEAD of the packet cluster + * @desc HW descriptor to get data from + * @rxq Pointer to the Rx queue + */ +static inline void +ixgbe_fill_cluster_head_buf( + struct rte_mbuf *head, + union ixgbe_adv_rx_desc *desc, + struct ixgbe_rx_queue *rxq, + uint32_t staterr) +{ + uint32_t pkt_info; + uint64_t pkt_flags; + + head->port = rxq->port_id; + + /* The vlan_tci field is only valid when PKT_RX_VLAN is + * set in the pkt_flags field. + */ + head->vlan_tci = rte_le_to_cpu_16(desc->wb.upper.vlan); + pkt_info = rte_le_to_cpu_32(desc->wb.lower.lo_dword.data); + pkt_flags = rx_desc_status_to_pkt_flags(staterr, rxq->vlan_flags); + pkt_flags |= rx_desc_error_to_pkt_flags(staterr); + pkt_flags |= ixgbe_rxd_pkt_info_to_pkt_flags((uint16_t)pkt_info); + head->ol_flags = pkt_flags; + head->packet_type = + ixgbe_rxd_pkt_info_to_pkt_type(pkt_info, rxq->pkt_type_mask); + + if (likely(pkt_flags & PKT_RX_RSS_HASH)) + head->hash.rss = rte_le_to_cpu_32(desc->wb.lower.hi_dword.rss); + else if (pkt_flags & PKT_RX_FDIR) { + head->hash.fdir.hash = + rte_le_to_cpu_16(desc->wb.lower.hi_dword.csum_ip.csum) + & IXGBE_ATR_HASH_MASK; + head->hash.fdir.id = + rte_le_to_cpu_16(desc->wb.lower.hi_dword.csum_ip.ip_id); + } +} + +/** + * ixgbe_recv_pkts_lro - receive handler for and LRO case. + * + * @rx_queue Rx queue handle + * @rx_pkts table of received packets + * @nb_pkts size of rx_pkts table + * @bulk_alloc if TRUE bulk allocation is used for a HW ring refilling + * + * Handles the Rx HW ring completions when RSC feature is configured. Uses an + * additional ring of ixgbe_rsc_entry's that will hold the relevant RSC info. + * + * We use the same logic as in Linux and in FreeBSD ixgbe drivers: + * 1) When non-EOP RSC completion arrives: + * a) Update the HEAD of the current RSC aggregation cluster with the new + * segment's data length. + * b) Set the "next" pointer of the current segment to point to the segment + * at the NEXTP index. + * c) Pass the HEAD of RSC aggregation cluster on to the next NEXTP entry + * in the sw_rsc_ring. + * 2) When EOP arrives we just update the cluster's total length and offload + * flags and deliver the cluster up to the upper layers. In our case - put it + * in the rx_pkts table. + * + * Returns the number of received packets/clusters (according to the "bulk + * receive" interface). 
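The tail-register policy used above batches RDT writes: processed descriptors are merely counted in nb_hold until more than rx_free_thresh of them have accumulated, and the value written is one behind the next descriptor to be processed, so that RDT can never equal RDH. A small sketch of that rule follows, with the MMIO write replaced by a printf and invented ring parameters.

/* Batched tail update that always stays one descriptor behind (sketch). */
#include <stdint.h>
#include <stdio.h>

struct ex_q {
	uint16_t nb_desc;
	uint16_t free_thresh;
	uint16_t nb_hold;
};

static void ex_done_with_desc(struct ex_q *q, uint16_t rx_id /* next index to process */)
{
	q->nb_hold++;
	if (q->nb_hold <= q->free_thresh)
		return;

	uint16_t rdt = (rx_id == 0) ? (uint16_t)(q->nb_desc - 1)
				    : (uint16_t)(rx_id - 1);

	printf("write RDT=%u after holding %u descriptors\n",
	       (unsigned)rdt, (unsigned)q->nb_hold);
	q->nb_hold = 0;
}

int main(void)
{
	struct ex_q q = { .nb_desc = 16, .free_thresh = 4, .nb_hold = 0 };

	for (uint16_t id = 1; id <= 16; id++)
		ex_done_with_desc(&q, (uint16_t)(id % q.nb_desc));
	return 0;
}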
+ */ +static inline uint16_t +ixgbe_recv_pkts_lro(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts, + bool bulk_alloc) +{ + struct ixgbe_rx_queue *rxq = rx_queue; + volatile union ixgbe_adv_rx_desc *rx_ring = rxq->rx_ring; + struct ixgbe_rx_entry *sw_ring = rxq->sw_ring; + struct ixgbe_scattered_rx_entry *sw_sc_ring = rxq->sw_sc_ring; + uint16_t rx_id = rxq->rx_tail; + uint16_t nb_rx = 0; + uint16_t nb_hold = rxq->nb_rx_hold; + uint16_t prev_id = rxq->rx_tail; + + while (nb_rx < nb_pkts) { + bool eop; + struct ixgbe_rx_entry *rxe; + struct ixgbe_scattered_rx_entry *sc_entry; + struct ixgbe_scattered_rx_entry *next_sc_entry = NULL; + struct ixgbe_rx_entry *next_rxe = NULL; + struct rte_mbuf *first_seg; + struct rte_mbuf *rxm; + struct rte_mbuf *nmb = NULL; + union ixgbe_adv_rx_desc rxd; + uint16_t data_len; + uint16_t next_id; + volatile union ixgbe_adv_rx_desc *rxdp; + uint32_t staterr; + +next_desc: + /* + * The code in this whole file uses the volatile pointer to + * ensure the read ordering of the status and the rest of the + * descriptor fields (on the compiler level only!!!). This is so + * UGLY - why not to just use the compiler barrier instead? DPDK + * even has the rte_compiler_barrier() for that. + * + * But most importantly this is just wrong because this doesn't + * ensure memory ordering in a general case at all. For + * instance, DPDK is supposed to work on Power CPUs where + * compiler barrier may just not be enough! + * + * I tried to write only this function properly to have a + * starting point (as a part of an LRO/RSC series) but the + * compiler cursed at me when I tried to cast away the + * "volatile" from rx_ring (yes, it's volatile too!!!). So, I'm + * keeping it the way it is for now. + * + * The code in this file is broken in so many other places and + * will just not work on a big endian CPU anyway therefore the + * lines below will have to be revisited together with the rest + * of the ixgbe PMD. + * + * TODO: + * - Get rid of "volatile" and let the compiler do its job. + * - Use the proper memory barrier (rte_rmb()) to ensure the + * memory ordering below. + */ + rxdp = &rx_ring[rx_id]; + staterr = rte_le_to_cpu_32(rxdp->wb.upper.status_error); + + if (!(staterr & IXGBE_RXDADV_STAT_DD)) + break; + + rxd = *rxdp; + + PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u " + "staterr=0x%x data_len=%u", + rxq->port_id, rxq->queue_id, rx_id, staterr, + rte_le_to_cpu_16(rxd.wb.upper.length)); + + if (!bulk_alloc) { + nmb = rte_mbuf_raw_alloc(rxq->mb_pool); + if (nmb == NULL) { + PMD_RX_LOG(DEBUG, "RX mbuf alloc failed " + "port_id=%u queue_id=%u", + rxq->port_id, rxq->queue_id); + + rte_eth_devices[rxq->port_id].data-> + rx_mbuf_alloc_failed++; + break; + } + } else if (nb_hold > rxq->rx_free_thresh) { + uint16_t next_rdt = rxq->rx_free_trigger; + + if (!ixgbe_rx_alloc_bufs(rxq, false)) { + rte_wmb(); + IXGBE_PCI_REG_WRITE_RELAXED(rxq->rdt_reg_addr, + next_rdt); + nb_hold -= rxq->rx_free_thresh; + } else { + PMD_RX_LOG(DEBUG, "RX bulk alloc failed " + "port_id=%u queue_id=%u", + rxq->port_id, rxq->queue_id); + + rte_eth_devices[rxq->port_id].data-> + rx_mbuf_alloc_failed++; + break; + } + } + + nb_hold++; + rxe = &sw_ring[rx_id]; + eop = staterr & IXGBE_RXDADV_STAT_EOP; + + next_id = rx_id + 1; + if (next_id == rxq->nb_rx_desc) + next_id = 0; + + /* Prefetch next mbuf while processing current one. 
*/ + rte_ixgbe_prefetch(sw_ring[next_id].mbuf); + + /* + * When next RX descriptor is on a cache-line boundary, + * prefetch the next 4 RX descriptors and the next 4 pointers + * to mbufs. + */ + if ((next_id & 0x3) == 0) { + rte_ixgbe_prefetch(&rx_ring[next_id]); + rte_ixgbe_prefetch(&sw_ring[next_id]); + } + + rxm = rxe->mbuf; + + if (!bulk_alloc) { + __le64 dma = + rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb)); + /* + * Update RX descriptor with the physical address of the + * new data buffer of the new allocated mbuf. + */ + rxe->mbuf = nmb; + + rxm->data_off = RTE_PKTMBUF_HEADROOM; + rxdp->read.hdr_addr = 0; + rxdp->read.pkt_addr = dma; + } else + rxe->mbuf = NULL; + + /* + * Set data length & data buffer address of mbuf. + */ + data_len = rte_le_to_cpu_16(rxd.wb.upper.length); + rxm->data_len = data_len; + + if (!eop) { + uint16_t nextp_id; + /* + * Get next descriptor index: + * - For RSC it's in the NEXTP field. + * - For a scattered packet - it's just a following + * descriptor. + */ + if (ixgbe_rsc_count(&rxd)) + nextp_id = + (staterr & IXGBE_RXDADV_NEXTP_MASK) >> + IXGBE_RXDADV_NEXTP_SHIFT; + else + nextp_id = next_id; + + next_sc_entry = &sw_sc_ring[nextp_id]; + next_rxe = &sw_ring[nextp_id]; + rte_ixgbe_prefetch(next_rxe); + } + + sc_entry = &sw_sc_ring[rx_id]; + first_seg = sc_entry->fbuf; + sc_entry->fbuf = NULL; + + /* + * If this is the first buffer of the received packet, + * set the pointer to the first mbuf of the packet and + * initialize its context. + * Otherwise, update the total length and the number of segments + * of the current scattered packet, and update the pointer to + * the last mbuf of the current packet. + */ + if (first_seg == NULL) { + first_seg = rxm; + first_seg->pkt_len = data_len; + first_seg->nb_segs = 1; + } else { + first_seg->pkt_len += data_len; + first_seg->nb_segs++; + } + + prev_id = rx_id; + rx_id = next_id; + + /* + * If this is not the last buffer of the received packet, update + * the pointer to the first mbuf at the NEXTP entry in the + * sw_sc_ring and continue to parse the RX ring. + */ + if (!eop && next_rxe) { + rxm->next = next_rxe->mbuf; + next_sc_entry->fbuf = first_seg; + goto next_desc; + } + + /* Initialize the first mbuf of the returned packet */ + ixgbe_fill_cluster_head_buf(first_seg, &rxd, rxq, staterr); + + /* + * Deal with the case, when HW CRC srip is disabled. + * That can't happen when LRO is enabled, but still could + * happen for scattered RX mode. + */ + first_seg->pkt_len -= rxq->crc_len; + if (unlikely(rxm->data_len <= rxq->crc_len)) { + struct rte_mbuf *lp; + + for (lp = first_seg; lp->next != rxm; lp = lp->next) + ; + + first_seg->nb_segs--; + lp->data_len -= rxq->crc_len - rxm->data_len; + lp->next = NULL; + rte_pktmbuf_free_seg(rxm); + } else + rxm->data_len -= rxq->crc_len; + + /* Prefetch data of first segment, if configured to do so. */ + rte_packet_prefetch((char *)first_seg->buf_addr + + first_seg->data_off); + + /* + * Store the mbuf address into the next entry of the array + * of returned packets. + */ + rx_pkts[nb_rx++] = first_seg; + } + + /* + * Record index of the next RX descriptor to probe. + */ + rxq->rx_tail = rx_id; + + /* + * If the number of free RX descriptors is greater than the RX free + * threshold of the queue, advance the Receive Descriptor Tail (RDT) + * register. 
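The scattered/LRO path above chains segments behind a head mbuf (which carries pkt_len and nb_segs) and, when hardware CRC stripping is disabled, trims the CRC from the tail of the chain, dropping the last segment entirely if it holds nothing but CRC bytes. The sketch below reproduces that trim on a simplified segment structure; ex_seg and ex_pkt are stand-ins for rte_mbuf, and the 4-byte CRC and segment sizes are example values.

/* Trim a trailing CRC from a segment chain (sketch). */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct ex_seg {
	struct ex_seg *next;
	uint16_t data_len;
};

struct ex_pkt {
	struct ex_seg *first;
	uint16_t nb_segs;
	uint32_t pkt_len;
};

static void ex_strip_crc(struct ex_pkt *p, struct ex_seg *last, uint16_t crc_len)
{
	p->pkt_len -= crc_len;

	if (last->data_len <= crc_len) {
		/* last segment holds only CRC bytes: unlink it and trim the
		 * remaining CRC bytes from the previous segment */
		struct ex_seg *prev = p->first;

		while (prev->next != last)
			prev = prev->next;
		prev->data_len = (uint16_t)(prev->data_len - (crc_len - last->data_len));
		prev->next = NULL;
		p->nb_segs--;
		free(last);
	} else {
		last->data_len = (uint16_t)(last->data_len - crc_len);
	}
}

int main(void)
{
	struct ex_seg *s2 = malloc(sizeof(*s2));

	if (s2 == NULL)
		return 1;
	s2->next = NULL;
	s2->data_len = 2;			/* shorter than a 4-byte CRC */

	struct ex_seg s1 = { .next = s2, .data_len = 60 };
	struct ex_pkt p = { .first = &s1, .nb_segs = 2, .pkt_len = 62 };

	ex_strip_crc(&p, s2, 4);
	printf("segs=%u pkt_len=%u first.data_len=%u\n",
	       (unsigned)p.nb_segs, (unsigned)p.pkt_len, (unsigned)s1.data_len);
	return 0;	/* prints: segs=1 pkt_len=58 first.data_len=58 */
}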
+ * Update the RDT with the value of the last processed RX descriptor + * minus 1, to guarantee that the RDT register is never equal to the + * RDH register, which creates a "full" ring situtation from the + * hardware point of view... + */ + if (!bulk_alloc && nb_hold > rxq->rx_free_thresh) { + PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u " + "nb_hold=%u nb_rx=%u", + rxq->port_id, rxq->queue_id, rx_id, nb_hold, nb_rx); + + rte_wmb(); + IXGBE_PCI_REG_WRITE_RELAXED(rxq->rdt_reg_addr, prev_id); + nb_hold = 0; + } + + rxq->nb_rx_hold = nb_hold; + return nb_rx; +} + +uint16_t +ixgbe_recv_pkts_lro_single_alloc(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + return ixgbe_recv_pkts_lro(rx_queue, rx_pkts, nb_pkts, false); +} + +uint16_t +ixgbe_recv_pkts_lro_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + return ixgbe_recv_pkts_lro(rx_queue, rx_pkts, nb_pkts, true); +} + +/********************************************************************* + * + * Queue management functions + * + **********************************************************************/ + +static void __rte_cold +ixgbe_tx_queue_release_mbufs(struct ixgbe_tx_queue *txq) +{ + unsigned i; + + if (txq->sw_ring != NULL) { + for (i = 0; i < txq->nb_tx_desc; i++) { + if (txq->sw_ring[i].mbuf != NULL) { + rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf); + txq->sw_ring[i].mbuf = NULL; + } + } + } +} + +static int +ixgbe_tx_done_cleanup_full(struct ixgbe_tx_queue *txq, uint32_t free_cnt) +{ + struct ixgbe_tx_entry *swr_ring = txq->sw_ring; + uint16_t i, tx_last, tx_id; + uint16_t nb_tx_free_last; + uint16_t nb_tx_to_clean; + uint32_t pkt_cnt; + + /* Start free mbuf from the next of tx_tail */ + tx_last = txq->tx_tail; + tx_id = swr_ring[tx_last].next_id; + + if (txq->nb_tx_free == 0 && ixgbe_xmit_cleanup(txq)) + return 0; + + nb_tx_to_clean = txq->nb_tx_free; + nb_tx_free_last = txq->nb_tx_free; + if (!free_cnt) + free_cnt = txq->nb_tx_desc; + + /* Loop through swr_ring to count the amount of + * freeable mubfs and packets. 
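ixgbe_tx_done_cleanup_full() above frees transmitted segments by walking the software ring through next_id links and counts a completed packet only when the freed entry is the one recorded as the packet's last segment (its last_id equals its own index). The sketch below shows just that counting rule; unlike the driver it walks a fixed number of entries, uses a boolean in place of the mbuf pointer, and omits the intermediate ixgbe_xmit_cleanup() passes.

/* Count completed packets while freeing transmitted segments (sketch). */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct ex_tx_entry {
	bool has_mbuf;
	uint16_t next_id;
	uint16_t last_id;	/* index of the last segment of this packet */
};

static uint32_t ex_cleanup(struct ex_tx_entry *ring, uint16_t start, uint16_t nb)
{
	uint32_t pkts = 0;
	uint16_t id = start;

	for (uint16_t i = 0; i < nb; i++, id = ring[id].next_id) {
		if (ring[id].has_mbuf) {
			ring[id].has_mbuf = false;		/* free the segment */
			pkts += (ring[id].last_id == id);	/* last seg ends a packet */
		}
	}
	return pkts;
}

int main(void)
{
	/* 4-descriptor ring: one 3-segment packet in 0..2, one 1-segment in 3 */
	struct ex_tx_entry ring[4] = {
		{ true, 1, 2 }, { true, 2, 2 }, { true, 3, 2 }, { true, 0, 3 },
	};

	printf("cleaned %u packets\n", (unsigned)ex_cleanup(ring, 0, 4)); /* 2 */
	return 0;
}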
+ */ + for (pkt_cnt = 0; pkt_cnt < free_cnt; ) { + for (i = 0; i < nb_tx_to_clean && + pkt_cnt < free_cnt && + tx_id != tx_last; i++) { + if (swr_ring[tx_id].mbuf != NULL) { + rte_pktmbuf_free_seg(swr_ring[tx_id].mbuf); + swr_ring[tx_id].mbuf = NULL; + + /* + * last segment in the packet, + * increment packet count + */ + pkt_cnt += (swr_ring[tx_id].last_id == tx_id); + } + + tx_id = swr_ring[tx_id].next_id; + } + + if (txq->tx_rs_thresh > txq->nb_tx_desc - + txq->nb_tx_free || tx_id == tx_last) + break; + + if (pkt_cnt < free_cnt) { + if (ixgbe_xmit_cleanup(txq)) + break; + + nb_tx_to_clean = txq->nb_tx_free - nb_tx_free_last; + nb_tx_free_last = txq->nb_tx_free; + } + } + + return (int)pkt_cnt; +} + +static int +ixgbe_tx_done_cleanup_simple(struct ixgbe_tx_queue *txq, + uint32_t free_cnt) +{ + int i, n, cnt; + + if (free_cnt == 0 || free_cnt > txq->nb_tx_desc) + free_cnt = txq->nb_tx_desc; + + cnt = free_cnt - free_cnt % txq->tx_rs_thresh; + + for (i = 0; i < cnt; i += n) { + if (txq->nb_tx_desc - txq->nb_tx_free < txq->tx_rs_thresh) + break; + + n = ixgbe_tx_free_bufs(txq); + + if (n == 0) + break; + } + + return i; +} + +static int +ixgbe_tx_done_cleanup_vec(struct ixgbe_tx_queue *txq __rte_unused, + uint32_t free_cnt __rte_unused) +{ + return -ENOTSUP; +} + +int +ixgbe_dev_tx_done_cleanup(void *tx_queue, uint32_t free_cnt) +{ + struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue; + if (txq->offloads == 0 && +#ifdef RTE_LIBRTE_SECURITY + !(txq->using_ipsec) && +#endif + txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST) { + if (txq->tx_rs_thresh <= RTE_IXGBE_TX_MAX_FREE_BUF_SZ && + (rte_eal_process_type() != RTE_PROC_PRIMARY || + txq->sw_ring_v != NULL)) { + return ixgbe_tx_done_cleanup_vec(txq, free_cnt); + } else { + return ixgbe_tx_done_cleanup_simple(txq, free_cnt); + } + } + + return ixgbe_tx_done_cleanup_full(txq, free_cnt); +} + +static void __rte_cold +ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq) +{ + if (txq != NULL && + txq->sw_ring != NULL) + rte_free(txq->sw_ring); +} + +static void __rte_cold +ixgbe_tx_queue_release(struct ixgbe_tx_queue *txq) +{ + if (txq != NULL && txq->ops != NULL) { + txq->ops->release_mbufs(txq); + txq->ops->free_swring(txq); + rte_free(txq); + } +} + +void __rte_cold +ixgbe_dev_tx_queue_release(void *txq) +{ + ixgbe_tx_queue_release(txq); +} + +/* (Re)set dynamic ixgbe_tx_queue fields to defaults */ +static void __rte_cold +ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq) +{ + static const union ixgbe_adv_tx_desc zeroed_desc = {{0}}; + struct ixgbe_tx_entry *txe = txq->sw_ring; + uint16_t prev, i; + + /* Zero out HW ring memory */ + for (i = 0; i < txq->nb_tx_desc; i++) { + txq->tx_ring[i] = zeroed_desc; + } + + /* Initialize SW ring entries */ + prev = (uint16_t) (txq->nb_tx_desc - 1); + for (i = 0; i < txq->nb_tx_desc; i++) { + volatile union ixgbe_adv_tx_desc *txd = &txq->tx_ring[i]; + + txd->wb.status = rte_cpu_to_le_32(IXGBE_TXD_STAT_DD); + txe[i].mbuf = NULL; + txe[i].last_id = i; + txe[prev].next_id = i; + prev = i; + } + + txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1); + txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1); + + txq->tx_tail = 0; + txq->nb_tx_used = 0; + /* + * Always allow 1 descriptor to be un-allocated to avoid + * a H/W race condition + */ + txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1); + txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1); + txq->ctx_curr = 0; + memset((void *)&txq->ctx_cache, 0, + IXGBE_CTX_NUM * sizeof(struct ixgbe_advctx_info)); +} + +static const struct ixgbe_txq_ops 
def_txq_ops = { + .release_mbufs = ixgbe_tx_queue_release_mbufs, + .free_swring = ixgbe_tx_free_swring, + .reset = ixgbe_reset_tx_queue, +}; + +/* Takes an ethdev and a queue and sets up the tx function to be used based on + * the queue parameters. Used in tx_queue_setup by primary process and then + * in dev_init by secondary process when attaching to an existing ethdev. + */ +void __rte_cold +ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq) +{ + /* Use a simple Tx queue (no offloads, no multi segs) if possible */ + if ((txq->offloads == 0) && +#ifdef RTE_LIBRTE_SECURITY + !(txq->using_ipsec) && +#endif + (txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST)) { + PMD_INIT_LOG(DEBUG, "Using simple tx code path"); + dev->tx_pkt_prepare = NULL; + if (txq->tx_rs_thresh <= RTE_IXGBE_TX_MAX_FREE_BUF_SZ && + (rte_eal_process_type() != RTE_PROC_PRIMARY || + ixgbe_txq_vec_setup(txq) == 0)) { + PMD_INIT_LOG(DEBUG, "Vector tx enabled."); + dev->tx_pkt_burst = ixgbe_xmit_pkts_vec; + } else + dev->tx_pkt_burst = ixgbe_xmit_pkts_simple; + } else { + PMD_INIT_LOG(DEBUG, "Using full-featured tx code path"); + PMD_INIT_LOG(DEBUG, + " - offloads = 0x%" PRIx64, + txq->offloads); + PMD_INIT_LOG(DEBUG, + " - tx_rs_thresh = %lu " "[RTE_PMD_IXGBE_TX_MAX_BURST=%lu]", + (unsigned long)txq->tx_rs_thresh, + (unsigned long)RTE_PMD_IXGBE_TX_MAX_BURST); + dev->tx_pkt_burst = ixgbe_xmit_pkts; + dev->tx_pkt_prepare = ixgbe_prep_pkts; + } +} + +uint64_t +ixgbe_get_tx_queue_offloads(struct rte_eth_dev *dev) +{ + RTE_SET_USED(dev); + + return 0; +} + +uint64_t +ixgbe_get_tx_port_offloads(struct rte_eth_dev *dev) +{ + uint64_t tx_offload_capa; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + tx_offload_capa = + DEV_TX_OFFLOAD_VLAN_INSERT | + DEV_TX_OFFLOAD_IPV4_CKSUM | + DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_TCP_CKSUM | + DEV_TX_OFFLOAD_SCTP_CKSUM | + DEV_TX_OFFLOAD_TCP_TSO | + DEV_TX_OFFLOAD_MULTI_SEGS; + + if (hw->mac.type == ixgbe_mac_82599EB || + hw->mac.type == ixgbe_mac_X540) + tx_offload_capa |= DEV_TX_OFFLOAD_MACSEC_INSERT; + + if (hw->mac.type == ixgbe_mac_X550 || + hw->mac.type == ixgbe_mac_X550EM_x || + hw->mac.type == ixgbe_mac_X550EM_a) + tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM; + +#ifdef RTE_LIBRTE_SECURITY + if (dev->security_ctx) + tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY; +#endif + return tx_offload_capa; +} + +int __rte_cold +ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, + uint16_t queue_idx, + uint16_t nb_desc, + unsigned int socket_id, + const struct rte_eth_txconf *tx_conf) +{ + const struct rte_memzone *tz; + struct ixgbe_tx_queue *txq; + struct ixgbe_hw *hw; + uint16_t tx_rs_thresh, tx_free_thresh; + uint64_t offloads; + + PMD_INIT_FUNC_TRACE(); + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads; + + /* + * Validate number of transmit descriptors. + * It must not exceed hardware maximum, and must be multiple + * of IXGBE_ALIGN. + */ + if (nb_desc % IXGBE_TXD_ALIGN != 0 || + (nb_desc > IXGBE_MAX_RING_DESC) || + (nb_desc < IXGBE_MIN_RING_DESC)) { + return -EINVAL; + } + + /* + * The following two parameters control the setting of the RS bit on + * transmit descriptors. + * TX descriptors will have their RS bit set after txq->tx_rs_thresh + * descriptors have been used. 
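ixgbe_set_tx_function() above is a plain function-pointer dispatch: the simple transmit path is installed only when the queue has no offloads and tx_rs_thresh is at least a full burst; otherwise the full-featured path plus a prepare hook is used. A minimal sketch of the same selection follows, with stub handlers and an invented EX_TX_MAX_BURST threshold.

/* Select a Tx burst handler at setup time via function pointers (sketch). */
#include <stdint.h>
#include <stdio.h>

#define EX_TX_MAX_BURST 32

struct ex_dev {
	uint16_t (*tx_burst)(void *txq, void **pkts, uint16_t n);
	uint16_t (*tx_prepare)(void *txq, void **pkts, uint16_t n);
};

static uint16_t ex_xmit_simple(void *txq, void **pkts, uint16_t n)
{ (void)txq; (void)pkts; return n; }

static uint16_t ex_xmit_full(void *txq, void **pkts, uint16_t n)
{ (void)txq; (void)pkts; return n; }

static uint16_t ex_prep_full(void *txq, void **pkts, uint16_t n)
{ (void)txq; (void)pkts; return n; }

static void ex_set_tx_function(struct ex_dev *dev, uint64_t offloads,
			       uint16_t tx_rs_thresh)
{
	if (offloads == 0 && tx_rs_thresh >= EX_TX_MAX_BURST) {
		dev->tx_burst = ex_xmit_simple;
		dev->tx_prepare = NULL;		/* nothing to validate */
	} else {
		dev->tx_burst = ex_xmit_full;
		dev->tx_prepare = ex_prep_full;	/* e.g. checksum/TSO checks */
	}
}

int main(void)
{
	struct ex_dev dev;

	ex_set_tx_function(&dev, 0, 32);
	printf("simple path: %s\n", dev.tx_burst == ex_xmit_simple ? "yes" : "no");

	ex_set_tx_function(&dev, 0x1 /* some offload bit */, 32);
	printf("simple path: %s\n", dev.tx_burst == ex_xmit_simple ? "yes" : "no");
	return 0;
}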
+ * The TX descriptor ring will be cleaned after txq->tx_free_thresh + * descriptors are used or if the number of descriptors required + * to transmit a packet is greater than the number of free TX + * descriptors. + * The following constraints must be satisfied: + * tx_rs_thresh must be greater than 0. + * tx_rs_thresh must be less than the size of the ring minus 2. + * tx_rs_thresh must be less than or equal to tx_free_thresh. + * tx_rs_thresh must be a divisor of the ring size. + * tx_free_thresh must be greater than 0. + * tx_free_thresh must be less than the size of the ring minus 3. + * tx_free_thresh + tx_rs_thresh must not exceed nb_desc. + * One descriptor in the TX ring is used as a sentinel to avoid a + * H/W race condition, hence the maximum threshold constraints. + * When set to zero use default values. + */ + tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ? + tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH); + /* force tx_rs_thresh to adapt an aggresive tx_free_thresh */ + tx_rs_thresh = (DEFAULT_TX_RS_THRESH + tx_free_thresh > nb_desc) ? + nb_desc - tx_free_thresh : DEFAULT_TX_RS_THRESH; + if (tx_conf->tx_rs_thresh > 0) + tx_rs_thresh = tx_conf->tx_rs_thresh; + if (tx_rs_thresh + tx_free_thresh > nb_desc) { + PMD_INIT_LOG(ERR, "tx_rs_thresh + tx_free_thresh must not " + "exceed nb_desc. (tx_rs_thresh=%u " + "tx_free_thresh=%u nb_desc=%u port = %d queue=%d)", + (unsigned int)tx_rs_thresh, + (unsigned int)tx_free_thresh, + (unsigned int)nb_desc, + (int)dev->data->port_id, + (int)queue_idx); + return -(EINVAL); + } + if (tx_rs_thresh >= (nb_desc - 2)) { + PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the number " + "of TX descriptors minus 2. (tx_rs_thresh=%u " + "port=%d queue=%d)", (unsigned int)tx_rs_thresh, + (int)dev->data->port_id, (int)queue_idx); + return -(EINVAL); + } + if (tx_rs_thresh > DEFAULT_TX_RS_THRESH) { + PMD_INIT_LOG(ERR, "tx_rs_thresh must be less or equal than %u. " + "(tx_rs_thresh=%u port=%d queue=%d)", + DEFAULT_TX_RS_THRESH, (unsigned int)tx_rs_thresh, + (int)dev->data->port_id, (int)queue_idx); + return -(EINVAL); + } + if (tx_free_thresh >= (nb_desc - 3)) { + PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the " + "tx_free_thresh must be less than the number of " + "TX descriptors minus 3. (tx_free_thresh=%u " + "port=%d queue=%d)", + (unsigned int)tx_free_thresh, + (int)dev->data->port_id, (int)queue_idx); + return -(EINVAL); + } + if (tx_rs_thresh > tx_free_thresh) { + PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than or equal to " + "tx_free_thresh. (tx_free_thresh=%u " + "tx_rs_thresh=%u port=%d queue=%d)", + (unsigned int)tx_free_thresh, + (unsigned int)tx_rs_thresh, + (int)dev->data->port_id, + (int)queue_idx); + return -(EINVAL); + } + if ((nb_desc % tx_rs_thresh) != 0) { + PMD_INIT_LOG(ERR, "tx_rs_thresh must be a divisor of the " + "number of TX descriptors. (tx_rs_thresh=%u " + "port=%d queue=%d)", (unsigned int)tx_rs_thresh, + (int)dev->data->port_id, (int)queue_idx); + return -(EINVAL); + } + + /* + * If rs_bit_thresh is greater than 1, then TX WTHRESH should be + * set to 0. If WTHRESH is greater than zero, the RS bit is ignored + * by the NIC and all descriptors are written back after the NIC + * accumulates WTHRESH descriptors. + */ + if ((tx_rs_thresh > 1) && (tx_conf->tx_thresh.wthresh != 0)) { + PMD_INIT_LOG(ERR, "TX WTHRESH must be set to 0 if " + "tx_rs_thresh is greater than 1. 
(tx_rs_thresh=%u " + "port=%d queue=%d)", (unsigned int)tx_rs_thresh, + (int)dev->data->port_id, (int)queue_idx); + return -(EINVAL); + } + + /* Free memory prior to re-allocation if needed... */ + if (dev->data->tx_queues[queue_idx] != NULL) { + ixgbe_tx_queue_release(dev->data->tx_queues[queue_idx]); + dev->data->tx_queues[queue_idx] = NULL; + } + + /* First allocate the tx queue data structure */ + txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct ixgbe_tx_queue), + RTE_CACHE_LINE_SIZE, socket_id); + if (txq == NULL) + return -ENOMEM; + + /* + * Allocate TX ring hardware descriptors. A memzone large enough to + * handle the maximum ring size is allocated in order to allow for + * resizing in later calls to the queue setup function. + */ + tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx, + sizeof(union ixgbe_adv_tx_desc) * IXGBE_MAX_RING_DESC, + IXGBE_ALIGN, socket_id); + if (tz == NULL) { + ixgbe_tx_queue_release(txq); + return -ENOMEM; + } + + txq->nb_tx_desc = nb_desc; + txq->tx_rs_thresh = tx_rs_thresh; + txq->tx_free_thresh = tx_free_thresh; + txq->pthresh = tx_conf->tx_thresh.pthresh; + txq->hthresh = tx_conf->tx_thresh.hthresh; + txq->wthresh = tx_conf->tx_thresh.wthresh; + txq->queue_id = queue_idx; + txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ? + queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx); + txq->port_id = dev->data->port_id; + txq->offloads = offloads; + txq->ops = &def_txq_ops; + txq->tx_deferred_start = tx_conf->tx_deferred_start; +#ifdef RTE_LIBRTE_SECURITY + txq->using_ipsec = !!(dev->data->dev_conf.txmode.offloads & + DEV_TX_OFFLOAD_SECURITY); +#endif + + /* + * Modification to set VFTDT for virtual function if vf is detected + */ + if (hw->mac.type == ixgbe_mac_82599_vf || + hw->mac.type == ixgbe_mac_X540_vf || + hw->mac.type == ixgbe_mac_X550_vf || + hw->mac.type == ixgbe_mac_X550EM_x_vf || + hw->mac.type == ixgbe_mac_X550EM_a_vf) + txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_VFTDT(queue_idx)); + else + txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_TDT(txq->reg_idx)); + + txq->tx_ring_phys_addr = tz->iova; + txq->tx_ring = (union ixgbe_adv_tx_desc *) tz->addr; + + /* Allocate software ring */ + txq->sw_ring = rte_zmalloc_socket("txq->sw_ring", + sizeof(struct ixgbe_tx_entry) * nb_desc, + RTE_CACHE_LINE_SIZE, socket_id); + if (txq->sw_ring == NULL) { + ixgbe_tx_queue_release(txq); + return -ENOMEM; + } + PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64, + txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr); + + /* set up vector or scalar TX function as appropriate */ + ixgbe_set_tx_function(dev, txq); + + txq->ops->reset(txq); + + dev->data->tx_queues[queue_idx] = txq; + + + return 0; +} + +/** + * ixgbe_free_sc_cluster - free the not-yet-completed scattered cluster + * + * The "next" pointer of the last segment of (not-yet-completed) RSC clusters + * in the sw_rsc_ring is not set to NULL but rather points to the next + * mbuf of this RSC aggregation (that has not been completed yet and still + * resides on the HW ring). So, instead of calling for rte_pktmbuf_free() we + * will just free first "nb_segs" segments of the cluster explicitly by calling + * an rte_pktmbuf_free_seg(). 
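The queue setup functions above follow an allocate-then-unwind pattern: every later allocation failure releases whatever was already set up (via the queue release helper) and returns -ENOMEM, so a half-initialised queue is never published. The sketch below shows the shape of that pattern with plain calloc/free standing in for rte_zmalloc_socket() and the DMA memzone.

/* Allocate several resources, release everything on any failure (sketch). */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct ex_txq {
	void *hw_ring;
	void *sw_ring;
};

static void ex_txq_release(struct ex_txq *q)
{
	if (q == NULL)
		return;
	free(q->sw_ring);
	free(q->hw_ring);
	free(q);
}

static int ex_txq_setup(struct ex_txq **out, uint16_t nb_desc)
{
	struct ex_txq *q = calloc(1, sizeof(*q));

	if (q == NULL)
		return -ENOMEM;

	q->hw_ring = calloc(nb_desc, 16);	/* descriptor ring stand-in */
	if (q->hw_ring == NULL) {
		ex_txq_release(q);
		return -ENOMEM;
	}

	q->sw_ring = calloc(nb_desc, sizeof(void *));	/* per-desc mbuf slots */
	if (q->sw_ring == NULL) {
		ex_txq_release(q);		/* frees hw_ring and q too */
		return -ENOMEM;
	}

	*out = q;
	return 0;
}

int main(void)
{
	struct ex_txq *q = NULL;

	if (ex_txq_setup(&q, 512) == 0)
		printf("queue ready\n");
	ex_txq_release(q);
	return 0;
}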
+ * + * @m scattered cluster head + */ +static void __rte_cold +ixgbe_free_sc_cluster(struct rte_mbuf *m) +{ + uint16_t i, nb_segs = m->nb_segs; + struct rte_mbuf *next_seg; + + for (i = 0; i < nb_segs; i++) { + next_seg = m->next; + rte_pktmbuf_free_seg(m); + m = next_seg; + } +} + +static void __rte_cold +ixgbe_rx_queue_release_mbufs(struct ixgbe_rx_queue *rxq) +{ + unsigned i; + + /* SSE Vector driver has a different way of releasing mbufs. */ + if (rxq->rx_using_sse) { + ixgbe_rx_queue_release_mbufs_vec(rxq); + return; + } + + if (rxq->sw_ring != NULL) { + for (i = 0; i < rxq->nb_rx_desc; i++) { + if (rxq->sw_ring[i].mbuf != NULL) { + rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf); + rxq->sw_ring[i].mbuf = NULL; + } + } + if (rxq->rx_nb_avail) { + for (i = 0; i < rxq->rx_nb_avail; ++i) { + struct rte_mbuf *mb; + + mb = rxq->rx_stage[rxq->rx_next_avail + i]; + rte_pktmbuf_free_seg(mb); + } + rxq->rx_nb_avail = 0; + } + } + + if (rxq->sw_sc_ring) + for (i = 0; i < rxq->nb_rx_desc; i++) + if (rxq->sw_sc_ring[i].fbuf) { + ixgbe_free_sc_cluster(rxq->sw_sc_ring[i].fbuf); + rxq->sw_sc_ring[i].fbuf = NULL; + } +} + +static void __rte_cold +ixgbe_rx_queue_release(struct ixgbe_rx_queue *rxq) +{ + if (rxq != NULL) { + ixgbe_rx_queue_release_mbufs(rxq); + rte_free(rxq->sw_ring); + rte_free(rxq->sw_sc_ring); + rte_free(rxq); + } +} + +void __rte_cold +ixgbe_dev_rx_queue_release(void *rxq) +{ + ixgbe_rx_queue_release(rxq); +} + +/* + * Check if Rx Burst Bulk Alloc function can be used. + * Return + * 0: the preconditions are satisfied and the bulk allocation function + * can be used. + * -EINVAL: the preconditions are NOT satisfied and the default Rx burst + * function must be used. + */ +static inline int __rte_cold +check_rx_burst_bulk_alloc_preconditions(struct ixgbe_rx_queue *rxq) +{ + int ret = 0; + + /* + * Make sure the following pre-conditions are satisfied: + * rxq->rx_free_thresh >= RTE_PMD_IXGBE_RX_MAX_BURST + * rxq->rx_free_thresh < rxq->nb_rx_desc + * (rxq->nb_rx_desc % rxq->rx_free_thresh) == 0 + * Scattered packets are not supported. This should be checked + * outside of this function. + */ + if (!(rxq->rx_free_thresh >= RTE_PMD_IXGBE_RX_MAX_BURST)) { + PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: " + "rxq->rx_free_thresh=%d, " + "RTE_PMD_IXGBE_RX_MAX_BURST=%d", + rxq->rx_free_thresh, RTE_PMD_IXGBE_RX_MAX_BURST); + ret = -EINVAL; + } else if (!(rxq->rx_free_thresh < rxq->nb_rx_desc)) { + PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: " + "rxq->rx_free_thresh=%d, " + "rxq->nb_rx_desc=%d", + rxq->rx_free_thresh, rxq->nb_rx_desc); + ret = -EINVAL; + } else if (!((rxq->nb_rx_desc % rxq->rx_free_thresh) == 0)) { + PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: " + "rxq->nb_rx_desc=%d, " + "rxq->rx_free_thresh=%d", + rxq->nb_rx_desc, rxq->rx_free_thresh); + ret = -EINVAL; + } + + return ret; +} + +/* Reset dynamic ixgbe_rx_queue fields back to defaults */ +static void __rte_cold +ixgbe_reset_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_rx_queue *rxq) +{ + static const union ixgbe_adv_rx_desc zeroed_desc = {{0}}; + unsigned i; + uint16_t len = rxq->nb_rx_desc; + + /* + * By default, the Rx queue setup function allocates enough memory for + * IXGBE_MAX_RING_DESC. The Rx Burst bulk allocation function requires + * extra memory at the end of the descriptor ring to be zero'd out. + */ + if (adapter->rx_bulk_alloc_allowed) + /* zero out extra memory */ + len += RTE_PMD_IXGBE_RX_MAX_BURST; + + /* + * Zero out HW ring memory. 
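check_rx_burst_bulk_alloc_preconditions() above admits the bulk-allocation Rx path only when the free threshold is at least one maximum burst, strictly smaller than the ring, and an exact divisor of the ring size. A compact validator expressing the same three rules, with an invented EX_RX_MAX_BURST constant:

/* Validate the bulk-allocation Rx preconditions (sketch). */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define EX_RX_MAX_BURST 32

static int ex_bulk_alloc_ok(uint16_t nb_desc, uint16_t free_thresh)
{
	if (free_thresh < EX_RX_MAX_BURST)
		return -EINVAL;	/* refill block smaller than one burst */
	if (free_thresh >= nb_desc)
		return -EINVAL;	/* refill would never trigger */
	if (nb_desc % free_thresh != 0)
		return -EINVAL;	/* blocks would not tile the ring */
	return 0;
}

int main(void)
{
	printf("%d %d %d\n",
	       ex_bulk_alloc_ok(512, 64),	/* 0: OK                       */
	       ex_bulk_alloc_ok(512, 16),	/* -EINVAL: threshold too small */
	       ex_bulk_alloc_ok(500, 64));	/* -EINVAL: does not divide     */
	return 0;
}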
Zero out extra memory at the end of + * the H/W ring so look-ahead logic in Rx Burst bulk alloc function + * reads extra memory as zeros. + */ + for (i = 0; i < len; i++) { + rxq->rx_ring[i] = zeroed_desc; + } + + /* + * initialize extra software ring entries. Space for these extra + * entries is always allocated + */ + memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf)); + for (i = rxq->nb_rx_desc; i < len; ++i) { + rxq->sw_ring[i].mbuf = &rxq->fake_mbuf; + } + + rxq->rx_nb_avail = 0; + rxq->rx_next_avail = 0; + rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1); + rxq->rx_tail = 0; + rxq->nb_rx_hold = 0; + rxq->pkt_first_seg = NULL; + rxq->pkt_last_seg = NULL; + +#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64) + rxq->rxrearm_start = 0; + rxq->rxrearm_nb = 0; +#endif +} + +static int +ixgbe_is_vf(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + switch (hw->mac.type) { + case ixgbe_mac_82599_vf: + case ixgbe_mac_X540_vf: + case ixgbe_mac_X550_vf: + case ixgbe_mac_X550EM_x_vf: + case ixgbe_mac_X550EM_a_vf: + return 1; + default: + return 0; + } +} + +uint64_t +ixgbe_get_rx_queue_offloads(struct rte_eth_dev *dev) +{ + uint64_t offloads = 0; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (hw->mac.type != ixgbe_mac_82598EB) + offloads |= DEV_RX_OFFLOAD_VLAN_STRIP; + + return offloads; +} + +uint64_t +ixgbe_get_rx_port_offloads(struct rte_eth_dev *dev) +{ + uint64_t offloads; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + offloads = DEV_RX_OFFLOAD_IPV4_CKSUM | + DEV_RX_OFFLOAD_UDP_CKSUM | + DEV_RX_OFFLOAD_TCP_CKSUM | + DEV_RX_OFFLOAD_KEEP_CRC | + DEV_RX_OFFLOAD_JUMBO_FRAME | + DEV_RX_OFFLOAD_VLAN_FILTER | + DEV_RX_OFFLOAD_SCATTER | + DEV_RX_OFFLOAD_RSS_HASH; + + if (hw->mac.type == ixgbe_mac_82598EB) + offloads |= DEV_RX_OFFLOAD_VLAN_STRIP; + + if (ixgbe_is_vf(dev) == 0) + offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND; + + /* + * RSC is only supported by 82599 and x540 PF devices in a non-SR-IOV + * mode. + */ + if ((hw->mac.type == ixgbe_mac_82599EB || + hw->mac.type == ixgbe_mac_X540 || + hw->mac.type == ixgbe_mac_X550) && + !RTE_ETH_DEV_SRIOV(dev).active) + offloads |= DEV_RX_OFFLOAD_TCP_LRO; + + if (hw->mac.type == ixgbe_mac_82599EB || + hw->mac.type == ixgbe_mac_X540) + offloads |= DEV_RX_OFFLOAD_MACSEC_STRIP; + + if (hw->mac.type == ixgbe_mac_X550 || + hw->mac.type == ixgbe_mac_X550EM_x || + hw->mac.type == ixgbe_mac_X550EM_a) + offloads |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM; + +#ifdef RTE_LIBRTE_SECURITY + if (dev->security_ctx) + offloads |= DEV_RX_OFFLOAD_SECURITY; +#endif + + return offloads; +} + +int __rte_cold +ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, + uint16_t queue_idx, + uint16_t nb_desc, + unsigned int socket_id, + const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *mp) +{ + const struct rte_memzone *rz; + struct ixgbe_rx_queue *rxq; + struct ixgbe_hw *hw; + uint16_t len; + struct ixgbe_adapter *adapter = dev->data->dev_private; + uint64_t offloads; + + PMD_INIT_FUNC_TRACE(); + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads; + + /* + * Validate number of receive descriptors. + * It must not exceed hardware maximum, and must be multiple + * of IXGBE_ALIGN. + */ + if (nb_desc % IXGBE_RXD_ALIGN != 0 || + (nb_desc > IXGBE_MAX_RING_DESC) || + (nb_desc < IXGBE_MIN_RING_DESC)) { + return -EINVAL; + } + + /* Free memory prior to re-allocation if needed... 
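ixgbe_reset_rx_queue() above sizes the descriptor and software rings with RTE_PMD_IXGBE_RX_MAX_BURST spare entries, zeroes the spare descriptors (so their done bit can never read as set) and points the spare software slots at one dummy fake_mbuf, which lets the look-ahead scan run past the ring end without bounds checks. A small sketch of that padding arrangement, with invented sizes and a char array standing in for the dummy mbuf:

/* Ring padding for branch-free look-ahead past the ring end (sketch). */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define EX_RING 64
#define EX_PAD  32		/* extra room for the look-ahead scan */

struct ex_desc { uint32_t status; };

int main(void)
{
	static struct ex_desc ring[EX_RING + EX_PAD];
	static void *sw_ring[EX_RING + EX_PAD];
	static char fake_buf[64];

	/* zero every descriptor, padding included: the "done" bit stays clear */
	memset(ring, 0, sizeof(ring));

	/* park the padding software slots on the dummy buffer */
	for (int i = EX_RING; i < EX_RING + EX_PAD; i++)
		sw_ring[i] = fake_buf;

	printf("padding descriptor status=%u, slot %d -> %s buffer\n",
	       (unsigned)ring[EX_RING].status, EX_RING,
	       sw_ring[EX_RING] == fake_buf ? "dummy" : "real");
	return 0;
}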
*/ + if (dev->data->rx_queues[queue_idx] != NULL) { + ixgbe_rx_queue_release(dev->data->rx_queues[queue_idx]); + dev->data->rx_queues[queue_idx] = NULL; + } + + /* First allocate the rx queue data structure */ + rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct ixgbe_rx_queue), + RTE_CACHE_LINE_SIZE, socket_id); + if (rxq == NULL) + return -ENOMEM; + rxq->mb_pool = mp; + rxq->nb_rx_desc = nb_desc; + rxq->rx_free_thresh = rx_conf->rx_free_thresh; + rxq->queue_id = queue_idx; + rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ? + queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx); + rxq->port_id = dev->data->port_id; + if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) + rxq->crc_len = RTE_ETHER_CRC_LEN; + else + rxq->crc_len = 0; + rxq->drop_en = rx_conf->rx_drop_en; + rxq->rx_deferred_start = rx_conf->rx_deferred_start; + rxq->offloads = offloads; + + /* + * The packet type in RX descriptor is different for different NICs. + * Some bits are used for x550 but reserved for other NICS. + * So set different masks for different NICs. + */ + if (hw->mac.type == ixgbe_mac_X550 || + hw->mac.type == ixgbe_mac_X550EM_x || + hw->mac.type == ixgbe_mac_X550EM_a || + hw->mac.type == ixgbe_mac_X550_vf || + hw->mac.type == ixgbe_mac_X550EM_x_vf || + hw->mac.type == ixgbe_mac_X550EM_a_vf) + rxq->pkt_type_mask = IXGBE_PACKET_TYPE_MASK_X550; + else + rxq->pkt_type_mask = IXGBE_PACKET_TYPE_MASK_82599; + + /* + * Allocate RX ring hardware descriptors. A memzone large enough to + * handle the maximum ring size is allocated in order to allow for + * resizing in later calls to the queue setup function. + */ + rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx, + RX_RING_SZ, IXGBE_ALIGN, socket_id); + if (rz == NULL) { + ixgbe_rx_queue_release(rxq); + return -ENOMEM; + } + + /* + * Zero init all the descriptors in the ring. + */ + memset(rz->addr, 0, RX_RING_SZ); + + /* + * Modified to setup VFRDT for Virtual Function + */ + if (hw->mac.type == ixgbe_mac_82599_vf || + hw->mac.type == ixgbe_mac_X540_vf || + hw->mac.type == ixgbe_mac_X550_vf || + hw->mac.type == ixgbe_mac_X550EM_x_vf || + hw->mac.type == ixgbe_mac_X550EM_a_vf) { + rxq->rdt_reg_addr = + IXGBE_PCI_REG_ADDR(hw, IXGBE_VFRDT(queue_idx)); + rxq->rdh_reg_addr = + IXGBE_PCI_REG_ADDR(hw, IXGBE_VFRDH(queue_idx)); + } else { + rxq->rdt_reg_addr = + IXGBE_PCI_REG_ADDR(hw, IXGBE_RDT(rxq->reg_idx)); + rxq->rdh_reg_addr = + IXGBE_PCI_REG_ADDR(hw, IXGBE_RDH(rxq->reg_idx)); + } + + rxq->rx_ring_phys_addr = rz->iova; + rxq->rx_ring = (union ixgbe_adv_rx_desc *) rz->addr; + + /* + * Certain constraints must be met in order to use the bulk buffer + * allocation Rx burst function. If any of Rx queues doesn't meet them + * the feature should be disabled for the whole port. + */ + if (check_rx_burst_bulk_alloc_preconditions(rxq)) { + PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Rx Bulk Alloc " + "preconditions - canceling the feature for " + "the whole port[%d]", + rxq->queue_id, rxq->port_id); + adapter->rx_bulk_alloc_allowed = false; + } + + /* + * Allocate software ring. Allow for space at the end of the + * S/W ring to make sure look-ahead logic in bulk alloc Rx burst + * function does not access an invalid memory region. 
+ */ + len = nb_desc; + if (adapter->rx_bulk_alloc_allowed) + len += RTE_PMD_IXGBE_RX_MAX_BURST; + + rxq->sw_ring = rte_zmalloc_socket("rxq->sw_ring", + sizeof(struct ixgbe_rx_entry) * len, + RTE_CACHE_LINE_SIZE, socket_id); + if (!rxq->sw_ring) { + ixgbe_rx_queue_release(rxq); + return -ENOMEM; + } + + /* + * Always allocate even if it's not going to be needed in order to + * simplify the code. + * + * This ring is used in LRO and Scattered Rx cases and Scattered Rx may + * be requested in ixgbe_dev_rx_init(), which is called later from + * dev_start() flow. + */ + rxq->sw_sc_ring = + rte_zmalloc_socket("rxq->sw_sc_ring", + sizeof(struct ixgbe_scattered_rx_entry) * len, + RTE_CACHE_LINE_SIZE, socket_id); + if (!rxq->sw_sc_ring) { + ixgbe_rx_queue_release(rxq); + return -ENOMEM; + } + + PMD_INIT_LOG(DEBUG, "sw_ring=%p sw_sc_ring=%p hw_ring=%p " + "dma_addr=0x%"PRIx64, + rxq->sw_ring, rxq->sw_sc_ring, rxq->rx_ring, + rxq->rx_ring_phys_addr); + + if (!rte_is_power_of_2(nb_desc)) { + PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Vector Rx " + "preconditions - canceling the feature for " + "the whole port[%d]", + rxq->queue_id, rxq->port_id); + adapter->rx_vec_allowed = false; + } else + ixgbe_rxq_vec_setup(rxq); + + dev->data->rx_queues[queue_idx] = rxq; + + ixgbe_reset_rx_queue(adapter, rxq); + + return 0; +} + +uint32_t +ixgbe_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id) +{ +#define IXGBE_RXQ_SCAN_INTERVAL 4 + volatile union ixgbe_adv_rx_desc *rxdp; + struct ixgbe_rx_queue *rxq; + uint32_t desc = 0; + + rxq = dev->data->rx_queues[rx_queue_id]; + rxdp = &(rxq->rx_ring[rxq->rx_tail]); + + while ((desc < rxq->nb_rx_desc) && + (rxdp->wb.upper.status_error & + rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD))) { + desc += IXGBE_RXQ_SCAN_INTERVAL; + rxdp += IXGBE_RXQ_SCAN_INTERVAL; + if (rxq->rx_tail + desc >= rxq->nb_rx_desc) + rxdp = &(rxq->rx_ring[rxq->rx_tail + + desc - rxq->nb_rx_desc]); + } + + return desc; +} + +int +ixgbe_dev_rx_descriptor_done(void *rx_queue, uint16_t offset) +{ + volatile union ixgbe_adv_rx_desc *rxdp; + struct ixgbe_rx_queue *rxq = rx_queue; + uint32_t desc; + + if (unlikely(offset >= rxq->nb_rx_desc)) + return 0; + desc = rxq->rx_tail + offset; + if (desc >= rxq->nb_rx_desc) + desc -= rxq->nb_rx_desc; + + rxdp = &rxq->rx_ring[desc]; + return !!(rxdp->wb.upper.status_error & + rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)); +} + +int +ixgbe_dev_rx_descriptor_status(void *rx_queue, uint16_t offset) +{ + struct ixgbe_rx_queue *rxq = rx_queue; + volatile uint32_t *status; + uint32_t nb_hold, desc; + + if (unlikely(offset >= rxq->nb_rx_desc)) + return -EINVAL; + +#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64) + if (rxq->rx_using_sse) + nb_hold = rxq->rxrearm_nb; + else +#endif + nb_hold = rxq->nb_rx_hold; + if (offset >= rxq->nb_rx_desc - nb_hold) + return RTE_ETH_RX_DESC_UNAVAIL; + + desc = rxq->rx_tail + offset; + if (desc >= rxq->nb_rx_desc) + desc -= rxq->nb_rx_desc; + + status = &rxq->rx_ring[desc].wb.upper.status_error; + if (*status & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)) + return RTE_ETH_RX_DESC_DONE; + + return RTE_ETH_RX_DESC_AVAIL; +} + +int +ixgbe_dev_tx_descriptor_status(void *tx_queue, uint16_t offset) +{ + struct ixgbe_tx_queue *txq = tx_queue; + volatile uint32_t *status; + uint32_t desc; + + if (unlikely(offset >= txq->nb_tx_desc)) + return -EINVAL; + + desc = txq->tx_tail + offset; + /* go to next desc that has the RS bit */ + desc = ((desc + txq->tx_rs_thresh - 1) / txq->tx_rs_thresh) * + txq->tx_rs_thresh; + if (desc >= txq->nb_tx_desc) { + 
desc -= txq->nb_tx_desc; + if (desc >= txq->nb_tx_desc) + desc -= txq->nb_tx_desc; + } + + status = &txq->tx_ring[desc].wb.status; + if (*status & rte_cpu_to_le_32(IXGBE_ADVTXD_STAT_DD)) + return RTE_ETH_TX_DESC_DONE; + + return RTE_ETH_TX_DESC_FULL; +} + +/* + * Set up link loopback for X540/X550 mode Tx->Rx. + */ +static inline void __rte_cold +ixgbe_setup_loopback_link_x540_x550(struct ixgbe_hw *hw, bool enable) +{ + uint32_t macc; + PMD_INIT_FUNC_TRACE(); + + u16 autoneg_reg = IXGBE_MII_AUTONEG_REG; + + hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg); + macc = IXGBE_READ_REG(hw, IXGBE_MACC); + + if (enable) { + /* datasheet 15.2.1: disable AUTONEG (PHY Bit 7.0.C) */ + autoneg_reg |= IXGBE_MII_AUTONEG_ENABLE; + /* datasheet 15.2.1: MACC.FLU = 1 (force link up) */ + macc |= IXGBE_MACC_FLU; + } else { + autoneg_reg &= ~IXGBE_MII_AUTONEG_ENABLE; + macc &= ~IXGBE_MACC_FLU; + } + + hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg); + + IXGBE_WRITE_REG(hw, IXGBE_MACC, macc); +} + +void __rte_cold +ixgbe_dev_clear_queues(struct rte_eth_dev *dev) +{ + unsigned i; + struct ixgbe_adapter *adapter = dev->data->dev_private; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + PMD_INIT_FUNC_TRACE(); + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + struct ixgbe_tx_queue *txq = dev->data->tx_queues[i]; + + if (txq != NULL) { + txq->ops->release_mbufs(txq); + txq->ops->reset(txq); + } + } + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i]; + + if (rxq != NULL) { + ixgbe_rx_queue_release_mbufs(rxq); + ixgbe_reset_rx_queue(adapter, rxq); + } + } + /* If loopback mode was enabled, reconfigure the link accordingly */ + if (dev->data->dev_conf.lpbk_mode != 0) { + if (hw->mac.type == ixgbe_mac_X540 || + hw->mac.type == ixgbe_mac_X550 || + hw->mac.type == ixgbe_mac_X550EM_x || + hw->mac.type == ixgbe_mac_X550EM_a) + ixgbe_setup_loopback_link_x540_x550(hw, false); + } +} + +void +ixgbe_dev_free_queues(struct rte_eth_dev *dev) +{ + unsigned i; + + PMD_INIT_FUNC_TRACE(); + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + ixgbe_dev_rx_queue_release(dev->data->rx_queues[i]); + dev->data->rx_queues[i] = NULL; + } + dev->data->nb_rx_queues = 0; + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + ixgbe_dev_tx_queue_release(dev->data->tx_queues[i]); + dev->data->tx_queues[i] = NULL; + } + dev->data->nb_tx_queues = 0; +} + +/********************************************************************* + * + * Device RX/TX init functions + * + **********************************************************************/ + +/** + * Receive Side Scaling (RSS) + * See section 7.1.2.8 in the following document: + * "Intel 82599 10 GbE Controller Datasheet" - Revision 2.1 October 2009 + * + * Principles: + * The source and destination IP addresses of the IP header and the source + * and destination ports of TCP/UDP headers, if any, of received packets are + * hashed against a configurable random key to compute a 32-bit RSS hash result. + * The seven (7) LSBs of the 32-bit hash result are used as an index into a + * 128-entry redirection table (RETA). Each entry of the RETA provides a 3-bit + * RSS output index which is used as the RX queue index where to store the + * received packets. + * The following output is supplied in the RX write-back descriptor: + * - 32-bit result of the Microsoft RSS hash function, + * - 4-bit RSS type field. 
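To make the indexing just described concrete, here is a minimal standalone sketch (editorial, not driver code) of the hash-to-queue step: the low seven bits of the 32-bit hash select a RETA entry, and the entry holds the destination Rx queue. The four-queue round-robin spread is an arbitrary example.

#include <stdint.h>
#include <stdio.h>

#define RETA_SIZE 128	/* 128-entry redirection table, as described above */

/* Map a 32-bit RSS hash to an Rx queue: the seven LSBs index the RETA. */
static uint8_t reta_lookup(const uint8_t reta[RETA_SIZE], uint32_t rss_hash)
{
	return reta[rss_hash & (RETA_SIZE - 1)];
}

int main(void)
{
	uint8_t reta[RETA_SIZE];
	int i;

	/* Example policy: spread four Rx queues round-robin over the table. */
	for (i = 0; i < RETA_SIZE; i++)
		reta[i] = (uint8_t)(i % 4);

	printf("hash 0x%08x -> queue %u\n", 0xdeadbeefu,
	       (unsigned)reta_lookup(reta, 0xdeadbeefu));
	return 0;
}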
+ */ + +/* + * RSS random key supplied in section 7.1.2.8.3 of the Intel 82599 datasheet. + * Used as the default key. + */ +static uint8_t rss_intel_key[40] = { + 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, + 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0, + 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4, + 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, + 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA, +}; + +static void +ixgbe_rss_disable(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw; + uint32_t mrqc; + uint32_t mrqc_reg; + + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type); + mrqc = IXGBE_READ_REG(hw, mrqc_reg); + mrqc &= ~IXGBE_MRQC_RSSEN; + IXGBE_WRITE_REG(hw, mrqc_reg, mrqc); +} + +static void +ixgbe_hw_rss_hash_set(struct ixgbe_hw *hw, struct rte_eth_rss_conf *rss_conf) +{ + uint8_t *hash_key; + uint32_t mrqc; + uint32_t rss_key; + uint64_t rss_hf; + uint16_t i; + uint32_t mrqc_reg; + uint32_t rssrk_reg; + + mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type); + rssrk_reg = ixgbe_rssrk_reg_get(hw->mac.type, 0); + + hash_key = rss_conf->rss_key; + if (hash_key != NULL) { + /* Fill in RSS hash key */ + for (i = 0; i < 10; i++) { + rss_key = hash_key[(i * 4)]; + rss_key |= hash_key[(i * 4) + 1] << 8; + rss_key |= hash_key[(i * 4) + 2] << 16; + rss_key |= hash_key[(i * 4) + 3] << 24; + IXGBE_WRITE_REG_ARRAY(hw, rssrk_reg, i, rss_key); + } + } + + /* Set configured hashing protocols in MRQC register */ + rss_hf = rss_conf->rss_hf; + mrqc = IXGBE_MRQC_RSSEN; /* Enable RSS */ + if (rss_hf & ETH_RSS_IPV4) + mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4; + if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) + mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP; + if (rss_hf & ETH_RSS_IPV6) + mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6; + if (rss_hf & ETH_RSS_IPV6_EX) + mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX; + if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) + mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP; + if (rss_hf & ETH_RSS_IPV6_TCP_EX) + mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP; + if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) + mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP; + if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) + mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP; + if (rss_hf & ETH_RSS_IPV6_UDP_EX) + mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP; + IXGBE_WRITE_REG(hw, mrqc_reg, mrqc); +} + +int +ixgbe_dev_rss_hash_update(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf) +{ + struct ixgbe_hw *hw; + uint32_t mrqc; + uint64_t rss_hf; + uint32_t mrqc_reg; + + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (!ixgbe_rss_update_sp(hw->mac.type)) { + PMD_DRV_LOG(ERR, "RSS hash update is not supported on this " + "NIC."); + return -ENOTSUP; + } + mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type); + + /* + * Excerpt from section 7.1.2.8 Receive-Side Scaling (RSS): + * "RSS enabling cannot be done dynamically while it must be + * preceded by a software reset" + * Before changing anything, first check that the update RSS operation + * does not attempt to disable RSS, if RSS was enabled at + * initialization time, or does not attempt to enable RSS, if RSS was + * disabled at initialization time. 
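The key-programming loop in ixgbe_hw_rss_hash_set() above packs the 40-byte key into ten little-endian 32-bit RSSRK words. A standalone sketch of just that packing (editorial, independent of the register write itself):

#include <stdint.h>

/* Pack a 40-byte RSS hash key into ten 32-bit register words, least
 * significant byte first, as the RSSRK loop above does. */
void pack_rss_key(const uint8_t key[40], uint32_t words[10])
{
	int i;

	for (i = 0; i < 10; i++)
		words[i] = (uint32_t)key[4 * i] |
			   ((uint32_t)key[4 * i + 1] << 8) |
			   ((uint32_t)key[4 * i + 2] << 16) |
			   ((uint32_t)key[4 * i + 3] << 24);
}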
+ */ + rss_hf = rss_conf->rss_hf & IXGBE_RSS_OFFLOAD_ALL; + mrqc = IXGBE_READ_REG(hw, mrqc_reg); + if (!(mrqc & IXGBE_MRQC_RSSEN)) { /* RSS disabled */ + if (rss_hf != 0) /* Enable RSS */ + return -(EINVAL); + return 0; /* Nothing to do */ + } + /* RSS enabled */ + if (rss_hf == 0) /* Disable RSS */ + return -(EINVAL); + ixgbe_hw_rss_hash_set(hw, rss_conf); + return 0; +} + +int +ixgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf) +{ + struct ixgbe_hw *hw; + uint8_t *hash_key; + uint32_t mrqc; + uint32_t rss_key; + uint64_t rss_hf; + uint16_t i; + uint32_t mrqc_reg; + uint32_t rssrk_reg; + + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type); + rssrk_reg = ixgbe_rssrk_reg_get(hw->mac.type, 0); + hash_key = rss_conf->rss_key; + if (hash_key != NULL) { + /* Return RSS hash key */ + for (i = 0; i < 10; i++) { + rss_key = IXGBE_READ_REG_ARRAY(hw, rssrk_reg, i); + hash_key[(i * 4)] = rss_key & 0x000000FF; + hash_key[(i * 4) + 1] = (rss_key >> 8) & 0x000000FF; + hash_key[(i * 4) + 2] = (rss_key >> 16) & 0x000000FF; + hash_key[(i * 4) + 3] = (rss_key >> 24) & 0x000000FF; + } + } + + /* Get RSS functions configured in MRQC register */ + mrqc = IXGBE_READ_REG(hw, mrqc_reg); + if ((mrqc & IXGBE_MRQC_RSSEN) == 0) { /* RSS is disabled */ + rss_conf->rss_hf = 0; + return 0; + } + rss_hf = 0; + if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4) + rss_hf |= ETH_RSS_IPV4; + if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4_TCP) + rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP; + if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6) + rss_hf |= ETH_RSS_IPV6; + if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX) + rss_hf |= ETH_RSS_IPV6_EX; + if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_TCP) + rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP; + if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP) + rss_hf |= ETH_RSS_IPV6_TCP_EX; + if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4_UDP) + rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP; + if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_UDP) + rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP; + if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP) + rss_hf |= ETH_RSS_IPV6_UDP_EX; + rss_conf->rss_hf = rss_hf; + return 0; +} + +static void +ixgbe_rss_configure(struct rte_eth_dev *dev) +{ + struct rte_eth_rss_conf rss_conf; + struct ixgbe_adapter *adapter; + struct ixgbe_hw *hw; + uint32_t reta; + uint16_t i; + uint16_t j; + uint16_t sp_reta_size; + uint32_t reta_reg; + + PMD_INIT_FUNC_TRACE(); + adapter = dev->data->dev_private; + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + sp_reta_size = ixgbe_reta_size_get(hw->mac.type); + + /* + * Fill in redirection table + * The byte-swap is needed because NIC registers are in + * little-endian order. + */ + if (adapter->rss_reta_updated == 0) { + reta = 0; + for (i = 0, j = 0; i < sp_reta_size; i++, j++) { + reta_reg = ixgbe_reta_reg_get(hw->mac.type, i); + + if (j == dev->data->nb_rx_queues) + j = 0; + reta = (reta << 8) | j; + if ((i & 3) == 3) + IXGBE_WRITE_REG(hw, reta_reg, + rte_bswap32(reta)); + } + } + + /* + * Configure the RSS key and the RSS protocols used to compute + * the RSS hash of input packets. 
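From the application side, the key and hash-function selection consumed here come from the rte_eth_conf passed to rte_eth_dev_configure(). A minimal sketch (editorial; nb_rxq and nb_txq are placeholders), leaving rss_key NULL so the driver falls back to rss_intel_key above:

#include <string.h>
#include <rte_ethdev.h>

/* Illustrative fragment: request RSS over IP/TCP/UDP with the PMD default key. */
int configure_rss(uint16_t port_id, uint16_t nb_rxq, uint16_t nb_txq)
{
	struct rte_eth_conf conf;

	memset(&conf, 0, sizeof(conf));
	conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
	conf.rx_adv_conf.rss_conf.rss_key = NULL;	/* use the driver default key */
	conf.rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IP | ETH_RSS_TCP | ETH_RSS_UDP;

	return rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
}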
+ */ + rss_conf = dev->data->dev_conf.rx_adv_conf.rss_conf; + if ((rss_conf.rss_hf & IXGBE_RSS_OFFLOAD_ALL) == 0) { + ixgbe_rss_disable(dev); + return; + } + if (rss_conf.rss_key == NULL) + rss_conf.rss_key = rss_intel_key; /* Default hash key */ + ixgbe_hw_rss_hash_set(hw, &rss_conf); +} + +#define NUM_VFTA_REGISTERS 128 +#define NIC_RX_BUFFER_SIZE 0x200 +#define X550_RX_BUFFER_SIZE 0x180 + +static void +ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev) +{ + struct rte_eth_vmdq_dcb_conf *cfg; + struct ixgbe_hw *hw; + enum rte_eth_nb_pools num_pools; + uint32_t mrqc, vt_ctl, queue_mapping, vlanctrl; + uint16_t pbsize; + uint8_t nb_tcs; /* number of traffic classes */ + int i; + + PMD_INIT_FUNC_TRACE(); + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf; + num_pools = cfg->nb_queue_pools; + /* Check we have a valid number of pools */ + if (num_pools != ETH_16_POOLS && num_pools != ETH_32_POOLS) { + ixgbe_rss_disable(dev); + return; + } + /* 16 pools -> 8 traffic classes, 32 pools -> 4 traffic classes */ + nb_tcs = (uint8_t)(ETH_VMDQ_DCB_NUM_QUEUES / (int)num_pools); + + /* + * RXPBSIZE + * split rx buffer up into sections, each for 1 traffic class + */ + switch (hw->mac.type) { + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + pbsize = (uint16_t)(X550_RX_BUFFER_SIZE / nb_tcs); + break; + default: + pbsize = (uint16_t)(NIC_RX_BUFFER_SIZE / nb_tcs); + break; + } + for (i = 0; i < nb_tcs; i++) { + uint32_t rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)); + + rxpbsize &= (~(0x3FF << IXGBE_RXPBSIZE_SHIFT)); + /* clear 10 bits. */ + rxpbsize |= (pbsize << IXGBE_RXPBSIZE_SHIFT); /* set value */ + IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize); + } + /* zero alloc all unused TCs */ + for (i = nb_tcs; i < ETH_DCB_NUM_USER_PRIORITIES; i++) { + uint32_t rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)); + + rxpbsize &= (~(0x3FF << IXGBE_RXPBSIZE_SHIFT)); + /* clear 10 bits. */ + IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize); + } + + /* MRQC: enable vmdq and dcb */ + mrqc = (num_pools == ETH_16_POOLS) ? + IXGBE_MRQC_VMDQRT8TCEN : IXGBE_MRQC_VMDQRT4TCEN; + IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); + + /* PFVTCTL: turn on virtualisation and set the default pool */ + vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN; + if (cfg->enable_default_pool) { + vt_ctl |= (cfg->default_pool << IXGBE_VT_CTL_POOL_SHIFT); + } else { + vt_ctl |= IXGBE_VT_CTL_DIS_DEFPL; + } + + IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl); + + /* RTRUP2TC: mapping user priorities to traffic classes (TCs) */ + queue_mapping = 0; + for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) + /* + * mapping is done with 3 bits per priority, + * so shift by i*3 each time + */ + queue_mapping |= ((cfg->dcb_tc[i] & 0x07) << (i * 3)); + + IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, queue_mapping); + + /* RTRPCS: DCB related */ + IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, IXGBE_RMCS_RRM); + + /* VLNCTRL: enable vlan filtering and allow all vlan tags through */ + vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); + vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */ + IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl); + + /* VFTA - enable all vlan filters */ + for (i = 0; i < NUM_VFTA_REGISTERS; i++) { + IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF); + } + + /* VFRE: pool enabling for receive - 16 or 32 */ + IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), + num_pools == ETH_16_POOLS ? 
0xFFFF : 0xFFFFFFFF); + + /* + * MPSAR - allow pools to read specific mac addresses + * In this case, all pools should be able to read from mac addr 0 + */ + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(0), 0xFFFFFFFF); + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(0), 0xFFFFFFFF); + + /* PFVLVF, PFVLVFB: set up filters for vlan tags as configured */ + for (i = 0; i < cfg->nb_pool_maps; i++) { + /* set vlan id in VF register and set the valid bit */ + IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), (IXGBE_VLVF_VIEN | + (cfg->pool_map[i].vlan_id & 0xFFF))); + /* + * Put the allowed pools in VFB reg. As we only have 16 or 32 + * pools, we only need to use the first half of the register + * i.e. bits 0-31 + */ + IXGBE_WRITE_REG(hw, IXGBE_VLVFB(i*2), cfg->pool_map[i].pools); + } +} + +/** + * ixgbe_dcb_config_tx_hw_config - Configure general DCB TX parameters + * @dev: pointer to eth_dev structure + * @dcb_config: pointer to ixgbe_dcb_config structure + */ +static void +ixgbe_dcb_tx_hw_config(struct rte_eth_dev *dev, + struct ixgbe_dcb_config *dcb_config) +{ + uint32_t reg; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + PMD_INIT_FUNC_TRACE(); + if (hw->mac.type != ixgbe_mac_82598EB) { + /* Disable the Tx desc arbiter so that MTQC can be changed */ + reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS); + reg |= IXGBE_RTTDCS_ARBDIS; + IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg); + + /* Enable DCB for Tx with 8 TCs */ + if (dcb_config->num_tcs.pg_tcs == 8) { + reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ; + } else { + reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ; + } + if (dcb_config->vt_mode) + reg |= IXGBE_MTQC_VT_ENA; + IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg); + + /* Enable the Tx desc arbiter */ + reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS); + reg &= ~IXGBE_RTTDCS_ARBDIS; + IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg); + + /* Enable Security TX Buffer IFG for DCB */ + reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG); + reg |= IXGBE_SECTX_DCB; + IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg); + } +} + +/** + * ixgbe_vmdq_dcb_hw_tx_config - Configure general VMDQ+DCB TX parameters + * @dev: pointer to rte_eth_dev structure + * @dcb_config: pointer to ixgbe_dcb_config structure + */ +static void +ixgbe_vmdq_dcb_hw_tx_config(struct rte_eth_dev *dev, + struct ixgbe_dcb_config *dcb_config) +{ + struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf = + &dev->data->dev_conf.tx_adv_conf.vmdq_dcb_tx_conf; + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + PMD_INIT_FUNC_TRACE(); + if (hw->mac.type != ixgbe_mac_82598EB) + /*PF VF Transmit Enable*/ + IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), + vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS ? 
0xFFFF : 0xFFFFFFFF); + + /*Configure general DCB TX parameters*/ + ixgbe_dcb_tx_hw_config(dev, dcb_config); +} + +static void +ixgbe_vmdq_dcb_rx_config(struct rte_eth_dev *dev, + struct ixgbe_dcb_config *dcb_config) +{ + struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf = + &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf; + struct ixgbe_dcb_tc_config *tc; + uint8_t i, j; + + /* convert rte_eth_conf.rx_adv_conf to struct ixgbe_dcb_config */ + if (vmdq_rx_conf->nb_queue_pools == ETH_16_POOLS) { + dcb_config->num_tcs.pg_tcs = ETH_8_TCS; + dcb_config->num_tcs.pfc_tcs = ETH_8_TCS; + } else { + dcb_config->num_tcs.pg_tcs = ETH_4_TCS; + dcb_config->num_tcs.pfc_tcs = ETH_4_TCS; + } + + /* Initialize User Priority to Traffic Class mapping */ + for (j = 0; j < IXGBE_DCB_MAX_TRAFFIC_CLASS; j++) { + tc = &dcb_config->tc_config[j]; + tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0; + } + + /* User Priority to Traffic Class mapping */ + for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) { + j = vmdq_rx_conf->dcb_tc[i]; + tc = &dcb_config->tc_config[j]; + tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap |= + (uint8_t)(1 << i); + } +} + +static void +ixgbe_dcb_vt_tx_config(struct rte_eth_dev *dev, + struct ixgbe_dcb_config *dcb_config) +{ + struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf = + &dev->data->dev_conf.tx_adv_conf.vmdq_dcb_tx_conf; + struct ixgbe_dcb_tc_config *tc; + uint8_t i, j; + + /* convert rte_eth_conf.rx_adv_conf to struct ixgbe_dcb_config */ + if (vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS) { + dcb_config->num_tcs.pg_tcs = ETH_8_TCS; + dcb_config->num_tcs.pfc_tcs = ETH_8_TCS; + } else { + dcb_config->num_tcs.pg_tcs = ETH_4_TCS; + dcb_config->num_tcs.pfc_tcs = ETH_4_TCS; + } + + /* Initialize User Priority to Traffic Class mapping */ + for (j = 0; j < IXGBE_DCB_MAX_TRAFFIC_CLASS; j++) { + tc = &dcb_config->tc_config[j]; + tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0; + } + + /* User Priority to Traffic Class mapping */ + for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) { + j = vmdq_tx_conf->dcb_tc[i]; + tc = &dcb_config->tc_config[j]; + tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap |= + (uint8_t)(1 << i); + } +} + +static void +ixgbe_dcb_rx_config(struct rte_eth_dev *dev, + struct ixgbe_dcb_config *dcb_config) +{ + struct rte_eth_dcb_rx_conf *rx_conf = + &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf; + struct ixgbe_dcb_tc_config *tc; + uint8_t i, j; + + dcb_config->num_tcs.pg_tcs = (uint8_t)rx_conf->nb_tcs; + dcb_config->num_tcs.pfc_tcs = (uint8_t)rx_conf->nb_tcs; + + /* Initialize User Priority to Traffic Class mapping */ + for (j = 0; j < IXGBE_DCB_MAX_TRAFFIC_CLASS; j++) { + tc = &dcb_config->tc_config[j]; + tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0; + } + + /* User Priority to Traffic Class mapping */ + for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) { + j = rx_conf->dcb_tc[i]; + tc = &dcb_config->tc_config[j]; + tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap |= + (uint8_t)(1 << i); + } +} + +static void +ixgbe_dcb_tx_config(struct rte_eth_dev *dev, + struct ixgbe_dcb_config *dcb_config) +{ + struct rte_eth_dcb_tx_conf *tx_conf = + &dev->data->dev_conf.tx_adv_conf.dcb_tx_conf; + struct ixgbe_dcb_tc_config *tc; + uint8_t i, j; + + dcb_config->num_tcs.pg_tcs = (uint8_t)tx_conf->nb_tcs; + dcb_config->num_tcs.pfc_tcs = (uint8_t)tx_conf->nb_tcs; + + /* Initialize User Priority to Traffic Class mapping */ + for (j = 0; j < IXGBE_DCB_MAX_TRAFFIC_CLASS; j++) { + tc = &dcb_config->tc_config[j]; + tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0; + } + + /* User Priority to Traffic Class 
mapping */ + for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) { + j = tx_conf->dcb_tc[i]; + tc = &dcb_config->tc_config[j]; + tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap |= + (uint8_t)(1 << i); + } +} + +/** + * ixgbe_dcb_rx_hw_config - Configure general DCB RX HW parameters + * @dev: pointer to eth_dev structure + * @dcb_config: pointer to ixgbe_dcb_config structure + */ +static void +ixgbe_dcb_rx_hw_config(struct rte_eth_dev *dev, + struct ixgbe_dcb_config *dcb_config) +{ + uint32_t reg; + uint32_t vlanctrl; + uint8_t i; + uint32_t q; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + PMD_INIT_FUNC_TRACE(); + /* + * Disable the arbiter before changing parameters + * (always enable recycle mode; WSP) + */ + reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC | IXGBE_RTRPCS_ARBDIS; + IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg); + + if (hw->mac.type != ixgbe_mac_82598EB) { + reg = IXGBE_READ_REG(hw, IXGBE_MRQC); + if (dcb_config->num_tcs.pg_tcs == 4) { + if (dcb_config->vt_mode) + reg = (reg & ~IXGBE_MRQC_MRQE_MASK) | + IXGBE_MRQC_VMDQRT4TCEN; + else { + /* no matter the mode is DCB or DCB_RSS, just + * set the MRQE to RSSXTCEN. RSS is controlled + * by RSS_FIELD + */ + IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0); + reg = (reg & ~IXGBE_MRQC_MRQE_MASK) | + IXGBE_MRQC_RTRSS4TCEN; + } + } + if (dcb_config->num_tcs.pg_tcs == 8) { + if (dcb_config->vt_mode) + reg = (reg & ~IXGBE_MRQC_MRQE_MASK) | + IXGBE_MRQC_VMDQRT8TCEN; + else { + IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0); + reg = (reg & ~IXGBE_MRQC_MRQE_MASK) | + IXGBE_MRQC_RTRSS8TCEN; + } + } + + IXGBE_WRITE_REG(hw, IXGBE_MRQC, reg); + + if (RTE_ETH_DEV_SRIOV(dev).active == 0) { + /* Disable drop for all queues in VMDQ mode*/ + for (q = 0; q < IXGBE_MAX_RX_QUEUE_NUM; q++) + IXGBE_WRITE_REG(hw, IXGBE_QDE, + (IXGBE_QDE_WRITE | + (q << IXGBE_QDE_IDX_SHIFT))); + } else { + /* Enable drop for all queues in SRIOV mode */ + for (q = 0; q < IXGBE_MAX_RX_QUEUE_NUM; q++) + IXGBE_WRITE_REG(hw, IXGBE_QDE, + (IXGBE_QDE_WRITE | + (q << IXGBE_QDE_IDX_SHIFT) | + IXGBE_QDE_ENABLE)); + } + } + + /* VLNCTRL: enable vlan filtering and allow all vlan tags through */ + vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); + vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */ + IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl); + + /* VFTA - enable all vlan filters */ + for (i = 0; i < NUM_VFTA_REGISTERS; i++) { + IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF); + } + + /* + * Configure Rx packet plane (recycle mode; WSP) and + * enable arbiter + */ + reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC; + IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg); +} + +static void +ixgbe_dcb_hw_arbite_rx_config(struct ixgbe_hw *hw, uint16_t *refill, + uint16_t *max, uint8_t *bwg_id, uint8_t *tsa, uint8_t *map) +{ + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, tsa); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id, + tsa, map); + break; + default: + break; + } +} + +static void +ixgbe_dcb_hw_arbite_tx_config(struct ixgbe_hw *hw, uint16_t *refill, uint16_t *max, + uint8_t *bwg_id, uint8_t *tsa, uint8_t *map) +{ + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, bwg_id, tsa); + ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, bwg_id, tsa); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case 
ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, bwg_id, tsa); + ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id, tsa, map); + break; + default: + break; + } +} + +#define DCB_RX_CONFIG 1 +#define DCB_TX_CONFIG 1 +#define DCB_TX_PB 1024 +/** + * ixgbe_dcb_hw_configure - Enable DCB and configure + * general DCB in VT mode and non-VT mode parameters + * @dev: pointer to rte_eth_dev structure + * @dcb_config: pointer to ixgbe_dcb_config structure + */ +static int +ixgbe_dcb_hw_configure(struct rte_eth_dev *dev, + struct ixgbe_dcb_config *dcb_config) +{ + int ret = 0; + uint8_t i, pfc_en, nb_tcs; + uint16_t pbsize, rx_buffer_size; + uint8_t config_dcb_rx = 0; + uint8_t config_dcb_tx = 0; + uint8_t tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0}; + uint8_t bwgid[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0}; + uint16_t refill[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0}; + uint16_t max[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0}; + uint8_t map[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0}; + struct ixgbe_dcb_tc_config *tc; + uint32_t max_frame = dev->data->mtu + RTE_ETHER_HDR_LEN + + RTE_ETHER_CRC_LEN; + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_bw_conf *bw_conf = + IXGBE_DEV_PRIVATE_TO_BW_CONF(dev->data->dev_private); + + switch (dev->data->dev_conf.rxmode.mq_mode) { + case ETH_MQ_RX_VMDQ_DCB: + dcb_config->vt_mode = true; + if (hw->mac.type != ixgbe_mac_82598EB) { + config_dcb_rx = DCB_RX_CONFIG; + /* + *get dcb and VT rx configuration parameters + *from rte_eth_conf + */ + ixgbe_vmdq_dcb_rx_config(dev, dcb_config); + /*Configure general VMDQ and DCB RX parameters*/ + ixgbe_vmdq_dcb_configure(dev); + } + break; + case ETH_MQ_RX_DCB: + case ETH_MQ_RX_DCB_RSS: + dcb_config->vt_mode = false; + config_dcb_rx = DCB_RX_CONFIG; + /* Get dcb TX configuration parameters from rte_eth_conf */ + ixgbe_dcb_rx_config(dev, dcb_config); + /*Configure general DCB RX parameters*/ + ixgbe_dcb_rx_hw_config(dev, dcb_config); + break; + default: + PMD_INIT_LOG(ERR, "Incorrect DCB RX mode configuration"); + break; + } + switch (dev->data->dev_conf.txmode.mq_mode) { + case ETH_MQ_TX_VMDQ_DCB: + dcb_config->vt_mode = true; + config_dcb_tx = DCB_TX_CONFIG; + /* get DCB and VT TX configuration parameters + * from rte_eth_conf + */ + ixgbe_dcb_vt_tx_config(dev, dcb_config); + /*Configure general VMDQ and DCB TX parameters*/ + ixgbe_vmdq_dcb_hw_tx_config(dev, dcb_config); + break; + + case ETH_MQ_TX_DCB: + dcb_config->vt_mode = false; + config_dcb_tx = DCB_TX_CONFIG; + /*get DCB TX configuration parameters from rte_eth_conf*/ + ixgbe_dcb_tx_config(dev, dcb_config); + /*Configure general DCB TX parameters*/ + ixgbe_dcb_tx_hw_config(dev, dcb_config); + break; + default: + PMD_INIT_LOG(ERR, "Incorrect DCB TX mode configuration"); + break; + } + + nb_tcs = dcb_config->num_tcs.pfc_tcs; + /* Unpack map */ + ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_RX_CONFIG, map); + if (nb_tcs == ETH_4_TCS) { + /* Avoid un-configured priority mapping to TC0 */ + uint8_t j = 4; + uint8_t mask = 0xFF; + + for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES - 4; i++) + mask = (uint8_t)(mask & (~(1 << map[i]))); + for (i = 0; mask && (i < IXGBE_DCB_MAX_TRAFFIC_CLASS); i++) { + if ((mask & 0x1) && (j < ETH_DCB_NUM_USER_PRIORITIES)) + map[j++] = i; + mask >>= 1; + } + /* Re-configure 4 TCs BW */ + for (i = 0; i < nb_tcs; i++) { + tc = &dcb_config->tc_config[i]; + if (bw_conf->tc_num != nb_tcs) + tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = + (uint8_t)(100 / nb_tcs); + 
tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent = + (uint8_t)(100 / nb_tcs); + } + for (; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + tc = &dcb_config->tc_config[i]; + tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = 0; + tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent = 0; + } + } else { + /* Re-configure 8 TCs BW */ + for (i = 0; i < nb_tcs; i++) { + tc = &dcb_config->tc_config[i]; + if (bw_conf->tc_num != nb_tcs) + tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = + (uint8_t)(100 / nb_tcs + (i & 1)); + tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent = + (uint8_t)(100 / nb_tcs + (i & 1)); + } + } + + switch (hw->mac.type) { + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + rx_buffer_size = X550_RX_BUFFER_SIZE; + break; + default: + rx_buffer_size = NIC_RX_BUFFER_SIZE; + break; + } + + if (config_dcb_rx) { + /* Set RX buffer size */ + pbsize = (uint16_t)(rx_buffer_size / nb_tcs); + uint32_t rxpbsize = pbsize << IXGBE_RXPBSIZE_SHIFT; + + for (i = 0; i < nb_tcs; i++) { + IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize); + } + /* zero alloc all unused TCs */ + for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++) { + IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0); + } + } + if (config_dcb_tx) { + /* Only support an equally distributed + * Tx packet buffer strategy. + */ + uint32_t txpktsize = IXGBE_TXPBSIZE_MAX / nb_tcs; + uint32_t txpbthresh = (txpktsize / DCB_TX_PB) - IXGBE_TXPKT_SIZE_MAX; + + for (i = 0; i < nb_tcs; i++) { + IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize); + IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh); + } + /* Clear unused TCs, if any, to zero buffer size*/ + for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++) { + IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0); + IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0); + } + } + + /*Calculates traffic class credits*/ + ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_config, max_frame, + IXGBE_DCB_TX_CONFIG); + ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_config, max_frame, + IXGBE_DCB_RX_CONFIG); + + if (config_dcb_rx) { + /* Unpack CEE standard containers */ + ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_RX_CONFIG, refill); + ixgbe_dcb_unpack_max_cee(dcb_config, max); + ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_RX_CONFIG, bwgid); + ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_RX_CONFIG, tsa); + /* Configure PG(ETS) RX */ + ixgbe_dcb_hw_arbite_rx_config(hw, refill, max, bwgid, tsa, map); + } + + if (config_dcb_tx) { + /* Unpack CEE standard containers */ + ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_TX_CONFIG, refill); + ixgbe_dcb_unpack_max_cee(dcb_config, max); + ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_TX_CONFIG, bwgid); + ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_TX_CONFIG, tsa); + /* Configure PG(ETS) TX */ + ixgbe_dcb_hw_arbite_tx_config(hw, refill, max, bwgid, tsa, map); + } + + /*Configure queue statistics registers*/ + ixgbe_dcb_config_tc_stats_82599(hw, dcb_config); + + /* Check if the PFC is supported */ + if (dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) { + pbsize = (uint16_t)(rx_buffer_size / nb_tcs); + for (i = 0; i < nb_tcs; i++) { + /* + * If the TC count is 8,and the default high_water is 48, + * the low_water is 16 as default. 
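A worked check of the defaults quoted in that comment: the packet buffer defined earlier as NIC_RX_BUFFER_SIZE is 0x200 (512, in the register's KB units), so with 8 traffic classes pbsize = 512 / 8 = 64; the code below then sets high_water = 64 * 3 / 4 = 48 and low_water = 64 / 4 = 16, which is exactly the 48/16 pair mentioned above.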
+ */ + hw->fc.high_water[i] = (pbsize * 3) / 4; + hw->fc.low_water[i] = pbsize / 4; + /* Enable pfc for this TC */ + tc = &dcb_config->tc_config[i]; + tc->pfc = ixgbe_dcb_pfc_enabled; + } + ixgbe_dcb_unpack_pfc_cee(dcb_config, map, &pfc_en); + if (dcb_config->num_tcs.pfc_tcs == ETH_4_TCS) + pfc_en &= 0x0F; + ret = ixgbe_dcb_config_pfc(hw, pfc_en, map); + } + + return ret; +} + +/** + * ixgbe_configure_dcb - Configure DCB Hardware + * @dev: pointer to rte_eth_dev + */ +void ixgbe_configure_dcb(struct rte_eth_dev *dev) +{ + struct ixgbe_dcb_config *dcb_cfg = + IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private); + struct rte_eth_conf *dev_conf = &(dev->data->dev_conf); + + PMD_INIT_FUNC_TRACE(); + + /* check support mq_mode for DCB */ + if ((dev_conf->rxmode.mq_mode != ETH_MQ_RX_VMDQ_DCB) && + (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB) && + (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB_RSS)) + return; + + if (dev->data->nb_rx_queues > ETH_DCB_NUM_QUEUES) + return; + + /** Configure DCB hardware **/ + ixgbe_dcb_hw_configure(dev, dcb_cfg); +} + +/* + * VMDq only support for 10 GbE NIC. + */ +static void +ixgbe_vmdq_rx_hw_configure(struct rte_eth_dev *dev) +{ + struct rte_eth_vmdq_rx_conf *cfg; + struct ixgbe_hw *hw; + enum rte_eth_nb_pools num_pools; + uint32_t mrqc, vt_ctl, vlanctrl; + uint32_t vmolr = 0; + int i; + + PMD_INIT_FUNC_TRACE(); + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf; + num_pools = cfg->nb_queue_pools; + + ixgbe_rss_disable(dev); + + /* MRQC: enable vmdq */ + mrqc = IXGBE_MRQC_VMDQEN; + IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); + + /* PFVTCTL: turn on virtualisation and set the default pool */ + vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN; + if (cfg->enable_default_pool) + vt_ctl |= (cfg->default_pool << IXGBE_VT_CTL_POOL_SHIFT); + else + vt_ctl |= IXGBE_VT_CTL_DIS_DEFPL; + + IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl); + + for (i = 0; i < (int)num_pools; i++) { + vmolr = ixgbe_convert_vm_rx_mask_to_val(cfg->rx_mode, vmolr); + IXGBE_WRITE_REG(hw, IXGBE_VMOLR(i), vmolr); + } + + /* VLNCTRL: enable vlan filtering and allow all vlan tags through */ + vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); + vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */ + IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl); + + /* VFTA - enable all vlan filters */ + for (i = 0; i < NUM_VFTA_REGISTERS; i++) + IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), UINT32_MAX); + + /* VFRE: pool enabling for receive - 64 */ + IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), UINT32_MAX); + if (num_pools == ETH_64_POOLS) + IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), UINT32_MAX); + + /* + * MPSAR - allow pools to read specific mac addresses + * In this case, all pools should be able to read from mac addr 0 + */ + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(0), UINT32_MAX); + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(0), UINT32_MAX); + + /* PFVLVF, PFVLVFB: set up filters for vlan tags as configured */ + for (i = 0; i < cfg->nb_pool_maps; i++) { + /* set vlan id in VF register and set the valid bit */ + IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), (IXGBE_VLVF_VIEN | + (cfg->pool_map[i].vlan_id & IXGBE_RXD_VLAN_ID_MASK))); + /* + * Put the allowed pools in VFB reg. As we only have 16 or 64 + * pools, we only need to use the first half of the register + * i.e. 
bits 0-31 + */ + if (((cfg->pool_map[i].pools >> 32) & UINT32_MAX) == 0) + IXGBE_WRITE_REG(hw, IXGBE_VLVFB(i * 2), + (cfg->pool_map[i].pools & UINT32_MAX)); + else + IXGBE_WRITE_REG(hw, IXGBE_VLVFB((i * 2 + 1)), + ((cfg->pool_map[i].pools >> 32) & UINT32_MAX)); + + } + + /* PFDMA Tx General Switch Control Enables VMDQ loopback */ + if (cfg->enable_loop_back) { + IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN); + for (i = 0; i < RTE_IXGBE_VMTXSW_REGISTER_COUNT; i++) + IXGBE_WRITE_REG(hw, IXGBE_VMTXSW(i), UINT32_MAX); + } + + IXGBE_WRITE_FLUSH(hw); +} + +/* + * ixgbe_dcb_config_tx_hw_config - Configure general VMDq TX parameters + * @hw: pointer to hardware structure + */ +static void +ixgbe_vmdq_tx_hw_configure(struct ixgbe_hw *hw) +{ + uint32_t reg; + uint32_t q; + + PMD_INIT_FUNC_TRACE(); + /*PF VF Transmit Enable*/ + IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), UINT32_MAX); + IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), UINT32_MAX); + + /* Disable the Tx desc arbiter so that MTQC can be changed */ + reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS); + reg |= IXGBE_RTTDCS_ARBDIS; + IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg); + + reg = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF; + IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg); + + /* Disable drop for all queues */ + for (q = 0; q < IXGBE_MAX_RX_QUEUE_NUM; q++) + IXGBE_WRITE_REG(hw, IXGBE_QDE, + (IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT))); + + /* Enable the Tx desc arbiter */ + reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS); + reg &= ~IXGBE_RTTDCS_ARBDIS; + IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg); + + IXGBE_WRITE_FLUSH(hw); +} + +static int __rte_cold +ixgbe_alloc_rx_queue_mbufs(struct ixgbe_rx_queue *rxq) +{ + struct ixgbe_rx_entry *rxe = rxq->sw_ring; + uint64_t dma_addr; + unsigned int i; + + /* Initialize software ring entries */ + for (i = 0; i < rxq->nb_rx_desc; i++) { + volatile union ixgbe_adv_rx_desc *rxd; + struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool); + + if (mbuf == NULL) { + PMD_INIT_LOG(ERR, "RX mbuf alloc failed queue_id=%u", + (unsigned) rxq->queue_id); + return -ENOMEM; + } + + mbuf->data_off = RTE_PKTMBUF_HEADROOM; + mbuf->port = rxq->port_id; + + dma_addr = + rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf)); + rxd = &rxq->rx_ring[i]; + rxd->read.hdr_addr = 0; + rxd->read.pkt_addr = dma_addr; + rxe[i].mbuf = mbuf; + } + + return 0; +} + +static int +ixgbe_config_vf_rss(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw; + uint32_t mrqc; + + ixgbe_rss_configure(dev); + + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* MRQC: enable VF RSS */ + mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC); + mrqc &= ~IXGBE_MRQC_MRQE_MASK; + switch (RTE_ETH_DEV_SRIOV(dev).active) { + case ETH_64_POOLS: + mrqc |= IXGBE_MRQC_VMDQRSS64EN; + break; + + case ETH_32_POOLS: + mrqc |= IXGBE_MRQC_VMDQRSS32EN; + break; + + default: + PMD_INIT_LOG(ERR, "Invalid pool number in IOV mode with VMDQ RSS"); + return -EINVAL; + } + + IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); + + return 0; +} + +static int +ixgbe_config_vf_default(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + switch (RTE_ETH_DEV_SRIOV(dev).active) { + case ETH_64_POOLS: + IXGBE_WRITE_REG(hw, IXGBE_MRQC, + IXGBE_MRQC_VMDQEN); + break; + + case ETH_32_POOLS: + IXGBE_WRITE_REG(hw, IXGBE_MRQC, + IXGBE_MRQC_VMDQRT4TCEN); + break; + + case ETH_16_POOLS: + IXGBE_WRITE_REG(hw, IXGBE_MRQC, + IXGBE_MRQC_VMDQRT8TCEN); + break; + default: + PMD_INIT_LOG(ERR, + "invalid pool number in IOV mode"); + break; + } + return 0; +} + +static int +ixgbe_dev_mq_rx_configure(struct 
rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (hw->mac.type == ixgbe_mac_82598EB) + return 0; + + if (RTE_ETH_DEV_SRIOV(dev).active == 0) { + /* + * SRIOV inactive scheme + * any DCB/RSS w/o VMDq multi-queue setting + */ + switch (dev->data->dev_conf.rxmode.mq_mode) { + case ETH_MQ_RX_RSS: + case ETH_MQ_RX_DCB_RSS: + case ETH_MQ_RX_VMDQ_RSS: + ixgbe_rss_configure(dev); + break; + + case ETH_MQ_RX_VMDQ_DCB: + ixgbe_vmdq_dcb_configure(dev); + break; + + case ETH_MQ_RX_VMDQ_ONLY: + ixgbe_vmdq_rx_hw_configure(dev); + break; + + case ETH_MQ_RX_NONE: + default: + /* if mq_mode is none, disable rss mode.*/ + ixgbe_rss_disable(dev); + break; + } + } else { + /* SRIOV active scheme + * Support RSS together with SRIOV. + */ + switch (dev->data->dev_conf.rxmode.mq_mode) { + case ETH_MQ_RX_RSS: + case ETH_MQ_RX_VMDQ_RSS: + ixgbe_config_vf_rss(dev); + break; + case ETH_MQ_RX_VMDQ_DCB: + case ETH_MQ_RX_DCB: + /* In SRIOV, the configuration is the same as VMDq case */ + ixgbe_vmdq_dcb_configure(dev); + break; + /* DCB/RSS together with SRIOV is not supported */ + case ETH_MQ_RX_VMDQ_DCB_RSS: + case ETH_MQ_RX_DCB_RSS: + PMD_INIT_LOG(ERR, + "Could not support DCB/RSS with VMDq & SRIOV"); + return -1; + default: + ixgbe_config_vf_default(dev); + break; + } + } + + return 0; +} + +static int +ixgbe_dev_mq_tx_configure(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t mtqc; + uint32_t rttdcs; + + if (hw->mac.type == ixgbe_mac_82598EB) + return 0; + + /* disable arbiter before setting MTQC */ + rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS); + rttdcs |= IXGBE_RTTDCS_ARBDIS; + IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs); + + if (RTE_ETH_DEV_SRIOV(dev).active == 0) { + /* + * SRIOV inactive scheme + * any DCB w/o VMDq multi-queue setting + */ + if (dev->data->dev_conf.txmode.mq_mode == ETH_MQ_TX_VMDQ_ONLY) + ixgbe_vmdq_tx_hw_configure(hw); + else { + mtqc = IXGBE_MTQC_64Q_1PB; + IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc); + } + } else { + switch (RTE_ETH_DEV_SRIOV(dev).active) { + + /* + * SRIOV active scheme + * FIXME if support DCB together with VMDq & SRIOV + */ + case ETH_64_POOLS: + mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF; + break; + case ETH_32_POOLS: + mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_32VF; + break; + case ETH_16_POOLS: + mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_RT_ENA | + IXGBE_MTQC_8TC_8TQ; + break; + default: + mtqc = IXGBE_MTQC_64Q_1PB; + PMD_INIT_LOG(ERR, "invalid pool number in IOV mode"); + } + IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc); + } + + /* re-enable arbiter */ + rttdcs &= ~IXGBE_RTTDCS_ARBDIS; + IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs); + + return 0; +} + +/** + * ixgbe_get_rscctl_maxdesc - Calculate the RSCCTL[n].MAXDESC for PF + * + * Return the RSCCTL[n].MAXDESC for 82599 and x540 PF devices according to the + * spec rev. 3.0 chapter 8.2.3.8.13. 
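A worked example of the calculation documented here (the mbuf sizes are common defaults, not values taken from this file): with a 2176-byte data room and 128 bytes of headroom the usable receive buffer is 2048 bytes, so a maximal 65535-byte IPv4 packet would span 65535 / 2048 = 31 buffers; since 31 >= 16 the function returns IXGBE_RSCCTL_MAXDESC_16, and 16 * 2048 = 32768 stays within the 64 KB minus one limit noted in the function body.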
+ * + * @pool Memory pool of the Rx queue + */ +static inline uint32_t +ixgbe_get_rscctl_maxdesc(struct rte_mempool *pool) +{ + struct rte_pktmbuf_pool_private *mp_priv = rte_mempool_get_priv(pool); + + /* MAXDESC * SRRCTL.BSIZEPKT must not exceed 64 KB minus one */ + uint16_t maxdesc = + RTE_IPV4_MAX_PKT_LEN / + (mp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM); + + if (maxdesc >= 16) + return IXGBE_RSCCTL_MAXDESC_16; + else if (maxdesc >= 8) + return IXGBE_RSCCTL_MAXDESC_8; + else if (maxdesc >= 4) + return IXGBE_RSCCTL_MAXDESC_4; + else + return IXGBE_RSCCTL_MAXDESC_1; +} + +/** + * ixgbe_set_ivar - Setup the correct IVAR register for a particular MSIX + * interrupt + * + * (Taken from FreeBSD tree) + * (yes this is all very magic and confusing :) + * + * @dev port handle + * @entry the register array entry + * @vector the MSIX vector for this queue + * @type RX/TX/MISC + */ +static void +ixgbe_set_ivar(struct rte_eth_dev *dev, u8 entry, u8 vector, s8 type) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + u32 ivar, index; + + vector |= IXGBE_IVAR_ALLOC_VAL; + + switch (hw->mac.type) { + + case ixgbe_mac_82598EB: + if (type == -1) + entry = IXGBE_IVAR_OTHER_CAUSES_INDEX; + else + entry += (type * 64); + index = (entry >> 2) & 0x1F; + ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index)); + ivar &= ~(0xFF << (8 * (entry & 0x3))); + ivar |= (vector << (8 * (entry & 0x3))); + IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar); + break; + + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + if (type == -1) { /* MISC IVAR */ + index = (entry & 1) * 8; + ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC); + ivar &= ~(0xFF << index); + ivar |= (vector << index); + IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar); + } else { /* RX/TX IVARS */ + index = (16 * (entry & 1)) + (8 * type); + ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1)); + ivar &= ~(0xFF << index); + ivar |= (vector << index); + IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar); + } + + break; + + default: + break; + } +} + +void __rte_cold +ixgbe_set_rx_function(struct rte_eth_dev *dev) +{ + uint16_t i, rx_using_sse; + struct ixgbe_adapter *adapter = dev->data->dev_private; + + /* + * In order to allow Vector Rx there are a few configuration + * conditions to be met and Rx Bulk Allocation should be allowed. + */ + if (ixgbe_rx_vec_dev_conf_condition_check(dev) || + !adapter->rx_bulk_alloc_allowed) { + PMD_INIT_LOG(DEBUG, "Port[%d] doesn't meet Vector Rx " + "preconditions", + dev->data->port_id); + + adapter->rx_vec_allowed = false; + } + + /* + * Initialize the appropriate LRO callback. + * + * If all queues satisfy the bulk allocation preconditions + * (hw->rx_bulk_alloc_allowed is TRUE) then we may use bulk allocation. + * Otherwise use a single allocation version. + */ + if (dev->data->lro) { + if (adapter->rx_bulk_alloc_allowed) { + PMD_INIT_LOG(DEBUG, "LRO is requested. Using a bulk " + "allocation version"); + dev->rx_pkt_burst = ixgbe_recv_pkts_lro_bulk_alloc; + } else { + PMD_INIT_LOG(DEBUG, "LRO is requested. Using a single " + "allocation version"); + dev->rx_pkt_burst = ixgbe_recv_pkts_lro_single_alloc; + } + } else if (dev->data->scattered_rx) { + /* + * Set the non-LRO scattered callback: there are Vector and + * single allocation versions. 
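A condensed summary (editorial) of the receive-callback dispatch order implemented below, using the function names as they appear in this file:

/*
 * lro enabled          -> ixgbe_recv_pkts_lro_bulk_alloc or _lro_single_alloc
 * scattered_rx         -> ixgbe_recv_scattered_pkts_vec, _lro_bulk_alloc or
 *                         _lro_single_alloc
 * rx_vec_allowed       -> ixgbe_recv_pkts_vec
 * rx_bulk_alloc_allowed-> ixgbe_recv_pkts_bulk_alloc
 * otherwise            -> ixgbe_recv_pkts
 */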
+ */ + if (adapter->rx_vec_allowed) { + PMD_INIT_LOG(DEBUG, "Using Vector Scattered Rx " + "callback (port=%d).", + dev->data->port_id); + + dev->rx_pkt_burst = ixgbe_recv_scattered_pkts_vec; + } else if (adapter->rx_bulk_alloc_allowed) { + PMD_INIT_LOG(DEBUG, "Using a Scattered with bulk " + "allocation callback (port=%d).", + dev->data->port_id); + dev->rx_pkt_burst = ixgbe_recv_pkts_lro_bulk_alloc; + } else { + PMD_INIT_LOG(DEBUG, "Using Regualr (non-vector, " + "single allocation) " + "Scattered Rx callback " + "(port=%d).", + dev->data->port_id); + + dev->rx_pkt_burst = ixgbe_recv_pkts_lro_single_alloc; + } + /* + * Below we set "simple" callbacks according to port/queues parameters. + * If parameters allow we are going to choose between the following + * callbacks: + * - Vector + * - Bulk Allocation + * - Single buffer allocation (the simplest one) + */ + } else if (adapter->rx_vec_allowed) { + PMD_INIT_LOG(DEBUG, "Vector rx enabled, please make sure RX " + "burst size no less than %d (port=%d).", + RTE_IXGBE_DESCS_PER_LOOP, + dev->data->port_id); + + dev->rx_pkt_burst = ixgbe_recv_pkts_vec; + } else if (adapter->rx_bulk_alloc_allowed) { + PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are " + "satisfied. Rx Burst Bulk Alloc function " + "will be used on port=%d.", + dev->data->port_id); + + dev->rx_pkt_burst = ixgbe_recv_pkts_bulk_alloc; + } else { + PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are not " + "satisfied, or Scattered Rx is requested " + "(port=%d).", + dev->data->port_id); + + dev->rx_pkt_burst = ixgbe_recv_pkts; + } + + /* Propagate information about RX function choice through all queues. */ + + rx_using_sse = + (dev->rx_pkt_burst == ixgbe_recv_scattered_pkts_vec || + dev->rx_pkt_burst == ixgbe_recv_pkts_vec); + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i]; + + rxq->rx_using_sse = rx_using_sse; +#ifdef RTE_LIBRTE_SECURITY + rxq->using_ipsec = !!(dev->data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_SECURITY); +#endif + } +} + +/** + * ixgbe_set_rsc - configure RSC related port HW registers + * + * Configures the port's RSC related registers according to the 4.6.7.2 chapter + * of 82599 Spec (x540 configuration is virtually the same). + * + * @dev port handle + * + * Returns 0 in case of success or a non-zero error code + */ +static int +ixgbe_set_rsc(struct rte_eth_dev *dev) +{ + struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_eth_dev_info dev_info = { 0 }; + bool rsc_capable = false; + uint16_t i; + uint32_t rdrxctl; + uint32_t rfctl; + + /* Sanity check */ + dev->dev_ops->dev_infos_get(dev, &dev_info); + if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO) + rsc_capable = true; + + if (!rsc_capable && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) { + PMD_INIT_LOG(CRIT, "LRO is requested on HW that doesn't " + "support it"); + return -EINVAL; + } + + /* RSC global configuration (chapter 4.6.7.2.1 of 82599 Spec) */ + + if ((rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC) && + (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) { + /* + * According to chapter of 4.6.7.2.1 of the Spec Rev. + * 3.0 RSC configuration requires HW CRC stripping being + * enabled. If user requested both HW CRC stripping off + * and RSC on - return an error. 
+ */ + PMD_INIT_LOG(CRIT, "LRO can't be enabled when HW CRC " + "is disabled"); + return -EINVAL; + } + + /* RFCTL configuration */ + rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL); + if ((rsc_capable) && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) + /* + * Since NFS packets coalescing is not supported - clear + * RFCTL.NFSW_DIS and RFCTL.NFSR_DIS when RSC is + * enabled. + */ + rfctl &= ~(IXGBE_RFCTL_RSC_DIS | IXGBE_RFCTL_NFSW_DIS | + IXGBE_RFCTL_NFSR_DIS); + else + rfctl |= IXGBE_RFCTL_RSC_DIS; + IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl); + + /* If LRO hasn't been requested - we are done here. */ + if (!(rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) + return 0; + + /* Set RDRXCTL.RSCACKC bit */ + rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); + rdrxctl |= IXGBE_RDRXCTL_RSCACKC; + IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl); + + /* Per-queue RSC configuration (chapter 4.6.7.2.2 of 82599 Spec) */ + for (i = 0; i < dev->data->nb_rx_queues; i++) { + struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i]; + uint32_t srrctl = + IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxq->reg_idx)); + uint32_t rscctl = + IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxq->reg_idx)); + uint32_t psrtype = + IXGBE_READ_REG(hw, IXGBE_PSRTYPE(rxq->reg_idx)); + uint32_t eitr = + IXGBE_READ_REG(hw, IXGBE_EITR(rxq->reg_idx)); + + /* + * ixgbe PMD doesn't support header-split at the moment. + * + * Following the 4.6.7.2.1 chapter of the 82599/x540 + * Spec if RSC is enabled the SRRCTL[n].BSIZEHEADER + * should be configured even if header split is not + * enabled. We will configure it 128 bytes following the + * recommendation in the spec. + */ + srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK; + srrctl |= (128 << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) & + IXGBE_SRRCTL_BSIZEHDR_MASK; + + /* + * TODO: Consider setting the Receive Descriptor Minimum + * Threshold Size for an RSC case. This is not an obviously + * beneficiary option but the one worth considering... + */ + + rscctl |= IXGBE_RSCCTL_RSCEN; + rscctl |= ixgbe_get_rscctl_maxdesc(rxq->mb_pool); + psrtype |= IXGBE_PSRTYPE_TCPHDR; + + /* + * RSC: Set ITR interval corresponding to 2K ints/s. + * + * Full-sized RSC aggregations for a 10Gb/s link will + * arrive at about 20K aggregation/s rate. + * + * 2K inst/s rate will make only 10% of the + * aggregations to be closed due to the interrupt timer + * expiration for a streaming at wire-speed case. + * + * For a sparse streaming case this setting will yield + * at most 500us latency for a single RSC aggregation. + */ + eitr &= ~IXGBE_EITR_ITR_INT_MASK; + eitr |= IXGBE_EITR_INTERVAL_US(IXGBE_QUEUE_ITR_INTERVAL_DEFAULT); + eitr |= IXGBE_EITR_CNT_WDIS; + + IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxq->reg_idx), srrctl); + IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxq->reg_idx), rscctl); + IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(rxq->reg_idx), psrtype); + IXGBE_WRITE_REG(hw, IXGBE_EITR(rxq->reg_idx), eitr); + + /* + * RSC requires the mapping of the queue to the + * interrupt vector. + */ + ixgbe_set_ivar(dev, rxq->reg_idx, i, 0); + } + + dev->data->lro = 1; + + PMD_INIT_LOG(DEBUG, "enabling LRO mode"); + + return 0; +} + +/* + * Initializes Receive Unit. 
+ */ +int __rte_cold +ixgbe_dev_rx_init(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw; + struct ixgbe_rx_queue *rxq; + uint64_t bus_addr; + uint32_t rxctrl; + uint32_t fctrl; + uint32_t hlreg0; + uint32_t maxfrs; + uint32_t srrctl; + uint32_t rdrxctl; + uint32_t rxcsum; + uint16_t buf_size; + uint16_t i; + struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode; + int rc; + + PMD_INIT_FUNC_TRACE(); + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* + * Make sure receives are disabled while setting + * up the RX context (registers, descriptor rings, etc.). + */ + rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); + IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN); + + /* Enable receipt of broadcasted frames */ + fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); + fctrl |= IXGBE_FCTRL_BAM; + fctrl |= IXGBE_FCTRL_DPF; + fctrl |= IXGBE_FCTRL_PMCF; + IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); + + /* + * Configure CRC stripping, if any. + */ + hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); + if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC) + hlreg0 &= ~IXGBE_HLREG0_RXCRCSTRP; + else + hlreg0 |= IXGBE_HLREG0_RXCRCSTRP; + + /* + * Configure jumbo frame support, if any. + */ + if (rx_conf->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) { + hlreg0 |= IXGBE_HLREG0_JUMBOEN; + maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS); + maxfrs &= 0x0000FFFF; + maxfrs |= (rx_conf->max_rx_pkt_len << 16); + IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, maxfrs); + } else + hlreg0 &= ~IXGBE_HLREG0_JUMBOEN; + + /* + * If loopback mode is configured, set LPBK bit. + */ + if (dev->data->dev_conf.lpbk_mode != 0) { + rc = ixgbe_check_supported_loopback_mode(dev); + if (rc < 0) { + PMD_INIT_LOG(ERR, "Unsupported loopback mode"); + return rc; + } + hlreg0 |= IXGBE_HLREG0_LPBK; + } else { + hlreg0 &= ~IXGBE_HLREG0_LPBK; + } + + IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); + + /* + * Assume no header split and no VLAN strip support + * on any Rx queue first . + */ + rx_conf->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP; + /* Setup RX queues */ + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = dev->data->rx_queues[i]; + + /* + * Reset crc_len in case it was changed after queue setup by a + * call to configure. + */ + if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC) + rxq->crc_len = RTE_ETHER_CRC_LEN; + else + rxq->crc_len = 0; + + /* Setup the Base and Length of the Rx Descriptor Rings */ + bus_addr = rxq->rx_ring_phys_addr; + IXGBE_WRITE_REG(hw, IXGBE_RDBAL(rxq->reg_idx), + (uint32_t)(bus_addr & 0x00000000ffffffffULL)); + IXGBE_WRITE_REG(hw, IXGBE_RDBAH(rxq->reg_idx), + (uint32_t)(bus_addr >> 32)); + IXGBE_WRITE_REG(hw, IXGBE_RDLEN(rxq->reg_idx), + rxq->nb_rx_desc * sizeof(union ixgbe_adv_rx_desc)); + IXGBE_WRITE_REG(hw, IXGBE_RDH(rxq->reg_idx), 0); + IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), 0); + + /* Configure the SRRCTL register */ + srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; + + /* Set if packets are dropped when no descriptors available */ + if (rxq->drop_en) + srrctl |= IXGBE_SRRCTL_DROP_EN; + + /* + * Configure the RX buffer size in the BSIZEPACKET field of + * the SRRCTL register of the queue. + * The value is in 1 KB resolution. Valid values can be from + * 1 KB to 16 KB. 
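A worked example of the BSIZEPACKET programming just described (assuming the common 2176-byte mbuf data room and 128-byte headroom): buf_size = 2176 - 128 = 2048, and 2048 >> 10 = 2, so the field is programmed to 2; the read-back that follows recovers an effective 2 KB receive buffer, within the stated 1 KB to 16 KB range.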
+ */ + buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) - + RTE_PKTMBUF_HEADROOM); + srrctl |= ((buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) & + IXGBE_SRRCTL_BSIZEPKT_MASK); + + IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxq->reg_idx), srrctl); + + buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) << + IXGBE_SRRCTL_BSIZEPKT_SHIFT); + + /* It adds dual VLAN length for supporting dual VLAN */ + if (dev->data->dev_conf.rxmode.max_rx_pkt_len + + 2 * IXGBE_VLAN_TAG_SIZE > buf_size) + dev->data->scattered_rx = 1; + if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) + rx_conf->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP; + } + + if (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER) + dev->data->scattered_rx = 1; + + /* + * Device configured with multiple RX queues. + */ + ixgbe_dev_mq_rx_configure(dev); + + /* + * Setup the Checksum Register. + * Disable Full-Packet Checksum which is mutually exclusive with RSS. + * Enable IP/L4 checkum computation by hardware if requested to do so. + */ + rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM); + rxcsum |= IXGBE_RXCSUM_PCSD; + if (rx_conf->offloads & DEV_RX_OFFLOAD_CHECKSUM) + rxcsum |= IXGBE_RXCSUM_IPPCSE; + else + rxcsum &= ~IXGBE_RXCSUM_IPPCSE; + + IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum); + + if (hw->mac.type == ixgbe_mac_82599EB || + hw->mac.type == ixgbe_mac_X540) { + rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); + if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC) + rdrxctl &= ~IXGBE_RDRXCTL_CRCSTRIP; + else + rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP; + rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE; + IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl); + } + + rc = ixgbe_set_rsc(dev); + if (rc) + return rc; + + ixgbe_set_rx_function(dev); + + return 0; +} + +/* + * Initializes Transmit Unit. + */ +void __rte_cold +ixgbe_dev_tx_init(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw; + struct ixgbe_tx_queue *txq; + uint64_t bus_addr; + uint32_t hlreg0; + uint32_t txctrl; + uint16_t i; + + PMD_INIT_FUNC_TRACE(); + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* Enable TX CRC (checksum offload requirement) and hw padding + * (TSO requirement) + */ + hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); + hlreg0 |= (IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_TXPADEN); + IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); + + /* Setup the Base and Length of the Tx Descriptor Rings */ + for (i = 0; i < dev->data->nb_tx_queues; i++) { + txq = dev->data->tx_queues[i]; + + bus_addr = txq->tx_ring_phys_addr; + IXGBE_WRITE_REG(hw, IXGBE_TDBAL(txq->reg_idx), + (uint32_t)(bus_addr & 0x00000000ffffffffULL)); + IXGBE_WRITE_REG(hw, IXGBE_TDBAH(txq->reg_idx), + (uint32_t)(bus_addr >> 32)); + IXGBE_WRITE_REG(hw, IXGBE_TDLEN(txq->reg_idx), + txq->nb_tx_desc * sizeof(union ixgbe_adv_tx_desc)); + /* Setup the HW Tx Head and TX Tail descriptor pointers */ + IXGBE_WRITE_REG(hw, IXGBE_TDH(txq->reg_idx), 0); + IXGBE_WRITE_REG(hw, IXGBE_TDT(txq->reg_idx), 0); + + /* + * Disable Tx Head Writeback RO bit, since this hoses + * bookkeeping if things aren't delivered in order. 
+ */ + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + txctrl = IXGBE_READ_REG(hw, + IXGBE_DCA_TXCTRL(txq->reg_idx)); + txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN; + IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(txq->reg_idx), + txctrl); + break; + + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + default: + txctrl = IXGBE_READ_REG(hw, + IXGBE_DCA_TXCTRL_82599(txq->reg_idx)); + txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN; + IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(txq->reg_idx), + txctrl); + break; + } + } + + /* Device configured with multiple TX queues. */ + ixgbe_dev_mq_tx_configure(dev); +} + +/* + * Check if requested loopback mode is supported + */ +int +ixgbe_check_supported_loopback_mode(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_TX_RX) + if (hw->mac.type == ixgbe_mac_82599EB || + hw->mac.type == ixgbe_mac_X540 || + hw->mac.type == ixgbe_mac_X550 || + hw->mac.type == ixgbe_mac_X550EM_x || + hw->mac.type == ixgbe_mac_X550EM_a) + return 0; + + return -ENOTSUP; +} + +/* + * Set up link for 82599 loopback mode Tx->Rx. + */ +static inline void __rte_cold +ixgbe_setup_loopback_link_82599(struct ixgbe_hw *hw) +{ + PMD_INIT_FUNC_TRACE(); + + if (ixgbe_verify_lesm_fw_enabled_82599(hw)) { + if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM) != + IXGBE_SUCCESS) { + PMD_INIT_LOG(ERR, "Could not enable loopback mode"); + /* ignore error */ + return; + } + } + + /* Restart link */ + IXGBE_WRITE_REG(hw, + IXGBE_AUTOC, + IXGBE_AUTOC_LMS_10G_LINK_NO_AN | IXGBE_AUTOC_FLU); + ixgbe_reset_pipeline_82599(hw); + + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); + msec_delay(50); +} + + +/* + * Start Transmit and Receive Units. 
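+ * Programs the per-queue Tx threshold registers, enables the Tx DMA
+ * engine (DMATXCTL.TE on MACs newer than 82598), starts every Tx and
+ * Rx queue that is not marked for deferred start, enables the Rx DMA
+ * engine and, if requested, sets up the loopback link.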
+ */ +int __rte_cold +ixgbe_dev_rxtx_start(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw; + struct ixgbe_tx_queue *txq; + struct ixgbe_rx_queue *rxq; + uint32_t txdctl; + uint32_t dmatxctl; + uint32_t rxctrl; + uint16_t i; + int ret = 0; + + PMD_INIT_FUNC_TRACE(); + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + txq = dev->data->tx_queues[i]; + /* Setup Transmit Threshold Registers */ + txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx)); + txdctl |= txq->pthresh & 0x7F; + txdctl |= ((txq->hthresh & 0x7F) << 8); + txdctl |= ((txq->wthresh & 0x7F) << 16); + IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl); + } + + if (hw->mac.type != ixgbe_mac_82598EB) { + dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); + dmatxctl |= IXGBE_DMATXCTL_TE; + IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl); + } + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + txq = dev->data->tx_queues[i]; + if (!txq->tx_deferred_start) { + ret = ixgbe_dev_tx_queue_start(dev, i); + if (ret < 0) + return ret; + } + } + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = dev->data->rx_queues[i]; + if (!rxq->rx_deferred_start) { + ret = ixgbe_dev_rx_queue_start(dev, i); + if (ret < 0) + return ret; + } + } + + /* Enable Receive engine */ + rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); + if (hw->mac.type == ixgbe_mac_82598EB) + rxctrl |= IXGBE_RXCTRL_DMBYPS; + rxctrl |= IXGBE_RXCTRL_RXEN; + hw->mac.ops.enable_rx_dma(hw, rxctrl); + + /* If loopback mode is enabled, set up the link accordingly */ + if (dev->data->dev_conf.lpbk_mode != 0) { + if (hw->mac.type == ixgbe_mac_82599EB) + ixgbe_setup_loopback_link_82599(hw); + else if (hw->mac.type == ixgbe_mac_X540 || + hw->mac.type == ixgbe_mac_X550 || + hw->mac.type == ixgbe_mac_X550EM_x || + hw->mac.type == ixgbe_mac_X550EM_a) + ixgbe_setup_loopback_link_x540_x550(hw, true); + } + +#ifdef RTE_LIBRTE_SECURITY + if ((dev->data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_SECURITY) || + (dev->data->dev_conf.txmode.offloads & + DEV_TX_OFFLOAD_SECURITY)) { + ret = ixgbe_crypto_enable_ipsec(dev); + if (ret != 0) { + PMD_DRV_LOG(ERR, + "ixgbe_crypto_enable_ipsec fails with %d.", + ret); + return ret; + } + } +#endif + + return 0; +} + +/* + * Start Receive Units for specified queue. + */ +int __rte_cold +ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) +{ + struct ixgbe_hw *hw; + struct ixgbe_rx_queue *rxq; + uint32_t rxdctl; + int poll_ms; + + PMD_INIT_FUNC_TRACE(); + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + rxq = dev->data->rx_queues[rx_queue_id]; + + /* Allocate buffers for descriptor rings */ + if (ixgbe_alloc_rx_queue_mbufs(rxq) != 0) { + PMD_INIT_LOG(ERR, "Could not alloc mbuf for queue:%d", + rx_queue_id); + return -1; + } + rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx)); + rxdctl |= IXGBE_RXDCTL_ENABLE; + IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl); + + /* Wait until RX Enable ready */ + poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS; + do { + rte_delay_ms(1); + rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx)); + } while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE)); + if (!poll_ms) + PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d", rx_queue_id); + rte_wmb(); + IXGBE_WRITE_REG(hw, IXGBE_RDH(rxq->reg_idx), 0); + IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1); + dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; + + return 0; +} + +/* + * Stop Receive Units for specified queue. 
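+ * Clears RXDCTL.ENABLE, polls until the hardware reports the queue
+ * as disabled, then releases the queue's mbufs and resets its state.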
+ */ +int __rte_cold +ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id) +{ + struct ixgbe_hw *hw; + struct ixgbe_adapter *adapter = dev->data->dev_private; + struct ixgbe_rx_queue *rxq; + uint32_t rxdctl; + int poll_ms; + + PMD_INIT_FUNC_TRACE(); + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + rxq = dev->data->rx_queues[rx_queue_id]; + + rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx)); + rxdctl &= ~IXGBE_RXDCTL_ENABLE; + IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl); + + /* Wait until RX Enable bit clear */ + poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS; + do { + rte_delay_ms(1); + rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx)); + } while (--poll_ms && (rxdctl & IXGBE_RXDCTL_ENABLE)); + if (!poll_ms) + PMD_INIT_LOG(ERR, "Could not disable Rx Queue %d", rx_queue_id); + + rte_delay_us(RTE_IXGBE_WAIT_100_US); + + ixgbe_rx_queue_release_mbufs(rxq); + ixgbe_reset_rx_queue(adapter, rxq); + dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; + + return 0; +} + + +/* + * Start Transmit Units for specified queue. + */ +int __rte_cold +ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id) +{ + struct ixgbe_hw *hw; + struct ixgbe_tx_queue *txq; + uint32_t txdctl; + int poll_ms; + + PMD_INIT_FUNC_TRACE(); + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + txq = dev->data->tx_queues[tx_queue_id]; + IXGBE_WRITE_REG(hw, IXGBE_TDH(txq->reg_idx), 0); + txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx)); + txdctl |= IXGBE_TXDCTL_ENABLE; + IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl); + + /* Wait until TX Enable ready */ + if (hw->mac.type == ixgbe_mac_82599EB) { + poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS; + do { + rte_delay_ms(1); + txdctl = IXGBE_READ_REG(hw, + IXGBE_TXDCTL(txq->reg_idx)); + } while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE)); + if (!poll_ms) + PMD_INIT_LOG(ERR, "Could not enable Tx Queue %d", + tx_queue_id); + } + rte_wmb(); + IXGBE_WRITE_REG(hw, IXGBE_TDT(txq->reg_idx), 0); + dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; + + return 0; +} + +/* + * Stop Transmit Units for specified queue. 
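+ * On 82599 the queue is first drained (TDH is polled until it reaches
+ * TDT); TXDCTL.ENABLE is then cleared and polled, and the software
+ * ring is finally released and reset through the queue ops.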
+ */ +int __rte_cold +ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id) +{ + struct ixgbe_hw *hw; + struct ixgbe_tx_queue *txq; + uint32_t txdctl; + uint32_t txtdh, txtdt; + int poll_ms; + + PMD_INIT_FUNC_TRACE(); + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + txq = dev->data->tx_queues[tx_queue_id]; + + /* Wait until TX queue is empty */ + if (hw->mac.type == ixgbe_mac_82599EB) { + poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS; + do { + rte_delay_us(RTE_IXGBE_WAIT_100_US); + txtdh = IXGBE_READ_REG(hw, + IXGBE_TDH(txq->reg_idx)); + txtdt = IXGBE_READ_REG(hw, + IXGBE_TDT(txq->reg_idx)); + } while (--poll_ms && (txtdh != txtdt)); + if (!poll_ms) + PMD_INIT_LOG(ERR, + "Tx Queue %d is not empty when stopping.", + tx_queue_id); + } + + txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx)); + txdctl &= ~IXGBE_TXDCTL_ENABLE; + IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl); + + /* Wait until TX Enable bit clear */ + if (hw->mac.type == ixgbe_mac_82599EB) { + poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS; + do { + rte_delay_ms(1); + txdctl = IXGBE_READ_REG(hw, + IXGBE_TXDCTL(txq->reg_idx)); + } while (--poll_ms && (txdctl & IXGBE_TXDCTL_ENABLE)); + if (!poll_ms) + PMD_INIT_LOG(ERR, "Could not disable Tx Queue %d", + tx_queue_id); + } + + if (txq->ops != NULL) { + txq->ops->release_mbufs(txq); + txq->ops->reset(txq); + } + dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; + + return 0; +} + +void +ixgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, + struct rte_eth_rxq_info *qinfo) +{ + struct ixgbe_rx_queue *rxq; + + rxq = dev->data->rx_queues[queue_id]; + + qinfo->mp = rxq->mb_pool; + qinfo->scattered_rx = dev->data->scattered_rx; + qinfo->nb_desc = rxq->nb_rx_desc; + + qinfo->conf.rx_free_thresh = rxq->rx_free_thresh; + qinfo->conf.rx_drop_en = rxq->drop_en; + qinfo->conf.rx_deferred_start = rxq->rx_deferred_start; + qinfo->conf.offloads = rxq->offloads; +} + +void +ixgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, + struct rte_eth_txq_info *qinfo) +{ + struct ixgbe_tx_queue *txq; + + txq = dev->data->tx_queues[queue_id]; + + qinfo->nb_desc = txq->nb_tx_desc; + + qinfo->conf.tx_thresh.pthresh = txq->pthresh; + qinfo->conf.tx_thresh.hthresh = txq->hthresh; + qinfo->conf.tx_thresh.wthresh = txq->wthresh; + + qinfo->conf.tx_free_thresh = txq->tx_free_thresh; + qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh; + qinfo->conf.offloads = txq->offloads; + qinfo->conf.tx_deferred_start = txq->tx_deferred_start; +} + +/* + * [VF] Initializes Receive Unit. + */ +int __rte_cold +ixgbevf_dev_rx_init(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw; + struct ixgbe_rx_queue *rxq; + struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode; + uint64_t bus_addr; + uint32_t srrctl, psrtype = 0; + uint16_t buf_size; + uint16_t i; + int ret; + + PMD_INIT_FUNC_TRACE(); + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (rte_is_power_of_2(dev->data->nb_rx_queues) == 0) { + PMD_INIT_LOG(ERR, "The number of Rx queue invalid, " + "it should be power of 2"); + return -1; + } + + if (dev->data->nb_rx_queues > hw->mac.max_rx_queues) { + PMD_INIT_LOG(ERR, "The number of Rx queue invalid, " + "it should be equal to or less than %d", + hw->mac.max_rx_queues); + return -1; + } + + /* + * When the VF driver issues a IXGBE_VF_RESET request, the PF driver + * disables the VF receipt of packets if the PF MTU is > 1500. + * This is done to deal with 82599 limitations that imposes + * the PF and all VFs to share the same MTU. 
+ * Then, the PF driver enables again the VF receipt of packet when + * the VF driver issues a IXGBE_VF_SET_LPE request. + * In the meantime, the VF device cannot be used, even if the VF driver + * and the Guest VM network stack are ready to accept packets with a + * size up to the PF MTU. + * As a work-around to this PF behaviour, force the call to + * ixgbevf_rlpml_set_vf even if jumbo frames are not used. This way, + * VF packets received can work in all cases. + */ + ixgbevf_rlpml_set_vf(hw, + (uint16_t)dev->data->dev_conf.rxmode.max_rx_pkt_len); + + /* + * Assume no header split and no VLAN strip support + * on any Rx queue first . + */ + rxmode->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP; + /* Setup RX queues */ + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = dev->data->rx_queues[i]; + + /* Allocate buffers for descriptor rings */ + ret = ixgbe_alloc_rx_queue_mbufs(rxq); + if (ret) + return ret; + + /* Setup the Base and Length of the Rx Descriptor Rings */ + bus_addr = rxq->rx_ring_phys_addr; + + IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i), + (uint32_t)(bus_addr & 0x00000000ffffffffULL)); + IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i), + (uint32_t)(bus_addr >> 32)); + IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i), + rxq->nb_rx_desc * sizeof(union ixgbe_adv_rx_desc)); + IXGBE_WRITE_REG(hw, IXGBE_VFRDH(i), 0); + IXGBE_WRITE_REG(hw, IXGBE_VFRDT(i), 0); + + + /* Configure the SRRCTL register */ + srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; + + /* Set if packets are dropped when no descriptors available */ + if (rxq->drop_en) + srrctl |= IXGBE_SRRCTL_DROP_EN; + + /* + * Configure the RX buffer size in the BSIZEPACKET field of + * the SRRCTL register of the queue. + * The value is in 1 KB resolution. Valid values can be from + * 1 KB to 16 KB. + */ + buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) - + RTE_PKTMBUF_HEADROOM); + srrctl |= ((buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) & + IXGBE_SRRCTL_BSIZEPKT_MASK); + + /* + * VF modification to write virtual function SRRCTL register + */ + IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), srrctl); + + buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) << + IXGBE_SRRCTL_BSIZEPKT_SHIFT); + + if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER || + /* It adds dual VLAN length for supporting dual VLAN */ + (rxmode->max_rx_pkt_len + + 2 * IXGBE_VLAN_TAG_SIZE) > buf_size) { + if (!dev->data->scattered_rx) + PMD_INIT_LOG(DEBUG, "forcing scatter mode"); + dev->data->scattered_rx = 1; + } + + if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) + rxmode->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP; + } + + /* Set RQPL for VF RSS according to max Rx queue */ + psrtype |= (dev->data->nb_rx_queues >> 1) << + IXGBE_PSRTYPE_RQPL_SHIFT; + IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype); + + ixgbe_set_rx_function(dev); + + return 0; +} + +/* + * [VF] Initializes Transmit Unit. 
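+ * Programs the VF Tx ring base, length, head and tail registers and
+ * clears the head write-back relaxed-ordering bit for every Tx queue.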
+ */ +void __rte_cold +ixgbevf_dev_tx_init(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw; + struct ixgbe_tx_queue *txq; + uint64_t bus_addr; + uint32_t txctrl; + uint16_t i; + + PMD_INIT_FUNC_TRACE(); + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* Setup the Base and Length of the Tx Descriptor Rings */ + for (i = 0; i < dev->data->nb_tx_queues; i++) { + txq = dev->data->tx_queues[i]; + bus_addr = txq->tx_ring_phys_addr; + IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i), + (uint32_t)(bus_addr & 0x00000000ffffffffULL)); + IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), + (uint32_t)(bus_addr >> 32)); + IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i), + txq->nb_tx_desc * sizeof(union ixgbe_adv_tx_desc)); + /* Setup the HW Tx Head and TX Tail descriptor pointers */ + IXGBE_WRITE_REG(hw, IXGBE_VFTDH(i), 0); + IXGBE_WRITE_REG(hw, IXGBE_VFTDT(i), 0); + + /* + * Disable Tx Head Writeback RO bit, since this hoses + * bookkeeping if things aren't delivered in order. + */ + txctrl = IXGBE_READ_REG(hw, + IXGBE_VFDCA_TXCTRL(i)); + txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN; + IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), + txctrl); + } +} + +/* + * [VF] Start Transmit and Receive Units. + */ +void __rte_cold +ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw; + struct ixgbe_tx_queue *txq; + struct ixgbe_rx_queue *rxq; + uint32_t txdctl; + uint32_t rxdctl; + uint16_t i; + int poll_ms; + + PMD_INIT_FUNC_TRACE(); + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + txq = dev->data->tx_queues[i]; + /* Setup Transmit Threshold Registers */ + txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i)); + txdctl |= txq->pthresh & 0x7F; + txdctl |= ((txq->hthresh & 0x7F) << 8); + txdctl |= ((txq->wthresh & 0x7F) << 16); + IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl); + } + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + + txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i)); + txdctl |= IXGBE_TXDCTL_ENABLE; + IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl); + + poll_ms = 10; + /* Wait until TX Enable ready */ + do { + rte_delay_ms(1); + txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i)); + } while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE)); + if (!poll_ms) + PMD_INIT_LOG(ERR, "Could not enable Tx Queue %d", i); + } + for (i = 0; i < dev->data->nb_rx_queues; i++) { + + rxq = dev->data->rx_queues[i]; + + rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)); + rxdctl |= IXGBE_RXDCTL_ENABLE; + IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl); + + /* Wait until RX Enable ready */ + poll_ms = 10; + do { + rte_delay_ms(1); + rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)); + } while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE)); + if (!poll_ms) + PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d", i); + rte_wmb(); + IXGBE_WRITE_REG(hw, IXGBE_VFRDT(i), rxq->nb_rx_desc - 1); + + } +} + +int +ixgbe_rss_conf_init(struct ixgbe_rte_flow_rss_conf *out, + const struct rte_flow_action_rss *in) +{ + if (in->key_len > RTE_DIM(out->key) || + in->queue_num > RTE_DIM(out->queue)) + return -EINVAL; + out->conf = (struct rte_flow_action_rss){ + .func = in->func, + .level = in->level, + .types = in->types, + .key_len = in->key_len, + .queue_num = in->queue_num, + .key = memcpy(out->key, in->key, in->key_len), + .queue = memcpy(out->queue, in->queue, + sizeof(*in->queue) * in->queue_num), + }; + return 0; +} + +int +ixgbe_action_rss_same(const struct rte_flow_action_rss *comp, + const struct rte_flow_action_rss *with) +{ + return (comp->func == with->func && + comp->level == with->level && + 
comp->types == with->types && + comp->key_len == with->key_len && + comp->queue_num == with->queue_num && + !memcmp(comp->key, with->key, with->key_len) && + !memcmp(comp->queue, with->queue, + sizeof(*with->queue) * with->queue_num)); +} + +int +ixgbe_config_rss_filter(struct rte_eth_dev *dev, + struct ixgbe_rte_flow_rss_conf *conf, bool add) +{ + struct ixgbe_hw *hw; + uint32_t reta; + uint16_t i; + uint16_t j; + uint16_t sp_reta_size; + uint32_t reta_reg; + struct rte_eth_rss_conf rss_conf = { + .rss_key = conf->conf.key_len ? + (void *)(uintptr_t)conf->conf.key : NULL, + .rss_key_len = conf->conf.key_len, + .rss_hf = conf->conf.types, + }; + struct ixgbe_filter_info *filter_info = + IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + + PMD_INIT_FUNC_TRACE(); + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + sp_reta_size = ixgbe_reta_size_get(hw->mac.type); + + if (!add) { + if (ixgbe_action_rss_same(&filter_info->rss_info.conf, + &conf->conf)) { + ixgbe_rss_disable(dev); + memset(&filter_info->rss_info, 0, + sizeof(struct ixgbe_rte_flow_rss_conf)); + return 0; + } + return -EINVAL; + } + + if (filter_info->rss_info.conf.queue_num) + return -EINVAL; + /* Fill in redirection table + * The byte-swap is needed because NIC registers are in + * little-endian order. + */ + reta = 0; + for (i = 0, j = 0; i < sp_reta_size; i++, j++) { + reta_reg = ixgbe_reta_reg_get(hw->mac.type, i); + + if (j == conf->conf.queue_num) + j = 0; + reta = (reta << 8) | conf->conf.queue[j]; + if ((i & 3) == 3) + IXGBE_WRITE_REG(hw, reta_reg, + rte_bswap32(reta)); + } + + /* Configure the RSS key and the RSS protocols used to compute + * the RSS hash of input packets. + */ + if ((rss_conf.rss_hf & IXGBE_RSS_OFFLOAD_ALL) == 0) { + ixgbe_rss_disable(dev); + return 0; + } + if (rss_conf.rss_key == NULL) + rss_conf.rss_key = rss_intel_key; /* Default hash key */ + ixgbe_hw_rss_hash_set(hw, &rss_conf); + + if (ixgbe_rss_conf_init(&filter_info->rss_info, &conf->conf)) + return -EINVAL; + + return 0; +} + +/* Stubs needed for linkage when CONFIG_RTE_ARCH_PPC_64 is set */ +#if defined(RTE_ARCH_PPC_64) +int +ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev __rte_unused *dev) +{ + return -1; +} + +uint16_t +ixgbe_recv_pkts_vec( + void __rte_unused *rx_queue, + struct rte_mbuf __rte_unused **rx_pkts, + uint16_t __rte_unused nb_pkts) +{ + return 0; +} + +uint16_t +ixgbe_recv_scattered_pkts_vec( + void __rte_unused *rx_queue, + struct rte_mbuf __rte_unused **rx_pkts, + uint16_t __rte_unused nb_pkts) +{ + return 0; +} + +int +ixgbe_rxq_vec_setup(struct ixgbe_rx_queue __rte_unused *rxq) +{ + return -1; +} + +uint16_t +ixgbe_xmit_fixed_burst_vec(void __rte_unused *tx_queue, + struct rte_mbuf __rte_unused **tx_pkts, + uint16_t __rte_unused nb_pkts) +{ + return 0; +} + +int +ixgbe_txq_vec_setup(struct ixgbe_tx_queue __rte_unused *txq) +{ + return -1; +} + +void +ixgbe_rx_queue_release_mbufs_vec(struct ixgbe_rx_queue __rte_unused *rxq) +{ + return; +} +#endif diff --git a/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_rxtx.h b/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_rxtx.h new file mode 100644 index 000000000..20a8b291d --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_rxtx.h @@ -0,0 +1,303 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2014 Intel Corporation + */ + +#ifndef _IXGBE_RXTX_H_ +#define _IXGBE_RXTX_H_ + +/* + * Rings setup and release. + * + * TDBA/RDBA should be aligned on 16 byte boundary. But TDLEN/RDLEN should be + * multiple of 128 bytes. 
So we align TDBA/RDBA on 128 byte boundary. This will + * also optimize cache line size effect. H/W supports up to cache line size 128. + */ +#define IXGBE_ALIGN 128 + +#define IXGBE_RXD_ALIGN (IXGBE_ALIGN / sizeof(union ixgbe_adv_rx_desc)) +#define IXGBE_TXD_ALIGN (IXGBE_ALIGN / sizeof(union ixgbe_adv_tx_desc)) + +/* + * Maximum number of Ring Descriptors. + * + * Since RDLEN/TDLEN should be multiple of 128 bytes, the number of ring + * descriptors should meet the following condition: + * (num_ring_desc * sizeof(rx/tx descriptor)) % 128 == 0 + */ +#define IXGBE_MIN_RING_DESC 32 +#define IXGBE_MAX_RING_DESC 4096 + +#define RTE_PMD_IXGBE_TX_MAX_BURST 32 +#define RTE_PMD_IXGBE_RX_MAX_BURST 32 +#define RTE_IXGBE_TX_MAX_FREE_BUF_SZ 64 + +#define RTE_IXGBE_DESCS_PER_LOOP 4 + +#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64) +#define RTE_IXGBE_RXQ_REARM_THRESH 32 +#define RTE_IXGBE_MAX_RX_BURST RTE_IXGBE_RXQ_REARM_THRESH +#endif + +#define RX_RING_SZ ((IXGBE_MAX_RING_DESC + RTE_PMD_IXGBE_RX_MAX_BURST) * \ + sizeof(union ixgbe_adv_rx_desc)) + +#ifdef RTE_PMD_PACKET_PREFETCH +#define rte_packet_prefetch(p) rte_prefetch1(p) +#else +#define rte_packet_prefetch(p) do {} while(0) +#endif + +#define RTE_IXGBE_REGISTER_POLL_WAIT_10_MS 10 +#define RTE_IXGBE_WAIT_100_US 100 +#define RTE_IXGBE_VMTXSW_REGISTER_COUNT 2 + +#define IXGBE_TX_MAX_SEG 40 + +#define IXGBE_TX_MIN_PKT_LEN 14 + +#define IXGBE_PACKET_TYPE_MASK_82599 0X7F +#define IXGBE_PACKET_TYPE_MASK_X550 0X10FF +#define IXGBE_PACKET_TYPE_MASK_TUNNEL 0XFF +#define IXGBE_PACKET_TYPE_TUNNEL_BIT 0X1000 + +#define IXGBE_PACKET_TYPE_MAX 0X80 +#define IXGBE_PACKET_TYPE_TN_MAX 0X100 +#define IXGBE_PACKET_TYPE_SHIFT 0X04 + +/** + * Structure associated with each descriptor of the RX ring of a RX queue. + */ +struct ixgbe_rx_entry { + struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */ +}; + +struct ixgbe_scattered_rx_entry { + struct rte_mbuf *fbuf; /**< First segment of the fragmented packet. */ +}; + +/** + * Structure associated with each descriptor of the TX ring of a TX queue. + */ +struct ixgbe_tx_entry { + struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */ + uint16_t next_id; /**< Index of next descriptor in ring. */ + uint16_t last_id; /**< Index of last scattered descriptor. */ +}; + +/** + * Structure associated with each descriptor of the TX ring of a TX queue. + */ +struct ixgbe_tx_entry_v { + struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */ +}; + +/** + * Structure associated with each RX queue. + */ +struct ixgbe_rx_queue { + struct rte_mempool *mb_pool; /**< mbuf pool to populate RX ring. */ + volatile union ixgbe_adv_rx_desc *rx_ring; /**< RX ring virtual address. */ + uint64_t rx_ring_phys_addr; /**< RX ring DMA address. */ + volatile uint32_t *rdt_reg_addr; /**< RDT register address. */ + volatile uint32_t *rdh_reg_addr; /**< RDH register address. */ + struct ixgbe_rx_entry *sw_ring; /**< address of RX software ring. */ + struct ixgbe_scattered_rx_entry *sw_sc_ring; /**< address of scattered Rx software ring. */ + struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */ + struct rte_mbuf *pkt_last_seg; /**< Last segment of current packet. */ + uint64_t mbuf_initializer; /**< value to init mbufs */ + uint16_t nb_rx_desc; /**< number of RX descriptors. */ + uint16_t rx_tail; /**< current value of RDT register. */ + uint16_t nb_rx_hold; /**< number of held free RX desc. 
*/ + uint16_t rx_nb_avail; /**< nr of staged pkts ready to ret to app */ + uint16_t rx_next_avail; /**< idx of next staged pkt to ret to app */ + uint16_t rx_free_trigger; /**< triggers rx buffer allocation */ + uint8_t rx_using_sse; + /**< indicates that vector RX is in use */ +#ifdef RTE_LIBRTE_SECURITY + uint8_t using_ipsec; + /**< indicates that IPsec RX feature is in use */ +#endif +#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64) + uint16_t rxrearm_nb; /**< number of remaining to be re-armed */ + uint16_t rxrearm_start; /**< the idx we start the re-arming from */ +#endif + uint16_t rx_free_thresh; /**< max free RX desc to hold. */ + uint16_t queue_id; /**< RX queue index. */ + uint16_t reg_idx; /**< RX queue register index. */ + uint16_t pkt_type_mask; /**< Packet type mask for different NICs. */ + uint16_t port_id; /**< Device port identifier. */ + uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise. */ + uint8_t drop_en; /**< If not 0, set SRRCTL.Drop_En. */ + uint8_t rx_deferred_start; /**< not in global dev start. */ + /** flags to set in mbuf when a vlan is detected. */ + uint64_t vlan_flags; + uint64_t offloads; /**< Rx offloads with DEV_RX_OFFLOAD_* */ + /** need to alloc dummy mbuf, for wraparound when scanning hw ring */ + struct rte_mbuf fake_mbuf; + /** hold packets to return to application */ + struct rte_mbuf *rx_stage[RTE_PMD_IXGBE_RX_MAX_BURST*2]; +}; + +/** + * IXGBE CTX Constants + */ +enum ixgbe_advctx_num { + IXGBE_CTX_0 = 0, /**< CTX0 */ + IXGBE_CTX_1 = 1, /**< CTX1 */ + IXGBE_CTX_NUM = 2, /**< CTX NUMBER */ +}; + +/** Offload features */ +union ixgbe_tx_offload { + uint64_t data[2]; + struct { + uint64_t l2_len:7; /**< L2 (MAC) Header Length. */ + uint64_t l3_len:9; /**< L3 (IP) Header Length. */ + uint64_t l4_len:8; /**< L4 (TCP/UDP) Header Length. */ + uint64_t tso_segsz:16; /**< TCP TSO segment size */ + uint64_t vlan_tci:16; + /**< VLAN Tag Control Identifier (CPU order). */ + + /* fields for TX offloading of tunnels */ + uint64_t outer_l3_len:8; /**< Outer L3 (IP) Hdr Length. */ + uint64_t outer_l2_len:8; /**< Outer L2 (MAC) Hdr Length. */ +#ifdef RTE_LIBRTE_SECURITY + /* inline ipsec related*/ + uint64_t sa_idx:8; /**< TX SA database entry index */ + uint64_t sec_pad_len:4; /**< padding length */ +#endif + }; +}; + +/* + * Compare mask for vlan_macip_len.data, + * should be in sync with ixgbe_vlan_macip.f layout. + * */ +#define TX_VLAN_CMP_MASK 0xFFFF0000 /**< VLAN length - 16-bits. */ +#define TX_MAC_LEN_CMP_MASK 0x0000FE00 /**< MAC length - 7-bits. */ +#define TX_IP_LEN_CMP_MASK 0x000001FF /**< IP length - 9-bits. */ +/** MAC+IP length. */ +#define TX_MACIP_LEN_CMP_MASK (TX_MAC_LEN_CMP_MASK | TX_IP_LEN_CMP_MASK) + +/** + * Structure to check if new context need be built + */ + +struct ixgbe_advctx_info { + uint64_t flags; /**< ol_flags for context build. */ + /**< tx offload: vlan, tso, l2-l3-l4 lengths. */ + union ixgbe_tx_offload tx_offload; + /** compare mask for tx offload. */ + union ixgbe_tx_offload tx_offload_mask; +}; + +/** + * Structure associated with each TX queue. + */ +struct ixgbe_tx_queue { + /** TX ring virtual address. */ + volatile union ixgbe_adv_tx_desc *tx_ring; + uint64_t tx_ring_phys_addr; /**< TX ring DMA address. */ + union { + struct ixgbe_tx_entry *sw_ring; /**< address of SW ring for scalar PMD. */ + struct ixgbe_tx_entry_v *sw_ring_v; /**< address of SW ring for vector PMD */ + }; + volatile uint32_t *tdt_reg_addr; /**< Address of TDT register. */ + uint16_t nb_tx_desc; /**< number of TX descriptors. 
*/ + uint16_t tx_tail; /**< current value of TDT reg. */ + /**< Start freeing TX buffers if there are less free descriptors than + this value. */ + uint16_t tx_free_thresh; + /** Number of TX descriptors to use before RS bit is set. */ + uint16_t tx_rs_thresh; + /** Number of TX descriptors used since RS bit was set. */ + uint16_t nb_tx_used; + /** Index to last TX descriptor to have been cleaned. */ + uint16_t last_desc_cleaned; + /** Total number of TX descriptors ready to be allocated. */ + uint16_t nb_tx_free; + uint16_t tx_next_dd; /**< next desc to scan for DD bit */ + uint16_t tx_next_rs; /**< next desc to set RS bit */ + uint16_t queue_id; /**< TX queue index. */ + uint16_t reg_idx; /**< TX queue register index. */ + uint16_t port_id; /**< Device port identifier. */ + uint8_t pthresh; /**< Prefetch threshold register. */ + uint8_t hthresh; /**< Host threshold register. */ + uint8_t wthresh; /**< Write-back threshold reg. */ + uint64_t offloads; /**< Tx offload flags of DEV_TX_OFFLOAD_* */ + uint32_t ctx_curr; /**< Hardware context states. */ + /** Hardware context0 history. */ + struct ixgbe_advctx_info ctx_cache[IXGBE_CTX_NUM]; + const struct ixgbe_txq_ops *ops; /**< txq ops */ + uint8_t tx_deferred_start; /**< not in global dev start. */ +#ifdef RTE_LIBRTE_SECURITY + uint8_t using_ipsec; + /**< indicates that IPsec TX feature is in use */ +#endif +}; + +struct ixgbe_txq_ops { + void (*release_mbufs)(struct ixgbe_tx_queue *txq); + void (*free_swring)(struct ixgbe_tx_queue *txq); + void (*reset)(struct ixgbe_tx_queue *txq); +}; + +/* + * Populate descriptors with the following info: + * 1.) buffer_addr = phys_addr + headroom + * 2.) cmd_type_len = DCMD_DTYP_FLAGS | pkt_len + * 3.) olinfo_status = pkt_len << PAYLEN_SHIFT + */ + +/* Defines for Tx descriptor */ +#define DCMD_DTYP_FLAGS (IXGBE_ADVTXD_DTYP_DATA |\ + IXGBE_ADVTXD_DCMD_IFCS |\ + IXGBE_ADVTXD_DCMD_DEXT |\ + IXGBE_ADVTXD_DCMD_EOP) + + +/* Takes an ethdev and a queue and sets up the tx function to be used based on + * the queue parameters. Used in tx_queue_setup by primary process and then + * in dev_init by secondary process when attaching to an existing ethdev. + */ +void ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq); + +/** + * Sets the rx_pkt_burst callback in the ixgbe rte_eth_dev instance. + * + * Sets the callback based on the device parameters: + * - ixgbe_hw.rx_bulk_alloc_allowed + * - rte_eth_dev_data.scattered_rx + * - rte_eth_dev_data.lro + * - conditions checked in ixgbe_rx_vec_condition_check() + * + * This means that the parameters above have to be configured prior to calling + * to this function. 
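+ * For example, a scattered receive function can only be selected once
+ * rte_eth_dev_data.scattered_rx has been set for the device.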
+ * + * @dev rte_eth_dev handle + */ +void ixgbe_set_rx_function(struct rte_eth_dev *dev); + +int ixgbe_check_supported_loopback_mode(struct rte_eth_dev *dev); +uint16_t ixgbe_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); +uint16_t ixgbe_recv_scattered_pkts_vec(void *rx_queue, + struct rte_mbuf **rx_pkts, uint16_t nb_pkts); +int ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev); +int ixgbe_rxq_vec_setup(struct ixgbe_rx_queue *rxq); +void ixgbe_rx_queue_release_mbufs_vec(struct ixgbe_rx_queue *rxq); +int ixgbe_dev_tx_done_cleanup(void *tx_queue, uint32_t free_cnt); + +extern const uint32_t ptype_table[IXGBE_PACKET_TYPE_MAX]; +extern const uint32_t ptype_table_tn[IXGBE_PACKET_TYPE_TN_MAX]; + +uint16_t ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); +int ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq); + +uint64_t ixgbe_get_tx_port_offloads(struct rte_eth_dev *dev); +uint64_t ixgbe_get_rx_queue_offloads(struct rte_eth_dev *dev); +uint64_t ixgbe_get_rx_port_offloads(struct rte_eth_dev *dev); +uint64_t ixgbe_get_tx_queue_offloads(struct rte_eth_dev *dev); + +#endif /* _IXGBE_RXTX_H_ */ diff --git a/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h b/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h new file mode 100644 index 000000000..a97c27189 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h @@ -0,0 +1,293 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2015 Intel Corporation + */ + +#ifndef _IXGBE_RXTX_VEC_COMMON_H_ +#define _IXGBE_RXTX_VEC_COMMON_H_ +#include +#include + +#include "ixgbe_ethdev.h" +#include "ixgbe_rxtx.h" + +static inline uint16_t +reassemble_packets(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_bufs, + uint16_t nb_bufs, uint8_t *split_flags) +{ + struct rte_mbuf *pkts[nb_bufs]; /*finished pkts*/ + struct rte_mbuf *start = rxq->pkt_first_seg; + struct rte_mbuf *end = rxq->pkt_last_seg; + unsigned int pkt_idx, buf_idx; + + for (buf_idx = 0, pkt_idx = 0; buf_idx < nb_bufs; buf_idx++) { + if (end != NULL) { + /* processing a split packet */ + end->next = rx_bufs[buf_idx]; + rx_bufs[buf_idx]->data_len += rxq->crc_len; + + start->nb_segs++; + start->pkt_len += rx_bufs[buf_idx]->data_len; + end = end->next; + + if (!split_flags[buf_idx]) { + /* it's the last packet of the set */ + start->hash = end->hash; + start->ol_flags = end->ol_flags; + /* we need to strip crc for the whole packet */ + start->pkt_len -= rxq->crc_len; + if (end->data_len > rxq->crc_len) + end->data_len -= rxq->crc_len; + else { + /* free up last mbuf */ + struct rte_mbuf *secondlast = start; + + start->nb_segs--; + while (secondlast->next != end) + secondlast = secondlast->next; + secondlast->data_len -= (rxq->crc_len - + end->data_len); + secondlast->next = NULL; + rte_pktmbuf_free_seg(end); + } + pkts[pkt_idx++] = start; + start = end = NULL; + } + } else { + /* not processing a split packet */ + if (!split_flags[buf_idx]) { + /* not a split packet, save and skip */ + pkts[pkt_idx++] = rx_bufs[buf_idx]; + continue; + } + end = start = rx_bufs[buf_idx]; + rx_bufs[buf_idx]->data_len += rxq->crc_len; + rx_bufs[buf_idx]->pkt_len += rxq->crc_len; + } + } + + /* save the partial packet for next time */ + rxq->pkt_first_seg = start; + rxq->pkt_last_seg = end; + memcpy(rx_bufs, pkts, pkt_idx * (sizeof(*pkts))); + return pkt_idx; +} + +static __rte_always_inline int +ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq) +{ + struct ixgbe_tx_entry_v *txep; + uint32_t status; + 
uint32_t n; + uint32_t i; + int nb_free = 0; + struct rte_mbuf *m, *free[RTE_IXGBE_TX_MAX_FREE_BUF_SZ]; + + /* check DD bit on threshold descriptor */ + status = txq->tx_ring[txq->tx_next_dd].wb.status; + if (!(status & IXGBE_ADVTXD_STAT_DD)) + return 0; + + n = txq->tx_rs_thresh; + + /* + * first buffer to free from S/W ring is at index + * tx_next_dd - (tx_rs_thresh-1) + */ + txep = &txq->sw_ring_v[txq->tx_next_dd - (n - 1)]; + m = rte_pktmbuf_prefree_seg(txep[0].mbuf); + if (likely(m != NULL)) { + free[0] = m; + nb_free = 1; + for (i = 1; i < n; i++) { + m = rte_pktmbuf_prefree_seg(txep[i].mbuf); + if (likely(m != NULL)) { + if (likely(m->pool == free[0]->pool)) + free[nb_free++] = m; + else { + rte_mempool_put_bulk(free[0]->pool, + (void *)free, nb_free); + free[0] = m; + nb_free = 1; + } + } + } + rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free); + } else { + for (i = 1; i < n; i++) { + m = rte_pktmbuf_prefree_seg(txep[i].mbuf); + if (m != NULL) + rte_mempool_put(m->pool, m); + } + } + + /* buffers were freed, update counters */ + txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh); + txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh); + if (txq->tx_next_dd >= txq->nb_tx_desc) + txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1); + + return txq->tx_rs_thresh; +} + +static __rte_always_inline void +tx_backlog_entry(struct ixgbe_tx_entry_v *txep, + struct rte_mbuf **tx_pkts, uint16_t nb_pkts) +{ + int i; + + for (i = 0; i < (int)nb_pkts; ++i) + txep[i].mbuf = tx_pkts[i]; +} + +static inline void +_ixgbe_tx_queue_release_mbufs_vec(struct ixgbe_tx_queue *txq) +{ + unsigned int i; + struct ixgbe_tx_entry_v *txe; + const uint16_t max_desc = (uint16_t)(txq->nb_tx_desc - 1); + + if (txq->sw_ring == NULL || txq->nb_tx_free == max_desc) + return; + + /* release the used mbufs in sw_ring */ + for (i = txq->tx_next_dd - (txq->tx_rs_thresh - 1); + i != txq->tx_tail; + i = (i + 1) & max_desc) { + txe = &txq->sw_ring_v[i]; + rte_pktmbuf_free_seg(txe->mbuf); + } + txq->nb_tx_free = max_desc; + + /* reset tx_entry */ + for (i = 0; i < txq->nb_tx_desc; i++) { + txe = &txq->sw_ring_v[i]; + txe->mbuf = NULL; + } +} + +static inline void +_ixgbe_rx_queue_release_mbufs_vec(struct ixgbe_rx_queue *rxq) +{ + const unsigned int mask = rxq->nb_rx_desc - 1; + unsigned int i; + + if (rxq->sw_ring == NULL || rxq->rxrearm_nb >= rxq->nb_rx_desc) + return; + + /* free all mbufs that are valid in the ring */ + if (rxq->rxrearm_nb == 0) { + for (i = 0; i < rxq->nb_rx_desc; i++) { + if (rxq->sw_ring[i].mbuf != NULL) + rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf); + } + } else { + for (i = rxq->rx_tail; + i != rxq->rxrearm_start; + i = (i + 1) & mask) { + if (rxq->sw_ring[i].mbuf != NULL) + rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf); + } + } + + rxq->rxrearm_nb = rxq->nb_rx_desc; + + /* set all entries to NULL */ + memset(rxq->sw_ring, 0, sizeof(rxq->sw_ring[0]) * rxq->nb_rx_desc); +} + +static inline void +_ixgbe_tx_free_swring_vec(struct ixgbe_tx_queue *txq) +{ + if (txq == NULL) + return; + + if (txq->sw_ring != NULL) { + rte_free(txq->sw_ring_v - 1); + txq->sw_ring_v = NULL; + } +} + +static inline void +_ixgbe_reset_tx_queue_vec(struct ixgbe_tx_queue *txq) +{ + static const union ixgbe_adv_tx_desc zeroed_desc = { { 0 } }; + struct ixgbe_tx_entry_v *txe = txq->sw_ring_v; + uint16_t i; + + /* Zero out HW ring memory */ + for (i = 0; i < txq->nb_tx_desc; i++) + txq->tx_ring[i] = zeroed_desc; + + /* Initialize SW ring entries */ + for (i = 0; i < txq->nb_tx_desc; i++) { + volatile 
union ixgbe_adv_tx_desc *txd = &txq->tx_ring[i]; + + txd->wb.status = IXGBE_TXD_STAT_DD; + txe[i].mbuf = NULL; + } + + txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1); + txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1); + + txq->tx_tail = 0; + txq->nb_tx_used = 0; + /* + * Always allow 1 descriptor to be un-allocated to avoid + * a H/W race condition + */ + txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1); + txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1); + txq->ctx_curr = 0; + memset((void *)&txq->ctx_cache, 0, + IXGBE_CTX_NUM * sizeof(struct ixgbe_advctx_info)); +} + +static inline int +ixgbe_rxq_vec_setup_default(struct ixgbe_rx_queue *rxq) +{ + uintptr_t p; + struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */ + + mb_def.nb_segs = 1; + mb_def.data_off = RTE_PKTMBUF_HEADROOM; + mb_def.port = rxq->port_id; + rte_mbuf_refcnt_set(&mb_def, 1); + + /* prevent compiler reordering: rearm_data covers previous fields */ + rte_compiler_barrier(); + p = (uintptr_t)&mb_def.rearm_data; + rxq->mbuf_initializer = *(uint64_t *)p; + return 0; +} + +static inline int +ixgbe_txq_vec_setup_default(struct ixgbe_tx_queue *txq, + const struct ixgbe_txq_ops *txq_ops) +{ + if (txq->sw_ring_v == NULL) + return -1; + + /* leave the first one for overflow */ + txq->sw_ring_v = txq->sw_ring_v + 1; + txq->ops = txq_ops; + + return 0; +} + +static inline int +ixgbe_rx_vec_dev_conf_condition_check_default(struct rte_eth_dev *dev) +{ +#ifndef RTE_LIBRTE_IEEE1588 + struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf; + + /* no fdir support */ + if (fconf->mode != RTE_FDIR_MODE_NONE) + return -1; + + return 0; +#else + RTE_SET_USED(dev); + return -1; +#endif +} +#endif diff --git a/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c b/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c new file mode 100644 index 000000000..293b7c8bd --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c @@ -0,0 +1,586 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2015 Intel Corporation + */ + +#include +#include +#include + +#include "ixgbe_ethdev.h" +#include "ixgbe_rxtx.h" +#include "ixgbe_rxtx_vec_common.h" + +#include + +#pragma GCC diagnostic ignored "-Wcast-qual" + +static inline void +ixgbe_rxq_rearm(struct ixgbe_rx_queue *rxq) +{ + int i; + uint16_t rx_id; + volatile union ixgbe_adv_rx_desc *rxdp; + struct ixgbe_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start]; + struct rte_mbuf *mb0, *mb1; + uint64x2_t dma_addr0, dma_addr1; + uint64x2_t zero = vdupq_n_u64(0); + uint64_t paddr; + uint8x8_t p; + + rxdp = rxq->rx_ring + rxq->rxrearm_start; + + /* Pull 'n' more MBUFs into the software ring */ + if (unlikely(rte_mempool_get_bulk(rxq->mb_pool, + (void *)rxep, + RTE_IXGBE_RXQ_REARM_THRESH) < 0)) { + if (rxq->rxrearm_nb + RTE_IXGBE_RXQ_REARM_THRESH >= + rxq->nb_rx_desc) { + for (i = 0; i < RTE_IXGBE_DESCS_PER_LOOP; i++) { + rxep[i].mbuf = &rxq->fake_mbuf; + vst1q_u64((uint64_t *)&rxdp[i].read, + zero); + } + } + rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed += + RTE_IXGBE_RXQ_REARM_THRESH; + return; + } + + p = vld1_u8((uint8_t *)&rxq->mbuf_initializer); + + /* Initialize the mbufs in vector, process 2 mbufs in one loop */ + for (i = 0; i < RTE_IXGBE_RXQ_REARM_THRESH; i += 2, rxep += 2) { + mb0 = rxep[0].mbuf; + mb1 = rxep[1].mbuf; + + /* + * Flush mbuf with pkt template. + * Data to be rearmed is 6 bytes long. 
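+		 * The template is taken from rxq->mbuf_initializer, which
+		 * ixgbe_rxq_vec_setup_default() captured from the rearm_data
+		 * area of a fully initialized mbuf.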
+ */ + vst1_u8((uint8_t *)&mb0->rearm_data, p); + paddr = mb0->buf_iova + RTE_PKTMBUF_HEADROOM; + dma_addr0 = vsetq_lane_u64(paddr, zero, 0); + /* flush desc with pa dma_addr */ + vst1q_u64((uint64_t *)&rxdp++->read, dma_addr0); + + vst1_u8((uint8_t *)&mb1->rearm_data, p); + paddr = mb1->buf_iova + RTE_PKTMBUF_HEADROOM; + dma_addr1 = vsetq_lane_u64(paddr, zero, 0); + vst1q_u64((uint64_t *)&rxdp++->read, dma_addr1); + } + + rxq->rxrearm_start += RTE_IXGBE_RXQ_REARM_THRESH; + if (rxq->rxrearm_start >= rxq->nb_rx_desc) + rxq->rxrearm_start = 0; + + rxq->rxrearm_nb -= RTE_IXGBE_RXQ_REARM_THRESH; + + rx_id = (uint16_t)((rxq->rxrearm_start == 0) ? + (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1)); + + /* Update the tail pointer on the NIC */ + IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id); +} + +#define VTAG_SHIFT (3) + +static inline void +desc_to_olflags_v(uint8x16x2_t sterr_tmp1, uint8x16x2_t sterr_tmp2, + uint8x16_t staterr, struct rte_mbuf **rx_pkts) +{ + uint8x16_t ptype; + uint8x16_t vtag; + + union { + uint8_t e[4]; + uint32_t word; + } vol; + + const uint8x16_t pkttype_msk = { + PKT_RX_VLAN, PKT_RX_VLAN, + PKT_RX_VLAN, PKT_RX_VLAN, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00}; + + const uint8x16_t rsstype_msk = { + 0x0F, 0x0F, 0x0F, 0x0F, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00}; + + const uint8x16_t rss_flags = { + 0, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, + 0, PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH, + PKT_RX_RSS_HASH, 0, 0, 0, + 0, 0, 0, PKT_RX_FDIR}; + + ptype = vzipq_u8(sterr_tmp1.val[0], sterr_tmp2.val[0]).val[0]; + ptype = vandq_u8(ptype, rsstype_msk); + ptype = vqtbl1q_u8(rss_flags, ptype); + + vtag = vshrq_n_u8(staterr, VTAG_SHIFT); + vtag = vandq_u8(vtag, pkttype_msk); + vtag = vorrq_u8(ptype, vtag); + + vol.word = vgetq_lane_u32(vreinterpretq_u32_u8(vtag), 0); + + rx_pkts[0]->ol_flags = vol.e[0]; + rx_pkts[1]->ol_flags = vol.e[1]; + rx_pkts[2]->ol_flags = vol.e[2]; + rx_pkts[3]->ol_flags = vol.e[3]; +} + +/* + * vPMD raw receive routine, only accept(nb_pkts >= RTE_IXGBE_DESCS_PER_LOOP) + * + * Notice: + * - nb_pkts < RTE_IXGBE_DESCS_PER_LOOP, just return no packet + * - nb_pkts > RTE_IXGBE_MAX_RX_BURST, only scan RTE_IXGBE_MAX_RX_BURST + * numbers of DD bit + * - floor align nb_pkts to a RTE_IXGBE_DESC_PER_LOOP power-of-two + * - don't support ol_flags for rss and csum err + */ + +#define IXGBE_VPMD_DESC_EOP_MASK 0x02020202 +#define IXGBE_UINT8_BIT (CHAR_BIT * sizeof(uint8_t)) + +static inline uint32_t +get_packet_type(uint32_t pkt_info, + uint32_t etqf_check, + uint32_t tunnel_check) +{ + if (etqf_check) + return RTE_PTYPE_UNKNOWN; + + if (tunnel_check) { + pkt_info &= IXGBE_PACKET_TYPE_MASK_TUNNEL; + return ptype_table_tn[pkt_info]; + } + + pkt_info &= IXGBE_PACKET_TYPE_MASK_82599; + return ptype_table[pkt_info]; +} + +static inline void +desc_to_ptype_v(uint64x2_t descs[4], uint16_t pkt_type_mask, + struct rte_mbuf **rx_pkts) +{ + uint32x4_t etqf_check, tunnel_check; + uint32x4_t etqf_mask = vdupq_n_u32(0x8000); + uint32x4_t tunnel_mask = vdupq_n_u32(0x10000); + uint32x4_t ptype_mask = vdupq_n_u32((uint32_t)pkt_type_mask); + uint32x4_t ptype0 = vzipq_u32(vreinterpretq_u32_u64(descs[0]), + vreinterpretq_u32_u64(descs[2])).val[0]; + uint32x4_t ptype1 = vzipq_u32(vreinterpretq_u32_u64(descs[1]), + vreinterpretq_u32_u64(descs[3])).val[0]; + + /* interleave low 32 bits, + * now we have 4 ptypes in a NEON register + */ + ptype0 = vzipq_u32(ptype0, ptype1).val[0]; + + /* mask etqf bits */ + etqf_check 
= vandq_u32(ptype0, etqf_mask); + /* mask tunnel bits */ + tunnel_check = vandq_u32(ptype0, tunnel_mask); + + /* shift right by IXGBE_PACKET_TYPE_SHIFT, and apply ptype mask */ + ptype0 = vandq_u32(vshrq_n_u32(ptype0, IXGBE_PACKET_TYPE_SHIFT), + ptype_mask); + + rx_pkts[0]->packet_type = + get_packet_type(vgetq_lane_u32(ptype0, 0), + vgetq_lane_u32(etqf_check, 0), + vgetq_lane_u32(tunnel_check, 0)); + rx_pkts[1]->packet_type = + get_packet_type(vgetq_lane_u32(ptype0, 1), + vgetq_lane_u32(etqf_check, 1), + vgetq_lane_u32(tunnel_check, 1)); + rx_pkts[2]->packet_type = + get_packet_type(vgetq_lane_u32(ptype0, 2), + vgetq_lane_u32(etqf_check, 2), + vgetq_lane_u32(tunnel_check, 2)); + rx_pkts[3]->packet_type = + get_packet_type(vgetq_lane_u32(ptype0, 3), + vgetq_lane_u32(etqf_check, 3), + vgetq_lane_u32(tunnel_check, 3)); +} + +static inline uint16_t +_recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts, uint8_t *split_packet) +{ + volatile union ixgbe_adv_rx_desc *rxdp; + struct ixgbe_rx_entry *sw_ring; + uint16_t nb_pkts_recd; + int pos; + uint8x16_t shuf_msk = { + 0xFF, 0xFF, + 0xFF, 0xFF, /* skip 32 bits pkt_type */ + 12, 13, /* octet 12~13, low 16 bits pkt_len */ + 0xFF, 0xFF, /* skip high 16 bits pkt_len, zero out */ + 12, 13, /* octet 12~13, 16 bits data_len */ + 14, 15, /* octet 14~15, low 16 bits vlan_macip */ + 4, 5, 6, 7 /* octet 4~7, 32bits rss */ + }; + uint16x8_t crc_adjust = {0, 0, rxq->crc_len, 0, + rxq->crc_len, 0, 0, 0}; + + /* nb_pkts shall be less equal than RTE_IXGBE_MAX_RX_BURST */ + nb_pkts = RTE_MIN(nb_pkts, RTE_IXGBE_MAX_RX_BURST); + + /* nb_pkts has to be floor-aligned to RTE_IXGBE_DESCS_PER_LOOP */ + nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_IXGBE_DESCS_PER_LOOP); + + /* Just the act of getting into the function from the application is + * going to cost about 7 cycles + */ + rxdp = rxq->rx_ring + rxq->rx_tail; + + rte_prefetch_non_temporal(rxdp); + + /* See if we need to rearm the RX queue - gives the prefetch a bit + * of time to act + */ + if (rxq->rxrearm_nb > RTE_IXGBE_RXQ_REARM_THRESH) + ixgbe_rxq_rearm(rxq); + + /* Before we start moving massive data around, check to see if + * there is actually a packet available + */ + if (!(rxdp->wb.upper.status_error & + rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD))) + return 0; + + /* Cache is empty -> need to scan the buffer rings, but first move + * the next 'n' mbufs into the cache + */ + sw_ring = &rxq->sw_ring[rxq->rx_tail]; + + /* A. load 4 packet in one loop + * B. copy 4 mbuf point from swring to rx_pkts + * C. calc the number of DD bits among the 4 packets + * [C*. extract the end-of-packet bit, if requested] + * D. fill info. from desc to mbuf + */ + for (pos = 0, nb_pkts_recd = 0; pos < nb_pkts; + pos += RTE_IXGBE_DESCS_PER_LOOP, + rxdp += RTE_IXGBE_DESCS_PER_LOOP) { + uint64x2_t descs[RTE_IXGBE_DESCS_PER_LOOP]; + uint8x16_t pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4; + uint8x16x2_t sterr_tmp1, sterr_tmp2; + uint64x2_t mbp1, mbp2; + uint8x16_t staterr; + uint16x8_t tmp; + uint32_t stat; + + /* B.1 load 2 mbuf point */ + mbp1 = vld1q_u64((uint64_t *)&sw_ring[pos]); + + /* B.2 copy 2 mbuf point into rx_pkts */ + vst1q_u64((uint64_t *)&rx_pkts[pos], mbp1); + + /* B.1 load 2 mbuf point */ + mbp2 = vld1q_u64((uint64_t *)&sw_ring[pos + 2]); + + /* A. 
load 4 pkts descs */ + descs[0] = vld1q_u64((uint64_t *)(rxdp)); + descs[1] = vld1q_u64((uint64_t *)(rxdp + 1)); + descs[2] = vld1q_u64((uint64_t *)(rxdp + 2)); + descs[3] = vld1q_u64((uint64_t *)(rxdp + 3)); + + /* B.2 copy 2 mbuf point into rx_pkts */ + vst1q_u64((uint64_t *)&rx_pkts[pos + 2], mbp2); + + if (split_packet) { + rte_mbuf_prefetch_part2(rx_pkts[pos]); + rte_mbuf_prefetch_part2(rx_pkts[pos + 1]); + rte_mbuf_prefetch_part2(rx_pkts[pos + 2]); + rte_mbuf_prefetch_part2(rx_pkts[pos + 3]); + } + + /* D.1 pkt 3,4 convert format from desc to pktmbuf */ + pkt_mb4 = vqtbl1q_u8(vreinterpretq_u8_u64(descs[3]), shuf_msk); + pkt_mb3 = vqtbl1q_u8(vreinterpretq_u8_u64(descs[2]), shuf_msk); + + /* D.1 pkt 1,2 convert format from desc to pktmbuf */ + pkt_mb2 = vqtbl1q_u8(vreinterpretq_u8_u64(descs[1]), shuf_msk); + pkt_mb1 = vqtbl1q_u8(vreinterpretq_u8_u64(descs[0]), shuf_msk); + + /* C.1 4=>2 filter staterr info only */ + sterr_tmp2 = vzipq_u8(vreinterpretq_u8_u64(descs[1]), + vreinterpretq_u8_u64(descs[3])); + /* C.1 4=>2 filter staterr info only */ + sterr_tmp1 = vzipq_u8(vreinterpretq_u8_u64(descs[0]), + vreinterpretq_u8_u64(descs[2])); + + /* C.2 get 4 pkts staterr value */ + staterr = vzipq_u8(sterr_tmp1.val[1], sterr_tmp2.val[1]).val[0]; + + /* set ol_flags with vlan packet type */ + desc_to_olflags_v(sterr_tmp1, sterr_tmp2, staterr, + &rx_pkts[pos]); + + /* D.2 pkt 3,4 set in_port/nb_seg and remove crc */ + tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb4), crc_adjust); + pkt_mb4 = vreinterpretq_u8_u16(tmp); + tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb3), crc_adjust); + pkt_mb3 = vreinterpretq_u8_u16(tmp); + + /* D.3 copy final 3,4 data to rx_pkts */ + vst1q_u8((void *)&rx_pkts[pos + 3]->rx_descriptor_fields1, + pkt_mb4); + vst1q_u8((void *)&rx_pkts[pos + 2]->rx_descriptor_fields1, + pkt_mb3); + + /* D.2 pkt 1,2 set in_port/nb_seg and remove crc */ + tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb2), crc_adjust); + pkt_mb2 = vreinterpretq_u8_u16(tmp); + tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb1), crc_adjust); + pkt_mb1 = vreinterpretq_u8_u16(tmp); + + /* C* extract and record EOP bit */ + if (split_packet) { + stat = vgetq_lane_u32(vreinterpretq_u32_u8(staterr), 0); + /* and with mask to extract bits, flipping 1-0 */ + *(int *)split_packet = ~stat & IXGBE_VPMD_DESC_EOP_MASK; + + split_packet += RTE_IXGBE_DESCS_PER_LOOP; + } + + /* C.4 expand DD bit to saturate UINT8 */ + staterr = vshlq_n_u8(staterr, IXGBE_UINT8_BIT - 1); + staterr = vreinterpretq_u8_s8 + (vshrq_n_s8(vreinterpretq_s8_u8(staterr), + IXGBE_UINT8_BIT - 1)); + stat = ~vgetq_lane_u32(vreinterpretq_u32_u8(staterr), 0); + + rte_prefetch_non_temporal(rxdp + RTE_IXGBE_DESCS_PER_LOOP); + + /* D.3 copy final 1,2 data to rx_pkts */ + vst1q_u8((uint8_t *)&rx_pkts[pos + 1]->rx_descriptor_fields1, + pkt_mb2); + vst1q_u8((uint8_t *)&rx_pkts[pos]->rx_descriptor_fields1, + pkt_mb1); + + desc_to_ptype_v(descs, rxq->pkt_type_mask, &rx_pkts[pos]); + + /* C.5 calc available number of desc */ + if (unlikely(stat == 0)) { + nb_pkts_recd += RTE_IXGBE_DESCS_PER_LOOP; + } else { + nb_pkts_recd += __builtin_ctz(stat) / IXGBE_UINT8_BIT; + break; + } + } + + /* Update our internal tail pointer */ + rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_pkts_recd); + rxq->rx_tail = (uint16_t)(rxq->rx_tail & (rxq->nb_rx_desc - 1)); + rxq->rxrearm_nb = (uint16_t)(rxq->rxrearm_nb + nb_pkts_recd); + + return nb_pkts_recd; +} + +/* + * vPMD receive routine, only accept(nb_pkts >= RTE_IXGBE_DESCS_PER_LOOP) + * + * Notice: + * - nb_pkts < RTE_IXGBE_DESCS_PER_LOOP, just 
return no packet + * - nb_pkts > RTE_IXGBE_MAX_RX_BURST, only scan RTE_IXGBE_MAX_RX_BURST + * numbers of DD bit + * - floor align nb_pkts to a RTE_IXGBE_DESC_PER_LOOP power-of-two + * - don't support ol_flags for rss and csum err + */ +uint16_t +ixgbe_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL); +} + +/* + * vPMD receive routine that reassembles scattered packets + * + * Notice: + * - don't support ol_flags for rss and csum err + * - nb_pkts < RTE_IXGBE_DESCS_PER_LOOP, just return no packet + * - nb_pkts > RTE_IXGBE_MAX_RX_BURST, only scan RTE_IXGBE_MAX_RX_BURST + * numbers of DD bit + * - floor align nb_pkts to a RTE_IXGBE_DESC_PER_LOOP power-of-two + */ +uint16_t +ixgbe_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + struct ixgbe_rx_queue *rxq = rx_queue; + uint8_t split_flags[RTE_IXGBE_MAX_RX_BURST] = {0}; + + /* get some new buffers */ + uint16_t nb_bufs = _recv_raw_pkts_vec(rxq, rx_pkts, nb_pkts, + split_flags); + if (nb_bufs == 0) + return 0; + + /* happy day case, full burst + no packets to be joined */ + const uint64_t *split_fl64 = (uint64_t *)split_flags; + if (rxq->pkt_first_seg == NULL && + split_fl64[0] == 0 && split_fl64[1] == 0 && + split_fl64[2] == 0 && split_fl64[3] == 0) + return nb_bufs; + + /* reassemble any packets that need reassembly*/ + unsigned int i = 0; + if (rxq->pkt_first_seg == NULL) { + /* find the first split flag, and only reassemble then*/ + while (i < nb_bufs && !split_flags[i]) + i++; + if (i == nb_bufs) + return nb_bufs; + rxq->pkt_first_seg = rx_pkts[i]; + } + return i + reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i, + &split_flags[i]); +} + +static inline void +vtx1(volatile union ixgbe_adv_tx_desc *txdp, + struct rte_mbuf *pkt, uint64_t flags) +{ + uint64x2_t descriptor = { + pkt->buf_iova + pkt->data_off, + (uint64_t)pkt->pkt_len << 46 | flags | pkt->data_len}; + + vst1q_u64((uint64_t *)&txdp->read, descriptor); +} + +static inline void +vtx(volatile union ixgbe_adv_tx_desc *txdp, + struct rte_mbuf **pkt, uint16_t nb_pkts, uint64_t flags) +{ + int i; + + for (i = 0; i < nb_pkts; ++i, ++txdp, ++pkt) + vtx1(txdp, *pkt, flags); +} + +uint16_t +ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue; + volatile union ixgbe_adv_tx_desc *txdp; + struct ixgbe_tx_entry_v *txep; + uint16_t n, nb_commit, tx_id; + uint64_t flags = DCMD_DTYP_FLAGS; + uint64_t rs = IXGBE_ADVTXD_DCMD_RS | DCMD_DTYP_FLAGS; + int i; + + /* cross rx_thresh boundary is not allowed */ + nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh); + + if (txq->nb_tx_free < txq->tx_free_thresh) + ixgbe_tx_free_bufs(txq); + + nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts); + if (unlikely(nb_pkts == 0)) + return 0; + + tx_id = txq->tx_tail; + txdp = &txq->tx_ring[tx_id]; + txep = &txq->sw_ring_v[tx_id]; + + txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts); + + n = (uint16_t)(txq->nb_tx_desc - tx_id); + if (nb_commit >= n) { + tx_backlog_entry(txep, tx_pkts, n); + + for (i = 0; i < n - 1; ++i, ++tx_pkts, ++txdp) + vtx1(txdp, *tx_pkts, flags); + + vtx1(txdp, *tx_pkts++, rs); + + nb_commit = (uint16_t)(nb_commit - n); + + tx_id = 0; + txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1); + + /* avoid reach the end of ring */ + txdp = &txq->tx_ring[tx_id]; + txep = &txq->sw_ring_v[tx_id]; + } + + tx_backlog_entry(txep, tx_pkts, nb_commit); 
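+	/*
+	 * All mbufs of this burst are now recorded in the software ring.
+	 * The remaining descriptors are written below (continuing from the
+	 * ring base if the burst wrapped above), the RS bit is set once the
+	 * new tail passes tx_next_rs so the hardware writes back the DD bit,
+	 * and ixgbe_tx_free_bufs() can later reclaim the transmitted mbufs.
+	 */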
+ + vtx(txdp, tx_pkts, nb_commit, flags); + + tx_id = (uint16_t)(tx_id + nb_commit); + if (tx_id > txq->tx_next_rs) { + txq->tx_ring[txq->tx_next_rs].read.cmd_type_len |= + rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS); + txq->tx_next_rs = (uint16_t)(txq->tx_next_rs + + txq->tx_rs_thresh); + } + + txq->tx_tail = tx_id; + + IXGBE_PCI_REG_WRITE(txq->tdt_reg_addr, txq->tx_tail); + + return nb_pkts; +} + +static void __rte_cold +ixgbe_tx_queue_release_mbufs_vec(struct ixgbe_tx_queue *txq) +{ + _ixgbe_tx_queue_release_mbufs_vec(txq); +} + +void __rte_cold +ixgbe_rx_queue_release_mbufs_vec(struct ixgbe_rx_queue *rxq) +{ + _ixgbe_rx_queue_release_mbufs_vec(rxq); +} + +static void __rte_cold +ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq) +{ + _ixgbe_tx_free_swring_vec(txq); +} + +static void __rte_cold +ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq) +{ + _ixgbe_reset_tx_queue_vec(txq); +} + +static const struct ixgbe_txq_ops vec_txq_ops = { + .release_mbufs = ixgbe_tx_queue_release_mbufs_vec, + .free_swring = ixgbe_tx_free_swring, + .reset = ixgbe_reset_tx_queue, +}; + +int __rte_cold +ixgbe_rxq_vec_setup(struct ixgbe_rx_queue *rxq) +{ + return ixgbe_rxq_vec_setup_default(rxq); +} + +int __rte_cold +ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq) +{ + return ixgbe_txq_vec_setup_default(txq, &vec_txq_ops); +} + +int __rte_cold +ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev) +{ + struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode; + + /* no csum error report support */ + if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM) + return -1; + + return ixgbe_rx_vec_dev_conf_condition_check_default(dev); +} diff --git a/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c b/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c new file mode 100644 index 000000000..517ca3166 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c @@ -0,0 +1,751 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2015 Intel Corporation + */ + +#include +#include +#include + +#include "ixgbe_ethdev.h" +#include "ixgbe_rxtx.h" +#include "ixgbe_rxtx_vec_common.h" + +#include + +#ifndef __INTEL_COMPILER +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif + +static inline void +ixgbe_rxq_rearm(struct ixgbe_rx_queue *rxq) +{ + int i; + uint16_t rx_id; + volatile union ixgbe_adv_rx_desc *rxdp; + struct ixgbe_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start]; + struct rte_mbuf *mb0, *mb1; + __m128i hdr_room = _mm_set_epi64x(RTE_PKTMBUF_HEADROOM, + RTE_PKTMBUF_HEADROOM); + __m128i dma_addr0, dma_addr1; + + const __m128i hba_msk = _mm_set_epi64x(0, UINT64_MAX); + + rxdp = rxq->rx_ring + rxq->rxrearm_start; + + /* Pull 'n' more MBUFs into the software ring */ + if (rte_mempool_get_bulk(rxq->mb_pool, + (void *)rxep, + RTE_IXGBE_RXQ_REARM_THRESH) < 0) { + if (rxq->rxrearm_nb + RTE_IXGBE_RXQ_REARM_THRESH >= + rxq->nb_rx_desc) { + dma_addr0 = _mm_setzero_si128(); + for (i = 0; i < RTE_IXGBE_DESCS_PER_LOOP; i++) { + rxep[i].mbuf = &rxq->fake_mbuf; + _mm_store_si128((__m128i *)&rxdp[i].read, + dma_addr0); + } + } + rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed += + RTE_IXGBE_RXQ_REARM_THRESH; + return; + } + + /* Initialize the mbufs in vector, process 2 mbufs in one loop */ + for (i = 0; i < RTE_IXGBE_RXQ_REARM_THRESH; i += 2, rxep += 2) { + __m128i vaddr0, vaddr1; + + mb0 = rxep[0].mbuf; + mb1 = rxep[1].mbuf; + + /* load buf_addr(lo 64bit) and buf_iova(hi 64bit) */ + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_iova) != + offsetof(struct rte_mbuf, buf_addr) + 8); + vaddr0 = 
_mm_loadu_si128((__m128i *)&(mb0->buf_addr)); + vaddr1 = _mm_loadu_si128((__m128i *)&(mb1->buf_addr)); + + /* convert pa to dma_addr hdr/data */ + dma_addr0 = _mm_unpackhi_epi64(vaddr0, vaddr0); + dma_addr1 = _mm_unpackhi_epi64(vaddr1, vaddr1); + + /* add headroom to pa values */ + dma_addr0 = _mm_add_epi64(dma_addr0, hdr_room); + dma_addr1 = _mm_add_epi64(dma_addr1, hdr_room); + + /* set Header Buffer Address to zero */ + dma_addr0 = _mm_and_si128(dma_addr0, hba_msk); + dma_addr1 = _mm_and_si128(dma_addr1, hba_msk); + + /* flush desc with pa dma_addr */ + _mm_store_si128((__m128i *)&rxdp++->read, dma_addr0); + _mm_store_si128((__m128i *)&rxdp++->read, dma_addr1); + } + + rxq->rxrearm_start += RTE_IXGBE_RXQ_REARM_THRESH; + if (rxq->rxrearm_start >= rxq->nb_rx_desc) + rxq->rxrearm_start = 0; + + rxq->rxrearm_nb -= RTE_IXGBE_RXQ_REARM_THRESH; + + rx_id = (uint16_t) ((rxq->rxrearm_start == 0) ? + (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1)); + + /* Update the tail pointer on the NIC */ + IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id); +} + +#ifdef RTE_LIBRTE_SECURITY +static inline void +desc_to_olflags_v_ipsec(__m128i descs[4], struct rte_mbuf **rx_pkts) +{ + __m128i sterr, rearm, tmp_e, tmp_p; + uint32_t *rearm0 = (uint32_t *)rx_pkts[0]->rearm_data + 2; + uint32_t *rearm1 = (uint32_t *)rx_pkts[1]->rearm_data + 2; + uint32_t *rearm2 = (uint32_t *)rx_pkts[2]->rearm_data + 2; + uint32_t *rearm3 = (uint32_t *)rx_pkts[3]->rearm_data + 2; + const __m128i ipsec_sterr_msk = + _mm_set1_epi32(IXGBE_RXDADV_IPSEC_STATUS_SECP | + IXGBE_RXDADV_IPSEC_ERROR_AUTH_FAILED); + const __m128i ipsec_proc_msk = + _mm_set1_epi32(IXGBE_RXDADV_IPSEC_STATUS_SECP); + const __m128i ipsec_err_flag = + _mm_set1_epi32(PKT_RX_SEC_OFFLOAD_FAILED | + PKT_RX_SEC_OFFLOAD); + const __m128i ipsec_proc_flag = _mm_set1_epi32(PKT_RX_SEC_OFFLOAD); + + rearm = _mm_set_epi32(*rearm3, *rearm2, *rearm1, *rearm0); + sterr = _mm_set_epi32(_mm_extract_epi32(descs[3], 2), + _mm_extract_epi32(descs[2], 2), + _mm_extract_epi32(descs[1], 2), + _mm_extract_epi32(descs[0], 2)); + sterr = _mm_and_si128(sterr, ipsec_sterr_msk); + tmp_e = _mm_cmpeq_epi32(sterr, ipsec_sterr_msk); + tmp_p = _mm_cmpeq_epi32(sterr, ipsec_proc_msk); + sterr = _mm_or_si128(_mm_and_si128(tmp_e, ipsec_err_flag), + _mm_and_si128(tmp_p, ipsec_proc_flag)); + rearm = _mm_or_si128(rearm, sterr); + *rearm0 = _mm_extract_epi32(rearm, 0); + *rearm1 = _mm_extract_epi32(rearm, 1); + *rearm2 = _mm_extract_epi32(rearm, 2); + *rearm3 = _mm_extract_epi32(rearm, 3); +} +#endif + +static inline void +desc_to_olflags_v(__m128i descs[4], __m128i mbuf_init, uint8_t vlan_flags, + struct rte_mbuf **rx_pkts) +{ + __m128i ptype0, ptype1, vtag0, vtag1, csum; + __m128i rearm0, rearm1, rearm2, rearm3; + + /* mask everything except rss type */ + const __m128i rsstype_msk = _mm_set_epi16( + 0x0000, 0x0000, 0x0000, 0x0000, + 0x000F, 0x000F, 0x000F, 0x000F); + + /* mask the lower byte of ol_flags */ + const __m128i ol_flags_msk = _mm_set_epi16( + 0x0000, 0x0000, 0x0000, 0x0000, + 0x00FF, 0x00FF, 0x00FF, 0x00FF); + + /* map rss type to rss hash flag */ + const __m128i rss_flags = _mm_set_epi8(PKT_RX_FDIR, 0, 0, 0, + 0, 0, 0, PKT_RX_RSS_HASH, + PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH, 0, + PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, 0); + + /* mask everything except vlan present and l4/ip csum error */ + const __m128i vlan_csum_msk = _mm_set_epi16( + (IXGBE_RXDADV_ERR_TCPE | IXGBE_RXDADV_ERR_IPE) >> 16, + (IXGBE_RXDADV_ERR_TCPE | IXGBE_RXDADV_ERR_IPE) >> 16, + (IXGBE_RXDADV_ERR_TCPE | 
IXGBE_RXDADV_ERR_IPE) >> 16, + (IXGBE_RXDADV_ERR_TCPE | IXGBE_RXDADV_ERR_IPE) >> 16, + IXGBE_RXD_STAT_VP, IXGBE_RXD_STAT_VP, + IXGBE_RXD_STAT_VP, IXGBE_RXD_STAT_VP); + /* map vlan present (0x8), IPE (0x2), L4E (0x1) to ol_flags */ + const __m128i vlan_csum_map_lo = _mm_set_epi8( + 0, 0, 0, 0, + vlan_flags | PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD, + vlan_flags | PKT_RX_IP_CKSUM_BAD, + vlan_flags | PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD, + vlan_flags | PKT_RX_IP_CKSUM_GOOD, + 0, 0, 0, 0, + PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD, + PKT_RX_IP_CKSUM_BAD, + PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD, + PKT_RX_IP_CKSUM_GOOD); + + const __m128i vlan_csum_map_hi = _mm_set_epi8( + 0, 0, 0, 0, + 0, PKT_RX_L4_CKSUM_GOOD >> sizeof(uint8_t), 0, + PKT_RX_L4_CKSUM_GOOD >> sizeof(uint8_t), + 0, 0, 0, 0, + 0, PKT_RX_L4_CKSUM_GOOD >> sizeof(uint8_t), 0, + PKT_RX_L4_CKSUM_GOOD >> sizeof(uint8_t)); + + ptype0 = _mm_unpacklo_epi16(descs[0], descs[1]); + ptype1 = _mm_unpacklo_epi16(descs[2], descs[3]); + vtag0 = _mm_unpackhi_epi16(descs[0], descs[1]); + vtag1 = _mm_unpackhi_epi16(descs[2], descs[3]); + + ptype0 = _mm_unpacklo_epi32(ptype0, ptype1); + ptype0 = _mm_and_si128(ptype0, rsstype_msk); + ptype0 = _mm_shuffle_epi8(rss_flags, ptype0); + + vtag1 = _mm_unpacklo_epi32(vtag0, vtag1); + vtag1 = _mm_and_si128(vtag1, vlan_csum_msk); + + /* csum bits are in the most significant, to use shuffle we need to + * shift them. Change mask to 0xc000 to 0x0003. + */ + csum = _mm_srli_epi16(vtag1, 14); + + /* now or the most significant 64 bits containing the checksum + * flags with the vlan present flags. + */ + csum = _mm_srli_si128(csum, 8); + vtag1 = _mm_or_si128(csum, vtag1); + + /* convert VP, IPE, L4E to ol_flags */ + vtag0 = _mm_shuffle_epi8(vlan_csum_map_hi, vtag1); + vtag0 = _mm_slli_epi16(vtag0, sizeof(uint8_t)); + + vtag1 = _mm_shuffle_epi8(vlan_csum_map_lo, vtag1); + vtag1 = _mm_and_si128(vtag1, ol_flags_msk); + vtag1 = _mm_or_si128(vtag0, vtag1); + + vtag1 = _mm_or_si128(ptype0, vtag1); + + /* + * At this point, we have the 4 sets of flags in the low 64-bits + * of vtag1 (4x16). + * We want to extract these, and merge them with the mbuf init data + * so we can do a single 16-byte write to the mbuf to set the flags + * and all the other initialization fields. Extracting the + * appropriate flags means that we have to do a shift and blend for + * each mbuf before we do the write. 
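+	 * mbuf_init supplies the rearm_data template; the 16-bit blend at
+	 * element 4 (byte offset 8) drops each packet's flag word into the
+	 * low half of ol_flags, which the build-time asserts below confirm
+	 * sits 8 bytes after rearm_data.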
+ */ + rearm0 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vtag1, 8), 0x10); + rearm1 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vtag1, 6), 0x10); + rearm2 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vtag1, 4), 0x10); + rearm3 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vtag1, 2), 0x10); + + /* write the rearm data and the olflags in one write */ + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) != + offsetof(struct rte_mbuf, rearm_data) + 8); + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, rearm_data) != + RTE_ALIGN(offsetof(struct rte_mbuf, rearm_data), 16)); + _mm_store_si128((__m128i *)&rx_pkts[0]->rearm_data, rearm0); + _mm_store_si128((__m128i *)&rx_pkts[1]->rearm_data, rearm1); + _mm_store_si128((__m128i *)&rx_pkts[2]->rearm_data, rearm2); + _mm_store_si128((__m128i *)&rx_pkts[3]->rearm_data, rearm3); +} + +static inline uint32_t get_packet_type(int index, + uint32_t pkt_info, + uint32_t etqf_check, + uint32_t tunnel_check) +{ + if (etqf_check & (0x02 << (index * RTE_IXGBE_DESCS_PER_LOOP))) + return RTE_PTYPE_UNKNOWN; + + if (tunnel_check & (0x02 << (index * RTE_IXGBE_DESCS_PER_LOOP))) { + pkt_info &= IXGBE_PACKET_TYPE_MASK_TUNNEL; + return ptype_table_tn[pkt_info]; + } + + pkt_info &= IXGBE_PACKET_TYPE_MASK_82599; + return ptype_table[pkt_info]; +} + +static inline void +desc_to_ptype_v(__m128i descs[4], uint16_t pkt_type_mask, + struct rte_mbuf **rx_pkts) +{ + __m128i etqf_mask = _mm_set_epi64x(0x800000008000LL, 0x800000008000LL); + __m128i ptype_mask = _mm_set_epi32( + pkt_type_mask, pkt_type_mask, pkt_type_mask, pkt_type_mask); + __m128i tunnel_mask = + _mm_set_epi64x(0x100000001000LL, 0x100000001000LL); + + uint32_t etqf_check, tunnel_check, pkt_info; + + __m128i ptype0 = _mm_unpacklo_epi32(descs[0], descs[2]); + __m128i ptype1 = _mm_unpacklo_epi32(descs[1], descs[3]); + + /* interleave low 32 bits, + * now we have 4 ptypes in a XMM register + */ + ptype0 = _mm_unpacklo_epi32(ptype0, ptype1); + + /* create a etqf bitmask based on the etqf bit. 
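+	 * Packets matched by an ETQF filter carry no usable ptype bits, so
+	 * get_packet_type() reports them as RTE_PTYPE_UNKNOWN.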
*/ + etqf_check = _mm_movemask_epi8(_mm_and_si128(ptype0, etqf_mask)); + + /* shift left by IXGBE_PACKET_TYPE_SHIFT, and apply ptype mask */ + ptype0 = _mm_and_si128(_mm_srli_epi32(ptype0, IXGBE_PACKET_TYPE_SHIFT), + ptype_mask); + + /* create a tunnel bitmask based on the tunnel bit */ + tunnel_check = _mm_movemask_epi8( + _mm_slli_epi32(_mm_and_si128(ptype0, tunnel_mask), 0x3)); + + pkt_info = _mm_extract_epi32(ptype0, 0); + rx_pkts[0]->packet_type = + get_packet_type(0, pkt_info, etqf_check, tunnel_check); + pkt_info = _mm_extract_epi32(ptype0, 1); + rx_pkts[1]->packet_type = + get_packet_type(1, pkt_info, etqf_check, tunnel_check); + pkt_info = _mm_extract_epi32(ptype0, 2); + rx_pkts[2]->packet_type = + get_packet_type(2, pkt_info, etqf_check, tunnel_check); + pkt_info = _mm_extract_epi32(ptype0, 3); + rx_pkts[3]->packet_type = + get_packet_type(3, pkt_info, etqf_check, tunnel_check); +} + +/* + * vPMD raw receive routine, only accept(nb_pkts >= RTE_IXGBE_DESCS_PER_LOOP) + * + * Notice: + * - nb_pkts < RTE_IXGBE_DESCS_PER_LOOP, just return no packet + * - nb_pkts > RTE_IXGBE_MAX_RX_BURST, only scan RTE_IXGBE_MAX_RX_BURST + * numbers of DD bit + * - floor align nb_pkts to a RTE_IXGBE_DESC_PER_LOOP power-of-two + */ +static inline uint16_t +_recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts, uint8_t *split_packet) +{ + volatile union ixgbe_adv_rx_desc *rxdp; + struct ixgbe_rx_entry *sw_ring; + uint16_t nb_pkts_recd; +#ifdef RTE_LIBRTE_SECURITY + uint8_t use_ipsec = rxq->using_ipsec; +#endif + int pos; + uint64_t var; + __m128i shuf_msk; + __m128i crc_adjust = _mm_set_epi16( + 0, 0, 0, /* ignore non-length fields */ + -rxq->crc_len, /* sub crc on data_len */ + 0, /* ignore high-16bits of pkt_len */ + -rxq->crc_len, /* sub crc on pkt_len */ + 0, 0 /* ignore pkt_type field */ + ); + /* + * compile-time check the above crc_adjust layout is correct. + * NOTE: the first field (lowest address) is given last in set_epi16 + * call above. + */ + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) != + offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4); + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) != + offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8); + __m128i dd_check, eop_check; + __m128i mbuf_init; + uint8_t vlan_flags; + + /* nb_pkts shall be less equal than RTE_IXGBE_MAX_RX_BURST */ + nb_pkts = RTE_MIN(nb_pkts, RTE_IXGBE_MAX_RX_BURST); + + /* nb_pkts has to be floor-aligned to RTE_IXGBE_DESCS_PER_LOOP */ + nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_IXGBE_DESCS_PER_LOOP); + + /* Just the act of getting into the function from the application is + * going to cost about 7 cycles + */ + rxdp = rxq->rx_ring + rxq->rx_tail; + + rte_prefetch0(rxdp); + + /* See if we need to rearm the RX queue - gives the prefetch a bit + * of time to act + */ + if (rxq->rxrearm_nb > RTE_IXGBE_RXQ_REARM_THRESH) + ixgbe_rxq_rearm(rxq); + + /* Before we start moving massive data around, check to see if + * there is actually a packet available + */ + if (!(rxdp->wb.upper.status_error & + rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD))) + return 0; + + /* 4 packets DD mask */ + dd_check = _mm_set_epi64x(0x0000000100000001LL, 0x0000000100000001LL); + + /* 4 packets EOP mask */ + eop_check = _mm_set_epi64x(0x0000000200000002LL, 0x0000000200000002LL); + + /* mask to shuffle from desc. 
to mbuf */ + shuf_msk = _mm_set_epi8( + 7, 6, 5, 4, /* octet 4~7, 32bits rss */ + 15, 14, /* octet 14~15, low 16 bits vlan_macip */ + 13, 12, /* octet 12~13, 16 bits data_len */ + 0xFF, 0xFF, /* skip high 16 bits pkt_len, zero out */ + 13, 12, /* octet 12~13, low 16 bits pkt_len */ + 0xFF, 0xFF, /* skip 32 bit pkt_type */ + 0xFF, 0xFF + ); + /* + * Compile-time verify the shuffle mask + * NOTE: some field positions already verified above, but duplicated + * here for completeness in case of future modifications. + */ + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) != + offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4); + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) != + offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8); + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, vlan_tci) != + offsetof(struct rte_mbuf, rx_descriptor_fields1) + 10); + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) != + offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12); + + mbuf_init = _mm_set_epi64x(0, rxq->mbuf_initializer); + + /* Cache is empty -> need to scan the buffer rings, but first move + * the next 'n' mbufs into the cache + */ + sw_ring = &rxq->sw_ring[rxq->rx_tail]; + + /* ensure these 2 flags are in the lower 8 bits */ + RTE_BUILD_BUG_ON((PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED) > UINT8_MAX); + vlan_flags = rxq->vlan_flags & UINT8_MAX; + + /* A. load 4 packet in one loop + * [A*. mask out 4 unused dirty field in desc] + * B. copy 4 mbuf point from swring to rx_pkts + * C. calc the number of DD bits among the 4 packets + * [C*. extract the end-of-packet bit, if requested] + * D. fill info. from desc to mbuf + */ + for (pos = 0, nb_pkts_recd = 0; pos < nb_pkts; + pos += RTE_IXGBE_DESCS_PER_LOOP, + rxdp += RTE_IXGBE_DESCS_PER_LOOP) { + __m128i descs[RTE_IXGBE_DESCS_PER_LOOP]; + __m128i pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4; + __m128i zero, staterr, sterr_tmp1, sterr_tmp2; + /* 2 64 bit or 4 32 bit mbuf pointers in one XMM reg. 
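+		 * On 32-bit builds one load covers all four pointers;
+		 * 64-bit builds need the second load into mbp2 below.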
*/ + __m128i mbp1; +#if defined(RTE_ARCH_X86_64) + __m128i mbp2; +#endif + + /* B.1 load 2 (64 bit) or 4 (32 bit) mbuf points */ + mbp1 = _mm_loadu_si128((__m128i *)&sw_ring[pos]); + + /* Read desc statuses backwards to avoid race condition */ + /* A.1 load 4 pkts desc */ + descs[3] = _mm_loadu_si128((__m128i *)(rxdp + 3)); + rte_compiler_barrier(); + + /* B.2 copy 2 64 bit or 4 32 bit mbuf point into rx_pkts */ + _mm_storeu_si128((__m128i *)&rx_pkts[pos], mbp1); + +#if defined(RTE_ARCH_X86_64) + /* B.1 load 2 64 bit mbuf points */ + mbp2 = _mm_loadu_si128((__m128i *)&sw_ring[pos+2]); +#endif + + descs[2] = _mm_loadu_si128((__m128i *)(rxdp + 2)); + rte_compiler_barrier(); + /* B.1 load 2 mbuf point */ + descs[1] = _mm_loadu_si128((__m128i *)(rxdp + 1)); + rte_compiler_barrier(); + descs[0] = _mm_loadu_si128((__m128i *)(rxdp)); + +#if defined(RTE_ARCH_X86_64) + /* B.2 copy 2 mbuf point into rx_pkts */ + _mm_storeu_si128((__m128i *)&rx_pkts[pos+2], mbp2); +#endif + + if (split_packet) { + rte_mbuf_prefetch_part2(rx_pkts[pos]); + rte_mbuf_prefetch_part2(rx_pkts[pos + 1]); + rte_mbuf_prefetch_part2(rx_pkts[pos + 2]); + rte_mbuf_prefetch_part2(rx_pkts[pos + 3]); + } + + /* avoid compiler reorder optimization */ + rte_compiler_barrier(); + + /* D.1 pkt 3,4 convert format from desc to pktmbuf */ + pkt_mb4 = _mm_shuffle_epi8(descs[3], shuf_msk); + pkt_mb3 = _mm_shuffle_epi8(descs[2], shuf_msk); + + /* D.1 pkt 1,2 convert format from desc to pktmbuf */ + pkt_mb2 = _mm_shuffle_epi8(descs[1], shuf_msk); + pkt_mb1 = _mm_shuffle_epi8(descs[0], shuf_msk); + + /* C.1 4=>2 filter staterr info only */ + sterr_tmp2 = _mm_unpackhi_epi32(descs[3], descs[2]); + /* C.1 4=>2 filter staterr info only */ + sterr_tmp1 = _mm_unpackhi_epi32(descs[1], descs[0]); + + /* set ol_flags with vlan packet type */ + desc_to_olflags_v(descs, mbuf_init, vlan_flags, &rx_pkts[pos]); + +#ifdef RTE_LIBRTE_SECURITY + if (unlikely(use_ipsec)) + desc_to_olflags_v_ipsec(descs, &rx_pkts[pos]); +#endif + + /* D.2 pkt 3,4 set in_port/nb_seg and remove crc */ + pkt_mb4 = _mm_add_epi16(pkt_mb4, crc_adjust); + pkt_mb3 = _mm_add_epi16(pkt_mb3, crc_adjust); + + /* C.2 get 4 pkts staterr value */ + zero = _mm_xor_si128(dd_check, dd_check); + staterr = _mm_unpacklo_epi32(sterr_tmp1, sterr_tmp2); + + /* D.3 copy final 3,4 data to rx_pkts */ + _mm_storeu_si128((void *)&rx_pkts[pos+3]->rx_descriptor_fields1, + pkt_mb4); + _mm_storeu_si128((void *)&rx_pkts[pos+2]->rx_descriptor_fields1, + pkt_mb3); + + /* D.2 pkt 1,2 set in_port/nb_seg and remove crc */ + pkt_mb2 = _mm_add_epi16(pkt_mb2, crc_adjust); + pkt_mb1 = _mm_add_epi16(pkt_mb1, crc_adjust); + + /* C* extract and record EOP bit */ + if (split_packet) { + __m128i eop_shuf_mask = _mm_set_epi8( + 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, + 0x04, 0x0C, 0x00, 0x08 + ); + + /* and with mask to extract bits, flipping 1-0 */ + __m128i eop_bits = _mm_andnot_si128(staterr, eop_check); + /* the staterr values are not in order, as the count + * count of dd bits doesn't care. However, for end of + * packet tracking, we do care, so shuffle. 
This also + * compresses the 32-bit values to 8-bit + */ + eop_bits = _mm_shuffle_epi8(eop_bits, eop_shuf_mask); + /* store the resulting 32-bit value */ + *(int *)split_packet = _mm_cvtsi128_si32(eop_bits); + split_packet += RTE_IXGBE_DESCS_PER_LOOP; + } + + /* C.3 calc available number of desc */ + staterr = _mm_and_si128(staterr, dd_check); + staterr = _mm_packs_epi32(staterr, zero); + + /* D.3 copy final 1,2 data to rx_pkts */ + _mm_storeu_si128((void *)&rx_pkts[pos+1]->rx_descriptor_fields1, + pkt_mb2); + _mm_storeu_si128((void *)&rx_pkts[pos]->rx_descriptor_fields1, + pkt_mb1); + + desc_to_ptype_v(descs, rxq->pkt_type_mask, &rx_pkts[pos]); + + /* C.4 calc avaialbe number of desc */ + var = __builtin_popcountll(_mm_cvtsi128_si64(staterr)); + nb_pkts_recd += var; + if (likely(var != RTE_IXGBE_DESCS_PER_LOOP)) + break; + } + + /* Update our internal tail pointer */ + rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_pkts_recd); + rxq->rx_tail = (uint16_t)(rxq->rx_tail & (rxq->nb_rx_desc - 1)); + rxq->rxrearm_nb = (uint16_t)(rxq->rxrearm_nb + nb_pkts_recd); + + return nb_pkts_recd; +} + +/* + * vPMD receive routine, only accept(nb_pkts >= RTE_IXGBE_DESCS_PER_LOOP) + * + * Notice: + * - nb_pkts < RTE_IXGBE_DESCS_PER_LOOP, just return no packet + * - nb_pkts > RTE_IXGBE_MAX_RX_BURST, only scan RTE_IXGBE_MAX_RX_BURST + * numbers of DD bit + * - floor align nb_pkts to a RTE_IXGBE_DESC_PER_LOOP power-of-two + */ +uint16_t +ixgbe_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL); +} + +/* + * vPMD receive routine that reassembles scattered packets + * + * Notice: + * - nb_pkts < RTE_IXGBE_DESCS_PER_LOOP, just return no packet + * - nb_pkts > RTE_IXGBE_MAX_RX_BURST, only scan RTE_IXGBE_MAX_RX_BURST + * numbers of DD bit + * - floor align nb_pkts to a RTE_IXGBE_DESC_PER_LOOP power-of-two + */ +uint16_t +ixgbe_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + struct ixgbe_rx_queue *rxq = rx_queue; + uint8_t split_flags[RTE_IXGBE_MAX_RX_BURST] = {0}; + + /* get some new buffers */ + uint16_t nb_bufs = _recv_raw_pkts_vec(rxq, rx_pkts, nb_pkts, + split_flags); + if (nb_bufs == 0) + return 0; + + /* happy day case, full burst + no packets to be joined */ + const uint64_t *split_fl64 = (uint64_t *)split_flags; + if (rxq->pkt_first_seg == NULL && + split_fl64[0] == 0 && split_fl64[1] == 0 && + split_fl64[2] == 0 && split_fl64[3] == 0) + return nb_bufs; + + /* reassemble any packets that need reassembly*/ + unsigned i = 0; + if (rxq->pkt_first_seg == NULL) { + /* find the first split flag, and only reassemble then*/ + while (i < nb_bufs && !split_flags[i]) + i++; + if (i == nb_bufs) + return nb_bufs; + rxq->pkt_first_seg = rx_pkts[i]; + } + return i + reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i, + &split_flags[i]); +} + +static inline void +vtx1(volatile union ixgbe_adv_tx_desc *txdp, + struct rte_mbuf *pkt, uint64_t flags) +{ + __m128i descriptor = _mm_set_epi64x((uint64_t)pkt->pkt_len << 46 | + flags | pkt->data_len, + pkt->buf_iova + pkt->data_off); + _mm_store_si128((__m128i *)&txdp->read, descriptor); +} + +static inline void +vtx(volatile union ixgbe_adv_tx_desc *txdp, + struct rte_mbuf **pkt, uint16_t nb_pkts, uint64_t flags) +{ + int i; + + for (i = 0; i < nb_pkts; ++i, ++txdp, ++pkt) + vtx1(txdp, *pkt, flags); +} + +uint16_t +ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + struct ixgbe_tx_queue *txq = (struct 
ixgbe_tx_queue *)tx_queue; + volatile union ixgbe_adv_tx_desc *txdp; + struct ixgbe_tx_entry_v *txep; + uint16_t n, nb_commit, tx_id; + uint64_t flags = DCMD_DTYP_FLAGS; + uint64_t rs = IXGBE_ADVTXD_DCMD_RS|DCMD_DTYP_FLAGS; + int i; + + /* cross rx_thresh boundary is not allowed */ + nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh); + + if (txq->nb_tx_free < txq->tx_free_thresh) + ixgbe_tx_free_bufs(txq); + + nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts); + if (unlikely(nb_pkts == 0)) + return 0; + + tx_id = txq->tx_tail; + txdp = &txq->tx_ring[tx_id]; + txep = &txq->sw_ring_v[tx_id]; + + txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts); + + n = (uint16_t)(txq->nb_tx_desc - tx_id); + if (nb_commit >= n) { + + tx_backlog_entry(txep, tx_pkts, n); + + for (i = 0; i < n - 1; ++i, ++tx_pkts, ++txdp) + vtx1(txdp, *tx_pkts, flags); + + vtx1(txdp, *tx_pkts++, rs); + + nb_commit = (uint16_t)(nb_commit - n); + + tx_id = 0; + txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1); + + /* avoid reach the end of ring */ + txdp = &(txq->tx_ring[tx_id]); + txep = &txq->sw_ring_v[tx_id]; + } + + tx_backlog_entry(txep, tx_pkts, nb_commit); + + vtx(txdp, tx_pkts, nb_commit, flags); + + tx_id = (uint16_t)(tx_id + nb_commit); + if (tx_id > txq->tx_next_rs) { + txq->tx_ring[txq->tx_next_rs].read.cmd_type_len |= + rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS); + txq->tx_next_rs = (uint16_t)(txq->tx_next_rs + + txq->tx_rs_thresh); + } + + txq->tx_tail = tx_id; + + IXGBE_PCI_REG_WRITE(txq->tdt_reg_addr, txq->tx_tail); + + return nb_pkts; +} + +static void __rte_cold +ixgbe_tx_queue_release_mbufs_vec(struct ixgbe_tx_queue *txq) +{ + _ixgbe_tx_queue_release_mbufs_vec(txq); +} + +void __rte_cold +ixgbe_rx_queue_release_mbufs_vec(struct ixgbe_rx_queue *rxq) +{ + _ixgbe_rx_queue_release_mbufs_vec(rxq); +} + +static void __rte_cold +ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq) +{ + _ixgbe_tx_free_swring_vec(txq); +} + +static void __rte_cold +ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq) +{ + _ixgbe_reset_tx_queue_vec(txq); +} + +static const struct ixgbe_txq_ops vec_txq_ops = { + .release_mbufs = ixgbe_tx_queue_release_mbufs_vec, + .free_swring = ixgbe_tx_free_swring, + .reset = ixgbe_reset_tx_queue, +}; + +int __rte_cold +ixgbe_rxq_vec_setup(struct ixgbe_rx_queue *rxq) +{ + return ixgbe_rxq_vec_setup_default(rxq); +} + +int __rte_cold +ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq) +{ + return ixgbe_txq_vec_setup_default(txq, &vec_txq_ops); +} + +int __rte_cold +ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev) +{ + return ixgbe_rx_vec_dev_conf_condition_check_default(dev); +} diff --git a/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_tm.c b/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_tm.c new file mode 100644 index 000000000..73845a73d --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_tm.c @@ -0,0 +1,1031 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2017 Intel Corporation + */ + +#include + +#include "ixgbe_ethdev.h" + +static int ixgbe_tm_capabilities_get(struct rte_eth_dev *dev, + struct rte_tm_capabilities *cap, + struct rte_tm_error *error); +static int ixgbe_shaper_profile_add(struct rte_eth_dev *dev, + uint32_t shaper_profile_id, + struct rte_tm_shaper_params *profile, + struct rte_tm_error *error); +static int ixgbe_shaper_profile_del(struct rte_eth_dev *dev, + uint32_t shaper_profile_id, + struct rte_tm_error *error); +static int ixgbe_node_add(struct rte_eth_dev *dev, uint32_t node_id, + uint32_t parent_node_id, uint32_t priority, + uint32_t weight, 
uint32_t level_id, + struct rte_tm_node_params *params, + struct rte_tm_error *error); +static int ixgbe_node_delete(struct rte_eth_dev *dev, uint32_t node_id, + struct rte_tm_error *error); +static int ixgbe_node_type_get(struct rte_eth_dev *dev, uint32_t node_id, + int *is_leaf, struct rte_tm_error *error); +static int ixgbe_level_capabilities_get(struct rte_eth_dev *dev, + uint32_t level_id, + struct rte_tm_level_capabilities *cap, + struct rte_tm_error *error); +static int ixgbe_node_capabilities_get(struct rte_eth_dev *dev, + uint32_t node_id, + struct rte_tm_node_capabilities *cap, + struct rte_tm_error *error); +static int ixgbe_hierarchy_commit(struct rte_eth_dev *dev, + int clear_on_fail, + struct rte_tm_error *error); + +const struct rte_tm_ops ixgbe_tm_ops = { + .capabilities_get = ixgbe_tm_capabilities_get, + .shaper_profile_add = ixgbe_shaper_profile_add, + .shaper_profile_delete = ixgbe_shaper_profile_del, + .node_add = ixgbe_node_add, + .node_delete = ixgbe_node_delete, + .node_type_get = ixgbe_node_type_get, + .level_capabilities_get = ixgbe_level_capabilities_get, + .node_capabilities_get = ixgbe_node_capabilities_get, + .hierarchy_commit = ixgbe_hierarchy_commit, +}; + +int +ixgbe_tm_ops_get(struct rte_eth_dev *dev __rte_unused, + void *arg) +{ + if (!arg) + return -EINVAL; + + *(const void **)arg = &ixgbe_tm_ops; + + return 0; +} + +void +ixgbe_tm_conf_init(struct rte_eth_dev *dev) +{ + struct ixgbe_tm_conf *tm_conf = + IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private); + + /* initialize shaper profile list */ + TAILQ_INIT(&tm_conf->shaper_profile_list); + + /* initialize node configuration */ + tm_conf->root = NULL; + TAILQ_INIT(&tm_conf->queue_list); + TAILQ_INIT(&tm_conf->tc_list); + tm_conf->nb_tc_node = 0; + tm_conf->nb_queue_node = 0; + tm_conf->committed = false; +} + +void +ixgbe_tm_conf_uninit(struct rte_eth_dev *dev) +{ + struct ixgbe_tm_conf *tm_conf = + IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private); + struct ixgbe_tm_shaper_profile *shaper_profile; + struct ixgbe_tm_node *tm_node; + + /* clear node configuration */ + while ((tm_node = TAILQ_FIRST(&tm_conf->queue_list))) { + TAILQ_REMOVE(&tm_conf->queue_list, tm_node, node); + rte_free(tm_node); + } + tm_conf->nb_queue_node = 0; + while ((tm_node = TAILQ_FIRST(&tm_conf->tc_list))) { + TAILQ_REMOVE(&tm_conf->tc_list, tm_node, node); + rte_free(tm_node); + } + tm_conf->nb_tc_node = 0; + if (tm_conf->root) { + rte_free(tm_conf->root); + tm_conf->root = NULL; + } + + /* Remove all shaper profiles */ + while ((shaper_profile = + TAILQ_FIRST(&tm_conf->shaper_profile_list))) { + TAILQ_REMOVE(&tm_conf->shaper_profile_list, + shaper_profile, node); + rte_free(shaper_profile); + } +} + +static inline uint8_t +ixgbe_tc_nb_get(struct rte_eth_dev *dev) +{ + struct rte_eth_conf *eth_conf; + uint8_t nb_tcs = 0; + + eth_conf = &dev->data->dev_conf; + if (eth_conf->txmode.mq_mode == ETH_MQ_TX_DCB) { + nb_tcs = eth_conf->tx_adv_conf.dcb_tx_conf.nb_tcs; + } else if (eth_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) { + if (eth_conf->tx_adv_conf.vmdq_dcb_tx_conf.nb_queue_pools == + ETH_32_POOLS) + nb_tcs = ETH_4_TCS; + else + nb_tcs = ETH_8_TCS; + } else { + nb_tcs = 1; + } + + return nb_tcs; +} + +static int +ixgbe_tm_capabilities_get(struct rte_eth_dev *dev, + struct rte_tm_capabilities *cap, + struct rte_tm_error *error) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint8_t tc_nb = ixgbe_tc_nb_get(dev); + + if (!cap || !error) + return -EINVAL; + + if (tc_nb > hw->mac.max_tx_queues) + 
return -EINVAL; + + error->type = RTE_TM_ERROR_TYPE_NONE; + + /* set all the parameters to 0 first. */ + memset(cap, 0, sizeof(struct rte_tm_capabilities)); + + /** + * here is the max capability not the current configuration. + */ + /* port + TCs + queues */ + cap->n_nodes_max = 1 + IXGBE_DCB_MAX_TRAFFIC_CLASS + + hw->mac.max_tx_queues; + cap->n_levels_max = 3; + cap->non_leaf_nodes_identical = 1; + cap->leaf_nodes_identical = 1; + cap->shaper_n_max = cap->n_nodes_max; + cap->shaper_private_n_max = cap->n_nodes_max; + cap->shaper_private_dual_rate_n_max = 0; + cap->shaper_private_rate_min = 0; + /* 10Gbps -> 1.25GBps */ + cap->shaper_private_rate_max = 1250000000ull; + cap->shaper_shared_n_max = 0; + cap->shaper_shared_n_nodes_per_shaper_max = 0; + cap->shaper_shared_n_shapers_per_node_max = 0; + cap->shaper_shared_dual_rate_n_max = 0; + cap->shaper_shared_rate_min = 0; + cap->shaper_shared_rate_max = 0; + cap->sched_n_children_max = hw->mac.max_tx_queues; + /** + * HW supports SP. But no plan to support it now. + * So, all the nodes should have the same priority. + */ + cap->sched_sp_n_priorities_max = 1; + cap->sched_wfq_n_children_per_group_max = 0; + cap->sched_wfq_n_groups_max = 0; + /** + * SW only supports fair round robin now. + * So, all the nodes should have the same weight. + */ + cap->sched_wfq_weight_max = 1; + cap->cman_head_drop_supported = 0; + cap->dynamic_update_mask = 0; + cap->shaper_pkt_length_adjust_min = RTE_TM_ETH_FRAMING_OVERHEAD; + cap->shaper_pkt_length_adjust_max = RTE_TM_ETH_FRAMING_OVERHEAD_FCS; + cap->cman_wred_context_n_max = 0; + cap->cman_wred_context_private_n_max = 0; + cap->cman_wred_context_shared_n_max = 0; + cap->cman_wred_context_shared_n_nodes_per_context_max = 0; + cap->cman_wred_context_shared_n_contexts_per_node_max = 0; + cap->stats_mask = 0; + + return 0; +} + +static inline struct ixgbe_tm_shaper_profile * +ixgbe_shaper_profile_search(struct rte_eth_dev *dev, + uint32_t shaper_profile_id) +{ + struct ixgbe_tm_conf *tm_conf = + IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private); + struct ixgbe_shaper_profile_list *shaper_profile_list = + &tm_conf->shaper_profile_list; + struct ixgbe_tm_shaper_profile *shaper_profile; + + TAILQ_FOREACH(shaper_profile, shaper_profile_list, node) { + if (shaper_profile_id == shaper_profile->shaper_profile_id) + return shaper_profile; + } + + return NULL; +} + +static int +ixgbe_shaper_profile_param_check(struct rte_tm_shaper_params *profile, + struct rte_tm_error *error) +{ + /* min rate not supported */ + if (profile->committed.rate) { + error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE; + error->message = "committed rate not supported"; + return -EINVAL; + } + /* min bucket size not supported */ + if (profile->committed.size) { + error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_SIZE; + error->message = "committed bucket size not supported"; + return -EINVAL; + } + /* max bucket size not supported */ + if (profile->peak.size) { + error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE; + error->message = "peak bucket size not supported"; + return -EINVAL; + } + /* length adjustment not supported */ + if (profile->pkt_length_adjust) { + error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN; + error->message = "packet length adjustment not supported"; + return -EINVAL; + } + + return 0; +} + +static int +ixgbe_shaper_profile_add(struct rte_eth_dev *dev, + uint32_t shaper_profile_id, + struct rte_tm_shaper_params *profile, + struct rte_tm_error *error) +{ + struct ixgbe_tm_conf 
*tm_conf = + IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private); + struct ixgbe_tm_shaper_profile *shaper_profile; + int ret; + + if (!profile || !error) + return -EINVAL; + + ret = ixgbe_shaper_profile_param_check(profile, error); + if (ret) + return ret; + + shaper_profile = ixgbe_shaper_profile_search(dev, shaper_profile_id); + + if (shaper_profile) { + error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID; + error->message = "profile ID exist"; + return -EINVAL; + } + + shaper_profile = rte_zmalloc("ixgbe_tm_shaper_profile", + sizeof(struct ixgbe_tm_shaper_profile), + 0); + if (!shaper_profile) + return -ENOMEM; + shaper_profile->shaper_profile_id = shaper_profile_id; + rte_memcpy(&shaper_profile->profile, profile, + sizeof(struct rte_tm_shaper_params)); + TAILQ_INSERT_TAIL(&tm_conf->shaper_profile_list, + shaper_profile, node); + + return 0; +} + +static int +ixgbe_shaper_profile_del(struct rte_eth_dev *dev, + uint32_t shaper_profile_id, + struct rte_tm_error *error) +{ + struct ixgbe_tm_conf *tm_conf = + IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private); + struct ixgbe_tm_shaper_profile *shaper_profile; + + if (!error) + return -EINVAL; + + shaper_profile = ixgbe_shaper_profile_search(dev, shaper_profile_id); + + if (!shaper_profile) { + error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID; + error->message = "profile ID not exist"; + return -EINVAL; + } + + /* don't delete a profile if it's used by one or several nodes */ + if (shaper_profile->reference_count) { + error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE; + error->message = "profile in use"; + return -EINVAL; + } + + TAILQ_REMOVE(&tm_conf->shaper_profile_list, shaper_profile, node); + rte_free(shaper_profile); + + return 0; +} + +static inline struct ixgbe_tm_node * +ixgbe_tm_node_search(struct rte_eth_dev *dev, uint32_t node_id, + enum ixgbe_tm_node_type *node_type) +{ + struct ixgbe_tm_conf *tm_conf = + IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private); + struct ixgbe_tm_node *tm_node; + + if (tm_conf->root && tm_conf->root->id == node_id) { + *node_type = IXGBE_TM_NODE_TYPE_PORT; + return tm_conf->root; + } + + TAILQ_FOREACH(tm_node, &tm_conf->tc_list, node) { + if (tm_node->id == node_id) { + *node_type = IXGBE_TM_NODE_TYPE_TC; + return tm_node; + } + } + + TAILQ_FOREACH(tm_node, &tm_conf->queue_list, node) { + if (tm_node->id == node_id) { + *node_type = IXGBE_TM_NODE_TYPE_QUEUE; + return tm_node; + } + } + + return NULL; +} + +static void +ixgbe_queue_base_nb_get(struct rte_eth_dev *dev, uint16_t tc_node_no, + uint16_t *base, uint16_t *nb) +{ + uint8_t nb_tcs = ixgbe_tc_nb_get(dev); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + uint16_t vf_num = pci_dev->max_vfs; + + *base = 0; + *nb = 0; + + /* VT on */ + if (vf_num) { + /* no DCB */ + if (nb_tcs == 1) { + if (vf_num >= ETH_32_POOLS) { + *nb = 2; + *base = vf_num * 2; + } else if (vf_num >= ETH_16_POOLS) { + *nb = 4; + *base = vf_num * 4; + } else { + *nb = 8; + *base = vf_num * 8; + } + } else { + /* DCB */ + *nb = 1; + *base = vf_num * nb_tcs + tc_node_no; + } + } else { + /* VT off */ + if (nb_tcs == ETH_8_TCS) { + switch (tc_node_no) { + case 0: + *base = 0; + *nb = 32; + break; + case 1: + *base = 32; + *nb = 32; + break; + case 2: + *base = 64; + *nb = 16; + break; + case 3: + *base = 80; + *nb = 16; + break; + case 4: + *base = 96; + *nb = 8; + break; + case 5: + *base = 104; + *nb = 8; + break; + case 6: + *base = 112; + *nb = 8; + break; + case 7: + *base = 120; + *nb = 8; + break; + default: + return; + } + } else { + switch (tc_node_no) { + 
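+			/* DCB with 4 TCs (or no DCB): 64/32/16/16 queues per TC */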
/** + * If no VF and no DCB, only 64 queues can be used. + * This case also be covered by this "case 0". + */ + case 0: + *base = 0; + *nb = 64; + break; + case 1: + *base = 64; + *nb = 32; + break; + case 2: + *base = 96; + *nb = 16; + break; + case 3: + *base = 112; + *nb = 16; + break; + default: + return; + } + } + } +} + +static int +ixgbe_node_param_check(struct rte_eth_dev *dev, uint32_t node_id, + uint32_t priority, uint32_t weight, + struct rte_tm_node_params *params, + struct rte_tm_error *error) +{ + if (node_id == RTE_TM_NODE_ID_NULL) { + error->type = RTE_TM_ERROR_TYPE_NODE_ID; + error->message = "invalid node id"; + return -EINVAL; + } + + if (priority) { + error->type = RTE_TM_ERROR_TYPE_NODE_PRIORITY; + error->message = "priority should be 0"; + return -EINVAL; + } + + if (weight != 1) { + error->type = RTE_TM_ERROR_TYPE_NODE_WEIGHT; + error->message = "weight must be 1"; + return -EINVAL; + } + + /* not support shared shaper */ + if (params->shared_shaper_id) { + error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID; + error->message = "shared shaper not supported"; + return -EINVAL; + } + if (params->n_shared_shapers) { + error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS; + error->message = "shared shaper not supported"; + return -EINVAL; + } + + /* for non-leaf node */ + if (node_id >= dev->data->nb_tx_queues) { + /* check the unsupported parameters */ + if (params->nonleaf.wfq_weight_mode) { + error->type = + RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE; + error->message = "WFQ not supported"; + return -EINVAL; + } + if (params->nonleaf.n_sp_priorities != 1) { + error->type = + RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES; + error->message = "SP priority not supported"; + return -EINVAL; + } else if (params->nonleaf.wfq_weight_mode && + !(*params->nonleaf.wfq_weight_mode)) { + error->type = + RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE; + error->message = "WFP should be byte mode"; + return -EINVAL; + } + + return 0; + } + + /* for leaf node */ + /* check the unsupported parameters */ + if (params->leaf.cman) { + error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN; + error->message = "Congestion management not supported"; + return -EINVAL; + } + if (params->leaf.wred.wred_profile_id != + RTE_TM_WRED_PROFILE_ID_NONE) { + error->type = + RTE_TM_ERROR_TYPE_NODE_PARAMS_WRED_PROFILE_ID; + error->message = "WRED not supported"; + return -EINVAL; + } + if (params->leaf.wred.shared_wred_context_id) { + error->type = + RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_WRED_CONTEXT_ID; + error->message = "WRED not supported"; + return -EINVAL; + } + if (params->leaf.wred.n_shared_wred_contexts) { + error->type = + RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_WRED_CONTEXTS; + error->message = "WRED not supported"; + return -EINVAL; + } + + return 0; +} + +/** + * Now the TC and queue configuration is controlled by DCB. + * We need check if the node configuration follows the DCB configuration. + * In the future, we may use TM to cover DCB. 
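+ * Concretely, the accepted hierarchy is fixed at three levels
+ * (port -> TC -> queue): node add/delete only validate the request
+ * against the current DCB/SR-IOV queue layout, and hierarchy commit
+ * programs nothing beyond per-queue peak rates.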
+ */ +static int +ixgbe_node_add(struct rte_eth_dev *dev, uint32_t node_id, + uint32_t parent_node_id, uint32_t priority, + uint32_t weight, uint32_t level_id, + struct rte_tm_node_params *params, + struct rte_tm_error *error) +{ + struct ixgbe_tm_conf *tm_conf = + IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private); + enum ixgbe_tm_node_type node_type = IXGBE_TM_NODE_TYPE_MAX; + enum ixgbe_tm_node_type parent_node_type = IXGBE_TM_NODE_TYPE_MAX; + struct ixgbe_tm_shaper_profile *shaper_profile = NULL; + struct ixgbe_tm_node *tm_node; + struct ixgbe_tm_node *parent_node; + uint8_t nb_tcs; + uint16_t q_base = 0; + uint16_t q_nb = 0; + int ret; + + if (!params || !error) + return -EINVAL; + + /* if already committed */ + if (tm_conf->committed) { + error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED; + error->message = "already committed"; + return -EINVAL; + } + + ret = ixgbe_node_param_check(dev, node_id, priority, weight, + params, error); + if (ret) + return ret; + + /* check if the node ID is already used */ + if (ixgbe_tm_node_search(dev, node_id, &node_type)) { + error->type = RTE_TM_ERROR_TYPE_NODE_ID; + error->message = "node id already used"; + return -EINVAL; + } + + /* check the shaper profile id */ + if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) { + shaper_profile = ixgbe_shaper_profile_search( + dev, params->shaper_profile_id); + if (!shaper_profile) { + error->type = + RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID; + error->message = "shaper profile not exist"; + return -EINVAL; + } + } + + /* root node if not have a parent */ + if (parent_node_id == RTE_TM_NODE_ID_NULL) { + /* check level */ + if (level_id != RTE_TM_NODE_LEVEL_ID_ANY && + level_id > IXGBE_TM_NODE_TYPE_PORT) { + error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS; + error->message = "Wrong level"; + return -EINVAL; + } + + /* obviously no more than one root */ + if (tm_conf->root) { + error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID; + error->message = "already have a root"; + return -EINVAL; + } + + /* add the root node */ + tm_node = rte_zmalloc("ixgbe_tm_node", + sizeof(struct ixgbe_tm_node), + 0); + if (!tm_node) + return -ENOMEM; + tm_node->id = node_id; + tm_node->priority = priority; + tm_node->weight = weight; + tm_node->reference_count = 0; + tm_node->no = 0; + tm_node->parent = NULL; + tm_node->shaper_profile = shaper_profile; + rte_memcpy(&tm_node->params, params, + sizeof(struct rte_tm_node_params)); + tm_conf->root = tm_node; + + /* increase the reference counter of the shaper profile */ + if (shaper_profile) + shaper_profile->reference_count++; + + return 0; + } + + /* TC or queue node */ + /* check the parent node */ + parent_node = ixgbe_tm_node_search(dev, parent_node_id, + &parent_node_type); + if (!parent_node) { + error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID; + error->message = "parent not exist"; + return -EINVAL; + } + if (parent_node_type != IXGBE_TM_NODE_TYPE_PORT && + parent_node_type != IXGBE_TM_NODE_TYPE_TC) { + error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID; + error->message = "parent is not port or TC"; + return -EINVAL; + } + /* check level */ + if (level_id != RTE_TM_NODE_LEVEL_ID_ANY && + level_id != parent_node_type + 1) { + error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS; + error->message = "Wrong level"; + return -EINVAL; + } + + /* check the node number */ + if (parent_node_type == IXGBE_TM_NODE_TYPE_PORT) { + /* check TC number */ + nb_tcs = ixgbe_tc_nb_get(dev); + if (tm_conf->nb_tc_node >= nb_tcs) { + error->type = RTE_TM_ERROR_TYPE_NODE_ID; + 
error->message = "too many TCs"; + return -EINVAL; + } + } else { + /* check queue number */ + if (tm_conf->nb_queue_node >= dev->data->nb_tx_queues) { + error->type = RTE_TM_ERROR_TYPE_NODE_ID; + error->message = "too many queues"; + return -EINVAL; + } + + ixgbe_queue_base_nb_get(dev, parent_node->no, &q_base, &q_nb); + if (parent_node->reference_count >= q_nb) { + error->type = RTE_TM_ERROR_TYPE_NODE_ID; + error->message = "too many queues than TC supported"; + return -EINVAL; + } + + /** + * check the node id. + * For queue, the node id means queue id. + */ + if (node_id >= dev->data->nb_tx_queues) { + error->type = RTE_TM_ERROR_TYPE_NODE_ID; + error->message = "too large queue id"; + return -EINVAL; + } + } + + /* add the TC or queue node */ + tm_node = rte_zmalloc("ixgbe_tm_node", + sizeof(struct ixgbe_tm_node), + 0); + if (!tm_node) + return -ENOMEM; + tm_node->id = node_id; + tm_node->priority = priority; + tm_node->weight = weight; + tm_node->reference_count = 0; + tm_node->parent = parent_node; + tm_node->shaper_profile = shaper_profile; + rte_memcpy(&tm_node->params, params, + sizeof(struct rte_tm_node_params)); + if (parent_node_type == IXGBE_TM_NODE_TYPE_PORT) { + tm_node->no = parent_node->reference_count; + TAILQ_INSERT_TAIL(&tm_conf->tc_list, + tm_node, node); + tm_conf->nb_tc_node++; + } else { + tm_node->no = q_base + parent_node->reference_count; + TAILQ_INSERT_TAIL(&tm_conf->queue_list, + tm_node, node); + tm_conf->nb_queue_node++; + } + tm_node->parent->reference_count++; + + /* increase the reference counter of the shaper profile */ + if (shaper_profile) + shaper_profile->reference_count++; + + return 0; +} + +static int +ixgbe_node_delete(struct rte_eth_dev *dev, uint32_t node_id, + struct rte_tm_error *error) +{ + struct ixgbe_tm_conf *tm_conf = + IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private); + enum ixgbe_tm_node_type node_type = IXGBE_TM_NODE_TYPE_MAX; + struct ixgbe_tm_node *tm_node; + + if (!error) + return -EINVAL; + + /* if already committed */ + if (tm_conf->committed) { + error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED; + error->message = "already committed"; + return -EINVAL; + } + + if (node_id == RTE_TM_NODE_ID_NULL) { + error->type = RTE_TM_ERROR_TYPE_NODE_ID; + error->message = "invalid node id"; + return -EINVAL; + } + + /* check the if the node id exists */ + tm_node = ixgbe_tm_node_search(dev, node_id, &node_type); + if (!tm_node) { + error->type = RTE_TM_ERROR_TYPE_NODE_ID; + error->message = "no such node"; + return -EINVAL; + } + + /* the node should have no child */ + if (tm_node->reference_count) { + error->type = RTE_TM_ERROR_TYPE_NODE_ID; + error->message = + "cannot delete a node which has children"; + return -EINVAL; + } + + /* root node */ + if (node_type == IXGBE_TM_NODE_TYPE_PORT) { + if (tm_node->shaper_profile) + tm_node->shaper_profile->reference_count--; + rte_free(tm_node); + tm_conf->root = NULL; + return 0; + } + + /* TC or queue node */ + if (tm_node->shaper_profile) + tm_node->shaper_profile->reference_count--; + tm_node->parent->reference_count--; + if (node_type == IXGBE_TM_NODE_TYPE_TC) { + TAILQ_REMOVE(&tm_conf->tc_list, tm_node, node); + tm_conf->nb_tc_node--; + } else { + TAILQ_REMOVE(&tm_conf->queue_list, tm_node, node); + tm_conf->nb_queue_node--; + } + rte_free(tm_node); + + return 0; +} + +static int +ixgbe_node_type_get(struct rte_eth_dev *dev, uint32_t node_id, + int *is_leaf, struct rte_tm_error *error) +{ + enum ixgbe_tm_node_type node_type = IXGBE_TM_NODE_TYPE_MAX; + struct ixgbe_tm_node *tm_node; + + if 
(!is_leaf || !error) + return -EINVAL; + + if (node_id == RTE_TM_NODE_ID_NULL) { + error->type = RTE_TM_ERROR_TYPE_NODE_ID; + error->message = "invalid node id"; + return -EINVAL; + } + + /* check if the node id exists */ + tm_node = ixgbe_tm_node_search(dev, node_id, &node_type); + if (!tm_node) { + error->type = RTE_TM_ERROR_TYPE_NODE_ID; + error->message = "no such node"; + return -EINVAL; + } + + if (node_type == IXGBE_TM_NODE_TYPE_QUEUE) + *is_leaf = true; + else + *is_leaf = false; + + return 0; +} + +static int +ixgbe_level_capabilities_get(struct rte_eth_dev *dev, + uint32_t level_id, + struct rte_tm_level_capabilities *cap, + struct rte_tm_error *error) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (!cap || !error) + return -EINVAL; + + if (level_id >= IXGBE_TM_NODE_TYPE_MAX) { + error->type = RTE_TM_ERROR_TYPE_LEVEL_ID; + error->message = "too deep level"; + return -EINVAL; + } + + /* root node */ + if (level_id == IXGBE_TM_NODE_TYPE_PORT) { + cap->n_nodes_max = 1; + cap->n_nodes_nonleaf_max = 1; + cap->n_nodes_leaf_max = 0; + } else if (level_id == IXGBE_TM_NODE_TYPE_TC) { + /* TC */ + cap->n_nodes_max = IXGBE_DCB_MAX_TRAFFIC_CLASS; + cap->n_nodes_nonleaf_max = IXGBE_DCB_MAX_TRAFFIC_CLASS; + cap->n_nodes_leaf_max = 0; + } else { + /* queue */ + cap->n_nodes_max = hw->mac.max_tx_queues; + cap->n_nodes_nonleaf_max = 0; + cap->n_nodes_leaf_max = hw->mac.max_tx_queues; + } + + cap->non_leaf_nodes_identical = true; + cap->leaf_nodes_identical = true; + + if (level_id != IXGBE_TM_NODE_TYPE_QUEUE) { + cap->nonleaf.shaper_private_supported = true; + cap->nonleaf.shaper_private_dual_rate_supported = false; + cap->nonleaf.shaper_private_rate_min = 0; + /* 10Gbps -> 1.25GBps */ + cap->nonleaf.shaper_private_rate_max = 1250000000ull; + cap->nonleaf.shaper_shared_n_max = 0; + if (level_id == IXGBE_TM_NODE_TYPE_PORT) + cap->nonleaf.sched_n_children_max = + IXGBE_DCB_MAX_TRAFFIC_CLASS; + else + cap->nonleaf.sched_n_children_max = + hw->mac.max_tx_queues; + cap->nonleaf.sched_sp_n_priorities_max = 1; + cap->nonleaf.sched_wfq_n_children_per_group_max = 0; + cap->nonleaf.sched_wfq_n_groups_max = 0; + cap->nonleaf.sched_wfq_weight_max = 1; + cap->nonleaf.stats_mask = 0; + + return 0; + } + + /* queue node */ + cap->leaf.shaper_private_supported = true; + cap->leaf.shaper_private_dual_rate_supported = false; + cap->leaf.shaper_private_rate_min = 0; + /* 10Gbps -> 1.25GBps */ + cap->leaf.shaper_private_rate_max = 1250000000ull; + cap->leaf.shaper_shared_n_max = 0; + cap->leaf.cman_head_drop_supported = false; + cap->leaf.cman_wred_context_private_supported = true; + cap->leaf.cman_wred_context_shared_n_max = 0; + cap->leaf.stats_mask = 0; + + return 0; +} + +static int +ixgbe_node_capabilities_get(struct rte_eth_dev *dev, + uint32_t node_id, + struct rte_tm_node_capabilities *cap, + struct rte_tm_error *error) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + enum ixgbe_tm_node_type node_type = IXGBE_TM_NODE_TYPE_MAX; + struct ixgbe_tm_node *tm_node; + + if (!cap || !error) + return -EINVAL; + + if (node_id == RTE_TM_NODE_ID_NULL) { + error->type = RTE_TM_ERROR_TYPE_NODE_ID; + error->message = "invalid node id"; + return -EINVAL; + } + + /* check if the node id exists */ + tm_node = ixgbe_tm_node_search(dev, node_id, &node_type); + if (!tm_node) { + error->type = RTE_TM_ERROR_TYPE_NODE_ID; + error->message = "no such node"; + return -EINVAL; + } + + cap->shaper_private_supported = true; + cap->shaper_private_dual_rate_supported = 
false; + cap->shaper_private_rate_min = 0; + /* 10Gbps -> 1.25GBps */ + cap->shaper_private_rate_max = 1250000000ull; + cap->shaper_shared_n_max = 0; + + if (node_type == IXGBE_TM_NODE_TYPE_QUEUE) { + cap->leaf.cman_head_drop_supported = false; + cap->leaf.cman_wred_context_private_supported = true; + cap->leaf.cman_wred_context_shared_n_max = 0; + } else { + if (node_type == IXGBE_TM_NODE_TYPE_PORT) + cap->nonleaf.sched_n_children_max = + IXGBE_DCB_MAX_TRAFFIC_CLASS; + else + cap->nonleaf.sched_n_children_max = + hw->mac.max_tx_queues; + cap->nonleaf.sched_sp_n_priorities_max = 1; + cap->nonleaf.sched_wfq_n_children_per_group_max = 0; + cap->nonleaf.sched_wfq_n_groups_max = 0; + cap->nonleaf.sched_wfq_weight_max = 1; + } + + cap->stats_mask = 0; + + return 0; +} + +static int +ixgbe_hierarchy_commit(struct rte_eth_dev *dev, + int clear_on_fail, + struct rte_tm_error *error) +{ + struct ixgbe_tm_conf *tm_conf = + IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private); + struct ixgbe_tm_node *tm_node; + uint64_t bw; + int ret; + + if (!error) + return -EINVAL; + + /* check the setting */ + if (!tm_conf->root) + goto done; + + /* not support port max bandwidth yet */ + if (tm_conf->root->shaper_profile && + tm_conf->root->shaper_profile->profile.peak.rate) { + error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE; + error->message = "no port max bandwidth"; + goto fail_clear; + } + + /* HW not support TC max bandwidth */ + TAILQ_FOREACH(tm_node, &tm_conf->tc_list, node) { + if (tm_node->shaper_profile && + tm_node->shaper_profile->profile.peak.rate) { + error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE; + error->message = "no TC max bandwidth"; + goto fail_clear; + } + } + + /* queue max bandwidth */ + TAILQ_FOREACH(tm_node, &tm_conf->queue_list, node) { + if (tm_node->shaper_profile) + bw = tm_node->shaper_profile->profile.peak.rate; + else + bw = 0; + if (bw) { + /* interpret Bps to Mbps */ + bw = bw * 8 / 1000 / 1000; + ret = ixgbe_set_queue_rate_limit(dev, tm_node->no, bw); + if (ret) { + error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE; + error->message = + "failed to set queue max bandwidth"; + goto fail_clear; + } + } + } + +done: + tm_conf->committed = true; + return 0; + +fail_clear: + /* clear all the traffic manager configuration */ + if (clear_on_fail) { + ixgbe_tm_conf_uninit(dev); + ixgbe_tm_conf_init(dev); + } + return -EINVAL; +} diff --git a/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_vf_representor.c b/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_vf_representor.c new file mode 100644 index 000000000..dbbef294a --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ixgbe/ixgbe_vf_representor.c @@ -0,0 +1,237 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Intel Corporation. 
+ */ + +#include +#include +#include + +#include "base/ixgbe_type.h" +#include "base/ixgbe_vf.h" +#include "ixgbe_ethdev.h" +#include "ixgbe_rxtx.h" +#include "rte_pmd_ixgbe.h" + + +static int +ixgbe_vf_representor_link_update(struct rte_eth_dev *ethdev, + int wait_to_complete) +{ + struct ixgbe_vf_representor *representor = ethdev->data->dev_private; + + return ixgbe_dev_link_update_share(representor->pf_ethdev, + wait_to_complete, 0); +} + +static int +ixgbe_vf_representor_mac_addr_set(struct rte_eth_dev *ethdev, + struct rte_ether_addr *mac_addr) +{ + struct ixgbe_vf_representor *representor = ethdev->data->dev_private; + + return rte_pmd_ixgbe_set_vf_mac_addr( + representor->pf_ethdev->data->port_id, + representor->vf_id, mac_addr); +} + +static int +ixgbe_vf_representor_dev_infos_get(struct rte_eth_dev *ethdev, + struct rte_eth_dev_info *dev_info) +{ + struct ixgbe_vf_representor *representor = ethdev->data->dev_private; + + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW( + representor->pf_ethdev->data->dev_private); + + dev_info->device = representor->pf_ethdev->device; + + dev_info->min_rx_bufsize = 1024; + /**< Minimum size of RX buffer. */ + dev_info->max_rx_pktlen = 9728; + /**< Maximum configurable length of RX pkt. */ + dev_info->max_rx_queues = IXGBE_VF_MAX_RX_QUEUES; + /**< Maximum number of RX queues. */ + dev_info->max_tx_queues = IXGBE_VF_MAX_TX_QUEUES; + /**< Maximum number of TX queues. */ + + dev_info->max_mac_addrs = hw->mac.num_rar_entries; + /**< Maximum number of MAC addresses. */ + + dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP | + DEV_RX_OFFLOAD_IPV4_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM | + DEV_RX_OFFLOAD_TCP_CKSUM; + /**< Device RX offload capabilities. */ + + dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT | + DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_TCP_CKSUM | DEV_TX_OFFLOAD_SCTP_CKSUM | + DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_MULTI_SEGS; + /**< Device TX offload capabilities. */ + + dev_info->speed_capa = + representor->pf_ethdev->data->dev_link.link_speed; + /**< Supported speeds bitmap (ETH_LINK_SPEED_). 
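+	 * This is simply inherited from the PF's current link speed
+	 * rather than a full capability mask.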
*/ + + dev_info->switch_info.name = + representor->pf_ethdev->device->name; + dev_info->switch_info.domain_id = representor->switch_domain_id; + dev_info->switch_info.port_id = representor->vf_id; + + return 0; +} + +static int ixgbe_vf_representor_dev_configure( + __rte_unused struct rte_eth_dev *dev) +{ + return 0; +} + +static int ixgbe_vf_representor_rx_queue_setup( + __rte_unused struct rte_eth_dev *dev, + __rte_unused uint16_t rx_queue_id, + __rte_unused uint16_t nb_rx_desc, + __rte_unused unsigned int socket_id, + __rte_unused const struct rte_eth_rxconf *rx_conf, + __rte_unused struct rte_mempool *mb_pool) +{ + return 0; +} + +static int ixgbe_vf_representor_tx_queue_setup( + __rte_unused struct rte_eth_dev *dev, + __rte_unused uint16_t rx_queue_id, + __rte_unused uint16_t nb_rx_desc, + __rte_unused unsigned int socket_id, + __rte_unused const struct rte_eth_txconf *tx_conf) +{ + return 0; +} + +static int ixgbe_vf_representor_dev_start(__rte_unused struct rte_eth_dev *dev) +{ + return 0; +} + +static void ixgbe_vf_representor_dev_stop(__rte_unused struct rte_eth_dev *dev) +{ +} + +static int +ixgbe_vf_representor_vlan_filter_set(struct rte_eth_dev *ethdev, + uint16_t vlan_id, int on) +{ + struct ixgbe_vf_representor *representor = ethdev->data->dev_private; + uint64_t vf_mask = 1ULL << representor->vf_id; + + return rte_pmd_ixgbe_set_vf_vlan_filter( + representor->pf_ethdev->data->port_id, vlan_id, vf_mask, on); +} + +static void +ixgbe_vf_representor_vlan_strip_queue_set(struct rte_eth_dev *ethdev, + __rte_unused uint16_t rx_queue_id, int on) +{ + struct ixgbe_vf_representor *representor = ethdev->data->dev_private; + + rte_pmd_ixgbe_set_vf_vlan_stripq(representor->pf_ethdev->data->port_id, + representor->vf_id, on); +} + +static const struct eth_dev_ops ixgbe_vf_representor_dev_ops = { + .dev_infos_get = ixgbe_vf_representor_dev_infos_get, + + .dev_start = ixgbe_vf_representor_dev_start, + .dev_configure = ixgbe_vf_representor_dev_configure, + .dev_stop = ixgbe_vf_representor_dev_stop, + + .rx_queue_setup = ixgbe_vf_representor_rx_queue_setup, + .tx_queue_setup = ixgbe_vf_representor_tx_queue_setup, + + .link_update = ixgbe_vf_representor_link_update, + + .vlan_filter_set = ixgbe_vf_representor_vlan_filter_set, + .vlan_strip_queue_set = ixgbe_vf_representor_vlan_strip_queue_set, + + .mac_addr_set = ixgbe_vf_representor_mac_addr_set, +}; + +static uint16_t +ixgbe_vf_representor_rx_burst(__rte_unused void *rx_queue, + __rte_unused struct rte_mbuf **rx_pkts, __rte_unused uint16_t nb_pkts) +{ + return 0; +} + +static uint16_t +ixgbe_vf_representor_tx_burst(__rte_unused void *tx_queue, + __rte_unused struct rte_mbuf **tx_pkts, __rte_unused uint16_t nb_pkts) +{ + return 0; +} + +int +ixgbe_vf_representor_init(struct rte_eth_dev *ethdev, void *init_params) +{ + struct ixgbe_vf_representor *representor = ethdev->data->dev_private; + + struct ixgbe_vf_info *vf_data; + struct rte_pci_device *pci_dev; + struct rte_eth_link *link; + + if (!representor) + return -ENOMEM; + + representor->vf_id = + ((struct ixgbe_vf_representor *)init_params)->vf_id; + representor->switch_domain_id = + ((struct ixgbe_vf_representor *)init_params)->switch_domain_id; + representor->pf_ethdev = + ((struct ixgbe_vf_representor *)init_params)->pf_ethdev; + + pci_dev = RTE_ETH_DEV_TO_PCI(representor->pf_ethdev); + + if (representor->vf_id >= pci_dev->max_vfs) + return -ENODEV; + + ethdev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR; + ethdev->data->representor_id = representor->vf_id; + + /* Set representor device 
ops */ + ethdev->dev_ops = &ixgbe_vf_representor_dev_ops; + + /* No data-path, but need stub Rx/Tx functions to avoid crash + * when testing with the likes of testpmd. + */ + ethdev->rx_pkt_burst = ixgbe_vf_representor_rx_burst; + ethdev->tx_pkt_burst = ixgbe_vf_representor_tx_burst; + + /* Setting the number queues allocated to the VF */ + ethdev->data->nb_rx_queues = IXGBE_VF_MAX_RX_QUEUES; + ethdev->data->nb_tx_queues = IXGBE_VF_MAX_RX_QUEUES; + + /* Reference VF mac address from PF data structure */ + vf_data = *IXGBE_DEV_PRIVATE_TO_P_VFDATA( + representor->pf_ethdev->data->dev_private); + + ethdev->data->mac_addrs = (struct rte_ether_addr *) + vf_data[representor->vf_id].vf_mac_addresses; + + /* Link state. Inherited from PF */ + link = &representor->pf_ethdev->data->dev_link; + + ethdev->data->dev_link.link_speed = link->link_speed; + ethdev->data->dev_link.link_duplex = link->link_duplex; + ethdev->data->dev_link.link_status = link->link_status; + ethdev->data->dev_link.link_autoneg = link->link_autoneg; + + return 0; +} + +int +ixgbe_vf_representor_uninit(struct rte_eth_dev *ethdev) +{ + /* mac_addrs must not be freed because part of ixgbe_vf_info */ + ethdev->data->mac_addrs = NULL; + + return 0; +} diff --git a/src/spdk/dpdk/drivers/net/ixgbe/meson.build b/src/spdk/dpdk/drivers/net/ixgbe/meson.build new file mode 100644 index 000000000..949075eb2 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ixgbe/meson.build @@ -0,0 +1,33 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2017 Intel Corporation + +cflags += ['-DRTE_LIBRTE_IXGBE_BYPASS'] + +subdir('base') +objs = [base_objs] + +sources = files( + 'ixgbe_82599_bypass.c', + 'ixgbe_bypass.c', + 'ixgbe_ethdev.c', + 'ixgbe_fdir.c', + 'ixgbe_flow.c', + 'ixgbe_ipsec.c', + 'ixgbe_pf.c', + 'ixgbe_rxtx.c', + 'ixgbe_tm.c', + 'ixgbe_vf_representor.c', + 'rte_pmd_ixgbe.c' +) + +deps += ['hash', 'security'] + +if arch_subdir == 'x86' + sources += files('ixgbe_rxtx_vec_sse.c') +elif arch_subdir == 'arm' + sources += files('ixgbe_rxtx_vec_neon.c') +endif + +includes += include_directories('base') + +install_headers('rte_pmd_ixgbe.h') diff --git a/src/spdk/dpdk/drivers/net/ixgbe/rte_pmd_ixgbe.c b/src/spdk/dpdk/drivers/net/ixgbe/rte_pmd_ixgbe.c new file mode 100644 index 000000000..9bff557f9 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ixgbe/rte_pmd_ixgbe.c @@ -0,0 +1,1141 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2017 Intel Corporation + */ + +#include + +#include "base/ixgbe_api.h" +#include "base/ixgbe_x550.h" +#include "ixgbe_ethdev.h" +#include "rte_pmd_ixgbe.h" + +int +rte_pmd_ixgbe_set_vf_mac_addr(uint16_t port, uint16_t vf, + struct rte_ether_addr *mac_addr) +{ + struct ixgbe_hw *hw; + struct ixgbe_vf_info *vfinfo; + int rar_entry; + uint8_t *new_mac = (uint8_t *)(mac_addr); + struct rte_eth_dev *dev; + struct rte_pci_device *pci_dev; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + pci_dev = RTE_ETH_DEV_TO_PCI(dev); + + if (!is_ixgbe_supported(dev)) + return -ENOTSUP; + + if (vf >= pci_dev->max_vfs) + return -EINVAL; + + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + vfinfo = *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private)); + rar_entry = hw->mac.num_rar_entries - (vf + 1); + + if (rte_is_valid_assigned_ether_addr( + (struct rte_ether_addr *)new_mac)) { + rte_memcpy(vfinfo[vf].vf_mac_addresses, new_mac, + RTE_ETHER_ADDR_LEN); + return hw->mac.ops.set_rar(hw, rar_entry, new_mac, vf, + IXGBE_RAH_AV); + } + return -EINVAL; +} + +int 
+rte_pmd_ixgbe_ping_vf(uint16_t port, uint16_t vf) +{ + struct ixgbe_hw *hw; + struct ixgbe_vf_info *vfinfo; + struct rte_eth_dev *dev; + struct rte_pci_device *pci_dev; + uint32_t ctrl; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + pci_dev = RTE_ETH_DEV_TO_PCI(dev); + + if (!is_ixgbe_supported(dev)) + return -ENOTSUP; + + if (vf >= pci_dev->max_vfs) + return -EINVAL; + + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + vfinfo = *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private)); + + ctrl = IXGBE_PF_CONTROL_MSG; + if (vfinfo[vf].clear_to_send) + ctrl |= IXGBE_VT_MSGTYPE_CTS; + + ixgbe_write_mbx(hw, &ctrl, 1, vf); + + return 0; +} + +int +rte_pmd_ixgbe_set_vf_vlan_anti_spoof(uint16_t port, uint16_t vf, uint8_t on) +{ + struct ixgbe_hw *hw; + struct ixgbe_mac_info *mac; + struct rte_eth_dev *dev; + struct rte_pci_device *pci_dev; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + pci_dev = RTE_ETH_DEV_TO_PCI(dev); + + if (!is_ixgbe_supported(dev)) + return -ENOTSUP; + + if (vf >= pci_dev->max_vfs) + return -EINVAL; + + if (on > 1) + return -EINVAL; + + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + mac = &hw->mac; + + mac->ops.set_vlan_anti_spoofing(hw, on, vf); + + return 0; +} + +int +rte_pmd_ixgbe_set_vf_mac_anti_spoof(uint16_t port, uint16_t vf, uint8_t on) +{ + struct ixgbe_hw *hw; + struct ixgbe_mac_info *mac; + struct rte_eth_dev *dev; + struct rte_pci_device *pci_dev; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + pci_dev = RTE_ETH_DEV_TO_PCI(dev); + + if (!is_ixgbe_supported(dev)) + return -ENOTSUP; + + if (vf >= pci_dev->max_vfs) + return -EINVAL; + + if (on > 1) + return -EINVAL; + + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + mac = &hw->mac; + mac->ops.set_mac_anti_spoofing(hw, on, vf); + + return 0; +} + +int +rte_pmd_ixgbe_set_vf_vlan_insert(uint16_t port, uint16_t vf, uint16_t vlan_id) +{ + struct ixgbe_hw *hw; + uint32_t ctrl; + struct rte_eth_dev *dev; + struct rte_pci_device *pci_dev; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + pci_dev = RTE_ETH_DEV_TO_PCI(dev); + + if (!is_ixgbe_supported(dev)) + return -ENOTSUP; + + if (vf >= pci_dev->max_vfs) + return -EINVAL; + + if (vlan_id > RTE_ETHER_MAX_VLAN_ID) + return -EINVAL; + + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + ctrl = IXGBE_READ_REG(hw, IXGBE_VMVIR(vf)); + if (vlan_id) { + ctrl = vlan_id; + ctrl |= IXGBE_VMVIR_VLANA_DEFAULT; + } else { + ctrl = 0; + } + + IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), ctrl); + + return 0; +} + +int +rte_pmd_ixgbe_set_tx_loopback(uint16_t port, uint8_t on) +{ + struct ixgbe_hw *hw; + uint32_t ctrl; + struct rte_eth_dev *dev; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + + if (!is_ixgbe_supported(dev)) + return -ENOTSUP; + + if (on > 1) + return -EINVAL; + + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + ctrl = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC); + /* enable or disable VMDQ loopback */ + if (on) + ctrl |= IXGBE_PFDTXGSWC_VT_LBEN; + else + ctrl &= ~IXGBE_PFDTXGSWC_VT_LBEN; + + IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, ctrl); + + return 0; +} + +int +rte_pmd_ixgbe_set_all_queues_drop_en(uint16_t port, uint8_t on) +{ + struct ixgbe_hw *hw; + uint32_t reg_value; + int i; + int num_queues = (int)(IXGBE_QDE_IDX_MASK >> IXGBE_QDE_IDX_SHIFT); + struct rte_eth_dev *dev; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = 
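/*
 * Illustrative sketch (annotation, not part of the upstream patch): the
 * anti-spoofing and VLAN-insert helpers above can be combined to lock down
 * a VF from the PF side.  Port 0, VF 1 and VLAN 100 are example values.
 *
 *   int ret;
 *
 *   ret = rte_pmd_ixgbe_set_vf_mac_anti_spoof(0, 1, 1);
 *   if (ret == 0)
 *           ret = rte_pmd_ixgbe_set_vf_vlan_anti_spoof(0, 1, 1);
 *   if (ret == 0)
 *           ret = rte_pmd_ixgbe_set_vf_vlan_insert(0, 1, 100);
 *   if (ret != 0)
 *           printf("VF hardening failed: %d\n", ret);
 */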
&rte_eth_devices[port]; + + if (!is_ixgbe_supported(dev)) + return -ENOTSUP; + + if (on > 1) + return -EINVAL; + + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + for (i = 0; i <= num_queues; i++) { + reg_value = IXGBE_QDE_WRITE | + (i << IXGBE_QDE_IDX_SHIFT) | + (on & IXGBE_QDE_ENABLE); + IXGBE_WRITE_REG(hw, IXGBE_QDE, reg_value); + } + + return 0; +} + +int +rte_pmd_ixgbe_set_vf_split_drop_en(uint16_t port, uint16_t vf, uint8_t on) +{ + struct ixgbe_hw *hw; + uint32_t reg_value; + struct rte_eth_dev *dev; + struct rte_pci_device *pci_dev; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + pci_dev = RTE_ETH_DEV_TO_PCI(dev); + + if (!is_ixgbe_supported(dev)) + return -ENOTSUP; + + /* only support VF's 0 to 63 */ + if ((vf >= pci_dev->max_vfs) || (vf > 63)) + return -EINVAL; + + if (on > 1) + return -EINVAL; + + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + reg_value = IXGBE_READ_REG(hw, IXGBE_SRRCTL(vf)); + if (on) + reg_value |= IXGBE_SRRCTL_DROP_EN; + else + reg_value &= ~IXGBE_SRRCTL_DROP_EN; + + IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(vf), reg_value); + + return 0; +} + +int +rte_pmd_ixgbe_set_vf_vlan_stripq(uint16_t port, uint16_t vf, uint8_t on) +{ + struct rte_eth_dev *dev; + struct rte_pci_device *pci_dev; + struct ixgbe_hw *hw; + uint16_t queues_per_pool; + uint32_t q; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + pci_dev = RTE_ETH_DEV_TO_PCI(dev); + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (!is_ixgbe_supported(dev)) + return -ENOTSUP; + + if (vf >= pci_dev->max_vfs) + return -EINVAL; + + if (on > 1) + return -EINVAL; + + RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP); + + /* The PF has 128 queue pairs and in SRIOV configuration + * those queues will be assigned to VF's, so RXDCTL + * registers will be dealing with queues which will be + * assigned to VF's. + * Let's say we have SRIOV configured with 31 VF's then the + * first 124 queues 0-123 will be allocated to VF's and only + * the last 4 queues 123-127 will be assigned to the PF. 
+ */ + if (hw->mac.type == ixgbe_mac_82598EB) + queues_per_pool = (uint16_t)hw->mac.max_rx_queues / + ETH_16_POOLS; + else + queues_per_pool = (uint16_t)hw->mac.max_rx_queues / + ETH_64_POOLS; + + for (q = 0; q < queues_per_pool; q++) + (*dev->dev_ops->vlan_strip_queue_set)(dev, + q + vf * queues_per_pool, on); + return 0; +} + +int +rte_pmd_ixgbe_set_vf_rxmode(uint16_t port, uint16_t vf, + uint16_t rx_mask, uint8_t on) +{ + int val = 0; + struct rte_eth_dev *dev; + struct rte_pci_device *pci_dev; + struct ixgbe_hw *hw; + uint32_t vmolr; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + pci_dev = RTE_ETH_DEV_TO_PCI(dev); + + if (!is_ixgbe_supported(dev)) + return -ENOTSUP; + + if (vf >= pci_dev->max_vfs) + return -EINVAL; + + if (on > 1) + return -EINVAL; + + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf)); + + if (hw->mac.type == ixgbe_mac_82598EB) { + PMD_INIT_LOG(ERR, "setting VF receive mode set should be done" + " on 82599 hardware and newer"); + return -ENOTSUP; + } + if (ixgbe_vt_check(hw) < 0) + return -ENOTSUP; + + val = ixgbe_convert_vm_rx_mask_to_val(rx_mask, val); + + if (on) + vmolr |= val; + else + vmolr &= ~val; + + IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr); + + return 0; +} + +int +rte_pmd_ixgbe_set_vf_rx(uint16_t port, uint16_t vf, uint8_t on) +{ + struct rte_eth_dev *dev; + struct rte_pci_device *pci_dev; + uint32_t reg, addr; + uint32_t val; + const uint8_t bit1 = 0x1; + struct ixgbe_hw *hw; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + pci_dev = RTE_ETH_DEV_TO_PCI(dev); + + if (!is_ixgbe_supported(dev)) + return -ENOTSUP; + + if (vf >= pci_dev->max_vfs) + return -EINVAL; + + if (on > 1) + return -EINVAL; + + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (ixgbe_vt_check(hw) < 0) + return -ENOTSUP; + + /* for vf >= 32, set bit in PFVFRE[1], otherwise PFVFRE[0] */ + if (vf >= 32) { + addr = IXGBE_VFRE(1); + val = bit1 << (vf - 32); + } else { + addr = IXGBE_VFRE(0); + val = bit1 << vf; + } + + reg = IXGBE_READ_REG(hw, addr); + + if (on) + reg |= val; + else + reg &= ~val; + + IXGBE_WRITE_REG(hw, addr, reg); + + return 0; +} + +int +rte_pmd_ixgbe_set_vf_tx(uint16_t port, uint16_t vf, uint8_t on) +{ + struct rte_eth_dev *dev; + struct rte_pci_device *pci_dev; + uint32_t reg, addr; + uint32_t val; + const uint8_t bit1 = 0x1; + + struct ixgbe_hw *hw; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + pci_dev = RTE_ETH_DEV_TO_PCI(dev); + + if (!is_ixgbe_supported(dev)) + return -ENOTSUP; + + if (vf >= pci_dev->max_vfs) + return -EINVAL; + + if (on > 1) + return -EINVAL; + + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + if (ixgbe_vt_check(hw) < 0) + return -ENOTSUP; + + /* for vf >= 32, set bit in PFVFTE[1], otherwise PFVFTE[0] */ + if (vf >= 32) { + addr = IXGBE_VFTE(1); + val = bit1 << (vf - 32); + } else { + addr = IXGBE_VFTE(0); + val = bit1 << vf; + } + + reg = IXGBE_READ_REG(hw, addr); + + if (on) + reg |= val; + else + reg &= ~val; + + IXGBE_WRITE_REG(hw, addr, reg); + + return 0; +} + +int +rte_pmd_ixgbe_set_vf_vlan_filter(uint16_t port, uint16_t vlan, + uint64_t vf_mask, uint8_t vlan_on) +{ + struct rte_eth_dev *dev; + int ret = 0; + uint16_t vf_idx; + struct ixgbe_hw *hw; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + + if (!is_ixgbe_supported(dev)) + return -ENOTSUP; + + if (vlan > RTE_ETHER_MAX_VLAN_ID || vf_mask == 0) + return 
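/*
 * Illustrative sketch (annotation, not part of the upstream patch):
 * enabling receive and transmit for VF 2 on port 0 and letting it accept
 * broadcast frames via the helpers above.  ETH_VMDQ_ACCEPT_BROADCAST comes
 * from rte_ethdev.h; the port and VF numbers are example values.
 *
 *   if (rte_pmd_ixgbe_set_vf_rx(0, 2, 1) == 0 &&
 *       rte_pmd_ixgbe_set_vf_tx(0, 2, 1) == 0)
 *           rte_pmd_ixgbe_set_vf_rxmode(0, 2, ETH_VMDQ_ACCEPT_BROADCAST, 1);
 */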
-EINVAL; + + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + if (ixgbe_vt_check(hw) < 0) + return -ENOTSUP; + + for (vf_idx = 0; vf_idx < 64; vf_idx++) { + if (vf_mask & ((uint64_t)(1ULL << vf_idx))) { + ret = hw->mac.ops.set_vfta(hw, vlan, vf_idx, + vlan_on, false); + if (ret < 0) + return ret; + } + } + + return ret; +} + +int +rte_pmd_ixgbe_set_vf_rate_limit(uint16_t port, uint16_t vf, + uint16_t tx_rate, uint64_t q_msk) +{ + struct rte_eth_dev *dev; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + + if (!is_ixgbe_supported(dev)) + return -ENOTSUP; + + return ixgbe_set_vf_rate_limit(dev, vf, tx_rate, q_msk); +} + +int +rte_pmd_ixgbe_macsec_enable(uint16_t port, uint8_t en, uint8_t rp) +{ + struct rte_eth_dev *dev; + struct ixgbe_macsec_setting macsec_setting; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + + if (!is_ixgbe_supported(dev)) + return -ENOTSUP; + + macsec_setting.offload_en = 1; + macsec_setting.encrypt_en = en; + macsec_setting.replayprotect_en = rp; + + ixgbe_dev_macsec_setting_save(dev, &macsec_setting); + + ixgbe_dev_macsec_register_enable(dev, &macsec_setting); + + return 0; +} + +int +rte_pmd_ixgbe_macsec_disable(uint16_t port) +{ + struct rte_eth_dev *dev; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + + if (!is_ixgbe_supported(dev)) + return -ENOTSUP; + + ixgbe_dev_macsec_setting_reset(dev); + + ixgbe_dev_macsec_register_disable(dev); + + return 0; +} + +int +rte_pmd_ixgbe_macsec_config_txsc(uint16_t port, uint8_t *mac) +{ + struct ixgbe_hw *hw; + struct rte_eth_dev *dev; + uint32_t ctrl; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + + if (!is_ixgbe_supported(dev)) + return -ENOTSUP; + + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + ctrl = mac[0] | (mac[1] << 8) | (mac[2] << 16) | (mac[3] << 24); + IXGBE_WRITE_REG(hw, IXGBE_LSECTXSCL, ctrl); + + ctrl = mac[4] | (mac[5] << 8); + IXGBE_WRITE_REG(hw, IXGBE_LSECTXSCH, ctrl); + + return 0; +} + +int +rte_pmd_ixgbe_macsec_config_rxsc(uint16_t port, uint8_t *mac, uint16_t pi) +{ + struct ixgbe_hw *hw; + struct rte_eth_dev *dev; + uint32_t ctrl; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + + if (!is_ixgbe_supported(dev)) + return -ENOTSUP; + + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + ctrl = mac[0] | (mac[1] << 8) | (mac[2] << 16) | (mac[3] << 24); + IXGBE_WRITE_REG(hw, IXGBE_LSECRXSCL, ctrl); + + pi = rte_cpu_to_be_16(pi); + ctrl = mac[4] | (mac[5] << 8) | (pi << 16); + IXGBE_WRITE_REG(hw, IXGBE_LSECRXSCH, ctrl); + + return 0; +} + +int +rte_pmd_ixgbe_macsec_select_txsa(uint16_t port, uint8_t idx, uint8_t an, + uint32_t pn, uint8_t *key) +{ + struct ixgbe_hw *hw; + struct rte_eth_dev *dev; + uint32_t ctrl, i; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + + if (!is_ixgbe_supported(dev)) + return -ENOTSUP; + + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (idx != 0 && idx != 1) + return -EINVAL; + + if (an >= 4) + return -EINVAL; + + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* Set the PN and key */ + pn = rte_cpu_to_be_32(pn); + if (idx == 0) { + IXGBE_WRITE_REG(hw, IXGBE_LSECTXPN0, pn); + + for (i = 0; i < 4; i++) { + ctrl = (key[i * 4 + 0] << 0) | + (key[i * 4 + 1] << 8) | + (key[i * 4 + 2] << 16) | + (key[i * 4 + 3] << 24); + IXGBE_WRITE_REG(hw, IXGBE_LSECTXKEY0(i), ctrl); + } + } else { + IXGBE_WRITE_REG(hw, 
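/*
 * Illustrative sketch (annotation, not part of the upstream patch): the
 * expected MACsec bring-up order with the helpers above is enable, then
 * configure the Tx/Rx secure channels, then install the SAs.  The MAC
 * addresses, the remote port identifier (pi) and the 16-byte key are
 * placeholders for the example.
 *
 *   static uint8_t local_mac[6]  = { 0x02, 0, 0, 0, 0, 0x10 };
 *   static uint8_t remote_mac[6] = { 0x02, 0, 0, 0, 0, 0x20 };
 *   static uint8_t key[16]       = { 0 };
 *
 *   rte_pmd_ixgbe_macsec_enable(0, 1, 1);
 *   rte_pmd_ixgbe_macsec_config_txsc(0, local_mac);
 *   rte_pmd_ixgbe_macsec_config_rxsc(0, remote_mac, 1);
 *   rte_pmd_ixgbe_macsec_select_txsa(0, 0, 0, 0, key);
 *   rte_pmd_ixgbe_macsec_select_rxsa(0, 0, 0, 0, key);
 */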
IXGBE_LSECTXPN1, pn); + + for (i = 0; i < 4; i++) { + ctrl = (key[i * 4 + 0] << 0) | + (key[i * 4 + 1] << 8) | + (key[i * 4 + 2] << 16) | + (key[i * 4 + 3] << 24); + IXGBE_WRITE_REG(hw, IXGBE_LSECTXKEY1(i), ctrl); + } + } + + /* Set AN and select the SA */ + ctrl = (an << idx * 2) | (idx << 4); + IXGBE_WRITE_REG(hw, IXGBE_LSECTXSA, ctrl); + + return 0; +} + +int +rte_pmd_ixgbe_macsec_select_rxsa(uint16_t port, uint8_t idx, uint8_t an, + uint32_t pn, uint8_t *key) +{ + struct ixgbe_hw *hw; + struct rte_eth_dev *dev; + uint32_t ctrl, i; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + + if (!is_ixgbe_supported(dev)) + return -ENOTSUP; + + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (idx != 0 && idx != 1) + return -EINVAL; + + if (an >= 4) + return -EINVAL; + + /* Set the PN */ + pn = rte_cpu_to_be_32(pn); + IXGBE_WRITE_REG(hw, IXGBE_LSECRXPN(idx), pn); + + /* Set the key */ + for (i = 0; i < 4; i++) { + ctrl = (key[i * 4 + 0] << 0) | + (key[i * 4 + 1] << 8) | + (key[i * 4 + 2] << 16) | + (key[i * 4 + 3] << 24); + IXGBE_WRITE_REG(hw, IXGBE_LSECRXKEY(idx, i), ctrl); + } + + /* Set the AN and validate the SA */ + ctrl = an | (1 << 2); + IXGBE_WRITE_REG(hw, IXGBE_LSECRXSA(idx), ctrl); + + return 0; +} + +int +rte_pmd_ixgbe_set_tc_bw_alloc(uint16_t port, + uint8_t tc_num, + uint8_t *bw_weight) +{ + struct rte_eth_dev *dev; + struct ixgbe_dcb_config *dcb_config; + struct ixgbe_dcb_tc_config *tc; + struct rte_eth_conf *eth_conf; + struct ixgbe_bw_conf *bw_conf; + uint8_t i; + uint8_t nb_tcs; + uint16_t sum; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + + if (!is_ixgbe_supported(dev)) + return -ENOTSUP; + + if (tc_num > IXGBE_DCB_MAX_TRAFFIC_CLASS) { + PMD_DRV_LOG(ERR, "TCs should be no more than %d.", + IXGBE_DCB_MAX_TRAFFIC_CLASS); + return -EINVAL; + } + + dcb_config = IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private); + bw_conf = IXGBE_DEV_PRIVATE_TO_BW_CONF(dev->data->dev_private); + eth_conf = &dev->data->dev_conf; + + if (eth_conf->txmode.mq_mode == ETH_MQ_TX_DCB) { + nb_tcs = eth_conf->tx_adv_conf.dcb_tx_conf.nb_tcs; + } else if (eth_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) { + if (eth_conf->tx_adv_conf.vmdq_dcb_tx_conf.nb_queue_pools == + ETH_32_POOLS) + nb_tcs = ETH_4_TCS; + else + nb_tcs = ETH_8_TCS; + } else { + nb_tcs = 1; + } + + if (nb_tcs != tc_num) { + PMD_DRV_LOG(ERR, + "Weight should be set for all %d enabled TCs.", + nb_tcs); + return -EINVAL; + } + + sum = 0; + for (i = 0; i < nb_tcs; i++) + sum += bw_weight[i]; + if (sum != 100) { + PMD_DRV_LOG(ERR, + "The summary of the TC weight should be 100."); + return -EINVAL; + } + + for (i = 0; i < nb_tcs; i++) { + tc = &dcb_config->tc_config[i]; + tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = bw_weight[i]; + } + for (; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + tc = &dcb_config->tc_config[i]; + tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = 0; + } + + bw_conf->tc_num = nb_tcs; + + return 0; +} + +int +rte_pmd_ixgbe_upd_fctrl_sbp(uint16_t port, int enable) +{ + struct ixgbe_hw *hw; + struct rte_eth_dev *dev; + uint32_t fctrl; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + dev = &rte_eth_devices[port]; + if (!is_ixgbe_supported(dev)) + return -ENOTSUP; + + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + if (!hw) + return -ENOTSUP; + + fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); + + /* If 'enable' set the SBP bit else clear it */ + if (enable) + fctrl |= IXGBE_FCTRL_SBP; + else + fctrl &= ~(IXGBE_FCTRL_SBP); + + 
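/*
 * Illustrative sketch (annotation, not part of the upstream patch): as the
 * checks in rte_pmd_ixgbe_set_tc_bw_alloc() above require, one weight must
 * be passed per enabled TC and the weights must sum to 100.  Four TCs with
 * a 40/30/20/10 split are example values.
 *
 *   uint8_t weights[4] = { 40, 30, 20, 10 };
 *
 *   int ret = rte_pmd_ixgbe_set_tc_bw_alloc(0, 4, weights);
 *   if (ret != 0)
 *           printf("TC bandwidth setup failed: %d\n", ret);
 */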
IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); + return 0; +} + +#ifdef RTE_LIBRTE_IXGBE_BYPASS +int +rte_pmd_ixgbe_bypass_init(uint16_t port_id) +{ + struct rte_eth_dev *dev; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); + + dev = &rte_eth_devices[port_id]; + if (!is_ixgbe_supported(dev)) + return -ENOTSUP; + + ixgbe_bypass_init(dev); + return 0; +} + +int +rte_pmd_ixgbe_bypass_state_show(uint16_t port_id, uint32_t *state) +{ + struct rte_eth_dev *dev; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); + + dev = &rte_eth_devices[port_id]; + if (!is_ixgbe_supported(dev)) + return -ENOTSUP; + + return ixgbe_bypass_state_show(dev, state); +} + +int +rte_pmd_ixgbe_bypass_state_set(uint16_t port_id, uint32_t *new_state) +{ + struct rte_eth_dev *dev; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); + + dev = &rte_eth_devices[port_id]; + if (!is_ixgbe_supported(dev)) + return -ENOTSUP; + + return ixgbe_bypass_state_store(dev, new_state); +} + +int +rte_pmd_ixgbe_bypass_event_show(uint16_t port_id, + uint32_t event, + uint32_t *state) +{ + struct rte_eth_dev *dev; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); + + dev = &rte_eth_devices[port_id]; + if (!is_ixgbe_supported(dev)) + return -ENOTSUP; + + return ixgbe_bypass_event_show(dev, event, state); +} + +int +rte_pmd_ixgbe_bypass_event_store(uint16_t port_id, + uint32_t event, + uint32_t state) +{ + struct rte_eth_dev *dev; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); + + dev = &rte_eth_devices[port_id]; + if (!is_ixgbe_supported(dev)) + return -ENOTSUP; + + return ixgbe_bypass_event_store(dev, event, state); +} + +int +rte_pmd_ixgbe_bypass_wd_timeout_store(uint16_t port_id, uint32_t timeout) +{ + struct rte_eth_dev *dev; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); + + dev = &rte_eth_devices[port_id]; + if (!is_ixgbe_supported(dev)) + return -ENOTSUP; + + return ixgbe_bypass_wd_timeout_store(dev, timeout); +} + +int +rte_pmd_ixgbe_bypass_ver_show(uint16_t port_id, uint32_t *ver) +{ + struct rte_eth_dev *dev; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); + + dev = &rte_eth_devices[port_id]; + if (!is_ixgbe_supported(dev)) + return -ENOTSUP; + + return ixgbe_bypass_ver_show(dev, ver); +} + +int +rte_pmd_ixgbe_bypass_wd_timeout_show(uint16_t port_id, uint32_t *wd_timeout) +{ + struct rte_eth_dev *dev; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); + + dev = &rte_eth_devices[port_id]; + if (!is_ixgbe_supported(dev)) + return -ENOTSUP; + + return ixgbe_bypass_wd_timeout_show(dev, wd_timeout); +} + +int +rte_pmd_ixgbe_bypass_wd_reset(uint16_t port_id) +{ + struct rte_eth_dev *dev; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); + + dev = &rte_eth_devices[port_id]; + if (!is_ixgbe_supported(dev)) + return -ENOTSUP; + + return ixgbe_bypass_wd_reset(dev); +} +#endif + +/** + * rte_pmd_ixgbe_acquire_swfw - Acquire SWFW semaphore + * @hw: pointer to hardware structure + * @mask: Mask to specify which semaphore to acquire + * + * Acquires the SWFW semaphore and get the shared phy token as needed + */ +STATIC s32 rte_pmd_ixgbe_acquire_swfw(struct ixgbe_hw *hw, u32 mask) +{ + int retries = FW_PHY_TOKEN_RETRIES; + s32 status = IXGBE_SUCCESS; + + while (--retries) { + status = ixgbe_acquire_swfw_semaphore(hw, mask); + if (status) { + PMD_DRV_LOG(ERR, "Get SWFW sem failed, Status = %d\n", + status); + return status; + } + status = ixgbe_get_phy_token(hw); + if (status == IXGBE_SUCCESS) + return IXGBE_SUCCESS; + + if (status == IXGBE_ERR_TOKEN_RETRY) + PMD_DRV_LOG(ERR, "Get PHY token failed, 
Status = %d\n", + status); + + ixgbe_release_swfw_semaphore(hw, mask); + if (status != IXGBE_ERR_TOKEN_RETRY) { + PMD_DRV_LOG(ERR, + "Retry get PHY token failed, Status=%d\n", + status); + return status; + } + } + PMD_DRV_LOG(ERR, "swfw acquisition retries failed!: PHY ID = 0x%08X\n", + hw->phy.id); + return status; +} + +/** + * rte_pmd_ixgbe_release_swfw_sync - Release SWFW semaphore + * @hw: pointer to hardware structure + * @mask: Mask to specify which semaphore to release + * + * Releases the SWFW semaphore and puts the shared phy token as needed + */ +STATIC void rte_pmd_ixgbe_release_swfw(struct ixgbe_hw *hw, u32 mask) +{ + ixgbe_put_phy_token(hw); + ixgbe_release_swfw_semaphore(hw, mask); +} + +int +rte_pmd_ixgbe_mdio_lock(uint16_t port) +{ + struct ixgbe_hw *hw; + struct rte_eth_dev *dev; + u32 swfw_mask; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + dev = &rte_eth_devices[port]; + if (!is_ixgbe_supported(dev)) + return -ENOTSUP; + + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + if (!hw) + return -ENOTSUP; + + if (hw->bus.lan_id) + swfw_mask = IXGBE_GSSR_PHY1_SM; + else + swfw_mask = IXGBE_GSSR_PHY0_SM; + + if (rte_pmd_ixgbe_acquire_swfw(hw, swfw_mask)) + return IXGBE_ERR_SWFW_SYNC; + + return IXGBE_SUCCESS; +} + +int +rte_pmd_ixgbe_mdio_unlock(uint16_t port) +{ + struct rte_eth_dev *dev; + struct ixgbe_hw *hw; + u32 swfw_mask; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + if (!is_ixgbe_supported(dev)) + return -ENOTSUP; + + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + if (!hw) + return -ENOTSUP; + + if (hw->bus.lan_id) + swfw_mask = IXGBE_GSSR_PHY1_SM; + else + swfw_mask = IXGBE_GSSR_PHY0_SM; + + rte_pmd_ixgbe_release_swfw(hw, swfw_mask); + + return IXGBE_SUCCESS; +} + +int +rte_pmd_ixgbe_mdio_unlocked_read(uint16_t port, uint32_t reg_addr, + uint32_t dev_type, uint16_t *phy_data) +{ + struct ixgbe_hw *hw; + struct rte_eth_dev *dev; + u32 i, data, command; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + dev = &rte_eth_devices[port]; + if (!is_ixgbe_supported(dev)) + return -ENOTSUP; + + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + if (!hw) + return -ENOTSUP; + + /* Setup and write the read command */ + command = (reg_addr << IXGBE_MSCA_DEV_TYPE_SHIFT) | + (dev_type << IXGBE_MSCA_PHY_ADDR_SHIFT) | + IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_READ_AUTOINC | + IXGBE_MSCA_MDI_COMMAND; + + IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); + + /* Check every 10 usec to see if the access completed. + * The MDI Command bit will clear when the operation is + * complete + */ + for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { + usec_delay(10); + + command = IXGBE_READ_REG(hw, IXGBE_MSCA); + if (!(command & IXGBE_MSCA_MDI_COMMAND)) + break; + } + if (command & IXGBE_MSCA_MDI_COMMAND) + return IXGBE_ERR_PHY; + + /* Read operation is complete. 
Get the data from MSRWD */ + data = IXGBE_READ_REG(hw, IXGBE_MSRWD); + data >>= IXGBE_MSRWD_READ_DATA_SHIFT; + *phy_data = (u16)data; + + return 0; +} + +int +rte_pmd_ixgbe_mdio_unlocked_write(uint16_t port, uint32_t reg_addr, + uint32_t dev_type, uint16_t phy_data) +{ + struct ixgbe_hw *hw; + u32 i, command; + struct rte_eth_dev *dev; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + dev = &rte_eth_devices[port]; + if (!is_ixgbe_supported(dev)) + return -ENOTSUP; + + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + if (!hw) + return -ENOTSUP; + + /* Put the data in the MDI single read and write data register*/ + IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)phy_data); + + /* Setup and write the write command */ + command = (reg_addr << IXGBE_MSCA_DEV_TYPE_SHIFT) | + (dev_type << IXGBE_MSCA_PHY_ADDR_SHIFT) | + IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_WRITE | + IXGBE_MSCA_MDI_COMMAND; + + IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); + + /* Check every 10 usec to see if the access completed. + * The MDI Command bit will clear when the operation is + * complete + */ + for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { + usec_delay(10); + + command = IXGBE_READ_REG(hw, IXGBE_MSCA); + if (!(command & IXGBE_MSCA_MDI_COMMAND)) + break; + } + if (command & IXGBE_MSCA_MDI_COMMAND) { + ERROR_REPORT1(IXGBE_ERROR_POLLING, + "PHY write cmd didn't complete\n"); + return IXGBE_ERR_PHY; + } + return 0; +} diff --git a/src/spdk/dpdk/drivers/net/ixgbe/rte_pmd_ixgbe.h b/src/spdk/dpdk/drivers/net/ixgbe/rte_pmd_ixgbe.h new file mode 100644 index 000000000..f62fd761d --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ixgbe/rte_pmd_ixgbe.h @@ -0,0 +1,729 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2016 Intel Corporation + */ + +/** + * @file rte_pmd_ixgbe.h + * ixgbe PMD specific functions. + * + **/ + +#ifndef _PMD_IXGBE_H_ +#define _PMD_IXGBE_H_ + +#include + +/** + * Notify VF when PF link status changes. + * + * @param port + * The port identifier of the Ethernet device. + * @param vf + * VF id. + * @return + * - (0) if successful. + * - (-ENODEV) if *port* invalid. + * - (-EINVAL) if *vf* invalid. + */ +int rte_pmd_ixgbe_ping_vf(uint16_t port, uint16_t vf); + +/** + * Set the VF MAC address. + * + * @param port + * The port identifier of the Ethernet device. + * @param vf + * VF id. + * @param mac_addr + * VF MAC address. + * @return + * - (0) if successful. + * - (-ENODEV) if *port* invalid. + * - (-EINVAL) if *vf* or *mac_addr* is invalid. + */ +int rte_pmd_ixgbe_set_vf_mac_addr(uint16_t port, uint16_t vf, + struct rte_ether_addr *mac_addr); + +/** + * Enable/Disable VF VLAN anti spoofing. + * + * @param port + * The port identifier of the Ethernet device. + * @param vf + * VF on which to set VLAN anti spoofing. + * @param on + * 1 - Enable VFs VLAN anti spoofing. + * 0 - Disable VFs VLAN anti spoofing. + * @return + * - (0) if successful. + * - (-ENODEV) if *port* invalid. + * - (-EINVAL) if bad parameter. + */ +int rte_pmd_ixgbe_set_vf_vlan_anti_spoof(uint16_t port, uint16_t vf, + uint8_t on); + +/** + * Enable/Disable VF MAC anti spoofing. + * + * @param port + * The port identifier of the Ethernet device. + * @param vf + * VF on which to set MAC anti spoofing. + * @param on + * 1 - Enable VFs MAC anti spoofing. + * 0 - Disable VFs MAC anti spoofing. + * @return + * - (0) if successful. + * - (-ENODEV) if *port* invalid. + * - (-EINVAL) if bad parameter. 
+ */ +int rte_pmd_ixgbe_set_vf_mac_anti_spoof(uint16_t port, uint16_t vf, uint8_t on); + +/** + * Enable/Disable vf vlan insert + * + * @param port + * The port identifier of the Ethernet device. + * @param vf + * ID specifying VF. + * @param vlan_id + * 0 - Disable VF's vlan insert. + * n - Enable; n is inserted as the vlan id. + * + * @return + * - (0) if successful. + * - (-ENODEV) if *port* invalid. + * - (-EINVAL) if bad parameter. + */ +int rte_pmd_ixgbe_set_vf_vlan_insert(uint16_t port, uint16_t vf, + uint16_t vlan_id); + +/** + * Enable/Disable tx loopback + * + * @param port + * The port identifier of the Ethernet device. + * @param on + * 1 - Enable tx loopback. + * 0 - Disable tx loopback. + * + * @return + * - (0) if successful. + * - (-ENODEV) if *port* invalid. + * - (-EINVAL) if bad parameter. + */ +int rte_pmd_ixgbe_set_tx_loopback(uint16_t port, uint8_t on); + +/** + * set all queues drop enable bit + * + * @param port + * The port identifier of the Ethernet device. + * @param on + * 1 - set the queue drop enable bit for all pools. + * 0 - reset the queue drop enable bit for all pools. + * + * @return + * - (0) if successful. + * - (-ENODEV) if *port* invalid. + * - (-EINVAL) if bad parameter. + */ +int rte_pmd_ixgbe_set_all_queues_drop_en(uint16_t port, uint8_t on); + +/** + * set drop enable bit in the VF split rx control register + * + * @param port + * The port identifier of the Ethernet device. + * @param vf + * ID specifying VF. + * @param on + * 1 - set the drop enable bit in the split rx control register. + * 0 - reset the drop enable bit in the split rx control register. + * + * @return + * - (0) if successful. + * - (-ENODEV) if *port* invalid. + * - (-EINVAL) if bad parameter. + */ + +int rte_pmd_ixgbe_set_vf_split_drop_en(uint16_t port, uint16_t vf, uint8_t on); + +/** + * Enable/Disable vf vlan strip for all queues in a pool + * + * @param port + * The port identifier of the Ethernet device. + * @param vf + * ID specifying VF. + * @param on + * 1 - Enable VF's vlan strip on RX queues. + * 0 - Disable VF's vlan strip on RX queues. + * + * @return + * - (0) if successful. + * - (-ENOTSUP) if hardware doesn't support this feature. + * - (-ENODEV) if *port* invalid. + * - (-EINVAL) if bad parameter. + */ +int +rte_pmd_ixgbe_set_vf_vlan_stripq(uint16_t port, uint16_t vf, uint8_t on); + +/** + * Enable MACsec offload. + * + * @param port + * The port identifier of the Ethernet device. + * @param en + * 1 - Enable encryption (encrypt and add integrity signature). + * 0 - Disable encryption (only add integrity signature). + * @param rp + * 1 - Enable replay protection. + * 0 - Disable replay protection. + * @return + * - (0) if successful. + * - (-ENODEV) if *port* invalid. + * - (-ENOTSUP) if hardware doesn't support this feature. + */ +int rte_pmd_ixgbe_macsec_enable(uint16_t port, uint8_t en, uint8_t rp); + +/** + * Disable MACsec offload. + * + * @param port + * The port identifier of the Ethernet device. + * @return + * - (0) if successful. + * - (-ENODEV) if *port* invalid. + * - (-ENOTSUP) if hardware doesn't support this feature. + */ +int rte_pmd_ixgbe_macsec_disable(uint16_t port); + +/** + * Configure Tx SC (Secure Connection). + * + * @param port + * The port identifier of the Ethernet device. + * @param mac + * The MAC address on the local side. + * @return + * - (0) if successful. + * - (-ENODEV) if *port* invalid. + * - (-ENOTSUP) if hardware doesn't support this feature. 
+ */ +int rte_pmd_ixgbe_macsec_config_txsc(uint16_t port, uint8_t *mac); + +/** + * Configure Rx SC (Secure Connection). + * + * @param port + * The port identifier of the Ethernet device. + * @param mac + * The MAC address on the remote side. + * @param pi + * The PI (port identifier) on the remote side. + * @return + * - (0) if successful. + * - (-ENODEV) if *port* invalid. + * - (-ENOTSUP) if hardware doesn't support this feature. + */ +int rte_pmd_ixgbe_macsec_config_rxsc(uint16_t port, uint8_t *mac, uint16_t pi); + +/** + * Enable Tx SA (Secure Association). + * + * @param port + * The port identifier of the Ethernet device. + * @param idx + * The SA to be enabled (0 or 1). + * @param an + * The association number on the local side. + * @param pn + * The packet number on the local side. + * @param key + * The key on the local side. + * @return + * - (0) if successful. + * - (-ENODEV) if *port* invalid. + * - (-ENOTSUP) if hardware doesn't support this feature. + * - (-EINVAL) if bad parameter. + */ +int rte_pmd_ixgbe_macsec_select_txsa(uint16_t port, uint8_t idx, uint8_t an, + uint32_t pn, uint8_t *key); + +/** + * Enable Rx SA (Secure Association). + * + * @param port + * The port identifier of the Ethernet device. + * @param idx + * The SA to be enabled (0 or 1) + * @param an + * The association number on the remote side. + * @param pn + * The packet number on the remote side. + * @param key + * The key on the remote side. + * @return + * - (0) if successful. + * - (-ENODEV) if *port* invalid. + * - (-ENOTSUP) if hardware doesn't support this feature. + * - (-EINVAL) if bad parameter. + */ +int rte_pmd_ixgbe_macsec_select_rxsa(uint16_t port, uint8_t idx, uint8_t an, + uint32_t pn, uint8_t *key); + +/** +* Set RX L2 Filtering mode of a VF of an Ethernet device. +* +* @param port +* The port identifier of the Ethernet device. +* @param vf +* VF id. +* @param rx_mask +* The RX mode mask, which is one or more of accepting Untagged Packets, +* packets that match the PFUTA table, Broadcast and Multicast Promiscuous. +* ETH_VMDQ_ACCEPT_UNTAG,ETH_VMDQ_ACCEPT_HASH_UC, +* ETH_VMDQ_ACCEPT_BROADCAST and ETH_VMDQ_ACCEPT_MULTICAST will be used +* in rx_mode. +* @param on +* 1 - Enable a VF RX mode. +* 0 - Disable a VF RX mode. +* @return +* - (0) if successful. +* - (-ENOTSUP) if hardware doesn't support. +* - (-ENODEV) if *port_id* invalid. +* - (-EINVAL) if bad parameter. +*/ +int +rte_pmd_ixgbe_set_vf_rxmode(uint16_t port, uint16_t vf, uint16_t rx_mask, + uint8_t on); + +/** +* Enable or disable a VF traffic receive of an Ethernet device. +* +* @param port +* The port identifier of the Ethernet device. +* @param vf +* VF id. +* @param on +* 1 - Enable a VF traffic receive. +* 0 - Disable a VF traffic receive. +* @return +* - (0) if successful. +* - (-ENOTSUP) if hardware doesn't support. +* - (-ENODEV) if *port_id* invalid. +* - (-EINVAL) if bad parameter. +*/ +int +rte_pmd_ixgbe_set_vf_rx(uint16_t port, uint16_t vf, uint8_t on); + +/** +* Enable or disable a VF traffic transmit of the Ethernet device. +* +* @param port +* The port identifier of the Ethernet device. +* @param vf +* VF id. +* @param on +* 1 - Enable a VF traffic transmit. +* 0 - Disable a VF traffic transmit. +* @return +* - (0) if successful. +* - (-ENODEV) if *port_id* invalid. +* - (-ENOTSUP) if hardware doesn't support. +* - (-EINVAL) if bad parameter. 
+*/ +int +rte_pmd_ixgbe_set_vf_tx(uint16_t port, uint16_t vf, uint8_t on); + +/** +* Enable/Disable hardware VF VLAN filtering by an Ethernet device of +* received VLAN packets tagged with a given VLAN Tag Identifier. +* +* @param port +* The port identifier of the Ethernet device. +* @param vlan +* The VLAN Tag Identifier whose filtering must be enabled or disabled. +* @param vf_mask +* Bitmap listing which VFs participate in the VLAN filtering. +* @param vlan_on +* 1 - Enable VFs VLAN filtering. +* 0 - Disable VFs VLAN filtering. +* @return +* - (0) if successful. +* - (-ENOTSUP) if hardware doesn't support. +* - (-ENODEV) if *port_id* invalid. +* - (-EINVAL) if bad parameter. +*/ +int +rte_pmd_ixgbe_set_vf_vlan_filter(uint16_t port, uint16_t vlan, + uint64_t vf_mask, uint8_t vlan_on); + +/** + * Set the rate limitation for a vf on an Ethernet device. + * + * @param port + * The port identifier of the Ethernet device. + * @param vf + * VF id. + * @param tx_rate + * The tx rate allocated from the total link speed for this VF id. + * @param q_msk + * The queue mask which need to set the rate. + * @return + * - (0) if successful. + * - (-ENOTSUP) if hardware doesn't support this feature. + * - (-ENODEV) if *port_id* invalid. + * - (-EINVAL) if bad parameter. + */ +int rte_pmd_ixgbe_set_vf_rate_limit(uint16_t port, uint16_t vf, + uint16_t tx_rate, uint64_t q_msk); + +/** + * Set all the TCs' bandwidth weight. + * + * The bw_weight means the percentage occupied by the TC. + * It can be taken as the relative min bandwidth setting. + * + * @param port + * The port identifier of the Ethernet device. + * @param tc_num + * Number of TCs. + * @param bw_weight + * An array of relative bandwidth weight for all the TCs. + * The summary of the bw_weight should be 100. + * @return + * - (0) if successful. + * - (-ENODEV) if *port* invalid. + * - (-EINVAL) if bad parameter. + * - (-ENOTSUP) not supported by firmware. + */ +int rte_pmd_ixgbe_set_tc_bw_alloc(uint16_t port, + uint8_t tc_num, + uint8_t *bw_weight); + + +/** + * Initialize bypass logic. This function needs to be called before + * executing any other bypass API. + * + * @param port + * The port identifier of the Ethernet device. + * @return + * - (0) if successful. + * - (-ENOTSUP) if hardware doesn't support. + * - (-EINVAL) if bad parameter. + */ +int rte_pmd_ixgbe_bypass_init(uint16_t port); + +/** + * Return bypass state. + * + * @param port + * The port identifier of the Ethernet device. + * @param state + * The return bypass state. + * - (1) Normal mode + * - (2) Bypass mode + * - (3) Isolate mode + * @return + * - (0) if successful. + * - (-ENOTSUP) if hardware doesn't support. + * - (-EINVAL) if bad parameter. + */ +int rte_pmd_ixgbe_bypass_state_show(uint16_t port, uint32_t *state); + +/** + * Set bypass state + * + * @param port + * The port identifier of the Ethernet device. + * @param new_state + * The current bypass state. + * - (1) Normal mode + * - (2) Bypass mode + * - (3) Isolate mode + * @return + * - (0) if successful. + * - (-ENOTSUP) if hardware doesn't support. + * - (-EINVAL) if bad parameter. + */ +int rte_pmd_ixgbe_bypass_state_set(uint16_t port, uint32_t *new_state); + +/** + * Return bypass state when given event occurs. + * + * @param port + * The port identifier of the Ethernet device. 
+ * @param event + * The bypass event + * - (1) Main power on (power button is pushed) + * - (2) Auxiliary power on (power supply is being plugged) + * - (3) Main power off (system shutdown and power supply is left plugged in) + * - (4) Auxiliary power off (power supply is being unplugged) + * - (5) Display or set the watchdog timer + * @param state + * The bypass state when given event occurred. + * - (1) Normal mode + * - (2) Bypass mode + * - (3) Isolate mode + * @return + * - (0) if successful. + * - (-ENOTSUP) if hardware doesn't support. + * - (-EINVAL) if bad parameter. + */ +int rte_pmd_ixgbe_bypass_event_show(uint16_t port, + uint32_t event, + uint32_t *state); + +/** + * Set bypass state when given event occurs. + * + * @param port + * The port identifier of the Ethernet device. + * @param event + * The bypass event + * - (1) Main power on (power button is pushed) + * - (2) Auxiliary power on (power supply is being plugged) + * - (3) Main power off (system shutdown and power supply is left plugged in) + * - (4) Auxiliary power off (power supply is being unplugged) + * - (5) Display or set the watchdog timer + * @param state + * The assigned state when given event occurs. + * - (1) Normal mode + * - (2) Bypass mode + * - (3) Isolate mode + * @return + * - (0) if successful. + * - (-ENOTSUP) if hardware doesn't support. + * - (-EINVAL) if bad parameter. + */ +int rte_pmd_ixgbe_bypass_event_store(uint16_t port, + uint32_t event, + uint32_t state); + +/** + * Set bypass watchdog timeout count. + * + * @param port + * The port identifier of the Ethernet device. + * @param timeout + * The timeout to be set. + * - (0) 0 seconds (timer is off) + * - (1) 1.5 seconds + * - (2) 2 seconds + * - (3) 3 seconds + * - (4) 4 seconds + * - (5) 8 seconds + * - (6) 16 seconds + * - (7) 32 seconds + * @return + * - (0) if successful. + * - (-ENOTSUP) if hardware doesn't support. + * - (-EINVAL) if bad parameter. + */ +int rte_pmd_ixgbe_bypass_wd_timeout_store(uint16_t port, uint32_t timeout); + +/** + * Get bypass firmware version. + * + * @param port + * The port identifier of the Ethernet device. + * @param ver + * The firmware version + * @return + * - (0) if successful. + * - (-ENOTSUP) if hardware doesn't support. + * - (-EINVAL) if bad parameter. + */ +int rte_pmd_ixgbe_bypass_ver_show(uint16_t port, uint32_t *ver); + +/** + * Return bypass watchdog timeout in seconds + * + * @param port + * The port identifier of the Ethernet device. + * @param wd_timeout + * The return watchdog timeout. "0" represents timer expired + * - (0) 0 seconds (timer is off) + * - (1) 1.5 seconds + * - (2) 2 seconds + * - (3) 3 seconds + * - (4) 4 seconds + * - (5) 8 seconds + * - (6) 16 seconds + * - (7) 32 seconds + * @return + * - (0) if successful. + * - (-ENOTSUP) if hardware doesn't support. + * - (-EINVAL) if bad parameter. + */ +int rte_pmd_ixgbe_bypass_wd_timeout_show(uint16_t port, uint32_t *wd_timeout); + +/** + * Reset bypass watchdog timer + * + * @param port + * The port identifier of the Ethernet device. + * @return + * - (0) if successful. + * - (-ENOTSUP) if hardware doesn't support. + * - (-EINVAL) if bad parameter. + */ +int rte_pmd_ixgbe_bypass_wd_reset(uint16_t port); + +/** + * Acquire swfw semaphore lock for MDIO access + * + * @param port + * The port identifier of the Ethernet device. + * @return + * - (0) if successful. + * - (-ENOTSUP) if hardware doesn't support. + * - (-ENODEV) if *port* invalid. 
+ * - (IXGBE_ERR_SWFW_SYNC) If sw/fw semaphore acquisition failed + */ +__rte_experimental +int +rte_pmd_ixgbe_mdio_lock(uint16_t port); + +/** + * Release swfw semaphore lock used for MDIO access + * + * @param port + * The port identifier of the Ethernet device. + * @return + * - (0) if successful. + * - (-ENOTSUP) if hardware doesn't support. + * - (-ENODEV) if *port* invalid. + */ +__rte_experimental +int +rte_pmd_ixgbe_mdio_unlock(uint16_t port); + +/** + * Read PHY register using MDIO without MDIO lock + * The lock must be taken separately before calling this + * API + * @param port + * The port identifier of the Ethernet device. + * @param reg_addr + * 32 bit PHY Register + * @param dev_type + * Used to define device base address + * @param phy_data + * Pointer for reading PHY register data + * @return + * - (0) if successful. + * - (-ENOTSUP) if hardware doesn't support. + * - (-ENODEV) if *port* invalid. + * - (IXGBE_ERR_PHY) If PHY read command failed + */ +__rte_experimental +int +rte_pmd_ixgbe_mdio_unlocked_read(uint16_t port, uint32_t reg_addr, + uint32_t dev_type, uint16_t *phy_data); + +/** + * Write data to PHY register using without MDIO lock + * The lock must be taken separately before calling this + * API + * + * @param port + * The port identifier of the Ethernet device. + * @param reg_addr + * 32 bit PHY Register + * @param dev_type + * Used to define device base address + * @param phy_data + * Data to write to PHY register + * @return + * - (0) if successful. + * - (-ENOTSUP) if hardware doesn't support. + * - (-ENODEV) if *port* invalid. + * - (IXGBE_ERR_PHY) If PHY read command failed + */ +__rte_experimental +int +rte_pmd_ixgbe_mdio_unlocked_write(uint16_t port, uint32_t reg_addr, + uint32_t dev_type, uint16_t phy_data); + +/** + * Response sent back to ixgbe driver from user app after callback + */ +enum rte_pmd_ixgbe_mb_event_rsp { + RTE_PMD_IXGBE_MB_EVENT_NOOP_ACK, /**< skip mbox request and ACK */ + RTE_PMD_IXGBE_MB_EVENT_NOOP_NACK, /**< skip mbox request and NACK */ + RTE_PMD_IXGBE_MB_EVENT_PROCEED, /**< proceed with mbox request */ + RTE_PMD_IXGBE_MB_EVENT_MAX /**< max value of this enum */ +}; + +/** + * Data sent to the user application when the callback is executed. + */ +struct rte_pmd_ixgbe_mb_event_param { + uint16_t vfid; /**< Virtual Function number */ + uint16_t msg_type; /**< VF to PF message type, defined in ixgbe_mbx.h */ + uint16_t retval; /**< return value */ + void *msg; /**< pointer to message */ +}; +enum { + RTE_PMD_IXGBE_BYPASS_MODE_NONE, + RTE_PMD_IXGBE_BYPASS_MODE_NORMAL, + RTE_PMD_IXGBE_BYPASS_MODE_BYPASS, + RTE_PMD_IXGBE_BYPASS_MODE_ISOLATE, + RTE_PMD_IXGBE_BYPASS_MODE_NUM, +}; + +#define RTE_PMD_IXGBE_BYPASS_MODE_VALID(x) \ + ((x) > RTE_PMD_IXGBE_BYPASS_MODE_NONE && \ + (x) < RTE_PMD_IXGBE_BYPASS_MODE_NUM) + +enum { + RTE_PMD_IXGBE_BYPASS_EVENT_NONE, + RTE_PMD_IXGBE_BYPASS_EVENT_START, + RTE_PMD_IXGBE_BYPASS_EVENT_OS_ON = RTE_PMD_IXGBE_BYPASS_EVENT_START, + RTE_PMD_IXGBE_BYPASS_EVENT_POWER_ON, + RTE_PMD_IXGBE_BYPASS_EVENT_OS_OFF, + RTE_PMD_IXGBE_BYPASS_EVENT_POWER_OFF, + RTE_PMD_IXGBE_BYPASS_EVENT_TIMEOUT, + RTE_PMD_IXGBE_BYPASS_EVENT_NUM +}; + +#define RTE_PMD_IXGBE_BYPASS_EVENT_VALID(x) \ + ((x) > RTE_PMD_IXGBE_BYPASS_EVENT_NONE && \ + (x) < RTE_PMD_IXGBE_BYPASS_MODE_NUM) + +enum { + RTE_PMD_IXGBE_BYPASS_TMT_OFF, /* timeout disabled. 
*/ + RTE_PMD_IXGBE_BYPASS_TMT_1_5_SEC, /* timeout for 1.5 seconds */ + RTE_PMD_IXGBE_BYPASS_TMT_2_SEC, /* timeout for 2 seconds */ + RTE_PMD_IXGBE_BYPASS_TMT_3_SEC, /* timeout for 3 seconds */ + RTE_PMD_IXGBE_BYPASS_TMT_4_SEC, /* timeout for 4 seconds */ + RTE_PMD_IXGBE_BYPASS_TMT_8_SEC, /* timeout for 8 seconds */ + RTE_PMD_IXGBE_BYPASS_TMT_16_SEC, /* timeout for 16 seconds */ + RTE_PMD_IXGBE_BYPASS_TMT_32_SEC, /* timeout for 32 seconds */ + RTE_PMD_IXGBE_BYPASS_TMT_NUM +}; + +#define RTE_PMD_IXGBE_BYPASS_TMT_VALID(x) \ + ((x) == RTE_PMD_IXGBE_BYPASS_TMT_OFF || \ + ((x) > RTE_PMD_IXGBE_BYPASS_TMT_OFF && \ + (x) < RTE_PMD_IXGBE_BYPASS_TMT_NUM)) + +/** + * @param port + * The port identifier of the Ethernet device. + * @param enable + * 0 to disable and nonzero to enable 'SBP' bit in FCTRL register + * to receive all packets + * @return + * - (0) if successful. + * - (-ENODEV) if *port* invalid. + * - (-ENOTSUP) if hardware doesn't support this feature. + */ +__rte_experimental +int +rte_pmd_ixgbe_upd_fctrl_sbp(uint16_t port, int enable); +#endif /* _PMD_IXGBE_H_ */ diff --git a/src/spdk/dpdk/drivers/net/ixgbe/rte_pmd_ixgbe_version.map b/src/spdk/dpdk/drivers/net/ixgbe/rte_pmd_ixgbe_version.map new file mode 100644 index 000000000..21534dbc3 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ixgbe/rte_pmd_ixgbe_version.map @@ -0,0 +1,46 @@ +DPDK_20.0 { + global: + + rte_pmd_ixgbe_bypass_event_show; + rte_pmd_ixgbe_bypass_event_store; + rte_pmd_ixgbe_bypass_init; + rte_pmd_ixgbe_bypass_state_set; + rte_pmd_ixgbe_bypass_state_show; + rte_pmd_ixgbe_bypass_ver_show; + rte_pmd_ixgbe_bypass_wd_reset; + rte_pmd_ixgbe_bypass_wd_timeout_show; + rte_pmd_ixgbe_bypass_wd_timeout_store; + rte_pmd_ixgbe_macsec_config_rxsc; + rte_pmd_ixgbe_macsec_config_txsc; + rte_pmd_ixgbe_macsec_disable; + rte_pmd_ixgbe_macsec_enable; + rte_pmd_ixgbe_macsec_select_rxsa; + rte_pmd_ixgbe_macsec_select_txsa; + rte_pmd_ixgbe_ping_vf; + rte_pmd_ixgbe_set_all_queues_drop_en; + rte_pmd_ixgbe_set_tc_bw_alloc; + rte_pmd_ixgbe_set_tx_loopback; + rte_pmd_ixgbe_set_vf_mac_addr; + rte_pmd_ixgbe_set_vf_mac_anti_spoof; + rte_pmd_ixgbe_set_vf_rate_limit; + rte_pmd_ixgbe_set_vf_rx; + rte_pmd_ixgbe_set_vf_rxmode; + rte_pmd_ixgbe_set_vf_split_drop_en; + rte_pmd_ixgbe_set_vf_tx; + rte_pmd_ixgbe_set_vf_vlan_anti_spoof; + rte_pmd_ixgbe_set_vf_vlan_filter; + rte_pmd_ixgbe_set_vf_vlan_insert; + rte_pmd_ixgbe_set_vf_vlan_stripq; + + local: *; +}; + +EXPERIMENTAL { + global: + + rte_pmd_ixgbe_mdio_lock; + rte_pmd_ixgbe_mdio_unlock; + rte_pmd_ixgbe_mdio_unlocked_read; + rte_pmd_ixgbe_mdio_unlocked_write; + rte_pmd_ixgbe_upd_fctrl_sbp; +}; diff --git a/src/spdk/dpdk/drivers/net/kni/Makefile b/src/spdk/dpdk/drivers/net/kni/Makefile new file mode 100644 index 000000000..0694ffd02 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/kni/Makefile @@ -0,0 +1,30 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2017 Intel Corporation + +include $(RTE_SDK)/mk/rte.vars.mk + +# +# library name +# +LIB = librte_pmd_kni.a + +CFLAGS += -O3 +CFLAGS += $(WERROR_FLAGS) +LDLIBS += -lpthread +LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring +LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs -lrte_kni +LDLIBS += -lrte_bus_vdev + +EXPORT_MAP := rte_pmd_kni_version.map + +# +# all source are stored in SRCS-y +# +SRCS-$(CONFIG_RTE_LIBRTE_PMD_KNI) += rte_eth_kni.c + +# +# Export include files +# +SYMLINK-y-include += + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/src/spdk/dpdk/drivers/net/kni/meson.build b/src/spdk/dpdk/drivers/net/kni/meson.build new file mode 
100644 index 000000000..0539b4768 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/kni/meson.build @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2018 Intel Corporation + +# this driver can be built if-and-only-if KNI library is buildable +build = dpdk_conf.has('RTE_LIBRTE_KNI') +reason = 'missing dependency, DPDK KNI library' +sources = files('rte_eth_kni.c') +deps += 'kni' diff --git a/src/spdk/dpdk/drivers/net/kni/rte_eth_kni.c b/src/spdk/dpdk/drivers/net/kni/rte_eth_kni.c new file mode 100644 index 000000000..d88cb1778 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/kni/rte_eth_kni.c @@ -0,0 +1,517 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Intel Corporation + */ + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +/* Only single queue supported */ +#define KNI_MAX_QUEUE_PER_PORT 1 + +#define MAX_KNI_PORTS 8 + +#define KNI_ETHER_MTU(mbuf_size) \ + ((mbuf_size) - RTE_ETHER_HDR_LEN) /**< Ethernet MTU. */ + +#define ETH_KNI_NO_REQUEST_THREAD_ARG "no_request_thread" +static const char * const valid_arguments[] = { + ETH_KNI_NO_REQUEST_THREAD_ARG, + NULL +}; + +struct eth_kni_args { + int no_request_thread; +}; + +struct pmd_queue_stats { + uint64_t pkts; + uint64_t bytes; +}; + +struct pmd_queue { + struct pmd_internals *internals; + struct rte_mempool *mb_pool; + + struct pmd_queue_stats rx; + struct pmd_queue_stats tx; +}; + +struct pmd_internals { + struct rte_kni *kni; + int is_kni_started; + + pthread_t thread; + int stop_thread; + int no_request_thread; + + struct rte_ether_addr eth_addr; + + struct pmd_queue rx_queues[KNI_MAX_QUEUE_PER_PORT]; + struct pmd_queue tx_queues[KNI_MAX_QUEUE_PER_PORT]; +}; + +static const struct rte_eth_link pmd_link = { + .link_speed = ETH_SPEED_NUM_10G, + .link_duplex = ETH_LINK_FULL_DUPLEX, + .link_status = ETH_LINK_DOWN, + .link_autoneg = ETH_LINK_FIXED, +}; +static int is_kni_initialized; + +static int eth_kni_logtype; + +#define PMD_LOG(level, fmt, args...) 
\ + rte_log(RTE_LOG_ ## level, eth_kni_logtype, \ + "%s(): " fmt "\n", __func__, ##args) +static uint16_t +eth_kni_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs) +{ + struct pmd_queue *kni_q = q; + struct rte_kni *kni = kni_q->internals->kni; + uint16_t nb_pkts; + + nb_pkts = rte_kni_rx_burst(kni, bufs, nb_bufs); + + kni_q->rx.pkts += nb_pkts; + + return nb_pkts; +} + +static uint16_t +eth_kni_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs) +{ + struct pmd_queue *kni_q = q; + struct rte_kni *kni = kni_q->internals->kni; + uint16_t nb_pkts; + + nb_pkts = rte_kni_tx_burst(kni, bufs, nb_bufs); + + kni_q->tx.pkts += nb_pkts; + + return nb_pkts; +} + +static void * +kni_handle_request(void *param) +{ + struct pmd_internals *internals = param; +#define MS 1000 + + while (!internals->stop_thread) { + rte_kni_handle_request(internals->kni); + usleep(500 * MS); + } + + return param; +} + +static int +eth_kni_start(struct rte_eth_dev *dev) +{ + struct pmd_internals *internals = dev->data->dev_private; + uint16_t port_id = dev->data->port_id; + struct rte_mempool *mb_pool; + struct rte_kni_conf conf; + const char *name = dev->device->name + 4; /* remove net_ */ + + mb_pool = internals->rx_queues[0].mb_pool; + strlcpy(conf.name, name, RTE_KNI_NAMESIZE); + conf.force_bind = 0; + conf.group_id = port_id; + conf.mbuf_size = + rte_pktmbuf_data_room_size(mb_pool) - RTE_PKTMBUF_HEADROOM; + conf.mtu = KNI_ETHER_MTU(conf.mbuf_size); + + internals->kni = rte_kni_alloc(mb_pool, &conf, NULL); + if (internals->kni == NULL) { + PMD_LOG(ERR, + "Fail to create kni interface for port: %d", + port_id); + return -1; + } + + return 0; +} + +static int +eth_kni_dev_start(struct rte_eth_dev *dev) +{ + struct pmd_internals *internals = dev->data->dev_private; + int ret; + + if (internals->is_kni_started == 0) { + ret = eth_kni_start(dev); + if (ret) + return -1; + internals->is_kni_started = 1; + } + + if (internals->no_request_thread == 0) { + internals->stop_thread = 0; + + ret = rte_ctrl_thread_create(&internals->thread, + "kni_handle_req", NULL, + kni_handle_request, internals); + if (ret) { + PMD_LOG(ERR, + "Fail to create kni request thread"); + return -1; + } + } + + dev->data->dev_link.link_status = 1; + + return 0; +} + +static void +eth_kni_dev_stop(struct rte_eth_dev *dev) +{ + struct pmd_internals *internals = dev->data->dev_private; + int ret; + + if (internals->no_request_thread == 0 && internals->stop_thread == 0) { + internals->stop_thread = 1; + + ret = pthread_cancel(internals->thread); + if (ret) + PMD_LOG(ERR, "Can't cancel the thread"); + + ret = pthread_join(internals->thread, NULL); + if (ret) + PMD_LOG(ERR, "Can't join the thread"); + } + + dev->data->dev_link.link_status = 0; +} + +static void +eth_kni_close(struct rte_eth_dev *eth_dev) +{ + struct pmd_internals *internals; + int ret; + + eth_kni_dev_stop(eth_dev); + + /* mac_addrs must not be freed alone because part of dev_private */ + eth_dev->data->mac_addrs = NULL; + + internals = eth_dev->data->dev_private; + ret = rte_kni_release(internals->kni); + if (ret) + PMD_LOG(WARNING, "Not able to release kni for %s", + eth_dev->data->name); +} + +static int +eth_kni_dev_configure(struct rte_eth_dev *dev __rte_unused) +{ + return 0; +} + +static int +eth_kni_dev_info(struct rte_eth_dev *dev __rte_unused, + struct rte_eth_dev_info *dev_info) +{ + dev_info->max_mac_addrs = 1; + dev_info->max_rx_pktlen = UINT32_MAX; + dev_info->max_rx_queues = KNI_MAX_QUEUE_PER_PORT; + dev_info->max_tx_queues = KNI_MAX_QUEUE_PER_PORT; + 
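/*
 * Illustrative sketch (annotation, not part of the upstream patch): this
 * PMD is instantiated as a virtual device, either from the EAL command
 * line (e.g. "--vdev=net_kni0") or programmatically.  The device name and
 * the optional no_request_thread devarg below are example values.
 *
 *   if (rte_vdev_init("net_kni0", "no_request_thread=1") != 0)
 *           printf("failed to create net_kni0\n");
 */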
dev_info->min_rx_bufsize = 0; + + return 0; +} + +static int +eth_kni_rx_queue_setup(struct rte_eth_dev *dev, + uint16_t rx_queue_id, + uint16_t nb_rx_desc __rte_unused, + unsigned int socket_id __rte_unused, + const struct rte_eth_rxconf *rx_conf __rte_unused, + struct rte_mempool *mb_pool) +{ + struct pmd_internals *internals = dev->data->dev_private; + struct pmd_queue *q; + + q = &internals->rx_queues[rx_queue_id]; + q->internals = internals; + q->mb_pool = mb_pool; + + dev->data->rx_queues[rx_queue_id] = q; + + return 0; +} + +static int +eth_kni_tx_queue_setup(struct rte_eth_dev *dev, + uint16_t tx_queue_id, + uint16_t nb_tx_desc __rte_unused, + unsigned int socket_id __rte_unused, + const struct rte_eth_txconf *tx_conf __rte_unused) +{ + struct pmd_internals *internals = dev->data->dev_private; + struct pmd_queue *q; + + q = &internals->tx_queues[tx_queue_id]; + q->internals = internals; + + dev->data->tx_queues[tx_queue_id] = q; + + return 0; +} + +static void +eth_kni_queue_release(void *q __rte_unused) +{ +} + +static int +eth_kni_link_update(struct rte_eth_dev *dev __rte_unused, + int wait_to_complete __rte_unused) +{ + return 0; +} + +static int +eth_kni_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) +{ + unsigned long rx_packets_total = 0, rx_bytes_total = 0; + unsigned long tx_packets_total = 0, tx_bytes_total = 0; + struct rte_eth_dev_data *data = dev->data; + unsigned int i, num_stats; + struct pmd_queue *q; + + num_stats = RTE_MIN((unsigned int)RTE_ETHDEV_QUEUE_STAT_CNTRS, + data->nb_rx_queues); + for (i = 0; i < num_stats; i++) { + q = data->rx_queues[i]; + stats->q_ipackets[i] = q->rx.pkts; + stats->q_ibytes[i] = q->rx.bytes; + rx_packets_total += stats->q_ipackets[i]; + rx_bytes_total += stats->q_ibytes[i]; + } + + num_stats = RTE_MIN((unsigned int)RTE_ETHDEV_QUEUE_STAT_CNTRS, + data->nb_tx_queues); + for (i = 0; i < num_stats; i++) { + q = data->tx_queues[i]; + stats->q_opackets[i] = q->tx.pkts; + stats->q_obytes[i] = q->tx.bytes; + tx_packets_total += stats->q_opackets[i]; + tx_bytes_total += stats->q_obytes[i]; + } + + stats->ipackets = rx_packets_total; + stats->ibytes = rx_bytes_total; + stats->opackets = tx_packets_total; + stats->obytes = tx_bytes_total; + + return 0; +} + +static int +eth_kni_stats_reset(struct rte_eth_dev *dev) +{ + struct rte_eth_dev_data *data = dev->data; + struct pmd_queue *q; + unsigned int i; + + for (i = 0; i < data->nb_rx_queues; i++) { + q = data->rx_queues[i]; + q->rx.pkts = 0; + q->rx.bytes = 0; + } + for (i = 0; i < data->nb_tx_queues; i++) { + q = data->tx_queues[i]; + q->tx.pkts = 0; + q->tx.bytes = 0; + } + + return 0; +} + +static const struct eth_dev_ops eth_kni_ops = { + .dev_start = eth_kni_dev_start, + .dev_stop = eth_kni_dev_stop, + .dev_close = eth_kni_close, + .dev_configure = eth_kni_dev_configure, + .dev_infos_get = eth_kni_dev_info, + .rx_queue_setup = eth_kni_rx_queue_setup, + .tx_queue_setup = eth_kni_tx_queue_setup, + .rx_queue_release = eth_kni_queue_release, + .tx_queue_release = eth_kni_queue_release, + .link_update = eth_kni_link_update, + .stats_get = eth_kni_stats_get, + .stats_reset = eth_kni_stats_reset, +}; + +static struct rte_eth_dev * +eth_kni_create(struct rte_vdev_device *vdev, + struct eth_kni_args *args, + unsigned int numa_node) +{ + struct pmd_internals *internals; + struct rte_eth_dev_data *data; + struct rte_eth_dev *eth_dev; + + PMD_LOG(INFO, "Creating kni ethdev on numa socket %u", + numa_node); + + /* reserve an ethdev entry */ + eth_dev = rte_eth_vdev_allocate(vdev, 
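/*
 * Illustrative sketch (annotation, not part of the upstream patch): the
 * per-queue counters filled in by eth_kni_stats_get() above are reported
 * through the generic ethdev stats API.  Port 0 is an example value.
 *
 *   struct rte_eth_stats stats;
 *
 *   if (rte_eth_stats_get(0, &stats) == 0)
 *           printf("kni port rx=%" PRIu64 " tx=%" PRIu64 "\n",
 *                  stats.ipackets, stats.opackets);
 */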
sizeof(*internals)); + if (!eth_dev) + return NULL; + + internals = eth_dev->data->dev_private; + data = eth_dev->data; + data->nb_rx_queues = 1; + data->nb_tx_queues = 1; + data->dev_link = pmd_link; + data->mac_addrs = &internals->eth_addr; + data->promiscuous = 1; + data->all_multicast = 1; + + data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE; + + rte_eth_random_addr(internals->eth_addr.addr_bytes); + + eth_dev->dev_ops = ð_kni_ops; + + internals->no_request_thread = args->no_request_thread; + + return eth_dev; +} + +static int +kni_init(void) +{ + if (is_kni_initialized == 0) + rte_kni_init(MAX_KNI_PORTS); + + is_kni_initialized++; + + return 0; +} + +static int +eth_kni_kvargs_process(struct eth_kni_args *args, const char *params) +{ + struct rte_kvargs *kvlist; + + kvlist = rte_kvargs_parse(params, valid_arguments); + if (kvlist == NULL) + return -1; + + memset(args, 0, sizeof(struct eth_kni_args)); + + if (rte_kvargs_count(kvlist, ETH_KNI_NO_REQUEST_THREAD_ARG) == 1) + args->no_request_thread = 1; + + rte_kvargs_free(kvlist); + + return 0; +} + +static int +eth_kni_probe(struct rte_vdev_device *vdev) +{ + struct rte_eth_dev *eth_dev; + struct eth_kni_args args; + const char *name; + const char *params; + int ret; + + name = rte_vdev_device_name(vdev); + params = rte_vdev_device_args(vdev); + PMD_LOG(INFO, "Initializing eth_kni for %s", name); + + if (rte_eal_process_type() == RTE_PROC_SECONDARY) { + eth_dev = rte_eth_dev_attach_secondary(name); + if (!eth_dev) { + PMD_LOG(ERR, "Failed to probe %s", name); + return -1; + } + /* TODO: request info from primary to set up Rx and Tx */ + eth_dev->dev_ops = ð_kni_ops; + eth_dev->device = &vdev->device; + rte_eth_dev_probing_finish(eth_dev); + return 0; + } + + ret = eth_kni_kvargs_process(&args, params); + if (ret < 0) + return ret; + + ret = kni_init(); + if (ret < 0) + return ret; + + eth_dev = eth_kni_create(vdev, &args, rte_socket_id()); + if (eth_dev == NULL) + goto kni_uninit; + + eth_dev->rx_pkt_burst = eth_kni_rx; + eth_dev->tx_pkt_burst = eth_kni_tx; + + rte_eth_dev_probing_finish(eth_dev); + return 0; + +kni_uninit: + is_kni_initialized--; + if (is_kni_initialized == 0) + rte_kni_close(); + return -1; +} + +static int +eth_kni_remove(struct rte_vdev_device *vdev) +{ + struct rte_eth_dev *eth_dev; + const char *name; + + name = rte_vdev_device_name(vdev); + PMD_LOG(INFO, "Un-Initializing eth_kni for %s", name); + + /* find the ethdev entry */ + eth_dev = rte_eth_dev_allocated(name); + if (eth_dev == NULL) + return -1; + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) { + eth_kni_dev_stop(eth_dev); + return rte_eth_dev_release_port(eth_dev); + } + + eth_kni_close(eth_dev); + rte_eth_dev_release_port(eth_dev); + + is_kni_initialized--; + if (is_kni_initialized == 0) + rte_kni_close(); + + return 0; +} + +static struct rte_vdev_driver eth_kni_drv = { + .probe = eth_kni_probe, + .remove = eth_kni_remove, +}; + +RTE_PMD_REGISTER_VDEV(net_kni, eth_kni_drv); +RTE_PMD_REGISTER_PARAM_STRING(net_kni, ETH_KNI_NO_REQUEST_THREAD_ARG "="); + +RTE_INIT(eth_kni_init_log) +{ + eth_kni_logtype = rte_log_register("pmd.net.kni"); + if (eth_kni_logtype >= 0) + rte_log_set_level(eth_kni_logtype, RTE_LOG_NOTICE); +} diff --git a/src/spdk/dpdk/drivers/net/kni/rte_pmd_kni_version.map b/src/spdk/dpdk/drivers/net/kni/rte_pmd_kni_version.map new file mode 100644 index 000000000..f9f17e4f6 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/kni/rte_pmd_kni_version.map @@ -0,0 +1,3 @@ +DPDK_20.0 { + local: *; +}; diff --git 
a/src/spdk/dpdk/drivers/net/liquidio/Makefile b/src/spdk/dpdk/drivers/net/liquidio/Makefile new file mode 100644 index 000000000..d7fda7f52 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/liquidio/Makefile @@ -0,0 +1,30 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2017 Cavium, Inc +# + +include $(RTE_SDK)/mk/rte.vars.mk + +# +# library name +# +LIB = librte_pmd_lio.a + +CFLAGS += -O3 +CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR)/base -I$(SRCDIR) +LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring +LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs +LDLIBS += -lrte_bus_pci + +EXPORT_MAP := rte_pmd_liquidio_version.map + +VPATH += $(RTE_SDK)/drivers/net/liquidio/base + +# +# all source are stored in SRCS-y +# +SRCS-$(CONFIG_RTE_LIBRTE_LIO_PMD) += lio_ethdev.c +SRCS-$(CONFIG_RTE_LIBRTE_LIO_PMD) += lio_rxtx.c +SRCS-$(CONFIG_RTE_LIBRTE_LIO_PMD) += lio_23xx_vf.c +SRCS-$(CONFIG_RTE_LIBRTE_LIO_PMD) += lio_mbox.c + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/src/spdk/dpdk/drivers/net/liquidio/base/lio_23xx_reg.h b/src/spdk/dpdk/drivers/net/liquidio/base/lio_23xx_reg.h new file mode 100644 index 000000000..9f28504b5 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/liquidio/base/lio_23xx_reg.h @@ -0,0 +1,165 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Cavium, Inc + */ + +#ifndef _LIO_23XX_REG_H_ +#define _LIO_23XX_REG_H_ + +/* ###################### REQUEST QUEUE ######################### */ + +/* 64 registers for Input Queues Start Addr - SLI_PKT(0..63)_INSTR_BADDR */ +#define CN23XX_SLI_PKT_INSTR_BADDR_START64 0x10010 + +/* 64 registers for Input Doorbell - SLI_PKT(0..63)_INSTR_BAOFF_DBELL */ +#define CN23XX_SLI_PKT_INSTR_BADDR_DBELL_START 0x10020 + +/* 64 registers for Input Queue size - SLI_PKT(0..63)_INSTR_FIFO_RSIZE */ +#define CN23XX_SLI_PKT_INSTR_FIFO_RSIZE_START 0x10030 + +/* 64 registers for Input Queue Instr Count - SLI_PKT_IN_DONE(0..63)_CNTS */ +#define CN23XX_SLI_PKT_IN_DONE_CNTS_START64 0x10040 + +/* 64 registers (64-bit) - ES, RO, NS, Arbitration for Input Queue Data & + * gather list fetches. SLI_PKT(0..63)_INPUT_CONTROL. + */ +#define CN23XX_SLI_PKT_INPUT_CONTROL_START64 0x10000 + +/* ------- Request Queue Macros --------- */ + +/* Each Input Queue register is at a 16-byte Offset in BAR0 */ +#define CN23XX_IQ_OFFSET 0x20000 + +#define CN23XX_SLI_IQ_PKT_CONTROL64(iq) \ + (CN23XX_SLI_PKT_INPUT_CONTROL_START64 + ((iq) * CN23XX_IQ_OFFSET)) + +#define CN23XX_SLI_IQ_BASE_ADDR64(iq) \ + (CN23XX_SLI_PKT_INSTR_BADDR_START64 + ((iq) * CN23XX_IQ_OFFSET)) + +#define CN23XX_SLI_IQ_SIZE(iq) \ + (CN23XX_SLI_PKT_INSTR_FIFO_RSIZE_START + ((iq) * CN23XX_IQ_OFFSET)) + +#define CN23XX_SLI_IQ_DOORBELL(iq) \ + (CN23XX_SLI_PKT_INSTR_BADDR_DBELL_START + ((iq) * CN23XX_IQ_OFFSET)) + +#define CN23XX_SLI_IQ_INSTR_COUNT64(iq) \ + (CN23XX_SLI_PKT_IN_DONE_CNTS_START64 + ((iq) * CN23XX_IQ_OFFSET)) + +/* Number of instructions to be read in one MAC read request. 
+ * setting to Max value(4) + */ +#define CN23XX_PKT_INPUT_CTL_RDSIZE (3 << 25) +#define CN23XX_PKT_INPUT_CTL_IS_64B (1 << 24) +#define CN23XX_PKT_INPUT_CTL_RST (1 << 23) +#define CN23XX_PKT_INPUT_CTL_QUIET (1 << 28) +#define CN23XX_PKT_INPUT_CTL_RING_ENB (1 << 22) +#define CN23XX_PKT_INPUT_CTL_DATA_ES_64B_SWAP (1 << 6) +#define CN23XX_PKT_INPUT_CTL_USE_CSR (1 << 4) +#define CN23XX_PKT_INPUT_CTL_GATHER_ES_64B_SWAP (2) + +/* These bits[47:44] select the Physical function number within the MAC */ +#define CN23XX_PKT_INPUT_CTL_PF_NUM_POS 45 +/* These bits[43:32] select the function number within the PF */ +#define CN23XX_PKT_INPUT_CTL_VF_NUM_POS 32 + +#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN +#define CN23XX_PKT_INPUT_CTL_MASK \ + (CN23XX_PKT_INPUT_CTL_RDSIZE | \ + CN23XX_PKT_INPUT_CTL_DATA_ES_64B_SWAP | \ + CN23XX_PKT_INPUT_CTL_USE_CSR) +#elif RTE_BYTE_ORDER == RTE_BIG_ENDIAN +#define CN23XX_PKT_INPUT_CTL_MASK \ + (CN23XX_PKT_INPUT_CTL_RDSIZE | \ + CN23XX_PKT_INPUT_CTL_DATA_ES_64B_SWAP | \ + CN23XX_PKT_INPUT_CTL_USE_CSR | \ + CN23XX_PKT_INPUT_CTL_GATHER_ES_64B_SWAP) +#endif + +/* ############################ OUTPUT QUEUE ######################### */ + +/* 64 registers for Output queue control - SLI_PKT(0..63)_OUTPUT_CONTROL */ +#define CN23XX_SLI_PKT_OUTPUT_CONTROL_START 0x10050 + +/* 64 registers for Output queue buffer and info size + * SLI_PKT(0..63)_OUT_SIZE + */ +#define CN23XX_SLI_PKT_OUT_SIZE 0x10060 + +/* 64 registers for Output Queue Start Addr - SLI_PKT(0..63)_SLIST_BADDR */ +#define CN23XX_SLI_SLIST_BADDR_START64 0x10070 + +/* 64 registers for Output Queue Packet Credits + * SLI_PKT(0..63)_SLIST_BAOFF_DBELL + */ +#define CN23XX_SLI_PKT_SLIST_BAOFF_DBELL_START 0x10080 + +/* 64 registers for Output Queue size - SLI_PKT(0..63)_SLIST_FIFO_RSIZE */ +#define CN23XX_SLI_PKT_SLIST_FIFO_RSIZE_START 0x10090 + +/* 64 registers for Output Queue Packet Count - SLI_PKT(0..63)_CNTS */ +#define CN23XX_SLI_PKT_CNTS_START 0x100B0 + +/* Each Output Queue register is at a 16-byte Offset in BAR0 */ +#define CN23XX_OQ_OFFSET 0x20000 + +/* ------- Output Queue Macros --------- */ + +#define CN23XX_SLI_OQ_PKT_CONTROL(oq) \ + (CN23XX_SLI_PKT_OUTPUT_CONTROL_START + ((oq) * CN23XX_OQ_OFFSET)) + +#define CN23XX_SLI_OQ_BASE_ADDR64(oq) \ + (CN23XX_SLI_SLIST_BADDR_START64 + ((oq) * CN23XX_OQ_OFFSET)) + +#define CN23XX_SLI_OQ_SIZE(oq) \ + (CN23XX_SLI_PKT_SLIST_FIFO_RSIZE_START + ((oq) * CN23XX_OQ_OFFSET)) + +#define CN23XX_SLI_OQ_BUFF_INFO_SIZE(oq) \ + (CN23XX_SLI_PKT_OUT_SIZE + ((oq) * CN23XX_OQ_OFFSET)) + +#define CN23XX_SLI_OQ_PKTS_SENT(oq) \ + (CN23XX_SLI_PKT_CNTS_START + ((oq) * CN23XX_OQ_OFFSET)) + +#define CN23XX_SLI_OQ_PKTS_CREDIT(oq) \ + (CN23XX_SLI_PKT_SLIST_BAOFF_DBELL_START + ((oq) * CN23XX_OQ_OFFSET)) + +/* ------------------ Masks ---------------- */ +#define CN23XX_PKT_OUTPUT_CTL_IPTR (1 << 11) +#define CN23XX_PKT_OUTPUT_CTL_ES (1 << 9) +#define CN23XX_PKT_OUTPUT_CTL_NSR (1 << 8) +#define CN23XX_PKT_OUTPUT_CTL_ROR (1 << 7) +#define CN23XX_PKT_OUTPUT_CTL_DPTR (1 << 6) +#define CN23XX_PKT_OUTPUT_CTL_BMODE (1 << 5) +#define CN23XX_PKT_OUTPUT_CTL_ES_P (1 << 3) +#define CN23XX_PKT_OUTPUT_CTL_NSR_P (1 << 2) +#define CN23XX_PKT_OUTPUT_CTL_ROR_P (1 << 1) +#define CN23XX_PKT_OUTPUT_CTL_RING_ENB (1 << 0) + +/* Rings per Virtual Function [RO] */ +#define CN23XX_PKT_INPUT_CTL_RPVF_MASK 0x3F +#define CN23XX_PKT_INPUT_CTL_RPVF_POS 48 + +/* These bits[47:44][RO] give the Physical function + * number info within the MAC + */ +#define CN23XX_PKT_INPUT_CTL_PF_NUM_MASK 0x7 + +/* These bits[43:32][RO] give the virtual 
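Not part of the patch: a throwaway snippet, assuming lio_23xx_reg.h from this patch is on the include path, that simply prints a couple of the per-queue CSR offsets built by the macros above; each queue's registers sit at base + queue * 0x20000.

#include <stdio.h>
#include "lio_23xx_reg.h"	/* the register map defined above */

int main(void)
{
	/* IQ 2 doorbell: 0x10020 + 2 * 0x20000 = 0x50020 */
	printf("IQ2 doorbell offset:  0x%x\n",
	       (unsigned)CN23XX_SLI_IQ_DOORBELL(2));
	/* OQ 2 packets-sent counter: 0x100B0 + 2 * 0x20000 = 0x500B0 */
	printf("OQ2 pkts-sent offset: 0x%x\n",
	       (unsigned)CN23XX_SLI_OQ_PKTS_SENT(2));
	return 0;
}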
function + * number info within the PF + */ +#define CN23XX_PKT_INPUT_CTL_VF_NUM_MASK 0x1FFF + +/* ######################### Mailbox Reg Macros ######################## */ +#define CN23XX_SLI_PKT_PF_VF_MBOX_SIG_START 0x10200 +#define CN23XX_VF_SLI_PKT_MBOX_INT_START 0x10210 + +#define CN23XX_SLI_MBOX_OFFSET 0x20000 +#define CN23XX_SLI_MBOX_SIG_IDX_OFFSET 0x8 + +#define CN23XX_SLI_PKT_PF_VF_MBOX_SIG(q, idx) \ + (CN23XX_SLI_PKT_PF_VF_MBOX_SIG_START + \ + ((q) * CN23XX_SLI_MBOX_OFFSET + \ + (idx) * CN23XX_SLI_MBOX_SIG_IDX_OFFSET)) + +#define CN23XX_VF_SLI_PKT_MBOX_INT(q) \ + (CN23XX_VF_SLI_PKT_MBOX_INT_START + ((q) * CN23XX_SLI_MBOX_OFFSET)) + +#endif /* _LIO_23XX_REG_H_ */ diff --git a/src/spdk/dpdk/drivers/net/liquidio/base/lio_23xx_vf.c b/src/spdk/dpdk/drivers/net/liquidio/base/lio_23xx_vf.c new file mode 100644 index 000000000..ddbc8c0e0 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/liquidio/base/lio_23xx_vf.c @@ -0,0 +1,513 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Cavium, Inc + */ + +#include + +#include +#include +#include + +#include "lio_logs.h" +#include "lio_23xx_vf.h" +#include "lio_23xx_reg.h" +#include "lio_mbox.h" + +static int +cn23xx_vf_reset_io_queues(struct lio_device *lio_dev, uint32_t num_queues) +{ + uint32_t loop = CN23XX_VF_BUSY_READING_REG_LOOP_COUNT; + uint64_t d64, q_no; + int ret_val = 0; + + PMD_INIT_FUNC_TRACE(); + + for (q_no = 0; q_no < num_queues; q_no++) { + /* set RST bit to 1. This bit applies to both IQ and OQ */ + d64 = lio_read_csr64(lio_dev, + CN23XX_SLI_IQ_PKT_CONTROL64(q_no)); + d64 = d64 | CN23XX_PKT_INPUT_CTL_RST; + lio_write_csr64(lio_dev, CN23XX_SLI_IQ_PKT_CONTROL64(q_no), + d64); + } + + /* wait until the RST bit is clear or the RST and QUIET bits are set */ + for (q_no = 0; q_no < num_queues; q_no++) { + volatile uint64_t reg_val; + + reg_val = lio_read_csr64(lio_dev, + CN23XX_SLI_IQ_PKT_CONTROL64(q_no)); + while ((reg_val & CN23XX_PKT_INPUT_CTL_RST) && + !(reg_val & CN23XX_PKT_INPUT_CTL_QUIET) && + loop) { + reg_val = lio_read_csr64( + lio_dev, + CN23XX_SLI_IQ_PKT_CONTROL64(q_no)); + loop = loop - 1; + } + + if (loop == 0) { + lio_dev_err(lio_dev, + "clearing the reset reg failed or setting the quiet reg failed for qno: %lu\n", + (unsigned long)q_no); + return -1; + } + + reg_val = reg_val & ~CN23XX_PKT_INPUT_CTL_RST; + lio_write_csr64(lio_dev, CN23XX_SLI_IQ_PKT_CONTROL64(q_no), + reg_val); + + reg_val = lio_read_csr64( + lio_dev, CN23XX_SLI_IQ_PKT_CONTROL64(q_no)); + if (reg_val & CN23XX_PKT_INPUT_CTL_RST) { + lio_dev_err(lio_dev, + "clearing the reset failed for qno: %lu\n", + (unsigned long)q_no); + ret_val = -1; + } + } + + return ret_val; +} + +static int +cn23xx_vf_setup_global_input_regs(struct lio_device *lio_dev) +{ + uint64_t q_no; + uint64_t d64; + + PMD_INIT_FUNC_TRACE(); + + if (cn23xx_vf_reset_io_queues(lio_dev, + lio_dev->sriov_info.rings_per_vf)) + return -1; + + for (q_no = 0; q_no < (lio_dev->sriov_info.rings_per_vf); q_no++) { + lio_write_csr64(lio_dev, CN23XX_SLI_IQ_DOORBELL(q_no), + 0xFFFFFFFF); + + d64 = lio_read_csr64(lio_dev, + CN23XX_SLI_IQ_INSTR_COUNT64(q_no)); + + d64 &= 0xEFFFFFFFFFFFFFFFL; + + lio_write_csr64(lio_dev, CN23XX_SLI_IQ_INSTR_COUNT64(q_no), + d64); + + /* Select ES, RO, NS, RDSIZE,DPTR Fomat#0 for + * the Input Queues + */ + lio_write_csr64(lio_dev, CN23XX_SLI_IQ_PKT_CONTROL64(q_no), + CN23XX_PKT_INPUT_CTL_MASK); + } + + return 0; +} + +static void +cn23xx_vf_setup_global_output_regs(struct lio_device *lio_dev) +{ + uint32_t reg_val; + uint32_t q_no; + + PMD_INIT_FUNC_TRACE(); + 
+ for (q_no = 0; q_no < lio_dev->sriov_info.rings_per_vf; q_no++) { + lio_write_csr(lio_dev, CN23XX_SLI_OQ_PKTS_CREDIT(q_no), + 0xFFFFFFFF); + + reg_val = + lio_read_csr(lio_dev, CN23XX_SLI_OQ_PKTS_SENT(q_no)); + + reg_val &= 0xEFFFFFFFFFFFFFFFL; + + lio_write_csr(lio_dev, CN23XX_SLI_OQ_PKTS_SENT(q_no), reg_val); + + reg_val = + lio_read_csr(lio_dev, CN23XX_SLI_OQ_PKT_CONTROL(q_no)); + + /* set IPTR & DPTR */ + reg_val |= + (CN23XX_PKT_OUTPUT_CTL_IPTR | CN23XX_PKT_OUTPUT_CTL_DPTR); + + /* reset BMODE */ + reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_BMODE); + + /* No Relaxed Ordering, No Snoop, 64-bit Byte swap + * for Output Queue Scatter List + * reset ROR_P, NSR_P + */ + reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_ROR_P); + reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_NSR_P); + +#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN + reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_ES_P); +#elif RTE_BYTE_ORDER == RTE_BIG_ENDIAN + reg_val |= (CN23XX_PKT_OUTPUT_CTL_ES_P); +#endif + /* No Relaxed Ordering, No Snoop, 64-bit Byte swap + * for Output Queue Data + * reset ROR, NSR + */ + reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_ROR); + reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_NSR); + /* set the ES bit */ + reg_val |= (CN23XX_PKT_OUTPUT_CTL_ES); + + /* write all the selected settings */ + lio_write_csr(lio_dev, CN23XX_SLI_OQ_PKT_CONTROL(q_no), + reg_val); + } +} + +static int +cn23xx_vf_setup_device_regs(struct lio_device *lio_dev) +{ + PMD_INIT_FUNC_TRACE(); + + if (cn23xx_vf_setup_global_input_regs(lio_dev)) + return -1; + + cn23xx_vf_setup_global_output_regs(lio_dev); + + return 0; +} + +static void +cn23xx_vf_setup_iq_regs(struct lio_device *lio_dev, uint32_t iq_no) +{ + struct lio_instr_queue *iq = lio_dev->instr_queue[iq_no]; + uint64_t pkt_in_done = 0; + + PMD_INIT_FUNC_TRACE(); + + /* Write the start of the input queue's ring and its size */ + lio_write_csr64(lio_dev, CN23XX_SLI_IQ_BASE_ADDR64(iq_no), + iq->base_addr_dma); + lio_write_csr(lio_dev, CN23XX_SLI_IQ_SIZE(iq_no), iq->nb_desc); + + /* Remember the doorbell & instruction count register addr + * for this queue + */ + iq->doorbell_reg = (uint8_t *)lio_dev->hw_addr + + CN23XX_SLI_IQ_DOORBELL(iq_no); + iq->inst_cnt_reg = (uint8_t *)lio_dev->hw_addr + + CN23XX_SLI_IQ_INSTR_COUNT64(iq_no); + lio_dev_dbg(lio_dev, "InstQ[%d]:dbell reg @ 0x%p instcnt_reg @ 0x%p\n", + iq_no, iq->doorbell_reg, iq->inst_cnt_reg); + + /* Store the current instruction counter (used in flush_iq + * calculation) + */ + pkt_in_done = rte_read64(iq->inst_cnt_reg); + + /* Clear the count by writing back what we read, but don't + * enable data traffic here + */ + rte_write64(pkt_in_done, iq->inst_cnt_reg); +} + +static void +cn23xx_vf_setup_oq_regs(struct lio_device *lio_dev, uint32_t oq_no) +{ + struct lio_droq *droq = lio_dev->droq[oq_no]; + + PMD_INIT_FUNC_TRACE(); + + lio_write_csr64(lio_dev, CN23XX_SLI_OQ_BASE_ADDR64(oq_no), + droq->desc_ring_dma); + lio_write_csr(lio_dev, CN23XX_SLI_OQ_SIZE(oq_no), droq->nb_desc); + + lio_write_csr(lio_dev, CN23XX_SLI_OQ_BUFF_INFO_SIZE(oq_no), + (droq->buffer_size | (OCTEON_RH_SIZE << 16))); + + /* Get the mapped address of the pkt_sent and pkts_credit regs */ + droq->pkts_sent_reg = (uint8_t *)lio_dev->hw_addr + + CN23XX_SLI_OQ_PKTS_SENT(oq_no); + droq->pkts_credit_reg = (uint8_t *)lio_dev->hw_addr + + CN23XX_SLI_OQ_PKTS_CREDIT(oq_no); +} + +static void +cn23xx_vf_free_mbox(struct lio_device *lio_dev) +{ + PMD_INIT_FUNC_TRACE(); + + rte_free(lio_dev->mbox[0]); + lio_dev->mbox[0] = NULL; + + rte_free(lio_dev->mbox); + lio_dev->mbox = NULL; +} + +static int +cn23xx_vf_setup_mbox(struct 
lio_device *lio_dev) +{ + struct lio_mbox *mbox; + + PMD_INIT_FUNC_TRACE(); + + if (lio_dev->mbox == NULL) { + lio_dev->mbox = rte_zmalloc(NULL, sizeof(void *), 0); + if (lio_dev->mbox == NULL) + return -ENOMEM; + } + + mbox = rte_zmalloc(NULL, sizeof(struct lio_mbox), 0); + if (mbox == NULL) { + rte_free(lio_dev->mbox); + lio_dev->mbox = NULL; + return -ENOMEM; + } + + rte_spinlock_init(&mbox->lock); + + mbox->lio_dev = lio_dev; + + mbox->q_no = 0; + + mbox->state = LIO_MBOX_STATE_IDLE; + + /* VF mbox interrupt reg */ + mbox->mbox_int_reg = (uint8_t *)lio_dev->hw_addr + + CN23XX_VF_SLI_PKT_MBOX_INT(0); + /* VF reads from SIG0 reg */ + mbox->mbox_read_reg = (uint8_t *)lio_dev->hw_addr + + CN23XX_SLI_PKT_PF_VF_MBOX_SIG(0, 0); + /* VF writes into SIG1 reg */ + mbox->mbox_write_reg = (uint8_t *)lio_dev->hw_addr + + CN23XX_SLI_PKT_PF_VF_MBOX_SIG(0, 1); + + lio_dev->mbox[0] = mbox; + + rte_write64(LIO_PFVFSIG, mbox->mbox_read_reg); + + return 0; +} + +static int +cn23xx_vf_enable_io_queues(struct lio_device *lio_dev) +{ + uint32_t q_no; + + PMD_INIT_FUNC_TRACE(); + + for (q_no = 0; q_no < lio_dev->num_iqs; q_no++) { + uint64_t reg_val; + + /* set the corresponding IQ IS_64B bit */ + if (lio_dev->io_qmask.iq64B & (1ULL << q_no)) { + reg_val = lio_read_csr64( + lio_dev, + CN23XX_SLI_IQ_PKT_CONTROL64(q_no)); + reg_val = reg_val | CN23XX_PKT_INPUT_CTL_IS_64B; + lio_write_csr64(lio_dev, + CN23XX_SLI_IQ_PKT_CONTROL64(q_no), + reg_val); + } + + /* set the corresponding IQ ENB bit */ + if (lio_dev->io_qmask.iq & (1ULL << q_no)) { + reg_val = lio_read_csr64( + lio_dev, + CN23XX_SLI_IQ_PKT_CONTROL64(q_no)); + reg_val = reg_val | CN23XX_PKT_INPUT_CTL_RING_ENB; + lio_write_csr64(lio_dev, + CN23XX_SLI_IQ_PKT_CONTROL64(q_no), + reg_val); + } + } + for (q_no = 0; q_no < lio_dev->num_oqs; q_no++) { + uint32_t reg_val; + + /* set the corresponding OQ ENB bit */ + if (lio_dev->io_qmask.oq & (1ULL << q_no)) { + reg_val = lio_read_csr( + lio_dev, + CN23XX_SLI_OQ_PKT_CONTROL(q_no)); + reg_val = reg_val | CN23XX_PKT_OUTPUT_CTL_RING_ENB; + lio_write_csr(lio_dev, + CN23XX_SLI_OQ_PKT_CONTROL(q_no), + reg_val); + } + } + + return 0; +} + +static void +cn23xx_vf_disable_io_queues(struct lio_device *lio_dev) +{ + uint32_t num_queues; + + PMD_INIT_FUNC_TRACE(); + + /* per HRM, rings can only be disabled via reset operation, + * NOT via SLI_PKT()_INPUT/OUTPUT_CONTROL[ENB] + */ + num_queues = lio_dev->num_iqs; + if (num_queues < lio_dev->num_oqs) + num_queues = lio_dev->num_oqs; + + cn23xx_vf_reset_io_queues(lio_dev, num_queues); +} + +void +cn23xx_vf_ask_pf_to_do_flr(struct lio_device *lio_dev) +{ + struct lio_mbox_cmd mbox_cmd; + + memset(&mbox_cmd, 0, sizeof(struct lio_mbox_cmd)); + mbox_cmd.msg.s.type = LIO_MBOX_REQUEST; + mbox_cmd.msg.s.resp_needed = 0; + mbox_cmd.msg.s.cmd = LIO_VF_FLR_REQUEST; + mbox_cmd.msg.s.len = 1; + mbox_cmd.q_no = 0; + mbox_cmd.recv_len = 0; + mbox_cmd.recv_status = 0; + mbox_cmd.fn = NULL; + mbox_cmd.fn_arg = 0; + + lio_mbox_write(lio_dev, &mbox_cmd); +} + +static void +cn23xx_pfvf_hs_callback(struct lio_device *lio_dev, + struct lio_mbox_cmd *cmd, void *arg) +{ + uint32_t major = 0; + + PMD_INIT_FUNC_TRACE(); + + rte_memcpy((uint8_t *)&lio_dev->pfvf_hsword, cmd->msg.s.params, 6); + if (cmd->recv_len > 1) { + struct lio_version *lio_ver = (struct lio_version *)cmd->data; + + major = lio_ver->major; + major = major << 16; + } + + rte_atomic64_set((rte_atomic64_t *)arg, major | 1); +} + +int +cn23xx_pfvf_handshake(struct lio_device *lio_dev) +{ + struct lio_mbox_cmd mbox_cmd; + struct lio_version 
*lio_ver = (struct lio_version *)&mbox_cmd.data[0]; + uint32_t q_no, count = 0; + rte_atomic64_t status; + uint32_t pfmajor; + uint32_t vfmajor; + uint32_t ret; + + PMD_INIT_FUNC_TRACE(); + + /* Sending VF_ACTIVE indication to the PF driver */ + lio_dev_dbg(lio_dev, "requesting info from PF\n"); + + mbox_cmd.msg.mbox_msg64 = 0; + mbox_cmd.msg.s.type = LIO_MBOX_REQUEST; + mbox_cmd.msg.s.resp_needed = 1; + mbox_cmd.msg.s.cmd = LIO_VF_ACTIVE; + mbox_cmd.msg.s.len = 2; + mbox_cmd.data[0] = 0; + lio_ver->major = LIO_BASE_MAJOR_VERSION; + lio_ver->minor = LIO_BASE_MINOR_VERSION; + lio_ver->micro = LIO_BASE_MICRO_VERSION; + mbox_cmd.q_no = 0; + mbox_cmd.recv_len = 0; + mbox_cmd.recv_status = 0; + mbox_cmd.fn = (lio_mbox_callback)cn23xx_pfvf_hs_callback; + mbox_cmd.fn_arg = (void *)&status; + + if (lio_mbox_write(lio_dev, &mbox_cmd)) { + lio_dev_err(lio_dev, "Write to mailbox failed\n"); + return -1; + } + + rte_atomic64_set(&status, 0); + + do { + rte_delay_ms(1); + } while ((rte_atomic64_read(&status) == 0) && (count++ < 10000)); + + ret = rte_atomic64_read(&status); + if (ret == 0) { + lio_dev_err(lio_dev, "cn23xx_pfvf_handshake timeout\n"); + return -1; + } + + for (q_no = 0; q_no < lio_dev->num_iqs; q_no++) + lio_dev->instr_queue[q_no]->txpciq.s.pkind = + lio_dev->pfvf_hsword.pkind; + + vfmajor = LIO_BASE_MAJOR_VERSION; + pfmajor = ret >> 16; + if (pfmajor != vfmajor) { + lio_dev_err(lio_dev, + "VF LiquidIO driver (major version %d) is not compatible with LiquidIO PF driver (major version %d)\n", + vfmajor, pfmajor); + ret = -EPERM; + } else { + lio_dev_dbg(lio_dev, + "VF LiquidIO driver (major version %d), LiquidIO PF driver (major version %d)\n", + vfmajor, pfmajor); + ret = 0; + } + + lio_dev_dbg(lio_dev, "got data from PF pkind is %d\n", + lio_dev->pfvf_hsword.pkind); + + return ret; +} + +void +cn23xx_vf_handle_mbox(struct lio_device *lio_dev) +{ + uint64_t mbox_int_val; + + /* read and clear by writing 1 */ + mbox_int_val = rte_read64(lio_dev->mbox[0]->mbox_int_reg); + rte_write64(mbox_int_val, lio_dev->mbox[0]->mbox_int_reg); + if (lio_mbox_read(lio_dev->mbox[0])) + lio_mbox_process_message(lio_dev->mbox[0]); +} + +int +cn23xx_vf_setup_device(struct lio_device *lio_dev) +{ + uint64_t reg_val; + + PMD_INIT_FUNC_TRACE(); + + /* INPUT_CONTROL[RPVF] gives the VF IOq count */ + reg_val = lio_read_csr64(lio_dev, CN23XX_SLI_IQ_PKT_CONTROL64(0)); + + lio_dev->pf_num = (reg_val >> CN23XX_PKT_INPUT_CTL_PF_NUM_POS) & + CN23XX_PKT_INPUT_CTL_PF_NUM_MASK; + lio_dev->vf_num = (reg_val >> CN23XX_PKT_INPUT_CTL_VF_NUM_POS) & + CN23XX_PKT_INPUT_CTL_VF_NUM_MASK; + + reg_val = reg_val >> CN23XX_PKT_INPUT_CTL_RPVF_POS; + + lio_dev->sriov_info.rings_per_vf = + reg_val & CN23XX_PKT_INPUT_CTL_RPVF_MASK; + + lio_dev->default_config = lio_get_conf(lio_dev); + if (lio_dev->default_config == NULL) + return -1; + + lio_dev->fn_list.setup_iq_regs = cn23xx_vf_setup_iq_regs; + lio_dev->fn_list.setup_oq_regs = cn23xx_vf_setup_oq_regs; + lio_dev->fn_list.setup_mbox = cn23xx_vf_setup_mbox; + lio_dev->fn_list.free_mbox = cn23xx_vf_free_mbox; + + lio_dev->fn_list.setup_device_regs = cn23xx_vf_setup_device_regs; + + lio_dev->fn_list.enable_io_queues = cn23xx_vf_enable_io_queues; + lio_dev->fn_list.disable_io_queues = cn23xx_vf_disable_io_queues; + + return 0; +} + diff --git a/src/spdk/dpdk/drivers/net/liquidio/base/lio_23xx_vf.h b/src/spdk/dpdk/drivers/net/liquidio/base/lio_23xx_vf.h new file mode 100644 index 000000000..8e5362db1 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/liquidio/base/lio_23xx_vf.h @@ -0,0 +1,63 @@ 
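Not part of the patch: a standalone sketch of the SLI_PKT(0)_INPUT_CONTROL decode that cn23xx_vf_setup_device() performs above, run on a hypothetical CSR value. It assumes lio_23xx_reg.h (earlier in this patch) is on the include path and reuses its shift/mask macros unchanged.

#include <stdint.h>
#include <stdio.h>
#include "lio_23xx_reg.h"

int main(void)
{
	/* hypothetical INPUT_CONTROL value: rings_per_vf=8, pf_num=1, vf_num=3 */
	uint64_t reg_val = 0x0008200300000000ULL;

	unsigned pf_num = (reg_val >> CN23XX_PKT_INPUT_CTL_PF_NUM_POS) &
			  CN23XX_PKT_INPUT_CTL_PF_NUM_MASK;
	unsigned vf_num = (reg_val >> CN23XX_PKT_INPUT_CTL_VF_NUM_POS) &
			  CN23XX_PKT_INPUT_CTL_VF_NUM_MASK;
	unsigned rpvf   = (reg_val >> CN23XX_PKT_INPUT_CTL_RPVF_POS) &
			  CN23XX_PKT_INPUT_CTL_RPVF_MASK;

	/* prints: pf=1 vf=3 rings_per_vf=8 */
	printf("pf=%u vf=%u rings_per_vf=%u\n", pf_num, vf_num, rpvf);
	return 0;
}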
+/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Cavium, Inc + */ + +#ifndef _LIO_23XX_VF_H_ +#define _LIO_23XX_VF_H_ + +#include + +#include "lio_struct.h" + +static const struct lio_config default_cn23xx_conf = { + .card_type = LIO_23XX, + .card_name = LIO_23XX_NAME, + /** IQ attributes */ + .iq = { + .max_iqs = CN23XX_CFG_IO_QUEUES, + .pending_list_size = + (CN23XX_MAX_IQ_DESCRIPTORS * CN23XX_CFG_IO_QUEUES), + .instr_type = OCTEON_64BYTE_INSTR, + }, + + /** OQ attributes */ + .oq = { + .max_oqs = CN23XX_CFG_IO_QUEUES, + .info_ptr = OCTEON_OQ_INFOPTR_MODE, + .refill_threshold = CN23XX_OQ_REFIL_THRESHOLD, + }, + + .num_nic_ports = CN23XX_DEFAULT_NUM_PORTS, + .num_def_rx_descs = CN23XX_MAX_OQ_DESCRIPTORS, + .num_def_tx_descs = CN23XX_MAX_IQ_DESCRIPTORS, + .def_rx_buf_size = CN23XX_OQ_BUF_SIZE, +}; + +static inline const struct lio_config * +lio_get_conf(struct lio_device *lio_dev) +{ + const struct lio_config *default_lio_conf = NULL; + + /* check the LIO Device model & return the corresponding lio + * configuration + */ + default_lio_conf = &default_cn23xx_conf; + + if (default_lio_conf == NULL) { + lio_dev_err(lio_dev, "Configuration verification failed\n"); + return NULL; + } + + return default_lio_conf; +} + +#define CN23XX_VF_BUSY_READING_REG_LOOP_COUNT 100000 + +void cn23xx_vf_ask_pf_to_do_flr(struct lio_device *lio_dev); + +int cn23xx_pfvf_handshake(struct lio_device *lio_dev); + +int cn23xx_vf_setup_device(struct lio_device *lio_dev); + +void cn23xx_vf_handle_mbox(struct lio_device *lio_dev); +#endif /* _LIO_23XX_VF_H_ */ diff --git a/src/spdk/dpdk/drivers/net/liquidio/base/lio_hw_defs.h b/src/spdk/dpdk/drivers/net/liquidio/base/lio_hw_defs.h new file mode 100644 index 000000000..5e119c124 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/liquidio/base/lio_hw_defs.h @@ -0,0 +1,239 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Cavium, Inc + */ + +#ifndef _LIO_HW_DEFS_H_ +#define _LIO_HW_DEFS_H_ + +#include + +#ifndef PCI_VENDOR_ID_CAVIUM +#define PCI_VENDOR_ID_CAVIUM 0x177D +#endif + +#define LIO_CN23XX_VF_VID 0x9712 + +/* CN23xx subsystem device ids */ +#define PCI_SUBSYS_DEV_ID_CN2350_210 0x0004 +#define PCI_SUBSYS_DEV_ID_CN2360_210 0x0005 +#define PCI_SUBSYS_DEV_ID_CN2360_225 0x0006 +#define PCI_SUBSYS_DEV_ID_CN2350_225 0x0007 +#define PCI_SUBSYS_DEV_ID_CN2350_210SVPN3 0x0008 +#define PCI_SUBSYS_DEV_ID_CN2360_210SVPN3 0x0009 +#define PCI_SUBSYS_DEV_ID_CN2350_210SVPT 0x000a +#define PCI_SUBSYS_DEV_ID_CN2360_210SVPT 0x000b + +/* --------------------------CONFIG VALUES------------------------ */ + +/* CN23xx IQ configuration macros */ +#define CN23XX_MAX_RINGS_PER_PF 64 +#define CN23XX_MAX_RINGS_PER_VF 8 + +#define CN23XX_MAX_INPUT_QUEUES CN23XX_MAX_RINGS_PER_PF +#define CN23XX_MAX_IQ_DESCRIPTORS 512 +#define CN23XX_MIN_IQ_DESCRIPTORS 128 + +#define CN23XX_MAX_OUTPUT_QUEUES CN23XX_MAX_RINGS_PER_PF +#define CN23XX_MAX_OQ_DESCRIPTORS 512 +#define CN23XX_MIN_OQ_DESCRIPTORS 128 +#define CN23XX_OQ_BUF_SIZE 1536 + +#define CN23XX_OQ_REFIL_THRESHOLD 16 + +#define CN23XX_DEFAULT_NUM_PORTS 1 + +#define CN23XX_CFG_IO_QUEUES CN23XX_MAX_RINGS_PER_PF + +/* common OCTEON configuration macros */ +#define OCTEON_64BYTE_INSTR 64 +#define OCTEON_OQ_INFOPTR_MODE 1 + +/* Max IOQs per LIO Link */ +#define LIO_MAX_IOQS_PER_IF 64 + +/* Wait time in milliseconds for FLR */ +#define LIO_PCI_FLR_WAIT 100 + +enum lio_card_type { + LIO_23XX /* 23xx */ +}; + +#define LIO_23XX_NAME "23xx" + +#define LIO_DEV_RUNNING 0xc + +#define LIO_OQ_REFILL_THRESHOLD_CFG(cfg) \ + 
((cfg)->default_config->oq.refill_threshold) +#define LIO_NUM_DEF_TX_DESCS_CFG(cfg) \ + ((cfg)->default_config->num_def_tx_descs) + +#define LIO_IQ_INSTR_TYPE(cfg) ((cfg)->default_config->iq.instr_type) + +/* The following config values are fixed and should not be modified. */ + +/* Maximum number of Instruction queues */ +#define LIO_MAX_INSTR_QUEUES(lio_dev) CN23XX_MAX_RINGS_PER_VF + +#define LIO_MAX_POSSIBLE_INSTR_QUEUES CN23XX_MAX_INPUT_QUEUES +#define LIO_MAX_POSSIBLE_OUTPUT_QUEUES CN23XX_MAX_OUTPUT_QUEUES + +#define LIO_DEVICE_NAME_LEN 32 +#define LIO_BASE_MAJOR_VERSION 1 +#define LIO_BASE_MINOR_VERSION 5 +#define LIO_BASE_MICRO_VERSION 1 + +#define LIO_FW_VERSION_LENGTH 32 + +#define LIO_Q_RECONF_MIN_VERSION "1.7.0" +#define LIO_VF_TRUST_MIN_VERSION "1.7.1" + +/** Tag types used by Octeon cores in its work. */ +enum octeon_tag_type { + OCTEON_ORDERED_TAG = 0, + OCTEON_ATOMIC_TAG = 1, +}; + +/* pre-defined host->NIC tag values */ +#define LIO_CONTROL (0x11111110) +#define LIO_DATA(i) (0x11111111 + (i)) + +/* used for NIC operations */ +#define LIO_OPCODE 1 + +/* Subcodes are used by host driver/apps to identify the sub-operation + * for the core. They only need to by unique for a given subsystem. + */ +#define LIO_OPCODE_SUBCODE(op, sub) \ + ((((op) & 0x0f) << 8) | ((sub) & 0x7f)) + +/** LIO_OPCODE subcodes */ +/* This subcode is sent by core PCI driver to indicate cores are ready. */ +#define LIO_OPCODE_NW_DATA 0x02 /* network packet data */ +#define LIO_OPCODE_CMD 0x03 +#define LIO_OPCODE_INFO 0x04 +#define LIO_OPCODE_PORT_STATS 0x05 +#define LIO_OPCODE_IF_CFG 0x09 + +#define LIO_MIN_RX_BUF_SIZE 64 +#define LIO_MAX_RX_PKTLEN (64 * 1024) + +/* NIC Command types */ +#define LIO_CMD_CHANGE_MTU 0x1 +#define LIO_CMD_CHANGE_DEVFLAGS 0x3 +#define LIO_CMD_RX_CTL 0x4 +#define LIO_CMD_CLEAR_STATS 0x6 +#define LIO_CMD_SET_RSS 0xD +#define LIO_CMD_TNL_RX_CSUM_CTL 0x10 +#define LIO_CMD_TNL_TX_CSUM_CTL 0x11 +#define LIO_CMD_ADD_VLAN_FILTER 0x17 +#define LIO_CMD_DEL_VLAN_FILTER 0x18 +#define LIO_CMD_VXLAN_PORT_CONFIG 0x19 +#define LIO_CMD_QUEUE_COUNT_CTL 0x1f + +#define LIO_CMD_VXLAN_PORT_ADD 0x0 +#define LIO_CMD_VXLAN_PORT_DEL 0x1 +#define LIO_CMD_RXCSUM_ENABLE 0x0 +#define LIO_CMD_TXCSUM_ENABLE 0x0 + +/* RX(packets coming from wire) Checksum verification flags */ +/* TCP/UDP csum */ +#define LIO_L4_CSUM_VERIFIED 0x1 +#define LIO_IP_CSUM_VERIFIED 0x2 + +/* RSS */ +#define LIO_RSS_PARAM_DISABLE_RSS 0x10 +#define LIO_RSS_PARAM_HASH_KEY_UNCHANGED 0x08 +#define LIO_RSS_PARAM_ITABLE_UNCHANGED 0x04 +#define LIO_RSS_PARAM_HASH_INFO_UNCHANGED 0x02 + +#define LIO_RSS_HASH_IPV4 0x100 +#define LIO_RSS_HASH_TCP_IPV4 0x200 +#define LIO_RSS_HASH_IPV6 0x400 +#define LIO_RSS_HASH_TCP_IPV6 0x1000 +#define LIO_RSS_HASH_IPV6_EX 0x800 +#define LIO_RSS_HASH_TCP_IPV6_EX 0x2000 + +#define LIO_RSS_OFFLOAD_ALL ( \ + LIO_RSS_HASH_IPV4 | \ + LIO_RSS_HASH_TCP_IPV4 | \ + LIO_RSS_HASH_IPV6 | \ + LIO_RSS_HASH_TCP_IPV6 | \ + LIO_RSS_HASH_IPV6_EX | \ + LIO_RSS_HASH_TCP_IPV6_EX) + +#define LIO_RSS_MAX_TABLE_SZ 128 +#define LIO_RSS_MAX_KEY_SZ 40 +#define LIO_RSS_PARAM_SIZE 16 + +/* Interface flags communicated between host driver and core app. 
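Not part of the patch: a tiny worked example of the LIO_OPCODE_SUBCODE() packing above, which places the 4-bit opcode in bits 11:8 and the 7-bit subcode in bits 6:0. The macro and constants are copied verbatim from lio_hw_defs.h so the snippet stands alone.

#include <stdio.h>

/* copied from lio_hw_defs.h above */
#define LIO_OPCODE		1
#define LIO_OPCODE_IF_CFG	0x09
#define LIO_OPCODE_SUBCODE(op, sub) \
	((((op) & 0x0f) << 8) | ((sub) & 0x7f))

int main(void)
{
	/* (1 << 8) | 0x09 = 0x0109 */
	printf("IF_CFG opcode/subcode: 0x%04x\n",
	       LIO_OPCODE_SUBCODE(LIO_OPCODE, LIO_OPCODE_IF_CFG));
	return 0;
}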
*/ +enum lio_ifflags { + LIO_IFFLAG_PROMISC = 0x01, + LIO_IFFLAG_ALLMULTI = 0x02, + LIO_IFFLAG_UNICAST = 0x10 +}; + +/* Routines for reading and writing CSRs */ +#ifdef RTE_LIBRTE_LIO_DEBUG_REGS +#define lio_write_csr(lio_dev, reg_off, value) \ + do { \ + typeof(lio_dev) _dev = lio_dev; \ + typeof(reg_off) _reg_off = reg_off; \ + typeof(value) _value = value; \ + PMD_REGS_LOG(_dev, \ + "Write32: Reg: 0x%08lx Val: 0x%08lx\n", \ + (unsigned long)_reg_off, \ + (unsigned long)_value); \ + rte_write32(_value, _dev->hw_addr + _reg_off); \ + } while (0) + +#define lio_write_csr64(lio_dev, reg_off, val64) \ + do { \ + typeof(lio_dev) _dev = lio_dev; \ + typeof(reg_off) _reg_off = reg_off; \ + typeof(val64) _val64 = val64; \ + PMD_REGS_LOG( \ + _dev, \ + "Write64: Reg: 0x%08lx Val: 0x%016llx\n", \ + (unsigned long)_reg_off, \ + (unsigned long long)_val64); \ + rte_write64(_val64, _dev->hw_addr + _reg_off); \ + } while (0) + +#define lio_read_csr(lio_dev, reg_off) \ + ({ \ + typeof(lio_dev) _dev = lio_dev; \ + typeof(reg_off) _reg_off = reg_off; \ + uint32_t val = rte_read32(_dev->hw_addr + _reg_off); \ + PMD_REGS_LOG(_dev, \ + "Read32: Reg: 0x%08lx Val: 0x%08lx\n", \ + (unsigned long)_reg_off, \ + (unsigned long)val); \ + val; \ + }) + +#define lio_read_csr64(lio_dev, reg_off) \ + ({ \ + typeof(lio_dev) _dev = lio_dev; \ + typeof(reg_off) _reg_off = reg_off; \ + uint64_t val64 = rte_read64(_dev->hw_addr + _reg_off); \ + PMD_REGS_LOG( \ + _dev, \ + "Read64: Reg: 0x%08lx Val: 0x%016llx\n", \ + (unsigned long)_reg_off, \ + (unsigned long long)val64); \ + val64; \ + }) +#else +#define lio_write_csr(lio_dev, reg_off, value) \ + rte_write32(value, (lio_dev)->hw_addr + (reg_off)) + +#define lio_write_csr64(lio_dev, reg_off, val64) \ + rte_write64(val64, (lio_dev)->hw_addr + (reg_off)) + +#define lio_read_csr(lio_dev, reg_off) \ + rte_read32((lio_dev)->hw_addr + (reg_off)) + +#define lio_read_csr64(lio_dev, reg_off) \ + rte_read64((lio_dev)->hw_addr + (reg_off)) +#endif +#endif /* _LIO_HW_DEFS_H_ */ diff --git a/src/spdk/dpdk/drivers/net/liquidio/base/lio_mbox.c b/src/spdk/dpdk/drivers/net/liquidio/base/lio_mbox.c new file mode 100644 index 000000000..112900151 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/liquidio/base/lio_mbox.c @@ -0,0 +1,246 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Cavium, Inc + */ + +#include +#include + +#include "lio_logs.h" +#include "lio_struct.h" +#include "lio_mbox.h" + +/** + * lio_mbox_read: + * @mbox: Pointer mailbox + * + * Reads the 8-bytes of data from the mbox register + * Writes back the acknowledgment indicating completion of read + */ +int +lio_mbox_read(struct lio_mbox *mbox) +{ + union lio_mbox_message msg; + int ret = 0; + + msg.mbox_msg64 = rte_read64(mbox->mbox_read_reg); + + if ((msg.mbox_msg64 == LIO_PFVFACK) || (msg.mbox_msg64 == LIO_PFVFSIG)) + return 0; + + if (mbox->state & LIO_MBOX_STATE_REQ_RECEIVING) { + mbox->mbox_req.data[mbox->mbox_req.recv_len - 1] = + msg.mbox_msg64; + mbox->mbox_req.recv_len++; + } else { + if (mbox->state & LIO_MBOX_STATE_RES_RECEIVING) { + mbox->mbox_resp.data[mbox->mbox_resp.recv_len - 1] = + msg.mbox_msg64; + mbox->mbox_resp.recv_len++; + } else { + if ((mbox->state & LIO_MBOX_STATE_IDLE) && + (msg.s.type == LIO_MBOX_REQUEST)) { + mbox->state &= ~LIO_MBOX_STATE_IDLE; + mbox->state |= LIO_MBOX_STATE_REQ_RECEIVING; + mbox->mbox_req.msg.mbox_msg64 = msg.mbox_msg64; + mbox->mbox_req.q_no = mbox->q_no; + mbox->mbox_req.recv_len = 1; + } else { + if ((mbox->state & + LIO_MBOX_STATE_RES_PENDING) && + 
(msg.s.type == LIO_MBOX_RESPONSE)) { + mbox->state &= + ~LIO_MBOX_STATE_RES_PENDING; + mbox->state |= + LIO_MBOX_STATE_RES_RECEIVING; + mbox->mbox_resp.msg.mbox_msg64 = + msg.mbox_msg64; + mbox->mbox_resp.q_no = mbox->q_no; + mbox->mbox_resp.recv_len = 1; + } else { + rte_write64(LIO_PFVFERR, + mbox->mbox_read_reg); + mbox->state |= LIO_MBOX_STATE_ERROR; + return -1; + } + } + } + } + + if (mbox->state & LIO_MBOX_STATE_REQ_RECEIVING) { + if (mbox->mbox_req.recv_len < msg.s.len) { + ret = 0; + } else { + mbox->state &= ~LIO_MBOX_STATE_REQ_RECEIVING; + mbox->state |= LIO_MBOX_STATE_REQ_RECEIVED; + ret = 1; + } + } else { + if (mbox->state & LIO_MBOX_STATE_RES_RECEIVING) { + if (mbox->mbox_resp.recv_len < msg.s.len) { + ret = 0; + } else { + mbox->state &= ~LIO_MBOX_STATE_RES_RECEIVING; + mbox->state |= LIO_MBOX_STATE_RES_RECEIVED; + ret = 1; + } + } else { + RTE_ASSERT(0); + } + } + + rte_write64(LIO_PFVFACK, mbox->mbox_read_reg); + + return ret; +} + +/** + * lio_mbox_write: + * @lio_dev: Pointer lio device + * @mbox_cmd: Cmd to send to mailbox. + * + * Populates the queue specific mbox structure + * with cmd information. + * Write the cmd to mbox register + */ +int +lio_mbox_write(struct lio_device *lio_dev, + struct lio_mbox_cmd *mbox_cmd) +{ + struct lio_mbox *mbox = lio_dev->mbox[mbox_cmd->q_no]; + uint32_t count, i, ret = LIO_MBOX_STATUS_SUCCESS; + + if ((mbox_cmd->msg.s.type == LIO_MBOX_RESPONSE) && + !(mbox->state & LIO_MBOX_STATE_REQ_RECEIVED)) + return LIO_MBOX_STATUS_FAILED; + + if ((mbox_cmd->msg.s.type == LIO_MBOX_REQUEST) && + !(mbox->state & LIO_MBOX_STATE_IDLE)) + return LIO_MBOX_STATUS_BUSY; + + if (mbox_cmd->msg.s.type == LIO_MBOX_REQUEST) { + rte_memcpy(&mbox->mbox_resp, mbox_cmd, + sizeof(struct lio_mbox_cmd)); + mbox->state = LIO_MBOX_STATE_RES_PENDING; + } + + count = 0; + + while (rte_read64(mbox->mbox_write_reg) != LIO_PFVFSIG) { + rte_delay_ms(1); + if (count++ == 1000) { + ret = LIO_MBOX_STATUS_FAILED; + break; + } + } + + if (ret == LIO_MBOX_STATUS_SUCCESS) { + rte_write64(mbox_cmd->msg.mbox_msg64, mbox->mbox_write_reg); + for (i = 0; i < (uint32_t)(mbox_cmd->msg.s.len - 1); i++) { + count = 0; + while (rte_read64(mbox->mbox_write_reg) != + LIO_PFVFACK) { + rte_delay_ms(1); + if (count++ == 1000) { + ret = LIO_MBOX_STATUS_FAILED; + break; + } + } + rte_write64(mbox_cmd->data[i], mbox->mbox_write_reg); + } + } + + if (mbox_cmd->msg.s.type == LIO_MBOX_RESPONSE) { + mbox->state = LIO_MBOX_STATE_IDLE; + rte_write64(LIO_PFVFSIG, mbox->mbox_read_reg); + } else { + if ((!mbox_cmd->msg.s.resp_needed) || + (ret == LIO_MBOX_STATUS_FAILED)) { + mbox->state &= ~LIO_MBOX_STATE_RES_PENDING; + if (!(mbox->state & (LIO_MBOX_STATE_REQ_RECEIVING | + LIO_MBOX_STATE_REQ_RECEIVED))) + mbox->state = LIO_MBOX_STATE_IDLE; + } + } + + return ret; +} + +/** + * lio_mbox_process_cmd: + * @mbox: Pointer mailbox + * @mbox_cmd: Pointer to command received + * + * Process the cmd received in mbox + */ +static int +lio_mbox_process_cmd(struct lio_mbox *mbox, + struct lio_mbox_cmd *mbox_cmd) +{ + struct lio_device *lio_dev = mbox->lio_dev; + + if (mbox_cmd->msg.s.cmd == LIO_CORES_CRASHED) + lio_dev_err(lio_dev, "Octeon core(s) crashed or got stuck!\n"); + + return 0; +} + +/** + * Process the received mbox message. 
+ */ +int +lio_mbox_process_message(struct lio_mbox *mbox) +{ + struct lio_mbox_cmd mbox_cmd; + + if (mbox->state & LIO_MBOX_STATE_ERROR) { + if (mbox->state & (LIO_MBOX_STATE_RES_PENDING | + LIO_MBOX_STATE_RES_RECEIVING)) { + rte_memcpy(&mbox_cmd, &mbox->mbox_resp, + sizeof(struct lio_mbox_cmd)); + mbox->state = LIO_MBOX_STATE_IDLE; + rte_write64(LIO_PFVFSIG, mbox->mbox_read_reg); + mbox_cmd.recv_status = 1; + if (mbox_cmd.fn) + mbox_cmd.fn(mbox->lio_dev, &mbox_cmd, + mbox_cmd.fn_arg); + + return 0; + } + + mbox->state = LIO_MBOX_STATE_IDLE; + rte_write64(LIO_PFVFSIG, mbox->mbox_read_reg); + + return 0; + } + + if (mbox->state & LIO_MBOX_STATE_RES_RECEIVED) { + rte_memcpy(&mbox_cmd, &mbox->mbox_resp, + sizeof(struct lio_mbox_cmd)); + mbox->state = LIO_MBOX_STATE_IDLE; + rte_write64(LIO_PFVFSIG, mbox->mbox_read_reg); + mbox_cmd.recv_status = 0; + if (mbox_cmd.fn) + mbox_cmd.fn(mbox->lio_dev, &mbox_cmd, mbox_cmd.fn_arg); + + return 0; + } + + if (mbox->state & LIO_MBOX_STATE_REQ_RECEIVED) { + rte_memcpy(&mbox_cmd, &mbox->mbox_req, + sizeof(struct lio_mbox_cmd)); + if (!mbox_cmd.msg.s.resp_needed) { + mbox->state &= ~LIO_MBOX_STATE_REQ_RECEIVED; + if (!(mbox->state & LIO_MBOX_STATE_RES_PENDING)) + mbox->state = LIO_MBOX_STATE_IDLE; + rte_write64(LIO_PFVFSIG, mbox->mbox_read_reg); + } + + lio_mbox_process_cmd(mbox, &mbox_cmd); + + return 0; + } + + RTE_ASSERT(0); + + return 0; +} diff --git a/src/spdk/dpdk/drivers/net/liquidio/base/lio_mbox.h b/src/spdk/dpdk/drivers/net/liquidio/base/lio_mbox.h new file mode 100644 index 000000000..457917e91 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/liquidio/base/lio_mbox.h @@ -0,0 +1,102 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Cavium, Inc + */ + +#ifndef _LIO_MBOX_H_ +#define _LIO_MBOX_H_ + +#include + +#include + +/* Macros for Mail Box Communication */ + +#define LIO_MBOX_DATA_MAX 32 + +#define LIO_VF_ACTIVE 0x1 +#define LIO_VF_FLR_REQUEST 0x2 +#define LIO_CORES_CRASHED 0x3 + +/* Macro for Read acknowledgment */ +#define LIO_PFVFACK 0xffffffffffffffff +#define LIO_PFVFSIG 0x1122334455667788 +#define LIO_PFVFERR 0xDEADDEADDEADDEAD + +enum lio_mbox_cmd_status { + LIO_MBOX_STATUS_SUCCESS = 0, + LIO_MBOX_STATUS_FAILED = 1, + LIO_MBOX_STATUS_BUSY = 2 +}; + +enum lio_mbox_message_type { + LIO_MBOX_REQUEST = 0, + LIO_MBOX_RESPONSE = 1 +}; + +union lio_mbox_message { + uint64_t mbox_msg64; + struct { + uint16_t type : 1; + uint16_t resp_needed : 1; + uint16_t cmd : 6; + uint16_t len : 8; + uint8_t params[6]; + } s; +}; + +typedef void (*lio_mbox_callback)(void *, void *, void *); + +struct lio_mbox_cmd { + union lio_mbox_message msg; + uint64_t data[LIO_MBOX_DATA_MAX]; + uint32_t q_no; + uint32_t recv_len; + uint32_t recv_status; + lio_mbox_callback fn; + void *fn_arg; +}; + +enum lio_mbox_state { + LIO_MBOX_STATE_IDLE = 1, + LIO_MBOX_STATE_REQ_RECEIVING = 2, + LIO_MBOX_STATE_REQ_RECEIVED = 4, + LIO_MBOX_STATE_RES_PENDING = 8, + LIO_MBOX_STATE_RES_RECEIVING = 16, + LIO_MBOX_STATE_RES_RECEIVED = 16, + LIO_MBOX_STATE_ERROR = 32 +}; + +struct lio_mbox { + /* A spinlock to protect access to this q_mbox. */ + rte_spinlock_t lock; + + struct lio_device *lio_dev; + + uint32_t q_no; + + enum lio_mbox_state state; + + /* SLI_MAC_PF_MBOX_INT for PF, SLI_PKT_MBOX_INT for VF. */ + void *mbox_int_reg; + + /* SLI_PKT_PF_VF_MBOX_SIG(0) for PF, + * SLI_PKT_PF_VF_MBOX_SIG(1) for VF. + */ + void *mbox_write_reg; + + /* SLI_PKT_PF_VF_MBOX_SIG(1) for PF, + * SLI_PKT_PF_VF_MBOX_SIG(0) for VF. 
+ */ + void *mbox_read_reg; + + struct lio_mbox_cmd mbox_req; + + struct lio_mbox_cmd mbox_resp; + +}; + +int lio_mbox_read(struct lio_mbox *mbox); +int lio_mbox_write(struct lio_device *lio_dev, + struct lio_mbox_cmd *mbox_cmd); +int lio_mbox_process_message(struct lio_mbox *mbox); +#endif /* _LIO_MBOX_H_ */ diff --git a/src/spdk/dpdk/drivers/net/liquidio/lio_ethdev.c b/src/spdk/dpdk/drivers/net/liquidio/lio_ethdev.c new file mode 100644 index 000000000..ad4a51ecd --- /dev/null +++ b/src/spdk/dpdk/drivers/net/liquidio/lio_ethdev.c @@ -0,0 +1,2173 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Cavium, Inc + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "lio_logs.h" +#include "lio_23xx_vf.h" +#include "lio_ethdev.h" +#include "lio_rxtx.h" + +int lio_logtype_init; +int lio_logtype_driver; + +/* Default RSS key in use */ +static uint8_t lio_rss_key[40] = { + 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, + 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0, + 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4, + 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, + 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA, +}; + +static const struct rte_eth_desc_lim lio_rx_desc_lim = { + .nb_max = CN23XX_MAX_OQ_DESCRIPTORS, + .nb_min = CN23XX_MIN_OQ_DESCRIPTORS, + .nb_align = 1, +}; + +static const struct rte_eth_desc_lim lio_tx_desc_lim = { + .nb_max = CN23XX_MAX_IQ_DESCRIPTORS, + .nb_min = CN23XX_MIN_IQ_DESCRIPTORS, + .nb_align = 1, +}; + +/* Wait for control command to reach nic. */ +static uint16_t +lio_wait_for_ctrl_cmd(struct lio_device *lio_dev, + struct lio_dev_ctrl_cmd *ctrl_cmd) +{ + uint16_t timeout = LIO_MAX_CMD_TIMEOUT; + + while ((ctrl_cmd->cond == 0) && --timeout) { + lio_flush_iq(lio_dev, lio_dev->instr_queue[0]); + rte_delay_ms(1); + } + + return !timeout; +} + +/** + * \brief Send Rx control command + * @param eth_dev Pointer to the structure rte_eth_dev + * @param start_stop whether to start or stop + */ +static int +lio_send_rx_ctrl_cmd(struct rte_eth_dev *eth_dev, int start_stop) +{ + struct lio_device *lio_dev = LIO_DEV(eth_dev); + struct lio_dev_ctrl_cmd ctrl_cmd; + struct lio_ctrl_pkt ctrl_pkt; + + /* flush added to prevent cmd failure + * incase the queue is full + */ + lio_flush_iq(lio_dev, lio_dev->instr_queue[0]); + + memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt)); + memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd)); + + ctrl_cmd.eth_dev = eth_dev; + ctrl_cmd.cond = 0; + + ctrl_pkt.ncmd.s.cmd = LIO_CMD_RX_CTL; + ctrl_pkt.ncmd.s.param1 = start_stop; + ctrl_pkt.ctrl_cmd = &ctrl_cmd; + + if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) { + lio_dev_err(lio_dev, "Failed to send RX Control message\n"); + return -1; + } + + if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) { + lio_dev_err(lio_dev, "RX Control command timed out\n"); + return -1; + } + + return 0; +} + +/* store statistics names and its offset in stats structure */ +struct rte_lio_xstats_name_off { + char name[RTE_ETH_XSTATS_NAME_SIZE]; + unsigned int offset; +}; + +static const struct rte_lio_xstats_name_off rte_lio_stats_strings[] = { + {"rx_pkts", offsetof(struct octeon_rx_stats, total_rcvd)}, + {"rx_bytes", offsetof(struct octeon_rx_stats, bytes_rcvd)}, + {"rx_broadcast_pkts", offsetof(struct octeon_rx_stats, total_bcst)}, + {"rx_multicast_pkts", offsetof(struct octeon_rx_stats, total_mcst)}, + {"rx_flow_ctrl_pkts", offsetof(struct octeon_rx_stats, ctl_rcvd)}, + {"rx_fifo_err", offsetof(struct octeon_rx_stats, fifo_err)}, + {"rx_dmac_drop", offsetof(struct 
octeon_rx_stats, dmac_drop)}, + {"rx_fcs_err", offsetof(struct octeon_rx_stats, fcs_err)}, + {"rx_jabber_err", offsetof(struct octeon_rx_stats, jabber_err)}, + {"rx_l2_err", offsetof(struct octeon_rx_stats, l2_err)}, + {"rx_vxlan_pkts", offsetof(struct octeon_rx_stats, fw_rx_vxlan)}, + {"rx_vxlan_err", offsetof(struct octeon_rx_stats, fw_rx_vxlan_err)}, + {"rx_lro_pkts", offsetof(struct octeon_rx_stats, fw_lro_pkts)}, + {"tx_pkts", (offsetof(struct octeon_tx_stats, total_pkts_sent)) + + sizeof(struct octeon_rx_stats)}, + {"tx_bytes", (offsetof(struct octeon_tx_stats, total_bytes_sent)) + + sizeof(struct octeon_rx_stats)}, + {"tx_broadcast_pkts", + (offsetof(struct octeon_tx_stats, bcast_pkts_sent)) + + sizeof(struct octeon_rx_stats)}, + {"tx_multicast_pkts", + (offsetof(struct octeon_tx_stats, mcast_pkts_sent)) + + sizeof(struct octeon_rx_stats)}, + {"tx_flow_ctrl_pkts", (offsetof(struct octeon_tx_stats, ctl_sent)) + + sizeof(struct octeon_rx_stats)}, + {"tx_fifo_err", (offsetof(struct octeon_tx_stats, fifo_err)) + + sizeof(struct octeon_rx_stats)}, + {"tx_total_collisions", (offsetof(struct octeon_tx_stats, + total_collisions)) + + sizeof(struct octeon_rx_stats)}, + {"tx_tso", (offsetof(struct octeon_tx_stats, fw_tso)) + + sizeof(struct octeon_rx_stats)}, + {"tx_vxlan_pkts", (offsetof(struct octeon_tx_stats, fw_tx_vxlan)) + + sizeof(struct octeon_rx_stats)}, +}; + +#define LIO_NB_XSTATS RTE_DIM(rte_lio_stats_strings) + +/* Get hw stats of the port */ +static int +lio_dev_xstats_get(struct rte_eth_dev *eth_dev, struct rte_eth_xstat *xstats, + unsigned int n) +{ + struct lio_device *lio_dev = LIO_DEV(eth_dev); + uint16_t timeout = LIO_MAX_CMD_TIMEOUT; + struct octeon_link_stats *hw_stats; + struct lio_link_stats_resp *resp; + struct lio_soft_command *sc; + uint32_t resp_size; + unsigned int i; + int retval; + + if (!lio_dev->intf_open) { + lio_dev_err(lio_dev, "Port %d down\n", + lio_dev->port_id); + return -EINVAL; + } + + if (n < LIO_NB_XSTATS) + return LIO_NB_XSTATS; + + resp_size = sizeof(struct lio_link_stats_resp); + sc = lio_alloc_soft_command(lio_dev, 0, resp_size, 0); + if (sc == NULL) + return -ENOMEM; + + resp = (struct lio_link_stats_resp *)sc->virtrptr; + lio_prepare_soft_command(lio_dev, sc, LIO_OPCODE, + LIO_OPCODE_PORT_STATS, 0, 0, 0); + + /* Setting wait time in seconds */ + sc->wait_time = LIO_MAX_CMD_TIMEOUT / 1000; + + retval = lio_send_soft_command(lio_dev, sc); + if (retval == LIO_IQ_SEND_FAILED) { + lio_dev_err(lio_dev, "failed to get port stats from firmware. 
status: %x\n", + retval); + goto get_stats_fail; + } + + while ((*sc->status_word == LIO_COMPLETION_WORD_INIT) && --timeout) { + lio_flush_iq(lio_dev, lio_dev->instr_queue[sc->iq_no]); + lio_process_ordered_list(lio_dev); + rte_delay_ms(1); + } + + retval = resp->status; + if (retval) { + lio_dev_err(lio_dev, "failed to get port stats from firmware\n"); + goto get_stats_fail; + } + + lio_swap_8B_data((uint64_t *)(&resp->link_stats), + sizeof(struct octeon_link_stats) >> 3); + + hw_stats = &resp->link_stats; + + for (i = 0; i < LIO_NB_XSTATS; i++) { + xstats[i].id = i; + xstats[i].value = + *(uint64_t *)(((char *)hw_stats) + + rte_lio_stats_strings[i].offset); + } + + lio_free_soft_command(sc); + + return LIO_NB_XSTATS; + +get_stats_fail: + lio_free_soft_command(sc); + + return -1; +} + +static int +lio_dev_xstats_get_names(struct rte_eth_dev *eth_dev, + struct rte_eth_xstat_name *xstats_names, + unsigned limit __rte_unused) +{ + struct lio_device *lio_dev = LIO_DEV(eth_dev); + unsigned int i; + + if (!lio_dev->intf_open) { + lio_dev_err(lio_dev, "Port %d down\n", + lio_dev->port_id); + return -EINVAL; + } + + if (xstats_names == NULL) + return LIO_NB_XSTATS; + + /* Note: limit checked in rte_eth_xstats_names() */ + + for (i = 0; i < LIO_NB_XSTATS; i++) { + snprintf(xstats_names[i].name, sizeof(xstats_names[i].name), + "%s", rte_lio_stats_strings[i].name); + } + + return LIO_NB_XSTATS; +} + +/* Reset hw stats for the port */ +static int +lio_dev_xstats_reset(struct rte_eth_dev *eth_dev) +{ + struct lio_device *lio_dev = LIO_DEV(eth_dev); + struct lio_dev_ctrl_cmd ctrl_cmd; + struct lio_ctrl_pkt ctrl_pkt; + int ret; + + if (!lio_dev->intf_open) { + lio_dev_err(lio_dev, "Port %d down\n", + lio_dev->port_id); + return -EINVAL; + } + + /* flush added to prevent cmd failure + * incase the queue is full + */ + lio_flush_iq(lio_dev, lio_dev->instr_queue[0]); + + memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt)); + memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd)); + + ctrl_cmd.eth_dev = eth_dev; + ctrl_cmd.cond = 0; + + ctrl_pkt.ncmd.s.cmd = LIO_CMD_CLEAR_STATS; + ctrl_pkt.ctrl_cmd = &ctrl_cmd; + + ret = lio_send_ctrl_pkt(lio_dev, &ctrl_pkt); + if (ret != 0) { + lio_dev_err(lio_dev, "Failed to send clear stats command\n"); + return ret; + } + + ret = lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd); + if (ret != 0) { + lio_dev_err(lio_dev, "Clear stats command timed out\n"); + return ret; + } + + /* clear stored per queue stats */ + RTE_FUNC_PTR_OR_ERR_RET(*eth_dev->dev_ops->stats_reset, 0); + return (*eth_dev->dev_ops->stats_reset)(eth_dev); +} + +/* Retrieve the device statistics (# packets in/out, # bytes in/out, etc */ +static int +lio_dev_stats_get(struct rte_eth_dev *eth_dev, + struct rte_eth_stats *stats) +{ + struct lio_device *lio_dev = LIO_DEV(eth_dev); + struct lio_droq_stats *oq_stats; + struct lio_iq_stats *iq_stats; + struct lio_instr_queue *txq; + struct lio_droq *droq; + int i, iq_no, oq_no; + uint64_t bytes = 0; + uint64_t pkts = 0; + uint64_t drop = 0; + + for (i = 0; i < eth_dev->data->nb_tx_queues; i++) { + iq_no = lio_dev->linfo.txpciq[i].s.q_no; + txq = lio_dev->instr_queue[iq_no]; + if (txq != NULL) { + iq_stats = &txq->stats; + pkts += iq_stats->tx_done; + drop += iq_stats->tx_dropped; + bytes += iq_stats->tx_tot_bytes; + } + } + + stats->opackets = pkts; + stats->obytes = bytes; + stats->oerrors = drop; + + pkts = 0; + drop = 0; + bytes = 0; + + for (i = 0; i < eth_dev->data->nb_rx_queues; i++) { + oq_no = lio_dev->linfo.rxpciq[i].s.q_no; + droq = lio_dev->droq[oq_no]; + if 
(droq != NULL) { + oq_stats = &droq->stats; + pkts += oq_stats->rx_pkts_received; + drop += (oq_stats->rx_dropped + + oq_stats->dropped_toomany + + oq_stats->dropped_nomem); + bytes += oq_stats->rx_bytes_received; + } + } + stats->ibytes = bytes; + stats->ipackets = pkts; + stats->ierrors = drop; + + return 0; +} + +static int +lio_dev_stats_reset(struct rte_eth_dev *eth_dev) +{ + struct lio_device *lio_dev = LIO_DEV(eth_dev); + struct lio_droq_stats *oq_stats; + struct lio_iq_stats *iq_stats; + struct lio_instr_queue *txq; + struct lio_droq *droq; + int i, iq_no, oq_no; + + for (i = 0; i < eth_dev->data->nb_tx_queues; i++) { + iq_no = lio_dev->linfo.txpciq[i].s.q_no; + txq = lio_dev->instr_queue[iq_no]; + if (txq != NULL) { + iq_stats = &txq->stats; + memset(iq_stats, 0, sizeof(struct lio_iq_stats)); + } + } + + for (i = 0; i < eth_dev->data->nb_rx_queues; i++) { + oq_no = lio_dev->linfo.rxpciq[i].s.q_no; + droq = lio_dev->droq[oq_no]; + if (droq != NULL) { + oq_stats = &droq->stats; + memset(oq_stats, 0, sizeof(struct lio_droq_stats)); + } + } + + return 0; +} + +static int +lio_dev_info_get(struct rte_eth_dev *eth_dev, + struct rte_eth_dev_info *devinfo) +{ + struct lio_device *lio_dev = LIO_DEV(eth_dev); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); + + switch (pci_dev->id.subsystem_device_id) { + /* CN23xx 10G cards */ + case PCI_SUBSYS_DEV_ID_CN2350_210: + case PCI_SUBSYS_DEV_ID_CN2360_210: + case PCI_SUBSYS_DEV_ID_CN2350_210SVPN3: + case PCI_SUBSYS_DEV_ID_CN2360_210SVPN3: + case PCI_SUBSYS_DEV_ID_CN2350_210SVPT: + case PCI_SUBSYS_DEV_ID_CN2360_210SVPT: + devinfo->speed_capa = ETH_LINK_SPEED_10G; + break; + /* CN23xx 25G cards */ + case PCI_SUBSYS_DEV_ID_CN2350_225: + case PCI_SUBSYS_DEV_ID_CN2360_225: + devinfo->speed_capa = ETH_LINK_SPEED_25G; + break; + default: + devinfo->speed_capa = ETH_LINK_SPEED_10G; + lio_dev_err(lio_dev, + "Unknown CN23XX subsystem device id. Setting 10G as default link speed.\n"); + return -EINVAL; + } + + devinfo->max_rx_queues = lio_dev->max_rx_queues; + devinfo->max_tx_queues = lio_dev->max_tx_queues; + + devinfo->min_rx_bufsize = LIO_MIN_RX_BUF_SIZE; + devinfo->max_rx_pktlen = LIO_MAX_RX_PKTLEN; + + devinfo->max_mac_addrs = 1; + + devinfo->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM | + DEV_RX_OFFLOAD_UDP_CKSUM | + DEV_RX_OFFLOAD_TCP_CKSUM | + DEV_RX_OFFLOAD_VLAN_STRIP | + DEV_RX_OFFLOAD_RSS_HASH); + devinfo->tx_offload_capa = (DEV_TX_OFFLOAD_IPV4_CKSUM | + DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_TCP_CKSUM | + DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM); + + devinfo->rx_desc_lim = lio_rx_desc_lim; + devinfo->tx_desc_lim = lio_tx_desc_lim; + + devinfo->reta_size = LIO_RSS_MAX_TABLE_SZ; + devinfo->hash_key_size = LIO_RSS_MAX_KEY_SZ; + devinfo->flow_type_rss_offloads = (ETH_RSS_IPV4 | + ETH_RSS_NONFRAG_IPV4_TCP | + ETH_RSS_IPV6 | + ETH_RSS_NONFRAG_IPV6_TCP | + ETH_RSS_IPV6_EX | + ETH_RSS_IPV6_TCP_EX); + return 0; +} + +static int +lio_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu) +{ + struct lio_device *lio_dev = LIO_DEV(eth_dev); + uint16_t pf_mtu = lio_dev->linfo.link.s.mtu; + uint32_t frame_len = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN; + struct lio_dev_ctrl_cmd ctrl_cmd; + struct lio_ctrl_pkt ctrl_pkt; + + PMD_INIT_FUNC_TRACE(); + + if (!lio_dev->intf_open) { + lio_dev_err(lio_dev, "Port %d down, can't set MTU\n", + lio_dev->port_id); + return -EINVAL; + } + + /* check if VF MTU is within allowed range. + * New value should not exceed PF MTU. 
+ */ + if (mtu < RTE_ETHER_MIN_MTU || mtu > pf_mtu) { + lio_dev_err(lio_dev, "VF MTU should be >= %d and <= %d\n", + RTE_ETHER_MIN_MTU, pf_mtu); + return -EINVAL; + } + + /* flush added to prevent cmd failure + * incase the queue is full + */ + lio_flush_iq(lio_dev, lio_dev->instr_queue[0]); + + memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt)); + memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd)); + + ctrl_cmd.eth_dev = eth_dev; + ctrl_cmd.cond = 0; + + ctrl_pkt.ncmd.s.cmd = LIO_CMD_CHANGE_MTU; + ctrl_pkt.ncmd.s.param1 = mtu; + ctrl_pkt.ctrl_cmd = &ctrl_cmd; + + if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) { + lio_dev_err(lio_dev, "Failed to send command to change MTU\n"); + return -1; + } + + if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) { + lio_dev_err(lio_dev, "Command to change MTU timed out\n"); + return -1; + } + + if (frame_len > RTE_ETHER_MAX_LEN) + eth_dev->data->dev_conf.rxmode.offloads |= + DEV_RX_OFFLOAD_JUMBO_FRAME; + else + eth_dev->data->dev_conf.rxmode.offloads &= + ~DEV_RX_OFFLOAD_JUMBO_FRAME; + + eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_len; + eth_dev->data->mtu = mtu; + + return 0; +} + +static int +lio_dev_rss_reta_update(struct rte_eth_dev *eth_dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size) +{ + struct lio_device *lio_dev = LIO_DEV(eth_dev); + struct lio_rss_ctx *rss_state = &lio_dev->rss_state; + struct lio_rss_set *rss_param; + struct lio_dev_ctrl_cmd ctrl_cmd; + struct lio_ctrl_pkt ctrl_pkt; + int i, j, index; + + if (!lio_dev->intf_open) { + lio_dev_err(lio_dev, "Port %d down, can't update reta\n", + lio_dev->port_id); + return -EINVAL; + } + + if (reta_size != LIO_RSS_MAX_TABLE_SZ) { + lio_dev_err(lio_dev, + "The size of hash lookup table configured (%d) doesn't match the number hardware can supported (%d)\n", + reta_size, LIO_RSS_MAX_TABLE_SZ); + return -EINVAL; + } + + /* flush added to prevent cmd failure + * incase the queue is full + */ + lio_flush_iq(lio_dev, lio_dev->instr_queue[0]); + + memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt)); + memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd)); + + rss_param = (struct lio_rss_set *)&ctrl_pkt.udd[0]; + + ctrl_cmd.eth_dev = eth_dev; + ctrl_cmd.cond = 0; + + ctrl_pkt.ncmd.s.cmd = LIO_CMD_SET_RSS; + ctrl_pkt.ncmd.s.more = sizeof(struct lio_rss_set) >> 3; + ctrl_pkt.ctrl_cmd = &ctrl_cmd; + + rss_param->param.flags = 0xF; + rss_param->param.flags &= ~LIO_RSS_PARAM_ITABLE_UNCHANGED; + rss_param->param.itablesize = LIO_RSS_MAX_TABLE_SZ; + + for (i = 0; i < (reta_size / RTE_RETA_GROUP_SIZE); i++) { + for (j = 0; j < RTE_RETA_GROUP_SIZE; j++) { + if ((reta_conf[i].mask) & ((uint64_t)1 << j)) { + index = (i * RTE_RETA_GROUP_SIZE) + j; + rss_state->itable[index] = reta_conf[i].reta[j]; + } + } + } + + rss_state->itable_size = LIO_RSS_MAX_TABLE_SZ; + memcpy(rss_param->itable, rss_state->itable, rss_state->itable_size); + + lio_swap_8B_data((uint64_t *)rss_param, LIO_RSS_PARAM_SIZE >> 3); + + if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) { + lio_dev_err(lio_dev, "Failed to set rss hash\n"); + return -1; + } + + if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) { + lio_dev_err(lio_dev, "Set rss hash timed out\n"); + return -1; + } + + return 0; +} + +static int +lio_dev_rss_reta_query(struct rte_eth_dev *eth_dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size) +{ + struct lio_device *lio_dev = LIO_DEV(eth_dev); + struct lio_rss_ctx *rss_state = &lio_dev->rss_state; + int i, num; + + if (reta_size != LIO_RSS_MAX_TABLE_SZ) { + lio_dev_err(lio_dev, + "The size of hash 
lookup table configured (%d) doesn't match the number hardware can supported (%d)\n", + reta_size, LIO_RSS_MAX_TABLE_SZ); + return -EINVAL; + } + + num = reta_size / RTE_RETA_GROUP_SIZE; + + for (i = 0; i < num; i++) { + memcpy(reta_conf->reta, + &rss_state->itable[i * RTE_RETA_GROUP_SIZE], + RTE_RETA_GROUP_SIZE); + reta_conf++; + } + + return 0; +} + +static int +lio_dev_rss_hash_conf_get(struct rte_eth_dev *eth_dev, + struct rte_eth_rss_conf *rss_conf) +{ + struct lio_device *lio_dev = LIO_DEV(eth_dev); + struct lio_rss_ctx *rss_state = &lio_dev->rss_state; + uint8_t *hash_key = NULL; + uint64_t rss_hf = 0; + + if (rss_state->hash_disable) { + lio_dev_info(lio_dev, "RSS disabled in nic\n"); + rss_conf->rss_hf = 0; + return 0; + } + + /* Get key value */ + hash_key = rss_conf->rss_key; + if (hash_key != NULL) + memcpy(hash_key, rss_state->hash_key, rss_state->hash_key_size); + + if (rss_state->ip) + rss_hf |= ETH_RSS_IPV4; + if (rss_state->tcp_hash) + rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP; + if (rss_state->ipv6) + rss_hf |= ETH_RSS_IPV6; + if (rss_state->ipv6_tcp_hash) + rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP; + if (rss_state->ipv6_ex) + rss_hf |= ETH_RSS_IPV6_EX; + if (rss_state->ipv6_tcp_ex_hash) + rss_hf |= ETH_RSS_IPV6_TCP_EX; + + rss_conf->rss_hf = rss_hf; + + return 0; +} + +static int +lio_dev_rss_hash_update(struct rte_eth_dev *eth_dev, + struct rte_eth_rss_conf *rss_conf) +{ + struct lio_device *lio_dev = LIO_DEV(eth_dev); + struct lio_rss_ctx *rss_state = &lio_dev->rss_state; + struct lio_rss_set *rss_param; + struct lio_dev_ctrl_cmd ctrl_cmd; + struct lio_ctrl_pkt ctrl_pkt; + + if (!lio_dev->intf_open) { + lio_dev_err(lio_dev, "Port %d down, can't update hash\n", + lio_dev->port_id); + return -EINVAL; + } + + /* flush added to prevent cmd failure + * incase the queue is full + */ + lio_flush_iq(lio_dev, lio_dev->instr_queue[0]); + + memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt)); + memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd)); + + rss_param = (struct lio_rss_set *)&ctrl_pkt.udd[0]; + + ctrl_cmd.eth_dev = eth_dev; + ctrl_cmd.cond = 0; + + ctrl_pkt.ncmd.s.cmd = LIO_CMD_SET_RSS; + ctrl_pkt.ncmd.s.more = sizeof(struct lio_rss_set) >> 3; + ctrl_pkt.ctrl_cmd = &ctrl_cmd; + + rss_param->param.flags = 0xF; + + if (rss_conf->rss_key) { + rss_param->param.flags &= ~LIO_RSS_PARAM_HASH_KEY_UNCHANGED; + rss_state->hash_key_size = LIO_RSS_MAX_KEY_SZ; + rss_param->param.hashkeysize = LIO_RSS_MAX_KEY_SZ; + memcpy(rss_state->hash_key, rss_conf->rss_key, + rss_state->hash_key_size); + memcpy(rss_param->key, rss_state->hash_key, + rss_state->hash_key_size); + } + + if ((rss_conf->rss_hf & LIO_RSS_OFFLOAD_ALL) == 0) { + /* Can't disable rss through hash flags, + * if it is enabled by default during init + */ + if (!rss_state->hash_disable) + return -EINVAL; + + /* This is for --disable-rss during testpmd launch */ + rss_param->param.flags |= LIO_RSS_PARAM_DISABLE_RSS; + } else { + uint32_t hashinfo = 0; + + /* Can't enable rss if disabled by default during init */ + if (rss_state->hash_disable) + return -EINVAL; + + if (rss_conf->rss_hf & ETH_RSS_IPV4) { + hashinfo |= LIO_RSS_HASH_IPV4; + rss_state->ip = 1; + } else { + rss_state->ip = 0; + } + + if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) { + hashinfo |= LIO_RSS_HASH_TCP_IPV4; + rss_state->tcp_hash = 1; + } else { + rss_state->tcp_hash = 0; + } + + if (rss_conf->rss_hf & ETH_RSS_IPV6) { + hashinfo |= LIO_RSS_HASH_IPV6; + rss_state->ipv6 = 1; + } else { + rss_state->ipv6 = 0; + } + + if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) { + 
hashinfo |= LIO_RSS_HASH_TCP_IPV6; + rss_state->ipv6_tcp_hash = 1; + } else { + rss_state->ipv6_tcp_hash = 0; + } + + if (rss_conf->rss_hf & ETH_RSS_IPV6_EX) { + hashinfo |= LIO_RSS_HASH_IPV6_EX; + rss_state->ipv6_ex = 1; + } else { + rss_state->ipv6_ex = 0; + } + + if (rss_conf->rss_hf & ETH_RSS_IPV6_TCP_EX) { + hashinfo |= LIO_RSS_HASH_TCP_IPV6_EX; + rss_state->ipv6_tcp_ex_hash = 1; + } else { + rss_state->ipv6_tcp_ex_hash = 0; + } + + rss_param->param.flags &= ~LIO_RSS_PARAM_HASH_INFO_UNCHANGED; + rss_param->param.hashinfo = hashinfo; + } + + lio_swap_8B_data((uint64_t *)rss_param, LIO_RSS_PARAM_SIZE >> 3); + + if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) { + lio_dev_err(lio_dev, "Failed to set rss hash\n"); + return -1; + } + + if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) { + lio_dev_err(lio_dev, "Set rss hash timed out\n"); + return -1; + } + + return 0; +} + +/** + * Add vxlan dest udp port for an interface. + * + * @param eth_dev + * Pointer to the structure rte_eth_dev + * @param udp_tnl + * udp tunnel conf + * + * @return + * On success return 0 + * On failure return -1 + */ +static int +lio_dev_udp_tunnel_add(struct rte_eth_dev *eth_dev, + struct rte_eth_udp_tunnel *udp_tnl) +{ + struct lio_device *lio_dev = LIO_DEV(eth_dev); + struct lio_dev_ctrl_cmd ctrl_cmd; + struct lio_ctrl_pkt ctrl_pkt; + + if (udp_tnl == NULL) + return -EINVAL; + + if (udp_tnl->prot_type != RTE_TUNNEL_TYPE_VXLAN) { + lio_dev_err(lio_dev, "Unsupported tunnel type\n"); + return -1; + } + + /* flush added to prevent cmd failure + * incase the queue is full + */ + lio_flush_iq(lio_dev, lio_dev->instr_queue[0]); + + memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt)); + memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd)); + + ctrl_cmd.eth_dev = eth_dev; + ctrl_cmd.cond = 0; + + ctrl_pkt.ncmd.s.cmd = LIO_CMD_VXLAN_PORT_CONFIG; + ctrl_pkt.ncmd.s.param1 = udp_tnl->udp_port; + ctrl_pkt.ncmd.s.more = LIO_CMD_VXLAN_PORT_ADD; + ctrl_pkt.ctrl_cmd = &ctrl_cmd; + + if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) { + lio_dev_err(lio_dev, "Failed to send VXLAN_PORT_ADD command\n"); + return -1; + } + + if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) { + lio_dev_err(lio_dev, "VXLAN_PORT_ADD command timed out\n"); + return -1; + } + + return 0; +} + +/** + * Remove vxlan dest udp port for an interface. 
+ * + * @param eth_dev + * Pointer to the structure rte_eth_dev + * @param udp_tnl + * udp tunnel conf + * + * @return + * On success return 0 + * On failure return -1 + */ +static int +lio_dev_udp_tunnel_del(struct rte_eth_dev *eth_dev, + struct rte_eth_udp_tunnel *udp_tnl) +{ + struct lio_device *lio_dev = LIO_DEV(eth_dev); + struct lio_dev_ctrl_cmd ctrl_cmd; + struct lio_ctrl_pkt ctrl_pkt; + + if (udp_tnl == NULL) + return -EINVAL; + + if (udp_tnl->prot_type != RTE_TUNNEL_TYPE_VXLAN) { + lio_dev_err(lio_dev, "Unsupported tunnel type\n"); + return -1; + } + + /* flush added to prevent cmd failure + * incase the queue is full + */ + lio_flush_iq(lio_dev, lio_dev->instr_queue[0]); + + memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt)); + memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd)); + + ctrl_cmd.eth_dev = eth_dev; + ctrl_cmd.cond = 0; + + ctrl_pkt.ncmd.s.cmd = LIO_CMD_VXLAN_PORT_CONFIG; + ctrl_pkt.ncmd.s.param1 = udp_tnl->udp_port; + ctrl_pkt.ncmd.s.more = LIO_CMD_VXLAN_PORT_DEL; + ctrl_pkt.ctrl_cmd = &ctrl_cmd; + + if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) { + lio_dev_err(lio_dev, "Failed to send VXLAN_PORT_DEL command\n"); + return -1; + } + + if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) { + lio_dev_err(lio_dev, "VXLAN_PORT_DEL command timed out\n"); + return -1; + } + + return 0; +} + +static int +lio_dev_vlan_filter_set(struct rte_eth_dev *eth_dev, uint16_t vlan_id, int on) +{ + struct lio_device *lio_dev = LIO_DEV(eth_dev); + struct lio_dev_ctrl_cmd ctrl_cmd; + struct lio_ctrl_pkt ctrl_pkt; + + if (lio_dev->linfo.vlan_is_admin_assigned) + return -EPERM; + + /* flush added to prevent cmd failure + * incase the queue is full + */ + lio_flush_iq(lio_dev, lio_dev->instr_queue[0]); + + memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt)); + memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd)); + + ctrl_cmd.eth_dev = eth_dev; + ctrl_cmd.cond = 0; + + ctrl_pkt.ncmd.s.cmd = on ? + LIO_CMD_ADD_VLAN_FILTER : LIO_CMD_DEL_VLAN_FILTER; + ctrl_pkt.ncmd.s.param1 = vlan_id; + ctrl_pkt.ctrl_cmd = &ctrl_cmd; + + if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) { + lio_dev_err(lio_dev, "Failed to %s VLAN port\n", + on ? "add" : "remove"); + return -1; + } + + if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) { + lio_dev_err(lio_dev, "Command to %s VLAN port timed out\n", + on ? 
"add" : "remove"); + return -1; + } + + return 0; +} + +static uint64_t +lio_hweight64(uint64_t w) +{ + uint64_t res = w - ((w >> 1) & 0x5555555555555555ul); + + res = + (res & 0x3333333333333333ul) + ((res >> 2) & 0x3333333333333333ul); + res = (res + (res >> 4)) & 0x0F0F0F0F0F0F0F0Ful; + res = res + (res >> 8); + res = res + (res >> 16); + + return (res + (res >> 32)) & 0x00000000000000FFul; +} + +static int +lio_dev_link_update(struct rte_eth_dev *eth_dev, + int wait_to_complete __rte_unused) +{ + struct lio_device *lio_dev = LIO_DEV(eth_dev); + struct rte_eth_link link; + + /* Initialize */ + memset(&link, 0, sizeof(link)); + link.link_status = ETH_LINK_DOWN; + link.link_speed = ETH_SPEED_NUM_NONE; + link.link_duplex = ETH_LINK_HALF_DUPLEX; + link.link_autoneg = ETH_LINK_AUTONEG; + + /* Return what we found */ + if (lio_dev->linfo.link.s.link_up == 0) { + /* Interface is down */ + return rte_eth_linkstatus_set(eth_dev, &link); + } + + link.link_status = ETH_LINK_UP; /* Interface is up */ + link.link_duplex = ETH_LINK_FULL_DUPLEX; + switch (lio_dev->linfo.link.s.speed) { + case LIO_LINK_SPEED_10000: + link.link_speed = ETH_SPEED_NUM_10G; + break; + case LIO_LINK_SPEED_25000: + link.link_speed = ETH_SPEED_NUM_25G; + break; + default: + link.link_speed = ETH_SPEED_NUM_NONE; + link.link_duplex = ETH_LINK_HALF_DUPLEX; + } + + return rte_eth_linkstatus_set(eth_dev, &link); +} + +/** + * \brief Net device enable, disable allmulticast + * @param eth_dev Pointer to the structure rte_eth_dev + * + * @return + * On success return 0 + * On failure return negative errno + */ +static int +lio_change_dev_flag(struct rte_eth_dev *eth_dev) +{ + struct lio_device *lio_dev = LIO_DEV(eth_dev); + struct lio_dev_ctrl_cmd ctrl_cmd; + struct lio_ctrl_pkt ctrl_pkt; + + /* flush added to prevent cmd failure + * incase the queue is full + */ + lio_flush_iq(lio_dev, lio_dev->instr_queue[0]); + + memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt)); + memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd)); + + ctrl_cmd.eth_dev = eth_dev; + ctrl_cmd.cond = 0; + + /* Create a ctrl pkt command to be sent to core app. 
*/ + ctrl_pkt.ncmd.s.cmd = LIO_CMD_CHANGE_DEVFLAGS; + ctrl_pkt.ncmd.s.param1 = lio_dev->ifflags; + ctrl_pkt.ctrl_cmd = &ctrl_cmd; + + if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) { + lio_dev_err(lio_dev, "Failed to send change flag message\n"); + return -EAGAIN; + } + + if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) { + lio_dev_err(lio_dev, "Change dev flag command timed out\n"); + return -ETIMEDOUT; + } + + return 0; +} + +static int +lio_dev_promiscuous_enable(struct rte_eth_dev *eth_dev) +{ + struct lio_device *lio_dev = LIO_DEV(eth_dev); + + if (strcmp(lio_dev->firmware_version, LIO_VF_TRUST_MIN_VERSION) < 0) { + lio_dev_err(lio_dev, "Require firmware version >= %s\n", + LIO_VF_TRUST_MIN_VERSION); + return -EAGAIN; + } + + if (!lio_dev->intf_open) { + lio_dev_err(lio_dev, "Port %d down, can't enable promiscuous\n", + lio_dev->port_id); + return -EAGAIN; + } + + lio_dev->ifflags |= LIO_IFFLAG_PROMISC; + return lio_change_dev_flag(eth_dev); +} + +static int +lio_dev_promiscuous_disable(struct rte_eth_dev *eth_dev) +{ + struct lio_device *lio_dev = LIO_DEV(eth_dev); + + if (strcmp(lio_dev->firmware_version, LIO_VF_TRUST_MIN_VERSION) < 0) { + lio_dev_err(lio_dev, "Require firmware version >= %s\n", + LIO_VF_TRUST_MIN_VERSION); + return -EAGAIN; + } + + if (!lio_dev->intf_open) { + lio_dev_err(lio_dev, "Port %d down, can't disable promiscuous\n", + lio_dev->port_id); + return -EAGAIN; + } + + lio_dev->ifflags &= ~LIO_IFFLAG_PROMISC; + return lio_change_dev_flag(eth_dev); +} + +static int +lio_dev_allmulticast_enable(struct rte_eth_dev *eth_dev) +{ + struct lio_device *lio_dev = LIO_DEV(eth_dev); + + if (!lio_dev->intf_open) { + lio_dev_err(lio_dev, "Port %d down, can't enable multicast\n", + lio_dev->port_id); + return -EAGAIN; + } + + lio_dev->ifflags |= LIO_IFFLAG_ALLMULTI; + return lio_change_dev_flag(eth_dev); +} + +static int +lio_dev_allmulticast_disable(struct rte_eth_dev *eth_dev) +{ + struct lio_device *lio_dev = LIO_DEV(eth_dev); + + if (!lio_dev->intf_open) { + lio_dev_err(lio_dev, "Port %d down, can't disable multicast\n", + lio_dev->port_id); + return -EAGAIN; + } + + lio_dev->ifflags &= ~LIO_IFFLAG_ALLMULTI; + return lio_change_dev_flag(eth_dev); +} + +static void +lio_dev_rss_configure(struct rte_eth_dev *eth_dev) +{ + struct lio_device *lio_dev = LIO_DEV(eth_dev); + struct lio_rss_ctx *rss_state = &lio_dev->rss_state; + struct rte_eth_rss_reta_entry64 reta_conf[8]; + struct rte_eth_rss_conf rss_conf; + uint16_t i; + + /* Configure the RSS key and the RSS protocols used to compute + * the RSS hash of input packets. + */ + rss_conf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf; + if ((rss_conf.rss_hf & LIO_RSS_OFFLOAD_ALL) == 0) { + rss_state->hash_disable = 1; + lio_dev_rss_hash_update(eth_dev, &rss_conf); + return; + } + + if (rss_conf.rss_key == NULL) + rss_conf.rss_key = lio_rss_key; /* Default hash key */ + + lio_dev_rss_hash_update(eth_dev, &rss_conf); + + memset(reta_conf, 0, sizeof(reta_conf)); + for (i = 0; i < LIO_RSS_MAX_TABLE_SZ; i++) { + uint8_t q_idx, conf_idx, reta_idx; + + q_idx = (uint8_t)((eth_dev->data->nb_rx_queues > 1) ? 
+ i % eth_dev->data->nb_rx_queues : 0); + conf_idx = i / RTE_RETA_GROUP_SIZE; + reta_idx = i % RTE_RETA_GROUP_SIZE; + reta_conf[conf_idx].reta[reta_idx] = q_idx; + reta_conf[conf_idx].mask |= ((uint64_t)1 << reta_idx); + } + + lio_dev_rss_reta_update(eth_dev, reta_conf, LIO_RSS_MAX_TABLE_SZ); +} + +static void +lio_dev_mq_rx_configure(struct rte_eth_dev *eth_dev) +{ + struct lio_device *lio_dev = LIO_DEV(eth_dev); + struct lio_rss_ctx *rss_state = &lio_dev->rss_state; + struct rte_eth_rss_conf rss_conf; + + switch (eth_dev->data->dev_conf.rxmode.mq_mode) { + case ETH_MQ_RX_RSS: + lio_dev_rss_configure(eth_dev); + break; + case ETH_MQ_RX_NONE: + /* if mq_mode is none, disable rss mode. */ + default: + memset(&rss_conf, 0, sizeof(rss_conf)); + rss_state->hash_disable = 1; + lio_dev_rss_hash_update(eth_dev, &rss_conf); + } +} + +/** + * Setup our receive queue/ringbuffer. This is the + * queue the Octeon uses to send us packets and + * responses. We are given a memory pool for our + * packet buffers that are used to populate the receive + * queue. + * + * @param eth_dev + * Pointer to the structure rte_eth_dev + * @param q_no + * Queue number + * @param num_rx_descs + * Number of entries in the queue + * @param socket_id + * Where to allocate memory + * @param rx_conf + * Pointer to the struction rte_eth_rxconf + * @param mp + * Pointer to the packet pool + * + * @return + * - On success, return 0 + * - On failure, return -1 + */ +static int +lio_dev_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no, + uint16_t num_rx_descs, unsigned int socket_id, + const struct rte_eth_rxconf *rx_conf __rte_unused, + struct rte_mempool *mp) +{ + struct lio_device *lio_dev = LIO_DEV(eth_dev); + struct rte_pktmbuf_pool_private *mbp_priv; + uint32_t fw_mapped_oq; + uint16_t buf_size; + + if (q_no >= lio_dev->nb_rx_queues) { + lio_dev_err(lio_dev, "Invalid rx queue number %u\n", q_no); + return -EINVAL; + } + + lio_dev_dbg(lio_dev, "setting up rx queue %u\n", q_no); + + fw_mapped_oq = lio_dev->linfo.rxpciq[q_no].s.q_no; + + /* Free previous allocation if any */ + if (eth_dev->data->rx_queues[q_no] != NULL) { + lio_dev_rx_queue_release(eth_dev->data->rx_queues[q_no]); + eth_dev->data->rx_queues[q_no] = NULL; + } + + mbp_priv = rte_mempool_get_priv(mp); + buf_size = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM; + + if (lio_setup_droq(lio_dev, fw_mapped_oq, num_rx_descs, buf_size, mp, + socket_id)) { + lio_dev_err(lio_dev, "droq allocation failed\n"); + return -1; + } + + eth_dev->data->rx_queues[q_no] = lio_dev->droq[fw_mapped_oq]; + + return 0; +} + +/** + * Release the receive queue/ringbuffer. Called by + * the upper layers. + * + * @param rxq + * Opaque pointer to the receive queue to release + * + * @return + * - nothing + */ +void +lio_dev_rx_queue_release(void *rxq) +{ + struct lio_droq *droq = rxq; + int oq_no; + + if (droq) { + oq_no = droq->q_no; + lio_delete_droq_queue(droq->lio_dev, oq_no); + } +} + +/** + * Allocate and initialize SW ring. Initialize associated HW registers. 
+ * + * @param eth_dev + * Pointer to structure rte_eth_dev + * + * @param q_no + * Queue number + * + * @param num_tx_descs + * Number of ringbuffer descriptors + * + * @param socket_id + * NUMA socket id, used for memory allocations + * + * @param tx_conf + * Pointer to the structure rte_eth_txconf + * + * @return + * - On success, return 0 + * - On failure, return -errno value + */ +static int +lio_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no, + uint16_t num_tx_descs, unsigned int socket_id, + const struct rte_eth_txconf *tx_conf __rte_unused) +{ + struct lio_device *lio_dev = LIO_DEV(eth_dev); + int fw_mapped_iq = lio_dev->linfo.txpciq[q_no].s.q_no; + int retval; + + if (q_no >= lio_dev->nb_tx_queues) { + lio_dev_err(lio_dev, "Invalid tx queue number %u\n", q_no); + return -EINVAL; + } + + lio_dev_dbg(lio_dev, "setting up tx queue %u\n", q_no); + + /* Free previous allocation if any */ + if (eth_dev->data->tx_queues[q_no] != NULL) { + lio_dev_tx_queue_release(eth_dev->data->tx_queues[q_no]); + eth_dev->data->tx_queues[q_no] = NULL; + } + + retval = lio_setup_iq(lio_dev, q_no, lio_dev->linfo.txpciq[q_no], + num_tx_descs, lio_dev, socket_id); + + if (retval) { + lio_dev_err(lio_dev, "Runtime IQ(TxQ) creation failed.\n"); + return retval; + } + + retval = lio_setup_sglists(lio_dev, q_no, fw_mapped_iq, + lio_dev->instr_queue[fw_mapped_iq]->nb_desc, + socket_id); + + if (retval) { + lio_delete_instruction_queue(lio_dev, fw_mapped_iq); + return retval; + } + + eth_dev->data->tx_queues[q_no] = lio_dev->instr_queue[fw_mapped_iq]; + + return 0; +} + +/** + * Release the transmit queue/ringbuffer. Called by + * the upper layers. + * + * @param txq + * Opaque pointer to the transmit queue to release + * + * @return + * - nothing + */ +void +lio_dev_tx_queue_release(void *txq) +{ + struct lio_instr_queue *tq = txq; + uint32_t fw_mapped_iq_no; + + + if (tq) { + /* Free sg_list */ + lio_delete_sglist(tq); + + fw_mapped_iq_no = tq->txpciq.s.q_no; + lio_delete_instruction_queue(tq->lio_dev, fw_mapped_iq_no); + } +} + +/** + * Api to check link state. 
+ */ +static void +lio_dev_get_link_status(struct rte_eth_dev *eth_dev) +{ + struct lio_device *lio_dev = LIO_DEV(eth_dev); + uint16_t timeout = LIO_MAX_CMD_TIMEOUT; + struct lio_link_status_resp *resp; + union octeon_link_status *ls; + struct lio_soft_command *sc; + uint32_t resp_size; + + if (!lio_dev->intf_open) + return; + + resp_size = sizeof(struct lio_link_status_resp); + sc = lio_alloc_soft_command(lio_dev, 0, resp_size, 0); + if (sc == NULL) + return; + + resp = (struct lio_link_status_resp *)sc->virtrptr; + lio_prepare_soft_command(lio_dev, sc, LIO_OPCODE, + LIO_OPCODE_INFO, 0, 0, 0); + + /* Setting wait time in seconds */ + sc->wait_time = LIO_MAX_CMD_TIMEOUT / 1000; + + if (lio_send_soft_command(lio_dev, sc) == LIO_IQ_SEND_FAILED) + goto get_status_fail; + + while ((*sc->status_word == LIO_COMPLETION_WORD_INIT) && --timeout) { + lio_flush_iq(lio_dev, lio_dev->instr_queue[sc->iq_no]); + rte_delay_ms(1); + } + + if (resp->status) + goto get_status_fail; + + ls = &resp->link_info.link; + + lio_swap_8B_data((uint64_t *)ls, sizeof(union octeon_link_status) >> 3); + + if (lio_dev->linfo.link.link_status64 != ls->link_status64) { + if (ls->s.mtu < eth_dev->data->mtu) { + lio_dev_info(lio_dev, "Lowered VF MTU to %d as PF MTU dropped\n", + ls->s.mtu); + eth_dev->data->mtu = ls->s.mtu; + } + lio_dev->linfo.link.link_status64 = ls->link_status64; + lio_dev_link_update(eth_dev, 0); + } + + lio_free_soft_command(sc); + + return; + +get_status_fail: + lio_free_soft_command(sc); +} + +/* This function will be invoked every LSC_TIMEOUT ns (100ms) + * and will update link state if it changes. + */ +static void +lio_sync_link_state_check(void *eth_dev) +{ + struct lio_device *lio_dev = + (((struct rte_eth_dev *)eth_dev)->data->dev_private); + + if (lio_dev->port_configured) + lio_dev_get_link_status(eth_dev); + + /* Schedule periodic link status check. + * Stop check if interface is close and start again while opening. + */ + if (lio_dev->intf_open) + rte_eal_alarm_set(LIO_LSC_TIMEOUT, lio_sync_link_state_check, + eth_dev); +} + +static int +lio_dev_start(struct rte_eth_dev *eth_dev) +{ + uint16_t mtu; + uint32_t frame_len = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len; + struct lio_device *lio_dev = LIO_DEV(eth_dev); + uint16_t timeout = LIO_MAX_CMD_TIMEOUT; + int ret = 0; + + lio_dev_info(lio_dev, "Starting port %d\n", eth_dev->data->port_id); + + if (lio_dev->fn_list.enable_io_queues(lio_dev)) + return -1; + + if (lio_send_rx_ctrl_cmd(eth_dev, 1)) + return -1; + + /* Ready for link status updates */ + lio_dev->intf_open = 1; + rte_mb(); + + /* Configure RSS if device configured with multiple RX queues. */ + lio_dev_mq_rx_configure(eth_dev); + + /* Before update the link info, + * must set linfo.link.link_status64 to 0. 
+ */ + lio_dev->linfo.link.link_status64 = 0; + + /* start polling for lsc */ + ret = rte_eal_alarm_set(LIO_LSC_TIMEOUT, + lio_sync_link_state_check, + eth_dev); + if (ret) { + lio_dev_err(lio_dev, + "link state check handler creation failed\n"); + goto dev_lsc_handle_error; + } + + while ((lio_dev->linfo.link.link_status64 == 0) && (--timeout)) + rte_delay_ms(1); + + if (lio_dev->linfo.link.link_status64 == 0) { + ret = -1; + goto dev_mtu_set_error; + } + + mtu = (uint16_t)(frame_len - RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN); + if (mtu < RTE_ETHER_MIN_MTU) + mtu = RTE_ETHER_MIN_MTU; + + if (eth_dev->data->mtu != mtu) { + ret = lio_dev_mtu_set(eth_dev, mtu); + if (ret) + goto dev_mtu_set_error; + } + + return 0; + +dev_mtu_set_error: + rte_eal_alarm_cancel(lio_sync_link_state_check, eth_dev); + +dev_lsc_handle_error: + lio_dev->intf_open = 0; + lio_send_rx_ctrl_cmd(eth_dev, 0); + + return ret; +} + +/* Stop device and disable input/output functions */ +static void +lio_dev_stop(struct rte_eth_dev *eth_dev) +{ + struct lio_device *lio_dev = LIO_DEV(eth_dev); + + lio_dev_info(lio_dev, "Stopping port %d\n", eth_dev->data->port_id); + lio_dev->intf_open = 0; + rte_mb(); + + /* Cancel callback if still running. */ + rte_eal_alarm_cancel(lio_sync_link_state_check, eth_dev); + + lio_send_rx_ctrl_cmd(eth_dev, 0); + + lio_wait_for_instr_fetch(lio_dev); + + /* Clear recorded link status */ + lio_dev->linfo.link.link_status64 = 0; +} + +static int +lio_dev_set_link_up(struct rte_eth_dev *eth_dev) +{ + struct lio_device *lio_dev = LIO_DEV(eth_dev); + + if (!lio_dev->intf_open) { + lio_dev_info(lio_dev, "Port is stopped, Start the port first\n"); + return 0; + } + + if (lio_dev->linfo.link.s.link_up) { + lio_dev_info(lio_dev, "Link is already UP\n"); + return 0; + } + + if (lio_send_rx_ctrl_cmd(eth_dev, 1)) { + lio_dev_err(lio_dev, "Unable to set Link UP\n"); + return -1; + } + + lio_dev->linfo.link.s.link_up = 1; + eth_dev->data->dev_link.link_status = ETH_LINK_UP; + + return 0; +} + +static int +lio_dev_set_link_down(struct rte_eth_dev *eth_dev) +{ + struct lio_device *lio_dev = LIO_DEV(eth_dev); + + if (!lio_dev->intf_open) { + lio_dev_info(lio_dev, "Port is stopped, Start the port first\n"); + return 0; + } + + if (!lio_dev->linfo.link.s.link_up) { + lio_dev_info(lio_dev, "Link is already DOWN\n"); + return 0; + } + + lio_dev->linfo.link.s.link_up = 0; + eth_dev->data->dev_link.link_status = ETH_LINK_DOWN; + + if (lio_send_rx_ctrl_cmd(eth_dev, 0)) { + lio_dev->linfo.link.s.link_up = 1; + eth_dev->data->dev_link.link_status = ETH_LINK_UP; + lio_dev_err(lio_dev, "Unable to set Link Down\n"); + return -1; + } + + return 0; +} + +/** + * Reset and stop the device. This occurs on the first + * call to this routine. Subsequent calls will simply + * return. NB: This will require the NIC to be rebooted. 
+ * + * @param eth_dev + * Pointer to the structure rte_eth_dev + * + * @return + * - nothing + */ +static void +lio_dev_close(struct rte_eth_dev *eth_dev) +{ + struct lio_device *lio_dev = LIO_DEV(eth_dev); + + lio_dev_info(lio_dev, "closing port %d\n", eth_dev->data->port_id); + + if (lio_dev->intf_open) + lio_dev_stop(eth_dev); + + /* Reset ioq regs */ + lio_dev->fn_list.setup_device_regs(lio_dev); + + if (lio_dev->pci_dev->kdrv == RTE_KDRV_IGB_UIO) { + cn23xx_vf_ask_pf_to_do_flr(lio_dev); + rte_delay_ms(LIO_PCI_FLR_WAIT); + } + + /* lio_free_mbox */ + lio_dev->fn_list.free_mbox(lio_dev); + + /* Free glist resources */ + rte_free(lio_dev->glist_head); + rte_free(lio_dev->glist_lock); + lio_dev->glist_head = NULL; + lio_dev->glist_lock = NULL; + + lio_dev->port_configured = 0; + + /* Delete all queues */ + lio_dev_clear_queues(eth_dev); +} + +/** + * Enable tunnel rx checksum verification from firmware. + */ +static void +lio_enable_hw_tunnel_rx_checksum(struct rte_eth_dev *eth_dev) +{ + struct lio_device *lio_dev = LIO_DEV(eth_dev); + struct lio_dev_ctrl_cmd ctrl_cmd; + struct lio_ctrl_pkt ctrl_pkt; + + /* flush added to prevent cmd failure + * incase the queue is full + */ + lio_flush_iq(lio_dev, lio_dev->instr_queue[0]); + + memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt)); + memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd)); + + ctrl_cmd.eth_dev = eth_dev; + ctrl_cmd.cond = 0; + + ctrl_pkt.ncmd.s.cmd = LIO_CMD_TNL_RX_CSUM_CTL; + ctrl_pkt.ncmd.s.param1 = LIO_CMD_RXCSUM_ENABLE; + ctrl_pkt.ctrl_cmd = &ctrl_cmd; + + if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) { + lio_dev_err(lio_dev, "Failed to send TNL_RX_CSUM command\n"); + return; + } + + if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) + lio_dev_err(lio_dev, "TNL_RX_CSUM command timed out\n"); +} + +/** + * Enable checksum calculation for inner packet in a tunnel. 
+ */ +static void +lio_enable_hw_tunnel_tx_checksum(struct rte_eth_dev *eth_dev) +{ + struct lio_device *lio_dev = LIO_DEV(eth_dev); + struct lio_dev_ctrl_cmd ctrl_cmd; + struct lio_ctrl_pkt ctrl_pkt; + + /* flush added to prevent cmd failure + * incase the queue is full + */ + lio_flush_iq(lio_dev, lio_dev->instr_queue[0]); + + memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt)); + memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd)); + + ctrl_cmd.eth_dev = eth_dev; + ctrl_cmd.cond = 0; + + ctrl_pkt.ncmd.s.cmd = LIO_CMD_TNL_TX_CSUM_CTL; + ctrl_pkt.ncmd.s.param1 = LIO_CMD_TXCSUM_ENABLE; + ctrl_pkt.ctrl_cmd = &ctrl_cmd; + + if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) { + lio_dev_err(lio_dev, "Failed to send TNL_TX_CSUM command\n"); + return; + } + + if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) + lio_dev_err(lio_dev, "TNL_TX_CSUM command timed out\n"); +} + +static int +lio_send_queue_count_update(struct rte_eth_dev *eth_dev, int num_txq, + int num_rxq) +{ + struct lio_device *lio_dev = LIO_DEV(eth_dev); + struct lio_dev_ctrl_cmd ctrl_cmd; + struct lio_ctrl_pkt ctrl_pkt; + + if (strcmp(lio_dev->firmware_version, LIO_Q_RECONF_MIN_VERSION) < 0) { + lio_dev_err(lio_dev, "Require firmware version >= %s\n", + LIO_Q_RECONF_MIN_VERSION); + return -ENOTSUP; + } + + /* flush added to prevent cmd failure + * incase the queue is full + */ + lio_flush_iq(lio_dev, lio_dev->instr_queue[0]); + + memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt)); + memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd)); + + ctrl_cmd.eth_dev = eth_dev; + ctrl_cmd.cond = 0; + + ctrl_pkt.ncmd.s.cmd = LIO_CMD_QUEUE_COUNT_CTL; + ctrl_pkt.ncmd.s.param1 = num_txq; + ctrl_pkt.ncmd.s.param2 = num_rxq; + ctrl_pkt.ctrl_cmd = &ctrl_cmd; + + if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) { + lio_dev_err(lio_dev, "Failed to send queue count control command\n"); + return -1; + } + + if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) { + lio_dev_err(lio_dev, "Queue count control command timed out\n"); + return -1; + } + + return 0; +} + +static int +lio_reconf_queues(struct rte_eth_dev *eth_dev, int num_txq, int num_rxq) +{ + struct lio_device *lio_dev = LIO_DEV(eth_dev); + + if (lio_dev->nb_rx_queues != num_rxq || + lio_dev->nb_tx_queues != num_txq) { + if (lio_send_queue_count_update(eth_dev, num_txq, num_rxq)) + return -1; + lio_dev->nb_rx_queues = num_rxq; + lio_dev->nb_tx_queues = num_txq; + } + + if (lio_dev->intf_open) + lio_dev_stop(eth_dev); + + /* Reset ioq registers */ + if (lio_dev->fn_list.setup_device_regs(lio_dev)) { + lio_dev_err(lio_dev, "Failed to configure device registers\n"); + return -1; + } + + return 0; +} + +static int +lio_dev_configure(struct rte_eth_dev *eth_dev) +{ + struct lio_device *lio_dev = LIO_DEV(eth_dev); + uint16_t timeout = LIO_MAX_CMD_TIMEOUT; + int retval, num_iqueues, num_oqueues; + uint8_t mac[RTE_ETHER_ADDR_LEN], i; + struct lio_if_cfg_resp *resp; + struct lio_soft_command *sc; + union lio_if_cfg if_cfg; + uint32_t resp_size; + + PMD_INIT_FUNC_TRACE(); + + if (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) + eth_dev->data->dev_conf.rxmode.offloads |= + DEV_RX_OFFLOAD_RSS_HASH; + + /* Inform firmware about change in number of queues to use. + * Disable IO queues and reset registers for re-configuration. 
+ */ + if (lio_dev->port_configured) + return lio_reconf_queues(eth_dev, + eth_dev->data->nb_tx_queues, + eth_dev->data->nb_rx_queues); + + lio_dev->nb_rx_queues = eth_dev->data->nb_rx_queues; + lio_dev->nb_tx_queues = eth_dev->data->nb_tx_queues; + + /* Set max number of queues which can be re-configured. */ + lio_dev->max_rx_queues = eth_dev->data->nb_rx_queues; + lio_dev->max_tx_queues = eth_dev->data->nb_tx_queues; + + resp_size = sizeof(struct lio_if_cfg_resp); + sc = lio_alloc_soft_command(lio_dev, 0, resp_size, 0); + if (sc == NULL) + return -ENOMEM; + + resp = (struct lio_if_cfg_resp *)sc->virtrptr; + + /* Firmware doesn't have capability to reconfigure the queues, + * Claim all queues, and use as many required + */ + if_cfg.if_cfg64 = 0; + if_cfg.s.num_iqueues = lio_dev->nb_tx_queues; + if_cfg.s.num_oqueues = lio_dev->nb_rx_queues; + if_cfg.s.base_queue = 0; + + if_cfg.s.gmx_port_id = lio_dev->pf_num; + + lio_prepare_soft_command(lio_dev, sc, LIO_OPCODE, + LIO_OPCODE_IF_CFG, 0, + if_cfg.if_cfg64, 0); + + /* Setting wait time in seconds */ + sc->wait_time = LIO_MAX_CMD_TIMEOUT / 1000; + + retval = lio_send_soft_command(lio_dev, sc); + if (retval == LIO_IQ_SEND_FAILED) { + lio_dev_err(lio_dev, "iq/oq config failed status: %x\n", + retval); + /* Soft instr is freed by driver in case of failure. */ + goto nic_config_fail; + } + + /* Sleep on a wait queue till the cond flag indicates that the + * response arrived or timed-out. + */ + while ((*sc->status_word == LIO_COMPLETION_WORD_INIT) && --timeout) { + lio_flush_iq(lio_dev, lio_dev->instr_queue[sc->iq_no]); + lio_process_ordered_list(lio_dev); + rte_delay_ms(1); + } + + retval = resp->status; + if (retval) { + lio_dev_err(lio_dev, "iq/oq config failed\n"); + goto nic_config_fail; + } + + strlcpy(lio_dev->firmware_version, + resp->cfg_info.lio_firmware_version, LIO_FW_VERSION_LENGTH); + + lio_swap_8B_data((uint64_t *)(&resp->cfg_info), + sizeof(struct octeon_if_cfg_info) >> 3); + + num_iqueues = lio_hweight64(resp->cfg_info.iqmask); + num_oqueues = lio_hweight64(resp->cfg_info.oqmask); + + if (!(num_iqueues) || !(num_oqueues)) { + lio_dev_err(lio_dev, + "Got bad iqueues (%016lx) or oqueues (%016lx) from firmware.\n", + (unsigned long)resp->cfg_info.iqmask, + (unsigned long)resp->cfg_info.oqmask); + goto nic_config_fail; + } + + lio_dev_dbg(lio_dev, + "interface %d, iqmask %016lx, oqmask %016lx, numiqueues %d, numoqueues %d\n", + eth_dev->data->port_id, + (unsigned long)resp->cfg_info.iqmask, + (unsigned long)resp->cfg_info.oqmask, + num_iqueues, num_oqueues); + + lio_dev->linfo.num_rxpciq = num_oqueues; + lio_dev->linfo.num_txpciq = num_iqueues; + + for (i = 0; i < num_oqueues; i++) { + lio_dev->linfo.rxpciq[i].rxpciq64 = + resp->cfg_info.linfo.rxpciq[i].rxpciq64; + lio_dev_dbg(lio_dev, "index %d OQ %d\n", + i, lio_dev->linfo.rxpciq[i].s.q_no); + } + + for (i = 0; i < num_iqueues; i++) { + lio_dev->linfo.txpciq[i].txpciq64 = + resp->cfg_info.linfo.txpciq[i].txpciq64; + lio_dev_dbg(lio_dev, "index %d IQ %d\n", + i, lio_dev->linfo.txpciq[i].s.q_no); + } + + lio_dev->linfo.hw_addr = resp->cfg_info.linfo.hw_addr; + lio_dev->linfo.gmxport = resp->cfg_info.linfo.gmxport; + lio_dev->linfo.link.link_status64 = + resp->cfg_info.linfo.link.link_status64; + + /* 64-bit swap required on LE machines */ + lio_swap_8B_data(&lio_dev->linfo.hw_addr, 1); + for (i = 0; i < RTE_ETHER_ADDR_LEN; i++) + mac[i] = *((uint8_t *)(((uint8_t *)&lio_dev->linfo.hw_addr) + + 2 + i)); + + /* Copy the permanent MAC address */ + rte_ether_addr_copy((struct 
rte_ether_addr *)mac, + ð_dev->data->mac_addrs[0]); + + /* enable firmware checksum support for tunnel packets */ + lio_enable_hw_tunnel_rx_checksum(eth_dev); + lio_enable_hw_tunnel_tx_checksum(eth_dev); + + lio_dev->glist_lock = + rte_zmalloc(NULL, sizeof(*lio_dev->glist_lock) * num_iqueues, 0); + if (lio_dev->glist_lock == NULL) + return -ENOMEM; + + lio_dev->glist_head = + rte_zmalloc(NULL, sizeof(*lio_dev->glist_head) * num_iqueues, + 0); + if (lio_dev->glist_head == NULL) { + rte_free(lio_dev->glist_lock); + lio_dev->glist_lock = NULL; + return -ENOMEM; + } + + lio_dev_link_update(eth_dev, 0); + + lio_dev->port_configured = 1; + + lio_free_soft_command(sc); + + /* Reset ioq regs */ + lio_dev->fn_list.setup_device_regs(lio_dev); + + /* Free iq_0 used during init */ + lio_free_instr_queue0(lio_dev); + + return 0; + +nic_config_fail: + lio_dev_err(lio_dev, "Failed retval %d\n", retval); + lio_free_soft_command(sc); + lio_free_instr_queue0(lio_dev); + + return -ENODEV; +} + +/* Define our ethernet definitions */ +static const struct eth_dev_ops liovf_eth_dev_ops = { + .dev_configure = lio_dev_configure, + .dev_start = lio_dev_start, + .dev_stop = lio_dev_stop, + .dev_set_link_up = lio_dev_set_link_up, + .dev_set_link_down = lio_dev_set_link_down, + .dev_close = lio_dev_close, + .promiscuous_enable = lio_dev_promiscuous_enable, + .promiscuous_disable = lio_dev_promiscuous_disable, + .allmulticast_enable = lio_dev_allmulticast_enable, + .allmulticast_disable = lio_dev_allmulticast_disable, + .link_update = lio_dev_link_update, + .stats_get = lio_dev_stats_get, + .xstats_get = lio_dev_xstats_get, + .xstats_get_names = lio_dev_xstats_get_names, + .stats_reset = lio_dev_stats_reset, + .xstats_reset = lio_dev_xstats_reset, + .dev_infos_get = lio_dev_info_get, + .vlan_filter_set = lio_dev_vlan_filter_set, + .rx_queue_setup = lio_dev_rx_queue_setup, + .rx_queue_release = lio_dev_rx_queue_release, + .tx_queue_setup = lio_dev_tx_queue_setup, + .tx_queue_release = lio_dev_tx_queue_release, + .reta_update = lio_dev_rss_reta_update, + .reta_query = lio_dev_rss_reta_query, + .rss_hash_conf_get = lio_dev_rss_hash_conf_get, + .rss_hash_update = lio_dev_rss_hash_update, + .udp_tunnel_port_add = lio_dev_udp_tunnel_add, + .udp_tunnel_port_del = lio_dev_udp_tunnel_del, + .mtu_set = lio_dev_mtu_set, +}; + +static void +lio_check_pf_hs_response(void *lio_dev) +{ + struct lio_device *dev = lio_dev; + + /* check till response arrives */ + if (dev->pfvf_hsword.coproc_tics_per_us) + return; + + cn23xx_vf_handle_mbox(dev); + + rte_eal_alarm_set(1, lio_check_pf_hs_response, lio_dev); +} + +/** + * \brief Identify the LIO device and to map the BAR address space + * @param lio_dev lio device + */ +static int +lio_chip_specific_setup(struct lio_device *lio_dev) +{ + struct rte_pci_device *pdev = lio_dev->pci_dev; + uint32_t dev_id = pdev->id.device_id; + const char *s; + int ret = 1; + + switch (dev_id) { + case LIO_CN23XX_VF_VID: + lio_dev->chip_id = LIO_CN23XX_VF_VID; + ret = cn23xx_vf_setup_device(lio_dev); + s = "CN23XX VF"; + break; + default: + s = "?"; + lio_dev_err(lio_dev, "Unsupported Chip\n"); + } + + if (!ret) + lio_dev_info(lio_dev, "DEVICE : %s\n", s); + + return ret; +} + +static int +lio_first_time_init(struct lio_device *lio_dev, + struct rte_pci_device *pdev) +{ + int dpdk_queues; + + PMD_INIT_FUNC_TRACE(); + + /* set dpdk specific pci device pointer */ + lio_dev->pci_dev = pdev; + + /* Identify the LIO type and set device ops */ + if (lio_chip_specific_setup(lio_dev)) { + lio_dev_err(lio_dev, "Chip 
specific setup failed\n"); + return -1; + } + + /* Initialize soft command buffer pool */ + if (lio_setup_sc_buffer_pool(lio_dev)) { + lio_dev_err(lio_dev, "sc buffer pool allocation failed\n"); + return -1; + } + + /* Initialize lists to manage the requests of different types that + * arrive from applications for this lio device. + */ + lio_setup_response_list(lio_dev); + + if (lio_dev->fn_list.setup_mbox(lio_dev)) { + lio_dev_err(lio_dev, "Mailbox setup failed\n"); + goto error; + } + + /* Check PF response */ + lio_check_pf_hs_response((void *)lio_dev); + + /* Do handshake and exit if incompatible PF driver */ + if (cn23xx_pfvf_handshake(lio_dev)) + goto error; + + /* Request and wait for device reset. */ + if (pdev->kdrv == RTE_KDRV_IGB_UIO) { + cn23xx_vf_ask_pf_to_do_flr(lio_dev); + /* FLR wait time doubled as a precaution. */ + rte_delay_ms(LIO_PCI_FLR_WAIT * 2); + } + + if (lio_dev->fn_list.setup_device_regs(lio_dev)) { + lio_dev_err(lio_dev, "Failed to configure device registers\n"); + goto error; + } + + if (lio_setup_instr_queue0(lio_dev)) { + lio_dev_err(lio_dev, "Failed to setup instruction queue 0\n"); + goto error; + } + + dpdk_queues = (int)lio_dev->sriov_info.rings_per_vf; + + lio_dev->max_tx_queues = dpdk_queues; + lio_dev->max_rx_queues = dpdk_queues; + + /* Enable input and output queues for this device */ + if (lio_dev->fn_list.enable_io_queues(lio_dev)) + goto error; + + return 0; + +error: + lio_free_sc_buffer_pool(lio_dev); + if (lio_dev->mbox[0]) + lio_dev->fn_list.free_mbox(lio_dev); + if (lio_dev->instr_queue[0]) + lio_free_instr_queue0(lio_dev); + + return -1; +} + +static int +lio_eth_dev_uninit(struct rte_eth_dev *eth_dev) +{ + struct lio_device *lio_dev = LIO_DEV(eth_dev); + + PMD_INIT_FUNC_TRACE(); + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; + + /* lio_free_sc_buffer_pool */ + lio_free_sc_buffer_pool(lio_dev); + + eth_dev->dev_ops = NULL; + eth_dev->rx_pkt_burst = NULL; + eth_dev->tx_pkt_burst = NULL; + + return 0; +} + +static int +lio_eth_dev_init(struct rte_eth_dev *eth_dev) +{ + struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(eth_dev); + struct lio_device *lio_dev = LIO_DEV(eth_dev); + + PMD_INIT_FUNC_TRACE(); + + eth_dev->rx_pkt_burst = &lio_dev_recv_pkts; + eth_dev->tx_pkt_burst = &lio_dev_xmit_pkts; + + /* Primary does the initialization. 
*/ + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; + + rte_eth_copy_pci_info(eth_dev, pdev); + + if (pdev->mem_resource[0].addr) { + lio_dev->hw_addr = pdev->mem_resource[0].addr; + } else { + PMD_INIT_LOG(ERR, "ERROR: Failed to map BAR0\n"); + return -ENODEV; + } + + lio_dev->eth_dev = eth_dev; + /* set lio device print string */ + snprintf(lio_dev->dev_string, sizeof(lio_dev->dev_string), + "%s[%02x:%02x.%x]", pdev->driver->driver.name, + pdev->addr.bus, pdev->addr.devid, pdev->addr.function); + + lio_dev->port_id = eth_dev->data->port_id; + + if (lio_first_time_init(lio_dev, pdev)) { + lio_dev_err(lio_dev, "Device init failed\n"); + return -EINVAL; + } + + eth_dev->dev_ops = &liovf_eth_dev_ops; + eth_dev->data->mac_addrs = rte_zmalloc("lio", RTE_ETHER_ADDR_LEN, 0); + if (eth_dev->data->mac_addrs == NULL) { + lio_dev_err(lio_dev, + "MAC addresses memory allocation failed\n"); + eth_dev->dev_ops = NULL; + eth_dev->rx_pkt_burst = NULL; + eth_dev->tx_pkt_burst = NULL; + return -ENOMEM; + } + + rte_atomic64_set(&lio_dev->status, LIO_DEV_RUNNING); + rte_wmb(); + + lio_dev->port_configured = 0; + /* Always allow unicast packets */ + lio_dev->ifflags |= LIO_IFFLAG_UNICAST; + + return 0; +} + +static int +lio_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, + struct rte_pci_device *pci_dev) +{ + return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct lio_device), + lio_eth_dev_init); +} + +static int +lio_eth_dev_pci_remove(struct rte_pci_device *pci_dev) +{ + return rte_eth_dev_pci_generic_remove(pci_dev, + lio_eth_dev_uninit); +} + +/* Set of PCI devices this driver supports */ +static const struct rte_pci_id pci_id_liovf_map[] = { + { RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, LIO_CN23XX_VF_VID) }, + { .vendor_id = 0, /* sentinel */ } +}; + +static struct rte_pci_driver rte_liovf_pmd = { + .id_table = pci_id_liovf_map, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING, + .probe = lio_eth_dev_pci_probe, + .remove = lio_eth_dev_pci_remove, +}; + +RTE_PMD_REGISTER_PCI(net_liovf, rte_liovf_pmd); +RTE_PMD_REGISTER_PCI_TABLE(net_liovf, pci_id_liovf_map); +RTE_PMD_REGISTER_KMOD_DEP(net_liovf, "* igb_uio | vfio-pci"); + +RTE_INIT(lio_init_log) +{ + lio_logtype_init = rte_log_register("pmd.net.liquidio.init"); + if (lio_logtype_init >= 0) + rte_log_set_level(lio_logtype_init, RTE_LOG_NOTICE); + lio_logtype_driver = rte_log_register("pmd.net.liquidio.driver"); + if (lio_logtype_driver >= 0) + rte_log_set_level(lio_logtype_driver, RTE_LOG_NOTICE); +} diff --git a/src/spdk/dpdk/drivers/net/liquidio/lio_ethdev.h b/src/spdk/dpdk/drivers/net/liquidio/lio_ethdev.h new file mode 100644 index 000000000..74cd2fb6c --- /dev/null +++ b/src/spdk/dpdk/drivers/net/liquidio/lio_ethdev.h @@ -0,0 +1,176 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Cavium, Inc + */ + +#ifndef _LIO_ETHDEV_H_ +#define _LIO_ETHDEV_H_ + +#include + +#include "lio_struct.h" + +/* timeout to check link state updates from firmware in us */ +#define LIO_LSC_TIMEOUT 100000 /* 100000us (100ms) */ +#define LIO_MAX_CMD_TIMEOUT 10000 /* 10000ms (10s) */ + +#define LIO_DEV(_eth_dev) ((_eth_dev)->data->dev_private) + +/* LIO Response condition variable */ +struct lio_dev_ctrl_cmd { + struct rte_eth_dev *eth_dev; + uint64_t cond; +}; + +enum lio_bus_speed { + LIO_LINK_SPEED_UNKNOWN = 0, + LIO_LINK_SPEED_10000 = 10000, + LIO_LINK_SPEED_25000 = 25000 +}; + +struct octeon_if_cfg_info { + uint64_t iqmask; /** mask for IQs enabled for the port */ + uint64_t oqmask; /** mask for OQs enabled for the port */ + struct 
octeon_link_info linfo; /** initial link information */ + char lio_firmware_version[LIO_FW_VERSION_LENGTH]; +}; + +/** Stats for each NIC port in RX direction. */ +struct octeon_rx_stats { + /* link-level stats */ + uint64_t total_rcvd; + uint64_t bytes_rcvd; + uint64_t total_bcst; + uint64_t total_mcst; + uint64_t runts; + uint64_t ctl_rcvd; + uint64_t fifo_err; /* Accounts for over/under-run of buffers */ + uint64_t dmac_drop; + uint64_t fcs_err; + uint64_t jabber_err; + uint64_t l2_err; + uint64_t frame_err; + + /* firmware stats */ + uint64_t fw_total_rcvd; + uint64_t fw_total_fwd; + uint64_t fw_total_fwd_bytes; + uint64_t fw_err_pko; + uint64_t fw_err_link; + uint64_t fw_err_drop; + uint64_t fw_rx_vxlan; + uint64_t fw_rx_vxlan_err; + + /* LRO */ + uint64_t fw_lro_pkts; /* Number of packets that are LROed */ + uint64_t fw_lro_octs; /* Number of octets that are LROed */ + uint64_t fw_total_lro; /* Number of LRO packets formed */ + uint64_t fw_lro_aborts; /* Number of times lRO of packet aborted */ + uint64_t fw_lro_aborts_port; + uint64_t fw_lro_aborts_seq; + uint64_t fw_lro_aborts_tsval; + uint64_t fw_lro_aborts_timer; + /* intrmod: packet forward rate */ + uint64_t fwd_rate; +}; + +/** Stats for each NIC port in RX direction. */ +struct octeon_tx_stats { + /* link-level stats */ + uint64_t total_pkts_sent; + uint64_t total_bytes_sent; + uint64_t mcast_pkts_sent; + uint64_t bcast_pkts_sent; + uint64_t ctl_sent; + uint64_t one_collision_sent; /* Packets sent after one collision */ + /* Packets sent after multiple collision */ + uint64_t multi_collision_sent; + /* Packets not sent due to max collisions */ + uint64_t max_collision_fail; + /* Packets not sent due to max deferrals */ + uint64_t max_deferral_fail; + /* Accounts for over/under-run of buffers */ + uint64_t fifo_err; + uint64_t runts; + uint64_t total_collisions; /* Total number of collisions detected */ + + /* firmware stats */ + uint64_t fw_total_sent; + uint64_t fw_total_fwd; + uint64_t fw_total_fwd_bytes; + uint64_t fw_err_pko; + uint64_t fw_err_link; + uint64_t fw_err_drop; + uint64_t fw_err_tso; + uint64_t fw_tso; /* number of tso requests */ + uint64_t fw_tso_fwd; /* number of packets segmented in tso */ + uint64_t fw_tx_vxlan; +}; + +struct octeon_link_stats { + struct octeon_rx_stats fromwire; + struct octeon_tx_stats fromhost; +}; + +union lio_if_cfg { + uint64_t if_cfg64; + struct { +#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN + uint64_t base_queue : 16; + uint64_t num_iqueues : 16; + uint64_t num_oqueues : 16; + uint64_t gmx_port_id : 8; + uint64_t vf_id : 8; +#else + uint64_t vf_id : 8; + uint64_t gmx_port_id : 8; + uint64_t num_oqueues : 16; + uint64_t num_iqueues : 16; + uint64_t base_queue : 16; +#endif + } s; +}; + +struct lio_if_cfg_resp { + uint64_t rh; + struct octeon_if_cfg_info cfg_info; + uint64_t status; +}; + +struct lio_link_stats_resp { + uint64_t rh; + struct octeon_link_stats link_stats; + uint64_t status; +}; + +struct lio_link_status_resp { + uint64_t rh; + struct octeon_link_info link_info; + uint64_t status; +}; + +struct lio_rss_set { + struct param { +#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN + uint64_t flags : 16; + uint64_t hashinfo : 32; + uint64_t itablesize : 16; + uint64_t hashkeysize : 16; + uint64_t reserved : 48; +#elif RTE_BYTE_ORDER == RTE_BIG_ENDIAN + uint64_t itablesize : 16; + uint64_t hashinfo : 32; + uint64_t flags : 16; + uint64_t reserved : 48; + uint64_t hashkeysize : 16; +#endif + } param; + + uint8_t itable[LIO_RSS_MAX_TABLE_SZ]; + uint8_t key[LIO_RSS_MAX_KEY_SZ]; +}; + +void 
lio_dev_rx_queue_release(void *rxq); + +void lio_dev_tx_queue_release(void *txq); + +#endif /* _LIO_ETHDEV_H_ */ diff --git a/src/spdk/dpdk/drivers/net/liquidio/lio_logs.h b/src/spdk/dpdk/drivers/net/liquidio/lio_logs.h new file mode 100644 index 000000000..f22782708 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/liquidio/lio_logs.h @@ -0,0 +1,58 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Cavium, Inc + */ + +#ifndef _LIO_LOGS_H_ +#define _LIO_LOGS_H_ + +extern int lio_logtype_driver; +#define lio_dev_printf(lio_dev, level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, lio_logtype_driver, \ + "%s" fmt, (lio_dev)->dev_string, ##args) + +#define lio_dev_info(lio_dev, fmt, args...) \ + lio_dev_printf(lio_dev, INFO, "INFO: " fmt, ##args) + +#define lio_dev_err(lio_dev, fmt, args...) \ + lio_dev_printf(lio_dev, ERR, "ERROR: %s() " fmt, __func__, ##args) + +extern int lio_logtype_init; +#define PMD_INIT_LOG(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, lio_logtype_init, \ + fmt, ## args) + +/* Enable these through config options */ +#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, "%s() >>\n", __func__) + +#define lio_dev_dbg(lio_dev, fmt, args...) \ + lio_dev_printf(lio_dev, DEBUG, "DEBUG: %s() " fmt, __func__, ##args) + +#ifdef RTE_LIBRTE_LIO_DEBUG_RX +#define PMD_RX_LOG(lio_dev, level, fmt, args...) \ + lio_dev_printf(lio_dev, level, "RX: %s() " fmt, __func__, ##args) +#else /* !RTE_LIBRTE_LIO_DEBUG_RX */ +#define PMD_RX_LOG(lio_dev, level, fmt, args...) do { } while (0) +#endif /* RTE_LIBRTE_LIO_DEBUG_RX */ + +#ifdef RTE_LIBRTE_LIO_DEBUG_TX +#define PMD_TX_LOG(lio_dev, level, fmt, args...) \ + lio_dev_printf(lio_dev, level, "TX: %s() " fmt, __func__, ##args) +#else /* !RTE_LIBRTE_LIO_DEBUG_TX */ +#define PMD_TX_LOG(lio_dev, level, fmt, args...) do { } while (0) +#endif /* RTE_LIBRTE_LIO_DEBUG_TX */ + +#ifdef RTE_LIBRTE_LIO_DEBUG_MBOX +#define PMD_MBOX_LOG(lio_dev, level, fmt, args...) \ + lio_dev_printf(lio_dev, level, "MBOX: %s() " fmt, __func__, ##args) +#else /* !RTE_LIBRTE_LIO_DEBUG_MBOX */ +#define PMD_MBOX_LOG(level, fmt, args...) do { } while (0) +#endif /* RTE_LIBRTE_LIO_DEBUG_MBOX */ + +#ifdef RTE_LIBRTE_LIO_DEBUG_REGS +#define PMD_REGS_LOG(lio_dev, fmt, args...) \ + lio_dev_printf(lio_dev, DEBUG, "REGS: " fmt, ##args) +#else /* !RTE_LIBRTE_LIO_DEBUG_REGS */ +#define PMD_REGS_LOG(level, fmt, args...) 
do { } while (0) +#endif /* RTE_LIBRTE_LIO_DEBUG_REGS */ + +#endif /* _LIO_LOGS_H_ */ diff --git a/src/spdk/dpdk/drivers/net/liquidio/lio_rxtx.c b/src/spdk/dpdk/drivers/net/liquidio/lio_rxtx.c new file mode 100644 index 000000000..8d705bfe7 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/liquidio/lio_rxtx.c @@ -0,0 +1,1806 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Cavium, Inc + */ + +#include +#include +#include + +#include "lio_logs.h" +#include "lio_struct.h" +#include "lio_ethdev.h" +#include "lio_rxtx.h" + +#define LIO_MAX_SG 12 +/* Flush iq if available tx_desc fall below LIO_FLUSH_WM */ +#define LIO_FLUSH_WM(_iq) ((_iq)->nb_desc / 2) +#define LIO_PKT_IN_DONE_CNT_MASK 0x00000000FFFFFFFFULL + +static void +lio_droq_compute_max_packet_bufs(struct lio_droq *droq) +{ + uint32_t count = 0; + + do { + count += droq->buffer_size; + } while (count < LIO_MAX_RX_PKTLEN); +} + +static void +lio_droq_reset_indices(struct lio_droq *droq) +{ + droq->read_idx = 0; + droq->write_idx = 0; + droq->refill_idx = 0; + droq->refill_count = 0; + rte_atomic64_set(&droq->pkts_pending, 0); +} + +static void +lio_droq_destroy_ring_buffers(struct lio_droq *droq) +{ + uint32_t i; + + for (i = 0; i < droq->nb_desc; i++) { + if (droq->recv_buf_list[i].buffer) { + rte_pktmbuf_free((struct rte_mbuf *) + droq->recv_buf_list[i].buffer); + droq->recv_buf_list[i].buffer = NULL; + } + } + + lio_droq_reset_indices(droq); +} + +static int +lio_droq_setup_ring_buffers(struct lio_device *lio_dev, + struct lio_droq *droq) +{ + struct lio_droq_desc *desc_ring = droq->desc_ring; + uint32_t i; + void *buf; + + for (i = 0; i < droq->nb_desc; i++) { + buf = rte_pktmbuf_alloc(droq->mpool); + if (buf == NULL) { + lio_dev_err(lio_dev, "buffer alloc failed\n"); + droq->stats.rx_alloc_failure++; + lio_droq_destroy_ring_buffers(droq); + return -ENOMEM; + } + + droq->recv_buf_list[i].buffer = buf; + droq->info_list[i].length = 0; + + /* map ring buffers into memory */ + desc_ring[i].info_ptr = lio_map_ring_info(droq, i); + desc_ring[i].buffer_ptr = + lio_map_ring(droq->recv_buf_list[i].buffer); + } + + lio_droq_reset_indices(droq); + + lio_droq_compute_max_packet_bufs(droq); + + return 0; +} + +static void +lio_dma_zone_free(struct lio_device *lio_dev, const struct rte_memzone *mz) +{ + const struct rte_memzone *mz_tmp; + int ret = 0; + + if (mz == NULL) { + lio_dev_err(lio_dev, "Memzone NULL\n"); + return; + } + + mz_tmp = rte_memzone_lookup(mz->name); + if (mz_tmp == NULL) { + lio_dev_err(lio_dev, "Memzone %s Not Found\n", mz->name); + return; + } + + ret = rte_memzone_free(mz); + if (ret) + lio_dev_err(lio_dev, "Memzone free Failed ret %d\n", ret); +} + +/** + * Frees the space for descriptor ring for the droq. + * + * @param lio_dev - pointer to the lio device structure + * @param q_no - droq no. 
+ */ +static void +lio_delete_droq(struct lio_device *lio_dev, uint32_t q_no) +{ + struct lio_droq *droq = lio_dev->droq[q_no]; + + lio_dev_dbg(lio_dev, "OQ[%d]\n", q_no); + + lio_droq_destroy_ring_buffers(droq); + rte_free(droq->recv_buf_list); + droq->recv_buf_list = NULL; + lio_dma_zone_free(lio_dev, droq->info_mz); + lio_dma_zone_free(lio_dev, droq->desc_ring_mz); + + memset(droq, 0, LIO_DROQ_SIZE); +} + +static void * +lio_alloc_info_buffer(struct lio_device *lio_dev, + struct lio_droq *droq, unsigned int socket_id) +{ + droq->info_mz = rte_eth_dma_zone_reserve(lio_dev->eth_dev, + "info_list", droq->q_no, + (droq->nb_desc * + LIO_DROQ_INFO_SIZE), + RTE_CACHE_LINE_SIZE, + socket_id); + + if (droq->info_mz == NULL) + return NULL; + + droq->info_list_dma = droq->info_mz->iova; + droq->info_alloc_size = droq->info_mz->len; + droq->info_base_addr = (size_t)droq->info_mz->addr; + + return droq->info_mz->addr; +} + +/** + * Allocates space for the descriptor ring for the droq and + * sets the base addr, num desc etc in Octeon registers. + * + * @param lio_dev - pointer to the lio device structure + * @param q_no - droq no. + * @param app_ctx - pointer to application context + * @return Success: 0 Failure: -1 + */ +static int +lio_init_droq(struct lio_device *lio_dev, uint32_t q_no, + uint32_t num_descs, uint32_t desc_size, + struct rte_mempool *mpool, unsigned int socket_id) +{ + uint32_t c_refill_threshold; + uint32_t desc_ring_size; + struct lio_droq *droq; + + lio_dev_dbg(lio_dev, "OQ[%d]\n", q_no); + + droq = lio_dev->droq[q_no]; + droq->lio_dev = lio_dev; + droq->q_no = q_no; + droq->mpool = mpool; + + c_refill_threshold = LIO_OQ_REFILL_THRESHOLD_CFG(lio_dev); + + droq->nb_desc = num_descs; + droq->buffer_size = desc_size; + + desc_ring_size = droq->nb_desc * LIO_DROQ_DESC_SIZE; + droq->desc_ring_mz = rte_eth_dma_zone_reserve(lio_dev->eth_dev, + "droq", q_no, + desc_ring_size, + RTE_CACHE_LINE_SIZE, + socket_id); + + if (droq->desc_ring_mz == NULL) { + lio_dev_err(lio_dev, + "Output queue %d ring alloc failed\n", q_no); + return -1; + } + + droq->desc_ring_dma = droq->desc_ring_mz->iova; + droq->desc_ring = (struct lio_droq_desc *)droq->desc_ring_mz->addr; + + lio_dev_dbg(lio_dev, "droq[%d]: desc_ring: virt: 0x%p, dma: %lx\n", + q_no, droq->desc_ring, (unsigned long)droq->desc_ring_dma); + lio_dev_dbg(lio_dev, "droq[%d]: num_desc: %d\n", q_no, + droq->nb_desc); + + droq->info_list = lio_alloc_info_buffer(lio_dev, droq, socket_id); + if (droq->info_list == NULL) { + lio_dev_err(lio_dev, "Cannot allocate memory for info list.\n"); + goto init_droq_fail; + } + + droq->recv_buf_list = rte_zmalloc_socket("recv_buf_list", + (droq->nb_desc * + LIO_DROQ_RECVBUF_SIZE), + RTE_CACHE_LINE_SIZE, + socket_id); + if (droq->recv_buf_list == NULL) { + lio_dev_err(lio_dev, + "Output queue recv buf list alloc failed\n"); + goto init_droq_fail; + } + + if (lio_droq_setup_ring_buffers(lio_dev, droq)) + goto init_droq_fail; + + droq->refill_threshold = c_refill_threshold; + + rte_spinlock_init(&droq->lock); + + lio_dev->fn_list.setup_oq_regs(lio_dev, q_no); + + lio_dev->io_qmask.oq |= (1ULL << q_no); + + return 0; + +init_droq_fail: + lio_delete_droq(lio_dev, q_no); + + return -1; +} + +int +lio_setup_droq(struct lio_device *lio_dev, int oq_no, int num_descs, + int desc_size, struct rte_mempool *mpool, unsigned int socket_id) +{ + struct lio_droq *droq; + + PMD_INIT_FUNC_TRACE(); + + /* Allocate the DS for the new droq. 
*/ + droq = rte_zmalloc_socket("ethdev RX queue", sizeof(*droq), + RTE_CACHE_LINE_SIZE, socket_id); + if (droq == NULL) + return -ENOMEM; + + lio_dev->droq[oq_no] = droq; + + /* Initialize the Droq */ + if (lio_init_droq(lio_dev, oq_no, num_descs, desc_size, mpool, + socket_id)) { + lio_dev_err(lio_dev, "Droq[%u] Initialization Failed\n", oq_no); + rte_free(lio_dev->droq[oq_no]); + lio_dev->droq[oq_no] = NULL; + return -ENOMEM; + } + + lio_dev->num_oqs++; + + lio_dev_dbg(lio_dev, "Total number of OQ: %d\n", lio_dev->num_oqs); + + /* Send credit for octeon output queues. credits are always + * sent after the output queue is enabled. + */ + rte_write32(lio_dev->droq[oq_no]->nb_desc, + lio_dev->droq[oq_no]->pkts_credit_reg); + rte_wmb(); + + return 0; +} + +static inline uint32_t +lio_droq_get_bufcount(uint32_t buf_size, uint32_t total_len) +{ + uint32_t buf_cnt = 0; + + while (total_len > (buf_size * buf_cnt)) + buf_cnt++; + + return buf_cnt; +} + +/* If we were not able to refill all buffers, try to move around + * the buffers that were not dispatched. + */ +static inline uint32_t +lio_droq_refill_pullup_descs(struct lio_droq *droq, + struct lio_droq_desc *desc_ring) +{ + uint32_t refill_index = droq->refill_idx; + uint32_t desc_refilled = 0; + + while (refill_index != droq->read_idx) { + if (droq->recv_buf_list[refill_index].buffer) { + droq->recv_buf_list[droq->refill_idx].buffer = + droq->recv_buf_list[refill_index].buffer; + desc_ring[droq->refill_idx].buffer_ptr = + desc_ring[refill_index].buffer_ptr; + droq->recv_buf_list[refill_index].buffer = NULL; + desc_ring[refill_index].buffer_ptr = 0; + do { + droq->refill_idx = lio_incr_index( + droq->refill_idx, 1, + droq->nb_desc); + desc_refilled++; + droq->refill_count--; + } while (droq->recv_buf_list[droq->refill_idx].buffer); + } + refill_index = lio_incr_index(refill_index, 1, + droq->nb_desc); + } /* while */ + + return desc_refilled; +} + +/* lio_droq_refill + * + * @param droq - droq in which descriptors require new buffers. + * + * Description: + * Called during normal DROQ processing in interrupt mode or by the poll + * thread to refill the descriptors from which buffers were dispatched + * to upper layers. Attempts to allocate new buffers. If that fails, moves + * up buffers (that were not dispatched) to form a contiguous ring. + * + * Returns: + * No of descriptors refilled. + * + * Locks: + * This routine is called with droq->lock held. + */ +static uint32_t +lio_droq_refill(struct lio_droq *droq) +{ + struct lio_droq_desc *desc_ring; + uint32_t desc_refilled = 0; + void *buf = NULL; + + desc_ring = droq->desc_ring; + + while (droq->refill_count && (desc_refilled < droq->nb_desc)) { + /* If a valid buffer exists (happens if there is no dispatch), + * reuse the buffer, else allocate. + */ + if (droq->recv_buf_list[droq->refill_idx].buffer == NULL) { + buf = rte_pktmbuf_alloc(droq->mpool); + /* If a buffer could not be allocated, no point in + * continuing + */ + if (buf == NULL) { + droq->stats.rx_alloc_failure++; + break; + } + + droq->recv_buf_list[droq->refill_idx].buffer = buf; + } + + desc_ring[droq->refill_idx].buffer_ptr = + lio_map_ring(droq->recv_buf_list[droq->refill_idx].buffer); + /* Reset any previous values in the length field. 
*/ + droq->info_list[droq->refill_idx].length = 0; + + droq->refill_idx = lio_incr_index(droq->refill_idx, 1, + droq->nb_desc); + desc_refilled++; + droq->refill_count--; + } + + if (droq->refill_count) + desc_refilled += lio_droq_refill_pullup_descs(droq, desc_ring); + + /* if droq->refill_count + * The refill count would not change in pass two. We only moved buffers + * to close the gap in the ring, but we would still have the same no. of + * buffers to refill. + */ + return desc_refilled; +} + +static int +lio_droq_fast_process_packet(struct lio_device *lio_dev, + struct lio_droq *droq, + struct rte_mbuf **rx_pkts) +{ + struct rte_mbuf *nicbuf = NULL; + struct lio_droq_info *info; + uint32_t total_len = 0; + int data_total_len = 0; + uint32_t pkt_len = 0; + union octeon_rh *rh; + int data_pkts = 0; + + info = &droq->info_list[droq->read_idx]; + lio_swap_8B_data((uint64_t *)info, 2); + + if (!info->length) + return -1; + + /* Len of resp hdr in included in the received data len. */ + info->length -= OCTEON_RH_SIZE; + rh = &info->rh; + + total_len += (uint32_t)info->length; + + if (lio_opcode_slow_path(rh)) { + uint32_t buf_cnt; + + buf_cnt = lio_droq_get_bufcount(droq->buffer_size, + (uint32_t)info->length); + droq->read_idx = lio_incr_index(droq->read_idx, buf_cnt, + droq->nb_desc); + droq->refill_count += buf_cnt; + } else { + if (info->length <= droq->buffer_size) { + if (rh->r_dh.has_hash) + pkt_len = (uint32_t)(info->length - 8); + else + pkt_len = (uint32_t)info->length; + + nicbuf = droq->recv_buf_list[droq->read_idx].buffer; + droq->recv_buf_list[droq->read_idx].buffer = NULL; + droq->read_idx = lio_incr_index( + droq->read_idx, 1, + droq->nb_desc); + droq->refill_count++; + + if (likely(nicbuf != NULL)) { + /* We don't have a way to pass flags yet */ + nicbuf->ol_flags = 0; + if (rh->r_dh.has_hash) { + uint64_t *hash_ptr; + + nicbuf->ol_flags |= PKT_RX_RSS_HASH; + hash_ptr = rte_pktmbuf_mtod(nicbuf, + uint64_t *); + lio_swap_8B_data(hash_ptr, 1); + nicbuf->hash.rss = (uint32_t)*hash_ptr; + nicbuf->data_off += 8; + } + + nicbuf->pkt_len = pkt_len; + nicbuf->data_len = pkt_len; + nicbuf->port = lio_dev->port_id; + /* Store the mbuf */ + rx_pkts[data_pkts++] = nicbuf; + data_total_len += pkt_len; + } + + /* Prefetch buffer pointers when on a cache line + * boundary + */ + if ((droq->read_idx & 3) == 0) { + rte_prefetch0( + &droq->recv_buf_list[droq->read_idx]); + rte_prefetch0( + &droq->info_list[droq->read_idx]); + } + } else { + struct rte_mbuf *first_buf = NULL; + struct rte_mbuf *last_buf = NULL; + + while (pkt_len < info->length) { + int cpy_len = 0; + + cpy_len = ((pkt_len + droq->buffer_size) > + info->length) + ? 
((uint32_t)info->length - + pkt_len) + : droq->buffer_size; + + nicbuf = + droq->recv_buf_list[droq->read_idx].buffer; + droq->recv_buf_list[droq->read_idx].buffer = + NULL; + + if (likely(nicbuf != NULL)) { + /* Note the first seg */ + if (!pkt_len) + first_buf = nicbuf; + + nicbuf->port = lio_dev->port_id; + /* We don't have a way to pass + * flags yet + */ + nicbuf->ol_flags = 0; + if ((!pkt_len) && (rh->r_dh.has_hash)) { + uint64_t *hash_ptr; + + nicbuf->ol_flags |= + PKT_RX_RSS_HASH; + hash_ptr = rte_pktmbuf_mtod( + nicbuf, uint64_t *); + lio_swap_8B_data(hash_ptr, 1); + nicbuf->hash.rss = + (uint32_t)*hash_ptr; + nicbuf->data_off += 8; + nicbuf->pkt_len = cpy_len - 8; + nicbuf->data_len = cpy_len - 8; + } else { + nicbuf->pkt_len = cpy_len; + nicbuf->data_len = cpy_len; + } + + if (pkt_len) + first_buf->nb_segs++; + + if (last_buf) + last_buf->next = nicbuf; + + last_buf = nicbuf; + } else { + PMD_RX_LOG(lio_dev, ERR, "no buf\n"); + } + + pkt_len += cpy_len; + droq->read_idx = lio_incr_index( + droq->read_idx, + 1, droq->nb_desc); + droq->refill_count++; + + /* Prefetch buffer pointers when on a + * cache line boundary + */ + if ((droq->read_idx & 3) == 0) { + rte_prefetch0(&droq->recv_buf_list + [droq->read_idx]); + + rte_prefetch0( + &droq->info_list[droq->read_idx]); + } + } + rx_pkts[data_pkts++] = first_buf; + if (rh->r_dh.has_hash) + data_total_len += (pkt_len - 8); + else + data_total_len += pkt_len; + } + + /* Inform upper layer about packet checksum verification */ + struct rte_mbuf *m = rx_pkts[data_pkts - 1]; + + if (rh->r_dh.csum_verified & LIO_IP_CSUM_VERIFIED) + m->ol_flags |= PKT_RX_IP_CKSUM_GOOD; + + if (rh->r_dh.csum_verified & LIO_L4_CSUM_VERIFIED) + m->ol_flags |= PKT_RX_L4_CKSUM_GOOD; + } + + if (droq->refill_count >= droq->refill_threshold) { + int desc_refilled = lio_droq_refill(droq); + + /* Flush the droq descriptor data to memory to be sure + * that when we update the credits the data in memory is + * accurate. 
+ */ + rte_wmb(); + rte_write32(desc_refilled, droq->pkts_credit_reg); + /* make sure mmio write completes */ + rte_wmb(); + } + + info->length = 0; + info->rh.rh64 = 0; + + droq->stats.pkts_received++; + droq->stats.rx_pkts_received += data_pkts; + droq->stats.rx_bytes_received += data_total_len; + droq->stats.bytes_received += total_len; + + return data_pkts; +} + +static uint32_t +lio_droq_fast_process_packets(struct lio_device *lio_dev, + struct lio_droq *droq, + struct rte_mbuf **rx_pkts, + uint32_t pkts_to_process) +{ + int ret, data_pkts = 0; + uint32_t pkt; + + for (pkt = 0; pkt < pkts_to_process; pkt++) { + ret = lio_droq_fast_process_packet(lio_dev, droq, + &rx_pkts[data_pkts]); + if (ret < 0) { + lio_dev_err(lio_dev, "Port[%d] DROQ[%d] idx: %d len:0, pkt_cnt: %d\n", + lio_dev->port_id, droq->q_no, + droq->read_idx, pkts_to_process); + break; + } + data_pkts += ret; + } + + rte_atomic64_sub(&droq->pkts_pending, pkt); + + return data_pkts; +} + +static inline uint32_t +lio_droq_check_hw_for_pkts(struct lio_droq *droq) +{ + uint32_t last_count; + uint32_t pkt_count; + + pkt_count = rte_read32(droq->pkts_sent_reg); + + last_count = pkt_count - droq->pkt_count; + droq->pkt_count = pkt_count; + + if (last_count) + rte_atomic64_add(&droq->pkts_pending, last_count); + + return last_count; +} + +uint16_t +lio_dev_recv_pkts(void *rx_queue, + struct rte_mbuf **rx_pkts, + uint16_t budget) +{ + struct lio_droq *droq = rx_queue; + struct lio_device *lio_dev = droq->lio_dev; + uint32_t pkts_processed = 0; + uint32_t pkt_count = 0; + + lio_droq_check_hw_for_pkts(droq); + + pkt_count = rte_atomic64_read(&droq->pkts_pending); + if (!pkt_count) + return 0; + + if (pkt_count > budget) + pkt_count = budget; + + /* Grab the lock */ + rte_spinlock_lock(&droq->lock); + pkts_processed = lio_droq_fast_process_packets(lio_dev, + droq, rx_pkts, + pkt_count); + + if (droq->pkt_count) { + rte_write32(droq->pkt_count, droq->pkts_sent_reg); + droq->pkt_count = 0; + } + + /* Release the spin lock */ + rte_spinlock_unlock(&droq->lock); + + return pkts_processed; +} + +void +lio_delete_droq_queue(struct lio_device *lio_dev, + int oq_no) +{ + lio_delete_droq(lio_dev, oq_no); + lio_dev->num_oqs--; + rte_free(lio_dev->droq[oq_no]); + lio_dev->droq[oq_no] = NULL; +} + +/** + * lio_init_instr_queue() + * @param lio_dev - pointer to the lio device structure. + * @param txpciq - queue to be initialized. + * + * Called at driver init time for each input queue. iq_conf has the + * configuration parameters for the queue. 
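+ * @param num_descs - number of descriptors to allocate for this queue
+ *                    (used to size the DMA zone and the request list).
+ * @param socket_id - NUMA socket on which the queue memory is allocated.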
+ * + * @return Success: 0 Failure: -1 + */ +static int +lio_init_instr_queue(struct lio_device *lio_dev, + union octeon_txpciq txpciq, + uint32_t num_descs, unsigned int socket_id) +{ + uint32_t iq_no = (uint32_t)txpciq.s.q_no; + struct lio_instr_queue *iq; + uint32_t instr_type; + uint32_t q_size; + + instr_type = LIO_IQ_INSTR_TYPE(lio_dev); + + q_size = instr_type * num_descs; + iq = lio_dev->instr_queue[iq_no]; + iq->iq_mz = rte_eth_dma_zone_reserve(lio_dev->eth_dev, + "instr_queue", iq_no, q_size, + RTE_CACHE_LINE_SIZE, + socket_id); + if (iq->iq_mz == NULL) { + lio_dev_err(lio_dev, "Cannot allocate memory for instr queue %d\n", + iq_no); + return -1; + } + + iq->base_addr_dma = iq->iq_mz->iova; + iq->base_addr = (uint8_t *)iq->iq_mz->addr; + + iq->nb_desc = num_descs; + + /* Initialize a list to holds requests that have been posted to Octeon + * but has yet to be fetched by octeon + */ + iq->request_list = rte_zmalloc_socket("request_list", + sizeof(*iq->request_list) * + num_descs, + RTE_CACHE_LINE_SIZE, + socket_id); + if (iq->request_list == NULL) { + lio_dev_err(lio_dev, "Alloc failed for IQ[%d] nr free list\n", + iq_no); + lio_dma_zone_free(lio_dev, iq->iq_mz); + return -1; + } + + lio_dev_dbg(lio_dev, "IQ[%d]: base: %p basedma: %lx count: %d\n", + iq_no, iq->base_addr, (unsigned long)iq->base_addr_dma, + iq->nb_desc); + + iq->lio_dev = lio_dev; + iq->txpciq.txpciq64 = txpciq.txpciq64; + iq->fill_cnt = 0; + iq->host_write_index = 0; + iq->lio_read_index = 0; + iq->flush_index = 0; + + rte_atomic64_set(&iq->instr_pending, 0); + + /* Initialize the spinlock for this instruction queue */ + rte_spinlock_init(&iq->lock); + rte_spinlock_init(&iq->post_lock); + + rte_atomic64_clear(&iq->iq_flush_running); + + lio_dev->io_qmask.iq |= (1ULL << iq_no); + + /* Set the 32B/64B mode for each input queue */ + lio_dev->io_qmask.iq64B |= ((instr_type == 64) << iq_no); + iq->iqcmd_64B = (instr_type == 64); + + lio_dev->fn_list.setup_iq_regs(lio_dev, iq_no); + + return 0; +} + +int +lio_setup_instr_queue0(struct lio_device *lio_dev) +{ + union octeon_txpciq txpciq; + uint32_t num_descs = 0; + uint32_t iq_no = 0; + + num_descs = LIO_NUM_DEF_TX_DESCS_CFG(lio_dev); + + lio_dev->num_iqs = 0; + + lio_dev->instr_queue[0] = rte_zmalloc(NULL, + sizeof(struct lio_instr_queue), 0); + if (lio_dev->instr_queue[0] == NULL) + return -ENOMEM; + + lio_dev->instr_queue[0]->q_index = 0; + lio_dev->instr_queue[0]->app_ctx = (void *)(size_t)0; + txpciq.txpciq64 = 0; + txpciq.s.q_no = iq_no; + txpciq.s.pkind = lio_dev->pfvf_hsword.pkind; + txpciq.s.use_qpg = 0; + txpciq.s.qpg = 0; + if (lio_init_instr_queue(lio_dev, txpciq, num_descs, SOCKET_ID_ANY)) { + rte_free(lio_dev->instr_queue[0]); + lio_dev->instr_queue[0] = NULL; + return -1; + } + + lio_dev->num_iqs++; + + return 0; +} + +/** + * lio_delete_instr_queue() + * @param lio_dev - pointer to the lio device structure. + * @param iq_no - queue to be deleted. + * + * Called at driver unload time for each input queue. Deletes all + * allocated resources for the input queue. 
+ */ +static void +lio_delete_instr_queue(struct lio_device *lio_dev, uint32_t iq_no) +{ + struct lio_instr_queue *iq = lio_dev->instr_queue[iq_no]; + + rte_free(iq->request_list); + iq->request_list = NULL; + lio_dma_zone_free(lio_dev, iq->iq_mz); +} + +void +lio_free_instr_queue0(struct lio_device *lio_dev) +{ + lio_delete_instr_queue(lio_dev, 0); + rte_free(lio_dev->instr_queue[0]); + lio_dev->instr_queue[0] = NULL; + lio_dev->num_iqs--; +} + +/* Return 0 on success, -1 on failure */ +int +lio_setup_iq(struct lio_device *lio_dev, int q_index, + union octeon_txpciq txpciq, uint32_t num_descs, void *app_ctx, + unsigned int socket_id) +{ + uint32_t iq_no = (uint32_t)txpciq.s.q_no; + + lio_dev->instr_queue[iq_no] = rte_zmalloc_socket("ethdev TX queue", + sizeof(struct lio_instr_queue), + RTE_CACHE_LINE_SIZE, socket_id); + if (lio_dev->instr_queue[iq_no] == NULL) + return -1; + + lio_dev->instr_queue[iq_no]->q_index = q_index; + lio_dev->instr_queue[iq_no]->app_ctx = app_ctx; + + if (lio_init_instr_queue(lio_dev, txpciq, num_descs, socket_id)) { + rte_free(lio_dev->instr_queue[iq_no]); + lio_dev->instr_queue[iq_no] = NULL; + return -1; + } + + lio_dev->num_iqs++; + + return 0; +} + +int +lio_wait_for_instr_fetch(struct lio_device *lio_dev) +{ + int pending, instr_cnt; + int i, retry = 1000; + + do { + instr_cnt = 0; + + for (i = 0; i < LIO_MAX_INSTR_QUEUES(lio_dev); i++) { + if (!(lio_dev->io_qmask.iq & (1ULL << i))) + continue; + + if (lio_dev->instr_queue[i] == NULL) + break; + + pending = rte_atomic64_read( + &lio_dev->instr_queue[i]->instr_pending); + if (pending) + lio_flush_iq(lio_dev, lio_dev->instr_queue[i]); + + instr_cnt += pending; + } + + if (instr_cnt == 0) + break; + + rte_delay_ms(1); + + } while (retry-- && instr_cnt); + + return instr_cnt; +} + +static inline void +lio_ring_doorbell(struct lio_device *lio_dev, + struct lio_instr_queue *iq) +{ + if (rte_atomic64_read(&lio_dev->status) == LIO_DEV_RUNNING) { + rte_write32(iq->fill_cnt, iq->doorbell_reg); + /* make sure doorbell write goes through */ + rte_wmb(); + iq->fill_cnt = 0; + } +} + +static inline void +copy_cmd_into_iq(struct lio_instr_queue *iq, uint8_t *cmd) +{ + uint8_t *iqptr, cmdsize; + + cmdsize = ((iq->iqcmd_64B) ? 64 : 32); + iqptr = iq->base_addr + (cmdsize * iq->host_write_index); + + rte_memcpy(iqptr, cmd, cmdsize); +} + +static inline struct lio_iq_post_status +post_command2(struct lio_instr_queue *iq, uint8_t *cmd) +{ + struct lio_iq_post_status st; + + st.status = LIO_IQ_SEND_OK; + + /* This ensures that the read index does not wrap around to the same + * position if queue gets full before Octeon could fetch any instr. + */ + if (rte_atomic64_read(&iq->instr_pending) >= + (int32_t)(iq->nb_desc - 1)) { + st.status = LIO_IQ_SEND_FAILED; + st.index = -1; + return st; + } + + if (rte_atomic64_read(&iq->instr_pending) >= + (int32_t)(iq->nb_desc - 2)) + st.status = LIO_IQ_SEND_STOP; + + copy_cmd_into_iq(iq, cmd); + + /* "index" is returned, host_write_index is modified. */ + st.index = iq->host_write_index; + iq->host_write_index = lio_incr_index(iq->host_write_index, 1, + iq->nb_desc); + iq->fill_cnt++; + + /* Flush the command into memory. We need to be sure the data is in + * memory before indicating that the instruction is pending. 
+ */ + rte_wmb(); + + rte_atomic64_inc(&iq->instr_pending); + + return st; +} + +static inline void +lio_add_to_request_list(struct lio_instr_queue *iq, + int idx, void *buf, int reqtype) +{ + iq->request_list[idx].buf = buf; + iq->request_list[idx].reqtype = reqtype; +} + +static inline void +lio_free_netsgbuf(void *buf) +{ + struct lio_buf_free_info *finfo = buf; + struct lio_device *lio_dev = finfo->lio_dev; + struct rte_mbuf *m = finfo->mbuf; + struct lio_gather *g = finfo->g; + uint8_t iq = finfo->iq_no; + + /* This will take care of multiple segments also */ + rte_pktmbuf_free(m); + + rte_spinlock_lock(&lio_dev->glist_lock[iq]); + STAILQ_INSERT_TAIL(&lio_dev->glist_head[iq], &g->list, entries); + rte_spinlock_unlock(&lio_dev->glist_lock[iq]); + rte_free(finfo); +} + +/* Can only run in process context */ +static int +lio_process_iq_request_list(struct lio_device *lio_dev, + struct lio_instr_queue *iq) +{ + struct octeon_instr_irh *irh = NULL; + uint32_t old = iq->flush_index; + struct lio_soft_command *sc; + uint32_t inst_count = 0; + int reqtype; + void *buf; + + while (old != iq->lio_read_index) { + reqtype = iq->request_list[old].reqtype; + buf = iq->request_list[old].buf; + + if (reqtype == LIO_REQTYPE_NONE) + goto skip_this; + + switch (reqtype) { + case LIO_REQTYPE_NORESP_NET: + rte_pktmbuf_free((struct rte_mbuf *)buf); + break; + case LIO_REQTYPE_NORESP_NET_SG: + lio_free_netsgbuf(buf); + break; + case LIO_REQTYPE_SOFT_COMMAND: + sc = buf; + irh = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh; + if (irh->rflag) { + /* We're expecting a response from Octeon. + * It's up to lio_process_ordered_list() to + * process sc. Add sc to the ordered soft + * command response list because we expect + * a response from Octeon. + */ + rte_spinlock_lock(&lio_dev->response_list.lock); + rte_atomic64_inc( + &lio_dev->response_list.pending_req_count); + STAILQ_INSERT_TAIL( + &lio_dev->response_list.head, + &sc->node, entries); + rte_spinlock_unlock( + &lio_dev->response_list.lock); + } else { + if (sc->callback) { + /* This callback must not sleep */ + sc->callback(LIO_REQUEST_DONE, + sc->callback_arg); + } + } + break; + default: + lio_dev_err(lio_dev, + "Unknown reqtype: %d buf: %p at idx %d\n", + reqtype, buf, old); + } + + iq->request_list[old].buf = NULL; + iq->request_list[old].reqtype = 0; + +skip_this: + inst_count++; + old = lio_incr_index(old, 1, iq->nb_desc); + } + + iq->flush_index = old; + + return inst_count; +} + +static void +lio_update_read_index(struct lio_instr_queue *iq) +{ + uint32_t pkt_in_done = rte_read32(iq->inst_cnt_reg); + uint32_t last_done; + + last_done = pkt_in_done - iq->pkt_in_done; + iq->pkt_in_done = pkt_in_done; + + /* Add last_done and modulo with the IQ size to get new index */ + iq->lio_read_index = (iq->lio_read_index + + (uint32_t)(last_done & LIO_PKT_IN_DONE_CNT_MASK)) % + iq->nb_desc; +} + +int +lio_flush_iq(struct lio_device *lio_dev, struct lio_instr_queue *iq) +{ + uint32_t tot_inst_processed = 0; + uint32_t inst_processed = 0; + int tx_done = 1; + + if (rte_atomic64_test_and_set(&iq->iq_flush_running) == 0) + return tx_done; + + rte_spinlock_lock(&iq->lock); + + lio_update_read_index(iq); + + do { + /* Process any outstanding IQ packets. 
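+		 * Requests between flush_index and lio_read_index have already
+		 * been fetched by the device; lio_process_iq_request_list()
+		 * releases their buffers (or queues soft commands that expect a
+		 * response) until the two indices meet.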
*/ + if (iq->flush_index == iq->lio_read_index) + break; + + inst_processed = lio_process_iq_request_list(lio_dev, iq); + + if (inst_processed) { + rte_atomic64_sub(&iq->instr_pending, inst_processed); + iq->stats.instr_processed += inst_processed; + } + + tot_inst_processed += inst_processed; + inst_processed = 0; + + } while (1); + + rte_spinlock_unlock(&iq->lock); + + rte_atomic64_clear(&iq->iq_flush_running); + + return tx_done; +} + +static int +lio_send_command(struct lio_device *lio_dev, uint32_t iq_no, void *cmd, + void *buf, uint32_t datasize, uint32_t reqtype) +{ + struct lio_instr_queue *iq = lio_dev->instr_queue[iq_no]; + struct lio_iq_post_status st; + + rte_spinlock_lock(&iq->post_lock); + + st = post_command2(iq, cmd); + + if (st.status != LIO_IQ_SEND_FAILED) { + lio_add_to_request_list(iq, st.index, buf, reqtype); + LIO_INCR_INSTRQUEUE_PKT_COUNT(lio_dev, iq_no, bytes_sent, + datasize); + LIO_INCR_INSTRQUEUE_PKT_COUNT(lio_dev, iq_no, instr_posted, 1); + + lio_ring_doorbell(lio_dev, iq); + } else { + LIO_INCR_INSTRQUEUE_PKT_COUNT(lio_dev, iq_no, instr_dropped, 1); + } + + rte_spinlock_unlock(&iq->post_lock); + + return st.status; +} + +void +lio_prepare_soft_command(struct lio_device *lio_dev, + struct lio_soft_command *sc, uint8_t opcode, + uint8_t subcode, uint32_t irh_ossp, uint64_t ossp0, + uint64_t ossp1) +{ + struct octeon_instr_pki_ih3 *pki_ih3; + struct octeon_instr_ih3 *ih3; + struct octeon_instr_irh *irh; + struct octeon_instr_rdp *rdp; + + RTE_ASSERT(opcode <= 15); + RTE_ASSERT(subcode <= 127); + + ih3 = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3; + + ih3->pkind = lio_dev->instr_queue[sc->iq_no]->txpciq.s.pkind; + + pki_ih3 = (struct octeon_instr_pki_ih3 *)&sc->cmd.cmd3.pki_ih3; + + pki_ih3->w = 1; + pki_ih3->raw = 1; + pki_ih3->utag = 1; + pki_ih3->uqpg = lio_dev->instr_queue[sc->iq_no]->txpciq.s.use_qpg; + pki_ih3->utt = 1; + + pki_ih3->tag = LIO_CONTROL; + pki_ih3->tagtype = OCTEON_ATOMIC_TAG; + pki_ih3->qpg = lio_dev->instr_queue[sc->iq_no]->txpciq.s.qpg; + pki_ih3->pm = 0x7; + pki_ih3->sl = 8; + + if (sc->datasize) + ih3->dlengsz = sc->datasize; + + irh = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh; + irh->opcode = opcode; + irh->subcode = subcode; + + /* opcode/subcode specific parameters (ossp) */ + irh->ossp = irh_ossp; + sc->cmd.cmd3.ossp[0] = ossp0; + sc->cmd.cmd3.ossp[1] = ossp1; + + if (sc->rdatasize) { + rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd3.rdp; + rdp->pcie_port = lio_dev->pcie_port; + rdp->rlen = sc->rdatasize; + irh->rflag = 1; + /* PKI IH3 */ + ih3->fsz = OCTEON_SOFT_CMD_RESP_IH3; + } else { + irh->rflag = 0; + /* PKI IH3 */ + ih3->fsz = OCTEON_PCI_CMD_O3; + } +} + +int +lio_send_soft_command(struct lio_device *lio_dev, + struct lio_soft_command *sc) +{ + struct octeon_instr_ih3 *ih3; + struct octeon_instr_irh *irh; + uint32_t len = 0; + + ih3 = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3; + if (ih3->dlengsz) { + RTE_ASSERT(sc->dmadptr); + sc->cmd.cmd3.dptr = sc->dmadptr; + } + + irh = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh; + if (irh->rflag) { + RTE_ASSERT(sc->dmarptr); + RTE_ASSERT(sc->status_word != NULL); + *sc->status_word = LIO_COMPLETION_WORD_INIT; + sc->cmd.cmd3.rptr = sc->dmarptr; + } + + len = (uint32_t)ih3->dlengsz; + + if (sc->wait_time) + sc->timeout = lio_uptime + sc->wait_time; + + return lio_send_command(lio_dev, sc->iq_no, &sc->cmd, sc, len, + LIO_REQTYPE_SOFT_COMMAND); +} + +int +lio_setup_sc_buffer_pool(struct lio_device *lio_dev) +{ + char sc_pool_name[RTE_MEMPOOL_NAMESIZE]; + uint16_t buf_size; + + buf_size = 
LIO_SOFT_COMMAND_BUFFER_SIZE + RTE_PKTMBUF_HEADROOM; + snprintf(sc_pool_name, sizeof(sc_pool_name), + "lio_sc_pool_%u", lio_dev->port_id); + lio_dev->sc_buf_pool = rte_pktmbuf_pool_create(sc_pool_name, + LIO_MAX_SOFT_COMMAND_BUFFERS, + 0, 0, buf_size, SOCKET_ID_ANY); + return 0; +} + +void +lio_free_sc_buffer_pool(struct lio_device *lio_dev) +{ + rte_mempool_free(lio_dev->sc_buf_pool); +} + +struct lio_soft_command * +lio_alloc_soft_command(struct lio_device *lio_dev, uint32_t datasize, + uint32_t rdatasize, uint32_t ctxsize) +{ + uint32_t offset = sizeof(struct lio_soft_command); + struct lio_soft_command *sc; + struct rte_mbuf *m; + uint64_t dma_addr; + + RTE_ASSERT((offset + datasize + rdatasize + ctxsize) <= + LIO_SOFT_COMMAND_BUFFER_SIZE); + + m = rte_pktmbuf_alloc(lio_dev->sc_buf_pool); + if (m == NULL) { + lio_dev_err(lio_dev, "Cannot allocate mbuf for sc\n"); + return NULL; + } + + /* set rte_mbuf data size and there is only 1 segment */ + m->pkt_len = LIO_SOFT_COMMAND_BUFFER_SIZE; + m->data_len = LIO_SOFT_COMMAND_BUFFER_SIZE; + + /* use rte_mbuf buffer for soft command */ + sc = rte_pktmbuf_mtod(m, struct lio_soft_command *); + memset(sc, 0, LIO_SOFT_COMMAND_BUFFER_SIZE); + sc->size = LIO_SOFT_COMMAND_BUFFER_SIZE; + sc->dma_addr = rte_mbuf_data_iova(m); + sc->mbuf = m; + + dma_addr = sc->dma_addr; + + if (ctxsize) { + sc->ctxptr = (uint8_t *)sc + offset; + sc->ctxsize = ctxsize; + } + + /* Start data at 128 byte boundary */ + offset = (offset + ctxsize + 127) & 0xffffff80; + + if (datasize) { + sc->virtdptr = (uint8_t *)sc + offset; + sc->dmadptr = dma_addr + offset; + sc->datasize = datasize; + } + + /* Start rdata at 128 byte boundary */ + offset = (offset + datasize + 127) & 0xffffff80; + + if (rdatasize) { + RTE_ASSERT(rdatasize >= 16); + sc->virtrptr = (uint8_t *)sc + offset; + sc->dmarptr = dma_addr + offset; + sc->rdatasize = rdatasize; + sc->status_word = (uint64_t *)((uint8_t *)(sc->virtrptr) + + rdatasize - 8); + } + + return sc; +} + +void +lio_free_soft_command(struct lio_soft_command *sc) +{ + rte_pktmbuf_free(sc->mbuf); +} + +void +lio_setup_response_list(struct lio_device *lio_dev) +{ + STAILQ_INIT(&lio_dev->response_list.head); + rte_spinlock_init(&lio_dev->response_list.lock); + rte_atomic64_set(&lio_dev->response_list.pending_req_count, 0); +} + +int +lio_process_ordered_list(struct lio_device *lio_dev) +{ + int resp_to_process = LIO_MAX_ORD_REQS_TO_PROCESS; + struct lio_response_list *ordered_sc_list; + struct lio_soft_command *sc; + int request_complete = 0; + uint64_t status64; + uint32_t status; + + ordered_sc_list = &lio_dev->response_list; + + do { + rte_spinlock_lock(&ordered_sc_list->lock); + + if (STAILQ_EMPTY(&ordered_sc_list->head)) { + /* ordered_sc_list is empty; there is + * nothing to process + */ + rte_spinlock_unlock(&ordered_sc_list->lock); + return -1; + } + + sc = LIO_STQUEUE_FIRST_ENTRY(&ordered_sc_list->head, + struct lio_soft_command, node); + + status = LIO_REQUEST_PENDING; + + /* check if octeon has finished DMA'ing a response + * to where rptr is pointing to + */ + status64 = *sc->status_word; + + if (status64 != LIO_COMPLETION_WORD_INIT) { + /* This logic ensures that all 64b have been written. + * 1. check byte 0 for non-FF + * 2. if non-FF, then swap result from BE to host order + * 3. check byte 7 (swapped to 0) for non-FF + * 4. if non-FF, use the low 32-bit status code + * 5. 
if either byte 0 or byte 7 is FF, don't use status + */ + if ((status64 & 0xff) != 0xff) { + lio_swap_8B_data(&status64, 1); + if (((status64 & 0xff) != 0xff)) { + /* retrieve 16-bit firmware status */ + status = (uint32_t)(status64 & + 0xffffULL); + if (status) { + status = + LIO_FIRMWARE_STATUS_CODE( + status); + } else { + /* i.e. no error */ + status = LIO_REQUEST_DONE; + } + } + } + } else if ((sc->timeout && lio_check_timeout(lio_uptime, + sc->timeout))) { + lio_dev_err(lio_dev, + "cmd failed, timeout (%ld, %ld)\n", + (long)lio_uptime, (long)sc->timeout); + status = LIO_REQUEST_TIMEOUT; + } + + if (status != LIO_REQUEST_PENDING) { + /* we have received a response or we have timed out. + * remove node from linked list + */ + STAILQ_REMOVE(&ordered_sc_list->head, + &sc->node, lio_stailq_node, entries); + rte_atomic64_dec( + &lio_dev->response_list.pending_req_count); + rte_spinlock_unlock(&ordered_sc_list->lock); + + if (sc->callback) + sc->callback(status, sc->callback_arg); + + request_complete++; + } else { + /* no response yet */ + request_complete = 0; + rte_spinlock_unlock(&ordered_sc_list->lock); + } + + /* If we hit the Max Ordered requests to process every loop, + * we quit and let this function be invoked the next time + * the poll thread runs to process the remaining requests. + * This function can take up the entire CPU if there is + * no upper limit to the requests processed. + */ + if (request_complete >= resp_to_process) + break; + } while (request_complete); + + return 0; +} + +static inline struct lio_stailq_node * +list_delete_first_node(struct lio_stailq_head *head) +{ + struct lio_stailq_node *node; + + if (STAILQ_EMPTY(head)) + node = NULL; + else + node = STAILQ_FIRST(head); + + if (node) + STAILQ_REMOVE(head, node, lio_stailq_node, entries); + + return node; +} + +void +lio_delete_sglist(struct lio_instr_queue *txq) +{ + struct lio_device *lio_dev = txq->lio_dev; + int iq_no = txq->q_index; + struct lio_gather *g; + + if (lio_dev->glist_head == NULL) + return; + + do { + g = (struct lio_gather *)list_delete_first_node( + &lio_dev->glist_head[iq_no]); + if (g) { + if (g->sg) + rte_free( + (void *)((unsigned long)g->sg - g->adjust)); + rte_free(g); + } + } while (g); +} + +/** + * \brief Setup gather lists + * @param lio per-network private data + */ +int +lio_setup_sglists(struct lio_device *lio_dev, int iq_no, + int fw_mapped_iq, int num_descs, unsigned int socket_id) +{ + struct lio_gather *g; + int i; + + rte_spinlock_init(&lio_dev->glist_lock[iq_no]); + + STAILQ_INIT(&lio_dev->glist_head[iq_no]); + + for (i = 0; i < num_descs; i++) { + g = rte_zmalloc_socket(NULL, sizeof(*g), RTE_CACHE_LINE_SIZE, + socket_id); + if (g == NULL) { + lio_dev_err(lio_dev, + "lio_gather memory allocation failed for qno %d\n", + iq_no); + break; + } + + g->sg_size = + ((ROUNDUP4(LIO_MAX_SG) >> 2) * LIO_SG_ENTRY_SIZE); + + g->sg = rte_zmalloc_socket(NULL, g->sg_size + 8, + RTE_CACHE_LINE_SIZE, socket_id); + if (g->sg == NULL) { + lio_dev_err(lio_dev, + "sg list memory allocation failed for qno %d\n", + iq_no); + rte_free(g); + break; + } + + /* The gather component should be aligned on 64-bit boundary */ + if (((unsigned long)g->sg) & 7) { + g->adjust = 8 - (((unsigned long)g->sg) & 7); + g->sg = + (struct lio_sg_entry *)((unsigned long)g->sg + + g->adjust); + } + + STAILQ_INSERT_TAIL(&lio_dev->glist_head[iq_no], &g->list, + entries); + } + + if (i != num_descs) { + lio_delete_sglist(lio_dev->instr_queue[fw_mapped_iq]); + return -ENOMEM; + } + + return 0; +} + +void 
+lio_delete_instruction_queue(struct lio_device *lio_dev, int iq_no) +{ + lio_delete_instr_queue(lio_dev, iq_no); + rte_free(lio_dev->instr_queue[iq_no]); + lio_dev->instr_queue[iq_no] = NULL; + lio_dev->num_iqs--; +} + +static inline uint32_t +lio_iq_get_available(struct lio_device *lio_dev, uint32_t q_no) +{ + return ((lio_dev->instr_queue[q_no]->nb_desc - 1) - + (uint32_t)rte_atomic64_read( + &lio_dev->instr_queue[q_no]->instr_pending)); +} + +static inline int +lio_iq_is_full(struct lio_device *lio_dev, uint32_t q_no) +{ + return ((uint32_t)rte_atomic64_read( + &lio_dev->instr_queue[q_no]->instr_pending) >= + (lio_dev->instr_queue[q_no]->nb_desc - 2)); +} + +static int +lio_dev_cleanup_iq(struct lio_device *lio_dev, int iq_no) +{ + struct lio_instr_queue *iq = lio_dev->instr_queue[iq_no]; + uint32_t count = 10000; + + while ((lio_iq_get_available(lio_dev, iq_no) < LIO_FLUSH_WM(iq)) && + --count) + lio_flush_iq(lio_dev, iq); + + return count ? 0 : 1; +} + +static void +lio_ctrl_cmd_callback(uint32_t status __rte_unused, void *sc_ptr) +{ + struct lio_soft_command *sc = sc_ptr; + struct lio_dev_ctrl_cmd *ctrl_cmd; + struct lio_ctrl_pkt *ctrl_pkt; + + ctrl_pkt = (struct lio_ctrl_pkt *)sc->ctxptr; + ctrl_cmd = ctrl_pkt->ctrl_cmd; + ctrl_cmd->cond = 1; + + lio_free_soft_command(sc); +} + +static inline struct lio_soft_command * +lio_alloc_ctrl_pkt_sc(struct lio_device *lio_dev, + struct lio_ctrl_pkt *ctrl_pkt) +{ + struct lio_soft_command *sc = NULL; + uint32_t uddsize, datasize; + uint32_t rdatasize; + uint8_t *data; + + uddsize = (uint32_t)(ctrl_pkt->ncmd.s.more * 8); + + datasize = OCTEON_CMD_SIZE + uddsize; + rdatasize = (ctrl_pkt->wait_time) ? 16 : 0; + + sc = lio_alloc_soft_command(lio_dev, datasize, + rdatasize, sizeof(struct lio_ctrl_pkt)); + if (sc == NULL) + return NULL; + + rte_memcpy(sc->ctxptr, ctrl_pkt, sizeof(struct lio_ctrl_pkt)); + + data = (uint8_t *)sc->virtdptr; + + rte_memcpy(data, &ctrl_pkt->ncmd, OCTEON_CMD_SIZE); + + lio_swap_8B_data((uint64_t *)data, OCTEON_CMD_SIZE >> 3); + + if (uddsize) { + /* Endian-Swap for UDD should have been done by caller. */ + rte_memcpy(data + OCTEON_CMD_SIZE, ctrl_pkt->udd, uddsize); + } + + sc->iq_no = (uint32_t)ctrl_pkt->iq_no; + + lio_prepare_soft_command(lio_dev, sc, + LIO_OPCODE, LIO_OPCODE_CMD, + 0, 0, 0); + + sc->callback = lio_ctrl_cmd_callback; + sc->callback_arg = sc; + sc->wait_time = ctrl_pkt->wait_time; + + return sc; +} + +int +lio_send_ctrl_pkt(struct lio_device *lio_dev, struct lio_ctrl_pkt *ctrl_pkt) +{ + struct lio_soft_command *sc = NULL; + int retval; + + sc = lio_alloc_ctrl_pkt_sc(lio_dev, ctrl_pkt); + if (sc == NULL) { + lio_dev_err(lio_dev, "soft command allocation failed\n"); + return -1; + } + + retval = lio_send_soft_command(lio_dev, sc); + if (retval == LIO_IQ_SEND_FAILED) { + lio_free_soft_command(sc); + lio_dev_err(lio_dev, "Port: %d soft command: %d send failed status: %x\n", + lio_dev->port_id, ctrl_pkt->ncmd.s.cmd, retval); + return -1; + } + + return retval; +} + +/** Send data packet to the device + * @param lio_dev - lio device pointer + * @param ndata - control structure with queueing, and buffer information + * + * @returns IQ_FAILED if it failed to add to the input queue. IQ_STOP if it the + * queue should be stopped, and LIO_IQ_SEND_OK if it sent okay. 
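+ * (The status values returned here are LIO_IQ_SEND_FAILED, LIO_IQ_SEND_STOP
+ * and LIO_IQ_SEND_OK, as produced by lio_send_command().)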
+ */ +static inline int +lio_send_data_pkt(struct lio_device *lio_dev, struct lio_data_pkt *ndata) +{ + return lio_send_command(lio_dev, ndata->q_no, &ndata->cmd, + ndata->buf, ndata->datasize, ndata->reqtype); +} + +uint16_t +lio_dev_xmit_pkts(void *tx_queue, struct rte_mbuf **pkts, uint16_t nb_pkts) +{ + struct lio_instr_queue *txq = tx_queue; + union lio_cmd_setup cmdsetup; + struct lio_device *lio_dev; + struct lio_iq_stats *stats; + struct lio_data_pkt ndata; + int i, processed = 0; + struct rte_mbuf *m; + uint32_t tag = 0; + int status = 0; + int iq_no; + + lio_dev = txq->lio_dev; + iq_no = txq->txpciq.s.q_no; + stats = &lio_dev->instr_queue[iq_no]->stats; + + if (!lio_dev->intf_open || !lio_dev->linfo.link.s.link_up) { + PMD_TX_LOG(lio_dev, ERR, "Transmit failed link_status : %d\n", + lio_dev->linfo.link.s.link_up); + goto xmit_failed; + } + + lio_dev_cleanup_iq(lio_dev, iq_no); + + for (i = 0; i < nb_pkts; i++) { + uint32_t pkt_len = 0; + + m = pkts[i]; + + /* Prepare the attributes for the data to be passed to BASE. */ + memset(&ndata, 0, sizeof(struct lio_data_pkt)); + + ndata.buf = m; + + ndata.q_no = iq_no; + if (lio_iq_is_full(lio_dev, ndata.q_no)) { + stats->tx_iq_busy++; + if (lio_dev_cleanup_iq(lio_dev, iq_no)) { + PMD_TX_LOG(lio_dev, ERR, + "Transmit failed iq:%d full\n", + ndata.q_no); + break; + } + } + + cmdsetup.cmd_setup64 = 0; + cmdsetup.s.iq_no = iq_no; + + /* check checksum offload flags to form cmd */ + if (m->ol_flags & PKT_TX_IP_CKSUM) + cmdsetup.s.ip_csum = 1; + + if (m->ol_flags & PKT_TX_OUTER_IP_CKSUM) + cmdsetup.s.tnl_csum = 1; + else if ((m->ol_flags & PKT_TX_TCP_CKSUM) || + (m->ol_flags & PKT_TX_UDP_CKSUM)) + cmdsetup.s.transport_csum = 1; + + if (m->nb_segs == 1) { + pkt_len = rte_pktmbuf_data_len(m); + cmdsetup.s.u.datasize = pkt_len; + lio_prepare_pci_cmd(lio_dev, &ndata.cmd, + &cmdsetup, tag); + ndata.cmd.cmd3.dptr = rte_mbuf_data_iova(m); + ndata.reqtype = LIO_REQTYPE_NORESP_NET; + } else { + struct lio_buf_free_info *finfo; + struct lio_gather *g; + rte_iova_t phyaddr; + int i, frags; + + finfo = (struct lio_buf_free_info *)rte_malloc(NULL, + sizeof(*finfo), 0); + if (finfo == NULL) { + PMD_TX_LOG(lio_dev, ERR, + "free buffer alloc failed\n"); + goto xmit_failed; + } + + rte_spinlock_lock(&lio_dev->glist_lock[iq_no]); + g = (struct lio_gather *)list_delete_first_node( + &lio_dev->glist_head[iq_no]); + rte_spinlock_unlock(&lio_dev->glist_lock[iq_no]); + if (g == NULL) { + PMD_TX_LOG(lio_dev, ERR, + "Transmit scatter gather: glist null!\n"); + goto xmit_failed; + } + + cmdsetup.s.gather = 1; + cmdsetup.s.u.gatherptrs = m->nb_segs; + lio_prepare_pci_cmd(lio_dev, &ndata.cmd, + &cmdsetup, tag); + + memset(g->sg, 0, g->sg_size); + g->sg[0].ptr[0] = rte_mbuf_data_iova(m); + lio_add_sg_size(&g->sg[0], m->data_len, 0); + pkt_len = m->data_len; + finfo->mbuf = m; + + /* First seg taken care above */ + frags = m->nb_segs - 1; + i = 1; + m = m->next; + while (frags--) { + g->sg[(i >> 2)].ptr[(i & 3)] = + rte_mbuf_data_iova(m); + lio_add_sg_size(&g->sg[(i >> 2)], + m->data_len, (i & 3)); + pkt_len += m->data_len; + i++; + m = m->next; + } + + phyaddr = rte_mem_virt2iova(g->sg); + if (phyaddr == RTE_BAD_IOVA) { + PMD_TX_LOG(lio_dev, ERR, "bad phys addr\n"); + goto xmit_failed; + } + + ndata.cmd.cmd3.dptr = phyaddr; + ndata.reqtype = LIO_REQTYPE_NORESP_NET_SG; + + finfo->g = g; + finfo->lio_dev = lio_dev; + finfo->iq_no = (uint64_t)iq_no; + ndata.buf = finfo; + } + + ndata.datasize = pkt_len; + + status = lio_send_data_pkt(lio_dev, &ndata); + + if (unlikely(status 
== LIO_IQ_SEND_FAILED)) { + PMD_TX_LOG(lio_dev, ERR, "send failed\n"); + break; + } + + if (unlikely(status == LIO_IQ_SEND_STOP)) { + PMD_TX_LOG(lio_dev, DEBUG, "iq full\n"); + /* create space as iq is full */ + lio_dev_cleanup_iq(lio_dev, iq_no); + } + + stats->tx_done++; + stats->tx_tot_bytes += pkt_len; + processed++; + } + +xmit_failed: + stats->tx_dropped += (nb_pkts - processed); + + return processed; +} + +void +lio_dev_clear_queues(struct rte_eth_dev *eth_dev) +{ + struct lio_instr_queue *txq; + struct lio_droq *rxq; + uint16_t i; + + for (i = 0; i < eth_dev->data->nb_tx_queues; i++) { + txq = eth_dev->data->tx_queues[i]; + if (txq != NULL) { + lio_dev_tx_queue_release(txq); + eth_dev->data->tx_queues[i] = NULL; + } + } + + for (i = 0; i < eth_dev->data->nb_rx_queues; i++) { + rxq = eth_dev->data->rx_queues[i]; + if (rxq != NULL) { + lio_dev_rx_queue_release(rxq); + eth_dev->data->rx_queues[i] = NULL; + } + } +} diff --git a/src/spdk/dpdk/drivers/net/liquidio/lio_rxtx.h b/src/spdk/dpdk/drivers/net/liquidio/lio_rxtx.h new file mode 100644 index 000000000..d2a45104f --- /dev/null +++ b/src/spdk/dpdk/drivers/net/liquidio/lio_rxtx.h @@ -0,0 +1,740 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Cavium, Inc + */ + +#ifndef _LIO_RXTX_H_ +#define _LIO_RXTX_H_ + +#include +#include + +#include +#include + +#include "lio_struct.h" + +#ifndef ROUNDUP4 +#define ROUNDUP4(val) (((val) + 3) & 0xfffffffc) +#endif + +#define LIO_STQUEUE_FIRST_ENTRY(ptr, type, elem) \ + (type *)((char *)((ptr)->stqh_first) - offsetof(type, elem)) + +#define lio_check_timeout(cur_time, chk_time) ((cur_time) > (chk_time)) + +#define lio_uptime \ + (size_t)(rte_get_timer_cycles() / rte_get_timer_hz()) + +/** Descriptor format. + * The descriptor ring is made of descriptors which have 2 64-bit values: + * -# Physical (bus) address of the data buffer. + * -# Physical (bus) address of a lio_droq_info structure. + * The device DMA's incoming packets and its information at the address + * given by these descriptor fields. + */ +struct lio_droq_desc { + /** The buffer pointer */ + uint64_t buffer_ptr; + + /** The Info pointer */ + uint64_t info_ptr; +}; + +#define LIO_DROQ_DESC_SIZE (sizeof(struct lio_droq_desc)) + +/** Information about packet DMA'ed by Octeon. + * The format of the information available at Info Pointer after Octeon + * has posted a packet. Not all descriptors have valid information. Only + * the Info field of the first descriptor for a packet has information + * about the packet. + */ +struct lio_droq_info { + /** The Output Receive Header. */ + union octeon_rh rh; + + /** The Length of the packet. */ + uint64_t length; +}; + +#define LIO_DROQ_INFO_SIZE (sizeof(struct lio_droq_info)) + +/** Pointer to data buffer. + * Driver keeps a pointer to the data buffer that it made available to + * the Octeon device. Since the descriptor ring keeps physical (bus) + * addresses, this field is required for the driver to keep track of + * the virtual address pointers. + */ +struct lio_recv_buffer { + /** Packet buffer, including meta data. */ + void *buffer; + + /** Data in the packet buffer. 
*/ + uint8_t *data; + +}; + +#define LIO_DROQ_RECVBUF_SIZE (sizeof(struct lio_recv_buffer)) + +#define LIO_DROQ_SIZE (sizeof(struct lio_droq)) + +#define LIO_IQ_SEND_OK 0 +#define LIO_IQ_SEND_STOP 1 +#define LIO_IQ_SEND_FAILED -1 + +/* conditions */ +#define LIO_REQTYPE_NONE 0 +#define LIO_REQTYPE_NORESP_NET 1 +#define LIO_REQTYPE_NORESP_NET_SG 2 +#define LIO_REQTYPE_SOFT_COMMAND 3 + +struct lio_request_list { + uint32_t reqtype; + void *buf; +}; + +/*---------------------- INSTRUCTION FORMAT ----------------------------*/ + +struct lio_instr3_64B { + /** Pointer where the input data is available. */ + uint64_t dptr; + + /** Instruction Header. */ + uint64_t ih3; + + /** Instruction Header. */ + uint64_t pki_ih3; + + /** Input Request Header. */ + uint64_t irh; + + /** opcode/subcode specific parameters */ + uint64_t ossp[2]; + + /** Return Data Parameters */ + uint64_t rdp; + + /** Pointer where the response for a RAW mode packet will be written + * by Octeon. + */ + uint64_t rptr; + +}; + +union lio_instr_64B { + struct lio_instr3_64B cmd3; +}; + +/** The size of each buffer in soft command buffer pool */ +#define LIO_SOFT_COMMAND_BUFFER_SIZE 1536 + +/** Maximum number of buffers to allocate into soft command buffer pool */ +#define LIO_MAX_SOFT_COMMAND_BUFFERS 255 + +struct lio_soft_command { + /** Soft command buffer info. */ + struct lio_stailq_node node; + uint64_t dma_addr; + uint32_t size; + + /** Command and return status */ + union lio_instr_64B cmd; + +#define LIO_COMPLETION_WORD_INIT 0xffffffffffffffffULL + uint64_t *status_word; + + /** Data buffer info */ + void *virtdptr; + uint64_t dmadptr; + uint32_t datasize; + + /** Return buffer info */ + void *virtrptr; + uint64_t dmarptr; + uint32_t rdatasize; + + /** Context buffer info */ + void *ctxptr; + uint32_t ctxsize; + + /** Time out and callback */ + size_t wait_time; + size_t timeout; + uint32_t iq_no; + void (*callback)(uint32_t, void *); + void *callback_arg; + struct rte_mbuf *mbuf; +}; + +struct lio_iq_post_status { + int status; + int index; +}; + +/* wqe + * --------------- 0 + * | wqe word0-3 | + * --------------- 32 + * | PCI IH | + * --------------- 40 + * | RPTR | + * --------------- 48 + * | PCI IRH | + * --------------- 56 + * | OCTEON_CMD | + * --------------- 64 + * | Addtl 8-BData | + * | | + * --------------- + */ + +union octeon_cmd { + uint64_t cmd64; + + struct { +#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN + uint64_t cmd : 5; + + uint64_t more : 6; /* How many udd words follow the command */ + + uint64_t reserved : 29; + + uint64_t param1 : 16; + + uint64_t param2 : 8; + +#elif RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN + + uint64_t param2 : 8; + + uint64_t param1 : 16; + + uint64_t reserved : 29; + + uint64_t more : 6; + + uint64_t cmd : 5; + +#endif + } s; +}; + +#define OCTEON_CMD_SIZE (sizeof(union octeon_cmd)) + +/* Maximum number of 8-byte words can be + * sent in a NIC control message. + */ +#define LIO_MAX_NCTRL_UDD 32 + +/* Structure of control information passed by driver to the BASE + * layer when sending control commands to Octeon device software. + */ +struct lio_ctrl_pkt { + /** Command to be passed to the Octeon device software. */ + union octeon_cmd ncmd; + + /** Send buffer */ + void *data; + uint64_t dmadata; + + /** Response buffer */ + void *rdata; + uint64_t dmardata; + + /** Additional data that may be needed by some commands. */ + uint64_t udd[LIO_MAX_NCTRL_UDD]; + + /** Input queue to use to send this command. 
*/ + uint64_t iq_no; + + /** Time to wait for Octeon software to respond to this control command. + * If wait_time is 0, BASE assumes no response is expected. + */ + size_t wait_time; + + struct lio_dev_ctrl_cmd *ctrl_cmd; +}; + +/** Structure of data information passed by driver to the BASE + * layer when forwarding data to Octeon device software. + */ +struct lio_data_pkt { + /** Pointer to information maintained by NIC module for this packet. The + * BASE layer passes this as-is to the driver. + */ + void *buf; + + /** Type of buffer passed in "buf" above. */ + uint32_t reqtype; + + /** Total data bytes to be transferred in this command. */ + uint32_t datasize; + + /** Command to be passed to the Octeon device software. */ + union lio_instr_64B cmd; + + /** Input queue to use to send this command. */ + uint32_t q_no; +}; + +/** Structure passed by driver to BASE layer to prepare a command to send + * network data to Octeon. + */ +union lio_cmd_setup { + struct { + uint32_t iq_no : 8; + uint32_t gather : 1; + uint32_t timestamp : 1; + uint32_t ip_csum : 1; + uint32_t transport_csum : 1; + uint32_t tnl_csum : 1; + uint32_t rsvd : 19; + + union { + uint32_t datasize; + uint32_t gatherptrs; + } u; + } s; + + uint64_t cmd_setup64; +}; + +/* Instruction Header */ +struct octeon_instr_ih3 { +#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN + + /** Reserved3 */ + uint64_t reserved3 : 1; + + /** Gather indicator 1=gather*/ + uint64_t gather : 1; + + /** Data length OR no. of entries in gather list */ + uint64_t dlengsz : 14; + + /** Front Data size */ + uint64_t fsz : 6; + + /** Reserved2 */ + uint64_t reserved2 : 4; + + /** PKI port kind - PKIND */ + uint64_t pkind : 6; + + /** Reserved1 */ + uint64_t reserved1 : 32; + +#elif RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN + /** Reserved1 */ + uint64_t reserved1 : 32; + + /** PKI port kind - PKIND */ + uint64_t pkind : 6; + + /** Reserved2 */ + uint64_t reserved2 : 4; + + /** Front Data size */ + uint64_t fsz : 6; + + /** Data length OR no. 
of entries in gather list */ + uint64_t dlengsz : 14; + + /** Gather indicator 1=gather*/ + uint64_t gather : 1; + + /** Reserved3 */ + uint64_t reserved3 : 1; + +#endif +}; + +/* PKI Instruction Header(PKI IH) */ +struct octeon_instr_pki_ih3 { +#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN + + /** Wider bit */ + uint64_t w : 1; + + /** Raw mode indicator 1 = RAW */ + uint64_t raw : 1; + + /** Use Tag */ + uint64_t utag : 1; + + /** Use QPG */ + uint64_t uqpg : 1; + + /** Reserved2 */ + uint64_t reserved2 : 1; + + /** Parse Mode */ + uint64_t pm : 3; + + /** Skip Length */ + uint64_t sl : 8; + + /** Use Tag Type */ + uint64_t utt : 1; + + /** Tag type */ + uint64_t tagtype : 2; + + /** Reserved1 */ + uint64_t reserved1 : 2; + + /** QPG Value */ + uint64_t qpg : 11; + + /** Tag Value */ + uint64_t tag : 32; + +#elif RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN + + /** Tag Value */ + uint64_t tag : 32; + + /** QPG Value */ + uint64_t qpg : 11; + + /** Reserved1 */ + uint64_t reserved1 : 2; + + /** Tag type */ + uint64_t tagtype : 2; + + /** Use Tag Type */ + uint64_t utt : 1; + + /** Skip Length */ + uint64_t sl : 8; + + /** Parse Mode */ + uint64_t pm : 3; + + /** Reserved2 */ + uint64_t reserved2 : 1; + + /** Use QPG */ + uint64_t uqpg : 1; + + /** Use Tag */ + uint64_t utag : 1; + + /** Raw mode indicator 1 = RAW */ + uint64_t raw : 1; + + /** Wider bit */ + uint64_t w : 1; +#endif +}; + +/** Input Request Header */ +struct octeon_instr_irh { +#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN + uint64_t opcode : 4; + uint64_t rflag : 1; + uint64_t subcode : 7; + uint64_t vlan : 12; + uint64_t priority : 3; + uint64_t reserved : 5; + uint64_t ossp : 32; /* opcode/subcode specific parameters */ +#elif RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN + uint64_t ossp : 32; /* opcode/subcode specific parameters */ + uint64_t reserved : 5; + uint64_t priority : 3; + uint64_t vlan : 12; + uint64_t subcode : 7; + uint64_t rflag : 1; + uint64_t opcode : 4; +#endif +}; + +/* pkiih3 + irh + ossp[0] + ossp[1] + rdp + rptr = 40 bytes */ +#define OCTEON_SOFT_CMD_RESP_IH3 (40 + 8) +/* pki_h3 + irh + ossp[0] + ossp[1] = 32 bytes */ +#define OCTEON_PCI_CMD_O3 (24 + 8) + +/** Return Data Parameters */ +struct octeon_instr_rdp { +#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN + uint64_t reserved : 49; + uint64_t pcie_port : 3; + uint64_t rlen : 12; +#elif RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN + uint64_t rlen : 12; + uint64_t pcie_port : 3; + uint64_t reserved : 49; +#endif +}; + +union octeon_packet_params { + uint32_t pkt_params32; + struct { +#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN + uint32_t reserved : 24; + uint32_t ip_csum : 1; /* Perform IP header checksum(s) */ + /* Perform Outer transport header checksum */ + uint32_t transport_csum : 1; + /* Find tunnel, and perform transport csum. */ + uint32_t tnl_csum : 1; + uint32_t tsflag : 1; /* Timestamp this packet */ + uint32_t ipsec_ops : 4; /* IPsec operation */ +#else + uint32_t ipsec_ops : 4; + uint32_t tsflag : 1; + uint32_t tnl_csum : 1; + uint32_t transport_csum : 1; + uint32_t ip_csum : 1; + uint32_t reserved : 7; +#endif + } s; +}; + +/** Utility function to prepare a 64B NIC instruction based on a setup command + * @param cmd - pointer to instruction to be filled in. + * @param setup - pointer to the setup structure + * @param q_no - which queue for back pressure + * + * Assumes the cmd instruction is pre-allocated, but no fields are filled in. 
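+ *
+ * Typical usage (see lio_dev_xmit_pkts()): zero a union lio_cmd_setup,
+ * set s.iq_no, the checksum bits and either s.u.datasize or s.gather and
+ * s.u.gatherptrs, call this function, then store the IOVA of the data
+ * buffer or of the gather list in cmd->cmd3.dptr.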
+ */ +static inline void +lio_prepare_pci_cmd(struct lio_device *lio_dev, + union lio_instr_64B *cmd, + union lio_cmd_setup *setup, + uint32_t tag) +{ + union octeon_packet_params packet_params; + struct octeon_instr_pki_ih3 *pki_ih3; + struct octeon_instr_irh *irh; + struct octeon_instr_ih3 *ih3; + int port; + + memset(cmd, 0, sizeof(union lio_instr_64B)); + + ih3 = (struct octeon_instr_ih3 *)&cmd->cmd3.ih3; + pki_ih3 = (struct octeon_instr_pki_ih3 *)&cmd->cmd3.pki_ih3; + + /* assume that rflag is cleared so therefore front data will only have + * irh and ossp[1] and ossp[2] for a total of 24 bytes + */ + ih3->pkind = lio_dev->instr_queue[setup->s.iq_no]->txpciq.s.pkind; + /* PKI IH */ + ih3->fsz = OCTEON_PCI_CMD_O3; + + if (!setup->s.gather) { + ih3->dlengsz = setup->s.u.datasize; + } else { + ih3->gather = 1; + ih3->dlengsz = setup->s.u.gatherptrs; + } + + pki_ih3->w = 1; + pki_ih3->raw = 0; + pki_ih3->utag = 0; + pki_ih3->utt = 1; + pki_ih3->uqpg = lio_dev->instr_queue[setup->s.iq_no]->txpciq.s.use_qpg; + + port = (int)lio_dev->instr_queue[setup->s.iq_no]->txpciq.s.port; + + if (tag) + pki_ih3->tag = tag; + else + pki_ih3->tag = LIO_DATA(port); + + pki_ih3->tagtype = OCTEON_ORDERED_TAG; + pki_ih3->qpg = lio_dev->instr_queue[setup->s.iq_no]->txpciq.s.qpg; + pki_ih3->pm = 0x0; /* parse from L2 */ + pki_ih3->sl = 32; /* sl will be sizeof(pki_ih3) + irh + ossp0 + ossp1*/ + + irh = (struct octeon_instr_irh *)&cmd->cmd3.irh; + + irh->opcode = LIO_OPCODE; + irh->subcode = LIO_OPCODE_NW_DATA; + + packet_params.pkt_params32 = 0; + packet_params.s.ip_csum = setup->s.ip_csum; + packet_params.s.transport_csum = setup->s.transport_csum; + packet_params.s.tnl_csum = setup->s.tnl_csum; + packet_params.s.tsflag = setup->s.timestamp; + + irh->ossp = packet_params.pkt_params32; +} + +int lio_setup_sc_buffer_pool(struct lio_device *lio_dev); +void lio_free_sc_buffer_pool(struct lio_device *lio_dev); + +struct lio_soft_command * +lio_alloc_soft_command(struct lio_device *lio_dev, + uint32_t datasize, uint32_t rdatasize, + uint32_t ctxsize); +void lio_prepare_soft_command(struct lio_device *lio_dev, + struct lio_soft_command *sc, + uint8_t opcode, uint8_t subcode, + uint32_t irh_ossp, uint64_t ossp0, + uint64_t ossp1); +int lio_send_soft_command(struct lio_device *lio_dev, + struct lio_soft_command *sc); +void lio_free_soft_command(struct lio_soft_command *sc); + +/** Send control packet to the device + * @param lio_dev - lio device pointer + * @param nctrl - control structure with command, timeout, and callback info + * + * @returns IQ_FAILED if it failed to add to the input queue. IQ_STOP if it the + * queue should be stopped, and LIO_IQ_SEND_OK if it sent okay. + */ +int lio_send_ctrl_pkt(struct lio_device *lio_dev, + struct lio_ctrl_pkt *ctrl_pkt); + +/** Maximum ordered requests to process in every invocation of + * lio_process_ordered_list(). The function will continue to process requests + * as long as it can find one that has finished processing. If it keeps + * finding requests that have completed, the function can run for ever. The + * value defined here sets an upper limit on the number of requests it can + * process before it returns control to the poll thread. + */ +#define LIO_MAX_ORD_REQS_TO_PROCESS 4096 + +/** Error codes used in Octeon Host-Core communication. + * + * 31 16 15 0 + * ---------------------------- + * | | | + * ---------------------------- + * Error codes are 32-bit wide. 
The upper 16-bits, called Major Error Number, + * are reserved to identify the group to which the error code belongs. The + * lower 16-bits, called Minor Error Number, carry the actual code. + * + * So error codes are (MAJOR NUMBER << 16)| MINOR_NUMBER. + */ +/** Status for a request. + * If the request is successfully queued, the driver will return + * a LIO_REQUEST_PENDING status. LIO_REQUEST_TIMEOUT is only returned by + * the driver if the response for request failed to arrive before a + * time-out period or if the request processing * got interrupted due to + * a signal respectively. + */ +enum { + /** A value of 0x00000000 indicates no error i.e. success */ + LIO_REQUEST_DONE = 0x00000000, + /** (Major number: 0x0000; Minor Number: 0x0001) */ + LIO_REQUEST_PENDING = 0x00000001, + LIO_REQUEST_TIMEOUT = 0x00000003, + +}; + +/*------ Error codes used by firmware (bits 15..0 set by firmware */ +#define LIO_FIRMWARE_MAJOR_ERROR_CODE 0x0001 +#define LIO_FIRMWARE_STATUS_CODE(status) \ + ((LIO_FIRMWARE_MAJOR_ERROR_CODE << 16) | (status)) + +/** Initialize the response lists. The number of response lists to create is + * given by count. + * @param lio_dev - the lio device structure. + */ +void lio_setup_response_list(struct lio_device *lio_dev); + +/** Check the status of first entry in the ordered list. If the instruction at + * that entry finished processing or has timed-out, the entry is cleaned. + * @param lio_dev - the lio device structure. + * @return 1 if the ordered list is empty, 0 otherwise. + */ +int lio_process_ordered_list(struct lio_device *lio_dev); + +#define LIO_INCR_INSTRQUEUE_PKT_COUNT(lio_dev, iq_no, field, count) \ + (((lio_dev)->instr_queue[iq_no]->stats.field) += count) + +static inline void +lio_swap_8B_data(uint64_t *data, uint32_t blocks) +{ + while (blocks) { + *data = rte_cpu_to_be_64(*data); + blocks--; + data++; + } +} + +static inline uint64_t +lio_map_ring(void *buf) +{ + rte_iova_t dma_addr; + + dma_addr = rte_mbuf_data_iova_default(((struct rte_mbuf *)buf)); + + return (uint64_t)dma_addr; +} + +static inline uint64_t +lio_map_ring_info(struct lio_droq *droq, uint32_t i) +{ + rte_iova_t dma_addr; + + dma_addr = droq->info_list_dma + (i * LIO_DROQ_INFO_SIZE); + + return (uint64_t)dma_addr; +} + +static inline int +lio_opcode_slow_path(union octeon_rh *rh) +{ + uint16_t subcode1, subcode2; + + subcode1 = LIO_OPCODE_SUBCODE(rh->r.opcode, rh->r.subcode); + subcode2 = LIO_OPCODE_SUBCODE(LIO_OPCODE, LIO_OPCODE_NW_DATA); + + return subcode2 != subcode1; +} + +static inline void +lio_add_sg_size(struct lio_sg_entry *sg_entry, + uint16_t size, uint32_t pos) +{ +#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN + sg_entry->u.size[pos] = size; +#elif RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN + sg_entry->u.size[3 - pos] = size; +#endif +} + +/* Macro to increment index. + * Index is incremented by count; if the sum exceeds + * max, index is wrapped-around to the start. 
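+ *
+ * For example, with max = 8: lio_incr_index(6, 1, 8) yields 7, while
+ * lio_incr_index(6, 3, 8) wraps to 1 and lio_incr_index(7, 1, 8) wraps to 0.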
+ */ +static inline uint32_t +lio_incr_index(uint32_t index, uint32_t count, uint32_t max) +{ + if ((index + count) >= max) + index = index + count - max; + else + index += count; + + return index; +} + +int lio_setup_droq(struct lio_device *lio_dev, int q_no, int num_descs, + int desc_size, struct rte_mempool *mpool, + unsigned int socket_id); +uint16_t lio_dev_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t budget); +void lio_delete_droq_queue(struct lio_device *lio_dev, int oq_no); + +void lio_delete_sglist(struct lio_instr_queue *txq); +int lio_setup_sglists(struct lio_device *lio_dev, int iq_no, + int fw_mapped_iq, int num_descs, unsigned int socket_id); +uint16_t lio_dev_xmit_pkts(void *tx_queue, struct rte_mbuf **pkts, + uint16_t nb_pkts); +int lio_wait_for_instr_fetch(struct lio_device *lio_dev); +int lio_setup_iq(struct lio_device *lio_dev, int q_index, + union octeon_txpciq iq_no, uint32_t num_descs, void *app_ctx, + unsigned int socket_id); +int lio_flush_iq(struct lio_device *lio_dev, struct lio_instr_queue *iq); +void lio_delete_instruction_queue(struct lio_device *lio_dev, int iq_no); +/** Setup instruction queue zero for the device + * @param lio_dev which lio device to setup + * + * @return 0 if success. -1 if fails + */ +int lio_setup_instr_queue0(struct lio_device *lio_dev); +void lio_free_instr_queue0(struct lio_device *lio_dev); +void lio_dev_clear_queues(struct rte_eth_dev *eth_dev); +#endif /* _LIO_RXTX_H_ */ diff --git a/src/spdk/dpdk/drivers/net/liquidio/lio_struct.h b/src/spdk/dpdk/drivers/net/liquidio/lio_struct.h new file mode 100644 index 000000000..10270c560 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/liquidio/lio_struct.h @@ -0,0 +1,661 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Cavium, Inc + */ + +#ifndef _LIO_STRUCT_H_ +#define _LIO_STRUCT_H_ + +#include +#include +#include + +#include +#include + +#include "lio_hw_defs.h" + +struct lio_stailq_node { + STAILQ_ENTRY(lio_stailq_node) entries; +}; + +STAILQ_HEAD(lio_stailq_head, lio_stailq_node); + +struct lio_version { + uint16_t major; + uint16_t minor; + uint16_t micro; + uint16_t reserved; +}; + +/** Input Queue statistics. Each input queue has four stats fields. */ +struct lio_iq_stats { + uint64_t instr_posted; /**< Instructions posted to this queue. */ + uint64_t instr_processed; /**< Instructions processed in this queue. */ + uint64_t instr_dropped; /**< Instructions that could not be processed */ + uint64_t bytes_sent; /**< Bytes sent through this queue. */ + uint64_t tx_done; /**< Num of packets sent to network. */ + uint64_t tx_iq_busy; /**< Num of times this iq was found to be full. */ + uint64_t tx_dropped; /**< Num of pkts dropped due to xmitpath errors. */ + uint64_t tx_tot_bytes; /**< Total count of bytes sent to network. */ +}; + +/** Output Queue statistics. Each output queue has four stats fields. */ +struct lio_droq_stats { + /** Number of packets received in this queue. */ + uint64_t pkts_received; + + /** Bytes received by this queue. */ + uint64_t bytes_received; + + /** Packets dropped due to no memory available. */ + uint64_t dropped_nomem; + + /** Packets dropped due to large number of pkts to process. */ + uint64_t dropped_toomany; + + /** Number of packets sent to stack from this queue. */ + uint64_t rx_pkts_received; + + /** Number of Bytes sent to stack from this queue. */ + uint64_t rx_bytes_received; + + /** Num of Packets dropped due to receive path failures. 
*/ + uint64_t rx_dropped; + + /** Num of vxlan packets received; */ + uint64_t rx_vxlan; + + /** Num of failures of rte_pktmbuf_alloc() */ + uint64_t rx_alloc_failure; + +}; + +/** The Descriptor Ring Output Queue structure. + * This structure has all the information required to implement a + * DROQ. + */ +struct lio_droq { + /** A spinlock to protect access to this ring. */ + rte_spinlock_t lock; + + uint32_t q_no; + + uint32_t pkt_count; + + struct lio_device *lio_dev; + + /** The 8B aligned descriptor ring starts at this address. */ + struct lio_droq_desc *desc_ring; + + /** Index in the ring where the driver should read the next packet */ + uint32_t read_idx; + + /** Index in the ring where Octeon will write the next packet */ + uint32_t write_idx; + + /** Index in the ring where the driver will refill the descriptor's + * buffer + */ + uint32_t refill_idx; + + /** Packets pending to be processed */ + rte_atomic64_t pkts_pending; + + /** Number of descriptors in this ring. */ + uint32_t nb_desc; + + /** The number of descriptors pending refill. */ + uint32_t refill_count; + + uint32_t refill_threshold; + + /** The 8B aligned info ptrs begin from this address. */ + struct lio_droq_info *info_list; + + /** The receive buffer list. This list has the virtual addresses of the + * buffers. + */ + struct lio_recv_buffer *recv_buf_list; + + /** The size of each buffer pointed by the buffer pointer. */ + uint32_t buffer_size; + + /** Pointer to the mapped packet credit register. + * Host writes number of info/buffer ptrs available to this register + */ + void *pkts_credit_reg; + + /** Pointer to the mapped packet sent register. + * Octeon writes the number of packets DMA'ed to host memory + * in this register. + */ + void *pkts_sent_reg; + + /** Statistics for this DROQ. */ + struct lio_droq_stats stats; + + /** DMA mapped address of the DROQ descriptor ring. */ + size_t desc_ring_dma; + + /** Info ptr list are allocated at this virtual address. */ + size_t info_base_addr; + + /** DMA mapped address of the info list */ + size_t info_list_dma; + + /** Allocated size of info list. */ + uint32_t info_alloc_size; + + /** Memory zone **/ + const struct rte_memzone *desc_ring_mz; + const struct rte_memzone *info_mz; + struct rte_mempool *mpool; +}; + +/** Receive Header */ +union octeon_rh { +#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN + uint64_t rh64; + struct { + uint64_t opcode : 4; + uint64_t subcode : 8; + uint64_t len : 3; /** additional 64-bit words */ + uint64_t reserved : 17; + uint64_t ossp : 32; /** opcode/subcode specific parameters */ + } r; + struct { + uint64_t opcode : 4; + uint64_t subcode : 8; + uint64_t len : 3; /** additional 64-bit words */ + uint64_t extra : 28; + uint64_t vlan : 12; + uint64_t priority : 3; + uint64_t csum_verified : 3; /** checksum verified. */ + uint64_t has_hwtstamp : 1; /** Has hardware timestamp.1 = yes.*/ + uint64_t encap_on : 1; + uint64_t has_hash : 1; /** Has hash (rth or rss). 1 = yes. */ + } r_dh; + struct { + uint64_t opcode : 4; + uint64_t subcode : 8; + uint64_t len : 3; /** additional 64-bit words */ + uint64_t reserved : 8; + uint64_t extra : 25; + uint64_t gmxport : 16; + } r_nic_info; +#else + uint64_t rh64; + struct { + uint64_t ossp : 32; /** opcode/subcode specific parameters */ + uint64_t reserved : 17; + uint64_t len : 3; /** additional 64-bit words */ + uint64_t subcode : 8; + uint64_t opcode : 4; + } r; + struct { + uint64_t has_hash : 1; /** Has hash (rth or rss). 1 = yes. 
*/ + uint64_t encap_on : 1; + uint64_t has_hwtstamp : 1; /** 1 = has hwtstamp */ + uint64_t csum_verified : 3; /** checksum verified. */ + uint64_t priority : 3; + uint64_t vlan : 12; + uint64_t extra : 28; + uint64_t len : 3; /** additional 64-bit words */ + uint64_t subcode : 8; + uint64_t opcode : 4; + } r_dh; + struct { + uint64_t gmxport : 16; + uint64_t extra : 25; + uint64_t reserved : 8; + uint64_t len : 3; /** additional 64-bit words */ + uint64_t subcode : 8; + uint64_t opcode : 4; + } r_nic_info; +#endif +}; + +#define OCTEON_RH_SIZE (sizeof(union octeon_rh)) + +/** The txpciq info passed to host from the firmware */ +union octeon_txpciq { + uint64_t txpciq64; + + struct { +#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN + uint64_t q_no : 8; + uint64_t port : 8; + uint64_t pkind : 6; + uint64_t use_qpg : 1; + uint64_t qpg : 11; + uint64_t aura_num : 10; + uint64_t reserved : 20; +#else + uint64_t reserved : 20; + uint64_t aura_num : 10; + uint64_t qpg : 11; + uint64_t use_qpg : 1; + uint64_t pkind : 6; + uint64_t port : 8; + uint64_t q_no : 8; +#endif + } s; +}; + +/** The instruction (input) queue. + * The input queue is used to post raw (instruction) mode data or packet + * data to Octeon device from the host. Each input queue for + * a LIO device has one such structure to represent it. + */ +struct lio_instr_queue { + /** A spinlock to protect access to the input ring. */ + rte_spinlock_t lock; + + rte_spinlock_t post_lock; + + struct lio_device *lio_dev; + + uint32_t pkt_in_done; + + rte_atomic64_t iq_flush_running; + + /** Flag that indicates if the queue uses 64 byte commands. */ + uint32_t iqcmd_64B:1; + + /** Queue info. */ + union octeon_txpciq txpciq; + + uint32_t rsvd:17; + + uint32_t status:8; + + /** Number of descriptors in this ring. */ + uint32_t nb_desc; + + /** Index in input ring where the driver should write the next packet */ + uint32_t host_write_index; + + /** Index in input ring where Octeon is expected to read the next + * packet. + */ + uint32_t lio_read_index; + + /** This index aids in finding the window in the queue where Octeon + * has read the commands. + */ + uint32_t flush_index; + + /** This field keeps track of the instructions pending in this queue. */ + rte_atomic64_t instr_pending; + + /** Pointer to the Virtual Base addr of the input ring. */ + uint8_t *base_addr; + + struct lio_request_list *request_list; + + /** Octeon doorbell register for the ring. */ + void *doorbell_reg; + + /** Octeon instruction count register for this ring. */ + void *inst_cnt_reg; + + /** Number of instructions pending to be posted to Octeon. */ + uint32_t fill_cnt; + + /** Statistics for this input queue. */ + struct lio_iq_stats stats; + + /** DMA mapped base address of the input descriptor ring. */ + uint64_t base_addr_dma; + + /** Application context */ + void *app_ctx; + + /* network stack queue index */ + int q_index; + + /* Memory zone */ + const struct rte_memzone *iq_mz; +}; + +/** This structure is used by driver to store information required + * to free the mbuff when the packet has been fetched by Octeon. + * Bytes offset below assume worst-case of a 64-bit system. + */ +struct lio_buf_free_info { + /** Bytes 1-8. Pointer to network device private structure. */ + struct lio_device *lio_dev; + + /** Bytes 9-16. Pointer to mbuff. */ + struct rte_mbuf *mbuf; + + /** Bytes 17-24. Pointer to gather list. */ + struct lio_gather *g; + + /** Bytes 25-32. Physical address of mbuf->data or gather list. */ + uint64_t dptr; + + /** Bytes 33-47. 
Piggybacked soft command, if any */ + struct lio_soft_command *sc; + + /** Bytes 48-63. iq no */ + uint64_t iq_no; +}; + +/* The Scatter-Gather List Entry. The scatter or gather component used with + * input instruction has this format. + */ +struct lio_sg_entry { + /** The first 64 bit gives the size of data in each dptr. */ + union { + uint16_t size[4]; + uint64_t size64; + } u; + + /** The 4 dptr pointers for this entry. */ + uint64_t ptr[4]; +}; + +#define LIO_SG_ENTRY_SIZE (sizeof(struct lio_sg_entry)) + +/** Structure of a node in list of gather components maintained by + * driver for each network device. + */ +struct lio_gather { + /** List manipulation. Next and prev pointers. */ + struct lio_stailq_node list; + + /** Size of the gather component at sg in bytes. */ + int sg_size; + + /** Number of bytes that sg was adjusted to make it 8B-aligned. */ + int adjust; + + /** Gather component that can accommodate max sized fragment list + * received from the IP layer. + */ + struct lio_sg_entry *sg; +}; + +struct lio_rss_ctx { + uint16_t hash_key_size; + uint8_t hash_key[LIO_RSS_MAX_KEY_SZ]; + /* Ideally a factor of number of queues */ + uint8_t itable[LIO_RSS_MAX_TABLE_SZ]; + uint8_t itable_size; + uint8_t ip; + uint8_t tcp_hash; + uint8_t ipv6; + uint8_t ipv6_tcp_hash; + uint8_t ipv6_ex; + uint8_t ipv6_tcp_ex_hash; + uint8_t hash_disable; +}; + +struct lio_io_enable { + uint64_t iq; + uint64_t oq; + uint64_t iq64B; +}; + +struct lio_fn_list { + void (*setup_iq_regs)(struct lio_device *, uint32_t); + void (*setup_oq_regs)(struct lio_device *, uint32_t); + + int (*setup_mbox)(struct lio_device *); + void (*free_mbox)(struct lio_device *); + + int (*setup_device_regs)(struct lio_device *); + int (*enable_io_queues)(struct lio_device *); + void (*disable_io_queues)(struct lio_device *); +}; + +struct lio_pf_vf_hs_word { +#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN + /** PKIND value assigned for the DPI interface */ + uint64_t pkind : 8; + + /** OCTEON core clock multiplier */ + uint64_t core_tics_per_us : 16; + + /** OCTEON coprocessor clock multiplier */ + uint64_t coproc_tics_per_us : 16; + + /** app that currently running on OCTEON */ + uint64_t app_mode : 8; + + /** RESERVED */ + uint64_t reserved : 16; + +#elif RTE_BYTE_ORDER == RTE_BIG_ENDIAN + + /** RESERVED */ + uint64_t reserved : 16; + + /** app that currently running on OCTEON */ + uint64_t app_mode : 8; + + /** OCTEON coprocessor clock multiplier */ + uint64_t coproc_tics_per_us : 16; + + /** OCTEON core clock multiplier */ + uint64_t core_tics_per_us : 16; + + /** PKIND value assigned for the DPI interface */ + uint64_t pkind : 8; +#endif +}; + +struct lio_sriov_info { + /** Number of rings assigned to VF */ + uint32_t rings_per_vf; + + /** Number of VF devices enabled */ + uint32_t num_vfs; +}; + +/* Head of a response list */ +struct lio_response_list { + /** List structure to add delete pending entries to */ + struct lio_stailq_head head; + + /** A lock for this response list */ + rte_spinlock_t lock; + + rte_atomic64_t pending_req_count; +}; + +/* Structure to define the configuration attributes for each Input queue. */ +struct lio_iq_config { + /* Max number of IQs available */ + uint8_t max_iqs; + + /** Pending list size (usually set to the sum of the size of all Input + * queues) + */ + uint32_t pending_list_size; + + /** Command size - 32 or 64 bytes */ + uint32_t instr_type; +}; + +/* Structure to define the configuration attributes for each Output queue. 
*/ +struct lio_oq_config { + /* Max number of OQs available */ + uint8_t max_oqs; + + /** If set, the Output queue uses info-pointer mode. (Default: 1 ) */ + uint32_t info_ptr; + + /** The number of buffers that were consumed during packet processing by + * the driver on this Output queue before the driver attempts to + * replenish the descriptor ring with new buffers. + */ + uint32_t refill_threshold; +}; + +/* Structure to define the configuration. */ +struct lio_config { + uint16_t card_type; + const char *card_name; + + /** Input Queue attributes. */ + struct lio_iq_config iq; + + /** Output Queue attributes. */ + struct lio_oq_config oq; + + int num_nic_ports; + + int num_def_tx_descs; + + /* Num of desc for rx rings */ + int num_def_rx_descs; + + int def_rx_buf_size; +}; + +/** Status of a RGMII Link on Octeon as seen by core driver. */ +union octeon_link_status { + uint64_t link_status64; + + struct { +#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN + uint64_t duplex : 8; + uint64_t mtu : 16; + uint64_t speed : 16; + uint64_t link_up : 1; + uint64_t autoneg : 1; + uint64_t if_mode : 5; + uint64_t pause : 1; + uint64_t flashing : 1; + uint64_t reserved : 15; +#else + uint64_t reserved : 15; + uint64_t flashing : 1; + uint64_t pause : 1; + uint64_t if_mode : 5; + uint64_t autoneg : 1; + uint64_t link_up : 1; + uint64_t speed : 16; + uint64_t mtu : 16; + uint64_t duplex : 8; +#endif + } s; +}; + +/** The rxpciq info passed to host from the firmware */ +union octeon_rxpciq { + uint64_t rxpciq64; + + struct { +#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN + uint64_t q_no : 8; + uint64_t reserved : 56; +#else + uint64_t reserved : 56; + uint64_t q_no : 8; +#endif + } s; +}; + +/** Information for a OCTEON ethernet interface shared between core & host. */ +struct octeon_link_info { + union octeon_link_status link; + uint64_t hw_addr; + +#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN + uint64_t gmxport : 16; + uint64_t macaddr_is_admin_assigned : 1; + uint64_t vlan_is_admin_assigned : 1; + uint64_t rsvd : 30; + uint64_t num_txpciq : 8; + uint64_t num_rxpciq : 8; +#else + uint64_t num_rxpciq : 8; + uint64_t num_txpciq : 8; + uint64_t rsvd : 30; + uint64_t vlan_is_admin_assigned : 1; + uint64_t macaddr_is_admin_assigned : 1; + uint64_t gmxport : 16; +#endif + + union octeon_txpciq txpciq[LIO_MAX_IOQS_PER_IF]; + union octeon_rxpciq rxpciq[LIO_MAX_IOQS_PER_IF]; +}; + +/* ----------------------- THE LIO DEVICE --------------------------- */ +/** The lio device. + * Each lio device has this structure to represent all its + * components. + */ +struct lio_device { + /** PCI device pointer */ + struct rte_pci_device *pci_dev; + + /** Octeon Chip type */ + uint16_t chip_id; + uint16_t pf_num; + uint16_t vf_num; + + /** This device's PCIe port used for traffic. 
*/ + uint16_t pcie_port; + + /** The state of this device */ + rte_atomic64_t status; + + uint8_t intf_open; + + struct octeon_link_info linfo; + + uint8_t *hw_addr; + + struct lio_fn_list fn_list; + + uint32_t num_iqs; + + /** Guards each glist */ + rte_spinlock_t *glist_lock; + /** Array of gather component linked lists */ + struct lio_stailq_head *glist_head; + + /* The pool containing pre allocated buffers used for soft commands */ + struct rte_mempool *sc_buf_pool; + + /** The input instruction queues */ + struct lio_instr_queue *instr_queue[LIO_MAX_POSSIBLE_INSTR_QUEUES]; + + /** The singly-linked tail queues of instruction response */ + struct lio_response_list response_list; + + uint32_t num_oqs; + + /** The DROQ output queues */ + struct lio_droq *droq[LIO_MAX_POSSIBLE_OUTPUT_QUEUES]; + + struct lio_io_enable io_qmask; + + struct lio_sriov_info sriov_info; + + struct lio_pf_vf_hs_word pfvf_hsword; + + /** Mail Box details of each lio queue. */ + struct lio_mbox **mbox; + + char dev_string[LIO_DEVICE_NAME_LEN]; /* Device print string */ + + const struct lio_config *default_config; + + struct rte_eth_dev *eth_dev; + + uint64_t ifflags; + uint8_t max_rx_queues; + uint8_t max_tx_queues; + uint8_t nb_rx_queues; + uint8_t nb_tx_queues; + uint8_t port_configured; + struct lio_rss_ctx rss_state; + uint16_t port_id; + char firmware_version[LIO_FW_VERSION_LENGTH]; +}; +#endif /* _LIO_STRUCT_H_ */ diff --git a/src/spdk/dpdk/drivers/net/liquidio/meson.build b/src/spdk/dpdk/drivers/net/liquidio/meson.build new file mode 100644 index 000000000..9ae48e213 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/liquidio/meson.build @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2018 Intel Corporation + +sources = files('base/lio_23xx_vf.c', + 'base/lio_mbox.c', + 'lio_ethdev.c', + 'lio_rxtx.c') +includes += include_directories('base') diff --git a/src/spdk/dpdk/drivers/net/liquidio/rte_pmd_liquidio_version.map b/src/spdk/dpdk/drivers/net/liquidio/rte_pmd_liquidio_version.map new file mode 100644 index 000000000..f9f17e4f6 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/liquidio/rte_pmd_liquidio_version.map @@ -0,0 +1,3 @@ +DPDK_20.0 { + local: *; +}; diff --git a/src/spdk/dpdk/drivers/net/memif/Makefile b/src/spdk/dpdk/drivers/net/memif/Makefile new file mode 100644 index 000000000..3bf4ddce4 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/memif/Makefile @@ -0,0 +1,26 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright 2018-2019 Cisco Systems, Inc. All rights reserved. + +include $(RTE_SDK)/mk/rte.vars.mk + +# +# library name +# +LIB = librte_pmd_memif.a + +EXPORT_MAP := rte_pmd_memif_version.map + +CFLAGS += -O3 +CFLAGS += $(WERROR_FLAGS) +LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool +LDLIBS += -lrte_ethdev -lrte_kvargs -lrte_net +LDLIBS += -lrte_hash +LDLIBS += -lrte_bus_vdev + +# +# all source are stored in SRCS-y +# +SRCS-$(CONFIG_RTE_LIBRTE_PMD_MEMIF) += rte_eth_memif.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_MEMIF) += memif_socket.c + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/src/spdk/dpdk/drivers/net/memif/memif.h b/src/spdk/dpdk/drivers/net/memif/memif.h new file mode 100644 index 000000000..b91230890 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/memif/memif.h @@ -0,0 +1,179 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2018-2019 Cisco Systems, Inc. All rights reserved. 
+ */ + +#ifndef _MEMIF_H_ +#define _MEMIF_H_ + +#define MEMIF_COOKIE 0x3E31F20 +#define MEMIF_VERSION_MAJOR 2 +#define MEMIF_VERSION_MINOR 0 +#define MEMIF_VERSION ((MEMIF_VERSION_MAJOR << 8) | MEMIF_VERSION_MINOR) +#define MEMIF_NAME_SZ 32 + +/* + * S2M: direction slave -> master + * M2S: direction master -> slave + */ + +/* + * Type definitions + */ + +typedef enum memif_msg_type { + MEMIF_MSG_TYPE_NONE, + MEMIF_MSG_TYPE_ACK, + MEMIF_MSG_TYPE_HELLO, + MEMIF_MSG_TYPE_INIT, + MEMIF_MSG_TYPE_ADD_REGION, + MEMIF_MSG_TYPE_ADD_RING, + MEMIF_MSG_TYPE_CONNECT, + MEMIF_MSG_TYPE_CONNECTED, + MEMIF_MSG_TYPE_DISCONNECT, +} memif_msg_type_t; + +typedef enum { + MEMIF_RING_S2M, /**< buffer ring in direction slave -> master */ + MEMIF_RING_M2S, /**< buffer ring in direction master -> slave */ +} memif_ring_type_t; + +typedef enum { + MEMIF_INTERFACE_MODE_ETHERNET, + MEMIF_INTERFACE_MODE_IP, + MEMIF_INTERFACE_MODE_PUNT_INJECT, +} memif_interface_mode_t; + +typedef uint16_t memif_region_index_t; +typedef uint32_t memif_region_offset_t; +typedef uint64_t memif_region_size_t; +typedef uint16_t memif_ring_index_t; +typedef uint32_t memif_interface_id_t; +typedef uint16_t memif_version_t; +typedef uint8_t memif_log2_ring_size_t; + +/* + * Socket messages + */ + + /** + * M2S + * Contains master interfaces configuration. + */ +typedef struct __rte_packed { + uint8_t name[MEMIF_NAME_SZ]; /**< Client app name. In this case DPDK version */ + memif_version_t min_version; /**< lowest supported memif version */ + memif_version_t max_version; /**< highest supported memif version */ + memif_region_index_t max_region; /**< maximum num of regions */ + memif_ring_index_t max_m2s_ring; /**< maximum num of M2S ring */ + memif_ring_index_t max_s2m_ring; /**< maximum num of S2M rings */ + memif_log2_ring_size_t max_log2_ring_size; /**< maximum ring size (as log2) */ +} memif_msg_hello_t; + +/** + * S2M + * Contains information required to identify interface + * to which the slave wants to connect. + */ +typedef struct __rte_packed { + memif_version_t version; /**< memif version */ + memif_interface_id_t id; /**< interface id */ + memif_interface_mode_t mode:8; /**< interface mode */ + uint8_t secret[24]; /**< optional security parameter */ + uint8_t name[MEMIF_NAME_SZ]; /**< Client app name. In this case DPDK version */ +} memif_msg_init_t; + +/** + * S2M + * Request master to add new shared memory region to master interface. + * Shared files file descriptor is passed in cmsghdr. + */ +typedef struct __rte_packed { + memif_region_index_t index; /**< shm regions index */ + memif_region_size_t size; /**< shm region size */ +} memif_msg_add_region_t; + +/** + * S2M + * Request master to add new ring to master interface. + */ +typedef struct __rte_packed { + uint16_t flags; /**< flags */ +#define MEMIF_MSG_ADD_RING_FLAG_S2M 1 /**< ring is in S2M direction */ + memif_ring_index_t index; /**< ring index */ + memif_region_index_t region; /**< region index on which this ring is located */ + memif_region_offset_t offset; /**< buffer start offset */ + memif_log2_ring_size_t log2_ring_size; /**< ring size (log2) */ + uint16_t private_hdr_size; /**< used for private metadata */ +} memif_msg_add_ring_t; + +/** + * S2M + * Finalize connection establishment. + */ +typedef struct __rte_packed { + uint8_t if_name[MEMIF_NAME_SZ]; /**< slave interface name */ +} memif_msg_connect_t; + +/** + * M2S + * Finalize connection establishment. 
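+ * Sent by the master in reply to CONNECT; receiving it completes the
+ * control-channel handshake implemented in memif_msg_receive():
+ *   master -> slave: HELLO
+ *   slave  -> master: INIT, ADD_REGION (one per region),
+ *                     ADD_RING (one per ring), CONNECT
+ *   master -> slave: ACK for INIT/ADD_REGION/ADD_RING,
+ *                    CONNECTED in reply to CONNECT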
+ */ +typedef struct __rte_packed { + uint8_t if_name[MEMIF_NAME_SZ]; /**< master interface name */ +} memif_msg_connected_t; + +/** + * S2M & M2S + * Disconnect interfaces. + */ +typedef struct __rte_packed { + uint32_t code; /**< error code */ + uint8_t string[96]; /**< disconnect reason */ +} memif_msg_disconnect_t; + +typedef struct __rte_packed __rte_aligned(128) +{ + memif_msg_type_t type:16; + union { + memif_msg_hello_t hello; + memif_msg_init_t init; + memif_msg_add_region_t add_region; + memif_msg_add_ring_t add_ring; + memif_msg_connect_t connect; + memif_msg_connected_t connected; + memif_msg_disconnect_t disconnect; + }; +} memif_msg_t; + +/* + * Ring and Descriptor Layout + */ + +/** + * Buffer descriptor. + */ +typedef struct __rte_packed { + uint16_t flags; /**< flags */ +#define MEMIF_DESC_FLAG_NEXT 1 /**< is chained buffer */ + memif_region_index_t region; /**< region index on which the buffer is located */ + uint32_t length; /**< buffer length */ + memif_region_offset_t offset; /**< buffer offset */ + uint32_t metadata; +} memif_desc_t; + +#define MEMIF_CACHELINE_ALIGN_MARK(mark) \ + RTE_MARKER mark __rte_cache_aligned; + +typedef struct { + MEMIF_CACHELINE_ALIGN_MARK(cacheline0); + uint32_t cookie; /**< MEMIF_COOKIE */ + uint16_t flags; /**< flags */ +#define MEMIF_RING_FLAG_MASK_INT 1 /**< disable interrupt mode */ + uint16_t head; /**< pointer to ring buffer head */ + MEMIF_CACHELINE_ALIGN_MARK(cacheline1); + uint16_t tail; /**< pointer to ring buffer tail */ + MEMIF_CACHELINE_ALIGN_MARK(cacheline2); + memif_desc_t desc[0]; /**< buffer descriptors */ +} memif_ring_t; + +#endif /* _MEMIF_H_ */ diff --git a/src/spdk/dpdk/drivers/net/memif/memif_socket.c b/src/spdk/dpdk/drivers/net/memif/memif_socket.c new file mode 100644 index 000000000..67794cb6f --- /dev/null +++ b/src/spdk/dpdk/drivers/net/memif/memif_socket.c @@ -0,0 +1,1115 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2018-2019 Cisco Systems, Inc. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "rte_eth_memif.h" +#include "memif_socket.h" + +static void memif_intr_handler(void *arg); + +static ssize_t +memif_msg_send(int fd, memif_msg_t *msg, int afd) +{ + struct msghdr mh = { 0 }; + struct iovec iov[1]; + struct cmsghdr *cmsg; + char ctl[CMSG_SPACE(sizeof(int))]; + + iov[0].iov_base = msg; + iov[0].iov_len = sizeof(memif_msg_t); + mh.msg_iov = iov; + mh.msg_iovlen = 1; + + if (afd > 0) { + memset(&ctl, 0, sizeof(ctl)); + mh.msg_control = ctl; + mh.msg_controllen = sizeof(ctl); + cmsg = CMSG_FIRSTHDR(&mh); + cmsg->cmsg_len = CMSG_LEN(sizeof(int)); + cmsg->cmsg_level = SOL_SOCKET; + cmsg->cmsg_type = SCM_RIGHTS; + rte_memcpy(CMSG_DATA(cmsg), &afd, sizeof(int)); + } + + return sendmsg(fd, &mh, 0); +} + +static int +memif_msg_send_from_queue(struct memif_control_channel *cc) +{ + ssize_t size; + int ret = 0; + struct memif_msg_queue_elt *e; + + e = TAILQ_FIRST(&cc->msg_queue); + if (e == NULL) + return 0; + + size = memif_msg_send(cc->intr_handle.fd, &e->msg, e->fd); + if (size != sizeof(memif_msg_t)) { + MIF_LOG(ERR, "sendmsg fail: %s.", strerror(errno)); + ret = -1; + } else { + MIF_LOG(DEBUG, "Sent msg type %u.", e->msg.type); + } + TAILQ_REMOVE(&cc->msg_queue, e, next); + rte_free(e); + + return ret; +} + +static struct memif_msg_queue_elt * +memif_msg_enq(struct memif_control_channel *cc) +{ + struct memif_msg_queue_elt *e; + + e = rte_zmalloc("memif_msg", sizeof(struct memif_msg_queue_elt), 0); + if (e == NULL) { + MIF_LOG(ERR, "Failed to allocate control message."); + return NULL; + } + + e->fd = -1; + TAILQ_INSERT_TAIL(&cc->msg_queue, e, next); + + return e; +} + +void +memif_msg_enq_disconnect(struct memif_control_channel *cc, const char *reason, + int err_code) +{ + struct memif_msg_queue_elt *e; + struct pmd_internals *pmd; + memif_msg_disconnect_t *d; + + if (cc == NULL) { + MIF_LOG(DEBUG, "Missing control channel."); + return; + } + + e = memif_msg_enq(cc); + if (e == NULL) { + MIF_LOG(WARNING, "Failed to enqueue disconnect message."); + return; + } + + d = &e->msg.disconnect; + + e->msg.type = MEMIF_MSG_TYPE_DISCONNECT; + d->code = err_code; + + if (reason != NULL) { + strlcpy((char *)d->string, reason, sizeof(d->string)); + if (cc->dev != NULL) { + pmd = cc->dev->data->dev_private; + strlcpy(pmd->local_disc_string, reason, + sizeof(pmd->local_disc_string)); + } + } +} + +static int +memif_msg_enq_hello(struct memif_control_channel *cc) +{ + struct memif_msg_queue_elt *e = memif_msg_enq(cc); + memif_msg_hello_t *h; + + if (e == NULL) + return -1; + + h = &e->msg.hello; + + e->msg.type = MEMIF_MSG_TYPE_HELLO; + h->min_version = MEMIF_VERSION; + h->max_version = MEMIF_VERSION; + h->max_s2m_ring = ETH_MEMIF_MAX_NUM_Q_PAIRS; + h->max_m2s_ring = ETH_MEMIF_MAX_NUM_Q_PAIRS; + h->max_region = ETH_MEMIF_MAX_REGION_NUM - 1; + h->max_log2_ring_size = ETH_MEMIF_MAX_LOG2_RING_SIZE; + + strlcpy((char *)h->name, rte_version(), sizeof(h->name)); + + return 0; +} + +static int +memif_msg_receive_hello(struct rte_eth_dev *dev, memif_msg_t *msg) +{ + struct pmd_internals *pmd = dev->data->dev_private; + memif_msg_hello_t *h = &msg->hello; + + if (h->min_version > MEMIF_VERSION || h->max_version < MEMIF_VERSION) { + memif_msg_enq_disconnect(pmd->cc, "Incompatible memif version", 0); + return -1; + } + + /* Set parameters for active connection */ + pmd->run.num_s2m_rings = 
RTE_MIN(h->max_s2m_ring + 1, + pmd->cfg.num_s2m_rings); + pmd->run.num_m2s_rings = RTE_MIN(h->max_m2s_ring + 1, + pmd->cfg.num_m2s_rings); + pmd->run.log2_ring_size = RTE_MIN(h->max_log2_ring_size, + pmd->cfg.log2_ring_size); + pmd->run.pkt_buffer_size = pmd->cfg.pkt_buffer_size; + + strlcpy(pmd->remote_name, (char *)h->name, sizeof(pmd->remote_name)); + + MIF_LOG(DEBUG, "Connecting to %s.", pmd->remote_name); + + return 0; +} + +static int +memif_msg_receive_init(struct memif_control_channel *cc, memif_msg_t *msg) +{ + memif_msg_init_t *i = &msg->init; + struct memif_socket_dev_list_elt *elt; + struct pmd_internals *pmd; + struct rte_eth_dev *dev; + + if (i->version != MEMIF_VERSION) { + memif_msg_enq_disconnect(cc, "Incompatible memif version", 0); + return -1; + } + + if (cc->socket == NULL) { + memif_msg_enq_disconnect(cc, "Device error", 0); + return -1; + } + + /* Find device with requested ID */ + TAILQ_FOREACH(elt, &cc->socket->dev_queue, next) { + dev = elt->dev; + pmd = dev->data->dev_private; + if (((pmd->flags & ETH_MEMIF_FLAG_DISABLED) == 0) && + (pmd->id == i->id) && (pmd->role == MEMIF_ROLE_MASTER)) { + if (pmd->flags & (ETH_MEMIF_FLAG_CONNECTING | + ETH_MEMIF_FLAG_CONNECTED)) { + memif_msg_enq_disconnect(cc, + "Already connected", 0); + return -1; + } + + /* assign control channel to device */ + cc->dev = dev; + pmd->cc = cc; + + if (i->mode != MEMIF_INTERFACE_MODE_ETHERNET) { + memif_msg_enq_disconnect(pmd->cc, + "Only ethernet mode supported", + 0); + return -1; + } + + strlcpy(pmd->remote_name, (char *)i->name, + sizeof(pmd->remote_name)); + + if (*pmd->secret != '\0') { + if (*i->secret == '\0') { + memif_msg_enq_disconnect(pmd->cc, + "Secret required", 0); + return -1; + } + if (strncmp(pmd->secret, (char *)i->secret, + ETH_MEMIF_SECRET_SIZE) != 0) { + memif_msg_enq_disconnect(pmd->cc, + "Incorrect secret", 0); + return -1; + } + } + + pmd->flags |= ETH_MEMIF_FLAG_CONNECTING; + return 0; + } + } + + /* ID not found on this socket */ + MIF_LOG(DEBUG, "ID %u not found.", i->id); + memif_msg_enq_disconnect(cc, "ID not found", 0); + return -1; +} + +static int +memif_msg_receive_add_region(struct rte_eth_dev *dev, memif_msg_t *msg, + int fd) +{ + struct pmd_internals *pmd = dev->data->dev_private; + struct pmd_process_private *proc_private = dev->process_private; + memif_msg_add_region_t *ar = &msg->add_region; + struct memif_region *r; + + if (fd < 0) { + memif_msg_enq_disconnect(pmd->cc, "Missing region fd", 0); + return -1; + } + + if (ar->index >= ETH_MEMIF_MAX_REGION_NUM || + ar->index != proc_private->regions_num || + proc_private->regions[ar->index] != NULL) { + memif_msg_enq_disconnect(pmd->cc, "Invalid region index", 0); + return -1; + } + + r = rte_zmalloc("region", sizeof(struct memif_region), 0); + if (r == NULL) { + memif_msg_enq_disconnect(pmd->cc, "Failed to alloc memif region.", 0); + return -ENOMEM; + } + + r->fd = fd; + r->region_size = ar->size; + r->addr = NULL; + + proc_private->regions[ar->index] = r; + proc_private->regions_num++; + + return 0; +} + +static int +memif_msg_receive_add_ring(struct rte_eth_dev *dev, memif_msg_t *msg, int fd) +{ + struct pmd_internals *pmd = dev->data->dev_private; + memif_msg_add_ring_t *ar = &msg->add_ring; + struct memif_queue *mq; + + if (fd < 0) { + memif_msg_enq_disconnect(pmd->cc, "Missing interrupt fd", 0); + return -1; + } + + /* check if we have enough queues */ + if (ar->flags & MEMIF_MSG_ADD_RING_FLAG_S2M) { + if (ar->index >= pmd->cfg.num_s2m_rings) { + memif_msg_enq_disconnect(pmd->cc, "Invalid ring index", 
0); + return -1; + } + pmd->run.num_s2m_rings++; + } else { + if (ar->index >= pmd->cfg.num_m2s_rings) { + memif_msg_enq_disconnect(pmd->cc, "Invalid ring index", 0); + return -1; + } + pmd->run.num_m2s_rings++; + } + + mq = (ar->flags & MEMIF_MSG_ADD_RING_FLAG_S2M) ? + dev->data->rx_queues[ar->index] : dev->data->tx_queues[ar->index]; + + mq->intr_handle.fd = fd; + mq->log2_ring_size = ar->log2_ring_size; + mq->region = ar->region; + mq->ring_offset = ar->offset; + + return 0; +} + +static int +memif_msg_receive_connect(struct rte_eth_dev *dev, memif_msg_t *msg) +{ + struct pmd_internals *pmd = dev->data->dev_private; + memif_msg_connect_t *c = &msg->connect; + int ret; + + ret = memif_connect(dev); + if (ret < 0) + return ret; + + strlcpy(pmd->remote_if_name, (char *)c->if_name, + sizeof(pmd->remote_if_name)); + MIF_LOG(INFO, "Remote interface %s connected.", pmd->remote_if_name); + + return 0; +} + +static int +memif_msg_receive_connected(struct rte_eth_dev *dev, memif_msg_t *msg) +{ + struct pmd_internals *pmd = dev->data->dev_private; + memif_msg_connected_t *c = &msg->connected; + int ret; + + ret = memif_connect(dev); + if (ret < 0) + return ret; + + strlcpy(pmd->remote_if_name, (char *)c->if_name, + sizeof(pmd->remote_if_name)); + MIF_LOG(INFO, "Remote interface %s connected.", pmd->remote_if_name); + + return 0; +} + +static int +memif_msg_receive_disconnect(struct rte_eth_dev *dev, memif_msg_t *msg) +{ + struct pmd_internals *pmd = dev->data->dev_private; + memif_msg_disconnect_t *d = &msg->disconnect; + + memset(pmd->remote_disc_string, 0, sizeof(pmd->remote_disc_string)); + strlcpy(pmd->remote_disc_string, (char *)d->string, + sizeof(pmd->remote_disc_string)); + + MIF_LOG(INFO, "Disconnect received: %s", pmd->remote_disc_string); + + memset(pmd->local_disc_string, 0, 96); + memif_disconnect(dev); + return 0; +} + +static int +memif_msg_enq_ack(struct rte_eth_dev *dev) +{ + struct pmd_internals *pmd = dev->data->dev_private; + struct memif_msg_queue_elt *e = memif_msg_enq(pmd->cc); + if (e == NULL) + return -1; + + e->msg.type = MEMIF_MSG_TYPE_ACK; + + return 0; +} + +static int +memif_msg_enq_init(struct rte_eth_dev *dev) +{ + struct pmd_internals *pmd = dev->data->dev_private; + struct memif_msg_queue_elt *e = memif_msg_enq(pmd->cc); + memif_msg_init_t *i = &e->msg.init; + + if (e == NULL) + return -1; + + i = &e->msg.init; + e->msg.type = MEMIF_MSG_TYPE_INIT; + i->version = MEMIF_VERSION; + i->id = pmd->id; + i->mode = MEMIF_INTERFACE_MODE_ETHERNET; + + strlcpy((char *)i->name, rte_version(), sizeof(i->name)); + + if (*pmd->secret != '\0') + strlcpy((char *)i->secret, pmd->secret, sizeof(i->secret)); + + return 0; +} + +static int +memif_msg_enq_add_region(struct rte_eth_dev *dev, uint8_t idx) +{ + struct pmd_internals *pmd = dev->data->dev_private; + struct pmd_process_private *proc_private = dev->process_private; + struct memif_msg_queue_elt *e = memif_msg_enq(pmd->cc); + memif_msg_add_region_t *ar; + struct memif_region *mr = proc_private->regions[idx]; + + if (e == NULL) + return -1; + + ar = &e->msg.add_region; + e->msg.type = MEMIF_MSG_TYPE_ADD_REGION; + e->fd = mr->fd; + ar->index = idx; + ar->size = mr->region_size; + + return 0; +} + +static int +memif_msg_enq_add_ring(struct rte_eth_dev *dev, uint8_t idx, + memif_ring_type_t type) +{ + struct pmd_internals *pmd = dev->data->dev_private; + struct memif_msg_queue_elt *e = memif_msg_enq(pmd->cc); + struct memif_queue *mq; + memif_msg_add_ring_t *ar; + + if (e == NULL) + return -1; + + ar = &e->msg.add_ring; + mq = 
(type == MEMIF_RING_S2M) ? dev->data->tx_queues[idx] : + dev->data->rx_queues[idx]; + + e->msg.type = MEMIF_MSG_TYPE_ADD_RING; + e->fd = mq->intr_handle.fd; + ar->index = idx; + ar->offset = mq->ring_offset; + ar->region = mq->region; + ar->log2_ring_size = mq->log2_ring_size; + ar->flags = (type == MEMIF_RING_S2M) ? MEMIF_MSG_ADD_RING_FLAG_S2M : 0; + ar->private_hdr_size = 0; + + return 0; +} + +static int +memif_msg_enq_connect(struct rte_eth_dev *dev) +{ + struct pmd_internals *pmd = dev->data->dev_private; + struct memif_msg_queue_elt *e = memif_msg_enq(pmd->cc); + memif_msg_connect_t *c; + + if (e == NULL) + return -1; + + c = &e->msg.connect; + e->msg.type = MEMIF_MSG_TYPE_CONNECT; + strlcpy((char *)c->if_name, dev->data->name, sizeof(c->if_name)); + + return 0; +} + +static int +memif_msg_enq_connected(struct rte_eth_dev *dev) +{ + struct pmd_internals *pmd = dev->data->dev_private; + struct memif_msg_queue_elt *e = memif_msg_enq(pmd->cc); + memif_msg_connected_t *c; + + if (e == NULL) + return -1; + + c = &e->msg.connected; + e->msg.type = MEMIF_MSG_TYPE_CONNECTED; + strlcpy((char *)c->if_name, dev->data->name, sizeof(c->if_name)); + + return 0; +} + +static void +memif_intr_unregister_handler(struct rte_intr_handle *intr_handle, void *arg) +{ + struct memif_msg_queue_elt *elt; + struct memif_control_channel *cc = arg; + + /* close control channel fd */ + close(intr_handle->fd); + /* clear message queue */ + while ((elt = TAILQ_FIRST(&cc->msg_queue)) != NULL) { + TAILQ_REMOVE(&cc->msg_queue, elt, next); + rte_free(elt); + } + /* free control channel */ + rte_free(cc); +} + +void +memif_disconnect(struct rte_eth_dev *dev) +{ + struct pmd_internals *pmd = dev->data->dev_private; + struct memif_msg_queue_elt *elt, *next; + struct memif_queue *mq; + struct rte_intr_handle *ih; + int i; + int ret; + + dev->data->dev_link.link_status = ETH_LINK_DOWN; + pmd->flags &= ~ETH_MEMIF_FLAG_CONNECTING; + pmd->flags &= ~ETH_MEMIF_FLAG_CONNECTED; + + rte_spinlock_lock(&pmd->cc_lock); + if (pmd->cc != NULL) { + /* Clear control message queue (except disconnect message if any). */ + for (elt = TAILQ_FIRST(&pmd->cc->msg_queue); elt != NULL; elt = next) { + next = TAILQ_NEXT(elt, next); + if (elt->msg.type != MEMIF_MSG_TYPE_DISCONNECT) { + TAILQ_REMOVE(&pmd->cc->msg_queue, elt, next); + rte_free(elt); + } + } + /* send disconnect message (if there is any in queue) */ + memif_msg_send_from_queue(pmd->cc); + + /* at this point, there should be no more messages in queue */ + if (TAILQ_FIRST(&pmd->cc->msg_queue) != NULL) { + MIF_LOG(WARNING, + "Unexpected message(s) in message queue."); + } + + ih = &pmd->cc->intr_handle; + if (ih->fd > 0) { + ret = rte_intr_callback_unregister(ih, + memif_intr_handler, + pmd->cc); + /* + * If callback is active (disconnecting based on + * received control message). 
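+ * rte_intr_callback_unregister() then returns -EAGAIN, so the
+ * unregister is deferred via rte_intr_callback_unregister_pending();
+ * memif_intr_unregister_handler() closes the fd and frees the
+ * control channel once the callback has run to completion.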
+ */ + if (ret == -EAGAIN) { + ret = rte_intr_callback_unregister_pending(ih, + memif_intr_handler, + pmd->cc, + memif_intr_unregister_handler); + } else if (ret > 0) { + close(ih->fd); + rte_free(pmd->cc); + } + pmd->cc = NULL; + if (ret <= 0) + MIF_LOG(WARNING, + "Failed to unregister control channel callback."); + } + } + rte_spinlock_unlock(&pmd->cc_lock); + + /* unconfig interrupts */ + for (i = 0; i < pmd->cfg.num_s2m_rings; i++) { + if (pmd->role == MEMIF_ROLE_SLAVE) { + if (dev->data->tx_queues != NULL) + mq = dev->data->tx_queues[i]; + else + continue; + } else { + if (dev->data->rx_queues != NULL) + mq = dev->data->rx_queues[i]; + else + continue; + } + if (mq->intr_handle.fd > 0) { + close(mq->intr_handle.fd); + mq->intr_handle.fd = -1; + } + } + for (i = 0; i < pmd->cfg.num_m2s_rings; i++) { + if (pmd->role == MEMIF_ROLE_MASTER) { + if (dev->data->tx_queues != NULL) + mq = dev->data->tx_queues[i]; + else + continue; + } else { + if (dev->data->rx_queues != NULL) + mq = dev->data->rx_queues[i]; + else + continue; + } + if (mq->intr_handle.fd > 0) { + close(mq->intr_handle.fd); + mq->intr_handle.fd = -1; + } + } + + memif_free_regions(dev); + + /* reset connection configuration */ + memset(&pmd->run, 0, sizeof(pmd->run)); + + MIF_LOG(DEBUG, "Disconnected, id: %d, role: %s.", pmd->id, + (pmd->role == MEMIF_ROLE_MASTER) ? "master" : "slave"); +} + +static int +memif_msg_receive(struct memif_control_channel *cc) +{ + char ctl[CMSG_SPACE(sizeof(int)) + + CMSG_SPACE(sizeof(struct ucred))] = { 0 }; + struct msghdr mh = { 0 }; + struct iovec iov[1]; + memif_msg_t msg = { 0 }; + ssize_t size; + int ret = 0; + struct ucred *cr __rte_unused; + cr = 0; + struct cmsghdr *cmsg; + int afd = -1; + int i; + struct pmd_internals *pmd; + struct pmd_process_private *proc_private; + + iov[0].iov_base = (void *)&msg; + iov[0].iov_len = sizeof(memif_msg_t); + mh.msg_iov = iov; + mh.msg_iovlen = 1; + mh.msg_control = ctl; + mh.msg_controllen = sizeof(ctl); + + size = recvmsg(cc->intr_handle.fd, &mh, 0); + if (size != sizeof(memif_msg_t)) { + MIF_LOG(DEBUG, "Invalid message size = %zd", size); + if (size > 0) + /* 0 means end-of-file, negative size means error, + * don't send further disconnect message in such cases. 
+ */ + memif_msg_enq_disconnect(cc, "Invalid message size", 0); + return -1; + } + MIF_LOG(DEBUG, "Received msg type: %u.", msg.type); + + cmsg = CMSG_FIRSTHDR(&mh); + while (cmsg) { + if (cmsg->cmsg_level == SOL_SOCKET) { + if (cmsg->cmsg_type == SCM_CREDENTIALS) + cr = (struct ucred *)CMSG_DATA(cmsg); + else if (cmsg->cmsg_type == SCM_RIGHTS) + rte_memcpy(&afd, CMSG_DATA(cmsg), sizeof(int)); + } + cmsg = CMSG_NXTHDR(&mh, cmsg); + } + + if (cc->dev == NULL && msg.type != MEMIF_MSG_TYPE_INIT) { + MIF_LOG(DEBUG, "Unexpected message."); + memif_msg_enq_disconnect(cc, "Unexpected message", 0); + return -1; + } + + /* get device from hash data */ + switch (msg.type) { + case MEMIF_MSG_TYPE_ACK: + break; + case MEMIF_MSG_TYPE_HELLO: + ret = memif_msg_receive_hello(cc->dev, &msg); + if (ret < 0) + goto exit; + ret = memif_init_regions_and_queues(cc->dev); + if (ret < 0) + goto exit; + ret = memif_msg_enq_init(cc->dev); + if (ret < 0) + goto exit; + pmd = cc->dev->data->dev_private; + proc_private = cc->dev->process_private; + for (i = 0; i < proc_private->regions_num; i++) { + ret = memif_msg_enq_add_region(cc->dev, i); + if (ret < 0) + goto exit; + } + for (i = 0; i < pmd->run.num_s2m_rings; i++) { + ret = memif_msg_enq_add_ring(cc->dev, i, + MEMIF_RING_S2M); + if (ret < 0) + goto exit; + } + for (i = 0; i < pmd->run.num_m2s_rings; i++) { + ret = memif_msg_enq_add_ring(cc->dev, i, + MEMIF_RING_M2S); + if (ret < 0) + goto exit; + } + ret = memif_msg_enq_connect(cc->dev); + if (ret < 0) + goto exit; + break; + case MEMIF_MSG_TYPE_INIT: + /* + * This cc does not have an interface asociated with it. + * If suitable interface is found it will be assigned here. + */ + ret = memif_msg_receive_init(cc, &msg); + if (ret < 0) + goto exit; + ret = memif_msg_enq_ack(cc->dev); + if (ret < 0) + goto exit; + break; + case MEMIF_MSG_TYPE_ADD_REGION: + ret = memif_msg_receive_add_region(cc->dev, &msg, afd); + if (ret < 0) + goto exit; + ret = memif_msg_enq_ack(cc->dev); + if (ret < 0) + goto exit; + break; + case MEMIF_MSG_TYPE_ADD_RING: + ret = memif_msg_receive_add_ring(cc->dev, &msg, afd); + if (ret < 0) + goto exit; + ret = memif_msg_enq_ack(cc->dev); + if (ret < 0) + goto exit; + break; + case MEMIF_MSG_TYPE_CONNECT: + ret = memif_msg_receive_connect(cc->dev, &msg); + if (ret < 0) + goto exit; + ret = memif_msg_enq_connected(cc->dev); + if (ret < 0) + goto exit; + break; + case MEMIF_MSG_TYPE_CONNECTED: + ret = memif_msg_receive_connected(cc->dev, &msg); + break; + case MEMIF_MSG_TYPE_DISCONNECT: + ret = memif_msg_receive_disconnect(cc->dev, &msg); + if (ret < 0) + goto exit; + break; + default: + memif_msg_enq_disconnect(cc, "Unknown message type", 0); + ret = -1; + goto exit; + } + + exit: + return ret; +} + +static void +memif_intr_handler(void *arg) +{ + struct memif_control_channel *cc = arg; + int ret; + + ret = memif_msg_receive(cc); + /* if driver failed to assign device */ + if (cc->dev == NULL) { + memif_msg_send_from_queue(cc); + ret = rte_intr_callback_unregister_pending(&cc->intr_handle, + memif_intr_handler, + cc, + memif_intr_unregister_handler); + if (ret < 0) + MIF_LOG(WARNING, + "Failed to unregister control channel callback."); + return; + } + /* if memif_msg_receive failed */ + if (ret < 0) + goto disconnect; + + ret = memif_msg_send_from_queue(cc); + if (ret < 0) + goto disconnect; + + return; + + disconnect: + if (cc->dev == NULL) { + MIF_LOG(WARNING, "eth dev not allocated"); + return; + } + memif_disconnect(cc->dev); +} + +static void +memif_listener_handler(void *arg) +{ + struct 
memif_socket *socket = arg; + int sockfd; + int addr_len; + struct sockaddr_un client; + struct memif_control_channel *cc; + int ret; + + addr_len = sizeof(client); + sockfd = accept(socket->intr_handle.fd, (struct sockaddr *)&client, + (socklen_t *)&addr_len); + if (sockfd < 0) { + MIF_LOG(ERR, + "Failed to accept connection request on socket fd %d", + socket->intr_handle.fd); + return; + } + + MIF_LOG(DEBUG, "%s: Connection request accepted.", socket->filename); + + cc = rte_zmalloc("memif-cc", sizeof(struct memif_control_channel), 0); + if (cc == NULL) { + MIF_LOG(ERR, "Failed to allocate control channel."); + goto error; + } + + cc->intr_handle.fd = sockfd; + cc->intr_handle.type = RTE_INTR_HANDLE_EXT; + cc->socket = socket; + cc->dev = NULL; + TAILQ_INIT(&cc->msg_queue); + + ret = rte_intr_callback_register(&cc->intr_handle, memif_intr_handler, cc); + if (ret < 0) { + MIF_LOG(ERR, "Failed to register control channel callback."); + goto error; + } + + ret = memif_msg_enq_hello(cc); + if (ret < 0) { + MIF_LOG(ERR, "Failed to enqueue hello message."); + goto error; + } + ret = memif_msg_send_from_queue(cc); + if (ret < 0) + goto error; + + return; + + error: + if (sockfd >= 0) { + close(sockfd); + sockfd = -1; + } + if (cc != NULL) + rte_free(cc); +} + +static struct memif_socket * +memif_socket_create(char *key, uint8_t listener) +{ + struct memif_socket *sock; + struct sockaddr_un un; + int sockfd; + int ret; + int on = 1; + + sock = rte_zmalloc("memif-socket", sizeof(struct memif_socket), 0); + if (sock == NULL) { + MIF_LOG(ERR, "Failed to allocate memory for memif socket"); + return NULL; + } + + sock->listener = listener; + strlcpy(sock->filename, key, MEMIF_SOCKET_UN_SIZE); + TAILQ_INIT(&sock->dev_queue); + + if (listener != 0) { + sockfd = socket(AF_UNIX, SOCK_SEQPACKET, 0); + if (sockfd < 0) + goto error; + + un.sun_family = AF_UNIX; + strlcpy(un.sun_path, sock->filename, MEMIF_SOCKET_UN_SIZE); + + ret = setsockopt(sockfd, SOL_SOCKET, SO_PASSCRED, &on, + sizeof(on)); + if (ret < 0) + goto error; + + ret = bind(sockfd, (struct sockaddr *)&un, sizeof(un)); + if (ret < 0) + goto error; + + ret = listen(sockfd, 1); + if (ret < 0) + goto error; + + MIF_LOG(DEBUG, "Memif listener socket %s created.", sock->filename); + + sock->intr_handle.fd = sockfd; + sock->intr_handle.type = RTE_INTR_HANDLE_EXT; + ret = rte_intr_callback_register(&sock->intr_handle, + memif_listener_handler, sock); + if (ret < 0) { + MIF_LOG(ERR, "Failed to register interrupt " + "callback for listener socket"); + return NULL; + } + } + + return sock; + + error: + MIF_LOG(ERR, "Failed to setup socket %s: %s", key, strerror(errno)); + if (sock != NULL) + rte_free(sock); + if (sockfd >= 0) + close(sockfd); + return NULL; +} + +static struct rte_hash * +memif_create_socket_hash(void) +{ + struct rte_hash_parameters params = { 0 }; + + params.name = MEMIF_SOCKET_HASH_NAME; + params.entries = 256; + params.key_len = MEMIF_SOCKET_UN_SIZE; + params.hash_func = rte_jhash; + params.hash_func_init_val = 0; + return rte_hash_create(¶ms); +} + +int +memif_socket_init(struct rte_eth_dev *dev, const char *socket_filename) +{ + struct pmd_internals *pmd = dev->data->dev_private; + struct memif_socket *socket = NULL; + struct memif_socket_dev_list_elt *elt; + struct pmd_internals *tmp_pmd; + struct rte_hash *hash; + int ret; + char key[MEMIF_SOCKET_UN_SIZE]; + + hash = rte_hash_find_existing(MEMIF_SOCKET_HASH_NAME); + if (hash == NULL) { + hash = memif_create_socket_hash(); + if (hash == NULL) { + MIF_LOG(ERR, "Failed to create 
memif socket hash."); + return -1; + } + } + + memset(key, 0, MEMIF_SOCKET_UN_SIZE); + strlcpy(key, socket_filename, MEMIF_SOCKET_UN_SIZE); + ret = rte_hash_lookup_data(hash, key, (void **)&socket); + if (ret < 0) { + socket = memif_socket_create(key, + (pmd->role == MEMIF_ROLE_SLAVE) ? 0 : 1); + if (socket == NULL) + return -1; + ret = rte_hash_add_key_data(hash, key, socket); + if (ret < 0) { + MIF_LOG(ERR, "Failed to add socket to socket hash."); + return ret; + } + } + pmd->socket_filename = socket->filename; + + TAILQ_FOREACH(elt, &socket->dev_queue, next) { + tmp_pmd = elt->dev->data->dev_private; + if (tmp_pmd->id == pmd->id && tmp_pmd->role == pmd->role) { + MIF_LOG(ERR, "Two interfaces with the same id (%d) can " + "not have the same role.", pmd->id); + return -1; + } + } + + elt = rte_malloc("pmd-queue", sizeof(struct memif_socket_dev_list_elt), 0); + if (elt == NULL) { + MIF_LOG(ERR, "Failed to add device to socket device list."); + return -1; + } + elt->dev = dev; + TAILQ_INSERT_TAIL(&socket->dev_queue, elt, next); + + return 0; +} + +void +memif_socket_remove_device(struct rte_eth_dev *dev) +{ + struct pmd_internals *pmd = dev->data->dev_private; + struct memif_socket *socket = NULL; + struct memif_socket_dev_list_elt *elt, *next; + struct rte_hash *hash; + int ret; + + hash = rte_hash_find_existing(MEMIF_SOCKET_HASH_NAME); + if (hash == NULL) + return; + + if (pmd->socket_filename == NULL) + return; + + if (rte_hash_lookup_data(hash, pmd->socket_filename, (void **)&socket) < 0) + return; + + for (elt = TAILQ_FIRST(&socket->dev_queue); elt != NULL; elt = next) { + next = TAILQ_NEXT(elt, next); + if (elt->dev == dev) { + TAILQ_REMOVE(&socket->dev_queue, elt, next); + rte_free(elt); + pmd->socket_filename = NULL; + } + } + + /* remove socket, if this was the last device using it */ + if (TAILQ_EMPTY(&socket->dev_queue)) { + rte_hash_del_key(hash, socket->filename); + if (socket->listener) { + /* remove listener socket file, + * so we can create new one later. 
+ */ + ret = remove(socket->filename); + if (ret < 0) + MIF_LOG(ERR, "Failed to remove socket file: %s", + socket->filename); + } + rte_free(socket); + } +} + +int +memif_connect_master(struct rte_eth_dev *dev) +{ + struct pmd_internals *pmd = dev->data->dev_private; + + memset(pmd->local_disc_string, 0, ETH_MEMIF_DISC_STRING_SIZE); + memset(pmd->remote_disc_string, 0, ETH_MEMIF_DISC_STRING_SIZE); + pmd->flags &= ~ETH_MEMIF_FLAG_DISABLED; + return 0; +} + +int +memif_connect_slave(struct rte_eth_dev *dev) +{ + int sockfd; + int ret; + struct sockaddr_un sun; + struct pmd_internals *pmd = dev->data->dev_private; + + memset(pmd->local_disc_string, 0, ETH_MEMIF_DISC_STRING_SIZE); + memset(pmd->remote_disc_string, 0, ETH_MEMIF_DISC_STRING_SIZE); + pmd->flags &= ~ETH_MEMIF_FLAG_DISABLED; + + sockfd = socket(AF_UNIX, SOCK_SEQPACKET, 0); + if (sockfd < 0) { + MIF_LOG(ERR, "Failed to open socket."); + return -1; + } + + sun.sun_family = AF_UNIX; + + memcpy(sun.sun_path, pmd->socket_filename, sizeof(sun.sun_path) - 1); + + ret = connect(sockfd, (struct sockaddr *)&sun, + sizeof(struct sockaddr_un)); + if (ret < 0) { + MIF_LOG(ERR, "Failed to connect socket: %s.", pmd->socket_filename); + goto error; + } + + MIF_LOG(DEBUG, "Memif socket: %s connected.", pmd->socket_filename); + + pmd->cc = rte_zmalloc("memif-cc", + sizeof(struct memif_control_channel), 0); + if (pmd->cc == NULL) { + MIF_LOG(ERR, "Failed to allocate control channel."); + goto error; + } + + pmd->cc->intr_handle.fd = sockfd; + pmd->cc->intr_handle.type = RTE_INTR_HANDLE_EXT; + pmd->cc->socket = NULL; + pmd->cc->dev = dev; + TAILQ_INIT(&pmd->cc->msg_queue); + + ret = rte_intr_callback_register(&pmd->cc->intr_handle, + memif_intr_handler, pmd->cc); + if (ret < 0) { + MIF_LOG(ERR, "Failed to register interrupt callback for control fd"); + goto error; + } + + return 0; + + error: + if (sockfd >= 0) { + close(sockfd); + sockfd = -1; + } + if (pmd->cc != NULL) { + rte_free(pmd->cc); + pmd->cc = NULL; + } + return -1; +} diff --git a/src/spdk/dpdk/drivers/net/memif/memif_socket.h b/src/spdk/dpdk/drivers/net/memif/memif_socket.h new file mode 100644 index 000000000..5c49ec24e --- /dev/null +++ b/src/spdk/dpdk/drivers/net/memif/memif_socket.h @@ -0,0 +1,109 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2018-2019 Cisco Systems, Inc. All rights reserved. + */ + +#ifndef _MEMIF_SOCKET_H_ +#define _MEMIF_SOCKET_H_ + +#include +#include + +/** + * Remove device from socket device list. If no device is left on the socket, + * remove the socket as well. + * + * @param dev + * memif device + */ +void memif_socket_remove_device(struct rte_eth_dev *dev); + +/** + * Enqueue disconnect message to control channel message queue. + * + * @param cc + * control channel + * @param reason + * const string stating disconnect reason (96 characters) + * @param err_code + * error code + */ +void memif_msg_enq_disconnect(struct memif_control_channel *cc, const char *reason, + int err_code); + +/** + * Initialize memif socket for specified device. If socket doesn't exist, create socket. + * + * @param dev + * memif device + * @param socket_filename + * socket filename + * @return + * - On success, zero. + * - On failure, a negative value. + */ +int memif_socket_init(struct rte_eth_dev *dev, const char *socket_filename); + +/** + * Disconnect memif device. Close control channel and shared memory. 
+ * + * @param dev + * memif device + */ +void memif_disconnect(struct rte_eth_dev *dev); + +/** + * If device is properly configured, enable connection establishment. + * + * @param dev + * memif device + * @return + * - On success, zero. + * - On failure, a negative value. + */ +int memif_connect_master(struct rte_eth_dev *dev); + +/** + * If device is properly configured, send connection request. + * + * @param dev + * memif device + * @return + * - On success, zero. + * - On failure, a negative value. + */ +int memif_connect_slave(struct rte_eth_dev *dev); + +struct memif_socket_dev_list_elt { + TAILQ_ENTRY(memif_socket_dev_list_elt) next; + struct rte_eth_dev *dev; /**< pointer to device internals */ + char dev_name[RTE_ETH_NAME_MAX_LEN]; +}; + +#define MEMIF_SOCKET_HASH_NAME "memif-sh" +#define MEMIF_SOCKET_UN_SIZE \ + (sizeof(struct sockaddr_un) - offsetof(struct sockaddr_un, sun_path)) + +struct memif_socket { + struct rte_intr_handle intr_handle; /**< interrupt handle */ + char filename[MEMIF_SOCKET_UN_SIZE]; /**< socket filename */ + + TAILQ_HEAD(, memif_socket_dev_list_elt) dev_queue; + /**< Queue of devices using this socket */ + uint8_t listener; /**< if not zero socket is listener */ +}; + +/* Control message queue. */ +struct memif_msg_queue_elt { + memif_msg_t msg; /**< control message */ + TAILQ_ENTRY(memif_msg_queue_elt) next; + int fd; /**< fd to be sent to peer */ +}; + +struct memif_control_channel { + struct rte_intr_handle intr_handle; /**< interrupt handle */ + TAILQ_HEAD(, memif_msg_queue_elt) msg_queue; /**< control message queue */ + struct memif_socket *socket; /**< pointer to socket */ + struct rte_eth_dev *dev; /**< pointer to device */ +}; + +#endif /* MEMIF_SOCKET_H */ diff --git a/src/spdk/dpdk/drivers/net/memif/meson.build b/src/spdk/dpdk/drivers/net/memif/meson.build new file mode 100644 index 000000000..9c3ba432d --- /dev/null +++ b/src/spdk/dpdk/drivers/net/memif/meson.build @@ -0,0 +1,12 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright 2018-2019 Cisco Systems, Inc. All rights reserved. + +if not is_linux + build = false + reason = 'only supported on Linux' +endif + +sources = files('rte_eth_memif.c', + 'memif_socket.c') + +deps += ['hash'] diff --git a/src/spdk/dpdk/drivers/net/memif/rte_eth_memif.c b/src/spdk/dpdk/drivers/net/memif/rte_eth_memif.c new file mode 100644 index 000000000..b6da9a8b4 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/memif/rte_eth_memif.c @@ -0,0 +1,1816 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2018-2019 Cisco Systems, Inc. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "rte_eth_memif.h" +#include "memif_socket.h" + +#define ETH_MEMIF_ID_ARG "id" +#define ETH_MEMIF_ROLE_ARG "role" +#define ETH_MEMIF_PKT_BUFFER_SIZE_ARG "bsize" +#define ETH_MEMIF_RING_SIZE_ARG "rsize" +#define ETH_MEMIF_SOCKET_ARG "socket" +#define ETH_MEMIF_MAC_ARG "mac" +#define ETH_MEMIF_ZC_ARG "zero-copy" +#define ETH_MEMIF_SECRET_ARG "secret" + +static const char * const valid_arguments[] = { + ETH_MEMIF_ID_ARG, + ETH_MEMIF_ROLE_ARG, + ETH_MEMIF_PKT_BUFFER_SIZE_ARG, + ETH_MEMIF_RING_SIZE_ARG, + ETH_MEMIF_SOCKET_ARG, + ETH_MEMIF_MAC_ARG, + ETH_MEMIF_ZC_ARG, + ETH_MEMIF_SECRET_ARG, + NULL +}; + +static const struct rte_eth_link pmd_link = { + .link_speed = ETH_SPEED_NUM_10G, + .link_duplex = ETH_LINK_FULL_DUPLEX, + .link_status = ETH_LINK_DOWN, + .link_autoneg = ETH_LINK_AUTONEG +}; + +#define MEMIF_MP_SEND_REGION "memif_mp_send_region" + + +static int memif_region_init_zc(const struct rte_memseg_list *msl, + const struct rte_memseg *ms, void *arg); + +const char * +memif_version(void) +{ + return ("memif-" RTE_STR(MEMIF_VERSION_MAJOR) "." RTE_STR(MEMIF_VERSION_MINOR)); +} + +/* Message header to synchronize regions */ +struct mp_region_msg { + char port_name[RTE_DEV_NAME_MAX_LEN]; + memif_region_index_t idx; + memif_region_size_t size; +}; + +static int +memif_mp_send_region(const struct rte_mp_msg *msg, const void *peer) +{ + struct rte_eth_dev *dev; + struct pmd_process_private *proc_private; + const struct mp_region_msg *msg_param = (const struct mp_region_msg *)msg->param; + struct rte_mp_msg reply; + struct mp_region_msg *reply_param = (struct mp_region_msg *)reply.param; + uint16_t port_id; + int ret; + + /* Get requested port */ + ret = rte_eth_dev_get_port_by_name(msg_param->port_name, &port_id); + if (ret) { + MIF_LOG(ERR, "Failed to get port id for %s", + msg_param->port_name); + return -1; + } + dev = &rte_eth_devices[port_id]; + proc_private = dev->process_private; + + memset(&reply, 0, sizeof(reply)); + strlcpy(reply.name, msg->name, sizeof(reply.name)); + reply_param->idx = msg_param->idx; + if (proc_private->regions[msg_param->idx] != NULL) { + reply_param->size = proc_private->regions[msg_param->idx]->region_size; + reply.fds[0] = proc_private->regions[msg_param->idx]->fd; + reply.num_fds = 1; + } + reply.len_param = sizeof(*reply_param); + if (rte_mp_reply(&reply, peer) < 0) { + MIF_LOG(ERR, "Failed to reply to an add region request"); + return -1; + } + + return 0; +} + +/* + * Request regions + * Called by secondary process, when ports link status goes up. + */ +static int +memif_mp_request_regions(struct rte_eth_dev *dev) +{ + int ret, i; + struct timespec timeout = {.tv_sec = 5, .tv_nsec = 0}; + struct rte_mp_msg msg, *reply; + struct rte_mp_reply replies; + struct mp_region_msg *msg_param = (struct mp_region_msg *)msg.param; + struct mp_region_msg *reply_param; + struct memif_region *r; + struct pmd_process_private *proc_private = dev->process_private; + struct pmd_internals *pmd = dev->data->dev_private; + /* in case of zero-copy slave, only request region 0 */ + uint16_t max_region_num = (pmd->flags & ETH_MEMIF_FLAG_ZERO_COPY) ? 
+ 1 : ETH_MEMIF_MAX_REGION_NUM; + + MIF_LOG(DEBUG, "Requesting memory regions"); + + for (i = 0; i < max_region_num; i++) { + /* Prepare the message */ + memset(&msg, 0, sizeof(msg)); + strlcpy(msg.name, MEMIF_MP_SEND_REGION, sizeof(msg.name)); + strlcpy(msg_param->port_name, dev->data->name, + sizeof(msg_param->port_name)); + msg_param->idx = i; + msg.len_param = sizeof(*msg_param); + + /* Send message */ + ret = rte_mp_request_sync(&msg, &replies, &timeout); + if (ret < 0 || replies.nb_received != 1) { + MIF_LOG(ERR, "Failed to send mp msg: %d", + rte_errno); + return -1; + } + + reply = &replies.msgs[0]; + reply_param = (struct mp_region_msg *)reply->param; + + if (reply_param->size > 0) { + r = rte_zmalloc("region", sizeof(struct memif_region), 0); + if (r == NULL) { + MIF_LOG(ERR, "Failed to alloc memif region."); + free(reply); + return -ENOMEM; + } + r->region_size = reply_param->size; + if (reply->num_fds < 1) { + MIF_LOG(ERR, "Missing file descriptor."); + free(reply); + return -1; + } + r->fd = reply->fds[0]; + r->addr = NULL; + + proc_private->regions[reply_param->idx] = r; + proc_private->regions_num++; + } + free(reply); + } + + if (pmd->flags & ETH_MEMIF_FLAG_ZERO_COPY) { + ret = rte_memseg_walk(memif_region_init_zc, (void *)proc_private); + if (ret < 0) + return ret; + } + + return memif_connect(dev); +} + +static int +memif_dev_info(struct rte_eth_dev *dev __rte_unused, struct rte_eth_dev_info *dev_info) +{ + dev_info->max_mac_addrs = 1; + dev_info->max_rx_pktlen = (uint32_t)ETH_FRAME_LEN; + dev_info->max_rx_queues = ETH_MEMIF_MAX_NUM_Q_PAIRS; + dev_info->max_tx_queues = ETH_MEMIF_MAX_NUM_Q_PAIRS; + dev_info->min_rx_bufsize = 0; + + return 0; +} + +static memif_ring_t * +memif_get_ring(struct pmd_internals *pmd, struct pmd_process_private *proc_private, + memif_ring_type_t type, uint16_t ring_num) +{ + /* rings only in region 0 */ + void *p = proc_private->regions[0]->addr; + int ring_size = sizeof(memif_ring_t) + sizeof(memif_desc_t) * + (1 << pmd->run.log2_ring_size); + + p = (uint8_t *)p + (ring_num + type * pmd->run.num_s2m_rings) * ring_size; + + return (memif_ring_t *)p; +} + +static memif_region_offset_t +memif_get_ring_offset(struct rte_eth_dev *dev, struct memif_queue *mq, + memif_ring_type_t type, uint16_t num) +{ + struct pmd_internals *pmd = dev->data->dev_private; + struct pmd_process_private *proc_private = dev->process_private; + + return ((uint8_t *)memif_get_ring(pmd, proc_private, type, num) - + (uint8_t *)proc_private->regions[mq->region]->addr); +} + +static memif_ring_t * +memif_get_ring_from_queue(struct pmd_process_private *proc_private, + struct memif_queue *mq) +{ + struct memif_region *r; + + r = proc_private->regions[mq->region]; + if (r == NULL) + return NULL; + + return (memif_ring_t *)((uint8_t *)r->addr + mq->ring_offset); +} + +static void * +memif_get_buffer(struct pmd_process_private *proc_private, memif_desc_t *d) +{ + return ((uint8_t *)proc_private->regions[d->region]->addr + d->offset); +} + +/* Free mbufs received by master */ +static void +memif_free_stored_mbufs(struct pmd_process_private *proc_private, struct memif_queue *mq) +{ + uint16_t mask = (1 << mq->log2_ring_size) - 1; + memif_ring_t *ring = memif_get_ring_from_queue(proc_private, mq); + + /* FIXME: improve performance */ + while (mq->last_tail != ring->tail) { + RTE_MBUF_PREFETCH_TO_FREE(mq->buffers[(mq->last_tail + 1) & mask]); + /* Decrement refcnt and free mbuf. 
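+ * Every slot between mq->last_tail and ring->tail has already been
+ * consumed by the master, so the extra reference taken when the
+ * buffer was posted in memif_tx_one_zc() is dropped here and the
+ * segment is freed.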
(current segment) */ + rte_mbuf_refcnt_update(mq->buffers[mq->last_tail & mask], -1); + rte_pktmbuf_free_seg(mq->buffers[mq->last_tail & mask]); + mq->last_tail++; + } +} + +static int +memif_pktmbuf_chain(struct rte_mbuf *head, struct rte_mbuf *cur_tail, + struct rte_mbuf *tail) +{ + /* Check for number-of-segments-overflow */ + if (unlikely(head->nb_segs + tail->nb_segs > RTE_MBUF_MAX_NB_SEGS)) + return -EOVERFLOW; + + /* Chain 'tail' onto the old tail */ + cur_tail->next = tail; + + /* accumulate number of segments and total length. */ + head->nb_segs = (uint16_t)(head->nb_segs + tail->nb_segs); + + tail->pkt_len = tail->data_len; + head->pkt_len += tail->pkt_len; + + return 0; +} + +static uint16_t +eth_memif_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) +{ + struct memif_queue *mq = queue; + struct pmd_internals *pmd = rte_eth_devices[mq->in_port].data->dev_private; + struct pmd_process_private *proc_private = + rte_eth_devices[mq->in_port].process_private; + memif_ring_t *ring = memif_get_ring_from_queue(proc_private, mq); + uint16_t cur_slot, last_slot, n_slots, ring_size, mask, s0; + uint16_t n_rx_pkts = 0; + uint16_t mbuf_size = rte_pktmbuf_data_room_size(mq->mempool) - + RTE_PKTMBUF_HEADROOM; + uint16_t src_len, src_off, dst_len, dst_off, cp_len; + memif_ring_type_t type = mq->type; + memif_desc_t *d0; + struct rte_mbuf *mbuf, *mbuf_head, *mbuf_tail; + uint64_t b; + ssize_t size __rte_unused; + uint16_t head; + int ret; + struct rte_eth_link link; + + if (unlikely((pmd->flags & ETH_MEMIF_FLAG_CONNECTED) == 0)) + return 0; + if (unlikely(ring == NULL)) { + /* Secondary process will attempt to request regions. */ + ret = rte_eth_link_get(mq->in_port, &link); + if (ret < 0) + MIF_LOG(ERR, "Failed to get port %u link info: %s", + mq->in_port, rte_strerror(-ret)); + return 0; + } + + /* consume interrupt */ + if ((ring->flags & MEMIF_RING_FLAG_MASK_INT) == 0) + size = read(mq->intr_handle.fd, &b, sizeof(b)); + + ring_size = 1 << mq->log2_ring_size; + mask = ring_size - 1; + + if (type == MEMIF_RING_S2M) { + cur_slot = mq->last_head; + last_slot = __atomic_load_n(&ring->head, __ATOMIC_ACQUIRE); + } else { + cur_slot = mq->last_tail; + last_slot = __atomic_load_n(&ring->tail, __ATOMIC_ACQUIRE); + } + + if (cur_slot == last_slot) + goto refill; + n_slots = last_slot - cur_slot; + + while (n_slots && n_rx_pkts < nb_pkts) { + mbuf_head = rte_pktmbuf_alloc(mq->mempool); + if (unlikely(mbuf_head == NULL)) + goto no_free_bufs; + mbuf = mbuf_head; + mbuf->port = mq->in_port; + +next_slot: + s0 = cur_slot & mask; + d0 = &ring->desc[s0]; + + src_len = d0->length; + dst_off = 0; + src_off = 0; + + do { + dst_len = mbuf_size - dst_off; + if (dst_len == 0) { + dst_off = 0; + dst_len = mbuf_size; + + /* store pointer to tail */ + mbuf_tail = mbuf; + mbuf = rte_pktmbuf_alloc(mq->mempool); + if (unlikely(mbuf == NULL)) + goto no_free_bufs; + mbuf->port = mq->in_port; + ret = memif_pktmbuf_chain(mbuf_head, mbuf_tail, mbuf); + if (unlikely(ret < 0)) { + MIF_LOG(ERR, "number-of-segments-overflow"); + rte_pktmbuf_free(mbuf); + goto no_free_bufs; + } + } + cp_len = RTE_MIN(dst_len, src_len); + + rte_pktmbuf_data_len(mbuf) += cp_len; + rte_pktmbuf_pkt_len(mbuf) = rte_pktmbuf_data_len(mbuf); + if (mbuf != mbuf_head) + rte_pktmbuf_pkt_len(mbuf_head) += cp_len; + + memcpy(rte_pktmbuf_mtod_offset(mbuf, void *, dst_off), + (uint8_t *)memif_get_buffer(proc_private, d0) + src_off, + cp_len); + + src_off += cp_len; + dst_off += cp_len; + src_len -= cp_len; + } while (src_len); + + cur_slot++; + 
n_slots--; + + if (d0->flags & MEMIF_DESC_FLAG_NEXT) + goto next_slot; + + mq->n_bytes += rte_pktmbuf_pkt_len(mbuf_head); + *bufs++ = mbuf_head; + n_rx_pkts++; + } + +no_free_bufs: + if (type == MEMIF_RING_S2M) { + __atomic_store_n(&ring->tail, cur_slot, __ATOMIC_RELEASE); + mq->last_head = cur_slot; + } else { + mq->last_tail = cur_slot; + } + +refill: + if (type == MEMIF_RING_M2S) { + head = __atomic_load_n(&ring->head, __ATOMIC_ACQUIRE); + n_slots = ring_size - head + mq->last_tail; + + while (n_slots--) { + s0 = head++ & mask; + d0 = &ring->desc[s0]; + d0->length = pmd->run.pkt_buffer_size; + } + __atomic_store_n(&ring->head, head, __ATOMIC_RELEASE); + } + + mq->n_pkts += n_rx_pkts; + return n_rx_pkts; +} + +static uint16_t +eth_memif_rx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) +{ + struct memif_queue *mq = queue; + struct pmd_internals *pmd = rte_eth_devices[mq->in_port].data->dev_private; + struct pmd_process_private *proc_private = + rte_eth_devices[mq->in_port].process_private; + memif_ring_t *ring = memif_get_ring_from_queue(proc_private, mq); + uint16_t cur_slot, last_slot, n_slots, ring_size, mask, s0, head; + uint16_t n_rx_pkts = 0; + memif_desc_t *d0; + struct rte_mbuf *mbuf, *mbuf_tail; + struct rte_mbuf *mbuf_head = NULL; + int ret; + struct rte_eth_link link; + + if (unlikely((pmd->flags & ETH_MEMIF_FLAG_CONNECTED) == 0)) + return 0; + if (unlikely(ring == NULL)) { + /* Secondary process will attempt to request regions. */ + rte_eth_link_get(mq->in_port, &link); + return 0; + } + + /* consume interrupt */ + if ((ring->flags & MEMIF_RING_FLAG_MASK_INT) == 0) { + uint64_t b; + ssize_t size __rte_unused; + size = read(mq->intr_handle.fd, &b, sizeof(b)); + } + + ring_size = 1 << mq->log2_ring_size; + mask = ring_size - 1; + + cur_slot = mq->last_tail; + last_slot = ring->tail; + if (cur_slot == last_slot) + goto refill; + n_slots = last_slot - cur_slot; + + while (n_slots && n_rx_pkts < nb_pkts) { + s0 = cur_slot & mask; + + d0 = &ring->desc[s0]; + mbuf_head = mq->buffers[s0]; + mbuf = mbuf_head; + +next_slot: + /* prefetch next descriptor */ + if (n_rx_pkts + 1 < nb_pkts) + rte_prefetch0(&ring->desc[(cur_slot + 1) & mask]); + + mbuf->port = mq->in_port; + rte_pktmbuf_data_len(mbuf) = d0->length; + rte_pktmbuf_pkt_len(mbuf) = rte_pktmbuf_data_len(mbuf); + + mq->n_bytes += rte_pktmbuf_data_len(mbuf); + + cur_slot++; + n_slots--; + if (d0->flags & MEMIF_DESC_FLAG_NEXT) { + s0 = cur_slot & mask; + d0 = &ring->desc[s0]; + mbuf_tail = mbuf; + mbuf = mq->buffers[s0]; + ret = memif_pktmbuf_chain(mbuf_head, mbuf_tail, mbuf); + if (unlikely(ret < 0)) { + MIF_LOG(ERR, "number-of-segments-overflow"); + goto refill; + } + goto next_slot; + } + + *bufs++ = mbuf_head; + n_rx_pkts++; + } + + mq->last_tail = cur_slot; + +/* Supply master with new buffers */ +refill: + head = ring->head; + n_slots = ring_size - head + mq->last_tail; + + if (n_slots < 32) + goto no_free_mbufs; + + ret = rte_pktmbuf_alloc_bulk(mq->mempool, &mq->buffers[head & mask], n_slots); + if (unlikely(ret < 0)) + goto no_free_mbufs; + + while (n_slots--) { + s0 = head++ & mask; + if (n_slots > 0) + rte_prefetch0(mq->buffers[head & mask]); + d0 = &ring->desc[s0]; + /* store buffer header */ + mbuf = mq->buffers[s0]; + /* populate descriptor */ + d0->length = rte_pktmbuf_data_room_size(mq->mempool) - + RTE_PKTMBUF_HEADROOM; + d0->region = 1; + d0->offset = rte_pktmbuf_mtod(mbuf, uint8_t *) - + (uint8_t *)proc_private->regions[d0->region]->addr; + } +no_free_mbufs: + rte_mb(); + ring->head = head; + + 
mq->n_pkts += n_rx_pkts; + + return n_rx_pkts; +} + +static uint16_t +eth_memif_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) +{ + struct memif_queue *mq = queue; + struct pmd_internals *pmd = rte_eth_devices[mq->in_port].data->dev_private; + struct pmd_process_private *proc_private = + rte_eth_devices[mq->in_port].process_private; + memif_ring_t *ring = memif_get_ring_from_queue(proc_private, mq); + uint16_t slot, saved_slot, n_free, ring_size, mask, n_tx_pkts = 0; + uint16_t src_len, src_off, dst_len, dst_off, cp_len; + memif_ring_type_t type = mq->type; + memif_desc_t *d0; + struct rte_mbuf *mbuf; + struct rte_mbuf *mbuf_head; + uint64_t a; + ssize_t size; + struct rte_eth_link link; + + if (unlikely((pmd->flags & ETH_MEMIF_FLAG_CONNECTED) == 0)) + return 0; + if (unlikely(ring == NULL)) { + int ret; + + /* Secondary process will attempt to request regions. */ + ret = rte_eth_link_get(mq->in_port, &link); + if (ret < 0) + MIF_LOG(ERR, "Failed to get port %u link info: %s", + mq->in_port, rte_strerror(-ret)); + return 0; + } + + ring_size = 1 << mq->log2_ring_size; + mask = ring_size - 1; + + n_free = __atomic_load_n(&ring->tail, __ATOMIC_ACQUIRE) - mq->last_tail; + mq->last_tail += n_free; + + if (type == MEMIF_RING_S2M) { + slot = __atomic_load_n(&ring->head, __ATOMIC_ACQUIRE); + n_free = ring_size - slot + mq->last_tail; + } else { + slot = __atomic_load_n(&ring->tail, __ATOMIC_ACQUIRE); + n_free = __atomic_load_n(&ring->head, __ATOMIC_ACQUIRE) - slot; + } + + while (n_tx_pkts < nb_pkts && n_free) { + mbuf_head = *bufs++; + mbuf = mbuf_head; + + saved_slot = slot; + d0 = &ring->desc[slot & mask]; + dst_off = 0; + dst_len = (type == MEMIF_RING_S2M) ? + pmd->run.pkt_buffer_size : d0->length; + +next_in_chain: + src_off = 0; + src_len = rte_pktmbuf_data_len(mbuf); + + while (src_len) { + if (dst_len == 0) { + if (n_free) { + slot++; + n_free--; + d0->flags |= MEMIF_DESC_FLAG_NEXT; + d0 = &ring->desc[slot & mask]; + dst_off = 0; + dst_len = (type == MEMIF_RING_S2M) ? + pmd->run.pkt_buffer_size : d0->length; + d0->flags = 0; + } else { + slot = saved_slot; + goto no_free_slots; + } + } + cp_len = RTE_MIN(dst_len, src_len); + + memcpy((uint8_t *)memif_get_buffer(proc_private, d0) + dst_off, + rte_pktmbuf_mtod_offset(mbuf, void *, src_off), + cp_len); + + mq->n_bytes += cp_len; + src_off += cp_len; + dst_off += cp_len; + src_len -= cp_len; + dst_len -= cp_len; + + d0->length = dst_off; + } + + if (rte_pktmbuf_is_contiguous(mbuf) == 0) { + mbuf = mbuf->next; + goto next_in_chain; + } + + n_tx_pkts++; + slot++; + n_free--; + rte_pktmbuf_free(mbuf_head); + } + +no_free_slots: + if (type == MEMIF_RING_S2M) + __atomic_store_n(&ring->head, slot, __ATOMIC_RELEASE); + else + __atomic_store_n(&ring->tail, slot, __ATOMIC_RELEASE); + + if ((ring->flags & MEMIF_RING_FLAG_MASK_INT) == 0) { + a = 1; + size = write(mq->intr_handle.fd, &a, sizeof(a)); + if (unlikely(size < 0)) { + MIF_LOG(WARNING, + "Failed to send interrupt. %s", strerror(errno)); + } + } + + mq->n_pkts += n_tx_pkts; + return n_tx_pkts; +} + + +static int +memif_tx_one_zc(struct pmd_process_private *proc_private, struct memif_queue *mq, + memif_ring_t *ring, struct rte_mbuf *mbuf, const uint16_t mask, + uint16_t slot, uint16_t n_free) +{ + memif_desc_t *d0; + int used_slots = 1; + +next_in_chain: + /* store pointer to mbuf to free it later */ + mq->buffers[slot & mask] = mbuf; + /* Increment refcnt to make sure the buffer is not freed before master + * receives it. 
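+	 * The extra reference is dropped again in memif_free_stored_mbufs()
+	 * once the master's tail pointer has moved past this slot.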
(current segment) + */ + rte_mbuf_refcnt_update(mbuf, 1); + /* populate descriptor */ + d0 = &ring->desc[slot & mask]; + d0->length = rte_pktmbuf_data_len(mbuf); + /* FIXME: get region index */ + d0->region = 1; + d0->offset = rte_pktmbuf_mtod(mbuf, uint8_t *) - + (uint8_t *)proc_private->regions[d0->region]->addr; + d0->flags = 0; + + /* check if buffer is chained */ + if (rte_pktmbuf_is_contiguous(mbuf) == 0) { + if (n_free < 2) + return 0; + /* mark buffer as chained */ + d0->flags |= MEMIF_DESC_FLAG_NEXT; + /* advance mbuf */ + mbuf = mbuf->next; + /* update counters */ + used_slots++; + slot++; + n_free--; + goto next_in_chain; + } + return used_slots; +} + +static uint16_t +eth_memif_tx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) +{ + struct memif_queue *mq = queue; + struct pmd_internals *pmd = rte_eth_devices[mq->in_port].data->dev_private; + struct pmd_process_private *proc_private = + rte_eth_devices[mq->in_port].process_private; + memif_ring_t *ring = memif_get_ring_from_queue(proc_private, mq); + uint16_t slot, n_free, ring_size, mask, n_tx_pkts = 0; + memif_ring_type_t type = mq->type; + struct rte_eth_link link; + + if (unlikely((pmd->flags & ETH_MEMIF_FLAG_CONNECTED) == 0)) + return 0; + if (unlikely(ring == NULL)) { + /* Secondary process will attempt to request regions. */ + rte_eth_link_get(mq->in_port, &link); + return 0; + } + + ring_size = 1 << mq->log2_ring_size; + mask = ring_size - 1; + + /* free mbufs received by master */ + memif_free_stored_mbufs(proc_private, mq); + + /* ring type always MEMIF_RING_S2M */ + slot = ring->head; + n_free = ring_size - ring->head + mq->last_tail; + + int used_slots; + + while (n_free && (n_tx_pkts < nb_pkts)) { + while ((n_free > 4) && ((nb_pkts - n_tx_pkts) > 4)) { + if ((nb_pkts - n_tx_pkts) > 8) { + rte_prefetch0(*bufs + 4); + rte_prefetch0(*bufs + 5); + rte_prefetch0(*bufs + 6); + rte_prefetch0(*bufs + 7); + } + used_slots = memif_tx_one_zc(proc_private, mq, ring, *bufs++, + mask, slot, n_free); + if (unlikely(used_slots < 1)) + goto no_free_slots; + n_tx_pkts++; + slot += used_slots; + n_free -= used_slots; + + used_slots = memif_tx_one_zc(proc_private, mq, ring, *bufs++, + mask, slot, n_free); + if (unlikely(used_slots < 1)) + goto no_free_slots; + n_tx_pkts++; + slot += used_slots; + n_free -= used_slots; + + used_slots = memif_tx_one_zc(proc_private, mq, ring, *bufs++, + mask, slot, n_free); + if (unlikely(used_slots < 1)) + goto no_free_slots; + n_tx_pkts++; + slot += used_slots; + n_free -= used_slots; + + used_slots = memif_tx_one_zc(proc_private, mq, ring, *bufs++, + mask, slot, n_free); + if (unlikely(used_slots < 1)) + goto no_free_slots; + n_tx_pkts++; + slot += used_slots; + n_free -= used_slots; + } + used_slots = memif_tx_one_zc(proc_private, mq, ring, *bufs++, + mask, slot, n_free); + if (unlikely(used_slots < 1)) + goto no_free_slots; + n_tx_pkts++; + slot += used_slots; + n_free -= used_slots; + } + +no_free_slots: + rte_mb(); + /* update ring pointers */ + if (type == MEMIF_RING_S2M) + ring->head = slot; + else + ring->tail = slot; + + /* Send interrupt, if enabled. */ + if ((ring->flags & MEMIF_RING_FLAG_MASK_INT) == 0) { + uint64_t a = 1; + ssize_t size = write(mq->intr_handle.fd, &a, sizeof(a)); + if (unlikely(size < 0)) { + MIF_LOG(WARNING, + "Failed to send interrupt. 
%s", strerror(errno)); + } + } + + /* increment queue counters */ + mq->n_pkts += n_tx_pkts; + + return n_tx_pkts; +} + +void +memif_free_regions(struct rte_eth_dev *dev) +{ + struct pmd_process_private *proc_private = dev->process_private; + struct pmd_internals *pmd = dev->data->dev_private; + int i; + struct memif_region *r; + + /* regions are allocated contiguously, so it's + * enough to loop until 'proc_private->regions_num' + */ + for (i = 0; i < proc_private->regions_num; i++) { + r = proc_private->regions[i]; + if (r != NULL) { + /* This is memzone */ + if (i > 0 && (pmd->flags & ETH_MEMIF_FLAG_ZERO_COPY)) { + r->addr = NULL; + if (r->fd > 0) + close(r->fd); + } + if (r->addr != NULL) { + munmap(r->addr, r->region_size); + if (r->fd > 0) { + close(r->fd); + r->fd = -1; + } + } + rte_free(r); + proc_private->regions[i] = NULL; + } + } + proc_private->regions_num = 0; +} + +static int +memif_region_init_zc(const struct rte_memseg_list *msl, const struct rte_memseg *ms, + void *arg) +{ + struct pmd_process_private *proc_private = (struct pmd_process_private *)arg; + struct memif_region *r; + + if (proc_private->regions_num < 1) { + MIF_LOG(ERR, "Missing descriptor region"); + return -1; + } + + r = proc_private->regions[proc_private->regions_num - 1]; + + if (r->addr != msl->base_va) + r = proc_private->regions[++proc_private->regions_num - 1]; + + if (r == NULL) { + r = rte_zmalloc("region", sizeof(struct memif_region), 0); + if (r == NULL) { + MIF_LOG(ERR, "Failed to alloc memif region."); + return -ENOMEM; + } + + r->addr = msl->base_va; + r->region_size = ms->len; + r->fd = rte_memseg_get_fd(ms); + if (r->fd < 0) + return -1; + r->pkt_buffer_offset = 0; + + proc_private->regions[proc_private->regions_num - 1] = r; + } else { + r->region_size += ms->len; + } + + return 0; +} + +static int +memif_region_init_shm(struct rte_eth_dev *dev, uint8_t has_buffers) +{ + struct pmd_internals *pmd = dev->data->dev_private; + struct pmd_process_private *proc_private = dev->process_private; + char shm_name[ETH_MEMIF_SHM_NAME_SIZE]; + int ret = 0; + struct memif_region *r; + + if (proc_private->regions_num >= ETH_MEMIF_MAX_REGION_NUM) { + MIF_LOG(ERR, "Too many regions."); + return -1; + } + + r = rte_zmalloc("region", sizeof(struct memif_region), 0); + if (r == NULL) { + MIF_LOG(ERR, "Failed to alloc memif region."); + return -ENOMEM; + } + + /* calculate buffer offset */ + r->pkt_buffer_offset = (pmd->run.num_s2m_rings + pmd->run.num_m2s_rings) * + (sizeof(memif_ring_t) + sizeof(memif_desc_t) * + (1 << pmd->run.log2_ring_size)); + + r->region_size = r->pkt_buffer_offset; + /* if region has buffers, add buffers size to region_size */ + if (has_buffers == 1) + r->region_size += (uint32_t)(pmd->run.pkt_buffer_size * + (1 << pmd->run.log2_ring_size) * + (pmd->run.num_s2m_rings + + pmd->run.num_m2s_rings)); + + memset(shm_name, 0, sizeof(char) * ETH_MEMIF_SHM_NAME_SIZE); + snprintf(shm_name, ETH_MEMIF_SHM_NAME_SIZE, "memif_region_%d", + proc_private->regions_num); + + r->fd = memfd_create(shm_name, MFD_ALLOW_SEALING); + if (r->fd < 0) { + MIF_LOG(ERR, "Failed to create shm file: %s.", strerror(errno)); + ret = -1; + goto error; + } + + ret = fcntl(r->fd, F_ADD_SEALS, F_SEAL_SHRINK); + if (ret < 0) { + MIF_LOG(ERR, "Failed to add seals to shm file: %s.", strerror(errno)); + goto error; + } + + ret = ftruncate(r->fd, r->region_size); + if (ret < 0) { + MIF_LOG(ERR, "Failed to truncate shm file: %s.", strerror(errno)); + goto error; + } + + r->addr = mmap(NULL, r->region_size, PROT_READ | + 
PROT_WRITE, MAP_SHARED, r->fd, 0); + if (r->addr == MAP_FAILED) { + MIF_LOG(ERR, "Failed to mmap shm region: %s.", strerror(ret)); + ret = -1; + goto error; + } + + proc_private->regions[proc_private->regions_num] = r; + proc_private->regions_num++; + + return ret; + +error: + if (r->fd > 0) + close(r->fd); + r->fd = -1; + + return ret; +} + +static int +memif_regions_init(struct rte_eth_dev *dev) +{ + struct pmd_internals *pmd = dev->data->dev_private; + int ret; + + /* + * Zero-copy exposes dpdk memory. + * Each memseg list will be represented by memif region. + * Zero-copy regions indexing: memseg list idx + 1, + * as we already have region 0 reserved for descriptors. + */ + if (pmd->flags & ETH_MEMIF_FLAG_ZERO_COPY) { + /* create region idx 0 containing descriptors */ + ret = memif_region_init_shm(dev, 0); + if (ret < 0) + return ret; + ret = rte_memseg_walk(memif_region_init_zc, (void *)dev->process_private); + if (ret < 0) + return ret; + } else { + /* create one memory region contaning rings and buffers */ + ret = memif_region_init_shm(dev, /* has buffers */ 1); + if (ret < 0) + return ret; + } + + return 0; +} + +static void +memif_init_rings(struct rte_eth_dev *dev) +{ + struct pmd_internals *pmd = dev->data->dev_private; + struct pmd_process_private *proc_private = dev->process_private; + memif_ring_t *ring; + int i, j; + uint16_t slot; + + for (i = 0; i < pmd->run.num_s2m_rings; i++) { + ring = memif_get_ring(pmd, proc_private, MEMIF_RING_S2M, i); + __atomic_store_n(&ring->head, 0, __ATOMIC_RELAXED); + __atomic_store_n(&ring->tail, 0, __ATOMIC_RELAXED); + ring->cookie = MEMIF_COOKIE; + ring->flags = 0; + + if (pmd->flags & ETH_MEMIF_FLAG_ZERO_COPY) + continue; + + for (j = 0; j < (1 << pmd->run.log2_ring_size); j++) { + slot = i * (1 << pmd->run.log2_ring_size) + j; + ring->desc[j].region = 0; + ring->desc[j].offset = + proc_private->regions[0]->pkt_buffer_offset + + (uint32_t)(slot * pmd->run.pkt_buffer_size); + ring->desc[j].length = pmd->run.pkt_buffer_size; + } + } + + for (i = 0; i < pmd->run.num_m2s_rings; i++) { + ring = memif_get_ring(pmd, proc_private, MEMIF_RING_M2S, i); + __atomic_store_n(&ring->head, 0, __ATOMIC_RELAXED); + __atomic_store_n(&ring->tail, 0, __ATOMIC_RELAXED); + ring->cookie = MEMIF_COOKIE; + ring->flags = 0; + + if (pmd->flags & ETH_MEMIF_FLAG_ZERO_COPY) + continue; + + for (j = 0; j < (1 << pmd->run.log2_ring_size); j++) { + slot = (i + pmd->run.num_s2m_rings) * + (1 << pmd->run.log2_ring_size) + j; + ring->desc[j].region = 0; + ring->desc[j].offset = + proc_private->regions[0]->pkt_buffer_offset + + (uint32_t)(slot * pmd->run.pkt_buffer_size); + ring->desc[j].length = pmd->run.pkt_buffer_size; + } + } +} + +/* called only by slave */ +static int +memif_init_queues(struct rte_eth_dev *dev) +{ + struct pmd_internals *pmd = dev->data->dev_private; + struct memif_queue *mq; + int i; + + for (i = 0; i < pmd->run.num_s2m_rings; i++) { + mq = dev->data->tx_queues[i]; + mq->log2_ring_size = pmd->run.log2_ring_size; + /* queues located only in region 0 */ + mq->region = 0; + mq->ring_offset = memif_get_ring_offset(dev, mq, MEMIF_RING_S2M, i); + mq->last_head = 0; + mq->last_tail = 0; + mq->intr_handle.fd = eventfd(0, EFD_NONBLOCK); + if (mq->intr_handle.fd < 0) { + MIF_LOG(WARNING, + "Failed to create eventfd for tx queue %d: %s.", i, + strerror(errno)); + } + mq->buffers = NULL; + if (pmd->flags & ETH_MEMIF_FLAG_ZERO_COPY) { + mq->buffers = rte_zmalloc("bufs", sizeof(struct rte_mbuf *) * + (1 << mq->log2_ring_size), 0); + if (mq->buffers == NULL) + return 
-ENOMEM; + } + } + + for (i = 0; i < pmd->run.num_m2s_rings; i++) { + mq = dev->data->rx_queues[i]; + mq->log2_ring_size = pmd->run.log2_ring_size; + /* queues located only in region 0 */ + mq->region = 0; + mq->ring_offset = memif_get_ring_offset(dev, mq, MEMIF_RING_M2S, i); + mq->last_head = 0; + mq->last_tail = 0; + mq->intr_handle.fd = eventfd(0, EFD_NONBLOCK); + if (mq->intr_handle.fd < 0) { + MIF_LOG(WARNING, + "Failed to create eventfd for rx queue %d: %s.", i, + strerror(errno)); + } + mq->buffers = NULL; + if (pmd->flags & ETH_MEMIF_FLAG_ZERO_COPY) { + mq->buffers = rte_zmalloc("bufs", sizeof(struct rte_mbuf *) * + (1 << mq->log2_ring_size), 0); + if (mq->buffers == NULL) + return -ENOMEM; + } + } + return 0; +} + +int +memif_init_regions_and_queues(struct rte_eth_dev *dev) +{ + int ret; + + ret = memif_regions_init(dev); + if (ret < 0) + return ret; + + memif_init_rings(dev); + + ret = memif_init_queues(dev); + if (ret < 0) + return ret; + + return 0; +} + +int +memif_connect(struct rte_eth_dev *dev) +{ + struct pmd_internals *pmd = dev->data->dev_private; + struct pmd_process_private *proc_private = dev->process_private; + struct memif_region *mr; + struct memif_queue *mq; + memif_ring_t *ring; + int i; + + for (i = 0; i < proc_private->regions_num; i++) { + mr = proc_private->regions[i]; + if (mr != NULL) { + if (mr->addr == NULL) { + if (mr->fd < 0) + return -1; + mr->addr = mmap(NULL, mr->region_size, + PROT_READ | PROT_WRITE, + MAP_SHARED, mr->fd, 0); + if (mr->addr == MAP_FAILED) { + MIF_LOG(ERR, "mmap failed: %s\n", + strerror(errno)); + return -1; + } + } + if (i > 0 && (pmd->flags & ETH_MEMIF_FLAG_ZERO_COPY)) { + /* close memseg file */ + close(mr->fd); + mr->fd = -1; + } + } + } + + if (rte_eal_process_type() == RTE_PROC_PRIMARY) { + for (i = 0; i < pmd->run.num_s2m_rings; i++) { + mq = (pmd->role == MEMIF_ROLE_SLAVE) ? + dev->data->tx_queues[i] : dev->data->rx_queues[i]; + ring = memif_get_ring_from_queue(proc_private, mq); + if (ring == NULL || ring->cookie != MEMIF_COOKIE) { + MIF_LOG(ERR, "Wrong ring"); + return -1; + } + __atomic_store_n(&ring->head, 0, __ATOMIC_RELAXED); + __atomic_store_n(&ring->tail, 0, __ATOMIC_RELAXED); + mq->last_head = 0; + mq->last_tail = 0; + /* enable polling mode */ + if (pmd->role == MEMIF_ROLE_MASTER) + ring->flags = MEMIF_RING_FLAG_MASK_INT; + } + for (i = 0; i < pmd->run.num_m2s_rings; i++) { + mq = (pmd->role == MEMIF_ROLE_SLAVE) ? 
+ dev->data->rx_queues[i] : dev->data->tx_queues[i]; + ring = memif_get_ring_from_queue(proc_private, mq); + if (ring == NULL || ring->cookie != MEMIF_COOKIE) { + MIF_LOG(ERR, "Wrong ring"); + return -1; + } + __atomic_store_n(&ring->head, 0, __ATOMIC_RELAXED); + __atomic_store_n(&ring->tail, 0, __ATOMIC_RELAXED); + mq->last_head = 0; + mq->last_tail = 0; + /* enable polling mode */ + if (pmd->role == MEMIF_ROLE_SLAVE) + ring->flags = MEMIF_RING_FLAG_MASK_INT; + } + + pmd->flags &= ~ETH_MEMIF_FLAG_CONNECTING; + pmd->flags |= ETH_MEMIF_FLAG_CONNECTED; + dev->data->dev_link.link_status = ETH_LINK_UP; + } + MIF_LOG(INFO, "Connected."); + return 0; +} + +static int +memif_dev_start(struct rte_eth_dev *dev) +{ + struct pmd_internals *pmd = dev->data->dev_private; + int ret = 0; + + switch (pmd->role) { + case MEMIF_ROLE_SLAVE: + ret = memif_connect_slave(dev); + break; + case MEMIF_ROLE_MASTER: + ret = memif_connect_master(dev); + break; + default: + MIF_LOG(ERR, "Unknown role: %d.", pmd->role); + ret = -1; + break; + } + + return ret; +} + +static void +memif_dev_close(struct rte_eth_dev *dev) +{ + struct pmd_internals *pmd = dev->data->dev_private; + int i; + + if (rte_eal_process_type() == RTE_PROC_PRIMARY) { + memif_msg_enq_disconnect(pmd->cc, "Device closed", 0); + memif_disconnect(dev); + + for (i = 0; i < dev->data->nb_rx_queues; i++) + (*dev->dev_ops->rx_queue_release)(dev->data->rx_queues[i]); + for (i = 0; i < dev->data->nb_tx_queues; i++) + (*dev->dev_ops->tx_queue_release)(dev->data->tx_queues[i]); + + memif_socket_remove_device(dev); + } else { + memif_disconnect(dev); + } + + rte_free(dev->process_private); +} + +static int +memif_dev_configure(struct rte_eth_dev *dev) +{ + struct pmd_internals *pmd = dev->data->dev_private; + + /* + * SLAVE - TXQ + * MASTER - RXQ + */ + pmd->cfg.num_s2m_rings = (pmd->role == MEMIF_ROLE_SLAVE) ? + dev->data->nb_tx_queues : dev->data->nb_rx_queues; + + /* + * SLAVE - RXQ + * MASTER - TXQ + */ + pmd->cfg.num_m2s_rings = (pmd->role == MEMIF_ROLE_SLAVE) ? + dev->data->nb_rx_queues : dev->data->nb_tx_queues; + + return 0; +} + +static int +memif_tx_queue_setup(struct rte_eth_dev *dev, + uint16_t qid, + uint16_t nb_tx_desc __rte_unused, + unsigned int socket_id __rte_unused, + const struct rte_eth_txconf *tx_conf __rte_unused) +{ + struct pmd_internals *pmd = dev->data->dev_private; + struct memif_queue *mq; + + mq = rte_zmalloc("tx-queue", sizeof(struct memif_queue), 0); + if (mq == NULL) { + MIF_LOG(ERR, "Failed to allocate tx queue id: %u", qid); + return -ENOMEM; + } + + mq->type = + (pmd->role == MEMIF_ROLE_SLAVE) ? MEMIF_RING_S2M : MEMIF_RING_M2S; + mq->n_pkts = 0; + mq->n_bytes = 0; + mq->intr_handle.fd = -1; + mq->intr_handle.type = RTE_INTR_HANDLE_EXT; + mq->in_port = dev->data->port_id; + dev->data->tx_queues[qid] = mq; + + return 0; +} + +static int +memif_rx_queue_setup(struct rte_eth_dev *dev, + uint16_t qid, + uint16_t nb_rx_desc __rte_unused, + unsigned int socket_id __rte_unused, + const struct rte_eth_rxconf *rx_conf __rte_unused, + struct rte_mempool *mb_pool) +{ + struct pmd_internals *pmd = dev->data->dev_private; + struct memif_queue *mq; + + mq = rte_zmalloc("rx-queue", sizeof(struct memif_queue), 0); + if (mq == NULL) { + MIF_LOG(ERR, "Failed to allocate rx queue id: %u", qid); + return -ENOMEM; + } + + mq->type = (pmd->role == MEMIF_ROLE_SLAVE) ? 
MEMIF_RING_M2S : MEMIF_RING_S2M; + mq->n_pkts = 0; + mq->n_bytes = 0; + mq->intr_handle.fd = -1; + mq->intr_handle.type = RTE_INTR_HANDLE_EXT; + mq->mempool = mb_pool; + mq->in_port = dev->data->port_id; + dev->data->rx_queues[qid] = mq; + + return 0; +} + +static void +memif_queue_release(void *queue) +{ + struct memif_queue *mq = (struct memif_queue *)queue; + + if (!mq) + return; + + rte_free(mq); +} + +static int +memif_link_update(struct rte_eth_dev *dev, + int wait_to_complete __rte_unused) +{ + struct pmd_process_private *proc_private; + + if (rte_eal_process_type() == RTE_PROC_SECONDARY) { + proc_private = dev->process_private; + if (dev->data->dev_link.link_status == ETH_LINK_UP && + proc_private->regions_num == 0) { + memif_mp_request_regions(dev); + } else if (dev->data->dev_link.link_status == ETH_LINK_DOWN && + proc_private->regions_num > 0) { + memif_free_regions(dev); + } + } + return 0; +} + +static int +memif_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) +{ + struct pmd_internals *pmd = dev->data->dev_private; + struct memif_queue *mq; + int i; + uint8_t tmp, nq; + + stats->ipackets = 0; + stats->ibytes = 0; + stats->opackets = 0; + stats->obytes = 0; + + tmp = (pmd->role == MEMIF_ROLE_SLAVE) ? pmd->run.num_s2m_rings : + pmd->run.num_m2s_rings; + nq = (tmp < RTE_ETHDEV_QUEUE_STAT_CNTRS) ? tmp : + RTE_ETHDEV_QUEUE_STAT_CNTRS; + + /* RX stats */ + for (i = 0; i < nq; i++) { + mq = dev->data->rx_queues[i]; + stats->q_ipackets[i] = mq->n_pkts; + stats->q_ibytes[i] = mq->n_bytes; + stats->ipackets += mq->n_pkts; + stats->ibytes += mq->n_bytes; + } + + tmp = (pmd->role == MEMIF_ROLE_SLAVE) ? pmd->run.num_m2s_rings : + pmd->run.num_s2m_rings; + nq = (tmp < RTE_ETHDEV_QUEUE_STAT_CNTRS) ? tmp : + RTE_ETHDEV_QUEUE_STAT_CNTRS; + + /* TX stats */ + for (i = 0; i < nq; i++) { + mq = dev->data->tx_queues[i]; + stats->q_opackets[i] = mq->n_pkts; + stats->q_obytes[i] = mq->n_bytes; + stats->opackets += mq->n_pkts; + stats->obytes += mq->n_bytes; + } + return 0; +} + +static int +memif_stats_reset(struct rte_eth_dev *dev) +{ + struct pmd_internals *pmd = dev->data->dev_private; + int i; + struct memif_queue *mq; + + for (i = 0; i < pmd->run.num_s2m_rings; i++) { + mq = (pmd->role == MEMIF_ROLE_SLAVE) ? dev->data->tx_queues[i] : + dev->data->rx_queues[i]; + mq->n_pkts = 0; + mq->n_bytes = 0; + } + for (i = 0; i < pmd->run.num_m2s_rings; i++) { + mq = (pmd->role == MEMIF_ROLE_SLAVE) ? 
dev->data->rx_queues[i] : + dev->data->tx_queues[i]; + mq->n_pkts = 0; + mq->n_bytes = 0; + } + + return 0; +} + +static int +memif_rx_queue_intr_enable(struct rte_eth_dev *dev __rte_unused, + uint16_t qid __rte_unused) +{ + MIF_LOG(WARNING, "Interrupt mode not supported."); + + return -1; +} + +static int +memif_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t qid __rte_unused) +{ + struct pmd_internals *pmd __rte_unused = dev->data->dev_private; + + return 0; +} + +static const struct eth_dev_ops ops = { + .dev_start = memif_dev_start, + .dev_close = memif_dev_close, + .dev_infos_get = memif_dev_info, + .dev_configure = memif_dev_configure, + .tx_queue_setup = memif_tx_queue_setup, + .rx_queue_setup = memif_rx_queue_setup, + .rx_queue_release = memif_queue_release, + .tx_queue_release = memif_queue_release, + .rx_queue_intr_enable = memif_rx_queue_intr_enable, + .rx_queue_intr_disable = memif_rx_queue_intr_disable, + .link_update = memif_link_update, + .stats_get = memif_stats_get, + .stats_reset = memif_stats_reset, +}; + +static int +memif_create(struct rte_vdev_device *vdev, enum memif_role_t role, + memif_interface_id_t id, uint32_t flags, + const char *socket_filename, + memif_log2_ring_size_t log2_ring_size, + uint16_t pkt_buffer_size, const char *secret, + struct rte_ether_addr *ether_addr) +{ + int ret = 0; + struct rte_eth_dev *eth_dev; + struct rte_eth_dev_data *data; + struct pmd_internals *pmd; + struct pmd_process_private *process_private; + const unsigned int numa_node = vdev->device.numa_node; + const char *name = rte_vdev_device_name(vdev); + + eth_dev = rte_eth_vdev_allocate(vdev, sizeof(*pmd)); + if (eth_dev == NULL) { + MIF_LOG(ERR, "%s: Unable to allocate device struct.", name); + return -1; + } + + process_private = (struct pmd_process_private *) + rte_zmalloc(name, sizeof(struct pmd_process_private), + RTE_CACHE_LINE_SIZE); + + if (process_private == NULL) { + MIF_LOG(ERR, "Failed to alloc memory for process private"); + return -1; + } + eth_dev->process_private = process_private; + + pmd = eth_dev->data->dev_private; + memset(pmd, 0, sizeof(*pmd)); + + pmd->id = id; + pmd->flags = flags; + pmd->flags |= ETH_MEMIF_FLAG_DISABLED; + pmd->role = role; + /* Zero-copy flag irelevant to master. 
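+	 * The zero-copy data path in this PMD is used by the slave role only,
+	 * so the flag is cleared here and a master never selects the
+	 * eth_memif_*_zc burst functions below; e.g. a vdev created with
+	 * devargs role=master,zc=yes silently falls back to the copy path.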
*/ + if (pmd->role == MEMIF_ROLE_MASTER) + pmd->flags &= ~ETH_MEMIF_FLAG_ZERO_COPY; + + ret = memif_socket_init(eth_dev, socket_filename); + if (ret < 0) + return ret; + + memset(pmd->secret, 0, sizeof(char) * ETH_MEMIF_SECRET_SIZE); + if (secret != NULL) + strlcpy(pmd->secret, secret, sizeof(pmd->secret)); + + pmd->cfg.log2_ring_size = log2_ring_size; + /* set in .dev_configure() */ + pmd->cfg.num_s2m_rings = 0; + pmd->cfg.num_m2s_rings = 0; + + pmd->cfg.pkt_buffer_size = pkt_buffer_size; + rte_spinlock_init(&pmd->cc_lock); + + data = eth_dev->data; + data->dev_private = pmd; + data->numa_node = numa_node; + data->dev_link = pmd_link; + data->mac_addrs = ether_addr; + data->promiscuous = 1; + + eth_dev->dev_ops = &ops; + eth_dev->device = &vdev->device; + if (pmd->flags & ETH_MEMIF_FLAG_ZERO_COPY) { + eth_dev->rx_pkt_burst = eth_memif_rx_zc; + eth_dev->tx_pkt_burst = eth_memif_tx_zc; + } else { + eth_dev->rx_pkt_burst = eth_memif_rx; + eth_dev->tx_pkt_burst = eth_memif_tx; + } + + + eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE; + + rte_eth_dev_probing_finish(eth_dev); + + return 0; +} + +static int +memif_set_role(const char *key __rte_unused, const char *value, + void *extra_args) +{ + enum memif_role_t *role = (enum memif_role_t *)extra_args; + + if (strstr(value, "master") != NULL) { + *role = MEMIF_ROLE_MASTER; + } else if (strstr(value, "slave") != NULL) { + *role = MEMIF_ROLE_SLAVE; + } else { + MIF_LOG(ERR, "Unknown role: %s.", value); + return -EINVAL; + } + return 0; +} + +static int +memif_set_zc(const char *key __rte_unused, const char *value, void *extra_args) +{ + uint32_t *flags = (uint32_t *)extra_args; + + if (strstr(value, "yes") != NULL) { + if (!rte_mcfg_get_single_file_segments()) { + MIF_LOG(ERR, "Zero-copy doesn't support multi-file segments."); + return -ENOTSUP; + } + *flags |= ETH_MEMIF_FLAG_ZERO_COPY; + } else if (strstr(value, "no") != NULL) { + *flags &= ~ETH_MEMIF_FLAG_ZERO_COPY; + } else { + MIF_LOG(ERR, "Failed to parse zero-copy param: %s.", value); + return -EINVAL; + } + return 0; +} + +static int +memif_set_id(const char *key __rte_unused, const char *value, void *extra_args) +{ + memif_interface_id_t *id = (memif_interface_id_t *)extra_args; + + /* even if parsing fails, 0 is a valid id */ + *id = strtoul(value, NULL, 10); + return 0; +} + +static int +memif_set_bs(const char *key __rte_unused, const char *value, void *extra_args) +{ + unsigned long tmp; + uint16_t *pkt_buffer_size = (uint16_t *)extra_args; + + tmp = strtoul(value, NULL, 10); + if (tmp == 0 || tmp > 0xFFFF) { + MIF_LOG(ERR, "Invalid buffer size: %s.", value); + return -EINVAL; + } + *pkt_buffer_size = tmp; + return 0; +} + +static int +memif_set_rs(const char *key __rte_unused, const char *value, void *extra_args) +{ + unsigned long tmp; + memif_log2_ring_size_t *log2_ring_size = + (memif_log2_ring_size_t *)extra_args; + + tmp = strtoul(value, NULL, 10); + if (tmp == 0 || tmp > ETH_MEMIF_MAX_LOG2_RING_SIZE) { + MIF_LOG(ERR, "Invalid ring size: %s (max %u).", + value, ETH_MEMIF_MAX_LOG2_RING_SIZE); + return -EINVAL; + } + *log2_ring_size = tmp; + return 0; +} + +/* check if directory exists and if we have permission to read/write */ +static int +memif_check_socket_filename(const char *filename) +{ + char *dir = NULL, *tmp; + uint32_t idx; + int ret = 0; + + if (strlen(filename) >= MEMIF_SOCKET_UN_SIZE) { + MIF_LOG(ERR, "Unix socket address too long (max 108)."); + return -1; + } + + tmp = strrchr(filename, '/'); + if (tmp != NULL) { + idx = tmp - filename; + dir = 
rte_zmalloc("memif_tmp", sizeof(char) * (idx + 1), 0); + if (dir == NULL) { + MIF_LOG(ERR, "Failed to allocate memory."); + return -1; + } + strlcpy(dir, filename, sizeof(char) * (idx + 1)); + } + + if (dir == NULL || (faccessat(-1, dir, F_OK | R_OK | + W_OK, AT_EACCESS) < 0)) { + MIF_LOG(ERR, "Invalid socket directory."); + ret = -EINVAL; + } + + if (dir != NULL) + rte_free(dir); + + return ret; +} + +static int +memif_set_socket_filename(const char *key __rte_unused, const char *value, + void *extra_args) +{ + const char **socket_filename = (const char **)extra_args; + + *socket_filename = value; + return memif_check_socket_filename(*socket_filename); +} + +static int +memif_set_mac(const char *key __rte_unused, const char *value, void *extra_args) +{ + struct rte_ether_addr *ether_addr = (struct rte_ether_addr *)extra_args; + + if (rte_ether_unformat_addr(value, ether_addr) < 0) + MIF_LOG(WARNING, "Failed to parse mac '%s'.", value); + return 0; +} + +static int +memif_set_secret(const char *key __rte_unused, const char *value, void *extra_args) +{ + const char **secret = (const char **)extra_args; + + *secret = value; + return 0; +} + +static int +rte_pmd_memif_probe(struct rte_vdev_device *vdev) +{ + RTE_BUILD_BUG_ON(sizeof(memif_msg_t) != 128); + RTE_BUILD_BUG_ON(sizeof(memif_desc_t) != 16); + int ret = 0; + struct rte_kvargs *kvlist; + const char *name = rte_vdev_device_name(vdev); + enum memif_role_t role = MEMIF_ROLE_SLAVE; + memif_interface_id_t id = 0; + uint16_t pkt_buffer_size = ETH_MEMIF_DEFAULT_PKT_BUFFER_SIZE; + memif_log2_ring_size_t log2_ring_size = ETH_MEMIF_DEFAULT_RING_SIZE; + const char *socket_filename = ETH_MEMIF_DEFAULT_SOCKET_FILENAME; + uint32_t flags = 0; + const char *secret = NULL; + struct rte_ether_addr *ether_addr = rte_zmalloc("", + sizeof(struct rte_ether_addr), 0); + struct rte_eth_dev *eth_dev; + + rte_eth_random_addr(ether_addr->addr_bytes); + + MIF_LOG(INFO, "Initialize MEMIF: %s.", name); + + if (rte_eal_process_type() == RTE_PROC_SECONDARY) { + eth_dev = rte_eth_dev_attach_secondary(name); + if (!eth_dev) { + MIF_LOG(ERR, "Failed to probe %s", name); + return -1; + } + + eth_dev->dev_ops = &ops; + eth_dev->device = &vdev->device; + eth_dev->rx_pkt_burst = eth_memif_rx; + eth_dev->tx_pkt_burst = eth_memif_tx; + + if (!rte_eal_primary_proc_alive(NULL)) { + MIF_LOG(ERR, "Primary process is missing"); + return -1; + } + + eth_dev->process_private = (struct pmd_process_private *) + rte_zmalloc(name, + sizeof(struct pmd_process_private), + RTE_CACHE_LINE_SIZE); + if (eth_dev->process_private == NULL) { + MIF_LOG(ERR, + "Failed to alloc memory for process private"); + return -1; + } + + rte_eth_dev_probing_finish(eth_dev); + + return 0; + } + + ret = rte_mp_action_register(MEMIF_MP_SEND_REGION, memif_mp_send_region); + /* + * Primary process can continue probing, but secondary process won't + * be able to get memory regions information + */ + if (ret < 0 && rte_errno != EEXIST) + MIF_LOG(WARNING, "Failed to register mp action callback: %s", + strerror(rte_errno)); + + kvlist = rte_kvargs_parse(rte_vdev_device_args(vdev), valid_arguments); + + /* parse parameters */ + if (kvlist != NULL) { + ret = rte_kvargs_process(kvlist, ETH_MEMIF_ROLE_ARG, + &memif_set_role, &role); + if (ret < 0) + goto exit; + ret = rte_kvargs_process(kvlist, ETH_MEMIF_ID_ARG, + &memif_set_id, &id); + if (ret < 0) + goto exit; + ret = rte_kvargs_process(kvlist, ETH_MEMIF_PKT_BUFFER_SIZE_ARG, + &memif_set_bs, &pkt_buffer_size); + if (ret < 0) + goto exit; + ret = 
rte_kvargs_process(kvlist, ETH_MEMIF_RING_SIZE_ARG, + &memif_set_rs, &log2_ring_size); + if (ret < 0) + goto exit; + ret = rte_kvargs_process(kvlist, ETH_MEMIF_SOCKET_ARG, + &memif_set_socket_filename, + (void *)(&socket_filename)); + if (ret < 0) + goto exit; + ret = rte_kvargs_process(kvlist, ETH_MEMIF_MAC_ARG, + &memif_set_mac, ether_addr); + if (ret < 0) + goto exit; + ret = rte_kvargs_process(kvlist, ETH_MEMIF_ZC_ARG, + &memif_set_zc, &flags); + if (ret < 0) + goto exit; + ret = rte_kvargs_process(kvlist, ETH_MEMIF_SECRET_ARG, + &memif_set_secret, (void *)(&secret)); + if (ret < 0) + goto exit; + } + + /* create interface */ + ret = memif_create(vdev, role, id, flags, socket_filename, + log2_ring_size, pkt_buffer_size, secret, ether_addr); + +exit: + if (kvlist != NULL) + rte_kvargs_free(kvlist); + return ret; +} + +static int +rte_pmd_memif_remove(struct rte_vdev_device *vdev) +{ + struct rte_eth_dev *eth_dev; + + eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(vdev)); + if (eth_dev == NULL) + return 0; + + rte_eth_dev_close(eth_dev->data->port_id); + + return 0; +} + +static struct rte_vdev_driver pmd_memif_drv = { + .probe = rte_pmd_memif_probe, + .remove = rte_pmd_memif_remove, +}; + +RTE_PMD_REGISTER_VDEV(net_memif, pmd_memif_drv); + +RTE_PMD_REGISTER_PARAM_STRING(net_memif, + ETH_MEMIF_ID_ARG "=" + ETH_MEMIF_ROLE_ARG "=master|slave" + ETH_MEMIF_PKT_BUFFER_SIZE_ARG "=" + ETH_MEMIF_RING_SIZE_ARG "=" + ETH_MEMIF_SOCKET_ARG "=" + ETH_MEMIF_MAC_ARG "=xx:xx:xx:xx:xx:xx" + ETH_MEMIF_ZC_ARG "=yes|no" + ETH_MEMIF_SECRET_ARG "="); + +int memif_logtype; + +RTE_INIT(memif_init_log) +{ + memif_logtype = rte_log_register("pmd.net.memif"); + if (memif_logtype >= 0) + rte_log_set_level(memif_logtype, RTE_LOG_NOTICE); +} diff --git a/src/spdk/dpdk/drivers/net/memif/rte_eth_memif.h b/src/spdk/dpdk/drivers/net/memif/rte_eth_memif.h new file mode 100644 index 000000000..6f45b7072 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/memif/rte_eth_memif.h @@ -0,0 +1,215 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2018-2019 Cisco Systems, Inc. All rights reserved. + */ + +#ifndef _RTE_ETH_MEMIF_H_ +#define _RTE_ETH_MEMIF_H_ + +#ifndef _GNU_SOURCE +#define _GNU_SOURCE +#endif /* GNU_SOURCE */ + +#include + +#include +#include +#include + +#include "memif.h" + +#define ETH_MEMIF_DEFAULT_SOCKET_FILENAME "/run/memif.sock" +#define ETH_MEMIF_DEFAULT_RING_SIZE 10 +#define ETH_MEMIF_DEFAULT_PKT_BUFFER_SIZE 2048 + +#define ETH_MEMIF_MAX_NUM_Q_PAIRS 255 +#define ETH_MEMIF_MAX_LOG2_RING_SIZE 14 +#define ETH_MEMIF_MAX_REGION_NUM 256 + +#define ETH_MEMIF_SHM_NAME_SIZE 32 +#define ETH_MEMIF_DISC_STRING_SIZE 96 +#define ETH_MEMIF_SECRET_SIZE 24 + +extern int memif_logtype; + +#define MIF_LOG(level, fmt, args...) 
\ + rte_log(RTE_LOG_ ## level, memif_logtype, \ + "%s(): " fmt "\n", __func__, ##args) + +enum memif_role_t { + MEMIF_ROLE_MASTER, + MEMIF_ROLE_SLAVE, +}; + +struct memif_region { + void *addr; /**< shared memory address */ + memif_region_size_t region_size; /**< shared memory size */ + int fd; /**< shared memory file descriptor */ + uint32_t pkt_buffer_offset; + /**< offset from 'addr' to first packet buffer */ +}; + +struct memif_queue { + struct rte_mempool *mempool; /**< mempool for RX packets */ + struct pmd_internals *pmd; /**< device internals */ + + memif_ring_type_t type; /**< ring type */ + memif_region_index_t region; /**< shared memory region index */ + + uint16_t in_port; /**< port id */ + + memif_region_offset_t ring_offset; + /**< ring offset from start of shm region (ring - memif_region.addr) */ + + uint16_t last_head; /**< last ring head */ + uint16_t last_tail; /**< last ring tail */ + + struct rte_mbuf **buffers; + /**< Stored mbufs. Used in zero-copy tx. Slave stores transmitted + * mbufs to free them once master has received them. + */ + + /* rx/tx info */ + uint64_t n_pkts; /**< number of rx/tx packets */ + uint64_t n_bytes; /**< number of rx/tx bytes */ + + struct rte_intr_handle intr_handle; /**< interrupt handle */ + + memif_log2_ring_size_t log2_ring_size; /**< log2 of ring size */ +}; + +struct pmd_internals { + memif_interface_id_t id; /**< unique id */ + enum memif_role_t role; /**< device role */ + uint32_t flags; /**< device status flags */ +#define ETH_MEMIF_FLAG_CONNECTING (1 << 0) +/**< device is connecting */ +#define ETH_MEMIF_FLAG_CONNECTED (1 << 1) +/**< device is connected */ +#define ETH_MEMIF_FLAG_ZERO_COPY (1 << 2) +/**< device is zero-copy enabled */ +#define ETH_MEMIF_FLAG_DISABLED (1 << 3) +/**< device has not been configured and can not accept connection requests */ + + char *socket_filename; /**< pointer to socket filename */ + char secret[ETH_MEMIF_SECRET_SIZE]; /**< secret (optional security parameter) */ + + struct memif_control_channel *cc; /**< control channel */ + rte_spinlock_t cc_lock; /**< control channel lock */ + + /* remote info */ + char remote_name[RTE_DEV_NAME_MAX_LEN]; /**< remote app name */ + char remote_if_name[RTE_DEV_NAME_MAX_LEN]; /**< remote peer name */ + + struct { + memif_log2_ring_size_t log2_ring_size; /**< log2 of ring size */ + uint8_t num_s2m_rings; /**< number of slave to master rings */ + uint8_t num_m2s_rings; /**< number of master to slave rings */ + uint16_t pkt_buffer_size; /**< buffer size */ + } cfg; /**< Configured parameters (max values) */ + + struct { + memif_log2_ring_size_t log2_ring_size; /**< log2 of ring size */ + uint8_t num_s2m_rings; /**< number of slave to master rings */ + uint8_t num_m2s_rings; /**< number of master to slave rings */ + uint16_t pkt_buffer_size; /**< buffer size */ + } run; + /**< Parameters used in active connection */ + + char local_disc_string[ETH_MEMIF_DISC_STRING_SIZE]; + /**< local disconnect reason */ + char remote_disc_string[ETH_MEMIF_DISC_STRING_SIZE]; + /**< remote disconnect reason */ +}; + +struct pmd_process_private { + struct memif_region *regions[ETH_MEMIF_MAX_REGION_NUM]; + /**< shared memory regions */ + memif_region_index_t regions_num; /**< number of regions */ +}; + +/** + * Unmap shared memory and free regions from memory. + * + * @param proc_private + * device process private data + */ +void memif_free_regions(struct rte_eth_dev *dev); + +/** + * Finalize connection establishment process. 
Map shared memory file + * (master role), initialize ring queue, set link status up. + * + * @param dev + * memif device + * @return + * - On success, zero. + * - On failure, a negative value. + */ +int memif_connect(struct rte_eth_dev *dev); + +/** + * Create shared memory file and initialize ring queue. + * Only called by slave when establishing connection + * + * @param dev + * memif device + * @return + * - On success, zero. + * - On failure, a negative value. + */ +int memif_init_regions_and_queues(struct rte_eth_dev *dev); + +/** + * Get memif version string. + * + * @return + * - memif version string + */ +const char *memif_version(void); + +#ifndef MFD_HUGETLB +#ifndef __NR_memfd_create + +#if defined __x86_64__ +#define __NR_memfd_create 319 +#elif defined __x86_32__ +#define __NR_memfd_create 1073742143 +#elif defined __arm__ +#define __NR_memfd_create 385 +#elif defined __aarch64__ +#define __NR_memfd_create 279 +#elif defined __powerpc__ +#define __NR_memfd_create 360 +#elif defined __i386__ +#define __NR_memfd_create 356 +#else +#error "__NR_memfd_create unknown for this architecture" +#endif + +#endif /* __NR_memfd_create */ + +static inline int memfd_create(const char *name, unsigned int flags) +{ + return syscall(__NR_memfd_create, name, flags); +} +#endif /* MFD_HUGETLB */ + +#ifndef F_LINUX_SPECIFIC_BASE +#define F_LINUX_SPECIFIC_BASE 1024 +#endif + +#ifndef MFD_ALLOW_SEALING +#define MFD_ALLOW_SEALING 0x0002U +#endif + +#ifndef F_ADD_SEALS +#define F_ADD_SEALS (F_LINUX_SPECIFIC_BASE + 9) +#define F_GET_SEALS (F_LINUX_SPECIFIC_BASE + 10) + +#define F_SEAL_SEAL 0x0001 /* prevent further seals from being set */ +#define F_SEAL_SHRINK 0x0002 /* prevent file from shrinking */ +#define F_SEAL_GROW 0x0004 /* prevent file from growing */ +#define F_SEAL_WRITE 0x0008 /* prevent writes */ +#endif + +#endif /* RTE_ETH_MEMIF_H */ diff --git a/src/spdk/dpdk/drivers/net/memif/rte_pmd_memif_version.map b/src/spdk/dpdk/drivers/net/memif/rte_pmd_memif_version.map new file mode 100644 index 000000000..f9f17e4f6 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/memif/rte_pmd_memif_version.map @@ -0,0 +1,3 @@ +DPDK_20.0 { + local: *; +}; diff --git a/src/spdk/dpdk/drivers/net/meson.build b/src/spdk/dpdk/drivers/net/meson.build new file mode 100644 index 000000000..266448ff2 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/meson.build @@ -0,0 +1,58 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2017 Intel Corporation + +drivers = ['af_packet', + 'af_xdp', + 'ark', + 'atlantic', + 'avp', + 'axgbe', 'bonding', + 'bnx2x', + 'bnxt', + 'cxgbe', + 'dpaa', 'dpaa2', + 'e1000', + 'ena', + 'enetc', + 'enic', + 'failsafe', + 'fm10k', 'i40e', + 'hinic', + 'hns3', + 'iavf', + 'ice', + 'igc', + 'ipn3ke', + 'ixgbe', + 'kni', + 'liquidio', + 'memif', + 'mlx4', + 'mlx5', + 'mvneta', + 'mvpp2', + 'netvsc', + 'nfb', + 'nfp', + 'null', + 'octeontx', + 'octeontx2', + 'pcap', + 'pfe', + 'qede', + 'ring', + 'sfc', + 'softnic', + 'szedata2', + 'tap', + 'thunderx', + 'vdev_netvsc', + 'vhost', + 'virtio', + 'vmxnet3', +] +std_deps = ['ethdev', 'kvargs'] # 'ethdev' also pulls in mbuf, net, eal etc +std_deps += ['bus_pci'] # very many PMDs depend on PCI, so make std +std_deps += ['bus_vdev'] # same with vdev bus +config_flag_fmt = 'RTE_LIBRTE_@0@_PMD' +driver_name_fmt = 'rte_pmd_@0@' diff --git a/src/spdk/dpdk/drivers/net/mlx4/Makefile b/src/spdk/dpdk/drivers/net/mlx4/Makefile new file mode 100644 index 000000000..02e9b2ec0 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/mlx4/Makefile @@ -0,0 +1,142 @@ +# 
SPDX-License-Identifier: BSD-3-Clause +# Copyright 2012 6WIND S.A. +# Copyright 2012 Mellanox Technologies, Ltd + +include $(RTE_SDK)/mk/rte.vars.mk + +# Library name. +LIB = librte_pmd_mlx4.a +LIB_GLUE = $(LIB_GLUE_BASE).$(LIB_GLUE_VERSION) +LIB_GLUE_BASE = librte_pmd_mlx4_glue.so +LIB_GLUE_VERSION = 18.02.0 + +# Sources. +SRCS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += mlx4.c +SRCS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += mlx4_ethdev.c +SRCS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += mlx4_flow.c +ifneq ($(CONFIG_RTE_IBVERBS_LINK_DLOPEN),y) +SRCS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += mlx4_glue.c +endif +SRCS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += mlx4_intr.c +SRCS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += mlx4_mp.c +SRCS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += mlx4_mr.c +SRCS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += mlx4_rxq.c +SRCS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += mlx4_rxtx.c +SRCS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += mlx4_txq.c +SRCS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += mlx4_utils.c + +ifeq ($(CONFIG_RTE_IBVERBS_LINK_DLOPEN),y) +INSTALL-$(CONFIG_RTE_LIBRTE_MLX4_PMD)-lib += $(LIB_GLUE) +endif + +# Basic CFLAGS. +CFLAGS += -O3 +CFLAGS += -std=c11 -Wall -Wextra +CFLAGS += -g +CFLAGS += -I. +CFLAGS += -D_BSD_SOURCE +CFLAGS += -D_DEFAULT_SOURCE +CFLAGS += -D_XOPEN_SOURCE=600 +CFLAGS += $(WERROR_FLAGS) +ifeq ($(CONFIG_RTE_IBVERBS_LINK_DLOPEN),y) +CFLAGS += -DMLX4_GLUE='"$(LIB_GLUE)"' +CFLAGS += -DMLX4_GLUE_VERSION='"$(LIB_GLUE_VERSION)"' +CFLAGS_mlx4_glue.o += -fPIC +LDLIBS += -ldl +else ifeq ($(CONFIG_RTE_IBVERBS_LINK_STATIC),y) +LDLIBS += $(shell $(RTE_SDK)/buildtools/options-ibverbs-static.sh) +else +LDLIBS += -libverbs -lmlx4 +endif +LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring +LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs +LDLIBS += -lrte_bus_pci + +# A few warnings cannot be avoided in external headers. +CFLAGS += -Wno-error=cast-qual + +EXPORT_MAP := rte_pmd_mlx4_version.map +# DEBUG which is usually provided on the command-line may enable +# CONFIG_RTE_LIBRTE_MLX4_DEBUG. +ifeq ($(DEBUG),1) +CONFIG_RTE_LIBRTE_MLX4_DEBUG := y +endif + +# User-defined CFLAGS. +ifeq ($(CONFIG_RTE_LIBRTE_MLX4_DEBUG),y) +CFLAGS += -pedantic +ifneq ($(CONFIG_RTE_TOOLCHAIN_ICC),y) +CFLAGS += -DPEDANTIC +endif +AUTO_CONFIG_CFLAGS += -Wno-pedantic +else +CFLAGS += -UPEDANTIC +endif + +include $(RTE_SDK)/mk/rte.lib.mk + +# Generate and clean-up mlx4_autoconf.h. + +export CC CFLAGS CPPFLAGS EXTRA_CFLAGS EXTRA_CPPFLAGS +export AUTO_CONFIG_CFLAGS += -Wno-error + +ifndef V +AUTOCONF_OUTPUT := >/dev/null +endif + +mlx4_autoconf.h.new: FORCE + +mlx4_autoconf.h.new: $(RTE_SDK)/buildtools/auto-config-h.sh + $Q $(RM) -f -- '$@' + $Q : > '$@' + $Q sh -- '$<' '$@' \ + HAVE_IBV_MLX4_BUF_ALLOCATORS \ + infiniband/mlx4dv.h \ + enum MLX4DV_SET_CTX_ATTR_BUF_ALLOCATORS \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_IBV_MLX4_UAR_MMAP_OFFSET \ + infiniband/mlx4dv.h \ + enum MLX4DV_QP_MASK_UAR_MMAP_OFFSET \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_IBV_MLX4_WQE_LSO_SEG \ + infiniband/mlx4dv.h \ + type 'struct mlx4_wqe_lso_seg' \ + $(AUTOCONF_OUTPUT) + +# Create mlx4_autoconf.h or update it in case it differs from the new one. + +mlx4_autoconf.h: mlx4_autoconf.h.new + $Q [ -f '$@' ] && \ + cmp '$<' '$@' $(AUTOCONF_OUTPUT) || \ + mv '$<' '$@' + +$(SRCS-$(CONFIG_RTE_LIBRTE_MLX4_PMD):.c=.o): mlx4_autoconf.h + +# Generate dependency plug-in for rdma-core when the PMD must not be linked +# directly, so that applications do not inherit this dependency. 
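+# The glue object is built with -fPIC and linked into $(LIB_GLUE), which the
+# PMD locates through the MLX4_GLUE define and loads at run time instead of
+# linking against rdma-core directly.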
+ +ifeq ($(CONFIG_RTE_IBVERBS_LINK_DLOPEN),y) + +$(LIB): $(LIB_GLUE) + +ifeq ($(LINK_USING_CC),1) +GLUE_LDFLAGS := $(call linkerprefix,$(LDFLAGS)) +else +GLUE_LDFLAGS := $(LDFLAGS) +endif +$(LIB_GLUE): mlx4_glue.o + $Q $(LD) $(GLUE_LDFLAGS) $(EXTRA_LDFLAGS) \ + -Wl,-h,$(LIB_GLUE) \ + -shared -o $@ $< -libverbs -lmlx4 + +mlx4_glue.o: mlx4_autoconf.h + +endif + +clean_mlx4: FORCE + $Q rm -f -- mlx4_autoconf.h mlx4_autoconf.h.new + $Q rm -f -- mlx4_glue.o $(LIB_GLUE_BASE)* + +clean: clean_mlx4 diff --git a/src/spdk/dpdk/drivers/net/mlx4/meson.build b/src/spdk/dpdk/drivers/net/mlx4/meson.build new file mode 100644 index 000000000..5a25e11a7 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/mlx4/meson.build @@ -0,0 +1,137 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright 2018 6WIND S.A. +# Copyright 2018 Mellanox Technologies, Ltd + +if not is_linux + build = false + reason = 'only supported on Linux' + subdir_done() +endif + +static_ibverbs = (get_option('ibverbs_link') == 'static') +dlopen_ibverbs = (get_option('ibverbs_link') == 'dlopen') +LIB_GLUE_BASE = 'librte_pmd_mlx4_glue.so' +LIB_GLUE_VERSION = '18.02.0' +LIB_GLUE = LIB_GLUE_BASE + '.' + LIB_GLUE_VERSION +if dlopen_ibverbs + dpdk_conf.set('RTE_IBVERBS_LINK_DLOPEN', 1) + cflags += [ + '-DMLX4_GLUE="@0@"'.format(LIB_GLUE), + '-DMLX4_GLUE_VERSION="@0@"'.format(LIB_GLUE_VERSION), + ] +endif + +libnames = [ 'mlx4', 'ibverbs' ] +libs = [] +foreach libname:libnames + lib = dependency('lib' + libname, static:static_ibverbs, required:false) + if not lib.found() and not static_ibverbs + lib = cc.find_library(libname, required:false) + endif + if lib.found() + libs += lib + if not static_ibverbs and not dlopen_ibverbs + ext_deps += lib + endif + else + build = false + reason = 'missing dependency, "' + libname + '"' + subdir_done() + endif +endforeach +if static_ibverbs or dlopen_ibverbs + # Build without adding shared libs to Requires.private + ibv_cflags = run_command(pkgconf, '--cflags', 'libibverbs').stdout() + ext_deps += declare_dependency(compile_args: ibv_cflags.split()) +endif +if static_ibverbs + # Add static deps ldflags to internal apps and Libs.private + ibv_ldflags = run_command(ldflags_ibverbs_static, check:true).stdout() + ext_deps += declare_dependency(link_args:ibv_ldflags.split()) +endif + +sources = files( + 'mlx4.c', + 'mlx4_ethdev.c', + 'mlx4_flow.c', + 'mlx4_intr.c', + 'mlx4_mp.c', + 'mlx4_mr.c', + 'mlx4_rxq.c', + 'mlx4_rxtx.c', + 'mlx4_txq.c', + 'mlx4_utils.c', +) +if not dlopen_ibverbs + sources += files('mlx4_glue.c') +endif +cflags_options = [ + '-std=c11', + '-Wno-strict-prototypes', + '-D_BSD_SOURCE', + '-D_DEFAULT_SOURCE', + '-D_XOPEN_SOURCE=600' +] +foreach option:cflags_options + if cc.has_argument(option) + cflags += option + endif +endforeach +if get_option('buildtype').contains('debug') + cflags += [ '-pedantic', '-DPEDANTIC' ] +else + cflags += [ '-UPEDANTIC' ] +endif +# To maintain the compatibility with the make build system +# mlx4_autoconf.h file is still generated. 
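+# The checks below probe the same three mlx4dv.h symbols/members
+# (HAVE_IBV_MLX4_BUF_ALLOCATORS, HAVE_IBV_MLX4_UAR_MMAP_OFFSET,
+# HAVE_IBV_MLX4_WQE_LSO_SEG) as the auto-config-h.sh rules in the Makefile.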
+# input array for meson member search: +# [ "MACRO to define if found", "header for the search", +# "symbol to search", "struct member to search" ] +# +has_member_args = [ + [ 'HAVE_IBV_MLX4_WQE_LSO_SEG', 'infiniband/mlx4dv.h', + 'struct mlx4_wqe_lso_seg', 'mss_hdr_size' ], +] +# input array for meson symbol search: +# [ "MACRO to define if found", "header for the search", +# "symbol to search" ] +has_sym_args = [ + [ 'HAVE_IBV_MLX4_BUF_ALLOCATORS', 'infiniband/mlx4dv.h', + 'MLX4DV_SET_CTX_ATTR_BUF_ALLOCATORS' ], + [ 'HAVE_IBV_MLX4_UAR_MMAP_OFFSET', 'infiniband/mlx4dv.h', + 'MLX4DV_QP_MASK_UAR_MMAP_OFFSET' ], +] +config = configuration_data() +foreach arg:has_sym_args + config.set(arg[0], cc.has_header_symbol(arg[1], arg[2], + dependencies: libs)) +endforeach +foreach arg:has_member_args + file_prefix = '#include <' + arg[1] + '>' + config.set(arg[0], cc.has_member(arg[2], arg[3], + prefix: file_prefix, dependencies: libs)) +endforeach +configure_file(output : 'mlx4_autoconf.h', configuration : config) + +# Build Glue Library +if dlopen_ibverbs + dlopen_name = 'mlx4_glue' + dlopen_lib_name = driver_name_fmt.format(dlopen_name) + dlopen_so_version = LIB_GLUE_VERSION + dlopen_sources = files('mlx4_glue.c') + dlopen_install_dir = [ eal_pmd_path + '-glue' ] + shared_lib = shared_library( + dlopen_lib_name, + dlopen_sources, + include_directories: global_inc, + c_args: cflags, + dependencies: libs, + link_args: [ + '-Wl,-export-dynamic', + '-Wl,-h,@0@'.format(LIB_GLUE), + ], + soversion: dlopen_so_version, + install: true, + install_dir: dlopen_install_dir, + ) +endif diff --git a/src/spdk/dpdk/drivers/net/mlx4/mlx4.c b/src/spdk/dpdk/drivers/net/mlx4/mlx4.c new file mode 100644 index 000000000..5d7202720 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/mlx4/mlx4.c @@ -0,0 +1,1333 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2012 6WIND S.A. + * Copyright 2012 Mellanox Technologies, Ltd + */ + +/** + * @file + * mlx4 driver initialization. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef RTE_IBVERBS_LINK_DLOPEN +#include +#endif + +/* Verbs headers do not support -pedantic. */ +#ifdef PEDANTIC +#pragma GCC diagnostic ignored "-Wpedantic" +#endif +#include +#ifdef PEDANTIC +#pragma GCC diagnostic error "-Wpedantic" +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mlx4.h" +#include "mlx4_glue.h" +#include "mlx4_flow.h" +#include "mlx4_mr.h" +#include "mlx4_rxtx.h" +#include "mlx4_utils.h" + +#ifdef MLX4_GLUE +const struct mlx4_glue *mlx4_glue; +#endif + +static const char *MZ_MLX4_PMD_SHARED_DATA = "mlx4_pmd_shared_data"; + +/* Shared memory between primary and secondary processes. */ +struct mlx4_shared_data *mlx4_shared_data; + +/* Spinlock for mlx4_shared_data allocation. */ +static rte_spinlock_t mlx4_shared_data_lock = RTE_SPINLOCK_INITIALIZER; + +/* Process local data for secondary processes. */ +static struct mlx4_local_data mlx4_local_data; + +/** Driver-specific log messages type. */ +int mlx4_logtype; + +/** Configuration structure for device arguments. */ +struct mlx4_conf { + struct { + uint32_t present; /**< Bit-field for existing ports. */ + uint32_t enabled; /**< Bit-field for user-enabled ports. */ + } ports; + int mr_ext_memseg_en; + /** Whether memseg should be extended for MR creation. */ +}; + +/* Available parameters list. 
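+ * Only MLX4_PMD_PORT_KVARG and MLX4_MR_EXT_MEMSEG_EN_KVARG are accepted;
+ * mlx4_arg_parse() rejects any other key.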
*/ +const char *pmd_mlx4_init_params[] = { + MLX4_PMD_PORT_KVARG, + MLX4_MR_EXT_MEMSEG_EN_KVARG, + NULL, +}; + +static void mlx4_dev_stop(struct rte_eth_dev *dev); + +/** + * Initialize shared data between primary and secondary process. + * + * A memzone is reserved by primary process and secondary processes attach to + * the memzone. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +mlx4_init_shared_data(void) +{ + const struct rte_memzone *mz; + int ret = 0; + + rte_spinlock_lock(&mlx4_shared_data_lock); + if (mlx4_shared_data == NULL) { + if (rte_eal_process_type() == RTE_PROC_PRIMARY) { + /* Allocate shared memory. */ + mz = rte_memzone_reserve(MZ_MLX4_PMD_SHARED_DATA, + sizeof(*mlx4_shared_data), + SOCKET_ID_ANY, 0); + if (mz == NULL) { + ERROR("Cannot allocate mlx4 shared data\n"); + ret = -rte_errno; + goto error; + } + mlx4_shared_data = mz->addr; + memset(mlx4_shared_data, 0, sizeof(*mlx4_shared_data)); + rte_spinlock_init(&mlx4_shared_data->lock); + } else { + /* Lookup allocated shared memory. */ + mz = rte_memzone_lookup(MZ_MLX4_PMD_SHARED_DATA); + if (mz == NULL) { + ERROR("Cannot attach mlx4 shared data\n"); + ret = -rte_errno; + goto error; + } + mlx4_shared_data = mz->addr; + memset(&mlx4_local_data, 0, sizeof(mlx4_local_data)); + } + } +error: + rte_spinlock_unlock(&mlx4_shared_data_lock); + return ret; +} + +#ifdef HAVE_IBV_MLX4_BUF_ALLOCATORS +/** + * Verbs callback to allocate a memory. This function should allocate the space + * according to the size provided residing inside a huge page. + * Please note that all allocation must respect the alignment from libmlx4 + * (i.e. currently sysconf(_SC_PAGESIZE)). + * + * @param[in] size + * The size in bytes of the memory to allocate. + * @param[in] data + * A pointer to the callback data. + * + * @return + * Allocated buffer, NULL otherwise and rte_errno is set. + */ +static void * +mlx4_alloc_verbs_buf(size_t size, void *data) +{ + struct mlx4_priv *priv = data; + void *ret; + size_t alignment = sysconf(_SC_PAGESIZE); + unsigned int socket = SOCKET_ID_ANY; + + if (priv->verbs_alloc_ctx.type == MLX4_VERBS_ALLOC_TYPE_TX_QUEUE) { + const struct txq *txq = priv->verbs_alloc_ctx.obj; + + socket = txq->socket; + } else if (priv->verbs_alloc_ctx.type == + MLX4_VERBS_ALLOC_TYPE_RX_QUEUE) { + const struct rxq *rxq = priv->verbs_alloc_ctx.obj; + + socket = rxq->socket; + } + MLX4_ASSERT(data != NULL); + ret = rte_malloc_socket(__func__, size, alignment, socket); + if (!ret && size) + rte_errno = ENOMEM; + return ret; +} + +/** + * Verbs callback to free a memory. + * + * @param[in] ptr + * A pointer to the memory to free. + * @param[in] data + * A pointer to the callback data. + */ +static void +mlx4_free_verbs_buf(void *ptr, void *data __rte_unused) +{ + MLX4_ASSERT(data != NULL); + rte_free(ptr); +} +#endif + +/** + * Initialize process private data structure. + * + * @param dev + * Pointer to Ethernet device structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +mlx4_proc_priv_init(struct rte_eth_dev *dev) +{ + struct mlx4_proc_priv *ppriv; + size_t ppriv_size; + + /* + * UAR register table follows the process private structure. BlueFlame + * registers for Tx queues are stored in the table. 
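+	 * One table entry is reserved per configured Tx queue, hence the
+	 * nb_tx_queues * sizeof(void *) term added to ppriv_size below.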
+ */ + ppriv_size = sizeof(struct mlx4_proc_priv) + + dev->data->nb_tx_queues * sizeof(void *); + ppriv = rte_malloc_socket("mlx4_proc_priv", ppriv_size, + RTE_CACHE_LINE_SIZE, dev->device->numa_node); + if (!ppriv) { + rte_errno = ENOMEM; + return -rte_errno; + } + ppriv->uar_table_sz = ppriv_size; + dev->process_private = ppriv; + return 0; +} + +/** + * Un-initialize process private data structure. + * + * @param dev + * Pointer to Ethernet device structure. + */ +static void +mlx4_proc_priv_uninit(struct rte_eth_dev *dev) +{ + if (!dev->process_private) + return; + rte_free(dev->process_private); + dev->process_private = NULL; +} + +/** + * DPDK callback for Ethernet device configuration. + * + * @param dev + * Pointer to Ethernet device structure. + * + * @return + * 0 on success, negative errno value otherwise and rte_errno is set. + */ +static int +mlx4_dev_configure(struct rte_eth_dev *dev) +{ + struct mlx4_priv *priv = dev->data->dev_private; + struct rte_flow_error error; + int ret; + + if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) + dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH; + + /* Prepare internal flow rules. */ + ret = mlx4_flow_sync(priv, &error); + if (ret) { + ERROR("cannot set up internal flow rules (code %d, \"%s\")," + " flow error type %d, cause %p, message: %s", + -ret, strerror(-ret), error.type, error.cause, + error.message ? error.message : "(unspecified)"); + goto exit; + } + ret = mlx4_intr_install(priv); + if (ret) { + ERROR("%p: interrupt handler installation failed", + (void *)dev); + goto exit; + } + ret = mlx4_proc_priv_init(dev); + if (ret) { + ERROR("%p: process private data allocation failed", + (void *)dev); + goto exit; + } +exit: + return ret; +} + +/** + * DPDK callback to start the device. + * + * Simulate device start by initializing common RSS resources and attaching + * all configured flows. + * + * @param dev + * Pointer to Ethernet device structure. + * + * @return + * 0 on success, negative errno value otherwise and rte_errno is set. + */ +static int +mlx4_dev_start(struct rte_eth_dev *dev) +{ + struct mlx4_priv *priv = dev->data->dev_private; + struct rte_flow_error error; + int ret; + + if (priv->started) + return 0; + DEBUG("%p: attaching configured flows to all RX queues", (void *)dev); + priv->started = 1; + ret = mlx4_rss_init(priv); + if (ret) { + ERROR("%p: cannot initialize RSS resources: %s", + (void *)dev, strerror(-ret)); + goto err; + } +#ifdef RTE_LIBRTE_MLX4_DEBUG + mlx4_mr_dump_dev(dev); +#endif + ret = mlx4_rxq_intr_enable(priv); + if (ret) { + ERROR("%p: interrupt handler installation failed", + (void *)dev); + goto err; + } + ret = mlx4_flow_sync(priv, &error); + if (ret) { + ERROR("%p: cannot attach flow rules (code %d, \"%s\")," + " flow error type %d, cause %p, message: %s", + (void *)dev, + -ret, strerror(-ret), error.type, error.cause, + error.message ? error.message : "(unspecified)"); + goto err; + } + rte_wmb(); + dev->tx_pkt_burst = mlx4_tx_burst; + dev->rx_pkt_burst = mlx4_rx_burst; + /* Enable datapath on secondary process. */ + mlx4_mp_req_start_rxtx(dev); + return 0; +err: + mlx4_dev_stop(dev); + return ret; +} + +/** + * DPDK callback to stop the device. + * + * Simulate device stop by detaching all configured flows. + * + * @param dev + * Pointer to Ethernet device structure. 
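+ *
+ * The burst callbacks are swapped to the mlx4_*_burst_removed stubs and
+ * secondary processes are told to stop their datapath before flows,
+ * interrupts and RSS resources are released.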
+ */ +static void +mlx4_dev_stop(struct rte_eth_dev *dev) +{ + struct mlx4_priv *priv = dev->data->dev_private; + + if (!priv->started) + return; + DEBUG("%p: detaching flows from all RX queues", (void *)dev); + priv->started = 0; + dev->tx_pkt_burst = mlx4_tx_burst_removed; + dev->rx_pkt_burst = mlx4_rx_burst_removed; + rte_wmb(); + /* Disable datapath on secondary process. */ + mlx4_mp_req_stop_rxtx(dev); + mlx4_flow_sync(priv, NULL); + mlx4_rxq_intr_disable(priv); + mlx4_rss_deinit(priv); +} + +/** + * DPDK callback to close the device. + * + * Destroy all queues and objects, free memory. + * + * @param dev + * Pointer to Ethernet device structure. + */ +static void +mlx4_dev_close(struct rte_eth_dev *dev) +{ + struct mlx4_priv *priv = dev->data->dev_private; + unsigned int i; + + DEBUG("%p: closing device \"%s\"", + (void *)dev, + ((priv->ctx != NULL) ? priv->ctx->device->name : "")); + dev->rx_pkt_burst = mlx4_rx_burst_removed; + dev->tx_pkt_burst = mlx4_tx_burst_removed; + rte_wmb(); + /* Disable datapath on secondary process. */ + mlx4_mp_req_stop_rxtx(dev); + mlx4_flow_clean(priv); + mlx4_rss_deinit(priv); + for (i = 0; i != dev->data->nb_rx_queues; ++i) + mlx4_rx_queue_release(dev->data->rx_queues[i]); + for (i = 0; i != dev->data->nb_tx_queues; ++i) + mlx4_tx_queue_release(dev->data->tx_queues[i]); + mlx4_proc_priv_uninit(dev); + mlx4_mr_release(dev); + if (priv->pd != NULL) { + MLX4_ASSERT(priv->ctx != NULL); + claim_zero(mlx4_glue->dealloc_pd(priv->pd)); + claim_zero(mlx4_glue->close_device(priv->ctx)); + } else + MLX4_ASSERT(priv->ctx == NULL); + mlx4_intr_uninstall(priv); + memset(priv, 0, sizeof(*priv)); +} + +static const struct eth_dev_ops mlx4_dev_ops = { + .dev_configure = mlx4_dev_configure, + .dev_start = mlx4_dev_start, + .dev_stop = mlx4_dev_stop, + .dev_set_link_down = mlx4_dev_set_link_down, + .dev_set_link_up = mlx4_dev_set_link_up, + .dev_close = mlx4_dev_close, + .link_update = mlx4_link_update, + .promiscuous_enable = mlx4_promiscuous_enable, + .promiscuous_disable = mlx4_promiscuous_disable, + .allmulticast_enable = mlx4_allmulticast_enable, + .allmulticast_disable = mlx4_allmulticast_disable, + .mac_addr_remove = mlx4_mac_addr_remove, + .mac_addr_add = mlx4_mac_addr_add, + .mac_addr_set = mlx4_mac_addr_set, + .set_mc_addr_list = mlx4_set_mc_addr_list, + .stats_get = mlx4_stats_get, + .stats_reset = mlx4_stats_reset, + .fw_version_get = mlx4_fw_version_get, + .dev_infos_get = mlx4_dev_infos_get, + .dev_supported_ptypes_get = mlx4_dev_supported_ptypes_get, + .vlan_filter_set = mlx4_vlan_filter_set, + .rx_queue_setup = mlx4_rx_queue_setup, + .tx_queue_setup = mlx4_tx_queue_setup, + .rx_queue_release = mlx4_rx_queue_release, + .tx_queue_release = mlx4_tx_queue_release, + .flow_ctrl_get = mlx4_flow_ctrl_get, + .flow_ctrl_set = mlx4_flow_ctrl_set, + .mtu_set = mlx4_mtu_set, + .filter_ctrl = mlx4_filter_ctrl, + .rx_queue_intr_enable = mlx4_rx_intr_enable, + .rx_queue_intr_disable = mlx4_rx_intr_disable, + .is_removed = mlx4_is_removed, +}; + +/* Available operations from secondary process. */ +static const struct eth_dev_ops mlx4_dev_sec_ops = { + .stats_get = mlx4_stats_get, + .stats_reset = mlx4_stats_reset, + .fw_version_get = mlx4_fw_version_get, + .dev_infos_get = mlx4_dev_infos_get, +}; + +/** + * Get PCI information from struct ibv_device. + * + * @param device + * Pointer to Ethernet device structure. + * @param[out] pci_addr + * PCI bus address output buffer. + * + * @return + * 0 on success, negative errno value otherwise and rte_errno is set. 
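+ *
+ * The address is extracted from the PCI_SLOT_NAME line of the device's
+ * sysfs uevent file, e.g. a line of the form:
+ *
+ *   PCI_SLOT_NAME=0000:83:00.0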
+ */ +static int +mlx4_ibv_device_to_pci_addr(const struct ibv_device *device, + struct rte_pci_addr *pci_addr) +{ + FILE *file; + char line[32]; + MKSTR(path, "%s/device/uevent", device->ibdev_path); + + file = fopen(path, "rb"); + if (file == NULL) { + rte_errno = errno; + return -rte_errno; + } + while (fgets(line, sizeof(line), file) == line) { + size_t len = strlen(line); + int ret; + + /* Truncate long lines. */ + if (len == (sizeof(line) - 1)) + while (line[(len - 1)] != '\n') { + ret = fgetc(file); + if (ret == EOF) + break; + line[(len - 1)] = ret; + } + /* Extract information. */ + if (sscanf(line, + "PCI_SLOT_NAME=" + "%" SCNx32 ":%" SCNx8 ":%" SCNx8 ".%" SCNx8 "\n", + &pci_addr->domain, + &pci_addr->bus, + &pci_addr->devid, + &pci_addr->function) == 4) { + ret = 0; + break; + } + } + fclose(file); + return 0; +} + +/** + * Verify and store value for device argument. + * + * @param[in] key + * Key argument to verify. + * @param[in] val + * Value associated with key. + * @param[in, out] conf + * Shared configuration data. + * + * @return + * 0 on success, negative errno value otherwise and rte_errno is set. + */ +static int +mlx4_arg_parse(const char *key, const char *val, struct mlx4_conf *conf) +{ + unsigned long tmp; + + errno = 0; + tmp = strtoul(val, NULL, 0); + if (errno) { + rte_errno = errno; + WARN("%s: \"%s\" is not a valid integer", key, val); + return -rte_errno; + } + if (strcmp(MLX4_PMD_PORT_KVARG, key) == 0) { + uint32_t ports = rte_log2_u32(conf->ports.present + 1); + + if (tmp >= ports) { + ERROR("port index %lu outside range [0,%" PRIu32 ")", + tmp, ports); + return -EINVAL; + } + if (!(conf->ports.present & (1 << tmp))) { + rte_errno = EINVAL; + ERROR("invalid port index %lu", tmp); + return -rte_errno; + } + conf->ports.enabled |= 1 << tmp; + } else if (strcmp(MLX4_MR_EXT_MEMSEG_EN_KVARG, key) == 0) { + conf->mr_ext_memseg_en = !!tmp; + } else { + rte_errno = EINVAL; + WARN("%s: unknown parameter", key); + return -rte_errno; + } + return 0; +} + +/** + * Parse device parameters. + * + * @param devargs + * Device arguments structure. + * + * @return + * 0 on success, negative errno value otherwise and rte_errno is set. + */ +static int +mlx4_args(struct rte_devargs *devargs, struct mlx4_conf *conf) +{ + struct rte_kvargs *kvlist; + unsigned int arg_count; + int ret = 0; + int i; + + if (devargs == NULL) + return 0; + kvlist = rte_kvargs_parse(devargs->args, pmd_mlx4_init_params); + if (kvlist == NULL) { + rte_errno = EINVAL; + ERROR("failed to parse kvargs"); + return -rte_errno; + } + /* Process parameters. */ + for (i = 0; pmd_mlx4_init_params[i]; ++i) { + arg_count = rte_kvargs_count(kvlist, pmd_mlx4_init_params[i]); + while (arg_count-- > 0) { + ret = rte_kvargs_process(kvlist, + pmd_mlx4_init_params[i], + (int (*)(const char *, + const char *, + void *)) + mlx4_arg_parse, + conf); + if (ret != 0) + goto free_kvlist; + } + } +free_kvlist: + rte_kvargs_free(kvlist); + return ret; +} + +/** + * Interpret RSS capabilities reported by device. + * + * This function returns the set of usable Verbs RSS hash fields, kernel + * quirks taken into account. + * + * @param ctx + * Verbs context. + * @param pd + * Verbs protection domain. + * @param device_attr_ex + * Extended device attributes to interpret. + * + * @return + * Usable RSS hash fields mask in Verbs format. 
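+ *
+ * When the device claims inner (tunnel) RSS support, a throwaway
+ * CQ/WQ/indirection table/QP chain is instantiated purely to verify
+ * that such a hash QP can actually be created; IBV_RX_HASH_INNER is
+ * cleared from the returned mask if it cannot.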
+ */ +static uint64_t +mlx4_hw_rss_sup(struct ibv_context *ctx, struct ibv_pd *pd, + struct ibv_device_attr_ex *device_attr_ex) +{ + uint64_t hw_rss_sup = device_attr_ex->rss_caps.rx_hash_fields_mask; + struct ibv_cq *cq = NULL; + struct ibv_wq *wq = NULL; + struct ibv_rwq_ind_table *ind = NULL; + struct ibv_qp *qp = NULL; + + if (!hw_rss_sup) { + WARN("no RSS capabilities reported; disabling support for UDP" + " RSS and inner VXLAN RSS"); + return IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4 | + IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6 | + IBV_RX_HASH_SRC_PORT_TCP | IBV_RX_HASH_DST_PORT_TCP; + } + if (!(hw_rss_sup & IBV_RX_HASH_INNER)) + return hw_rss_sup; + /* + * Although reported as supported, missing code in some Linux + * versions (v4.15, v4.16) prevents the creation of hash QPs with + * inner capability. + * + * There is no choice but to attempt to instantiate a temporary RSS + * context in order to confirm its support. + */ + cq = mlx4_glue->create_cq(ctx, 1, NULL, NULL, 0); + wq = cq ? mlx4_glue->create_wq + (ctx, + &(struct ibv_wq_init_attr){ + .wq_type = IBV_WQT_RQ, + .max_wr = 1, + .max_sge = 1, + .pd = pd, + .cq = cq, + }) : NULL; + ind = wq ? mlx4_glue->create_rwq_ind_table + (ctx, + &(struct ibv_rwq_ind_table_init_attr){ + .log_ind_tbl_size = 0, + .ind_tbl = &wq, + .comp_mask = 0, + }) : NULL; + qp = ind ? mlx4_glue->create_qp_ex + (ctx, + &(struct ibv_qp_init_attr_ex){ + .comp_mask = + (IBV_QP_INIT_ATTR_PD | + IBV_QP_INIT_ATTR_RX_HASH | + IBV_QP_INIT_ATTR_IND_TABLE), + .qp_type = IBV_QPT_RAW_PACKET, + .pd = pd, + .rwq_ind_tbl = ind, + .rx_hash_conf = { + .rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ, + .rx_hash_key_len = MLX4_RSS_HASH_KEY_SIZE, + .rx_hash_key = mlx4_rss_hash_key_default, + .rx_hash_fields_mask = hw_rss_sup, + }, + }) : NULL; + if (!qp) { + WARN("disabling unusable inner RSS capability due to kernel" + " quirk"); + hw_rss_sup &= ~IBV_RX_HASH_INNER; + } else { + claim_zero(mlx4_glue->destroy_qp(qp)); + } + if (ind) + claim_zero(mlx4_glue->destroy_rwq_ind_table(ind)); + if (wq) + claim_zero(mlx4_glue->destroy_wq(wq)); + if (cq) + claim_zero(mlx4_glue->destroy_cq(cq)); + return hw_rss_sup; +} + +static struct rte_pci_driver mlx4_driver; + +/** + * PMD global initialization. + * + * Independent from individual device, this function initializes global + * per-PMD data structures distinguishing primary and secondary processes. + * Hence, each initialization is called once per a process. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +mlx4_init_once(void) +{ + struct mlx4_shared_data *sd; + struct mlx4_local_data *ld = &mlx4_local_data; + int ret = 0; + + if (mlx4_init_shared_data()) + return -rte_errno; + sd = mlx4_shared_data; + MLX4_ASSERT(sd); + rte_spinlock_lock(&sd->lock); + switch (rte_eal_process_type()) { + case RTE_PROC_PRIMARY: + if (sd->init_done) + break; + LIST_INIT(&sd->mem_event_cb_list); + rte_rwlock_init(&sd->mem_event_rwlock); + rte_mem_event_callback_register("MLX4_MEM_EVENT_CB", + mlx4_mr_mem_event_cb, NULL); + ret = mlx4_mp_init_primary(); + if (ret) + goto out; + sd->init_done = 1; + break; + case RTE_PROC_SECONDARY: + if (ld->init_done) + break; + ret = mlx4_mp_init_secondary(); + if (ret) + goto out; + ++sd->secondary_cnt; + ld->init_done = 1; + break; + default: + break; + } +out: + rte_spinlock_unlock(&sd->lock); + return ret; +} + +/** + * DPDK callback to register a PCI device. + * + * This function creates an Ethernet device for each port of a given + * PCI device. 
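+ *
+ * Port selection can be restricted through the "port" device argument,
+ * for instance (illustrative EAL syntax, which may differ between DPDK
+ * versions):
+ *
+ *   -w 0000:83:00.0,port=0,mr_ext_memseg_en=1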
+ * + * @param[in] pci_drv + * PCI driver structure (mlx4_driver). + * @param[in] pci_dev + * PCI device information. + * + * @return + * 0 on success, negative errno value otherwise and rte_errno is set. + */ +static int +mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) +{ + struct ibv_device **list; + struct ibv_device *ibv_dev; + int err = 0; + struct ibv_context *attr_ctx = NULL; + struct ibv_device_attr device_attr; + struct ibv_device_attr_ex device_attr_ex; + struct mlx4_conf conf = { + .ports.present = 0, + .mr_ext_memseg_en = 1, + }; + unsigned int vf; + int i; + char ifname[IF_NAMESIZE]; + + (void)pci_drv; + err = mlx4_init_once(); + if (err) { + ERROR("unable to init PMD global data: %s", + strerror(rte_errno)); + return -rte_errno; + } + MLX4_ASSERT(pci_drv == &mlx4_driver); + list = mlx4_glue->get_device_list(&i); + if (list == NULL) { + rte_errno = errno; + MLX4_ASSERT(rte_errno); + if (rte_errno == ENOSYS) + ERROR("cannot list devices, is ib_uverbs loaded?"); + return -rte_errno; + } + MLX4_ASSERT(i >= 0); + /* + * For each listed device, check related sysfs entry against + * the provided PCI ID. + */ + while (i != 0) { + struct rte_pci_addr pci_addr; + + --i; + DEBUG("checking device \"%s\"", list[i]->name); + if (mlx4_ibv_device_to_pci_addr(list[i], &pci_addr)) + continue; + if ((pci_dev->addr.domain != pci_addr.domain) || + (pci_dev->addr.bus != pci_addr.bus) || + (pci_dev->addr.devid != pci_addr.devid) || + (pci_dev->addr.function != pci_addr.function)) + continue; + vf = (pci_dev->id.device_id == + PCI_DEVICE_ID_MELLANOX_CONNECTX3VF); + INFO("PCI information matches, using device \"%s\" (VF: %s)", + list[i]->name, (vf ? "true" : "false")); + attr_ctx = mlx4_glue->open_device(list[i]); + err = errno; + break; + } + if (attr_ctx == NULL) { + mlx4_glue->free_device_list(list); + switch (err) { + case 0: + rte_errno = ENODEV; + ERROR("cannot access device, is mlx4_ib loaded?"); + return -rte_errno; + case EINVAL: + rte_errno = EINVAL; + ERROR("cannot use device, are drivers up to date?"); + return -rte_errno; + } + MLX4_ASSERT(err > 0); + rte_errno = err; + return -rte_errno; + } + ibv_dev = list[i]; + DEBUG("device opened"); + if (mlx4_glue->query_device(attr_ctx, &device_attr)) { + err = ENODEV; + goto error; + } + INFO("%u port(s) detected", device_attr.phys_port_cnt); + conf.ports.present |= (UINT64_C(1) << device_attr.phys_port_cnt) - 1; + if (mlx4_args(pci_dev->device.devargs, &conf)) { + ERROR("failed to process device arguments"); + err = EINVAL; + goto error; + } + /* Use all ports when none are defined */ + if (!conf.ports.enabled) + conf.ports.enabled = conf.ports.present; + /* Retrieve extended device attributes. */ + if (mlx4_glue->query_device_ex(attr_ctx, NULL, &device_attr_ex)) { + err = ENODEV; + goto error; + } + MLX4_ASSERT(device_attr.max_sge >= MLX4_MAX_SGE); + for (i = 0; i < device_attr.phys_port_cnt; i++) { + uint32_t port = i + 1; /* ports are indexed from one */ + struct ibv_context *ctx = NULL; + struct ibv_port_attr port_attr; + struct ibv_pd *pd = NULL; + struct mlx4_priv *priv = NULL; + struct rte_eth_dev *eth_dev = NULL; + struct rte_ether_addr mac; + char name[RTE_ETH_NAME_MAX_LEN]; + + /* If port is not enabled, skip. 
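+		 * A port excluded through the "port" devarg therefore
+		 * never gets an rte_eth_dev allocated for it.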
*/ + if (!(conf.ports.enabled & (1 << i))) + continue; + DEBUG("using port %u", port); + ctx = mlx4_glue->open_device(ibv_dev); + if (ctx == NULL) { + err = ENODEV; + goto port_error; + } + snprintf(name, sizeof(name), "%s port %u", + mlx4_glue->get_device_name(ibv_dev), port); + if (rte_eal_process_type() == RTE_PROC_SECONDARY) { + eth_dev = rte_eth_dev_attach_secondary(name); + if (eth_dev == NULL) { + ERROR("can not attach rte ethdev"); + rte_errno = ENOMEM; + err = rte_errno; + goto error; + } + priv = eth_dev->data->dev_private; + if (!priv->verbs_alloc_ctx.enabled) { + ERROR("secondary process is not supported" + " due to lack of external allocator" + " from Verbs"); + rte_errno = ENOTSUP; + err = rte_errno; + goto error; + } + eth_dev->device = &pci_dev->device; + eth_dev->dev_ops = &mlx4_dev_sec_ops; + err = mlx4_proc_priv_init(eth_dev); + if (err) + goto error; + /* Receive command fd from primary process. */ + err = mlx4_mp_req_verbs_cmd_fd(eth_dev); + if (err < 0) { + err = rte_errno; + goto error; + } + /* Remap UAR for Tx queues. */ + err = mlx4_tx_uar_init_secondary(eth_dev, err); + if (err) { + err = rte_errno; + goto error; + } + /* + * Ethdev pointer is still required as input since + * the primary device is not accessible from the + * secondary process. + */ + eth_dev->tx_pkt_burst = mlx4_tx_burst; + eth_dev->rx_pkt_burst = mlx4_rx_burst; + claim_zero(mlx4_glue->close_device(ctx)); + rte_eth_copy_pci_info(eth_dev, pci_dev); + rte_eth_dev_probing_finish(eth_dev); + continue; + } + /* Check port status. */ + err = mlx4_glue->query_port(ctx, port, &port_attr); + if (err) { + err = ENODEV; + ERROR("port query failed: %s", strerror(err)); + goto port_error; + } + if (port_attr.link_layer != IBV_LINK_LAYER_ETHERNET) { + err = ENOTSUP; + ERROR("port %d is not configured in Ethernet mode", + port); + goto port_error; + } + if (port_attr.state != IBV_PORT_ACTIVE) + DEBUG("port %d is not active: \"%s\" (%d)", + port, mlx4_glue->port_state_str(port_attr.state), + port_attr.state); + /* Make asynchronous FD non-blocking to handle interrupts. */ + err = mlx4_fd_set_non_blocking(ctx->async_fd); + if (err) { + ERROR("cannot make asynchronous FD non-blocking: %s", + strerror(err)); + goto port_error; + } + /* Allocate protection domain. */ + pd = mlx4_glue->alloc_pd(ctx); + if (pd == NULL) { + err = ENOMEM; + ERROR("PD allocation failure"); + goto port_error; + } + /* from rte_ethdev.c */ + priv = rte_zmalloc("ethdev private structure", + sizeof(*priv), + RTE_CACHE_LINE_SIZE); + if (priv == NULL) { + err = ENOMEM; + ERROR("priv allocation failure"); + goto port_error; + } + priv->ctx = ctx; + priv->device_attr = device_attr; + priv->port = port; + priv->pd = pd; + priv->mtu = RTE_ETHER_MTU; + priv->vf = vf; + priv->hw_csum = !!(device_attr.device_cap_flags & + IBV_DEVICE_RAW_IP_CSUM); + DEBUG("checksum offloading is %ssupported", + (priv->hw_csum ? "" : "not ")); + /* Only ConnectX-3 Pro supports tunneling. */ + priv->hw_csum_l2tun = + priv->hw_csum && + (device_attr.vendor_part_id == + PCI_DEVICE_ID_MELLANOX_CONNECTX3PRO); + DEBUG("L2 tunnel checksum offloads are %ssupported", + priv->hw_csum_l2tun ? 
"" : "not "); + priv->hw_rss_sup = mlx4_hw_rss_sup(priv->ctx, priv->pd, + &device_attr_ex); + DEBUG("supported RSS hash fields mask: %016" PRIx64, + priv->hw_rss_sup); + priv->hw_rss_max_qps = + device_attr_ex.rss_caps.max_rwq_indirection_table_size; + DEBUG("MAX RSS queues %d", priv->hw_rss_max_qps); + priv->hw_fcs_strip = !!(device_attr_ex.raw_packet_caps & + IBV_RAW_PACKET_CAP_SCATTER_FCS); + DEBUG("FCS stripping toggling is %ssupported", + priv->hw_fcs_strip ? "" : "not "); + priv->tso = + ((device_attr_ex.tso_caps.max_tso > 0) && + (device_attr_ex.tso_caps.supported_qpts & + (1 << IBV_QPT_RAW_PACKET))); + if (priv->tso) + priv->tso_max_payload_sz = + device_attr_ex.tso_caps.max_tso; + DEBUG("TSO is %ssupported", + priv->tso ? "" : "not "); + priv->mr_ext_memseg_en = conf.mr_ext_memseg_en; + /* Configure the first MAC address by default. */ + err = mlx4_get_mac(priv, &mac.addr_bytes); + if (err) { + ERROR("cannot get MAC address, is mlx4_en loaded?" + " (error: %s)", strerror(err)); + goto port_error; + } + INFO("port %u MAC address is %02x:%02x:%02x:%02x:%02x:%02x", + priv->port, + mac.addr_bytes[0], mac.addr_bytes[1], + mac.addr_bytes[2], mac.addr_bytes[3], + mac.addr_bytes[4], mac.addr_bytes[5]); + /* Register MAC address. */ + priv->mac[0] = mac; + + if (mlx4_get_ifname(priv, &ifname) == 0) { + DEBUG("port %u ifname is \"%s\"", + priv->port, ifname); + priv->if_index = if_nametoindex(ifname); + } else { + DEBUG("port %u ifname is unknown", priv->port); + } + + /* Get actual MTU if possible. */ + mlx4_mtu_get(priv, &priv->mtu); + DEBUG("port %u MTU is %u", priv->port, priv->mtu); + eth_dev = rte_eth_dev_allocate(name); + if (eth_dev == NULL) { + err = ENOMEM; + ERROR("can not allocate rte ethdev"); + goto port_error; + } + eth_dev->data->dev_private = priv; + eth_dev->data->mac_addrs = priv->mac; + eth_dev->device = &pci_dev->device; + rte_eth_copy_pci_info(eth_dev, pci_dev); + /* Initialize local interrupt handle for current port. */ + priv->intr_handle = (struct rte_intr_handle){ + .fd = -1, + .type = RTE_INTR_HANDLE_EXT, + }; + /* + * Override ethdev interrupt handle pointer with private + * handle instead of that of the parent PCI device used by + * default. This prevents it from being shared between all + * ports of the same PCI device since each of them is + * associated its own Verbs context. + * + * Rx interrupts in particular require this as the PMD has + * no control over the registration of queue interrupts + * besides setting up eth_dev->intr_handle, the rest is + * handled by rte_intr_rx_ctl(). + */ + eth_dev->intr_handle = &priv->intr_handle; + priv->dev_data = eth_dev->data; + eth_dev->dev_ops = &mlx4_dev_ops; +#ifdef HAVE_IBV_MLX4_BUF_ALLOCATORS + /* Hint libmlx4 to use PMD allocator for data plane resources */ + struct mlx4dv_ctx_allocators alctr = { + .alloc = &mlx4_alloc_verbs_buf, + .free = &mlx4_free_verbs_buf, + .data = priv, + }; + err = mlx4_glue->dv_set_context_attr + (ctx, MLX4DV_SET_CTX_ATTR_BUF_ALLOCATORS, + (void *)((uintptr_t)&alctr)); + if (err) + WARN("Verbs external allocator is not supported"); + else + priv->verbs_alloc_ctx.enabled = 1; +#endif + /* Bring Ethernet device up. */ + DEBUG("forcing Ethernet interface up"); + mlx4_dev_set_link_up(eth_dev); + /* Update link status once if waiting for LSC. */ + if (eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) + mlx4_link_update(eth_dev, 0); + /* + * Once the device is added to the list of memory event + * callback, its global MR cache table cannot be expanded + * on the fly because of deadlock. 
If it overflows, lookup + * should be done by searching MR list linearly, which is slow. + */ + err = mlx4_mr_btree_init(&priv->mr.cache, + MLX4_MR_BTREE_CACHE_N * 2, + eth_dev->device->numa_node); + if (err) { + /* rte_errno is already set. */ + goto port_error; + } + /* Add device to memory callback list. */ + rte_rwlock_write_lock(&mlx4_shared_data->mem_event_rwlock); + LIST_INSERT_HEAD(&mlx4_shared_data->mem_event_cb_list, + priv, mem_event_cb); + rte_rwlock_write_unlock(&mlx4_shared_data->mem_event_rwlock); + rte_eth_dev_probing_finish(eth_dev); + continue; +port_error: + rte_free(priv); + if (eth_dev != NULL) + eth_dev->data->dev_private = NULL; + if (pd) + claim_zero(mlx4_glue->dealloc_pd(pd)); + if (ctx) + claim_zero(mlx4_glue->close_device(ctx)); + if (eth_dev != NULL) { + /* mac_addrs must not be freed because part of dev_private */ + eth_dev->data->mac_addrs = NULL; + rte_eth_dev_release_port(eth_dev); + } + break; + } + /* + * XXX if something went wrong in the loop above, there is a resource + * leak (ctx, pd, priv, dpdk ethdev) but we can do nothing about it as + * long as the dpdk does not provide a way to deallocate a ethdev and a + * way to enumerate the registered ethdevs to free the previous ones. + */ +error: + if (attr_ctx) + claim_zero(mlx4_glue->close_device(attr_ctx)); + if (list) + mlx4_glue->free_device_list(list); + if (err) + rte_errno = err; + return -err; +} + +static const struct rte_pci_id mlx4_pci_id_map[] = { + { + RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, + PCI_DEVICE_ID_MELLANOX_CONNECTX3) + }, + { + RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, + PCI_DEVICE_ID_MELLANOX_CONNECTX3PRO) + }, + { + RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, + PCI_DEVICE_ID_MELLANOX_CONNECTX3VF) + }, + { + .vendor_id = 0 + } +}; + +static struct rte_pci_driver mlx4_driver = { + .driver = { + .name = MLX4_DRIVER_NAME + }, + .id_table = mlx4_pci_id_map, + .probe = mlx4_pci_probe, + .drv_flags = RTE_PCI_DRV_INTR_LSC | RTE_PCI_DRV_INTR_RMV, +}; + +#ifdef RTE_IBVERBS_LINK_DLOPEN + +/** + * Suffix RTE_EAL_PMD_PATH with "-glue". + * + * This function performs a sanity check on RTE_EAL_PMD_PATH before + * suffixing its last component. + * + * @param buf[out] + * Output buffer, should be large enough otherwise NULL is returned. + * @param size + * Size of @p out. + * + * @return + * Pointer to @p buf or @p NULL in case suffix cannot be appended. + */ +static char * +mlx4_glue_path(char *buf, size_t size) +{ + static const char *const bad[] = { "/", ".", "..", NULL }; + const char *path = RTE_EAL_PMD_PATH; + size_t len = strlen(path); + size_t off; + int i; + + while (len && path[len - 1] == '/') + --len; + for (off = len; off && path[off - 1] != '/'; --off) + ; + for (i = 0; bad[i]; ++i) + if (!strncmp(path + off, bad[i], (int)(len - off))) + goto error; + i = snprintf(buf, size, "%.*s-glue", (int)len, path); + if (i == -1 || (size_t)i >= size) + goto error; + return buf; +error: + ERROR("unable to append \"-glue\" to last component of" + " RTE_EAL_PMD_PATH (\"" RTE_EAL_PMD_PATH "\")," + " please re-configure DPDK"); + return NULL; +} + +/** + * Initialization routine for run-time dependency on rdma-core. + */ +static int +mlx4_glue_init(void) +{ + char glue_path[sizeof(RTE_EAL_PMD_PATH) - 1 + sizeof("-glue")]; + const char *path[] = { + /* + * A basic security check is necessary before trusting + * MLX4_GLUE_PATH, which may override RTE_EAL_PMD_PATH. + */ + (geteuid() == getuid() && getegid() == getgid() ? 
+ getenv("MLX4_GLUE_PATH") : NULL), + /* + * When RTE_EAL_PMD_PATH is set, use its glue-suffixed + * variant, otherwise let dlopen() look up libraries on its + * own. + */ + (*RTE_EAL_PMD_PATH ? + mlx4_glue_path(glue_path, sizeof(glue_path)) : ""), + }; + unsigned int i = 0; + void *handle = NULL; + void **sym; + const char *dlmsg; + + while (!handle && i != RTE_DIM(path)) { + const char *end; + size_t len; + int ret; + + if (!path[i]) { + ++i; + continue; + } + end = strpbrk(path[i], ":;"); + if (!end) + end = path[i] + strlen(path[i]); + len = end - path[i]; + ret = 0; + do { + char name[ret + 1]; + + ret = snprintf(name, sizeof(name), "%.*s%s" MLX4_GLUE, + (int)len, path[i], + (!len || *(end - 1) == '/') ? "" : "/"); + if (ret == -1) + break; + if (sizeof(name) != (size_t)ret + 1) + continue; + DEBUG("looking for rdma-core glue as \"%s\"", name); + handle = dlopen(name, RTLD_LAZY); + break; + } while (1); + path[i] = end + 1; + if (!*end) + ++i; + } + if (!handle) { + rte_errno = EINVAL; + dlmsg = dlerror(); + if (dlmsg) + WARN("cannot load glue library: %s", dlmsg); + goto glue_error; + } + sym = dlsym(handle, "mlx4_glue"); + if (!sym || !*sym) { + rte_errno = EINVAL; + dlmsg = dlerror(); + if (dlmsg) + ERROR("cannot resolve glue symbol: %s", dlmsg); + goto glue_error; + } + mlx4_glue = *sym; + return 0; +glue_error: + if (handle) + dlclose(handle); + WARN("cannot initialize PMD due to missing run-time" + " dependency on rdma-core libraries (libibverbs," + " libmlx4)"); + return -rte_errno; +} + +#endif + +/** + * Driver initialization routine. + */ +RTE_INIT(rte_mlx4_pmd_init) +{ + /* Initialize driver log type. */ + mlx4_logtype = rte_log_register("pmd.net.mlx4"); + if (mlx4_logtype >= 0) + rte_log_set_level(mlx4_logtype, RTE_LOG_NOTICE); + + /* + * MLX4_DEVICE_FATAL_CLEANUP tells ibv_destroy functions we + * want to get success errno value in case of calling them + * when the device was removed. + */ + setenv("MLX4_DEVICE_FATAL_CLEANUP", "1", 1); + /* + * RDMAV_HUGEPAGES_SAFE tells ibv_fork_init() we intend to use + * huge pages. Calling ibv_fork_init() during init allows + * applications to use fork() safely for purposes other than + * using this PMD, which is not supported in forked processes. + */ + setenv("RDMAV_HUGEPAGES_SAFE", "1", 1); +#ifdef RTE_IBVERBS_LINK_DLOPEN + if (mlx4_glue_init()) + return; + MLX4_ASSERT(mlx4_glue); +#endif +#ifdef RTE_LIBRTE_MLX4_DEBUG + /* Glue structure must not contain any NULL pointers. */ + { + unsigned int i; + + for (i = 0; i != sizeof(*mlx4_glue) / sizeof(void *); ++i) + MLX4_ASSERT(((const void *const *)mlx4_glue)[i]); + } +#endif + if (strcmp(mlx4_glue->version, MLX4_GLUE_VERSION)) { + ERROR("rdma-core glue \"%s\" mismatch: \"%s\" is required", + mlx4_glue->version, MLX4_GLUE_VERSION); + return; + } + mlx4_glue->fork_init(); + rte_pci_register(&mlx4_driver); +} + +RTE_PMD_EXPORT_NAME(net_mlx4, __COUNTER__); +RTE_PMD_REGISTER_PCI_TABLE(net_mlx4, mlx4_pci_id_map); +RTE_PMD_REGISTER_KMOD_DEP(net_mlx4, + "* ib_uverbs & mlx4_en & mlx4_core & mlx4_ib"); diff --git a/src/spdk/dpdk/drivers/net/mlx4/mlx4.h b/src/spdk/dpdk/drivers/net/mlx4/mlx4.h new file mode 100644 index 000000000..c6cb29493 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/mlx4/mlx4.h @@ -0,0 +1,251 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2012 6WIND S.A. + * Copyright 2012 Mellanox Technologies, Ltd + */ + +#ifndef RTE_PMD_MLX4_H_ +#define RTE_PMD_MLX4_H_ + +#include +#include +#include + +/* Verbs headers do not support -pedantic. 
*/ +#ifdef PEDANTIC +#pragma GCC diagnostic ignored "-Wpedantic" +#endif +#include +#ifdef PEDANTIC +#pragma GCC diagnostic error "-Wpedantic" +#endif + +#include +#include +#include +#include +#include + +#include "mlx4_mr.h" + +#ifndef IBV_RX_HASH_INNER +/** This is not necessarily defined by supported RDMA core versions. */ +#define IBV_RX_HASH_INNER (1ull << 31) +#endif /* IBV_RX_HASH_INNER */ + +/** Maximum number of simultaneous MAC addresses. This value is arbitrary. */ +#define MLX4_MAX_MAC_ADDRESSES 128 + +/** Request send completion once in every 64 sends, might be less. */ +#define MLX4_PMD_TX_PER_COMP_REQ 64 + +/** Maximum size for inline data. */ +#define MLX4_PMD_MAX_INLINE 0 + +/** Fixed RSS hash key size in bytes. Cannot be modified. */ +#define MLX4_RSS_HASH_KEY_SIZE 40 + +/** Interrupt alarm timeout value in microseconds. */ +#define MLX4_INTR_ALARM_TIMEOUT 100000 + +/* Maximum packet headers size (L2+L3+L4) for TSO. */ +#define MLX4_MAX_TSO_HEADER 192 + +/** Port parameter. */ +#define MLX4_PMD_PORT_KVARG "port" + +/** Enable extending memsegs when creating a MR. */ +#define MLX4_MR_EXT_MEMSEG_EN_KVARG "mr_ext_memseg_en" + +enum { + PCI_VENDOR_ID_MELLANOX = 0x15b3, +}; + +enum { + PCI_DEVICE_ID_MELLANOX_CONNECTX3 = 0x1003, + PCI_DEVICE_ID_MELLANOX_CONNECTX3VF = 0x1004, + PCI_DEVICE_ID_MELLANOX_CONNECTX3PRO = 0x1007, +}; + +/* Request types for IPC. */ +enum mlx4_mp_req_type { + MLX4_MP_REQ_VERBS_CMD_FD = 1, + MLX4_MP_REQ_CREATE_MR, + MLX4_MP_REQ_START_RXTX, + MLX4_MP_REQ_STOP_RXTX, +}; + +/* Pameters for IPC. */ +struct mlx4_mp_param { + enum mlx4_mp_req_type type; + int port_id; + int result; + RTE_STD_C11 + union { + uintptr_t addr; /* MLX4_MP_REQ_CREATE_MR */ + } args; +}; + +/** Request timeout for IPC. */ +#define MLX4_MP_REQ_TIMEOUT_SEC 5 + +/** Key string for IPC. */ +#define MLX4_MP_NAME "net_mlx4_mp" + +/** Driver name reported to lower layers and used in log output. */ +#define MLX4_DRIVER_NAME "net_mlx4" + +struct mlx4_drop; +struct mlx4_rss; +struct rxq; +struct txq; +struct rte_flow; + +/** + * Type of object being allocated. + */ +enum mlx4_verbs_alloc_type { + MLX4_VERBS_ALLOC_TYPE_NONE, + MLX4_VERBS_ALLOC_TYPE_TX_QUEUE, + MLX4_VERBS_ALLOC_TYPE_RX_QUEUE, +}; + +/** + * Verbs allocator needs a context to know in the callback which kind of + * resources it is allocating. + */ +struct mlx4_verbs_alloc_ctx { + int enabled; + enum mlx4_verbs_alloc_type type; /* Kind of object being allocated. */ + const void *obj; /* Pointer to the DPDK object. */ +}; + +LIST_HEAD(mlx4_dev_list, mlx4_priv); +LIST_HEAD(mlx4_mr_list, mlx4_mr); + +/* Shared data between primary and secondary processes. */ +struct mlx4_shared_data { + rte_spinlock_t lock; + /* Global spinlock for primary and secondary processes. */ + int init_done; /* Whether primary has done initialization. */ + unsigned int secondary_cnt; /* Number of secondary processes init'd. */ + struct mlx4_dev_list mem_event_cb_list; + rte_rwlock_t mem_event_rwlock; +}; + +/* Per-process data structure, not visible to other processes. */ +struct mlx4_local_data { + int init_done; /* Whether a secondary has done initialization. */ +}; + +extern struct mlx4_shared_data *mlx4_shared_data; + +/* Per-process private structure. */ +struct mlx4_proc_priv { + size_t uar_table_sz; + /* Size of UAR register table. */ + void *uar_table[]; + /* Table of UAR registers for each process. */ +}; + +#define MLX4_PROC_PRIV(port_id) \ + ((struct mlx4_proc_priv *)rte_eth_devices[port_id].process_private) + +/** Private data structure. 
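+ *
+ * One instance is allocated per physical port at probe time and linked
+ * to its rte_eth_dev through dev->data->dev_private; the PORT_ID() and
+ * ETH_DEV() helpers defined below convert back and forth.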
*/ +struct mlx4_priv { + LIST_ENTRY(mlx4_priv) mem_event_cb; + /**< Called by memory event callback. */ + struct rte_eth_dev_data *dev_data; /* Pointer to device data. */ + struct ibv_context *ctx; /**< Verbs context. */ + struct ibv_device_attr device_attr; /**< Device properties. */ + struct ibv_pd *pd; /**< Protection Domain. */ + /* Device properties. */ + unsigned int if_index; /**< Associated network device index */ + uint16_t mtu; /**< Configured MTU. */ + uint8_t port; /**< Physical port number. */ + uint32_t started:1; /**< Device started, flows enabled. */ + uint32_t vf:1; /**< This is a VF device. */ + uint32_t intr_alarm:1; /**< An interrupt alarm is scheduled. */ + uint32_t isolated:1; /**< Toggle isolated mode. */ + uint32_t rss_init:1; /**< Common RSS context is initialized. */ + uint32_t hw_csum:1; /**< Checksum offload is supported. */ + uint32_t hw_csum_l2tun:1; /**< Checksum support for L2 tunnels. */ + uint32_t hw_fcs_strip:1; /**< FCS stripping toggling is supported. */ + uint32_t tso:1; /**< Transmit segmentation offload is supported. */ + uint32_t mr_ext_memseg_en:1; + /** Whether memseg should be extended for MR creation. */ + uint32_t tso_max_payload_sz; /**< Max supported TSO payload size. */ + uint32_t hw_rss_max_qps; /**< Max Rx Queues supported by RSS. */ + uint64_t hw_rss_sup; /**< Supported RSS hash fields (Verbs format). */ + struct rte_intr_handle intr_handle; /**< Port interrupt handle. */ + struct mlx4_drop *drop; /**< Shared resources for drop flow rules. */ + struct { + uint32_t dev_gen; /* Generation number to flush local caches. */ + rte_rwlock_t rwlock; /* MR Lock. */ + struct mlx4_mr_btree cache; /* Global MR cache table. */ + struct mlx4_mr_list mr_list; /* Registered MR list. */ + struct mlx4_mr_list mr_free_list; /* Freed MR list. */ + } mr; + LIST_HEAD(, mlx4_rss) rss; /**< Shared targets for Rx flow rules. */ + LIST_HEAD(, rte_flow) flows; /**< Configured flow rule handles. */ + struct rte_ether_addr mac[MLX4_MAX_MAC_ADDRESSES]; + /**< Configured MAC addresses. Unused entries are zeroed. */ + uint32_t mac_mc; /**< Number of trailing multicast entries in mac[]. */ + struct mlx4_verbs_alloc_ctx verbs_alloc_ctx; + /**< Context for Verbs allocator. 
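+	 * mlx4_alloc_verbs_buf() reads the type/obj fields to pick the
+	 * NUMA socket of the Tx/Rx queue being created.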
*/ +}; + +#define PORT_ID(priv) ((priv)->dev_data->port_id) +#define ETH_DEV(priv) (&rte_eth_devices[PORT_ID(priv)]) + +/* mlx4_ethdev.c */ + +int mlx4_get_ifname(const struct mlx4_priv *priv, char (*ifname)[IF_NAMESIZE]); +int mlx4_get_mac(struct mlx4_priv *priv, uint8_t (*mac)[RTE_ETHER_ADDR_LEN]); +int mlx4_mtu_get(struct mlx4_priv *priv, uint16_t *mtu); +int mlx4_mtu_set(struct rte_eth_dev *dev, uint16_t mtu); +int mlx4_dev_set_link_down(struct rte_eth_dev *dev); +int mlx4_dev_set_link_up(struct rte_eth_dev *dev); +int mlx4_promiscuous_enable(struct rte_eth_dev *dev); +int mlx4_promiscuous_disable(struct rte_eth_dev *dev); +int mlx4_allmulticast_enable(struct rte_eth_dev *dev); +int mlx4_allmulticast_disable(struct rte_eth_dev *dev); +void mlx4_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index); +int mlx4_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr, + uint32_t index, uint32_t vmdq); +int mlx4_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr); +int mlx4_set_mc_addr_list(struct rte_eth_dev *dev, struct rte_ether_addr *list, + uint32_t num); +int mlx4_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on); +int mlx4_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats); +int mlx4_stats_reset(struct rte_eth_dev *dev); +int mlx4_fw_version_get(struct rte_eth_dev *dev, char *fw_ver, size_t fw_size); +int mlx4_dev_infos_get(struct rte_eth_dev *dev, + struct rte_eth_dev_info *info); +int mlx4_link_update(struct rte_eth_dev *dev, int wait_to_complete); +int mlx4_flow_ctrl_get(struct rte_eth_dev *dev, + struct rte_eth_fc_conf *fc_conf); +int mlx4_flow_ctrl_set(struct rte_eth_dev *dev, + struct rte_eth_fc_conf *fc_conf); +const uint32_t *mlx4_dev_supported_ptypes_get(struct rte_eth_dev *dev); +int mlx4_is_removed(struct rte_eth_dev *dev); + +/* mlx4_intr.c */ + +int mlx4_intr_uninstall(struct mlx4_priv *priv); +int mlx4_intr_install(struct mlx4_priv *priv); +int mlx4_rxq_intr_enable(struct mlx4_priv *priv); +void mlx4_rxq_intr_disable(struct mlx4_priv *priv); +int mlx4_rx_intr_disable(struct rte_eth_dev *dev, uint16_t idx); +int mlx4_rx_intr_enable(struct rte_eth_dev *dev, uint16_t idx); + +/* mlx4_mp.c */ +void mlx4_mp_req_start_rxtx(struct rte_eth_dev *dev); +void mlx4_mp_req_stop_rxtx(struct rte_eth_dev *dev); +int mlx4_mp_req_mr_create(struct rte_eth_dev *dev, uintptr_t addr); +int mlx4_mp_req_verbs_cmd_fd(struct rte_eth_dev *dev); +int mlx4_mp_init_primary(void); +void mlx4_mp_uninit_primary(void); +int mlx4_mp_init_secondary(void); +void mlx4_mp_uninit_secondary(void); + +#endif /* RTE_PMD_MLX4_H_ */ diff --git a/src/spdk/dpdk/drivers/net/mlx4/mlx4_ethdev.c b/src/spdk/dpdk/drivers/net/mlx4/mlx4_ethdev.c new file mode 100644 index 000000000..9ff05c673 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/mlx4/mlx4_ethdev.c @@ -0,0 +1,990 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2017 6WIND S.A. + * Copyright 2017 Mellanox Technologies, Ltd + */ + +/** + * @file + * Miscellaneous control operations for mlx4 driver. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* Verbs headers do not support -pedantic. 
*/ +#ifdef PEDANTIC +#pragma GCC diagnostic ignored "-Wpedantic" +#endif +#include +#ifdef PEDANTIC +#pragma GCC diagnostic error "-Wpedantic" +#endif + +#include +#include +#include +#include +#include +#include +#include + +#include "mlx4.h" +#include "mlx4_flow.h" +#include "mlx4_glue.h" +#include "mlx4_rxtx.h" +#include "mlx4_utils.h" + +/** + * Get interface name from private structure. + * + * @param[in] priv + * Pointer to private structure. + * @param[out] ifname + * Interface name output buffer. + * + * @return + * 0 on success, negative errno value otherwise and rte_errno is set. + */ +int +mlx4_get_ifname(const struct mlx4_priv *priv, char (*ifname)[IF_NAMESIZE]) +{ + DIR *dir; + struct dirent *dent; + unsigned int dev_type = 0; + unsigned int dev_port_prev = ~0u; + char match[IF_NAMESIZE] = ""; + + { + MKSTR(path, "%s/device/net", priv->ctx->device->ibdev_path); + + dir = opendir(path); + if (dir == NULL) { + rte_errno = errno; + return -rte_errno; + } + } + while ((dent = readdir(dir)) != NULL) { + char *name = dent->d_name; + FILE *file; + unsigned int dev_port; + int r; + + if ((name[0] == '.') && + ((name[1] == '\0') || + ((name[1] == '.') && (name[2] == '\0')))) + continue; + + MKSTR(path, "%s/device/net/%s/%s", + priv->ctx->device->ibdev_path, name, + (dev_type ? "dev_id" : "dev_port")); + + file = fopen(path, "rb"); + if (file == NULL) { + if (errno != ENOENT) + continue; + /* + * Switch to dev_id when dev_port does not exist as + * is the case with Linux kernel versions < 3.15. + */ +try_dev_id: + match[0] = '\0'; + if (dev_type) + break; + dev_type = 1; + dev_port_prev = ~0u; + rewinddir(dir); + continue; + } + r = fscanf(file, (dev_type ? "%x" : "%u"), &dev_port); + fclose(file); + if (r != 1) + continue; + /* + * Switch to dev_id when dev_port returns the same value for + * all ports. May happen when using a MOFED release older than + * 3.0 with a Linux kernel >= 3.15. + */ + if (dev_port == dev_port_prev) + goto try_dev_id; + dev_port_prev = dev_port; + if (dev_port == (priv->port - 1u)) + strlcpy(match, name, sizeof(match)); + } + closedir(dir); + if (match[0] == '\0') { + rte_errno = ENODEV; + return -rte_errno; + } + strncpy(*ifname, match, sizeof(*ifname)); + return 0; +} + +/** + * Perform ifreq ioctl() on associated Ethernet device. + * + * @param[in] priv + * Pointer to private structure. + * @param req + * Request number to pass to ioctl(). + * @param[out] ifr + * Interface request structure output buffer. + * + * @return + * 0 on success, negative errno value otherwise and rte_errno is set. + */ +static int +mlx4_ifreq(const struct mlx4_priv *priv, int req, struct ifreq *ifr) +{ + int sock = socket(PF_INET, SOCK_DGRAM, IPPROTO_IP); + int ret; + + if (sock == -1) { + rte_errno = errno; + return -rte_errno; + } + ret = mlx4_get_ifname(priv, &ifr->ifr_name); + if (!ret && ioctl(sock, req, ifr) == -1) { + rte_errno = errno; + ret = -rte_errno; + } + close(sock); + return ret; +} + +/** + * Get MAC address by querying netdevice. + * + * @param[in] priv + * Pointer to private structure. + * @param[out] mac + * MAC address output buffer. + * + * @return + * 0 on success, negative errno value otherwise and rte_errno is set. + */ +int +mlx4_get_mac(struct mlx4_priv *priv, uint8_t (*mac)[RTE_ETHER_ADDR_LEN]) +{ + struct ifreq request; + int ret = mlx4_ifreq(priv, SIOCGIFHWADDR, &request); + + if (ret) + return ret; + memcpy(mac, request.ifr_hwaddr.sa_data, RTE_ETHER_ADDR_LEN); + return 0; +} + +/** + * Get device MTU. 
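+ *
+ * The value is read from the associated kernel netdevice with a
+ * SIOCGIFMTU ioctl issued through mlx4_ifreq().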
+ * + * @param priv + * Pointer to private structure. + * @param[out] mtu + * MTU value output buffer. + * + * @return + * 0 on success, negative errno value otherwise and rte_errno is set. + */ +int +mlx4_mtu_get(struct mlx4_priv *priv, uint16_t *mtu) +{ + struct ifreq request; + int ret = mlx4_ifreq(priv, SIOCGIFMTU, &request); + + if (ret) + return ret; + *mtu = request.ifr_mtu; + return 0; +} + +/** + * DPDK callback to change the MTU. + * + * @param priv + * Pointer to Ethernet device structure. + * @param mtu + * MTU value to set. + * + * @return + * 0 on success, negative errno value otherwise and rte_errno is set. + */ +int +mlx4_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) +{ + struct mlx4_priv *priv = dev->data->dev_private; + struct ifreq request = { .ifr_mtu = mtu, }; + int ret = mlx4_ifreq(priv, SIOCSIFMTU, &request); + + if (ret) + return ret; + priv->mtu = mtu; + return 0; +} + +/** + * Set device flags. + * + * @param priv + * Pointer to private structure. + * @param keep + * Bitmask for flags that must remain untouched. + * @param flags + * Bitmask for flags to modify. + * + * @return + * 0 on success, negative errno value otherwise and rte_errno is set. + */ +static int +mlx4_set_flags(struct mlx4_priv *priv, unsigned int keep, unsigned int flags) +{ + struct ifreq request; + int ret = mlx4_ifreq(priv, SIOCGIFFLAGS, &request); + + if (ret) + return ret; + request.ifr_flags &= keep; + request.ifr_flags |= flags & ~keep; + return mlx4_ifreq(priv, SIOCSIFFLAGS, &request); +} + +/** + * Change the link state (UP / DOWN). + * + * @param priv + * Pointer to Ethernet device private data. + * @param up + * Nonzero for link up, otherwise link down. + * + * @return + * 0 on success, negative errno value otherwise and rte_errno is set. + */ +static int +mlx4_dev_set_link(struct mlx4_priv *priv, int up) +{ + int err; + + if (up) { + err = mlx4_set_flags(priv, ~IFF_UP, IFF_UP); + if (err) + return err; + } else { + err = mlx4_set_flags(priv, ~IFF_UP, ~IFF_UP); + if (err) + return err; + } + return 0; +} + +/** + * DPDK callback to bring the link DOWN. + * + * @param dev + * Pointer to Ethernet device structure. + * + * @return + * 0 on success, negative errno value otherwise and rte_errno is set. + */ +int +mlx4_dev_set_link_down(struct rte_eth_dev *dev) +{ + struct mlx4_priv *priv = dev->data->dev_private; + + return mlx4_dev_set_link(priv, 0); +} + +/** + * DPDK callback to bring the link UP. + * + * @param dev + * Pointer to Ethernet device structure. + * + * @return + * 0 on success, negative errno value otherwise and rte_errno is set. + */ +int +mlx4_dev_set_link_up(struct rte_eth_dev *dev) +{ + struct mlx4_priv *priv = dev->data->dev_private; + + return mlx4_dev_set_link(priv, 1); +} + +/** + * Supported Rx mode toggles. + * + * Even and odd values respectively stand for off and on. + */ +enum rxmode_toggle { + RXMODE_TOGGLE_PROMISC_OFF, + RXMODE_TOGGLE_PROMISC_ON, + RXMODE_TOGGLE_ALLMULTI_OFF, + RXMODE_TOGGLE_ALLMULTI_ON, +}; + +/** + * Helper function to toggle promiscuous and all multicast modes. + * + * @param dev + * Pointer to Ethernet device structure. + * @param toggle + * Toggle to set. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. 
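+ *
+ * The enum encoding is used directly: the low bit of @p toggle (e.g.
+ * RXMODE_TOGGLE_PROMISC_ON & 1) becomes the new promiscuous or
+ * all-multicast flag before flow rules are re-synchronized.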
+ */ +static int +mlx4_rxmode_toggle(struct rte_eth_dev *dev, enum rxmode_toggle toggle) +{ + struct mlx4_priv *priv = dev->data->dev_private; + const char *mode; + struct rte_flow_error error; + int ret; + + switch (toggle) { + case RXMODE_TOGGLE_PROMISC_OFF: + case RXMODE_TOGGLE_PROMISC_ON: + mode = "promiscuous"; + dev->data->promiscuous = toggle & 1; + break; + case RXMODE_TOGGLE_ALLMULTI_OFF: + case RXMODE_TOGGLE_ALLMULTI_ON: + mode = "all multicast"; + dev->data->all_multicast = toggle & 1; + break; + default: + mode = "undefined"; + } + + ret = mlx4_flow_sync(priv, &error); + if (!ret) + return 0; + + ERROR("cannot toggle %s mode (code %d, \"%s\")," + " flow error type %d, cause %p, message: %s", + mode, rte_errno, strerror(rte_errno), error.type, error.cause, + error.message ? error.message : "(unspecified)"); + return ret; +} + +/** + * DPDK callback to enable promiscuous mode. + * + * @param dev + * Pointer to Ethernet device structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx4_promiscuous_enable(struct rte_eth_dev *dev) +{ + return mlx4_rxmode_toggle(dev, RXMODE_TOGGLE_PROMISC_ON); +} + +/** + * DPDK callback to disable promiscuous mode. + * + * @param dev + * Pointer to Ethernet device structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx4_promiscuous_disable(struct rte_eth_dev *dev) +{ + return mlx4_rxmode_toggle(dev, RXMODE_TOGGLE_PROMISC_OFF); +} + +/** + * DPDK callback to enable all multicast mode. + * + * @param dev + * Pointer to Ethernet device structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx4_allmulticast_enable(struct rte_eth_dev *dev) +{ + return mlx4_rxmode_toggle(dev, RXMODE_TOGGLE_ALLMULTI_ON); +} + +/** + * DPDK callback to disable all multicast mode. + * + * @param dev + * Pointer to Ethernet device structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx4_allmulticast_disable(struct rte_eth_dev *dev) +{ + return mlx4_rxmode_toggle(dev, RXMODE_TOGGLE_ALLMULTI_OFF); +} + +/** + * DPDK callback to remove a MAC address. + * + * @param dev + * Pointer to Ethernet device structure. + * @param index + * MAC address index. + */ +void +mlx4_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index) +{ + struct mlx4_priv *priv = dev->data->dev_private; + struct rte_flow_error error; + + if (index >= RTE_DIM(priv->mac) - priv->mac_mc) { + rte_errno = EINVAL; + return; + } + memset(&priv->mac[index], 0, sizeof(priv->mac[index])); + if (!mlx4_flow_sync(priv, &error)) + return; + ERROR("failed to synchronize flow rules after removing MAC address" + " at index %d (code %d, \"%s\")," + " flow error type %d, cause %p, message: %s", + index, rte_errno, strerror(rte_errno), error.type, error.cause, + error.message ? error.message : "(unspecified)"); +} + +/** + * DPDK callback to add a MAC address. + * + * @param dev + * Pointer to Ethernet device structure. + * @param mac_addr + * MAC address to register. + * @param index + * MAC address index. + * @param vmdq + * VMDq pool index to associate address with (ignored). + * + * @return + * 0 on success, negative errno value otherwise and rte_errno is set. 
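+ *
+ * Index 0 holds the primary MAC address; mlx4_mac_addr_set() simply
+ * calls this function with index 0.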
+ */ +int +mlx4_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr, + uint32_t index, uint32_t vmdq) +{ + struct mlx4_priv *priv = dev->data->dev_private; + struct rte_flow_error error; + int ret; + + (void)vmdq; + if (index >= RTE_DIM(priv->mac) - priv->mac_mc) { + rte_errno = EINVAL; + return -rte_errno; + } + memcpy(&priv->mac[index], mac_addr, sizeof(priv->mac[index])); + ret = mlx4_flow_sync(priv, &error); + if (!ret) + return 0; + ERROR("failed to synchronize flow rules after adding MAC address" + " at index %d (code %d, \"%s\")," + " flow error type %d, cause %p, message: %s", + index, rte_errno, strerror(rte_errno), error.type, error.cause, + error.message ? error.message : "(unspecified)"); + return ret; +} + +/** + * DPDK callback to configure multicast addresses. + * + * @param dev + * Pointer to Ethernet device structure. + * @param list + * List of MAC addresses to register. + * @param num + * Number of entries in list. + * + * @return + * 0 on success, negative errno value otherwise and rte_errno is set. + */ +int +mlx4_set_mc_addr_list(struct rte_eth_dev *dev, struct rte_ether_addr *list, + uint32_t num) +{ + struct mlx4_priv *priv = dev->data->dev_private; + struct rte_flow_error error; + int ret; + + if (num > RTE_DIM(priv->mac)) { + rte_errno = EINVAL; + return -rte_errno; + } + /* + * Make sure there is enough room to increase the number of + * multicast entries without overwriting standard entries. + */ + if (num > priv->mac_mc) { + unsigned int i; + + for (i = RTE_DIM(priv->mac) - num; + i != RTE_DIM(priv->mac) - priv->mac_mc; + ++i) + if (!rte_is_zero_ether_addr(&priv->mac[i])) { + rte_errno = EBUSY; + return -rte_errno; + } + } else if (num < priv->mac_mc) { + /* Clear unused entries. */ + memset(priv->mac + RTE_DIM(priv->mac) - priv->mac_mc, + 0, + sizeof(priv->mac[0]) * (priv->mac_mc - num)); + } + memcpy(priv->mac + RTE_DIM(priv->mac) - num, list, sizeof(*list) * num); + priv->mac_mc = num; + ret = mlx4_flow_sync(priv, &error); + if (!ret) + return 0; + ERROR("failed to synchronize flow rules after modifying MC list," + " (code %d, \"%s\"), flow error type %d, cause %p, message: %s", + rte_errno, strerror(rte_errno), error.type, error.cause, + error.message ? error.message : "(unspecified)"); + return ret; +} + +/** + * DPDK callback to configure a VLAN filter. + * + * @param dev + * Pointer to Ethernet device structure. + * @param vlan_id + * VLAN ID to filter. + * @param on + * Toggle filter. + * + * @return + * 0 on success, negative errno value otherwise and rte_errno is set. + */ +int +mlx4_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) +{ + struct mlx4_priv *priv = dev->data->dev_private; + struct rte_flow_error error; + unsigned int vidx = vlan_id / 64; + unsigned int vbit = vlan_id % 64; + uint64_t *v; + int ret; + + if (vidx >= RTE_DIM(dev->data->vlan_filter_conf.ids)) { + rte_errno = EINVAL; + return -rte_errno; + } + v = &dev->data->vlan_filter_conf.ids[vidx]; + *v &= ~(UINT64_C(1) << vbit); + *v |= (uint64_t)!!on << vbit; + ret = mlx4_flow_sync(priv, &error); + if (!ret) + return 0; + ERROR("failed to synchronize flow rules after %s VLAN filter on ID %u" + " (code %d, \"%s\"), " + " flow error type %d, cause %p, message: %s", + on ? "enabling" : "disabling", vlan_id, + rte_errno, strerror(rte_errno), error.type, error.cause, + error.message ? error.message : "(unspecified)"); + return ret; +} + +/** + * DPDK callback to set the primary MAC address. + * + * @param dev + * Pointer to Ethernet device structure. 
+ * @param mac_addr + * MAC address to register. + * + * @return + * 0 on success, negative errno value otherwise and rte_errno is set. + */ +int +mlx4_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr) +{ + return mlx4_mac_addr_add(dev, mac_addr, 0, 0); +} + +/** + * DPDK callback to get information about the device. + * + * @param dev + * Pointer to Ethernet device structure. + * @param[out] info + * Info structure output buffer. + */ +int +mlx4_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info) +{ + struct mlx4_priv *priv = dev->data->dev_private; + unsigned int max; + + /* FIXME: we should ask the device for these values. */ + info->min_rx_bufsize = 32; + info->max_rx_pktlen = 65536; + /* + * Since we need one CQ per QP, the limit is the minimum number + * between the two values. + */ + max = ((priv->device_attr.max_cq > priv->device_attr.max_qp) ? + priv->device_attr.max_qp : priv->device_attr.max_cq); + /* max_rx_queues is uint16_t. */ + max = RTE_MIN(max, (unsigned int)UINT16_MAX); + info->max_rx_queues = max; + info->max_tx_queues = max; + info->max_mac_addrs = RTE_DIM(priv->mac); + info->tx_offload_capa = mlx4_get_tx_port_offloads(priv); + info->rx_queue_offload_capa = mlx4_get_rx_queue_offloads(priv); + info->rx_offload_capa = (mlx4_get_rx_port_offloads(priv) | + info->rx_queue_offload_capa); + info->if_index = priv->if_index; + info->hash_key_size = MLX4_RSS_HASH_KEY_SIZE; + info->speed_capa = + ETH_LINK_SPEED_1G | + ETH_LINK_SPEED_10G | + ETH_LINK_SPEED_20G | + ETH_LINK_SPEED_40G | + ETH_LINK_SPEED_56G; + info->flow_type_rss_offloads = mlx4_conv_rss_types(priv, 0, 1); + + return 0; +} + +/** + * Get firmware version of a device. + * + * @param dev + * Ethernet device port. + * @param fw_ver + * String output allocated by caller. + * @param fw_size + * Size of the output string, including terminating null byte. + * + * @return + * 0 on success, or the size of the non truncated string if too big. + */ +int mlx4_fw_version_get(struct rte_eth_dev *dev, char *fw_ver, size_t fw_size) +{ + struct mlx4_priv *priv = dev->data->dev_private; + struct ibv_device_attr *attr = &priv->device_attr; + size_t size = strnlen(attr->fw_ver, sizeof(attr->fw_ver)) + 1; + + if (fw_size < size) + return size; + if (fw_ver != NULL) + strlcpy(fw_ver, attr->fw_ver, fw_size); + return 0; +} + +/** + * DPDK callback to get device statistics. + * + * @param dev + * Pointer to Ethernet device structure. + * @param[out] stats + * Stats structure output buffer. + */ +int +mlx4_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) +{ + struct rte_eth_stats tmp; + unsigned int i; + unsigned int idx; + + memset(&tmp, 0, sizeof(tmp)); + /* Add software counters. 
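+	 * Per-queue counters are only exported for the first
+	 * RTE_ETHDEV_QUEUE_STAT_CNTRS queues; the aggregate counters
+	 * always include every queue.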
*/ + for (i = 0; i != dev->data->nb_rx_queues; ++i) { + struct rxq *rxq = dev->data->rx_queues[i]; + + if (rxq == NULL) + continue; + idx = rxq->stats.idx; + if (idx < RTE_ETHDEV_QUEUE_STAT_CNTRS) { + tmp.q_ipackets[idx] += rxq->stats.ipackets; + tmp.q_ibytes[idx] += rxq->stats.ibytes; + tmp.q_errors[idx] += (rxq->stats.idropped + + rxq->stats.rx_nombuf); + } + tmp.ipackets += rxq->stats.ipackets; + tmp.ibytes += rxq->stats.ibytes; + tmp.ierrors += rxq->stats.idropped; + tmp.rx_nombuf += rxq->stats.rx_nombuf; + } + for (i = 0; i != dev->data->nb_tx_queues; ++i) { + struct txq *txq = dev->data->tx_queues[i]; + + if (txq == NULL) + continue; + idx = txq->stats.idx; + if (idx < RTE_ETHDEV_QUEUE_STAT_CNTRS) { + tmp.q_opackets[idx] += txq->stats.opackets; + tmp.q_obytes[idx] += txq->stats.obytes; + } + tmp.opackets += txq->stats.opackets; + tmp.obytes += txq->stats.obytes; + tmp.oerrors += txq->stats.odropped; + } + *stats = tmp; + return 0; +} + +/** + * DPDK callback to clear device statistics. + * + * @param dev + * Pointer to Ethernet device structure. + * + * @return + * alwasy 0 on success + */ +int +mlx4_stats_reset(struct rte_eth_dev *dev) +{ + unsigned int i; + + for (i = 0; i != dev->data->nb_rx_queues; ++i) { + struct rxq *rxq = dev->data->rx_queues[i]; + + if (rxq) + rxq->stats = (struct mlx4_rxq_stats){ + .idx = rxq->stats.idx, + }; + } + for (i = 0; i != dev->data->nb_tx_queues; ++i) { + struct txq *txq = dev->data->tx_queues[i]; + + if (txq) + txq->stats = (struct mlx4_txq_stats){ + .idx = txq->stats.idx, + }; + } + + return 0; +} + +/** + * DPDK callback to retrieve physical link information. + * + * @param dev + * Pointer to Ethernet device structure. + * @param wait_to_complete + * Wait for request completion (ignored). + * + * @return + * 0 on success, negative errno value otherwise and rte_errno is set. + */ +int +mlx4_link_update(struct rte_eth_dev *dev, int wait_to_complete) +{ + const struct mlx4_priv *priv = dev->data->dev_private; + struct ethtool_cmd edata = { + .cmd = ETHTOOL_GSET, + }; + struct ifreq ifr; + struct rte_eth_link dev_link; + int link_speed = 0; + + if (priv == NULL) { + rte_errno = EINVAL; + return -rte_errno; + } + (void)wait_to_complete; + if (mlx4_ifreq(priv, SIOCGIFFLAGS, &ifr)) { + WARN("ioctl(SIOCGIFFLAGS) failed: %s", strerror(rte_errno)); + return -rte_errno; + } + memset(&dev_link, 0, sizeof(dev_link)); + dev_link.link_status = ((ifr.ifr_flags & IFF_UP) && + (ifr.ifr_flags & IFF_RUNNING)); + ifr.ifr_data = (void *)&edata; + if (mlx4_ifreq(priv, SIOCETHTOOL, &ifr)) { + WARN("ioctl(SIOCETHTOOL, ETHTOOL_GSET) failed: %s", + strerror(rte_errno)); + return -rte_errno; + } + link_speed = ethtool_cmd_speed(&edata); + if (link_speed == -1) + dev_link.link_speed = ETH_SPEED_NUM_NONE; + else + dev_link.link_speed = link_speed; + dev_link.link_duplex = ((edata.duplex == DUPLEX_HALF) ? + ETH_LINK_HALF_DUPLEX : ETH_LINK_FULL_DUPLEX); + dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds & + ETH_LINK_SPEED_FIXED); + dev->data->dev_link = dev_link; + return 0; +} + +/** + * DPDK callback to get flow control status. + * + * @param dev + * Pointer to Ethernet device structure. + * @param[out] fc_conf + * Flow control output buffer. + * + * @return + * 0 on success, negative errno value otherwise and rte_errno is set. 
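+ *
+ * Pause parameters are queried from the kernel netdevice with an
+ * ETHTOOL_GPAUSEPARAM request and translated to RTE_FC_NONE,
+ * RTE_FC_RX_PAUSE, RTE_FC_TX_PAUSE or RTE_FC_FULL.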
+ */ +int +mlx4_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) +{ + struct mlx4_priv *priv = dev->data->dev_private; + struct ifreq ifr; + struct ethtool_pauseparam ethpause = { + .cmd = ETHTOOL_GPAUSEPARAM, + }; + int ret; + + ifr.ifr_data = (void *)ðpause; + if (mlx4_ifreq(priv, SIOCETHTOOL, &ifr)) { + ret = rte_errno; + WARN("ioctl(SIOCETHTOOL, ETHTOOL_GPAUSEPARAM)" + " failed: %s", + strerror(rte_errno)); + goto out; + } + fc_conf->autoneg = ethpause.autoneg; + if (ethpause.rx_pause && ethpause.tx_pause) + fc_conf->mode = RTE_FC_FULL; + else if (ethpause.rx_pause) + fc_conf->mode = RTE_FC_RX_PAUSE; + else if (ethpause.tx_pause) + fc_conf->mode = RTE_FC_TX_PAUSE; + else + fc_conf->mode = RTE_FC_NONE; + ret = 0; +out: + MLX4_ASSERT(ret >= 0); + return -ret; +} + +/** + * DPDK callback to modify flow control parameters. + * + * @param dev + * Pointer to Ethernet device structure. + * @param[in] fc_conf + * Flow control parameters. + * + * @return + * 0 on success, negative errno value otherwise and rte_errno is set. + */ +int +mlx4_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) +{ + struct mlx4_priv *priv = dev->data->dev_private; + struct ifreq ifr; + struct ethtool_pauseparam ethpause = { + .cmd = ETHTOOL_SPAUSEPARAM, + }; + int ret; + + ifr.ifr_data = (void *)ðpause; + ethpause.autoneg = fc_conf->autoneg; + if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) || + (fc_conf->mode & RTE_FC_RX_PAUSE)) + ethpause.rx_pause = 1; + else + ethpause.rx_pause = 0; + if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) || + (fc_conf->mode & RTE_FC_TX_PAUSE)) + ethpause.tx_pause = 1; + else + ethpause.tx_pause = 0; + if (mlx4_ifreq(priv, SIOCETHTOOL, &ifr)) { + ret = rte_errno; + WARN("ioctl(SIOCETHTOOL, ETHTOOL_SPAUSEPARAM)" + " failed: %s", + strerror(rte_errno)); + goto out; + } + ret = 0; +out: + MLX4_ASSERT(ret >= 0); + return -ret; +} + +/** + * DPDK callback to retrieve the received packet types that are recognized + * by the device. + * + * @param dev + * Pointer to Ethernet device structure. + * + * @return + * Pointer to an array of recognized packet types if in Rx burst mode, + * NULL otherwise. + */ +const uint32_t * +mlx4_dev_supported_ptypes_get(struct rte_eth_dev *dev) +{ + static const uint32_t ptypes[] = { + /* refers to rxq_cq_to_pkt_type() */ + RTE_PTYPE_L2_ETHER, + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN, + RTE_PTYPE_L3_IPV6_EXT_UNKNOWN, + RTE_PTYPE_L4_FRAG, + RTE_PTYPE_L4_TCP, + RTE_PTYPE_L4_UDP, + RTE_PTYPE_UNKNOWN + }; + static const uint32_t ptypes_l2tun[] = { + /* refers to rxq_cq_to_pkt_type() */ + RTE_PTYPE_L2_ETHER, + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN, + RTE_PTYPE_L3_IPV6_EXT_UNKNOWN, + RTE_PTYPE_L4_FRAG, + RTE_PTYPE_L4_TCP, + RTE_PTYPE_L4_UDP, + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN, + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN, + RTE_PTYPE_UNKNOWN + }; + struct mlx4_priv *priv = dev->data->dev_private; + + if (dev->rx_pkt_burst == mlx4_rx_burst) { + if (priv->hw_csum_l2tun) + return ptypes_l2tun; + else + return ptypes; + } + return NULL; +} + +/** + * Check if mlx4 device was removed. + * + * @param dev + * Pointer to Ethernet device structure. + * + * @return + * 1 when device is removed, otherwise 0. 
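+ *
+ * Removal is inferred from ibv_query_device() failing with EIO on the
+ * port's Verbs context.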
+ */ +int +mlx4_is_removed(struct rte_eth_dev *dev) +{ + struct ibv_device_attr device_attr; + struct mlx4_priv *priv = dev->data->dev_private; + + if (mlx4_glue->query_device(priv->ctx, &device_attr) == EIO) + return 1; + return 0; +} diff --git a/src/spdk/dpdk/drivers/net/mlx4/mlx4_flow.c b/src/spdk/dpdk/drivers/net/mlx4/mlx4_flow.c new file mode 100644 index 000000000..2a86382db --- /dev/null +++ b/src/spdk/dpdk/drivers/net/mlx4/mlx4_flow.c @@ -0,0 +1,1626 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2017 6WIND S.A. + * Copyright 2017 Mellanox Technologies, Ltd + */ + +/** + * @file + * Flow API operations for mlx4 driver. + */ + +#include +#include +#include +#include +#include +#include +#include + +/* Verbs headers do not support -pedantic. */ +#ifdef PEDANTIC +#pragma GCC diagnostic ignored "-Wpedantic" +#endif +#include +#ifdef PEDANTIC +#pragma GCC diagnostic error "-Wpedantic" +#endif + +#include +#include +#include +#include +#include +#include +#include + +/* PMD headers. */ +#include "mlx4.h" +#include "mlx4_glue.h" +#include "mlx4_flow.h" +#include "mlx4_rxtx.h" +#include "mlx4_utils.h" + +/** Static initializer for a list of subsequent item types. */ +#define NEXT_ITEM(...) \ + (const enum rte_flow_item_type []){ \ + __VA_ARGS__, RTE_FLOW_ITEM_TYPE_END, \ + } + +/** Processor structure associated with a flow item. */ +struct mlx4_flow_proc_item { + /** Bit-mask for fields supported by this PMD. */ + const void *mask_support; + /** Bit-mask to use when @p item->mask is not provided. */ + const void *mask_default; + /** Size in bytes for @p mask_support and @p mask_default. */ + const unsigned int mask_sz; + /** Merge a pattern item into a flow rule handle. */ + int (*merge)(struct rte_flow *flow, + const struct rte_flow_item *item, + const struct mlx4_flow_proc_item *proc, + struct rte_flow_error *error); + /** Size in bytes of the destination structure. */ + const unsigned int dst_sz; + /** List of possible subsequent items. */ + const enum rte_flow_item_type *const next_item; +}; + +/** Shared resources for drop flow rules. */ +struct mlx4_drop { + struct ibv_qp *qp; /**< QP target. */ + struct ibv_cq *cq; /**< CQ associated with above QP. */ + struct mlx4_priv *priv; /**< Back pointer to private data. */ + uint32_t refcnt; /**< Reference count. */ +}; + +/** + * Convert supported RSS hash field types between DPDK and Verbs formats. + * + * This function returns the supported (default) set when @p types has + * special value 0. + * + * @param priv + * Pointer to private structure. + * @param types + * Depending on @p verbs_to_dpdk, hash types in either DPDK (see struct + * rte_eth_rss_conf) or Verbs format. + * @param verbs_to_dpdk + * A zero value converts @p types from DPDK to Verbs, a nonzero value + * performs the reverse operation. + * + * @return + * Converted RSS hash fields on success, (uint64_t)-1 otherwise and + * rte_errno is set. 
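To make the conversion implemented below concrete, the following sketch spells out what one DPDK hash type is expected to correspond to on the Verbs side; the constants come from rte_ethdev.h and infiniband/verbs.h, and the example names are illustrative only:

#include <stdint.h>
#include <infiniband/verbs.h>
#include <rte_ethdev.h>

/* What mlx4_conv_rss_types(priv, ETH_RSS_NONFRAG_IPV4_UDP, 0) should yield
 * when the device supports that hash type: hashing on both IPv4 addresses
 * and both UDP ports. */
static const uint64_t example_dpdk_type = ETH_RSS_NONFRAG_IPV4_UDP;
static const uint64_t example_verbs_fields =
	IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4 |
	IBV_RX_HASH_SRC_PORT_UDP | IBV_RX_HASH_DST_PORT_UDP;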
+ */ +uint64_t +mlx4_conv_rss_types(struct mlx4_priv *priv, uint64_t types, int verbs_to_dpdk) +{ + enum { + INNER, + IPV4, IPV4_1, IPV4_2, IPV6, IPV6_1, IPV6_2, IPV6_3, + TCP, UDP, + IPV4_TCP, IPV4_UDP, IPV6_TCP, IPV6_TCP_1, IPV6_UDP, IPV6_UDP_1, + }; + enum { + VERBS_IPV4 = IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4, + VERBS_IPV6 = IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6, + VERBS_TCP = IBV_RX_HASH_SRC_PORT_TCP | IBV_RX_HASH_DST_PORT_TCP, + VERBS_UDP = IBV_RX_HASH_SRC_PORT_UDP | IBV_RX_HASH_DST_PORT_UDP, + }; + static const uint64_t dpdk[] = { + [INNER] = 0, + [IPV4] = ETH_RSS_IPV4, + [IPV4_1] = ETH_RSS_FRAG_IPV4, + [IPV4_2] = ETH_RSS_NONFRAG_IPV4_OTHER, + [IPV6] = ETH_RSS_IPV6, + [IPV6_1] = ETH_RSS_FRAG_IPV6, + [IPV6_2] = ETH_RSS_NONFRAG_IPV6_OTHER, + [IPV6_3] = ETH_RSS_IPV6_EX, + [TCP] = 0, + [UDP] = 0, + [IPV4_TCP] = ETH_RSS_NONFRAG_IPV4_TCP, + [IPV4_UDP] = ETH_RSS_NONFRAG_IPV4_UDP, + [IPV6_TCP] = ETH_RSS_NONFRAG_IPV6_TCP, + [IPV6_TCP_1] = ETH_RSS_IPV6_TCP_EX, + [IPV6_UDP] = ETH_RSS_NONFRAG_IPV6_UDP, + [IPV6_UDP_1] = ETH_RSS_IPV6_UDP_EX, + }; + static const uint64_t verbs[RTE_DIM(dpdk)] = { + [INNER] = IBV_RX_HASH_INNER, + [IPV4] = VERBS_IPV4, + [IPV4_1] = VERBS_IPV4, + [IPV4_2] = VERBS_IPV4, + [IPV6] = VERBS_IPV6, + [IPV6_1] = VERBS_IPV6, + [IPV6_2] = VERBS_IPV6, + [IPV6_3] = VERBS_IPV6, + [TCP] = VERBS_TCP, + [UDP] = VERBS_UDP, + [IPV4_TCP] = VERBS_IPV4 | VERBS_TCP, + [IPV4_UDP] = VERBS_IPV4 | VERBS_UDP, + [IPV6_TCP] = VERBS_IPV6 | VERBS_TCP, + [IPV6_TCP_1] = VERBS_IPV6 | VERBS_TCP, + [IPV6_UDP] = VERBS_IPV6 | VERBS_UDP, + [IPV6_UDP_1] = VERBS_IPV6 | VERBS_UDP, + }; + const uint64_t *in = verbs_to_dpdk ? verbs : dpdk; + const uint64_t *out = verbs_to_dpdk ? dpdk : verbs; + uint64_t seen = 0; + uint64_t conv = 0; + unsigned int i; + + if (!types) { + if (!verbs_to_dpdk) + return priv->hw_rss_sup; + types = priv->hw_rss_sup; + } + for (i = 0; i != RTE_DIM(dpdk); ++i) + if (in[i] && (types & in[i]) == in[i]) { + seen |= types & in[i]; + conv |= out[i]; + } + if ((verbs_to_dpdk || (conv & priv->hw_rss_sup) == conv) && + !(types & ~seen)) + return conv; + rte_errno = ENOTSUP; + return (uint64_t)-1; +} + +/** + * Merge Ethernet pattern item into flow rule handle. + * + * Additional mlx4-specific constraints on supported fields: + * + * - No support for partial masks, except in the specific case of matching + * all multicast traffic (@p spec->dst and @p mask->dst equal to + * 01:00:00:00:00:00). + * - Not providing @p item->spec or providing an empty @p mask->dst is + * *only* supported if the rule doesn't specify additional matching + * criteria (i.e. rule is promiscuous-like). + * + * @param[in, out] flow + * Flow rule handle to update. + * @param[in] item + * Pattern item to merge. + * @param[in] proc + * Associated item-processing object. + * @param[out] error + * Perform verbose error reporting if not NULL. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +mlx4_flow_merge_eth(struct rte_flow *flow, + const struct rte_flow_item *item, + const struct mlx4_flow_proc_item *proc, + struct rte_flow_error *error) +{ + const struct rte_flow_item_eth *spec = item->spec; + const struct rte_flow_item_eth *mask = + spec ? (item->mask ? 
item->mask : proc->mask_default) : NULL; + struct ibv_flow_spec_eth *eth; + const char *msg; + unsigned int i; + + if (mask) { + uint32_t sum_dst = 0; + uint32_t sum_src = 0; + + for (i = 0; i != sizeof(mask->dst.addr_bytes); ++i) { + sum_dst += mask->dst.addr_bytes[i]; + sum_src += mask->src.addr_bytes[i]; + } + if (sum_src) { + msg = "mlx4 does not support source MAC matching"; + goto error; + } else if (!sum_dst) { + flow->promisc = 1; + } else if (sum_dst == 1 && mask->dst.addr_bytes[0] == 1) { + if (!(spec->dst.addr_bytes[0] & 1)) { + msg = "mlx4 does not support the explicit" + " exclusion of all multicast traffic"; + goto error; + } + flow->allmulti = 1; + } else if (sum_dst != (UINT8_C(0xff) * RTE_ETHER_ADDR_LEN)) { + msg = "mlx4 does not support matching partial" + " Ethernet fields"; + goto error; + } + } + if (!flow->ibv_attr) + return 0; + if (flow->promisc) { + flow->ibv_attr->type = IBV_FLOW_ATTR_ALL_DEFAULT; + return 0; + } + if (flow->allmulti) { + flow->ibv_attr->type = IBV_FLOW_ATTR_MC_DEFAULT; + return 0; + } + ++flow->ibv_attr->num_of_specs; + eth = (void *)((uintptr_t)flow->ibv_attr + flow->ibv_attr_size); + *eth = (struct ibv_flow_spec_eth) { + .type = IBV_FLOW_SPEC_ETH, + .size = sizeof(*eth), + }; + if (!mask) { + eth->val.dst_mac[0] = 0xff; + flow->ibv_attr->type = IBV_FLOW_ATTR_ALL_DEFAULT; + flow->promisc = 1; + return 0; + } + memcpy(eth->val.dst_mac, spec->dst.addr_bytes, RTE_ETHER_ADDR_LEN); + memcpy(eth->mask.dst_mac, mask->dst.addr_bytes, RTE_ETHER_ADDR_LEN); + /* Remove unwanted bits from values. */ + for (i = 0; i < RTE_ETHER_ADDR_LEN; ++i) + eth->val.dst_mac[i] &= eth->mask.dst_mac[i]; + + return 0; +error: + return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, + item, msg); +} + +/** + * Merge VLAN pattern item into flow rule handle. + * + * Additional mlx4-specific constraints on supported fields: + * + * - Matching *all* VLAN traffic by omitting @p item->spec or providing an + * empty @p item->mask would also include non-VLAN traffic. Doing so is + * therefore unsupported. + * - No support for partial masks. + * + * @param[in, out] flow + * Flow rule handle to update. + * @param[in] item + * Pattern item to merge. + * @param[in] proc + * Associated item-processing object. + * @param[out] error + * Perform verbose error reporting if not NULL. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +mlx4_flow_merge_vlan(struct rte_flow *flow, + const struct rte_flow_item *item, + const struct mlx4_flow_proc_item *proc, + struct rte_flow_error *error) +{ + const struct rte_flow_item_vlan *spec = item->spec; + const struct rte_flow_item_vlan *mask = + spec ? (item->mask ? 
item->mask : proc->mask_default) : NULL; + struct ibv_flow_spec_eth *eth; + const char *msg; + + if (!mask || !mask->tci) { + msg = "mlx4 cannot match all VLAN traffic while excluding" + " non-VLAN traffic, TCI VID must be specified"; + goto error; + } + if (mask->tci != RTE_BE16(0x0fff)) { + msg = "mlx4 does not support partial TCI VID matching"; + goto error; + } + if (!flow->ibv_attr) + return 0; + eth = (void *)((uintptr_t)flow->ibv_attr + flow->ibv_attr_size - + sizeof(*eth)); + eth->val.vlan_tag = spec->tci; + eth->mask.vlan_tag = mask->tci; + eth->val.vlan_tag &= eth->mask.vlan_tag; + if (flow->ibv_attr->type == IBV_FLOW_ATTR_ALL_DEFAULT) + flow->ibv_attr->type = IBV_FLOW_ATTR_NORMAL; + return 0; +error: + return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, + item, msg); +} + +/** + * Merge IPv4 pattern item into flow rule handle. + * + * Additional mlx4-specific constraints on supported fields: + * + * - No support for partial masks. + * + * @param[in, out] flow + * Flow rule handle to update. + * @param[in] item + * Pattern item to merge. + * @param[in] proc + * Associated item-processing object. + * @param[out] error + * Perform verbose error reporting if not NULL. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +mlx4_flow_merge_ipv4(struct rte_flow *flow, + const struct rte_flow_item *item, + const struct mlx4_flow_proc_item *proc, + struct rte_flow_error *error) +{ + const struct rte_flow_item_ipv4 *spec = item->spec; + const struct rte_flow_item_ipv4 *mask = + spec ? (item->mask ? item->mask : proc->mask_default) : NULL; + struct ibv_flow_spec_ipv4 *ipv4; + const char *msg; + + if (mask && + ((uint32_t)(mask->hdr.src_addr + 1) > UINT32_C(1) || + (uint32_t)(mask->hdr.dst_addr + 1) > UINT32_C(1))) { + msg = "mlx4 does not support matching partial IPv4 fields"; + goto error; + } + if (!flow->ibv_attr) + return 0; + ++flow->ibv_attr->num_of_specs; + ipv4 = (void *)((uintptr_t)flow->ibv_attr + flow->ibv_attr_size); + *ipv4 = (struct ibv_flow_spec_ipv4) { + .type = IBV_FLOW_SPEC_IPV4, + .size = sizeof(*ipv4), + }; + if (!spec) + return 0; + ipv4->val = (struct ibv_flow_ipv4_filter) { + .src_ip = spec->hdr.src_addr, + .dst_ip = spec->hdr.dst_addr, + }; + ipv4->mask = (struct ibv_flow_ipv4_filter) { + .src_ip = mask->hdr.src_addr, + .dst_ip = mask->hdr.dst_addr, + }; + /* Remove unwanted bits from values. */ + ipv4->val.src_ip &= ipv4->mask.src_ip; + ipv4->val.dst_ip &= ipv4->mask.dst_ip; + return 0; +error: + return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, + item, msg); +} + +/** + * Merge UDP pattern item into flow rule handle. + * + * Additional mlx4-specific constraints on supported fields: + * + * - No support for partial masks. + * - Due to HW/FW limitation, flow rule priority is not taken into account + * when matching UDP destination ports, doing is therefore only supported + * at the highest priority level (0). + * + * @param[in, out] flow + * Flow rule handle to update. + * @param[in] item + * Pattern item to merge. + * @param[in] proc + * Associated item-processing object. + * @param[out] error + * Perform verbose error reporting if not NULL. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. 
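Taken together, the Ethernet, VLAN and IPv4 merge callbacks above only accept full masks on the fields they support. A pattern that satisfies all three constraints, using placeholder addresses and a placeholder VLAN ID (none of these values come from the driver), might look like this:

#include <rte_byteorder.h>
#include <rte_flow.h>

/* Placeholder values; only the mask shapes matter to this PMD. */
static const struct rte_flow_item_eth eth_spec = {
	.dst.addr_bytes = "\x00\x11\x22\x33\x44\x55",
};
static const struct rte_flow_item_eth eth_mask = {
	.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
};
static const struct rte_flow_item_vlan vlan_spec = {
	.tci = RTE_BE16(42),
};
static const struct rte_flow_item_vlan vlan_mask = {
	.tci = RTE_BE16(0x0fff), /* full TCI VID, as required above */
};
static const struct rte_flow_item_ipv4 ipv4_spec = {
	.hdr = {
		.src_addr = RTE_BE32(0xc0a80001), /* 192.168.0.1 */
		.dst_addr = RTE_BE32(0xc0a80002), /* 192.168.0.2 */
	},
};
static const struct rte_flow_item_ipv4 ipv4_mask = {
	.hdr = {
		.src_addr = RTE_BE32(0xffffffff),
		.dst_addr = RTE_BE32(0xffffffff),
	},
};
static const struct rte_flow_item example_pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH,
	  .spec = &eth_spec, .mask = &eth_mask },
	{ .type = RTE_FLOW_ITEM_TYPE_VLAN,
	  .spec = &vlan_spec, .mask = &vlan_mask },
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
	  .spec = &ipv4_spec, .mask = &ipv4_mask },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};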
+ */ +static int +mlx4_flow_merge_udp(struct rte_flow *flow, + const struct rte_flow_item *item, + const struct mlx4_flow_proc_item *proc, + struct rte_flow_error *error) +{ + const struct rte_flow_item_udp *spec = item->spec; + const struct rte_flow_item_udp *mask = + spec ? (item->mask ? item->mask : proc->mask_default) : NULL; + struct ibv_flow_spec_tcp_udp *udp; + const char *msg; + + if (mask && + ((uint16_t)(mask->hdr.src_port + 1) > UINT16_C(1) || + (uint16_t)(mask->hdr.dst_port + 1) > UINT16_C(1))) { + msg = "mlx4 does not support matching partial UDP fields"; + goto error; + } + if (mask && mask->hdr.dst_port && flow->priority) { + msg = "combining UDP destination port matching with a nonzero" + " priority level is not supported"; + goto error; + } + if (!flow->ibv_attr) + return 0; + ++flow->ibv_attr->num_of_specs; + udp = (void *)((uintptr_t)flow->ibv_attr + flow->ibv_attr_size); + *udp = (struct ibv_flow_spec_tcp_udp) { + .type = IBV_FLOW_SPEC_UDP, + .size = sizeof(*udp), + }; + if (!spec) + return 0; + udp->val.dst_port = spec->hdr.dst_port; + udp->val.src_port = spec->hdr.src_port; + udp->mask.dst_port = mask->hdr.dst_port; + udp->mask.src_port = mask->hdr.src_port; + /* Remove unwanted bits from values. */ + udp->val.src_port &= udp->mask.src_port; + udp->val.dst_port &= udp->mask.dst_port; + return 0; +error: + return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, + item, msg); +} + +/** + * Merge TCP pattern item into flow rule handle. + * + * Additional mlx4-specific constraints on supported fields: + * + * - No support for partial masks. + * + * @param[in, out] flow + * Flow rule handle to update. + * @param[in] item + * Pattern item to merge. + * @param[in] proc + * Associated item-processing object. + * @param[out] error + * Perform verbose error reporting if not NULL. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +mlx4_flow_merge_tcp(struct rte_flow *flow, + const struct rte_flow_item *item, + const struct mlx4_flow_proc_item *proc, + struct rte_flow_error *error) +{ + const struct rte_flow_item_tcp *spec = item->spec; + const struct rte_flow_item_tcp *mask = + spec ? (item->mask ? item->mask : proc->mask_default) : NULL; + struct ibv_flow_spec_tcp_udp *tcp; + const char *msg; + + if (mask && + ((uint16_t)(mask->hdr.src_port + 1) > UINT16_C(1) || + (uint16_t)(mask->hdr.dst_port + 1) > UINT16_C(1))) { + msg = "mlx4 does not support matching partial TCP fields"; + goto error; + } + if (!flow->ibv_attr) + return 0; + ++flow->ibv_attr->num_of_specs; + tcp = (void *)((uintptr_t)flow->ibv_attr + flow->ibv_attr_size); + *tcp = (struct ibv_flow_spec_tcp_udp) { + .type = IBV_FLOW_SPEC_TCP, + .size = sizeof(*tcp), + }; + if (!spec) + return 0; + tcp->val.dst_port = spec->hdr.dst_port; + tcp->val.src_port = spec->hdr.src_port; + tcp->mask.dst_port = mask->hdr.dst_port; + tcp->mask.src_port = mask->hdr.src_port; + /* Remove unwanted bits from values. */ + tcp->val.src_port &= tcp->mask.src_port; + tcp->val.dst_port &= tcp->mask.dst_port; + return 0; +error: + return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, + item, msg); +} + +/** + * Perform basic sanity checks on a pattern item. + * + * @param[in] item + * Item specification. + * @param[in] proc + * Associated item-processing object. + * @param[out] error + * Perform verbose error reporting if not NULL. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. 
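The (uint16_t)(mask + 1) > 1 comparison used by both the UDP and TCP callbacks above is a compact full-or-empty mask test. Restated on its own (the helper name is illustrative):

#include <stdint.h>

/* Nonzero for any 16-bit mask other than 0x0000 and 0xffff: 0xffff + 1
 * wraps to 0 and 0 + 1 is 1, so only partial masks exceed 1. */
static int
mask16_is_partial(uint16_t mask)
{
	return (uint16_t)(mask + 1) > UINT16_C(1);
}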
+ */ +static int +mlx4_flow_item_check(const struct rte_flow_item *item, + const struct mlx4_flow_proc_item *proc, + struct rte_flow_error *error) +{ + const uint8_t *mask; + unsigned int i; + + /* item->last and item->mask cannot exist without item->spec. */ + if (!item->spec && (item->mask || item->last)) + return rte_flow_error_set + (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, + "\"mask\" or \"last\" field provided without a" + " corresponding \"spec\""); + /* No spec, no mask, no problem. */ + if (!item->spec) + return 0; + mask = item->mask ? + (const uint8_t *)item->mask : + (const uint8_t *)proc->mask_default; + MLX4_ASSERT(mask); + /* + * Single-pass check to make sure that: + * - Mask is supported, no bits are set outside proc->mask_support. + * - Both item->spec and item->last are included in mask. + */ + for (i = 0; i != proc->mask_sz; ++i) { + if (!mask[i]) + continue; + if ((mask[i] | ((const uint8_t *)proc->mask_support)[i]) != + ((const uint8_t *)proc->mask_support)[i]) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, + item, "unsupported field found in \"mask\""); + if (item->last && + (((const uint8_t *)item->spec)[i] & mask[i]) != + (((const uint8_t *)item->last)[i] & mask[i])) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, + item, + "range between \"spec\" and \"last\"" + " is larger than \"mask\""); + } + return 0; +} + +/** Graph of supported items and associated actions. */ +static const struct mlx4_flow_proc_item mlx4_flow_proc_item_list[] = { + [RTE_FLOW_ITEM_TYPE_END] = { + .next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_ETH), + }, + [RTE_FLOW_ITEM_TYPE_ETH] = { + .next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV4), + .mask_support = &(const struct rte_flow_item_eth){ + /* Only destination MAC can be matched. */ + .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff", + }, + .mask_default = &rte_flow_item_eth_mask, + .mask_sz = sizeof(struct rte_flow_item_eth), + .merge = mlx4_flow_merge_eth, + .dst_sz = sizeof(struct ibv_flow_spec_eth), + }, + [RTE_FLOW_ITEM_TYPE_VLAN] = { + .next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_IPV4), + .mask_support = &(const struct rte_flow_item_vlan){ + /* Only TCI VID matching is supported. 
*/ + .tci = RTE_BE16(0x0fff), + }, + .mask_default = &rte_flow_item_vlan_mask, + .mask_sz = sizeof(struct rte_flow_item_vlan), + .merge = mlx4_flow_merge_vlan, + .dst_sz = 0, + }, + [RTE_FLOW_ITEM_TYPE_IPV4] = { + .next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_TCP), + .mask_support = &(const struct rte_flow_item_ipv4){ + .hdr = { + .src_addr = RTE_BE32(0xffffffff), + .dst_addr = RTE_BE32(0xffffffff), + }, + }, + .mask_default = &rte_flow_item_ipv4_mask, + .mask_sz = sizeof(struct rte_flow_item_ipv4), + .merge = mlx4_flow_merge_ipv4, + .dst_sz = sizeof(struct ibv_flow_spec_ipv4), + }, + [RTE_FLOW_ITEM_TYPE_UDP] = { + .mask_support = &(const struct rte_flow_item_udp){ + .hdr = { + .src_port = RTE_BE16(0xffff), + .dst_port = RTE_BE16(0xffff), + }, + }, + .mask_default = &rte_flow_item_udp_mask, + .mask_sz = sizeof(struct rte_flow_item_udp), + .merge = mlx4_flow_merge_udp, + .dst_sz = sizeof(struct ibv_flow_spec_tcp_udp), + }, + [RTE_FLOW_ITEM_TYPE_TCP] = { + .mask_support = &(const struct rte_flow_item_tcp){ + .hdr = { + .src_port = RTE_BE16(0xffff), + .dst_port = RTE_BE16(0xffff), + }, + }, + .mask_default = &rte_flow_item_tcp_mask, + .mask_sz = sizeof(struct rte_flow_item_tcp), + .merge = mlx4_flow_merge_tcp, + .dst_sz = sizeof(struct ibv_flow_spec_tcp_udp), + }, +}; + +/** + * Make sure a flow rule is supported and initialize associated structure. + * + * @param priv + * Pointer to private structure. + * @param[in] attr + * Flow rule attributes. + * @param[in] pattern + * Pattern specification (list terminated by the END pattern item). + * @param[in] actions + * Associated actions (list terminated by the END action). + * @param[out] error + * Perform verbose error reporting if not NULL. + * @param[in, out] addr + * Buffer where the resulting flow rule handle pointer must be stored. + * If NULL, stop processing after validation stage. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +mlx4_flow_prepare(struct mlx4_priv *priv, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error, + struct rte_flow **addr) +{ + const struct rte_flow_item *item; + const struct rte_flow_action *action; + const struct mlx4_flow_proc_item *proc; + struct rte_flow temp = { .ibv_attr_size = sizeof(*temp.ibv_attr) }; + struct rte_flow *flow = &temp; + const char *msg = NULL; + int overlap; + + if (attr->group) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_GROUP, + NULL, "groups are not supported"); + if (attr->priority > MLX4_FLOW_PRIORITY_LAST) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, + NULL, "maximum priority level is " + MLX4_STR_EXPAND(MLX4_FLOW_PRIORITY_LAST)); + if (attr->egress) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, + NULL, "egress is not supported"); + if (attr->transfer) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, + NULL, "transfer is not supported"); + if (!attr->ingress) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, + NULL, "only ingress is supported"); +fill: + overlap = 0; + proc = mlx4_flow_proc_item_list; + flow->priority = attr->priority; + /* Go over pattern. 
*/ + for (item = pattern; item->type; ++item) { + const struct mlx4_flow_proc_item *next = NULL; + unsigned int i; + int err; + + if (item->type == RTE_FLOW_ITEM_TYPE_VOID) + continue; + if (item->type == MLX4_FLOW_ITEM_TYPE_INTERNAL) { + flow->internal = 1; + continue; + } + if (flow->promisc || flow->allmulti) { + msg = "mlx4 does not support additional matching" + " criteria combined with indiscriminate" + " matching on Ethernet headers"; + goto exit_item_not_supported; + } + for (i = 0; proc->next_item && proc->next_item[i]; ++i) { + if (proc->next_item[i] == item->type) { + next = &mlx4_flow_proc_item_list[item->type]; + break; + } + } + if (!next) + goto exit_item_not_supported; + proc = next; + /* + * Perform basic sanity checks only once, while handle is + * not allocated. + */ + if (flow == &temp) { + err = mlx4_flow_item_check(item, proc, error); + if (err) + return err; + } + if (proc->merge) { + err = proc->merge(flow, item, proc, error); + if (err) + return err; + } + flow->ibv_attr_size += proc->dst_sz; + } + /* Go over actions list. */ + for (action = actions; action->type; ++action) { + /* This one may appear anywhere multiple times. */ + if (action->type == RTE_FLOW_ACTION_TYPE_VOID) + continue; + /* Fate-deciding actions may appear exactly once. */ + if (overlap) { + msg = "cannot combine several fate-deciding actions," + " choose between DROP, QUEUE or RSS"; + goto exit_action_not_supported; + } + overlap = 1; + switch (action->type) { + const struct rte_flow_action_queue *queue; + const struct rte_flow_action_rss *rss; + const uint8_t *rss_key; + uint32_t rss_key_len; + uint64_t fields; + unsigned int i; + + case RTE_FLOW_ACTION_TYPE_DROP: + flow->drop = 1; + break; + case RTE_FLOW_ACTION_TYPE_QUEUE: + if (flow->rss) + break; + queue = action->conf; + if (queue->index >= ETH_DEV(priv)->data->nb_rx_queues) { + msg = "queue target index beyond number of" + " configured Rx queues"; + goto exit_action_not_supported; + } + flow->rss = mlx4_rss_get + (priv, 0, mlx4_rss_hash_key_default, 1, + &queue->index); + if (!flow->rss) { + msg = "not enough resources for additional" + " single-queue RSS context"; + goto exit_action_not_supported; + } + break; + case RTE_FLOW_ACTION_TYPE_RSS: + if (flow->rss) + break; + rss = action->conf; + /* Default RSS configuration if none is provided. */ + if (rss->key_len) { + rss_key = rss->key; + rss_key_len = rss->key_len; + } else { + rss_key = mlx4_rss_hash_key_default; + rss_key_len = MLX4_RSS_HASH_KEY_SIZE; + } + /* Sanity checks. 
*/ + for (i = 0; i < rss->queue_num; ++i) + if (rss->queue[i] >= + ETH_DEV(priv)->data->nb_rx_queues) + break; + if (i != rss->queue_num) { + msg = "queue index target beyond number of" + " configured Rx queues"; + goto exit_action_not_supported; + } + if (!rte_is_power_of_2(rss->queue_num)) { + msg = "for RSS, mlx4 requires the number of" + " queues to be a power of two"; + goto exit_action_not_supported; + } + if (rss_key_len != sizeof(flow->rss->key)) { + msg = "mlx4 supports exactly one RSS hash key" + " length: " + MLX4_STR_EXPAND(MLX4_RSS_HASH_KEY_SIZE); + goto exit_action_not_supported; + } + for (i = 1; i < rss->queue_num; ++i) + if (rss->queue[i] - rss->queue[i - 1] != 1) + break; + if (i != rss->queue_num) { + msg = "mlx4 requires RSS contexts to use" + " consecutive queue indices only"; + goto exit_action_not_supported; + } + if (rss->queue[0] % rss->queue_num) { + msg = "mlx4 requires the first queue of a RSS" + " context to be aligned on a multiple" + " of the context size"; + goto exit_action_not_supported; + } + if (rss->func && + rss->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ) { + msg = "the only supported RSS hash function" + " is Toeplitz"; + goto exit_action_not_supported; + } + if (rss->level) { + msg = "a nonzero RSS encapsulation level is" + " not supported"; + goto exit_action_not_supported; + } + rte_errno = 0; + fields = mlx4_conv_rss_types(priv, rss->types, 0); + if (fields == (uint64_t)-1 && rte_errno) { + msg = "unsupported RSS hash type requested"; + goto exit_action_not_supported; + } + flow->rss = mlx4_rss_get + (priv, fields, rss_key, rss->queue_num, + rss->queue); + if (!flow->rss) { + msg = "either invalid parameters or not enough" + " resources for additional multi-queue" + " RSS context"; + goto exit_action_not_supported; + } + break; + default: + goto exit_action_not_supported; + } + } + /* When fate is unknown, drop traffic. */ + if (!overlap) + flow->drop = 1; + /* Validation ends here. */ + if (!addr) { + if (flow->rss) + mlx4_rss_put(flow->rss); + return 0; + } + if (flow == &temp) { + /* Allocate proper handle based on collected data. */ + const struct mlx4_malloc_vec vec[] = { + { + .align = alignof(struct rte_flow), + .size = sizeof(*flow), + .addr = (void **)&flow, + }, + { + .align = alignof(struct ibv_flow_attr), + .size = temp.ibv_attr_size, + .addr = (void **)&temp.ibv_attr, + }, + }; + + if (!mlx4_zmallocv(__func__, vec, RTE_DIM(vec))) { + if (temp.rss) + mlx4_rss_put(temp.rss); + return rte_flow_error_set + (error, -rte_errno, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "flow rule handle allocation failure"); + } + /* Most fields will be updated by second pass. */ + *flow = (struct rte_flow){ + .ibv_attr = temp.ibv_attr, + .ibv_attr_size = sizeof(*flow->ibv_attr), + .rss = temp.rss, + }; + *flow->ibv_attr = (struct ibv_flow_attr){ + .type = IBV_FLOW_ATTR_NORMAL, + .size = sizeof(*flow->ibv_attr), + .priority = attr->priority, + .port = priv->port, + }; + goto fill; + } + *addr = flow; + return 0; +exit_item_not_supported: + return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, + item, msg ? msg : "item not supported"); +exit_action_not_supported: + return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, + action, msg ? msg : "action not supported"); +} + +/** + * Validate a flow supported by the NIC. 
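An RSS action configuration that passes every check enumerated above might look as follows; the queue indices and key bytes are placeholders, and a types value of 0 asks the PMD for its supported hash set as described for mlx4_conv_rss_types():

#include <stdint.h>
#include <rte_common.h>
#include <rte_ethdev.h>
#include <rte_flow.h>

/* Four consecutive queues starting on a multiple of four, Toeplitz hash,
 * 40-byte key (MLX4_RSS_HASH_KEY_SIZE); all values are placeholders. */
static const uint16_t example_rss_queues[] = { 4, 5, 6, 7 };
static const uint8_t example_rss_key[40] = { 0x2c, 0xc6, 0x81, 0xd1 };
static const struct rte_flow_action_rss example_rss = {
	.func = RTE_ETH_HASH_FUNCTION_TOEPLITZ,
	.level = 0,
	.types = 0, /* let the PMD pick its supported hash types */
	.key_len = sizeof(example_rss_key),
	.key = example_rss_key,
	.queue_num = RTE_DIM(example_rss_queues),
	.queue = example_rss_queues,
};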
+ * + * @see rte_flow_validate() + * @see rte_flow_ops + */ +static int +mlx4_flow_validate(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + struct mlx4_priv *priv = dev->data->dev_private; + + return mlx4_flow_prepare(priv, attr, pattern, actions, error, NULL); +} + +/** + * Get a drop flow rule resources instance. + * + * @param priv + * Pointer to private structure. + * + * @return + * Pointer to drop flow resources on success, NULL otherwise and rte_errno + * is set. + */ +static struct mlx4_drop * +mlx4_drop_get(struct mlx4_priv *priv) +{ + struct mlx4_drop *drop = priv->drop; + + if (drop) { + MLX4_ASSERT(drop->refcnt); + MLX4_ASSERT(drop->priv == priv); + ++drop->refcnt; + return drop; + } + drop = rte_malloc(__func__, sizeof(*drop), 0); + if (!drop) + goto error; + *drop = (struct mlx4_drop){ + .priv = priv, + .refcnt = 1, + }; + drop->cq = mlx4_glue->create_cq(priv->ctx, 1, NULL, NULL, 0); + if (!drop->cq) + goto error; + drop->qp = mlx4_glue->create_qp + (priv->pd, + &(struct ibv_qp_init_attr){ + .send_cq = drop->cq, + .recv_cq = drop->cq, + .qp_type = IBV_QPT_RAW_PACKET, + }); + if (!drop->qp) + goto error; + priv->drop = drop; + return drop; +error: + if (drop) { + if (drop->qp) + claim_zero(mlx4_glue->destroy_qp(drop->qp)); + if (drop->cq) + claim_zero(mlx4_glue->destroy_cq(drop->cq)); + rte_free(drop); + } + rte_errno = ENOMEM; + return NULL; +} + +/** + * Give back a drop flow rule resources instance. + * + * @param drop + * Pointer to drop flow rule resources. + */ +static void +mlx4_drop_put(struct mlx4_drop *drop) +{ + MLX4_ASSERT(drop->refcnt); + if (--drop->refcnt) + return; + drop->priv->drop = NULL; + claim_zero(mlx4_glue->destroy_qp(drop->qp)); + claim_zero(mlx4_glue->destroy_cq(drop->cq)); + rte_free(drop); +} + +/** + * Toggle a configured flow rule. + * + * @param priv + * Pointer to private structure. + * @param flow + * Flow rule handle to toggle. + * @param enable + * Whether associated Verbs flow must be created or removed. + * @param[out] error + * Perform verbose error reporting if not NULL. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +mlx4_flow_toggle(struct mlx4_priv *priv, + struct rte_flow *flow, + int enable, + struct rte_flow_error *error) +{ + struct ibv_qp *qp = NULL; + const char *msg; + int err; + + if (!enable) { + if (!flow->ibv_flow) + return 0; + claim_zero(mlx4_glue->destroy_flow(flow->ibv_flow)); + flow->ibv_flow = NULL; + if (flow->drop) + mlx4_drop_put(priv->drop); + else if (flow->rss) + mlx4_rss_detach(flow->rss); + return 0; + } + MLX4_ASSERT(flow->ibv_attr); + if (!flow->internal && + !priv->isolated && + flow->ibv_attr->priority == MLX4_FLOW_PRIORITY_LAST) { + if (flow->ibv_flow) { + claim_zero(mlx4_glue->destroy_flow(flow->ibv_flow)); + flow->ibv_flow = NULL; + if (flow->drop) + mlx4_drop_put(priv->drop); + else if (flow->rss) + mlx4_rss_detach(flow->rss); + } + err = EACCES; + msg = ("priority level " + MLX4_STR_EXPAND(MLX4_FLOW_PRIORITY_LAST) + " is reserved when not in isolated mode"); + goto error; + } + if (flow->rss) { + struct mlx4_rss *rss = flow->rss; + int missing = 0; + unsigned int i; + + /* Stop at the first nonexistent target queue. 
*/ + for (i = 0; i != rss->queues; ++i) + if (rss->queue_id[i] >= + ETH_DEV(priv)->data->nb_rx_queues || + !ETH_DEV(priv)->data->rx_queues[rss->queue_id[i]]) { + missing = 1; + break; + } + if (flow->ibv_flow) { + if (missing ^ !flow->drop) + return 0; + /* Verbs flow needs updating. */ + claim_zero(mlx4_glue->destroy_flow(flow->ibv_flow)); + flow->ibv_flow = NULL; + if (flow->drop) + mlx4_drop_put(priv->drop); + else + mlx4_rss_detach(rss); + } + if (!missing) { + err = mlx4_rss_attach(rss); + if (err) { + err = -err; + msg = "cannot create indirection table or hash" + " QP to associate flow rule with"; + goto error; + } + qp = rss->qp; + } + /* A missing target queue drops traffic implicitly. */ + flow->drop = missing; + } + if (flow->drop) { + if (flow->ibv_flow) + return 0; + mlx4_drop_get(priv); + if (!priv->drop) { + err = rte_errno; + msg = "resources for drop flow rule cannot be created"; + goto error; + } + qp = priv->drop->qp; + } + MLX4_ASSERT(qp); + if (flow->ibv_flow) + return 0; + flow->ibv_flow = mlx4_glue->create_flow(qp, flow->ibv_attr); + if (flow->ibv_flow) + return 0; + if (flow->drop) + mlx4_drop_put(priv->drop); + else if (flow->rss) + mlx4_rss_detach(flow->rss); + err = errno; + msg = "flow rule rejected by device"; +error: + return rte_flow_error_set + (error, err, RTE_FLOW_ERROR_TYPE_HANDLE, flow, msg); +} + +/** + * Create a flow. + * + * @see rte_flow_create() + * @see rte_flow_ops + */ +static struct rte_flow * +mlx4_flow_create(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + struct mlx4_priv *priv = dev->data->dev_private; + struct rte_flow *flow; + int err; + + err = mlx4_flow_prepare(priv, attr, pattern, actions, error, &flow); + if (err) + return NULL; + err = mlx4_flow_toggle(priv, flow, priv->started, error); + if (!err) { + struct rte_flow *curr = LIST_FIRST(&priv->flows); + + /* New rules are inserted after internal ones. */ + if (!curr || !curr->internal) { + LIST_INSERT_HEAD(&priv->flows, flow, next); + } else { + while (LIST_NEXT(curr, next) && + LIST_NEXT(curr, next)->internal) + curr = LIST_NEXT(curr, next); + LIST_INSERT_AFTER(curr, flow, next); + } + return flow; + } + if (flow->rss) + mlx4_rss_put(flow->rss); + rte_flow_error_set(error, -err, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + error->message); + rte_free(flow); + return NULL; +} + +/** + * Configure isolated mode. + * + * @see rte_flow_isolate() + * @see rte_flow_ops + */ +static int +mlx4_flow_isolate(struct rte_eth_dev *dev, + int enable, + struct rte_flow_error *error) +{ + struct mlx4_priv *priv = dev->data->dev_private; + + if (!!enable == !!priv->isolated) + return 0; + priv->isolated = !!enable; + if (mlx4_flow_sync(priv, error)) { + priv->isolated = !enable; + return -rte_errno; + } + return 0; +} + +/** + * Destroy a flow rule. + * + * @see rte_flow_destroy() + * @see rte_flow_ops + */ +static int +mlx4_flow_destroy(struct rte_eth_dev *dev, + struct rte_flow *flow, + struct rte_flow_error *error) +{ + struct mlx4_priv *priv = dev->data->dev_private; + int err = mlx4_flow_toggle(priv, flow, 0, error); + + if (err) + return err; + LIST_REMOVE(flow, next); + if (flow->rss) + mlx4_rss_put(flow->rss); + rte_free(flow); + return 0; +} + +/** + * Destroy user-configured flow rules. + * + * This function skips internal flows rules. 
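The create and destroy callbacks above are normally reached through the generic rte_flow entry points. A minimal sketch, assuming attr, pattern and actions built along the lines of the earlier examples (the helper name is illustrative):

#include <rte_errno.h>
#include <rte_flow.h>

/* Validate, install and finally remove one rule on a port. */
static int
install_and_remove_rule(uint16_t port_id,
			const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[])
{
	struct rte_flow_error flow_err;
	struct rte_flow *flow;

	if (rte_flow_validate(port_id, attr, pattern, actions, &flow_err))
		return -rte_errno;
	flow = rte_flow_create(port_id, attr, pattern, actions, &flow_err);
	if (flow == NULL)
		return -rte_errno;
	return rte_flow_destroy(port_id, flow, &flow_err);
}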
+ * + * @see rte_flow_flush() + * @see rte_flow_ops + */ +static int +mlx4_flow_flush(struct rte_eth_dev *dev, + struct rte_flow_error *error) +{ + struct mlx4_priv *priv = dev->data->dev_private; + struct rte_flow *flow = LIST_FIRST(&priv->flows); + + while (flow) { + struct rte_flow *next = LIST_NEXT(flow, next); + + if (!flow->internal) + mlx4_flow_destroy(dev, flow, error); + flow = next; + } + return 0; +} + +/** + * Helper function to determine the next configured VLAN filter. + * + * @param priv + * Pointer to private structure. + * @param vlan + * VLAN ID to use as a starting point. + * + * @return + * Next configured VLAN ID or a high value (>= 4096) if there is none. + */ +static uint16_t +mlx4_flow_internal_next_vlan(struct mlx4_priv *priv, uint16_t vlan) +{ + while (vlan < 4096) { + if (ETH_DEV(priv)->data->vlan_filter_conf.ids[vlan / 64] & + (UINT64_C(1) << (vlan % 64))) + return vlan; + ++vlan; + } + return vlan; +} + +/** + * Generate internal flow rules. + * + * Various flow rules are created depending on the mode the device is in: + * + * 1. Promiscuous: + * port MAC + broadcast + catch-all (VLAN filtering is ignored). + * 2. All multicast: + * port MAC/VLAN + broadcast + catch-all multicast. + * 3. Otherwise: + * port MAC/VLAN + broadcast MAC/VLAN. + * + * About MAC flow rules: + * + * - MAC flow rules are generated from @p dev->data->mac_addrs + * (@p priv->mac array). + * - An additional flow rule for Ethernet broadcasts is also generated. + * - All these are per-VLAN if @p DEV_RX_OFFLOAD_VLAN_FILTER + * is enabled and VLAN filters are configured. + * + * @param priv + * Pointer to private structure. + * @param[out] error + * Perform verbose error reporting if not NULL. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +mlx4_flow_internal(struct mlx4_priv *priv, struct rte_flow_error *error) +{ + struct rte_flow_attr attr = { + .priority = MLX4_FLOW_PRIORITY_LAST, + .ingress = 1, + }; + struct rte_flow_item_eth eth_spec; + const struct rte_flow_item_eth eth_mask = { + .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff", + }; + const struct rte_flow_item_eth eth_allmulti = { + .dst.addr_bytes = "\x01\x00\x00\x00\x00\x00", + }; + struct rte_flow_item_vlan vlan_spec; + const struct rte_flow_item_vlan vlan_mask = { + .tci = RTE_BE16(0x0fff), + }; + struct rte_flow_item pattern[] = { + { + .type = MLX4_FLOW_ITEM_TYPE_INTERNAL, + }, + { + .type = RTE_FLOW_ITEM_TYPE_ETH, + .spec = ð_spec, + .mask = ð_mask, + }, + { + /* Replaced with VLAN if filtering is enabled. */ + .type = RTE_FLOW_ITEM_TYPE_END, + }, + { + .type = RTE_FLOW_ITEM_TYPE_END, + }, + }; + /* + * Round number of queues down to their previous power of 2 to + * comply with RSS context limitations. Extra queues silently do not + * get RSS by default. + */ + uint32_t queues = + rte_align32pow2(ETH_DEV(priv)->data->nb_rx_queues + 1) >> 1; + uint16_t queue[queues]; + struct rte_flow_action_rss action_rss = { + .func = RTE_ETH_HASH_FUNCTION_DEFAULT, + .level = 0, + .types = 0, + .key_len = MLX4_RSS_HASH_KEY_SIZE, + .queue_num = queues, + .key = mlx4_rss_hash_key_default, + .queue = queue, + }; + struct rte_flow_action actions[] = { + { + .type = RTE_FLOW_ACTION_TYPE_RSS, + .conf = &action_rss, + }, + { + .type = RTE_FLOW_ACTION_TYPE_END, + }, + }; + struct rte_ether_addr *rule_mac = ð_spec.dst; + rte_be16_t *rule_vlan = + (ETH_DEV(priv)->data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_VLAN_FILTER) && + !ETH_DEV(priv)->data->promiscuous ? 
+ &vlan_spec.tci : + NULL; + uint16_t vlan = 0; + struct rte_flow *flow; + unsigned int i; + int err = 0; + + /* Nothing to be done if there are no Rx queues. */ + if (!queues) + goto error; + /* Prepare default RSS configuration. */ + for (i = 0; i != queues; ++i) + queue[i] = i; + /* + * Set up VLAN item if filtering is enabled and at least one VLAN + * filter is configured. + */ + if (rule_vlan) { + vlan = mlx4_flow_internal_next_vlan(priv, 0); + if (vlan < 4096) { + pattern[2] = (struct rte_flow_item){ + .type = RTE_FLOW_ITEM_TYPE_VLAN, + .spec = &vlan_spec, + .mask = &vlan_mask, + }; +next_vlan: + *rule_vlan = rte_cpu_to_be_16(vlan); + } else { + rule_vlan = NULL; + } + } + for (i = 0; i != RTE_DIM(priv->mac) + 1; ++i) { + const struct rte_ether_addr *mac; + + /* Broadcasts are handled by an extra iteration. */ + if (i < RTE_DIM(priv->mac)) + mac = &priv->mac[i]; + else + mac = ð_mask.dst; + if (rte_is_zero_ether_addr(mac)) + continue; + /* Check if MAC flow rule is already present. */ + for (flow = LIST_FIRST(&priv->flows); + flow && flow->internal; + flow = LIST_NEXT(flow, next)) { + const struct ibv_flow_spec_eth *eth = + (const void *)((uintptr_t)flow->ibv_attr + + sizeof(*flow->ibv_attr)); + unsigned int j; + + if (!flow->mac) + continue; + MLX4_ASSERT(flow->ibv_attr->type == + IBV_FLOW_ATTR_NORMAL); + MLX4_ASSERT(flow->ibv_attr->num_of_specs == 1); + MLX4_ASSERT(eth->type == IBV_FLOW_SPEC_ETH); + MLX4_ASSERT(flow->rss); + if (rule_vlan && + (eth->val.vlan_tag != *rule_vlan || + eth->mask.vlan_tag != RTE_BE16(0x0fff))) + continue; + if (!rule_vlan && eth->mask.vlan_tag) + continue; + for (j = 0; j != sizeof(mac->addr_bytes); ++j) + if (eth->val.dst_mac[j] != mac->addr_bytes[j] || + eth->mask.dst_mac[j] != UINT8_C(0xff) || + eth->val.src_mac[j] != UINT8_C(0x00) || + eth->mask.src_mac[j] != UINT8_C(0x00)) + break; + if (j != sizeof(mac->addr_bytes)) + continue; + if (flow->rss->queues != queues || + memcmp(flow->rss->queue_id, action_rss.queue, + queues * sizeof(flow->rss->queue_id[0]))) + continue; + break; + } + if (!flow || !flow->internal) { + /* Not found, create a new flow rule. */ + memcpy(rule_mac, mac, sizeof(*mac)); + flow = mlx4_flow_create(ETH_DEV(priv), &attr, pattern, + actions, error); + if (!flow) { + err = -rte_errno; + goto error; + } + } + flow->select = 1; + flow->mac = 1; + } + if (rule_vlan) { + vlan = mlx4_flow_internal_next_vlan(priv, vlan + 1); + if (vlan < 4096) + goto next_vlan; + } + /* Take care of promiscuous and all multicast flow rules. */ + if (ETH_DEV(priv)->data->promiscuous || + ETH_DEV(priv)->data->all_multicast) { + for (flow = LIST_FIRST(&priv->flows); + flow && flow->internal; + flow = LIST_NEXT(flow, next)) { + if (ETH_DEV(priv)->data->promiscuous) { + if (flow->promisc) + break; + } else { + MLX4_ASSERT(ETH_DEV(priv)->data->all_multicast); + if (flow->allmulti) + break; + } + } + if (flow && flow->internal) { + MLX4_ASSERT(flow->rss); + if (flow->rss->queues != queues || + memcmp(flow->rss->queue_id, action_rss.queue, + queues * sizeof(flow->rss->queue_id[0]))) + flow = NULL; + } + if (!flow || !flow->internal) { + /* Not found, create a new flow rule. 
*/ + if (ETH_DEV(priv)->data->promiscuous) { + pattern[1].spec = NULL; + pattern[1].mask = NULL; + } else { + MLX4_ASSERT(ETH_DEV(priv)->data->all_multicast); + pattern[1].spec = ð_allmulti; + pattern[1].mask = ð_allmulti; + } + pattern[2] = pattern[3]; + flow = mlx4_flow_create(ETH_DEV(priv), &attr, pattern, + actions, error); + if (!flow) { + err = -rte_errno; + goto error; + } + } + MLX4_ASSERT(flow->promisc || flow->allmulti); + flow->select = 1; + } +error: + /* Clear selection and clean up stale internal flow rules. */ + flow = LIST_FIRST(&priv->flows); + while (flow && flow->internal) { + struct rte_flow *next = LIST_NEXT(flow, next); + + if (!flow->select) + claim_zero(mlx4_flow_destroy(ETH_DEV(priv), flow, + error)); + else + flow->select = 0; + flow = next; + } + return err; +} + +/** + * Synchronize flow rules. + * + * This function synchronizes flow rules with the state of the device by + * taking into account isolated mode and whether target queues are + * configured. + * + * @param priv + * Pointer to private structure. + * @param[out] error + * Perform verbose error reporting if not NULL. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx4_flow_sync(struct mlx4_priv *priv, struct rte_flow_error *error) +{ + struct rte_flow *flow; + int ret; + + /* Internal flow rules are guaranteed to come first in the list. */ + if (priv->isolated) { + /* + * Get rid of them in isolated mode, stop at the first + * non-internal rule found. + */ + for (flow = LIST_FIRST(&priv->flows); + flow && flow->internal; + flow = LIST_FIRST(&priv->flows)) + claim_zero(mlx4_flow_destroy(ETH_DEV(priv), flow, + error)); + } else { + /* Refresh internal rules. */ + ret = mlx4_flow_internal(priv, error); + if (ret) + return ret; + } + /* Toggle the remaining flow rules . */ + LIST_FOREACH(flow, &priv->flows, next) { + ret = mlx4_flow_toggle(priv, flow, priv->started, error); + if (ret) + return ret; + } + if (!priv->started) + MLX4_ASSERT(!priv->drop); + return 0; +} + +/** + * Clean up all flow rules. + * + * Unlike mlx4_flow_flush(), this function takes care of all remaining flow + * rules regardless of whether they are internal or user-configured. + * + * @param priv + * Pointer to private structure. + */ +void +mlx4_flow_clean(struct mlx4_priv *priv) +{ + struct rte_flow *flow; + + while ((flow = LIST_FIRST(&priv->flows))) + mlx4_flow_destroy(ETH_DEV(priv), flow, NULL); + MLX4_ASSERT(LIST_EMPTY(&priv->rss)); +} + +static const struct rte_flow_ops mlx4_flow_ops = { + .validate = mlx4_flow_validate, + .create = mlx4_flow_create, + .destroy = mlx4_flow_destroy, + .flush = mlx4_flow_flush, + .isolate = mlx4_flow_isolate, +}; + +/** + * Manage filter operations. + * + * @param dev + * Pointer to Ethernet device structure. + * @param filter_type + * Filter type. + * @param filter_op + * Operation to perform. + * @param arg + * Pointer to operation-specific structure. + * + * @return + * 0 on success, negative errno value otherwise and rte_errno is set. 
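mlx4_filter_ctrl() below only answers the RTE_ETH_FILTER_GENERIC/RTE_ETH_FILTER_GET query that the rte_flow layer issues to discover mlx4_flow_ops. A sketch of that lookup from outside the driver (the helper name is illustrative):

#include <rte_ethdev.h>
#include <rte_flow.h>

/* Fetch the rte_flow_ops a port exposes; librte_ethdev performs the same
 * query internally before dispatching rte_flow_*() calls. */
static const struct rte_flow_ops *
get_flow_ops(uint16_t port_id)
{
	const struct rte_flow_ops *ops = NULL;

	if (rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_GENERIC,
				    RTE_ETH_FILTER_GET, &ops) != 0)
		return NULL;
	return ops;
}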
+ */ +int +mlx4_filter_ctrl(struct rte_eth_dev *dev, + enum rte_filter_type filter_type, + enum rte_filter_op filter_op, + void *arg) +{ + switch (filter_type) { + case RTE_ETH_FILTER_GENERIC: + if (filter_op != RTE_ETH_FILTER_GET) + break; + *(const void **)arg = &mlx4_flow_ops; + return 0; + default: + ERROR("%p: filter type (%d) not supported", + (void *)dev, filter_type); + break; + } + rte_errno = ENOTSUP; + return -rte_errno; +} diff --git a/src/spdk/dpdk/drivers/net/mlx4/mlx4_flow.h b/src/spdk/dpdk/drivers/net/mlx4/mlx4_flow.h new file mode 100644 index 000000000..26465c66a --- /dev/null +++ b/src/spdk/dpdk/drivers/net/mlx4/mlx4_flow.h @@ -0,0 +1,59 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2017 6WIND S.A. + * Copyright 2017 Mellanox Technologies, Ltd + */ + +#ifndef RTE_PMD_MLX4_FLOW_H_ +#define RTE_PMD_MLX4_FLOW_H_ + +#include +#include + +/* Verbs headers do not support -pedantic. */ +#ifdef PEDANTIC +#pragma GCC diagnostic ignored "-Wpedantic" +#endif +#include +#ifdef PEDANTIC +#pragma GCC diagnostic error "-Wpedantic" +#endif + +#include +#include +#include +#include + +/** Last and lowest priority level for a flow rule. */ +#define MLX4_FLOW_PRIORITY_LAST UINT32_C(0xfff) + +/** Meta pattern item used to distinguish internal rules. */ +#define MLX4_FLOW_ITEM_TYPE_INTERNAL ((enum rte_flow_item_type)-1) + +/** PMD-specific (mlx4) definition of a flow rule handle. */ +struct rte_flow { + LIST_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */ + struct ibv_flow *ibv_flow; /**< Verbs flow. */ + struct ibv_flow_attr *ibv_attr; /**< Pointer to Verbs attributes. */ + uint32_t ibv_attr_size; /**< Size of Verbs attributes. */ + uint32_t select:1; /**< Used by operations on the linked list. */ + uint32_t internal:1; /**< Internal flow rule outside isolated mode. */ + uint32_t mac:1; /**< Rule associated with a configured MAC address. */ + uint32_t promisc:1; /**< This rule matches everything. */ + uint32_t allmulti:1; /**< This rule matches all multicast traffic. */ + uint32_t drop:1; /**< This rule drops packets. */ + uint32_t priority; /**< Flow rule priority. */ + struct mlx4_rss *rss; /**< Rx target. */ +}; + +/* mlx4_flow.c */ + +uint64_t mlx4_conv_rss_types(struct mlx4_priv *priv, uint64_t types, + int verbs_to_dpdk); +int mlx4_flow_sync(struct mlx4_priv *priv, struct rte_flow_error *error); +void mlx4_flow_clean(struct mlx4_priv *priv); +int mlx4_filter_ctrl(struct rte_eth_dev *dev, + enum rte_filter_type filter_type, + enum rte_filter_op filter_op, + void *arg); + +#endif /* RTE_PMD_MLX4_FLOW_H_ */ diff --git a/src/spdk/dpdk/drivers/net/mlx4/mlx4_glue.c b/src/spdk/dpdk/drivers/net/mlx4/mlx4_glue.c new file mode 100644 index 000000000..67b3bface --- /dev/null +++ b/src/spdk/dpdk/drivers/net/mlx4/mlx4_glue.c @@ -0,0 +1,279 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2018 6WIND S.A. + * Copyright 2018 Mellanox Technologies, Ltd + */ + +#include +#include + +/* Verbs headers do not support -pedantic. 
*/ +#ifdef PEDANTIC +#pragma GCC diagnostic ignored "-Wpedantic" +#endif +#include +#include +#ifdef PEDANTIC +#pragma GCC diagnostic error "-Wpedantic" +#endif + +#include "mlx4_glue.h" + +static int +mlx4_glue_fork_init(void) +{ + return ibv_fork_init(); +} + +static int +mlx4_glue_get_async_event(struct ibv_context *context, + struct ibv_async_event *event) +{ + return ibv_get_async_event(context, event); +} + +static void +mlx4_glue_ack_async_event(struct ibv_async_event *event) +{ + ibv_ack_async_event(event); +} + +static struct ibv_pd * +mlx4_glue_alloc_pd(struct ibv_context *context) +{ + return ibv_alloc_pd(context); +} + +static int +mlx4_glue_dealloc_pd(struct ibv_pd *pd) +{ + return ibv_dealloc_pd(pd); +} + +static struct ibv_device ** +mlx4_glue_get_device_list(int *num_devices) +{ + return ibv_get_device_list(num_devices); +} + +static void +mlx4_glue_free_device_list(struct ibv_device **list) +{ + ibv_free_device_list(list); +} + +static struct ibv_context * +mlx4_glue_open_device(struct ibv_device *device) +{ + return ibv_open_device(device); +} + +static int +mlx4_glue_close_device(struct ibv_context *context) +{ + return ibv_close_device(context); +} + +static const char * +mlx4_glue_get_device_name(struct ibv_device *device) +{ + return ibv_get_device_name(device); +} + +static int +mlx4_glue_query_device(struct ibv_context *context, + struct ibv_device_attr *device_attr) +{ + return ibv_query_device(context, device_attr); +} + +static int +mlx4_glue_query_device_ex(struct ibv_context *context, + const struct ibv_query_device_ex_input *input, + struct ibv_device_attr_ex *attr) +{ + return ibv_query_device_ex(context, input, attr); +} + +static int +mlx4_glue_query_port(struct ibv_context *context, uint8_t port_num, + struct ibv_port_attr *port_attr) +{ + return ibv_query_port(context, port_num, port_attr); +} + +static const char * +mlx4_glue_port_state_str(enum ibv_port_state port_state) +{ + return ibv_port_state_str(port_state); +} + +static struct ibv_comp_channel * +mlx4_glue_create_comp_channel(struct ibv_context *context) +{ + return ibv_create_comp_channel(context); +} + +static int +mlx4_glue_destroy_comp_channel(struct ibv_comp_channel *channel) +{ + return ibv_destroy_comp_channel(channel); +} + +static struct ibv_cq * +mlx4_glue_create_cq(struct ibv_context *context, int cqe, void *cq_context, + struct ibv_comp_channel *channel, int comp_vector) +{ + return ibv_create_cq(context, cqe, cq_context, channel, comp_vector); +} + +static int +mlx4_glue_destroy_cq(struct ibv_cq *cq) +{ + return ibv_destroy_cq(cq); +} + +static int +mlx4_glue_get_cq_event(struct ibv_comp_channel *channel, struct ibv_cq **cq, + void **cq_context) +{ + return ibv_get_cq_event(channel, cq, cq_context); +} + +static void +mlx4_glue_ack_cq_events(struct ibv_cq *cq, unsigned int nevents) +{ + ibv_ack_cq_events(cq, nevents); +} + +static struct ibv_flow * +mlx4_glue_create_flow(struct ibv_qp *qp, struct ibv_flow_attr *flow) +{ + return ibv_create_flow(qp, flow); +} + +static int +mlx4_glue_destroy_flow(struct ibv_flow *flow_id) +{ + return ibv_destroy_flow(flow_id); +} + +static struct ibv_qp * +mlx4_glue_create_qp(struct ibv_pd *pd, struct ibv_qp_init_attr *qp_init_attr) +{ + return ibv_create_qp(pd, qp_init_attr); +} + +static struct ibv_qp * +mlx4_glue_create_qp_ex(struct ibv_context *context, + struct ibv_qp_init_attr_ex *qp_init_attr_ex) +{ + return ibv_create_qp_ex(context, qp_init_attr_ex); +} + +static int +mlx4_glue_destroy_qp(struct ibv_qp *qp) +{ + return ibv_destroy_qp(qp); +} + 
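Each wrapper above is a one-line trampoline so the rest of the PMD calls Verbs only through the mlx4_glue function table, which can be built into a separately loaded library to keep rdma-core an optional runtime dependency. Driver-side usage then looks like the following sketch (the helper name is illustrative):

#include <infiniband/verbs.h>

#include "mlx4_glue.h"

/* Query port 1 (Verbs port numbers are 1-based) without a direct
 * link-time reference to ibv_query_port(). */
static int
query_port_through_glue(struct ibv_context *ctx, struct ibv_port_attr *attr)
{
	return mlx4_glue->query_port(ctx, 1, attr);
}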
+static int +mlx4_glue_modify_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr, int attr_mask) +{ + return ibv_modify_qp(qp, attr, attr_mask); +} + +static struct ibv_mr * +mlx4_glue_reg_mr(struct ibv_pd *pd, void *addr, size_t length, int access) +{ + return ibv_reg_mr(pd, addr, length, access); +} + +static int +mlx4_glue_dereg_mr(struct ibv_mr *mr) +{ + return ibv_dereg_mr(mr); +} + +static struct ibv_rwq_ind_table * +mlx4_glue_create_rwq_ind_table(struct ibv_context *context, + struct ibv_rwq_ind_table_init_attr *init_attr) +{ + return ibv_create_rwq_ind_table(context, init_attr); +} + +static int +mlx4_glue_destroy_rwq_ind_table(struct ibv_rwq_ind_table *rwq_ind_table) +{ + return ibv_destroy_rwq_ind_table(rwq_ind_table); +} + +static struct ibv_wq * +mlx4_glue_create_wq(struct ibv_context *context, + struct ibv_wq_init_attr *wq_init_attr) +{ + return ibv_create_wq(context, wq_init_attr); +} + +static int +mlx4_glue_destroy_wq(struct ibv_wq *wq) +{ + return ibv_destroy_wq(wq); +} +static int +mlx4_glue_modify_wq(struct ibv_wq *wq, struct ibv_wq_attr *wq_attr) +{ + return ibv_modify_wq(wq, wq_attr); +} + +static int +mlx4_glue_dv_init_obj(struct mlx4dv_obj *obj, uint64_t obj_type) +{ + return mlx4dv_init_obj(obj, obj_type); +} + +static int +mlx4_glue_dv_set_context_attr(struct ibv_context *context, + enum mlx4dv_set_ctx_attr_type attr_type, + void *attr) +{ + return mlx4dv_set_context_attr(context, attr_type, attr); +} + +const struct mlx4_glue *mlx4_glue = &(const struct mlx4_glue){ + .version = MLX4_GLUE_VERSION, + .fork_init = mlx4_glue_fork_init, + .get_async_event = mlx4_glue_get_async_event, + .ack_async_event = mlx4_glue_ack_async_event, + .alloc_pd = mlx4_glue_alloc_pd, + .dealloc_pd = mlx4_glue_dealloc_pd, + .get_device_list = mlx4_glue_get_device_list, + .free_device_list = mlx4_glue_free_device_list, + .open_device = mlx4_glue_open_device, + .close_device = mlx4_glue_close_device, + .get_device_name = mlx4_glue_get_device_name, + .query_device = mlx4_glue_query_device, + .query_device_ex = mlx4_glue_query_device_ex, + .query_port = mlx4_glue_query_port, + .port_state_str = mlx4_glue_port_state_str, + .create_comp_channel = mlx4_glue_create_comp_channel, + .destroy_comp_channel = mlx4_glue_destroy_comp_channel, + .create_cq = mlx4_glue_create_cq, + .destroy_cq = mlx4_glue_destroy_cq, + .get_cq_event = mlx4_glue_get_cq_event, + .ack_cq_events = mlx4_glue_ack_cq_events, + .create_flow = mlx4_glue_create_flow, + .destroy_flow = mlx4_glue_destroy_flow, + .create_qp = mlx4_glue_create_qp, + .create_qp_ex = mlx4_glue_create_qp_ex, + .destroy_qp = mlx4_glue_destroy_qp, + .modify_qp = mlx4_glue_modify_qp, + .reg_mr = mlx4_glue_reg_mr, + .dereg_mr = mlx4_glue_dereg_mr, + .create_rwq_ind_table = mlx4_glue_create_rwq_ind_table, + .destroy_rwq_ind_table = mlx4_glue_destroy_rwq_ind_table, + .create_wq = mlx4_glue_create_wq, + .destroy_wq = mlx4_glue_destroy_wq, + .modify_wq = mlx4_glue_modify_wq, + .dv_init_obj = mlx4_glue_dv_init_obj, + .dv_set_context_attr = mlx4_glue_dv_set_context_attr, +}; diff --git a/src/spdk/dpdk/drivers/net/mlx4/mlx4_glue.h b/src/spdk/dpdk/drivers/net/mlx4/mlx4_glue.h new file mode 100644 index 000000000..5d9e98549 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/mlx4/mlx4_glue.h @@ -0,0 +1,89 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2018 6WIND S.A. + * Copyright 2018 Mellanox Technologies, Ltd + */ + +#ifndef MLX4_GLUE_H_ +#define MLX4_GLUE_H_ + +#include +#include + +/* Verbs headers do not support -pedantic. 
*/ +#ifdef PEDANTIC +#pragma GCC diagnostic ignored "-Wpedantic" +#endif +#include +#include +#ifdef PEDANTIC +#pragma GCC diagnostic error "-Wpedantic" +#endif + +#ifndef MLX4_GLUE_VERSION +#define MLX4_GLUE_VERSION "" +#endif + +/* LIB_GLUE_VERSION must be updated every time this structure is modified. */ +struct mlx4_glue { + const char *version; + int (*fork_init)(void); + int (*get_async_event)(struct ibv_context *context, + struct ibv_async_event *event); + void (*ack_async_event)(struct ibv_async_event *event); + struct ibv_pd *(*alloc_pd)(struct ibv_context *context); + int (*dealloc_pd)(struct ibv_pd *pd); + struct ibv_device **(*get_device_list)(int *num_devices); + void (*free_device_list)(struct ibv_device **list); + struct ibv_context *(*open_device)(struct ibv_device *device); + int (*close_device)(struct ibv_context *context); + const char *(*get_device_name)(struct ibv_device *device); + int (*query_device)(struct ibv_context *context, + struct ibv_device_attr *device_attr); + int (*query_device_ex)(struct ibv_context *context, + const struct ibv_query_device_ex_input *input, + struct ibv_device_attr_ex *attr); + int (*query_port)(struct ibv_context *context, uint8_t port_num, + struct ibv_port_attr *port_attr); + const char *(*port_state_str)(enum ibv_port_state port_state); + struct ibv_comp_channel *(*create_comp_channel) + (struct ibv_context *context); + int (*destroy_comp_channel)(struct ibv_comp_channel *channel); + struct ibv_cq *(*create_cq)(struct ibv_context *context, int cqe, + void *cq_context, + struct ibv_comp_channel *channel, + int comp_vector); + int (*destroy_cq)(struct ibv_cq *cq); + int (*get_cq_event)(struct ibv_comp_channel *channel, + struct ibv_cq **cq, void **cq_context); + void (*ack_cq_events)(struct ibv_cq *cq, unsigned int nevents); + struct ibv_flow *(*create_flow)(struct ibv_qp *qp, + struct ibv_flow_attr *flow); + int (*destroy_flow)(struct ibv_flow *flow_id); + struct ibv_qp *(*create_qp)(struct ibv_pd *pd, + struct ibv_qp_init_attr *qp_init_attr); + struct ibv_qp *(*create_qp_ex) + (struct ibv_context *context, + struct ibv_qp_init_attr_ex *qp_init_attr_ex); + int (*destroy_qp)(struct ibv_qp *qp); + int (*modify_qp)(struct ibv_qp *qp, struct ibv_qp_attr *attr, + int attr_mask); + struct ibv_mr *(*reg_mr)(struct ibv_pd *pd, void *addr, + size_t length, int access); + int (*dereg_mr)(struct ibv_mr *mr); + struct ibv_rwq_ind_table *(*create_rwq_ind_table) + (struct ibv_context *context, + struct ibv_rwq_ind_table_init_attr *init_attr); + int (*destroy_rwq_ind_table)(struct ibv_rwq_ind_table *rwq_ind_table); + struct ibv_wq *(*create_wq)(struct ibv_context *context, + struct ibv_wq_init_attr *wq_init_attr); + int (*destroy_wq)(struct ibv_wq *wq); + int (*modify_wq)(struct ibv_wq *wq, struct ibv_wq_attr *wq_attr); + int (*dv_init_obj)(struct mlx4dv_obj *obj, uint64_t obj_type); + int (*dv_set_context_attr)(struct ibv_context *context, + enum mlx4dv_set_ctx_attr_type attr_type, + void *attr); +}; + +extern const struct mlx4_glue *mlx4_glue; + +#endif /* MLX4_GLUE_H_ */ diff --git a/src/spdk/dpdk/drivers/net/mlx4/mlx4_intr.c b/src/spdk/dpdk/drivers/net/mlx4/mlx4_intr.c new file mode 100644 index 000000000..020fc254a --- /dev/null +++ b/src/spdk/dpdk/drivers/net/mlx4/mlx4_intr.c @@ -0,0 +1,405 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2017 6WIND S.A. + * Copyright 2017 Mellanox Technologies, Ltd + */ + +/** + * @file + * Interrupts handling for mlx4 driver. 
+ */
+
+#include
+#include
+#include
+
+/* Verbs headers do not support -pedantic. */
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+#include
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-Wpedantic"
+#endif
+
+#include
+#include
+#include
+#include
+#include
+
+#include "mlx4.h"
+#include "mlx4_glue.h"
+#include "mlx4_rxtx.h"
+#include "mlx4_utils.h"
+
+static int mlx4_link_status_check(struct mlx4_priv *priv);
+
+/**
+ * Clean up Rx interrupts handler.
+ *
+ * @param priv
+ * Pointer to private structure.
+ */
+static void
+mlx4_rx_intr_vec_disable(struct mlx4_priv *priv)
+{
+ struct rte_intr_handle *intr_handle = &priv->intr_handle;
+
+ rte_intr_free_epoll_fd(intr_handle);
+ free(intr_handle->intr_vec);
+ intr_handle->nb_efd = 0;
+ intr_handle->intr_vec = NULL;
+}
+
+/**
+ * Allocate queue vector and fill epoll fd list for Rx interrupts.
+ *
+ * @param priv
+ * Pointer to private structure.
+ *
+ * @return
+ * 0 on success, negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx4_rx_intr_vec_enable(struct mlx4_priv *priv)
+{
+ unsigned int i;
+ unsigned int rxqs_n = ETH_DEV(priv)->data->nb_rx_queues;
+ unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
+ unsigned int count = 0;
+ struct rte_intr_handle *intr_handle = &priv->intr_handle;
+
+ mlx4_rx_intr_vec_disable(priv);
+ intr_handle->intr_vec = malloc(n * sizeof(intr_handle->intr_vec[0]));
+ if (intr_handle->intr_vec == NULL) {
+ rte_errno = ENOMEM;
+ ERROR("failed to allocate memory for interrupt vector,"
+ " Rx interrupts will not be supported");
+ return -rte_errno;
+ }
+ for (i = 0; i != n; ++i) {
+ struct rxq *rxq = ETH_DEV(priv)->data->rx_queues[i];
+
+ /* Skip queues that cannot request interrupts. */
+ if (!rxq || !rxq->channel) {
+ /* Use invalid intr_vec[] index to disable entry. */
+ intr_handle->intr_vec[i] =
+ RTE_INTR_VEC_RXTX_OFFSET +
+ RTE_MAX_RXTX_INTR_VEC_ID;
+ continue;
+ }
+ if (count >= RTE_MAX_RXTX_INTR_VEC_ID) {
+ rte_errno = E2BIG;
+ ERROR("too many Rx queues for interrupt vector size"
+ " (%d), Rx interrupts cannot be enabled",
+ RTE_MAX_RXTX_INTR_VEC_ID);
+ mlx4_rx_intr_vec_disable(priv);
+ return -rte_errno;
+ }
+ intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + count;
+ intr_handle->efds[count] = rxq->channel->fd;
+ count++;
+ }
+ if (!count)
+ mlx4_rx_intr_vec_disable(priv);
+ else
+ intr_handle->nb_efd = count;
+ return 0;
+}
+
+/**
+ * Process scheduled link status check.
+ *
+ * If LSC interrupts are requested, process related callback.
+ *
+ * @param priv
+ * Pointer to private structure.
+ */
+static void
+mlx4_link_status_alarm(struct mlx4_priv *priv)
+{
+ const struct rte_intr_conf *const intr_conf =
+ &ETH_DEV(priv)->data->dev_conf.intr_conf;
+
+ MLX4_ASSERT(priv->intr_alarm == 1);
+ priv->intr_alarm = 0;
+ if (intr_conf->lsc && !mlx4_link_status_check(priv))
+ _rte_eth_dev_callback_process(ETH_DEV(priv),
+ RTE_ETH_EVENT_INTR_LSC,
+ NULL);
+}
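mlx4_rx_intr_vec_enable() above only maps each interrupt-capable Rx queue onto an efd in the device's rte_intr_handle; an application still has to arm and wait on those interrupts through the generic ethdev API. A hedged sketch of that application side, with port and queue 0 as placeholder values:

#include <rte_ethdev.h>
#include <rte_interrupts.h>

/* Sketch: arm Rx queue 0 of port 0 and block until it signals. */
static int
example_wait_for_rx(void)
{
        struct rte_epoll_event ev;
        int n;

        /* Attach the queue interrupt to this lcore's epoll instance. */
        if (rte_eth_dev_rx_intr_ctl_q(0, 0, RTE_EPOLL_PER_THREAD,
                                      RTE_INTR_EVENT_ADD, NULL) < 0)
                return -1;
        if (rte_eth_dev_rx_intr_enable(0, 0) < 0)
                return -1;
        n = rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1);
        rte_eth_dev_rx_intr_disable(0, 0);
        return n < 0 ? -1 : 0;
}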
+
+/**
+ * Check link status.
+ *
+ * In case of inconsistency, another check is scheduled.
+ *
+ * @param priv
+ * Pointer to private structure.
+ *
+ * @return
+ * 0 on success (link status is consistent), negative errno value
+ * otherwise and rte_errno is set.
+ */
+static int
+mlx4_link_status_check(struct mlx4_priv *priv)
+{
+ struct rte_eth_link *link = &ETH_DEV(priv)->data->dev_link;
+ int ret = mlx4_link_update(ETH_DEV(priv), 0);
+
+ if (ret)
+ return ret;
+ if ((!link->link_speed && link->link_status) ||
+ (link->link_speed && !link->link_status)) {
+ if (!priv->intr_alarm) {
+ /* Inconsistent status, check again later. */
+ ret = rte_eal_alarm_set(MLX4_INTR_ALARM_TIMEOUT,
+ (void (*)(void *))
+ mlx4_link_status_alarm,
+ priv);
+ if (ret)
+ return ret;
+ priv->intr_alarm = 1;
+ }
+ rte_errno = EINPROGRESS;
+ return -rte_errno;
+ }
+ return 0;
+}
+
+/**
+ * Handle interrupts from the NIC.
+ *
+ * @param priv
+ * Pointer to private structure.
+ */
+static void
+mlx4_interrupt_handler(struct mlx4_priv *priv)
+{
+ enum { LSC, RMV, };
+ static const enum rte_eth_event_type type[] = {
+ [LSC] = RTE_ETH_EVENT_INTR_LSC,
+ [RMV] = RTE_ETH_EVENT_INTR_RMV,
+ };
+ uint32_t caught[RTE_DIM(type)] = { 0 };
+ struct ibv_async_event event;
+ const struct rte_intr_conf *const intr_conf =
+ &ETH_DEV(priv)->data->dev_conf.intr_conf;
+ unsigned int i;
+
+ /* Read all messages and acknowledge them. */
+ while (!mlx4_glue->get_async_event(priv->ctx, &event)) {
+ switch (event.event_type) {
+ case IBV_EVENT_PORT_ACTIVE:
+ case IBV_EVENT_PORT_ERR:
+ if (intr_conf->lsc && !mlx4_link_status_check(priv))
+ ++caught[LSC];
+ break;
+ case IBV_EVENT_DEVICE_FATAL:
+ if (intr_conf->rmv)
+ ++caught[RMV];
+ break;
+ default:
+ DEBUG("event type %d on physical port %d not handled",
+ event.event_type, event.element.port_num);
+ }
+ mlx4_glue->ack_async_event(&event);
+ }
+ for (i = 0; i != RTE_DIM(caught); ++i)
+ if (caught[i])
+ _rte_eth_dev_callback_process(ETH_DEV(priv), type[i],
+ NULL);
+}
+
+/**
+ * MLX4 CQ notification.
+ *
+ * @param rxq
+ * Pointer to receive queue structure.
+ * @param solicited
+ * Is request solicited or not.
+ */
+static void
+mlx4_arm_cq(struct rxq *rxq, int solicited)
+{
+ struct mlx4_cq *cq = &rxq->mcq;
+ uint64_t doorbell;
+ uint32_t sn = cq->arm_sn & MLX4_CQ_DB_GEQ_N_MASK;
+ uint32_t ci = cq->cons_index & MLX4_CQ_DB_CI_MASK;
+ uint32_t cmd = solicited ? MLX4_CQ_DB_REQ_NOT_SOL : MLX4_CQ_DB_REQ_NOT;
+
+ *cq->arm_db = rte_cpu_to_be_32(sn << 28 | cmd | ci);
+ /*
+ * Make sure that the doorbell record in host memory is
+ * written before ringing the doorbell via PCI MMIO.
+ */
+ rte_wmb();
+ doorbell = sn << 28 | cmd | cq->cqn;
+ doorbell <<= 32;
+ doorbell |= ci;
+ rte_write64(rte_cpu_to_be_64(doorbell), cq->cq_db_reg);
+}
+
+/**
+ * Uninstall interrupt handler.
+ *
+ * @param priv
+ * Pointer to private structure.
+ *
+ * @return
+ * 0 on success, negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx4_intr_uninstall(struct mlx4_priv *priv)
+{
+ int err = rte_errno; /* Make sure rte_errno remains unchanged. */
+
+ if (priv->intr_handle.fd != -1) {
+ rte_intr_callback_unregister(&priv->intr_handle,
+ (void (*)(void *))
+ mlx4_interrupt_handler,
+ priv);
+ priv->intr_handle.fd = -1;
+ }
+ rte_eal_alarm_cancel((void (*)(void *))mlx4_link_status_alarm, priv);
+ priv->intr_alarm = 0;
+ mlx4_rxq_intr_disable(priv);
+ rte_errno = err;
+ return 0;
+}
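The LSC and RMV events counted above are ultimately delivered through _rte_eth_dev_callback_process(), so they only matter to applications that registered an ethdev callback and enabled intr_conf.lsc. A hedged sketch of such a subscription (generic ethdev API, the port number is a placeholder):

#include <stdio.h>
#include <rte_ethdev.h>

/* Application-side LSC callback; runs in the interrupt host thread. */
static int
example_lsc_cb(uint16_t port_id, enum rte_eth_event_type event,
               void *cb_arg, void *ret_param)
{
        struct rte_eth_link link;

        (void)event;
        (void)cb_arg;
        (void)ret_param;
        rte_eth_link_get_nowait(port_id, &link);
        printf("port %u link is %s\n", port_id,
               link.link_status ? "up" : "down");
        return 0;
}

/* dev_conf.intr_conf.lsc must also be set before rte_eth_dev_start(). */
static void
example_subscribe_lsc(uint16_t port_id)
{
        rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
                                      example_lsc_cb, NULL);
}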
+
+/**
+ * Install interrupt handler.
+ *
+ * @param priv
+ * Pointer to private structure.
+ *
+ * @return
+ * 0 on success, negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx4_intr_install(struct mlx4_priv *priv)
+{
+ const struct rte_intr_conf *const intr_conf =
+ &ETH_DEV(priv)->data->dev_conf.intr_conf;
+ int rc;
+
+ mlx4_intr_uninstall(priv);
+ if (intr_conf->lsc | intr_conf->rmv) {
+ priv->intr_handle.fd = priv->ctx->async_fd;
+ rc = rte_intr_callback_register(&priv->intr_handle,
+ (void (*)(void *))
+ mlx4_interrupt_handler,
+ priv);
+ if (rc < 0) {
+ rte_errno = -rc;
+ goto error;
+ }
+ }
+ return 0;
+error:
+ mlx4_intr_uninstall(priv);
+ return -rte_errno;
+}
+
+/**
+ * DPDK callback for Rx queue interrupt disable.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param idx
+ * Rx queue index.
+ *
+ * @return
+ * 0 on success, negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx4_rx_intr_disable(struct rte_eth_dev *dev, uint16_t idx)
+{
+ struct rxq *rxq = dev->data->rx_queues[idx];
+ struct ibv_cq *ev_cq;
+ void *ev_ctx;
+ int ret;
+
+ if (!rxq || !rxq->channel) {
+ ret = EINVAL;
+ } else {
+ ret = mlx4_glue->get_cq_event(rxq->cq->channel, &ev_cq,
+ &ev_ctx);
+ if (ret || ev_cq != rxq->cq)
+ ret = EINVAL;
+ }
+ if (ret) {
+ rte_errno = ret;
+ WARN("unable to disable interrupt on rx queue %d",
+ idx);
+ } else {
+ rxq->mcq.arm_sn++;
+ mlx4_glue->ack_cq_events(rxq->cq, 1);
+ }
+ return -ret;
+}
+
+/**
+ * DPDK callback for Rx queue interrupt enable.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param idx
+ * Rx queue index.
+ *
+ * @return
+ * 0 on success, negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx4_rx_intr_enable(struct rte_eth_dev *dev, uint16_t idx)
+{
+ struct rxq *rxq = dev->data->rx_queues[idx];
+ int ret = 0;
+
+ if (!rxq || !rxq->channel) {
+ ret = EINVAL;
+ rte_errno = ret;
+ WARN("unable to arm interrupt on rx queue %d", idx);
+ } else {
+ mlx4_arm_cq(rxq, 0);
+ }
+ return -ret;
+}
+
+/**
+ * Enable datapath interrupts.
+ *
+ * @param priv
+ * Pointer to private structure.
+ *
+ * @return
+ * 0 on success, negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx4_rxq_intr_enable(struct mlx4_priv *priv)
+{
+ const struct rte_intr_conf *const intr_conf =
+ &ETH_DEV(priv)->data->dev_conf.intr_conf;
+
+ if (intr_conf->rxq && mlx4_rx_intr_vec_enable(priv) < 0)
+ goto error;
+ return 0;
+error:
+ return -rte_errno;
+}
+
+/**
+ * Disable datapath interrupts, keeping other interrupts intact.
+ *
+ * @param priv
+ * Pointer to private structure.
+ */
+void
+mlx4_rxq_intr_disable(struct mlx4_priv *priv)
+{
+ int err = rte_errno; /* Make sure rte_errno remains unchanged. */
+
+ mlx4_rx_intr_vec_disable(priv);
+ rte_errno = err;
+}
diff --git a/src/spdk/dpdk/drivers/net/mlx4/mlx4_mp.c b/src/spdk/dpdk/drivers/net/mlx4/mlx4_mp.c
new file mode 100644
index 000000000..eca0c20a8
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/mlx4/mlx4_mp.c
@@ -0,0 +1,361 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2019 6WIND S.A.
+ * Copyright 2019 Mellanox Technologies, Ltd
+ */
+
+#include
+#include
+
+#include
+#include
+#include
+
+#include "mlx4.h"
+#include "mlx4_rxtx.h"
+#include "mlx4_utils.h"
+
+/**
+ * Initialize IPC message.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet structure.
+ * @param[out] msg
+ * Pointer to message to fill in.
+ * @param[in] type
+ * Message type.
+ */ +static inline void +mp_init_msg(struct rte_eth_dev *dev, struct rte_mp_msg *msg, + enum mlx4_mp_req_type type) +{ + struct mlx4_mp_param *param = (struct mlx4_mp_param *)msg->param; + + memset(msg, 0, sizeof(*msg)); + strlcpy(msg->name, MLX4_MP_NAME, sizeof(msg->name)); + msg->len_param = sizeof(*param); + param->type = type; + param->port_id = dev->data->port_id; +} + +/** + * IPC message handler of primary process. + * + * @param[in] dev + * Pointer to Ethernet structure. + * @param[in] peer + * Pointer to the peer socket path. + * + * @return + * 0 on success, negative errno value otherwise and rte_errno is set. + */ +static int +mp_primary_handle(const struct rte_mp_msg *mp_msg, const void *peer) +{ + struct rte_mp_msg mp_res; + struct mlx4_mp_param *res = (struct mlx4_mp_param *)mp_res.param; + const struct mlx4_mp_param *param = + (const struct mlx4_mp_param *)mp_msg->param; + struct rte_eth_dev *dev; + struct mlx4_priv *priv; + struct mlx4_mr_cache entry; + uint32_t lkey; + int ret; + + MLX4_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY); + if (!rte_eth_dev_is_valid_port(param->port_id)) { + rte_errno = ENODEV; + ERROR("port %u invalid port ID", param->port_id); + return -rte_errno; + } + dev = &rte_eth_devices[param->port_id]; + priv = dev->data->dev_private; + switch (param->type) { + case MLX4_MP_REQ_CREATE_MR: + mp_init_msg(dev, &mp_res, param->type); + lkey = mlx4_mr_create_primary(dev, &entry, param->args.addr); + if (lkey == UINT32_MAX) + res->result = -rte_errno; + ret = rte_mp_reply(&mp_res, peer); + break; + case MLX4_MP_REQ_VERBS_CMD_FD: + mp_init_msg(dev, &mp_res, param->type); + mp_res.num_fds = 1; + mp_res.fds[0] = priv->ctx->cmd_fd; + res->result = 0; + ret = rte_mp_reply(&mp_res, peer); + break; + default: + rte_errno = EINVAL; + ERROR("port %u invalid mp request type", dev->data->port_id); + return -rte_errno; + } + return ret; +} + +/** + * IPC message handler of a secondary process. + * + * @param[in] dev + * Pointer to Ethernet structure. + * @param[in] peer + * Pointer to the peer socket path. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +mp_secondary_handle(const struct rte_mp_msg *mp_msg, const void *peer) +{ + struct rte_mp_msg mp_res; + struct mlx4_mp_param *res = (struct mlx4_mp_param *)mp_res.param; + const struct mlx4_mp_param *param = + (const struct mlx4_mp_param *)mp_msg->param; + struct rte_eth_dev *dev; + int ret; + + MLX4_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY); + if (!rte_eth_dev_is_valid_port(param->port_id)) { + rte_errno = ENODEV; + ERROR("port %u invalid port ID", param->port_id); + return -rte_errno; + } + dev = &rte_eth_devices[param->port_id]; + switch (param->type) { + case MLX4_MP_REQ_START_RXTX: + INFO("port %u starting datapath", dev->data->port_id); + rte_mb(); + dev->tx_pkt_burst = mlx4_tx_burst; + dev->rx_pkt_burst = mlx4_rx_burst; + mp_init_msg(dev, &mp_res, param->type); + res->result = 0; + ret = rte_mp_reply(&mp_res, peer); + break; + case MLX4_MP_REQ_STOP_RXTX: + INFO("port %u stopping datapath", dev->data->port_id); + dev->tx_pkt_burst = mlx4_tx_burst_removed; + dev->rx_pkt_burst = mlx4_rx_burst_removed; + rte_mb(); + mp_init_msg(dev, &mp_res, param->type); + res->result = 0; + ret = rte_mp_reply(&mp_res, peer); + break; + default: + rte_errno = EINVAL; + ERROR("port %u invalid mp request type", dev->data->port_id); + return -rte_errno; + } + return ret; +} + +/** + * Broadcast request of stopping/starting data-path to secondary processes. 
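Both handlers above follow the usual rte_mp pattern: the request's param[] area carries a small driver-private structure and the handler answers in place with rte_mp_reply(). A stripped-down, hypothetical version of that pattern (the "example_mp" name and parameter struct are placeholders, not driver symbols):

#include <string.h>
#include <rte_eal.h>
#include <rte_string_fns.h>

/* Hypothetical request/reply payload carried in rte_mp_msg.param[]. */
struct example_mp_param {
        int request;
        int result;
};

static int
example_mp_handle(const struct rte_mp_msg *mp_msg, const void *peer)
{
        struct rte_mp_msg mp_res;
        struct example_mp_param *res = (struct example_mp_param *)mp_res.param;
        const struct example_mp_param *req =
                (const struct example_mp_param *)mp_msg->param;

        memset(&mp_res, 0, sizeof(mp_res));
        strlcpy(mp_res.name, mp_msg->name, sizeof(mp_res.name));
        mp_res.len_param = sizeof(*res);
        res->request = req->request;
        res->result = 0;
        return rte_mp_reply(&mp_res, peer);
}

static int
example_mp_register(void)
{
        return rte_mp_action_register("example_mp", example_mp_handle);
}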
+ * + * @param[in] dev + * Pointer to Ethernet structure. + * @param[in] type + * Request type. + */ +static void +mp_req_on_rxtx(struct rte_eth_dev *dev, enum mlx4_mp_req_type type) +{ + struct rte_mp_msg mp_req; + struct rte_mp_msg *mp_res; + struct rte_mp_reply mp_rep; + struct mlx4_mp_param *res __rte_unused; + struct timespec ts = {.tv_sec = MLX4_MP_REQ_TIMEOUT_SEC, .tv_nsec = 0}; + int ret; + int i; + + MLX4_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY); + if (!mlx4_shared_data->secondary_cnt) + return; + if (type != MLX4_MP_REQ_START_RXTX && type != MLX4_MP_REQ_STOP_RXTX) { + ERROR("port %u unknown request (req_type %d)", + dev->data->port_id, type); + return; + } + mp_init_msg(dev, &mp_req, type); + ret = rte_mp_request_sync(&mp_req, &mp_rep, &ts); + if (ret) { + if (rte_errno != ENOTSUP) + ERROR("port %u failed to request stop/start Rx/Tx (%d)", + dev->data->port_id, type); + goto exit; + } + if (mp_rep.nb_sent != mp_rep.nb_received) { + ERROR("port %u not all secondaries responded (req_type %d)", + dev->data->port_id, type); + goto exit; + } + for (i = 0; i < mp_rep.nb_received; i++) { + mp_res = &mp_rep.msgs[i]; + res = (struct mlx4_mp_param *)mp_res->param; + if (res->result) { + ERROR("port %u request failed on secondary #%d", + dev->data->port_id, i); + goto exit; + } + } +exit: + free(mp_rep.msgs); +} + +/** + * Broadcast request of starting data-path to secondary processes. The request + * is synchronous. + * + * @param[in] dev + * Pointer to Ethernet structure. + */ +void +mlx4_mp_req_start_rxtx(struct rte_eth_dev *dev) +{ + mp_req_on_rxtx(dev, MLX4_MP_REQ_START_RXTX); +} + +/** + * Broadcast request of stopping data-path to secondary processes. The request + * is synchronous. + * + * @param[in] dev + * Pointer to Ethernet structure. + */ +void +mlx4_mp_req_stop_rxtx(struct rte_eth_dev *dev) +{ + mp_req_on_rxtx(dev, MLX4_MP_REQ_STOP_RXTX); +} + +/** + * Request Memory Region creation to the primary process. + * + * @param[in] dev + * Pointer to Ethernet structure. + * @param addr + * Target virtual address to register. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx4_mp_req_mr_create(struct rte_eth_dev *dev, uintptr_t addr) +{ + struct rte_mp_msg mp_req; + struct rte_mp_msg *mp_res; + struct rte_mp_reply mp_rep; + struct mlx4_mp_param *req = (struct mlx4_mp_param *)mp_req.param; + struct mlx4_mp_param *res; + struct timespec ts = {.tv_sec = MLX4_MP_REQ_TIMEOUT_SEC, .tv_nsec = 0}; + int ret; + + MLX4_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY); + mp_init_msg(dev, &mp_req, MLX4_MP_REQ_CREATE_MR); + req->args.addr = addr; + ret = rte_mp_request_sync(&mp_req, &mp_rep, &ts); + if (ret) { + ERROR("port %u request to primary process failed", + dev->data->port_id); + return -rte_errno; + } + MLX4_ASSERT(mp_rep.nb_received == 1); + mp_res = &mp_rep.msgs[0]; + res = (struct mlx4_mp_param *)mp_res->param; + ret = res->result; + if (ret) + rte_errno = -ret; + free(mp_rep.msgs); + return ret; +} + +/** + * IPC message handler of primary process. + * + * @param[in] dev + * Pointer to Ethernet structure. + * + * @return + * fd on success, a negative errno value otherwise and rte_errno is set. 
+ */ +int +mlx4_mp_req_verbs_cmd_fd(struct rte_eth_dev *dev) +{ + struct rte_mp_msg mp_req; + struct rte_mp_msg *mp_res; + struct rte_mp_reply mp_rep; + struct mlx4_mp_param *res; + struct timespec ts = {.tv_sec = MLX4_MP_REQ_TIMEOUT_SEC, .tv_nsec = 0}; + int ret; + + MLX4_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY); + mp_init_msg(dev, &mp_req, MLX4_MP_REQ_VERBS_CMD_FD); + ret = rte_mp_request_sync(&mp_req, &mp_rep, &ts); + if (ret) { + ERROR("port %u request to primary process failed", + dev->data->port_id); + return -rte_errno; + } + MLX4_ASSERT(mp_rep.nb_received == 1); + mp_res = &mp_rep.msgs[0]; + res = (struct mlx4_mp_param *)mp_res->param; + if (res->result) { + rte_errno = -res->result; + ERROR("port %u failed to get command FD from primary process", + dev->data->port_id); + ret = -rte_errno; + goto exit; + } + MLX4_ASSERT(mp_res->num_fds == 1); + ret = mp_res->fds[0]; + DEBUG("port %u command FD from primary is %d", + dev->data->port_id, ret); +exit: + free(mp_rep.msgs); + return ret; +} + +/** + * Initialize by primary process. + */ +int +mlx4_mp_init_primary(void) +{ + int ret; + + MLX4_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY); + + /* primary is allowed to not support IPC */ + ret = rte_mp_action_register(MLX4_MP_NAME, mp_primary_handle); + if (ret && rte_errno != ENOTSUP) + return -1; + return 0; +} + +/** + * Un-initialize by primary process. + */ +void +mlx4_mp_uninit_primary(void) +{ + MLX4_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY); + rte_mp_action_unregister(MLX4_MP_NAME); +} + +/** + * Initialize by secondary process. + */ +int +mlx4_mp_init_secondary(void) +{ + MLX4_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY); + return rte_mp_action_register(MLX4_MP_NAME, mp_secondary_handle); +} + +/** + * Un-initialize by secondary process. + */ +void +mlx4_mp_uninit_secondary(void) +{ + MLX4_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY); + rte_mp_action_unregister(MLX4_MP_NAME); +} diff --git a/src/spdk/dpdk/drivers/net/mlx4/mlx4_mr.c b/src/spdk/dpdk/drivers/net/mlx4/mlx4_mr.c new file mode 100644 index 000000000..6b2f0cf18 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/mlx4/mlx4_mr.c @@ -0,0 +1,1462 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2017 6WIND S.A. + * Copyright 2017 Mellanox Technologies, Ltd + */ + +/** + * @file + * Memory management functions for mlx4 driver. + */ + +#include +#include +#include +#include +#include + +/* Verbs headers do not support -pedantic. */ +#ifdef PEDANTIC +#pragma GCC diagnostic ignored "-Wpedantic" +#endif +#include +#ifdef PEDANTIC +#pragma GCC diagnostic error "-Wpedantic" +#endif + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mlx4_glue.h" +#include "mlx4_mr.h" +#include "mlx4_rxtx.h" +#include "mlx4_utils.h" + +struct mr_find_contig_memsegs_data { + uintptr_t addr; + uintptr_t start; + uintptr_t end; + const struct rte_memseg_list *msl; +}; + +struct mr_update_mp_data { + struct rte_eth_dev *dev; + struct mlx4_mr_ctrl *mr_ctrl; + int ret; +}; + +/** + * Expand B-tree table to a given size. Can't be called with holding + * memory_hotplug_lock or priv->mr.rwlock due to rte_realloc(). + * + * @param bt + * Pointer to B-tree structure. + * @param n + * Number of entries for expansion. + * + * @return + * 0 on success, -1 on failure. 
+ */ +static int +mr_btree_expand(struct mlx4_mr_btree *bt, int n) +{ + void *mem; + int ret = 0; + + if (n <= bt->size) + return ret; + /* + * Downside of directly using rte_realloc() is that SOCKET_ID_ANY is + * used inside if there's no room to expand. Because this is a quite + * rare case and a part of very slow path, it is very acceptable. + * Initially cache_bh[] will be given practically enough space and once + * it is expanded, expansion wouldn't be needed again ever. + */ + mem = rte_realloc(bt->table, n * sizeof(struct mlx4_mr_cache), 0); + if (mem == NULL) { + /* Not an error, B-tree search will be skipped. */ + WARN("failed to expand MR B-tree (%p) table", (void *)bt); + ret = -1; + } else { + DEBUG("expanded MR B-tree table (size=%u)", n); + bt->table = mem; + bt->size = n; + } + return ret; +} + +/** + * Look up LKey from given B-tree lookup table, store the last index and return + * searched LKey. + * + * @param bt + * Pointer to B-tree structure. + * @param[out] idx + * Pointer to index. Even on search failure, returns index where it stops + * searching so that index can be used when inserting a new entry. + * @param addr + * Search key. + * + * @return + * Searched LKey on success, UINT32_MAX on no match. + */ +static uint32_t +mr_btree_lookup(struct mlx4_mr_btree *bt, uint16_t *idx, uintptr_t addr) +{ + struct mlx4_mr_cache *lkp_tbl; + uint16_t n; + uint16_t base = 0; + + MLX4_ASSERT(bt != NULL); + lkp_tbl = *bt->table; + n = bt->len; + /* First entry must be NULL for comparison. */ + MLX4_ASSERT(bt->len > 0 || (lkp_tbl[0].start == 0 && + lkp_tbl[0].lkey == UINT32_MAX)); + /* Binary search. */ + do { + register uint16_t delta = n >> 1; + + if (addr < lkp_tbl[base + delta].start) { + n = delta; + } else { + base += delta; + n -= delta; + } + } while (n > 1); + MLX4_ASSERT(addr >= lkp_tbl[base].start); + *idx = base; + if (addr < lkp_tbl[base].end) + return lkp_tbl[base].lkey; + /* Not found. */ + return UINT32_MAX; +} + +/** + * Insert an entry to B-tree lookup table. + * + * @param bt + * Pointer to B-tree structure. + * @param entry + * Pointer to new entry to insert. + * + * @return + * 0 on success, -1 on failure. + */ +static int +mr_btree_insert(struct mlx4_mr_btree *bt, struct mlx4_mr_cache *entry) +{ + struct mlx4_mr_cache *lkp_tbl; + uint16_t idx = 0; + size_t shift; + + MLX4_ASSERT(bt != NULL); + MLX4_ASSERT(bt->len <= bt->size); + MLX4_ASSERT(bt->len > 0); + lkp_tbl = *bt->table; + /* Find out the slot for insertion. */ + if (mr_btree_lookup(bt, &idx, entry->start) != UINT32_MAX) { + DEBUG("abort insertion to B-tree(%p): already exist at" + " idx=%u [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x", + (void *)bt, idx, entry->start, entry->end, entry->lkey); + /* Already exist, return. */ + return 0; + } + /* If table is full, return error. */ + if (unlikely(bt->len == bt->size)) { + bt->overflow = 1; + return -1; + } + /* Insert entry. */ + ++idx; + shift = (bt->len - idx) * sizeof(struct mlx4_mr_cache); + if (shift) + memmove(&lkp_tbl[idx + 1], &lkp_tbl[idx], shift); + lkp_tbl[idx] = *entry; + bt->len++; + DEBUG("inserted B-tree(%p)[%u]," + " [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x", + (void *)bt, idx, entry->start, entry->end, entry->lkey); + return 0; +} + +/** + * Initialize B-tree and allocate memory for lookup table. + * + * @param bt + * Pointer to B-tree structure. + * @param n + * Number of entries to allocate. + * @param socket + * NUMA socket on which memory must be allocated. 
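mr_btree_lookup() depends on the table being sorted by start address and on the sentinel first entry installed by mlx4_mr_btree_init(), so the binary search can always land on the rightmost entry whose start is not above the key. A small self-contained illustration of that invariant (toy code, not driver code):

#include <stdint.h>

struct example_range {
        uintptr_t start;
        uintptr_t end;
        uint32_t lkey;
};

/*
 * Rightmost entry whose start is <= addr; tbl[0] is expected to be the
 * sentinel [0, 0) entry with lkey UINT32_MAX, as installed by
 * mlx4_mr_btree_init().
 */
static uint32_t
example_lookup(const struct example_range *tbl, uint16_t len, uintptr_t addr)
{
        uint16_t base = 0;
        uint16_t n = len;

        while (n > 1) {
                uint16_t delta = n >> 1;

                if (addr < tbl[base + delta].start) {
                        n = delta;
                } else {
                        base += delta;
                        n -= delta;
                }
        }
        /* Hit only if addr also falls below the entry's end address. */
        return addr < tbl[base].end ? tbl[base].lkey : UINT32_MAX;
}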
+ * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx4_mr_btree_init(struct mlx4_mr_btree *bt, int n, int socket) +{ + if (bt == NULL) { + rte_errno = EINVAL; + return -rte_errno; + } + memset(bt, 0, sizeof(*bt)); + bt->table = rte_calloc_socket("B-tree table", + n, sizeof(struct mlx4_mr_cache), + 0, socket); + if (bt->table == NULL) { + rte_errno = ENOMEM; + ERROR("failed to allocate memory for btree cache on socket %d", + socket); + return -rte_errno; + } + bt->size = n; + /* First entry must be NULL for binary search. */ + (*bt->table)[bt->len++] = (struct mlx4_mr_cache) { + .lkey = UINT32_MAX, + }; + DEBUG("initialized B-tree %p with table %p", + (void *)bt, (void *)bt->table); + return 0; +} + +/** + * Free B-tree resources. + * + * @param bt + * Pointer to B-tree structure. + */ +void +mlx4_mr_btree_free(struct mlx4_mr_btree *bt) +{ + if (bt == NULL) + return; + DEBUG("freeing B-tree %p with table %p", (void *)bt, (void *)bt->table); + rte_free(bt->table); + memset(bt, 0, sizeof(*bt)); +} + +#ifdef RTE_LIBRTE_MLX4_DEBUG +/** + * Dump all the entries in a B-tree + * + * @param bt + * Pointer to B-tree structure. + */ +void +mlx4_mr_btree_dump(struct mlx4_mr_btree *bt) +{ + int idx; + struct mlx4_mr_cache *lkp_tbl; + + if (bt == NULL) + return; + lkp_tbl = *bt->table; + for (idx = 0; idx < bt->len; ++idx) { + struct mlx4_mr_cache *entry = &lkp_tbl[idx]; + + DEBUG("B-tree(%p)[%u]," + " [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x", + (void *)bt, idx, entry->start, entry->end, entry->lkey); + } +} +#endif + +/** + * Find virtually contiguous memory chunk in a given MR. + * + * @param dev + * Pointer to MR structure. + * @param[out] entry + * Pointer to returning MR cache entry. If not found, this will not be + * updated. + * @param start_idx + * Start index of the memseg bitmap. + * + * @return + * Next index to go on lookup. + */ +static int +mr_find_next_chunk(struct mlx4_mr *mr, struct mlx4_mr_cache *entry, + int base_idx) +{ + uintptr_t start = 0; + uintptr_t end = 0; + uint32_t idx = 0; + + /* MR for external memory doesn't have memseg list. */ + if (mr->msl == NULL) { + struct ibv_mr *ibv_mr = mr->ibv_mr; + + MLX4_ASSERT(mr->ms_bmp_n == 1); + MLX4_ASSERT(mr->ms_n == 1); + MLX4_ASSERT(base_idx == 0); + /* + * Can't search it from memseg list but get it directly from + * verbs MR as there's only one chunk. + */ + entry->start = (uintptr_t)ibv_mr->addr; + entry->end = (uintptr_t)ibv_mr->addr + mr->ibv_mr->length; + entry->lkey = rte_cpu_to_be_32(mr->ibv_mr->lkey); + /* Returning 1 ends iteration. */ + return 1; + } + for (idx = base_idx; idx < mr->ms_bmp_n; ++idx) { + if (rte_bitmap_get(mr->ms_bmp, idx)) { + const struct rte_memseg_list *msl; + const struct rte_memseg *ms; + + msl = mr->msl; + ms = rte_fbarray_get(&msl->memseg_arr, + mr->ms_base_idx + idx); + MLX4_ASSERT(msl->page_sz == ms->hugepage_sz); + if (!start) + start = ms->addr_64; + end = ms->addr_64 + ms->hugepage_sz; + } else if (start) { + /* Passed the end of a fragment. */ + break; + } + } + if (start) { + /* Found one chunk. */ + entry->start = start; + entry->end = end; + entry->lkey = rte_cpu_to_be_32(mr->ibv_mr->lkey); + } + return idx; +} + +/** + * Insert a MR to the global B-tree cache. It may fail due to low-on-memory. + * Then, this entry will have to be searched by mr_lookup_dev_list() in + * mlx4_mr_create() on miss. + * + * @param dev + * Pointer to Ethernet device. + * @param mr + * Pointer to MR to insert. + * + * @return + * 0 on success, -1 on failure. 
+ */ +static int +mr_insert_dev_cache(struct rte_eth_dev *dev, struct mlx4_mr *mr) +{ + struct mlx4_priv *priv = dev->data->dev_private; + unsigned int n; + + DEBUG("port %u inserting MR(%p) to global cache", + dev->data->port_id, (void *)mr); + for (n = 0; n < mr->ms_bmp_n; ) { + struct mlx4_mr_cache entry; + + memset(&entry, 0, sizeof(entry)); + /* Find a contiguous chunk and advance the index. */ + n = mr_find_next_chunk(mr, &entry, n); + if (!entry.end) + break; + if (mr_btree_insert(&priv->mr.cache, &entry) < 0) { + /* + * Overflowed, but the global table cannot be expanded + * because of deadlock. + */ + return -1; + } + } + return 0; +} + +/** + * Look up address in the original global MR list. + * + * @param dev + * Pointer to Ethernet device. + * @param[out] entry + * Pointer to returning MR cache entry. If no match, this will not be updated. + * @param addr + * Search key. + * + * @return + * Found MR on match, NULL otherwise. + */ +static struct mlx4_mr * +mr_lookup_dev_list(struct rte_eth_dev *dev, struct mlx4_mr_cache *entry, + uintptr_t addr) +{ + struct mlx4_priv *priv = dev->data->dev_private; + struct mlx4_mr *mr; + + /* Iterate all the existing MRs. */ + LIST_FOREACH(mr, &priv->mr.mr_list, mr) { + unsigned int n; + + if (mr->ms_n == 0) + continue; + for (n = 0; n < mr->ms_bmp_n; ) { + struct mlx4_mr_cache ret; + + memset(&ret, 0, sizeof(ret)); + n = mr_find_next_chunk(mr, &ret, n); + if (addr >= ret.start && addr < ret.end) { + /* Found. */ + *entry = ret; + return mr; + } + } + } + return NULL; +} + +/** + * Look up address on device. + * + * @param dev + * Pointer to Ethernet device. + * @param[out] entry + * Pointer to returning MR cache entry. If no match, this will not be updated. + * @param addr + * Search key. + * + * @return + * Searched LKey on success, UINT32_MAX on failure and rte_errno is set. + */ +static uint32_t +mr_lookup_dev(struct rte_eth_dev *dev, struct mlx4_mr_cache *entry, + uintptr_t addr) +{ + struct mlx4_priv *priv = dev->data->dev_private; + uint16_t idx; + uint32_t lkey = UINT32_MAX; + struct mlx4_mr *mr; + + /* + * If the global cache has overflowed since it failed to expand the + * B-tree table, it can't have all the existing MRs. Then, the address + * has to be searched by traversing the original MR list instead, which + * is very slow path. Otherwise, the global cache is all inclusive. + */ + if (!unlikely(priv->mr.cache.overflow)) { + lkey = mr_btree_lookup(&priv->mr.cache, &idx, addr); + if (lkey != UINT32_MAX) + *entry = (*priv->mr.cache.table)[idx]; + } else { + /* Falling back to the slowest path. */ + mr = mr_lookup_dev_list(dev, entry, addr); + if (mr != NULL) + lkey = entry->lkey; + } + MLX4_ASSERT(lkey == UINT32_MAX || (addr >= entry->start && + addr < entry->end)); + return lkey; +} + +/** + * Free MR resources. MR lock must not be held to avoid a deadlock. rte_free() + * can raise memory free event and the callback function will spin on the lock. + * + * @param mr + * Pointer to MR to free. + */ +static void +mr_free(struct mlx4_mr *mr) +{ + if (mr == NULL) + return; + DEBUG("freeing MR(%p):", (void *)mr); + if (mr->ibv_mr != NULL) + claim_zero(mlx4_glue->dereg_mr(mr->ibv_mr)); + if (mr->ms_bmp != NULL) + rte_bitmap_free(mr->ms_bmp); + rte_free(mr); +} + +/** + * Release resources of detached MR having no online entry. + * + * @param dev + * Pointer to Ethernet device. 
+ */ +static void +mlx4_mr_garbage_collect(struct rte_eth_dev *dev) +{ + struct mlx4_priv *priv = dev->data->dev_private; + struct mlx4_mr *mr_next; + struct mlx4_mr_list free_list = LIST_HEAD_INITIALIZER(free_list); + + /* Must be called from the primary process. */ + MLX4_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY); + /* + * MR can't be freed with holding the lock because rte_free() could call + * memory free callback function. This will be a deadlock situation. + */ + rte_rwlock_write_lock(&priv->mr.rwlock); + /* Detach the whole free list and release it after unlocking. */ + free_list = priv->mr.mr_free_list; + LIST_INIT(&priv->mr.mr_free_list); + rte_rwlock_write_unlock(&priv->mr.rwlock); + /* Release resources. */ + mr_next = LIST_FIRST(&free_list); + while (mr_next != NULL) { + struct mlx4_mr *mr = mr_next; + + mr_next = LIST_NEXT(mr, mr); + mr_free(mr); + } +} + +/* Called during rte_memseg_contig_walk() by mlx4_mr_create(). */ +static int +mr_find_contig_memsegs_cb(const struct rte_memseg_list *msl, + const struct rte_memseg *ms, size_t len, void *arg) +{ + struct mr_find_contig_memsegs_data *data = arg; + + if (data->addr < ms->addr_64 || data->addr >= ms->addr_64 + len) + return 0; + /* Found, save it and stop walking. */ + data->start = ms->addr_64; + data->end = ms->addr_64 + len; + data->msl = msl; + return 1; +} + +/** + * Create a new global Memory Region (MR) for a missing virtual address. + * This API should be called on a secondary process, then a request is sent to + * the primary process in order to create a MR for the address. As the global MR + * list is on the shared memory, following LKey lookup should succeed unless the + * request fails. + * + * @param dev + * Pointer to Ethernet device. + * @param[out] entry + * Pointer to returning MR cache entry, found in the global cache or newly + * created. If failed to create one, this will not be updated. + * @param addr + * Target virtual address to register. + * + * @return + * Searched LKey on success, UINT32_MAX on failure and rte_errno is set. + */ +static uint32_t +mlx4_mr_create_secondary(struct rte_eth_dev *dev, struct mlx4_mr_cache *entry, + uintptr_t addr) +{ + struct mlx4_priv *priv = dev->data->dev_private; + int ret; + + DEBUG("port %u requesting MR creation for address (%p)", + dev->data->port_id, (void *)addr); + ret = mlx4_mp_req_mr_create(dev, addr); + if (ret) { + DEBUG("port %u fail to request MR creation for address (%p)", + dev->data->port_id, (void *)addr); + return UINT32_MAX; + } + rte_rwlock_read_lock(&priv->mr.rwlock); + /* Fill in output data. */ + mr_lookup_dev(dev, entry, addr); + /* Lookup can't fail. */ + MLX4_ASSERT(entry->lkey != UINT32_MAX); + rte_rwlock_read_unlock(&priv->mr.rwlock); + DEBUG("port %u MR CREATED by primary process for %p:\n" + " [0x%" PRIxPTR ", 0x%" PRIxPTR "), lkey=0x%x", + dev->data->port_id, (void *)addr, + entry->start, entry->end, entry->lkey); + return entry->lkey; +} + +/** + * Create a new global Memory Region (MR) for a missing virtual address. + * Register entire virtually contiguous memory chunk around the address. + * This must be called from the primary process. + * + * @param dev + * Pointer to Ethernet device. + * @param[out] entry + * Pointer to returning MR cache entry, found in the global cache or newly + * created. If failed to create one, this will not be updated. + * @param addr + * Target virtual address to register. + * + * @return + * Searched LKey on success, UINT32_MAX on failure and rte_errno is set. 
+ */ +uint32_t +mlx4_mr_create_primary(struct rte_eth_dev *dev, struct mlx4_mr_cache *entry, + uintptr_t addr) +{ + struct mlx4_priv *priv = dev->data->dev_private; + const struct rte_memseg_list *msl; + const struct rte_memseg *ms; + struct mlx4_mr *mr = NULL; + size_t len; + uint32_t ms_n; + uint32_t bmp_size; + void *bmp_mem; + int ms_idx_shift = -1; + unsigned int n; + struct mr_find_contig_memsegs_data data = { + .addr = addr, + }; + struct mr_find_contig_memsegs_data data_re; + + DEBUG("port %u creating a MR using address (%p)", + dev->data->port_id, (void *)addr); + /* + * Release detached MRs if any. This can't be called with holding either + * memory_hotplug_lock or priv->mr.rwlock. MRs on the free list have + * been detached by the memory free event but it couldn't be released + * inside the callback due to deadlock. As a result, releasing resources + * is quite opportunistic. + */ + mlx4_mr_garbage_collect(dev); + /* + * If enabled, find out a contiguous virtual address chunk in use, to + * which the given address belongs, in order to register maximum range. + * In the best case where mempools are not dynamically recreated and + * '--socket-mem' is specified as an EAL option, it is very likely to + * have only one MR(LKey) per a socket and per a hugepage-size even + * though the system memory is highly fragmented. As the whole memory + * chunk will be pinned by kernel, it can't be reused unless entire + * chunk is freed from EAL. + * + * If disabled, just register one memseg (page). Then, memory + * consumption will be minimized but it may drop performance if there + * are many MRs to lookup on the datapath. + */ + if (!priv->mr_ext_memseg_en) { + data.msl = rte_mem_virt2memseg_list((void *)addr); + data.start = RTE_ALIGN_FLOOR(addr, data.msl->page_sz); + data.end = data.start + data.msl->page_sz; + } else if (!rte_memseg_contig_walk(mr_find_contig_memsegs_cb, &data)) { + WARN("port %u unable to find virtually contiguous" + " chunk for address (%p)." + " rte_memseg_contig_walk() failed.", + dev->data->port_id, (void *)addr); + rte_errno = ENXIO; + goto err_nolock; + } +alloc_resources: + /* Addresses must be page-aligned. */ + MLX4_ASSERT(rte_is_aligned((void *)data.start, data.msl->page_sz)); + MLX4_ASSERT(rte_is_aligned((void *)data.end, data.msl->page_sz)); + msl = data.msl; + ms = rte_mem_virt2memseg((void *)data.start, msl); + len = data.end - data.start; + MLX4_ASSERT(msl->page_sz == ms->hugepage_sz); + /* Number of memsegs in the range. */ + ms_n = len / msl->page_sz; + DEBUG("port %u extending %p to [0x%" PRIxPTR ", 0x%" PRIxPTR ")," + " page_sz=0x%" PRIx64 ", ms_n=%u", + dev->data->port_id, (void *)addr, + data.start, data.end, msl->page_sz, ms_n); + /* Size of memory for bitmap. */ + bmp_size = rte_bitmap_get_memory_footprint(ms_n); + mr = rte_zmalloc_socket(NULL, + RTE_ALIGN_CEIL(sizeof(*mr), + RTE_CACHE_LINE_SIZE) + + bmp_size, + RTE_CACHE_LINE_SIZE, msl->socket_id); + if (mr == NULL) { + WARN("port %u unable to allocate memory for a new MR of" + " address (%p).", + dev->data->port_id, (void *)addr); + rte_errno = ENOMEM; + goto err_nolock; + } + mr->msl = msl; + /* + * Save the index of the first memseg and initialize memseg bitmap. 
To + * see if a memseg of ms_idx in the memseg-list is still valid, check: + * rte_bitmap_get(mr->bmp, ms_idx - mr->ms_base_idx) + */ + mr->ms_base_idx = rte_fbarray_find_idx(&msl->memseg_arr, ms); + bmp_mem = RTE_PTR_ALIGN_CEIL(mr + 1, RTE_CACHE_LINE_SIZE); + mr->ms_bmp = rte_bitmap_init(ms_n, bmp_mem, bmp_size); + if (mr->ms_bmp == NULL) { + WARN("port %u unable to initialize bitmap for a new MR of" + " address (%p).", + dev->data->port_id, (void *)addr); + rte_errno = EINVAL; + goto err_nolock; + } + /* + * Should recheck whether the extended contiguous chunk is still valid. + * Because memory_hotplug_lock can't be held if there's any memory + * related calls in a critical path, resource allocation above can't be + * locked. If the memory has been changed at this point, try again with + * just single page. If not, go on with the big chunk atomically from + * here. + */ + rte_mcfg_mem_read_lock(); + data_re = data; + if (len > msl->page_sz && + !rte_memseg_contig_walk(mr_find_contig_memsegs_cb, &data_re)) { + WARN("port %u unable to find virtually contiguous" + " chunk for address (%p)." + " rte_memseg_contig_walk() failed.", + dev->data->port_id, (void *)addr); + rte_errno = ENXIO; + goto err_memlock; + } + if (data.start != data_re.start || data.end != data_re.end) { + /* + * The extended contiguous chunk has been changed. Try again + * with single memseg instead. + */ + data.start = RTE_ALIGN_FLOOR(addr, msl->page_sz); + data.end = data.start + msl->page_sz; + rte_mcfg_mem_read_unlock(); + mr_free(mr); + goto alloc_resources; + } + MLX4_ASSERT(data.msl == data_re.msl); + rte_rwlock_write_lock(&priv->mr.rwlock); + /* + * Check the address is really missing. If other thread already created + * one or it is not found due to overflow, abort and return. + */ + if (mr_lookup_dev(dev, entry, addr) != UINT32_MAX) { + /* + * Insert to the global cache table. It may fail due to + * low-on-memory. Then, this entry will have to be searched + * here again. + */ + mr_btree_insert(&priv->mr.cache, entry); + DEBUG("port %u found MR for %p on final lookup, abort", + dev->data->port_id, (void *)addr); + rte_rwlock_write_unlock(&priv->mr.rwlock); + rte_mcfg_mem_read_unlock(); + /* + * Must be unlocked before calling rte_free() because + * mlx4_mr_mem_event_free_cb() can be called inside. + */ + mr_free(mr); + return entry->lkey; + } + /* + * Trim start and end addresses for verbs MR. Set bits for registering + * memsegs but exclude already registered ones. Bitmap can be + * fragmented. + */ + for (n = 0; n < ms_n; ++n) { + uintptr_t start; + struct mlx4_mr_cache ret; + + memset(&ret, 0, sizeof(ret)); + start = data_re.start + n * msl->page_sz; + /* Exclude memsegs already registered by other MRs. */ + if (mr_lookup_dev(dev, &ret, start) == UINT32_MAX) { + /* + * Start from the first unregistered memseg in the + * extended range. + */ + if (ms_idx_shift == -1) { + mr->ms_base_idx += n; + data.start = start; + ms_idx_shift = n; + } + data.end = start + msl->page_sz; + rte_bitmap_set(mr->ms_bmp, n - ms_idx_shift); + ++mr->ms_n; + } + } + len = data.end - data.start; + mr->ms_bmp_n = len / msl->page_sz; + MLX4_ASSERT(ms_idx_shift + mr->ms_bmp_n <= ms_n); + /* + * Finally create a verbs MR for the memory chunk. ibv_reg_mr() can be + * called with holding the memory lock because it doesn't use + * mlx4_alloc_buf_extern() which eventually calls rte_malloc_socket() + * through mlx4_alloc_verbs_buf(). 
+ */ + mr->ibv_mr = mlx4_glue->reg_mr(priv->pd, (void *)data.start, len, + IBV_ACCESS_LOCAL_WRITE); + if (mr->ibv_mr == NULL) { + WARN("port %u fail to create a verbs MR for address (%p)", + dev->data->port_id, (void *)addr); + rte_errno = EINVAL; + goto err_mrlock; + } + MLX4_ASSERT((uintptr_t)mr->ibv_mr->addr == data.start); + MLX4_ASSERT(mr->ibv_mr->length == len); + LIST_INSERT_HEAD(&priv->mr.mr_list, mr, mr); + DEBUG("port %u MR CREATED (%p) for %p:\n" + " [0x%" PRIxPTR ", 0x%" PRIxPTR ")," + " lkey=0x%x base_idx=%u ms_n=%u, ms_bmp_n=%u", + dev->data->port_id, (void *)mr, (void *)addr, + data.start, data.end, rte_cpu_to_be_32(mr->ibv_mr->lkey), + mr->ms_base_idx, mr->ms_n, mr->ms_bmp_n); + /* Insert to the global cache table. */ + mr_insert_dev_cache(dev, mr); + /* Fill in output data. */ + mr_lookup_dev(dev, entry, addr); + /* Lookup can't fail. */ + MLX4_ASSERT(entry->lkey != UINT32_MAX); + rte_rwlock_write_unlock(&priv->mr.rwlock); + rte_mcfg_mem_read_unlock(); + return entry->lkey; +err_mrlock: + rte_rwlock_write_unlock(&priv->mr.rwlock); +err_memlock: + rte_mcfg_mem_read_unlock(); +err_nolock: + /* + * In case of error, as this can be called in a datapath, a warning + * message per an error is preferable instead. Must be unlocked before + * calling rte_free() because mlx4_mr_mem_event_free_cb() can be called + * inside. + */ + mr_free(mr); + return UINT32_MAX; +} + +/** + * Create a new global Memory Region (MR) for a missing virtual address. + * This can be called from primary and secondary process. + * + * @param dev + * Pointer to Ethernet device. + * @param[out] entry + * Pointer to returning MR cache entry, found in the global cache or newly + * created. If failed to create one, this will not be updated. + * @param addr + * Target virtual address to register. + * + * @return + * Searched LKey on success, UINT32_MAX on failure and rte_errno is set. + */ +static uint32_t +mlx4_mr_create(struct rte_eth_dev *dev, struct mlx4_mr_cache *entry, + uintptr_t addr) +{ + uint32_t ret = 0; + + switch (rte_eal_process_type()) { + case RTE_PROC_PRIMARY: + ret = mlx4_mr_create_primary(dev, entry, addr); + break; + case RTE_PROC_SECONDARY: + ret = mlx4_mr_create_secondary(dev, entry, addr); + break; + default: + break; + } + return ret; +} + +/** + * Rebuild the global B-tree cache of device from the original MR list. + * + * @param dev + * Pointer to Ethernet device. + */ +static void +mr_rebuild_dev_cache(struct rte_eth_dev *dev) +{ + struct mlx4_priv *priv = dev->data->dev_private; + struct mlx4_mr *mr; + + DEBUG("port %u rebuild dev cache[]", dev->data->port_id); + /* Flush cache to rebuild. */ + priv->mr.cache.len = 1; + priv->mr.cache.overflow = 0; + /* Iterate all the existing MRs. */ + LIST_FOREACH(mr, &priv->mr.mr_list, mr) + if (mr_insert_dev_cache(dev, mr) < 0) + return; +} + +/** + * Callback for memory free event. Iterate freed memsegs and check whether it + * belongs to an existing MR. If found, clear the bit from bitmap of MR. As a + * result, the MR would be fragmented. If it becomes empty, the MR will be freed + * later by mlx4_mr_garbage_collect(). + * + * The global cache must be rebuilt if there's any change and this event has to + * be propagated to dataplane threads to flush the local caches. + * + * @param dev + * Pointer to Ethernet device. + * @param addr + * Address of freed memory. + * @param len + * Size of freed memory. 
+ */ +static void +mlx4_mr_mem_event_free_cb(struct rte_eth_dev *dev, const void *addr, size_t len) +{ + struct mlx4_priv *priv = dev->data->dev_private; + const struct rte_memseg_list *msl; + struct mlx4_mr *mr; + int ms_n; + int i; + int rebuild = 0; + + DEBUG("port %u free callback: addr=%p, len=%zu", + dev->data->port_id, addr, len); + msl = rte_mem_virt2memseg_list(addr); + /* addr and len must be page-aligned. */ + MLX4_ASSERT((uintptr_t)addr == + RTE_ALIGN((uintptr_t)addr, msl->page_sz)); + MLX4_ASSERT(len == RTE_ALIGN(len, msl->page_sz)); + ms_n = len / msl->page_sz; + rte_rwlock_write_lock(&priv->mr.rwlock); + /* Clear bits of freed memsegs from MR. */ + for (i = 0; i < ms_n; ++i) { + const struct rte_memseg *ms; + struct mlx4_mr_cache entry; + uintptr_t start; + int ms_idx; + uint32_t pos; + + /* Find MR having this memseg. */ + start = (uintptr_t)addr + i * msl->page_sz; + mr = mr_lookup_dev_list(dev, &entry, start); + if (mr == NULL) + continue; + MLX4_ASSERT(mr->msl); /* Can't be external memory. */ + ms = rte_mem_virt2memseg((void *)start, msl); + MLX4_ASSERT(ms != NULL); + MLX4_ASSERT(msl->page_sz == ms->hugepage_sz); + ms_idx = rte_fbarray_find_idx(&msl->memseg_arr, ms); + pos = ms_idx - mr->ms_base_idx; + MLX4_ASSERT(rte_bitmap_get(mr->ms_bmp, pos)); + MLX4_ASSERT(pos < mr->ms_bmp_n); + DEBUG("port %u MR(%p): clear bitmap[%u] for addr %p", + dev->data->port_id, (void *)mr, pos, (void *)start); + rte_bitmap_clear(mr->ms_bmp, pos); + if (--mr->ms_n == 0) { + LIST_REMOVE(mr, mr); + LIST_INSERT_HEAD(&priv->mr.mr_free_list, mr, mr); + DEBUG("port %u remove MR(%p) from list", + dev->data->port_id, (void *)mr); + } + /* + * MR is fragmented or will be freed. the global cache must be + * rebuilt. + */ + rebuild = 1; + } + if (rebuild) { + mr_rebuild_dev_cache(dev); + /* + * Flush local caches by propagating invalidation across cores. + * rte_smp_wmb() is enough to synchronize this event. If one of + * freed memsegs is seen by other core, that means the memseg + * has been allocated by allocator, which will come after this + * free call. Therefore, this store instruction (incrementing + * generation below) will be guaranteed to be seen by other core + * before the core sees the newly allocated memory. + */ + ++priv->mr.dev_gen; + DEBUG("broadcasting local cache flush, gen=%d", + priv->mr.dev_gen); + rte_smp_wmb(); + } + rte_rwlock_write_unlock(&priv->mr.rwlock); +#ifdef RTE_LIBRTE_MLX4_DEBUG + if (rebuild) + mlx4_mr_dump_dev(dev); +#endif +} + +/** + * Callback for memory event. + * + * @param event_type + * Memory event type. + * @param addr + * Address of memory. + * @param len + * Size of memory. + */ +void +mlx4_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr, + size_t len, void *arg __rte_unused) +{ + struct mlx4_priv *priv; + struct mlx4_dev_list *dev_list = &mlx4_shared_data->mem_event_cb_list; + + /* Must be called from the primary process. */ + MLX4_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY); + switch (event_type) { + case RTE_MEM_EVENT_FREE: + rte_rwlock_read_lock(&mlx4_shared_data->mem_event_rwlock); + /* Iterate all the existing mlx4 devices. */ + LIST_FOREACH(priv, dev_list, mem_event_cb) + mlx4_mr_mem_event_free_cb(ETH_DEV(priv), addr, len); + rte_rwlock_read_unlock(&mlx4_shared_data->mem_event_rwlock); + break; + case RTE_MEM_EVENT_ALLOC: + default: + break; + } +} + +/** + * Look up address in the global MR cache table. If not found, create a new MR. + * Insert the found/created entry to local bottom-half cache table. 
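mlx4_mr_mem_event_cb() only runs if it has been registered with the EAL memory subsystem; the registration itself lives elsewhere in the driver and is not part of this hunk. A hedged sketch of how such a hook is typically installed once in the primary process (the callback name string is a placeholder):

#include <rte_eal.h>
#include <rte_memory.h>

#include "mlx4_mr.h"

/* Sketch only: name string and call site are illustrative. */
static int
example_register_mem_event_cb(void)
{
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;
        return rte_mem_event_callback_register("example_mem_event_cb",
                                               mlx4_mr_mem_event_cb, NULL);
}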
+ * + * @param dev + * Pointer to Ethernet device. + * @param mr_ctrl + * Pointer to per-queue MR control structure. + * @param[out] entry + * Pointer to returning MR cache entry, found in the global cache or newly + * created. If failed to create one, this is not written. + * @param addr + * Search key. + * + * @return + * Searched LKey on success, UINT32_MAX on no match. + */ +static uint32_t +mlx4_mr_lookup_dev(struct rte_eth_dev *dev, struct mlx4_mr_ctrl *mr_ctrl, + struct mlx4_mr_cache *entry, uintptr_t addr) +{ + struct mlx4_priv *priv = dev->data->dev_private; + struct mlx4_mr_btree *bt = &mr_ctrl->cache_bh; + uint16_t idx; + uint32_t lkey; + + /* If local cache table is full, try to double it. */ + if (unlikely(bt->len == bt->size)) + mr_btree_expand(bt, bt->size << 1); + /* Look up in the global cache. */ + rte_rwlock_read_lock(&priv->mr.rwlock); + lkey = mr_btree_lookup(&priv->mr.cache, &idx, addr); + if (lkey != UINT32_MAX) { + /* Found. */ + *entry = (*priv->mr.cache.table)[idx]; + rte_rwlock_read_unlock(&priv->mr.rwlock); + /* + * Update local cache. Even if it fails, return the found entry + * to update top-half cache. Next time, this entry will be found + * in the global cache. + */ + mr_btree_insert(bt, entry); + return lkey; + } + rte_rwlock_read_unlock(&priv->mr.rwlock); + /* First time to see the address? Create a new MR. */ + lkey = mlx4_mr_create(dev, entry, addr); + /* + * Update the local cache if successfully created a new global MR. Even + * if failed to create one, there's no action to take in this datapath + * code. As returning LKey is invalid, this will eventually make HW + * fail. + */ + if (lkey != UINT32_MAX) + mr_btree_insert(bt, entry); + return lkey; +} + +/** + * Bottom-half of LKey search on datapath. Firstly search in cache_bh[] and if + * misses, search in the global MR cache table and update the new entry to + * per-queue local caches. + * + * @param dev + * Pointer to Ethernet device. + * @param mr_ctrl + * Pointer to per-queue MR control structure. + * @param addr + * Search key. + * + * @return + * Searched LKey on success, UINT32_MAX on no match. + */ +static uint32_t +mlx4_mr_addr2mr_bh(struct rte_eth_dev *dev, struct mlx4_mr_ctrl *mr_ctrl, + uintptr_t addr) +{ + uint32_t lkey; + uint16_t bh_idx = 0; + /* Victim in top-half cache to replace with new entry. */ + struct mlx4_mr_cache *repl = &mr_ctrl->cache[mr_ctrl->head]; + + /* Binary-search MR translation table. */ + lkey = mr_btree_lookup(&mr_ctrl->cache_bh, &bh_idx, addr); + /* Update top-half cache. */ + if (likely(lkey != UINT32_MAX)) { + *repl = (*mr_ctrl->cache_bh.table)[bh_idx]; + } else { + /* + * If missed in local lookup table, search in the global cache + * and local cache_bh[] will be updated inside if possible. + * Top-half cache entry will also be updated. + */ + lkey = mlx4_mr_lookup_dev(dev, mr_ctrl, repl, addr); + if (unlikely(lkey == UINT32_MAX)) + return UINT32_MAX; + } + /* Update the most recently used entry. */ + mr_ctrl->mru = mr_ctrl->head; + /* Point to the next victim, the oldest. */ + mr_ctrl->head = (mr_ctrl->head + 1) % MLX4_MR_CACHE_N; + return lkey; +} + +/** + * Bottom-half of LKey search on Rx. + * + * @param rxq + * Pointer to Rx queue structure. + * @param addr + * Search key. + * + * @return + * Searched LKey on success, UINT32_MAX on no match. 
+ */ +uint32_t +mlx4_rx_addr2mr_bh(struct rxq *rxq, uintptr_t addr) +{ + struct mlx4_mr_ctrl *mr_ctrl = &rxq->mr_ctrl; + struct mlx4_priv *priv = rxq->priv; + + return mlx4_mr_addr2mr_bh(ETH_DEV(priv), mr_ctrl, addr); +} + +/** + * Bottom-half of LKey search on Tx. + * + * @param txq + * Pointer to Tx queue structure. + * @param addr + * Search key. + * + * @return + * Searched LKey on success, UINT32_MAX on no match. + */ +static uint32_t +mlx4_tx_addr2mr_bh(struct txq *txq, uintptr_t addr) +{ + struct mlx4_mr_ctrl *mr_ctrl = &txq->mr_ctrl; + struct mlx4_priv *priv = txq->priv; + + return mlx4_mr_addr2mr_bh(ETH_DEV(priv), mr_ctrl, addr); +} + +/** + * Bottom-half of LKey search on Tx. If it can't be searched in the memseg + * list, register the mempool of the mbuf as externally allocated memory. + * + * @param txq + * Pointer to Tx queue structure. + * @param mb + * Pointer to mbuf. + * + * @return + * Searched LKey on success, UINT32_MAX on no match. + */ +uint32_t +mlx4_tx_mb2mr_bh(struct txq *txq, struct rte_mbuf *mb) +{ + uintptr_t addr = (uintptr_t)mb->buf_addr; + uint32_t lkey; + + lkey = mlx4_tx_addr2mr_bh(txq, addr); + if (lkey == UINT32_MAX && rte_errno == ENXIO) { + /* Mempool may have externally allocated memory. */ + return mlx4_tx_update_ext_mp(txq, addr, mlx4_mb2mp(mb)); + } + return lkey; +} + +/** + * Flush all of the local cache entries. + * + * @param mr_ctrl + * Pointer to per-queue MR control structure. + */ +void +mlx4_mr_flush_local_cache(struct mlx4_mr_ctrl *mr_ctrl) +{ + /* Reset the most-recently-used index. */ + mr_ctrl->mru = 0; + /* Reset the linear search array. */ + mr_ctrl->head = 0; + memset(mr_ctrl->cache, 0, sizeof(mr_ctrl->cache)); + /* Reset the B-tree table. */ + mr_ctrl->cache_bh.len = 1; + mr_ctrl->cache_bh.overflow = 0; + /* Update the generation number. */ + mr_ctrl->cur_gen = *mr_ctrl->dev_gen_ptr; + DEBUG("mr_ctrl(%p): flushed, cur_gen=%d", + (void *)mr_ctrl, mr_ctrl->cur_gen); +} + +/** + * Called during rte_mempool_mem_iter() by mlx4_mr_update_ext_mp(). + * + * Externally allocated chunk is registered and a MR is created for the chunk. + * The MR object is added to the global list. If memseg list of a MR object + * (mr->msl) is null, the MR object can be regarded as externally allocated + * memory. + * + * Once external memory is registered, it should be static. If the memory is + * freed and the virtual address range has different physical memory mapped + * again, it may cause crash on device due to the wrong translation entry. PMD + * can't track the free event of the external memory for now. + */ +static void +mlx4_mr_update_ext_mp_cb(struct rte_mempool *mp, void *opaque, + struct rte_mempool_memhdr *memhdr, + unsigned mem_idx __rte_unused) +{ + struct mr_update_mp_data *data = opaque; + struct rte_eth_dev *dev = data->dev; + struct mlx4_priv *priv = dev->data->dev_private; + struct mlx4_mr_ctrl *mr_ctrl = data->mr_ctrl; + struct mlx4_mr *mr = NULL; + uintptr_t addr = (uintptr_t)memhdr->addr; + size_t len = memhdr->len; + struct mlx4_mr_cache entry; + uint32_t lkey; + + MLX4_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY); + /* If already registered, it should return. 
*/ + rte_rwlock_read_lock(&priv->mr.rwlock); + lkey = mr_lookup_dev(dev, &entry, addr); + rte_rwlock_read_unlock(&priv->mr.rwlock); + if (lkey != UINT32_MAX) + return; + mr = rte_zmalloc_socket(NULL, + RTE_ALIGN_CEIL(sizeof(*mr), + RTE_CACHE_LINE_SIZE), + RTE_CACHE_LINE_SIZE, mp->socket_id); + if (mr == NULL) { + WARN("port %u unable to allocate memory for a new MR of" + " mempool (%s).", + dev->data->port_id, mp->name); + data->ret = -1; + return; + } + DEBUG("port %u register MR for chunk #%d of mempool (%s)", + dev->data->port_id, mem_idx, mp->name); + mr->ibv_mr = mlx4_glue->reg_mr(priv->pd, (void *)addr, len, + IBV_ACCESS_LOCAL_WRITE); + if (mr->ibv_mr == NULL) { + WARN("port %u fail to create a verbs MR for address (%p)", + dev->data->port_id, (void *)addr); + rte_free(mr); + data->ret = -1; + return; + } + mr->msl = NULL; /* Mark it is external memory. */ + mr->ms_bmp = NULL; + mr->ms_n = 1; + mr->ms_bmp_n = 1; + rte_rwlock_write_lock(&priv->mr.rwlock); + LIST_INSERT_HEAD(&priv->mr.mr_list, mr, mr); + DEBUG("port %u MR CREATED (%p) for external memory %p:\n" + " [0x%" PRIxPTR ", 0x%" PRIxPTR ")," + " lkey=0x%x base_idx=%u ms_n=%u, ms_bmp_n=%u", + dev->data->port_id, (void *)mr, (void *)addr, + addr, addr + len, rte_cpu_to_be_32(mr->ibv_mr->lkey), + mr->ms_base_idx, mr->ms_n, mr->ms_bmp_n); + /* Insert to the global cache table. */ + mr_insert_dev_cache(dev, mr); + rte_rwlock_write_unlock(&priv->mr.rwlock); + /* Insert to the local cache table */ + mlx4_mr_addr2mr_bh(dev, mr_ctrl, addr); +} + +/** + * Register MR for entire memory chunks in a Mempool having externally allocated + * memory and fill in local cache. + * + * @param dev + * Pointer to Ethernet device. + * @param mr_ctrl + * Pointer to per-queue MR control structure. + * @param mp + * Pointer to registering Mempool. + * + * @return + * 0 on success, -1 on failure. + */ +static uint32_t +mlx4_mr_update_ext_mp(struct rte_eth_dev *dev, struct mlx4_mr_ctrl *mr_ctrl, + struct rte_mempool *mp) +{ + struct mr_update_mp_data data = { + .dev = dev, + .mr_ctrl = mr_ctrl, + .ret = 0, + }; + + rte_mempool_mem_iter(mp, mlx4_mr_update_ext_mp_cb, &data); + return data.ret; +} + +/** + * Register MR entire memory chunks in a Mempool having externally allocated + * memory and search LKey of the address to return. + * + * @param dev + * Pointer to Ethernet device. + * @param addr + * Search key. + * @param mp + * Pointer to registering Mempool where addr belongs. + * + * @return + * LKey for address on success, UINT32_MAX on failure. + */ +uint32_t +mlx4_tx_update_ext_mp(struct txq *txq, uintptr_t addr, struct rte_mempool *mp) +{ + struct mlx4_mr_ctrl *mr_ctrl = &txq->mr_ctrl; + struct mlx4_priv *priv = txq->priv; + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) { + WARN("port %u using address (%p) from unregistered mempool" + " having externally allocated memory" + " in secondary process, please create mempool" + " prior to rte_eth_dev_start()", + PORT_ID(priv), (void *)addr); + return UINT32_MAX; + } + mlx4_mr_update_ext_mp(ETH_DEV(priv), mr_ctrl, mp); + return mlx4_tx_addr2mr_bh(txq, addr); +} + +/* Called during rte_mempool_mem_iter() by mlx4_mr_update_mp(). */ +static void +mlx4_mr_update_mp_cb(struct rte_mempool *mp __rte_unused, void *opaque, + struct rte_mempool_memhdr *memhdr, + unsigned mem_idx __rte_unused) +{ + struct mr_update_mp_data *data = opaque; + uint32_t lkey; + + /* Stop iteration if failed in the previous walk. */ + if (data->ret < 0) + return; + /* Register address of the chunk and update local caches. 
*/ + lkey = mlx4_mr_addr2mr_bh(data->dev, data->mr_ctrl, + (uintptr_t)memhdr->addr); + if (lkey == UINT32_MAX) + data->ret = -1; +} + +/** + * Register entire memory chunks in a Mempool. + * + * @param dev + * Pointer to Ethernet device. + * @param mr_ctrl + * Pointer to per-queue MR control structure. + * @param mp + * Pointer to registering Mempool. + * + * @return + * 0 on success, -1 on failure. + */ +int +mlx4_mr_update_mp(struct rte_eth_dev *dev, struct mlx4_mr_ctrl *mr_ctrl, + struct rte_mempool *mp) +{ + struct mr_update_mp_data data = { + .dev = dev, + .mr_ctrl = mr_ctrl, + .ret = 0, + }; + + rte_mempool_mem_iter(mp, mlx4_mr_update_mp_cb, &data); + if (data.ret < 0 && rte_errno == ENXIO) { + /* Mempool may have externally allocated memory. */ + return mlx4_mr_update_ext_mp(dev, mr_ctrl, mp); + } + return data.ret; +} + +#ifdef RTE_LIBRTE_MLX4_DEBUG +/** + * Dump all the created MRs and the global cache entries. + * + * @param dev + * Pointer to Ethernet device. + */ +void +mlx4_mr_dump_dev(struct rte_eth_dev *dev) +{ + struct mlx4_priv *priv = dev->data->dev_private; + struct mlx4_mr *mr; + int mr_n = 0; + int chunk_n = 0; + + rte_rwlock_read_lock(&priv->mr.rwlock); + /* Iterate all the existing MRs. */ + LIST_FOREACH(mr, &priv->mr.mr_list, mr) { + unsigned int n; + + DEBUG("port %u MR[%u], LKey = 0x%x, ms_n = %u, ms_bmp_n = %u", + dev->data->port_id, mr_n++, + rte_cpu_to_be_32(mr->ibv_mr->lkey), + mr->ms_n, mr->ms_bmp_n); + if (mr->ms_n == 0) + continue; + for (n = 0; n < mr->ms_bmp_n; ) { + struct mlx4_mr_cache ret; + + memset(&ret, 0, sizeof(ret)); + n = mr_find_next_chunk(mr, &ret, n); + if (!ret.end) + break; + DEBUG(" chunk[%u], [0x%" PRIxPTR ", 0x%" PRIxPTR ")", + chunk_n++, ret.start, ret.end); + } + } + DEBUG("port %u dumping global cache", dev->data->port_id); + mlx4_mr_btree_dump(&priv->mr.cache); + rte_rwlock_read_unlock(&priv->mr.rwlock); +} +#endif + +/** + * Release all the created MRs and resources. Remove device from memory callback + * list. + * + * @param dev + * Pointer to Ethernet device. + */ +void +mlx4_mr_release(struct rte_eth_dev *dev) +{ + struct mlx4_priv *priv = dev->data->dev_private; + struct mlx4_mr *mr_next; + + /* Remove from memory callback device list. */ + rte_rwlock_write_lock(&mlx4_shared_data->mem_event_rwlock); + LIST_REMOVE(priv, mem_event_cb); + rte_rwlock_write_unlock(&mlx4_shared_data->mem_event_rwlock); +#ifdef RTE_LIBRTE_MLX4_DEBUG + mlx4_mr_dump_dev(dev); +#endif + rte_rwlock_write_lock(&priv->mr.rwlock); + /* Detach from MR list and move to free list. */ + mr_next = LIST_FIRST(&priv->mr.mr_list); + while (mr_next != NULL) { + struct mlx4_mr *mr = mr_next; + + mr_next = LIST_NEXT(mr, mr); + LIST_REMOVE(mr, mr); + LIST_INSERT_HEAD(&priv->mr.mr_free_list, mr, mr); + } + LIST_INIT(&priv->mr.mr_list); + /* Free global cache. */ + mlx4_mr_btree_free(&priv->mr.cache); + rte_rwlock_write_unlock(&priv->mr.rwlock); + /* Free all remaining MRs. */ + mlx4_mr_garbage_collect(dev); +} diff --git a/src/spdk/dpdk/drivers/net/mlx4/mlx4_mr.h b/src/spdk/dpdk/drivers/net/mlx4/mlx4_mr.h new file mode 100644 index 000000000..af5251a96 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/mlx4/mlx4_mr.h @@ -0,0 +1,123 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2018 6WIND S.A. + * Copyright 2018 Mellanox Technologies, Ltd + */ + +#ifndef RTE_PMD_MLX4_MR_H_ +#define RTE_PMD_MLX4_MR_H_ + +#include +#include +#include + +/* Verbs headers do not support -pedantic. 
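The warning is therefore disabled around their inclusion and restored afterwards.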
*/ +#ifdef PEDANTIC +#pragma GCC diagnostic ignored "-Wpedantic" +#endif +#include +#ifdef PEDANTIC +#pragma GCC diagnostic error "-Wpedantic" +#endif + +#include +#include +#include + +/* Size of per-queue MR cache array for linear search. */ +#define MLX4_MR_CACHE_N 8 + +/* Size of MR cache table for binary search. */ +#define MLX4_MR_BTREE_CACHE_N 256 + +/* Memory Region object. */ +struct mlx4_mr { + LIST_ENTRY(mlx4_mr) mr; /**< Pointer to the prev/next entry. */ + struct ibv_mr *ibv_mr; /* Verbs Memory Region. */ + const struct rte_memseg_list *msl; + int ms_base_idx; /* Start index of msl->memseg_arr[]. */ + int ms_n; /* Number of memsegs in use. */ + uint32_t ms_bmp_n; /* Number of bits in memsegs bit-mask. */ + struct rte_bitmap *ms_bmp; /* Bit-mask of memsegs belonged to MR. */ +}; + +/* Cache entry for Memory Region. */ +struct mlx4_mr_cache { + uintptr_t start; /* Start address of MR. */ + uintptr_t end; /* End address of MR. */ + uint32_t lkey; /* rte_cpu_to_be_32(ibv_mr->lkey). */ +} __rte_packed; + +/* MR Cache table for Binary search. */ +struct mlx4_mr_btree { + uint16_t len; /* Number of entries. */ + uint16_t size; /* Total number of entries. */ + int overflow; /* Mark failure of table expansion. */ + struct mlx4_mr_cache (*table)[]; +} __rte_packed; + +/* Per-queue MR control descriptor. */ +struct mlx4_mr_ctrl { + uint32_t *dev_gen_ptr; /* Generation number of device to poll. */ + uint32_t cur_gen; /* Generation number saved to flush caches. */ + uint16_t mru; /* Index of last hit entry in top-half cache. */ + uint16_t head; /* Index of the oldest entry in top-half cache. */ + struct mlx4_mr_cache cache[MLX4_MR_CACHE_N]; /* Cache for top-half. */ + struct mlx4_mr_btree cache_bh; /* Cache for bottom-half. */ +} __rte_packed; + +extern struct mlx4_dev_list mlx4_mem_event_cb_list; +extern rte_rwlock_t mlx4_mem_event_rwlock; + +/* First entry must be NULL for comparison. */ +#define mlx4_mr_btree_len(bt) ((bt)->len - 1) + +int mlx4_mr_btree_init(struct mlx4_mr_btree *bt, int n, int socket); +void mlx4_mr_btree_free(struct mlx4_mr_btree *bt); +void mlx4_mr_btree_dump(struct mlx4_mr_btree *bt); +uint32_t mlx4_mr_create_primary(struct rte_eth_dev *dev, + struct mlx4_mr_cache *entry, uintptr_t addr); +void mlx4_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr, + size_t len, void *arg); +int mlx4_mr_update_mp(struct rte_eth_dev *dev, struct mlx4_mr_ctrl *mr_ctrl, + struct rte_mempool *mp); +void mlx4_mr_dump_dev(struct rte_eth_dev *dev); +void mlx4_mr_release(struct rte_eth_dev *dev); + +/** + * Look up LKey from given lookup table by linear search. Firstly look up the + * last-hit entry. If miss, the entire array is searched. If found, update the + * last-hit index and return LKey. + * + * @param lkp_tbl + * Pointer to lookup table. + * @param[in,out] cached_idx + * Pointer to last-hit index. + * @param n + * Size of lookup table. + * @param addr + * Search key. + * + * @return + * Searched LKey on success, UINT32_MAX on no match. + */ +static __rte_always_inline uint32_t +mlx4_mr_lookup_cache(struct mlx4_mr_cache *lkp_tbl, uint16_t *cached_idx, + uint16_t n, uintptr_t addr) +{ + uint16_t idx; + + if (likely(addr >= lkp_tbl[*cached_idx].start && + addr < lkp_tbl[*cached_idx].end)) + return lkp_tbl[*cached_idx].lkey; + for (idx = 0; idx < n && lkp_tbl[idx].start != 0; ++idx) { + if (addr >= lkp_tbl[idx].start && + addr < lkp_tbl[idx].end) { + /* Found. 
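Remember this index as the most recent hit so the next lookup for a nearby address is resolved without scanning the array.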
*/ + *cached_idx = idx; + return lkp_tbl[idx].lkey; + } + } + return UINT32_MAX; +} + +#endif /* RTE_PMD_MLX4_MR_H_ */ diff --git a/src/spdk/dpdk/drivers/net/mlx4/mlx4_prm.h b/src/spdk/dpdk/drivers/net/mlx4/mlx4_prm.h new file mode 100644 index 000000000..16ae6db82 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/mlx4/mlx4_prm.h @@ -0,0 +1,163 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2017 6WIND S.A. + * Copyright 2017 Mellanox Technologies, Ltd + */ + +#ifndef MLX4_PRM_H_ +#define MLX4_PRM_H_ + +#include +#include +#include + +/* Verbs headers do not support -pedantic. */ +#ifdef PEDANTIC +#pragma GCC diagnostic ignored "-Wpedantic" +#endif +#include +#include +#ifdef PEDANTIC +#pragma GCC diagnostic error "-Wpedantic" +#endif +#include "mlx4_autoconf.h" + +/* ConnectX-3 Tx queue basic block. */ +#define MLX4_TXBB_SHIFT 6 +#define MLX4_TXBB_SIZE (1 << MLX4_TXBB_SHIFT) + +/* Typical TSO descriptor with 16 gather entries is 352 bytes. */ +#define MLX4_MAX_SGE 32 +#define MLX4_MAX_WQE_SIZE \ + (MLX4_MAX_SGE * sizeof(struct mlx4_wqe_data_seg) + \ + sizeof(struct mlx4_wqe_ctrl_seg)) +#define MLX4_SEG_SHIFT 4 + +/* Send queue stamping/invalidating information. */ +#define MLX4_SQ_STAMP_STRIDE 64 +#define MLX4_SQ_STAMP_DWORDS (MLX4_SQ_STAMP_STRIDE / 4) +#define MLX4_SQ_OWNER_BIT 31 +#define MLX4_SQ_STAMP_VAL 0x7fffffff + +/* Work queue element (WQE) flags. */ +#define MLX4_WQE_CTRL_IIP_HDR_CSUM (1 << 28) +#define MLX4_WQE_CTRL_IL4_HDR_CSUM (1 << 27) +#define MLX4_WQE_CTRL_RR (1 << 6) + +/* CQE checksum flags. */ +enum { + MLX4_CQE_L2_TUNNEL_IPV4 = (int)(1u << 25), + MLX4_CQE_L2_TUNNEL_L4_CSUM = (int)(1u << 26), + MLX4_CQE_L2_TUNNEL = (int)(1u << 27), + MLX4_CQE_L2_VLAN_MASK = (int)(3u << 29), + MLX4_CQE_L2_TUNNEL_IPOK = (int)(1u << 31), +}; + +/* CQE status flags. */ +#define MLX4_CQE_STATUS_IPV6F (1 << 12) +#define MLX4_CQE_STATUS_IPV4 (1 << 22) +#define MLX4_CQE_STATUS_IPV4F (1 << 23) +#define MLX4_CQE_STATUS_IPV6 (1 << 24) +#define MLX4_CQE_STATUS_IPV4OPT (1 << 25) +#define MLX4_CQE_STATUS_TCP (1 << 26) +#define MLX4_CQE_STATUS_UDP (1 << 27) +#define MLX4_CQE_STATUS_PTYPE_MASK \ + (MLX4_CQE_STATUS_IPV4 | \ + MLX4_CQE_STATUS_IPV4F | \ + MLX4_CQE_STATUS_IPV6 | \ + MLX4_CQE_STATUS_IPV4OPT | \ + MLX4_CQE_STATUS_TCP | \ + MLX4_CQE_STATUS_UDP) + +/* Send queue information. */ +struct mlx4_sq { + volatile uint8_t *buf; /**< SQ buffer. */ + volatile uint8_t *eob; /**< End of SQ buffer */ + uint32_t size; /**< SQ size includes headroom. */ + uint32_t remain_size; /**< Remaining WQE room in SQ (bytes). */ + uint32_t owner_opcode; + /**< Default owner opcode with HW valid owner bit. */ + uint32_t stamp; /**< Stamp value with an invalid HW owner bit. */ + uint32_t *db; /**< Pointer to the doorbell. */ + off_t uar_mmap_offset; /* UAR mmap offset for non-primary process. */ + uint32_t doorbell_qpn; /**< qp number to write to the doorbell. */ +}; + +/* Completion queue events, numbers and masks. */ +#define MLX4_CQ_DB_GEQ_N_MASK 0x3 +#define MLX4_CQ_DOORBELL 0x20 +#define MLX4_CQ_DB_CI_MASK 0xffffff + +/* Completion queue information. */ +struct mlx4_cq { + volatile void *cq_uar; /**< CQ user access region. */ + volatile void *cq_db_reg; /**< CQ doorbell register. */ + volatile uint32_t *set_ci_db; /**< Pointer to the CQ doorbell. */ + volatile uint32_t *arm_db; /**< Arming Rx events doorbell. */ + volatile uint8_t *buf; /**< Pointer to the completion queue buffer. */ + uint32_t cqe_cnt; /**< Number of entries in the queue. */ + uint32_t cqe_64:1; /**< CQ entry size is 64 bytes. 
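Entries are 32 bytes when this bit is clear.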
*/ + uint32_t cons_index; /**< Last queue entry that was handled. */ + uint32_t cqn; /**< CQ number. */ + int arm_sn; /**< Rx event counter. */ +}; + +#ifndef HAVE_IBV_MLX4_WQE_LSO_SEG +/* + * WQE LSO segment structure. + * Defined here as backward compatibility for rdma-core v17 and below. + * Similar definition is found in infiniband/mlx4dv.h in rdma-core v18 + * and above. + */ +struct mlx4_wqe_lso_seg { + rte_be32_t mss_hdr_size; + rte_be32_t header[]; +}; +#endif + +/** + * Retrieve a CQE entry from a CQ. + * + * cqe = cq->buf + cons_index * cqe_size + cqe_offset + * + * Where cqe_size is 32 or 64 bytes and cqe_offset is 0 or 32 (depending on + * cqe_size). + * + * @param cq + * CQ to retrieve entry from. + * @param index + * Entry index. + * + * @return + * Pointer to CQE entry. + */ +static inline volatile struct mlx4_cqe * +mlx4_get_cqe(struct mlx4_cq *cq, uint32_t index) +{ + return (volatile struct mlx4_cqe *)(cq->buf + + ((index & (cq->cqe_cnt - 1)) << + (5 + cq->cqe_64)) + + (cq->cqe_64 << 5)); +} + +/** + * Transpose a flag in a value. + * + * @param val + * Input value. + * @param from + * Flag to retrieve from input value. + * @param to + * Flag to set in output value. + * + * @return + * Output value with transposed flag enabled if present on input. + */ +static inline uint64_t +mlx4_transpose(uint64_t val, uint64_t from, uint64_t to) +{ + return (from >= to ? + (val & from) / (from / to) : + (val & from) * (to / from)); +} + +#endif /* MLX4_PRM_H_ */ diff --git a/src/spdk/dpdk/drivers/net/mlx4/mlx4_rxq.c b/src/spdk/dpdk/drivers/net/mlx4/mlx4_rxq.c new file mode 100644 index 000000000..0699bdd5f --- /dev/null +++ b/src/spdk/dpdk/drivers/net/mlx4/mlx4_rxq.c @@ -0,0 +1,941 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2017 6WIND S.A. + * Copyright 2017 Mellanox Technologies, Ltd + */ + +/** + * @file + * Rx queues configuration for mlx4 driver. + */ + +#include +#include +#include +#include + +/* Verbs headers do not support -pedantic. */ +#ifdef PEDANTIC +#pragma GCC diagnostic ignored "-Wpedantic" +#endif +#include +#include +#ifdef PEDANTIC +#pragma GCC diagnostic error "-Wpedantic" +#endif + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mlx4.h" +#include "mlx4_glue.h" +#include "mlx4_flow.h" +#include "mlx4_rxtx.h" +#include "mlx4_utils.h" + +/** + * Historical RSS hash key. + * + * This used to be the default for mlx4 in Linux before v3.19 switched to + * generating random hash keys through netdev_rss_key_fill(). + * + * It is used in this PMD for consistency with past DPDK releases but can + * now be overridden through user configuration. + * + * Note: this is not const to work around API quirks. + */ +uint8_t +mlx4_rss_hash_key_default[MLX4_RSS_HASH_KEY_SIZE] = { + 0x2c, 0xc6, 0x81, 0xd1, + 0x5b, 0xdb, 0xf4, 0xf7, + 0xfc, 0xa2, 0x83, 0x19, + 0xdb, 0x1a, 0x3e, 0x94, + 0x6b, 0x9e, 0x38, 0xd9, + 0x2c, 0x9c, 0x03, 0xd1, + 0xad, 0x99, 0x44, 0xa7, + 0xd9, 0x56, 0x3d, 0x59, + 0x06, 0x3c, 0x25, 0xf3, + 0xfc, 0x1f, 0xdc, 0x2a, +}; + +/** + * Obtain a RSS context with specified properties. + * + * Used when creating a flow rule targeting one or several Rx queues. + * + * If a matching RSS context already exists, it is returned with its + * reference count incremented. + * + * @param priv + * Pointer to private structure. + * @param fields + * Fields for RSS processing (Verbs format). + * @param[in] key + * Hash key to use (whose size is exactly MLX4_RSS_HASH_KEY_SIZE). + * @param queues + * Number of target queues. 
+ * @param[in] queue_id + * Target queues. + * + * @return + * Pointer to RSS context on success, NULL otherwise and rte_errno is set. + */ +struct mlx4_rss * +mlx4_rss_get(struct mlx4_priv *priv, uint64_t fields, + const uint8_t key[MLX4_RSS_HASH_KEY_SIZE], + uint16_t queues, const uint16_t queue_id[]) +{ + struct mlx4_rss *rss; + size_t queue_id_size = sizeof(queue_id[0]) * queues; + + LIST_FOREACH(rss, &priv->rss, next) + if (fields == rss->fields && + queues == rss->queues && + !memcmp(key, rss->key, MLX4_RSS_HASH_KEY_SIZE) && + !memcmp(queue_id, rss->queue_id, queue_id_size)) { + ++rss->refcnt; + return rss; + } + rss = rte_malloc(__func__, offsetof(struct mlx4_rss, queue_id) + + queue_id_size, 0); + if (!rss) + goto error; + *rss = (struct mlx4_rss){ + .priv = priv, + .refcnt = 1, + .usecnt = 0, + .qp = NULL, + .ind = NULL, + .fields = fields, + .queues = queues, + }; + memcpy(rss->key, key, MLX4_RSS_HASH_KEY_SIZE); + memcpy(rss->queue_id, queue_id, queue_id_size); + LIST_INSERT_HEAD(&priv->rss, rss, next); + return rss; +error: + rte_errno = ENOMEM; + return NULL; +} + +/** + * Release a RSS context instance. + * + * Used when destroying a flow rule targeting one or several Rx queues. + * + * This function decrements the reference count of the context and destroys + * it after reaching 0. The context must have no users at this point; all + * prior calls to mlx4_rss_attach() must have been followed by matching + * calls to mlx4_rss_detach(). + * + * @param rss + * RSS context to release. + */ +void +mlx4_rss_put(struct mlx4_rss *rss) +{ + MLX4_ASSERT(rss->refcnt); + if (--rss->refcnt) + return; + MLX4_ASSERT(!rss->usecnt); + MLX4_ASSERT(!rss->qp); + MLX4_ASSERT(!rss->ind); + LIST_REMOVE(rss, next); + rte_free(rss); +} + +/** + * Attach a user to a RSS context instance. + * + * Used when the RSS QP and indirection table objects must be instantiated, + * that is, when a flow rule must be enabled. + * + * This function increments the usage count of the context. + * + * @param rss + * RSS context to attach to. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx4_rss_attach(struct mlx4_rss *rss) +{ + MLX4_ASSERT(rss->refcnt); + if (rss->usecnt++) { + MLX4_ASSERT(rss->qp); + MLX4_ASSERT(rss->ind); + return 0; + } + + struct ibv_wq *ind_tbl[rss->queues]; + struct mlx4_priv *priv = rss->priv; + struct rte_eth_dev *dev = ETH_DEV(priv); + const char *msg; + unsigned int i = 0; + int ret; + + if (!rte_is_power_of_2(RTE_DIM(ind_tbl))) { + ret = EINVAL; + msg = "number of RSS queues must be a power of two"; + goto error; + } + for (i = 0; i != RTE_DIM(ind_tbl); ++i) { + uint16_t id = rss->queue_id[i]; + struct rxq *rxq = NULL; + + if (id < dev->data->nb_rx_queues) + rxq = dev->data->rx_queues[id]; + if (!rxq) { + ret = EINVAL; + msg = "RSS target queue is not configured"; + goto error; + } + ret = mlx4_rxq_attach(rxq); + if (ret) { + ret = -ret; + msg = "unable to attach RSS target queue"; + goto error; + } + ind_tbl[i] = rxq->wq; + } + rss->ind = mlx4_glue->create_rwq_ind_table + (priv->ctx, + &(struct ibv_rwq_ind_table_init_attr){ + .log_ind_tbl_size = rte_log2_u32(RTE_DIM(ind_tbl)), + .ind_tbl = ind_tbl, + .comp_mask = 0, + }); + if (!rss->ind) { + ret = errno ? 
errno : EINVAL; + msg = "RSS indirection table creation failure"; + goto error; + } + rss->qp = mlx4_glue->create_qp_ex + (priv->ctx, + &(struct ibv_qp_init_attr_ex){ + .comp_mask = (IBV_QP_INIT_ATTR_PD | + IBV_QP_INIT_ATTR_RX_HASH | + IBV_QP_INIT_ATTR_IND_TABLE), + .qp_type = IBV_QPT_RAW_PACKET, + .pd = priv->pd, + .rwq_ind_tbl = rss->ind, + .rx_hash_conf = { + .rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ, + .rx_hash_key_len = MLX4_RSS_HASH_KEY_SIZE, + .rx_hash_key = rss->key, + .rx_hash_fields_mask = rss->fields, + }, + }); + if (!rss->qp) { + ret = errno ? errno : EINVAL; + msg = "RSS hash QP creation failure"; + goto error; + } + ret = mlx4_glue->modify_qp + (rss->qp, + &(struct ibv_qp_attr){ + .qp_state = IBV_QPS_INIT, + .port_num = priv->port, + }, + IBV_QP_STATE | IBV_QP_PORT); + if (ret) { + msg = "failed to switch RSS hash QP to INIT state"; + goto error; + } + ret = mlx4_glue->modify_qp + (rss->qp, + &(struct ibv_qp_attr){ + .qp_state = IBV_QPS_RTR, + }, + IBV_QP_STATE); + if (ret) { + msg = "failed to switch RSS hash QP to RTR state"; + goto error; + } + return 0; +error: + if (rss->qp) { + claim_zero(mlx4_glue->destroy_qp(rss->qp)); + rss->qp = NULL; + } + if (rss->ind) { + claim_zero(mlx4_glue->destroy_rwq_ind_table(rss->ind)); + rss->ind = NULL; + } + while (i--) + mlx4_rxq_detach(dev->data->rx_queues[rss->queue_id[i]]); + ERROR("mlx4: %s", msg); + --rss->usecnt; + rte_errno = ret; + return -ret; +} + +/** + * Detach a user from a RSS context instance. + * + * Used when disabling (not destroying) a flow rule. + * + * This function decrements the usage count of the context and destroys + * usage resources after reaching 0. + * + * @param rss + * RSS context to detach from. + */ +void +mlx4_rss_detach(struct mlx4_rss *rss) +{ + struct mlx4_priv *priv = rss->priv; + struct rte_eth_dev *dev = ETH_DEV(priv); + unsigned int i; + + MLX4_ASSERT(rss->refcnt); + MLX4_ASSERT(rss->qp); + MLX4_ASSERT(rss->ind); + if (--rss->usecnt) + return; + claim_zero(mlx4_glue->destroy_qp(rss->qp)); + rss->qp = NULL; + claim_zero(mlx4_glue->destroy_rwq_ind_table(rss->ind)); + rss->ind = NULL; + for (i = 0; i != rss->queues; ++i) + mlx4_rxq_detach(dev->data->rx_queues[rss->queue_id[i]]); +} + +/** + * Initialize common RSS context resources. + * + * Because ConnectX-3 hardware limitations require a fixed order in the + * indirection table, WQs must be allocated sequentially to be part of a + * common RSS context. + * + * Since a newly created WQ cannot be moved to a different context, this + * function allocates them all at once, one for each configured Rx queue, + * as well as all related resources (CQs and mbufs). + * + * This must therefore be done before creating any Rx flow rules relying on + * indirection tables. + * + * @param priv + * Pointer to private structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx4_rss_init(struct mlx4_priv *priv) +{ + struct rte_eth_dev *dev = ETH_DEV(priv); + uint8_t log2_range = rte_log2_u32(dev->data->nb_rx_queues); + uint32_t wq_num_prev = 0; + const char *msg; + unsigned int i; + int ret; + + if (priv->rss_init) + return 0; + if (ETH_DEV(priv)->data->nb_rx_queues > priv->hw_rss_max_qps) { + ERROR("RSS does not support more than %d queues", + priv->hw_rss_max_qps); + rte_errno = EINVAL; + return -rte_errno; + } + /* Prepare range for RSS contexts before creating the first WQ. 
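Declaring the WQ range size up front lets the underlying implementation allocate all WQ numbers contiguously, which the RSS indirection table set up later relies on.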
*/ + ret = mlx4_glue->dv_set_context_attr + (priv->ctx, + MLX4DV_SET_CTX_ATTR_LOG_WQS_RANGE_SZ, + &log2_range); + if (ret) { + ERROR("cannot set up range size for RSS context to %u" + " (for %u Rx queues), error: %s", + 1 << log2_range, dev->data->nb_rx_queues, strerror(ret)); + rte_errno = ret; + return -ret; + } + for (i = 0; i != ETH_DEV(priv)->data->nb_rx_queues; ++i) { + struct rxq *rxq = ETH_DEV(priv)->data->rx_queues[i]; + struct ibv_cq *cq; + struct ibv_wq *wq; + uint32_t wq_num; + + /* Attach the configured Rx queues. */ + if (rxq) { + MLX4_ASSERT(!rxq->usecnt); + ret = mlx4_rxq_attach(rxq); + if (!ret) { + wq_num = rxq->wq->wq_num; + goto wq_num_check; + } + ret = -ret; + msg = "unable to create Rx queue resources"; + goto error; + } + /* + * WQs are temporarily allocated for unconfigured Rx queues + * to maintain proper index alignment in indirection table + * by skipping unused WQ numbers. + * + * The reason this works at all even though these WQs are + * immediately destroyed is that WQNs are allocated + * sequentially and are guaranteed to never be reused in the + * same context by the underlying implementation. + */ + cq = mlx4_glue->create_cq(priv->ctx, 1, NULL, NULL, 0); + if (!cq) { + ret = ENOMEM; + msg = "placeholder CQ creation failure"; + goto error; + } + wq = mlx4_glue->create_wq + (priv->ctx, + &(struct ibv_wq_init_attr){ + .wq_type = IBV_WQT_RQ, + .max_wr = 1, + .max_sge = 1, + .pd = priv->pd, + .cq = cq, + }); + if (wq) { + wq_num = wq->wq_num; + claim_zero(mlx4_glue->destroy_wq(wq)); + } else { + wq_num = 0; /* Shut up GCC 4.8 warnings. */ + } + claim_zero(mlx4_glue->destroy_cq(cq)); + if (!wq) { + ret = ENOMEM; + msg = "placeholder WQ creation failure"; + goto error; + } +wq_num_check: + /* + * While guaranteed by the implementation, make sure WQ + * numbers are really sequential (as the saying goes, + * trust, but verify). + */ + if (i && wq_num - wq_num_prev != 1) { + if (rxq) + mlx4_rxq_detach(rxq); + ret = ERANGE; + msg = "WQ numbers are not sequential"; + goto error; + } + wq_num_prev = wq_num; + } + priv->rss_init = 1; + return 0; +error: + ERROR("cannot initialize common RSS resources (queue %u): %s: %s", + i, msg, strerror(ret)); + while (i--) { + struct rxq *rxq = ETH_DEV(priv)->data->rx_queues[i]; + + if (rxq) + mlx4_rxq_detach(rxq); + } + rte_errno = ret; + return -ret; +} + +/** + * Release common RSS context resources. + * + * As the reverse of mlx4_rss_init(), this must be done after removing all + * flow rules relying on indirection tables. + * + * @param priv + * Pointer to private structure. + */ +void +mlx4_rss_deinit(struct mlx4_priv *priv) +{ + unsigned int i; + + if (!priv->rss_init) + return; + for (i = 0; i != ETH_DEV(priv)->data->nb_rx_queues; ++i) { + struct rxq *rxq = ETH_DEV(priv)->data->rx_queues[i]; + + if (rxq) { + MLX4_ASSERT(rxq->usecnt == 1); + mlx4_rxq_detach(rxq); + } + } + priv->rss_init = 0; +} + +/** + * Attach a user to a Rx queue. + * + * Used when the resources of an Rx queue must be instantiated for it to + * become in a usable state. + * + * This function increments the usage count of the Rx queue. + * + * @param rxq + * Pointer to Rx queue structure. + * + * @return + * 0 on success, negative errno value otherwise and rte_errno is set. 
+ */ +int +mlx4_rxq_attach(struct rxq *rxq) +{ + if (rxq->usecnt++) { + MLX4_ASSERT(rxq->cq); + MLX4_ASSERT(rxq->wq); + MLX4_ASSERT(rxq->wqes); + MLX4_ASSERT(rxq->rq_db); + return 0; + } + + struct mlx4_priv *priv = rxq->priv; + struct rte_eth_dev *dev = ETH_DEV(priv); + const uint32_t elts_n = 1 << rxq->elts_n; + const uint32_t sges_n = 1 << rxq->sges_n; + struct rte_mbuf *(*elts)[elts_n] = rxq->elts; + struct mlx4dv_obj mlxdv; + struct mlx4dv_rwq dv_rwq; + struct mlx4dv_cq dv_cq = { .comp_mask = MLX4DV_CQ_MASK_UAR, }; + const char *msg; + struct ibv_cq *cq = NULL; + struct ibv_wq *wq = NULL; + uint32_t create_flags = 0; + uint32_t comp_mask = 0; + volatile struct mlx4_wqe_data_seg (*wqes)[]; + unsigned int i; + int ret; + + MLX4_ASSERT(rte_is_power_of_2(elts_n)); + priv->verbs_alloc_ctx.type = MLX4_VERBS_ALLOC_TYPE_RX_QUEUE; + priv->verbs_alloc_ctx.obj = rxq; + cq = mlx4_glue->create_cq(priv->ctx, elts_n / sges_n, NULL, + rxq->channel, 0); + if (!cq) { + ret = ENOMEM; + msg = "CQ creation failure"; + goto error; + } + /* By default, FCS (CRC) is stripped by hardware. */ + if (rxq->crc_present) { + create_flags |= IBV_WQ_FLAGS_SCATTER_FCS; + comp_mask |= IBV_WQ_INIT_ATTR_FLAGS; + } + wq = mlx4_glue->create_wq + (priv->ctx, + &(struct ibv_wq_init_attr){ + .wq_type = IBV_WQT_RQ, + .max_wr = elts_n / sges_n, + .max_sge = sges_n, + .pd = priv->pd, + .cq = cq, + .comp_mask = comp_mask, + .create_flags = create_flags, + }); + if (!wq) { + ret = errno ? errno : EINVAL; + msg = "WQ creation failure"; + goto error; + } + ret = mlx4_glue->modify_wq + (wq, + &(struct ibv_wq_attr){ + .attr_mask = IBV_WQ_ATTR_STATE, + .wq_state = IBV_WQS_RDY, + }); + if (ret) { + msg = "WQ state change to IBV_WQS_RDY failed"; + goto error; + } + /* Retrieve device queue information. */ + mlxdv.cq.in = cq; + mlxdv.cq.out = &dv_cq; + mlxdv.rwq.in = wq; + mlxdv.rwq.out = &dv_rwq; + ret = mlx4_glue->dv_init_obj(&mlxdv, MLX4DV_OBJ_RWQ | MLX4DV_OBJ_CQ); + if (ret) { + msg = "failed to obtain device information from WQ/CQ objects"; + goto error; + } + /* Pre-register Rx mempool. */ + DEBUG("port %u Rx queue %u registering mp %s having %u chunks", + ETH_DEV(priv)->data->port_id, rxq->stats.idx, + rxq->mp->name, rxq->mp->nb_mem_chunks); + mlx4_mr_update_mp(dev, &rxq->mr_ctrl, rxq->mp); + wqes = (volatile struct mlx4_wqe_data_seg (*)[]) + ((uintptr_t)dv_rwq.buf.buf + dv_rwq.rq.offset); + for (i = 0; i != RTE_DIM(*elts); ++i) { + volatile struct mlx4_wqe_data_seg *scat = &(*wqes)[i]; + struct rte_mbuf *buf = rte_pktmbuf_alloc(rxq->mp); + + if (buf == NULL) { + while (i--) { + rte_pktmbuf_free_seg((*elts)[i]); + (*elts)[i] = NULL; + } + ret = ENOMEM; + msg = "cannot allocate mbuf"; + goto error; + } + /* Headroom is reserved by rte_pktmbuf_alloc(). */ + MLX4_ASSERT(buf->data_off == RTE_PKTMBUF_HEADROOM); + /* Buffer is supposed to be empty. */ + MLX4_ASSERT(rte_pktmbuf_data_len(buf) == 0); + MLX4_ASSERT(rte_pktmbuf_pkt_len(buf) == 0); + /* Only the first segment keeps headroom. 
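Subsequent segments of a scattered packet have their data offset reset to zero so the whole buffer is usable for payload.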
*/ + if (i % sges_n) + buf->data_off = 0; + buf->port = rxq->port_id; + buf->data_len = rte_pktmbuf_tailroom(buf); + buf->pkt_len = rte_pktmbuf_tailroom(buf); + buf->nb_segs = 1; + *scat = (struct mlx4_wqe_data_seg){ + .addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(buf, + uintptr_t)), + .byte_count = rte_cpu_to_be_32(buf->data_len), + .lkey = mlx4_rx_mb2mr(rxq, buf), + }; + (*elts)[i] = buf; + } + DEBUG("%p: allocated and configured %u segments (max %u packets)", + (void *)rxq, elts_n, elts_n / sges_n); + rxq->cq = cq; + rxq->wq = wq; + rxq->wqes = wqes; + rxq->rq_db = dv_rwq.rdb; + rxq->mcq.buf = dv_cq.buf.buf; + rxq->mcq.cqe_cnt = dv_cq.cqe_cnt; + rxq->mcq.set_ci_db = dv_cq.set_ci_db; + rxq->mcq.cqe_64 = (dv_cq.cqe_size & 64) ? 1 : 0; + rxq->mcq.arm_db = dv_cq.arm_db; + rxq->mcq.arm_sn = dv_cq.arm_sn; + rxq->mcq.cqn = dv_cq.cqn; + rxq->mcq.cq_uar = dv_cq.cq_uar; + rxq->mcq.cq_db_reg = (uint8_t *)dv_cq.cq_uar + MLX4_CQ_DOORBELL; + /* Update doorbell counter. */ + rxq->rq_ci = elts_n / sges_n; + rte_wmb(); + *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci); + priv->verbs_alloc_ctx.type = MLX4_VERBS_ALLOC_TYPE_NONE; + return 0; +error: + if (wq) + claim_zero(mlx4_glue->destroy_wq(wq)); + if (cq) + claim_zero(mlx4_glue->destroy_cq(cq)); + --rxq->usecnt; + rte_errno = ret; + ERROR("error while attaching Rx queue %p: %s: %s", + (void *)rxq, msg, strerror(ret)); + priv->verbs_alloc_ctx.type = MLX4_VERBS_ALLOC_TYPE_NONE; + return -ret; +} + +/** + * Detach a user from a Rx queue. + * + * This function decrements the usage count of the Rx queue and destroys + * usage resources after reaching 0. + * + * @param rxq + * Pointer to Rx queue structure. + */ +void +mlx4_rxq_detach(struct rxq *rxq) +{ + unsigned int i; + struct rte_mbuf *(*elts)[1 << rxq->elts_n] = rxq->elts; + + if (--rxq->usecnt) + return; + rxq->rq_ci = 0; + memset(&rxq->mcq, 0, sizeof(rxq->mcq)); + rxq->rq_db = NULL; + rxq->wqes = NULL; + claim_zero(mlx4_glue->destroy_wq(rxq->wq)); + rxq->wq = NULL; + claim_zero(mlx4_glue->destroy_cq(rxq->cq)); + rxq->cq = NULL; + DEBUG("%p: freeing Rx queue elements", (void *)rxq); + for (i = 0; (i != RTE_DIM(*elts)); ++i) { + if (!(*elts)[i]) + continue; + rte_pktmbuf_free_seg((*elts)[i]); + (*elts)[i] = NULL; + } +} + +/** + * Returns the per-queue supported offloads. + * + * @param priv + * Pointer to private structure. + * + * @return + * Supported Tx offloads. + */ +uint64_t +mlx4_get_rx_queue_offloads(struct mlx4_priv *priv) +{ + uint64_t offloads = DEV_RX_OFFLOAD_SCATTER | + DEV_RX_OFFLOAD_KEEP_CRC | + DEV_RX_OFFLOAD_JUMBO_FRAME | + DEV_RX_OFFLOAD_RSS_HASH; + + if (priv->hw_csum) + offloads |= DEV_RX_OFFLOAD_CHECKSUM; + return offloads; +} + +/** + * Returns the per-port supported offloads. + * + * @param priv + * Pointer to private structure. + * + * @return + * Supported Rx offloads. + */ +uint64_t +mlx4_get_rx_port_offloads(struct mlx4_priv *priv) +{ + uint64_t offloads = DEV_RX_OFFLOAD_VLAN_FILTER; + + (void)priv; + return offloads; +} + +/** + * DPDK callback to configure a Rx queue. + * + * @param dev + * Pointer to Ethernet device structure. + * @param idx + * Rx queue index. + * @param desc + * Number of descriptors to configure in queue. + * @param socket + * NUMA socket on which memory must be allocated. + * @param[in] conf + * Thresholds parameters. + * @param mp + * Memory pool for buffer allocations. + * + * @return + * 0 on success, negative errno value otherwise and rte_errno is set. 
+ */ +int +mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, + unsigned int socket, const struct rte_eth_rxconf *conf, + struct rte_mempool *mp) +{ + struct mlx4_priv *priv = dev->data->dev_private; + uint32_t mb_len = rte_pktmbuf_data_room_size(mp); + struct rte_mbuf *(*elts)[rte_align32pow2(desc)]; + struct rxq *rxq; + struct mlx4_malloc_vec vec[] = { + { + .align = RTE_CACHE_LINE_SIZE, + .size = sizeof(*rxq), + .addr = (void **)&rxq, + }, + { + .align = RTE_CACHE_LINE_SIZE, + .size = sizeof(*elts), + .addr = (void **)&elts, + }, + }; + int ret; + uint32_t crc_present; + uint64_t offloads; + + offloads = conf->offloads | dev->data->dev_conf.rxmode.offloads; + + DEBUG("%p: configuring queue %u for %u descriptors", + (void *)dev, idx, desc); + + if (idx >= dev->data->nb_rx_queues) { + rte_errno = EOVERFLOW; + ERROR("%p: queue index out of range (%u >= %u)", + (void *)dev, idx, dev->data->nb_rx_queues); + return -rte_errno; + } + rxq = dev->data->rx_queues[idx]; + if (rxq) { + rte_errno = EEXIST; + ERROR("%p: Rx queue %u already configured, release it first", + (void *)dev, idx); + return -rte_errno; + } + if (!desc) { + rte_errno = EINVAL; + ERROR("%p: invalid number of Rx descriptors", (void *)dev); + return -rte_errno; + } + if (desc != RTE_DIM(*elts)) { + desc = RTE_DIM(*elts); + WARN("%p: increased number of descriptors in Rx queue %u" + " to the next power of two (%u)", + (void *)dev, idx, desc); + } + /* By default, FCS (CRC) is stripped by hardware. */ + crc_present = 0; + if (offloads & DEV_RX_OFFLOAD_KEEP_CRC) { + if (priv->hw_fcs_strip) { + crc_present = 1; + } else { + WARN("%p: CRC stripping has been disabled but will still" + " be performed by hardware, make sure MLNX_OFED and" + " firmware are up to date", + (void *)dev); + } + } + DEBUG("%p: CRC stripping is %s, %u bytes will be subtracted from" + " incoming frames to hide it", + (void *)dev, + crc_present ? "disabled" : "enabled", + crc_present << 2); + /* Allocate and initialize Rx queue. */ + mlx4_zmallocv_socket("RXQ", vec, RTE_DIM(vec), socket); + if (!rxq) { + ERROR("%p: unable to allocate queue index %u", + (void *)dev, idx); + return -rte_errno; + } + *rxq = (struct rxq){ + .priv = priv, + .mp = mp, + .port_id = dev->data->port_id, + .sges_n = 0, + .elts_n = rte_log2_u32(desc), + .elts = elts, + /* Toggle Rx checksum offload if hardware supports it. */ + .csum = priv->hw_csum && + (offloads & DEV_RX_OFFLOAD_CHECKSUM), + .csum_l2tun = priv->hw_csum_l2tun && + (offloads & DEV_RX_OFFLOAD_CHECKSUM), + .crc_present = crc_present, + .l2tun_offload = priv->hw_csum_l2tun, + .stats = { + .idx = idx, + }, + .socket = socket, + }; + /* Enable scattered packets support for this queue if necessary. */ + MLX4_ASSERT(mb_len >= RTE_PKTMBUF_HEADROOM); + if (dev->data->dev_conf.rxmode.max_rx_pkt_len <= + (mb_len - RTE_PKTMBUF_HEADROOM)) { + ; + } else if (offloads & DEV_RX_OFFLOAD_SCATTER) { + uint32_t size = + RTE_PKTMBUF_HEADROOM + + dev->data->dev_conf.rxmode.max_rx_pkt_len; + uint32_t sges_n; + + /* + * Determine the number of SGEs needed for a full packet + * and round it to the next power of two. + */ + sges_n = rte_log2_u32((size / mb_len) + !!(size % mb_len)); + rxq->sges_n = sges_n; + /* Make sure sges_n did not overflow. 
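Recompute the space provided by the rounded-up SGE count and fail if it still cannot hold the requested maximum packet size.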
*/ + size = mb_len * (1 << rxq->sges_n); + size -= RTE_PKTMBUF_HEADROOM; + if (size < dev->data->dev_conf.rxmode.max_rx_pkt_len) { + rte_errno = EOVERFLOW; + ERROR("%p: too many SGEs (%u) needed to handle" + " requested maximum packet size %u", + (void *)dev, + 1 << sges_n, + dev->data->dev_conf.rxmode.max_rx_pkt_len); + goto error; + } + } else { + WARN("%p: the requested maximum Rx packet size (%u) is" + " larger than a single mbuf (%u) and scattered" + " mode has not been requested", + (void *)dev, + dev->data->dev_conf.rxmode.max_rx_pkt_len, + mb_len - RTE_PKTMBUF_HEADROOM); + } + DEBUG("%p: maximum number of segments per packet: %u", + (void *)dev, 1 << rxq->sges_n); + if (desc % (1 << rxq->sges_n)) { + rte_errno = EINVAL; + ERROR("%p: number of Rx queue descriptors (%u) is not a" + " multiple of maximum segments per packet (%u)", + (void *)dev, + desc, + 1 << rxq->sges_n); + goto error; + } + if (mlx4_mr_btree_init(&rxq->mr_ctrl.cache_bh, + MLX4_MR_BTREE_CACHE_N, socket)) { + /* rte_errno is already set. */ + goto error; + } + if (dev->data->dev_conf.intr_conf.rxq) { + rxq->channel = mlx4_glue->create_comp_channel(priv->ctx); + if (rxq->channel == NULL) { + rte_errno = ENOMEM; + ERROR("%p: Rx interrupt completion channel creation" + " failure: %s", + (void *)dev, strerror(rte_errno)); + goto error; + } + if (mlx4_fd_set_non_blocking(rxq->channel->fd) < 0) { + ERROR("%p: unable to make Rx interrupt completion" + " channel non-blocking: %s", + (void *)dev, strerror(rte_errno)); + goto error; + } + } + DEBUG("%p: adding Rx queue %p to list", (void *)dev, (void *)rxq); + dev->data->rx_queues[idx] = rxq; + return 0; +error: + dev->data->rx_queues[idx] = NULL; + ret = rte_errno; + mlx4_rx_queue_release(rxq); + rte_errno = ret; + MLX4_ASSERT(rte_errno > 0); + return -rte_errno; +} + +/** + * DPDK callback to release a Rx queue. + * + * @param dpdk_rxq + * Generic Rx queue pointer. + */ +void +mlx4_rx_queue_release(void *dpdk_rxq) +{ + struct rxq *rxq = (struct rxq *)dpdk_rxq; + struct mlx4_priv *priv; + unsigned int i; + + if (rxq == NULL) + return; + priv = rxq->priv; + for (i = 0; i != ETH_DEV(priv)->data->nb_rx_queues; ++i) + if (ETH_DEV(priv)->data->rx_queues[i] == rxq) { + DEBUG("%p: removing Rx queue %p from list", + (void *)ETH_DEV(priv), (void *)rxq); + ETH_DEV(priv)->data->rx_queues[i] = NULL; + break; + } + MLX4_ASSERT(!rxq->cq); + MLX4_ASSERT(!rxq->wq); + MLX4_ASSERT(!rxq->wqes); + MLX4_ASSERT(!rxq->rq_db); + if (rxq->channel) + claim_zero(mlx4_glue->destroy_comp_channel(rxq->channel)); + mlx4_mr_btree_free(&rxq->mr_ctrl.cache_bh); + rte_free(rxq); +} diff --git a/src/spdk/dpdk/drivers/net/mlx4/mlx4_rxtx.c b/src/spdk/dpdk/drivers/net/mlx4/mlx4_rxtx.c new file mode 100644 index 000000000..adc1c9bf8 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/mlx4/mlx4_rxtx.c @@ -0,0 +1,1396 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2017 6WIND S.A. + * Copyright 2017 Mellanox Technologies, Ltd + */ + +/** + * @file + * Data plane functions for mlx4 driver. + */ + +#include +#include +#include + +/* Verbs headers do not support -pedantic. */ +#ifdef PEDANTIC +#pragma GCC diagnostic ignored "-Wpedantic" +#endif +#include +#ifdef PEDANTIC +#pragma GCC diagnostic error "-Wpedantic" +#endif + +#include +#include +#include +#include +#include +#include + +#include "mlx4.h" +#include "mlx4_prm.h" +#include "mlx4_rxtx.h" +#include "mlx4_utils.h" + +/** + * Pointer-value pair structure used in tx_post_send for saving the first + * DWORD (32 byte) of a TXBB. 
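Here a DWORD is the first 32-bit word (4 bytes) of the TXBB.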
+ */ +struct pv { + union { + volatile struct mlx4_wqe_data_seg *dseg; + volatile uint32_t *dst; + }; + uint32_t val; +}; + +/** A helper structure for TSO packet handling. */ +struct tso_info { + /** Pointer to the array of saved first DWORD (32 byte) of a TXBB. */ + struct pv *pv; + /** Current entry in the pv array. */ + int pv_counter; + /** Total size of the WQE including padding. */ + uint32_t wqe_size; + /** Size of TSO header to prepend to each packet to send. */ + uint16_t tso_header_size; + /** Total size of the TSO segment in the WQE. */ + uint16_t wqe_tso_seg_size; + /** Raw WQE size in units of 16 Bytes and without padding. */ + uint8_t fence_size; +}; + +/** A table to translate Rx completion flags to packet type. */ +uint32_t mlx4_ptype_table[0x100] __rte_cache_aligned = { + /* + * The index to the array should have: + * bit[7] - MLX4_CQE_L2_TUNNEL + * bit[6] - MLX4_CQE_L2_TUNNEL_IPV4 + * bit[5] - MLX4_CQE_STATUS_UDP + * bit[4] - MLX4_CQE_STATUS_TCP + * bit[3] - MLX4_CQE_STATUS_IPV4OPT + * bit[2] - MLX4_CQE_STATUS_IPV6 + * bit[1] - MLX4_CQE_STATUS_IPF + * bit[0] - MLX4_CQE_STATUS_IPV4 + * giving a total of up to 256 entries. + */ + /* L2 */ + [0x00] = RTE_PTYPE_L2_ETHER, + /* L3 */ + [0x01] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_NONFRAG, + [0x02] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_FRAG, + [0x03] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_FRAG, + [0x04] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_NONFRAG, + [0x06] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_FRAG, + [0x08] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT | + RTE_PTYPE_L4_NONFRAG, + [0x09] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT | + RTE_PTYPE_L4_NONFRAG, + [0x0a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT | + RTE_PTYPE_L4_FRAG, + [0x0b] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT | + RTE_PTYPE_L4_FRAG, + /* TCP */ + [0x11] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_TCP, + [0x14] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_TCP, + [0x16] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_FRAG, + [0x18] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT | + RTE_PTYPE_L4_TCP, + [0x19] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT | + RTE_PTYPE_L4_TCP, + /* UDP */ + [0x21] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_UDP, + [0x24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_UDP, + [0x26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_FRAG, + [0x28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT | + RTE_PTYPE_L4_UDP, + [0x29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT | + RTE_PTYPE_L4_UDP, + /* Tunneled - L3 IPV6 */ + [0x80] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN, + [0x81] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [0x82] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [0x83] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [0x84] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [0x86] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [0x88] 
= RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV4_EXT | + RTE_PTYPE_INNER_L4_NONFRAG, + [0x89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV4_EXT | + RTE_PTYPE_INNER_L4_NONFRAG, + [0x8a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV4_EXT | + RTE_PTYPE_INNER_L4_FRAG, + [0x8b] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV4_EXT | + RTE_PTYPE_INNER_L4_FRAG, + /* Tunneled - L3 IPV6, TCP */ + [0x91] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [0x94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [0x96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [0x98] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_TCP, + [0x99] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_TCP, + /* Tunneled - L3 IPV6, UDP */ + [0xa1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + [0xa4] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + [0xa6] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [0xa8] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV4_EXT | + RTE_PTYPE_INNER_L4_UDP, + [0xa9] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV4_EXT | + RTE_PTYPE_INNER_L4_UDP, + /* Tunneled - L3 IPV4 */ + [0xc0] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN, + [0xc1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [0xc2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [0xc3] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [0xc4] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [0xc6] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [0xc8] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV4_EXT | + RTE_PTYPE_INNER_L4_NONFRAG, + [0xc9] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV4_EXT | + RTE_PTYPE_INNER_L4_NONFRAG, + [0xca] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV4_EXT | + RTE_PTYPE_INNER_L4_FRAG, + [0xcb] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV4_EXT | + RTE_PTYPE_INNER_L4_FRAG, + /* Tunneled - L3 IPV4, TCP */ + [0xd1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [0xd4] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [0xd6] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [0xd8] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 
+ RTE_PTYPE_INNER_L3_IPV4_EXT | + RTE_PTYPE_INNER_L4_TCP, + [0xd9] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV4_EXT | + RTE_PTYPE_INNER_L4_TCP, + /* Tunneled - L3 IPV4, UDP */ + [0xe1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + [0xe4] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + [0xe6] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [0xe8] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV4_EXT | + RTE_PTYPE_INNER_L4_UDP, + [0xe9] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV4_EXT | + RTE_PTYPE_INNER_L4_UDP, +}; + +/** + * Stamp TXBB burst so it won't be reused by the HW. + * + * Routine is used when freeing WQE used by the chip or when failing + * building an WQ entry has failed leaving partial information on the queue. + * + * @param sq + * Pointer to the SQ structure. + * @param start + * Pointer to the first TXBB to stamp. + * @param end + * Pointer to the followed end TXBB to stamp. + * + * @return + * Stamping burst size in byte units. + */ +static uint32_t +mlx4_txq_stamp_freed_wqe(struct mlx4_sq *sq, volatile uint32_t *start, + volatile uint32_t *end) +{ + uint32_t stamp = sq->stamp; + int32_t size = (intptr_t)end - (intptr_t)start; + + MLX4_ASSERT(start != end); + /* Hold SQ ring wrap around. */ + if (size < 0) { + size = (int32_t)sq->size + size; + do { + *start = stamp; + start += MLX4_SQ_STAMP_DWORDS; + } while (start != (volatile uint32_t *)sq->eob); + start = (volatile uint32_t *)sq->buf; + /* Flip invalid stamping ownership. */ + stamp ^= RTE_BE32(1u << MLX4_SQ_OWNER_BIT); + sq->stamp = stamp; + if (start == end) + return size; + } + do { + *start = stamp; + start += MLX4_SQ_STAMP_DWORDS; + } while (start != end); + return (uint32_t)size; +} + +/** + * Manage Tx completions. + * + * When sending a burst, mlx4_tx_burst() posts several WRs. + * To improve performance, a completion event is only required once every + * MLX4_PMD_TX_PER_COMP_REQ sends. Doing so discards completion information + * for other WRs, but this information would not be used anyway. + * + * @param txq + * Pointer to Tx queue structure. + * @param elts_m + * Tx elements number mask. + * @param sq + * Pointer to the SQ structure. + */ +static void +mlx4_txq_complete(struct txq *txq, const unsigned int elts_m, + struct mlx4_sq *sq) +{ + unsigned int elts_tail = txq->elts_tail; + struct mlx4_cq *cq = &txq->mcq; + volatile struct mlx4_cqe *cqe; + uint32_t completed; + uint32_t cons_index = cq->cons_index; + volatile uint32_t *first_txbb; + + /* + * Traverse over all CQ entries reported and handle each WQ entry + * reported by them. + */ + do { + cqe = (volatile struct mlx4_cqe *)mlx4_get_cqe(cq, cons_index); + if (unlikely(!!(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK) ^ + !!(cons_index & cq->cqe_cnt))) + break; +#ifdef RTE_LIBRTE_MLX4_DEBUG + /* + * Make sure we read the CQE after we read the ownership bit. 
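The rte_io_rmb() below keeps the CQE field loads ordered after the ownership check.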
+ */ + rte_io_rmb(); + if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == + MLX4_CQE_OPCODE_ERROR)) { + volatile struct mlx4_err_cqe *cqe_err = + (volatile struct mlx4_err_cqe *)cqe; + ERROR("%p CQE error - vendor syndrome: 0x%x" + " syndrome: 0x%x\n", + (void *)txq, cqe_err->vendor_err, + cqe_err->syndrome); + break; + } +#endif /* RTE_LIBRTE_MLX4_DEBUG */ + cons_index++; + } while (1); + completed = (cons_index - cq->cons_index) * txq->elts_comp_cd_init; + if (unlikely(!completed)) + return; + /* First stamping address is the end of the last one. */ + first_txbb = (&(*txq->elts)[elts_tail & elts_m])->eocb; + elts_tail += completed; + /* The new tail element holds the end address. */ + sq->remain_size += mlx4_txq_stamp_freed_wqe(sq, first_txbb, + (&(*txq->elts)[elts_tail & elts_m])->eocb); + /* Update CQ consumer index. */ + cq->cons_index = cons_index; + *cq->set_ci_db = rte_cpu_to_be_32(cons_index & MLX4_CQ_DB_CI_MASK); + txq->elts_tail = elts_tail; +} + +/** + * Write Tx data segment to the SQ. + * + * @param dseg + * Pointer to data segment in SQ. + * @param lkey + * Memory region lkey. + * @param addr + * Data address. + * @param byte_count + * Big endian bytes count of the data to send. + */ +static inline void +mlx4_fill_tx_data_seg(volatile struct mlx4_wqe_data_seg *dseg, + uint32_t lkey, uintptr_t addr, rte_be32_t byte_count) +{ + dseg->addr = rte_cpu_to_be_64(addr); + dseg->lkey = lkey; +#if RTE_CACHE_LINE_SIZE < 64 + /* + * Need a barrier here before writing the byte_count + * fields to make sure that all the data is visible + * before the byte_count field is set. + * Otherwise, if the segment begins a new cacheline, + * the HCA prefetcher could grab the 64-byte chunk and + * get a valid (!= 0xffffffff) byte count but stale + * data, and end up sending the wrong data. + */ + rte_io_wmb(); +#endif /* RTE_CACHE_LINE_SIZE */ + dseg->byte_count = byte_count; +} + +/** + * Obtain and calculate TSO information needed for assembling a TSO WQE. + * + * @param buf + * Pointer to the first packet mbuf. + * @param txq + * Pointer to Tx queue structure. + * @param tinfo + * Pointer to a structure to fill the info with. + * + * @return + * 0 on success, negative value upon error. + */ +static inline int +mlx4_tx_burst_tso_get_params(struct rte_mbuf *buf, + struct txq *txq, + struct tso_info *tinfo) +{ + struct mlx4_sq *sq = &txq->msq; + const uint8_t tunneled = txq->priv->hw_csum_l2tun && + (buf->ol_flags & PKT_TX_TUNNEL_MASK); + + tinfo->tso_header_size = buf->l2_len + buf->l3_len + buf->l4_len; + if (tunneled) + tinfo->tso_header_size += + buf->outer_l2_len + buf->outer_l3_len; + if (unlikely(buf->tso_segsz == 0 || + tinfo->tso_header_size == 0 || + tinfo->tso_header_size > MLX4_MAX_TSO_HEADER || + tinfo->tso_header_size > buf->data_len)) + return -EINVAL; + /* + * Calculate the WQE TSO segment size + * Note: + * 1. An LSO segment must be padded such that the subsequent data + * segment is 16-byte aligned. + * 2. The start address of the TSO segment is always 16 Bytes aligned. + */ + tinfo->wqe_tso_seg_size = RTE_ALIGN(sizeof(struct mlx4_wqe_lso_seg) + + tinfo->tso_header_size, + sizeof(struct mlx4_wqe_data_seg)); + tinfo->fence_size = ((sizeof(struct mlx4_wqe_ctrl_seg) + + tinfo->wqe_tso_seg_size) >> MLX4_SEG_SHIFT) + + buf->nb_segs; + tinfo->wqe_size = + RTE_ALIGN((uint32_t)(tinfo->fence_size << MLX4_SEG_SHIFT), + MLX4_TXBB_SIZE); + /* Validate WQE size and WQE space in the send queue. 
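Posting is refused when the WQE exceeds MLX4_MAX_WQE_SIZE or the space remaining in the ring.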
*/ + if (sq->remain_size < tinfo->wqe_size || + tinfo->wqe_size > MLX4_MAX_WQE_SIZE) + return -ENOMEM; + /* Init pv. */ + tinfo->pv = (struct pv *)txq->bounce_buf; + tinfo->pv_counter = 0; + return 0; +} + +/** + * Fill the TSO WQE data segments with info on buffers to transmit . + * + * @param buf + * Pointer to the first packet mbuf. + * @param txq + * Pointer to Tx queue structure. + * @param tinfo + * Pointer to TSO info to use. + * @param dseg + * Pointer to the first data segment in the TSO WQE. + * @param ctrl + * Pointer to the control segment in the TSO WQE. + * + * @return + * 0 on success, negative value upon error. + */ +static inline volatile struct mlx4_wqe_ctrl_seg * +mlx4_tx_burst_fill_tso_dsegs(struct rte_mbuf *buf, + struct txq *txq, + struct tso_info *tinfo, + volatile struct mlx4_wqe_data_seg *dseg, + volatile struct mlx4_wqe_ctrl_seg *ctrl) +{ + uint32_t lkey; + int nb_segs = buf->nb_segs; + int nb_segs_txbb; + struct mlx4_sq *sq = &txq->msq; + struct rte_mbuf *sbuf = buf; + struct pv *pv = tinfo->pv; + int *pv_counter = &tinfo->pv_counter; + volatile struct mlx4_wqe_ctrl_seg *ctrl_next = + (volatile struct mlx4_wqe_ctrl_seg *) + ((volatile uint8_t *)ctrl + tinfo->wqe_size); + uint16_t data_len = sbuf->data_len - tinfo->tso_header_size; + uintptr_t data_addr = rte_pktmbuf_mtod_offset(sbuf, uintptr_t, + tinfo->tso_header_size); + + do { + /* how many dseg entries do we have in the current TXBB ? */ + nb_segs_txbb = (MLX4_TXBB_SIZE - + ((uintptr_t)dseg & (MLX4_TXBB_SIZE - 1))) >> + MLX4_SEG_SHIFT; + switch (nb_segs_txbb) { +#ifdef RTE_LIBRTE_MLX4_DEBUG + default: + /* Should never happen. */ + rte_panic("%p: Invalid number of SGEs(%d) for a TXBB", + (void *)txq, nb_segs_txbb); + /* rte_panic never returns. */ + break; +#endif /* RTE_LIBRTE_MLX4_DEBUG */ + case 4: + /* Memory region key for this memory pool. */ + lkey = mlx4_tx_mb2mr(txq, sbuf); + if (unlikely(lkey == (uint32_t)-1)) + goto err; + dseg->addr = rte_cpu_to_be_64(data_addr); + dseg->lkey = lkey; + /* + * This data segment starts at the beginning of a new + * TXBB, so we need to postpone its byte_count writing + * for later. + */ + pv[*pv_counter].dseg = dseg; + /* + * Zero length segment is treated as inline segment + * with zero data. + */ + pv[(*pv_counter)++].val = + rte_cpu_to_be_32(data_len ? + data_len : + 0x80000000); + if (--nb_segs == 0) + return ctrl_next; + /* Prepare next buf info */ + sbuf = sbuf->next; + dseg++; + data_len = sbuf->data_len; + data_addr = rte_pktmbuf_mtod(sbuf, uintptr_t); + /* fallthrough */ + case 3: + lkey = mlx4_tx_mb2mr(txq, sbuf); + if (unlikely(lkey == (uint32_t)-1)) + goto err; + mlx4_fill_tx_data_seg(dseg, lkey, data_addr, + rte_cpu_to_be_32(data_len ? + data_len : + 0x80000000)); + if (--nb_segs == 0) + return ctrl_next; + /* Prepare next buf info */ + sbuf = sbuf->next; + dseg++; + data_len = sbuf->data_len; + data_addr = rte_pktmbuf_mtod(sbuf, uintptr_t); + /* fallthrough */ + case 2: + lkey = mlx4_tx_mb2mr(txq, sbuf); + if (unlikely(lkey == (uint32_t)-1)) + goto err; + mlx4_fill_tx_data_seg(dseg, lkey, data_addr, + rte_cpu_to_be_32(data_len ? + data_len : + 0x80000000)); + if (--nb_segs == 0) + return ctrl_next; + /* Prepare next buf info */ + sbuf = sbuf->next; + dseg++; + data_len = sbuf->data_len; + data_addr = rte_pktmbuf_mtod(sbuf, uintptr_t); + /* fallthrough */ + case 1: + lkey = mlx4_tx_mb2mr(txq, sbuf); + if (unlikely(lkey == (uint32_t)-1)) + goto err; + mlx4_fill_tx_data_seg(dseg, lkey, data_addr, + rte_cpu_to_be_32(data_len ? 
+ data_len : + 0x80000000)); + if (--nb_segs == 0) + return ctrl_next; + /* Prepare next buf info */ + sbuf = sbuf->next; + dseg++; + data_len = sbuf->data_len; + data_addr = rte_pktmbuf_mtod(sbuf, uintptr_t); + /* fallthrough */ + } + /* Wrap dseg if it points at the end of the queue. */ + if ((volatile uint8_t *)dseg >= sq->eob) + dseg = (volatile struct mlx4_wqe_data_seg *) + ((volatile uint8_t *)dseg - sq->size); + } while (true); +err: + return NULL; +} + +/** + * Fill the packet's l2, l3 and l4 headers to the WQE. + * + * This will be used as the header for each TSO segment that is transmitted. + * + * @param buf + * Pointer to the first packet mbuf. + * @param txq + * Pointer to Tx queue structure. + * @param tinfo + * Pointer to TSO info to use. + * @param ctrl + * Pointer to the control segment in the TSO WQE. + * + * @return + * 0 on success, negative value upon error. + */ +static inline volatile struct mlx4_wqe_data_seg * +mlx4_tx_burst_fill_tso_hdr(struct rte_mbuf *buf, + struct txq *txq, + struct tso_info *tinfo, + volatile struct mlx4_wqe_ctrl_seg *ctrl) +{ + volatile struct mlx4_wqe_lso_seg *tseg = + (volatile struct mlx4_wqe_lso_seg *)(ctrl + 1); + struct mlx4_sq *sq = &txq->msq; + struct pv *pv = tinfo->pv; + int *pv_counter = &tinfo->pv_counter; + int remain_size = tinfo->tso_header_size; + char *from = rte_pktmbuf_mtod(buf, char *); + uint16_t txbb_avail_space; + /* Union to overcome volatile constraints when copying TSO header. */ + union { + volatile uint8_t *vto; + uint8_t *to; + } thdr = { .vto = (volatile uint8_t *)tseg->header, }; + + /* + * TSO data always starts at offset 20 from the beginning of the TXBB + * (16 byte ctrl + 4byte TSO desc). Since each TXBB is 64Byte aligned + * we can write the first 44 TSO header bytes without worry for TxQ + * wrapping or overwriting the first TXBB 32bit word. + */ + txbb_avail_space = MLX4_TXBB_SIZE - + (sizeof(struct mlx4_wqe_ctrl_seg) + + sizeof(struct mlx4_wqe_lso_seg)); + while (remain_size >= (int)(txbb_avail_space + sizeof(uint32_t))) { + /* Copy to end of txbb. */ + rte_memcpy(thdr.to, from, txbb_avail_space); + from += txbb_avail_space; + thdr.to += txbb_avail_space; + /* New TXBB, Check for TxQ wrap. */ + if (thdr.to >= sq->eob) + thdr.vto = sq->buf; + /* New TXBB, stash the first 32bits for later use. */ + pv[*pv_counter].dst = (volatile uint32_t *)thdr.to; + pv[(*pv_counter)++].val = *(uint32_t *)from, + from += sizeof(uint32_t); + thdr.to += sizeof(uint32_t); + remain_size -= txbb_avail_space + sizeof(uint32_t); + /* Avail space in new TXBB is TXBB size - 4 */ + txbb_avail_space = MLX4_TXBB_SIZE - sizeof(uint32_t); + } + if (remain_size > txbb_avail_space) { + rte_memcpy(thdr.to, from, txbb_avail_space); + from += txbb_avail_space; + thdr.to += txbb_avail_space; + remain_size -= txbb_avail_space; + /* New TXBB, Check for TxQ wrap. */ + if (thdr.to >= sq->eob) + thdr.vto = sq->buf; + pv[*pv_counter].dst = (volatile uint32_t *)thdr.to; + rte_memcpy(&pv[*pv_counter].val, from, remain_size); + (*pv_counter)++; + } else if (remain_size) { + rte_memcpy(thdr.to, from, remain_size); + } + tseg->mss_hdr_size = rte_cpu_to_be_32((buf->tso_segsz << 16) | + tinfo->tso_header_size); + /* Calculate data segment location */ + return (volatile struct mlx4_wqe_data_seg *) + ((uintptr_t)tseg + tinfo->wqe_tso_seg_size); +} + +/** + * Write data segments and header for TSO uni/multi segment packet. + * + * @param buf + * Pointer to the first packet mbuf. + * @param txq + * Pointer to Tx queue structure. 
+ * @param ctrl + * Pointer to the WQE control segment. + * + * @return + * Pointer to the next WQE control segment on success, NULL otherwise. + */ +static volatile struct mlx4_wqe_ctrl_seg * +mlx4_tx_burst_tso(struct rte_mbuf *buf, struct txq *txq, + volatile struct mlx4_wqe_ctrl_seg *ctrl) +{ + volatile struct mlx4_wqe_data_seg *dseg; + volatile struct mlx4_wqe_ctrl_seg *ctrl_next; + struct mlx4_sq *sq = &txq->msq; + struct tso_info tinfo; + struct pv *pv; + int pv_counter; + int ret; + + ret = mlx4_tx_burst_tso_get_params(buf, txq, &tinfo); + if (unlikely(ret)) + goto error; + dseg = mlx4_tx_burst_fill_tso_hdr(buf, txq, &tinfo, ctrl); + if (unlikely(dseg == NULL)) + goto error; + if ((uintptr_t)dseg >= (uintptr_t)sq->eob) + dseg = (volatile struct mlx4_wqe_data_seg *) + ((uintptr_t)dseg - sq->size); + ctrl_next = mlx4_tx_burst_fill_tso_dsegs(buf, txq, &tinfo, dseg, ctrl); + if (unlikely(ctrl_next == NULL)) + goto error; + /* Write the first DWORD of each TXBB save earlier. */ + if (likely(tinfo.pv_counter)) { + pv = tinfo.pv; + pv_counter = tinfo.pv_counter; + /* Need a barrier here before writing the first TXBB word. */ + rte_io_wmb(); + do { + --pv_counter; + *pv[pv_counter].dst = pv[pv_counter].val; + } while (pv_counter > 0); + } + ctrl->fence_size = tinfo.fence_size; + sq->remain_size -= tinfo.wqe_size; + return ctrl_next; +error: + txq->stats.odropped++; + return NULL; +} + +/** + * Write data segments of multi-segment packet. + * + * @param buf + * Pointer to the first packet mbuf. + * @param txq + * Pointer to Tx queue structure. + * @param ctrl + * Pointer to the WQE control segment. + * + * @return + * Pointer to the next WQE control segment on success, NULL otherwise. + */ +static volatile struct mlx4_wqe_ctrl_seg * +mlx4_tx_burst_segs(struct rte_mbuf *buf, struct txq *txq, + volatile struct mlx4_wqe_ctrl_seg *ctrl) +{ + struct pv *pv = (struct pv *)txq->bounce_buf; + struct mlx4_sq *sq = &txq->msq; + struct rte_mbuf *sbuf = buf; + uint32_t lkey; + int pv_counter = 0; + int nb_segs = buf->nb_segs; + uint32_t wqe_size; + volatile struct mlx4_wqe_data_seg *dseg = + (volatile struct mlx4_wqe_data_seg *)(ctrl + 1); + + ctrl->fence_size = 1 + nb_segs; + wqe_size = RTE_ALIGN((uint32_t)(ctrl->fence_size << MLX4_SEG_SHIFT), + MLX4_TXBB_SIZE); + /* Validate WQE size and WQE space in the send queue. */ + if (sq->remain_size < wqe_size || + wqe_size > MLX4_MAX_WQE_SIZE) + return NULL; + /* + * Fill the data segments with buffer information. + * First WQE TXBB head segment is always control segment, + * so jump to tail TXBB data segments code for the first + * WQE data segments filling. + */ + goto txbb_tail_segs; +txbb_head_seg: + /* Memory region key (big endian) for this memory pool. */ + lkey = mlx4_tx_mb2mr(txq, sbuf); + if (unlikely(lkey == (uint32_t)-1)) { + DEBUG("%p: unable to get MP <-> MR association", + (void *)txq); + return NULL; + } + /* Handle WQE wraparound. */ + if (dseg >= + (volatile struct mlx4_wqe_data_seg *)sq->eob) + dseg = (volatile struct mlx4_wqe_data_seg *) + sq->buf; + dseg->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(sbuf, uintptr_t)); + dseg->lkey = lkey; + /* + * This data segment starts at the beginning of a new + * TXBB, so we need to postpone its byte_count writing + * for later. + */ + pv[pv_counter].dseg = dseg; + /* + * Zero length segment is treated as inline segment + * with zero data. + */ + pv[pv_counter++].val = rte_cpu_to_be_32(sbuf->data_len ? 
+ sbuf->data_len : 0x80000000); + sbuf = sbuf->next; + dseg++; + nb_segs--; +txbb_tail_segs: + /* Jump to default if there are more than two segments remaining. */ + switch (nb_segs) { + default: + lkey = mlx4_tx_mb2mr(txq, sbuf); + if (unlikely(lkey == (uint32_t)-1)) { + DEBUG("%p: unable to get MP <-> MR association", + (void *)txq); + return NULL; + } + mlx4_fill_tx_data_seg(dseg, lkey, + rte_pktmbuf_mtod(sbuf, uintptr_t), + rte_cpu_to_be_32(sbuf->data_len ? + sbuf->data_len : + 0x80000000)); + sbuf = sbuf->next; + dseg++; + nb_segs--; + /* fallthrough */ + case 2: + lkey = mlx4_tx_mb2mr(txq, sbuf); + if (unlikely(lkey == (uint32_t)-1)) { + DEBUG("%p: unable to get MP <-> MR association", + (void *)txq); + return NULL; + } + mlx4_fill_tx_data_seg(dseg, lkey, + rte_pktmbuf_mtod(sbuf, uintptr_t), + rte_cpu_to_be_32(sbuf->data_len ? + sbuf->data_len : + 0x80000000)); + sbuf = sbuf->next; + dseg++; + nb_segs--; + /* fallthrough */ + case 1: + lkey = mlx4_tx_mb2mr(txq, sbuf); + if (unlikely(lkey == (uint32_t)-1)) { + DEBUG("%p: unable to get MP <-> MR association", + (void *)txq); + return NULL; + } + mlx4_fill_tx_data_seg(dseg, lkey, + rte_pktmbuf_mtod(sbuf, uintptr_t), + rte_cpu_to_be_32(sbuf->data_len ? + sbuf->data_len : + 0x80000000)); + nb_segs--; + if (nb_segs) { + sbuf = sbuf->next; + dseg++; + goto txbb_head_seg; + } + /* fallthrough */ + case 0: + break; + } + /* Write the first DWORD of each TXBB save earlier. */ + if (pv_counter) { + /* Need a barrier here before writing the byte_count. */ + rte_io_wmb(); + for (--pv_counter; pv_counter >= 0; pv_counter--) + pv[pv_counter].dseg->byte_count = pv[pv_counter].val; + } + sq->remain_size -= wqe_size; + /* Align next WQE address to the next TXBB. */ + return (volatile struct mlx4_wqe_ctrl_seg *) + ((volatile uint8_t *)ctrl + wqe_size); +} + +/** + * DPDK callback for Tx. + * + * @param dpdk_txq + * Generic pointer to Tx queue structure. + * @param[in] pkts + * Packets to transmit. + * @param pkts_n + * Number of packets in array. + * + * @return + * Number of packets successfully transmitted (<= pkts_n). + */ +uint16_t +mlx4_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) +{ + struct txq *txq = (struct txq *)dpdk_txq; + unsigned int elts_head = txq->elts_head; + const unsigned int elts_n = txq->elts_n; + const unsigned int elts_m = elts_n - 1; + unsigned int bytes_sent = 0; + unsigned int i; + unsigned int max = elts_head - txq->elts_tail; + struct mlx4_sq *sq = &txq->msq; + volatile struct mlx4_wqe_ctrl_seg *ctrl; + struct txq_elt *elt; + + MLX4_ASSERT(txq->elts_comp_cd != 0); + if (likely(max >= txq->elts_comp_cd_init)) + mlx4_txq_complete(txq, elts_m, sq); + max = elts_n - max; + MLX4_ASSERT(max >= 1); + MLX4_ASSERT(max <= elts_n); + /* Always leave one free entry in the ring. */ + --max; + if (max > pkts_n) + max = pkts_n; + elt = &(*txq->elts)[elts_head & elts_m]; + /* First Tx burst element saves the next WQE control segment. */ + ctrl = elt->wqe; + for (i = 0; (i != max); ++i) { + struct rte_mbuf *buf = pkts[i]; + struct txq_elt *elt_next = &(*txq->elts)[++elts_head & elts_m]; + uint32_t owner_opcode = sq->owner_opcode; + volatile struct mlx4_wqe_data_seg *dseg = + (volatile struct mlx4_wqe_data_seg *)(ctrl + 1); + volatile struct mlx4_wqe_ctrl_seg *ctrl_next; + union { + uint32_t flags; + uint16_t flags16[2]; + } srcrb; + uint32_t lkey; + bool tso = txq->priv->tso && (buf->ol_flags & PKT_TX_TCP_SEG); + + /* Clean up old buffer. 
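Any mbuf chain still attached to this ring slot from an earlier packet is freed now, just before the slot is reused.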
*/ + if (likely(elt->buf != NULL)) { + struct rte_mbuf *tmp = elt->buf; + +#ifdef RTE_LIBRTE_MLX4_DEBUG + /* Poisoning. */ + memset(&elt->buf, 0x66, sizeof(struct rte_mbuf *)); +#endif + /* Faster than rte_pktmbuf_free(). */ + do { + struct rte_mbuf *next = tmp->next; + + rte_pktmbuf_free_seg(tmp); + tmp = next; + } while (tmp != NULL); + } + RTE_MBUF_PREFETCH_TO_FREE(elt_next->buf); + if (tso) { + /* Change opcode to TSO */ + owner_opcode &= ~MLX4_OPCODE_CONFIG_CMD; + owner_opcode |= MLX4_OPCODE_LSO | MLX4_WQE_CTRL_RR; + ctrl_next = mlx4_tx_burst_tso(buf, txq, ctrl); + if (!ctrl_next) { + elt->buf = NULL; + break; + } + } else if (buf->nb_segs == 1) { + /* Validate WQE space in the send queue. */ + if (sq->remain_size < MLX4_TXBB_SIZE) { + elt->buf = NULL; + break; + } + lkey = mlx4_tx_mb2mr(txq, buf); + if (unlikely(lkey == (uint32_t)-1)) { + /* MR does not exist. */ + DEBUG("%p: unable to get MP <-> MR association", + (void *)txq); + elt->buf = NULL; + break; + } + mlx4_fill_tx_data_seg(dseg++, lkey, + rte_pktmbuf_mtod(buf, uintptr_t), + rte_cpu_to_be_32(buf->data_len)); + /* Set WQE size in 16-byte units. */ + ctrl->fence_size = 0x2; + sq->remain_size -= MLX4_TXBB_SIZE; + /* Align next WQE address to the next TXBB. */ + ctrl_next = ctrl + 0x4; + } else { + ctrl_next = mlx4_tx_burst_segs(buf, txq, ctrl); + if (!ctrl_next) { + elt->buf = NULL; + break; + } + } + /* Hold SQ ring wrap around. */ + if ((volatile uint8_t *)ctrl_next >= sq->eob) { + ctrl_next = (volatile struct mlx4_wqe_ctrl_seg *) + ((volatile uint8_t *)ctrl_next - sq->size); + /* Flip HW valid ownership. */ + sq->owner_opcode ^= 1u << MLX4_SQ_OWNER_BIT; + } + /* + * For raw Ethernet, the SOLICIT flag is used to indicate + * that no ICRC should be calculated. + */ + if (--txq->elts_comp_cd == 0) { + /* Save the completion burst end address. */ + elt_next->eocb = (volatile uint32_t *)ctrl_next; + txq->elts_comp_cd = txq->elts_comp_cd_init; + srcrb.flags = RTE_BE32(MLX4_WQE_CTRL_SOLICIT | + MLX4_WQE_CTRL_CQ_UPDATE); + } else { + srcrb.flags = RTE_BE32(MLX4_WQE_CTRL_SOLICIT); + } + /* Enable HW checksum offload if requested */ + if (txq->csum && + (buf->ol_flags & + (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM))) { + const uint64_t is_tunneled = (buf->ol_flags & + (PKT_TX_TUNNEL_GRE | + PKT_TX_TUNNEL_VXLAN)); + + if (is_tunneled && txq->csum_l2tun) { + owner_opcode |= MLX4_WQE_CTRL_IIP_HDR_CSUM | + MLX4_WQE_CTRL_IL4_HDR_CSUM; + if (buf->ol_flags & PKT_TX_OUTER_IP_CKSUM) + srcrb.flags |= + RTE_BE32(MLX4_WQE_CTRL_IP_HDR_CSUM); + } else { + srcrb.flags |= + RTE_BE32(MLX4_WQE_CTRL_IP_HDR_CSUM | + MLX4_WQE_CTRL_TCP_UDP_CSUM); + } + } + if (txq->lb) { + /* + * Copy destination MAC address to the WQE, this allows + * loopback in eSwitch, so that VFs and PF can + * communicate with each other. + */ + srcrb.flags16[0] = *(rte_pktmbuf_mtod(buf, uint16_t *)); + ctrl->imm = *(rte_pktmbuf_mtod_offset(buf, uint32_t *, + sizeof(uint16_t))); + } else { + ctrl->imm = 0; + } + ctrl->srcrb_flags = srcrb.flags; + /* + * Make sure descriptor is fully written before + * setting ownership bit (because HW can start + * executing as soon as we do). + */ + rte_io_wmb(); + ctrl->owner_opcode = rte_cpu_to_be_32(owner_opcode); + elt->buf = buf; + bytes_sent += buf->pkt_len; + ctrl = ctrl_next; + elt = elt_next; + } + /* Take a shortcut if nothing must be sent. */ + if (unlikely(i == 0)) + return 0; + /* Save WQE address of the next Tx burst element. */ + elt->wqe = ctrl; + /* Increment send statistics counters. 
*/ + txq->stats.opackets += i; + txq->stats.obytes += bytes_sent; + /* Make sure that descriptors are written before doorbell record. */ + rte_wmb(); + /* Ring QP doorbell. */ + rte_write32(txq->msq.doorbell_qpn, MLX4_TX_BFREG(txq)); + txq->elts_head += i; + return i; +} + +/** + * Translate Rx completion flags to packet type. + * + * @param[in] cqe + * Pointer to CQE. + * + * @return + * Packet type for struct rte_mbuf. + */ +static inline uint32_t +rxq_cq_to_pkt_type(volatile struct mlx4_cqe *cqe, + uint32_t l2tun_offload) +{ + uint8_t idx = 0; + uint32_t pinfo = rte_be_to_cpu_32(cqe->vlan_my_qpn); + uint32_t status = rte_be_to_cpu_32(cqe->status); + + /* + * The index to the array should have: + * bit[7] - MLX4_CQE_L2_TUNNEL + * bit[6] - MLX4_CQE_L2_TUNNEL_IPV4 + */ + if (l2tun_offload && (pinfo & MLX4_CQE_L2_TUNNEL)) + idx |= ((pinfo & MLX4_CQE_L2_TUNNEL) >> 20) | + ((pinfo & MLX4_CQE_L2_TUNNEL_IPV4) >> 19); + /* + * The index to the array should have: + * bit[5] - MLX4_CQE_STATUS_UDP + * bit[4] - MLX4_CQE_STATUS_TCP + * bit[3] - MLX4_CQE_STATUS_IPV4OPT + * bit[2] - MLX4_CQE_STATUS_IPV6 + * bit[1] - MLX4_CQE_STATUS_IPF + * bit[0] - MLX4_CQE_STATUS_IPV4 + * giving a total of up to 256 entries. + */ + idx |= ((status & MLX4_CQE_STATUS_PTYPE_MASK) >> 22); + if (status & MLX4_CQE_STATUS_IPV6) + idx |= ((status & MLX4_CQE_STATUS_IPV6F) >> 11); + return mlx4_ptype_table[idx]; +} + +/** + * Translate Rx completion flags to offload flags. + * + * @param flags + * Rx completion flags returned by mlx4_cqe_flags(). + * @param csum + * Whether Rx checksums are enabled. + * @param csum_l2tun + * Whether Rx L2 tunnel checksums are enabled. + * + * @return + * Offload flags (ol_flags) in mbuf format. + */ +static inline uint32_t +rxq_cq_to_ol_flags(uint32_t flags, int csum, int csum_l2tun) +{ + uint32_t ol_flags = 0; + + if (csum) + ol_flags |= + mlx4_transpose(flags, + MLX4_CQE_STATUS_IP_HDR_CSUM_OK, + PKT_RX_IP_CKSUM_GOOD) | + mlx4_transpose(flags, + MLX4_CQE_STATUS_TCP_UDP_CSUM_OK, + PKT_RX_L4_CKSUM_GOOD); + if ((flags & MLX4_CQE_L2_TUNNEL) && csum_l2tun) + ol_flags |= + mlx4_transpose(flags, + MLX4_CQE_L2_TUNNEL_IPOK, + PKT_RX_IP_CKSUM_GOOD) | + mlx4_transpose(flags, + MLX4_CQE_L2_TUNNEL_L4_CSUM, + PKT_RX_L4_CKSUM_GOOD); + return ol_flags; +} + +/** + * Extract checksum information from CQE flags. + * + * @param cqe + * Pointer to CQE structure. + * @param csum + * Whether Rx checksums are enabled. + * @param csum_l2tun + * Whether Rx L2 tunnel checksums are enabled. + * + * @return + * CQE checksum information. + */ +static inline uint32_t +mlx4_cqe_flags(volatile struct mlx4_cqe *cqe, int csum, int csum_l2tun) +{ + uint32_t flags = 0; + + /* + * The relevant bits are in different locations on their + * CQE fields therefore we can join them in one 32bit + * variable. + */ + if (csum) + flags = (rte_be_to_cpu_32(cqe->status) & + MLX4_CQE_STATUS_IPV4_CSUM_OK); + if (csum_l2tun) + flags |= (rte_be_to_cpu_32(cqe->vlan_my_qpn) & + (MLX4_CQE_L2_TUNNEL | + MLX4_CQE_L2_TUNNEL_IPOK | + MLX4_CQE_L2_TUNNEL_L4_CSUM | + MLX4_CQE_L2_TUNNEL_IPV4)); + return flags; +} + +/** + * Poll one CQE from CQ. + * + * @param rxq + * Pointer to the receive queue structure. + * @param[out] out + * Just polled CQE. + * + * @return + * Number of bytes of the CQE, 0 in case there is no completion. 
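+ *
+ * A CQE belongs to software when its owner bit matches the parity phase of
+ * the consumer index (cq->cons_index & cq->cqe_cnt); the rte_rmb() below makes
+ * sure the rest of the entry is only read after this ownership check.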
+ */ +static unsigned int +mlx4_cq_poll_one(struct rxq *rxq, volatile struct mlx4_cqe **out) +{ + int ret = 0; + volatile struct mlx4_cqe *cqe = NULL; + struct mlx4_cq *cq = &rxq->mcq; + + cqe = (volatile struct mlx4_cqe *)mlx4_get_cqe(cq, cq->cons_index); + if (!!(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK) ^ + !!(cq->cons_index & cq->cqe_cnt)) + goto out; + /* + * Make sure we read CQ entry contents after we've checked the + * ownership bit. + */ + rte_rmb(); + MLX4_ASSERT(!(cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK)); + MLX4_ASSERT((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) != + MLX4_CQE_OPCODE_ERROR); + ret = rte_be_to_cpu_32(cqe->byte_cnt); + ++cq->cons_index; +out: + *out = cqe; + return ret; +} + +/** + * DPDK callback for Rx with scattered packets support. + * + * @param dpdk_rxq + * Generic pointer to Rx queue structure. + * @param[out] pkts + * Array to store received packets. + * @param pkts_n + * Maximum number of packets in array. + * + * @return + * Number of packets successfully received (<= pkts_n). + */ +uint16_t +mlx4_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) +{ + struct rxq *rxq = dpdk_rxq; + const uint32_t wr_cnt = (1 << rxq->elts_n) - 1; + const uint16_t sges_n = rxq->sges_n; + struct rte_mbuf *pkt = NULL; + struct rte_mbuf *seg = NULL; + unsigned int i = 0; + uint32_t rq_ci = rxq->rq_ci << sges_n; + int len = 0; + + while (pkts_n) { + volatile struct mlx4_cqe *cqe; + uint32_t idx = rq_ci & wr_cnt; + struct rte_mbuf *rep = (*rxq->elts)[idx]; + volatile struct mlx4_wqe_data_seg *scat = &(*rxq->wqes)[idx]; + + /* Update the 'next' pointer of the previous segment. */ + if (pkt) + seg->next = rep; + seg = rep; + rte_prefetch0(seg); + rte_prefetch0(scat); + rep = rte_mbuf_raw_alloc(rxq->mp); + if (unlikely(rep == NULL)) { + ++rxq->stats.rx_nombuf; + if (!pkt) { + /* + * No buffers before we even started, + * bail out silently. + */ + break; + } + while (pkt != seg) { + MLX4_ASSERT(pkt != (*rxq->elts)[idx]); + rep = pkt->next; + pkt->next = NULL; + pkt->nb_segs = 1; + rte_mbuf_raw_free(pkt); + pkt = rep; + } + break; + } + if (!pkt) { + /* Looking for the new packet. */ + len = mlx4_cq_poll_one(rxq, &cqe); + if (!len) { + rte_mbuf_raw_free(rep); + break; + } + if (unlikely(len < 0)) { + /* Rx error, packet is likely too large. */ + rte_mbuf_raw_free(rep); + ++rxq->stats.idropped; + goto skip; + } + pkt = seg; + MLX4_ASSERT(len >= (rxq->crc_present << 2)); + /* Update packet information. */ + pkt->packet_type = + rxq_cq_to_pkt_type(cqe, rxq->l2tun_offload); + pkt->ol_flags = PKT_RX_RSS_HASH; + pkt->hash.rss = cqe->immed_rss_invalid; + if (rxq->crc_present) + len -= RTE_ETHER_CRC_LEN; + pkt->pkt_len = len; + if (rxq->csum | rxq->csum_l2tun) { + uint32_t flags = + mlx4_cqe_flags(cqe, + rxq->csum, + rxq->csum_l2tun); + + pkt->ol_flags = + rxq_cq_to_ol_flags(flags, + rxq->csum, + rxq->csum_l2tun); + } + } + rep->nb_segs = 1; + rep->port = rxq->port_id; + rep->data_len = seg->data_len; + rep->data_off = seg->data_off; + (*rxq->elts)[idx] = rep; + /* + * Fill NIC descriptor with the new buffer. The lkey and size + * of the buffers are already known, only the buffer address + * changes. + */ + scat->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(rep, uintptr_t)); + /* If there's only one MR, no need to replace LKey in WQE. */ + if (unlikely(mlx4_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1)) + scat->lkey = mlx4_rx_mb2mr(rxq, rep); + if (len > seg->data_len) { + len -= seg->data_len; + ++pkt->nb_segs; + ++rq_ci; + continue; + } + /* The last segment. 
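Its data_len is trimmed to the remaining byte count.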
*/ + seg->data_len = len; + /* Increment bytes counter. */ + rxq->stats.ibytes += pkt->pkt_len; + /* Return packet. */ + *(pkts++) = pkt; + pkt = NULL; + --pkts_n; + ++i; +skip: + /* Align consumer index to the next stride. */ + rq_ci >>= sges_n; + ++rq_ci; + rq_ci <<= sges_n; + } + if (unlikely(i == 0 && (rq_ci >> sges_n) == rxq->rq_ci)) + return 0; + /* Update the consumer index. */ + rxq->rq_ci = rq_ci >> sges_n; + rte_wmb(); + *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci); + *rxq->mcq.set_ci_db = + rte_cpu_to_be_32(rxq->mcq.cons_index & MLX4_CQ_DB_CI_MASK); + /* Increment packets counter. */ + rxq->stats.ipackets += i; + return i; +} + +/** + * Dummy DPDK callback for Tx. + * + * This function is used to temporarily replace the real callback during + * unsafe control operations on the queue, or in case of error. + * + * @param dpdk_txq + * Generic pointer to Tx queue structure. + * @param[in] pkts + * Packets to transmit. + * @param pkts_n + * Number of packets in array. + * + * @return + * Number of packets successfully transmitted (<= pkts_n). + */ +uint16_t +mlx4_tx_burst_removed(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) +{ + (void)dpdk_txq; + (void)pkts; + (void)pkts_n; + rte_mb(); + return 0; +} + +/** + * Dummy DPDK callback for Rx. + * + * This function is used to temporarily replace the real callback during + * unsafe control operations on the queue, or in case of error. + * + * @param dpdk_rxq + * Generic pointer to Rx queue structure. + * @param[out] pkts + * Array to store received packets. + * @param pkts_n + * Maximum number of packets in array. + * + * @return + * Number of packets successfully received (<= pkts_n). + */ +uint16_t +mlx4_rx_burst_removed(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) +{ + (void)dpdk_rxq; + (void)pkts; + (void)pkts_n; + rte_mb(); + return 0; +} diff --git a/src/spdk/dpdk/drivers/net/mlx4/mlx4_rxtx.h b/src/spdk/dpdk/drivers/net/mlx4/mlx4_rxtx.h new file mode 100644 index 000000000..9de6c5941 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/mlx4/mlx4_rxtx.h @@ -0,0 +1,251 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2017 6WIND S.A. + * Copyright 2017 Mellanox Technologies, Ltd + */ + +#ifndef MLX4_RXTX_H_ +#define MLX4_RXTX_H_ + +#include +#include + +/* Verbs headers do not support -pedantic. */ +#ifdef PEDANTIC +#pragma GCC diagnostic ignored "-Wpedantic" +#endif +#include +#include +#ifdef PEDANTIC +#pragma GCC diagnostic error "-Wpedantic" +#endif + +#include +#include +#include + +#include "mlx4.h" +#include "mlx4_prm.h" +#include "mlx4_mr.h" + +/** Rx queue counters. */ +struct mlx4_rxq_stats { + unsigned int idx; /**< Mapping index. */ + uint64_t ipackets; /**< Total of successfully received packets. */ + uint64_t ibytes; /**< Total of successfully received bytes. */ + uint64_t idropped; /**< Total of packets dropped when Rx ring full. */ + uint64_t rx_nombuf; /**< Total of Rx mbuf allocation failures. */ +}; + +/** Rx queue descriptor. */ +struct rxq { + struct mlx4_priv *priv; /**< Back pointer to private data. */ + struct rte_mempool *mp; /**< Memory pool for allocations. */ + struct ibv_cq *cq; /**< Completion queue. */ + struct ibv_wq *wq; /**< Work queue. */ + struct ibv_comp_channel *channel; /**< Rx completion channel. */ + uint16_t rq_ci; /**< Saved RQ consumer index. */ + uint16_t port_id; /**< Port ID for incoming packets. */ + uint16_t sges_n; /**< Number of segments per packet (log2 value). */ + uint16_t elts_n; /**< Mbuf queue size (log2 value). 
*/ + struct mlx4_mr_ctrl mr_ctrl; /* MR control descriptor. */ + struct rte_mbuf *(*elts)[]; /**< Rx elements. */ + volatile struct mlx4_wqe_data_seg (*wqes)[]; /**< HW queue entries. */ + volatile uint32_t *rq_db; /**< RQ doorbell record. */ + uint32_t csum:1; /**< Enable checksum offloading. */ + uint32_t csum_l2tun:1; /**< Same for L2 tunnels. */ + uint32_t crc_present:1; /**< CRC must be subtracted. */ + uint32_t l2tun_offload:1; /**< L2 tunnel offload is enabled. */ + struct mlx4_cq mcq; /**< Info for directly manipulating the CQ. */ + struct mlx4_rxq_stats stats; /**< Rx queue counters. */ + unsigned int socket; /**< CPU socket ID for allocations. */ + uint32_t usecnt; /**< Number of users relying on queue resources. */ + uint8_t data[]; /**< Remaining queue resources. */ +}; + +/** Shared flow target for Rx queues. */ +struct mlx4_rss { + LIST_ENTRY(mlx4_rss) next; /**< Next entry in list. */ + struct mlx4_priv *priv; /**< Back pointer to private data. */ + uint32_t refcnt; /**< Reference count for this object. */ + uint32_t usecnt; /**< Number of users relying on @p qp and @p ind. */ + struct ibv_qp *qp; /**< Queue pair. */ + struct ibv_rwq_ind_table *ind; /**< Indirection table. */ + uint64_t fields; /**< Fields for RSS processing (Verbs format). */ + uint8_t key[MLX4_RSS_HASH_KEY_SIZE]; /**< Hash key to use. */ + uint16_t queues; /**< Number of target queues. */ + uint16_t queue_id[]; /**< Target queues. */ +}; + +/** Tx element. */ +struct txq_elt { + struct rte_mbuf *buf; /**< Buffer. */ + union { + volatile struct mlx4_wqe_ctrl_seg *wqe; /**< SQ WQE. */ + volatile uint32_t *eocb; /**< End of completion burst. */ + }; +}; + +/** Tx queue counters. */ +struct mlx4_txq_stats { + unsigned int idx; /**< Mapping index. */ + uint64_t opackets; /**< Total of successfully sent packets. */ + uint64_t obytes; /**< Total of successfully sent bytes. */ + uint64_t odropped; /**< Total number of packets failed to transmit. */ +}; + +/** Tx queue descriptor. */ +struct txq { + struct mlx4_sq msq; /**< Info for directly manipulating the SQ. */ + struct mlx4_cq mcq; /**< Info for directly manipulating the CQ. */ + uint16_t port_id; /**< Port ID of device. */ + unsigned int elts_head; /**< Current index in (*elts)[]. */ + unsigned int elts_tail; /**< First element awaiting completion. */ + int elts_comp_cd; /**< Countdown for next completion. */ + unsigned int elts_comp_cd_init; /**< Initial value for countdown. */ + unsigned int elts_n; /**< (*elts)[] length. */ + struct mlx4_mr_ctrl mr_ctrl; /* MR control descriptor. */ + struct txq_elt (*elts)[]; /**< Tx elements. */ + struct mlx4_txq_stats stats; /**< Tx queue counters. */ + uint32_t max_inline; /**< Max inline send size. */ + uint32_t csum:1; /**< Enable checksum offloading. */ + uint32_t csum_l2tun:1; /**< Same for L2 tunnels. */ + uint32_t lb:1; /**< Whether packets should be looped back by eSwitch. */ + uint8_t *bounce_buf; + /**< Memory used for storing the first DWORD of data TXBBs. */ + struct mlx4_priv *priv; /**< Back pointer to private data. */ + unsigned int socket; /**< CPU socket ID for allocations. */ + struct ibv_cq *cq; /**< Completion queue. */ + struct ibv_qp *qp; /**< Queue pair. */ + uint8_t data[]; /**< Remaining queue resources. 
*/ +}; + +#define MLX4_TX_BFREG(txq) \ + (MLX4_PROC_PRIV((txq)->port_id)->uar_table[(txq)->stats.idx]) + +/* mlx4_rxq.c */ + +extern uint8_t mlx4_rss_hash_key_default[MLX4_RSS_HASH_KEY_SIZE]; +int mlx4_rss_init(struct mlx4_priv *priv); +void mlx4_rss_deinit(struct mlx4_priv *priv); +struct mlx4_rss *mlx4_rss_get(struct mlx4_priv *priv, uint64_t fields, + const uint8_t key[MLX4_RSS_HASH_KEY_SIZE], + uint16_t queues, const uint16_t queue_id[]); +void mlx4_rss_put(struct mlx4_rss *rss); +int mlx4_rss_attach(struct mlx4_rss *rss); +void mlx4_rss_detach(struct mlx4_rss *rss); +int mlx4_rxq_attach(struct rxq *rxq); +void mlx4_rxq_detach(struct rxq *rxq); +uint64_t mlx4_get_rx_port_offloads(struct mlx4_priv *priv); +uint64_t mlx4_get_rx_queue_offloads(struct mlx4_priv *priv); +int mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, + uint16_t desc, unsigned int socket, + const struct rte_eth_rxconf *conf, + struct rte_mempool *mp); +void mlx4_rx_queue_release(void *dpdk_rxq); + +/* mlx4_rxtx.c */ + +uint16_t mlx4_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, + uint16_t pkts_n); +uint16_t mlx4_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, + uint16_t pkts_n); +uint16_t mlx4_tx_burst_removed(void *dpdk_txq, struct rte_mbuf **pkts, + uint16_t pkts_n); +uint16_t mlx4_rx_burst_removed(void *dpdk_rxq, struct rte_mbuf **pkts, + uint16_t pkts_n); + +/* mlx4_txq.c */ + +int mlx4_tx_uar_init_secondary(struct rte_eth_dev *dev, int fd); +uint64_t mlx4_get_tx_port_offloads(struct mlx4_priv *priv); +int mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, + uint16_t desc, unsigned int socket, + const struct rte_eth_txconf *conf); +void mlx4_tx_queue_release(void *dpdk_txq); + +/* mlx4_mr.c */ + +void mlx4_mr_flush_local_cache(struct mlx4_mr_ctrl *mr_ctrl); +uint32_t mlx4_rx_addr2mr_bh(struct rxq *rxq, uintptr_t addr); +uint32_t mlx4_tx_mb2mr_bh(struct txq *txq, struct rte_mbuf *mb); +uint32_t mlx4_tx_update_ext_mp(struct txq *txq, uintptr_t addr, + struct rte_mempool *mp); + +/** + * Get Memory Pool (MP) from mbuf. If mbuf is indirect, the pool from which the + * cloned mbuf is allocated is returned instead. + * + * @param buf + * Pointer to mbuf. + * + * @return + * Memory pool where data is located for given mbuf. + */ +static inline struct rte_mempool * +mlx4_mb2mp(struct rte_mbuf *buf) +{ + if (unlikely(RTE_MBUF_CLONED(buf))) + return rte_mbuf_from_indirect(buf)->pool; + return buf->pool; +} + +/** + * Query LKey from a packet buffer for Rx. No need to flush local caches for Rx + * as mempool is pre-configured and static. + * + * @param rxq + * Pointer to Rx queue structure. + * @param addr + * Address to search. + * + * @return + * Searched LKey on success, UINT32_MAX on no match. + */ +static __rte_always_inline uint32_t +mlx4_rx_addr2mr(struct rxq *rxq, uintptr_t addr) +{ + struct mlx4_mr_ctrl *mr_ctrl = &rxq->mr_ctrl; + uint32_t lkey; + + /* Linear search on MR cache array. */ + lkey = mlx4_mr_lookup_cache(mr_ctrl->cache, &mr_ctrl->mru, + MLX4_MR_CACHE_N, addr); + if (likely(lkey != UINT32_MAX)) + return lkey; + /* Take slower bottom-half (Binary Search) on miss. */ + return mlx4_rx_addr2mr_bh(rxq, addr); +} + +#define mlx4_rx_mb2mr(rxq, mb) mlx4_rx_addr2mr(rxq, (uintptr_t)((mb)->buf_addr)) + +/** + * Query LKey from a packet buffer for Tx. If not found, add the mempool. + * + * @param txq + * Pointer to Tx queue structure. + * @param addr + * Address to search. + * + * @return + * Searched LKey on success, UINT32_MAX on no match. 
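+ *
+ * Lookup order: the per-queue cache is flushed if the global MR generation
+ * number changed, then the linear MRU cache is searched, and only on a miss
+ * does the slower bottom half run, which may register the mempool.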
+ */ +static __rte_always_inline uint32_t +mlx4_tx_mb2mr(struct txq *txq, struct rte_mbuf *mb) +{ + struct mlx4_mr_ctrl *mr_ctrl = &txq->mr_ctrl; + uintptr_t addr = (uintptr_t)mb->buf_addr; + uint32_t lkey; + + /* Check generation bit to see if there's any change on existing MRs. */ + if (unlikely(*mr_ctrl->dev_gen_ptr != mr_ctrl->cur_gen)) + mlx4_mr_flush_local_cache(mr_ctrl); + /* Linear search on MR cache array. */ + lkey = mlx4_mr_lookup_cache(mr_ctrl->cache, &mr_ctrl->mru, + MLX4_MR_CACHE_N, addr); + if (likely(lkey != UINT32_MAX)) + return lkey; + /* Take slower bottom-half on miss. */ + return mlx4_tx_mb2mr_bh(txq, mb); +} + +#endif /* MLX4_RXTX_H_ */ diff --git a/src/spdk/dpdk/drivers/net/mlx4/mlx4_txq.c b/src/spdk/dpdk/drivers/net/mlx4/mlx4_txq.c new file mode 100644 index 000000000..37b84413f --- /dev/null +++ b/src/spdk/dpdk/drivers/net/mlx4/mlx4_txq.c @@ -0,0 +1,526 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2017 6WIND S.A. + * Copyright 2017 Mellanox Technologies, Ltd + */ + +/** + * @file + * Tx queues configuration for mlx4 driver. + */ + +#include +#include +#include +#include +#include +#include +#include + +/* Verbs headers do not support -pedantic. */ +#ifdef PEDANTIC +#pragma GCC diagnostic ignored "-Wpedantic" +#endif +#include +#ifdef PEDANTIC +#pragma GCC diagnostic error "-Wpedantic" +#endif + +#include +#include +#include +#include +#include +#include + +#include "mlx4.h" +#include "mlx4_glue.h" +#include "mlx4_prm.h" +#include "mlx4_rxtx.h" +#include "mlx4_utils.h" + +/** + * Initialize Tx UAR registers for primary process. + * + * @param txq + * Pointer to Tx queue structure. + */ +static void +txq_uar_init(struct txq *txq) +{ + struct mlx4_priv *priv = txq->priv; + struct mlx4_proc_priv *ppriv = MLX4_PROC_PRIV(PORT_ID(priv)); + + MLX4_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY); + MLX4_ASSERT(ppriv); + ppriv->uar_table[txq->stats.idx] = txq->msq.db; +} + +#ifdef HAVE_IBV_MLX4_UAR_MMAP_OFFSET +/** + * Remap UAR register of a Tx queue for secondary process. + * + * Remapped address is stored at the table in the process private structure of + * the device, indexed by queue index. + * + * @param txq + * Pointer to Tx queue structure. + * @param fd + * Verbs file descriptor to map UAR pages. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +txq_uar_init_secondary(struct txq *txq, int fd) +{ + struct mlx4_priv *priv = txq->priv; + struct mlx4_proc_priv *ppriv = MLX4_PROC_PRIV(PORT_ID(priv)); + void *addr; + uintptr_t uar_va; + uintptr_t offset; + const size_t page_size = sysconf(_SC_PAGESIZE); + + MLX4_ASSERT(ppriv); + /* + * As rdma-core, UARs are mapped in size of OS page + * size. Ref to libmlx4 function: mlx4_init_context() + */ + uar_va = (uintptr_t)txq->msq.db; + offset = uar_va & (page_size - 1); /* Offset in page. */ + addr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, fd, + txq->msq.uar_mmap_offset); + if (addr == MAP_FAILED) { + ERROR("port %u mmap failed for BF reg of txq %u", + txq->port_id, txq->stats.idx); + rte_errno = ENXIO; + return -rte_errno; + } + addr = RTE_PTR_ADD(addr, offset); + ppriv->uar_table[txq->stats.idx] = addr; + return 0; +} + +/** + * Unmap UAR register of a Tx queue for secondary process. + * + * @param txq + * Pointer to Tx queue structure. 
+ */ +static void +txq_uar_uninit_secondary(struct txq *txq) +{ + struct mlx4_proc_priv *ppriv = MLX4_PROC_PRIV(PORT_ID(txq->priv)); + const size_t page_size = sysconf(_SC_PAGESIZE); + void *addr; + + addr = ppriv->uar_table[txq->stats.idx]; + munmap(RTE_PTR_ALIGN_FLOOR(addr, page_size), page_size); +} + +/** + * Initialize Tx UAR registers for secondary process. + * + * @param dev + * Pointer to Ethernet device. + * @param fd + * Verbs file descriptor to map UAR pages. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx4_tx_uar_init_secondary(struct rte_eth_dev *dev, int fd) +{ + const unsigned int txqs_n = dev->data->nb_tx_queues; + struct txq *txq; + unsigned int i; + int ret; + + MLX4_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY); + for (i = 0; i != txqs_n; ++i) { + txq = dev->data->tx_queues[i]; + if (!txq) + continue; + MLX4_ASSERT(txq->stats.idx == (uint16_t)i); + ret = txq_uar_init_secondary(txq, fd); + if (ret) + goto error; + } + return 0; +error: + /* Rollback. */ + do { + txq = dev->data->tx_queues[i]; + if (!txq) + continue; + txq_uar_uninit_secondary(txq); + } while (i--); + return -rte_errno; +} +#else +int +mlx4_tx_uar_init_secondary(struct rte_eth_dev *dev __rte_unused, + int fd __rte_unused) +{ + MLX4_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY); + ERROR("UAR remap is not supported"); + rte_errno = ENOTSUP; + return -rte_errno; +} +#endif + +/** + * Free Tx queue elements. + * + * @param txq + * Pointer to Tx queue structure. + */ +static void +mlx4_txq_free_elts(struct txq *txq) +{ + unsigned int elts_head = txq->elts_head; + unsigned int elts_tail = txq->elts_tail; + struct txq_elt (*elts)[txq->elts_n] = txq->elts; + unsigned int elts_m = txq->elts_n - 1; + + DEBUG("%p: freeing WRs", (void *)txq); + while (elts_tail != elts_head) { + struct txq_elt *elt = &(*elts)[elts_tail++ & elts_m]; + + MLX4_ASSERT(elt->buf != NULL); + rte_pktmbuf_free(elt->buf); + elt->buf = NULL; + elt->wqe = NULL; + } + txq->elts_tail = txq->elts_head; +} + +/** + * Retrieves information needed in order to directly access the Tx queue. + * + * @param txq + * Pointer to Tx queue structure. + * @param mlxdv + * Pointer to device information for this Tx queue. + */ +static void +mlx4_txq_fill_dv_obj_info(struct txq *txq, struct mlx4dv_obj *mlxdv) +{ + struct mlx4_sq *sq = &txq->msq; + struct mlx4_cq *cq = &txq->mcq; + struct mlx4dv_qp *dqp = mlxdv->qp.out; + struct mlx4dv_cq *dcq = mlxdv->cq.out; + + /* Total length, including headroom and spare WQEs. */ + sq->size = (uint32_t)dqp->rq.offset - (uint32_t)dqp->sq.offset; + sq->buf = (uint8_t *)dqp->buf.buf + dqp->sq.offset; + sq->eob = sq->buf + sq->size; + uint32_t headroom_size = 2048 + (1 << dqp->sq.wqe_shift); + /* Continuous headroom size bytes must always stay freed. */ + sq->remain_size = sq->size - headroom_size; + sq->owner_opcode = MLX4_OPCODE_SEND | (0u << MLX4_SQ_OWNER_BIT); + sq->stamp = rte_cpu_to_be_32(MLX4_SQ_STAMP_VAL | + (0u << MLX4_SQ_OWNER_BIT)); +#ifdef HAVE_IBV_MLX4_UAR_MMAP_OFFSET + sq->uar_mmap_offset = dqp->uar_mmap_offset; +#else + sq->uar_mmap_offset = -1; /* Make mmap() fail. */ +#endif + sq->db = dqp->sdb; + sq->doorbell_qpn = dqp->doorbell_qpn; + cq->buf = dcq->buf.buf; + cq->cqe_cnt = dcq->cqe_cnt; + cq->set_ci_db = dcq->set_ci_db; + cq->cqe_64 = (dcq->cqe_size & 64) ? 1 : 0; +} + +/** + * Returns the per-port supported offloads. + * + * @param priv + * Pointer to private structure. + * + * @return + * Supported Tx offloads. 
+ */ +uint64_t +mlx4_get_tx_port_offloads(struct mlx4_priv *priv) +{ + uint64_t offloads = DEV_TX_OFFLOAD_MULTI_SEGS; + + if (priv->hw_csum) { + offloads |= (DEV_TX_OFFLOAD_IPV4_CKSUM | + DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_TCP_CKSUM); + } + if (priv->tso) + offloads |= DEV_TX_OFFLOAD_TCP_TSO; + if (priv->hw_csum_l2tun) { + offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM; + if (priv->tso) + offloads |= (DEV_TX_OFFLOAD_VXLAN_TNL_TSO | + DEV_TX_OFFLOAD_GRE_TNL_TSO); + } + return offloads; +} + +/** + * DPDK callback to configure a Tx queue. + * + * @param dev + * Pointer to Ethernet device structure. + * @param idx + * Tx queue index. + * @param desc + * Number of descriptors to configure in queue. + * @param socket + * NUMA socket on which memory must be allocated. + * @param[in] conf + * Thresholds parameters. + * + * @return + * 0 on success, negative errno value otherwise and rte_errno is set. + */ +int +mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, + unsigned int socket, const struct rte_eth_txconf *conf) +{ + struct mlx4_priv *priv = dev->data->dev_private; + struct mlx4dv_obj mlxdv; + struct mlx4dv_qp dv_qp; + struct mlx4dv_cq dv_cq; + struct txq_elt (*elts)[rte_align32pow2(desc)]; + struct ibv_qp_init_attr qp_init_attr; + struct txq *txq; + uint8_t *bounce_buf; + struct mlx4_malloc_vec vec[] = { + { + .align = RTE_CACHE_LINE_SIZE, + .size = sizeof(*txq), + .addr = (void **)&txq, + }, + { + .align = RTE_CACHE_LINE_SIZE, + .size = sizeof(*elts), + .addr = (void **)&elts, + }, + { + .align = RTE_CACHE_LINE_SIZE, + .size = MLX4_MAX_WQE_SIZE, + .addr = (void **)&bounce_buf, + }, + }; + int ret; + uint64_t offloads; + + offloads = conf->offloads | dev->data->dev_conf.txmode.offloads; + DEBUG("%p: configuring queue %u for %u descriptors", + (void *)dev, idx, desc); + if (idx >= dev->data->nb_tx_queues) { + rte_errno = EOVERFLOW; + ERROR("%p: queue index out of range (%u >= %u)", + (void *)dev, idx, dev->data->nb_tx_queues); + return -rte_errno; + } + txq = dev->data->tx_queues[idx]; + if (txq) { + rte_errno = EEXIST; + DEBUG("%p: Tx queue %u already configured, release it first", + (void *)dev, idx); + return -rte_errno; + } + if (!desc) { + rte_errno = EINVAL; + ERROR("%p: invalid number of Tx descriptors", (void *)dev); + return -rte_errno; + } + if (desc != RTE_DIM(*elts)) { + desc = RTE_DIM(*elts); + WARN("%p: increased number of descriptors in Tx queue %u" + " to the next power of two (%u)", + (void *)dev, idx, desc); + } + /* Allocate and initialize Tx queue. */ + mlx4_zmallocv_socket("TXQ", vec, RTE_DIM(vec), socket); + if (!txq) { + ERROR("%p: unable to allocate queue index %u", + (void *)dev, idx); + return -rte_errno; + } + *txq = (struct txq){ + .priv = priv, + .port_id = dev->data->port_id, + .stats = { + .idx = idx, + }, + .socket = socket, + .elts_n = desc, + .elts = elts, + .elts_head = 0, + .elts_tail = 0, + /* + * Request send completion every MLX4_PMD_TX_PER_COMP_REQ + * packets or at least 4 times per ring. + */ + .elts_comp_cd = + RTE_MIN(MLX4_PMD_TX_PER_COMP_REQ, desc / 4), + .elts_comp_cd_init = + RTE_MIN(MLX4_PMD_TX_PER_COMP_REQ, desc / 4), + .csum = priv->hw_csum && + (offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM | + DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_TCP_CKSUM)), + .csum_l2tun = priv->hw_csum_l2tun && + (offloads & + DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM), + /* Enable Tx loopback for VF devices. 
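eSwitch loopback lets VFs and the PF communicate with each other.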
*/ + .lb = !!priv->vf, + .bounce_buf = bounce_buf, + }; + priv->verbs_alloc_ctx.type = MLX4_VERBS_ALLOC_TYPE_TX_QUEUE; + priv->verbs_alloc_ctx.obj = txq; + txq->cq = mlx4_glue->create_cq(priv->ctx, desc, NULL, NULL, 0); + if (!txq->cq) { + rte_errno = ENOMEM; + ERROR("%p: CQ creation failure: %s", + (void *)dev, strerror(rte_errno)); + goto error; + } + qp_init_attr = (struct ibv_qp_init_attr){ + .send_cq = txq->cq, + .recv_cq = txq->cq, + .cap = { + .max_send_wr = + RTE_MIN(priv->device_attr.max_qp_wr, desc), + .max_send_sge = 1, + .max_inline_data = MLX4_PMD_MAX_INLINE, + }, + .qp_type = IBV_QPT_RAW_PACKET, + /* No completion events must occur by default. */ + .sq_sig_all = 0, + }; + txq->qp = mlx4_glue->create_qp(priv->pd, &qp_init_attr); + if (!txq->qp) { + rte_errno = errno ? errno : EINVAL; + ERROR("%p: QP creation failure: %s", + (void *)dev, strerror(rte_errno)); + goto error; + } + txq->max_inline = qp_init_attr.cap.max_inline_data; + ret = mlx4_glue->modify_qp + (txq->qp, + &(struct ibv_qp_attr){ + .qp_state = IBV_QPS_INIT, + .port_num = priv->port, + }, + IBV_QP_STATE | IBV_QP_PORT); + if (ret) { + rte_errno = ret; + ERROR("%p: QP state to IBV_QPS_INIT failed: %s", + (void *)dev, strerror(rte_errno)); + goto error; + } + ret = mlx4_glue->modify_qp + (txq->qp, + &(struct ibv_qp_attr){ + .qp_state = IBV_QPS_RTR, + }, + IBV_QP_STATE); + if (ret) { + rte_errno = ret; + ERROR("%p: QP state to IBV_QPS_RTR failed: %s", + (void *)dev, strerror(rte_errno)); + goto error; + } + ret = mlx4_glue->modify_qp + (txq->qp, + &(struct ibv_qp_attr){ + .qp_state = IBV_QPS_RTS, + }, + IBV_QP_STATE); + if (ret) { + rte_errno = ret; + ERROR("%p: QP state to IBV_QPS_RTS failed: %s", + (void *)dev, strerror(rte_errno)); + goto error; + } + /* Retrieve device queue information. */ +#ifdef HAVE_IBV_MLX4_UAR_MMAP_OFFSET + dv_qp = (struct mlx4dv_qp){ + .comp_mask = MLX4DV_QP_MASK_UAR_MMAP_OFFSET, + }; +#endif + mlxdv.cq.in = txq->cq; + mlxdv.cq.out = &dv_cq; + mlxdv.qp.in = txq->qp; + mlxdv.qp.out = &dv_qp; + ret = mlx4_glue->dv_init_obj(&mlxdv, MLX4DV_OBJ_QP | MLX4DV_OBJ_CQ); + if (ret) { + rte_errno = EINVAL; + ERROR("%p: failed to obtain information needed for" + " accessing the device queues", (void *)dev); + goto error; + } +#ifdef HAVE_IBV_MLX4_UAR_MMAP_OFFSET + if (!(dv_qp.comp_mask & MLX4DV_QP_MASK_UAR_MMAP_OFFSET)) { + WARN("%p: failed to obtain UAR mmap offset", (void *)dev); + dv_qp.uar_mmap_offset = -1; /* Make mmap() fail. */ + } +#endif + mlx4_txq_fill_dv_obj_info(txq, &mlxdv); + txq_uar_init(txq); + /* Save first wqe pointer in the first element. */ + (&(*txq->elts)[0])->wqe = + (volatile struct mlx4_wqe_ctrl_seg *)txq->msq.buf; + if (mlx4_mr_btree_init(&txq->mr_ctrl.cache_bh, + MLX4_MR_BTREE_CACHE_N, socket)) { + /* rte_errno is already set. */ + goto error; + } + /* Save pointer of global generation number to check memory event. */ + txq->mr_ctrl.dev_gen_ptr = &priv->mr.dev_gen; + DEBUG("%p: adding Tx queue %p to list", (void *)dev, (void *)txq); + dev->data->tx_queues[idx] = txq; + priv->verbs_alloc_ctx.type = MLX4_VERBS_ALLOC_TYPE_NONE; + return 0; +error: + dev->data->tx_queues[idx] = NULL; + ret = rte_errno; + mlx4_tx_queue_release(txq); + rte_errno = ret; + MLX4_ASSERT(rte_errno > 0); + priv->verbs_alloc_ctx.type = MLX4_VERBS_ALLOC_TYPE_NONE; + return -rte_errno; +} + +/** + * DPDK callback to release a Tx queue. + * + * @param dpdk_txq + * Generic Tx queue pointer. 
+ */ +void +mlx4_tx_queue_release(void *dpdk_txq) +{ + struct txq *txq = (struct txq *)dpdk_txq; + struct mlx4_priv *priv; + unsigned int i; + + if (txq == NULL) + return; + priv = txq->priv; + for (i = 0; i != ETH_DEV(priv)->data->nb_tx_queues; ++i) + if (ETH_DEV(priv)->data->tx_queues[i] == txq) { + DEBUG("%p: removing Tx queue %p from list", + (void *)ETH_DEV(priv), (void *)txq); + ETH_DEV(priv)->data->tx_queues[i] = NULL; + break; + } + mlx4_txq_free_elts(txq); + if (txq->qp) + claim_zero(mlx4_glue->destroy_qp(txq->qp)); + if (txq->cq) + claim_zero(mlx4_glue->destroy_cq(txq->cq)); + mlx4_mr_btree_free(&txq->mr_ctrl.cache_bh); + rte_free(txq); +} diff --git a/src/spdk/dpdk/drivers/net/mlx4/mlx4_utils.c b/src/spdk/dpdk/drivers/net/mlx4/mlx4_utils.c new file mode 100644 index 000000000..614dc197d --- /dev/null +++ b/src/spdk/dpdk/drivers/net/mlx4/mlx4_utils.c @@ -0,0 +1,188 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2017 6WIND S.A. + * Copyright 2017 Mellanox Technologies, Ltd + */ + +/** + * @file + * Utility functions used by the mlx4 driver. + */ + +#include +#include +#include +#include + +#include +#include +#include + +#include "mlx4_utils.h" + +/** + * Make a file descriptor non-blocking. + * + * @param fd + * File descriptor to alter. + * + * @return + * 0 on success, negative errno value otherwise and rte_errno is set. + */ +int +mlx4_fd_set_non_blocking(int fd) +{ + int ret = fcntl(fd, F_GETFL); + + if (ret != -1 && !fcntl(fd, F_SETFL, ret | O_NONBLOCK)) + return 0; + MLX4_ASSERT(errno); + rte_errno = errno; + return -rte_errno; +} + +/** + * Internal helper to allocate memory once for several disparate objects. + * + * The most restrictive alignment constraint for standard objects is assumed + * to be sizeof(double) and is used as a default value. + * + * C11 code would include stdalign.h and use alignof(max_align_t) however + * we'll stick with C99 for the time being. + */ +static inline size_t +mlx4_mallocv_inline(const char *type, const struct mlx4_malloc_vec *vec, + unsigned int cnt, int zero, int socket) +{ + unsigned int i; + size_t size; + size_t least; + uint8_t *data = NULL; + int fill = !vec[0].addr; + +fill: + size = 0; + least = 0; + for (i = 0; i < cnt; ++i) { + size_t align = (uintptr_t)vec[i].align; + + if (!align) { + align = sizeof(double); + } else if (!rte_is_power_of_2(align)) { + rte_errno = EINVAL; + goto error; + } + if (least < align) + least = align; + align = RTE_ALIGN_CEIL(size, align); + size = align + vec[i].size; + if (fill && vec[i].addr) + *vec[i].addr = data + align; + } + if (fill) + return size; + if (!zero) + data = rte_malloc_socket(type, size, least, socket); + else + data = rte_zmalloc_socket(type, size, least, socket); + if (data) { + fill = 1; + goto fill; + } + rte_errno = ENOMEM; +error: + for (i = 0; i != cnt; ++i) + if (vec[i].addr) + *vec[i].addr = NULL; + return 0; +} + +/** + * Allocate memory once for several disparate objects. + * + * This function adds iovec-like semantics (e.g. readv()) to rte_malloc(). + * Memory is allocated once for several contiguous objects of nonuniform + * sizes and alignment constraints. + * + * Each entry of @p vec describes the size, alignment constraint and + * provides a buffer address where the resulting object pointer must be + * stored. + * + * The buffer of the first entry is guaranteed to point to the beginning of + * the allocated region and is safe to use with rte_free(). + * + * NULL buffers are silently ignored. 
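+ *
+ * The Tx queue setup code, for example, relies on this scheme (via
+ * mlx4_zmallocv_socket()) to pack the txq descriptor, its elements array and
+ * the Tx bounce buffer into a single allocation through a three-entry vector.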
+ * + * Providing a NULL buffer in the first entry prevents this function from + * allocating any memory but has otherwise no effect on its behavior. In + * this case, the contents of remaining non-NULL buffers are updated with + * addresses relative to zero (i.e. offsets that would have been used during + * the allocation). + * + * @param[in] type + * A string identifying the type of allocated objects (useful for debug + * purposes, such as identifying the cause of a memory leak). Can be NULL. + * @param[in, out] vec + * Description of objects to allocate memory for. + * @param cnt + * Number of entries in @p vec. + * + * @return + * Size in bytes of the allocated region including any padding. In case of + * error, rte_errno is set, 0 is returned and NULL is stored in the + * non-NULL buffers pointed by @p vec. + * + * @see struct mlx4_malloc_vec + * @see rte_malloc() + */ +size_t +mlx4_mallocv(const char *type, const struct mlx4_malloc_vec *vec, + unsigned int cnt) +{ + return mlx4_mallocv_inline(type, vec, cnt, 0, SOCKET_ID_ANY); +} + +/** + * Combines the semantics of mlx4_mallocv() with those of rte_zmalloc(). + * + * @see mlx4_mallocv() + * @see rte_zmalloc() + */ +size_t +mlx4_zmallocv(const char *type, const struct mlx4_malloc_vec *vec, + unsigned int cnt) +{ + return mlx4_mallocv_inline(type, vec, cnt, 1, SOCKET_ID_ANY); +} + +/** + * Socket-aware version of mlx4_mallocv(). + * + * This function takes one additional parameter. + * + * @param socket + * NUMA socket to allocate memory on. If SOCKET_ID_ANY is used, this + * function will behave the same as mlx4_mallocv(). + * + * @see mlx4_mallocv() + * @see rte_malloc_socket() + */ +size_t +mlx4_mallocv_socket(const char *type, const struct mlx4_malloc_vec *vec, + unsigned int cnt, int socket) +{ + return mlx4_mallocv_inline(type, vec, cnt, 0, socket); +} + +/** + * Combines the semantics of mlx4_mallocv_socket() with those of + * mlx4_zmalloc_socket(). + * + * @see mlx4_mallocv_socket() + * @see rte_zmalloc_socket() + */ +size_t +mlx4_zmallocv_socket(const char *type, const struct mlx4_malloc_vec *vec, + unsigned int cnt, int socket) +{ + return mlx4_mallocv_inline(type, vec, cnt, 1, socket); +} diff --git a/src/spdk/dpdk/drivers/net/mlx4/mlx4_utils.h b/src/spdk/dpdk/drivers/net/mlx4/mlx4_utils.h new file mode 100644 index 000000000..b8769562a --- /dev/null +++ b/src/spdk/dpdk/drivers/net/mlx4/mlx4_utils.h @@ -0,0 +1,113 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2017 6WIND S.A. + * Copyright 2017 Mellanox Technologies, Ltd + */ + +#ifndef MLX4_UTILS_H_ +#define MLX4_UTILS_H_ + +#include +#include + +#include +#include + +#include "mlx4.h" + +/* + * Compilation workaround for PPC64 when AltiVec is fully enabled, e.g. std=c11. + * Otherwise there would be a type conflict between stdbool and altivec. + */ +#if defined(__PPC64__) && !defined(__APPLE_ALTIVEC__) +#undef bool +/* redefine as in stdbool.h */ +#define bool _Bool +#endif + +extern int mlx4_logtype; + +#ifdef RTE_LIBRTE_MLX4_DEBUG + +/* + * When debugging is enabled (MLX4_DEBUG is defined), file, line and function + * information replace the driver name (MLX4_DRIVER_NAME) in log messages. + */ + +/** Return the file name part of a path. */ +static inline const char * +pmd_drv_log_basename(const char *s) +{ + const char *n = s; + + while (*n) + if (*(n++) == '/') + s = n; + return s; +} + +#define PMD_DRV_LOG(level, ...) 
\ + rte_log(RTE_LOG_ ## level, mlx4_logtype, \ + RTE_FMT("%s:%u: %s(): " RTE_FMT_HEAD(__VA_ARGS__,) "\n", \ + pmd_drv_log_basename(__FILE__), \ + __LINE__, \ + __func__, \ + RTE_FMT_TAIL(__VA_ARGS__,))) +#define DEBUG(...) PMD_DRV_LOG(DEBUG, __VA_ARGS__) +#define MLX4_ASSERT(exp) RTE_VERIFY(exp) +#define claim_zero(...) MLX4_ASSERT((__VA_ARGS__) == 0) + +#else /* RTE_LIBRTE_MLX4_DEBUG */ + +/* + * Like MLX4_ASSERT(), DEBUG() becomes a no-op and claim_zero() does not perform + * any check when debugging is disabled. + */ + +#define PMD_DRV_LOG(level, ...) \ + rte_log(RTE_LOG_ ## level, mlx4_logtype, \ + RTE_FMT(MLX4_DRIVER_NAME ": " \ + RTE_FMT_HEAD(__VA_ARGS__,) "\n", \ + RTE_FMT_TAIL(__VA_ARGS__,))) +#define DEBUG(...) (void)0 +#define MLX4_ASSERT(exp) RTE_ASSERT(exp) +#define claim_zero(...) (__VA_ARGS__) + +#endif /* RTE_LIBRTE_MLX4_DEBUG */ + +#define INFO(...) PMD_DRV_LOG(INFO, __VA_ARGS__) +#define WARN(...) PMD_DRV_LOG(WARNING, __VA_ARGS__) +#define ERROR(...) PMD_DRV_LOG(ERR, __VA_ARGS__) + +/** Allocate a buffer on the stack and fill it with a printf format string. */ +#define MKSTR(name, ...) \ + int mkstr_size_##name = snprintf(NULL, 0, "" __VA_ARGS__); \ + char name[mkstr_size_##name + 1]; \ + \ + snprintf(name, sizeof(name), "" __VA_ARGS__) + +/** Generate a string out of the provided arguments. */ +#define MLX4_STR(...) # __VA_ARGS__ + +/** Similar to MLX4_STR() with enclosed macros expanded first. */ +#define MLX4_STR_EXPAND(...) MLX4_STR(__VA_ARGS__) + +/** Object description used with mlx4_mallocv() and similar functions. */ +struct mlx4_malloc_vec { + size_t align; /**< Alignment constraint (power of 2), 0 if unknown. */ + size_t size; /**< Object size. */ + void **addr; /**< Storage for allocation address. */ +}; + +/* mlx4_utils.c */ + +int mlx4_fd_set_non_blocking(int fd); +size_t mlx4_mallocv(const char *type, const struct mlx4_malloc_vec *vec, + unsigned int cnt); +size_t mlx4_zmallocv(const char *type, const struct mlx4_malloc_vec *vec, + unsigned int cnt); +size_t mlx4_mallocv_socket(const char *type, const struct mlx4_malloc_vec *vec, + unsigned int cnt, int socket); +size_t mlx4_zmallocv_socket(const char *type, const struct mlx4_malloc_vec *vec, + unsigned int cnt, int socket); + +#endif /* MLX4_UTILS_H_ */ diff --git a/src/spdk/dpdk/drivers/net/mlx4/rte_pmd_mlx4_version.map b/src/spdk/dpdk/drivers/net/mlx4/rte_pmd_mlx4_version.map new file mode 100644 index 000000000..f9f17e4f6 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/mlx4/rte_pmd_mlx4_version.map @@ -0,0 +1,3 @@ +DPDK_20.0 { + local: *; +}; diff --git a/src/spdk/dpdk/drivers/net/mlx5/Makefile b/src/spdk/dpdk/drivers/net/mlx5/Makefile new file mode 100644 index 000000000..2577ee5e5 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/mlx5/Makefile @@ -0,0 +1,77 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright 2015 6WIND S.A. +# Copyright 2015 Mellanox Technologies, Ltd + +include $(RTE_SDK)/mk/rte.vars.mk + +# Library name. +LIB = librte_pmd_mlx5.a + +# Sources. 
+SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5.c +SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_rxq.c +SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_txq.c +SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_rxtx.c +ifneq ($(filter y,$(CONFIG_RTE_ARCH_X86_64) \ + $(CONFIG_RTE_ARCH_PPC_64) \ + $(CONFIG_RTE_ARCH_ARM64)),) +SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_rxtx_vec.c +endif +SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_trigger.c +SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_ethdev.c +SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_mac.c +SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_rxmode.c +SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_vlan.c +SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_stats.c +SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_rss.c +SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_mr.c +SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_flow.c +SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_flow_meter.c +SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_flow_dv.c +SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_flow_verbs.c +SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_mp.c +SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_utils.c +SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_socket.c + +# Basic CFLAGS. +CFLAGS += -O3 +CFLAGS += -std=c11 -Wall -Wextra +CFLAGS += -g +CFLAGS += -I$(RTE_SDK)/drivers/common/mlx5 +CFLAGS += -I$(RTE_SDK)/drivers/net/mlx5 +CFLAGS += -I$(BUILDDIR)/drivers/common/mlx5 +CFLAGS += -D_BSD_SOURCE +CFLAGS += -D_DEFAULT_SOURCE +CFLAGS += -D_XOPEN_SOURCE=600 +CFLAGS += $(WERROR_FLAGS) +CFLAGS += -Wno-strict-prototypes +LDLIBS += -lrte_common_mlx5 +LDLIBS += -lm +LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring +LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs +LDLIBS += -lrte_bus_pci + +# A few warnings cannot be avoided in external headers. +CFLAGS += -Wno-error=cast-qual + +EXPORT_MAP := rte_pmd_mlx5_version.map + +# DEBUG which is usually provided on the command-line may enable +# CONFIG_RTE_LIBRTE_MLX5_DEBUG. +ifeq ($(DEBUG),1) +CONFIG_RTE_LIBRTE_MLX5_DEBUG := y +endif + +# User-defined CFLAGS. +ifeq ($(CONFIG_RTE_LIBRTE_MLX5_DEBUG),y) +CFLAGS += -pedantic +ifneq ($(CONFIG_RTE_TOOLCHAIN_ICC),y) +CFLAGS += -DPEDANTIC +endif +AUTO_CONFIG_CFLAGS += -Wno-pedantic +else +CFLAGS += -UPEDANTIC +endif + +include $(RTE_SDK)/mk/rte.lib.mk + diff --git a/src/spdk/dpdk/drivers/net/mlx5/meson.build b/src/spdk/dpdk/drivers/net/mlx5/meson.build new file mode 100644 index 000000000..928663af7 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/mlx5/meson.build @@ -0,0 +1,54 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright 2018 6WIND S.A. 
+# Copyright 2018 Mellanox Technologies, Ltd + +if not is_linux + build = false + reason = 'only supported on Linux' + subdir_done() +endif + +deps += ['hash', 'common_mlx5'] +sources = files( + 'mlx5.c', + 'mlx5_ethdev.c', + 'mlx5_flow.c', + 'mlx5_flow_meter.c', + 'mlx5_flow_dv.c', + 'mlx5_flow_verbs.c', + 'mlx5_mac.c', + 'mlx5_mr.c', + 'mlx5_rss.c', + 'mlx5_rxmode.c', + 'mlx5_rxq.c', + 'mlx5_rxtx.c', + 'mlx5_mp.c', + 'mlx5_stats.c', + 'mlx5_trigger.c', + 'mlx5_txq.c', + 'mlx5_vlan.c', + 'mlx5_utils.c', + 'mlx5_socket.c', +) +if (dpdk_conf.has('RTE_ARCH_X86_64') + or dpdk_conf.has('RTE_ARCH_ARM64') + or dpdk_conf.has('RTE_ARCH_PPC_64')) + sources += files('mlx5_rxtx_vec.c') +endif +cflags_options = [ + '-std=c11', + '-Wno-strict-prototypes', + '-D_BSD_SOURCE', + '-D_DEFAULT_SOURCE', + '-D_XOPEN_SOURCE=600' +] +foreach option:cflags_options + if cc.has_argument(option) + cflags += option + endif +endforeach +if get_option('buildtype').contains('debug') + cflags += [ '-pedantic', '-DPEDANTIC' ] +else + cflags += [ '-UPEDANTIC' ] +endif diff --git a/src/spdk/dpdk/drivers/net/mlx5/mlx5.c b/src/spdk/dpdk/drivers/net/mlx5/mlx5.c new file mode 100644 index 000000000..5589772eb --- /dev/null +++ b/src/spdk/dpdk/drivers/net/mlx5/mlx5.c @@ -0,0 +1,3814 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2015 6WIND S.A. + * Copyright 2015 Mellanox Technologies, Ltd + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* Verbs header. */ +/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */ +#ifdef PEDANTIC +#pragma GCC diagnostic ignored "-Wpedantic" +#endif +#include +#ifdef PEDANTIC +#pragma GCC diagnostic error "-Wpedantic" +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "mlx5_defs.h" +#include "mlx5.h" +#include "mlx5_utils.h" +#include "mlx5_rxtx.h" +#include "mlx5_autoconf.h" +#include "mlx5_mr.h" +#include "mlx5_flow.h" +#include "rte_pmd_mlx5.h" + +/* Device parameter to enable RX completion queue compression. */ +#define MLX5_RXQ_CQE_COMP_EN "rxq_cqe_comp_en" + +/* Device parameter to enable RX completion entry padding to 128B. */ +#define MLX5_RXQ_CQE_PAD_EN "rxq_cqe_pad_en" + +/* Device parameter to enable padding Rx packet to cacheline size. */ +#define MLX5_RXQ_PKT_PAD_EN "rxq_pkt_pad_en" + +/* Device parameter to enable Multi-Packet Rx queue. */ +#define MLX5_RX_MPRQ_EN "mprq_en" + +/* Device parameter to configure log 2 of the number of strides for MPRQ. */ +#define MLX5_RX_MPRQ_LOG_STRIDE_NUM "mprq_log_stride_num" + +/* Device parameter to configure log 2 of the stride size for MPRQ. */ +#define MLX5_RX_MPRQ_LOG_STRIDE_SIZE "mprq_log_stride_size" + +/* Device parameter to limit the size of memcpy'd packet for MPRQ. */ +#define MLX5_RX_MPRQ_MAX_MEMCPY_LEN "mprq_max_memcpy_len" + +/* Device parameter to set the minimum number of Rx queues to enable MPRQ. */ +#define MLX5_RXQS_MIN_MPRQ "rxqs_min_mprq" + +/* Device parameter to configure inline send. Deprecated, ignored.*/ +#define MLX5_TXQ_INLINE "txq_inline" + +/* Device parameter to limit packet size to inline with ordinary SEND. */ +#define MLX5_TXQ_INLINE_MAX "txq_inline_max" + +/* Device parameter to configure minimal data size to inline. */ +#define MLX5_TXQ_INLINE_MIN "txq_inline_min" + +/* Device parameter to limit packet size to inline with Enhanced MPW. 
*/ +#define MLX5_TXQ_INLINE_MPW "txq_inline_mpw" + +/* + * Device parameter to configure the number of TX queues threshold for + * enabling inline send. + */ +#define MLX5_TXQS_MIN_INLINE "txqs_min_inline" + +/* + * Device parameter to configure the number of TX queues threshold for + * enabling vectorized Tx, deprecated, ignored (no vectorized Tx routines). + */ +#define MLX5_TXQS_MAX_VEC "txqs_max_vec" + +/* Device parameter to enable multi-packet send WQEs. */ +#define MLX5_TXQ_MPW_EN "txq_mpw_en" + +/* + * Device parameter to force doorbell register mapping + * to non-cahed region eliminating the extra write memory barrier. + */ +#define MLX5_TX_DB_NC "tx_db_nc" + +/* + * Device parameter to include 2 dsegs in the title WQEBB. + * Deprecated, ignored. + */ +#define MLX5_TXQ_MPW_HDR_DSEG_EN "txq_mpw_hdr_dseg_en" + +/* + * Device parameter to limit the size of inlining packet. + * Deprecated, ignored. + */ +#define MLX5_TXQ_MAX_INLINE_LEN "txq_max_inline_len" + +/* + * Device parameter to enable hardware Tx vector. + * Deprecated, ignored (no vectorized Tx routines anymore). + */ +#define MLX5_TX_VEC_EN "tx_vec_en" + +/* Device parameter to enable hardware Rx vector. */ +#define MLX5_RX_VEC_EN "rx_vec_en" + +/* Allow L3 VXLAN flow creation. */ +#define MLX5_L3_VXLAN_EN "l3_vxlan_en" + +/* Activate DV E-Switch flow steering. */ +#define MLX5_DV_ESW_EN "dv_esw_en" + +/* Activate DV flow steering. */ +#define MLX5_DV_FLOW_EN "dv_flow_en" + +/* Enable extensive flow metadata support. */ +#define MLX5_DV_XMETA_EN "dv_xmeta_en" + +/* Activate Netlink support in VF mode. */ +#define MLX5_VF_NL_EN "vf_nl_en" + +/* Enable extending memsegs when creating a MR. */ +#define MLX5_MR_EXT_MEMSEG_EN "mr_ext_memseg_en" + +/* Select port representors to instantiate. */ +#define MLX5_REPRESENTOR "representor" + +/* Device parameter to configure the maximum number of dump files per queue. */ +#define MLX5_MAX_DUMP_FILES_NUM "max_dump_files_num" + +/* Configure timeout of LRO session (in microseconds). */ +#define MLX5_LRO_TIMEOUT_USEC "lro_timeout_usec" + +/* + * Device parameter to configure the total data buffer size for a single + * hairpin queue (logarithm value). + */ +#define MLX5_HP_BUF_SIZE "hp_buf_log_sz" + +#ifndef HAVE_IBV_MLX5_MOD_MPW +#define MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED (1 << 2) +#define MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW (1 << 3) +#endif + +#ifndef HAVE_IBV_MLX5_MOD_CQE_128B_COMP +#define MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP (1 << 4) +#endif + +static const char *MZ_MLX5_PMD_SHARED_DATA = "mlx5_pmd_shared_data"; + +/* Shared memory between primary and secondary processes. */ +struct mlx5_shared_data *mlx5_shared_data; + +/* Spinlock for mlx5_shared_data allocation. */ +static rte_spinlock_t mlx5_shared_data_lock = RTE_SPINLOCK_INITIALIZER; + +/* Process local data for secondary processes. */ +static struct mlx5_local_data mlx5_local_data; + +/** Driver-specific log messages type. */ +int mlx5_logtype; + +/** Data associated with devices to spawn. */ +struct mlx5_dev_spawn_data { + uint32_t ifindex; /**< Network interface index. */ + uint32_t max_port; /**< IB device maximal port index. */ + uint32_t ibv_port; /**< IB device physical port index. */ + int pf_bond; /**< bonding device PF index. < 0 - no bonding */ + struct mlx5_switch_info info; /**< Switch information. */ + struct ibv_device *ibv_dev; /**< Associated IB device. */ + struct rte_eth_dev *eth_dev; /**< Associated Ethernet device. */ + struct rte_pci_device *pci_dev; /**< Backend PCI device. 
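 *
 * [Editor's note - not part of the upstream patch] The MLX5_* macros above are
 * the devargs keys this PMD accepts. A minimal, purely illustrative usage
 * sketch for the EAL command line of this DPDK generation (the PCI address and
 * the values are hypothetical):
 *
 *   -w 0000:03:00.0,rxq_cqe_comp_en=1,mprq_en=1,txq_inline_max=204,dv_flow_en=1
 *
 * Deprecated keys (txq_inline, txqs_max_vec, txq_mpw_hdr_dseg_en,
 * txq_max_inline_len, tx_vec_en) are still parsed but are either converted to
 * their replacements or ignored, as the comments above note.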
*/ +}; + +static LIST_HEAD(, mlx5_ibv_shared) mlx5_ibv_list = LIST_HEAD_INITIALIZER(); +static pthread_mutex_t mlx5_ibv_list_mutex = PTHREAD_MUTEX_INITIALIZER; + +static struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = { +#ifdef HAVE_IBV_FLOW_DV_SUPPORT + { + .size = sizeof(struct mlx5_flow_dv_encap_decap_resource), + .trunk_size = 64, + .grow_trunk = 3, + .grow_shift = 2, + .need_lock = 0, + .release_mem_en = 1, + .malloc = rte_malloc_socket, + .free = rte_free, + .type = "mlx5_encap_decap_ipool", + }, + { + .size = sizeof(struct mlx5_flow_dv_push_vlan_action_resource), + .trunk_size = 64, + .grow_trunk = 3, + .grow_shift = 2, + .need_lock = 0, + .release_mem_en = 1, + .malloc = rte_malloc_socket, + .free = rte_free, + .type = "mlx5_push_vlan_ipool", + }, + { + .size = sizeof(struct mlx5_flow_dv_tag_resource), + .trunk_size = 64, + .grow_trunk = 3, + .grow_shift = 2, + .need_lock = 0, + .release_mem_en = 1, + .malloc = rte_malloc_socket, + .free = rte_free, + .type = "mlx5_tag_ipool", + }, + { + .size = sizeof(struct mlx5_flow_dv_port_id_action_resource), + .trunk_size = 64, + .grow_trunk = 3, + .grow_shift = 2, + .need_lock = 0, + .release_mem_en = 1, + .malloc = rte_malloc_socket, + .free = rte_free, + .type = "mlx5_port_id_ipool", + }, + { + .size = sizeof(struct mlx5_flow_tbl_data_entry), + .trunk_size = 64, + .grow_trunk = 3, + .grow_shift = 2, + .need_lock = 0, + .release_mem_en = 1, + .malloc = rte_malloc_socket, + .free = rte_free, + .type = "mlx5_jump_ipool", + }, +#endif + { + .size = sizeof(struct mlx5_flow_meter), + .trunk_size = 64, + .grow_trunk = 3, + .grow_shift = 2, + .need_lock = 0, + .release_mem_en = 1, + .malloc = rte_malloc_socket, + .free = rte_free, + .type = "mlx5_meter_ipool", + }, + { + .size = sizeof(struct mlx5_flow_mreg_copy_resource), + .trunk_size = 64, + .grow_trunk = 3, + .grow_shift = 2, + .need_lock = 0, + .release_mem_en = 1, + .malloc = rte_malloc_socket, + .free = rte_free, + .type = "mlx5_mcp_ipool", + }, + { + .size = (sizeof(struct mlx5_hrxq) + MLX5_RSS_HASH_KEY_LEN), + .trunk_size = 64, + .grow_trunk = 3, + .grow_shift = 2, + .need_lock = 0, + .release_mem_en = 1, + .malloc = rte_malloc_socket, + .free = rte_free, + .type = "mlx5_hrxq_ipool", + }, + { + .size = sizeof(struct mlx5_flow_handle), + .trunk_size = 64, + .grow_trunk = 3, + .grow_shift = 2, + .need_lock = 0, + .release_mem_en = 1, + .malloc = rte_malloc_socket, + .free = rte_free, + .type = "mlx5_flow_handle_ipool", + }, + { + .size = sizeof(struct rte_flow), + .trunk_size = 4096, + .need_lock = 1, + .release_mem_en = 1, + .malloc = rte_malloc_socket, + .free = rte_free, + .type = "rte_flow_ipool", + }, +}; + + +#define MLX5_FLOW_MIN_ID_POOL_SIZE 512 +#define MLX5_ID_GENERATION_ARRAY_FACTOR 16 + +#define MLX5_FLOW_TABLE_HLIST_ARRAY_SIZE 4096 +#define MLX5_TAGS_HLIST_ARRAY_SIZE 8192 + +/** + * Allocate ID pool structure. + * + * @param[in] max_id + * The maximum id can be allocated from the pool. + * + * @return + * Pointer to pool object, NULL value otherwise. 
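 *
 * [Editor's note - not part of the upstream patch] A minimal usage sketch of
 * the ID pool API defined below; the pool size mirrors the
 * HAIRPIN_FLOW_ID_BITS-based call made later in this file:
 *
 *   struct mlx5_flow_id_pool *pool;
 *   uint32_t id;
 *
 *   pool = mlx5_flow_id_pool_alloc((1 << HAIRPIN_FLOW_ID_BITS) - 1);
 *   if (pool) {
 *           if (!mlx5_flow_id_get(pool, &id))
 *                   mlx5_flow_id_release(pool, id);   // use id in between
 *           mlx5_flow_id_pool_release(pool);
 *   }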
+ */ +struct mlx5_flow_id_pool * +mlx5_flow_id_pool_alloc(uint32_t max_id) +{ + struct mlx5_flow_id_pool *pool; + void *mem; + + pool = rte_zmalloc("id pool allocation", sizeof(*pool), + RTE_CACHE_LINE_SIZE); + if (!pool) { + DRV_LOG(ERR, "can't allocate id pool"); + rte_errno = ENOMEM; + return NULL; + } + mem = rte_zmalloc("", MLX5_FLOW_MIN_ID_POOL_SIZE * sizeof(uint32_t), + RTE_CACHE_LINE_SIZE); + if (!mem) { + DRV_LOG(ERR, "can't allocate mem for id pool"); + rte_errno = ENOMEM; + goto error; + } + pool->free_arr = mem; + pool->curr = pool->free_arr; + pool->last = pool->free_arr + MLX5_FLOW_MIN_ID_POOL_SIZE; + pool->base_index = 0; + pool->max_id = max_id; + return pool; +error: + rte_free(pool); + return NULL; +} + +/** + * Release ID pool structure. + * + * @param[in] pool + * Pointer to flow id pool object to free. + */ +void +mlx5_flow_id_pool_release(struct mlx5_flow_id_pool *pool) +{ + rte_free(pool->free_arr); + rte_free(pool); +} + +/** + * Generate ID. + * + * @param[in] pool + * Pointer to flow id pool. + * @param[out] id + * The generated ID. + * + * @return + * 0 on success, error value otherwise. + */ +uint32_t +mlx5_flow_id_get(struct mlx5_flow_id_pool *pool, uint32_t *id) +{ + if (pool->curr == pool->free_arr) { + if (pool->base_index == pool->max_id) { + rte_errno = ENOMEM; + DRV_LOG(ERR, "no free id"); + return -rte_errno; + } + *id = ++pool->base_index; + return 0; + } + *id = *(--pool->curr); + return 0; +} + +/** + * Release ID. + * + * @param[in] pool + * Pointer to flow id pool. + * @param[out] id + * The generated ID. + * + * @return + * 0 on success, error value otherwise. + */ +uint32_t +mlx5_flow_id_release(struct mlx5_flow_id_pool *pool, uint32_t id) +{ + uint32_t size; + uint32_t size2; + void *mem; + + if (pool->curr == pool->last) { + size = pool->curr - pool->free_arr; + size2 = size * MLX5_ID_GENERATION_ARRAY_FACTOR; + MLX5_ASSERT(size2 > size); + mem = rte_malloc("", size2 * sizeof(uint32_t), 0); + if (!mem) { + DRV_LOG(ERR, "can't allocate mem for id pool"); + rte_errno = ENOMEM; + return -rte_errno; + } + memcpy(mem, pool->free_arr, size * sizeof(uint32_t)); + rte_free(pool->free_arr); + pool->free_arr = mem; + pool->curr = pool->free_arr + size; + pool->last = pool->free_arr + size2; + } + *pool->curr = id; + pool->curr++; + return 0; +} + +/** + * Initialize the shared aging list information per port. + * + * @param[in] sh + * Pointer to mlx5_ibv_shared object. + */ +static void +mlx5_flow_aging_init(struct mlx5_ibv_shared *sh) +{ + uint32_t i; + struct mlx5_age_info *age_info; + + for (i = 0; i < sh->max_port; i++) { + age_info = &sh->port[i].age_info; + age_info->flags = 0; + TAILQ_INIT(&age_info->aged_counters); + rte_spinlock_init(&age_info->aged_sl); + MLX5_AGE_SET(age_info, MLX5_AGE_TRIGGER); + } +} + +/** + * Initialize the counters management structure. + * + * @param[in] sh + * Pointer to mlx5_ibv_shared object to free + */ +static void +mlx5_flow_counters_mng_init(struct mlx5_ibv_shared *sh) +{ + int i; + + memset(&sh->cmng, 0, sizeof(sh->cmng)); + TAILQ_INIT(&sh->cmng.flow_counters); + for (i = 0; i < MLX5_CCONT_TYPE_MAX; ++i) { + TAILQ_INIT(&sh->cmng.ccont[i].pool_list); + rte_spinlock_init(&sh->cmng.ccont[i].resize_sl); + } +} + +/** + * Destroy all the resources allocated for a counter memory management. + * + * @param[in] mng + * Pointer to the memory management structure. 
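 *
 * [Editor's note - not part of the upstream patch] On the free-list growth in
 * mlx5_flow_id_release() above: when the release array is full it is
 * reallocated MLX5_ID_GENERATION_ARRAY_FACTOR (16) times larger, so the
 * initial MLX5_FLOW_MIN_ID_POOL_SIZE (512) entries grow to 512 * 16 = 8192 on
 * the first resize, 131072 on the second, and so on.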
+ */ +static void +mlx5_flow_destroy_counter_stat_mem_mng(struct mlx5_counter_stats_mem_mng *mng) +{ + uint8_t *mem = (uint8_t *)(uintptr_t)mng->raws[0].data; + + LIST_REMOVE(mng, next); + claim_zero(mlx5_devx_cmd_destroy(mng->dm)); + claim_zero(mlx5_glue->devx_umem_dereg(mng->umem)); + rte_free(mem); +} + +/** + * Close and release all the resources of the counters management. + * + * @param[in] sh + * Pointer to mlx5_ibv_shared object to free. + */ +static void +mlx5_flow_counters_mng_close(struct mlx5_ibv_shared *sh) +{ + struct mlx5_counter_stats_mem_mng *mng; + int i; + int j; + int retries = 1024; + + rte_errno = 0; + while (--retries) { + rte_eal_alarm_cancel(mlx5_flow_query_alarm, sh); + if (rte_errno != EINPROGRESS) + break; + rte_pause(); + } + for (i = 0; i < MLX5_CCONT_TYPE_MAX; ++i) { + struct mlx5_flow_counter_pool *pool; + uint32_t batch = !!(i > 1); + + if (!sh->cmng.ccont[i].pools) + continue; + pool = TAILQ_FIRST(&sh->cmng.ccont[i].pool_list); + while (pool) { + if (batch && pool->min_dcs) + claim_zero(mlx5_devx_cmd_destroy + (pool->min_dcs)); + for (j = 0; j < MLX5_COUNTERS_PER_POOL; ++j) { + if (MLX5_POOL_GET_CNT(pool, j)->action) + claim_zero + (mlx5_glue->destroy_flow_action + (MLX5_POOL_GET_CNT + (pool, j)->action)); + if (!batch && MLX5_GET_POOL_CNT_EXT + (pool, j)->dcs) + claim_zero(mlx5_devx_cmd_destroy + (MLX5_GET_POOL_CNT_EXT + (pool, j)->dcs)); + } + TAILQ_REMOVE(&sh->cmng.ccont[i].pool_list, pool, next); + rte_free(pool); + pool = TAILQ_FIRST(&sh->cmng.ccont[i].pool_list); + } + rte_free(sh->cmng.ccont[i].pools); + } + mng = LIST_FIRST(&sh->cmng.mem_mngs); + while (mng) { + mlx5_flow_destroy_counter_stat_mem_mng(mng); + mng = LIST_FIRST(&sh->cmng.mem_mngs); + } + memset(&sh->cmng, 0, sizeof(sh->cmng)); +} + +/** + * Initialize the flow resources' indexed mempool. + * + * @param[in] sh + * Pointer to mlx5_ibv_shared object. + * @param[in] sh + * Pointer to user dev config. + */ +static void +mlx5_flow_ipool_create(struct mlx5_ibv_shared *sh, + const struct mlx5_dev_config *config __rte_unused) +{ + uint8_t i; + +#ifdef HAVE_IBV_FLOW_DV_SUPPORT + /* + * While DV is supported, user chooses the verbs mode, + * the mlx5 flow handle size is different with the + * MLX5_FLOW_HANDLE_VERBS_SIZE. + */ + if (!config->dv_flow_en) + mlx5_ipool_cfg[MLX5_IPOOL_MLX5_FLOW].size = + MLX5_FLOW_HANDLE_VERBS_SIZE; +#endif + for (i = 0; i < MLX5_IPOOL_MAX; ++i) + sh->ipool[i] = mlx5_ipool_create(&mlx5_ipool_cfg[i]); +} + +/** + * Release the flow resources' indexed mempool. + * + * @param[in] sh + * Pointer to mlx5_ibv_shared object. + */ +static void +mlx5_flow_ipool_destroy(struct mlx5_ibv_shared *sh) +{ + uint8_t i; + + for (i = 0; i < MLX5_IPOOL_MAX; ++i) + mlx5_ipool_destroy(sh->ipool[i]); +} + +/** + * Extract pdn of PD object using DV API. + * + * @param[in] pd + * Pointer to the verbs PD object. + * @param[out] pdn + * Pointer to the PD object number variable. + * + * @return + * 0 on success, error value otherwise. 
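 *
 * [Editor's note - not part of the upstream patch] Illustrative call pattern
 * only: mlx5_get_pdn(), defined just below for DV-enabled builds, is how the
 * PD number later stored in sh->pdn is obtained; use_pdn_for_devx() is a
 * hypothetical consumer standing in for the DevX object creation paths:
 *
 *   uint32_t pdn = 0;
 *
 *   if (mlx5_get_pdn(pd, &pdn) == 0)
 *           use_pdn_for_devx(pdn);   // hypothetical consumer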
+ */ +#ifdef HAVE_IBV_FLOW_DV_SUPPORT +static int +mlx5_get_pdn(struct ibv_pd *pd __rte_unused, uint32_t *pdn __rte_unused) +{ + struct mlx5dv_obj obj; + struct mlx5dv_pd pd_info; + int ret = 0; + + obj.pd.in = pd; + obj.pd.out = &pd_info; + ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_PD); + if (ret) { + DRV_LOG(DEBUG, "Fail to get PD object info"); + return ret; + } + *pdn = pd_info.pdn; + return 0; +} +#endif /* HAVE_IBV_FLOW_DV_SUPPORT */ + +static int +mlx5_config_doorbell_mapping_env(const struct mlx5_dev_config *config) +{ + char *env; + int value; + + MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY); + /* Get environment variable to store. */ + env = getenv(MLX5_SHUT_UP_BF); + value = env ? !!strcmp(env, "0") : MLX5_ARG_UNSET; + if (config->dbnc == MLX5_ARG_UNSET) + setenv(MLX5_SHUT_UP_BF, MLX5_SHUT_UP_BF_DEFAULT, 1); + else + setenv(MLX5_SHUT_UP_BF, + config->dbnc == MLX5_TXDB_NCACHED ? "1" : "0", 1); + return value; +} + +static void +mlx5_restore_doorbell_mapping_env(int value) +{ + MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY); + /* Restore the original environment variable state. */ + if (value == MLX5_ARG_UNSET) + unsetenv(MLX5_SHUT_UP_BF); + else + setenv(MLX5_SHUT_UP_BF, value ? "1" : "0", 1); +} + +/** + * Allocate shared IB device context. If there is multiport device the + * master and representors will share this context, if there is single + * port dedicated IB device, the context will be used by only given + * port due to unification. + * + * Routine first searches the context for the specified IB device name, + * if found the shared context assumed and reference counter is incremented. + * If no context found the new one is created and initialized with specified + * IB device context and parameters. + * + * @param[in] spawn + * Pointer to the IB device attributes (name, port, etc). + * @param[in] config + * Pointer to device configuration structure. + * + * @return + * Pointer to mlx5_ibv_shared object on success, + * otherwise NULL and rte_errno is set. + */ +static struct mlx5_ibv_shared * +mlx5_alloc_shared_ibctx(const struct mlx5_dev_spawn_data *spawn, + const struct mlx5_dev_config *config) +{ + struct mlx5_ibv_shared *sh; + int dbmap_env; + int err = 0; + uint32_t i; +#ifdef HAVE_IBV_FLOW_DV_SUPPORT + struct mlx5_devx_tis_attr tis_attr = { 0 }; +#endif + + MLX5_ASSERT(spawn); + /* Secondary process should not create the shared context. */ + MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY); + pthread_mutex_lock(&mlx5_ibv_list_mutex); + /* Search for IB context by device name. */ + LIST_FOREACH(sh, &mlx5_ibv_list, next) { + if (!strcmp(sh->ibdev_name, spawn->ibv_dev->name)) { + sh->refcnt++; + goto exit; + } + } + /* No device found, we have to create new shared context. */ + MLX5_ASSERT(spawn->max_port); + sh = rte_zmalloc("ethdev shared ib context", + sizeof(struct mlx5_ibv_shared) + + spawn->max_port * + sizeof(struct mlx5_ibv_shared_port), + RTE_CACHE_LINE_SIZE); + if (!sh) { + DRV_LOG(ERR, "shared context allocation failure"); + rte_errno = ENOMEM; + goto exit; + } + /* + * Configure environment variable "MLX5_BF_SHUT_UP" + * before the device creation. The rdma_core library + * checks the variable at device creation and + * stores the result internally. + */ + dbmap_env = mlx5_config_doorbell_mapping_env(config); + /* Try to open IB device with DV first, then usual Verbs. 
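 *
 * [Editor's note - not part of the upstream patch] For reference, the
 * doorbell-mapping helpers above translate the tx_db_nc devarg into the
 * MLX5_SHUT_UP_BF environment variable that rdma-core samples at this open
 * (mapping as implemented in this snapshot):
 *
 *   tx_db_nc unset                 -> MLX5_SHUT_UP_BF = MLX5_SHUT_UP_BF_DEFAULT
 *   tx_db_nc == MLX5_TXDB_NCACHED  -> MLX5_SHUT_UP_BF = "1" (non-cached)
 *   any other accepted value       -> MLX5_SHUT_UP_BF = "0"
 *
 * The previous value is returned so mlx5_restore_doorbell_mapping_env() can
 * put it back once the device has been opened.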
*/ + errno = 0; + sh->ctx = mlx5_glue->dv_open_device(spawn->ibv_dev); + if (sh->ctx) { + sh->devx = 1; + DRV_LOG(DEBUG, "DevX is supported"); + /* The device is created, no need for environment. */ + mlx5_restore_doorbell_mapping_env(dbmap_env); + } else { + /* The environment variable is still configured. */ + sh->ctx = mlx5_glue->open_device(spawn->ibv_dev); + err = errno ? errno : ENODEV; + /* + * The environment variable is not needed anymore, + * all device creation attempts are completed. + */ + mlx5_restore_doorbell_mapping_env(dbmap_env); + if (!sh->ctx) + goto error; + DRV_LOG(DEBUG, "DevX is NOT supported"); + } + err = mlx5_glue->query_device_ex(sh->ctx, NULL, &sh->device_attr); + if (err) { + DRV_LOG(DEBUG, "ibv_query_device_ex() failed"); + goto error; + } + sh->refcnt = 1; + sh->max_port = spawn->max_port; + strncpy(sh->ibdev_name, sh->ctx->device->name, + sizeof(sh->ibdev_name)); + strncpy(sh->ibdev_path, sh->ctx->device->ibdev_path, + sizeof(sh->ibdev_path)); + pthread_mutex_init(&sh->intr_mutex, NULL); + /* + * Setting port_id to max unallowed value means + * there is no interrupt subhandler installed for + * the given port index i. + */ + for (i = 0; i < sh->max_port; i++) { + sh->port[i].ih_port_id = RTE_MAX_ETHPORTS; + sh->port[i].devx_ih_port_id = RTE_MAX_ETHPORTS; + } + sh->pd = mlx5_glue->alloc_pd(sh->ctx); + if (sh->pd == NULL) { + DRV_LOG(ERR, "PD allocation failure"); + err = ENOMEM; + goto error; + } +#ifdef HAVE_IBV_FLOW_DV_SUPPORT + if (sh->devx) { + err = mlx5_get_pdn(sh->pd, &sh->pdn); + if (err) { + DRV_LOG(ERR, "Fail to extract pdn from PD"); + goto error; + } + sh->td = mlx5_devx_cmd_create_td(sh->ctx); + if (!sh->td) { + DRV_LOG(ERR, "TD allocation failure"); + err = ENOMEM; + goto error; + } + tis_attr.transport_domain = sh->td->id; + sh->tis = mlx5_devx_cmd_create_tis(sh->ctx, &tis_attr); + if (!sh->tis) { + DRV_LOG(ERR, "TIS allocation failure"); + err = ENOMEM; + goto error; + } + } + sh->flow_id_pool = mlx5_flow_id_pool_alloc + ((1 << HAIRPIN_FLOW_ID_BITS) - 1); + if (!sh->flow_id_pool) { + DRV_LOG(ERR, "can't create flow id pool"); + err = ENOMEM; + goto error; + } +#endif /* HAVE_IBV_FLOW_DV_SUPPORT */ + /* + * Once the device is added to the list of memory event + * callback, its global MR cache table cannot be expanded + * on the fly because of deadlock. If it overflows, lookup + * should be done by searching MR list linearly, which is slow. + * + * At this point the device is not added to the memory + * event list yet, context is just being created. + */ + err = mlx5_mr_btree_init(&sh->share_cache.cache, + MLX5_MR_BTREE_CACHE_N * 2, + spawn->pci_dev->device.numa_node); + if (err) { + err = rte_errno; + goto error; + } + mlx5_flow_aging_init(sh); + mlx5_flow_counters_mng_init(sh); + mlx5_flow_ipool_create(sh, config); + /* Add device to memory callback list. */ + rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock); + LIST_INSERT_HEAD(&mlx5_shared_data->mem_event_cb_list, + sh, mem_event_cb); + rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock); + /* Add context to the global device list. 
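 *
 * [Editor's note - not part of the upstream patch] Each successful
 * mlx5_alloc_shared_ibctx() call must be balanced by mlx5_free_shared_ibctx()
 * (defined below); resources are actually torn down only when the reference
 * count drops to zero, e.g.:
 *
 *   sh = mlx5_alloc_shared_ibctx(spawn, config);   // creates or refcnt++
 *   if (sh) {
 *           // ... use the shared context ...
 *           mlx5_free_shared_ibctx(sh);            // frees only at refcnt 0
 *   }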
*/ + LIST_INSERT_HEAD(&mlx5_ibv_list, sh, next); +exit: + pthread_mutex_unlock(&mlx5_ibv_list_mutex); + return sh; +error: + pthread_mutex_unlock(&mlx5_ibv_list_mutex); + MLX5_ASSERT(sh); + if (sh->tis) + claim_zero(mlx5_devx_cmd_destroy(sh->tis)); + if (sh->td) + claim_zero(mlx5_devx_cmd_destroy(sh->td)); + if (sh->pd) + claim_zero(mlx5_glue->dealloc_pd(sh->pd)); + if (sh->ctx) + claim_zero(mlx5_glue->close_device(sh->ctx)); + if (sh->flow_id_pool) + mlx5_flow_id_pool_release(sh->flow_id_pool); + rte_free(sh); + MLX5_ASSERT(err > 0); + rte_errno = err; + return NULL; +} + +/** + * Free shared IB device context. Decrement counter and if zero free + * all allocated resources and close handles. + * + * @param[in] sh + * Pointer to mlx5_ibv_shared object to free + */ +static void +mlx5_free_shared_ibctx(struct mlx5_ibv_shared *sh) +{ + pthread_mutex_lock(&mlx5_ibv_list_mutex); +#ifdef RTE_LIBRTE_MLX5_DEBUG + /* Check the object presence in the list. */ + struct mlx5_ibv_shared *lctx; + + LIST_FOREACH(lctx, &mlx5_ibv_list, next) + if (lctx == sh) + break; + MLX5_ASSERT(lctx); + if (lctx != sh) { + DRV_LOG(ERR, "Freeing non-existing shared IB context"); + goto exit; + } +#endif + MLX5_ASSERT(sh); + MLX5_ASSERT(sh->refcnt); + /* Secondary process should not free the shared context. */ + MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY); + if (--sh->refcnt) + goto exit; + /* Remove from memory callback device list. */ + rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock); + LIST_REMOVE(sh, mem_event_cb); + rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock); + /* Release created Memory Regions. */ + mlx5_mr_release_cache(&sh->share_cache); + /* Remove context from the global device list. */ + LIST_REMOVE(sh, next); + /* + * Ensure there is no async event handler installed. + * Only primary process handles async device events. + **/ + mlx5_flow_counters_mng_close(sh); + mlx5_flow_ipool_destroy(sh); + MLX5_ASSERT(!sh->intr_cnt); + if (sh->intr_cnt) + mlx5_intr_callback_unregister + (&sh->intr_handle, mlx5_dev_interrupt_handler, sh); +#ifdef HAVE_MLX5_DEVX_ASYNC_SUPPORT + if (sh->devx_intr_cnt) { + if (sh->intr_handle_devx.fd) + rte_intr_callback_unregister(&sh->intr_handle_devx, + mlx5_dev_interrupt_handler_devx, sh); + if (sh->devx_comp) + mlx5dv_devx_destroy_cmd_comp(sh->devx_comp); + } +#endif + pthread_mutex_destroy(&sh->intr_mutex); + if (sh->pd) + claim_zero(mlx5_glue->dealloc_pd(sh->pd)); + if (sh->tis) + claim_zero(mlx5_devx_cmd_destroy(sh->tis)); + if (sh->td) + claim_zero(mlx5_devx_cmd_destroy(sh->td)); + if (sh->ctx) + claim_zero(mlx5_glue->close_device(sh->ctx)); + if (sh->flow_id_pool) + mlx5_flow_id_pool_release(sh->flow_id_pool); + rte_free(sh); +exit: + pthread_mutex_unlock(&mlx5_ibv_list_mutex); +} + +/** + * Destroy table hash list and all the root entries per domain. + * + * @param[in] priv + * Pointer to the private device data structure. 
+ */ +static void +mlx5_free_table_hash_list(struct mlx5_priv *priv) +{ + struct mlx5_ibv_shared *sh = priv->sh; + struct mlx5_flow_tbl_data_entry *tbl_data; + union mlx5_flow_tbl_key table_key = { + { + .table_id = 0, + .reserved = 0, + .domain = 0, + .direction = 0, + } + }; + struct mlx5_hlist_entry *pos; + + if (!sh->flow_tbls) + return; + pos = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64); + if (pos) { + tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry, + entry); + MLX5_ASSERT(tbl_data); + mlx5_hlist_remove(sh->flow_tbls, pos); + rte_free(tbl_data); + } + table_key.direction = 1; + pos = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64); + if (pos) { + tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry, + entry); + MLX5_ASSERT(tbl_data); + mlx5_hlist_remove(sh->flow_tbls, pos); + rte_free(tbl_data); + } + table_key.direction = 0; + table_key.domain = 1; + pos = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64); + if (pos) { + tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry, + entry); + MLX5_ASSERT(tbl_data); + mlx5_hlist_remove(sh->flow_tbls, pos); + rte_free(tbl_data); + } + mlx5_hlist_destroy(sh->flow_tbls, NULL, NULL); +} + +/** + * Initialize flow table hash list and create the root tables entry + * for each domain. + * + * @param[in] priv + * Pointer to the private device data structure. + * + * @return + * Zero on success, positive error code otherwise. + */ +static int +mlx5_alloc_table_hash_list(struct mlx5_priv *priv) +{ + struct mlx5_ibv_shared *sh = priv->sh; + char s[MLX5_HLIST_NAMESIZE]; + int err = 0; + + MLX5_ASSERT(sh); + snprintf(s, sizeof(s), "%s_flow_table", priv->sh->ibdev_name); + sh->flow_tbls = mlx5_hlist_create(s, MLX5_FLOW_TABLE_HLIST_ARRAY_SIZE); + if (!sh->flow_tbls) { + DRV_LOG(ERR, "flow tables with hash creation failed.\n"); + err = ENOMEM; + return err; + } +#ifndef HAVE_MLX5DV_DR + /* + * In case we have not DR support, the zero tables should be created + * because DV expect to see them even if they cannot be created by + * RDMA-CORE. + */ + union mlx5_flow_tbl_key table_key = { + { + .table_id = 0, + .reserved = 0, + .domain = 0, + .direction = 0, + } + }; + struct mlx5_flow_tbl_data_entry *tbl_data = rte_zmalloc(NULL, + sizeof(*tbl_data), 0); + + if (!tbl_data) { + err = ENOMEM; + goto error; + } + tbl_data->entry.key = table_key.v64; + err = mlx5_hlist_insert(sh->flow_tbls, &tbl_data->entry); + if (err) + goto error; + rte_atomic32_init(&tbl_data->tbl.refcnt); + rte_atomic32_inc(&tbl_data->tbl.refcnt); + table_key.direction = 1; + tbl_data = rte_zmalloc(NULL, sizeof(*tbl_data), 0); + if (!tbl_data) { + err = ENOMEM; + goto error; + } + tbl_data->entry.key = table_key.v64; + err = mlx5_hlist_insert(sh->flow_tbls, &tbl_data->entry); + if (err) + goto error; + rte_atomic32_init(&tbl_data->tbl.refcnt); + rte_atomic32_inc(&tbl_data->tbl.refcnt); + table_key.direction = 0; + table_key.domain = 1; + tbl_data = rte_zmalloc(NULL, sizeof(*tbl_data), 0); + if (!tbl_data) { + err = ENOMEM; + goto error; + } + tbl_data->entry.key = table_key.v64; + err = mlx5_hlist_insert(sh->flow_tbls, &tbl_data->entry); + if (err) + goto error; + rte_atomic32_init(&tbl_data->tbl.refcnt); + rte_atomic32_inc(&tbl_data->tbl.refcnt); + return err; +error: + mlx5_free_table_hash_list(priv); +#endif /* HAVE_MLX5DV_DR */ + return err; +} + +/** + * Initialize DR related data within private structure. + * Routine checks the reference counter and does actual + * resources creation/initialization only if counter is zero. 
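 *
 * [Editor's note - not part of the upstream patch] In the non-DR fallback of
 * mlx5_alloc_table_hash_list() above, the three pre-created root entries are
 * keyed only by (domain, direction) with table_id == 0, commonly the NIC Rx,
 * NIC Tx and FDB root tables respectively:
 *
 *   root #1: { .domain = 0, .direction = 0 }
 *   root #2: { .domain = 0, .direction = 1 }
 *   root #3: { .domain = 1, .direction = 0 }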
+ * + * @param[in] priv + * Pointer to the private device data structure. + * + * @return + * Zero on success, positive error code otherwise. + */ +static int +mlx5_alloc_shared_dr(struct mlx5_priv *priv) +{ + struct mlx5_ibv_shared *sh = priv->sh; + char s[MLX5_HLIST_NAMESIZE]; + int err = 0; + + if (!sh->flow_tbls) + err = mlx5_alloc_table_hash_list(priv); + else + DRV_LOG(DEBUG, "sh->flow_tbls[%p] already created, reuse\n", + (void *)sh->flow_tbls); + if (err) + return err; + /* Create tags hash list table. */ + snprintf(s, sizeof(s), "%s_tags", sh->ibdev_name); + sh->tag_table = mlx5_hlist_create(s, MLX5_TAGS_HLIST_ARRAY_SIZE); + if (!sh->tag_table) { + DRV_LOG(ERR, "tags with hash creation failed.\n"); + err = ENOMEM; + goto error; + } +#ifdef HAVE_MLX5DV_DR + void *domain; + + if (sh->dv_refcnt) { + /* Shared DV/DR structures is already initialized. */ + sh->dv_refcnt++; + priv->dr_shared = 1; + return 0; + } + /* Reference counter is zero, we should initialize structures. */ + domain = mlx5_glue->dr_create_domain(sh->ctx, + MLX5DV_DR_DOMAIN_TYPE_NIC_RX); + if (!domain) { + DRV_LOG(ERR, "ingress mlx5dv_dr_create_domain failed"); + err = errno; + goto error; + } + sh->rx_domain = domain; + domain = mlx5_glue->dr_create_domain(sh->ctx, + MLX5DV_DR_DOMAIN_TYPE_NIC_TX); + if (!domain) { + DRV_LOG(ERR, "egress mlx5dv_dr_create_domain failed"); + err = errno; + goto error; + } + pthread_mutex_init(&sh->dv_mutex, NULL); + sh->tx_domain = domain; +#ifdef HAVE_MLX5DV_DR_ESWITCH + if (priv->config.dv_esw_en) { + domain = mlx5_glue->dr_create_domain + (sh->ctx, MLX5DV_DR_DOMAIN_TYPE_FDB); + if (!domain) { + DRV_LOG(ERR, "FDB mlx5dv_dr_create_domain failed"); + err = errno; + goto error; + } + sh->fdb_domain = domain; + sh->esw_drop_action = mlx5_glue->dr_create_flow_action_drop(); + } +#endif + sh->pop_vlan_action = mlx5_glue->dr_create_flow_action_pop_vlan(); +#endif /* HAVE_MLX5DV_DR */ + sh->dv_refcnt++; + priv->dr_shared = 1; + return 0; +error: + /* Rollback the created objects. */ + if (sh->rx_domain) { + mlx5_glue->dr_destroy_domain(sh->rx_domain); + sh->rx_domain = NULL; + } + if (sh->tx_domain) { + mlx5_glue->dr_destroy_domain(sh->tx_domain); + sh->tx_domain = NULL; + } + if (sh->fdb_domain) { + mlx5_glue->dr_destroy_domain(sh->fdb_domain); + sh->fdb_domain = NULL; + } + if (sh->esw_drop_action) { + mlx5_glue->destroy_flow_action(sh->esw_drop_action); + sh->esw_drop_action = NULL; + } + if (sh->pop_vlan_action) { + mlx5_glue->destroy_flow_action(sh->pop_vlan_action); + sh->pop_vlan_action = NULL; + } + if (sh->tag_table) { + /* tags should be destroyed with flow before. */ + mlx5_hlist_destroy(sh->tag_table, NULL, NULL); + sh->tag_table = NULL; + } + mlx5_free_table_hash_list(priv); + return err; +} + +/** + * Destroy DR related data within private structure. + * + * @param[in] priv + * Pointer to the private device data structure. 
+ */ +static void +mlx5_free_shared_dr(struct mlx5_priv *priv) +{ + struct mlx5_ibv_shared *sh; + + if (!priv->dr_shared) + return; + priv->dr_shared = 0; + sh = priv->sh; + MLX5_ASSERT(sh); +#ifdef HAVE_MLX5DV_DR + MLX5_ASSERT(sh->dv_refcnt); + if (sh->dv_refcnt && --sh->dv_refcnt) + return; + if (sh->rx_domain) { + mlx5_glue->dr_destroy_domain(sh->rx_domain); + sh->rx_domain = NULL; + } + if (sh->tx_domain) { + mlx5_glue->dr_destroy_domain(sh->tx_domain); + sh->tx_domain = NULL; + } +#ifdef HAVE_MLX5DV_DR_ESWITCH + if (sh->fdb_domain) { + mlx5_glue->dr_destroy_domain(sh->fdb_domain); + sh->fdb_domain = NULL; + } + if (sh->esw_drop_action) { + mlx5_glue->destroy_flow_action(sh->esw_drop_action); + sh->esw_drop_action = NULL; + } +#endif + if (sh->pop_vlan_action) { + mlx5_glue->destroy_flow_action(sh->pop_vlan_action); + sh->pop_vlan_action = NULL; + } + pthread_mutex_destroy(&sh->dv_mutex); +#endif /* HAVE_MLX5DV_DR */ + if (sh->tag_table) { + /* tags should be destroyed with flow before. */ + mlx5_hlist_destroy(sh->tag_table, NULL, NULL); + sh->tag_table = NULL; + } + mlx5_free_table_hash_list(priv); +} + +/** + * Initialize shared data between primary and secondary process. + * + * A memzone is reserved by primary process and secondary processes attach to + * the memzone. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +mlx5_init_shared_data(void) +{ + const struct rte_memzone *mz; + int ret = 0; + + rte_spinlock_lock(&mlx5_shared_data_lock); + if (mlx5_shared_data == NULL) { + if (rte_eal_process_type() == RTE_PROC_PRIMARY) { + /* Allocate shared memory. */ + mz = rte_memzone_reserve(MZ_MLX5_PMD_SHARED_DATA, + sizeof(*mlx5_shared_data), + SOCKET_ID_ANY, 0); + if (mz == NULL) { + DRV_LOG(ERR, + "Cannot allocate mlx5 shared data"); + ret = -rte_errno; + goto error; + } + mlx5_shared_data = mz->addr; + memset(mlx5_shared_data, 0, sizeof(*mlx5_shared_data)); + rte_spinlock_init(&mlx5_shared_data->lock); + } else { + /* Lookup allocated shared memory. */ + mz = rte_memzone_lookup(MZ_MLX5_PMD_SHARED_DATA); + if (mz == NULL) { + DRV_LOG(ERR, + "Cannot attach mlx5 shared data"); + ret = -rte_errno; + goto error; + } + mlx5_shared_data = mz->addr; + memset(&mlx5_local_data, 0, sizeof(mlx5_local_data)); + } + } +error: + rte_spinlock_unlock(&mlx5_shared_data_lock); + return ret; +} + +/** + * Retrieve integer value from environment variable. + * + * @param[in] name + * Environment variable name. + * + * @return + * Integer value, 0 if the variable is not set. + */ +int +mlx5_getenv_int(const char *name) +{ + const char *val = getenv(name); + + if (val == NULL) + return 0; + return atoi(val); +} + +/** + * Verbs callback to allocate a memory. This function should allocate the space + * according to the size provided residing inside a huge page. + * Please note that all allocation must respect the alignment from libmlx5 + * (i.e. currently sysconf(_SC_PAGESIZE)). + * + * @param[in] size + * The size in bytes of the memory to allocate. + * @param[in] data + * A pointer to the callback data. + * + * @return + * Allocated buffer, NULL otherwise and rte_errno is set. 
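 *
 * [Editor's note - not part of the upstream patch] This allocator and
 * mlx5_free_verbs_buf() below form the buffer-allocator callback pair the PMD
 * presumably hands to rdma-core (the registration itself is not visible in
 * this hunk), so queue buffers come from DPDK hugepage memory on the NUMA
 * socket of the queue being created rather than from plain malloc'd memory.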
+ */ +static void * +mlx5_alloc_verbs_buf(size_t size, void *data) +{ + struct mlx5_priv *priv = data; + void *ret; + size_t alignment = sysconf(_SC_PAGESIZE); + unsigned int socket = SOCKET_ID_ANY; + + if (priv->verbs_alloc_ctx.type == MLX5_VERBS_ALLOC_TYPE_TX_QUEUE) { + const struct mlx5_txq_ctrl *ctrl = priv->verbs_alloc_ctx.obj; + + socket = ctrl->socket; + } else if (priv->verbs_alloc_ctx.type == + MLX5_VERBS_ALLOC_TYPE_RX_QUEUE) { + const struct mlx5_rxq_ctrl *ctrl = priv->verbs_alloc_ctx.obj; + + socket = ctrl->socket; + } + MLX5_ASSERT(data != NULL); + ret = rte_malloc_socket(__func__, size, alignment, socket); + if (!ret && size) + rte_errno = ENOMEM; + return ret; +} + +/** + * Verbs callback to free a memory. + * + * @param[in] ptr + * A pointer to the memory to free. + * @param[in] data + * A pointer to the callback data. + */ +static void +mlx5_free_verbs_buf(void *ptr, void *data __rte_unused) +{ + MLX5_ASSERT(data != NULL); + rte_free(ptr); +} + +/** + * DPDK callback to add udp tunnel port + * + * @param[in] dev + * A pointer to eth_dev + * @param[in] udp_tunnel + * A pointer to udp tunnel + * + * @return + * 0 on valid udp ports and tunnels, -ENOTSUP otherwise. + */ +int +mlx5_udp_tunnel_port_add(struct rte_eth_dev *dev __rte_unused, + struct rte_eth_udp_tunnel *udp_tunnel) +{ + MLX5_ASSERT(udp_tunnel != NULL); + if (udp_tunnel->prot_type == RTE_TUNNEL_TYPE_VXLAN && + udp_tunnel->udp_port == 4789) + return 0; + if (udp_tunnel->prot_type == RTE_TUNNEL_TYPE_VXLAN_GPE && + udp_tunnel->udp_port == 4790) + return 0; + return -ENOTSUP; +} + +/** + * Initialize process private data structure. + * + * @param dev + * Pointer to Ethernet device structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_proc_priv_init(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_proc_priv *ppriv; + size_t ppriv_size; + + /* + * UAR register table follows the process private structure. BlueFlame + * registers for Tx queues are stored in the table. + */ + ppriv_size = + sizeof(struct mlx5_proc_priv) + priv->txqs_n * sizeof(void *); + ppriv = rte_malloc_socket("mlx5_proc_priv", ppriv_size, + RTE_CACHE_LINE_SIZE, dev->device->numa_node); + if (!ppriv) { + rte_errno = ENOMEM; + return -rte_errno; + } + ppriv->uar_table_sz = ppriv_size; + dev->process_private = ppriv; + return 0; +} + +/** + * Un-initialize process private data structure. + * + * @param dev + * Pointer to Ethernet device structure. + */ +static void +mlx5_proc_priv_uninit(struct rte_eth_dev *dev) +{ + if (!dev->process_private) + return; + rte_free(dev->process_private); + dev->process_private = NULL; +} + +/** + * DPDK callback to close the device. + * + * Destroy all queues and objects, free memory. + * + * @param dev + * Pointer to Ethernet device structure. + */ +static void +mlx5_dev_close(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + unsigned int i; + int ret; + + DRV_LOG(DEBUG, "port %u closing device \"%s\"", + dev->data->port_id, + ((priv->sh->ctx != NULL) ? priv->sh->ctx->device->name : "")); + /* In case mlx5_dev_stop() has not been called. */ + mlx5_dev_interrupt_handler_uninstall(dev); + mlx5_dev_interrupt_handler_devx_uninstall(dev); + /* + * If default mreg copy action is removed at the stop stage, + * the search will return none and nothing will be done anymore. 
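 *
 * [Editor's note - not part of the upstream patch] Side note on
 * mlx5_udp_tunnel_port_add() above: only the IANA default ports are accepted
 * (4789 for VXLAN, 4790 for VXLAN-GPE); anything else returns -ENOTSUP. A
 * hedged application-side sketch using the generic ethdev API of this era:
 *
 *   struct rte_eth_udp_tunnel tun = {
 *           .udp_port = 4789,
 *           .prot_type = RTE_TUNNEL_TYPE_VXLAN,
 *   };
 *   int ret = rte_eth_dev_udp_tunnel_port_add(port_id, &tun);  // 0 expected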
+ */ + mlx5_flow_stop_default(dev); + mlx5_traffic_disable(dev); + /* + * If all the flows are already flushed in the device stop stage, + * then this will return directly without any action. + */ + mlx5_flow_list_flush(dev, &priv->flows, true); + mlx5_flow_meter_flush(dev, NULL); + /* Free the intermediate buffers for flow creation. */ + mlx5_flow_free_intermediate(dev); + /* Prevent crashes when queues are still in use. */ + dev->rx_pkt_burst = removed_rx_burst; + dev->tx_pkt_burst = removed_tx_burst; + rte_wmb(); + /* Disable datapath on secondary process. */ + mlx5_mp_req_stop_rxtx(dev); + if (priv->rxqs != NULL) { + /* XXX race condition if mlx5_rx_burst() is still running. */ + usleep(1000); + for (i = 0; (i != priv->rxqs_n); ++i) + mlx5_rxq_release(dev, i); + priv->rxqs_n = 0; + priv->rxqs = NULL; + } + if (priv->txqs != NULL) { + /* XXX race condition if mlx5_tx_burst() is still running. */ + usleep(1000); + for (i = 0; (i != priv->txqs_n); ++i) + mlx5_txq_release(dev, i); + priv->txqs_n = 0; + priv->txqs = NULL; + } + mlx5_proc_priv_uninit(dev); + if (priv->mreg_cp_tbl) + mlx5_hlist_destroy(priv->mreg_cp_tbl, NULL, NULL); + mlx5_mprq_free_mp(dev); + mlx5_free_shared_dr(priv); + if (priv->rss_conf.rss_key != NULL) + rte_free(priv->rss_conf.rss_key); + if (priv->reta_idx != NULL) + rte_free(priv->reta_idx); + if (priv->config.vf) + mlx5_nl_mac_addr_flush(priv->nl_socket_route, mlx5_ifindex(dev), + dev->data->mac_addrs, + MLX5_MAX_MAC_ADDRESSES, priv->mac_own); + if (priv->nl_socket_route >= 0) + close(priv->nl_socket_route); + if (priv->nl_socket_rdma >= 0) + close(priv->nl_socket_rdma); + if (priv->vmwa_context) + mlx5_vlan_vmwa_exit(priv->vmwa_context); + ret = mlx5_hrxq_verify(dev); + if (ret) + DRV_LOG(WARNING, "port %u some hash Rx queue still remain", + dev->data->port_id); + ret = mlx5_ind_table_obj_verify(dev); + if (ret) + DRV_LOG(WARNING, "port %u some indirection table still remain", + dev->data->port_id); + ret = mlx5_rxq_obj_verify(dev); + if (ret) + DRV_LOG(WARNING, "port %u some Rx queue objects still remain", + dev->data->port_id); + ret = mlx5_rxq_verify(dev); + if (ret) + DRV_LOG(WARNING, "port %u some Rx queues still remain", + dev->data->port_id); + ret = mlx5_txq_obj_verify(dev); + if (ret) + DRV_LOG(WARNING, "port %u some Verbs Tx queue still remain", + dev->data->port_id); + ret = mlx5_txq_verify(dev); + if (ret) + DRV_LOG(WARNING, "port %u some Tx queues still remain", + dev->data->port_id); + ret = mlx5_flow_verify(dev); + if (ret) + DRV_LOG(WARNING, "port %u some flows still remain", + dev->data->port_id); + if (priv->sh) { + /* + * Free the shared context in last turn, because the cleanup + * routines above may use some shared fields, like + * mlx5_nl_mac_addr_flush() uses ibdev_path for retrieveing + * ifindex if Netlink fails. + */ + mlx5_free_shared_ibctx(priv->sh); + priv->sh = NULL; + } + if (priv->domain_id != RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) { + unsigned int c = 0; + uint16_t port_id; + + MLX5_ETH_FOREACH_DEV(port_id, priv->pci_dev) { + struct mlx5_priv *opriv = + rte_eth_devices[port_id].data->dev_private; + + if (!opriv || + opriv->domain_id != priv->domain_id || + &rte_eth_devices[port_id] == dev) + continue; + ++c; + break; + } + if (!c) + claim_zero(rte_eth_switch_domain_free(priv->domain_id)); + } + memset(priv, 0, sizeof(*priv)); + priv->domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID; + /* + * Reset mac_addrs to NULL such that it is not freed as part of + * rte_eth_dev_release_port(). 
mac_addrs is part of dev_private so + * it is freed when dev_private is freed. + */ + dev->data->mac_addrs = NULL; +} + +const struct eth_dev_ops mlx5_dev_ops = { + .dev_configure = mlx5_dev_configure, + .dev_start = mlx5_dev_start, + .dev_stop = mlx5_dev_stop, + .dev_set_link_down = mlx5_set_link_down, + .dev_set_link_up = mlx5_set_link_up, + .dev_close = mlx5_dev_close, + .promiscuous_enable = mlx5_promiscuous_enable, + .promiscuous_disable = mlx5_promiscuous_disable, + .allmulticast_enable = mlx5_allmulticast_enable, + .allmulticast_disable = mlx5_allmulticast_disable, + .link_update = mlx5_link_update, + .stats_get = mlx5_stats_get, + .stats_reset = mlx5_stats_reset, + .xstats_get = mlx5_xstats_get, + .xstats_reset = mlx5_xstats_reset, + .xstats_get_names = mlx5_xstats_get_names, + .fw_version_get = mlx5_fw_version_get, + .dev_infos_get = mlx5_dev_infos_get, + .read_clock = mlx5_read_clock, + .dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get, + .vlan_filter_set = mlx5_vlan_filter_set, + .rx_queue_setup = mlx5_rx_queue_setup, + .rx_hairpin_queue_setup = mlx5_rx_hairpin_queue_setup, + .tx_queue_setup = mlx5_tx_queue_setup, + .tx_hairpin_queue_setup = mlx5_tx_hairpin_queue_setup, + .rx_queue_release = mlx5_rx_queue_release, + .tx_queue_release = mlx5_tx_queue_release, + .flow_ctrl_get = mlx5_dev_get_flow_ctrl, + .flow_ctrl_set = mlx5_dev_set_flow_ctrl, + .mac_addr_remove = mlx5_mac_addr_remove, + .mac_addr_add = mlx5_mac_addr_add, + .mac_addr_set = mlx5_mac_addr_set, + .set_mc_addr_list = mlx5_set_mc_addr_list, + .mtu_set = mlx5_dev_set_mtu, + .vlan_strip_queue_set = mlx5_vlan_strip_queue_set, + .vlan_offload_set = mlx5_vlan_offload_set, + .reta_update = mlx5_dev_rss_reta_update, + .reta_query = mlx5_dev_rss_reta_query, + .rss_hash_update = mlx5_rss_hash_update, + .rss_hash_conf_get = mlx5_rss_hash_conf_get, + .filter_ctrl = mlx5_dev_filter_ctrl, + .rx_descriptor_status = mlx5_rx_descriptor_status, + .tx_descriptor_status = mlx5_tx_descriptor_status, + .rxq_info_get = mlx5_rxq_info_get, + .txq_info_get = mlx5_txq_info_get, + .rx_burst_mode_get = mlx5_rx_burst_mode_get, + .tx_burst_mode_get = mlx5_tx_burst_mode_get, + .rx_queue_count = mlx5_rx_queue_count, + .rx_queue_intr_enable = mlx5_rx_intr_enable, + .rx_queue_intr_disable = mlx5_rx_intr_disable, + .is_removed = mlx5_is_removed, + .udp_tunnel_port_add = mlx5_udp_tunnel_port_add, + .get_module_info = mlx5_get_module_info, + .get_module_eeprom = mlx5_get_module_eeprom, + .hairpin_cap_get = mlx5_hairpin_cap_get, + .mtr_ops_get = mlx5_flow_meter_ops_get, +}; + +/* Available operations from secondary process. */ +static const struct eth_dev_ops mlx5_dev_sec_ops = { + .stats_get = mlx5_stats_get, + .stats_reset = mlx5_stats_reset, + .xstats_get = mlx5_xstats_get, + .xstats_reset = mlx5_xstats_reset, + .xstats_get_names = mlx5_xstats_get_names, + .fw_version_get = mlx5_fw_version_get, + .dev_infos_get = mlx5_dev_infos_get, + .rx_descriptor_status = mlx5_rx_descriptor_status, + .tx_descriptor_status = mlx5_tx_descriptor_status, + .rxq_info_get = mlx5_rxq_info_get, + .txq_info_get = mlx5_txq_info_get, + .rx_burst_mode_get = mlx5_rx_burst_mode_get, + .tx_burst_mode_get = mlx5_tx_burst_mode_get, + .get_module_info = mlx5_get_module_info, + .get_module_eeprom = mlx5_get_module_eeprom, +}; + +/* Available operations in flow isolated mode. 
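 *
 * [Editor's note - not part of the upstream patch] Compared with mlx5_dev_ops
 * above, this isolated-mode table drops the RSS/RETA update callbacks,
 * read_clock, rx_queue_count and udp_tunnel_port_add, while the
 * secondary-process table (mlx5_dev_sec_ops) is limited to statistics, info
 * queries and descriptor/burst-mode status.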
*/ +const struct eth_dev_ops mlx5_dev_ops_isolate = { + .dev_configure = mlx5_dev_configure, + .dev_start = mlx5_dev_start, + .dev_stop = mlx5_dev_stop, + .dev_set_link_down = mlx5_set_link_down, + .dev_set_link_up = mlx5_set_link_up, + .dev_close = mlx5_dev_close, + .promiscuous_enable = mlx5_promiscuous_enable, + .promiscuous_disable = mlx5_promiscuous_disable, + .allmulticast_enable = mlx5_allmulticast_enable, + .allmulticast_disable = mlx5_allmulticast_disable, + .link_update = mlx5_link_update, + .stats_get = mlx5_stats_get, + .stats_reset = mlx5_stats_reset, + .xstats_get = mlx5_xstats_get, + .xstats_reset = mlx5_xstats_reset, + .xstats_get_names = mlx5_xstats_get_names, + .fw_version_get = mlx5_fw_version_get, + .dev_infos_get = mlx5_dev_infos_get, + .dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get, + .vlan_filter_set = mlx5_vlan_filter_set, + .rx_queue_setup = mlx5_rx_queue_setup, + .rx_hairpin_queue_setup = mlx5_rx_hairpin_queue_setup, + .tx_queue_setup = mlx5_tx_queue_setup, + .tx_hairpin_queue_setup = mlx5_tx_hairpin_queue_setup, + .rx_queue_release = mlx5_rx_queue_release, + .tx_queue_release = mlx5_tx_queue_release, + .flow_ctrl_get = mlx5_dev_get_flow_ctrl, + .flow_ctrl_set = mlx5_dev_set_flow_ctrl, + .mac_addr_remove = mlx5_mac_addr_remove, + .mac_addr_add = mlx5_mac_addr_add, + .mac_addr_set = mlx5_mac_addr_set, + .set_mc_addr_list = mlx5_set_mc_addr_list, + .mtu_set = mlx5_dev_set_mtu, + .vlan_strip_queue_set = mlx5_vlan_strip_queue_set, + .vlan_offload_set = mlx5_vlan_offload_set, + .filter_ctrl = mlx5_dev_filter_ctrl, + .rx_descriptor_status = mlx5_rx_descriptor_status, + .tx_descriptor_status = mlx5_tx_descriptor_status, + .rxq_info_get = mlx5_rxq_info_get, + .txq_info_get = mlx5_txq_info_get, + .rx_burst_mode_get = mlx5_rx_burst_mode_get, + .tx_burst_mode_get = mlx5_tx_burst_mode_get, + .rx_queue_intr_enable = mlx5_rx_intr_enable, + .rx_queue_intr_disable = mlx5_rx_intr_disable, + .is_removed = mlx5_is_removed, + .get_module_info = mlx5_get_module_info, + .get_module_eeprom = mlx5_get_module_eeprom, + .hairpin_cap_get = mlx5_hairpin_cap_get, + .mtr_ops_get = mlx5_flow_meter_ops_get, +}; + +/** + * Verify and store value for device argument. + * + * @param[in] key + * Key argument to verify. + * @param[in] val + * Value associated with key. + * @param opaque + * User data. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +mlx5_args_check(const char *key, const char *val, void *opaque) +{ + struct mlx5_dev_config *config = opaque; + unsigned long tmp; + + /* No-op, port representors are processed in mlx5_dev_spawn(). 
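 *
 * [Editor's note - not part of the upstream patch] Every other key below is
 * parsed with strtoul(val, NULL, 0), so decimal and 0x-prefixed values are
 * both accepted; boolean knobs are coerced with !!tmp, enumerated knobs
 * (tx_db_nc, dv_xmeta_en) are range-checked, and an unknown key fails the
 * parse with rte_errno = EINVAL.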
*/ + if (!strcmp(MLX5_REPRESENTOR, key)) + return 0; + errno = 0; + tmp = strtoul(val, NULL, 0); + if (errno) { + rte_errno = errno; + DRV_LOG(WARNING, "%s: \"%s\" is not a valid integer", key, val); + return -rte_errno; + } + if (strcmp(MLX5_RXQ_CQE_COMP_EN, key) == 0) { + config->cqe_comp = !!tmp; + } else if (strcmp(MLX5_RXQ_CQE_PAD_EN, key) == 0) { + config->cqe_pad = !!tmp; + } else if (strcmp(MLX5_RXQ_PKT_PAD_EN, key) == 0) { + config->hw_padding = !!tmp; + } else if (strcmp(MLX5_RX_MPRQ_EN, key) == 0) { + config->mprq.enabled = !!tmp; + } else if (strcmp(MLX5_RX_MPRQ_LOG_STRIDE_NUM, key) == 0) { + config->mprq.stride_num_n = tmp; + } else if (strcmp(MLX5_RX_MPRQ_LOG_STRIDE_SIZE, key) == 0) { + config->mprq.stride_size_n = tmp; + } else if (strcmp(MLX5_RX_MPRQ_MAX_MEMCPY_LEN, key) == 0) { + config->mprq.max_memcpy_len = tmp; + } else if (strcmp(MLX5_RXQS_MIN_MPRQ, key) == 0) { + config->mprq.min_rxqs_num = tmp; + } else if (strcmp(MLX5_TXQ_INLINE, key) == 0) { + DRV_LOG(WARNING, "%s: deprecated parameter," + " converted to txq_inline_max", key); + config->txq_inline_max = tmp; + } else if (strcmp(MLX5_TXQ_INLINE_MAX, key) == 0) { + config->txq_inline_max = tmp; + } else if (strcmp(MLX5_TXQ_INLINE_MIN, key) == 0) { + config->txq_inline_min = tmp; + } else if (strcmp(MLX5_TXQ_INLINE_MPW, key) == 0) { + config->txq_inline_mpw = tmp; + } else if (strcmp(MLX5_TXQS_MIN_INLINE, key) == 0) { + config->txqs_inline = tmp; + } else if (strcmp(MLX5_TXQS_MAX_VEC, key) == 0) { + DRV_LOG(WARNING, "%s: deprecated parameter, ignored", key); + } else if (strcmp(MLX5_TXQ_MPW_EN, key) == 0) { + config->mps = !!tmp; + } else if (strcmp(MLX5_TX_DB_NC, key) == 0) { + if (tmp != MLX5_TXDB_CACHED && + tmp != MLX5_TXDB_NCACHED && + tmp != MLX5_TXDB_HEURISTIC) { + DRV_LOG(ERR, "invalid Tx doorbell " + "mapping parameter"); + rte_errno = EINVAL; + return -rte_errno; + } + config->dbnc = tmp; + } else if (strcmp(MLX5_TXQ_MPW_HDR_DSEG_EN, key) == 0) { + DRV_LOG(WARNING, "%s: deprecated parameter, ignored", key); + } else if (strcmp(MLX5_TXQ_MAX_INLINE_LEN, key) == 0) { + DRV_LOG(WARNING, "%s: deprecated parameter," + " converted to txq_inline_mpw", key); + config->txq_inline_mpw = tmp; + } else if (strcmp(MLX5_TX_VEC_EN, key) == 0) { + DRV_LOG(WARNING, "%s: deprecated parameter, ignored", key); + } else if (strcmp(MLX5_RX_VEC_EN, key) == 0) { + config->rx_vec_en = !!tmp; + } else if (strcmp(MLX5_L3_VXLAN_EN, key) == 0) { + config->l3_vxlan_en = !!tmp; + } else if (strcmp(MLX5_VF_NL_EN, key) == 0) { + config->vf_nl_en = !!tmp; + } else if (strcmp(MLX5_DV_ESW_EN, key) == 0) { + config->dv_esw_en = !!tmp; + } else if (strcmp(MLX5_DV_FLOW_EN, key) == 0) { + config->dv_flow_en = !!tmp; + } else if (strcmp(MLX5_DV_XMETA_EN, key) == 0) { + if (tmp != MLX5_XMETA_MODE_LEGACY && + tmp != MLX5_XMETA_MODE_META16 && + tmp != MLX5_XMETA_MODE_META32) { + DRV_LOG(ERR, "invalid extensive " + "metadata parameter"); + rte_errno = EINVAL; + return -rte_errno; + } + config->dv_xmeta_en = tmp; + } else if (strcmp(MLX5_MR_EXT_MEMSEG_EN, key) == 0) { + config->mr_ext_memseg_en = !!tmp; + } else if (strcmp(MLX5_MAX_DUMP_FILES_NUM, key) == 0) { + config->max_dump_files_num = tmp; + } else if (strcmp(MLX5_LRO_TIMEOUT_USEC, key) == 0) { + config->lro.timeout = tmp; + } else if (strcmp(MLX5_CLASS_ARG_NAME, key) == 0) { + DRV_LOG(DEBUG, "class argument is %s.", val); + } else if (strcmp(MLX5_HP_BUF_SIZE, key) == 0) { + config->log_hp_size = tmp; + } else { + DRV_LOG(WARNING, "%s: unknown parameter", key); + rte_errno = EINVAL; + return 
-rte_errno; + } + return 0; +} + +/** + * Parse device parameters. + * + * @param config + * Pointer to device configuration structure. + * @param devargs + * Device arguments structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs) +{ + const char **params = (const char *[]){ + MLX5_RXQ_CQE_COMP_EN, + MLX5_RXQ_CQE_PAD_EN, + MLX5_RXQ_PKT_PAD_EN, + MLX5_RX_MPRQ_EN, + MLX5_RX_MPRQ_LOG_STRIDE_NUM, + MLX5_RX_MPRQ_LOG_STRIDE_SIZE, + MLX5_RX_MPRQ_MAX_MEMCPY_LEN, + MLX5_RXQS_MIN_MPRQ, + MLX5_TXQ_INLINE, + MLX5_TXQ_INLINE_MIN, + MLX5_TXQ_INLINE_MAX, + MLX5_TXQ_INLINE_MPW, + MLX5_TXQS_MIN_INLINE, + MLX5_TXQS_MAX_VEC, + MLX5_TXQ_MPW_EN, + MLX5_TXQ_MPW_HDR_DSEG_EN, + MLX5_TXQ_MAX_INLINE_LEN, + MLX5_TX_DB_NC, + MLX5_TX_VEC_EN, + MLX5_RX_VEC_EN, + MLX5_L3_VXLAN_EN, + MLX5_VF_NL_EN, + MLX5_DV_ESW_EN, + MLX5_DV_FLOW_EN, + MLX5_DV_XMETA_EN, + MLX5_MR_EXT_MEMSEG_EN, + MLX5_REPRESENTOR, + MLX5_MAX_DUMP_FILES_NUM, + MLX5_LRO_TIMEOUT_USEC, + MLX5_CLASS_ARG_NAME, + MLX5_HP_BUF_SIZE, + NULL, + }; + struct rte_kvargs *kvlist; + int ret = 0; + int i; + + if (devargs == NULL) + return 0; + /* Following UGLY cast is done to pass checkpatch. */ + kvlist = rte_kvargs_parse(devargs->args, params); + if (kvlist == NULL) { + rte_errno = EINVAL; + return -rte_errno; + } + /* Process parameters. */ + for (i = 0; (params[i] != NULL); ++i) { + if (rte_kvargs_count(kvlist, params[i])) { + ret = rte_kvargs_process(kvlist, params[i], + mlx5_args_check, config); + if (ret) { + rte_errno = EINVAL; + rte_kvargs_free(kvlist); + return -rte_errno; + } + } + } + rte_kvargs_free(kvlist); + return 0; +} + +static struct rte_pci_driver mlx5_driver; + +/** + * PMD global initialization. + * + * Independent from individual device, this function initializes global + * per-PMD data structures distinguishing primary and secondary processes. + * Hence, each initialization is called once per a process. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +mlx5_init_once(void) +{ + struct mlx5_shared_data *sd; + struct mlx5_local_data *ld = &mlx5_local_data; + int ret = 0; + + if (mlx5_init_shared_data()) + return -rte_errno; + sd = mlx5_shared_data; + MLX5_ASSERT(sd); + rte_spinlock_lock(&sd->lock); + switch (rte_eal_process_type()) { + case RTE_PROC_PRIMARY: + if (sd->init_done) + break; + LIST_INIT(&sd->mem_event_cb_list); + rte_rwlock_init(&sd->mem_event_rwlock); + rte_mem_event_callback_register("MLX5_MEM_EVENT_CB", + mlx5_mr_mem_event_cb, NULL); + ret = mlx5_mp_init_primary(MLX5_MP_NAME, + mlx5_mp_primary_handle); + if (ret) + goto out; + sd->init_done = true; + break; + case RTE_PROC_SECONDARY: + if (ld->init_done) + break; + ret = mlx5_mp_init_secondary(MLX5_MP_NAME, + mlx5_mp_secondary_handle); + if (ret) + goto out; + ++sd->secondary_cnt; + ld->init_done = true; + break; + default: + break; + } +out: + rte_spinlock_unlock(&sd->lock); + return ret; +} + +/** + * Configures the minimal amount of data to inline into WQE + * while sending packets. + * + * - the txq_inline_min has the maximal priority, if this + * key is specified in devargs + * - if DevX is enabled the inline mode is queried from the + * device (HCA attributes and NIC vport context if needed). + * - otherwise L2 mode (18 bytes) is assumed for ConnectX-4/4 Lx + * and none (0 bytes) for other NICs + * + * @param spawn + * Verbs device parameters (name, port, switch_info) to spawn. 
+ * @param config + * Device configuration parameters. + */ +static void +mlx5_set_min_inline(struct mlx5_dev_spawn_data *spawn, + struct mlx5_dev_config *config) +{ + if (config->txq_inline_min != MLX5_ARG_UNSET) { + /* Application defines size of inlined data explicitly. */ + switch (spawn->pci_dev->id.device_id) { + case PCI_DEVICE_ID_MELLANOX_CONNECTX4: + case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF: + if (config->txq_inline_min < + (int)MLX5_INLINE_HSIZE_L2) { + DRV_LOG(DEBUG, + "txq_inline_mix aligned to minimal" + " ConnectX-4 required value %d", + (int)MLX5_INLINE_HSIZE_L2); + config->txq_inline_min = MLX5_INLINE_HSIZE_L2; + } + break; + } + goto exit; + } + if (config->hca_attr.eth_net_offloads) { + /* We have DevX enabled, inline mode queried successfully. */ + switch (config->hca_attr.wqe_inline_mode) { + case MLX5_CAP_INLINE_MODE_L2: + /* outer L2 header must be inlined. */ + config->txq_inline_min = MLX5_INLINE_HSIZE_L2; + goto exit; + case MLX5_CAP_INLINE_MODE_NOT_REQUIRED: + /* No inline data are required by NIC. */ + config->txq_inline_min = MLX5_INLINE_HSIZE_NONE; + config->hw_vlan_insert = + config->hca_attr.wqe_vlan_insert; + DRV_LOG(DEBUG, "Tx VLAN insertion is supported"); + goto exit; + case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT: + /* inline mode is defined by NIC vport context. */ + if (!config->hca_attr.eth_virt) + break; + switch (config->hca_attr.vport_inline_mode) { + case MLX5_INLINE_MODE_NONE: + config->txq_inline_min = + MLX5_INLINE_HSIZE_NONE; + goto exit; + case MLX5_INLINE_MODE_L2: + config->txq_inline_min = + MLX5_INLINE_HSIZE_L2; + goto exit; + case MLX5_INLINE_MODE_IP: + config->txq_inline_min = + MLX5_INLINE_HSIZE_L3; + goto exit; + case MLX5_INLINE_MODE_TCP_UDP: + config->txq_inline_min = + MLX5_INLINE_HSIZE_L4; + goto exit; + case MLX5_INLINE_MODE_INNER_L2: + config->txq_inline_min = + MLX5_INLINE_HSIZE_INNER_L2; + goto exit; + case MLX5_INLINE_MODE_INNER_IP: + config->txq_inline_min = + MLX5_INLINE_HSIZE_INNER_L3; + goto exit; + case MLX5_INLINE_MODE_INNER_TCP_UDP: + config->txq_inline_min = + MLX5_INLINE_HSIZE_INNER_L4; + goto exit; + } + } + } + /* + * We get here if we are unable to deduce + * inline data size with DevX. Try PCI ID + * to determine old NICs. + */ + switch (spawn->pci_dev->id.device_id) { + case PCI_DEVICE_ID_MELLANOX_CONNECTX4: + case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF: + case PCI_DEVICE_ID_MELLANOX_CONNECTX4LX: + case PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF: + config->txq_inline_min = MLX5_INLINE_HSIZE_L2; + config->hw_vlan_insert = 0; + break; + case PCI_DEVICE_ID_MELLANOX_CONNECTX5: + case PCI_DEVICE_ID_MELLANOX_CONNECTX5VF: + case PCI_DEVICE_ID_MELLANOX_CONNECTX5EX: + case PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF: + /* + * These NICs support VLAN insertion from WQE and + * report the wqe_vlan_insert flag. But there is the bug + * and PFC control may be broken, so disable feature. + */ + config->hw_vlan_insert = 0; + config->txq_inline_min = MLX5_INLINE_HSIZE_NONE; + break; + default: + config->txq_inline_min = MLX5_INLINE_HSIZE_NONE; + break; + } +exit: + DRV_LOG(DEBUG, "min tx inline configured: %d", config->txq_inline_min); +} + +/** + * Configures the metadata mask fields in the shared context. + * + * @param [in] dev + * Pointer to Ethernet device. 
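 *
 * [Editor's note - not part of the upstream patch] Worked example for the
 * MLX5_XMETA_MODE_META16 branch below, with an assumed (illustrative)
 * vport_meta_mask of 0x0000ffff:
 *
 *   reg_c0 = ~0x0000ffff           = 0xffff0000
 *   meta   = reg_c0 >> rte_bsf32(reg_c0)
 *          = 0xffff0000 >> 16      = 0x0000ffff
 *   mark   = MLX5_FLOW_MARK_MASK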
+ */ +static void +mlx5_set_metadata_mask(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_ibv_shared *sh = priv->sh; + uint32_t meta, mark, reg_c0; + + reg_c0 = ~priv->vport_meta_mask; + switch (priv->config.dv_xmeta_en) { + case MLX5_XMETA_MODE_LEGACY: + meta = UINT32_MAX; + mark = MLX5_FLOW_MARK_MASK; + break; + case MLX5_XMETA_MODE_META16: + meta = reg_c0 >> rte_bsf32(reg_c0); + mark = MLX5_FLOW_MARK_MASK; + break; + case MLX5_XMETA_MODE_META32: + meta = UINT32_MAX; + mark = (reg_c0 >> rte_bsf32(reg_c0)) & MLX5_FLOW_MARK_MASK; + break; + default: + meta = 0; + mark = 0; + MLX5_ASSERT(false); + break; + } + if (sh->dv_mark_mask && sh->dv_mark_mask != mark) + DRV_LOG(WARNING, "metadata MARK mask mismatche %08X:%08X", + sh->dv_mark_mask, mark); + else + sh->dv_mark_mask = mark; + if (sh->dv_meta_mask && sh->dv_meta_mask != meta) + DRV_LOG(WARNING, "metadata META mask mismatche %08X:%08X", + sh->dv_meta_mask, meta); + else + sh->dv_meta_mask = meta; + if (sh->dv_regc0_mask && sh->dv_regc0_mask != reg_c0) + DRV_LOG(WARNING, "metadata reg_c0 mask mismatche %08X:%08X", + sh->dv_meta_mask, reg_c0); + else + sh->dv_regc0_mask = reg_c0; + DRV_LOG(DEBUG, "metadata mode %u", priv->config.dv_xmeta_en); + DRV_LOG(DEBUG, "metadata MARK mask %08X", sh->dv_mark_mask); + DRV_LOG(DEBUG, "metadata META mask %08X", sh->dv_meta_mask); + DRV_LOG(DEBUG, "metadata reg_c0 mask %08X", sh->dv_regc0_mask); +} + +/** + * Allocate page of door-bells and register it using DevX API. + * + * @param [in] dev + * Pointer to Ethernet device. + * + * @return + * Pointer to new page on success, NULL otherwise. + */ +static struct mlx5_devx_dbr_page * +mlx5_alloc_dbr_page(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_devx_dbr_page *page; + + /* Allocate space for door-bell page and management data. */ + page = rte_calloc_socket(__func__, 1, sizeof(struct mlx5_devx_dbr_page), + RTE_CACHE_LINE_SIZE, dev->device->numa_node); + if (!page) { + DRV_LOG(ERR, "port %u cannot allocate dbr page", + dev->data->port_id); + return NULL; + } + /* Register allocated memory. */ + page->umem = mlx5_glue->devx_umem_reg(priv->sh->ctx, page->dbrs, + MLX5_DBR_PAGE_SIZE, 0); + if (!page->umem) { + DRV_LOG(ERR, "port %u cannot umem reg dbr page", + dev->data->port_id); + rte_free(page); + return NULL; + } + return page; +} + +/** + * Find the next available door-bell, allocate new page if needed. + * + * @param [in] dev + * Pointer to Ethernet device. + * @param [out] dbr_page + * Door-bell page containing the page data. + * + * @return + * Door-bell address offset on success, a negative error value otherwise. + */ +int64_t +mlx5_get_dbr(struct rte_eth_dev *dev, struct mlx5_devx_dbr_page **dbr_page) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_devx_dbr_page *page = NULL; + uint32_t i, j; + + LIST_FOREACH(page, &priv->dbrpgs, next) + if (page->dbr_count < MLX5_DBR_PER_PAGE) + break; + if (!page) { /* No page with free door-bell exists. */ + page = mlx5_alloc_dbr_page(dev); + if (!page) /* Failed to allocate new page. */ + return (-1); + LIST_INSERT_HEAD(&priv->dbrpgs, page, next); + } + /* Loop to find bitmap part with clear bit. */ + for (i = 0; + i < MLX5_DBR_BITMAP_SIZE && page->dbr_bitmap[i] == UINT64_MAX; + i++) + ; /* Empty. */ + /* Find the first clear bit. 
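 *
 * [Editor's note - not part of the upstream patch] Worked example for the
 * offset returned below: if the first bitmap word with a clear bit is i == 1
 * and its lowest clear bit is j == 5, the record index is 1 * 64 + 5 = 69 and
 * the returned byte offset is 69 * sizeof(uint64_t) = 552, i.e. door-bell
 * records are laid out as consecutive 8-byte slots within the page.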
*/ + MLX5_ASSERT(i < MLX5_DBR_BITMAP_SIZE); + j = rte_bsf64(~page->dbr_bitmap[i]); + page->dbr_bitmap[i] |= (UINT64_C(1) << j); + page->dbr_count++; + *dbr_page = page; + return (((i * 64) + j) * sizeof(uint64_t)); +} + +/** + * Release a door-bell record. + * + * @param [in] dev + * Pointer to Ethernet device. + * @param [in] umem_id + * UMEM ID of page containing the door-bell record to release. + * @param [in] offset + * Offset of door-bell record in page. + * + * @return + * 0 on success, a negative error value otherwise. + */ +int32_t +mlx5_release_dbr(struct rte_eth_dev *dev, uint32_t umem_id, uint64_t offset) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_devx_dbr_page *page = NULL; + int ret = 0; + + LIST_FOREACH(page, &priv->dbrpgs, next) + /* Find the page this address belongs to. */ + if (page->umem->umem_id == umem_id) + break; + if (!page) + return -EINVAL; + page->dbr_count--; + if (!page->dbr_count) { + /* Page not used, free it and remove from list. */ + LIST_REMOVE(page, next); + if (page->umem) + ret = -mlx5_glue->devx_umem_dereg(page->umem); + rte_free(page); + } else { + /* Mark in bitmap that this door-bell is not in use. */ + offset /= MLX5_DBR_SIZE; + int i = offset / 64; + int j = offset % 64; + + page->dbr_bitmap[i] &= ~(UINT64_C(1) << j); + } + return ret; +} + +int +rte_pmd_mlx5_get_dyn_flag_names(char *names[], unsigned int n) +{ + static const char *const dynf_names[] = { + RTE_PMD_MLX5_FINE_GRANULARITY_INLINE, + RTE_MBUF_DYNFLAG_METADATA_NAME + }; + unsigned int i; + + if (n < RTE_DIM(dynf_names)) + return -ENOMEM; + for (i = 0; i < RTE_DIM(dynf_names); i++) { + if (names[i] == NULL) + return -EINVAL; + strcpy(names[i], dynf_names[i]); + } + return RTE_DIM(dynf_names); +} + +/** + * Check sibling device configurations. + * + * Sibling devices sharing the Infiniband device context + * should have compatible configurations. This regards + * representors and bonding slaves. + * + * @param priv + * Private device descriptor. + * @param config + * Configuration of the device is going to be created. + * + * @return + * 0 on success, EINVAL otherwise + */ +static int +mlx5_dev_check_sibling_config(struct mlx5_priv *priv, + struct mlx5_dev_config *config) +{ + struct mlx5_ibv_shared *sh = priv->sh; + struct mlx5_dev_config *sh_conf = NULL; + uint16_t port_id; + + MLX5_ASSERT(sh); + /* Nothing to compare for the single/first device. */ + if (sh->refcnt == 1) + return 0; + /* Find the device with shared context. */ + MLX5_ETH_FOREACH_DEV(port_id, priv->pci_dev) { + struct mlx5_priv *opriv = + rte_eth_devices[port_id].data->dev_private; + + if (opriv && opriv != priv && opriv->sh == sh) { + sh_conf = &opriv->config; + break; + } + } + if (!sh_conf) + return 0; + if (sh_conf->dv_flow_en ^ config->dv_flow_en) { + DRV_LOG(ERR, "\"dv_flow_en\" configuration mismatch" + " for shared %s context", sh->ibdev_name); + rte_errno = EINVAL; + return rte_errno; + } + if (sh_conf->dv_xmeta_en ^ config->dv_xmeta_en) { + DRV_LOG(ERR, "\"dv_xmeta_en\" configuration mismatch" + " for shared %s context", sh->ibdev_name); + rte_errno = EINVAL; + return rte_errno; + } + return 0; +} +/** + * Spawn an Ethernet device from Verbs information. + * + * @param dpdk_dev + * Backing DPDK device. + * @param spawn + * Verbs device parameters (name, port, switch_info) to spawn. + * @param config + * Device configuration parameters. + * + * @return + * A valid Ethernet device object on success, NULL otherwise and rte_errno + * is set. 
The following errors are defined: + * + * EBUSY: device is not supposed to be spawned. + * EEXIST: device is already spawned + */ +static struct rte_eth_dev * +mlx5_dev_spawn(struct rte_device *dpdk_dev, + struct mlx5_dev_spawn_data *spawn, + struct mlx5_dev_config config) +{ + const struct mlx5_switch_info *switch_info = &spawn->info; + struct mlx5_ibv_shared *sh = NULL; + struct ibv_port_attr port_attr; + struct mlx5dv_context dv_attr = { .comp_mask = 0 }; + struct rte_eth_dev *eth_dev = NULL; + struct mlx5_priv *priv = NULL; + int err = 0; + unsigned int hw_padding = 0; + unsigned int mps; + unsigned int cqe_comp; + unsigned int cqe_pad = 0; + unsigned int tunnel_en = 0; + unsigned int mpls_en = 0; + unsigned int swp = 0; + unsigned int mprq = 0; + unsigned int mprq_min_stride_size_n = 0; + unsigned int mprq_max_stride_size_n = 0; + unsigned int mprq_min_stride_num_n = 0; + unsigned int mprq_max_stride_num_n = 0; + struct rte_ether_addr mac; + char name[RTE_ETH_NAME_MAX_LEN]; + int own_domain_id = 0; + uint16_t port_id; + unsigned int i; +#ifdef HAVE_MLX5DV_DR_DEVX_PORT + struct mlx5dv_devx_port devx_port = { .comp_mask = 0 }; +#endif + + /* Determine if this port representor is supposed to be spawned. */ + if (switch_info->representor && dpdk_dev->devargs) { + struct rte_eth_devargs eth_da; + + err = rte_eth_devargs_parse(dpdk_dev->devargs->args, ð_da); + if (err) { + rte_errno = -err; + DRV_LOG(ERR, "failed to process device arguments: %s", + strerror(rte_errno)); + return NULL; + } + for (i = 0; i < eth_da.nb_representor_ports; ++i) + if (eth_da.representor_ports[i] == + (uint16_t)switch_info->port_name) + break; + if (i == eth_da.nb_representor_ports) { + rte_errno = EBUSY; + return NULL; + } + } + /* Build device name. */ + if (spawn->pf_bond < 0) { + /* Single device. */ + if (!switch_info->representor) + strlcpy(name, dpdk_dev->name, sizeof(name)); + else + snprintf(name, sizeof(name), "%s_representor_%u", + dpdk_dev->name, switch_info->port_name); + } else { + /* Bonding device. */ + if (!switch_info->representor) + snprintf(name, sizeof(name), "%s_%s", + dpdk_dev->name, spawn->ibv_dev->name); + else + snprintf(name, sizeof(name), "%s_%s_representor_%u", + dpdk_dev->name, spawn->ibv_dev->name, + switch_info->port_name); + } + /* check if the device is already spawned */ + if (rte_eth_dev_get_port_by_name(name, &port_id) == 0) { + rte_errno = EEXIST; + return NULL; + } + DRV_LOG(DEBUG, "naming Ethernet device \"%s\"", name); + if (rte_eal_process_type() == RTE_PROC_SECONDARY) { + struct mlx5_mp_id mp_id; + + eth_dev = rte_eth_dev_attach_secondary(name); + if (eth_dev == NULL) { + DRV_LOG(ERR, "can not attach rte ethdev"); + rte_errno = ENOMEM; + return NULL; + } + eth_dev->device = dpdk_dev; + eth_dev->dev_ops = &mlx5_dev_sec_ops; + err = mlx5_proc_priv_init(eth_dev); + if (err) + return NULL; + mp_id.port_id = eth_dev->data->port_id; + strlcpy(mp_id.name, MLX5_MP_NAME, RTE_MP_MAX_NAME_LEN); + /* Receive command fd from primary process */ + err = mlx5_mp_req_verbs_cmd_fd(&mp_id); + if (err < 0) + return NULL; + /* Remap UAR for Tx queues. */ + err = mlx5_tx_uar_init_secondary(eth_dev, err); + if (err) + return NULL; + /* + * Ethdev pointer is still required as input since + * the primary device is not accessible from the + * secondary process. 
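+ * As in other DPDK multi-process PMDs, the Rx/Tx burst pointers
+ * selected just below are resolved per process: function addresses
+ * are only valid inside the process that loaded the PMD, so the
+ * secondary cannot reuse the primary's selection.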
+ */ + eth_dev->rx_pkt_burst = mlx5_select_rx_function(eth_dev); + eth_dev->tx_pkt_burst = mlx5_select_tx_function(eth_dev); + return eth_dev; + } + /* + * Some parameters ("tx_db_nc" in particularly) are needed in + * advance to create dv/verbs device context. We proceed the + * devargs here to get ones, and later proceed devargs again + * to override some hardware settings. + */ + err = mlx5_args(&config, dpdk_dev->devargs); + if (err) { + err = rte_errno; + DRV_LOG(ERR, "failed to process device arguments: %s", + strerror(rte_errno)); + goto error; + } + sh = mlx5_alloc_shared_ibctx(spawn, &config); + if (!sh) + return NULL; + config.devx = sh->devx; +#ifdef HAVE_MLX5DV_DR_ACTION_DEST_DEVX_TIR + config.dest_tir = 1; +#endif +#ifdef HAVE_IBV_MLX5_MOD_SWP + dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_SWP; +#endif + /* + * Multi-packet send is supported by ConnectX-4 Lx PF as well + * as all ConnectX-5 devices. + */ +#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT + dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS; +#endif +#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT + dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_STRIDING_RQ; +#endif + mlx5_glue->dv_query_device(sh->ctx, &dv_attr); + if (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED) { + if (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW) { + DRV_LOG(DEBUG, "enhanced MPW is supported"); + mps = MLX5_MPW_ENHANCED; + } else { + DRV_LOG(DEBUG, "MPW is supported"); + mps = MLX5_MPW; + } + } else { + DRV_LOG(DEBUG, "MPW isn't supported"); + mps = MLX5_MPW_DISABLED; + } +#ifdef HAVE_IBV_MLX5_MOD_SWP + if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_SWP) + swp = dv_attr.sw_parsing_caps.sw_parsing_offloads; + DRV_LOG(DEBUG, "SWP support: %u", swp); +#endif + config.swp = !!swp; +#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT + if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_STRIDING_RQ) { + struct mlx5dv_striding_rq_caps mprq_caps = + dv_attr.striding_rq_caps; + + DRV_LOG(DEBUG, "\tmin_single_stride_log_num_of_bytes: %d", + mprq_caps.min_single_stride_log_num_of_bytes); + DRV_LOG(DEBUG, "\tmax_single_stride_log_num_of_bytes: %d", + mprq_caps.max_single_stride_log_num_of_bytes); + DRV_LOG(DEBUG, "\tmin_single_wqe_log_num_of_strides: %d", + mprq_caps.min_single_wqe_log_num_of_strides); + DRV_LOG(DEBUG, "\tmax_single_wqe_log_num_of_strides: %d", + mprq_caps.max_single_wqe_log_num_of_strides); + DRV_LOG(DEBUG, "\tsupported_qpts: %d", + mprq_caps.supported_qpts); + DRV_LOG(DEBUG, "device supports Multi-Packet RQ"); + mprq = 1; + mprq_min_stride_size_n = + mprq_caps.min_single_stride_log_num_of_bytes; + mprq_max_stride_size_n = + mprq_caps.max_single_stride_log_num_of_bytes; + mprq_min_stride_num_n = + mprq_caps.min_single_wqe_log_num_of_strides; + mprq_max_stride_num_n = + mprq_caps.max_single_wqe_log_num_of_strides; + } +#endif + if (RTE_CACHE_LINE_SIZE == 128 && + !(dv_attr.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP)) + cqe_comp = 0; + else + cqe_comp = 1; + config.cqe_comp = cqe_comp; +#ifdef HAVE_IBV_MLX5_MOD_CQE_128B_PAD + /* Whether device supports 128B Rx CQE padding. 
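+ * The padding is only meaningful on 128-byte cache line targets,
+ * where it keeps two 64-byte CQEs from sharing one cache line;
+ * hence the RTE_CACHE_LINE_SIZE check in the expression below.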
*/ + cqe_pad = RTE_CACHE_LINE_SIZE == 128 && + (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_PAD); +#endif +#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT + if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS) { + tunnel_en = ((dv_attr.tunnel_offloads_caps & + MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_VXLAN) && + (dv_attr.tunnel_offloads_caps & + MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GRE) && + (dv_attr.tunnel_offloads_caps & + MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GENEVE)); + } + DRV_LOG(DEBUG, "tunnel offloading is %ssupported", + tunnel_en ? "" : "not "); +#else + DRV_LOG(WARNING, + "tunnel offloading disabled due to old OFED/rdma-core version"); +#endif + config.tunnel_en = tunnel_en; +#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT + mpls_en = ((dv_attr.tunnel_offloads_caps & + MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CW_MPLS_OVER_GRE) && + (dv_attr.tunnel_offloads_caps & + MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CW_MPLS_OVER_UDP)); + DRV_LOG(DEBUG, "MPLS over GRE/UDP tunnel offloading is %ssupported", + mpls_en ? "" : "not "); +#else + DRV_LOG(WARNING, "MPLS over GRE/UDP tunnel offloading disabled due to" + " old OFED/rdma-core version or firmware configuration"); +#endif + config.mpls_en = mpls_en; + /* Check port status. */ + err = mlx5_glue->query_port(sh->ctx, spawn->ibv_port, &port_attr); + if (err) { + DRV_LOG(ERR, "port query failed: %s", strerror(err)); + goto error; + } + if (port_attr.link_layer != IBV_LINK_LAYER_ETHERNET) { + DRV_LOG(ERR, "port is not configured in Ethernet mode"); + err = EINVAL; + goto error; + } + if (port_attr.state != IBV_PORT_ACTIVE) + DRV_LOG(DEBUG, "port is not active: \"%s\" (%d)", + mlx5_glue->port_state_str(port_attr.state), + port_attr.state); + /* Allocate private eth device data. */ + priv = rte_zmalloc("ethdev private structure", + sizeof(*priv), + RTE_CACHE_LINE_SIZE); + if (priv == NULL) { + DRV_LOG(ERR, "priv allocation failure"); + err = ENOMEM; + goto error; + } + priv->sh = sh; + priv->ibv_port = spawn->ibv_port; + priv->pci_dev = spawn->pci_dev; + priv->mtu = RTE_ETHER_MTU; + priv->mp_id.port_id = port_id; + strlcpy(priv->mp_id.name, MLX5_MP_NAME, RTE_MP_MAX_NAME_LEN); +#ifndef RTE_ARCH_64 + /* Initialize UAR access locks for 32bit implementations. */ + rte_spinlock_init(&priv->uar_lock_cq); + for (i = 0; i < MLX5_UAR_PAGE_NUM_MAX; i++) + rte_spinlock_init(&priv->uar_lock[i]); +#endif + /* Some internal functions rely on Netlink sockets, open them now. */ + priv->nl_socket_rdma = mlx5_nl_init(NETLINK_RDMA); + priv->nl_socket_route = mlx5_nl_init(NETLINK_ROUTE); + priv->representor = !!switch_info->representor; + priv->master = !!switch_info->master; + priv->domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID; + priv->vport_meta_tag = 0; + priv->vport_meta_mask = 0; + priv->pf_bond = spawn->pf_bond; +#ifdef HAVE_MLX5DV_DR_DEVX_PORT + /* + * The DevX port query API is implemented. E-Switch may use + * either vport or reg_c[0] metadata register to match on + * vport index. The engaged part of metadata register is + * defined by mask. 
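+ * Illustrative example with a hypothetical mask: if the kernel
+ * reports reg_c_0.mask == 0xffff0000, the vport match tag occupies
+ * the upper 16 bits of REG_C_0, and mlx5_set_metadata_mask() later
+ * leaves the remaining lower bits (~mask) to the metadata features.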
+ */ + if (switch_info->representor || switch_info->master) { + devx_port.comp_mask = MLX5DV_DEVX_PORT_VPORT | + MLX5DV_DEVX_PORT_MATCH_REG_C_0; + err = mlx5_glue->devx_port_query(sh->ctx, spawn->ibv_port, + &devx_port); + if (err) { + DRV_LOG(WARNING, + "can't query devx port %d on device %s", + spawn->ibv_port, spawn->ibv_dev->name); + devx_port.comp_mask = 0; + } + } + if (devx_port.comp_mask & MLX5DV_DEVX_PORT_MATCH_REG_C_0) { + priv->vport_meta_tag = devx_port.reg_c_0.value; + priv->vport_meta_mask = devx_port.reg_c_0.mask; + if (!priv->vport_meta_mask) { + DRV_LOG(ERR, "vport zero mask for port %d" + " on bonding device %s", + spawn->ibv_port, spawn->ibv_dev->name); + err = ENOTSUP; + goto error; + } + if (priv->vport_meta_tag & ~priv->vport_meta_mask) { + DRV_LOG(ERR, "invalid vport tag for port %d" + " on bonding device %s", + spawn->ibv_port, spawn->ibv_dev->name); + err = ENOTSUP; + goto error; + } + } + if (devx_port.comp_mask & MLX5DV_DEVX_PORT_VPORT) { + priv->vport_id = devx_port.vport_num; + } else if (spawn->pf_bond >= 0) { + DRV_LOG(ERR, "can't deduce vport index for port %d" + " on bonding device %s", + spawn->ibv_port, spawn->ibv_dev->name); + err = ENOTSUP; + goto error; + } else { + /* Suppose vport index in compatible way. */ + priv->vport_id = switch_info->representor ? + switch_info->port_name + 1 : -1; + } +#else + /* + * Kernel/rdma_core support single E-Switch per PF configurations + * only and vport_id field contains the vport index for + * associated VF, which is deduced from representor port name. + * For example, let's have the IB device port 10, it has + * attached network device eth0, which has port name attribute + * pf0vf2, we can deduce the VF number as 2, and set vport index + * as 3 (2+1). This assigning schema should be changed if the + * multiple E-Switch instances per PF configurations or/and PCI + * subfunctions are added. + */ + priv->vport_id = switch_info->representor ? + switch_info->port_name + 1 : -1; +#endif + /* representor_id field keeps the unmodified VF index. */ + priv->representor_id = switch_info->representor ? + switch_info->port_name : -1; + /* + * Look for sibling devices in order to reuse their switch domain + * if any, otherwise allocate one. + */ + MLX5_ETH_FOREACH_DEV(port_id, priv->pci_dev) { + const struct mlx5_priv *opriv = + rte_eth_devices[port_id].data->dev_private; + + if (!opriv || + opriv->sh != priv->sh || + opriv->domain_id == + RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) + continue; + priv->domain_id = opriv->domain_id; + break; + } + if (priv->domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) { + err = rte_eth_switch_domain_alloc(&priv->domain_id); + if (err) { + err = rte_errno; + DRV_LOG(ERR, "unable to allocate switch domain: %s", + strerror(rte_errno)); + goto error; + } + own_domain_id = 1; + } + /* Override some values set by hardware configuration. */ + mlx5_args(&config, dpdk_dev->devargs); + err = mlx5_dev_check_sibling_config(priv, &config); + if (err) + goto error; + config.hw_csum = !!(sh->device_attr.device_cap_flags_ex & + IBV_DEVICE_RAW_IP_CSUM); + DRV_LOG(DEBUG, "checksum offloading is %ssupported", + (config.hw_csum ? 
"" : "not ")); +#if !defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) && \ + !defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45) + DRV_LOG(DEBUG, "counters are not supported"); +#endif +#if !defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_MLX5DV_DR) + if (config.dv_flow_en) { + DRV_LOG(WARNING, "DV flow is not supported"); + config.dv_flow_en = 0; + } +#endif + config.ind_table_max_size = + sh->device_attr.rss_caps.max_rwq_indirection_table_size; + /* + * Remove this check once DPDK supports larger/variable + * indirection tables. + */ + if (config.ind_table_max_size > (unsigned int)ETH_RSS_RETA_SIZE_512) + config.ind_table_max_size = ETH_RSS_RETA_SIZE_512; + DRV_LOG(DEBUG, "maximum Rx indirection table size is %u", + config.ind_table_max_size); + config.hw_vlan_strip = !!(sh->device_attr.raw_packet_caps & + IBV_RAW_PACKET_CAP_CVLAN_STRIPPING); + DRV_LOG(DEBUG, "VLAN stripping is %ssupported", + (config.hw_vlan_strip ? "" : "not ")); + config.hw_fcs_strip = !!(sh->device_attr.raw_packet_caps & + IBV_RAW_PACKET_CAP_SCATTER_FCS); + DRV_LOG(DEBUG, "FCS stripping configuration is %ssupported", + (config.hw_fcs_strip ? "" : "not ")); +#if defined(HAVE_IBV_WQ_FLAG_RX_END_PADDING) + hw_padding = !!sh->device_attr.rx_pad_end_addr_align; +#elif defined(HAVE_IBV_WQ_FLAGS_PCI_WRITE_END_PADDING) + hw_padding = !!(sh->device_attr.device_cap_flags_ex & + IBV_DEVICE_PCI_WRITE_END_PADDING); +#endif + if (config.hw_padding && !hw_padding) { + DRV_LOG(DEBUG, "Rx end alignment padding isn't supported"); + config.hw_padding = 0; + } else if (config.hw_padding) { + DRV_LOG(DEBUG, "Rx end alignment padding is enabled"); + } + config.tso = (sh->device_attr.tso_caps.max_tso > 0 && + (sh->device_attr.tso_caps.supported_qpts & + (1 << IBV_QPT_RAW_PACKET))); + if (config.tso) + config.tso_max_payload_sz = sh->device_attr.tso_caps.max_tso; + /* + * MPW is disabled by default, while the Enhanced MPW is enabled + * by default. + */ + if (config.mps == MLX5_ARG_UNSET) + config.mps = (mps == MLX5_MPW_ENHANCED) ? MLX5_MPW_ENHANCED : + MLX5_MPW_DISABLED; + else + config.mps = config.mps ? mps : MLX5_MPW_DISABLED; + DRV_LOG(INFO, "%sMPS is %s", + config.mps == MLX5_MPW_ENHANCED ? "enhanced " : + config.mps == MLX5_MPW ? "legacy " : "", + config.mps != MLX5_MPW_DISABLED ? "enabled" : "disabled"); + if (config.cqe_comp && !cqe_comp) { + DRV_LOG(WARNING, "Rx CQE compression isn't supported"); + config.cqe_comp = 0; + } + if (config.cqe_pad && !cqe_pad) { + DRV_LOG(WARNING, "Rx CQE padding isn't supported"); + config.cqe_pad = 0; + } else if (config.cqe_pad) { + DRV_LOG(INFO, "Rx CQE padding is enabled"); + } + if (config.devx) { + priv->counter_fallback = 0; + err = mlx5_devx_cmd_query_hca_attr(sh->ctx, &config.hca_attr); + if (err) { + err = -err; + goto error; + } + if (!config.hca_attr.flow_counters_dump) + priv->counter_fallback = 1; +#ifndef HAVE_IBV_DEVX_ASYNC + priv->counter_fallback = 1; +#endif + if (priv->counter_fallback) + DRV_LOG(INFO, "Use fall-back DV counter management"); + /* Check for LRO support. */ + if (config.dest_tir && config.hca_attr.lro_cap && + config.dv_flow_en) { + /* TBD check tunnel lro caps. */ + config.lro.supported = config.hca_attr.lro_cap; + DRV_LOG(DEBUG, "Device supports LRO"); + /* + * If LRO timeout is not configured by application, + * use the minimal supported value. 
+ */ + if (!config.lro.timeout) + config.lro.timeout = + config.hca_attr.lro_timer_supported_periods[0]; + DRV_LOG(DEBUG, "LRO session timeout set to %d usec", + config.lro.timeout); + } +#if defined(HAVE_MLX5DV_DR) && defined(HAVE_MLX5_DR_CREATE_ACTION_FLOW_METER) + if (config.hca_attr.qos.sup && config.hca_attr.qos.srtcm_sup && + config.dv_flow_en) { + uint8_t reg_c_mask = + config.hca_attr.qos.flow_meter_reg_c_ids; + /* + * Meter needs two REG_C's for color match and pre-sfx + * flow match. Here get the REG_C for color match. + * REG_C_0 and REG_C_1 is reserved for metadata feature. + */ + reg_c_mask &= 0xfc; + if (__builtin_popcount(reg_c_mask) < 1) { + priv->mtr_en = 0; + DRV_LOG(WARNING, "No available register for" + " meter."); + } else { + priv->mtr_color_reg = ffs(reg_c_mask) - 1 + + REG_C_0; + priv->mtr_en = 1; + priv->mtr_reg_share = + config.hca_attr.qos.flow_meter_reg_share; + DRV_LOG(DEBUG, "The REG_C meter uses is %d", + priv->mtr_color_reg); + } + } +#endif + } + if (config.mprq.enabled && mprq) { + if (config.mprq.stride_num_n && + (config.mprq.stride_num_n > mprq_max_stride_num_n || + config.mprq.stride_num_n < mprq_min_stride_num_n)) { + config.mprq.stride_num_n = + RTE_MIN(RTE_MAX(MLX5_MPRQ_STRIDE_NUM_N, + mprq_min_stride_num_n), + mprq_max_stride_num_n); + DRV_LOG(WARNING, + "the number of strides" + " for Multi-Packet RQ is out of range," + " setting default value (%u)", + 1 << config.mprq.stride_num_n); + } + if (config.mprq.stride_size_n && + (config.mprq.stride_size_n > mprq_max_stride_size_n || + config.mprq.stride_size_n < mprq_min_stride_size_n)) { + config.mprq.stride_size_n = + RTE_MIN(RTE_MAX(MLX5_MPRQ_STRIDE_SIZE_N, + mprq_min_stride_size_n), + mprq_max_stride_size_n); + DRV_LOG(WARNING, + "the size of a stride" + " for Multi-Packet RQ is out of range," + " setting default value (%u)", + 1 << config.mprq.stride_size_n); + } + config.mprq.min_stride_size_n = mprq_min_stride_size_n; + config.mprq.max_stride_size_n = mprq_max_stride_size_n; + } else if (config.mprq.enabled && !mprq) { + DRV_LOG(WARNING, "Multi-Packet RQ isn't supported"); + config.mprq.enabled = 0; + } + if (config.max_dump_files_num == 0) + config.max_dump_files_num = 128; + eth_dev = rte_eth_dev_allocate(name); + if (eth_dev == NULL) { + DRV_LOG(ERR, "can not allocate rte ethdev"); + err = ENOMEM; + goto error; + } + /* Flag to call rte_eth_dev_release_port() in rte_eth_dev_close(). */ + eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE; + if (priv->representor) { + eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR; + eth_dev->data->representor_id = priv->representor_id; + } + /* + * Store associated network device interface index. This index + * is permanent throughout the lifetime of device. So, we may store + * the ifindex here and use the cached value further. + */ + MLX5_ASSERT(spawn->ifindex); + priv->if_index = spawn->ifindex; + eth_dev->data->dev_private = priv; + priv->dev_data = eth_dev->data; + eth_dev->data->mac_addrs = priv->mac; + eth_dev->device = dpdk_dev; + /* Configure the first MAC address by default. */ + if (mlx5_get_mac(eth_dev, &mac.addr_bytes)) { + DRV_LOG(ERR, + "port %u cannot get MAC address, is mlx5_en" + " loaded? 
(errno: %s)", + eth_dev->data->port_id, strerror(rte_errno)); + err = ENODEV; + goto error; + } + DRV_LOG(INFO, + "port %u MAC address is %02x:%02x:%02x:%02x:%02x:%02x", + eth_dev->data->port_id, + mac.addr_bytes[0], mac.addr_bytes[1], + mac.addr_bytes[2], mac.addr_bytes[3], + mac.addr_bytes[4], mac.addr_bytes[5]); +#ifdef RTE_LIBRTE_MLX5_DEBUG + { + char ifname[IF_NAMESIZE]; + + if (mlx5_get_ifname(eth_dev, &ifname) == 0) + DRV_LOG(DEBUG, "port %u ifname is \"%s\"", + eth_dev->data->port_id, ifname); + else + DRV_LOG(DEBUG, "port %u ifname is unknown", + eth_dev->data->port_id); + } +#endif + /* Get actual MTU if possible. */ + err = mlx5_get_mtu(eth_dev, &priv->mtu); + if (err) { + err = rte_errno; + goto error; + } + DRV_LOG(DEBUG, "port %u MTU is %u", eth_dev->data->port_id, + priv->mtu); + /* Initialize burst functions to prevent crashes before link-up. */ + eth_dev->rx_pkt_burst = removed_rx_burst; + eth_dev->tx_pkt_burst = removed_tx_burst; + eth_dev->dev_ops = &mlx5_dev_ops; + /* Register MAC address. */ + claim_zero(mlx5_mac_addr_add(eth_dev, &mac, 0, 0)); + if (config.vf && config.vf_nl_en) + mlx5_nl_mac_addr_sync(priv->nl_socket_route, + mlx5_ifindex(eth_dev), + eth_dev->data->mac_addrs, + MLX5_MAX_MAC_ADDRESSES); + priv->flows = 0; + priv->ctrl_flows = 0; + TAILQ_INIT(&priv->flow_meters); + TAILQ_INIT(&priv->flow_meter_profiles); + /* Hint libmlx5 to use PMD allocator for data plane resources */ + struct mlx5dv_ctx_allocators alctr = { + .alloc = &mlx5_alloc_verbs_buf, + .free = &mlx5_free_verbs_buf, + .data = priv, + }; + mlx5_glue->dv_set_context_attr(sh->ctx, + MLX5DV_CTX_ATTR_BUF_ALLOCATORS, + (void *)((uintptr_t)&alctr)); + /* Bring Ethernet device up. */ + DRV_LOG(DEBUG, "port %u forcing Ethernet interface up", + eth_dev->data->port_id); + mlx5_set_link_up(eth_dev); + /* + * Even though the interrupt handler is not installed yet, + * interrupts will still trigger on the async_fd from + * Verbs context returned by ibv_open_device(). + */ + mlx5_link_update(eth_dev, 0); +#ifdef HAVE_MLX5DV_DR_ESWITCH + if (!(config.hca_attr.eswitch_manager && config.dv_flow_en && + (switch_info->representor || switch_info->master))) + config.dv_esw_en = 0; +#else + config.dv_esw_en = 0; +#endif + /* Detect minimal data bytes to inline. */ + mlx5_set_min_inline(spawn, &config); + /* Store device configuration on private structure. */ + priv->config = config; + /* Create context for virtual machine VLAN workaround. */ + priv->vmwa_context = mlx5_vlan_vmwa_init(eth_dev, spawn->ifindex); + if (config.dv_flow_en) { + err = mlx5_alloc_shared_dr(priv); + if (err) + goto error; + /* + * RSS id is shared with meter flow id. Meter flow id can only + * use the 24 MSB of the register. + */ + priv->qrss_id_pool = mlx5_flow_id_pool_alloc(UINT32_MAX >> + MLX5_MTR_COLOR_BITS); + if (!priv->qrss_id_pool) { + DRV_LOG(ERR, "can't create flow id pool"); + err = ENOMEM; + goto error; + } + } + /* Supported Verbs flow priority number detection. 
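+ * (The PMD probes how many priority levels Verbs accepts, typically
+ * 8 or 16 depending on rdma-core and firmware; the discovered count
+ * is stored in priv->config.flow_prio below.)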
*/ + err = mlx5_flow_discover_priorities(eth_dev); + if (err < 0) { + err = -err; + goto error; + } + priv->config.flow_prio = err; + if (!priv->config.dv_esw_en && + priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) { + DRV_LOG(WARNING, "metadata mode %u is not supported " + "(no E-Switch)", priv->config.dv_xmeta_en); + priv->config.dv_xmeta_en = MLX5_XMETA_MODE_LEGACY; + } + mlx5_set_metadata_mask(eth_dev); + if (priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY && + !priv->sh->dv_regc0_mask) { + DRV_LOG(ERR, "metadata mode %u is not supported " + "(no metadata reg_c[0] is available)", + priv->config.dv_xmeta_en); + err = ENOTSUP; + goto error; + } + /* + * Allocate the buffer for flow creating, just once. + * The allocation must be done before any flow creating. + */ + mlx5_flow_alloc_intermediate(eth_dev); + /* Query availibility of metadata reg_c's. */ + err = mlx5_flow_discover_mreg_c(eth_dev); + if (err < 0) { + err = -err; + goto error; + } + if (!mlx5_flow_ext_mreg_supported(eth_dev)) { + DRV_LOG(DEBUG, + "port %u extensive metadata register is not supported", + eth_dev->data->port_id); + if (priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) { + DRV_LOG(ERR, "metadata mode %u is not supported " + "(no metadata registers available)", + priv->config.dv_xmeta_en); + err = ENOTSUP; + goto error; + } + } + if (priv->config.dv_flow_en && + priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY && + mlx5_flow_ext_mreg_supported(eth_dev) && + priv->sh->dv_regc0_mask) { + priv->mreg_cp_tbl = mlx5_hlist_create(MLX5_FLOW_MREG_HNAME, + MLX5_FLOW_MREG_HTABLE_SZ); + if (!priv->mreg_cp_tbl) { + err = ENOMEM; + goto error; + } + } + return eth_dev; +error: + if (priv) { + if (priv->mreg_cp_tbl) + mlx5_hlist_destroy(priv->mreg_cp_tbl, NULL, NULL); + if (priv->sh) + mlx5_free_shared_dr(priv); + if (priv->nl_socket_route >= 0) + close(priv->nl_socket_route); + if (priv->nl_socket_rdma >= 0) + close(priv->nl_socket_rdma); + if (priv->vmwa_context) + mlx5_vlan_vmwa_exit(priv->vmwa_context); + if (priv->qrss_id_pool) + mlx5_flow_id_pool_release(priv->qrss_id_pool); + if (own_domain_id) + claim_zero(rte_eth_switch_domain_free(priv->domain_id)); + rte_free(priv); + if (eth_dev != NULL) + eth_dev->data->dev_private = NULL; + } + if (eth_dev != NULL) { + /* mac_addrs must not be freed alone because part of dev_private */ + eth_dev->data->mac_addrs = NULL; + rte_eth_dev_release_port(eth_dev); + } + if (sh) + mlx5_free_shared_ibctx(sh); + MLX5_ASSERT(err > 0); + rte_errno = err; + return NULL; +} + +/** + * Comparison callback to sort device data. + * + * This is meant to be used with qsort(). + * + * @param a[in] + * Pointer to pointer to first data object. + * @param b[in] + * Pointer to pointer to second data object. + * + * @return + * 0 if both objects are equal, less than 0 if the first argument is less + * than the second, greater than 0 otherwise. + */ +static int +mlx5_dev_spawn_data_cmp(const void *a, const void *b) +{ + const struct mlx5_switch_info *si_a = + &((const struct mlx5_dev_spawn_data *)a)->info; + const struct mlx5_switch_info *si_b = + &((const struct mlx5_dev_spawn_data *)b)->info; + int ret; + + /* Master device first. */ + ret = si_b->master - si_a->master; + if (ret) + return ret; + /* Then representor devices. */ + ret = si_b->representor - si_a->representor; + if (ret) + return ret; + /* Unidentified devices come last in no specific order. */ + if (!si_a->representor) + return 0; + /* Order representors by name. 
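+ * For illustration, with hypothetical port names: given a master and
+ * representors with port_name 2 and 0, qsort() using this callback
+ * yields master, representor 0, representor 2.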
*/ + return si_a->port_name - si_b->port_name; +} + +/** + * Match PCI information for possible slaves of bonding device. + * + * @param[in] ibv_dev + * Pointer to Infiniband device structure. + * @param[in] pci_dev + * Pointer to PCI device structure to match PCI address. + * @param[in] nl_rdma + * Netlink RDMA group socket handle. + * + * @return + * negative value if no bonding device found, otherwise + * positive index of slave PF in bonding. + */ +static int +mlx5_device_bond_pci_match(const struct ibv_device *ibv_dev, + const struct rte_pci_device *pci_dev, + int nl_rdma) +{ + char ifname[IF_NAMESIZE + 1]; + unsigned int ifindex; + unsigned int np, i; + FILE *file = NULL; + int pf = -1; + + /* + * Try to get master device name. If something goes + * wrong suppose the lack of kernel support and no + * bonding devices. + */ + if (nl_rdma < 0) + return -1; + if (!strstr(ibv_dev->name, "bond")) + return -1; + np = mlx5_nl_portnum(nl_rdma, ibv_dev->name); + if (!np) + return -1; + /* + * The Master device might not be on the predefined + * port (not on port index 1, it is not garanted), + * we have to scan all Infiniband device port and + * find master. + */ + for (i = 1; i <= np; ++i) { + /* Check whether Infiniband port is populated. */ + ifindex = mlx5_nl_ifindex(nl_rdma, ibv_dev->name, i); + if (!ifindex) + continue; + if (!if_indextoname(ifindex, ifname)) + continue; + /* Try to read bonding slave names from sysfs. */ + MKSTR(slaves, + "/sys/class/net/%s/master/bonding/slaves", ifname); + file = fopen(slaves, "r"); + if (file) + break; + } + if (!file) + return -1; + /* Use safe format to check maximal buffer length. */ + MLX5_ASSERT(atol(RTE_STR(IF_NAMESIZE)) == IF_NAMESIZE); + while (fscanf(file, "%" RTE_STR(IF_NAMESIZE) "s", ifname) == 1) { + char tmp_str[IF_NAMESIZE + 32]; + struct rte_pci_addr pci_addr; + struct mlx5_switch_info info; + + /* Process slave interface names in the loop. */ + snprintf(tmp_str, sizeof(tmp_str), + "/sys/class/net/%s", ifname); + if (mlx5_dev_to_pci_addr(tmp_str, &pci_addr)) { + DRV_LOG(WARNING, "can not get PCI address" + " for netdev \"%s\"", ifname); + continue; + } + if (pci_dev->addr.domain != pci_addr.domain || + pci_dev->addr.bus != pci_addr.bus || + pci_dev->addr.devid != pci_addr.devid || + pci_dev->addr.function != pci_addr.function) + continue; + /* Slave interface PCI address match found. */ + fclose(file); + snprintf(tmp_str, sizeof(tmp_str), + "/sys/class/net/%s/phys_port_name", ifname); + file = fopen(tmp_str, "rb"); + if (!file) + break; + info.name_type = MLX5_PHYS_PORT_NAME_TYPE_NOTSET; + if (fscanf(file, "%32s", tmp_str) == 1) + mlx5_translate_port_name(tmp_str, &info); + if (info.name_type == MLX5_PHYS_PORT_NAME_TYPE_LEGACY || + info.name_type == MLX5_PHYS_PORT_NAME_TYPE_UPLINK) + pf = info.port_name; + break; + } + if (file) + fclose(file); + return pf; +} + +/** + * DPDK callback to register a PCI device. + * + * This function spawns Ethernet devices out of a given PCI device. + * + * @param[in] pci_drv + * PCI driver structure (mlx5_driver). + * @param[in] pci_dev + * PCI device information. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, + struct rte_pci_device *pci_dev) +{ + struct ibv_device **ibv_list; + /* + * Number of found IB Devices matching with requested PCI BDF. + * nd != 1 means there are multiple IB devices over the same + * PCI device and we have representors and master. 
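+ * For example, a PF instantiated together with two representors as
+ * three separate IB devices would be counted as nd == 3
+ * (hypothetical layout).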
+ */ + unsigned int nd = 0; + /* + * Number of found IB device Ports. nd = 1 and np = 1..n means + * we have the single multiport IB device, and there may be + * representors attached to some of found ports. + */ + unsigned int np = 0; + /* + * Number of DPDK ethernet devices to Spawn - either over + * multiple IB devices or multiple ports of single IB device. + * Actually this is the number of iterations to spawn. + */ + unsigned int ns = 0; + /* + * Bonding device + * < 0 - no bonding device (single one) + * >= 0 - bonding device (value is slave PF index) + */ + int bd = -1; + struct mlx5_dev_spawn_data *list = NULL; + struct mlx5_dev_config dev_config; + int ret; + + if (mlx5_class_get(pci_dev->device.devargs) != MLX5_CLASS_NET) { + DRV_LOG(DEBUG, "Skip probing - should be probed by other mlx5" + " driver."); + return 1; + } + if (rte_eal_process_type() == RTE_PROC_PRIMARY) + mlx5_pmd_socket_init(); + ret = mlx5_init_once(); + if (ret) { + DRV_LOG(ERR, "unable to init PMD global data: %s", + strerror(rte_errno)); + return -rte_errno; + } + MLX5_ASSERT(pci_drv == &mlx5_driver); + errno = 0; + ibv_list = mlx5_glue->get_device_list(&ret); + if (!ibv_list) { + rte_errno = errno ? errno : ENOSYS; + DRV_LOG(ERR, "cannot list devices, is ib_uverbs loaded?"); + return -rte_errno; + } + /* + * First scan the list of all Infiniband devices to find + * matching ones, gathering into the list. + */ + struct ibv_device *ibv_match[ret + 1]; + int nl_route = mlx5_nl_init(NETLINK_ROUTE); + int nl_rdma = mlx5_nl_init(NETLINK_RDMA); + unsigned int i; + + while (ret-- > 0) { + struct rte_pci_addr pci_addr; + + DRV_LOG(DEBUG, "checking device \"%s\"", ibv_list[ret]->name); + bd = mlx5_device_bond_pci_match + (ibv_list[ret], pci_dev, nl_rdma); + if (bd >= 0) { + /* + * Bonding device detected. Only one match is allowed, + * the bonding is supported over multi-port IB device, + * there should be no matches on representor PCI + * functions or non VF LAG bonding devices with + * specified address. + */ + if (nd) { + DRV_LOG(ERR, + "multiple PCI match on bonding device" + "\"%s\" found", ibv_list[ret]->name); + rte_errno = ENOENT; + ret = -rte_errno; + goto exit; + } + DRV_LOG(INFO, "PCI information matches for" + " slave %d bonding device \"%s\"", + bd, ibv_list[ret]->name); + ibv_match[nd++] = ibv_list[ret]; + break; + } + if (mlx5_dev_to_pci_addr + (ibv_list[ret]->ibdev_path, &pci_addr)) + continue; + if (pci_dev->addr.domain != pci_addr.domain || + pci_dev->addr.bus != pci_addr.bus || + pci_dev->addr.devid != pci_addr.devid || + pci_dev->addr.function != pci_addr.function) + continue; + DRV_LOG(INFO, "PCI information matches for device \"%s\"", + ibv_list[ret]->name); + ibv_match[nd++] = ibv_list[ret]; + } + ibv_match[nd] = NULL; + if (!nd) { + /* No device matches, just complain and bail out. */ + DRV_LOG(WARNING, + "no Verbs device matches PCI device " PCI_PRI_FMT "," + " are kernel drivers loaded?", + pci_dev->addr.domain, pci_dev->addr.bus, + pci_dev->addr.devid, pci_dev->addr.function); + rte_errno = ENOENT; + ret = -rte_errno; + goto exit; + } + if (nd == 1) { + /* + * Found single matching device may have multiple ports. + * Each port may be representor, we have to check the port + * number and check the representors existence. 
+ */ + if (nl_rdma >= 0) + np = mlx5_nl_portnum(nl_rdma, ibv_match[0]->name); + if (!np) + DRV_LOG(WARNING, "can not get IB device \"%s\"" + " ports number", ibv_match[0]->name); + if (bd >= 0 && !np) { + DRV_LOG(ERR, "can not get ports" + " for bonding device"); + rte_errno = ENOENT; + ret = -rte_errno; + goto exit; + } + } +#ifndef HAVE_MLX5DV_DR_DEVX_PORT + if (bd >= 0) { + /* + * This may happen if there is VF LAG kernel support and + * application is compiled with older rdma_core library. + */ + DRV_LOG(ERR, + "No kernel/verbs support for VF LAG bonding found."); + rte_errno = ENOTSUP; + ret = -rte_errno; + goto exit; + } +#endif + /* + * Now we can determine the maximal + * amount of devices to be spawned. + */ + list = rte_zmalloc("device spawn data", + sizeof(struct mlx5_dev_spawn_data) * + (np ? np : nd), + RTE_CACHE_LINE_SIZE); + if (!list) { + DRV_LOG(ERR, "spawn data array allocation failure"); + rte_errno = ENOMEM; + ret = -rte_errno; + goto exit; + } + if (bd >= 0 || np > 1) { + /* + * Single IB device with multiple ports found, + * it may be E-Switch master device and representors. + * We have to perform identification through the ports. + */ + MLX5_ASSERT(nl_rdma >= 0); + MLX5_ASSERT(ns == 0); + MLX5_ASSERT(nd == 1); + MLX5_ASSERT(np); + for (i = 1; i <= np; ++i) { + list[ns].max_port = np; + list[ns].ibv_port = i; + list[ns].ibv_dev = ibv_match[0]; + list[ns].eth_dev = NULL; + list[ns].pci_dev = pci_dev; + list[ns].pf_bond = bd; + list[ns].ifindex = mlx5_nl_ifindex + (nl_rdma, list[ns].ibv_dev->name, i); + if (!list[ns].ifindex) { + /* + * No network interface index found for the + * specified port, it means there is no + * representor on this port. It's OK, + * there can be disabled ports, for example + * if sriov_numvfs < sriov_totalvfs. + */ + continue; + } + ret = -1; + if (nl_route >= 0) + ret = mlx5_nl_switch_info + (nl_route, + list[ns].ifindex, + &list[ns].info); + if (ret || (!list[ns].info.representor && + !list[ns].info.master)) { + /* + * We failed to recognize representors with + * Netlink, let's try to perform the task + * with sysfs. + */ + ret = mlx5_sysfs_switch_info + (list[ns].ifindex, + &list[ns].info); + } + if (!ret && bd >= 0) { + switch (list[ns].info.name_type) { + case MLX5_PHYS_PORT_NAME_TYPE_UPLINK: + if (list[ns].info.port_name == bd) + ns++; + break; + case MLX5_PHYS_PORT_NAME_TYPE_PFVF: + if (list[ns].info.pf_num == bd) + ns++; + break; + default: + break; + } + continue; + } + if (!ret && (list[ns].info.representor ^ + list[ns].info.master)) + ns++; + } + if (!ns) { + DRV_LOG(ERR, + "unable to recognize master/representors" + " on the IB device with multiple ports"); + rte_errno = ENOENT; + ret = -rte_errno; + goto exit; + } + } else { + /* + * The existence of several matching entries (nd > 1) means + * port representors have been instantiated. No existing Verbs + * call nor sysfs entries can tell them apart, this can only + * be done through Netlink calls assuming kernel drivers are + * recent enough to support them. + * + * In the event of identification failure through Netlink, + * try again through sysfs, then: + * + * 1. A single IB device matches (nd == 1) with single + * port (np=0/1) and is not a representor, assume + * no switch support. + * + * 2. Otherwise no safe assumptions can be made; + * complain louder and bail out. 
+ */ + np = 1; + for (i = 0; i != nd; ++i) { + memset(&list[ns].info, 0, sizeof(list[ns].info)); + list[ns].max_port = 1; + list[ns].ibv_port = 1; + list[ns].ibv_dev = ibv_match[i]; + list[ns].eth_dev = NULL; + list[ns].pci_dev = pci_dev; + list[ns].pf_bond = -1; + list[ns].ifindex = 0; + if (nl_rdma >= 0) + list[ns].ifindex = mlx5_nl_ifindex + (nl_rdma, list[ns].ibv_dev->name, 1); + if (!list[ns].ifindex) { + char ifname[IF_NAMESIZE]; + + /* + * Netlink failed, it may happen with old + * ib_core kernel driver (before 4.16). + * We can assume there is old driver because + * here we are processing single ports IB + * devices. Let's try sysfs to retrieve + * the ifindex. The method works for + * master device only. + */ + if (nd > 1) { + /* + * Multiple devices found, assume + * representors, can not distinguish + * master/representor and retrieve + * ifindex via sysfs. + */ + continue; + } + ret = mlx5_get_master_ifname + (ibv_match[i]->ibdev_path, &ifname); + if (!ret) + list[ns].ifindex = + if_nametoindex(ifname); + if (!list[ns].ifindex) { + /* + * No network interface index found + * for the specified device, it means + * there it is neither representor + * nor master. + */ + continue; + } + } + ret = -1; + if (nl_route >= 0) + ret = mlx5_nl_switch_info + (nl_route, + list[ns].ifindex, + &list[ns].info); + if (ret || (!list[ns].info.representor && + !list[ns].info.master)) { + /* + * We failed to recognize representors with + * Netlink, let's try to perform the task + * with sysfs. + */ + ret = mlx5_sysfs_switch_info + (list[ns].ifindex, + &list[ns].info); + } + if (!ret && (list[ns].info.representor ^ + list[ns].info.master)) { + ns++; + } else if ((nd == 1) && + !list[ns].info.representor && + !list[ns].info.master) { + /* + * Single IB device with + * one physical port and + * attached network device. + * May be SRIOV is not enabled + * or there is no representors. + */ + DRV_LOG(INFO, "no E-Switch support detected"); + ns++; + break; + } + } + if (!ns) { + DRV_LOG(ERR, + "unable to recognize master/representors" + " on the multiple IB devices"); + rte_errno = ENOENT; + ret = -rte_errno; + goto exit; + } + } + MLX5_ASSERT(ns); + /* + * Sort list to probe devices in natural order for users convenience + * (i.e. master first, then representors from lowest to highest ID). + */ + qsort(list, ns, sizeof(*list), mlx5_dev_spawn_data_cmp); + /* Default configuration. */ + dev_config = (struct mlx5_dev_config){ + .hw_padding = 0, + .mps = MLX5_ARG_UNSET, + .dbnc = MLX5_ARG_UNSET, + .rx_vec_en = 1, + .txq_inline_max = MLX5_ARG_UNSET, + .txq_inline_min = MLX5_ARG_UNSET, + .txq_inline_mpw = MLX5_ARG_UNSET, + .txqs_inline = MLX5_ARG_UNSET, + .vf_nl_en = 1, + .mr_ext_memseg_en = 1, + .mprq = { + .enabled = 0, /* Disabled by default. */ + .stride_num_n = 0, + .stride_size_n = 0, + .max_memcpy_len = MLX5_MPRQ_MEMCPY_DEFAULT_LEN, + .min_rxqs_num = MLX5_MPRQ_MIN_RXQS, + }, + .dv_esw_en = 1, + .dv_flow_en = 1, + .log_hp_size = MLX5_ARG_UNSET, + }; + /* Device specific configuration. 
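+ * The VF device IDs below set dev_config.vf, which in turn enables
+ * the Netlink MAC address synchronization done in mlx5_dev_spawn()
+ * (config.vf && config.vf_nl_en).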
*/ + switch (pci_dev->id.device_id) { + case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF: + case PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF: + case PCI_DEVICE_ID_MELLANOX_CONNECTX5VF: + case PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF: + case PCI_DEVICE_ID_MELLANOX_CONNECTX5BFVF: + case PCI_DEVICE_ID_MELLANOX_CONNECTX6VF: + case PCI_DEVICE_ID_MELLANOX_CONNECTX6DXVF: + dev_config.vf = 1; + break; + default: + break; + } + for (i = 0; i != ns; ++i) { + uint32_t restore; + + list[i].eth_dev = mlx5_dev_spawn(&pci_dev->device, + &list[i], + dev_config); + if (!list[i].eth_dev) { + if (rte_errno != EBUSY && rte_errno != EEXIST) + break; + /* Device is disabled or already spawned. Ignore it. */ + continue; + } + restore = list[i].eth_dev->data->dev_flags; + rte_eth_copy_pci_info(list[i].eth_dev, pci_dev); + /* Restore non-PCI flags cleared by the above call. */ + list[i].eth_dev->data->dev_flags |= restore; + mlx5_dev_interrupt_handler_devx_install(list[i].eth_dev); + rte_eth_dev_probing_finish(list[i].eth_dev); + } + if (i != ns) { + DRV_LOG(ERR, + "probe of PCI device " PCI_PRI_FMT " aborted after" + " encountering an error: %s", + pci_dev->addr.domain, pci_dev->addr.bus, + pci_dev->addr.devid, pci_dev->addr.function, + strerror(rte_errno)); + ret = -rte_errno; + /* Roll back. */ + while (i--) { + if (!list[i].eth_dev) + continue; + mlx5_dev_close(list[i].eth_dev); + /* mac_addrs must not be freed because in dev_private */ + list[i].eth_dev->data->mac_addrs = NULL; + claim_zero(rte_eth_dev_release_port(list[i].eth_dev)); + } + /* Restore original error. */ + rte_errno = -ret; + } else { + ret = 0; + } +exit: + /* + * Do the routine cleanup: + * - close opened Netlink sockets + * - free allocated spawn data array + * - free the Infiniband device list + */ + if (nl_rdma >= 0) + close(nl_rdma); + if (nl_route >= 0) + close(nl_route); + if (list) + rte_free(list); + MLX5_ASSERT(ibv_list); + mlx5_glue->free_device_list(ibv_list); + return ret; +} + +/** + * Look for the ethernet device belonging to mlx5 driver. + * + * @param[in] port_id + * port_id to start looking for device. + * @param[in] pci_dev + * Pointer to the hint PCI device. When device is being probed + * the its siblings (master and preceding representors might + * not have assigned driver yet (because the mlx5_pci_probe() + * is not completed yet, for this case match on hint PCI + * device may be used to detect sibling device. + * + * @return + * port_id of found device, RTE_MAX_ETHPORT if not found. + */ +uint16_t +mlx5_eth_find_next(uint16_t port_id, struct rte_pci_device *pci_dev) +{ + while (port_id < RTE_MAX_ETHPORTS) { + struct rte_eth_dev *dev = &rte_eth_devices[port_id]; + + if (dev->state != RTE_ETH_DEV_UNUSED && + dev->device && + (dev->device == &pci_dev->device || + (dev->device->driver && + dev->device->driver->name && + !strcmp(dev->device->driver->name, MLX5_DRIVER_NAME)))) + break; + port_id++; + } + if (port_id >= RTE_MAX_ETHPORTS) + return RTE_MAX_ETHPORTS; + return port_id; +} + +/** + * DPDK callback to remove a PCI device. + * + * This function removes all Ethernet devices belong to a given PCI device. + * + * @param[in] pci_dev + * Pointer to the PCI device. + * + * @return + * 0 on success, the function cannot fail. 
+ */ +static int +mlx5_pci_remove(struct rte_pci_device *pci_dev) +{ + uint16_t port_id; + + RTE_ETH_FOREACH_DEV_OF(port_id, &pci_dev->device) + rte_eth_dev_close(port_id); + return 0; +} + +static const struct rte_pci_id mlx5_pci_id_map[] = { + { + RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, + PCI_DEVICE_ID_MELLANOX_CONNECTX4) + }, + { + RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, + PCI_DEVICE_ID_MELLANOX_CONNECTX4VF) + }, + { + RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, + PCI_DEVICE_ID_MELLANOX_CONNECTX4LX) + }, + { + RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, + PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF) + }, + { + RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, + PCI_DEVICE_ID_MELLANOX_CONNECTX5) + }, + { + RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, + PCI_DEVICE_ID_MELLANOX_CONNECTX5VF) + }, + { + RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, + PCI_DEVICE_ID_MELLANOX_CONNECTX5EX) + }, + { + RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, + PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF) + }, + { + RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, + PCI_DEVICE_ID_MELLANOX_CONNECTX5BF) + }, + { + RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, + PCI_DEVICE_ID_MELLANOX_CONNECTX5BFVF) + }, + { + RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, + PCI_DEVICE_ID_MELLANOX_CONNECTX6) + }, + { + RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, + PCI_DEVICE_ID_MELLANOX_CONNECTX6VF) + }, + { + RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, + PCI_DEVICE_ID_MELLANOX_CONNECTX6DX) + }, + { + RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, + PCI_DEVICE_ID_MELLANOX_CONNECTX6DXVF) + }, + { + RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, + PCI_DEVICE_ID_MELLANOX_CONNECTX6DXBF) + }, + { + .vendor_id = 0 + } +}; + +static struct rte_pci_driver mlx5_driver = { + .driver = { + .name = MLX5_DRIVER_NAME + }, + .id_table = mlx5_pci_id_map, + .probe = mlx5_pci_probe, + .remove = mlx5_pci_remove, + .dma_map = mlx5_dma_map, + .dma_unmap = mlx5_dma_unmap, + .drv_flags = RTE_PCI_DRV_INTR_LSC | RTE_PCI_DRV_INTR_RMV | + RTE_PCI_DRV_PROBE_AGAIN, +}; + +/** + * Driver initialization routine. + */ +RTE_INIT(rte_mlx5_pmd_init) +{ + /* Initialize driver log type. */ + mlx5_logtype = rte_log_register("pmd.net.mlx5"); + if (mlx5_logtype >= 0) + rte_log_set_level(mlx5_logtype, RTE_LOG_NOTICE); + + /* Build the static tables for Verbs conversion. */ + mlx5_set_ptype_table(); + mlx5_set_cksum_table(); + mlx5_set_swp_types_table(); + if (mlx5_glue) + rte_pci_register(&mlx5_driver); +} + +RTE_PMD_EXPORT_NAME(net_mlx5, __COUNTER__); +RTE_PMD_REGISTER_PCI_TABLE(net_mlx5, mlx5_pci_id_map); +RTE_PMD_REGISTER_KMOD_DEP(net_mlx5, "* ib_uverbs & mlx5_core & mlx5_ib"); diff --git a/src/spdk/dpdk/drivers/net/mlx5/mlx5.h b/src/spdk/dpdk/drivers/net/mlx5/mlx5.h new file mode 100644 index 000000000..d9f5d816f --- /dev/null +++ b/src/spdk/dpdk/drivers/net/mlx5/mlx5.h @@ -0,0 +1,848 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2015 6WIND S.A. + * Copyright 2015 Mellanox Technologies, Ltd + */ + +#ifndef RTE_PMD_MLX5_H_ +#define RTE_PMD_MLX5_H_ + +#include +#include +#include +#include +#include +#include +#include + +/* Verbs header. */ +/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. 
*/ +#ifdef PEDANTIC +#pragma GCC diagnostic ignored "-Wpedantic" +#endif +#include +#ifdef PEDANTIC +#pragma GCC diagnostic error "-Wpedantic" +#endif + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "mlx5_defs.h" +#include "mlx5_utils.h" +#include "mlx5_autoconf.h" + + +enum mlx5_ipool_index { +#ifdef HAVE_IBV_FLOW_DV_SUPPORT + MLX5_IPOOL_DECAP_ENCAP = 0, /* Pool for encap/decap resource. */ + MLX5_IPOOL_PUSH_VLAN, /* Pool for push vlan resource. */ + MLX5_IPOOL_TAG, /* Pool for tag resource. */ + MLX5_IPOOL_PORT_ID, /* Pool for port id resource. */ + MLX5_IPOOL_JUMP, /* Pool for jump resource. */ +#endif + MLX5_IPOOL_MTR, /* Pool for meter resource. */ + MLX5_IPOOL_MCP, /* Pool for metadata resource. */ + MLX5_IPOOL_HRXQ, /* Pool for hrxq resource. */ + MLX5_IPOOL_MLX5_FLOW, /* Pool for mlx5 flow handle. */ + MLX5_IPOOL_RTE_FLOW, /* Pool for rte_flow. */ + MLX5_IPOOL_MAX, +}; + +/** Key string for IPC. */ +#define MLX5_MP_NAME "net_mlx5_mp" + + +LIST_HEAD(mlx5_dev_list, mlx5_ibv_shared); + +/* Shared data between primary and secondary processes. */ +struct mlx5_shared_data { + rte_spinlock_t lock; + /* Global spinlock for primary and secondary processes. */ + int init_done; /* Whether primary has done initialization. */ + unsigned int secondary_cnt; /* Number of secondary processes init'd. */ + struct mlx5_dev_list mem_event_cb_list; + rte_rwlock_t mem_event_rwlock; +}; + +/* Per-process data structure, not visible to other processes. */ +struct mlx5_local_data { + int init_done; /* Whether a secondary has done initialization. */ +}; + +extern struct mlx5_shared_data *mlx5_shared_data; + +struct mlx5_counter_ctrl { + /* Name of the counter. */ + char dpdk_name[RTE_ETH_XSTATS_NAME_SIZE]; + /* Name of the counter on the device table. */ + char ctr_name[RTE_ETH_XSTATS_NAME_SIZE]; + uint32_t ib:1; /**< Nonzero for IB counters. */ +}; + +struct mlx5_xstats_ctrl { + /* Number of device stats. */ + uint16_t stats_n; + /* Number of device stats identified by PMD. */ + uint16_t mlx5_stats_n; + /* Index in the device counters table. */ + uint16_t dev_table_idx[MLX5_MAX_XSTATS]; + uint64_t base[MLX5_MAX_XSTATS]; + uint64_t xstats[MLX5_MAX_XSTATS]; + uint64_t hw_stats[MLX5_MAX_XSTATS]; + struct mlx5_counter_ctrl info[MLX5_MAX_XSTATS]; +}; + +struct mlx5_stats_ctrl { + /* Base for imissed counter. */ + uint64_t imissed_base; + uint64_t imissed; +}; + +/* Default PMD specific parameter value. */ +#define MLX5_ARG_UNSET (-1) + +#define MLX5_LRO_SUPPORTED(dev) \ + (((struct mlx5_priv *)((dev)->data->dev_private))->config.lro.supported) + +/* Maximal size of coalesced segment for LRO is set in chunks of 256 Bytes. */ +#define MLX5_LRO_SEG_CHUNK_SIZE 256u + +/* Maximal size of aggregated LRO packet. */ +#define MLX5_MAX_LRO_SIZE (UINT8_MAX * MLX5_LRO_SEG_CHUNK_SIZE) + +/* LRO configurations structure. */ +struct mlx5_lro_config { + uint32_t supported:1; /* Whether LRO is supported. */ + uint32_t timeout; /* User configuration. */ +}; + +/* + * Device configuration structure. + * + * Merged configuration from: + * + * - Device capabilities, + * - User device parameters disabled features. + */ +struct mlx5_dev_config { + unsigned int hw_csum:1; /* Checksum offload is supported. */ + unsigned int hw_vlan_strip:1; /* VLAN stripping is supported. */ + unsigned int hw_vlan_insert:1; /* VLAN insertion in WQE is supported. */ + unsigned int hw_fcs_strip:1; /* FCS stripping is supported. 
*/ + unsigned int hw_padding:1; /* End alignment padding is supported. */ + unsigned int vf:1; /* This is a VF. */ + unsigned int tunnel_en:1; + /* Whether tunnel stateless offloads are supported. */ + unsigned int mpls_en:1; /* MPLS over GRE/UDP is enabled. */ + unsigned int cqe_comp:1; /* CQE compression is enabled. */ + unsigned int cqe_pad:1; /* CQE padding is enabled. */ + unsigned int tso:1; /* Whether TSO is supported. */ + unsigned int rx_vec_en:1; /* Rx vector is enabled. */ + unsigned int mr_ext_memseg_en:1; + /* Whether memseg should be extended for MR creation. */ + unsigned int l3_vxlan_en:1; /* Enable L3 VXLAN flow creation. */ + unsigned int vf_nl_en:1; /* Enable Netlink requests in VF mode. */ + unsigned int dv_esw_en:1; /* Enable E-Switch DV flow. */ + unsigned int dv_flow_en:1; /* Enable DV flow. */ + unsigned int dv_xmeta_en:2; /* Enable extensive flow metadata. */ + unsigned int swp:1; /* Tx generic tunnel checksum and TSO offload. */ + unsigned int devx:1; /* Whether devx interface is available or not. */ + unsigned int dest_tir:1; /* Whether advanced DR API is available. */ + struct { + unsigned int enabled:1; /* Whether MPRQ is enabled. */ + unsigned int stride_num_n; /* Number of strides. */ + unsigned int stride_size_n; /* Size of a stride. */ + unsigned int min_stride_size_n; /* Min size of a stride. */ + unsigned int max_stride_size_n; /* Max size of a stride. */ + unsigned int max_memcpy_len; + /* Maximum packet size to memcpy Rx packets. */ + unsigned int min_rxqs_num; + /* Rx queue count threshold to enable MPRQ. */ + } mprq; /* Configurations for Multi-Packet RQ. */ + int mps; /* Multi-packet send supported mode. */ + int dbnc; /* Skip doorbell register write barrier. */ + unsigned int flow_prio; /* Number of flow priorities. */ + enum modify_reg flow_mreg_c[MLX5_MREG_C_NUM]; + /* Availibility of mreg_c's. */ + unsigned int tso_max_payload_sz; /* Maximum TCP payload for TSO. */ + unsigned int ind_table_max_size; /* Maximum indirection table size. */ + unsigned int max_dump_files_num; /* Maximum dump files per queue. */ + unsigned int log_hp_size; /* Single hairpin queue data size in total. */ + int txqs_inline; /* Queue number threshold for inlining. */ + int txq_inline_min; /* Minimal amount of data bytes to inline. */ + int txq_inline_max; /* Max packet size for inlining with SEND. */ + int txq_inline_mpw; /* Max packet size for inlining with eMPW. */ + struct mlx5_hca_attr hca_attr; /* HCA attributes. */ + struct mlx5_lro_config lro; /* LRO configuration. */ +}; + + +/** + * Type of object being allocated. + */ +enum mlx5_verbs_alloc_type { + MLX5_VERBS_ALLOC_TYPE_NONE, + MLX5_VERBS_ALLOC_TYPE_TX_QUEUE, + MLX5_VERBS_ALLOC_TYPE_RX_QUEUE, +}; + +/* Structure for VF VLAN workaround. */ +struct mlx5_vf_vlan { + uint32_t tag:12; + uint32_t created:1; +}; + +/** + * Verbs allocator needs a context to know in the callback which kind of + * resources it is allocating. + */ +struct mlx5_verbs_alloc_ctx { + enum mlx5_verbs_alloc_type type; /* Kind of object being allocated. */ + const void *obj; /* Pointer to the DPDK object. */ +}; + +/* Flow drop context necessary due to Verbs API. */ +struct mlx5_drop { + struct mlx5_hrxq *hrxq; /* Hash Rx queue queue. */ + struct mlx5_rxq_obj *rxq; /* Rx queue object. 
*/ +}; + +#define MLX5_COUNTERS_PER_POOL 512 +#define MLX5_MAX_PENDING_QUERIES 4 +#define MLX5_CNT_CONTAINER_RESIZE 64 +#define MLX5_CNT_AGE_OFFSET 0x80000000 +#define CNT_SIZE (sizeof(struct mlx5_flow_counter)) +#define CNTEXT_SIZE (sizeof(struct mlx5_flow_counter_ext)) +#define AGE_SIZE (sizeof(struct mlx5_age_param)) +#define MLX5_AGING_TIME_DELAY 7 +#define CNT_POOL_TYPE_EXT (1 << 0) +#define CNT_POOL_TYPE_AGE (1 << 1) +#define IS_EXT_POOL(pool) (((pool)->type) & CNT_POOL_TYPE_EXT) +#define IS_AGE_POOL(pool) (((pool)->type) & CNT_POOL_TYPE_AGE) +#define MLX_CNT_IS_AGE(counter) ((counter) & MLX5_CNT_AGE_OFFSET ? 1 : 0) +#define MLX5_CNT_LEN(pool) \ + (CNT_SIZE + \ + (IS_AGE_POOL(pool) ? AGE_SIZE : 0) + \ + (IS_EXT_POOL(pool) ? CNTEXT_SIZE : 0)) +#define MLX5_POOL_GET_CNT(pool, index) \ + ((struct mlx5_flow_counter *) \ + ((uint8_t *)((pool) + 1) + (index) * (MLX5_CNT_LEN(pool)))) +#define MLX5_CNT_ARRAY_IDX(pool, cnt) \ + ((int)(((uint8_t *)(cnt) - (uint8_t *)((pool) + 1)) / \ + MLX5_CNT_LEN(pool))) +/* + * The pool index and offset of counter in the pool array makes up the + * counter index. In case the counter is from pool 0 and offset 0, it + * should plus 1 to avoid index 0, since 0 means invalid counter index + * currently. + */ +#define MLX5_MAKE_CNT_IDX(pi, offset) \ + ((pi) * MLX5_COUNTERS_PER_POOL + (offset) + 1) +#define MLX5_CNT_TO_CNT_EXT(pool, cnt) \ + ((struct mlx5_flow_counter_ext *)\ + ((uint8_t *)((cnt) + 1) + \ + (IS_AGE_POOL(pool) ? AGE_SIZE : 0))) +#define MLX5_GET_POOL_CNT_EXT(pool, offset) \ + MLX5_CNT_TO_CNT_EXT(pool, MLX5_POOL_GET_CNT((pool), (offset))) +#define MLX5_CNT_TO_AGE(cnt) \ + ((struct mlx5_age_param *)((cnt) + 1)) + +struct mlx5_flow_counter_pool; + +/*age status*/ +enum { + AGE_FREE, /* Initialized state. */ + AGE_CANDIDATE, /* Counter assigned to flows. */ + AGE_TMOUT, /* Timeout, wait for rte_flow_get_aged_flows and destroy. */ +}; + +#define MLX5_CNT_CONTAINER(sh, batch, age) (&(sh)->cmng.ccont \ + [(batch) * 2 + (age)]) + +enum { + MLX5_CCONT_TYPE_SINGLE, + MLX5_CCONT_TYPE_SINGLE_FOR_AGE, + MLX5_CCONT_TYPE_BATCH, + MLX5_CCONT_TYPE_BATCH_FOR_AGE, + MLX5_CCONT_TYPE_MAX, +}; + +/* Counter age parameter. */ +struct mlx5_age_param { + rte_atomic16_t state; /**< Age state. */ + uint16_t port_id; /**< Port id of the counter. */ + uint32_t timeout:15; /**< Age timeout in unit of 0.1sec. */ + uint32_t expire:16; /**< Expire time(0.1sec) in the future. */ + void *context; /**< Flow counter age context. */ +}; + +struct flow_counter_stats { + uint64_t hits; + uint64_t bytes; +}; + +/* Generic counters information. */ +struct mlx5_flow_counter { + TAILQ_ENTRY(mlx5_flow_counter) next; + /**< Pointer to the next flow counter structure. */ + union { + uint64_t hits; /**< Reset value of hits packets. */ + int64_t query_gen; /**< Generation of the last release. */ + }; + uint64_t bytes; /**< Reset value of bytes. */ + void *action; /**< Pointer to the dv action. */ +}; + +/* Extend counters information for none batch counters. */ +struct mlx5_flow_counter_ext { + uint32_t shared:1; /**< Share counter ID with other flow rules. */ + uint32_t batch: 1; + /**< Whether the counter was allocated by batch command. */ + uint32_t ref_cnt:30; /**< Reference counter. */ + uint32_t id; /**< User counter ID. */ + union { /**< Holds the counters for the rule. */ +#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) + struct ibv_counter_set *cs; +#elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45) + struct ibv_counters *cs; +#endif + struct mlx5_devx_obj *dcs; /**< Counter Devx object. 
*/ + }; +}; + +TAILQ_HEAD(mlx5_counters, mlx5_flow_counter); + +/* Generic counter pool structure - query is in pool resolution. */ +struct mlx5_flow_counter_pool { + TAILQ_ENTRY(mlx5_flow_counter_pool) next; + struct mlx5_counters counters; /* Free counter list. */ + union { + struct mlx5_devx_obj *min_dcs; + rte_atomic64_t a64_dcs; + }; + /* The devx object of the minimum counter ID. */ + rte_atomic64_t start_query_gen; /* Query start round. */ + rte_atomic64_t end_query_gen; /* Query end round. */ + uint32_t index; /* Pool index in container. */ + uint8_t type; /* Memory type behind the counter array. */ + rte_spinlock_t sl; /* The pool lock. */ + struct mlx5_counter_stats_raw *raw; + struct mlx5_counter_stats_raw *raw_hw; /* The raw on HW working. */ +}; + +struct mlx5_counter_stats_raw; + +/* Memory management structure for group of counter statistics raws. */ +struct mlx5_counter_stats_mem_mng { + LIST_ENTRY(mlx5_counter_stats_mem_mng) next; + struct mlx5_counter_stats_raw *raws; + struct mlx5_devx_obj *dm; + struct mlx5dv_devx_umem *umem; +}; + +/* Raw memory structure for the counter statistics values of a pool. */ +struct mlx5_counter_stats_raw { + LIST_ENTRY(mlx5_counter_stats_raw) next; + int min_dcs_id; + struct mlx5_counter_stats_mem_mng *mem_mng; + volatile struct flow_counter_stats *data; +}; + +TAILQ_HEAD(mlx5_counter_pools, mlx5_flow_counter_pool); + +/* Container structure for counter pools. */ +struct mlx5_pools_container { + rte_atomic16_t n_valid; /* Number of valid pools. */ + uint16_t n; /* Number of pools. */ + rte_spinlock_t resize_sl; /* The resize lock. */ + struct mlx5_counter_pools pool_list; /* Counter pool list. */ + struct mlx5_flow_counter_pool **pools; /* Counter pool array. */ + struct mlx5_counter_stats_mem_mng *mem_mng; + /* Hold the memory management for the next allocated pools raws. */ +}; + +/* Counter global management structure. */ +struct mlx5_flow_counter_mng { + struct mlx5_pools_container ccont[MLX5_CCONT_TYPE_MAX]; + struct mlx5_counters flow_counters; /* Legacy flow counter list. */ + uint8_t pending_queries; + uint8_t batch; + uint16_t pool_index; + uint8_t age; + uint8_t query_thread_on; + LIST_HEAD(mem_mngs, mlx5_counter_stats_mem_mng) mem_mngs; + LIST_HEAD(stat_raws, mlx5_counter_stats_raw) free_stat_raws; +}; + +#define MLX5_AGE_EVENT_NEW 1 +#define MLX5_AGE_TRIGGER 2 +#define MLX5_AGE_SET(age_info, BIT) \ + ((age_info)->flags |= (1 << (BIT))) +#define MLX5_AGE_GET(age_info, BIT) \ + ((age_info)->flags & (1 << (BIT))) +#define GET_PORT_AGE_INFO(priv) \ + (&((priv)->sh->port[(priv)->ibv_port - 1].age_info)) + +/* Aging information for per port. */ +struct mlx5_age_info { + uint8_t flags; /*Indicate if is new event or need be trigered*/ + struct mlx5_counters aged_counters; /* Aged flow counter list. */ + rte_spinlock_t aged_sl; /* Aged flow counter list lock. */ +}; + +/* Per port data of shared IB device. */ +struct mlx5_ibv_shared_port { + uint32_t ih_port_id; + uint32_t devx_ih_port_id; + /* + * Interrupt handler port_id. Used by shared interrupt + * handler to find the corresponding rte_eth device + * by IB port index. If value is equal or greater + * RTE_MAX_ETHPORTS it means there is no subhandler + * installed for specified IB port index. + */ + struct mlx5_age_info age_info; + /* Aging information for per port. */ +}; + +/* Table key of the hash organization. */ +union mlx5_flow_tbl_key { + struct { + /* Table ID should be at the lowest address. */ + uint32_t table_id; /**< ID of the table. 
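Once the remaining fields below are in place, the per-field view and the 64-bit view alias the same storage, so a key can be filled field by field and then hashed or compared through v64 in one operation. A minimal illustration (sketch only; the hash-table code itself lives elsewhere in the driver):

    union mlx5_flow_tbl_key key;

    key.table_id  = 5;  // group/table number
    key.reserved  = 0;  // must stay zero so v64 comparisons work
    key.domain    = 1;  // 1 - FDB, 0 - NIC Tx/Rx
    key.direction = 0;  // 1 - egress, 0 - ingress
    uint64_t hash_key = key.v64; // single comparable value (e.g. as a hash-table key)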
*/ + uint16_t reserved; /**< must be zero for comparison. */ + uint8_t domain; /**< 1 - FDB, 0 - NIC TX/RX. */ + uint8_t direction; /**< 1 - egress, 0 - ingress. */ + }; + uint64_t v64; /**< full 64bits value of key */ +}; + +/* Table structure. */ +struct mlx5_flow_tbl_resource { + void *obj; /**< Pointer to DR table object. */ + rte_atomic32_t refcnt; /**< Reference counter. */ +}; + +#define MLX5_MAX_TABLES UINT16_MAX +#define MLX5_FLOW_TABLE_LEVEL_METER (UINT16_MAX - 3) +#define MLX5_FLOW_TABLE_LEVEL_SUFFIX (UINT16_MAX - 2) +#define MLX5_HAIRPIN_TX_TABLE (UINT16_MAX - 1) +/* Reserve the last two tables for metadata register copy. */ +#define MLX5_FLOW_MREG_ACT_TABLE_GROUP (MLX5_MAX_TABLES - 1) +#define MLX5_FLOW_MREG_CP_TABLE_GROUP (MLX5_MAX_TABLES - 2) +/* Tables for metering splits should be added here. */ +#define MLX5_MAX_TABLES_EXTERNAL (MLX5_MAX_TABLES - 3) +#define MLX5_MAX_TABLES_FDB UINT16_MAX + +#define MLX5_DBR_PAGE_SIZE 4096 /* Must be >= 512. */ +#define MLX5_DBR_SIZE 8 +#define MLX5_DBR_PER_PAGE (MLX5_DBR_PAGE_SIZE / MLX5_DBR_SIZE) +#define MLX5_DBR_BITMAP_SIZE (MLX5_DBR_PER_PAGE / 64) + +struct mlx5_devx_dbr_page { + /* Door-bell records, must be first member in structure. */ + uint8_t dbrs[MLX5_DBR_PAGE_SIZE]; + LIST_ENTRY(mlx5_devx_dbr_page) next; /* Pointer to the next element. */ + struct mlx5dv_devx_umem *umem; + uint32_t dbr_count; /* Number of door-bell records in use. */ + /* 1 bit marks matching door-bell is in use. */ + uint64_t dbr_bitmap[MLX5_DBR_BITMAP_SIZE]; +}; + +/* ID generation structure. */ +struct mlx5_flow_id_pool { + uint32_t *free_arr; /**< Pointer to the a array of free values. */ + uint32_t base_index; + /**< The next index that can be used without any free elements. */ + uint32_t *curr; /**< Pointer to the index to pop. */ + uint32_t *last; /**< Pointer to the last element in the empty arrray. */ + uint32_t max_id; /**< Maximum id can be allocated from the pool. */ +}; + +/* + * Shared Infiniband device context for Master/Representors + * which belong to same IB device with multiple IB ports. + **/ +struct mlx5_ibv_shared { + LIST_ENTRY(mlx5_ibv_shared) next; + uint32_t refcnt; + uint32_t devx:1; /* Opened with DV. */ + uint32_t max_port; /* Maximal IB device port index. */ + struct ibv_context *ctx; /* Verbs/DV context. */ + struct ibv_pd *pd; /* Protection Domain. */ + uint32_t pdn; /* Protection Domain number. */ + uint32_t tdn; /* Transport Domain number. */ + char ibdev_name[IBV_SYSFS_NAME_MAX]; /* IB device name. */ + char ibdev_path[IBV_SYSFS_PATH_MAX]; /* IB device path for secondary */ + struct ibv_device_attr_ex device_attr; /* Device properties. */ + LIST_ENTRY(mlx5_ibv_shared) mem_event_cb; + /**< Called by memory event callback. */ + struct mlx5_mr_share_cache share_cache; + /* Shared DV/DR flow data section. */ + pthread_mutex_t dv_mutex; /* DV context mutex. */ + uint32_t dv_meta_mask; /* flow META metadata supported mask. */ + uint32_t dv_mark_mask; /* flow MARK metadata supported mask. */ + uint32_t dv_regc0_mask; /* available bits of metatada reg_c[0]. */ + uint32_t dv_refcnt; /* DV/DR data reference counter. */ + void *fdb_domain; /* FDB Direct Rules name space handle. */ + void *rx_domain; /* RX Direct Rules name space handle. */ + void *tx_domain; /* TX Direct Rules name space handle. */ + struct mlx5_hlist *flow_tbls; + /* Direct Rules tables for FDB, NIC TX+RX */ + void *esw_drop_action; /* Pointer to DR E-Switch drop action. */ + void *pop_vlan_action; /* Pointer to DR pop VLAN action. 
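To make the door-bell page bookkeeping above concrete, here is a minimal sketch of locating a free record in dbr_bitmap (illustrative only; the real allocator is mlx5_get_dbr(), declared later in this header):

    // One page holds MLX5_DBR_PER_PAGE (4096 / 8 = 512) 8-byte records,
    // tracked by 512 / 64 = 8 bitmap words, one bit per record.
    static int dbr_find_free(const struct mlx5_devx_dbr_page *page)
    {
        for (unsigned int w = 0; w < MLX5_DBR_BITMAP_SIZE; ++w)
            for (unsigned int b = 0; b < 64; ++b)
                if (!(page->dbr_bitmap[w] & (1ULL << b)))
                    return (int)(w * 64 + b); // byte offset would be this * MLX5_DBR_SIZE
        return -1; // page full: dbr_count == MLX5_DBR_PER_PAGE
    }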
*/ + uint32_t encaps_decaps; /* Encap/decap action indexed memory list. */ + LIST_HEAD(modify_cmd, mlx5_flow_dv_modify_hdr_resource) modify_cmds; + struct mlx5_hlist *tag_table; + uint32_t port_id_action_list; /* List of port ID actions. */ + uint32_t push_vlan_action_list; /* List of push VLAN actions. */ + struct mlx5_flow_counter_mng cmng; /* Counters management structure. */ + struct mlx5_indexed_pool *ipool[MLX5_IPOOL_MAX]; + /* Memory Pool for mlx5 flow resources. */ + /* Shared interrupt handler section. */ + pthread_mutex_t intr_mutex; /* Interrupt config mutex. */ + uint32_t intr_cnt; /* Interrupt handler reference counter. */ + struct rte_intr_handle intr_handle; /* Interrupt handler for device. */ + uint32_t devx_intr_cnt; /* Devx interrupt handler reference counter. */ + struct rte_intr_handle intr_handle_devx; /* DEVX interrupt handler. */ + struct mlx5dv_devx_cmd_comp *devx_comp; /* DEVX async comp obj. */ + struct mlx5_devx_obj *tis; /* TIS object. */ + struct mlx5_devx_obj *td; /* Transport domain. */ + struct mlx5_flow_id_pool *flow_id_pool; /* Flow ID pool. */ + struct mlx5_ibv_shared_port port[]; /* per device port data array. */ +}; + +/* Per-process private structure. */ +struct mlx5_proc_priv { + size_t uar_table_sz; + /* Size of UAR register table. */ + void *uar_table[]; + /* Table of UAR registers for each process. */ +}; + +/* MTR profile list. */ +TAILQ_HEAD(mlx5_mtr_profiles, mlx5_flow_meter_profile); +/* MTR list. */ +TAILQ_HEAD(mlx5_flow_meters, mlx5_flow_meter); + +#define MLX5_PROC_PRIV(port_id) \ + ((struct mlx5_proc_priv *)rte_eth_devices[port_id].process_private) + +struct mlx5_priv { + struct rte_eth_dev_data *dev_data; /* Pointer to device data. */ + struct mlx5_ibv_shared *sh; /* Shared IB device context. */ + uint32_t ibv_port; /* IB device port number. */ + struct rte_pci_device *pci_dev; /* Backend PCI device. */ + struct rte_ether_addr mac[MLX5_MAX_MAC_ADDRESSES]; /* MAC addresses. */ + BITFIELD_DECLARE(mac_own, uint64_t, MLX5_MAX_MAC_ADDRESSES); + /* Bit-field of MAC addresses owned by the PMD. */ + uint16_t vlan_filter[MLX5_MAX_VLAN_IDS]; /* VLAN filters table. */ + unsigned int vlan_filter_n; /* Number of configured VLAN filters. */ + /* Device properties. */ + uint16_t mtu; /* Configured MTU. */ + unsigned int isolated:1; /* Whether isolated mode is enabled. */ + unsigned int representor:1; /* Device is a port representor. */ + unsigned int master:1; /* Device is a E-Switch master. */ + unsigned int dr_shared:1; /* DV/DR data is shared. */ + unsigned int counter_fallback:1; /* Use counter fallback management. */ + unsigned int mtr_en:1; /* Whether support meter. */ + unsigned int mtr_reg_share:1; /* Whether support meter REG_C share. */ + uint16_t domain_id; /* Switch domain identifier. */ + uint16_t vport_id; /* Associated VF vport index (if any). */ + uint32_t vport_meta_tag; /* Used for vport index match ove VF LAG. */ + uint32_t vport_meta_mask; /* Used for vport index field match mask. */ + int32_t representor_id; /* Port representor identifier. */ + int32_t pf_bond; /* >=0 means PF index in bonding configuration. */ + unsigned int if_index; /* Associated kernel network device index. */ + /* RX/TX queues. */ + unsigned int rxqs_n; /* RX queues array size. */ + unsigned int txqs_n; /* TX queues array size. */ + struct mlx5_rxq_data *(*rxqs)[]; /* RX queues. */ + struct mlx5_txq_data *(*txqs)[]; /* TX queues. */ + struct rte_mempool *mprq_mp; /* Mempool for Multi-Packet RQ. */ + struct rte_eth_rss_conf rss_conf; /* RSS configuration. 
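A short usage sketch for the MLX5_PROC_PRIV() macro and the per-process structure defined just above (port_id and txq_idx are placeholder variables introduced only for this illustration; each process obtains its own mapping via mlx5_proc_priv_init(), declared later in this header):

    struct mlx5_proc_priv *ppriv = MLX5_PROC_PRIV(port_id);
    void *uar_reg = NULL;

    if (ppriv != NULL && txq_idx < ppriv->uar_table_sz)
        uar_reg = ppriv->uar_table[txq_idx]; // this process's UAR mapping for the queue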
*/ + unsigned int (*reta_idx)[]; /* RETA index table. */ + unsigned int reta_idx_n; /* RETA index size. */ + struct mlx5_drop drop_queue; /* Flow drop queues. */ + uint32_t flows; /* RTE Flow rules. */ + uint32_t ctrl_flows; /* Control flow rules. */ + void *inter_flows; /* Intermediate resources for flow creation. */ + void *rss_desc; /* Intermediate rss description resources. */ + int flow_idx; /* Intermediate device flow index. */ + int flow_nested_idx; /* Intermediate device flow index, nested. */ + LIST_HEAD(rxq, mlx5_rxq_ctrl) rxqsctrl; /* DPDK Rx queues. */ + LIST_HEAD(rxqobj, mlx5_rxq_obj) rxqsobj; /* Verbs/DevX Rx queues. */ + uint32_t hrxqs; /* Verbs Hash Rx queues. */ + LIST_HEAD(txq, mlx5_txq_ctrl) txqsctrl; /* DPDK Tx queues. */ + LIST_HEAD(txqobj, mlx5_txq_obj) txqsobj; /* Verbs/DevX Tx queues. */ + /* Indirection tables. */ + LIST_HEAD(ind_tables, mlx5_ind_table_obj) ind_tbls; + /* Pointer to next element. */ + rte_atomic32_t refcnt; /**< Reference counter. */ + struct ibv_flow_action *verbs_action; + /**< Verbs modify header action object. */ + uint8_t ft_type; /**< Flow table type, Rx or Tx. */ + uint8_t max_lro_msg_size; + /* Tags resources cache. */ + uint32_t link_speed_capa; /* Link speed capabilities. */ + struct mlx5_xstats_ctrl xstats_ctrl; /* Extended stats control. */ + struct mlx5_stats_ctrl stats_ctrl; /* Stats control. */ + struct mlx5_dev_config config; /* Device configuration. */ + struct mlx5_verbs_alloc_ctx verbs_alloc_ctx; + /* Context for Verbs allocator. */ + int nl_socket_rdma; /* Netlink socket (NETLINK_RDMA). */ + int nl_socket_route; /* Netlink socket (NETLINK_ROUTE). */ + LIST_HEAD(dbrpage, mlx5_devx_dbr_page) dbrpgs; /* Door-bell pages. */ + struct mlx5_nl_vlan_vmwa_context *vmwa_context; /* VLAN WA context. */ + struct mlx5_flow_id_pool *qrss_id_pool; + struct mlx5_hlist *mreg_cp_tbl; + /* Hash table of Rx metadata register copy table. */ + uint8_t mtr_sfx_reg; /* Meter prefix-suffix flow match REG_C. */ + uint8_t mtr_color_reg; /* Meter color match REG_C. */ + struct mlx5_mtr_profiles flow_meter_profiles; /* MTR profile list. */ + struct mlx5_flow_meters flow_meters; /* MTR list. */ +#ifndef RTE_ARCH_64 + rte_spinlock_t uar_lock_cq; /* CQs share a common distinct UAR */ + rte_spinlock_t uar_lock[MLX5_UAR_PAGE_NUM_MAX]; + /* UAR same-page access control required in 32bit implementations. */ +#endif + uint8_t skip_default_rss_reta; /* Skip configuration of default reta. */ + uint8_t fdb_def_rule; /* Whether fdb jump to table 1 is configured. */ + struct mlx5_mp_id mp_id; /* ID of a multi-process process */ + LIST_HEAD(fdir, mlx5_fdir_flow) fdir_flows; /* fdir flows. */ +}; + +#define PORT_ID(priv) ((priv)->dev_data->port_id) +#define ETH_DEV(priv) (&rte_eth_devices[PORT_ID(priv)]) + +/* mlx5.c */ + +int mlx5_getenv_int(const char *); +int mlx5_proc_priv_init(struct rte_eth_dev *dev); +int64_t mlx5_get_dbr(struct rte_eth_dev *dev, + struct mlx5_devx_dbr_page **dbr_page); +int32_t mlx5_release_dbr(struct rte_eth_dev *dev, uint32_t umem_id, + uint64_t offset); +int mlx5_udp_tunnel_port_add(struct rte_eth_dev *dev, + struct rte_eth_udp_tunnel *udp_tunnel); +uint16_t mlx5_eth_find_next(uint16_t port_id, struct rte_pci_device *pci_dev); + +/* Macro to iterate over all valid ports for mlx5 driver. 
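Typical usage (a sketch mirroring how the macro below is used later in this file, e.g. in mlx5_dev_infos_get()): walk every mlx5 port that belongs to one PCI device, such as sibling representors:

    uint16_t port_id;

    MLX5_ETH_FOREACH_DEV(port_id, priv->pci_dev) {
        struct mlx5_priv *opriv =
            rte_eth_devices[port_id].data->dev_private;
        // inspect opriv; iteration stops when port_id reaches RTE_MAX_ETHPORTS
    }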
*/ +#define MLX5_ETH_FOREACH_DEV(port_id, pci_dev) \ + for (port_id = mlx5_eth_find_next(0, pci_dev); \ + port_id < RTE_MAX_ETHPORTS; \ + port_id = mlx5_eth_find_next(port_id + 1, pci_dev)) + +/* mlx5_ethdev.c */ + +int mlx5_get_ifname(const struct rte_eth_dev *dev, char (*ifname)[IF_NAMESIZE]); +int mlx5_get_master_ifname(const char *ibdev_path, char (*ifname)[IF_NAMESIZE]); +unsigned int mlx5_ifindex(const struct rte_eth_dev *dev); +int mlx5_ifreq(const struct rte_eth_dev *dev, int req, struct ifreq *ifr); +int mlx5_get_mtu(struct rte_eth_dev *dev, uint16_t *mtu); +int mlx5_set_flags(struct rte_eth_dev *dev, unsigned int keep, + unsigned int flags); +int mlx5_dev_configure(struct rte_eth_dev *dev); +int mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info); +int mlx5_read_clock(struct rte_eth_dev *dev, uint64_t *clock); +int mlx5_fw_version_get(struct rte_eth_dev *dev, char *fw_ver, size_t fw_size); +const uint32_t *mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev); +int mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete); +int mlx5_force_link_status_change(struct rte_eth_dev *dev, int status); +int mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu); +int mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, + struct rte_eth_fc_conf *fc_conf); +int mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, + struct rte_eth_fc_conf *fc_conf); +void mlx5_dev_link_status_handler(void *arg); +void mlx5_dev_interrupt_handler(void *arg); +void mlx5_dev_interrupt_handler_devx(void *arg); +void mlx5_dev_interrupt_handler_uninstall(struct rte_eth_dev *dev); +void mlx5_dev_interrupt_handler_install(struct rte_eth_dev *dev); +void mlx5_dev_interrupt_handler_devx_uninstall(struct rte_eth_dev *dev); +void mlx5_dev_interrupt_handler_devx_install(struct rte_eth_dev *dev); +int mlx5_set_link_down(struct rte_eth_dev *dev); +int mlx5_set_link_up(struct rte_eth_dev *dev); +int mlx5_is_removed(struct rte_eth_dev *dev); +eth_tx_burst_t mlx5_select_tx_function(struct rte_eth_dev *dev); +eth_rx_burst_t mlx5_select_rx_function(struct rte_eth_dev *dev); +struct mlx5_priv *mlx5_port_to_eswitch_info(uint16_t port, bool valid); +struct mlx5_priv *mlx5_dev_to_eswitch_info(struct rte_eth_dev *dev); +int mlx5_sysfs_switch_info(unsigned int ifindex, + struct mlx5_switch_info *info); +void mlx5_sysfs_check_switch_info(bool device_dir, + struct mlx5_switch_info *switch_info); +void mlx5_translate_port_name(const char *port_name_in, + struct mlx5_switch_info *port_info_out); +void mlx5_intr_callback_unregister(const struct rte_intr_handle *handle, + rte_intr_callback_fn cb_fn, void *cb_arg); +int mlx5_get_module_info(struct rte_eth_dev *dev, + struct rte_eth_dev_module_info *modinfo); +int mlx5_get_module_eeprom(struct rte_eth_dev *dev, + struct rte_dev_eeprom_info *info); +int mlx5_hairpin_cap_get(struct rte_eth_dev *dev, + struct rte_eth_hairpin_cap *cap); +int mlx5_dev_configure_rss_reta(struct rte_eth_dev *dev); + +/* mlx5_mac.c */ + +int mlx5_get_mac(struct rte_eth_dev *dev, uint8_t (*mac)[RTE_ETHER_ADDR_LEN]); +void mlx5_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index); +int mlx5_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac, + uint32_t index, uint32_t vmdq); +struct mlx5_nl_vlan_vmwa_context *mlx5_vlan_vmwa_init + (struct rte_eth_dev *dev, uint32_t ifindex); +int mlx5_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr); +int mlx5_set_mc_addr_list(struct rte_eth_dev *dev, + struct rte_ether_addr *mc_addr_set, + uint32_t nb_mc_addr); + +/* 
mlx5_rss.c */ + +int mlx5_rss_hash_update(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf); +int mlx5_rss_hash_conf_get(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf); +int mlx5_rss_reta_index_resize(struct rte_eth_dev *dev, unsigned int reta_size); +int mlx5_dev_rss_reta_query(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size); +int mlx5_dev_rss_reta_update(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size); + +/* mlx5_rxmode.c */ + +int mlx5_promiscuous_enable(struct rte_eth_dev *dev); +int mlx5_promiscuous_disable(struct rte_eth_dev *dev); +int mlx5_allmulticast_enable(struct rte_eth_dev *dev); +int mlx5_allmulticast_disable(struct rte_eth_dev *dev); + +/* mlx5_stats.c */ + +void mlx5_stats_init(struct rte_eth_dev *dev); +int mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats); +int mlx5_stats_reset(struct rte_eth_dev *dev); +int mlx5_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats, + unsigned int n); +int mlx5_xstats_reset(struct rte_eth_dev *dev); +int mlx5_xstats_get_names(struct rte_eth_dev *dev __rte_unused, + struct rte_eth_xstat_name *xstats_names, + unsigned int n); + +/* mlx5_vlan.c */ + +int mlx5_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on); +void mlx5_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on); +int mlx5_vlan_offload_set(struct rte_eth_dev *dev, int mask); +void mlx5_vlan_vmwa_exit(struct mlx5_nl_vlan_vmwa_context *ctx); +void mlx5_vlan_vmwa_release(struct rte_eth_dev *dev, + struct mlx5_vf_vlan *vf_vlan); +void mlx5_vlan_vmwa_acquire(struct rte_eth_dev *dev, + struct mlx5_vf_vlan *vf_vlan); + +/* mlx5_trigger.c */ + +int mlx5_dev_start(struct rte_eth_dev *dev); +void mlx5_dev_stop(struct rte_eth_dev *dev); +int mlx5_traffic_enable(struct rte_eth_dev *dev); +void mlx5_traffic_disable(struct rte_eth_dev *dev); +int mlx5_traffic_restart(struct rte_eth_dev *dev); + +/* mlx5_flow.c */ + +int mlx5_flow_discover_mreg_c(struct rte_eth_dev *eth_dev); +bool mlx5_flow_ext_mreg_supported(struct rte_eth_dev *dev); +int mlx5_flow_discover_priorities(struct rte_eth_dev *dev); +void mlx5_flow_print(struct rte_flow *flow); +int mlx5_flow_validate(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item items[], + const struct rte_flow_action actions[], + struct rte_flow_error *error); +struct rte_flow *mlx5_flow_create(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item items[], + const struct rte_flow_action actions[], + struct rte_flow_error *error); +int mlx5_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow, + struct rte_flow_error *error); +void mlx5_flow_list_flush(struct rte_eth_dev *dev, uint32_t *list, bool active); +int mlx5_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error); +int mlx5_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow, + const struct rte_flow_action *action, void *data, + struct rte_flow_error *error); +int mlx5_flow_isolate(struct rte_eth_dev *dev, int enable, + struct rte_flow_error *error); +int mlx5_dev_filter_ctrl(struct rte_eth_dev *dev, + enum rte_filter_type filter_type, + enum rte_filter_op filter_op, + void *arg); +int mlx5_flow_start(struct rte_eth_dev *dev, uint32_t *list); +void mlx5_flow_stop(struct rte_eth_dev *dev, uint32_t *list); +int mlx5_flow_start_default(struct rte_eth_dev *dev); +void mlx5_flow_stop_default(struct rte_eth_dev *dev); +void 
mlx5_flow_alloc_intermediate(struct rte_eth_dev *dev); +void mlx5_flow_free_intermediate(struct rte_eth_dev *dev); +int mlx5_flow_verify(struct rte_eth_dev *dev); +int mlx5_ctrl_flow_source_queue(struct rte_eth_dev *dev, uint32_t queue); +int mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev, + struct rte_flow_item_eth *eth_spec, + struct rte_flow_item_eth *eth_mask, + struct rte_flow_item_vlan *vlan_spec, + struct rte_flow_item_vlan *vlan_mask); +int mlx5_ctrl_flow(struct rte_eth_dev *dev, + struct rte_flow_item_eth *eth_spec, + struct rte_flow_item_eth *eth_mask); +struct rte_flow *mlx5_flow_create_esw_table_zero_flow(struct rte_eth_dev *dev); +int mlx5_flow_create_drop_queue(struct rte_eth_dev *dev); +void mlx5_flow_delete_drop_queue(struct rte_eth_dev *dev); +void mlx5_flow_async_pool_query_handle(struct mlx5_ibv_shared *sh, + uint64_t async_id, int status); +void mlx5_set_query_alarm(struct mlx5_ibv_shared *sh); +void mlx5_flow_query_alarm(void *arg); +uint32_t mlx5_counter_alloc(struct rte_eth_dev *dev); +void mlx5_counter_free(struct rte_eth_dev *dev, uint32_t cnt); +int mlx5_counter_query(struct rte_eth_dev *dev, uint32_t cnt, + bool clear, uint64_t *pkts, uint64_t *bytes); +int mlx5_flow_dev_dump(struct rte_eth_dev *dev, FILE *file, + struct rte_flow_error *error); +void mlx5_flow_rxq_dynf_metadata_set(struct rte_eth_dev *dev); +int mlx5_flow_get_aged_flows(struct rte_eth_dev *dev, void **contexts, + uint32_t nb_contexts, struct rte_flow_error *error); + +/* mlx5_mp.c */ +int mlx5_mp_primary_handle(const struct rte_mp_msg *mp_msg, const void *peer); +int mlx5_mp_secondary_handle(const struct rte_mp_msg *mp_msg, const void *peer); +void mlx5_mp_req_start_rxtx(struct rte_eth_dev *dev); +void mlx5_mp_req_stop_rxtx(struct rte_eth_dev *dev); + +/* mlx5_socket.c */ + +int mlx5_pmd_socket_init(void); + +/* mlx5_flow_meter.c */ + +int mlx5_flow_meter_ops_get(struct rte_eth_dev *dev, void *arg); +struct mlx5_flow_meter *mlx5_flow_meter_find(struct mlx5_priv *priv, + uint32_t meter_id); +struct mlx5_flow_meter *mlx5_flow_meter_attach + (struct mlx5_priv *priv, + uint32_t meter_id, + const struct rte_flow_attr *attr, + struct rte_flow_error *error); +void mlx5_flow_meter_detach(struct mlx5_flow_meter *fm); + +#endif /* RTE_PMD_MLX5_H_ */ diff --git a/src/spdk/dpdk/drivers/net/mlx5/mlx5_defs.h b/src/spdk/dpdk/drivers/net/mlx5/mlx5_defs.h new file mode 100644 index 000000000..260f58429 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/mlx5/mlx5_defs.h @@ -0,0 +1,188 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2015 6WIND S.A. + * Copyright 2015 Mellanox Technologies, Ltd + */ + +#ifndef RTE_PMD_MLX5_DEFS_H_ +#define RTE_PMD_MLX5_DEFS_H_ + +#include +#include + +#include "mlx5_autoconf.h" + +/* Reported driver name. */ +#define MLX5_DRIVER_NAME "net_mlx5" + +/* Maximum number of simultaneous VLAN filters. */ +#define MLX5_MAX_VLAN_IDS 128 + +/* + * Request TX completion every time descriptors reach this threshold since + * the previous request. Must be a power of two for performance reasons. + */ +#define MLX5_TX_COMP_THRESH 32u + +/* + * Request TX completion every time the total number of WQEBBs used for inlining + * packets exceeds the size of WQ divided by this divisor. Better to be power of + * two for performance. + */ +#define MLX5_TX_COMP_THRESH_INLINE_DIV (1 << 3) + +/* + * Maximal amount of normal completion CQEs + * processed in one call of tx_burst() routine. + */ +#define MLX5_TX_COMP_MAX_CQE 2u + + +/* Size of per-queue MR cache array for linear search. 
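The power-of-two requirement on MLX5_TX_COMP_THRESH above is what lets the datapath replace a modulo by a mask; a sketch of the idiom (wqe_ci and request_completion() are hypothetical names used only for illustration, not the actual Tx burst code):

    // Ask for a completion every MLX5_TX_COMP_THRESH descriptors.
    if ((wqe_ci & (MLX5_TX_COMP_THRESH - 1)) == 0)
        request_completion();

With a non-power-of-two threshold the same test would need the much more expensive wqe_ci % threshold == 0.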
*/ +#define MLX5_MR_CACHE_N 8 + +/* Size of MR cache table for binary search. */ +#define MLX5_MR_BTREE_CACHE_N 256 + +/* + * If defined, only use software counters. The PMD will never ask the hardware + * for these, and many of them won't be available. + */ +#ifndef MLX5_PMD_SOFT_COUNTERS +#define MLX5_PMD_SOFT_COUNTERS 1 +#endif + +/* Switch port ID parameters for bonding configurations. */ +#define MLX5_PORT_ID_BONDING_PF_MASK 0xf +#define MLX5_PORT_ID_BONDING_PF_SHIFT 0xf + +/* Alarm timeout. */ +#define MLX5_ALARM_TIMEOUT_US 100000 + +/* Maximum number of extended statistics counters. */ +#define MLX5_MAX_XSTATS 32 + +/* Maximum Packet headers size (L2+L3+L4) for TSO. */ +#define MLX5_MAX_TSO_HEADER (128u + 34u) + +/* Inline data size required by NICs. */ +#define MLX5_INLINE_HSIZE_NONE 0 +#define MLX5_INLINE_HSIZE_L2 (sizeof(struct rte_ether_hdr) + \ + sizeof(struct rte_vlan_hdr)) +#define MLX5_INLINE_HSIZE_L3 (MLX5_INLINE_HSIZE_L2 + \ + sizeof(struct rte_ipv6_hdr)) +#define MLX5_INLINE_HSIZE_L4 (MLX5_INLINE_HSIZE_L3 + \ + sizeof(struct rte_tcp_hdr)) +#define MLX5_INLINE_HSIZE_INNER_L2 (MLX5_INLINE_HSIZE_L3 + \ + sizeof(struct rte_udp_hdr) + \ + sizeof(struct rte_vxlan_hdr) + \ + sizeof(struct rte_ether_hdr) + \ + sizeof(struct rte_vlan_hdr)) +#define MLX5_INLINE_HSIZE_INNER_L3 (MLX5_INLINE_HSIZE_INNER_L2 + \ + sizeof(struct rte_ipv6_hdr)) +#define MLX5_INLINE_HSIZE_INNER_L4 (MLX5_INLINE_HSIZE_INNER_L3 + \ + sizeof(struct rte_tcp_hdr)) + +/* Threshold of buffer replenishment for vectorized Rx. */ +#define MLX5_VPMD_RXQ_RPLNSH_THRESH(n) \ + (RTE_MIN(MLX5_VPMD_RX_MAX_BURST, (unsigned int)(n) >> 2)) + +/* Maximum size of burst for vectorized Rx. */ +#define MLX5_VPMD_RX_MAX_BURST 64U + +/* Recommended optimal burst size. */ +#define MLX5_RX_DEFAULT_BURST 64U +#define MLX5_TX_DEFAULT_BURST 64U + +/* Number of packets vectorized Rx can simultaneously process in a loop. */ +#define MLX5_VPMD_DESCS_PER_LOOP 4 + +/* Mask of RSS on source only or destination only. */ +#define MLX5_RSS_SRC_DST_ONLY (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY | \ + ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY) + +/* Supported RSS */ +#define MLX5_RSS_HF_MASK (~(ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP | \ + MLX5_RSS_SRC_DST_ONLY)) + +/* Timeout in seconds to get a valid link status. */ +#define MLX5_LINK_STATUS_TIMEOUT 10 + +/* Number of times to retry retrieving the physical link information. */ +#define MLX5_GET_LINK_STATUS_RETRY_COUNT 3 + +/* Maximum number of UAR pages used by a port, + * These are the size and mask for an array of mutexes used to synchronize + * the access to port's UARs on platforms that do not support 64 bit writes. + * In such systems it is possible to issue the 64 bits DoorBells through two + * consecutive writes, each write 32 bits. The access to a UAR page (which can + * be accessible by all threads in the process) must be synchronized + * (for example, using a semaphore). Such a synchronization is not required + * when ringing DoorBells on different UAR pages. + * A port with 512 Tx queues uses 8, 4kBytes, UAR pages which are shared + * among the ports. + */ +#define MLX5_UAR_PAGE_NUM_MAX 64 +#define MLX5_UAR_PAGE_NUM_MASK ((MLX5_UAR_PAGE_NUM_MAX) - 1) + +/* Fields of memory mapping type in offset parameter of mmap() */ +#define MLX5_UAR_MMAP_CMD_SHIFT 8 +#define MLX5_UAR_MMAP_CMD_MASK 0xff + +/* Environment variable to control the doorbell register mapping. 
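The define that follows names the variable and a per-architecture default. A minimal sketch of how such a default can be applied without clobbering a user-provided setting (illustrative; the exact call site is not part of this header):

    #include <stdlib.h>

    if (getenv(MLX5_SHUT_UP_BF) == NULL)
        setenv(MLX5_SHUT_UP_BF, MLX5_SHUT_UP_BF_DEFAULT, 0); // 0 = do not overwrite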
*/ +#define MLX5_SHUT_UP_BF "MLX5_SHUT_UP_BF" +#if defined(RTE_ARCH_ARM64) +#define MLX5_SHUT_UP_BF_DEFAULT "0" +#else +#define MLX5_SHUT_UP_BF_DEFAULT "1" +#endif + +#ifndef HAVE_MLX5DV_MMAP_GET_NC_PAGES_CMD +#define MLX5_MMAP_GET_NC_PAGES_CMD 3 +#endif + +/* Log 2 of the default number of strides per WQE for Multi-Packet RQ. */ +#define MLX5_MPRQ_STRIDE_NUM_N 6U + +/* Log 2 of the default size of a stride per WQE for Multi-Packet RQ. */ +#define MLX5_MPRQ_STRIDE_SIZE_N 11U + +/* Two-byte shift is disabled for Multi-Packet RQ. */ +#define MLX5_MPRQ_TWO_BYTE_SHIFT 0 + +/* + * Minimum size of packet to be memcpy'd instead of being attached as an + * external buffer. + */ +#define MLX5_MPRQ_MEMCPY_DEFAULT_LEN 128 + +/* Minimum number Rx queues to enable Multi-Packet RQ. */ +#define MLX5_MPRQ_MIN_RXQS 12 + +/* Cache size of mempool for Multi-Packet RQ. */ +#define MLX5_MPRQ_MP_CACHE_SZ 32U + +/* MLX5_DV_XMETA_EN supported values. */ +#define MLX5_XMETA_MODE_LEGACY 0 +#define MLX5_XMETA_MODE_META16 1 +#define MLX5_XMETA_MODE_META32 2 + +/* MLX5_TX_DB_NC supported values. */ +#define MLX5_TXDB_CACHED 0 +#define MLX5_TXDB_NCACHED 1 +#define MLX5_TXDB_HEURISTIC 2 + +/* Size of the simple hash table for metadata register table. */ +#define MLX5_FLOW_MREG_HTABLE_SZ 4096 +#define MLX5_FLOW_MREG_HNAME "MARK_COPY_TABLE" +#define MLX5_DEFAULT_COPY_ID UINT32_MAX + +/* Hairpin TX/RX queue configuration parameters. */ +#define MLX5_HAIRPIN_QUEUE_STRIDE 6 +#define MLX5_HAIRPIN_JUMBO_LOG_SIZE (14 + 2) + +/* Definition of static_assert found in /usr/include/assert.h */ +#ifndef HAVE_STATIC_ASSERT +#define static_assert _Static_assert +#endif + +#endif /* RTE_PMD_MLX5_DEFS_H_ */ diff --git a/src/spdk/dpdk/drivers/net/mlx5/mlx5_ethdev.c b/src/spdk/dpdk/drivers/net/mlx5/mlx5_ethdev.c new file mode 100644 index 000000000..47f11b963 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/mlx5/mlx5_ethdev.c @@ -0,0 +1,2071 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2015 6WIND S.A. + * Copyright 2015 Mellanox Technologies, Ltd + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "mlx5.h" +#include "mlx5_rxtx.h" +#include "mlx5_utils.h" + +/* Supported speed values found in /usr/include/linux/ethtool.h */ +#ifndef HAVE_SUPPORTED_40000baseKR4_Full +#define SUPPORTED_40000baseKR4_Full (1 << 23) +#endif +#ifndef HAVE_SUPPORTED_40000baseCR4_Full +#define SUPPORTED_40000baseCR4_Full (1 << 24) +#endif +#ifndef HAVE_SUPPORTED_40000baseSR4_Full +#define SUPPORTED_40000baseSR4_Full (1 << 25) +#endif +#ifndef HAVE_SUPPORTED_40000baseLR4_Full +#define SUPPORTED_40000baseLR4_Full (1 << 26) +#endif +#ifndef HAVE_SUPPORTED_56000baseKR4_Full +#define SUPPORTED_56000baseKR4_Full (1 << 27) +#endif +#ifndef HAVE_SUPPORTED_56000baseCR4_Full +#define SUPPORTED_56000baseCR4_Full (1 << 28) +#endif +#ifndef HAVE_SUPPORTED_56000baseSR4_Full +#define SUPPORTED_56000baseSR4_Full (1 << 29) +#endif +#ifndef HAVE_SUPPORTED_56000baseLR4_Full +#define SUPPORTED_56000baseLR4_Full (1 << 30) +#endif + +/* Add defines in case the running kernel is not the same as user headers. 
*/ +#ifndef ETHTOOL_GLINKSETTINGS +struct ethtool_link_settings { + uint32_t cmd; + uint32_t speed; + uint8_t duplex; + uint8_t port; + uint8_t phy_address; + uint8_t autoneg; + uint8_t mdio_support; + uint8_t eth_to_mdix; + uint8_t eth_tp_mdix_ctrl; + int8_t link_mode_masks_nwords; + uint32_t reserved[8]; + uint32_t link_mode_masks[]; +}; + +/* The kernel values can be found in /include/uapi/linux/ethtool.h */ +#define ETHTOOL_GLINKSETTINGS 0x0000004c +#define ETHTOOL_LINK_MODE_1000baseT_Full_BIT 5 +#define ETHTOOL_LINK_MODE_Autoneg_BIT 6 +#define ETHTOOL_LINK_MODE_1000baseKX_Full_BIT 17 +#define ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT 18 +#define ETHTOOL_LINK_MODE_10000baseKR_Full_BIT 19 +#define ETHTOOL_LINK_MODE_10000baseR_FEC_BIT 20 +#define ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT 21 +#define ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT 22 +#define ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT 23 +#define ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT 24 +#define ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT 25 +#define ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT 26 +#define ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT 27 +#define ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT 28 +#define ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT 29 +#define ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT 30 +#endif +#ifndef HAVE_ETHTOOL_LINK_MODE_25G +#define ETHTOOL_LINK_MODE_25000baseCR_Full_BIT 31 +#define ETHTOOL_LINK_MODE_25000baseKR_Full_BIT 32 +#define ETHTOOL_LINK_MODE_25000baseSR_Full_BIT 33 +#endif +#ifndef HAVE_ETHTOOL_LINK_MODE_50G +#define ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT 34 +#define ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT 35 +#endif +#ifndef HAVE_ETHTOOL_LINK_MODE_100G +#define ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT 36 +#define ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT 37 +#define ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT 38 +#define ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT 39 +#endif +#ifndef HAVE_ETHTOOL_LINK_MODE_200G +#define ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT 62 +#define ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT 63 +#define ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT 0 /* 64 - 64 */ +#define ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT 1 /* 65 - 64 */ +#define ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT 2 /* 66 - 64 */ +#endif + +/** + * Get master interface name from private structure. + * + * @param[in] dev + * Pointer to Ethernet device. + * @param[out] ifname + * Interface name output buffer. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_get_master_ifname(const char *ibdev_path, char (*ifname)[IF_NAMESIZE]) +{ + DIR *dir; + struct dirent *dent; + unsigned int dev_type = 0; + unsigned int dev_port_prev = ~0u; + char match[IF_NAMESIZE] = ""; + + MLX5_ASSERT(ibdev_path); + { + MKSTR(path, "%s/device/net", ibdev_path); + + dir = opendir(path); + if (dir == NULL) { + rte_errno = errno; + return -rte_errno; + } + } + while ((dent = readdir(dir)) != NULL) { + char *name = dent->d_name; + FILE *file; + unsigned int dev_port; + int r; + + if ((name[0] == '.') && + ((name[1] == '\0') || + ((name[1] == '.') && (name[2] == '\0')))) + continue; + + MKSTR(path, "%s/device/net/%s/%s", + ibdev_path, name, + (dev_type ? "dev_id" : "dev_port")); + + file = fopen(path, "rb"); + if (file == NULL) { + if (errno != ENOENT) + continue; + /* + * Switch to dev_id when dev_port does not exist as + * is the case with Linux kernel versions < 3.15. 
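Concretely (an illustrative example, not taken from the code): with an ibdev_path typically of the form "/sys/class/infiniband/mlx5_0", the loop above reads entries such as

    /sys/class/infiniband/mlx5_0/device/net/enp1s0f0/dev_port

and remembers the netdev whose dev_port is 0; only when dev_port is missing (older kernels), or reports the same value for every port, does the scan restart using dev_id instead.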
+ */ +try_dev_id: + match[0] = '\0'; + if (dev_type) + break; + dev_type = 1; + dev_port_prev = ~0u; + rewinddir(dir); + continue; + } + r = fscanf(file, (dev_type ? "%x" : "%u"), &dev_port); + fclose(file); + if (r != 1) + continue; + /* + * Switch to dev_id when dev_port returns the same value for + * all ports. May happen when using a MOFED release older than + * 3.0 with a Linux kernel >= 3.15. + */ + if (dev_port == dev_port_prev) + goto try_dev_id; + dev_port_prev = dev_port; + if (dev_port == 0) + strlcpy(match, name, sizeof(match)); + } + closedir(dir); + if (match[0] == '\0') { + rte_errno = ENOENT; + return -rte_errno; + } + strncpy(*ifname, match, sizeof(*ifname)); + return 0; +} + +/** + * Get interface name from private structure. + * + * This is a port representor-aware version of mlx5_get_master_ifname(). + * + * @param[in] dev + * Pointer to Ethernet device. + * @param[out] ifname + * Interface name output buffer. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_get_ifname(const struct rte_eth_dev *dev, char (*ifname)[IF_NAMESIZE]) +{ + struct mlx5_priv *priv = dev->data->dev_private; + unsigned int ifindex; + + MLX5_ASSERT(priv); + MLX5_ASSERT(priv->sh); + ifindex = mlx5_ifindex(dev); + if (!ifindex) { + if (!priv->representor) + return mlx5_get_master_ifname(priv->sh->ibdev_path, + ifname); + rte_errno = ENXIO; + return -rte_errno; + } + if (if_indextoname(ifindex, &(*ifname)[0])) + return 0; + rte_errno = errno; + return -rte_errno; +} + +/** + * Get the interface index from device name. + * + * @param[in] dev + * Pointer to Ethernet device. + * + * @return + * Nonzero interface index on success, zero otherwise and rte_errno is set. + */ +unsigned int +mlx5_ifindex(const struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + unsigned int ifindex; + + MLX5_ASSERT(priv); + MLX5_ASSERT(priv->if_index); + ifindex = priv->if_index; + if (!ifindex) + rte_errno = ENXIO; + return ifindex; +} + +/** + * Perform ifreq ioctl() on associated Ethernet device. + * + * @param[in] dev + * Pointer to Ethernet device. + * @param req + * Request number to pass to ioctl(). + * @param[out] ifr + * Interface request structure output buffer. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_ifreq(const struct rte_eth_dev *dev, int req, struct ifreq *ifr) +{ + int sock = socket(PF_INET, SOCK_DGRAM, IPPROTO_IP); + int ret = 0; + + if (sock == -1) { + rte_errno = errno; + return -rte_errno; + } + ret = mlx5_get_ifname(dev, &ifr->ifr_name); + if (ret) + goto error; + ret = ioctl(sock, req, ifr); + if (ret == -1) { + rte_errno = errno; + goto error; + } + close(sock); + return 0; +error: + close(sock); + return -rte_errno; +} + +/** + * Get device MTU. + * + * @param dev + * Pointer to Ethernet device. + * @param[out] mtu + * MTU value output buffer. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_get_mtu(struct rte_eth_dev *dev, uint16_t *mtu) +{ + struct ifreq request; + int ret = mlx5_ifreq(dev, SIOCGIFMTU, &request); + + if (ret) + return ret; + *mtu = request.ifr_mtu; + return 0; +} + +/** + * Set device MTU. + * + * @param dev + * Pointer to Ethernet device. + * @param mtu + * MTU value to set. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. 
+ */ +static int +mlx5_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) +{ + struct ifreq request = { .ifr_mtu = mtu, }; + + return mlx5_ifreq(dev, SIOCSIFMTU, &request); +} + +/** + * Set device flags. + * + * @param dev + * Pointer to Ethernet device. + * @param keep + * Bitmask for flags that must remain untouched. + * @param flags + * Bitmask for flags to modify. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_set_flags(struct rte_eth_dev *dev, unsigned int keep, unsigned int flags) +{ + struct ifreq request; + int ret = mlx5_ifreq(dev, SIOCGIFFLAGS, &request); + + if (ret) + return ret; + request.ifr_flags &= keep; + request.ifr_flags |= flags & ~keep; + return mlx5_ifreq(dev, SIOCSIFFLAGS, &request); +} + +/** + * DPDK callback for Ethernet device configuration. + * + * @param dev + * Pointer to Ethernet device structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_dev_configure(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + unsigned int rxqs_n = dev->data->nb_rx_queues; + unsigned int txqs_n = dev->data->nb_tx_queues; + const uint8_t use_app_rss_key = + !!dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key; + int ret = 0; + + if (use_app_rss_key && + (dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len != + MLX5_RSS_HASH_KEY_LEN)) { + DRV_LOG(ERR, "port %u RSS key len must be %s Bytes long", + dev->data->port_id, RTE_STR(MLX5_RSS_HASH_KEY_LEN)); + rte_errno = EINVAL; + return -rte_errno; + } + priv->rss_conf.rss_key = + rte_realloc(priv->rss_conf.rss_key, + MLX5_RSS_HASH_KEY_LEN, 0); + if (!priv->rss_conf.rss_key) { + DRV_LOG(ERR, "port %u cannot allocate RSS hash key memory (%u)", + dev->data->port_id, rxqs_n); + rte_errno = ENOMEM; + return -rte_errno; + } + + if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) + dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH; + + memcpy(priv->rss_conf.rss_key, + use_app_rss_key ? + dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key : + rss_hash_default_key, + MLX5_RSS_HASH_KEY_LEN); + priv->rss_conf.rss_key_len = MLX5_RSS_HASH_KEY_LEN; + priv->rss_conf.rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf; + priv->rxqs = (void *)dev->data->rx_queues; + priv->txqs = (void *)dev->data->tx_queues; + if (txqs_n != priv->txqs_n) { + DRV_LOG(INFO, "port %u Tx queues number update: %u -> %u", + dev->data->port_id, priv->txqs_n, txqs_n); + priv->txqs_n = txqs_n; + } + if (rxqs_n > priv->config.ind_table_max_size) { + DRV_LOG(ERR, "port %u cannot handle this many Rx queues (%u)", + dev->data->port_id, rxqs_n); + rte_errno = EINVAL; + return -rte_errno; + } + if (rxqs_n != priv->rxqs_n) { + DRV_LOG(INFO, "port %u Rx queues number update: %u -> %u", + dev->data->port_id, priv->rxqs_n, rxqs_n); + priv->rxqs_n = rxqs_n; + } + priv->skip_default_rss_reta = 0; + ret = mlx5_proc_priv_init(dev); + if (ret) + return ret; + return 0; +} + +/** + * Configure default RSS reta. + * + * @param dev + * Pointer to Ethernet device structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. 
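A worked example of the sizing and padding logic in the body below (the numbers are illustrative; ind_table_max_size comes from the device): with 6 standard Rx queues, 6 is not a power of two, so the RETA is stretched to the maximum indirection table size, say 512 entries, and filled round-robin:

    reta_idx[0..5]  = 0, 1, 2, 3, 4, 5
    reta_idx[6..11] = 0, 1, 2, 3, 4, 5
    ...and so on up to entry 511

With 8 queues (a power of two) the table would instead hold exactly 8 entries, one per queue, so no work queue is reused and the hash spread stays uniform.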
+ */ +int +mlx5_dev_configure_rss_reta(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + unsigned int rxqs_n = dev->data->nb_rx_queues; + unsigned int i; + unsigned int j; + unsigned int reta_idx_n; + int ret = 0; + unsigned int *rss_queue_arr = NULL; + unsigned int rss_queue_n = 0; + + if (priv->skip_default_rss_reta) + return ret; + rss_queue_arr = rte_malloc("", rxqs_n * sizeof(unsigned int), 0); + if (!rss_queue_arr) { + DRV_LOG(ERR, "port %u cannot allocate RSS queue list (%u)", + dev->data->port_id, rxqs_n); + rte_errno = ENOMEM; + return -rte_errno; + } + for (i = 0, j = 0; i < rxqs_n; i++) { + struct mlx5_rxq_data *rxq_data; + struct mlx5_rxq_ctrl *rxq_ctrl; + + rxq_data = (*priv->rxqs)[i]; + rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq); + if (rxq_ctrl && rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD) + rss_queue_arr[j++] = i; + } + rss_queue_n = j; + if (rss_queue_n > priv->config.ind_table_max_size) { + DRV_LOG(ERR, "port %u cannot handle this many Rx queues (%u)", + dev->data->port_id, rss_queue_n); + rte_errno = EINVAL; + rte_free(rss_queue_arr); + return -rte_errno; + } + DRV_LOG(INFO, "port %u Rx queues number update: %u -> %u", + dev->data->port_id, priv->rxqs_n, rxqs_n); + priv->rxqs_n = rxqs_n; + /* + * If the requested number of RX queues is not a power of two, + * use the maximum indirection table size for better balancing. + * The result is always rounded to the next power of two. + */ + reta_idx_n = (1 << log2above((rss_queue_n & (rss_queue_n - 1)) ? + priv->config.ind_table_max_size : + rss_queue_n)); + ret = mlx5_rss_reta_index_resize(dev, reta_idx_n); + if (ret) { + rte_free(rss_queue_arr); + return ret; + } + /* + * When the number of RX queues is not a power of two, + * the remaining table entries are padded with reused WQs + * and hashes are not spread uniformly. + */ + for (i = 0, j = 0; (i != reta_idx_n); ++i) { + (*priv->reta_idx)[i] = rss_queue_arr[j]; + if (++j == rss_queue_n) + j = 0; + } + rte_free(rss_queue_arr); + return ret; +} + +/** + * Sets default tuning parameters. + * + * @param dev + * Pointer to Ethernet device. + * @param[out] info + * Info structure output buffer. + */ +static void +mlx5_set_default_params(struct rte_eth_dev *dev, struct rte_eth_dev_info *info) +{ + struct mlx5_priv *priv = dev->data->dev_private; + + /* Minimum CPU utilization. */ + info->default_rxportconf.ring_size = 256; + info->default_txportconf.ring_size = 256; + info->default_rxportconf.burst_size = MLX5_RX_DEFAULT_BURST; + info->default_txportconf.burst_size = MLX5_TX_DEFAULT_BURST; + if ((priv->link_speed_capa & ETH_LINK_SPEED_200G) | + (priv->link_speed_capa & ETH_LINK_SPEED_100G)) { + info->default_rxportconf.nb_queues = 16; + info->default_txportconf.nb_queues = 16; + if (dev->data->nb_rx_queues > 2 || + dev->data->nb_tx_queues > 2) { + /* Max Throughput. */ + info->default_rxportconf.ring_size = 2048; + info->default_txportconf.ring_size = 2048; + } + } else { + info->default_rxportconf.nb_queues = 8; + info->default_txportconf.nb_queues = 8; + if (dev->data->nb_rx_queues > 2 || + dev->data->nb_tx_queues > 2) { + /* Max Throughput. */ + info->default_rxportconf.ring_size = 4096; + info->default_txportconf.ring_size = 4096; + } + } +} + +/** + * Sets tx mbuf limiting parameters. + * + * @param dev + * Pointer to Ethernet device. + * @param[out] info + * Info structure output buffer. 
+ */ +static void +mlx5_set_txlimit_params(struct rte_eth_dev *dev, struct rte_eth_dev_info *info) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_dev_config *config = &priv->config; + unsigned int inlen; + uint16_t nb_max; + + inlen = (config->txq_inline_max == MLX5_ARG_UNSET) ? + MLX5_SEND_DEF_INLINE_LEN : + (unsigned int)config->txq_inline_max; + MLX5_ASSERT(config->txq_inline_min >= 0); + inlen = RTE_MAX(inlen, (unsigned int)config->txq_inline_min); + inlen = RTE_MIN(inlen, MLX5_WQE_SIZE_MAX + + MLX5_ESEG_MIN_INLINE_SIZE - + MLX5_WQE_CSEG_SIZE - + MLX5_WQE_ESEG_SIZE - + MLX5_WQE_DSEG_SIZE * 2); + nb_max = (MLX5_WQE_SIZE_MAX + + MLX5_ESEG_MIN_INLINE_SIZE - + MLX5_WQE_CSEG_SIZE - + MLX5_WQE_ESEG_SIZE - + MLX5_WQE_DSEG_SIZE - + inlen) / MLX5_WSEG_SIZE; + info->tx_desc_lim.nb_seg_max = nb_max; + info->tx_desc_lim.nb_mtu_seg_max = nb_max; +} + +/** + * DPDK callback to get information about the device. + * + * @param dev + * Pointer to Ethernet device structure. + * @param[out] info + * Info structure output buffer. + */ +int +mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_dev_config *config = &priv->config; + unsigned int max; + + /* FIXME: we should ask the device for these values. */ + info->min_rx_bufsize = 32; + info->max_rx_pktlen = 65536; + info->max_lro_pkt_size = MLX5_MAX_LRO_SIZE; + /* + * Since we need one CQ per QP, the limit is the minimum number + * between the two values. + */ + max = RTE_MIN(priv->sh->device_attr.orig_attr.max_cq, + priv->sh->device_attr.orig_attr.max_qp); + /* max_rx_queues is uint16_t. */ + max = RTE_MIN(max, (unsigned int)UINT16_MAX); + info->max_rx_queues = max; + info->max_tx_queues = max; + info->max_mac_addrs = MLX5_MAX_UC_MAC_ADDRESSES; + info->rx_queue_offload_capa = mlx5_get_rx_queue_offloads(dev); + info->rx_offload_capa = (mlx5_get_rx_port_offloads() | + info->rx_queue_offload_capa); + info->tx_offload_capa = mlx5_get_tx_port_offloads(dev); + info->if_index = mlx5_ifindex(dev); + info->reta_size = priv->reta_idx_n ? + priv->reta_idx_n : config->ind_table_max_size; + info->hash_key_size = MLX5_RSS_HASH_KEY_LEN; + info->speed_capa = priv->link_speed_capa; + info->flow_type_rss_offloads = ~MLX5_RSS_HF_MASK; + mlx5_set_default_params(dev, info); + mlx5_set_txlimit_params(dev, info); + info->switch_info.name = dev->data->name; + info->switch_info.domain_id = priv->domain_id; + info->switch_info.port_id = priv->representor_id; + if (priv->representor) { + uint16_t port_id; + + if (priv->pf_bond >= 0) { + /* + * Switch port ID is opaque value with driver defined + * format. Push the PF index in bonding configurations + * in upper four bits of port ID. If we get too many + * representors (more than 4K) or PFs (more than 15) + * this approach must be reconsidered. + */ + if ((info->switch_info.port_id >> + MLX5_PORT_ID_BONDING_PF_SHIFT) || + priv->pf_bond > MLX5_PORT_ID_BONDING_PF_MASK) { + DRV_LOG(ERR, "can't update switch port ID" + " for bonding device"); + MLX5_ASSERT(false); + return -ENODEV; + } + info->switch_info.port_id |= + priv->pf_bond << MLX5_PORT_ID_BONDING_PF_SHIFT; + } + MLX5_ETH_FOREACH_DEV(port_id, priv->pci_dev) { + struct mlx5_priv *opriv = + rte_eth_devices[port_id].data->dev_private; + + if (!opriv || + opriv->representor || + opriv->sh != priv->sh || + opriv->domain_id != priv->domain_id) + continue; + /* + * Override switch name with that of the master + * device. 
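For the bonding encoding a few lines above, a quick worked example using the MLX5_PORT_ID_BONDING_PF_* values from mlx5_defs.h (a sketch): representor 5 on PF 1 is reported as

    5 | (1 << MLX5_PORT_ID_BONDING_PF_SHIFT) == 5 | 0x8000 == 0x8005

which is why the code first checks that the plain representor ID has no bits set at or above the PF shift and that the PF index fits within MLX5_PORT_ID_BONDING_PF_MASK (0xf).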
+ */ + info->switch_info.name = opriv->dev_data->name; + break; + } + } + return 0; +} + +/** + * Get device current raw clock counter + * + * @param dev + * Pointer to Ethernet device structure. + * @param[out] time + * Current raw clock counter of the device. + * + * @return + * 0 if the clock has correctly been read + * The value of errno in case of error + */ +int +mlx5_read_clock(struct rte_eth_dev *dev, uint64_t *clock) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct ibv_context *ctx = priv->sh->ctx; + struct ibv_values_ex values; + int err = 0; + + values.comp_mask = IBV_VALUES_MASK_RAW_CLOCK; + err = mlx5_glue->query_rt_values_ex(ctx, &values); + if (err != 0) { + DRV_LOG(WARNING, "Could not query the clock !"); + return err; + } + *clock = values.raw_clock.tv_nsec; + return 0; +} + +/** + * Get firmware version of a device. + * + * @param dev + * Ethernet device port. + * @param fw_ver + * String output allocated by caller. + * @param fw_size + * Size of the output string, including terminating null byte. + * + * @return + * 0 on success, or the size of the non truncated string if too big. + */ +int mlx5_fw_version_get(struct rte_eth_dev *dev, char *fw_ver, size_t fw_size) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct ibv_device_attr *attr = &priv->sh->device_attr.orig_attr; + size_t size = strnlen(attr->fw_ver, sizeof(attr->fw_ver)) + 1; + + if (fw_size < size) + return size; + if (fw_ver != NULL) + strlcpy(fw_ver, attr->fw_ver, fw_size); + return 0; +} + +/** + * Get supported packet types. + * + * @param dev + * Pointer to Ethernet device structure. + * + * @return + * A pointer to the supported Packet types array. + */ +const uint32_t * +mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev) +{ + static const uint32_t ptypes[] = { + /* refers to rxq_cq_to_pkt_type() */ + RTE_PTYPE_L2_ETHER, + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN, + RTE_PTYPE_L3_IPV6_EXT_UNKNOWN, + RTE_PTYPE_L4_NONFRAG, + RTE_PTYPE_L4_FRAG, + RTE_PTYPE_L4_TCP, + RTE_PTYPE_L4_UDP, + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN, + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN, + RTE_PTYPE_INNER_L4_NONFRAG, + RTE_PTYPE_INNER_L4_FRAG, + RTE_PTYPE_INNER_L4_TCP, + RTE_PTYPE_INNER_L4_UDP, + RTE_PTYPE_UNKNOWN + }; + + if (dev->rx_pkt_burst == mlx5_rx_burst || + dev->rx_pkt_burst == mlx5_rx_burst_mprq || + dev->rx_pkt_burst == mlx5_rx_burst_vec) + return ptypes; + return NULL; +} + +/** + * Retrieve the master device for representor in the same switch domain. + * + * @param dev + * Pointer to representor Ethernet device structure. + * + * @return + * Master device structure on success, NULL otherwise. + */ + +static struct rte_eth_dev * +mlx5_find_master_dev(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv; + uint16_t port_id; + uint16_t domain_id; + + priv = dev->data->dev_private; + domain_id = priv->domain_id; + MLX5_ASSERT(priv->representor); + MLX5_ETH_FOREACH_DEV(port_id, priv->pci_dev) { + struct mlx5_priv *opriv = + rte_eth_devices[port_id].data->dev_private; + if (opriv && + opriv->master && + opriv->domain_id == domain_id && + opriv->sh == priv->sh) + return &rte_eth_devices[port_id]; + } + return NULL; +} + +/** + * DPDK callback to retrieve physical link information. + * + * @param dev + * Pointer to Ethernet device structure. + * @param[out] link + * Storage for current link status. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. 
+ */ +static int +mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev, + struct rte_eth_link *link) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct ethtool_cmd edata = { + .cmd = ETHTOOL_GSET /* Deprecated since Linux v4.5. */ + }; + struct ifreq ifr; + struct rte_eth_link dev_link; + int link_speed = 0; + int ret; + + ret = mlx5_ifreq(dev, SIOCGIFFLAGS, &ifr); + if (ret) { + DRV_LOG(WARNING, "port %u ioctl(SIOCGIFFLAGS) failed: %s", + dev->data->port_id, strerror(rte_errno)); + return ret; + } + dev_link = (struct rte_eth_link) { + .link_status = ((ifr.ifr_flags & IFF_UP) && + (ifr.ifr_flags & IFF_RUNNING)), + }; + ifr = (struct ifreq) { + .ifr_data = (void *)&edata, + }; + ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr); + if (ret) { + if (ret == -ENOTSUP && priv->representor) { + struct rte_eth_dev *master; + + /* + * For representors we can try to inherit link + * settings from the master device. Actually + * link settings do not make a lot of sense + * for representors due to missing physical + * link. The old kernel drivers supported + * emulated settings query for representors, + * the new ones do not, so we have to add + * this code for compatibility issues. + */ + master = mlx5_find_master_dev(dev); + if (master) { + ifr = (struct ifreq) { + .ifr_data = (void *)&edata, + }; + ret = mlx5_ifreq(master, SIOCETHTOOL, &ifr); + } + } + if (ret) { + DRV_LOG(WARNING, + "port %u ioctl(SIOCETHTOOL," + " ETHTOOL_GSET) failed: %s", + dev->data->port_id, strerror(rte_errno)); + return ret; + } + } + link_speed = ethtool_cmd_speed(&edata); + if (link_speed == -1) + dev_link.link_speed = ETH_SPEED_NUM_NONE; + else + dev_link.link_speed = link_speed; + priv->link_speed_capa = 0; + if (edata.supported & SUPPORTED_Autoneg) + priv->link_speed_capa |= ETH_LINK_SPEED_AUTONEG; + if (edata.supported & (SUPPORTED_1000baseT_Full | + SUPPORTED_1000baseKX_Full)) + priv->link_speed_capa |= ETH_LINK_SPEED_1G; + if (edata.supported & SUPPORTED_10000baseKR_Full) + priv->link_speed_capa |= ETH_LINK_SPEED_10G; + if (edata.supported & (SUPPORTED_40000baseKR4_Full | + SUPPORTED_40000baseCR4_Full | + SUPPORTED_40000baseSR4_Full | + SUPPORTED_40000baseLR4_Full)) + priv->link_speed_capa |= ETH_LINK_SPEED_40G; + dev_link.link_duplex = ((edata.duplex == DUPLEX_HALF) ? + ETH_LINK_HALF_DUPLEX : ETH_LINK_FULL_DUPLEX); + dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds & + ETH_LINK_SPEED_FIXED); + if (((dev_link.link_speed && !dev_link.link_status) || + (!dev_link.link_speed && dev_link.link_status))) { + rte_errno = EAGAIN; + return -rte_errno; + } + *link = dev_link; + return 0; +} + +/** + * Retrieve physical link information (unlocked version using new ioctl). + * + * @param dev + * Pointer to Ethernet device structure. + * @param[out] link + * Storage for current link status. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. 
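The body below follows the two-pass ETHTOOL_GLINKSETTINGS handshake (summarized here for orientation; the authoritative description is the kernel ethtool ABI):

    // pass 1: link_mode_masks_nwords == 0; the kernel answers with -N,
    //         where N is the number of 32-bit words per link-mode mask
    // pass 2: resend with link_mode_masks_nwords = N and room for
    //         3 * N words (supported, advertising, lp_advertising masks)

which is why the code negates gcmd.link_mode_masks_nwords and sizes the local buffer as sizeof(uint32_t) * nwords * 3 before issuing the second ioctl.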
+ */ +static int +mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev, + struct rte_eth_link *link) + +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct ethtool_link_settings gcmd = { .cmd = ETHTOOL_GLINKSETTINGS }; + struct ifreq ifr; + struct rte_eth_link dev_link; + struct rte_eth_dev *master = NULL; + uint64_t sc; + int ret; + + ret = mlx5_ifreq(dev, SIOCGIFFLAGS, &ifr); + if (ret) { + DRV_LOG(WARNING, "port %u ioctl(SIOCGIFFLAGS) failed: %s", + dev->data->port_id, strerror(rte_errno)); + return ret; + } + dev_link = (struct rte_eth_link) { + .link_status = ((ifr.ifr_flags & IFF_UP) && + (ifr.ifr_flags & IFF_RUNNING)), + }; + ifr = (struct ifreq) { + .ifr_data = (void *)&gcmd, + }; + ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr); + if (ret) { + if (ret == -ENOTSUP && priv->representor) { + /* + * For representors we can try to inherit link + * settings from the master device. Actually + * link settings do not make a lot of sense + * for representors due to missing physical + * link. The old kernel drivers supported + * emulated settings query for representors, + * the new ones do not, so we have to add + * this code for compatibility issues. + */ + master = mlx5_find_master_dev(dev); + if (master) { + ifr = (struct ifreq) { + .ifr_data = (void *)&gcmd, + }; + ret = mlx5_ifreq(master, SIOCETHTOOL, &ifr); + } + } + if (ret) { + DRV_LOG(DEBUG, + "port %u ioctl(SIOCETHTOOL," + " ETHTOOL_GLINKSETTINGS) failed: %s", + dev->data->port_id, strerror(rte_errno)); + return ret; + } + + } + gcmd.link_mode_masks_nwords = -gcmd.link_mode_masks_nwords; + + alignas(struct ethtool_link_settings) + uint8_t data[offsetof(struct ethtool_link_settings, link_mode_masks) + + sizeof(uint32_t) * gcmd.link_mode_masks_nwords * 3]; + struct ethtool_link_settings *ecmd = (void *)data; + + *ecmd = gcmd; + ifr.ifr_data = (void *)ecmd; + ret = mlx5_ifreq(master ? master : dev, SIOCETHTOOL, &ifr); + if (ret) { + DRV_LOG(DEBUG, + "port %u ioctl(SIOCETHTOOL," + "ETHTOOL_GLINKSETTINGS) failed: %s", + dev->data->port_id, strerror(rte_errno)); + return ret; + } + dev_link.link_speed = (ecmd->speed == UINT32_MAX) ? 
ETH_SPEED_NUM_NONE : + ecmd->speed; + sc = ecmd->link_mode_masks[0] | + ((uint64_t)ecmd->link_mode_masks[1] << 32); + priv->link_speed_capa = 0; + if (sc & MLX5_BITSHIFT(ETHTOOL_LINK_MODE_Autoneg_BIT)) + priv->link_speed_capa |= ETH_LINK_SPEED_AUTONEG; + if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_1000baseT_Full_BIT) | + MLX5_BITSHIFT(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT))) + priv->link_speed_capa |= ETH_LINK_SPEED_1G; + if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT) | + MLX5_BITSHIFT(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT) | + MLX5_BITSHIFT(ETHTOOL_LINK_MODE_10000baseR_FEC_BIT))) + priv->link_speed_capa |= ETH_LINK_SPEED_10G; + if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT) | + MLX5_BITSHIFT(ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT))) + priv->link_speed_capa |= ETH_LINK_SPEED_20G; + if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT) | + MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT) | + MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT) | + MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT))) + priv->link_speed_capa |= ETH_LINK_SPEED_40G; + if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT) | + MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT) | + MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT) | + MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT))) + priv->link_speed_capa |= ETH_LINK_SPEED_56G; + if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT) | + MLX5_BITSHIFT(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT) | + MLX5_BITSHIFT(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT))) + priv->link_speed_capa |= ETH_LINK_SPEED_25G; + if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT) | + MLX5_BITSHIFT(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT))) + priv->link_speed_capa |= ETH_LINK_SPEED_50G; + if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT) | + MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT) | + MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT) | + MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT))) + priv->link_speed_capa |= ETH_LINK_SPEED_100G; + if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT) | + MLX5_BITSHIFT(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT))) + priv->link_speed_capa |= ETH_LINK_SPEED_200G; + + sc = ecmd->link_mode_masks[2] | + ((uint64_t)ecmd->link_mode_masks[3] << 32); + if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT) | + MLX5_BITSHIFT( + ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT) | + MLX5_BITSHIFT(ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT))) + priv->link_speed_capa |= ETH_LINK_SPEED_200G; + dev_link.link_duplex = ((ecmd->duplex == DUPLEX_HALF) ? + ETH_LINK_HALF_DUPLEX : ETH_LINK_FULL_DUPLEX); + dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds & + ETH_LINK_SPEED_FIXED); + if (((dev_link.link_speed && !dev_link.link_status) || + (!dev_link.link_speed && dev_link.link_status))) { + rte_errno = EAGAIN; + return -rte_errno; + } + *link = dev_link; + return 0; +} + +/** + * DPDK callback to retrieve physical link information. + * + * @param dev + * Pointer to Ethernet device structure. + * @param wait_to_complete + * Wait for request completion. + * + * @return + * 0 if link status was not updated, positive if it was, a negative errno + * value otherwise and rte_errno is set. 
+ */
+int
+mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete)
+{
+        int ret;
+        struct rte_eth_link dev_link;
+        time_t start_time = time(NULL);
+        int retry = MLX5_GET_LINK_STATUS_RETRY_COUNT;
+
+        do {
+                ret = mlx5_link_update_unlocked_gs(dev, &dev_link);
+                if (ret == -ENOTSUP)
+                        ret = mlx5_link_update_unlocked_gset(dev, &dev_link);
+                if (ret == 0)
+                        break;
+                /* Handle wait to complete situation. */
+                if ((wait_to_complete || retry) && ret == -EAGAIN) {
+                        if (abs((int)difftime(time(NULL), start_time)) <
+                            MLX5_LINK_STATUS_TIMEOUT) {
+                                usleep(0);
+                                continue;
+                        } else {
+                                rte_errno = EBUSY;
+                                return -rte_errno;
+                        }
+                } else if (ret < 0) {
+                        return ret;
+                }
+        } while (wait_to_complete || retry-- > 0);
+        ret = !!memcmp(&dev->data->dev_link, &dev_link,
+                       sizeof(struct rte_eth_link));
+        dev->data->dev_link = dev_link;
+        return ret;
+}
+
+/**
+ * DPDK callback to change the MTU.
+ *
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ * @param mtu
+ *   New MTU.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
+{
+        struct mlx5_priv *priv = dev->data->dev_private;
+        uint16_t kern_mtu = 0;
+        int ret;
+
+        ret = mlx5_get_mtu(dev, &kern_mtu);
+        if (ret)
+                return ret;
+        /* Set kernel interface MTU first. */
+        ret = mlx5_set_mtu(dev, mtu);
+        if (ret)
+                return ret;
+        ret = mlx5_get_mtu(dev, &kern_mtu);
+        if (ret)
+                return ret;
+        if (kern_mtu == mtu) {
+                priv->mtu = mtu;
+                DRV_LOG(DEBUG, "port %u adapter MTU set to %u",
+                        dev->data->port_id, mtu);
+                return 0;
+        }
+        rte_errno = EAGAIN;
+        return -rte_errno;
+}
+
+/**
+ * DPDK callback to get flow control status.
+ *
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ * @param[out] fc_conf
+ *   Flow control output buffer.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
+{
+        struct ifreq ifr;
+        struct ethtool_pauseparam ethpause = {
+                .cmd = ETHTOOL_GPAUSEPARAM
+        };
+        int ret;
+
+        ifr.ifr_data = (void *)&ethpause;
+        ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
+        if (ret) {
+                DRV_LOG(WARNING,
+                        "port %u ioctl(SIOCETHTOOL, ETHTOOL_GPAUSEPARAM) failed:"
+                        " %s",
+                        dev->data->port_id, strerror(rte_errno));
+                return ret;
+        }
+        fc_conf->autoneg = ethpause.autoneg;
+        if (ethpause.rx_pause && ethpause.tx_pause)
+                fc_conf->mode = RTE_FC_FULL;
+        else if (ethpause.rx_pause)
+                fc_conf->mode = RTE_FC_RX_PAUSE;
+        else if (ethpause.tx_pause)
+                fc_conf->mode = RTE_FC_TX_PAUSE;
+        else
+                fc_conf->mode = RTE_FC_NONE;
+        return 0;
+}
+
+/**
+ * DPDK callback to modify flow control parameters.
+ *
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ * @param[in] fc_conf
+ *   Flow control parameters.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
+{
+        struct ifreq ifr;
+        struct ethtool_pauseparam ethpause = {
+                .cmd = ETHTOOL_SPAUSEPARAM
+        };
+        int ret;
+
+        ifr.ifr_data = (void *)&ethpause;
+        ethpause.autoneg = fc_conf->autoneg;
+        if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
+            (fc_conf->mode & RTE_FC_RX_PAUSE))
+                ethpause.rx_pause = 1;
+        else
+                ethpause.rx_pause = 0;
+
+        if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
+            (fc_conf->mode & RTE_FC_TX_PAUSE))
+                ethpause.tx_pause = 1;
+        else
+                ethpause.tx_pause = 0;
+        ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
+        if (ret) {
+                DRV_LOG(WARNING,
+                        "port %u ioctl(SIOCETHTOOL, ETHTOOL_SPAUSEPARAM)"
+                        " failed: %s",
+                        dev->data->port_id, strerror(rte_errno));
+                return ret;
+        }
+        return 0;
+}
+
+/**
+ * Handle asynchronous removal event for the entire multiport device.
+ *
+ * @param sh
+ *   Infiniband device shared context.
+ */
+static void
+mlx5_dev_interrupt_device_fatal(struct mlx5_ibv_shared *sh)
+{
+        uint32_t i;
+
+        for (i = 0; i < sh->max_port; ++i) {
+                struct rte_eth_dev *dev;
+
+                if (sh->port[i].ih_port_id >= RTE_MAX_ETHPORTS) {
+                        /*
+                         * The port does not exist or no handler
+                         * is installed for this port.
+                         */
+                        continue;
+                }
+                dev = &rte_eth_devices[sh->port[i].ih_port_id];
+                MLX5_ASSERT(dev);
+                if (dev->data->dev_conf.intr_conf.rmv)
+                        _rte_eth_dev_callback_process
+                                (dev, RTE_ETH_EVENT_INTR_RMV, NULL);
+        }
+}
+
+/**
+ * Handle shared asynchronous events from the NIC (removal event
+ * and link status change). Supports multiport IB device.
+ *
+ * @param cb_arg
+ *   Callback argument.
+ */
+void
+mlx5_dev_interrupt_handler(void *cb_arg)
+{
+        struct mlx5_ibv_shared *sh = cb_arg;
+        struct ibv_async_event event;
+
+        /* Read all messages from the IB device and acknowledge them. */
+        for (;;) {
+                struct rte_eth_dev *dev;
+                uint32_t tmp;
+
+                if (mlx5_glue->get_async_event(sh->ctx, &event))
+                        break;
+                /* Retrieve and check IB port index. */
+                tmp = (uint32_t)event.element.port_num;
+                if (!tmp && event.event_type == IBV_EVENT_DEVICE_FATAL) {
+                        /*
+                         * The DEVICE_FATAL event is reported once for the
+                         * entire device without specifying a port.
+                         * We should notify all existing ports.
+                         */
+                        mlx5_glue->ack_async_event(&event);
+                        mlx5_dev_interrupt_device_fatal(sh);
+                        continue;
+                }
+                MLX5_ASSERT(tmp && (tmp <= sh->max_port));
+                if (!tmp) {
+                        /* Unsupported device-level event. */
+                        mlx5_glue->ack_async_event(&event);
+                        DRV_LOG(DEBUG,
+                                "unsupported common event (type %d)",
+                                event.event_type);
+                        continue;
+                }
+                if (tmp > sh->max_port) {
+                        /* Invalid IB port index. */
+                        mlx5_glue->ack_async_event(&event);
+                        DRV_LOG(DEBUG,
+                                "cannot handle an event (type %d)"
+                                " due to invalid IB port index (%u)",
+                                event.event_type, tmp);
+                        continue;
+                }
+                if (sh->port[tmp - 1].ih_port_id >= RTE_MAX_ETHPORTS) {
+                        /* No handler installed. */
+                        mlx5_glue->ack_async_event(&event);
+                        DRV_LOG(DEBUG,
+                                "cannot handle an event (type %d)"
+                                " due to no handler installed for port %u",
+                                event.event_type, tmp);
+                        continue;
+                }
+                /* Retrieve ethernet device descriptor.
*/ + tmp = sh->port[tmp - 1].ih_port_id; + dev = &rte_eth_devices[tmp]; + MLX5_ASSERT(dev); + if ((event.event_type == IBV_EVENT_PORT_ACTIVE || + event.event_type == IBV_EVENT_PORT_ERR) && + dev->data->dev_conf.intr_conf.lsc) { + mlx5_glue->ack_async_event(&event); + if (mlx5_link_update(dev, 0) == -EAGAIN) { + usleep(0); + continue; + } + _rte_eth_dev_callback_process + (dev, RTE_ETH_EVENT_INTR_LSC, NULL); + continue; + } + DRV_LOG(DEBUG, + "port %u cannot handle an unknown event (type %d)", + dev->data->port_id, event.event_type); + mlx5_glue->ack_async_event(&event); + } +} + +/* + * Unregister callback handler safely. The handler may be active + * while we are trying to unregister it, in this case code -EAGAIN + * is returned by rte_intr_callback_unregister(). This routine checks + * the return code and tries to unregister handler again. + * + * @param handle + * interrupt handle + * @param cb_fn + * pointer to callback routine + * @cb_arg + * opaque callback parameter + */ +void +mlx5_intr_callback_unregister(const struct rte_intr_handle *handle, + rte_intr_callback_fn cb_fn, void *cb_arg) +{ + /* + * Try to reduce timeout management overhead by not calling + * the timer related routines on the first iteration. If the + * unregistering succeeds on first call there will be no + * timer calls at all. + */ + uint64_t twait = 0; + uint64_t start = 0; + + do { + int ret; + + ret = rte_intr_callback_unregister(handle, cb_fn, cb_arg); + if (ret >= 0) + return; + if (ret != -EAGAIN) { + DRV_LOG(INFO, "failed to unregister interrupt" + " handler (error: %d)", ret); + MLX5_ASSERT(false); + return; + } + if (twait) { + struct timespec onems; + + /* Wait one millisecond and try again. */ + onems.tv_sec = 0; + onems.tv_nsec = NS_PER_S / MS_PER_S; + nanosleep(&onems, 0); + /* Check whether one second elapsed. */ + if ((rte_get_timer_cycles() - start) <= twait) + continue; + } else { + /* + * We get the amount of timer ticks for one second. + * If this amount elapsed it means we spent one + * second in waiting. This branch is executed once + * on first iteration. + */ + twait = rte_get_timer_hz(); + MLX5_ASSERT(twait); + } + /* + * Timeout elapsed, show message (once a second) and retry. + * We have no other acceptable option here, if we ignore + * the unregistering return code the handler will not + * be unregistered, fd will be closed and we may get the + * crush. Hanging and messaging in the loop seems not to be + * the worst choice. + */ + DRV_LOG(INFO, "Retrying to unregister interrupt handler"); + start = rte_get_timer_cycles(); + } while (true); +} + +/** + * Handle DEVX interrupts from the NIC. + * This function is probably called from the DPDK host thread. + * + * @param cb_arg + * Callback argument. + */ +void +mlx5_dev_interrupt_handler_devx(void *cb_arg) +{ +#ifndef HAVE_IBV_DEVX_ASYNC + (void)cb_arg; + return; +#else + struct mlx5_ibv_shared *sh = cb_arg; + union { + struct mlx5dv_devx_async_cmd_hdr cmd_resp; + uint8_t buf[MLX5_ST_SZ_BYTES(query_flow_counter_out) + + MLX5_ST_SZ_BYTES(traffic_counter) + + sizeof(struct mlx5dv_devx_async_cmd_hdr)]; + } out; + uint8_t *buf = out.buf + sizeof(out.cmd_resp); + + while (!mlx5_glue->devx_get_async_cmd_comp(sh->devx_comp, + &out.cmd_resp, + sizeof(out.buf))) + mlx5_flow_async_pool_query_handle + (sh, (uint64_t)out.cmd_resp.wr_id, + mlx5_devx_get_out_command_status(buf)); +#endif /* HAVE_IBV_DEVX_ASYNC */ +} + +/** + * Uninstall shared asynchronous device events handler. 
+ * This function is implemented to support event sharing + * between multiple ports of single IB device. + * + * @param dev + * Pointer to Ethernet device. + */ +static void +mlx5_dev_shared_handler_uninstall(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_ibv_shared *sh = priv->sh; + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return; + pthread_mutex_lock(&sh->intr_mutex); + MLX5_ASSERT(priv->ibv_port); + MLX5_ASSERT(priv->ibv_port <= sh->max_port); + MLX5_ASSERT(dev->data->port_id < RTE_MAX_ETHPORTS); + if (sh->port[priv->ibv_port - 1].ih_port_id >= RTE_MAX_ETHPORTS) + goto exit; + MLX5_ASSERT(sh->port[priv->ibv_port - 1].ih_port_id == + (uint32_t)dev->data->port_id); + MLX5_ASSERT(sh->intr_cnt); + sh->port[priv->ibv_port - 1].ih_port_id = RTE_MAX_ETHPORTS; + if (!sh->intr_cnt || --sh->intr_cnt) + goto exit; + mlx5_intr_callback_unregister(&sh->intr_handle, + mlx5_dev_interrupt_handler, sh); + sh->intr_handle.fd = 0; + sh->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN; +exit: + pthread_mutex_unlock(&sh->intr_mutex); +} + +/** + * Uninstall devx shared asynchronous device events handler. + * This function is implemeted to support event sharing + * between multiple ports of single IB device. + * + * @param dev + * Pointer to Ethernet device. + */ +static void +mlx5_dev_shared_handler_devx_uninstall(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_ibv_shared *sh = priv->sh; + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return; + pthread_mutex_lock(&sh->intr_mutex); + MLX5_ASSERT(priv->ibv_port); + MLX5_ASSERT(priv->ibv_port <= sh->max_port); + MLX5_ASSERT(dev->data->port_id < RTE_MAX_ETHPORTS); + if (sh->port[priv->ibv_port - 1].devx_ih_port_id >= RTE_MAX_ETHPORTS) + goto exit; + MLX5_ASSERT(sh->port[priv->ibv_port - 1].devx_ih_port_id == + (uint32_t)dev->data->port_id); + sh->port[priv->ibv_port - 1].devx_ih_port_id = RTE_MAX_ETHPORTS; + if (!sh->devx_intr_cnt || --sh->devx_intr_cnt) + goto exit; + if (sh->intr_handle_devx.fd) { + rte_intr_callback_unregister(&sh->intr_handle_devx, + mlx5_dev_interrupt_handler_devx, + sh); + sh->intr_handle_devx.fd = 0; + sh->intr_handle_devx.type = RTE_INTR_HANDLE_UNKNOWN; + } + if (sh->devx_comp) { + mlx5_glue->devx_destroy_cmd_comp(sh->devx_comp); + sh->devx_comp = NULL; + } +exit: + pthread_mutex_unlock(&sh->intr_mutex); +} + +/** + * Install shared asynchronous device events handler. + * This function is implemented to support event sharing + * between multiple ports of single IB device. + * + * @param dev + * Pointer to Ethernet device. + */ +static void +mlx5_dev_shared_handler_install(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_ibv_shared *sh = priv->sh; + int ret; + int flags; + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return; + pthread_mutex_lock(&sh->intr_mutex); + MLX5_ASSERT(priv->ibv_port); + MLX5_ASSERT(priv->ibv_port <= sh->max_port); + MLX5_ASSERT(dev->data->port_id < RTE_MAX_ETHPORTS); + if (sh->port[priv->ibv_port - 1].ih_port_id < RTE_MAX_ETHPORTS) { + /* The handler is already installed for this port. */ + MLX5_ASSERT(sh->intr_cnt); + goto exit; + } + if (sh->intr_cnt) { + sh->port[priv->ibv_port - 1].ih_port_id = + (uint32_t)dev->data->port_id; + sh->intr_cnt++; + goto exit; + } + /* No shared handler installed. 
*/ + MLX5_ASSERT(sh->ctx->async_fd > 0); + flags = fcntl(sh->ctx->async_fd, F_GETFL); + ret = fcntl(sh->ctx->async_fd, F_SETFL, flags | O_NONBLOCK); + if (ret) { + DRV_LOG(INFO, "failed to change file descriptor async event" + " queue"); + /* Indicate there will be no interrupts. */ + dev->data->dev_conf.intr_conf.lsc = 0; + dev->data->dev_conf.intr_conf.rmv = 0; + } else { + sh->intr_handle.fd = sh->ctx->async_fd; + sh->intr_handle.type = RTE_INTR_HANDLE_EXT; + rte_intr_callback_register(&sh->intr_handle, + mlx5_dev_interrupt_handler, sh); + sh->intr_cnt++; + sh->port[priv->ibv_port - 1].ih_port_id = + (uint32_t)dev->data->port_id; + } +exit: + pthread_mutex_unlock(&sh->intr_mutex); +} + +/** + * Install devx shared asyncronous device events handler. + * This function is implemeted to support event sharing + * between multiple ports of single IB device. + * + * @param dev + * Pointer to Ethernet device. + */ +static void +mlx5_dev_shared_handler_devx_install(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_ibv_shared *sh = priv->sh; + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return; + pthread_mutex_lock(&sh->intr_mutex); + MLX5_ASSERT(priv->ibv_port); + MLX5_ASSERT(priv->ibv_port <= sh->max_port); + MLX5_ASSERT(dev->data->port_id < RTE_MAX_ETHPORTS); + if (sh->port[priv->ibv_port - 1].devx_ih_port_id < RTE_MAX_ETHPORTS) { + /* The handler is already installed for this port. */ + MLX5_ASSERT(sh->devx_intr_cnt); + goto exit; + } + if (sh->devx_intr_cnt) { + sh->devx_intr_cnt++; + sh->port[priv->ibv_port - 1].devx_ih_port_id = + (uint32_t)dev->data->port_id; + goto exit; + } + if (priv->config.devx) { +#ifndef HAVE_IBV_DEVX_ASYNC + goto exit; +#else + sh->devx_comp = mlx5_glue->devx_create_cmd_comp(sh->ctx); + if (sh->devx_comp) { + int flags = fcntl(sh->devx_comp->fd, F_GETFL); + int ret = fcntl(sh->devx_comp->fd, F_SETFL, + flags | O_NONBLOCK); + + if (ret) { + DRV_LOG(INFO, "failed to change file descriptor" + " devx async event queue"); + } else { + sh->intr_handle_devx.fd = sh->devx_comp->fd; + sh->intr_handle_devx.type = RTE_INTR_HANDLE_EXT; + rte_intr_callback_register + (&sh->intr_handle_devx, + mlx5_dev_interrupt_handler_devx, sh); + sh->devx_intr_cnt++; + sh->port[priv->ibv_port - 1].devx_ih_port_id = + (uint32_t)dev->data->port_id; + } + } +#endif /* HAVE_IBV_DEVX_ASYNC */ + } +exit: + pthread_mutex_unlock(&sh->intr_mutex); +} + +/** + * Uninstall interrupt handler. + * + * @param dev + * Pointer to Ethernet device. + */ +void +mlx5_dev_interrupt_handler_uninstall(struct rte_eth_dev *dev) +{ + mlx5_dev_shared_handler_uninstall(dev); +} + +/** + * Install interrupt handler. + * + * @param dev + * Pointer to Ethernet device. + */ +void +mlx5_dev_interrupt_handler_install(struct rte_eth_dev *dev) +{ + mlx5_dev_shared_handler_install(dev); +} + +/** + * Devx uninstall interrupt handler. + * + * @param dev + * Pointer to Ethernet device. + */ +void +mlx5_dev_interrupt_handler_devx_uninstall(struct rte_eth_dev *dev) +{ + mlx5_dev_shared_handler_devx_uninstall(dev); +} + +/** + * Devx install interrupt handler. + * + * @param dev + * Pointer to Ethernet device. + */ +void +mlx5_dev_interrupt_handler_devx_install(struct rte_eth_dev *dev) +{ + mlx5_dev_shared_handler_devx_install(dev); +} + +/** + * DPDK callback to bring the link DOWN. + * + * @param dev + * Pointer to Ethernet device structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. 
+ */ +int +mlx5_set_link_down(struct rte_eth_dev *dev) +{ + return mlx5_set_flags(dev, ~IFF_UP, ~IFF_UP); +} + +/** + * DPDK callback to bring the link UP. + * + * @param dev + * Pointer to Ethernet device structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_set_link_up(struct rte_eth_dev *dev) +{ + return mlx5_set_flags(dev, ~IFF_UP, IFF_UP); +} + +/** + * Configure the RX function to use. + * + * @param dev + * Pointer to private data structure. + * + * @return + * Pointer to selected Rx burst function. + */ +eth_rx_burst_t +mlx5_select_rx_function(struct rte_eth_dev *dev) +{ + eth_rx_burst_t rx_pkt_burst = mlx5_rx_burst; + + MLX5_ASSERT(dev != NULL); + if (mlx5_check_vec_rx_support(dev) > 0) { + rx_pkt_burst = mlx5_rx_burst_vec; + DRV_LOG(DEBUG, "port %u selected Rx vectorized function", + dev->data->port_id); + } else if (mlx5_mprq_enabled(dev)) { + rx_pkt_burst = mlx5_rx_burst_mprq; + } + return rx_pkt_burst; +} + +/** + * Check if mlx5 device was removed. + * + * @param dev + * Pointer to Ethernet device structure. + * + * @return + * 1 when device is removed, otherwise 0. + */ +int +mlx5_is_removed(struct rte_eth_dev *dev) +{ + struct ibv_device_attr device_attr; + struct mlx5_priv *priv = dev->data->dev_private; + + if (mlx5_glue->query_device(priv->sh->ctx, &device_attr) == EIO) + return 1; + return 0; +} + +/** + * Get the E-Switch parameters by port id. + * + * @param[in] port + * Device port id. + * @param[in] valid + * Device port id is valid, skip check. This flag is useful + * when trials are performed from probing and device is not + * flagged as valid yet (in attaching process). + * @param[out] es_domain_id + * E-Switch domain id. + * @param[out] es_port_id + * The port id of the port in the E-Switch. + * + * @return + * pointer to device private data structure containing data needed + * on success, NULL otherwise and rte_errno is set. + */ +struct mlx5_priv * +mlx5_port_to_eswitch_info(uint16_t port, bool valid) +{ + struct rte_eth_dev *dev; + struct mlx5_priv *priv; + + if (port >= RTE_MAX_ETHPORTS) { + rte_errno = EINVAL; + return NULL; + } + if (!valid && !rte_eth_dev_is_valid_port(port)) { + rte_errno = ENODEV; + return NULL; + } + dev = &rte_eth_devices[port]; + priv = dev->data->dev_private; + if (!(priv->representor || priv->master)) { + rte_errno = EINVAL; + return NULL; + } + return priv; +} + +/** + * Get the E-Switch parameters by device instance. + * + * @param[in] port + * Device port id. + * @param[out] es_domain_id + * E-Switch domain id. + * @param[out] es_port_id + * The port id of the port in the E-Switch. + * + * @return + * pointer to device private data structure containing data needed + * on success, NULL otherwise and rte_errno is set. + */ +struct mlx5_priv * +mlx5_dev_to_eswitch_info(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv; + + priv = dev->data->dev_private; + if (!(priv->representor || priv->master)) { + rte_errno = EINVAL; + return NULL; + } + return priv; +} + +/** + * Get switch information associated with network interface. + * + * @param ifindex + * Network interface index. + * @param[out] info + * Switch information object, populated in case of success. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. 
+ */ +int +mlx5_sysfs_switch_info(unsigned int ifindex, struct mlx5_switch_info *info) +{ + char ifname[IF_NAMESIZE]; + char port_name[IF_NAMESIZE]; + FILE *file; + struct mlx5_switch_info data = { + .master = 0, + .representor = 0, + .name_type = MLX5_PHYS_PORT_NAME_TYPE_NOTSET, + .port_name = 0, + .switch_id = 0, + }; + DIR *dir; + bool port_switch_id_set = false; + bool device_dir = false; + char c; + int ret; + + if (!if_indextoname(ifindex, ifname)) { + rte_errno = errno; + return -rte_errno; + } + + MKSTR(phys_port_name, "/sys/class/net/%s/phys_port_name", + ifname); + MKSTR(phys_switch_id, "/sys/class/net/%s/phys_switch_id", + ifname); + MKSTR(pci_device, "/sys/class/net/%s/device", + ifname); + + file = fopen(phys_port_name, "rb"); + if (file != NULL) { + ret = fscanf(file, "%s", port_name); + fclose(file); + if (ret == 1) + mlx5_translate_port_name(port_name, &data); + } + file = fopen(phys_switch_id, "rb"); + if (file == NULL) { + rte_errno = errno; + return -rte_errno; + } + port_switch_id_set = + fscanf(file, "%" SCNx64 "%c", &data.switch_id, &c) == 2 && + c == '\n'; + fclose(file); + dir = opendir(pci_device); + if (dir != NULL) { + closedir(dir); + device_dir = true; + } + if (port_switch_id_set) { + /* We have some E-Switch configuration. */ + mlx5_sysfs_check_switch_info(device_dir, &data); + } + *info = data; + MLX5_ASSERT(!(data.master && data.representor)); + if (data.master && data.representor) { + DRV_LOG(ERR, "ifindex %u device is recognized as master" + " and as representor", ifindex); + rte_errno = ENODEV; + return -rte_errno; + } + return 0; +} + +/** + * Analyze gathered port parameters via sysfs to recognize master + * and representor devices for E-Switch configuration. + * + * @param[in] device_dir + * flag of presence of "device" directory under port device key. + * @param[inout] switch_info + * Port information, including port name as a number and port name + * type if recognized + * + * @return + * master and representor flags are set in switch_info according to + * recognized parameters (if any). + */ +void +mlx5_sysfs_check_switch_info(bool device_dir, + struct mlx5_switch_info *switch_info) +{ + switch (switch_info->name_type) { + case MLX5_PHYS_PORT_NAME_TYPE_UNKNOWN: + /* + * Name is not recognized, assume the master, + * check the device directory presence. + */ + switch_info->master = device_dir; + break; + case MLX5_PHYS_PORT_NAME_TYPE_NOTSET: + /* + * Name is not set, this assumes the legacy naming + * schema for master, just check if there is + * a device directory. + */ + switch_info->master = device_dir; + break; + case MLX5_PHYS_PORT_NAME_TYPE_UPLINK: + /* New uplink naming schema recognized. */ + switch_info->master = 1; + break; + case MLX5_PHYS_PORT_NAME_TYPE_LEGACY: + /* Legacy representors naming schema. */ + switch_info->representor = !device_dir; + break; + case MLX5_PHYS_PORT_NAME_TYPE_PFVF: + /* New representors naming schema. */ + switch_info->representor = 1; + break; + } +} + +/** + * DPDK callback to retrieve plug-in module EEPROM information (type and size). + * + * @param dev + * Pointer to Ethernet device structure. + * @param[out] modinfo + * Storage for plug-in module EEPROM information. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. 
+ */ +int +mlx5_get_module_info(struct rte_eth_dev *dev, + struct rte_eth_dev_module_info *modinfo) +{ + struct ethtool_modinfo info = { + .cmd = ETHTOOL_GMODULEINFO, + }; + struct ifreq ifr = (struct ifreq) { + .ifr_data = (void *)&info, + }; + int ret = 0; + + if (!dev || !modinfo) { + DRV_LOG(WARNING, "missing argument, cannot get module info"); + rte_errno = EINVAL; + return -rte_errno; + } + ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr); + if (ret) { + DRV_LOG(WARNING, "port %u ioctl(SIOCETHTOOL) failed: %s", + dev->data->port_id, strerror(rte_errno)); + return ret; + } + modinfo->type = info.type; + modinfo->eeprom_len = info.eeprom_len; + return ret; +} + +/** + * DPDK callback to retrieve plug-in module EEPROM data. + * + * @param dev + * Pointer to Ethernet device structure. + * @param[out] info + * Storage for plug-in module EEPROM data. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int mlx5_get_module_eeprom(struct rte_eth_dev *dev, + struct rte_dev_eeprom_info *info) +{ + struct ethtool_eeprom *eeprom; + struct ifreq ifr; + int ret = 0; + + if (!dev || !info) { + DRV_LOG(WARNING, "missing argument, cannot get module eeprom"); + rte_errno = EINVAL; + return -rte_errno; + } + eeprom = rte_calloc(__func__, 1, + (sizeof(struct ethtool_eeprom) + info->length), 0); + if (!eeprom) { + DRV_LOG(WARNING, "port %u cannot allocate memory for " + "eeprom data", dev->data->port_id); + rte_errno = ENOMEM; + return -rte_errno; + } + eeprom->cmd = ETHTOOL_GMODULEEEPROM; + eeprom->offset = info->offset; + eeprom->len = info->length; + ifr = (struct ifreq) { + .ifr_data = (void *)eeprom, + }; + ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr); + if (ret) + DRV_LOG(WARNING, "port %u ioctl(SIOCETHTOOL) failed: %s", + dev->data->port_id, strerror(rte_errno)); + else + rte_memcpy(info->data, eeprom->data, info->length); + rte_free(eeprom); + return ret; +} + +/** + * DPDK callback to retrieve hairpin capabilities. + * + * @param dev + * Pointer to Ethernet device structure. + * @param[out] cap + * Storage for hairpin capability data. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int mlx5_hairpin_cap_get(struct rte_eth_dev *dev, + struct rte_eth_hairpin_cap *cap) +{ + struct mlx5_priv *priv = dev->data->dev_private; + + if (priv->sh->devx == 0) { + rte_errno = ENOTSUP; + return -rte_errno; + } + cap->max_nb_queues = UINT16_MAX; + cap->max_rx_2_tx = 1; + cap->max_tx_2_rx = 1; + cap->max_nb_desc = 8192; + return 0; +} diff --git a/src/spdk/dpdk/drivers/net/mlx5/mlx5_flow.c b/src/spdk/dpdk/drivers/net/mlx5/mlx5_flow.c new file mode 100644 index 000000000..ae478a510 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/mlx5/mlx5_flow.c @@ -0,0 +1,6204 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2016 6WIND S.A. + * Copyright 2016 Mellanox Technologies, Ltd + */ + +#include +#include +#include +#include +#include +#include + +/* Verbs header. */ +/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. 
*/ +#ifdef PEDANTIC +#pragma GCC diagnostic ignored "-Wpedantic" +#endif +#include +#ifdef PEDANTIC +#pragma GCC diagnostic error "-Wpedantic" +#endif + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "mlx5_defs.h" +#include "mlx5.h" +#include "mlx5_flow.h" +#include "mlx5_rxtx.h" + +/* Dev ops structure defined in mlx5.c */ +extern const struct eth_dev_ops mlx5_dev_ops; +extern const struct eth_dev_ops mlx5_dev_ops_isolate; + +/** Device flow drivers. */ +#ifdef HAVE_IBV_FLOW_DV_SUPPORT +extern const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops; +#endif +extern const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops; + +const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops; + +const struct mlx5_flow_driver_ops *flow_drv_ops[] = { + [MLX5_FLOW_TYPE_MIN] = &mlx5_flow_null_drv_ops, +#ifdef HAVE_IBV_FLOW_DV_SUPPORT + [MLX5_FLOW_TYPE_DV] = &mlx5_flow_dv_drv_ops, +#endif + [MLX5_FLOW_TYPE_VERBS] = &mlx5_flow_verbs_drv_ops, + [MLX5_FLOW_TYPE_MAX] = &mlx5_flow_null_drv_ops +}; + +enum mlx5_expansion { + MLX5_EXPANSION_ROOT, + MLX5_EXPANSION_ROOT_OUTER, + MLX5_EXPANSION_ROOT_ETH_VLAN, + MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN, + MLX5_EXPANSION_OUTER_ETH, + MLX5_EXPANSION_OUTER_ETH_VLAN, + MLX5_EXPANSION_OUTER_VLAN, + MLX5_EXPANSION_OUTER_IPV4, + MLX5_EXPANSION_OUTER_IPV4_UDP, + MLX5_EXPANSION_OUTER_IPV4_TCP, + MLX5_EXPANSION_OUTER_IPV6, + MLX5_EXPANSION_OUTER_IPV6_UDP, + MLX5_EXPANSION_OUTER_IPV6_TCP, + MLX5_EXPANSION_VXLAN, + MLX5_EXPANSION_VXLAN_GPE, + MLX5_EXPANSION_GRE, + MLX5_EXPANSION_MPLS, + MLX5_EXPANSION_ETH, + MLX5_EXPANSION_ETH_VLAN, + MLX5_EXPANSION_VLAN, + MLX5_EXPANSION_IPV4, + MLX5_EXPANSION_IPV4_UDP, + MLX5_EXPANSION_IPV4_TCP, + MLX5_EXPANSION_IPV6, + MLX5_EXPANSION_IPV6_UDP, + MLX5_EXPANSION_IPV6_TCP, +}; + +/** Supported expansion of items. 
*/ +static const struct rte_flow_expand_node mlx5_support_expansion[] = { + [MLX5_EXPANSION_ROOT] = { + .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH, + MLX5_EXPANSION_IPV4, + MLX5_EXPANSION_IPV6), + .type = RTE_FLOW_ITEM_TYPE_END, + }, + [MLX5_EXPANSION_ROOT_OUTER] = { + .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_ETH, + MLX5_EXPANSION_OUTER_IPV4, + MLX5_EXPANSION_OUTER_IPV6), + .type = RTE_FLOW_ITEM_TYPE_END, + }, + [MLX5_EXPANSION_ROOT_ETH_VLAN] = { + .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH_VLAN), + .type = RTE_FLOW_ITEM_TYPE_END, + }, + [MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN] = { + .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_ETH_VLAN), + .type = RTE_FLOW_ITEM_TYPE_END, + }, + [MLX5_EXPANSION_OUTER_ETH] = { + .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4, + MLX5_EXPANSION_OUTER_IPV6, + MLX5_EXPANSION_MPLS), + .type = RTE_FLOW_ITEM_TYPE_ETH, + .rss_types = 0, + }, + [MLX5_EXPANSION_OUTER_ETH_VLAN] = { + .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_VLAN), + .type = RTE_FLOW_ITEM_TYPE_ETH, + .rss_types = 0, + }, + [MLX5_EXPANSION_OUTER_VLAN] = { + .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4, + MLX5_EXPANSION_OUTER_IPV6), + .type = RTE_FLOW_ITEM_TYPE_VLAN, + }, + [MLX5_EXPANSION_OUTER_IPV4] = { + .next = RTE_FLOW_EXPAND_RSS_NEXT + (MLX5_EXPANSION_OUTER_IPV4_UDP, + MLX5_EXPANSION_OUTER_IPV4_TCP, + MLX5_EXPANSION_GRE, + MLX5_EXPANSION_IPV4, + MLX5_EXPANSION_IPV6), + .type = RTE_FLOW_ITEM_TYPE_IPV4, + .rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | + ETH_RSS_NONFRAG_IPV4_OTHER, + }, + [MLX5_EXPANSION_OUTER_IPV4_UDP] = { + .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN, + MLX5_EXPANSION_VXLAN_GPE), + .type = RTE_FLOW_ITEM_TYPE_UDP, + .rss_types = ETH_RSS_NONFRAG_IPV4_UDP, + }, + [MLX5_EXPANSION_OUTER_IPV4_TCP] = { + .type = RTE_FLOW_ITEM_TYPE_TCP, + .rss_types = ETH_RSS_NONFRAG_IPV4_TCP, + }, + [MLX5_EXPANSION_OUTER_IPV6] = { + .next = RTE_FLOW_EXPAND_RSS_NEXT + (MLX5_EXPANSION_OUTER_IPV6_UDP, + MLX5_EXPANSION_OUTER_IPV6_TCP, + MLX5_EXPANSION_IPV4, + MLX5_EXPANSION_IPV6), + .type = RTE_FLOW_ITEM_TYPE_IPV6, + .rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | + ETH_RSS_NONFRAG_IPV6_OTHER, + }, + [MLX5_EXPANSION_OUTER_IPV6_UDP] = { + .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN, + MLX5_EXPANSION_VXLAN_GPE), + .type = RTE_FLOW_ITEM_TYPE_UDP, + .rss_types = ETH_RSS_NONFRAG_IPV6_UDP, + }, + [MLX5_EXPANSION_OUTER_IPV6_TCP] = { + .type = RTE_FLOW_ITEM_TYPE_TCP, + .rss_types = ETH_RSS_NONFRAG_IPV6_TCP, + }, + [MLX5_EXPANSION_VXLAN] = { + .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH, + MLX5_EXPANSION_IPV4, + MLX5_EXPANSION_IPV6), + .type = RTE_FLOW_ITEM_TYPE_VXLAN, + }, + [MLX5_EXPANSION_VXLAN_GPE] = { + .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH, + MLX5_EXPANSION_IPV4, + MLX5_EXPANSION_IPV6), + .type = RTE_FLOW_ITEM_TYPE_VXLAN_GPE, + }, + [MLX5_EXPANSION_GRE] = { + .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4), + .type = RTE_FLOW_ITEM_TYPE_GRE, + }, + [MLX5_EXPANSION_MPLS] = { + .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4, + MLX5_EXPANSION_IPV6), + .type = RTE_FLOW_ITEM_TYPE_MPLS, + }, + [MLX5_EXPANSION_ETH] = { + .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4, + MLX5_EXPANSION_IPV6), + .type = RTE_FLOW_ITEM_TYPE_ETH, + }, + [MLX5_EXPANSION_ETH_VLAN] = { + .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VLAN), + .type = RTE_FLOW_ITEM_TYPE_ETH, + }, + [MLX5_EXPANSION_VLAN] = { + .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4, + MLX5_EXPANSION_IPV6), + .type = 
RTE_FLOW_ITEM_TYPE_VLAN, + }, + [MLX5_EXPANSION_IPV4] = { + .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4_UDP, + MLX5_EXPANSION_IPV4_TCP), + .type = RTE_FLOW_ITEM_TYPE_IPV4, + .rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | + ETH_RSS_NONFRAG_IPV4_OTHER, + }, + [MLX5_EXPANSION_IPV4_UDP] = { + .type = RTE_FLOW_ITEM_TYPE_UDP, + .rss_types = ETH_RSS_NONFRAG_IPV4_UDP, + }, + [MLX5_EXPANSION_IPV4_TCP] = { + .type = RTE_FLOW_ITEM_TYPE_TCP, + .rss_types = ETH_RSS_NONFRAG_IPV4_TCP, + }, + [MLX5_EXPANSION_IPV6] = { + .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV6_UDP, + MLX5_EXPANSION_IPV6_TCP), + .type = RTE_FLOW_ITEM_TYPE_IPV6, + .rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | + ETH_RSS_NONFRAG_IPV6_OTHER, + }, + [MLX5_EXPANSION_IPV6_UDP] = { + .type = RTE_FLOW_ITEM_TYPE_UDP, + .rss_types = ETH_RSS_NONFRAG_IPV6_UDP, + }, + [MLX5_EXPANSION_IPV6_TCP] = { + .type = RTE_FLOW_ITEM_TYPE_TCP, + .rss_types = ETH_RSS_NONFRAG_IPV6_TCP, + }, +}; + +static const struct rte_flow_ops mlx5_flow_ops = { + .validate = mlx5_flow_validate, + .create = mlx5_flow_create, + .destroy = mlx5_flow_destroy, + .flush = mlx5_flow_flush, + .isolate = mlx5_flow_isolate, + .query = mlx5_flow_query, + .dev_dump = mlx5_flow_dev_dump, + .get_aged_flows = mlx5_flow_get_aged_flows, +}; + +/* Convert FDIR request to Generic flow. */ +struct mlx5_fdir { + struct rte_flow_attr attr; + struct rte_flow_item items[4]; + struct rte_flow_item_eth l2; + struct rte_flow_item_eth l2_mask; + union { + struct rte_flow_item_ipv4 ipv4; + struct rte_flow_item_ipv6 ipv6; + } l3; + union { + struct rte_flow_item_ipv4 ipv4; + struct rte_flow_item_ipv6 ipv6; + } l3_mask; + union { + struct rte_flow_item_udp udp; + struct rte_flow_item_tcp tcp; + } l4; + union { + struct rte_flow_item_udp udp; + struct rte_flow_item_tcp tcp; + } l4_mask; + struct rte_flow_action actions[2]; + struct rte_flow_action_queue queue; +}; + +/* Map of Verbs to Flow priority with 8 Verbs priorities. */ +static const uint32_t priority_map_3[][MLX5_PRIORITY_MAP_MAX] = { + { 0, 1, 2 }, { 2, 3, 4 }, { 5, 6, 7 }, +}; + +/* Map of Verbs to Flow priority with 16 Verbs priorities. */ +static const uint32_t priority_map_5[][MLX5_PRIORITY_MAP_MAX] = { + { 0, 1, 2 }, { 3, 4, 5 }, { 6, 7, 8 }, + { 9, 10, 11 }, { 12, 13, 14 }, +}; + +/* Tunnel information. */ +struct mlx5_flow_tunnel_info { + uint64_t tunnel; /**< Tunnel bit (see MLX5_FLOW_*). */ + uint32_t ptype; /**< Tunnel Ptype (see RTE_PTYPE_*). */ +}; + +static struct mlx5_flow_tunnel_info tunnels_info[] = { + { + .tunnel = MLX5_FLOW_LAYER_VXLAN, + .ptype = RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L4_UDP, + }, + { + .tunnel = MLX5_FLOW_LAYER_GENEVE, + .ptype = RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L4_UDP, + }, + { + .tunnel = MLX5_FLOW_LAYER_VXLAN_GPE, + .ptype = RTE_PTYPE_TUNNEL_VXLAN_GPE | RTE_PTYPE_L4_UDP, + }, + { + .tunnel = MLX5_FLOW_LAYER_GRE, + .ptype = RTE_PTYPE_TUNNEL_GRE, + }, + { + .tunnel = MLX5_FLOW_LAYER_MPLS | MLX5_FLOW_LAYER_OUTER_L4_UDP, + .ptype = RTE_PTYPE_TUNNEL_MPLS_IN_UDP | RTE_PTYPE_L4_UDP, + }, + { + .tunnel = MLX5_FLOW_LAYER_MPLS, + .ptype = RTE_PTYPE_TUNNEL_MPLS_IN_GRE, + }, + { + .tunnel = MLX5_FLOW_LAYER_NVGRE, + .ptype = RTE_PTYPE_TUNNEL_NVGRE, + }, + { + .tunnel = MLX5_FLOW_LAYER_IPIP, + .ptype = RTE_PTYPE_TUNNEL_IP, + }, + { + .tunnel = MLX5_FLOW_LAYER_IPV6_ENCAP, + .ptype = RTE_PTYPE_TUNNEL_IP, + }, + { + .tunnel = MLX5_FLOW_LAYER_GTP, + .ptype = RTE_PTYPE_TUNNEL_GTPU, + }, +}; + +/** + * Translate tag ID to register. + * + * @param[in] dev + * Pointer to the Ethernet device structure. 
+ * @param[in] feature + * The feature that request the register. + * @param[in] id + * The request register ID. + * @param[out] error + * Error description in case of any. + * + * @return + * The request register on success, a negative errno + * value otherwise and rte_errno is set. + */ +int +mlx5_flow_get_reg_id(struct rte_eth_dev *dev, + enum mlx5_feature_name feature, + uint32_t id, + struct rte_flow_error *error) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_dev_config *config = &priv->config; + enum modify_reg start_reg; + bool skip_mtr_reg = false; + + switch (feature) { + case MLX5_HAIRPIN_RX: + return REG_B; + case MLX5_HAIRPIN_TX: + return REG_A; + case MLX5_METADATA_RX: + switch (config->dv_xmeta_en) { + case MLX5_XMETA_MODE_LEGACY: + return REG_B; + case MLX5_XMETA_MODE_META16: + return REG_C_0; + case MLX5_XMETA_MODE_META32: + return REG_C_1; + } + break; + case MLX5_METADATA_TX: + return REG_A; + case MLX5_METADATA_FDB: + switch (config->dv_xmeta_en) { + case MLX5_XMETA_MODE_LEGACY: + return REG_NONE; + case MLX5_XMETA_MODE_META16: + return REG_C_0; + case MLX5_XMETA_MODE_META32: + return REG_C_1; + } + break; + case MLX5_FLOW_MARK: + switch (config->dv_xmeta_en) { + case MLX5_XMETA_MODE_LEGACY: + return REG_NONE; + case MLX5_XMETA_MODE_META16: + return REG_C_1; + case MLX5_XMETA_MODE_META32: + return REG_C_0; + } + break; + case MLX5_MTR_SFX: + /* + * If meter color and flow match share one register, flow match + * should use the meter color register for match. + */ + if (priv->mtr_reg_share) + return priv->mtr_color_reg; + else + return priv->mtr_color_reg != REG_C_2 ? REG_C_2 : + REG_C_3; + case MLX5_MTR_COLOR: + MLX5_ASSERT(priv->mtr_color_reg != REG_NONE); + return priv->mtr_color_reg; + case MLX5_COPY_MARK: + /* + * Metadata COPY_MARK register using is in meter suffix sub + * flow while with meter. It's safe to share the same register. + */ + return priv->mtr_color_reg != REG_C_2 ? REG_C_2 : REG_C_3; + case MLX5_APP_TAG: + /* + * If meter is enable, it will engage the register for color + * match and flow match. If meter color match is not using the + * REG_C_2, need to skip the REG_C_x be used by meter color + * match. + * If meter is disable, free to use all available registers. + */ + start_reg = priv->mtr_color_reg != REG_C_2 ? REG_C_2 : + (priv->mtr_reg_share ? REG_C_3 : REG_C_4); + skip_mtr_reg = !!(priv->mtr_en && start_reg == REG_C_2); + if (id > (REG_C_7 - start_reg)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + NULL, "invalid tag id"); + if (config->flow_mreg_c[id + start_reg - REG_C_0] == REG_NONE) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + NULL, "unsupported tag id"); + /* + * This case means meter is using the REG_C_x great than 2. + * Take care not to conflict with meter color REG_C_x. + * If the available index REG_C_y >= REG_C_x, skip the + * color register. 
+ */ + if (skip_mtr_reg && config->flow_mreg_c + [id + start_reg - REG_C_0] >= priv->mtr_color_reg) { + if (id >= (REG_C_7 - start_reg)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + NULL, "invalid tag id"); + if (config->flow_mreg_c + [id + 1 + start_reg - REG_C_0] != REG_NONE) + return config->flow_mreg_c + [id + 1 + start_reg - REG_C_0]; + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + NULL, "unsupported tag id"); + } + return config->flow_mreg_c[id + start_reg - REG_C_0]; + } + MLX5_ASSERT(false); + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, "invalid feature name"); +} + +/** + * Check extensive flow metadata register support. + * + * @param dev + * Pointer to rte_eth_dev structure. + * + * @return + * True if device supports extensive flow metadata register, otherwise false. + */ +bool +mlx5_flow_ext_mreg_supported(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_dev_config *config = &priv->config; + + /* + * Having available reg_c can be regarded inclusively as supporting + * extensive flow metadata register, which could mean, + * - metadata register copy action by modify header. + * - 16 modify header actions is supported. + * - reg_c's are preserved across different domain (FDB and NIC) on + * packet loopback by flow lookup miss. + */ + return config->flow_mreg_c[2] != REG_NONE; +} + +/** + * Discover the maximum number of priority available. + * + * @param[in] dev + * Pointer to the Ethernet device structure. + * + * @return + * number of supported flow priority on success, a negative errno + * value otherwise and rte_errno is set. + */ +int +mlx5_flow_discover_priorities(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct { + struct ibv_flow_attr attr; + struct ibv_flow_spec_eth eth; + struct ibv_flow_spec_action_drop drop; + } flow_attr = { + .attr = { + .num_of_specs = 2, + .port = (uint8_t)priv->ibv_port, + }, + .eth = { + .type = IBV_FLOW_SPEC_ETH, + .size = sizeof(struct ibv_flow_spec_eth), + }, + .drop = { + .size = sizeof(struct ibv_flow_spec_action_drop), + .type = IBV_FLOW_SPEC_ACTION_DROP, + }, + }; + struct ibv_flow *flow; + struct mlx5_hrxq *drop = mlx5_hrxq_drop_new(dev); + uint16_t vprio[] = { 8, 16 }; + int i; + int priority = 0; + + if (!drop) { + rte_errno = ENOTSUP; + return -rte_errno; + } + for (i = 0; i != RTE_DIM(vprio); i++) { + flow_attr.attr.priority = vprio[i] - 1; + flow = mlx5_glue->create_flow(drop->qp, &flow_attr.attr); + if (!flow) + break; + claim_zero(mlx5_glue->destroy_flow(flow)); + priority = vprio[i]; + } + mlx5_hrxq_drop_release(dev); + switch (priority) { + case 8: + priority = RTE_DIM(priority_map_3); + break; + case 16: + priority = RTE_DIM(priority_map_5); + break; + default: + rte_errno = ENOTSUP; + DRV_LOG(ERR, + "port %u verbs maximum priority: %d expected 8/16", + dev->data->port_id, priority); + return -rte_errno; + } + DRV_LOG(INFO, "port %u flow maximum priority: %d", + dev->data->port_id, priority); + return priority; +} + +/** + * Adjust flow priority based on the highest layer and the request priority. + * + * @param[in] dev + * Pointer to the Ethernet device structure. + * @param[in] priority + * The rule base priority. + * @param[in] subpriority + * The priority based on the items. + * + * @return + * The new priority. 
+ */ +uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority, + uint32_t subpriority) +{ + uint32_t res = 0; + struct mlx5_priv *priv = dev->data->dev_private; + + switch (priv->config.flow_prio) { + case RTE_DIM(priority_map_3): + res = priority_map_3[priority][subpriority]; + break; + case RTE_DIM(priority_map_5): + res = priority_map_5[priority][subpriority]; + break; + } + return res; +} + +/** + * Verify the @p item specifications (spec, last, mask) are compatible with the + * NIC capabilities. + * + * @param[in] item + * Item specification. + * @param[in] mask + * @p item->mask or flow default bit-masks. + * @param[in] nic_mask + * Bit-masks covering supported fields by the NIC to compare with user mask. + * @param[in] size + * Bit-masks size in bytes. + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_flow_item_acceptable(const struct rte_flow_item *item, + const uint8_t *mask, + const uint8_t *nic_mask, + unsigned int size, + struct rte_flow_error *error) +{ + unsigned int i; + + MLX5_ASSERT(nic_mask); + for (i = 0; i < size; ++i) + if ((nic_mask[i] | mask[i]) != nic_mask[i]) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "mask enables non supported" + " bits"); + if (!item->spec && (item->mask || item->last)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "mask/last without a spec is not" + " supported"); + if (item->spec && item->last) { + uint8_t spec[size]; + uint8_t last[size]; + unsigned int i; + int ret; + + for (i = 0; i < size; ++i) { + spec[i] = ((const uint8_t *)item->spec)[i] & mask[i]; + last[i] = ((const uint8_t *)item->last)[i] & mask[i]; + } + ret = memcmp(spec, last, size); + if (ret != 0) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "range is not valid"); + } + return 0; +} + +/** + * Adjust the hash fields according to the @p flow information. + * + * @param[in] dev_flow. + * Pointer to the mlx5_flow. + * @param[in] tunnel + * 1 when the hash field is for a tunnel item. + * @param[in] layer_types + * ETH_RSS_* types. + * @param[in] hash_fields + * Item hash fields. + * + * @return + * The hash fields that should be used. + */ +uint64_t +mlx5_flow_hashfields_adjust(struct mlx5_flow_rss_desc *rss_desc, + int tunnel __rte_unused, uint64_t layer_types, + uint64_t hash_fields) +{ +#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT + int rss_request_inner = rss_desc->level >= 2; + + /* Check RSS hash level for tunnel. */ + if (tunnel && rss_request_inner) + hash_fields |= IBV_RX_HASH_INNER; + else if (tunnel || rss_request_inner) + return 0; +#endif + /* Check if requested layer matches RSS hash fields. */ + if (!(rss_desc->types & layer_types)) + return 0; + return hash_fields; +} + +/** + * Lookup and set the ptype in the data Rx part. A single Ptype can be used, + * if several tunnel rules are used on this queue, the tunnel ptype will be + * cleared. + * + * @param rxq_ctrl + * Rx queue to update. + */ +static void +flow_rxq_tunnel_ptype_update(struct mlx5_rxq_ctrl *rxq_ctrl) +{ + unsigned int i; + uint32_t tunnel_ptype = 0; + + /* Look up for the ptype to use. 
*/
+        for (i = 0; i != MLX5_FLOW_TUNNEL; ++i) {
+                if (!rxq_ctrl->flow_tunnels_n[i])
+                        continue;
+                if (!tunnel_ptype) {
+                        tunnel_ptype = tunnels_info[i].ptype;
+                } else {
+                        tunnel_ptype = 0;
+                        break;
+                }
+        }
+        rxq_ctrl->rxq.tunnel = tunnel_ptype;
+}
+
+/**
+ * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) according to the device
+ * flow.
+ *
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
+ * @param[in] dev_handle
+ *   Pointer to device flow handle structure.
+ */
+static void
+flow_drv_rxq_flags_set(struct rte_eth_dev *dev,
+                       struct mlx5_flow_handle *dev_handle)
+{
+        struct mlx5_priv *priv = dev->data->dev_private;
+        const int mark = dev_handle->mark;
+        const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL);
+        struct mlx5_hrxq *hrxq;
+        unsigned int i;
+
+        if (dev_handle->fate_action != MLX5_FLOW_FATE_QUEUE)
+                return;
+        hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
+                              dev_handle->rix_hrxq);
+        if (!hrxq)
+                return;
+        for (i = 0; i != hrxq->ind_table->queues_n; ++i) {
+                int idx = hrxq->ind_table->queues[i];
+                struct mlx5_rxq_ctrl *rxq_ctrl =
+                        container_of((*priv->rxqs)[idx],
+                                     struct mlx5_rxq_ctrl, rxq);
+
+                /*
+                 * To support metadata register copy on Tx loopback,
+                 * this must be always enabled (metadata may arrive
+                 * from another port, not only from local flows).
+                 */
+                if (priv->config.dv_flow_en &&
+                    priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
+                    mlx5_flow_ext_mreg_supported(dev)) {
+                        rxq_ctrl->rxq.mark = 1;
+                        rxq_ctrl->flow_mark_n = 1;
+                } else if (mark) {
+                        rxq_ctrl->rxq.mark = 1;
+                        rxq_ctrl->flow_mark_n++;
+                }
+                if (tunnel) {
+                        unsigned int j;
+
+                        /* Increase the counter matching the flow. */
+                        for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
+                                if ((tunnels_info[j].tunnel &
+                                     dev_handle->layers) ==
+                                    tunnels_info[j].tunnel) {
+                                        rxq_ctrl->flow_tunnels_n[j]++;
+                                        break;
+                                }
+                        }
+                        flow_rxq_tunnel_ptype_update(rxq_ctrl);
+                }
+        }
+}
+
+/**
+ * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) for a flow.
+ *
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
+ * @param[in] flow
+ *   Pointer to flow structure.
+ */
+static void
+flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow)
+{
+        struct mlx5_priv *priv = dev->data->dev_private;
+        uint32_t handle_idx;
+        struct mlx5_flow_handle *dev_handle;
+
+        SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
+                       handle_idx, dev_handle, next)
+                flow_drv_rxq_flags_set(dev, dev_handle);
+}
+
+/**
+ * Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with the
+ * device flow if no other flow uses it with the same kind of request.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param[in] dev_handle
+ *   Pointer to the device flow handle structure.
+ */ +static void +flow_drv_rxq_flags_trim(struct rte_eth_dev *dev, + struct mlx5_flow_handle *dev_handle) +{ + struct mlx5_priv *priv = dev->data->dev_private; + const int mark = dev_handle->mark; + const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL); + struct mlx5_hrxq *hrxq; + unsigned int i; + + if (dev_handle->fate_action != MLX5_FLOW_FATE_QUEUE) + return; + hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], + dev_handle->rix_hrxq); + if (!hrxq) + return; + MLX5_ASSERT(dev->data->dev_started); + for (i = 0; i != hrxq->ind_table->queues_n; ++i) { + int idx = hrxq->ind_table->queues[i]; + struct mlx5_rxq_ctrl *rxq_ctrl = + container_of((*priv->rxqs)[idx], + struct mlx5_rxq_ctrl, rxq); + + if (priv->config.dv_flow_en && + priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY && + mlx5_flow_ext_mreg_supported(dev)) { + rxq_ctrl->rxq.mark = 1; + rxq_ctrl->flow_mark_n = 1; + } else if (mark) { + rxq_ctrl->flow_mark_n--; + rxq_ctrl->rxq.mark = !!rxq_ctrl->flow_mark_n; + } + if (tunnel) { + unsigned int j; + + /* Decrease the counter matching the flow. */ + for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) { + if ((tunnels_info[j].tunnel & + dev_handle->layers) == + tunnels_info[j].tunnel) { + rxq_ctrl->flow_tunnels_n[j]--; + break; + } + } + flow_rxq_tunnel_ptype_update(rxq_ctrl); + } + } +} + +/** + * Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with the + * @p flow if no other flow uses it with the same kind of request. + * + * @param dev + * Pointer to Ethernet device. + * @param[in] flow + * Pointer to the flow. + */ +static void +flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow) +{ + struct mlx5_priv *priv = dev->data->dev_private; + uint32_t handle_idx; + struct mlx5_flow_handle *dev_handle; + + SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles, + handle_idx, dev_handle, next) + flow_drv_rxq_flags_trim(dev, dev_handle); +} + +/** + * Clear the Mark/Flag and Tunnel ptype information in all Rx queues. + * + * @param dev + * Pointer to Ethernet device. + */ +static void +flow_rxq_flags_clear(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + unsigned int i; + + for (i = 0; i != priv->rxqs_n; ++i) { + struct mlx5_rxq_ctrl *rxq_ctrl; + unsigned int j; + + if (!(*priv->rxqs)[i]) + continue; + rxq_ctrl = container_of((*priv->rxqs)[i], + struct mlx5_rxq_ctrl, rxq); + rxq_ctrl->flow_mark_n = 0; + rxq_ctrl->rxq.mark = 0; + for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) + rxq_ctrl->flow_tunnels_n[j] = 0; + rxq_ctrl->rxq.tunnel = 0; + } +} + +/** + * Set the Rx queue dynamic metadata (mask and offset) for a flow + * + * @param[in] dev + * Pointer to the Ethernet device structure. + */ +void +mlx5_flow_rxq_dynf_metadata_set(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_rxq_data *data; + unsigned int i; + + for (i = 0; i != priv->rxqs_n; ++i) { + if (!(*priv->rxqs)[i]) + continue; + data = (*priv->rxqs)[i]; + if (!rte_flow_dynf_metadata_avail()) { + data->dynf_meta = 0; + data->flow_meta_mask = 0; + data->flow_meta_offset = -1; + } else { + data->dynf_meta = 1; + data->flow_meta_mask = rte_flow_dynf_metadata_mask; + data->flow_meta_offset = rte_flow_dynf_metadata_offs; + } + } +} + +/* + * return a pointer to the desired action in the list of actions. + * + * @param[in] actions + * The list of actions to search the action in. + * @param[in] action + * The action to find. + * + * @return + * Pointer to the action in the list, if found. NULL otherwise. 
+ */ +const struct rte_flow_action * +mlx5_flow_find_action(const struct rte_flow_action *actions, + enum rte_flow_action_type action) +{ + if (actions == NULL) + return NULL; + for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) + if (actions->type == action) + return actions; + return NULL; +} + +/* + * Validate the flag action. + * + * @param[in] action_flags + * Bit-fields that holds the actions detected until now. + * @param[in] attr + * Attributes of flow that includes this action. + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_flow_validate_action_flag(uint64_t action_flags, + const struct rte_flow_attr *attr, + struct rte_flow_error *error) +{ + if (action_flags & MLX5_FLOW_ACTION_MARK) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "can't mark and flag in same flow"); + if (action_flags & MLX5_FLOW_ACTION_FLAG) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "can't have 2 flag" + " actions in same flow"); + if (attr->egress) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, + "flag action not supported for " + "egress"); + return 0; +} + +/* + * Validate the mark action. + * + * @param[in] action + * Pointer to the queue action. + * @param[in] action_flags + * Bit-fields that holds the actions detected until now. + * @param[in] attr + * Attributes of flow that includes this action. + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_flow_validate_action_mark(const struct rte_flow_action *action, + uint64_t action_flags, + const struct rte_flow_attr *attr, + struct rte_flow_error *error) +{ + const struct rte_flow_action_mark *mark = action->conf; + + if (!mark) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + action, + "configuration cannot be null"); + if (mark->id >= MLX5_FLOW_MARK_MAX) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, + &mark->id, + "mark id must in 0 <= id < " + RTE_STR(MLX5_FLOW_MARK_MAX)); + if (action_flags & MLX5_FLOW_ACTION_FLAG) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "can't flag and mark in same flow"); + if (action_flags & MLX5_FLOW_ACTION_MARK) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "can't have 2 mark actions in same" + " flow"); + if (attr->egress) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, + "mark action not supported for " + "egress"); + return 0; +} + +/* + * Validate the drop action. + * + * @param[in] action_flags + * Bit-fields that holds the actions detected until now. + * @param[in] attr + * Attributes of flow that includes this action. + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_flow_validate_action_drop(uint64_t action_flags __rte_unused, + const struct rte_flow_attr *attr, + struct rte_flow_error *error) +{ + if (attr->egress) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, + "drop action not supported for " + "egress"); + return 0; +} + +/* + * Validate the queue action. + * + * @param[in] action + * Pointer to the queue action. 
+ * @param[in] action_flags + * Bit-fields that holds the actions detected until now. + * @param[in] dev + * Pointer to the Ethernet device structure. + * @param[in] attr + * Attributes of flow that includes this action. + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_flow_validate_action_queue(const struct rte_flow_action *action, + uint64_t action_flags, + struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + struct rte_flow_error *error) +{ + struct mlx5_priv *priv = dev->data->dev_private; + const struct rte_flow_action_queue *queue = action->conf; + + if (action_flags & MLX5_FLOW_FATE_ACTIONS) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "can't have 2 fate actions in" + " same flow"); + if (!priv->rxqs_n) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, + NULL, "No Rx queues configured"); + if (queue->index >= priv->rxqs_n) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, + &queue->index, + "queue index out of range"); + if (!(*priv->rxqs)[queue->index]) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, + &queue->index, + "queue is not configured"); + if (attr->egress) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, + "queue action not supported for " + "egress"); + return 0; +} + +/* + * Validate the rss action. + * + * @param[in] action + * Pointer to the queue action. + * @param[in] action_flags + * Bit-fields that holds the actions detected until now. + * @param[in] dev + * Pointer to the Ethernet device structure. + * @param[in] attr + * Attributes of flow that includes this action. + * @param[in] item_flags + * Items that were detected. + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_flow_validate_action_rss(const struct rte_flow_action *action, + uint64_t action_flags, + struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + uint64_t item_flags, + struct rte_flow_error *error) +{ + struct mlx5_priv *priv = dev->data->dev_private; + const struct rte_flow_action_rss *rss = action->conf; + int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); + unsigned int i; + + if (action_flags & MLX5_FLOW_FATE_ACTIONS) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "can't have 2 fate actions" + " in same flow"); + if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT && + rss->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, + &rss->func, + "RSS hash function not supported"); +#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT + if (rss->level > 2) +#else + if (rss->level > 1) +#endif + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, + &rss->level, + "tunnel RSS is not supported"); + /* allow RSS key_len 0 in case of NULL (default) RSS key. 
*/ + if (rss->key_len == 0 && rss->key != NULL) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, + &rss->key_len, + "RSS hash key length 0"); + if (rss->key_len > 0 && rss->key_len < MLX5_RSS_HASH_KEY_LEN) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, + &rss->key_len, + "RSS hash key too small"); + if (rss->key_len > MLX5_RSS_HASH_KEY_LEN) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, + &rss->key_len, + "RSS hash key too large"); + if (rss->queue_num > priv->config.ind_table_max_size) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, + &rss->queue_num, + "number of queues too large"); + if (rss->types & MLX5_RSS_HF_MASK) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, + &rss->types, + "some RSS protocols are not" + " supported"); + if ((rss->types & (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY)) && + !(rss->types & ETH_RSS_IP)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, + "L3 partial RSS requested but L3 RSS" + " type not specified"); + if ((rss->types & (ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY)) && + !(rss->types & (ETH_RSS_UDP | ETH_RSS_TCP))) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, + "L4 partial RSS requested but L4 RSS" + " type not specified"); + if (!priv->rxqs_n) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, + NULL, "No Rx queues configured"); + if (!rss->queue_num) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, + NULL, "No queues configured"); + for (i = 0; i != rss->queue_num; ++i) { + if (rss->queue[i] >= priv->rxqs_n) + return rte_flow_error_set + (error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, + &rss->queue[i], "queue index out of range"); + if (!(*priv->rxqs)[rss->queue[i]]) + return rte_flow_error_set + (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_CONF, + &rss->queue[i], "queue is not configured"); + } + if (attr->egress) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, + "rss action not supported for " + "egress"); + if (rss->level > 1 && !tunnel) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, + "inner RSS is not supported for " + "non-tunnel flows"); + return 0; +} + +/* + * Validate the count action. + * + * @param[in] dev + * Pointer to the Ethernet device structure. + * @param[in] attr + * Attributes of flow that includes this action. + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_flow_validate_action_count(struct rte_eth_dev *dev __rte_unused, + const struct rte_flow_attr *attr, + struct rte_flow_error *error) +{ + if (attr->egress) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, + "count action not supported for " + "egress"); + return 0; +} + +/** + * Verify the @p attributes will be correctly understood by the NIC and store + * them in the @p flow if everything is correct. + * + * @param[in] dev + * Pointer to the Ethernet device structure. + * @param[in] attributes + * Pointer to flow attributes + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. 
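Taken together, the RSS checks amount to a contract on struct rte_flow_action_rss: Toeplitz (or the default) hash function, a key that is either absent or exactly MLX5_RSS_HASH_KEY_LEN (40) bytes, a queue list that fits the indirection table, RSS types that include the base L3/L4 type whenever a SRC_ONLY/DST_ONLY refinement is requested, and level > 1 only on tunnel flows. A configuration sketch that satisfies those constraints; key bytes and queue ids are placeholders:

#include <rte_common.h>
#include <rte_ethdev.h>
#include <rte_flow.h>

static const uint8_t rss_key[40] = { 0x2c, 0xc6, 0x81, 0xd1 /* ... */ };
static const uint16_t rss_queues[] = { 0, 1, 2, 3 };

static const struct rte_flow_action_rss rss_conf = {
        .func = RTE_ETH_HASH_FUNCTION_TOEPLITZ,
        .level = 1,                 /* > 1 is only valid together with a tunnel item */
        .types = ETH_RSS_IP | ETH_RSS_UDP,
        .key_len = sizeof(rss_key), /* 0 (default key) or exactly 40 bytes */
        .key = rss_key,
        .queue_num = RTE_DIM(rss_queues),
        .queue = rss_queues,
};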
+ */ +int +mlx5_flow_validate_attributes(struct rte_eth_dev *dev, + const struct rte_flow_attr *attributes, + struct rte_flow_error *error) +{ + struct mlx5_priv *priv = dev->data->dev_private; + uint32_t priority_max = priv->config.flow_prio - 1; + + if (attributes->group) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_GROUP, + NULL, "groups is not supported"); + if (attributes->priority != MLX5_FLOW_PRIO_RSVD && + attributes->priority >= priority_max) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, + NULL, "priority out of range"); + if (attributes->egress) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, + "egress is not supported"); + if (attributes->transfer && !priv->config.dv_esw_en) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, + NULL, "transfer is not supported"); + if (!attributes->ingress) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, + NULL, + "ingress attribute is mandatory"); + return 0; +} + +/** + * Validate ICMP6 item. + * + * @param[in] item + * Item specification. + * @param[in] item_flags + * Bit-fields that holds the items detected until now. + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_flow_validate_item_icmp6(const struct rte_flow_item *item, + uint64_t item_flags, + uint8_t target_protocol, + struct rte_flow_error *error) +{ + const struct rte_flow_item_icmp6 *mask = item->mask; + const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); + const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 : + MLX5_FLOW_LAYER_OUTER_L3_IPV6; + const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 : + MLX5_FLOW_LAYER_OUTER_L4; + int ret; + + if (target_protocol != 0xFF && target_protocol != IPPROTO_ICMPV6) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "protocol filtering not compatible" + " with ICMP6 layer"); + if (!(item_flags & l3m)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "IPv6 is mandatory to filter on" + " ICMP6"); + if (item_flags & l4m) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "multiple L4 layers not supported"); + if (!mask) + mask = &rte_flow_item_icmp6_mask; + ret = mlx5_flow_item_acceptable + (item, (const uint8_t *)mask, + (const uint8_t *)&rte_flow_item_icmp6_mask, + sizeof(struct rte_flow_item_icmp6), error); + if (ret < 0) + return ret; + return 0; +} + +/** + * Validate ICMP item. + * + * @param[in] item + * Item specification. + * @param[in] item_flags + * Bit-fields that holds the items detected until now. + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_flow_validate_item_icmp(const struct rte_flow_item *item, + uint64_t item_flags, + uint8_t target_protocol, + struct rte_flow_error *error) +{ + const struct rte_flow_item_icmp *mask = item->mask; + const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); + const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 : + MLX5_FLOW_LAYER_OUTER_L3_IPV4; + const uint64_t l4m = tunnel ? 
MLX5_FLOW_LAYER_INNER_L4 : + MLX5_FLOW_LAYER_OUTER_L4; + int ret; + + if (target_protocol != 0xFF && target_protocol != IPPROTO_ICMP) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "protocol filtering not compatible" + " with ICMP layer"); + if (!(item_flags & l3m)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "IPv4 is mandatory to filter" + " on ICMP"); + if (item_flags & l4m) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "multiple L4 layers not supported"); + if (!mask) + mask = &rte_flow_item_icmp_mask; + ret = mlx5_flow_item_acceptable + (item, (const uint8_t *)mask, + (const uint8_t *)&rte_flow_item_icmp_mask, + sizeof(struct rte_flow_item_icmp), error); + if (ret < 0) + return ret; + return 0; +} + +/** + * Validate Ethernet item. + * + * @param[in] item + * Item specification. + * @param[in] item_flags + * Bit-fields that holds the items detected until now. + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_flow_validate_item_eth(const struct rte_flow_item *item, + uint64_t item_flags, + struct rte_flow_error *error) +{ + const struct rte_flow_item_eth *mask = item->mask; + const struct rte_flow_item_eth nic_mask = { + .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff", + .src.addr_bytes = "\xff\xff\xff\xff\xff\xff", + .type = RTE_BE16(0xffff), + }; + int ret; + int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); + const uint64_t ethm = tunnel ? MLX5_FLOW_LAYER_INNER_L2 : + MLX5_FLOW_LAYER_OUTER_L2; + + if (item_flags & ethm) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "multiple L2 layers not supported"); + if ((!tunnel && (item_flags & MLX5_FLOW_LAYER_OUTER_L3)) || + (tunnel && (item_flags & MLX5_FLOW_LAYER_INNER_L3))) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "L2 layer should not follow " + "L3 layers"); + if ((!tunnel && (item_flags & MLX5_FLOW_LAYER_OUTER_VLAN)) || + (tunnel && (item_flags & MLX5_FLOW_LAYER_INNER_VLAN))) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "L2 layer should not follow VLAN"); + if (!mask) + mask = &rte_flow_item_eth_mask; + ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask, + (const uint8_t *)&nic_mask, + sizeof(struct rte_flow_item_eth), + error); + return ret; +} + +/** + * Validate VLAN item. + * + * @param[in] item + * Item specification. + * @param[in] item_flags + * Bit-fields that holds the items detected until now. + * @param[in] dev + * Ethernet device flow is being created on. + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_flow_validate_item_vlan(const struct rte_flow_item *item, + uint64_t item_flags, + struct rte_eth_dev *dev, + struct rte_flow_error *error) +{ + const struct rte_flow_item_vlan *spec = item->spec; + const struct rte_flow_item_vlan *mask = item->mask; + const struct rte_flow_item_vlan nic_mask = { + .tci = RTE_BE16(UINT16_MAX), + .inner_type = RTE_BE16(UINT16_MAX), + }; + uint16_t vlan_tag = 0; + const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); + int ret; + const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 | + MLX5_FLOW_LAYER_INNER_L4) : + (MLX5_FLOW_LAYER_OUTER_L3 | + MLX5_FLOW_LAYER_OUTER_L4); + const uint64_t vlanm = tunnel ? 
MLX5_FLOW_LAYER_INNER_VLAN : + MLX5_FLOW_LAYER_OUTER_VLAN; + + if (item_flags & vlanm) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "multiple VLAN layers not supported"); + else if ((item_flags & l34m) != 0) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "VLAN cannot follow L3/L4 layer"); + if (!mask) + mask = &rte_flow_item_vlan_mask; + ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask, + (const uint8_t *)&nic_mask, + sizeof(struct rte_flow_item_vlan), + error); + if (ret) + return ret; + if (!tunnel && mask->tci != RTE_BE16(0x0fff)) { + struct mlx5_priv *priv = dev->data->dev_private; + + if (priv->vmwa_context) { + /* + * Non-NULL context means we have a virtual machine + * and SR-IOV enabled, we have to create VLAN interface + * to make hypervisor to setup E-Switch vport + * context correctly. We avoid creating the multiple + * VLAN interfaces, so we cannot support VLAN tag mask. + */ + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "VLAN tag mask is not" + " supported in virtual" + " environment"); + } + } + if (spec) { + vlan_tag = spec->tci; + vlan_tag &= mask->tci; + } + /* + * From verbs perspective an empty VLAN is equivalent + * to a packet without VLAN layer. + */ + if (!vlan_tag) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM_SPEC, + item->spec, + "VLAN cannot be empty"); + return 0; +} + +/** + * Validate IPV4 item. + * + * @param[in] item + * Item specification. + * @param[in] item_flags + * Bit-fields that holds the items detected until now. + * @param[in] acc_mask + * Acceptable mask, if NULL default internal default mask + * will be used to check whether item fields are supported. + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item, + uint64_t item_flags, + uint64_t last_item, + uint16_t ether_type, + const struct rte_flow_item_ipv4 *acc_mask, + struct rte_flow_error *error) +{ + const struct rte_flow_item_ipv4 *mask = item->mask; + const struct rte_flow_item_ipv4 *spec = item->spec; + const struct rte_flow_item_ipv4 nic_mask = { + .hdr = { + .src_addr = RTE_BE32(0xffffffff), + .dst_addr = RTE_BE32(0xffffffff), + .type_of_service = 0xff, + .next_proto_id = 0xff, + }, + }; + const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); + const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 : + MLX5_FLOW_LAYER_OUTER_L3; + const uint64_t l4m = tunnel ? 
MLX5_FLOW_LAYER_INNER_L4 : + MLX5_FLOW_LAYER_OUTER_L4; + int ret; + uint8_t next_proto = 0xFF; + const uint64_t l2_vlan = (MLX5_FLOW_LAYER_L2 | + MLX5_FLOW_LAYER_OUTER_VLAN | + MLX5_FLOW_LAYER_INNER_VLAN); + + if ((last_item & l2_vlan) && ether_type && + ether_type != RTE_ETHER_TYPE_IPV4) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "IPv4 cannot follow L2/VLAN layer " + "which ether type is not IPv4"); + if (item_flags & MLX5_FLOW_LAYER_IPIP) { + if (mask && spec) + next_proto = mask->hdr.next_proto_id & + spec->hdr.next_proto_id; + if (next_proto == IPPROTO_IPIP || next_proto == IPPROTO_IPV6) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "multiple tunnel " + "not supported"); + } + if (item_flags & MLX5_FLOW_LAYER_IPV6_ENCAP) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "wrong tunnel type - IPv6 specified " + "but IPv4 item provided"); + if (item_flags & l3m) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "multiple L3 layers not supported"); + else if (item_flags & l4m) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "L3 cannot follow an L4 layer."); + else if ((item_flags & MLX5_FLOW_LAYER_NVGRE) && + !(item_flags & MLX5_FLOW_LAYER_INNER_L2)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "L3 cannot follow an NVGRE layer."); + if (!mask) + mask = &rte_flow_item_ipv4_mask; + else if (mask->hdr.next_proto_id != 0 && + mask->hdr.next_proto_id != 0xff) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask, + "partial mask is not supported" + " for protocol"); + ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask, + acc_mask ? (const uint8_t *)acc_mask + : (const uint8_t *)&nic_mask, + sizeof(struct rte_flow_item_ipv4), + error); + if (ret < 0) + return ret; + return 0; +} + +/** + * Validate IPV6 item. + * + * @param[in] item + * Item specification. + * @param[in] item_flags + * Bit-fields that holds the items detected until now. + * @param[in] acc_mask + * Acceptable mask, if NULL default internal default mask + * will be used to check whether item fields are supported. + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item, + uint64_t item_flags, + uint64_t last_item, + uint16_t ether_type, + const struct rte_flow_item_ipv6 *acc_mask, + struct rte_flow_error *error) +{ + const struct rte_flow_item_ipv6 *mask = item->mask; + const struct rte_flow_item_ipv6 *spec = item->spec; + const struct rte_flow_item_ipv6 nic_mask = { + .hdr = { + .src_addr = + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff", + .dst_addr = + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff", + .vtc_flow = RTE_BE32(0xffffffff), + .proto = 0xff, + }, + }; + const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); + const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 : + MLX5_FLOW_LAYER_OUTER_L3; + const uint64_t l4m = tunnel ? 
MLX5_FLOW_LAYER_INNER_L4 : + MLX5_FLOW_LAYER_OUTER_L4; + int ret; + uint8_t next_proto = 0xFF; + const uint64_t l2_vlan = (MLX5_FLOW_LAYER_L2 | + MLX5_FLOW_LAYER_OUTER_VLAN | + MLX5_FLOW_LAYER_INNER_VLAN); + + if ((last_item & l2_vlan) && ether_type && + ether_type != RTE_ETHER_TYPE_IPV6) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "IPv6 cannot follow L2/VLAN layer " + "which ether type is not IPv6"); + if (item_flags & MLX5_FLOW_LAYER_IPV6_ENCAP) { + if (mask && spec) + next_proto = mask->hdr.proto & spec->hdr.proto; + if (next_proto == IPPROTO_IPIP || next_proto == IPPROTO_IPV6) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "multiple tunnel " + "not supported"); + } + if (item_flags & MLX5_FLOW_LAYER_IPIP) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "wrong tunnel type - IPv4 specified " + "but IPv6 item provided"); + if (item_flags & l3m) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "multiple L3 layers not supported"); + else if (item_flags & l4m) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "L3 cannot follow an L4 layer."); + else if ((item_flags & MLX5_FLOW_LAYER_NVGRE) && + !(item_flags & MLX5_FLOW_LAYER_INNER_L2)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "L3 cannot follow an NVGRE layer."); + if (!mask) + mask = &rte_flow_item_ipv6_mask; + ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask, + acc_mask ? (const uint8_t *)acc_mask + : (const uint8_t *)&nic_mask, + sizeof(struct rte_flow_item_ipv6), + error); + if (ret < 0) + return ret; + return 0; +} + +/** + * Validate UDP item. + * + * @param[in] item + * Item specification. + * @param[in] item_flags + * Bit-fields that holds the items detected until now. + * @param[in] target_protocol + * The next protocol in the previous item. + * @param[in] flow_mask + * mlx5 flow-specific (DV, verbs, etc.) supported header fields mask. + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_flow_validate_item_udp(const struct rte_flow_item *item, + uint64_t item_flags, + uint8_t target_protocol, + struct rte_flow_error *error) +{ + const struct rte_flow_item_udp *mask = item->mask; + const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); + const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 : + MLX5_FLOW_LAYER_OUTER_L3; + const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 : + MLX5_FLOW_LAYER_OUTER_L4; + int ret; + + if (target_protocol != 0xff && target_protocol != IPPROTO_UDP) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "protocol filtering not compatible" + " with UDP layer"); + if (!(item_flags & l3m)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "L3 is mandatory to filter on L4"); + if (item_flags & l4m) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "multiple L4 layers not supported"); + if (!mask) + mask = &rte_flow_item_udp_mask; + ret = mlx5_flow_item_acceptable + (item, (const uint8_t *)mask, + (const uint8_t *)&rte_flow_item_udp_mask, + sizeof(struct rte_flow_item_udp), error); + if (ret < 0) + return ret; + return 0; +} + +/** + * Validate TCP item. + * + * @param[in] item + * Item specification. + * @param[in] item_flags + * Bit-fields that holds the items detected until now. 
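The UDP validator just above, like the TCP one that follows, checks the new item against the bit set of layers already parsed: an L4 item requires that an L3 bit is present and that no other L4 bit is set yet. In pattern terms that simply means keeping the classical ordering, as in this sketch:

#include <rte_flow.h>

/* Sketch: item ordering accepted by the L3/L4 validators above -- L2, then
 * one L3, then one L4; swapping IPV4 and UDP would trip the
 * "L3 cannot follow an L4 layer" check. */
static const struct rte_flow_item udp_ordering[] = {
        { .type = RTE_FLOW_ITEM_TYPE_ETH },
        { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
        { .type = RTE_FLOW_ITEM_TYPE_UDP },
        { .type = RTE_FLOW_ITEM_TYPE_END },
};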
+ * @param[in] target_protocol + * The next protocol in the previous item. + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_flow_validate_item_tcp(const struct rte_flow_item *item, + uint64_t item_flags, + uint8_t target_protocol, + const struct rte_flow_item_tcp *flow_mask, + struct rte_flow_error *error) +{ + const struct rte_flow_item_tcp *mask = item->mask; + const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); + const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 : + MLX5_FLOW_LAYER_OUTER_L3; + const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 : + MLX5_FLOW_LAYER_OUTER_L4; + int ret; + + MLX5_ASSERT(flow_mask); + if (target_protocol != 0xff && target_protocol != IPPROTO_TCP) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "protocol filtering not compatible" + " with TCP layer"); + if (!(item_flags & l3m)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "L3 is mandatory to filter on L4"); + if (item_flags & l4m) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "multiple L4 layers not supported"); + if (!mask) + mask = &rte_flow_item_tcp_mask; + ret = mlx5_flow_item_acceptable + (item, (const uint8_t *)mask, + (const uint8_t *)flow_mask, + sizeof(struct rte_flow_item_tcp), error); + if (ret < 0) + return ret; + return 0; +} + +/** + * Validate VXLAN item. + * + * @param[in] item + * Item specification. + * @param[in] item_flags + * Bit-fields that holds the items detected until now. + * @param[in] target_protocol + * The next protocol in the previous item. + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_flow_validate_item_vxlan(const struct rte_flow_item *item, + uint64_t item_flags, + struct rte_flow_error *error) +{ + const struct rte_flow_item_vxlan *spec = item->spec; + const struct rte_flow_item_vxlan *mask = item->mask; + int ret; + union vni { + uint32_t vlan_id; + uint8_t vni[4]; + } id = { .vlan_id = 0, }; + + + if (item_flags & MLX5_FLOW_LAYER_TUNNEL) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "multiple tunnel layers not" + " supported"); + /* + * Verify only UDPv4 is present as defined in + * https://tools.ietf.org/html/rfc7348 + */ + if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "no outer UDP layer found"); + if (!mask) + mask = &rte_flow_item_vxlan_mask; + ret = mlx5_flow_item_acceptable + (item, (const uint8_t *)mask, + (const uint8_t *)&rte_flow_item_vxlan_mask, + sizeof(struct rte_flow_item_vxlan), + error); + if (ret < 0) + return ret; + if (spec) { + memcpy(&id.vni[1], spec->vni, 3); + memcpy(&id.vni[1], mask->vni, 3); + } + if (!(item_flags & MLX5_FLOW_LAYER_OUTER)) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "VXLAN tunnel must be fully defined"); + return 0; +} + +/** + * Validate VXLAN_GPE item. + * + * @param[in] item + * Item specification. + * @param[in] item_flags + * Bit-fields that holds the items detected until now. + * @param[in] priv + * Pointer to the private data structure. + * @param[in] target_protocol + * The next protocol in the previous item. + * @param[out] error + * Pointer to error structure. 
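The VXLAN validator requires the tunnel to be "fully defined": outer L2/L3/UDP items must precede the VXLAN item (RFC 7348 carries VXLAN over UDP) and only the 24-bit VNI is matchable. An illustrative pattern; the UDP port and VNI values are placeholders:

#include <rte_byteorder.h>
#include <rte_flow.h>

static const struct rte_flow_item_udp udp_spec = {
        .hdr = { .dst_port = RTE_BE16(4789) },
};
static const struct rte_flow_item_udp udp_mask = {
        .hdr = { .dst_port = RTE_BE16(0xffff) },
};
static const struct rte_flow_item_vxlan vxlan_spec = { .vni = { 0x00, 0x12, 0x34 } };
static const struct rte_flow_item_vxlan vxlan_mask = { .vni = { 0xff, 0xff, 0xff } };

static const struct rte_flow_item vxlan_pattern[] = {
        { .type = RTE_FLOW_ITEM_TYPE_ETH },
        { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
        { .type = RTE_FLOW_ITEM_TYPE_UDP,
          .spec = &udp_spec, .mask = &udp_mask },
        { .type = RTE_FLOW_ITEM_TYPE_VXLAN,
          .spec = &vxlan_spec, .mask = &vxlan_mask },
        { .type = RTE_FLOW_ITEM_TYPE_END },
};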
+ * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item, + uint64_t item_flags, + struct rte_eth_dev *dev, + struct rte_flow_error *error) +{ + struct mlx5_priv *priv = dev->data->dev_private; + const struct rte_flow_item_vxlan_gpe *spec = item->spec; + const struct rte_flow_item_vxlan_gpe *mask = item->mask; + int ret; + union vni { + uint32_t vlan_id; + uint8_t vni[4]; + } id = { .vlan_id = 0, }; + + if (!priv->config.l3_vxlan_en) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "L3 VXLAN is not enabled by device" + " parameter and/or not configured in" + " firmware"); + if (item_flags & MLX5_FLOW_LAYER_TUNNEL) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "multiple tunnel layers not" + " supported"); + /* + * Verify only UDPv4 is present as defined in + * https://tools.ietf.org/html/rfc7348 + */ + if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "no outer UDP layer found"); + if (!mask) + mask = &rte_flow_item_vxlan_gpe_mask; + ret = mlx5_flow_item_acceptable + (item, (const uint8_t *)mask, + (const uint8_t *)&rte_flow_item_vxlan_gpe_mask, + sizeof(struct rte_flow_item_vxlan_gpe), + error); + if (ret < 0) + return ret; + if (spec) { + if (spec->protocol) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "VxLAN-GPE protocol" + " not supported"); + memcpy(&id.vni[1], spec->vni, 3); + memcpy(&id.vni[1], mask->vni, 3); + } + if (!(item_flags & MLX5_FLOW_LAYER_OUTER)) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "VXLAN-GPE tunnel must be fully" + " defined"); + return 0; +} +/** + * Validate GRE Key item. + * + * @param[in] item + * Item specification. + * @param[in] item_flags + * Bit flags to mark detected items. + * @param[in] gre_item + * Pointer to gre_item + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_flow_validate_item_gre_key(const struct rte_flow_item *item, + uint64_t item_flags, + const struct rte_flow_item *gre_item, + struct rte_flow_error *error) +{ + const rte_be32_t *mask = item->mask; + int ret = 0; + rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX); + const struct rte_flow_item_gre *gre_spec; + const struct rte_flow_item_gre *gre_mask; + + if (item_flags & MLX5_FLOW_LAYER_GRE_KEY) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Multiple GRE key not support"); + if (!(item_flags & MLX5_FLOW_LAYER_GRE)) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "No preceding GRE header"); + if (item_flags & MLX5_FLOW_LAYER_INNER) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "GRE key following a wrong item"); + gre_mask = gre_item->mask; + if (!gre_mask) + gre_mask = &rte_flow_item_gre_mask; + gre_spec = gre_item->spec; + if (gre_spec && (gre_mask->c_rsvd0_ver & RTE_BE16(0x2000)) && + !(gre_spec->c_rsvd0_ver & RTE_BE16(0x2000))) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Key bit must be on"); + + if (!mask) + mask = &gre_key_default_mask; + ret = mlx5_flow_item_acceptable + (item, (const uint8_t *)mask, + (const uint8_t *)&gre_key_default_mask, + sizeof(rte_be32_t), error); + return ret; +} + +/** + * Validate GRE item. 
+ *
+ * @param[in] item
+ *   Item specification.
+ * @param[in] item_flags
+ *   Bit flags to mark detected items.
+ * @param[in] target_protocol
+ *   The next protocol in the previous item.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_validate_item_gre(const struct rte_flow_item *item,
+                            uint64_t item_flags,
+                            uint8_t target_protocol,
+                            struct rte_flow_error *error)
+{
+        const struct rte_flow_item_gre *spec __rte_unused = item->spec;
+        const struct rte_flow_item_gre *mask = item->mask;
+        int ret;
+        const struct rte_flow_item_gre nic_mask = {
+                .c_rsvd0_ver = RTE_BE16(0xB000),
+                .protocol = RTE_BE16(UINT16_MAX),
+        };
+
+        if (target_protocol != 0xff && target_protocol != IPPROTO_GRE)
+                return rte_flow_error_set(error, EINVAL,
+                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                          "protocol filtering not compatible"
+                                          " with this GRE layer");
+        if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
+                return rte_flow_error_set(error, ENOTSUP,
+                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                          "multiple tunnel layers not"
+                                          " supported");
+        if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3))
+                return rte_flow_error_set(error, ENOTSUP,
+                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                          "L3 Layer is missing");
+        if (!mask)
+                mask = &rte_flow_item_gre_mask;
+        ret = mlx5_flow_item_acceptable
+                (item, (const uint8_t *)mask,
+                 (const uint8_t *)&nic_mask,
+                 sizeof(struct rte_flow_item_gre), error);
+        if (ret < 0)
+                return ret;
+#ifndef HAVE_MLX5DV_DR
+#ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT
+        if (spec && (spec->protocol & mask->protocol))
+                return rte_flow_error_set(error, ENOTSUP,
+                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                          "without MPLS support the"
+                                          " specification cannot be used for"
+                                          " filtering");
+#endif
+#endif
+        return 0;
+}
+
+/**
+ * Validate Geneve item.
+ *
+ * @param[in] item
+ *   Item specification.
+ * @param[in] item_flags
+ *   Bit-fields that hold the items detected until now.
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_validate_item_geneve(const struct rte_flow_item *item,
+                               uint64_t item_flags,
+                               struct rte_eth_dev *dev,
+                               struct rte_flow_error *error)
+{
+        struct mlx5_priv *priv = dev->data->dev_private;
+        const struct rte_flow_item_geneve *spec = item->spec;
+        const struct rte_flow_item_geneve *mask = item->mask;
+        int ret;
+        uint16_t gbhdr;
+        uint8_t opt_len = priv->config.hca_attr.geneve_max_opt_len ?
+ MLX5_GENEVE_OPT_LEN_1 : MLX5_GENEVE_OPT_LEN_0; + const struct rte_flow_item_geneve nic_mask = { + .ver_opt_len_o_c_rsvd0 = RTE_BE16(0x3f80), + .vni = "\xff\xff\xff", + .protocol = RTE_BE16(UINT16_MAX), + }; + + if (!priv->config.hca_attr.tunnel_stateless_geneve_rx) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "L3 Geneve is not enabled by device" + " parameter and/or not configured in" + " firmware"); + if (item_flags & MLX5_FLOW_LAYER_TUNNEL) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "multiple tunnel layers not" + " supported"); + /* + * Verify only UDPv4 is present as defined in + * https://tools.ietf.org/html/rfc7348 + */ + if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "no outer UDP layer found"); + if (!mask) + mask = &rte_flow_item_geneve_mask; + ret = mlx5_flow_item_acceptable + (item, (const uint8_t *)mask, + (const uint8_t *)&nic_mask, + sizeof(struct rte_flow_item_geneve), error); + if (ret) + return ret; + if (spec) { + gbhdr = rte_be_to_cpu_16(spec->ver_opt_len_o_c_rsvd0); + if (MLX5_GENEVE_VER_VAL(gbhdr) || + MLX5_GENEVE_CRITO_VAL(gbhdr) || + MLX5_GENEVE_RSVD_VAL(gbhdr) || spec->rsvd1) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Geneve protocol unsupported" + " fields are being used"); + if (MLX5_GENEVE_OPTLEN_VAL(gbhdr) > opt_len) + return rte_flow_error_set + (error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Unsupported Geneve options length"); + } + if (!(item_flags & MLX5_FLOW_LAYER_OUTER)) + return rte_flow_error_set + (error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Geneve tunnel must be fully defined"); + return 0; +} + +/** + * Validate MPLS item. + * + * @param[in] dev + * Pointer to the rte_eth_dev structure. + * @param[in] item + * Item specification. + * @param[in] item_flags + * Bit-fields that holds the items detected until now. + * @param[in] prev_layer + * The protocol layer indicated in previous item. + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_flow_validate_item_mpls(struct rte_eth_dev *dev __rte_unused, + const struct rte_flow_item *item __rte_unused, + uint64_t item_flags __rte_unused, + uint64_t prev_layer __rte_unused, + struct rte_flow_error *error) +{ +#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT + const struct rte_flow_item_mpls *mask = item->mask; + struct mlx5_priv *priv = dev->data->dev_private; + int ret; + + if (!priv->config.mpls_en) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "MPLS not supported or" + " disabled in firmware" + " configuration."); + /* MPLS over IP, UDP, GRE is allowed */ + if (!(prev_layer & (MLX5_FLOW_LAYER_OUTER_L3 | + MLX5_FLOW_LAYER_OUTER_L4_UDP | + MLX5_FLOW_LAYER_GRE))) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "protocol filtering not compatible" + " with MPLS layer"); + /* Multi-tunnel isn't allowed but MPLS over GRE is an exception. 
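The MPLS validator allows MPLS only on top of outer L3, outer UDP, or GRE, and treats MPLS-over-GRE as the single permitted tunnel-over-tunnel combination. A pattern sketch for that case; the label value is a placeholder:

#include <rte_flow.h>

static const struct rte_flow_item_mpls mpls_spec = {
        .label_tc_s = { 0x00, 0x01, 0x00 }, /* 20-bit label 16 */
};
static const struct rte_flow_item mpls_over_gre[] = {
        { .type = RTE_FLOW_ITEM_TYPE_ETH },
        { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
        { .type = RTE_FLOW_ITEM_TYPE_GRE },
        { .type = RTE_FLOW_ITEM_TYPE_MPLS,
          .spec = &mpls_spec, .mask = &rte_flow_item_mpls_mask },
        { .type = RTE_FLOW_ITEM_TYPE_END },
};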
*/ + if ((item_flags & MLX5_FLOW_LAYER_TUNNEL) && + !(item_flags & MLX5_FLOW_LAYER_GRE)) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "multiple tunnel layers not" + " supported"); + if (!mask) + mask = &rte_flow_item_mpls_mask; + ret = mlx5_flow_item_acceptable + (item, (const uint8_t *)mask, + (const uint8_t *)&rte_flow_item_mpls_mask, + sizeof(struct rte_flow_item_mpls), error); + if (ret < 0) + return ret; + return 0; +#endif + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "MPLS is not supported by Verbs, please" + " update."); +} + +/** + * Validate NVGRE item. + * + * @param[in] item + * Item specification. + * @param[in] item_flags + * Bit flags to mark detected items. + * @param[in] target_protocol + * The next protocol in the previous item. + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_flow_validate_item_nvgre(const struct rte_flow_item *item, + uint64_t item_flags, + uint8_t target_protocol, + struct rte_flow_error *error) +{ + const struct rte_flow_item_nvgre *mask = item->mask; + int ret; + + if (target_protocol != 0xff && target_protocol != IPPROTO_GRE) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "protocol filtering not compatible" + " with this GRE layer"); + if (item_flags & MLX5_FLOW_LAYER_TUNNEL) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "multiple tunnel layers not" + " supported"); + if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3)) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "L3 Layer is missing"); + if (!mask) + mask = &rte_flow_item_nvgre_mask; + ret = mlx5_flow_item_acceptable + (item, (const uint8_t *)mask, + (const uint8_t *)&rte_flow_item_nvgre_mask, + sizeof(struct rte_flow_item_nvgre), error); + if (ret < 0) + return ret; + return 0; +} + +/* Allocate unique ID for the split Q/RSS subflows. */ +static uint32_t +flow_qrss_get_id(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + uint32_t qrss_id, ret; + + ret = mlx5_flow_id_get(priv->qrss_id_pool, &qrss_id); + if (ret) + return 0; + MLX5_ASSERT(qrss_id); + return qrss_id; +} + +/* Free unique ID for the split Q/RSS subflows. */ +static void +flow_qrss_free_id(struct rte_eth_dev *dev, uint32_t qrss_id) +{ + struct mlx5_priv *priv = dev->data->dev_private; + + if (qrss_id) + mlx5_flow_id_release(priv->qrss_id_pool, qrss_id); +} + +/** + * Release resource related QUEUE/RSS action split. + * + * @param dev + * Pointer to Ethernet device. + * @param flow + * Flow to release id's from. 
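The split Q/RSS helpers above rely on one convention: id 0 is never allocated, so it can double as "no id" and be passed to the free routine unconditionally. A stripped-down illustration of that convention, using a hypothetical counter pool rather than the mlx5_flow_id_get() implementation:

#include <stdint.h>

struct toy_id_pool { uint32_t last; };

static uint32_t
toy_id_get(struct toy_id_pool *p)
{
        if (++p->last == 0)     /* skip 0 on wrap-around: 0 means "no id" */
                ++p->last;
        return p->last;
}

static void
toy_id_put(struct toy_id_pool *p, uint32_t id)
{
        (void)p;
        if (!id)
                return;         /* releasing "no id" is a harmless no-op */
        /* ... return id to the pool ... */
}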
+ */ +static void +flow_mreg_split_qrss_release(struct rte_eth_dev *dev, + struct rte_flow *flow) +{ + struct mlx5_priv *priv = dev->data->dev_private; + uint32_t handle_idx; + struct mlx5_flow_handle *dev_handle; + + SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles, + handle_idx, dev_handle, next) + if (dev_handle->split_flow_id) + flow_qrss_free_id(dev, dev_handle->split_flow_id); +} + +static int +flow_null_validate(struct rte_eth_dev *dev __rte_unused, + const struct rte_flow_attr *attr __rte_unused, + const struct rte_flow_item items[] __rte_unused, + const struct rte_flow_action actions[] __rte_unused, + bool external __rte_unused, + int hairpin __rte_unused, + struct rte_flow_error *error) +{ + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL); +} + +static struct mlx5_flow * +flow_null_prepare(struct rte_eth_dev *dev __rte_unused, + const struct rte_flow_attr *attr __rte_unused, + const struct rte_flow_item items[] __rte_unused, + const struct rte_flow_action actions[] __rte_unused, + struct rte_flow_error *error) +{ + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL); + return NULL; +} + +static int +flow_null_translate(struct rte_eth_dev *dev __rte_unused, + struct mlx5_flow *dev_flow __rte_unused, + const struct rte_flow_attr *attr __rte_unused, + const struct rte_flow_item items[] __rte_unused, + const struct rte_flow_action actions[] __rte_unused, + struct rte_flow_error *error) +{ + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL); +} + +static int +flow_null_apply(struct rte_eth_dev *dev __rte_unused, + struct rte_flow *flow __rte_unused, + struct rte_flow_error *error) +{ + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL); +} + +static void +flow_null_remove(struct rte_eth_dev *dev __rte_unused, + struct rte_flow *flow __rte_unused) +{ +} + +static void +flow_null_destroy(struct rte_eth_dev *dev __rte_unused, + struct rte_flow *flow __rte_unused) +{ +} + +static int +flow_null_query(struct rte_eth_dev *dev __rte_unused, + struct rte_flow *flow __rte_unused, + const struct rte_flow_action *actions __rte_unused, + void *data __rte_unused, + struct rte_flow_error *error) +{ + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL); +} + +/* Void driver to protect from null pointer reference. */ +const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops = { + .validate = flow_null_validate, + .prepare = flow_null_prepare, + .translate = flow_null_translate, + .apply = flow_null_apply, + .remove = flow_null_remove, + .destroy = flow_null_destroy, + .query = flow_null_query, +}; + +/** + * Select flow driver type according to flow attributes and device + * configuration. + * + * @param[in] dev + * Pointer to the dev structure. + * @param[in] attr + * Pointer to the flow attributes. + * + * @return + * flow driver type, MLX5_FLOW_TYPE_MAX otherwise. + */ +static enum mlx5_flow_drv_type +flow_get_drv_type(struct rte_eth_dev *dev, const struct rte_flow_attr *attr) +{ + struct mlx5_priv *priv = dev->data->dev_private; + enum mlx5_flow_drv_type type = MLX5_FLOW_TYPE_MAX; + + if (attr->transfer && priv->config.dv_esw_en) + type = MLX5_FLOW_TYPE_DV; + if (!attr->transfer) + type = priv->config.dv_flow_en ? MLX5_FLOW_TYPE_DV : + MLX5_FLOW_TYPE_VERBS; + return type; +} + +#define flow_get_drv_ops(type) flow_drv_ops[type] + +/** + * Flow driver validation API. 
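flow_get_drv_type() and the flow_get_drv_ops() macro implement an enum-indexed ops table, with mlx5_flow_null_drv_ops standing in for unresolved types so callers never dereference a NULL function pointer. A generic sketch of the same pattern; all names here are illustrative, not the mlx5 ones:

#include <errno.h>

enum drv_type { DRV_NONE, DRV_VERBS, DRV_DV, DRV_MAX };

struct drv_ops { int (*validate)(void *ctx); };

static int null_validate(void *ctx) { (void)ctx; return -ENOTSUP; }
static int dv_validate(void *ctx)   { (void)ctx; return 0; }

static const struct drv_ops ops_table[DRV_MAX] = {
        [DRV_NONE]  = { .validate = null_validate },
        [DRV_VERBS] = { .validate = null_validate }, /* placeholder */
        [DRV_DV]    = { .validate = dv_validate },
};

static int
dispatch_validate(enum drv_type type, void *ctx)
{
        /* Every slot is populated, so the call is always safe. */
        return ops_table[type].validate(ctx);
}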
This abstracts calling driver specific functions. + * The type of flow driver is determined according to flow attributes. + * + * @param[in] dev + * Pointer to the dev structure. + * @param[in] attr + * Pointer to the flow attributes. + * @param[in] items + * Pointer to the list of items. + * @param[in] actions + * Pointer to the list of actions. + * @param[in] external + * This flow rule is created by request external to PMD. + * @param[in] hairpin + * Number of hairpin TX actions, 0 means classic flow. + * @param[out] error + * Pointer to the error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static inline int +flow_drv_validate(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item items[], + const struct rte_flow_action actions[], + bool external, int hairpin, struct rte_flow_error *error) +{ + const struct mlx5_flow_driver_ops *fops; + enum mlx5_flow_drv_type type = flow_get_drv_type(dev, attr); + + fops = flow_get_drv_ops(type); + return fops->validate(dev, attr, items, actions, external, + hairpin, error); +} + +/** + * Flow driver preparation API. This abstracts calling driver specific + * functions. Parent flow (rte_flow) should have driver type (drv_type). It + * calculates the size of memory required for device flow, allocates the memory, + * initializes the device flow and returns the pointer. + * + * @note + * This function initializes device flow structure such as dv or verbs in + * struct mlx5_flow. However, it is caller's responsibility to initialize the + * rest. For example, adding returning device flow to flow->dev_flow list and + * setting backward reference to the flow should be done out of this function. + * layers field is not filled either. + * + * @param[in] dev + * Pointer to the dev structure. + * @param[in] attr + * Pointer to the flow attributes. + * @param[in] items + * Pointer to the list of items. + * @param[in] actions + * Pointer to the list of actions. + * @param[in] flow_idx + * This memory pool index to the flow. + * @param[out] error + * Pointer to the error structure. + * + * @return + * Pointer to device flow on success, otherwise NULL and rte_errno is set. + */ +static inline struct mlx5_flow * +flow_drv_prepare(struct rte_eth_dev *dev, + const struct rte_flow *flow, + const struct rte_flow_attr *attr, + const struct rte_flow_item items[], + const struct rte_flow_action actions[], + uint32_t flow_idx, + struct rte_flow_error *error) +{ + const struct mlx5_flow_driver_ops *fops; + enum mlx5_flow_drv_type type = flow->drv_type; + struct mlx5_flow *mlx5_flow = NULL; + + MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); + fops = flow_get_drv_ops(type); + mlx5_flow = fops->prepare(dev, attr, items, actions, error); + if (mlx5_flow) + mlx5_flow->flow_idx = flow_idx; + return mlx5_flow; +} + +/** + * Flow driver translation API. This abstracts calling driver specific + * functions. Parent flow (rte_flow) should have driver type (drv_type). It + * translates a generic flow into a driver flow. flow_drv_prepare() must + * precede. + * + * @note + * dev_flow->layers could be filled as a result of parsing during translation + * if needed by flow_drv_apply(). dev_flow->flow->actions can also be filled + * if necessary. As a flow can have multiple dev_flows by RSS flow expansion, + * flow->actions could be overwritten even though all the expanded dev_flows + * have the same actions. + * + * @param[in] dev + * Pointer to the rte dev structure. 
+ * @param[in, out] dev_flow + * Pointer to the mlx5 flow. + * @param[in] attr + * Pointer to the flow attributes. + * @param[in] items + * Pointer to the list of items. + * @param[in] actions + * Pointer to the list of actions. + * @param[out] error + * Pointer to the error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static inline int +flow_drv_translate(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow, + const struct rte_flow_attr *attr, + const struct rte_flow_item items[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + const struct mlx5_flow_driver_ops *fops; + enum mlx5_flow_drv_type type = dev_flow->flow->drv_type; + + MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); + fops = flow_get_drv_ops(type); + return fops->translate(dev, dev_flow, attr, items, actions, error); +} + +/** + * Flow driver apply API. This abstracts calling driver specific functions. + * Parent flow (rte_flow) should have driver type (drv_type). It applies + * translated driver flows on to device. flow_drv_translate() must precede. + * + * @param[in] dev + * Pointer to Ethernet device structure. + * @param[in, out] flow + * Pointer to flow structure. + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static inline int +flow_drv_apply(struct rte_eth_dev *dev, struct rte_flow *flow, + struct rte_flow_error *error) +{ + const struct mlx5_flow_driver_ops *fops; + enum mlx5_flow_drv_type type = flow->drv_type; + + MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); + fops = flow_get_drv_ops(type); + return fops->apply(dev, flow, error); +} + +/** + * Flow driver remove API. This abstracts calling driver specific functions. + * Parent flow (rte_flow) should have driver type (drv_type). It removes a flow + * on device. All the resources of the flow should be freed by calling + * flow_drv_destroy(). + * + * @param[in] dev + * Pointer to Ethernet device. + * @param[in, out] flow + * Pointer to flow structure. + */ +static inline void +flow_drv_remove(struct rte_eth_dev *dev, struct rte_flow *flow) +{ + const struct mlx5_flow_driver_ops *fops; + enum mlx5_flow_drv_type type = flow->drv_type; + + MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); + fops = flow_get_drv_ops(type); + fops->remove(dev, flow); +} + +/** + * Flow driver destroy API. This abstracts calling driver specific functions. + * Parent flow (rte_flow) should have driver type (drv_type). It removes a flow + * on device and releases resources of the flow. + * + * @param[in] dev + * Pointer to Ethernet device. + * @param[in, out] flow + * Pointer to flow structure. + */ +static inline void +flow_drv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow) +{ + const struct mlx5_flow_driver_ops *fops; + enum mlx5_flow_drv_type type = flow->drv_type; + + flow_mreg_split_qrss_release(dev, flow); + MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX); + fops = flow_get_drv_ops(type); + fops->destroy(dev, flow); +} + +/** + * Get RSS action from the action list. + * + * @param[in] actions + * Pointer to the list of actions. + * + * @return + * Pointer to the RSS action if exist, else return NULL. 
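Taken together, these wrappers imply a fixed per-flow sequence: validate, prepare the device flow, translate it, then apply it to the device. The sketch below composes the static helpers of this file in that order; it is a simplified illustration only, with error unwinding and the linking of the device flow into the parent rte_flow trimmed (flow_list_create() does the complete job).

/* Sketch (not a standalone program): the call order implied by the
 * flow_drv_*() wrappers above. */
static int
create_one_flow(struct rte_eth_dev *dev, struct rte_flow *flow,
                const struct rte_flow_attr *attr,
                const struct rte_flow_item items[],
                const struct rte_flow_action actions[],
                struct rte_flow_error *error)
{
        struct mlx5_flow *dev_flow;
        int ret;

        ret = flow_drv_validate(dev, attr, items, actions, true, 0, error);
        if (ret < 0)
                return ret;
        dev_flow = flow_drv_prepare(dev, flow, attr, items, actions, 0, error);
        if (!dev_flow)
                return -rte_errno;
        ret = flow_drv_translate(dev, dev_flow, attr, items, actions, error);
        if (ret < 0)
                return ret;
        return flow_drv_apply(dev, flow, error);
}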
+ */ +static const struct rte_flow_action_rss* +flow_get_rss_action(const struct rte_flow_action actions[]) +{ + for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { + switch (actions->type) { + case RTE_FLOW_ACTION_TYPE_RSS: + return (const struct rte_flow_action_rss *) + actions->conf; + default: + break; + } + } + return NULL; +} + +static unsigned int +find_graph_root(const struct rte_flow_item pattern[], uint32_t rss_level) +{ + const struct rte_flow_item *item; + unsigned int has_vlan = 0; + + for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { + if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) { + has_vlan = 1; + break; + } + } + if (has_vlan) + return rss_level < 2 ? MLX5_EXPANSION_ROOT_ETH_VLAN : + MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN; + return rss_level < 2 ? MLX5_EXPANSION_ROOT : + MLX5_EXPANSION_ROOT_OUTER; +} + +/** + * Get layer flags from the prefix flow. + * + * Some flows may be split to several subflows, the prefix subflow gets the + * match items and the suffix sub flow gets the actions. + * Some actions need the user defined match item flags to get the detail for + * the action. + * This function helps the suffix flow to get the item layer flags from prefix + * subflow. + * + * @param[in] dev_flow + * Pointer the created preifx subflow. + * + * @return + * The layers get from prefix subflow. + */ +static inline uint64_t +flow_get_prefix_layer_flags(struct mlx5_flow *dev_flow) +{ + uint64_t layers = 0; + + /* + * Layers bits could be localization, but usually the compiler will + * help to do the optimization work for source code. + * If no decap actions, use the layers directly. + */ + if (!(dev_flow->act_flags & MLX5_FLOW_ACTION_DECAP)) + return dev_flow->handle->layers; + /* Convert L3 layers with decap action. */ + if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L3_IPV4) + layers |= MLX5_FLOW_LAYER_OUTER_L3_IPV4; + else if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L3_IPV6) + layers |= MLX5_FLOW_LAYER_OUTER_L3_IPV6; + /* Convert L4 layers with decap action. */ + if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L4_TCP) + layers |= MLX5_FLOW_LAYER_OUTER_L4_TCP; + else if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L4_UDP) + layers |= MLX5_FLOW_LAYER_OUTER_L4_UDP; + return layers; +} + +/** + * Get metadata split action information. + * + * @param[in] actions + * Pointer to the list of actions. + * @param[out] qrss + * Pointer to the return pointer. + * @param[out] qrss_type + * Pointer to the action type to return. RTE_FLOW_ACTION_TYPE_END is returned + * if no QUEUE/RSS is found. + * @param[out] encap_idx + * Pointer to the index of the encap action if exists, otherwise the last + * action index. + * + * @return + * Total number of actions. + */ +static int +flow_parse_metadata_split_actions_info(const struct rte_flow_action actions[], + const struct rte_flow_action **qrss, + int *encap_idx) +{ + const struct rte_flow_action_raw_encap *raw_encap; + int actions_n = 0; + int raw_decap_idx = -1; + + *encap_idx = -1; + for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { + switch (actions->type) { + case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP: + case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP: + *encap_idx = actions_n; + break; + case RTE_FLOW_ACTION_TYPE_RAW_DECAP: + raw_decap_idx = actions_n; + break; + case RTE_FLOW_ACTION_TYPE_RAW_ENCAP: + raw_encap = actions->conf; + if (raw_encap->size > MLX5_ENCAPSULATION_DECISION_SIZE) + *encap_idx = raw_decap_idx != -1 ? 
+ raw_decap_idx : actions_n; + break; + case RTE_FLOW_ACTION_TYPE_QUEUE: + case RTE_FLOW_ACTION_TYPE_RSS: + *qrss = actions; + break; + default: + break; + } + actions_n++; + } + if (*encap_idx == -1) + *encap_idx = actions_n; + /* Count RTE_FLOW_ACTION_TYPE_END. */ + return actions_n + 1; +} + +/** + * Check meter action from the action list. + * + * @param[in] actions + * Pointer to the list of actions. + * @param[out] mtr + * Pointer to the meter exist flag. + * + * @return + * Total number of actions. + */ +static int +flow_check_meter_action(const struct rte_flow_action actions[], uint32_t *mtr) +{ + int actions_n = 0; + + MLX5_ASSERT(mtr); + *mtr = 0; + for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { + switch (actions->type) { + case RTE_FLOW_ACTION_TYPE_METER: + *mtr = 1; + break; + default: + break; + } + actions_n++; + } + /* Count RTE_FLOW_ACTION_TYPE_END. */ + return actions_n + 1; +} + +/** + * Check if the flow should be splited due to hairpin. + * The reason for the split is that in current HW we can't + * support encap on Rx, so if a flow have encap we move it + * to Tx. + * + * @param dev + * Pointer to Ethernet device. + * @param[in] attr + * Flow rule attributes. + * @param[in] actions + * Associated actions (list terminated by the END action). + * + * @return + * > 0 the number of actions and the flow should be split, + * 0 when no split required. + */ +static int +flow_check_hairpin_split(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_action actions[]) +{ + int queue_action = 0; + int action_n = 0; + int encap = 0; + const struct rte_flow_action_queue *queue; + const struct rte_flow_action_rss *rss; + const struct rte_flow_action_raw_encap *raw_encap; + + if (!attr->ingress) + return 0; + for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { + switch (actions->type) { + case RTE_FLOW_ACTION_TYPE_QUEUE: + queue = actions->conf; + if (queue == NULL) + return 0; + if (mlx5_rxq_get_type(dev, queue->index) != + MLX5_RXQ_TYPE_HAIRPIN) + return 0; + queue_action = 1; + action_n++; + break; + case RTE_FLOW_ACTION_TYPE_RSS: + rss = actions->conf; + if (rss == NULL || rss->queue_num == 0) + return 0; + if (mlx5_rxq_get_type(dev, rss->queue[0]) != + MLX5_RXQ_TYPE_HAIRPIN) + return 0; + queue_action = 1; + action_n++; + break; + case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP: + case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP: + encap = 1; + action_n++; + break; + case RTE_FLOW_ACTION_TYPE_RAW_ENCAP: + raw_encap = actions->conf; + if (raw_encap->size > + (sizeof(struct rte_flow_item_eth) + + sizeof(struct rte_flow_item_ipv4))) + encap = 1; + action_n++; + break; + default: + action_n++; + break; + } + } + if (encap == 1 && queue_action) + return action_n; + return 0; +} + +/* Declare flow create/destroy prototype in advance. */ +static uint32_t +flow_list_create(struct rte_eth_dev *dev, uint32_t *list, + const struct rte_flow_attr *attr, + const struct rte_flow_item items[], + const struct rte_flow_action actions[], + bool external, struct rte_flow_error *error); + +static void +flow_list_destroy(struct rte_eth_dev *dev, uint32_t *list, + uint32_t flow_idx); + +/** + * Add a flow of copying flow metadata registers in RX_CP_TBL. + * + * As mark_id is unique, if there's already a registered flow for the mark_id, + * return by increasing the reference counter of the resource. Otherwise, create + * the resource (mcp_res) and flow. 
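The copy table introduced here is what keeps a MARK id visible to software: the id written to reg_c[1] is copied back into the CQE flow_tag, which the PMD then reports through the mbuf FDIR fields. On the application side that looks like the following Rx-side sketch, assuming the standard mbuf offload flags of this DPDK generation:

#include <stdio.h>
#include <rte_mbuf.h>

/* Sketch: the per-packet value the RX_CP_TBL machinery preserves. */
static void
log_marked_pkts(struct rte_mbuf **pkts, uint16_t nb)
{
        for (uint16_t i = 0; i < nb; i++) {
                if (pkts[i]->ol_flags & PKT_RX_FDIR_ID)
                        printf("mark id %u\n", pkts[i]->hash.fdir.hi);
        }
}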
+ * + * Flow looks like, + * - If ingress port is ANY and reg_c[1] is mark_id, + * flow_tag := mark_id, reg_b := reg_c[0] and jump to RX_ACT_TBL. + * + * For default flow (zero mark_id), flow is like, + * - If ingress port is ANY, + * reg_b := reg_c[0] and jump to RX_ACT_TBL. + * + * @param dev + * Pointer to Ethernet device. + * @param mark_id + * ID of MARK action, zero means default flow for META. + * @param[out] error + * Perform verbose error reporting if not NULL. + * + * @return + * Associated resource on success, NULL otherwise and rte_errno is set. + */ +static struct mlx5_flow_mreg_copy_resource * +flow_mreg_add_copy_action(struct rte_eth_dev *dev, uint32_t mark_id, + struct rte_flow_error *error) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct rte_flow_attr attr = { + .group = MLX5_FLOW_MREG_CP_TABLE_GROUP, + .ingress = 1, + }; + struct mlx5_rte_flow_item_tag tag_spec = { + .data = mark_id, + }; + struct rte_flow_item items[] = { + [1] = { .type = RTE_FLOW_ITEM_TYPE_END, }, + }; + struct rte_flow_action_mark ftag = { + .id = mark_id, + }; + struct mlx5_flow_action_copy_mreg cp_mreg = { + .dst = REG_B, + .src = 0, + }; + struct rte_flow_action_jump jump = { + .group = MLX5_FLOW_MREG_ACT_TABLE_GROUP, + }; + struct rte_flow_action actions[] = { + [3] = { .type = RTE_FLOW_ACTION_TYPE_END, }, + }; + struct mlx5_flow_mreg_copy_resource *mcp_res; + uint32_t idx = 0; + int ret; + + /* Fill the register fileds in the flow. */ + ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error); + if (ret < 0) + return NULL; + tag_spec.id = ret; + ret = mlx5_flow_get_reg_id(dev, MLX5_METADATA_RX, 0, error); + if (ret < 0) + return NULL; + cp_mreg.src = ret; + /* Check if already registered. */ + MLX5_ASSERT(priv->mreg_cp_tbl); + mcp_res = (void *)mlx5_hlist_lookup(priv->mreg_cp_tbl, mark_id); + if (mcp_res) { + /* For non-default rule. */ + if (mark_id != MLX5_DEFAULT_COPY_ID) + mcp_res->refcnt++; + MLX5_ASSERT(mark_id != MLX5_DEFAULT_COPY_ID || + mcp_res->refcnt == 1); + return mcp_res; + } + /* Provide the full width of FLAG specific value. */ + if (mark_id == (priv->sh->dv_regc0_mask & MLX5_FLOW_MARK_DEFAULT)) + tag_spec.data = MLX5_FLOW_MARK_DEFAULT; + /* Build a new flow. */ + if (mark_id != MLX5_DEFAULT_COPY_ID) { + items[0] = (struct rte_flow_item){ + .type = (enum rte_flow_item_type) + MLX5_RTE_FLOW_ITEM_TYPE_TAG, + .spec = &tag_spec, + }; + items[1] = (struct rte_flow_item){ + .type = RTE_FLOW_ITEM_TYPE_END, + }; + actions[0] = (struct rte_flow_action){ + .type = (enum rte_flow_action_type) + MLX5_RTE_FLOW_ACTION_TYPE_MARK, + .conf = &ftag, + }; + actions[1] = (struct rte_flow_action){ + .type = (enum rte_flow_action_type) + MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG, + .conf = &cp_mreg, + }; + actions[2] = (struct rte_flow_action){ + .type = RTE_FLOW_ACTION_TYPE_JUMP, + .conf = &jump, + }; + actions[3] = (struct rte_flow_action){ + .type = RTE_FLOW_ACTION_TYPE_END, + }; + } else { + /* Default rule, wildcard match. */ + attr.priority = MLX5_FLOW_PRIO_RSVD; + items[0] = (struct rte_flow_item){ + .type = RTE_FLOW_ITEM_TYPE_END, + }; + actions[0] = (struct rte_flow_action){ + .type = (enum rte_flow_action_type) + MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG, + .conf = &cp_mreg, + }; + actions[1] = (struct rte_flow_action){ + .type = RTE_FLOW_ACTION_TYPE_JUMP, + .conf = &jump, + }; + actions[2] = (struct rte_flow_action){ + .type = RTE_FLOW_ACTION_TYPE_END, + }; + } + /* Build a new entry. 
*/ + mcp_res = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MCP], &idx); + if (!mcp_res) { + rte_errno = ENOMEM; + return NULL; + } + mcp_res->idx = idx; + /* + * The copy Flows are not included in any list. There + * ones are referenced from other Flows and can not + * be applied, removed, deleted in ardbitrary order + * by list traversing. + */ + mcp_res->rix_flow = flow_list_create(dev, NULL, &attr, items, + actions, false, error); + if (!mcp_res->rix_flow) + goto error; + mcp_res->refcnt++; + mcp_res->hlist_ent.key = mark_id; + ret = mlx5_hlist_insert(priv->mreg_cp_tbl, + &mcp_res->hlist_ent); + MLX5_ASSERT(!ret); + if (ret) + goto error; + return mcp_res; +error: + if (mcp_res->rix_flow) + flow_list_destroy(dev, NULL, mcp_res->rix_flow); + mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], mcp_res->idx); + return NULL; +} + +/** + * Release flow in RX_CP_TBL. + * + * @param dev + * Pointer to Ethernet device. + * @flow + * Parent flow for wich copying is provided. + */ +static void +flow_mreg_del_copy_action(struct rte_eth_dev *dev, + struct rte_flow *flow) +{ + struct mlx5_flow_mreg_copy_resource *mcp_res; + struct mlx5_priv *priv = dev->data->dev_private; + + if (!flow->rix_mreg_copy) + return; + mcp_res = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MCP], + flow->rix_mreg_copy); + if (!mcp_res || !priv->mreg_cp_tbl) + return; + if (flow->copy_applied) { + MLX5_ASSERT(mcp_res->appcnt); + flow->copy_applied = 0; + --mcp_res->appcnt; + if (!mcp_res->appcnt) { + struct rte_flow *mcp_flow = mlx5_ipool_get + (priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], + mcp_res->rix_flow); + + if (mcp_flow) + flow_drv_remove(dev, mcp_flow); + } + } + /* + * We do not check availability of metadata registers here, + * because copy resources are not allocated in this case. + */ + if (--mcp_res->refcnt) + return; + MLX5_ASSERT(mcp_res->rix_flow); + flow_list_destroy(dev, NULL, mcp_res->rix_flow); + mlx5_hlist_remove(priv->mreg_cp_tbl, &mcp_res->hlist_ent); + mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], mcp_res->idx); + flow->rix_mreg_copy = 0; +} + +/** + * Start flow in RX_CP_TBL. + * + * @param dev + * Pointer to Ethernet device. + * @flow + * Parent flow for wich copying is provided. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +flow_mreg_start_copy_action(struct rte_eth_dev *dev, + struct rte_flow *flow) +{ + struct mlx5_flow_mreg_copy_resource *mcp_res; + struct mlx5_priv *priv = dev->data->dev_private; + int ret; + + if (!flow->rix_mreg_copy || flow->copy_applied) + return 0; + mcp_res = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MCP], + flow->rix_mreg_copy); + if (!mcp_res) + return 0; + if (!mcp_res->appcnt) { + struct rte_flow *mcp_flow = mlx5_ipool_get + (priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], + mcp_res->rix_flow); + + if (mcp_flow) { + ret = flow_drv_apply(dev, mcp_flow, NULL); + if (ret) + return ret; + } + } + ++mcp_res->appcnt; + flow->copy_applied = 1; + return 0; +} + +/** + * Stop flow in RX_CP_TBL. + * + * @param dev + * Pointer to Ethernet device. + * @flow + * Parent flow for wich copying is provided. 
+ */ +static void +flow_mreg_stop_copy_action(struct rte_eth_dev *dev, + struct rte_flow *flow) +{ + struct mlx5_flow_mreg_copy_resource *mcp_res; + struct mlx5_priv *priv = dev->data->dev_private; + + if (!flow->rix_mreg_copy || !flow->copy_applied) + return; + mcp_res = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MCP], + flow->rix_mreg_copy); + if (!mcp_res) + return; + MLX5_ASSERT(mcp_res->appcnt); + --mcp_res->appcnt; + flow->copy_applied = 0; + if (!mcp_res->appcnt) { + struct rte_flow *mcp_flow = mlx5_ipool_get + (priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], + mcp_res->rix_flow); + + if (mcp_flow) + flow_drv_remove(dev, mcp_flow); + } +} + +/** + * Remove the default copy action from RX_CP_TBL. + * + * @param dev + * Pointer to Ethernet device. + */ +static void +flow_mreg_del_default_copy_action(struct rte_eth_dev *dev) +{ + struct mlx5_flow_mreg_copy_resource *mcp_res; + struct mlx5_priv *priv = dev->data->dev_private; + + /* Check if default flow is registered. */ + if (!priv->mreg_cp_tbl) + return; + mcp_res = (void *)mlx5_hlist_lookup(priv->mreg_cp_tbl, + MLX5_DEFAULT_COPY_ID); + if (!mcp_res) + return; + MLX5_ASSERT(mcp_res->rix_flow); + flow_list_destroy(dev, NULL, mcp_res->rix_flow); + mlx5_hlist_remove(priv->mreg_cp_tbl, &mcp_res->hlist_ent); + mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], mcp_res->idx); +} + +/** + * Add the default copy action in in RX_CP_TBL. + * + * @param dev + * Pointer to Ethernet device. + * @param[out] error + * Perform verbose error reporting if not NULL. + * + * @return + * 0 for success, negative value otherwise and rte_errno is set. + */ +static int +flow_mreg_add_default_copy_action(struct rte_eth_dev *dev, + struct rte_flow_error *error) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_flow_mreg_copy_resource *mcp_res; + + /* Check whether extensive metadata feature is engaged. */ + if (!priv->config.dv_flow_en || + priv->config.dv_xmeta_en == MLX5_XMETA_MODE_LEGACY || + !mlx5_flow_ext_mreg_supported(dev) || + !priv->sh->dv_regc0_mask) + return 0; + mcp_res = flow_mreg_add_copy_action(dev, MLX5_DEFAULT_COPY_ID, error); + if (!mcp_res) + return -rte_errno; + return 0; +} + +/** + * Add a flow of copying flow metadata registers in RX_CP_TBL. + * + * All the flow having Q/RSS action should be split by + * flow_mreg_split_qrss_prep() to pass by RX_CP_TBL. A flow in the RX_CP_TBL + * performs the following, + * - CQE->flow_tag := reg_c[1] (MARK) + * - CQE->flow_table_metadata (reg_b) := reg_c[0] (META) + * As CQE's flow_tag is not a register, it can't be simply copied from reg_c[1] + * but there should be a flow per each MARK ID set by MARK action. + * + * For the aforementioned reason, if there's a MARK action in flow's action + * list, a corresponding flow should be added to the RX_CP_TBL in order to copy + * the MARK ID to CQE's flow_tag like, + * - If reg_c[1] is mark_id, + * flow_tag := mark_id, reg_b := reg_c[0] and jump to RX_ACT_TBL. + * + * For SET_META action which stores value in reg_c[0], as the destination is + * also a flow metadata register (reg_b), adding a default flow is enough. Zero + * MARK ID means the default flow. The default flow looks like, + * - For all flow, reg_b := reg_c[0] and jump to RX_ACT_TBL. + * + * @param dev + * Pointer to Ethernet device. + * @param flow + * Pointer to flow structure. + * @param[in] actions + * Pointer to the list of actions. + * @param[out] error + * Perform verbose error reporting if not NULL. + * + * @return + * 0 on success, negative value otherwise and rte_errno is set. 
+ */ +static int +flow_mreg_update_copy_table(struct rte_eth_dev *dev, + struct rte_flow *flow, + const struct rte_flow_action *actions, + struct rte_flow_error *error) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_dev_config *config = &priv->config; + struct mlx5_flow_mreg_copy_resource *mcp_res; + const struct rte_flow_action_mark *mark; + + /* Check whether extensive metadata feature is engaged. */ + if (!config->dv_flow_en || + config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY || + !mlx5_flow_ext_mreg_supported(dev) || + !priv->sh->dv_regc0_mask) + return 0; + /* Find MARK action. */ + for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { + switch (actions->type) { + case RTE_FLOW_ACTION_TYPE_FLAG: + mcp_res = flow_mreg_add_copy_action + (dev, MLX5_FLOW_MARK_DEFAULT, error); + if (!mcp_res) + return -rte_errno; + flow->rix_mreg_copy = mcp_res->idx; + if (dev->data->dev_started) { + mcp_res->appcnt++; + flow->copy_applied = 1; + } + return 0; + case RTE_FLOW_ACTION_TYPE_MARK: + mark = (const struct rte_flow_action_mark *) + actions->conf; + mcp_res = + flow_mreg_add_copy_action(dev, mark->id, error); + if (!mcp_res) + return -rte_errno; + flow->rix_mreg_copy = mcp_res->idx; + if (dev->data->dev_started) { + mcp_res->appcnt++; + flow->copy_applied = 1; + } + return 0; + default: + break; + } + } + return 0; +} + +#define MLX5_MAX_SPLIT_ACTIONS 24 +#define MLX5_MAX_SPLIT_ITEMS 24 + +/** + * Split the hairpin flow. + * Since HW can't support encap on Rx we move the encap to Tx. + * If the count action is after the encap then we also + * move the count action. in this case the count will also measure + * the outer bytes. + * + * @param dev + * Pointer to Ethernet device. + * @param[in] actions + * Associated actions (list terminated by the END action). + * @param[out] actions_rx + * Rx flow actions. + * @param[out] actions_tx + * Tx flow actions.. + * @param[out] pattern_tx + * The pattern items for the Tx flow. + * @param[out] flow_id + * The flow ID connected to this flow. + * + * @return + * 0 on success. 
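For context, a minimal application-level sketch (not part of the driver code in this patch) of the kind of rule that makes flow_mreg_update_copy_table() install a companion entry in RX_CP_TBL: a MARK (or FLAG) action on an ingress flow. The port id, queue index and mark value below are illustrative assumptions only.

#include <rte_ethdev.h>
#include <rte_flow.h>
#include <rte_mbuf.h>

/* Create an ingress flow marking UDP packets and steering them to queue 0. */
static struct rte_flow *
create_marked_udp_flow(uint16_t port_id)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_mark mark = { .id = 0x1234 };
	struct rte_flow_action_queue queue = { .index = 0 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error err;

	return rte_flow_create(port_id, &attr, pattern, actions, &err);
}

/*
 * On receive, the mark that travelled through reg_c[1] and the CQE flow_tag
 * is visible in the mbuf when PKT_RX_FDIR_ID is set.
 */
static uint32_t
read_mark(const struct rte_mbuf *m)
{
	return (m->ol_flags & PKT_RX_FDIR_ID) ? m->hash.fdir.hi : 0;
}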
+ */ +static int +flow_hairpin_split(struct rte_eth_dev *dev, + const struct rte_flow_action actions[], + struct rte_flow_action actions_rx[], + struct rte_flow_action actions_tx[], + struct rte_flow_item pattern_tx[], + uint32_t *flow_id) +{ + struct mlx5_priv *priv = dev->data->dev_private; + const struct rte_flow_action_raw_encap *raw_encap; + const struct rte_flow_action_raw_decap *raw_decap; + struct mlx5_rte_flow_action_set_tag *set_tag; + struct rte_flow_action *tag_action; + struct mlx5_rte_flow_item_tag *tag_item; + struct rte_flow_item *item; + char *addr; + int encap = 0; + + mlx5_flow_id_get(priv->sh->flow_id_pool, flow_id); + for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { + switch (actions->type) { + case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP: + case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP: + rte_memcpy(actions_tx, actions, + sizeof(struct rte_flow_action)); + actions_tx++; + break; + case RTE_FLOW_ACTION_TYPE_COUNT: + if (encap) { + rte_memcpy(actions_tx, actions, + sizeof(struct rte_flow_action)); + actions_tx++; + } else { + rte_memcpy(actions_rx, actions, + sizeof(struct rte_flow_action)); + actions_rx++; + } + break; + case RTE_FLOW_ACTION_TYPE_RAW_ENCAP: + raw_encap = actions->conf; + if (raw_encap->size > + (sizeof(struct rte_flow_item_eth) + + sizeof(struct rte_flow_item_ipv4))) { + memcpy(actions_tx, actions, + sizeof(struct rte_flow_action)); + actions_tx++; + encap = 1; + } else { + rte_memcpy(actions_rx, actions, + sizeof(struct rte_flow_action)); + actions_rx++; + } + break; + case RTE_FLOW_ACTION_TYPE_RAW_DECAP: + raw_decap = actions->conf; + if (raw_decap->size < + (sizeof(struct rte_flow_item_eth) + + sizeof(struct rte_flow_item_ipv4))) { + memcpy(actions_tx, actions, + sizeof(struct rte_flow_action)); + actions_tx++; + } else { + rte_memcpy(actions_rx, actions, + sizeof(struct rte_flow_action)); + actions_rx++; + } + break; + default: + rte_memcpy(actions_rx, actions, + sizeof(struct rte_flow_action)); + actions_rx++; + break; + } + } + /* Add set meta action and end action for the Rx flow. */ + tag_action = actions_rx; + tag_action->type = (enum rte_flow_action_type) + MLX5_RTE_FLOW_ACTION_TYPE_TAG; + actions_rx++; + rte_memcpy(actions_rx, actions, sizeof(struct rte_flow_action)); + actions_rx++; + set_tag = (void *)actions_rx; + set_tag->id = mlx5_flow_get_reg_id(dev, MLX5_HAIRPIN_RX, 0, NULL); + MLX5_ASSERT(set_tag->id > REG_NONE); + set_tag->data = *flow_id; + tag_action->conf = set_tag; + /* Create Tx item list. */ + rte_memcpy(actions_tx, actions, sizeof(struct rte_flow_action)); + addr = (void *)&pattern_tx[2]; + item = pattern_tx; + item->type = (enum rte_flow_item_type) + MLX5_RTE_FLOW_ITEM_TYPE_TAG; + tag_item = (void *)addr; + tag_item->data = *flow_id; + tag_item->id = mlx5_flow_get_reg_id(dev, MLX5_HAIRPIN_TX, 0, NULL); + MLX5_ASSERT(set_tag->id > REG_NONE); + item->spec = tag_item; + addr += sizeof(struct mlx5_rte_flow_item_tag); + tag_item = (void *)addr; + tag_item->data = UINT32_MAX; + tag_item->id = UINT16_MAX; + item->mask = tag_item; + addr += sizeof(struct mlx5_rte_flow_item_tag); + item->last = NULL; + item++; + item->type = RTE_FLOW_ITEM_TYPE_END; + return 0; +} + +/** + * The last stage of splitting chain, just creates the subflow + * without any modification. + * + * @param[in] dev + * Pointer to Ethernet device. + * @param[in] flow + * Parent flow structure pointer. + * @param[in, out] sub_flow + * Pointer to return the created subflow, may be NULL. + * @param[in] prefix_layers + * Prefix subflow layers, may be 0. 
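flow_hairpin_split() only matters for flows whose destination is a hairpin queue. A rough sketch of the application-side setup such a flow relies on is shown below; it assumes the rte_eth_hairpin_conf layout introduced in DPDK 19.11 and binds one Rx hairpin queue to one Tx hairpin queue on the same port (queue indexes are arbitrary).

#include <rte_ethdev.h>

/* Bind Rx hairpin queue `rxq` and Tx hairpin queue `txq` to each other. */
static int
setup_hairpin_pair(uint16_t port_id, uint16_t rxq, uint16_t txq)
{
	struct rte_eth_hairpin_conf conf = { .peer_count = 1 };
	int ret;

	conf.peers[0].port = port_id;
	conf.peers[0].queue = txq;
	ret = rte_eth_rx_hairpin_queue_setup(port_id, rxq, 512, &conf);
	if (ret != 0)
		return ret;
	conf.peers[0].queue = rxq;
	return rte_eth_tx_hairpin_queue_setup(port_id, txq, 512, &conf);
}

A flow whose QUEUE action targets `rxq` is then detected by flow_check_hairpin_split() and divided into the Rx and Tx parts handled by the routine above.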
+ * @param[in] attr + * Flow rule attributes. + * @param[in] items + * Pattern specification (list terminated by the END pattern item). + * @param[in] actions + * Associated actions (list terminated by the END action). + * @param[in] external + * This flow rule is created by request external to PMD. + * @param[in] flow_idx + * This memory pool index to the flow. + * @param[out] error + * Perform verbose error reporting if not NULL. + * @return + * 0 on success, negative value otherwise + */ +static int +flow_create_split_inner(struct rte_eth_dev *dev, + struct rte_flow *flow, + struct mlx5_flow **sub_flow, + uint64_t prefix_layers, + const struct rte_flow_attr *attr, + const struct rte_flow_item items[], + const struct rte_flow_action actions[], + bool external, uint32_t flow_idx, + struct rte_flow_error *error) +{ + struct mlx5_flow *dev_flow; + + dev_flow = flow_drv_prepare(dev, flow, attr, items, actions, + flow_idx, error); + if (!dev_flow) + return -rte_errno; + dev_flow->flow = flow; + dev_flow->external = external; + /* Subflow object was created, we must include one in the list. */ + SILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx, + dev_flow->handle, next); + /* + * If dev_flow is as one of the suffix flow, some actions in suffix + * flow may need some user defined item layer flags. + */ + if (prefix_layers) + dev_flow->handle->layers = prefix_layers; + if (sub_flow) + *sub_flow = dev_flow; + return flow_drv_translate(dev, dev_flow, attr, items, actions, error); +} + +/** + * Split the meter flow. + * + * As meter flow will split to three sub flow, other than meter + * action, the other actions make sense to only meter accepts + * the packet. If it need to be dropped, no other additional + * actions should be take. + * + * One kind of special action which decapsulates the L3 tunnel + * header will be in the prefix sub flow, as not to take the + * L3 tunnel header into account. + * + * @param dev + * Pointer to Ethernet device. + * @param[in] items + * Pattern specification (list terminated by the END pattern item). + * @param[out] sfx_items + * Suffix flow match items (list terminated by the END pattern item). + * @param[in] actions + * Associated actions (list terminated by the END action). + * @param[out] actions_sfx + * Suffix flow actions. + * @param[out] actions_pre + * Prefix flow actions. + * @param[out] pattern_sfx + * The pattern items for the suffix flow. + * @param[out] tag_sfx + * Pointer to suffix flow tag. + * + * @return + * 0 on success. + */ +static int +flow_meter_split_prep(struct rte_eth_dev *dev, + const struct rte_flow_item items[], + struct rte_flow_item sfx_items[], + const struct rte_flow_action actions[], + struct rte_flow_action actions_sfx[], + struct rte_flow_action actions_pre[]) +{ + struct rte_flow_action *tag_action = NULL; + struct rte_flow_item *tag_item; + struct mlx5_rte_flow_action_set_tag *set_tag; + struct rte_flow_error error; + const struct rte_flow_action_raw_encap *raw_encap; + const struct rte_flow_action_raw_decap *raw_decap; + struct mlx5_rte_flow_item_tag *tag_spec; + struct mlx5_rte_flow_item_tag *tag_mask; + uint32_t tag_id; + bool copy_vlan = false; + + /* Prepare the actions for prefix and suffix flow. */ + for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { + struct rte_flow_action **action_cur = NULL; + + switch (actions->type) { + case RTE_FLOW_ACTION_TYPE_METER: + /* Add the extra tag action first. 
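The prefix/suffix split prepared here serves rules of roughly the following shape on the application side. This is a hedged sketch: it assumes a meter object with id MTR_ID was created beforehand through the rte_mtr API, and the port/queue values are placeholders.

#include <rte_flow.h>

#define MTR_ID 1 /* hypothetical meter id, created elsewhere via rte_mtr_create() */

/* A flow that meters matching packets and then queues the conforming ones. */
static struct rte_flow *
create_metered_flow(uint16_t port_id, uint16_t rx_queue)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_meter meter = { .mtr_id = MTR_ID };
	struct rte_flow_action_queue queue = { .index = rx_queue };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_METER, .conf = &meter },
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error err;

	/* The PMD splits this into a prefix (meter + tag) and a suffix
	 * (match tag + queue) subflow, as prepared by the code above. */
	return rte_flow_create(port_id, &attr, pattern, actions, &err);
}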
*/ + tag_action = actions_pre; + tag_action->type = (enum rte_flow_action_type) + MLX5_RTE_FLOW_ACTION_TYPE_TAG; + actions_pre++; + action_cur = &actions_pre; + break; + case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP: + case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP: + action_cur = &actions_pre; + break; + case RTE_FLOW_ACTION_TYPE_RAW_ENCAP: + raw_encap = actions->conf; + if (raw_encap->size < MLX5_ENCAPSULATION_DECISION_SIZE) + action_cur = &actions_pre; + break; + case RTE_FLOW_ACTION_TYPE_RAW_DECAP: + raw_decap = actions->conf; + if (raw_decap->size > MLX5_ENCAPSULATION_DECISION_SIZE) + action_cur = &actions_pre; + break; + case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN: + case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID: + copy_vlan = true; + break; + default: + break; + } + if (!action_cur) + action_cur = &actions_sfx; + memcpy(*action_cur, actions, sizeof(struct rte_flow_action)); + (*action_cur)++; + } + /* Add end action to the actions. */ + actions_sfx->type = RTE_FLOW_ACTION_TYPE_END; + actions_pre->type = RTE_FLOW_ACTION_TYPE_END; + actions_pre++; + /* Set the tag. */ + set_tag = (void *)actions_pre; + set_tag->id = mlx5_flow_get_reg_id(dev, MLX5_MTR_SFX, 0, &error); + /* + * Get the id from the qrss_pool to make qrss share the id with meter. + */ + tag_id = flow_qrss_get_id(dev); + set_tag->data = tag_id << MLX5_MTR_COLOR_BITS; + assert(tag_action); + tag_action->conf = set_tag; + /* Prepare the suffix subflow items. */ + tag_item = sfx_items++; + for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) { + int item_type = items->type; + + switch (item_type) { + case RTE_FLOW_ITEM_TYPE_PORT_ID: + memcpy(sfx_items, items, sizeof(*sfx_items)); + sfx_items++; + break; + case RTE_FLOW_ITEM_TYPE_VLAN: + if (copy_vlan) { + memcpy(sfx_items, items, sizeof(*sfx_items)); + /* + * Convert to internal match item, it is used + * for vlan push and set vid. + */ + sfx_items->type = (enum rte_flow_item_type) + MLX5_RTE_FLOW_ITEM_TYPE_VLAN; + sfx_items++; + } + break; + default: + break; + } + } + sfx_items->type = RTE_FLOW_ITEM_TYPE_END; + sfx_items++; + tag_spec = (struct mlx5_rte_flow_item_tag *)sfx_items; + tag_spec->data = tag_id << MLX5_MTR_COLOR_BITS; + tag_spec->id = mlx5_flow_get_reg_id(dev, MLX5_MTR_SFX, 0, &error); + tag_mask = tag_spec + 1; + tag_mask->data = 0xffffff00; + tag_item->type = (enum rte_flow_item_type) + MLX5_RTE_FLOW_ITEM_TYPE_TAG; + tag_item->spec = tag_spec; + tag_item->last = NULL; + tag_item->mask = tag_mask; + return tag_id; +} + +/** + * Split action list having QUEUE/RSS for metadata register copy. + * + * Once Q/RSS action is detected in user's action list, the flow action + * should be split in order to copy metadata registers, which will happen in + * RX_CP_TBL like, + * - CQE->flow_tag := reg_c[1] (MARK) + * - CQE->flow_table_metadata (reg_b) := reg_c[0] (META) + * The Q/RSS action will be performed on RX_ACT_TBL after passing by RX_CP_TBL. + * This is because the last action of each flow must be a terminal action + * (QUEUE, RSS or DROP). + * + * Flow ID must be allocated to identify actions in the RX_ACT_TBL and it is + * stored and kept in the mlx5_flow structure per each sub_flow. + * + * The Q/RSS action is replaced with, + * - SET_TAG, setting the allocated flow ID to reg_c[2]. + * And the following JUMP action is added at the end, + * - JUMP, to RX_CP_TBL. + * + * A flow to perform remained Q/RSS action will be created in RX_ACT_TBL by + * flow_create_split_metadata() routine. The flow will look like, + * - If flow ID matches (reg_c[2]), perform Q/RSS. 
+ * + * @param dev + * Pointer to Ethernet device. + * @param[out] split_actions + * Pointer to store split actions to jump to CP_TBL. + * @param[in] actions + * Pointer to the list of original flow actions. + * @param[in] qrss + * Pointer to the Q/RSS action. + * @param[in] actions_n + * Number of original actions. + * @param[out] error + * Perform verbose error reporting if not NULL. + * + * @return + * non-zero unique flow_id on success, otherwise 0 and + * error/rte_error are set. + */ +static uint32_t +flow_mreg_split_qrss_prep(struct rte_eth_dev *dev, + struct rte_flow_action *split_actions, + const struct rte_flow_action *actions, + const struct rte_flow_action *qrss, + int actions_n, struct rte_flow_error *error) +{ + struct mlx5_rte_flow_action_set_tag *set_tag; + struct rte_flow_action_jump *jump; + const int qrss_idx = qrss - actions; + uint32_t flow_id = 0; + int ret = 0; + + /* + * Given actions will be split + * - Replace QUEUE/RSS action with SET_TAG to set flow ID. + * - Add jump to mreg CP_TBL. + * As a result, there will be one more action. + */ + ++actions_n; + memcpy(split_actions, actions, sizeof(*split_actions) * actions_n); + set_tag = (void *)(split_actions + actions_n); + /* + * If tag action is not set to void(it means we are not the meter + * suffix flow), add the tag action. Since meter suffix flow already + * has the tag added. + */ + if (split_actions[qrss_idx].type != RTE_FLOW_ACTION_TYPE_VOID) { + /* + * Allocate the new subflow ID. This one is unique within + * device and not shared with representors. Otherwise, + * we would have to resolve multi-thread access synch + * issue. Each flow on the shared device is appended + * with source vport identifier, so the resulting + * flows will be unique in the shared (by master and + * representors) domain even if they have coinciding + * IDs. + */ + flow_id = flow_qrss_get_id(dev); + if (!flow_id) + return rte_flow_error_set(error, ENOMEM, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, "can't allocate id " + "for split Q/RSS subflow"); + /* Internal SET_TAG action to set flow ID. */ + *set_tag = (struct mlx5_rte_flow_action_set_tag){ + .data = flow_id, + }; + ret = mlx5_flow_get_reg_id(dev, MLX5_COPY_MARK, 0, error); + if (ret < 0) + return ret; + set_tag->id = ret; + /* Construct new actions array. */ + /* Replace QUEUE/RSS action. */ + split_actions[qrss_idx] = (struct rte_flow_action){ + .type = (enum rte_flow_action_type) + MLX5_RTE_FLOW_ACTION_TYPE_TAG, + .conf = set_tag, + }; + } + /* JUMP action to jump to mreg copy table (CP_TBL). */ + jump = (void *)(set_tag + 1); + *jump = (struct rte_flow_action_jump){ + .group = MLX5_FLOW_MREG_CP_TABLE_GROUP, + }; + split_actions[actions_n - 2] = (struct rte_flow_action){ + .type = RTE_FLOW_ACTION_TYPE_JUMP, + .conf = jump, + }; + split_actions[actions_n - 1] = (struct rte_flow_action){ + .type = RTE_FLOW_ACTION_TYPE_END, + }; + return flow_id; +} + +/** + * Extend the given action list for Tx metadata copy. + * + * Copy the given action list to the ext_actions and add flow metadata register + * copy action in order to copy reg_a set by WQE to reg_c[0]. + * + * @param[out] ext_actions + * Pointer to the extended action list. + * @param[in] actions + * Pointer to the list of actions. + * @param[in] actions_n + * Number of actions in the list. + * @param[out] error + * Perform verbose error reporting if not NULL. + * @param[in] encap_idx + * The encap action inndex. 
+ * + * @return + * 0 on success, negative value otherwise + */ +static int +flow_mreg_tx_copy_prep(struct rte_eth_dev *dev, + struct rte_flow_action *ext_actions, + const struct rte_flow_action *actions, + int actions_n, struct rte_flow_error *error, + int encap_idx) +{ + struct mlx5_flow_action_copy_mreg *cp_mreg = + (struct mlx5_flow_action_copy_mreg *) + (ext_actions + actions_n + 1); + int ret; + + ret = mlx5_flow_get_reg_id(dev, MLX5_METADATA_RX, 0, error); + if (ret < 0) + return ret; + cp_mreg->dst = ret; + ret = mlx5_flow_get_reg_id(dev, MLX5_METADATA_TX, 0, error); + if (ret < 0) + return ret; + cp_mreg->src = ret; + if (encap_idx != 0) + memcpy(ext_actions, actions, sizeof(*ext_actions) * encap_idx); + if (encap_idx == actions_n - 1) { + ext_actions[actions_n - 1] = (struct rte_flow_action){ + .type = (enum rte_flow_action_type) + MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG, + .conf = cp_mreg, + }; + ext_actions[actions_n] = (struct rte_flow_action){ + .type = RTE_FLOW_ACTION_TYPE_END, + }; + } else { + ext_actions[encap_idx] = (struct rte_flow_action){ + .type = (enum rte_flow_action_type) + MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG, + .conf = cp_mreg, + }; + memcpy(ext_actions + encap_idx + 1, actions + encap_idx, + sizeof(*ext_actions) * (actions_n - encap_idx)); + } + return 0; +} + +/** + * The splitting for metadata feature. + * + * - Q/RSS action on NIC Rx should be split in order to pass by + * the mreg copy table (RX_CP_TBL) and then it jumps to the + * action table (RX_ACT_TBL) which has the split Q/RSS action. + * + * - All the actions on NIC Tx should have a mreg copy action to + * copy reg_a from WQE to reg_c[0]. + * + * @param dev + * Pointer to Ethernet device. + * @param[in] flow + * Parent flow structure pointer. + * @param[in] prefix_layers + * Prefix flow layer flags. + * @param[in] attr + * Flow rule attributes. + * @param[in] items + * Pattern specification (list terminated by the END pattern item). + * @param[in] actions + * Associated actions (list terminated by the END action). + * @param[in] external + * This flow rule is created by request external to PMD. + * @param[in] flow_idx + * This memory pool index to the flow. + * @param[out] error + * Perform verbose error reporting if not NULL. + * @return + * 0 on success, negative value otherwise + */ +static int +flow_create_split_metadata(struct rte_eth_dev *dev, + struct rte_flow *flow, + uint64_t prefix_layers, + const struct rte_flow_attr *attr, + const struct rte_flow_item items[], + const struct rte_flow_action actions[], + bool external, uint32_t flow_idx, + struct rte_flow_error *error) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_dev_config *config = &priv->config; + const struct rte_flow_action *qrss = NULL; + struct rte_flow_action *ext_actions = NULL; + struct mlx5_flow *dev_flow = NULL; + uint32_t qrss_id = 0; + int mtr_sfx = 0; + size_t act_size; + int actions_n; + int encap_idx; + int ret; + + /* Check whether extensive metadata feature is engaged. */ + if (!config->dv_flow_en || + config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY || + !mlx5_flow_ext_mreg_supported(dev)) + return flow_create_split_inner(dev, flow, NULL, prefix_layers, + attr, items, actions, external, + flow_idx, error); + actions_n = flow_parse_metadata_split_actions_info(actions, &qrss, + &encap_idx); + if (qrss) { + /* Exclude hairpin flows from splitting. 
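The reg_a to reg_c[0] copy prepared by flow_mreg_tx_copy_prep() is what lets metadata attached to an egress mbuf (carried in reg_a by the WQE) remain matchable in tables that only see reg_c. A sketch of the sending side, assuming the extended metadata mode (dv_xmeta_en) is enabled on the port:

#include <rte_flow.h>
#include <rte_mbuf.h>
#include <rte_mbuf_dyn.h>

/* Attach 32-bit metadata to an mbuf about to be transmitted. */
static int
send_with_metadata(struct rte_mbuf *m, uint32_t meta)
{
	/* One-time registration of the dynamic metadata mbuf field. */
	if (!rte_flow_dynf_metadata_avail() &&
	    rte_flow_dynf_metadata_register() < 0)
		return -1;
	*RTE_FLOW_DYNF_METADATA(m) = meta;
	m->ol_flags |= PKT_TX_DYNF_METADATA;
	return 0;
}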
*/ + if (qrss->type == RTE_FLOW_ACTION_TYPE_QUEUE) { + const struct rte_flow_action_queue *queue; + + queue = qrss->conf; + if (mlx5_rxq_get_type(dev, queue->index) == + MLX5_RXQ_TYPE_HAIRPIN) + qrss = NULL; + } else if (qrss->type == RTE_FLOW_ACTION_TYPE_RSS) { + const struct rte_flow_action_rss *rss; + + rss = qrss->conf; + if (mlx5_rxq_get_type(dev, rss->queue[0]) == + MLX5_RXQ_TYPE_HAIRPIN) + qrss = NULL; + } + } + if (qrss) { + /* Check if it is in meter suffix table. */ + mtr_sfx = attr->group == (attr->transfer ? + (MLX5_FLOW_TABLE_LEVEL_SUFFIX - 1) : + MLX5_FLOW_TABLE_LEVEL_SUFFIX); + /* + * Q/RSS action on NIC Rx should be split in order to pass by + * the mreg copy table (RX_CP_TBL) and then it jumps to the + * action table (RX_ACT_TBL) which has the split Q/RSS action. + */ + act_size = sizeof(struct rte_flow_action) * (actions_n + 1) + + sizeof(struct rte_flow_action_set_tag) + + sizeof(struct rte_flow_action_jump); + ext_actions = rte_zmalloc(__func__, act_size, 0); + if (!ext_actions) + return rte_flow_error_set(error, ENOMEM, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, "no memory to split " + "metadata flow"); + /* + * If we are the suffix flow of meter, tag already exist. + * Set the tag action to void. + */ + if (mtr_sfx) + ext_actions[qrss - actions].type = + RTE_FLOW_ACTION_TYPE_VOID; + else + ext_actions[qrss - actions].type = + (enum rte_flow_action_type) + MLX5_RTE_FLOW_ACTION_TYPE_TAG; + /* + * Create the new actions list with removed Q/RSS action + * and appended set tag and jump to register copy table + * (RX_CP_TBL). We should preallocate unique tag ID here + * in advance, because it is needed for set tag action. + */ + qrss_id = flow_mreg_split_qrss_prep(dev, ext_actions, actions, + qrss, actions_n, error); + if (!mtr_sfx && !qrss_id) { + ret = -rte_errno; + goto exit; + } + } else if (attr->egress && !attr->transfer) { + /* + * All the actions on NIC Tx should have a metadata register + * copy action to copy reg_a from WQE to reg_c[meta] + */ + act_size = sizeof(struct rte_flow_action) * (actions_n + 1) + + sizeof(struct mlx5_flow_action_copy_mreg); + ext_actions = rte_zmalloc(__func__, act_size, 0); + if (!ext_actions) + return rte_flow_error_set(error, ENOMEM, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, "no memory to split " + "metadata flow"); + /* Create the action list appended with copy register. */ + ret = flow_mreg_tx_copy_prep(dev, ext_actions, actions, + actions_n, error, encap_idx); + if (ret < 0) + goto exit; + } + /* Add the unmodified original or prefix subflow. */ + ret = flow_create_split_inner(dev, flow, &dev_flow, prefix_layers, attr, + items, ext_actions ? ext_actions : + actions, external, flow_idx, error); + if (ret < 0) + goto exit; + MLX5_ASSERT(dev_flow); + if (qrss) { + const struct rte_flow_attr q_attr = { + .group = MLX5_FLOW_MREG_ACT_TABLE_GROUP, + .ingress = 1, + }; + /* Internal PMD action to set register. */ + struct mlx5_rte_flow_item_tag q_tag_spec = { + .data = qrss_id, + .id = 0, + }; + struct rte_flow_item q_items[] = { + { + .type = (enum rte_flow_item_type) + MLX5_RTE_FLOW_ITEM_TYPE_TAG, + .spec = &q_tag_spec, + .last = NULL, + .mask = NULL, + }, + { + .type = RTE_FLOW_ITEM_TYPE_END, + }, + }; + struct rte_flow_action q_actions[] = { + { + .type = qrss->type, + .conf = qrss->conf, + }, + { + .type = RTE_FLOW_ACTION_TYPE_END, + }, + }; + uint64_t layers = flow_get_prefix_layer_flags(dev_flow); + + /* + * Configure the tag item only if there is no meter subflow. 
+	 * Since the tag is already marked in the meter suffix subflow
+	 * we can just use the meter suffix items as is.
+	 */
+	if (qrss_id) {
+		/* Not meter subflow. */
+		MLX5_ASSERT(!mtr_sfx);
+		/*
+		 * Put the unique ID in the prefix flow because it is
+		 * destroyed after the suffix flow, and the ID is freed
+		 * only when no actual flows reference it any longer,
+		 * at which point identifier reallocation becomes
+		 * possible (for example, for other flows in other
+		 * threads).
+		 */
+		dev_flow->handle->split_flow_id = qrss_id;
+		ret = mlx5_flow_get_reg_id(dev, MLX5_COPY_MARK, 0,
+					   error);
+		if (ret < 0)
+			goto exit;
+		q_tag_spec.id = ret;
+	}
+	dev_flow = NULL;
+	/* Add suffix subflow to execute Q/RSS. */
+	ret = flow_create_split_inner(dev, flow, &dev_flow, layers,
+				      &q_attr, mtr_sfx ? items :
+				      q_items, q_actions,
+				      external, flow_idx, error);
+	if (ret < 0)
+		goto exit;
+	/* qrss ID should be freed if failed. */
+	qrss_id = 0;
+	MLX5_ASSERT(dev_flow);
+	}
+
+exit:
+	/*
+	 * We do not destroy the partially created sub_flows in case of error.
+	 * They are included in the parent flow list and will be destroyed
+	 * by flow_drv_destroy.
+	 */
+	flow_qrss_free_id(dev, qrss_id);
+	rte_free(ext_actions);
+	return ret;
+}
+
+/**
+ * The splitting for meter feature.
+ *
+ * - The meter flow is split into a prefix flow and a suffix flow.
+ *   Packets reach the suffix flow only if they pass the prefix
+ *   meter action.
+ *
+ * - Reg_C_5 is used to match packets between the prefix and
+ *   suffix flows.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param[in] flow
+ *   Parent flow structure pointer.
+ * @param[in] attr
+ *   Flow rule attributes.
+ * @param[in] items
+ *   Pattern specification (list terminated by the END pattern item).
+ * @param[in] actions
+ *   Associated actions (list terminated by the END action).
+ * @param[in] external
+ *   This flow rule is created by request external to PMD.
+ * @param[in] flow_idx
+ *   This memory pool index to the flow.
+ * @param[out] error
+ *   Perform verbose error reporting if not NULL.
+ * @return
+ *   0 on success, negative value otherwise
+ */
+static int
+flow_create_split_meter(struct rte_eth_dev *dev,
+			struct rte_flow *flow,
+			const struct rte_flow_attr *attr,
+			const struct rte_flow_item items[],
+			const struct rte_flow_action actions[],
+			bool external, uint32_t flow_idx,
+			struct rte_flow_error *error)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct rte_flow_action *sfx_actions = NULL;
+	struct rte_flow_action *pre_actions = NULL;
+	struct rte_flow_item *sfx_items = NULL;
+	struct mlx5_flow *dev_flow = NULL;
+	struct rte_flow_attr sfx_attr = *attr;
+	uint32_t mtr = 0;
+	uint32_t mtr_tag_id = 0;
+	size_t act_size;
+	size_t item_size;
+	int actions_n = 0;
+	int ret;
+
+	if (priv->mtr_en)
+		actions_n = flow_check_meter_action(actions, &mtr);
+	if (mtr) {
+		/* The five prefix actions: meter, decap, encap, tag, end. */
+		act_size = sizeof(struct rte_flow_action) * (actions_n + 5) +
+			   sizeof(struct mlx5_rte_flow_action_set_tag);
+		/* tag, vlan, port id, end. */
+#define METER_SUFFIX_ITEM 4
+		item_size = sizeof(struct rte_flow_item) * METER_SUFFIX_ITEM +
+			    sizeof(struct mlx5_rte_flow_item_tag) * 2;
+		sfx_actions = rte_zmalloc(__func__, (act_size + item_size), 0);
+		if (!sfx_actions)
+			return rte_flow_error_set(error, ENOMEM,
+						  RTE_FLOW_ERROR_TYPE_ACTION,
+						  NULL, "no memory to split "
+						  "meter flow");
+		sfx_items = (struct rte_flow_item *)((char *)sfx_actions +
+			     act_size);
+		pre_actions = sfx_actions + actions_n;
+		mtr_tag_id = flow_meter_split_prep(dev, items, sfx_items,
+						   actions, sfx_actions,
+						   pre_actions);
+		if (!mtr_tag_id) {
+			ret = -rte_errno;
+			goto exit;
+		}
+		/* Add the prefix subflow. */
+		ret = flow_create_split_inner(dev, flow, &dev_flow, 0, attr,
+					      items, pre_actions, external,
+					      flow_idx, error);
+		if (ret) {
+			ret = -rte_errno;
+			goto exit;
+		}
+		dev_flow->handle->split_flow_id = mtr_tag_id;
+		/* Setting the suffix group attr. */
+		sfx_attr.group = sfx_attr.transfer ?
+				(MLX5_FLOW_TABLE_LEVEL_SUFFIX - 1) :
+				 MLX5_FLOW_TABLE_LEVEL_SUFFIX;
+	}
+	/* Add the suffix or unmodified original subflow. */
+	ret = flow_create_split_metadata(dev, flow, dev_flow ?
+					 flow_get_prefix_layer_flags(dev_flow) :
+					 0, &sfx_attr,
+					 sfx_items ? sfx_items : items,
+					 sfx_actions ? sfx_actions : actions,
+					 external, flow_idx, error);
+exit:
+	if (sfx_actions)
+		rte_free(sfx_actions);
+	return ret;
+}
+
+/**
+ * Split the flow to subflow set. The splitters might be linked
+ * in the chain, like this:
+ * flow_create_split_outer() calls:
+ *   flow_create_split_meter() calls:
+ *     flow_create_split_metadata(meter_subflow_0) calls:
+ *       flow_create_split_inner(metadata_subflow_0)
+ *       flow_create_split_inner(metadata_subflow_1)
+ *       flow_create_split_inner(metadata_subflow_2)
+ *     flow_create_split_metadata(meter_subflow_1) calls:
+ *       flow_create_split_inner(metadata_subflow_0)
+ *       flow_create_split_inner(metadata_subflow_1)
+ *       flow_create_split_inner(metadata_subflow_2)
+ *
+ * This provides a flexible way to add new levels of flow splitting.
+ * All successfully created subflows are included in the parent
+ * flow dev_flow list.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param[in] flow
+ *   Parent flow structure pointer.
+ * @param[in] attr
+ *   Flow rule attributes.
+ * @param[in] items
+ *   Pattern specification (list terminated by the END pattern item).
+ * @param[in] actions
+ *   Associated actions (list terminated by the END action).
+ * @param[in] external
+ *   This flow rule is created by request external to PMD.
+ * @param[in] flow_idx
+ *   This memory pool index to the flow.
+ * @param[out] error
+ *   Perform verbose error reporting if not NULL.
+ * @return
+ *   0 on success, negative value otherwise
+ */
+static int
+flow_create_split_outer(struct rte_eth_dev *dev,
+			struct rte_flow *flow,
+			const struct rte_flow_attr *attr,
+			const struct rte_flow_item items[],
+			const struct rte_flow_action actions[],
+			bool external, uint32_t flow_idx,
+			struct rte_flow_error *error)
+{
+	int ret;
+
+	ret = flow_create_split_meter(dev, flow, attr, items,
+				      actions, external, flow_idx, error);
+	MLX5_ASSERT(ret <= 0);
+	return ret;
+}
+
+/**
+ * Create a flow and add it to @p list.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param list
+ *   Pointer to a TAILQ flow list. If this parameter is NULL,
+ *   no list insertion occurs, the flow is just created, and
+ *   it is the caller's responsibility to track the created flow.
+ * @param[in] attr
+ *   Flow rule attributes.
+ * @param[in] items
+ *   Pattern specification (list terminated by the END pattern item).
+ * @param[in] actions + * Associated actions (list terminated by the END action). + * @param[in] external + * This flow rule is created by request external to PMD. + * @param[out] error + * Perform verbose error reporting if not NULL. + * + * @return + * A flow index on success, 0 otherwise and rte_errno is set. + */ +static uint32_t +flow_list_create(struct rte_eth_dev *dev, uint32_t *list, + const struct rte_flow_attr *attr, + const struct rte_flow_item items[], + const struct rte_flow_action actions[], + bool external, struct rte_flow_error *error) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct rte_flow *flow = NULL; + struct mlx5_flow *dev_flow; + const struct rte_flow_action_rss *rss; + union { + struct rte_flow_expand_rss buf; + uint8_t buffer[2048]; + } expand_buffer; + union { + struct rte_flow_action actions[MLX5_MAX_SPLIT_ACTIONS]; + uint8_t buffer[2048]; + } actions_rx; + union { + struct rte_flow_action actions[MLX5_MAX_SPLIT_ACTIONS]; + uint8_t buffer[2048]; + } actions_hairpin_tx; + union { + struct rte_flow_item items[MLX5_MAX_SPLIT_ITEMS]; + uint8_t buffer[2048]; + } items_tx; + struct rte_flow_expand_rss *buf = &expand_buffer.buf; + struct mlx5_flow_rss_desc *rss_desc = &((struct mlx5_flow_rss_desc *) + priv->rss_desc)[!!priv->flow_idx]; + const struct rte_flow_action *p_actions_rx = actions; + uint32_t i; + uint32_t idx = 0; + int hairpin_flow; + uint32_t hairpin_id = 0; + struct rte_flow_attr attr_tx = { .priority = 0 }; + int ret; + + hairpin_flow = flow_check_hairpin_split(dev, attr, actions); + ret = flow_drv_validate(dev, attr, items, p_actions_rx, + external, hairpin_flow, error); + if (ret < 0) + return 0; + if (hairpin_flow > 0) { + if (hairpin_flow > MLX5_MAX_SPLIT_ACTIONS) { + rte_errno = EINVAL; + return 0; + } + flow_hairpin_split(dev, actions, actions_rx.actions, + actions_hairpin_tx.actions, items_tx.items, + &hairpin_id); + p_actions_rx = actions_rx.actions; + } + flow = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], &idx); + if (!flow) { + rte_errno = ENOMEM; + goto error_before_flow; + } + flow->drv_type = flow_get_drv_type(dev, attr); + if (hairpin_id != 0) + flow->hairpin_flow_id = hairpin_id; + MLX5_ASSERT(flow->drv_type > MLX5_FLOW_TYPE_MIN && + flow->drv_type < MLX5_FLOW_TYPE_MAX); + memset(rss_desc, 0, sizeof(*rss_desc)); + rss = flow_get_rss_action(p_actions_rx); + if (rss) { + /* + * The following information is required by + * mlx5_flow_hashfields_adjust() in advance. + */ + rss_desc->level = rss->level; + /* RSS type 0 indicates default RSS type (ETH_RSS_IP). */ + rss_desc->types = !rss->types ? ETH_RSS_IP : rss->types; + } + flow->dev_handles = 0; + if (rss && rss->types) { + unsigned int graph_root; + + graph_root = find_graph_root(items, rss->level); + ret = rte_flow_expand_rss(buf, sizeof(expand_buffer.buffer), + items, rss->types, + mlx5_support_expansion, + graph_root); + MLX5_ASSERT(ret > 0 && + (unsigned int)ret < sizeof(expand_buffer.buffer)); + } else { + buf->entries = 1; + buf->entry[0].pattern = (void *)(uintptr_t)items; + } + /* + * Record the start index when there is a nested call. All sub-flows + * need to be translated before another calling. + * No need to use ping-pong buffer to save memory here. + */ + if (priv->flow_idx) { + MLX5_ASSERT(!priv->flow_nested_idx); + priv->flow_nested_idx = priv->flow_idx; + } + for (i = 0; i < buf->entries; ++i) { + /* + * The splitter may create multiple dev_flows, + * depending on configuration. In the simplest + * case it just creates unmodified original flow. 
+ */ + ret = flow_create_split_outer(dev, flow, attr, + buf->entry[i].pattern, + p_actions_rx, external, idx, + error); + if (ret < 0) + goto error; + } + /* Create the tx flow. */ + if (hairpin_flow) { + attr_tx.group = MLX5_HAIRPIN_TX_TABLE; + attr_tx.ingress = 0; + attr_tx.egress = 1; + dev_flow = flow_drv_prepare(dev, flow, &attr_tx, items_tx.items, + actions_hairpin_tx.actions, + idx, error); + if (!dev_flow) + goto error; + dev_flow->flow = flow; + dev_flow->external = 0; + SILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx, + dev_flow->handle, next); + ret = flow_drv_translate(dev, dev_flow, &attr_tx, + items_tx.items, + actions_hairpin_tx.actions, error); + if (ret < 0) + goto error; + } + /* + * Update the metadata register copy table. If extensive + * metadata feature is enabled and registers are supported + * we might create the extra rte_flow for each unique + * MARK/FLAG action ID. + * + * The table is updated for ingress Flows only, because + * the egress Flows belong to the different device and + * copy table should be updated in peer NIC Rx domain. + */ + if (attr->ingress && + (external || attr->group != MLX5_FLOW_MREG_CP_TABLE_GROUP)) { + ret = flow_mreg_update_copy_table(dev, flow, actions, error); + if (ret) + goto error; + } + /* + * If the flow is external (from application) OR device is started, then + * the flow will be applied immediately. + */ + if (external || dev->data->dev_started) { + ret = flow_drv_apply(dev, flow, error); + if (ret < 0) + goto error; + } + if (list) + ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], list, idx, + flow, next); + flow_rxq_flags_set(dev, flow); + /* Nested flow creation index recovery. */ + priv->flow_idx = priv->flow_nested_idx; + if (priv->flow_nested_idx) + priv->flow_nested_idx = 0; + return idx; +error: + MLX5_ASSERT(flow); + ret = rte_errno; /* Save rte_errno before cleanup. */ + flow_mreg_del_copy_action(dev, flow); + flow_drv_destroy(dev, flow); + mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], idx); + rte_errno = ret; /* Restore rte_errno. */ +error_before_flow: + ret = rte_errno; + if (hairpin_id) + mlx5_flow_id_release(priv->sh->flow_id_pool, + hairpin_id); + rte_errno = ret; + priv->flow_idx = priv->flow_nested_idx; + if (priv->flow_nested_idx) + priv->flow_nested_idx = 0; + return 0; +} + +/** + * Create a dedicated flow rule on e-switch table 0 (root table), to direct all + * incoming packets to table 1. + * + * Other flow rules, requested for group n, will be created in + * e-switch table n+1. + * Jump action to e-switch group n will be created to group n+1. + * + * Used when working in switchdev mode, to utilise advantages of table 1 + * and above. + * + * @param dev + * Pointer to Ethernet device. + * + * @return + * Pointer to flow on success, NULL otherwise and rte_errno is set. 
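mlx5_flow_validate() and flow_list_create() back the public rte_flow entry points, and an RSS action covering several protocol types is what drives the rte_flow_expand_rss() pass above. A hedged application-level sketch (queue list and RSS types chosen arbitrarily):

#include <stdio.h>
#include <rte_common.h>
#include <rte_ethdev.h>
#include <rte_flow.h>

/* Validate, then create, an ingress flow distributing traffic by RSS. */
static struct rte_flow *
create_rss_flow(uint16_t port_id)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	uint16_t queues[] = { 0, 1, 2, 3 };
	struct rte_flow_action_rss rss = {
		.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
		.level = 0,
		.types = ETH_RSS_IP | ETH_RSS_UDP,
		.queue_num = RTE_DIM(queues),
		.queue = queues,
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error err;

	/* Validation runs the same driver checks that creation does. */
	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) != 0) {
		printf("flow not supported: %s\n",
		       err.message ? err.message : "unknown");
		return NULL;
	}
	return rte_flow_create(port_id, &attr, pattern, actions, &err);
}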
+ */
+struct rte_flow *
+mlx5_flow_create_esw_table_zero_flow(struct rte_eth_dev *dev)
+{
+	const struct rte_flow_attr attr = {
+		.group = 0,
+		.priority = 0,
+		.ingress = 1,
+		.egress = 0,
+		.transfer = 1,
+	};
+	const struct rte_flow_item pattern = {
+		.type = RTE_FLOW_ITEM_TYPE_END,
+	};
+	struct rte_flow_action_jump jump = {
+		.group = 1,
+	};
+	const struct rte_flow_action actions[] = {
+		{
+			.type = RTE_FLOW_ACTION_TYPE_JUMP,
+			.conf = &jump,
+		},
+		{
+			.type = RTE_FLOW_ACTION_TYPE_END,
+		},
+	};
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct rte_flow_error error;
+
+	return (void *)(uintptr_t)flow_list_create(dev, &priv->ctrl_flows,
+						   &attr, &pattern,
+						   actions, false, &error);
+}
+
+/**
+ * Validate a flow supported by the NIC.
+ *
+ * @see rte_flow_validate()
+ * @see rte_flow_ops
+ */
+int
+mlx5_flow_validate(struct rte_eth_dev *dev,
+		   const struct rte_flow_attr *attr,
+		   const struct rte_flow_item items[],
+		   const struct rte_flow_action actions[],
+		   struct rte_flow_error *error)
+{
+	int hairpin_flow;
+
+	hairpin_flow = flow_check_hairpin_split(dev, attr, actions);
+	return flow_drv_validate(dev, attr, items, actions,
+				 true, hairpin_flow, error);
+}
+
+/**
+ * Create a flow.
+ *
+ * @see rte_flow_create()
+ * @see rte_flow_ops
+ */
+struct rte_flow *
+mlx5_flow_create(struct rte_eth_dev *dev,
+		 const struct rte_flow_attr *attr,
+		 const struct rte_flow_item items[],
+		 const struct rte_flow_action actions[],
+		 struct rte_flow_error *error)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+
+	/*
+	 * If the device is not started yet, the application is not
+	 * allowed to create a flow. PMD default flows and traffic
+	 * control flows are not affected.
+	 */
+	if (unlikely(!dev->data->dev_started)) {
+		DRV_LOG(DEBUG, "port %u is not started when "
+			"inserting a flow", dev->data->port_id);
+		rte_flow_error_set(error, ENODEV,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				   NULL,
+				   "port not started");
+		return NULL;
+	}
+	return (void *)(uintptr_t)flow_list_create(dev, &priv->flows,
+				  attr, items, actions, true, error);
+}
+
+/**
+ * Destroy a flow in a list.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param list
+ *   Pointer to the indexed flow list. If this parameter is NULL,
+ *   no flow is removed from any list. Note that since flows are
+ *   added to the indexed list, the memory the list points to may
+ *   change as flows are destroyed.
+ * @param[in] flow_idx
+ *   Index of flow to destroy.
+ */
+static void
+flow_list_destroy(struct rte_eth_dev *dev, uint32_t *list,
+		  uint32_t flow_idx)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_fdir_flow *priv_fdir_flow = NULL;
+	struct rte_flow *flow = mlx5_ipool_get(priv->sh->ipool
+					       [MLX5_IPOOL_RTE_FLOW], flow_idx);
+
+	if (!flow)
+		return;
+	/*
+	 * Update RX queue flags only if port is started, otherwise it is
+	 * already clean.
+ */ + if (dev->data->dev_started) + flow_rxq_flags_trim(dev, flow); + if (flow->hairpin_flow_id) + mlx5_flow_id_release(priv->sh->flow_id_pool, + flow->hairpin_flow_id); + flow_drv_destroy(dev, flow); + if (list) + ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], list, + flow_idx, flow, next); + flow_mreg_del_copy_action(dev, flow); + if (flow->fdir) { + LIST_FOREACH(priv_fdir_flow, &priv->fdir_flows, next) { + if (priv_fdir_flow->rix_flow == flow_idx) + break; + } + if (priv_fdir_flow) { + LIST_REMOVE(priv_fdir_flow, next); + rte_free(priv_fdir_flow->fdir); + rte_free(priv_fdir_flow); + } + } + mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], flow_idx); +} + +/** + * Destroy all flows. + * + * @param dev + * Pointer to Ethernet device. + * @param list + * Pointer to the Indexed flow list. + * @param active + * If flushing is called avtively. + */ +void +mlx5_flow_list_flush(struct rte_eth_dev *dev, uint32_t *list, bool active) +{ + uint32_t num_flushed = 0; + + while (*list) { + flow_list_destroy(dev, list, *list); + num_flushed++; + } + if (active) { + DRV_LOG(INFO, "port %u: %u flows flushed before stopping", + dev->data->port_id, num_flushed); + } +} + +/** + * Remove all flows. + * + * @param dev + * Pointer to Ethernet device. + * @param list + * Pointer to the Indexed flow list. + */ +void +mlx5_flow_stop(struct rte_eth_dev *dev, uint32_t *list) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct rte_flow *flow = NULL; + uint32_t idx; + + ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], *list, idx, + flow, next) { + flow_drv_remove(dev, flow); + flow_mreg_stop_copy_action(dev, flow); + } + flow_mreg_del_default_copy_action(dev); + flow_rxq_flags_clear(dev); +} + +/** + * Add all flows. + * + * @param dev + * Pointer to Ethernet device. + * @param list + * Pointer to the Indexed flow list. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_flow_start(struct rte_eth_dev *dev, uint32_t *list) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct rte_flow *flow = NULL; + struct rte_flow_error error; + uint32_t idx; + int ret = 0; + + /* Make sure default copy action (reg_c[0] -> reg_b) is created. */ + ret = flow_mreg_add_default_copy_action(dev, &error); + if (ret < 0) + return -rte_errno; + /* Apply Flows created by application. */ + ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], *list, idx, + flow, next) { + ret = flow_mreg_start_copy_action(dev, flow); + if (ret < 0) + goto error; + ret = flow_drv_apply(dev, flow, &error); + if (ret < 0) + goto error; + flow_rxq_flags_set(dev, flow); + } + return 0; +error: + ret = rte_errno; /* Save rte_errno before cleanup. */ + mlx5_flow_stop(dev, list); + rte_errno = ret; /* Restore rte_errno. */ + return -rte_errno; +} + +/** + * Stop all default actions for flows. + * + * @param dev + * Pointer to Ethernet device. + */ +void +mlx5_flow_stop_default(struct rte_eth_dev *dev) +{ + flow_mreg_del_default_copy_action(dev); + flow_rxq_flags_clear(dev); +} + +/** + * Start all default actions for flows. + * + * @param dev + * Pointer to Ethernet device. + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_flow_start_default(struct rte_eth_dev *dev) +{ + struct rte_flow_error error; + + /* Make sure default copy action (reg_c[0] -> reg_b) is created. */ + return flow_mreg_add_default_copy_action(dev, &error); +} + +/** + * Allocate intermediate resources for flow creation. 
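mlx5_flow_list_flush() is what ultimately serves rte_flow_flush(). On the application side a typical teardown might look like this sketch (error handling kept minimal; port id is a placeholder):

#include <stdio.h>
#include <rte_ethdev.h>
#include <rte_flow.h>

/* Drop every flow rule on the port, then stop and close it. */
static void
teardown_port(uint16_t port_id)
{
	struct rte_flow_error err;

	if (rte_flow_flush(port_id, &err) != 0)
		printf("flush failed: %s\n",
		       err.message ? err.message : "unknown");
	rte_eth_dev_stop(port_id);
	rte_eth_dev_close(port_id);
}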
+ * + * @param dev + * Pointer to Ethernet device. + */ +void +mlx5_flow_alloc_intermediate(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + + if (!priv->inter_flows) { + priv->inter_flows = rte_calloc(__func__, 1, + MLX5_NUM_MAX_DEV_FLOWS * + sizeof(struct mlx5_flow) + + (sizeof(struct mlx5_flow_rss_desc) + + sizeof(uint16_t) * UINT16_MAX) * 2, 0); + if (!priv->inter_flows) { + DRV_LOG(ERR, "can't allocate intermediate memory."); + return; + } + } + priv->rss_desc = &((struct mlx5_flow *)priv->inter_flows) + [MLX5_NUM_MAX_DEV_FLOWS]; + /* Reset the index. */ + priv->flow_idx = 0; + priv->flow_nested_idx = 0; +} + +/** + * Free intermediate resources for flows. + * + * @param dev + * Pointer to Ethernet device. + */ +void +mlx5_flow_free_intermediate(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + + rte_free(priv->inter_flows); + priv->inter_flows = NULL; +} + +/** + * Verify the flow list is empty + * + * @param dev + * Pointer to Ethernet device. + * + * @return the number of flows not released. + */ +int +mlx5_flow_verify(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct rte_flow *flow; + uint32_t idx; + int ret = 0; + + ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], priv->flows, idx, + flow, next) { + DRV_LOG(DEBUG, "port %u flow %p still referenced", + dev->data->port_id, (void *)flow); + ++ret; + } + return ret; +} + +/** + * Enable default hairpin egress flow. + * + * @param dev + * Pointer to Ethernet device. + * @param queue + * The queue index. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_ctrl_flow_source_queue(struct rte_eth_dev *dev, + uint32_t queue) +{ + struct mlx5_priv *priv = dev->data->dev_private; + const struct rte_flow_attr attr = { + .egress = 1, + .priority = 0, + }; + struct mlx5_rte_flow_item_tx_queue queue_spec = { + .queue = queue, + }; + struct mlx5_rte_flow_item_tx_queue queue_mask = { + .queue = UINT32_MAX, + }; + struct rte_flow_item items[] = { + { + .type = (enum rte_flow_item_type) + MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE, + .spec = &queue_spec, + .last = NULL, + .mask = &queue_mask, + }, + { + .type = RTE_FLOW_ITEM_TYPE_END, + }, + }; + struct rte_flow_action_jump jump = { + .group = MLX5_HAIRPIN_TX_TABLE, + }; + struct rte_flow_action actions[2]; + uint32_t flow_idx; + struct rte_flow_error error; + + actions[0].type = RTE_FLOW_ACTION_TYPE_JUMP; + actions[0].conf = &jump; + actions[1].type = RTE_FLOW_ACTION_TYPE_END; + flow_idx = flow_list_create(dev, &priv->ctrl_flows, + &attr, items, actions, false, &error); + if (!flow_idx) { + DRV_LOG(DEBUG, + "Failed to create ctrl flow: rte_errno(%d)," + " type(%d), message(%s)", + rte_errno, error.type, + error.message ? error.message : " (no stated reason)"); + return -rte_errno; + } + return 0; +} + +/** + * Enable a control flow configured from the control plane. + * + * @param dev + * Pointer to Ethernet device. + * @param eth_spec + * An Ethernet flow spec to apply. + * @param eth_mask + * An Ethernet flow mask to apply. + * @param vlan_spec + * A VLAN flow spec to apply. + * @param vlan_mask + * A VLAN flow mask to apply. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. 
+ */ +int +mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev, + struct rte_flow_item_eth *eth_spec, + struct rte_flow_item_eth *eth_mask, + struct rte_flow_item_vlan *vlan_spec, + struct rte_flow_item_vlan *vlan_mask) +{ + struct mlx5_priv *priv = dev->data->dev_private; + const struct rte_flow_attr attr = { + .ingress = 1, + .priority = MLX5_FLOW_PRIO_RSVD, + }; + struct rte_flow_item items[] = { + { + .type = RTE_FLOW_ITEM_TYPE_ETH, + .spec = eth_spec, + .last = NULL, + .mask = eth_mask, + }, + { + .type = (vlan_spec) ? RTE_FLOW_ITEM_TYPE_VLAN : + RTE_FLOW_ITEM_TYPE_END, + .spec = vlan_spec, + .last = NULL, + .mask = vlan_mask, + }, + { + .type = RTE_FLOW_ITEM_TYPE_END, + }, + }; + uint16_t queue[priv->reta_idx_n]; + struct rte_flow_action_rss action_rss = { + .func = RTE_ETH_HASH_FUNCTION_DEFAULT, + .level = 0, + .types = priv->rss_conf.rss_hf, + .key_len = priv->rss_conf.rss_key_len, + .queue_num = priv->reta_idx_n, + .key = priv->rss_conf.rss_key, + .queue = queue, + }; + struct rte_flow_action actions[] = { + { + .type = RTE_FLOW_ACTION_TYPE_RSS, + .conf = &action_rss, + }, + { + .type = RTE_FLOW_ACTION_TYPE_END, + }, + }; + uint32_t flow_idx; + struct rte_flow_error error; + unsigned int i; + + if (!priv->reta_idx_n || !priv->rxqs_n) { + return 0; + } + if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)) + action_rss.types = 0; + for (i = 0; i != priv->reta_idx_n; ++i) + queue[i] = (*priv->reta_idx)[i]; + flow_idx = flow_list_create(dev, &priv->ctrl_flows, + &attr, items, actions, false, &error); + if (!flow_idx) + return -rte_errno; + return 0; +} + +/** + * Enable a flow control configured from the control plane. + * + * @param dev + * Pointer to Ethernet device. + * @param eth_spec + * An Ethernet flow spec to apply. + * @param eth_mask + * An Ethernet flow mask to apply. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_ctrl_flow(struct rte_eth_dev *dev, + struct rte_flow_item_eth *eth_spec, + struct rte_flow_item_eth *eth_mask) +{ + return mlx5_ctrl_flow_vlan(dev, eth_spec, eth_mask, NULL, NULL); +} + +/** + * Destroy a flow. + * + * @see rte_flow_destroy() + * @see rte_flow_ops + */ +int +mlx5_flow_destroy(struct rte_eth_dev *dev, + struct rte_flow *flow, + struct rte_flow_error *error __rte_unused) +{ + struct mlx5_priv *priv = dev->data->dev_private; + + flow_list_destroy(dev, &priv->flows, (uintptr_t)(void *)flow); + return 0; +} + +/** + * Destroy all flows. + * + * @see rte_flow_flush() + * @see rte_flow_ops + */ +int +mlx5_flow_flush(struct rte_eth_dev *dev, + struct rte_flow_error *error __rte_unused) +{ + struct mlx5_priv *priv = dev->data->dev_private; + + mlx5_flow_list_flush(dev, &priv->flows, false); + return 0; +} + +/** + * Isolated mode. + * + * @see rte_flow_isolate() + * @see rte_flow_ops + */ +int +mlx5_flow_isolate(struct rte_eth_dev *dev, + int enable, + struct rte_flow_error *error) +{ + struct mlx5_priv *priv = dev->data->dev_private; + + if (dev->data->dev_started) { + rte_flow_error_set(error, EBUSY, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "port must be stopped first"); + return -rte_errno; + } + priv->isolated = !!enable; + if (enable) + dev->dev_ops = &mlx5_dev_ops_isolate; + else + dev->dev_ops = &mlx5_dev_ops; + return 0; +} + +/** + * Query a flow. 
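mlx5_flow_isolate() refuses to change the mode on a started port, so the application has to request isolated mode early. A small sketch under that assumption:

#include <rte_ethdev.h>
#include <rte_flow.h>

/* Request isolated mode before the port is started. */
static int
enable_isolated_mode(uint16_t port_id)
{
	struct rte_flow_error err;

	if (rte_flow_isolate(port_id, 1, &err) != 0)
		return -1;
	/* From now on only traffic matching explicit flow rules is received. */
	return rte_eth_dev_start(port_id);
}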
+ * + * @see rte_flow_query() + * @see rte_flow_ops + */ +static int +flow_drv_query(struct rte_eth_dev *dev, + uint32_t flow_idx, + const struct rte_flow_action *actions, + void *data, + struct rte_flow_error *error) +{ + struct mlx5_priv *priv = dev->data->dev_private; + const struct mlx5_flow_driver_ops *fops; + struct rte_flow *flow = mlx5_ipool_get(priv->sh->ipool + [MLX5_IPOOL_RTE_FLOW], + flow_idx); + enum mlx5_flow_drv_type ftype; + + if (!flow) { + return rte_flow_error_set(error, ENOENT, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "invalid flow handle"); + } + ftype = flow->drv_type; + MLX5_ASSERT(ftype > MLX5_FLOW_TYPE_MIN && ftype < MLX5_FLOW_TYPE_MAX); + fops = flow_get_drv_ops(ftype); + + return fops->query(dev, flow, actions, data, error); +} + +/** + * Query a flow. + * + * @see rte_flow_query() + * @see rte_flow_ops + */ +int +mlx5_flow_query(struct rte_eth_dev *dev, + struct rte_flow *flow, + const struct rte_flow_action *actions, + void *data, + struct rte_flow_error *error) +{ + int ret; + + ret = flow_drv_query(dev, (uintptr_t)(void *)flow, actions, data, + error); + if (ret < 0) + return ret; + return 0; +} + +/** + * Convert a flow director filter to a generic flow. + * + * @param dev + * Pointer to Ethernet device. + * @param fdir_filter + * Flow director filter to add. + * @param attributes + * Generic flow parameters structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +flow_fdir_filter_convert(struct rte_eth_dev *dev, + const struct rte_eth_fdir_filter *fdir_filter, + struct mlx5_fdir *attributes) +{ + struct mlx5_priv *priv = dev->data->dev_private; + const struct rte_eth_fdir_input *input = &fdir_filter->input; + const struct rte_eth_fdir_masks *mask = + &dev->data->dev_conf.fdir_conf.mask; + + /* Validate queue number. */ + if (fdir_filter->action.rx_queue >= priv->rxqs_n) { + DRV_LOG(ERR, "port %u invalid queue number %d", + dev->data->port_id, fdir_filter->action.rx_queue); + rte_errno = EINVAL; + return -rte_errno; + } + attributes->attr.ingress = 1; + attributes->items[0] = (struct rte_flow_item) { + .type = RTE_FLOW_ITEM_TYPE_ETH, + .spec = &attributes->l2, + .mask = &attributes->l2_mask, + }; + switch (fdir_filter->action.behavior) { + case RTE_ETH_FDIR_ACCEPT: + attributes->actions[0] = (struct rte_flow_action){ + .type = RTE_FLOW_ACTION_TYPE_QUEUE, + .conf = &attributes->queue, + }; + break; + case RTE_ETH_FDIR_REJECT: + attributes->actions[0] = (struct rte_flow_action){ + .type = RTE_FLOW_ACTION_TYPE_DROP, + }; + break; + default: + DRV_LOG(ERR, "port %u invalid behavior %d", + dev->data->port_id, + fdir_filter->action.behavior); + rte_errno = ENOTSUP; + return -rte_errno; + } + attributes->queue.index = fdir_filter->action.rx_queue; + /* Handle L3. 
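flow_drv_query() above forwards to the driver's query callback; the usual application-side use is reading a COUNT action, as in this sketch (the flow is assumed to have been created with a COUNT action in its list):

#include <inttypes.h>
#include <stdio.h>
#include <rte_flow.h>

/* Read hit/byte counters from a flow created with a COUNT action. */
static int
read_flow_counters(uint16_t port_id, struct rte_flow *flow)
{
	struct rte_flow_query_count counters = { .reset = 0 };
	const struct rte_flow_action query[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_COUNT },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error err;
	int ret;

	ret = rte_flow_query(port_id, flow, query, &counters, &err);
	if (ret != 0)
		return ret;
	if (counters.hits_set)
		printf("hits: %" PRIu64 ", bytes: %" PRIu64 "\n",
		       counters.hits, counters.bytes);
	return 0;
}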
*/ + switch (fdir_filter->input.flow_type) { + case RTE_ETH_FLOW_NONFRAG_IPV4_UDP: + case RTE_ETH_FLOW_NONFRAG_IPV4_TCP: + case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER: + attributes->l3.ipv4.hdr = (struct rte_ipv4_hdr){ + .src_addr = input->flow.ip4_flow.src_ip, + .dst_addr = input->flow.ip4_flow.dst_ip, + .time_to_live = input->flow.ip4_flow.ttl, + .type_of_service = input->flow.ip4_flow.tos, + }; + attributes->l3_mask.ipv4.hdr = (struct rte_ipv4_hdr){ + .src_addr = mask->ipv4_mask.src_ip, + .dst_addr = mask->ipv4_mask.dst_ip, + .time_to_live = mask->ipv4_mask.ttl, + .type_of_service = mask->ipv4_mask.tos, + .next_proto_id = mask->ipv4_mask.proto, + }; + attributes->items[1] = (struct rte_flow_item){ + .type = RTE_FLOW_ITEM_TYPE_IPV4, + .spec = &attributes->l3, + .mask = &attributes->l3_mask, + }; + break; + case RTE_ETH_FLOW_NONFRAG_IPV6_UDP: + case RTE_ETH_FLOW_NONFRAG_IPV6_TCP: + case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER: + attributes->l3.ipv6.hdr = (struct rte_ipv6_hdr){ + .hop_limits = input->flow.ipv6_flow.hop_limits, + .proto = input->flow.ipv6_flow.proto, + }; + + memcpy(attributes->l3.ipv6.hdr.src_addr, + input->flow.ipv6_flow.src_ip, + RTE_DIM(attributes->l3.ipv6.hdr.src_addr)); + memcpy(attributes->l3.ipv6.hdr.dst_addr, + input->flow.ipv6_flow.dst_ip, + RTE_DIM(attributes->l3.ipv6.hdr.src_addr)); + memcpy(attributes->l3_mask.ipv6.hdr.src_addr, + mask->ipv6_mask.src_ip, + RTE_DIM(attributes->l3_mask.ipv6.hdr.src_addr)); + memcpy(attributes->l3_mask.ipv6.hdr.dst_addr, + mask->ipv6_mask.dst_ip, + RTE_DIM(attributes->l3_mask.ipv6.hdr.src_addr)); + attributes->items[1] = (struct rte_flow_item){ + .type = RTE_FLOW_ITEM_TYPE_IPV6, + .spec = &attributes->l3, + .mask = &attributes->l3_mask, + }; + break; + default: + DRV_LOG(ERR, "port %u invalid flow type%d", + dev->data->port_id, fdir_filter->input.flow_type); + rte_errno = ENOTSUP; + return -rte_errno; + } + /* Handle L4. 
*/ + switch (fdir_filter->input.flow_type) { + case RTE_ETH_FLOW_NONFRAG_IPV4_UDP: + attributes->l4.udp.hdr = (struct rte_udp_hdr){ + .src_port = input->flow.udp4_flow.src_port, + .dst_port = input->flow.udp4_flow.dst_port, + }; + attributes->l4_mask.udp.hdr = (struct rte_udp_hdr){ + .src_port = mask->src_port_mask, + .dst_port = mask->dst_port_mask, + }; + attributes->items[2] = (struct rte_flow_item){ + .type = RTE_FLOW_ITEM_TYPE_UDP, + .spec = &attributes->l4, + .mask = &attributes->l4_mask, + }; + break; + case RTE_ETH_FLOW_NONFRAG_IPV4_TCP: + attributes->l4.tcp.hdr = (struct rte_tcp_hdr){ + .src_port = input->flow.tcp4_flow.src_port, + .dst_port = input->flow.tcp4_flow.dst_port, + }; + attributes->l4_mask.tcp.hdr = (struct rte_tcp_hdr){ + .src_port = mask->src_port_mask, + .dst_port = mask->dst_port_mask, + }; + attributes->items[2] = (struct rte_flow_item){ + .type = RTE_FLOW_ITEM_TYPE_TCP, + .spec = &attributes->l4, + .mask = &attributes->l4_mask, + }; + break; + case RTE_ETH_FLOW_NONFRAG_IPV6_UDP: + attributes->l4.udp.hdr = (struct rte_udp_hdr){ + .src_port = input->flow.udp6_flow.src_port, + .dst_port = input->flow.udp6_flow.dst_port, + }; + attributes->l4_mask.udp.hdr = (struct rte_udp_hdr){ + .src_port = mask->src_port_mask, + .dst_port = mask->dst_port_mask, + }; + attributes->items[2] = (struct rte_flow_item){ + .type = RTE_FLOW_ITEM_TYPE_UDP, + .spec = &attributes->l4, + .mask = &attributes->l4_mask, + }; + break; + case RTE_ETH_FLOW_NONFRAG_IPV6_TCP: + attributes->l4.tcp.hdr = (struct rte_tcp_hdr){ + .src_port = input->flow.tcp6_flow.src_port, + .dst_port = input->flow.tcp6_flow.dst_port, + }; + attributes->l4_mask.tcp.hdr = (struct rte_tcp_hdr){ + .src_port = mask->src_port_mask, + .dst_port = mask->dst_port_mask, + }; + attributes->items[2] = (struct rte_flow_item){ + .type = RTE_FLOW_ITEM_TYPE_TCP, + .spec = &attributes->l4, + .mask = &attributes->l4_mask, + }; + break; + case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER: + case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER: + break; + default: + DRV_LOG(ERR, "port %u invalid flow type%d", + dev->data->port_id, fdir_filter->input.flow_type); + rte_errno = ENOTSUP; + return -rte_errno; + } + return 0; +} + +#define FLOW_FDIR_CMP(f1, f2, fld) \ + memcmp(&(f1)->fld, &(f2)->fld, sizeof(f1->fld)) + +/** + * Compare two FDIR flows. If items and actions are identical, the two flows are + * regarded as same. + * + * @param dev + * Pointer to Ethernet device. + * @param f1 + * FDIR flow to compare. + * @param f2 + * FDIR flow to compare. + * + * @return + * Zero on match, 1 otherwise. + */ +static int +flow_fdir_cmp(const struct mlx5_fdir *f1, const struct mlx5_fdir *f2) +{ + if (FLOW_FDIR_CMP(f1, f2, attr) || + FLOW_FDIR_CMP(f1, f2, l2) || + FLOW_FDIR_CMP(f1, f2, l2_mask) || + FLOW_FDIR_CMP(f1, f2, l3) || + FLOW_FDIR_CMP(f1, f2, l3_mask) || + FLOW_FDIR_CMP(f1, f2, l4) || + FLOW_FDIR_CMP(f1, f2, l4_mask) || + FLOW_FDIR_CMP(f1, f2, actions[0].type)) + return 1; + if (f1->actions[0].type == RTE_FLOW_ACTION_TYPE_QUEUE && + FLOW_FDIR_CMP(f1, f2, queue)) + return 1; + return 0; +} + +/** + * Search device flow list to find out a matched FDIR flow. + * + * @param dev + * Pointer to Ethernet device. + * @param fdir_flow + * FDIR flow to lookup. + * + * @return + * Index of flow if found, 0 otherwise. 
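+ *
+ * For illustration, the lookup relies on flow_fdir_cmp() above, whose
+ * FLOW_FDIR_CMP(f1, f2, fld) helper expands to a plain memcmp of the named
+ * field, e.g.:
+ *
+ *   FLOW_FDIR_CMP(f1, f2, l3)
+ *       -> memcmp(&(f1)->l3, &(f2)->l3, sizeof(f1->l3))
+ *
+ * so two filters match only when every compared field is byte-for-byte
+ * identical (attributes, L2/L3/L4 specs and masks, the fate action type,
+ * plus the queue index when that action is QUEUE).
+ *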
+ */ +static uint32_t +flow_fdir_filter_lookup(struct rte_eth_dev *dev, struct mlx5_fdir *fdir_flow) +{ + struct mlx5_priv *priv = dev->data->dev_private; + uint32_t flow_idx = 0; + struct mlx5_fdir_flow *priv_fdir_flow = NULL; + + MLX5_ASSERT(fdir_flow); + LIST_FOREACH(priv_fdir_flow, &priv->fdir_flows, next) { + if (!flow_fdir_cmp(priv_fdir_flow->fdir, fdir_flow)) { + DRV_LOG(DEBUG, "port %u found FDIR flow %u", + dev->data->port_id, flow_idx); + flow_idx = priv_fdir_flow->rix_flow; + break; + } + } + return flow_idx; +} + +/** + * Add new flow director filter and store it in list. + * + * @param dev + * Pointer to Ethernet device. + * @param fdir_filter + * Flow director filter to add. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +flow_fdir_filter_add(struct rte_eth_dev *dev, + const struct rte_eth_fdir_filter *fdir_filter) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_fdir *fdir_flow; + struct rte_flow *flow; + struct mlx5_fdir_flow *priv_fdir_flow = NULL; + uint32_t flow_idx; + int ret; + + fdir_flow = rte_zmalloc(__func__, sizeof(*fdir_flow), 0); + if (!fdir_flow) { + rte_errno = ENOMEM; + return -rte_errno; + } + ret = flow_fdir_filter_convert(dev, fdir_filter, fdir_flow); + if (ret) + goto error; + flow_idx = flow_fdir_filter_lookup(dev, fdir_flow); + if (flow_idx) { + rte_errno = EEXIST; + goto error; + } + priv_fdir_flow = rte_zmalloc(__func__, sizeof(struct mlx5_fdir_flow), + 0); + if (!priv_fdir_flow) { + rte_errno = ENOMEM; + goto error; + } + flow_idx = flow_list_create(dev, &priv->flows, &fdir_flow->attr, + fdir_flow->items, fdir_flow->actions, true, + NULL); + flow = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], flow_idx); + if (!flow) + goto error; + flow->fdir = 1; + priv_fdir_flow->fdir = fdir_flow; + priv_fdir_flow->rix_flow = flow_idx; + LIST_INSERT_HEAD(&priv->fdir_flows, priv_fdir_flow, next); + DRV_LOG(DEBUG, "port %u created FDIR flow %p", + dev->data->port_id, (void *)flow); + return 0; +error: + rte_free(priv_fdir_flow); + rte_free(fdir_flow); + return -rte_errno; +} + +/** + * Delete specific filter. + * + * @param dev + * Pointer to Ethernet device. + * @param fdir_filter + * Filter to be deleted. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +flow_fdir_filter_delete(struct rte_eth_dev *dev, + const struct rte_eth_fdir_filter *fdir_filter) +{ + struct mlx5_priv *priv = dev->data->dev_private; + uint32_t flow_idx; + struct mlx5_fdir fdir_flow = { + .attr.group = 0, + }; + struct mlx5_fdir_flow *priv_fdir_flow = NULL; + int ret; + + ret = flow_fdir_filter_convert(dev, fdir_filter, &fdir_flow); + if (ret) + return -rte_errno; + LIST_FOREACH(priv_fdir_flow, &priv->fdir_flows, next) { + /* Find the fdir in priv list */ + if (!flow_fdir_cmp(priv_fdir_flow->fdir, &fdir_flow)) + break; + } + if (!priv_fdir_flow) + return 0; + LIST_REMOVE(priv_fdir_flow, next); + flow_idx = priv_fdir_flow->rix_flow; + flow_list_destroy(dev, &priv->flows, flow_idx); + rte_free(priv_fdir_flow->fdir); + rte_free(priv_fdir_flow); + DRV_LOG(DEBUG, "port %u deleted FDIR flow %u", + dev->data->port_id, flow_idx); + return 0; +} + +/** + * Update queue for specific filter. + * + * @param dev + * Pointer to Ethernet device. + * @param fdir_filter + * Filter to be updated. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. 
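+ *
+ * Illustrative sketch (not part of the driver sources): the add/update/
+ * delete handlers above and below are reached from the application through
+ * the legacy filter API, e.g. for a hypothetical IPv4/UDP filter steering
+ * destination port 4789 to Rx queue 3:
+ *
+ *   struct rte_eth_fdir_filter f = { 0 };
+ *
+ *   f.input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
+ *   f.input.flow.udp4_flow.dst_port = rte_cpu_to_be_16(4789);
+ *   f.action.behavior = RTE_ETH_FDIR_ACCEPT;
+ *   f.action.rx_queue = 3;
+ *   int ret = rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
+ *                                     RTE_ETH_FILTER_ADD, &f);
+ *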
+ */ +static int +flow_fdir_filter_update(struct rte_eth_dev *dev, + const struct rte_eth_fdir_filter *fdir_filter) +{ + int ret; + + ret = flow_fdir_filter_delete(dev, fdir_filter); + if (ret) + return ret; + return flow_fdir_filter_add(dev, fdir_filter); +} + +/** + * Flush all filters. + * + * @param dev + * Pointer to Ethernet device. + */ +static void +flow_fdir_filter_flush(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_fdir_flow *priv_fdir_flow = NULL; + + while (!LIST_EMPTY(&priv->fdir_flows)) { + priv_fdir_flow = LIST_FIRST(&priv->fdir_flows); + LIST_REMOVE(priv_fdir_flow, next); + flow_list_destroy(dev, &priv->flows, priv_fdir_flow->rix_flow); + rte_free(priv_fdir_flow->fdir); + rte_free(priv_fdir_flow); + } +} + +/** + * Get flow director information. + * + * @param dev + * Pointer to Ethernet device. + * @param[out] fdir_info + * Resulting flow director information. + */ +static void +flow_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info) +{ + struct rte_eth_fdir_masks *mask = + &dev->data->dev_conf.fdir_conf.mask; + + fdir_info->mode = dev->data->dev_conf.fdir_conf.mode; + fdir_info->guarant_spc = 0; + rte_memcpy(&fdir_info->mask, mask, sizeof(fdir_info->mask)); + fdir_info->max_flexpayload = 0; + fdir_info->flow_types_mask[0] = 0; + fdir_info->flex_payload_unit = 0; + fdir_info->max_flex_payload_segment_num = 0; + fdir_info->flex_payload_limit = 0; + memset(&fdir_info->flex_conf, 0, sizeof(fdir_info->flex_conf)); +} + +/** + * Deal with flow director operations. + * + * @param dev + * Pointer to Ethernet device. + * @param filter_op + * Operation to perform. + * @param arg + * Pointer to operation-specific structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +flow_fdir_ctrl_func(struct rte_eth_dev *dev, enum rte_filter_op filter_op, + void *arg) +{ + enum rte_fdir_mode fdir_mode = + dev->data->dev_conf.fdir_conf.mode; + + if (filter_op == RTE_ETH_FILTER_NOP) + return 0; + if (fdir_mode != RTE_FDIR_MODE_PERFECT && + fdir_mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) { + DRV_LOG(ERR, "port %u flow director mode %d not supported", + dev->data->port_id, fdir_mode); + rte_errno = EINVAL; + return -rte_errno; + } + switch (filter_op) { + case RTE_ETH_FILTER_ADD: + return flow_fdir_filter_add(dev, arg); + case RTE_ETH_FILTER_UPDATE: + return flow_fdir_filter_update(dev, arg); + case RTE_ETH_FILTER_DELETE: + return flow_fdir_filter_delete(dev, arg); + case RTE_ETH_FILTER_FLUSH: + flow_fdir_filter_flush(dev); + break; + case RTE_ETH_FILTER_INFO: + flow_fdir_info_get(dev, arg); + break; + default: + DRV_LOG(DEBUG, "port %u unknown operation %u", + dev->data->port_id, filter_op); + rte_errno = EINVAL; + return -rte_errno; + } + return 0; +} + +/** + * Manage filter operations. + * + * @param dev + * Pointer to Ethernet device structure. + * @param filter_type + * Filter type. + * @param filter_op + * Operation to perform. + * @param arg + * Pointer to operation-specific structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. 
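+ *
+ * Illustrative sketch (not part of the driver sources): querying the flow
+ * director configuration goes through the same entry point, e.g.:
+ *
+ *   struct rte_eth_fdir_info info;
+ *
+ *   memset(&info, 0, sizeof(info));
+ *   int ret = rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
+ *                                     RTE_ETH_FILTER_INFO, &info);
+ *   // On success, info.mode and info.mask mirror dev_conf.fdir_conf.
+ *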
+ */ +int +mlx5_dev_filter_ctrl(struct rte_eth_dev *dev, + enum rte_filter_type filter_type, + enum rte_filter_op filter_op, + void *arg) +{ + switch (filter_type) { + case RTE_ETH_FILTER_GENERIC: + if (filter_op != RTE_ETH_FILTER_GET) { + rte_errno = EINVAL; + return -rte_errno; + } + *(const void **)arg = &mlx5_flow_ops; + return 0; + case RTE_ETH_FILTER_FDIR: + return flow_fdir_ctrl_func(dev, filter_op, arg); + default: + DRV_LOG(ERR, "port %u filter type (%d) not supported", + dev->data->port_id, filter_type); + rte_errno = ENOTSUP; + return -rte_errno; + } + return 0; +} + +/** + * Create the needed meter and suffix tables. + * + * @param[in] dev + * Pointer to Ethernet device. + * @param[in] fm + * Pointer to the flow meter. + * + * @return + * Pointer to table set on success, NULL otherwise. + */ +struct mlx5_meter_domains_infos * +mlx5_flow_create_mtr_tbls(struct rte_eth_dev *dev, + const struct mlx5_flow_meter *fm) +{ + const struct mlx5_flow_driver_ops *fops; + + fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV); + return fops->create_mtr_tbls(dev, fm); +} + +/** + * Destroy the meter table set. + * + * @param[in] dev + * Pointer to Ethernet device. + * @param[in] tbl + * Pointer to the meter table set. + * + * @return + * 0 on success. + */ +int +mlx5_flow_destroy_mtr_tbls(struct rte_eth_dev *dev, + struct mlx5_meter_domains_infos *tbls) +{ + const struct mlx5_flow_driver_ops *fops; + + fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV); + return fops->destroy_mtr_tbls(dev, tbls); +} + +/** + * Create policer rules. + * + * @param[in] dev + * Pointer to Ethernet device. + * @param[in] fm + * Pointer to flow meter structure. + * @param[in] attr + * Pointer to flow attributes. + * + * @return + * 0 on success, -1 otherwise. + */ +int +mlx5_flow_create_policer_rules(struct rte_eth_dev *dev, + struct mlx5_flow_meter *fm, + const struct rte_flow_attr *attr) +{ + const struct mlx5_flow_driver_ops *fops; + + fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV); + return fops->create_policer_rules(dev, fm, attr); +} + +/** + * Destroy policer rules. + * + * @param[in] fm + * Pointer to flow meter structure. + * @param[in] attr + * Pointer to flow attributes. + * + * @return + * 0 on success, -1 otherwise. + */ +int +mlx5_flow_destroy_policer_rules(struct rte_eth_dev *dev, + struct mlx5_flow_meter *fm, + const struct rte_flow_attr *attr) +{ + const struct mlx5_flow_driver_ops *fops; + + fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV); + return fops->destroy_policer_rules(dev, fm, attr); +} + +/** + * Allocate a counter. + * + * @param[in] dev + * Pointer to Ethernet device structure. + * + * @return + * Index to allocated counter on success, 0 otherwise. + */ +uint32_t +mlx5_counter_alloc(struct rte_eth_dev *dev) +{ + const struct mlx5_flow_driver_ops *fops; + struct rte_flow_attr attr = { .transfer = 0 }; + + if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) { + fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV); + return fops->counter_alloc(dev); + } + DRV_LOG(ERR, + "port %u counter allocate is not supported.", + dev->data->port_id); + return 0; +} + +/** + * Free a counter. + * + * @param[in] dev + * Pointer to Ethernet device structure. + * @param[in] cnt + * Index to counter to be free. 
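+ *
+ * Illustrative sketch (not part of the driver sources): together with
+ * mlx5_counter_alloc() above and mlx5_counter_query() below this forms the
+ * PMD-internal counter life cycle (DV driver only):
+ *
+ *   uint32_t cnt = mlx5_counter_alloc(dev);   // 0 on failure
+ *   uint64_t pkts = 0, bytes = 0;
+ *
+ *   if (cnt && !mlx5_counter_query(dev, cnt, false, &pkts, &bytes)) {
+ *       // pkts/bytes now hold the accumulated hit statistics.
+ *   }
+ *   if (cnt)
+ *       mlx5_counter_free(dev, cnt);
+ *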
+ */ +void +mlx5_counter_free(struct rte_eth_dev *dev, uint32_t cnt) +{ + const struct mlx5_flow_driver_ops *fops; + struct rte_flow_attr attr = { .transfer = 0 }; + + if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) { + fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV); + fops->counter_free(dev, cnt); + return; + } + DRV_LOG(ERR, + "port %u counter free is not supported.", + dev->data->port_id); +} + +/** + * Query counter statistics. + * + * @param[in] dev + * Pointer to Ethernet device structure. + * @param[in] cnt + * Index to counter to query. + * @param[in] clear + * Set to clear counter statistics. + * @param[out] pkts + * The counter hits packets number to save. + * @param[out] bytes + * The counter hits bytes number to save. + * + * @return + * 0 on success, a negative errno value otherwise. + */ +int +mlx5_counter_query(struct rte_eth_dev *dev, uint32_t cnt, + bool clear, uint64_t *pkts, uint64_t *bytes) +{ + const struct mlx5_flow_driver_ops *fops; + struct rte_flow_attr attr = { .transfer = 0 }; + + if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) { + fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV); + return fops->counter_query(dev, cnt, clear, pkts, bytes); + } + DRV_LOG(ERR, + "port %u counter query is not supported.", + dev->data->port_id); + return -ENOTSUP; +} + +#define MLX5_POOL_QUERY_FREQ_US 1000000 + +/** + * Get number of all validate pools. + * + * @param[in] sh + * Pointer to mlx5_ibv_shared object. + * + * @return + * The number of all validate pools. + */ +static uint32_t +mlx5_get_all_valid_pool_count(struct mlx5_ibv_shared *sh) +{ + int i; + uint32_t pools_n = 0; + + for (i = 0; i < MLX5_CCONT_TYPE_MAX; ++i) + pools_n += rte_atomic16_read(&sh->cmng.ccont[i].n_valid); + return pools_n; +} + +/** + * Set the periodic procedure for triggering asynchronous batch queries for all + * the counter pools. + * + * @param[in] sh + * Pointer to mlx5_ibv_shared object. + */ +void +mlx5_set_query_alarm(struct mlx5_ibv_shared *sh) +{ + uint32_t pools_n, us; + + pools_n = mlx5_get_all_valid_pool_count(sh); + us = MLX5_POOL_QUERY_FREQ_US / pools_n; + DRV_LOG(DEBUG, "Set alarm for %u pools each %u us", pools_n, us); + if (rte_eal_alarm_set(us, mlx5_flow_query_alarm, sh)) { + sh->cmng.query_thread_on = 0; + DRV_LOG(ERR, "Cannot reinitialize query alarm"); + } else { + sh->cmng.query_thread_on = 1; + } +} + +/** + * The periodic procedure for triggering asynchronous batch queries for all the + * counter pools. This function is probably called by the host thread. + * + * @param[in] arg + * The parameter for the alarm process. + */ +void +mlx5_flow_query_alarm(void *arg) +{ + struct mlx5_ibv_shared *sh = arg; + struct mlx5_devx_obj *dcs; + uint16_t offset; + int ret; + uint8_t batch = sh->cmng.batch; + uint8_t age = sh->cmng.age; + uint16_t pool_index = sh->cmng.pool_index; + struct mlx5_pools_container *cont; + struct mlx5_flow_counter_pool *pool; + int cont_loop = MLX5_CCONT_TYPE_MAX; + + if (sh->cmng.pending_queries >= MLX5_MAX_PENDING_QUERIES) + goto set_alarm; +next_container: + cont = MLX5_CNT_CONTAINER(sh, batch, age); + rte_spinlock_lock(&cont->resize_sl); + if (!cont->pools) { + rte_spinlock_unlock(&cont->resize_sl); + /* Check if all the containers are empty. 
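+ *
+ * Worked example (illustrative): mlx5_set_query_alarm() above divides
+ * MLX5_POOL_QUERY_FREQ_US by the number of valid pools, so each pool is
+ * revisited roughly once per second regardless of the pool count:
+ *
+ *   pools_n = 4  ->  us = 1000000 / 4 = 250000
+ *
+ * i.e. the alarm fires every 250 ms and queries one pool per firing.
+ *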
*/ + if (unlikely(--cont_loop == 0)) + goto set_alarm; + batch ^= 0x1; + pool_index = 0; + if (batch == 0 && pool_index == 0) { + age ^= 0x1; + sh->cmng.batch = batch; + sh->cmng.age = age; + } + goto next_container; + } + pool = cont->pools[pool_index]; + rte_spinlock_unlock(&cont->resize_sl); + if (pool->raw_hw) + /* There is a pool query in progress. */ + goto set_alarm; + pool->raw_hw = + LIST_FIRST(&sh->cmng.free_stat_raws); + if (!pool->raw_hw) + /* No free counter statistics raw memory. */ + goto set_alarm; + dcs = (struct mlx5_devx_obj *)(uintptr_t)rte_atomic64_read + (&pool->a64_dcs); + offset = batch ? 0 : dcs->id % MLX5_COUNTERS_PER_POOL; + /* + * Identify the counters released between query trigger and query + * handle more effiecntly. The counter released in this gap period + * should wait for a new round of query as the new arrived packets + * will not be taken into account. + */ + rte_atomic64_add(&pool->start_query_gen, 1); + ret = mlx5_devx_cmd_flow_counter_query(dcs, 0, MLX5_COUNTERS_PER_POOL - + offset, NULL, NULL, + pool->raw_hw->mem_mng->dm->id, + (void *)(uintptr_t) + (pool->raw_hw->data + offset), + sh->devx_comp, + (uint64_t)(uintptr_t)pool); + if (ret) { + rte_atomic64_sub(&pool->start_query_gen, 1); + DRV_LOG(ERR, "Failed to trigger asynchronous query for dcs ID" + " %d", pool->min_dcs->id); + pool->raw_hw = NULL; + goto set_alarm; + } + pool->raw_hw->min_dcs_id = dcs->id; + LIST_REMOVE(pool->raw_hw, next); + sh->cmng.pending_queries++; + pool_index++; + if (pool_index >= rte_atomic16_read(&cont->n_valid)) { + batch ^= 0x1; + pool_index = 0; + if (batch == 0 && pool_index == 0) + age ^= 0x1; + } +set_alarm: + sh->cmng.batch = batch; + sh->cmng.pool_index = pool_index; + sh->cmng.age = age; + mlx5_set_query_alarm(sh); +} + +/** + * Check and callback event for new aged flow in the counter pool + * + * @param[in] sh + * Pointer to mlx5_ibv_shared object. + * @param[in] pool + * Pointer to Current counter pool. + */ +static void +mlx5_flow_aging_check(struct mlx5_ibv_shared *sh, + struct mlx5_flow_counter_pool *pool) +{ + struct mlx5_priv *priv; + struct mlx5_flow_counter *cnt; + struct mlx5_age_info *age_info; + struct mlx5_age_param *age_param; + struct mlx5_counter_stats_raw *cur = pool->raw_hw; + struct mlx5_counter_stats_raw *prev = pool->raw; + uint16_t curr = rte_rdtsc() / (rte_get_tsc_hz() / 10); + uint32_t i; + + for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) { + cnt = MLX5_POOL_GET_CNT(pool, i); + age_param = MLX5_CNT_TO_AGE(cnt); + if (rte_atomic16_read(&age_param->state) != AGE_CANDIDATE) + continue; + if (cur->data[i].hits != prev->data[i].hits) { + age_param->expire = curr + age_param->timeout; + continue; + } + if ((uint16_t)(curr - age_param->expire) >= (UINT16_MAX / 2)) + continue; + /** + * Hold the lock first, or if between the + * state AGE_TMOUT and tailq operation the + * release happened, the release procedure + * may delete a non-existent tailq node. + */ + priv = rte_eth_devices[age_param->port_id].data->dev_private; + age_info = GET_PORT_AGE_INFO(priv); + rte_spinlock_lock(&age_info->aged_sl); + /* If the cpmset fails, release happens. 
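+ *
+ * Worked example (illustrative): "curr" above is a 16-bit timestamp in
+ * 0.1 second units, so the expiry test is wrap-around safe; a counter is
+ * treated as aged out only when curr has passed expire by less than half
+ * of the 16-bit range:
+ *
+ *   curr = 10, expire = 65530  ->  (uint16_t)(10 - 65530) = 16
+ *                                  < UINT16_MAX / 2   ->  aged out
+ *   curr = 10, expire = 200    ->  (uint16_t)(10 - 200) = 65346
+ *                                  >= UINT16_MAX / 2  ->  still alive
+ *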
*/ + if (rte_atomic16_cmpset((volatile uint16_t *) + &age_param->state, + AGE_CANDIDATE, + AGE_TMOUT) == + AGE_CANDIDATE) { + TAILQ_INSERT_TAIL(&age_info->aged_counters, cnt, next); + MLX5_AGE_SET(age_info, MLX5_AGE_EVENT_NEW); + } + rte_spinlock_unlock(&age_info->aged_sl); + } + for (i = 0; i < sh->max_port; i++) { + age_info = &sh->port[i].age_info; + if (!MLX5_AGE_GET(age_info, MLX5_AGE_EVENT_NEW)) + continue; + if (MLX5_AGE_GET(age_info, MLX5_AGE_TRIGGER)) + _rte_eth_dev_callback_process + (&rte_eth_devices[sh->port[i].devx_ih_port_id], + RTE_ETH_EVENT_FLOW_AGED, NULL); + age_info->flags = 0; + } +} + +/** + * Handler for the HW respond about ready values from an asynchronous batch + * query. This function is probably called by the host thread. + * + * @param[in] sh + * The pointer to the shared IB device context. + * @param[in] async_id + * The Devx async ID. + * @param[in] status + * The status of the completion. + */ +void +mlx5_flow_async_pool_query_handle(struct mlx5_ibv_shared *sh, + uint64_t async_id, int status) +{ + struct mlx5_flow_counter_pool *pool = + (struct mlx5_flow_counter_pool *)(uintptr_t)async_id; + struct mlx5_counter_stats_raw *raw_to_free; + + if (unlikely(status)) { + rte_atomic64_sub(&pool->start_query_gen, 1); + raw_to_free = pool->raw_hw; + } else { + raw_to_free = pool->raw; + if (IS_AGE_POOL(pool)) + mlx5_flow_aging_check(sh, pool); + rte_spinlock_lock(&pool->sl); + pool->raw = pool->raw_hw; + rte_spinlock_unlock(&pool->sl); + MLX5_ASSERT(rte_atomic64_read(&pool->end_query_gen) + 1 == + rte_atomic64_read(&pool->start_query_gen)); + rte_atomic64_set(&pool->end_query_gen, + rte_atomic64_read(&pool->start_query_gen)); + /* Be sure the new raw counters data is updated in memory. */ + rte_cio_wmb(); + } + LIST_INSERT_HEAD(&sh->cmng.free_stat_raws, raw_to_free, next); + pool->raw_hw = NULL; + sh->cmng.pending_queries--; +} + +/** + * Translate the rte_flow group index to HW table value. + * + * @param[in] attributes + * Pointer to flow attributes + * @param[in] external + * Value is part of flow rule created by request external to PMD. + * @param[in] group + * rte_flow group index value. + * @param[out] fdb_def_rule + * Whether fdb jump to table 1 is configured. + * @param[out] table + * HW table value. + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_flow_group_to_table(const struct rte_flow_attr *attributes, bool external, + uint32_t group, bool fdb_def_rule, uint32_t *table, + struct rte_flow_error *error) +{ + if (attributes->transfer && external && fdb_def_rule) { + if (group == UINT32_MAX) + return rte_flow_error_set + (error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_GROUP, + NULL, + "group index not supported"); + *table = group + 1; + } else { + *table = group; + } + return 0; +} + +/** + * Discover availability of metadata reg_c's. + * + * Iteratively use test flows to check availability. + * + * @param[in] dev + * Pointer to the Ethernet device structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_flow_discover_mreg_c(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_dev_config *config = &priv->config; + enum modify_reg idx; + int n = 0; + + /* reg_c[0] and reg_c[1] are reserved. */ + config->flow_mreg_c[n++] = REG_C_0; + config->flow_mreg_c[n++] = REG_C_1; + /* Discover availability of other reg_c's. 
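+ *
+ * Illustrative example (hypothetical device): if the copy test below
+ * succeeds for REG_C_2..REG_C_4 only, the discovery loop ends with
+ *
+ *   config->flow_mreg_c[] = { REG_C_0, REG_C_1, REG_C_2, REG_C_3,
+ *                             REG_C_4, REG_NONE, ... };
+ *
+ * i.e. the two reserved registers first, then every usable reg_c found,
+ * padded with REG_NONE up to MLX5_MREG_C_NUM entries.
+ *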
*/ + for (idx = REG_C_2; idx <= REG_C_7; ++idx) { + struct rte_flow_attr attr = { + .group = MLX5_FLOW_MREG_CP_TABLE_GROUP, + .priority = MLX5_FLOW_PRIO_RSVD, + .ingress = 1, + }; + struct rte_flow_item items[] = { + [0] = { + .type = RTE_FLOW_ITEM_TYPE_END, + }, + }; + struct rte_flow_action actions[] = { + [0] = { + .type = (enum rte_flow_action_type) + MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG, + .conf = &(struct mlx5_flow_action_copy_mreg){ + .src = REG_C_1, + .dst = idx, + }, + }, + [1] = { + .type = RTE_FLOW_ACTION_TYPE_JUMP, + .conf = &(struct rte_flow_action_jump){ + .group = MLX5_FLOW_MREG_ACT_TABLE_GROUP, + }, + }, + [2] = { + .type = RTE_FLOW_ACTION_TYPE_END, + }, + }; + uint32_t flow_idx; + struct rte_flow *flow; + struct rte_flow_error error; + + if (!config->dv_flow_en) + break; + /* Create internal flow, validation skips copy action. */ + flow_idx = flow_list_create(dev, NULL, &attr, items, + actions, false, &error); + flow = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], + flow_idx); + if (!flow) + continue; + if (dev->data->dev_started || !flow_drv_apply(dev, flow, NULL)) + config->flow_mreg_c[n++] = idx; + flow_list_destroy(dev, NULL, flow_idx); + } + for (; n < MLX5_MREG_C_NUM; ++n) + config->flow_mreg_c[n] = REG_NONE; + return 0; +} + +/** + * Dump flow raw hw data to file + * + * @param[in] dev + * The pointer to Ethernet device. + * @param[in] file + * A pointer to a file for output. + * @param[out] error + * Perform verbose error reporting if not NULL. PMDs initialize this + * structure in case of error only. + * @return + * 0 on success, a nagative value otherwise. + */ +int +mlx5_flow_dev_dump(struct rte_eth_dev *dev, + FILE *file, + struct rte_flow_error *error __rte_unused) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_ibv_shared *sh = priv->sh; + + return mlx5_devx_cmd_flow_dump(sh->fdb_domain, sh->rx_domain, + sh->tx_domain, file); +} + +/** + * Get aged-out flows. + * + * @param[in] dev + * Pointer to the Ethernet device structure. + * @param[in] context + * The address of an array of pointers to the aged-out flows contexts. + * @param[in] nb_countexts + * The length of context array pointers. + * @param[out] error + * Perform verbose error reporting if not NULL. Initialized in case of + * error only. + * + * @return + * how many contexts get in success, otherwise negative errno value. + * if nb_contexts is 0, return the amount of all aged contexts. + * if nb_contexts is not 0 , return the amount of aged flows reported + * in the context array. + */ +int +mlx5_flow_get_aged_flows(struct rte_eth_dev *dev, void **contexts, + uint32_t nb_contexts, struct rte_flow_error *error) +{ + const struct mlx5_flow_driver_ops *fops; + struct rte_flow_attr attr = { .transfer = 0 }; + + if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) { + fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV); + return fops->get_aged_flows(dev, contexts, nb_contexts, + error); + } + DRV_LOG(ERR, + "port %u get aged flows is not supported.", + dev->data->port_id); + return -ENOTSUP; +} diff --git a/src/spdk/dpdk/drivers/net/mlx5/mlx5_flow.h b/src/spdk/dpdk/drivers/net/mlx5/mlx5_flow.h new file mode 100644 index 000000000..2c9667756 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/mlx5/mlx5_flow.h @@ -0,0 +1,1034 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2018 Mellanox Technologies, Ltd + */ + +#ifndef RTE_PMD_MLX5_FLOW_H_ +#define RTE_PMD_MLX5_FLOW_H_ + +#include +#include +#include +#include +#include + +/* Verbs header. 
*/ +/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */ +#ifdef PEDANTIC +#pragma GCC diagnostic ignored "-Wpedantic" +#endif +#include +#ifdef PEDANTIC +#pragma GCC diagnostic error "-Wpedantic" +#endif + +#include +#include +#include + +#include + +#include "mlx5.h" + +/* Private rte flow items. */ +enum mlx5_rte_flow_item_type { + MLX5_RTE_FLOW_ITEM_TYPE_END = INT_MIN, + MLX5_RTE_FLOW_ITEM_TYPE_TAG, + MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE, + MLX5_RTE_FLOW_ITEM_TYPE_VLAN, +}; + +/* Private (internal) rte flow actions. */ +enum mlx5_rte_flow_action_type { + MLX5_RTE_FLOW_ACTION_TYPE_END = INT_MIN, + MLX5_RTE_FLOW_ACTION_TYPE_TAG, + MLX5_RTE_FLOW_ACTION_TYPE_MARK, + MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG, +}; + +/* Matches on selected register. */ +struct mlx5_rte_flow_item_tag { + enum modify_reg id; + uint32_t data; +}; + +/* Modify selected register. */ +struct mlx5_rte_flow_action_set_tag { + enum modify_reg id; + uint32_t data; +}; + +struct mlx5_flow_action_copy_mreg { + enum modify_reg dst; + enum modify_reg src; +}; + +/* Matches on source queue. */ +struct mlx5_rte_flow_item_tx_queue { + uint32_t queue; +}; + +/* Feature name to allocate metadata register. */ +enum mlx5_feature_name { + MLX5_HAIRPIN_RX, + MLX5_HAIRPIN_TX, + MLX5_METADATA_RX, + MLX5_METADATA_TX, + MLX5_METADATA_FDB, + MLX5_FLOW_MARK, + MLX5_APP_TAG, + MLX5_COPY_MARK, + MLX5_MTR_COLOR, + MLX5_MTR_SFX, +}; + +/* Pattern outer Layer bits. */ +#define MLX5_FLOW_LAYER_OUTER_L2 (1u << 0) +#define MLX5_FLOW_LAYER_OUTER_L3_IPV4 (1u << 1) +#define MLX5_FLOW_LAYER_OUTER_L3_IPV6 (1u << 2) +#define MLX5_FLOW_LAYER_OUTER_L4_UDP (1u << 3) +#define MLX5_FLOW_LAYER_OUTER_L4_TCP (1u << 4) +#define MLX5_FLOW_LAYER_OUTER_VLAN (1u << 5) + +/* Pattern inner Layer bits. */ +#define MLX5_FLOW_LAYER_INNER_L2 (1u << 6) +#define MLX5_FLOW_LAYER_INNER_L3_IPV4 (1u << 7) +#define MLX5_FLOW_LAYER_INNER_L3_IPV6 (1u << 8) +#define MLX5_FLOW_LAYER_INNER_L4_UDP (1u << 9) +#define MLX5_FLOW_LAYER_INNER_L4_TCP (1u << 10) +#define MLX5_FLOW_LAYER_INNER_VLAN (1u << 11) + +/* Pattern tunnel Layer bits. */ +#define MLX5_FLOW_LAYER_VXLAN (1u << 12) +#define MLX5_FLOW_LAYER_VXLAN_GPE (1u << 13) +#define MLX5_FLOW_LAYER_GRE (1u << 14) +#define MLX5_FLOW_LAYER_MPLS (1u << 15) +/* List of tunnel Layer bits continued below. */ + +/* General pattern items bits. */ +#define MLX5_FLOW_ITEM_METADATA (1u << 16) +#define MLX5_FLOW_ITEM_PORT_ID (1u << 17) +#define MLX5_FLOW_ITEM_TAG (1u << 18) +#define MLX5_FLOW_ITEM_MARK (1u << 19) + +/* Pattern MISC bits. */ +#define MLX5_FLOW_LAYER_ICMP (1u << 20) +#define MLX5_FLOW_LAYER_ICMP6 (1u << 21) +#define MLX5_FLOW_LAYER_GRE_KEY (1u << 22) + +/* Pattern tunnel Layer bits (continued). */ +#define MLX5_FLOW_LAYER_IPIP (1u << 23) +#define MLX5_FLOW_LAYER_IPV6_ENCAP (1u << 24) +#define MLX5_FLOW_LAYER_NVGRE (1u << 25) +#define MLX5_FLOW_LAYER_GENEVE (1u << 26) + +/* Queue items. */ +#define MLX5_FLOW_ITEM_TX_QUEUE (1u << 27) + +/* Pattern tunnel Layer bits (continued). */ +#define MLX5_FLOW_LAYER_GTP (1u << 28) + +/* Outer Masks. */ +#define MLX5_FLOW_LAYER_OUTER_L3 \ + (MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_OUTER_L3_IPV6) +#define MLX5_FLOW_LAYER_OUTER_L4 \ + (MLX5_FLOW_LAYER_OUTER_L4_UDP | MLX5_FLOW_LAYER_OUTER_L4_TCP) +#define MLX5_FLOW_LAYER_OUTER \ + (MLX5_FLOW_LAYER_OUTER_L2 | MLX5_FLOW_LAYER_OUTER_L3 | \ + MLX5_FLOW_LAYER_OUTER_L4) + +/* Tunnel Masks. 
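+ *
+ * Illustrative example (not from the sources): these bits are OR-ed into a
+ * per-flow "layers"/"item_flags" bit-field while a pattern is parsed, e.g.
+ * an ETH / IPV4 / UDP / VXLAN / ETH pattern would accumulate:
+ *
+ *   layers = MLX5_FLOW_LAYER_OUTER_L2 |
+ *            MLX5_FLOW_LAYER_OUTER_L3_IPV4 |
+ *            MLX5_FLOW_LAYER_OUTER_L4_UDP |
+ *            MLX5_FLOW_LAYER_VXLAN |
+ *            MLX5_FLOW_LAYER_INNER_L2;
+ *
+ * so (layers & MLX5_FLOW_LAYER_TUNNEL) != 0 identifies a tunnel flow and
+ * the OUTER/INNER masks select the matching header group.
+ *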
*/ +#define MLX5_FLOW_LAYER_TUNNEL \ + (MLX5_FLOW_LAYER_VXLAN | MLX5_FLOW_LAYER_VXLAN_GPE | \ + MLX5_FLOW_LAYER_GRE | MLX5_FLOW_LAYER_NVGRE | MLX5_FLOW_LAYER_MPLS | \ + MLX5_FLOW_LAYER_IPIP | MLX5_FLOW_LAYER_IPV6_ENCAP | \ + MLX5_FLOW_LAYER_GENEVE | MLX5_FLOW_LAYER_GTP) + +/* Inner Masks. */ +#define MLX5_FLOW_LAYER_INNER_L3 \ + (MLX5_FLOW_LAYER_INNER_L3_IPV4 | MLX5_FLOW_LAYER_INNER_L3_IPV6) +#define MLX5_FLOW_LAYER_INNER_L4 \ + (MLX5_FLOW_LAYER_INNER_L4_UDP | MLX5_FLOW_LAYER_INNER_L4_TCP) +#define MLX5_FLOW_LAYER_INNER \ + (MLX5_FLOW_LAYER_INNER_L2 | MLX5_FLOW_LAYER_INNER_L3 | \ + MLX5_FLOW_LAYER_INNER_L4) + +/* Layer Masks. */ +#define MLX5_FLOW_LAYER_L2 \ + (MLX5_FLOW_LAYER_OUTER_L2 | MLX5_FLOW_LAYER_INNER_L2) +#define MLX5_FLOW_LAYER_L3_IPV4 \ + (MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_INNER_L3_IPV4) +#define MLX5_FLOW_LAYER_L3_IPV6 \ + (MLX5_FLOW_LAYER_OUTER_L3_IPV6 | MLX5_FLOW_LAYER_INNER_L3_IPV6) +#define MLX5_FLOW_LAYER_L3 \ + (MLX5_FLOW_LAYER_L3_IPV4 | MLX5_FLOW_LAYER_L3_IPV6) +#define MLX5_FLOW_LAYER_L4 \ + (MLX5_FLOW_LAYER_OUTER_L4 | MLX5_FLOW_LAYER_INNER_L4) + +/* Actions */ +#define MLX5_FLOW_ACTION_DROP (1u << 0) +#define MLX5_FLOW_ACTION_QUEUE (1u << 1) +#define MLX5_FLOW_ACTION_RSS (1u << 2) +#define MLX5_FLOW_ACTION_FLAG (1u << 3) +#define MLX5_FLOW_ACTION_MARK (1u << 4) +#define MLX5_FLOW_ACTION_COUNT (1u << 5) +#define MLX5_FLOW_ACTION_PORT_ID (1u << 6) +#define MLX5_FLOW_ACTION_OF_POP_VLAN (1u << 7) +#define MLX5_FLOW_ACTION_OF_PUSH_VLAN (1u << 8) +#define MLX5_FLOW_ACTION_OF_SET_VLAN_VID (1u << 9) +#define MLX5_FLOW_ACTION_OF_SET_VLAN_PCP (1u << 10) +#define MLX5_FLOW_ACTION_SET_IPV4_SRC (1u << 11) +#define MLX5_FLOW_ACTION_SET_IPV4_DST (1u << 12) +#define MLX5_FLOW_ACTION_SET_IPV6_SRC (1u << 13) +#define MLX5_FLOW_ACTION_SET_IPV6_DST (1u << 14) +#define MLX5_FLOW_ACTION_SET_TP_SRC (1u << 15) +#define MLX5_FLOW_ACTION_SET_TP_DST (1u << 16) +#define MLX5_FLOW_ACTION_JUMP (1u << 17) +#define MLX5_FLOW_ACTION_SET_TTL (1u << 18) +#define MLX5_FLOW_ACTION_DEC_TTL (1u << 19) +#define MLX5_FLOW_ACTION_SET_MAC_SRC (1u << 20) +#define MLX5_FLOW_ACTION_SET_MAC_DST (1u << 21) +#define MLX5_FLOW_ACTION_ENCAP (1u << 22) +#define MLX5_FLOW_ACTION_DECAP (1u << 23) +#define MLX5_FLOW_ACTION_INC_TCP_SEQ (1u << 24) +#define MLX5_FLOW_ACTION_DEC_TCP_SEQ (1u << 25) +#define MLX5_FLOW_ACTION_INC_TCP_ACK (1u << 26) +#define MLX5_FLOW_ACTION_DEC_TCP_ACK (1u << 27) +#define MLX5_FLOW_ACTION_SET_TAG (1ull << 28) +#define MLX5_FLOW_ACTION_MARK_EXT (1ull << 29) +#define MLX5_FLOW_ACTION_SET_META (1ull << 30) +#define MLX5_FLOW_ACTION_METER (1ull << 31) +#define MLX5_FLOW_ACTION_SET_IPV4_DSCP (1ull << 32) +#define MLX5_FLOW_ACTION_SET_IPV6_DSCP (1ull << 33) +#define MLX5_FLOW_ACTION_AGE (1ull << 34) + +#define MLX5_FLOW_FATE_ACTIONS \ + (MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_QUEUE | \ + MLX5_FLOW_ACTION_RSS | MLX5_FLOW_ACTION_JUMP) + +#define MLX5_FLOW_FATE_ESWITCH_ACTIONS \ + (MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_PORT_ID | \ + MLX5_FLOW_ACTION_JUMP) + + +#define MLX5_FLOW_MODIFY_HDR_ACTIONS (MLX5_FLOW_ACTION_SET_IPV4_SRC | \ + MLX5_FLOW_ACTION_SET_IPV4_DST | \ + MLX5_FLOW_ACTION_SET_IPV6_SRC | \ + MLX5_FLOW_ACTION_SET_IPV6_DST | \ + MLX5_FLOW_ACTION_SET_TP_SRC | \ + MLX5_FLOW_ACTION_SET_TP_DST | \ + MLX5_FLOW_ACTION_SET_TTL | \ + MLX5_FLOW_ACTION_DEC_TTL | \ + MLX5_FLOW_ACTION_SET_MAC_SRC | \ + MLX5_FLOW_ACTION_SET_MAC_DST | \ + MLX5_FLOW_ACTION_INC_TCP_SEQ | \ + MLX5_FLOW_ACTION_DEC_TCP_SEQ | \ + MLX5_FLOW_ACTION_INC_TCP_ACK | \ + MLX5_FLOW_ACTION_DEC_TCP_ACK | \ + 
MLX5_FLOW_ACTION_OF_SET_VLAN_VID | \ + MLX5_FLOW_ACTION_SET_TAG | \ + MLX5_FLOW_ACTION_MARK_EXT | \ + MLX5_FLOW_ACTION_SET_META | \ + MLX5_FLOW_ACTION_SET_IPV4_DSCP | \ + MLX5_FLOW_ACTION_SET_IPV6_DSCP) + +#define MLX5_FLOW_VLAN_ACTIONS (MLX5_FLOW_ACTION_OF_POP_VLAN | \ + MLX5_FLOW_ACTION_OF_PUSH_VLAN) + +#define MLX5_FLOW_XCAP_ACTIONS (MLX5_FLOW_ACTION_ENCAP | MLX5_FLOW_ACTION_DECAP) + +#ifndef IPPROTO_MPLS +#define IPPROTO_MPLS 137 +#endif + +/* UDP port number for MPLS */ +#define MLX5_UDP_PORT_MPLS 6635 + +/* UDP port numbers for VxLAN. */ +#define MLX5_UDP_PORT_VXLAN 4789 +#define MLX5_UDP_PORT_VXLAN_GPE 4790 + +/* UDP port numbers for GENEVE. */ +#define MLX5_UDP_PORT_GENEVE 6081 + +/* Priority reserved for default flows. */ +#define MLX5_FLOW_PRIO_RSVD ((uint32_t)-1) + +/* + * Number of sub priorities. + * For each kind of pattern matching i.e. L2, L3, L4 to have a correct + * matching on the NIC (firmware dependent) L4 most have the higher priority + * followed by L3 and ending with L2. + */ +#define MLX5_PRIORITY_MAP_L2 2 +#define MLX5_PRIORITY_MAP_L3 1 +#define MLX5_PRIORITY_MAP_L4 0 +#define MLX5_PRIORITY_MAP_MAX 3 + +/* Valid layer type for IPV4 RSS. */ +#define MLX5_IPV4_LAYER_TYPES \ + (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | \ + ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV4_UDP | \ + ETH_RSS_NONFRAG_IPV4_OTHER) + +/* IBV hash source bits for IPV4. */ +#define MLX5_IPV4_IBV_RX_HASH (IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4) + +/* Valid layer type for IPV6 RSS. */ +#define MLX5_IPV6_LAYER_TYPES \ + (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_TCP | \ + ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_IPV6_EX | ETH_RSS_IPV6_TCP_EX | \ + ETH_RSS_IPV6_UDP_EX | ETH_RSS_NONFRAG_IPV6_OTHER) + +/* IBV hash source bits for IPV6. */ +#define MLX5_IPV6_IBV_RX_HASH (IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6) + +/* IBV hash bits for L3 SRC. */ +#define MLX5_L3_SRC_IBV_RX_HASH (IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_SRC_IPV6) + +/* IBV hash bits for L3 DST. */ +#define MLX5_L3_DST_IBV_RX_HASH (IBV_RX_HASH_DST_IPV4 | IBV_RX_HASH_DST_IPV6) + +/* IBV hash bits for TCP. */ +#define MLX5_TCP_IBV_RX_HASH (IBV_RX_HASH_SRC_PORT_TCP | \ + IBV_RX_HASH_DST_PORT_TCP) + +/* IBV hash bits for UDP. */ +#define MLX5_UDP_IBV_RX_HASH (IBV_RX_HASH_SRC_PORT_UDP | \ + IBV_RX_HASH_DST_PORT_UDP) + +/* IBV hash bits for L4 SRC. */ +#define MLX5_L4_SRC_IBV_RX_HASH (IBV_RX_HASH_SRC_PORT_TCP | \ + IBV_RX_HASH_SRC_PORT_UDP) + +/* IBV hash bits for L4 DST. */ +#define MLX5_L4_DST_IBV_RX_HASH (IBV_RX_HASH_DST_PORT_TCP | \ + IBV_RX_HASH_DST_PORT_UDP) + +/* Geneve header first 16Bit */ +#define MLX5_GENEVE_VER_MASK 0x3 +#define MLX5_GENEVE_VER_SHIFT 14 +#define MLX5_GENEVE_VER_VAL(a) \ + (((a) >> (MLX5_GENEVE_VER_SHIFT)) & (MLX5_GENEVE_VER_MASK)) +#define MLX5_GENEVE_OPTLEN_MASK 0x3F +#define MLX5_GENEVE_OPTLEN_SHIFT 7 +#define MLX5_GENEVE_OPTLEN_VAL(a) \ + (((a) >> (MLX5_GENEVE_OPTLEN_SHIFT)) & (MLX5_GENEVE_OPTLEN_MASK)) +#define MLX5_GENEVE_OAMF_MASK 0x1 +#define MLX5_GENEVE_OAMF_SHIFT 7 +#define MLX5_GENEVE_OAMF_VAL(a) \ + (((a) >> (MLX5_GENEVE_OAMF_SHIFT)) & (MLX5_GENEVE_OAMF_MASK)) +#define MLX5_GENEVE_CRITO_MASK 0x1 +#define MLX5_GENEVE_CRITO_SHIFT 6 +#define MLX5_GENEVE_CRITO_VAL(a) \ + (((a) >> (MLX5_GENEVE_CRITO_SHIFT)) & (MLX5_GENEVE_CRITO_MASK)) +#define MLX5_GENEVE_RSVD_MASK 0x3F +#define MLX5_GENEVE_RSVD_VAL(a) ((a) & (MLX5_GENEVE_RSVD_MASK)) +/* + * The length of the Geneve options fields, expressed in four byte multiples, + * not including the eight byte fixed tunnel. 
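+ *
+ * Worked example (illustrative): an Opt Len field value of n therefore
+ * stands for n * 4 bytes of option data after the 8-byte base header:
+ *
+ *   Opt Len = 14  ->  14 * 4 = 56 bytes of options
+ *   Opt Len = 63  ->  63 * 4 = 252 bytes (largest value of the 6-bit field)
+ *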
+ */ +#define MLX5_GENEVE_OPT_LEN_0 14 +#define MLX5_GENEVE_OPT_LEN_1 63 + +#define MLX5_ENCAPSULATION_DECISION_SIZE (sizeof(struct rte_flow_item_eth) + \ + sizeof(struct rte_flow_item_ipv4)) + +/* Software header modify action numbers of a flow. */ +#define MLX5_ACT_NUM_MDF_IPV4 1 +#define MLX5_ACT_NUM_MDF_IPV6 4 +#define MLX5_ACT_NUM_MDF_MAC 2 +#define MLX5_ACT_NUM_MDF_VID 1 +#define MLX5_ACT_NUM_MDF_PORT 2 +#define MLX5_ACT_NUM_MDF_TTL 1 +#define MLX5_ACT_NUM_DEC_TTL MLX5_ACT_NUM_MDF_TTL +#define MLX5_ACT_NUM_MDF_TCPSEQ 1 +#define MLX5_ACT_NUM_MDF_TCPACK 1 +#define MLX5_ACT_NUM_SET_REG 1 +#define MLX5_ACT_NUM_SET_TAG 1 +#define MLX5_ACT_NUM_CPY_MREG MLX5_ACT_NUM_SET_TAG +#define MLX5_ACT_NUM_SET_MARK MLX5_ACT_NUM_SET_TAG +#define MLX5_ACT_NUM_SET_META MLX5_ACT_NUM_SET_TAG +#define MLX5_ACT_NUM_SET_DSCP 1 + +enum mlx5_flow_drv_type { + MLX5_FLOW_TYPE_MIN, + MLX5_FLOW_TYPE_DV, + MLX5_FLOW_TYPE_VERBS, + MLX5_FLOW_TYPE_MAX, +}; + +/* Fate action type. */ +enum mlx5_flow_fate_type { + MLX5_FLOW_FATE_NONE, /* Egress flow. */ + MLX5_FLOW_FATE_QUEUE, + MLX5_FLOW_FATE_JUMP, + MLX5_FLOW_FATE_PORT_ID, + MLX5_FLOW_FATE_DROP, + MLX5_FLOW_FATE_MAX, +}; + +/* Matcher PRM representation */ +struct mlx5_flow_dv_match_params { + size_t size; + /**< Size of match value. Do NOT split size and key! */ + uint32_t buf[MLX5_ST_SZ_DW(fte_match_param)]; + /**< Matcher value. This value is used as the mask or as a key. */ +}; + +/* Matcher structure. */ +struct mlx5_flow_dv_matcher { + LIST_ENTRY(mlx5_flow_dv_matcher) next; + /**< Pointer to the next element. */ + struct mlx5_flow_tbl_resource *tbl; + /**< Pointer to the table(group) the matcher associated with. */ + rte_atomic32_t refcnt; /**< Reference counter. */ + void *matcher_object; /**< Pointer to DV matcher */ + uint16_t crc; /**< CRC of key. */ + uint16_t priority; /**< Priority of matcher. */ + struct mlx5_flow_dv_match_params mask; /**< Matcher mask. */ +}; + +#define MLX5_ENCAP_MAX_LEN 132 + +/* Encap/decap resource structure. */ +struct mlx5_flow_dv_encap_decap_resource { + ILIST_ENTRY(uint32_t)next; + /* Pointer to next element. */ + rte_atomic32_t refcnt; /**< Reference counter. */ + void *verbs_action; + /**< Verbs encap/decap action object. */ + uint8_t buf[MLX5_ENCAP_MAX_LEN]; + size_t size; + uint8_t reformat_type; + uint8_t ft_type; + uint64_t flags; /**< Flags for RDMA API. */ +}; + +/* Tag resource structure. */ +struct mlx5_flow_dv_tag_resource { + struct mlx5_hlist_entry entry; + /**< hash list entry for tag resource, tag value as the key. */ + void *action; + /**< Verbs tag action object. */ + rte_atomic32_t refcnt; /**< Reference counter. */ + uint32_t idx; /**< Index for the index memory pool. */ +}; + +/* + * Number of modification commands. + * The maximal actions amount in FW is some constant, and it is 16 in the + * latest releases. In some old releases, it will be limited to 8. + * Since there is no interface to query the capacity, the maximal value should + * be used to allow PMD to create the flow. The validation will be done in the + * lower driver layer or FW. A failure will be returned if exceeds the maximal + * supported actions number on the root table. + * On non-root tables, there is no limitation, but 32 is enough right now. + */ +#define MLX5_MAX_MODIFY_NUM 32 +#define MLX5_ROOT_TBL_MODIFY_NUM 16 + +/* Modify resource structure */ +struct mlx5_flow_dv_modify_hdr_resource { + LIST_ENTRY(mlx5_flow_dv_modify_hdr_resource) next; + /* Pointer to next element. */ + rte_atomic32_t refcnt; /**< Reference counter. 
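+ *
+ * Illustrative sketch (not part of the driver sources): because this
+ * resource ends in the flexible "actions[]" array declared below, an
+ * instance holding actions_num commands would be allocated along the
+ * lines of:
+ *
+ *   struct mlx5_flow_dv_modify_hdr_resource *res =
+ *       rte_zmalloc(__func__, sizeof(*res) +
+ *                   actions_num * sizeof(res->actions[0]), 0);
+ *
+ * with actions_num bounded by MLX5_ROOT_TBL_MODIFY_NUM (16) on the root
+ * table and MLX5_MAX_MODIFY_NUM (32) elsewhere, as described above.
+ *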
*/ + struct ibv_flow_action *verbs_action; + /**< Verbs modify header action object. */ + uint8_t ft_type; /**< Flow table type, Rx or Tx. */ + uint32_t actions_num; /**< Number of modification actions. */ + uint64_t flags; /**< Flags for RDMA API. */ + struct mlx5_modification_cmd actions[]; + /**< Modification actions. */ +}; + +/* Jump action resource structure. */ +struct mlx5_flow_dv_jump_tbl_resource { + rte_atomic32_t refcnt; /**< Reference counter. */ + uint8_t ft_type; /**< Flow table type, Rx or Tx. */ + void *action; /**< Pointer to the rdma core action. */ +}; + +/* Port ID resource structure. */ +struct mlx5_flow_dv_port_id_action_resource { + ILIST_ENTRY(uint32_t)next; + /* Pointer to next element. */ + rte_atomic32_t refcnt; /**< Reference counter. */ + void *action; + /**< Verbs tag action object. */ + uint32_t port_id; /**< Port ID value. */ +}; + +/* Push VLAN action resource structure */ +struct mlx5_flow_dv_push_vlan_action_resource { + ILIST_ENTRY(uint32_t)next; + /* Pointer to next element. */ + rte_atomic32_t refcnt; /**< Reference counter. */ + void *action; /**< Direct verbs action object. */ + uint8_t ft_type; /**< Flow table type, Rx, Tx or FDB. */ + rte_be32_t vlan_tag; /**< VLAN tag value. */ +}; + +/* Metadata register copy table entry. */ +struct mlx5_flow_mreg_copy_resource { + /* + * Hash list entry for copy table. + * - Key is 32/64-bit MARK action ID. + * - MUST be the first entry. + */ + struct mlx5_hlist_entry hlist_ent; + LIST_ENTRY(mlx5_flow_mreg_copy_resource) next; + /* List entry for device flows. */ + uint32_t refcnt; /* Reference counter. */ + uint32_t appcnt; /* Apply/Remove counter. */ + uint32_t idx; + uint32_t rix_flow; /* Built flow for copy. */ +}; + +/* Table data structure of the hash organization. */ +struct mlx5_flow_tbl_data_entry { + struct mlx5_hlist_entry entry; + /**< hash list entry, 64-bits key inside. */ + struct mlx5_flow_tbl_resource tbl; + /**< flow table resource. */ + LIST_HEAD(matchers, mlx5_flow_dv_matcher) matchers; + /**< matchers' header associated with the flow table. */ + struct mlx5_flow_dv_jump_tbl_resource jump; + /**< jump resource, at most one for each table created. */ + uint32_t idx; /**< index for the indexed mempool. */ +}; + +/* Verbs specification header. */ +struct ibv_spec_header { + enum ibv_flow_spec_type type; + uint16_t size; +}; + +/* RSS description. */ +struct mlx5_flow_rss_desc { + uint32_t level; + uint32_t queue_num; /**< Number of entries in @p queue. */ + uint64_t types; /**< Specific RSS hash types (see ETH_RSS_*). */ + uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */ + uint16_t queue[]; /**< Destination queues to redirect traffic to. */ +}; + + +/** Device flow handle structure for DV mode only. */ +struct mlx5_flow_handle_dv { + /* Flow DV api: */ + struct mlx5_flow_dv_matcher *matcher; /**< Cache to matcher. */ + struct mlx5_flow_dv_modify_hdr_resource *modify_hdr; + /**< Pointer to modify header resource in cache. */ + uint32_t rix_encap_decap; + /**< Index to encap/decap resource in cache. */ + uint32_t rix_push_vlan; + /**< Index to push VLAN action resource in cache. */ + uint32_t rix_tag; + /**< Index to the tag action. */ +} __rte_packed; + +/** Device flow handle structure: used both for creating & destroying. */ +struct mlx5_flow_handle { + SILIST_ENTRY(uint32_t)next; + struct mlx5_vf_vlan vf_vlan; /**< Structure for VF VLAN workaround. */ + /**< Index to next device flow handle. */ + uint64_t layers; + /**< Bit-fields of present layers, see MLX5_FLOW_LAYER_*. 
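+ *
+ * Illustrative note: the 3-bit fate_action field below selects which
+ * member of the following union is valid, e.g.
+ *
+ *   fate_action == MLX5_FLOW_FATE_QUEUE  ->  rix_hrxq is the hash Rx
+ *                                            queue object index;
+ *   fate_action == MLX5_FLOW_FATE_JUMP   ->  rix_jump is the jump action
+ *                                            resource index.
+ *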
*/ + void *ib_flow; /**< Verbs flow pointer. */ + uint32_t split_flow_id:28; /**< Sub flow unique match flow id. */ + uint32_t mark:1; /**< Metadate rxq mark flag. */ + uint32_t fate_action:3; /**< Fate action type. */ + union { + uint32_t rix_hrxq; /**< Hash Rx queue object index. */ + uint32_t rix_jump; /**< Index to the jump action resource. */ + uint32_t rix_port_id_action; + /**< Index to port ID action resource. */ + uint32_t rix_fate; + /**< Generic value indicates the fate action. */ + }; +#ifdef HAVE_IBV_FLOW_DV_SUPPORT + struct mlx5_flow_handle_dv dvh; +#endif +} __rte_packed; + +/* + * Size for Verbs device flow handle structure only. Do not use the DV only + * structure in Verbs. No DV flows attributes will be accessed. + * Macro offsetof() could also be used here. + */ +#ifdef HAVE_IBV_FLOW_DV_SUPPORT +#define MLX5_FLOW_HANDLE_VERBS_SIZE \ + (sizeof(struct mlx5_flow_handle) - sizeof(struct mlx5_flow_handle_dv)) +#else +#define MLX5_FLOW_HANDLE_VERBS_SIZE (sizeof(struct mlx5_flow_handle)) +#endif + +/* + * Max number of actions per DV flow. + * See CREATE_FLOW_MAX_FLOW_ACTIONS_SUPPORTED + * in rdma-core file providers/mlx5/verbs.c. + */ +#define MLX5_DV_MAX_NUMBER_OF_ACTIONS 8 + +/** Device flow structure only for DV flow creation. */ +struct mlx5_flow_dv_workspace { + uint32_t group; /**< The group index. */ + uint8_t transfer; /**< 1 if the flow is E-Switch flow. */ + int actions_n; /**< number of actions. */ + void *actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS]; /**< Action list. */ + struct mlx5_flow_dv_encap_decap_resource *encap_decap; + /**< Pointer to encap/decap resource in cache. */ + struct mlx5_flow_dv_push_vlan_action_resource *push_vlan_res; + /**< Pointer to push VLAN action resource in cache. */ + struct mlx5_flow_dv_tag_resource *tag_resource; + /**< pointer to the tag action. */ + struct mlx5_flow_dv_port_id_action_resource *port_id_action; + /**< Pointer to port ID action resource. */ + struct mlx5_flow_dv_jump_tbl_resource *jump; + /**< Pointer to the jump action resource. */ + struct mlx5_flow_dv_match_params value; + /**< Holds the value that the packet is compared to. */ +}; + +/* + * Maximal Verbs flow specifications & actions size. + * Some elements are mutually exclusive, but enough space should be allocated. + * Tunnel cases: 1. Max 2 Ethernet + IP(v6 len > v4 len) + TCP/UDP headers. + * 2. One tunnel header (exception: GRE + MPLS), + * SPEC length: GRE == tunnel. + * Actions: 1. 1 Mark OR Flag. + * 2. 1 Drop (if any). + * 3. No limitation for counters, but it makes no sense to support too + * many counters in a single device flow. 
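+ *
+ * Worked layout (illustrative): a worst-case tunnelled pattern therefore
+ * occupies the specs[] buffer as
+ *
+ *   [eth | ipv6 | tcp/udp]  [gre (+ mpls)]  [eth | ipv6 | tcp/udp]
+ *       outer headers           tunnel          inner headers
+ *
+ * which is what MLX5_VERBS_MAX_SPEC_SIZE below reserves; the IPv6 spec is
+ * used for sizing because it is larger than the IPv4 one.
+ *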
+ */ +#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT +#define MLX5_VERBS_MAX_SPEC_SIZE \ + ( \ + (2 * (sizeof(struct ibv_flow_spec_eth) + \ + sizeof(struct ibv_flow_spec_ipv6) + \ + sizeof(struct ibv_flow_spec_tcp_udp)) + \ + sizeof(struct ibv_flow_spec_gre) + \ + sizeof(struct ibv_flow_spec_mpls)) \ + ) +#else +#define MLX5_VERBS_MAX_SPEC_SIZE \ + ( \ + (2 * (sizeof(struct ibv_flow_spec_eth) + \ + sizeof(struct ibv_flow_spec_ipv6) + \ + sizeof(struct ibv_flow_spec_tcp_udp)) + \ + sizeof(struct ibv_flow_spec_tunnel)) \ + ) +#endif + +#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \ + defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45) +#define MLX5_VERBS_MAX_ACT_SIZE \ + ( \ + sizeof(struct ibv_flow_spec_action_tag) + \ + sizeof(struct ibv_flow_spec_action_drop) + \ + sizeof(struct ibv_flow_spec_counter_action) * 4 \ + ) +#else +#define MLX5_VERBS_MAX_ACT_SIZE \ + ( \ + sizeof(struct ibv_flow_spec_action_tag) + \ + sizeof(struct ibv_flow_spec_action_drop) \ + ) +#endif + +#define MLX5_VERBS_MAX_SPEC_ACT_SIZE \ + (MLX5_VERBS_MAX_SPEC_SIZE + MLX5_VERBS_MAX_ACT_SIZE) + +/** Device flow structure only for Verbs flow creation. */ +struct mlx5_flow_verbs_workspace { + unsigned int size; /**< Size of the attribute. */ + struct ibv_flow_attr attr; /**< Verbs flow attribute buffer. */ + uint8_t specs[MLX5_VERBS_MAX_SPEC_ACT_SIZE]; + /**< Specifications & actions buffer of verbs flow. */ +}; + +/** Maximal number of device sub-flows supported. */ +#define MLX5_NUM_MAX_DEV_FLOWS 32 + +/** Device flow structure. */ +struct mlx5_flow { + struct rte_flow *flow; /**< Pointer to the main flow. */ + uint32_t flow_idx; /**< The memory pool index to the main flow. */ + uint64_t hash_fields; /**< Verbs hash Rx queue hash fields. */ + uint64_t act_flags; + /**< Bit-fields of detected actions, see MLX5_FLOW_ACTION_*. */ + bool external; /**< true if the flow is created external to PMD. */ + uint8_t ingress; /**< 1 if the flow is ingress. */ + union { +#ifdef HAVE_IBV_FLOW_DV_SUPPORT + struct mlx5_flow_dv_workspace dv; +#endif + struct mlx5_flow_verbs_workspace verbs; + }; + struct mlx5_flow_handle *handle; + uint32_t handle_idx; /* Index of the mlx5 flow handle memory. */ +}; + +/* Flow meter state. */ +#define MLX5_FLOW_METER_DISABLE 0 +#define MLX5_FLOW_METER_ENABLE 1 + +#define MLX5_MAN_WIDTH 8 +/* Modify this value if enum rte_mtr_color changes. */ +#define RTE_MTR_DROPPED RTE_COLORS + +/* Meter policer statistics */ +struct mlx5_flow_policer_stats { + uint32_t cnt[RTE_COLORS + 1]; + /**< Color counter, extra for drop. */ + uint64_t stats_mask; + /**< Statistics mask for the colors. */ +}; + +/* Meter table structure. */ +struct mlx5_meter_domain_info { + struct mlx5_flow_tbl_resource *tbl; + /**< Meter table. */ + struct mlx5_flow_tbl_resource *sfx_tbl; + /**< Meter suffix table. */ + void *any_matcher; + /**< Meter color not match default criteria. */ + void *color_matcher; + /**< Meter color match criteria. */ + void *jump_actn; + /**< Meter match action. */ + void *policer_rules[RTE_MTR_DROPPED + 1]; + /**< Meter policer for the match. */ +}; + +/* Meter table set for TX RX FDB. */ +struct mlx5_meter_domains_infos { + uint32_t ref_cnt; + /**< Table user count. */ + struct mlx5_meter_domain_info egress; + /**< TX meter table. */ + struct mlx5_meter_domain_info ingress; + /**< RX meter table. */ + struct mlx5_meter_domain_info transfer; + /**< FDB meter table. */ + void *drop_actn; + /**< Drop action as not matched. */ + void *count_actns[RTE_MTR_DROPPED + 1]; + /**< Counters for match and unmatched statistics. 
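+ *
+ * Illustrative note: count_actns[] is indexed by meter output color with
+ * one extra slot for drops, mirroring mlx5_flow_policer_stats::cnt[]:
+ *
+ *   count_actns[RTE_COLOR_GREEN]   green traffic counter
+ *   count_actns[RTE_COLOR_YELLOW]  yellow traffic counter
+ *   count_actns[RTE_COLOR_RED]     red traffic counter
+ *   count_actns[RTE_MTR_DROPPED]   policer-dropped traffic counter
+ *
+ * where RTE_MTR_DROPPED is defined above as RTE_COLORS.
+ *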
*/ + uint32_t fmp[MLX5_ST_SZ_DW(flow_meter_parameters)]; + /**< Flow meter parameter. */ + size_t fmp_size; + /**< Flow meter parameter size. */ + void *meter_action; + /**< Flow meter action. */ +}; + +/* Meter parameter structure. */ +struct mlx5_flow_meter { + TAILQ_ENTRY(mlx5_flow_meter) next; + /**< Pointer to the next flow meter structure. */ + uint32_t idx; /* Index to meter object. */ + uint32_t meter_id; + /**< Meter id. */ + struct mlx5_flow_meter_profile *profile; + /**< Meter profile parameters. */ + + /** Policer actions (per meter output color). */ + enum rte_mtr_policer_action action[RTE_COLORS]; + + /** Set of stats counters to be enabled. + * @see enum rte_mtr_stats_type + */ + uint64_t stats_mask; + + /**< Rule applies to ingress traffic. */ + uint32_t ingress:1; + + /**< Rule applies to egress traffic. */ + uint32_t egress:1; + /** + * Instead of simply matching the properties of traffic as it would + * appear on a given DPDK port ID, enabling this attribute transfers + * a flow rule to the lowest possible level of any device endpoints + * found in the pattern. + * + * When supported, this effectively enables an application to + * re-route traffic not necessarily intended for it (e.g. coming + * from or addressed to different physical ports, VFs or + * applications) at the device level. + * + * It complements the behavior of some pattern items such as + * RTE_FLOW_ITEM_TYPE_PHY_PORT and is meaningless without them. + * + * When transferring flow rules, ingress and egress attributes keep + * their original meaning, as if processing traffic emitted or + * received by the application. + */ + uint32_t transfer:1; + struct mlx5_meter_domains_infos *mfts; + /**< Flow table created for this meter. */ + struct mlx5_flow_policer_stats policer_stats; + /**< Meter policer statistics. */ + uint32_t ref_cnt; + /**< Use count. */ + uint32_t active_state:1; + /**< Meter state. */ + uint32_t shared:1; + /**< Meter shared or not. */ +}; + +/* RFC2697 parameter structure. */ +struct mlx5_flow_meter_srtcm_rfc2697_prm { + /* green_saturation_value = cbs_mantissa * 2^cbs_exponent */ + uint32_t cbs_exponent:5; + uint32_t cbs_mantissa:8; + /* cir = 8G * cir_mantissa * 1/(2^cir_exponent) Bytes/Sec */ + uint32_t cir_exponent:5; + uint32_t cir_mantissa:8; + /* yellow _saturation_value = ebs_mantissa * 2^ebs_exponent */ + uint32_t ebs_exponent:5; + uint32_t ebs_mantissa:8; +}; + +/* Flow meter profile structure. */ +struct mlx5_flow_meter_profile { + TAILQ_ENTRY(mlx5_flow_meter_profile) next; + /**< Pointer to the next flow meter structure. */ + uint32_t meter_profile_id; /**< Profile id. */ + struct rte_mtr_meter_profile profile; /**< Profile detail. */ + union { + struct mlx5_flow_meter_srtcm_rfc2697_prm srtcm_prm; + /**< srtcm_rfc2697 struct. */ + }; + uint32_t ref_cnt; /**< Use count. */ +}; + +/* Fdir flow structure */ +struct mlx5_fdir_flow { + LIST_ENTRY(mlx5_fdir_flow) next; /* Pointer to the next element. */ + struct mlx5_fdir *fdir; /* Pointer to fdir. */ + uint32_t rix_flow; /* Index to flow. */ +}; + +#define HAIRPIN_FLOW_ID_BITS 28 + +/* Flow structure. */ +struct rte_flow { + ILIST_ENTRY(uint32_t)next; /**< Index to the next flow structure. */ + uint32_t dev_handles; + /**< Device flow handles that are part of the flow. */ + uint32_t drv_type:2; /**< Driver type. */ + uint32_t fdir:1; /**< Identifier of associated FDIR if any. */ + uint32_t hairpin_flow_id:HAIRPIN_FLOW_ID_BITS; + /**< The flow id used for hairpin. */ + uint32_t copy_applied:1; /**< The MARK copy Flow os applied. 
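+ *
+ * Worked check (illustrative): the bit-fields above pack into exactly one
+ * 32-bit word,
+ *
+ *   drv_type (2) + fdir (1) + hairpin_flow_id (28) + copy_applied (1) = 32
+ *
+ * which, together with __rte_packed, keeps struct rte_flow compact for the
+ * MLX5_IPOOL_RTE_FLOW indexed pool.
+ *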
*/ + uint32_t rix_mreg_copy; + /**< Index to metadata register copy table resource. */ + uint32_t counter; /**< Holds flow counter. */ + uint16_t meter; /**< Holds flow meter id. */ +} __rte_packed; + +typedef int (*mlx5_flow_validate_t)(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item items[], + const struct rte_flow_action actions[], + bool external, + int hairpin, + struct rte_flow_error *error); +typedef struct mlx5_flow *(*mlx5_flow_prepare_t) + (struct rte_eth_dev *dev, const struct rte_flow_attr *attr, + const struct rte_flow_item items[], + const struct rte_flow_action actions[], struct rte_flow_error *error); +typedef int (*mlx5_flow_translate_t)(struct rte_eth_dev *dev, + struct mlx5_flow *dev_flow, + const struct rte_flow_attr *attr, + const struct rte_flow_item items[], + const struct rte_flow_action actions[], + struct rte_flow_error *error); +typedef int (*mlx5_flow_apply_t)(struct rte_eth_dev *dev, struct rte_flow *flow, + struct rte_flow_error *error); +typedef void (*mlx5_flow_remove_t)(struct rte_eth_dev *dev, + struct rte_flow *flow); +typedef void (*mlx5_flow_destroy_t)(struct rte_eth_dev *dev, + struct rte_flow *flow); +typedef int (*mlx5_flow_query_t)(struct rte_eth_dev *dev, + struct rte_flow *flow, + const struct rte_flow_action *actions, + void *data, + struct rte_flow_error *error); +typedef struct mlx5_meter_domains_infos *(*mlx5_flow_create_mtr_tbls_t) + (struct rte_eth_dev *dev, + const struct mlx5_flow_meter *fm); +typedef int (*mlx5_flow_destroy_mtr_tbls_t)(struct rte_eth_dev *dev, + struct mlx5_meter_domains_infos *tbls); +typedef int (*mlx5_flow_create_policer_rules_t) + (struct rte_eth_dev *dev, + struct mlx5_flow_meter *fm, + const struct rte_flow_attr *attr); +typedef int (*mlx5_flow_destroy_policer_rules_t) + (struct rte_eth_dev *dev, + const struct mlx5_flow_meter *fm, + const struct rte_flow_attr *attr); +typedef uint32_t (*mlx5_flow_counter_alloc_t) + (struct rte_eth_dev *dev); +typedef void (*mlx5_flow_counter_free_t)(struct rte_eth_dev *dev, + uint32_t cnt); +typedef int (*mlx5_flow_counter_query_t)(struct rte_eth_dev *dev, + uint32_t cnt, + bool clear, uint64_t *pkts, + uint64_t *bytes); +typedef int (*mlx5_flow_get_aged_flows_t) + (struct rte_eth_dev *dev, + void **context, + uint32_t nb_contexts, + struct rte_flow_error *error); +struct mlx5_flow_driver_ops { + mlx5_flow_validate_t validate; + mlx5_flow_prepare_t prepare; + mlx5_flow_translate_t translate; + mlx5_flow_apply_t apply; + mlx5_flow_remove_t remove; + mlx5_flow_destroy_t destroy; + mlx5_flow_query_t query; + mlx5_flow_create_mtr_tbls_t create_mtr_tbls; + mlx5_flow_destroy_mtr_tbls_t destroy_mtr_tbls; + mlx5_flow_create_policer_rules_t create_policer_rules; + mlx5_flow_destroy_policer_rules_t destroy_policer_rules; + mlx5_flow_counter_alloc_t counter_alloc; + mlx5_flow_counter_free_t counter_free; + mlx5_flow_counter_query_t counter_query; + mlx5_flow_get_aged_flows_t get_aged_flows; +}; + +/* mlx5_flow.c */ + +struct mlx5_flow_id_pool *mlx5_flow_id_pool_alloc(uint32_t max_id); +void mlx5_flow_id_pool_release(struct mlx5_flow_id_pool *pool); +uint32_t mlx5_flow_id_get(struct mlx5_flow_id_pool *pool, uint32_t *id); +uint32_t mlx5_flow_id_release(struct mlx5_flow_id_pool *pool, + uint32_t id); +int mlx5_flow_group_to_table(const struct rte_flow_attr *attributes, + bool external, uint32_t group, bool fdb_def_rule, + uint32_t *table, struct rte_flow_error *error); +uint64_t mlx5_flow_hashfields_adjust(struct mlx5_flow_rss_desc *rss_desc, + int 
tunnel, uint64_t layer_types, + uint64_t hash_fields); +uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority, + uint32_t subpriority); +int mlx5_flow_get_reg_id(struct rte_eth_dev *dev, + enum mlx5_feature_name feature, + uint32_t id, + struct rte_flow_error *error); +const struct rte_flow_action *mlx5_flow_find_action + (const struct rte_flow_action *actions, + enum rte_flow_action_type action); +int mlx5_flow_validate_action_count(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + struct rte_flow_error *error); +int mlx5_flow_validate_action_drop(uint64_t action_flags, + const struct rte_flow_attr *attr, + struct rte_flow_error *error); +int mlx5_flow_validate_action_flag(uint64_t action_flags, + const struct rte_flow_attr *attr, + struct rte_flow_error *error); +int mlx5_flow_validate_action_mark(const struct rte_flow_action *action, + uint64_t action_flags, + const struct rte_flow_attr *attr, + struct rte_flow_error *error); +int mlx5_flow_validate_action_queue(const struct rte_flow_action *action, + uint64_t action_flags, + struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + struct rte_flow_error *error); +int mlx5_flow_validate_action_rss(const struct rte_flow_action *action, + uint64_t action_flags, + struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + uint64_t item_flags, + struct rte_flow_error *error); +int mlx5_flow_validate_attributes(struct rte_eth_dev *dev, + const struct rte_flow_attr *attributes, + struct rte_flow_error *error); +int mlx5_flow_item_acceptable(const struct rte_flow_item *item, + const uint8_t *mask, + const uint8_t *nic_mask, + unsigned int size, + struct rte_flow_error *error); +int mlx5_flow_validate_item_eth(const struct rte_flow_item *item, + uint64_t item_flags, + struct rte_flow_error *error); +int mlx5_flow_validate_item_gre(const struct rte_flow_item *item, + uint64_t item_flags, + uint8_t target_protocol, + struct rte_flow_error *error); +int mlx5_flow_validate_item_gre_key(const struct rte_flow_item *item, + uint64_t item_flags, + const struct rte_flow_item *gre_item, + struct rte_flow_error *error); +int mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item, + uint64_t item_flags, + uint64_t last_item, + uint16_t ether_type, + const struct rte_flow_item_ipv4 *acc_mask, + struct rte_flow_error *error); +int mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item, + uint64_t item_flags, + uint64_t last_item, + uint16_t ether_type, + const struct rte_flow_item_ipv6 *acc_mask, + struct rte_flow_error *error); +int mlx5_flow_validate_item_mpls(struct rte_eth_dev *dev, + const struct rte_flow_item *item, + uint64_t item_flags, + uint64_t prev_layer, + struct rte_flow_error *error); +int mlx5_flow_validate_item_tcp(const struct rte_flow_item *item, + uint64_t item_flags, + uint8_t target_protocol, + const struct rte_flow_item_tcp *flow_mask, + struct rte_flow_error *error); +int mlx5_flow_validate_item_udp(const struct rte_flow_item *item, + uint64_t item_flags, + uint8_t target_protocol, + struct rte_flow_error *error); +int mlx5_flow_validate_item_vlan(const struct rte_flow_item *item, + uint64_t item_flags, + struct rte_eth_dev *dev, + struct rte_flow_error *error); +int mlx5_flow_validate_item_vxlan(const struct rte_flow_item *item, + uint64_t item_flags, + struct rte_flow_error *error); +int mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item, + uint64_t item_flags, + struct rte_eth_dev *dev, + struct rte_flow_error *error); +int mlx5_flow_validate_item_icmp(const 
struct rte_flow_item *item, + uint64_t item_flags, + uint8_t target_protocol, + struct rte_flow_error *error); +int mlx5_flow_validate_item_icmp6(const struct rte_flow_item *item, + uint64_t item_flags, + uint8_t target_protocol, + struct rte_flow_error *error); +int mlx5_flow_validate_item_nvgre(const struct rte_flow_item *item, + uint64_t item_flags, + uint8_t target_protocol, + struct rte_flow_error *error); +int mlx5_flow_validate_item_geneve(const struct rte_flow_item *item, + uint64_t item_flags, + struct rte_eth_dev *dev, + struct rte_flow_error *error); +struct mlx5_meter_domains_infos *mlx5_flow_create_mtr_tbls + (struct rte_eth_dev *dev, + const struct mlx5_flow_meter *fm); +int mlx5_flow_destroy_mtr_tbls(struct rte_eth_dev *dev, + struct mlx5_meter_domains_infos *tbl); +int mlx5_flow_create_policer_rules(struct rte_eth_dev *dev, + struct mlx5_flow_meter *fm, + const struct rte_flow_attr *attr); +int mlx5_flow_destroy_policer_rules(struct rte_eth_dev *dev, + struct mlx5_flow_meter *fm, + const struct rte_flow_attr *attr); +int mlx5_flow_meter_flush(struct rte_eth_dev *dev, + struct rte_mtr_error *error); +#endif /* RTE_PMD_MLX5_FLOW_H_ */ diff --git a/src/spdk/dpdk/drivers/net/mlx5/mlx5_flow_dv.c b/src/spdk/dpdk/drivers/net/mlx5/mlx5_flow_dv.c new file mode 100644 index 000000000..e48183195 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/mlx5/mlx5_flow_dv.c @@ -0,0 +1,9666 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2018 Mellanox Technologies, Ltd + */ + +#include +#include +#include +#include +#include + +/* Verbs header. */ +/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */ +#ifdef PEDANTIC +#pragma GCC diagnostic ignored "-Wpedantic" +#endif +#include +#ifdef PEDANTIC +#pragma GCC diagnostic error "-Wpedantic" +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "mlx5_defs.h" +#include "mlx5.h" +#include "mlx5_flow.h" +#include "mlx5_rxtx.h" + +#ifdef HAVE_IBV_FLOW_DV_SUPPORT + +#ifndef HAVE_IBV_FLOW_DEVX_COUNTERS +#define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0 +#endif + +#ifndef HAVE_MLX5DV_DR_ESWITCH +#ifndef MLX5DV_FLOW_TABLE_TYPE_FDB +#define MLX5DV_FLOW_TABLE_TYPE_FDB 0 +#endif +#endif + +#ifndef HAVE_MLX5DV_DR +#define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1 +#endif + +/* VLAN header definitions */ +#define MLX5DV_FLOW_VLAN_PCP_SHIFT 13 +#define MLX5DV_FLOW_VLAN_PCP_MASK (0x7 << MLX5DV_FLOW_VLAN_PCP_SHIFT) +#define MLX5DV_FLOW_VLAN_VID_MASK 0x0fff +#define MLX5DV_FLOW_VLAN_PCP_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK) +#define MLX5DV_FLOW_VLAN_VID_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_VID_MASK) + +union flow_dv_attr { + struct { + uint32_t valid:1; + uint32_t ipv4:1; + uint32_t ipv6:1; + uint32_t tcp:1; + uint32_t udp:1; + uint32_t reserved:27; + }; + uint32_t attr; +}; + +static int +flow_dv_tbl_resource_release(struct rte_eth_dev *dev, + struct mlx5_flow_tbl_resource *tbl); + +/** + * Initialize flow attributes structure according to flow items' types. + * + * flow_dv_validate() avoids multiple L3/L4 layers cases other than tunnel + * mode. For tunnel mode, the items to be modified are the outermost ones. + * + * @param[in] item + * Pointer to item specification. + * @param[out] attr + * Pointer to flow attributes structure. + * @param[in] dev_flow + * Pointer to the sub flow. + * @param[in] tunnel_decap + * Whether action is after tunnel decapsulation. 
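+ *
+ * Behavioural sketch (illustrative example): for a non-split flow with
+ * pattern ETH / IPV4 / UDP / END the loop below leaves attr->ipv4 = 1,
+ * attr->udp = 1 and attr->valid = 1; when tunnel_decap is true and a
+ * tunnel item (VXLAN, GRE, ...) is reached, the bits accumulated so far
+ * are cleared so that only the inner headers remain reflected.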
+ */ +static void +flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr, + struct mlx5_flow *dev_flow, bool tunnel_decap) +{ + uint64_t layers = dev_flow->handle->layers; + + /* + * If layers is already initialized, it means this dev_flow is the + * suffix flow, the layers flags is set by the prefix flow. Need to + * use the layer flags from prefix flow as the suffix flow may not + * have the user defined items as the flow is split. + */ + if (layers) { + if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4) + attr->ipv4 = 1; + else if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV6) + attr->ipv6 = 1; + if (layers & MLX5_FLOW_LAYER_OUTER_L4_TCP) + attr->tcp = 1; + else if (layers & MLX5_FLOW_LAYER_OUTER_L4_UDP) + attr->udp = 1; + attr->valid = 1; + return; + } + for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { + uint8_t next_protocol = 0xff; + switch (item->type) { + case RTE_FLOW_ITEM_TYPE_GRE: + case RTE_FLOW_ITEM_TYPE_NVGRE: + case RTE_FLOW_ITEM_TYPE_VXLAN: + case RTE_FLOW_ITEM_TYPE_VXLAN_GPE: + case RTE_FLOW_ITEM_TYPE_GENEVE: + case RTE_FLOW_ITEM_TYPE_MPLS: + if (tunnel_decap) + attr->attr = 0; + break; + case RTE_FLOW_ITEM_TYPE_IPV4: + if (!attr->ipv6) + attr->ipv4 = 1; + if (item->mask != NULL && + ((const struct rte_flow_item_ipv4 *) + item->mask)->hdr.next_proto_id) + next_protocol = + ((const struct rte_flow_item_ipv4 *) + (item->spec))->hdr.next_proto_id & + ((const struct rte_flow_item_ipv4 *) + (item->mask))->hdr.next_proto_id; + if ((next_protocol == IPPROTO_IPIP || + next_protocol == IPPROTO_IPV6) && tunnel_decap) + attr->attr = 0; + break; + case RTE_FLOW_ITEM_TYPE_IPV6: + if (!attr->ipv4) + attr->ipv6 = 1; + if (item->mask != NULL && + ((const struct rte_flow_item_ipv6 *) + item->mask)->hdr.proto) + next_protocol = + ((const struct rte_flow_item_ipv6 *) + (item->spec))->hdr.proto & + ((const struct rte_flow_item_ipv6 *) + (item->mask))->hdr.proto; + if ((next_protocol == IPPROTO_IPIP || + next_protocol == IPPROTO_IPV6) && tunnel_decap) + attr->attr = 0; + break; + case RTE_FLOW_ITEM_TYPE_UDP: + if (!attr->tcp) + attr->udp = 1; + break; + case RTE_FLOW_ITEM_TYPE_TCP: + if (!attr->udp) + attr->tcp = 1; + break; + default: + break; + } + } + attr->valid = 1; +} + +/** + * Convert rte_mtr_color to mlx5 color. + * + * @param[in] rcol + * rte_mtr_color. + * + * @return + * mlx5 color. + */ +static int +rte_col_2_mlx5_col(enum rte_color rcol) +{ + switch (rcol) { + case RTE_COLOR_GREEN: + return MLX5_FLOW_COLOR_GREEN; + case RTE_COLOR_YELLOW: + return MLX5_FLOW_COLOR_YELLOW; + case RTE_COLOR_RED: + return MLX5_FLOW_COLOR_RED; + default: + break; + } + return MLX5_FLOW_COLOR_UNDEFINED; +} + +struct field_modify_info { + uint32_t size; /* Size of field in protocol header, in bytes. */ + uint32_t offset; /* Offset of field in protocol header, in bytes. */ + enum mlx5_modification_field id; +}; + +struct field_modify_info modify_eth[] = { + {4, 0, MLX5_MODI_OUT_DMAC_47_16}, + {2, 4, MLX5_MODI_OUT_DMAC_15_0}, + {4, 6, MLX5_MODI_OUT_SMAC_47_16}, + {2, 10, MLX5_MODI_OUT_SMAC_15_0}, + {0, 0, 0}, +}; + +struct field_modify_info modify_vlan_out_first_vid[] = { + /* Size in bits !!! 
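+ * (the first VLAN VID field is 12 bits wide, so the size here is given
+ * in bits rather than in bytes as in the other modify_* tables)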
*/ + {12, 0, MLX5_MODI_OUT_FIRST_VID}, + {0, 0, 0}, +}; + +struct field_modify_info modify_ipv4[] = { + {1, 1, MLX5_MODI_OUT_IP_DSCP}, + {1, 8, MLX5_MODI_OUT_IPV4_TTL}, + {4, 12, MLX5_MODI_OUT_SIPV4}, + {4, 16, MLX5_MODI_OUT_DIPV4}, + {0, 0, 0}, +}; + +struct field_modify_info modify_ipv6[] = { + {1, 0, MLX5_MODI_OUT_IP_DSCP}, + {1, 7, MLX5_MODI_OUT_IPV6_HOPLIMIT}, + {4, 8, MLX5_MODI_OUT_SIPV6_127_96}, + {4, 12, MLX5_MODI_OUT_SIPV6_95_64}, + {4, 16, MLX5_MODI_OUT_SIPV6_63_32}, + {4, 20, MLX5_MODI_OUT_SIPV6_31_0}, + {4, 24, MLX5_MODI_OUT_DIPV6_127_96}, + {4, 28, MLX5_MODI_OUT_DIPV6_95_64}, + {4, 32, MLX5_MODI_OUT_DIPV6_63_32}, + {4, 36, MLX5_MODI_OUT_DIPV6_31_0}, + {0, 0, 0}, +}; + +struct field_modify_info modify_udp[] = { + {2, 0, MLX5_MODI_OUT_UDP_SPORT}, + {2, 2, MLX5_MODI_OUT_UDP_DPORT}, + {0, 0, 0}, +}; + +struct field_modify_info modify_tcp[] = { + {2, 0, MLX5_MODI_OUT_TCP_SPORT}, + {2, 2, MLX5_MODI_OUT_TCP_DPORT}, + {4, 4, MLX5_MODI_OUT_TCP_SEQ_NUM}, + {4, 8, MLX5_MODI_OUT_TCP_ACK_NUM}, + {0, 0, 0}, +}; + +static void +mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item __rte_unused, + uint8_t next_protocol, uint64_t *item_flags, + int *tunnel) +{ + MLX5_ASSERT(item->type == RTE_FLOW_ITEM_TYPE_IPV4 || + item->type == RTE_FLOW_ITEM_TYPE_IPV6); + if (next_protocol == IPPROTO_IPIP) { + *item_flags |= MLX5_FLOW_LAYER_IPIP; + *tunnel = 1; + } + if (next_protocol == IPPROTO_IPV6) { + *item_flags |= MLX5_FLOW_LAYER_IPV6_ENCAP; + *tunnel = 1; + } +} + +/** + * Acquire the synchronizing object to protect multithreaded access + * to shared dv context. Lock occurs only if context is actually + * shared, i.e. we have multiport IB device and representors are + * created. + * + * @param[in] dev + * Pointer to the rte_eth_dev structure. + */ +static void +flow_dv_shared_lock(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_ibv_shared *sh = priv->sh; + + if (sh->dv_refcnt > 1) { + int ret; + + ret = pthread_mutex_lock(&sh->dv_mutex); + MLX5_ASSERT(!ret); + (void)ret; + } +} + +static void +flow_dv_shared_unlock(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_ibv_shared *sh = priv->sh; + + if (sh->dv_refcnt > 1) { + int ret; + + ret = pthread_mutex_unlock(&sh->dv_mutex); + MLX5_ASSERT(!ret); + (void)ret; + } +} + +/* Update VLAN's VID/PCP based on input rte_flow_action. + * + * @param[in] action + * Pointer to struct rte_flow_action. + * @param[out] vlan + * Pointer to struct rte_vlan_hdr. + */ +static void +mlx5_update_vlan_vid_pcp(const struct rte_flow_action *action, + struct rte_vlan_hdr *vlan) +{ + uint16_t vlan_tci; + if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP) { + vlan_tci = + ((const struct rte_flow_action_of_set_vlan_pcp *) + action->conf)->vlan_pcp; + vlan_tci = vlan_tci << MLX5DV_FLOW_VLAN_PCP_SHIFT; + vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK; + vlan->vlan_tci |= vlan_tci; + } else if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) { + vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK; + vlan->vlan_tci |= rte_be_to_cpu_16 + (((const struct rte_flow_action_of_set_vlan_vid *) + action->conf)->vlan_vid); + } +} + +/** + * Fetch 1, 2, 3 or 4 byte field from the byte array + * and return as unsigned integer in host-endian format. + * + * @param[in] data + * Pointer to data array. + * @param[in] size + * Size of field to extract. + * + * @return + * converted field in host endian format. 
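+ * For example, with size == 3 and data pointing at the network-order
+ * bytes {0x12, 0x34, 0x56} the returned value is 0x123456.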
+ */ +static inline uint32_t +flow_dv_fetch_field(const uint8_t *data, uint32_t size) +{ + uint32_t ret; + + switch (size) { + case 1: + ret = *data; + break; + case 2: + ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data); + break; + case 3: + ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data); + ret = (ret << 8) | *(data + sizeof(uint16_t)); + break; + case 4: + ret = rte_be_to_cpu_32(*(const unaligned_uint32_t *)data); + break; + default: + MLX5_ASSERT(false); + ret = 0; + break; + } + return ret; +} + +/** + * Convert modify-header action to DV specification. + * + * Data length of each action is determined by provided field description + * and the item mask. Data bit offset and width of each action is determined + * by provided item mask. + * + * @param[in] item + * Pointer to item specification. + * @param[in] field + * Pointer to field modification information. + * For MLX5_MODIFICATION_TYPE_SET specifies destination field. + * For MLX5_MODIFICATION_TYPE_ADD specifies destination field. + * For MLX5_MODIFICATION_TYPE_COPY specifies source field. + * @param[in] dcopy + * Destination field info for MLX5_MODIFICATION_TYPE_COPY in @type. + * Negative offset value sets the same offset as source offset. + * size field is ignored, value is taken from source field. + * @param[in,out] resource + * Pointer to the modify-header resource. + * @param[in] type + * Type of modification. + * @param[out] error + * Pointer to the error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +flow_dv_convert_modify_action(struct rte_flow_item *item, + struct field_modify_info *field, + struct field_modify_info *dcopy, + struct mlx5_flow_dv_modify_hdr_resource *resource, + uint32_t type, struct rte_flow_error *error) +{ + uint32_t i = resource->actions_num; + struct mlx5_modification_cmd *actions = resource->actions; + + /* + * The item and mask are provided in big-endian format. + * The fields should be presented as in big-endian format either. + * Mask must be always present, it defines the actual field width. + */ + MLX5_ASSERT(item->mask); + MLX5_ASSERT(field->size); + do { + unsigned int size_b; + unsigned int off_b; + uint32_t mask; + uint32_t data; + + if (i >= MLX5_MAX_MODIFY_NUM) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "too many items to modify"); + /* Fetch variable byte size mask from the array. */ + mask = flow_dv_fetch_field((const uint8_t *)item->mask + + field->offset, field->size); + if (!mask) { + ++field; + continue; + } + /* Deduce actual data width in bits from mask value. */ + off_b = rte_bsf32(mask); + size_b = sizeof(uint32_t) * CHAR_BIT - + off_b - __builtin_clz(mask); + MLX5_ASSERT(size_b); + size_b = size_b == sizeof(uint32_t) * CHAR_BIT ? 0 : size_b; + actions[i] = (struct mlx5_modification_cmd) { + .action_type = type, + .field = field->id, + .offset = off_b, + .length = size_b, + }; + /* Convert entire record to expected big-endian format. */ + actions[i].data0 = rte_cpu_to_be_32(actions[i].data0); + if (type == MLX5_MODIFICATION_TYPE_COPY) { + MLX5_ASSERT(dcopy); + actions[i].dst_field = dcopy->id; + actions[i].dst_offset = + (int)dcopy->offset < 0 ? off_b : dcopy->offset; + /* Convert entire record to big-endian format. */ + actions[i].data1 = rte_cpu_to_be_32(actions[i].data1); + } else { + MLX5_ASSERT(item->spec); + data = flow_dv_fetch_field((const uint8_t *)item->spec + + field->offset, field->size); + /* Shift out the trailing masked bits from data. 
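+ * (off_b is the index of the lowest set bit of the mask, so the masked
+ * value ends up aligned to bit 0 of the programmed field)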
*/ + data = (data & mask) >> off_b; + actions[i].data1 = rte_cpu_to_be_32(data); + } + ++i; + ++field; + } while (field->size); + if (resource->actions_num == i) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "invalid modification flow item"); + resource->actions_num = i; + return 0; +} + +/** + * Convert modify-header set IPv4 address action to DV specification. + * + * @param[in,out] resource + * Pointer to the modify-header resource. + * @param[in] action + * Pointer to action specification. + * @param[out] error + * Pointer to the error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +flow_dv_convert_action_modify_ipv4 + (struct mlx5_flow_dv_modify_hdr_resource *resource, + const struct rte_flow_action *action, + struct rte_flow_error *error) +{ + const struct rte_flow_action_set_ipv4 *conf = + (const struct rte_flow_action_set_ipv4 *)(action->conf); + struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 }; + struct rte_flow_item_ipv4 ipv4; + struct rte_flow_item_ipv4 ipv4_mask; + + memset(&ipv4, 0, sizeof(ipv4)); + memset(&ipv4_mask, 0, sizeof(ipv4_mask)); + if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) { + ipv4.hdr.src_addr = conf->ipv4_addr; + ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr; + } else { + ipv4.hdr.dst_addr = conf->ipv4_addr; + ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr; + } + item.spec = &ipv4; + item.mask = &ipv4_mask; + return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource, + MLX5_MODIFICATION_TYPE_SET, error); +} + +/** + * Convert modify-header set IPv6 address action to DV specification. + * + * @param[in,out] resource + * Pointer to the modify-header resource. + * @param[in] action + * Pointer to action specification. + * @param[out] error + * Pointer to the error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +flow_dv_convert_action_modify_ipv6 + (struct mlx5_flow_dv_modify_hdr_resource *resource, + const struct rte_flow_action *action, + struct rte_flow_error *error) +{ + const struct rte_flow_action_set_ipv6 *conf = + (const struct rte_flow_action_set_ipv6 *)(action->conf); + struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 }; + struct rte_flow_item_ipv6 ipv6; + struct rte_flow_item_ipv6 ipv6_mask; + + memset(&ipv6, 0, sizeof(ipv6)); + memset(&ipv6_mask, 0, sizeof(ipv6_mask)); + if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) { + memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr, + sizeof(ipv6.hdr.src_addr)); + memcpy(&ipv6_mask.hdr.src_addr, + &rte_flow_item_ipv6_mask.hdr.src_addr, + sizeof(ipv6.hdr.src_addr)); + } else { + memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr, + sizeof(ipv6.hdr.dst_addr)); + memcpy(&ipv6_mask.hdr.dst_addr, + &rte_flow_item_ipv6_mask.hdr.dst_addr, + sizeof(ipv6.hdr.dst_addr)); + } + item.spec = &ipv6; + item.mask = &ipv6_mask; + return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource, + MLX5_MODIFICATION_TYPE_SET, error); +} + +/** + * Convert modify-header set MAC address action to DV specification. + * + * @param[in,out] resource + * Pointer to the modify-header resource. + * @param[in] action + * Pointer to action specification. + * @param[out] error + * Pointer to the error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. 
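+ *
+ * Minimal usage sketch (hypothetical caller; "res" and "err" are assumed
+ * to point at a zeroed modify-header resource and an error structure):
+ * @code
+ * const struct rte_flow_action_set_mac conf = {
+ *	.mac_addr = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
+ * };
+ * const struct rte_flow_action act = {
+ *	.type = RTE_FLOW_ACTION_TYPE_SET_MAC_SRC,
+ *	.conf = &conf,
+ * };
+ * int ret = flow_dv_convert_action_modify_mac(res, &act, err);
+ * @endcode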
+ */ +static int +flow_dv_convert_action_modify_mac + (struct mlx5_flow_dv_modify_hdr_resource *resource, + const struct rte_flow_action *action, + struct rte_flow_error *error) +{ + const struct rte_flow_action_set_mac *conf = + (const struct rte_flow_action_set_mac *)(action->conf); + struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH }; + struct rte_flow_item_eth eth; + struct rte_flow_item_eth eth_mask; + + memset(ð, 0, sizeof(eth)); + memset(ð_mask, 0, sizeof(eth_mask)); + if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) { + memcpy(ð.src.addr_bytes, &conf->mac_addr, + sizeof(eth.src.addr_bytes)); + memcpy(ð_mask.src.addr_bytes, + &rte_flow_item_eth_mask.src.addr_bytes, + sizeof(eth_mask.src.addr_bytes)); + } else { + memcpy(ð.dst.addr_bytes, &conf->mac_addr, + sizeof(eth.dst.addr_bytes)); + memcpy(ð_mask.dst.addr_bytes, + &rte_flow_item_eth_mask.dst.addr_bytes, + sizeof(eth_mask.dst.addr_bytes)); + } + item.spec = ð + item.mask = ð_mask; + return flow_dv_convert_modify_action(&item, modify_eth, NULL, resource, + MLX5_MODIFICATION_TYPE_SET, error); +} + +/** + * Convert modify-header set VLAN VID action to DV specification. + * + * @param[in,out] resource + * Pointer to the modify-header resource. + * @param[in] action + * Pointer to action specification. + * @param[out] error + * Pointer to the error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +flow_dv_convert_action_modify_vlan_vid + (struct mlx5_flow_dv_modify_hdr_resource *resource, + const struct rte_flow_action *action, + struct rte_flow_error *error) +{ + const struct rte_flow_action_of_set_vlan_vid *conf = + (const struct rte_flow_action_of_set_vlan_vid *)(action->conf); + int i = resource->actions_num; + struct mlx5_modification_cmd *actions = resource->actions; + struct field_modify_info *field = modify_vlan_out_first_vid; + + if (i >= MLX5_MAX_MODIFY_NUM) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "too many items to modify"); + actions[i] = (struct mlx5_modification_cmd) { + .action_type = MLX5_MODIFICATION_TYPE_SET, + .field = field->id, + .length = field->size, + .offset = field->offset, + }; + actions[i].data0 = rte_cpu_to_be_32(actions[i].data0); + actions[i].data1 = conf->vlan_vid; + actions[i].data1 = actions[i].data1 << 16; + resource->actions_num = ++i; + return 0; +} + +/** + * Convert modify-header set TP action to DV specification. + * + * @param[in,out] resource + * Pointer to the modify-header resource. + * @param[in] action + * Pointer to action specification. + * @param[in] items + * Pointer to rte_flow_item objects list. + * @param[in] attr + * Pointer to flow attributes structure. + * @param[in] dev_flow + * Pointer to the sub flow. + * @param[in] tunnel_decap + * Whether action is after tunnel decapsulation. + * @param[out] error + * Pointer to the error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. 
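+ *
+ * Whether the UDP or the TCP port is rewritten is not taken from the
+ * action itself but from the attributes filled by flow_dv_attr_init(),
+ * i.e. from the L4 item actually present in the pattern.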
+ */ +static int +flow_dv_convert_action_modify_tp + (struct mlx5_flow_dv_modify_hdr_resource *resource, + const struct rte_flow_action *action, + const struct rte_flow_item *items, + union flow_dv_attr *attr, struct mlx5_flow *dev_flow, + bool tunnel_decap, struct rte_flow_error *error) +{ + const struct rte_flow_action_set_tp *conf = + (const struct rte_flow_action_set_tp *)(action->conf); + struct rte_flow_item item; + struct rte_flow_item_udp udp; + struct rte_flow_item_udp udp_mask; + struct rte_flow_item_tcp tcp; + struct rte_flow_item_tcp tcp_mask; + struct field_modify_info *field; + + if (!attr->valid) + flow_dv_attr_init(items, attr, dev_flow, tunnel_decap); + if (attr->udp) { + memset(&udp, 0, sizeof(udp)); + memset(&udp_mask, 0, sizeof(udp_mask)); + if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) { + udp.hdr.src_port = conf->port; + udp_mask.hdr.src_port = + rte_flow_item_udp_mask.hdr.src_port; + } else { + udp.hdr.dst_port = conf->port; + udp_mask.hdr.dst_port = + rte_flow_item_udp_mask.hdr.dst_port; + } + item.type = RTE_FLOW_ITEM_TYPE_UDP; + item.spec = &udp; + item.mask = &udp_mask; + field = modify_udp; + } else { + MLX5_ASSERT(attr->tcp); + memset(&tcp, 0, sizeof(tcp)); + memset(&tcp_mask, 0, sizeof(tcp_mask)); + if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) { + tcp.hdr.src_port = conf->port; + tcp_mask.hdr.src_port = + rte_flow_item_tcp_mask.hdr.src_port; + } else { + tcp.hdr.dst_port = conf->port; + tcp_mask.hdr.dst_port = + rte_flow_item_tcp_mask.hdr.dst_port; + } + item.type = RTE_FLOW_ITEM_TYPE_TCP; + item.spec = &tcp; + item.mask = &tcp_mask; + field = modify_tcp; + } + return flow_dv_convert_modify_action(&item, field, NULL, resource, + MLX5_MODIFICATION_TYPE_SET, error); +} + +/** + * Convert modify-header set TTL action to DV specification. + * + * @param[in,out] resource + * Pointer to the modify-header resource. + * @param[in] action + * Pointer to action specification. + * @param[in] items + * Pointer to rte_flow_item objects list. + * @param[in] attr + * Pointer to flow attributes structure. + * @param[in] dev_flow + * Pointer to the sub flow. + * @param[in] tunnel_decap + * Whether action is after tunnel decapsulation. + * @param[out] error + * Pointer to the error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. 
+ */ +static int +flow_dv_convert_action_modify_ttl + (struct mlx5_flow_dv_modify_hdr_resource *resource, + const struct rte_flow_action *action, + const struct rte_flow_item *items, + union flow_dv_attr *attr, struct mlx5_flow *dev_flow, + bool tunnel_decap, struct rte_flow_error *error) +{ + const struct rte_flow_action_set_ttl *conf = + (const struct rte_flow_action_set_ttl *)(action->conf); + struct rte_flow_item item; + struct rte_flow_item_ipv4 ipv4; + struct rte_flow_item_ipv4 ipv4_mask; + struct rte_flow_item_ipv6 ipv6; + struct rte_flow_item_ipv6 ipv6_mask; + struct field_modify_info *field; + + if (!attr->valid) + flow_dv_attr_init(items, attr, dev_flow, tunnel_decap); + if (attr->ipv4) { + memset(&ipv4, 0, sizeof(ipv4)); + memset(&ipv4_mask, 0, sizeof(ipv4_mask)); + ipv4.hdr.time_to_live = conf->ttl_value; + ipv4_mask.hdr.time_to_live = 0xFF; + item.type = RTE_FLOW_ITEM_TYPE_IPV4; + item.spec = &ipv4; + item.mask = &ipv4_mask; + field = modify_ipv4; + } else { + MLX5_ASSERT(attr->ipv6); + memset(&ipv6, 0, sizeof(ipv6)); + memset(&ipv6_mask, 0, sizeof(ipv6_mask)); + ipv6.hdr.hop_limits = conf->ttl_value; + ipv6_mask.hdr.hop_limits = 0xFF; + item.type = RTE_FLOW_ITEM_TYPE_IPV6; + item.spec = &ipv6; + item.mask = &ipv6_mask; + field = modify_ipv6; + } + return flow_dv_convert_modify_action(&item, field, NULL, resource, + MLX5_MODIFICATION_TYPE_SET, error); +} + +/** + * Convert modify-header decrement TTL action to DV specification. + * + * @param[in,out] resource + * Pointer to the modify-header resource. + * @param[in] action + * Pointer to action specification. + * @param[in] items + * Pointer to rte_flow_item objects list. + * @param[in] attr + * Pointer to flow attributes structure. + * @param[in] dev_flow + * Pointer to the sub flow. + * @param[in] tunnel_decap + * Whether action is after tunnel decapsulation. + * @param[out] error + * Pointer to the error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +flow_dv_convert_action_modify_dec_ttl + (struct mlx5_flow_dv_modify_hdr_resource *resource, + const struct rte_flow_item *items, + union flow_dv_attr *attr, struct mlx5_flow *dev_flow, + bool tunnel_decap, struct rte_flow_error *error) +{ + struct rte_flow_item item; + struct rte_flow_item_ipv4 ipv4; + struct rte_flow_item_ipv4 ipv4_mask; + struct rte_flow_item_ipv6 ipv6; + struct rte_flow_item_ipv6 ipv6_mask; + struct field_modify_info *field; + + if (!attr->valid) + flow_dv_attr_init(items, attr, dev_flow, tunnel_decap); + if (attr->ipv4) { + memset(&ipv4, 0, sizeof(ipv4)); + memset(&ipv4_mask, 0, sizeof(ipv4_mask)); + ipv4.hdr.time_to_live = 0xFF; + ipv4_mask.hdr.time_to_live = 0xFF; + item.type = RTE_FLOW_ITEM_TYPE_IPV4; + item.spec = &ipv4; + item.mask = &ipv4_mask; + field = modify_ipv4; + } else { + MLX5_ASSERT(attr->ipv6); + memset(&ipv6, 0, sizeof(ipv6)); + memset(&ipv6_mask, 0, sizeof(ipv6_mask)); + ipv6.hdr.hop_limits = 0xFF; + ipv6_mask.hdr.hop_limits = 0xFF; + item.type = RTE_FLOW_ITEM_TYPE_IPV6; + item.spec = &ipv6; + item.mask = &ipv6_mask; + field = modify_ipv6; + } + return flow_dv_convert_modify_action(&item, field, NULL, resource, + MLX5_MODIFICATION_TYPE_ADD, error); +} + +/** + * Convert modify-header increment/decrement TCP Sequence number + * to DV specification. + * + * @param[in,out] resource + * Pointer to the modify-header resource. + * @param[in] action + * Pointer to action specification. + * @param[out] error + * Pointer to the error structure. 
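+ *
+ * A decrement of X is emulated by adding X * UINT32_MAX: each addition
+ * of UINT32_MAX wraps the 32-bit sequence number and effectively
+ * subtracts one, e.g. seq - 2 == (seq + 2 * 0xFFFFFFFF) mod 2^32.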
+ * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +flow_dv_convert_action_modify_tcp_seq + (struct mlx5_flow_dv_modify_hdr_resource *resource, + const struct rte_flow_action *action, + struct rte_flow_error *error) +{ + const rte_be32_t *conf = (const rte_be32_t *)(action->conf); + uint64_t value = rte_be_to_cpu_32(*conf); + struct rte_flow_item item; + struct rte_flow_item_tcp tcp; + struct rte_flow_item_tcp tcp_mask; + + memset(&tcp, 0, sizeof(tcp)); + memset(&tcp_mask, 0, sizeof(tcp_mask)); + if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ) + /* + * The HW has no decrement operation, only increment operation. + * To simulate decrement X from Y using increment operation + * we need to add UINT32_MAX X times to Y. + * Each adding of UINT32_MAX decrements Y by 1. + */ + value *= UINT32_MAX; + tcp.hdr.sent_seq = rte_cpu_to_be_32((uint32_t)value); + tcp_mask.hdr.sent_seq = RTE_BE32(UINT32_MAX); + item.type = RTE_FLOW_ITEM_TYPE_TCP; + item.spec = &tcp; + item.mask = &tcp_mask; + return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource, + MLX5_MODIFICATION_TYPE_ADD, error); +} + +/** + * Convert modify-header increment/decrement TCP Acknowledgment number + * to DV specification. + * + * @param[in,out] resource + * Pointer to the modify-header resource. + * @param[in] action + * Pointer to action specification. + * @param[out] error + * Pointer to the error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +flow_dv_convert_action_modify_tcp_ack + (struct mlx5_flow_dv_modify_hdr_resource *resource, + const struct rte_flow_action *action, + struct rte_flow_error *error) +{ + const rte_be32_t *conf = (const rte_be32_t *)(action->conf); + uint64_t value = rte_be_to_cpu_32(*conf); + struct rte_flow_item item; + struct rte_flow_item_tcp tcp; + struct rte_flow_item_tcp tcp_mask; + + memset(&tcp, 0, sizeof(tcp)); + memset(&tcp_mask, 0, sizeof(tcp_mask)); + if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK) + /* + * The HW has no decrement operation, only increment operation. + * To simulate decrement X from Y using increment operation + * we need to add UINT32_MAX X times to Y. + * Each adding of UINT32_MAX decrements Y by 1. + */ + value *= UINT32_MAX; + tcp.hdr.recv_ack = rte_cpu_to_be_32((uint32_t)value); + tcp_mask.hdr.recv_ack = RTE_BE32(UINT32_MAX); + item.type = RTE_FLOW_ITEM_TYPE_TCP; + item.spec = &tcp; + item.mask = &tcp_mask; + return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource, + MLX5_MODIFICATION_TYPE_ADD, error); +} + +static enum mlx5_modification_field reg_to_field[] = { + [REG_NONE] = MLX5_MODI_OUT_NONE, + [REG_A] = MLX5_MODI_META_DATA_REG_A, + [REG_B] = MLX5_MODI_META_DATA_REG_B, + [REG_C_0] = MLX5_MODI_META_REG_C_0, + [REG_C_1] = MLX5_MODI_META_REG_C_1, + [REG_C_2] = MLX5_MODI_META_REG_C_2, + [REG_C_3] = MLX5_MODI_META_REG_C_3, + [REG_C_4] = MLX5_MODI_META_REG_C_4, + [REG_C_5] = MLX5_MODI_META_REG_C_5, + [REG_C_6] = MLX5_MODI_META_REG_C_6, + [REG_C_7] = MLX5_MODI_META_REG_C_7, +}; + +/** + * Convert register set to DV specification. + * + * @param[in,out] resource + * Pointer to the modify-header resource. + * @param[in] action + * Pointer to action specification. + * @param[out] error + * Pointer to the error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. 
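+ *
+ * The register index carried in the internal set_tag action is
+ * translated to a modification field through the reg_to_field[] table
+ * above.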
+ */ +static int +flow_dv_convert_action_set_reg + (struct mlx5_flow_dv_modify_hdr_resource *resource, + const struct rte_flow_action *action, + struct rte_flow_error *error) +{ + const struct mlx5_rte_flow_action_set_tag *conf = action->conf; + struct mlx5_modification_cmd *actions = resource->actions; + uint32_t i = resource->actions_num; + + if (i >= MLX5_MAX_MODIFY_NUM) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "too many items to modify"); + MLX5_ASSERT(conf->id != REG_NONE); + MLX5_ASSERT(conf->id < RTE_DIM(reg_to_field)); + actions[i] = (struct mlx5_modification_cmd) { + .action_type = MLX5_MODIFICATION_TYPE_SET, + .field = reg_to_field[conf->id], + }; + actions[i].data0 = rte_cpu_to_be_32(actions[i].data0); + actions[i].data1 = rte_cpu_to_be_32(conf->data); + ++i; + resource->actions_num = i; + return 0; +} + +/** + * Convert SET_TAG action to DV specification. + * + * @param[in] dev + * Pointer to the rte_eth_dev structure. + * @param[in,out] resource + * Pointer to the modify-header resource. + * @param[in] conf + * Pointer to action specification. + * @param[out] error + * Pointer to the error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +flow_dv_convert_action_set_tag + (struct rte_eth_dev *dev, + struct mlx5_flow_dv_modify_hdr_resource *resource, + const struct rte_flow_action_set_tag *conf, + struct rte_flow_error *error) +{ + rte_be32_t data = rte_cpu_to_be_32(conf->data); + rte_be32_t mask = rte_cpu_to_be_32(conf->mask); + struct rte_flow_item item = { + .spec = &data, + .mask = &mask, + }; + struct field_modify_info reg_c_x[] = { + [1] = {0, 0, 0}, + }; + enum mlx5_modification_field reg_type; + int ret; + + ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error); + if (ret < 0) + return ret; + MLX5_ASSERT(ret != REG_NONE); + MLX5_ASSERT((unsigned int)ret < RTE_DIM(reg_to_field)); + reg_type = reg_to_field[ret]; + MLX5_ASSERT(reg_type > 0); + reg_c_x[0] = (struct field_modify_info){4, 0, reg_type}; + return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource, + MLX5_MODIFICATION_TYPE_SET, error); +} + +/** + * Convert internal COPY_REG action to DV specification. + * + * @param[in] dev + * Pointer to the rte_eth_dev structure. + * @param[in,out] res + * Pointer to the modify-header resource. + * @param[in] action + * Pointer to action specification. + * @param[out] error + * Pointer to the error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +flow_dv_convert_action_copy_mreg(struct rte_eth_dev *dev, + struct mlx5_flow_dv_modify_hdr_resource *res, + const struct rte_flow_action *action, + struct rte_flow_error *error) +{ + const struct mlx5_flow_action_copy_mreg *conf = action->conf; + rte_be32_t mask = RTE_BE32(UINT32_MAX); + struct rte_flow_item item = { + .spec = NULL, + .mask = &mask, + }; + struct field_modify_info reg_src[] = { + {4, 0, reg_to_field[conf->src]}, + {0, 0, 0}, + }; + struct field_modify_info reg_dst = { + .offset = 0, + .id = reg_to_field[conf->dst], + }; + /* Adjust reg_c[0] usage according to reported mask. */ + if (conf->dst == REG_C_0 || conf->src == REG_C_0) { + struct mlx5_priv *priv = dev->data->dev_private; + uint32_t reg_c0 = priv->sh->dv_regc0_mask; + + MLX5_ASSERT(reg_c0); + MLX5_ASSERT(priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY); + if (conf->dst == REG_C_0) { + /* Copy to reg_c[0], within mask only. 
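+ * The offset and mask below are derived from dv_regc0_mask so that only
+ * the reg_c_0 bits available for metadata are overwritten.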
*/ + reg_dst.offset = rte_bsf32(reg_c0); + /* + * Mask is ignoring the enianness, because + * there is no conversion in datapath. + */ +#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN + /* Copy from destination lower bits to reg_c[0]. */ + mask = reg_c0 >> reg_dst.offset; +#else + /* Copy from destination upper bits to reg_c[0]. */ + mask = reg_c0 << (sizeof(reg_c0) * CHAR_BIT - + rte_fls_u32(reg_c0)); +#endif + } else { + mask = rte_cpu_to_be_32(reg_c0); +#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN + /* Copy from reg_c[0] to destination lower bits. */ + reg_dst.offset = 0; +#else + /* Copy from reg_c[0] to destination upper bits. */ + reg_dst.offset = sizeof(reg_c0) * CHAR_BIT - + (rte_fls_u32(reg_c0) - + rte_bsf32(reg_c0)); +#endif + } + } + return flow_dv_convert_modify_action(&item, + reg_src, ®_dst, res, + MLX5_MODIFICATION_TYPE_COPY, + error); +} + +/** + * Convert MARK action to DV specification. This routine is used + * in extensive metadata only and requires metadata register to be + * handled. In legacy mode hardware tag resource is engaged. + * + * @param[in] dev + * Pointer to the rte_eth_dev structure. + * @param[in] conf + * Pointer to MARK action specification. + * @param[in,out] resource + * Pointer to the modify-header resource. + * @param[out] error + * Pointer to the error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +flow_dv_convert_action_mark(struct rte_eth_dev *dev, + const struct rte_flow_action_mark *conf, + struct mlx5_flow_dv_modify_hdr_resource *resource, + struct rte_flow_error *error) +{ + struct mlx5_priv *priv = dev->data->dev_private; + rte_be32_t mask = rte_cpu_to_be_32(MLX5_FLOW_MARK_MASK & + priv->sh->dv_mark_mask); + rte_be32_t data = rte_cpu_to_be_32(conf->id) & mask; + struct rte_flow_item item = { + .spec = &data, + .mask = &mask, + }; + struct field_modify_info reg_c_x[] = { + {4, 0, 0}, /* dynamic instead of MLX5_MODI_META_REG_C_1. */ + {0, 0, 0}, + }; + int reg; + + if (!mask) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, + NULL, "zero mark action mask"); + reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error); + if (reg < 0) + return reg; + MLX5_ASSERT(reg > 0); + if (reg == REG_C_0) { + uint32_t msk_c0 = priv->sh->dv_regc0_mask; + uint32_t shl_c0 = rte_bsf32(msk_c0); + + data = rte_cpu_to_be_32(rte_cpu_to_be_32(data) << shl_c0); + mask = rte_cpu_to_be_32(mask) & msk_c0; + mask = rte_cpu_to_be_32(mask << shl_c0); + } + reg_c_x[0].id = reg_to_field[reg]; + return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource, + MLX5_MODIFICATION_TYPE_SET, error); +} + +/** + * Get metadata register index for specified steering domain. + * + * @param[in] dev + * Pointer to the rte_eth_dev structure. + * @param[in] attr + * Attributes of flow to determine steering domain. + * @param[out] error + * Pointer to the error structure. + * + * @return + * positive index on success, a negative errno value otherwise + * and rte_errno is set. + */ +static enum modify_reg +flow_dv_get_metadata_reg(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + struct rte_flow_error *error) +{ + int reg = + mlx5_flow_get_reg_id(dev, attr->transfer ? + MLX5_METADATA_FDB : + attr->egress ? + MLX5_METADATA_TX : + MLX5_METADATA_RX, 0, error); + if (reg < 0) + return rte_flow_error_set(error, + ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, + NULL, "unavailable " + "metadata register"); + return reg; +} + +/** + * Convert SET_META action to DV specification. 
+ * + * @param[in] dev + * Pointer to the rte_eth_dev structure. + * @param[in,out] resource + * Pointer to the modify-header resource. + * @param[in] attr + * Attributes of flow that includes this item. + * @param[in] conf + * Pointer to action specification. + * @param[out] error + * Pointer to the error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +flow_dv_convert_action_set_meta + (struct rte_eth_dev *dev, + struct mlx5_flow_dv_modify_hdr_resource *resource, + const struct rte_flow_attr *attr, + const struct rte_flow_action_set_meta *conf, + struct rte_flow_error *error) +{ + uint32_t data = conf->data; + uint32_t mask = conf->mask; + struct rte_flow_item item = { + .spec = &data, + .mask = &mask, + }; + struct field_modify_info reg_c_x[] = { + [1] = {0, 0, 0}, + }; + int reg = flow_dv_get_metadata_reg(dev, attr, error); + + if (reg < 0) + return reg; + /* + * In datapath code there is no endianness + * coversions for perfromance reasons, all + * pattern conversions are done in rte_flow. + */ + if (reg == REG_C_0) { + struct mlx5_priv *priv = dev->data->dev_private; + uint32_t msk_c0 = priv->sh->dv_regc0_mask; + uint32_t shl_c0; + + MLX5_ASSERT(msk_c0); +#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN + shl_c0 = rte_bsf32(msk_c0); +#else + shl_c0 = sizeof(msk_c0) * CHAR_BIT - rte_fls_u32(msk_c0); +#endif + mask <<= shl_c0; + data <<= shl_c0; + MLX5_ASSERT(!(~msk_c0 & rte_cpu_to_be_32(mask))); + } + reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]}; + /* The routine expects parameters in memory as big-endian ones. */ + return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource, + MLX5_MODIFICATION_TYPE_SET, error); +} + +/** + * Convert modify-header set IPv4 DSCP action to DV specification. + * + * @param[in,out] resource + * Pointer to the modify-header resource. + * @param[in] action + * Pointer to action specification. + * @param[out] error + * Pointer to the error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +flow_dv_convert_action_modify_ipv4_dscp + (struct mlx5_flow_dv_modify_hdr_resource *resource, + const struct rte_flow_action *action, + struct rte_flow_error *error) +{ + const struct rte_flow_action_set_dscp *conf = + (const struct rte_flow_action_set_dscp *)(action->conf); + struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 }; + struct rte_flow_item_ipv4 ipv4; + struct rte_flow_item_ipv4 ipv4_mask; + + memset(&ipv4, 0, sizeof(ipv4)); + memset(&ipv4_mask, 0, sizeof(ipv4_mask)); + ipv4.hdr.type_of_service = conf->dscp; + ipv4_mask.hdr.type_of_service = RTE_IPV4_HDR_DSCP_MASK >> 2; + item.spec = &ipv4; + item.mask = &ipv4_mask; + return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource, + MLX5_MODIFICATION_TYPE_SET, error); +} + +/** + * Convert modify-header set IPv6 DSCP action to DV specification. + * + * @param[in,out] resource + * Pointer to the modify-header resource. + * @param[in] action + * Pointer to action specification. + * @param[out] error + * Pointer to the error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. 
+ */ +static int +flow_dv_convert_action_modify_ipv6_dscp + (struct mlx5_flow_dv_modify_hdr_resource *resource, + const struct rte_flow_action *action, + struct rte_flow_error *error) +{ + const struct rte_flow_action_set_dscp *conf = + (const struct rte_flow_action_set_dscp *)(action->conf); + struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 }; + struct rte_flow_item_ipv6 ipv6; + struct rte_flow_item_ipv6 ipv6_mask; + + memset(&ipv6, 0, sizeof(ipv6)); + memset(&ipv6_mask, 0, sizeof(ipv6_mask)); + /* + * Even though the DSCP bits offset of IPv6 is not byte aligned, + * rdma-core only accept the DSCP bits byte aligned start from + * bit 0 to 5 as to be compatible with IPv4. No need to shift the + * bits in IPv6 case as rdma-core requires byte aligned value. + */ + ipv6.hdr.vtc_flow = conf->dscp; + ipv6_mask.hdr.vtc_flow = RTE_IPV6_HDR_DSCP_MASK >> 22; + item.spec = &ipv6; + item.mask = &ipv6_mask; + return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource, + MLX5_MODIFICATION_TYPE_SET, error); +} + +/** + * Validate MARK item. + * + * @param[in] dev + * Pointer to the rte_eth_dev structure. + * @param[in] item + * Item specification. + * @param[in] attr + * Attributes of flow that includes this item. + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +flow_dv_validate_item_mark(struct rte_eth_dev *dev, + const struct rte_flow_item *item, + const struct rte_flow_attr *attr __rte_unused, + struct rte_flow_error *error) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_dev_config *config = &priv->config; + const struct rte_flow_item_mark *spec = item->spec; + const struct rte_flow_item_mark *mask = item->mask; + const struct rte_flow_item_mark nic_mask = { + .id = priv->sh->dv_mark_mask, + }; + int ret; + + if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "extended metadata feature" + " isn't enabled"); + if (!mlx5_flow_ext_mreg_supported(dev)) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "extended metadata register" + " isn't supported"); + if (!nic_mask.id) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "extended metadata register" + " isn't available"); + ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error); + if (ret < 0) + return ret; + if (!spec) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM_SPEC, + item->spec, + "data cannot be empty"); + if (spec->id >= (MLX5_FLOW_MARK_MAX & nic_mask.id)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, + &spec->id, + "mark id exceeds the limit"); + if (!mask) + mask = &nic_mask; + if (!mask->id) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL, + "mask cannot be zero"); + + ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask, + (const uint8_t *)&nic_mask, + sizeof(struct rte_flow_item_mark), + error); + if (ret < 0) + return ret; + return 0; +} + +/** + * Validate META item. + * + * @param[in] dev + * Pointer to the rte_eth_dev structure. + * @param[in] item + * Item specification. + * @param[in] attr + * Attributes of flow that includes this item. + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. 
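+ *
+ * In extended metadata mode the META item is matched on the per-domain
+ * register returned by flow_dv_get_metadata_reg(); matching on reg_b is
+ * rejected and, except for reg_a, the usable bits are limited by
+ * dv_meta_mask.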
+ */ +static int +flow_dv_validate_item_meta(struct rte_eth_dev *dev __rte_unused, + const struct rte_flow_item *item, + const struct rte_flow_attr *attr, + struct rte_flow_error *error) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_dev_config *config = &priv->config; + const struct rte_flow_item_meta *spec = item->spec; + const struct rte_flow_item_meta *mask = item->mask; + struct rte_flow_item_meta nic_mask = { + .data = UINT32_MAX + }; + int reg; + int ret; + + if (!spec) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM_SPEC, + item->spec, + "data cannot be empty"); + if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) { + if (!mlx5_flow_ext_mreg_supported(dev)) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "extended metadata register" + " isn't supported"); + reg = flow_dv_get_metadata_reg(dev, attr, error); + if (reg < 0) + return reg; + if (reg == REG_B) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "match on reg_b " + "isn't supported"); + if (reg != REG_A) + nic_mask.data = priv->sh->dv_meta_mask; + } + if (!mask) + mask = &rte_flow_item_meta_mask; + if (!mask->data) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL, + "mask cannot be zero"); + + ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask, + (const uint8_t *)&nic_mask, + sizeof(struct rte_flow_item_meta), + error); + return ret; +} + +/** + * Validate TAG item. + * + * @param[in] dev + * Pointer to the rte_eth_dev structure. + * @param[in] item + * Item specification. + * @param[in] attr + * Attributes of flow that includes this item. + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +flow_dv_validate_item_tag(struct rte_eth_dev *dev, + const struct rte_flow_item *item, + const struct rte_flow_attr *attr __rte_unused, + struct rte_flow_error *error) +{ + const struct rte_flow_item_tag *spec = item->spec; + const struct rte_flow_item_tag *mask = item->mask; + const struct rte_flow_item_tag nic_mask = { + .data = RTE_BE32(UINT32_MAX), + .index = 0xff, + }; + int ret; + + if (!mlx5_flow_ext_mreg_supported(dev)) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "extensive metadata register" + " isn't supported"); + if (!spec) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM_SPEC, + item->spec, + "data cannot be empty"); + if (!mask) + mask = &rte_flow_item_tag_mask; + if (!mask->data) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL, + "mask cannot be zero"); + + ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask, + (const uint8_t *)&nic_mask, + sizeof(struct rte_flow_item_tag), + error); + if (ret < 0) + return ret; + if (mask->index != 0xff) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL, + "partial mask for tag index" + " is not supported"); + ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, spec->index, error); + if (ret < 0) + return ret; + MLX5_ASSERT(ret != REG_NONE); + return 0; +} + +/** + * Validate vport item. + * + * @param[in] dev + * Pointer to the rte_eth_dev structure. + * @param[in] item + * Item specification. + * @param[in] attr + * Attributes of flow that includes this item. + * @param[in] item_flags + * Bit-fields that holds the items detected until now. + * @param[out] error + * Pointer to error structure. 
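+ *
+ * The item is accepted only on transfer flows, only once per rule and
+ * only with a full mask on the id field, e.g. (illustrative):
+ * @code
+ * struct rte_flow_item_port_id spec = { .id = dpdk_port_id };
+ * struct rte_flow_item_port_id mask = { .id = 0xffffffff };
+ * @endcode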
+ * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +flow_dv_validate_item_port_id(struct rte_eth_dev *dev, + const struct rte_flow_item *item, + const struct rte_flow_attr *attr, + uint64_t item_flags, + struct rte_flow_error *error) +{ + const struct rte_flow_item_port_id *spec = item->spec; + const struct rte_flow_item_port_id *mask = item->mask; + const struct rte_flow_item_port_id switch_mask = { + .id = 0xffffffff, + }; + struct mlx5_priv *esw_priv; + struct mlx5_priv *dev_priv; + int ret; + + if (!attr->transfer) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + NULL, + "match on port id is valid only" + " when transfer flag is enabled"); + if (item_flags & MLX5_FLOW_ITEM_PORT_ID) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "multiple source ports are not" + " supported"); + if (!mask) + mask = &switch_mask; + if (mask->id != 0xffffffff) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM_MASK, + mask, + "no support for partial mask on" + " \"id\" field"); + ret = mlx5_flow_item_acceptable + (item, (const uint8_t *)mask, + (const uint8_t *)&rte_flow_item_port_id_mask, + sizeof(struct rte_flow_item_port_id), + error); + if (ret) + return ret; + if (!spec) + return 0; + esw_priv = mlx5_port_to_eswitch_info(spec->id, false); + if (!esw_priv) + return rte_flow_error_set(error, rte_errno, + RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec, + "failed to obtain E-Switch info for" + " port"); + dev_priv = mlx5_dev_to_eswitch_info(dev); + if (!dev_priv) + return rte_flow_error_set(error, rte_errno, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "failed to obtain E-Switch info"); + if (esw_priv->domain_id != dev_priv->domain_id) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec, + "cannot match on a port from a" + " different E-Switch"); + return 0; +} + +/* + * GTP flags are contained in 1 byte of the format: + * ------------------------------------------- + * | bit | 0 - 2 | 3 | 4 | 5 | 6 | 7 | + * |-----------------------------------------| + * | value | Version | PT | Res | E | S | PN | + * ------------------------------------------- + * + * Matching is supported only for GTP flags E, S, PN. + */ +#define MLX5_GTP_FLAGS_MASK 0x07 + +/** + * Validate VLAN item. + * + * @param[in] item + * Item specification. + * @param[in] item_flags + * Bit-fields that holds the items detected until now. + * @param[in] dev + * Ethernet device flow is being created on. + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +flow_dv_validate_item_vlan(const struct rte_flow_item *item, + uint64_t item_flags, + struct rte_eth_dev *dev, + struct rte_flow_error *error) +{ + const struct rte_flow_item_vlan *mask = item->mask; + const struct rte_flow_item_vlan nic_mask = { + .tci = RTE_BE16(UINT16_MAX), + .inner_type = RTE_BE16(UINT16_MAX), + }; + const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); + int ret; + const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 | + MLX5_FLOW_LAYER_INNER_L4) : + (MLX5_FLOW_LAYER_OUTER_L3 | + MLX5_FLOW_LAYER_OUTER_L4); + const uint64_t vlanm = tunnel ? 
MLX5_FLOW_LAYER_INNER_VLAN : + MLX5_FLOW_LAYER_OUTER_VLAN; + + if (item_flags & vlanm) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "multiple VLAN layers not supported"); + else if ((item_flags & l34m) != 0) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "VLAN cannot follow L3/L4 layer"); + if (!mask) + mask = &rte_flow_item_vlan_mask; + ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask, + (const uint8_t *)&nic_mask, + sizeof(struct rte_flow_item_vlan), + error); + if (ret) + return ret; + if (!tunnel && mask->tci != RTE_BE16(0x0fff)) { + struct mlx5_priv *priv = dev->data->dev_private; + + if (priv->vmwa_context) { + /* + * Non-NULL context means we have a virtual machine + * and SR-IOV enabled, we have to create VLAN interface + * to make hypervisor to setup E-Switch vport + * context correctly. We avoid creating the multiple + * VLAN interfaces, so we cannot support VLAN tag mask. + */ + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "VLAN tag mask is not" + " supported in virtual" + " environment"); + } + } + return 0; +} + +/** + * Validate GTP item. + * + * @param[in] dev + * Pointer to the rte_eth_dev structure. + * @param[in] item + * Item specification. + * @param[in] item_flags + * Bit-fields that holds the items detected until now. + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +flow_dv_validate_item_gtp(struct rte_eth_dev *dev, + const struct rte_flow_item *item, + uint64_t item_flags, + struct rte_flow_error *error) +{ + struct mlx5_priv *priv = dev->data->dev_private; + const struct rte_flow_item_gtp *spec = item->spec; + const struct rte_flow_item_gtp *mask = item->mask; + const struct rte_flow_item_gtp nic_mask = { + .v_pt_rsv_flags = MLX5_GTP_FLAGS_MASK, + .msg_type = 0xff, + .teid = RTE_BE32(0xffffffff), + }; + + if (!priv->config.hca_attr.tunnel_stateless_gtp) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "GTP support is not enabled"); + if (item_flags & MLX5_FLOW_LAYER_TUNNEL) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "multiple tunnel layers not" + " supported"); + if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "no outer UDP layer found"); + if (!mask) + mask = &rte_flow_item_gtp_mask; + if (spec && spec->v_pt_rsv_flags & ~MLX5_GTP_FLAGS_MASK) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Match is supported for GTP" + " flags only"); + return mlx5_flow_item_acceptable + (item, (const uint8_t *)mask, + (const uint8_t *)&nic_mask, + sizeof(struct rte_flow_item_gtp), + error); +} + +/** + * Validate the pop VLAN action. + * + * @param[in] dev + * Pointer to the rte_eth_dev structure. + * @param[in] action_flags + * Holds the actions detected until now. + * @param[in] action + * Pointer to the pop vlan action. + * @param[in] item_flags + * The items found in this flow rule. + * @param[in] attr + * Pointer to flow attributes. + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. 
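+ *
+ * The action is rejected on egress, when combined with another VLAN
+ * action, when the pattern has no (outer) VLAN match, or when a port_id
+ * action precedes it.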
+ */
+static int
+flow_dv_validate_action_pop_vlan(struct rte_eth_dev *dev,
+ uint64_t action_flags,
+ const struct rte_flow_action *action,
+ uint64_t item_flags,
+ const struct rte_flow_attr *attr,
+ struct rte_flow_error *error)
+{
+ const struct mlx5_priv *priv = dev->data->dev_private;
+
+ (void)action;
+ (void)attr;
+ if (!priv->sh->pop_vlan_action)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "pop vlan action is not supported");
+ if (attr->egress)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ NULL,
+ "pop vlan action not supported for "
+ "egress");
+ if (action_flags & MLX5_FLOW_VLAN_ACTIONS)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION, action,
+ "no support for multiple VLAN "
+ "actions");
+ if (!(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "cannot pop vlan without a "
+ "match on (outer) vlan in the flow");
+ if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, action,
+ "wrong action order, port_id should "
+ "be after pop VLAN action");
+ if (!attr->transfer && priv->representor)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "pop vlan action for VF representor "
+ "not supported on NIC table");
+ return 0;
+}
+
+/**
+ * Get VLAN default info from vlan match info.
+ *
+ * @param[in] items
+ * The list of item specifications.
+ * @param[out] vlan
+ * Pointer to the VLAN info to fill in.
+ */
+static void
+flow_dev_get_vlan_info_from_items(const struct rte_flow_item *items,
+ struct rte_vlan_hdr *vlan)
+{
+ const struct rte_flow_item_vlan nic_mask = {
+ .tci = RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK |
+ MLX5DV_FLOW_VLAN_VID_MASK),
+ .inner_type = RTE_BE16(0xffff),
+ };
+
+ if (items == NULL)
+ return;
+ for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
+ int type = items->type;
+
+ if (type == RTE_FLOW_ITEM_TYPE_VLAN ||
+ type == MLX5_RTE_FLOW_ITEM_TYPE_VLAN)
+ break;
+ }
+ if (items->type != RTE_FLOW_ITEM_TYPE_END) {
+ const struct rte_flow_item_vlan *vlan_m = items->mask;
+ const struct rte_flow_item_vlan *vlan_v = items->spec;
+
+ /* If VLAN item in pattern doesn't contain data, return here. */
+ if (!vlan_v)
+ return;
+ if (!vlan_m)
+ vlan_m = &nic_mask;
+ /* Only full match values are accepted */
+ if ((vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) ==
+ MLX5DV_FLOW_VLAN_PCP_MASK_BE) {
+ vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
+ vlan->vlan_tci |=
+ rte_be_to_cpu_16(vlan_v->tci &
+ MLX5DV_FLOW_VLAN_PCP_MASK_BE);
+ }
+ if ((vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) ==
+ MLX5DV_FLOW_VLAN_VID_MASK_BE) {
+ vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
+ vlan->vlan_tci |=
+ rte_be_to_cpu_16(vlan_v->tci &
+ MLX5DV_FLOW_VLAN_VID_MASK_BE);
+ }
+ if (vlan_m->inner_type == nic_mask.inner_type)
+ vlan->eth_proto = rte_be_to_cpu_16(vlan_v->inner_type &
+ vlan_m->inner_type);
+ }
+}
+
+/**
+ * Validate the push VLAN action.
+ *
+ * @param[in] dev
+ * Pointer to the rte_eth_dev structure.
+ * @param[in] action_flags
+ * Holds the actions detected until now.
+ * @param[in] vlan_m
+ * VLAN item mask taken from the flow pattern, if any.
+ * @param[in] action
+ * Pointer to the action structure.
+ * @param[in] attr
+ * Pointer to flow attributes
+ * @param[out] error
+ * Pointer to error structure.
+ * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +flow_dv_validate_action_push_vlan(struct rte_eth_dev *dev, + uint64_t action_flags, + const struct rte_flow_item_vlan *vlan_m, + const struct rte_flow_action *action, + const struct rte_flow_attr *attr, + struct rte_flow_error *error) +{ + const struct rte_flow_action_of_push_vlan *push_vlan = action->conf; + const struct mlx5_priv *priv = dev->data->dev_private; + + if (!attr->transfer && attr->ingress) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, + NULL, + "push VLAN action not supported for " + "ingress"); + if (push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_VLAN) && + push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_QINQ)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, action, + "invalid vlan ethertype"); + if (action_flags & MLX5_FLOW_VLAN_ACTIONS) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, action, + "no support for multiple VLAN " + "actions"); + if (action_flags & MLX5_FLOW_ACTION_PORT_ID) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, action, + "wrong action order, port_id should " + "be after push VLAN"); + if (!attr->transfer && priv->representor) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "push vlan action for VF representor " + "not supported on NIC table"); + if (vlan_m && + (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) && + (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) != + MLX5DV_FLOW_VLAN_PCP_MASK_BE && + !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP) && + !(mlx5_flow_find_action + (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP))) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, action, + "not full match mask on VLAN PCP and " + "there is no of_set_vlan_pcp action, " + "push VLAN action cannot figure out " + "PCP value"); + if (vlan_m && + (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) && + (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) != + MLX5DV_FLOW_VLAN_VID_MASK_BE && + !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID) && + !(mlx5_flow_find_action + (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID))) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, action, + "not full match mask on VLAN VID and " + "there is no of_set_vlan_vid action, " + "push VLAN action cannot figure out " + "VID value"); + (void)attr; + return 0; +} + +/** + * Validate the set VLAN PCP. + * + * @param[in] action_flags + * Holds the actions detected until now. + * @param[in] actions + * Pointer to the list of actions remaining in the flow rule. + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. 
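+ *
+ * @note
+ *   PCP is the 3-bit 802.1Q priority field, hence the 0..7 range check
+ *   below; the action is also required to follow an OF_PUSH_VLAN action
+ *   and to precede any PORT_ID action in the same rule.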
+ */ +static int +flow_dv_validate_action_set_vlan_pcp(uint64_t action_flags, + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + const struct rte_flow_action *action = actions; + const struct rte_flow_action_of_set_vlan_pcp *conf = action->conf; + + if (conf->vlan_pcp > 7) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, action, + "VLAN PCP value is too big"); + if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, action, + "set VLAN PCP action must follow " + "the push VLAN action"); + if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, action, + "Multiple VLAN PCP modification are " + "not supported"); + if (action_flags & MLX5_FLOW_ACTION_PORT_ID) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, action, + "wrong action order, port_id should " + "be after set VLAN PCP"); + return 0; +} + +/** + * Validate the set VLAN VID. + * + * @param[in] item_flags + * Holds the items detected in this rule. + * @param[in] action_flags + * Holds the actions detected until now. + * @param[in] actions + * Pointer to the list of actions remaining in the flow rule. + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +flow_dv_validate_action_set_vlan_vid(uint64_t item_flags, + uint64_t action_flags, + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + const struct rte_flow_action *action = actions; + const struct rte_flow_action_of_set_vlan_vid *conf = action->conf; + + if (rte_be_to_cpu_16(conf->vlan_vid) > 0xFFE) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, action, + "VLAN VID value is too big"); + if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) && + !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN)) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, action, + "set VLAN VID action must follow push" + " VLAN action or match on VLAN item"); + if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, action, + "Multiple VLAN VID modifications are " + "not supported"); + if (action_flags & MLX5_FLOW_ACTION_PORT_ID) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, action, + "wrong action order, port_id should " + "be after set VLAN VID"); + return 0; +} + +/* + * Validate the FLAG action. + * + * @param[in] dev + * Pointer to the rte_eth_dev structure. + * @param[in] action_flags + * Holds the actions detected until now. + * @param[in] attr + * Pointer to flow attributes + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +flow_dv_validate_action_flag(struct rte_eth_dev *dev, + uint64_t action_flags, + const struct rte_flow_attr *attr, + struct rte_flow_error *error) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_dev_config *config = &priv->config; + int ret; + + /* Fall back if no extended metadata register support. */ + if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY) + return mlx5_flow_validate_action_flag(action_flags, attr, + error); + /* Extensive metadata mode requires registers. 
*/ + if (!mlx5_flow_ext_mreg_supported(dev)) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "no metadata registers " + "to support flag action"); + if (!(priv->sh->dv_mark_mask & MLX5_FLOW_MARK_DEFAULT)) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "extended metadata register" + " isn't available"); + ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error); + if (ret < 0) + return ret; + MLX5_ASSERT(ret > 0); + if (action_flags & MLX5_FLOW_ACTION_MARK) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "can't mark and flag in same flow"); + if (action_flags & MLX5_FLOW_ACTION_FLAG) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "can't have 2 flag" + " actions in same flow"); + return 0; +} + +/** + * Validate MARK action. + * + * @param[in] dev + * Pointer to the rte_eth_dev structure. + * @param[in] action + * Pointer to action. + * @param[in] action_flags + * Holds the actions detected until now. + * @param[in] attr + * Pointer to flow attributes + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +flow_dv_validate_action_mark(struct rte_eth_dev *dev, + const struct rte_flow_action *action, + uint64_t action_flags, + const struct rte_flow_attr *attr, + struct rte_flow_error *error) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_dev_config *config = &priv->config; + const struct rte_flow_action_mark *mark = action->conf; + int ret; + + /* Fall back if no extended metadata register support. */ + if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY) + return mlx5_flow_validate_action_mark(action, action_flags, + attr, error); + /* Extensive metadata mode requires registers. */ + if (!mlx5_flow_ext_mreg_supported(dev)) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "no metadata registers " + "to support mark action"); + if (!priv->sh->dv_mark_mask) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "extended metadata register" + " isn't available"); + ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error); + if (ret < 0) + return ret; + MLX5_ASSERT(ret > 0); + if (!mark) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, action, + "configuration cannot be null"); + if (mark->id >= (MLX5_FLOW_MARK_MAX & priv->sh->dv_mark_mask)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, + &mark->id, + "mark id exceeds the limit"); + if (action_flags & MLX5_FLOW_ACTION_FLAG) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "can't flag and mark in same flow"); + if (action_flags & MLX5_FLOW_ACTION_MARK) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "can't have 2 mark actions in same" + " flow"); + return 0; +} + +/** + * Validate SET_META action. + * + * @param[in] dev + * Pointer to the rte_eth_dev structure. + * @param[in] action + * Pointer to the action structure. + * @param[in] action_flags + * Holds the actions detected until now. + * @param[in] attr + * Pointer to flow attributes + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. 
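+ *
+ * @note
+ *   When the metadata register resolved for this domain is neither REG_A
+ *   nor REG_B, the usable bits are limited to priv->sh->dv_meta_mask
+ *   (register C0), which is why a wider mask is rejected below with
+ *   "meta data must be within reg C0".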
+ */ +static int +flow_dv_validate_action_set_meta(struct rte_eth_dev *dev, + const struct rte_flow_action *action, + uint64_t action_flags __rte_unused, + const struct rte_flow_attr *attr, + struct rte_flow_error *error) +{ + const struct rte_flow_action_set_meta *conf; + uint32_t nic_mask = UINT32_MAX; + int reg; + + if (!mlx5_flow_ext_mreg_supported(dev)) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, action, + "extended metadata register" + " isn't supported"); + reg = flow_dv_get_metadata_reg(dev, attr, error); + if (reg < 0) + return reg; + if (reg != REG_A && reg != REG_B) { + struct mlx5_priv *priv = dev->data->dev_private; + + nic_mask = priv->sh->dv_meta_mask; + } + if (!(action->conf)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, action, + "configuration cannot be null"); + conf = (const struct rte_flow_action_set_meta *)action->conf; + if (!conf->mask) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, action, + "zero mask doesn't have any effect"); + if (conf->mask & ~nic_mask) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, action, + "meta data must be within reg C0"); + return 0; +} + +/** + * Validate SET_TAG action. + * + * @param[in] dev + * Pointer to the rte_eth_dev structure. + * @param[in] action + * Pointer to the action structure. + * @param[in] action_flags + * Holds the actions detected until now. + * @param[in] attr + * Pointer to flow attributes + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +flow_dv_validate_action_set_tag(struct rte_eth_dev *dev, + const struct rte_flow_action *action, + uint64_t action_flags, + const struct rte_flow_attr *attr, + struct rte_flow_error *error) +{ + const struct rte_flow_action_set_tag *conf; + const uint64_t terminal_action_flags = + MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_QUEUE | + MLX5_FLOW_ACTION_RSS; + int ret; + + if (!mlx5_flow_ext_mreg_supported(dev)) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, action, + "extensive metadata register" + " isn't supported"); + if (!(action->conf)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, action, + "configuration cannot be null"); + conf = (const struct rte_flow_action_set_tag *)action->conf; + if (!conf->mask) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, action, + "zero mask doesn't have any effect"); + ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error); + if (ret < 0) + return ret; + if (!attr->transfer && attr->ingress && + (action_flags & terminal_action_flags)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, action, + "set_tag has no effect" + " with terminal actions"); + return 0; +} + +/** + * Validate count action. + * + * @param[in] dev + * Pointer to rte_eth_dev structure. + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. 
+ */ +static int +flow_dv_validate_action_count(struct rte_eth_dev *dev, + struct rte_flow_error *error) +{ + struct mlx5_priv *priv = dev->data->dev_private; + + if (!priv->config.devx) + goto notsup_err; +#ifdef HAVE_IBV_FLOW_DEVX_COUNTERS + return 0; +#endif +notsup_err: + return rte_flow_error_set + (error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "count action not supported"); +} + +/** + * Validate the L2 encap action. + * + * @param[in] dev + * Pointer to the rte_eth_dev structure. + * @param[in] action_flags + * Holds the actions detected until now. + * @param[in] action + * Pointer to the action structure. + * @param[in] attr + * Pointer to flow attributes. + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +flow_dv_validate_action_l2_encap(struct rte_eth_dev *dev, + uint64_t action_flags, + const struct rte_flow_action *action, + const struct rte_flow_attr *attr, + struct rte_flow_error *error) +{ + const struct mlx5_priv *priv = dev->data->dev_private; + + if (!(action->conf)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, action, + "configuration cannot be null"); + if (action_flags & MLX5_FLOW_ACTION_ENCAP) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "can only have a single encap action " + "in a flow"); + if (!attr->transfer && priv->representor) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "encap action for VF representor " + "not supported on NIC table"); + return 0; +} + +/** + * Validate a decap action. + * + * @param[in] dev + * Pointer to the rte_eth_dev structure. + * @param[in] action_flags + * Holds the actions detected until now. + * @param[in] attr + * Pointer to flow attributes + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +flow_dv_validate_action_decap(struct rte_eth_dev *dev, + uint64_t action_flags, + const struct rte_flow_attr *attr, + struct rte_flow_error *error) +{ + const struct mlx5_priv *priv = dev->data->dev_private; + + if (action_flags & MLX5_FLOW_XCAP_ACTIONS) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + action_flags & + MLX5_FLOW_ACTION_DECAP ? "can only " + "have a single decap action" : "decap " + "after encap is not supported"); + if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "can't have decap action after" + " modify action"); + if (attr->egress) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, + NULL, + "decap action not supported for " + "egress"); + if (!attr->transfer && priv->representor) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "decap action for VF representor " + "not supported on NIC table"); + return 0; +} + +const struct rte_flow_action_raw_decap empty_decap = {.data = NULL, .size = 0,}; + +/** + * Validate the raw encap and decap actions. + * + * @param[in] dev + * Pointer to the rte_eth_dev structure. + * @param[in] decap + * Pointer to the decap action. + * @param[in] encap + * Pointer to the encap action. + * @param[in] attr + * Pointer to flow attributes + * @param[in/out] action_flags + * Holds the actions detected until now. + * @param[out] actions_n + * pointer to the number of actions counter. 
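+ * @note
+ *   A raw buffer of at most MLX5_ENCAPSULATION_DECISION_SIZE bytes is
+ *   treated as carrying inner (L3) headers only, a larger one as carrying
+ *   a full outer header stack; the combination of the two sizes below
+ *   therefore selects L3 encap, L3 decap or two separate L2 actions.
+ *   This reading follows the checks in the function body and is not taken
+ *   from a hardware document.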
+ * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +flow_dv_validate_action_raw_encap_decap + (struct rte_eth_dev *dev, + const struct rte_flow_action_raw_decap *decap, + const struct rte_flow_action_raw_encap *encap, + const struct rte_flow_attr *attr, uint64_t *action_flags, + int *actions_n, struct rte_flow_error *error) +{ + const struct mlx5_priv *priv = dev->data->dev_private; + int ret; + + if (encap && (!encap->size || !encap->data)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "raw encap data cannot be empty"); + if (decap && encap) { + if (decap->size <= MLX5_ENCAPSULATION_DECISION_SIZE && + encap->size > MLX5_ENCAPSULATION_DECISION_SIZE) + /* L3 encap. */ + decap = NULL; + else if (encap->size <= + MLX5_ENCAPSULATION_DECISION_SIZE && + decap->size > + MLX5_ENCAPSULATION_DECISION_SIZE) + /* L3 decap. */ + encap = NULL; + else if (encap->size > + MLX5_ENCAPSULATION_DECISION_SIZE && + decap->size > + MLX5_ENCAPSULATION_DECISION_SIZE) + /* 2 L2 actions: encap and decap. */ + ; + else + return rte_flow_error_set(error, + ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, "unsupported too small " + "raw decap and too small raw " + "encap combination"); + } + if (decap) { + ret = flow_dv_validate_action_decap(dev, *action_flags, attr, + error); + if (ret < 0) + return ret; + *action_flags |= MLX5_FLOW_ACTION_DECAP; + ++(*actions_n); + } + if (encap) { + if (encap->size <= MLX5_ENCAPSULATION_DECISION_SIZE) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, + "small raw encap size"); + if (*action_flags & MLX5_FLOW_ACTION_ENCAP) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, + "more than one encap action"); + if (!attr->transfer && priv->representor) + return rte_flow_error_set + (error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "encap action for VF representor " + "not supported on NIC table"); + *action_flags |= MLX5_FLOW_ACTION_ENCAP; + ++(*actions_n); + } + return 0; +} + +/** + * Find existing encap/decap resource or create and register a new one. + * + * @param[in, out] dev + * Pointer to rte_eth_dev structure. + * @param[in, out] resource + * Pointer to encap/decap resource. + * @parm[in, out] dev_flow + * Pointer to the dev_flow. + * @param[out] error + * pointer to error structure. + * + * @return + * 0 on success otherwise -errno and errno is set. + */ +static int +flow_dv_encap_decap_resource_register + (struct rte_eth_dev *dev, + struct mlx5_flow_dv_encap_decap_resource *resource, + struct mlx5_flow *dev_flow, + struct rte_flow_error *error) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_ibv_shared *sh = priv->sh; + struct mlx5_flow_dv_encap_decap_resource *cache_resource; + struct mlx5dv_dr_domain *domain; + uint32_t idx = 0; + + resource->flags = dev_flow->dv.group ? 0 : 1; + if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) + domain = sh->fdb_domain; + else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX) + domain = sh->rx_domain; + else + domain = sh->tx_domain; + /* Lookup a matching resource from cache. 
 */
+ ILIST_FOREACH(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], sh->encaps_decaps, idx,
+ cache_resource, next) {
+ if (resource->reformat_type == cache_resource->reformat_type &&
+ resource->ft_type == cache_resource->ft_type &&
+ resource->flags == cache_resource->flags &&
+ resource->size == cache_resource->size &&
+ !memcmp((const void *)resource->buf,
+ (const void *)cache_resource->buf,
+ resource->size)) {
+ DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d++",
+ (void *)cache_resource,
+ rte_atomic32_read(&cache_resource->refcnt));
+ rte_atomic32_inc(&cache_resource->refcnt);
+ dev_flow->handle->dvh.rix_encap_decap = idx;
+ dev_flow->dv.encap_decap = cache_resource;
+ return 0;
+ }
+ }
+ /* Register new encap/decap resource. */
+ cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
+ &dev_flow->handle->dvh.rix_encap_decap);
+ if (!cache_resource)
+ return rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "cannot allocate resource memory");
+ *cache_resource = *resource;
+ cache_resource->verbs_action =
+ mlx5_glue->dv_create_flow_action_packet_reformat
+ (sh->ctx, cache_resource->reformat_type,
+ cache_resource->ft_type, domain, cache_resource->flags,
+ cache_resource->size,
+ (cache_resource->size ? cache_resource->buf : NULL));
+ if (!cache_resource->verbs_action) {
+ rte_free(cache_resource);
+ return rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "cannot create action");
+ }
+ rte_atomic32_init(&cache_resource->refcnt);
+ rte_atomic32_inc(&cache_resource->refcnt);
+ ILIST_INSERT(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], &sh->encaps_decaps,
+ dev_flow->handle->dvh.rix_encap_decap, cache_resource,
+ next);
+ dev_flow->dv.encap_decap = cache_resource;
+ DRV_LOG(DEBUG, "new encap/decap resource %p: refcnt %d++",
+ (void *)cache_resource,
+ rte_atomic32_read(&cache_resource->refcnt));
+ return 0;
+}
+
+/**
+ * Find existing table jump resource or create and register a new one.
+ *
+ * @param[in, out] dev
+ * Pointer to rte_eth_dev structure.
+ * @param[in, out] tbl
+ * Pointer to flow table resource.
+ * @param[in, out] dev_flow
+ * Pointer to the dev_flow.
+ * @param[out] error
+ * pointer to error structure.
+ *
+ * @return
+ * 0 on success otherwise -errno and errno is set.
+ */
+static int
+flow_dv_jump_tbl_resource_register
+ (struct rte_eth_dev *dev __rte_unused,
+ struct mlx5_flow_tbl_resource *tbl,
+ struct mlx5_flow *dev_flow,
+ struct rte_flow_error *error)
+{
+ struct mlx5_flow_tbl_data_entry *tbl_data =
+ container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
+ int cnt;
+
+ MLX5_ASSERT(tbl);
+ cnt = rte_atomic32_read(&tbl_data->jump.refcnt);
+ if (!cnt) {
+ tbl_data->jump.action =
+ mlx5_glue->dr_create_flow_action_dest_flow_tbl
+ (tbl->obj);
+ if (!tbl_data->jump.action)
+ return rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "cannot create jump action");
+ DRV_LOG(DEBUG, "new jump table resource %p: refcnt %d++",
+ (void *)&tbl_data->jump, cnt);
+ } else {
+ /* old jump should not make the table ref++. */
+ flow_dv_tbl_resource_release(dev, &tbl_data->tbl);
+ MLX5_ASSERT(tbl_data->jump.action);
+ DRV_LOG(DEBUG, "existing jump table resource %p: refcnt %d++",
+ (void *)&tbl_data->jump, cnt);
+ }
+ rte_atomic32_inc(&tbl_data->jump.refcnt);
+ dev_flow->handle->rix_jump = tbl_data->idx;
+ dev_flow->dv.jump = &tbl_data->jump;
+ return 0;
+}
+
+/**
+ * Find existing table port ID resource or create and register a new one.
+ *
+ * @param[in, out] dev
+ * Pointer to rte_eth_dev structure.
+ * @param[in, out] resource
+ * Pointer to port ID action resource.
+ * @param[in, out] dev_flow
+ * Pointer to the dev_flow.
+ * @param[out] error
+ * pointer to error structure.
+ *
+ * @return
+ * 0 on success otherwise -errno and errno is set.
+ */
+static int
+flow_dv_port_id_action_resource_register
+ (struct rte_eth_dev *dev,
+ struct mlx5_flow_dv_port_id_action_resource *resource,
+ struct mlx5_flow *dev_flow,
+ struct rte_flow_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_ibv_shared *sh = priv->sh;
+ struct mlx5_flow_dv_port_id_action_resource *cache_resource;
+ uint32_t idx = 0;
+
+ /* Lookup a matching resource from cache. */
+ ILIST_FOREACH(sh->ipool[MLX5_IPOOL_PORT_ID], sh->port_id_action_list,
+ idx, cache_resource, next) {
+ if (resource->port_id == cache_resource->port_id) {
+ DRV_LOG(DEBUG, "port id action resource %p: "
+ "refcnt %d++",
+ (void *)cache_resource,
+ rte_atomic32_read(&cache_resource->refcnt));
+ rte_atomic32_inc(&cache_resource->refcnt);
+ dev_flow->handle->rix_port_id_action = idx;
+ dev_flow->dv.port_id_action = cache_resource;
+ return 0;
+ }
+ }
+ /* Register new port id action resource. */
+ cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PORT_ID],
+ &dev_flow->handle->rix_port_id_action);
+ if (!cache_resource)
+ return rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "cannot allocate resource memory");
+ *cache_resource = *resource;
+ /*
+ * Depending on rdma_core version the glue routine calls
+ * either mlx5dv_dr_action_create_dest_ib_port(domain, ibv_port)
+ * or mlx5dv_dr_action_create_dest_vport(domain, vport_id).
+ */
+ cache_resource->action =
+ mlx5_glue->dr_create_flow_action_dest_port
+ (priv->sh->fdb_domain, resource->port_id);
+ if (!cache_resource->action) {
+ rte_free(cache_resource);
+ return rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "cannot create action");
+ }
+ rte_atomic32_init(&cache_resource->refcnt);
+ rte_atomic32_inc(&cache_resource->refcnt);
+ ILIST_INSERT(sh->ipool[MLX5_IPOOL_PORT_ID], &sh->port_id_action_list,
+ dev_flow->handle->rix_port_id_action, cache_resource,
+ next);
+ dev_flow->dv.port_id_action = cache_resource;
+ DRV_LOG(DEBUG, "new port id action resource %p: refcnt %d++",
+ (void *)cache_resource,
+ rte_atomic32_read(&cache_resource->refcnt));
+ return 0;
+}
+
+/**
+ * Find existing push vlan resource or create and register a new one.
+ *
+ * @param[in, out] dev
+ * Pointer to rte_eth_dev structure.
+ * @param[in, out] resource
+ * Pointer to push VLAN action resource.
+ * @param[in, out] dev_flow
+ * Pointer to the dev_flow.
+ * @param[out] error
+ * pointer to error structure.
+ *
+ * @return
+ * 0 on success otherwise -errno and errno is set.
+ */
+static int
+flow_dv_push_vlan_action_resource_register
+ (struct rte_eth_dev *dev,
+ struct mlx5_flow_dv_push_vlan_action_resource *resource,
+ struct mlx5_flow *dev_flow,
+ struct rte_flow_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_ibv_shared *sh = priv->sh;
+ struct mlx5_flow_dv_push_vlan_action_resource *cache_resource;
+ struct mlx5dv_dr_domain *domain;
+ uint32_t idx = 0;
+
+ /* Lookup a matching resource from cache. */
+ ILIST_FOREACH(sh->ipool[MLX5_IPOOL_PUSH_VLAN],
+ sh->push_vlan_action_list, idx, cache_resource, next) {
+ if (resource->vlan_tag == cache_resource->vlan_tag &&
+ resource->ft_type == cache_resource->ft_type) {
+ DRV_LOG(DEBUG, "push-VLAN action resource %p: "
+ "refcnt %d++",
+ (void *)cache_resource,
+ rte_atomic32_read(&cache_resource->refcnt));
+ rte_atomic32_inc(&cache_resource->refcnt);
+ dev_flow->handle->dvh.rix_push_vlan = idx;
+ dev_flow->dv.push_vlan_res = cache_resource;
+ return 0;
+ }
+ }
+ /* Register new push_vlan action resource. */
+ cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PUSH_VLAN],
+ &dev_flow->handle->dvh.rix_push_vlan);
+ if (!cache_resource)
+ return rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "cannot allocate resource memory");
+ *cache_resource = *resource;
+ if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
+ domain = sh->fdb_domain;
+ else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
+ domain = sh->rx_domain;
+ else
+ domain = sh->tx_domain;
+ cache_resource->action =
+ mlx5_glue->dr_create_flow_action_push_vlan(domain,
+ resource->vlan_tag);
+ if (!cache_resource->action) {
+ rte_free(cache_resource);
+ return rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "cannot create action");
+ }
+ rte_atomic32_init(&cache_resource->refcnt);
+ rte_atomic32_inc(&cache_resource->refcnt);
+ ILIST_INSERT(sh->ipool[MLX5_IPOOL_PUSH_VLAN],
+ &sh->push_vlan_action_list,
+ dev_flow->handle->dvh.rix_push_vlan,
+ cache_resource, next);
+ dev_flow->dv.push_vlan_res = cache_resource;
+ DRV_LOG(DEBUG, "new push vlan action resource %p: refcnt %d++",
+ (void *)cache_resource,
+ rte_atomic32_read(&cache_resource->refcnt));
+ return 0;
+}
+/**
+ * Get the size of a specific rte_flow_item_type.
+ *
+ * @param[in] item_type
+ * Tested rte_flow_item_type.
+ *
+ * @return
+ * sizeof() of the corresponding item struct, 0 if void or irrelevant.
+ */
+static size_t
+flow_dv_get_item_len(const enum rte_flow_item_type item_type)
+{
+ size_t retval;
+
+ switch (item_type) {
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ retval = sizeof(struct rte_flow_item_eth);
+ break;
+ case RTE_FLOW_ITEM_TYPE_VLAN:
+ retval = sizeof(struct rte_flow_item_vlan);
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ retval = sizeof(struct rte_flow_item_ipv4);
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ retval = sizeof(struct rte_flow_item_ipv6);
+ break;
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ retval = sizeof(struct rte_flow_item_udp);
+ break;
+ case RTE_FLOW_ITEM_TYPE_TCP:
+ retval = sizeof(struct rte_flow_item_tcp);
+ break;
+ case RTE_FLOW_ITEM_TYPE_VXLAN:
+ retval = sizeof(struct rte_flow_item_vxlan);
+ break;
+ case RTE_FLOW_ITEM_TYPE_GRE:
+ retval = sizeof(struct rte_flow_item_gre);
+ break;
+ case RTE_FLOW_ITEM_TYPE_NVGRE:
+ retval = sizeof(struct rte_flow_item_nvgre);
+ break;
+ case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
+ retval = sizeof(struct rte_flow_item_vxlan_gpe);
+ break;
+ case RTE_FLOW_ITEM_TYPE_MPLS:
+ retval = sizeof(struct rte_flow_item_mpls);
+ break;
+ case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through.
*/ + default: + retval = 0; + break; + } + return retval; +} + +#define MLX5_ENCAP_IPV4_VERSION 0x40 +#define MLX5_ENCAP_IPV4_IHL_MIN 0x05 +#define MLX5_ENCAP_IPV4_TTL_DEF 0x40 +#define MLX5_ENCAP_IPV6_VTC_FLOW 0x60000000 +#define MLX5_ENCAP_IPV6_HOP_LIMIT 0xff +#define MLX5_ENCAP_VXLAN_FLAGS 0x08000000 +#define MLX5_ENCAP_VXLAN_GPE_FLAGS 0x04 + +/** + * Convert the encap action data from list of rte_flow_item to raw buffer + * + * @param[in] items + * Pointer to rte_flow_item objects list. + * @param[out] buf + * Pointer to the output buffer. + * @param[out] size + * Pointer to the output buffer size. + * @param[out] error + * Pointer to the error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf, + size_t *size, struct rte_flow_error *error) +{ + struct rte_ether_hdr *eth = NULL; + struct rte_vlan_hdr *vlan = NULL; + struct rte_ipv4_hdr *ipv4 = NULL; + struct rte_ipv6_hdr *ipv6 = NULL; + struct rte_udp_hdr *udp = NULL; + struct rte_vxlan_hdr *vxlan = NULL; + struct rte_vxlan_gpe_hdr *vxlan_gpe = NULL; + struct rte_gre_hdr *gre = NULL; + size_t len; + size_t temp_size = 0; + + if (!items) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, "invalid empty data"); + for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) { + len = flow_dv_get_item_len(items->type); + if (len + temp_size > MLX5_ENCAP_MAX_LEN) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + (void *)items->type, + "items total size is too big" + " for encap action"); + rte_memcpy((void *)&buf[temp_size], items->spec, len); + switch (items->type) { + case RTE_FLOW_ITEM_TYPE_ETH: + eth = (struct rte_ether_hdr *)&buf[temp_size]; + break; + case RTE_FLOW_ITEM_TYPE_VLAN: + vlan = (struct rte_vlan_hdr *)&buf[temp_size]; + if (!eth) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + (void *)items->type, + "eth header not found"); + if (!eth->ether_type) + eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN); + break; + case RTE_FLOW_ITEM_TYPE_IPV4: + ipv4 = (struct rte_ipv4_hdr *)&buf[temp_size]; + if (!vlan && !eth) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + (void *)items->type, + "neither eth nor vlan" + " header found"); + if (vlan && !vlan->eth_proto) + vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV4); + else if (eth && !eth->ether_type) + eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4); + if (!ipv4->version_ihl) + ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION | + MLX5_ENCAP_IPV4_IHL_MIN; + if (!ipv4->time_to_live) + ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF; + break; + case RTE_FLOW_ITEM_TYPE_IPV6: + ipv6 = (struct rte_ipv6_hdr *)&buf[temp_size]; + if (!vlan && !eth) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + (void *)items->type, + "neither eth nor vlan" + " header found"); + if (vlan && !vlan->eth_proto) + vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV6); + else if (eth && !eth->ether_type) + eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV6); + if (!ipv6->vtc_flow) + ipv6->vtc_flow = + RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW); + if (!ipv6->hop_limits) + ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT; + break; + case RTE_FLOW_ITEM_TYPE_UDP: + udp = (struct rte_udp_hdr *)&buf[temp_size]; + if (!ipv4 && !ipv6) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + (void *)items->type, + "ip header not found"); + if (ipv4 && !ipv4->next_proto_id) 
+ ipv4->next_proto_id = IPPROTO_UDP; + else if (ipv6 && !ipv6->proto) + ipv6->proto = IPPROTO_UDP; + break; + case RTE_FLOW_ITEM_TYPE_VXLAN: + vxlan = (struct rte_vxlan_hdr *)&buf[temp_size]; + if (!udp) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + (void *)items->type, + "udp header not found"); + if (!udp->dst_port) + udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN); + if (!vxlan->vx_flags) + vxlan->vx_flags = + RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS); + break; + case RTE_FLOW_ITEM_TYPE_VXLAN_GPE: + vxlan_gpe = (struct rte_vxlan_gpe_hdr *)&buf[temp_size]; + if (!udp) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + (void *)items->type, + "udp header not found"); + if (!vxlan_gpe->proto) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + (void *)items->type, + "next protocol not found"); + if (!udp->dst_port) + udp->dst_port = + RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE); + if (!vxlan_gpe->vx_flags) + vxlan_gpe->vx_flags = + MLX5_ENCAP_VXLAN_GPE_FLAGS; + break; + case RTE_FLOW_ITEM_TYPE_GRE: + case RTE_FLOW_ITEM_TYPE_NVGRE: + gre = (struct rte_gre_hdr *)&buf[temp_size]; + if (!gre->proto) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + (void *)items->type, + "next protocol not found"); + if (!ipv4 && !ipv6) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + (void *)items->type, + "ip header not found"); + if (ipv4 && !ipv4->next_proto_id) + ipv4->next_proto_id = IPPROTO_GRE; + else if (ipv6 && !ipv6->proto) + ipv6->proto = IPPROTO_GRE; + break; + case RTE_FLOW_ITEM_TYPE_VOID: + break; + default: + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + (void *)items->type, + "unsupported item type"); + break; + } + temp_size += len; + } + *size = temp_size; + return 0; +} + +static int +flow_dv_zero_encap_udp_csum(void *data, struct rte_flow_error *error) +{ + struct rte_ether_hdr *eth = NULL; + struct rte_vlan_hdr *vlan = NULL; + struct rte_ipv6_hdr *ipv6 = NULL; + struct rte_udp_hdr *udp = NULL; + char *next_hdr; + uint16_t proto; + + eth = (struct rte_ether_hdr *)data; + next_hdr = (char *)(eth + 1); + proto = RTE_BE16(eth->ether_type); + + /* VLAN skipping */ + while (proto == RTE_ETHER_TYPE_VLAN || proto == RTE_ETHER_TYPE_QINQ) { + vlan = (struct rte_vlan_hdr *)next_hdr; + proto = RTE_BE16(vlan->eth_proto); + next_hdr += sizeof(struct rte_vlan_hdr); + } + + /* HW calculates IPv4 csum. no need to proceed */ + if (proto == RTE_ETHER_TYPE_IPV4) + return 0; + + /* non IPv4/IPv6 header. not supported */ + if (proto != RTE_ETHER_TYPE_IPV6) { + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, "Cannot offload non IPv4/IPv6"); + } + + ipv6 = (struct rte_ipv6_hdr *)next_hdr; + + /* ignore non UDP */ + if (ipv6->proto != IPPROTO_UDP) + return 0; + + udp = (struct rte_udp_hdr *)(ipv6 + 1); + udp->dgram_cksum = 0; + + return 0; +} + +/** + * Convert L2 encap action to DV specification. + * + * @param[in] dev + * Pointer to rte_eth_dev structure. + * @param[in] action + * Pointer to action structure. + * @param[in, out] dev_flow + * Pointer to the mlx5_flow. + * @param[in] transfer + * Mark if the flow is E-Switch flow. + * @param[out] error + * Pointer to the error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. 
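+ *
+ * @note
+ *   RAW_ENCAP data is copied verbatim, while VXLAN_ENCAP/NVGRE_ENCAP item
+ *   lists are first flattened by flow_dv_convert_encap_data(); in both
+ *   cases flow_dv_zero_encap_udp_csum() then clears the UDP checksum of
+ *   an IPv6-based tunnel header (the IPv4 case is left to the hardware).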
+ */ +static int +flow_dv_create_action_l2_encap(struct rte_eth_dev *dev, + const struct rte_flow_action *action, + struct mlx5_flow *dev_flow, + uint8_t transfer, + struct rte_flow_error *error) +{ + const struct rte_flow_item *encap_data; + const struct rte_flow_action_raw_encap *raw_encap_data; + struct mlx5_flow_dv_encap_decap_resource res = { + .reformat_type = + MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL, + .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB : + MLX5DV_FLOW_TABLE_TYPE_NIC_TX, + }; + + if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) { + raw_encap_data = + (const struct rte_flow_action_raw_encap *)action->conf; + res.size = raw_encap_data->size; + memcpy(res.buf, raw_encap_data->data, res.size); + } else { + if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP) + encap_data = + ((const struct rte_flow_action_vxlan_encap *) + action->conf)->definition; + else + encap_data = + ((const struct rte_flow_action_nvgre_encap *) + action->conf)->definition; + if (flow_dv_convert_encap_data(encap_data, res.buf, + &res.size, error)) + return -rte_errno; + } + if (flow_dv_zero_encap_udp_csum(res.buf, error)) + return -rte_errno; + if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, "can't create L2 encap action"); + return 0; +} + +/** + * Convert L2 decap action to DV specification. + * + * @param[in] dev + * Pointer to rte_eth_dev structure. + * @param[in, out] dev_flow + * Pointer to the mlx5_flow. + * @param[in] transfer + * Mark if the flow is E-Switch flow. + * @param[out] error + * Pointer to the error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +flow_dv_create_action_l2_decap(struct rte_eth_dev *dev, + struct mlx5_flow *dev_flow, + uint8_t transfer, + struct rte_flow_error *error) +{ + struct mlx5_flow_dv_encap_decap_resource res = { + .size = 0, + .reformat_type = + MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2, + .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB : + MLX5DV_FLOW_TABLE_TYPE_NIC_RX, + }; + + if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, "can't create L2 decap action"); + return 0; +} + +/** + * Convert raw decap/encap (L3 tunnel) action to DV specification. + * + * @param[in] dev + * Pointer to rte_eth_dev structure. + * @param[in] action + * Pointer to action structure. + * @param[in, out] dev_flow + * Pointer to the mlx5_flow. + * @param[in] attr + * Pointer to the flow attributes. + * @param[out] error + * Pointer to the error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +flow_dv_create_action_raw_encap(struct rte_eth_dev *dev, + const struct rte_flow_action *action, + struct mlx5_flow *dev_flow, + const struct rte_flow_attr *attr, + struct rte_flow_error *error) +{ + const struct rte_flow_action_raw_encap *encap_data; + struct mlx5_flow_dv_encap_decap_resource res; + + memset(&res, 0, sizeof(res)); + encap_data = (const struct rte_flow_action_raw_encap *)action->conf; + res.size = encap_data->size; + memcpy(res.buf, encap_data->data, res.size); + res.reformat_type = res.size < MLX5_ENCAPSULATION_DECISION_SIZE ? 
+ MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2 : + MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL; + if (attr->transfer) + res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB; + else + res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX : + MLX5DV_FLOW_TABLE_TYPE_NIC_RX; + if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, "can't create encap action"); + return 0; +} + +/** + * Create action push VLAN. + * + * @param[in] dev + * Pointer to rte_eth_dev structure. + * @param[in] attr + * Pointer to the flow attributes. + * @param[in] vlan + * Pointer to the vlan to push to the Ethernet header. + * @param[in, out] dev_flow + * Pointer to the mlx5_flow. + * @param[out] error + * Pointer to the error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +flow_dv_create_action_push_vlan(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_vlan_hdr *vlan, + struct mlx5_flow *dev_flow, + struct rte_flow_error *error) +{ + struct mlx5_flow_dv_push_vlan_action_resource res; + + memset(&res, 0, sizeof(res)); + res.vlan_tag = + rte_cpu_to_be_32(((uint32_t)vlan->eth_proto) << 16 | + vlan->vlan_tci); + if (attr->transfer) + res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB; + else + res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX : + MLX5DV_FLOW_TABLE_TYPE_NIC_RX; + return flow_dv_push_vlan_action_resource_register + (dev, &res, dev_flow, error); +} + +/** + * Validate the modify-header actions. + * + * @param[in] action_flags + * Holds the actions detected until now. + * @param[in] action + * Pointer to the modify action. + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +flow_dv_validate_action_modify_hdr(const uint64_t action_flags, + const struct rte_flow_action *action, + struct rte_flow_error *error) +{ + if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, + NULL, "action configuration not set"); + if (action_flags & MLX5_FLOW_ACTION_ENCAP) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "can't have encap action before" + " modify action"); + return 0; +} + +/** + * Validate the modify-header MAC address actions. + * + * @param[in] action_flags + * Holds the actions detected until now. + * @param[in] action + * Pointer to the modify action. + * @param[in] item_flags + * Holds the items detected. + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +flow_dv_validate_action_modify_mac(const uint64_t action_flags, + const struct rte_flow_action *action, + const uint64_t item_flags, + struct rte_flow_error *error) +{ + int ret = 0; + + ret = flow_dv_validate_action_modify_hdr(action_flags, action, error); + if (!ret) { + if (!(item_flags & MLX5_FLOW_LAYER_L2)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, + "no L2 item in pattern"); + } + return ret; +} + +/** + * Validate the modify-header IPv4 address actions. + * + * @param[in] action_flags + * Holds the actions detected until now. + * @param[in] action + * Pointer to the modify action. + * @param[in] item_flags + * Holds the items detected. 
+ * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +flow_dv_validate_action_modify_ipv4(const uint64_t action_flags, + const struct rte_flow_action *action, + const uint64_t item_flags, + struct rte_flow_error *error) +{ + int ret = 0; + uint64_t layer; + + ret = flow_dv_validate_action_modify_hdr(action_flags, action, error); + if (!ret) { + layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ? + MLX5_FLOW_LAYER_INNER_L3_IPV4 : + MLX5_FLOW_LAYER_OUTER_L3_IPV4; + if (!(item_flags & layer)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, + "no ipv4 item in pattern"); + } + return ret; +} + +/** + * Validate the modify-header IPv6 address actions. + * + * @param[in] action_flags + * Holds the actions detected until now. + * @param[in] action + * Pointer to the modify action. + * @param[in] item_flags + * Holds the items detected. + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +flow_dv_validate_action_modify_ipv6(const uint64_t action_flags, + const struct rte_flow_action *action, + const uint64_t item_flags, + struct rte_flow_error *error) +{ + int ret = 0; + uint64_t layer; + + ret = flow_dv_validate_action_modify_hdr(action_flags, action, error); + if (!ret) { + layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ? + MLX5_FLOW_LAYER_INNER_L3_IPV6 : + MLX5_FLOW_LAYER_OUTER_L3_IPV6; + if (!(item_flags & layer)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, + "no ipv6 item in pattern"); + } + return ret; +} + +/** + * Validate the modify-header TP actions. + * + * @param[in] action_flags + * Holds the actions detected until now. + * @param[in] action + * Pointer to the modify action. + * @param[in] item_flags + * Holds the items detected. + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +flow_dv_validate_action_modify_tp(const uint64_t action_flags, + const struct rte_flow_action *action, + const uint64_t item_flags, + struct rte_flow_error *error) +{ + int ret = 0; + uint64_t layer; + + ret = flow_dv_validate_action_modify_hdr(action_flags, action, error); + if (!ret) { + layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ? + MLX5_FLOW_LAYER_INNER_L4 : + MLX5_FLOW_LAYER_OUTER_L4; + if (!(item_flags & layer)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, "no transport layer " + "in pattern"); + } + return ret; +} + +/** + * Validate the modify-header actions of increment/decrement + * TCP Sequence-number. + * + * @param[in] action_flags + * Holds the actions detected until now. + * @param[in] action + * Pointer to the modify action. + * @param[in] item_flags + * Holds the items detected. + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +flow_dv_validate_action_modify_tcp_seq(const uint64_t action_flags, + const struct rte_flow_action *action, + const uint64_t item_flags, + struct rte_flow_error *error) +{ + int ret = 0; + uint64_t layer; + + ret = flow_dv_validate_action_modify_hdr(action_flags, action, error); + if (!ret) { + layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ? 
+ MLX5_FLOW_LAYER_INNER_L4_TCP : + MLX5_FLOW_LAYER_OUTER_L4_TCP; + if (!(item_flags & layer)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, "no TCP item in" + " pattern"); + if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ && + (action_flags & MLX5_FLOW_ACTION_DEC_TCP_SEQ)) || + (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ && + (action_flags & MLX5_FLOW_ACTION_INC_TCP_SEQ))) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, + "cannot decrease and increase" + " TCP sequence number" + " at the same time"); + } + return ret; +} + +/** + * Validate the modify-header actions of increment/decrement + * TCP Acknowledgment number. + * + * @param[in] action_flags + * Holds the actions detected until now. + * @param[in] action + * Pointer to the modify action. + * @param[in] item_flags + * Holds the items detected. + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +flow_dv_validate_action_modify_tcp_ack(const uint64_t action_flags, + const struct rte_flow_action *action, + const uint64_t item_flags, + struct rte_flow_error *error) +{ + int ret = 0; + uint64_t layer; + + ret = flow_dv_validate_action_modify_hdr(action_flags, action, error); + if (!ret) { + layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ? + MLX5_FLOW_LAYER_INNER_L4_TCP : + MLX5_FLOW_LAYER_OUTER_L4_TCP; + if (!(item_flags & layer)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, "no TCP item in" + " pattern"); + if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_ACK && + (action_flags & MLX5_FLOW_ACTION_DEC_TCP_ACK)) || + (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK && + (action_flags & MLX5_FLOW_ACTION_INC_TCP_ACK))) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, + "cannot decrease and increase" + " TCP acknowledgment number" + " at the same time"); + } + return ret; +} + +/** + * Validate the modify-header TTL actions. + * + * @param[in] action_flags + * Holds the actions detected until now. + * @param[in] action + * Pointer to the modify action. + * @param[in] item_flags + * Holds the items detected. + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +flow_dv_validate_action_modify_ttl(const uint64_t action_flags, + const struct rte_flow_action *action, + const uint64_t item_flags, + struct rte_flow_error *error) +{ + int ret = 0; + uint64_t layer; + + ret = flow_dv_validate_action_modify_hdr(action_flags, action, error); + if (!ret) { + layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ? + MLX5_FLOW_LAYER_INNER_L3 : + MLX5_FLOW_LAYER_OUTER_L3; + if (!(item_flags & layer)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, + "no IP protocol in pattern"); + } + return ret; +} + +/** + * Validate jump action. + * + * @param[in] action + * Pointer to the jump action. + * @param[in] action_flags + * Holds the actions detected until now. + * @param[in] attributes + * Pointer to flow attributes + * @param[in] external + * Action belongs to flow rule created by request external to PMD. + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. 
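+ *
+ * @note
+ *   The target group is translated through mlx5_flow_group_to_table() and
+ *   must differ from the group the rule is created in; combining JUMP with
+ *   another fate action or with METER is rejected below.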
+ */
+static int
+flow_dv_validate_action_jump(const struct rte_flow_action *action,
+ uint64_t action_flags,
+ const struct rte_flow_attr *attributes,
+ bool external, struct rte_flow_error *error)
+{
+ uint32_t target_group, table;
+ int ret = 0;
+
+ if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
+ MLX5_FLOW_FATE_ESWITCH_ACTIONS))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "can't have 2 fate actions in"
+ " same flow");
+ if (action_flags & MLX5_FLOW_ACTION_METER)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "jump with meter not supported");
+ if (!action->conf)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ NULL, "action configuration not set");
+ target_group =
+ ((const struct rte_flow_action_jump *)action->conf)->group;
+ ret = mlx5_flow_group_to_table(attributes, external, target_group,
+ true, &table, error);
+ if (ret)
+ return ret;
+ if (attributes->group == target_group)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "target group must be other than"
+ " the current flow group");
+ return 0;
+}
+
+/**
+ * Validate the port_id action.
+ *
+ * @param[in] dev
+ * Pointer to rte_eth_dev structure.
+ * @param[in] action_flags
+ * Bit-fields that hold the actions detected until now.
+ * @param[in] action
+ * Port_id RTE action structure.
+ * @param[in] attr
+ * Attributes of flow that includes this action.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_validate_action_port_id(struct rte_eth_dev *dev,
+ uint64_t action_flags,
+ const struct rte_flow_action *action,
+ const struct rte_flow_attr *attr,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_action_port_id *port_id;
+ struct mlx5_priv *act_priv;
+ struct mlx5_priv *dev_priv;
+ uint16_t port;
+
+ if (!attr->transfer)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "port id action is valid in transfer"
+ " mode only");
+ if (!action || !action->conf)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ NULL,
+ "port id action parameters must be"
+ " specified");
+ if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
+ MLX5_FLOW_FATE_ESWITCH_ACTIONS))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "can have only one fate action in"
+ " a flow");
+ dev_priv = mlx5_dev_to_eswitch_info(dev);
+ if (!dev_priv)
+ return rte_flow_error_set(error, rte_errno,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "failed to obtain E-Switch info");
+ port_id = action->conf;
+ port = port_id->original ? dev->data->port_id : port_id->id;
+ act_priv = mlx5_port_to_eswitch_info(port, false);
+ if (!act_priv)
+ return rte_flow_error_set
+ (error, rte_errno,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF, port_id,
+ "failed to obtain E-Switch port id for port");
+ if (act_priv->domain_id != dev_priv->domain_id)
+ return rte_flow_error_set
+ (error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "port does not belong to"
+ " E-Switch being configured");
+ return 0;
+}
+
+/**
+ * Get the maximum number of modify header actions.
+ *
+ * @param dev
+ * Pointer to rte_eth_dev structure.
+ * @param flags
+ * Flags bits to check if root level.
+ *
+ * @return
+ * Max number of modify header actions the device can support.
+ */ +static inline unsigned int +flow_dv_modify_hdr_action_max(struct rte_eth_dev *dev __rte_unused, + uint64_t flags) +{ + /* + * There's no way to directly query the max capacity from FW. + * The maximal value on root table should be assumed to be supported. + */ + if (!(flags & MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL)) + return MLX5_MAX_MODIFY_NUM; + else + return MLX5_ROOT_TBL_MODIFY_NUM; +} + +/** + * Validate the meter action. + * + * @param[in] dev + * Pointer to rte_eth_dev structure. + * @param[in] action_flags + * Bit-fields that holds the actions detected until now. + * @param[in] action + * Pointer to the meter action. + * @param[in] attr + * Attributes of flow that includes this action. + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_ernno is set. + */ +static int +mlx5_flow_validate_action_meter(struct rte_eth_dev *dev, + uint64_t action_flags, + const struct rte_flow_action *action, + const struct rte_flow_attr *attr, + struct rte_flow_error *error) +{ + struct mlx5_priv *priv = dev->data->dev_private; + const struct rte_flow_action_meter *am = action->conf; + struct mlx5_flow_meter *fm; + + if (!am) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "meter action conf is NULL"); + + if (action_flags & MLX5_FLOW_ACTION_METER) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "meter chaining not support"); + if (action_flags & MLX5_FLOW_ACTION_JUMP) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "meter with jump not support"); + if (!priv->mtr_en) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "meter action not supported"); + fm = mlx5_flow_meter_find(priv, am->mtr_id); + if (!fm) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "Meter not found"); + if (fm->ref_cnt && (!(fm->transfer == attr->transfer || + (!fm->ingress && !attr->ingress && attr->egress) || + (!fm->egress && !attr->egress && attr->ingress)))) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "Flow attributes are either invalid " + "or have a conflict with current " + "meter attributes"); + return 0; +} + +/** + * Validate the age action. + * + * @param[in] action_flags + * Holds the actions detected until now. + * @param[in] action + * Pointer to the age action. + * @param[in] dev + * Pointer to the Ethernet device structure. + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. 
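+ *
+ * Illustrative application-side sketch (generic rte_flow API; field names
+ * follow the rte_flow_action_age definition, values are arbitrary). The
+ * timeout is given in seconds and must stay below UINT16_MAX / 2 / 10,
+ * i.e. at most 3275 seconds, and only one AGE action per flow is accepted:
+ *
+ * @code
+ *    struct rte_flow_action_age age = { .timeout = 30 };
+ *    struct rte_flow_action actions[] = {
+ *        { .type = RTE_FLOW_ACTION_TYPE_AGE, .conf = &age },
+ *        { .type = RTE_FLOW_ACTION_TYPE_END },
+ *    };
+ * @endcode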
+ */ +static int +flow_dv_validate_action_age(uint64_t action_flags, + const struct rte_flow_action *action, + struct rte_eth_dev *dev, + struct rte_flow_error *error) +{ + struct mlx5_priv *priv = dev->data->dev_private; + const struct rte_flow_action_age *age = action->conf; + + if (!priv->config.devx || priv->counter_fallback) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "age action not supported"); + if (!(action->conf)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, action, + "configuration cannot be null"); + if (age->timeout >= UINT16_MAX / 2 / 10) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, action, + "Max age time: 3275 seconds"); + if (action_flags & MLX5_FLOW_ACTION_AGE) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "Duplicate age ctions set"); + return 0; +} + +/** + * Validate the modify-header IPv4 DSCP actions. + * + * @param[in] action_flags + * Holds the actions detected until now. + * @param[in] action + * Pointer to the modify action. + * @param[in] item_flags + * Holds the items detected. + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +flow_dv_validate_action_modify_ipv4_dscp(const uint64_t action_flags, + const struct rte_flow_action *action, + const uint64_t item_flags, + struct rte_flow_error *error) +{ + int ret = 0; + + ret = flow_dv_validate_action_modify_hdr(action_flags, action, error); + if (!ret) { + if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, + "no ipv4 item in pattern"); + } + return ret; +} + +/** + * Validate the modify-header IPv6 DSCP actions. + * + * @param[in] action_flags + * Holds the actions detected until now. + * @param[in] action + * Pointer to the modify action. + * @param[in] item_flags + * Holds the items detected. + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +flow_dv_validate_action_modify_ipv6_dscp(const uint64_t action_flags, + const struct rte_flow_action *action, + const uint64_t item_flags, + struct rte_flow_error *error) +{ + int ret = 0; + + ret = flow_dv_validate_action_modify_hdr(action_flags, action, error); + if (!ret) { + if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, + "no ipv6 item in pattern"); + } + return ret; +} + +/** + * Find existing modify-header resource or create and register a new one. + * + * @param dev[in, out] + * Pointer to rte_eth_dev structure. + * @param[in, out] resource + * Pointer to modify-header resource. + * @parm[in, out] dev_flow + * Pointer to the dev_flow. + * @param[out] error + * pointer to error structure. + * + * @return + * 0 on success otherwise -errno and errno is set. + */ +static int +flow_dv_modify_hdr_resource_register + (struct rte_eth_dev *dev, + struct mlx5_flow_dv_modify_hdr_resource *resource, + struct mlx5_flow *dev_flow, + struct rte_flow_error *error) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_ibv_shared *sh = priv->sh; + struct mlx5_flow_dv_modify_hdr_resource *cache_resource; + struct mlx5dv_dr_domain *ns; + uint32_t actions_len; + + resource->flags = dev_flow->dv.group ? 
0 : + MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL; + if (resource->actions_num > flow_dv_modify_hdr_action_max(dev, + resource->flags)) + return rte_flow_error_set(error, EOVERFLOW, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "too many modify header items"); + if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) + ns = sh->fdb_domain; + else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX) + ns = sh->tx_domain; + else + ns = sh->rx_domain; + /* Lookup a matching resource from cache. */ + actions_len = resource->actions_num * sizeof(resource->actions[0]); + LIST_FOREACH(cache_resource, &sh->modify_cmds, next) { + if (resource->ft_type == cache_resource->ft_type && + resource->actions_num == cache_resource->actions_num && + resource->flags == cache_resource->flags && + !memcmp((const void *)resource->actions, + (const void *)cache_resource->actions, + actions_len)) { + DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d++", + (void *)cache_resource, + rte_atomic32_read(&cache_resource->refcnt)); + rte_atomic32_inc(&cache_resource->refcnt); + dev_flow->handle->dvh.modify_hdr = cache_resource; + return 0; + } + } + /* Register new modify-header resource. */ + cache_resource = rte_calloc(__func__, 1, + sizeof(*cache_resource) + actions_len, 0); + if (!cache_resource) + return rte_flow_error_set(error, ENOMEM, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "cannot allocate resource memory"); + *cache_resource = *resource; + rte_memcpy(cache_resource->actions, resource->actions, actions_len); + cache_resource->verbs_action = + mlx5_glue->dv_create_flow_action_modify_header + (sh->ctx, cache_resource->ft_type, ns, + cache_resource->flags, actions_len, + (uint64_t *)cache_resource->actions); + if (!cache_resource->verbs_action) { + rte_free(cache_resource); + return rte_flow_error_set(error, ENOMEM, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, "cannot create action"); + } + rte_atomic32_init(&cache_resource->refcnt); + rte_atomic32_inc(&cache_resource->refcnt); + LIST_INSERT_HEAD(&sh->modify_cmds, cache_resource, next); + dev_flow->handle->dvh.modify_hdr = cache_resource; + DRV_LOG(DEBUG, "new modify-header resource %p: refcnt %d++", + (void *)cache_resource, + rte_atomic32_read(&cache_resource->refcnt)); + return 0; +} + +/** + * Get DV flow counter by index. + * + * @param[in] dev + * Pointer to the Ethernet device structure. + * @param[in] idx + * mlx5 flow counter index in the container. + * @param[out] ppool + * mlx5 flow counter pool in the container, + * + * @return + * Pointer to the counter, NULL otherwise. + */ +static struct mlx5_flow_counter * +flow_dv_counter_get_by_idx(struct rte_eth_dev *dev, + uint32_t idx, + struct mlx5_flow_counter_pool **ppool) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_pools_container *cont; + struct mlx5_flow_counter_pool *pool; + uint32_t batch = 0, age = 0; + + idx--; + age = MLX_CNT_IS_AGE(idx); + idx = age ? idx - MLX5_CNT_AGE_OFFSET : idx; + if (idx >= MLX5_CNT_BATCH_OFFSET) { + idx -= MLX5_CNT_BATCH_OFFSET; + batch = 1; + } + cont = MLX5_CNT_CONTAINER(priv->sh, batch, age); + MLX5_ASSERT(idx / MLX5_COUNTERS_PER_POOL < cont->n); + pool = cont->pools[idx / MLX5_COUNTERS_PER_POOL]; + MLX5_ASSERT(pool); + if (ppool) + *ppool = pool; + return MLX5_POOL_GET_CNT(pool, idx % MLX5_COUNTERS_PER_POOL); +} + +/** + * Get a pool by devx counter ID. + * + * @param[in] cont + * Pointer to the counter container. + * @param[in] id + * The counter devx ID. 
+ * + * @return + * The counter pool pointer if exists, NULL otherwise, + */ +static struct mlx5_flow_counter_pool * +flow_dv_find_pool_by_id(struct mlx5_pools_container *cont, int id) +{ + uint32_t i; + uint32_t n_valid = rte_atomic16_read(&cont->n_valid); + + for (i = 0; i < n_valid; i++) { + struct mlx5_flow_counter_pool *pool = cont->pools[i]; + int base = (pool->min_dcs->id / MLX5_COUNTERS_PER_POOL) * + MLX5_COUNTERS_PER_POOL; + + if (id >= base && id < base + MLX5_COUNTERS_PER_POOL) { + /* + * Move the pool to the head, as counter allocate + * always gets the first pool in the container. + */ + if (pool != TAILQ_FIRST(&cont->pool_list)) { + TAILQ_REMOVE(&cont->pool_list, pool, next); + TAILQ_INSERT_HEAD(&cont->pool_list, pool, next); + } + return pool; + } + } + return NULL; +} + +/** + * Allocate a new memory for the counter values wrapped by all the needed + * management. + * + * @param[in] dev + * Pointer to the Ethernet device structure. + * @param[in] raws_n + * The raw memory areas - each one for MLX5_COUNTERS_PER_POOL counters. + * + * @return + * The new memory management pointer on success, otherwise NULL and rte_errno + * is set. + */ +static struct mlx5_counter_stats_mem_mng * +flow_dv_create_counter_stat_mem_mng(struct rte_eth_dev *dev, int raws_n) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_ibv_shared *sh = priv->sh; + struct mlx5_devx_mkey_attr mkey_attr; + struct mlx5_counter_stats_mem_mng *mem_mng; + volatile struct flow_counter_stats *raw_data; + int size = (sizeof(struct flow_counter_stats) * + MLX5_COUNTERS_PER_POOL + + sizeof(struct mlx5_counter_stats_raw)) * raws_n + + sizeof(struct mlx5_counter_stats_mem_mng); + uint8_t *mem = rte_calloc(__func__, 1, size, sysconf(_SC_PAGESIZE)); + int i; + + if (!mem) { + rte_errno = ENOMEM; + return NULL; + } + mem_mng = (struct mlx5_counter_stats_mem_mng *)(mem + size) - 1; + size = sizeof(*raw_data) * MLX5_COUNTERS_PER_POOL * raws_n; + mem_mng->umem = mlx5_glue->devx_umem_reg(sh->ctx, mem, size, + IBV_ACCESS_LOCAL_WRITE); + if (!mem_mng->umem) { + rte_errno = errno; + rte_free(mem); + return NULL; + } + mkey_attr.addr = (uintptr_t)mem; + mkey_attr.size = size; + mkey_attr.umem_id = mem_mng->umem->umem_id; + mkey_attr.pd = sh->pdn; + mkey_attr.log_entity_size = 0; + mkey_attr.pg_access = 0; + mkey_attr.klm_array = NULL; + mkey_attr.klm_num = 0; + if (priv->config.hca_attr.relaxed_ordering_write && + priv->config.hca_attr.relaxed_ordering_read && + !haswell_broadwell_cpu) + mkey_attr.relaxed_ordering = 1; + mem_mng->dm = mlx5_devx_cmd_mkey_create(sh->ctx, &mkey_attr); + if (!mem_mng->dm) { + mlx5_glue->devx_umem_dereg(mem_mng->umem); + rte_errno = errno; + rte_free(mem); + return NULL; + } + mem_mng->raws = (struct mlx5_counter_stats_raw *)(mem + size); + raw_data = (volatile struct flow_counter_stats *)mem; + for (i = 0; i < raws_n; ++i) { + mem_mng->raws[i].mem_mng = mem_mng; + mem_mng->raws[i].data = raw_data + i * MLX5_COUNTERS_PER_POOL; + } + LIST_INSERT_HEAD(&sh->cmng.mem_mngs, mem_mng, next); + return mem_mng; +} + +/** + * Resize a counter container. + * + * @param[in] dev + * Pointer to the Ethernet device structure. + * @param[in] batch + * Whether the pool is for counter that was allocated by batch command. + * @param[in] age + * Whether the pool is for Aging counter. + * + * @return + * 0 on success, otherwise negative errno value and rte_errno is set. 
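+ *
+ * The resize follows a copy-then-publish pattern; a condensed sketch of the
+ * steps in the function body below (see the body for the authoritative
+ * version, including the statistics memory management setup):
+ *
+ * @code
+ *    pools = rte_calloc(__func__, 1, mem_size, 0);     // larger pool array
+ *    memcpy(pools, old_pools, old_size);               // keep existing pools
+ *    rte_spinlock_lock(&cont->resize_sl);              // publish atomically
+ *    cont->n = resize;
+ *    cont->mem_mng = mem_mng;
+ *    cont->pools = pools;
+ *    rte_spinlock_unlock(&cont->resize_sl);
+ *    rte_free(old_pools);
+ * @endcode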
+ */ +static int +flow_dv_container_resize(struct rte_eth_dev *dev, + uint32_t batch, uint32_t age) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, batch, + age); + struct mlx5_counter_stats_mem_mng *mem_mng = NULL; + void *old_pools = cont->pools; + uint32_t resize = cont->n + MLX5_CNT_CONTAINER_RESIZE; + uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize; + void *pools = rte_calloc(__func__, 1, mem_size, 0); + + if (!pools) { + rte_errno = ENOMEM; + return -ENOMEM; + } + if (old_pools) + memcpy(pools, old_pools, cont->n * + sizeof(struct mlx5_flow_counter_pool *)); + /* + * Fallback mode query the counter directly, no background query + * resources are needed. + */ + if (!priv->counter_fallback) { + int i; + + mem_mng = flow_dv_create_counter_stat_mem_mng(dev, + MLX5_CNT_CONTAINER_RESIZE + MLX5_MAX_PENDING_QUERIES); + if (!mem_mng) { + rte_free(pools); + return -ENOMEM; + } + for (i = 0; i < MLX5_MAX_PENDING_QUERIES; ++i) + LIST_INSERT_HEAD(&priv->sh->cmng.free_stat_raws, + mem_mng->raws + + MLX5_CNT_CONTAINER_RESIZE + + i, next); + } + rte_spinlock_lock(&cont->resize_sl); + cont->n = resize; + cont->mem_mng = mem_mng; + cont->pools = pools; + rte_spinlock_unlock(&cont->resize_sl); + if (old_pools) + rte_free(old_pools); + return 0; +} + +/** + * Query a devx flow counter. + * + * @param[in] dev + * Pointer to the Ethernet device structure. + * @param[in] cnt + * Index to the flow counter. + * @param[out] pkts + * The statistics value of packets. + * @param[out] bytes + * The statistics value of bytes. + * + * @return + * 0 on success, otherwise a negative errno value and rte_errno is set. + */ +static inline int +_flow_dv_query_count(struct rte_eth_dev *dev, uint32_t counter, uint64_t *pkts, + uint64_t *bytes) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_flow_counter_pool *pool = NULL; + struct mlx5_flow_counter *cnt; + struct mlx5_flow_counter_ext *cnt_ext = NULL; + int offset; + + cnt = flow_dv_counter_get_by_idx(dev, counter, &pool); + MLX5_ASSERT(pool); + if (counter < MLX5_CNT_BATCH_OFFSET) { + cnt_ext = MLX5_CNT_TO_CNT_EXT(pool, cnt); + if (priv->counter_fallback) + return mlx5_devx_cmd_flow_counter_query(cnt_ext->dcs, 0, + 0, pkts, bytes, 0, NULL, NULL, 0); + } + + rte_spinlock_lock(&pool->sl); + /* + * The single counters allocation may allocate smaller ID than the + * current allocated in parallel to the host reading. + * In this case the new counter values must be reported as 0. + */ + if (unlikely(cnt_ext && cnt_ext->dcs->id < pool->raw->min_dcs_id)) { + *pkts = 0; + *bytes = 0; + } else { + offset = MLX5_CNT_ARRAY_IDX(pool, cnt); + *pkts = rte_be_to_cpu_64(pool->raw->data[offset].hits); + *bytes = rte_be_to_cpu_64(pool->raw->data[offset].bytes); + } + rte_spinlock_unlock(&pool->sl); + return 0; +} + +/** + * Create and initialize a new counter pool. + * + * @param[in] dev + * Pointer to the Ethernet device structure. + * @param[out] dcs + * The devX counter handle. + * @param[in] batch + * Whether the pool is for counter that was allocated by batch command. + * @param[in] age + * Whether the pool is for counter that was allocated for aging. + * @param[in/out] cont_cur + * Pointer to the container pointer, it will be update in pool resize. + * + * @return + * The pool container pointer on success, NULL otherwise and rte_errno is set. 
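+ *
+ * The pool is allocated as a single block whose size depends on the pool
+ * type; a sketch of the layout computed below (CNT_SIZE, CNTEXT_SIZE and
+ * AGE_SIZE are the per-counter record sizes):
+ *
+ * @code
+ *    size  = sizeof(*pool);
+ *    size += MLX5_COUNTERS_PER_POOL * CNT_SIZE;                 // base counters
+ *    size += batch ? 0 : MLX5_COUNTERS_PER_POOL * CNTEXT_SIZE;  // extension area
+ *    size += age ? MLX5_COUNTERS_PER_POOL * AGE_SIZE : 0;       // aging area
+ * @endcode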
+ */ +static struct mlx5_flow_counter_pool * +flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs, + uint32_t batch, uint32_t age) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_flow_counter_pool *pool; + struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, batch, + age); + int16_t n_valid = rte_atomic16_read(&cont->n_valid); + uint32_t size = sizeof(*pool); + + if (cont->n == n_valid && flow_dv_container_resize(dev, batch, age)) + return NULL; + size += MLX5_COUNTERS_PER_POOL * CNT_SIZE; + size += (batch ? 0 : MLX5_COUNTERS_PER_POOL * CNTEXT_SIZE); + size += (!age ? 0 : MLX5_COUNTERS_PER_POOL * AGE_SIZE); + pool = rte_calloc(__func__, 1, size, 0); + if (!pool) { + rte_errno = ENOMEM; + return NULL; + } + pool->min_dcs = dcs; + if (!priv->counter_fallback) + pool->raw = cont->mem_mng->raws + n_valid % + MLX5_CNT_CONTAINER_RESIZE; + pool->raw_hw = NULL; + pool->type = 0; + pool->type |= (batch ? 0 : CNT_POOL_TYPE_EXT); + pool->type |= (!age ? 0 : CNT_POOL_TYPE_AGE); + rte_spinlock_init(&pool->sl); + /* + * The generation of the new allocated counters in this pool is 0, 2 in + * the pool generation makes all the counters valid for allocation. + * The start and end query generation protect the counters be released + * between the query and update gap period will not be reallocated + * without the last query finished and stats updated to the memory. + */ + rte_atomic64_set(&pool->start_query_gen, 0x2); + /* + * There's no background query thread for fallback mode, set the + * end_query_gen to the maximum value since no need to wait for + * statistics update. + */ + rte_atomic64_set(&pool->end_query_gen, priv->counter_fallback ? + INT64_MAX : 0x2); + TAILQ_INIT(&pool->counters); + TAILQ_INSERT_HEAD(&cont->pool_list, pool, next); + pool->index = n_valid; + cont->pools[n_valid] = pool; + /* Pool initialization must be updated before host thread access. */ + rte_cio_wmb(); + rte_atomic16_add(&cont->n_valid, 1); + return pool; +} + +/** + * Update the minimum dcs-id for aged or no-aged counter pool. + * + * @param[in] dev + * Pointer to the Ethernet device structure. + * @param[in] pool + * Current counter pool. + * @param[in] batch + * Whether the pool is for counter that was allocated by batch command. + * @param[in] age + * Whether the counter is for aging. + */ +static void +flow_dv_counter_update_min_dcs(struct rte_eth_dev *dev, + struct mlx5_flow_counter_pool *pool, + uint32_t batch, uint32_t age) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_flow_counter_pool *other; + struct mlx5_pools_container *cont; + + cont = MLX5_CNT_CONTAINER(priv->sh, batch, (age ^ 0x1)); + other = flow_dv_find_pool_by_id(cont, pool->min_dcs->id); + if (!other) + return; + if (pool->min_dcs->id < other->min_dcs->id) { + rte_atomic64_set(&other->a64_dcs, + rte_atomic64_read(&pool->a64_dcs)); + } else { + rte_atomic64_set(&pool->a64_dcs, + rte_atomic64_read(&other->a64_dcs)); + } +} +/** + * Prepare a new counter and/or a new counter pool. + * + * @param[in] dev + * Pointer to the Ethernet device structure. + * @param[out] cnt_free + * Where to put the pointer of a new counter. + * @param[in] batch + * Whether the pool is for counter that was allocated by batch command. + * @param[in] age + * Whether the pool is for counter that was allocated for aging. + * + * @return + * The counter pool pointer and @p cnt_free is set on success, + * NULL otherwise and rte_errno is set. 
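+ *
+ * The devx allocation differs between the two pool kinds; a sketch of the
+ * two calls made in the body below - single counters use a zero bulk
+ * bitmap, batch pools allocate 128 counters at once with bulk bitmap 0x4:
+ *
+ * @code
+ *    dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);   // single
+ *    dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4); // batch of 128
+ * @endcode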
+ */ +static struct mlx5_flow_counter_pool * +flow_dv_counter_pool_prepare(struct rte_eth_dev *dev, + struct mlx5_flow_counter **cnt_free, + uint32_t batch, uint32_t age) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_pools_container *cont; + struct mlx5_flow_counter_pool *pool; + struct mlx5_devx_obj *dcs = NULL; + struct mlx5_flow_counter *cnt; + uint32_t i; + + cont = MLX5_CNT_CONTAINER(priv->sh, batch, age); + if (!batch) { + /* bulk_bitmap must be 0 for single counter allocation. */ + dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0); + if (!dcs) + return NULL; + pool = flow_dv_find_pool_by_id(cont, dcs->id); + if (!pool) { + pool = flow_dv_pool_create(dev, dcs, batch, age); + if (!pool) { + mlx5_devx_cmd_destroy(dcs); + return NULL; + } + } else if (dcs->id < pool->min_dcs->id) { + rte_atomic64_set(&pool->a64_dcs, + (int64_t)(uintptr_t)dcs); + } + flow_dv_counter_update_min_dcs(dev, + pool, batch, age); + i = dcs->id % MLX5_COUNTERS_PER_POOL; + cnt = MLX5_POOL_GET_CNT(pool, i); + TAILQ_INSERT_HEAD(&pool->counters, cnt, next); + MLX5_GET_POOL_CNT_EXT(pool, i)->dcs = dcs; + *cnt_free = cnt; + return pool; + } + /* bulk_bitmap is in 128 counters units. */ + if (priv->config.hca_attr.flow_counter_bulk_alloc_bitmap & 0x4) + dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4); + if (!dcs) { + rte_errno = ENODATA; + return NULL; + } + pool = flow_dv_pool_create(dev, dcs, batch, age); + if (!pool) { + mlx5_devx_cmd_destroy(dcs); + return NULL; + } + for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) { + cnt = MLX5_POOL_GET_CNT(pool, i); + TAILQ_INSERT_HEAD(&pool->counters, cnt, next); + } + *cnt_free = MLX5_POOL_GET_CNT(pool, 0); + return pool; +} + +/** + * Search for existed shared counter. + * + * @param[in] cont + * Pointer to the relevant counter pool container. + * @param[in] id + * The shared counter ID to search. + * @param[out] ppool + * mlx5 flow counter pool in the container, + * + * @return + * NULL if not existed, otherwise pointer to the shared extend counter. + */ +static struct mlx5_flow_counter_ext * +flow_dv_counter_shared_search(struct mlx5_pools_container *cont, uint32_t id, + struct mlx5_flow_counter_pool **ppool) +{ + struct mlx5_flow_counter_ext *cnt; + struct mlx5_flow_counter_pool *pool; + uint32_t i, j; + uint32_t n_valid = rte_atomic16_read(&cont->n_valid); + + for (i = 0; i < n_valid; i++) { + pool = cont->pools[i]; + for (j = 0; j < MLX5_COUNTERS_PER_POOL; ++j) { + cnt = MLX5_GET_POOL_CNT_EXT(pool, j); + if (cnt->ref_cnt && cnt->shared && cnt->id == id) { + if (ppool) + *ppool = cont->pools[i]; + return cnt; + } + } + } + return NULL; +} + +/** + * Allocate a flow counter. + * + * @param[in] dev + * Pointer to the Ethernet device structure. + * @param[in] shared + * Indicate if this counter is shared with other flows. + * @param[in] id + * Counter identifier. + * @param[in] group + * Counter flow group. + * @param[in] age + * Whether the counter was allocated for aging. + * + * @return + * Index to flow counter on success, 0 otherwise and rte_errno is set. 
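+ *
+ * The returned index is 1-based, so that 0 can signal failure, and encodes
+ * the pool, the offset inside the pool and the batch/age properties; a
+ * sketch of the encoding done in the body below, which
+ * flow_dv_counter_get_by_idx() reverses:
+ *
+ * @code
+ *    cnt_idx  = MLX5_MAKE_CNT_IDX(pool->index, offset_in_pool);
+ *    cnt_idx += batch * MLX5_CNT_BATCH_OFFSET;
+ *    cnt_idx += age * MLX5_CNT_AGE_OFFSET;
+ * @endcode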
+ */ +static uint32_t +flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t shared, uint32_t id, + uint16_t group, uint32_t age) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_flow_counter_pool *pool = NULL; + struct mlx5_flow_counter *cnt_free = NULL; + struct mlx5_flow_counter_ext *cnt_ext = NULL; + /* + * Currently group 0 flow counter cannot be assigned to a flow if it is + * not the first one in the batch counter allocation, so it is better + * to allocate counters one by one for these flows in a separate + * container. + * A counter can be shared between different groups so need to take + * shared counters from the single container. + */ + uint32_t batch = (group && !shared && !priv->counter_fallback) ? 1 : 0; + struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, batch, + age); + uint32_t cnt_idx; + + if (!priv->config.devx) { + rte_errno = ENOTSUP; + return 0; + } + if (shared) { + cnt_ext = flow_dv_counter_shared_search(cont, id, &pool); + if (cnt_ext) { + if (cnt_ext->ref_cnt + 1 == 0) { + rte_errno = E2BIG; + return 0; + } + cnt_ext->ref_cnt++; + cnt_idx = pool->index * MLX5_COUNTERS_PER_POOL + + (cnt_ext->dcs->id % MLX5_COUNTERS_PER_POOL) + + 1; + return cnt_idx; + } + } + /* Pools which has a free counters are in the start. */ + TAILQ_FOREACH(pool, &cont->pool_list, next) { + /* + * The free counter reset values must be updated between the + * counter release to the counter allocation, so, at least one + * query must be done in this time. ensure it by saving the + * query generation in the release time. + * The free list is sorted according to the generation - so if + * the first one is not updated, all the others are not + * updated too. + */ + cnt_free = TAILQ_FIRST(&pool->counters); + if (cnt_free && cnt_free->query_gen < + rte_atomic64_read(&pool->end_query_gen)) + break; + cnt_free = NULL; + } + if (!cnt_free) { + pool = flow_dv_counter_pool_prepare(dev, &cnt_free, batch, age); + if (!pool) + return 0; + } + if (!batch) + cnt_ext = MLX5_CNT_TO_CNT_EXT(pool, cnt_free); + /* Create a DV counter action only in the first time usage. */ + if (!cnt_free->action) { + uint16_t offset; + struct mlx5_devx_obj *dcs; + + if (batch) { + offset = MLX5_CNT_ARRAY_IDX(pool, cnt_free); + dcs = pool->min_dcs; + } else { + offset = 0; + dcs = cnt_ext->dcs; + } + cnt_free->action = mlx5_glue->dv_create_flow_action_counter + (dcs->obj, offset); + if (!cnt_free->action) { + rte_errno = errno; + return 0; + } + } + cnt_idx = MLX5_MAKE_CNT_IDX(pool->index, + MLX5_CNT_ARRAY_IDX(pool, cnt_free)); + cnt_idx += batch * MLX5_CNT_BATCH_OFFSET; + cnt_idx += age * MLX5_CNT_AGE_OFFSET; + /* Update the counter reset values. */ + if (_flow_dv_query_count(dev, cnt_idx, &cnt_free->hits, + &cnt_free->bytes)) + return 0; + if (cnt_ext) { + cnt_ext->shared = shared; + cnt_ext->ref_cnt = 1; + cnt_ext->id = id; + } + if (!priv->counter_fallback && !priv->sh->cmng.query_thread_on) + /* Start the asynchronous batch query by the host thread. */ + mlx5_set_query_alarm(priv->sh); + TAILQ_REMOVE(&pool->counters, cnt_free, next); + if (TAILQ_EMPTY(&pool->counters)) { + /* Move the pool to the end of the container pool list. */ + TAILQ_REMOVE(&cont->pool_list, pool, next); + TAILQ_INSERT_TAIL(&cont->pool_list, pool, next); + } + return cnt_idx; +} + +/** + * Get age param from counter index. + * + * @param[in] dev + * Pointer to the Ethernet device structure. + * @param[in] counter + * Index to the counter handler. + * + * @return + * The aging parameter specified for the counter index. 
+ */ +static struct mlx5_age_param* +flow_dv_counter_idx_get_age(struct rte_eth_dev *dev, + uint32_t counter) +{ + struct mlx5_flow_counter *cnt; + struct mlx5_flow_counter_pool *pool = NULL; + + flow_dv_counter_get_by_idx(dev, counter, &pool); + counter = (counter - 1) % MLX5_COUNTERS_PER_POOL; + cnt = MLX5_POOL_GET_CNT(pool, counter); + return MLX5_CNT_TO_AGE(cnt); +} + +/** + * Remove a flow counter from aged counter list. + * + * @param[in] dev + * Pointer to the Ethernet device structure. + * @param[in] counter + * Index to the counter handler. + * @param[in] cnt + * Pointer to the counter handler. + */ +static void +flow_dv_counter_remove_from_age(struct rte_eth_dev *dev, + uint32_t counter, struct mlx5_flow_counter *cnt) +{ + struct mlx5_age_info *age_info; + struct mlx5_age_param *age_param; + struct mlx5_priv *priv = dev->data->dev_private; + + age_info = GET_PORT_AGE_INFO(priv); + age_param = flow_dv_counter_idx_get_age(dev, counter); + if (rte_atomic16_cmpset((volatile uint16_t *) + &age_param->state, + AGE_CANDIDATE, AGE_FREE) + != AGE_CANDIDATE) { + /** + * We need the lock even it is age timeout, + * since counter may still in process. + */ + rte_spinlock_lock(&age_info->aged_sl); + TAILQ_REMOVE(&age_info->aged_counters, cnt, next); + rte_spinlock_unlock(&age_info->aged_sl); + } + rte_atomic16_set(&age_param->state, AGE_FREE); +} +/** + * Release a flow counter. + * + * @param[in] dev + * Pointer to the Ethernet device structure. + * @param[in] counter + * Index to the counter handler. + */ +static void +flow_dv_counter_release(struct rte_eth_dev *dev, uint32_t counter) +{ + struct mlx5_flow_counter_pool *pool = NULL; + struct mlx5_flow_counter *cnt; + struct mlx5_flow_counter_ext *cnt_ext = NULL; + + if (!counter) + return; + cnt = flow_dv_counter_get_by_idx(dev, counter, &pool); + MLX5_ASSERT(pool); + if (counter < MLX5_CNT_BATCH_OFFSET) { + cnt_ext = MLX5_CNT_TO_CNT_EXT(pool, cnt); + if (cnt_ext && --cnt_ext->ref_cnt) + return; + } + if (IS_AGE_POOL(pool)) + flow_dv_counter_remove_from_age(dev, counter, cnt); + /* Put the counter in the end - the last updated one. */ + TAILQ_INSERT_TAIL(&pool->counters, cnt, next); + /* + * Counters released between query trigger and handler need + * to wait the next round of query. Since the packets arrive + * in the gap period will not be taken into account to the + * old counter. + */ + cnt->query_gen = rte_atomic64_read(&pool->start_query_gen); +} + +/** + * Verify the @p attributes will be correctly understood by the NIC and store + * them in the @p flow if everything is correct. + * + * @param[in] dev + * Pointer to dev struct. + * @param[in] attributes + * Pointer to flow attributes + * @param[in] external + * This flow rule is created by request external to PMD. + * @param[out] error + * Pointer to error structure. + * + * @return + * - 0 on success and non root table. + * - 1 on success and root table. + * - a negative errno value otherwise and rte_errno is set. 
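+ *
+ * Attributes that pass this check specify exactly one direction and a
+ * priority below the configured maximum; a minimal example (generic
+ * rte_flow API, values are arbitrary):
+ *
+ * @code
+ *    struct rte_flow_attr attr = {
+ *        .group = 1,      // non-zero groups need HAVE_MLX5DV_DR support
+ *        .priority = 0,   // must stay below priv->config.flow_prio - 1
+ *        .ingress = 1,    // exactly one of ingress/egress must be set
+ *    };
+ * @endcode
+ *
+ * Transfer (E-Switch) attributes additionally require dv_esw_en, a master
+ * or representor device, and may not request egress.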
+ */ +static int +flow_dv_validate_attributes(struct rte_eth_dev *dev, + const struct rte_flow_attr *attributes, + bool external __rte_unused, + struct rte_flow_error *error) +{ + struct mlx5_priv *priv = dev->data->dev_private; + uint32_t priority_max = priv->config.flow_prio - 1; + int ret = 0; + +#ifndef HAVE_MLX5DV_DR + if (attributes->group) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_GROUP, + NULL, + "groups are not supported"); +#else + uint32_t table = 0; + + ret = mlx5_flow_group_to_table(attributes, external, + attributes->group, !!priv->fdb_def_rule, + &table, error); + if (ret) + return ret; + if (!table) + ret = MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL; +#endif + if (attributes->priority != MLX5_FLOW_PRIO_RSVD && + attributes->priority >= priority_max) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, + NULL, + "priority out of range"); + if (attributes->transfer) { + if (!priv->config.dv_esw_en) + return rte_flow_error_set + (error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "E-Switch dr is not supported"); + if (!(priv->representor || priv->master)) + return rte_flow_error_set + (error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, "E-Switch configuration can only be" + " done by a master or a representor device"); + if (attributes->egress) + return rte_flow_error_set + (error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attributes, + "egress is not supported"); + } + if (!(attributes->egress ^ attributes->ingress)) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR, NULL, + "must specify exactly one of " + "ingress or egress"); + return ret; +} + +/** + * Internal validation function. For validating both actions and items. + * + * @param[in] dev + * Pointer to the rte_eth_dev structure. + * @param[in] attr + * Pointer to the flow attributes. + * @param[in] items + * Pointer to the list of items. + * @param[in] actions + * Pointer to the list of actions. + * @param[in] external + * This flow rule is created by request external to PMD. + * @param[in] hairpin + * Number of hairpin TX actions, 0 means classic flow. + * @param[out] error + * Pointer to the error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. 
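+ *
+ * This is the backend reached through rte_flow_validate() when the DV flow
+ * engine is enabled; an illustrative application-side call (sketch only,
+ * generic rte_flow API, port and queue numbers are arbitrary):
+ *
+ * @code
+ *    uint16_t port_id = 0;                               // example port
+ *    struct rte_flow_attr attr = { .ingress = 1 };
+ *    struct rte_flow_item pattern[] = {
+ *        { .type = RTE_FLOW_ITEM_TYPE_ETH },
+ *        { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
+ *        { .type = RTE_FLOW_ITEM_TYPE_END },
+ *    };
+ *    struct rte_flow_action_queue queue = { .index = 0 };
+ *    struct rte_flow_action actions[] = {
+ *        { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
+ *        { .type = RTE_FLOW_ACTION_TYPE_END },
+ *    };
+ *    struct rte_flow_error err;
+ *    int rc = rte_flow_validate(port_id, &attr, pattern, actions, &err);
+ * @endcode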
+ */ +static int +flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, + const struct rte_flow_item items[], + const struct rte_flow_action actions[], + bool external, int hairpin, struct rte_flow_error *error) +{ + int ret; + uint64_t action_flags = 0; + uint64_t item_flags = 0; + uint64_t last_item = 0; + uint8_t next_protocol = 0xff; + uint16_t ether_type = 0; + int actions_n = 0; + uint8_t item_ipv6_proto = 0; + const struct rte_flow_item *gre_item = NULL; + const struct rte_flow_action_raw_decap *decap; + const struct rte_flow_action_raw_encap *encap; + const struct rte_flow_action_rss *rss; + const struct rte_flow_item_tcp nic_tcp_mask = { + .hdr = { + .tcp_flags = 0xFF, + .src_port = RTE_BE16(UINT16_MAX), + .dst_port = RTE_BE16(UINT16_MAX), + } + }; + const struct rte_flow_item_ipv4 nic_ipv4_mask = { + .hdr = { + .src_addr = RTE_BE32(0xffffffff), + .dst_addr = RTE_BE32(0xffffffff), + .type_of_service = 0xff, + .next_proto_id = 0xff, + .time_to_live = 0xff, + }, + }; + const struct rte_flow_item_ipv6 nic_ipv6_mask = { + .hdr = { + .src_addr = + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff", + .dst_addr = + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff", + .vtc_flow = RTE_BE32(0xffffffff), + .proto = 0xff, + .hop_limits = 0xff, + }, + }; + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_dev_config *dev_conf = &priv->config; + uint16_t queue_index = 0xFFFF; + const struct rte_flow_item_vlan *vlan_m = NULL; + int16_t rw_act_num = 0; + uint64_t is_root; + + if (items == NULL) + return -1; + ret = flow_dv_validate_attributes(dev, attr, external, error); + if (ret < 0) + return ret; + is_root = (uint64_t)ret; + for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) { + int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); + int type = items->type; + + switch (type) { + case RTE_FLOW_ITEM_TYPE_VOID: + break; + case RTE_FLOW_ITEM_TYPE_PORT_ID: + ret = flow_dv_validate_item_port_id + (dev, items, attr, item_flags, error); + if (ret < 0) + return ret; + last_item = MLX5_FLOW_ITEM_PORT_ID; + break; + case RTE_FLOW_ITEM_TYPE_ETH: + ret = mlx5_flow_validate_item_eth(items, item_flags, + error); + if (ret < 0) + return ret; + last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 : + MLX5_FLOW_LAYER_OUTER_L2; + if (items->mask != NULL && items->spec != NULL) { + ether_type = + ((const struct rte_flow_item_eth *) + items->spec)->type; + ether_type &= + ((const struct rte_flow_item_eth *) + items->mask)->type; + ether_type = rte_be_to_cpu_16(ether_type); + } else { + ether_type = 0; + } + break; + case RTE_FLOW_ITEM_TYPE_VLAN: + ret = flow_dv_validate_item_vlan(items, item_flags, + dev, error); + if (ret < 0) + return ret; + last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN : + MLX5_FLOW_LAYER_OUTER_VLAN; + if (items->mask != NULL && items->spec != NULL) { + ether_type = + ((const struct rte_flow_item_vlan *) + items->spec)->inner_type; + ether_type &= + ((const struct rte_flow_item_vlan *) + items->mask)->inner_type; + ether_type = rte_be_to_cpu_16(ether_type); + } else { + ether_type = 0; + } + /* Store outer VLAN mask for of_push_vlan action. */ + if (!tunnel) + vlan_m = items->mask; + break; + case RTE_FLOW_ITEM_TYPE_IPV4: + mlx5_flow_tunnel_ip_check(items, next_protocol, + &item_flags, &tunnel); + ret = mlx5_flow_validate_item_ipv4(items, item_flags, + last_item, + ether_type, + &nic_ipv4_mask, + error); + if (ret < 0) + return ret; + last_item = tunnel ? 
MLX5_FLOW_LAYER_INNER_L3_IPV4 : + MLX5_FLOW_LAYER_OUTER_L3_IPV4; + if (items->mask != NULL && + ((const struct rte_flow_item_ipv4 *) + items->mask)->hdr.next_proto_id) { + next_protocol = + ((const struct rte_flow_item_ipv4 *) + (items->spec))->hdr.next_proto_id; + next_protocol &= + ((const struct rte_flow_item_ipv4 *) + (items->mask))->hdr.next_proto_id; + } else { + /* Reset for inner layer. */ + next_protocol = 0xff; + } + break; + case RTE_FLOW_ITEM_TYPE_IPV6: + mlx5_flow_tunnel_ip_check(items, next_protocol, + &item_flags, &tunnel); + ret = mlx5_flow_validate_item_ipv6(items, item_flags, + last_item, + ether_type, + &nic_ipv6_mask, + error); + if (ret < 0) + return ret; + last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 : + MLX5_FLOW_LAYER_OUTER_L3_IPV6; + if (items->mask != NULL && + ((const struct rte_flow_item_ipv6 *) + items->mask)->hdr.proto) { + item_ipv6_proto = + ((const struct rte_flow_item_ipv6 *) + items->spec)->hdr.proto; + next_protocol = + ((const struct rte_flow_item_ipv6 *) + items->spec)->hdr.proto; + next_protocol &= + ((const struct rte_flow_item_ipv6 *) + items->mask)->hdr.proto; + } else { + /* Reset for inner layer. */ + next_protocol = 0xff; + } + break; + case RTE_FLOW_ITEM_TYPE_TCP: + ret = mlx5_flow_validate_item_tcp + (items, item_flags, + next_protocol, + &nic_tcp_mask, + error); + if (ret < 0) + return ret; + last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP : + MLX5_FLOW_LAYER_OUTER_L4_TCP; + break; + case RTE_FLOW_ITEM_TYPE_UDP: + ret = mlx5_flow_validate_item_udp(items, item_flags, + next_protocol, + error); + if (ret < 0) + return ret; + last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP : + MLX5_FLOW_LAYER_OUTER_L4_UDP; + break; + case RTE_FLOW_ITEM_TYPE_GRE: + ret = mlx5_flow_validate_item_gre(items, item_flags, + next_protocol, error); + if (ret < 0) + return ret; + gre_item = items; + last_item = MLX5_FLOW_LAYER_GRE; + break; + case RTE_FLOW_ITEM_TYPE_NVGRE: + ret = mlx5_flow_validate_item_nvgre(items, item_flags, + next_protocol, + error); + if (ret < 0) + return ret; + last_item = MLX5_FLOW_LAYER_NVGRE; + break; + case RTE_FLOW_ITEM_TYPE_GRE_KEY: + ret = mlx5_flow_validate_item_gre_key + (items, item_flags, gre_item, error); + if (ret < 0) + return ret; + last_item = MLX5_FLOW_LAYER_GRE_KEY; + break; + case RTE_FLOW_ITEM_TYPE_VXLAN: + ret = mlx5_flow_validate_item_vxlan(items, item_flags, + error); + if (ret < 0) + return ret; + last_item = MLX5_FLOW_LAYER_VXLAN; + break; + case RTE_FLOW_ITEM_TYPE_VXLAN_GPE: + ret = mlx5_flow_validate_item_vxlan_gpe(items, + item_flags, dev, + error); + if (ret < 0) + return ret; + last_item = MLX5_FLOW_LAYER_VXLAN_GPE; + break; + case RTE_FLOW_ITEM_TYPE_GENEVE: + ret = mlx5_flow_validate_item_geneve(items, + item_flags, dev, + error); + if (ret < 0) + return ret; + last_item = MLX5_FLOW_LAYER_GENEVE; + break; + case RTE_FLOW_ITEM_TYPE_MPLS: + ret = mlx5_flow_validate_item_mpls(dev, items, + item_flags, + last_item, error); + if (ret < 0) + return ret; + last_item = MLX5_FLOW_LAYER_MPLS; + break; + + case RTE_FLOW_ITEM_TYPE_MARK: + ret = flow_dv_validate_item_mark(dev, items, attr, + error); + if (ret < 0) + return ret; + last_item = MLX5_FLOW_ITEM_MARK; + break; + case RTE_FLOW_ITEM_TYPE_META: + ret = flow_dv_validate_item_meta(dev, items, attr, + error); + if (ret < 0) + return ret; + last_item = MLX5_FLOW_ITEM_METADATA; + break; + case RTE_FLOW_ITEM_TYPE_ICMP: + ret = mlx5_flow_validate_item_icmp(items, item_flags, + next_protocol, + error); + if (ret < 0) + return ret; + last_item = MLX5_FLOW_LAYER_ICMP; + 
break; + case RTE_FLOW_ITEM_TYPE_ICMP6: + ret = mlx5_flow_validate_item_icmp6(items, item_flags, + next_protocol, + error); + if (ret < 0) + return ret; + item_ipv6_proto = IPPROTO_ICMPV6; + last_item = MLX5_FLOW_LAYER_ICMP6; + break; + case RTE_FLOW_ITEM_TYPE_TAG: + ret = flow_dv_validate_item_tag(dev, items, + attr, error); + if (ret < 0) + return ret; + last_item = MLX5_FLOW_ITEM_TAG; + break; + case MLX5_RTE_FLOW_ITEM_TYPE_TAG: + case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE: + break; + case RTE_FLOW_ITEM_TYPE_GTP: + ret = flow_dv_validate_item_gtp(dev, items, item_flags, + error); + if (ret < 0) + return ret; + last_item = MLX5_FLOW_LAYER_GTP; + break; + default: + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + NULL, "item not supported"); + } + item_flags |= last_item; + } + for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { + int type = actions->type; + if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + actions, "too many actions"); + switch (type) { + case RTE_FLOW_ACTION_TYPE_VOID: + break; + case RTE_FLOW_ACTION_TYPE_PORT_ID: + ret = flow_dv_validate_action_port_id(dev, + action_flags, + actions, + attr, + error); + if (ret) + return ret; + action_flags |= MLX5_FLOW_ACTION_PORT_ID; + ++actions_n; + break; + case RTE_FLOW_ACTION_TYPE_FLAG: + ret = flow_dv_validate_action_flag(dev, action_flags, + attr, error); + if (ret < 0) + return ret; + if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) { + /* Count all modify-header actions as one. */ + if (!(action_flags & + MLX5_FLOW_MODIFY_HDR_ACTIONS)) + ++actions_n; + action_flags |= MLX5_FLOW_ACTION_FLAG | + MLX5_FLOW_ACTION_MARK_EXT; + } else { + action_flags |= MLX5_FLOW_ACTION_FLAG; + ++actions_n; + } + rw_act_num += MLX5_ACT_NUM_SET_MARK; + break; + case RTE_FLOW_ACTION_TYPE_MARK: + ret = flow_dv_validate_action_mark(dev, actions, + action_flags, + attr, error); + if (ret < 0) + return ret; + if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) { + /* Count all modify-header actions as one. */ + if (!(action_flags & + MLX5_FLOW_MODIFY_HDR_ACTIONS)) + ++actions_n; + action_flags |= MLX5_FLOW_ACTION_MARK | + MLX5_FLOW_ACTION_MARK_EXT; + } else { + action_flags |= MLX5_FLOW_ACTION_MARK; + ++actions_n; + } + rw_act_num += MLX5_ACT_NUM_SET_MARK; + break; + case RTE_FLOW_ACTION_TYPE_SET_META: + ret = flow_dv_validate_action_set_meta(dev, actions, + action_flags, + attr, error); + if (ret < 0) + return ret; + /* Count all modify-header actions as one action. */ + if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)) + ++actions_n; + action_flags |= MLX5_FLOW_ACTION_SET_META; + rw_act_num += MLX5_ACT_NUM_SET_META; + break; + case RTE_FLOW_ACTION_TYPE_SET_TAG: + ret = flow_dv_validate_action_set_tag(dev, actions, + action_flags, + attr, error); + if (ret < 0) + return ret; + /* Count all modify-header actions as one action. 
*/ + if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)) + ++actions_n; + action_flags |= MLX5_FLOW_ACTION_SET_TAG; + rw_act_num += MLX5_ACT_NUM_SET_TAG; + break; + case RTE_FLOW_ACTION_TYPE_DROP: + ret = mlx5_flow_validate_action_drop(action_flags, + attr, error); + if (ret < 0) + return ret; + action_flags |= MLX5_FLOW_ACTION_DROP; + ++actions_n; + break; + case RTE_FLOW_ACTION_TYPE_QUEUE: + ret = mlx5_flow_validate_action_queue(actions, + action_flags, dev, + attr, error); + if (ret < 0) + return ret; + queue_index = ((const struct rte_flow_action_queue *) + (actions->conf))->index; + action_flags |= MLX5_FLOW_ACTION_QUEUE; + ++actions_n; + break; + case RTE_FLOW_ACTION_TYPE_RSS: + rss = actions->conf; + ret = mlx5_flow_validate_action_rss(actions, + action_flags, dev, + attr, item_flags, + error); + if (ret < 0) + return ret; + if (rss != NULL && rss->queue_num) + queue_index = rss->queue[0]; + action_flags |= MLX5_FLOW_ACTION_RSS; + ++actions_n; + break; + case RTE_FLOW_ACTION_TYPE_COUNT: + ret = flow_dv_validate_action_count(dev, error); + if (ret < 0) + return ret; + action_flags |= MLX5_FLOW_ACTION_COUNT; + ++actions_n; + break; + case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN: + if (flow_dv_validate_action_pop_vlan(dev, + action_flags, + actions, + item_flags, attr, + error)) + return -rte_errno; + action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN; + ++actions_n; + break; + case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN: + ret = flow_dv_validate_action_push_vlan(dev, + action_flags, + vlan_m, + actions, attr, + error); + if (ret < 0) + return ret; + action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN; + ++actions_n; + break; + case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP: + ret = flow_dv_validate_action_set_vlan_pcp + (action_flags, actions, error); + if (ret < 0) + return ret; + /* Count PCP with push_vlan command. */ + action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_PCP; + break; + case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID: + ret = flow_dv_validate_action_set_vlan_vid + (item_flags, action_flags, + actions, error); + if (ret < 0) + return ret; + /* Count VID with push_vlan command. */ + action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID; + rw_act_num += MLX5_ACT_NUM_MDF_VID; + break; + case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP: + case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP: + ret = flow_dv_validate_action_l2_encap(dev, + action_flags, + actions, attr, + error); + if (ret < 0) + return ret; + action_flags |= MLX5_FLOW_ACTION_ENCAP; + ++actions_n; + break; + case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP: + case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP: + ret = flow_dv_validate_action_decap(dev, action_flags, + attr, error); + if (ret < 0) + return ret; + action_flags |= MLX5_FLOW_ACTION_DECAP; + ++actions_n; + break; + case RTE_FLOW_ACTION_TYPE_RAW_ENCAP: + ret = flow_dv_validate_action_raw_encap_decap + (dev, NULL, actions->conf, attr, &action_flags, + &actions_n, error); + if (ret < 0) + return ret; + break; + case RTE_FLOW_ACTION_TYPE_RAW_DECAP: + decap = actions->conf; + while ((++actions)->type == RTE_FLOW_ACTION_TYPE_VOID) + ; + if (actions->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) { + encap = NULL; + actions--; + } else { + encap = actions->conf; + } + ret = flow_dv_validate_action_raw_encap_decap + (dev, + decap ? 
decap : &empty_decap, encap, + attr, &action_flags, &actions_n, + error); + if (ret < 0) + return ret; + break; + case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC: + case RTE_FLOW_ACTION_TYPE_SET_MAC_DST: + ret = flow_dv_validate_action_modify_mac(action_flags, + actions, + item_flags, + error); + if (ret < 0) + return ret; + /* Count all modify-header actions as one action. */ + if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)) + ++actions_n; + action_flags |= actions->type == + RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ? + MLX5_FLOW_ACTION_SET_MAC_SRC : + MLX5_FLOW_ACTION_SET_MAC_DST; + /* + * Even if the source and destination MAC addresses have + * overlap in the header with 4B alignment, the convert + * function will handle them separately and 4 SW actions + * will be created. And 2 actions will be added each + * time no matter how many bytes of address will be set. + */ + rw_act_num += MLX5_ACT_NUM_MDF_MAC; + break; + case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC: + case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST: + ret = flow_dv_validate_action_modify_ipv4(action_flags, + actions, + item_flags, + error); + if (ret < 0) + return ret; + /* Count all modify-header actions as one action. */ + if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)) + ++actions_n; + action_flags |= actions->type == + RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ? + MLX5_FLOW_ACTION_SET_IPV4_SRC : + MLX5_FLOW_ACTION_SET_IPV4_DST; + rw_act_num += MLX5_ACT_NUM_MDF_IPV4; + break; + case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC: + case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST: + ret = flow_dv_validate_action_modify_ipv6(action_flags, + actions, + item_flags, + error); + if (ret < 0) + return ret; + if (item_ipv6_proto == IPPROTO_ICMPV6) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + actions, + "Can't change header " + "with ICMPv6 proto"); + /* Count all modify-header actions as one action. */ + if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)) + ++actions_n; + action_flags |= actions->type == + RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ? + MLX5_FLOW_ACTION_SET_IPV6_SRC : + MLX5_FLOW_ACTION_SET_IPV6_DST; + rw_act_num += MLX5_ACT_NUM_MDF_IPV6; + break; + case RTE_FLOW_ACTION_TYPE_SET_TP_SRC: + case RTE_FLOW_ACTION_TYPE_SET_TP_DST: + ret = flow_dv_validate_action_modify_tp(action_flags, + actions, + item_flags, + error); + if (ret < 0) + return ret; + /* Count all modify-header actions as one action. */ + if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)) + ++actions_n; + action_flags |= actions->type == + RTE_FLOW_ACTION_TYPE_SET_TP_SRC ? + MLX5_FLOW_ACTION_SET_TP_SRC : + MLX5_FLOW_ACTION_SET_TP_DST; + rw_act_num += MLX5_ACT_NUM_MDF_PORT; + break; + case RTE_FLOW_ACTION_TYPE_DEC_TTL: + case RTE_FLOW_ACTION_TYPE_SET_TTL: + ret = flow_dv_validate_action_modify_ttl(action_flags, + actions, + item_flags, + error); + if (ret < 0) + return ret; + /* Count all modify-header actions as one action. */ + if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)) + ++actions_n; + action_flags |= actions->type == + RTE_FLOW_ACTION_TYPE_SET_TTL ? 
+ MLX5_FLOW_ACTION_SET_TTL : + MLX5_FLOW_ACTION_DEC_TTL; + rw_act_num += MLX5_ACT_NUM_MDF_TTL; + break; + case RTE_FLOW_ACTION_TYPE_JUMP: + ret = flow_dv_validate_action_jump(actions, + action_flags, + attr, external, + error); + if (ret) + return ret; + ++actions_n; + action_flags |= MLX5_FLOW_ACTION_JUMP; + break; + case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ: + case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ: + ret = flow_dv_validate_action_modify_tcp_seq + (action_flags, + actions, + item_flags, + error); + if (ret < 0) + return ret; + /* Count all modify-header actions as one action. */ + if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)) + ++actions_n; + action_flags |= actions->type == + RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ? + MLX5_FLOW_ACTION_INC_TCP_SEQ : + MLX5_FLOW_ACTION_DEC_TCP_SEQ; + rw_act_num += MLX5_ACT_NUM_MDF_TCPSEQ; + break; + case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK: + case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK: + ret = flow_dv_validate_action_modify_tcp_ack + (action_flags, + actions, + item_flags, + error); + if (ret < 0) + return ret; + /* Count all modify-header actions as one action. */ + if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)) + ++actions_n; + action_flags |= actions->type == + RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ? + MLX5_FLOW_ACTION_INC_TCP_ACK : + MLX5_FLOW_ACTION_DEC_TCP_ACK; + rw_act_num += MLX5_ACT_NUM_MDF_TCPACK; + break; + case MLX5_RTE_FLOW_ACTION_TYPE_MARK: + break; + case MLX5_RTE_FLOW_ACTION_TYPE_TAG: + case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG: + rw_act_num += MLX5_ACT_NUM_SET_TAG; + break; + case RTE_FLOW_ACTION_TYPE_METER: + ret = mlx5_flow_validate_action_meter(dev, + action_flags, + actions, attr, + error); + if (ret < 0) + return ret; + action_flags |= MLX5_FLOW_ACTION_METER; + ++actions_n; + /* Meter action will add one more TAG action. */ + rw_act_num += MLX5_ACT_NUM_SET_TAG; + break; + case RTE_FLOW_ACTION_TYPE_AGE: + ret = flow_dv_validate_action_age(action_flags, + actions, dev, + error); + if (ret < 0) + return ret; + action_flags |= MLX5_FLOW_ACTION_AGE; + ++actions_n; + break; + case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP: + ret = flow_dv_validate_action_modify_ipv4_dscp + (action_flags, + actions, + item_flags, + error); + if (ret < 0) + return ret; + /* Count all modify-header actions as one action. */ + if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)) + ++actions_n; + action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP; + rw_act_num += MLX5_ACT_NUM_SET_DSCP; + break; + case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP: + ret = flow_dv_validate_action_modify_ipv6_dscp + (action_flags, + actions, + item_flags, + error); + if (ret < 0) + return ret; + /* Count all modify-header actions as one action. */ + if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)) + ++actions_n; + action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP; + rw_act_num += MLX5_ACT_NUM_SET_DSCP; + break; + default: + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + actions, + "action not supported"); + } + } + /* + * Validate the drop action mutual exclusion with other actions. + * Drop action is mutually-exclusive with any other action, except for + * Count action. 
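+ * For example, DROP combined with COUNT is accepted, while DROP combined
+ * with QUEUE or any modify-header action is rejected with EINVAL below.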
+ */ + if ((action_flags & MLX5_FLOW_ACTION_DROP) && + (action_flags & ~(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_COUNT))) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "Drop action is mutually-exclusive " + "with any other action, except for " + "Count action"); + /* Eswitch has few restrictions on using items and actions */ + if (attr->transfer) { + if (!mlx5_flow_ext_mreg_supported(dev) && + action_flags & MLX5_FLOW_ACTION_FLAG) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, + "unsupported action FLAG"); + if (!mlx5_flow_ext_mreg_supported(dev) && + action_flags & MLX5_FLOW_ACTION_MARK) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, + "unsupported action MARK"); + if (action_flags & MLX5_FLOW_ACTION_QUEUE) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, + "unsupported action QUEUE"); + if (action_flags & MLX5_FLOW_ACTION_RSS) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, + "unsupported action RSS"); + if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + actions, + "no fate action is found"); + } else { + if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + actions, + "no fate action is found"); + } + /* Continue validation for Xcap actions.*/ + if ((action_flags & MLX5_FLOW_XCAP_ACTIONS) && (queue_index == 0xFFFF || + mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN)) { + if ((action_flags & MLX5_FLOW_XCAP_ACTIONS) == + MLX5_FLOW_XCAP_ACTIONS) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, "encap and decap " + "combination aren't supported"); + if (!attr->transfer && attr->ingress && (action_flags & + MLX5_FLOW_ACTION_ENCAP)) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, "encap is not supported" + " for ingress traffic"); + } + /* Hairpin flow will add one more TAG action. */ + if (hairpin > 0) + rw_act_num += MLX5_ACT_NUM_SET_TAG; + /* extra metadata enabled: one more TAG action will be add. */ + if (dev_conf->dv_flow_en && + dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY && + mlx5_flow_ext_mreg_supported(dev)) + rw_act_num += MLX5_ACT_NUM_SET_TAG; + if ((uint32_t)rw_act_num > + flow_dv_modify_hdr_action_max(dev, is_root)) { + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, "too many header modify" + " actions to support"); + } + return 0; +} + +/** + * Internal preparation function. Allocates the DV flow size, + * this size is constant. + * + * @param[in] dev + * Pointer to the rte_eth_dev structure. + * @param[in] attr + * Pointer to the flow attributes. + * @param[in] items + * Pointer to the list of items. + * @param[in] actions + * Pointer to the list of actions. + * @param[out] error + * Pointer to the error structure. + * + * @return + * Pointer to mlx5_flow object on success, + * otherwise NULL and rte_errno is set. 
+ */ +static struct mlx5_flow * +flow_dv_prepare(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr __rte_unused, + const struct rte_flow_item items[] __rte_unused, + const struct rte_flow_action actions[] __rte_unused, + struct rte_flow_error *error) +{ + uint32_t handle_idx = 0; + struct mlx5_flow *dev_flow; + struct mlx5_flow_handle *dev_handle; + struct mlx5_priv *priv = dev->data->dev_private; + + /* In case of corrupting the memory. */ + if (priv->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) { + rte_flow_error_set(error, ENOSPC, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "not free temporary device flow"); + return NULL; + } + dev_handle = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], + &handle_idx); + if (!dev_handle) { + rte_flow_error_set(error, ENOMEM, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "not enough memory to create flow handle"); + return NULL; + } + /* No multi-thread supporting. */ + dev_flow = &((struct mlx5_flow *)priv->inter_flows)[priv->flow_idx++]; + dev_flow->handle = dev_handle; + dev_flow->handle_idx = handle_idx; + dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param); + /* + * The matching value needs to be cleared to 0 before using. In the + * past, it will be automatically cleared when using rte_*alloc + * API. The time consumption will be almost the same as before. + */ + memset(dev_flow->dv.value.buf, 0, MLX5_ST_SZ_BYTES(fte_match_param)); + dev_flow->ingress = attr->ingress; + dev_flow->dv.transfer = attr->transfer; + return dev_flow; +} + +#ifdef RTE_LIBRTE_MLX5_DEBUG +/** + * Sanity check for match mask and value. Similar to check_valid_spec() in + * kernel driver. If unmasked bit is present in value, it returns failure. + * + * @param match_mask + * pointer to match mask buffer. + * @param match_value + * pointer to match value buffer. + * + * @return + * 0 if valid, -EINVAL otherwise. + */ +static int +flow_dv_check_valid_spec(void *match_mask, void *match_value) +{ + uint8_t *m = match_mask; + uint8_t *v = match_value; + unsigned int i; + + for (i = 0; i < MLX5_ST_SZ_BYTES(fte_match_param); ++i) { + if (v[i] & ~m[i]) { + DRV_LOG(ERR, + "match_value differs from match_criteria" + " %p[%u] != %p[%u]", + match_value, i, match_mask, i); + return -EINVAL; + } + } + return 0; +} +#endif + +/** + * Add match of ip_version. + * + * @param[in] group + * Flow group. + * @param[in] headers_v + * Values header pointer. + * @param[in] headers_m + * Masks header pointer. + * @param[in] ip_version + * The IP version to set. + */ +static inline void +flow_dv_set_match_ip_version(uint32_t group, + void *headers_v, + void *headers_m, + uint8_t ip_version) +{ + if (group == 0) + MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf); + else + MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, + ip_version); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, ip_version); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, 0); + MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype, 0); +} + +/** + * Add Ethernet item to matcher and to the value. + * + * @param[in, out] matcher + * Flow matcher. + * @param[in, out] key + * Flow matcher value. + * @param[in] item + * Flow pattern to translate. + * @param[in] inner + * Item is inner pattern. 
+ */
+static void
+flow_dv_translate_item_eth(void *matcher, void *key,
+                           const struct rte_flow_item *item, int inner,
+                           uint32_t group)
+{
+        const struct rte_flow_item_eth *eth_m = item->mask;
+        const struct rte_flow_item_eth *eth_v = item->spec;
+        const struct rte_flow_item_eth nic_mask = {
+                .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+                .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+                .type = RTE_BE16(0xffff),
+        };
+        void *headers_m;
+        void *headers_v;
+        char *l24_v;
+        unsigned int i;
+
+        if (!eth_v)
+                return;
+        if (!eth_m)
+                eth_m = &nic_mask;
+        if (inner) {
+                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+                                         inner_headers);
+                headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
+        } else {
+                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+                                         outer_headers);
+                headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
+        }
+        memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, dmac_47_16),
+               &eth_m->dst, sizeof(eth_m->dst));
+        /* The value must be in the range of the mask. */
+        l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, dmac_47_16);
+        for (i = 0; i < sizeof(eth_m->dst); ++i)
+                l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
+        memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, smac_47_16),
+               &eth_m->src, sizeof(eth_m->src));
+        l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, smac_47_16);
+        /* The value must be in the range of the mask. */
+        for (i = 0; i < sizeof(eth_m->dst); ++i)
+                l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
+        if (eth_v->type) {
+                /* When ethertype is present set mask for tagged VLAN. */
+                MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
+                /* Set value for tagged VLAN if ethertype is 802.1Q. */
+                if (eth_v->type == RTE_BE16(RTE_ETHER_TYPE_VLAN) ||
+                    eth_v->type == RTE_BE16(RTE_ETHER_TYPE_QINQ)) {
+                        MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag,
+                                 1);
+                        /* Return here to avoid setting match on ethertype. */
+                        return;
+                }
+        }
+        /*
+         * HW supports match on one Ethertype, the Ethertype following the last
+         * VLAN tag of the packet (see PRM).
+         * Set match on ethertype only if ETH header is not followed by VLAN.
+         * HW is optimized for IPv4/IPv6. In such cases, avoid setting
+         * ethertype, and use ip_version field instead.
+         */
+        if (eth_v->type == RTE_BE16(RTE_ETHER_TYPE_IPV4) &&
+            eth_m->type == 0xFFFF) {
+                flow_dv_set_match_ip_version(group, headers_v, headers_m, 4);
+        } else if (eth_v->type == RTE_BE16(RTE_ETHER_TYPE_IPV6) &&
+                   eth_m->type == 0xFFFF) {
+                flow_dv_set_match_ip_version(group, headers_v, headers_m, 6);
+        } else {
+                MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
+                         rte_be_to_cpu_16(eth_m->type));
+                l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
+                                     ethertype);
+                *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
+        }
+}
+
+/**
+ * Add VLAN item to matcher and to the value.
+ *
+ * @param[in, out] dev_flow
+ *   Flow descriptor.
+ * @param[in, out] matcher
+ *   Flow matcher.
+ * @param[in, out] key
+ *   Flow matcher value.
+ * @param[in] item
+ *   Flow pattern to translate.
+ * @param[in] inner
+ *   Item is inner pattern.
+ */ +static void +flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow, + void *matcher, void *key, + const struct rte_flow_item *item, + int inner, uint32_t group) +{ + const struct rte_flow_item_vlan *vlan_m = item->mask; + const struct rte_flow_item_vlan *vlan_v = item->spec; + void *headers_m; + void *headers_v; + uint16_t tci_m; + uint16_t tci_v; + + if (inner) { + headers_m = MLX5_ADDR_OF(fte_match_param, matcher, + inner_headers); + headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers); + } else { + headers_m = MLX5_ADDR_OF(fte_match_param, matcher, + outer_headers); + headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers); + /* + * This is workaround, masks are not supported, + * and pre-validated. + */ + if (vlan_v) + dev_flow->handle->vf_vlan.tag = + rte_be_to_cpu_16(vlan_v->tci) & 0x0fff; + } + /* + * When VLAN item exists in flow, mark packet as tagged, + * even if TCI is not specified. + */ + MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1); + if (!vlan_v) + return; + if (!vlan_m) + vlan_m = &rte_flow_item_vlan_mask; + tci_m = rte_be_to_cpu_16(vlan_m->tci); + tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci); + MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_vid, tci_m); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, tci_v); + MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_cfi, tci_m >> 12); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_cfi, tci_v >> 12); + MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_prio, tci_m >> 13); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, tci_v >> 13); + /* + * HW is optimized for IPv4/IPv6. In such cases, avoid setting + * ethertype, and use ip_version field instead. + */ + if (vlan_v->inner_type == RTE_BE16(RTE_ETHER_TYPE_IPV4) && + vlan_m->inner_type == 0xFFFF) { + flow_dv_set_match_ip_version(group, headers_v, headers_m, 4); + } else if (vlan_v->inner_type == RTE_BE16(RTE_ETHER_TYPE_IPV6) && + vlan_m->inner_type == 0xFFFF) { + flow_dv_set_match_ip_version(group, headers_v, headers_m, 6); + } else { + MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype, + rte_be_to_cpu_16(vlan_m->inner_type)); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, + rte_be_to_cpu_16(vlan_m->inner_type & + vlan_v->inner_type)); + } +} + +/** + * Add IPV4 item to matcher and to the value. + * + * @param[in, out] matcher + * Flow matcher. + * @param[in, out] key + * Flow matcher value. + * @param[in] item + * Flow pattern to translate. + * @param[in] item_flags + * Bit-fields that holds the items detected until now. + * @param[in] inner + * Item is inner pattern. + * @param[in] group + * The group to insert the rule. 
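+ *
+ * Illustrative pattern item handled here (a sketch using the public
+ * rte_flow API; the address is an arbitrary example):
+ *
+ *   struct rte_flow_item_ipv4 spec = {
+ *           .hdr.dst_addr = RTE_BE32(RTE_IPV4(192, 168, 0, 1)),
+ *   };
+ *   struct rte_flow_item_ipv4 mask = {
+ *           .hdr.dst_addr = RTE_BE32(0xffffffff),
+ *   };
+ *   struct rte_flow_item item = {
+ *           .type = RTE_FLOW_ITEM_TYPE_IPV4,
+ *           .spec = &spec,
+ *           .mask = &mask,
+ *   };
+ *
+ * As in the other item translators, the matcher value is written as
+ * spec AND mask, so only the bits covered by the mask are matched.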
+ */ +static void +flow_dv_translate_item_ipv4(void *matcher, void *key, + const struct rte_flow_item *item, + const uint64_t item_flags, + int inner, uint32_t group) +{ + const struct rte_flow_item_ipv4 *ipv4_m = item->mask; + const struct rte_flow_item_ipv4 *ipv4_v = item->spec; + const struct rte_flow_item_ipv4 nic_mask = { + .hdr = { + .src_addr = RTE_BE32(0xffffffff), + .dst_addr = RTE_BE32(0xffffffff), + .type_of_service = 0xff, + .next_proto_id = 0xff, + .time_to_live = 0xff, + }, + }; + void *headers_m; + void *headers_v; + char *l24_m; + char *l24_v; + uint8_t tos; + + if (inner) { + headers_m = MLX5_ADDR_OF(fte_match_param, matcher, + inner_headers); + headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers); + } else { + headers_m = MLX5_ADDR_OF(fte_match_param, matcher, + outer_headers); + headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers); + } + flow_dv_set_match_ip_version(group, headers_v, headers_m, 4); + /* + * On outer header (which must contains L2), or inner header with L2, + * set cvlan_tag mask bit to mark this packet as untagged. + * This should be done even if item->spec is empty. + */ + if (!inner || item_flags & MLX5_FLOW_LAYER_INNER_L2) + MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1); + if (!ipv4_v) + return; + if (!ipv4_m) + ipv4_m = &nic_mask; + l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, + dst_ipv4_dst_ipv6.ipv4_layout.ipv4); + l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, + dst_ipv4_dst_ipv6.ipv4_layout.ipv4); + *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr; + *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr; + l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, + src_ipv4_src_ipv6.ipv4_layout.ipv4); + l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, + src_ipv4_src_ipv6.ipv4_layout.ipv4); + *(uint32_t *)l24_m = ipv4_m->hdr.src_addr; + *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr; + tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service; + MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, + ipv4_m->hdr.type_of_service); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos); + MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, + ipv4_m->hdr.type_of_service >> 2); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2); + MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, + ipv4_m->hdr.next_proto_id); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, + ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id); + MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit, + ipv4_m->hdr.time_to_live); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit, + ipv4_v->hdr.time_to_live & ipv4_m->hdr.time_to_live); +} + +/** + * Add IPV6 item to matcher and to the value. + * + * @param[in, out] matcher + * Flow matcher. + * @param[in, out] key + * Flow matcher value. + * @param[in] item + * Flow pattern to translate. + * @param[in] item_flags + * Bit-fields that holds the items detected until now. + * @param[in] inner + * Item is inner pattern. + * @param[in] group + * The group to insert the rule. 
+ */ +static void +flow_dv_translate_item_ipv6(void *matcher, void *key, + const struct rte_flow_item *item, + const uint64_t item_flags, + int inner, uint32_t group) +{ + const struct rte_flow_item_ipv6 *ipv6_m = item->mask; + const struct rte_flow_item_ipv6 *ipv6_v = item->spec; + const struct rte_flow_item_ipv6 nic_mask = { + .hdr = { + .src_addr = + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff", + .dst_addr = + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff", + .vtc_flow = RTE_BE32(0xffffffff), + .proto = 0xff, + .hop_limits = 0xff, + }, + }; + void *headers_m; + void *headers_v; + void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters); + void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters); + char *l24_m; + char *l24_v; + uint32_t vtc_m; + uint32_t vtc_v; + int i; + int size; + + if (inner) { + headers_m = MLX5_ADDR_OF(fte_match_param, matcher, + inner_headers); + headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers); + } else { + headers_m = MLX5_ADDR_OF(fte_match_param, matcher, + outer_headers); + headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers); + } + flow_dv_set_match_ip_version(group, headers_v, headers_m, 6); + /* + * On outer header (which must contains L2), or inner header with L2, + * set cvlan_tag mask bit to mark this packet as untagged. + * This should be done even if item->spec is empty. + */ + if (!inner || item_flags & MLX5_FLOW_LAYER_INNER_L2) + MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1); + if (!ipv6_v) + return; + if (!ipv6_m) + ipv6_m = &nic_mask; + size = sizeof(ipv6_m->hdr.dst_addr); + l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, + dst_ipv4_dst_ipv6.ipv6_layout.ipv6); + l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, + dst_ipv4_dst_ipv6.ipv6_layout.ipv6); + memcpy(l24_m, ipv6_m->hdr.dst_addr, size); + for (i = 0; i < size; ++i) + l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i]; + l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, + src_ipv4_src_ipv6.ipv6_layout.ipv6); + l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, + src_ipv4_src_ipv6.ipv6_layout.ipv6); + memcpy(l24_m, ipv6_m->hdr.src_addr, size); + for (i = 0; i < size; ++i) + l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i]; + /* TOS. */ + vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow); + vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow); + MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20); + MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22); + /* Label. */ + if (inner) { + MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label, + vtc_m); + MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label, + vtc_v); + } else { + MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label, + vtc_m); + MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label, + vtc_v); + } + /* Protocol. */ + MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, + ipv6_m->hdr.proto); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, + ipv6_v->hdr.proto & ipv6_m->hdr.proto); + /* Hop limit. */ + MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit, + ipv6_m->hdr.hop_limits); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit, + ipv6_v->hdr.hop_limits & ipv6_m->hdr.hop_limits); +} + +/** + * Add TCP item to matcher and to the value. + * + * @param[in, out] matcher + * Flow matcher. 
+ * @param[in, out] key + * Flow matcher value. + * @param[in] item + * Flow pattern to translate. + * @param[in] inner + * Item is inner pattern. + */ +static void +flow_dv_translate_item_tcp(void *matcher, void *key, + const struct rte_flow_item *item, + int inner) +{ + const struct rte_flow_item_tcp *tcp_m = item->mask; + const struct rte_flow_item_tcp *tcp_v = item->spec; + void *headers_m; + void *headers_v; + + if (inner) { + headers_m = MLX5_ADDR_OF(fte_match_param, matcher, + inner_headers); + headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers); + } else { + headers_m = MLX5_ADDR_OF(fte_match_param, matcher, + outer_headers); + headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers); + } + MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP); + if (!tcp_v) + return; + if (!tcp_m) + tcp_m = &rte_flow_item_tcp_mask; + MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport, + rte_be_to_cpu_16(tcp_m->hdr.src_port)); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport, + rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port)); + MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport, + rte_be_to_cpu_16(tcp_m->hdr.dst_port)); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport, + rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port)); + MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_flags, + tcp_m->hdr.tcp_flags); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags, + (tcp_v->hdr.tcp_flags & tcp_m->hdr.tcp_flags)); +} + +/** + * Add UDP item to matcher and to the value. + * + * @param[in, out] matcher + * Flow matcher. + * @param[in, out] key + * Flow matcher value. + * @param[in] item + * Flow pattern to translate. + * @param[in] inner + * Item is inner pattern. + */ +static void +flow_dv_translate_item_udp(void *matcher, void *key, + const struct rte_flow_item *item, + int inner) +{ + const struct rte_flow_item_udp *udp_m = item->mask; + const struct rte_flow_item_udp *udp_v = item->spec; + void *headers_m; + void *headers_v; + + if (inner) { + headers_m = MLX5_ADDR_OF(fte_match_param, matcher, + inner_headers); + headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers); + } else { + headers_m = MLX5_ADDR_OF(fte_match_param, matcher, + outer_headers); + headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers); + } + MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP); + if (!udp_v) + return; + if (!udp_m) + udp_m = &rte_flow_item_udp_mask; + MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport, + rte_be_to_cpu_16(udp_m->hdr.src_port)); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport, + rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port)); + MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, + rte_be_to_cpu_16(udp_m->hdr.dst_port)); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, + rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port)); +} + +/** + * Add GRE optional Key item to matcher and to the value. + * + * @param[in, out] matcher + * Flow matcher. + * @param[in, out] key + * Flow matcher value. + * @param[in] item + * Flow pattern to translate. + * @param[in] inner + * Item is inner pattern. 
+ */ +static void +flow_dv_translate_item_gre_key(void *matcher, void *key, + const struct rte_flow_item *item) +{ + const rte_be32_t *key_m = item->mask; + const rte_be32_t *key_v = item->spec; + void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters); + void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters); + rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX); + + /* GRE K bit must be on and should already be validated */ + MLX5_SET(fte_match_set_misc, misc_m, gre_k_present, 1); + MLX5_SET(fte_match_set_misc, misc_v, gre_k_present, 1); + if (!key_v) + return; + if (!key_m) + key_m = &gre_key_default_mask; + MLX5_SET(fte_match_set_misc, misc_m, gre_key_h, + rte_be_to_cpu_32(*key_m) >> 8); + MLX5_SET(fte_match_set_misc, misc_v, gre_key_h, + rte_be_to_cpu_32((*key_v) & (*key_m)) >> 8); + MLX5_SET(fte_match_set_misc, misc_m, gre_key_l, + rte_be_to_cpu_32(*key_m) & 0xFF); + MLX5_SET(fte_match_set_misc, misc_v, gre_key_l, + rte_be_to_cpu_32((*key_v) & (*key_m)) & 0xFF); +} + +/** + * Add GRE item to matcher and to the value. + * + * @param[in, out] matcher + * Flow matcher. + * @param[in, out] key + * Flow matcher value. + * @param[in] item + * Flow pattern to translate. + * @param[in] inner + * Item is inner pattern. + */ +static void +flow_dv_translate_item_gre(void *matcher, void *key, + const struct rte_flow_item *item, + int inner) +{ + const struct rte_flow_item_gre *gre_m = item->mask; + const struct rte_flow_item_gre *gre_v = item->spec; + void *headers_m; + void *headers_v; + void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters); + void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters); + struct { + union { + __extension__ + struct { + uint16_t version:3; + uint16_t rsvd0:9; + uint16_t s_present:1; + uint16_t k_present:1; + uint16_t rsvd_bit1:1; + uint16_t c_present:1; + }; + uint16_t value; + }; + } gre_crks_rsvd0_ver_m, gre_crks_rsvd0_ver_v; + + if (inner) { + headers_m = MLX5_ADDR_OF(fte_match_param, matcher, + inner_headers); + headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers); + } else { + headers_m = MLX5_ADDR_OF(fte_match_param, matcher, + outer_headers); + headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers); + } + MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE); + if (!gre_v) + return; + if (!gre_m) + gre_m = &rte_flow_item_gre_mask; + MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, + rte_be_to_cpu_16(gre_m->protocol)); + MLX5_SET(fte_match_set_misc, misc_v, gre_protocol, + rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol)); + gre_crks_rsvd0_ver_m.value = rte_be_to_cpu_16(gre_m->c_rsvd0_ver); + gre_crks_rsvd0_ver_v.value = rte_be_to_cpu_16(gre_v->c_rsvd0_ver); + MLX5_SET(fte_match_set_misc, misc_m, gre_c_present, + gre_crks_rsvd0_ver_m.c_present); + MLX5_SET(fte_match_set_misc, misc_v, gre_c_present, + gre_crks_rsvd0_ver_v.c_present & + gre_crks_rsvd0_ver_m.c_present); + MLX5_SET(fte_match_set_misc, misc_m, gre_k_present, + gre_crks_rsvd0_ver_m.k_present); + MLX5_SET(fte_match_set_misc, misc_v, gre_k_present, + gre_crks_rsvd0_ver_v.k_present & + gre_crks_rsvd0_ver_m.k_present); + MLX5_SET(fte_match_set_misc, misc_m, gre_s_present, + gre_crks_rsvd0_ver_m.s_present); + MLX5_SET(fte_match_set_misc, misc_v, gre_s_present, + gre_crks_rsvd0_ver_v.s_present & + gre_crks_rsvd0_ver_m.s_present); +} + +/** + * Add NVGRE item to matcher and to the value. + * + * @param[in, out] matcher + * Flow matcher. 
+ * @param[in, out] key + * Flow matcher value. + * @param[in] item + * Flow pattern to translate. + * @param[in] inner + * Item is inner pattern. + */ +static void +flow_dv_translate_item_nvgre(void *matcher, void *key, + const struct rte_flow_item *item, + int inner) +{ + const struct rte_flow_item_nvgre *nvgre_m = item->mask; + const struct rte_flow_item_nvgre *nvgre_v = item->spec; + void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters); + void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters); + const char *tni_flow_id_m = (const char *)nvgre_m->tni; + const char *tni_flow_id_v = (const char *)nvgre_v->tni; + char *gre_key_m; + char *gre_key_v; + int size; + int i; + + /* For NVGRE, GRE header fields must be set with defined values. */ + const struct rte_flow_item_gre gre_spec = { + .c_rsvd0_ver = RTE_BE16(0x2000), + .protocol = RTE_BE16(RTE_ETHER_TYPE_TEB) + }; + const struct rte_flow_item_gre gre_mask = { + .c_rsvd0_ver = RTE_BE16(0xB000), + .protocol = RTE_BE16(UINT16_MAX), + }; + const struct rte_flow_item gre_item = { + .spec = &gre_spec, + .mask = &gre_mask, + .last = NULL, + }; + flow_dv_translate_item_gre(matcher, key, &gre_item, inner); + if (!nvgre_v) + return; + if (!nvgre_m) + nvgre_m = &rte_flow_item_nvgre_mask; + size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id); + gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h); + gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h); + memcpy(gre_key_m, tni_flow_id_m, size); + for (i = 0; i < size; ++i) + gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i]; +} + +/** + * Add VXLAN item to matcher and to the value. + * + * @param[in, out] matcher + * Flow matcher. + * @param[in, out] key + * Flow matcher value. + * @param[in] item + * Flow pattern to translate. + * @param[in] inner + * Item is inner pattern. + */ +static void +flow_dv_translate_item_vxlan(void *matcher, void *key, + const struct rte_flow_item *item, + int inner) +{ + const struct rte_flow_item_vxlan *vxlan_m = item->mask; + const struct rte_flow_item_vxlan *vxlan_v = item->spec; + void *headers_m; + void *headers_v; + void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters); + void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters); + char *vni_m; + char *vni_v; + uint16_t dport; + int size; + int i; + + if (inner) { + headers_m = MLX5_ADDR_OF(fte_match_param, matcher, + inner_headers); + headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers); + } else { + headers_m = MLX5_ADDR_OF(fte_match_param, matcher, + outer_headers); + headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers); + } + dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ? + MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE; + if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) { + MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport); + } + if (!vxlan_v) + return; + if (!vxlan_m) + vxlan_m = &rte_flow_item_vxlan_mask; + size = sizeof(vxlan_m->vni); + vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni); + vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni); + memcpy(vni_m, vxlan_m->vni, size); + for (i = 0; i < size; ++i) + vni_v[i] = vni_m[i] & vxlan_v->vni[i]; +} + +/** + * Add VXLAN-GPE item to matcher and to the value. + * + * @param[in, out] matcher + * Flow matcher. + * @param[in, out] key + * Flow matcher value. + * @param[in] item + * Flow pattern to translate. + * @param[in] inner + * Item is inner pattern. 
+ */ + +static void +flow_dv_translate_item_vxlan_gpe(void *matcher, void *key, + const struct rte_flow_item *item, int inner) +{ + const struct rte_flow_item_vxlan_gpe *vxlan_m = item->mask; + const struct rte_flow_item_vxlan_gpe *vxlan_v = item->spec; + void *headers_m; + void *headers_v; + void *misc_m = + MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_3); + void *misc_v = + MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3); + char *vni_m; + char *vni_v; + uint16_t dport; + int size; + int i; + uint8_t flags_m = 0xff; + uint8_t flags_v = 0xc; + + if (inner) { + headers_m = MLX5_ADDR_OF(fte_match_param, matcher, + inner_headers); + headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers); + } else { + headers_m = MLX5_ADDR_OF(fte_match_param, matcher, + outer_headers); + headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers); + } + dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ? + MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE; + if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) { + MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport); + } + if (!vxlan_v) + return; + if (!vxlan_m) + vxlan_m = &rte_flow_item_vxlan_gpe_mask; + size = sizeof(vxlan_m->vni); + vni_m = MLX5_ADDR_OF(fte_match_set_misc3, misc_m, outer_vxlan_gpe_vni); + vni_v = MLX5_ADDR_OF(fte_match_set_misc3, misc_v, outer_vxlan_gpe_vni); + memcpy(vni_m, vxlan_m->vni, size); + for (i = 0; i < size; ++i) + vni_v[i] = vni_m[i] & vxlan_v->vni[i]; + if (vxlan_m->flags) { + flags_m = vxlan_m->flags; + flags_v = vxlan_v->flags; + } + MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_flags, flags_m); + MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_flags, flags_v); + MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_next_protocol, + vxlan_m->protocol); + MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_next_protocol, + vxlan_v->protocol); +} + +/** + * Add Geneve item to matcher and to the value. + * + * @param[in, out] matcher + * Flow matcher. + * @param[in, out] key + * Flow matcher value. + * @param[in] item + * Flow pattern to translate. + * @param[in] inner + * Item is inner pattern. 
+ */ + +static void +flow_dv_translate_item_geneve(void *matcher, void *key, + const struct rte_flow_item *item, int inner) +{ + const struct rte_flow_item_geneve *geneve_m = item->mask; + const struct rte_flow_item_geneve *geneve_v = item->spec; + void *headers_m; + void *headers_v; + void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters); + void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters); + uint16_t dport; + uint16_t gbhdr_m; + uint16_t gbhdr_v; + char *vni_m; + char *vni_v; + size_t size, i; + + if (inner) { + headers_m = MLX5_ADDR_OF(fte_match_param, matcher, + inner_headers); + headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers); + } else { + headers_m = MLX5_ADDR_OF(fte_match_param, matcher, + outer_headers); + headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers); + } + dport = MLX5_UDP_PORT_GENEVE; + if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) { + MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport); + } + if (!geneve_v) + return; + if (!geneve_m) + geneve_m = &rte_flow_item_geneve_mask; + size = sizeof(geneve_m->vni); + vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, geneve_vni); + vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, geneve_vni); + memcpy(vni_m, geneve_m->vni, size); + for (i = 0; i < size; ++i) + vni_v[i] = vni_m[i] & geneve_v->vni[i]; + MLX5_SET(fte_match_set_misc, misc_m, geneve_protocol_type, + rte_be_to_cpu_16(geneve_m->protocol)); + MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type, + rte_be_to_cpu_16(geneve_v->protocol & geneve_m->protocol)); + gbhdr_m = rte_be_to_cpu_16(geneve_m->ver_opt_len_o_c_rsvd0); + gbhdr_v = rte_be_to_cpu_16(geneve_v->ver_opt_len_o_c_rsvd0); + MLX5_SET(fte_match_set_misc, misc_m, geneve_oam, + MLX5_GENEVE_OAMF_VAL(gbhdr_m)); + MLX5_SET(fte_match_set_misc, misc_v, geneve_oam, + MLX5_GENEVE_OAMF_VAL(gbhdr_v) & MLX5_GENEVE_OAMF_VAL(gbhdr_m)); + MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len, + MLX5_GENEVE_OPTLEN_VAL(gbhdr_m)); + MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len, + MLX5_GENEVE_OPTLEN_VAL(gbhdr_v) & + MLX5_GENEVE_OPTLEN_VAL(gbhdr_m)); +} + +/** + * Add MPLS item to matcher and to the value. + * + * @param[in, out] matcher + * Flow matcher. + * @param[in, out] key + * Flow matcher value. + * @param[in] item + * Flow pattern to translate. + * @param[in] prev_layer + * The protocol layer indicated in previous item. + * @param[in] inner + * Item is inner pattern. 
+ */ +static void +flow_dv_translate_item_mpls(void *matcher, void *key, + const struct rte_flow_item *item, + uint64_t prev_layer, + int inner) +{ + const uint32_t *in_mpls_m = item->mask; + const uint32_t *in_mpls_v = item->spec; + uint32_t *out_mpls_m = 0; + uint32_t *out_mpls_v = 0; + void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters); + void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters); + void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher, + misc_parameters_2); + void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2); + void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers); + void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers); + + switch (prev_layer) { + case MLX5_FLOW_LAYER_OUTER_L4_UDP: + MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, + MLX5_UDP_PORT_MPLS); + break; + case MLX5_FLOW_LAYER_GRE: + MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff); + MLX5_SET(fte_match_set_misc, misc_v, gre_protocol, + RTE_ETHER_TYPE_MPLS); + break; + default: + MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, + IPPROTO_MPLS); + break; + } + if (!in_mpls_v) + return; + if (!in_mpls_m) + in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask; + switch (prev_layer) { + case MLX5_FLOW_LAYER_OUTER_L4_UDP: + out_mpls_m = + (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m, + outer_first_mpls_over_udp); + out_mpls_v = + (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v, + outer_first_mpls_over_udp); + break; + case MLX5_FLOW_LAYER_GRE: + out_mpls_m = + (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m, + outer_first_mpls_over_gre); + out_mpls_v = + (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v, + outer_first_mpls_over_gre); + break; + default: + /* Inner MPLS not over GRE is not supported. */ + if (!inner) { + out_mpls_m = + (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, + misc2_m, + outer_first_mpls); + out_mpls_v = + (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, + misc2_v, + outer_first_mpls); + } + break; + } + if (out_mpls_m && out_mpls_v) { + *out_mpls_m = *in_mpls_m; + *out_mpls_v = *in_mpls_v & *in_mpls_m; + } +} + +/** + * Add metadata register item to matcher + * + * @param[in, out] matcher + * Flow matcher. + * @param[in, out] key + * Flow matcher value. + * @param[in] reg_type + * Type of device metadata register + * @param[in] value + * Register value + * @param[in] mask + * Register mask + */ +static void +flow_dv_match_meta_reg(void *matcher, void *key, + enum modify_reg reg_type, + uint32_t data, uint32_t mask) +{ + void *misc2_m = + MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2); + void *misc2_v = + MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2); + uint32_t temp; + + data &= mask; + switch (reg_type) { + case REG_A: + MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a, mask); + MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a, data); + break; + case REG_B: + MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_b, mask); + MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_b, data); + break; + case REG_C_0: + /* + * The metadata register C0 field might be divided into + * source vport index and META item value, we should set + * this field according to specified mask, not as whole one. 
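+ * For example, if the lower half of REG_C_0 carries the source
+ * vport metadata and only the upper half is available for META
+ * (mask == 0xffff0000), the code below ORs the new bits into the
+ * existing match criteria and rewrites only the masked bits of
+ * the value, leaving the vport portion untouched.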
+ */ + temp = MLX5_GET(fte_match_set_misc2, misc2_m, metadata_reg_c_0); + temp |= mask; + MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_0, temp); + temp = MLX5_GET(fte_match_set_misc2, misc2_v, metadata_reg_c_0); + temp &= ~mask; + temp |= data; + MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_0, temp); + break; + case REG_C_1: + MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_1, mask); + MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_1, data); + break; + case REG_C_2: + MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_2, mask); + MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_2, data); + break; + case REG_C_3: + MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_3, mask); + MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_3, data); + break; + case REG_C_4: + MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_4, mask); + MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_4, data); + break; + case REG_C_5: + MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_5, mask); + MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_5, data); + break; + case REG_C_6: + MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_6, mask); + MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_6, data); + break; + case REG_C_7: + MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_7, mask); + MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_7, data); + break; + default: + MLX5_ASSERT(false); + break; + } +} + +/** + * Add MARK item to matcher + * + * @param[in] dev + * The device to configure through. + * @param[in, out] matcher + * Flow matcher. + * @param[in, out] key + * Flow matcher value. + * @param[in] item + * Flow pattern to translate. + */ +static void +flow_dv_translate_item_mark(struct rte_eth_dev *dev, + void *matcher, void *key, + const struct rte_flow_item *item) +{ + struct mlx5_priv *priv = dev->data->dev_private; + const struct rte_flow_item_mark *mark; + uint32_t value; + uint32_t mask; + + mark = item->mask ? (const void *)item->mask : + &rte_flow_item_mark_mask; + mask = mark->id & priv->sh->dv_mark_mask; + mark = (const void *)item->spec; + MLX5_ASSERT(mark); + value = mark->id & priv->sh->dv_mark_mask & mask; + if (mask) { + enum modify_reg reg; + + /* Get the metadata register index for the mark. */ + reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, NULL); + MLX5_ASSERT(reg > 0); + if (reg == REG_C_0) { + struct mlx5_priv *priv = dev->data->dev_private; + uint32_t msk_c0 = priv->sh->dv_regc0_mask; + uint32_t shl_c0 = rte_bsf32(msk_c0); + + mask &= msk_c0; + mask <<= shl_c0; + value <<= shl_c0; + } + flow_dv_match_meta_reg(matcher, key, reg, value, mask); + } +} + +/** + * Add META item to matcher + * + * @param[in] dev + * The devich to configure through. + * @param[in, out] matcher + * Flow matcher. + * @param[in, out] key + * Flow matcher value. + * @param[in] attr + * Attributes of flow that includes this item. + * @param[in] item + * Flow pattern to translate. 
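+ *
+ * Illustrative META item (a sketch using the public rte_flow API; the
+ * value is an arbitrary example). The register actually used is chosen
+ * by flow_dv_get_metadata_reg() and depends on the dv_xmeta_en mode:
+ *
+ *   struct rte_flow_item_meta spec = { .data = 0x1234 };
+ *   struct rte_flow_item item = {
+ *           .type = RTE_FLOW_ITEM_TYPE_META,
+ *           .spec = &spec,
+ *           .mask = &rte_flow_item_meta_mask,
+ *   };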
+ */ +static void +flow_dv_translate_item_meta(struct rte_eth_dev *dev, + void *matcher, void *key, + const struct rte_flow_attr *attr, + const struct rte_flow_item *item) +{ + const struct rte_flow_item_meta *meta_m; + const struct rte_flow_item_meta *meta_v; + + meta_m = (const void *)item->mask; + if (!meta_m) + meta_m = &rte_flow_item_meta_mask; + meta_v = (const void *)item->spec; + if (meta_v) { + int reg; + uint32_t value = meta_v->data; + uint32_t mask = meta_m->data; + + reg = flow_dv_get_metadata_reg(dev, attr, NULL); + if (reg < 0) + return; + /* + * In datapath code there is no endianness + * coversions for perfromance reasons, all + * pattern conversions are done in rte_flow. + */ + value = rte_cpu_to_be_32(value); + mask = rte_cpu_to_be_32(mask); + if (reg == REG_C_0) { + struct mlx5_priv *priv = dev->data->dev_private; + uint32_t msk_c0 = priv->sh->dv_regc0_mask; + uint32_t shl_c0 = rte_bsf32(msk_c0); +#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN + uint32_t shr_c0 = __builtin_clz(priv->sh->dv_meta_mask); + + value >>= shr_c0; + mask >>= shr_c0; +#endif + value <<= shl_c0; + mask <<= shl_c0; + MLX5_ASSERT(msk_c0); + MLX5_ASSERT(!(~msk_c0 & mask)); + } + flow_dv_match_meta_reg(matcher, key, reg, value, mask); + } +} + +/** + * Add vport metadata Reg C0 item to matcher + * + * @param[in, out] matcher + * Flow matcher. + * @param[in, out] key + * Flow matcher value. + * @param[in] reg + * Flow pattern to translate. + */ +static void +flow_dv_translate_item_meta_vport(void *matcher, void *key, + uint32_t value, uint32_t mask) +{ + flow_dv_match_meta_reg(matcher, key, REG_C_0, value, mask); +} + +/** + * Add tag item to matcher + * + * @param[in] dev + * The devich to configure through. + * @param[in, out] matcher + * Flow matcher. + * @param[in, out] key + * Flow matcher value. + * @param[in] item + * Flow pattern to translate. + */ +static void +flow_dv_translate_mlx5_item_tag(struct rte_eth_dev *dev, + void *matcher, void *key, + const struct rte_flow_item *item) +{ + const struct mlx5_rte_flow_item_tag *tag_v = item->spec; + const struct mlx5_rte_flow_item_tag *tag_m = item->mask; + uint32_t mask, value; + + MLX5_ASSERT(tag_v); + value = tag_v->data; + mask = tag_m ? tag_m->data : UINT32_MAX; + if (tag_v->id == REG_C_0) { + struct mlx5_priv *priv = dev->data->dev_private; + uint32_t msk_c0 = priv->sh->dv_regc0_mask; + uint32_t shl_c0 = rte_bsf32(msk_c0); + + mask &= msk_c0; + mask <<= shl_c0; + value <<= shl_c0; + } + flow_dv_match_meta_reg(matcher, key, tag_v->id, value, mask); +} + +/** + * Add TAG item to matcher + * + * @param[in] dev + * The devich to configure through. + * @param[in, out] matcher + * Flow matcher. + * @param[in, out] key + * Flow matcher value. + * @param[in] item + * Flow pattern to translate. + */ +static void +flow_dv_translate_item_tag(struct rte_eth_dev *dev, + void *matcher, void *key, + const struct rte_flow_item *item) +{ + const struct rte_flow_item_tag *tag_v = item->spec; + const struct rte_flow_item_tag *tag_m = item->mask; + enum modify_reg reg; + + MLX5_ASSERT(tag_v); + tag_m = tag_m ? tag_m : &rte_flow_item_tag_mask; + /* Get the metadata register index for the tag. */ + reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, tag_v->index, NULL); + MLX5_ASSERT(reg > 0); + flow_dv_match_meta_reg(matcher, key, reg, tag_v->data, tag_m->data); +} + +/** + * Add source vport match to the specified matcher. + * + * @param[in, out] matcher + * Flow matcher. + * @param[in, out] key + * Flow matcher value. 
+ * @param[in] port + * Source vport value to match + * @param[in] mask + * Mask + */ +static void +flow_dv_translate_item_source_vport(void *matcher, void *key, + int16_t port, uint16_t mask) +{ + void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters); + void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters); + + MLX5_SET(fte_match_set_misc, misc_m, source_port, mask); + MLX5_SET(fte_match_set_misc, misc_v, source_port, port); +} + +/** + * Translate port-id item to eswitch match on port-id. + * + * @param[in] dev + * The devich to configure through. + * @param[in, out] matcher + * Flow matcher. + * @param[in, out] key + * Flow matcher value. + * @param[in] item + * Flow pattern to translate. + * + * @return + * 0 on success, a negative errno value otherwise. + */ +static int +flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher, + void *key, const struct rte_flow_item *item) +{ + const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL; + const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL; + struct mlx5_priv *priv; + uint16_t mask, id; + + mask = pid_m ? pid_m->id : 0xffff; + id = pid_v ? pid_v->id : dev->data->port_id; + priv = mlx5_port_to_eswitch_info(id, item == NULL); + if (!priv) + return -rte_errno; + /* Translate to vport field or to metadata, depending on mode. */ + if (priv->vport_meta_mask) + flow_dv_translate_item_meta_vport(matcher, key, + priv->vport_meta_tag, + priv->vport_meta_mask); + else + flow_dv_translate_item_source_vport(matcher, key, + priv->vport_id, mask); + return 0; +} + +/** + * Add ICMP6 item to matcher and to the value. + * + * @param[in, out] matcher + * Flow matcher. + * @param[in, out] key + * Flow matcher value. + * @param[in] item + * Flow pattern to translate. + * @param[in] inner + * Item is inner pattern. + */ +static void +flow_dv_translate_item_icmp6(void *matcher, void *key, + const struct rte_flow_item *item, + int inner) +{ + const struct rte_flow_item_icmp6 *icmp6_m = item->mask; + const struct rte_flow_item_icmp6 *icmp6_v = item->spec; + void *headers_m; + void *headers_v; + void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher, + misc_parameters_3); + void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3); + if (inner) { + headers_m = MLX5_ADDR_OF(fte_match_param, matcher, + inner_headers); + headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers); + } else { + headers_m = MLX5_ADDR_OF(fte_match_param, matcher, + outer_headers); + headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers); + } + MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMPV6); + if (!icmp6_v) + return; + if (!icmp6_m) + icmp6_m = &rte_flow_item_icmp6_mask; + /* + * Force flow only to match the non-fragmented IPv6 ICMPv6 packets. + * If only the protocol is specified, no need to match the frag. + */ + MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, 1); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0); + MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_type, icmp6_m->type); + MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_type, + icmp6_v->type & icmp6_m->type); + MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_code, icmp6_m->code); + MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_code, + icmp6_v->code & icmp6_m->code); +} + +/** + * Add ICMP item to matcher and to the value. + * + * @param[in, out] matcher + * Flow matcher. + * @param[in, out] key + * Flow matcher value. 
+ * @param[in] item + * Flow pattern to translate. + * @param[in] inner + * Item is inner pattern. + */ +static void +flow_dv_translate_item_icmp(void *matcher, void *key, + const struct rte_flow_item *item, + int inner) +{ + const struct rte_flow_item_icmp *icmp_m = item->mask; + const struct rte_flow_item_icmp *icmp_v = item->spec; + void *headers_m; + void *headers_v; + void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher, + misc_parameters_3); + void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3); + if (inner) { + headers_m = MLX5_ADDR_OF(fte_match_param, matcher, + inner_headers); + headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers); + } else { + headers_m = MLX5_ADDR_OF(fte_match_param, matcher, + outer_headers); + headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers); + } + MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMP); + if (!icmp_v) + return; + if (!icmp_m) + icmp_m = &rte_flow_item_icmp_mask; + /* + * Force flow only to match the non-fragmented IPv4 ICMP packets. + * If only the protocol is specified, no need to match the frag. + */ + MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, 1); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0); + MLX5_SET(fte_match_set_misc3, misc3_m, icmp_type, + icmp_m->hdr.icmp_type); + MLX5_SET(fte_match_set_misc3, misc3_v, icmp_type, + icmp_v->hdr.icmp_type & icmp_m->hdr.icmp_type); + MLX5_SET(fte_match_set_misc3, misc3_m, icmp_code, + icmp_m->hdr.icmp_code); + MLX5_SET(fte_match_set_misc3, misc3_v, icmp_code, + icmp_v->hdr.icmp_code & icmp_m->hdr.icmp_code); +} + +/** + * Add GTP item to matcher and to the value. + * + * @param[in, out] matcher + * Flow matcher. + * @param[in, out] key + * Flow matcher value. + * @param[in] item + * Flow pattern to translate. + * @param[in] inner + * Item is inner pattern. 
+ */ +static void +flow_dv_translate_item_gtp(void *matcher, void *key, + const struct rte_flow_item *item, int inner) +{ + const struct rte_flow_item_gtp *gtp_m = item->mask; + const struct rte_flow_item_gtp *gtp_v = item->spec; + void *headers_m; + void *headers_v; + void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher, + misc_parameters_3); + void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3); + uint16_t dport = RTE_GTPU_UDP_PORT; + + if (inner) { + headers_m = MLX5_ADDR_OF(fte_match_param, matcher, + inner_headers); + headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers); + } else { + headers_m = MLX5_ADDR_OF(fte_match_param, matcher, + outer_headers); + headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers); + } + if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) { + MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport); + } + if (!gtp_v) + return; + if (!gtp_m) + gtp_m = &rte_flow_item_gtp_mask; + MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags, + gtp_m->v_pt_rsv_flags); + MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags, + gtp_v->v_pt_rsv_flags & gtp_m->v_pt_rsv_flags); + MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_type, gtp_m->msg_type); + MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_type, + gtp_v->msg_type & gtp_m->msg_type); + MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_teid, + rte_be_to_cpu_32(gtp_m->teid)); + MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_teid, + rte_be_to_cpu_32(gtp_v->teid & gtp_m->teid)); +} + +static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 }; + +#define HEADER_IS_ZERO(match_criteria, headers) \ + !(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers), \ + matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers))) \ + +/** + * Calculate flow matcher enable bitmap. + * + * @param match_criteria + * Pointer to flow matcher criteria. + * + * @return + * Bitmap of enabled fields. + */ +static uint8_t +flow_dv_matcher_enable(uint32_t *match_criteria) +{ + uint8_t match_criteria_enable; + + match_criteria_enable = + (!HEADER_IS_ZERO(match_criteria, outer_headers)) << + MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT; + match_criteria_enable |= + (!HEADER_IS_ZERO(match_criteria, misc_parameters)) << + MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT; + match_criteria_enable |= + (!HEADER_IS_ZERO(match_criteria, inner_headers)) << + MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT; + match_criteria_enable |= + (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) << + MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT; + match_criteria_enable |= + (!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) << + MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT; + return match_criteria_enable; +} + + +/** + * Get a flow table. + * + * @param[in, out] dev + * Pointer to rte_eth_dev structure. + * @param[in] table_id + * Table id to use. + * @param[in] egress + * Direction of the table. + * @param[in] transfer + * E-Switch or NIC flow. + * @param[out] error + * pointer to error structure. + * + * @return + * Returns tables resource based on the index, NULL in case of failed. 
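+ *
+ * The returned table holds an additional reference; each successful call
+ * is expected to be paired with flow_dv_tbl_resource_release() below,
+ * which destroys the table object only when the last reference is gone.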
+ */ +static struct mlx5_flow_tbl_resource * +flow_dv_tbl_resource_get(struct rte_eth_dev *dev, + uint32_t table_id, uint8_t egress, + uint8_t transfer, + struct rte_flow_error *error) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_ibv_shared *sh = priv->sh; + struct mlx5_flow_tbl_resource *tbl; + union mlx5_flow_tbl_key table_key = { + { + .table_id = table_id, + .reserved = 0, + .domain = !!transfer, + .direction = !!egress, + } + }; + struct mlx5_hlist_entry *pos = mlx5_hlist_lookup(sh->flow_tbls, + table_key.v64); + struct mlx5_flow_tbl_data_entry *tbl_data; + uint32_t idx = 0; + int ret; + void *domain; + + if (pos) { + tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry, + entry); + tbl = &tbl_data->tbl; + rte_atomic32_inc(&tbl->refcnt); + return tbl; + } + tbl_data = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_JUMP], &idx); + if (!tbl_data) { + rte_flow_error_set(error, ENOMEM, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "cannot allocate flow table data entry"); + return NULL; + } + tbl_data->idx = idx; + tbl = &tbl_data->tbl; + pos = &tbl_data->entry; + if (transfer) + domain = sh->fdb_domain; + else if (egress) + domain = sh->tx_domain; + else + domain = sh->rx_domain; + tbl->obj = mlx5_glue->dr_create_flow_tbl(domain, table_id); + if (!tbl->obj) { + rte_flow_error_set(error, ENOMEM, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, "cannot create flow table object"); + mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx); + return NULL; + } + /* + * No multi-threads now, but still better to initialize the reference + * count before insert it into the hash list. + */ + rte_atomic32_init(&tbl->refcnt); + /* Jump action reference count is initialized here. */ + rte_atomic32_init(&tbl_data->jump.refcnt); + pos->key = table_key.v64; + ret = mlx5_hlist_insert(sh->flow_tbls, pos); + if (ret < 0) { + rte_flow_error_set(error, -ret, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "cannot insert flow table data entry"); + mlx5_glue->dr_destroy_flow_tbl(tbl->obj); + mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx); + } + rte_atomic32_inc(&tbl->refcnt); + return tbl; +} + +/** + * Release a flow table. + * + * @param[in] dev + * Pointer to rte_eth_dev structure. + * @param[in] tbl + * Table resource to be released. + * + * @return + * Returns 0 if table was released, else return 1; + */ +static int +flow_dv_tbl_resource_release(struct rte_eth_dev *dev, + struct mlx5_flow_tbl_resource *tbl) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_ibv_shared *sh = priv->sh; + struct mlx5_flow_tbl_data_entry *tbl_data = + container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl); + + if (!tbl) + return 0; + if (rte_atomic32_dec_and_test(&tbl->refcnt)) { + struct mlx5_hlist_entry *pos = &tbl_data->entry; + + mlx5_glue->dr_destroy_flow_tbl(tbl->obj); + tbl->obj = NULL; + /* remove the entry from the hash list and free memory. */ + mlx5_hlist_remove(sh->flow_tbls, pos); + mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_JUMP], + tbl_data->idx); + return 0; + } + return 1; +} + +/** + * Register the flow matcher. + * + * @param[in, out] dev + * Pointer to rte_eth_dev structure. + * @param[in, out] matcher + * Pointer to flow matcher. + * @param[in, out] key + * Pointer to flow table key. + * @parm[in, out] dev_flow + * Pointer to the dev_flow. + * @param[out] error + * pointer to error structure. + * + * @return + * 0 on success otherwise -errno and errno is set. 
+ */ +static int +flow_dv_matcher_register(struct rte_eth_dev *dev, + struct mlx5_flow_dv_matcher *matcher, + union mlx5_flow_tbl_key *key, + struct mlx5_flow *dev_flow, + struct rte_flow_error *error) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_ibv_shared *sh = priv->sh; + struct mlx5_flow_dv_matcher *cache_matcher; + struct mlx5dv_flow_matcher_attr dv_attr = { + .type = IBV_FLOW_ATTR_NORMAL, + .match_mask = (void *)&matcher->mask, + }; + struct mlx5_flow_tbl_resource *tbl; + struct mlx5_flow_tbl_data_entry *tbl_data; + + tbl = flow_dv_tbl_resource_get(dev, key->table_id, key->direction, + key->domain, error); + if (!tbl) + return -rte_errno; /* No need to refill the error info */ + tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl); + /* Lookup from cache. */ + LIST_FOREACH(cache_matcher, &tbl_data->matchers, next) { + if (matcher->crc == cache_matcher->crc && + matcher->priority == cache_matcher->priority && + !memcmp((const void *)matcher->mask.buf, + (const void *)cache_matcher->mask.buf, + cache_matcher->mask.size)) { + DRV_LOG(DEBUG, + "%s group %u priority %hd use %s " + "matcher %p: refcnt %d++", + key->domain ? "FDB" : "NIC", key->table_id, + cache_matcher->priority, + key->direction ? "tx" : "rx", + (void *)cache_matcher, + rte_atomic32_read(&cache_matcher->refcnt)); + rte_atomic32_inc(&cache_matcher->refcnt); + dev_flow->handle->dvh.matcher = cache_matcher; + /* old matcher should not make the table ref++. */ + flow_dv_tbl_resource_release(dev, tbl); + return 0; + } + } + /* Register new matcher. */ + cache_matcher = rte_calloc(__func__, 1, sizeof(*cache_matcher), 0); + if (!cache_matcher) { + flow_dv_tbl_resource_release(dev, tbl); + return rte_flow_error_set(error, ENOMEM, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "cannot allocate matcher memory"); + } + *cache_matcher = *matcher; + dv_attr.match_criteria_enable = + flow_dv_matcher_enable(cache_matcher->mask.buf); + dv_attr.priority = matcher->priority; + if (key->direction) + dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS; + cache_matcher->matcher_object = + mlx5_glue->dv_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj); + if (!cache_matcher->matcher_object) { + rte_free(cache_matcher); +#ifdef HAVE_MLX5DV_DR + flow_dv_tbl_resource_release(dev, tbl); +#endif + return rte_flow_error_set(error, ENOMEM, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, "cannot create matcher"); + } + /* Save the table information */ + cache_matcher->tbl = tbl; + rte_atomic32_init(&cache_matcher->refcnt); + /* only matcher ref++, table ref++ already done above in get API. */ + rte_atomic32_inc(&cache_matcher->refcnt); + LIST_INSERT_HEAD(&tbl_data->matchers, cache_matcher, next); + dev_flow->handle->dvh.matcher = cache_matcher; + DRV_LOG(DEBUG, "%s group %u priority %hd new %s matcher %p: refcnt %d", + key->domain ? "FDB" : "NIC", key->table_id, + cache_matcher->priority, + key->direction ? "tx" : "rx", (void *)cache_matcher, + rte_atomic32_read(&cache_matcher->refcnt)); + return 0; +} + +/** + * Find existing tag resource or create and register a new one. + * + * @param dev[in, out] + * Pointer to rte_eth_dev structure. + * @param[in, out] tag_be24 + * Tag value in big endian then R-shift 8. + * @parm[in, out] dev_flow + * Pointer to the dev_flow. + * @param[out] error + * pointer to error structure. + * + * @return + * 0 on success otherwise -errno and errno is set. 
+ */ +static int +flow_dv_tag_resource_register + (struct rte_eth_dev *dev, + uint32_t tag_be24, + struct mlx5_flow *dev_flow, + struct rte_flow_error *error) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_ibv_shared *sh = priv->sh; + struct mlx5_flow_dv_tag_resource *cache_resource; + struct mlx5_hlist_entry *entry; + + /* Lookup a matching resource from cache. */ + entry = mlx5_hlist_lookup(sh->tag_table, (uint64_t)tag_be24); + if (entry) { + cache_resource = container_of + (entry, struct mlx5_flow_dv_tag_resource, entry); + rte_atomic32_inc(&cache_resource->refcnt); + dev_flow->handle->dvh.rix_tag = cache_resource->idx; + dev_flow->dv.tag_resource = cache_resource; + DRV_LOG(DEBUG, "cached tag resource %p: refcnt now %d++", + (void *)cache_resource, + rte_atomic32_read(&cache_resource->refcnt)); + return 0; + } + /* Register new resource. */ + cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_TAG], + &dev_flow->handle->dvh.rix_tag); + if (!cache_resource) + return rte_flow_error_set(error, ENOMEM, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "cannot allocate resource memory"); + cache_resource->entry.key = (uint64_t)tag_be24; + cache_resource->action = mlx5_glue->dv_create_flow_action_tag(tag_be24); + if (!cache_resource->action) { + rte_free(cache_resource); + return rte_flow_error_set(error, ENOMEM, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, "cannot create action"); + } + rte_atomic32_init(&cache_resource->refcnt); + rte_atomic32_inc(&cache_resource->refcnt); + if (mlx5_hlist_insert(sh->tag_table, &cache_resource->entry)) { + mlx5_glue->destroy_flow_action(cache_resource->action); + rte_free(cache_resource); + return rte_flow_error_set(error, EEXIST, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, "cannot insert tag"); + } + dev_flow->dv.tag_resource = cache_resource; + DRV_LOG(DEBUG, "new tag resource %p: refcnt now %d++", + (void *)cache_resource, + rte_atomic32_read(&cache_resource->refcnt)); + return 0; +} + +/** + * Release the tag. + * + * @param dev + * Pointer to Ethernet device. + * @param tag_idx + * Tag index. + * + * @return + * 1 while a reference on it exists, 0 when freed. + */ +static int +flow_dv_tag_release(struct rte_eth_dev *dev, + uint32_t tag_idx) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_ibv_shared *sh = priv->sh; + struct mlx5_flow_dv_tag_resource *tag; + + tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG], tag_idx); + if (!tag) + return 0; + DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--", + dev->data->port_id, (void *)tag, + rte_atomic32_read(&tag->refcnt)); + if (rte_atomic32_dec_and_test(&tag->refcnt)) { + claim_zero(mlx5_glue->destroy_flow_action(tag->action)); + mlx5_hlist_remove(sh->tag_table, &tag->entry); + DRV_LOG(DEBUG, "port %u tag %p: removed", + dev->data->port_id, (void *)tag); + mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_TAG], tag_idx); + return 0; + } + return 1; +} + +/** + * Translate port ID action to vport. + * + * @param[in] dev + * Pointer to rte_eth_dev structure. + * @param[in] action + * Pointer to the port ID action. + * @param[out] dst_port_id + * The target port ID. + * @param[out] error + * Pointer to the error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. 
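+ *
+ * Illustrative action translated here (a sketch using the public
+ * rte_flow API; the port ID is an arbitrary example):
+ *
+ *   struct rte_flow_action_port_id conf = { .id = 1 };
+ *   struct rte_flow_action action = {
+ *           .type = RTE_FLOW_ACTION_TYPE_PORT_ID,
+ *           .conf = &conf,
+ *   };
+ *
+ * The DPDK port ID is resolved to the E-Switch vport (or IB port when
+ * HAVE_MLX5DV_DR_DEVX_PORT is defined) consumed by the rdma-core action.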
+ */
+static int
+flow_dv_translate_action_port_id(struct rte_eth_dev *dev,
+                                 const struct rte_flow_action *action,
+                                 uint32_t *dst_port_id,
+                                 struct rte_flow_error *error)
+{
+        uint32_t port;
+        struct mlx5_priv *priv;
+        const struct rte_flow_action_port_id *conf =
+                        (const struct rte_flow_action_port_id *)action->conf;
+
+        port = conf->original ? dev->data->port_id : conf->id;
+        priv = mlx5_port_to_eswitch_info(port, false);
+        if (!priv)
+                return rte_flow_error_set(error, -rte_errno,
+                                          RTE_FLOW_ERROR_TYPE_ACTION,
+                                          NULL,
+                                          "No eswitch info was found for port");
+#ifdef HAVE_MLX5DV_DR_DEVX_PORT
+        /*
+         * This parameter is transferred to
+         * mlx5dv_dr_action_create_dest_ib_port().
+         */
+        *dst_port_id = priv->ibv_port;
+#else
+        /*
+         * Legacy mode, no LAG configuration is supported.
+         * This parameter is transferred to
+         * mlx5dv_dr_action_create_dest_vport().
+         */
+        *dst_port_id = priv->vport_id;
+#endif
+        return 0;
+}
+
+/**
+ * Create a counter with aging configuration.
+ *
+ * @param[in] dev
+ *   Pointer to rte_eth_dev structure.
+ * @param[in] dev_flow
+ *   Pointer to the sub flow.
+ * @param[in] count
+ *   Pointer to the counter action configuration.
+ * @param[in] age
+ *   Pointer to the aging action configuration.
+ *
+ * @return
+ *   Index to flow counter on success, 0 otherwise.
+ */
+static uint32_t
+flow_dv_translate_create_counter(struct rte_eth_dev *dev,
+                                 struct mlx5_flow *dev_flow,
+                                 const struct rte_flow_action_count *count,
+                                 const struct rte_flow_action_age *age)
+{
+        uint32_t counter;
+        struct mlx5_age_param *age_param;
+
+        counter = flow_dv_counter_alloc(dev,
+                                        count ? count->shared : 0,
+                                        count ? count->id : 0,
+                                        dev_flow->dv.group, !!age);
+        if (!counter || age == NULL)
+                return counter;
+        age_param = flow_dv_counter_idx_get_age(dev, counter);
+        age_param->context = age->context ? age->context :
+                (void *)(uintptr_t)(dev_flow->flow_idx);
+        /*
+         * The counter age accuracy may have a bit delay. Have 3/4
+         * second bias on the timeout in order to let it age in time.
+         */
+        age_param->timeout = age->timeout * 10 - MLX5_AGING_TIME_DELAY;
+        /* Set expire time in unit of 0.1 sec. */
+        age_param->port_id = dev->data->port_id;
+        age_param->expire = age_param->timeout +
+                        rte_rdtsc() / (rte_get_tsc_hz() / 10);
+        rte_atomic16_set(&age_param->state, AGE_CANDIDATE);
+        return counter;
+}
+/**
+ * Add Tx queue matcher
+ *
+ * @param[in] dev
+ *   Pointer to the dev struct.
+ * @param[in, out] matcher
+ *   Flow matcher.
+ * @param[in, out] key
+ *   Flow matcher value.
+ * @param[in] item
+ *   Flow pattern to translate.
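+ *
+ * This item is the PMD-internal Tx-queue pattern item (struct
+ * mlx5_rte_flow_item_tx_queue), typically inserted by the driver itself
+ * for egress rules that must match a specific send queue; it is not part
+ * of the public rte_flow item set.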
+ */ +static void +flow_dv_translate_item_tx_queue(struct rte_eth_dev *dev, + void *matcher, void *key, + const struct rte_flow_item *item) +{ + const struct mlx5_rte_flow_item_tx_queue *queue_m; + const struct mlx5_rte_flow_item_tx_queue *queue_v; + void *misc_m = + MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters); + void *misc_v = + MLX5_ADDR_OF(fte_match_param, key, misc_parameters); + struct mlx5_txq_ctrl *txq; + uint32_t queue; + + + queue_m = (const void *)item->mask; + if (!queue_m) + return; + queue_v = (const void *)item->spec; + if (!queue_v) + return; + txq = mlx5_txq_get(dev, queue_v->queue); + if (!txq) + return; + queue = txq->obj->sq->id; + MLX5_SET(fte_match_set_misc, misc_m, source_sqn, queue_m->queue); + MLX5_SET(fte_match_set_misc, misc_v, source_sqn, + queue & queue_m->queue); + mlx5_txq_release(dev, queue_v->queue); +} + +/** + * Set the hash fields according to the @p flow information. + * + * @param[in] dev_flow + * Pointer to the mlx5_flow. + * @param[in] rss_desc + * Pointer to the mlx5_flow_rss_desc. + */ +static void +flow_dv_hashfields_set(struct mlx5_flow *dev_flow, + struct mlx5_flow_rss_desc *rss_desc) +{ + uint64_t items = dev_flow->handle->layers; + int rss_inner = 0; + uint64_t rss_types = rte_eth_rss_hf_refine(rss_desc->types); + + dev_flow->hash_fields = 0; +#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT + if (rss_desc->level >= 2) { + dev_flow->hash_fields |= IBV_RX_HASH_INNER; + rss_inner = 1; + } +#endif + if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV4)) || + (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV4))) { + if (rss_types & MLX5_IPV4_LAYER_TYPES) { + if (rss_types & ETH_RSS_L3_SRC_ONLY) + dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV4; + else if (rss_types & ETH_RSS_L3_DST_ONLY) + dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV4; + else + dev_flow->hash_fields |= MLX5_IPV4_IBV_RX_HASH; + } + } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV6)) || + (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV6))) { + if (rss_types & MLX5_IPV6_LAYER_TYPES) { + if (rss_types & ETH_RSS_L3_SRC_ONLY) + dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV6; + else if (rss_types & ETH_RSS_L3_DST_ONLY) + dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV6; + else + dev_flow->hash_fields |= MLX5_IPV6_IBV_RX_HASH; + } + } + if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_UDP)) || + (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_UDP))) { + if (rss_types & ETH_RSS_UDP) { + if (rss_types & ETH_RSS_L4_SRC_ONLY) + dev_flow->hash_fields |= + IBV_RX_HASH_SRC_PORT_UDP; + else if (rss_types & ETH_RSS_L4_DST_ONLY) + dev_flow->hash_fields |= + IBV_RX_HASH_DST_PORT_UDP; + else + dev_flow->hash_fields |= MLX5_UDP_IBV_RX_HASH; + } + } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_TCP)) || + (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_TCP))) { + if (rss_types & ETH_RSS_TCP) { + if (rss_types & ETH_RSS_L4_SRC_ONLY) + dev_flow->hash_fields |= + IBV_RX_HASH_SRC_PORT_TCP; + else if (rss_types & ETH_RSS_L4_DST_ONLY) + dev_flow->hash_fields |= + IBV_RX_HASH_DST_PORT_TCP; + else + dev_flow->hash_fields |= MLX5_TCP_IBV_RX_HASH; + } + } +} + +/** + * Fill the flow with DV spec, lock free + * (mutex should be acquired by caller). + * + * @param[in] dev + * Pointer to rte_eth_dev structure. + * @param[in, out] dev_flow + * Pointer to the sub flow. + * @param[in] attr + * Pointer to the flow attributes. + * @param[in] items + * Pointer to the list of items. + * @param[in] actions + * Pointer to the list of actions. 
+ * @param[out] error + * Pointer to the error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +__flow_dv_translate(struct rte_eth_dev *dev, + struct mlx5_flow *dev_flow, + const struct rte_flow_attr *attr, + const struct rte_flow_item items[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_dev_config *dev_conf = &priv->config; + struct rte_flow *flow = dev_flow->flow; + struct mlx5_flow_handle *handle = dev_flow->handle; + struct mlx5_flow_rss_desc *rss_desc = &((struct mlx5_flow_rss_desc *) + priv->rss_desc) + [!!priv->flow_nested_idx]; + uint64_t item_flags = 0; + uint64_t last_item = 0; + uint64_t action_flags = 0; + uint64_t priority = attr->priority; + struct mlx5_flow_dv_matcher matcher = { + .mask = { + .size = sizeof(matcher.mask.buf), + }, + }; + int actions_n = 0; + bool actions_end = false; + union { + struct mlx5_flow_dv_modify_hdr_resource res; + uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) + + sizeof(struct mlx5_modification_cmd) * + (MLX5_MAX_MODIFY_NUM + 1)]; + } mhdr_dummy; + struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res; + const struct rte_flow_action_count *count = NULL; + const struct rte_flow_action_age *age = NULL; + union flow_dv_attr flow_attr = { .attr = 0 }; + uint32_t tag_be; + union mlx5_flow_tbl_key tbl_key; + uint32_t modify_action_position = UINT32_MAX; + void *match_mask = matcher.mask.buf; + void *match_value = dev_flow->dv.value.buf; + uint8_t next_protocol = 0xff; + struct rte_vlan_hdr vlan = { 0 }; + uint32_t table; + int ret = 0; + + mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX : + MLX5DV_FLOW_TABLE_TYPE_NIC_RX; + ret = mlx5_flow_group_to_table(attr, dev_flow->external, attr->group, + !!priv->fdb_def_rule, &table, error); + if (ret) + return ret; + dev_flow->dv.group = table; + if (attr->transfer) + mhdr_res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB; + if (priority == MLX5_FLOW_PRIO_RSVD) + priority = dev_conf->flow_prio - 1; + /* number of actions must be set to 0 in case of dirty stack. 
*/ + mhdr_res->actions_num = 0; + for (; !actions_end ; actions++) { + const struct rte_flow_action_queue *queue; + const struct rte_flow_action_rss *rss; + const struct rte_flow_action *action = actions; + const uint8_t *rss_key; + const struct rte_flow_action_jump *jump_data; + const struct rte_flow_action_meter *mtr; + struct mlx5_flow_tbl_resource *tbl; + uint32_t port_id = 0; + struct mlx5_flow_dv_port_id_action_resource port_id_resource; + int action_type = actions->type; + const struct rte_flow_action *found_action = NULL; + struct mlx5_flow_meter *fm = NULL; + + switch (action_type) { + case RTE_FLOW_ACTION_TYPE_VOID: + break; + case RTE_FLOW_ACTION_TYPE_PORT_ID: + if (flow_dv_translate_action_port_id(dev, action, + &port_id, error)) + return -rte_errno; + port_id_resource.port_id = port_id; + MLX5_ASSERT(!handle->rix_port_id_action); + if (flow_dv_port_id_action_resource_register + (dev, &port_id_resource, dev_flow, error)) + return -rte_errno; + dev_flow->dv.actions[actions_n++] = + dev_flow->dv.port_id_action->action; + action_flags |= MLX5_FLOW_ACTION_PORT_ID; + dev_flow->handle->fate_action = MLX5_FLOW_FATE_PORT_ID; + break; + case RTE_FLOW_ACTION_TYPE_FLAG: + action_flags |= MLX5_FLOW_ACTION_FLAG; + dev_flow->handle->mark = 1; + if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) { + struct rte_flow_action_mark mark = { + .id = MLX5_FLOW_MARK_DEFAULT, + }; + + if (flow_dv_convert_action_mark(dev, &mark, + mhdr_res, + error)) + return -rte_errno; + action_flags |= MLX5_FLOW_ACTION_MARK_EXT; + break; + } + tag_be = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT); + /* + * Only one FLAG or MARK is supported per device flow + * right now. So the pointer to the tag resource must be + * zero before the register process. + */ + MLX5_ASSERT(!handle->dvh.rix_tag); + if (flow_dv_tag_resource_register(dev, tag_be, + dev_flow, error)) + return -rte_errno; + MLX5_ASSERT(dev_flow->dv.tag_resource); + dev_flow->dv.actions[actions_n++] = + dev_flow->dv.tag_resource->action; + break; + case RTE_FLOW_ACTION_TYPE_MARK: + action_flags |= MLX5_FLOW_ACTION_MARK; + dev_flow->handle->mark = 1; + if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) { + const struct rte_flow_action_mark *mark = + (const struct rte_flow_action_mark *) + actions->conf; + + if (flow_dv_convert_action_mark(dev, mark, + mhdr_res, + error)) + return -rte_errno; + action_flags |= MLX5_FLOW_ACTION_MARK_EXT; + break; + } + /* Fall-through */ + case MLX5_RTE_FLOW_ACTION_TYPE_MARK: + /* Legacy (non-extensive) MARK action. 
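+ * As an illustrative sketch only (hypothetical application-side code and
+ * values, not part of this driver): the MARK id programmed here travels
+ * with the received packet and is read back from the mbuf on this DPDK
+ * release roughly as:
+ *
+ *     struct rte_flow_action_mark mark = { .id = 0x1234 };
+ *     struct rte_flow_action_queue queue = { .index = 0 };
+ *     struct rte_flow_action actions[] = {
+ *         { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
+ *         { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
+ *         { .type = RTE_FLOW_ACTION_TYPE_END },
+ *     };
+ *     ...
+ *     if (mbuf->ol_flags & PKT_RX_FDIR_ID)
+ *         mark_id = mbuf->hash.fdir.hi;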
*/ + tag_be = mlx5_flow_mark_set + (((const struct rte_flow_action_mark *) + (actions->conf))->id); + MLX5_ASSERT(!handle->dvh.rix_tag); + if (flow_dv_tag_resource_register(dev, tag_be, + dev_flow, error)) + return -rte_errno; + MLX5_ASSERT(dev_flow->dv.tag_resource); + dev_flow->dv.actions[actions_n++] = + dev_flow->dv.tag_resource->action; + break; + case RTE_FLOW_ACTION_TYPE_SET_META: + if (flow_dv_convert_action_set_meta + (dev, mhdr_res, attr, + (const struct rte_flow_action_set_meta *) + actions->conf, error)) + return -rte_errno; + action_flags |= MLX5_FLOW_ACTION_SET_META; + break; + case RTE_FLOW_ACTION_TYPE_SET_TAG: + if (flow_dv_convert_action_set_tag + (dev, mhdr_res, + (const struct rte_flow_action_set_tag *) + actions->conf, error)) + return -rte_errno; + action_flags |= MLX5_FLOW_ACTION_SET_TAG; + break; + case RTE_FLOW_ACTION_TYPE_DROP: + action_flags |= MLX5_FLOW_ACTION_DROP; + dev_flow->handle->fate_action = MLX5_FLOW_FATE_DROP; + break; + case RTE_FLOW_ACTION_TYPE_QUEUE: + queue = actions->conf; + rss_desc->queue_num = 1; + rss_desc->queue[0] = queue->index; + action_flags |= MLX5_FLOW_ACTION_QUEUE; + dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE; + break; + case RTE_FLOW_ACTION_TYPE_RSS: + rss = actions->conf; + memcpy(rss_desc->queue, rss->queue, + rss->queue_num * sizeof(uint16_t)); + rss_desc->queue_num = rss->queue_num; + /* NULL RSS key indicates default RSS key. */ + rss_key = !rss->key ? rss_hash_default_key : rss->key; + memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN); + /* + * rss->level and rss.types should be set in advance + * when expanding items for RSS. + */ + action_flags |= MLX5_FLOW_ACTION_RSS; + dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE; + break; + case RTE_FLOW_ACTION_TYPE_AGE: + case RTE_FLOW_ACTION_TYPE_COUNT: + if (!dev_conf->devx) { + return rte_flow_error_set + (error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "count action not supported"); + } + /* Save information first, will apply later. 
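+ * For illustration only (hypothetical application values, not part of
+ * this driver), a counter plus an aging action could be configured as:
+ *
+ *     struct rte_flow_action_count count = { .shared = 0, .id = 0 };
+ *     struct rte_flow_action_age age = { .timeout = 30, .context = NULL };
+ *
+ * The AGE timeout is expressed in seconds by the application; this
+ * driver converts it to 0.1 second units in
+ * flow_dv_translate_create_counter() above.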
*/ + if (actions->type == RTE_FLOW_ACTION_TYPE_COUNT) + count = action->conf; + else + age = action->conf; + action_flags |= MLX5_FLOW_ACTION_COUNT; + break; + case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN: + dev_flow->dv.actions[actions_n++] = + priv->sh->pop_vlan_action; + action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN; + break; + case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN: + if (!(action_flags & + MLX5_FLOW_ACTION_OF_SET_VLAN_VID)) + flow_dev_get_vlan_info_from_items(items, &vlan); + vlan.eth_proto = rte_be_to_cpu_16 + ((((const struct rte_flow_action_of_push_vlan *) + actions->conf)->ethertype)); + found_action = mlx5_flow_find_action + (actions + 1, + RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID); + if (found_action) + mlx5_update_vlan_vid_pcp(found_action, &vlan); + found_action = mlx5_flow_find_action + (actions + 1, + RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP); + if (found_action) + mlx5_update_vlan_vid_pcp(found_action, &vlan); + if (flow_dv_create_action_push_vlan + (dev, attr, &vlan, dev_flow, error)) + return -rte_errno; + dev_flow->dv.actions[actions_n++] = + dev_flow->dv.push_vlan_res->action; + action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN; + break; + case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP: + /* of_vlan_push action handled this action */ + MLX5_ASSERT(action_flags & + MLX5_FLOW_ACTION_OF_PUSH_VLAN); + break; + case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID: + if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) + break; + flow_dev_get_vlan_info_from_items(items, &vlan); + mlx5_update_vlan_vid_pcp(actions, &vlan); + /* If no VLAN push - this is a modify header action */ + if (flow_dv_convert_action_modify_vlan_vid + (mhdr_res, actions, error)) + return -rte_errno; + action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID; + break; + case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP: + case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP: + if (flow_dv_create_action_l2_encap(dev, actions, + dev_flow, + attr->transfer, + error)) + return -rte_errno; + dev_flow->dv.actions[actions_n++] = + dev_flow->dv.encap_decap->verbs_action; + action_flags |= MLX5_FLOW_ACTION_ENCAP; + break; + case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP: + case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP: + if (flow_dv_create_action_l2_decap(dev, dev_flow, + attr->transfer, + error)) + return -rte_errno; + dev_flow->dv.actions[actions_n++] = + dev_flow->dv.encap_decap->verbs_action; + action_flags |= MLX5_FLOW_ACTION_DECAP; + break; + case RTE_FLOW_ACTION_TYPE_RAW_ENCAP: + /* Handle encap with preceding decap. */ + if (action_flags & MLX5_FLOW_ACTION_DECAP) { + if (flow_dv_create_action_raw_encap + (dev, actions, dev_flow, attr, error)) + return -rte_errno; + dev_flow->dv.actions[actions_n++] = + dev_flow->dv.encap_decap->verbs_action; + } else { + /* Handle encap without preceding decap. */ + if (flow_dv_create_action_l2_encap + (dev, actions, dev_flow, attr->transfer, + error)) + return -rte_errno; + dev_flow->dv.actions[actions_n++] = + dev_flow->dv.encap_decap->verbs_action; + } + action_flags |= MLX5_FLOW_ACTION_ENCAP; + break; + case RTE_FLOW_ACTION_TYPE_RAW_DECAP: + while ((++action)->type == RTE_FLOW_ACTION_TYPE_VOID) + ; + if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) { + if (flow_dv_create_action_l2_decap + (dev, dev_flow, attr->transfer, error)) + return -rte_errno; + dev_flow->dv.actions[actions_n++] = + dev_flow->dv.encap_decap->verbs_action; + } + /* If decap is followed by encap, handle it at encap. 
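+ * As an illustration (hypothetical header buffers, not part of this
+ * driver), a full tunnel header rewrite is usually expressed as a
+ * decap/encap pair, which the translation above handles at the encap
+ * step:
+ *
+ *     struct rte_flow_action_raw_decap decap = {
+ *         .data = old_hdr, .size = sizeof(old_hdr),
+ *     };
+ *     struct rte_flow_action_raw_encap encap = {
+ *         .data = new_hdr, .size = sizeof(new_hdr), .preserve = NULL,
+ *     };
+ *     struct rte_flow_action actions[] = {
+ *         { .type = RTE_FLOW_ACTION_TYPE_RAW_DECAP, .conf = &decap },
+ *         { .type = RTE_FLOW_ACTION_TYPE_RAW_ENCAP, .conf = &encap },
+ *         { .type = RTE_FLOW_ACTION_TYPE_END },
+ *     };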
*/ + action_flags |= MLX5_FLOW_ACTION_DECAP; + break; + case RTE_FLOW_ACTION_TYPE_JUMP: + jump_data = action->conf; + ret = mlx5_flow_group_to_table(attr, dev_flow->external, + jump_data->group, + !!priv->fdb_def_rule, + &table, error); + if (ret) + return ret; + tbl = flow_dv_tbl_resource_get(dev, table, + attr->egress, + attr->transfer, error); + if (!tbl) + return rte_flow_error_set + (error, errno, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, + "cannot create jump action."); + if (flow_dv_jump_tbl_resource_register + (dev, tbl, dev_flow, error)) { + flow_dv_tbl_resource_release(dev, tbl); + return rte_flow_error_set + (error, errno, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, + "cannot create jump action."); + } + dev_flow->dv.actions[actions_n++] = + dev_flow->dv.jump->action; + action_flags |= MLX5_FLOW_ACTION_JUMP; + dev_flow->handle->fate_action = MLX5_FLOW_FATE_JUMP; + break; + case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC: + case RTE_FLOW_ACTION_TYPE_SET_MAC_DST: + if (flow_dv_convert_action_modify_mac + (mhdr_res, actions, error)) + return -rte_errno; + action_flags |= actions->type == + RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ? + MLX5_FLOW_ACTION_SET_MAC_SRC : + MLX5_FLOW_ACTION_SET_MAC_DST; + break; + case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC: + case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST: + if (flow_dv_convert_action_modify_ipv4 + (mhdr_res, actions, error)) + return -rte_errno; + action_flags |= actions->type == + RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ? + MLX5_FLOW_ACTION_SET_IPV4_SRC : + MLX5_FLOW_ACTION_SET_IPV4_DST; + break; + case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC: + case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST: + if (flow_dv_convert_action_modify_ipv6 + (mhdr_res, actions, error)) + return -rte_errno; + action_flags |= actions->type == + RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ? + MLX5_FLOW_ACTION_SET_IPV6_SRC : + MLX5_FLOW_ACTION_SET_IPV6_DST; + break; + case RTE_FLOW_ACTION_TYPE_SET_TP_SRC: + case RTE_FLOW_ACTION_TYPE_SET_TP_DST: + if (flow_dv_convert_action_modify_tp + (mhdr_res, actions, items, + &flow_attr, dev_flow, !!(action_flags & + MLX5_FLOW_ACTION_DECAP), error)) + return -rte_errno; + action_flags |= actions->type == + RTE_FLOW_ACTION_TYPE_SET_TP_SRC ? + MLX5_FLOW_ACTION_SET_TP_SRC : + MLX5_FLOW_ACTION_SET_TP_DST; + break; + case RTE_FLOW_ACTION_TYPE_DEC_TTL: + if (flow_dv_convert_action_modify_dec_ttl + (mhdr_res, items, &flow_attr, dev_flow, + !!(action_flags & + MLX5_FLOW_ACTION_DECAP), error)) + return -rte_errno; + action_flags |= MLX5_FLOW_ACTION_DEC_TTL; + break; + case RTE_FLOW_ACTION_TYPE_SET_TTL: + if (flow_dv_convert_action_modify_ttl + (mhdr_res, actions, items, &flow_attr, + dev_flow, !!(action_flags & + MLX5_FLOW_ACTION_DECAP), error)) + return -rte_errno; + action_flags |= MLX5_FLOW_ACTION_SET_TTL; + break; + case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ: + case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ: + if (flow_dv_convert_action_modify_tcp_seq + (mhdr_res, actions, error)) + return -rte_errno; + action_flags |= actions->type == + RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ? + MLX5_FLOW_ACTION_INC_TCP_SEQ : + MLX5_FLOW_ACTION_DEC_TCP_SEQ; + break; + + case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK: + case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK: + if (flow_dv_convert_action_modify_tcp_ack + (mhdr_res, actions, error)) + return -rte_errno; + action_flags |= actions->type == + RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ? 
+ MLX5_FLOW_ACTION_INC_TCP_ACK : + MLX5_FLOW_ACTION_DEC_TCP_ACK; + break; + case MLX5_RTE_FLOW_ACTION_TYPE_TAG: + if (flow_dv_convert_action_set_reg + (mhdr_res, actions, error)) + return -rte_errno; + action_flags |= MLX5_FLOW_ACTION_SET_TAG; + break; + case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG: + if (flow_dv_convert_action_copy_mreg + (dev, mhdr_res, actions, error)) + return -rte_errno; + action_flags |= MLX5_FLOW_ACTION_SET_TAG; + break; + case RTE_FLOW_ACTION_TYPE_METER: + mtr = actions->conf; + if (!flow->meter) { + fm = mlx5_flow_meter_attach(priv, mtr->mtr_id, + attr, error); + if (!fm) + return rte_flow_error_set(error, + rte_errno, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, + "meter not found " + "or invalid parameters"); + flow->meter = fm->idx; + } + /* Set the meter action. */ + if (!fm) { + fm = mlx5_ipool_get(priv->sh->ipool + [MLX5_IPOOL_MTR], flow->meter); + if (!fm) + return rte_flow_error_set(error, + rte_errno, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, + "meter not found " + "or invalid parameters"); + } + dev_flow->dv.actions[actions_n++] = + fm->mfts->meter_action; + action_flags |= MLX5_FLOW_ACTION_METER; + break; + case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP: + if (flow_dv_convert_action_modify_ipv4_dscp(mhdr_res, + actions, error)) + return -rte_errno; + action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP; + break; + case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP: + if (flow_dv_convert_action_modify_ipv6_dscp(mhdr_res, + actions, error)) + return -rte_errno; + action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP; + break; + case RTE_FLOW_ACTION_TYPE_END: + actions_end = true; + if (mhdr_res->actions_num) { + /* create modify action if needed. */ + if (flow_dv_modify_hdr_resource_register + (dev, mhdr_res, dev_flow, error)) + return -rte_errno; + dev_flow->dv.actions[modify_action_position] = + handle->dvh.modify_hdr->verbs_action; + } + if (action_flags & MLX5_FLOW_ACTION_COUNT) { + flow->counter = + flow_dv_translate_create_counter(dev, + dev_flow, count, age); + + if (!flow->counter) + return rte_flow_error_set + (error, rte_errno, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, + "cannot create counter" + " object."); + dev_flow->dv.actions[actions_n++] = + (flow_dv_counter_get_by_idx(dev, + flow->counter, NULL))->action; + } + break; + default: + break; + } + if (mhdr_res->actions_num && + modify_action_position == UINT32_MAX) + modify_action_position = actions_n++; + } + dev_flow->dv.actions_n = actions_n; + dev_flow->act_flags = action_flags; + for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) { + int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); + int item_type = items->type; + + switch (item_type) { + case RTE_FLOW_ITEM_TYPE_PORT_ID: + flow_dv_translate_item_port_id(dev, match_mask, + match_value, items); + last_item = MLX5_FLOW_ITEM_PORT_ID; + break; + case RTE_FLOW_ITEM_TYPE_ETH: + flow_dv_translate_item_eth(match_mask, match_value, + items, tunnel, + dev_flow->dv.group); + matcher.priority = MLX5_PRIORITY_MAP_L2; + last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 : + MLX5_FLOW_LAYER_OUTER_L2; + break; + case RTE_FLOW_ITEM_TYPE_VLAN: + flow_dv_translate_item_vlan(dev_flow, + match_mask, match_value, + items, tunnel, + dev_flow->dv.group); + matcher.priority = MLX5_PRIORITY_MAP_L2; + last_item = tunnel ? 
(MLX5_FLOW_LAYER_INNER_L2 | + MLX5_FLOW_LAYER_INNER_VLAN) : + (MLX5_FLOW_LAYER_OUTER_L2 | + MLX5_FLOW_LAYER_OUTER_VLAN); + break; + case RTE_FLOW_ITEM_TYPE_IPV4: + mlx5_flow_tunnel_ip_check(items, next_protocol, + &item_flags, &tunnel); + flow_dv_translate_item_ipv4(match_mask, match_value, + items, item_flags, tunnel, + dev_flow->dv.group); + matcher.priority = MLX5_PRIORITY_MAP_L3; + last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 : + MLX5_FLOW_LAYER_OUTER_L3_IPV4; + if (items->mask != NULL && + ((const struct rte_flow_item_ipv4 *) + items->mask)->hdr.next_proto_id) { + next_protocol = + ((const struct rte_flow_item_ipv4 *) + (items->spec))->hdr.next_proto_id; + next_protocol &= + ((const struct rte_flow_item_ipv4 *) + (items->mask))->hdr.next_proto_id; + } else { + /* Reset for inner layer. */ + next_protocol = 0xff; + } + break; + case RTE_FLOW_ITEM_TYPE_IPV6: + mlx5_flow_tunnel_ip_check(items, next_protocol, + &item_flags, &tunnel); + flow_dv_translate_item_ipv6(match_mask, match_value, + items, item_flags, tunnel, + dev_flow->dv.group); + matcher.priority = MLX5_PRIORITY_MAP_L3; + last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 : + MLX5_FLOW_LAYER_OUTER_L3_IPV6; + if (items->mask != NULL && + ((const struct rte_flow_item_ipv6 *) + items->mask)->hdr.proto) { + next_protocol = + ((const struct rte_flow_item_ipv6 *) + items->spec)->hdr.proto; + next_protocol &= + ((const struct rte_flow_item_ipv6 *) + items->mask)->hdr.proto; + } else { + /* Reset for inner layer. */ + next_protocol = 0xff; + } + break; + case RTE_FLOW_ITEM_TYPE_TCP: + flow_dv_translate_item_tcp(match_mask, match_value, + items, tunnel); + matcher.priority = MLX5_PRIORITY_MAP_L4; + last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP : + MLX5_FLOW_LAYER_OUTER_L4_TCP; + break; + case RTE_FLOW_ITEM_TYPE_UDP: + flow_dv_translate_item_udp(match_mask, match_value, + items, tunnel); + matcher.priority = MLX5_PRIORITY_MAP_L4; + last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP : + MLX5_FLOW_LAYER_OUTER_L4_UDP; + break; + case RTE_FLOW_ITEM_TYPE_GRE: + flow_dv_translate_item_gre(match_mask, match_value, + items, tunnel); + matcher.priority = rss_desc->level >= 2 ? + MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4; + last_item = MLX5_FLOW_LAYER_GRE; + break; + case RTE_FLOW_ITEM_TYPE_GRE_KEY: + flow_dv_translate_item_gre_key(match_mask, + match_value, items); + last_item = MLX5_FLOW_LAYER_GRE_KEY; + break; + case RTE_FLOW_ITEM_TYPE_NVGRE: + flow_dv_translate_item_nvgre(match_mask, match_value, + items, tunnel); + matcher.priority = rss_desc->level >= 2 ? + MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4; + last_item = MLX5_FLOW_LAYER_GRE; + break; + case RTE_FLOW_ITEM_TYPE_VXLAN: + flow_dv_translate_item_vxlan(match_mask, match_value, + items, tunnel); + matcher.priority = rss_desc->level >= 2 ? + MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4; + last_item = MLX5_FLOW_LAYER_VXLAN; + break; + case RTE_FLOW_ITEM_TYPE_VXLAN_GPE: + flow_dv_translate_item_vxlan_gpe(match_mask, + match_value, items, + tunnel); + matcher.priority = rss_desc->level >= 2 ? + MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4; + last_item = MLX5_FLOW_LAYER_VXLAN_GPE; + break; + case RTE_FLOW_ITEM_TYPE_GENEVE: + flow_dv_translate_item_geneve(match_mask, match_value, + items, tunnel); + matcher.priority = rss_desc->level >= 2 ? 
+ MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4; + last_item = MLX5_FLOW_LAYER_GENEVE; + break; + case RTE_FLOW_ITEM_TYPE_MPLS: + flow_dv_translate_item_mpls(match_mask, match_value, + items, last_item, tunnel); + matcher.priority = rss_desc->level >= 2 ? + MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4; + last_item = MLX5_FLOW_LAYER_MPLS; + break; + case RTE_FLOW_ITEM_TYPE_MARK: + flow_dv_translate_item_mark(dev, match_mask, + match_value, items); + last_item = MLX5_FLOW_ITEM_MARK; + break; + case RTE_FLOW_ITEM_TYPE_META: + flow_dv_translate_item_meta(dev, match_mask, + match_value, attr, items); + last_item = MLX5_FLOW_ITEM_METADATA; + break; + case RTE_FLOW_ITEM_TYPE_ICMP: + flow_dv_translate_item_icmp(match_mask, match_value, + items, tunnel); + last_item = MLX5_FLOW_LAYER_ICMP; + break; + case RTE_FLOW_ITEM_TYPE_ICMP6: + flow_dv_translate_item_icmp6(match_mask, match_value, + items, tunnel); + last_item = MLX5_FLOW_LAYER_ICMP6; + break; + case RTE_FLOW_ITEM_TYPE_TAG: + flow_dv_translate_item_tag(dev, match_mask, + match_value, items); + last_item = MLX5_FLOW_ITEM_TAG; + break; + case MLX5_RTE_FLOW_ITEM_TYPE_TAG: + flow_dv_translate_mlx5_item_tag(dev, match_mask, + match_value, items); + last_item = MLX5_FLOW_ITEM_TAG; + break; + case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE: + flow_dv_translate_item_tx_queue(dev, match_mask, + match_value, + items); + last_item = MLX5_FLOW_ITEM_TX_QUEUE; + break; + case RTE_FLOW_ITEM_TYPE_GTP: + flow_dv_translate_item_gtp(match_mask, match_value, + items, tunnel); + matcher.priority = rss_desc->level >= 2 ? + MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4; + last_item = MLX5_FLOW_LAYER_GTP; + break; + default: + break; + } + item_flags |= last_item; + } + /* + * When E-Switch mode is enabled, we have two cases where we need to + * set the source port manually. + * The first one, is in case of Nic steering rule, and the second is + * E-Switch rule where no port_id item was found. In both cases + * the source port is set according the current port in use. + */ + if (!(item_flags & MLX5_FLOW_ITEM_PORT_ID) && + (priv->representor || priv->master)) { + if (flow_dv_translate_item_port_id(dev, match_mask, + match_value, NULL)) + return -rte_errno; + } +#ifdef RTE_LIBRTE_MLX5_DEBUG + MLX5_ASSERT(!flow_dv_check_valid_spec(matcher.mask.buf, + dev_flow->dv.value.buf)); +#endif + /* + * Layers may be already initialized from prefix flow if this dev_flow + * is the suffix flow. + */ + handle->layers |= item_flags; + if (action_flags & MLX5_FLOW_ACTION_RSS) + flow_dv_hashfields_set(dev_flow, rss_desc); + /* Register matcher. */ + matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf, + matcher.mask.size); + matcher.priority = mlx5_flow_adjust_priority(dev, priority, + matcher.priority); + /* reserved field no needs to be set to 0 here. */ + tbl_key.domain = attr->transfer; + tbl_key.direction = attr->egress; + tbl_key.table_id = dev_flow->dv.group; + if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow, error)) + return -rte_errno; + return 0; +} + +/** + * Apply the flow to the NIC, lock free, + * (mutex should be acquired by caller). + * + * @param[in] dev + * Pointer to the Ethernet device structure. + * @param[in, out] flow + * Pointer to flow structure. + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. 
+ */ +static int +__flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow, + struct rte_flow_error *error) +{ + struct mlx5_flow_dv_workspace *dv; + struct mlx5_flow_handle *dh; + struct mlx5_flow_handle_dv *dv_h; + struct mlx5_flow *dev_flow; + struct mlx5_priv *priv = dev->data->dev_private; + uint32_t handle_idx; + int n; + int err; + int idx; + + for (idx = priv->flow_idx - 1; idx >= priv->flow_nested_idx; idx--) { + dev_flow = &((struct mlx5_flow *)priv->inter_flows)[idx]; + dv = &dev_flow->dv; + dh = dev_flow->handle; + dv_h = &dh->dvh; + n = dv->actions_n; + if (dh->fate_action == MLX5_FLOW_FATE_DROP) { + if (dv->transfer) { + dv->actions[n++] = priv->sh->esw_drop_action; + } else { + struct mlx5_hrxq *drop_hrxq; + drop_hrxq = mlx5_hrxq_drop_new(dev); + if (!drop_hrxq) { + rte_flow_error_set + (error, errno, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "cannot get drop hash queue"); + goto error; + } + /* + * Drop queues will be released by the specify + * mlx5_hrxq_drop_release() function. Assign + * the special index to hrxq to mark the queue + * has been allocated. + */ + dh->rix_hrxq = UINT32_MAX; + dv->actions[n++] = drop_hrxq->action; + } + } else if (dh->fate_action == MLX5_FLOW_FATE_QUEUE) { + struct mlx5_hrxq *hrxq; + uint32_t hrxq_idx; + struct mlx5_flow_rss_desc *rss_desc = + &((struct mlx5_flow_rss_desc *)priv->rss_desc) + [!!priv->flow_nested_idx]; + + MLX5_ASSERT(rss_desc->queue_num); + hrxq_idx = mlx5_hrxq_get(dev, rss_desc->key, + MLX5_RSS_HASH_KEY_LEN, + dev_flow->hash_fields, + rss_desc->queue, + rss_desc->queue_num); + if (!hrxq_idx) { + hrxq_idx = mlx5_hrxq_new + (dev, rss_desc->key, + MLX5_RSS_HASH_KEY_LEN, + dev_flow->hash_fields, + rss_desc->queue, + rss_desc->queue_num, + !!(dh->layers & + MLX5_FLOW_LAYER_TUNNEL)); + } + hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], + hrxq_idx); + if (!hrxq) { + rte_flow_error_set + (error, rte_errno, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "cannot get hash queue"); + goto error; + } + dh->rix_hrxq = hrxq_idx; + dv->actions[n++] = hrxq->action; + } + dh->ib_flow = + mlx5_glue->dv_create_flow(dv_h->matcher->matcher_object, + (void *)&dv->value, n, + dv->actions); + if (!dh->ib_flow) { + rte_flow_error_set(error, errno, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "hardware refuses to create flow"); + goto error; + } + if (priv->vmwa_context && + dh->vf_vlan.tag && !dh->vf_vlan.created) { + /* + * The rule contains the VLAN pattern. + * For VF we are going to create VLAN + * interface to make hypervisor set correct + * e-Switch vport context. + */ + mlx5_vlan_vmwa_acquire(dev, &dh->vf_vlan); + } + } + return 0; +error: + err = rte_errno; /* Save rte_errno before cleanup. */ + SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles, + handle_idx, dh, next) { + /* hrxq is union, don't clear it if the flag is not set. */ + if (dh->rix_hrxq) { + if (dh->fate_action == MLX5_FLOW_FATE_DROP) { + mlx5_hrxq_drop_release(dev); + dh->rix_hrxq = 0; + } else if (dh->fate_action == MLX5_FLOW_FATE_QUEUE) { + mlx5_hrxq_release(dev, dh->rix_hrxq); + dh->rix_hrxq = 0; + } + } + if (dh->vf_vlan.tag && dh->vf_vlan.created) + mlx5_vlan_vmwa_release(dev, &dh->vf_vlan); + } + rte_errno = err; /* Restore rte_errno. */ + return -rte_errno; +} + +/** + * Release the flow matcher. + * + * @param dev + * Pointer to Ethernet device. + * @param handle + * Pointer to mlx5_flow_handle. + * + * @return + * 1 while a reference on it exists, 0 when freed. 
+ */ +static int +flow_dv_matcher_release(struct rte_eth_dev *dev, + struct mlx5_flow_handle *handle) +{ + struct mlx5_flow_dv_matcher *matcher = handle->dvh.matcher; + + MLX5_ASSERT(matcher->matcher_object); + DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--", + dev->data->port_id, (void *)matcher, + rte_atomic32_read(&matcher->refcnt)); + if (rte_atomic32_dec_and_test(&matcher->refcnt)) { + claim_zero(mlx5_glue->dv_destroy_flow_matcher + (matcher->matcher_object)); + LIST_REMOVE(matcher, next); + /* table ref-- in release interface. */ + flow_dv_tbl_resource_release(dev, matcher->tbl); + rte_free(matcher); + DRV_LOG(DEBUG, "port %u matcher %p: removed", + dev->data->port_id, (void *)matcher); + return 0; + } + return 1; +} + +/** + * Release an encap/decap resource. + * + * @param dev + * Pointer to Ethernet device. + * @param handle + * Pointer to mlx5_flow_handle. + * + * @return + * 1 while a reference on it exists, 0 when freed. + */ +static int +flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev, + struct mlx5_flow_handle *handle) +{ + struct mlx5_priv *priv = dev->data->dev_private; + uint32_t idx = handle->dvh.rix_encap_decap; + struct mlx5_flow_dv_encap_decap_resource *cache_resource; + + cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP], + idx); + if (!cache_resource) + return 0; + MLX5_ASSERT(cache_resource->verbs_action); + DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d--", + (void *)cache_resource, + rte_atomic32_read(&cache_resource->refcnt)); + if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) { + claim_zero(mlx5_glue->destroy_flow_action + (cache_resource->verbs_action)); + ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP], + &priv->sh->encaps_decaps, idx, + cache_resource, next); + mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP], idx); + DRV_LOG(DEBUG, "encap/decap resource %p: removed", + (void *)cache_resource); + return 0; + } + return 1; +} + +/** + * Release an jump to table action resource. + * + * @param dev + * Pointer to Ethernet device. + * @param handle + * Pointer to mlx5_flow_handle. + * + * @return + * 1 while a reference on it exists, 0 when freed. + */ +static int +flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev, + struct mlx5_flow_handle *handle) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_flow_dv_jump_tbl_resource *cache_resource; + struct mlx5_flow_tbl_data_entry *tbl_data; + + tbl_data = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_JUMP], + handle->rix_jump); + if (!tbl_data) + return 0; + cache_resource = &tbl_data->jump; + MLX5_ASSERT(cache_resource->action); + DRV_LOG(DEBUG, "jump table resource %p: refcnt %d--", + (void *)cache_resource, + rte_atomic32_read(&cache_resource->refcnt)); + if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) { + claim_zero(mlx5_glue->destroy_flow_action + (cache_resource->action)); + /* jump action memory free is inside the table release. */ + flow_dv_tbl_resource_release(dev, &tbl_data->tbl); + DRV_LOG(DEBUG, "jump table resource %p: removed", + (void *)cache_resource); + return 0; + } + return 1; +} + +/** + * Release a modify-header resource. + * + * @param handle + * Pointer to mlx5_flow_handle. + * + * @return + * 1 while a reference on it exists, 0 when freed. 
+ */ +static int +flow_dv_modify_hdr_resource_release(struct mlx5_flow_handle *handle) +{ + struct mlx5_flow_dv_modify_hdr_resource *cache_resource = + handle->dvh.modify_hdr; + + MLX5_ASSERT(cache_resource->verbs_action); + DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d--", + (void *)cache_resource, + rte_atomic32_read(&cache_resource->refcnt)); + if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) { + claim_zero(mlx5_glue->destroy_flow_action + (cache_resource->verbs_action)); + LIST_REMOVE(cache_resource, next); + rte_free(cache_resource); + DRV_LOG(DEBUG, "modify-header resource %p: removed", + (void *)cache_resource); + return 0; + } + return 1; +} + +/** + * Release port ID action resource. + * + * @param dev + * Pointer to Ethernet device. + * @param handle + * Pointer to mlx5_flow_handle. + * + * @return + * 1 while a reference on it exists, 0 when freed. + */ +static int +flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev, + struct mlx5_flow_handle *handle) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_flow_dv_port_id_action_resource *cache_resource; + uint32_t idx = handle->rix_port_id_action; + + cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PORT_ID], + idx); + if (!cache_resource) + return 0; + MLX5_ASSERT(cache_resource->action); + DRV_LOG(DEBUG, "port ID action resource %p: refcnt %d--", + (void *)cache_resource, + rte_atomic32_read(&cache_resource->refcnt)); + if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) { + claim_zero(mlx5_glue->destroy_flow_action + (cache_resource->action)); + ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_PORT_ID], + &priv->sh->port_id_action_list, idx, + cache_resource, next); + mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_PORT_ID], idx); + DRV_LOG(DEBUG, "port id action resource %p: removed", + (void *)cache_resource); + return 0; + } + return 1; +} + +/** + * Release push vlan action resource. + * + * @param dev + * Pointer to Ethernet device. + * @param handle + * Pointer to mlx5_flow_handle. + * + * @return + * 1 while a reference on it exists, 0 when freed. + */ +static int +flow_dv_push_vlan_action_resource_release(struct rte_eth_dev *dev, + struct mlx5_flow_handle *handle) +{ + struct mlx5_priv *priv = dev->data->dev_private; + uint32_t idx = handle->dvh.rix_push_vlan; + struct mlx5_flow_dv_push_vlan_action_resource *cache_resource; + + cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN], + idx); + if (!cache_resource) + return 0; + MLX5_ASSERT(cache_resource->action); + DRV_LOG(DEBUG, "push VLAN action resource %p: refcnt %d--", + (void *)cache_resource, + rte_atomic32_read(&cache_resource->refcnt)); + if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) { + claim_zero(mlx5_glue->destroy_flow_action + (cache_resource->action)); + ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN], + &priv->sh->push_vlan_action_list, idx, + cache_resource, next); + mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx); + DRV_LOG(DEBUG, "push vlan action resource %p: removed", + (void *)cache_resource); + return 0; + } + return 1; +} + +/** + * Release the fate resource. + * + * @param dev + * Pointer to Ethernet device. + * @param handle + * Pointer to mlx5_flow_handle. 
+ */ +static void +flow_dv_fate_resource_release(struct rte_eth_dev *dev, + struct mlx5_flow_handle *handle) +{ + if (!handle->rix_fate) + return; + if (handle->fate_action == MLX5_FLOW_FATE_DROP) + mlx5_hrxq_drop_release(dev); + else if (handle->fate_action == MLX5_FLOW_FATE_QUEUE) + mlx5_hrxq_release(dev, handle->rix_hrxq); + else if (handle->fate_action == MLX5_FLOW_FATE_JUMP) + flow_dv_jump_tbl_resource_release(dev, handle); + else if (handle->fate_action == MLX5_FLOW_FATE_PORT_ID) + flow_dv_port_id_action_resource_release(dev, handle); + else + DRV_LOG(DEBUG, "Incorrect fate action:%d", handle->fate_action); + handle->rix_fate = 0; +} + +/** + * Remove the flow from the NIC but keeps it in memory. + * Lock free, (mutex should be acquired by caller). + * + * @param[in] dev + * Pointer to Ethernet device. + * @param[in, out] flow + * Pointer to flow structure. + */ +static void +__flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow) +{ + struct mlx5_flow_handle *dh; + uint32_t handle_idx; + struct mlx5_priv *priv = dev->data->dev_private; + + if (!flow) + return; + handle_idx = flow->dev_handles; + while (handle_idx) { + dh = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], + handle_idx); + if (!dh) + return; + if (dh->ib_flow) { + claim_zero(mlx5_glue->dv_destroy_flow(dh->ib_flow)); + dh->ib_flow = NULL; + } + if (dh->fate_action == MLX5_FLOW_FATE_DROP || + dh->fate_action == MLX5_FLOW_FATE_QUEUE) + flow_dv_fate_resource_release(dev, dh); + if (dh->vf_vlan.tag && dh->vf_vlan.created) + mlx5_vlan_vmwa_release(dev, &dh->vf_vlan); + handle_idx = dh->next.next; + } +} + +/** + * Remove the flow from the NIC and the memory. + * Lock free, (mutex should be acquired by caller). + * + * @param[in] dev + * Pointer to the Ethernet device structure. + * @param[in, out] flow + * Pointer to flow structure. + */ +static void +__flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow) +{ + struct mlx5_flow_handle *dev_handle; + struct mlx5_priv *priv = dev->data->dev_private; + + if (!flow) + return; + __flow_dv_remove(dev, flow); + if (flow->counter) { + flow_dv_counter_release(dev, flow->counter); + flow->counter = 0; + } + if (flow->meter) { + struct mlx5_flow_meter *fm; + + fm = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MTR], + flow->meter); + if (fm) + mlx5_flow_meter_detach(fm); + flow->meter = 0; + } + while (flow->dev_handles) { + uint32_t tmp_idx = flow->dev_handles; + + dev_handle = mlx5_ipool_get(priv->sh->ipool + [MLX5_IPOOL_MLX5_FLOW], tmp_idx); + if (!dev_handle) + return; + flow->dev_handles = dev_handle->next.next; + if (dev_handle->dvh.matcher) + flow_dv_matcher_release(dev, dev_handle); + if (dev_handle->dvh.rix_encap_decap) + flow_dv_encap_decap_resource_release(dev, dev_handle); + if (dev_handle->dvh.modify_hdr) + flow_dv_modify_hdr_resource_release(dev_handle); + if (dev_handle->dvh.rix_push_vlan) + flow_dv_push_vlan_action_resource_release(dev, + dev_handle); + if (dev_handle->dvh.rix_tag) + flow_dv_tag_release(dev, + dev_handle->dvh.rix_tag); + flow_dv_fate_resource_release(dev, dev_handle); + mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], + tmp_idx); + } +} + +/** + * Query a dv flow rule for its statistics via devx. + * + * @param[in] dev + * Pointer to Ethernet device. + * @param[in] flow + * Pointer to the sub flow. + * @param[out] data + * data retrieved by the query. + * @param[out] error + * Perform verbose error reporting if not NULL. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. 
+ */ +static int +flow_dv_query_count(struct rte_eth_dev *dev, struct rte_flow *flow, + void *data, struct rte_flow_error *error) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct rte_flow_query_count *qc = data; + + if (!priv->config.devx) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "counters are not supported"); + if (flow->counter) { + uint64_t pkts, bytes; + struct mlx5_flow_counter *cnt; + + cnt = flow_dv_counter_get_by_idx(dev, flow->counter, + NULL); + int err = _flow_dv_query_count(dev, flow->counter, &pkts, + &bytes); + + if (err) + return rte_flow_error_set(error, -err, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, "cannot read counters"); + qc->hits_set = 1; + qc->bytes_set = 1; + qc->hits = pkts - cnt->hits; + qc->bytes = bytes - cnt->bytes; + if (qc->reset) { + cnt->hits = pkts; + cnt->bytes = bytes; + } + return 0; + } + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "counters are not available"); +} + +/** + * Query a flow. + * + * @see rte_flow_query() + * @see rte_flow_ops + */ +static int +flow_dv_query(struct rte_eth_dev *dev, + struct rte_flow *flow __rte_unused, + const struct rte_flow_action *actions __rte_unused, + void *data __rte_unused, + struct rte_flow_error *error __rte_unused) +{ + int ret = -EINVAL; + + for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { + switch (actions->type) { + case RTE_FLOW_ACTION_TYPE_VOID: + break; + case RTE_FLOW_ACTION_TYPE_COUNT: + ret = flow_dv_query_count(dev, flow, data, error); + break; + default: + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + actions, + "action not supported"); + } + } + return ret; +} + +/** + * Destroy the meter table set. + * Lock free, (mutex should be acquired by caller). + * + * @param[in] dev + * Pointer to Ethernet device. + * @param[in] tbl + * Pointer to the meter table set. + * + * @return + * Always 0. 
+ */ +static int +flow_dv_destroy_mtr_tbl(struct rte_eth_dev *dev, + struct mlx5_meter_domains_infos *tbl) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_meter_domains_infos *mtd = + (struct mlx5_meter_domains_infos *)tbl; + + if (!mtd || !priv->config.dv_flow_en) + return 0; + if (mtd->ingress.policer_rules[RTE_MTR_DROPPED]) + claim_zero(mlx5_glue->dv_destroy_flow + (mtd->ingress.policer_rules[RTE_MTR_DROPPED])); + if (mtd->egress.policer_rules[RTE_MTR_DROPPED]) + claim_zero(mlx5_glue->dv_destroy_flow + (mtd->egress.policer_rules[RTE_MTR_DROPPED])); + if (mtd->transfer.policer_rules[RTE_MTR_DROPPED]) + claim_zero(mlx5_glue->dv_destroy_flow + (mtd->transfer.policer_rules[RTE_MTR_DROPPED])); + if (mtd->egress.color_matcher) + claim_zero(mlx5_glue->dv_destroy_flow_matcher + (mtd->egress.color_matcher)); + if (mtd->egress.any_matcher) + claim_zero(mlx5_glue->dv_destroy_flow_matcher + (mtd->egress.any_matcher)); + if (mtd->egress.tbl) + flow_dv_tbl_resource_release(dev, mtd->egress.tbl); + if (mtd->egress.sfx_tbl) + flow_dv_tbl_resource_release(dev, mtd->egress.sfx_tbl); + if (mtd->ingress.color_matcher) + claim_zero(mlx5_glue->dv_destroy_flow_matcher + (mtd->ingress.color_matcher)); + if (mtd->ingress.any_matcher) + claim_zero(mlx5_glue->dv_destroy_flow_matcher + (mtd->ingress.any_matcher)); + if (mtd->ingress.tbl) + flow_dv_tbl_resource_release(dev, mtd->ingress.tbl); + if (mtd->ingress.sfx_tbl) + flow_dv_tbl_resource_release(dev, mtd->ingress.sfx_tbl); + if (mtd->transfer.color_matcher) + claim_zero(mlx5_glue->dv_destroy_flow_matcher + (mtd->transfer.color_matcher)); + if (mtd->transfer.any_matcher) + claim_zero(mlx5_glue->dv_destroy_flow_matcher + (mtd->transfer.any_matcher)); + if (mtd->transfer.tbl) + flow_dv_tbl_resource_release(dev, mtd->transfer.tbl); + if (mtd->transfer.sfx_tbl) + flow_dv_tbl_resource_release(dev, mtd->transfer.sfx_tbl); + if (mtd->drop_actn) + claim_zero(mlx5_glue->destroy_flow_action(mtd->drop_actn)); + rte_free(mtd); + return 0; +} + +/* Number of meter flow actions, count and jump or count and drop. */ +#define METER_ACTIONS 2 + +/** + * Create specify domain meter table and suffix table. + * + * @param[in] dev + * Pointer to Ethernet device. + * @param[in,out] mtb + * Pointer to DV meter table set. + * @param[in] egress + * Table attribute. + * @param[in] transfer + * Table attribute. + * @param[in] color_reg_c_idx + * Reg C index for color match. + * + * @return + * 0 on success, -1 otherwise and rte_errno is set. + */ +static int +flow_dv_prepare_mtr_tables(struct rte_eth_dev *dev, + struct mlx5_meter_domains_infos *mtb, + uint8_t egress, uint8_t transfer, + uint32_t color_reg_c_idx) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_ibv_shared *sh = priv->sh; + struct mlx5_flow_dv_match_params mask = { + .size = sizeof(mask.buf), + }; + struct mlx5_flow_dv_match_params value = { + .size = sizeof(value.buf), + }; + struct mlx5dv_flow_matcher_attr dv_attr = { + .type = IBV_FLOW_ATTR_NORMAL, + .priority = 0, + .match_criteria_enable = 0, + .match_mask = (void *)&mask, + }; + void *actions[METER_ACTIONS]; + struct mlx5_meter_domain_info *dtb; + struct rte_flow_error error; + int i = 0; + + if (transfer) + dtb = &mtb->transfer; + else if (egress) + dtb = &mtb->egress; + else + dtb = &mtb->ingress; + /* Create the meter table with METER level. 
*/ + dtb->tbl = flow_dv_tbl_resource_get(dev, MLX5_FLOW_TABLE_LEVEL_METER, + egress, transfer, &error); + if (!dtb->tbl) { + DRV_LOG(ERR, "Failed to create meter policer table."); + return -1; + } + /* Create the meter suffix table with SUFFIX level. */ + dtb->sfx_tbl = flow_dv_tbl_resource_get(dev, + MLX5_FLOW_TABLE_LEVEL_SUFFIX, + egress, transfer, &error); + if (!dtb->sfx_tbl) { + DRV_LOG(ERR, "Failed to create meter suffix table."); + return -1; + } + /* Create matchers, Any and Color. */ + dv_attr.priority = 3; + dv_attr.match_criteria_enable = 0; + dtb->any_matcher = mlx5_glue->dv_create_flow_matcher(sh->ctx, + &dv_attr, + dtb->tbl->obj); + if (!dtb->any_matcher) { + DRV_LOG(ERR, "Failed to create meter" + " policer default matcher."); + goto error_exit; + } + dv_attr.priority = 0; + dv_attr.match_criteria_enable = + 1 << MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT; + flow_dv_match_meta_reg(mask.buf, value.buf, color_reg_c_idx, + rte_col_2_mlx5_col(RTE_COLORS), UINT8_MAX); + dtb->color_matcher = mlx5_glue->dv_create_flow_matcher(sh->ctx, + &dv_attr, + dtb->tbl->obj); + if (!dtb->color_matcher) { + DRV_LOG(ERR, "Failed to create meter policer color matcher."); + goto error_exit; + } + if (mtb->count_actns[RTE_MTR_DROPPED]) + actions[i++] = mtb->count_actns[RTE_MTR_DROPPED]; + actions[i++] = mtb->drop_actn; + /* Default rule: lowest priority, match any, actions: drop. */ + dtb->policer_rules[RTE_MTR_DROPPED] = + mlx5_glue->dv_create_flow(dtb->any_matcher, + (void *)&value, i, actions); + if (!dtb->policer_rules[RTE_MTR_DROPPED]) { + DRV_LOG(ERR, "Failed to create meter policer drop rule."); + goto error_exit; + } + return 0; +error_exit: + return -1; +} + +/** + * Create the needed meter and suffix tables. + * Lock free, (mutex should be acquired by caller). + * + * @param[in] dev + * Pointer to Ethernet device. + * @param[in] fm + * Pointer to the flow meter. + * + * @return + * Pointer to table set on success, NULL otherwise and rte_errno is set. + */ +static struct mlx5_meter_domains_infos * +flow_dv_create_mtr_tbl(struct rte_eth_dev *dev, + const struct mlx5_flow_meter *fm) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_meter_domains_infos *mtb; + int ret; + int i; + + if (!priv->mtr_en) { + rte_errno = ENOTSUP; + return NULL; + } + mtb = rte_calloc(__func__, 1, sizeof(*mtb), 0); + if (!mtb) { + DRV_LOG(ERR, "Failed to allocate memory for meter."); + return NULL; + } + /* Create meter count actions */ + for (i = 0; i <= RTE_MTR_DROPPED; i++) { + struct mlx5_flow_counter *cnt; + if (!fm->policer_stats.cnt[i]) + continue; + cnt = flow_dv_counter_get_by_idx(dev, + fm->policer_stats.cnt[i], NULL); + mtb->count_actns[i] = cnt->action; + } + /* Create drop action. */ + mtb->drop_actn = mlx5_glue->dr_create_flow_action_drop(); + if (!mtb->drop_actn) { + DRV_LOG(ERR, "Failed to create drop action."); + goto error_exit; + } + /* Egress meter table. */ + ret = flow_dv_prepare_mtr_tables(dev, mtb, 1, 0, priv->mtr_color_reg); + if (ret) { + DRV_LOG(ERR, "Failed to prepare egress meter table."); + goto error_exit; + } + /* Ingress meter table. */ + ret = flow_dv_prepare_mtr_tables(dev, mtb, 0, 0, priv->mtr_color_reg); + if (ret) { + DRV_LOG(ERR, "Failed to prepare ingress meter table."); + goto error_exit; + } + /* FDB meter table. 
*/ + if (priv->config.dv_esw_en) { + ret = flow_dv_prepare_mtr_tables(dev, mtb, 0, 1, + priv->mtr_color_reg); + if (ret) { + DRV_LOG(ERR, "Failed to prepare fdb meter table."); + goto error_exit; + } + } + return mtb; +error_exit: + flow_dv_destroy_mtr_tbl(dev, mtb); + return NULL; +} + +/** + * Destroy domain policer rule. + * + * @param[in] dt + * Pointer to domain table. + */ +static void +flow_dv_destroy_domain_policer_rule(struct mlx5_meter_domain_info *dt) +{ + int i; + + for (i = 0; i < RTE_MTR_DROPPED; i++) { + if (dt->policer_rules[i]) { + claim_zero(mlx5_glue->dv_destroy_flow + (dt->policer_rules[i])); + dt->policer_rules[i] = NULL; + } + } + if (dt->jump_actn) { + claim_zero(mlx5_glue->destroy_flow_action(dt->jump_actn)); + dt->jump_actn = NULL; + } +} + +/** + * Destroy policer rules. + * + * @param[in] dev + * Pointer to Ethernet device. + * @param[in] fm + * Pointer to flow meter structure. + * @param[in] attr + * Pointer to flow attributes. + * + * @return + * Always 0. + */ +static int +flow_dv_destroy_policer_rules(struct rte_eth_dev *dev __rte_unused, + const struct mlx5_flow_meter *fm, + const struct rte_flow_attr *attr) +{ + struct mlx5_meter_domains_infos *mtb = fm ? fm->mfts : NULL; + + if (!mtb) + return 0; + if (attr->egress) + flow_dv_destroy_domain_policer_rule(&mtb->egress); + if (attr->ingress) + flow_dv_destroy_domain_policer_rule(&mtb->ingress); + if (attr->transfer) + flow_dv_destroy_domain_policer_rule(&mtb->transfer); + return 0; +} + +/** + * Create specify domain meter policer rule. + * + * @param[in] fm + * Pointer to flow meter structure. + * @param[in] mtb + * Pointer to DV meter table set. + * @param[in] mtr_reg_c + * Color match REG_C. + * + * @return + * 0 on success, -1 otherwise. + */ +static int +flow_dv_create_policer_forward_rule(struct mlx5_flow_meter *fm, + struct mlx5_meter_domain_info *dtb, + uint8_t mtr_reg_c) +{ + struct mlx5_flow_dv_match_params matcher = { + .size = sizeof(matcher.buf), + }; + struct mlx5_flow_dv_match_params value = { + .size = sizeof(value.buf), + }; + struct mlx5_meter_domains_infos *mtb = fm->mfts; + void *actions[METER_ACTIONS]; + int i; + + /* Create jump action. */ + if (!dtb->jump_actn) + dtb->jump_actn = + mlx5_glue->dr_create_flow_action_dest_flow_tbl + (dtb->sfx_tbl->obj); + if (!dtb->jump_actn) { + DRV_LOG(ERR, "Failed to create policer jump action."); + goto error; + } + for (i = 0; i < RTE_MTR_DROPPED; i++) { + int j = 0; + + flow_dv_match_meta_reg(matcher.buf, value.buf, mtr_reg_c, + rte_col_2_mlx5_col(i), UINT8_MAX); + if (mtb->count_actns[i]) + actions[j++] = mtb->count_actns[i]; + if (fm->action[i] == MTR_POLICER_ACTION_DROP) + actions[j++] = mtb->drop_actn; + else + actions[j++] = dtb->jump_actn; + dtb->policer_rules[i] = + mlx5_glue->dv_create_flow(dtb->color_matcher, + (void *)&value, + j, actions); + if (!dtb->policer_rules[i]) { + DRV_LOG(ERR, "Failed to create policer rule."); + goto error; + } + } + return 0; +error: + rte_errno = errno; + return -1; +} + +/** + * Create policer rules. + * + * @param[in] dev + * Pointer to Ethernet device. + * @param[in] fm + * Pointer to flow meter structure. + * @param[in] attr + * Pointer to flow attributes. + * + * @return + * 0 on success, -1 otherwise. 
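+ *
+ * For context, an illustrative application-side sketch (ids and queue
+ * numbers are hypothetical, not part of this driver): once the meter
+ * object exists, it is attached to a flow through the generic METER
+ * action:
+ *
+ *     struct rte_flow_action_meter meter = { .mtr_id = 1 };
+ *     struct rte_flow_action_queue queue = { .index = 0 };
+ *     struct rte_flow_action actions[] = {
+ *         { .type = RTE_FLOW_ACTION_TYPE_METER, .conf = &meter },
+ *         { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
+ *         { .type = RTE_FLOW_ACTION_TYPE_END },
+ *     };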
+ */ +static int +flow_dv_create_policer_rules(struct rte_eth_dev *dev, + struct mlx5_flow_meter *fm, + const struct rte_flow_attr *attr) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_meter_domains_infos *mtb = fm->mfts; + int ret; + + if (attr->egress) { + ret = flow_dv_create_policer_forward_rule(fm, &mtb->egress, + priv->mtr_color_reg); + if (ret) { + DRV_LOG(ERR, "Failed to create egress policer."); + goto error; + } + } + if (attr->ingress) { + ret = flow_dv_create_policer_forward_rule(fm, &mtb->ingress, + priv->mtr_color_reg); + if (ret) { + DRV_LOG(ERR, "Failed to create ingress policer."); + goto error; + } + } + if (attr->transfer) { + ret = flow_dv_create_policer_forward_rule(fm, &mtb->transfer, + priv->mtr_color_reg); + if (ret) { + DRV_LOG(ERR, "Failed to create transfer policer."); + goto error; + } + } + return 0; +error: + flow_dv_destroy_policer_rules(dev, fm, attr); + return -1; +} + +/** + * Query a devx counter. + * + * @param[in] dev + * Pointer to the Ethernet device structure. + * @param[in] cnt + * Index to the flow counter. + * @param[in] clear + * Set to clear the counter statistics. + * @param[out] pkts + * The statistics value of packets. + * @param[out] bytes + * The statistics value of bytes. + * + * @return + * 0 on success, otherwise return -1. + */ +static int +flow_dv_counter_query(struct rte_eth_dev *dev, uint32_t counter, bool clear, + uint64_t *pkts, uint64_t *bytes) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_flow_counter *cnt; + uint64_t inn_pkts, inn_bytes; + int ret; + + if (!priv->config.devx) + return -1; + + ret = _flow_dv_query_count(dev, counter, &inn_pkts, &inn_bytes); + if (ret) + return -1; + cnt = flow_dv_counter_get_by_idx(dev, counter, NULL); + *pkts = inn_pkts - cnt->hits; + *bytes = inn_bytes - cnt->bytes; + if (clear) { + cnt->hits = inn_pkts; + cnt->bytes = inn_bytes; + } + return 0; +} + +/** + * Get aged-out flows. + * + * @param[in] dev + * Pointer to the Ethernet device structure. + * @param[in] context + * The address of an array of pointers to the aged-out flows contexts. + * @param[in] nb_contexts + * The length of context array pointers. + * @param[out] error + * Perform verbose error reporting if not NULL. Initialized in case of + * error only. + * + * @return + * how many contexts get in success, otherwise negative errno value. + * if nb_contexts is 0, return the amount of all aged contexts. + * if nb_contexts is not 0 , return the amount of aged flows reported + * in the context array. 
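+ *
+ * As an illustration only (hypothetical application code, not part of
+ * this driver), the two-step pattern implied by the return value is:
+ *
+ *     int n = rte_flow_get_aged_flows(port_id, NULL, 0, &err);
+ *     if (n > 0) {
+ *         void **ctx = calloc(n, sizeof(*ctx));
+ *         n = rte_flow_get_aged_flows(port_id, ctx, n, &err);
+ *     }
+ *
+ * where each returned context is the one given in the AGE action at
+ * flow creation time.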
+ * @note: only stub for now + */ +static int +flow_get_aged_flows(struct rte_eth_dev *dev, + void **context, + uint32_t nb_contexts, + struct rte_flow_error *error) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_age_info *age_info; + struct mlx5_age_param *age_param; + struct mlx5_flow_counter *counter; + int nb_flows = 0; + + if (nb_contexts && !context) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "Should assign at least one flow or" + " context to get if nb_contexts != 0"); + age_info = GET_PORT_AGE_INFO(priv); + rte_spinlock_lock(&age_info->aged_sl); + TAILQ_FOREACH(counter, &age_info->aged_counters, next) { + nb_flows++; + if (nb_contexts) { + age_param = MLX5_CNT_TO_AGE(counter); + context[nb_flows - 1] = age_param->context; + if (!(--nb_contexts)) + break; + } + } + rte_spinlock_unlock(&age_info->aged_sl); + MLX5_AGE_SET(age_info, MLX5_AGE_TRIGGER); + return nb_flows; +} + +/* + * Mutex-protected thunk to lock-free __flow_dv_translate(). + */ +static int +flow_dv_translate(struct rte_eth_dev *dev, + struct mlx5_flow *dev_flow, + const struct rte_flow_attr *attr, + const struct rte_flow_item items[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + int ret; + + flow_dv_shared_lock(dev); + ret = __flow_dv_translate(dev, dev_flow, attr, items, actions, error); + flow_dv_shared_unlock(dev); + return ret; +} + +/* + * Mutex-protected thunk to lock-free __flow_dv_apply(). + */ +static int +flow_dv_apply(struct rte_eth_dev *dev, + struct rte_flow *flow, + struct rte_flow_error *error) +{ + int ret; + + flow_dv_shared_lock(dev); + ret = __flow_dv_apply(dev, flow, error); + flow_dv_shared_unlock(dev); + return ret; +} + +/* + * Mutex-protected thunk to lock-free __flow_dv_remove(). + */ +static void +flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow) +{ + flow_dv_shared_lock(dev); + __flow_dv_remove(dev, flow); + flow_dv_shared_unlock(dev); +} + +/* + * Mutex-protected thunk to lock-free __flow_dv_destroy(). + */ +static void +flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow) +{ + flow_dv_shared_lock(dev); + __flow_dv_destroy(dev, flow); + flow_dv_shared_unlock(dev); +} + +/* + * Mutex-protected thunk to lock-free flow_dv_counter_alloc(). + */ +static uint32_t +flow_dv_counter_allocate(struct rte_eth_dev *dev) +{ + uint32_t cnt; + + flow_dv_shared_lock(dev); + cnt = flow_dv_counter_alloc(dev, 0, 0, 1, 0); + flow_dv_shared_unlock(dev); + return cnt; +} + +/* + * Mutex-protected thunk to lock-free flow_dv_counter_release(). 
+ */ +static void +flow_dv_counter_free(struct rte_eth_dev *dev, uint32_t cnt) +{ + flow_dv_shared_lock(dev); + flow_dv_counter_release(dev, cnt); + flow_dv_shared_unlock(dev); +} + +const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = { + .validate = flow_dv_validate, + .prepare = flow_dv_prepare, + .translate = flow_dv_translate, + .apply = flow_dv_apply, + .remove = flow_dv_remove, + .destroy = flow_dv_destroy, + .query = flow_dv_query, + .create_mtr_tbls = flow_dv_create_mtr_tbl, + .destroy_mtr_tbls = flow_dv_destroy_mtr_tbl, + .create_policer_rules = flow_dv_create_policer_rules, + .destroy_policer_rules = flow_dv_destroy_policer_rules, + .counter_alloc = flow_dv_counter_allocate, + .counter_free = flow_dv_counter_free, + .counter_query = flow_dv_counter_query, + .get_aged_flows = flow_get_aged_flows, +}; + +#endif /* HAVE_IBV_FLOW_DV_SUPPORT */ diff --git a/src/spdk/dpdk/drivers/net/mlx5/mlx5_flow_meter.c b/src/spdk/dpdk/drivers/net/mlx5/mlx5_flow_meter.c new file mode 100644 index 000000000..08f7dc8d1 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/mlx5/mlx5_flow_meter.c @@ -0,0 +1,1292 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* + * Copyright 2018 Mellanox Technologies, Ltd + */ +#include + +#include +#include +#include +#include + +#include + +#include "mlx5.h" +#include "mlx5_flow.h" + +/** + * Create the meter action. + * + * @param priv + * Pointer to mlx5_priv. + * @param[in] fm + * Pointer to flow meter to be converted. + * + * @return + * Pointer to the meter action on success, NULL otherwise. + */ +static void * +mlx5_flow_meter_action_create(struct mlx5_priv *priv, + struct mlx5_flow_meter *fm) +{ +#ifdef HAVE_MLX5_DR_CREATE_ACTION_FLOW_METER + struct mlx5dv_dr_flow_meter_attr mtr_init; + void *attr = fm->mfts->fmp; + struct mlx5_flow_meter_srtcm_rfc2697_prm *srtcm = + &fm->profile->srtcm_prm; + + fm->mfts->fmp_size = MLX5_ST_SZ_BYTES(flow_meter_parameters); + memset(attr, 0, fm->mfts->fmp_size); + MLX5_SET(flow_meter_parameters, attr, valid, 1); + MLX5_SET(flow_meter_parameters, attr, bucket_overflow, 1); + MLX5_SET(flow_meter_parameters, attr, + start_color, MLX5_FLOW_COLOR_GREEN); + MLX5_SET(flow_meter_parameters, attr, both_buckets_on_green, 0); + MLX5_SET(flow_meter_parameters, + attr, cbs_exponent, srtcm->cbs_exponent); + MLX5_SET(flow_meter_parameters, + attr, cbs_mantissa, srtcm->cbs_mantissa); + MLX5_SET(flow_meter_parameters, + attr, cir_exponent, srtcm->cir_exponent); + MLX5_SET(flow_meter_parameters, + attr, cir_mantissa, srtcm->cir_mantissa); + MLX5_SET(flow_meter_parameters, + attr, ebs_exponent, srtcm->ebs_exponent); + MLX5_SET(flow_meter_parameters, + attr, ebs_mantissa, srtcm->ebs_mantissa); + mtr_init.next_table = + fm->transfer ? fm->mfts->transfer.tbl->obj : + fm->egress ? fm->mfts->egress.tbl->obj : + fm->mfts->ingress.tbl->obj; + mtr_init.reg_c_index = priv->mtr_color_reg - REG_C_0; + mtr_init.flow_meter_parameter = fm->mfts->fmp; + mtr_init.flow_meter_parameter_sz = fm->mfts->fmp_size; + mtr_init.active = fm->active_state; + return mlx5_glue->dv_create_flow_action_meter(&mtr_init); +#else + (void)priv; + (void)fm; + return NULL; +#endif +} + +/** + * Find meter profile by id. + * + * @param priv + * Pointer to mlx5_priv. + * @param meter_profile_id + * Meter profile id. + * + * @return + * Pointer to the profile found on success, NULL otherwise. 
+ */ +static struct mlx5_flow_meter_profile * +mlx5_flow_meter_profile_find(struct mlx5_priv *priv, uint32_t meter_profile_id) +{ + struct mlx5_mtr_profiles *fmps = &priv->flow_meter_profiles; + struct mlx5_flow_meter_profile *fmp; + + TAILQ_FOREACH(fmp, fmps, next) + if (meter_profile_id == fmp->meter_profile_id) + return fmp; + return NULL; +} + +/** + * Validate the MTR profile. + * + * @param[in] dev + * Pointer to Ethernet device. + * @param[in] meter_profile_id + * Meter profile id. + * @param[in] profile + * Pointer to meter profile detail. + * @param[out] error + * Pointer to the error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +mlx5_flow_meter_profile_validate(struct rte_eth_dev *dev, + uint32_t meter_profile_id, + struct rte_mtr_meter_profile *profile, + struct rte_mtr_error *error) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_flow_meter_profile *fmp; + + /* Profile must not be NULL. */ + if (profile == NULL) + return -rte_mtr_error_set(error, EINVAL, + RTE_MTR_ERROR_TYPE_METER_PROFILE, + NULL, "Meter profile is null."); + /* Meter profile ID must be valid. */ + if (meter_profile_id == UINT32_MAX) + return -rte_mtr_error_set(error, EINVAL, + RTE_MTR_ERROR_TYPE_METER_PROFILE_ID, + NULL, "Meter profile id not valid."); + /* Meter profile must not exist. */ + fmp = mlx5_flow_meter_profile_find(priv, meter_profile_id); + if (fmp) + return -rte_mtr_error_set(error, EEXIST, + RTE_MTR_ERROR_TYPE_METER_PROFILE_ID, + NULL, + "Meter profile already exists."); + if (profile->alg == RTE_MTR_SRTCM_RFC2697) { + if (priv->config.hca_attr.qos.srtcm_sup) { + /* Verify support for flow meter parameters. */ + if (profile->srtcm_rfc2697.cir > 0 && + profile->srtcm_rfc2697.cir <= MLX5_SRTCM_CIR_MAX && + profile->srtcm_rfc2697.cbs > 0 && + profile->srtcm_rfc2697.cbs <= MLX5_SRTCM_CBS_MAX && + profile->srtcm_rfc2697.ebs <= MLX5_SRTCM_EBS_MAX) + return 0; + else + return -rte_mtr_error_set + (error, ENOTSUP, + RTE_MTR_ERROR_TYPE_MTR_PARAMS, + NULL, + profile->srtcm_rfc2697.ebs ? + "Metering value ebs must be 0." : + "Invalid metering parameters."); + } + } + return -rte_mtr_error_set(error, ENOTSUP, + RTE_MTR_ERROR_TYPE_METER_PROFILE, + NULL, "Metering algorithm not supported."); +} + +/** + * Calculate mantissa and exponent for cir. + * + * @param[in] cir + * Value to be calculated. + * @param[out] man + * Pointer to the mantissa. + * @param[out] exp + * Pointer to the exp. + */ +static void +mlx5_flow_meter_cir_man_exp_calc(int64_t cir, uint8_t *man, uint8_t *exp) +{ + int64_t _cir; + int64_t delta = INT64_MAX; + uint8_t _man = 0; + uint8_t _exp = 0; + uint64_t m, e; + + for (m = 0; m <= 0xFF; m++) { /* man width 8 bit */ + for (e = 0; e <= 0x1F; e++) { /* exp width 5bit */ + _cir = (1000000000ULL * m) >> e; + if (llabs(cir - _cir) <= delta) { + delta = llabs(cir - _cir); + _man = m; + _exp = e; + } + } + } + *man = _man; + *exp = _exp; +} + +/** + * Calculate mantissa and exponent for xbs. + * + * @param[in] xbs + * Value to be calculated. + * @param[out] man + * Pointer to the mantissa. + * @param[out] exp + * Pointer to the exp. + */ +static void +mlx5_flow_meter_xbs_man_exp_calc(uint64_t xbs, uint8_t *man, uint8_t *exp) +{ + int _exp; + double _man; + + /* Special case xbs == 0 ? both exp and matissa are 0. 
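The CIR encoding above is an exhaustive search over an 8-bit mantissa and a 5-bit exponent. A standalone sketch of the same search, with an illustrative rate (100 Mbit/s, i.e. 12,500,000 bytes/s), printing the chosen encoding and its error:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Find (man, exp) minimizing |cir - ((10^9 * man) >> exp)|, mirroring
     * mlx5_flow_meter_cir_man_exp_calc(). */
    int
    main(void)
    {
        const int64_t cir = 12500000;   /* illustrative: 100 Mbit/s in bytes/s */
        int64_t delta = INT64_MAX;
        uint8_t man = 0, exp = 0;

        for (uint64_t m = 0; m <= 0xFF; m++) {          /* 8-bit mantissa */
            for (uint64_t e = 0; e <= 0x1F; e++) {      /* 5-bit exponent */
                int64_t enc = (int64_t)((1000000000ULL * m) >> e);

                if (llabs(cir - enc) <= delta) {
                    delta = llabs(cir - enc);
                    man = m;
                    exp = e;
                }
            }
        }
        printf("man=%u exp=%u encoded=%" PRId64 " error=%" PRId64 "\n",
               (unsigned int)man, (unsigned int)exp,
               (int64_t)((1000000000ULL * man) >> exp), delta);
        return 0;
    }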
*/ + if (xbs == 0) { + *man = 0; + *exp = 0; + return; + } + /* xbs = xbs_mantissa * 2^xbs_exponent */ + _man = frexp(xbs, &_exp); + _man = _man * pow(2, MLX5_MAN_WIDTH); + _exp = _exp - MLX5_MAN_WIDTH; + *man = (uint8_t)ceil(_man); + *exp = _exp; +} + +/** + * Fill the prm meter parameter. + * + * @param[in,out] fmp + * Pointer to meter profie to be converted. + * @param[out] error + * Pointer to the error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +mlx5_flow_meter_param_fill(struct mlx5_flow_meter_profile *fmp, + struct rte_mtr_error *error) +{ + struct mlx5_flow_meter_srtcm_rfc2697_prm *srtcm = &fmp->srtcm_prm; + uint8_t man, exp; + + if (fmp->profile.alg != RTE_MTR_SRTCM_RFC2697) + return -rte_mtr_error_set(error, ENOTSUP, + RTE_MTR_ERROR_TYPE_METER_PROFILE, + NULL, "Metering algorithm not supported."); + /* cbs = cbs_mantissa * 2^cbs_exponent */ + mlx5_flow_meter_xbs_man_exp_calc(fmp->profile.srtcm_rfc2697.cbs, + &man, &exp); + srtcm->cbs_mantissa = man; + srtcm->cbs_exponent = exp; + /* Check if cbs mantissa is too large. */ + if (srtcm->cbs_exponent != exp) + return -rte_mtr_error_set(error, EINVAL, + RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL, + "Metering profile parameter cbs is" + " invalid."); + /* ebs = ebs_mantissa * 2^ebs_exponent */ + mlx5_flow_meter_xbs_man_exp_calc(fmp->profile.srtcm_rfc2697.ebs, + &man, &exp); + srtcm->ebs_mantissa = man; + srtcm->ebs_exponent = exp; + /* Check if ebs mantissa is too large. */ + if (srtcm->ebs_exponent != exp) + return -rte_mtr_error_set(error, EINVAL, + RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL, + "Metering profile parameter ebs is" + " invalid."); + /* cir = 8G * cir_mantissa * 1/(2^cir_exponent)) Bytes/Sec */ + mlx5_flow_meter_cir_man_exp_calc(fmp->profile.srtcm_rfc2697.cir, + &man, &exp); + srtcm->cir_mantissa = man; + srtcm->cir_exponent = exp; + /* Check if cir mantissa is too large. */ + if (srtcm->cir_exponent != exp) + return -rte_mtr_error_set(error, EINVAL, + RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL, + "Metering profile parameter cir is" + " invalid."); + return 0; +} + +/** + * Callback to get MTR capabilities. + * + * @param[in] dev + * Pointer to Ethernet device. + * @param[out] cap + * Pointer to save MTR capabilities. + * @param[out] error + * Pointer to the error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +mlx5_flow_mtr_cap_get(struct rte_eth_dev *dev, + struct rte_mtr_capabilities *cap, + struct rte_mtr_error *error __rte_unused) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_hca_qos_attr *qattr = &priv->config.hca_attr.qos; + + if (!priv->mtr_en) + return -rte_mtr_error_set(error, ENOTSUP, + RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL, + "Meter is not support"); + memset(cap, 0, sizeof(*cap)); + cap->n_max = 1 << qattr->log_max_flow_meter; + cap->n_shared_max = cap->n_max; + cap->identical = 1; + cap->shared_identical = 1; + cap->shared_n_flows_per_mtr_max = 4 << 20; + /* 2M flows can share the same meter. */ + cap->chaining_n_mtrs_per_flow_max = 1; /* Chaining is not supported. */ + cap->meter_srtcm_rfc2697_n_max = qattr->srtcm_sup ? cap->n_max : 0; + cap->meter_rate_max = 1ULL << 40; /* 1 Tera tokens per sec. */ + cap->policer_action_drop_supported = 1; + cap->stats_mask = RTE_MTR_STATS_N_BYTES_DROPPED | + RTE_MTR_STATS_N_PKTS_DROPPED; + return 0; +} + +/** + * Callback to add MTR profile. + * + * @param[in] dev + * Pointer to Ethernet device. 
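The burst-size conversion above is the frexp()-based counterpart: the configured byte count is rewritten as mantissa * 2^exponent, with an 8-bit mantissa width assumed in this sketch (MLX5_MAN_WIDTH is taken to be 8 here). A standalone example, link with -lm:

    #include <inttypes.h>
    #include <math.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Split a burst size into an 8-bit mantissa and an exponent so that
     * xbs is approximately man * 2^exp (rounded up, as in the driver). */
    static void
    xbs_to_man_exp(uint64_t xbs, uint8_t *man, int *exp)
    {
        double m;
        int e;

        if (xbs == 0) {
            *man = 0;
            *exp = 0;
            return;
        }
        m = frexp(xbs, &e);     /* xbs = m * 2^e with 0.5 <= m < 1 */
        m *= pow(2, 8);         /* move 8 bits into the mantissa */
        *man = (uint8_t)ceil(m);
        *exp = e - 8;
    }

    int
    main(void)
    {
        uint8_t man;
        int exp;

        xbs_to_man_exp(64 * 1024, &man, &exp);  /* 64 KB burst, exp >= 0 here */
        printf("man=%u exp=%d -> %" PRIu64 " bytes\n",
               (unsigned int)man, exp, (uint64_t)man << exp);
        return 0;
    }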
+ * @param[in] meter_profile_id + * Meter profile id. + * @param[in] profile + * Pointer to meter profile detail. + * @param[out] error + * Pointer to the error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +mlx5_flow_meter_profile_add(struct rte_eth_dev *dev, + uint32_t meter_profile_id, + struct rte_mtr_meter_profile *profile, + struct rte_mtr_error *error) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_mtr_profiles *fmps = &priv->flow_meter_profiles; + struct mlx5_flow_meter_profile *fmp; + int ret; + + if (!priv->mtr_en) + return -rte_mtr_error_set(error, ENOTSUP, + RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL, + "Meter is not support"); + /* Check input params. */ + ret = mlx5_flow_meter_profile_validate(dev, meter_profile_id, + profile, error); + if (ret) + return ret; + /* Meter profile memory allocation. */ + fmp = rte_calloc(__func__, 1, sizeof(struct mlx5_flow_meter_profile), + RTE_CACHE_LINE_SIZE); + if (fmp == NULL) + return -rte_mtr_error_set(error, ENOMEM, + RTE_MTR_ERROR_TYPE_UNSPECIFIED, + NULL, "Meter profile memory " + "alloc failed."); + /* Fill profile info. */ + fmp->meter_profile_id = meter_profile_id; + fmp->profile = *profile; + /* Fill the flow meter parameters for the PRM. */ + ret = mlx5_flow_meter_param_fill(fmp, error); + if (ret) + goto error; + /* Add to list. */ + TAILQ_INSERT_TAIL(fmps, fmp, next); + return 0; +error: + rte_free(fmp); + return ret; +} + +/** + * Callback to delete MTR profile. + * + * @param[in] dev + * Pointer to Ethernet device. + * @param[in] meter_profile_id + * Meter profile id. + * @param[out] error + * Pointer to the error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +mlx5_flow_meter_profile_delete(struct rte_eth_dev *dev, + uint32_t meter_profile_id, + struct rte_mtr_error *error) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_flow_meter_profile *fmp; + + if (!priv->mtr_en) + return -rte_mtr_error_set(error, ENOTSUP, + RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL, + "Meter is not support"); + /* Meter profile must exist. */ + fmp = mlx5_flow_meter_profile_find(priv, meter_profile_id); + if (fmp == NULL) + return -rte_mtr_error_set(error, ENOENT, + RTE_MTR_ERROR_TYPE_METER_PROFILE_ID, + &meter_profile_id, + "Meter profile id invalid."); + /* Check profile is unused. */ + if (fmp->ref_cnt) + return -rte_mtr_error_set(error, EBUSY, + RTE_MTR_ERROR_TYPE_METER_PROFILE_ID, + NULL, "Meter profile in use."); + /* Remove from list. */ + TAILQ_REMOVE(&priv->flow_meter_profiles, fmp, next); + rte_free(fmp); + return 0; +} + +/** + * Convert wrong color setting action to verbose error. + * + * @param[in] action + * Policy color action. + * + * @return + * Verbose meter color error type. + */ +static inline enum rte_mtr_error_type +action2error(enum rte_mtr_policer_action action) +{ + switch (action) { + case MTR_POLICER_ACTION_COLOR_GREEN: + return RTE_MTR_ERROR_TYPE_POLICER_ACTION_GREEN; + case MTR_POLICER_ACTION_COLOR_YELLOW: + return RTE_MTR_ERROR_TYPE_POLICER_ACTION_YELLOW; + case MTR_POLICER_ACTION_COLOR_RED: + return RTE_MTR_ERROR_TYPE_POLICER_ACTION_RED; + default: + break; + } + return RTE_MTR_ERROR_TYPE_UNSPECIFIED; +} + +/** + * Check meter validation. + * + * @param[in] priv + * Pointer to mlx5 private data structure. + * @param[in] meter_id + * Meter id. + * @param[in] params + * Pointer to rte meter parameters. 
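From the application's point of view, the callback above is reached through the generic rte_mtr API. A hedged sketch that registers an srTCM (RFC 2697) profile; the port id, profile id and rate/burst values are illustrative and intended to satisfy the checks in mlx5_flow_meter_profile_validate():

    #include <rte_mtr.h>

    /* Register an srTCM profile: 100 Mbit/s committed rate, 64 KB committed
     * burst, no excess burst. */
    static int
    example_add_srtcm_profile(uint16_t port_id, uint32_t profile_id)
    {
        struct rte_mtr_meter_profile profile = {
            .alg = RTE_MTR_SRTCM_RFC2697,
            .srtcm_rfc2697 = {
                .cir = 12500000,    /* bytes per second */
                .cbs = 64 * 1024,   /* bytes */
                .ebs = 0,
            },
        };
        struct rte_mtr_error error;

        return rte_mtr_meter_profile_add(port_id, profile_id, &profile,
                                         &error);
    }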
+ * @param[out] error + * Pointer to rte meter error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +mlx5_flow_meter_validate(struct mlx5_priv *priv, uint32_t meter_id, + struct rte_mtr_params *params, + struct rte_mtr_error *error) +{ + static enum rte_mtr_policer_action + valid_recol_action[RTE_COLORS] = { + MTR_POLICER_ACTION_COLOR_GREEN, + MTR_POLICER_ACTION_COLOR_YELLOW, + MTR_POLICER_ACTION_COLOR_RED }; + int i; + + /* Meter params must not be NULL. */ + if (params == NULL) + return -rte_mtr_error_set(error, EINVAL, + RTE_MTR_ERROR_TYPE_MTR_PARAMS, + NULL, "Meter object params null."); + /* Previous meter color is not supported. */ + if (params->use_prev_mtr_color) + return -rte_mtr_error_set(error, ENOTSUP, + RTE_MTR_ERROR_TYPE_MTR_PARAMS, + NULL, + "Previous meter color " + "not supported."); + /* Validate policer settings. */ + for (i = 0; i < RTE_COLORS; i++) + if (params->action[i] != valid_recol_action[i] && + params->action[i] != MTR_POLICER_ACTION_DROP) + return -rte_mtr_error_set + (error, ENOTSUP, + action2error(params->action[i]), NULL, + "Recolor action not supported."); + /* Validate meter id. */ + if (mlx5_flow_meter_find(priv, meter_id)) + return -rte_mtr_error_set(error, EEXIST, + RTE_MTR_ERROR_TYPE_MTR_ID, NULL, + "Meter object already exists."); + return 0; +} + +/** + * Modify the flow meter action. + * + * @param[in] priv + * Pointer to mlx5 private data structure. + * @param[in] fm + * Pointer to flow meter to be modified. + * @param[in] srtcm + * Pointer to meter srtcm description parameter. + * @param[in] modify_bits + * The bit in srtcm to be updated. + * @param[in] active_state + * The state to be updated. + * @return + * 0 on success, o negative value otherwise. + */ +static int +mlx5_flow_meter_action_modify(struct mlx5_priv *priv, + struct mlx5_flow_meter *fm, + const struct mlx5_flow_meter_srtcm_rfc2697_prm *srtcm, + uint64_t modify_bits, uint32_t active_state) +{ +#ifdef HAVE_MLX5_DR_CREATE_ACTION_FLOW_METER + uint32_t in[MLX5_ST_SZ_DW(flow_meter_parameters)] = { 0 }; + uint32_t *attr; + struct mlx5dv_dr_flow_meter_attr mod_attr = { 0 }; + int ret; + + /* Fill command parameters. */ + mod_attr.reg_c_index = priv->mtr_color_reg - REG_C_0; + mod_attr.flow_meter_parameter = in; + mod_attr.flow_meter_parameter_sz = fm->mfts->fmp_size; + if (modify_bits & MLX5_FLOW_METER_OBJ_MODIFY_FIELD_ACTIVE) + mod_attr.active = !!active_state; + else + mod_attr.active = 0; + attr = in; + if (modify_bits & MLX5_FLOW_METER_OBJ_MODIFY_FIELD_CBS) { + MLX5_SET(flow_meter_parameters, + attr, cbs_exponent, srtcm->cbs_exponent); + MLX5_SET(flow_meter_parameters, + attr, cbs_mantissa, srtcm->cbs_mantissa); + } + if (modify_bits & MLX5_FLOW_METER_OBJ_MODIFY_FIELD_CIR) { + MLX5_SET(flow_meter_parameters, + attr, cir_exponent, srtcm->cir_exponent); + MLX5_SET(flow_meter_parameters, + attr, cir_mantissa, srtcm->cir_mantissa); + } + if (modify_bits & MLX5_FLOW_METER_OBJ_MODIFY_FIELD_EBS) { + MLX5_SET(flow_meter_parameters, + attr, ebs_exponent, srtcm->ebs_exponent); + MLX5_SET(flow_meter_parameters, + attr, ebs_mantissa, srtcm->ebs_mantissa); + } + /* Apply modifications to meter only if it was created. */ + if (fm->mfts->meter_action) { + ret = mlx5_glue->dv_modify_flow_action_meter + (fm->mfts->meter_action, &mod_attr, + rte_cpu_to_be_64(modify_bits)); + if (ret) + return ret; + } + /* Update succeedded modify meter parameters. 
*/ + if (modify_bits & MLX5_FLOW_METER_OBJ_MODIFY_FIELD_ACTIVE) + fm->active_state = !!active_state; + attr = fm->mfts->fmp; + if (modify_bits & MLX5_FLOW_METER_OBJ_MODIFY_FIELD_CBS) { + MLX5_SET(flow_meter_parameters, + attr, cbs_exponent, srtcm->cbs_exponent); + MLX5_SET(flow_meter_parameters, + attr, cbs_mantissa, srtcm->cbs_mantissa); + } + if (modify_bits & MLX5_FLOW_METER_OBJ_MODIFY_FIELD_CIR) { + MLX5_SET(flow_meter_parameters, + attr, cir_exponent, srtcm->cir_exponent); + MLX5_SET(flow_meter_parameters, + attr, cir_mantissa, srtcm->cir_mantissa); + } + if (modify_bits & MLX5_FLOW_METER_OBJ_MODIFY_FIELD_EBS) { + MLX5_SET(flow_meter_parameters, + attr, ebs_exponent, srtcm->ebs_exponent); + MLX5_SET(flow_meter_parameters, + attr, ebs_mantissa, srtcm->ebs_mantissa); + } + + return 0; +#else + (void)priv; + (void)fm; + (void)srtcm; + (void)modify_bits; + (void)active_state; + return -ENOTSUP; +#endif +} + +/** + * Create meter rules. + * + * @param[in] dev + * Pointer to Ethernet device. + * @param[in] meter_id + * Meter id. + * @param[in] params + * Pointer to rte meter parameters. + * @param[in] shared + * Meter shared with other flow or not. + * @param[out] error + * Pointer to rte meter error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +mlx5_flow_meter_create(struct rte_eth_dev *dev, uint32_t meter_id, + struct rte_mtr_params *params, int shared, + struct rte_mtr_error *error) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_flow_meters *fms = &priv->flow_meters; + struct mlx5_flow_meter_profile *fmp; + struct mlx5_flow_meter *fm; + const struct rte_flow_attr attr = { + .ingress = 1, + .egress = 1, + .transfer = priv->config.dv_esw_en ? 1 : 0, + }; + int ret; + unsigned int i; + uint32_t idx = 0; + + if (!priv->mtr_en) + return -rte_mtr_error_set(error, ENOTSUP, + RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL, + "Meter is not support"); + /* Validate the parameters. */ + ret = mlx5_flow_meter_validate(priv, meter_id, params, error); + if (ret) + return ret; + /* Meter profile must exist. */ + fmp = mlx5_flow_meter_profile_find(priv, params->meter_profile_id); + if (fmp == NULL) + return -rte_mtr_error_set(error, ENOENT, + RTE_MTR_ERROR_TYPE_METER_PROFILE_ID, + NULL, "Meter profile id not valid."); + /* Allocate the flow meter memory. */ + fm = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MTR], &idx); + if (fm == NULL) + return -rte_mtr_error_set(error, ENOMEM, + RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL, + "Memory alloc failed for meter."); + fm->idx = idx; + /* Fill the flow meter parameters. */ + fm->meter_id = meter_id; + fm->profile = fmp; + memcpy(fm->action, params->action, sizeof(params->action)); + fm->stats_mask = params->stats_mask; + + /* Alloc policer counters. */ + for (i = 0; i < RTE_DIM(fm->policer_stats.cnt); i++) { + fm->policer_stats.cnt[i] = mlx5_counter_alloc(dev); + if (!fm->policer_stats.cnt[i]) + goto error; + } + fm->mfts = mlx5_flow_create_mtr_tbls(dev, fm); + if (!fm->mfts) + goto error; + ret = mlx5_flow_create_policer_rules(dev, fm, &attr); + if (ret) + goto error; + /* Add to the flow meter list. */ + TAILQ_INSERT_TAIL(fms, fm, next); + fm->active_state = 1; /* Config meter starts as active. */ + fm->shared = !!shared; + fm->policer_stats.stats_mask = params->stats_mask; + fm->profile->ref_cnt++; + return 0; +error: + mlx5_flow_destroy_policer_rules(dev, fm, &attr); + mlx5_flow_destroy_mtr_tbls(dev, fm->mfts); + /* Free policer counters. 
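The creation path above is driven by rte_mtr_create(). A sketch of the application side, reusing the profile from the earlier example; the policer actions keep green and yellow traffic and drop red, which is the combination mlx5_flow_meter_validate() accepts (field values are illustrative):

    #include <rte_mtr.h>

    /* Create a shared meter bound to an already registered profile. */
    static int
    example_create_meter(uint16_t port_id, uint32_t mtr_id, uint32_t profile_id)
    {
        struct rte_mtr_params params = {
            .meter_profile_id = profile_id,
            .use_prev_mtr_color = 0,
            .meter_enable = 1,
            .action = {
                [RTE_COLOR_GREEN] = MTR_POLICER_ACTION_COLOR_GREEN,
                [RTE_COLOR_YELLOW] = MTR_POLICER_ACTION_COLOR_YELLOW,
                [RTE_COLOR_RED] = MTR_POLICER_ACTION_DROP,
            },
            .stats_mask = RTE_MTR_STATS_N_PKTS_DROPPED |
                          RTE_MTR_STATS_N_BYTES_DROPPED,
        };
        struct rte_mtr_error error;

        return rte_mtr_create(port_id, mtr_id, &params, 1 /* shared */, &error);
    }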
*/ + for (i = 0; i < RTE_DIM(fm->policer_stats.cnt); i++) + if (fm->policer_stats.cnt[i]) + mlx5_counter_free(dev, fm->policer_stats.cnt[i]); + mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MTR], idx); + return -rte_mtr_error_set(error, -ret, + RTE_MTR_ERROR_TYPE_UNSPECIFIED, + NULL, "Failed to create devx meter."); +} + +/** + * Destroy meter rules. + * + * @param[in] dev + * Pointer to Ethernet device. + * @param[in] meter_id + * Meter id. + * @param[out] error + * Pointer to rte meter error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +mlx5_flow_meter_destroy(struct rte_eth_dev *dev, uint32_t meter_id, + struct rte_mtr_error *error) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_flow_meters *fms = &priv->flow_meters; + struct mlx5_flow_meter_profile *fmp; + struct mlx5_flow_meter *fm; + const struct rte_flow_attr attr = { + .ingress = 1, + .egress = 1, + .transfer = priv->config.dv_esw_en ? 1 : 0, + }; + unsigned int i; + + if (!priv->mtr_en) + return -rte_mtr_error_set(error, ENOTSUP, + RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL, + "Meter is not support"); + /* Meter object must exist. */ + fm = mlx5_flow_meter_find(priv, meter_id); + if (fm == NULL) + return -rte_mtr_error_set(error, ENOENT, + RTE_MTR_ERROR_TYPE_MTR_ID, + NULL, "Meter object id not valid."); + /* Meter object must not have any owner. */ + if (fm->ref_cnt > 0) + return -rte_mtr_error_set(error, EBUSY, + RTE_MTR_ERROR_TYPE_UNSPECIFIED, + NULL, "Meter object is being used."); + /* Get the meter profile. */ + fmp = fm->profile; + MLX5_ASSERT(fmp); + /* Update dependencies. */ + fmp->ref_cnt--; + /* Remove from the flow meter list. */ + TAILQ_REMOVE(fms, fm, next); + /* Free policer counters. */ + for (i = 0; i < RTE_DIM(fm->policer_stats.cnt); i++) + if (fm->policer_stats.cnt[i]) + mlx5_counter_free(dev, fm->policer_stats.cnt[i]); + /* Free meter flow table */ + mlx5_flow_destroy_policer_rules(dev, fm, &attr); + mlx5_flow_destroy_mtr_tbls(dev, fm->mfts); + mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MTR], fm->idx); + return 0; +} + +/** + * Modify meter state. + * + * @param[in] priv + * Pointer to mlx5 private data structure. + * @param[in] fm + * Pointer to flow meter. + * @param[in] new_state + * New state to update. + * @param[out] error + * Pointer to rte meter error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +mlx5_flow_meter_modify_state(struct mlx5_priv *priv, + struct mlx5_flow_meter *fm, + uint32_t new_state, + struct rte_mtr_error *error) +{ + static const struct mlx5_flow_meter_srtcm_rfc2697_prm srtcm = { + .cbs_exponent = 20, + .cbs_mantissa = 191, + .cir_exponent = 0, + .cir_mantissa = 200, + .ebs_exponent = 0, + .ebs_mantissa = 0, + }; + uint64_t modify_bits = MLX5_FLOW_METER_OBJ_MODIFY_FIELD_CBS | + MLX5_FLOW_METER_OBJ_MODIFY_FIELD_CIR; + int ret; + + if (new_state == MLX5_FLOW_METER_DISABLE) + ret = mlx5_flow_meter_action_modify(priv, fm, &srtcm, + modify_bits, 0); + else + ret = mlx5_flow_meter_action_modify(priv, fm, + &fm->profile->srtcm_prm, + modify_bits, 0); + if (ret) + return -rte_mtr_error_set(error, -ret, + RTE_MTR_ERROR_TYPE_MTR_PARAMS, + NULL, + new_state ? + "Failed to enable meter." : + "Failed to disable meter."); + return 0; +} + +/** + * Callback to enable flow meter. + * + * @param[in] dev + * Pointer to Ethernet device. + * @param[in] meter_id + * Meter id. + * @param[out] error + * Pointer to rte meter error structure. 
+ * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +mlx5_flow_meter_enable(struct rte_eth_dev *dev, + uint32_t meter_id, + struct rte_mtr_error *error) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_flow_meter *fm; + int ret; + + if (!priv->mtr_en) + return -rte_mtr_error_set(error, ENOTSUP, + RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL, + "Meter is not support"); + /* Meter object must exist. */ + fm = mlx5_flow_meter_find(priv, meter_id); + if (fm == NULL) + return -rte_mtr_error_set(error, ENOENT, + RTE_MTR_ERROR_TYPE_MTR_ID, + NULL, "Meter not found."); + if (fm->active_state == MLX5_FLOW_METER_ENABLE) + return 0; + ret = mlx5_flow_meter_modify_state(priv, fm, MLX5_FLOW_METER_ENABLE, + error); + if (!ret) + fm->active_state = MLX5_FLOW_METER_ENABLE; + return ret; +} + +/** + * Callback to disable flow meter. + * + * @param[in] dev + * Pointer to Ethernet device. + * @param[in] meter_id + * Meter id. + * @param[out] error + * Pointer to rte meter error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +mlx5_flow_meter_disable(struct rte_eth_dev *dev, + uint32_t meter_id, + struct rte_mtr_error *error) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_flow_meter *fm; + int ret; + + if (!priv->mtr_en) + return -rte_mtr_error_set(error, ENOTSUP, + RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL, + "Meter is not support"); + /* Meter object must exist. */ + fm = mlx5_flow_meter_find(priv, meter_id); + if (fm == NULL) + return -rte_mtr_error_set(error, ENOENT, + RTE_MTR_ERROR_TYPE_MTR_ID, + NULL, "Meter not found."); + if (fm->active_state == MLX5_FLOW_METER_DISABLE) + return 0; + ret = mlx5_flow_meter_modify_state(priv, fm, MLX5_FLOW_METER_DISABLE, + error); + if (!ret) + fm->active_state = MLX5_FLOW_METER_DISABLE; + return ret; +} + +/** + * Callback to update meter profile. + * + * @param[in] dev + * Pointer to Ethernet device. + * @param[in] meter_id + * Meter id. + * @param[in] meter_profile_id + * To be updated meter profile id. + * @param[out] error + * Pointer to rte meter error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +mlx5_flow_meter_profile_update(struct rte_eth_dev *dev, + uint32_t meter_id, + uint32_t meter_profile_id, + struct rte_mtr_error *error) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_flow_meter_profile *fmp; + struct mlx5_flow_meter_profile *old_fmp; + struct mlx5_flow_meter *fm; + uint64_t modify_bits = MLX5_FLOW_METER_OBJ_MODIFY_FIELD_CBS | + MLX5_FLOW_METER_OBJ_MODIFY_FIELD_CIR; + int ret; + + if (!priv->mtr_en) + return -rte_mtr_error_set(error, ENOTSUP, + RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL, + "Meter is not support"); + /* Meter profile must exist. */ + fmp = mlx5_flow_meter_profile_find(priv, meter_profile_id); + if (fmp == NULL) + return -rte_mtr_error_set(error, ENOENT, + RTE_MTR_ERROR_TYPE_METER_PROFILE_ID, + NULL, "Meter profile not found."); + /* Meter object must exist. */ + fm = mlx5_flow_meter_find(priv, meter_id); + if (fm == NULL) + return -rte_mtr_error_set(error, ENOENT, + RTE_MTR_ERROR_TYPE_MTR_ID, + NULL, "Meter not found."); + /* MTR object already set to meter profile id. */ + old_fmp = fm->profile; + if (fmp == old_fmp) + return 0; + /* Update the profile. */ + fm->profile = fmp; + /* Update meter params in HW (if not disabled). 
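A short application-side sketch of the enable/disable and profile-update callbacks above; disabling first is not required (the driver also updates the hardware parameters of an enabled meter), it is shown only to group the three calls:

    #include <rte_mtr.h>

    /* Switch an existing meter to another registered profile. */
    static int
    example_switch_profile(uint16_t port_id, uint32_t mtr_id,
                           uint32_t new_profile_id)
    {
        struct rte_mtr_error error;
        int ret;

        ret = rte_mtr_meter_disable(port_id, mtr_id, &error);
        if (ret)
            return ret;
        ret = rte_mtr_meter_profile_update(port_id, mtr_id, new_profile_id,
                                           &error);
        if (ret)
            return ret;
        return rte_mtr_meter_enable(port_id, mtr_id, &error);
    }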
*/ + if (fm->active_state == MLX5_FLOW_METER_DISABLE) + return 0; + ret = mlx5_flow_meter_action_modify(priv, fm, &fm->profile->srtcm_prm, + modify_bits, fm->active_state); + if (ret) { + fm->profile = old_fmp; + return -rte_mtr_error_set(error, -ret, + RTE_MTR_ERROR_TYPE_MTR_PARAMS, + NULL, "Failed to update meter" + " parmeters in hardware."); + } + old_fmp->ref_cnt--; + fmp->ref_cnt++; + return 0; +} + +/** + * Callback to update meter stats mask. + * + * @param[in] dev + * Pointer to Ethernet device. + * @param[in] meter_id + * Meter id. + * @param[in] stats_mask + * To be updated stats_mask. + * @param[out] error + * Pointer to rte meter error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +mlx5_flow_meter_stats_update(struct rte_eth_dev *dev, + uint32_t meter_id, + uint64_t stats_mask, + struct rte_mtr_error *error) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_flow_meter *fm; + + if (!priv->mtr_en) + return -rte_mtr_error_set(error, ENOTSUP, + RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL, + "Meter is not support"); + /* Meter object must exist. */ + fm = mlx5_flow_meter_find(priv, meter_id); + if (fm == NULL) + return -rte_mtr_error_set(error, ENOENT, + RTE_MTR_ERROR_TYPE_MTR_ID, + NULL, "Meter object id not valid."); + fm->policer_stats.stats_mask = stats_mask; + return 0; +} + +/** + * Callback to read meter statistics. + * + * @param[in] dev + * Pointer to Ethernet device. + * @param[in] meter_id + * Meter id. + * @param[out] stats + * Pointer to store the statistics. + * @param[out] stats_mask + * Pointer to store the stats_mask. + * @param[in] clear + * Statistic to be cleared after read or not. + * @param[out] error + * Pointer to rte meter error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +mlx5_flow_meter_stats_read(struct rte_eth_dev *dev, + uint32_t meter_id, + struct rte_mtr_stats *stats, + uint64_t *stats_mask, + int clear, + struct rte_mtr_error *error) +{ + static uint64_t meter2mask[RTE_MTR_DROPPED + 1] = { + RTE_MTR_STATS_N_PKTS_GREEN | RTE_MTR_STATS_N_BYTES_GREEN, + RTE_MTR_STATS_N_PKTS_YELLOW | RTE_MTR_STATS_N_BYTES_YELLOW, + RTE_MTR_STATS_N_PKTS_RED | RTE_MTR_STATS_N_BYTES_RED, + RTE_MTR_STATS_N_PKTS_DROPPED | RTE_MTR_STATS_N_BYTES_DROPPED + }; + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_flow_meter *fm; + struct mlx5_flow_policer_stats *ps; + uint64_t pkts_dropped = 0; + uint64_t bytes_dropped = 0; + uint64_t pkts; + uint64_t bytes; + int i; + int ret = 0; + + if (!priv->mtr_en) + return -rte_mtr_error_set(error, ENOTSUP, + RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL, + "Meter is not support"); + /* Meter object must exist. */ + fm = mlx5_flow_meter_find(priv, meter_id); + if (fm == NULL) + return -rte_mtr_error_set(error, ENOENT, + RTE_MTR_ERROR_TYPE_MTR_ID, + NULL, "Meter object id not valid."); + ps = &fm->policer_stats; + *stats_mask = ps->stats_mask; + for (i = 0; i < RTE_MTR_DROPPED; i++) { + if (*stats_mask & meter2mask[i]) { + ret = mlx5_counter_query(dev, ps->cnt[i], clear, &pkts, + &bytes); + if (ret) + goto error; + if (fm->action[i] == MTR_POLICER_ACTION_DROP) { + pkts_dropped += pkts; + bytes_dropped += bytes; + } + /* If need to read the packets, set it. */ + if ((1 << i) & (*stats_mask & meter2mask[i])) + stats->n_pkts[i] = pkts; + /* If need to read the bytes, set it. 
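The statistics callback above honours the per-meter stats_mask; an application-side read-and-clear sketch that only consumes the dropped counters (identifiers are illustrative):

    #include <inttypes.h>
    #include <stdio.h>
    #include <rte_mtr.h>

    /* Read and clear the policer statistics; only fields selected by the
     * returned stats_mask carry meaningful values. */
    static int
    example_read_meter_stats(uint16_t port_id, uint32_t mtr_id)
    {
        struct rte_mtr_stats stats;
        struct rte_mtr_error error;
        uint64_t mask = 0;

        if (rte_mtr_stats_read(port_id, mtr_id, &stats, &mask, 1, &error))
            return -1;
        if (mask & RTE_MTR_STATS_N_PKTS_DROPPED)
            printf("dropped pkts:  %" PRIu64 "\n", stats.n_pkts_dropped);
        if (mask & RTE_MTR_STATS_N_BYTES_DROPPED)
            printf("dropped bytes: %" PRIu64 "\n", stats.n_bytes_dropped);
        return 0;
    }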
*/ + if ((1 << (RTE_MTR_DROPPED + 1 + i)) & + (*stats_mask & meter2mask[i])) + stats->n_bytes[i] = bytes; + } + } + /* Dropped packets/bytes are treated differently. */ + if (*stats_mask & meter2mask[i]) { + ret = mlx5_counter_query(dev, ps->cnt[i], clear, &pkts, + &bytes); + if (ret) + goto error; + pkts += pkts_dropped; + bytes += bytes_dropped; + /* If need to read the packets, set it. */ + if ((*stats_mask & meter2mask[i]) & + RTE_MTR_STATS_N_PKTS_DROPPED) + stats->n_pkts_dropped = pkts; + /* If need to read the bytes, set it. */ + if ((*stats_mask & meter2mask[i]) & + RTE_MTR_STATS_N_BYTES_DROPPED) + stats->n_bytes_dropped = bytes; + } + return 0; +error: + return -rte_mtr_error_set(error, ret, RTE_MTR_ERROR_TYPE_STATS, NULL, + "Failed to read policer counters."); +} + +static const struct rte_mtr_ops mlx5_flow_mtr_ops = { + .capabilities_get = mlx5_flow_mtr_cap_get, + .meter_profile_add = mlx5_flow_meter_profile_add, + .meter_profile_delete = mlx5_flow_meter_profile_delete, + .create = mlx5_flow_meter_create, + .destroy = mlx5_flow_meter_destroy, + .meter_enable = mlx5_flow_meter_enable, + .meter_disable = mlx5_flow_meter_disable, + .meter_profile_update = mlx5_flow_meter_profile_update, + .meter_dscp_table_update = NULL, + .policer_actions_update = NULL, + .stats_update = mlx5_flow_meter_stats_update, + .stats_read = mlx5_flow_meter_stats_read, +}; + +/** + * Get meter operations. + * + * @param dev + * Pointer to Ethernet device structure. + * @param arg + * Pointer to set the mtr operations. + * + * @return + * Always 0. + */ +int +mlx5_flow_meter_ops_get(struct rte_eth_dev *dev __rte_unused, void *arg) +{ + *(const struct rte_mtr_ops **)arg = &mlx5_flow_mtr_ops; + return 0; +} + +/** + * Find meter by id. + * + * @param priv + * Pointer to mlx5_priv. + * @param meter_id + * Meter id. + * + * @return + * Pointer to the profile found on success, NULL otherwise. + */ +struct mlx5_flow_meter * +mlx5_flow_meter_find(struct mlx5_priv *priv, uint32_t meter_id) +{ + struct mlx5_flow_meters *fms = &priv->flow_meters; + struct mlx5_flow_meter *fm; + + TAILQ_FOREACH(fm, fms, next) + if (meter_id == fm->meter_id) + return fm; + return NULL; +} + +/** + * Attach meter to flow. + * Unidirectional Meter creation can only be done + * when flow direction is known, i.e. when calling meter_attach. + * + * @param [in] priv + * Pointer to mlx5 private data. + * @param [in] meter_id + * Flow meter id. + * @param [in] attr + * Pointer to flow attributes. + * @param [out] error + * Pointer to error structure. + * + * @return the flow meter pointer, NULL otherwise. + */ +struct mlx5_flow_meter * +mlx5_flow_meter_attach(struct mlx5_priv *priv, uint32_t meter_id, + const struct rte_flow_attr *attr, + struct rte_flow_error *error) +{ + struct mlx5_flow_meter *fm; + + fm = mlx5_flow_meter_find(priv, meter_id); + if (fm == NULL) { + rte_flow_error_set(error, ENOENT, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "Meter object id not valid"); + goto error; + } + if (!fm->shared && fm->ref_cnt) { + DRV_LOG(ERR, "Cannot share a non-shared meter."); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "Meter can't be shared"); + goto error; + } + if (!fm->ref_cnt++) { + MLX5_ASSERT(!fm->mfts->meter_action); + fm->ingress = attr->ingress; + fm->egress = attr->egress; + fm->transfer = attr->transfer; + /* This also creates the meter object. 
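mlx5_flow_meter_attach() above is exercised when a flow rule references the meter through the METER action, which is also the point where the flow direction becomes known. A minimal ingress rule sketch (queue index, port id and meter id are illustrative, error handling omitted):

    #include <rte_flow.h>

    /* Create an ingress catch-all rule that meters traffic and then sends
     * it to queue 0. */
    static struct rte_flow *
    example_flow_with_meter(uint16_t port_id, uint32_t mtr_id)
    {
        const struct rte_flow_attr attr = { .ingress = 1 };
        const struct rte_flow_item pattern[] = {
            { .type = RTE_FLOW_ITEM_TYPE_ETH },
            { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        const struct rte_flow_action_meter meter = { .mtr_id = mtr_id };
        const struct rte_flow_action_queue queue = { .index = 0 };
        const struct rte_flow_action actions[] = {
            { .type = RTE_FLOW_ACTION_TYPE_METER, .conf = &meter },
            { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
            { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        struct rte_flow_error error;

        return rte_flow_create(port_id, &attr, pattern, actions, &error);
    }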
*/ + fm->mfts->meter_action = mlx5_flow_meter_action_create(priv, + fm); + if (!fm->mfts->meter_action) + goto error_detach; + } else { + MLX5_ASSERT(fm->mfts->meter_action); + if (attr->transfer != fm->transfer || + attr->ingress != fm->ingress || + attr->egress != fm->egress) { + DRV_LOG(ERR, "meter I/O attributes do not " + "match flow I/O attributes."); + goto error_detach; + } + } + return fm; +error_detach: + mlx5_flow_meter_detach(fm); + rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + fm->mfts->meter_action ? "Meter attr not match" : + "Meter action create failed"); +error: + return NULL; +} + +/** + * Detach meter from flow. + * + * @param [in] fm + * Pointer to flow meter. + */ +void +mlx5_flow_meter_detach(struct mlx5_flow_meter *fm) +{ + MLX5_ASSERT(fm->ref_cnt); + if (--fm->ref_cnt) + return; + if (fm->mfts->meter_action) + mlx5_glue->destroy_flow_action(fm->mfts->meter_action); + fm->mfts->meter_action = NULL; + fm->ingress = 0; + fm->egress = 0; + fm->transfer = 0; +} + +/** + * Flush meter configuration. + * + * @param[in] dev + * Pointer to Ethernet device. + * @param[out] error + * Pointer to rte meter error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_flow_meter_flush(struct rte_eth_dev *dev, struct rte_mtr_error *error) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_flow_meters *fms = &priv->flow_meters; + struct mlx5_mtr_profiles *fmps = &priv->flow_meter_profiles; + struct mlx5_flow_meter_profile *fmp; + struct mlx5_flow_meter *fm; + const struct rte_flow_attr attr = { + .ingress = 1, + .egress = 1, + .transfer = priv->config.dv_esw_en ? 1 : 0, + }; + void *tmp; + uint32_t i; + + TAILQ_FOREACH_SAFE(fm, fms, next, tmp) { + /* Meter object must not have any owner. */ + MLX5_ASSERT(!fm->ref_cnt); + /* Get meter profile. */ + fmp = fm->profile; + if (fmp == NULL) + return -rte_mtr_error_set(error, EINVAL, + RTE_MTR_ERROR_TYPE_METER_PROFILE_ID, + NULL, "MTR object meter profile invalid."); + /* Update dependencies. */ + fmp->ref_cnt--; + /* Remove from list. */ + TAILQ_REMOVE(fms, fm, next); + /* Free policer counters. */ + for (i = 0; i < RTE_DIM(fm->policer_stats.cnt); i++) + if (fm->policer_stats.cnt[i]) + mlx5_counter_free(dev, + fm->policer_stats.cnt[i]); + /* Free meter flow table. */ + mlx5_flow_destroy_policer_rules(dev, fm, &attr); + mlx5_flow_destroy_mtr_tbls(dev, fm->mfts); + mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MTR], fm->idx); + } + TAILQ_FOREACH_SAFE(fmp, fmps, next, tmp) { + /* Check unused. */ + MLX5_ASSERT(!fmp->ref_cnt); + /* Remove from list. */ + TAILQ_REMOVE(&priv->flow_meter_profiles, fmp, next); + rte_free(fmp); + } + return 0; +} diff --git a/src/spdk/dpdk/drivers/net/mlx5/mlx5_flow_verbs.c b/src/spdk/dpdk/drivers/net/mlx5/mlx5_flow_verbs.c new file mode 100644 index 000000000..c266e5683 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/mlx5/mlx5_flow_verbs.c @@ -0,0 +1,1987 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2018 Mellanox Technologies, Ltd + */ + +#include +#include +#include +#include +#include + +/* Verbs header. */ +/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. 
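The flush loop above relies on the safe TAILQ iterator so entries can be unlinked while walking the list. A self-contained sketch of the pattern; the macro is spelled out because glibc's <sys/queue.h> does not provide it:

    #include <stdlib.h>
    #include <sys/queue.h>

    #ifndef TAILQ_FOREACH_SAFE
    #define TAILQ_FOREACH_SAFE(var, head, field, tvar)              \
        for ((var) = TAILQ_FIRST((head));                           \
             (var) && ((tvar) = TAILQ_NEXT((var), field), 1);       \
             (var) = (tvar))
    #endif

    struct entry {
        TAILQ_ENTRY(entry) next;
        int value;
    };
    TAILQ_HEAD(entry_list, entry);

    /* Drain the list: the "safe" variant caches the next pointer before the
     * current element is removed and freed. */
    static void
    flush_all(struct entry_list *list)
    {
        struct entry *e, *tmp;

        TAILQ_FOREACH_SAFE(e, list, next, tmp) {
            TAILQ_REMOVE(list, e, next);
            free(e);
        }
    }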
*/ +#ifdef PEDANTIC +#pragma GCC diagnostic ignored "-Wpedantic" +#endif +#include +#ifdef PEDANTIC +#pragma GCC diagnostic error "-Wpedantic" +#endif + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "mlx5_defs.h" +#include "mlx5.h" +#include "mlx5_flow.h" +#include "mlx5_rxtx.h" + +#define VERBS_SPEC_INNER(item_flags) \ + (!!((item_flags) & MLX5_FLOW_LAYER_TUNNEL) ? IBV_FLOW_SPEC_INNER : 0) + +/** + * Get Verbs flow counter by index. + * + * @param[in] dev + * Pointer to the Ethernet device structure. + * @param[in] idx + * mlx5 flow counter index in the container. + * @param[out] ppool + * mlx5 flow counter pool in the container, + * + * @return + * A pointer to the counter, NULL otherwise. + */ +static struct mlx5_flow_counter * +flow_verbs_counter_get_by_idx(struct rte_eth_dev *dev, + uint32_t idx, + struct mlx5_flow_counter_pool **ppool) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, 0, 0); + struct mlx5_flow_counter_pool *pool; + + idx--; + pool = cont->pools[idx / MLX5_COUNTERS_PER_POOL]; + MLX5_ASSERT(pool); + if (ppool) + *ppool = pool; + return MLX5_POOL_GET_CNT(pool, idx % MLX5_COUNTERS_PER_POOL); +} + +/** + * Create Verbs flow counter with Verbs library. + * + * @param[in] dev + * Pointer to the Ethernet device structure. + * @param[in, out] counter + * mlx5 flow counter object, contains the counter id, + * handle of created Verbs flow counter is returned + * in cs field (if counters are supported). + * + * @return + * 0 On success else a negative errno value is returned + * and rte_errno is set. + */ +static int +flow_verbs_counter_create(struct rte_eth_dev *dev, + struct mlx5_flow_counter_ext *counter) +{ +#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) + struct mlx5_priv *priv = dev->data->dev_private; + struct ibv_context *ctx = priv->sh->ctx; + struct ibv_counter_set_init_attr init = { + .counter_set_id = counter->id}; + + counter->cs = mlx5_glue->create_counter_set(ctx, &init); + if (!counter->cs) { + rte_errno = ENOTSUP; + return -ENOTSUP; + } + return 0; +#elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45) + struct mlx5_priv *priv = dev->data->dev_private; + struct ibv_context *ctx = priv->sh->ctx; + struct ibv_counters_init_attr init = {0}; + struct ibv_counter_attach_attr attach; + int ret; + + memset(&attach, 0, sizeof(attach)); + counter->cs = mlx5_glue->create_counters(ctx, &init); + if (!counter->cs) { + rte_errno = ENOTSUP; + return -ENOTSUP; + } + attach.counter_desc = IBV_COUNTER_PACKETS; + attach.index = 0; + ret = mlx5_glue->attach_counters(counter->cs, &attach, NULL); + if (!ret) { + attach.counter_desc = IBV_COUNTER_BYTES; + attach.index = 1; + ret = mlx5_glue->attach_counters + (counter->cs, &attach, NULL); + } + if (ret) { + claim_zero(mlx5_glue->destroy_counters(counter->cs)); + counter->cs = NULL; + rte_errno = ret; + return -ret; + } + return 0; +#else + (void)dev; + (void)counter; + rte_errno = ENOTSUP; + return -ENOTSUP; +#endif +} + +/** + * Get a flow counter. + * + * @param[in] dev + * Pointer to the Ethernet device structure. + * @param[in] shared + * Indicate if this counter is shared with other flows. + * @param[in] id + * Counter identifier. + * + * @return + * Index to the counter, 0 otherwise and rte_errno is set. 
+ */ +static uint32_t +flow_verbs_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, 0, 0); + struct mlx5_flow_counter_pool *pool = NULL; + struct mlx5_flow_counter_ext *cnt_ext = NULL; + struct mlx5_flow_counter *cnt = NULL; + uint32_t n_valid = rte_atomic16_read(&cont->n_valid); + uint32_t pool_idx; + uint32_t i; + int ret; + + if (shared) { + for (pool_idx = 0; pool_idx < n_valid; ++pool_idx) { + pool = cont->pools[pool_idx]; + for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) { + cnt_ext = MLX5_GET_POOL_CNT_EXT(pool, i); + if (cnt_ext->shared && cnt_ext->id == id) { + cnt_ext->ref_cnt++; + return MLX5_MAKE_CNT_IDX(pool_idx, i); + } + } + } + } + for (pool_idx = 0; pool_idx < n_valid; ++pool_idx) { + pool = cont->pools[pool_idx]; + if (!pool) + continue; + cnt = TAILQ_FIRST(&pool->counters); + if (cnt) + break; + } + if (!cnt) { + struct mlx5_flow_counter_pool **pools; + uint32_t size; + + if (n_valid == cont->n) { + /* Resize the container pool array. */ + size = sizeof(struct mlx5_flow_counter_pool *) * + (n_valid + MLX5_CNT_CONTAINER_RESIZE); + pools = rte_zmalloc(__func__, size, 0); + if (!pools) + return 0; + if (n_valid) { + memcpy(pools, cont->pools, + sizeof(struct mlx5_flow_counter_pool *) * + n_valid); + rte_free(cont->pools); + } + cont->pools = pools; + cont->n += MLX5_CNT_CONTAINER_RESIZE; + } + /* Allocate memory for new pool*/ + size = sizeof(*pool) + (sizeof(*cnt_ext) + sizeof(*cnt)) * + MLX5_COUNTERS_PER_POOL; + pool = rte_calloc(__func__, 1, size, 0); + if (!pool) + return 0; + pool->type |= CNT_POOL_TYPE_EXT; + for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) { + cnt = MLX5_POOL_GET_CNT(pool, i); + TAILQ_INSERT_HEAD(&pool->counters, cnt, next); + } + cnt = MLX5_POOL_GET_CNT(pool, 0); + cont->pools[n_valid] = pool; + pool_idx = n_valid; + rte_atomic16_add(&cont->n_valid, 1); + TAILQ_INSERT_HEAD(&cont->pool_list, pool, next); + } + i = MLX5_CNT_ARRAY_IDX(pool, cnt); + cnt_ext = MLX5_GET_POOL_CNT_EXT(pool, i); + cnt_ext->id = id; + cnt_ext->shared = shared; + cnt_ext->ref_cnt = 1; + cnt->hits = 0; + cnt->bytes = 0; + /* Create counter with Verbs. */ + ret = flow_verbs_counter_create(dev, cnt_ext); + if (!ret) { + TAILQ_REMOVE(&pool->counters, cnt, next); + return MLX5_MAKE_CNT_IDX(pool_idx, i); + } + /* Some error occurred in Verbs library. */ + rte_errno = -ret; + return 0; +} + +/** + * Release a flow counter. + * + * @param[in] dev + * Pointer to the Ethernet device structure. + * @param[in] counter + * Index to the counter handler. + */ +static void +flow_verbs_counter_release(struct rte_eth_dev *dev, uint32_t counter) +{ + struct mlx5_flow_counter_pool *pool; + struct mlx5_flow_counter *cnt; + struct mlx5_flow_counter_ext *cnt_ext; + + cnt = flow_verbs_counter_get_by_idx(dev, counter, + &pool); + cnt_ext = MLX5_CNT_TO_CNT_EXT(pool, cnt); + if (--cnt_ext->ref_cnt == 0) { +#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) + claim_zero(mlx5_glue->destroy_counter_set(cnt_ext->cs)); + cnt_ext->cs = NULL; +#elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45) + claim_zero(mlx5_glue->destroy_counters(cnt_ext->cs)); + cnt_ext->cs = NULL; +#endif + TAILQ_INSERT_HEAD(&pool->counters, cnt, next); + } +} + +/** + * Query a flow counter via Verbs library call. 
+ * + * @see rte_flow_query() + * @see rte_flow_ops + */ +static int +flow_verbs_counter_query(struct rte_eth_dev *dev __rte_unused, + struct rte_flow *flow, void *data, + struct rte_flow_error *error) +{ +#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \ + defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45) + if (flow->counter) { + struct mlx5_flow_counter_pool *pool; + struct mlx5_flow_counter *cnt = flow_verbs_counter_get_by_idx + (dev, flow->counter, &pool); + struct mlx5_flow_counter_ext *cnt_ext = MLX5_CNT_TO_CNT_EXT + (pool, cnt); + struct rte_flow_query_count *qc = data; + uint64_t counters[2] = {0, 0}; +#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) + struct ibv_query_counter_set_attr query_cs_attr = { + .cs = cnt_ext->cs, + .query_flags = IBV_COUNTER_SET_FORCE_UPDATE, + }; + struct ibv_counter_set_data query_out = { + .out = counters, + .outlen = 2 * sizeof(uint64_t), + }; + int err = mlx5_glue->query_counter_set(&query_cs_attr, + &query_out); +#elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45) + int err = mlx5_glue->query_counters + (cnt_ext->cs, counters, + RTE_DIM(counters), + IBV_READ_COUNTERS_ATTR_PREFER_CACHED); +#endif + if (err) + return rte_flow_error_set + (error, err, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "cannot read counter"); + qc->hits_set = 1; + qc->bytes_set = 1; + qc->hits = counters[0] - cnt->hits; + qc->bytes = counters[1] - cnt->bytes; + if (qc->reset) { + cnt->hits = counters[0]; + cnt->bytes = counters[1]; + } + return 0; + } + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "flow does not have counter"); +#else + (void)flow; + (void)data; + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "counters are not available"); +#endif +} + +/** + * Add a verbs item specification into @p verbs. + * + * @param[out] verbs + * Pointer to verbs structure. + * @param[in] src + * Create specification. + * @param[in] size + * Size in bytes of the specification to copy. + */ +static void +flow_verbs_spec_add(struct mlx5_flow_verbs_workspace *verbs, + void *src, unsigned int size) +{ + void *dst; + + if (!verbs) + return; + MLX5_ASSERT(verbs->specs); + dst = (void *)(verbs->specs + verbs->size); + memcpy(dst, src, size); + ++verbs->attr.num_of_specs; + verbs->size += size; +} + +/** + * Convert the @p item into a Verbs specification. This function assumes that + * the input is valid and that there is space to insert the requested item + * into the flow. + * + * @param[in, out] dev_flow + * Pointer to dev_flow structure. + * @param[in] item + * Item specification. + * @param[in] item_flags + * Parsed item flags. + */ +static void +flow_verbs_translate_item_eth(struct mlx5_flow *dev_flow, + const struct rte_flow_item *item, + uint64_t item_flags) +{ + const struct rte_flow_item_eth *spec = item->spec; + const struct rte_flow_item_eth *mask = item->mask; + const unsigned int size = sizeof(struct ibv_flow_spec_eth); + struct ibv_flow_spec_eth eth = { + .type = IBV_FLOW_SPEC_ETH | VERBS_SPEC_INNER(item_flags), + .size = size, + }; + + if (!mask) + mask = &rte_flow_item_eth_mask; + if (spec) { + unsigned int i; + + memcpy(ð.val.dst_mac, spec->dst.addr_bytes, + RTE_ETHER_ADDR_LEN); + memcpy(ð.val.src_mac, spec->src.addr_bytes, + RTE_ETHER_ADDR_LEN); + eth.val.ether_type = spec->type; + memcpy(ð.mask.dst_mac, mask->dst.addr_bytes, + RTE_ETHER_ADDR_LEN); + memcpy(ð.mask.src_mac, mask->src.addr_bytes, + RTE_ETHER_ADDR_LEN); + eth.mask.ether_type = mask->type; + /* Remove unwanted bits from values. 
*/ + for (i = 0; i < RTE_ETHER_ADDR_LEN; ++i) { + eth.val.dst_mac[i] &= eth.mask.dst_mac[i]; + eth.val.src_mac[i] &= eth.mask.src_mac[i]; + } + eth.val.ether_type &= eth.mask.ether_type; + } + flow_verbs_spec_add(&dev_flow->verbs, ð, size); +} + +/** + * Update the VLAN tag in the Verbs Ethernet specification. + * This function assumes that the input is valid and there is space to add + * the requested item. + * + * @param[in, out] attr + * Pointer to Verbs attributes structure. + * @param[in] eth + * Verbs structure containing the VLAN information to copy. + */ +static void +flow_verbs_item_vlan_update(struct ibv_flow_attr *attr, + struct ibv_flow_spec_eth *eth) +{ + unsigned int i; + const enum ibv_flow_spec_type search = eth->type; + struct ibv_spec_header *hdr = (struct ibv_spec_header *) + ((uint8_t *)attr + sizeof(struct ibv_flow_attr)); + + for (i = 0; i != attr->num_of_specs; ++i) { + if (hdr->type == search) { + struct ibv_flow_spec_eth *e = + (struct ibv_flow_spec_eth *)hdr; + + e->val.vlan_tag = eth->val.vlan_tag; + e->mask.vlan_tag = eth->mask.vlan_tag; + e->val.ether_type = eth->val.ether_type; + e->mask.ether_type = eth->mask.ether_type; + break; + } + hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size); + } +} + +/** + * Convert the @p item into a Verbs specification. This function assumes that + * the input is valid and that there is space to insert the requested item + * into the flow. + * + * @param[in, out] dev_flow + * Pointer to dev_flow structure. + * @param[in] item + * Item specification. + * @param[in] item_flags + * Parsed item flags. + */ +static void +flow_verbs_translate_item_vlan(struct mlx5_flow *dev_flow, + const struct rte_flow_item *item, + uint64_t item_flags) +{ + const struct rte_flow_item_vlan *spec = item->spec; + const struct rte_flow_item_vlan *mask = item->mask; + unsigned int size = sizeof(struct ibv_flow_spec_eth); + const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); + struct ibv_flow_spec_eth eth = { + .type = IBV_FLOW_SPEC_ETH | VERBS_SPEC_INNER(item_flags), + .size = size, + }; + const uint32_t l2m = tunnel ? MLX5_FLOW_LAYER_INNER_L2 : + MLX5_FLOW_LAYER_OUTER_L2; + + if (!mask) + mask = &rte_flow_item_vlan_mask; + if (spec) { + eth.val.vlan_tag = spec->tci; + eth.mask.vlan_tag = mask->tci; + eth.val.vlan_tag &= eth.mask.vlan_tag; + eth.val.ether_type = spec->inner_type; + eth.mask.ether_type = mask->inner_type; + eth.val.ether_type &= eth.mask.ether_type; + } + if (!(item_flags & l2m)) + flow_verbs_spec_add(&dev_flow->verbs, ð, size); + else + flow_verbs_item_vlan_update(&dev_flow->verbs.attr, ð); + if (!tunnel) + dev_flow->handle->vf_vlan.tag = + rte_be_to_cpu_16(spec->tci) & 0x0fff; +} + +/** + * Convert the @p item into a Verbs specification. This function assumes that + * the input is valid and that there is space to insert the requested item + * into the flow. + * + * @param[in, out] dev_flow + * Pointer to dev_flow structure. + * @param[in] item + * Item specification. + * @param[in] item_flags + * Parsed item flags. 
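The Ethernet translation above consumes an application-provided spec/mask pair and then masks the value bits. A sketch of what such a pair looks like on the rte_flow side (the MAC address is illustrative):

    #include <rte_ether.h>
    #include <rte_flow.h>

    /* Match one destination MAC address; unmasked fields are ignored by the
     * translation above because the value is ANDed with the mask. */
    static const struct rte_flow_item_eth eth_spec = {
        .dst.addr_bytes = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
    };
    static const struct rte_flow_item_eth eth_mask = {
        .dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
    };
    static const struct rte_flow_item eth_item = {
        .type = RTE_FLOW_ITEM_TYPE_ETH,
        .spec = &eth_spec,
        .mask = &eth_mask,
    };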
+ */ +static void +flow_verbs_translate_item_ipv4(struct mlx5_flow *dev_flow, + const struct rte_flow_item *item, + uint64_t item_flags) +{ + const struct rte_flow_item_ipv4 *spec = item->spec; + const struct rte_flow_item_ipv4 *mask = item->mask; + unsigned int size = sizeof(struct ibv_flow_spec_ipv4_ext); + struct ibv_flow_spec_ipv4_ext ipv4 = { + .type = IBV_FLOW_SPEC_IPV4_EXT | VERBS_SPEC_INNER(item_flags), + .size = size, + }; + + if (!mask) + mask = &rte_flow_item_ipv4_mask; + if (spec) { + ipv4.val = (struct ibv_flow_ipv4_ext_filter){ + .src_ip = spec->hdr.src_addr, + .dst_ip = spec->hdr.dst_addr, + .proto = spec->hdr.next_proto_id, + .tos = spec->hdr.type_of_service, + }; + ipv4.mask = (struct ibv_flow_ipv4_ext_filter){ + .src_ip = mask->hdr.src_addr, + .dst_ip = mask->hdr.dst_addr, + .proto = mask->hdr.next_proto_id, + .tos = mask->hdr.type_of_service, + }; + /* Remove unwanted bits from values. */ + ipv4.val.src_ip &= ipv4.mask.src_ip; + ipv4.val.dst_ip &= ipv4.mask.dst_ip; + ipv4.val.proto &= ipv4.mask.proto; + ipv4.val.tos &= ipv4.mask.tos; + } + flow_verbs_spec_add(&dev_flow->verbs, &ipv4, size); +} + +/** + * Convert the @p item into a Verbs specification. This function assumes that + * the input is valid and that there is space to insert the requested item + * into the flow. + * + * @param[in, out] dev_flow + * Pointer to dev_flow structure. + * @param[in] item + * Item specification. + * @param[in] item_flags + * Parsed item flags. + */ +static void +flow_verbs_translate_item_ipv6(struct mlx5_flow *dev_flow, + const struct rte_flow_item *item, + uint64_t item_flags) +{ + const struct rte_flow_item_ipv6 *spec = item->spec; + const struct rte_flow_item_ipv6 *mask = item->mask; + unsigned int size = sizeof(struct ibv_flow_spec_ipv6); + struct ibv_flow_spec_ipv6 ipv6 = { + .type = IBV_FLOW_SPEC_IPV6 | VERBS_SPEC_INNER(item_flags), + .size = size, + }; + + if (!mask) + mask = &rte_flow_item_ipv6_mask; + if (spec) { + unsigned int i; + uint32_t vtc_flow_val; + uint32_t vtc_flow_mask; + + memcpy(&ipv6.val.src_ip, spec->hdr.src_addr, + RTE_DIM(ipv6.val.src_ip)); + memcpy(&ipv6.val.dst_ip, spec->hdr.dst_addr, + RTE_DIM(ipv6.val.dst_ip)); + memcpy(&ipv6.mask.src_ip, mask->hdr.src_addr, + RTE_DIM(ipv6.mask.src_ip)); + memcpy(&ipv6.mask.dst_ip, mask->hdr.dst_addr, + RTE_DIM(ipv6.mask.dst_ip)); + vtc_flow_val = rte_be_to_cpu_32(spec->hdr.vtc_flow); + vtc_flow_mask = rte_be_to_cpu_32(mask->hdr.vtc_flow); + ipv6.val.flow_label = + rte_cpu_to_be_32((vtc_flow_val & RTE_IPV6_HDR_FL_MASK) >> + RTE_IPV6_HDR_FL_SHIFT); + ipv6.val.traffic_class = (vtc_flow_val & RTE_IPV6_HDR_TC_MASK) >> + RTE_IPV6_HDR_TC_SHIFT; + ipv6.val.next_hdr = spec->hdr.proto; + ipv6.mask.flow_label = + rte_cpu_to_be_32((vtc_flow_mask & RTE_IPV6_HDR_FL_MASK) >> + RTE_IPV6_HDR_FL_SHIFT); + ipv6.mask.traffic_class = (vtc_flow_mask & RTE_IPV6_HDR_TC_MASK) >> + RTE_IPV6_HDR_TC_SHIFT; + ipv6.mask.next_hdr = mask->hdr.proto; + /* Remove unwanted bits from values. */ + for (i = 0; i < RTE_DIM(ipv6.val.src_ip); ++i) { + ipv6.val.src_ip[i] &= ipv6.mask.src_ip[i]; + ipv6.val.dst_ip[i] &= ipv6.mask.dst_ip[i]; + } + ipv6.val.flow_label &= ipv6.mask.flow_label; + ipv6.val.traffic_class &= ipv6.mask.traffic_class; + ipv6.val.next_hdr &= ipv6.mask.next_hdr; + } + flow_verbs_spec_add(&dev_flow->verbs, &ipv6, size); +} + +/** + * Convert the @p item into a Verbs specification. This function assumes that + * the input is valid and that there is space to insert the requested item + * into the flow. 
+ * + * @param[in, out] dev_flow + * Pointer to dev_flow structure. + * @param[in] item + * Item specification. + * @param[in] item_flags + * Parsed item flags. + */ +static void +flow_verbs_translate_item_tcp(struct mlx5_flow *dev_flow, + const struct rte_flow_item *item, + uint64_t item_flags __rte_unused) +{ + const struct rte_flow_item_tcp *spec = item->spec; + const struct rte_flow_item_tcp *mask = item->mask; + unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp); + struct ibv_flow_spec_tcp_udp tcp = { + .type = IBV_FLOW_SPEC_TCP | VERBS_SPEC_INNER(item_flags), + .size = size, + }; + + if (!mask) + mask = &rte_flow_item_tcp_mask; + if (spec) { + tcp.val.dst_port = spec->hdr.dst_port; + tcp.val.src_port = spec->hdr.src_port; + tcp.mask.dst_port = mask->hdr.dst_port; + tcp.mask.src_port = mask->hdr.src_port; + /* Remove unwanted bits from values. */ + tcp.val.src_port &= tcp.mask.src_port; + tcp.val.dst_port &= tcp.mask.dst_port; + } + flow_verbs_spec_add(&dev_flow->verbs, &tcp, size); +} + +/** + * Convert the @p item into a Verbs specification. This function assumes that + * the input is valid and that there is space to insert the requested item + * into the flow. + * + * @param[in, out] dev_flow + * Pointer to dev_flow structure. + * @param[in] item + * Item specification. + * @param[in] item_flags + * Parsed item flags. + */ +static void +flow_verbs_translate_item_udp(struct mlx5_flow *dev_flow, + const struct rte_flow_item *item, + uint64_t item_flags __rte_unused) +{ + const struct rte_flow_item_udp *spec = item->spec; + const struct rte_flow_item_udp *mask = item->mask; + unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp); + struct ibv_flow_spec_tcp_udp udp = { + .type = IBV_FLOW_SPEC_UDP | VERBS_SPEC_INNER(item_flags), + .size = size, + }; + + if (!mask) + mask = &rte_flow_item_udp_mask; + if (spec) { + udp.val.dst_port = spec->hdr.dst_port; + udp.val.src_port = spec->hdr.src_port; + udp.mask.dst_port = mask->hdr.dst_port; + udp.mask.src_port = mask->hdr.src_port; + /* Remove unwanted bits from values. */ + udp.val.src_port &= udp.mask.src_port; + udp.val.dst_port &= udp.mask.dst_port; + } + item++; + while (item->type == RTE_FLOW_ITEM_TYPE_VOID) + item++; + if (!(udp.val.dst_port & udp.mask.dst_port)) { + switch ((item)->type) { + case RTE_FLOW_ITEM_TYPE_VXLAN: + udp.val.dst_port = htons(MLX5_UDP_PORT_VXLAN); + udp.mask.dst_port = 0xffff; + break; + case RTE_FLOW_ITEM_TYPE_VXLAN_GPE: + udp.val.dst_port = htons(MLX5_UDP_PORT_VXLAN_GPE); + udp.mask.dst_port = 0xffff; + break; + case RTE_FLOW_ITEM_TYPE_MPLS: + udp.val.dst_port = htons(MLX5_UDP_PORT_MPLS); + udp.mask.dst_port = 0xffff; + break; + default: + break; + } + } + + flow_verbs_spec_add(&dev_flow->verbs, &udp, size); +} + +/** + * Convert the @p item into a Verbs specification. This function assumes that + * the input is valid and that there is space to insert the requested item + * into the flow. + * + * @param[in, out] dev_flow + * Pointer to dev_flow structure. + * @param[in] item + * Item specification. + * @param[in] item_flags + * Parsed item flags. 
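Because of the UDP destination-port defaulting above, a tunnel pattern does not have to spell the well-known port out; when the port is left unspecified the translation fills it in (4789 for VXLAN in this sketch):

    #include <rte_flow.h>

    /* VXLAN match without an explicit UDP destination port; the PMD
     * substitutes the well-known port when none is masked. */
    static const struct rte_flow_item vxlan_pattern[] = {
        { .type = RTE_FLOW_ITEM_TYPE_ETH },
        { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
        { .type = RTE_FLOW_ITEM_TYPE_UDP },     /* no spec/mask given */
        { .type = RTE_FLOW_ITEM_TYPE_VXLAN },
        { .type = RTE_FLOW_ITEM_TYPE_END },
    };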
+ */ +static void +flow_verbs_translate_item_vxlan(struct mlx5_flow *dev_flow, + const struct rte_flow_item *item, + uint64_t item_flags __rte_unused) +{ + const struct rte_flow_item_vxlan *spec = item->spec; + const struct rte_flow_item_vxlan *mask = item->mask; + unsigned int size = sizeof(struct ibv_flow_spec_tunnel); + struct ibv_flow_spec_tunnel vxlan = { + .type = IBV_FLOW_SPEC_VXLAN_TUNNEL, + .size = size, + }; + union vni { + uint32_t vlan_id; + uint8_t vni[4]; + } id = { .vlan_id = 0, }; + + if (!mask) + mask = &rte_flow_item_vxlan_mask; + if (spec) { + memcpy(&id.vni[1], spec->vni, 3); + vxlan.val.tunnel_id = id.vlan_id; + memcpy(&id.vni[1], mask->vni, 3); + vxlan.mask.tunnel_id = id.vlan_id; + /* Remove unwanted bits from values. */ + vxlan.val.tunnel_id &= vxlan.mask.tunnel_id; + } + flow_verbs_spec_add(&dev_flow->verbs, &vxlan, size); +} + +/** + * Convert the @p item into a Verbs specification. This function assumes that + * the input is valid and that there is space to insert the requested item + * into the flow. + * + * @param[in, out] dev_flow + * Pointer to dev_flow structure. + * @param[in] item + * Item specification. + * @param[in] item_flags + * Parsed item flags. + */ +static void +flow_verbs_translate_item_vxlan_gpe(struct mlx5_flow *dev_flow, + const struct rte_flow_item *item, + uint64_t item_flags __rte_unused) +{ + const struct rte_flow_item_vxlan_gpe *spec = item->spec; + const struct rte_flow_item_vxlan_gpe *mask = item->mask; + unsigned int size = sizeof(struct ibv_flow_spec_tunnel); + struct ibv_flow_spec_tunnel vxlan_gpe = { + .type = IBV_FLOW_SPEC_VXLAN_TUNNEL, + .size = size, + }; + union vni { + uint32_t vlan_id; + uint8_t vni[4]; + } id = { .vlan_id = 0, }; + + if (!mask) + mask = &rte_flow_item_vxlan_gpe_mask; + if (spec) { + memcpy(&id.vni[1], spec->vni, 3); + vxlan_gpe.val.tunnel_id = id.vlan_id; + memcpy(&id.vni[1], mask->vni, 3); + vxlan_gpe.mask.tunnel_id = id.vlan_id; + /* Remove unwanted bits from values. */ + vxlan_gpe.val.tunnel_id &= vxlan_gpe.mask.tunnel_id; + } + flow_verbs_spec_add(&dev_flow->verbs, &vxlan_gpe, size); +} + +/** + * Update the protocol in Verbs IPv4/IPv6 spec. + * + * @param[in, out] attr + * Pointer to Verbs attributes structure. + * @param[in] search + * Specification type to search in order to update the IP protocol. + * @param[in] protocol + * Protocol value to set if none is present in the specification. + */ +static void +flow_verbs_item_gre_ip_protocol_update(struct ibv_flow_attr *attr, + enum ibv_flow_spec_type search, + uint8_t protocol) +{ + unsigned int i; + struct ibv_spec_header *hdr = (struct ibv_spec_header *) + ((uint8_t *)attr + sizeof(struct ibv_flow_attr)); + + if (!attr) + return; + for (i = 0; i != attr->num_of_specs; ++i) { + if (hdr->type == search) { + union { + struct ibv_flow_spec_ipv4_ext *ipv4; + struct ibv_flow_spec_ipv6 *ipv6; + } ip; + + switch (search) { + case IBV_FLOW_SPEC_IPV4_EXT: + ip.ipv4 = (struct ibv_flow_spec_ipv4_ext *)hdr; + if (!ip.ipv4->val.proto) { + ip.ipv4->val.proto = protocol; + ip.ipv4->mask.proto = 0xff; + } + break; + case IBV_FLOW_SPEC_IPV6: + ip.ipv6 = (struct ibv_flow_spec_ipv6 *)hdr; + if (!ip.ipv6->val.next_hdr) { + ip.ipv6->val.next_hdr = protocol; + ip.ipv6->mask.next_hdr = 0xff; + } + break; + default: + break; + } + break; + } + hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size); + } +} + +/** + * Convert the @p item into a Verbs specification. 
This function assumes that
+ * the input is valid and that there is space to insert the requested item
+ * into the flow.
+ *
+ * @param[in, out] dev_flow
+ *   Pointer to dev_flow structure.
+ * @param[in] item
+ *   Item specification.
+ * @param[in] item_flags
+ *   Parsed item flags.
+ */
+static void
+flow_verbs_translate_item_gre(struct mlx5_flow *dev_flow,
+			      const struct rte_flow_item *item __rte_unused,
+			      uint64_t item_flags)
+{
+	struct mlx5_flow_verbs_workspace *verbs = &dev_flow->verbs;
+#ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT
+	unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
+	struct ibv_flow_spec_tunnel tunnel = {
+		.type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
+		.size = size,
+	};
+#else
+	const struct rte_flow_item_gre *spec = item->spec;
+	const struct rte_flow_item_gre *mask = item->mask;
+	unsigned int size = sizeof(struct ibv_flow_spec_gre);
+	struct ibv_flow_spec_gre tunnel = {
+		.type = IBV_FLOW_SPEC_GRE,
+		.size = size,
+	};
+
+	if (!mask)
+		mask = &rte_flow_item_gre_mask;
+	if (spec) {
+		tunnel.val.c_ks_res0_ver = spec->c_rsvd0_ver;
+		tunnel.val.protocol = spec->protocol;
+		tunnel.mask.c_ks_res0_ver = mask->c_rsvd0_ver;
+		tunnel.mask.protocol = mask->protocol;
+		/* Remove unwanted bits from values. */
+		tunnel.val.c_ks_res0_ver &= tunnel.mask.c_ks_res0_ver;
+		tunnel.val.protocol &= tunnel.mask.protocol;
+		tunnel.val.key &= tunnel.mask.key;
+	}
+#endif
+	if (item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
+		flow_verbs_item_gre_ip_protocol_update(&verbs->attr,
+						       IBV_FLOW_SPEC_IPV4_EXT,
+						       IPPROTO_GRE);
+	else
+		flow_verbs_item_gre_ip_protocol_update(&verbs->attr,
+						       IBV_FLOW_SPEC_IPV6,
+						       IPPROTO_GRE);
+	flow_verbs_spec_add(verbs, &tunnel, size);
+}
+
+/**
+ * Convert the @p item into a Verbs specification. This function assumes that
+ * the input is valid and that there is space to insert the requested item
+ * into the flow.
+ *
+ * @param[in, out] dev_flow
+ *   Pointer to dev_flow structure.
+ * @param[in] item
+ *   Item specification.
+ * @param[in] item_flags
+ *   Parsed item flags.
+ */
+static void
+flow_verbs_translate_item_mpls(struct mlx5_flow *dev_flow __rte_unused,
+			       const struct rte_flow_item *item __rte_unused,
+			       uint64_t item_flags __rte_unused)
+{
+#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
+	const struct rte_flow_item_mpls *spec = item->spec;
+	const struct rte_flow_item_mpls *mask = item->mask;
+	unsigned int size = sizeof(struct ibv_flow_spec_mpls);
+	struct ibv_flow_spec_mpls mpls = {
+		.type = IBV_FLOW_SPEC_MPLS,
+		.size = size,
+	};
+
+	if (!mask)
+		mask = &rte_flow_item_mpls_mask;
+	if (spec) {
+		memcpy(&mpls.val.label, spec, sizeof(mpls.val.label));
+		memcpy(&mpls.mask.label, mask, sizeof(mpls.mask.label));
+		/* Remove unwanted bits from values. */
+		mpls.val.label &= mpls.mask.label;
+	}
+	flow_verbs_spec_add(&dev_flow->verbs, &mpls, size);
+#endif
+}
+
+/**
+ * Convert the @p action into a Verbs specification. This function assumes that
+ * the input is valid and that there is space to insert the requested action
+ * into the flow.
+ *
+ * @param[in] dev_flow
+ *   Pointer to mlx5_flow.
+ * @param[in] action
+ *   Action configuration.
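+ *
+ * For illustration, an action list that pairs DROP with COUNT, the only
+ * combination the Verbs path accepts alongside DROP (the counter id is a
+ * placeholder):
+ *
+ * @code
+ *   struct rte_flow_action_count count_conf = { .id = 0 };
+ *   struct rte_flow_action actions[] = {
+ *       { .type = RTE_FLOW_ACTION_TYPE_COUNT, .conf = &count_conf },
+ *       { .type = RTE_FLOW_ACTION_TYPE_DROP },
+ *       { .type = RTE_FLOW_ACTION_TYPE_END },
+ *   };
+ * @endcode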
+ */ +static void +flow_verbs_translate_action_drop + (struct mlx5_flow *dev_flow, + const struct rte_flow_action *action __rte_unused) +{ + unsigned int size = sizeof(struct ibv_flow_spec_action_drop); + struct ibv_flow_spec_action_drop drop = { + .type = IBV_FLOW_SPEC_ACTION_DROP, + .size = size, + }; + + flow_verbs_spec_add(&dev_flow->verbs, &drop, size); +} + +/** + * Convert the @p action into a Verbs specification. This function assumes that + * the input is valid and that there is space to insert the requested action + * into the flow. + * + * @param[in] rss_desc + * Pointer to mlx5_flow_rss_desc. + * @param[in] action + * Action configuration. + */ +static void +flow_verbs_translate_action_queue(struct mlx5_flow_rss_desc *rss_desc, + const struct rte_flow_action *action) +{ + const struct rte_flow_action_queue *queue = action->conf; + + rss_desc->queue[0] = queue->index; + rss_desc->queue_num = 1; +} + +/** + * Convert the @p action into a Verbs specification. This function assumes that + * the input is valid and that there is space to insert the requested action + * into the flow. + * + * @param[in] rss_desc + * Pointer to mlx5_flow_rss_desc. + * @param[in] action + * Action configuration. + */ +static void +flow_verbs_translate_action_rss(struct mlx5_flow_rss_desc *rss_desc, + const struct rte_flow_action *action) +{ + const struct rte_flow_action_rss *rss = action->conf; + const uint8_t *rss_key; + + memcpy(rss_desc->queue, rss->queue, rss->queue_num * sizeof(uint16_t)); + rss_desc->queue_num = rss->queue_num; + /* NULL RSS key indicates default RSS key. */ + rss_key = !rss->key ? rss_hash_default_key : rss->key; + memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN); + /* + * rss->level and rss.types should be set in advance when expanding + * items for RSS. + */ +} + +/** + * Convert the @p action into a Verbs specification. This function assumes that + * the input is valid and that there is space to insert the requested action + * into the flow. + * + * @param[in] dev_flow + * Pointer to mlx5_flow. + * @param[in] action + * Action configuration. + */ +static void +flow_verbs_translate_action_flag + (struct mlx5_flow *dev_flow, + const struct rte_flow_action *action __rte_unused) +{ + unsigned int size = sizeof(struct ibv_flow_spec_action_tag); + struct ibv_flow_spec_action_tag tag = { + .type = IBV_FLOW_SPEC_ACTION_TAG, + .size = size, + .tag_id = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT), + }; + + flow_verbs_spec_add(&dev_flow->verbs, &tag, size); +} + +/** + * Convert the @p action into a Verbs specification. This function assumes that + * the input is valid and that there is space to insert the requested action + * into the flow. + * + * @param[in] dev_flow + * Pointer to mlx5_flow. + * @param[in] action + * Action configuration. + */ +static void +flow_verbs_translate_action_mark(struct mlx5_flow *dev_flow, + const struct rte_flow_action *action) +{ + const struct rte_flow_action_mark *mark = action->conf; + unsigned int size = sizeof(struct ibv_flow_spec_action_tag); + struct ibv_flow_spec_action_tag tag = { + .type = IBV_FLOW_SPEC_ACTION_TAG, + .size = size, + .tag_id = mlx5_flow_mark_set(mark->id), + }; + + flow_verbs_spec_add(&dev_flow->verbs, &tag, size); +} + +/** + * Convert the @p action into a Verbs specification. This function assumes that + * the input is valid and that there is space to insert the requested action + * into the flow. + * + * @param[in] dev + * Pointer to the Ethernet device structure. + * @param[in] action + * Action configuration. 
+ * @param[in] dev_flow + * Pointer to mlx5_flow. + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 On success else a negative errno value is returned and rte_errno is set. + */ +static int +flow_verbs_translate_action_count(struct mlx5_flow *dev_flow, + const struct rte_flow_action *action, + struct rte_eth_dev *dev, + struct rte_flow_error *error) +{ + const struct rte_flow_action_count *count = action->conf; + struct rte_flow *flow = dev_flow->flow; +#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \ + defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45) + struct mlx5_flow_counter_pool *pool; + struct mlx5_flow_counter *cnt = NULL; + struct mlx5_flow_counter_ext *cnt_ext; + unsigned int size = sizeof(struct ibv_flow_spec_counter_action); + struct ibv_flow_spec_counter_action counter = { + .type = IBV_FLOW_SPEC_ACTION_COUNT, + .size = size, + }; +#endif + + if (!flow->counter) { + flow->counter = flow_verbs_counter_new(dev, count->shared, + count->id); + if (!flow->counter) + return rte_flow_error_set(error, rte_errno, + RTE_FLOW_ERROR_TYPE_ACTION, + action, + "cannot get counter" + " context."); + } +#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) + cnt = flow_verbs_counter_get_by_idx(dev, flow->counter, &pool); + cnt_ext = MLX5_CNT_TO_CNT_EXT(pool, cnt); + counter.counter_set_handle = cnt_ext->cs->handle; + flow_verbs_spec_add(&dev_flow->verbs, &counter, size); +#elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45) + cnt = flow_verbs_counter_get_by_idx(dev, flow->counter, &pool); + cnt_ext = MLX5_CNT_TO_CNT_EXT(pool, cnt); + counter.counters = cnt_ext->cs; + flow_verbs_spec_add(&dev_flow->verbs, &counter, size); +#endif + return 0; +} + +/** + * Internal validation function. For validating both actions and items. + * + * @param[in] dev + * Pointer to the Ethernet device structure. + * @param[in] attr + * Pointer to the flow attributes. + * @param[in] items + * Pointer to the list of items. + * @param[in] actions + * Pointer to the list of actions. + * @param[in] external + * This flow rule is created by request external to PMD. + * @param[in] hairpin + * Number of hairpin TX actions, 0 means classic flow. + * @param[out] error + * Pointer to the error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +flow_verbs_validate(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item items[], + const struct rte_flow_action actions[], + bool external __rte_unused, + int hairpin __rte_unused, + struct rte_flow_error *error) +{ + int ret; + uint64_t action_flags = 0; + uint64_t item_flags = 0; + uint64_t last_item = 0; + uint8_t next_protocol = 0xff; + uint16_t ether_type = 0; + + if (items == NULL) + return -1; + ret = mlx5_flow_validate_attributes(dev, attr, error); + if (ret < 0) + return ret; + for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) { + int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); + int ret = 0; + + switch (items->type) { + case RTE_FLOW_ITEM_TYPE_VOID: + break; + case RTE_FLOW_ITEM_TYPE_ETH: + ret = mlx5_flow_validate_item_eth(items, item_flags, + error); + if (ret < 0) + return ret; + last_item = tunnel ? 
MLX5_FLOW_LAYER_INNER_L2 : + MLX5_FLOW_LAYER_OUTER_L2; + if (items->mask != NULL && items->spec != NULL) { + ether_type = + ((const struct rte_flow_item_eth *) + items->spec)->type; + ether_type &= + ((const struct rte_flow_item_eth *) + items->mask)->type; + ether_type = rte_be_to_cpu_16(ether_type); + } else { + ether_type = 0; + } + break; + case RTE_FLOW_ITEM_TYPE_VLAN: + ret = mlx5_flow_validate_item_vlan(items, item_flags, + dev, error); + if (ret < 0) + return ret; + last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 | + MLX5_FLOW_LAYER_INNER_VLAN) : + (MLX5_FLOW_LAYER_OUTER_L2 | + MLX5_FLOW_LAYER_OUTER_VLAN); + if (items->mask != NULL && items->spec != NULL) { + ether_type = + ((const struct rte_flow_item_vlan *) + items->spec)->inner_type; + ether_type &= + ((const struct rte_flow_item_vlan *) + items->mask)->inner_type; + ether_type = rte_be_to_cpu_16(ether_type); + } else { + ether_type = 0; + } + break; + case RTE_FLOW_ITEM_TYPE_IPV4: + ret = mlx5_flow_validate_item_ipv4(items, item_flags, + last_item, + ether_type, NULL, + error); + if (ret < 0) + return ret; + last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 : + MLX5_FLOW_LAYER_OUTER_L3_IPV4; + if (items->mask != NULL && + ((const struct rte_flow_item_ipv4 *) + items->mask)->hdr.next_proto_id) { + next_protocol = + ((const struct rte_flow_item_ipv4 *) + (items->spec))->hdr.next_proto_id; + next_protocol &= + ((const struct rte_flow_item_ipv4 *) + (items->mask))->hdr.next_proto_id; + } else { + /* Reset for inner layer. */ + next_protocol = 0xff; + } + break; + case RTE_FLOW_ITEM_TYPE_IPV6: + ret = mlx5_flow_validate_item_ipv6(items, item_flags, + last_item, + ether_type, NULL, + error); + if (ret < 0) + return ret; + last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 : + MLX5_FLOW_LAYER_OUTER_L3_IPV6; + if (items->mask != NULL && + ((const struct rte_flow_item_ipv6 *) + items->mask)->hdr.proto) { + next_protocol = + ((const struct rte_flow_item_ipv6 *) + items->spec)->hdr.proto; + next_protocol &= + ((const struct rte_flow_item_ipv6 *) + items->mask)->hdr.proto; + } else { + /* Reset for inner layer. */ + next_protocol = 0xff; + } + break; + case RTE_FLOW_ITEM_TYPE_UDP: + ret = mlx5_flow_validate_item_udp(items, item_flags, + next_protocol, + error); + if (ret < 0) + return ret; + last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP : + MLX5_FLOW_LAYER_OUTER_L4_UDP; + break; + case RTE_FLOW_ITEM_TYPE_TCP: + ret = mlx5_flow_validate_item_tcp + (items, item_flags, + next_protocol, + &rte_flow_item_tcp_mask, + error); + if (ret < 0) + return ret; + last_item = tunnel ? 
MLX5_FLOW_LAYER_INNER_L4_TCP : + MLX5_FLOW_LAYER_OUTER_L4_TCP; + break; + case RTE_FLOW_ITEM_TYPE_VXLAN: + ret = mlx5_flow_validate_item_vxlan(items, item_flags, + error); + if (ret < 0) + return ret; + last_item = MLX5_FLOW_LAYER_VXLAN; + break; + case RTE_FLOW_ITEM_TYPE_VXLAN_GPE: + ret = mlx5_flow_validate_item_vxlan_gpe(items, + item_flags, + dev, error); + if (ret < 0) + return ret; + last_item = MLX5_FLOW_LAYER_VXLAN_GPE; + break; + case RTE_FLOW_ITEM_TYPE_GRE: + ret = mlx5_flow_validate_item_gre(items, item_flags, + next_protocol, error); + if (ret < 0) + return ret; + last_item = MLX5_FLOW_LAYER_GRE; + break; + case RTE_FLOW_ITEM_TYPE_MPLS: + ret = mlx5_flow_validate_item_mpls(dev, items, + item_flags, + last_item, error); + if (ret < 0) + return ret; + last_item = MLX5_FLOW_LAYER_MPLS; + break; + default: + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + NULL, "item not supported"); + } + item_flags |= last_item; + } + for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { + switch (actions->type) { + case RTE_FLOW_ACTION_TYPE_VOID: + break; + case RTE_FLOW_ACTION_TYPE_FLAG: + ret = mlx5_flow_validate_action_flag(action_flags, + attr, + error); + if (ret < 0) + return ret; + action_flags |= MLX5_FLOW_ACTION_FLAG; + break; + case RTE_FLOW_ACTION_TYPE_MARK: + ret = mlx5_flow_validate_action_mark(actions, + action_flags, + attr, + error); + if (ret < 0) + return ret; + action_flags |= MLX5_FLOW_ACTION_MARK; + break; + case RTE_FLOW_ACTION_TYPE_DROP: + ret = mlx5_flow_validate_action_drop(action_flags, + attr, + error); + if (ret < 0) + return ret; + action_flags |= MLX5_FLOW_ACTION_DROP; + break; + case RTE_FLOW_ACTION_TYPE_QUEUE: + ret = mlx5_flow_validate_action_queue(actions, + action_flags, dev, + attr, + error); + if (ret < 0) + return ret; + action_flags |= MLX5_FLOW_ACTION_QUEUE; + break; + case RTE_FLOW_ACTION_TYPE_RSS: + ret = mlx5_flow_validate_action_rss(actions, + action_flags, dev, + attr, item_flags, + error); + if (ret < 0) + return ret; + action_flags |= MLX5_FLOW_ACTION_RSS; + break; + case RTE_FLOW_ACTION_TYPE_COUNT: + ret = mlx5_flow_validate_action_count(dev, attr, error); + if (ret < 0) + return ret; + action_flags |= MLX5_FLOW_ACTION_COUNT; + break; + default: + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + actions, + "action not supported"); + } + } + /* + * Validate the drop action mutual exclusion with other actions. + * Drop action is mutually-exclusive with any other action, except for + * Count action. + */ + if ((action_flags & MLX5_FLOW_ACTION_DROP) && + (action_flags & ~(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_COUNT))) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "Drop action is mutually-exclusive " + "with any other action, except for " + "Count action"); + if (!(action_flags & MLX5_FLOW_FATE_ACTIONS)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, actions, + "no fate action is found"); + return 0; +} + +/** + * Calculate the required bytes that are needed for the action part of the verbs + * flow. + * + * @param[in] actions + * Pointer to the list of actions. + * + * @return + * The size of the memory needed for all actions. 
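+ *
+ * For example, for the action list sketched below the returned size is
+ * sizeof(struct ibv_flow_spec_action_tag); QUEUE contributes nothing because
+ * it is resolved through the hash Rx queue rather than a Verbs spec
+ * (action configurations are omitted for brevity):
+ *
+ * @code
+ *   const struct rte_flow_action acts[] = {
+ *       { .type = RTE_FLOW_ACTION_TYPE_MARK },
+ *       { .type = RTE_FLOW_ACTION_TYPE_QUEUE },
+ *       { .type = RTE_FLOW_ACTION_TYPE_END },
+ *   };
+ * @endcode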
+ */ +static int +flow_verbs_get_actions_size(const struct rte_flow_action actions[]) +{ + int size = 0; + + for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { + switch (actions->type) { + case RTE_FLOW_ACTION_TYPE_VOID: + break; + case RTE_FLOW_ACTION_TYPE_FLAG: + size += sizeof(struct ibv_flow_spec_action_tag); + break; + case RTE_FLOW_ACTION_TYPE_MARK: + size += sizeof(struct ibv_flow_spec_action_tag); + break; + case RTE_FLOW_ACTION_TYPE_DROP: + size += sizeof(struct ibv_flow_spec_action_drop); + break; + case RTE_FLOW_ACTION_TYPE_QUEUE: + break; + case RTE_FLOW_ACTION_TYPE_RSS: + break; + case RTE_FLOW_ACTION_TYPE_COUNT: +#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \ + defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45) + size += sizeof(struct ibv_flow_spec_counter_action); +#endif + break; + default: + break; + } + } + return size; +} + +/** + * Calculate the required bytes that are needed for the item part of the verbs + * flow. + * + * @param[in] items + * Pointer to the list of items. + * + * @return + * The size of the memory needed for all items. + */ +static int +flow_verbs_get_items_size(const struct rte_flow_item items[]) +{ + int size = 0; + + for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) { + switch (items->type) { + case RTE_FLOW_ITEM_TYPE_VOID: + break; + case RTE_FLOW_ITEM_TYPE_ETH: + size += sizeof(struct ibv_flow_spec_eth); + break; + case RTE_FLOW_ITEM_TYPE_VLAN: + size += sizeof(struct ibv_flow_spec_eth); + break; + case RTE_FLOW_ITEM_TYPE_IPV4: + size += sizeof(struct ibv_flow_spec_ipv4_ext); + break; + case RTE_FLOW_ITEM_TYPE_IPV6: + size += sizeof(struct ibv_flow_spec_ipv6); + break; + case RTE_FLOW_ITEM_TYPE_UDP: + size += sizeof(struct ibv_flow_spec_tcp_udp); + break; + case RTE_FLOW_ITEM_TYPE_TCP: + size += sizeof(struct ibv_flow_spec_tcp_udp); + break; + case RTE_FLOW_ITEM_TYPE_VXLAN: + size += sizeof(struct ibv_flow_spec_tunnel); + break; + case RTE_FLOW_ITEM_TYPE_VXLAN_GPE: + size += sizeof(struct ibv_flow_spec_tunnel); + break; +#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT + case RTE_FLOW_ITEM_TYPE_GRE: + size += sizeof(struct ibv_flow_spec_gre); + break; + case RTE_FLOW_ITEM_TYPE_MPLS: + size += sizeof(struct ibv_flow_spec_mpls); + break; +#else + case RTE_FLOW_ITEM_TYPE_GRE: + size += sizeof(struct ibv_flow_spec_tunnel); + break; +#endif + default: + break; + } + } + return size; +} + +/** + * Internal preparation function. Allocate mlx5_flow with the required size. + * The required size is calculate based on the actions and items. This function + * also returns the detected actions and items for later use. + * + * @param[in] dev + * Pointer to Ethernet device. + * @param[in] attr + * Pointer to the flow attributes. + * @param[in] items + * Pointer to the list of items. + * @param[in] actions + * Pointer to the list of actions. + * @param[out] error + * Pointer to the error structure. + * + * @return + * Pointer to mlx5_flow object on success, otherwise NULL and rte_errno + * is set. 
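+ *
+ * Within the generic mlx5 flow layer the Verbs driver ops run in a fixed
+ * order; roughly (error handling omitted, see the ops table at the end of
+ * this file):
+ *
+ * @code
+ *   // .validate -> .prepare -> .translate -> .apply
+ *   dev_flow = flow_verbs_prepare(dev, attr, items, actions, error);
+ *   ret = flow_verbs_translate(dev, dev_flow, attr, items, actions, error);
+ * @endcode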
+ */
+static struct mlx5_flow *
+flow_verbs_prepare(struct rte_eth_dev *dev,
+		   const struct rte_flow_attr *attr __rte_unused,
+		   const struct rte_flow_item items[],
+		   const struct rte_flow_action actions[],
+		   struct rte_flow_error *error)
+{
+	size_t size = 0;
+	uint32_t handle_idx = 0;
+	struct mlx5_flow *dev_flow;
+	struct mlx5_flow_handle *dev_handle;
+	struct mlx5_priv *priv = dev->data->dev_private;
+
+	size += flow_verbs_get_actions_size(actions);
+	size += flow_verbs_get_items_size(items);
+	if (size > MLX5_VERBS_MAX_SPEC_ACT_SIZE) {
+		rte_flow_error_set(error, E2BIG,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				   "Verbs spec/action size too large");
+		return NULL;
+	}
+	/* Guard against overrunning the temporary device flow array. */
+	if (priv->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) {
+		rte_flow_error_set(error, ENOSPC,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				   "not free temporary device flow");
+		return NULL;
+	}
+	dev_handle = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
+					&handle_idx);
+	if (!dev_handle) {
+		rte_flow_error_set(error, ENOMEM,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				   "not enough memory to create flow handle");
+		return NULL;
+	}
+	/* Multi-threaded flow insertion is not supported. */
+	dev_flow = &((struct mlx5_flow *)priv->inter_flows)[priv->flow_idx++];
+	dev_flow->handle = dev_handle;
+	dev_flow->handle_idx = handle_idx;
+	/* Memcpy is used, so only the size needs to be cleared to 0. */
+	dev_flow->verbs.size = 0;
+	dev_flow->verbs.attr.num_of_specs = 0;
+	dev_flow->ingress = attr->ingress;
+	dev_flow->hash_fields = 0;
+	/* Need to set transfer attribute: not supported in Verbs mode. */
+	return dev_flow;
+}
+
+/**
+ * Fill the flow with Verbs specifications.
+ *
+ * @param[in] dev
+ *   Pointer to Ethernet device.
+ * @param[in, out] dev_flow
+ *   Pointer to the mlx5 flow.
+ * @param[in] attr
+ *   Pointer to the flow attributes.
+ * @param[in] items
+ *   Pointer to the list of items.
+ * @param[in] actions
+ *   Pointer to the list of actions.
+ * @param[out] error
+ *   Pointer to the error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
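+ *
+ * For illustration, a pattern/action set this path can translate, as an
+ * application would submit it through rte_flow_create() (port and queue
+ * numbers are placeholders):
+ *
+ * @code
+ *   uint16_t queues[2] = { 0, 1 };
+ *   struct rte_flow_action_rss rss_conf = {
+ *       .types = ETH_RSS_NONFRAG_IPV4_UDP,
+ *       .queue_num = 2,
+ *       .queue = queues,
+ *   };
+ *   struct rte_flow_item pattern[] = {
+ *       { .type = RTE_FLOW_ITEM_TYPE_ETH },
+ *       { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
+ *       { .type = RTE_FLOW_ITEM_TYPE_UDP },
+ *       { .type = RTE_FLOW_ITEM_TYPE_END },
+ *   };
+ *   struct rte_flow_action actions[] = {
+ *       { .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss_conf },
+ *       { .type = RTE_FLOW_ACTION_TYPE_END },
+ *   };
+ *   struct rte_flow_attr attr = { .ingress = 1 };
+ *   struct rte_flow_error err;
+ *   struct rte_flow *flow = rte_flow_create(0, &attr, pattern, actions, &err);
+ * @endcode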
+ */ +static int +flow_verbs_translate(struct rte_eth_dev *dev, + struct mlx5_flow *dev_flow, + const struct rte_flow_attr *attr, + const struct rte_flow_item items[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + uint64_t item_flags = 0; + uint64_t action_flags = 0; + uint64_t priority = attr->priority; + uint32_t subpriority = 0; + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_flow_rss_desc *rss_desc = &((struct mlx5_flow_rss_desc *) + priv->rss_desc) + [!!priv->flow_nested_idx]; + + if (priority == MLX5_FLOW_PRIO_RSVD) + priority = priv->config.flow_prio - 1; + for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { + int ret; + + switch (actions->type) { + case RTE_FLOW_ACTION_TYPE_VOID: + break; + case RTE_FLOW_ACTION_TYPE_FLAG: + flow_verbs_translate_action_flag(dev_flow, actions); + action_flags |= MLX5_FLOW_ACTION_FLAG; + dev_flow->handle->mark = 1; + break; + case RTE_FLOW_ACTION_TYPE_MARK: + flow_verbs_translate_action_mark(dev_flow, actions); + action_flags |= MLX5_FLOW_ACTION_MARK; + dev_flow->handle->mark = 1; + break; + case RTE_FLOW_ACTION_TYPE_DROP: + flow_verbs_translate_action_drop(dev_flow, actions); + action_flags |= MLX5_FLOW_ACTION_DROP; + dev_flow->handle->fate_action = MLX5_FLOW_FATE_DROP; + break; + case RTE_FLOW_ACTION_TYPE_QUEUE: + flow_verbs_translate_action_queue(rss_desc, actions); + action_flags |= MLX5_FLOW_ACTION_QUEUE; + dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE; + break; + case RTE_FLOW_ACTION_TYPE_RSS: + flow_verbs_translate_action_rss(rss_desc, actions); + action_flags |= MLX5_FLOW_ACTION_RSS; + dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE; + break; + case RTE_FLOW_ACTION_TYPE_COUNT: + ret = flow_verbs_translate_action_count(dev_flow, + actions, + dev, error); + if (ret < 0) + return ret; + action_flags |= MLX5_FLOW_ACTION_COUNT; + break; + default: + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + actions, + "action not supported"); + } + } + dev_flow->act_flags = action_flags; + for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) { + int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); + + switch (items->type) { + case RTE_FLOW_ITEM_TYPE_VOID: + break; + case RTE_FLOW_ITEM_TYPE_ETH: + flow_verbs_translate_item_eth(dev_flow, items, + item_flags); + subpriority = MLX5_PRIORITY_MAP_L2; + item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 : + MLX5_FLOW_LAYER_OUTER_L2; + break; + case RTE_FLOW_ITEM_TYPE_VLAN: + flow_verbs_translate_item_vlan(dev_flow, items, + item_flags); + subpriority = MLX5_PRIORITY_MAP_L2; + item_flags |= tunnel ? (MLX5_FLOW_LAYER_INNER_L2 | + MLX5_FLOW_LAYER_INNER_VLAN) : + (MLX5_FLOW_LAYER_OUTER_L2 | + MLX5_FLOW_LAYER_OUTER_VLAN); + break; + case RTE_FLOW_ITEM_TYPE_IPV4: + flow_verbs_translate_item_ipv4(dev_flow, items, + item_flags); + subpriority = MLX5_PRIORITY_MAP_L3; + dev_flow->hash_fields |= + mlx5_flow_hashfields_adjust + (rss_desc, tunnel, + MLX5_IPV4_LAYER_TYPES, + MLX5_IPV4_IBV_RX_HASH); + item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 : + MLX5_FLOW_LAYER_OUTER_L3_IPV4; + break; + case RTE_FLOW_ITEM_TYPE_IPV6: + flow_verbs_translate_item_ipv6(dev_flow, items, + item_flags); + subpriority = MLX5_PRIORITY_MAP_L3; + dev_flow->hash_fields |= + mlx5_flow_hashfields_adjust + (rss_desc, tunnel, + MLX5_IPV6_LAYER_TYPES, + MLX5_IPV6_IBV_RX_HASH); + item_flags |= tunnel ? 
MLX5_FLOW_LAYER_INNER_L3_IPV6 : + MLX5_FLOW_LAYER_OUTER_L3_IPV6; + break; + case RTE_FLOW_ITEM_TYPE_TCP: + flow_verbs_translate_item_tcp(dev_flow, items, + item_flags); + subpriority = MLX5_PRIORITY_MAP_L4; + dev_flow->hash_fields |= + mlx5_flow_hashfields_adjust + (rss_desc, tunnel, ETH_RSS_TCP, + (IBV_RX_HASH_SRC_PORT_TCP | + IBV_RX_HASH_DST_PORT_TCP)); + item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP : + MLX5_FLOW_LAYER_OUTER_L4_TCP; + break; + case RTE_FLOW_ITEM_TYPE_UDP: + flow_verbs_translate_item_udp(dev_flow, items, + item_flags); + subpriority = MLX5_PRIORITY_MAP_L4; + dev_flow->hash_fields |= + mlx5_flow_hashfields_adjust + (rss_desc, tunnel, ETH_RSS_UDP, + (IBV_RX_HASH_SRC_PORT_UDP | + IBV_RX_HASH_DST_PORT_UDP)); + item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP : + MLX5_FLOW_LAYER_OUTER_L4_UDP; + break; + case RTE_FLOW_ITEM_TYPE_VXLAN: + flow_verbs_translate_item_vxlan(dev_flow, items, + item_flags); + subpriority = MLX5_PRIORITY_MAP_L2; + item_flags |= MLX5_FLOW_LAYER_VXLAN; + break; + case RTE_FLOW_ITEM_TYPE_VXLAN_GPE: + flow_verbs_translate_item_vxlan_gpe(dev_flow, items, + item_flags); + subpriority = MLX5_PRIORITY_MAP_L2; + item_flags |= MLX5_FLOW_LAYER_VXLAN_GPE; + break; + case RTE_FLOW_ITEM_TYPE_GRE: + flow_verbs_translate_item_gre(dev_flow, items, + item_flags); + subpriority = MLX5_PRIORITY_MAP_L2; + item_flags |= MLX5_FLOW_LAYER_GRE; + break; + case RTE_FLOW_ITEM_TYPE_MPLS: + flow_verbs_translate_item_mpls(dev_flow, items, + item_flags); + subpriority = MLX5_PRIORITY_MAP_L2; + item_flags |= MLX5_FLOW_LAYER_MPLS; + break; + default: + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + NULL, + "item not supported"); + } + } + dev_flow->handle->layers = item_flags; + /* Other members of attr will be ignored. */ + dev_flow->verbs.attr.priority = + mlx5_flow_adjust_priority(dev, priority, subpriority); + dev_flow->verbs.attr.port = (uint8_t)priv->ibv_port; + return 0; +} + +/** + * Remove the flow from the NIC but keeps it in memory. + * + * @param[in] dev + * Pointer to the Ethernet device structure. + * @param[in, out] flow + * Pointer to flow structure. + */ +static void +flow_verbs_remove(struct rte_eth_dev *dev, struct rte_flow *flow) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_flow_handle *handle; + uint32_t handle_idx; + + if (!flow) + return; + SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles, + handle_idx, handle, next) { + if (handle->ib_flow) { + claim_zero(mlx5_glue->destroy_flow(handle->ib_flow)); + handle->ib_flow = NULL; + } + /* hrxq is union, don't touch it only the flag is set. */ + if (handle->rix_hrxq) { + if (handle->fate_action == MLX5_FLOW_FATE_DROP) { + mlx5_hrxq_drop_release(dev); + handle->rix_hrxq = 0; + } else if (handle->fate_action == + MLX5_FLOW_FATE_QUEUE) { + mlx5_hrxq_release(dev, handle->rix_hrxq); + handle->rix_hrxq = 0; + } + } + if (handle->vf_vlan.tag && handle->vf_vlan.created) + mlx5_vlan_vmwa_release(dev, &handle->vf_vlan); + } +} + +/** + * Remove the flow from the NIC and the memory. + * + * @param[in] dev + * Pointer to the Ethernet device structure. + * @param[in, out] flow + * Pointer to flow structure. 
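+ *
+ * From the application's point of view this is reached through
+ * rte_flow_destroy() or rte_flow_flush(), for example (the port ID is a
+ * placeholder, error reporting via printf() for brevity):
+ *
+ * @code
+ *   struct rte_flow_error err;
+ *
+ *   if (rte_flow_destroy(0, flow, &err) != 0)
+ *       printf("flow destroy failed: %s\n",
+ *              err.message ? err.message : "(no message)");
+ * @endcode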
+ */ +static void +flow_verbs_destroy(struct rte_eth_dev *dev, struct rte_flow *flow) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_flow_handle *handle; + + if (!flow) + return; + flow_verbs_remove(dev, flow); + while (flow->dev_handles) { + uint32_t tmp_idx = flow->dev_handles; + + handle = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], + tmp_idx); + if (!handle) + return; + flow->dev_handles = handle->next.next; + mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], + tmp_idx); + } + if (flow->counter) { + flow_verbs_counter_release(dev, flow->counter); + flow->counter = 0; + } +} + +/** + * Apply the flow to the NIC. + * + * @param[in] dev + * Pointer to the Ethernet device structure. + * @param[in, out] flow + * Pointer to flow structure. + * @param[out] error + * Pointer to error structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +flow_verbs_apply(struct rte_eth_dev *dev, struct rte_flow *flow, + struct rte_flow_error *error) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_flow_handle *handle; + struct mlx5_flow *dev_flow; + struct mlx5_hrxq *hrxq; + uint32_t dev_handles; + int err; + int idx; + + for (idx = priv->flow_idx - 1; idx >= priv->flow_nested_idx; idx--) { + dev_flow = &((struct mlx5_flow *)priv->inter_flows)[idx]; + handle = dev_flow->handle; + if (handle->fate_action == MLX5_FLOW_FATE_DROP) { + hrxq = mlx5_hrxq_drop_new(dev); + if (!hrxq) { + rte_flow_error_set + (error, errno, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "cannot get drop hash queue"); + goto error; + } + } else { + uint32_t hrxq_idx; + struct mlx5_flow_rss_desc *rss_desc = + &((struct mlx5_flow_rss_desc *)priv->rss_desc) + [!!priv->flow_nested_idx]; + + MLX5_ASSERT(rss_desc->queue_num); + hrxq_idx = mlx5_hrxq_get(dev, rss_desc->key, + MLX5_RSS_HASH_KEY_LEN, + dev_flow->hash_fields, + rss_desc->queue, + rss_desc->queue_num); + if (!hrxq_idx) + hrxq_idx = mlx5_hrxq_new(dev, rss_desc->key, + MLX5_RSS_HASH_KEY_LEN, + dev_flow->hash_fields, + rss_desc->queue, + rss_desc->queue_num, + !!(handle->layers & + MLX5_FLOW_LAYER_TUNNEL)); + hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], + hrxq_idx); + if (!hrxq) { + rte_flow_error_set + (error, rte_errno, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "cannot get hash queue"); + goto error; + } + handle->rix_hrxq = hrxq_idx; + } + MLX5_ASSERT(hrxq); + handle->ib_flow = mlx5_glue->create_flow(hrxq->qp, + &dev_flow->verbs.attr); + if (!handle->ib_flow) { + rte_flow_error_set(error, errno, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "hardware refuses to create flow"); + goto error; + } + if (priv->vmwa_context && + handle->vf_vlan.tag && !handle->vf_vlan.created) { + /* + * The rule contains the VLAN pattern. + * For VF we are going to create VLAN + * interface to make hypervisor set correct + * e-Switch vport context. + */ + mlx5_vlan_vmwa_acquire(dev, &handle->vf_vlan); + } + } + return 0; +error: + err = rte_errno; /* Save rte_errno before cleanup. */ + SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles, + dev_handles, handle, next) { + /* hrxq is union, don't touch it only the flag is set. 
*/ + if (handle->rix_hrxq) { + if (handle->fate_action == MLX5_FLOW_FATE_DROP) { + mlx5_hrxq_drop_release(dev); + handle->rix_hrxq = 0; + } else if (handle->fate_action == + MLX5_FLOW_FATE_QUEUE) { + mlx5_hrxq_release(dev, handle->rix_hrxq); + handle->rix_hrxq = 0; + } + } + if (handle->vf_vlan.tag && handle->vf_vlan.created) + mlx5_vlan_vmwa_release(dev, &handle->vf_vlan); + } + rte_errno = err; /* Restore rte_errno. */ + return -rte_errno; +} + +/** + * Query a flow. + * + * @see rte_flow_query() + * @see rte_flow_ops + */ +static int +flow_verbs_query(struct rte_eth_dev *dev, + struct rte_flow *flow, + const struct rte_flow_action *actions, + void *data, + struct rte_flow_error *error) +{ + int ret = -EINVAL; + + for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { + switch (actions->type) { + case RTE_FLOW_ACTION_TYPE_VOID: + break; + case RTE_FLOW_ACTION_TYPE_COUNT: + ret = flow_verbs_counter_query(dev, flow, data, error); + break; + default: + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + actions, + "action not supported"); + } + } + return ret; +} + +const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops = { + .validate = flow_verbs_validate, + .prepare = flow_verbs_prepare, + .translate = flow_verbs_translate, + .apply = flow_verbs_apply, + .remove = flow_verbs_remove, + .destroy = flow_verbs_destroy, + .query = flow_verbs_query, +}; diff --git a/src/spdk/dpdk/drivers/net/mlx5/mlx5_mac.c b/src/spdk/dpdk/drivers/net/mlx5/mlx5_mac.c new file mode 100644 index 000000000..291f7724c --- /dev/null +++ b/src/spdk/dpdk/drivers/net/mlx5/mlx5_mac.c @@ -0,0 +1,255 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2015 6WIND S.A. + * Copyright 2015 Mellanox Technologies, Ltd + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +/* Verbs header. */ +/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */ +#ifdef PEDANTIC +#pragma GCC diagnostic ignored "-Wpedantic" +#endif +#include +#ifdef PEDANTIC +#pragma GCC diagnostic error "-Wpedantic" +#endif + +#include +#include +#include + +#include "mlx5_defs.h" +#include "mlx5.h" +#include "mlx5_utils.h" +#include "mlx5_rxtx.h" + +/** + * Get MAC address by querying netdevice. + * + * @param[in] dev + * Pointer to Ethernet device. + * @param[out] mac + * MAC address output buffer. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_get_mac(struct rte_eth_dev *dev, uint8_t (*mac)[RTE_ETHER_ADDR_LEN]) +{ + struct ifreq request; + int ret; + + ret = mlx5_ifreq(dev, SIOCGIFHWADDR, &request); + if (ret) + return ret; + memcpy(mac, request.ifr_hwaddr.sa_data, RTE_ETHER_ADDR_LEN); + return 0; +} + +/** + * Remove a MAC address from the internal array. + * + * @param dev + * Pointer to Ethernet device structure. + * @param index + * MAC address index. + */ +static void +mlx5_internal_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index) +{ + struct mlx5_priv *priv = dev->data->dev_private; + const int vf = priv->config.vf; + + MLX5_ASSERT(index < MLX5_MAX_MAC_ADDRESSES); + if (rte_is_zero_ether_addr(&dev->data->mac_addrs[index])) + return; + if (vf) + mlx5_nl_mac_addr_remove(priv->nl_socket_route, + mlx5_ifindex(dev), priv->mac_own, + &dev->data->mac_addrs[index], index); + memset(&dev->data->mac_addrs[index], 0, sizeof(struct rte_ether_addr)); +} + +/** + * Adds a MAC address to the internal array. + * + * @param dev + * Pointer to Ethernet device structure. 
+ * @param mac_addr + * MAC address to register. + * @param index + * MAC address index. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +mlx5_internal_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac, + uint32_t index) +{ + struct mlx5_priv *priv = dev->data->dev_private; + const int vf = priv->config.vf; + unsigned int i; + + MLX5_ASSERT(index < MLX5_MAX_MAC_ADDRESSES); + if (rte_is_zero_ether_addr(mac)) { + rte_errno = EINVAL; + return -rte_errno; + } + /* First, make sure this address isn't already configured. */ + for (i = 0; (i != MLX5_MAX_MAC_ADDRESSES); ++i) { + /* Skip this index, it's going to be reconfigured. */ + if (i == index) + continue; + if (memcmp(&dev->data->mac_addrs[i], mac, sizeof(*mac))) + continue; + /* Address already configured elsewhere, return with error. */ + rte_errno = EADDRINUSE; + return -rte_errno; + } + if (vf) { + int ret = mlx5_nl_mac_addr_add(priv->nl_socket_route, + mlx5_ifindex(dev), priv->mac_own, + mac, index); + + if (ret) + return ret; + } + dev->data->mac_addrs[index] = *mac; + return 0; +} + +/** + * DPDK callback to remove a MAC address. + * + * @param dev + * Pointer to Ethernet device structure. + * @param index + * MAC address index. + */ +void +mlx5_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index) +{ + int ret; + + if (index >= MLX5_MAX_UC_MAC_ADDRESSES) + return; + mlx5_internal_mac_addr_remove(dev, index); + if (!dev->data->promiscuous) { + ret = mlx5_traffic_restart(dev); + if (ret) + DRV_LOG(ERR, "port %u cannot restart traffic: %s", + dev->data->port_id, strerror(rte_errno)); + } +} + +/** + * DPDK callback to add a MAC address. + * + * @param dev + * Pointer to Ethernet device structure. + * @param mac_addr + * MAC address to register. + * @param index + * MAC address index. + * @param vmdq + * VMDq pool index to associate address with (ignored). + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac, + uint32_t index, uint32_t vmdq __rte_unused) +{ + int ret; + + if (index >= MLX5_MAX_UC_MAC_ADDRESSES) { + rte_errno = EINVAL; + return -rte_errno; + } + ret = mlx5_internal_mac_addr_add(dev, mac, index); + if (ret < 0) + return ret; + if (!dev->data->promiscuous) + return mlx5_traffic_restart(dev); + return 0; +} + +/** + * DPDK callback to set primary MAC address. + * + * @param dev + * Pointer to Ethernet device structure. + * @param mac_addr + * MAC address to register. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr) +{ + uint16_t port_id; + struct mlx5_priv *priv = dev->data->dev_private; + + /* Configuring the VF instead of its representor. 
*/ + if (priv->representor) { + DRV_LOG(DEBUG, "VF represented by port %u setting primary MAC address", + dev->data->port_id); + RTE_ETH_FOREACH_DEV_SIBLING(port_id, dev->data->port_id) { + priv = rte_eth_devices[port_id].data->dev_private; + if (priv->master == 1) { + priv = dev->data->dev_private; + return mlx5_nl_vf_mac_addr_modify + (priv->nl_socket_route, + mlx5_ifindex(&rte_eth_devices[port_id]), + mac_addr, priv->representor_id); + } + } + rte_errno = -ENOTSUP; + return rte_errno; + } + + DRV_LOG(DEBUG, "port %u setting primary MAC address", + dev->data->port_id); + return mlx5_mac_addr_add(dev, mac_addr, 0, 0); +} + +/** + * DPDK callback to set multicast addresses list. + * + * @see rte_eth_dev_set_mc_addr_list() + */ +int +mlx5_set_mc_addr_list(struct rte_eth_dev *dev, + struct rte_ether_addr *mc_addr_set, uint32_t nb_mc_addr) +{ + uint32_t i; + int ret; + + if (nb_mc_addr >= MLX5_MAX_MC_MAC_ADDRESSES) { + rte_errno = ENOSPC; + return -rte_errno; + } + for (i = MLX5_MAX_UC_MAC_ADDRESSES; i != MLX5_MAX_MAC_ADDRESSES; ++i) + mlx5_internal_mac_addr_remove(dev, i); + i = MLX5_MAX_UC_MAC_ADDRESSES; + while (nb_mc_addr--) { + ret = mlx5_internal_mac_addr_add(dev, mc_addr_set++, i++); + if (ret) + return ret; + } + if (!dev->data->promiscuous) + return mlx5_traffic_restart(dev); + return 0; +} diff --git a/src/spdk/dpdk/drivers/net/mlx5/mlx5_mp.c b/src/spdk/dpdk/drivers/net/mlx5/mlx5_mp.c new file mode 100644 index 000000000..7ad322d47 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/mlx5/mlx5_mp.c @@ -0,0 +1,211 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2019 6WIND S.A. + * Copyright 2019 Mellanox Technologies, Ltd + */ + +#include +#include + +#include +#include +#include + +#include +#include + +#include "mlx5.h" +#include "mlx5_rxtx.h" +#include "mlx5_utils.h" + +int +mlx5_mp_primary_handle(const struct rte_mp_msg *mp_msg, const void *peer) +{ + struct rte_mp_msg mp_res; + struct mlx5_mp_param *res = (struct mlx5_mp_param *)mp_res.param; + const struct mlx5_mp_param *param = + (const struct mlx5_mp_param *)mp_msg->param; + struct rte_eth_dev *dev; + struct mlx5_priv *priv; + struct mr_cache_entry entry; + uint32_t lkey; + int ret; + + MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY); + if (!rte_eth_dev_is_valid_port(param->port_id)) { + rte_errno = ENODEV; + DRV_LOG(ERR, "port %u invalid port ID", param->port_id); + return -rte_errno; + } + dev = &rte_eth_devices[param->port_id]; + priv = dev->data->dev_private; + switch (param->type) { + case MLX5_MP_REQ_CREATE_MR: + mp_init_msg(&priv->mp_id, &mp_res, param->type); + lkey = mlx5_mr_create_primary(priv->sh->pd, + &priv->sh->share_cache, + &entry, param->args.addr, + priv->config.mr_ext_memseg_en); + if (lkey == UINT32_MAX) + res->result = -rte_errno; + ret = rte_mp_reply(&mp_res, peer); + break; + case MLX5_MP_REQ_VERBS_CMD_FD: + mp_init_msg(&priv->mp_id, &mp_res, param->type); + mp_res.num_fds = 1; + mp_res.fds[0] = priv->sh->ctx->cmd_fd; + res->result = 0; + ret = rte_mp_reply(&mp_res, peer); + break; + case MLX5_MP_REQ_QUEUE_STATE_MODIFY: + mp_init_msg(&priv->mp_id, &mp_res, param->type); + res->result = mlx5_queue_state_modify_primary + (dev, ¶m->args.state_modify); + ret = rte_mp_reply(&mp_res, peer); + break; + default: + rte_errno = EINVAL; + DRV_LOG(ERR, "port %u invalid mp request type", + dev->data->port_id); + return -rte_errno; + } + return ret; +} + +/** + * IPC message handler of a secondary process. + * + * @param[in] dev + * Pointer to Ethernet structure. 
+ * @param[in] peer + * Pointer to the peer socket path. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_mp_secondary_handle(const struct rte_mp_msg *mp_msg, const void *peer) +{ + struct rte_mp_msg mp_res; + struct mlx5_mp_param *res = (struct mlx5_mp_param *)mp_res.param; + const struct mlx5_mp_param *param = + (const struct mlx5_mp_param *)mp_msg->param; + struct rte_eth_dev *dev; + struct mlx5_priv *priv; + int ret; + + MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY); + if (!rte_eth_dev_is_valid_port(param->port_id)) { + rte_errno = ENODEV; + DRV_LOG(ERR, "port %u invalid port ID", param->port_id); + return -rte_errno; + } + dev = &rte_eth_devices[param->port_id]; + priv = dev->data->dev_private; + switch (param->type) { + case MLX5_MP_REQ_START_RXTX: + DRV_LOG(INFO, "port %u starting datapath", dev->data->port_id); + rte_mb(); + dev->rx_pkt_burst = mlx5_select_rx_function(dev); + dev->tx_pkt_burst = mlx5_select_tx_function(dev); + mp_init_msg(&priv->mp_id, &mp_res, param->type); + res->result = 0; + ret = rte_mp_reply(&mp_res, peer); + break; + case MLX5_MP_REQ_STOP_RXTX: + DRV_LOG(INFO, "port %u stopping datapath", dev->data->port_id); + dev->rx_pkt_burst = removed_rx_burst; + dev->tx_pkt_burst = removed_tx_burst; + rte_mb(); + mp_init_msg(&priv->mp_id, &mp_res, param->type); + res->result = 0; + ret = rte_mp_reply(&mp_res, peer); + break; + default: + rte_errno = EINVAL; + DRV_LOG(ERR, "port %u invalid mp request type", + dev->data->port_id); + return -rte_errno; + } + return ret; +} + +/** + * Broadcast request of stopping/starting data-path to secondary processes. + * + * @param[in] dev + * Pointer to Ethernet structure. + * @param[in] type + * Request type. + */ +static void +mp_req_on_rxtx(struct rte_eth_dev *dev, enum mlx5_mp_req_type type) +{ + struct rte_mp_msg mp_req; + struct rte_mp_msg *mp_res; + struct rte_mp_reply mp_rep; + struct mlx5_mp_param *res; + struct timespec ts = {.tv_sec = MLX5_MP_REQ_TIMEOUT_SEC, .tv_nsec = 0}; + struct mlx5_priv *priv = dev->data->dev_private; + int ret; + int i; + + MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY); + if (!mlx5_shared_data->secondary_cnt) + return; + if (type != MLX5_MP_REQ_START_RXTX && type != MLX5_MP_REQ_STOP_RXTX) { + DRV_LOG(ERR, "port %u unknown request (req_type %d)", + dev->data->port_id, type); + return; + } + mp_init_msg(&priv->mp_id, &mp_req, type); + ret = rte_mp_request_sync(&mp_req, &mp_rep, &ts); + if (ret) { + if (rte_errno != ENOTSUP) + DRV_LOG(ERR, "port %u failed to request stop/start Rx/Tx (%d)", + dev->data->port_id, type); + goto exit; + } + if (mp_rep.nb_sent != mp_rep.nb_received) { + DRV_LOG(ERR, + "port %u not all secondaries responded (req_type %d)", + dev->data->port_id, type); + goto exit; + } + for (i = 0; i < mp_rep.nb_received; i++) { + mp_res = &mp_rep.msgs[i]; + res = (struct mlx5_mp_param *)mp_res->param; + if (res->result) { + DRV_LOG(ERR, "port %u request failed on secondary #%d", + dev->data->port_id, i); + goto exit; + } + } +exit: + free(mp_rep.msgs); +} + +/** + * Broadcast request of starting data-path to secondary processes. The request + * is synchronous. + * + * @param[in] dev + * Pointer to Ethernet structure. + */ +void +mlx5_mp_req_start_rxtx(struct rte_eth_dev *dev) +{ + mp_req_on_rxtx(dev, MLX5_MP_REQ_START_RXTX); +} + +/** + * Broadcast request of stopping data-path to secondary processes. The request + * is synchronous. + * + * @param[in] dev + * Pointer to Ethernet structure. 
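+ *
+ * A typical pairing in the primary process looks like the sketch below (the
+ * real callers are the device start/stop and reconfiguration paths):
+ *
+ * @code
+ *   // Secondaries switch to the removed_*_burst stubs.
+ *   mlx5_mp_req_stop_rxtx(dev);
+ *   // ... reconfigure queues, then let secondaries re-select burst functions.
+ *   mlx5_mp_req_start_rxtx(dev);
+ * @endcode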
+ */ +void +mlx5_mp_req_stop_rxtx(struct rte_eth_dev *dev) +{ + mp_req_on_rxtx(dev, MLX5_MP_REQ_STOP_RXTX); +} diff --git a/src/spdk/dpdk/drivers/net/mlx5/mlx5_mr.c b/src/spdk/dpdk/drivers/net/mlx5/mlx5_mr.c new file mode 100644 index 000000000..2b4b3e289 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/mlx5/mlx5_mr.c @@ -0,0 +1,551 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2016 6WIND S.A. + * Copyright 2016 Mellanox Technologies, Ltd + */ + +#ifdef PEDANTIC +#pragma GCC diagnostic ignored "-Wpedantic" +#endif +#include +#ifdef PEDANTIC +#pragma GCC diagnostic error "-Wpedantic" +#endif + +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "mlx5.h" +#include "mlx5_mr.h" +#include "mlx5_rxtx.h" + +struct mr_find_contig_memsegs_data { + uintptr_t addr; + uintptr_t start; + uintptr_t end; + const struct rte_memseg_list *msl; +}; + +struct mr_update_mp_data { + struct rte_eth_dev *dev; + struct mlx5_mr_ctrl *mr_ctrl; + int ret; +}; + +/** + * Callback for memory free event. Iterate freed memsegs and check whether it + * belongs to an existing MR. If found, clear the bit from bitmap of MR. As a + * result, the MR would be fragmented. If it becomes empty, the MR will be freed + * later by mlx5_mr_garbage_collect(). Even if this callback is called from a + * secondary process, the garbage collector will be called in primary process + * as the secondary process can't call mlx5_mr_create(). + * + * The global cache must be rebuilt if there's any change and this event has to + * be propagated to dataplane threads to flush the local caches. + * + * @param sh + * Pointer to the Ethernet device shared context. + * @param addr + * Address of freed memory. + * @param len + * Size of freed memory. + */ +static void +mlx5_mr_mem_event_free_cb(struct mlx5_ibv_shared *sh, + const void *addr, size_t len) +{ + const struct rte_memseg_list *msl; + struct mlx5_mr *mr; + int ms_n; + int i; + int rebuild = 0; + + DEBUG("device %s free callback: addr=%p, len=%zu", + sh->ibdev_name, addr, len); + msl = rte_mem_virt2memseg_list(addr); + /* addr and len must be page-aligned. */ + MLX5_ASSERT((uintptr_t)addr == + RTE_ALIGN((uintptr_t)addr, msl->page_sz)); + MLX5_ASSERT(len == RTE_ALIGN(len, msl->page_sz)); + ms_n = len / msl->page_sz; + rte_rwlock_write_lock(&sh->share_cache.rwlock); + /* Clear bits of freed memsegs from MR. */ + for (i = 0; i < ms_n; ++i) { + const struct rte_memseg *ms; + struct mr_cache_entry entry; + uintptr_t start; + int ms_idx; + uint32_t pos; + + /* Find MR having this memseg. */ + start = (uintptr_t)addr + i * msl->page_sz; + mr = mlx5_mr_lookup_list(&sh->share_cache, &entry, start); + if (mr == NULL) + continue; + MLX5_ASSERT(mr->msl); /* Can't be external memory. */ + ms = rte_mem_virt2memseg((void *)start, msl); + MLX5_ASSERT(ms != NULL); + MLX5_ASSERT(msl->page_sz == ms->hugepage_sz); + ms_idx = rte_fbarray_find_idx(&msl->memseg_arr, ms); + pos = ms_idx - mr->ms_base_idx; + MLX5_ASSERT(rte_bitmap_get(mr->ms_bmp, pos)); + MLX5_ASSERT(pos < mr->ms_bmp_n); + DEBUG("device %s MR(%p): clear bitmap[%u] for addr %p", + sh->ibdev_name, (void *)mr, pos, (void *)start); + rte_bitmap_clear(mr->ms_bmp, pos); + if (--mr->ms_n == 0) { + LIST_REMOVE(mr, mr); + LIST_INSERT_HEAD(&sh->share_cache.mr_free_list, mr, mr); + DEBUG("device %s remove MR(%p) from list", + sh->ibdev_name, (void *)mr); + } + /* + * MR is fragmented or will be freed. the global cache must be + * rebuilt. 
+ */ + rebuild = 1; + } + if (rebuild) { + mlx5_mr_rebuild_cache(&sh->share_cache); + /* + * Flush local caches by propagating invalidation across cores. + * rte_smp_wmb() is enough to synchronize this event. If one of + * freed memsegs is seen by other core, that means the memseg + * has been allocated by allocator, which will come after this + * free call. Therefore, this store instruction (incrementing + * generation below) will be guaranteed to be seen by other core + * before the core sees the newly allocated memory. + */ + ++sh->share_cache.dev_gen; + DEBUG("broadcasting local cache flush, gen=%d", + sh->share_cache.dev_gen); + rte_smp_wmb(); + } + rte_rwlock_write_unlock(&sh->share_cache.rwlock); +} + +/** + * Callback for memory event. This can be called from both primary and secondary + * process. + * + * @param event_type + * Memory event type. + * @param addr + * Address of memory. + * @param len + * Size of memory. + */ +void +mlx5_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr, + size_t len, void *arg __rte_unused) +{ + struct mlx5_ibv_shared *sh; + struct mlx5_dev_list *dev_list = &mlx5_shared_data->mem_event_cb_list; + + /* Must be called from the primary process. */ + MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY); + switch (event_type) { + case RTE_MEM_EVENT_FREE: + rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock); + /* Iterate all the existing mlx5 devices. */ + LIST_FOREACH(sh, dev_list, mem_event_cb) + mlx5_mr_mem_event_free_cb(sh, addr, len); + rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock); + break; + case RTE_MEM_EVENT_ALLOC: + default: + break; + } +} + +/** + * Bottom-half of LKey search on Rx. + * + * @param rxq + * Pointer to Rx queue structure. + * @param addr + * Search key. + * + * @return + * Searched LKey on success, UINT32_MAX on no match. + */ +uint32_t +mlx5_rx_addr2mr_bh(struct mlx5_rxq_data *rxq, uintptr_t addr) +{ + struct mlx5_rxq_ctrl *rxq_ctrl = + container_of(rxq, struct mlx5_rxq_ctrl, rxq); + struct mlx5_mr_ctrl *mr_ctrl = &rxq->mr_ctrl; + struct mlx5_priv *priv = rxq_ctrl->priv; + + return mlx5_mr_addr2mr_bh(priv->sh->pd, &priv->mp_id, + &priv->sh->share_cache, mr_ctrl, addr, + priv->config.mr_ext_memseg_en); +} + +/** + * Bottom-half of LKey search on Tx. + * + * @param txq + * Pointer to Tx queue structure. + * @param addr + * Search key. + * + * @return + * Searched LKey on success, UINT32_MAX on no match. + */ +static uint32_t +mlx5_tx_addr2mr_bh(struct mlx5_txq_data *txq, uintptr_t addr) +{ + struct mlx5_txq_ctrl *txq_ctrl = + container_of(txq, struct mlx5_txq_ctrl, txq); + struct mlx5_mr_ctrl *mr_ctrl = &txq->mr_ctrl; + struct mlx5_priv *priv = txq_ctrl->priv; + + return mlx5_mr_addr2mr_bh(priv->sh->pd, &priv->mp_id, + &priv->sh->share_cache, mr_ctrl, addr, + priv->config.mr_ext_memseg_en); +} + +/** + * Bottom-half of LKey search on Tx. If it can't be searched in the memseg + * list, register the mempool of the mbuf as externally allocated memory. + * + * @param txq + * Pointer to Tx queue structure. + * @param mb + * Pointer to mbuf. + * + * @return + * Searched LKey on success, UINT32_MAX on no match. + */ +uint32_t +mlx5_tx_mb2mr_bh(struct mlx5_txq_data *txq, struct rte_mbuf *mb) +{ + uintptr_t addr = (uintptr_t)mb->buf_addr; + uint32_t lkey; + + lkey = mlx5_tx_addr2mr_bh(txq, addr); + if (lkey == UINT32_MAX && rte_errno == ENXIO) { + /* Mempool may have externally allocated memory. 
*/ + return mlx5_tx_update_ext_mp(txq, addr, mlx5_mb2mp(mb)); + } + return lkey; +} + +/** + * Called during rte_mempool_mem_iter() by mlx5_mr_update_ext_mp(). + * + * Externally allocated chunk is registered and a MR is created for the chunk. + * The MR object is added to the global list. If memseg list of a MR object + * (mr->msl) is null, the MR object can be regarded as externally allocated + * memory. + * + * Once external memory is registered, it should be static. If the memory is + * freed and the virtual address range has different physical memory mapped + * again, it may cause crash on device due to the wrong translation entry. PMD + * can't track the free event of the external memory for now. + */ +static void +mlx5_mr_update_ext_mp_cb(struct rte_mempool *mp, void *opaque, + struct rte_mempool_memhdr *memhdr, + unsigned mem_idx __rte_unused) +{ + struct mr_update_mp_data *data = opaque; + struct rte_eth_dev *dev = data->dev; + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_ibv_shared *sh = priv->sh; + struct mlx5_mr_ctrl *mr_ctrl = data->mr_ctrl; + struct mlx5_mr *mr = NULL; + uintptr_t addr = (uintptr_t)memhdr->addr; + size_t len = memhdr->len; + struct mr_cache_entry entry; + uint32_t lkey; + + MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY); + /* If already registered, it should return. */ + rte_rwlock_read_lock(&sh->share_cache.rwlock); + lkey = mlx5_mr_lookup_cache(&sh->share_cache, &entry, addr); + rte_rwlock_read_unlock(&sh->share_cache.rwlock); + if (lkey != UINT32_MAX) + return; + DRV_LOG(DEBUG, "port %u register MR for chunk #%d of mempool (%s)", + dev->data->port_id, mem_idx, mp->name); + mr = mlx5_create_mr_ext(sh->pd, addr, len, mp->socket_id); + if (!mr) { + DRV_LOG(WARNING, + "port %u unable to allocate a new MR of" + " mempool (%s).", + dev->data->port_id, mp->name); + data->ret = -1; + return; + } + rte_rwlock_write_lock(&sh->share_cache.rwlock); + LIST_INSERT_HEAD(&sh->share_cache.mr_list, mr, mr); + /* Insert to the global cache table. */ + mlx5_mr_insert_cache(&sh->share_cache, mr); + rte_rwlock_write_unlock(&sh->share_cache.rwlock); + /* Insert to the local cache table */ + mlx5_mr_addr2mr_bh(sh->pd, &priv->mp_id, &sh->share_cache, + mr_ctrl, addr, priv->config.mr_ext_memseg_en); +} + +/** + * Finds the first ethdev that match the pci device. + * The existence of multiple ethdev per pci device is only with representors. + * On such case, it is enough to get only one of the ports as they all share + * the same ibv context. + * + * @param pdev + * Pointer to the PCI device. + * + * @return + * Pointer to the ethdev if found, NULL otherwise. + */ +static struct rte_eth_dev * +pci_dev_to_eth_dev(struct rte_pci_device *pdev) +{ + uint16_t port_id; + + RTE_ETH_FOREACH_DEV_OF(port_id, &pdev->device) + return &rte_eth_devices[port_id]; + return NULL; +} + +/** + * DPDK callback to DMA map external memory to a PCI device. + * + * @param pdev + * Pointer to the PCI device. + * @param addr + * Starting virtual address of memory to be mapped. + * @param iova + * Starting IOVA address of memory to be mapped. + * @param len + * Length of memory segment being mapped. + * + * @return + * 0 on success, negative value on error. 
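+ *
+ * This callback is reached through the generic DMA mapping API when an
+ * application attaches external memory; a minimal sketch (buffer, IOVA,
+ * length and page size are placeholders, the IOVA table and error handling
+ * are omitted):
+ *
+ * @code
+ *   struct rte_eth_dev_info info;
+ *
+ *   rte_eth_dev_info_get(port_id, &info);
+ *   rte_extmem_register(buf, len, NULL, 0, pg_sz);
+ *   rte_dev_dma_map(info.device, buf, iova, len);
+ * @endcode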
+ */ +int +mlx5_dma_map(struct rte_pci_device *pdev, void *addr, + uint64_t iova __rte_unused, size_t len) +{ + struct rte_eth_dev *dev; + struct mlx5_mr *mr; + struct mlx5_priv *priv; + struct mlx5_ibv_shared *sh; + + dev = pci_dev_to_eth_dev(pdev); + if (!dev) { + DRV_LOG(WARNING, "unable to find matching ethdev " + "to PCI device %p", (void *)pdev); + rte_errno = ENODEV; + return -1; + } + priv = dev->data->dev_private; + sh = priv->sh; + mr = mlx5_create_mr_ext(sh->pd, (uintptr_t)addr, len, SOCKET_ID_ANY); + if (!mr) { + DRV_LOG(WARNING, + "port %u unable to dma map", dev->data->port_id); + rte_errno = EINVAL; + return -1; + } + rte_rwlock_write_lock(&sh->share_cache.rwlock); + LIST_INSERT_HEAD(&sh->share_cache.mr_list, mr, mr); + /* Insert to the global cache table. */ + mlx5_mr_insert_cache(&sh->share_cache, mr); + rte_rwlock_write_unlock(&sh->share_cache.rwlock); + return 0; +} + +/** + * DPDK callback to DMA unmap external memory to a PCI device. + * + * @param pdev + * Pointer to the PCI device. + * @param addr + * Starting virtual address of memory to be unmapped. + * @param iova + * Starting IOVA address of memory to be unmapped. + * @param len + * Length of memory segment being unmapped. + * + * @return + * 0 on success, negative value on error. + */ +int +mlx5_dma_unmap(struct rte_pci_device *pdev, void *addr, + uint64_t iova __rte_unused, size_t len __rte_unused) +{ + struct rte_eth_dev *dev; + struct mlx5_priv *priv; + struct mlx5_ibv_shared *sh; + struct mlx5_mr *mr; + struct mr_cache_entry entry; + + dev = pci_dev_to_eth_dev(pdev); + if (!dev) { + DRV_LOG(WARNING, "unable to find matching ethdev " + "to PCI device %p", (void *)pdev); + rte_errno = ENODEV; + return -1; + } + priv = dev->data->dev_private; + sh = priv->sh; + rte_rwlock_read_lock(&sh->share_cache.rwlock); + mr = mlx5_mr_lookup_list(&sh->share_cache, &entry, (uintptr_t)addr); + if (!mr) { + rte_rwlock_read_unlock(&sh->share_cache.rwlock); + DRV_LOG(WARNING, "address 0x%" PRIxPTR " wasn't registered " + "to PCI device %p", (uintptr_t)addr, + (void *)pdev); + rte_errno = EINVAL; + return -1; + } + LIST_REMOVE(mr, mr); + LIST_INSERT_HEAD(&sh->share_cache.mr_free_list, mr, mr); + DEBUG("port %u remove MR(%p) from list", dev->data->port_id, + (void *)mr); + mlx5_mr_rebuild_cache(&sh->share_cache); + /* + * Flush local caches by propagating invalidation across cores. + * rte_smp_wmb() is enough to synchronize this event. If one of + * freed memsegs is seen by other core, that means the memseg + * has been allocated by allocator, which will come after this + * free call. Therefore, this store instruction (incrementing + * generation below) will be guaranteed to be seen by other core + * before the core sees the newly allocated memory. + */ + ++sh->share_cache.dev_gen; + DEBUG("broadcasting local cache flush, gen=%d", + sh->share_cache.dev_gen); + rte_smp_wmb(); + rte_rwlock_read_unlock(&sh->share_cache.rwlock); + return 0; +} + +/** + * Register MR for entire memory chunks in a Mempool having externally allocated + * memory and fill in local cache. + * + * @param dev + * Pointer to Ethernet device. + * @param mr_ctrl + * Pointer to per-queue MR control structure. + * @param mp + * Pointer to registering Mempool. + * + * @return + * 0 on success, -1 on failure. 
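For context, mlx5_dma_map()/mlx5_dma_unmap() below are reached through the generic external-memory path: the application registers the area with EAL, then DMA-maps it to the port's rte_device. The following is a hedged usage sketch, not part of the patch; the huge-page backing, IOVA argument and error handling are simplified and may need adjusting to the actual EAL configuration:

#include <sys/mman.h>
#include <rte_dev.h>
#include <rte_ethdev.h>
#include <rte_memory.h>

#define EXT_MEM_LEN  (4u << 20)         /* 4 MiB, illustrative */
#define EXT_PAGE_SZ  (2u << 20)         /* assumes 2 MiB huge pages */

static void *
map_external_memory(uint16_t port_id)
{
        struct rte_eth_dev_info info;
        void *va;

        va = mmap(NULL, EXT_MEM_LEN, PROT_READ | PROT_WRITE,
                  MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
        if (va == MAP_FAILED)
                return NULL;
        /* Tell EAL about the area; IOVAs are resolved by EAL here. */
        if (rte_extmem_register(va, EXT_MEM_LEN, NULL, 0, EXT_PAGE_SZ) != 0)
                goto err;
        /* DMA-map it for the device backing the port (invokes the PMD hook). */
        if (rte_eth_dev_info_get(port_id, &info) != 0 || info.device == NULL)
                goto err;
        if (rte_dev_dma_map(info.device, va, (uintptr_t)va, EXT_MEM_LEN) != 0)
                goto err;
        return va;
err:
        munmap(va, EXT_MEM_LEN);
        return NULL;
}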
+ */ +static uint32_t +mlx5_mr_update_ext_mp(struct rte_eth_dev *dev, struct mlx5_mr_ctrl *mr_ctrl, + struct rte_mempool *mp) +{ + struct mr_update_mp_data data = { + .dev = dev, + .mr_ctrl = mr_ctrl, + .ret = 0, + }; + + rte_mempool_mem_iter(mp, mlx5_mr_update_ext_mp_cb, &data); + return data.ret; +} + +/** + * Register MR entire memory chunks in a Mempool having externally allocated + * memory and search LKey of the address to return. + * + * @param dev + * Pointer to Ethernet device. + * @param addr + * Search key. + * @param mp + * Pointer to registering Mempool where addr belongs. + * + * @return + * LKey for address on success, UINT32_MAX on failure. + */ +uint32_t +mlx5_tx_update_ext_mp(struct mlx5_txq_data *txq, uintptr_t addr, + struct rte_mempool *mp) +{ + struct mlx5_txq_ctrl *txq_ctrl = + container_of(txq, struct mlx5_txq_ctrl, txq); + struct mlx5_mr_ctrl *mr_ctrl = &txq->mr_ctrl; + struct mlx5_priv *priv = txq_ctrl->priv; + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) { + DRV_LOG(WARNING, + "port %u using address (%p) from unregistered mempool" + " having externally allocated memory" + " in secondary process, please create mempool" + " prior to rte_eth_dev_start()", + PORT_ID(priv), (void *)addr); + return UINT32_MAX; + } + mlx5_mr_update_ext_mp(ETH_DEV(priv), mr_ctrl, mp); + return mlx5_tx_addr2mr_bh(txq, addr); +} + +/* Called during rte_mempool_mem_iter() by mlx5_mr_update_mp(). */ +static void +mlx5_mr_update_mp_cb(struct rte_mempool *mp __rte_unused, void *opaque, + struct rte_mempool_memhdr *memhdr, + unsigned mem_idx __rte_unused) +{ + struct mr_update_mp_data *data = opaque; + struct rte_eth_dev *dev = data->dev; + struct mlx5_priv *priv = dev->data->dev_private; + + uint32_t lkey; + + /* Stop iteration if failed in the previous walk. */ + if (data->ret < 0) + return; + /* Register address of the chunk and update local caches. */ + lkey = mlx5_mr_addr2mr_bh(priv->sh->pd, &priv->mp_id, + &priv->sh->share_cache, data->mr_ctrl, + (uintptr_t)memhdr->addr, + priv->config.mr_ext_memseg_en); + if (lkey == UINT32_MAX) + data->ret = -1; +} + +/** + * Register entire memory chunks in a Mempool. + * + * @param dev + * Pointer to Ethernet device. + * @param mr_ctrl + * Pointer to per-queue MR control structure. + * @param mp + * Pointer to registering Mempool. + * + * @return + * 0 on success, -1 on failure. + */ +int +mlx5_mr_update_mp(struct rte_eth_dev *dev, struct mlx5_mr_ctrl *mr_ctrl, + struct rte_mempool *mp) +{ + struct mr_update_mp_data data = { + .dev = dev, + .mr_ctrl = mr_ctrl, + .ret = 0, + }; + + rte_mempool_mem_iter(mp, mlx5_mr_update_mp_cb, &data); + if (data.ret < 0 && rte_errno == ENXIO) { + /* Mempool may have externally allocated memory. */ + return mlx5_mr_update_ext_mp(dev, mr_ctrl, mp); + } + return data.ret; +} diff --git a/src/spdk/dpdk/drivers/net/mlx5/mlx5_mr.h b/src/spdk/dpdk/drivers/net/mlx5/mlx5_mr.h new file mode 100644 index 000000000..0c5877b3d --- /dev/null +++ b/src/spdk/dpdk/drivers/net/mlx5/mlx5_mr.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2018 6WIND S.A. + * Copyright 2018 Mellanox Technologies, Ltd + */ + +#ifndef RTE_PMD_MLX5_MR_H_ +#define RTE_PMD_MLX5_MR_H_ + +#include +#include +#include + +/* Verbs header. */ +/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. 
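The mr_ctrl handle passed around above implements a two-level lookup: a small per-queue cache scanned on the fast path, and a bottom-half ("_bh") helper consulted only on a miss, which takes the shared lock and refills an entry. A simplified sketch of that split, with made-up types and sizes:

#include <stdint.h>

#define LKEY_CACHE_N 8
#define LKEY_INVALID UINT32_MAX

struct lkey_cache_entry {
        uintptr_t start;        /* chunk start address */
        uintptr_t end;          /* chunk end address   */
        uint32_t lkey;          /* cached key          */
};

struct lkey_cache {
        struct lkey_cache_entry e[LKEY_CACHE_N];
};

/* Placeholder bottom-half: the real code searches the global cache under
 * a read lock, refills one entry and returns the key. */
static uint32_t
lkey_lookup_bh(struct lkey_cache *c __attribute__((unused)), uintptr_t addr
               __attribute__((unused)))
{
        return LKEY_INVALID;
}

static inline uint32_t
lkey_lookup(struct lkey_cache *c, uintptr_t addr)
{
        unsigned int i;

        for (i = 0; i < LKEY_CACHE_N; i++)
                if (addr >= c->e[i].start && addr < c->e[i].end)
                        return c->e[i].lkey;    /* fast-path hit */
        return lkey_lookup_bh(c, addr);         /* miss: bottom-half */
}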
*/ +#ifdef PEDANTIC +#pragma GCC diagnostic ignored "-Wpedantic" +#endif +#include +#include +#ifdef PEDANTIC +#pragma GCC diagnostic error "-Wpedantic" +#endif + +#include +#include +#include +#include + +#include + +/* First entry must be NULL for comparison. */ +#define mlx5_mr_btree_len(bt) ((bt)->len - 1) + +void mlx5_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr, + size_t len, void *arg); +int mlx5_mr_update_mp(struct rte_eth_dev *dev, struct mlx5_mr_ctrl *mr_ctrl, + struct rte_mempool *mp); + +#endif /* RTE_PMD_MLX5_MR_H_ */ diff --git a/src/spdk/dpdk/drivers/net/mlx5/mlx5_rss.c b/src/spdk/dpdk/drivers/net/mlx5/mlx5_rss.c new file mode 100644 index 000000000..653b06914 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/mlx5/mlx5_rss.c @@ -0,0 +1,229 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2015 6WIND S.A. + * Copyright 2015 Mellanox Technologies, Ltd + */ + +#include +#include +#include +#include + +/* Verbs header. */ +/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */ +#ifdef PEDANTIC +#pragma GCC diagnostic ignored "-Wpedantic" +#endif +#include +#ifdef PEDANTIC +#pragma GCC diagnostic error "-Wpedantic" +#endif + +#include +#include + +#include "mlx5_defs.h" +#include "mlx5.h" +#include "mlx5_rxtx.h" + +/** + * DPDK callback to update the RSS hash configuration. + * + * @param dev + * Pointer to Ethernet device structure. + * @param[in] rss_conf + * RSS configuration data. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_rss_hash_update(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf) +{ + struct mlx5_priv *priv = dev->data->dev_private; + unsigned int i; + unsigned int idx; + + if (rss_conf->rss_hf & MLX5_RSS_HF_MASK) { + rte_errno = EINVAL; + return -rte_errno; + } + if (rss_conf->rss_key && rss_conf->rss_key_len) { + if (rss_conf->rss_key_len != MLX5_RSS_HASH_KEY_LEN) { + DRV_LOG(ERR, + "port %u RSS key len must be %s Bytes long", + dev->data->port_id, + RTE_STR(MLX5_RSS_HASH_KEY_LEN)); + rte_errno = EINVAL; + return -rte_errno; + } + priv->rss_conf.rss_key = rte_realloc(priv->rss_conf.rss_key, + rss_conf->rss_key_len, 0); + if (!priv->rss_conf.rss_key) { + rte_errno = ENOMEM; + return -rte_errno; + } + memcpy(priv->rss_conf.rss_key, rss_conf->rss_key, + rss_conf->rss_key_len); + priv->rss_conf.rss_key_len = rss_conf->rss_key_len; + } + priv->rss_conf.rss_hf = rss_conf->rss_hf; + /* Enable the RSS hash in all Rx queues. */ + for (i = 0, idx = 0; idx != priv->rxqs_n; ++i) { + if (!(*priv->rxqs)[i]) + continue; + (*priv->rxqs)[i]->rss_hash = !!rss_conf->rss_hf && + !!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS); + ++idx; + } + return 0; +} + +/** + * DPDK callback to get the RSS hash configuration. + * + * @param dev + * Pointer to Ethernet device structure. + * @param[in, out] rss_conf + * RSS configuration data. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_rss_hash_conf_get(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf) +{ + struct mlx5_priv *priv = dev->data->dev_private; + + if (!rss_conf) { + rte_errno = EINVAL; + return -rte_errno; + } + if (rss_conf->rss_key && + (rss_conf->rss_key_len >= priv->rss_conf.rss_key_len)) { + memcpy(rss_conf->rss_key, priv->rss_conf.rss_key, + priv->rss_conf.rss_key_len); + } + rss_conf->rss_key_len = priv->rss_conf.rss_key_len; + rss_conf->rss_hf = priv->rss_conf.rss_hf; + return 0; +} + +/** + * Allocate/reallocate RETA index table. 
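From the application side, mlx5_rss_hash_update() above is reached through rte_eth_dev_rss_hash_update(); for this PMD the key must be exactly MLX5_RSS_HASH_KEY_LEN (40) bytes. A usage sketch with an illustrative key pattern and hash-function selection:

#include <string.h>
#include <rte_ethdev.h>

static int
set_rss_hash(uint16_t port_id)
{
        uint8_t key[40];                /* 40-byte key required by mlx5 */
        struct rte_eth_rss_conf conf;

        memset(key, 0x6d, sizeof(key)); /* illustrative key pattern */
        memset(&conf, 0, sizeof(conf));
        conf.rss_key = key;             /* copied by the PMD, see above */
        conf.rss_key_len = sizeof(key);
        conf.rss_hf = ETH_RSS_IP | ETH_RSS_TCP;
        return rte_eth_dev_rss_hash_update(port_id, &conf);
}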
+ * + * @param dev + * Pointer to Ethernet device. + * @praram reta_size + * The size of the array to allocate. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_rss_reta_index_resize(struct rte_eth_dev *dev, unsigned int reta_size) +{ + struct mlx5_priv *priv = dev->data->dev_private; + void *mem; + unsigned int old_size = priv->reta_idx_n; + + if (priv->reta_idx_n == reta_size) + return 0; + + mem = rte_realloc(priv->reta_idx, + reta_size * sizeof((*priv->reta_idx)[0]), 0); + if (!mem) { + rte_errno = ENOMEM; + return -rte_errno; + } + priv->reta_idx = mem; + priv->reta_idx_n = reta_size; + if (old_size < reta_size) + memset(&(*priv->reta_idx)[old_size], 0, + (reta_size - old_size) * + sizeof((*priv->reta_idx)[0])); + return 0; +} + +/** + * DPDK callback to get the RETA indirection table. + * + * @param dev + * Pointer to Ethernet device structure. + * @param reta_conf + * Pointer to RETA configuration structure array. + * @param reta_size + * Size of the RETA table. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_dev_rss_reta_query(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size) +{ + struct mlx5_priv *priv = dev->data->dev_private; + unsigned int idx; + unsigned int i; + + if (!reta_size || reta_size > priv->reta_idx_n) { + rte_errno = EINVAL; + return -rte_errno; + } + /* Fill each entry of the table even if its bit is not set. */ + for (idx = 0, i = 0; (i != reta_size); ++i) { + idx = i / RTE_RETA_GROUP_SIZE; + reta_conf[idx].reta[i % RTE_RETA_GROUP_SIZE] = + (*priv->reta_idx)[i]; + } + return 0; +} + +/** + * DPDK callback to update the RETA indirection table. + * + * @param dev + * Pointer to Ethernet device structure. + * @param reta_conf + * Pointer to RETA configuration structure array. + * @param reta_size + * Size of the RETA table. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_dev_rss_reta_update(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size) +{ + int ret; + struct mlx5_priv *priv = dev->data->dev_private; + unsigned int idx; + unsigned int i; + unsigned int pos; + + if (!reta_size) { + rte_errno = EINVAL; + return -rte_errno; + } + ret = mlx5_rss_reta_index_resize(dev, reta_size); + if (ret) + return ret; + for (idx = 0, i = 0; (i != reta_size); ++i) { + idx = i / RTE_RETA_GROUP_SIZE; + pos = i % RTE_RETA_GROUP_SIZE; + if (((reta_conf[idx].mask >> i) & 0x1) == 0) + continue; + MLX5_ASSERT(reta_conf[idx].reta[pos] < priv->rxqs_n); + (*priv->reta_idx)[i] = reta_conf[idx].reta[pos]; + } + if (dev->data->dev_started) { + mlx5_dev_stop(dev); + priv->skip_default_rss_reta = 1; + return mlx5_dev_start(dev); + } + return 0; +} diff --git a/src/spdk/dpdk/drivers/net/mlx5/mlx5_rxmode.c b/src/spdk/dpdk/drivers/net/mlx5/mlx5_rxmode.c new file mode 100644 index 000000000..84c8b0526 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/mlx5/mlx5_rxmode.c @@ -0,0 +1,174 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2015 6WIND S.A. + * Copyright 2015 Mellanox Technologies, Ltd + */ + +#include +#include +#include + +/* Verbs header. */ +/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. 
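The RETA callbacks above use the standard 64-entry group layout of struct rte_eth_rss_reta_entry64: group = i / RTE_RETA_GROUP_SIZE, position = i % RTE_RETA_GROUP_SIZE, with a per-entry validity bit in .mask. A usage sketch, not from this patch, that spreads the indirection table round-robin over the configured Rx queues:

#include <stdint.h>
#include <string.h>
#include <rte_ethdev.h>

static int
spread_reta(uint16_t port_id, uint16_t reta_size, uint16_t nb_queues)
{
        struct rte_eth_rss_reta_entry64
                conf[(reta_size + RTE_RETA_GROUP_SIZE - 1) /
                     RTE_RETA_GROUP_SIZE];
        uint16_t i;

        memset(conf, 0, sizeof(conf));
        for (i = 0; i < reta_size; i++) {
                uint16_t grp = i / RTE_RETA_GROUP_SIZE;
                uint16_t pos = i % RTE_RETA_GROUP_SIZE;

                conf[grp].mask |= 1ULL << pos;          /* entry is valid */
                conf[grp].reta[pos] = i % nb_queues;    /* target Rx queue */
        }
        return rte_eth_dev_rss_reta_update(port_id, conf, reta_size);
}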
*/ +#ifdef PEDANTIC +#pragma GCC diagnostic ignored "-Wpedantic" +#endif +#include +#ifdef PEDANTIC +#pragma GCC diagnostic error "-Wpedantic" +#endif + +#include + +#include "mlx5.h" +#include "mlx5_rxtx.h" +#include "mlx5_utils.h" + +/** + * DPDK callback to enable promiscuous mode. + * + * @param dev + * Pointer to Ethernet device structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_promiscuous_enable(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + int ret; + + dev->data->promiscuous = 1; + if (priv->isolated) { + DRV_LOG(WARNING, + "port %u cannot enable promiscuous mode" + " in flow isolation mode", + dev->data->port_id); + return 0; + } + if (priv->config.vf) { + ret = mlx5_nl_promisc(priv->nl_socket_route, mlx5_ifindex(dev), + 1); + if (ret) + return ret; + } + ret = mlx5_traffic_restart(dev); + if (ret) + DRV_LOG(ERR, "port %u cannot enable promiscuous mode: %s", + dev->data->port_id, strerror(rte_errno)); + + /* + * rte_eth_dev_promiscuous_enable() rollback + * dev->data->promiscuous in the case of failure. + */ + return ret; +} + +/** + * DPDK callback to disable promiscuous mode. + * + * @param dev + * Pointer to Ethernet device structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_promiscuous_disable(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + int ret; + + dev->data->promiscuous = 0; + if (priv->config.vf) { + ret = mlx5_nl_promisc(priv->nl_socket_route, mlx5_ifindex(dev), + 0); + if (ret) + return ret; + } + ret = mlx5_traffic_restart(dev); + if (ret) + DRV_LOG(ERR, "port %u cannot disable promiscuous mode: %s", + dev->data->port_id, strerror(rte_errno)); + + /* + * rte_eth_dev_promiscuous_disable() rollback + * dev->data->promiscuous in the case of failure. + */ + return ret; +} + +/** + * DPDK callback to enable allmulti mode. + * + * @param dev + * Pointer to Ethernet device structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_allmulticast_enable(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + int ret; + + dev->data->all_multicast = 1; + if (priv->isolated) { + DRV_LOG(WARNING, + "port %u cannot enable allmulticast mode" + " in flow isolation mode", + dev->data->port_id); + return 0; + } + if (priv->config.vf) { + ret = mlx5_nl_allmulti(priv->nl_socket_route, mlx5_ifindex(dev), + 1); + if (ret) + goto error; + } + ret = mlx5_traffic_restart(dev); + if (ret) + DRV_LOG(ERR, "port %u cannot enable allmulicast mode: %s", + dev->data->port_id, strerror(rte_errno)); +error: + /* + * rte_eth_allmulticast_enable() rollback + * dev->data->all_multicast in the case of failure. + */ + return ret; +} + +/** + * DPDK callback to disable allmulti mode. + * + * @param dev + * Pointer to Ethernet device structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. 
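The promiscuous and allmulticast callbacks in this file are reached through the generic ethdev calls shown below; in DPDK 19.11 and later these return 0 or a negative errno, matching the PMD callbacks. A short usage sketch:

#include <rte_ethdev.h>

static int
enable_all_rx_traffic(uint16_t port_id)
{
        int ret;

        ret = rte_eth_promiscuous_enable(port_id);   /* unicast from anyone */
        if (ret != 0)
                return ret;
        return rte_eth_allmulticast_enable(port_id); /* all multicast groups */
}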
+ */ +int +mlx5_allmulticast_disable(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + int ret; + + dev->data->all_multicast = 0; + if (priv->config.vf) { + ret = mlx5_nl_allmulti(priv->nl_socket_route, mlx5_ifindex(dev), + 0); + if (ret) + goto error; + } + ret = mlx5_traffic_restart(dev); + if (ret) + DRV_LOG(ERR, "port %u cannot disable allmulicast mode: %s", + dev->data->port_id, strerror(rte_errno)); +error: + /* + * rte_eth_allmulticast_disable() rollback + * dev->data->all_multicast in the case of failure. + */ + return ret; +} diff --git a/src/spdk/dpdk/drivers/net/mlx5/mlx5_rxq.c b/src/spdk/dpdk/drivers/net/mlx5/mlx5_rxq.c new file mode 100644 index 000000000..7a50ec6f1 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/mlx5/mlx5_rxq.c @@ -0,0 +1,2976 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2015 6WIND S.A. + * Copyright 2015 Mellanox Technologies, Ltd + */ + +#include +#include +#include +#include +#include +#include + +/* Verbs header. */ +/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */ +#ifdef PEDANTIC +#pragma GCC diagnostic ignored "-Wpedantic" +#endif +#include +#include +#ifdef PEDANTIC +#pragma GCC diagnostic error "-Wpedantic" +#endif + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "mlx5_defs.h" +#include "mlx5.h" +#include "mlx5_rxtx.h" +#include "mlx5_utils.h" +#include "mlx5_autoconf.h" +#include "mlx5_flow.h" + + +/* Default RSS hash key also used for ConnectX-3. */ +uint8_t rss_hash_default_key[] = { + 0x2c, 0xc6, 0x81, 0xd1, + 0x5b, 0xdb, 0xf4, 0xf7, + 0xfc, 0xa2, 0x83, 0x19, + 0xdb, 0x1a, 0x3e, 0x94, + 0x6b, 0x9e, 0x38, 0xd9, + 0x2c, 0x9c, 0x03, 0xd1, + 0xad, 0x99, 0x44, 0xa7, + 0xd9, 0x56, 0x3d, 0x59, + 0x06, 0x3c, 0x25, 0xf3, + 0xfc, 0x1f, 0xdc, 0x2a, +}; + +/* Length of the default RSS hash key. */ +static_assert(MLX5_RSS_HASH_KEY_LEN == + (unsigned int)sizeof(rss_hash_default_key), + "wrong RSS default key size."); + +/** + * Check whether Multi-Packet RQ can be enabled for the device. + * + * @param dev + * Pointer to Ethernet device. + * + * @return + * 1 if supported, negative errno value if not. + */ +inline int +mlx5_check_mprq_support(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + + if (priv->config.mprq.enabled && + priv->rxqs_n >= priv->config.mprq.min_rxqs_num) + return 1; + return -ENOTSUP; +} + +/** + * Check whether Multi-Packet RQ is enabled for the Rx queue. + * + * @param rxq + * Pointer to receive queue structure. + * + * @return + * 0 if disabled, otherwise enabled. + */ +inline int +mlx5_rxq_mprq_enabled(struct mlx5_rxq_data *rxq) +{ + return rxq->strd_num_n > 0; +} + +/** + * Check whether Multi-Packet RQ is enabled for the device. + * + * @param dev + * Pointer to Ethernet device. + * + * @return + * 0 if disabled, otherwise enabled. + */ +inline int +mlx5_mprq_enabled(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + uint16_t i; + uint16_t n = 0; + uint16_t n_ibv = 0; + + if (mlx5_check_mprq_support(dev) < 0) + return 0; + /* All the configured queues should be enabled. */ + for (i = 0; i < priv->rxqs_n; ++i) { + struct mlx5_rxq_data *rxq = (*priv->rxqs)[i]; + struct mlx5_rxq_ctrl *rxq_ctrl = container_of + (rxq, struct mlx5_rxq_ctrl, rxq); + + if (rxq == NULL || rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD) + continue; + n_ibv++; + if (mlx5_rxq_mprq_enabled(rxq)) + ++n; + } + /* Multi-Packet RQ can't be partially configured. 
*/ + MLX5_ASSERT(n == 0 || n == n_ibv); + return n == n_ibv; +} + +/** + * Allocate RX queue elements for Multi-Packet RQ. + * + * @param rxq_ctrl + * Pointer to RX queue structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +rxq_alloc_elts_mprq(struct mlx5_rxq_ctrl *rxq_ctrl) +{ + struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq; + unsigned int wqe_n = 1 << rxq->elts_n; + unsigned int i; + int err; + + /* Iterate on segments. */ + for (i = 0; i <= wqe_n; ++i) { + struct mlx5_mprq_buf *buf; + + if (rte_mempool_get(rxq->mprq_mp, (void **)&buf) < 0) { + DRV_LOG(ERR, "port %u empty mbuf pool", rxq->port_id); + rte_errno = ENOMEM; + goto error; + } + if (i < wqe_n) + (*rxq->mprq_bufs)[i] = buf; + else + rxq->mprq_repl = buf; + } + DRV_LOG(DEBUG, + "port %u Rx queue %u allocated and configured %u segments", + rxq->port_id, rxq->idx, wqe_n); + return 0; +error: + err = rte_errno; /* Save rte_errno before cleanup. */ + wqe_n = i; + for (i = 0; (i != wqe_n); ++i) { + if ((*rxq->mprq_bufs)[i] != NULL) + rte_mempool_put(rxq->mprq_mp, + (*rxq->mprq_bufs)[i]); + (*rxq->mprq_bufs)[i] = NULL; + } + DRV_LOG(DEBUG, "port %u Rx queue %u failed, freed everything", + rxq->port_id, rxq->idx); + rte_errno = err; /* Restore rte_errno. */ + return -rte_errno; +} + +/** + * Allocate RX queue elements for Single-Packet RQ. + * + * @param rxq_ctrl + * Pointer to RX queue structure. + * + * @return + * 0 on success, errno value on failure. + */ +static int +rxq_alloc_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl) +{ + const unsigned int sges_n = 1 << rxq_ctrl->rxq.sges_n; + unsigned int elts_n = 1 << rxq_ctrl->rxq.elts_n; + unsigned int i; + int err; + + /* Iterate on segments. */ + for (i = 0; (i != elts_n); ++i) { + struct rte_mbuf *buf; + + buf = rte_pktmbuf_alloc(rxq_ctrl->rxq.mp); + if (buf == NULL) { + DRV_LOG(ERR, "port %u empty mbuf pool", + PORT_ID(rxq_ctrl->priv)); + rte_errno = ENOMEM; + goto error; + } + /* Headroom is reserved by rte_pktmbuf_alloc(). */ + MLX5_ASSERT(DATA_OFF(buf) == RTE_PKTMBUF_HEADROOM); + /* Buffer is supposed to be empty. */ + MLX5_ASSERT(rte_pktmbuf_data_len(buf) == 0); + MLX5_ASSERT(rte_pktmbuf_pkt_len(buf) == 0); + MLX5_ASSERT(!buf->next); + /* Only the first segment keeps headroom. */ + if (i % sges_n) + SET_DATA_OFF(buf, 0); + PORT(buf) = rxq_ctrl->rxq.port_id; + DATA_LEN(buf) = rte_pktmbuf_tailroom(buf); + PKT_LEN(buf) = DATA_LEN(buf); + NB_SEGS(buf) = 1; + (*rxq_ctrl->rxq.elts)[i] = buf; + } + /* If Rx vector is activated. */ + if (mlx5_rxq_check_vec_support(&rxq_ctrl->rxq) > 0) { + struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq; + struct rte_mbuf *mbuf_init = &rxq->fake_mbuf; + struct rte_pktmbuf_pool_private *priv = + (struct rte_pktmbuf_pool_private *) + rte_mempool_get_priv(rxq_ctrl->rxq.mp); + int j; + + /* Initialize default rearm_data for vPMD. */ + mbuf_init->data_off = RTE_PKTMBUF_HEADROOM; + rte_mbuf_refcnt_set(mbuf_init, 1); + mbuf_init->nb_segs = 1; + mbuf_init->port = rxq->port_id; + if (priv->flags & RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF) + mbuf_init->ol_flags = EXT_ATTACHED_MBUF; + /* + * prevent compiler reordering: + * rearm_data covers previous fields. + */ + rte_compiler_barrier(); + rxq->mbuf_initializer = + *(rte_xmm_t *)&mbuf_init->rearm_data; + /* Padding with a fake mbuf for vectorized Rx. 
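rxq_alloc_elts_mprq() and rxq_alloc_elts_sprq() above both follow the same allocate-or-unwind shape: on any failure, free everything allocated so far while preserving the original rte_errno across the cleanup calls. A condensed sketch of that pattern with illustrative parameters:

#include <errno.h>
#include <rte_errno.h>
#include <rte_mbuf.h>

static int
alloc_ring_elts(struct rte_mempool *mp, struct rte_mbuf **ring, unsigned int n)
{
        unsigned int i;
        int err;

        for (i = 0; i < n; i++) {
                ring[i] = rte_pktmbuf_alloc(mp);
                if (ring[i] == NULL) {
                        rte_errno = ENOMEM;
                        goto error;
                }
        }
        return 0;
error:
        err = rte_errno;        /* save before cleanup may clobber it */
        while (i--) {
                rte_pktmbuf_free_seg(ring[i]);
                ring[i] = NULL;
        }
        rte_errno = err;        /* restore */
        return -rte_errno;
}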
*/ + for (j = 0; j < MLX5_VPMD_DESCS_PER_LOOP; ++j) + (*rxq->elts)[elts_n + j] = &rxq->fake_mbuf; + } + DRV_LOG(DEBUG, + "port %u Rx queue %u allocated and configured %u segments" + " (max %u packets)", + PORT_ID(rxq_ctrl->priv), rxq_ctrl->rxq.idx, elts_n, + elts_n / (1 << rxq_ctrl->rxq.sges_n)); + return 0; +error: + err = rte_errno; /* Save rte_errno before cleanup. */ + elts_n = i; + for (i = 0; (i != elts_n); ++i) { + if ((*rxq_ctrl->rxq.elts)[i] != NULL) + rte_pktmbuf_free_seg((*rxq_ctrl->rxq.elts)[i]); + (*rxq_ctrl->rxq.elts)[i] = NULL; + } + DRV_LOG(DEBUG, "port %u Rx queue %u failed, freed everything", + PORT_ID(rxq_ctrl->priv), rxq_ctrl->rxq.idx); + rte_errno = err; /* Restore rte_errno. */ + return -rte_errno; +} + +/** + * Allocate RX queue elements. + * + * @param rxq_ctrl + * Pointer to RX queue structure. + * + * @return + * 0 on success, errno value on failure. + */ +int +rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl) +{ + return mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ? + rxq_alloc_elts_mprq(rxq_ctrl) : rxq_alloc_elts_sprq(rxq_ctrl); +} + +/** + * Free RX queue elements for Multi-Packet RQ. + * + * @param rxq_ctrl + * Pointer to RX queue structure. + */ +static void +rxq_free_elts_mprq(struct mlx5_rxq_ctrl *rxq_ctrl) +{ + struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq; + uint16_t i; + + DRV_LOG(DEBUG, "port %u Multi-Packet Rx queue %u freeing WRs", + rxq->port_id, rxq->idx); + if (rxq->mprq_bufs == NULL) + return; + MLX5_ASSERT(mlx5_rxq_check_vec_support(rxq) < 0); + for (i = 0; (i != (1u << rxq->elts_n)); ++i) { + if ((*rxq->mprq_bufs)[i] != NULL) + mlx5_mprq_buf_free((*rxq->mprq_bufs)[i]); + (*rxq->mprq_bufs)[i] = NULL; + } + if (rxq->mprq_repl != NULL) { + mlx5_mprq_buf_free(rxq->mprq_repl); + rxq->mprq_repl = NULL; + } +} + +/** + * Free RX queue elements for Single-Packet RQ. + * + * @param rxq_ctrl + * Pointer to RX queue structure. + */ +static void +rxq_free_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl) +{ + struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq; + const uint16_t q_n = (1 << rxq->elts_n); + const uint16_t q_mask = q_n - 1; + uint16_t used = q_n - (rxq->rq_ci - rxq->rq_pi); + uint16_t i; + + DRV_LOG(DEBUG, "port %u Rx queue %u freeing WRs", + PORT_ID(rxq_ctrl->priv), rxq->idx); + if (rxq->elts == NULL) + return; + /** + * Some mbuf in the Ring belongs to the application. They cannot be + * freed. + */ + if (mlx5_rxq_check_vec_support(rxq) > 0) { + for (i = 0; i < used; ++i) + (*rxq->elts)[(rxq->rq_ci + i) & q_mask] = NULL; + rxq->rq_pi = rxq->rq_ci; + } + for (i = 0; (i != (1u << rxq->elts_n)); ++i) { + if ((*rxq->elts)[i] != NULL) + rte_pktmbuf_free_seg((*rxq->elts)[i]); + (*rxq->elts)[i] = NULL; + } +} + +/** + * Free RX queue elements. + * + * @param rxq_ctrl + * Pointer to RX queue structure. + */ +static void +rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl) +{ + if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq)) + rxq_free_elts_mprq(rxq_ctrl); + else + rxq_free_elts_sprq(rxq_ctrl); +} + +/** + * Returns the per-queue supported offloads. + * + * @param dev + * Pointer to Ethernet device. + * + * @return + * Supported Rx offloads. 
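The per-queue and per-port offload helpers here are what the PMD typically reports through rte_eth_dev_info_get(); an application checks the advertised capability bits before enabling an offload in its port or queue configuration. A usage sketch:

#include <rte_ethdev.h>

static int
rx_checksum_supported(uint16_t port_id)
{
        struct rte_eth_dev_info info;

        if (rte_eth_dev_info_get(port_id, &info) != 0)
                return 0;
        /* Offload names follow the pre-21.11 DEV_RX_OFFLOAD_* convention. */
        return (info.rx_offload_capa & DEV_RX_OFFLOAD_IPV4_CKSUM) != 0;
}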
+ */ +uint64_t +mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_dev_config *config = &priv->config; + uint64_t offloads = (DEV_RX_OFFLOAD_SCATTER | + DEV_RX_OFFLOAD_TIMESTAMP | + DEV_RX_OFFLOAD_JUMBO_FRAME | + DEV_RX_OFFLOAD_RSS_HASH); + + if (config->hw_fcs_strip) + offloads |= DEV_RX_OFFLOAD_KEEP_CRC; + + if (config->hw_csum) + offloads |= (DEV_RX_OFFLOAD_IPV4_CKSUM | + DEV_RX_OFFLOAD_UDP_CKSUM | + DEV_RX_OFFLOAD_TCP_CKSUM); + if (config->hw_vlan_strip) + offloads |= DEV_RX_OFFLOAD_VLAN_STRIP; + if (MLX5_LRO_SUPPORTED(dev)) + offloads |= DEV_RX_OFFLOAD_TCP_LRO; + return offloads; +} + + +/** + * Returns the per-port supported offloads. + * + * @return + * Supported Rx offloads. + */ +uint64_t +mlx5_get_rx_port_offloads(void) +{ + uint64_t offloads = DEV_RX_OFFLOAD_VLAN_FILTER; + + return offloads; +} + +/** + * Verify if the queue can be released. + * + * @param dev + * Pointer to Ethernet device. + * @param idx + * RX queue index. + * + * @return + * 1 if the queue can be released + * 0 if the queue can not be released, there are references to it. + * Negative errno and rte_errno is set if queue doesn't exist. + */ +static int +mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_rxq_ctrl *rxq_ctrl; + + if (!(*priv->rxqs)[idx]) { + rte_errno = EINVAL; + return -rte_errno; + } + rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq); + return (rte_atomic32_read(&rxq_ctrl->refcnt) == 1); +} + +/** + * Rx queue presetup checks. + * + * @param dev + * Pointer to Ethernet device structure. + * @param idx + * RX queue index. + * @param desc + * Number of descriptors to configure in queue. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +mlx5_rx_queue_pre_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc) +{ + struct mlx5_priv *priv = dev->data->dev_private; + + if (!rte_is_power_of_2(desc)) { + desc = 1 << log2above(desc); + DRV_LOG(WARNING, + "port %u increased number of descriptors in Rx queue %u" + " to the next power of two (%d)", + dev->data->port_id, idx, desc); + } + DRV_LOG(DEBUG, "port %u configuring Rx queue %u for %u descriptors", + dev->data->port_id, idx, desc); + if (idx >= priv->rxqs_n) { + DRV_LOG(ERR, "port %u Rx queue index out of range (%u >= %u)", + dev->data->port_id, idx, priv->rxqs_n); + rte_errno = EOVERFLOW; + return -rte_errno; + } + if (!mlx5_rxq_releasable(dev, idx)) { + DRV_LOG(ERR, "port %u unable to release queue index %u", + dev->data->port_id, idx); + rte_errno = EBUSY; + return -rte_errno; + } + mlx5_rxq_release(dev, idx); + return 0; +} + +/** + * + * @param dev + * Pointer to Ethernet device structure. + * @param idx + * RX queue index. + * @param desc + * Number of descriptors to configure in queue. + * @param socket + * NUMA socket on which memory must be allocated. + * @param[in] conf + * Thresholds parameters. + * @param mp + * Memory pool for buffer allocations. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. 
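mlx5_rx_queue_pre_setup() above rounds a non-power-of-two descriptor request up to the next power of two, since the hardware ring sizes are log2-encoded. The same rounding can be expressed with rte_align32pow2() from rte_common.h; a tiny sketch:

#include <stdint.h>
#include <rte_common.h>

static uint16_t
round_desc(uint16_t desc)
{
        if (!rte_is_power_of_2(desc))
                desc = (uint16_t)rte_align32pow2(desc); /* next power of two */
        return desc;
}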
+ */ +int +mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, + unsigned int socket, const struct rte_eth_rxconf *conf, + struct rte_mempool *mp) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx]; + struct mlx5_rxq_ctrl *rxq_ctrl = + container_of(rxq, struct mlx5_rxq_ctrl, rxq); + int res; + + res = mlx5_rx_queue_pre_setup(dev, idx, desc); + if (res) + return res; + rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, conf, mp); + if (!rxq_ctrl) { + DRV_LOG(ERR, "port %u unable to allocate queue index %u", + dev->data->port_id, idx); + rte_errno = ENOMEM; + return -rte_errno; + } + DRV_LOG(DEBUG, "port %u adding Rx queue %u to list", + dev->data->port_id, idx); + (*priv->rxqs)[idx] = &rxq_ctrl->rxq; + return 0; +} + +/** + * + * @param dev + * Pointer to Ethernet device structure. + * @param idx + * RX queue index. + * @param desc + * Number of descriptors to configure in queue. + * @param hairpin_conf + * Hairpin configuration parameters. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_rx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t idx, + uint16_t desc, + const struct rte_eth_hairpin_conf *hairpin_conf) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx]; + struct mlx5_rxq_ctrl *rxq_ctrl = + container_of(rxq, struct mlx5_rxq_ctrl, rxq); + int res; + + res = mlx5_rx_queue_pre_setup(dev, idx, desc); + if (res) + return res; + if (hairpin_conf->peer_count != 1 || + hairpin_conf->peers[0].port != dev->data->port_id || + hairpin_conf->peers[0].queue >= priv->txqs_n) { + DRV_LOG(ERR, "port %u unable to setup hairpin queue index %u " + " invalid hairpind configuration", dev->data->port_id, + idx); + rte_errno = EINVAL; + return -rte_errno; + } + rxq_ctrl = mlx5_rxq_hairpin_new(dev, idx, desc, hairpin_conf); + if (!rxq_ctrl) { + DRV_LOG(ERR, "port %u unable to allocate queue index %u", + dev->data->port_id, idx); + rte_errno = ENOMEM; + return -rte_errno; + } + DRV_LOG(DEBUG, "port %u adding Rx queue %u to list", + dev->data->port_id, idx); + (*priv->rxqs)[idx] = &rxq_ctrl->rxq; + return 0; +} + +/** + * DPDK callback to release a RX queue. + * + * @param dpdk_rxq + * Generic RX queue pointer. + */ +void +mlx5_rx_queue_release(void *dpdk_rxq) +{ + struct mlx5_rxq_data *rxq = (struct mlx5_rxq_data *)dpdk_rxq; + struct mlx5_rxq_ctrl *rxq_ctrl; + struct mlx5_priv *priv; + + if (rxq == NULL) + return; + rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq); + priv = rxq_ctrl->priv; + if (!mlx5_rxq_releasable(ETH_DEV(priv), rxq_ctrl->rxq.idx)) + rte_panic("port %u Rx queue %u is still used by a flow and" + " cannot be removed\n", + PORT_ID(priv), rxq->idx); + mlx5_rxq_release(ETH_DEV(priv), rxq_ctrl->rxq.idx); +} + +/** + * Get an Rx queue Verbs/DevX object. + * + * @param dev + * Pointer to Ethernet device. + * @param idx + * Queue index in DPDK Rx queue array + * + * @return + * The Verbs/DevX object if it exists. 
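mlx5_rx_queue_setup() above sits behind the generic rte_eth_rx_queue_setup() call. A usage sketch for configuring one Rx queue from a given mbuf pool; port configuration and error handling are assumed to happen elsewhere:

#include <rte_ethdev.h>
#include <rte_mempool.h>

static int
setup_one_rxq(uint16_t port_id, uint16_t queue_id, uint16_t nb_desc,
              struct rte_mempool *mb_pool)
{
        /* A NULL rxconf selects the defaults reported by the PMD. */
        return rte_eth_rx_queue_setup(port_id, queue_id, nb_desc,
                                      rte_eth_dev_socket_id(port_id),
                                      NULL, mb_pool);
}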
+ */ +static struct mlx5_rxq_obj * +mlx5_rxq_obj_get(struct rte_eth_dev *dev, uint16_t idx) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx]; + struct mlx5_rxq_ctrl *rxq_ctrl; + + if (idx >= priv->rxqs_n) + return NULL; + if (!rxq_data) + return NULL; + rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq); + if (rxq_ctrl->obj) + rte_atomic32_inc(&rxq_ctrl->obj->refcnt); + return rxq_ctrl->obj; +} + +/** + * Release the resources allocated for an RQ DevX object. + * + * @param rxq_ctrl + * DevX Rx queue object. + */ +static void +rxq_release_rq_resources(struct mlx5_rxq_ctrl *rxq_ctrl) +{ + if (rxq_ctrl->rxq.wqes) { + rte_free((void *)(uintptr_t)rxq_ctrl->rxq.wqes); + rxq_ctrl->rxq.wqes = NULL; + } + if (rxq_ctrl->wq_umem) { + mlx5_glue->devx_umem_dereg(rxq_ctrl->wq_umem); + rxq_ctrl->wq_umem = NULL; + } +} + +/** + * Release an Rx hairpin related resources. + * + * @param rxq_obj + * Hairpin Rx queue object. + */ +static void +rxq_obj_hairpin_release(struct mlx5_rxq_obj *rxq_obj) +{ + struct mlx5_devx_modify_rq_attr rq_attr = { 0 }; + + MLX5_ASSERT(rxq_obj); + rq_attr.state = MLX5_RQC_STATE_RST; + rq_attr.rq_state = MLX5_RQC_STATE_RDY; + mlx5_devx_cmd_modify_rq(rxq_obj->rq, &rq_attr); + claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq)); +} + +/** + * Release an Rx verbs/DevX queue object. + * + * @param rxq_obj + * Verbs/DevX Rx queue object. + * + * @return + * 1 while a reference on it exists, 0 when freed. + */ +static int +mlx5_rxq_obj_release(struct mlx5_rxq_obj *rxq_obj) +{ + MLX5_ASSERT(rxq_obj); + if (rte_atomic32_dec_and_test(&rxq_obj->refcnt)) { + switch (rxq_obj->type) { + case MLX5_RXQ_OBJ_TYPE_IBV: + MLX5_ASSERT(rxq_obj->wq); + MLX5_ASSERT(rxq_obj->cq); + rxq_free_elts(rxq_obj->rxq_ctrl); + claim_zero(mlx5_glue->destroy_wq(rxq_obj->wq)); + claim_zero(mlx5_glue->destroy_cq(rxq_obj->cq)); + break; + case MLX5_RXQ_OBJ_TYPE_DEVX_RQ: + MLX5_ASSERT(rxq_obj->cq); + MLX5_ASSERT(rxq_obj->rq); + rxq_free_elts(rxq_obj->rxq_ctrl); + claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq)); + rxq_release_rq_resources(rxq_obj->rxq_ctrl); + claim_zero(mlx5_glue->destroy_cq(rxq_obj->cq)); + break; + case MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN: + MLX5_ASSERT(rxq_obj->rq); + rxq_obj_hairpin_release(rxq_obj); + break; + } + if (rxq_obj->channel) + claim_zero(mlx5_glue->destroy_comp_channel + (rxq_obj->channel)); + LIST_REMOVE(rxq_obj, next); + rte_free(rxq_obj); + return 0; + } + return 1; +} + +/** + * Allocate queue vector and fill epoll fd list for Rx interrupts. + * + * @param dev + * Pointer to Ethernet device. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + unsigned int i; + unsigned int rxqs_n = priv->rxqs_n; + unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID); + unsigned int count = 0; + struct rte_intr_handle *intr_handle = dev->intr_handle; + + if (!dev->data->dev_conf.intr_conf.rxq) + return 0; + mlx5_rx_intr_vec_disable(dev); + intr_handle->intr_vec = malloc(n * sizeof(intr_handle->intr_vec[0])); + if (intr_handle->intr_vec == NULL) { + DRV_LOG(ERR, + "port %u failed to allocate memory for interrupt" + " vector, Rx interrupts will not be supported", + dev->data->port_id); + rte_errno = ENOMEM; + return -rte_errno; + } + intr_handle->type = RTE_INTR_HANDLE_EXT; + for (i = 0; i != n; ++i) { + /* This rxq obj must not be released in this function. 
*/ + struct mlx5_rxq_obj *rxq_obj = mlx5_rxq_obj_get(dev, i); + int fd; + int flags; + int rc; + + /* Skip queues that cannot request interrupts. */ + if (!rxq_obj || !rxq_obj->channel) { + /* Use invalid intr_vec[] index to disable entry. */ + intr_handle->intr_vec[i] = + RTE_INTR_VEC_RXTX_OFFSET + + RTE_MAX_RXTX_INTR_VEC_ID; + continue; + } + if (count >= RTE_MAX_RXTX_INTR_VEC_ID) { + DRV_LOG(ERR, + "port %u too many Rx queues for interrupt" + " vector size (%d), Rx interrupts cannot be" + " enabled", + dev->data->port_id, RTE_MAX_RXTX_INTR_VEC_ID); + mlx5_rx_intr_vec_disable(dev); + rte_errno = ENOMEM; + return -rte_errno; + } + fd = rxq_obj->channel->fd; + flags = fcntl(fd, F_GETFL); + rc = fcntl(fd, F_SETFL, flags | O_NONBLOCK); + if (rc < 0) { + rte_errno = errno; + DRV_LOG(ERR, + "port %u failed to make Rx interrupt file" + " descriptor %d non-blocking for queue index" + " %d", + dev->data->port_id, fd, i); + mlx5_rx_intr_vec_disable(dev); + return -rte_errno; + } + intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + count; + intr_handle->efds[count] = fd; + count++; + } + if (!count) + mlx5_rx_intr_vec_disable(dev); + else + intr_handle->nb_efd = count; + return 0; +} + +/** + * Clean up Rx interrupts handler. + * + * @param dev + * Pointer to Ethernet device. + */ +void +mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct rte_intr_handle *intr_handle = dev->intr_handle; + unsigned int i; + unsigned int rxqs_n = priv->rxqs_n; + unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID); + + if (!dev->data->dev_conf.intr_conf.rxq) + return; + if (!intr_handle->intr_vec) + goto free; + for (i = 0; i != n; ++i) { + struct mlx5_rxq_ctrl *rxq_ctrl; + struct mlx5_rxq_data *rxq_data; + + if (intr_handle->intr_vec[i] == RTE_INTR_VEC_RXTX_OFFSET + + RTE_MAX_RXTX_INTR_VEC_ID) + continue; + /** + * Need to access directly the queue to release the reference + * kept in mlx5_rx_intr_vec_enable(). + */ + rxq_data = (*priv->rxqs)[i]; + rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq); + if (rxq_ctrl->obj) + mlx5_rxq_obj_release(rxq_ctrl->obj); + } +free: + rte_intr_free_epoll_fd(intr_handle); + if (intr_handle->intr_vec) + free(intr_handle->intr_vec); + intr_handle->nb_efd = 0; + intr_handle->intr_vec = NULL; +} + +/** + * MLX5 CQ notification . + * + * @param rxq + * Pointer to receive queue structure. + * @param sq_n_rxq + * Sequence number per receive queue . + */ +static inline void +mlx5_arm_cq(struct mlx5_rxq_data *rxq, int sq_n_rxq) +{ + int sq_n = 0; + uint32_t doorbell_hi; + uint64_t doorbell; + void *cq_db_reg = (char *)rxq->cq_uar + MLX5_CQ_DOORBELL; + + sq_n = sq_n_rxq & MLX5_CQ_SQN_MASK; + doorbell_hi = sq_n << MLX5_CQ_SQN_OFFSET | (rxq->cq_ci & MLX5_CI_MASK); + doorbell = (uint64_t)doorbell_hi << 32; + doorbell |= rxq->cqn; + rxq->cq_db[MLX5_CQ_ARM_DB] = rte_cpu_to_be_32(doorbell_hi); + mlx5_uar_write64(rte_cpu_to_be_64(doorbell), + cq_db_reg, rxq->uar_lock_cq); +} + +/** + * DPDK callback for Rx queue interrupt enable. + * + * @param dev + * Pointer to Ethernet device structure. + * @param rx_queue_id + * Rx queue number. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. 
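mlx5_arm_cq() above composes a 64-bit doorbell value: the high 32 bits carry the command sequence number and current CQ consumer index, the low 32 bits carry the CQ number. An illustrative breakdown of that composition; the real masks and offsets come from the PRM headers and are passed as parameters here:

#include <stdint.h>

static uint64_t
compose_cq_doorbell(uint32_t sq_n, uint32_t cq_ci, uint32_t cqn,
                    uint32_t sqn_offset, uint32_t ci_mask)
{
        uint32_t doorbell_hi = (sq_n << sqn_offset) | (cq_ci & ci_mask);

        /* High word: sequence + consumer index; low word: CQ number. */
        return ((uint64_t)doorbell_hi << 32) | cqn;
}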
+ */ +int +mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_rxq_data *rxq_data; + struct mlx5_rxq_ctrl *rxq_ctrl; + + rxq_data = (*priv->rxqs)[rx_queue_id]; + if (!rxq_data) { + rte_errno = EINVAL; + return -rte_errno; + } + rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq); + if (rxq_ctrl->irq) { + struct mlx5_rxq_obj *rxq_obj; + + rxq_obj = mlx5_rxq_obj_get(dev, rx_queue_id); + if (!rxq_obj) { + rte_errno = EINVAL; + return -rte_errno; + } + mlx5_arm_cq(rxq_data, rxq_data->cq_arm_sn); + mlx5_rxq_obj_release(rxq_obj); + } + return 0; +} + +/** + * DPDK callback for Rx queue interrupt disable. + * + * @param dev + * Pointer to Ethernet device structure. + * @param rx_queue_id + * Rx queue number. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_rxq_data *rxq_data; + struct mlx5_rxq_ctrl *rxq_ctrl; + struct mlx5_rxq_obj *rxq_obj = NULL; + struct ibv_cq *ev_cq; + void *ev_ctx; + int ret; + + rxq_data = (*priv->rxqs)[rx_queue_id]; + if (!rxq_data) { + rte_errno = EINVAL; + return -rte_errno; + } + rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq); + if (!rxq_ctrl->irq) + return 0; + rxq_obj = mlx5_rxq_obj_get(dev, rx_queue_id); + if (!rxq_obj) { + rte_errno = EINVAL; + return -rte_errno; + } + ret = mlx5_glue->get_cq_event(rxq_obj->channel, &ev_cq, &ev_ctx); + if (ret || ev_cq != rxq_obj->cq) { + rte_errno = EINVAL; + goto exit; + } + rxq_data->cq_arm_sn++; + mlx5_glue->ack_cq_events(rxq_obj->cq, 1); + mlx5_rxq_obj_release(rxq_obj); + return 0; +exit: + ret = rte_errno; /* Save rte_errno before cleanup. */ + if (rxq_obj) + mlx5_rxq_obj_release(rxq_obj); + DRV_LOG(WARNING, "port %u unable to disable interrupt on Rx queue %d", + dev->data->port_id, rx_queue_id); + rte_errno = ret; /* Restore rte_errno. */ + return -rte_errno; +} + +/** + * Create a CQ Verbs object. + * + * @param dev + * Pointer to Ethernet device. + * @param priv + * Pointer to device private data. + * @param rxq_data + * Pointer to Rx queue data. + * @param cqe_n + * Number of CQEs in CQ. + * @param rxq_obj + * Pointer to Rx queue object data. + * + * @return + * The Verbs object initialised, NULL otherwise and rte_errno is set. + */ +static struct ibv_cq * +mlx5_ibv_cq_new(struct rte_eth_dev *dev, struct mlx5_priv *priv, + struct mlx5_rxq_data *rxq_data, + unsigned int cqe_n, struct mlx5_rxq_obj *rxq_obj) +{ + struct { + struct ibv_cq_init_attr_ex ibv; + struct mlx5dv_cq_init_attr mlx5; + } cq_attr; + + cq_attr.ibv = (struct ibv_cq_init_attr_ex){ + .cqe = cqe_n, + .channel = rxq_obj->channel, + .comp_mask = 0, + }; + cq_attr.mlx5 = (struct mlx5dv_cq_init_attr){ + .comp_mask = 0, + }; + if (priv->config.cqe_comp && !rxq_data->hw_timestamp && + !rxq_data->lro) { + cq_attr.mlx5.comp_mask |= + MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE; +#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT + cq_attr.mlx5.cqe_comp_res_format = + mlx5_rxq_mprq_enabled(rxq_data) ? + MLX5DV_CQE_RES_FORMAT_CSUM_STRIDX : + MLX5DV_CQE_RES_FORMAT_HASH; +#else + cq_attr.mlx5.cqe_comp_res_format = MLX5DV_CQE_RES_FORMAT_HASH; +#endif + /* + * For vectorized Rx, it must not be doubled in order to + * make cq_ci and rq_ci aligned. 
+ */ + if (mlx5_rxq_check_vec_support(rxq_data) < 0) + cq_attr.ibv.cqe *= 2; + } else if (priv->config.cqe_comp && rxq_data->hw_timestamp) { + DRV_LOG(DEBUG, + "port %u Rx CQE compression is disabled for HW" + " timestamp", + dev->data->port_id); + } else if (priv->config.cqe_comp && rxq_data->lro) { + DRV_LOG(DEBUG, + "port %u Rx CQE compression is disabled for LRO", + dev->data->port_id); + } +#ifdef HAVE_IBV_MLX5_MOD_CQE_128B_PAD + if (priv->config.cqe_pad) { + cq_attr.mlx5.comp_mask |= MLX5DV_CQ_INIT_ATTR_MASK_FLAGS; + cq_attr.mlx5.flags |= MLX5DV_CQ_INIT_ATTR_FLAGS_CQE_PAD; + } +#endif + return mlx5_glue->cq_ex_to_cq(mlx5_glue->dv_create_cq(priv->sh->ctx, + &cq_attr.ibv, + &cq_attr.mlx5)); +} + +/** + * Create a WQ Verbs object. + * + * @param dev + * Pointer to Ethernet device. + * @param priv + * Pointer to device private data. + * @param rxq_data + * Pointer to Rx queue data. + * @param idx + * Queue index in DPDK Rx queue array + * @param wqe_n + * Number of WQEs in WQ. + * @param rxq_obj + * Pointer to Rx queue object data. + * + * @return + * The Verbs object initialised, NULL otherwise and rte_errno is set. + */ +static struct ibv_wq * +mlx5_ibv_wq_new(struct rte_eth_dev *dev, struct mlx5_priv *priv, + struct mlx5_rxq_data *rxq_data, uint16_t idx, + unsigned int wqe_n, struct mlx5_rxq_obj *rxq_obj) +{ + struct { + struct ibv_wq_init_attr ibv; +#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT + struct mlx5dv_wq_init_attr mlx5; +#endif + } wq_attr; + + wq_attr.ibv = (struct ibv_wq_init_attr){ + .wq_context = NULL, /* Could be useful in the future. */ + .wq_type = IBV_WQT_RQ, + /* Max number of outstanding WRs. */ + .max_wr = wqe_n >> rxq_data->sges_n, + /* Max number of scatter/gather elements in a WR. */ + .max_sge = 1 << rxq_data->sges_n, + .pd = priv->sh->pd, + .cq = rxq_obj->cq, + .comp_mask = IBV_WQ_FLAGS_CVLAN_STRIPPING | 0, + .create_flags = (rxq_data->vlan_strip ? + IBV_WQ_FLAGS_CVLAN_STRIPPING : 0), + }; + /* By default, FCS (CRC) is stripped by hardware. */ + if (rxq_data->crc_present) { + wq_attr.ibv.create_flags |= IBV_WQ_FLAGS_SCATTER_FCS; + wq_attr.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS; + } + if (priv->config.hw_padding) { +#if defined(HAVE_IBV_WQ_FLAG_RX_END_PADDING) + wq_attr.ibv.create_flags |= IBV_WQ_FLAG_RX_END_PADDING; + wq_attr.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS; +#elif defined(HAVE_IBV_WQ_FLAGS_PCI_WRITE_END_PADDING) + wq_attr.ibv.create_flags |= IBV_WQ_FLAGS_PCI_WRITE_END_PADDING; + wq_attr.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS; +#endif + } +#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT + wq_attr.mlx5 = (struct mlx5dv_wq_init_attr){ + .comp_mask = 0, + }; + if (mlx5_rxq_mprq_enabled(rxq_data)) { + struct mlx5dv_striding_rq_init_attr *mprq_attr = + &wq_attr.mlx5.striding_rq_attrs; + + wq_attr.mlx5.comp_mask |= MLX5DV_WQ_INIT_ATTR_MASK_STRIDING_RQ; + *mprq_attr = (struct mlx5dv_striding_rq_init_attr){ + .single_stride_log_num_of_bytes = rxq_data->strd_sz_n, + .single_wqe_log_num_of_strides = rxq_data->strd_num_n, + .two_byte_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT, + }; + } + rxq_obj->wq = mlx5_glue->dv_create_wq(priv->sh->ctx, &wq_attr.ibv, + &wq_attr.mlx5); +#else + rxq_obj->wq = mlx5_glue->create_wq(priv->sh->ctx, &wq_attr.ibv); +#endif + if (rxq_obj->wq) { + /* + * Make sure number of WRs*SGEs match expectations since a queue + * cannot allocate more than "desc" buffers. 
+ */ + if (wq_attr.ibv.max_wr != (wqe_n >> rxq_data->sges_n) || + wq_attr.ibv.max_sge != (1u << rxq_data->sges_n)) { + DRV_LOG(ERR, + "port %u Rx queue %u requested %u*%u but got" + " %u*%u WRs*SGEs", + dev->data->port_id, idx, + wqe_n >> rxq_data->sges_n, + (1 << rxq_data->sges_n), + wq_attr.ibv.max_wr, wq_attr.ibv.max_sge); + claim_zero(mlx5_glue->destroy_wq(rxq_obj->wq)); + rxq_obj->wq = NULL; + rte_errno = EINVAL; + } + } + return rxq_obj->wq; +} + +/** + * Fill common fields of create RQ attributes structure. + * + * @param rxq_data + * Pointer to Rx queue data. + * @param cqn + * CQ number to use with this RQ. + * @param rq_attr + * RQ attributes structure to fill.. + */ +static void +mlx5_devx_create_rq_attr_fill(struct mlx5_rxq_data *rxq_data, uint32_t cqn, + struct mlx5_devx_create_rq_attr *rq_attr) +{ + rq_attr->state = MLX5_RQC_STATE_RST; + rq_attr->vsd = (rxq_data->vlan_strip) ? 0 : 1; + rq_attr->cqn = cqn; + rq_attr->scatter_fcs = (rxq_data->crc_present) ? 1 : 0; +} + +/** + * Fill common fields of DevX WQ attributes structure. + * + * @param priv + * Pointer to device private data. + * @param rxq_ctrl + * Pointer to Rx queue control structure. + * @param wq_attr + * WQ attributes structure to fill.. + */ +static void +mlx5_devx_wq_attr_fill(struct mlx5_priv *priv, struct mlx5_rxq_ctrl *rxq_ctrl, + struct mlx5_devx_wq_attr *wq_attr) +{ + wq_attr->end_padding_mode = priv->config.cqe_pad ? + MLX5_WQ_END_PAD_MODE_ALIGN : + MLX5_WQ_END_PAD_MODE_NONE; + wq_attr->pd = priv->sh->pdn; + wq_attr->dbr_addr = rxq_ctrl->dbr_offset; + wq_attr->dbr_umem_id = rxq_ctrl->dbr_umem_id; + wq_attr->dbr_umem_valid = 1; + wq_attr->wq_umem_id = rxq_ctrl->wq_umem->umem_id; + wq_attr->wq_umem_valid = 1; +} + +/** + * Create a RQ object using DevX. + * + * @param dev + * Pointer to Ethernet device. + * @param idx + * Queue index in DPDK Rx queue array + * @param cqn + * CQ number to use with this RQ. + * + * @return + * The DevX object initialised, NULL otherwise and rte_errno is set. + */ +static struct mlx5_devx_obj * +mlx5_devx_rq_new(struct rte_eth_dev *dev, uint16_t idx, uint32_t cqn) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx]; + struct mlx5_rxq_ctrl *rxq_ctrl = + container_of(rxq_data, struct mlx5_rxq_ctrl, rxq); + struct mlx5_devx_create_rq_attr rq_attr; + uint32_t wqe_n = 1 << (rxq_data->elts_n - rxq_data->sges_n); + uint32_t wq_size = 0; + uint32_t wqe_size = 0; + uint32_t log_wqe_size = 0; + void *buf = NULL; + struct mlx5_devx_obj *rq; + + memset(&rq_attr, 0, sizeof(rq_attr)); + /* Fill RQ attributes. */ + rq_attr.mem_rq_type = MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE; + rq_attr.flush_in_error_en = 1; + mlx5_devx_create_rq_attr_fill(rxq_data, cqn, &rq_attr); + /* Fill WQ attributes for this RQ. */ + if (mlx5_rxq_mprq_enabled(rxq_data)) { + rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC_STRIDING_RQ; + /* + * Number of strides in each WQE: + * 512*2^single_wqe_log_num_of_strides. + */ + rq_attr.wq_attr.single_wqe_log_num_of_strides = + rxq_data->strd_num_n - + MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES; + /* Stride size = (2^single_stride_log_num_of_bytes)*64B. 
*/ + rq_attr.wq_attr.single_stride_log_num_of_bytes = + rxq_data->strd_sz_n - + MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES; + wqe_size = sizeof(struct mlx5_wqe_mprq); + } else { + rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC; + wqe_size = sizeof(struct mlx5_wqe_data_seg); + } + log_wqe_size = log2above(wqe_size) + rxq_data->sges_n; + rq_attr.wq_attr.log_wq_stride = log_wqe_size; + rq_attr.wq_attr.log_wq_sz = rxq_data->elts_n - rxq_data->sges_n; + /* Calculate and allocate WQ memory space. */ + wqe_size = 1 << log_wqe_size; /* round up power of two.*/ + wq_size = wqe_n * wqe_size; + buf = rte_calloc_socket(__func__, 1, wq_size, MLX5_WQE_BUF_ALIGNMENT, + rxq_ctrl->socket); + if (!buf) + return NULL; + rxq_data->wqes = buf; + rxq_ctrl->wq_umem = mlx5_glue->devx_umem_reg(priv->sh->ctx, + buf, wq_size, 0); + if (!rxq_ctrl->wq_umem) { + rte_free(buf); + return NULL; + } + mlx5_devx_wq_attr_fill(priv, rxq_ctrl, &rq_attr.wq_attr); + rq = mlx5_devx_cmd_create_rq(priv->sh->ctx, &rq_attr, rxq_ctrl->socket); + if (!rq) + rxq_release_rq_resources(rxq_ctrl); + return rq; +} + +/** + * Create the Rx hairpin queue object. + * + * @param dev + * Pointer to Ethernet device. + * @param idx + * Queue index in DPDK Rx queue array + * + * @return + * The hairpin DevX object initialised, NULL otherwise and rte_errno is set. + */ +static struct mlx5_rxq_obj * +mlx5_rxq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx]; + struct mlx5_rxq_ctrl *rxq_ctrl = + container_of(rxq_data, struct mlx5_rxq_ctrl, rxq); + struct mlx5_devx_create_rq_attr attr = { 0 }; + struct mlx5_rxq_obj *tmpl = NULL; + int ret = 0; + uint32_t max_wq_data; + + MLX5_ASSERT(rxq_data); + MLX5_ASSERT(!rxq_ctrl->obj); + tmpl = rte_calloc_socket(__func__, 1, sizeof(*tmpl), 0, + rxq_ctrl->socket); + if (!tmpl) { + DRV_LOG(ERR, + "port %u Rx queue %u cannot allocate verbs resources", + dev->data->port_id, rxq_data->idx); + rte_errno = ENOMEM; + goto error; + } + tmpl->type = MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN; + tmpl->rxq_ctrl = rxq_ctrl; + attr.hairpin = 1; + max_wq_data = priv->config.hca_attr.log_max_hairpin_wq_data_sz; + /* Jumbo frames > 9KB should be supported, and more packets. */ + if (priv->config.log_hp_size != (uint32_t)MLX5_ARG_UNSET) { + if (priv->config.log_hp_size > max_wq_data) { + DRV_LOG(ERR, "total data size %u power of 2 is " + "too large for hairpin", + priv->config.log_hp_size); + rte_errno = ERANGE; + return NULL; + } + attr.wq_attr.log_hairpin_data_sz = priv->config.log_hp_size; + } else { + attr.wq_attr.log_hairpin_data_sz = + (max_wq_data < MLX5_HAIRPIN_JUMBO_LOG_SIZE) ? + max_wq_data : MLX5_HAIRPIN_JUMBO_LOG_SIZE; + } + /* Set the packets number to the maximum value for performance. */ + attr.wq_attr.log_hairpin_num_packets = + attr.wq_attr.log_hairpin_data_sz - + MLX5_HAIRPIN_QUEUE_STRIDE; + tmpl->rq = mlx5_devx_cmd_create_rq(priv->sh->ctx, &attr, + rxq_ctrl->socket); + if (!tmpl->rq) { + DRV_LOG(ERR, + "port %u Rx hairpin queue %u can't create rq object", + dev->data->port_id, idx); + rte_errno = errno; + goto error; + } + DRV_LOG(DEBUG, "port %u rxq %u updated with %p", dev->data->port_id, + idx, (void *)&tmpl); + rte_atomic32_inc(&tmpl->refcnt); + LIST_INSERT_HEAD(&priv->rxqsobj, tmpl, next); + priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE; + return tmpl; +error: + ret = rte_errno; /* Save rte_errno before cleanup. */ + if (tmpl->rq) + mlx5_devx_cmd_destroy(tmpl->rq); + rte_errno = ret; /* Restore rte_errno. 
*/ + return NULL; +} + +/** + * Create the Rx queue Verbs/DevX object. + * + * @param dev + * Pointer to Ethernet device. + * @param idx + * Queue index in DPDK Rx queue array + * @param type + * Type of Rx queue object to create. + * + * @return + * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set. + */ +struct mlx5_rxq_obj * +mlx5_rxq_obj_new(struct rte_eth_dev *dev, uint16_t idx, + enum mlx5_rxq_obj_type type) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx]; + struct mlx5_rxq_ctrl *rxq_ctrl = + container_of(rxq_data, struct mlx5_rxq_ctrl, rxq); + struct ibv_wq_attr mod; + unsigned int cqe_n; + unsigned int wqe_n = 1 << rxq_data->elts_n; + struct mlx5_rxq_obj *tmpl = NULL; + struct mlx5dv_cq cq_info; + struct mlx5dv_rwq rwq; + int ret = 0; + struct mlx5dv_obj obj; + + MLX5_ASSERT(rxq_data); + MLX5_ASSERT(!rxq_ctrl->obj); + if (type == MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN) + return mlx5_rxq_obj_hairpin_new(dev, idx); + priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_RX_QUEUE; + priv->verbs_alloc_ctx.obj = rxq_ctrl; + tmpl = rte_calloc_socket(__func__, 1, sizeof(*tmpl), 0, + rxq_ctrl->socket); + if (!tmpl) { + DRV_LOG(ERR, + "port %u Rx queue %u cannot allocate verbs resources", + dev->data->port_id, rxq_data->idx); + rte_errno = ENOMEM; + goto error; + } + tmpl->type = type; + tmpl->rxq_ctrl = rxq_ctrl; + if (rxq_ctrl->irq) { + tmpl->channel = mlx5_glue->create_comp_channel(priv->sh->ctx); + if (!tmpl->channel) { + DRV_LOG(ERR, "port %u: comp channel creation failure", + dev->data->port_id); + rte_errno = ENOMEM; + goto error; + } + } + if (mlx5_rxq_mprq_enabled(rxq_data)) + cqe_n = wqe_n * (1 << rxq_data->strd_num_n) - 1; + else + cqe_n = wqe_n - 1; + tmpl->cq = mlx5_ibv_cq_new(dev, priv, rxq_data, cqe_n, tmpl); + if (!tmpl->cq) { + DRV_LOG(ERR, "port %u Rx queue %u CQ creation failure", + dev->data->port_id, idx); + rte_errno = ENOMEM; + goto error; + } + obj.cq.in = tmpl->cq; + obj.cq.out = &cq_info; + ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_CQ); + if (ret) { + rte_errno = ret; + goto error; + } + if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) { + DRV_LOG(ERR, + "port %u wrong MLX5_CQE_SIZE environment variable" + " value: it should be set to %u", + dev->data->port_id, RTE_CACHE_LINE_SIZE); + rte_errno = EINVAL; + goto error; + } + DRV_LOG(DEBUG, "port %u device_attr.max_qp_wr is %d", + dev->data->port_id, priv->sh->device_attr.orig_attr.max_qp_wr); + DRV_LOG(DEBUG, "port %u device_attr.max_sge is %d", + dev->data->port_id, priv->sh->device_attr.orig_attr.max_sge); + /* Allocate door-bell for types created with DevX. */ + if (tmpl->type != MLX5_RXQ_OBJ_TYPE_IBV) { + struct mlx5_devx_dbr_page *dbr_page; + int64_t dbr_offset; + + dbr_offset = mlx5_get_dbr(dev, &dbr_page); + if (dbr_offset < 0) + goto error; + rxq_ctrl->dbr_offset = dbr_offset; + rxq_ctrl->dbr_umem_id = dbr_page->umem->umem_id; + rxq_ctrl->dbr_umem_id_valid = 1; + rxq_data->rq_db = (uint32_t *)((uintptr_t)dbr_page->dbrs + + (uintptr_t)rxq_ctrl->dbr_offset); + } + if (tmpl->type == MLX5_RXQ_OBJ_TYPE_IBV) { + tmpl->wq = mlx5_ibv_wq_new(dev, priv, rxq_data, idx, wqe_n, + tmpl); + if (!tmpl->wq) { + DRV_LOG(ERR, "port %u Rx queue %u WQ creation failure", + dev->data->port_id, idx); + rte_errno = ENOMEM; + goto error; + } + /* Change queue state to ready. 
*/ + mod = (struct ibv_wq_attr){ + .attr_mask = IBV_WQ_ATTR_STATE, + .wq_state = IBV_WQS_RDY, + }; + ret = mlx5_glue->modify_wq(tmpl->wq, &mod); + if (ret) { + DRV_LOG(ERR, + "port %u Rx queue %u WQ state to IBV_WQS_RDY" + " failed", dev->data->port_id, idx); + rte_errno = ret; + goto error; + } + obj.rwq.in = tmpl->wq; + obj.rwq.out = &rwq; + ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_RWQ); + if (ret) { + rte_errno = ret; + goto error; + } + rxq_data->wqes = rwq.buf; + rxq_data->rq_db = rwq.dbrec; + } else if (tmpl->type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ) { + struct mlx5_devx_modify_rq_attr rq_attr; + + memset(&rq_attr, 0, sizeof(rq_attr)); + tmpl->rq = mlx5_devx_rq_new(dev, idx, cq_info.cqn); + if (!tmpl->rq) { + DRV_LOG(ERR, "port %u Rx queue %u RQ creation failure", + dev->data->port_id, idx); + rte_errno = ENOMEM; + goto error; + } + /* Change queue state to ready. */ + rq_attr.rq_state = MLX5_RQC_STATE_RST; + rq_attr.state = MLX5_RQC_STATE_RDY; + ret = mlx5_devx_cmd_modify_rq(tmpl->rq, &rq_attr); + if (ret) + goto error; + } + /* Fill the rings. */ + rxq_data->cqe_n = log2above(cq_info.cqe_cnt); + rxq_data->cq_db = cq_info.dbrec; + rxq_data->cqes = (volatile struct mlx5_cqe (*)[])(uintptr_t)cq_info.buf; + rxq_data->cq_uar = cq_info.cq_uar; + rxq_data->cqn = cq_info.cqn; + rxq_data->cq_arm_sn = 0; + mlx5_rxq_initialize(rxq_data); + rxq_data->cq_ci = 0; + DRV_LOG(DEBUG, "port %u rxq %u updated with %p", dev->data->port_id, + idx, (void *)&tmpl); + rte_atomic32_inc(&tmpl->refcnt); + LIST_INSERT_HEAD(&priv->rxqsobj, tmpl, next); + priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE; + return tmpl; +error: + if (tmpl) { + ret = rte_errno; /* Save rte_errno before cleanup. */ + if (tmpl->type == MLX5_RXQ_OBJ_TYPE_IBV && tmpl->wq) + claim_zero(mlx5_glue->destroy_wq(tmpl->wq)); + else if (tmpl->type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ && tmpl->rq) + claim_zero(mlx5_devx_cmd_destroy(tmpl->rq)); + if (tmpl->cq) + claim_zero(mlx5_glue->destroy_cq(tmpl->cq)); + if (tmpl->channel) + claim_zero(mlx5_glue->destroy_comp_channel + (tmpl->channel)); + rte_free(tmpl); + rte_errno = ret; /* Restore rte_errno. */ + } + if (type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ) + rxq_release_rq_resources(rxq_ctrl); + priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE; + return NULL; +} + +/** + * Verify the Rx queue objects list is empty + * + * @param dev + * Pointer to Ethernet device. + * + * @return + * The number of objects not released. + */ +int +mlx5_rxq_obj_verify(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + int ret = 0; + struct mlx5_rxq_obj *rxq_obj; + + LIST_FOREACH(rxq_obj, &priv->rxqsobj, next) { + DRV_LOG(DEBUG, "port %u Rx queue %u still referenced", + dev->data->port_id, rxq_obj->rxq_ctrl->rxq.idx); + ++ret; + } + return ret; +} + +/** + * Callback function to initialize mbufs for Multi-Packet RQ. + */ +static inline void +mlx5_mprq_buf_init(struct rte_mempool *mp, void *opaque_arg, + void *_m, unsigned int i __rte_unused) +{ + struct mlx5_mprq_buf *buf = _m; + struct rte_mbuf_ext_shared_info *shinfo; + unsigned int strd_n = (unsigned int)(uintptr_t)opaque_arg; + unsigned int j; + + memset(_m, 0, sizeof(*buf)); + buf->mp = mp; + rte_atomic16_set(&buf->refcnt, 1); + for (j = 0; j != strd_n; ++j) { + shinfo = &buf->shinfos[j]; + shinfo->free_cb = mlx5_mprq_buf_free_cb; + shinfo->fcb_opaque = buf; + } +} + +/** + * Free mempool of Multi-Packet RQ. + * + * @param dev + * Pointer to Ethernet device. + * + * @return + * 0 on success, negative errno value on failure. 
+ */ +int +mlx5_mprq_free_mp(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct rte_mempool *mp = priv->mprq_mp; + unsigned int i; + + if (mp == NULL) + return 0; + DRV_LOG(DEBUG, "port %u freeing mempool (%s) for Multi-Packet RQ", + dev->data->port_id, mp->name); + /* + * If a buffer in the pool has been externally attached to a mbuf and it + * is still in use by application, destroying the Rx queue can spoil + * the packet. It is unlikely to happen but if application dynamically + * creates and destroys with holding Rx packets, this can happen. + * + * TODO: It is unavoidable for now because the mempool for Multi-Packet + * RQ isn't provided by application but managed by PMD. + */ + if (!rte_mempool_full(mp)) { + DRV_LOG(ERR, + "port %u mempool for Multi-Packet RQ is still in use", + dev->data->port_id); + rte_errno = EBUSY; + return -rte_errno; + } + rte_mempool_free(mp); + /* Unset mempool for each Rx queue. */ + for (i = 0; i != priv->rxqs_n; ++i) { + struct mlx5_rxq_data *rxq = (*priv->rxqs)[i]; + + if (rxq == NULL) + continue; + rxq->mprq_mp = NULL; + } + priv->mprq_mp = NULL; + return 0; +} + +/** + * Allocate a mempool for Multi-Packet RQ. All configured Rx queues share the + * mempool. If already allocated, reuse it if there're enough elements. + * Otherwise, resize it. + * + * @param dev + * Pointer to Ethernet device. + * + * @return + * 0 on success, negative errno value on failure. + */ +int +mlx5_mprq_alloc_mp(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct rte_mempool *mp = priv->mprq_mp; + char name[RTE_MEMPOOL_NAMESIZE]; + unsigned int desc = 0; + unsigned int buf_len; + unsigned int obj_num; + unsigned int obj_size; + unsigned int strd_num_n = 0; + unsigned int strd_sz_n = 0; + unsigned int i; + unsigned int n_ibv = 0; + + if (!mlx5_mprq_enabled(dev)) + return 0; + /* Count the total number of descriptors configured. */ + for (i = 0; i != priv->rxqs_n; ++i) { + struct mlx5_rxq_data *rxq = (*priv->rxqs)[i]; + struct mlx5_rxq_ctrl *rxq_ctrl = container_of + (rxq, struct mlx5_rxq_ctrl, rxq); + + if (rxq == NULL || rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD) + continue; + n_ibv++; + desc += 1 << rxq->elts_n; + /* Get the max number of strides. */ + if (strd_num_n < rxq->strd_num_n) + strd_num_n = rxq->strd_num_n; + /* Get the max size of a stride. */ + if (strd_sz_n < rxq->strd_sz_n) + strd_sz_n = rxq->strd_sz_n; + } + MLX5_ASSERT(strd_num_n && strd_sz_n); + buf_len = (1 << strd_num_n) * (1 << strd_sz_n); + obj_size = sizeof(struct mlx5_mprq_buf) + buf_len + (1 << strd_num_n) * + sizeof(struct rte_mbuf_ext_shared_info) + RTE_PKTMBUF_HEADROOM; + /* + * Received packets can be either memcpy'd or externally referenced. In + * case that the packet is attached to an mbuf as an external buffer, as + * it isn't possible to predict how the buffers will be queued by + * application, there's no option to exactly pre-allocate needed buffers + * in advance but to speculatively prepares enough buffers. + * + * In the data path, if this Mempool is depleted, PMD will try to memcpy + * received packets to buffers provided by application (rxq->mp) until + * this Mempool gets available again. + */ + desc *= 4; + obj_num = desc + MLX5_MPRQ_MP_CACHE_SZ * n_ibv; + /* + * rte_mempool_create_empty() has sanity check to refuse large cache + * size compared to the number of elements. + * CACHE_FLUSHTHRESH_MULTIPLIER is defined in a C file, so using a + * constant number 2 instead. 
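+	 * As an illustration (hypothetical numbers): four standard Rx queues
+	 * of 1024 descriptors each give desc = 4 * 1024 * 4 = 16384 after the
+	 * overcommit above, plus one cache's worth of objects per queue, so
+	 * the RTE_MAX() below only matters for very small configurations.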
+ */ + obj_num = RTE_MAX(obj_num, MLX5_MPRQ_MP_CACHE_SZ * 2); + /* Check a mempool is already allocated and if it can be resued. */ + if (mp != NULL && mp->elt_size >= obj_size && mp->size >= obj_num) { + DRV_LOG(DEBUG, "port %u mempool %s is being reused", + dev->data->port_id, mp->name); + /* Reuse. */ + goto exit; + } else if (mp != NULL) { + DRV_LOG(DEBUG, "port %u mempool %s should be resized, freeing it", + dev->data->port_id, mp->name); + /* + * If failed to free, which means it may be still in use, no way + * but to keep using the existing one. On buffer underrun, + * packets will be memcpy'd instead of external buffer + * attachment. + */ + if (mlx5_mprq_free_mp(dev)) { + if (mp->elt_size >= obj_size) + goto exit; + else + return -rte_errno; + } + } + snprintf(name, sizeof(name), "port-%u-mprq", dev->data->port_id); + mp = rte_mempool_create(name, obj_num, obj_size, MLX5_MPRQ_MP_CACHE_SZ, + 0, NULL, NULL, mlx5_mprq_buf_init, + (void *)(uintptr_t)(1 << strd_num_n), + dev->device->numa_node, 0); + if (mp == NULL) { + DRV_LOG(ERR, + "port %u failed to allocate a mempool for" + " Multi-Packet RQ, count=%u, size=%u", + dev->data->port_id, obj_num, obj_size); + rte_errno = ENOMEM; + return -rte_errno; + } + priv->mprq_mp = mp; +exit: + /* Set mempool for each Rx queue. */ + for (i = 0; i != priv->rxqs_n; ++i) { + struct mlx5_rxq_data *rxq = (*priv->rxqs)[i]; + struct mlx5_rxq_ctrl *rxq_ctrl = container_of + (rxq, struct mlx5_rxq_ctrl, rxq); + + if (rxq == NULL || rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD) + continue; + rxq->mprq_mp = mp; + } + DRV_LOG(INFO, "port %u Multi-Packet RQ is configured", + dev->data->port_id); + return 0; +} + +#define MLX5_MAX_TCP_HDR_OFFSET ((unsigned int)(sizeof(struct rte_ether_hdr) + \ + sizeof(struct rte_vlan_hdr) * 2 + \ + sizeof(struct rte_ipv6_hdr))) +#define MAX_TCP_OPTION_SIZE 40u +#define MLX5_MAX_LRO_HEADER_FIX ((unsigned int)(MLX5_MAX_TCP_HDR_OFFSET + \ + sizeof(struct rte_tcp_hdr) + \ + MAX_TCP_OPTION_SIZE)) + +/** + * Adjust the maximum LRO massage size. + * + * @param dev + * Pointer to Ethernet device. + * @param idx + * RX queue index. + * @param max_lro_size + * The maximum size for LRO packet. + */ +static void +mlx5_max_lro_msg_size_adjust(struct rte_eth_dev *dev, uint16_t idx, + uint32_t max_lro_size) +{ + struct mlx5_priv *priv = dev->data->dev_private; + + if (priv->config.hca_attr.lro_max_msg_sz_mode == + MLX5_LRO_MAX_MSG_SIZE_START_FROM_L4 && max_lro_size > + MLX5_MAX_TCP_HDR_OFFSET) + max_lro_size -= MLX5_MAX_TCP_HDR_OFFSET; + max_lro_size = RTE_MIN(max_lro_size, MLX5_MAX_LRO_SIZE); + MLX5_ASSERT(max_lro_size >= MLX5_LRO_SEG_CHUNK_SIZE); + max_lro_size /= MLX5_LRO_SEG_CHUNK_SIZE; + if (priv->max_lro_msg_size) + priv->max_lro_msg_size = + RTE_MIN((uint32_t)priv->max_lro_msg_size, max_lro_size); + else + priv->max_lro_msg_size = max_lro_size; + DRV_LOG(DEBUG, + "port %u Rx Queue %u max LRO message size adjusted to %u bytes", + dev->data->port_id, idx, + priv->max_lro_msg_size * MLX5_LRO_SEG_CHUNK_SIZE); +} + +/** + * Create a DPDK Rx queue. + * + * @param dev + * Pointer to Ethernet device. + * @param idx + * RX queue index. + * @param desc + * Number of descriptors to configure in queue. + * @param socket + * NUMA socket on which memory must be allocated. + * + * @return + * A DPDK queue object on success, NULL otherwise and rte_errno is set. 
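+ *
+ * The per-queue offloads requested in conf are combined with the
+ * device-level rxmode offloads, and mp is the mempool the queue takes
+ * its mbufs from.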
+ */ +struct mlx5_rxq_ctrl * +mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, + unsigned int socket, const struct rte_eth_rxconf *conf, + struct rte_mempool *mp) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_rxq_ctrl *tmpl; + unsigned int mb_len = rte_pktmbuf_data_room_size(mp); + unsigned int mprq_stride_nums; + unsigned int mprq_stride_size; + unsigned int mprq_stride_cap; + struct mlx5_dev_config *config = &priv->config; + /* + * Always allocate extra slots, even if eventually + * the vector Rx will not be used. + */ + uint16_t desc_n = + desc + config->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP; + uint64_t offloads = conf->offloads | + dev->data->dev_conf.rxmode.offloads; + unsigned int lro_on_queue = !!(offloads & DEV_RX_OFFLOAD_TCP_LRO); + const int mprq_en = mlx5_check_mprq_support(dev) > 0; + unsigned int max_rx_pkt_len = lro_on_queue ? + dev->data->dev_conf.rxmode.max_lro_pkt_size : + dev->data->dev_conf.rxmode.max_rx_pkt_len; + unsigned int non_scatter_min_mbuf_size = max_rx_pkt_len + + RTE_PKTMBUF_HEADROOM; + unsigned int max_lro_size = 0; + unsigned int first_mb_free_size = mb_len - RTE_PKTMBUF_HEADROOM; + + if (non_scatter_min_mbuf_size > mb_len && !(offloads & + DEV_RX_OFFLOAD_SCATTER)) { + DRV_LOG(ERR, "port %u Rx queue %u: Scatter offload is not" + " configured and no enough mbuf space(%u) to contain " + "the maximum RX packet length(%u) with head-room(%u)", + dev->data->port_id, idx, mb_len, max_rx_pkt_len, + RTE_PKTMBUF_HEADROOM); + rte_errno = ENOSPC; + return NULL; + } + tmpl = rte_calloc_socket("RXQ", 1, + sizeof(*tmpl) + + desc_n * sizeof(struct rte_mbuf *), + 0, socket); + if (!tmpl) { + rte_errno = ENOMEM; + return NULL; + } + tmpl->type = MLX5_RXQ_TYPE_STANDARD; + if (mlx5_mr_btree_init(&tmpl->rxq.mr_ctrl.cache_bh, + MLX5_MR_BTREE_CACHE_N, socket)) { + /* rte_errno is already set. */ + goto error; + } + tmpl->socket = socket; + if (dev->data->dev_conf.intr_conf.rxq) + tmpl->irq = 1; + mprq_stride_nums = config->mprq.stride_num_n ? + config->mprq.stride_num_n : MLX5_MPRQ_STRIDE_NUM_N; + mprq_stride_size = non_scatter_min_mbuf_size <= + (1U << config->mprq.max_stride_size_n) ? + log2above(non_scatter_min_mbuf_size) : MLX5_MPRQ_STRIDE_SIZE_N; + mprq_stride_cap = (config->mprq.stride_num_n ? + (1U << config->mprq.stride_num_n) : (1U << mprq_stride_nums)) * + (config->mprq.stride_size_n ? + (1U << config->mprq.stride_size_n) : (1U << mprq_stride_size)); + /* + * This Rx queue can be configured as a Multi-Packet RQ if all of the + * following conditions are met: + * - MPRQ is enabled. + * - The number of descs is more than the number of strides. + * - max_rx_pkt_len plus overhead is less than the max size + * of a stride or mprq_stride_size is specified by a user. + * Need to nake sure that there are enough stides to encap + * the maximum packet size in case mprq_stride_size is set. + * Otherwise, enable Rx scatter if necessary. + */ + if (mprq_en && desc > (1U << mprq_stride_nums) && + (non_scatter_min_mbuf_size <= + (1U << config->mprq.max_stride_size_n) || + (config->mprq.stride_size_n && + non_scatter_min_mbuf_size <= mprq_stride_cap))) { + /* TODO: Rx scatter isn't supported yet. */ + tmpl->rxq.sges_n = 0; + /* Trim the number of descs needed. */ + desc >>= mprq_stride_nums; + tmpl->rxq.strd_num_n = config->mprq.stride_num_n ? + config->mprq.stride_num_n : mprq_stride_nums; + tmpl->rxq.strd_sz_n = config->mprq.stride_size_n ? 
+ config->mprq.stride_size_n : mprq_stride_size; + tmpl->rxq.strd_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT; + tmpl->rxq.strd_scatter_en = + !!(offloads & DEV_RX_OFFLOAD_SCATTER); + tmpl->rxq.mprq_max_memcpy_len = RTE_MIN(first_mb_free_size, + config->mprq.max_memcpy_len); + max_lro_size = RTE_MIN(max_rx_pkt_len, + (1u << tmpl->rxq.strd_num_n) * + (1u << tmpl->rxq.strd_sz_n)); + DRV_LOG(DEBUG, + "port %u Rx queue %u: Multi-Packet RQ is enabled" + " strd_num_n = %u, strd_sz_n = %u", + dev->data->port_id, idx, + tmpl->rxq.strd_num_n, tmpl->rxq.strd_sz_n); + } else if (max_rx_pkt_len <= first_mb_free_size) { + tmpl->rxq.sges_n = 0; + max_lro_size = max_rx_pkt_len; + } else if (offloads & DEV_RX_OFFLOAD_SCATTER) { + unsigned int size = non_scatter_min_mbuf_size; + unsigned int sges_n; + + if (lro_on_queue && first_mb_free_size < + MLX5_MAX_LRO_HEADER_FIX) { + DRV_LOG(ERR, "Not enough space in the first segment(%u)" + " to include the max header size(%u) for LRO", + first_mb_free_size, MLX5_MAX_LRO_HEADER_FIX); + rte_errno = ENOTSUP; + goto error; + } + /* + * Determine the number of SGEs needed for a full packet + * and round it to the next power of two. + */ + sges_n = log2above((size / mb_len) + !!(size % mb_len)); + if (sges_n > MLX5_MAX_LOG_RQ_SEGS) { + DRV_LOG(ERR, + "port %u too many SGEs (%u) needed to handle" + " requested maximum packet size %u, the maximum" + " supported are %u", dev->data->port_id, + 1 << sges_n, max_rx_pkt_len, + 1u << MLX5_MAX_LOG_RQ_SEGS); + rte_errno = ENOTSUP; + goto error; + } + tmpl->rxq.sges_n = sges_n; + max_lro_size = max_rx_pkt_len; + } + if (config->mprq.enabled && !mlx5_rxq_mprq_enabled(&tmpl->rxq)) + DRV_LOG(WARNING, + "port %u MPRQ is requested but cannot be enabled\n" + " (requested: pkt_sz = %u, desc_num = %u," + " rxq_num = %u, stride_sz = %u, stride_num = %u\n" + " supported: min_rxqs_num = %u," + " min_stride_sz = %u, max_stride_sz = %u).", + dev->data->port_id, non_scatter_min_mbuf_size, + desc, priv->rxqs_n, + config->mprq.stride_size_n ? + (1U << config->mprq.stride_size_n) : + (1U << mprq_stride_size), + config->mprq.stride_num_n ? + (1U << config->mprq.stride_num_n) : + (1U << mprq_stride_nums), + config->mprq.min_rxqs_num, + (1U << config->mprq.min_stride_size_n), + (1U << config->mprq.max_stride_size_n)); + DRV_LOG(DEBUG, "port %u maximum number of segments per packet: %u", + dev->data->port_id, 1 << tmpl->rxq.sges_n); + if (desc % (1 << tmpl->rxq.sges_n)) { + DRV_LOG(ERR, + "port %u number of Rx queue descriptors (%u) is not a" + " multiple of SGEs per packet (%u)", + dev->data->port_id, + desc, + 1 << tmpl->rxq.sges_n); + rte_errno = EINVAL; + goto error; + } + mlx5_max_lro_msg_size_adjust(dev, idx, max_lro_size); + /* Toggle RX checksum offload if hardware supports it. */ + tmpl->rxq.csum = !!(offloads & DEV_RX_OFFLOAD_CHECKSUM); + tmpl->rxq.hw_timestamp = !!(offloads & DEV_RX_OFFLOAD_TIMESTAMP); + /* Configure VLAN stripping. */ + tmpl->rxq.vlan_strip = !!(offloads & DEV_RX_OFFLOAD_VLAN_STRIP); + /* By default, FCS (CRC) is stripped by hardware. */ + tmpl->rxq.crc_present = 0; + tmpl->rxq.lro = lro_on_queue; + if (offloads & DEV_RX_OFFLOAD_KEEP_CRC) { + if (config->hw_fcs_strip) { + /* + * RQs used for LRO-enabled TIRs should not be + * configured to scatter the FCS. 
+ */ + if (lro_on_queue) + DRV_LOG(WARNING, + "port %u CRC stripping has been " + "disabled but will still be performed " + "by hardware, because LRO is enabled", + dev->data->port_id); + else + tmpl->rxq.crc_present = 1; + } else { + DRV_LOG(WARNING, + "port %u CRC stripping has been disabled but will" + " still be performed by hardware, make sure MLNX_OFED" + " and firmware are up to date", + dev->data->port_id); + } + } + DRV_LOG(DEBUG, + "port %u CRC stripping is %s, %u bytes will be subtracted from" + " incoming frames to hide it", + dev->data->port_id, + tmpl->rxq.crc_present ? "disabled" : "enabled", + tmpl->rxq.crc_present << 2); + /* Save port ID. */ + tmpl->rxq.rss_hash = !!priv->rss_conf.rss_hf && + (!!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS)); + tmpl->rxq.port_id = dev->data->port_id; + tmpl->priv = priv; + tmpl->rxq.mp = mp; + tmpl->rxq.elts_n = log2above(desc); + tmpl->rxq.rq_repl_thresh = + MLX5_VPMD_RXQ_RPLNSH_THRESH(1 << tmpl->rxq.elts_n); + tmpl->rxq.elts = + (struct rte_mbuf *(*)[1 << tmpl->rxq.elts_n])(tmpl + 1); +#ifndef RTE_ARCH_64 + tmpl->rxq.uar_lock_cq = &priv->uar_lock_cq; +#endif + tmpl->rxq.idx = idx; + rte_atomic32_inc(&tmpl->refcnt); + LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next); + return tmpl; +error: + rte_free(tmpl); + return NULL; +} + +/** + * Create a DPDK Rx hairpin queue. + * + * @param dev + * Pointer to Ethernet device. + * @param idx + * RX queue index. + * @param desc + * Number of descriptors to configure in queue. + * @param hairpin_conf + * The hairpin binding configuration. + * + * @return + * A DPDK queue object on success, NULL otherwise and rte_errno is set. + */ +struct mlx5_rxq_ctrl * +mlx5_rxq_hairpin_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, + const struct rte_eth_hairpin_conf *hairpin_conf) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_rxq_ctrl *tmpl; + + tmpl = rte_calloc_socket("RXQ", 1, sizeof(*tmpl), 0, SOCKET_ID_ANY); + if (!tmpl) { + rte_errno = ENOMEM; + return NULL; + } + tmpl->type = MLX5_RXQ_TYPE_HAIRPIN; + tmpl->socket = SOCKET_ID_ANY; + tmpl->rxq.rss_hash = 0; + tmpl->rxq.port_id = dev->data->port_id; + tmpl->priv = priv; + tmpl->rxq.mp = NULL; + tmpl->rxq.elts_n = log2above(desc); + tmpl->rxq.elts = NULL; + tmpl->rxq.mr_ctrl.cache_bh = (struct mlx5_mr_btree) { 0 }; + tmpl->hairpin_conf = *hairpin_conf; + tmpl->rxq.idx = idx; + rte_atomic32_inc(&tmpl->refcnt); + LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next); + return tmpl; +} + +/** + * Get a Rx queue. + * + * @param dev + * Pointer to Ethernet device. + * @param idx + * RX queue index. + * + * @return + * A pointer to the queue if it exists, NULL otherwise. + */ +struct mlx5_rxq_ctrl * +mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_rxq_ctrl *rxq_ctrl = NULL; + + if ((*priv->rxqs)[idx]) { + rxq_ctrl = container_of((*priv->rxqs)[idx], + struct mlx5_rxq_ctrl, + rxq); + mlx5_rxq_obj_get(dev, idx); + rte_atomic32_inc(&rxq_ctrl->refcnt); + } + return rxq_ctrl; +} + +/** + * Release a Rx queue. + * + * @param dev + * Pointer to Ethernet device. + * @param idx + * RX queue index. + * + * @return + * 1 while a reference on it exists, 0 when freed. 
+ */ +int +mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_rxq_ctrl *rxq_ctrl; + + if (!(*priv->rxqs)[idx]) + return 0; + rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq); + MLX5_ASSERT(rxq_ctrl->priv); + if (rxq_ctrl->obj && !mlx5_rxq_obj_release(rxq_ctrl->obj)) + rxq_ctrl->obj = NULL; + if (rte_atomic32_dec_and_test(&rxq_ctrl->refcnt)) { + if (rxq_ctrl->dbr_umem_id_valid) + claim_zero(mlx5_release_dbr(dev, rxq_ctrl->dbr_umem_id, + rxq_ctrl->dbr_offset)); + if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD) + mlx5_mr_btree_free(&rxq_ctrl->rxq.mr_ctrl.cache_bh); + LIST_REMOVE(rxq_ctrl, next); + rte_free(rxq_ctrl); + (*priv->rxqs)[idx] = NULL; + return 0; + } + return 1; +} + +/** + * Verify the Rx Queue list is empty + * + * @param dev + * Pointer to Ethernet device. + * + * @return + * The number of object not released. + */ +int +mlx5_rxq_verify(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_rxq_ctrl *rxq_ctrl; + int ret = 0; + + LIST_FOREACH(rxq_ctrl, &priv->rxqsctrl, next) { + DRV_LOG(DEBUG, "port %u Rx Queue %u still referenced", + dev->data->port_id, rxq_ctrl->rxq.idx); + ++ret; + } + return ret; +} + +/** + * Get a Rx queue type. + * + * @param dev + * Pointer to Ethernet device. + * @param idx + * Rx queue index. + * + * @return + * The Rx queue type. + */ +enum mlx5_rxq_type +mlx5_rxq_get_type(struct rte_eth_dev *dev, uint16_t idx) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_rxq_ctrl *rxq_ctrl = NULL; + + if (idx < priv->rxqs_n && (*priv->rxqs)[idx]) { + rxq_ctrl = container_of((*priv->rxqs)[idx], + struct mlx5_rxq_ctrl, + rxq); + return rxq_ctrl->type; + } + return MLX5_RXQ_TYPE_UNDEFINED; +} + +/** + * Create an indirection table. + * + * @param dev + * Pointer to Ethernet device. + * @param queues + * Queues entering in the indirection table. + * @param queues_n + * Number of queues in the array. + * + * @return + * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set. + */ +static struct mlx5_ind_table_obj * +mlx5_ind_table_obj_new(struct rte_eth_dev *dev, const uint16_t *queues, + uint32_t queues_n, enum mlx5_ind_tbl_type type) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_ind_table_obj *ind_tbl; + unsigned int i = 0, j = 0, k = 0; + + ind_tbl = rte_calloc(__func__, 1, sizeof(*ind_tbl) + + queues_n * sizeof(uint16_t), 0); + if (!ind_tbl) { + rte_errno = ENOMEM; + return NULL; + } + ind_tbl->type = type; + if (ind_tbl->type == MLX5_IND_TBL_TYPE_IBV) { + const unsigned int wq_n = rte_is_power_of_2(queues_n) ? + log2above(queues_n) : + log2above(priv->config.ind_table_max_size); + struct ibv_wq *wq[1 << wq_n]; + + for (i = 0; i != queues_n; ++i) { + struct mlx5_rxq_ctrl *rxq = mlx5_rxq_get(dev, + queues[i]); + if (!rxq) + goto error; + wq[i] = rxq->obj->wq; + ind_tbl->queues[i] = queues[i]; + } + ind_tbl->queues_n = queues_n; + /* Finalise indirection table. */ + k = i; /* Retain value of i for use in error case. 
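+		 * The Verbs indirection table size must be a power of two, so
+		 * the remaining (1 << wq_n) - queues_n slots are filled by
+		 * cycling over the WQs already collected.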
*/ + for (j = 0; k != (unsigned int)(1 << wq_n); ++k, ++j) + wq[k] = wq[j]; + ind_tbl->ind_table = mlx5_glue->create_rwq_ind_table + (priv->sh->ctx, + &(struct ibv_rwq_ind_table_init_attr){ + .log_ind_tbl_size = wq_n, + .ind_tbl = wq, + .comp_mask = 0, + }); + if (!ind_tbl->ind_table) { + rte_errno = errno; + goto error; + } + } else { /* ind_tbl->type == MLX5_IND_TBL_TYPE_DEVX */ + struct mlx5_devx_rqt_attr *rqt_attr = NULL; + const unsigned int rqt_n = + 1 << (rte_is_power_of_2(queues_n) ? + log2above(queues_n) : + log2above(priv->config.ind_table_max_size)); + + rqt_attr = rte_calloc(__func__, 1, sizeof(*rqt_attr) + + rqt_n * sizeof(uint32_t), 0); + if (!rqt_attr) { + DRV_LOG(ERR, "port %u cannot allocate RQT resources", + dev->data->port_id); + rte_errno = ENOMEM; + goto error; + } + rqt_attr->rqt_max_size = priv->config.ind_table_max_size; + rqt_attr->rqt_actual_size = rqt_n; + for (i = 0; i != queues_n; ++i) { + struct mlx5_rxq_ctrl *rxq = mlx5_rxq_get(dev, + queues[i]); + if (!rxq) + goto error; + rqt_attr->rq_list[i] = rxq->obj->rq->id; + ind_tbl->queues[i] = queues[i]; + } + k = i; /* Retain value of i for use in error case. */ + for (j = 0; k != rqt_n; ++k, ++j) + rqt_attr->rq_list[k] = rqt_attr->rq_list[j]; + ind_tbl->rqt = mlx5_devx_cmd_create_rqt(priv->sh->ctx, + rqt_attr); + rte_free(rqt_attr); + if (!ind_tbl->rqt) { + DRV_LOG(ERR, "port %u cannot create DevX RQT", + dev->data->port_id); + rte_errno = errno; + goto error; + } + ind_tbl->queues_n = queues_n; + } + rte_atomic32_inc(&ind_tbl->refcnt); + LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next); + return ind_tbl; +error: + for (j = 0; j < i; j++) + mlx5_rxq_release(dev, ind_tbl->queues[j]); + rte_free(ind_tbl); + DEBUG("port %u cannot create indirection table", dev->data->port_id); + return NULL; +} + +/** + * Get an indirection table. + * + * @param dev + * Pointer to Ethernet device. + * @param queues + * Queues entering in the indirection table. + * @param queues_n + * Number of queues in the array. + * + * @return + * An indirection table if found. + */ +static struct mlx5_ind_table_obj * +mlx5_ind_table_obj_get(struct rte_eth_dev *dev, const uint16_t *queues, + uint32_t queues_n) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_ind_table_obj *ind_tbl; + + LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) { + if ((ind_tbl->queues_n == queues_n) && + (memcmp(ind_tbl->queues, queues, + ind_tbl->queues_n * sizeof(ind_tbl->queues[0])) + == 0)) + break; + } + if (ind_tbl) { + unsigned int i; + + rte_atomic32_inc(&ind_tbl->refcnt); + for (i = 0; i != ind_tbl->queues_n; ++i) + mlx5_rxq_get(dev, ind_tbl->queues[i]); + } + return ind_tbl; +} + +/** + * Release an indirection table. + * + * @param dev + * Pointer to Ethernet device. + * @param ind_table + * Indirection table to release. + * + * @return + * 1 while a reference on it exists, 0 when freed. 
+ */ +static int +mlx5_ind_table_obj_release(struct rte_eth_dev *dev, + struct mlx5_ind_table_obj *ind_tbl) +{ + unsigned int i; + + if (rte_atomic32_dec_and_test(&ind_tbl->refcnt)) { + if (ind_tbl->type == MLX5_IND_TBL_TYPE_IBV) + claim_zero(mlx5_glue->destroy_rwq_ind_table + (ind_tbl->ind_table)); + else if (ind_tbl->type == MLX5_IND_TBL_TYPE_DEVX) + claim_zero(mlx5_devx_cmd_destroy(ind_tbl->rqt)); + } + for (i = 0; i != ind_tbl->queues_n; ++i) + claim_nonzero(mlx5_rxq_release(dev, ind_tbl->queues[i])); + if (!rte_atomic32_read(&ind_tbl->refcnt)) { + LIST_REMOVE(ind_tbl, next); + rte_free(ind_tbl); + return 0; + } + return 1; +} + +/** + * Verify the Rx Queue list is empty + * + * @param dev + * Pointer to Ethernet device. + * + * @return + * The number of object not released. + */ +int +mlx5_ind_table_obj_verify(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_ind_table_obj *ind_tbl; + int ret = 0; + + LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) { + DRV_LOG(DEBUG, + "port %u indirection table obj %p still referenced", + dev->data->port_id, (void *)ind_tbl); + ++ret; + } + return ret; +} + +/** + * Create an Rx Hash queue. + * + * @param dev + * Pointer to Ethernet device. + * @param rss_key + * RSS key for the Rx hash queue. + * @param rss_key_len + * RSS key length. + * @param hash_fields + * Verbs protocol hash field to make the RSS on. + * @param queues + * Queues entering in hash queue. In case of empty hash_fields only the + * first queue index will be taken for the indirection table. + * @param queues_n + * Number of queues. + * @param tunnel + * Tunnel type. + * + * @return + * The Verbs/DevX object initialised index, 0 otherwise and rte_errno is set. + */ +uint32_t +mlx5_hrxq_new(struct rte_eth_dev *dev, + const uint8_t *rss_key, uint32_t rss_key_len, + uint64_t hash_fields, + const uint16_t *queues, uint32_t queues_n, + int tunnel __rte_unused) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_hrxq *hrxq; + uint32_t hrxq_idx = 0; + struct ibv_qp *qp = NULL; + struct mlx5_ind_table_obj *ind_tbl; + int err; + struct mlx5_devx_obj *tir = NULL; + struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[queues[0]]; + struct mlx5_rxq_ctrl *rxq_ctrl = + container_of(rxq_data, struct mlx5_rxq_ctrl, rxq); + + queues_n = hash_fields ? queues_n : 1; + ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n); + if (!ind_tbl) { + enum mlx5_ind_tbl_type type; + + type = rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_IBV ? + MLX5_IND_TBL_TYPE_IBV : MLX5_IND_TBL_TYPE_DEVX; + ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n, type); + } + if (!ind_tbl) { + rte_errno = ENOMEM; + return 0; + } + if (ind_tbl->type == MLX5_IND_TBL_TYPE_IBV) { +#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT + struct mlx5dv_qp_init_attr qp_init_attr; + + memset(&qp_init_attr, 0, sizeof(qp_init_attr)); + if (tunnel) { + qp_init_attr.comp_mask = + MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS; + qp_init_attr.create_flags = + MLX5DV_QP_CREATE_TUNNEL_OFFLOADS; + } +#ifdef HAVE_IBV_FLOW_DV_SUPPORT + if (dev->data->dev_conf.lpbk_mode) { + /* + * Allow packet sent from NIC loop back + * w/o source MAC check. 
+ */ + qp_init_attr.comp_mask |= + MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS; + qp_init_attr.create_flags |= + MLX5DV_QP_CREATE_TIR_ALLOW_SELF_LOOPBACK_UC; + } +#endif + qp = mlx5_glue->dv_create_qp + (priv->sh->ctx, + &(struct ibv_qp_init_attr_ex){ + .qp_type = IBV_QPT_RAW_PACKET, + .comp_mask = + IBV_QP_INIT_ATTR_PD | + IBV_QP_INIT_ATTR_IND_TABLE | + IBV_QP_INIT_ATTR_RX_HASH, + .rx_hash_conf = (struct ibv_rx_hash_conf){ + .rx_hash_function = + IBV_RX_HASH_FUNC_TOEPLITZ, + .rx_hash_key_len = rss_key_len, + .rx_hash_key = + (void *)(uintptr_t)rss_key, + .rx_hash_fields_mask = hash_fields, + }, + .rwq_ind_tbl = ind_tbl->ind_table, + .pd = priv->sh->pd, + }, + &qp_init_attr); +#else + qp = mlx5_glue->create_qp_ex + (priv->sh->ctx, + &(struct ibv_qp_init_attr_ex){ + .qp_type = IBV_QPT_RAW_PACKET, + .comp_mask = + IBV_QP_INIT_ATTR_PD | + IBV_QP_INIT_ATTR_IND_TABLE | + IBV_QP_INIT_ATTR_RX_HASH, + .rx_hash_conf = (struct ibv_rx_hash_conf){ + .rx_hash_function = + IBV_RX_HASH_FUNC_TOEPLITZ, + .rx_hash_key_len = rss_key_len, + .rx_hash_key = + (void *)(uintptr_t)rss_key, + .rx_hash_fields_mask = hash_fields, + }, + .rwq_ind_tbl = ind_tbl->ind_table, + .pd = priv->sh->pd, + }); +#endif + if (!qp) { + rte_errno = errno; + goto error; + } + } else { /* ind_tbl->type == MLX5_IND_TBL_TYPE_DEVX */ + struct mlx5_devx_tir_attr tir_attr; + uint32_t i; + uint32_t lro = 1; + + /* Enable TIR LRO only if all the queues were configured for. */ + for (i = 0; i < queues_n; ++i) { + if (!(*priv->rxqs)[queues[i]]->lro) { + lro = 0; + break; + } + } + memset(&tir_attr, 0, sizeof(tir_attr)); + tir_attr.disp_type = MLX5_TIRC_DISP_TYPE_INDIRECT; + tir_attr.rx_hash_fn = MLX5_RX_HASH_FN_TOEPLITZ; + tir_attr.tunneled_offload_en = !!tunnel; + /* If needed, translate hash_fields bitmap to PRM format. */ + if (hash_fields) { +#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT + struct mlx5_rx_hash_field_select *rx_hash_field_select = + hash_fields & IBV_RX_HASH_INNER ? + &tir_attr.rx_hash_field_selector_inner : + &tir_attr.rx_hash_field_selector_outer; +#else + struct mlx5_rx_hash_field_select *rx_hash_field_select = + &tir_attr.rx_hash_field_selector_outer; +#endif + + /* 1 bit: 0: IPv4, 1: IPv6. */ + rx_hash_field_select->l3_prot_type = + !!(hash_fields & MLX5_IPV6_IBV_RX_HASH); + /* 1 bit: 0: TCP, 1: UDP. */ + rx_hash_field_select->l4_prot_type = + !!(hash_fields & MLX5_UDP_IBV_RX_HASH); + /* Bitmask which sets which fields to use in RX Hash. 
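+			 * Each MLX5_*_IBV_RX_HASH bit present in hash_fields
+			 * enables the corresponding SRC_IP/DST_IP/L4_SPORT/
+			 * L4_DPORT selector bit expected by the PRM, as
+			 * computed below.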
*/ + rx_hash_field_select->selected_fields = + ((!!(hash_fields & MLX5_L3_SRC_IBV_RX_HASH)) << + MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_SRC_IP) | + (!!(hash_fields & MLX5_L3_DST_IBV_RX_HASH)) << + MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_DST_IP | + (!!(hash_fields & MLX5_L4_SRC_IBV_RX_HASH)) << + MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_SPORT | + (!!(hash_fields & MLX5_L4_DST_IBV_RX_HASH)) << + MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_DPORT; + } + if (rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN) + tir_attr.transport_domain = priv->sh->td->id; + else + tir_attr.transport_domain = priv->sh->tdn; + memcpy(tir_attr.rx_hash_toeplitz_key, rss_key, + MLX5_RSS_HASH_KEY_LEN); + tir_attr.indirect_table = ind_tbl->rqt->id; + if (dev->data->dev_conf.lpbk_mode) + tir_attr.self_lb_block = + MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST; + if (lro) { + tir_attr.lro_timeout_period_usecs = + priv->config.lro.timeout; + tir_attr.lro_max_msg_sz = priv->max_lro_msg_size; + tir_attr.lro_enable_mask = + MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO | + MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO; + } + tir = mlx5_devx_cmd_create_tir(priv->sh->ctx, &tir_attr); + if (!tir) { + DRV_LOG(ERR, "port %u cannot create DevX TIR", + dev->data->port_id); + rte_errno = errno; + goto error; + } + } + hrxq = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_HRXQ], &hrxq_idx); + if (!hrxq) + goto error; + hrxq->ind_table = ind_tbl; + if (ind_tbl->type == MLX5_IND_TBL_TYPE_IBV) { + hrxq->qp = qp; +#ifdef HAVE_IBV_FLOW_DV_SUPPORT + hrxq->action = + mlx5_glue->dv_create_flow_action_dest_ibv_qp(hrxq->qp); + if (!hrxq->action) { + rte_errno = errno; + goto error; + } +#endif + } else { /* ind_tbl->type == MLX5_IND_TBL_TYPE_DEVX */ + hrxq->tir = tir; +#ifdef HAVE_IBV_FLOW_DV_SUPPORT + hrxq->action = mlx5_glue->dv_create_flow_action_dest_devx_tir + (hrxq->tir->obj); + if (!hrxq->action) { + rte_errno = errno; + goto error; + } +#endif + } + hrxq->rss_key_len = rss_key_len; + hrxq->hash_fields = hash_fields; + memcpy(hrxq->rss_key, rss_key, rss_key_len); + rte_atomic32_inc(&hrxq->refcnt); + ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_HRXQ], &priv->hrxqs, hrxq_idx, + hrxq, next); + return hrxq_idx; +error: + err = rte_errno; /* Save rte_errno before cleanup. */ + mlx5_ind_table_obj_release(dev, ind_tbl); + if (qp) + claim_zero(mlx5_glue->destroy_qp(qp)); + else if (tir) + claim_zero(mlx5_devx_cmd_destroy(tir)); + rte_errno = err; /* Restore rte_errno. */ + return 0; +} + +/** + * Get an Rx Hash queue. + * + * @param dev + * Pointer to Ethernet device. + * @param rss_conf + * RSS configuration for the Rx hash queue. + * @param queues + * Queues entering in hash queue. In case of empty hash_fields only the + * first queue index will be taken for the indirection table. + * @param queues_n + * Number of queues. + * + * @return + * An hash Rx queue index on success. + */ +uint32_t +mlx5_hrxq_get(struct rte_eth_dev *dev, + const uint8_t *rss_key, uint32_t rss_key_len, + uint64_t hash_fields, + const uint16_t *queues, uint32_t queues_n) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_hrxq *hrxq; + uint32_t idx; + + queues_n = hash_fields ? 
queues_n : 1; + ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_HRXQ], priv->hrxqs, idx, + hrxq, next) { + struct mlx5_ind_table_obj *ind_tbl; + + if (hrxq->rss_key_len != rss_key_len) + continue; + if (memcmp(hrxq->rss_key, rss_key, rss_key_len)) + continue; + if (hrxq->hash_fields != hash_fields) + continue; + ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n); + if (!ind_tbl) + continue; + if (ind_tbl != hrxq->ind_table) { + mlx5_ind_table_obj_release(dev, ind_tbl); + continue; + } + rte_atomic32_inc(&hrxq->refcnt); + return idx; + } + return 0; +} + +/** + * Release the hash Rx queue. + * + * @param dev + * Pointer to Ethernet device. + * @param hrxq + * Index to Hash Rx queue to release. + * + * @return + * 1 while a reference on it exists, 0 when freed. + */ +int +mlx5_hrxq_release(struct rte_eth_dev *dev, uint32_t hrxq_idx) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_hrxq *hrxq; + + hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx); + if (!hrxq) + return 0; + if (rte_atomic32_dec_and_test(&hrxq->refcnt)) { +#ifdef HAVE_IBV_FLOW_DV_SUPPORT + mlx5_glue->destroy_flow_action(hrxq->action); +#endif + if (hrxq->ind_table->type == MLX5_IND_TBL_TYPE_IBV) + claim_zero(mlx5_glue->destroy_qp(hrxq->qp)); + else /* hrxq->ind_table->type == MLX5_IND_TBL_TYPE_DEVX */ + claim_zero(mlx5_devx_cmd_destroy(hrxq->tir)); + mlx5_ind_table_obj_release(dev, hrxq->ind_table); + ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_HRXQ], &priv->hrxqs, + hrxq_idx, hrxq, next); + mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx); + return 0; + } + claim_nonzero(mlx5_ind_table_obj_release(dev, hrxq->ind_table)); + return 1; +} + +/** + * Verify the Rx Queue list is empty + * + * @param dev + * Pointer to Ethernet device. + * + * @return + * The number of object not released. + */ +int +mlx5_hrxq_verify(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_hrxq *hrxq; + uint32_t idx; + int ret = 0; + + ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_HRXQ], priv->hrxqs, idx, + hrxq, next) { + DRV_LOG(DEBUG, + "port %u hash Rx queue %p still referenced", + dev->data->port_id, (void *)hrxq); + ++ret; + } + return ret; +} + +/** + * Create a drop Rx queue Verbs/DevX object. + * + * @param dev + * Pointer to Ethernet device. + * + * @return + * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set. 
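+ *
+ * The drop queue is created once per port and cached in
+ * priv->drop_queue.rxq, so repeated calls return the same object.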
+ */ +static struct mlx5_rxq_obj * +mlx5_rxq_obj_drop_new(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct ibv_context *ctx = priv->sh->ctx; + struct ibv_cq *cq; + struct ibv_wq *wq = NULL; + struct mlx5_rxq_obj *rxq; + + if (priv->drop_queue.rxq) + return priv->drop_queue.rxq; + cq = mlx5_glue->create_cq(ctx, 1, NULL, NULL, 0); + if (!cq) { + DEBUG("port %u cannot allocate CQ for drop queue", + dev->data->port_id); + rte_errno = errno; + goto error; + } + wq = mlx5_glue->create_wq(ctx, + &(struct ibv_wq_init_attr){ + .wq_type = IBV_WQT_RQ, + .max_wr = 1, + .max_sge = 1, + .pd = priv->sh->pd, + .cq = cq, + }); + if (!wq) { + DEBUG("port %u cannot allocate WQ for drop queue", + dev->data->port_id); + rte_errno = errno; + goto error; + } + rxq = rte_calloc(__func__, 1, sizeof(*rxq), 0); + if (!rxq) { + DEBUG("port %u cannot allocate drop Rx queue memory", + dev->data->port_id); + rte_errno = ENOMEM; + goto error; + } + rxq->cq = cq; + rxq->wq = wq; + priv->drop_queue.rxq = rxq; + return rxq; +error: + if (wq) + claim_zero(mlx5_glue->destroy_wq(wq)); + if (cq) + claim_zero(mlx5_glue->destroy_cq(cq)); + return NULL; +} + +/** + * Release a drop Rx queue Verbs/DevX object. + * + * @param dev + * Pointer to Ethernet device. + * + * @return + * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set. + */ +static void +mlx5_rxq_obj_drop_release(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_rxq_obj *rxq = priv->drop_queue.rxq; + + if (rxq->wq) + claim_zero(mlx5_glue->destroy_wq(rxq->wq)); + if (rxq->cq) + claim_zero(mlx5_glue->destroy_cq(rxq->cq)); + rte_free(rxq); + priv->drop_queue.rxq = NULL; +} + +/** + * Create a drop indirection table. + * + * @param dev + * Pointer to Ethernet device. + * + * @return + * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set. + */ +static struct mlx5_ind_table_obj * +mlx5_ind_table_obj_drop_new(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_ind_table_obj *ind_tbl; + struct mlx5_rxq_obj *rxq; + struct mlx5_ind_table_obj tmpl; + + rxq = mlx5_rxq_obj_drop_new(dev); + if (!rxq) + return NULL; + tmpl.ind_table = mlx5_glue->create_rwq_ind_table + (priv->sh->ctx, + &(struct ibv_rwq_ind_table_init_attr){ + .log_ind_tbl_size = 0, + .ind_tbl = &rxq->wq, + .comp_mask = 0, + }); + if (!tmpl.ind_table) { + DEBUG("port %u cannot allocate indirection table for drop" + " queue", + dev->data->port_id); + rte_errno = errno; + goto error; + } + ind_tbl = rte_calloc(__func__, 1, sizeof(*ind_tbl), 0); + if (!ind_tbl) { + rte_errno = ENOMEM; + goto error; + } + ind_tbl->ind_table = tmpl.ind_table; + return ind_tbl; +error: + mlx5_rxq_obj_drop_release(dev); + return NULL; +} + +/** + * Release a drop indirection table. + * + * @param dev + * Pointer to Ethernet device. + */ +static void +mlx5_ind_table_obj_drop_release(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_ind_table_obj *ind_tbl = priv->drop_queue.hrxq->ind_table; + + claim_zero(mlx5_glue->destroy_rwq_ind_table(ind_tbl->ind_table)); + mlx5_rxq_obj_drop_release(dev); + rte_free(ind_tbl); + priv->drop_queue.hrxq->ind_table = NULL; +} + +/** + * Create a drop Rx Hash queue. + * + * @param dev + * Pointer to Ethernet device. + * + * @return + * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set. 
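+ *
+ * If a drop hash Rx queue already exists, its reference counter is
+ * incremented and the cached object is returned instead of creating a
+ * new one.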
+ */ +struct mlx5_hrxq * +mlx5_hrxq_drop_new(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_ind_table_obj *ind_tbl = NULL; + struct ibv_qp *qp = NULL; + struct mlx5_hrxq *hrxq = NULL; + + if (priv->drop_queue.hrxq) { + rte_atomic32_inc(&priv->drop_queue.hrxq->refcnt); + return priv->drop_queue.hrxq; + } + hrxq = rte_calloc(__func__, 1, sizeof(*hrxq), 0); + if (!hrxq) { + DRV_LOG(WARNING, + "port %u cannot allocate memory for drop queue", + dev->data->port_id); + rte_errno = ENOMEM; + goto error; + } + priv->drop_queue.hrxq = hrxq; + ind_tbl = mlx5_ind_table_obj_drop_new(dev); + if (!ind_tbl) + goto error; + hrxq->ind_table = ind_tbl; + qp = mlx5_glue->create_qp_ex(priv->sh->ctx, + &(struct ibv_qp_init_attr_ex){ + .qp_type = IBV_QPT_RAW_PACKET, + .comp_mask = + IBV_QP_INIT_ATTR_PD | + IBV_QP_INIT_ATTR_IND_TABLE | + IBV_QP_INIT_ATTR_RX_HASH, + .rx_hash_conf = (struct ibv_rx_hash_conf){ + .rx_hash_function = + IBV_RX_HASH_FUNC_TOEPLITZ, + .rx_hash_key_len = MLX5_RSS_HASH_KEY_LEN, + .rx_hash_key = rss_hash_default_key, + .rx_hash_fields_mask = 0, + }, + .rwq_ind_tbl = ind_tbl->ind_table, + .pd = priv->sh->pd + }); + if (!qp) { + DEBUG("port %u cannot allocate QP for drop queue", + dev->data->port_id); + rte_errno = errno; + goto error; + } + hrxq->qp = qp; +#ifdef HAVE_IBV_FLOW_DV_SUPPORT + hrxq->action = mlx5_glue->dv_create_flow_action_dest_ibv_qp(hrxq->qp); + if (!hrxq->action) { + rte_errno = errno; + goto error; + } +#endif + rte_atomic32_set(&hrxq->refcnt, 1); + return hrxq; +error: +#ifdef HAVE_IBV_FLOW_DV_SUPPORT + if (hrxq && hrxq->action) + mlx5_glue->destroy_flow_action(hrxq->action); +#endif + if (qp) + claim_zero(mlx5_glue->destroy_qp(hrxq->qp)); + if (ind_tbl) + mlx5_ind_table_obj_drop_release(dev); + if (hrxq) { + priv->drop_queue.hrxq = NULL; + rte_free(hrxq); + } + return NULL; +} + +/** + * Release a drop hash Rx queue. + * + * @param dev + * Pointer to Ethernet device. + */ +void +mlx5_hrxq_drop_release(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_hrxq *hrxq = priv->drop_queue.hrxq; + + if (rte_atomic32_dec_and_test(&hrxq->refcnt)) { +#ifdef HAVE_IBV_FLOW_DV_SUPPORT + mlx5_glue->destroy_flow_action(hrxq->action); +#endif + claim_zero(mlx5_glue->destroy_qp(hrxq->qp)); + mlx5_ind_table_obj_drop_release(dev); + rte_free(hrxq); + priv->drop_queue.hrxq = NULL; + } +} diff --git a/src/spdk/dpdk/drivers/net/mlx5/mlx5_rxtx.c b/src/spdk/dpdk/drivers/net/mlx5/mlx5_rxtx.c new file mode 100644 index 000000000..6a17a9a5d --- /dev/null +++ b/src/spdk/dpdk/drivers/net/mlx5/mlx5_rxtx.c @@ -0,0 +1,5691 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2015 6WIND S.A. + * Copyright 2015-2019 Mellanox Technologies, Ltd + */ + +#include +#include +#include + +/* Verbs header. */ +/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */ +#ifdef PEDANTIC +#pragma GCC diagnostic ignored "-Wpedantic" +#endif +#include +#include +#ifdef PEDANTIC +#pragma GCC diagnostic error "-Wpedantic" +#endif + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "mlx5_defs.h" +#include "mlx5.h" +#include "mlx5_mr.h" +#include "mlx5_utils.h" +#include "mlx5_rxtx.h" +#include "mlx5_autoconf.h" + +/* TX burst subroutines return codes. 
*/ +enum mlx5_txcmp_code { + MLX5_TXCMP_CODE_EXIT = 0, + MLX5_TXCMP_CODE_ERROR, + MLX5_TXCMP_CODE_SINGLE, + MLX5_TXCMP_CODE_MULTI, + MLX5_TXCMP_CODE_TSO, + MLX5_TXCMP_CODE_EMPW, +}; + +/* + * These defines are used to configure Tx burst routine option set + * supported at compile time. The not specified options are optimized out + * out due to if conditions can be explicitly calculated at compile time. + * The offloads with bigger runtime check (require more CPU cycles to + * skip) overhead should have the bigger index - this is needed to + * select the better matching routine function if no exact match and + * some offloads are not actually requested. + */ +#define MLX5_TXOFF_CONFIG_MULTI (1u << 0) /* Multi-segment packets.*/ +#define MLX5_TXOFF_CONFIG_TSO (1u << 1) /* TCP send offload supported.*/ +#define MLX5_TXOFF_CONFIG_SWP (1u << 2) /* Tunnels/SW Parser offloads.*/ +#define MLX5_TXOFF_CONFIG_CSUM (1u << 3) /* Check Sums offloaded. */ +#define MLX5_TXOFF_CONFIG_INLINE (1u << 4) /* Data inlining supported. */ +#define MLX5_TXOFF_CONFIG_VLAN (1u << 5) /* VLAN insertion supported.*/ +#define MLX5_TXOFF_CONFIG_METADATA (1u << 6) /* Flow metadata. */ +#define MLX5_TXOFF_CONFIG_EMPW (1u << 8) /* Enhanced MPW supported.*/ +#define MLX5_TXOFF_CONFIG_MPW (1u << 9) /* Legacy MPW supported.*/ + +/* The most common offloads groups. */ +#define MLX5_TXOFF_CONFIG_NONE 0 +#define MLX5_TXOFF_CONFIG_FULL (MLX5_TXOFF_CONFIG_MULTI | \ + MLX5_TXOFF_CONFIG_TSO | \ + MLX5_TXOFF_CONFIG_SWP | \ + MLX5_TXOFF_CONFIG_CSUM | \ + MLX5_TXOFF_CONFIG_INLINE | \ + MLX5_TXOFF_CONFIG_VLAN | \ + MLX5_TXOFF_CONFIG_METADATA) + +#define MLX5_TXOFF_CONFIG(mask) (olx & MLX5_TXOFF_CONFIG_##mask) + +#define MLX5_TXOFF_DECL(func, olx) \ +static uint16_t mlx5_tx_burst_##func(void *txq, \ + struct rte_mbuf **pkts, \ + uint16_t pkts_n) \ +{ \ + return mlx5_tx_burst_tmpl((struct mlx5_txq_data *)txq, \ + pkts, pkts_n, (olx)); \ +} + +#define MLX5_TXOFF_INFO(func, olx) {mlx5_tx_burst_##func, olx}, + +static __rte_always_inline uint32_t +rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe); + +static __rte_always_inline int +mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe, + uint16_t cqe_cnt, volatile struct mlx5_mini_cqe8 **mcqe); + +static __rte_always_inline uint32_t +rxq_cq_to_ol_flags(volatile struct mlx5_cqe *cqe); + +static __rte_always_inline void +rxq_cq_to_mbuf(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt, + volatile struct mlx5_cqe *cqe, uint32_t rss_hash_res); + +static __rte_always_inline void +mprq_buf_replace(struct mlx5_rxq_data *rxq, uint16_t rq_idx, + const unsigned int strd_n); + +static int +mlx5_queue_state_modify(struct rte_eth_dev *dev, + struct mlx5_mp_arg_queue_state_modify *sm); + +static inline void +mlx5_lro_update_tcp_hdr(struct rte_tcp_hdr *restrict tcp, + volatile struct mlx5_cqe *restrict cqe, + uint32_t phcsum); + +static inline void +mlx5_lro_update_hdr(uint8_t *restrict padd, + volatile struct mlx5_cqe *restrict cqe, + uint32_t len); + +uint32_t mlx5_ptype_table[] __rte_cache_aligned = { + [0xff] = RTE_PTYPE_ALL_MASK, /* Last entry for errored packet. */ +}; + +uint8_t mlx5_cksum_table[1 << 10] __rte_cache_aligned; +uint8_t mlx5_swp_types_table[1 << 10] __rte_cache_aligned; + +uint64_t rte_net_mlx5_dynf_inline_mask; +#define PKT_TX_DYNF_NOINLINE rte_net_mlx5_dynf_inline_mask + +/** + * Build a table to translate Rx completion flags to packet type. + * + * @note: fix mlx5_dev_supported_ptypes_get() if any change here. 
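+ *
+ * For example, index 0x06 below encodes l3_hdr_type = 2 with
+ * l4_hdr_type = 1 and is translated to an IPv4/TCP packet type.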
+ */ +void +mlx5_set_ptype_table(void) +{ + unsigned int i; + uint32_t (*p)[RTE_DIM(mlx5_ptype_table)] = &mlx5_ptype_table; + + /* Last entry must not be overwritten, reserved for errored packet. */ + for (i = 0; i < RTE_DIM(mlx5_ptype_table) - 1; ++i) + (*p)[i] = RTE_PTYPE_UNKNOWN; + /* + * The index to the array should have: + * bit[1:0] = l3_hdr_type + * bit[4:2] = l4_hdr_type + * bit[5] = ip_frag + * bit[6] = tunneled + * bit[7] = outer_l3_type + */ + /* L2 */ + (*p)[0x00] = RTE_PTYPE_L2_ETHER; + /* L3 */ + (*p)[0x01] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_NONFRAG; + (*p)[0x02] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_NONFRAG; + /* Fragmented */ + (*p)[0x21] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_FRAG; + (*p)[0x22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_FRAG; + /* TCP */ + (*p)[0x05] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_TCP; + (*p)[0x06] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_TCP; + (*p)[0x0d] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_TCP; + (*p)[0x0e] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_TCP; + (*p)[0x11] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_TCP; + (*p)[0x12] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_TCP; + /* UDP */ + (*p)[0x09] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_UDP; + (*p)[0x0a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_UDP; + /* Repeat with outer_l3_type being set. Just in case. */ + (*p)[0x81] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_NONFRAG; + (*p)[0x82] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_NONFRAG; + (*p)[0xa1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_FRAG; + (*p)[0xa2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_FRAG; + (*p)[0x85] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_TCP; + (*p)[0x86] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_TCP; + (*p)[0x8d] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_TCP; + (*p)[0x8e] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_TCP; + (*p)[0x91] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_TCP; + (*p)[0x92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_TCP; + (*p)[0x89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_UDP; + (*p)[0x8a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_UDP; + /* Tunneled - L3 */ + (*p)[0x40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN; + (*p)[0x41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG; + (*p)[0x42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG; + (*p)[0xc0] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN; + (*p)[0xc1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG; + (*p)[0xc2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG; + /* Tunneled - Fragmented */ + (*p)[0x61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG; + (*p)[0x62] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG; + (*p)[0xe1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG; + (*p)[0xe2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG; + /* Tunneled - TCP */ + (*p)[0x45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP; + (*p)[0x46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP; + (*p)[0x4d] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP; + (*p)[0x4e] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP; + (*p)[0x51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP; + (*p)[0x52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP; + (*p)[0xc5] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP; + (*p)[0xc6] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP; + (*p)[0xcd] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP; + (*p)[0xce] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP; + (*p)[0xd1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP; + (*p)[0xd2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP; + /* Tunneled - UDP */ + (*p)[0x49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP; + (*p)[0x4a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP; + (*p)[0xc9] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP; + (*p)[0xca] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP; +} + +/** + * Build a table to translate packet to checksum type of Verbs. + */ +void +mlx5_set_cksum_table(void) +{ + unsigned int i; + uint8_t v; + + /* + * The index should have: + * bit[0] = PKT_TX_TCP_SEG + * bit[2:3] = PKT_TX_UDP_CKSUM, PKT_TX_TCP_CKSUM + * bit[4] = PKT_TX_IP_CKSUM + * bit[8] = PKT_TX_OUTER_IP_CKSUM + * bit[9] = tunnel + */ + for (i = 0; i < RTE_DIM(mlx5_cksum_table); ++i) { + v = 0; + if (i & (1 << 9)) { + /* Tunneled packet. */ + if (i & (1 << 8)) /* Outer IP. */ + v |= MLX5_ETH_WQE_L3_CSUM; + if (i & (1 << 4)) /* Inner IP. */ + v |= MLX5_ETH_WQE_L3_INNER_CSUM; + if (i & (3 << 2 | 1 << 0)) /* L4 or TSO. */ + v |= MLX5_ETH_WQE_L4_INNER_CSUM; + } else { + /* No tunnel. */ + if (i & (1 << 4)) /* IP. */ + v |= MLX5_ETH_WQE_L3_CSUM; + if (i & (3 << 2 | 1 << 0)) /* L4 or TSO. 
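+			 * (bits 2-3 cover PKT_TX_UDP_CKSUM/PKT_TX_TCP_CKSUM and
+			 * bit 0 covers PKT_TX_TCP_SEG, per the index layout
+			 * described above)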
*/ + v |= MLX5_ETH_WQE_L4_CSUM; + } + mlx5_cksum_table[i] = v; + } +} + +/** + * Build a table to translate packet type of mbuf to SWP type of Verbs. + */ +void +mlx5_set_swp_types_table(void) +{ + unsigned int i; + uint8_t v; + + /* + * The index should have: + * bit[0:1] = PKT_TX_L4_MASK + * bit[4] = PKT_TX_IPV6 + * bit[8] = PKT_TX_OUTER_IPV6 + * bit[9] = PKT_TX_OUTER_UDP + */ + for (i = 0; i < RTE_DIM(mlx5_swp_types_table); ++i) { + v = 0; + if (i & (1 << 8)) + v |= MLX5_ETH_WQE_L3_OUTER_IPV6; + if (i & (1 << 9)) + v |= MLX5_ETH_WQE_L4_OUTER_UDP; + if (i & (1 << 4)) + v |= MLX5_ETH_WQE_L3_INNER_IPV6; + if ((i & 3) == (PKT_TX_UDP_CKSUM >> 52)) + v |= MLX5_ETH_WQE_L4_INNER_UDP; + mlx5_swp_types_table[i] = v; + } +} + +/** + * Set Software Parser flags and offsets in Ethernet Segment of WQE. + * Flags must be preliminary initialized to zero. + * + * @param loc + * Pointer to burst routine local context. + * @param swp_flags + * Pointer to store Software Parser flags + * @param olx + * Configured Tx offloads mask. It is fully defined at + * compile time and may be used for optimization. + * + * @return + * Software Parser offsets packed in dword. + * Software Parser flags are set by pointer. + */ +static __rte_always_inline uint32_t +txq_mbuf_to_swp(struct mlx5_txq_local *restrict loc, + uint8_t *swp_flags, + unsigned int olx) +{ + uint64_t ol, tunnel; + unsigned int idx, off; + uint32_t set; + + if (!MLX5_TXOFF_CONFIG(SWP)) + return 0; + ol = loc->mbuf->ol_flags; + tunnel = ol & PKT_TX_TUNNEL_MASK; + /* + * Check whether Software Parser is required. + * Only customized tunnels may ask for. + */ + if (likely(tunnel != PKT_TX_TUNNEL_UDP && tunnel != PKT_TX_TUNNEL_IP)) + return 0; + /* + * The index should have: + * bit[0:1] = PKT_TX_L4_MASK + * bit[4] = PKT_TX_IPV6 + * bit[8] = PKT_TX_OUTER_IPV6 + * bit[9] = PKT_TX_OUTER_UDP + */ + idx = (ol & (PKT_TX_L4_MASK | PKT_TX_IPV6 | PKT_TX_OUTER_IPV6)) >> 52; + idx |= (tunnel == PKT_TX_TUNNEL_UDP) ? (1 << 9) : 0; + *swp_flags = mlx5_swp_types_table[idx]; + /* + * Set offsets for SW parser. Since ConnectX-5, SW parser just + * complements HW parser. SW parser starts to engage only if HW parser + * can't reach a header. For the older devices, HW parser will not kick + * in if any of SWP offsets is set. Therefore, all of the L3 offsets + * should be set regardless of HW offload. + */ + off = loc->mbuf->outer_l2_len; + if (MLX5_TXOFF_CONFIG(VLAN) && ol & PKT_TX_VLAN_PKT) + off += sizeof(struct rte_vlan_hdr); + set = (off >> 1) << 8; /* Outer L3 offset. */ + off += loc->mbuf->outer_l3_len; + if (tunnel == PKT_TX_TUNNEL_UDP) + set |= off >> 1; /* Outer L4 offset. */ + if (ol & (PKT_TX_IPV4 | PKT_TX_IPV6)) { /* Inner IP. */ + const uint64_t csum = ol & PKT_TX_L4_MASK; + off += loc->mbuf->l2_len; + set |= (off >> 1) << 24; /* Inner L3 offset. */ + if (csum == PKT_TX_TCP_CKSUM || + csum == PKT_TX_UDP_CKSUM || + (MLX5_TXOFF_CONFIG(TSO) && ol & PKT_TX_TCP_SEG)) { + off += loc->mbuf->l3_len; + set |= (off >> 1) << 16; /* Inner L4 offset. */ + } + } + set = rte_cpu_to_le_32(set); + return set; +} + +/** + * Convert the Checksum offloads to Verbs. + * + * @param buf + * Pointer to the mbuf. + * + * @return + * Converted checksum flags. 
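+ *
+ * The ol_flags value is shifted right by 50 bits so that PKT_TX_TCP_SEG
+ * lands in bit 0 of the table index, and the tunnel indication is OR-ed
+ * into bit 9, matching the layout used by mlx5_set_cksum_table().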
+ */ +static __rte_always_inline uint8_t +txq_ol_cksum_to_cs(struct rte_mbuf *buf) +{ + uint32_t idx; + uint8_t is_tunnel = !!(buf->ol_flags & PKT_TX_TUNNEL_MASK); + const uint64_t ol_flags_mask = PKT_TX_TCP_SEG | PKT_TX_L4_MASK | + PKT_TX_IP_CKSUM | PKT_TX_OUTER_IP_CKSUM; + + /* + * The index should have: + * bit[0] = PKT_TX_TCP_SEG + * bit[2:3] = PKT_TX_UDP_CKSUM, PKT_TX_TCP_CKSUM + * bit[4] = PKT_TX_IP_CKSUM + * bit[8] = PKT_TX_OUTER_IP_CKSUM + * bit[9] = tunnel + */ + idx = ((buf->ol_flags & ol_flags_mask) >> 50) | (!!is_tunnel << 9); + return mlx5_cksum_table[idx]; +} + +/** + * Internal function to compute the number of used descriptors in an RX queue + * + * @param rxq + * The Rx queue. + * + * @return + * The number of used rx descriptor. + */ +static uint32_t +rx_queue_count(struct mlx5_rxq_data *rxq) +{ + struct rxq_zip *zip = &rxq->zip; + volatile struct mlx5_cqe *cqe; + const unsigned int cqe_n = (1 << rxq->cqe_n); + const unsigned int cqe_cnt = cqe_n - 1; + unsigned int cq_ci; + unsigned int used; + + /* if we are processing a compressed cqe */ + if (zip->ai) { + used = zip->cqe_cnt - zip->ca; + cq_ci = zip->cq_ci; + } else { + used = 0; + cq_ci = rxq->cq_ci; + } + cqe = &(*rxq->cqes)[cq_ci & cqe_cnt]; + while (check_cqe(cqe, cqe_n, cq_ci) != MLX5_CQE_STATUS_HW_OWN) { + int8_t op_own; + unsigned int n; + + op_own = cqe->op_own; + if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED) + n = rte_be_to_cpu_32(cqe->byte_cnt); + else + n = 1; + cq_ci += n; + used += n; + cqe = &(*rxq->cqes)[cq_ci & cqe_cnt]; + } + used = RTE_MIN(used, (1U << rxq->elts_n) - 1); + return used; +} + +/** + * DPDK callback to check the status of a rx descriptor. + * + * @param rx_queue + * The Rx queue. + * @param[in] offset + * The index of the descriptor in the ring. + * + * @return + * The status of the tx descriptor. + */ +int +mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset) +{ + struct mlx5_rxq_data *rxq = rx_queue; + struct mlx5_rxq_ctrl *rxq_ctrl = + container_of(rxq, struct mlx5_rxq_ctrl, rxq); + struct rte_eth_dev *dev = ETH_DEV(rxq_ctrl->priv); + + if (dev->rx_pkt_burst != mlx5_rx_burst) { + rte_errno = ENOTSUP; + return -rte_errno; + } + if (offset >= (1 << rxq->elts_n)) { + rte_errno = EINVAL; + return -rte_errno; + } + if (offset < rx_queue_count(rxq)) + return RTE_ETH_RX_DESC_DONE; + return RTE_ETH_RX_DESC_AVAIL; +} + +/** + * DPDK callback to get the RX queue information + * + * @param dev + * Pointer to the device structure. + * + * @param rx_queue_id + * Rx queue identificator. + * + * @param qinfo + * Pointer to the RX queue information structure. + * + * @return + * None. + */ + +void +mlx5_rxq_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id, + struct rte_eth_rxq_info *qinfo) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_rxq_data *rxq = (*priv->rxqs)[rx_queue_id]; + struct mlx5_rxq_ctrl *rxq_ctrl = + container_of(rxq, struct mlx5_rxq_ctrl, rxq); + + if (!rxq) + return; + qinfo->mp = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ? + rxq->mprq_mp : rxq->mp; + qinfo->conf.rx_thresh.pthresh = 0; + qinfo->conf.rx_thresh.hthresh = 0; + qinfo->conf.rx_thresh.wthresh = 0; + qinfo->conf.rx_free_thresh = rxq->rq_repl_thresh; + qinfo->conf.rx_drop_en = 1; + qinfo->conf.rx_deferred_start = rxq_ctrl ? 
0 : 1; + qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads; + qinfo->scattered_rx = dev->data->scattered_rx; + qinfo->nb_desc = 1 << rxq->elts_n; +} + +/** + * DPDK callback to get the RX packet burst mode information + * + * @param dev + * Pointer to the device structure. + * + * @param rx_queue_id + * Rx queue identificatior. + * + * @param mode + * Pointer to the burts mode information. + * + * @return + * 0 as success, -EINVAL as failure. + */ + +int +mlx5_rx_burst_mode_get(struct rte_eth_dev *dev, + uint16_t rx_queue_id __rte_unused, + struct rte_eth_burst_mode *mode) +{ + eth_rx_burst_t pkt_burst = dev->rx_pkt_burst; + + if (pkt_burst == mlx5_rx_burst) { + snprintf(mode->info, sizeof(mode->info), "%s", "Scalar"); + } else if (pkt_burst == mlx5_rx_burst_mprq) { + snprintf(mode->info, sizeof(mode->info), "%s", "Multi-Packet RQ"); + } else if (pkt_burst == mlx5_rx_burst_vec) { +#if defined RTE_ARCH_X86_64 + snprintf(mode->info, sizeof(mode->info), "%s", "Vector SSE"); +#elif defined RTE_ARCH_ARM64 + snprintf(mode->info, sizeof(mode->info), "%s", "Vector Neon"); +#elif defined RTE_ARCH_PPC_64 + snprintf(mode->info, sizeof(mode->info), "%s", "Vector AltiVec"); +#else + return -EINVAL; +#endif + } else { + return -EINVAL; + } + return 0; +} + +/** + * DPDK callback to get the number of used descriptors in a RX queue + * + * @param dev + * Pointer to the device structure. + * + * @param rx_queue_id + * The Rx queue. + * + * @return + * The number of used rx descriptor. + * -EINVAL if the queue is invalid + */ +uint32_t +mlx5_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_rxq_data *rxq; + + if (dev->rx_pkt_burst != mlx5_rx_burst) { + rte_errno = ENOTSUP; + return -rte_errno; + } + rxq = (*priv->rxqs)[rx_queue_id]; + if (!rxq) { + rte_errno = EINVAL; + return -rte_errno; + } + return rx_queue_count(rxq); +} + +#define MLX5_SYSTEM_LOG_DIR "/var/log" +/** + * Dump debug information to log file. + * + * @param fname + * The file name. + * @param hex_title + * If not NULL this string is printed as a header to the output + * and the output will be in hexadecimal view. + * @param buf + * This is the buffer address to print out. + * @param len + * The number of bytes to dump out. + */ +void +mlx5_dump_debug_information(const char *fname, const char *hex_title, + const void *buf, unsigned int hex_len) +{ + FILE *fd; + + MKSTR(path, "%s/%s", MLX5_SYSTEM_LOG_DIR, fname); + fd = fopen(path, "a+"); + if (!fd) { + DRV_LOG(WARNING, "cannot open %s for debug dump", path); + MKSTR(path2, "./%s", fname); + fd = fopen(path2, "a+"); + if (!fd) { + DRV_LOG(ERR, "cannot open %s for debug dump", path2); + return; + } + DRV_LOG(INFO, "New debug dump in file %s", path2); + } else { + DRV_LOG(INFO, "New debug dump in file %s", path); + } + if (hex_title) + rte_hexdump(fd, hex_title, buf, hex_len); + else + fprintf(fd, "%s", (const char *)buf); + fprintf(fd, "\n\n\n"); + fclose(fd); +} + +/** + * Move QP from error state to running state and initialize indexes. + * + * @param txq_ctrl + * Pointer to TX queue control structure. + * + * @return + * 0 on success, else -1. 
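+ *
+ * Illustrative call pattern, extracted from the error CQE handler below:
+ * @code
+ *	if (tx_recover_qp(txq_ctrl))
+ *		return -1;
+ *	txq_free_elts(txq_ctrl);
+ * @endcode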
+ */ +static int +tx_recover_qp(struct mlx5_txq_ctrl *txq_ctrl) +{ + struct mlx5_mp_arg_queue_state_modify sm = { + .is_wq = 0, + .queue_id = txq_ctrl->txq.idx, + }; + + if (mlx5_queue_state_modify(ETH_DEV(txq_ctrl->priv), &sm)) + return -1; + txq_ctrl->txq.wqe_ci = 0; + txq_ctrl->txq.wqe_pi = 0; + txq_ctrl->txq.elts_comp = 0; + return 0; +} + +/* Return 1 if the error CQE is signed otherwise, sign it and return 0. */ +static int +check_err_cqe_seen(volatile struct mlx5_err_cqe *err_cqe) +{ + static const uint8_t magic[] = "seen"; + int ret = 1; + unsigned int i; + + for (i = 0; i < sizeof(magic); ++i) + if (!ret || err_cqe->rsvd1[i] != magic[i]) { + ret = 0; + err_cqe->rsvd1[i] = magic[i]; + } + return ret; +} + +/** + * Handle error CQE. + * + * @param txq + * Pointer to TX queue structure. + * @param error_cqe + * Pointer to the error CQE. + * + * @return + * Negative value if queue recovery failed, otherwise + * the error completion entry is handled successfully. + */ +static int +mlx5_tx_error_cqe_handle(struct mlx5_txq_data *restrict txq, + volatile struct mlx5_err_cqe *err_cqe) +{ + if (err_cqe->syndrome != MLX5_CQE_SYNDROME_WR_FLUSH_ERR) { + const uint16_t wqe_m = ((1 << txq->wqe_n) - 1); + struct mlx5_txq_ctrl *txq_ctrl = + container_of(txq, struct mlx5_txq_ctrl, txq); + uint16_t new_wqe_pi = rte_be_to_cpu_16(err_cqe->wqe_counter); + int seen = check_err_cqe_seen(err_cqe); + + if (!seen && txq_ctrl->dump_file_n < + txq_ctrl->priv->config.max_dump_files_num) { + MKSTR(err_str, "Unexpected CQE error syndrome " + "0x%02x CQN = %u SQN = %u wqe_counter = %u " + "wq_ci = %u cq_ci = %u", err_cqe->syndrome, + txq->cqe_s, txq->qp_num_8s >> 8, + rte_be_to_cpu_16(err_cqe->wqe_counter), + txq->wqe_ci, txq->cq_ci); + MKSTR(name, "dpdk_mlx5_port_%u_txq_%u_index_%u_%u", + PORT_ID(txq_ctrl->priv), txq->idx, + txq_ctrl->dump_file_n, (uint32_t)rte_rdtsc()); + mlx5_dump_debug_information(name, NULL, err_str, 0); + mlx5_dump_debug_information(name, "MLX5 Error CQ:", + (const void *)((uintptr_t) + txq->cqes), + sizeof(*err_cqe) * + (1 << txq->cqe_n)); + mlx5_dump_debug_information(name, "MLX5 Error SQ:", + (const void *)((uintptr_t) + txq->wqes), + MLX5_WQE_SIZE * + (1 << txq->wqe_n)); + txq_ctrl->dump_file_n++; + } + if (!seen) + /* + * Count errors in WQEs units. + * Later it can be improved to count error packets, + * for example, by SQ parsing to find how much packets + * should be counted for each WQE. + */ + txq->stats.oerrors += ((txq->wqe_ci & wqe_m) - + new_wqe_pi) & wqe_m; + if (tx_recover_qp(txq_ctrl)) { + /* Recovering failed - retry later on the same WQE. */ + return -1; + } + /* Release all the remaining buffers. */ + txq_free_elts(txq_ctrl); + } + return 0; +} + +/** + * Translate RX completion flags to packet type. + * + * @param[in] rxq + * Pointer to RX queue structure. + * @param[in] cqe + * Pointer to CQE. + * + * @note: fix mlx5_dev_supported_ptypes_get() if any change here. + * + * @return + * Packet type for struct rte_mbuf. + */ +static inline uint32_t +rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe) +{ + uint8_t idx; + uint8_t pinfo = cqe->pkt_info; + uint16_t ptype = cqe->hdr_type_etc; + + /* + * The index to the array should have: + * bit[1:0] = l3_hdr_type + * bit[4:2] = l4_hdr_type + * bit[5] = ip_frag + * bit[6] = tunneled + * bit[7] = outer_l3_type + */ + idx = ((pinfo & 0x3) << 6) | ((ptype & 0xfc00) >> 10); + return mlx5_ptype_table[idx] | rxq->tunnel * !!(idx & (1 << 6)); +} + +/** + * Initialize Rx WQ and indexes. 
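+ *
+ * Re-run after queue recovery, as in mlx5_rx_err_handle() below
+ * (illustrative extract, sm is the state modify request used there):
+ * @code
+ *	sm.is_wq = 1;
+ *	sm.queue_id = rxq->idx;
+ *	sm.state = IBV_WQS_RDY;
+ *	if (mlx5_queue_state_modify(ETH_DEV(rxq_ctrl->priv), &sm))
+ *		return -1;
+ *	mlx5_rxq_initialize(rxq);
+ *	rxq->err_state = MLX5_RXQ_ERR_STATE_NO_ERROR;
+ * @endcode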
+ * + * @param[in] rxq + * Pointer to RX queue structure. + */ +void +mlx5_rxq_initialize(struct mlx5_rxq_data *rxq) +{ + const unsigned int wqe_n = 1 << rxq->elts_n; + unsigned int i; + + for (i = 0; (i != wqe_n); ++i) { + volatile struct mlx5_wqe_data_seg *scat; + uintptr_t addr; + uint32_t byte_count; + + if (mlx5_rxq_mprq_enabled(rxq)) { + struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[i]; + + scat = &((volatile struct mlx5_wqe_mprq *) + rxq->wqes)[i].dseg; + addr = (uintptr_t)mlx5_mprq_buf_addr(buf, + 1 << rxq->strd_num_n); + byte_count = (1 << rxq->strd_sz_n) * + (1 << rxq->strd_num_n); + } else { + struct rte_mbuf *buf = (*rxq->elts)[i]; + + scat = &((volatile struct mlx5_wqe_data_seg *) + rxq->wqes)[i]; + addr = rte_pktmbuf_mtod(buf, uintptr_t); + byte_count = DATA_LEN(buf); + } + /* scat->addr must be able to store a pointer. */ + MLX5_ASSERT(sizeof(scat->addr) >= sizeof(uintptr_t)); + *scat = (struct mlx5_wqe_data_seg){ + .addr = rte_cpu_to_be_64(addr), + .byte_count = rte_cpu_to_be_32(byte_count), + .lkey = mlx5_rx_addr2mr(rxq, addr), + }; + } + rxq->consumed_strd = 0; + rxq->decompressed = 0; + rxq->rq_pi = 0; + rxq->zip = (struct rxq_zip){ + .ai = 0, + }; + /* Update doorbell counter. */ + rxq->rq_ci = wqe_n >> rxq->sges_n; + rte_cio_wmb(); + *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci); +} + +/** + * Modify a Verbs/DevX queue state. + * This must be called from the primary process. + * + * @param dev + * Pointer to Ethernet device. + * @param sm + * State modify request parameters. + * + * @return + * 0 in case of success else non-zero value and rte_errno is set. + */ +int +mlx5_queue_state_modify_primary(struct rte_eth_dev *dev, + const struct mlx5_mp_arg_queue_state_modify *sm) +{ + int ret; + struct mlx5_priv *priv = dev->data->dev_private; + + if (sm->is_wq) { + struct mlx5_rxq_data *rxq = (*priv->rxqs)[sm->queue_id]; + struct mlx5_rxq_ctrl *rxq_ctrl = + container_of(rxq, struct mlx5_rxq_ctrl, rxq); + + if (rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_IBV) { + struct ibv_wq_attr mod = { + .attr_mask = IBV_WQ_ATTR_STATE, + .wq_state = sm->state, + }; + + ret = mlx5_glue->modify_wq(rxq_ctrl->obj->wq, &mod); + } else { /* rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ. 
*/ + struct mlx5_devx_modify_rq_attr rq_attr; + + memset(&rq_attr, 0, sizeof(rq_attr)); + if (sm->state == IBV_WQS_RESET) { + rq_attr.rq_state = MLX5_RQC_STATE_ERR; + rq_attr.state = MLX5_RQC_STATE_RST; + } else if (sm->state == IBV_WQS_RDY) { + rq_attr.rq_state = MLX5_RQC_STATE_RST; + rq_attr.state = MLX5_RQC_STATE_RDY; + } else if (sm->state == IBV_WQS_ERR) { + rq_attr.rq_state = MLX5_RQC_STATE_RDY; + rq_attr.state = MLX5_RQC_STATE_ERR; + } + ret = mlx5_devx_cmd_modify_rq(rxq_ctrl->obj->rq, + &rq_attr); + } + if (ret) { + DRV_LOG(ERR, "Cannot change Rx WQ state to %u - %s", + sm->state, strerror(errno)); + rte_errno = errno; + return ret; + } + } else { + struct mlx5_txq_data *txq = (*priv->txqs)[sm->queue_id]; + struct mlx5_txq_ctrl *txq_ctrl = + container_of(txq, struct mlx5_txq_ctrl, txq); + struct ibv_qp_attr mod = { + .qp_state = IBV_QPS_RESET, + .port_num = (uint8_t)priv->ibv_port, + }; + struct ibv_qp *qp = txq_ctrl->obj->qp; + + ret = mlx5_glue->modify_qp(qp, &mod, IBV_QP_STATE); + if (ret) { + DRV_LOG(ERR, "Cannot change the Tx QP state to RESET " + "%s", strerror(errno)); + rte_errno = errno; + return ret; + } + mod.qp_state = IBV_QPS_INIT; + ret = mlx5_glue->modify_qp(qp, &mod, + (IBV_QP_STATE | IBV_QP_PORT)); + if (ret) { + DRV_LOG(ERR, "Cannot change Tx QP state to INIT %s", + strerror(errno)); + rte_errno = errno; + return ret; + } + mod.qp_state = IBV_QPS_RTR; + ret = mlx5_glue->modify_qp(qp, &mod, IBV_QP_STATE); + if (ret) { + DRV_LOG(ERR, "Cannot change Tx QP state to RTR %s", + strerror(errno)); + rte_errno = errno; + return ret; + } + mod.qp_state = IBV_QPS_RTS; + ret = mlx5_glue->modify_qp(qp, &mod, IBV_QP_STATE); + if (ret) { + DRV_LOG(ERR, "Cannot change Tx QP state to RTS %s", + strerror(errno)); + rte_errno = errno; + return ret; + } + } + return 0; +} + +/** + * Modify a Verbs queue state. + * + * @param dev + * Pointer to Ethernet device. + * @param sm + * State modify request parameters. + * + * @return + * 0 in case of success else non-zero value. + */ +static int +mlx5_queue_state_modify(struct rte_eth_dev *dev, + struct mlx5_mp_arg_queue_state_modify *sm) +{ + struct mlx5_priv *priv = dev->data->dev_private; + int ret = 0; + + switch (rte_eal_process_type()) { + case RTE_PROC_PRIMARY: + ret = mlx5_queue_state_modify_primary(dev, sm); + break; + case RTE_PROC_SECONDARY: + ret = mlx5_mp_req_queue_state_modify(&priv->mp_id, sm); + break; + default: + break; + } + return ret; +} + +/** + * Handle a Rx error. + * The function inserts the RQ state to reset when the first error CQE is + * shown, then drains the CQ by the caller function loop. When the CQ is empty, + * it moves the RQ state to ready and initializes the RQ. + * Next CQE identification and error counting are in the caller responsibility. + * + * @param[in] rxq + * Pointer to RX queue structure. + * @param[in] vec + * 1 when called from vectorized Rx burst, need to prepare mbufs for the RQ. + * 0 when called from non-vectorized Rx burst. + * + * @return + * -1 in case of recovery error, otherwise the CQE status. 
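+ *
+ * Typical invocation, a simplified extract from mlx5_rx_poll_len():
+ * @code
+ *	ret = check_cqe(cqe, cqe_n, rxq->cq_ci);
+ *	if (ret == MLX5_CQE_STATUS_ERR || rxq->err_state) {
+ *		ret = mlx5_rx_err_handle(rxq, 0);
+ *		if (ret == MLX5_CQE_STATUS_HW_OWN || ret == -1)
+ *			return 0;
+ *	}
+ * @endcode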
+ */ +int +mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec) +{ + const uint16_t cqe_n = 1 << rxq->cqe_n; + const uint16_t cqe_mask = cqe_n - 1; + const unsigned int wqe_n = 1 << rxq->elts_n; + struct mlx5_rxq_ctrl *rxq_ctrl = + container_of(rxq, struct mlx5_rxq_ctrl, rxq); + union { + volatile struct mlx5_cqe *cqe; + volatile struct mlx5_err_cqe *err_cqe; + } u = { + .cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_mask], + }; + struct mlx5_mp_arg_queue_state_modify sm; + int ret; + + switch (rxq->err_state) { + case MLX5_RXQ_ERR_STATE_NO_ERROR: + rxq->err_state = MLX5_RXQ_ERR_STATE_NEED_RESET; + /* Fall-through */ + case MLX5_RXQ_ERR_STATE_NEED_RESET: + sm.is_wq = 1; + sm.queue_id = rxq->idx; + sm.state = IBV_WQS_RESET; + if (mlx5_queue_state_modify(ETH_DEV(rxq_ctrl->priv), &sm)) + return -1; + if (rxq_ctrl->dump_file_n < + rxq_ctrl->priv->config.max_dump_files_num) { + MKSTR(err_str, "Unexpected CQE error syndrome " + "0x%02x CQN = %u RQN = %u wqe_counter = %u" + " rq_ci = %u cq_ci = %u", u.err_cqe->syndrome, + rxq->cqn, rxq_ctrl->wqn, + rte_be_to_cpu_16(u.err_cqe->wqe_counter), + rxq->rq_ci << rxq->sges_n, rxq->cq_ci); + MKSTR(name, "dpdk_mlx5_port_%u_rxq_%u_%u", + rxq->port_id, rxq->idx, (uint32_t)rte_rdtsc()); + mlx5_dump_debug_information(name, NULL, err_str, 0); + mlx5_dump_debug_information(name, "MLX5 Error CQ:", + (const void *)((uintptr_t) + rxq->cqes), + sizeof(*u.cqe) * cqe_n); + mlx5_dump_debug_information(name, "MLX5 Error RQ:", + (const void *)((uintptr_t) + rxq->wqes), + 16 * wqe_n); + rxq_ctrl->dump_file_n++; + } + rxq->err_state = MLX5_RXQ_ERR_STATE_NEED_READY; + /* Fall-through */ + case MLX5_RXQ_ERR_STATE_NEED_READY: + ret = check_cqe(u.cqe, cqe_n, rxq->cq_ci); + if (ret == MLX5_CQE_STATUS_HW_OWN) { + rte_cio_wmb(); + *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci); + rte_cio_wmb(); + /* + * The RQ consumer index must be zeroed while moving + * from RESET state to RDY state. + */ + *rxq->rq_db = rte_cpu_to_be_32(0); + rte_cio_wmb(); + sm.is_wq = 1; + sm.queue_id = rxq->idx; + sm.state = IBV_WQS_RDY; + if (mlx5_queue_state_modify(ETH_DEV(rxq_ctrl->priv), + &sm)) + return -1; + if (vec) { + const uint16_t q_mask = wqe_n - 1; + uint16_t elt_idx; + struct rte_mbuf **elt; + int i; + unsigned int n = wqe_n - (rxq->rq_ci - + rxq->rq_pi); + + for (i = 0; i < (int)n; ++i) { + elt_idx = (rxq->rq_ci + i) & q_mask; + elt = &(*rxq->elts)[elt_idx]; + *elt = rte_mbuf_raw_alloc(rxq->mp); + if (!*elt) { + for (i--; i >= 0; --i) { + elt_idx = (rxq->rq_ci + + i) & q_mask; + elt = &(*rxq->elts) + [elt_idx]; + rte_pktmbuf_free_seg + (*elt); + } + return -1; + } + } + for (i = 0; i < (int)wqe_n; ++i) { + elt = &(*rxq->elts)[i]; + DATA_LEN(*elt) = + (uint16_t)((*elt)->buf_len - + rte_pktmbuf_headroom(*elt)); + } + /* Padding with a fake mbuf for vec Rx. */ + for (i = 0; i < MLX5_VPMD_DESCS_PER_LOOP; ++i) + (*rxq->elts)[wqe_n + i] = + &rxq->fake_mbuf; + } + mlx5_rxq_initialize(rxq); + rxq->err_state = MLX5_RXQ_ERR_STATE_NO_ERROR; + } + return ret; + default: + return -1; + } +} + +/** + * Get size of the next packet for a given CQE. For compressed CQEs, the + * consumer index is updated only once all packets of the current one have + * been processed. + * + * @param rxq + * Pointer to RX queue. + * @param cqe + * CQE to process. + * @param[out] mcqe + * Store pointer to mini-CQE if compressed. Otherwise, the pointer is not + * written. + * + * @return + * 0 in case of empty CQE, otherwise the packet size in bytes. 
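+ *
+ * Invocation pattern, extracted from mlx5_rx_burst() below:
+ * @code
+ *	cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
+ *	len = mlx5_rx_poll_len(rxq, cqe, cqe_cnt, &mcqe);
+ *	if (!len) {
+ *		rte_mbuf_raw_free(rep);
+ *		break;
+ *	}
+ * @endcode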
+ */ +static inline int +mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe, + uint16_t cqe_cnt, volatile struct mlx5_mini_cqe8 **mcqe) +{ + struct rxq_zip *zip = &rxq->zip; + uint16_t cqe_n = cqe_cnt + 1; + int len; + uint16_t idx, end; + + do { + len = 0; + /* Process compressed data in the CQE and mini arrays. */ + if (zip->ai) { + volatile struct mlx5_mini_cqe8 (*mc)[8] = + (volatile struct mlx5_mini_cqe8 (*)[8]) + (uintptr_t)(&(*rxq->cqes)[zip->ca & + cqe_cnt].pkt_info); + + len = rte_be_to_cpu_32((*mc)[zip->ai & 7].byte_cnt); + *mcqe = &(*mc)[zip->ai & 7]; + if ((++zip->ai & 7) == 0) { + /* Invalidate consumed CQEs */ + idx = zip->ca; + end = zip->na; + while (idx != end) { + (*rxq->cqes)[idx & cqe_cnt].op_own = + MLX5_CQE_INVALIDATE; + ++idx; + } + /* + * Increment consumer index to skip the number + * of CQEs consumed. Hardware leaves holes in + * the CQ ring for software use. + */ + zip->ca = zip->na; + zip->na += 8; + } + if (unlikely(rxq->zip.ai == rxq->zip.cqe_cnt)) { + /* Invalidate the rest */ + idx = zip->ca; + end = zip->cq_ci; + + while (idx != end) { + (*rxq->cqes)[idx & cqe_cnt].op_own = + MLX5_CQE_INVALIDATE; + ++idx; + } + rxq->cq_ci = zip->cq_ci; + zip->ai = 0; + } + /* + * No compressed data, get next CQE and verify if it is + * compressed. + */ + } else { + int ret; + int8_t op_own; + + ret = check_cqe(cqe, cqe_n, rxq->cq_ci); + if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) { + if (unlikely(ret == MLX5_CQE_STATUS_ERR || + rxq->err_state)) { + ret = mlx5_rx_err_handle(rxq, 0); + if (ret == MLX5_CQE_STATUS_HW_OWN || + ret == -1) + return 0; + } else { + return 0; + } + } + ++rxq->cq_ci; + op_own = cqe->op_own; + if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED) { + volatile struct mlx5_mini_cqe8 (*mc)[8] = + (volatile struct mlx5_mini_cqe8 (*)[8]) + (uintptr_t)(&(*rxq->cqes) + [rxq->cq_ci & + cqe_cnt].pkt_info); + + /* Fix endianness. */ + zip->cqe_cnt = rte_be_to_cpu_32(cqe->byte_cnt); + /* + * Current mini array position is the one + * returned by check_cqe64(). + * + * If completion comprises several mini arrays, + * as a special case the second one is located + * 7 CQEs after the initial CQE instead of 8 + * for subsequent ones. + */ + zip->ca = rxq->cq_ci; + zip->na = zip->ca + 7; + /* Compute the next non compressed CQE. */ + --rxq->cq_ci; + zip->cq_ci = rxq->cq_ci + zip->cqe_cnt; + /* Get packet size to return. */ + len = rte_be_to_cpu_32((*mc)[0].byte_cnt); + *mcqe = &(*mc)[0]; + zip->ai = 1; + /* Prefetch all to be invalidated */ + idx = zip->ca; + end = zip->cq_ci; + while (idx != end) { + rte_prefetch0(&(*rxq->cqes)[(idx) & + cqe_cnt]); + ++idx; + } + } else { + len = rte_be_to_cpu_32(cqe->byte_cnt); + } + } + if (unlikely(rxq->err_state)) { + cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt]; + ++rxq->stats.idropped; + } else { + return len; + } + } while (1); +} + +/** + * Translate RX completion flags to offload flags. + * + * @param[in] cqe + * Pointer to CQE. + * + * @return + * Offload flags (ol_flags) for struct rte_mbuf. + */ +static inline uint32_t +rxq_cq_to_ol_flags(volatile struct mlx5_cqe *cqe) +{ + uint32_t ol_flags = 0; + uint16_t flags = rte_be_to_cpu_16(cqe->hdr_type_etc); + + ol_flags = + TRANSPOSE(flags, + MLX5_CQE_RX_L3_HDR_VALID, + PKT_RX_IP_CKSUM_GOOD) | + TRANSPOSE(flags, + MLX5_CQE_RX_L4_HDR_VALID, + PKT_RX_L4_CKSUM_GOOD); + return ol_flags; +} + +/** + * Fill in mbuf fields from RX completion flags. + * Note that pkt->ol_flags should be initialized outside of this function. + * + * @param rxq + * Pointer to RX queue. 
+ * @param pkt + * mbuf to fill. + * @param cqe + * CQE to process. + * @param rss_hash_res + * Packet RSS Hash result. + */ +static inline void +rxq_cq_to_mbuf(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt, + volatile struct mlx5_cqe *cqe, uint32_t rss_hash_res) +{ + /* Update packet information. */ + pkt->packet_type = rxq_cq_to_pkt_type(rxq, cqe); + if (rss_hash_res && rxq->rss_hash) { + pkt->hash.rss = rss_hash_res; + pkt->ol_flags |= PKT_RX_RSS_HASH; + } + if (rxq->mark && MLX5_FLOW_MARK_IS_VALID(cqe->sop_drop_qpn)) { + pkt->ol_flags |= PKT_RX_FDIR; + if (cqe->sop_drop_qpn != + rte_cpu_to_be_32(MLX5_FLOW_MARK_DEFAULT)) { + uint32_t mark = cqe->sop_drop_qpn; + + pkt->ol_flags |= PKT_RX_FDIR_ID; + pkt->hash.fdir.hi = mlx5_flow_mark_get(mark); + } + } + if (rxq->dynf_meta && cqe->flow_table_metadata) { + pkt->ol_flags |= rxq->flow_meta_mask; + *RTE_MBUF_DYNFIELD(pkt, rxq->flow_meta_offset, uint32_t *) = + cqe->flow_table_metadata; + } + if (rxq->csum) + pkt->ol_flags |= rxq_cq_to_ol_flags(cqe); + if (rxq->vlan_strip && + (cqe->hdr_type_etc & rte_cpu_to_be_16(MLX5_CQE_VLAN_STRIPPED))) { + pkt->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED; + pkt->vlan_tci = rte_be_to_cpu_16(cqe->vlan_info); + } + if (rxq->hw_timestamp) { + pkt->timestamp = rte_be_to_cpu_64(cqe->timestamp); + pkt->ol_flags |= PKT_RX_TIMESTAMP; + } +} + +/** + * DPDK callback for RX. + * + * @param dpdk_rxq + * Generic pointer to RX queue structure. + * @param[out] pkts + * Array to store received packets. + * @param pkts_n + * Maximum number of packets in array. + * + * @return + * Number of packets successfully received (<= pkts_n). + */ +uint16_t +mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) +{ + struct mlx5_rxq_data *rxq = dpdk_rxq; + const unsigned int wqe_cnt = (1 << rxq->elts_n) - 1; + const unsigned int cqe_cnt = (1 << rxq->cqe_n) - 1; + const unsigned int sges_n = rxq->sges_n; + struct rte_mbuf *pkt = NULL; + struct rte_mbuf *seg = NULL; + volatile struct mlx5_cqe *cqe = + &(*rxq->cqes)[rxq->cq_ci & cqe_cnt]; + unsigned int i = 0; + unsigned int rq_ci = rxq->rq_ci << sges_n; + int len = 0; /* keep its value across iterations. */ + + while (pkts_n) { + unsigned int idx = rq_ci & wqe_cnt; + volatile struct mlx5_wqe_data_seg *wqe = + &((volatile struct mlx5_wqe_data_seg *)rxq->wqes)[idx]; + struct rte_mbuf *rep = (*rxq->elts)[idx]; + volatile struct mlx5_mini_cqe8 *mcqe = NULL; + uint32_t rss_hash_res; + + if (pkt) + NEXT(seg) = rep; + seg = rep; + rte_prefetch0(seg); + rte_prefetch0(cqe); + rte_prefetch0(wqe); + rep = rte_mbuf_raw_alloc(rxq->mp); + if (unlikely(rep == NULL)) { + ++rxq->stats.rx_nombuf; + if (!pkt) { + /* + * no buffers before we even started, + * bail out silently. + */ + break; + } + while (pkt != seg) { + MLX5_ASSERT(pkt != (*rxq->elts)[idx]); + rep = NEXT(pkt); + NEXT(pkt) = NULL; + NB_SEGS(pkt) = 1; + rte_mbuf_raw_free(pkt); + pkt = rep; + } + break; + } + if (!pkt) { + cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt]; + len = mlx5_rx_poll_len(rxq, cqe, cqe_cnt, &mcqe); + if (!len) { + rte_mbuf_raw_free(rep); + break; + } + pkt = seg; + MLX5_ASSERT(len >= (rxq->crc_present << 2)); + pkt->ol_flags &= EXT_ATTACHED_MBUF; + /* If compressed, take hash result from mini-CQE. */ + rss_hash_res = rte_be_to_cpu_32(mcqe == NULL ? 
+ cqe->rx_hash_res : + mcqe->rx_hash_result); + rxq_cq_to_mbuf(rxq, pkt, cqe, rss_hash_res); + if (rxq->crc_present) + len -= RTE_ETHER_CRC_LEN; + PKT_LEN(pkt) = len; + if (cqe->lro_num_seg > 1) { + mlx5_lro_update_hdr + (rte_pktmbuf_mtod(pkt, uint8_t *), cqe, + len); + pkt->ol_flags |= PKT_RX_LRO; + pkt->tso_segsz = len / cqe->lro_num_seg; + } + } + DATA_LEN(rep) = DATA_LEN(seg); + PKT_LEN(rep) = PKT_LEN(seg); + SET_DATA_OFF(rep, DATA_OFF(seg)); + PORT(rep) = PORT(seg); + (*rxq->elts)[idx] = rep; + /* + * Fill NIC descriptor with the new buffer. The lkey and size + * of the buffers are already known, only the buffer address + * changes. + */ + wqe->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(rep, uintptr_t)); + /* If there's only one MR, no need to replace LKey in WQE. */ + if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1)) + wqe->lkey = mlx5_rx_mb2mr(rxq, rep); + if (len > DATA_LEN(seg)) { + len -= DATA_LEN(seg); + ++NB_SEGS(pkt); + ++rq_ci; + continue; + } + DATA_LEN(seg) = len; +#ifdef MLX5_PMD_SOFT_COUNTERS + /* Increment bytes counter. */ + rxq->stats.ibytes += PKT_LEN(pkt); +#endif + /* Return packet. */ + *(pkts++) = pkt; + pkt = NULL; + --pkts_n; + ++i; + /* Align consumer index to the next stride. */ + rq_ci >>= sges_n; + ++rq_ci; + rq_ci <<= sges_n; + } + if (unlikely((i == 0) && ((rq_ci >> sges_n) == rxq->rq_ci))) + return 0; + /* Update the consumer index. */ + rxq->rq_ci = rq_ci >> sges_n; + rte_cio_wmb(); + *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci); + rte_cio_wmb(); + *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci); +#ifdef MLX5_PMD_SOFT_COUNTERS + /* Increment packets counter. */ + rxq->stats.ipackets += i; +#endif + return i; +} + +/** + * Update LRO packet TCP header. + * The HW LRO feature doesn't update the TCP header after coalescing the + * TCP segments but supplies information in CQE to fill it by SW. + * + * @param tcp + * Pointer to the TCP header. + * @param cqe + * Pointer to the completion entry.. + * @param phcsum + * The L3 pseudo-header checksum. + */ +static inline void +mlx5_lro_update_tcp_hdr(struct rte_tcp_hdr *restrict tcp, + volatile struct mlx5_cqe *restrict cqe, + uint32_t phcsum) +{ + uint8_t l4_type = (rte_be_to_cpu_16(cqe->hdr_type_etc) & + MLX5_CQE_L4_TYPE_MASK) >> MLX5_CQE_L4_TYPE_SHIFT; + /* + * The HW calculates only the TCP payload checksum, need to complete + * the TCP header checksum and the L3 pseudo-header checksum. + */ + uint32_t csum = phcsum + cqe->csum; + + if (l4_type == MLX5_L4_HDR_TYPE_TCP_EMPTY_ACK || + l4_type == MLX5_L4_HDR_TYPE_TCP_WITH_ACL) { + tcp->tcp_flags |= RTE_TCP_ACK_FLAG; + tcp->recv_ack = cqe->lro_ack_seq_num; + tcp->rx_win = cqe->lro_tcp_win; + } + if (cqe->lro_tcppsh_abort_dupack & MLX5_CQE_LRO_PUSH_MASK) + tcp->tcp_flags |= RTE_TCP_PSH_FLAG; + tcp->cksum = 0; + csum += rte_raw_cksum(tcp, (tcp->data_off & 0xF) * 4); + csum = ((csum & 0xffff0000) >> 16) + (csum & 0xffff); + csum = (~csum) & 0xffff; + if (csum == 0) + csum = 0xffff; + tcp->cksum = csum; +} + +/** + * Update LRO packet headers. + * The HW LRO feature doesn't update the L3/TCP headers after coalescing the + * TCP segments but supply information in CQE to fill it by SW. + * + * @param padd + * The packet address. + * @param cqe + * Pointer to the completion entry.. + * @param len + * The packet length. 
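+ *
+ * Invocation pattern, extracted from the Rx burst routines in this file:
+ * @code
+ *	if (cqe->lro_num_seg > 1) {
+ *		mlx5_lro_update_hdr(rte_pktmbuf_mtod(pkt, uint8_t *),
+ *				    cqe, len);
+ *		pkt->ol_flags |= PKT_RX_LRO;
+ *		pkt->tso_segsz = len / cqe->lro_num_seg;
+ *	}
+ * @endcode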
+ */ +static inline void +mlx5_lro_update_hdr(uint8_t *restrict padd, + volatile struct mlx5_cqe *restrict cqe, + uint32_t len) +{ + union { + struct rte_ether_hdr *eth; + struct rte_vlan_hdr *vlan; + struct rte_ipv4_hdr *ipv4; + struct rte_ipv6_hdr *ipv6; + struct rte_tcp_hdr *tcp; + uint8_t *hdr; + } h = { + .hdr = padd, + }; + uint16_t proto = h.eth->ether_type; + uint32_t phcsum; + + h.eth++; + while (proto == RTE_BE16(RTE_ETHER_TYPE_VLAN) || + proto == RTE_BE16(RTE_ETHER_TYPE_QINQ)) { + proto = h.vlan->eth_proto; + h.vlan++; + } + if (proto == RTE_BE16(RTE_ETHER_TYPE_IPV4)) { + h.ipv4->time_to_live = cqe->lro_min_ttl; + h.ipv4->total_length = rte_cpu_to_be_16(len - (h.hdr - padd)); + h.ipv4->hdr_checksum = 0; + h.ipv4->hdr_checksum = rte_ipv4_cksum(h.ipv4); + phcsum = rte_ipv4_phdr_cksum(h.ipv4, 0); + h.ipv4++; + } else { + h.ipv6->hop_limits = cqe->lro_min_ttl; + h.ipv6->payload_len = rte_cpu_to_be_16(len - (h.hdr - padd) - + sizeof(*h.ipv6)); + phcsum = rte_ipv6_phdr_cksum(h.ipv6, 0); + h.ipv6++; + } + mlx5_lro_update_tcp_hdr(h.tcp, cqe, phcsum); +} + +void +mlx5_mprq_buf_free_cb(void *addr __rte_unused, void *opaque) +{ + struct mlx5_mprq_buf *buf = opaque; + + if (rte_atomic16_read(&buf->refcnt) == 1) { + rte_mempool_put(buf->mp, buf); + } else if (rte_atomic16_add_return(&buf->refcnt, -1) == 0) { + rte_atomic16_set(&buf->refcnt, 1); + rte_mempool_put(buf->mp, buf); + } +} + +void +mlx5_mprq_buf_free(struct mlx5_mprq_buf *buf) +{ + mlx5_mprq_buf_free_cb(NULL, buf); +} + +static inline void +mprq_buf_replace(struct mlx5_rxq_data *rxq, uint16_t rq_idx, + const unsigned int strd_n) +{ + struct mlx5_mprq_buf *rep = rxq->mprq_repl; + volatile struct mlx5_wqe_data_seg *wqe = + &((volatile struct mlx5_wqe_mprq *)rxq->wqes)[rq_idx].dseg; + void *addr; + + MLX5_ASSERT(rep != NULL); + /* Replace MPRQ buf. */ + (*rxq->mprq_bufs)[rq_idx] = rep; + /* Replace WQE. */ + addr = mlx5_mprq_buf_addr(rep, strd_n); + wqe->addr = rte_cpu_to_be_64((uintptr_t)addr); + /* If there's only one MR, no need to replace LKey in WQE. */ + if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1)) + wqe->lkey = mlx5_rx_addr2mr(rxq, (uintptr_t)addr); + /* Stash a mbuf for next replacement. */ + if (likely(!rte_mempool_get(rxq->mprq_mp, (void **)&rep))) + rxq->mprq_repl = rep; + else + rxq->mprq_repl = NULL; +} + +/** + * DPDK callback for RX with Multi-Packet RQ support. + * + * @param dpdk_rxq + * Generic pointer to RX queue structure. + * @param[out] pkts + * Array to store received packets. + * @param pkts_n + * Maximum number of packets in array. + * + * @return + * Number of packets successfully received (<= pkts_n). 
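+ *
+ * This routine is installed as dev->rx_pkt_burst when Multi-Packet RQ is
+ * enabled; applications reach it through the generic ethdev API, e.g.:
+ * @code
+ *	struct rte_mbuf *mbufs[32];
+ *	uint16_t nb = rte_eth_rx_burst(port_id, queue_id, mbufs, 32);
+ * @endcode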
+ */ +uint16_t +mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) +{ + struct mlx5_rxq_data *rxq = dpdk_rxq; + const unsigned int strd_n = 1 << rxq->strd_num_n; + const unsigned int strd_sz = 1 << rxq->strd_sz_n; + const unsigned int strd_shift = + MLX5_MPRQ_STRIDE_SHIFT_BYTE * rxq->strd_shift_en; + const unsigned int cq_mask = (1 << rxq->cqe_n) - 1; + const unsigned int wq_mask = (1 << rxq->elts_n) - 1; + volatile struct mlx5_cqe *cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask]; + unsigned int i = 0; + uint32_t rq_ci = rxq->rq_ci; + uint16_t consumed_strd = rxq->consumed_strd; + struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[rq_ci & wq_mask]; + + while (i < pkts_n) { + struct rte_mbuf *pkt; + void *addr; + int ret; + uint32_t len; + uint16_t strd_cnt; + uint16_t strd_idx; + uint32_t offset; + uint32_t byte_cnt; + int32_t hdrm_overlap; + volatile struct mlx5_mini_cqe8 *mcqe = NULL; + uint32_t rss_hash_res = 0; + + if (consumed_strd == strd_n) { + /* Replace WQE only if the buffer is still in use. */ + if (rte_atomic16_read(&buf->refcnt) > 1) { + mprq_buf_replace(rxq, rq_ci & wq_mask, strd_n); + /* Release the old buffer. */ + mlx5_mprq_buf_free(buf); + } else if (unlikely(rxq->mprq_repl == NULL)) { + struct mlx5_mprq_buf *rep; + + /* + * Currently, the MPRQ mempool is out of buffer + * and doing memcpy regardless of the size of Rx + * packet. Retry allocation to get back to + * normal. + */ + if (!rte_mempool_get(rxq->mprq_mp, + (void **)&rep)) + rxq->mprq_repl = rep; + } + /* Advance to the next WQE. */ + consumed_strd = 0; + ++rq_ci; + buf = (*rxq->mprq_bufs)[rq_ci & wq_mask]; + } + cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask]; + ret = mlx5_rx_poll_len(rxq, cqe, cq_mask, &mcqe); + if (!ret) + break; + byte_cnt = ret; + strd_cnt = (byte_cnt & MLX5_MPRQ_STRIDE_NUM_MASK) >> + MLX5_MPRQ_STRIDE_NUM_SHIFT; + MLX5_ASSERT(strd_cnt); + consumed_strd += strd_cnt; + if (byte_cnt & MLX5_MPRQ_FILLER_MASK) + continue; + if (mcqe == NULL) { + rss_hash_res = rte_be_to_cpu_32(cqe->rx_hash_res); + strd_idx = rte_be_to_cpu_16(cqe->wqe_counter); + } else { + /* mini-CQE for MPRQ doesn't have hash result. */ + strd_idx = rte_be_to_cpu_16(mcqe->stride_idx); + } + MLX5_ASSERT(strd_idx < strd_n); + MLX5_ASSERT(!((rte_be_to_cpu_16(cqe->wqe_id) ^ rq_ci) & + wq_mask)); + pkt = rte_pktmbuf_alloc(rxq->mp); + if (unlikely(pkt == NULL)) { + ++rxq->stats.rx_nombuf; + break; + } + len = (byte_cnt & MLX5_MPRQ_LEN_MASK) >> MLX5_MPRQ_LEN_SHIFT; + MLX5_ASSERT((int)len >= (rxq->crc_present << 2)); + if (rxq->crc_present) + len -= RTE_ETHER_CRC_LEN; + offset = strd_idx * strd_sz + strd_shift; + addr = RTE_PTR_ADD(mlx5_mprq_buf_addr(buf, strd_n), offset); + hdrm_overlap = len + RTE_PKTMBUF_HEADROOM - strd_cnt * strd_sz; + /* + * Memcpy packets to the target mbuf if: + * - The size of packet is smaller than mprq_max_memcpy_len. + * - Out of buffer in the Mempool for Multi-Packet RQ. + * - The packet's stride overlaps a headroom and scatter is off. 
+ */ + if (len <= rxq->mprq_max_memcpy_len || + rxq->mprq_repl == NULL || + (hdrm_overlap > 0 && !rxq->strd_scatter_en)) { + if (likely(rte_pktmbuf_tailroom(pkt) >= len)) { + rte_memcpy(rte_pktmbuf_mtod(pkt, void *), + addr, len); + DATA_LEN(pkt) = len; + } else if (rxq->strd_scatter_en) { + struct rte_mbuf *prev = pkt; + uint32_t seg_len = + RTE_MIN(rte_pktmbuf_tailroom(pkt), len); + uint32_t rem_len = len - seg_len; + + rte_memcpy(rte_pktmbuf_mtod(pkt, void *), + addr, seg_len); + DATA_LEN(pkt) = seg_len; + while (rem_len) { + struct rte_mbuf *next = + rte_pktmbuf_alloc(rxq->mp); + + if (unlikely(next == NULL)) { + rte_pktmbuf_free(pkt); + ++rxq->stats.rx_nombuf; + goto out; + } + NEXT(prev) = next; + SET_DATA_OFF(next, 0); + addr = RTE_PTR_ADD(addr, seg_len); + seg_len = RTE_MIN + (rte_pktmbuf_tailroom(next), + rem_len); + rte_memcpy + (rte_pktmbuf_mtod(next, void *), + addr, seg_len); + DATA_LEN(next) = seg_len; + rem_len -= seg_len; + prev = next; + ++NB_SEGS(pkt); + } + } else { + rte_pktmbuf_free_seg(pkt); + ++rxq->stats.idropped; + continue; + } + } else { + rte_iova_t buf_iova; + struct rte_mbuf_ext_shared_info *shinfo; + uint16_t buf_len = strd_cnt * strd_sz; + void *buf_addr; + + /* Increment the refcnt of the whole chunk. */ + rte_atomic16_add_return(&buf->refcnt, 1); + MLX5_ASSERT((uint16_t)rte_atomic16_read(&buf->refcnt) <= + strd_n + 1); + buf_addr = RTE_PTR_SUB(addr, RTE_PKTMBUF_HEADROOM); + /* + * MLX5 device doesn't use iova but it is necessary in a + * case where the Rx packet is transmitted via a + * different PMD. + */ + buf_iova = rte_mempool_virt2iova(buf) + + RTE_PTR_DIFF(buf_addr, buf); + shinfo = &buf->shinfos[strd_idx]; + rte_mbuf_ext_refcnt_set(shinfo, 1); + /* + * EXT_ATTACHED_MBUF will be set to pkt->ol_flags when + * attaching the stride to mbuf and more offload flags + * will be added below by calling rxq_cq_to_mbuf(). + * Other fields will be overwritten. + */ + rte_pktmbuf_attach_extbuf(pkt, buf_addr, buf_iova, + buf_len, shinfo); + /* Set mbuf head-room. */ + SET_DATA_OFF(pkt, RTE_PKTMBUF_HEADROOM); + MLX5_ASSERT(pkt->ol_flags == EXT_ATTACHED_MBUF); + MLX5_ASSERT(rte_pktmbuf_tailroom(pkt) >= + len - (hdrm_overlap > 0 ? hdrm_overlap : 0)); + DATA_LEN(pkt) = len; + /* + * Copy the last fragment of a packet (up to headroom + * size bytes) in case there is a stride overlap with + * a next packet's headroom. Allocate a separate mbuf + * to store this fragment and link it. Scatter is on. + */ + if (hdrm_overlap > 0) { + MLX5_ASSERT(rxq->strd_scatter_en); + struct rte_mbuf *seg = + rte_pktmbuf_alloc(rxq->mp); + + if (unlikely(seg == NULL)) { + rte_pktmbuf_free_seg(pkt); + ++rxq->stats.rx_nombuf; + break; + } + SET_DATA_OFF(seg, 0); + rte_memcpy(rte_pktmbuf_mtod(seg, void *), + RTE_PTR_ADD(addr, len - hdrm_overlap), + hdrm_overlap); + DATA_LEN(seg) = hdrm_overlap; + DATA_LEN(pkt) = len - hdrm_overlap; + NEXT(pkt) = seg; + NB_SEGS(pkt) = 2; + } + } + rxq_cq_to_mbuf(rxq, pkt, cqe, rss_hash_res); + if (cqe->lro_num_seg > 1) { + mlx5_lro_update_hdr(addr, cqe, len); + pkt->ol_flags |= PKT_RX_LRO; + pkt->tso_segsz = len / cqe->lro_num_seg; + } + PKT_LEN(pkt) = len; + PORT(pkt) = rxq->port_id; +#ifdef MLX5_PMD_SOFT_COUNTERS + /* Increment bytes counter. */ + rxq->stats.ibytes += PKT_LEN(pkt); +#endif + /* Return packet. */ + *(pkts++) = pkt; + ++i; + } +out: + /* Update the consumer indexes. 
*/ + rxq->consumed_strd = consumed_strd; + rte_cio_wmb(); + *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci); + if (rq_ci != rxq->rq_ci) { + rxq->rq_ci = rq_ci; + rte_cio_wmb(); + *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci); + } +#ifdef MLX5_PMD_SOFT_COUNTERS + /* Increment packets counter. */ + rxq->stats.ipackets += i; +#endif + return i; +} + +/** + * Dummy DPDK callback for TX. + * + * This function is used to temporarily replace the real callback during + * unsafe control operations on the queue, or in case of error. + * + * @param dpdk_txq + * Generic pointer to TX queue structure. + * @param[in] pkts + * Packets to transmit. + * @param pkts_n + * Number of packets in array. + * + * @return + * Number of packets successfully transmitted (<= pkts_n). + */ +uint16_t +removed_tx_burst(void *dpdk_txq __rte_unused, + struct rte_mbuf **pkts __rte_unused, + uint16_t pkts_n __rte_unused) +{ + rte_mb(); + return 0; +} + +/** + * Dummy DPDK callback for RX. + * + * This function is used to temporarily replace the real callback during + * unsafe control operations on the queue, or in case of error. + * + * @param dpdk_rxq + * Generic pointer to RX queue structure. + * @param[out] pkts + * Array to store received packets. + * @param pkts_n + * Maximum number of packets in array. + * + * @return + * Number of packets successfully received (<= pkts_n). + */ +uint16_t +removed_rx_burst(void *dpdk_txq __rte_unused, + struct rte_mbuf **pkts __rte_unused, + uint16_t pkts_n __rte_unused) +{ + rte_mb(); + return 0; +} + +/* + * Vectorized Rx/Tx routines are not compiled in when required vector + * instructions are not supported on a target architecture. The following null + * stubs are needed for linkage when those are not included outside of this file + * (e.g. mlx5_rxtx_vec_sse.c for x86). + */ + +__rte_weak uint16_t +mlx5_rx_burst_vec(void *dpdk_txq __rte_unused, + struct rte_mbuf **pkts __rte_unused, + uint16_t pkts_n __rte_unused) +{ + return 0; +} + +__rte_weak int +mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq __rte_unused) +{ + return -ENOTSUP; +} + +__rte_weak int +mlx5_check_vec_rx_support(struct rte_eth_dev *dev __rte_unused) +{ + return -ENOTSUP; +} + +/** + * Free the mbufs from the linear array of pointers. + * + * @param pkts + * Pointer to array of packets to be free. + * @param pkts_n + * Number of packets to be freed. + * @param olx + * Configured Tx offloads mask. It is fully defined at + * compile time and may be used for optimization. + */ +static __rte_always_inline void +mlx5_tx_free_mbuf(struct rte_mbuf **restrict pkts, + unsigned int pkts_n, + unsigned int olx __rte_unused) +{ + struct rte_mempool *pool = NULL; + struct rte_mbuf **p_free = NULL; + struct rte_mbuf *mbuf; + unsigned int n_free = 0; + + /* + * The implemented algorithm eliminates + * copying pointers to temporary array + * for rte_mempool_put_bulk() calls. + */ + MLX5_ASSERT(pkts); + MLX5_ASSERT(pkts_n); + for (;;) { + for (;;) { + /* + * Decrement mbuf reference counter, detach + * indirect and external buffers if needed. + */ + mbuf = rte_pktmbuf_prefree_seg(*pkts); + if (likely(mbuf != NULL)) { + MLX5_ASSERT(mbuf == *pkts); + if (likely(n_free != 0)) { + if (unlikely(pool != mbuf->pool)) + /* From different pool. */ + break; + } else { + /* Start new scan array. */ + pool = mbuf->pool; + p_free = pkts; + } + ++n_free; + ++pkts; + --pkts_n; + if (unlikely(pkts_n == 0)) { + mbuf = NULL; + break; + } + } else { + /* + * This happens if mbuf is still referenced. + * We can't put it back to the pool, skip. 
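+ * rte_pktmbuf_prefree_seg() returned NULL above - the
+ * reference count has not reached zero, so the buffer
+ * still belongs to another owner.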
+ */ + ++pkts; + --pkts_n; + if (unlikely(n_free != 0)) + /* There is some array to free.*/ + break; + if (unlikely(pkts_n == 0)) + /* Last mbuf, nothing to free. */ + return; + } + } + for (;;) { + /* + * This loop is implemented to avoid multiple + * inlining of rte_mempool_put_bulk(). + */ + MLX5_ASSERT(pool); + MLX5_ASSERT(p_free); + MLX5_ASSERT(n_free); + /* + * Free the array of pre-freed mbufs + * belonging to the same memory pool. + */ + rte_mempool_put_bulk(pool, (void *)p_free, n_free); + if (unlikely(mbuf != NULL)) { + /* There is the request to start new scan. */ + pool = mbuf->pool; + p_free = pkts++; + n_free = 1; + --pkts_n; + if (likely(pkts_n != 0)) + break; + /* + * This is the last mbuf to be freed. + * Do one more loop iteration to complete. + * This is rare case of the last unique mbuf. + */ + mbuf = NULL; + continue; + } + if (likely(pkts_n == 0)) + return; + n_free = 0; + break; + } + } +} + +/** + * Free the mbuf from the elts ring buffer till new tail. + * + * @param txq + * Pointer to Tx queue structure. + * @param tail + * Index in elts to free up to, becomes new elts tail. + * @param olx + * Configured Tx offloads mask. It is fully defined at + * compile time and may be used for optimization. + */ +static __rte_always_inline void +mlx5_tx_free_elts(struct mlx5_txq_data *restrict txq, + uint16_t tail, + unsigned int olx __rte_unused) +{ + uint16_t n_elts = tail - txq->elts_tail; + + MLX5_ASSERT(n_elts); + MLX5_ASSERT(n_elts <= txq->elts_s); + /* + * Implement a loop to support ring buffer wraparound + * with single inlining of mlx5_tx_free_mbuf(). + */ + do { + unsigned int part; + + part = txq->elts_s - (txq->elts_tail & txq->elts_m); + part = RTE_MIN(part, n_elts); + MLX5_ASSERT(part); + MLX5_ASSERT(part <= txq->elts_s); + mlx5_tx_free_mbuf(&txq->elts[txq->elts_tail & txq->elts_m], + part, olx); + txq->elts_tail += part; + n_elts -= part; + } while (n_elts); +} + +/** + * Store the mbuf being sent into elts ring buffer. + * On Tx completion these mbufs will be freed. + * + * @param txq + * Pointer to Tx queue structure. + * @param pkts + * Pointer to array of packets to be stored. + * @param pkts_n + * Number of packets to be stored. + * @param olx + * Configured Tx offloads mask. It is fully defined at + * compile time and may be used for optimization. + */ +static __rte_always_inline void +mlx5_tx_copy_elts(struct mlx5_txq_data *restrict txq, + struct rte_mbuf **restrict pkts, + unsigned int pkts_n, + unsigned int olx __rte_unused) +{ + unsigned int part; + struct rte_mbuf **elts = (struct rte_mbuf **)txq->elts; + + MLX5_ASSERT(pkts); + MLX5_ASSERT(pkts_n); + part = txq->elts_s - (txq->elts_head & txq->elts_m); + MLX5_ASSERT(part); + MLX5_ASSERT(part <= txq->elts_s); + /* This code is a good candidate for vectorizing with SIMD. */ + rte_memcpy((void *)(elts + (txq->elts_head & txq->elts_m)), + (void *)pkts, + RTE_MIN(part, pkts_n) * sizeof(struct rte_mbuf *)); + txq->elts_head += pkts_n; + if (unlikely(part < pkts_n)) + /* The copy is wrapping around the elts array. */ + rte_memcpy((void *)elts, (void *)(pkts + part), + (pkts_n - part) * sizeof(struct rte_mbuf *)); +} + +/** + * Update completion queue consuming index via doorbell + * and flush the completed data buffers. + * + * @param txq + * Pointer to TX queue structure. + * @param valid CQE pointer + * if not NULL update txq->wqe_pi and flush the buffers + * @param olx + * Configured Tx offloads mask. It is fully defined at + * compile time and may be used for optimization. 
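+ *
+ * Invoked from mlx5_tx_handle_completion() right after the CQ doorbell
+ * record is updated (extract from the routine below):
+ * @code
+ *	rte_compiler_barrier();
+ *	*txq->cq_db = rte_cpu_to_be_32(txq->cq_ci);
+ *	mlx5_tx_comp_flush(txq, last_cqe, olx);
+ * @endcode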
+ */ +static __rte_always_inline void +mlx5_tx_comp_flush(struct mlx5_txq_data *restrict txq, + volatile struct mlx5_cqe *last_cqe, + unsigned int olx __rte_unused) +{ + if (likely(last_cqe != NULL)) { + uint16_t tail; + + txq->wqe_pi = rte_be_to_cpu_16(last_cqe->wqe_counter); + tail = txq->fcqs[(txq->cq_ci - 1) & txq->cqe_m]; + if (likely(tail != txq->elts_tail)) { + mlx5_tx_free_elts(txq, tail, olx); + MLX5_ASSERT(tail == txq->elts_tail); + } + } +} + +/** + * Manage TX completions. This routine checks the CQ for + * arrived CQEs, deduces the last accomplished WQE in SQ, + * updates SQ producing index and frees all completed mbufs. + * + * @param txq + * Pointer to TX queue structure. + * @param olx + * Configured Tx offloads mask. It is fully defined at + * compile time and may be used for optimization. + * + * NOTE: not inlined intentionally, it makes tx_burst + * routine smaller, simple and faster - from experiments. + */ +static void +mlx5_tx_handle_completion(struct mlx5_txq_data *restrict txq, + unsigned int olx __rte_unused) +{ + unsigned int count = MLX5_TX_COMP_MAX_CQE; + volatile struct mlx5_cqe *last_cqe = NULL; + bool ring_doorbell = false; + int ret; + + static_assert(MLX5_CQE_STATUS_HW_OWN < 0, "Must be negative value"); + static_assert(MLX5_CQE_STATUS_SW_OWN < 0, "Must be negative value"); + do { + volatile struct mlx5_cqe *cqe; + + cqe = &txq->cqes[txq->cq_ci & txq->cqe_m]; + ret = check_cqe(cqe, txq->cqe_s, txq->cq_ci); + if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) { + if (likely(ret != MLX5_CQE_STATUS_ERR)) { + /* No new CQEs in completion queue. */ + MLX5_ASSERT(ret == MLX5_CQE_STATUS_HW_OWN); + break; + } + /* + * Some error occurred, try to restart. + * We have no barrier after WQE related Doorbell + * written, make sure all writes are completed + * here, before we might perform SQ reset. + */ + rte_wmb(); + ret = mlx5_tx_error_cqe_handle + (txq, (volatile struct mlx5_err_cqe *)cqe); + if (unlikely(ret < 0)) { + /* + * Some error occurred on queue error + * handling, we do not advance the index + * here, allowing to retry on next call. + */ + return; + } + /* + * We are going to fetch all entries with + * MLX5_CQE_SYNDROME_WR_FLUSH_ERR status. + * The send queue is supposed to be empty. + */ + ring_doorbell = true; + ++txq->cq_ci; + txq->cq_pi = txq->cq_ci; + last_cqe = NULL; + continue; + } + /* Normal transmit completion. */ + MLX5_ASSERT(txq->cq_ci != txq->cq_pi); + MLX5_ASSERT((txq->fcqs[txq->cq_ci & txq->cqe_m] >> 16) == + cqe->wqe_counter); + ring_doorbell = true; + ++txq->cq_ci; + last_cqe = cqe; + /* + * We have to restrict the amount of processed CQEs + * in one tx_burst routine call. The CQ may be large + * and many CQEs may be updated by the NIC in one + * transaction. Buffers freeing is time consuming, + * multiple iterations may introduce significant + * latency. + */ + if (likely(--count == 0)) + break; + } while (true); + if (likely(ring_doorbell)) { + /* Ring doorbell to notify hardware. */ + rte_compiler_barrier(); + *txq->cq_db = rte_cpu_to_be_32(txq->cq_ci); + mlx5_tx_comp_flush(txq, last_cqe, olx); + } +} + +/** + * Check if the completion request flag should be set in the last WQE. + * Both pushed mbufs and WQEs are monitored and the completion request + * flag is set if any of thresholds is reached. + * + * @param txq + * Pointer to TX queue structure. + * @param loc + * Pointer to burst routine local context. + * @param olx + * Configured Tx offloads mask. It is fully defined at + * compile time and may be used for optimization. 
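+ *
+ * Usage sketch only - the Tx burst template invoking this helper is not
+ * part of this fragment; loc->wqe_last is assumed to point at the last
+ * WQE built for the burst and olx to be the compile-time offload mask:
+ * @code
+ *	mlx5_tx_request_completion(txq, &loc, olx);
+ * @endcode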
+ */ +static __rte_always_inline void +mlx5_tx_request_completion(struct mlx5_txq_data *restrict txq, + struct mlx5_txq_local *restrict loc, + unsigned int olx) +{ + uint16_t head = txq->elts_head; + unsigned int part; + + part = MLX5_TXOFF_CONFIG(INLINE) ? + 0 : loc->pkts_sent - loc->pkts_copy; + head += part; + if ((uint16_t)(head - txq->elts_comp) >= MLX5_TX_COMP_THRESH || + (MLX5_TXOFF_CONFIG(INLINE) && + (uint16_t)(txq->wqe_ci - txq->wqe_comp) >= txq->wqe_thres)) { + volatile struct mlx5_wqe *last = loc->wqe_last; + + MLX5_ASSERT(last); + txq->elts_comp = head; + if (MLX5_TXOFF_CONFIG(INLINE)) + txq->wqe_comp = txq->wqe_ci; + /* Request unconditional completion on last WQE. */ + last->cseg.flags = RTE_BE32(MLX5_COMP_ALWAYS << + MLX5_COMP_MODE_OFFSET); + /* Save elts_head in dedicated free on completion queue. */ +#ifdef RTE_LIBRTE_MLX5_DEBUG + txq->fcqs[txq->cq_pi++ & txq->cqe_m] = head | + (last->cseg.opcode >> 8) << 16; +#else + txq->fcqs[txq->cq_pi++ & txq->cqe_m] = head; +#endif + /* A CQE slot must always be available. */ + MLX5_ASSERT((txq->cq_pi - txq->cq_ci) <= txq->cqe_s); + } +} + +/** + * DPDK callback to check the status of a tx descriptor. + * + * @param tx_queue + * The tx queue. + * @param[in] offset + * The index of the descriptor in the ring. + * + * @return + * The status of the tx descriptor. + */ +int +mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset) +{ + struct mlx5_txq_data *restrict txq = tx_queue; + uint16_t used; + + mlx5_tx_handle_completion(txq, 0); + used = txq->elts_head - txq->elts_tail; + if (offset < used) + return RTE_ETH_TX_DESC_FULL; + return RTE_ETH_TX_DESC_DONE; +} + +/** + * Build the Control Segment with specified opcode: + * - MLX5_OPCODE_SEND + * - MLX5_OPCODE_ENHANCED_MPSW + * - MLX5_OPCODE_TSO + * + * @param txq + * Pointer to TX queue structure. + * @param loc + * Pointer to burst routine local context. + * @param wqe + * Pointer to WQE to fill with built Control Segment. + * @param ds + * Supposed length of WQE in segments. + * @param opcode + * SQ WQE opcode to put into Control Segment. + * @param olx + * Configured Tx offloads mask. It is fully defined at + * compile time and may be used for optimization. + */ +static __rte_always_inline void +mlx5_tx_cseg_init(struct mlx5_txq_data *restrict txq, + struct mlx5_txq_local *restrict loc __rte_unused, + struct mlx5_wqe *restrict wqe, + unsigned int ds, + unsigned int opcode, + unsigned int olx __rte_unused) +{ + struct mlx5_wqe_cseg *restrict cs = &wqe->cseg; + + /* For legacy MPW replace the EMPW by TSO with modifier. */ + if (MLX5_TXOFF_CONFIG(MPW) && opcode == MLX5_OPCODE_ENHANCED_MPSW) + opcode = MLX5_OPCODE_TSO | MLX5_OPC_MOD_MPW << 24; + cs->opcode = rte_cpu_to_be_32((txq->wqe_ci << 8) | opcode); + cs->sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds); + cs->flags = RTE_BE32(MLX5_COMP_ONLY_FIRST_ERR << + MLX5_COMP_MODE_OFFSET); + cs->misc = RTE_BE32(0); +} + +/** + * Build the Ethernet Segment without inlined data. + * Supports Software Parser, Checksums and VLAN + * insertion Tx offload features. + * + * @param txq + * Pointer to TX queue structure. + * @param loc + * Pointer to burst routine local context. + * @param wqe + * Pointer to WQE to fill with built Ethernet Segment. + * @param olx + * Configured Tx offloads mask. It is fully defined at + * compile time and may be used for optimization. 
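+ *
+ * Usage sketch only - the burst routine owning the WQE is not part of
+ * this fragment; wqe is assumed to address the next free WQEBB and the
+ * descriptor count 3 stands for Control + Ethernet + one Data Segment:
+ * @code
+ *	mlx5_tx_cseg_init(txq, loc, wqe, 3, MLX5_OPCODE_SEND, olx);
+ *	mlx5_tx_eseg_none(txq, loc, wqe, olx);
+ * @endcode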
+ */ +static __rte_always_inline void +mlx5_tx_eseg_none(struct mlx5_txq_data *restrict txq __rte_unused, + struct mlx5_txq_local *restrict loc, + struct mlx5_wqe *restrict wqe, + unsigned int olx) +{ + struct mlx5_wqe_eseg *restrict es = &wqe->eseg; + uint32_t csum; + + /* + * Calculate and set check sum flags first, dword field + * in segment may be shared with Software Parser flags. + */ + csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0; + es->flags = rte_cpu_to_le_32(csum); + /* + * Calculate and set Software Parser offsets and flags. + * These flags a set for custom UDP and IP tunnel packets. + */ + es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx); + /* Fill metadata field if needed. */ + es->metadata = MLX5_TXOFF_CONFIG(METADATA) ? + loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ? + *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0; + /* Engage VLAN tag insertion feature if requested. */ + if (MLX5_TXOFF_CONFIG(VLAN) && + loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) { + /* + * We should get here only if device support + * this feature correctly. + */ + MLX5_ASSERT(txq->vlan_en); + es->inline_hdr = rte_cpu_to_be_32(MLX5_ETH_WQE_VLAN_INSERT | + loc->mbuf->vlan_tci); + } else { + es->inline_hdr = RTE_BE32(0); + } +} + +/** + * Build the Ethernet Segment with minimal inlined data + * of MLX5_ESEG_MIN_INLINE_SIZE bytes length. This is + * used to fill the gap in single WQEBB WQEs. + * Supports Software Parser, Checksums and VLAN + * insertion Tx offload features. + * + * @param txq + * Pointer to TX queue structure. + * @param loc + * Pointer to burst routine local context. + * @param wqe + * Pointer to WQE to fill with built Ethernet Segment. + * @param vlan + * Length of VLAN tag insertion if any. + * @param olx + * Configured Tx offloads mask. It is fully defined at + * compile time and may be used for optimization. + */ +static __rte_always_inline void +mlx5_tx_eseg_dmin(struct mlx5_txq_data *restrict txq __rte_unused, + struct mlx5_txq_local *restrict loc, + struct mlx5_wqe *restrict wqe, + unsigned int vlan, + unsigned int olx) +{ + struct mlx5_wqe_eseg *restrict es = &wqe->eseg; + uint32_t csum; + uint8_t *psrc, *pdst; + + /* + * Calculate and set check sum flags first, dword field + * in segment may be shared with Software Parser flags. + */ + csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0; + es->flags = rte_cpu_to_le_32(csum); + /* + * Calculate and set Software Parser offsets and flags. + * These flags a set for custom UDP and IP tunnel packets. + */ + es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx); + /* Fill metadata field if needed. */ + es->metadata = MLX5_TXOFF_CONFIG(METADATA) ? + loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ? + *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0; + static_assert(MLX5_ESEG_MIN_INLINE_SIZE == + (sizeof(uint16_t) + + sizeof(rte_v128u32_t)), + "invalid Ethernet Segment data size"); + static_assert(MLX5_ESEG_MIN_INLINE_SIZE == + (sizeof(uint16_t) + + sizeof(struct rte_vlan_hdr) + + 2 * RTE_ETHER_ADDR_LEN), + "invalid Ethernet Segment data size"); + psrc = rte_pktmbuf_mtod(loc->mbuf, uint8_t *); + es->inline_hdr_sz = RTE_BE16(MLX5_ESEG_MIN_INLINE_SIZE); + es->inline_data = *(unaligned_uint16_t *)psrc; + psrc += sizeof(uint16_t); + pdst = (uint8_t *)(es + 1); + if (MLX5_TXOFF_CONFIG(VLAN) && vlan) { + /* Implement VLAN tag insertion as part inline data. 
*/ + memcpy(pdst, psrc, 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t)); + pdst += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t); + psrc += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t); + /* Insert VLAN ethertype + VLAN tag. */ + *(unaligned_uint32_t *)pdst = rte_cpu_to_be_32 + ((RTE_ETHER_TYPE_VLAN << 16) | + loc->mbuf->vlan_tci); + pdst += sizeof(struct rte_vlan_hdr); + /* Copy the rest two bytes from packet data. */ + MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, sizeof(uint16_t))); + *(uint16_t *)pdst = *(unaligned_uint16_t *)psrc; + } else { + /* Fill the gap in the title WQEBB with inline data. */ + rte_mov16(pdst, psrc); + } +} + +/** + * Build the Ethernet Segment with entire packet + * data inlining. Checks the boundary of WQEBB and + * ring buffer wrapping, supports Software Parser, + * Checksums and VLAN insertion Tx offload features. + * + * @param txq + * Pointer to TX queue structure. + * @param loc + * Pointer to burst routine local context. + * @param wqe + * Pointer to WQE to fill with built Ethernet Segment. + * @param vlan + * Length of VLAN tag insertion if any. + * @param inlen + * Length of data to inline (VLAN included, if any). + * @param tso + * TSO flag, set mss field from the packet. + * @param olx + * Configured Tx offloads mask. It is fully defined at + * compile time and may be used for optimization. + * + * @return + * Pointer to the next Data Segment (aligned and wrapped around). + */ +static __rte_always_inline struct mlx5_wqe_dseg * +mlx5_tx_eseg_data(struct mlx5_txq_data *restrict txq, + struct mlx5_txq_local *restrict loc, + struct mlx5_wqe *restrict wqe, + unsigned int vlan, + unsigned int inlen, + unsigned int tso, + unsigned int olx) +{ + struct mlx5_wqe_eseg *restrict es = &wqe->eseg; + uint32_t csum; + uint8_t *psrc, *pdst; + unsigned int part; + + /* + * Calculate and set check sum flags first, dword field + * in segment may be shared with Software Parser flags. + */ + csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0; + if (tso) { + csum <<= 24; + csum |= loc->mbuf->tso_segsz; + es->flags = rte_cpu_to_be_32(csum); + } else { + es->flags = rte_cpu_to_le_32(csum); + } + /* + * Calculate and set Software Parser offsets and flags. + * These flags a set for custom UDP and IP tunnel packets. + */ + es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx); + /* Fill metadata field if needed. */ + es->metadata = MLX5_TXOFF_CONFIG(METADATA) ? + loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ? + *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0; + static_assert(MLX5_ESEG_MIN_INLINE_SIZE == + (sizeof(uint16_t) + + sizeof(rte_v128u32_t)), + "invalid Ethernet Segment data size"); + static_assert(MLX5_ESEG_MIN_INLINE_SIZE == + (sizeof(uint16_t) + + sizeof(struct rte_vlan_hdr) + + 2 * RTE_ETHER_ADDR_LEN), + "invalid Ethernet Segment data size"); + psrc = rte_pktmbuf_mtod(loc->mbuf, uint8_t *); + es->inline_hdr_sz = rte_cpu_to_be_16(inlen); + es->inline_data = *(unaligned_uint16_t *)psrc; + psrc += sizeof(uint16_t); + pdst = (uint8_t *)(es + 1); + if (MLX5_TXOFF_CONFIG(VLAN) && vlan) { + /* Implement VLAN tag insertion as part inline data. */ + memcpy(pdst, psrc, 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t)); + pdst += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t); + psrc += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t); + /* Insert VLAN ethertype + VLAN tag. */ + *(unaligned_uint32_t *)pdst = rte_cpu_to_be_32 + ((RTE_ETHER_TYPE_VLAN << 16) | + loc->mbuf->vlan_tci); + pdst += sizeof(struct rte_vlan_hdr); + /* Copy the rest two bytes from packet data. 
*/ + MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, sizeof(uint16_t))); + *(uint16_t *)pdst = *(unaligned_uint16_t *)psrc; + psrc += sizeof(uint16_t); + } else { + /* Fill the gap in the title WQEBB with inline data. */ + rte_mov16(pdst, psrc); + psrc += sizeof(rte_v128u32_t); + } + pdst = (uint8_t *)(es + 2); + MLX5_ASSERT(inlen >= MLX5_ESEG_MIN_INLINE_SIZE); + MLX5_ASSERT(pdst < (uint8_t *)txq->wqes_end); + inlen -= MLX5_ESEG_MIN_INLINE_SIZE; + if (!inlen) { + MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE)); + return (struct mlx5_wqe_dseg *)pdst; + } + /* + * The WQEBB space availability is checked by caller. + * Here we should be aware of WQE ring buffer wraparound only. + */ + part = (uint8_t *)txq->wqes_end - pdst; + part = RTE_MIN(part, inlen); + do { + rte_memcpy(pdst, psrc, part); + inlen -= part; + if (likely(!inlen)) { + /* + * If return value is not used by the caller + * the code below will be optimized out. + */ + pdst += part; + pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE); + if (unlikely(pdst >= (uint8_t *)txq->wqes_end)) + pdst = (uint8_t *)txq->wqes; + return (struct mlx5_wqe_dseg *)pdst; + } + pdst = (uint8_t *)txq->wqes; + psrc += part; + part = inlen; + } while (true); +} + +/** + * Copy data from chain of mbuf to the specified linear buffer. + * Checksums and VLAN insertion Tx offload features. If data + * from some mbuf copied completely this mbuf is freed. Local + * structure is used to keep the byte stream state. + * + * @param pdst + * Pointer to the destination linear buffer. + * @param loc + * Pointer to burst routine local context. + * @param len + * Length of data to be copied. + * @param must + * Length of data to be copied ignoring no inline hint. + * @param olx + * Configured Tx offloads mask. It is fully defined at + * compile time and may be used for optimization. + * + * @return + * Number of actual copied data bytes. This is always greater than or + * equal to must parameter and might be lesser than len in no inline + * hint flag is encountered. + */ +static __rte_always_inline unsigned int +mlx5_tx_mseg_memcpy(uint8_t *pdst, + struct mlx5_txq_local *restrict loc, + unsigned int len, + unsigned int must, + unsigned int olx __rte_unused) +{ + struct rte_mbuf *mbuf; + unsigned int part, dlen, copy = 0; + uint8_t *psrc; + + MLX5_ASSERT(len); + MLX5_ASSERT(must <= len); + do { + /* Allow zero length packets, must check first. */ + dlen = rte_pktmbuf_data_len(loc->mbuf); + if (dlen <= loc->mbuf_off) { + /* Exhausted packet, just free. */ + mbuf = loc->mbuf; + loc->mbuf = mbuf->next; + rte_pktmbuf_free_seg(mbuf); + loc->mbuf_off = 0; + MLX5_ASSERT(loc->mbuf_nseg > 1); + MLX5_ASSERT(loc->mbuf); + --loc->mbuf_nseg; + if (loc->mbuf->ol_flags & PKT_TX_DYNF_NOINLINE) { + unsigned int diff; + + if (copy >= must) { + /* + * We already copied the minimal + * requested amount of data. + */ + return copy; + } + diff = must - copy; + if (diff <= rte_pktmbuf_data_len(loc->mbuf)) { + /* + * Copy only the minimal required + * part of the data buffer. + */ + len = diff; + } + } + continue; + } + dlen -= loc->mbuf_off; + psrc = rte_pktmbuf_mtod_offset(loc->mbuf, uint8_t *, + loc->mbuf_off); + part = RTE_MIN(len, dlen); + rte_memcpy(pdst, psrc, part); + copy += part; + loc->mbuf_off += part; + len -= part; + if (!len) { + if (loc->mbuf_off >= rte_pktmbuf_data_len(loc->mbuf)) { + loc->mbuf_off = 0; + /* Exhausted packet, just free. 
*/ + mbuf = loc->mbuf; + loc->mbuf = mbuf->next; + rte_pktmbuf_free_seg(mbuf); + loc->mbuf_off = 0; + MLX5_ASSERT(loc->mbuf_nseg >= 1); + --loc->mbuf_nseg; + } + return copy; + } + pdst += part; + } while (true); +} + +/** + * Build the Ethernet Segment with inlined data from + * multi-segment packet. Checks the boundary of WQEBB + * and ring buffer wrapping, supports Software Parser, + * Checksums and VLAN insertion Tx offload features. + * + * @param txq + * Pointer to TX queue structure. + * @param loc + * Pointer to burst routine local context. + * @param wqe + * Pointer to WQE to fill with built Ethernet Segment. + * @param vlan + * Length of VLAN tag insertion if any. + * @param inlen + * Length of data to inline (VLAN included, if any). + * @param tso + * TSO flag, set mss field from the packet. + * @param olx + * Configured Tx offloads mask. It is fully defined at + * compile time and may be used for optimization. + * + * @return + * Pointer to the next Data Segment (aligned and + * possible NOT wrapped around - caller should do + * wrapping check on its own). + */ +static __rte_always_inline struct mlx5_wqe_dseg * +mlx5_tx_eseg_mdat(struct mlx5_txq_data *restrict txq, + struct mlx5_txq_local *restrict loc, + struct mlx5_wqe *restrict wqe, + unsigned int vlan, + unsigned int inlen, + unsigned int tso, + unsigned int olx) +{ + struct mlx5_wqe_eseg *restrict es = &wqe->eseg; + uint32_t csum; + uint8_t *pdst; + unsigned int part, tlen = 0; + + /* + * Calculate and set check sum flags first, uint32_t field + * in segment may be shared with Software Parser flags. + */ + csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0; + if (tso) { + csum <<= 24; + csum |= loc->mbuf->tso_segsz; + es->flags = rte_cpu_to_be_32(csum); + } else { + es->flags = rte_cpu_to_le_32(csum); + } + /* + * Calculate and set Software Parser offsets and flags. + * These flags a set for custom UDP and IP tunnel packets. + */ + es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx); + /* Fill metadata field if needed. */ + es->metadata = MLX5_TXOFF_CONFIG(METADATA) ? + loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ? + *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0; + static_assert(MLX5_ESEG_MIN_INLINE_SIZE == + (sizeof(uint16_t) + + sizeof(rte_v128u32_t)), + "invalid Ethernet Segment data size"); + static_assert(MLX5_ESEG_MIN_INLINE_SIZE == + (sizeof(uint16_t) + + sizeof(struct rte_vlan_hdr) + + 2 * RTE_ETHER_ADDR_LEN), + "invalid Ethernet Segment data size"); + MLX5_ASSERT(inlen >= MLX5_ESEG_MIN_INLINE_SIZE); + pdst = (uint8_t *)&es->inline_data; + if (MLX5_TXOFF_CONFIG(VLAN) && vlan) { + /* Implement VLAN tag insertion as part inline data. */ + mlx5_tx_mseg_memcpy(pdst, loc, + 2 * RTE_ETHER_ADDR_LEN, + 2 * RTE_ETHER_ADDR_LEN, olx); + pdst += 2 * RTE_ETHER_ADDR_LEN; + *(unaligned_uint32_t *)pdst = rte_cpu_to_be_32 + ((RTE_ETHER_TYPE_VLAN << 16) | + loc->mbuf->vlan_tci); + pdst += sizeof(struct rte_vlan_hdr); + tlen += 2 * RTE_ETHER_ADDR_LEN + sizeof(struct rte_vlan_hdr); + } + MLX5_ASSERT(pdst < (uint8_t *)txq->wqes_end); + /* + * The WQEBB space availability is checked by caller. + * Here we should be aware of WQE ring buffer wraparound only. + */ + part = (uint8_t *)txq->wqes_end - pdst; + part = RTE_MIN(part, inlen - tlen); + MLX5_ASSERT(part); + do { + unsigned int copy; + + /* + * Copying may be interrupted inside the routine + * if run into no inline hint flag. + */ + copy = tlen >= txq->inlen_mode ? 
0 : (txq->inlen_mode - tlen); + copy = mlx5_tx_mseg_memcpy(pdst, loc, part, copy, olx); + tlen += copy; + if (likely(inlen <= tlen) || copy < part) { + es->inline_hdr_sz = rte_cpu_to_be_16(tlen); + pdst += copy; + pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE); + return (struct mlx5_wqe_dseg *)pdst; + } + pdst = (uint8_t *)txq->wqes; + part = inlen - tlen; + } while (true); +} + +/** + * Build the Data Segment of pointer type. + * + * @param txq + * Pointer to TX queue structure. + * @param loc + * Pointer to burst routine local context. + * @param dseg + * Pointer to WQE to fill with built Data Segment. + * @param buf + * Data buffer to point. + * @param len + * Data buffer length. + * @param olx + * Configured Tx offloads mask. It is fully defined at + * compile time and may be used for optimization. + */ +static __rte_always_inline void +mlx5_tx_dseg_ptr(struct mlx5_txq_data *restrict txq, + struct mlx5_txq_local *restrict loc, + struct mlx5_wqe_dseg *restrict dseg, + uint8_t *buf, + unsigned int len, + unsigned int olx __rte_unused) + +{ + MLX5_ASSERT(len); + dseg->bcount = rte_cpu_to_be_32(len); + dseg->lkey = mlx5_tx_mb2mr(txq, loc->mbuf); + dseg->pbuf = rte_cpu_to_be_64((uintptr_t)buf); +} + +/** + * Build the Data Segment of pointer type or inline + * if data length is less than buffer in minimal + * Data Segment size. + * + * @param txq + * Pointer to TX queue structure. + * @param loc + * Pointer to burst routine local context. + * @param dseg + * Pointer to WQE to fill with built Data Segment. + * @param buf + * Data buffer to point. + * @param len + * Data buffer length. + * @param olx + * Configured Tx offloads mask. It is fully defined at + * compile time and may be used for optimization. + */ +static __rte_always_inline void +mlx5_tx_dseg_iptr(struct mlx5_txq_data *restrict txq, + struct mlx5_txq_local *restrict loc, + struct mlx5_wqe_dseg *restrict dseg, + uint8_t *buf, + unsigned int len, + unsigned int olx __rte_unused) + +{ + uintptr_t dst, src; + + MLX5_ASSERT(len); + if (len > MLX5_DSEG_MIN_INLINE_SIZE) { + dseg->bcount = rte_cpu_to_be_32(len); + dseg->lkey = mlx5_tx_mb2mr(txq, loc->mbuf); + dseg->pbuf = rte_cpu_to_be_64((uintptr_t)buf); + + return; + } + dseg->bcount = rte_cpu_to_be_32(len | MLX5_ETH_WQE_DATA_INLINE); + /* Unrolled implementation of generic rte_memcpy. */ + dst = (uintptr_t)&dseg->inline_data[0]; + src = (uintptr_t)buf; + if (len & 0x08) { +#ifdef RTE_ARCH_STRICT_ALIGN + MLX5_ASSERT(dst == RTE_PTR_ALIGN(dst, sizeof(uint32_t))); + *(uint32_t *)dst = *(unaligned_uint32_t *)src; + dst += sizeof(uint32_t); + src += sizeof(uint32_t); + *(uint32_t *)dst = *(unaligned_uint32_t *)src; + dst += sizeof(uint32_t); + src += sizeof(uint32_t); +#else + *(uint64_t *)dst = *(unaligned_uint64_t *)src; + dst += sizeof(uint64_t); + src += sizeof(uint64_t); +#endif + } + if (len & 0x04) { + *(uint32_t *)dst = *(unaligned_uint32_t *)src; + dst += sizeof(uint32_t); + src += sizeof(uint32_t); + } + if (len & 0x02) { + *(uint16_t *)dst = *(unaligned_uint16_t *)src; + dst += sizeof(uint16_t); + src += sizeof(uint16_t); + } + if (len & 0x01) + *(uint8_t *)dst = *(uint8_t *)src; +} + +/** + * Build the Data Segment of inlined data from single + * segment packet, no VLAN insertion. + * + * @param txq + * Pointer to TX queue structure. + * @param loc + * Pointer to burst routine local context. + * @param dseg + * Pointer to WQE to fill with built Data Segment. + * @param buf + * Data buffer to point. + * @param len + * Data buffer length. 
+ * @param olx + * Configured Tx offloads mask. It is fully defined at + * compile time and may be used for optimization. + * + * @return + * Pointer to the next Data Segment after inlined data. + * Ring buffer wraparound check is needed. We do not + * do it here because it may not be needed for the + * last packet in the eMPW session. + */ +static __rte_always_inline struct mlx5_wqe_dseg * +mlx5_tx_dseg_empw(struct mlx5_txq_data *restrict txq, + struct mlx5_txq_local *restrict loc __rte_unused, + struct mlx5_wqe_dseg *restrict dseg, + uint8_t *buf, + unsigned int len, + unsigned int olx __rte_unused) +{ + unsigned int part; + uint8_t *pdst; + + if (!MLX5_TXOFF_CONFIG(MPW)) { + /* Store the descriptor byte counter for eMPW sessions. */ + dseg->bcount = rte_cpu_to_be_32(len | MLX5_ETH_WQE_DATA_INLINE); + pdst = &dseg->inline_data[0]; + } else { + /* The entire legacy MPW session counter is stored on close. */ + pdst = (uint8_t *)dseg; + } + /* + * The WQEBB space availability is checked by caller. + * Here we should be aware of WQE ring buffer wraparound only. + */ + part = (uint8_t *)txq->wqes_end - pdst; + part = RTE_MIN(part, len); + do { + rte_memcpy(pdst, buf, part); + len -= part; + if (likely(!len)) { + pdst += part; + if (!MLX5_TXOFF_CONFIG(MPW)) + pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE); + /* Note: no final wraparound check here. */ + return (struct mlx5_wqe_dseg *)pdst; + } + pdst = (uint8_t *)txq->wqes; + buf += part; + part = len; + } while (true); +} + +/** + * Build the Data Segment of inlined data from single + * segment packet with VLAN insertion. + * + * @param txq + * Pointer to TX queue structure. + * @param loc + * Pointer to burst routine local context. + * @param dseg + * Pointer to the dseg fill with built Data Segment. + * @param buf + * Data buffer to point. + * @param len + * Data buffer length. + * @param olx + * Configured Tx offloads mask. It is fully defined at + * compile time and may be used for optimization. + * + * @return + * Pointer to the next Data Segment after inlined data. + * Ring buffer wraparound check is needed. + */ +static __rte_always_inline struct mlx5_wqe_dseg * +mlx5_tx_dseg_vlan(struct mlx5_txq_data *restrict txq, + struct mlx5_txq_local *restrict loc __rte_unused, + struct mlx5_wqe_dseg *restrict dseg, + uint8_t *buf, + unsigned int len, + unsigned int olx __rte_unused) + +{ + unsigned int part; + uint8_t *pdst; + + MLX5_ASSERT(len > MLX5_ESEG_MIN_INLINE_SIZE); + static_assert(MLX5_DSEG_MIN_INLINE_SIZE == + (2 * RTE_ETHER_ADDR_LEN), + "invalid Data Segment data size"); + if (!MLX5_TXOFF_CONFIG(MPW)) { + /* Store the descriptor byte counter for eMPW sessions. */ + dseg->bcount = rte_cpu_to_be_32 + ((len + sizeof(struct rte_vlan_hdr)) | + MLX5_ETH_WQE_DATA_INLINE); + pdst = &dseg->inline_data[0]; + } else { + /* The entire legacy MPW session counter is stored on close. */ + pdst = (uint8_t *)dseg; + } + memcpy(pdst, buf, MLX5_DSEG_MIN_INLINE_SIZE); + buf += MLX5_DSEG_MIN_INLINE_SIZE; + pdst += MLX5_DSEG_MIN_INLINE_SIZE; + len -= MLX5_DSEG_MIN_INLINE_SIZE; + /* Insert VLAN ethertype + VLAN tag. Pointer is aligned. */ + MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE)); + if (unlikely(pdst >= (uint8_t *)txq->wqes_end)) + pdst = (uint8_t *)txq->wqes; + *(uint32_t *)pdst = rte_cpu_to_be_32((RTE_ETHER_TYPE_VLAN << 16) | + loc->mbuf->vlan_tci); + pdst += sizeof(struct rte_vlan_hdr); + /* + * The WQEBB space availability is checked by caller. + * Here we should be aware of WQE ring buffer wraparound only. 
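+	 * The copy loop below therefore runs in at most two parts:
+	 * up to the end of the WQE ring and, if data remain, from
+	 * the ring beginning (txq->wqes) for the rest.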
+ */ + part = (uint8_t *)txq->wqes_end - pdst; + part = RTE_MIN(part, len); + do { + rte_memcpy(pdst, buf, part); + len -= part; + if (likely(!len)) { + pdst += part; + if (!MLX5_TXOFF_CONFIG(MPW)) + pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE); + /* Note: no final wraparound check here. */ + return (struct mlx5_wqe_dseg *)pdst; + } + pdst = (uint8_t *)txq->wqes; + buf += part; + part = len; + } while (true); +} + +/** + * Build the Ethernet Segment with optionally inlined data with + * VLAN insertion and following Data Segments (if any) from + * multi-segment packet. Used by ordinary send and TSO. + * + * @param txq + * Pointer to TX queue structure. + * @param loc + * Pointer to burst routine local context. + * @param wqe + * Pointer to WQE to fill with built Ethernet/Data Segments. + * @param vlan + * Length of VLAN header to insert, 0 means no VLAN insertion. + * @param inlen + * Data length to inline. For TSO this parameter specifies + * exact value, for ordinary send routine can be aligned by + * caller to provide better WQE space saving and data buffer + * start address alignment. This length includes VLAN header + * being inserted. + * @param tso + * Zero means ordinary send, inlined data can be extended, + * otherwise this is TSO, inlined data length is fixed. + * @param olx + * Configured Tx offloads mask. It is fully defined at + * compile time and may be used for optimization. + * + * @return + * Actual size of built WQE in segments. + */ +static __rte_always_inline unsigned int +mlx5_tx_mseg_build(struct mlx5_txq_data *restrict txq, + struct mlx5_txq_local *restrict loc, + struct mlx5_wqe *restrict wqe, + unsigned int vlan, + unsigned int inlen, + unsigned int tso, + unsigned int olx __rte_unused) +{ + struct mlx5_wqe_dseg *restrict dseg; + unsigned int ds; + + MLX5_ASSERT((rte_pktmbuf_pkt_len(loc->mbuf) + vlan) >= inlen); + loc->mbuf_nseg = NB_SEGS(loc->mbuf); + loc->mbuf_off = 0; + + dseg = mlx5_tx_eseg_mdat(txq, loc, wqe, vlan, inlen, tso, olx); + if (!loc->mbuf_nseg) + goto dseg_done; + /* + * There are still some mbuf remaining, not inlined. + * The first mbuf may be partially inlined and we + * must process the possible non-zero data offset. + */ + if (loc->mbuf_off) { + unsigned int dlen; + uint8_t *dptr; + + /* + * Exhausted packets must be dropped before. + * Non-zero offset means there are some data + * remained in the packet. + */ + MLX5_ASSERT(loc->mbuf_off < rte_pktmbuf_data_len(loc->mbuf)); + MLX5_ASSERT(rte_pktmbuf_data_len(loc->mbuf)); + dptr = rte_pktmbuf_mtod_offset(loc->mbuf, uint8_t *, + loc->mbuf_off); + dlen = rte_pktmbuf_data_len(loc->mbuf) - loc->mbuf_off; + /* + * Build the pointer/minimal data Data Segment. + * Do ring buffer wrapping check in advance. + */ + if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end) + dseg = (struct mlx5_wqe_dseg *)txq->wqes; + mlx5_tx_dseg_iptr(txq, loc, dseg, dptr, dlen, olx); + /* Store the mbuf to be freed on completion. */ + MLX5_ASSERT(loc->elts_free); + txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf; + --loc->elts_free; + ++dseg; + if (--loc->mbuf_nseg == 0) + goto dseg_done; + loc->mbuf = loc->mbuf->next; + loc->mbuf_off = 0; + } + do { + if (unlikely(!rte_pktmbuf_data_len(loc->mbuf))) { + struct rte_mbuf *mbuf; + + /* Zero length segment found, just skip. 
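+			 * The empty segment is freed right away; it is not
+			 * stored in elts and no Data Segment is built for it.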
*/ + mbuf = loc->mbuf; + loc->mbuf = loc->mbuf->next; + rte_pktmbuf_free_seg(mbuf); + if (--loc->mbuf_nseg == 0) + break; + } else { + if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end) + dseg = (struct mlx5_wqe_dseg *)txq->wqes; + mlx5_tx_dseg_iptr + (txq, loc, dseg, + rte_pktmbuf_mtod(loc->mbuf, uint8_t *), + rte_pktmbuf_data_len(loc->mbuf), olx); + MLX5_ASSERT(loc->elts_free); + txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf; + --loc->elts_free; + ++dseg; + if (--loc->mbuf_nseg == 0) + break; + loc->mbuf = loc->mbuf->next; + } + } while (true); + +dseg_done: + /* Calculate actual segments used from the dseg pointer. */ + if ((uintptr_t)wqe < (uintptr_t)dseg) + ds = ((uintptr_t)dseg - (uintptr_t)wqe) / MLX5_WSEG_SIZE; + else + ds = (((uintptr_t)dseg - (uintptr_t)wqe) + + txq->wqe_s * MLX5_WQE_SIZE) / MLX5_WSEG_SIZE; + return ds; +} + +/** + * Tx one packet function for multi-segment TSO. Supports all + * types of Tx offloads, uses MLX5_OPCODE_TSO to build WQEs, + * sends one packet per WQE. + * + * This routine is responsible for storing processed mbuf + * into elts ring buffer and update elts_head. + * + * @param txq + * Pointer to TX queue structure. + * @param loc + * Pointer to burst routine local context. + * @param olx + * Configured Tx offloads mask. It is fully defined at + * compile time and may be used for optimization. + * + * @return + * MLX5_TXCMP_CODE_EXIT - sending is done or impossible. + * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred. + * Local context variables partially updated. + */ +static __rte_always_inline enum mlx5_txcmp_code +mlx5_tx_packet_multi_tso(struct mlx5_txq_data *restrict txq, + struct mlx5_txq_local *restrict loc, + unsigned int olx) +{ + struct mlx5_wqe *restrict wqe; + unsigned int ds, dlen, inlen, ntcp, vlan = 0; + + /* + * Calculate data length to be inlined to estimate + * the required space in WQE ring buffer. + */ + dlen = rte_pktmbuf_pkt_len(loc->mbuf); + if (MLX5_TXOFF_CONFIG(VLAN) && loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) + vlan = sizeof(struct rte_vlan_hdr); + inlen = loc->mbuf->l2_len + vlan + + loc->mbuf->l3_len + loc->mbuf->l4_len; + if (unlikely((!inlen || !loc->mbuf->tso_segsz))) + return MLX5_TXCMP_CODE_ERROR; + if (loc->mbuf->ol_flags & PKT_TX_TUNNEL_MASK) + inlen += loc->mbuf->outer_l2_len + loc->mbuf->outer_l3_len; + /* Packet must contain all TSO headers. */ + if (unlikely(inlen > MLX5_MAX_TSO_HEADER || + inlen <= MLX5_ESEG_MIN_INLINE_SIZE || + inlen > (dlen + vlan))) + return MLX5_TXCMP_CODE_ERROR; + MLX5_ASSERT(inlen >= txq->inlen_mode); + /* + * Check whether there are enough free WQEBBs: + * - Control Segment + * - Ethernet Segment + * - First Segment of inlined Ethernet data + * - ... data continued ... + * - Data Segments of pointer/min inline type + */ + ds = NB_SEGS(loc->mbuf) + 2 + (inlen - + MLX5_ESEG_MIN_INLINE_SIZE + + MLX5_WSEG_SIZE + + MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE; + if (unlikely(loc->wqe_free < ((ds + 3) / 4))) + return MLX5_TXCMP_CODE_EXIT; + /* Check for maximal WQE size. */ + if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ((ds + 3) / 4))) + return MLX5_TXCMP_CODE_ERROR; +#ifdef MLX5_PMD_SOFT_COUNTERS + /* Update sent data bytes/packets counters. */ + ntcp = (dlen - (inlen - vlan) + loc->mbuf->tso_segsz - 1) / + loc->mbuf->tso_segsz; + /* + * One will be added for mbuf itself + * at the end of the mlx5_tx_burst from + * loc->pkts_sent field. 
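+	 * For illustration only (hypothetical numbers): with no VLAN,
+	 * dlen = 9000, inlen = 54 and tso_segsz = 1460 the division
+	 * above gives ntcp = 7; six segments are counted here and the
+	 * seventh is added later from loc->pkts_sent, while obytes
+	 * also accounts for the headers replicated in the six extra
+	 * segments (ntcp * inlen bytes).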
+ */ + --ntcp; + txq->stats.opackets += ntcp; + txq->stats.obytes += dlen + vlan + ntcp * inlen; +#endif + wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m); + loc->wqe_last = wqe; + mlx5_tx_cseg_init(txq, loc, wqe, 0, MLX5_OPCODE_TSO, olx); + ds = mlx5_tx_mseg_build(txq, loc, wqe, vlan, inlen, 1, olx); + wqe->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds); + txq->wqe_ci += (ds + 3) / 4; + loc->wqe_free -= (ds + 3) / 4; + return MLX5_TXCMP_CODE_MULTI; +} + +/** + * Tx one packet function for multi-segment SEND. Supports all + * types of Tx offloads, uses MLX5_OPCODE_SEND to build WQEs, + * sends one packet per WQE, without any data inlining in + * Ethernet Segment. + * + * This routine is responsible for storing processed mbuf + * into elts ring buffer and update elts_head. + * + * @param txq + * Pointer to TX queue structure. + * @param loc + * Pointer to burst routine local context. + * @param olx + * Configured Tx offloads mask. It is fully defined at + * compile time and may be used for optimization. + * + * @return + * MLX5_TXCMP_CODE_EXIT - sending is done or impossible. + * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred. + * Local context variables partially updated. + */ +static __rte_always_inline enum mlx5_txcmp_code +mlx5_tx_packet_multi_send(struct mlx5_txq_data *restrict txq, + struct mlx5_txq_local *restrict loc, + unsigned int olx) +{ + struct mlx5_wqe_dseg *restrict dseg; + struct mlx5_wqe *restrict wqe; + unsigned int ds, nseg; + + MLX5_ASSERT(NB_SEGS(loc->mbuf) > 1); + /* + * No inline at all, it means the CPU cycles saving + * is prioritized at configuration, we should not + * copy any packet data to WQE. + */ + nseg = NB_SEGS(loc->mbuf); + ds = 2 + nseg; + if (unlikely(loc->wqe_free < ((ds + 3) / 4))) + return MLX5_TXCMP_CODE_EXIT; + /* Check for maximal WQE size. */ + if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ((ds + 3) / 4))) + return MLX5_TXCMP_CODE_ERROR; + /* + * Some Tx offloads may cause an error if + * packet is not long enough, check against + * assumed minimal length. + */ + if (rte_pktmbuf_pkt_len(loc->mbuf) <= MLX5_ESEG_MIN_INLINE_SIZE) + return MLX5_TXCMP_CODE_ERROR; +#ifdef MLX5_PMD_SOFT_COUNTERS + /* Update sent data bytes counter. */ + txq->stats.obytes += rte_pktmbuf_pkt_len(loc->mbuf); + if (MLX5_TXOFF_CONFIG(VLAN) && + loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) + txq->stats.obytes += sizeof(struct rte_vlan_hdr); +#endif + /* + * SEND WQE, one WQEBB: + * - Control Segment, SEND opcode + * - Ethernet Segment, optional VLAN, no inline + * - Data Segments, pointer only type + */ + wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m); + loc->wqe_last = wqe; + mlx5_tx_cseg_init(txq, loc, wqe, ds, MLX5_OPCODE_SEND, olx); + mlx5_tx_eseg_none(txq, loc, wqe, olx); + dseg = &wqe->dseg[0]; + do { + if (unlikely(!rte_pktmbuf_data_len(loc->mbuf))) { + struct rte_mbuf *mbuf; + + /* + * Zero length segment found, have to + * correct total size of WQE in segments. + * It is supposed to be rare occasion, so + * in normal case (no zero length segments) + * we avoid extra writing to the Control + * Segment. 
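+			 * sq_ds was already initialized with the full
+			 * segment count, hence the in-place big-endian
+			 * decrement below instead of rebuilding the
+			 * Control Segment.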
+ */ + --ds; + wqe->cseg.sq_ds -= RTE_BE32(1); + mbuf = loc->mbuf; + loc->mbuf = mbuf->next; + rte_pktmbuf_free_seg(mbuf); + if (--nseg == 0) + break; + } else { + mlx5_tx_dseg_ptr + (txq, loc, dseg, + rte_pktmbuf_mtod(loc->mbuf, uint8_t *), + rte_pktmbuf_data_len(loc->mbuf), olx); + txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf; + --loc->elts_free; + if (--nseg == 0) + break; + ++dseg; + if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end) + dseg = (struct mlx5_wqe_dseg *)txq->wqes; + loc->mbuf = loc->mbuf->next; + } + } while (true); + txq->wqe_ci += (ds + 3) / 4; + loc->wqe_free -= (ds + 3) / 4; + return MLX5_TXCMP_CODE_MULTI; +} + +/** + * Tx one packet function for multi-segment SEND. Supports all + * types of Tx offloads, uses MLX5_OPCODE_SEND to build WQEs, + * sends one packet per WQE, with data inlining in + * Ethernet Segment and minimal Data Segments. + * + * This routine is responsible for storing processed mbuf + * into elts ring buffer and update elts_head. + * + * @param txq + * Pointer to TX queue structure. + * @param loc + * Pointer to burst routine local context. + * @param olx + * Configured Tx offloads mask. It is fully defined at + * compile time and may be used for optimization. + * + * @return + * MLX5_TXCMP_CODE_EXIT - sending is done or impossible. + * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred. + * Local context variables partially updated. + */ +static __rte_always_inline enum mlx5_txcmp_code +mlx5_tx_packet_multi_inline(struct mlx5_txq_data *restrict txq, + struct mlx5_txq_local *restrict loc, + unsigned int olx) +{ + struct mlx5_wqe *restrict wqe; + unsigned int ds, inlen, dlen, vlan = 0; + + MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE)); + MLX5_ASSERT(NB_SEGS(loc->mbuf) > 1); + /* + * First calculate data length to be inlined + * to estimate the required space for WQE. + */ + dlen = rte_pktmbuf_pkt_len(loc->mbuf); + if (MLX5_TXOFF_CONFIG(VLAN) && loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) + vlan = sizeof(struct rte_vlan_hdr); + inlen = dlen + vlan; + /* Check against minimal length. */ + if (inlen <= MLX5_ESEG_MIN_INLINE_SIZE) + return MLX5_TXCMP_CODE_ERROR; + MLX5_ASSERT(txq->inlen_send >= MLX5_ESEG_MIN_INLINE_SIZE); + if (inlen > txq->inlen_send || + loc->mbuf->ol_flags & PKT_TX_DYNF_NOINLINE) { + struct rte_mbuf *mbuf; + unsigned int nxlen; + uintptr_t start; + + /* + * Packet length exceeds the allowed inline + * data length, check whether the minimal + * inlining is required. + */ + if (txq->inlen_mode) { + MLX5_ASSERT(txq->inlen_mode >= + MLX5_ESEG_MIN_INLINE_SIZE); + MLX5_ASSERT(txq->inlen_mode <= txq->inlen_send); + inlen = txq->inlen_mode; + } else { + if (loc->mbuf->ol_flags & PKT_TX_DYNF_NOINLINE || + !vlan || txq->vlan_en) { + /* + * VLAN insertion will be done inside by HW. + * It is not utmost effective - VLAN flag is + * checked twice, but we should proceed the + * inlining length correctly and take into + * account the VLAN header being inserted. + */ + return mlx5_tx_packet_multi_send + (txq, loc, olx); + } + inlen = MLX5_ESEG_MIN_INLINE_SIZE; + } + /* + * Now we know the minimal amount of data is requested + * to inline. Check whether we should inline the buffers + * from the chain beginning to eliminate some mbufs. + */ + mbuf = loc->mbuf; + nxlen = rte_pktmbuf_data_len(mbuf); + if (unlikely(nxlen <= txq->inlen_send)) { + /* We can inline first mbuf at least. */ + if (nxlen < inlen) { + unsigned int smlen; + + /* Scan mbufs till inlen filled. 
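+				 * Accumulate the data lengths of successive
+				 * segments until the required inline amount
+				 * is reached, so whole leading mbufs can be
+				 * inlined (and freed) instead of being
+				 * referenced by pointer Data Segments.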
*/ + do { + smlen = nxlen; + mbuf = NEXT(mbuf); + MLX5_ASSERT(mbuf); + nxlen = rte_pktmbuf_data_len(mbuf); + nxlen += smlen; + } while (unlikely(nxlen < inlen)); + if (unlikely(nxlen > txq->inlen_send)) { + /* We cannot inline entire mbuf. */ + smlen = inlen - smlen; + start = rte_pktmbuf_mtod_offset + (mbuf, uintptr_t, smlen); + goto do_align; + } + } + do { + inlen = nxlen; + mbuf = NEXT(mbuf); + /* There should be not end of packet. */ + MLX5_ASSERT(mbuf); + nxlen = inlen + rte_pktmbuf_data_len(mbuf); + } while (unlikely(nxlen < txq->inlen_send)); + } + start = rte_pktmbuf_mtod(mbuf, uintptr_t); + /* + * Check whether we can do inline to align start + * address of data buffer to cacheline. + */ +do_align: + start = (~start + 1) & (RTE_CACHE_LINE_SIZE - 1); + if (unlikely(start)) { + start += inlen; + if (start <= txq->inlen_send) + inlen = start; + } + } + /* + * Check whether there are enough free WQEBBs: + * - Control Segment + * - Ethernet Segment + * - First Segment of inlined Ethernet data + * - ... data continued ... + * - Data Segments of pointer/min inline type + * + * Estimate the number of Data Segments conservatively, + * supposing no any mbufs is being freed during inlining. + */ + MLX5_ASSERT(inlen <= txq->inlen_send); + ds = NB_SEGS(loc->mbuf) + 2 + (inlen - + MLX5_ESEG_MIN_INLINE_SIZE + + MLX5_WSEG_SIZE + + MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE; + if (unlikely(loc->wqe_free < ((ds + 3) / 4))) + return MLX5_TXCMP_CODE_EXIT; + /* Check for maximal WQE size. */ + if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ((ds + 3) / 4))) + return MLX5_TXCMP_CODE_ERROR; +#ifdef MLX5_PMD_SOFT_COUNTERS + /* Update sent data bytes/packets counters. */ + txq->stats.obytes += dlen + vlan; +#endif + wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m); + loc->wqe_last = wqe; + mlx5_tx_cseg_init(txq, loc, wqe, 0, MLX5_OPCODE_SEND, olx); + ds = mlx5_tx_mseg_build(txq, loc, wqe, vlan, inlen, 0, olx); + wqe->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds); + txq->wqe_ci += (ds + 3) / 4; + loc->wqe_free -= (ds + 3) / 4; + return MLX5_TXCMP_CODE_MULTI; +} + +/** + * Tx burst function for multi-segment packets. Supports all + * types of Tx offloads, uses MLX5_OPCODE_SEND/TSO to build WQEs, + * sends one packet per WQE. Function stops sending if it + * encounters the single-segment packet. + * + * This routine is responsible for storing processed mbuf + * into elts ring buffer and update elts_head. + * + * @param txq + * Pointer to TX queue structure. + * @param[in] pkts + * Packets to transmit. + * @param pkts_n + * Number of packets in array. + * @param loc + * Pointer to burst routine local context. + * @param olx + * Configured Tx offloads mask. It is fully defined at + * compile time and may be used for optimization. + * + * @return + * MLX5_TXCMP_CODE_EXIT - sending is done or impossible. + * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred. + * MLX5_TXCMP_CODE_SINGLE - single-segment packet encountered. + * MLX5_TXCMP_CODE_TSO - TSO single-segment packet encountered. + * Local context variables updated. 
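+ *   The TSO/SINGLE codes let the caller switch directly to the
+ *   matching single-segment routine for the already loaded loc->mbuf.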
+ */ +static __rte_always_inline enum mlx5_txcmp_code +mlx5_tx_burst_mseg(struct mlx5_txq_data *restrict txq, + struct rte_mbuf **restrict pkts, + unsigned int pkts_n, + struct mlx5_txq_local *restrict loc, + unsigned int olx) +{ + MLX5_ASSERT(loc->elts_free && loc->wqe_free); + MLX5_ASSERT(pkts_n > loc->pkts_sent); + pkts += loc->pkts_sent + 1; + pkts_n -= loc->pkts_sent; + for (;;) { + enum mlx5_txcmp_code ret; + + MLX5_ASSERT(NB_SEGS(loc->mbuf) > 1); + /* + * Estimate the number of free elts quickly but + * conservatively. Some segment may be fully inlined + * and freed, ignore this here - precise estimation + * is costly. + */ + if (loc->elts_free < NB_SEGS(loc->mbuf)) + return MLX5_TXCMP_CODE_EXIT; + if (MLX5_TXOFF_CONFIG(TSO) && + unlikely(loc->mbuf->ol_flags & PKT_TX_TCP_SEG)) { + /* Proceed with multi-segment TSO. */ + ret = mlx5_tx_packet_multi_tso(txq, loc, olx); + } else if (MLX5_TXOFF_CONFIG(INLINE)) { + /* Proceed with multi-segment SEND with inlining. */ + ret = mlx5_tx_packet_multi_inline(txq, loc, olx); + } else { + /* Proceed with multi-segment SEND w/o inlining. */ + ret = mlx5_tx_packet_multi_send(txq, loc, olx); + } + if (ret == MLX5_TXCMP_CODE_EXIT) + return MLX5_TXCMP_CODE_EXIT; + if (ret == MLX5_TXCMP_CODE_ERROR) + return MLX5_TXCMP_CODE_ERROR; + /* WQE is built, go to the next packet. */ + ++loc->pkts_sent; + --pkts_n; + if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free)) + return MLX5_TXCMP_CODE_EXIT; + loc->mbuf = *pkts++; + if (pkts_n > 1) + rte_prefetch0(*pkts); + if (likely(NB_SEGS(loc->mbuf) > 1)) + continue; + /* Here ends the series of multi-segment packets. */ + if (MLX5_TXOFF_CONFIG(TSO) && + unlikely(loc->mbuf->ol_flags & PKT_TX_TCP_SEG)) + return MLX5_TXCMP_CODE_TSO; + return MLX5_TXCMP_CODE_SINGLE; + } + MLX5_ASSERT(false); +} + +/** + * Tx burst function for single-segment packets with TSO. + * Supports all types of Tx offloads, except multi-packets. + * Uses MLX5_OPCODE_TSO to build WQEs, sends one packet per WQE. + * Function stops sending if it encounters the multi-segment + * packet or packet without TSO requested. + * + * The routine is responsible for storing processed mbuf + * into elts ring buffer and update elts_head if inline + * offloads is requested due to possible early freeing + * of the inlined mbufs (can not store pkts array in elts + * as a batch). + * + * @param txq + * Pointer to TX queue structure. + * @param[in] pkts + * Packets to transmit. + * @param pkts_n + * Number of packets in array. + * @param loc + * Pointer to burst routine local context. + * @param olx + * Configured Tx offloads mask. It is fully defined at + * compile time and may be used for optimization. + * + * @return + * MLX5_TXCMP_CODE_EXIT - sending is done or impossible. + * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred. + * MLX5_TXCMP_CODE_SINGLE - single-segment packet encountered. + * MLX5_TXCMP_CODE_MULTI - multi-segment packet encountered. + * Local context variables updated. 
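+ *   MULTI/SINGLE are returned as soon as a packet not handled by
+ *   this loop is met, so the caller can continue with the proper
+ *   routine for loc->mbuf.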
+ */ +static __rte_always_inline enum mlx5_txcmp_code +mlx5_tx_burst_tso(struct mlx5_txq_data *restrict txq, + struct rte_mbuf **restrict pkts, + unsigned int pkts_n, + struct mlx5_txq_local *restrict loc, + unsigned int olx) +{ + MLX5_ASSERT(loc->elts_free && loc->wqe_free); + MLX5_ASSERT(pkts_n > loc->pkts_sent); + pkts += loc->pkts_sent + 1; + pkts_n -= loc->pkts_sent; + for (;;) { + struct mlx5_wqe_dseg *restrict dseg; + struct mlx5_wqe *restrict wqe; + unsigned int ds, dlen, hlen, ntcp, vlan = 0; + uint8_t *dptr; + + MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1); + dlen = rte_pktmbuf_data_len(loc->mbuf); + if (MLX5_TXOFF_CONFIG(VLAN) && + loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) { + vlan = sizeof(struct rte_vlan_hdr); + } + /* + * First calculate the WQE size to check + * whether we have enough space in ring buffer. + */ + hlen = loc->mbuf->l2_len + vlan + + loc->mbuf->l3_len + loc->mbuf->l4_len; + if (unlikely((!hlen || !loc->mbuf->tso_segsz))) + return MLX5_TXCMP_CODE_ERROR; + if (loc->mbuf->ol_flags & PKT_TX_TUNNEL_MASK) + hlen += loc->mbuf->outer_l2_len + + loc->mbuf->outer_l3_len; + /* Segment must contain all TSO headers. */ + if (unlikely(hlen > MLX5_MAX_TSO_HEADER || + hlen <= MLX5_ESEG_MIN_INLINE_SIZE || + hlen > (dlen + vlan))) + return MLX5_TXCMP_CODE_ERROR; + /* + * Check whether there are enough free WQEBBs: + * - Control Segment + * - Ethernet Segment + * - First Segment of inlined Ethernet data + * - ... data continued ... + * - Finishing Data Segment of pointer type + */ + ds = 4 + (hlen - MLX5_ESEG_MIN_INLINE_SIZE + + MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE; + if (loc->wqe_free < ((ds + 3) / 4)) + return MLX5_TXCMP_CODE_EXIT; +#ifdef MLX5_PMD_SOFT_COUNTERS + /* Update sent data bytes/packets counters. */ + ntcp = (dlen + vlan - hlen + + loc->mbuf->tso_segsz - 1) / + loc->mbuf->tso_segsz; + /* + * One will be added for mbuf itself at the end + * of the mlx5_tx_burst from loc->pkts_sent field. + */ + --ntcp; + txq->stats.opackets += ntcp; + txq->stats.obytes += dlen + vlan + ntcp * hlen; +#endif + /* + * Build the TSO WQE: + * - Control Segment + * - Ethernet Segment with hlen bytes inlined + * - Data Segment of pointer type + */ + wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m); + loc->wqe_last = wqe; + mlx5_tx_cseg_init(txq, loc, wqe, ds, + MLX5_OPCODE_TSO, olx); + dseg = mlx5_tx_eseg_data(txq, loc, wqe, vlan, hlen, 1, olx); + dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *) + hlen - vlan; + dlen -= hlen - vlan; + mlx5_tx_dseg_ptr(txq, loc, dseg, dptr, dlen, olx); + /* + * WQE is built, update the loop parameters + * and go to the next packet. + */ + txq->wqe_ci += (ds + 3) / 4; + loc->wqe_free -= (ds + 3) / 4; + if (MLX5_TXOFF_CONFIG(INLINE)) + txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf; + --loc->elts_free; + ++loc->pkts_sent; + --pkts_n; + if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free)) + return MLX5_TXCMP_CODE_EXIT; + loc->mbuf = *pkts++; + if (pkts_n > 1) + rte_prefetch0(*pkts); + if (MLX5_TXOFF_CONFIG(MULTI) && + unlikely(NB_SEGS(loc->mbuf) > 1)) + return MLX5_TXCMP_CODE_MULTI; + if (likely(!(loc->mbuf->ol_flags & PKT_TX_TCP_SEG))) + return MLX5_TXCMP_CODE_SINGLE; + /* Continue with the next TSO packet. */ + } + MLX5_ASSERT(false); +} + +/** + * Analyze the packet and select the best method to send. + * + * @param txq + * Pointer to TX queue structure. + * @param loc + * Pointer to burst routine local context. + * @param olx + * Configured Tx offloads mask. It is fully defined at + * compile time and may be used for optimization. 
+ * @param newp + * The predefined flag whether do complete check for + * multi-segment packets and TSO. + * + * @return + * MLX5_TXCMP_CODE_MULTI - multi-segment packet encountered. + * MLX5_TXCMP_CODE_TSO - TSO required, use TSO/LSO. + * MLX5_TXCMP_CODE_SINGLE - single-segment packet, use SEND. + * MLX5_TXCMP_CODE_EMPW - single-segment packet, use MPW. + */ +static __rte_always_inline enum mlx5_txcmp_code +mlx5_tx_able_to_empw(struct mlx5_txq_data *restrict txq, + struct mlx5_txq_local *restrict loc, + unsigned int olx, + bool newp) +{ + /* Check for multi-segment packet. */ + if (newp && + MLX5_TXOFF_CONFIG(MULTI) && + unlikely(NB_SEGS(loc->mbuf) > 1)) + return MLX5_TXCMP_CODE_MULTI; + /* Check for TSO packet. */ + if (newp && + MLX5_TXOFF_CONFIG(TSO) && + unlikely(loc->mbuf->ol_flags & PKT_TX_TCP_SEG)) + return MLX5_TXCMP_CODE_TSO; + /* Check if eMPW is enabled at all. */ + if (!MLX5_TXOFF_CONFIG(EMPW)) + return MLX5_TXCMP_CODE_SINGLE; + /* Check if eMPW can be engaged. */ + if (MLX5_TXOFF_CONFIG(VLAN) && + unlikely(loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) && + (!MLX5_TXOFF_CONFIG(INLINE) || + unlikely((rte_pktmbuf_data_len(loc->mbuf) + + sizeof(struct rte_vlan_hdr)) > txq->inlen_empw))) { + /* + * eMPW does not support VLAN insertion offload, + * we have to inline the entire packet but + * packet is too long for inlining. + */ + return MLX5_TXCMP_CODE_SINGLE; + } + return MLX5_TXCMP_CODE_EMPW; +} + +/** + * Check the next packet attributes to match with the eMPW batch ones. + * In addition, for legacy MPW the packet length is checked either. + * + * @param txq + * Pointer to TX queue structure. + * @param es + * Pointer to Ethernet Segment of eMPW batch. + * @param loc + * Pointer to burst routine local context. + * @param dlen + * Length of previous packet in MPW descriptor. + * @param olx + * Configured Tx offloads mask. It is fully defined at + * compile time and may be used for optimization. + * + * @return + * true - packet match with eMPW batch attributes. + * false - no match, eMPW should be restarted. + */ +static __rte_always_inline bool +mlx5_tx_match_empw(struct mlx5_txq_data *restrict txq __rte_unused, + struct mlx5_wqe_eseg *restrict es, + struct mlx5_txq_local *restrict loc, + uint32_t dlen, + unsigned int olx) +{ + uint8_t swp_flags = 0; + + /* Compare the checksum flags, if any. */ + if (MLX5_TXOFF_CONFIG(CSUM) && + txq_ol_cksum_to_cs(loc->mbuf) != es->cs_flags) + return false; + /* Compare the Software Parser offsets and flags. */ + if (MLX5_TXOFF_CONFIG(SWP) && + (es->swp_offs != txq_mbuf_to_swp(loc, &swp_flags, olx) || + es->swp_flags != swp_flags)) + return false; + /* Fill metadata field if needed. */ + if (MLX5_TXOFF_CONFIG(METADATA) && + es->metadata != (loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ? + *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0)) + return false; + /* Legacy MPW can send packets with the same lengt only. */ + if (MLX5_TXOFF_CONFIG(MPW) && + dlen != rte_pktmbuf_data_len(loc->mbuf)) + return false; + /* There must be no VLAN packets in eMPW loop. */ + if (MLX5_TXOFF_CONFIG(VLAN)) + MLX5_ASSERT(!(loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)); + return true; +} + +/* + * Update send loop variables and WQE for eMPW loop + * without data inlining. Number of Data Segments is + * equal to the number of sent packets. + * + * @param txq + * Pointer to TX queue structure. + * @param loc + * Pointer to burst routine local context. + * @param ds + * Number of packets/Data Segments/Packets. 
+ * @param slen + * Accumulated statistics, bytes sent + * @param olx + * Configured Tx offloads mask. It is fully defined at + * compile time and may be used for optimization. + * + * @return + * true - packet match with eMPW batch attributes. + * false - no match, eMPW should be restarted. + */ +static __rte_always_inline void +mlx5_tx_sdone_empw(struct mlx5_txq_data *restrict txq, + struct mlx5_txq_local *restrict loc, + unsigned int ds, + unsigned int slen, + unsigned int olx __rte_unused) +{ + MLX5_ASSERT(!MLX5_TXOFF_CONFIG(INLINE)); +#ifdef MLX5_PMD_SOFT_COUNTERS + /* Update sent data bytes counter. */ + txq->stats.obytes += slen; +#else + (void)slen; +#endif + loc->elts_free -= ds; + loc->pkts_sent += ds; + ds += 2; + loc->wqe_last->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds); + txq->wqe_ci += (ds + 3) / 4; + loc->wqe_free -= (ds + 3) / 4; +} + +/* + * Update send loop variables and WQE for eMPW loop + * with data inlining. Gets the size of pushed descriptors + * and data to the WQE. + * + * @param txq + * Pointer to TX queue structure. + * @param loc + * Pointer to burst routine local context. + * @param len + * Total size of descriptor/data in bytes. + * @param slen + * Accumulated statistics, data bytes sent. + * @param wqem + * The base WQE for the eMPW/MPW descriptor. + * @param olx + * Configured Tx offloads mask. It is fully defined at + * compile time and may be used for optimization. + * + * @return + * true - packet match with eMPW batch attributes. + * false - no match, eMPW should be restarted. + */ +static __rte_always_inline void +mlx5_tx_idone_empw(struct mlx5_txq_data *restrict txq, + struct mlx5_txq_local *restrict loc, + unsigned int len, + unsigned int slen, + struct mlx5_wqe *restrict wqem, + unsigned int olx __rte_unused) +{ + struct mlx5_wqe_dseg *dseg = &wqem->dseg[0]; + + MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE)); +#ifdef MLX5_PMD_SOFT_COUNTERS + /* Update sent data bytes counter. */ + txq->stats.obytes += slen; +#else + (void)slen; +#endif + if (MLX5_TXOFF_CONFIG(MPW) && dseg->bcount == RTE_BE32(0)) { + /* + * If the legacy MPW session contains the inline packets + * we should set the only inline data segment length + * and align the total length to the segment size. + */ + MLX5_ASSERT(len > sizeof(dseg->bcount)); + dseg->bcount = rte_cpu_to_be_32((len - sizeof(dseg->bcount)) | + MLX5_ETH_WQE_DATA_INLINE); + len = (len + MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE + 2; + } else { + /* + * The session is not legacy MPW or contains the + * data buffer pointer segments. + */ + MLX5_ASSERT((len % MLX5_WSEG_SIZE) == 0); + len = len / MLX5_WSEG_SIZE + 2; + } + wqem->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | len); + txq->wqe_ci += (len + 3) / 4; + loc->wqe_free -= (len + 3) / 4; + loc->wqe_last = wqem; +} + +/** + * The set of Tx burst functions for single-segment packets + * without TSO and with Multi-Packet Writing feature support. + * Supports all types of Tx offloads, except multi-packets + * and TSO. + * + * Uses MLX5_OPCODE_EMPW to build WQEs if possible and sends + * as many packet per WQE as it can. If eMPW is not configured + * or packet can not be sent with eMPW (VLAN insertion) the + * ordinary SEND opcode is used and only one packet placed + * in WQE. + * + * Functions stop sending if it encounters the multi-segment + * packet or packet with TSO requested. + * + * The routines are responsible for storing processed mbuf + * into elts ring buffer and update elts_head if inlining + * offload is requested. 
Otherwise the copying mbufs to elts + * can be postponed and completed at the end of burst routine. + * + * @param txq + * Pointer to TX queue structure. + * @param[in] pkts + * Packets to transmit. + * @param pkts_n + * Number of packets in array. + * @param loc + * Pointer to burst routine local context. + * @param olx + * Configured Tx offloads mask. It is fully defined at + * compile time and may be used for optimization. + * + * @return + * MLX5_TXCMP_CODE_EXIT - sending is done or impossible. + * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred. + * MLX5_TXCMP_CODE_MULTI - multi-segment packet encountered. + * MLX5_TXCMP_CODE_TSO - TSO packet encountered. + * MLX5_TXCMP_CODE_SINGLE - used inside functions set. + * MLX5_TXCMP_CODE_EMPW - used inside functions set. + * + * Local context variables updated. + * + * + * The routine sends packets with MLX5_OPCODE_EMPW + * without inlining, this is dedicated optimized branch. + * No VLAN insertion is supported. + */ +static __rte_always_inline enum mlx5_txcmp_code +mlx5_tx_burst_empw_simple(struct mlx5_txq_data *restrict txq, + struct rte_mbuf **restrict pkts, + unsigned int pkts_n, + struct mlx5_txq_local *restrict loc, + unsigned int olx) +{ + /* + * Subroutine is the part of mlx5_tx_burst_single() + * and sends single-segment packet with eMPW opcode + * without data inlining. + */ + MLX5_ASSERT(!MLX5_TXOFF_CONFIG(INLINE)); + MLX5_ASSERT(MLX5_TXOFF_CONFIG(EMPW)); + MLX5_ASSERT(loc->elts_free && loc->wqe_free); + MLX5_ASSERT(pkts_n > loc->pkts_sent); + static_assert(MLX5_EMPW_MIN_PACKETS >= 2, "invalid min size"); + pkts += loc->pkts_sent + 1; + pkts_n -= loc->pkts_sent; + for (;;) { + struct mlx5_wqe_dseg *restrict dseg; + struct mlx5_wqe_eseg *restrict eseg; + enum mlx5_txcmp_code ret; + unsigned int part, loop; + unsigned int slen = 0; + +next_empw: + MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1); + part = RTE_MIN(pkts_n, MLX5_TXOFF_CONFIG(MPW) ? + MLX5_MPW_MAX_PACKETS : + MLX5_EMPW_MAX_PACKETS); + if (unlikely(loc->elts_free < part)) { + /* We have no enough elts to save all mbufs. */ + if (unlikely(loc->elts_free < MLX5_EMPW_MIN_PACKETS)) + return MLX5_TXCMP_CODE_EXIT; + /* But we still able to send at least minimal eMPW. */ + part = loc->elts_free; + } + /* Check whether we have enough WQEs */ + if (unlikely(loc->wqe_free < ((2 + part + 3) / 4))) { + if (unlikely(loc->wqe_free < + ((2 + MLX5_EMPW_MIN_PACKETS + 3) / 4))) + return MLX5_TXCMP_CODE_EXIT; + part = (loc->wqe_free * 4) - 2; + } + if (likely(part > 1)) + rte_prefetch0(*pkts); + loc->wqe_last = txq->wqes + (txq->wqe_ci & txq->wqe_m); + /* + * Build eMPW title WQEBB: + * - Control Segment, eMPW opcode + * - Ethernet Segment, no inline + */ + mlx5_tx_cseg_init(txq, loc, loc->wqe_last, part + 2, + MLX5_OPCODE_ENHANCED_MPSW, olx); + mlx5_tx_eseg_none(txq, loc, loc->wqe_last, + olx & ~MLX5_TXOFF_CONFIG_VLAN); + eseg = &loc->wqe_last->eseg; + dseg = &loc->wqe_last->dseg[0]; + loop = part; + /* Store the packet length for legacy MPW. */ + if (MLX5_TXOFF_CONFIG(MPW)) + eseg->mss = rte_cpu_to_be_16 + (rte_pktmbuf_data_len(loc->mbuf)); + for (;;) { + uint32_t dlen = rte_pktmbuf_data_len(loc->mbuf); +#ifdef MLX5_PMD_SOFT_COUNTERS + /* Update sent data bytes counter. 
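+			 * slen accumulates the bytes of the current eMPW
+			 * batch and is folded into txq->stats.obytes when
+			 * the session is closed.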
*/ + slen += dlen; +#endif + mlx5_tx_dseg_ptr + (txq, loc, dseg, + rte_pktmbuf_mtod(loc->mbuf, uint8_t *), + dlen, olx); + if (unlikely(--loop == 0)) + break; + loc->mbuf = *pkts++; + if (likely(loop > 1)) + rte_prefetch0(*pkts); + ret = mlx5_tx_able_to_empw(txq, loc, olx, true); + /* + * Unroll the completion code to avoid + * returning variable value - it results in + * unoptimized sequent checking in caller. + */ + if (ret == MLX5_TXCMP_CODE_MULTI) { + part -= loop; + mlx5_tx_sdone_empw(txq, loc, part, slen, olx); + if (unlikely(!loc->elts_free || + !loc->wqe_free)) + return MLX5_TXCMP_CODE_EXIT; + return MLX5_TXCMP_CODE_MULTI; + } + MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1); + if (ret == MLX5_TXCMP_CODE_TSO) { + part -= loop; + mlx5_tx_sdone_empw(txq, loc, part, slen, olx); + if (unlikely(!loc->elts_free || + !loc->wqe_free)) + return MLX5_TXCMP_CODE_EXIT; + return MLX5_TXCMP_CODE_TSO; + } + if (ret == MLX5_TXCMP_CODE_SINGLE) { + part -= loop; + mlx5_tx_sdone_empw(txq, loc, part, slen, olx); + if (unlikely(!loc->elts_free || + !loc->wqe_free)) + return MLX5_TXCMP_CODE_EXIT; + return MLX5_TXCMP_CODE_SINGLE; + } + if (ret != MLX5_TXCMP_CODE_EMPW) { + MLX5_ASSERT(false); + part -= loop; + mlx5_tx_sdone_empw(txq, loc, part, slen, olx); + return MLX5_TXCMP_CODE_ERROR; + } + /* + * Check whether packet parameters coincide + * within assumed eMPW batch: + * - check sum settings + * - metadata value + * - software parser settings + * - packets length (legacy MPW only) + */ + if (!mlx5_tx_match_empw(txq, eseg, loc, dlen, olx)) { + MLX5_ASSERT(loop); + part -= loop; + mlx5_tx_sdone_empw(txq, loc, part, slen, olx); + if (unlikely(!loc->elts_free || + !loc->wqe_free)) + return MLX5_TXCMP_CODE_EXIT; + pkts_n -= part; + goto next_empw; + } + /* Packet attributes match, continue the same eMPW. */ + ++dseg; + if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end) + dseg = (struct mlx5_wqe_dseg *)txq->wqes; + } + /* eMPW is built successfully, update loop parameters. */ + MLX5_ASSERT(!loop); + MLX5_ASSERT(pkts_n >= part); +#ifdef MLX5_PMD_SOFT_COUNTERS + /* Update sent data bytes counter. */ + txq->stats.obytes += slen; +#endif + loc->elts_free -= part; + loc->pkts_sent += part; + txq->wqe_ci += (2 + part + 3) / 4; + loc->wqe_free -= (2 + part + 3) / 4; + pkts_n -= part; + if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free)) + return MLX5_TXCMP_CODE_EXIT; + loc->mbuf = *pkts++; + ret = mlx5_tx_able_to_empw(txq, loc, olx, true); + if (unlikely(ret != MLX5_TXCMP_CODE_EMPW)) + return ret; + /* Continue sending eMPW batches. */ + } + MLX5_ASSERT(false); +} + +/** + * The routine sends packets with MLX5_OPCODE_EMPW + * with inlining, optionally supports VLAN insertion. + */ +static __rte_always_inline enum mlx5_txcmp_code +mlx5_tx_burst_empw_inline(struct mlx5_txq_data *restrict txq, + struct rte_mbuf **restrict pkts, + unsigned int pkts_n, + struct mlx5_txq_local *restrict loc, + unsigned int olx) +{ + /* + * Subroutine is the part of mlx5_tx_burst_single() + * and sends single-segment packet with eMPW opcode + * with data inlining. 
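+	 * Unlike the non-inline variant, the session size is tracked
+	 * in bytes ("room") because inlined packets occupy a variable
+	 * number of WQE segments; the Data Segment count is written
+	 * only when the session is closed by mlx5_tx_idone_empw().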
+ */ + MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE)); + MLX5_ASSERT(MLX5_TXOFF_CONFIG(EMPW)); + MLX5_ASSERT(loc->elts_free && loc->wqe_free); + MLX5_ASSERT(pkts_n > loc->pkts_sent); + static_assert(MLX5_EMPW_MIN_PACKETS >= 2, "invalid min size"); + pkts += loc->pkts_sent + 1; + pkts_n -= loc->pkts_sent; + for (;;) { + struct mlx5_wqe_dseg *restrict dseg; + struct mlx5_wqe *restrict wqem; + enum mlx5_txcmp_code ret; + unsigned int room, part, nlim; + unsigned int slen = 0; + + MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1); + /* + * Limits the amount of packets in one WQE + * to improve CQE latency generation. + */ + nlim = RTE_MIN(pkts_n, MLX5_TXOFF_CONFIG(MPW) ? + MLX5_MPW_INLINE_MAX_PACKETS : + MLX5_EMPW_MAX_PACKETS); + /* Check whether we have minimal amount WQEs */ + if (unlikely(loc->wqe_free < + ((2 + MLX5_EMPW_MIN_PACKETS + 3) / 4))) + return MLX5_TXCMP_CODE_EXIT; + if (likely(pkts_n > 1)) + rte_prefetch0(*pkts); + wqem = txq->wqes + (txq->wqe_ci & txq->wqe_m); + /* + * Build eMPW title WQEBB: + * - Control Segment, eMPW opcode, zero DS + * - Ethernet Segment, no inline + */ + mlx5_tx_cseg_init(txq, loc, wqem, 0, + MLX5_OPCODE_ENHANCED_MPSW, olx); + mlx5_tx_eseg_none(txq, loc, wqem, + olx & ~MLX5_TXOFF_CONFIG_VLAN); + dseg = &wqem->dseg[0]; + /* Store the packet length for legacy MPW. */ + if (MLX5_TXOFF_CONFIG(MPW)) + wqem->eseg.mss = rte_cpu_to_be_16 + (rte_pktmbuf_data_len(loc->mbuf)); + room = RTE_MIN(MLX5_WQE_SIZE_MAX / MLX5_WQE_SIZE, + loc->wqe_free) * MLX5_WQE_SIZE - + MLX5_WQE_CSEG_SIZE - + MLX5_WQE_ESEG_SIZE; + /* Limit the room for legacy MPW sessions for performance. */ + if (MLX5_TXOFF_CONFIG(MPW)) + room = RTE_MIN(room, + RTE_MAX(txq->inlen_empw + + sizeof(dseg->bcount) + + (MLX5_TXOFF_CONFIG(VLAN) ? + sizeof(struct rte_vlan_hdr) : 0), + MLX5_MPW_INLINE_MAX_PACKETS * + MLX5_WQE_DSEG_SIZE)); + /* Build WQE till we have space, packets and resources. */ + part = room; + for (;;) { + uint32_t dlen = rte_pktmbuf_data_len(loc->mbuf); + uint8_t *dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *); + unsigned int tlen; + + MLX5_ASSERT(room >= MLX5_WQE_DSEG_SIZE); + MLX5_ASSERT((room % MLX5_WQE_DSEG_SIZE) == 0); + MLX5_ASSERT((uintptr_t)dseg < (uintptr_t)txq->wqes_end); + /* + * Some Tx offloads may cause an error if + * packet is not long enough, check against + * assumed minimal length. + */ + if (unlikely(dlen <= MLX5_ESEG_MIN_INLINE_SIZE)) { + part -= room; + if (unlikely(!part)) + return MLX5_TXCMP_CODE_ERROR; + /* + * We have some successfully built + * packet Data Segments to send. + */ + mlx5_tx_idone_empw(txq, loc, part, + slen, wqem, olx); + return MLX5_TXCMP_CODE_ERROR; + } + /* Inline or not inline - that's the Question. */ + if (dlen > txq->inlen_empw || + loc->mbuf->ol_flags & PKT_TX_DYNF_NOINLINE) + goto pointer_empw; + if (MLX5_TXOFF_CONFIG(MPW)) { + if (dlen > txq->inlen_send) + goto pointer_empw; + tlen = dlen; + if (part == room) { + /* Open new inline MPW session. */ + tlen += sizeof(dseg->bcount); + dseg->bcount = RTE_BE32(0); + dseg = RTE_PTR_ADD + (dseg, sizeof(dseg->bcount)); + } else { + /* + * No pointer and inline descriptor + * intermix for legacy MPW sessions. + */ + if (wqem->dseg[0].bcount) + break; + } + } else { + tlen = sizeof(dseg->bcount) + dlen; + } + /* Inline entire packet, optional VLAN insertion. */ + if (MLX5_TXOFF_CONFIG(VLAN) && + loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) { + /* + * The packet length must be checked in + * mlx5_tx_able_to_empw() and packet + * fits into inline length guaranteed. 
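+				 * The assertion below restates that check:
+				 * dlen plus the 4-byte VLAN header must not
+				 * exceed txq->inlen_empw.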
+ */ + MLX5_ASSERT((dlen + + sizeof(struct rte_vlan_hdr)) <= + txq->inlen_empw); + tlen += sizeof(struct rte_vlan_hdr); + if (room < tlen) + break; + dseg = mlx5_tx_dseg_vlan(txq, loc, dseg, + dptr, dlen, olx); +#ifdef MLX5_PMD_SOFT_COUNTERS + /* Update sent data bytes counter. */ + slen += sizeof(struct rte_vlan_hdr); +#endif + } else { + if (room < tlen) + break; + dseg = mlx5_tx_dseg_empw(txq, loc, dseg, + dptr, dlen, olx); + } + if (!MLX5_TXOFF_CONFIG(MPW)) + tlen = RTE_ALIGN(tlen, MLX5_WSEG_SIZE); + MLX5_ASSERT(room >= tlen); + room -= tlen; + /* + * Packet data are completely inlined, + * free the packet immediately. + */ + rte_pktmbuf_free_seg(loc->mbuf); + goto next_mbuf; +pointer_empw: + /* + * No pointer and inline descriptor + * intermix for legacy MPW sessions. + */ + if (MLX5_TXOFF_CONFIG(MPW) && + part != room && + wqem->dseg[0].bcount == RTE_BE32(0)) + break; + /* + * Not inlinable VLAN packets are + * proceeded outside of this routine. + */ + MLX5_ASSERT(room >= MLX5_WQE_DSEG_SIZE); + if (MLX5_TXOFF_CONFIG(VLAN)) + MLX5_ASSERT(!(loc->mbuf->ol_flags & + PKT_TX_VLAN_PKT)); + mlx5_tx_dseg_ptr(txq, loc, dseg, dptr, dlen, olx); + /* We have to store mbuf in elts.*/ + txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf; + room -= MLX5_WQE_DSEG_SIZE; + /* Ring buffer wraparound is checked at the loop end.*/ + ++dseg; +next_mbuf: +#ifdef MLX5_PMD_SOFT_COUNTERS + /* Update sent data bytes counter. */ + slen += dlen; +#endif + loc->pkts_sent++; + loc->elts_free--; + pkts_n--; + if (unlikely(!pkts_n || !loc->elts_free)) { + /* + * We have no resources/packets to + * continue build descriptors. + */ + part -= room; + mlx5_tx_idone_empw(txq, loc, part, + slen, wqem, olx); + return MLX5_TXCMP_CODE_EXIT; + } + loc->mbuf = *pkts++; + if (likely(pkts_n > 1)) + rte_prefetch0(*pkts); + ret = mlx5_tx_able_to_empw(txq, loc, olx, true); + /* + * Unroll the completion code to avoid + * returning variable value - it results in + * unoptimized sequent checking in caller. + */ + if (ret == MLX5_TXCMP_CODE_MULTI) { + part -= room; + mlx5_tx_idone_empw(txq, loc, part, + slen, wqem, olx); + if (unlikely(!loc->elts_free || + !loc->wqe_free)) + return MLX5_TXCMP_CODE_EXIT; + return MLX5_TXCMP_CODE_MULTI; + } + MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1); + if (ret == MLX5_TXCMP_CODE_TSO) { + part -= room; + mlx5_tx_idone_empw(txq, loc, part, + slen, wqem, olx); + if (unlikely(!loc->elts_free || + !loc->wqe_free)) + return MLX5_TXCMP_CODE_EXIT; + return MLX5_TXCMP_CODE_TSO; + } + if (ret == MLX5_TXCMP_CODE_SINGLE) { + part -= room; + mlx5_tx_idone_empw(txq, loc, part, + slen, wqem, olx); + if (unlikely(!loc->elts_free || + !loc->wqe_free)) + return MLX5_TXCMP_CODE_EXIT; + return MLX5_TXCMP_CODE_SINGLE; + } + if (ret != MLX5_TXCMP_CODE_EMPW) { + MLX5_ASSERT(false); + part -= room; + mlx5_tx_idone_empw(txq, loc, part, + slen, wqem, olx); + return MLX5_TXCMP_CODE_ERROR; + } + /* Check if we have minimal room left. */ + nlim--; + if (unlikely(!nlim || room < MLX5_WQE_DSEG_SIZE)) + break; + /* + * Check whether packet parameters coincide + * within assumed eMPW batch: + * - check sum settings + * - metadata value + * - software parser settings + * - packets length (legacy MPW only) + */ + if (!mlx5_tx_match_empw(txq, &wqem->eseg, + loc, dlen, olx)) + break; + /* Packet attributes match, continue the same eMPW. */ + if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end) + dseg = (struct mlx5_wqe_dseg *)txq->wqes; + } + /* + * We get here to close an existing eMPW + * session and start the new one. 
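+		 * "part - room" is the byte amount actually consumed from
+		 * the session budget; if nothing was consumed there is no
+		 * descriptor to close and the burst simply exits.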
+ */ + MLX5_ASSERT(pkts_n); + part -= room; + if (unlikely(!part)) + return MLX5_TXCMP_CODE_EXIT; + mlx5_tx_idone_empw(txq, loc, part, slen, wqem, olx); + if (unlikely(!loc->elts_free || + !loc->wqe_free)) + return MLX5_TXCMP_CODE_EXIT; + /* Continue the loop with new eMPW session. */ + } + MLX5_ASSERT(false); +} + +/** + * The routine sends packets with ordinary MLX5_OPCODE_SEND. + * Data inlining and VLAN insertion are supported. + */ +static __rte_always_inline enum mlx5_txcmp_code +mlx5_tx_burst_single_send(struct mlx5_txq_data *restrict txq, + struct rte_mbuf **restrict pkts, + unsigned int pkts_n, + struct mlx5_txq_local *restrict loc, + unsigned int olx) +{ + /* + * Subroutine is the part of mlx5_tx_burst_single() + * and sends single-segment packet with SEND opcode. + */ + MLX5_ASSERT(loc->elts_free && loc->wqe_free); + MLX5_ASSERT(pkts_n > loc->pkts_sent); + pkts += loc->pkts_sent + 1; + pkts_n -= loc->pkts_sent; + for (;;) { + struct mlx5_wqe *restrict wqe; + enum mlx5_txcmp_code ret; + + MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1); + if (MLX5_TXOFF_CONFIG(INLINE)) { + unsigned int inlen, vlan = 0; + + inlen = rte_pktmbuf_data_len(loc->mbuf); + if (MLX5_TXOFF_CONFIG(VLAN) && + loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) { + vlan = sizeof(struct rte_vlan_hdr); + inlen += vlan; + static_assert((sizeof(struct rte_vlan_hdr) + + sizeof(struct rte_ether_hdr)) == + MLX5_ESEG_MIN_INLINE_SIZE, + "invalid min inline data size"); + } + /* + * If inlining is enabled at configuration time + * the limit must be not less than minimal size. + * Otherwise we would do extra check for data + * size to avoid crashes due to length overflow. + */ + MLX5_ASSERT(txq->inlen_send >= + MLX5_ESEG_MIN_INLINE_SIZE); + if (inlen <= txq->inlen_send) { + unsigned int seg_n, wqe_n; + + rte_prefetch0(rte_pktmbuf_mtod + (loc->mbuf, uint8_t *)); + /* Check against minimal length. */ + if (inlen <= MLX5_ESEG_MIN_INLINE_SIZE) + return MLX5_TXCMP_CODE_ERROR; + if (loc->mbuf->ol_flags & + PKT_TX_DYNF_NOINLINE) { + /* + * The hint flag not to inline packet + * data is set. Check whether we can + * follow the hint. + */ + if ((!MLX5_TXOFF_CONFIG(EMPW) && + txq->inlen_mode) || + (MLX5_TXOFF_CONFIG(MPW) && + txq->inlen_mode)) { + /* + * The hardware requires the + * minimal inline data header. + */ + goto single_min_inline; + } + if (MLX5_TXOFF_CONFIG(VLAN) && + vlan && !txq->vlan_en) { + /* + * We must insert VLAN tag + * by software means. + */ + goto single_part_inline; + } + goto single_no_inline; + } + /* + * Completely inlined packet data WQE: + * - Control Segment, SEND opcode + * - Ethernet Segment, no VLAN insertion + * - Data inlined, VLAN optionally inserted + * - Alignment to MLX5_WSEG_SIZE + * Have to estimate amount of WQEBBs + */ + seg_n = (inlen + 3 * MLX5_WSEG_SIZE - + MLX5_ESEG_MIN_INLINE_SIZE + + MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE; + /* Check if there are enough WQEBBs. */ + wqe_n = (seg_n + 3) / 4; + if (wqe_n > loc->wqe_free) + return MLX5_TXCMP_CODE_EXIT; + wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m); + loc->wqe_last = wqe; + mlx5_tx_cseg_init(txq, loc, wqe, seg_n, + MLX5_OPCODE_SEND, olx); + mlx5_tx_eseg_data(txq, loc, wqe, + vlan, inlen, 0, olx); + txq->wqe_ci += wqe_n; + loc->wqe_free -= wqe_n; + /* + * Packet data are completely inlined, + * free the packet immediately. 
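+				 * Nothing references the mbuf any more, the
+				 * data already sit in the WQE, so it is not
+				 * stored in elts and needs no completion to
+				 * be released.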
+ */ + rte_pktmbuf_free_seg(loc->mbuf); + } else if ((!MLX5_TXOFF_CONFIG(EMPW) || + MLX5_TXOFF_CONFIG(MPW)) && + txq->inlen_mode) { + /* + * If minimal inlining is requested the eMPW + * feature should be disabled due to data is + * inlined into Ethernet Segment, which can + * not contain inlined data for eMPW due to + * segment shared for all packets. + */ + struct mlx5_wqe_dseg *restrict dseg; + unsigned int ds; + uint8_t *dptr; + + /* + * The inline-mode settings require + * to inline the specified amount of + * data bytes to the Ethernet Segment. + * We should check the free space in + * WQE ring buffer to inline partially. + */ +single_min_inline: + MLX5_ASSERT(txq->inlen_send >= txq->inlen_mode); + MLX5_ASSERT(inlen > txq->inlen_mode); + MLX5_ASSERT(txq->inlen_mode >= + MLX5_ESEG_MIN_INLINE_SIZE); + /* + * Check whether there are enough free WQEBBs: + * - Control Segment + * - Ethernet Segment + * - First Segment of inlined Ethernet data + * - ... data continued ... + * - Finishing Data Segment of pointer type + */ + ds = (MLX5_WQE_CSEG_SIZE + + MLX5_WQE_ESEG_SIZE + + MLX5_WQE_DSEG_SIZE + + txq->inlen_mode - + MLX5_ESEG_MIN_INLINE_SIZE + + MLX5_WQE_DSEG_SIZE + + MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE; + if (loc->wqe_free < ((ds + 3) / 4)) + return MLX5_TXCMP_CODE_EXIT; + /* + * Build the ordinary SEND WQE: + * - Control Segment + * - Ethernet Segment, inline inlen_mode bytes + * - Data Segment of pointer type + */ + wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m); + loc->wqe_last = wqe; + mlx5_tx_cseg_init(txq, loc, wqe, ds, + MLX5_OPCODE_SEND, olx); + dseg = mlx5_tx_eseg_data(txq, loc, wqe, vlan, + txq->inlen_mode, + 0, olx); + dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *) + + txq->inlen_mode - vlan; + inlen -= txq->inlen_mode; + mlx5_tx_dseg_ptr(txq, loc, dseg, + dptr, inlen, olx); + /* + * WQE is built, update the loop parameters + * and got to the next packet. + */ + txq->wqe_ci += (ds + 3) / 4; + loc->wqe_free -= (ds + 3) / 4; + /* We have to store mbuf in elts.*/ + MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE)); + txq->elts[txq->elts_head++ & txq->elts_m] = + loc->mbuf; + --loc->elts_free; + } else { + uint8_t *dptr; + unsigned int dlen; + + /* + * Partially inlined packet data WQE, we have + * some space in title WQEBB, we can fill it + * with some packet data. It takes one WQEBB, + * it is available, no extra space check: + * - Control Segment, SEND opcode + * - Ethernet Segment, no VLAN insertion + * - MLX5_ESEG_MIN_INLINE_SIZE bytes of Data + * - Data Segment, pointer type + * + * We also get here if VLAN insertion is not + * supported by HW, the inline is enabled. + */ +single_part_inline: + wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m); + loc->wqe_last = wqe; + mlx5_tx_cseg_init(txq, loc, wqe, 4, + MLX5_OPCODE_SEND, olx); + mlx5_tx_eseg_dmin(txq, loc, wqe, vlan, olx); + dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *) + + MLX5_ESEG_MIN_INLINE_SIZE - vlan; + /* + * The length check is performed above, by + * comparing with txq->inlen_send. We should + * not get overflow here. + */ + MLX5_ASSERT(inlen > MLX5_ESEG_MIN_INLINE_SIZE); + dlen = inlen - MLX5_ESEG_MIN_INLINE_SIZE; + mlx5_tx_dseg_ptr(txq, loc, &wqe->dseg[1], + dptr, dlen, olx); + ++txq->wqe_ci; + --loc->wqe_free; + /* We have to store mbuf in elts.*/ + MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE)); + txq->elts[txq->elts_head++ & txq->elts_m] = + loc->mbuf; + --loc->elts_free; + } +#ifdef MLX5_PMD_SOFT_COUNTERS + /* Update sent data bytes counter. 
*/ + txq->stats.obytes += vlan + + rte_pktmbuf_data_len(loc->mbuf); +#endif + } else { + /* + * No inline at all, it means the CPU cycles saving + * is prioritized at configuration, we should not + * copy any packet data to WQE. + * + * SEND WQE, one WQEBB: + * - Control Segment, SEND opcode + * - Ethernet Segment, optional VLAN, no inline + * - Data Segment, pointer type + */ +single_no_inline: + wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m); + loc->wqe_last = wqe; + mlx5_tx_cseg_init(txq, loc, wqe, 3, + MLX5_OPCODE_SEND, olx); + mlx5_tx_eseg_none(txq, loc, wqe, olx); + mlx5_tx_dseg_ptr + (txq, loc, &wqe->dseg[0], + rte_pktmbuf_mtod(loc->mbuf, uint8_t *), + rte_pktmbuf_data_len(loc->mbuf), olx); + ++txq->wqe_ci; + --loc->wqe_free; + /* + * We should not store mbuf pointer in elts + * if no inlining is configured, this is done + * by calling routine in a batch copy. + */ + MLX5_ASSERT(!MLX5_TXOFF_CONFIG(INLINE)); + --loc->elts_free; +#ifdef MLX5_PMD_SOFT_COUNTERS + /* Update sent data bytes counter. */ + txq->stats.obytes += rte_pktmbuf_data_len(loc->mbuf); + if (MLX5_TXOFF_CONFIG(VLAN) && + loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) + txq->stats.obytes += + sizeof(struct rte_vlan_hdr); +#endif + } + ++loc->pkts_sent; + --pkts_n; + if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free)) + return MLX5_TXCMP_CODE_EXIT; + loc->mbuf = *pkts++; + if (pkts_n > 1) + rte_prefetch0(*pkts); + ret = mlx5_tx_able_to_empw(txq, loc, olx, true); + if (unlikely(ret != MLX5_TXCMP_CODE_SINGLE)) + return ret; + } + MLX5_ASSERT(false); +} + +static __rte_always_inline enum mlx5_txcmp_code +mlx5_tx_burst_single(struct mlx5_txq_data *restrict txq, + struct rte_mbuf **restrict pkts, + unsigned int pkts_n, + struct mlx5_txq_local *restrict loc, + unsigned int olx) +{ + enum mlx5_txcmp_code ret; + + ret = mlx5_tx_able_to_empw(txq, loc, olx, false); + if (ret == MLX5_TXCMP_CODE_SINGLE) + goto ordinary_send; + MLX5_ASSERT(ret == MLX5_TXCMP_CODE_EMPW); + for (;;) { + /* Optimize for inline/no inline eMPW send. */ + ret = (MLX5_TXOFF_CONFIG(INLINE)) ? + mlx5_tx_burst_empw_inline + (txq, pkts, pkts_n, loc, olx) : + mlx5_tx_burst_empw_simple + (txq, pkts, pkts_n, loc, olx); + if (ret != MLX5_TXCMP_CODE_SINGLE) + return ret; + /* The resources to send one packet should remain. */ + MLX5_ASSERT(loc->elts_free && loc->wqe_free); +ordinary_send: + ret = mlx5_tx_burst_single_send(txq, pkts, pkts_n, loc, olx); + MLX5_ASSERT(ret != MLX5_TXCMP_CODE_SINGLE); + if (ret != MLX5_TXCMP_CODE_EMPW) + return ret; + /* The resources to send one packet should remain. */ + MLX5_ASSERT(loc->elts_free && loc->wqe_free); + } +} + +/** + * DPDK Tx callback template. This is configured template + * used to generate routines optimized for specified offload setup. + * One of this generated functions is chosen at SQ configuration + * time. + * + * @param txq + * Generic pointer to TX queue structure. + * @param[in] pkts + * Packets to transmit. + * @param pkts_n + * Number of packets in array. + * @param olx + * Configured offloads mask, presents the bits of MLX5_TXOFF_CONFIG_xxx + * values. Should be static to take compile time static configuration + * advantages. + * + * @return + * Number of packets successfully transmitted (<= pkts_n). 
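+ *
+ * The MLX5_TXOFF_DECL() instances below are expected to generate thin
+ * wrappers around this template that fix the offload mask at compile
+ * time, roughly of the following shape (illustrative sketch only; the
+ * actual expansion is produced by the macro, which is not shown in
+ * this fragment):
+ *
+ *     static uint16_t
+ *     mlx5_tx_burst_md_empw(void *txq, struct rte_mbuf **pkts,
+ *                           uint16_t pkts_n)
+ *     {
+ *             return mlx5_tx_burst_tmpl((struct mlx5_txq_data *)txq,
+ *                                       pkts, pkts_n,
+ *                                       MLX5_TXOFF_CONFIG_METADATA |
+ *                                       MLX5_TXOFF_CONFIG_EMPW);
+ *     }
+ *
+ * Keeping olx a compile-time constant lets the compiler optimize out
+ * all the MLX5_TXOFF_CONFIG() branches in the template body.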
+ */ +static __rte_always_inline uint16_t +mlx5_tx_burst_tmpl(struct mlx5_txq_data *restrict txq, + struct rte_mbuf **restrict pkts, + uint16_t pkts_n, + unsigned int olx) +{ + struct mlx5_txq_local loc; + enum mlx5_txcmp_code ret; + unsigned int part; + + MLX5_ASSERT(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail)); + MLX5_ASSERT(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi)); + if (unlikely(!pkts_n)) + return 0; + loc.pkts_sent = 0; + loc.pkts_copy = 0; + loc.wqe_last = NULL; + +send_loop: + loc.pkts_loop = loc.pkts_sent; + /* + * Check if there are some CQEs, if any: + * - process an encountered errors + * - process the completed WQEs + * - free related mbufs + * - doorbell the NIC about processed CQEs + */ + rte_prefetch0(*(pkts + loc.pkts_sent)); + mlx5_tx_handle_completion(txq, olx); + /* + * Calculate the number of available resources - elts and WQEs. + * There are two possible different scenarios: + * - no data inlining into WQEs, one WQEBB may contains up to + * four packets, in this case elts become scarce resource + * - data inlining into WQEs, one packet may require multiple + * WQEBBs, the WQEs become the limiting factor. + */ + MLX5_ASSERT(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail)); + loc.elts_free = txq->elts_s - + (uint16_t)(txq->elts_head - txq->elts_tail); + MLX5_ASSERT(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi)); + loc.wqe_free = txq->wqe_s - + (uint16_t)(txq->wqe_ci - txq->wqe_pi); + if (unlikely(!loc.elts_free || !loc.wqe_free)) + goto burst_exit; + for (;;) { + /* + * Fetch the packet from array. Usually this is + * the first packet in series of multi/single + * segment packets. + */ + loc.mbuf = *(pkts + loc.pkts_sent); + /* Dedicated branch for multi-segment packets. */ + if (MLX5_TXOFF_CONFIG(MULTI) && + unlikely(NB_SEGS(loc.mbuf) > 1)) { + /* + * Multi-segment packet encountered. + * Hardware is able to process it only + * with SEND/TSO opcodes, one packet + * per WQE, do it in dedicated routine. + */ +enter_send_multi: + MLX5_ASSERT(loc.pkts_sent >= loc.pkts_copy); + part = loc.pkts_sent - loc.pkts_copy; + if (!MLX5_TXOFF_CONFIG(INLINE) && part) { + /* + * There are some single-segment mbufs not + * stored in elts. The mbufs must be in the + * same order as WQEs, so we must copy the + * mbufs to elts here, before the coming + * multi-segment packet mbufs is appended. + */ + mlx5_tx_copy_elts(txq, pkts + loc.pkts_copy, + part, olx); + loc.pkts_copy = loc.pkts_sent; + } + MLX5_ASSERT(pkts_n > loc.pkts_sent); + ret = mlx5_tx_burst_mseg(txq, pkts, pkts_n, &loc, olx); + if (!MLX5_TXOFF_CONFIG(INLINE)) + loc.pkts_copy = loc.pkts_sent; + /* + * These returned code checks are supposed + * to be optimized out due to routine inlining. + */ + if (ret == MLX5_TXCMP_CODE_EXIT) { + /* + * The routine returns this code when + * all packets are sent or there is no + * enough resources to complete request. + */ + break; + } + if (ret == MLX5_TXCMP_CODE_ERROR) { + /* + * The routine returns this code when + * some error in the incoming packets + * format occurred. + */ + txq->stats.oerrors++; + break; + } + if (ret == MLX5_TXCMP_CODE_SINGLE) { + /* + * The single-segment packet was encountered + * in the array, try to send it with the + * best optimized way, possible engaging eMPW. + */ + goto enter_send_single; + } + if (MLX5_TXOFF_CONFIG(TSO) && + ret == MLX5_TXCMP_CODE_TSO) { + /* + * The single-segment TSO packet was + * encountered in the array. + */ + goto enter_send_tso; + } + /* We must not get here. Something is going wrong. 
*/ + MLX5_ASSERT(false); + txq->stats.oerrors++; + break; + } + /* Dedicated branch for single-segment TSO packets. */ + if (MLX5_TXOFF_CONFIG(TSO) && + unlikely(loc.mbuf->ol_flags & PKT_TX_TCP_SEG)) { + /* + * TSO might require special way for inlining + * (dedicated parameters) and is sent with + * MLX5_OPCODE_TSO opcode only, provide this + * in dedicated branch. + */ +enter_send_tso: + MLX5_ASSERT(NB_SEGS(loc.mbuf) == 1); + MLX5_ASSERT(pkts_n > loc.pkts_sent); + ret = mlx5_tx_burst_tso(txq, pkts, pkts_n, &loc, olx); + /* + * These returned code checks are supposed + * to be optimized out due to routine inlining. + */ + if (ret == MLX5_TXCMP_CODE_EXIT) + break; + if (ret == MLX5_TXCMP_CODE_ERROR) { + txq->stats.oerrors++; + break; + } + if (ret == MLX5_TXCMP_CODE_SINGLE) + goto enter_send_single; + if (MLX5_TXOFF_CONFIG(MULTI) && + ret == MLX5_TXCMP_CODE_MULTI) { + /* + * The multi-segment packet was + * encountered in the array. + */ + goto enter_send_multi; + } + /* We must not get here. Something is going wrong. */ + MLX5_ASSERT(false); + txq->stats.oerrors++; + break; + } + /* + * The dedicated branch for the single-segment packets + * without TSO. Often these ones can be sent using + * MLX5_OPCODE_EMPW with multiple packets in one WQE. + * The routine builds the WQEs till it encounters + * the TSO or multi-segment packet (in case if these + * offloads are requested at SQ configuration time). + */ +enter_send_single: + MLX5_ASSERT(pkts_n > loc.pkts_sent); + ret = mlx5_tx_burst_single(txq, pkts, pkts_n, &loc, olx); + /* + * These returned code checks are supposed + * to be optimized out due to routine inlining. + */ + if (ret == MLX5_TXCMP_CODE_EXIT) + break; + if (ret == MLX5_TXCMP_CODE_ERROR) { + txq->stats.oerrors++; + break; + } + if (MLX5_TXOFF_CONFIG(MULTI) && + ret == MLX5_TXCMP_CODE_MULTI) { + /* + * The multi-segment packet was + * encountered in the array. + */ + goto enter_send_multi; + } + if (MLX5_TXOFF_CONFIG(TSO) && + ret == MLX5_TXCMP_CODE_TSO) { + /* + * The single-segment TSO packet was + * encountered in the array. + */ + goto enter_send_tso; + } + /* We must not get here. Something is going wrong. */ + MLX5_ASSERT(false); + txq->stats.oerrors++; + break; + } + /* + * Main Tx loop is completed, do the rest: + * - set completion request if thresholds are reached + * - doorbell the hardware + * - copy the rest of mbufs to elts (if any) + */ + MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE) || + loc.pkts_sent >= loc.pkts_copy); + /* Take a shortcut if nothing is sent. */ + if (unlikely(loc.pkts_sent == loc.pkts_loop)) + goto burst_exit; + /* Request CQE generation if limits are reached. */ + mlx5_tx_request_completion(txq, &loc, olx); + /* + * Ring QP doorbell immediately after WQE building completion + * to improve latencies. The pure software related data treatment + * can be completed after doorbell. Tx CQEs for this SQ are + * processed in this thread only by the polling. + * + * The rdma core library can map doorbell register in two ways, + * depending on the environment variable "MLX5_SHUT_UP_BF": + * + * - as regular cached memory, the variable is either missing or + * set to zero. This type of mapping may cause the significant + * doorbell register writing latency and requires explicit + * memory write barrier to mitigate this issue and prevent + * write combining. + * + * - as non-cached memory, the variable is present and set to + * not "0" value. 
This type of mapping may cause a performance
+	 * impact under heavy loading conditions, but the explicit write
+	 * memory barrier is not required and this may improve core
+	 * performance.
+	 *
+	 * - the legacy behaviour (prior to the 19.08 release) was to use
+	 *   some heuristics to decide whether a write memory barrier
+	 *   should be performed. This behaviour can still be selected by
+	 *   specifying tx_db_nc=2; the write barrier is skipped if the
+	 *   application provides the full recommended burst of packets,
+	 *   assuming the next packets are coming and the write barrier
+	 *   will be issued on the next burst (after descriptor writing,
+	 *   at least).
+	 */
+	mlx5_tx_dbrec_cond_wmb(txq, loc.wqe_last, !txq->db_nc &&
+			(!txq->db_heu || pkts_n % MLX5_TX_DEFAULT_BURST));
+	/* Not all of the mbufs may be stored into elts yet. */
+	part = MLX5_TXOFF_CONFIG(INLINE) ? 0 : loc.pkts_sent - loc.pkts_copy;
+	if (!MLX5_TXOFF_CONFIG(INLINE) && part) {
+		/*
+		 * There are some single-segment mbufs not stored in elts.
+		 * This can only happen if the last packet was
+		 * single-segment. The copying is gathered into one place
+		 * because it is a good opportunity to optimize it with
+		 * SIMD. Unfortunately, if inlining is enabled, gaps may
+		 * appear in the pointer array because the inlined mbufs
+		 * are freed early.
+		 */
+		mlx5_tx_copy_elts(txq, pkts + loc.pkts_copy, part, olx);
+		loc.pkts_copy = loc.pkts_sent;
+	}
+	MLX5_ASSERT(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
+	MLX5_ASSERT(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
+	if (pkts_n > loc.pkts_sent) {
+		/*
+		 * If the burst size is large there might not be enough
+		 * CQEs fetched from the completion queue and not enough
+		 * resources freed to send all the packets.
+		 */
+		goto send_loop;
+	}
+burst_exit:
+#ifdef MLX5_PMD_SOFT_COUNTERS
+	/* Increment sent packets counter. */
+	txq->stats.opackets += loc.pkts_sent;
+#endif
+	return loc.pkts_sent;
+}
+
+/* Generate routines with Enhanced Multi-Packet Write support. 
*/ +MLX5_TXOFF_DECL(full_empw, + MLX5_TXOFF_CONFIG_FULL | MLX5_TXOFF_CONFIG_EMPW) + +MLX5_TXOFF_DECL(none_empw, + MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_EMPW) + +MLX5_TXOFF_DECL(md_empw, + MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW) + +MLX5_TXOFF_DECL(mt_empw, + MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO | + MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW) + +MLX5_TXOFF_DECL(mtsc_empw, + MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO | + MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM | + MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW) + +MLX5_TXOFF_DECL(mti_empw, + MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO | + MLX5_TXOFF_CONFIG_INLINE | + MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW) + +MLX5_TXOFF_DECL(mtv_empw, + MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO | + MLX5_TXOFF_CONFIG_VLAN | + MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW) + +MLX5_TXOFF_DECL(mtiv_empw, + MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO | + MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN | + MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW) + +MLX5_TXOFF_DECL(sc_empw, + MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM | + MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW) + +MLX5_TXOFF_DECL(sci_empw, + MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM | + MLX5_TXOFF_CONFIG_INLINE | + MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW) + +MLX5_TXOFF_DECL(scv_empw, + MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM | + MLX5_TXOFF_CONFIG_VLAN | + MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW) + +MLX5_TXOFF_DECL(sciv_empw, + MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM | + MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN | + MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW) + +MLX5_TXOFF_DECL(i_empw, + MLX5_TXOFF_CONFIG_INLINE | + MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW) + +MLX5_TXOFF_DECL(v_empw, + MLX5_TXOFF_CONFIG_VLAN | + MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW) + +MLX5_TXOFF_DECL(iv_empw, + MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN | + MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW) + +/* Generate routines without Enhanced Multi-Packet Write support. 
*/ +MLX5_TXOFF_DECL(full, + MLX5_TXOFF_CONFIG_FULL) + +MLX5_TXOFF_DECL(none, + MLX5_TXOFF_CONFIG_NONE) + +MLX5_TXOFF_DECL(md, + MLX5_TXOFF_CONFIG_METADATA) + +MLX5_TXOFF_DECL(mt, + MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO | + MLX5_TXOFF_CONFIG_METADATA) + +MLX5_TXOFF_DECL(mtsc, + MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO | + MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM | + MLX5_TXOFF_CONFIG_METADATA) + +MLX5_TXOFF_DECL(mti, + MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO | + MLX5_TXOFF_CONFIG_INLINE | + MLX5_TXOFF_CONFIG_METADATA) + + +MLX5_TXOFF_DECL(mtv, + MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO | + MLX5_TXOFF_CONFIG_VLAN | + MLX5_TXOFF_CONFIG_METADATA) + + +MLX5_TXOFF_DECL(mtiv, + MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO | + MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN | + MLX5_TXOFF_CONFIG_METADATA) + +MLX5_TXOFF_DECL(sc, + MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM | + MLX5_TXOFF_CONFIG_METADATA) + +MLX5_TXOFF_DECL(sci, + MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM | + MLX5_TXOFF_CONFIG_INLINE | + MLX5_TXOFF_CONFIG_METADATA) + + +MLX5_TXOFF_DECL(scv, + MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM | + MLX5_TXOFF_CONFIG_VLAN | + MLX5_TXOFF_CONFIG_METADATA) + + +MLX5_TXOFF_DECL(sciv, + MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM | + MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN | + MLX5_TXOFF_CONFIG_METADATA) + +MLX5_TXOFF_DECL(i, + MLX5_TXOFF_CONFIG_INLINE | + MLX5_TXOFF_CONFIG_METADATA) + +MLX5_TXOFF_DECL(v, + MLX5_TXOFF_CONFIG_VLAN | + MLX5_TXOFF_CONFIG_METADATA) + +MLX5_TXOFF_DECL(iv, + MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN | + MLX5_TXOFF_CONFIG_METADATA) + +/* + * Generate routines with Legacy Multi-Packet Write support. + * This mode is supported by ConnectX-4 Lx only and imposes + * offload limitations, not supported: + * - ACL/Flows (metadata are becoming meaningless) + * - WQE Inline headers + * - SRIOV (E-Switch offloads) + * - VLAN insertion + * - tunnel encapsulation/decapsulation + * - TSO + */ +MLX5_TXOFF_DECL(none_mpw, + MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_EMPW | + MLX5_TXOFF_CONFIG_MPW) + +MLX5_TXOFF_DECL(mci_mpw, + MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_CSUM | + MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_EMPW | + MLX5_TXOFF_CONFIG_MPW) + +MLX5_TXOFF_DECL(mc_mpw, + MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_CSUM | + MLX5_TXOFF_CONFIG_EMPW | MLX5_TXOFF_CONFIG_MPW) + +MLX5_TXOFF_DECL(i_mpw, + MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_EMPW | + MLX5_TXOFF_CONFIG_MPW) + +/* + * Array of declared and compiled Tx burst function and corresponding + * supported offloads set. The array is used to select the Tx burst + * function for specified offloads set at Tx queue configuration time. 
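+ *
+ * Each MLX5_TXOFF_INFO(name, olx) entry below is expected to expand to
+ * an initializer pairing the generated mlx5_tx_burst_<name> routine
+ * with its offload mask, roughly (illustrative sketch only; the macro
+ * itself is not shown in this fragment):
+ *
+ *     { .func = mlx5_tx_burst_md_empw,
+ *       .olx = MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW },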
+ */ +const struct { + eth_tx_burst_t func; + unsigned int olx; +} txoff_func[] = { +MLX5_TXOFF_INFO(full_empw, + MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO | + MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM | + MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN | + MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW) + +MLX5_TXOFF_INFO(none_empw, + MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_EMPW) + +MLX5_TXOFF_INFO(md_empw, + MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW) + +MLX5_TXOFF_INFO(mt_empw, + MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO | + MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW) + +MLX5_TXOFF_INFO(mtsc_empw, + MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO | + MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM | + MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW) + +MLX5_TXOFF_INFO(mti_empw, + MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO | + MLX5_TXOFF_CONFIG_INLINE | + MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW) + +MLX5_TXOFF_INFO(mtv_empw, + MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO | + MLX5_TXOFF_CONFIG_VLAN | + MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW) + +MLX5_TXOFF_INFO(mtiv_empw, + MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO | + MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN | + MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW) + +MLX5_TXOFF_INFO(sc_empw, + MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM | + MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW) + +MLX5_TXOFF_INFO(sci_empw, + MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM | + MLX5_TXOFF_CONFIG_INLINE | + MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW) + +MLX5_TXOFF_INFO(scv_empw, + MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM | + MLX5_TXOFF_CONFIG_VLAN | + MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW) + +MLX5_TXOFF_INFO(sciv_empw, + MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM | + MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN | + MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW) + +MLX5_TXOFF_INFO(i_empw, + MLX5_TXOFF_CONFIG_INLINE | + MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW) + +MLX5_TXOFF_INFO(v_empw, + MLX5_TXOFF_CONFIG_VLAN | + MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW) + +MLX5_TXOFF_INFO(iv_empw, + MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN | + MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW) + +MLX5_TXOFF_INFO(full, + MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO | + MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM | + MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN | + MLX5_TXOFF_CONFIG_METADATA) + +MLX5_TXOFF_INFO(none, + MLX5_TXOFF_CONFIG_NONE) + +MLX5_TXOFF_INFO(md, + MLX5_TXOFF_CONFIG_METADATA) + +MLX5_TXOFF_INFO(mt, + MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO | + MLX5_TXOFF_CONFIG_METADATA) + +MLX5_TXOFF_INFO(mtsc, + MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO | + MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM | + MLX5_TXOFF_CONFIG_METADATA) + +MLX5_TXOFF_INFO(mti, + MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO | + MLX5_TXOFF_CONFIG_INLINE | + MLX5_TXOFF_CONFIG_METADATA) + +MLX5_TXOFF_INFO(mtv, + MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO | + MLX5_TXOFF_CONFIG_VLAN | + MLX5_TXOFF_CONFIG_METADATA) + +MLX5_TXOFF_INFO(mtiv, + MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO | + MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN | + MLX5_TXOFF_CONFIG_METADATA) + +MLX5_TXOFF_INFO(sc, + MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM | + MLX5_TXOFF_CONFIG_METADATA) + +MLX5_TXOFF_INFO(sci, + MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM | + MLX5_TXOFF_CONFIG_INLINE | + 
MLX5_TXOFF_CONFIG_METADATA) + +MLX5_TXOFF_INFO(scv, + MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM | + MLX5_TXOFF_CONFIG_VLAN | + MLX5_TXOFF_CONFIG_METADATA) + +MLX5_TXOFF_INFO(sciv, + MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM | + MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN | + MLX5_TXOFF_CONFIG_METADATA) + +MLX5_TXOFF_INFO(i, + MLX5_TXOFF_CONFIG_INLINE | + MLX5_TXOFF_CONFIG_METADATA) + +MLX5_TXOFF_INFO(v, + MLX5_TXOFF_CONFIG_VLAN | + MLX5_TXOFF_CONFIG_METADATA) + +MLX5_TXOFF_INFO(iv, + MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN | + MLX5_TXOFF_CONFIG_METADATA) + +MLX5_TXOFF_INFO(none_mpw, + MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_EMPW | + MLX5_TXOFF_CONFIG_MPW) + +MLX5_TXOFF_INFO(mci_mpw, + MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_CSUM | + MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_EMPW | + MLX5_TXOFF_CONFIG_MPW) + +MLX5_TXOFF_INFO(mc_mpw, + MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_CSUM | + MLX5_TXOFF_CONFIG_EMPW | MLX5_TXOFF_CONFIG_MPW) + +MLX5_TXOFF_INFO(i_mpw, + MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_EMPW | + MLX5_TXOFF_CONFIG_MPW) +}; + +/** + * Configure the Tx function to use. The routine checks configured + * Tx offloads for the device and selects appropriate Tx burst + * routine. There are multiple Tx burst routines compiled from + * the same template in the most optimal way for the dedicated + * Tx offloads set. + * + * @param dev + * Pointer to private data structure. + * + * @return + * Pointer to selected Tx burst function. + */ +eth_tx_burst_t +mlx5_select_tx_function(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_dev_config *config = &priv->config; + uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads; + unsigned int diff = 0, olx = 0, i, m; + + static_assert(MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE <= + MLX5_DSEG_MAX, "invalid WQE max size"); + static_assert(MLX5_WQE_CSEG_SIZE == MLX5_WSEG_SIZE, + "invalid WQE Control Segment size"); + static_assert(MLX5_WQE_ESEG_SIZE == MLX5_WSEG_SIZE, + "invalid WQE Ethernet Segment size"); + static_assert(MLX5_WQE_DSEG_SIZE == MLX5_WSEG_SIZE, + "invalid WQE Data Segment size"); + static_assert(MLX5_WQE_SIZE == 4 * MLX5_WSEG_SIZE, + "invalid WQE size"); + MLX5_ASSERT(priv); + if (tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS) { + /* We should support Multi-Segment Packets. */ + olx |= MLX5_TXOFF_CONFIG_MULTI; + } + if (tx_offloads & (DEV_TX_OFFLOAD_TCP_TSO | + DEV_TX_OFFLOAD_VXLAN_TNL_TSO | + DEV_TX_OFFLOAD_GRE_TNL_TSO | + DEV_TX_OFFLOAD_IP_TNL_TSO | + DEV_TX_OFFLOAD_UDP_TNL_TSO)) { + /* We should support TCP Send Offload. */ + olx |= MLX5_TXOFF_CONFIG_TSO; + } + if (tx_offloads & (DEV_TX_OFFLOAD_IP_TNL_TSO | + DEV_TX_OFFLOAD_UDP_TNL_TSO | + DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)) { + /* We should support Software Parser for Tunnels. */ + olx |= MLX5_TXOFF_CONFIG_SWP; + } + if (tx_offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM | + DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_TCP_CKSUM | + DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)) { + /* We should support IP/TCP/UDP Checksums. */ + olx |= MLX5_TXOFF_CONFIG_CSUM; + } + if (tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT) { + /* We should support VLAN insertion. */ + olx |= MLX5_TXOFF_CONFIG_VLAN; + } + if (priv->txqs_n && (*priv->txqs)[0]) { + struct mlx5_txq_data *txd = (*priv->txqs)[0]; + + if (txd->inlen_send) { + /* + * Check the data inline requirements. Data inline + * is enabled on per device basis, we can check + * the first Tx queue only. 
+ * + * If device does not support VLAN insertion in WQE + * and some queues are requested to perform VLAN + * insertion offload than inline must be enabled. + */ + olx |= MLX5_TXOFF_CONFIG_INLINE; + } + } + if (config->mps == MLX5_MPW_ENHANCED && + config->txq_inline_min <= 0) { + /* + * The NIC supports Enhanced Multi-Packet Write + * and does not require minimal inline data. + */ + olx |= MLX5_TXOFF_CONFIG_EMPW; + } + if (rte_flow_dynf_metadata_avail()) { + /* We should support Flow metadata. */ + olx |= MLX5_TXOFF_CONFIG_METADATA; + } + if (config->mps == MLX5_MPW) { + /* + * The NIC supports Legacy Multi-Packet Write. + * The MLX5_TXOFF_CONFIG_MPW controls the + * descriptor building method in combination + * with MLX5_TXOFF_CONFIG_EMPW. + */ + if (!(olx & (MLX5_TXOFF_CONFIG_TSO | + MLX5_TXOFF_CONFIG_SWP | + MLX5_TXOFF_CONFIG_VLAN | + MLX5_TXOFF_CONFIG_METADATA))) + olx |= MLX5_TXOFF_CONFIG_EMPW | + MLX5_TXOFF_CONFIG_MPW; + } + /* + * Scan the routines table to find the minimal + * satisfying routine with requested offloads. + */ + m = RTE_DIM(txoff_func); + for (i = 0; i < RTE_DIM(txoff_func); i++) { + unsigned int tmp; + + tmp = txoff_func[i].olx; + if (tmp == olx) { + /* Meets requested offloads exactly.*/ + m = i; + break; + } + if ((tmp & olx) != olx) { + /* Does not meet requested offloads at all. */ + continue; + } + if ((olx ^ tmp) & MLX5_TXOFF_CONFIG_EMPW) + /* Do not enable eMPW if not configured. */ + continue; + if ((olx ^ tmp) & MLX5_TXOFF_CONFIG_INLINE) + /* Do not enable inlining if not configured. */ + continue; + /* + * Some routine meets the requirements. + * Check whether it has minimal amount + * of not requested offloads. + */ + tmp = __builtin_popcountl(tmp & ~olx); + if (m >= RTE_DIM(txoff_func) || tmp < diff) { + /* First or better match, save and continue. */ + m = i; + diff = tmp; + continue; + } + if (tmp == diff) { + tmp = txoff_func[i].olx ^ txoff_func[m].olx; + if (__builtin_ffsl(txoff_func[i].olx & ~tmp) < + __builtin_ffsl(txoff_func[m].olx & ~tmp)) { + /* Lighter not requested offload. */ + m = i; + } + } + } + if (m >= RTE_DIM(txoff_func)) { + DRV_LOG(DEBUG, "port %u has no selected Tx function" + " for requested offloads %04X", + dev->data->port_id, olx); + return NULL; + } + DRV_LOG(DEBUG, "port %u has selected Tx function" + " supporting offloads %04X/%04X", + dev->data->port_id, olx, txoff_func[m].olx); + if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_MULTI) + DRV_LOG(DEBUG, "\tMULTI (multi segment)"); + if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_TSO) + DRV_LOG(DEBUG, "\tTSO (TCP send offload)"); + if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_SWP) + DRV_LOG(DEBUG, "\tSWP (software parser)"); + if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_CSUM) + DRV_LOG(DEBUG, "\tCSUM (checksum offload)"); + if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_INLINE) + DRV_LOG(DEBUG, "\tINLIN (inline data)"); + if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_VLAN) + DRV_LOG(DEBUG, "\tVLANI (VLAN insertion)"); + if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_METADATA) + DRV_LOG(DEBUG, "\tMETAD (tx Flow metadata)"); + if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_EMPW) { + if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_MPW) + DRV_LOG(DEBUG, "\tMPW (Legacy MPW)"); + else + DRV_LOG(DEBUG, "\tEMPW (Enhanced MPW)"); + } + return txoff_func[m].func; +} + +/** + * DPDK callback to get the TX queue information + * + * @param dev + * Pointer to the device structure. + * + * @param tx_queue_id + * Tx queue identificator. + * + * @param qinfo + * Pointer to the TX queue information structure. + * + * @return + * None. 
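+ *
+ * Applications normally reach this callback through the generic
+ * rte_eth_tx_queue_info_get() API rather than calling it directly,
+ * for example (illustrative sketch only; port_id and queue_id are
+ * assumed to be valid application variables):
+ *
+ *     struct rte_eth_txq_info qinfo;
+ *
+ *     if (rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo) == 0)
+ *             printf("Tx descriptors: %u\n", qinfo.nb_desc);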
+ */ + +void +mlx5_txq_info_get(struct rte_eth_dev *dev, uint16_t tx_queue_id, + struct rte_eth_txq_info *qinfo) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_txq_data *txq = (*priv->txqs)[tx_queue_id]; + struct mlx5_txq_ctrl *txq_ctrl = + container_of(txq, struct mlx5_txq_ctrl, txq); + + if (!txq) + return; + qinfo->nb_desc = txq->elts_s; + qinfo->conf.tx_thresh.pthresh = 0; + qinfo->conf.tx_thresh.hthresh = 0; + qinfo->conf.tx_thresh.wthresh = 0; + qinfo->conf.tx_rs_thresh = 0; + qinfo->conf.tx_free_thresh = 0; + qinfo->conf.tx_deferred_start = txq_ctrl ? 0 : 1; + qinfo->conf.offloads = dev->data->dev_conf.txmode.offloads; +} + +/** + * DPDK callback to get the TX packet burst mode information + * + * @param dev + * Pointer to the device structure. + * + * @param tx_queue_id + * Tx queue identificatior. + * + * @param mode + * Pointer to the burts mode information. + * + * @return + * 0 as success, -EINVAL as failure. + */ + +int +mlx5_tx_burst_mode_get(struct rte_eth_dev *dev, + uint16_t tx_queue_id __rte_unused, + struct rte_eth_burst_mode *mode) +{ + eth_tx_burst_t pkt_burst = dev->tx_pkt_burst; + unsigned int i, olx; + + for (i = 0; i < RTE_DIM(txoff_func); i++) { + if (pkt_burst == txoff_func[i].func) { + olx = txoff_func[i].olx; + snprintf(mode->info, sizeof(mode->info), + "%s%s%s%s%s%s%s%s", + (olx & MLX5_TXOFF_CONFIG_EMPW) ? + ((olx & MLX5_TXOFF_CONFIG_MPW) ? + "Legacy MPW" : "Enhanced MPW") : "No MPW", + (olx & MLX5_TXOFF_CONFIG_MULTI) ? + " + MULTI" : "", + (olx & MLX5_TXOFF_CONFIG_TSO) ? + " + TSO" : "", + (olx & MLX5_TXOFF_CONFIG_SWP) ? + " + SWP" : "", + (olx & MLX5_TXOFF_CONFIG_CSUM) ? + " + CSUM" : "", + (olx & MLX5_TXOFF_CONFIG_INLINE) ? + " + INLINE" : "", + (olx & MLX5_TXOFF_CONFIG_VLAN) ? + " + VLAN" : "", + (olx & MLX5_TXOFF_CONFIG_METADATA) ? + " + METADATA" : ""); + return 0; + } + } + return -EINVAL; +} diff --git a/src/spdk/dpdk/drivers/net/mlx5/mlx5_rxtx.h b/src/spdk/dpdk/drivers/net/mlx5/mlx5_rxtx.h new file mode 100644 index 000000000..48f2b7941 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/mlx5/mlx5_rxtx.h @@ -0,0 +1,683 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2015 6WIND S.A. + * Copyright 2015 Mellanox Technologies, Ltd + */ + +#ifndef RTE_PMD_MLX5_RXTX_H_ +#define RTE_PMD_MLX5_RXTX_H_ + +#include +#include +#include + +/* Verbs header. */ +/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */ +#ifdef PEDANTIC +#pragma GCC diagnostic ignored "-Wpedantic" +#endif +#include +#include +#ifdef PEDANTIC +#pragma GCC diagnostic error "-Wpedantic" +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "mlx5_defs.h" +#include "mlx5_utils.h" +#include "mlx5.h" +#include "mlx5_autoconf.h" + +/* Support tunnel matching. */ +#define MLX5_FLOW_TUNNEL 10 + +/* Mbuf dynamic flag offset for inline. */ +extern uint64_t rte_net_mlx5_dynf_inline_mask; + +struct mlx5_rxq_stats { +#ifdef MLX5_PMD_SOFT_COUNTERS + uint64_t ipackets; /**< Total of successfully received packets. */ + uint64_t ibytes; /**< Total of successfully received bytes. */ +#endif + uint64_t idropped; /**< Total of packets dropped when RX ring full. */ + uint64_t rx_nombuf; /**< Total of RX mbuf allocation failures. */ +}; + +struct mlx5_txq_stats { +#ifdef MLX5_PMD_SOFT_COUNTERS + uint64_t opackets; /**< Total of successfully sent packets. */ + uint64_t obytes; /**< Total of successfully sent bytes. 
*/ +#endif + uint64_t oerrors; /**< Total number of failed transmitted packets. */ +}; + +struct mlx5_priv; + +/* Compressed CQE context. */ +struct rxq_zip { + uint16_t ai; /* Array index. */ + uint16_t ca; /* Current array index. */ + uint16_t na; /* Next array index. */ + uint16_t cq_ci; /* The next CQE. */ + uint32_t cqe_cnt; /* Number of CQEs. */ +}; + +/* Multi-Packet RQ buffer header. */ +struct mlx5_mprq_buf { + struct rte_mempool *mp; + rte_atomic16_t refcnt; /* Atomically accessed refcnt. */ + uint8_t pad[RTE_PKTMBUF_HEADROOM]; /* Headroom for the first packet. */ + struct rte_mbuf_ext_shared_info shinfos[]; + /* + * Shared information per stride. + * More memory will be allocated for the first stride head-room and for + * the strides data. + */ +} __rte_cache_aligned; + +/* Get pointer to the first stride. */ +#define mlx5_mprq_buf_addr(ptr, strd_n) (RTE_PTR_ADD((ptr), \ + sizeof(struct mlx5_mprq_buf) + \ + (strd_n) * \ + sizeof(struct rte_mbuf_ext_shared_info) + \ + RTE_PKTMBUF_HEADROOM)) + +#define MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES 6 +#define MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES 9 + +enum mlx5_rxq_err_state { + MLX5_RXQ_ERR_STATE_NO_ERROR = 0, + MLX5_RXQ_ERR_STATE_NEED_RESET, + MLX5_RXQ_ERR_STATE_NEED_READY, +}; + +/* RX queue descriptor. */ +struct mlx5_rxq_data { + unsigned int csum:1; /* Enable checksum offloading. */ + unsigned int hw_timestamp:1; /* Enable HW timestamp. */ + unsigned int vlan_strip:1; /* Enable VLAN stripping. */ + unsigned int crc_present:1; /* CRC must be subtracted. */ + unsigned int sges_n:3; /* Log 2 of SGEs (max buffers per packet). */ + unsigned int cqe_n:4; /* Log 2 of CQ elements. */ + unsigned int elts_n:4; /* Log 2 of Mbufs. */ + unsigned int rss_hash:1; /* RSS hash result is enabled. */ + unsigned int mark:1; /* Marked flow available on the queue. */ + unsigned int strd_num_n:5; /* Log 2 of the number of stride. */ + unsigned int strd_sz_n:4; /* Log 2 of stride size. */ + unsigned int strd_shift_en:1; /* Enable 2bytes shift on a stride. */ + unsigned int err_state:2; /* enum mlx5_rxq_err_state. */ + unsigned int strd_scatter_en:1; /* Scattered packets from a stride. */ + unsigned int lro:1; /* Enable LRO. */ + unsigned int dynf_meta:1; /* Dynamic metadata is configured. */ + volatile uint32_t *rq_db; + volatile uint32_t *cq_db; + uint16_t port_id; + uint32_t rq_ci; + uint16_t consumed_strd; /* Number of consumed strides in WQE. */ + uint32_t rq_pi; + uint32_t cq_ci; + uint16_t rq_repl_thresh; /* Threshold for buffer replenishment. */ + union { + struct rxq_zip zip; /* Compressed context. */ + uint16_t decompressed; + /* Number of ready mbufs decompressed from the CQ. */ + }; + struct mlx5_mr_ctrl mr_ctrl; /* MR control descriptor. */ + uint16_t mprq_max_memcpy_len; /* Maximum size of packet to memcpy. */ + volatile void *wqes; + volatile struct mlx5_cqe(*cqes)[]; + RTE_STD_C11 + union { + struct rte_mbuf *(*elts)[]; + struct mlx5_mprq_buf *(*mprq_bufs)[]; + }; + struct rte_mempool *mp; + struct rte_mempool *mprq_mp; /* Mempool for Multi-Packet RQ. */ + struct mlx5_mprq_buf *mprq_repl; /* Stashed mbuf for replenish. */ + uint16_t idx; /* Queue index. */ + struct mlx5_rxq_stats stats; + rte_xmm_t mbuf_initializer; /* Default rearm/flags for vectorized Rx. */ + struct rte_mbuf fake_mbuf; /* elts padding for vectorized Rx. */ + void *cq_uar; /* CQ user access region. */ + uint32_t cqn; /* CQ number. */ + uint8_t cq_arm_sn; /* CQ arm seq number. 
*/ +#ifndef RTE_ARCH_64 + rte_spinlock_t *uar_lock_cq; + /* CQ (UAR) access lock required for 32bit implementations */ +#endif + uint32_t tunnel; /* Tunnel information. */ + uint64_t flow_meta_mask; + int32_t flow_meta_offset; +} __rte_cache_aligned; + +enum mlx5_rxq_obj_type { + MLX5_RXQ_OBJ_TYPE_IBV, /* mlx5_rxq_obj with ibv_wq. */ + MLX5_RXQ_OBJ_TYPE_DEVX_RQ, /* mlx5_rxq_obj with mlx5_devx_rq. */ + MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN, + /* mlx5_rxq_obj with mlx5_devx_rq and hairpin support. */ +}; + +enum mlx5_rxq_type { + MLX5_RXQ_TYPE_STANDARD, /* Standard Rx queue. */ + MLX5_RXQ_TYPE_HAIRPIN, /* Hairpin Rx queue. */ + MLX5_RXQ_TYPE_UNDEFINED, +}; + +/* Verbs/DevX Rx queue elements. */ +struct mlx5_rxq_obj { + LIST_ENTRY(mlx5_rxq_obj) next; /* Pointer to the next element. */ + rte_atomic32_t refcnt; /* Reference counter. */ + struct mlx5_rxq_ctrl *rxq_ctrl; /* Back pointer to parent. */ + struct ibv_cq *cq; /* Completion Queue. */ + enum mlx5_rxq_obj_type type; + RTE_STD_C11 + union { + struct ibv_wq *wq; /* Work Queue. */ + struct mlx5_devx_obj *rq; /* DevX object for Rx Queue. */ + }; + struct ibv_comp_channel *channel; +}; + +/* RX queue control descriptor. */ +struct mlx5_rxq_ctrl { + struct mlx5_rxq_data rxq; /* Data path structure. */ + LIST_ENTRY(mlx5_rxq_ctrl) next; /* Pointer to the next element. */ + rte_atomic32_t refcnt; /* Reference counter. */ + struct mlx5_rxq_obj *obj; /* Verbs/DevX elements. */ + struct mlx5_priv *priv; /* Back pointer to private data. */ + enum mlx5_rxq_type type; /* Rxq type. */ + unsigned int socket; /* CPU socket ID for allocations. */ + unsigned int irq:1; /* Whether IRQ is enabled. */ + unsigned int dbr_umem_id_valid:1; /* dbr_umem_id holds a valid value. */ + uint32_t flow_mark_n; /* Number of Mark/Flag flows using this Queue. */ + uint32_t flow_tunnels_n[MLX5_FLOW_TUNNEL]; /* Tunnels counters. */ + uint32_t wqn; /* WQ number. */ + uint16_t dump_file_n; /* Number of dump files. */ + uint32_t dbr_umem_id; /* Storing door-bell information, */ + uint64_t dbr_offset; /* needed when freeing door-bell. */ + struct mlx5dv_devx_umem *wq_umem; /* WQ buffer registration info. */ + struct rte_eth_hairpin_conf hairpin_conf; /* Hairpin configuration. */ +}; + +enum mlx5_ind_tbl_type { + MLX5_IND_TBL_TYPE_IBV, + MLX5_IND_TBL_TYPE_DEVX, +}; + +/* Indirection table. */ +struct mlx5_ind_table_obj { + LIST_ENTRY(mlx5_ind_table_obj) next; /* Pointer to the next element. */ + rte_atomic32_t refcnt; /* Reference counter. */ + enum mlx5_ind_tbl_type type; + RTE_STD_C11 + union { + struct ibv_rwq_ind_table *ind_table; /**< Indirection table. */ + struct mlx5_devx_obj *rqt; /* DevX RQT object. */ + }; + uint32_t queues_n; /**< Number of queues in the list. */ + uint16_t queues[]; /**< Queue list. */ +}; + +/* Hash Rx queue. */ +struct mlx5_hrxq { + ILIST_ENTRY(uint32_t)next; /* Index to the next element. */ + rte_atomic32_t refcnt; /* Reference counter. */ + struct mlx5_ind_table_obj *ind_table; /* Indirection table. */ + RTE_STD_C11 + union { + struct ibv_qp *qp; /* Verbs queue pair. */ + struct mlx5_devx_obj *tir; /* DevX TIR object. */ + }; +#ifdef HAVE_IBV_FLOW_DV_SUPPORT + void *action; /* DV QP action pointer. */ +#endif + uint64_t hash_fields; /* Verbs Hash fields. */ + uint32_t rss_key_len; /* Hash key length in bytes. */ + uint8_t rss_key[]; /* Hash key. */ +}; + +/* TX queue send local data. */ +__extension__ +struct mlx5_txq_local { + struct mlx5_wqe *wqe_last; /* last sent WQE pointer. */ + struct rte_mbuf *mbuf; /* first mbuf to process. 
*/ + uint16_t pkts_copy; /* packets copied to elts. */ + uint16_t pkts_sent; /* packets sent. */ + uint16_t pkts_loop; /* packets sent on loop entry. */ + uint16_t elts_free; /* available elts remain. */ + uint16_t wqe_free; /* available wqe remain. */ + uint16_t mbuf_off; /* data offset in current mbuf. */ + uint16_t mbuf_nseg; /* number of remaining mbuf. */ +}; + +/* TX queue descriptor. */ +__extension__ +struct mlx5_txq_data { + uint16_t elts_head; /* Current counter in (*elts)[]. */ + uint16_t elts_tail; /* Counter of first element awaiting completion. */ + uint16_t elts_comp; /* elts index since last completion request. */ + uint16_t elts_s; /* Number of mbuf elements. */ + uint16_t elts_m; /* Mask for mbuf elements indices. */ + /* Fields related to elts mbuf storage. */ + uint16_t wqe_ci; /* Consumer index for work queue. */ + uint16_t wqe_pi; /* Producer index for work queue. */ + uint16_t wqe_s; /* Number of WQ elements. */ + uint16_t wqe_m; /* Mask Number for WQ elements. */ + uint16_t wqe_comp; /* WQE index since last completion request. */ + uint16_t wqe_thres; /* WQE threshold to request completion in CQ. */ + /* WQ related fields. */ + uint16_t cq_ci; /* Consumer index for completion queue. */ + uint16_t cq_pi; /* Production index for completion queue. */ + uint16_t cqe_s; /* Number of CQ elements. */ + uint16_t cqe_m; /* Mask for CQ indices. */ + /* CQ related fields. */ + uint16_t elts_n:4; /* elts[] length (in log2). */ + uint16_t cqe_n:4; /* Number of CQ elements (in log2). */ + uint16_t wqe_n:4; /* Number of WQ elements (in log2). */ + uint16_t tso_en:1; /* When set hardware TSO is enabled. */ + uint16_t tunnel_en:1; + /* When set TX offload for tunneled packets are supported. */ + uint16_t swp_en:1; /* Whether SW parser is enabled. */ + uint16_t vlan_en:1; /* VLAN insertion in WQE is supported. */ + uint16_t db_nc:1; /* Doorbell mapped to non-cached region. */ + uint16_t db_heu:1; /* Doorbell heuristic write barrier. */ + uint16_t inlen_send; /* Ordinary send data inline size. */ + uint16_t inlen_empw; /* eMPW max packet size to inline. */ + uint16_t inlen_mode; /* Minimal data length to inline. */ + uint32_t qp_num_8s; /* QP number shifted by 8. */ + uint64_t offloads; /* Offloads for Tx Queue. */ + struct mlx5_mr_ctrl mr_ctrl; /* MR control descriptor. */ + struct mlx5_wqe *wqes; /* Work queue. */ + struct mlx5_wqe *wqes_end; /* Work queue array limit. */ +#ifdef RTE_LIBRTE_MLX5_DEBUG + uint32_t *fcqs; /* Free completion queue (debug extended). */ +#else + uint16_t *fcqs; /* Free completion queue. */ +#endif + volatile struct mlx5_cqe *cqes; /* Completion queue. */ + volatile uint32_t *qp_db; /* Work queue doorbell. */ + volatile uint32_t *cq_db; /* Completion queue doorbell. */ + uint16_t port_id; /* Port ID of device. */ + uint16_t idx; /* Queue index. */ + struct mlx5_txq_stats stats; /* TX queue counters. */ +#ifndef RTE_ARCH_64 + rte_spinlock_t *uar_lock; + /* UAR access lock required for 32bit implementations */ +#endif + struct rte_mbuf *elts[0]; + /* Storage for queued packets, must be the last field. */ +} __rte_cache_aligned; + +enum mlx5_txq_obj_type { + MLX5_TXQ_OBJ_TYPE_IBV, /* mlx5_txq_obj with ibv_wq. */ + MLX5_TXQ_OBJ_TYPE_DEVX_HAIRPIN, + /* mlx5_txq_obj with mlx5_devx_tq and hairpin support. */ +}; + +enum mlx5_txq_type { + MLX5_TXQ_TYPE_STANDARD, /* Standard Tx queue. */ + MLX5_TXQ_TYPE_HAIRPIN, /* Hairpin Rx queue. */ +}; + +/* Verbs/DevX Tx queue elements. */ +struct mlx5_txq_obj { + LIST_ENTRY(mlx5_txq_obj) next; /* Pointer to the next element. 
*/ + rte_atomic32_t refcnt; /* Reference counter. */ + struct mlx5_txq_ctrl *txq_ctrl; /* Pointer to the control queue. */ + enum mlx5_txq_obj_type type; /* The txq object type. */ + RTE_STD_C11 + union { + struct { + struct ibv_cq *cq; /* Completion Queue. */ + struct ibv_qp *qp; /* Queue Pair. */ + }; + struct { + struct mlx5_devx_obj *sq; + /* DevX object for Sx queue. */ + struct mlx5_devx_obj *tis; /* The TIS object. */ + }; + }; +}; + +/* TX queue control descriptor. */ +struct mlx5_txq_ctrl { + LIST_ENTRY(mlx5_txq_ctrl) next; /* Pointer to the next element. */ + rte_atomic32_t refcnt; /* Reference counter. */ + unsigned int socket; /* CPU socket ID for allocations. */ + enum mlx5_txq_type type; /* The txq ctrl type. */ + unsigned int max_inline_data; /* Max inline data. */ + unsigned int max_tso_header; /* Max TSO header size. */ + struct mlx5_txq_obj *obj; /* Verbs/DevX queue object. */ + struct mlx5_priv *priv; /* Back pointer to private data. */ + off_t uar_mmap_offset; /* UAR mmap offset for non-primary process. */ + void *bf_reg; /* BlueFlame register from Verbs. */ + uint16_t dump_file_n; /* Number of dump files. */ + struct rte_eth_hairpin_conf hairpin_conf; /* Hairpin configuration. */ + struct mlx5_txq_data txq; /* Data path structure. */ + /* Must be the last field in the structure, contains elts[]. */ +}; + +#define MLX5_TX_BFREG(txq) \ + (MLX5_PROC_PRIV((txq)->port_id)->uar_table[(txq)->idx]) + +/* mlx5_rxq.c */ + +extern uint8_t rss_hash_default_key[]; + +int mlx5_check_mprq_support(struct rte_eth_dev *dev); +int mlx5_rxq_mprq_enabled(struct mlx5_rxq_data *rxq); +int mlx5_mprq_enabled(struct rte_eth_dev *dev); +int mlx5_mprq_free_mp(struct rte_eth_dev *dev); +int mlx5_mprq_alloc_mp(struct rte_eth_dev *dev); +int mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, + unsigned int socket, const struct rte_eth_rxconf *conf, + struct rte_mempool *mp); +int mlx5_rx_hairpin_queue_setup + (struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, + const struct rte_eth_hairpin_conf *hairpin_conf); +void mlx5_rx_queue_release(void *dpdk_rxq); +int mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev); +void mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev); +int mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id); +int mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id); +struct mlx5_rxq_obj *mlx5_rxq_obj_new(struct rte_eth_dev *dev, uint16_t idx, + enum mlx5_rxq_obj_type type); +int mlx5_rxq_obj_verify(struct rte_eth_dev *dev); +struct mlx5_rxq_ctrl *mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, + uint16_t desc, unsigned int socket, + const struct rte_eth_rxconf *conf, + struct rte_mempool *mp); +struct mlx5_rxq_ctrl *mlx5_rxq_hairpin_new + (struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, + const struct rte_eth_hairpin_conf *hairpin_conf); +struct mlx5_rxq_ctrl *mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx); +int mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx); +int mlx5_rxq_verify(struct rte_eth_dev *dev); +int rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl); +int mlx5_ind_table_obj_verify(struct rte_eth_dev *dev); +uint32_t mlx5_hrxq_new(struct rte_eth_dev *dev, + const uint8_t *rss_key, uint32_t rss_key_len, + uint64_t hash_fields, + const uint16_t *queues, uint32_t queues_n, + int tunnel __rte_unused); +uint32_t mlx5_hrxq_get(struct rte_eth_dev *dev, + const uint8_t *rss_key, uint32_t rss_key_len, + uint64_t hash_fields, + const uint16_t *queues, uint32_t queues_n); +int mlx5_hrxq_release(struct 
rte_eth_dev *dev, uint32_t hxrq_idx); +int mlx5_hrxq_verify(struct rte_eth_dev *dev); +enum mlx5_rxq_type mlx5_rxq_get_type(struct rte_eth_dev *dev, uint16_t idx); +struct mlx5_hrxq *mlx5_hrxq_drop_new(struct rte_eth_dev *dev); +void mlx5_hrxq_drop_release(struct rte_eth_dev *dev); +uint64_t mlx5_get_rx_port_offloads(void); +uint64_t mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev); + +/* mlx5_txq.c */ + +int mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, + unsigned int socket, const struct rte_eth_txconf *conf); +int mlx5_tx_hairpin_queue_setup + (struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, + const struct rte_eth_hairpin_conf *hairpin_conf); +void mlx5_tx_queue_release(void *dpdk_txq); +int mlx5_tx_uar_init_secondary(struct rte_eth_dev *dev, int fd); +struct mlx5_txq_obj *mlx5_txq_obj_new(struct rte_eth_dev *dev, uint16_t idx, + enum mlx5_txq_obj_type type); +struct mlx5_txq_obj *mlx5_txq_obj_get(struct rte_eth_dev *dev, uint16_t idx); +int mlx5_txq_obj_release(struct mlx5_txq_obj *txq_ibv); +int mlx5_txq_obj_verify(struct rte_eth_dev *dev); +struct mlx5_txq_ctrl *mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, + uint16_t desc, unsigned int socket, + const struct rte_eth_txconf *conf); +struct mlx5_txq_ctrl *mlx5_txq_hairpin_new + (struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, + const struct rte_eth_hairpin_conf *hairpin_conf); +struct mlx5_txq_ctrl *mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx); +int mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx); +int mlx5_txq_releasable(struct rte_eth_dev *dev, uint16_t idx); +int mlx5_txq_verify(struct rte_eth_dev *dev); +void txq_alloc_elts(struct mlx5_txq_ctrl *txq_ctrl); +void txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl); +uint64_t mlx5_get_tx_port_offloads(struct rte_eth_dev *dev); + +/* mlx5_rxtx.c */ + +extern uint32_t mlx5_ptype_table[]; +extern uint8_t mlx5_cksum_table[]; +extern uint8_t mlx5_swp_types_table[]; + +void mlx5_set_ptype_table(void); +void mlx5_set_cksum_table(void); +void mlx5_set_swp_types_table(void); +uint16_t mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n); +void mlx5_rxq_initialize(struct mlx5_rxq_data *rxq); +__rte_noinline int mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec); +void mlx5_mprq_buf_free_cb(void *addr, void *opaque); +void mlx5_mprq_buf_free(struct mlx5_mprq_buf *buf); +uint16_t mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, + uint16_t pkts_n); +uint16_t removed_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, + uint16_t pkts_n); +uint16_t removed_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, + uint16_t pkts_n); +int mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset); +int mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset); +uint32_t mlx5_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id); +void mlx5_dump_debug_information(const char *path, const char *title, + const void *buf, unsigned int len); +int mlx5_queue_state_modify_primary(struct rte_eth_dev *dev, + const struct mlx5_mp_arg_queue_state_modify *sm); +void mlx5_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, + struct rte_eth_rxq_info *qinfo); +void mlx5_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, + struct rte_eth_txq_info *qinfo); +int mlx5_rx_burst_mode_get(struct rte_eth_dev *dev, uint16_t rx_queue_id, + struct rte_eth_burst_mode *mode); +int mlx5_tx_burst_mode_get(struct rte_eth_dev *dev, uint16_t tx_queue_id, + struct rte_eth_burst_mode *mode); + +/* Vectorized version of mlx5_rxtx.c */ 
+int mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq_data); +int mlx5_check_vec_rx_support(struct rte_eth_dev *dev); +uint16_t mlx5_rx_burst_vec(void *dpdk_txq, struct rte_mbuf **pkts, + uint16_t pkts_n); + +/* mlx5_mr.c */ + +void mlx5_mr_flush_local_cache(struct mlx5_mr_ctrl *mr_ctrl); +uint32_t mlx5_rx_addr2mr_bh(struct mlx5_rxq_data *rxq, uintptr_t addr); +uint32_t mlx5_tx_mb2mr_bh(struct mlx5_txq_data *txq, struct rte_mbuf *mb); +uint32_t mlx5_tx_update_ext_mp(struct mlx5_txq_data *txq, uintptr_t addr, + struct rte_mempool *mp); +int mlx5_dma_map(struct rte_pci_device *pdev, void *addr, uint64_t iova, + size_t len); +int mlx5_dma_unmap(struct rte_pci_device *pdev, void *addr, uint64_t iova, + size_t len); + +/** + * Provide safe 64bit store operation to mlx5 UAR region for both 32bit and + * 64bit architectures. + * + * @param val + * value to write in CPU endian format. + * @param addr + * Address to write to. + * @param lock + * Address of the lock to use for that UAR access. + */ +static __rte_always_inline void +__mlx5_uar_write64_relaxed(uint64_t val, void *addr, + rte_spinlock_t *lock __rte_unused) +{ +#ifdef RTE_ARCH_64 + *(uint64_t *)addr = val; +#else /* !RTE_ARCH_64 */ + rte_spinlock_lock(lock); + *(uint32_t *)addr = val; + rte_io_wmb(); + *((uint32_t *)addr + 1) = val >> 32; + rte_spinlock_unlock(lock); +#endif +} + +/** + * Provide safe 64bit store operation to mlx5 UAR region for both 32bit and + * 64bit architectures while guaranteeing the order of execution with the + * code being executed. + * + * @param val + * value to write in CPU endian format. + * @param addr + * Address to write to. + * @param lock + * Address of the lock to use for that UAR access. + */ +static __rte_always_inline void +__mlx5_uar_write64(uint64_t val, void *addr, rte_spinlock_t *lock) +{ + rte_io_wmb(); + __mlx5_uar_write64_relaxed(val, addr, lock); +} + +/* Assist macros, used instead of directly calling the functions they wrap. */ +#ifdef RTE_ARCH_64 +#define mlx5_uar_write64_relaxed(val, dst, lock) \ + __mlx5_uar_write64_relaxed(val, dst, NULL) +#define mlx5_uar_write64(val, dst, lock) __mlx5_uar_write64(val, dst, NULL) +#else +#define mlx5_uar_write64_relaxed(val, dst, lock) \ + __mlx5_uar_write64_relaxed(val, dst, lock) +#define mlx5_uar_write64(val, dst, lock) __mlx5_uar_write64(val, dst, lock) +#endif + +/** + * Get Memory Pool (MP) from mbuf. If mbuf is indirect, the pool from which the + * cloned mbuf is allocated is returned instead. + * + * @param buf + * Pointer to mbuf. + * + * @return + * Memory pool where data is located for given mbuf. + */ +static inline struct rte_mempool * +mlx5_mb2mp(struct rte_mbuf *buf) +{ + if (unlikely(RTE_MBUF_CLONED(buf))) + return rte_mbuf_from_indirect(buf)->pool; + return buf->pool; +} + +/** + * Query LKey from a packet buffer for Rx. No need to flush local caches for Rx + * as mempool is pre-configured and static. + * + * @param rxq + * Pointer to Rx queue structure. + * @param addr + * Address to search. + * + * @return + * Searched LKey on success, UINT32_MAX on no match. + */ +static __rte_always_inline uint32_t +mlx5_rx_addr2mr(struct mlx5_rxq_data *rxq, uintptr_t addr) +{ + struct mlx5_mr_ctrl *mr_ctrl = &rxq->mr_ctrl; + uint32_t lkey; + + /* Linear search on MR cache array. */ + lkey = mlx5_mr_lookup_lkey(mr_ctrl->cache, &mr_ctrl->mru, + MLX5_MR_CACHE_N, addr); + if (likely(lkey != UINT32_MAX)) + return lkey; + /* Take slower bottom-half (Binary Search) on miss. 
*/ + return mlx5_rx_addr2mr_bh(rxq, addr); +} + +#define mlx5_rx_mb2mr(rxq, mb) mlx5_rx_addr2mr(rxq, (uintptr_t)((mb)->buf_addr)) + +/** + * Query LKey from a packet buffer for Tx. If not found, add the mempool. + * + * @param txq + * Pointer to Tx queue structure. + * @param addr + * Address to search. + * + * @return + * Searched LKey on success, UINT32_MAX on no match. + */ +static __rte_always_inline uint32_t +mlx5_tx_mb2mr(struct mlx5_txq_data *txq, struct rte_mbuf *mb) +{ + struct mlx5_mr_ctrl *mr_ctrl = &txq->mr_ctrl; + uintptr_t addr = (uintptr_t)mb->buf_addr; + uint32_t lkey; + + /* Check generation bit to see if there's any change on existing MRs. */ + if (unlikely(*mr_ctrl->dev_gen_ptr != mr_ctrl->cur_gen)) + mlx5_mr_flush_local_cache(mr_ctrl); + /* Linear search on MR cache array. */ + lkey = mlx5_mr_lookup_lkey(mr_ctrl->cache, &mr_ctrl->mru, + MLX5_MR_CACHE_N, addr); + if (likely(lkey != UINT32_MAX)) + return lkey; + /* Take slower bottom-half on miss. */ + return mlx5_tx_mb2mr_bh(txq, mb); +} + +/** + * Ring TX queue doorbell and flush the update if requested. + * + * @param txq + * Pointer to TX queue structure. + * @param wqe + * Pointer to the last WQE posted in the NIC. + * @param cond + * Request for write memory barrier after BlueFlame update. + */ +static __rte_always_inline void +mlx5_tx_dbrec_cond_wmb(struct mlx5_txq_data *txq, volatile struct mlx5_wqe *wqe, + int cond) +{ + uint64_t *dst = MLX5_TX_BFREG(txq); + volatile uint64_t *src = ((volatile uint64_t *)wqe); + + rte_cio_wmb(); + *txq->qp_db = rte_cpu_to_be_32(txq->wqe_ci); + /* Ensure ordering between DB record and BF copy. */ + rte_wmb(); + mlx5_uar_write64_relaxed(*src, dst, txq->uar_lock); + if (cond) + rte_wmb(); +} + +/** + * Ring TX queue doorbell and flush the update by write memory barrier. + * + * @param txq + * Pointer to TX queue structure. + * @param wqe + * Pointer to the last WQE posted in the NIC. + */ +static __rte_always_inline void +mlx5_tx_dbrec(struct mlx5_txq_data *txq, volatile struct mlx5_wqe *wqe) +{ + mlx5_tx_dbrec_cond_wmb(txq, wqe, 1); +} + +#endif /* RTE_PMD_MLX5_RXTX_H_ */ diff --git a/src/spdk/dpdk/drivers/net/mlx5/mlx5_rxtx_vec.c b/src/spdk/dpdk/drivers/net/mlx5/mlx5_rxtx_vec.c new file mode 100644 index 000000000..1518bdd5b --- /dev/null +++ b/src/spdk/dpdk/drivers/net/mlx5/mlx5_rxtx_vec.c @@ -0,0 +1,170 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2017 6WIND S.A. + * Copyright 2017 Mellanox Technologies, Ltd + */ + +#include +#include +#include + +/* Verbs header. */ +/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */ +#ifdef PEDANTIC +#pragma GCC diagnostic ignored "-Wpedantic" +#endif +#include +#include +#ifdef PEDANTIC +#pragma GCC diagnostic error "-Wpedantic" +#endif + +#include +#include +#include + +#include + +#include "mlx5_defs.h" +#include "mlx5.h" +#include "mlx5_utils.h" +#include "mlx5_rxtx.h" +#include "mlx5_rxtx_vec.h" +#include "mlx5_autoconf.h" + +#if defined RTE_ARCH_X86_64 +#include "mlx5_rxtx_vec_sse.h" +#elif defined RTE_ARCH_ARM64 +#include "mlx5_rxtx_vec_neon.h" +#elif defined RTE_ARCH_PPC_64 +#include "mlx5_rxtx_vec_altivec.h" +#else +#error "This should not be compiled if SIMD instructions are not supported." +#endif + +/** + * Skip error packets. + * + * @param rxq + * Pointer to RX queue structure. + * @param[out] pkts + * Array to store received packets. + * @param pkts_n + * Maximum number of packets in array. + * + * @return + * Number of packets successfully received (<= pkts_n). 
+ */ +static uint16_t +rxq_handle_pending_error(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, + uint16_t pkts_n) +{ + uint16_t n = 0; + unsigned int i; +#ifdef MLX5_PMD_SOFT_COUNTERS + uint32_t err_bytes = 0; +#endif + + for (i = 0; i < pkts_n; ++i) { + struct rte_mbuf *pkt = pkts[i]; + + if (pkt->packet_type == RTE_PTYPE_ALL_MASK || rxq->err_state) { +#ifdef MLX5_PMD_SOFT_COUNTERS + err_bytes += PKT_LEN(pkt); +#endif + rte_pktmbuf_free_seg(pkt); + } else { + pkts[n++] = pkt; + } + } + rxq->stats.idropped += (pkts_n - n); +#ifdef MLX5_PMD_SOFT_COUNTERS + /* Correct counters of errored completions. */ + rxq->stats.ipackets -= (pkts_n - n); + rxq->stats.ibytes -= err_bytes; +#endif + mlx5_rx_err_handle(rxq, 1); + return n; +} + +/** + * DPDK callback for vectorized RX. + * + * @param dpdk_rxq + * Generic pointer to RX queue structure. + * @param[out] pkts + * Array to store received packets. + * @param pkts_n + * Maximum number of packets in array. + * + * @return + * Number of packets successfully received (<= pkts_n). + */ +uint16_t +mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) +{ + struct mlx5_rxq_data *rxq = dpdk_rxq; + uint16_t nb_rx; + uint64_t err = 0; + + nb_rx = rxq_burst_v(rxq, pkts, pkts_n, &err); + if (unlikely(err | rxq->err_state)) + nb_rx = rxq_handle_pending_error(rxq, pkts, nb_rx); + return nb_rx; +} + +/** + * Check a RX queue can support vectorized RX. + * + * @param rxq + * Pointer to RX queue. + * + * @return + * 1 if supported, negative errno value if not. + */ +int __rte_cold +mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq) +{ + struct mlx5_rxq_ctrl *ctrl = + container_of(rxq, struct mlx5_rxq_ctrl, rxq); + + if (mlx5_mprq_enabled(ETH_DEV(ctrl->priv))) + return -ENOTSUP; + if (!ctrl->priv->config.rx_vec_en || rxq->sges_n != 0) + return -ENOTSUP; + if (rxq->lro) + return -ENOTSUP; + return 1; +} + +/** + * Check a device can support vectorized RX. + * + * @param dev + * Pointer to Ethernet device. + * + * @return + * 1 if supported, negative errno value if not. + */ +int __rte_cold +mlx5_check_vec_rx_support(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + uint16_t i; + + if (!priv->config.rx_vec_en) + return -ENOTSUP; + if (mlx5_mprq_enabled(dev)) + return -ENOTSUP; + /* All the configured queues should support. */ + for (i = 0; i < priv->rxqs_n; ++i) { + struct mlx5_rxq_data *rxq = (*priv->rxqs)[i]; + + if (!rxq) + continue; + if (mlx5_rxq_check_vec_support(rxq) < 0) + break; + } + if (i != priv->rxqs_n) + return -ENOTSUP; + return 1; +} diff --git a/src/spdk/dpdk/drivers/net/mlx5/mlx5_rxtx_vec.h b/src/spdk/dpdk/drivers/net/mlx5/mlx5_rxtx_vec.h new file mode 100644 index 000000000..6ddcbfb0a --- /dev/null +++ b/src/spdk/dpdk/drivers/net/mlx5/mlx5_rxtx_vec.h @@ -0,0 +1,125 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2017 6WIND S.A. + * Copyright 2017 Mellanox Technologies, Ltd + */ + +#ifndef RTE_PMD_MLX5_RXTX_VEC_H_ +#define RTE_PMD_MLX5_RXTX_VEC_H_ + +#include +#include + +#include + +#include "mlx5_autoconf.h" + +#include "mlx5_mr.h" + +/* HW checksum offload capabilities of vectorized Tx. */ +#define MLX5_VEC_TX_CKSUM_OFFLOAD_CAP \ + (DEV_TX_OFFLOAD_IPV4_CKSUM | \ + DEV_TX_OFFLOAD_UDP_CKSUM | \ + DEV_TX_OFFLOAD_TCP_CKSUM | \ + DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) + +/* + * Compile time sanity check for vectorized functions. 
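+ *
+ * The vectorized Rx paths shuffle CQE bytes straight into rte_mbuf fields
+ * at fixed offsets, so the static_assert checks below break the build as
+ * soon as the layout of struct rte_mbuf or struct mlx5_cqe changes.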
+ */ + +#define S_ASSERT_RTE_MBUF(s) \ + static_assert(s, "A field of struct rte_mbuf is changed") +#define S_ASSERT_MLX5_CQE(s) \ + static_assert(s, "A field of struct mlx5_cqe is changed") + +/* rxq_cq_decompress_v() */ +S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, pkt_len) == + offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4); +S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, data_len) == + offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8); +S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, hash) == + offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12); + +/* rxq_cq_to_ptype_oflags_v() */ +S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, ol_flags) == + offsetof(struct rte_mbuf, rearm_data) + 8); +S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, rearm_data) == + RTE_ALIGN(offsetof(struct rte_mbuf, rearm_data), 16)); + +/* rxq_burst_v() */ +S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, pkt_len) == + offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4); +S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, data_len) == + offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8); +#if (RTE_CACHE_LINE_SIZE == 128) +S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, pkt_info) == 64); +#else +S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, pkt_info) == 0); +#endif +S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, rx_hash_res) == + offsetof(struct mlx5_cqe, pkt_info) + 12); +S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, rsvd1) + 11 == + offsetof(struct mlx5_cqe, hdr_type_etc)); +S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, vlan_info) == + offsetof(struct mlx5_cqe, hdr_type_etc) + 2); +S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, lro_num_seg) + 12 == + offsetof(struct mlx5_cqe, byte_cnt)); +S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, sop_drop_qpn) == + RTE_ALIGN(offsetof(struct mlx5_cqe, sop_drop_qpn), 8)); +S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, op_own) == + offsetof(struct mlx5_cqe, sop_drop_qpn) + 7); + +/** + * Replenish buffers for RX in bulk. + * + * @param rxq + * Pointer to RX queue structure. + * @param n + * Number of buffers to be replenished. + */ +static inline void +mlx5_rx_replenish_bulk_mbuf(struct mlx5_rxq_data *rxq, uint16_t n) +{ + const uint16_t q_n = 1 << rxq->elts_n; + const uint16_t q_mask = q_n - 1; + uint16_t elts_idx = rxq->rq_ci & q_mask; + struct rte_mbuf **elts = &(*rxq->elts)[elts_idx]; + volatile struct mlx5_wqe_data_seg *wq = + &((volatile struct mlx5_wqe_data_seg *)rxq->wqes)[elts_idx]; + unsigned int i; + + MLX5_ASSERT(n >= MLX5_VPMD_RXQ_RPLNSH_THRESH(q_n)); + MLX5_ASSERT(n <= (uint16_t)(q_n - (rxq->rq_ci - rxq->rq_pi))); + MLX5_ASSERT(MLX5_VPMD_RXQ_RPLNSH_THRESH(q_n) > + MLX5_VPMD_DESCS_PER_LOOP); + /* Not to cross queue end. */ + n = RTE_MIN(n - MLX5_VPMD_DESCS_PER_LOOP, q_n - elts_idx); + if (rte_mempool_get_bulk(rxq->mp, (void *)elts, n) < 0) { + rxq->stats.rx_nombuf += n; + return; + } + for (i = 0; i < n; ++i) { + void *buf_addr; + + /* + * In order to support the mbufs with external attached + * data buffer we should use the buf_addr pointer instead of + * rte_mbuf_buf_addr(). It touches the mbuf itself and may + * impact the performance. + */ + buf_addr = elts[i]->buf_addr; + wq[i].addr = rte_cpu_to_be_64((uintptr_t)buf_addr + + RTE_PKTMBUF_HEADROOM); + /* If there's only one MR, no need to replace LKey in WQE. */ + if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1)) + wq[i].lkey = mlx5_rx_mb2mr(rxq, elts[i]); + } + rxq->rq_ci += n; + /* Prevent overflowing into consumed mbufs. 
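+ * The next MLX5_VPMD_DESCS_PER_LOOP slots of the SW ring are filled with
+ * the dummy fake_mbuf so that the speculative vector loads in rxq_burst_v()
+ * never dereference mbufs that were already handed to the application.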
*/ + elts_idx = rxq->rq_ci & q_mask; + for (i = 0; i < MLX5_VPMD_DESCS_PER_LOOP; ++i) + (*rxq->elts)[elts_idx + i] = &rxq->fake_mbuf; + rte_cio_wmb(); + *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci); +} + +#endif /* RTE_PMD_MLX5_RXTX_VEC_H_ */ diff --git a/src/spdk/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_altivec.h b/src/spdk/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_altivec.h new file mode 100644 index 000000000..26715ef45 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_altivec.h @@ -0,0 +1,1114 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2017 6WIND S.A. + * Copyright 2017 Mellanox Technologies, Ltd + */ + +#ifndef RTE_PMD_MLX5_RXTX_VEC_ALTIVEC_H_ +#define RTE_PMD_MLX5_RXTX_VEC_ALTIVEC_H_ + +#include +#include +#include + +#include + +#include +#include +#include + +#include + +#include "mlx5_defs.h" +#include "mlx5.h" +#include "mlx5_utils.h" +#include "mlx5_rxtx.h" +#include "mlx5_rxtx_vec.h" +#include "mlx5_autoconf.h" + +#ifndef __INTEL_COMPILER +#pragma GCC diagnostic ignored "-Wcast-qual" +#pragma GCC diagnostic ignored "-Wstrict-aliasing" +#endif + +/** + * Store free buffers to RX SW ring. + * + * @param rxq + * Pointer to RX queue structure. + * @param pkts + * Pointer to array of packets to be stored. + * @param pkts_n + * Number of packets to be stored. + */ +static inline void +rxq_copy_mbuf_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t n) +{ + const uint16_t q_mask = (1 << rxq->elts_n) - 1; + struct rte_mbuf **elts = &(*rxq->elts)[rxq->rq_pi & q_mask]; + unsigned int pos; + uint16_t p = n & -2; + + for (pos = 0; pos < p; pos += 2) { + vector unsigned char mbp; + + mbp = (vector unsigned char)vec_vsx_ld(0, + (signed int const *)&elts[pos]); + *(vector unsigned char *)&pkts[pos] = mbp; + } + if (n & 1) + pkts[pos] = elts[pos]; +} + +/** + * Decompress a compressed completion and fill in mbufs in RX SW ring with data + * extracted from the title completion descriptor. + * + * @param rxq + * Pointer to RX queue structure. + * @param cq + * Pointer to completion array having a compressed completion at first. + * @param elts + * Pointer to SW ring to be filled. The first mbuf has to be pre-built from + * the title completion descriptor to be copied to the rest of mbufs. + * + * @return + * Number of mini-CQEs successfully decompressed. + */ +static inline uint16_t +rxq_cq_decompress_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq, + struct rte_mbuf **elts) +{ + volatile struct mlx5_mini_cqe8 *mcq = (void *)&(cq + 1)->pkt_info; + struct rte_mbuf *t_pkt = elts[0]; /* Title packet is pre-built. */ + const vector unsigned char zero = (vector unsigned char){0}; + /* Mask to shuffle from extracted mini CQE to mbuf. */ + const vector unsigned char shuf_mask1 = (vector unsigned char){ + -1, -1, -1, -1, /* skip packet_type */ + 7, 6, -1, -1, /* bswap16, pkt_len */ + 7, 6, /* bswap16, data_len */ + -1, -1, /* skip vlan_tci */ + 3, 2, 1, 0}; /* bswap32, rss */ + const vector unsigned char shuf_mask2 = (vector unsigned char){ + -1, -1, -1, -1, /* skip packet_type */ + 15, 14, -1, -1, /* bswap16, pkt_len */ + 15, 14, /* data_len, bswap16 */ + -1, -1, /* skip vlan_tci */ + 11, 10, 9, 8}; /* bswap32, rss */ + /* Restore the compressed count. Must be 16 bits. 
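+ * For a compressed session the title CQE's byte_cnt holds the number of
+ * mini-CQEs rather than a packet length; rxq_burst_v() stored it (less the
+ * CRC adjustment) in t_pkt->data_len, so the CRC length is added back here
+ * to recover the count.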
*/ + const uint16_t mcqe_n = t_pkt->data_len + + (rxq->crc_present * RTE_ETHER_CRC_LEN); + const vector unsigned char rearm = + (vector unsigned char)vec_vsx_ld(0, + (signed int const *)&t_pkt->rearm_data); + const vector unsigned char rxdf = + (vector unsigned char)vec_vsx_ld(0, + (signed int const *)&t_pkt->rx_descriptor_fields1); + const vector unsigned char crc_adj = + (vector unsigned char)(vector unsigned short){ + 0, 0, rxq->crc_present * RTE_ETHER_CRC_LEN, 0, + rxq->crc_present * RTE_ETHER_CRC_LEN, 0, 0, 0}; + const vector unsigned short rxdf_sel_mask = + (vector unsigned short){ + 0xffff, 0xffff, 0, 0, 0, 0xffff, 0, 0}; + const uint32_t flow_tag = t_pkt->hash.fdir.hi; + unsigned int pos; + unsigned int i; + unsigned int inv = 0; + +#ifdef MLX5_PMD_SOFT_COUNTERS + const vector unsigned char ones = vec_splat_u8(-1); + uint32_t rcvd_byte = 0; + /* Mask to shuffle byte_cnt to add up stats. Do bswap16 for all. */ + const vector unsigned char len_shuf_mask = (vector unsigned char){ + 3, 2, 11, 10, + 7, 6, 15, 14, + -1, -1, -1, -1, + -1, -1, -1, -1}; +#endif + + /* + * A. load mCQEs into a 128bit register. + * B. store rearm data to mbuf. + * C. combine data from mCQEs with rx_descriptor_fields1. + * D. store rx_descriptor_fields1. + * E. store flow tag (rte_flow mark). + */ + for (pos = 0; pos < mcqe_n; ) { + vector unsigned char mcqe1, mcqe2; + vector unsigned char rxdf1, rxdf2; +#ifdef MLX5_PMD_SOFT_COUNTERS + const vector unsigned short mcqe_sel_mask = + (vector unsigned short){0, 0, 0xffff, 0xffff, + 0, 0, 0xfff, 0xffff}; + const vector unsigned char lower_half = { + 0, 1, 4, 5, 8, 9, 12, 13, 16, + 17, 20, 21, 24, 25, 28, 29}; + const vector unsigned char upper_half = { + 2, 3, 6, 7, 10, 11, 14, 15, + 18, 19, 22, 23, 26, 27, 30, 31}; + vector unsigned short left, right; + vector unsigned char byte_cnt, invalid_mask; + vector unsigned long lshift; + __attribute__((altivec(vector__))) + __attribute__((altivec(bool__))) + unsigned long long shmask; + const vector unsigned long shmax = {64, 64}; +#endif + + for (i = 0; i < MLX5_VPMD_DESCS_PER_LOOP; ++i) + if (likely(pos + i < mcqe_n)) + rte_prefetch0((void *)(cq + pos + i)); + + /* A.1 load mCQEs into a 128bit register. */ + mcqe1 = (vector unsigned char)vec_vsx_ld(0, + (signed int const *)&mcq[pos % 8]); + mcqe2 = (vector unsigned char)vec_vsx_ld(0, + (signed int const *)&mcq[pos % 8 + 2]); + + /* B.1 store rearm data to mbuf. */ + *(vector unsigned char *) + &elts[pos]->rearm_data = rearm; + *(vector unsigned char *) + &elts[pos + 1]->rearm_data = rearm; + + /* C.1 combine data from mCQEs with rx_descriptor_fields1. */ + rxdf1 = vec_perm(mcqe1, zero, shuf_mask1); + rxdf2 = vec_perm(mcqe1, zero, shuf_mask2); + rxdf1 = (vector unsigned char) + ((vector unsigned short)rxdf1 - + (vector unsigned short)crc_adj); + rxdf2 = (vector unsigned char) + ((vector unsigned short)rxdf2 - + (vector unsigned short)crc_adj); + rxdf1 = (vector unsigned char) + vec_sel((vector unsigned short)rxdf1, + (vector unsigned short)rxdf, rxdf_sel_mask); + rxdf2 = (vector unsigned char) + vec_sel((vector unsigned short)rxdf2, + (vector unsigned short)rxdf, rxdf_sel_mask); + + /* D.1 store rx_descriptor_fields1. */ + *(vector unsigned char *) + &elts[pos]->rx_descriptor_fields1 = rxdf1; + *(vector unsigned char *) + &elts[pos + 1]->rx_descriptor_fields1 = rxdf2; + + /* B.1 store rearm data to mbuf. 
*/ + *(vector unsigned char *) + &elts[pos + 2]->rearm_data = rearm; + *(vector unsigned char *) + &elts[pos + 3]->rearm_data = rearm; + + /* C.1 combine data from mCQEs with rx_descriptor_fields1. */ + rxdf1 = vec_perm(mcqe2, zero, shuf_mask1); + rxdf2 = vec_perm(mcqe2, zero, shuf_mask2); + rxdf1 = (vector unsigned char) + ((vector unsigned short)rxdf1 - + (vector unsigned short)crc_adj); + rxdf2 = (vector unsigned char) + ((vector unsigned short)rxdf2 - + (vector unsigned short)crc_adj); + rxdf1 = (vector unsigned char) + vec_sel((vector unsigned short)rxdf1, + (vector unsigned short)rxdf, rxdf_sel_mask); + rxdf2 = (vector unsigned char) + vec_sel((vector unsigned short)rxdf2, + (vector unsigned short)rxdf, rxdf_sel_mask); + + /* D.1 store rx_descriptor_fields1. */ + *(vector unsigned char *) + &elts[pos + 2]->rx_descriptor_fields1 = rxdf1; + *(vector unsigned char *) + &elts[pos + 3]->rx_descriptor_fields1 = rxdf2; + +#ifdef MLX5_PMD_SOFT_COUNTERS + invalid_mask = (vector unsigned char)(vector unsigned long){ + (mcqe_n - pos) * sizeof(uint16_t) * 8, 0}; + + lshift = + vec_splat((vector unsigned long)invalid_mask, 0); + shmask = vec_cmpgt(shmax, lshift); + invalid_mask = (vector unsigned char) + vec_sl((vector unsigned long)ones, lshift); + invalid_mask = (vector unsigned char) + vec_sel((vector unsigned long)shmask, + (vector unsigned long)invalid_mask, shmask); + + mcqe1 = (vector unsigned char) + vec_sro((vector unsigned short)mcqe1, + (vector unsigned char){32}), + byte_cnt = (vector unsigned char) + vec_sel((vector unsigned short)mcqe1, + (vector unsigned short)mcqe2, mcqe_sel_mask); + byte_cnt = vec_perm(byte_cnt, zero, len_shuf_mask); + byte_cnt = (vector unsigned char) + vec_andc((vector unsigned long)byte_cnt, + (vector unsigned long)invalid_mask); + left = vec_perm((vector unsigned short)byte_cnt, + (vector unsigned short)zero, lower_half); + right = vec_perm((vector unsigned short)byte_cnt, + (vector unsigned short)zero, upper_half); + byte_cnt = (vector unsigned char)vec_add(left, right); + left = vec_perm((vector unsigned short)byte_cnt, + (vector unsigned short)zero, lower_half); + right = vec_perm((vector unsigned short)byte_cnt, + (vector unsigned short)zero, upper_half); + byte_cnt = (vector unsigned char)vec_add(left, right); + rcvd_byte += ((vector unsigned long)byte_cnt)[0]; +#endif + + if (rxq->mark) { + /* E.1 store flow tag (rte_flow mark). */ + elts[pos]->hash.fdir.hi = flow_tag; + elts[pos + 1]->hash.fdir.hi = flow_tag; + elts[pos + 2]->hash.fdir.hi = flow_tag; + elts[pos + 3]->hash.fdir.hi = flow_tag; + } + if (rxq->dynf_meta) { + int32_t offs = rxq->flow_meta_offset; + const uint32_t meta = + *RTE_MBUF_DYNFIELD(t_pkt, offs, uint32_t *); + + /* Check if title packet has valid metadata. */ + if (meta) { + MLX5_ASSERT(t_pkt->ol_flags & + rxq->flow_meta_mask); + *RTE_MBUF_DYNFIELD(elts[pos], offs, + uint32_t *) = meta; + *RTE_MBUF_DYNFIELD(elts[pos + 1], offs, + uint32_t *) = meta; + *RTE_MBUF_DYNFIELD(elts[pos + 2], offs, + uint32_t *) = meta; + *RTE_MBUF_DYNFIELD(elts[pos + 3], offs, + uint32_t *) = meta; + } + } + + pos += MLX5_VPMD_DESCS_PER_LOOP; + /* Move to next CQE and invalidate consumed CQEs. */ + if (!(pos & 0x7) && pos < mcqe_n) { + mcq = (void *)&(cq + pos)->pkt_info; + for (i = 0; i < 8; ++i) + cq[inv++].op_own = MLX5_CQE_INVALIDATE; + } + } + + /* Invalidate the rest of CQEs. 
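+ * Every CQE consumed by the compressed session is marked with
+ * MLX5_CQE_INVALIDATE so that later polls cannot mistake the stale
+ * entries for new completions before the hardware rewrites them.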
*/ + for (; inv < mcqe_n; ++inv) + cq[inv].op_own = MLX5_CQE_INVALIDATE; + +#ifdef MLX5_PMD_SOFT_COUNTERS + rxq->stats.ipackets += mcqe_n; + rxq->stats.ibytes += rcvd_byte; +#endif + + rxq->cq_ci += mcqe_n; + return mcqe_n; +} + +/** + * Calculate packet type and offload flag for mbuf and store it. + * + * @param rxq + * Pointer to RX queue structure. + * @param cqes[4] + * Array of four 16bytes completions extracted from the original completion + * descriptor. + * @param op_err + * Opcode vector having responder error status. Each field is 4B. + * @param pkts + * Pointer to array of packets to be filled. + */ +static inline void +rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq, + vector unsigned char cqes[4], vector unsigned char op_err, + struct rte_mbuf **pkts) +{ + vector unsigned char pinfo0, pinfo1; + vector unsigned char pinfo, ptype; + vector unsigned char ol_flags = (vector unsigned char) + (vector unsigned int){ + rxq->rss_hash * PKT_RX_RSS_HASH | + rxq->hw_timestamp * PKT_RX_TIMESTAMP, + rxq->rss_hash * PKT_RX_RSS_HASH | + rxq->hw_timestamp * PKT_RX_TIMESTAMP, + rxq->rss_hash * PKT_RX_RSS_HASH | + rxq->hw_timestamp * PKT_RX_TIMESTAMP, + rxq->rss_hash * PKT_RX_RSS_HASH | + rxq->hw_timestamp * PKT_RX_TIMESTAMP}; + vector unsigned char cv_flags; + const vector unsigned char zero = (vector unsigned char){0}; + const vector unsigned char ptype_mask = + (vector unsigned char)(vector unsigned int){ + 0x0000fd06, 0x0000fd06, 0x0000fd06, 0x0000fd06}; + const vector unsigned char ptype_ol_mask = + (vector unsigned char)(vector unsigned int){ + 0x00000106, 0x00000106, 0x00000106, 0x00000106}; + const vector unsigned char pinfo_mask = + (vector unsigned char)(vector unsigned int){ + 0x00000003, 0x00000003, 0x00000003, 0x00000003}; + const vector unsigned char cv_flag_sel = (vector unsigned char){ + 0, (uint8_t)(PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED), + (uint8_t)(PKT_RX_IP_CKSUM_GOOD >> 1), 0, + (uint8_t)(PKT_RX_L4_CKSUM_GOOD >> 1), 0, + (uint8_t)((PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> 1), + 0, 0, 0, 0, 0, 0, 0, 0, 0}; + const vector unsigned char cv_mask = + (vector unsigned char)(vector unsigned int){ + PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD | + PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, + PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD | + PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, + PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD | + PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, + PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD | + PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED}; + const vector unsigned char mbuf_init = + (vector unsigned char)vec_vsx_ld + (0, (vector unsigned char *)&rxq->mbuf_initializer); + const vector unsigned short rearm_sel_mask = + (vector unsigned short){0, 0, 0, 0, 0xffff, 0xffff, 0, 0}; + vector unsigned char rearm0, rearm1, rearm2, rearm3; + uint8_t pt_idx0, pt_idx1, pt_idx2, pt_idx3; + + /* Extract pkt_info field. */ + pinfo0 = (vector unsigned char) + vec_mergeh((vector unsigned int)cqes[0], + (vector unsigned int)cqes[1]); + pinfo1 = (vector unsigned char) + vec_mergeh((vector unsigned int)cqes[2], + (vector unsigned int)cqes[3]); + pinfo = (vector unsigned char) + vec_mergeh((vector unsigned long)pinfo0, + (vector unsigned long)pinfo1); + + /* Extract hdr_type_etc field. 
*/ + pinfo0 = (vector unsigned char) + vec_mergel((vector unsigned int)cqes[0], + (vector unsigned int)cqes[1]); + pinfo1 = (vector unsigned char) + vec_mergel((vector unsigned int)cqes[2], + (vector unsigned int)cqes[3]); + ptype = (vector unsigned char) + vec_mergeh((vector unsigned long)pinfo0, + (vector unsigned long)pinfo1); + + if (rxq->mark) { + const vector unsigned char pinfo_ft_mask = + (vector unsigned char)(vector unsigned int){ + 0xffffff00, 0xffffff00, 0xffffff00, 0xffffff00}; + const vector unsigned char fdir_flags = + (vector unsigned char)(vector unsigned int){ + PKT_RX_FDIR, PKT_RX_FDIR, + PKT_RX_FDIR, PKT_RX_FDIR}; + vector unsigned char fdir_id_flags = + (vector unsigned char)(vector unsigned int){ + PKT_RX_FDIR_ID, PKT_RX_FDIR_ID, + PKT_RX_FDIR_ID, PKT_RX_FDIR_ID}; + vector unsigned char flow_tag, invalid_mask; + + flow_tag = (vector unsigned char) + vec_and((vector unsigned long)pinfo, + (vector unsigned long)pinfo_ft_mask); + + /* Check if flow tag is non-zero then set PKT_RX_FDIR. */ + invalid_mask = (vector unsigned char) + vec_cmpeq((vector unsigned int)flow_tag, + (vector unsigned int)zero); + ol_flags = (vector unsigned char) + vec_or((vector unsigned long)ol_flags, + (vector unsigned long) + vec_andc((vector unsigned long)fdir_flags, + (vector unsigned long)invalid_mask)); + + /* Mask out invalid entries. */ + fdir_id_flags = (vector unsigned char) + vec_andc((vector unsigned long)fdir_id_flags, + (vector unsigned long)invalid_mask); + + /* Check if flow tag MLX5_FLOW_MARK_DEFAULT. */ + ol_flags = (vector unsigned char) + vec_or((vector unsigned long)ol_flags, + (vector unsigned long) + vec_andc((vector unsigned long)fdir_id_flags, + (vector unsigned long) + vec_cmpeq((vector unsigned int)flow_tag, + (vector unsigned int)pinfo_ft_mask))); + } + /* + * Merge the two fields to generate the following: + * bit[1] = l3_ok + * bit[2] = l4_ok + * bit[8] = cv + * bit[11:10] = l3_hdr_type + * bit[14:12] = l4_hdr_type + * bit[15] = ip_frag + * bit[16] = tunneled + * bit[17] = outer_l3_type + */ + ptype = (vector unsigned char) + vec_and((vector unsigned long)ptype, + (vector unsigned long)ptype_mask); + pinfo = (vector unsigned char) + vec_and((vector unsigned long)pinfo, + (vector unsigned long)pinfo_mask); + pinfo = (vector unsigned char) + vec_sl((vector unsigned int)pinfo, + (vector unsigned int){16, 16, 16, 16}); + + /* Make pinfo has merged fields for ol_flags calculation. */ + pinfo = (vector unsigned char) + vec_or((vector unsigned long)ptype, + (vector unsigned long)pinfo); + ptype = (vector unsigned char) + vec_sr((vector unsigned int)pinfo, + (vector unsigned int){10, 10, 10, 10}); + ptype = (vector unsigned char) + vec_packs((vector unsigned int)ptype, + (vector unsigned int)zero); + + /* Errored packets will have RTE_PTYPE_ALL_MASK. 
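+ * op_err is shifted and OR-ed into the ptype index so that errored
+ * completions resolve to the table entry holding RTE_PTYPE_ALL_MASK;
+ * rxq_handle_pending_error() later uses that marker to free such mbufs
+ * and fix up the counters.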
*/ + op_err = (vector unsigned char) + vec_sr((vector unsigned short)op_err, + (vector unsigned short){8, 8, 8, 8, 8, 8, 8, 8}); + ptype = (vector unsigned char) + vec_or((vector unsigned long)ptype, + (vector unsigned long)op_err); + + pt_idx0 = (uint8_t)((vector unsigned char)ptype)[0]; + pt_idx1 = (uint8_t)((vector unsigned char)ptype)[2]; + pt_idx2 = (uint8_t)((vector unsigned char)ptype)[4]; + pt_idx3 = (uint8_t)((vector unsigned char)ptype)[6]; + + pkts[0]->packet_type = mlx5_ptype_table[pt_idx0] | + !!(pt_idx0 & (1 << 6)) * rxq->tunnel; + pkts[1]->packet_type = mlx5_ptype_table[pt_idx1] | + !!(pt_idx1 & (1 << 6)) * rxq->tunnel; + pkts[2]->packet_type = mlx5_ptype_table[pt_idx2] | + !!(pt_idx2 & (1 << 6)) * rxq->tunnel; + pkts[3]->packet_type = mlx5_ptype_table[pt_idx3] | + !!(pt_idx3 & (1 << 6)) * rxq->tunnel; + + /* Fill flags for checksum and VLAN. */ + pinfo = (vector unsigned char) + vec_and((vector unsigned long)pinfo, + (vector unsigned long)ptype_ol_mask); + pinfo = vec_perm(cv_flag_sel, zero, pinfo); + + /* Locate checksum flags at byte[2:1] and merge with VLAN flags. */ + cv_flags = (vector unsigned char) + vec_sl((vector unsigned int)pinfo, + (vector unsigned int){9, 9, 9, 9}); + cv_flags = (vector unsigned char) + vec_or((vector unsigned long)pinfo, + (vector unsigned long)cv_flags); + + /* Move back flags to start from byte[0]. */ + cv_flags = (vector unsigned char) + vec_sr((vector unsigned int)cv_flags, + (vector unsigned int){8, 8, 8, 8}); + + /* Mask out garbage bits. */ + cv_flags = (vector unsigned char) + vec_and((vector unsigned long)cv_flags, + (vector unsigned long)cv_mask); + + /* Merge to ol_flags. */ + ol_flags = (vector unsigned char) + vec_or((vector unsigned long)ol_flags, + (vector unsigned long)cv_flags); + + /* Merge mbuf_init and ol_flags. */ + rearm0 = (vector unsigned char) + vec_sel((vector unsigned short)mbuf_init, + (vector unsigned short) + vec_slo((vector unsigned short)ol_flags, + (vector unsigned char){64}), rearm_sel_mask); + rearm1 = (vector unsigned char) + vec_sel((vector unsigned short)mbuf_init, + (vector unsigned short) + vec_slo((vector unsigned short)ol_flags, + (vector unsigned char){32}), rearm_sel_mask); + rearm2 = (vector unsigned char) + vec_sel((vector unsigned short)mbuf_init, + (vector unsigned short)ol_flags, rearm_sel_mask); + rearm3 = (vector unsigned char) + vec_sel((vector unsigned short)mbuf_init, + (vector unsigned short) + vec_sro((vector unsigned short)ol_flags, + (vector unsigned char){32}), rearm_sel_mask); + + /* Write 8B rearm_data and 8B ol_flags. */ + vec_vsx_st(rearm0, 0, + (vector unsigned char *)&pkts[0]->rearm_data); + vec_vsx_st(rearm1, 0, + (vector unsigned char *)&pkts[1]->rearm_data); + vec_vsx_st(rearm2, 0, + (vector unsigned char *)&pkts[2]->rearm_data); + vec_vsx_st(rearm3, 0, + (vector unsigned char *)&pkts[3]->rearm_data); +} + + +/** + * Receive burst of packets. An errored completion also consumes a mbuf, but the + * packet_type is set to be RTE_PTYPE_ALL_MASK. Marked mbufs should be freed + * before returning to application. + * + * @param rxq + * Pointer to RX queue structure. + * @param[out] pkts + * Array to store received packets. + * @param pkts_n + * Maximum number of packets in array. + * @param[out] err + * Pointer to a flag. Set non-zero value if pkts array has at least one error + * packet to handle. + * + * @return + * Number of packets received including errors (<= pkts_n). 
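+ *
+ * Typical call pattern (a sketch mirroring the mlx5_rx_burst_vec() wrapper
+ * in mlx5_rxtx_vec.c, its caller):
+ *
+ * @code
+ * uint64_t err = 0;
+ * uint16_t nb_rx = rxq_burst_v(rxq, pkts, pkts_n, &err);
+ *
+ * if (unlikely(err | rxq->err_state))
+ *	nb_rx = rxq_handle_pending_error(rxq, pkts, nb_rx);
+ * @endcode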
+ */ +static inline uint16_t +rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n, + uint64_t *err) +{ + const uint16_t q_n = 1 << rxq->cqe_n; + const uint16_t q_mask = q_n - 1; + volatile struct mlx5_cqe *cq; + struct rte_mbuf **elts; + unsigned int pos; + uint64_t n; + uint16_t repl_n; + uint64_t comp_idx = MLX5_VPMD_DESCS_PER_LOOP; + uint16_t nocmp_n = 0; + uint16_t rcvd_pkt = 0; + unsigned int cq_idx = rxq->cq_ci & q_mask; + unsigned int elts_idx; + unsigned int ownership = !!(rxq->cq_ci & (q_mask + 1)); + const vector unsigned char zero = (vector unsigned char){0}; + const vector unsigned char ones = vec_splat_u8(-1); + const vector unsigned char owner_check = + (vector unsigned char)(vector unsigned long){ + 0x0100000001000000LL, 0x0100000001000000LL}; + const vector unsigned char opcode_check = + (vector unsigned char)(vector unsigned long){ + 0xf0000000f0000000LL, 0xf0000000f0000000LL}; + const vector unsigned char format_check = + (vector unsigned char)(vector unsigned long){ + 0x0c0000000c000000LL, 0x0c0000000c000000LL}; + const vector unsigned char resp_err_check = + (vector unsigned char)(vector unsigned long){ + 0xe0000000e0000000LL, 0xe0000000e0000000LL}; +#ifdef MLX5_PMD_SOFT_COUNTERS + uint32_t rcvd_byte = 0; + /* Mask to shuffle byte_cnt to add up stats. Do bswap16 for all. */ + const vector unsigned char len_shuf_mask = (vector unsigned char){ + 1, 0, 5, 4, + 9, 8, 13, 12, + -1, -1, -1, -1, + -1, -1, -1, -1}; +#endif + /* Mask to shuffle from extracted CQE to mbuf. */ + const vector unsigned char shuf_mask = (vector unsigned char){ + 5, 4, /* bswap16, pkt_len */ + -1, -1, /* zero out 2nd half of pkt_len */ + 5, 4, /* bswap16, data_len */ + 11, 10, /* bswap16, vlan+tci */ + 15, 14, 13, 12, /* bswap32, rss */ + 1, 2, 3, -1}; /* fdir.hi */ + /* Mask to blend from the last Qword to the first DQword. */ + /* Mask to blend from the last Qword to the first DQword. */ + const vector unsigned char blend_mask = (vector unsigned char){ + -1, 0, 0, 0, + 0, 0, 0, 0, + -1, -1, -1, -1, + -1, -1, -1, -1}; + const vector unsigned char crc_adj = + (vector unsigned char)(vector unsigned short){ + rxq->crc_present * RTE_ETHER_CRC_LEN, 0, + rxq->crc_present * RTE_ETHER_CRC_LEN, 0, 0, 0, 0, 0}; + const vector unsigned char flow_mark_adj = + (vector unsigned char)(vector unsigned int){ + 0, 0, 0, rxq->mark * (-1)}; + const vector unsigned short cqe_sel_mask1 = + (vector unsigned short){0, 0, 0, 0, 0xffff, 0xffff, 0, 0}; + const vector unsigned short cqe_sel_mask2 = + (vector unsigned short){0, 0, 0xffff, 0, 0, 0, 0, 0}; + + MLX5_ASSERT(rxq->sges_n == 0); + MLX5_ASSERT(rxq->cqe_n == rxq->elts_n); + cq = &(*rxq->cqes)[cq_idx]; + rte_prefetch0(cq); + rte_prefetch0(cq + 1); + rte_prefetch0(cq + 2); + rte_prefetch0(cq + 3); + pkts_n = RTE_MIN(pkts_n, MLX5_VPMD_RX_MAX_BURST); + + repl_n = q_n - (rxq->rq_ci - rxq->rq_pi); + if (repl_n >= rxq->rq_repl_thresh) + mlx5_rx_replenish_bulk_mbuf(rxq, repl_n); + /* See if there're unreturned mbufs from compressed CQE. */ + rcvd_pkt = rxq->decompressed; + if (rcvd_pkt > 0) { + rcvd_pkt = RTE_MIN(rcvd_pkt, pkts_n); + rxq_copy_mbuf_v(rxq, pkts, rcvd_pkt); + rxq->rq_pi += rcvd_pkt; + rxq->decompressed -= rcvd_pkt; + pkts += rcvd_pkt; + } + elts_idx = rxq->rq_pi & q_mask; + elts = &(*rxq->elts)[elts_idx]; + /* Not to overflow pkts array. */ + pkts_n = RTE_ALIGN_FLOOR(pkts_n - rcvd_pkt, MLX5_VPMD_DESCS_PER_LOOP); + /* Not to cross queue end. 
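+ * The burst is clamped so that neither the SW ring nor the CQ wraps
+ * inside the main loop; whatever is cut off here is simply picked up by
+ * the next call.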
*/ + pkts_n = RTE_MIN(pkts_n, q_n - elts_idx); + pkts_n = RTE_MIN(pkts_n, q_n - cq_idx); + if (!pkts_n) + return rcvd_pkt; + /* At this point, there shouldn't be any remaining packets. */ + MLX5_ASSERT(rxq->decompressed == 0); + + /* + * A. load first Qword (8bytes) in one loop. + * B. copy 4 mbuf pointers from elts ring to returing pkts. + * C. load remaining CQE data and extract necessary fields. + * Final 16bytes cqes[] extracted from original 64bytes CQE has the + * following structure: + * struct { + * uint8_t pkt_info; + * uint8_t flow_tag[3]; + * uint16_t byte_cnt; + * uint8_t rsvd4; + * uint8_t op_own; + * uint16_t hdr_type_etc; + * uint16_t vlan_info; + * uint32_t rx_has_res; + * } c; + * D. fill in mbuf. + * E. get valid CQEs. + * F. find compressed CQE. + */ + for (pos = 0; + pos < pkts_n; + pos += MLX5_VPMD_DESCS_PER_LOOP) { + vector unsigned char cqes[MLX5_VPMD_DESCS_PER_LOOP]; + vector unsigned char cqe_tmp1, cqe_tmp2; + vector unsigned char pkt_mb0, pkt_mb1, pkt_mb2, pkt_mb3; + vector unsigned char op_own, op_own_tmp1, op_own_tmp2; + vector unsigned char opcode, owner_mask, invalid_mask; + vector unsigned char comp_mask; + vector unsigned char mask; +#ifdef MLX5_PMD_SOFT_COUNTERS + const vector unsigned char lower_half = { + 0, 1, 4, 5, 8, 9, 12, 13, + 16, 17, 20, 21, 24, 25, 28, 29}; + const vector unsigned char upper_half = { + 2, 3, 6, 7, 10, 11, 14, 15, + 18, 19, 22, 23, 26, 27, 30, 31}; + const vector unsigned long shmax = {64, 64}; + vector unsigned char byte_cnt; + vector unsigned short left, right; + vector unsigned long lshift; + vector __attribute__((altivec(bool__))) + unsigned long shmask; +#endif + vector unsigned char mbp1, mbp2; + vector unsigned char p = + (vector unsigned char)(vector unsigned short){ + 0, 1, 2, 3, 0, 0, 0, 0}; + unsigned int p1, p2, p3; + + /* Prefetch next 4 CQEs. */ + if (pkts_n - pos >= 2 * MLX5_VPMD_DESCS_PER_LOOP) { + rte_prefetch0(&cq[pos + MLX5_VPMD_DESCS_PER_LOOP]); + rte_prefetch0(&cq[pos + MLX5_VPMD_DESCS_PER_LOOP + 1]); + rte_prefetch0(&cq[pos + MLX5_VPMD_DESCS_PER_LOOP + 2]); + rte_prefetch0(&cq[pos + MLX5_VPMD_DESCS_PER_LOOP + 3]); + } + + /* A.0 do not cross the end of CQ. */ + mask = (vector unsigned char)(vector unsigned long){ + (pkts_n - pos) * sizeof(uint16_t) * 8, 0}; + + { + vector unsigned long lshift; + vector __attribute__((altivec(bool__))) + unsigned long shmask; + const vector unsigned long shmax = {64, 64}; + + lshift = vec_splat((vector unsigned long)mask, 0); + shmask = vec_cmpgt(shmax, lshift); + mask = (vector unsigned char) + vec_sl((vector unsigned long)ones, lshift); + mask = (vector unsigned char) + vec_sel((vector unsigned long)shmask, + (vector unsigned long)mask, shmask); + } + + p = (vector unsigned char) + vec_andc((vector unsigned long)p, + (vector unsigned long)mask); + + /* A.1 load cqes. */ + p3 = (unsigned int)((vector unsigned short)p)[3]; + cqes[3] = (vector unsigned char)(vector unsigned long){ + *(__rte_aligned(8) unsigned long *) + &cq[pos + p3].sop_drop_qpn, 0LL}; + rte_compiler_barrier(); + + p2 = (unsigned int)((vector unsigned short)p)[2]; + cqes[2] = (vector unsigned char)(vector unsigned long){ + *(__rte_aligned(8) unsigned long *) + &cq[pos + p2].sop_drop_qpn, 0LL}; + rte_compiler_barrier(); + + /* B.1 load mbuf pointers. */ + mbp1 = (vector unsigned char)vec_vsx_ld(0, + (signed int const *)&elts[pos]); + mbp2 = (vector unsigned char)vec_vsx_ld(0, + (signed int const *)&elts[pos + 2]); + + /* A.1 load a block having op_own. 
*/ + p1 = (unsigned int)((vector unsigned short)p)[1]; + cqes[1] = (vector unsigned char)(vector unsigned long){ + *(__rte_aligned(8) unsigned long *) + &cq[pos + p1].sop_drop_qpn, 0LL}; + rte_compiler_barrier(); + + cqes[0] = (vector unsigned char)(vector unsigned long){ + *(__rte_aligned(8) unsigned long *) + &cq[pos].sop_drop_qpn, 0LL}; + rte_compiler_barrier(); + + /* B.2 copy mbuf pointers. */ + *(vector unsigned char *)&pkts[pos] = mbp1; + *(vector unsigned char *)&pkts[pos + 2] = mbp2; + rte_cio_rmb(); + + /* C.1 load remaining CQE data and extract necessary fields. */ + cqe_tmp2 = *(vector unsigned char *) + &cq[pos + p3].pkt_info; + cqe_tmp1 = *(vector unsigned char *) + &cq[pos + p2].pkt_info; + cqes[3] = vec_sel(cqes[3], cqe_tmp2, blend_mask); + cqes[2] = vec_sel(cqes[2], cqe_tmp1, blend_mask); + cqe_tmp2 = (vector unsigned char)vec_vsx_ld(0, + (signed int const *)&cq[pos + p3].csum); + cqe_tmp1 = (vector unsigned char)vec_vsx_ld(0, + (signed int const *)&cq[pos + p2].csum); + cqes[3] = (vector unsigned char) + vec_sel((vector unsigned short)cqes[3], + (vector unsigned short)cqe_tmp2, cqe_sel_mask1); + cqes[2] = (vector unsigned char) + vec_sel((vector unsigned short)cqes[2], + (vector unsigned short)cqe_tmp1, cqe_sel_mask1); + cqe_tmp2 = (vector unsigned char)(vector unsigned long){ + *(__rte_aligned(8) unsigned long *) + &cq[pos + p3].rsvd3[9], 0LL}; + cqe_tmp1 = (vector unsigned char)(vector unsigned long){ + *(__rte_aligned(8) unsigned long *) + &cq[pos + p2].rsvd3[9], 0LL}; + cqes[3] = (vector unsigned char) + vec_sel((vector unsigned short)cqes[3], + (vector unsigned short)cqe_tmp2, + (vector unsigned short)cqe_sel_mask2); + cqes[2] = (vector unsigned char) + vec_sel((vector unsigned short)cqes[2], + (vector unsigned short)cqe_tmp1, + (vector unsigned short)cqe_sel_mask2); + + /* C.2 generate final structure for mbuf with swapping bytes. */ + pkt_mb3 = vec_perm(cqes[3], zero, shuf_mask); + pkt_mb2 = vec_perm(cqes[2], zero, shuf_mask); + + /* C.3 adjust CRC length. */ + pkt_mb3 = (vector unsigned char) + ((vector unsigned short)pkt_mb3 - + (vector unsigned short)crc_adj); + pkt_mb2 = (vector unsigned char) + ((vector unsigned short)pkt_mb2 - + (vector unsigned short)crc_adj); + + /* C.4 adjust flow mark. */ + pkt_mb3 = (vector unsigned char) + ((vector unsigned int)pkt_mb3 + + (vector unsigned int)flow_mark_adj); + pkt_mb2 = (vector unsigned char) + ((vector unsigned int)pkt_mb2 + + (vector unsigned int)flow_mark_adj); + + /* D.1 fill in mbuf - rx_descriptor_fields1. */ + *(vector unsigned char *) + &pkts[pos + 3]->pkt_len = pkt_mb3; + *(vector unsigned char *) + &pkts[pos + 2]->pkt_len = pkt_mb2; + + /* E.1 extract op_own field. */ + op_own_tmp2 = (vector unsigned char) + vec_mergeh((vector unsigned int)cqes[2], + (vector unsigned int)cqes[3]); + + /* C.1 load remaining CQE data and extract necessary fields. 
*/ + cqe_tmp2 = *(vector unsigned char *) + &cq[pos + p1].pkt_info; + cqe_tmp1 = *(vector unsigned char *) + &cq[pos].pkt_info; + cqes[1] = vec_sel(cqes[1], cqe_tmp2, blend_mask); + cqes[0] = vec_sel(cqes[0], cqe_tmp2, blend_mask); + cqe_tmp2 = (vector unsigned char)vec_vsx_ld(0, + (signed int const *)&cq[pos + p1].csum); + cqe_tmp1 = (vector unsigned char)vec_vsx_ld(0, + (signed int const *)&cq[pos].csum); + cqes[1] = (vector unsigned char) + vec_sel((vector unsigned short)cqes[1], + (vector unsigned short)cqe_tmp2, cqe_sel_mask1); + cqes[0] = (vector unsigned char) + vec_sel((vector unsigned short)cqes[0], + (vector unsigned short)cqe_tmp1, cqe_sel_mask1); + cqe_tmp2 = (vector unsigned char)(vector unsigned long){ + *(__rte_aligned(8) unsigned long *) + &cq[pos + p1].rsvd3[9], 0LL}; + cqe_tmp1 = (vector unsigned char)(vector unsigned long){ + *(__rte_aligned(8) unsigned long *) + &cq[pos].rsvd3[9], 0LL}; + cqes[1] = (vector unsigned char) + vec_sel((vector unsigned short)cqes[1], + (vector unsigned short)cqe_tmp2, cqe_sel_mask2); + cqes[0] = (vector unsigned char) + vec_sel((vector unsigned short)cqes[0], + (vector unsigned short)cqe_tmp1, cqe_sel_mask2); + + /* C.2 generate final structure for mbuf with swapping bytes. */ + pkt_mb1 = vec_perm(cqes[1], zero, shuf_mask); + pkt_mb0 = vec_perm(cqes[0], zero, shuf_mask); + + /* C.3 adjust CRC length. */ + pkt_mb1 = (vector unsigned char) + ((vector unsigned short)pkt_mb1 - + (vector unsigned short)crc_adj); + pkt_mb0 = (vector unsigned char) + ((vector unsigned short)pkt_mb0 - + (vector unsigned short)crc_adj); + + /* C.4 adjust flow mark. */ + pkt_mb1 = (vector unsigned char) + ((vector unsigned int)pkt_mb1 + + (vector unsigned int)flow_mark_adj); + pkt_mb0 = (vector unsigned char) + ((vector unsigned int)pkt_mb0 + + (vector unsigned int)flow_mark_adj); + + /* E.1 extract op_own byte. */ + op_own_tmp1 = (vector unsigned char) + vec_mergeh((vector unsigned int)cqes[0], + (vector unsigned int)cqes[1]); + op_own = (vector unsigned char) + vec_mergel((vector unsigned long)op_own_tmp1, + (vector unsigned long)op_own_tmp2); + + /* D.1 fill in mbuf - rx_descriptor_fields1. */ + *(vector unsigned char *) + &pkts[pos + 1]->pkt_len = pkt_mb1; + *(vector unsigned char *) + &pkts[pos]->pkt_len = pkt_mb0; + + /* E.2 flip owner bit to mark CQEs from last round. */ + owner_mask = (vector unsigned char) + vec_and((vector unsigned long)op_own, + (vector unsigned long)owner_check); + if (ownership) + owner_mask = (vector unsigned char) + vec_xor((vector unsigned long)owner_mask, + (vector unsigned long)owner_check); + owner_mask = (vector unsigned char) + vec_cmpeq((vector unsigned int)owner_mask, + (vector unsigned int)owner_check); + owner_mask = (vector unsigned char) + vec_packs((vector unsigned int)owner_mask, + (vector unsigned int)zero); + + /* E.3 get mask for invalidated CQEs. */ + opcode = (vector unsigned char) + vec_and((vector unsigned long)op_own, + (vector unsigned long)opcode_check); + invalid_mask = (vector unsigned char) + vec_cmpeq((vector unsigned int)opcode_check, + (vector unsigned int)opcode); + invalid_mask = (vector unsigned char) + vec_packs((vector unsigned int)invalid_mask, + (vector unsigned int)zero); + + /* E.4 mask out beyond boundary. */ + invalid_mask = (vector unsigned char) + vec_or((vector unsigned long)invalid_mask, + (vector unsigned long)mask); + + /* E.5 merge invalid_mask with invalid owner. 
*/ + invalid_mask = (vector unsigned char) + vec_or((vector unsigned long)invalid_mask, + (vector unsigned long)owner_mask); + + /* F.1 find compressed CQE format. */ + comp_mask = (vector unsigned char) + vec_and((vector unsigned long)op_own, + (vector unsigned long)format_check); + comp_mask = (vector unsigned char) + vec_cmpeq((vector unsigned int)comp_mask, + (vector unsigned int)format_check); + comp_mask = (vector unsigned char) + vec_packs((vector unsigned int)comp_mask, + (vector unsigned int)zero); + + /* F.2 mask out invalid entries. */ + comp_mask = (vector unsigned char) + vec_andc((vector unsigned long)comp_mask, + (vector unsigned long)invalid_mask); + comp_idx = ((vector unsigned long)comp_mask)[0]; + + /* F.3 get the first compressed CQE. */ + comp_idx = comp_idx ? __builtin_ctzll(comp_idx) / + (sizeof(uint16_t) * 8) : MLX5_VPMD_DESCS_PER_LOOP; + + /* E.6 mask out entries after the compressed CQE. */ + mask = (vector unsigned char)(vector unsigned long){ + (comp_idx * sizeof(uint16_t) * 8), 0}; + lshift = vec_splat((vector unsigned long)mask, 0); + shmask = vec_cmpgt(shmax, lshift); + mask = (vector unsigned char) + vec_sl((vector unsigned long)ones, lshift); + mask = (vector unsigned char) + vec_sel((vector unsigned long)shmask, + (vector unsigned long)mask, shmask); + invalid_mask = (vector unsigned char) + vec_or((vector unsigned long)invalid_mask, + (vector unsigned long)mask); + + /* E.7 count non-compressed valid CQEs. */ + n = ((vector unsigned long)invalid_mask)[0]; + n = n ? __builtin_ctzll(n) / (sizeof(uint16_t) * 8) : + MLX5_VPMD_DESCS_PER_LOOP; + nocmp_n += n; + + /* D.2 get the final invalid mask. */ + mask = (vector unsigned char)(vector unsigned long){ + (n * sizeof(uint16_t) * 8), 0}; + lshift = vec_splat((vector unsigned long)mask, 0); + shmask = vec_cmpgt(shmax, lshift); + mask = (vector unsigned char) + vec_sl((vector unsigned long)ones, lshift); + mask = (vector unsigned char) + vec_sel((vector unsigned long)shmask, + (vector unsigned long)mask, shmask); + invalid_mask = (vector unsigned char) + vec_or((vector unsigned long)invalid_mask, + (vector unsigned long)mask); + + /* D.3 check error in opcode. */ + opcode = (vector unsigned char) + vec_cmpeq((vector unsigned int)resp_err_check, + (vector unsigned int)opcode); + opcode = (vector unsigned char) + vec_packs((vector unsigned int)opcode, + (vector unsigned int)zero); + opcode = (vector unsigned char) + vec_andc((vector unsigned long)opcode, + (vector unsigned long)invalid_mask); + + /* D.4 mark if any error is set */ + *err |= ((vector unsigned long)opcode)[0]; + + /* D.5 fill in mbuf - rearm_data and packet_type. */ + rxq_cq_to_ptype_oflags_v(rxq, cqes, opcode, &pkts[pos]); + if (rxq->hw_timestamp) { + pkts[pos]->timestamp = + rte_be_to_cpu_64(cq[pos].timestamp); + pkts[pos + 1]->timestamp = + rte_be_to_cpu_64(cq[pos + p1].timestamp); + pkts[pos + 2]->timestamp = + rte_be_to_cpu_64(cq[pos + p2].timestamp); + pkts[pos + 3]->timestamp = + rte_be_to_cpu_64(cq[pos + p3].timestamp); + } + if (rxq->dynf_meta) { + uint64_t flag = rxq->flow_meta_mask; + int32_t offs = rxq->flow_meta_offset; + uint32_t metadata; + + /* This code is subject for futher optimization. */ + metadata = cq[pos].flow_table_metadata; + *RTE_MBUF_DYNFIELD(pkts[pos], offs, uint32_t *) = + metadata; + pkts[pos]->ol_flags |= metadata ? flag : 0ULL; + metadata = cq[pos + 1].flow_table_metadata; + *RTE_MBUF_DYNFIELD(pkts[pos + 1], offs, uint32_t *) = + metadata; + pkts[pos + 1]->ol_flags |= metadata ? 
flag : 0ULL; + metadata = cq[pos + 2].flow_table_metadata; + *RTE_MBUF_DYNFIELD(pkts[pos + 2], offs, uint32_t *) = + metadata; + pkts[pos + 2]->ol_flags |= metadata ? flag : 0ULL; + metadata = cq[pos + 3].flow_table_metadata; + *RTE_MBUF_DYNFIELD(pkts[pos + 3], offs, uint32_t *) = + metadata; + pkts[pos + 3]->ol_flags |= metadata ? flag : 0ULL; + } +#ifdef MLX5_PMD_SOFT_COUNTERS + /* Add up received bytes count. */ + byte_cnt = vec_perm(op_own, zero, len_shuf_mask); + byte_cnt = (vector unsigned char) + vec_andc((vector unsigned long)byte_cnt, + (vector unsigned long)invalid_mask); + left = vec_perm((vector unsigned short)byte_cnt, + (vector unsigned short)zero, lower_half); + right = vec_perm((vector unsigned short)byte_cnt, + (vector unsigned short)zero, upper_half); + byte_cnt = (vector unsigned char)vec_add(left, right); + left = vec_perm((vector unsigned short)byte_cnt, + (vector unsigned short)zero, lower_half); + right = vec_perm((vector unsigned short)byte_cnt, + (vector unsigned short)zero, upper_half); + byte_cnt = (vector unsigned char)vec_add(left, right); + rcvd_byte += ((vector unsigned long)byte_cnt)[0]; +#endif + + /* + * Break the loop unless more valid CQE is expected, or if + * there's a compressed CQE. + */ + if (n != MLX5_VPMD_DESCS_PER_LOOP) + break; + } + /* If no new CQE seen, return without updating cq_db. */ + if (unlikely(!nocmp_n && comp_idx == MLX5_VPMD_DESCS_PER_LOOP)) + return rcvd_pkt; + /* Update the consumer indexes for non-compressed CQEs. */ + MLX5_ASSERT(nocmp_n <= pkts_n); + rxq->cq_ci += nocmp_n; + rxq->rq_pi += nocmp_n; + rcvd_pkt += nocmp_n; +#ifdef MLX5_PMD_SOFT_COUNTERS + rxq->stats.ipackets += nocmp_n; + rxq->stats.ibytes += rcvd_byte; +#endif + /* Decompress the last CQE if compressed. */ + if (comp_idx < MLX5_VPMD_DESCS_PER_LOOP && comp_idx == n) { + MLX5_ASSERT(comp_idx == (nocmp_n % MLX5_VPMD_DESCS_PER_LOOP)); + rxq->decompressed = + rxq_cq_decompress_v(rxq, &cq[nocmp_n], &elts[nocmp_n]); + /* Return more packets if needed. */ + if (nocmp_n < pkts_n) { + uint16_t n = rxq->decompressed; + + n = RTE_MIN(n, pkts_n - nocmp_n); + rxq_copy_mbuf_v(rxq, &pkts[nocmp_n], n); + rxq->rq_pi += n; + rcvd_pkt += n; + rxq->decompressed -= n; + } + } + rte_compiler_barrier(); + *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci); + return rcvd_pkt; +} + +#endif /* RTE_PMD_MLX5_RXTX_VEC_ALTIVEC_H_ */ diff --git a/src/spdk/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_neon.h b/src/spdk/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_neon.h new file mode 100644 index 000000000..ecafbf800 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_neon.h @@ -0,0 +1,780 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2017 6WIND S.A. + * Copyright 2017 Mellanox Technologies, Ltd + */ + +#ifndef RTE_PMD_MLX5_RXTX_VEC_NEON_H_ +#define RTE_PMD_MLX5_RXTX_VEC_NEON_H_ + +#include +#include +#include +#include + +#include +#include +#include + +#include + +#include "mlx5_defs.h" +#include "mlx5.h" +#include "mlx5_utils.h" +#include "mlx5_rxtx.h" +#include "mlx5_rxtx_vec.h" +#include "mlx5_autoconf.h" + +#pragma GCC diagnostic ignored "-Wcast-qual" + +/** + * Store free buffers to RX SW ring. + * + * @param rxq + * Pointer to RX queue structure. + * @param pkts + * Pointer to array of packets to be stored. + * @param pkts_n + * Number of packets to be stored. 
+ */ +static inline void +rxq_copy_mbuf_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t n) +{ + const uint16_t q_mask = (1 << rxq->elts_n) - 1; + struct rte_mbuf **elts = &(*rxq->elts)[rxq->rq_pi & q_mask]; + unsigned int pos; + uint16_t p = n & -2; + + for (pos = 0; pos < p; pos += 2) { + uint64x2_t mbp; + + mbp = vld1q_u64((void *)&elts[pos]); + vst1q_u64((void *)&pkts[pos], mbp); + } + if (n & 1) + pkts[pos] = elts[pos]; +} + +/** + * Decompress a compressed completion and fill in mbufs in RX SW ring with data + * extracted from the title completion descriptor. + * + * @param rxq + * Pointer to RX queue structure. + * @param cq + * Pointer to completion array having a compressed completion at first. + * @param elts + * Pointer to SW ring to be filled. The first mbuf has to be pre-built from + * the title completion descriptor to be copied to the rest of mbufs. + * + * @return + * Number of mini-CQEs successfully decompressed. + */ +static inline uint16_t +rxq_cq_decompress_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq, + struct rte_mbuf **elts) +{ + volatile struct mlx5_mini_cqe8 *mcq = (void *)&(cq + 1)->pkt_info; + struct rte_mbuf *t_pkt = elts[0]; /* Title packet is pre-built. */ + unsigned int pos; + unsigned int i; + unsigned int inv = 0; + /* Mask to shuffle from extracted mini CQE to mbuf. */ + const uint8x16_t mcqe_shuf_m1 = { + -1, -1, -1, -1, /* skip packet_type */ + 7, 6, -1, -1, /* pkt_len, bswap16 */ + 7, 6, /* data_len, bswap16 */ + -1, -1, /* skip vlan_tci */ + 3, 2, 1, 0 /* hash.rss, bswap32 */ + }; + const uint8x16_t mcqe_shuf_m2 = { + -1, -1, -1, -1, /* skip packet_type */ + 15, 14, -1, -1, /* pkt_len, bswap16 */ + 15, 14, /* data_len, bswap16 */ + -1, -1, /* skip vlan_tci */ + 11, 10, 9, 8 /* hash.rss, bswap32 */ + }; + /* Restore the compressed count. Must be 16 bits. */ + const uint16_t mcqe_n = t_pkt->data_len + + (rxq->crc_present * RTE_ETHER_CRC_LEN); + const uint64x2_t rearm = + vld1q_u64((void *)&t_pkt->rearm_data); + const uint32x4_t rxdf_mask = { + 0xffffffff, /* packet_type */ + 0, /* skip pkt_len */ + 0xffff0000, /* vlan_tci, skip data_len */ + 0, /* skip hash.rss */ + }; + const uint8x16_t rxdf = + vandq_u8(vld1q_u8((void *)&t_pkt->rx_descriptor_fields1), + vreinterpretq_u8_u32(rxdf_mask)); + const uint16x8_t crc_adj = { + 0, 0, + rxq->crc_present * RTE_ETHER_CRC_LEN, 0, + rxq->crc_present * RTE_ETHER_CRC_LEN, 0, + 0, 0 + }; + const uint32_t flow_tag = t_pkt->hash.fdir.hi; +#ifdef MLX5_PMD_SOFT_COUNTERS + uint32_t rcvd_byte = 0; +#endif + /* Mask to shuffle byte_cnt to add up stats. Do bswap16 for all. */ + const uint8x8_t len_shuf_m = { + 7, 6, /* 1st mCQE */ + 15, 14, /* 2nd mCQE */ + 23, 22, /* 3rd mCQE */ + 31, 30 /* 4th mCQE */ + }; + + /* + * A. load mCQEs into a 128bit register. + * B. store rearm data to mbuf. + * C. combine data from mCQEs with rx_descriptor_fields1. + * D. store rx_descriptor_fields1. + * E. store flow tag (rte_flow mark). + */ + for (pos = 0; pos < mcqe_n; ) { + uint8_t *p = (void *)&mcq[pos % 8]; + uint8_t *e0 = (void *)&elts[pos]->rearm_data; + uint8_t *e1 = (void *)&elts[pos + 1]->rearm_data; + uint8_t *e2 = (void *)&elts[pos + 2]->rearm_data; + uint8_t *e3 = (void *)&elts[pos + 3]->rearm_data; + uint16x4_t byte_cnt; +#ifdef MLX5_PMD_SOFT_COUNTERS + uint16x4_t invalid_mask = + vcreate_u16(mcqe_n - pos < MLX5_VPMD_DESCS_PER_LOOP ? 
+ -1UL << ((mcqe_n - pos) * + sizeof(uint16_t) * 8) : 0); +#endif + for (i = 0; i < MLX5_VPMD_DESCS_PER_LOOP; ++i) + if (likely(pos + i < mcqe_n)) + rte_prefetch0((void *)(cq + pos + i)); + __asm__ volatile ( + /* A.1 load mCQEs into a 128bit register. */ + "ld1 {v16.16b - v17.16b}, [%[mcq]] \n\t" + /* B.1 store rearm data to mbuf. */ + "st1 {%[rearm].2d}, [%[e0]] \n\t" + "add %[e0], %[e0], #16 \n\t" + "st1 {%[rearm].2d}, [%[e1]] \n\t" + "add %[e1], %[e1], #16 \n\t" + /* C.1 combine data from mCQEs with rx_descriptor_fields1. */ + "tbl v18.16b, {v16.16b}, %[mcqe_shuf_m1].16b \n\t" + "tbl v19.16b, {v16.16b}, %[mcqe_shuf_m2].16b \n\t" + "sub v18.8h, v18.8h, %[crc_adj].8h \n\t" + "sub v19.8h, v19.8h, %[crc_adj].8h \n\t" + "orr v18.16b, v18.16b, %[rxdf].16b \n\t" + "orr v19.16b, v19.16b, %[rxdf].16b \n\t" + /* D.1 store rx_descriptor_fields1. */ + "st1 {v18.2d}, [%[e0]] \n\t" + "st1 {v19.2d}, [%[e1]] \n\t" + /* B.1 store rearm data to mbuf. */ + "st1 {%[rearm].2d}, [%[e2]] \n\t" + "add %[e2], %[e2], #16 \n\t" + "st1 {%[rearm].2d}, [%[e3]] \n\t" + "add %[e3], %[e3], #16 \n\t" + /* C.1 combine data from mCQEs with rx_descriptor_fields1. */ + "tbl v18.16b, {v17.16b}, %[mcqe_shuf_m1].16b \n\t" + "tbl v19.16b, {v17.16b}, %[mcqe_shuf_m2].16b \n\t" + "sub v18.8h, v18.8h, %[crc_adj].8h \n\t" + "sub v19.8h, v19.8h, %[crc_adj].8h \n\t" + "orr v18.16b, v18.16b, %[rxdf].16b \n\t" + "orr v19.16b, v19.16b, %[rxdf].16b \n\t" + /* D.1 store rx_descriptor_fields1. */ + "st1 {v18.2d}, [%[e2]] \n\t" + "st1 {v19.2d}, [%[e3]] \n\t" +#ifdef MLX5_PMD_SOFT_COUNTERS + "tbl %[byte_cnt].8b, {v16.16b - v17.16b}, %[len_shuf_m].8b \n\t" +#endif + :[byte_cnt]"=&w"(byte_cnt) + :[mcq]"r"(p), + [rxdf]"w"(rxdf), + [rearm]"w"(rearm), + [e3]"r"(e3), [e2]"r"(e2), [e1]"r"(e1), [e0]"r"(e0), + [mcqe_shuf_m1]"w"(mcqe_shuf_m1), + [mcqe_shuf_m2]"w"(mcqe_shuf_m2), + [crc_adj]"w"(crc_adj), + [len_shuf_m]"w"(len_shuf_m) + :"memory", "v16", "v17", "v18", "v19"); +#ifdef MLX5_PMD_SOFT_COUNTERS + byte_cnt = vbic_u16(byte_cnt, invalid_mask); + rcvd_byte += vget_lane_u64(vpaddl_u32(vpaddl_u16(byte_cnt)), 0); +#endif + if (rxq->mark) { + /* E.1 store flow tag (rte_flow mark). */ + elts[pos]->hash.fdir.hi = flow_tag; + elts[pos + 1]->hash.fdir.hi = flow_tag; + elts[pos + 2]->hash.fdir.hi = flow_tag; + elts[pos + 3]->hash.fdir.hi = flow_tag; + } + if (rxq->dynf_meta) { + int32_t offs = rxq->flow_meta_offset; + const uint32_t meta = + *RTE_MBUF_DYNFIELD(t_pkt, offs, uint32_t *); + + /* Check if title packet has valid metadata. */ + if (meta) { + MLX5_ASSERT(t_pkt->ol_flags & + rxq->flow_meta_mask); + *RTE_MBUF_DYNFIELD(elts[pos], offs, + uint32_t *) = meta; + *RTE_MBUF_DYNFIELD(elts[pos + 1], offs, + uint32_t *) = meta; + *RTE_MBUF_DYNFIELD(elts[pos + 2], offs, + uint32_t *) = meta; + *RTE_MBUF_DYNFIELD(elts[pos + 3], offs, + uint32_t *) = meta; + } + } + pos += MLX5_VPMD_DESCS_PER_LOOP; + /* Move to next CQE and invalidate consumed CQEs. */ + if (!(pos & 0x7) && pos < mcqe_n) { + mcq = (void *)&(cq + pos)->pkt_info; + for (i = 0; i < 8; ++i) + cq[inv++].op_own = MLX5_CQE_INVALIDATE; + } + } + /* Invalidate the rest of CQEs. */ + for (; inv < mcqe_n; ++inv) + cq[inv].op_own = MLX5_CQE_INVALIDATE; +#ifdef MLX5_PMD_SOFT_COUNTERS + rxq->stats.ipackets += mcqe_n; + rxq->stats.ibytes += rcvd_byte; +#endif + rxq->cq_ci += mcqe_n; + return mcqe_n; +} + +/** + * Calculate packet type and offload flag for mbuf and store it. + * + * @param rxq + * Pointer to RX queue structure. 
+ * @param ptype_info + * Array of four 4bytes packet type info extracted from the original + * completion descriptor. + * @param flow_tag + * Array of four 4bytes flow ID extracted from the original completion + * descriptor. + * @param op_err + * Opcode vector having responder error status. Each field is 4B. + * @param pkts + * Pointer to array of packets to be filled. + */ +static inline void +rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq, + uint32x4_t ptype_info, uint32x4_t flow_tag, + uint16x4_t op_err, struct rte_mbuf **pkts) +{ + uint16x4_t ptype; + uint32x4_t pinfo, cv_flags; + uint32x4_t ol_flags = + vdupq_n_u32(rxq->rss_hash * PKT_RX_RSS_HASH | + rxq->hw_timestamp * PKT_RX_TIMESTAMP); + const uint32x4_t ptype_ol_mask = { 0x106, 0x106, 0x106, 0x106 }; + const uint8x16_t cv_flag_sel = { + 0, + (uint8_t)(PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED), + (uint8_t)(PKT_RX_IP_CKSUM_GOOD >> 1), + 0, + (uint8_t)(PKT_RX_L4_CKSUM_GOOD >> 1), + 0, + (uint8_t)((PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> 1), + 0, 0, 0, 0, 0, 0, 0, 0, 0 + }; + const uint32x4_t cv_mask = + vdupq_n_u32(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD | + PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED); + const uint64x2_t mbuf_init = vld1q_u64 + ((const uint64_t *)&rxq->mbuf_initializer); + uint64x2_t rearm0, rearm1, rearm2, rearm3; + uint8_t pt_idx0, pt_idx1, pt_idx2, pt_idx3; + + if (rxq->mark) { + const uint32x4_t ft_def = vdupq_n_u32(MLX5_FLOW_MARK_DEFAULT); + const uint32x4_t fdir_flags = vdupq_n_u32(PKT_RX_FDIR); + uint32x4_t fdir_id_flags = vdupq_n_u32(PKT_RX_FDIR_ID); + uint32x4_t invalid_mask; + + /* Check if flow tag is non-zero then set PKT_RX_FDIR. */ + invalid_mask = vceqzq_u32(flow_tag); + ol_flags = vorrq_u32(ol_flags, + vbicq_u32(fdir_flags, invalid_mask)); + /* Mask out invalid entries. */ + fdir_id_flags = vbicq_u32(fdir_id_flags, invalid_mask); + /* Check if flow tag MLX5_FLOW_MARK_DEFAULT. */ + ol_flags = vorrq_u32(ol_flags, + vbicq_u32(fdir_id_flags, + vceqq_u32(flow_tag, ft_def))); + } + /* + * ptype_info has the following: + * bit[1] = l3_ok + * bit[2] = l4_ok + * bit[8] = cv + * bit[11:10] = l3_hdr_type + * bit[14:12] = l4_hdr_type + * bit[15] = ip_frag + * bit[16] = tunneled + * bit[17] = outer_l3_type + */ + ptype = vshrn_n_u32(ptype_info, 10); + /* Errored packets will have RTE_PTYPE_ALL_MASK. */ + ptype = vorr_u16(ptype, op_err); + pt_idx0 = vget_lane_u8(vreinterpret_u8_u16(ptype), 6); + pt_idx1 = vget_lane_u8(vreinterpret_u8_u16(ptype), 4); + pt_idx2 = vget_lane_u8(vreinterpret_u8_u16(ptype), 2); + pt_idx3 = vget_lane_u8(vreinterpret_u8_u16(ptype), 0); + pkts[0]->packet_type = mlx5_ptype_table[pt_idx0] | + !!(pt_idx0 & (1 << 6)) * rxq->tunnel; + pkts[1]->packet_type = mlx5_ptype_table[pt_idx1] | + !!(pt_idx1 & (1 << 6)) * rxq->tunnel; + pkts[2]->packet_type = mlx5_ptype_table[pt_idx2] | + !!(pt_idx2 & (1 << 6)) * rxq->tunnel; + pkts[3]->packet_type = mlx5_ptype_table[pt_idx3] | + !!(pt_idx3 & (1 << 6)) * rxq->tunnel; + /* Fill flags for checksum and VLAN. */ + pinfo = vandq_u32(ptype_info, ptype_ol_mask); + pinfo = vreinterpretq_u32_u8( + vqtbl1q_u8(cv_flag_sel, vreinterpretq_u8_u32(pinfo))); + /* Locate checksum flags at byte[2:1] and merge with VLAN flags. */ + cv_flags = vshlq_n_u32(pinfo, 9); + cv_flags = vorrq_u32(pinfo, cv_flags); + /* Move back flags to start from byte[0]. */ + cv_flags = vshrq_n_u32(cv_flags, 8); + /* Mask out garbage bits. */ + cv_flags = vandq_u32(cv_flags, cv_mask); + /* Merge to ol_flags. 
*/ + ol_flags = vorrq_u32(ol_flags, cv_flags); + /* Merge mbuf_init and ol_flags, and store. */ + rearm0 = vreinterpretq_u64_u32(vsetq_lane_u32 + (vgetq_lane_u32(ol_flags, 3), + vreinterpretq_u32_u64(mbuf_init), 2)); + rearm1 = vreinterpretq_u64_u32(vsetq_lane_u32 + (vgetq_lane_u32(ol_flags, 2), + vreinterpretq_u32_u64(mbuf_init), 2)); + rearm2 = vreinterpretq_u64_u32(vsetq_lane_u32 + (vgetq_lane_u32(ol_flags, 1), + vreinterpretq_u32_u64(mbuf_init), 2)); + rearm3 = vreinterpretq_u64_u32(vsetq_lane_u32 + (vgetq_lane_u32(ol_flags, 0), + vreinterpretq_u32_u64(mbuf_init), 2)); + + vst1q_u64((void *)&pkts[0]->rearm_data, rearm0); + vst1q_u64((void *)&pkts[1]->rearm_data, rearm1); + vst1q_u64((void *)&pkts[2]->rearm_data, rearm2); + vst1q_u64((void *)&pkts[3]->rearm_data, rearm3); +} + +/** + * Receive burst of packets. An errored completion also consumes a mbuf, but the + * packet_type is set to be RTE_PTYPE_ALL_MASK. Marked mbufs should be freed + * before returning to application. + * + * @param rxq + * Pointer to RX queue structure. + * @param[out] pkts + * Array to store received packets. + * @param pkts_n + * Maximum number of packets in array. + * @param[out] err + * Pointer to a flag. Set non-zero value if pkts array has at least one error + * packet to handle. + * + * @return + * Number of packets received including errors (<= pkts_n). + */ +static inline uint16_t +rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n, + uint64_t *err) +{ + const uint16_t q_n = 1 << rxq->cqe_n; + const uint16_t q_mask = q_n - 1; + volatile struct mlx5_cqe *cq; + struct rte_mbuf **elts; + unsigned int pos; + uint64_t n; + uint16_t repl_n; + uint64_t comp_idx = MLX5_VPMD_DESCS_PER_LOOP; + uint16_t nocmp_n = 0; + uint16_t rcvd_pkt = 0; + unsigned int cq_idx = rxq->cq_ci & q_mask; + unsigned int elts_idx; + const uint16x4_t ownership = vdup_n_u16(!(rxq->cq_ci & (q_mask + 1))); + const uint16x4_t owner_check = vcreate_u16(0x0001000100010001); + const uint16x4_t opcode_check = vcreate_u16(0x00f000f000f000f0); + const uint16x4_t format_check = vcreate_u16(0x000c000c000c000c); + const uint16x4_t resp_err_check = vcreate_u16(0x00e000e000e000e0); +#ifdef MLX5_PMD_SOFT_COUNTERS + uint32_t rcvd_byte = 0; +#endif + /* Mask to generate 16B length vector. */ + const uint8x8_t len_shuf_m = { + 52, 53, /* 4th CQE */ + 36, 37, /* 3rd CQE */ + 20, 21, /* 2nd CQE */ + 4, 5 /* 1st CQE */ + }; + /* Mask to extract 16B data from a 64B CQE. */ + const uint8x16_t cqe_shuf_m = { + 28, 29, /* hdr_type_etc */ + 0, /* pkt_info */ + -1, /* null */ + 47, 46, /* byte_cnt, bswap16 */ + 31, 30, /* vlan_info, bswap16 */ + 15, 14, 13, 12, /* rx_hash_res, bswap32 */ + 57, 58, 59, /* flow_tag */ + 63 /* op_own */ + }; + /* Mask to generate 16B data for mbuf. */ + const uint8x16_t mb_shuf_m = { + 4, 5, -1, -1, /* pkt_len */ + 4, 5, /* data_len */ + 6, 7, /* vlan_tci */ + 8, 9, 10, 11, /* hash.rss */ + 12, 13, 14, -1 /* hash.fdir.hi */ + }; + /* Mask to generate 16B owner vector. */ + const uint8x8_t owner_shuf_m = { + 63, -1, /* 4th CQE */ + 47, -1, /* 3rd CQE */ + 31, -1, /* 2nd CQE */ + 15, -1 /* 1st CQE */ + }; + /* Mask to generate a vector having packet_type/ol_flags. */ + const uint8x16_t ptype_shuf_m = { + 48, 49, 50, -1, /* 4th CQE */ + 32, 33, 34, -1, /* 3rd CQE */ + 16, 17, 18, -1, /* 2nd CQE */ + 0, 1, 2, -1 /* 1st CQE */ + }; + /* Mask to generate a vector having flow tags. 
*/ + const uint8x16_t ftag_shuf_m = { + 60, 61, 62, -1, /* 4th CQE */ + 44, 45, 46, -1, /* 3rd CQE */ + 28, 29, 30, -1, /* 2nd CQE */ + 12, 13, 14, -1 /* 1st CQE */ + }; + const uint16x8_t crc_adj = { + 0, 0, rxq->crc_present * RTE_ETHER_CRC_LEN, 0, 0, 0, 0, 0 + }; + const uint32x4_t flow_mark_adj = { 0, 0, 0, rxq->mark * (-1) }; + + MLX5_ASSERT(rxq->sges_n == 0); + MLX5_ASSERT(rxq->cqe_n == rxq->elts_n); + cq = &(*rxq->cqes)[cq_idx]; + rte_prefetch_non_temporal(cq); + rte_prefetch_non_temporal(cq + 1); + rte_prefetch_non_temporal(cq + 2); + rte_prefetch_non_temporal(cq + 3); + pkts_n = RTE_MIN(pkts_n, MLX5_VPMD_RX_MAX_BURST); + repl_n = q_n - (rxq->rq_ci - rxq->rq_pi); + if (repl_n >= rxq->rq_repl_thresh) + mlx5_rx_replenish_bulk_mbuf(rxq, repl_n); + /* See if there're unreturned mbufs from compressed CQE. */ + rcvd_pkt = rxq->decompressed; + if (rcvd_pkt > 0) { + rcvd_pkt = RTE_MIN(rcvd_pkt, pkts_n); + rxq_copy_mbuf_v(rxq, pkts, rcvd_pkt); + rxq->rq_pi += rcvd_pkt; + pkts += rcvd_pkt; + rxq->decompressed -= rcvd_pkt; + } + elts_idx = rxq->rq_pi & q_mask; + elts = &(*rxq->elts)[elts_idx]; + /* Not to overflow pkts array. */ + pkts_n = RTE_ALIGN_FLOOR(pkts_n - rcvd_pkt, MLX5_VPMD_DESCS_PER_LOOP); + /* Not to cross queue end. */ + pkts_n = RTE_MIN(pkts_n, q_n - elts_idx); + pkts_n = RTE_MIN(pkts_n, q_n - cq_idx); + if (!pkts_n) + return rcvd_pkt; + /* At this point, there shouldn't be any remaining packets. */ + MLX5_ASSERT(rxq->decompressed == 0); + /* + * Note that vectors have reverse order - {v3, v2, v1, v0}, because + * there's no instruction to count trailing zeros. __builtin_clzl() is + * used instead. + * + * A. copy 4 mbuf pointers from elts ring to returning pkts. + * B. load 64B CQE and extract necessary fields. + * Final 16bytes cqes[] extracted from original 64bytes CQE has the + * following structure: + * struct { + * uint16_t hdr_type_etc; + * uint8_t pkt_info; + * uint8_t rsvd; + * uint16_t byte_cnt; + * uint16_t vlan_info; + * uint32_t rx_hash_res; + * uint8_t flow_tag[3]; + * uint8_t op_own; + * } c; + * C. fill in mbuf. + * D. get valid CQEs. + * E. find compressed CQE. + */ + for (pos = 0; + pos < pkts_n; + pos += MLX5_VPMD_DESCS_PER_LOOP) { + uint16x4_t op_own; + uint16x4_t opcode, owner_mask, invalid_mask; + uint16x4_t comp_mask; + uint16x4_t mask; + uint16x4_t byte_cnt; + uint32x4_t ptype_info, flow_tag; + register uint64x2_t c0, c1, c2, c3; + uint8_t *p0, *p1, *p2, *p3; + uint8_t *e0 = (void *)&elts[pos]->pkt_len; + uint8_t *e1 = (void *)&elts[pos + 1]->pkt_len; + uint8_t *e2 = (void *)&elts[pos + 2]->pkt_len; + uint8_t *e3 = (void *)&elts[pos + 3]->pkt_len; + void *elts_p = (void *)&elts[pos]; + void *pkts_p = (void *)&pkts[pos]; + + /* A.0 do not cross the end of CQ. */ + mask = vcreate_u16(pkts_n - pos < MLX5_VPMD_DESCS_PER_LOOP ? + -1UL >> ((pkts_n - pos) * + sizeof(uint16_t) * 8) : 0); + p0 = (void *)&cq[pos].pkt_info; + p1 = p0 + (pkts_n - pos > 1) * sizeof(struct mlx5_cqe); + p2 = p1 + (pkts_n - pos > 2) * sizeof(struct mlx5_cqe); + p3 = p2 + (pkts_n - pos > 3) * sizeof(struct mlx5_cqe); + /* B.0 (CQE 3) load a block having op_own. */ + c3 = vld1q_u64((uint64_t *)(p3 + 48)); + /* B.0 (CQE 2) load a block having op_own. */ + c2 = vld1q_u64((uint64_t *)(p2 + 48)); + /* B.0 (CQE 1) load a block having op_own. */ + c1 = vld1q_u64((uint64_t *)(p1 + 48)); + /* B.0 (CQE 0) load a block having op_own. */ + c0 = vld1q_u64((uint64_t *)(p0 + 48)); + /* Synchronize for loading the rest of blocks. */ + rte_cio_rmb(); + /* Prefetch next 4 CQEs.
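Non-temporal prefetches are used because each CQE is read only once, so there is no benefit in keeping it in the cache afterwards.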
*/ + if (pkts_n - pos >= 2 * MLX5_VPMD_DESCS_PER_LOOP) { + unsigned int next = pos + MLX5_VPMD_DESCS_PER_LOOP; + rte_prefetch_non_temporal(&cq[next]); + rte_prefetch_non_temporal(&cq[next + 1]); + rte_prefetch_non_temporal(&cq[next + 2]); + rte_prefetch_non_temporal(&cq[next + 3]); + } + __asm__ volatile ( + /* B.1 (CQE 3) load the rest of blocks. */ + "ld1 {v16.16b - v18.16b}, [%[p3]] \n\t" + /* B.2 (CQE 3) move the block having op_own. */ + "mov v19.16b, %[c3].16b \n\t" + /* B.3 (CQE 3) extract 16B fields. */ + "tbl v23.16b, {v16.16b - v19.16b}, %[cqe_shuf_m].16b \n\t" + /* B.1 (CQE 2) load the rest of blocks. */ + "ld1 {v16.16b - v18.16b}, [%[p2]] \n\t" + /* B.4 (CQE 3) adjust CRC length. */ + "sub v23.8h, v23.8h, %[crc_adj].8h \n\t" + /* C.1 (CQE 3) generate final structure for mbuf. */ + "tbl v15.16b, {v23.16b}, %[mb_shuf_m].16b \n\t" + /* B.2 (CQE 2) move the block having op_own. */ + "mov v19.16b, %[c2].16b \n\t" + /* B.3 (CQE 2) extract 16B fields. */ + "tbl v22.16b, {v16.16b - v19.16b}, %[cqe_shuf_m].16b \n\t" + /* B.1 (CQE 1) load the rest of blocks. */ + "ld1 {v16.16b - v18.16b}, [%[p1]] \n\t" + /* B.4 (CQE 2) adjust CRC length. */ + "sub v22.8h, v22.8h, %[crc_adj].8h \n\t" + /* C.1 (CQE 2) generate final structure for mbuf. */ + "tbl v14.16b, {v22.16b}, %[mb_shuf_m].16b \n\t" + /* B.2 (CQE 1) move the block having op_own. */ + "mov v19.16b, %[c1].16b \n\t" + /* B.3 (CQE 1) extract 16B fields. */ + "tbl v21.16b, {v16.16b - v19.16b}, %[cqe_shuf_m].16b \n\t" + /* B.1 (CQE 0) load the rest of blocks. */ + "ld1 {v16.16b - v18.16b}, [%[p0]] \n\t" + /* B.4 (CQE 1) adjust CRC length. */ + "sub v21.8h, v21.8h, %[crc_adj].8h \n\t" + /* C.1 (CQE 1) generate final structure for mbuf. */ + "tbl v13.16b, {v21.16b}, %[mb_shuf_m].16b \n\t" + /* B.2 (CQE 0) move the block having op_own. */ + "mov v19.16b, %[c0].16b \n\t" + /* A.1 load mbuf pointers. */ + "ld1 {v24.2d - v25.2d}, [%[elts_p]] \n\t" + /* B.3 (CQE 0) extract 16B fields. */ + "tbl v20.16b, {v16.16b - v19.16b}, %[cqe_shuf_m].16b \n\t" + /* B.4 (CQE 0) adjust CRC length. */ + "sub v20.8h, v20.8h, %[crc_adj].8h \n\t" + /* D.1 extract op_own byte. */ + "tbl %[op_own].8b, {v20.16b - v23.16b}, %[owner_shuf_m].8b \n\t" + /* C.2 (CQE 3) adjust flow mark. */ + "add v15.4s, v15.4s, %[flow_mark_adj].4s \n\t" + /* C.3 (CQE 3) fill in mbuf - rx_descriptor_fields1. */ + "st1 {v15.2d}, [%[e3]] \n\t" + /* C.2 (CQE 2) adjust flow mark. */ + "add v14.4s, v14.4s, %[flow_mark_adj].4s \n\t" + /* C.3 (CQE 2) fill in mbuf - rx_descriptor_fields1. */ + "st1 {v14.2d}, [%[e2]] \n\t" + /* C.1 (CQE 0) generate final structure for mbuf. */ + "tbl v12.16b, {v20.16b}, %[mb_shuf_m].16b \n\t" + /* C.2 (CQE 1) adjust flow mark. */ + "add v13.4s, v13.4s, %[flow_mark_adj].4s \n\t" + /* C.3 (CQE 1) fill in mbuf - rx_descriptor_fields1. */ + "st1 {v13.2d}, [%[e1]] \n\t" +#ifdef MLX5_PMD_SOFT_COUNTERS + /* Extract byte_cnt. */ + "tbl %[byte_cnt].8b, {v20.16b - v23.16b}, %[len_shuf_m].8b \n\t" +#endif + /* Extract ptype_info. */ + "tbl %[ptype_info].16b, {v20.16b - v23.16b}, %[ptype_shuf_m].16b \n\t" + /* Extract flow_tag. */ + "tbl %[flow_tag].16b, {v20.16b - v23.16b}, %[ftag_shuf_m].16b \n\t" + /* A.2 copy mbuf pointers. */ + "st1 {v24.2d - v25.2d}, [%[pkts_p]] \n\t" + /* C.2 (CQE 0) adjust flow mark. */ + "add v12.4s, v12.4s, %[flow_mark_adj].4s \n\t" + /* C.3 (CQE 1) fill in mbuf - rx_descriptor_fields1. 
*/ + "st1 {v12.2d}, [%[e0]] \n\t" + :[op_own]"=&w"(op_own), + [byte_cnt]"=&w"(byte_cnt), + [ptype_info]"=&w"(ptype_info), + [flow_tag]"=&w"(flow_tag) + :[p3]"r"(p3), [p2]"r"(p2), [p1]"r"(p1), [p0]"r"(p0), + [e3]"r"(e3), [e2]"r"(e2), [e1]"r"(e1), [e0]"r"(e0), + [c3]"w"(c3), [c2]"w"(c2), [c1]"w"(c1), [c0]"w"(c0), + [elts_p]"r"(elts_p), + [pkts_p]"r"(pkts_p), + [cqe_shuf_m]"w"(cqe_shuf_m), + [mb_shuf_m]"w"(mb_shuf_m), + [owner_shuf_m]"w"(owner_shuf_m), + [len_shuf_m]"w"(len_shuf_m), + [ptype_shuf_m]"w"(ptype_shuf_m), + [ftag_shuf_m]"w"(ftag_shuf_m), + [crc_adj]"w"(crc_adj), + [flow_mark_adj]"w"(flow_mark_adj) + :"memory", + "v12", "v13", "v14", "v15", + "v16", "v17", "v18", "v19", + "v20", "v21", "v22", "v23", + "v24", "v25"); + /* D.2 flip owner bit to mark CQEs from last round. */ + owner_mask = vand_u16(op_own, owner_check); + owner_mask = vceq_u16(owner_mask, ownership); + /* D.3 get mask for invalidated CQEs. */ + opcode = vand_u16(op_own, opcode_check); + invalid_mask = vceq_u16(opcode_check, opcode); + /* E.1 find compressed CQE format. */ + comp_mask = vand_u16(op_own, format_check); + comp_mask = vceq_u16(comp_mask, format_check); + /* D.4 mask out beyond boundary. */ + invalid_mask = vorr_u16(invalid_mask, mask); + /* D.5 merge invalid_mask with invalid owner. */ + invalid_mask = vorr_u16(invalid_mask, owner_mask); + /* E.2 mask out invalid entries. */ + comp_mask = vbic_u16(comp_mask, invalid_mask); + /* E.3 get the first compressed CQE. */ + comp_idx = __builtin_clzl(vget_lane_u64(vreinterpret_u64_u16( + comp_mask), 0)) / + (sizeof(uint16_t) * 8); + /* D.6 mask out entries after the compressed CQE. */ + mask = vcreate_u16(comp_idx < MLX5_VPMD_DESCS_PER_LOOP ? + -1UL >> (comp_idx * sizeof(uint16_t) * 8) : + 0); + invalid_mask = vorr_u16(invalid_mask, mask); + /* D.7 count non-compressed valid CQEs. */ + n = __builtin_clzl(vget_lane_u64(vreinterpret_u64_u16( + invalid_mask), 0)) / (sizeof(uint16_t) * 8); + nocmp_n += n; + /* D.2 get the final invalid mask. */ + mask = vcreate_u16(n < MLX5_VPMD_DESCS_PER_LOOP ? + -1UL >> (n * sizeof(uint16_t) * 8) : 0); + invalid_mask = vorr_u16(invalid_mask, mask); + /* D.3 check error in opcode. */ + opcode = vceq_u16(resp_err_check, opcode); + opcode = vbic_u16(opcode, invalid_mask); + /* D.4 mark if any error is set */ + *err |= vget_lane_u64(vreinterpret_u64_u16(opcode), 0); + /* C.4 fill in mbuf - rearm_data and packet_type. */ + rxq_cq_to_ptype_oflags_v(rxq, ptype_info, flow_tag, + opcode, &elts[pos]); + if (rxq->hw_timestamp) { + elts[pos]->timestamp = + rte_be_to_cpu_64( + container_of(p0, struct mlx5_cqe, + pkt_info)->timestamp); + elts[pos + 1]->timestamp = + rte_be_to_cpu_64( + container_of(p1, struct mlx5_cqe, + pkt_info)->timestamp); + elts[pos + 2]->timestamp = + rte_be_to_cpu_64( + container_of(p2, struct mlx5_cqe, + pkt_info)->timestamp); + elts[pos + 3]->timestamp = + rte_be_to_cpu_64( + container_of(p3, struct mlx5_cqe, + pkt_info)->timestamp); + } + if (!!rxq->flow_meta_mask) { + /* This code is subject for futher optimization. 
*/ + int32_t offs = rxq->flow_meta_offset; + + *RTE_MBUF_DYNFIELD(pkts[pos], offs, uint32_t *) = + container_of(p0, struct mlx5_cqe, + pkt_info)->flow_table_metadata; + *RTE_MBUF_DYNFIELD(pkts[pos + 1], offs, uint32_t *) = + container_of(p1, struct mlx5_cqe, + pkt_info)->flow_table_metadata; + *RTE_MBUF_DYNFIELD(pkts[pos + 2], offs, uint32_t *) = + container_of(p2, struct mlx5_cqe, + pkt_info)->flow_table_metadata; + *RTE_MBUF_DYNFIELD(pkts[pos + 3], offs, uint32_t *) = + container_of(p3, struct mlx5_cqe, + pkt_info)->flow_table_metadata; + if (*RTE_MBUF_DYNFIELD(pkts[pos], offs, uint32_t *)) + elts[pos]->ol_flags |= rxq->flow_meta_mask; + if (*RTE_MBUF_DYNFIELD(pkts[pos + 1], offs, uint32_t *)) + elts[pos + 1]->ol_flags |= rxq->flow_meta_mask; + if (*RTE_MBUF_DYNFIELD(pkts[pos + 2], offs, uint32_t *)) + elts[pos + 2]->ol_flags |= rxq->flow_meta_mask; + if (*RTE_MBUF_DYNFIELD(pkts[pos + 3], offs, uint32_t *)) + elts[pos + 3]->ol_flags |= rxq->flow_meta_mask; + } +#ifdef MLX5_PMD_SOFT_COUNTERS + /* Add up received bytes count. */ + byte_cnt = vbic_u16(byte_cnt, invalid_mask); + rcvd_byte += vget_lane_u64(vpaddl_u32(vpaddl_u16(byte_cnt)), 0); +#endif + /* + * Break the loop unless more valid CQE is expected, or if + * there's a compressed CQE. + */ + if (n != MLX5_VPMD_DESCS_PER_LOOP) + break; + } + /* If no new CQE seen, return without updating cq_db. */ + if (unlikely(!nocmp_n && comp_idx == MLX5_VPMD_DESCS_PER_LOOP)) + return rcvd_pkt; + /* Update the consumer indexes for non-compressed CQEs. */ + MLX5_ASSERT(nocmp_n <= pkts_n); + rxq->cq_ci += nocmp_n; + rxq->rq_pi += nocmp_n; + rcvd_pkt += nocmp_n; +#ifdef MLX5_PMD_SOFT_COUNTERS + rxq->stats.ipackets += nocmp_n; + rxq->stats.ibytes += rcvd_byte; +#endif + /* Decompress the last CQE if compressed. */ + if (comp_idx < MLX5_VPMD_DESCS_PER_LOOP && comp_idx == n) { + MLX5_ASSERT(comp_idx == (nocmp_n % MLX5_VPMD_DESCS_PER_LOOP)); + rxq->decompressed = rxq_cq_decompress_v(rxq, &cq[nocmp_n], + &elts[nocmp_n]); + /* Return more packets if needed. */ + if (nocmp_n < pkts_n) { + uint16_t n = rxq->decompressed; + + n = RTE_MIN(n, pkts_n - nocmp_n); + rxq_copy_mbuf_v(rxq, &pkts[nocmp_n], n); + rxq->rq_pi += n; + rcvd_pkt += n; + rxq->decompressed -= n; + } + } + rte_cio_wmb(); + *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci); + return rcvd_pkt; +} + +#endif /* RTE_PMD_MLX5_RXTX_VEC_NEON_H_ */ diff --git a/src/spdk/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_sse.h b/src/spdk/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_sse.h new file mode 100644 index 000000000..6847ae782 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_sse.h @@ -0,0 +1,731 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2017 6WIND S.A. + * Copyright 2017 Mellanox Technologies, Ltd + */ + +#ifndef RTE_PMD_MLX5_RXTX_VEC_SSE_H_ +#define RTE_PMD_MLX5_RXTX_VEC_SSE_H_ + +#include +#include +#include +#include + +#include +#include +#include + +#include + +#include "mlx5_defs.h" +#include "mlx5.h" +#include "mlx5_utils.h" +#include "mlx5_rxtx.h" +#include "mlx5_rxtx_vec.h" +#include "mlx5_autoconf.h" + +#ifndef __INTEL_COMPILER +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif + +/** + * Copy mbufs from the RX SW ring to the array of packets to be returned. + * + * @param rxq + * Pointer to RX queue structure. + * @param pkts + * Pointer to array of packets to be stored. + * @param n + * Number of packets to be stored.
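+ * + * Pointers are copied two at a time with unaligned 16-byte loads and stores; a trailing odd pointer, if any, is copied by plain assignment.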
+ */ +static inline void +rxq_copy_mbuf_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t n) +{ + const uint16_t q_mask = (1 << rxq->elts_n) - 1; + struct rte_mbuf **elts = &(*rxq->elts)[rxq->rq_pi & q_mask]; + unsigned int pos; + uint16_t p = n & -2; + + for (pos = 0; pos < p; pos += 2) { + __m128i mbp; + + mbp = _mm_loadu_si128((__m128i *)&elts[pos]); + _mm_storeu_si128((__m128i *)&pkts[pos], mbp); + } + if (n & 1) + pkts[pos] = elts[pos]; +} + +/** + * Decompress a compressed completion and fill in mbufs in RX SW ring with data + * extracted from the title completion descriptor. + * + * @param rxq + * Pointer to RX queue structure. + * @param cq + * Pointer to completion array having a compressed completion at first. + * @param elts + * Pointer to SW ring to be filled. The first mbuf has to be pre-built from + * the title completion descriptor to be copied to the rest of mbufs. + * + * @return + * Number of mini-CQEs successfully decompressed. + */ +static inline uint16_t +rxq_cq_decompress_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq, + struct rte_mbuf **elts) +{ + volatile struct mlx5_mini_cqe8 *mcq = (void *)(cq + 1); + struct rte_mbuf *t_pkt = elts[0]; /* Title packet is pre-built. */ + unsigned int pos; + unsigned int i; + unsigned int inv = 0; + /* Mask to shuffle from extracted mini CQE to mbuf. */ + const __m128i shuf_mask1 = + _mm_set_epi8(0, 1, 2, 3, /* rss, bswap32 */ + -1, -1, /* skip vlan_tci */ + 6, 7, /* data_len, bswap16 */ + -1, -1, 6, 7, /* pkt_len, bswap16 */ + -1, -1, -1, -1 /* skip packet_type */); + const __m128i shuf_mask2 = + _mm_set_epi8(8, 9, 10, 11, /* rss, bswap32 */ + -1, -1, /* skip vlan_tci */ + 14, 15, /* data_len, bswap16 */ + -1, -1, 14, 15, /* pkt_len, bswap16 */ + -1, -1, -1, -1 /* skip packet_type */); + /* Restore the compressed count. Must be 16 bits. */ + const uint16_t mcqe_n = t_pkt->data_len + + (rxq->crc_present * RTE_ETHER_CRC_LEN); + const __m128i rearm = + _mm_loadu_si128((__m128i *)&t_pkt->rearm_data); + const __m128i rxdf = + _mm_loadu_si128((__m128i *)&t_pkt->rx_descriptor_fields1); + const __m128i crc_adj = + _mm_set_epi16(0, 0, 0, + rxq->crc_present * RTE_ETHER_CRC_LEN, + 0, + rxq->crc_present * RTE_ETHER_CRC_LEN, + 0, 0); + const uint32_t flow_tag = t_pkt->hash.fdir.hi; +#ifdef MLX5_PMD_SOFT_COUNTERS + const __m128i zero = _mm_setzero_si128(); + const __m128i ones = _mm_cmpeq_epi32(zero, zero); + uint32_t rcvd_byte = 0; + /* Mask to shuffle byte_cnt to add up stats. Do bswap16 for all. */ + const __m128i len_shuf_mask = + _mm_set_epi8(-1, -1, -1, -1, + -1, -1, -1, -1, + 14, 15, 6, 7, + 10, 11, 2, 3); +#endif + /* + * A. load mCQEs into a 128bit register. + * B. store rearm data to mbuf. + * C. combine data from mCQEs with rx_descriptor_fields1. + * D. store rx_descriptor_fields1. + * E. store flow tag (rte_flow mark). + */ + for (pos = 0; pos < mcqe_n; ) { + __m128i mcqe1, mcqe2; + __m128i rxdf1, rxdf2; +#ifdef MLX5_PMD_SOFT_COUNTERS + __m128i byte_cnt, invalid_mask; +#endif + + for (i = 0; i < MLX5_VPMD_DESCS_PER_LOOP; ++i) + if (likely(pos + i < mcqe_n)) + rte_prefetch0((void *)(cq + pos + i)); + + /* A.1 load mCQEs into a 128bit register. */ + mcqe1 = _mm_loadu_si128((__m128i *)&mcq[pos % 8]); + mcqe2 = _mm_loadu_si128((__m128i *)&mcq[pos % 8 + 2]); + /* B.1 store rearm data to mbuf. */ + _mm_storeu_si128((__m128i *)&elts[pos]->rearm_data, rearm); + _mm_storeu_si128((__m128i *)&elts[pos + 1]->rearm_data, rearm); + /* C.1 combine data from mCQEs with rx_descriptor_fields1. 
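Each mini-CQE contributes the byte count and RSS hash; the remaining descriptor fields are blended back in from the pre-built title packet (rxdf).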
*/ + rxdf1 = _mm_shuffle_epi8(mcqe1, shuf_mask1); + rxdf2 = _mm_shuffle_epi8(mcqe1, shuf_mask2); + rxdf1 = _mm_sub_epi16(rxdf1, crc_adj); + rxdf2 = _mm_sub_epi16(rxdf2, crc_adj); + rxdf1 = _mm_blend_epi16(rxdf1, rxdf, 0x23); + rxdf2 = _mm_blend_epi16(rxdf2, rxdf, 0x23); + /* D.1 store rx_descriptor_fields1. */ + _mm_storeu_si128((__m128i *) + &elts[pos]->rx_descriptor_fields1, + rxdf1); + _mm_storeu_si128((__m128i *) + &elts[pos + 1]->rx_descriptor_fields1, + rxdf2); + /* B.1 store rearm data to mbuf. */ + _mm_storeu_si128((__m128i *)&elts[pos + 2]->rearm_data, rearm); + _mm_storeu_si128((__m128i *)&elts[pos + 3]->rearm_data, rearm); + /* C.1 combine data from mCQEs with rx_descriptor_fields1. */ + rxdf1 = _mm_shuffle_epi8(mcqe2, shuf_mask1); + rxdf2 = _mm_shuffle_epi8(mcqe2, shuf_mask2); + rxdf1 = _mm_sub_epi16(rxdf1, crc_adj); + rxdf2 = _mm_sub_epi16(rxdf2, crc_adj); + rxdf1 = _mm_blend_epi16(rxdf1, rxdf, 0x23); + rxdf2 = _mm_blend_epi16(rxdf2, rxdf, 0x23); + /* D.1 store rx_descriptor_fields1. */ + _mm_storeu_si128((__m128i *) + &elts[pos + 2]->rx_descriptor_fields1, + rxdf1); + _mm_storeu_si128((__m128i *) + &elts[pos + 3]->rx_descriptor_fields1, + rxdf2); +#ifdef MLX5_PMD_SOFT_COUNTERS + invalid_mask = _mm_set_epi64x(0, + (mcqe_n - pos) * + sizeof(uint16_t) * 8); + invalid_mask = _mm_sll_epi64(ones, invalid_mask); + mcqe1 = _mm_srli_si128(mcqe1, 4); + byte_cnt = _mm_blend_epi16(mcqe1, mcqe2, 0xcc); + byte_cnt = _mm_shuffle_epi8(byte_cnt, len_shuf_mask); + byte_cnt = _mm_andnot_si128(invalid_mask, byte_cnt); + byte_cnt = _mm_hadd_epi16(byte_cnt, zero); + rcvd_byte += _mm_cvtsi128_si64(_mm_hadd_epi16(byte_cnt, zero)); +#endif + if (rxq->mark) { + /* E.1 store flow tag (rte_flow mark). */ + elts[pos]->hash.fdir.hi = flow_tag; + elts[pos + 1]->hash.fdir.hi = flow_tag; + elts[pos + 2]->hash.fdir.hi = flow_tag; + elts[pos + 3]->hash.fdir.hi = flow_tag; + } + if (rxq->dynf_meta) { + int32_t offs = rxq->flow_meta_offset; + const uint32_t meta = + *RTE_MBUF_DYNFIELD(t_pkt, offs, uint32_t *); + + /* Check if title packet has valid metadata. */ + if (meta) { + MLX5_ASSERT(t_pkt->ol_flags & + rxq->flow_meta_mask); + *RTE_MBUF_DYNFIELD(elts[pos], offs, + uint32_t *) = meta; + *RTE_MBUF_DYNFIELD(elts[pos + 1], offs, + uint32_t *) = meta; + *RTE_MBUF_DYNFIELD(elts[pos + 2], offs, + uint32_t *) = meta; + *RTE_MBUF_DYNFIELD(elts[pos + 3], offs, + uint32_t *) = meta; + } + } + pos += MLX5_VPMD_DESCS_PER_LOOP; + /* Move to next CQE and invalidate consumed CQEs. */ + if (!(pos & 0x7) && pos < mcqe_n) { + mcq = (void *)(cq + pos); + for (i = 0; i < 8; ++i) + cq[inv++].op_own = MLX5_CQE_INVALIDATE; + } + } + /* Invalidate the rest of CQEs. */ + for (; inv < mcqe_n; ++inv) + cq[inv].op_own = MLX5_CQE_INVALIDATE; +#ifdef MLX5_PMD_SOFT_COUNTERS + rxq->stats.ipackets += mcqe_n; + rxq->stats.ibytes += rcvd_byte; +#endif + rxq->cq_ci += mcqe_n; + return mcqe_n; +} + +/** + * Calculate packet type and offload flag for mbuf and store it. + * + * @param rxq + * Pointer to RX queue structure. + * @param cqes[4] + * Array of four 16bytes completions extracted from the original completion + * descriptor. + * @param op_err + * Opcode vector having responder error status. Each field is 4B. + * @param pkts + * Pointer to array of packets to be filled. 
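+ * + * The cqes[] entries use the 16-byte per-CQE layout produced by rxq_burst_v() below; pkts must provide at least four mbufs.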
+ */ +static inline void +rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq, __m128i cqes[4], + __m128i op_err, struct rte_mbuf **pkts) +{ + __m128i pinfo0, pinfo1; + __m128i pinfo, ptype; + __m128i ol_flags = _mm_set1_epi32(rxq->rss_hash * PKT_RX_RSS_HASH | + rxq->hw_timestamp * PKT_RX_TIMESTAMP); + __m128i cv_flags; + const __m128i zero = _mm_setzero_si128(); + const __m128i ptype_mask = + _mm_set_epi32(0xfd06, 0xfd06, 0xfd06, 0xfd06); + const __m128i ptype_ol_mask = + _mm_set_epi32(0x106, 0x106, 0x106, 0x106); + const __m128i pinfo_mask = + _mm_set_epi32(0x3, 0x3, 0x3, 0x3); + const __m128i cv_flag_sel = + _mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0, 0, + (uint8_t)((PKT_RX_IP_CKSUM_GOOD | + PKT_RX_L4_CKSUM_GOOD) >> 1), + 0, + (uint8_t)(PKT_RX_L4_CKSUM_GOOD >> 1), + 0, + (uint8_t)(PKT_RX_IP_CKSUM_GOOD >> 1), + (uint8_t)(PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED), + 0); + const __m128i cv_mask = + _mm_set_epi32(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD | + PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, + PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD | + PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, + PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD | + PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, + PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD | + PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED); + const __m128i mbuf_init = + _mm_load_si128((__m128i *)&rxq->mbuf_initializer); + __m128i rearm0, rearm1, rearm2, rearm3; + uint8_t pt_idx0, pt_idx1, pt_idx2, pt_idx3; + + /* Extract pkt_info field. */ + pinfo0 = _mm_unpacklo_epi32(cqes[0], cqes[1]); + pinfo1 = _mm_unpacklo_epi32(cqes[2], cqes[3]); + pinfo = _mm_unpacklo_epi64(pinfo0, pinfo1); + /* Extract hdr_type_etc field. */ + pinfo0 = _mm_unpackhi_epi32(cqes[0], cqes[1]); + pinfo1 = _mm_unpackhi_epi32(cqes[2], cqes[3]); + ptype = _mm_unpacklo_epi64(pinfo0, pinfo1); + if (rxq->mark) { + const __m128i pinfo_ft_mask = + _mm_set_epi32(0xffffff00, 0xffffff00, + 0xffffff00, 0xffffff00); + const __m128i fdir_flags = _mm_set1_epi32(PKT_RX_FDIR); + __m128i fdir_id_flags = _mm_set1_epi32(PKT_RX_FDIR_ID); + __m128i flow_tag, invalid_mask; + + flow_tag = _mm_and_si128(pinfo, pinfo_ft_mask); + /* Check if flow tag is non-zero then set PKT_RX_FDIR. */ + invalid_mask = _mm_cmpeq_epi32(flow_tag, zero); + ol_flags = _mm_or_si128(ol_flags, + _mm_andnot_si128(invalid_mask, + fdir_flags)); + /* Mask out invalid entries. */ + fdir_id_flags = _mm_andnot_si128(invalid_mask, fdir_id_flags); + /* Check if flow tag MLX5_FLOW_MARK_DEFAULT. */ + ol_flags = _mm_or_si128(ol_flags, + _mm_andnot_si128( + _mm_cmpeq_epi32(flow_tag, + pinfo_ft_mask), + fdir_id_flags)); + } + /* + * Merge the two fields to generate the following: + * bit[1] = l3_ok + * bit[2] = l4_ok + * bit[8] = cv + * bit[11:10] = l3_hdr_type + * bit[14:12] = l4_hdr_type + * bit[15] = ip_frag + * bit[16] = tunneled + * bit[17] = outer_l3_type + */ + ptype = _mm_and_si128(ptype, ptype_mask); + pinfo = _mm_and_si128(pinfo, pinfo_mask); + pinfo = _mm_slli_epi32(pinfo, 16); + /* Make pinfo has merged fields for ol_flags calculation. */ + pinfo = _mm_or_si128(ptype, pinfo); + ptype = _mm_srli_epi32(pinfo, 10); + ptype = _mm_packs_epi32(ptype, zero); + /* Errored packets will have RTE_PTYPE_ALL_MASK. 
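The error status is OR-ed into the ptype lookup index so the mlx5_ptype_table lookup resolves to RTE_PTYPE_ALL_MASK for those completions.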
*/ + op_err = _mm_srli_epi16(op_err, 8); + ptype = _mm_or_si128(ptype, op_err); + pt_idx0 = _mm_extract_epi8(ptype, 0); + pt_idx1 = _mm_extract_epi8(ptype, 2); + pt_idx2 = _mm_extract_epi8(ptype, 4); + pt_idx3 = _mm_extract_epi8(ptype, 6); + pkts[0]->packet_type = mlx5_ptype_table[pt_idx0] | + !!(pt_idx0 & (1 << 6)) * rxq->tunnel; + pkts[1]->packet_type = mlx5_ptype_table[pt_idx1] | + !!(pt_idx1 & (1 << 6)) * rxq->tunnel; + pkts[2]->packet_type = mlx5_ptype_table[pt_idx2] | + !!(pt_idx2 & (1 << 6)) * rxq->tunnel; + pkts[3]->packet_type = mlx5_ptype_table[pt_idx3] | + !!(pt_idx3 & (1 << 6)) * rxq->tunnel; + /* Fill flags for checksum and VLAN. */ + pinfo = _mm_and_si128(pinfo, ptype_ol_mask); + pinfo = _mm_shuffle_epi8(cv_flag_sel, pinfo); + /* Locate checksum flags at byte[2:1] and merge with VLAN flags. */ + cv_flags = _mm_slli_epi32(pinfo, 9); + cv_flags = _mm_or_si128(pinfo, cv_flags); + /* Move back flags to start from byte[0]. */ + cv_flags = _mm_srli_epi32(cv_flags, 8); + /* Mask out garbage bits. */ + cv_flags = _mm_and_si128(cv_flags, cv_mask); + /* Merge to ol_flags. */ + ol_flags = _mm_or_si128(ol_flags, cv_flags); + /* Merge mbuf_init and ol_flags. */ + rearm0 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(ol_flags, 8), 0x30); + rearm1 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(ol_flags, 4), 0x30); + rearm2 = _mm_blend_epi16(mbuf_init, ol_flags, 0x30); + rearm3 = _mm_blend_epi16(mbuf_init, _mm_srli_si128(ol_flags, 4), 0x30); + /* Write 8B rearm_data and 8B ol_flags. */ + _mm_store_si128((__m128i *)&pkts[0]->rearm_data, rearm0); + _mm_store_si128((__m128i *)&pkts[1]->rearm_data, rearm1); + _mm_store_si128((__m128i *)&pkts[2]->rearm_data, rearm2); + _mm_store_si128((__m128i *)&pkts[3]->rearm_data, rearm3); +} + +/** + * Receive burst of packets. An errored completion also consumes a mbuf, but the + * packet_type is set to be RTE_PTYPE_ALL_MASK. Marked mbufs should be freed + * before returning to application. + * + * @param rxq + * Pointer to RX queue structure. + * @param[out] pkts + * Array to store received packets. + * @param pkts_n + * Maximum number of packets in array. + * @param[out] err + * Pointer to a flag. Set non-zero value if pkts array has at least one error + * packet to handle. + * + * @return + * Number of packets received including errors (<= pkts_n). + */ +static inline uint16_t +rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n, + uint64_t *err) +{ + const uint16_t q_n = 1 << rxq->cqe_n; + const uint16_t q_mask = q_n - 1; + volatile struct mlx5_cqe *cq; + struct rte_mbuf **elts; + unsigned int pos; + uint64_t n; + uint16_t repl_n; + uint64_t comp_idx = MLX5_VPMD_DESCS_PER_LOOP; + uint16_t nocmp_n = 0; + uint16_t rcvd_pkt = 0; + unsigned int cq_idx = rxq->cq_ci & q_mask; + unsigned int elts_idx; + unsigned int ownership = !!(rxq->cq_ci & (q_mask + 1)); + const __m128i owner_check = + _mm_set_epi64x(0x0100000001000000LL, 0x0100000001000000LL); + const __m128i opcode_check = + _mm_set_epi64x(0xf0000000f0000000LL, 0xf0000000f0000000LL); + const __m128i format_check = + _mm_set_epi64x(0x0c0000000c000000LL, 0x0c0000000c000000LL); + const __m128i resp_err_check = + _mm_set_epi64x(0xe0000000e0000000LL, 0xe0000000e0000000LL); +#ifdef MLX5_PMD_SOFT_COUNTERS + uint32_t rcvd_byte = 0; + /* Mask to shuffle byte_cnt to add up stats. Do bswap16 for all. */ + const __m128i len_shuf_mask = + _mm_set_epi8(-1, -1, -1, -1, + -1, -1, -1, -1, + 12, 13, 8, 9, + 4, 5, 0, 1); +#endif + /* Mask to shuffle from extracted CQE to mbuf. 
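Byte order is swapped where needed and the fields are laid out to match the mbuf rx_descriptor_fields1 layout (pkt_len, data_len, vlan_tci, hash).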
*/ + const __m128i shuf_mask = + _mm_set_epi8(-1, 3, 2, 1, /* fdir.hi */ + 12, 13, 14, 15, /* rss, bswap32 */ + 10, 11, /* vlan_tci, bswap16 */ + 4, 5, /* data_len, bswap16 */ + -1, -1, /* zero out 2nd half of pkt_len */ + 4, 5 /* pkt_len, bswap16 */); + /* Mask to blend from the last Qword to the first DQword. */ + const __m128i blend_mask = + _mm_set_epi8(-1, -1, -1, -1, + -1, -1, -1, -1, + 0, 0, 0, 0, + 0, 0, 0, -1); + const __m128i zero = _mm_setzero_si128(); + const __m128i ones = _mm_cmpeq_epi32(zero, zero); + const __m128i crc_adj = + _mm_set_epi16(0, 0, 0, 0, 0, + rxq->crc_present * RTE_ETHER_CRC_LEN, + 0, + rxq->crc_present * RTE_ETHER_CRC_LEN); + const __m128i flow_mark_adj = _mm_set_epi32(rxq->mark * (-1), 0, 0, 0); + + MLX5_ASSERT(rxq->sges_n == 0); + MLX5_ASSERT(rxq->cqe_n == rxq->elts_n); + cq = &(*rxq->cqes)[cq_idx]; + rte_prefetch0(cq); + rte_prefetch0(cq + 1); + rte_prefetch0(cq + 2); + rte_prefetch0(cq + 3); + pkts_n = RTE_MIN(pkts_n, MLX5_VPMD_RX_MAX_BURST); + repl_n = q_n - (rxq->rq_ci - rxq->rq_pi); + if (repl_n >= rxq->rq_repl_thresh) + mlx5_rx_replenish_bulk_mbuf(rxq, repl_n); + /* See if there're unreturned mbufs from compressed CQE. */ + rcvd_pkt = rxq->decompressed; + if (rcvd_pkt > 0) { + rcvd_pkt = RTE_MIN(rcvd_pkt, pkts_n); + rxq_copy_mbuf_v(rxq, pkts, rcvd_pkt); + rxq->rq_pi += rcvd_pkt; + rxq->decompressed -= rcvd_pkt; + pkts += rcvd_pkt; + } + elts_idx = rxq->rq_pi & q_mask; + elts = &(*rxq->elts)[elts_idx]; + /* Not to overflow pkts array. */ + pkts_n = RTE_ALIGN_FLOOR(pkts_n - rcvd_pkt, MLX5_VPMD_DESCS_PER_LOOP); + /* Not to cross queue end. */ + pkts_n = RTE_MIN(pkts_n, q_n - elts_idx); + pkts_n = RTE_MIN(pkts_n, q_n - cq_idx); + if (!pkts_n) + return rcvd_pkt; + /* At this point, there shouldn't be any remaining packets. */ + MLX5_ASSERT(rxq->decompressed == 0); + /* + * A. load first Qword (8bytes) in one loop. + * B. copy 4 mbuf pointers from elts ring to returning pkts. + * C. load remaining CQE data and extract necessary fields. + * Final 16bytes cqes[] extracted from original 64bytes CQE has the + * following structure: + * struct { + * uint8_t pkt_info; + * uint8_t flow_tag[3]; + * uint16_t byte_cnt; + * uint8_t rsvd4; + * uint8_t op_own; + * uint16_t hdr_type_etc; + * uint16_t vlan_info; + * uint32_t rx_hash_res; + * } c; + * D. fill in mbuf. + * E. get valid CQEs. + * F. find compressed CQE. + */ + for (pos = 0; + pos < pkts_n; + pos += MLX5_VPMD_DESCS_PER_LOOP) { + __m128i cqes[MLX5_VPMD_DESCS_PER_LOOP]; + __m128i cqe_tmp1, cqe_tmp2; + __m128i pkt_mb0, pkt_mb1, pkt_mb2, pkt_mb3; + __m128i op_own, op_own_tmp1, op_own_tmp2; + __m128i opcode, owner_mask, invalid_mask; + __m128i comp_mask; + __m128i mask; +#ifdef MLX5_PMD_SOFT_COUNTERS + __m128i byte_cnt; +#endif + __m128i mbp1, mbp2; + __m128i p = _mm_set_epi16(0, 0, 0, 0, 3, 2, 1, 0); + unsigned int p1, p2, p3; + + /* Prefetch next 4 CQEs. */ + if (pkts_n - pos >= 2 * MLX5_VPMD_DESCS_PER_LOOP) { + rte_prefetch0(&cq[pos + MLX5_VPMD_DESCS_PER_LOOP]); + rte_prefetch0(&cq[pos + MLX5_VPMD_DESCS_PER_LOOP + 1]); + rte_prefetch0(&cq[pos + MLX5_VPMD_DESCS_PER_LOOP + 2]); + rte_prefetch0(&cq[pos + MLX5_VPMD_DESCS_PER_LOOP + 3]); + } + /* A.0 do not cross the end of CQ. */ + mask = _mm_set_epi64x(0, (pkts_n - pos) * sizeof(uint16_t) * 8); + mask = _mm_sll_epi64(ones, mask); + p = _mm_andnot_si128(mask, p); + /* A.1 load cqes.
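Only the 8-byte block holding op_own is loaded per CQE at this stage; the compiler barriers keep these loads in the intended order.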
*/ + p3 = _mm_extract_epi16(p, 3); + cqes[3] = _mm_loadl_epi64((__m128i *) + &cq[pos + p3].sop_drop_qpn); + rte_compiler_barrier(); + p2 = _mm_extract_epi16(p, 2); + cqes[2] = _mm_loadl_epi64((__m128i *) + &cq[pos + p2].sop_drop_qpn); + rte_compiler_barrier(); + /* B.1 load mbuf pointers. */ + mbp1 = _mm_loadu_si128((__m128i *)&elts[pos]); + mbp2 = _mm_loadu_si128((__m128i *)&elts[pos + 2]); + /* A.1 load a block having op_own. */ + p1 = _mm_extract_epi16(p, 1); + cqes[1] = _mm_loadl_epi64((__m128i *) + &cq[pos + p1].sop_drop_qpn); + rte_compiler_barrier(); + cqes[0] = _mm_loadl_epi64((__m128i *) + &cq[pos].sop_drop_qpn); + /* B.2 copy mbuf pointers. */ + _mm_storeu_si128((__m128i *)&pkts[pos], mbp1); + _mm_storeu_si128((__m128i *)&pkts[pos + 2], mbp2); + rte_cio_rmb(); + /* C.1 load remained CQE data and extract necessary fields. */ + cqe_tmp2 = _mm_load_si128((__m128i *)&cq[pos + p3]); + cqe_tmp1 = _mm_load_si128((__m128i *)&cq[pos + p2]); + cqes[3] = _mm_blendv_epi8(cqes[3], cqe_tmp2, blend_mask); + cqes[2] = _mm_blendv_epi8(cqes[2], cqe_tmp1, blend_mask); + cqe_tmp2 = _mm_loadu_si128((__m128i *)&cq[pos + p3].csum); + cqe_tmp1 = _mm_loadu_si128((__m128i *)&cq[pos + p2].csum); + cqes[3] = _mm_blend_epi16(cqes[3], cqe_tmp2, 0x30); + cqes[2] = _mm_blend_epi16(cqes[2], cqe_tmp1, 0x30); + cqe_tmp2 = _mm_loadl_epi64((__m128i *)&cq[pos + p3].rsvd4[2]); + cqe_tmp1 = _mm_loadl_epi64((__m128i *)&cq[pos + p2].rsvd4[2]); + cqes[3] = _mm_blend_epi16(cqes[3], cqe_tmp2, 0x04); + cqes[2] = _mm_blend_epi16(cqes[2], cqe_tmp1, 0x04); + /* C.2 generate final structure for mbuf with swapping bytes. */ + pkt_mb3 = _mm_shuffle_epi8(cqes[3], shuf_mask); + pkt_mb2 = _mm_shuffle_epi8(cqes[2], shuf_mask); + /* C.3 adjust CRC length. */ + pkt_mb3 = _mm_sub_epi16(pkt_mb3, crc_adj); + pkt_mb2 = _mm_sub_epi16(pkt_mb2, crc_adj); + /* C.4 adjust flow mark. */ + pkt_mb3 = _mm_add_epi32(pkt_mb3, flow_mark_adj); + pkt_mb2 = _mm_add_epi32(pkt_mb2, flow_mark_adj); + /* D.1 fill in mbuf - rx_descriptor_fields1. */ + _mm_storeu_si128((void *)&pkts[pos + 3]->pkt_len, pkt_mb3); + _mm_storeu_si128((void *)&pkts[pos + 2]->pkt_len, pkt_mb2); + /* E.1 extract op_own field. */ + op_own_tmp2 = _mm_unpacklo_epi32(cqes[2], cqes[3]); + /* C.1 load remained CQE data and extract necessary fields. */ + cqe_tmp2 = _mm_load_si128((__m128i *)&cq[pos + p1]); + cqe_tmp1 = _mm_load_si128((__m128i *)&cq[pos]); + cqes[1] = _mm_blendv_epi8(cqes[1], cqe_tmp2, blend_mask); + cqes[0] = _mm_blendv_epi8(cqes[0], cqe_tmp1, blend_mask); + cqe_tmp2 = _mm_loadu_si128((__m128i *)&cq[pos + p1].csum); + cqe_tmp1 = _mm_loadu_si128((__m128i *)&cq[pos].csum); + cqes[1] = _mm_blend_epi16(cqes[1], cqe_tmp2, 0x30); + cqes[0] = _mm_blend_epi16(cqes[0], cqe_tmp1, 0x30); + cqe_tmp2 = _mm_loadl_epi64((__m128i *)&cq[pos + p1].rsvd4[2]); + cqe_tmp1 = _mm_loadl_epi64((__m128i *)&cq[pos].rsvd4[2]); + cqes[1] = _mm_blend_epi16(cqes[1], cqe_tmp2, 0x04); + cqes[0] = _mm_blend_epi16(cqes[0], cqe_tmp1, 0x04); + /* C.2 generate final structure for mbuf with swapping bytes. */ + pkt_mb1 = _mm_shuffle_epi8(cqes[1], shuf_mask); + pkt_mb0 = _mm_shuffle_epi8(cqes[0], shuf_mask); + /* C.3 adjust CRC length. */ + pkt_mb1 = _mm_sub_epi16(pkt_mb1, crc_adj); + pkt_mb0 = _mm_sub_epi16(pkt_mb0, crc_adj); + /* C.4 adjust flow mark. */ + pkt_mb1 = _mm_add_epi32(pkt_mb1, flow_mark_adj); + pkt_mb0 = _mm_add_epi32(pkt_mb0, flow_mark_adj); + /* E.1 extract op_own byte. 
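op_own from all four CQEs is gathered into a single vector so the ownership, opcode and compressed-format checks can be done with a few vector compares.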
*/ + op_own_tmp1 = _mm_unpacklo_epi32(cqes[0], cqes[1]); + op_own = _mm_unpackhi_epi64(op_own_tmp1, op_own_tmp2); + /* D.1 fill in mbuf - rx_descriptor_fields1. */ + _mm_storeu_si128((void *)&pkts[pos + 1]->pkt_len, pkt_mb1); + _mm_storeu_si128((void *)&pkts[pos]->pkt_len, pkt_mb0); + /* E.2 flip owner bit to mark CQEs from last round. */ + owner_mask = _mm_and_si128(op_own, owner_check); + if (ownership) + owner_mask = _mm_xor_si128(owner_mask, owner_check); + owner_mask = _mm_cmpeq_epi32(owner_mask, owner_check); + owner_mask = _mm_packs_epi32(owner_mask, zero); + /* E.3 get mask for invalidated CQEs. */ + opcode = _mm_and_si128(op_own, opcode_check); + invalid_mask = _mm_cmpeq_epi32(opcode_check, opcode); + invalid_mask = _mm_packs_epi32(invalid_mask, zero); + /* E.4 mask out beyond boundary. */ + invalid_mask = _mm_or_si128(invalid_mask, mask); + /* E.5 merge invalid_mask with invalid owner. */ + invalid_mask = _mm_or_si128(invalid_mask, owner_mask); + /* F.1 find compressed CQE format. */ + comp_mask = _mm_and_si128(op_own, format_check); + comp_mask = _mm_cmpeq_epi32(comp_mask, format_check); + comp_mask = _mm_packs_epi32(comp_mask, zero); + /* F.2 mask out invalid entries. */ + comp_mask = _mm_andnot_si128(invalid_mask, comp_mask); + comp_idx = _mm_cvtsi128_si64(comp_mask); + /* F.3 get the first compressed CQE. */ + comp_idx = comp_idx ? + __builtin_ctzll(comp_idx) / + (sizeof(uint16_t) * 8) : + MLX5_VPMD_DESCS_PER_LOOP; + /* E.6 mask out entries after the compressed CQE. */ + mask = _mm_set_epi64x(0, comp_idx * sizeof(uint16_t) * 8); + mask = _mm_sll_epi64(ones, mask); + invalid_mask = _mm_or_si128(invalid_mask, mask); + /* E.7 count non-compressed valid CQEs. */ + n = _mm_cvtsi128_si64(invalid_mask); + n = n ? __builtin_ctzll(n) / (sizeof(uint16_t) * 8) : + MLX5_VPMD_DESCS_PER_LOOP; + nocmp_n += n; + /* D.2 get the final invalid mask. */ + mask = _mm_set_epi64x(0, n * sizeof(uint16_t) * 8); + mask = _mm_sll_epi64(ones, mask); + invalid_mask = _mm_or_si128(invalid_mask, mask); + /* D.3 check error in opcode. */ + opcode = _mm_cmpeq_epi32(resp_err_check, opcode); + opcode = _mm_packs_epi32(opcode, zero); + opcode = _mm_andnot_si128(invalid_mask, opcode); + /* D.4 mark if any error is set. */ + *err |= _mm_cvtsi128_si64(opcode); + /* D.5 fill in mbuf - rearm_data and packet_type. */ + rxq_cq_to_ptype_oflags_v(rxq, cqes, opcode, &pkts[pos]); + if (rxq->hw_timestamp) { + pkts[pos]->timestamp = + rte_be_to_cpu_64(cq[pos].timestamp); + pkts[pos + 1]->timestamp = + rte_be_to_cpu_64(cq[pos + p1].timestamp); + pkts[pos + 2]->timestamp = + rte_be_to_cpu_64(cq[pos + p2].timestamp); + pkts[pos + 3]->timestamp = + rte_be_to_cpu_64(cq[pos + p3].timestamp); + } + if (rxq->dynf_meta) { + /* This code is subject to further optimization.
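As in the NEON variant, the four metadata words could in principle be fetched with a single vector load instead of four scalar accesses.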
*/ + int32_t offs = rxq->flow_meta_offset; + + *RTE_MBUF_DYNFIELD(pkts[pos], offs, uint32_t *) = + cq[pos].flow_table_metadata; + *RTE_MBUF_DYNFIELD(pkts[pos + 1], offs, uint32_t *) = + cq[pos + p1].flow_table_metadata; + *RTE_MBUF_DYNFIELD(pkts[pos + 2], offs, uint32_t *) = + cq[pos + p2].flow_table_metadata; + *RTE_MBUF_DYNFIELD(pkts[pos + 3], offs, uint32_t *) = + cq[pos + p3].flow_table_metadata; + if (*RTE_MBUF_DYNFIELD(pkts[pos], offs, uint32_t *)) + pkts[pos]->ol_flags |= rxq->flow_meta_mask; + if (*RTE_MBUF_DYNFIELD(pkts[pos + 1], offs, uint32_t *)) + pkts[pos + 1]->ol_flags |= rxq->flow_meta_mask; + if (*RTE_MBUF_DYNFIELD(pkts[pos + 2], offs, uint32_t *)) + pkts[pos + 2]->ol_flags |= rxq->flow_meta_mask; + if (*RTE_MBUF_DYNFIELD(pkts[pos + 3], offs, uint32_t *)) + pkts[pos + 3]->ol_flags |= rxq->flow_meta_mask; + } +#ifdef MLX5_PMD_SOFT_COUNTERS + /* Add up received bytes count. */ + byte_cnt = _mm_shuffle_epi8(op_own, len_shuf_mask); + byte_cnt = _mm_andnot_si128(invalid_mask, byte_cnt); + byte_cnt = _mm_hadd_epi16(byte_cnt, zero); + rcvd_byte += _mm_cvtsi128_si64(_mm_hadd_epi16(byte_cnt, zero)); +#endif + /* + * Break the loop unless more valid CQE is expected, or if + * there's a compressed CQE. + */ + if (n != MLX5_VPMD_DESCS_PER_LOOP) + break; + } + /* If no new CQE seen, return without updating cq_db. */ + if (unlikely(!nocmp_n && comp_idx == MLX5_VPMD_DESCS_PER_LOOP)) + return rcvd_pkt; + /* Update the consumer indexes for non-compressed CQEs. */ + MLX5_ASSERT(nocmp_n <= pkts_n); + rxq->cq_ci += nocmp_n; + rxq->rq_pi += nocmp_n; + rcvd_pkt += nocmp_n; +#ifdef MLX5_PMD_SOFT_COUNTERS + rxq->stats.ipackets += nocmp_n; + rxq->stats.ibytes += rcvd_byte; +#endif + /* Decompress the last CQE if compressed. */ + if (comp_idx < MLX5_VPMD_DESCS_PER_LOOP && comp_idx == n) { + MLX5_ASSERT(comp_idx == (nocmp_n % MLX5_VPMD_DESCS_PER_LOOP)); + rxq->decompressed = rxq_cq_decompress_v(rxq, &cq[nocmp_n], + &elts[nocmp_n]); + /* Return more packets if needed. */ + if (nocmp_n < pkts_n) { + uint16_t n = rxq->decompressed; + + n = RTE_MIN(n, pkts_n - nocmp_n); + rxq_copy_mbuf_v(rxq, &pkts[nocmp_n], n); + rxq->rq_pi += n; + rcvd_pkt += n; + rxq->decompressed -= n; + } + } + rte_compiler_barrier(); + *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci); + return rcvd_pkt; +} + +#endif /* RTE_PMD_MLX5_RXTX_VEC_SSE_H_ */ diff --git a/src/spdk/dpdk/drivers/net/mlx5/mlx5_socket.c b/src/spdk/dpdk/drivers/net/mlx5/mlx5_socket.c new file mode 100644 index 000000000..a79896cb3 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/mlx5/mlx5_socket.c @@ -0,0 +1,230 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2019 Mellanox Technologies, Ltd + */ + +#ifndef _GNU_SOURCE +#define _GNU_SOURCE +#endif + +#include +#include +#include +#include +#include +#include +#include + +#include "rte_eal.h" +#include "mlx5_utils.h" +#include "mlx5.h" + +/* PMD socket service for tools. */ + +int server_socket; /* Unix socket for primary process. */ +struct rte_intr_handle server_intr_handle; /* Interrupt handler. */ + +static void +mlx5_pmd_make_path(struct sockaddr_un *addr, int pid) +{ + snprintf(addr->sun_path, sizeof(addr->sun_path), "/var/tmp/dpdk_%s_%d", + MLX5_DRIVER_NAME, pid); +} + +/** + * Handle server pmd socket interrupts. 
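+ * + * Accepts a single client connection, receives a port id plus an open file + * descriptor passed via SCM_RIGHTS, dumps the flow table of that port to the + * descriptor with mlx5_flow_dev_dump() and writes the status back. A client is + * therefore expected to roughly do (illustrative sketch only): + * connect(sock, ...); sendmsg(sock, <port id + SCM_RIGHTS fd>, 0); + * recvmsg(sock, <int status>, 0);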
+ */ +static void +mlx5_pmd_socket_handle(void *cb __rte_unused) +{ + int conn_sock; + int ret = -1; + struct cmsghdr *cmsg = NULL; + int data; + char buf[CMSG_SPACE(sizeof(int))] = { 0 }; + struct iovec io = { + .iov_base = &data, + .iov_len = sizeof(data), + }; + struct msghdr msg = { + .msg_iov = &io, + .msg_iovlen = 1, + .msg_control = buf, + .msg_controllen = sizeof(buf), + }; + uint16_t port_id; + int fd; + FILE *file = NULL; + struct rte_eth_dev *dev; + + /* Accept the connection from the client. */ + conn_sock = accept(server_socket, NULL, NULL); + if (conn_sock < 0) { + DRV_LOG(WARNING, "connection failed: %s", strerror(errno)); + return; + } + ret = recvmsg(conn_sock, &msg, MSG_WAITALL); + if (ret < 0) { + DRV_LOG(WARNING, "wrong message received: %s", + strerror(errno)); + goto error; + } + /* Receive file descriptor. */ + cmsg = CMSG_FIRSTHDR(&msg); + if (cmsg == NULL || cmsg->cmsg_type != SCM_RIGHTS || + cmsg->cmsg_len < sizeof(int)) { + DRV_LOG(WARNING, "invalid file descriptor message"); + goto error; + } + memcpy(&fd, CMSG_DATA(cmsg), sizeof(fd)); + file = fdopen(fd, "w"); + if (!file) { + DRV_LOG(WARNING, "Failed to open file"); + goto error; + } + /* Receive port number. */ + if (msg.msg_iovlen != 1 || msg.msg_iov->iov_len < sizeof(uint16_t)) { + DRV_LOG(WARNING, "wrong port number message"); + goto error; + } + memcpy(&port_id, msg.msg_iov->iov_base, sizeof(port_id)); + if (!rte_eth_dev_is_valid_port(port_id)) { + DRV_LOG(WARNING, "Invalid port %u", port_id); + goto error; + } + /* Dump flow. */ + dev = &rte_eth_devices[port_id]; + ret = mlx5_flow_dev_dump(dev, file, NULL); + /* Set-up the ancillary data and reply. */ + msg.msg_controllen = 0; + msg.msg_control = NULL; + msg.msg_iovlen = 1; + msg.msg_iov = &io; + data = -ret; + io.iov_len = sizeof(data); + io.iov_base = &data; + do { + ret = sendmsg(conn_sock, &msg, 0); + } while (ret < 0 && errno == EINTR); + if (ret < 0) + DRV_LOG(WARNING, "failed to send response %s", + strerror(errno)); +error: + if (conn_sock > 0) + close(conn_sock); + if (file) + fclose(file); +} + +/** + * Install interrupt handler. + * + * @param dev + * Pointer to Ethernet device. + * @return + * 0 on success, a negative errno value otherwise. + */ +static int +mlx5_pmd_interrupt_handler_install(void) +{ + MLX5_ASSERT(server_socket); + server_intr_handle.fd = server_socket; + server_intr_handle.type = RTE_INTR_HANDLE_EXT; + return rte_intr_callback_register(&server_intr_handle, + mlx5_pmd_socket_handle, NULL); +} + +/** + * Uninstall interrupt handler. + */ +static void +mlx5_pmd_interrupt_handler_uninstall(void) +{ + if (server_socket) { + mlx5_intr_callback_unregister(&server_intr_handle, + mlx5_pmd_socket_handle, + NULL); + } + server_intr_handle.fd = 0; + server_intr_handle.type = RTE_INTR_HANDLE_UNKNOWN; +} + +/** + * Initialise the socket to communicate with the secondary process + * + * @param[in] dev + * Pointer to Ethernet device. + * + * @return + * 0 on success, a negative value otherwise. + */ +int +mlx5_pmd_socket_init(void) +{ + struct sockaddr_un sun = { + .sun_family = AF_UNIX, + }; + int ret = -1; + int flags; + + MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY); + if (server_socket) + return 0; + /* + * Initialize the socket to communicate with the secondary + * process. 
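+ * The socket is made non-blocking and bound to a per-PID path under + * /var/tmp so external tools can locate it.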
+ */ + ret = socket(AF_UNIX, SOCK_STREAM, 0); + if (ret < 0) { + DRV_LOG(WARNING, "Failed to open mlx5 socket: %s", + strerror(errno)); + goto error; + } + server_socket = ret; + flags = fcntl(server_socket, F_GETFL, 0); + if (flags == -1) + goto error; + ret = fcntl(server_socket, F_SETFL, flags | O_NONBLOCK); + if (ret < 0) + goto error; + mlx5_pmd_make_path(&sun, getpid()); + remove(sun.sun_path); + ret = bind(server_socket, (const struct sockaddr *)&sun, sizeof(sun)); + if (ret < 0) { + DRV_LOG(WARNING, + "cannot bind mlx5 socket: %s", strerror(errno)); + goto close; + } + ret = listen(server_socket, 0); + if (ret < 0) { + DRV_LOG(WARNING, "cannot listen on mlx5 socket: %s", + strerror(errno)); + goto close; + } + if (mlx5_pmd_interrupt_handler_install()) { + DRV_LOG(WARNING, "cannot register interrupt handler for mlx5 socket: %s", + strerror(errno)); + goto close; + } + return 0; +close: + remove(sun.sun_path); +error: + claim_zero(close(server_socket)); + server_socket = 0; + DRV_LOG(ERR, "Cannot initialize socket: %s", strerror(errno)); + return -errno; +} + +/** + * Un-Initialize the pmd socket + */ +RTE_FINI(mlx5_pmd_socket_uninit) +{ + if (!server_socket) + return; + mlx5_pmd_interrupt_handler_uninstall(); + claim_zero(close(server_socket)); + server_socket = 0; + MKSTR(path, "/var/tmp/dpdk_%s_%d", MLX5_DRIVER_NAME, getpid()); + claim_zero(remove(path)); +} diff --git a/src/spdk/dpdk/drivers/net/mlx5/mlx5_stats.c b/src/spdk/dpdk/drivers/net/mlx5/mlx5_stats.c new file mode 100644 index 000000000..b4ca6922a --- /dev/null +++ b/src/spdk/dpdk/drivers/net/mlx5/mlx5_stats.c @@ -0,0 +1,589 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2015 6WIND S.A. + * Copyright 2015 Mellanox Technologies, Ltd + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include + +#include "mlx5_defs.h" +#include "mlx5.h" +#include "mlx5_rxtx.h" + + +static const struct mlx5_counter_ctrl mlx5_counters_init[] = { + { + .dpdk_name = "rx_port_unicast_bytes", + .ctr_name = "rx_vport_unicast_bytes", + }, + { + .dpdk_name = "rx_port_multicast_bytes", + .ctr_name = "rx_vport_multicast_bytes", + }, + { + .dpdk_name = "rx_port_broadcast_bytes", + .ctr_name = "rx_vport_broadcast_bytes", + }, + { + .dpdk_name = "rx_port_unicast_packets", + .ctr_name = "rx_vport_unicast_packets", + }, + { + .dpdk_name = "rx_port_multicast_packets", + .ctr_name = "rx_vport_multicast_packets", + }, + { + .dpdk_name = "rx_port_broadcast_packets", + .ctr_name = "rx_vport_broadcast_packets", + }, + { + .dpdk_name = "tx_port_unicast_bytes", + .ctr_name = "tx_vport_unicast_bytes", + }, + { + .dpdk_name = "tx_port_multicast_bytes", + .ctr_name = "tx_vport_multicast_bytes", + }, + { + .dpdk_name = "tx_port_broadcast_bytes", + .ctr_name = "tx_vport_broadcast_bytes", + }, + { + .dpdk_name = "tx_port_unicast_packets", + .ctr_name = "tx_vport_unicast_packets", + }, + { + .dpdk_name = "tx_port_multicast_packets", + .ctr_name = "tx_vport_multicast_packets", + }, + { + .dpdk_name = "tx_port_broadcast_packets", + .ctr_name = "tx_vport_broadcast_packets", + }, + { + .dpdk_name = "rx_wqe_err", + .ctr_name = "rx_wqe_err", + }, + { + .dpdk_name = "rx_crc_errors_phy", + .ctr_name = "rx_crc_errors_phy", + }, + { + .dpdk_name = "rx_in_range_len_errors_phy", + .ctr_name = "rx_in_range_len_errors_phy", + }, + { + .dpdk_name = "rx_symbol_err_phy", + .ctr_name = "rx_symbol_err_phy", + }, + { + .dpdk_name = "tx_errors_phy", + .ctr_name = "tx_errors_phy", + }, + { + .dpdk_name = 
"rx_out_of_buffer", + .ctr_name = "out_of_buffer", + .ib = 1, + }, + { + .dpdk_name = "tx_packets_phy", + .ctr_name = "tx_packets_phy", + }, + { + .dpdk_name = "rx_packets_phy", + .ctr_name = "rx_packets_phy", + }, + { + .dpdk_name = "tx_discards_phy", + .ctr_name = "tx_discards_phy", + }, + { + .dpdk_name = "rx_discards_phy", + .ctr_name = "rx_discards_phy", + }, + { + .dpdk_name = "tx_bytes_phy", + .ctr_name = "tx_bytes_phy", + }, + { + .dpdk_name = "rx_bytes_phy", + .ctr_name = "rx_bytes_phy", + }, + /* Representor only */ + { + .dpdk_name = "rx_packets", + .ctr_name = "vport_rx_packets", + }, + { + .dpdk_name = "rx_bytes", + .ctr_name = "vport_rx_bytes", + }, + { + .dpdk_name = "tx_packets", + .ctr_name = "vport_tx_packets", + }, + { + .dpdk_name = "tx_bytes", + .ctr_name = "vport_tx_bytes", + }, +}; + +static const unsigned int xstats_n = RTE_DIM(mlx5_counters_init); + +static inline int +mlx5_read_ib_stat(struct mlx5_priv *priv, const char *ctr_name, uint64_t *stat) +{ + int fd; + + if (priv->sh) { + MKSTR(path, "%s/ports/%d/hw_counters/%s", + priv->sh->ibdev_path, + priv->ibv_port, + ctr_name); + fd = open(path, O_RDONLY); + if (fd != -1) { + char buf[21] = {'\0'}; + ssize_t n = read(fd, buf, sizeof(buf)); + + close(fd); + if (n != -1) { + *stat = strtoull(buf, NULL, 10); + return 0; + } + } + } + *stat = 0; + return 1; +} + +/** + * Read device counters table. + * + * @param dev + * Pointer to Ethernet device. + * @param[out] stats + * Counters table output buffer. + * + * @return + * 0 on success and stats is filled, negative errno value otherwise and + * rte_errno is set. + */ +static int +mlx5_read_dev_counters(struct rte_eth_dev *dev, uint64_t *stats) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl; + unsigned int i; + struct ifreq ifr; + unsigned int stats_sz = xstats_ctrl->stats_n * sizeof(uint64_t); + unsigned char et_stat_buf[sizeof(struct ethtool_stats) + stats_sz]; + struct ethtool_stats *et_stats = (struct ethtool_stats *)et_stat_buf; + int ret; + + et_stats->cmd = ETHTOOL_GSTATS; + et_stats->n_stats = xstats_ctrl->stats_n; + ifr.ifr_data = (caddr_t)et_stats; + ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr); + if (ret) { + DRV_LOG(WARNING, + "port %u unable to read statistic values from device", + dev->data->port_id); + return ret; + } + for (i = 0; i != xstats_ctrl->mlx5_stats_n; ++i) { + if (xstats_ctrl->info[i].ib) { + ret = mlx5_read_ib_stat(priv, + xstats_ctrl->info[i].ctr_name, + &stats[i]); + /* return last xstats counter if fail to read. */ + if (ret == 0) + xstats_ctrl->xstats[i] = stats[i]; + else + stats[i] = xstats_ctrl->xstats[i]; + } else { + stats[i] = (uint64_t) + et_stats->data[xstats_ctrl->dev_table_idx[i]]; + } + } + return 0; +} + +/** + * Query the number of statistics provided by ETHTOOL. + * + * @param dev + * Pointer to Ethernet device. + * + * @return + * Number of statistics on success, negative errno value otherwise and + * rte_errno is set. + */ +static int +mlx5_ethtool_get_stats_n(struct rte_eth_dev *dev) { + struct ethtool_drvinfo drvinfo; + struct ifreq ifr; + int ret; + + drvinfo.cmd = ETHTOOL_GDRVINFO; + ifr.ifr_data = (caddr_t)&drvinfo; + ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr); + if (ret) { + DRV_LOG(WARNING, "port %u unable to query number of statistics", + dev->data->port_id); + return ret; + } + return drvinfo.n_stats; +} + +/** + * Init the structures to read device counters. + * + * @param dev + * Pointer to Ethernet device. 
+ */ +void +mlx5_stats_init(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl; + struct mlx5_stats_ctrl *stats_ctrl = &priv->stats_ctrl; + unsigned int i; + unsigned int j; + struct ifreq ifr; + struct ethtool_gstrings *strings = NULL; + unsigned int dev_stats_n; + unsigned int str_sz; + int ret; + + /* So that it won't aggregate for each init. */ + xstats_ctrl->mlx5_stats_n = 0; + ret = mlx5_ethtool_get_stats_n(dev); + if (ret < 0) { + DRV_LOG(WARNING, "port %u no extended statistics available", + dev->data->port_id); + return; + } + dev_stats_n = ret; + /* Allocate memory to grab stat names and values. */ + str_sz = dev_stats_n * ETH_GSTRING_LEN; + strings = (struct ethtool_gstrings *) + rte_malloc("xstats_strings", + str_sz + sizeof(struct ethtool_gstrings), 0); + if (!strings) { + DRV_LOG(WARNING, "port %u unable to allocate memory for xstats", + dev->data->port_id); + return; + } + strings->cmd = ETHTOOL_GSTRINGS; + strings->string_set = ETH_SS_STATS; + strings->len = dev_stats_n; + ifr.ifr_data = (caddr_t)strings; + ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr); + if (ret) { + DRV_LOG(WARNING, "port %u unable to get statistic names", + dev->data->port_id); + goto free; + } + for (i = 0; i != dev_stats_n; ++i) { + const char *curr_string = (const char *) + &strings->data[i * ETH_GSTRING_LEN]; + + for (j = 0; j != xstats_n; ++j) { + if (!strcmp(mlx5_counters_init[j].ctr_name, + curr_string)) { + unsigned int idx = xstats_ctrl->mlx5_stats_n++; + + xstats_ctrl->dev_table_idx[idx] = i; + xstats_ctrl->info[idx] = mlx5_counters_init[j]; + break; + } + } + } + /* Add IB counters. */ + for (i = 0; i != xstats_n; ++i) { + if (mlx5_counters_init[i].ib) { + unsigned int idx = xstats_ctrl->mlx5_stats_n++; + + xstats_ctrl->info[idx] = mlx5_counters_init[i]; + xstats_ctrl->hw_stats[idx] = 0; + } + } + MLX5_ASSERT(xstats_ctrl->mlx5_stats_n <= MLX5_MAX_XSTATS); + xstats_ctrl->stats_n = dev_stats_n; + /* Copy to base at first time. */ + ret = mlx5_read_dev_counters(dev, xstats_ctrl->base); + if (ret) + DRV_LOG(ERR, "port %u cannot read device counters: %s", + dev->data->port_id, strerror(rte_errno)); + mlx5_read_ib_stat(priv, "out_of_buffer", &stats_ctrl->imissed_base); + stats_ctrl->imissed = 0; +free: + rte_free(strings); +} + +/** + * DPDK callback to get extended device statistics. + * + * @param dev + * Pointer to Ethernet device. + * @param[out] stats + * Pointer to rte extended stats table. + * @param n + * The size of the stats table. + * + * @return + * Number of extended stats on success and stats is filled, + * negative on error and rte_errno is set. 
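+ * + * IB counters are only 32 bits wide in the kernel interface, so the driver + * tracks wrap-arounds in software and reports 64-bit values.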
+ */ +int +mlx5_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats, + unsigned int n) +{ + struct mlx5_priv *priv = dev->data->dev_private; + unsigned int i; + uint64_t counters[n]; + struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl; + uint16_t mlx5_stats_n = xstats_ctrl->mlx5_stats_n; + + if (n >= mlx5_stats_n && stats) { + int stats_n; + int ret; + + stats_n = mlx5_ethtool_get_stats_n(dev); + if (stats_n < 0) + return stats_n; + if (xstats_ctrl->stats_n != stats_n) + mlx5_stats_init(dev); + ret = mlx5_read_dev_counters(dev, counters); + if (ret) + return ret; + for (i = 0; i != mlx5_stats_n; ++i) { + stats[i].id = i; + if (xstats_ctrl->info[i].ib) { + uint64_t wrap_n; + uint64_t hw_stat = xstats_ctrl->hw_stats[i]; + + stats[i].value = (counters[i] - + xstats_ctrl->base[i]) & + (uint64_t)UINT32_MAX; + wrap_n = hw_stat >> 32; + if (stats[i].value < + (hw_stat & (uint64_t)UINT32_MAX)) + wrap_n++; + stats[i].value |= (wrap_n) << 32; + xstats_ctrl->hw_stats[i] = stats[i].value; + } else { + stats[i].value = + (counters[i] - xstats_ctrl->base[i]); + } + } + } + return mlx5_stats_n; +} + +/** + * DPDK callback to get device statistics. + * + * @param dev + * Pointer to Ethernet device structure. + * @param[out] stats + * Stats structure output buffer. + * + * @return + * 0 on success and stats is filled, negative errno value otherwise and + * rte_errno is set. + */ +int +mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_stats_ctrl *stats_ctrl = &priv->stats_ctrl; + struct rte_eth_stats tmp; + unsigned int i; + unsigned int idx; + uint64_t wrap_n; + int ret; + + memset(&tmp, 0, sizeof(tmp)); + /* Add software counters. */ + for (i = 0; (i != priv->rxqs_n); ++i) { + struct mlx5_rxq_data *rxq = (*priv->rxqs)[i]; + + if (rxq == NULL) + continue; + idx = rxq->idx; + if (idx < RTE_ETHDEV_QUEUE_STAT_CNTRS) { +#ifdef MLX5_PMD_SOFT_COUNTERS + tmp.q_ipackets[idx] += rxq->stats.ipackets; + tmp.q_ibytes[idx] += rxq->stats.ibytes; +#endif + tmp.q_errors[idx] += (rxq->stats.idropped + + rxq->stats.rx_nombuf); + } +#ifdef MLX5_PMD_SOFT_COUNTERS + tmp.ipackets += rxq->stats.ipackets; + tmp.ibytes += rxq->stats.ibytes; +#endif + tmp.ierrors += rxq->stats.idropped; + tmp.rx_nombuf += rxq->stats.rx_nombuf; + } + for (i = 0; (i != priv->txqs_n); ++i) { + struct mlx5_txq_data *txq = (*priv->txqs)[i]; + + if (txq == NULL) + continue; + idx = txq->idx; + if (idx < RTE_ETHDEV_QUEUE_STAT_CNTRS) { +#ifdef MLX5_PMD_SOFT_COUNTERS + tmp.q_opackets[idx] += txq->stats.opackets; + tmp.q_obytes[idx] += txq->stats.obytes; +#endif + } +#ifdef MLX5_PMD_SOFT_COUNTERS + tmp.opackets += txq->stats.opackets; + tmp.obytes += txq->stats.obytes; +#endif + tmp.oerrors += txq->stats.oerrors; + } + ret = mlx5_read_ib_stat(priv, "out_of_buffer", &tmp.imissed); + if (ret == 0) { + tmp.imissed = (tmp.imissed - stats_ctrl->imissed_base) & + (uint64_t)UINT32_MAX; + wrap_n = stats_ctrl->imissed >> 32; + if (tmp.imissed < (stats_ctrl->imissed & (uint64_t)UINT32_MAX)) + wrap_n++; + tmp.imissed |= (wrap_n) << 32; + stats_ctrl->imissed = tmp.imissed; + } else { + tmp.imissed = stats_ctrl->imissed; + } +#ifndef MLX5_PMD_SOFT_COUNTERS + /* FIXME: retrieve and add hardware counters. */ +#endif + *stats = tmp; + return 0; +} + +/** + * DPDK callback to clear device statistics. + * + * @param dev + * Pointer to Ethernet device structure. 
+ * + * @return + * always 0 on success and stats is reset + */ +int +mlx5_stats_reset(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_stats_ctrl *stats_ctrl = &priv->stats_ctrl; + unsigned int i; + + for (i = 0; (i != priv->rxqs_n); ++i) { + if ((*priv->rxqs)[i] == NULL) + continue; + memset(&(*priv->rxqs)[i]->stats, 0, + sizeof(struct mlx5_rxq_stats)); + } + for (i = 0; (i != priv->txqs_n); ++i) { + if ((*priv->txqs)[i] == NULL) + continue; + memset(&(*priv->txqs)[i]->stats, 0, + sizeof(struct mlx5_txq_stats)); + } + mlx5_read_ib_stat(priv, "out_of_buffer", &stats_ctrl->imissed_base); + stats_ctrl->imissed = 0; +#ifndef MLX5_PMD_SOFT_COUNTERS + /* FIXME: reset hardware counters. */ +#endif + + return 0; +} + +/** + * DPDK callback to clear device extended statistics. + * + * @param dev + * Pointer to Ethernet device structure. + * + * @return + * 0 on success and stats is reset, negative errno value otherwise and + * rte_errno is set. + */ +int +mlx5_xstats_reset(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl; + int stats_n; + unsigned int i; + unsigned int n = xstats_ctrl->mlx5_stats_n; + uint64_t counters[n]; + int ret; + + stats_n = mlx5_ethtool_get_stats_n(dev); + if (stats_n < 0) { + DRV_LOG(ERR, "port %u cannot get stats: %s", dev->data->port_id, + strerror(-stats_n)); + return stats_n; + } + if (xstats_ctrl->stats_n != stats_n) + mlx5_stats_init(dev); + ret = mlx5_read_dev_counters(dev, counters); + if (ret) { + DRV_LOG(ERR, "port %u cannot read device counters: %s", + dev->data->port_id, strerror(rte_errno)); + return ret; + } + for (i = 0; i != n; ++i) { + xstats_ctrl->base[i] = counters[i]; + xstats_ctrl->hw_stats[i] = 0; + } + + return 0; +} + +/** + * DPDK callback to retrieve names of extended device statistics + * + * @param dev + * Pointer to Ethernet device structure. + * @param[out] xstats_names + * Buffer to insert names into. + * @param n + * Number of names. + * + * @return + * Number of xstats names. + */ +int +mlx5_xstats_get_names(struct rte_eth_dev *dev __rte_unused, + struct rte_eth_xstat_name *xstats_names, unsigned int n) +{ + unsigned int i; + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl; + unsigned int mlx5_xstats_n = xstats_ctrl->mlx5_stats_n; + + if (n >= mlx5_xstats_n && xstats_names) { + for (i = 0; i != mlx5_xstats_n; ++i) { + strncpy(xstats_names[i].name, + xstats_ctrl->info[i].dpdk_name, + RTE_ETH_XSTATS_NAME_SIZE); + xstats_names[i].name[RTE_ETH_XSTATS_NAME_SIZE - 1] = 0; + } + } + return mlx5_xstats_n; +} diff --git a/src/spdk/dpdk/drivers/net/mlx5/mlx5_trigger.c b/src/spdk/dpdk/drivers/net/mlx5/mlx5_trigger.c new file mode 100644 index 000000000..8106598ff --- /dev/null +++ b/src/spdk/dpdk/drivers/net/mlx5/mlx5_trigger.c @@ -0,0 +1,579 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2015 6WIND S.A. + * Copyright 2015 Mellanox Technologies, Ltd + */ + +#include + +#include +#include +#include +#include + +#include "mlx5.h" +#include "mlx5_mr.h" +#include "mlx5_rxtx.h" +#include "mlx5_utils.h" +#include "rte_pmd_mlx5.h" + +/** + * Stop traffic on Tx queues. + * + * @param dev + * Pointer to Ethernet device structure. 
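+ * + * Releases the reference held on every Tx queue so the associated queue + * objects can be freed.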
+ */ +static void +mlx5_txq_stop(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + unsigned int i; + + for (i = 0; i != priv->txqs_n; ++i) + mlx5_txq_release(dev, i); +} + +/** + * Start traffic on Tx queues. + * + * @param dev + * Pointer to Ethernet device structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +mlx5_txq_start(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + unsigned int i; + int ret; + + for (i = 0; i != priv->txqs_n; ++i) { + struct mlx5_txq_ctrl *txq_ctrl = mlx5_txq_get(dev, i); + + if (!txq_ctrl) + continue; + if (txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN) { + txq_ctrl->obj = mlx5_txq_obj_new + (dev, i, MLX5_TXQ_OBJ_TYPE_DEVX_HAIRPIN); + } else { + txq_alloc_elts(txq_ctrl); + txq_ctrl->obj = mlx5_txq_obj_new + (dev, i, MLX5_TXQ_OBJ_TYPE_IBV); + } + if (!txq_ctrl->obj) { + rte_errno = ENOMEM; + goto error; + } + } + return 0; +error: + ret = rte_errno; /* Save rte_errno before cleanup. */ + do { + mlx5_txq_release(dev, i); + } while (i-- != 0); + rte_errno = ret; /* Restore rte_errno. */ + return -rte_errno; +} + +/** + * Stop traffic on Rx queues. + * + * @param dev + * Pointer to Ethernet device structure. + */ +static void +mlx5_rxq_stop(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + unsigned int i; + + for (i = 0; i != priv->rxqs_n; ++i) + mlx5_rxq_release(dev, i); +} + +/** + * Start traffic on Rx queues. + * + * @param dev + * Pointer to Ethernet device structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +mlx5_rxq_start(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + unsigned int i; + int ret = 0; + enum mlx5_rxq_obj_type obj_type = MLX5_RXQ_OBJ_TYPE_IBV; + struct mlx5_rxq_data *rxq = NULL; + + for (i = 0; i < priv->rxqs_n; ++i) { + rxq = (*priv->rxqs)[i]; + if (rxq && rxq->lro) { + obj_type = MLX5_RXQ_OBJ_TYPE_DEVX_RQ; + break; + } + } + /* Allocate/reuse/resize mempool for Multi-Packet RQ. */ + if (mlx5_mprq_alloc_mp(dev)) { + /* Should not release Rx queues but return immediately. */ + return -rte_errno; + } + for (i = 0; i != priv->rxqs_n; ++i) { + struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_get(dev, i); + struct rte_mempool *mp; + + if (!rxq_ctrl) + continue; + if (rxq_ctrl->type == MLX5_RXQ_TYPE_HAIRPIN) { + rxq_ctrl->obj = mlx5_rxq_obj_new + (dev, i, MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN); + if (!rxq_ctrl->obj) + goto error; + continue; + } + /* Pre-register Rx mempool. */ + mp = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ? + rxq_ctrl->rxq.mprq_mp : rxq_ctrl->rxq.mp; + DRV_LOG(DEBUG, + "port %u Rx queue %u registering" + " mp %s having %u chunks", + dev->data->port_id, rxq_ctrl->rxq.idx, + mp->name, mp->nb_mem_chunks); + mlx5_mr_update_mp(dev, &rxq_ctrl->rxq.mr_ctrl, mp); + ret = rxq_alloc_elts(rxq_ctrl); + if (ret) + goto error; + rxq_ctrl->obj = mlx5_rxq_obj_new(dev, i, obj_type); + if (!rxq_ctrl->obj) + goto error; + if (obj_type == MLX5_RXQ_OBJ_TYPE_IBV) + rxq_ctrl->wqn = rxq_ctrl->obj->wq->wq_num; + else if (obj_type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ) + rxq_ctrl->wqn = rxq_ctrl->obj->rq->id; + } + return 0; +error: + ret = rte_errno; /* Save rte_errno before cleanup. */ + do { + mlx5_rxq_release(dev, i); + } while (i-- != 0); + rte_errno = ret; /* Restore rte_errno. */ + return -rte_errno; +} + +/** + * Binds Tx queues to Rx queues for hairpin. + * + * Binds Tx queues to the target Rx queues. 
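mlx5_txq_start() and mlx5_rxq_start() above unwind with the same idiom: rte_errno is saved before releasing the queues, because the release helpers may overwrite it, and is restored just before returning. A generic sketch of that shape, with hypothetical setup/release callbacks:

#include <rte_errno.h>

static int
start_queues(unsigned int q_n, int (*setup)(unsigned int),
             void (*release)(unsigned int))
{
        unsigned int i;

        for (i = 0; i != q_n; ++i)
                if (setup(i))           /* Sets rte_errno on failure. */
                        goto error;
        return 0;
error:
        {
                int ret = rte_errno;    /* Save rte_errno before cleanup. */

                do {
                        release(i);
                } while (i-- != 0);
                rte_errno = ret;        /* Restore rte_errno. */
                return -rte_errno;
        }
}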
+ * + * @param dev + * Pointer to Ethernet device structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +mlx5_hairpin_bind(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_devx_modify_sq_attr sq_attr = { 0 }; + struct mlx5_devx_modify_rq_attr rq_attr = { 0 }; + struct mlx5_txq_ctrl *txq_ctrl; + struct mlx5_rxq_ctrl *rxq_ctrl; + struct mlx5_devx_obj *sq; + struct mlx5_devx_obj *rq; + unsigned int i; + int ret = 0; + + for (i = 0; i != priv->txqs_n; ++i) { + txq_ctrl = mlx5_txq_get(dev, i); + if (!txq_ctrl) + continue; + if (txq_ctrl->type != MLX5_TXQ_TYPE_HAIRPIN) { + mlx5_txq_release(dev, i); + continue; + } + if (!txq_ctrl->obj) { + rte_errno = ENOMEM; + DRV_LOG(ERR, "port %u no txq object found: %d", + dev->data->port_id, i); + mlx5_txq_release(dev, i); + return -rte_errno; + } + sq = txq_ctrl->obj->sq; + rxq_ctrl = mlx5_rxq_get(dev, + txq_ctrl->hairpin_conf.peers[0].queue); + if (!rxq_ctrl) { + mlx5_txq_release(dev, i); + rte_errno = EINVAL; + DRV_LOG(ERR, "port %u no rxq object found: %d", + dev->data->port_id, + txq_ctrl->hairpin_conf.peers[0].queue); + return -rte_errno; + } + if (rxq_ctrl->type != MLX5_RXQ_TYPE_HAIRPIN || + rxq_ctrl->hairpin_conf.peers[0].queue != i) { + rte_errno = ENOMEM; + DRV_LOG(ERR, "port %u Tx queue %d can't be binded to " + "Rx queue %d", dev->data->port_id, + i, txq_ctrl->hairpin_conf.peers[0].queue); + goto error; + } + rq = rxq_ctrl->obj->rq; + if (!rq) { + rte_errno = ENOMEM; + DRV_LOG(ERR, "port %u hairpin no matching rxq: %d", + dev->data->port_id, + txq_ctrl->hairpin_conf.peers[0].queue); + goto error; + } + sq_attr.state = MLX5_SQC_STATE_RDY; + sq_attr.sq_state = MLX5_SQC_STATE_RST; + sq_attr.hairpin_peer_rq = rq->id; + sq_attr.hairpin_peer_vhca = priv->config.hca_attr.vhca_id; + ret = mlx5_devx_cmd_modify_sq(sq, &sq_attr); + if (ret) + goto error; + rq_attr.state = MLX5_SQC_STATE_RDY; + rq_attr.rq_state = MLX5_SQC_STATE_RST; + rq_attr.hairpin_peer_sq = sq->id; + rq_attr.hairpin_peer_vhca = priv->config.hca_attr.vhca_id; + ret = mlx5_devx_cmd_modify_rq(rq, &rq_attr); + if (ret) + goto error; + mlx5_txq_release(dev, i); + mlx5_rxq_release(dev, txq_ctrl->hairpin_conf.peers[0].queue); + } + return 0; +error: + mlx5_txq_release(dev, i); + mlx5_rxq_release(dev, txq_ctrl->hairpin_conf.peers[0].queue); + return -rte_errno; +} + +/** + * DPDK callback to start the device. + * + * Simulate device start by attaching all configured flows. + * + * @param dev + * Pointer to Ethernet device structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. 
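For reference, mlx5_hairpin_bind() above expects the single-port layout an application would set up roughly as follows: each hairpin Tx queue names exactly one peer Rx queue on the same port, and vice versa. This sketch assumes the rte_eth_*_hairpin_queue_setup() ethdev calls of this DPDK generation; the helper name is illustrative:

#include <rte_ethdev.h>

static int
setup_hairpin_pair(uint16_t port_id, uint16_t txq, uint16_t rxq,
                   uint16_t nb_desc)
{
        struct rte_eth_hairpin_conf conf = {
                .peer_count = 1,
        };
        int ret;

        conf.peers[0].port = port_id;
        conf.peers[0].queue = rxq;
        ret = rte_eth_tx_hairpin_queue_setup(port_id, txq, nb_desc, &conf);
        if (ret)
                return ret;
        conf.peers[0].queue = txq;
        return rte_eth_rx_hairpin_queue_setup(port_id, rxq, nb_desc, &conf);
}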
+ */ +int +mlx5_dev_start(struct rte_eth_dev *dev) +{ + int ret; + int fine_inline; + + DRV_LOG(DEBUG, "port %u starting device", dev->data->port_id); + fine_inline = rte_mbuf_dynflag_lookup + (RTE_PMD_MLX5_FINE_GRANULARITY_INLINE, NULL); + if (fine_inline > 0) + rte_net_mlx5_dynf_inline_mask = 1UL << fine_inline; + else + rte_net_mlx5_dynf_inline_mask = 0; + if (dev->data->nb_rx_queues > 0) { + ret = mlx5_dev_configure_rss_reta(dev); + if (ret) { + DRV_LOG(ERR, "port %u reta config failed: %s", + dev->data->port_id, strerror(rte_errno)); + return -rte_errno; + } + } + ret = mlx5_txq_start(dev); + if (ret) { + DRV_LOG(ERR, "port %u Tx queue allocation failed: %s", + dev->data->port_id, strerror(rte_errno)); + return -rte_errno; + } + ret = mlx5_rxq_start(dev); + if (ret) { + DRV_LOG(ERR, "port %u Rx queue allocation failed: %s", + dev->data->port_id, strerror(rte_errno)); + mlx5_txq_stop(dev); + return -rte_errno; + } + ret = mlx5_hairpin_bind(dev); + if (ret) { + DRV_LOG(ERR, "port %u hairpin binding failed: %s", + dev->data->port_id, strerror(rte_errno)); + mlx5_txq_stop(dev); + return -rte_errno; + } + /* Set started flag here for the following steps like control flow. */ + dev->data->dev_started = 1; + ret = mlx5_rx_intr_vec_enable(dev); + if (ret) { + DRV_LOG(ERR, "port %u Rx interrupt vector creation failed", + dev->data->port_id); + goto error; + } + mlx5_stats_init(dev); + ret = mlx5_traffic_enable(dev); + if (ret) { + DRV_LOG(ERR, "port %u failed to set defaults flows", + dev->data->port_id); + goto error; + } + /* Set a mask and offset of dynamic metadata flows into Rx queues*/ + mlx5_flow_rxq_dynf_metadata_set(dev); + /* + * In non-cached mode, it only needs to start the default mreg copy + * action and no flow created by application exists anymore. + * But it is worth wrapping the interface for further usage. + */ + ret = mlx5_flow_start_default(dev); + if (ret) { + DRV_LOG(DEBUG, "port %u failed to start default actions: %s", + dev->data->port_id, strerror(rte_errno)); + goto error; + } + rte_wmb(); + dev->tx_pkt_burst = mlx5_select_tx_function(dev); + dev->rx_pkt_burst = mlx5_select_rx_function(dev); + /* Enable datapath on secondary process. */ + mlx5_mp_req_start_rxtx(dev); + mlx5_dev_interrupt_handler_install(dev); + return 0; +error: + ret = rte_errno; /* Save rte_errno before cleanup. */ + /* Rollback. */ + dev->data->dev_started = 0; + mlx5_flow_stop_default(dev); + mlx5_traffic_disable(dev); + mlx5_txq_stop(dev); + mlx5_rxq_stop(dev); + rte_errno = ret; /* Restore rte_errno. */ + return -rte_errno; +} + +/** + * DPDK callback to stop the device. + * + * Simulate device stop by detaching all configured flows. + * + * @param dev + * Pointer to Ethernet device structure. + */ +void +mlx5_dev_stop(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + + dev->data->dev_started = 0; + /* Prevent crashes when queues are still in use. */ + dev->rx_pkt_burst = removed_rx_burst; + dev->tx_pkt_burst = removed_tx_burst; + rte_wmb(); + /* Disable datapath on secondary process. */ + mlx5_mp_req_stop_rxtx(dev); + usleep(1000 * priv->rxqs_n); + DRV_LOG(DEBUG, "port %u stopping device", dev->data->port_id); + mlx5_flow_stop_default(dev); + /* Control flows for default traffic can be removed firstly. */ + mlx5_traffic_disable(dev); + /* All RX queue flags will be cleared in the flush interface. 
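mlx5_dev_start() above is reached through the usual ethdev bring-up sequence: configure the port, set up its queues, then start it. A caller-side sketch (descriptor counts and the helper name are arbitrary, error handling trimmed):

#include <rte_ethdev.h>

static int
bring_up_port(uint16_t port_id, uint16_t nb_rxq, uint16_t nb_txq,
              struct rte_mempool *mp)
{
        struct rte_eth_conf conf = { 0 };
        uint16_t q;
        int ret;

        ret = rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
        if (ret)
                return ret;
        for (q = 0; q < nb_rxq; q++) {
                ret = rte_eth_rx_queue_setup(port_id, q, 512,
                                             rte_eth_dev_socket_id(port_id),
                                             NULL, mp);
                if (ret)
                        return ret;
        }
        for (q = 0; q < nb_txq; q++) {
                ret = rte_eth_tx_queue_setup(port_id, q, 512,
                                             rte_eth_dev_socket_id(port_id),
                                             NULL);
                if (ret)
                        return ret;
        }
        return rte_eth_dev_start(port_id); /* Dispatches to mlx5_dev_start(). */
}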
*/ + mlx5_flow_list_flush(dev, &priv->flows, true); + mlx5_rx_intr_vec_disable(dev); + mlx5_dev_interrupt_handler_uninstall(dev); + mlx5_txq_stop(dev); + mlx5_rxq_stop(dev); +} + +/** + * Enable traffic flows configured by control plane + * + * @param dev + * Pointer to Ethernet device private data. + * @param dev + * Pointer to Ethernet device structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_traffic_enable(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct rte_flow_item_eth bcast = { + .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff", + }; + struct rte_flow_item_eth ipv6_multi_spec = { + .dst.addr_bytes = "\x33\x33\x00\x00\x00\x00", + }; + struct rte_flow_item_eth ipv6_multi_mask = { + .dst.addr_bytes = "\xff\xff\x00\x00\x00\x00", + }; + struct rte_flow_item_eth unicast = { + .src.addr_bytes = "\x00\x00\x00\x00\x00\x00", + }; + struct rte_flow_item_eth unicast_mask = { + .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff", + }; + const unsigned int vlan_filter_n = priv->vlan_filter_n; + const struct rte_ether_addr cmp = { + .addr_bytes = "\x00\x00\x00\x00\x00\x00", + }; + unsigned int i; + unsigned int j; + int ret; + + /* + * Hairpin txq default flow should be created no matter if it is + * isolation mode. Or else all the packets to be sent will be sent + * out directly without the TX flow actions, e.g. encapsulation. + */ + for (i = 0; i != priv->txqs_n; ++i) { + struct mlx5_txq_ctrl *txq_ctrl = mlx5_txq_get(dev, i); + if (!txq_ctrl) + continue; + if (txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN) { + ret = mlx5_ctrl_flow_source_queue(dev, i); + if (ret) { + mlx5_txq_release(dev, i); + goto error; + } + } + mlx5_txq_release(dev, i); + } + if (priv->config.dv_esw_en && !priv->config.vf) { + if (mlx5_flow_create_esw_table_zero_flow(dev)) + priv->fdb_def_rule = 1; + else + DRV_LOG(INFO, "port %u FDB default rule cannot be" + " configured - only Eswitch group 0 flows are" + " supported.", dev->data->port_id); + } + if (priv->isolated) + return 0; + if (dev->data->promiscuous) { + struct rte_flow_item_eth promisc = { + .dst.addr_bytes = "\x00\x00\x00\x00\x00\x00", + .src.addr_bytes = "\x00\x00\x00\x00\x00\x00", + .type = 0, + }; + + ret = mlx5_ctrl_flow(dev, &promisc, &promisc); + if (ret) + goto error; + } + if (dev->data->all_multicast) { + struct rte_flow_item_eth multicast = { + .dst.addr_bytes = "\x01\x00\x00\x00\x00\x00", + .src.addr_bytes = "\x00\x00\x00\x00\x00\x00", + .type = 0, + }; + + ret = mlx5_ctrl_flow(dev, &multicast, &multicast); + if (ret) + goto error; + } else { + /* Add broadcast/multicast flows. */ + for (i = 0; i != vlan_filter_n; ++i) { + uint16_t vlan = priv->vlan_filter[i]; + + struct rte_flow_item_vlan vlan_spec = { + .tci = rte_cpu_to_be_16(vlan), + }; + struct rte_flow_item_vlan vlan_mask = + rte_flow_item_vlan_mask; + + ret = mlx5_ctrl_flow_vlan(dev, &bcast, &bcast, + &vlan_spec, &vlan_mask); + if (ret) + goto error; + ret = mlx5_ctrl_flow_vlan(dev, &ipv6_multi_spec, + &ipv6_multi_mask, + &vlan_spec, &vlan_mask); + if (ret) + goto error; + } + if (!vlan_filter_n) { + ret = mlx5_ctrl_flow(dev, &bcast, &bcast); + if (ret) + goto error; + ret = mlx5_ctrl_flow(dev, &ipv6_multi_spec, + &ipv6_multi_mask); + if (ret) + goto error; + } + } + /* Add MAC address flows. 
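The control-flow helpers above take (spec, mask) pairs such as the broadcast and IPv6-multicast entries; in generic rte_flow terms each pair becomes one pattern item whose mask selects which spec bits must match. An illustrative helper, not the driver's internal builder:

#include <rte_flow.h>

static void
build_l2_pattern(struct rte_flow_item pattern[2],
                 const struct rte_flow_item_eth *spec,
                 const struct rte_flow_item_eth *mask)
{
        pattern[0] = (struct rte_flow_item){
                .type = RTE_FLOW_ITEM_TYPE_ETH,
                .spec = spec, /* e.g. dst = ff:ff:ff:ff:ff:ff for broadcast */
                .mask = mask, /* bits of the spec that must match exactly */
        };
        pattern[1] = (struct rte_flow_item){
                .type = RTE_FLOW_ITEM_TYPE_END,
        };
}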
*/ + for (i = 0; i != MLX5_MAX_MAC_ADDRESSES; ++i) { + struct rte_ether_addr *mac = &dev->data->mac_addrs[i]; + + if (!memcmp(mac, &cmp, sizeof(*mac))) + continue; + memcpy(&unicast.dst.addr_bytes, + mac->addr_bytes, + RTE_ETHER_ADDR_LEN); + for (j = 0; j != vlan_filter_n; ++j) { + uint16_t vlan = priv->vlan_filter[j]; + + struct rte_flow_item_vlan vlan_spec = { + .tci = rte_cpu_to_be_16(vlan), + }; + struct rte_flow_item_vlan vlan_mask = + rte_flow_item_vlan_mask; + + ret = mlx5_ctrl_flow_vlan(dev, &unicast, + &unicast_mask, + &vlan_spec, + &vlan_mask); + if (ret) + goto error; + } + if (!vlan_filter_n) { + ret = mlx5_ctrl_flow(dev, &unicast, &unicast_mask); + if (ret) + goto error; + } + } + return 0; +error: + ret = rte_errno; /* Save rte_errno before cleanup. */ + mlx5_flow_list_flush(dev, &priv->ctrl_flows, false); + rte_errno = ret; /* Restore rte_errno. */ + return -rte_errno; +} + + +/** + * Disable traffic flows configured by control plane + * + * @param dev + * Pointer to Ethernet device private data. + */ +void +mlx5_traffic_disable(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + + mlx5_flow_list_flush(dev, &priv->ctrl_flows, false); +} + +/** + * Restart traffic flows configured by control plane + * + * @param dev + * Pointer to Ethernet device private data. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_traffic_restart(struct rte_eth_dev *dev) +{ + if (dev->data->dev_started) { + mlx5_traffic_disable(dev); + return mlx5_traffic_enable(dev); + } + return 0; +} diff --git a/src/spdk/dpdk/drivers/net/mlx5/mlx5_txq.c b/src/spdk/dpdk/drivers/net/mlx5/mlx5_txq.c new file mode 100644 index 000000000..a211fa91b --- /dev/null +++ b/src/spdk/dpdk/drivers/net/mlx5/mlx5_txq.c @@ -0,0 +1,1470 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2015 6WIND S.A. + * Copyright 2015 Mellanox Technologies, Ltd + */ + +#include +#include +#include +#include +#include +#include +#include + +/* Verbs header. */ +/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */ +#ifdef PEDANTIC +#pragma GCC diagnostic ignored "-Wpedantic" +#endif +#include +#include +#ifdef PEDANTIC +#pragma GCC diagnostic error "-Wpedantic" +#endif + +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "mlx5_defs.h" +#include "mlx5_utils.h" +#include "mlx5.h" +#include "mlx5_rxtx.h" +#include "mlx5_autoconf.h" + +/** + * Allocate TX queue elements. + * + * @param txq_ctrl + * Pointer to TX queue structure. + */ +void +txq_alloc_elts(struct mlx5_txq_ctrl *txq_ctrl) +{ + const unsigned int elts_n = 1 << txq_ctrl->txq.elts_n; + unsigned int i; + + for (i = 0; (i != elts_n); ++i) + txq_ctrl->txq.elts[i] = NULL; + DRV_LOG(DEBUG, "port %u Tx queue %u allocated and configured %u WRs", + PORT_ID(txq_ctrl->priv), txq_ctrl->txq.idx, elts_n); + txq_ctrl->txq.elts_head = 0; + txq_ctrl->txq.elts_tail = 0; + txq_ctrl->txq.elts_comp = 0; +} + +/** + * Free TX queue elements. + * + * @param txq_ctrl + * Pointer to TX queue structure. 
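txq_alloc_elts() above and txq_free_elts() just below treat the elts array as a power-of-two ring: the 16-bit head/tail counters run freely and are masked only when indexing. A standalone sketch with an arbitrary ring size:

#include <stddef.h>
#include <stdint.h>

#define ELTS_N 256                      /* Must be a power of two. */
#define ELTS_M (ELTS_N - 1)

struct elts_ring {
        void *elts[ELTS_N];
        uint16_t head;                  /* Producer index, free-running. */
        uint16_t tail;                  /* Consumer index, free-running. */
};

static void *
ring_pop(struct elts_ring *r)
{
        if (r->tail == r->head)
                return NULL;                    /* Empty. */
        return r->elts[r->tail++ & ELTS_M];     /* Mask only at access time. */
}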
+ */ +void +txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl) +{ + const uint16_t elts_n = 1 << txq_ctrl->txq.elts_n; + const uint16_t elts_m = elts_n - 1; + uint16_t elts_head = txq_ctrl->txq.elts_head; + uint16_t elts_tail = txq_ctrl->txq.elts_tail; + struct rte_mbuf *(*elts)[elts_n] = &txq_ctrl->txq.elts; + + DRV_LOG(DEBUG, "port %u Tx queue %u freeing WRs", + PORT_ID(txq_ctrl->priv), txq_ctrl->txq.idx); + txq_ctrl->txq.elts_head = 0; + txq_ctrl->txq.elts_tail = 0; + txq_ctrl->txq.elts_comp = 0; + + while (elts_tail != elts_head) { + struct rte_mbuf *elt = (*elts)[elts_tail & elts_m]; + + MLX5_ASSERT(elt != NULL); + rte_pktmbuf_free_seg(elt); +#ifdef RTE_LIBRTE_MLX5_DEBUG + /* Poisoning. */ + memset(&(*elts)[elts_tail & elts_m], + 0x77, + sizeof((*elts)[elts_tail & elts_m])); +#endif + ++elts_tail; + } +} + +/** + * Returns the per-port supported offloads. + * + * @param dev + * Pointer to Ethernet device. + * + * @return + * Supported Tx offloads. + */ +uint64_t +mlx5_get_tx_port_offloads(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + uint64_t offloads = (DEV_TX_OFFLOAD_MULTI_SEGS | + DEV_TX_OFFLOAD_VLAN_INSERT); + struct mlx5_dev_config *config = &priv->config; + + if (config->hw_csum) + offloads |= (DEV_TX_OFFLOAD_IPV4_CKSUM | + DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_TCP_CKSUM); + if (config->tso) + offloads |= DEV_TX_OFFLOAD_TCP_TSO; + if (config->swp) { + if (config->hw_csum) + offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM; + if (config->tso) + offloads |= (DEV_TX_OFFLOAD_IP_TNL_TSO | + DEV_TX_OFFLOAD_UDP_TNL_TSO); + } + if (config->tunnel_en) { + if (config->hw_csum) + offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM; + if (config->tso) + offloads |= (DEV_TX_OFFLOAD_VXLAN_TNL_TSO | + DEV_TX_OFFLOAD_GRE_TNL_TSO | + DEV_TX_OFFLOAD_GENEVE_TNL_TSO); + } + return offloads; +} + +/** + * Tx queue presetup checks. + * + * @param dev + * Pointer to Ethernet device structure. + * @param idx + * Tx queue index. + * @param desc + * Number of descriptors to configure in queue. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +mlx5_tx_queue_pre_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc) +{ + struct mlx5_priv *priv = dev->data->dev_private; + + if (desc <= MLX5_TX_COMP_THRESH) { + DRV_LOG(WARNING, + "port %u number of descriptors requested for Tx queue" + " %u must be higher than MLX5_TX_COMP_THRESH, using %u" + " instead of %u", + dev->data->port_id, idx, MLX5_TX_COMP_THRESH + 1, desc); + desc = MLX5_TX_COMP_THRESH + 1; + } + if (!rte_is_power_of_2(desc)) { + desc = 1 << log2above(desc); + DRV_LOG(WARNING, + "port %u increased number of descriptors in Tx queue" + " %u to the next power of two (%d)", + dev->data->port_id, idx, desc); + } + DRV_LOG(DEBUG, "port %u configuring queue %u for %u descriptors", + dev->data->port_id, idx, desc); + if (idx >= priv->txqs_n) { + DRV_LOG(ERR, "port %u Tx queue index out of range (%u >= %u)", + dev->data->port_id, idx, priv->txqs_n); + rte_errno = EOVERFLOW; + return -rte_errno; + } + if (!mlx5_txq_releasable(dev, idx)) { + rte_errno = EBUSY; + DRV_LOG(ERR, "port %u unable to release queue index %u", + dev->data->port_id, idx); + return -rte_errno; + } + mlx5_txq_release(dev, idx); + return 0; +} +/** + * DPDK callback to configure a TX queue. + * + * @param dev + * Pointer to Ethernet device structure. + * @param idx + * TX queue index. + * @param desc + * Number of descriptors to configure in queue. 
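mlx5_get_tx_port_offloads() above feeds the tx_offload_capa value reported through rte_eth_dev_info_get(); an application-side sketch of clamping requested offloads to that capability set, assuming the int-returning rte_eth_dev_info_get() of this DPDK generation (helper name illustrative):

#include <rte_ethdev.h>

static uint64_t
clamp_tx_offloads(uint16_t port_id, uint64_t wanted)
{
        struct rte_eth_dev_info info;

        if (rte_eth_dev_info_get(port_id, &info) != 0)
                return 0;
        /* Keep only the offload bits the port actually reports. */
        return wanted & info.tx_offload_capa;
}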
+ * @param socket + * NUMA socket on which memory must be allocated. + * @param[in] conf + * Thresholds parameters. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, + unsigned int socket, const struct rte_eth_txconf *conf) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_txq_data *txq = (*priv->txqs)[idx]; + struct mlx5_txq_ctrl *txq_ctrl = + container_of(txq, struct mlx5_txq_ctrl, txq); + int res; + + res = mlx5_tx_queue_pre_setup(dev, idx, desc); + if (res) + return res; + txq_ctrl = mlx5_txq_new(dev, idx, desc, socket, conf); + if (!txq_ctrl) { + DRV_LOG(ERR, "port %u unable to allocate queue index %u", + dev->data->port_id, idx); + return -rte_errno; + } + DRV_LOG(DEBUG, "port %u adding Tx queue %u to list", + dev->data->port_id, idx); + (*priv->txqs)[idx] = &txq_ctrl->txq; + return 0; +} + +/** + * DPDK callback to configure a TX hairpin queue. + * + * @param dev + * Pointer to Ethernet device structure. + * @param idx + * TX queue index. + * @param desc + * Number of descriptors to configure in queue. + * @param[in] hairpin_conf + * The hairpin binding configuration. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_tx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t idx, + uint16_t desc, + const struct rte_eth_hairpin_conf *hairpin_conf) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_txq_data *txq = (*priv->txqs)[idx]; + struct mlx5_txq_ctrl *txq_ctrl = + container_of(txq, struct mlx5_txq_ctrl, txq); + int res; + + res = mlx5_tx_queue_pre_setup(dev, idx, desc); + if (res) + return res; + if (hairpin_conf->peer_count != 1 || + hairpin_conf->peers[0].port != dev->data->port_id || + hairpin_conf->peers[0].queue >= priv->rxqs_n) { + DRV_LOG(ERR, "port %u unable to setup hairpin queue index %u " + " invalid hairpind configuration", dev->data->port_id, + idx); + rte_errno = EINVAL; + return -rte_errno; + } + txq_ctrl = mlx5_txq_hairpin_new(dev, idx, desc, hairpin_conf); + if (!txq_ctrl) { + DRV_LOG(ERR, "port %u unable to allocate queue index %u", + dev->data->port_id, idx); + return -rte_errno; + } + DRV_LOG(DEBUG, "port %u adding Tx queue %u to list", + dev->data->port_id, idx); + (*priv->txqs)[idx] = &txq_ctrl->txq; + return 0; +} + +/** + * DPDK callback to release a TX queue. + * + * @param dpdk_txq + * Generic TX queue pointer. + */ +void +mlx5_tx_queue_release(void *dpdk_txq) +{ + struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq; + struct mlx5_txq_ctrl *txq_ctrl; + struct mlx5_priv *priv; + unsigned int i; + + if (txq == NULL) + return; + txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq); + priv = txq_ctrl->priv; + for (i = 0; (i != priv->txqs_n); ++i) + if ((*priv->txqs)[i] == txq) { + DRV_LOG(DEBUG, "port %u removing Tx queue %u from list", + PORT_ID(priv), txq->idx); + mlx5_txq_release(ETH_DEV(priv), i); + break; + } +} + +/** + * Configure the doorbell register non-cached attribute. + * + * @param txq_ctrl + * Pointer to Tx queue control structure. + * @param page_size + * Systme page size + */ +static void +txq_uar_ncattr_init(struct mlx5_txq_ctrl *txq_ctrl, size_t page_size) +{ + struct mlx5_priv *priv = txq_ctrl->priv; + off_t cmd; + + txq_ctrl->txq.db_heu = priv->config.dbnc == MLX5_TXDB_HEURISTIC; + txq_ctrl->txq.db_nc = 0; + /* Check the doorbell register mapping type. 
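The decode performed just below relies on rdma-core encoding a command in the page index of the UAR mmap offset. A standalone restatement of the same check, with the shift, mask and command values left as parameters since the concrete MLX5_UAR_MMAP_* constants are defined elsewhere in the driver:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool
uar_offset_is_non_cached(uint64_t uar_mmap_offset, size_t page_size,
                         unsigned int cmd_shift, uint64_t cmd_mask,
                         uint64_t nc_pages_cmd)
{
        uint64_t cmd = uar_mmap_offset / page_size; /* Page index. */

        cmd >>= cmd_shift;
        cmd &= cmd_mask;
        return cmd == nc_pages_cmd;
}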
*/ + cmd = txq_ctrl->uar_mmap_offset / page_size; + cmd >>= MLX5_UAR_MMAP_CMD_SHIFT; + cmd &= MLX5_UAR_MMAP_CMD_MASK; + if (cmd == MLX5_MMAP_GET_NC_PAGES_CMD) + txq_ctrl->txq.db_nc = 1; +} + +/** + * Initialize Tx UAR registers for primary process. + * + * @param txq_ctrl + * Pointer to Tx queue control structure. + */ +static void +txq_uar_init(struct mlx5_txq_ctrl *txq_ctrl) +{ + struct mlx5_priv *priv = txq_ctrl->priv; + struct mlx5_proc_priv *ppriv = MLX5_PROC_PRIV(PORT_ID(priv)); + const size_t page_size = sysconf(_SC_PAGESIZE); +#ifndef RTE_ARCH_64 + unsigned int lock_idx; +#endif + + if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD) + return; + MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY); + MLX5_ASSERT(ppriv); + ppriv->uar_table[txq_ctrl->txq.idx] = txq_ctrl->bf_reg; + txq_uar_ncattr_init(txq_ctrl, page_size); +#ifndef RTE_ARCH_64 + /* Assign an UAR lock according to UAR page number */ + lock_idx = (txq_ctrl->uar_mmap_offset / page_size) & + MLX5_UAR_PAGE_NUM_MASK; + txq_ctrl->txq.uar_lock = &priv->uar_lock[lock_idx]; +#endif +} + +/** + * Remap UAR register of a Tx queue for secondary process. + * + * Remapped address is stored at the table in the process private structure of + * the device, indexed by queue index. + * + * @param txq_ctrl + * Pointer to Tx queue control structure. + * @param fd + * Verbs file descriptor to map UAR pages. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +txq_uar_init_secondary(struct mlx5_txq_ctrl *txq_ctrl, int fd) +{ + struct mlx5_priv *priv = txq_ctrl->priv; + struct mlx5_proc_priv *ppriv = MLX5_PROC_PRIV(PORT_ID(priv)); + struct mlx5_txq_data *txq = &txq_ctrl->txq; + void *addr; + uintptr_t uar_va; + uintptr_t offset; + const size_t page_size = sysconf(_SC_PAGESIZE); + + if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD) + return 0; + MLX5_ASSERT(ppriv); + /* + * As rdma-core, UARs are mapped in size of OS page + * size. Ref to libmlx5 function: mlx5_init_context() + */ + uar_va = (uintptr_t)txq_ctrl->bf_reg; + offset = uar_va & (page_size - 1); /* Offset in page. */ + addr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, fd, + txq_ctrl->uar_mmap_offset); + if (addr == MAP_FAILED) { + DRV_LOG(ERR, + "port %u mmap failed for BF reg of txq %u", + txq->port_id, txq->idx); + rte_errno = ENXIO; + return -rte_errno; + } + addr = RTE_PTR_ADD(addr, offset); + ppriv->uar_table[txq->idx] = addr; + txq_uar_ncattr_init(txq_ctrl, page_size); + return 0; +} + +/** + * Unmap UAR register of a Tx queue for secondary process. + * + * @param txq_ctrl + * Pointer to Tx queue control structure. + */ +static void +txq_uar_uninit_secondary(struct mlx5_txq_ctrl *txq_ctrl) +{ + struct mlx5_proc_priv *ppriv = MLX5_PROC_PRIV(PORT_ID(txq_ctrl->priv)); + const size_t page_size = sysconf(_SC_PAGESIZE); + void *addr; + + if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD) + return; + addr = ppriv->uar_table[txq_ctrl->txq.idx]; + munmap(RTE_PTR_ALIGN_FLOOR(addr, page_size), page_size); +} + +/** + * Initialize Tx UAR registers for secondary process. + * + * @param dev + * Pointer to Ethernet device. + * @param fd + * Verbs file descriptor to map UAR pages. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. 
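txq_uar_init_secondary() above maps a whole OS page at the device-provided offset and then re-applies the register's offset inside that page, since UARs are only mappable at page granularity. A standalone sketch of the same remapping technique:

#include <stdint.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <unistd.h>

static void *
remap_register(int fd, off_t mmap_offset, uintptr_t primary_va)
{
        const size_t page_size = sysconf(_SC_PAGESIZE);
        uintptr_t in_page = primary_va & (page_size - 1); /* Offset in page. */
        void *base = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, fd,
                          mmap_offset);

        if (base == MAP_FAILED)
                return NULL;
        return (uint8_t *)base + in_page;
}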
+ */ +int +mlx5_tx_uar_init_secondary(struct rte_eth_dev *dev, int fd) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_txq_data *txq; + struct mlx5_txq_ctrl *txq_ctrl; + unsigned int i; + int ret; + + MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY); + for (i = 0; i != priv->txqs_n; ++i) { + if (!(*priv->txqs)[i]) + continue; + txq = (*priv->txqs)[i]; + txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq); + if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD) + continue; + MLX5_ASSERT(txq->idx == (uint16_t)i); + ret = txq_uar_init_secondary(txq_ctrl, fd); + if (ret) + goto error; + } + return 0; +error: + /* Rollback. */ + do { + if (!(*priv->txqs)[i]) + continue; + txq = (*priv->txqs)[i]; + txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq); + txq_uar_uninit_secondary(txq_ctrl); + } while (i--); + return -rte_errno; +} + +/** + * Create the Tx hairpin queue object. + * + * @param dev + * Pointer to Ethernet device. + * @param idx + * Queue index in DPDK Tx queue array + * + * @return + * The hairpin DevX object initialised, NULL otherwise and rte_errno is set. + */ +static struct mlx5_txq_obj * +mlx5_txq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_txq_data *txq_data = (*priv->txqs)[idx]; + struct mlx5_txq_ctrl *txq_ctrl = + container_of(txq_data, struct mlx5_txq_ctrl, txq); + struct mlx5_devx_create_sq_attr attr = { 0 }; + struct mlx5_txq_obj *tmpl = NULL; + int ret = 0; + uint32_t max_wq_data; + + MLX5_ASSERT(txq_data); + MLX5_ASSERT(!txq_ctrl->obj); + tmpl = rte_calloc_socket(__func__, 1, sizeof(*tmpl), 0, + txq_ctrl->socket); + if (!tmpl) { + DRV_LOG(ERR, + "port %u Tx queue %u cannot allocate memory resources", + dev->data->port_id, txq_data->idx); + rte_errno = ENOMEM; + goto error; + } + tmpl->type = MLX5_TXQ_OBJ_TYPE_DEVX_HAIRPIN; + tmpl->txq_ctrl = txq_ctrl; + attr.hairpin = 1; + attr.tis_lst_sz = 1; + max_wq_data = priv->config.hca_attr.log_max_hairpin_wq_data_sz; + /* Jumbo frames > 9KB should be supported, and more packets. */ + if (priv->config.log_hp_size != (uint32_t)MLX5_ARG_UNSET) { + if (priv->config.log_hp_size > max_wq_data) { + DRV_LOG(ERR, "total data size %u power of 2 is " + "too large for hairpin", + priv->config.log_hp_size); + rte_errno = ERANGE; + return NULL; + } + attr.wq_attr.log_hairpin_data_sz = priv->config.log_hp_size; + } else { + attr.wq_attr.log_hairpin_data_sz = + (max_wq_data < MLX5_HAIRPIN_JUMBO_LOG_SIZE) ? + max_wq_data : MLX5_HAIRPIN_JUMBO_LOG_SIZE; + } + /* Set the packets number to the maximum value for performance. */ + attr.wq_attr.log_hairpin_num_packets = + attr.wq_attr.log_hairpin_data_sz - + MLX5_HAIRPIN_QUEUE_STRIDE; + attr.tis_num = priv->sh->tis->id; + tmpl->sq = mlx5_devx_cmd_create_sq(priv->sh->ctx, &attr); + if (!tmpl->sq) { + DRV_LOG(ERR, + "port %u tx hairpin queue %u can't create sq object", + dev->data->port_id, idx); + rte_errno = errno; + goto error; + } + DRV_LOG(DEBUG, "port %u sxq %u updated with %p", dev->data->port_id, + idx, (void *)&tmpl); + rte_atomic32_inc(&tmpl->refcnt); + LIST_INSERT_HEAD(&priv->txqsobj, tmpl, next); + return tmpl; +error: + ret = rte_errno; /* Save rte_errno before cleanup. */ + if (tmpl->tis) + mlx5_devx_cmd_destroy(tmpl->tis); + if (tmpl->sq) + mlx5_devx_cmd_destroy(tmpl->sq); + rte_errno = ret; /* Restore rte_errno. */ + return NULL; +} + +/** + * Create the Tx queue Verbs object. + * + * @param dev + * Pointer to Ethernet device. + * @param idx + * Queue index in DPDK Tx queue array. 
+ * @param type + * Type of the Tx queue object to create. + * + * @return + * The Verbs object initialised, NULL otherwise and rte_errno is set. + */ +struct mlx5_txq_obj * +mlx5_txq_obj_new(struct rte_eth_dev *dev, uint16_t idx, + enum mlx5_txq_obj_type type) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_txq_data *txq_data = (*priv->txqs)[idx]; + struct mlx5_txq_ctrl *txq_ctrl = + container_of(txq_data, struct mlx5_txq_ctrl, txq); + struct mlx5_txq_obj tmpl; + struct mlx5_txq_obj *txq_obj = NULL; + union { + struct ibv_qp_init_attr_ex init; + struct ibv_cq_init_attr_ex cq; + struct ibv_qp_attr mod; + } attr; + unsigned int cqe_n; + struct mlx5dv_qp qp = { .comp_mask = MLX5DV_QP_MASK_UAR_MMAP_OFFSET }; + struct mlx5dv_cq cq_info; + struct mlx5dv_obj obj; + const int desc = 1 << txq_data->elts_n; + int ret = 0; + + if (type == MLX5_TXQ_OBJ_TYPE_DEVX_HAIRPIN) + return mlx5_txq_obj_hairpin_new(dev, idx); +#ifdef HAVE_IBV_FLOW_DV_SUPPORT + /* If using DevX, need additional mask to read tisn value. */ + if (priv->config.devx && !priv->sh->tdn) + qp.comp_mask |= MLX5DV_QP_MASK_RAW_QP_HANDLES; +#endif + MLX5_ASSERT(txq_data); + priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_TX_QUEUE; + priv->verbs_alloc_ctx.obj = txq_ctrl; + if (mlx5_getenv_int("MLX5_ENABLE_CQE_COMPRESSION")) { + DRV_LOG(ERR, + "port %u MLX5_ENABLE_CQE_COMPRESSION must never be set", + dev->data->port_id); + rte_errno = EINVAL; + return NULL; + } + memset(&tmpl, 0, sizeof(struct mlx5_txq_obj)); + attr.cq = (struct ibv_cq_init_attr_ex){ + .comp_mask = 0, + }; + cqe_n = desc / MLX5_TX_COMP_THRESH + + 1 + MLX5_TX_COMP_THRESH_INLINE_DIV; + tmpl.cq = mlx5_glue->create_cq(priv->sh->ctx, cqe_n, NULL, NULL, 0); + if (tmpl.cq == NULL) { + DRV_LOG(ERR, "port %u Tx queue %u CQ creation failure", + dev->data->port_id, idx); + rte_errno = errno; + goto error; + } + attr.init = (struct ibv_qp_init_attr_ex){ + /* CQ to be associated with the send queue. */ + .send_cq = tmpl.cq, + /* CQ to be associated with the receive queue. */ + .recv_cq = tmpl.cq, + .cap = { + /* Max number of outstanding WRs. */ + .max_send_wr = + ((priv->sh->device_attr.orig_attr.max_qp_wr < + desc) ? + priv->sh->device_attr.orig_attr.max_qp_wr : + desc), + /* + * Max number of scatter/gather elements in a WR, + * must be 1 to prevent libmlx5 from trying to affect + * too much memory. TX gather is not impacted by the + * device_attr.max_sge limit and will still work + * properly. + */ + .max_send_sge = 1, + }, + .qp_type = IBV_QPT_RAW_PACKET, + /* + * Do *NOT* enable this, completions events are managed per + * Tx burst. + */ + .sq_sig_all = 0, + .pd = priv->sh->pd, + .comp_mask = IBV_QP_INIT_ATTR_PD, + }; + if (txq_data->inlen_send) + attr.init.cap.max_inline_data = txq_ctrl->max_inline_data; + if (txq_data->tso_en) { + attr.init.max_tso_header = txq_ctrl->max_tso_header; + attr.init.comp_mask |= IBV_QP_INIT_ATTR_MAX_TSO_HEADER; + } + tmpl.qp = mlx5_glue->create_qp_ex(priv->sh->ctx, &attr.init); + if (tmpl.qp == NULL) { + DRV_LOG(ERR, "port %u Tx queue %u QP creation failure", + dev->data->port_id, idx); + rte_errno = errno; + goto error; + } + attr.mod = (struct ibv_qp_attr){ + /* Move the QP to this state. */ + .qp_state = IBV_QPS_INIT, + /* IB device port number. 
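The modify_qp calls that follow walk the Verbs QP state machine RESET -> INIT -> RTR -> RTS, setting only the state (plus the port number at INIT) for a raw packet QP. The same walk expressed directly against libibverbs, as a sketch:

#include <infiniband/verbs.h>

static int
raw_qp_to_rts(struct ibv_qp *qp, uint8_t port_num)
{
        struct ibv_qp_attr attr = {
                .qp_state = IBV_QPS_INIT,
                .port_num = port_num,
        };
        int ret = ibv_modify_qp(qp, &attr, IBV_QP_STATE | IBV_QP_PORT);

        if (ret)
                return ret;
        attr = (struct ibv_qp_attr){ .qp_state = IBV_QPS_RTR };
        ret = ibv_modify_qp(qp, &attr, IBV_QP_STATE);
        if (ret)
                return ret;
        attr = (struct ibv_qp_attr){ .qp_state = IBV_QPS_RTS };
        return ibv_modify_qp(qp, &attr, IBV_QP_STATE);
}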
*/ + .port_num = (uint8_t)priv->ibv_port, + }; + ret = mlx5_glue->modify_qp(tmpl.qp, &attr.mod, + (IBV_QP_STATE | IBV_QP_PORT)); + if (ret) { + DRV_LOG(ERR, + "port %u Tx queue %u QP state to IBV_QPS_INIT failed", + dev->data->port_id, idx); + rte_errno = errno; + goto error; + } + attr.mod = (struct ibv_qp_attr){ + .qp_state = IBV_QPS_RTR + }; + ret = mlx5_glue->modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE); + if (ret) { + DRV_LOG(ERR, + "port %u Tx queue %u QP state to IBV_QPS_RTR failed", + dev->data->port_id, idx); + rte_errno = errno; + goto error; + } + attr.mod.qp_state = IBV_QPS_RTS; + ret = mlx5_glue->modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE); + if (ret) { + DRV_LOG(ERR, + "port %u Tx queue %u QP state to IBV_QPS_RTS failed", + dev->data->port_id, idx); + rte_errno = errno; + goto error; + } + txq_obj = rte_calloc_socket(__func__, 1, sizeof(struct mlx5_txq_obj), 0, + txq_ctrl->socket); + if (!txq_obj) { + DRV_LOG(ERR, "port %u Tx queue %u cannot allocate memory", + dev->data->port_id, idx); + rte_errno = ENOMEM; + goto error; + } + obj.cq.in = tmpl.cq; + obj.cq.out = &cq_info; + obj.qp.in = tmpl.qp; + obj.qp.out = &qp; + ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_QP); + if (ret != 0) { + rte_errno = errno; + goto error; + } + if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) { + DRV_LOG(ERR, + "port %u wrong MLX5_CQE_SIZE environment variable" + " value: it should be set to %u", + dev->data->port_id, RTE_CACHE_LINE_SIZE); + rte_errno = EINVAL; + goto error; + } + txq_data->cqe_n = log2above(cq_info.cqe_cnt); + txq_data->cqe_s = 1 << txq_data->cqe_n; + txq_data->cqe_m = txq_data->cqe_s - 1; + txq_data->qp_num_8s = tmpl.qp->qp_num << 8; + txq_data->wqes = qp.sq.buf; + txq_data->wqe_n = log2above(qp.sq.wqe_cnt); + txq_data->wqe_s = 1 << txq_data->wqe_n; + txq_data->wqe_m = txq_data->wqe_s - 1; + txq_data->wqes_end = txq_data->wqes + txq_data->wqe_s; + txq_data->qp_db = &qp.dbrec[MLX5_SND_DBR]; + txq_data->cq_db = cq_info.dbrec; + txq_data->cqes = (volatile struct mlx5_cqe *)cq_info.buf; + txq_data->cq_ci = 0; + txq_data->cq_pi = 0; + txq_data->wqe_ci = 0; + txq_data->wqe_pi = 0; + txq_data->wqe_comp = 0; + txq_data->wqe_thres = txq_data->wqe_s / MLX5_TX_COMP_THRESH_INLINE_DIV; + txq_data->fcqs = rte_calloc_socket(__func__, + txq_data->cqe_s, + sizeof(*txq_data->fcqs), + RTE_CACHE_LINE_SIZE, + txq_ctrl->socket); + if (!txq_data->fcqs) { + DRV_LOG(ERR, "port %u Tx queue %u cannot allocate memory (FCQ)", + dev->data->port_id, idx); + rte_errno = ENOMEM; + goto error; + } +#ifdef HAVE_IBV_FLOW_DV_SUPPORT + /* + * If using DevX need to query and store TIS transport domain value. + * This is done once per port. + * Will use this value on Rx, when creating matching TIR. 
+ */ + if (priv->config.devx && !priv->sh->tdn) { + ret = mlx5_devx_cmd_qp_query_tis_td(tmpl.qp, qp.tisn, + &priv->sh->tdn); + if (ret) { + DRV_LOG(ERR, "Fail to query port %u Tx queue %u QP TIS " + "transport domain", dev->data->port_id, idx); + rte_errno = EINVAL; + goto error; + } else { + DRV_LOG(DEBUG, "port %u Tx queue %u TIS number %d " + "transport domain %d", dev->data->port_id, + idx, qp.tisn, priv->sh->tdn); + } + } +#endif + txq_obj->qp = tmpl.qp; + txq_obj->cq = tmpl.cq; + rte_atomic32_inc(&txq_obj->refcnt); + txq_ctrl->bf_reg = qp.bf.reg; + if (qp.comp_mask & MLX5DV_QP_MASK_UAR_MMAP_OFFSET) { + txq_ctrl->uar_mmap_offset = qp.uar_mmap_offset; + DRV_LOG(DEBUG, "port %u: uar_mmap_offset 0x%"PRIx64, + dev->data->port_id, txq_ctrl->uar_mmap_offset); + } else { + DRV_LOG(ERR, + "port %u failed to retrieve UAR info, invalid" + " libmlx5.so", + dev->data->port_id); + rte_errno = EINVAL; + goto error; + } + txq_uar_init(txq_ctrl); + LIST_INSERT_HEAD(&priv->txqsobj, txq_obj, next); + txq_obj->txq_ctrl = txq_ctrl; + priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE; + return txq_obj; +error: + ret = rte_errno; /* Save rte_errno before cleanup. */ + if (tmpl.cq) + claim_zero(mlx5_glue->destroy_cq(tmpl.cq)); + if (tmpl.qp) + claim_zero(mlx5_glue->destroy_qp(tmpl.qp)); + if (txq_data && txq_data->fcqs) + rte_free(txq_data->fcqs); + if (txq_obj) + rte_free(txq_obj); + priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE; + rte_errno = ret; /* Restore rte_errno. */ + return NULL; +} + +/** + * Get an Tx queue Verbs object. + * + * @param dev + * Pointer to Ethernet device. + * @param idx + * Queue index in DPDK Tx queue array. + * + * @return + * The Verbs object if it exists. + */ +struct mlx5_txq_obj * +mlx5_txq_obj_get(struct rte_eth_dev *dev, uint16_t idx) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_txq_ctrl *txq_ctrl; + + if (idx >= priv->txqs_n) + return NULL; + if (!(*priv->txqs)[idx]) + return NULL; + txq_ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq); + if (txq_ctrl->obj) + rte_atomic32_inc(&txq_ctrl->obj->refcnt); + return txq_ctrl->obj; +} + +/** + * Release an Tx verbs queue object. + * + * @param txq_obj + * Verbs Tx queue object. + * + * @return + * 1 while a reference on it exists, 0 when freed. + */ +int +mlx5_txq_obj_release(struct mlx5_txq_obj *txq_obj) +{ + MLX5_ASSERT(txq_obj); + if (rte_atomic32_dec_and_test(&txq_obj->refcnt)) { + if (txq_obj->type == MLX5_TXQ_OBJ_TYPE_DEVX_HAIRPIN) { + if (txq_obj->tis) + claim_zero(mlx5_devx_cmd_destroy(txq_obj->tis)); + } else { + claim_zero(mlx5_glue->destroy_qp(txq_obj->qp)); + claim_zero(mlx5_glue->destroy_cq(txq_obj->cq)); + if (txq_obj->txq_ctrl->txq.fcqs) + rte_free(txq_obj->txq_ctrl->txq.fcqs); + } + LIST_REMOVE(txq_obj, next); + rte_free(txq_obj); + return 0; + } + return 1; +} + +/** + * Verify the Verbs Tx queue list is empty + * + * @param dev + * Pointer to Ethernet device. + * + * @return + * The number of object not released. + */ +int +mlx5_txq_obj_verify(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + int ret = 0; + struct mlx5_txq_obj *txq_obj; + + LIST_FOREACH(txq_obj, &priv->txqsobj, next) { + DRV_LOG(DEBUG, "port %u Verbs Tx queue %u still referenced", + dev->data->port_id, txq_obj->txq_ctrl->txq.idx); + ++ret; + } + return ret; +} + +/** + * Calculate the total number of WQEBB for Tx queue. + * + * Simplified version of calc_sq_size() in rdma-core. + * + * @param txq_ctrl + * Pointer to Tx queue control structure. 
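mlx5_txq_obj_get()/mlx5_txq_obj_release() above use the reference-counting scheme shared by the queue objects: get() bumps the counter, release() frees only when the last reference drops and reports whether any remain. A minimal generic sketch of that pattern:

#include <rte_atomic.h>
#include <rte_malloc.h>

struct refobj {
        rte_atomic32_t refcnt;
        /* ... payload ... */
};

static struct refobj *
refobj_get(struct refobj *obj)
{
        if (obj)
                rte_atomic32_inc(&obj->refcnt);
        return obj;
}

static int
refobj_release(struct refobj *obj)
{
        if (rte_atomic32_dec_and_test(&obj->refcnt)) {
                rte_free(obj);
                return 0;       /* Freed. */
        }
        return 1;               /* Still referenced. */
}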
+ * + * @return + * The number of WQEBB. + */ +static int +txq_calc_wqebb_cnt(struct mlx5_txq_ctrl *txq_ctrl) +{ + unsigned int wqe_size; + const unsigned int desc = 1 << txq_ctrl->txq.elts_n; + + wqe_size = MLX5_WQE_CSEG_SIZE + + MLX5_WQE_ESEG_SIZE + + MLX5_WSEG_SIZE - + MLX5_ESEG_MIN_INLINE_SIZE + + txq_ctrl->max_inline_data; + return rte_align32pow2(wqe_size * desc) / MLX5_WQE_SIZE; +} + +/** + * Calculate the maximal inline data size for Tx queue. + * + * @param txq_ctrl + * Pointer to Tx queue control structure. + * + * @return + * The maximal inline data size. + */ +static unsigned int +txq_calc_inline_max(struct mlx5_txq_ctrl *txq_ctrl) +{ + const unsigned int desc = 1 << txq_ctrl->txq.elts_n; + struct mlx5_priv *priv = txq_ctrl->priv; + unsigned int wqe_size; + + wqe_size = priv->sh->device_attr.orig_attr.max_qp_wr / desc; + if (!wqe_size) + return 0; + /* + * This calculation is derived from tthe source of + * mlx5_calc_send_wqe() in rdma_core library. + */ + wqe_size = wqe_size * MLX5_WQE_SIZE - + MLX5_WQE_CSEG_SIZE - + MLX5_WQE_ESEG_SIZE - + MLX5_WSEG_SIZE - + MLX5_WSEG_SIZE + + MLX5_DSEG_MIN_INLINE_SIZE; + return wqe_size; +} + +/** + * Set Tx queue parameters from device configuration. + * + * @param txq_ctrl + * Pointer to Tx queue control structure. + */ +static void +txq_set_params(struct mlx5_txq_ctrl *txq_ctrl) +{ + struct mlx5_priv *priv = txq_ctrl->priv; + struct mlx5_dev_config *config = &priv->config; + unsigned int inlen_send; /* Inline data for ordinary SEND.*/ + unsigned int inlen_empw; /* Inline data for enhanced MPW. */ + unsigned int inlen_mode; /* Minimal required Inline data. */ + unsigned int txqs_inline; /* Min Tx queues to enable inline. */ + uint64_t dev_txoff = priv->dev_data->dev_conf.txmode.offloads; + bool tso = txq_ctrl->txq.offloads & (DEV_TX_OFFLOAD_TCP_TSO | + DEV_TX_OFFLOAD_VXLAN_TNL_TSO | + DEV_TX_OFFLOAD_GRE_TNL_TSO | + DEV_TX_OFFLOAD_IP_TNL_TSO | + DEV_TX_OFFLOAD_UDP_TNL_TSO); + bool vlan_inline; + unsigned int temp; + + if (config->txqs_inline == MLX5_ARG_UNSET) + txqs_inline = +#if defined(RTE_ARCH_ARM64) + (priv->pci_dev->id.device_id == + PCI_DEVICE_ID_MELLANOX_CONNECTX5BF) ? + MLX5_INLINE_MAX_TXQS_BLUEFIELD : +#endif + MLX5_INLINE_MAX_TXQS; + else + txqs_inline = (unsigned int)config->txqs_inline; + inlen_send = (config->txq_inline_max == MLX5_ARG_UNSET) ? + MLX5_SEND_DEF_INLINE_LEN : + (unsigned int)config->txq_inline_max; + inlen_empw = (config->txq_inline_mpw == MLX5_ARG_UNSET) ? + MLX5_EMPW_DEF_INLINE_LEN : + (unsigned int)config->txq_inline_mpw; + inlen_mode = (config->txq_inline_min == MLX5_ARG_UNSET) ? + 0 : (unsigned int)config->txq_inline_min; + if (config->mps != MLX5_MPW_ENHANCED && config->mps != MLX5_MPW) + inlen_empw = 0; + /* + * If there is requested minimal amount of data to inline + * we MUST enable inlining. This is a case for ConnectX-4 + * which usually requires L2 inlined for correct operating + * and ConnectX-4 Lx which requires L2-L4 inlined to + * support E-Switch Flows. + */ + if (inlen_mode) { + if (inlen_mode <= MLX5_ESEG_MIN_INLINE_SIZE) { + /* + * Optimize minimal inlining for single + * segment packets to fill one WQEBB + * without gaps. 
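The inline-length tuning in txq_set_params() repeatedly rounds a requested length to a segment multiple and clamps it into a supported window. A simplified standalone sketch of that align-then-clamp step (the real code also compensates for header bytes before aligning):

#include <stdint.h>

static unsigned int
align_and_clamp(unsigned int requested, unsigned int seg, unsigned int lo,
                unsigned int hi)
{
        unsigned int v = (requested + seg - 1) / seg * seg; /* Round up. */

        if (v < lo)
                v = lo;
        if (v > hi)
                v = hi;
        return v;
}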
+ */ + temp = MLX5_ESEG_MIN_INLINE_SIZE; + } else { + temp = inlen_mode - MLX5_ESEG_MIN_INLINE_SIZE; + temp = RTE_ALIGN(temp, MLX5_WSEG_SIZE) + + MLX5_ESEG_MIN_INLINE_SIZE; + temp = RTE_MIN(temp, MLX5_SEND_MAX_INLINE_LEN); + } + if (temp != inlen_mode) { + DRV_LOG(INFO, + "port %u minimal required inline setting" + " aligned from %u to %u", + PORT_ID(priv), inlen_mode, temp); + inlen_mode = temp; + } + } + /* + * If port is configured to support VLAN insertion and device + * does not support this feature by HW (for NICs before ConnectX-5 + * or in case of wqe_vlan_insert flag is not set) we must enable + * data inline on all queues because it is supported by single + * tx_burst routine. + */ + txq_ctrl->txq.vlan_en = config->hw_vlan_insert; + vlan_inline = (dev_txoff & DEV_TX_OFFLOAD_VLAN_INSERT) && + !config->hw_vlan_insert; + /* + * If there are few Tx queues it is prioritized + * to save CPU cycles and disable data inlining at all. + */ + if (inlen_send && priv->txqs_n >= txqs_inline) { + /* + * The data sent with ordinal MLX5_OPCODE_SEND + * may be inlined in Ethernet Segment, align the + * length accordingly to fit entire WQEBBs. + */ + temp = RTE_MAX(inlen_send, + MLX5_ESEG_MIN_INLINE_SIZE + MLX5_WQE_DSEG_SIZE); + temp -= MLX5_ESEG_MIN_INLINE_SIZE + MLX5_WQE_DSEG_SIZE; + temp = RTE_ALIGN(temp, MLX5_WQE_SIZE); + temp += MLX5_ESEG_MIN_INLINE_SIZE + MLX5_WQE_DSEG_SIZE; + temp = RTE_MIN(temp, MLX5_WQE_SIZE_MAX + + MLX5_ESEG_MIN_INLINE_SIZE - + MLX5_WQE_CSEG_SIZE - + MLX5_WQE_ESEG_SIZE - + MLX5_WQE_DSEG_SIZE * 2); + temp = RTE_MIN(temp, MLX5_SEND_MAX_INLINE_LEN); + temp = RTE_MAX(temp, inlen_mode); + if (temp != inlen_send) { + DRV_LOG(INFO, + "port %u ordinary send inline setting" + " aligned from %u to %u", + PORT_ID(priv), inlen_send, temp); + inlen_send = temp; + } + /* + * Not aligned to cache lines, but to WQEs. + * First bytes of data (initial alignment) + * is going to be copied explicitly at the + * beginning of inlining buffer in Ethernet + * Segment. + */ + MLX5_ASSERT(inlen_send >= MLX5_ESEG_MIN_INLINE_SIZE); + MLX5_ASSERT(inlen_send <= MLX5_WQE_SIZE_MAX + + MLX5_ESEG_MIN_INLINE_SIZE - + MLX5_WQE_CSEG_SIZE - + MLX5_WQE_ESEG_SIZE - + MLX5_WQE_DSEG_SIZE * 2); + } else if (inlen_mode) { + /* + * If minimal inlining is requested we must + * enable inlining in general, despite the + * number of configured queues. Ignore the + * txq_inline_max devarg, this is not + * full-featured inline. + */ + inlen_send = inlen_mode; + inlen_empw = 0; + } else if (vlan_inline) { + /* + * Hardware does not report offload for + * VLAN insertion, we must enable data inline + * to implement feature by software. + */ + inlen_send = MLX5_ESEG_MIN_INLINE_SIZE; + inlen_empw = 0; + } else { + inlen_send = 0; + inlen_empw = 0; + } + txq_ctrl->txq.inlen_send = inlen_send; + txq_ctrl->txq.inlen_mode = inlen_mode; + txq_ctrl->txq.inlen_empw = 0; + if (inlen_send && inlen_empw && priv->txqs_n >= txqs_inline) { + /* + * The data sent with MLX5_OPCODE_ENHANCED_MPSW + * may be inlined in Data Segment, align the + * length accordingly to fit entire WQEBBs. 
+ */ + temp = RTE_MAX(inlen_empw, + MLX5_WQE_SIZE + MLX5_DSEG_MIN_INLINE_SIZE); + temp -= MLX5_DSEG_MIN_INLINE_SIZE; + temp = RTE_ALIGN(temp, MLX5_WQE_SIZE); + temp += MLX5_DSEG_MIN_INLINE_SIZE; + temp = RTE_MIN(temp, MLX5_WQE_SIZE_MAX + + MLX5_DSEG_MIN_INLINE_SIZE - + MLX5_WQE_CSEG_SIZE - + MLX5_WQE_ESEG_SIZE - + MLX5_WQE_DSEG_SIZE); + temp = RTE_MIN(temp, MLX5_EMPW_MAX_INLINE_LEN); + if (temp != inlen_empw) { + DRV_LOG(INFO, + "port %u enhanced empw inline setting" + " aligned from %u to %u", + PORT_ID(priv), inlen_empw, temp); + inlen_empw = temp; + } + MLX5_ASSERT(inlen_empw >= MLX5_ESEG_MIN_INLINE_SIZE); + MLX5_ASSERT(inlen_empw <= MLX5_WQE_SIZE_MAX + + MLX5_DSEG_MIN_INLINE_SIZE - + MLX5_WQE_CSEG_SIZE - + MLX5_WQE_ESEG_SIZE - + MLX5_WQE_DSEG_SIZE); + txq_ctrl->txq.inlen_empw = inlen_empw; + } + txq_ctrl->max_inline_data = RTE_MAX(inlen_send, inlen_empw); + if (tso) { + txq_ctrl->max_tso_header = MLX5_MAX_TSO_HEADER; + txq_ctrl->max_inline_data = RTE_MAX(txq_ctrl->max_inline_data, + MLX5_MAX_TSO_HEADER); + txq_ctrl->txq.tso_en = 1; + } + txq_ctrl->txq.tunnel_en = config->tunnel_en | config->swp; + txq_ctrl->txq.swp_en = ((DEV_TX_OFFLOAD_IP_TNL_TSO | + DEV_TX_OFFLOAD_UDP_TNL_TSO | + DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) & + txq_ctrl->txq.offloads) && config->swp; +} + +/** + * Adjust Tx queue data inline parameters for large queue sizes. + * The data inline feature requires multiple WQEs to fit the packets, + * and if the large amount of Tx descriptors is requested by application + * the total WQE amount may exceed the hardware capabilities. If the + * default inline setting are used we can try to adjust these ones and + * meet the hardware requirements and not exceed the queue size. + * + * @param txq_ctrl + * Pointer to Tx queue control structure. + * + * @return + * Zero on success, otherwise the parameters can not be adjusted. + */ +static int +txq_adjust_params(struct mlx5_txq_ctrl *txq_ctrl) +{ + struct mlx5_priv *priv = txq_ctrl->priv; + struct mlx5_dev_config *config = &priv->config; + unsigned int max_inline; + + max_inline = txq_calc_inline_max(txq_ctrl); + if (!txq_ctrl->txq.inlen_send) { + /* + * Inline data feature is not engaged at all. + * There is nothing to adjust. + */ + return 0; + } + if (txq_ctrl->max_inline_data <= max_inline) { + /* + * The requested inline data length does not + * exceed queue capabilities. 
+ */ + return 0; + } + if (txq_ctrl->txq.inlen_mode > max_inline) { + DRV_LOG(ERR, + "minimal data inline requirements (%u) are not" + " satisfied (%u) on port %u, try the smaller" + " Tx queue size (%d)", + txq_ctrl->txq.inlen_mode, max_inline, + priv->dev_data->port_id, + priv->sh->device_attr.orig_attr.max_qp_wr); + goto error; + } + if (txq_ctrl->txq.inlen_send > max_inline && + config->txq_inline_max != MLX5_ARG_UNSET && + config->txq_inline_max > (int)max_inline) { + DRV_LOG(ERR, + "txq_inline_max requirements (%u) are not" + " satisfied (%u) on port %u, try the smaller" + " Tx queue size (%d)", + txq_ctrl->txq.inlen_send, max_inline, + priv->dev_data->port_id, + priv->sh->device_attr.orig_attr.max_qp_wr); + goto error; + } + if (txq_ctrl->txq.inlen_empw > max_inline && + config->txq_inline_mpw != MLX5_ARG_UNSET && + config->txq_inline_mpw > (int)max_inline) { + DRV_LOG(ERR, + "txq_inline_mpw requirements (%u) are not" + " satisfied (%u) on port %u, try the smaller" + " Tx queue size (%d)", + txq_ctrl->txq.inlen_empw, max_inline, + priv->dev_data->port_id, + priv->sh->device_attr.orig_attr.max_qp_wr); + goto error; + } + if (txq_ctrl->txq.tso_en && max_inline < MLX5_MAX_TSO_HEADER) { + DRV_LOG(ERR, + "tso header inline requirements (%u) are not" + " satisfied (%u) on port %u, try the smaller" + " Tx queue size (%d)", + MLX5_MAX_TSO_HEADER, max_inline, + priv->dev_data->port_id, + priv->sh->device_attr.orig_attr.max_qp_wr); + goto error; + } + if (txq_ctrl->txq.inlen_send > max_inline) { + DRV_LOG(WARNING, + "adjust txq_inline_max (%u->%u)" + " due to large Tx queue on port %u", + txq_ctrl->txq.inlen_send, max_inline, + priv->dev_data->port_id); + txq_ctrl->txq.inlen_send = max_inline; + } + if (txq_ctrl->txq.inlen_empw > max_inline) { + DRV_LOG(WARNING, + "adjust txq_inline_mpw (%u->%u)" + "due to large Tx queue on port %u", + txq_ctrl->txq.inlen_empw, max_inline, + priv->dev_data->port_id); + txq_ctrl->txq.inlen_empw = max_inline; + } + txq_ctrl->max_inline_data = RTE_MAX(txq_ctrl->txq.inlen_send, + txq_ctrl->txq.inlen_empw); + MLX5_ASSERT(txq_ctrl->max_inline_data <= max_inline); + MLX5_ASSERT(txq_ctrl->txq.inlen_mode <= max_inline); + MLX5_ASSERT(txq_ctrl->txq.inlen_mode <= txq_ctrl->txq.inlen_send); + MLX5_ASSERT(txq_ctrl->txq.inlen_mode <= txq_ctrl->txq.inlen_empw || + !txq_ctrl->txq.inlen_empw); + return 0; +error: + rte_errno = ENOMEM; + return -ENOMEM; +} + +/** + * Create a DPDK Tx queue. + * + * @param dev + * Pointer to Ethernet device. + * @param idx + * TX queue index. + * @param desc + * Number of descriptors to configure in queue. + * @param socket + * NUMA socket on which memory must be allocated. + * @param[in] conf + * Thresholds parameters. + * + * @return + * A DPDK queue object on success, NULL otherwise and rte_errno is set. + */ +struct mlx5_txq_ctrl * +mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, + unsigned int socket, const struct rte_eth_txconf *conf) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_txq_ctrl *tmpl; + + tmpl = rte_calloc_socket("TXQ", 1, + sizeof(*tmpl) + + desc * sizeof(struct rte_mbuf *), + 0, socket); + if (!tmpl) { + rte_errno = ENOMEM; + return NULL; + } + if (mlx5_mr_btree_init(&tmpl->txq.mr_ctrl.cache_bh, + MLX5_MR_BTREE_CACHE_N, socket)) { + /* rte_errno is already set. */ + goto error; + } + /* Save pointer of global generation number to check memory event. 
*/ + tmpl->txq.mr_ctrl.dev_gen_ptr = &priv->sh->share_cache.dev_gen; + MLX5_ASSERT(desc > MLX5_TX_COMP_THRESH); + tmpl->txq.offloads = conf->offloads | + dev->data->dev_conf.txmode.offloads; + tmpl->priv = priv; + tmpl->socket = socket; + tmpl->txq.elts_n = log2above(desc); + tmpl->txq.elts_s = desc; + tmpl->txq.elts_m = desc - 1; + tmpl->txq.port_id = dev->data->port_id; + tmpl->txq.idx = idx; + txq_set_params(tmpl); + if (txq_adjust_params(tmpl)) + goto error; + if (txq_calc_wqebb_cnt(tmpl) > + priv->sh->device_attr.orig_attr.max_qp_wr) { + DRV_LOG(ERR, + "port %u Tx WQEBB count (%d) exceeds the limit (%d)," + " try smaller queue size", + dev->data->port_id, txq_calc_wqebb_cnt(tmpl), + priv->sh->device_attr.orig_attr.max_qp_wr); + rte_errno = ENOMEM; + goto error; + } + rte_atomic32_inc(&tmpl->refcnt); + tmpl->type = MLX5_TXQ_TYPE_STANDARD; + LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next); + return tmpl; +error: + rte_free(tmpl); + return NULL; +} + +/** + * Create a DPDK Tx hairpin queue. + * + * @param dev + * Pointer to Ethernet device. + * @param idx + * TX queue index. + * @param desc + * Number of descriptors to configure in queue. + * @param hairpin_conf + * The hairpin configuration. + * + * @return + * A DPDK queue object on success, NULL otherwise and rte_errno is set. + */ +struct mlx5_txq_ctrl * +mlx5_txq_hairpin_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, + const struct rte_eth_hairpin_conf *hairpin_conf) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_txq_ctrl *tmpl; + + tmpl = rte_calloc_socket("TXQ", 1, + sizeof(*tmpl), 0, SOCKET_ID_ANY); + if (!tmpl) { + rte_errno = ENOMEM; + return NULL; + } + tmpl->priv = priv; + tmpl->socket = SOCKET_ID_ANY; + tmpl->txq.elts_n = log2above(desc); + tmpl->txq.port_id = dev->data->port_id; + tmpl->txq.idx = idx; + tmpl->hairpin_conf = *hairpin_conf; + tmpl->type = MLX5_TXQ_TYPE_HAIRPIN; + rte_atomic32_inc(&tmpl->refcnt); + LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next); + return tmpl; +} + +/** + * Get a Tx queue. + * + * @param dev + * Pointer to Ethernet device. + * @param idx + * TX queue index. + * + * @return + * A pointer to the queue if it exists. + */ +struct mlx5_txq_ctrl * +mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_txq_ctrl *ctrl = NULL; + + if ((*priv->txqs)[idx]) { + ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, + txq); + mlx5_txq_obj_get(dev, idx); + rte_atomic32_inc(&ctrl->refcnt); + } + return ctrl; +} + +/** + * Release a Tx queue. + * + * @param dev + * Pointer to Ethernet device. + * @param idx + * TX queue index. + * + * @return + * 1 while a reference on it exists, 0 when freed. + */ +int +mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_txq_ctrl *txq; + + if (!(*priv->txqs)[idx]) + return 0; + txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq); + if (txq->obj && !mlx5_txq_obj_release(txq->obj)) + txq->obj = NULL; + if (rte_atomic32_dec_and_test(&txq->refcnt)) { + txq_free_elts(txq); + mlx5_mr_btree_free(&txq->txq.mr_ctrl.cache_bh); + LIST_REMOVE(txq, next); + rte_free(txq); + (*priv->txqs)[idx] = NULL; + return 0; + } + return 1; +} + +/** + * Verify if the queue can be released. + * + * @param dev + * Pointer to Ethernet device. + * @param idx + * TX queue index. + * + * @return + * 1 if the queue can be released. 
+ */ +int +mlx5_txq_releasable(struct rte_eth_dev *dev, uint16_t idx) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_txq_ctrl *txq; + + if (!(*priv->txqs)[idx]) + return -1; + txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq); + return (rte_atomic32_read(&txq->refcnt) == 1); +} + +/** + * Verify the Tx Queue list is empty + * + * @param dev + * Pointer to Ethernet device. + * + * @return + * The number of object not released. + */ +int +mlx5_txq_verify(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_txq_ctrl *txq_ctrl; + int ret = 0; + + LIST_FOREACH(txq_ctrl, &priv->txqsctrl, next) { + DRV_LOG(DEBUG, "port %u Tx queue %u still referenced", + dev->data->port_id, txq_ctrl->txq.idx); + ++ret; + } + return ret; +} diff --git a/src/spdk/dpdk/drivers/net/mlx5/mlx5_utils.c b/src/spdk/dpdk/drivers/net/mlx5/mlx5_utils.c new file mode 100644 index 000000000..d29fbcbc8 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/mlx5/mlx5_utils.c @@ -0,0 +1,484 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2019 Mellanox Technologies, Ltd + */ + +#include +#include + +#include "mlx5_utils.h" + +struct mlx5_hlist * +mlx5_hlist_create(const char *name, uint32_t size) +{ + struct mlx5_hlist *h; + uint32_t act_size; + uint32_t alloc_size; + + if (!size) + return NULL; + /* Align to the next power of 2, 32bits integer is enough now. */ + if (!rte_is_power_of_2(size)) { + act_size = rte_align32pow2(size); + DRV_LOG(WARNING, "Size 0x%" PRIX32 " is not power of 2, will " + "be aligned to 0x%" PRIX32 ".\n", size, act_size); + } else { + act_size = size; + } + alloc_size = sizeof(struct mlx5_hlist) + + sizeof(struct mlx5_hlist_head) * act_size; + /* Using zmalloc, then no need to initialize the heads. */ + h = rte_zmalloc(name, alloc_size, RTE_CACHE_LINE_SIZE); + if (!h) { + DRV_LOG(ERR, "No memory for hash list %s creation\n", + name ? name : "None"); + return NULL; + } + if (name) + snprintf(h->name, MLX5_HLIST_NAMESIZE, "%s", name); + h->table_sz = act_size; + h->mask = act_size - 1; + DRV_LOG(DEBUG, "Hash list with %s size 0x%" PRIX32 " is created.\n", + h->name, act_size); + return h; +} + +struct mlx5_hlist_entry * +mlx5_hlist_lookup(struct mlx5_hlist *h, uint64_t key) +{ + uint32_t idx; + struct mlx5_hlist_head *first; + struct mlx5_hlist_entry *node; + + MLX5_ASSERT(h); + idx = rte_hash_crc_8byte(key, 0) & h->mask; + first = &h->heads[idx]; + LIST_FOREACH(node, first, next) { + if (node->key == key) + return node; + } + return NULL; +} + +int +mlx5_hlist_insert(struct mlx5_hlist *h, struct mlx5_hlist_entry *entry) +{ + uint32_t idx; + struct mlx5_hlist_head *first; + struct mlx5_hlist_entry *node; + + MLX5_ASSERT(h && entry); + idx = rte_hash_crc_8byte(entry->key, 0) & h->mask; + first = &h->heads[idx]; + /* No need to reuse the lookup function. */ + LIST_FOREACH(node, first, next) { + if (node->key == entry->key) + return -EEXIST; + } + LIST_INSERT_HEAD(first, entry, next); + return 0; +} + +void +mlx5_hlist_remove(struct mlx5_hlist *h __rte_unused, + struct mlx5_hlist_entry *entry) +{ + MLX5_ASSERT(entry && entry->next.le_prev); + LIST_REMOVE(entry, next); + /* Set to NULL to get rid of removing action for more than once. 
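Users of this hash list embed the entry as the first member of their own structure, which is what the ownership comment in mlx5_hlist_destroy() just below relies on. A usage sketch, assuming the declarations from mlx5_utils.h; the containing structure and field names are illustrative:

#include <errno.h>
#include <stdint.h>

#include <rte_malloc.h>

#include "mlx5_utils.h"

struct my_cached_object {
        struct mlx5_hlist_entry entry;  /* Must stay the first member. */
        uint32_t payload;
};

static int
cache_object(struct mlx5_hlist *h, uint64_t key, uint32_t payload)
{
        struct my_cached_object *obj = rte_zmalloc(NULL, sizeof(*obj), 0);

        if (!obj)
                return -ENOMEM;
        obj->entry.key = key;
        obj->payload = payload;
        if (mlx5_hlist_insert(h, &obj->entry)) {
                rte_free(obj);          /* Key already present. */
                return -EEXIST;
        }
        return 0;
}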
*/ + entry->next.le_prev = NULL; +} + +void +mlx5_hlist_destroy(struct mlx5_hlist *h, + mlx5_hlist_destroy_callback_fn cb, void *ctx) +{ + uint32_t idx; + struct mlx5_hlist_entry *entry; + + MLX5_ASSERT(h); + for (idx = 0; idx < h->table_sz; ++idx) { + /* no LIST_FOREACH_SAFE, using while instead */ + while (!LIST_EMPTY(&h->heads[idx])) { + entry = LIST_FIRST(&h->heads[idx]); + LIST_REMOVE(entry, next); + /* + * The owner of whole element which contains data entry + * is the user, so it's the user's duty to do the clean + * up and the free work because someone may not put the + * hlist entry at the beginning(suggested to locate at + * the beginning). Or else the default free function + * will be used. + */ + if (cb) + cb(entry, ctx); + else + rte_free(entry); + } + } + rte_free(h); +} + +static inline void +mlx5_ipool_lock(struct mlx5_indexed_pool *pool) +{ + if (pool->cfg.need_lock) + rte_spinlock_lock(&pool->lock); +} + +static inline void +mlx5_ipool_unlock(struct mlx5_indexed_pool *pool) +{ + if (pool->cfg.need_lock) + rte_spinlock_unlock(&pool->lock); +} + +static inline uint32_t +mlx5_trunk_idx_get(struct mlx5_indexed_pool *pool, uint32_t entry_idx) +{ + struct mlx5_indexed_pool_config *cfg = &pool->cfg; + uint32_t trunk_idx = 0; + uint32_t i; + + if (!cfg->grow_trunk) + return entry_idx / cfg->trunk_size; + if (entry_idx >= pool->grow_tbl[cfg->grow_trunk - 1]) { + trunk_idx = (entry_idx - pool->grow_tbl[cfg->grow_trunk - 1]) / + (cfg->trunk_size << (cfg->grow_shift * + cfg->grow_trunk)) + cfg->grow_trunk; + } else { + for (i = 0; i < cfg->grow_trunk; i++) { + if (entry_idx < pool->grow_tbl[i]) + break; + } + trunk_idx = i; + } + return trunk_idx; +} + +static inline uint32_t +mlx5_trunk_size_get(struct mlx5_indexed_pool *pool, uint32_t trunk_idx) +{ + struct mlx5_indexed_pool_config *cfg = &pool->cfg; + + return cfg->trunk_size << (cfg->grow_shift * + (trunk_idx > cfg->grow_trunk ? cfg->grow_trunk : trunk_idx)); +} + +static inline uint32_t +mlx5_trunk_idx_offset_get(struct mlx5_indexed_pool *pool, uint32_t trunk_idx) +{ + struct mlx5_indexed_pool_config *cfg = &pool->cfg; + uint32_t offset = 0; + + if (!trunk_idx) + return 0; + if (!cfg->grow_trunk) + return cfg->trunk_size * trunk_idx; + if (trunk_idx < cfg->grow_trunk) + offset = pool->grow_tbl[trunk_idx - 1]; + else + offset = pool->grow_tbl[cfg->grow_trunk - 1] + + (cfg->trunk_size << (cfg->grow_shift * + cfg->grow_trunk)) * (trunk_idx - cfg->grow_trunk); + return offset; +} + +struct mlx5_indexed_pool * +mlx5_ipool_create(struct mlx5_indexed_pool_config *cfg) +{ + struct mlx5_indexed_pool *pool; + uint32_t i; + + if (!cfg || !cfg->size || (!cfg->malloc ^ !cfg->free) || + (cfg->trunk_size && ((cfg->trunk_size & (cfg->trunk_size - 1)) || + ((__builtin_ffs(cfg->trunk_size) + TRUNK_IDX_BITS) > 32)))) + return NULL; + pool = rte_zmalloc("mlx5_ipool", sizeof(*pool) + cfg->grow_trunk * + sizeof(pool->grow_tbl[0]), RTE_CACHE_LINE_SIZE); + if (!pool) + return NULL; + pool->cfg = *cfg; + if (!pool->cfg.trunk_size) + pool->cfg.trunk_size = MLX5_IPOOL_DEFAULT_TRUNK_SIZE; + if (!cfg->malloc && !cfg->free) { + pool->cfg.malloc = rte_malloc_socket; + pool->cfg.free = rte_free; + } + pool->free_list = TRUNK_INVALID; + if (pool->cfg.need_lock) + rte_spinlock_init(&pool->lock); + /* + * Initialize the dynamic grow trunk size lookup table to have a quick + * lookup for the trunk entry index offset. 
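+	 * After this loop grow_tbl[i] holds the cumulative number of entries provided by trunks 0..i.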
+ */ + for (i = 0; i < cfg->grow_trunk; i++) { + pool->grow_tbl[i] = cfg->trunk_size << (cfg->grow_shift * i); + if (i > 0) + pool->grow_tbl[i] += pool->grow_tbl[i - 1]; + } + return pool; +} + +static int +mlx5_ipool_grow(struct mlx5_indexed_pool *pool) +{ + struct mlx5_indexed_trunk *trunk; + struct mlx5_indexed_trunk **trunk_tmp; + struct mlx5_indexed_trunk **p; + size_t trunk_size = 0; + size_t data_size; + size_t bmp_size; + uint32_t idx; + + if (pool->n_trunk_valid == TRUNK_MAX_IDX) + return -ENOMEM; + if (pool->n_trunk_valid == pool->n_trunk) { + /* No free trunk flags, expand trunk list. */ + int n_grow = pool->n_trunk_valid ? pool->n_trunk : + RTE_CACHE_LINE_SIZE / sizeof(void *); + + p = pool->cfg.malloc(pool->cfg.type, + (pool->n_trunk_valid + n_grow) * + sizeof(struct mlx5_indexed_trunk *), + RTE_CACHE_LINE_SIZE, rte_socket_id()); + if (!p) + return -ENOMEM; + if (pool->trunks) + memcpy(p, pool->trunks, pool->n_trunk_valid * + sizeof(struct mlx5_indexed_trunk *)); + memset(RTE_PTR_ADD(p, pool->n_trunk_valid * sizeof(void *)), 0, + n_grow * sizeof(void *)); + trunk_tmp = pool->trunks; + pool->trunks = p; + if (trunk_tmp) + pool->cfg.free(trunk_tmp); + pool->n_trunk += n_grow; + } + if (!pool->cfg.release_mem_en) { + idx = pool->n_trunk_valid; + } else { + /* Find the first available slot in trunk list */ + for (idx = 0; idx < pool->n_trunk; idx++) + if (pool->trunks[idx] == NULL) + break; + } + trunk_size += sizeof(*trunk); + data_size = mlx5_trunk_size_get(pool, idx); + bmp_size = rte_bitmap_get_memory_footprint(data_size); + /* rte_bitmap requires memory cacheline aligned. */ + trunk_size += RTE_CACHE_LINE_ROUNDUP(data_size * pool->cfg.size); + trunk_size += bmp_size; + trunk = pool->cfg.malloc(pool->cfg.type, trunk_size, + RTE_CACHE_LINE_SIZE, rte_socket_id()); + if (!trunk) + return -ENOMEM; + pool->trunks[idx] = trunk; + trunk->idx = idx; + trunk->free = data_size; + trunk->prev = TRUNK_INVALID; + trunk->next = TRUNK_INVALID; + MLX5_ASSERT(pool->free_list == TRUNK_INVALID); + pool->free_list = idx; + /* Mark all entries as available. */ + trunk->bmp = rte_bitmap_init_with_all_set(data_size, &trunk->data + [RTE_CACHE_LINE_ROUNDUP(data_size * pool->cfg.size)], + bmp_size); + MLX5_ASSERT(trunk->bmp); + pool->n_trunk_valid++; +#ifdef POOL_DEBUG + pool->trunk_new++; + pool->trunk_avail++; +#endif + return 0; +} + +void * +mlx5_ipool_malloc(struct mlx5_indexed_pool *pool, uint32_t *idx) +{ + struct mlx5_indexed_trunk *trunk; + uint64_t slab = 0; + uint32_t iidx = 0; + void *p; + + mlx5_ipool_lock(pool); + if (pool->free_list == TRUNK_INVALID) { + /* If no available trunks, grow new. */ + if (mlx5_ipool_grow(pool)) { + mlx5_ipool_unlock(pool); + return NULL; + } + } + MLX5_ASSERT(pool->free_list != TRUNK_INVALID); + trunk = pool->trunks[pool->free_list]; + MLX5_ASSERT(trunk->free); + if (!rte_bitmap_scan(trunk->bmp, &iidx, &slab)) { + mlx5_ipool_unlock(pool); + return NULL; + } + MLX5_ASSERT(slab); + iidx += __builtin_ctzll(slab); + MLX5_ASSERT(iidx != UINT32_MAX); + MLX5_ASSERT(iidx < mlx5_trunk_size_get(pool, trunk->idx)); + rte_bitmap_clear(trunk->bmp, iidx); + p = &trunk->data[iidx * pool->cfg.size]; + iidx += mlx5_trunk_idx_offset_get(pool, trunk->idx); + iidx += 1; /* non-zero index. */ + trunk->free--; +#ifdef POOL_DEBUG + pool->n_entry++; +#endif + if (!trunk->free) { + /* Full trunk will be removed from free list in imalloc. 
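+		 * Unlinking it here lets subsequent allocations start from a trunk that still has free entries.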
*/ + MLX5_ASSERT(pool->free_list == trunk->idx); + pool->free_list = trunk->next; + if (trunk->next != TRUNK_INVALID) + pool->trunks[trunk->next]->prev = TRUNK_INVALID; + trunk->prev = TRUNK_INVALID; + trunk->next = TRUNK_INVALID; +#ifdef POOL_DEBUG + pool->trunk_empty++; + pool->trunk_avail--; +#endif + } + *idx = iidx; + mlx5_ipool_unlock(pool); + return p; +} + +void * +mlx5_ipool_zmalloc(struct mlx5_indexed_pool *pool, uint32_t *idx) +{ + void *entry = mlx5_ipool_malloc(pool, idx); + + if (entry) + memset(entry, 0, pool->cfg.size); + return entry; +} + +void +mlx5_ipool_free(struct mlx5_indexed_pool *pool, uint32_t idx) +{ + struct mlx5_indexed_trunk *trunk; + uint32_t trunk_idx; + uint32_t entry_idx; + + if (!idx) + return; + idx -= 1; + mlx5_ipool_lock(pool); + trunk_idx = mlx5_trunk_idx_get(pool, idx); + if ((!pool->cfg.release_mem_en && trunk_idx >= pool->n_trunk_valid) || + (pool->cfg.release_mem_en && trunk_idx >= pool->n_trunk)) + goto out; + trunk = pool->trunks[trunk_idx]; + if (!trunk) + goto out; + entry_idx = idx - mlx5_trunk_idx_offset_get(pool, trunk->idx); + if (trunk_idx != trunk->idx || + rte_bitmap_get(trunk->bmp, entry_idx)) + goto out; + rte_bitmap_set(trunk->bmp, entry_idx); + trunk->free++; + if (pool->cfg.release_mem_en && trunk->free == mlx5_trunk_size_get + (pool, trunk->idx)) { + if (pool->free_list == trunk->idx) + pool->free_list = trunk->next; + if (trunk->next != TRUNK_INVALID) + pool->trunks[trunk->next]->prev = trunk->prev; + if (trunk->prev != TRUNK_INVALID) + pool->trunks[trunk->prev]->next = trunk->next; + pool->cfg.free(trunk); + pool->trunks[trunk_idx] = NULL; + pool->n_trunk_valid--; +#ifdef POOL_DEBUG + pool->trunk_avail--; + pool->trunk_free++; +#endif + if (pool->n_trunk_valid == 0) { + pool->cfg.free(pool->trunks); + pool->trunks = NULL; + pool->n_trunk = 0; + } + } else if (trunk->free == 1) { + /* Put into free trunk list head. 
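+		 * The trunk has just regained its first free entry, so it becomes allocatable again.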
*/ + MLX5_ASSERT(pool->free_list != trunk->idx); + trunk->next = pool->free_list; + trunk->prev = TRUNK_INVALID; + if (pool->free_list != TRUNK_INVALID) + pool->trunks[pool->free_list]->prev = trunk->idx; + pool->free_list = trunk->idx; +#ifdef POOL_DEBUG + pool->trunk_empty--; + pool->trunk_avail++; +#endif + } +#ifdef POOL_DEBUG + pool->n_entry--; +#endif +out: + mlx5_ipool_unlock(pool); +} + +void * +mlx5_ipool_get(struct mlx5_indexed_pool *pool, uint32_t idx) +{ + struct mlx5_indexed_trunk *trunk; + void *p = NULL; + uint32_t trunk_idx; + uint32_t entry_idx; + + if (!idx) + return NULL; + idx -= 1; + mlx5_ipool_lock(pool); + trunk_idx = mlx5_trunk_idx_get(pool, idx); + if ((!pool->cfg.release_mem_en && trunk_idx >= pool->n_trunk_valid) || + (pool->cfg.release_mem_en && trunk_idx >= pool->n_trunk)) + goto out; + trunk = pool->trunks[trunk_idx]; + if (!trunk) + goto out; + entry_idx = idx - mlx5_trunk_idx_offset_get(pool, trunk->idx); + if (trunk_idx != trunk->idx || + rte_bitmap_get(trunk->bmp, entry_idx)) + goto out; + p = &trunk->data[entry_idx * pool->cfg.size]; +out: + mlx5_ipool_unlock(pool); + return p; +} + +int +mlx5_ipool_destroy(struct mlx5_indexed_pool *pool) +{ + struct mlx5_indexed_trunk **trunks; + uint32_t i; + + MLX5_ASSERT(pool); + mlx5_ipool_lock(pool); + trunks = pool->trunks; + for (i = 0; i < pool->n_trunk; i++) { + if (trunks[i]) + pool->cfg.free(trunks[i]); + } + if (!pool->trunks) + pool->cfg.free(pool->trunks); + mlx5_ipool_unlock(pool); + rte_free(pool); + return 0; +} + +void +mlx5_ipool_dump(struct mlx5_indexed_pool *pool) +{ + printf("Pool %s entry size %u, trunks %u, %d entry per trunk, " + "total: %d\n", + pool->cfg.type, pool->cfg.size, pool->n_trunk_valid, + pool->cfg.trunk_size, pool->n_trunk_valid); +#ifdef POOL_DEBUG + printf("Pool %s entry %u, trunk alloc %u, empty: %u, " + "available %u free %u\n", + pool->cfg.type, pool->n_entry, pool->trunk_new, + pool->trunk_empty, pool->trunk_avail, pool->trunk_free); +#endif +} diff --git a/src/spdk/dpdk/drivers/net/mlx5/mlx5_utils.h b/src/spdk/dpdk/drivers/net/mlx5/mlx5_utils.h new file mode 100644 index 000000000..f4ec15170 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/mlx5/mlx5_utils.h @@ -0,0 +1,423 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2015 6WIND S.A. + * Copyright 2015 Mellanox Technologies, Ltd + */ + +#ifndef RTE_PMD_MLX5_UTILS_H_ +#define RTE_PMD_MLX5_UTILS_H_ + +#include +#include +#include +#include +#include + +#include +#include +#include + +#include + +#include "mlx5_defs.h" + + +/* Convert a bit number to the corresponding 64-bit mask */ +#define MLX5_BITSHIFT(v) (UINT64_C(1) << (v)) + +/* Save and restore errno around argument evaluation. */ +#define ERRNO_SAFE(x) ((errno = (int []){ errno, ((x), 0) }[0])) + +extern int mlx5_logtype; + +/* Generic printf()-like logging macro with automatic line feed. */ +#define DRV_LOG(level, ...) \ + PMD_DRV_LOG_(level, mlx5_logtype, MLX5_DRIVER_NAME, \ + __VA_ARGS__ PMD_DRV_LOG_STRIP PMD_DRV_LOG_OPAREN, \ + PMD_DRV_LOG_CPAREN) + +#define INFO(...) DRV_LOG(INFO, __VA_ARGS__) +#define WARN(...) DRV_LOG(WARNING, __VA_ARGS__) +#define ERROR(...) DRV_LOG(ERR, __VA_ARGS__) + +/* Convenience macros for accessing mbuf fields. */ +#define NEXT(m) ((m)->next) +#define DATA_LEN(m) ((m)->data_len) +#define PKT_LEN(m) ((m)->pkt_len) +#define DATA_OFF(m) ((m)->data_off) +#define SET_DATA_OFF(m, o) ((m)->data_off = (o)) +#define NB_SEGS(m) ((m)->nb_segs) +#define PORT(m) ((m)->port) + +/* Transpose flags. Useful to convert IBV to DPDK flags. 
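+ * For instance, TRANSPOSE(val, 0x10, 0x1) moves bit 4 of val down to bit 0, and TRANSPOSE(val, 0x1, 0x10) moves bit 0 up to bit 4.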
*/ +#define TRANSPOSE(val, from, to) \ + (((from) >= (to)) ? \ + (((val) & (from)) / ((from) / (to))) : \ + (((val) & (from)) * ((to) / (from)))) + +/* + * The indexed memory entry index is made up of trunk index and offset of + * the entry in the trunk. Since the entry index is 32 bits, in case user + * prefers to have small trunks, user can change the macro below to a big + * number which helps the pool contains more trunks with lots of entries + * allocated. + */ +#define TRUNK_IDX_BITS 16 +#define TRUNK_MAX_IDX ((1 << TRUNK_IDX_BITS) - 1) +#define TRUNK_INVALID TRUNK_MAX_IDX +#define MLX5_IPOOL_DEFAULT_TRUNK_SIZE (1 << (28 - TRUNK_IDX_BITS)) +#ifdef RTE_LIBRTE_MLX5_DEBUG +#define POOL_DEBUG 1 +#endif + +struct mlx5_indexed_pool_config { + uint32_t size; /* Pool entry size. */ + uint32_t trunk_size:22; + /* + * Trunk entry number. Must be power of 2. It can be increased + * if trunk_grow enable. The trunk entry number increases with + * left shift grow_shift. Trunks with index are after grow_trunk + * will keep the entry number same with the last grow trunk. + */ + uint32_t grow_trunk:4; + /* + * Trunks with entry number increase in the pool. Set it to 0 + * to make the pool works as trunk entry fixed pool. It works + * only if grow_shift is not 0. + */ + uint32_t grow_shift:4; + /* + * Trunk entry number increase shift value, stop after grow_trunk. + * It works only if grow_trunk is not 0. + */ + uint32_t need_lock:1; + /* Lock is needed for multiple thread usage. */ + uint32_t release_mem_en:1; /* Rlease trunk when it is free. */ + const char *type; /* Memory allocate type name. */ + void *(*malloc)(const char *type, size_t size, unsigned int align, + int socket); + /* User defined memory allocator. */ + void (*free)(void *addr); /* User defined memory release. */ +}; + +struct mlx5_indexed_trunk { + uint32_t idx; /* Trunk id. */ + uint32_t prev; /* Previous free trunk in free list. */ + uint32_t next; /* Next free trunk in free list. */ + uint32_t free; /* Free entries available */ + struct rte_bitmap *bmp; + uint8_t data[] __rte_cache_aligned; /* Entry data start. */ +}; + +struct mlx5_indexed_pool { + struct mlx5_indexed_pool_config cfg; /* Indexed pool configuration. */ + rte_spinlock_t lock; /* Pool lock for multiple thread usage. */ + uint32_t n_trunk_valid; /* Trunks allocated. */ + uint32_t n_trunk; /* Trunk pointer array size. */ + /* Dim of trunk pointer array. */ + struct mlx5_indexed_trunk **trunks; + uint32_t free_list; /* Index to first free trunk. */ +#ifdef POOL_DEBUG + uint32_t n_entry; + uint32_t trunk_new; + uint32_t trunk_avail; + uint32_t trunk_empty; + uint32_t trunk_free; +#endif + uint32_t grow_tbl[]; /* Save the index offset for the grow trunks. */ +}; + +/** + * Return logarithm of the nearest power of two above input value. + * + * @param v + * Input value. + * + * @return + * Logarithm of the nearest power of two above input value. + */ +static inline unsigned int +log2above(unsigned int v) +{ + unsigned int l; + unsigned int r; + + for (l = 0, r = 0; (v >> 1); ++l, v >>= 1) + r |= (v & 1); + return l + r; +} + +/** Maximum size of string for naming the hlist table. */ +#define MLX5_HLIST_NAMESIZE 32 + +/** + * Structure of the entry in the hash list, user should define its own struct + * that contains this in order to store the data. The 'key' is 64-bits right + * now and its user's responsibility to guarantee there is no collision. + */ +struct mlx5_hlist_entry { + LIST_ENTRY(mlx5_hlist_entry) next; /* entry pointers in the list. 
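+					  * Keep this entry as the first member of the embedding structure when relying on the default rte_free() at destroy time.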
*/ + uint64_t key; /* user defined 'key', could be the hash signature. */ +}; + +/** Structure for hash head. */ +LIST_HEAD(mlx5_hlist_head, mlx5_hlist_entry); + +/** Type of function that is used to handle the data before freeing. */ +typedef void (*mlx5_hlist_destroy_callback_fn)(void *p, void *ctx); + +/** hash list table structure */ +struct mlx5_hlist { + char name[MLX5_HLIST_NAMESIZE]; /**< Name of the hash list. */ + /**< number of heads, need to be power of 2. */ + uint32_t table_sz; + /**< mask to get the index of the list heads. */ + uint32_t mask; + struct mlx5_hlist_head heads[]; /**< list head arrays. */ +}; + +/** + * Create a hash list table, the user can specify the list heads array size + * of the table, now the size should be a power of 2 in order to get better + * distribution for the entries. Each entry is a part of the whole data element + * and the caller should be responsible for the data element's allocation and + * cleanup / free. Key of each entry will be calculated with CRC in order to + * generate a little fairer distribution. + * + * @param name + * Name of the hash list(optional). + * @param size + * Heads array size of the hash list. + * + * @return + * Pointer of the hash list table created, NULL on failure. + */ +struct mlx5_hlist *mlx5_hlist_create(const char *name, uint32_t size); + +/** + * Search an entry matching the key. + * + * @param h + * Pointer to the hast list table. + * @param key + * Key for the searching entry. + * + * @return + * Pointer of the hlist entry if found, NULL otherwise. + */ +struct mlx5_hlist_entry *mlx5_hlist_lookup(struct mlx5_hlist *h, uint64_t key); + +/** + * Insert an entry to the hash list table, the entry is only part of whole data + * element and a 64B key is used for matching. User should construct the key or + * give a calculated hash signature and guarantee there is no collision. + * + * @param h + * Pointer to the hast list table. + * @param entry + * Entry to be inserted into the hash list table. + * + * @return + * - zero for success. + * - -EEXIST if the entry is already inserted. + */ +int mlx5_hlist_insert(struct mlx5_hlist *h, struct mlx5_hlist_entry *entry); + +/** + * Remove an entry from the hash list table. User should guarantee the validity + * of the entry. + * + * @param h + * Pointer to the hast list table. (not used) + * @param entry + * Entry to be removed from the hash list table. + */ +void mlx5_hlist_remove(struct mlx5_hlist *h __rte_unused, + struct mlx5_hlist_entry *entry); + +/** + * Destroy the hash list table, all the entries already inserted into the lists + * will be handled by the callback function provided by the user (including + * free if needed) before the table is freed. + * + * @param h + * Pointer to the hast list table. + * @param cb + * Callback function for each inserted entry when destroying the hash list. + * @param ctx + * Common context parameter used by callback function for each entry. + */ +void mlx5_hlist_destroy(struct mlx5_hlist *h, + mlx5_hlist_destroy_callback_fn cb, void *ctx); + +/** + * This function allocates non-initialized memory entry from pool. + * In NUMA systems, the memory entry allocated resides on the same + * NUMA socket as the core that calls this function. + * + * Memory entry is allocated from memory trunk, no alignment. + * + * @param pool + * Pointer to indexed memory entry pool. + * No initialization required. + * @param[out] idx + * Pointer to memory to save allocated index. + * Memory index always positive value. 
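+ *   Index 0 is never returned; it is reserved as the invalid index.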
+ * @return + * - Pointer to the allocated memory entry. + * - NULL on error. Not enough memory, or invalid arguments. + */ +void *mlx5_ipool_malloc(struct mlx5_indexed_pool *pool, uint32_t *idx); + +/** + * This function allocates zero initialized memory entry from pool. + * In NUMA systems, the memory entry allocated resides on the same + * NUMA socket as the core that calls this function. + * + * Memory entry is allocated from memory trunk, no alignment. + * + * @param pool + * Pointer to indexed memory pool. + * No initialization required. + * @param[out] idx + * Pointer to memory to save allocated index. + * Memory index always positive value. + * @return + * - Pointer to the allocated memory entry . + * - NULL on error. Not enough memory, or invalid arguments. + */ +void *mlx5_ipool_zmalloc(struct mlx5_indexed_pool *pool, uint32_t *idx); + +/** + * This function frees indexed memory entry to pool. + * Caller has to make sure that the index is allocated from same pool. + * + * @param pool + * Pointer to indexed memory pool. + * @param idx + * Allocated memory entry index. + */ +void mlx5_ipool_free(struct mlx5_indexed_pool *pool, uint32_t idx); + +/** + * This function returns pointer of indexed memory entry from index. + * Caller has to make sure that the index is valid, and allocated + * from same pool. + * + * @param pool + * Pointer to indexed memory pool. + * @param idx + * Allocated memory index. + * @return + * - Pointer to indexed memory entry. + */ +void *mlx5_ipool_get(struct mlx5_indexed_pool *pool, uint32_t idx); + +/** + * This function creates indexed memory pool. + * Caller has to configure the configuration accordingly. + * + * @param pool + * Pointer to indexed memory pool. + * @param cfg + * Allocated memory index. + */ +struct mlx5_indexed_pool * +mlx5_ipool_create(struct mlx5_indexed_pool_config *cfg); + +/** + * This function releases all resources of pool. + * Caller has to make sure that all indexes and memories allocated + * from this pool not referenced anymore. + * + * @param pool + * Pointer to indexed memory pool. + * @return + * - non-zero value on error. + * - 0 on success. + */ +int mlx5_ipool_destroy(struct mlx5_indexed_pool *pool); + +/** + * This function dumps debug info of pool. + * + * @param pool + * Pointer to indexed memory pool. + */ +void mlx5_ipool_dump(struct mlx5_indexed_pool *pool); + +/* + * Macros for linked list based on indexed memory. + * Example data structure: + * struct Foo { + * ILIST_ENTRY(uint16_t) next; + * ... + * } + * + */ +#define ILIST_ENTRY(type) \ +struct { \ + type prev; /* Index of previous element. */ \ + type next; /* Index of next element. 
*/ \ +} + +#define ILIST_INSERT(pool, head, idx, elem, field) \ + do { \ + typeof(elem) peer; \ + MLX5_ASSERT((elem) && (idx)); \ + (elem)->field.next = *(head); \ + (elem)->field.prev = 0; \ + if (*(head)) { \ + (peer) = mlx5_ipool_get(pool, *(head)); \ + if (peer) \ + (peer)->field.prev = (idx); \ + } \ + *(head) = (idx); \ + } while (0) + +#define ILIST_REMOVE(pool, head, idx, elem, field) \ + do { \ + typeof(elem) peer; \ + MLX5_ASSERT(elem); \ + MLX5_ASSERT(head); \ + if ((elem)->field.prev) { \ + (peer) = mlx5_ipool_get \ + (pool, (elem)->field.prev); \ + if (peer) \ + (peer)->field.next = (elem)->field.next;\ + } \ + if ((elem)->field.next) { \ + (peer) = mlx5_ipool_get \ + (pool, (elem)->field.next); \ + if (peer) \ + (peer)->field.prev = (elem)->field.prev;\ + } \ + if (*(head) == (idx)) \ + *(head) = (elem)->field.next; \ + } while (0) + +#define ILIST_FOREACH(pool, head, idx, elem, field) \ + for ((idx) = (head), (elem) = \ + (idx) ? mlx5_ipool_get(pool, (idx)) : NULL; (elem); \ + idx = (elem)->field.next, (elem) = \ + (idx) ? mlx5_ipool_get(pool, idx) : NULL) + +/* Single index list. */ +#define SILIST_ENTRY(type) \ +struct { \ + type next; /* Index of next element. */ \ +} + +#define SILIST_INSERT(head, idx, elem, field) \ + do { \ + MLX5_ASSERT((elem) && (idx)); \ + (elem)->field.next = *(head); \ + *(head) = (idx); \ + } while (0) + +#define SILIST_FOREACH(pool, head, idx, elem, field) \ + for ((idx) = (head), (elem) = \ + (idx) ? mlx5_ipool_get(pool, (idx)) : NULL; (elem); \ + idx = (elem)->field.next, (elem) = \ + (idx) ? mlx5_ipool_get(pool, idx) : NULL) + +#endif /* RTE_PMD_MLX5_UTILS_H_ */ diff --git a/src/spdk/dpdk/drivers/net/mlx5/mlx5_vlan.c b/src/spdk/dpdk/drivers/net/mlx5/mlx5_vlan.c new file mode 100644 index 000000000..f65e416da --- /dev/null +++ b/src/spdk/dpdk/drivers/net/mlx5/mlx5_vlan.c @@ -0,0 +1,327 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2015 6WIND S.A. + * Copyright 2015 Mellanox Technologies, Ltd + */ + +#include +#include +#include +#include + + +/* + * Not needed by this file; included to work around the lack of off_t + * definition for mlx5dv.h with unpatched rdma-core versions. + */ +#include + +/* Verbs headers do not support -pedantic. */ +#ifdef PEDANTIC +#pragma GCC diagnostic ignored "-Wpedantic" +#endif +#include +#include +#ifdef PEDANTIC +#pragma GCC diagnostic error "-Wpedantic" +#endif + +#include +#include +#include +#include + +#include +#include +#include + +#include "mlx5.h" +#include "mlx5_autoconf.h" +#include "mlx5_rxtx.h" +#include "mlx5_utils.h" + +/** + * DPDK callback to configure a VLAN filter. + * + * @param dev + * Pointer to Ethernet device structure. + * @param vlan_id + * VLAN ID to filter. + * @param on + * Toggle filter. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) +{ + struct mlx5_priv *priv = dev->data->dev_private; + unsigned int i; + + DRV_LOG(DEBUG, "port %u %s VLAN filter ID %" PRIu16, + dev->data->port_id, (on ? "enable" : "disable"), vlan_id); + MLX5_ASSERT(priv->vlan_filter_n <= RTE_DIM(priv->vlan_filter)); + for (i = 0; (i != priv->vlan_filter_n); ++i) + if (priv->vlan_filter[i] == vlan_id) + break; + /* Check if there's room for another VLAN filter. */ + if (i == RTE_DIM(priv->vlan_filter)) { + rte_errno = ENOMEM; + return -rte_errno; + } + if (i < priv->vlan_filter_n) { + MLX5_ASSERT(priv->vlan_filter_n != 0); + /* Enabling an existing VLAN filter has no effect. 
*/ + if (on) + goto out; + /* Remove VLAN filter from list. */ + --priv->vlan_filter_n; + memmove(&priv->vlan_filter[i], + &priv->vlan_filter[i + 1], + sizeof(priv->vlan_filter[i]) * + (priv->vlan_filter_n - i)); + priv->vlan_filter[priv->vlan_filter_n] = 0; + } else { + MLX5_ASSERT(i == priv->vlan_filter_n); + /* Disabling an unknown VLAN filter has no effect. */ + if (!on) + goto out; + /* Add new VLAN filter. */ + priv->vlan_filter[priv->vlan_filter_n] = vlan_id; + ++priv->vlan_filter_n; + } +out: + if (dev->data->dev_started) + return mlx5_traffic_restart(dev); + return 0; +} + +/** + * Callback to set/reset VLAN stripping for a specific queue. + * + * @param dev + * Pointer to Ethernet device structure. + * @param queue + * RX queue index. + * @param on + * Enable/disable VLAN stripping. + */ +void +mlx5_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_rxq_data *rxq = (*priv->rxqs)[queue]; + struct mlx5_rxq_ctrl *rxq_ctrl = + container_of(rxq, struct mlx5_rxq_ctrl, rxq); + struct ibv_wq_attr mod; + uint16_t vlan_offloads = + (on ? IBV_WQ_FLAGS_CVLAN_STRIPPING : 0) | + 0; + int ret = 0; + + /* Validate hw support */ + if (!priv->config.hw_vlan_strip) { + DRV_LOG(ERR, "port %u VLAN stripping is not supported", + dev->data->port_id); + return; + } + /* Validate queue number */ + if (queue >= priv->rxqs_n) { + DRV_LOG(ERR, "port %u VLAN stripping, invalid queue number %d", + dev->data->port_id, queue); + return; + } + DRV_LOG(DEBUG, "port %u set VLAN offloads 0x%x for port %uqueue %d", + dev->data->port_id, vlan_offloads, rxq->port_id, queue); + if (!rxq_ctrl->obj) { + /* Update related bits in RX queue. */ + rxq->vlan_strip = !!on; + return; + } + if (rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_IBV) { + mod = (struct ibv_wq_attr){ + .attr_mask = IBV_WQ_ATTR_FLAGS, + .flags_mask = IBV_WQ_FLAGS_CVLAN_STRIPPING, + .flags = vlan_offloads, + }; + ret = mlx5_glue->modify_wq(rxq_ctrl->obj->wq, &mod); + } else if (rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ) { + struct mlx5_devx_modify_rq_attr rq_attr; + + memset(&rq_attr, 0, sizeof(rq_attr)); + rq_attr.rq_state = MLX5_RQC_STATE_RDY; + rq_attr.state = MLX5_RQC_STATE_RDY; + rq_attr.vsd = (on ? 0 : 1); + rq_attr.modify_bitmask = MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD; + ret = mlx5_devx_cmd_modify_rq(rxq_ctrl->obj->rq, &rq_attr); + } + if (ret) { + DRV_LOG(ERR, "port %u failed to modify object %d stripping " + "mode: %s", dev->data->port_id, + rxq_ctrl->obj->type, strerror(rte_errno)); + return; + } + /* Update related bits in RX queue. */ + rxq->vlan_strip = !!on; +} + +/** + * Callback to set/reset VLAN offloads for a port. + * + * @param dev + * Pointer to Ethernet device structure. + * @param mask + * VLAN offload bit mask. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_vlan_offload_set(struct rte_eth_dev *dev, int mask) +{ + struct mlx5_priv *priv = dev->data->dev_private; + unsigned int i; + + if (mask & ETH_VLAN_STRIP_MASK) { + int hw_vlan_strip = !!(dev->data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_VLAN_STRIP); + + if (!priv->config.hw_vlan_strip) { + DRV_LOG(ERR, "port %u VLAN stripping is not supported", + dev->data->port_id); + return 0; + } + /* Run on every RX queue and set/reset VLAN stripping. */ + for (i = 0; (i != priv->rxqs_n); i++) + mlx5_vlan_strip_queue_set(dev, i, hw_vlan_strip); + } + return 0; +} + +/* + * Release VLAN network device, created for VM workaround. 
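+ * The underlying interface is deleted only when its reference count drops to zero.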
+ * + * @param[in] dev + * Ethernet device object, Netlink context provider. + * @param[in] vlan + * Object representing the network device to release. + */ +void mlx5_vlan_vmwa_release(struct rte_eth_dev *dev, + struct mlx5_vf_vlan *vlan) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_nl_vlan_vmwa_context *vmwa = priv->vmwa_context; + struct mlx5_nl_vlan_dev *vlan_dev = &vmwa->vlan_dev[0]; + + MLX5_ASSERT(vlan->created); + MLX5_ASSERT(priv->vmwa_context); + if (!vlan->created || !vmwa) + return; + vlan->created = 0; + MLX5_ASSERT(vlan_dev[vlan->tag].refcnt); + if (--vlan_dev[vlan->tag].refcnt == 0 && + vlan_dev[vlan->tag].ifindex) { + mlx5_nl_vlan_vmwa_delete(vmwa, vlan_dev[vlan->tag].ifindex); + vlan_dev[vlan->tag].ifindex = 0; + } +} + +/** + * Acquire VLAN interface with specified tag for VM workaround. + * + * @param[in] dev + * Ethernet device object, Netlink context provider. + * @param[in] vlan + * Object representing the network device to acquire. + */ +void mlx5_vlan_vmwa_acquire(struct rte_eth_dev *dev, + struct mlx5_vf_vlan *vlan) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_nl_vlan_vmwa_context *vmwa = priv->vmwa_context; + struct mlx5_nl_vlan_dev *vlan_dev = &vmwa->vlan_dev[0]; + + MLX5_ASSERT(!vlan->created); + MLX5_ASSERT(priv->vmwa_context); + if (vlan->created || !vmwa) + return; + if (vlan_dev[vlan->tag].refcnt == 0) { + MLX5_ASSERT(!vlan_dev[vlan->tag].ifindex); + vlan_dev[vlan->tag].ifindex = + mlx5_nl_vlan_vmwa_create(vmwa, vmwa->vf_ifindex, + vlan->tag); + } + if (vlan_dev[vlan->tag].ifindex) { + vlan_dev[vlan->tag].refcnt++; + vlan->created = 1; + } +} + +/* + * Create per ethernet device VLAN VM workaround context + */ +struct mlx5_nl_vlan_vmwa_context * +mlx5_vlan_vmwa_init(struct rte_eth_dev *dev, uint32_t ifindex) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_dev_config *config = &priv->config; + struct mlx5_nl_vlan_vmwa_context *vmwa; + enum rte_hypervisor hv_type; + + /* Do not engage workaround over PF. */ + if (!config->vf) + return NULL; + /* Check whether there is desired virtual environment */ + hv_type = rte_hypervisor_get(); + switch (hv_type) { + case RTE_HYPERVISOR_UNKNOWN: + case RTE_HYPERVISOR_VMWARE: + /* + * The "white list" of configurations + * to engage the workaround. + */ + break; + default: + /* + * The configuration is not found in the "white list". + * We should not engage the VLAN workaround. + */ + return NULL; + } + vmwa = rte_zmalloc(__func__, sizeof(*vmwa), sizeof(uint32_t)); + if (!vmwa) { + DRV_LOG(WARNING, + "Can not allocate memory" + " for VLAN workaround context"); + return NULL; + } + vmwa->nl_socket = mlx5_nl_init(NETLINK_ROUTE); + if (vmwa->nl_socket < 0) { + DRV_LOG(WARNING, + "Can not create Netlink socket" + " for VLAN workaround context"); + rte_free(vmwa); + return NULL; + } + vmwa->vf_ifindex = ifindex; + /* Cleanup for existing VLAN devices. */ + return vmwa; +} + +/* + * Destroy per ethernet device VLAN VM workaround context + */ +void mlx5_vlan_vmwa_exit(struct mlx5_nl_vlan_vmwa_context *vmwa) +{ + unsigned int i; + + /* Delete all remaining VLAN devices. 
*/ + for (i = 0; i < RTE_DIM(vmwa->vlan_dev); i++) { + if (vmwa->vlan_dev[i].ifindex) + mlx5_nl_vlan_vmwa_delete(vmwa, + vmwa->vlan_dev[i].ifindex); + } + if (vmwa->nl_socket >= 0) + close(vmwa->nl_socket); + rte_free(vmwa); +} diff --git a/src/spdk/dpdk/drivers/net/mlx5/rte_pmd_mlx5.h b/src/spdk/dpdk/drivers/net/mlx5/rte_pmd_mlx5.h new file mode 100644 index 000000000..8c6922835 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/mlx5/rte_pmd_mlx5.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2020 Mellanox Technologies, Ltd + */ + +#ifndef RTE_PMD_PRIVATE_MLX5_H_ +#define RTE_PMD_PRIVATE_MLX5_H_ + +/** + * @file + * MLX5 public header. + * + * This interface provides the ability to support private PMD + * dynamic flags. + */ + +#define RTE_PMD_MLX5_FINE_GRANULARITY_INLINE "mlx5_fine_granularity_inline" + +/** + * Returns the dynamic flags name, that are supported. + * + * @param[out] names + * Array that is used to return the supported dynamic flags names. + * @param[in] n + * The number of elements in the names array. + * + * @return + * The number of dynamic flags that were copied if not negative. + * Otherwise: + * - ENOMEM - not enough entries in the array + * - EINVAL - invalid array entry + */ +__rte_experimental +int rte_pmd_mlx5_get_dyn_flag_names(char *names[], unsigned int n); + +#endif diff --git a/src/spdk/dpdk/drivers/net/mlx5/rte_pmd_mlx5_version.map b/src/spdk/dpdk/drivers/net/mlx5/rte_pmd_mlx5_version.map new file mode 100644 index 000000000..c8b1031b0 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/mlx5/rte_pmd_mlx5_version.map @@ -0,0 +1,10 @@ +DPDK_20.0 { + local: *; +}; + +EXPERIMENTAL { + global: + + # added in 20.02 + rte_pmd_mlx5_get_dyn_flag_names; +}; diff --git a/src/spdk/dpdk/drivers/net/mvneta/Makefile b/src/spdk/dpdk/drivers/net/mvneta/Makefile new file mode 100644 index 000000000..41e50479f --- /dev/null +++ b/src/spdk/dpdk/drivers/net/mvneta/Makefile @@ -0,0 +1,39 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2018 Marvell International Ltd. +# Copyright(c) 2018 Semihalf. +# All rights reserved. + +include $(RTE_SDK)/mk/rte.vars.mk + +ifneq ($(MAKECMDGOALS),clean) +ifneq ($(MAKECMDGOALS),config) +ifeq ($(LIBMUSDK_PATH),) +$(error "Please define LIBMUSDK_PATH environment variable") +endif +endif +endif + +# library name +LIB = librte_pmd_mvneta.a + +# versioning export map +EXPORT_MAP := rte_pmd_mvneta_version.map + +# external library dependencies +CFLAGS += -I$(RTE_SDK)/drivers/common/mvep +CFLAGS += -I$(LIBMUSDK_PATH)/include +CFLAGS += -DMVCONF_TYPES_PUBLIC +CFLAGS += -DMVCONF_DMA_PHYS_ADDR_T_PUBLIC +CFLAGS += -DMVCONF_DMA_PHYS_ADDR_T_SIZE=64 +CFLAGS += $(WERROR_FLAGS) +CFLAGS += -O3 +LDLIBS += -L$(LIBMUSDK_PATH)/lib +LDLIBS += -lmusdk +LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring +LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs -lrte_cfgfile +LDLIBS += -lrte_bus_vdev -lrte_common_mvep + +# library source files +SRCS-$(CONFIG_RTE_LIBRTE_MVNETA_PMD) += mvneta_ethdev.c mvneta_rxtx.c + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/src/spdk/dpdk/drivers/net/mvneta/meson.build b/src/spdk/dpdk/drivers/net/mvneta/meson.build new file mode 100644 index 000000000..8d7202788 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/mvneta/meson.build @@ -0,0 +1,29 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2018 Marvell International Ltd. +# Copyright(c) 2018 Semihalf. +# All rights reserved. 
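+# The 'lib_musdk_dir' option read below is expected to point at a MUSDK
+# installation prefix supplied at configure time, for example (hypothetical
+# path):
+#   meson build -Dlib_musdk_dir=/usr/local/musdk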
+ +path = get_option('lib_musdk_dir') +lib_dir = path + '/lib' +inc_dir = path + '/include' + +lib = cc.find_library('libmusdk', dirs : [lib_dir], required: false) +if not lib.found() + build = false + reason = 'missing dependency, "libmusdk"' +else + ext_deps += lib + includes += include_directories(inc_dir) + cflags += [ + '-DMVCONF_TYPES_PUBLIC', + '-DMVCONF_DMA_PHYS_ADDR_T_PUBLIC', + '-DMVCONF_DMA_PHYS_ADDR_T_SIZE=64' + ] +endif + +sources = files( + 'mvneta_ethdev.c', + 'mvneta_rxtx.c' +) + +deps += ['cfgfile', 'common_mvep'] diff --git a/src/spdk/dpdk/drivers/net/mvneta/mvneta_ethdev.c b/src/spdk/dpdk/drivers/net/mvneta/mvneta_ethdev.c new file mode 100644 index 000000000..4aea87648 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/mvneta/mvneta_ethdev.c @@ -0,0 +1,991 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Marvell International Ltd. + * Copyright(c) 2018 Semihalf. + * All rights reserved. + */ + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "mvneta_rxtx.h" + + +#define MVNETA_IFACE_NAME_ARG "iface" + +#define MVNETA_PKT_SIZE_MAX (16382 - MV_MH_SIZE) /* 9700B */ +#define MVNETA_DEFAULT_MTU 1500 + +#define MVNETA_MAC_ADDRS_MAX 256 /*16 UC, 256 IP, 256 MC/BC */ +/** Maximum length of a match string */ +#define MVNETA_MATCH_LEN 16 + +int mvneta_logtype; + +static const char * const valid_args[] = { + MVNETA_IFACE_NAME_ARG, + NULL +}; + +struct mvneta_ifnames { + const char *names[NETA_NUM_ETH_PPIO]; + int idx; +}; + +static int mvneta_dev_num; + +static int mvneta_stats_reset(struct rte_eth_dev *dev); +static int rte_pmd_mvneta_remove(struct rte_vdev_device *vdev); + + +/** + * Deinitialize packet processor. + */ +static void +mvneta_neta_deinit(void) +{ + neta_deinit(); +} + +/** + * Initialize packet processor. + * + * @return + * 0 on success, negative error value otherwise. + */ +static int +mvneta_neta_init(void) +{ + return neta_init(); +} + +/** + * Callback used by rte_kvargs_process() during argument parsing. + * + * @param key + * Pointer to the parsed key (unused). + * @param value + * Pointer to the parsed value. + * @param extra_args + * Pointer to the extra arguments which contains address of the + * table of pointers to parsed interface names. + * + * @return + * Always 0. + */ +static int +mvneta_ifnames_get(const char *key __rte_unused, const char *value, + void *extra_args) +{ + struct mvneta_ifnames *ifnames = extra_args; + + ifnames->names[ifnames->idx++] = value; + + return 0; +} + +/** + * Ethernet device configuration. + * + * Prepare the driver for a given number of TX and RX queues and + * configure RSS if supported. + * + * @param dev + * Pointer to Ethernet device structure. + * + * @return + * 0 on success, negative error value otherwise. 
+ */ +static int +mvneta_dev_configure(struct rte_eth_dev *dev) +{ + struct mvneta_priv *priv = dev->data->dev_private; + struct neta_ppio_params *ppio_params; + + if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_NONE) { + MVNETA_LOG(INFO, "Unsupported RSS and rx multi queue mode %d", + dev->data->dev_conf.rxmode.mq_mode); + if (dev->data->nb_rx_queues > 1) + return -EINVAL; + } + + if (dev->data->dev_conf.rxmode.split_hdr_size) { + MVNETA_LOG(INFO, "Split headers not supported"); + return -EINVAL; + } + + if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) + dev->data->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len - + MRVL_NETA_ETH_HDRS_LEN; + + if (dev->data->dev_conf.txmode.offloads & DEV_TX_OFFLOAD_MULTI_SEGS) + priv->multiseg = 1; + + ppio_params = &priv->ppio_params; + ppio_params->outqs_params.num_outqs = dev->data->nb_tx_queues; + /* Default: 1 TC, no QoS supported. */ + ppio_params->inqs_params.num_tcs = 1; + ppio_params->inqs_params.tcs_params[0].pkt_offset = MRVL_NETA_PKT_OFFS; + priv->ppio_id = dev->data->port_id; + + return 0; +} + +/** + * DPDK callback to get information about the device. + * + * @param dev + * Pointer to Ethernet device structure (unused). + * @param info + * Info structure output buffer. + */ +static int +mvneta_dev_infos_get(struct rte_eth_dev *dev __rte_unused, + struct rte_eth_dev_info *info) +{ + info->speed_capa = ETH_LINK_SPEED_10M | + ETH_LINK_SPEED_100M | + ETH_LINK_SPEED_1G | + ETH_LINK_SPEED_2_5G; + + info->max_rx_queues = MRVL_NETA_RXQ_MAX; + info->max_tx_queues = MRVL_NETA_TXQ_MAX; + info->max_mac_addrs = MVNETA_MAC_ADDRS_MAX; + + info->rx_desc_lim.nb_max = MRVL_NETA_RXD_MAX; + info->rx_desc_lim.nb_min = MRVL_NETA_RXD_MIN; + info->rx_desc_lim.nb_align = MRVL_NETA_RXD_ALIGN; + + info->tx_desc_lim.nb_max = MRVL_NETA_TXD_MAX; + info->tx_desc_lim.nb_min = MRVL_NETA_TXD_MIN; + info->tx_desc_lim.nb_align = MRVL_NETA_TXD_ALIGN; + + info->rx_offload_capa = MVNETA_RX_OFFLOADS; + info->rx_queue_offload_capa = MVNETA_RX_OFFLOADS; + + info->tx_offload_capa = MVNETA_TX_OFFLOADS; + info->tx_queue_offload_capa = MVNETA_TX_OFFLOADS; + + /* By default packets are dropped if no descriptors are available */ + info->default_rxconf.rx_drop_en = 1; + /* Deferred tx queue start is not supported */ + info->default_txconf.tx_deferred_start = 0; + info->default_txconf.offloads = 0; + + info->max_rx_pktlen = MVNETA_PKT_SIZE_MAX; + + return 0; +} + +/** + * Return supported packet types. + * + * @param dev + * Pointer to Ethernet device structure (unused). + * + * @return + * Const pointer to the table with supported packet types. + */ +static const uint32_t * +mvneta_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused) +{ + static const uint32_t ptypes[] = { + RTE_PTYPE_L2_ETHER, + RTE_PTYPE_L2_ETHER_VLAN, + RTE_PTYPE_L3_IPV4, + RTE_PTYPE_L3_IPV6, + RTE_PTYPE_L4_TCP, + RTE_PTYPE_L4_UDP + }; + + return ptypes; +} + +/** + * DPDK callback to change the MTU. + * + * Setting the MTU affects hardware MRU (packets larger than the MRU + * will be dropped). + * + * @param dev + * Pointer to Ethernet device structure. + * @param mtu + * New MTU. + * + * @return + * 0 on success, negative error value otherwise. 
+ */ +static int +mvneta_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) +{ + struct mvneta_priv *priv = dev->data->dev_private; + uint16_t mbuf_data_size = 0; /* SW buffer size */ + uint16_t mru; + int ret; + + mru = MRVL_NETA_MTU_TO_MRU(mtu); + /* + * min_rx_buf_size is equal to mbuf data size + * if pmd didn't set it differently + */ + mbuf_data_size = dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM; + /* Prevent PMD from: + * - setting mru greater than the mbuf size resulting in + * hw and sw buffer size mismatch + * - setting mtu that requires the support of scattered packets + * when this feature has not been enabled/supported so far. + */ + if (!dev->data->scattered_rx && + (mru + MRVL_NETA_PKT_OFFS > mbuf_data_size)) { + mru = mbuf_data_size - MRVL_NETA_PKT_OFFS; + mtu = MRVL_NETA_MRU_TO_MTU(mru); + MVNETA_LOG(WARNING, "MTU too big, max MTU possible limitted by" + " current mbuf size: %u. Set MTU to %u, MRU to %u", + mbuf_data_size, mtu, mru); + } + + if (mtu < RTE_ETHER_MIN_MTU || mru > MVNETA_PKT_SIZE_MAX) { + MVNETA_LOG(ERR, "Invalid MTU [%u] or MRU [%u]", mtu, mru); + return -EINVAL; + } + + dev->data->mtu = mtu; + dev->data->dev_conf.rxmode.max_rx_pkt_len = mru - MV_MH_SIZE; + + if (!priv->ppio) + /* It is OK. New MTU will be set later on mvneta_dev_start */ + return 0; + + ret = neta_ppio_set_mru(priv->ppio, mru); + if (ret) { + MVNETA_LOG(ERR, "Failed to change MRU"); + return ret; + } + + ret = neta_ppio_set_mtu(priv->ppio, mtu); + if (ret) { + MVNETA_LOG(ERR, "Failed to change MTU"); + return ret; + } + MVNETA_LOG(INFO, "MTU changed to %u, MRU = %u", mtu, mru); + + return 0; +} + +/** + * DPDK callback to bring the link up. + * + * @param dev + * Pointer to Ethernet device structure. + * + * @return + * 0 on success, negative error value otherwise. + */ +static int +mvneta_dev_set_link_up(struct rte_eth_dev *dev) +{ + struct mvneta_priv *priv = dev->data->dev_private; + + if (!priv->ppio) + return 0; + + return neta_ppio_enable(priv->ppio); +} + +/** + * DPDK callback to bring the link down. + * + * @param dev + * Pointer to Ethernet device structure. + * + * @return + * 0 on success, negative error value otherwise. + */ +static int +mvneta_dev_set_link_down(struct rte_eth_dev *dev) +{ + struct mvneta_priv *priv = dev->data->dev_private; + + if (!priv->ppio) + return 0; + + return neta_ppio_disable(priv->ppio); +} + +/** + * DPDK callback to start the device. + * + * @param dev + * Pointer to Ethernet device structure. + * + * @return + * 0 on success, negative errno value on failure. + */ +static int +mvneta_dev_start(struct rte_eth_dev *dev) +{ + struct mvneta_priv *priv = dev->data->dev_private; + char match[MVNETA_MATCH_LEN]; + int ret = 0, i; + + if (priv->ppio) + return mvneta_dev_set_link_up(dev); + + strlcpy(match, dev->data->name, sizeof(match)); + priv->ppio_params.match = match; + priv->ppio_params.inqs_params.mtu = dev->data->mtu; + + ret = neta_ppio_init(&priv->ppio_params, &priv->ppio); + if (ret) { + MVNETA_LOG(ERR, "Failed to init ppio"); + return ret; + } + priv->ppio_id = priv->ppio->port_id; + + mvneta_stats_reset(dev); + + /* + * In case there are some some stale uc/mc mac addresses flush them + * here. It cannot be done during mvneta_dev_close() as port information + * is already gone at that point (due to neta_ppio_deinit() in + * mvneta_dev_stop()). 
+ */ + if (!priv->uc_mc_flushed) { + ret = neta_ppio_flush_mac_addrs(priv->ppio, 0, 1); + if (ret) { + MVNETA_LOG(ERR, + "Failed to flush uc/mc filter list"); + goto out; + } + priv->uc_mc_flushed = 1; + } + + ret = mvneta_alloc_rx_bufs(dev); + if (ret) + goto out; + + ret = mvneta_mtu_set(dev, dev->data->mtu); + if (ret) { + MVNETA_LOG(ERR, "Failed to set MTU %d", dev->data->mtu); + goto out; + } + + ret = mvneta_dev_set_link_up(dev); + if (ret) { + MVNETA_LOG(ERR, "Failed to set link up"); + goto out; + } + + /* start tx queues */ + for (i = 0; i < dev->data->nb_tx_queues; i++) + dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + + mvneta_set_tx_function(dev); + + return 0; + +out: + MVNETA_LOG(ERR, "Failed to start device"); + neta_ppio_deinit(priv->ppio); + return ret; +} + +/** + * DPDK callback to stop the device. + * + * @param dev + * Pointer to Ethernet device structure. + */ +static void +mvneta_dev_stop(struct rte_eth_dev *dev) +{ + struct mvneta_priv *priv = dev->data->dev_private; + + if (!priv->ppio) + return; + + mvneta_dev_set_link_down(dev); + mvneta_flush_queues(dev); + neta_ppio_deinit(priv->ppio); + + priv->ppio = NULL; +} + +/** + * DPDK callback to close the device. + * + * @param dev + * Pointer to Ethernet device structure. + */ +static void +mvneta_dev_close(struct rte_eth_dev *dev) +{ + struct mvneta_priv *priv = dev->data->dev_private; + int i; + + if (priv->ppio) + mvneta_dev_stop(dev); + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + mvneta_rx_queue_release(dev->data->rx_queues[i]); + dev->data->rx_queues[i] = NULL; + } + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + mvneta_tx_queue_release(dev->data->tx_queues[i]); + dev->data->tx_queues[i] = NULL; + } + + mvneta_dev_num--; + + if (mvneta_dev_num == 0) { + MVNETA_LOG(INFO, "Perform MUSDK deinit"); + mvneta_neta_deinit(); + rte_mvep_deinit(MVEP_MOD_T_NETA); + } +} + +/** + * DPDK callback to retrieve physical link information. + * + * @param dev + * Pointer to Ethernet device structure. + * @param wait_to_complete + * Wait for request completion (ignored). + * + * @return + * 0 on success, negative error value otherwise. + */ +static int +mvneta_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused) +{ + /* + * TODO + * once MUSDK provides necessary API use it here + */ + struct mvneta_priv *priv = dev->data->dev_private; + struct ethtool_cmd edata; + struct ifreq req; + int ret, fd, link_up; + + if (!priv->ppio) + return -EPERM; + + edata.cmd = ETHTOOL_GSET; + + strcpy(req.ifr_name, dev->data->name); + req.ifr_data = (void *)&edata; + + fd = socket(AF_INET, SOCK_DGRAM, 0); + if (fd == -1) + return -EFAULT; + ret = ioctl(fd, SIOCETHTOOL, &req); + if (ret == -1) { + close(fd); + return -EFAULT; + } + + close(fd); + + switch (ethtool_cmd_speed(&edata)) { + case SPEED_10: + dev->data->dev_link.link_speed = ETH_SPEED_NUM_10M; + break; + case SPEED_100: + dev->data->dev_link.link_speed = ETH_SPEED_NUM_100M; + break; + case SPEED_1000: + dev->data->dev_link.link_speed = ETH_SPEED_NUM_1G; + break; + case SPEED_2500: + dev->data->dev_link.link_speed = ETH_SPEED_NUM_2_5G; + break; + default: + dev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE; + } + + dev->data->dev_link.link_duplex = edata.duplex ? ETH_LINK_FULL_DUPLEX : + ETH_LINK_HALF_DUPLEX; + dev->data->dev_link.link_autoneg = edata.autoneg ? ETH_LINK_AUTONEG : + ETH_LINK_FIXED; + + neta_ppio_get_link_state(priv->ppio, &link_up); + dev->data->dev_link.link_status = link_up ? 
ETH_LINK_UP : ETH_LINK_DOWN; + + return 0; +} + +/** + * DPDK callback to enable promiscuous mode. + * + * @param dev + * Pointer to Ethernet device structure. + * + * @return + * always 0 + */ +static int +mvneta_promiscuous_enable(struct rte_eth_dev *dev) +{ + struct mvneta_priv *priv = dev->data->dev_private; + int ret, en; + + if (!priv->ppio) + return 0; + + neta_ppio_get_promisc(priv->ppio, &en); + if (en) { + MVNETA_LOG(INFO, "Promiscuous already enabled"); + return 0; + } + + ret = neta_ppio_set_promisc(priv->ppio, 1); + if (ret) + MVNETA_LOG(ERR, "Failed to enable promiscuous mode"); + + return 0; +} + +/** + * DPDK callback to disable allmulticast mode. + * + * @param dev + * Pointer to Ethernet device structure. + * + * @return + * always 0 + */ +static int +mvneta_promiscuous_disable(struct rte_eth_dev *dev) +{ + struct mvneta_priv *priv = dev->data->dev_private; + int ret, en; + + if (!priv->ppio) + return 0; + + neta_ppio_get_promisc(priv->ppio, &en); + if (!en) { + MVNETA_LOG(INFO, "Promiscuous already disabled"); + return 0; + } + + ret = neta_ppio_set_promisc(priv->ppio, 0); + if (ret) + MVNETA_LOG(ERR, "Failed to disable promiscuous mode"); + + return 0; +} + +/** + * DPDK callback to remove a MAC address. + * + * @param dev + * Pointer to Ethernet device structure. + * @param index + * MAC address index. + */ +static void +mvneta_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index) +{ + struct mvneta_priv *priv = dev->data->dev_private; + char buf[RTE_ETHER_ADDR_FMT_SIZE]; + int ret; + + if (!priv->ppio) + return; + + ret = neta_ppio_remove_mac_addr(priv->ppio, + dev->data->mac_addrs[index].addr_bytes); + if (ret) { + rte_ether_format_addr(buf, sizeof(buf), + &dev->data->mac_addrs[index]); + MVNETA_LOG(ERR, "Failed to remove mac %s", buf); + } +} + +/** + * DPDK callback to add a MAC address. + * + * @param dev + * Pointer to Ethernet device structure. + * @param mac_addr + * MAC address to register. + * @param index + * MAC address index. + * @param vmdq + * VMDq pool index to associate address with (unused). + * + * @return + * 0 on success, negative error value otherwise. + */ +static int +mvneta_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr, + uint32_t index, uint32_t vmdq __rte_unused) +{ + struct mvneta_priv *priv = dev->data->dev_private; + char buf[RTE_ETHER_ADDR_FMT_SIZE]; + int ret; + + if (index == 0) + /* For setting index 0, mrvl_mac_addr_set() should be used.*/ + return -1; + + if (!priv->ppio) + return 0; + + ret = neta_ppio_add_mac_addr(priv->ppio, mac_addr->addr_bytes); + if (ret) { + rte_ether_format_addr(buf, sizeof(buf), mac_addr); + MVNETA_LOG(ERR, "Failed to add mac %s", buf); + return -1; + } + + return 0; +} + +/** + * DPDK callback to set the primary MAC address. + * + * @param dev + * Pointer to Ethernet device structure. + * @param mac_addr + * MAC address to register. + */ +static int +mvneta_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr) +{ + struct mvneta_priv *priv = dev->data->dev_private; + int ret; + + if (!priv->ppio) + return -EINVAL; + + ret = neta_ppio_set_mac_addr(priv->ppio, mac_addr->addr_bytes); + if (ret) { + char buf[RTE_ETHER_ADDR_FMT_SIZE]; + rte_ether_format_addr(buf, sizeof(buf), mac_addr); + MVNETA_LOG(ERR, "Failed to set mac to %s", buf); + } + return 0; +} + +/** + * DPDK callback to get device statistics. + * + * @param dev + * Pointer to Ethernet device structure. + * @param stats + * Stats structure output buffer. 
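+ *   Counters are returned relative to the snapshot captured at the last statistics reset.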
+ * + * @return + * 0 on success, negative error value otherwise. + */ +static int +mvneta_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) +{ + struct mvneta_priv *priv = dev->data->dev_private; + struct neta_ppio_statistics ppio_stats; + unsigned int ret; + + if (!priv->ppio) + return -EPERM; + + ret = neta_ppio_get_statistics(priv->ppio, &ppio_stats); + if (unlikely(ret)) { + MVNETA_LOG(ERR, "Failed to update port statistics"); + return ret; + } + + stats->ipackets += ppio_stats.rx_packets + + ppio_stats.rx_broadcast_packets + + ppio_stats.rx_multicast_packets - + priv->prev_stats.ipackets; + stats->opackets += ppio_stats.tx_packets + + ppio_stats.tx_broadcast_packets + + ppio_stats.tx_multicast_packets - + priv->prev_stats.opackets; + stats->ibytes += ppio_stats.rx_bytes - priv->prev_stats.ibytes; + stats->obytes += ppio_stats.tx_bytes - priv->prev_stats.obytes; + stats->imissed += ppio_stats.rx_discard + + ppio_stats.rx_overrun - + priv->prev_stats.imissed; + stats->ierrors = ppio_stats.rx_packets_err - + priv->prev_stats.ierrors; + stats->oerrors = ppio_stats.tx_errors - priv->prev_stats.oerrors; + + return 0; +} + +/** + * DPDK callback to clear device statistics. + * + * @param dev + * Pointer to Ethernet device structure. + * + * @return + * 0 on success, negative error value otherwise. + */ +static int +mvneta_stats_reset(struct rte_eth_dev *dev) +{ + struct mvneta_priv *priv = dev->data->dev_private; + unsigned int ret; + + if (!priv->ppio) + return 0; + + ret = mvneta_stats_get(dev, &priv->prev_stats); + if (unlikely(ret)) + MVNETA_LOG(ERR, "Failed to reset port statistics"); + + return ret; +} + + +static const struct eth_dev_ops mvneta_ops = { + .dev_configure = mvneta_dev_configure, + .dev_start = mvneta_dev_start, + .dev_stop = mvneta_dev_stop, + .dev_set_link_up = mvneta_dev_set_link_up, + .dev_set_link_down = mvneta_dev_set_link_down, + .dev_close = mvneta_dev_close, + .link_update = mvneta_link_update, + .promiscuous_enable = mvneta_promiscuous_enable, + .promiscuous_disable = mvneta_promiscuous_disable, + .mac_addr_remove = mvneta_mac_addr_remove, + .mac_addr_add = mvneta_mac_addr_add, + .mac_addr_set = mvneta_mac_addr_set, + .mtu_set = mvneta_mtu_set, + .stats_get = mvneta_stats_get, + .stats_reset = mvneta_stats_reset, + .dev_infos_get = mvneta_dev_infos_get, + .dev_supported_ptypes_get = mvneta_dev_supported_ptypes_get, + .rxq_info_get = mvneta_rxq_info_get, + .txq_info_get = mvneta_txq_info_get, + .rx_queue_setup = mvneta_rx_queue_setup, + .rx_queue_release = mvneta_rx_queue_release, + .tx_queue_setup = mvneta_tx_queue_setup, + .tx_queue_release = mvneta_tx_queue_release, +}; + +/** + * Create device representing Ethernet port. + * + * @param name + * Pointer to the port's name. + * + * @return + * 0 on success, negative error value otherwise. 
+ */ +static int +mvneta_eth_dev_create(struct rte_vdev_device *vdev, const char *name) +{ + int ret, fd = socket(AF_INET, SOCK_DGRAM, 0); + struct rte_eth_dev *eth_dev; + struct mvneta_priv *priv; + struct ifreq req; + + eth_dev = rte_eth_dev_allocate(name); + if (!eth_dev) + return -ENOMEM; + + priv = rte_zmalloc_socket(name, sizeof(*priv), 0, rte_socket_id()); + if (!priv) { + ret = -ENOMEM; + goto out_free; + } + eth_dev->data->dev_private = priv; + + eth_dev->data->mac_addrs = + rte_zmalloc("mac_addrs", + RTE_ETHER_ADDR_LEN * MVNETA_MAC_ADDRS_MAX, 0); + if (!eth_dev->data->mac_addrs) { + MVNETA_LOG(ERR, "Failed to allocate space for eth addrs"); + ret = -ENOMEM; + goto out_free; + } + + memset(&req, 0, sizeof(req)); + strcpy(req.ifr_name, name); + ret = ioctl(fd, SIOCGIFHWADDR, &req); + if (ret) + goto out_free; + + memcpy(eth_dev->data->mac_addrs[0].addr_bytes, + req.ifr_addr.sa_data, RTE_ETHER_ADDR_LEN); + + eth_dev->data->kdrv = RTE_KDRV_NONE; + eth_dev->device = &vdev->device; + eth_dev->rx_pkt_burst = mvneta_rx_pkt_burst; + mvneta_set_tx_function(eth_dev); + eth_dev->dev_ops = &mvneta_ops; + + /* Flag to call rte_eth_dev_release_port() in rte_eth_dev_close(). */ + eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE; + + rte_eth_dev_probing_finish(eth_dev); + return 0; +out_free: + rte_eth_dev_release_port(eth_dev); + + return ret; +} + +/** + * Cleanup previously created device representing Ethernet port. + * + * @param eth_dev + * Pointer to the corresponding rte_eth_dev structure. + */ +static void +mvneta_eth_dev_destroy(struct rte_eth_dev *eth_dev) +{ + rte_eth_dev_release_port(eth_dev); +} + +/** + * Cleanup previously created device representing Ethernet port. + * + * @param name + * Pointer to the port name. + */ +static void +mvneta_eth_dev_destroy_name(const char *name) +{ + struct rte_eth_dev *eth_dev; + + eth_dev = rte_eth_dev_allocated(name); + if (!eth_dev) + return; + + mvneta_eth_dev_destroy(eth_dev); +} + +/** + * DPDK callback to register the virtual device. + * + * @param vdev + * Pointer to the virtual device. + * + * @return + * 0 on success, negative error value otherwise. 
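+ * @note Interface names are passed through the 'iface' device argument parsed below; a hypothetical invocation would be --vdev=net_mvneta,iface=eth0 (interface name is an example).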
+ */ +static int +rte_pmd_mvneta_probe(struct rte_vdev_device *vdev) +{ + struct rte_kvargs *kvlist; + struct mvneta_ifnames ifnames; + int ret = -EINVAL; + uint32_t i, ifnum; + const char *params; + + params = rte_vdev_device_args(vdev); + if (!params) + return -EINVAL; + + kvlist = rte_kvargs_parse(params, valid_args); + if (!kvlist) + return -EINVAL; + + ifnum = rte_kvargs_count(kvlist, MVNETA_IFACE_NAME_ARG); + if (ifnum > RTE_DIM(ifnames.names)) + goto out_free_kvlist; + + ifnames.idx = 0; + rte_kvargs_process(kvlist, MVNETA_IFACE_NAME_ARG, + mvneta_ifnames_get, &ifnames); + + /* + * The below system initialization should be done only once, + * on the first provided configuration file + */ + if (mvneta_dev_num) + goto init_devices; + + MVNETA_LOG(INFO, "Perform MUSDK initializations"); + + ret = rte_mvep_init(MVEP_MOD_T_NETA, kvlist); + if (ret) + goto out_free_kvlist; + + ret = mvneta_neta_init(); + if (ret) { + MVNETA_LOG(ERR, "Failed to init NETA!"); + rte_mvep_deinit(MVEP_MOD_T_NETA); + goto out_free_kvlist; + } + +init_devices: + for (i = 0; i < ifnum; i++) { + MVNETA_LOG(INFO, "Creating %s", ifnames.names[i]); + ret = mvneta_eth_dev_create(vdev, ifnames.names[i]); + if (ret) + goto out_cleanup; + + mvneta_dev_num++; + } + + rte_kvargs_free(kvlist); + + return 0; +out_cleanup: + rte_pmd_mvneta_remove(vdev); + +out_free_kvlist: + rte_kvargs_free(kvlist); + + return ret; +} + +/** + * DPDK callback to remove virtual device. + * + * @param vdev + * Pointer to the removed virtual device. + * + * @return + * 0 on success, negative error value otherwise. + */ +static int +rte_pmd_mvneta_remove(struct rte_vdev_device *vdev) +{ + uint16_t port_id; + + RTE_ETH_FOREACH_DEV(port_id) { + if (rte_eth_devices[port_id].device != &vdev->device) + continue; + rte_eth_dev_close(port_id); + } + + return 0; +} + +static struct rte_vdev_driver pmd_mvneta_drv = { + .probe = rte_pmd_mvneta_probe, + .remove = rte_pmd_mvneta_remove, +}; + +RTE_PMD_REGISTER_VDEV(net_mvneta, pmd_mvneta_drv); +RTE_PMD_REGISTER_PARAM_STRING(net_mvneta, "iface="); + +RTE_INIT(mvneta_init_log) +{ + mvneta_logtype = rte_log_register("pmd.net.mvneta"); + if (mvneta_logtype >= 0) + rte_log_set_level(mvneta_logtype, RTE_LOG_NOTICE); +} diff --git a/src/spdk/dpdk/drivers/net/mvneta/mvneta_ethdev.h b/src/spdk/dpdk/drivers/net/mvneta/mvneta_ethdev.h new file mode 100644 index 000000000..ef8067790 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/mvneta/mvneta_ethdev.h @@ -0,0 +1,93 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Marvell International Ltd. + * Copyright(c) 2018 Semihalf. + * All rights reserved. + */ + +#ifndef _MVNETA_ETHDEV_H_ +#define _MVNETA_ETHDEV_H_ + +#include +#include +#include + +/* + * container_of is defined by both DPDK and MUSDK, + * we'll declare only one version. + * + * Note that it is not used in this PMD anyway. + */ +#ifdef container_of +#undef container_of +#endif + +#include +#include + +/** Packet offset inside RX buffer. 
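+ * Together with MV_MH_SIZE this forms MVNETA_PKT_EFFEC_OFFS, the data + * offset applied to every received mbuf.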
*/ +#define MRVL_NETA_PKT_OFFS 64 + +/** Maximum number of rx/tx queues per port */ +#define MRVL_NETA_RXQ_MAX 8 +#define MRVL_NETA_TXQ_MAX 8 + +/** Minimum/maximum number of descriptors in tx queue */ +#define MRVL_NETA_TXD_MIN 16 +#define MRVL_NETA_TXD_MAX 2048 + +/** Tx queue descriptors alignment in B */ +#define MRVL_NETA_TXD_ALIGN 32 + +/** Minimum/maximum number of descriptors in rx queue */ +#define MRVL_NETA_RXD_MIN 16 +#define MRVL_NETA_RXD_MAX 2048 + +/** Rx queue descriptors alignment in B */ +#define MRVL_NETA_RXD_ALIGN 32 + +#define MRVL_NETA_VLAN_TAG_LEN 4 +#define MRVL_NETA_ETH_HDRS_LEN (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + \ + MRVL_NETA_VLAN_TAG_LEN) + +#define MRVL_NETA_HDRS_LEN (MV_MH_SIZE + MRVL_NETA_ETH_HDRS_LEN) +#define MRVL_NETA_MTU_TO_MRU(mtu) ((mtu) + MRVL_NETA_HDRS_LEN) +#define MRVL_NETA_MRU_TO_MTU(mru) ((mru) - MRVL_NETA_HDRS_LEN) + +/** Rx offloads capabilities */ +#define MVNETA_RX_OFFLOADS (DEV_RX_OFFLOAD_JUMBO_FRAME | \ + DEV_RX_OFFLOAD_CHECKSUM) + +/** Tx offloads capabilities */ +#define MVNETA_TX_OFFLOAD_CHECKSUM (DEV_TX_OFFLOAD_IPV4_CKSUM | \ + DEV_TX_OFFLOAD_UDP_CKSUM | \ + DEV_TX_OFFLOAD_TCP_CKSUM) +#define MVNETA_TX_OFFLOADS (MVNETA_TX_OFFLOAD_CHECKSUM | \ + DEV_TX_OFFLOAD_MULTI_SEGS) + +#define MVNETA_TX_PKT_OFFLOADS (PKT_TX_IP_CKSUM | \ + PKT_TX_TCP_CKSUM | \ + PKT_TX_UDP_CKSUM) + +struct mvneta_priv { + /* Hot fields, used in fast path. */ + struct neta_ppio *ppio; /**< Port handler pointer */ + + uint8_t pp_id; + uint8_t ppio_id; /* ppio port id */ + uint8_t uc_mc_flushed; + uint8_t multiseg; + + struct neta_ppio_params ppio_params; + + uint64_t rate_max; + struct rte_eth_stats prev_stats; +}; + +/** Current log type. */ +extern int mvneta_logtype; + +#define MVNETA_LOG(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, mvneta_logtype, "%s(): " fmt "\n", \ + __func__, ##args) + +#endif /* _MVNETA_ETHDEV_H_ */ diff --git a/src/spdk/dpdk/drivers/net/mvneta/mvneta_rxtx.c b/src/spdk/dpdk/drivers/net/mvneta/mvneta_rxtx.c new file mode 100644 index 000000000..10b6f5758 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/mvneta/mvneta_rxtx.c @@ -0,0 +1,1012 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Marvell International Ltd. + * Copyright(c) 2018 Semihalf. + * All rights reserved. + */ + +#include "mvneta_rxtx.h" + +#define MVNETA_PKT_EFFEC_OFFS (MRVL_NETA_PKT_OFFS + MV_MH_SIZE) + +#define MRVL_NETA_DEFAULT_TC 0 + +/** Maximum number of descriptors in shadow queue. Must be power of 2 */ +#define MRVL_NETA_TX_SHADOWQ_SIZE MRVL_NETA_TXD_MAX + +/** Shadow queue size mask (since shadow queue size is power of 2) */ +#define MRVL_NETA_TX_SHADOWQ_MASK (MRVL_NETA_TX_SHADOWQ_SIZE - 1) + +/** Minimum number of sent buffers to release from shadow queue to BM */ +#define MRVL_NETA_BUF_RELEASE_BURST_SIZE_MIN 16 + +/** Maximum number of sent buffers to release from shadow queue to BM */ +#define MRVL_NETA_BUF_RELEASE_BURST_SIZE_MAX 64 + +#define MVNETA_COOKIE_ADDR_INVALID ~0ULL +#define MVNETA_COOKIE_HIGH_ADDR_SHIFT (sizeof(neta_cookie_t) * 8) +#define MVNETA_COOKIE_HIGH_ADDR_MASK (~0ULL << MVNETA_COOKIE_HIGH_ADDR_SHIFT) + +#define MVNETA_SET_COOKIE_HIGH_ADDR(addr) { \ + if (unlikely(cookie_addr_high == MVNETA_COOKIE_ADDR_INVALID)) \ + cookie_addr_high = \ + (uint64_t)(addr) & MVNETA_COOKIE_HIGH_ADDR_MASK;\ +} + +#define MVNETA_CHECK_COOKIE_HIGH_ADDR(addr) \ + ((likely(cookie_addr_high == \ + ((uint64_t)(addr) & MVNETA_COOKIE_HIGH_ADDR_MASK))) ? 
1 : 0) + +struct mvneta_rxq { + struct mvneta_priv *priv; + struct rte_mempool *mp; + int queue_id; + int port_id; + int size; + int cksum_enabled; + uint64_t bytes_recv; + uint64_t drop_mac; + uint64_t pkts_processed; +}; + +/* + * To use buffer harvesting based on loopback port shadow queue structure + * was introduced for buffers information bookkeeping. + */ +struct mvneta_shadow_txq { + int head; /* write index - used when sending buffers */ + int tail; /* read index - used when releasing buffers */ + u16 size; /* queue occupied size */ + struct neta_buff_inf ent[MRVL_NETA_TX_SHADOWQ_SIZE]; /* q entries */ +}; + +struct mvneta_txq { + struct mvneta_priv *priv; + int queue_id; + int port_id; + uint64_t bytes_sent; + struct mvneta_shadow_txq shadow_txq; + int tx_deferred_start; +}; + +static uint64_t cookie_addr_high = MVNETA_COOKIE_ADDR_INVALID; +static uint16_t rx_desc_free_thresh = MRVL_NETA_BUF_RELEASE_BURST_SIZE_MIN; + +static inline int +mvneta_buffs_refill(struct mvneta_priv *priv, struct mvneta_rxq *rxq, u16 *num) +{ + struct rte_mbuf *mbufs[MRVL_NETA_BUF_RELEASE_BURST_SIZE_MAX]; + struct neta_buff_inf entries[MRVL_NETA_BUF_RELEASE_BURST_SIZE_MAX]; + int i, ret; + uint16_t nb_desc = *num; + + ret = rte_pktmbuf_alloc_bulk(rxq->mp, mbufs, nb_desc); + if (ret) { + MVNETA_LOG(ERR, "Failed to allocate %u mbufs.", nb_desc); + *num = 0; + return -1; + } + + MVNETA_SET_COOKIE_HIGH_ADDR(mbufs[0]); + + for (i = 0; i < nb_desc; i++) { + if (unlikely(!MVNETA_CHECK_COOKIE_HIGH_ADDR(mbufs[i]))) { + MVNETA_LOG(ERR, + "mbuf virt high addr 0x%lx out of range 0x%lx", + (uint64_t)mbufs[i] >> 32, + cookie_addr_high >> 32); + *num = 0; + goto out; + } + entries[i].addr = rte_mbuf_data_iova_default(mbufs[i]); + entries[i].cookie = (neta_cookie_t)(uint64_t)mbufs[i]; + } + neta_ppio_inq_put_buffs(priv->ppio, rxq->queue_id, entries, num); + +out: + for (i = *num; i < nb_desc; i++) + rte_pktmbuf_free(mbufs[i]); + + return 0; +} + +/** + * Allocate buffers from mempool + * and store addresses in rx descriptors. + * + * @return + * 0 on success, negative error value otherwise. + */ +static inline int +mvneta_buffs_alloc(struct mvneta_priv *priv, struct mvneta_rxq *rxq, int *num) +{ + uint16_t nb_desc, nb_desc_burst, sent = 0; + int ret = 0; + + nb_desc = *num; + + do { + nb_desc_burst = + (nb_desc < MRVL_NETA_BUF_RELEASE_BURST_SIZE_MAX) ? + nb_desc : MRVL_NETA_BUF_RELEASE_BURST_SIZE_MAX; + + ret = mvneta_buffs_refill(priv, rxq, &nb_desc_burst); + if (unlikely(ret || !nb_desc_burst)) + break; + + sent += nb_desc_burst; + nb_desc -= nb_desc_burst; + + } while (nb_desc); + + *num = sent; + + return ret; +} + +static inline void +mvneta_fill_shadowq(struct mvneta_shadow_txq *sq, struct rte_mbuf *buf) +{ + sq->ent[sq->head].cookie = (uint64_t)buf; + sq->ent[sq->head].addr = buf ? + rte_mbuf_data_iova_default(buf) : 0; + + sq->head = (sq->head + 1) & MRVL_NETA_TX_SHADOWQ_MASK; + sq->size++; +} + +static inline void +mvneta_fill_desc(struct neta_ppio_desc *desc, struct rte_mbuf *buf) +{ + neta_ppio_outq_desc_reset(desc); + neta_ppio_outq_desc_set_phys_addr(desc, rte_pktmbuf_iova(buf)); + neta_ppio_outq_desc_set_pkt_offset(desc, 0); + neta_ppio_outq_desc_set_pkt_len(desc, rte_pktmbuf_data_len(buf)); +} + +/** + * Release already sent buffers to mempool. + * + * @param ppio + * Pointer to the port structure. + * @param sq + * Pointer to the shadow queue. + * @param qid + * Queue id number. + * @param force + * Force releasing packets. 
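+ * + * Frees as many shadow queue entries as the number of descriptors + * reported completed by neta_ppio_get_num_outq_done().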
+ */ +static inline void +mvneta_sent_buffers_free(struct neta_ppio *ppio, + struct mvneta_shadow_txq *sq, int qid) +{ + struct neta_buff_inf *entry; + uint16_t nb_done = 0; + int i; + int tail = sq->tail; + + neta_ppio_get_num_outq_done(ppio, qid, &nb_done); + + if (nb_done > sq->size) { + MVNETA_LOG(ERR, "nb_done: %d, sq->size %d", + nb_done, sq->size); + return; + } + + for (i = 0; i < nb_done; i++) { + entry = &sq->ent[tail]; + + if (unlikely(!entry->addr)) { + MVNETA_LOG(DEBUG, + "Shadow memory @%d: cookie(%lx), pa(%lx)!", + tail, (u64)entry->cookie, + (u64)entry->addr); + tail = (tail + 1) & MRVL_NETA_TX_SHADOWQ_MASK; + continue; + } + + struct rte_mbuf *mbuf; + + mbuf = (struct rte_mbuf *) + (cookie_addr_high | entry->cookie); + rte_pktmbuf_free(mbuf); + tail = (tail + 1) & MRVL_NETA_TX_SHADOWQ_MASK; + } + + sq->tail = tail; + sq->size -= nb_done; +} + +/** + * Return packet type information and l3/l4 offsets. + * + * @param desc + * Pointer to the received packet descriptor. + * @param l3_offset + * l3 packet offset. + * @param l4_offset + * l4 packet offset. + * + * @return + * Packet type information. + */ +static inline uint64_t +mvneta_desc_to_packet_type_and_offset(struct neta_ppio_desc *desc, + uint8_t *l3_offset, uint8_t *l4_offset) +{ + enum neta_inq_l3_type l3_type; + enum neta_inq_l4_type l4_type; + uint64_t packet_type; + + neta_ppio_inq_desc_get_l3_info(desc, &l3_type, l3_offset); + neta_ppio_inq_desc_get_l4_info(desc, &l4_type, l4_offset); + + packet_type = RTE_PTYPE_L2_ETHER; + + if (NETA_RXD_GET_VLAN_INFO(desc)) + packet_type |= RTE_PTYPE_L2_ETHER_VLAN; + + switch (l3_type) { + case NETA_INQ_L3_TYPE_IPV4_BAD: + case NETA_INQ_L3_TYPE_IPV4_OK: + packet_type |= RTE_PTYPE_L3_IPV4; + break; + case NETA_INQ_L3_TYPE_IPV6: + packet_type |= RTE_PTYPE_L3_IPV6; + break; + default: + packet_type |= RTE_PTYPE_UNKNOWN; + MVNETA_LOG(DEBUG, "Failed to recognize l3 packet type"); + break; + } + + switch (l4_type) { + case NETA_INQ_L4_TYPE_TCP: + packet_type |= RTE_PTYPE_L4_TCP; + break; + case NETA_INQ_L4_TYPE_UDP: + packet_type |= RTE_PTYPE_L4_UDP; + break; + default: + packet_type |= RTE_PTYPE_UNKNOWN; + MVNETA_LOG(DEBUG, "Failed to recognize l4 packet type"); + break; + } + + return packet_type; +} + +/** + * Prepare offload information. + * + * @param ol_flags + * Offload flags. + * @param l3_type + * Pointer to the neta_ouq_l3_type structure. + * @param l4_type + * Pointer to the neta_outq_l4_type structure. + * @param gen_l3_cksum + * Will be set to 1 in case l3 checksum is computed. + * @param l4_cksum + * Will be set to 1 in case l4 checksum is computed. + */ +static inline void +mvneta_prepare_proto_info(uint64_t ol_flags, + enum neta_outq_l3_type *l3_type, + enum neta_outq_l4_type *l4_type, + int *gen_l3_cksum, + int *gen_l4_cksum) +{ + /* + * Based on ol_flags prepare information + * for neta_ppio_outq_desc_set_proto_info() which setups descriptor + * for offloading. + * in most of the checksum cases ipv4 must be set, so this is the + * default value + */ + *l3_type = NETA_OUTQ_L3_TYPE_IPV4; + *gen_l3_cksum = ol_flags & PKT_TX_IP_CKSUM ? 
1 : 0; + + if (ol_flags & PKT_TX_IPV6) { + *l3_type = NETA_OUTQ_L3_TYPE_IPV6; + /* no checksum for ipv6 header */ + *gen_l3_cksum = 0; + } + + if (ol_flags & PKT_TX_TCP_CKSUM) { + *l4_type = NETA_OUTQ_L4_TYPE_TCP; + *gen_l4_cksum = 1; + } else if (ol_flags & PKT_TX_UDP_CKSUM) { + *l4_type = NETA_OUTQ_L4_TYPE_UDP; + *gen_l4_cksum = 1; + } else { + *l4_type = NETA_OUTQ_L4_TYPE_OTHER; + /* no checksum for other type */ + *gen_l4_cksum = 0; + } +} + +/** + * Get offload information from the received packet descriptor. + * + * @param desc + * Pointer to the received packet descriptor. + * + * @return + * Mbuf offload flags. + */ +static inline uint64_t +mvneta_desc_to_ol_flags(struct neta_ppio_desc *desc) +{ + uint64_t flags; + enum neta_inq_desc_status status; + + status = neta_ppio_inq_desc_get_l3_pkt_error(desc); + if (unlikely(status != NETA_DESC_ERR_OK)) + flags = PKT_RX_IP_CKSUM_BAD; + else + flags = PKT_RX_IP_CKSUM_GOOD; + + status = neta_ppio_inq_desc_get_l4_pkt_error(desc); + if (unlikely(status != NETA_DESC_ERR_OK)) + flags |= PKT_RX_L4_CKSUM_BAD; + else + flags |= PKT_RX_L4_CKSUM_GOOD; + + return flags; +} + +/** + * DPDK callback for transmit. + * + * @param txq + * Generic pointer transmit queue. + * @param tx_pkts + * Packets to transmit. + * @param nb_pkts + * Number of packets in array. + * + * @return + * Number of packets successfully transmitted. + */ +static uint16_t +mvneta_tx_pkt_burst(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) +{ + struct mvneta_txq *q = txq; + struct mvneta_shadow_txq *sq; + struct neta_ppio_desc descs[nb_pkts]; + int i, bytes_sent = 0; + uint16_t num, sq_free_size; + uint64_t addr; + + sq = &q->shadow_txq; + if (unlikely(!nb_pkts || !q->priv->ppio)) + return 0; + + if (sq->size) + mvneta_sent_buffers_free(q->priv->ppio, + sq, q->queue_id); + + sq_free_size = MRVL_NETA_TX_SHADOWQ_SIZE - sq->size - 1; + if (unlikely(nb_pkts > sq_free_size)) { + MVNETA_LOG(DEBUG, + "No room in shadow queue for %d packets! %d packets will be sent.", + nb_pkts, sq_free_size); + nb_pkts = sq_free_size; + } + + + for (i = 0; i < nb_pkts; i++) { + struct rte_mbuf *mbuf = tx_pkts[i]; + int gen_l3_cksum, gen_l4_cksum; + enum neta_outq_l3_type l3_type; + enum neta_outq_l4_type l4_type; + + /* Fill first mbuf info in shadow queue */ + mvneta_fill_shadowq(sq, mbuf); + mvneta_fill_desc(&descs[i], mbuf); + + bytes_sent += rte_pktmbuf_pkt_len(mbuf); + + if (!(mbuf->ol_flags & MVNETA_TX_PKT_OFFLOADS)) + continue; + mvneta_prepare_proto_info(mbuf->ol_flags, &l3_type, &l4_type, + &gen_l3_cksum, &gen_l4_cksum); + + neta_ppio_outq_desc_set_proto_info(&descs[i], l3_type, l4_type, + mbuf->l2_len, + mbuf->l2_len + mbuf->l3_len, + gen_l3_cksum, gen_l4_cksum); + } + num = nb_pkts; + neta_ppio_send(q->priv->ppio, q->queue_id, descs, &nb_pkts); + + + /* number of packets that were not sent */ + if (unlikely(num > nb_pkts)) { + for (i = nb_pkts; i < num; i++) { + sq->head = (MRVL_NETA_TX_SHADOWQ_SIZE + sq->head - 1) & + MRVL_NETA_TX_SHADOWQ_MASK; + addr = cookie_addr_high | sq->ent[sq->head].cookie; + bytes_sent -= + rte_pktmbuf_pkt_len((struct rte_mbuf *)addr); + } + sq->size -= num - nb_pkts; + } + + q->bytes_sent += bytes_sent; + + return nb_pkts; +} + +/** DPDK callback for S/G transmit. + * + * @param txq + * Generic pointer transmit queue. + * @param tx_pkts + * Packets to transmit. + * @param nb_pkts + * Number of packets in array. + * + * @return + * Number of packets successfully transmitted. 
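+ * + * A multi-segment packet consumes one descriptor per segment; only one + * shadow queue entry keeps the head mbuf, the remaining entries are NULL.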
+ */ +static uint16_t +mvneta_tx_sg_pkt_burst(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) +{ + struct mvneta_txq *q = txq; + struct mvneta_shadow_txq *sq; + struct neta_ppio_desc descs[nb_pkts * NETA_PPIO_DESC_NUM_FRAGS]; + struct neta_ppio_sg_pkts pkts; + uint8_t frags[nb_pkts]; + int i, j, bytes_sent = 0; + int tail, tail_first; + uint16_t num, sq_free_size; + uint16_t nb_segs, total_descs = 0; + uint64_t addr; + + sq = &q->shadow_txq; + pkts.frags = frags; + pkts.num = 0; + + if (unlikely(!q->priv->ppio)) + return 0; + + if (sq->size) + mvneta_sent_buffers_free(q->priv->ppio, + sq, q->queue_id); + /* Save shadow queue free size */ + sq_free_size = MRVL_NETA_TX_SHADOWQ_SIZE - sq->size - 1; + + tail = 0; + for (i = 0; i < nb_pkts; i++) { + struct rte_mbuf *mbuf = tx_pkts[i]; + struct rte_mbuf *seg = NULL; + int gen_l3_cksum, gen_l4_cksum; + enum neta_outq_l3_type l3_type; + enum neta_outq_l4_type l4_type; + + nb_segs = mbuf->nb_segs; + total_descs += nb_segs; + + /* + * Check if total_descs does not exceed + * shadow queue free size + */ + if (unlikely(total_descs > sq_free_size)) { + total_descs -= nb_segs; + MVNETA_LOG(DEBUG, + "No room in shadow queue for %d packets! " + "%d packets will be sent.", + nb_pkts, i); + break; + } + + + /* Check if nb_segs does not exceed the max nb of desc per + * fragmented packet + */ + if (unlikely(nb_segs > NETA_PPIO_DESC_NUM_FRAGS)) { + total_descs -= nb_segs; + MVNETA_LOG(ERR, + "Too many segments. Packet won't be sent."); + break; + } + + pkts.frags[pkts.num] = nb_segs; + pkts.num++; + tail_first = tail; + + seg = mbuf; + for (j = 0; j < nb_segs - 1; j++) { + /* For the subsequent segments, set shadow queue + * buffer to NULL + */ + mvneta_fill_shadowq(sq, NULL); + mvneta_fill_desc(&descs[tail], seg); + + tail++; + seg = seg->next; + } + /* Put first mbuf info in last shadow queue entry */ + mvneta_fill_shadowq(sq, mbuf); + /* Update descriptor with last segment */ + mvneta_fill_desc(&descs[tail++], seg); + + bytes_sent += rte_pktmbuf_pkt_len(mbuf); + + if (!(mbuf->ol_flags & MVNETA_TX_PKT_OFFLOADS)) + continue; + mvneta_prepare_proto_info(mbuf->ol_flags, &l3_type, &l4_type, + &gen_l3_cksum, &gen_l4_cksum); + + neta_ppio_outq_desc_set_proto_info(&descs[tail_first], + l3_type, l4_type, + mbuf->l2_len, + mbuf->l2_len + mbuf->l3_len, + gen_l3_cksum, gen_l4_cksum); + } + num = total_descs; + neta_ppio_send_sg(q->priv->ppio, q->queue_id, descs, &total_descs, + &pkts); + + /* number of packets that were not sent */ + if (unlikely(num > total_descs)) { + for (i = total_descs; i < num; i++) { + sq->head = (MRVL_NETA_TX_SHADOWQ_SIZE + + sq->head - 1) & + MRVL_NETA_TX_SHADOWQ_MASK; + addr = sq->ent[sq->head].cookie; + if (addr) { + struct rte_mbuf *mbuf; + + mbuf = (struct rte_mbuf *) + (cookie_addr_high | addr); + bytes_sent -= rte_pktmbuf_pkt_len(mbuf); + } + } + sq->size -= num - total_descs; + nb_pkts = pkts.num; + } + + q->bytes_sent += bytes_sent; + + return nb_pkts; +} + +/** + * Set tx burst function according to offload flag + * + * @param dev + * Pointer to Ethernet device structure. 
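+ * + * Multi-segment capable ports (priv->multiseg) get mvneta_tx_sg_pkt_burst, + * all other ports use the simpler mvneta_tx_pkt_burst.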
+ */ +void +mvneta_set_tx_function(struct rte_eth_dev *dev) +{ + struct mvneta_priv *priv = dev->data->dev_private; + + /* Use a simple Tx queue (no offloads, no multi segs) if possible */ + if (priv->multiseg) { + MVNETA_LOG(INFO, "Using multi-segment tx callback"); + dev->tx_pkt_burst = mvneta_tx_sg_pkt_burst; + } else { + MVNETA_LOG(INFO, "Using single-segment tx callback"); + dev->tx_pkt_burst = mvneta_tx_pkt_burst; + } +} + +/** + * DPDK callback for receive. + * + * @param rxq + * Generic pointer to the receive queue. + * @param rx_pkts + * Array to store received packets. + * @param nb_pkts + * Maximum number of packets in array. + * + * @return + * Number of packets successfully received. + */ +uint16_t +mvneta_rx_pkt_burst(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) +{ + struct mvneta_rxq *q = rxq; + struct neta_ppio_desc descs[nb_pkts]; + int i, ret, rx_done = 0, rx_dropped = 0; + + if (unlikely(!q || !q->priv->ppio)) + return 0; + + ret = neta_ppio_recv(q->priv->ppio, q->queue_id, + descs, &nb_pkts); + + if (unlikely(ret < 0)) { + MVNETA_LOG(ERR, "Failed to receive packets"); + return 0; + } + + for (i = 0; i < nb_pkts; i++) { + struct rte_mbuf *mbuf; + uint8_t l3_offset, l4_offset; + enum neta_inq_desc_status status; + uint64_t addr; + + addr = cookie_addr_high | + neta_ppio_inq_desc_get_cookie(&descs[i]); + mbuf = (struct rte_mbuf *)addr; + + rte_pktmbuf_reset(mbuf); + + /* drop packet in case of mac, overrun or resource error */ + status = neta_ppio_inq_desc_get_l2_pkt_error(&descs[i]); + if (unlikely(status != NETA_DESC_ERR_OK)) { + /* Release the mbuf to the mempool since + * it won't be transferred to tx path + */ + rte_pktmbuf_free(mbuf); + q->drop_mac++; + rx_dropped++; + continue; + } + + mbuf->data_off += MVNETA_PKT_EFFEC_OFFS; + mbuf->pkt_len = neta_ppio_inq_desc_get_pkt_len(&descs[i]); + mbuf->data_len = mbuf->pkt_len; + mbuf->port = q->port_id; + mbuf->packet_type = + mvneta_desc_to_packet_type_and_offset(&descs[i], + &l3_offset, + &l4_offset); + mbuf->l2_len = l3_offset; + mbuf->l3_len = l4_offset - l3_offset; + + if (likely(q->cksum_enabled)) + mbuf->ol_flags = mvneta_desc_to_ol_flags(&descs[i]); + + rx_pkts[rx_done++] = mbuf; + q->bytes_recv += mbuf->pkt_len; + } + q->pkts_processed += rx_done + rx_dropped; + + if (q->pkts_processed > rx_desc_free_thresh) { + int buf_to_refill = rx_desc_free_thresh; + + ret = mvneta_buffs_alloc(q->priv, q, &buf_to_refill); + if (ret) + MVNETA_LOG(ERR, "Refill failed"); + q->pkts_processed -= buf_to_refill; + } + + return rx_done; +} + +/** + * DPDK callback to configure the receive queue. + * + * @param dev + * Pointer to Ethernet device structure. + * @param idx + * RX queue index. + * @param desc + * Number of descriptors to configure in queue. + * @param socket + * NUMA socket on which memory must be allocated. + * @param conf + * Thresholds parameters (unused_). + * @param mp + * Memory pool for buffer allocations. + * + * @return + * 0 on success, negative error value otherwise. 
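+ * + * If the mempool buffers are too small to hold max_rx_pkt_len, the + * configured max_rx_pkt_len is reduced to fit the mbuf data room.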
+ */ +int +mvneta_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, + unsigned int socket, + const struct rte_eth_rxconf *conf __rte_unused, + struct rte_mempool *mp) +{ + struct mvneta_priv *priv = dev->data->dev_private; + struct mvneta_rxq *rxq; + uint32_t frame_size, buf_size = rte_pktmbuf_data_room_size(mp); + uint32_t max_rx_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len; + + frame_size = buf_size - RTE_PKTMBUF_HEADROOM - MVNETA_PKT_EFFEC_OFFS; + + if (frame_size < max_rx_pkt_len) { + MVNETA_LOG(ERR, + "Mbuf size must be increased to %u bytes to hold up " + "to %u bytes of data.", + buf_size + max_rx_pkt_len - frame_size, + max_rx_pkt_len); + dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size; + MVNETA_LOG(INFO, "Setting max rx pkt len to %u", + dev->data->dev_conf.rxmode.max_rx_pkt_len); + } + + if (dev->data->rx_queues[idx]) { + rte_free(dev->data->rx_queues[idx]); + dev->data->rx_queues[idx] = NULL; + } + + rxq = rte_zmalloc_socket("rxq", sizeof(*rxq), 0, socket); + if (!rxq) + return -ENOMEM; + + rxq->priv = priv; + rxq->mp = mp; + rxq->cksum_enabled = dev->data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_IPV4_CKSUM; + rxq->queue_id = idx; + rxq->port_id = dev->data->port_id; + rxq->size = desc; + rx_desc_free_thresh = RTE_MIN(rx_desc_free_thresh, (desc / 2)); + priv->ppio_params.inqs_params.tcs_params[MRVL_NETA_DEFAULT_TC].size = + desc; + + dev->data->rx_queues[idx] = rxq; + + return 0; +} + +/** + * DPDK callback to configure the transmit queue. + * + * @param dev + * Pointer to Ethernet device structure. + * @param idx + * Transmit queue index. + * @param desc + * Number of descriptors to configure in the queue. + * @param socket + * NUMA socket on which memory must be allocated. + * @param conf + * Tx queue configuration parameters. + * + * @return + * 0 on success, negative error value otherwise. + */ +int +mvneta_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, + unsigned int socket, const struct rte_eth_txconf *conf) +{ + struct mvneta_priv *priv = dev->data->dev_private; + struct mvneta_txq *txq; + + if (dev->data->tx_queues[idx]) { + rte_free(dev->data->tx_queues[idx]); + dev->data->tx_queues[idx] = NULL; + } + + txq = rte_zmalloc_socket("txq", sizeof(*txq), 0, socket); + if (!txq) + return -ENOMEM; + + txq->priv = priv; + txq->queue_id = idx; + txq->port_id = dev->data->port_id; + txq->tx_deferred_start = conf->tx_deferred_start; + dev->data->tx_queues[idx] = txq; + + priv->ppio_params.outqs_params.outqs_params[idx].size = desc; + priv->ppio_params.outqs_params.outqs_params[idx].weight = 1; + + return 0; +} + +/** + * DPDK callback to release the transmit queue. + * + * @param txq + * Generic transmit queue pointer. + */ +void +mvneta_tx_queue_release(void *txq) +{ + struct mvneta_txq *q = txq; + + if (!q) + return; + + rte_free(q); +} + +/** + * Return mbufs to mempool. 
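+ * Frees the mbufs referenced by the given array of rx descriptors.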
+ * + * @param rxq + * Pointer to rx queue structure + * @param desc + * Array of rx descriptors + */ +static void +mvneta_recv_buffs_free(struct neta_ppio_desc *desc, uint16_t num) +{ + uint64_t addr; + uint8_t i; + + for (i = 0; i < num; i++) { + if (desc) { + addr = cookie_addr_high | + neta_ppio_inq_desc_get_cookie(desc); + if (addr) + rte_pktmbuf_free((struct rte_mbuf *)addr); + desc++; + } + } +} + +int +mvneta_alloc_rx_bufs(struct rte_eth_dev *dev) +{ + struct mvneta_priv *priv = dev->data->dev_private; + int ret = 0, i; + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + struct mvneta_rxq *rxq = dev->data->rx_queues[i]; + int num = rxq->size; + + ret = mvneta_buffs_alloc(priv, rxq, &num); + if (ret || num != rxq->size) { + rte_free(rxq); + return ret; + } + } + + return 0; +} + +/** + * Flush single receive queue. + * + * @param rxq + * Pointer to rx queue structure. + * @param descs + * Array of rx descriptors + */ +static void +mvneta_rx_queue_flush(struct mvneta_rxq *rxq) +{ + struct neta_ppio_desc *descs; + struct neta_buff_inf *bufs; + uint16_t num; + int ret, i; + + descs = rte_malloc("rxdesc", MRVL_NETA_RXD_MAX * sizeof(*descs), 0); + bufs = rte_malloc("buffs", MRVL_NETA_RXD_MAX * sizeof(*bufs), 0); + + do { + num = MRVL_NETA_RXD_MAX; + ret = neta_ppio_recv(rxq->priv->ppio, + rxq->queue_id, + descs, &num); + mvneta_recv_buffs_free(descs, num); + } while (ret == 0 && num); + + rxq->pkts_processed = 0; + + num = MRVL_NETA_RXD_MAX; + + neta_ppio_inq_get_all_buffs(rxq->priv->ppio, rxq->queue_id, bufs, &num); + MVNETA_LOG(INFO, "freeing %u unused bufs.", num); + + for (i = 0; i < num; i++) { + uint64_t addr; + if (bufs[i].cookie) { + addr = cookie_addr_high | bufs[i].cookie; + rte_pktmbuf_free((struct rte_mbuf *)addr); + } + } + + rte_free(descs); + rte_free(bufs); +} + +/** + * Flush single transmit queue. + * + * @param txq + * Pointer to tx queue structure + */ +static void +mvneta_tx_queue_flush(struct mvneta_txq *txq) +{ + struct mvneta_shadow_txq *sq = &txq->shadow_txq; + + if (sq->size) + mvneta_sent_buffers_free(txq->priv->ppio, sq, + txq->queue_id); + + /* free the rest of them */ + while (sq->tail != sq->head) { + uint64_t addr = cookie_addr_high | + sq->ent[sq->tail].cookie; + rte_pktmbuf_free((struct rte_mbuf *)addr); + sq->tail = (sq->tail + 1) & MRVL_NETA_TX_SHADOWQ_MASK; + } + memset(sq, 0, sizeof(*sq)); +} + +void +mvneta_flush_queues(struct rte_eth_dev *dev) +{ + int i; + + MVNETA_LOG(INFO, "Flushing rx queues"); + for (i = 0; i < dev->data->nb_rx_queues; i++) { + struct mvneta_rxq *rxq = dev->data->rx_queues[i]; + + mvneta_rx_queue_flush(rxq); + } + + MVNETA_LOG(INFO, "Flushing tx queues"); + for (i = 0; i < dev->data->nb_tx_queues; i++) { + struct mvneta_txq *txq = dev->data->tx_queues[i]; + + mvneta_tx_queue_flush(txq); + } +} + +/** + * DPDK callback to release the receive queue. + * + * @param rxq + * Generic receive queue pointer. + */ +void +mvneta_rx_queue_release(void *rxq) +{ + struct mvneta_rxq *q = rxq; + + if (!q) + return; + + /* If dev_stop was called already, mbufs are already + * returned to mempool and ppio is deinitialized. + * Skip this step. + */ + + if (q->priv->ppio) + mvneta_rx_queue_flush(q); + + rte_free(rxq); +} + +/** + * DPDK callback to get information about specific receive queue. + * + * @param dev + * Pointer to Ethernet device structure. + * @param rx_queue_id + * Receive queue index. + * @param qinfo + * Receive queue information structure. 
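+ * + * Only the mempool pointer and the descriptor count are reported.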
+ */ +void +mvneta_rxq_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id, + struct rte_eth_rxq_info *qinfo) +{ + struct mvneta_rxq *q = dev->data->rx_queues[rx_queue_id]; + + qinfo->mp = q->mp; + qinfo->nb_desc = q->size; +} + +/** + * DPDK callback to get information about specific transmit queue. + * + * @param dev + * Pointer to Ethernet device structure. + * @param tx_queue_id + * Transmit queue index. + * @param qinfo + * Transmit queue information structure. + */ +void +mvneta_txq_info_get(struct rte_eth_dev *dev, uint16_t tx_queue_id, + struct rte_eth_txq_info *qinfo) +{ + struct mvneta_priv *priv = dev->data->dev_private; + + qinfo->nb_desc = + priv->ppio_params.outqs_params.outqs_params[tx_queue_id].size; +} diff --git a/src/spdk/dpdk/drivers/net/mvneta/mvneta_rxtx.h b/src/spdk/dpdk/drivers/net/mvneta/mvneta_rxtx.h new file mode 100644 index 000000000..cc2919017 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/mvneta/mvneta_rxtx.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Marvell International Ltd. + * Copyright(c) 2018 Semihalf. + * All rights reserved. + */ + +#ifndef _MVNETA_RXTX_H_ +#define _MVNETA_RXTX_H_ + +#include "mvneta_ethdev.h" + +int mvneta_alloc_rx_bufs(struct rte_eth_dev *dev); + +void mvneta_flush_queues(struct rte_eth_dev *dev); + +void mvneta_rxq_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id, + struct rte_eth_rxq_info *qinfo); +void mvneta_txq_info_get(struct rte_eth_dev *dev, uint16_t tx_queue_id, + struct rte_eth_txq_info *qinfo); + +void mvneta_set_tx_function(struct rte_eth_dev *dev); + +uint16_t +mvneta_rx_pkt_burst(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts); + +int +mvneta_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, + unsigned int socket, + const struct rte_eth_rxconf *conf __rte_unused, + struct rte_mempool *mp); +int +mvneta_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, + unsigned int socket, const struct rte_eth_txconf *conf); + +void mvneta_rx_queue_release(void *rxq); +void mvneta_tx_queue_release(void *txq); + +#endif /* _MVNETA_RXTX_H_ */ diff --git a/src/spdk/dpdk/drivers/net/mvneta/rte_pmd_mvneta_version.map b/src/spdk/dpdk/drivers/net/mvneta/rte_pmd_mvneta_version.map new file mode 100644 index 000000000..f9f17e4f6 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/mvneta/rte_pmd_mvneta_version.map @@ -0,0 +1,3 @@ +DPDK_20.0 { + local: *; +}; diff --git a/src/spdk/dpdk/drivers/net/mvpp2/Makefile b/src/spdk/dpdk/drivers/net/mvpp2/Makefile new file mode 100644 index 000000000..8a3ec93a6 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/mvpp2/Makefile @@ -0,0 +1,42 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2017 Marvell International Ltd. +# Copyright(c) 2017 Semihalf. +# All rights reserved. 
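+# +# Legacy make build for the MVPP2 PMD; LIBMUSDK_PATH must point at an +# installed MUSDK library (checked below).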
+ +include $(RTE_SDK)/mk/rte.vars.mk + +ifneq ($(MAKECMDGOALS),clean) +ifneq ($(MAKECMDGOALS),config) +ifeq ($(LIBMUSDK_PATH),) +$(error "Please define LIBMUSDK_PATH environment variable") +endif +endif +endif + +# library name +LIB = librte_pmd_mvpp2.a + +# versioning export map +EXPORT_MAP := rte_pmd_mvpp2_version.map + +# external library dependencies +CFLAGS += -I$(RTE_SDK)/drivers/common/mvep +CFLAGS += -I$(LIBMUSDK_PATH)/include +CFLAGS += -DMVCONF_TYPES_PUBLIC +CFLAGS += -DMVCONF_DMA_PHYS_ADDR_T_PUBLIC +CFLAGS += $(WERROR_FLAGS) +CFLAGS += -O3 +LDLIBS += -L$(LIBMUSDK_PATH)/lib +LDLIBS += -lmusdk +LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring +LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs -lrte_cfgfile +LDLIBS += -lrte_bus_vdev -lrte_common_mvep + +# library source files +SRCS-$(CONFIG_RTE_LIBRTE_MVPP2_PMD) += mrvl_ethdev.c +SRCS-$(CONFIG_RTE_LIBRTE_MVPP2_PMD) += mrvl_qos.c +SRCS-$(CONFIG_RTE_LIBRTE_MVPP2_PMD) += mrvl_flow.c +SRCS-$(CONFIG_RTE_LIBRTE_MVPP2_PMD) += mrvl_mtr.c +SRCS-$(CONFIG_RTE_LIBRTE_MVPP2_PMD) += mrvl_tm.c + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/src/spdk/dpdk/drivers/net/mvpp2/meson.build b/src/spdk/dpdk/drivers/net/mvpp2/meson.build new file mode 100644 index 000000000..e06eddaac --- /dev/null +++ b/src/spdk/dpdk/drivers/net/mvpp2/meson.build @@ -0,0 +1,28 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2018 Marvell International Ltd. +# Copyright(c) 2018 Semihalf. +# All rights reserved. + +path = get_option('lib_musdk_dir') +lib_dir = path + '/lib' +inc_dir = path + '/include' + +lib = cc.find_library('libmusdk', dirs : [lib_dir], required: false) +if not lib.found() + build = false + reason = 'missing dependency, "libmusdk"' +else + ext_deps += lib + includes += include_directories(inc_dir) + cflags += ['-DMVCONF_TYPES_PUBLIC', '-DMVCONF_DMA_PHYS_ADDR_T_PUBLIC'] +endif + +sources = files( + 'mrvl_ethdev.c', + 'mrvl_flow.c', + 'mrvl_qos.c', + 'mrvl_mtr.c', + 'mrvl_tm.c' +) + +deps += ['cfgfile', 'common_mvep'] diff --git a/src/spdk/dpdk/drivers/net/mvpp2/mrvl_ethdev.c b/src/spdk/dpdk/drivers/net/mvpp2/mrvl_ethdev.c new file mode 100644 index 000000000..b98b1fd66 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/mvpp2/mrvl_ethdev.c @@ -0,0 +1,3049 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Marvell International Ltd. + * Copyright(c) 2017 Semihalf. + * All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include "mrvl_ethdev.h" +#include "mrvl_qos.h" +#include "mrvl_flow.h" +#include "mrvl_mtr.h" +#include "mrvl_tm.h" + +/* bitmask with reserved hifs */ +#define MRVL_MUSDK_HIFS_RESERVED 0x0F +/* bitmask with reserved bpools */ +#define MRVL_MUSDK_BPOOLS_RESERVED 0x07 +/* bitmask with reserved kernel RSS tables */ +#define MRVL_MUSDK_RSS_RESERVED 0x01 +/* maximum number of available hifs */ +#define MRVL_MUSDK_HIFS_MAX 9 + +/* prefetch shift */ +#define MRVL_MUSDK_PREFETCH_SHIFT 2 + +/* TCAM has 25 entries reserved for uc/mc filter entries */ +#define MRVL_MAC_ADDRS_MAX 25 +#define MRVL_MATCH_LEN 16 +#define MRVL_PKT_EFFEC_OFFS (MRVL_PKT_OFFS + MV_MH_SIZE) +/* Maximum allowable packet size */ +#define MRVL_PKT_SIZE_MAX (10240 - MV_MH_SIZE) + +#define MRVL_IFACE_NAME_ARG "iface" +#define MRVL_CFG_ARG "cfg" + +#define MRVL_BURST_SIZE 64 + +#define MRVL_ARP_LENGTH 28 + +#define MRVL_COOKIE_ADDR_INVALID ~0ULL +#define MRVL_COOKIE_HIGH_ADDR_MASK 0xffffff0000000000 + +/** Port Rx offload capabilities */ +#define MRVL_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_FILTER | \ + DEV_RX_OFFLOAD_JUMBO_FRAME | \ + DEV_RX_OFFLOAD_CHECKSUM) + +/** Port Tx offloads capabilities */ +#define MRVL_TX_OFFLOADS (DEV_TX_OFFLOAD_IPV4_CKSUM | \ + DEV_TX_OFFLOAD_UDP_CKSUM | \ + DEV_TX_OFFLOAD_TCP_CKSUM | \ + DEV_TX_OFFLOAD_MULTI_SEGS) + +static const char * const valid_args[] = { + MRVL_IFACE_NAME_ARG, + MRVL_CFG_ARG, + NULL +}; + +static int used_hifs = MRVL_MUSDK_HIFS_RESERVED; +static struct pp2_hif *hifs[RTE_MAX_LCORE]; +static int used_bpools[PP2_NUM_PKT_PROC] = { + [0 ... PP2_NUM_PKT_PROC - 1] = MRVL_MUSDK_BPOOLS_RESERVED +}; + +static struct pp2_bpool *mrvl_port_to_bpool_lookup[RTE_MAX_ETHPORTS]; +static int mrvl_port_bpool_size[PP2_NUM_PKT_PROC][PP2_BPOOL_NUM_POOLS][RTE_MAX_LCORE]; +static uint64_t cookie_addr_high = MRVL_COOKIE_ADDR_INVALID; + +int mrvl_logtype; + +struct mrvl_ifnames { + const char *names[PP2_NUM_ETH_PPIO * PP2_NUM_PKT_PROC]; + int idx; +}; + +/* + * To use buffer harvesting based on loopback port shadow queue structure + * was introduced for buffers information bookkeeping. + * + * Before sending the packet, related buffer information (pp2_buff_inf) is + * stored in shadow queue. After packet is transmitted no longer used + * packet buffer is released back to it's original hardware pool, + * on condition it originated from interface. + * In case it was generated by application itself i.e: mbuf->port field is + * 0xff then its released to software mempool. 
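+ * + * Each lcore keeps its own shadow queue per tx queue (shadow_txqs[]), so + * transmitting lcores do not contend on a single bookkeeping structure.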
+ */ +struct mrvl_shadow_txq { + int head; /* write index - used when sending buffers */ + int tail; /* read index - used when releasing buffers */ + u16 size; /* queue occupied size */ + u16 num_to_release; /* number of descriptors sent, that can be + * released + */ + struct buff_release_entry ent[MRVL_PP2_TX_SHADOWQ_SIZE]; /* q entries */ +}; + +struct mrvl_rxq { + struct mrvl_priv *priv; + struct rte_mempool *mp; + int queue_id; + int port_id; + int cksum_enabled; + uint64_t bytes_recv; + uint64_t drop_mac; +}; + +struct mrvl_txq { + struct mrvl_priv *priv; + int queue_id; + int port_id; + uint64_t bytes_sent; + struct mrvl_shadow_txq shadow_txqs[RTE_MAX_LCORE]; + int tx_deferred_start; +}; + +static int mrvl_lcore_first; +static int mrvl_lcore_last; +static int mrvl_dev_num; + +static int mrvl_fill_bpool(struct mrvl_rxq *rxq, int num); +static inline void mrvl_free_sent_buffers(struct pp2_ppio *ppio, + struct pp2_hif *hif, unsigned int core_id, + struct mrvl_shadow_txq *sq, int qid, int force); + +static uint16_t mrvl_tx_pkt_burst(void *txq, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); +static uint16_t mrvl_tx_sg_pkt_burst(void *txq, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); +static int rte_pmd_mrvl_remove(struct rte_vdev_device *vdev); +static void mrvl_deinit_pp2(void); +static void mrvl_deinit_hifs(void); + + +#define MRVL_XSTATS_TBL_ENTRY(name) { \ + #name, offsetof(struct pp2_ppio_statistics, name), \ + sizeof(((struct pp2_ppio_statistics *)0)->name) \ +} + +/* Table with xstats data */ +static struct { + const char *name; + unsigned int offset; + unsigned int size; +} mrvl_xstats_tbl[] = { + MRVL_XSTATS_TBL_ENTRY(rx_bytes), + MRVL_XSTATS_TBL_ENTRY(rx_packets), + MRVL_XSTATS_TBL_ENTRY(rx_unicast_packets), + MRVL_XSTATS_TBL_ENTRY(rx_errors), + MRVL_XSTATS_TBL_ENTRY(rx_fullq_dropped), + MRVL_XSTATS_TBL_ENTRY(rx_bm_dropped), + MRVL_XSTATS_TBL_ENTRY(rx_early_dropped), + MRVL_XSTATS_TBL_ENTRY(rx_fifo_dropped), + MRVL_XSTATS_TBL_ENTRY(rx_cls_dropped), + MRVL_XSTATS_TBL_ENTRY(tx_bytes), + MRVL_XSTATS_TBL_ENTRY(tx_packets), + MRVL_XSTATS_TBL_ENTRY(tx_unicast_packets), + MRVL_XSTATS_TBL_ENTRY(tx_errors) +}; + +static inline void +mrvl_fill_shadowq(struct mrvl_shadow_txq *sq, struct rte_mbuf *buf) +{ + sq->ent[sq->head].buff.cookie = (uint64_t)buf; + sq->ent[sq->head].buff.addr = buf ? + rte_mbuf_data_iova_default(buf) : 0; + + sq->ent[sq->head].bpool = + (unlikely(!buf || buf->port >= RTE_MAX_ETHPORTS || + buf->refcnt > 1)) ? 
NULL : + mrvl_port_to_bpool_lookup[buf->port]; + + sq->head = (sq->head + 1) & MRVL_PP2_TX_SHADOWQ_MASK; + sq->size++; +} + +static inline void +mrvl_fill_desc(struct pp2_ppio_desc *desc, struct rte_mbuf *buf) +{ + pp2_ppio_outq_desc_reset(desc); + pp2_ppio_outq_desc_set_phys_addr(desc, rte_pktmbuf_iova(buf)); + pp2_ppio_outq_desc_set_pkt_offset(desc, 0); + pp2_ppio_outq_desc_set_pkt_len(desc, rte_pktmbuf_data_len(buf)); +} + +static inline int +mrvl_get_bpool_size(int pp2_id, int pool_id) +{ + int i; + int size = 0; + + for (i = mrvl_lcore_first; i <= mrvl_lcore_last; i++) + size += mrvl_port_bpool_size[pp2_id][pool_id][i]; + + return size; +} + +static inline int +mrvl_reserve_bit(int *bitmap, int max) +{ + int n = sizeof(*bitmap) * 8 - __builtin_clz(*bitmap); + + if (n >= max) + return -1; + + *bitmap |= 1 << n; + + return n; +} + +static int +mrvl_init_hif(int core_id) +{ + struct pp2_hif_params params; + char match[MRVL_MATCH_LEN]; + int ret; + + ret = mrvl_reserve_bit(&used_hifs, MRVL_MUSDK_HIFS_MAX); + if (ret < 0) { + MRVL_LOG(ERR, "Failed to allocate hif %d", core_id); + return ret; + } + + snprintf(match, sizeof(match), "hif-%d", ret); + memset(¶ms, 0, sizeof(params)); + params.match = match; + params.out_size = MRVL_PP2_AGGR_TXQD_MAX; + ret = pp2_hif_init(¶ms, &hifs[core_id]); + if (ret) { + MRVL_LOG(ERR, "Failed to initialize hif %d", core_id); + return ret; + } + + return 0; +} + +static inline struct pp2_hif* +mrvl_get_hif(struct mrvl_priv *priv, int core_id) +{ + int ret; + + if (likely(hifs[core_id] != NULL)) + return hifs[core_id]; + + rte_spinlock_lock(&priv->lock); + + ret = mrvl_init_hif(core_id); + if (ret < 0) { + MRVL_LOG(ERR, "Failed to allocate hif %d", core_id); + goto out; + } + + if (core_id < mrvl_lcore_first) + mrvl_lcore_first = core_id; + + if (core_id > mrvl_lcore_last) + mrvl_lcore_last = core_id; +out: + rte_spinlock_unlock(&priv->lock); + + return hifs[core_id]; +} + +/** + * Set tx burst function according to offload flag + * + * @param dev + * Pointer to Ethernet device structure. + */ +static void +mrvl_set_tx_function(struct rte_eth_dev *dev) +{ + struct mrvl_priv *priv = dev->data->dev_private; + + /* Use a simple Tx queue (no offloads, no multi segs) if possible */ + if (priv->multiseg) { + RTE_LOG(INFO, PMD, "Using multi-segment tx callback\n"); + dev->tx_pkt_burst = mrvl_tx_sg_pkt_burst; + } else { + RTE_LOG(INFO, PMD, "Using single-segment tx callback\n"); + dev->tx_pkt_burst = mrvl_tx_pkt_burst; + } +} + +/** + * Configure rss based on dpdk rss configuration. + * + * @param priv + * Pointer to private structure. + * @param rss_conf + * Pointer to RSS configuration. + * + * @return + * 0 on success, negative error value otherwise. + */ +static int +mrvl_configure_rss(struct mrvl_priv *priv, struct rte_eth_rss_conf *rss_conf) +{ + if (rss_conf->rss_key) + MRVL_LOG(WARNING, "Changing hash key is not supported"); + + if (rss_conf->rss_hf == 0) { + priv->ppio_params.inqs_params.hash_type = PP2_PPIO_HASH_T_NONE; + } else if (rss_conf->rss_hf & ETH_RSS_IPV4) { + priv->ppio_params.inqs_params.hash_type = + PP2_PPIO_HASH_T_2_TUPLE; + } else if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) { + priv->ppio_params.inqs_params.hash_type = + PP2_PPIO_HASH_T_5_TUPLE; + priv->rss_hf_tcp = 1; + } else if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) { + priv->ppio_params.inqs_params.hash_type = + PP2_PPIO_HASH_T_5_TUPLE; + priv->rss_hf_tcp = 0; + } else { + return -EINVAL; + } + + return 0; +} + +/** + * Ethernet device configuration. 
+ * + * Prepare the driver for a given number of TX and RX queues and + * configure RSS. + * + * @param dev + * Pointer to Ethernet device structure. + * + * @return + * 0 on success, negative error value otherwise. + */ +static int +mrvl_dev_configure(struct rte_eth_dev *dev) +{ + struct mrvl_priv *priv = dev->data->dev_private; + int ret; + + if (priv->ppio) { + MRVL_LOG(INFO, "Device reconfiguration is not supported"); + return -EINVAL; + } + + if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_NONE && + dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) { + MRVL_LOG(INFO, "Unsupported rx multi queue mode %d", + dev->data->dev_conf.rxmode.mq_mode); + return -EINVAL; + } + + if (dev->data->dev_conf.rxmode.split_hdr_size) { + MRVL_LOG(INFO, "Split headers not supported"); + return -EINVAL; + } + + if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) + dev->data->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len - + MRVL_PP2_ETH_HDRS_LEN; + + if (dev->data->dev_conf.txmode.offloads & DEV_TX_OFFLOAD_MULTI_SEGS) + priv->multiseg = 1; + + ret = mrvl_configure_rxqs(priv, dev->data->port_id, + dev->data->nb_rx_queues); + if (ret < 0) + return ret; + + ret = mrvl_configure_txqs(priv, dev->data->port_id, + dev->data->nb_tx_queues); + if (ret < 0) + return ret; + + priv->ppio_params.outqs_params.num_outqs = dev->data->nb_tx_queues; + priv->ppio_params.maintain_stats = 1; + priv->nb_rx_queues = dev->data->nb_rx_queues; + + ret = mrvl_tm_init(dev); + if (ret < 0) + return ret; + + if (dev->data->nb_rx_queues == 1 && + dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) { + MRVL_LOG(WARNING, "Disabling hash for 1 rx queue"); + priv->ppio_params.inqs_params.hash_type = PP2_PPIO_HASH_T_NONE; + + return 0; + } + + return mrvl_configure_rss(priv, + &dev->data->dev_conf.rx_adv_conf.rss_conf); +} + +/** + * DPDK callback to change the MTU. + * + * Setting the MTU affects hardware MRU (packets larger than the MRU + * will be dropped). + * + * @param dev + * Pointer to Ethernet device structure. + * @param mtu + * New MTU. + * + * @return + * 0 on success, negative error value otherwise. + */ +static int +mrvl_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) +{ + struct mrvl_priv *priv = dev->data->dev_private; + uint16_t mru; + uint16_t mbuf_data_size = 0; /* SW buffer size */ + int ret; + + mru = MRVL_PP2_MTU_TO_MRU(mtu); + /* + * min_rx_buf_size is equal to mbuf data size + * if pmd didn't set it differently + */ + mbuf_data_size = dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM; + /* Prevent PMD from: + * - setting mru greater than the mbuf size resulting in + * hw and sw buffer size mismatch + * - setting mtu that requires the support of scattered packets + * when this feature has not been enabled/supported so far + * (TODO check scattered_rx flag here once scattered RX is supported). + */ + if (mru + MRVL_PKT_OFFS > mbuf_data_size) { + mru = mbuf_data_size - MRVL_PKT_OFFS; + mtu = MRVL_PP2_MRU_TO_MTU(mru); + MRVL_LOG(WARNING, "MTU too big, max MTU possible limitted " + "by current mbuf size: %u. 
Set MTU to %u, MRU to %u", + mbuf_data_size, mtu, mru); + } + + if (mtu < RTE_ETHER_MIN_MTU || mru > MRVL_PKT_SIZE_MAX) { + MRVL_LOG(ERR, "Invalid MTU [%u] or MRU [%u]", mtu, mru); + return -EINVAL; + } + + dev->data->mtu = mtu; + dev->data->dev_conf.rxmode.max_rx_pkt_len = mru - MV_MH_SIZE; + + if (!priv->ppio) + return 0; + + ret = pp2_ppio_set_mru(priv->ppio, mru); + if (ret) { + MRVL_LOG(ERR, "Failed to change MRU"); + return ret; + } + + ret = pp2_ppio_set_mtu(priv->ppio, mtu); + if (ret) { + MRVL_LOG(ERR, "Failed to change MTU"); + return ret; + } + + return 0; +} + +/** + * DPDK callback to bring the link up. + * + * @param dev + * Pointer to Ethernet device structure. + * + * @return + * 0 on success, negative error value otherwise. + */ +static int +mrvl_dev_set_link_up(struct rte_eth_dev *dev) +{ + struct mrvl_priv *priv = dev->data->dev_private; + int ret; + + if (!priv->ppio) + return -EPERM; + + ret = pp2_ppio_enable(priv->ppio); + if (ret) + return ret; + + /* + * mtu/mru can be updated if pp2_ppio_enable() was called at least once + * as pp2_ppio_enable() changes port->t_mode from default 0 to + * PP2_TRAFFIC_INGRESS_EGRESS. + * + * Set mtu to default DPDK value here. + */ + ret = mrvl_mtu_set(dev, dev->data->mtu); + if (ret) + pp2_ppio_disable(priv->ppio); + + return ret; +} + +/** + * DPDK callback to bring the link down. + * + * @param dev + * Pointer to Ethernet device structure. + * + * @return + * 0 on success, negative error value otherwise. + */ +static int +mrvl_dev_set_link_down(struct rte_eth_dev *dev) +{ + struct mrvl_priv *priv = dev->data->dev_private; + + if (!priv->ppio) + return -EPERM; + + return pp2_ppio_disable(priv->ppio); +} + +/** + * DPDK callback to start tx queue. + * + * @param dev + * Pointer to Ethernet device structure. + * @param queue_id + * Transmit queue index. + * + * @return + * 0 on success, negative error value otherwise. + */ +static int +mrvl_tx_queue_start(struct rte_eth_dev *dev, uint16_t queue_id) +{ + struct mrvl_priv *priv = dev->data->dev_private; + int ret; + + if (!priv) + return -EPERM; + + /* passing 1 enables given tx queue */ + ret = pp2_ppio_set_outq_state(priv->ppio, queue_id, 1); + if (ret) { + MRVL_LOG(ERR, "Failed to start txq %d", queue_id); + return ret; + } + + dev->data->tx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STARTED; + + return 0; +} + +/** + * DPDK callback to stop tx queue. + * + * @param dev + * Pointer to Ethernet device structure. + * @param queue_id + * Transmit queue index. + * + * @return + * 0 on success, negative error value otherwise. + */ +static int +mrvl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t queue_id) +{ + struct mrvl_priv *priv = dev->data->dev_private; + int ret; + + if (!priv->ppio) + return -EPERM; + + /* passing 0 disables given tx queue */ + ret = pp2_ppio_set_outq_state(priv->ppio, queue_id, 0); + if (ret) { + MRVL_LOG(ERR, "Failed to stop txq %d", queue_id); + return ret; + } + + dev->data->tx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; + + return 0; +} + +/** + * DPDK callback to start the device. + * + * @param dev + * Pointer to Ethernet device structure. + * + * @return + * 0 on success, negative errno value on failure. 
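+ * + * Tops up the bpool to its minimum size, initializes the PPIO, flushes + * stale uc/mc and vlan filters, applies the MTU and QoS mapping, brings + * the link up and starts the tx queues.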
+ */ +static int +mrvl_dev_start(struct rte_eth_dev *dev) +{ + struct mrvl_priv *priv = dev->data->dev_private; + char match[MRVL_MATCH_LEN]; + int ret = 0, i, def_init_size; + + if (priv->ppio) + return mrvl_dev_set_link_up(dev); + + snprintf(match, sizeof(match), "ppio-%d:%d", + priv->pp_id, priv->ppio_id); + priv->ppio_params.match = match; + + /* + * Calculate the minimum bpool size for refill feature as follows: + * 2 default burst sizes multiply by number of rx queues. + * If the bpool size will be below this value, new buffers will + * be added to the pool. + */ + priv->bpool_min_size = priv->nb_rx_queues * MRVL_BURST_SIZE * 2; + + /* In case initial bpool size configured in queues setup is + * smaller than minimum size add more buffers + */ + def_init_size = priv->bpool_min_size + MRVL_BURST_SIZE * 2; + if (priv->bpool_init_size < def_init_size) { + int buffs_to_add = def_init_size - priv->bpool_init_size; + + priv->bpool_init_size += buffs_to_add; + ret = mrvl_fill_bpool(dev->data->rx_queues[0], buffs_to_add); + if (ret) + MRVL_LOG(ERR, "Failed to add buffers to bpool"); + } + + /* + * Calculate the maximum bpool size for refill feature as follows: + * maximum number of descriptors in rx queue multiply by number + * of rx queues plus minimum bpool size. + * In case the bpool size will exceed this value, superfluous buffers + * will be removed + */ + priv->bpool_max_size = (priv->nb_rx_queues * MRVL_PP2_RXD_MAX) + + priv->bpool_min_size; + + ret = pp2_ppio_init(&priv->ppio_params, &priv->ppio); + if (ret) { + MRVL_LOG(ERR, "Failed to init ppio"); + return ret; + } + + /* + * In case there are some some stale uc/mc mac addresses flush them + * here. It cannot be done during mrvl_dev_close() as port information + * is already gone at that point (due to pp2_ppio_deinit() in + * mrvl_dev_stop()). + */ + if (!priv->uc_mc_flushed) { + ret = pp2_ppio_flush_mac_addrs(priv->ppio, 1, 1); + if (ret) { + MRVL_LOG(ERR, + "Failed to flush uc/mc filter list"); + goto out; + } + priv->uc_mc_flushed = 1; + } + + if (!priv->vlan_flushed) { + ret = pp2_ppio_flush_vlan(priv->ppio); + if (ret) { + MRVL_LOG(ERR, "Failed to flush vlan list"); + /* + * TODO + * once pp2_ppio_flush_vlan() is supported jump to out + * goto out; + */ + } + priv->vlan_flushed = 1; + } + ret = mrvl_mtu_set(dev, dev->data->mtu); + if (ret) + MRVL_LOG(ERR, "Failed to set MTU to %d", dev->data->mtu); + + /* For default QoS config, don't start classifier. */ + if (mrvl_qos_cfg && + mrvl_qos_cfg->port[dev->data->port_id].use_global_defaults == 0) { + ret = mrvl_start_qos_mapping(priv); + if (ret) { + MRVL_LOG(ERR, "Failed to setup QoS mapping"); + goto out; + } + } + + ret = mrvl_dev_set_link_up(dev); + if (ret) { + MRVL_LOG(ERR, "Failed to set link up"); + goto out; + } + + /* start tx queues */ + for (i = 0; i < dev->data->nb_tx_queues; i++) { + struct mrvl_txq *txq = dev->data->tx_queues[i]; + + dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + + if (!txq->tx_deferred_start) + continue; + + /* + * All txqs are started by default. Stop them + * so that tx_deferred_start works as expected. + */ + ret = mrvl_tx_queue_stop(dev, i); + if (ret) + goto out; + } + + mrvl_flow_init(dev); + mrvl_mtr_init(dev); + mrvl_set_tx_function(dev); + + return 0; +out: + MRVL_LOG(ERR, "Failed to start device"); + pp2_ppio_deinit(priv->ppio); + return ret; +} + +/** + * Flush receive queues. + * + * @param dev + * Pointer to Ethernet device structure. 
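+ * + * Drains all descriptors still pending in the hardware receive queues.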
+ */ +static void +mrvl_flush_rx_queues(struct rte_eth_dev *dev) +{ + int i; + + MRVL_LOG(INFO, "Flushing rx queues"); + for (i = 0; i < dev->data->nb_rx_queues; i++) { + int ret, num; + + do { + struct mrvl_rxq *q = dev->data->rx_queues[i]; + struct pp2_ppio_desc descs[MRVL_PP2_RXD_MAX]; + + num = MRVL_PP2_RXD_MAX; + ret = pp2_ppio_recv(q->priv->ppio, + q->priv->rxq_map[q->queue_id].tc, + q->priv->rxq_map[q->queue_id].inq, + descs, (uint16_t *)&num); + } while (ret == 0 && num); + } +} + +/** + * Flush transmit shadow queues. + * + * @param dev + * Pointer to Ethernet device structure. + */ +static void +mrvl_flush_tx_shadow_queues(struct rte_eth_dev *dev) +{ + int i, j; + struct mrvl_txq *txq; + + MRVL_LOG(INFO, "Flushing tx shadow queues"); + for (i = 0; i < dev->data->nb_tx_queues; i++) { + txq = (struct mrvl_txq *)dev->data->tx_queues[i]; + + for (j = 0; j < RTE_MAX_LCORE; j++) { + struct mrvl_shadow_txq *sq; + + if (!hifs[j]) + continue; + + sq = &txq->shadow_txqs[j]; + mrvl_free_sent_buffers(txq->priv->ppio, + hifs[j], j, sq, txq->queue_id, 1); + while (sq->tail != sq->head) { + uint64_t addr = cookie_addr_high | + sq->ent[sq->tail].buff.cookie; + rte_pktmbuf_free( + (struct rte_mbuf *)addr); + sq->tail = (sq->tail + 1) & + MRVL_PP2_TX_SHADOWQ_MASK; + } + memset(sq, 0, sizeof(*sq)); + } + } +} + +/** + * Flush hardware bpool (buffer-pool). + * + * @param dev + * Pointer to Ethernet device structure. + */ +static void +mrvl_flush_bpool(struct rte_eth_dev *dev) +{ + struct mrvl_priv *priv = dev->data->dev_private; + struct pp2_hif *hif; + uint32_t num; + int ret; + unsigned int core_id = rte_lcore_id(); + + if (core_id == LCORE_ID_ANY) + core_id = 0; + + hif = mrvl_get_hif(priv, core_id); + + ret = pp2_bpool_get_num_buffs(priv->bpool, &num); + if (ret) { + MRVL_LOG(ERR, "Failed to get bpool buffers number"); + return; + } + + while (num--) { + struct pp2_buff_inf inf; + uint64_t addr; + + ret = pp2_bpool_get_buff(hif, priv->bpool, &inf); + if (ret) + break; + + addr = cookie_addr_high | inf.cookie; + rte_pktmbuf_free((struct rte_mbuf *)addr); + } +} + +/** + * DPDK callback to stop the device. + * + * @param dev + * Pointer to Ethernet device structure. + */ +static void +mrvl_dev_stop(struct rte_eth_dev *dev) +{ + mrvl_dev_set_link_down(dev); +} + +/** + * DPDK callback to close the device. + * + * @param dev + * Pointer to Ethernet device structure. 
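+ * + * Releases queues, classifier/QoS tables, the bpool and the PPIO; when + * the last port is closed MUSDK itself is deinitialized.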
+ */ +static void +mrvl_dev_close(struct rte_eth_dev *dev) +{ + struct mrvl_priv *priv = dev->data->dev_private; + size_t i; + + mrvl_flush_rx_queues(dev); + mrvl_flush_tx_shadow_queues(dev); + mrvl_flow_deinit(dev); + mrvl_mtr_deinit(dev); + + for (i = 0; i < priv->ppio_params.inqs_params.num_tcs; ++i) { + struct pp2_ppio_tc_params *tc_params = + &priv->ppio_params.inqs_params.tcs_params[i]; + + if (tc_params->inqs_params) { + rte_free(tc_params->inqs_params); + tc_params->inqs_params = NULL; + } + } + + if (priv->cls_tbl) { + pp2_cls_tbl_deinit(priv->cls_tbl); + priv->cls_tbl = NULL; + } + + if (priv->qos_tbl) { + pp2_cls_qos_tbl_deinit(priv->qos_tbl); + priv->qos_tbl = NULL; + } + + mrvl_flush_bpool(dev); + mrvl_tm_deinit(dev); + + if (priv->ppio) { + pp2_ppio_deinit(priv->ppio); + priv->ppio = NULL; + } + + /* policer must be released after ppio deinitialization */ + if (priv->default_policer) { + pp2_cls_plcr_deinit(priv->default_policer); + priv->default_policer = NULL; + } + + + if (priv->bpool) { + pp2_bpool_deinit(priv->bpool); + used_bpools[priv->pp_id] &= ~(1 << priv->bpool_bit); + priv->bpool = NULL; + } + + mrvl_dev_num--; + + if (mrvl_dev_num == 0) { + MRVL_LOG(INFO, "Perform MUSDK deinit"); + mrvl_deinit_hifs(); + mrvl_deinit_pp2(); + rte_mvep_deinit(MVEP_MOD_T_PP2); + } +} + +/** + * DPDK callback to retrieve physical link information. + * + * @param dev + * Pointer to Ethernet device structure. + * @param wait_to_complete + * Wait for request completion (ignored). + * + * @return + * 0 on success, negative error value otherwise. + */ +static int +mrvl_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused) +{ + /* + * TODO + * once MUSDK provides necessary API use it here + */ + struct mrvl_priv *priv = dev->data->dev_private; + struct ethtool_cmd edata; + struct ifreq req; + int ret, fd, link_up; + + if (!priv->ppio) + return -EPERM; + + edata.cmd = ETHTOOL_GSET; + + strcpy(req.ifr_name, dev->data->name); + req.ifr_data = (void *)&edata; + + fd = socket(AF_INET, SOCK_DGRAM, 0); + if (fd == -1) + return -EFAULT; + + ret = ioctl(fd, SIOCETHTOOL, &req); + if (ret == -1) { + close(fd); + return -EFAULT; + } + + close(fd); + + switch (ethtool_cmd_speed(&edata)) { + case SPEED_10: + dev->data->dev_link.link_speed = ETH_SPEED_NUM_10M; + break; + case SPEED_100: + dev->data->dev_link.link_speed = ETH_SPEED_NUM_100M; + break; + case SPEED_1000: + dev->data->dev_link.link_speed = ETH_SPEED_NUM_1G; + break; + case SPEED_10000: + dev->data->dev_link.link_speed = ETH_SPEED_NUM_10G; + break; + default: + dev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE; + } + + dev->data->dev_link.link_duplex = edata.duplex ? ETH_LINK_FULL_DUPLEX : + ETH_LINK_HALF_DUPLEX; + dev->data->dev_link.link_autoneg = edata.autoneg ? ETH_LINK_AUTONEG : + ETH_LINK_FIXED; + pp2_ppio_get_link_state(priv->ppio, &link_up); + dev->data->dev_link.link_status = link_up ? ETH_LINK_UP : ETH_LINK_DOWN; + + return 0; +} + +/** + * DPDK callback to enable promiscuous mode. + * + * @param dev + * Pointer to Ethernet device structure. + * + * @return + * 0 on success, negative error value otherwise. + */ +static int +mrvl_promiscuous_enable(struct rte_eth_dev *dev) +{ + struct mrvl_priv *priv = dev->data->dev_private; + int ret; + + if (!priv->ppio) + return 0; + + if (priv->isolated) + return 0; + + ret = pp2_ppio_set_promisc(priv->ppio, 1); + if (ret) { + MRVL_LOG(ERR, "Failed to enable promiscuous mode"); + return -EAGAIN; + } + + return 0; +} + +/** + * DPDK callback to enable allmulti mode. 
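+ * All multicast frames are then accepted regardless of the mc filter list.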
+ * + * @param dev + * Pointer to Ethernet device structure. + * + * @return + * 0 on success, negative error value otherwise. + */ +static int +mrvl_allmulticast_enable(struct rte_eth_dev *dev) +{ + struct mrvl_priv *priv = dev->data->dev_private; + int ret; + + if (!priv->ppio) + return 0; + + if (priv->isolated) + return 0; + + ret = pp2_ppio_set_mc_promisc(priv->ppio, 1); + if (ret) { + MRVL_LOG(ERR, "Failed enable all-multicast mode"); + return -EAGAIN; + } + + return 0; +} + +/** + * DPDK callback to disable promiscuous mode. + * + * @param dev + * Pointer to Ethernet device structure. + * + * @return + * 0 on success, negative error value otherwise. + */ +static int +mrvl_promiscuous_disable(struct rte_eth_dev *dev) +{ + struct mrvl_priv *priv = dev->data->dev_private; + int ret; + + if (!priv->ppio) + return 0; + + ret = pp2_ppio_set_promisc(priv->ppio, 0); + if (ret) { + MRVL_LOG(ERR, "Failed to disable promiscuous mode"); + return -EAGAIN; + } + + return 0; +} + +/** + * DPDK callback to disable allmulticast mode. + * + * @param dev + * Pointer to Ethernet device structure. + * + * @return + * 0 on success, negative error value otherwise. + */ +static int +mrvl_allmulticast_disable(struct rte_eth_dev *dev) +{ + struct mrvl_priv *priv = dev->data->dev_private; + int ret; + + if (!priv->ppio) + return 0; + + ret = pp2_ppio_set_mc_promisc(priv->ppio, 0); + if (ret) { + MRVL_LOG(ERR, "Failed to disable all-multicast mode"); + return -EAGAIN; + } + + return 0; +} + +/** + * DPDK callback to remove a MAC address. + * + * @param dev + * Pointer to Ethernet device structure. + * @param index + * MAC address index. + */ +static void +mrvl_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index) +{ + struct mrvl_priv *priv = dev->data->dev_private; + char buf[RTE_ETHER_ADDR_FMT_SIZE]; + int ret; + + if (!priv->ppio) + return; + + if (priv->isolated) + return; + + ret = pp2_ppio_remove_mac_addr(priv->ppio, + dev->data->mac_addrs[index].addr_bytes); + if (ret) { + rte_ether_format_addr(buf, sizeof(buf), + &dev->data->mac_addrs[index]); + MRVL_LOG(ERR, "Failed to remove mac %s", buf); + } +} + +/** + * DPDK callback to add a MAC address. + * + * @param dev + * Pointer to Ethernet device structure. + * @param mac_addr + * MAC address to register. + * @param index + * MAC address index. + * @param vmdq + * VMDq pool index to associate address with (unused). + * + * @return + * 0 on success, negative error value otherwise. + */ +static int +mrvl_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr, + uint32_t index, uint32_t vmdq __rte_unused) +{ + struct mrvl_priv *priv = dev->data->dev_private; + char buf[RTE_ETHER_ADDR_FMT_SIZE]; + int ret; + + if (priv->isolated) + return -ENOTSUP; + + if (index == 0) + /* For setting index 0, mrvl_mac_addr_set() should be used.*/ + return -1; + + if (!priv->ppio) + return 0; + + /* + * Maximum number of uc addresses can be tuned via kernel module mvpp2x + * parameter uc_filter_max. Maximum number of mc addresses is then + * MRVL_MAC_ADDRS_MAX - uc_filter_max. Currently it defaults to 4 and + * 21 respectively. + * + * If more than uc_filter_max uc addresses were added to filter list + * then NIC will switch to promiscuous mode automatically. + * + * If more than MRVL_MAC_ADDRS_MAX - uc_filter_max number mc addresses + * were added to filter list then NIC will switch to all-multicast mode + * automatically. 
+ */ + ret = pp2_ppio_add_mac_addr(priv->ppio, mac_addr->addr_bytes); + if (ret) { + rte_ether_format_addr(buf, sizeof(buf), mac_addr); + MRVL_LOG(ERR, "Failed to add mac %s", buf); + return -1; + } + + return 0; +} + +/** + * DPDK callback to set the primary MAC address. + * + * @param dev + * Pointer to Ethernet device structure. + * @param mac_addr + * MAC address to register. + * + * @return + * 0 on success, negative error value otherwise. + */ +static int +mrvl_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr) +{ + struct mrvl_priv *priv = dev->data->dev_private; + int ret; + + if (!priv->ppio) + return 0; + + if (priv->isolated) + return -ENOTSUP; + + ret = pp2_ppio_set_mac_addr(priv->ppio, mac_addr->addr_bytes); + if (ret) { + char buf[RTE_ETHER_ADDR_FMT_SIZE]; + rte_ether_format_addr(buf, sizeof(buf), mac_addr); + MRVL_LOG(ERR, "Failed to set mac to %s", buf); + } + + return ret; +} + +/** + * DPDK callback to get device statistics. + * + * @param dev + * Pointer to Ethernet device structure. + * @param stats + * Stats structure output buffer. + * + * @return + * 0 on success, negative error value otherwise. + */ +static int +mrvl_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) +{ + struct mrvl_priv *priv = dev->data->dev_private; + struct pp2_ppio_statistics ppio_stats; + uint64_t drop_mac = 0; + unsigned int i, idx, ret; + + if (!priv->ppio) + return -EPERM; + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + struct mrvl_rxq *rxq = dev->data->rx_queues[i]; + struct pp2_ppio_inq_statistics rx_stats; + + if (!rxq) + continue; + + idx = rxq->queue_id; + if (unlikely(idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)) { + MRVL_LOG(ERR, + "rx queue %d stats out of range (0 - %d)", + idx, RTE_ETHDEV_QUEUE_STAT_CNTRS - 1); + continue; + } + + ret = pp2_ppio_inq_get_statistics(priv->ppio, + priv->rxq_map[idx].tc, + priv->rxq_map[idx].inq, + &rx_stats, 0); + if (unlikely(ret)) { + MRVL_LOG(ERR, + "Failed to update rx queue %d stats", idx); + break; + } + + stats->q_ibytes[idx] = rxq->bytes_recv; + stats->q_ipackets[idx] = rx_stats.enq_desc - rxq->drop_mac; + stats->q_errors[idx] = rx_stats.drop_early + + rx_stats.drop_fullq + + rx_stats.drop_bm + + rxq->drop_mac; + stats->ibytes += rxq->bytes_recv; + drop_mac += rxq->drop_mac; + } + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + struct mrvl_txq *txq = dev->data->tx_queues[i]; + struct pp2_ppio_outq_statistics tx_stats; + + if (!txq) + continue; + + idx = txq->queue_id; + if (unlikely(idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)) { + MRVL_LOG(ERR, + "tx queue %d stats out of range (0 - %d)", + idx, RTE_ETHDEV_QUEUE_STAT_CNTRS - 1); + } + + ret = pp2_ppio_outq_get_statistics(priv->ppio, idx, + &tx_stats, 0); + if (unlikely(ret)) { + MRVL_LOG(ERR, + "Failed to update tx queue %d stats", idx); + break; + } + + stats->q_opackets[idx] = tx_stats.deq_desc; + stats->q_obytes[idx] = txq->bytes_sent; + stats->obytes += txq->bytes_sent; + } + + ret = pp2_ppio_get_statistics(priv->ppio, &ppio_stats, 0); + if (unlikely(ret)) { + MRVL_LOG(ERR, "Failed to update port statistics"); + return ret; + } + + stats->ipackets += ppio_stats.rx_packets - drop_mac; + stats->opackets += ppio_stats.tx_packets; + stats->imissed += ppio_stats.rx_fullq_dropped + + ppio_stats.rx_bm_dropped + + ppio_stats.rx_early_dropped + + ppio_stats.rx_fifo_dropped + + ppio_stats.rx_cls_dropped; + stats->ierrors = drop_mac; + + return 0; +} + +/** + * DPDK callback to clear device statistics. + * + * @param dev + * Pointer to Ethernet device structure. 
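+ *
+ * Clears both the software counters kept in the rx/tx queue structures
+ * (bytes_recv, drop_mac, bytes_sent) and the hardware counters, by
+ * calling the MUSDK statistics getters with their reset flag set.
+ * Illustrative application-side call (port_id assumed):
+ * @code
+ * rte_eth_stats_reset(port_id);
+ * @endcode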
+ * + * @return + * 0 on success, negative error value otherwise. + */ +static int +mrvl_stats_reset(struct rte_eth_dev *dev) +{ + struct mrvl_priv *priv = dev->data->dev_private; + int i; + + if (!priv->ppio) + return 0; + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + struct mrvl_rxq *rxq = dev->data->rx_queues[i]; + + pp2_ppio_inq_get_statistics(priv->ppio, priv->rxq_map[i].tc, + priv->rxq_map[i].inq, NULL, 1); + rxq->bytes_recv = 0; + rxq->drop_mac = 0; + } + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + struct mrvl_txq *txq = dev->data->tx_queues[i]; + + pp2_ppio_outq_get_statistics(priv->ppio, i, NULL, 1); + txq->bytes_sent = 0; + } + + return pp2_ppio_get_statistics(priv->ppio, NULL, 1); +} + +/** + * DPDK callback to get extended statistics. + * + * @param dev + * Pointer to Ethernet device structure. + * @param stats + * Pointer to xstats table. + * @param n + * Number of entries in xstats table. + * @return + * Negative value on error, number of read xstats otherwise. + */ +static int +mrvl_xstats_get(struct rte_eth_dev *dev, + struct rte_eth_xstat *stats, unsigned int n) +{ + struct mrvl_priv *priv = dev->data->dev_private; + struct pp2_ppio_statistics ppio_stats; + unsigned int i; + + if (!stats) + return 0; + + pp2_ppio_get_statistics(priv->ppio, &ppio_stats, 0); + for (i = 0; i < n && i < RTE_DIM(mrvl_xstats_tbl); i++) { + uint64_t val; + + if (mrvl_xstats_tbl[i].size == sizeof(uint32_t)) + val = *(uint32_t *)((uint8_t *)&ppio_stats + + mrvl_xstats_tbl[i].offset); + else if (mrvl_xstats_tbl[i].size == sizeof(uint64_t)) + val = *(uint64_t *)((uint8_t *)&ppio_stats + + mrvl_xstats_tbl[i].offset); + else + return -EINVAL; + + stats[i].id = i; + stats[i].value = val; + } + + return n; +} + +/** + * DPDK callback to reset extended statistics. + * + * @param dev + * Pointer to Ethernet device structure. + * + * @return + * 0 on success, negative error value otherwise. + */ +static int +mrvl_xstats_reset(struct rte_eth_dev *dev) +{ + return mrvl_stats_reset(dev); +} + +/** + * DPDK callback to get extended statistics names. + * + * @param dev (unused) + * Pointer to Ethernet device structure. + * @param xstats_names + * Pointer to xstats names table. + * @param size + * Size of the xstats names table. + * @return + * Number of read names. + */ +static int +mrvl_xstats_get_names(struct rte_eth_dev *dev __rte_unused, + struct rte_eth_xstat_name *xstats_names, + unsigned int size) +{ + unsigned int i; + + if (!xstats_names) + return RTE_DIM(mrvl_xstats_tbl); + + for (i = 0; i < size && i < RTE_DIM(mrvl_xstats_tbl); i++) + strlcpy(xstats_names[i].name, mrvl_xstats_tbl[i].name, + RTE_ETH_XSTATS_NAME_SIZE); + + return size; +} + +/** + * DPDK callback to get information about the device. + * + * @param dev + * Pointer to Ethernet device structure (unused). + * @param info + * Info structure output buffer. 
+ */ +static int +mrvl_dev_infos_get(struct rte_eth_dev *dev __rte_unused, + struct rte_eth_dev_info *info) +{ + info->speed_capa = ETH_LINK_SPEED_10M | + ETH_LINK_SPEED_100M | + ETH_LINK_SPEED_1G | + ETH_LINK_SPEED_10G; + + info->max_rx_queues = MRVL_PP2_RXQ_MAX; + info->max_tx_queues = MRVL_PP2_TXQ_MAX; + info->max_mac_addrs = MRVL_MAC_ADDRS_MAX; + + info->rx_desc_lim.nb_max = MRVL_PP2_RXD_MAX; + info->rx_desc_lim.nb_min = MRVL_PP2_RXD_MIN; + info->rx_desc_lim.nb_align = MRVL_PP2_RXD_ALIGN; + + info->tx_desc_lim.nb_max = MRVL_PP2_TXD_MAX; + info->tx_desc_lim.nb_min = MRVL_PP2_TXD_MIN; + info->tx_desc_lim.nb_align = MRVL_PP2_TXD_ALIGN; + + info->rx_offload_capa = MRVL_RX_OFFLOADS; + info->rx_queue_offload_capa = MRVL_RX_OFFLOADS; + + info->tx_offload_capa = MRVL_TX_OFFLOADS; + info->tx_queue_offload_capa = MRVL_TX_OFFLOADS; + + info->flow_type_rss_offloads = ETH_RSS_IPV4 | + ETH_RSS_NONFRAG_IPV4_TCP | + ETH_RSS_NONFRAG_IPV4_UDP; + + /* By default packets are dropped if no descriptors are available */ + info->default_rxconf.rx_drop_en = 1; + + info->max_rx_pktlen = MRVL_PKT_SIZE_MAX; + + return 0; +} + +/** + * Return supported packet types. + * + * @param dev + * Pointer to Ethernet device structure (unused). + * + * @return + * Const pointer to the table with supported packet types. + */ +static const uint32_t * +mrvl_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused) +{ + static const uint32_t ptypes[] = { + RTE_PTYPE_L2_ETHER, + RTE_PTYPE_L2_ETHER_VLAN, + RTE_PTYPE_L2_ETHER_QINQ, + RTE_PTYPE_L3_IPV4, + RTE_PTYPE_L3_IPV4_EXT, + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN, + RTE_PTYPE_L3_IPV6, + RTE_PTYPE_L3_IPV6_EXT, + RTE_PTYPE_L2_ETHER_ARP, + RTE_PTYPE_L4_TCP, + RTE_PTYPE_L4_UDP + }; + + return ptypes; +} + +/** + * DPDK callback to get information about specific receive queue. + * + * @param dev + * Pointer to Ethernet device structure. + * @param rx_queue_id + * Receive queue index. + * @param qinfo + * Receive queue information structure. + */ +static void mrvl_rxq_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id, + struct rte_eth_rxq_info *qinfo) +{ + struct mrvl_rxq *q = dev->data->rx_queues[rx_queue_id]; + struct mrvl_priv *priv = dev->data->dev_private; + int inq = priv->rxq_map[rx_queue_id].inq; + int tc = priv->rxq_map[rx_queue_id].tc; + struct pp2_ppio_tc_params *tc_params = + &priv->ppio_params.inqs_params.tcs_params[tc]; + + qinfo->mp = q->mp; + qinfo->nb_desc = tc_params->inqs_params[inq].size; +} + +/** + * DPDK callback to get information about specific transmit queue. + * + * @param dev + * Pointer to Ethernet device structure. + * @param tx_queue_id + * Transmit queue index. + * @param qinfo + * Transmit queue information structure. + */ +static void mrvl_txq_info_get(struct rte_eth_dev *dev, uint16_t tx_queue_id, + struct rte_eth_txq_info *qinfo) +{ + struct mrvl_priv *priv = dev->data->dev_private; + struct mrvl_txq *txq = dev->data->tx_queues[tx_queue_id]; + + qinfo->nb_desc = + priv->ppio_params.outqs_params.outqs_params[tx_queue_id].size; + qinfo->conf.tx_deferred_start = txq->tx_deferred_start; +} + +/** + * DPDK callback to Configure a VLAN filter. + * + * @param dev + * Pointer to Ethernet device structure. + * @param vlan_id + * VLAN ID to filter. + * @param on + * Toggle filter. + * + * @return + * 0 on success, negative error value otherwise. 
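+ *
+ * Illustrative application-side call adding VLAN 100 to the filter
+ * (port_id assumed):
+ * @code
+ * rte_eth_dev_vlan_filter(port_id, 100, 1);
+ * @endcode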
+ */ +static int +mrvl_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) +{ + struct mrvl_priv *priv = dev->data->dev_private; + + if (!priv->ppio) + return -EPERM; + + if (priv->isolated) + return -ENOTSUP; + + return on ? pp2_ppio_add_vlan(priv->ppio, vlan_id) : + pp2_ppio_remove_vlan(priv->ppio, vlan_id); +} + +/** + * Release buffers to hardware bpool (buffer-pool) + * + * @param rxq + * Receive queue pointer. + * @param num + * Number of buffers to release to bpool. + * + * @return + * 0 on success, negative error value otherwise. + */ +static int +mrvl_fill_bpool(struct mrvl_rxq *rxq, int num) +{ + struct buff_release_entry entries[MRVL_PP2_RXD_MAX]; + struct rte_mbuf *mbufs[MRVL_PP2_RXD_MAX]; + int i, ret; + unsigned int core_id; + struct pp2_hif *hif; + struct pp2_bpool *bpool; + + core_id = rte_lcore_id(); + if (core_id == LCORE_ID_ANY) + core_id = 0; + + hif = mrvl_get_hif(rxq->priv, core_id); + if (!hif) + return -1; + + bpool = rxq->priv->bpool; + + ret = rte_pktmbuf_alloc_bulk(rxq->mp, mbufs, num); + if (ret) + return ret; + + if (cookie_addr_high == MRVL_COOKIE_ADDR_INVALID) + cookie_addr_high = + (uint64_t)mbufs[0] & MRVL_COOKIE_HIGH_ADDR_MASK; + + for (i = 0; i < num; i++) { + if (((uint64_t)mbufs[i] & MRVL_COOKIE_HIGH_ADDR_MASK) + != cookie_addr_high) { + MRVL_LOG(ERR, + "mbuf virtual addr high 0x%lx out of range", + (uint64_t)mbufs[i] >> 32); + goto out; + } + + entries[i].buff.addr = + rte_mbuf_data_iova_default(mbufs[i]); + entries[i].buff.cookie = (uint64_t)mbufs[i]; + entries[i].bpool = bpool; + } + + pp2_bpool_put_buffs(hif, entries, (uint16_t *)&i); + mrvl_port_bpool_size[bpool->pp2_id][bpool->id][core_id] += i; + + if (i != num) + goto out; + + return 0; +out: + for (; i < num; i++) + rte_pktmbuf_free(mbufs[i]); + + return -1; +} + +/** + * DPDK callback to configure the receive queue. + * + * @param dev + * Pointer to Ethernet device structure. + * @param idx + * RX queue index. + * @param desc + * Number of descriptors to configure in queue. + * @param socket + * NUMA socket on which memory must be allocated. + * @param conf + * Thresholds parameters. + * @param mp + * Memory pool for buffer allocations. + * + * @return + * 0 on success, negative error value otherwise. + */ +static int +mrvl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, + unsigned int socket, + const struct rte_eth_rxconf *conf, + struct rte_mempool *mp) +{ + struct mrvl_priv *priv = dev->data->dev_private; + struct mrvl_rxq *rxq; + uint32_t frame_size, buf_size = rte_pktmbuf_data_room_size(mp); + uint32_t max_rx_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len; + int ret, tc, inq; + uint64_t offloads; + + offloads = conf->offloads | dev->data->dev_conf.rxmode.offloads; + + if (priv->rxq_map[idx].tc == MRVL_UNKNOWN_TC) { + /* + * Unknown TC mapping, mapping will not have a correct queue. 
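+ *
+ * This means the rx queue index was never assigned to a
+ * (traffic class, in-queue) pair during device configuration, so
+ * reject the setup instead of creating a queue that could never
+ * receive traffic.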
+ */ + MRVL_LOG(ERR, "Unknown TC mapping for queue %hu eth%hhu", + idx, priv->ppio_id); + return -EFAULT; + } + + frame_size = buf_size - RTE_PKTMBUF_HEADROOM - MRVL_PKT_EFFEC_OFFS; + if (frame_size < max_rx_pkt_len) { + MRVL_LOG(WARNING, + "Mbuf size must be increased to %u bytes to hold up " + "to %u bytes of data.", + buf_size + max_rx_pkt_len - frame_size, + max_rx_pkt_len); + dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size; + MRVL_LOG(INFO, "Setting max rx pkt len to %u", + dev->data->dev_conf.rxmode.max_rx_pkt_len); + } + + if (dev->data->rx_queues[idx]) { + rte_free(dev->data->rx_queues[idx]); + dev->data->rx_queues[idx] = NULL; + } + + rxq = rte_zmalloc_socket("rxq", sizeof(*rxq), 0, socket); + if (!rxq) + return -ENOMEM; + + rxq->priv = priv; + rxq->mp = mp; + rxq->cksum_enabled = offloads & DEV_RX_OFFLOAD_IPV4_CKSUM; + rxq->queue_id = idx; + rxq->port_id = dev->data->port_id; + mrvl_port_to_bpool_lookup[rxq->port_id] = priv->bpool; + + tc = priv->rxq_map[rxq->queue_id].tc, + inq = priv->rxq_map[rxq->queue_id].inq; + priv->ppio_params.inqs_params.tcs_params[tc].inqs_params[inq].size = + desc; + + ret = mrvl_fill_bpool(rxq, desc); + if (ret) { + rte_free(rxq); + return ret; + } + + priv->bpool_init_size += desc; + + dev->data->rx_queues[idx] = rxq; + + return 0; +} + +/** + * DPDK callback to release the receive queue. + * + * @param rxq + * Generic receive queue pointer. + */ +static void +mrvl_rx_queue_release(void *rxq) +{ + struct mrvl_rxq *q = rxq; + struct pp2_ppio_tc_params *tc_params; + int i, num, tc, inq; + struct pp2_hif *hif; + unsigned int core_id = rte_lcore_id(); + + if (core_id == LCORE_ID_ANY) + core_id = 0; + + if (!q) + return; + + hif = mrvl_get_hif(q->priv, core_id); + + if (!hif) + return; + + tc = q->priv->rxq_map[q->queue_id].tc; + inq = q->priv->rxq_map[q->queue_id].inq; + tc_params = &q->priv->ppio_params.inqs_params.tcs_params[tc]; + num = tc_params->inqs_params[inq].size; + for (i = 0; i < num; i++) { + struct pp2_buff_inf inf; + uint64_t addr; + + pp2_bpool_get_buff(hif, q->priv->bpool, &inf); + addr = cookie_addr_high | inf.cookie; + rte_pktmbuf_free((struct rte_mbuf *)addr); + } + + rte_free(q); +} + +/** + * DPDK callback to configure the transmit queue. + * + * @param dev + * Pointer to Ethernet device structure. + * @param idx + * Transmit queue index. + * @param desc + * Number of descriptors to configure in the queue. + * @param socket + * NUMA socket on which memory must be allocated. + * @param conf + * Tx queue configuration parameters. + * + * @return + * 0 on success, negative error value otherwise. + */ +static int +mrvl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, + unsigned int socket, + const struct rte_eth_txconf *conf) +{ + struct mrvl_priv *priv = dev->data->dev_private; + struct mrvl_txq *txq; + + if (dev->data->tx_queues[idx]) { + rte_free(dev->data->tx_queues[idx]); + dev->data->tx_queues[idx] = NULL; + } + + txq = rte_zmalloc_socket("txq", sizeof(*txq), 0, socket); + if (!txq) + return -ENOMEM; + + txq->priv = priv; + txq->queue_id = idx; + txq->port_id = dev->data->port_id; + txq->tx_deferred_start = conf->tx_deferred_start; + dev->data->tx_queues[idx] = txq; + + priv->ppio_params.outqs_params.outqs_params[idx].size = desc; + + return 0; +} + +/** + * DPDK callback to release the transmit queue. + * + * @param txq + * Generic transmit queue pointer. 
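+ *
+ * Only the queue structure itself is freed; mbufs still referenced by the
+ * per-lcore shadow queues are released on device close by
+ * mrvl_flush_tx_shadow_queues().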
+ */ +static void +mrvl_tx_queue_release(void *txq) +{ + struct mrvl_txq *q = txq; + + if (!q) + return; + + rte_free(q); +} + +/** + * DPDK callback to get flow control configuration. + * + * @param dev + * Pointer to Ethernet device structure. + * @param fc_conf + * Pointer to the flow control configuration. + * + * @return + * 0 on success, negative error value otherwise. + */ +static int +mrvl_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) +{ + struct mrvl_priv *priv = dev->data->dev_private; + int ret, en; + + if (!priv) + return -EPERM; + + ret = pp2_ppio_get_rx_pause(priv->ppio, &en); + if (ret) { + MRVL_LOG(ERR, "Failed to read rx pause state"); + return ret; + } + + fc_conf->mode = en ? RTE_FC_RX_PAUSE : RTE_FC_NONE; + + return 0; +} + +/** + * DPDK callback to set flow control configuration. + * + * @param dev + * Pointer to Ethernet device structure. + * @param fc_conf + * Pointer to the flow control configuration. + * + * @return + * 0 on success, negative error value otherwise. + */ +static int +mrvl_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) +{ + struct mrvl_priv *priv = dev->data->dev_private; + + if (!priv) + return -EPERM; + + if (fc_conf->high_water || + fc_conf->low_water || + fc_conf->pause_time || + fc_conf->mac_ctrl_frame_fwd || + fc_conf->autoneg) { + MRVL_LOG(ERR, "Flowctrl parameter is not supported"); + + return -EINVAL; + } + + if (fc_conf->mode == RTE_FC_NONE || + fc_conf->mode == RTE_FC_RX_PAUSE) { + int ret, en; + + en = fc_conf->mode == RTE_FC_NONE ? 0 : 1; + ret = pp2_ppio_set_rx_pause(priv->ppio, en); + if (ret) + MRVL_LOG(ERR, + "Failed to change flowctrl on RX side"); + + return ret; + } + + return 0; +} + +/** + * Update RSS hash configuration + * + * @param dev + * Pointer to Ethernet device structure. + * @param rss_conf + * Pointer to RSS configuration. + * + * @return + * 0 on success, negative error value otherwise. + */ +static int +mrvl_rss_hash_update(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf) +{ + struct mrvl_priv *priv = dev->data->dev_private; + + if (priv->isolated) + return -ENOTSUP; + + return mrvl_configure_rss(priv, rss_conf); +} + +/** + * DPDK callback to get RSS hash configuration. + * + * @param dev + * Pointer to Ethernet device structure. + * @rss_conf + * Pointer to RSS configuration. + * + * @return + * Always 0. + */ +static int +mrvl_rss_hash_conf_get(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf) +{ + struct mrvl_priv *priv = dev->data->dev_private; + enum pp2_ppio_hash_type hash_type = + priv->ppio_params.inqs_params.hash_type; + + rss_conf->rss_key = NULL; + + if (hash_type == PP2_PPIO_HASH_T_NONE) + rss_conf->rss_hf = 0; + else if (hash_type == PP2_PPIO_HASH_T_2_TUPLE) + rss_conf->rss_hf = ETH_RSS_IPV4; + else if (hash_type == PP2_PPIO_HASH_T_5_TUPLE && priv->rss_hf_tcp) + rss_conf->rss_hf = ETH_RSS_NONFRAG_IPV4_TCP; + else if (hash_type == PP2_PPIO_HASH_T_5_TUPLE && !priv->rss_hf_tcp) + rss_conf->rss_hf = ETH_RSS_NONFRAG_IPV4_UDP; + + return 0; +} + +/** + * DPDK callback to get rte_flow callbacks. + * + * @param dev + * Pointer to the device structure. + * @param filer_type + * Flow filter type. + * @param filter_op + * Flow filter operation. + * @param arg + * Pointer to pass the flow ops. + * + * @return + * 0 on success, negative error value otherwise. 
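+ *
+ * Only RTE_ETH_FILTER_GENERIC with RTE_ETH_FILTER_GET is handled; this is
+ * how the rte_flow layer obtains mrvl_flow_ops, so rte_flow_validate()
+ * and rte_flow_create() calls on this port end up in mrvl_flow.c.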
+ */ +static int +mrvl_eth_filter_ctrl(struct rte_eth_dev *dev __rte_unused, + enum rte_filter_type filter_type, + enum rte_filter_op filter_op, void *arg) +{ + switch (filter_type) { + case RTE_ETH_FILTER_GENERIC: + if (filter_op != RTE_ETH_FILTER_GET) + return -EINVAL; + *(const void **)arg = &mrvl_flow_ops; + return 0; + default: + MRVL_LOG(WARNING, "Filter type (%d) not supported", + filter_type); + return -EINVAL; + } +} + +/** + * DPDK callback to get rte_mtr callbacks. + * + * @param dev + * Pointer to the device structure. + * @param ops + * Pointer to pass the mtr ops. + * + * @return + * Always 0. + */ +static int +mrvl_mtr_ops_get(struct rte_eth_dev *dev __rte_unused, void *ops) +{ + *(const void **)ops = &mrvl_mtr_ops; + + return 0; +} + +/** + * DPDK callback to get rte_tm callbacks. + * + * @param dev + * Pointer to the device structure. + * @param ops + * Pointer to pass the tm ops. + * + * @return + * Always 0. + */ +static int +mrvl_tm_ops_get(struct rte_eth_dev *dev __rte_unused, void *ops) +{ + *(const void **)ops = &mrvl_tm_ops; + + return 0; +} + +static const struct eth_dev_ops mrvl_ops = { + .dev_configure = mrvl_dev_configure, + .dev_start = mrvl_dev_start, + .dev_stop = mrvl_dev_stop, + .dev_set_link_up = mrvl_dev_set_link_up, + .dev_set_link_down = mrvl_dev_set_link_down, + .dev_close = mrvl_dev_close, + .link_update = mrvl_link_update, + .promiscuous_enable = mrvl_promiscuous_enable, + .allmulticast_enable = mrvl_allmulticast_enable, + .promiscuous_disable = mrvl_promiscuous_disable, + .allmulticast_disable = mrvl_allmulticast_disable, + .mac_addr_remove = mrvl_mac_addr_remove, + .mac_addr_add = mrvl_mac_addr_add, + .mac_addr_set = mrvl_mac_addr_set, + .mtu_set = mrvl_mtu_set, + .stats_get = mrvl_stats_get, + .stats_reset = mrvl_stats_reset, + .xstats_get = mrvl_xstats_get, + .xstats_reset = mrvl_xstats_reset, + .xstats_get_names = mrvl_xstats_get_names, + .dev_infos_get = mrvl_dev_infos_get, + .dev_supported_ptypes_get = mrvl_dev_supported_ptypes_get, + .rxq_info_get = mrvl_rxq_info_get, + .txq_info_get = mrvl_txq_info_get, + .vlan_filter_set = mrvl_vlan_filter_set, + .tx_queue_start = mrvl_tx_queue_start, + .tx_queue_stop = mrvl_tx_queue_stop, + .rx_queue_setup = mrvl_rx_queue_setup, + .rx_queue_release = mrvl_rx_queue_release, + .tx_queue_setup = mrvl_tx_queue_setup, + .tx_queue_release = mrvl_tx_queue_release, + .flow_ctrl_get = mrvl_flow_ctrl_get, + .flow_ctrl_set = mrvl_flow_ctrl_set, + .rss_hash_update = mrvl_rss_hash_update, + .rss_hash_conf_get = mrvl_rss_hash_conf_get, + .filter_ctrl = mrvl_eth_filter_ctrl, + .mtr_ops_get = mrvl_mtr_ops_get, + .tm_ops_get = mrvl_tm_ops_get, +}; + +/** + * Return packet type information and l3/l4 offsets. + * + * @param desc + * Pointer to the received packet descriptor. + * @param l3_offset + * l3 packet offset. + * @param l4_offset + * l4 packet offset. + * + * @return + * Packet type information. 
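+ *
+ * The reported offsets are used by the receive path to fill
+ * mbuf->l2_len (equal to l3_offset) and mbuf->l3_len (equal to
+ * l4_offset - l3_offset).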
+ */ +static inline uint64_t +mrvl_desc_to_packet_type_and_offset(struct pp2_ppio_desc *desc, + uint8_t *l3_offset, uint8_t *l4_offset) +{ + enum pp2_inq_l3_type l3_type; + enum pp2_inq_l4_type l4_type; + enum pp2_inq_vlan_tag vlan_tag; + uint64_t packet_type; + + pp2_ppio_inq_desc_get_l3_info(desc, &l3_type, l3_offset); + pp2_ppio_inq_desc_get_l4_info(desc, &l4_type, l4_offset); + pp2_ppio_inq_desc_get_vlan_tag(desc, &vlan_tag); + + packet_type = RTE_PTYPE_L2_ETHER; + + switch (vlan_tag) { + case PP2_INQ_VLAN_TAG_SINGLE: + packet_type |= RTE_PTYPE_L2_ETHER_VLAN; + break; + case PP2_INQ_VLAN_TAG_DOUBLE: + case PP2_INQ_VLAN_TAG_TRIPLE: + packet_type |= RTE_PTYPE_L2_ETHER_QINQ; + break; + default: + break; + } + + switch (l3_type) { + case PP2_INQ_L3_TYPE_IPV4_NO_OPTS: + packet_type |= RTE_PTYPE_L3_IPV4; + break; + case PP2_INQ_L3_TYPE_IPV4_OK: + packet_type |= RTE_PTYPE_L3_IPV4_EXT; + break; + case PP2_INQ_L3_TYPE_IPV4_TTL_ZERO: + packet_type |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN; + break; + case PP2_INQ_L3_TYPE_IPV6_NO_EXT: + packet_type |= RTE_PTYPE_L3_IPV6; + break; + case PP2_INQ_L3_TYPE_IPV6_EXT: + packet_type |= RTE_PTYPE_L3_IPV6_EXT; + break; + case PP2_INQ_L3_TYPE_ARP: + packet_type |= RTE_PTYPE_L2_ETHER_ARP; + /* + * In case of ARP l4_offset is set to wrong value. + * Set it to proper one so that later on mbuf->l3_len can be + * calculated subtracting l4_offset and l3_offset. + */ + *l4_offset = *l3_offset + MRVL_ARP_LENGTH; + break; + default: + MRVL_LOG(DEBUG, "Failed to recognise l3 packet type"); + break; + } + + switch (l4_type) { + case PP2_INQ_L4_TYPE_TCP: + packet_type |= RTE_PTYPE_L4_TCP; + break; + case PP2_INQ_L4_TYPE_UDP: + packet_type |= RTE_PTYPE_L4_UDP; + break; + default: + MRVL_LOG(DEBUG, "Failed to recognise l4 packet type"); + break; + } + + return packet_type; +} + +/** + * Get offload information from the received packet descriptor. + * + * @param desc + * Pointer to the received packet descriptor. + * + * @return + * Mbuf offload flags. + */ +static inline uint64_t +mrvl_desc_to_ol_flags(struct pp2_ppio_desc *desc) +{ + uint64_t flags; + enum pp2_inq_desc_status status; + + status = pp2_ppio_inq_desc_get_l3_pkt_error(desc); + if (unlikely(status != PP2_DESC_ERR_OK)) + flags = PKT_RX_IP_CKSUM_BAD; + else + flags = PKT_RX_IP_CKSUM_GOOD; + + status = pp2_ppio_inq_desc_get_l4_pkt_error(desc); + if (unlikely(status != PP2_DESC_ERR_OK)) + flags |= PKT_RX_L4_CKSUM_BAD; + else + flags |= PKT_RX_L4_CKSUM_GOOD; + + return flags; +} + +/** + * DPDK callback for receive. + * + * @param rxq + * Generic pointer to the receive queue. + * @param rx_pkts + * Array to store received packets. + * @param nb_pkts + * Maximum number of packets in array. + * + * @return + * Number of packets successfully received. 
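+ *
+ * Illustrative polling loop body on the application side (port_id and
+ * queue_id assumed):
+ * @code
+ * struct rte_mbuf *pkts[32];
+ * uint16_t nb = rte_eth_rx_burst(port_id, queue_id, pkts, 32);
+ * @endcode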
+ */ +static uint16_t +mrvl_rx_pkt_burst(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) +{ + struct mrvl_rxq *q = rxq; + struct pp2_ppio_desc descs[nb_pkts]; + struct pp2_bpool *bpool; + int i, ret, rx_done = 0; + int num; + struct pp2_hif *hif; + unsigned int core_id = rte_lcore_id(); + + hif = mrvl_get_hif(q->priv, core_id); + + if (unlikely(!q->priv->ppio || !hif)) + return 0; + + bpool = q->priv->bpool; + + ret = pp2_ppio_recv(q->priv->ppio, q->priv->rxq_map[q->queue_id].tc, + q->priv->rxq_map[q->queue_id].inq, descs, &nb_pkts); + if (unlikely(ret < 0)) { + MRVL_LOG(ERR, "Failed to receive packets"); + return 0; + } + mrvl_port_bpool_size[bpool->pp2_id][bpool->id][core_id] -= nb_pkts; + + for (i = 0; i < nb_pkts; i++) { + struct rte_mbuf *mbuf; + uint8_t l3_offset, l4_offset; + enum pp2_inq_desc_status status; + uint64_t addr; + + if (likely(nb_pkts - i > MRVL_MUSDK_PREFETCH_SHIFT)) { + struct pp2_ppio_desc *pref_desc; + u64 pref_addr; + + pref_desc = &descs[i + MRVL_MUSDK_PREFETCH_SHIFT]; + pref_addr = cookie_addr_high | + pp2_ppio_inq_desc_get_cookie(pref_desc); + rte_mbuf_prefetch_part1((struct rte_mbuf *)(pref_addr)); + rte_mbuf_prefetch_part2((struct rte_mbuf *)(pref_addr)); + } + + addr = cookie_addr_high | + pp2_ppio_inq_desc_get_cookie(&descs[i]); + mbuf = (struct rte_mbuf *)addr; + rte_pktmbuf_reset(mbuf); + + /* drop packet in case of mac, overrun or resource error */ + status = pp2_ppio_inq_desc_get_l2_pkt_error(&descs[i]); + if (unlikely(status != PP2_DESC_ERR_OK)) { + struct pp2_buff_inf binf = { + .addr = rte_mbuf_data_iova_default(mbuf), + .cookie = (uint64_t)mbuf, + }; + + pp2_bpool_put_buff(hif, bpool, &binf); + mrvl_port_bpool_size + [bpool->pp2_id][bpool->id][core_id]++; + q->drop_mac++; + continue; + } + + mbuf->data_off += MRVL_PKT_EFFEC_OFFS; + mbuf->pkt_len = pp2_ppio_inq_desc_get_pkt_len(&descs[i]); + mbuf->data_len = mbuf->pkt_len; + mbuf->port = q->port_id; + mbuf->packet_type = + mrvl_desc_to_packet_type_and_offset(&descs[i], + &l3_offset, + &l4_offset); + mbuf->l2_len = l3_offset; + mbuf->l3_len = l4_offset - l3_offset; + + if (likely(q->cksum_enabled)) + mbuf->ol_flags = mrvl_desc_to_ol_flags(&descs[i]); + + rx_pkts[rx_done++] = mbuf; + q->bytes_recv += mbuf->pkt_len; + } + + if (rte_spinlock_trylock(&q->priv->lock) == 1) { + num = mrvl_get_bpool_size(bpool->pp2_id, bpool->id); + + if (unlikely(num <= q->priv->bpool_min_size || + (!rx_done && num < q->priv->bpool_init_size))) { + ret = mrvl_fill_bpool(q, MRVL_BURST_SIZE); + if (ret) + MRVL_LOG(ERR, "Failed to fill bpool"); + } else if (unlikely(num > q->priv->bpool_max_size)) { + int i; + int pkt_to_remove = num - q->priv->bpool_init_size; + struct rte_mbuf *mbuf; + struct pp2_buff_inf buff; + + MRVL_LOG(DEBUG, + "port-%d:%d: bpool %d oversize - remove %d buffers (pool size: %d -> %d)", + bpool->pp2_id, q->priv->ppio->port_id, + bpool->id, pkt_to_remove, num, + q->priv->bpool_init_size); + + for (i = 0; i < pkt_to_remove; i++) { + ret = pp2_bpool_get_buff(hif, bpool, &buff); + if (ret) + break; + mbuf = (struct rte_mbuf *) + (cookie_addr_high | buff.cookie); + rte_pktmbuf_free(mbuf); + } + mrvl_port_bpool_size + [bpool->pp2_id][bpool->id][core_id] -= i; + } + rte_spinlock_unlock(&q->priv->lock); + } + + return rx_done; +} + +/** + * Prepare offload information. + * + * @param ol_flags + * Offload flags. + * @param packet_type + * Packet type bitfield. + * @param l3_type + * Pointer to the pp2_ouq_l3_type structure. + * @param l4_type + * Pointer to the pp2_outq_l4_type structure. 
+ * @param gen_l3_cksum + * Will be set to 1 in case l3 checksum is computed. + * @param l4_cksum + * Will be set to 1 in case l4 checksum is computed. + * + * @return + * 0 on success, negative error value otherwise. + */ +static inline int +mrvl_prepare_proto_info(uint64_t ol_flags, uint32_t packet_type, + enum pp2_outq_l3_type *l3_type, + enum pp2_outq_l4_type *l4_type, + int *gen_l3_cksum, + int *gen_l4_cksum) +{ + /* + * Based on ol_flags prepare information + * for pp2_ppio_outq_desc_set_proto_info() which setups descriptor + * for offloading. + */ + if (ol_flags & PKT_TX_IPV4) { + *l3_type = PP2_OUTQ_L3_TYPE_IPV4; + *gen_l3_cksum = ol_flags & PKT_TX_IP_CKSUM ? 1 : 0; + } else if (ol_flags & PKT_TX_IPV6) { + *l3_type = PP2_OUTQ_L3_TYPE_IPV6; + /* no checksum for ipv6 header */ + *gen_l3_cksum = 0; + } else { + /* if something different then stop processing */ + return -1; + } + + ol_flags &= PKT_TX_L4_MASK; + if ((packet_type & RTE_PTYPE_L4_TCP) && + ol_flags == PKT_TX_TCP_CKSUM) { + *l4_type = PP2_OUTQ_L4_TYPE_TCP; + *gen_l4_cksum = 1; + } else if ((packet_type & RTE_PTYPE_L4_UDP) && + ol_flags == PKT_TX_UDP_CKSUM) { + *l4_type = PP2_OUTQ_L4_TYPE_UDP; + *gen_l4_cksum = 1; + } else { + *l4_type = PP2_OUTQ_L4_TYPE_OTHER; + /* no checksum for other type */ + *gen_l4_cksum = 0; + } + + return 0; +} + +/** + * Release already sent buffers to bpool (buffer-pool). + * + * @param ppio + * Pointer to the port structure. + * @param hif + * Pointer to the MUSDK hardware interface. + * @param sq + * Pointer to the shadow queue. + * @param qid + * Queue id number. + * @param force + * Force releasing packets. + */ +static inline void +mrvl_free_sent_buffers(struct pp2_ppio *ppio, struct pp2_hif *hif, + unsigned int core_id, struct mrvl_shadow_txq *sq, + int qid, int force) +{ + struct buff_release_entry *entry; + uint16_t nb_done = 0, num = 0, skip_bufs = 0; + int i; + + pp2_ppio_get_num_outq_done(ppio, hif, qid, &nb_done); + + sq->num_to_release += nb_done; + + if (likely(!force && + sq->num_to_release < MRVL_PP2_BUF_RELEASE_BURST_SIZE)) + return; + + nb_done = sq->num_to_release; + sq->num_to_release = 0; + + for (i = 0; i < nb_done; i++) { + entry = &sq->ent[sq->tail + num]; + if (unlikely(!entry->buff.addr)) { + MRVL_LOG(ERR, + "Shadow memory @%d: cookie(%lx), pa(%lx)!", + sq->tail, (u64)entry->buff.cookie, + (u64)entry->buff.addr); + skip_bufs = 1; + goto skip; + } + + if (unlikely(!entry->bpool)) { + struct rte_mbuf *mbuf; + + mbuf = (struct rte_mbuf *) + (cookie_addr_high | entry->buff.cookie); + rte_pktmbuf_free(mbuf); + skip_bufs = 1; + goto skip; + } + + mrvl_port_bpool_size + [entry->bpool->pp2_id][entry->bpool->id][core_id]++; + num++; + if (unlikely(sq->tail + num == MRVL_PP2_TX_SHADOWQ_SIZE)) + goto skip; + continue; +skip: + if (likely(num)) + pp2_bpool_put_buffs(hif, &sq->ent[sq->tail], &num); + num += skip_bufs; + sq->tail = (sq->tail + num) & MRVL_PP2_TX_SHADOWQ_MASK; + sq->size -= num; + num = 0; + skip_bufs = 0; + } + + if (likely(num)) { + pp2_bpool_put_buffs(hif, &sq->ent[sq->tail], &num); + sq->tail = (sq->tail + num) & MRVL_PP2_TX_SHADOWQ_MASK; + sq->size -= num; + } +} + +/** + * DPDK callback for transmit. + * + * @param txq + * Generic pointer transmit queue. + * @param tx_pkts + * Packets to transmit. + * @param nb_pkts + * Number of packets in array. + * + * @return + * Number of packets successfully transmitted. 
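+ *
+ * Every queued mbuf is recorded in the per-lcore shadow queue and is
+ * released (back to the hardware bpool, or via rte_pktmbuf_free() when no
+ * bpool is associated with the buffer) only once the hardware reports the
+ * corresponding descriptors as done in mrvl_free_sent_buffers().
+ *
+ * Illustrative application-side call (port_id, queue_id, pkts and nb
+ * assumed):
+ * @code
+ * uint16_t sent = rte_eth_tx_burst(port_id, queue_id, pkts, nb);
+ * @endcode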
+ */ +static uint16_t +mrvl_tx_pkt_burst(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) +{ + struct mrvl_txq *q = txq; + struct mrvl_shadow_txq *sq; + struct pp2_hif *hif; + struct pp2_ppio_desc descs[nb_pkts]; + unsigned int core_id = rte_lcore_id(); + int i, ret, bytes_sent = 0; + uint16_t num, sq_free_size; + uint64_t addr; + + hif = mrvl_get_hif(q->priv, core_id); + sq = &q->shadow_txqs[core_id]; + + if (unlikely(!q->priv->ppio || !hif)) + return 0; + + if (sq->size) + mrvl_free_sent_buffers(q->priv->ppio, hif, core_id, + sq, q->queue_id, 0); + + sq_free_size = MRVL_PP2_TX_SHADOWQ_SIZE - sq->size - 1; + if (unlikely(nb_pkts > sq_free_size)) { + MRVL_LOG(DEBUG, + "No room in shadow queue for %d packets! %d packets will be sent.", + nb_pkts, sq_free_size); + nb_pkts = sq_free_size; + } + + for (i = 0; i < nb_pkts; i++) { + struct rte_mbuf *mbuf = tx_pkts[i]; + int gen_l3_cksum, gen_l4_cksum; + enum pp2_outq_l3_type l3_type; + enum pp2_outq_l4_type l4_type; + + if (likely(nb_pkts - i > MRVL_MUSDK_PREFETCH_SHIFT)) { + struct rte_mbuf *pref_pkt_hdr; + + pref_pkt_hdr = tx_pkts[i + MRVL_MUSDK_PREFETCH_SHIFT]; + rte_mbuf_prefetch_part1(pref_pkt_hdr); + rte_mbuf_prefetch_part2(pref_pkt_hdr); + } + + mrvl_fill_shadowq(sq, mbuf); + mrvl_fill_desc(&descs[i], mbuf); + + bytes_sent += rte_pktmbuf_pkt_len(mbuf); + /* + * in case unsupported ol_flags were passed + * do not update descriptor offload information + */ + ret = mrvl_prepare_proto_info(mbuf->ol_flags, mbuf->packet_type, + &l3_type, &l4_type, &gen_l3_cksum, + &gen_l4_cksum); + if (unlikely(ret)) + continue; + + pp2_ppio_outq_desc_set_proto_info(&descs[i], l3_type, l4_type, + mbuf->l2_len, + mbuf->l2_len + mbuf->l3_len, + gen_l3_cksum, gen_l4_cksum); + } + + num = nb_pkts; + pp2_ppio_send(q->priv->ppio, hif, q->queue_id, descs, &nb_pkts); + /* number of packets that were not sent */ + if (unlikely(num > nb_pkts)) { + for (i = nb_pkts; i < num; i++) { + sq->head = (MRVL_PP2_TX_SHADOWQ_SIZE + sq->head - 1) & + MRVL_PP2_TX_SHADOWQ_MASK; + addr = cookie_addr_high | sq->ent[sq->head].buff.cookie; + bytes_sent -= + rte_pktmbuf_pkt_len((struct rte_mbuf *)addr); + } + sq->size -= num - nb_pkts; + } + + q->bytes_sent += bytes_sent; + + return nb_pkts; +} + +/** DPDK callback for S/G transmit. + * + * @param txq + * Generic pointer transmit queue. + * @param tx_pkts + * Packets to transmit. + * @param nb_pkts + * Number of packets in array. + * + * @return + * Number of packets successfully transmitted. 
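+ *
+ * Scatter/gather variant of mrvl_tx_pkt_burst(): each mbuf segment takes
+ * one descriptor, a packet may use at most PP2_PPIO_DESC_NUM_FRAGS
+ * descriptors, and only the last of a packet's shadow queue entries
+ * stores the head mbuf so the whole chain is freed exactly once.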
+ */ +static uint16_t +mrvl_tx_sg_pkt_burst(void *txq, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + struct mrvl_txq *q = txq; + struct mrvl_shadow_txq *sq; + struct pp2_hif *hif; + struct pp2_ppio_desc descs[nb_pkts * PP2_PPIO_DESC_NUM_FRAGS]; + struct pp2_ppio_sg_pkts pkts; + uint8_t frags[nb_pkts]; + unsigned int core_id = rte_lcore_id(); + int i, j, ret, bytes_sent = 0; + int tail, tail_first; + uint16_t num, sq_free_size; + uint16_t nb_segs, total_descs = 0; + uint64_t addr; + + hif = mrvl_get_hif(q->priv, core_id); + sq = &q->shadow_txqs[core_id]; + pkts.frags = frags; + pkts.num = 0; + + if (unlikely(!q->priv->ppio || !hif)) + return 0; + + if (sq->size) + mrvl_free_sent_buffers(q->priv->ppio, hif, core_id, + sq, q->queue_id, 0); + + /* Save shadow queue free size */ + sq_free_size = MRVL_PP2_TX_SHADOWQ_SIZE - sq->size - 1; + + tail = 0; + for (i = 0; i < nb_pkts; i++) { + struct rte_mbuf *mbuf = tx_pkts[i]; + struct rte_mbuf *seg = NULL; + int gen_l3_cksum, gen_l4_cksum; + enum pp2_outq_l3_type l3_type; + enum pp2_outq_l4_type l4_type; + + nb_segs = mbuf->nb_segs; + tail_first = tail; + total_descs += nb_segs; + + /* + * Check if total_descs does not exceed + * shadow queue free size + */ + if (unlikely(total_descs > sq_free_size)) { + total_descs -= nb_segs; + RTE_LOG(DEBUG, PMD, + "No room in shadow queue for %d packets! " + "%d packets will be sent.\n", + nb_pkts, i); + break; + } + + /* Check if nb_segs does not exceed the max nb of desc per + * fragmented packet + */ + if (nb_segs > PP2_PPIO_DESC_NUM_FRAGS) { + total_descs -= nb_segs; + RTE_LOG(ERR, PMD, + "Too many segments. Packet won't be sent.\n"); + break; + } + + if (likely(nb_pkts - i > MRVL_MUSDK_PREFETCH_SHIFT)) { + struct rte_mbuf *pref_pkt_hdr; + + pref_pkt_hdr = tx_pkts[i + MRVL_MUSDK_PREFETCH_SHIFT]; + rte_mbuf_prefetch_part1(pref_pkt_hdr); + rte_mbuf_prefetch_part2(pref_pkt_hdr); + } + + pkts.frags[pkts.num] = nb_segs; + pkts.num++; + + seg = mbuf; + for (j = 0; j < nb_segs - 1; j++) { + /* For the subsequent segments, set shadow queue + * buffer to NULL + */ + mrvl_fill_shadowq(sq, NULL); + mrvl_fill_desc(&descs[tail], seg); + + tail++; + seg = seg->next; + } + /* Put first mbuf info in last shadow queue entry */ + mrvl_fill_shadowq(sq, mbuf); + /* Update descriptor with last segment */ + mrvl_fill_desc(&descs[tail++], seg); + + bytes_sent += rte_pktmbuf_pkt_len(mbuf); + /* In case unsupported ol_flags were passed + * do not update descriptor offload information + */ + ret = mrvl_prepare_proto_info(mbuf->ol_flags, mbuf->packet_type, + &l3_type, &l4_type, &gen_l3_cksum, + &gen_l4_cksum); + if (unlikely(ret)) + continue; + + pp2_ppio_outq_desc_set_proto_info(&descs[tail_first], l3_type, + l4_type, mbuf->l2_len, + mbuf->l2_len + mbuf->l3_len, + gen_l3_cksum, gen_l4_cksum); + } + + num = total_descs; + pp2_ppio_send_sg(q->priv->ppio, hif, q->queue_id, descs, + &total_descs, &pkts); + /* number of packets that were not sent */ + if (unlikely(num > total_descs)) { + for (i = total_descs; i < num; i++) { + sq->head = (MRVL_PP2_TX_SHADOWQ_SIZE + sq->head - 1) & + MRVL_PP2_TX_SHADOWQ_MASK; + + addr = sq->ent[sq->head].buff.cookie; + if (addr) + bytes_sent -= + rte_pktmbuf_pkt_len((struct rte_mbuf *) + (cookie_addr_high | addr)); + } + sq->size -= num - total_descs; + nb_pkts = pkts.num; + } + + q->bytes_sent += bytes_sent; + + return nb_pkts; +} + +/** + * Initialize packet processor. + * + * @return + * 0 on success, negative error value otherwise. 
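+ *
+ * The MRVL_MUSDK_*_RESERVED maps passed here mark hif, bpool and RSS
+ * table ranges as reserved so the PMD does not allocate resources that
+ * are expected to be in use elsewhere (e.g. by the kernel driver).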
+ */ +static int +mrvl_init_pp2(void) +{ + struct pp2_init_params init_params; + + memset(&init_params, 0, sizeof(init_params)); + init_params.hif_reserved_map = MRVL_MUSDK_HIFS_RESERVED; + init_params.bm_pool_reserved_map = MRVL_MUSDK_BPOOLS_RESERVED; + init_params.rss_tbl_reserved_map = MRVL_MUSDK_RSS_RESERVED; + + return pp2_init(&init_params); +} + +/** + * Deinitialize packet processor. + * + * @return + * 0 on success, negative error value otherwise. + */ +static void +mrvl_deinit_pp2(void) +{ + pp2_deinit(); +} + +/** + * Create private device structure. + * + * @param dev_name + * Pointer to the port name passed in the initialization parameters. + * + * @return + * Pointer to the newly allocated private device structure. + */ +static struct mrvl_priv * +mrvl_priv_create(const char *dev_name) +{ + struct pp2_bpool_params bpool_params; + char match[MRVL_MATCH_LEN]; + struct mrvl_priv *priv; + int ret, bpool_bit; + + priv = rte_zmalloc_socket(dev_name, sizeof(*priv), 0, rte_socket_id()); + if (!priv) + return NULL; + + ret = pp2_netdev_get_ppio_info((char *)(uintptr_t)dev_name, + &priv->pp_id, &priv->ppio_id); + if (ret) + goto out_free_priv; + + bpool_bit = mrvl_reserve_bit(&used_bpools[priv->pp_id], + PP2_BPOOL_NUM_POOLS); + if (bpool_bit < 0) + goto out_free_priv; + priv->bpool_bit = bpool_bit; + + snprintf(match, sizeof(match), "pool-%d:%d", priv->pp_id, + priv->bpool_bit); + memset(&bpool_params, 0, sizeof(bpool_params)); + bpool_params.match = match; + bpool_params.buff_len = MRVL_PKT_SIZE_MAX + MRVL_PKT_EFFEC_OFFS; + ret = pp2_bpool_init(&bpool_params, &priv->bpool); + if (ret) + goto out_clear_bpool_bit; + + priv->ppio_params.type = PP2_PPIO_T_NIC; + rte_spinlock_init(&priv->lock); + + return priv; +out_clear_bpool_bit: + used_bpools[priv->pp_id] &= ~(1 << priv->bpool_bit); +out_free_priv: + rte_free(priv); + return NULL; +} + +/** + * Create device representing Ethernet port. + * + * @param name + * Pointer to the port's name. + * + * @return + * 0 on success, negative error value otherwise. + */ +static int +mrvl_eth_dev_create(struct rte_vdev_device *vdev, const char *name) +{ + int ret, fd = socket(AF_INET, SOCK_DGRAM, 0); + struct rte_eth_dev *eth_dev; + struct mrvl_priv *priv; + struct ifreq req; + + eth_dev = rte_eth_dev_allocate(name); + if (!eth_dev) + return -ENOMEM; + + priv = mrvl_priv_create(name); + if (!priv) { + ret = -ENOMEM; + goto out_free; + } + eth_dev->data->dev_private = priv; + + eth_dev->data->mac_addrs = + rte_zmalloc("mac_addrs", + RTE_ETHER_ADDR_LEN * MRVL_MAC_ADDRS_MAX, 0); + if (!eth_dev->data->mac_addrs) { + MRVL_LOG(ERR, "Failed to allocate space for eth addrs"); + ret = -ENOMEM; + goto out_free; + } + + memset(&req, 0, sizeof(req)); + strcpy(req.ifr_name, name); + ret = ioctl(fd, SIOCGIFHWADDR, &req); + if (ret) + goto out_free; + + memcpy(eth_dev->data->mac_addrs[0].addr_bytes, + req.ifr_addr.sa_data, RTE_ETHER_ADDR_LEN); + + eth_dev->data->kdrv = RTE_KDRV_NONE; + eth_dev->device = &vdev->device; + eth_dev->rx_pkt_burst = mrvl_rx_pkt_burst; + mrvl_set_tx_function(eth_dev); + eth_dev->dev_ops = &mrvl_ops; + + /* Flag to call rte_eth_dev_release_port() in rte_eth_dev_close(). */ + eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE; + + rte_eth_dev_probing_finish(eth_dev); + return 0; +out_free: + rte_eth_dev_release_port(eth_dev); + + return ret; +} + +/** + * Callback used by rte_kvargs_process() during argument parsing. + * + * @param key + * Pointer to the parsed key (unused). + * @param value + * Pointer to the parsed value. 
+ * @param extra_args + * Pointer to the extra arguments which contains address of the + * table of pointers to parsed interface names. + * + * @return + * Always 0. + */ +static int +mrvl_get_ifnames(const char *key __rte_unused, const char *value, + void *extra_args) +{ + struct mrvl_ifnames *ifnames = extra_args; + + ifnames->names[ifnames->idx++] = value; + + return 0; +} + +/** + * Deinitialize per-lcore MUSDK hardware interfaces (hifs). + */ +static void +mrvl_deinit_hifs(void) +{ + int i; + + for (i = mrvl_lcore_first; i <= mrvl_lcore_last; i++) { + if (hifs[i]) + pp2_hif_deinit(hifs[i]); + } + used_hifs = MRVL_MUSDK_HIFS_RESERVED; + memset(hifs, 0, sizeof(hifs)); +} + +/** + * DPDK callback to register the virtual device. + * + * @param vdev + * Pointer to the virtual device. + * + * @return + * 0 on success, negative error value otherwise. + */ +static int +rte_pmd_mrvl_probe(struct rte_vdev_device *vdev) +{ + struct rte_kvargs *kvlist; + struct mrvl_ifnames ifnames; + int ret = -EINVAL; + uint32_t i, ifnum, cfgnum; + const char *params; + + params = rte_vdev_device_args(vdev); + if (!params) + return -EINVAL; + + kvlist = rte_kvargs_parse(params, valid_args); + if (!kvlist) + return -EINVAL; + + ifnum = rte_kvargs_count(kvlist, MRVL_IFACE_NAME_ARG); + if (ifnum > RTE_DIM(ifnames.names)) + goto out_free_kvlist; + + ifnames.idx = 0; + rte_kvargs_process(kvlist, MRVL_IFACE_NAME_ARG, + mrvl_get_ifnames, &ifnames); + + + /* + * The below system initialization should be done only once, + * on the first provided configuration file + */ + if (!mrvl_qos_cfg) { + cfgnum = rte_kvargs_count(kvlist, MRVL_CFG_ARG); + MRVL_LOG(INFO, "Parsing config file!"); + if (cfgnum > 1) { + MRVL_LOG(ERR, "Cannot handle more than one config file!"); + goto out_free_kvlist; + } else if (cfgnum == 1) { + rte_kvargs_process(kvlist, MRVL_CFG_ARG, + mrvl_get_qoscfg, &mrvl_qos_cfg); + } + } + + if (mrvl_dev_num) + goto init_devices; + + MRVL_LOG(INFO, "Perform MUSDK initializations"); + + ret = rte_mvep_init(MVEP_MOD_T_PP2, kvlist); + if (ret) + goto out_free_kvlist; + + ret = mrvl_init_pp2(); + if (ret) { + MRVL_LOG(ERR, "Failed to init PP!"); + rte_mvep_deinit(MVEP_MOD_T_PP2); + goto out_free_kvlist; + } + + memset(mrvl_port_bpool_size, 0, sizeof(mrvl_port_bpool_size)); + memset(mrvl_port_to_bpool_lookup, 0, sizeof(mrvl_port_to_bpool_lookup)); + + mrvl_lcore_first = RTE_MAX_LCORE; + mrvl_lcore_last = 0; + +init_devices: + for (i = 0; i < ifnum; i++) { + MRVL_LOG(INFO, "Creating %s", ifnames.names[i]); + ret = mrvl_eth_dev_create(vdev, ifnames.names[i]); + if (ret) + goto out_cleanup; + mrvl_dev_num++; + } + + rte_kvargs_free(kvlist); + + return 0; +out_cleanup: + rte_pmd_mrvl_remove(vdev); + +out_free_kvlist: + rte_kvargs_free(kvlist); + + return ret; +} + +/** + * DPDK callback to remove virtual device. + * + * @param vdev + * Pointer to the removed virtual device. + * + * @return + * 0 on success, negative error value otherwise. 
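+ *
+ * Closes every ethdev backed by this vdev; since the ports are created
+ * with RTE_ETH_DEV_CLOSE_REMOVE, rte_eth_dev_close() both invokes
+ * mrvl_dev_close() and releases the port, and closing the last port also
+ * tears down the shared MUSDK state.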
+ */ +static int +rte_pmd_mrvl_remove(struct rte_vdev_device *vdev) +{ + uint16_t port_id; + + RTE_ETH_FOREACH_DEV(port_id) { + if (rte_eth_devices[port_id].device != &vdev->device) + continue; + rte_eth_dev_close(port_id); + } + + return 0; +} + +static struct rte_vdev_driver pmd_mrvl_drv = { + .probe = rte_pmd_mrvl_probe, + .remove = rte_pmd_mrvl_remove, +}; + +RTE_PMD_REGISTER_VDEV(net_mvpp2, pmd_mrvl_drv); +RTE_PMD_REGISTER_ALIAS(net_mvpp2, eth_mvpp2); + +RTE_INIT(mrvl_init_log) +{ + mrvl_logtype = rte_log_register("pmd.net.mvpp2"); + if (mrvl_logtype >= 0) + rte_log_set_level(mrvl_logtype, RTE_LOG_NOTICE); +} diff --git a/src/spdk/dpdk/drivers/net/mvpp2/mrvl_ethdev.h b/src/spdk/dpdk/drivers/net/mvpp2/mrvl_ethdev.h new file mode 100644 index 000000000..db6632f5b --- /dev/null +++ b/src/spdk/dpdk/drivers/net/mvpp2/mrvl_ethdev.h @@ -0,0 +1,230 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Marvell International Ltd. + * Copyright(c) 2017 Semihalf. + * All rights reserved. + */ + +#ifndef _MRVL_ETHDEV_H_ +#define _MRVL_ETHDEV_H_ + +#include +#include +#include +#include + +/* + * container_of is defined by both DPDK and MUSDK, + * we'll declare only one version. + * + * Note that it is not used in this PMD anyway. + */ +#ifdef container_of +#undef container_of +#endif + +#include +#include +#include +#include +#include +#include +#include "env/mv_common.h" /* for BIT() */ + +/** Maximum number of rx queues per port */ +#define MRVL_PP2_RXQ_MAX 32 + +/** Maximum number of tx queues per port */ +#define MRVL_PP2_TXQ_MAX 8 + +/** Minimum number of descriptors in tx queue */ +#define MRVL_PP2_TXD_MIN 16 + +/** Maximum number of descriptors in tx queue */ +#define MRVL_PP2_TXD_MAX 2048 + +/** Tx queue descriptors alignment */ +#define MRVL_PP2_TXD_ALIGN 16 + +/** Minimum number of descriptors in rx queue */ +#define MRVL_PP2_RXD_MIN 16 + +/** Maximum number of descriptors in rx queue */ +#define MRVL_PP2_RXD_MAX 2048 + +/** Rx queue descriptors alignment */ +#define MRVL_PP2_RXD_ALIGN 16 + +/** Maximum number of descriptors in tx aggregated queue */ +#define MRVL_PP2_AGGR_TXQD_MAX 2048 + +/** Maximum number of Traffic Classes. */ +#define MRVL_PP2_TC_MAX 8 + +/** Packet offset inside RX buffer. */ +#define MRVL_PKT_OFFS 64 + +/** Maximum number of descriptors in shadow queue. Must be power of 2 */ +#define MRVL_PP2_TX_SHADOWQ_SIZE MRVL_PP2_TXD_MAX + +/** Shadow queue size mask (since shadow queue size is power of 2) */ +#define MRVL_PP2_TX_SHADOWQ_MASK (MRVL_PP2_TX_SHADOWQ_SIZE - 1) + +/** Minimum number of sent buffers to release from shadow queue to BM */ +#define MRVL_PP2_BUF_RELEASE_BURST_SIZE 64 + +#define MRVL_PP2_VLAN_TAG_LEN 4 +#define MRVL_PP2_ETH_HDRS_LEN (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + \ + (2 * MRVL_PP2_VLAN_TAG_LEN)) +#define MRVL_PP2_HDRS_LEN (MV_MH_SIZE + MRVL_PP2_ETH_HDRS_LEN) +#define MRVL_PP2_MTU_TO_MRU(mtu) ((mtu) + MRVL_PP2_HDRS_LEN) +#define MRVL_PP2_MRU_TO_MTU(mru) ((mru) - MRVL_PP2_HDRS_LEN) + +/** Maximum length of a match string */ +#define MRVL_MATCH_LEN 16 + +/** Parsed fields in processed rte_flow_item. 
*/ +enum mrvl_parsed_fields { + /* eth flags */ + F_DMAC = BIT(0), + F_SMAC = BIT(1), + F_TYPE = BIT(2), + /* vlan flags */ + F_VLAN_PRI = BIT(3), + F_VLAN_ID = BIT(4), + F_VLAN_TCI = BIT(5), /* not supported by MUSDK yet */ + /* ip4 flags */ + F_IP4_TOS = BIT(6), + F_IP4_SIP = BIT(7), + F_IP4_DIP = BIT(8), + F_IP4_PROTO = BIT(9), + /* ip6 flags */ + F_IP6_TC = BIT(10), /* not supported by MUSDK yet */ + F_IP6_SIP = BIT(11), + F_IP6_DIP = BIT(12), + F_IP6_FLOW = BIT(13), + F_IP6_NEXT_HDR = BIT(14), + /* tcp flags */ + F_TCP_SPORT = BIT(15), + F_TCP_DPORT = BIT(16), + /* udp flags */ + F_UDP_SPORT = BIT(17), + F_UDP_DPORT = BIT(18), +}; + +/** PMD-specific definition of a flow rule handle. */ +struct mrvl_mtr; +struct rte_flow { + LIST_ENTRY(rte_flow) next; + struct mrvl_mtr *mtr; + + enum mrvl_parsed_fields pattern; + + struct pp2_cls_tbl_rule rule; + struct pp2_cls_cos_desc cos; + struct pp2_cls_tbl_action action; +}; + +struct mrvl_mtr_profile { + LIST_ENTRY(mrvl_mtr_profile) next; + uint32_t profile_id; + int refcnt; + struct rte_mtr_meter_profile profile; +}; + +struct mrvl_mtr { + LIST_ENTRY(mrvl_mtr) next; + uint32_t mtr_id; + int refcnt; + int shared; + int enabled; + int plcr_bit; + struct mrvl_mtr_profile *profile; + struct pp2_cls_plcr *plcr; +}; + +struct mrvl_tm_shaper_profile { + LIST_ENTRY(mrvl_tm_shaper_profile) next; + uint32_t id; + int refcnt; + struct rte_tm_shaper_params params; +}; + +enum { + MRVL_NODE_PORT, + MRVL_NODE_QUEUE, +}; + +struct mrvl_tm_node { + LIST_ENTRY(mrvl_tm_node) next; + uint32_t id; + uint32_t type; + int refcnt; + struct mrvl_tm_node *parent; + struct mrvl_tm_shaper_profile *profile; + uint8_t weight; + uint64_t stats_mask; +}; + +struct mrvl_priv { + /* Hot fields, used in fast path. */ + struct pp2_bpool *bpool; /**< BPool pointer */ + struct pp2_ppio *ppio; /**< Port handler pointer */ + rte_spinlock_t lock; /**< Spinlock for checking bpool status */ + uint16_t bpool_max_size; /**< BPool maximum size */ + uint16_t bpool_min_size; /**< BPool minimum size */ + uint16_t bpool_init_size; /**< Configured BPool size */ + + /** Mapping for DPDK rx queue->(TC, MRVL relative inq) */ + struct { + uint8_t tc; /**< Traffic Class */ + uint8_t inq; /**< Relative in-queue number */ + } rxq_map[MRVL_PP2_RXQ_MAX] __rte_cache_aligned; + + /* Configuration data, used sporadically. */ + uint8_t pp_id; + uint8_t ppio_id; + uint8_t bpool_bit; + uint8_t rss_hf_tcp; + uint8_t uc_mc_flushed; + uint8_t vlan_flushed; + uint8_t isolated; + uint8_t multiseg; + + struct pp2_ppio_params ppio_params; + struct pp2_cls_qos_tbl_params qos_tbl_params; + struct pp2_cls_tbl *qos_tbl; + uint16_t nb_rx_queues; + + struct pp2_cls_tbl_params cls_tbl_params; + struct pp2_cls_tbl *cls_tbl; + uint32_t cls_tbl_pattern; + LIST_HEAD(mrvl_flows, rte_flow) flows; + + struct pp2_cls_plcr *default_policer; + + LIST_HEAD(profiles, mrvl_mtr_profile) profiles; + LIST_HEAD(mtrs, mrvl_mtr) mtrs; + uint32_t used_plcrs; + + LIST_HEAD(shaper_profiles, mrvl_tm_shaper_profile) shaper_profiles; + LIST_HEAD(nodes, mrvl_tm_node) nodes; + uint64_t rate_max; +}; + +/** Flow operations forward declaration. */ +extern const struct rte_flow_ops mrvl_flow_ops; + +/** Meter operations forward declaration. */ +extern const struct rte_mtr_ops mrvl_mtr_ops; + +/** Traffic manager operations forward declaration. */ +extern const struct rte_tm_ops mrvl_tm_ops; + +/** Current log type. */ +extern int mrvl_logtype; + +#define MRVL_LOG(level, fmt, args...) 
\ + rte_log(RTE_LOG_ ## level, mrvl_logtype, "%s(): " fmt "\n", \ + __func__, ##args) + +#endif /* _MRVL_ETHDEV_H_ */ diff --git a/src/spdk/dpdk/drivers/net/mvpp2/mrvl_flow.c b/src/spdk/dpdk/drivers/net/mvpp2/mrvl_flow.c new file mode 100644 index 000000000..ea4325528 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/mvpp2/mrvl_flow.c @@ -0,0 +1,2824 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Marvell International Ltd. + * Copyright(c) 2018 Semihalf. + * All rights reserved. + */ + +#include +#include +#include +#include + +#include + +#include "mrvl_flow.h" +#include "mrvl_qos.h" + +/** Number of rules in the classifier table. */ +#define MRVL_CLS_MAX_NUM_RULES 20 + +/** Size of the classifier key and mask strings. */ +#define MRVL_CLS_STR_SIZE_MAX 40 + +static const enum rte_flow_item_type pattern_eth[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_END +}; + +static const enum rte_flow_item_type pattern_eth_vlan[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_END +}; + +static const enum rte_flow_item_type pattern_eth_vlan_ip[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_END +}; + +static const enum rte_flow_item_type pattern_eth_vlan_ip6[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_END +}; + +static const enum rte_flow_item_type pattern_eth_ip4[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_END +}; + +static const enum rte_flow_item_type pattern_eth_ip4_tcp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_END +}; + +static const enum rte_flow_item_type pattern_eth_ip4_udp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_END +}; + +static const enum rte_flow_item_type pattern_eth_ip6[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_END +}; + +static const enum rte_flow_item_type pattern_eth_ip6_tcp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_END +}; + +static const enum rte_flow_item_type pattern_eth_ip6_udp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_END +}; + +static const enum rte_flow_item_type pattern_vlan[] = { + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_END +}; + +static const enum rte_flow_item_type pattern_vlan_ip[] = { + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_END +}; + +static const enum rte_flow_item_type pattern_vlan_ip_tcp[] = { + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_END +}; + +static const enum rte_flow_item_type pattern_vlan_ip_udp[] = { + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_END +}; + +static const enum rte_flow_item_type pattern_vlan_ip6[] = { + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_END +}; + +static const enum rte_flow_item_type pattern_vlan_ip6_tcp[] = { + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_END +}; + +static const enum rte_flow_item_type pattern_vlan_ip6_udp[] = { + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_END +}; + +static const enum rte_flow_item_type pattern_ip[] = { + 
RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_END +}; + +static const enum rte_flow_item_type pattern_ip6[] = { + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_END +}; + +static const enum rte_flow_item_type pattern_ip_tcp[] = { + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_END +}; + +static const enum rte_flow_item_type pattern_ip6_tcp[] = { + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_END +}; + +static const enum rte_flow_item_type pattern_ip_udp[] = { + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_END +}; + +static const enum rte_flow_item_type pattern_ip6_udp[] = { + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_END +}; + +static const enum rte_flow_item_type pattern_tcp[] = { + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_END +}; + +static const enum rte_flow_item_type pattern_udp[] = { + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_END +}; + +#define MRVL_VLAN_ID_MASK 0x0fff +#define MRVL_VLAN_PRI_MASK 0x7000 +#define MRVL_IPV4_DSCP_MASK 0xfc +#define MRVL_IPV4_ADDR_MASK 0xffffffff +#define MRVL_IPV6_FLOW_MASK 0x0fffff + +/** + * Given a flow item, return the next non-void one. + * + * @param items Pointer to the item in the table. + * @returns Next not-void item, NULL otherwise. + */ +static const struct rte_flow_item * +mrvl_next_item(const struct rte_flow_item *items) +{ + const struct rte_flow_item *item = items; + + for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { + if (item->type != RTE_FLOW_ITEM_TYPE_VOID) + return item; + } + + return NULL; +} + +/** + * Allocate memory for classifier rule key and mask fields. + * + * @param field Pointer to the classifier rule. + * @returns 0 in case of success, negative value otherwise. + */ +static int +mrvl_alloc_key_mask(struct pp2_cls_rule_key_field *field) +{ + unsigned int id = rte_socket_id(); + + field->key = rte_zmalloc_socket(NULL, MRVL_CLS_STR_SIZE_MAX, 0, id); + if (!field->key) + goto out; + + field->mask = rte_zmalloc_socket(NULL, MRVL_CLS_STR_SIZE_MAX, 0, id); + if (!field->mask) + goto out_mask; + + return 0; +out_mask: + rte_free(field->key); +out: + field->key = NULL; + field->mask = NULL; + return -1; +} + +/** + * Free memory allocated for classifier rule key and mask fields. + * + * @param field Pointer to the classifier rule. + */ +static void +mrvl_free_key_mask(struct pp2_cls_rule_key_field *field) +{ + rte_free(field->key); + rte_free(field->mask); + field->key = NULL; + field->mask = NULL; +} + +/** + * Free memory allocated for all classifier rule key and mask fields. + * + * @param rule Pointer to the classifier table rule. + */ +static void +mrvl_free_all_key_mask(struct pp2_cls_tbl_rule *rule) +{ + int i; + + for (i = 0; i < rule->num_fields; i++) + mrvl_free_key_mask(&rule->fields[i]); + rule->num_fields = 0; +} + +/* + * Initialize rte flow item parsing. + * + * @param item Pointer to the flow item. + * @param spec_ptr Pointer to the specific item pointer. + * @param mask_ptr Pointer to the specific item's mask pointer. + * @def_mask Pointer to the default mask. + * @size Size of the flow item. + * @error Pointer to the rte flow error. + * @returns 0 in case of success, negative value otherwise. 
+ */ +static int +mrvl_parse_init(const struct rte_flow_item *item, + const void **spec_ptr, + const void **mask_ptr, + const void *def_mask, + unsigned int size, + struct rte_flow_error *error) +{ + const uint8_t *spec; + const uint8_t *mask; + const uint8_t *last; + uint8_t zeros[size]; + + memset(zeros, 0, size); + + if (item == NULL) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, NULL, + "NULL item\n"); + return -rte_errno; + } + + if ((item->last != NULL || item->mask != NULL) && item->spec == NULL) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Mask or last is set without spec\n"); + return -rte_errno; + } + + /* + * If "mask" is not set, default mask is used, + * but if default mask is NULL, "mask" should be set. + */ + if (item->mask == NULL) { + if (def_mask == NULL) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, NULL, + "Mask should be specified\n"); + return -rte_errno; + } + + mask = (const uint8_t *)def_mask; + } else { + mask = (const uint8_t *)item->mask; + } + + spec = (const uint8_t *)item->spec; + last = (const uint8_t *)item->last; + + if (spec == NULL) { + rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, + NULL, "Spec should be specified\n"); + return -rte_errno; + } + + /* + * If field values in "last" are either 0 or equal to the corresponding + * values in "spec" then they are ignored. + */ + if (last != NULL && + !memcmp(last, zeros, size) && + memcmp(last, spec, size) != 0) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, NULL, + "Ranging is not supported\n"); + return -rte_errno; + } + + *spec_ptr = spec; + *mask_ptr = mask; + + return 0; +} + +/** + * Parse the eth flow item. + * + * This will create classifier rule that matches either destination or source + * mac. + * + * @param spec Pointer to the specific flow item. + * @param mask Pointer to the specific flow item's mask. + * @param parse_dst Parse either destination or source mac address. + * @param flow Pointer to the flow. + * @return 0 in case of success, negative error value otherwise. + */ +static int +mrvl_parse_mac(const struct rte_flow_item_eth *spec, + const struct rte_flow_item_eth *mask, + int parse_dst, struct rte_flow *flow) +{ + struct pp2_cls_rule_key_field *key_field; + const uint8_t *k, *m; + + if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS) + return -ENOSPC; + + if (parse_dst) { + k = spec->dst.addr_bytes; + m = mask->dst.addr_bytes; + + flow->pattern |= F_DMAC; + } else { + k = spec->src.addr_bytes; + m = mask->src.addr_bytes; + + flow->pattern |= F_SMAC; + } + + key_field = &flow->rule.fields[flow->rule.num_fields]; + mrvl_alloc_key_mask(key_field); + key_field->size = 6; + + snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, + "%02x:%02x:%02x:%02x:%02x:%02x", + k[0], k[1], k[2], k[3], k[4], k[5]); + + snprintf((char *)key_field->mask, MRVL_CLS_STR_SIZE_MAX, + "%02x:%02x:%02x:%02x:%02x:%02x", + m[0], m[1], m[2], m[3], m[4], m[5]); + + flow->rule.num_fields += 1; + + return 0; +} + +/** + * Helper for parsing the eth flow item destination mac address. + * + * @param spec Pointer to the specific flow item. + * @param mask Pointer to the specific flow item's mask. + * @param flow Pointer to the flow. + * @return 0 in case of success, negative error value otherwise. 
+ */ +static inline int +mrvl_parse_dmac(const struct rte_flow_item_eth *spec, + const struct rte_flow_item_eth *mask, + struct rte_flow *flow) +{ + return mrvl_parse_mac(spec, mask, 1, flow); +} + +/** + * Helper for parsing the eth flow item source mac address. + * + * @param spec Pointer to the specific flow item. + * @param mask Pointer to the specific flow item's mask. + * @param flow Pointer to the flow. + * @return 0 in case of success, negative error value otherwise. + */ +static inline int +mrvl_parse_smac(const struct rte_flow_item_eth *spec, + const struct rte_flow_item_eth *mask, + struct rte_flow *flow) +{ + return mrvl_parse_mac(spec, mask, 0, flow); +} + +/** + * Parse the ether type field of the eth flow item. + * + * @param spec Pointer to the specific flow item. + * @param mask Pointer to the specific flow item's mask. + * @param flow Pointer to the flow. + * @return 0 in case of success, negative error value otherwise. + */ +static int +mrvl_parse_type(const struct rte_flow_item_eth *spec, + const struct rte_flow_item_eth *mask __rte_unused, + struct rte_flow *flow) +{ + struct pp2_cls_rule_key_field *key_field; + uint16_t k; + + if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS) + return -ENOSPC; + + key_field = &flow->rule.fields[flow->rule.num_fields]; + mrvl_alloc_key_mask(key_field); + key_field->size = 2; + + k = rte_be_to_cpu_16(spec->type); + snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k); + + flow->pattern |= F_TYPE; + flow->rule.num_fields += 1; + + return 0; +} + +/** + * Parse the vid field of the vlan rte flow item. + * + * This will create classifier rule that matches vid. + * + * @param spec Pointer to the specific flow item. + * @param mask Pointer to the specific flow item's mask. + * @param flow Pointer to the flow. + * @return 0 in case of success, negative error value otherwise. + */ +static int +mrvl_parse_vlan_id(const struct rte_flow_item_vlan *spec, + const struct rte_flow_item_vlan *mask __rte_unused, + struct rte_flow *flow) +{ + struct pp2_cls_rule_key_field *key_field; + uint16_t k; + + if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS) + return -ENOSPC; + + key_field = &flow->rule.fields[flow->rule.num_fields]; + mrvl_alloc_key_mask(key_field); + key_field->size = 2; + + k = rte_be_to_cpu_16(spec->tci) & MRVL_VLAN_ID_MASK; + snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k); + + flow->pattern |= F_VLAN_ID; + flow->rule.num_fields += 1; + + return 0; +} + +/** + * Parse the pri field of the vlan rte flow item. + * + * This will create classifier rule that matches pri. + * + * @param spec Pointer to the specific flow item. + * @param mask Pointer to the specific flow item's mask. + * @param flow Pointer to the flow. + * @return 0 in case of success, negative error value otherwise. + */ +static int +mrvl_parse_vlan_pri(const struct rte_flow_item_vlan *spec, + const struct rte_flow_item_vlan *mask __rte_unused, + struct rte_flow *flow) +{ + struct pp2_cls_rule_key_field *key_field; + uint16_t k; + + if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS) + return -ENOSPC; + + key_field = &flow->rule.fields[flow->rule.num_fields]; + mrvl_alloc_key_mask(key_field); + key_field->size = 1; + + k = (rte_be_to_cpu_16(spec->tci) & MRVL_VLAN_PRI_MASK) >> 13; + snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k); + + flow->pattern |= F_VLAN_PRI; + flow->rule.num_fields += 1; + + return 0; +} + +/** + * Parse the dscp field of the ipv4 rte flow item. 
+ * + * This will create classifier rule that matches dscp field. + * + * @param spec Pointer to the specific flow item. + * @param mask Pointer to the specific flow item's mask. + * @param flow Pointer to the flow. + * @return 0 in case of success, negative error value otherwise. + */ +static int +mrvl_parse_ip4_dscp(const struct rte_flow_item_ipv4 *spec, + const struct rte_flow_item_ipv4 *mask, + struct rte_flow *flow) +{ + struct pp2_cls_rule_key_field *key_field; + uint8_t k, m; + + if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS) + return -ENOSPC; + + key_field = &flow->rule.fields[flow->rule.num_fields]; + mrvl_alloc_key_mask(key_field); + key_field->size = 1; + + k = (spec->hdr.type_of_service & MRVL_IPV4_DSCP_MASK) >> 2; + m = (mask->hdr.type_of_service & MRVL_IPV4_DSCP_MASK) >> 2; + snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k); + snprintf((char *)key_field->mask, MRVL_CLS_STR_SIZE_MAX, "%u", m); + + flow->pattern |= F_IP4_TOS; + flow->rule.num_fields += 1; + + return 0; +} + +/** + * Parse either source or destination ip addresses of the ipv4 flow item. + * + * This will create classifier rule that matches either destination + * or source ip field. + * + * @param spec Pointer to the specific flow item. + * @param mask Pointer to the specific flow item's mask. + * @param parse_dst Parse either destination or source ip address. + * @param flow Pointer to the flow. + * @return 0 in case of success, negative error value otherwise. + */ +static int +mrvl_parse_ip4_addr(const struct rte_flow_item_ipv4 *spec, + const struct rte_flow_item_ipv4 *mask, + int parse_dst, struct rte_flow *flow) +{ + struct pp2_cls_rule_key_field *key_field; + struct in_addr k; + uint32_t m; + + if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS) + return -ENOSPC; + + memset(&k, 0, sizeof(k)); + if (parse_dst) { + k.s_addr = spec->hdr.dst_addr; + m = rte_be_to_cpu_32(mask->hdr.dst_addr); + + flow->pattern |= F_IP4_DIP; + } else { + k.s_addr = spec->hdr.src_addr; + m = rte_be_to_cpu_32(mask->hdr.src_addr); + + flow->pattern |= F_IP4_SIP; + } + + key_field = &flow->rule.fields[flow->rule.num_fields]; + mrvl_alloc_key_mask(key_field); + key_field->size = 4; + + inet_ntop(AF_INET, &k, (char *)key_field->key, MRVL_CLS_STR_SIZE_MAX); + snprintf((char *)key_field->mask, MRVL_CLS_STR_SIZE_MAX, "0x%x", m); + + flow->rule.num_fields += 1; + + return 0; +} + +/** + * Helper for parsing destination ip of the ipv4 flow item. + * + * @param spec Pointer to the specific flow item. + * @param mask Pointer to the specific flow item's mask. + * @param flow Pointer to the flow. + * @return 0 in case of success, negative error value otherwise. + */ +static inline int +mrvl_parse_ip4_dip(const struct rte_flow_item_ipv4 *spec, + const struct rte_flow_item_ipv4 *mask, + struct rte_flow *flow) +{ + return mrvl_parse_ip4_addr(spec, mask, 1, flow); +} + +/** + * Helper for parsing source ip of the ipv4 flow item. + * + * @param spec Pointer to the specific flow item. + * @param mask Pointer to the specific flow item's mask. + * @param flow Pointer to the flow. + * @return 0 in case of success, negative error value otherwise. + */ +static inline int +mrvl_parse_ip4_sip(const struct rte_flow_item_ipv4 *spec, + const struct rte_flow_item_ipv4 *mask, + struct rte_flow *flow) +{ + return mrvl_parse_ip4_addr(spec, mask, 0, flow); +} + +/** + * Parse the proto field of the ipv4 rte flow item. + * + * This will create classifier rule that matches proto field. 
+ * + * @param spec Pointer to the specific flow item. + * @param mask Pointer to the specific flow item's mask. + * @param flow Pointer to the flow. + * @return 0 in case of success, negative error value otherwise. + */ +static int +mrvl_parse_ip4_proto(const struct rte_flow_item_ipv4 *spec, + const struct rte_flow_item_ipv4 *mask __rte_unused, + struct rte_flow *flow) +{ + struct pp2_cls_rule_key_field *key_field; + uint8_t k = spec->hdr.next_proto_id; + + if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS) + return -ENOSPC; + + key_field = &flow->rule.fields[flow->rule.num_fields]; + mrvl_alloc_key_mask(key_field); + key_field->size = 1; + + snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k); + + flow->pattern |= F_IP4_PROTO; + flow->rule.num_fields += 1; + + return 0; +} + +/** + * Parse either source or destination ip addresses of the ipv6 rte flow item. + * + * This will create classifier rule that matches either destination + * or source ip field. + * + * @param spec Pointer to the specific flow item. + * @param mask Pointer to the specific flow item's mask. + * @param parse_dst Parse either destination or source ipv6 address. + * @param flow Pointer to the flow. + * @return 0 in case of success, negative error value otherwise. + */ +static int +mrvl_parse_ip6_addr(const struct rte_flow_item_ipv6 *spec, + const struct rte_flow_item_ipv6 *mask, + int parse_dst, struct rte_flow *flow) +{ + struct pp2_cls_rule_key_field *key_field; + int size = sizeof(spec->hdr.dst_addr); + struct in6_addr k, m; + + if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS) + return -ENOSPC; + + memset(&k, 0, sizeof(k)); + if (parse_dst) { + memcpy(k.s6_addr, spec->hdr.dst_addr, size); + memcpy(m.s6_addr, mask->hdr.dst_addr, size); + + flow->pattern |= F_IP6_DIP; + } else { + memcpy(k.s6_addr, spec->hdr.src_addr, size); + memcpy(m.s6_addr, mask->hdr.src_addr, size); + + flow->pattern |= F_IP6_SIP; + } + + key_field = &flow->rule.fields[flow->rule.num_fields]; + mrvl_alloc_key_mask(key_field); + key_field->size = 16; + + inet_ntop(AF_INET6, &k, (char *)key_field->key, MRVL_CLS_STR_SIZE_MAX); + inet_ntop(AF_INET6, &m, (char *)key_field->mask, MRVL_CLS_STR_SIZE_MAX); + + flow->rule.num_fields += 1; + + return 0; +} + +/** + * Helper for parsing destination ip of the ipv6 flow item. + * + * @param spec Pointer to the specific flow item. + * @param mask Pointer to the specific flow item's mask. + * @param flow Pointer to the flow. + * @return 0 in case of success, negative error value otherwise. + */ +static inline int +mrvl_parse_ip6_dip(const struct rte_flow_item_ipv6 *spec, + const struct rte_flow_item_ipv6 *mask, + struct rte_flow *flow) +{ + return mrvl_parse_ip6_addr(spec, mask, 1, flow); +} + +/** + * Helper for parsing source ip of the ipv6 flow item. + * + * @param spec Pointer to the specific flow item. + * @param mask Pointer to the specific flow item's mask. + * @param flow Pointer to the flow. + * @return 0 in case of success, negative error value otherwise. + */ +static inline int +mrvl_parse_ip6_sip(const struct rte_flow_item_ipv6 *spec, + const struct rte_flow_item_ipv6 *mask, + struct rte_flow *flow) +{ + return mrvl_parse_ip6_addr(spec, mask, 0, flow); +} + +/** + * Parse the flow label of the ipv6 flow item. + * + * This will create classifier rule that matches flow field. + * + * @param spec Pointer to the specific flow item. + * @param mask Pointer to the specific flow item's mask. + * @param flow Pointer to the flow. 
+ * @return 0 in case of success, negative error value otherwise. + */ +static int +mrvl_parse_ip6_flow(const struct rte_flow_item_ipv6 *spec, + const struct rte_flow_item_ipv6 *mask, + struct rte_flow *flow) +{ + struct pp2_cls_rule_key_field *key_field; + uint32_t k = rte_be_to_cpu_32(spec->hdr.vtc_flow) & MRVL_IPV6_FLOW_MASK, + m = rte_be_to_cpu_32(mask->hdr.vtc_flow) & MRVL_IPV6_FLOW_MASK; + + if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS) + return -ENOSPC; + + key_field = &flow->rule.fields[flow->rule.num_fields]; + mrvl_alloc_key_mask(key_field); + key_field->size = 3; + + snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k); + snprintf((char *)key_field->mask, MRVL_CLS_STR_SIZE_MAX, "%u", m); + + flow->pattern |= F_IP6_FLOW; + flow->rule.num_fields += 1; + + return 0; +} + +/** + * Parse the next header of the ipv6 flow item. + * + * This will create classifier rule that matches next header field. + * + * @param spec Pointer to the specific flow item. + * @param mask Pointer to the specific flow item's mask. + * @param flow Pointer to the flow. + * @return 0 in case of success, negative error value otherwise. + */ +static int +mrvl_parse_ip6_next_hdr(const struct rte_flow_item_ipv6 *spec, + const struct rte_flow_item_ipv6 *mask __rte_unused, + struct rte_flow *flow) +{ + struct pp2_cls_rule_key_field *key_field; + uint8_t k = spec->hdr.proto; + + if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS) + return -ENOSPC; + + key_field = &flow->rule.fields[flow->rule.num_fields]; + mrvl_alloc_key_mask(key_field); + key_field->size = 1; + + snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k); + + flow->pattern |= F_IP6_NEXT_HDR; + flow->rule.num_fields += 1; + + return 0; +} + +/** + * Parse destination or source port of the tcp flow item. + * + * This will create classifier rule that matches either destination or + * source tcp port. + * + * @param spec Pointer to the specific flow item. + * @param mask Pointer to the specific flow item's mask. + * @param parse_dst Parse either destination or source port. + * @param flow Pointer to the flow. + * @return 0 in case of success, negative error value otherwise. + */ +static int +mrvl_parse_tcp_port(const struct rte_flow_item_tcp *spec, + const struct rte_flow_item_tcp *mask __rte_unused, + int parse_dst, struct rte_flow *flow) +{ + struct pp2_cls_rule_key_field *key_field; + uint16_t k; + + if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS) + return -ENOSPC; + + key_field = &flow->rule.fields[flow->rule.num_fields]; + mrvl_alloc_key_mask(key_field); + key_field->size = 2; + + if (parse_dst) { + k = rte_be_to_cpu_16(spec->hdr.dst_port); + + flow->pattern |= F_TCP_DPORT; + } else { + k = rte_be_to_cpu_16(spec->hdr.src_port); + + flow->pattern |= F_TCP_SPORT; + } + + snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k); + + flow->rule.num_fields += 1; + + return 0; +} + +/** + * Helper for parsing the tcp source port of the tcp flow item. + * + * @param spec Pointer to the specific flow item. + * @param mask Pointer to the specific flow item's mask. + * @param flow Pointer to the flow. + * @return 0 in case of success, negative error value otherwise. + */ +static inline int +mrvl_parse_tcp_sport(const struct rte_flow_item_tcp *spec, + const struct rte_flow_item_tcp *mask, + struct rte_flow *flow) +{ + return mrvl_parse_tcp_port(spec, mask, 0, flow); +} + +/** + * Helper for parsing the tcp destination port of the tcp flow item. + * + * @param spec Pointer to the specific flow item. 
+ * @param mask Pointer to the specific flow item's mask. + * @param flow Pointer to the flow. + * @return 0 in case of success, negative error value otherwise. + */ +static inline int +mrvl_parse_tcp_dport(const struct rte_flow_item_tcp *spec, + const struct rte_flow_item_tcp *mask, + struct rte_flow *flow) +{ + return mrvl_parse_tcp_port(spec, mask, 1, flow); +} + +/** + * Parse destination or source port of the udp flow item. + * + * This will create classifier rule that matches either destination or + * source udp port. + * + * @param spec Pointer to the specific flow item. + * @param mask Pointer to the specific flow item's mask. + * @param parse_dst Parse either destination or source port. + * @param flow Pointer to the flow. + * @return 0 in case of success, negative error value otherwise. + */ +static int +mrvl_parse_udp_port(const struct rte_flow_item_udp *spec, + const struct rte_flow_item_udp *mask __rte_unused, + int parse_dst, struct rte_flow *flow) +{ + struct pp2_cls_rule_key_field *key_field; + uint16_t k; + + if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS) + return -ENOSPC; + + key_field = &flow->rule.fields[flow->rule.num_fields]; + mrvl_alloc_key_mask(key_field); + key_field->size = 2; + + if (parse_dst) { + k = rte_be_to_cpu_16(spec->hdr.dst_port); + + flow->pattern |= F_UDP_DPORT; + } else { + k = rte_be_to_cpu_16(spec->hdr.src_port); + + flow->pattern |= F_UDP_SPORT; + } + + snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k); + + flow->rule.num_fields += 1; + + return 0; +} + +/** + * Helper for parsing the udp source port of the udp flow item. + * + * @param spec Pointer to the specific flow item. + * @param mask Pointer to the specific flow item's mask. + * @param flow Pointer to the flow. + * @return 0 in case of success, negative error value otherwise. + */ +static inline int +mrvl_parse_udp_sport(const struct rte_flow_item_udp *spec, + const struct rte_flow_item_udp *mask, + struct rte_flow *flow) +{ + return mrvl_parse_udp_port(spec, mask, 0, flow); +} + +/** + * Helper for parsing the udp destination port of the udp flow item. + * + * @param spec Pointer to the specific flow item. + * @param mask Pointer to the specific flow item's mask. + * @param flow Pointer to the flow. + * @return 0 in case of success, negative error value otherwise. + */ +static inline int +mrvl_parse_udp_dport(const struct rte_flow_item_udp *spec, + const struct rte_flow_item_udp *mask, + struct rte_flow *flow) +{ + return mrvl_parse_udp_port(spec, mask, 1, flow); +} + +/** + * Parse eth flow item. + * + * @param item Pointer to the flow item. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 on success, negative value otherwise. 
+ */ +static int +mrvl_parse_eth(const struct rte_flow_item *item, struct rte_flow *flow, + struct rte_flow_error *error) +{ + const struct rte_flow_item_eth *spec = NULL, *mask = NULL; + struct rte_ether_addr zero; + int ret; + + ret = mrvl_parse_init(item, (const void **)&spec, (const void **)&mask, + &rte_flow_item_eth_mask, + sizeof(struct rte_flow_item_eth), error); + if (ret) + return ret; + + memset(&zero, 0, sizeof(zero)); + + if (memcmp(&mask->dst, &zero, sizeof(mask->dst))) { + ret = mrvl_parse_dmac(spec, mask, flow); + if (ret) + goto out; + } + + if (memcmp(&mask->src, &zero, sizeof(mask->src))) { + ret = mrvl_parse_smac(spec, mask, flow); + if (ret) + goto out; + } + + if (mask->type) { + MRVL_LOG(WARNING, "eth type mask is ignored"); + ret = mrvl_parse_type(spec, mask, flow); + if (ret) + goto out; + } + + return 0; +out: + rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "Reached maximum number of fields in cls tbl key\n"); + return -rte_errno; +} + +/** + * Parse vlan flow item. + * + * @param item Pointer to the flow item. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 on success, negative value otherwise. + */ +static int +mrvl_parse_vlan(const struct rte_flow_item *item, + struct rte_flow *flow, + struct rte_flow_error *error) +{ + const struct rte_flow_item_vlan *spec = NULL, *mask = NULL; + uint16_t m; + int ret; + + ret = mrvl_parse_init(item, (const void **)&spec, (const void **)&mask, + &rte_flow_item_vlan_mask, + sizeof(struct rte_flow_item_vlan), error); + if (ret) + return ret; + + m = rte_be_to_cpu_16(mask->tci); + if (m & MRVL_VLAN_ID_MASK) { + MRVL_LOG(WARNING, "vlan id mask is ignored"); + ret = mrvl_parse_vlan_id(spec, mask, flow); + if (ret) + goto out; + } + + if (m & MRVL_VLAN_PRI_MASK) { + MRVL_LOG(WARNING, "vlan pri mask is ignored"); + ret = mrvl_parse_vlan_pri(spec, mask, flow); + if (ret) + goto out; + } + + if (flow->pattern & F_TYPE) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "VLAN TPID matching is not supported"); + return -rte_errno; + } + if (mask->inner_type) { + struct rte_flow_item_eth spec_eth = { + .type = spec->inner_type, + }; + struct rte_flow_item_eth mask_eth = { + .type = mask->inner_type, + }; + + MRVL_LOG(WARNING, "inner eth type mask is ignored"); + ret = mrvl_parse_type(&spec_eth, &mask_eth, flow); + if (ret) + goto out; + } + + return 0; +out: + rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "Reached maximum number of fields in cls tbl key\n"); + return -rte_errno; +} + +/** + * Parse ipv4 flow item. + * + * @param item Pointer to the flow item. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 on success, negative value otherwise. 
+ */ +static int +mrvl_parse_ip4(const struct rte_flow_item *item, + struct rte_flow *flow, + struct rte_flow_error *error) +{ + const struct rte_flow_item_ipv4 *spec = NULL, *mask = NULL; + int ret; + + ret = mrvl_parse_init(item, (const void **)&spec, (const void **)&mask, + &rte_flow_item_ipv4_mask, + sizeof(struct rte_flow_item_ipv4), error); + if (ret) + return ret; + + if (mask->hdr.version_ihl || + mask->hdr.total_length || + mask->hdr.packet_id || + mask->hdr.fragment_offset || + mask->hdr.time_to_live || + mask->hdr.hdr_checksum) { + rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, + NULL, "Not supported by classifier\n"); + return -rte_errno; + } + + if (mask->hdr.type_of_service & MRVL_IPV4_DSCP_MASK) { + ret = mrvl_parse_ip4_dscp(spec, mask, flow); + if (ret) + goto out; + } + + if (mask->hdr.src_addr) { + ret = mrvl_parse_ip4_sip(spec, mask, flow); + if (ret) + goto out; + } + + if (mask->hdr.dst_addr) { + ret = mrvl_parse_ip4_dip(spec, mask, flow); + if (ret) + goto out; + } + + if (mask->hdr.next_proto_id) { + MRVL_LOG(WARNING, "next proto id mask is ignored"); + ret = mrvl_parse_ip4_proto(spec, mask, flow); + if (ret) + goto out; + } + + return 0; +out: + rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "Reached maximum number of fields in cls tbl key\n"); + return -rte_errno; +} + +/** + * Parse ipv6 flow item. + * + * @param item Pointer to the flow item. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 on success, negative value otherwise. + */ +static int +mrvl_parse_ip6(const struct rte_flow_item *item, + struct rte_flow *flow, + struct rte_flow_error *error) +{ + const struct rte_flow_item_ipv6 *spec = NULL, *mask = NULL; + struct rte_ipv6_hdr zero; + uint32_t flow_mask; + int ret; + + ret = mrvl_parse_init(item, (const void **)&spec, + (const void **)&mask, + &rte_flow_item_ipv6_mask, + sizeof(struct rte_flow_item_ipv6), + error); + if (ret) + return ret; + + memset(&zero, 0, sizeof(zero)); + + if (mask->hdr.payload_len || + mask->hdr.hop_limits) { + rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, + NULL, "Not supported by classifier\n"); + return -rte_errno; + } + + if (memcmp(mask->hdr.src_addr, + zero.src_addr, sizeof(mask->hdr.src_addr))) { + ret = mrvl_parse_ip6_sip(spec, mask, flow); + if (ret) + goto out; + } + + if (memcmp(mask->hdr.dst_addr, + zero.dst_addr, sizeof(mask->hdr.dst_addr))) { + ret = mrvl_parse_ip6_dip(spec, mask, flow); + if (ret) + goto out; + } + + flow_mask = rte_be_to_cpu_32(mask->hdr.vtc_flow) & MRVL_IPV6_FLOW_MASK; + if (flow_mask) { + ret = mrvl_parse_ip6_flow(spec, mask, flow); + if (ret) + goto out; + } + + if (mask->hdr.proto) { + MRVL_LOG(WARNING, "next header mask is ignored"); + ret = mrvl_parse_ip6_next_hdr(spec, mask, flow); + if (ret) + goto out; + } + + return 0; +out: + rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "Reached maximum number of fields in cls tbl key\n"); + return -rte_errno; +} + +/** + * Parse tcp flow item. + * + * @param item Pointer to the flow item. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 on success, negative value otherwise. 
+ */ +static int +mrvl_parse_tcp(const struct rte_flow_item *item, + struct rte_flow *flow, + struct rte_flow_error *error) +{ + const struct rte_flow_item_tcp *spec = NULL, *mask = NULL; + int ret; + + ret = mrvl_parse_init(item, (const void **)&spec, (const void **)&mask, + &rte_flow_item_tcp_mask, + sizeof(struct rte_flow_item_tcp), error); + if (ret) + return ret; + + if (mask->hdr.sent_seq || + mask->hdr.recv_ack || + mask->hdr.data_off || + mask->hdr.tcp_flags || + mask->hdr.rx_win || + mask->hdr.cksum || + mask->hdr.tcp_urp) { + rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, + NULL, "Not supported by classifier\n"); + return -rte_errno; + } + + if (mask->hdr.src_port) { + MRVL_LOG(WARNING, "tcp sport mask is ignored"); + ret = mrvl_parse_tcp_sport(spec, mask, flow); + if (ret) + goto out; + } + + if (mask->hdr.dst_port) { + MRVL_LOG(WARNING, "tcp dport mask is ignored"); + ret = mrvl_parse_tcp_dport(spec, mask, flow); + if (ret) + goto out; + } + + return 0; +out: + rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "Reached maximum number of fields in cls tbl key\n"); + return -rte_errno; +} + +/** + * Parse udp flow item. + * + * @param item Pointer to the flow item. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 on success, negative value otherwise. + */ +static int +mrvl_parse_udp(const struct rte_flow_item *item, + struct rte_flow *flow, + struct rte_flow_error *error) +{ + const struct rte_flow_item_udp *spec = NULL, *mask = NULL; + int ret; + + ret = mrvl_parse_init(item, (const void **)&spec, (const void **)&mask, + &rte_flow_item_udp_mask, + sizeof(struct rte_flow_item_udp), error); + if (ret) + return ret; + + if (mask->hdr.dgram_len || + mask->hdr.dgram_cksum) { + rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, + NULL, "Not supported by classifier\n"); + return -rte_errno; + } + + if (mask->hdr.src_port) { + MRVL_LOG(WARNING, "udp sport mask is ignored"); + ret = mrvl_parse_udp_sport(spec, mask, flow); + if (ret) + goto out; + } + + if (mask->hdr.dst_port) { + MRVL_LOG(WARNING, "udp dport mask is ignored"); + ret = mrvl_parse_udp_dport(spec, mask, flow); + if (ret) + goto out; + } + + return 0; +out: + rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "Reached maximum number of fields in cls tbl key\n"); + return -rte_errno; +} + +/** + * Parse flow pattern composed of the eth item. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 in case of success, negative value otherwise. + */ +static int +mrvl_parse_pattern_eth(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error) +{ + return mrvl_parse_eth(pattern, flow, error); +} + +/** + * Parse flow pattern composed of the eth and vlan items. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 in case of success, negative value otherwise.
+ */ +static int +mrvl_parse_pattern_eth_vlan(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error) +{ + const struct rte_flow_item *item = mrvl_next_item(pattern); + int ret; + + ret = mrvl_parse_eth(item, flow, error); + if (ret) + return ret; + + item = mrvl_next_item(item + 1); + + return mrvl_parse_vlan(item, flow, error); +} + +/** + * Parse flow pattern composed of the eth, vlan and ip4/ip6 items. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @param ip6 1 to parse ip6 item, 0 to parse ip4 item. + * @returns 0 in case of success, negative value otherwise. + */ +static int +mrvl_parse_pattern_eth_vlan_ip4_ip6(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error, int ip6) +{ + const struct rte_flow_item *item = mrvl_next_item(pattern); + int ret; + + ret = mrvl_parse_eth(item, flow, error); + if (ret) + return ret; + + item = mrvl_next_item(item + 1); + ret = mrvl_parse_vlan(item, flow, error); + if (ret) + return ret; + + item = mrvl_next_item(item + 1); + + return ip6 ? mrvl_parse_ip6(item, flow, error) : + mrvl_parse_ip4(item, flow, error); +} + +/** + * Parse flow pattern composed of the eth, vlan and ipv4 items. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 in case of success, negative value otherwise. + */ +static int +mrvl_parse_pattern_eth_vlan_ip4(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error) +{ + return mrvl_parse_pattern_eth_vlan_ip4_ip6(pattern, flow, error, 0); +} + +/** + * Parse flow pattern composed of the eth, vlan and ipv6 items. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 in case of success, negative value otherwise. + */ +static int +mrvl_parse_pattern_eth_vlan_ip6(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error) +{ + return mrvl_parse_pattern_eth_vlan_ip4_ip6(pattern, flow, error, 1); +} + +/** + * Parse flow pattern composed of the eth and ip4/ip6 items. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @param ip6 1 to parse ip6 item, 0 to parse ip4 item. + * @returns 0 in case of success, negative value otherwise. + */ +static int +mrvl_parse_pattern_eth_ip4_ip6(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error, int ip6) +{ + const struct rte_flow_item *item = mrvl_next_item(pattern); + int ret; + + ret = mrvl_parse_eth(item, flow, error); + if (ret) + return ret; + + item = mrvl_next_item(item + 1); + + return ip6 ? mrvl_parse_ip6(item, flow, error) : + mrvl_parse_ip4(item, flow, error); +} + +/** + * Parse flow pattern composed of the eth and ipv4 items. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 in case of success, negative value otherwise. + */ +static inline int +mrvl_parse_pattern_eth_ip4(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error) +{ + return mrvl_parse_pattern_eth_ip4_ip6(pattern, flow, error, 0); +} + +/** + * Parse flow pattern composed of the eth and ipv6 items. 
+ * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 in case of success, negative value otherwise. + */ +static inline int +mrvl_parse_pattern_eth_ip6(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error) +{ + return mrvl_parse_pattern_eth_ip4_ip6(pattern, flow, error, 1); +} + +/** + * Parse flow pattern composed of the eth, ip4 and tcp/udp items. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @param tcp 1 to parse tcp item, 0 to parse udp item. + * @returns 0 in case of success, negative value otherwise. + */ +static int +mrvl_parse_pattern_eth_ip4_tcp_udp(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error, int tcp) +{ + const struct rte_flow_item *item = mrvl_next_item(pattern); + int ret; + + ret = mrvl_parse_pattern_eth_ip4_ip6(pattern, flow, error, 0); + if (ret) + return ret; + + item = mrvl_next_item(item + 1); + item = mrvl_next_item(item + 1); + + if (tcp) + return mrvl_parse_tcp(item, flow, error); + + return mrvl_parse_udp(item, flow, error); +} + +/** + * Parse flow pattern composed of the eth, ipv4 and tcp items. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 in case of success, negative value otherwise. + */ +static inline int +mrvl_parse_pattern_eth_ip4_tcp(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error) +{ + return mrvl_parse_pattern_eth_ip4_tcp_udp(pattern, flow, error, 1); +} + +/** + * Parse flow pattern composed of the eth, ipv4 and udp items. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 in case of success, negative value otherwise. + */ +static inline int +mrvl_parse_pattern_eth_ip4_udp(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error) +{ + return mrvl_parse_pattern_eth_ip4_tcp_udp(pattern, flow, error, 0); +} + +/** + * Parse flow pattern composed of the eth, ipv6 and tcp/udp items. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @param tcp 1 to parse tcp item, 0 to parse udp item. + * @returns 0 in case of success, negative value otherwise. + */ +static int +mrvl_parse_pattern_eth_ip6_tcp_udp(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error, int tcp) +{ + const struct rte_flow_item *item = mrvl_next_item(pattern); + int ret; + + ret = mrvl_parse_pattern_eth_ip4_ip6(pattern, flow, error, 1); + if (ret) + return ret; + + item = mrvl_next_item(item + 1); + item = mrvl_next_item(item + 1); + + if (tcp) + return mrvl_parse_tcp(item, flow, error); + + return mrvl_parse_udp(item, flow, error); +} + +/** + * Parse flow pattern composed of the eth, ipv6 and tcp items. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 in case of success, negative value otherwise. 
+ */ +static inline int +mrvl_parse_pattern_eth_ip6_tcp(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error) +{ + return mrvl_parse_pattern_eth_ip6_tcp_udp(pattern, flow, error, 1); +} + +/** + * Parse flow pattern composed of the eth, ipv6 and udp items. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 in case of success, negative value otherwise. + */ +static inline int +mrvl_parse_pattern_eth_ip6_udp(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error) +{ + return mrvl_parse_pattern_eth_ip6_tcp_udp(pattern, flow, error, 0); +} + +/** + * Parse flow pattern composed of the vlan item. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 in case of success, negative value otherwise. + */ +static int +mrvl_parse_pattern_vlan(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error) +{ + const struct rte_flow_item *item = mrvl_next_item(pattern); + + return mrvl_parse_vlan(item, flow, error); +} + +/** + * Parse flow pattern composed of the vlan and ip4/ip6 items. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @param ip6 1 to parse ip6 item, 0 to parse ip4 item. + * @returns 0 in case of success, negative value otherwise. + */ +static int +mrvl_parse_pattern_vlan_ip4_ip6(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error, int ip6) +{ + const struct rte_flow_item *item = mrvl_next_item(pattern); + int ret; + + ret = mrvl_parse_vlan(item, flow, error); + if (ret) + return ret; + + item = mrvl_next_item(item + 1); + + return ip6 ? mrvl_parse_ip6(item, flow, error) : + mrvl_parse_ip4(item, flow, error); +} + +/** + * Parse flow pattern composed of the vlan and ipv4 items. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 in case of success, negative value otherwise. + */ +static inline int +mrvl_parse_pattern_vlan_ip4(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error) +{ + return mrvl_parse_pattern_vlan_ip4_ip6(pattern, flow, error, 0); +} + +/** + * Parse flow pattern composed of the vlan, ipv4 and tcp/udp items. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 in case of success, negative value otherwise. + */ +static int +mrvl_parse_pattern_vlan_ip_tcp_udp(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error, int tcp) +{ + const struct rte_flow_item *item = mrvl_next_item(pattern); + int ret; + + ret = mrvl_parse_pattern_vlan_ip4_ip6(pattern, flow, error, 0); + if (ret) + return ret; + + item = mrvl_next_item(item + 1); + item = mrvl_next_item(item + 1); + + if (tcp) + return mrvl_parse_tcp(item, flow, error); + + return mrvl_parse_udp(item, flow, error); +} + +/** + * Parse flow pattern composed of the vlan, ipv4 and tcp items. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 in case of success, negative value otherwise. 
+ */ +static inline int +mrvl_parse_pattern_vlan_ip_tcp(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error) +{ + return mrvl_parse_pattern_vlan_ip_tcp_udp(pattern, flow, error, 1); +} + +/** + * Parse flow pattern composed of the vlan, ipv4 and udp items. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 in case of success, negative value otherwise. + */ +static inline int +mrvl_parse_pattern_vlan_ip_udp(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error) +{ + return mrvl_parse_pattern_vlan_ip_tcp_udp(pattern, flow, error, 0); +} + +/** + * Parse flow pattern composed of the vlan and ipv6 items. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 in case of success, negative value otherwise. + */ +static inline int +mrvl_parse_pattern_vlan_ip6(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error) +{ + return mrvl_parse_pattern_vlan_ip4_ip6(pattern, flow, error, 1); +} + +/** + * Parse flow pattern composed of the vlan, ipv6 and tcp/udp items. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 in case of success, negative value otherwise. + */ +static int +mrvl_parse_pattern_vlan_ip6_tcp_udp(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error, int tcp) +{ + const struct rte_flow_item *item = mrvl_next_item(pattern); + int ret; + + ret = mrvl_parse_pattern_vlan_ip4_ip6(pattern, flow, error, 1); + if (ret) + return ret; + + item = mrvl_next_item(item + 1); + item = mrvl_next_item(item + 1); + + if (tcp) + return mrvl_parse_tcp(item, flow, error); + + return mrvl_parse_udp(item, flow, error); +} + +/** + * Parse flow pattern composed of the vlan, ipv6 and tcp items. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 in case of success, negative value otherwise. + */ +static inline int +mrvl_parse_pattern_vlan_ip6_tcp(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error) +{ + return mrvl_parse_pattern_vlan_ip6_tcp_udp(pattern, flow, error, 1); +} + +/** + * Parse flow pattern composed of the vlan, ipv6 and udp items. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 in case of success, negative value otherwise. + */ +static inline int +mrvl_parse_pattern_vlan_ip6_udp(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error) +{ + return mrvl_parse_pattern_vlan_ip6_tcp_udp(pattern, flow, error, 0); +} + +/** + * Parse flow pattern composed of the ip4/ip6 item. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @param ip6 1 to parse ip6 item, 0 to parse ip4 item. + * @returns 0 in case of success, negative value otherwise. + */ +static int +mrvl_parse_pattern_ip4_ip6(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error, int ip6) +{ + const struct rte_flow_item *item = mrvl_next_item(pattern); + + return ip6 ? 
mrvl_parse_ip6(item, flow, error) : + mrvl_parse_ip4(item, flow, error); +} + +/** + * Parse flow pattern composed of the ipv4 item. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 in case of success, negative value otherwise. + */ +static inline int +mrvl_parse_pattern_ip4(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error) +{ + return mrvl_parse_pattern_ip4_ip6(pattern, flow, error, 0); +} + +/** + * Parse flow pattern composed of the ipv6 item. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 in case of success, negative value otherwise. + */ +static inline int +mrvl_parse_pattern_ip6(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error) +{ + return mrvl_parse_pattern_ip4_ip6(pattern, flow, error, 1); +} + +/** + * Parse flow pattern composed of the ip4/ip6 and tcp items. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @param ip6 1 to parse ip6 item, 0 to parse ip4 item. + * @returns 0 in case of success, negative value otherwise. + */ +static int +mrvl_parse_pattern_ip4_ip6_tcp(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error, int ip6) +{ + const struct rte_flow_item *item = mrvl_next_item(pattern); + int ret; + + ret = ip6 ? mrvl_parse_ip6(item, flow, error) : + mrvl_parse_ip4(item, flow, error); + if (ret) + return ret; + + item = mrvl_next_item(item + 1); + + return mrvl_parse_tcp(item, flow, error); +} + +/** + * Parse flow pattern composed of the ipv4 and tcp items. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 in case of success, negative value otherwise. + */ +static inline int +mrvl_parse_pattern_ip4_tcp(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error) +{ + return mrvl_parse_pattern_ip4_ip6_tcp(pattern, flow, error, 0); +} + +/** + * Parse flow pattern composed of the ipv6 and tcp items. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 in case of success, negative value otherwise. + */ +static inline int +mrvl_parse_pattern_ip6_tcp(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error) +{ + return mrvl_parse_pattern_ip4_ip6_tcp(pattern, flow, error, 1); +} + +/** + * Parse flow pattern composed of the ipv4/ipv6 and udp items. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @param ip6 1 to parse ip6 item, 0 to parse ip4 item. + * @returns 0 in case of success, negative value otherwise. + */ +static int +mrvl_parse_pattern_ip4_ip6_udp(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error, int ip6) +{ + const struct rte_flow_item *item = mrvl_next_item(pattern); + int ret; + + ret = ip6 ? 
mrvl_parse_ip6(item, flow, error) : + mrvl_parse_ip4(item, flow, error); + if (ret) + return ret; + + item = mrvl_next_item(item + 1); + + return mrvl_parse_udp(item, flow, error); +} + +/** + * Parse flow pattern composed of the ipv4 and udp items. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 in case of success, negative value otherwise. + */ +static inline int +mrvl_parse_pattern_ip4_udp(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error) +{ + return mrvl_parse_pattern_ip4_ip6_udp(pattern, flow, error, 0); +} + +/** + * Parse flow pattern composed of the ipv6 and udp items. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 in case of success, negative value otherwise. + */ +static inline int +mrvl_parse_pattern_ip6_udp(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error) +{ + return mrvl_parse_pattern_ip4_ip6_udp(pattern, flow, error, 1); +} + +/** + * Parse flow pattern composed of the tcp item. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 in case of success, negative value otherwise. + */ +static int +mrvl_parse_pattern_tcp(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error) +{ + const struct rte_flow_item *item = mrvl_next_item(pattern); + + return mrvl_parse_tcp(item, flow, error); +} + +/** + * Parse flow pattern composed of the udp item. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 in case of success, negative value otherwise. + */ +static int +mrvl_parse_pattern_udp(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error) +{ + const struct rte_flow_item *item = mrvl_next_item(pattern); + + return mrvl_parse_udp(item, flow, error); +} + +/** + * Structure used to map specific flow pattern to the pattern parse callback + * which will iterate over each pattern item and extract relevant data. 
+ */ +static const struct { + const enum rte_flow_item_type *pattern; + int (*parse)(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error); +} mrvl_patterns[] = { + { pattern_eth, mrvl_parse_pattern_eth }, + { pattern_eth_vlan, mrvl_parse_pattern_eth_vlan }, + { pattern_eth_vlan_ip, mrvl_parse_pattern_eth_vlan_ip4 }, + { pattern_eth_vlan_ip6, mrvl_parse_pattern_eth_vlan_ip6 }, + { pattern_eth_ip4, mrvl_parse_pattern_eth_ip4 }, + { pattern_eth_ip4_tcp, mrvl_parse_pattern_eth_ip4_tcp }, + { pattern_eth_ip4_udp, mrvl_parse_pattern_eth_ip4_udp }, + { pattern_eth_ip6, mrvl_parse_pattern_eth_ip6 }, + { pattern_eth_ip6_tcp, mrvl_parse_pattern_eth_ip6_tcp }, + { pattern_eth_ip6_udp, mrvl_parse_pattern_eth_ip6_udp }, + { pattern_vlan, mrvl_parse_pattern_vlan }, + { pattern_vlan_ip, mrvl_parse_pattern_vlan_ip4 }, + { pattern_vlan_ip_tcp, mrvl_parse_pattern_vlan_ip_tcp }, + { pattern_vlan_ip_udp, mrvl_parse_pattern_vlan_ip_udp }, + { pattern_vlan_ip6, mrvl_parse_pattern_vlan_ip6 }, + { pattern_vlan_ip6_tcp, mrvl_parse_pattern_vlan_ip6_tcp }, + { pattern_vlan_ip6_udp, mrvl_parse_pattern_vlan_ip6_udp }, + { pattern_ip, mrvl_parse_pattern_ip4 }, + { pattern_ip_tcp, mrvl_parse_pattern_ip4_tcp }, + { pattern_ip_udp, mrvl_parse_pattern_ip4_udp }, + { pattern_ip6, mrvl_parse_pattern_ip6 }, + { pattern_ip6_tcp, mrvl_parse_pattern_ip6_tcp }, + { pattern_ip6_udp, mrvl_parse_pattern_ip6_udp }, + { pattern_tcp, mrvl_parse_pattern_tcp }, + { pattern_udp, mrvl_parse_pattern_udp } +}; + +/** + * Check whether provided pattern matches any of the supported ones. + * + * @param type_pattern Pointer to the pattern type. + * @param item_pattern Pointer to the flow pattern. + * @returns 1 in case of success, 0 value otherwise. + */ +static int +mrvl_patterns_match(const enum rte_flow_item_type *type_pattern, + const struct rte_flow_item *item_pattern) +{ + const enum rte_flow_item_type *type = type_pattern; + const struct rte_flow_item *item = item_pattern; + + for (;;) { + if (item->type == RTE_FLOW_ITEM_TYPE_VOID) { + item++; + continue; + } + + if (*type == RTE_FLOW_ITEM_TYPE_END || + item->type == RTE_FLOW_ITEM_TYPE_END) + break; + + if (*type != item->type) + break; + + item++; + type++; + } + + return *type == item->type; +} + +/** + * Parse flow attribute. + * + * This will check whether the provided attribute's flags are supported. + * + * @param priv Unused + * @param attr Pointer to the flow attribute. + * @param flow Unused + * @param error Pointer to the flow error. + * @returns 0 in case of success, negative value otherwise. 
+ */ +static int +mrvl_flow_parse_attr(struct mrvl_priv *priv __rte_unused, + const struct rte_flow_attr *attr, + struct rte_flow *flow __rte_unused, + struct rte_flow_error *error) +{ + if (!attr) { + rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR, + NULL, "NULL attribute"); + return -rte_errno; + } + + if (attr->group) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_GROUP, NULL, + "Groups are not supported"); + return -rte_errno; + } + if (attr->priority) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, NULL, + "Priorities are not supported"); + return -rte_errno; + } + if (!attr->ingress) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, NULL, + "Only ingress is supported"); + return -rte_errno; + } + if (attr->egress) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, + "Egress is not supported"); + return -rte_errno; + } + if (attr->transfer) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, NULL, + "Transfer is not supported"); + return -rte_errno; + } + + return 0; +} + +/** + * Parse flow pattern. + * + * Specific classifier rule will be created as well. + * + * @param priv Unused + * @param pattern Pointer to the flow pattern. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 in case of success, negative value otherwise. + */ +static int +mrvl_flow_parse_pattern(struct mrvl_priv *priv __rte_unused, + const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error) +{ + unsigned int i; + int ret; + + for (i = 0; i < RTE_DIM(mrvl_patterns); i++) { + if (!mrvl_patterns_match(mrvl_patterns[i].pattern, pattern)) + continue; + + ret = mrvl_patterns[i].parse(pattern, flow, error); + if (ret) + mrvl_free_all_key_mask(&flow->rule); + + return ret; + } + + rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, NULL, + "Unsupported pattern"); + + return -rte_errno; +} + +/** + * Parse flow actions. + * + * @param priv Pointer to the port's private data. + * @param actions Pointer to the action table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 in case of success, negative value otherwise. + */ +static int +mrvl_flow_parse_actions(struct mrvl_priv *priv, + const struct rte_flow_action actions[], + struct rte_flow *flow, + struct rte_flow_error *error) +{ + const struct rte_flow_action *action = actions; + int specified = 0; + + for (; action->type != RTE_FLOW_ACTION_TYPE_END; action++) { + if (action->type == RTE_FLOW_ACTION_TYPE_VOID) + continue; + + if (action->type == RTE_FLOW_ACTION_TYPE_DROP) { + flow->cos.ppio = priv->ppio; + flow->cos.tc = 0; + flow->action.type = PP2_CLS_TBL_ACT_DROP; + flow->action.cos = &flow->cos; + specified++; + } else if (action->type == RTE_FLOW_ACTION_TYPE_QUEUE) { + const struct rte_flow_action_queue *q = + (const struct rte_flow_action_queue *) + action->conf; + + if (q->index >= priv->nb_rx_queues) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, + "Queue index out of range"); + return -rte_errno; + } + + if (priv->rxq_map[q->index].tc == MRVL_UNKNOWN_TC) { + /* + * Unknown TC mapping, mapping will not have + * a correct queue.
+ */ + MRVL_LOG(ERR, + "Unknown TC mapping for queue %hu eth%hhu", + q->index, priv->ppio_id); + + rte_flow_error_set(error, EFAULT, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, NULL); + return -rte_errno; + } + + MRVL_LOG(DEBUG, + "Action: Assign packets to queue %d, tc:%d, q:%d", + q->index, priv->rxq_map[q->index].tc, + priv->rxq_map[q->index].inq); + + flow->cos.ppio = priv->ppio; + flow->cos.tc = priv->rxq_map[q->index].tc; + flow->action.type = PP2_CLS_TBL_ACT_DONE; + flow->action.cos = &flow->cos; + specified++; + } else if (action->type == RTE_FLOW_ACTION_TYPE_METER) { + const struct rte_flow_action_meter *meter; + struct mrvl_mtr *mtr; + + meter = action->conf; + if (!meter) + return -rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, "Invalid meter\n"); + + LIST_FOREACH(mtr, &priv->mtrs, next) + if (mtr->mtr_id == meter->mtr_id) + break; + + if (!mtr) + return -rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, + "Meter id does not exist\n"); + + if (!mtr->shared && mtr->refcnt) + return -rte_flow_error_set(error, EPERM, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, + "Meter cannot be shared\n"); + + /* + * In case cos has already been set + * do not modify it. + */ + if (!flow->cos.ppio) { + flow->cos.ppio = priv->ppio; + flow->cos.tc = 0; + } + + flow->action.type = PP2_CLS_TBL_ACT_DONE; + flow->action.cos = &flow->cos; + flow->action.plcr = mtr->enabled ? mtr->plcr : NULL; + flow->mtr = mtr; + mtr->refcnt++; + specified++; + } else { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "Action not supported"); + return -rte_errno; + } + } + + if (!specified) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "Action not specified"); + return -rte_errno; + } + + return 0; +} + +/** + * Parse flow attribute, pattern and actions. + * + * @param priv Pointer to the port's private data. + * @param attr Pointer to the flow attribute. + * @param pattern Pointer to the flow pattern. + * @param actions Pointer to the flow actions. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 on success, negative value otherwise. + */ +static int +mrvl_flow_parse(struct mrvl_priv *priv, const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow *flow, + struct rte_flow_error *error) +{ + int ret; + + ret = mrvl_flow_parse_attr(priv, attr, flow, error); + if (ret) + return ret; + + ret = mrvl_flow_parse_pattern(priv, pattern, flow, error); + if (ret) + return ret; + + return mrvl_flow_parse_actions(priv, actions, flow, error); +} + +/** + * Get engine type for the given flow. + * + * @param field Pointer to the flow. + * @returns The type of the engine. + */ +static inline enum pp2_cls_tbl_type +mrvl_engine_type(const struct rte_flow *flow) +{ + int i, size = 0; + + for (i = 0; i < flow->rule.num_fields; i++) + size += flow->rule.fields[i].size; + + /* + * For maskable engine type the key size must be up to 8 bytes. + * For keys with size bigger than 8 bytes, engine type must + * be set to exact match. + */ + if (size > 8) + return PP2_CLS_TBL_EXACT_MATCH; + + return PP2_CLS_TBL_MASKABLE; +} + +/** + * Create classifier table. + * + * @param dev Pointer to the device. + * @param flow Pointer to the very first flow. + * @returns 0 in case of success, negative value otherwise. 
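+ *
+ * Illustrative example: a first flow matching on the IPv4 source address
+ * (F_IP4_SIP, 4 bytes) and the TCP source port (F_TCP_SPORT, 2 bytes)
+ * produces a 6 byte key built from two proto_field entries, so
+ * mrvl_engine_type() selects the maskable engine; adding e.g. an IPv6
+ * source address (16 bytes) pushes the key above 8 bytes and forces
+ * exact match.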
+ */ +static int +mrvl_create_cls_table(struct rte_eth_dev *dev, struct rte_flow *first_flow) +{ + struct mrvl_priv *priv = dev->data->dev_private; + struct pp2_cls_tbl_key *key = &priv->cls_tbl_params.key; + int ret; + + if (priv->cls_tbl) { + pp2_cls_tbl_deinit(priv->cls_tbl); + priv->cls_tbl = NULL; + } + + memset(&priv->cls_tbl_params, 0, sizeof(priv->cls_tbl_params)); + + priv->cls_tbl_params.type = mrvl_engine_type(first_flow); + MRVL_LOG(INFO, "Setting cls search engine type to %s", + priv->cls_tbl_params.type == PP2_CLS_TBL_EXACT_MATCH ? + "exact" : "maskable"); + priv->cls_tbl_params.max_num_rules = MRVL_CLS_MAX_NUM_RULES; + priv->cls_tbl_params.default_act.type = PP2_CLS_TBL_ACT_DONE; + priv->cls_tbl_params.default_act.cos = &first_flow->cos; + + if (first_flow->pattern & F_DMAC) { + key->proto_field[key->num_fields].proto = MV_NET_PROTO_ETH; + key->proto_field[key->num_fields].field.eth = MV_NET_ETH_F_DA; + key->key_size += 6; + key->num_fields += 1; + } + + if (first_flow->pattern & F_SMAC) { + key->proto_field[key->num_fields].proto = MV_NET_PROTO_ETH; + key->proto_field[key->num_fields].field.eth = MV_NET_ETH_F_SA; + key->key_size += 6; + key->num_fields += 1; + } + + if (first_flow->pattern & F_TYPE) { + key->proto_field[key->num_fields].proto = MV_NET_PROTO_ETH; + key->proto_field[key->num_fields].field.eth = MV_NET_ETH_F_TYPE; + key->key_size += 2; + key->num_fields += 1; + } + + if (first_flow->pattern & F_VLAN_ID) { + key->proto_field[key->num_fields].proto = MV_NET_PROTO_VLAN; + key->proto_field[key->num_fields].field.vlan = MV_NET_VLAN_F_ID; + key->key_size += 2; + key->num_fields += 1; + } + + if (first_flow->pattern & F_VLAN_PRI) { + key->proto_field[key->num_fields].proto = MV_NET_PROTO_VLAN; + key->proto_field[key->num_fields].field.vlan = + MV_NET_VLAN_F_PRI; + key->key_size += 1; + key->num_fields += 1; + } + + if (first_flow->pattern & F_IP4_TOS) { + key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP4; + key->proto_field[key->num_fields].field.ipv4 = + MV_NET_IP4_F_DSCP; + key->key_size += 1; + key->num_fields += 1; + } + + if (first_flow->pattern & F_IP4_SIP) { + key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP4; + key->proto_field[key->num_fields].field.ipv4 = MV_NET_IP4_F_SA; + key->key_size += 4; + key->num_fields += 1; + } + + if (first_flow->pattern & F_IP4_DIP) { + key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP4; + key->proto_field[key->num_fields].field.ipv4 = MV_NET_IP4_F_DA; + key->key_size += 4; + key->num_fields += 1; + } + + if (first_flow->pattern & F_IP4_PROTO) { + key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP4; + key->proto_field[key->num_fields].field.ipv4 = + MV_NET_IP4_F_PROTO; + key->key_size += 1; + key->num_fields += 1; + } + + if (first_flow->pattern & F_IP6_SIP) { + key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP6; + key->proto_field[key->num_fields].field.ipv6 = MV_NET_IP6_F_SA; + key->key_size += 16; + key->num_fields += 1; + } + + if (first_flow->pattern & F_IP6_DIP) { + key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP6; + key->proto_field[key->num_fields].field.ipv6 = MV_NET_IP6_F_DA; + key->key_size += 16; + key->num_fields += 1; + } + + if (first_flow->pattern & F_IP6_FLOW) { + key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP6; + key->proto_field[key->num_fields].field.ipv6 = + MV_NET_IP6_F_FLOW; + key->key_size += 3; + key->num_fields += 1; + } + + if (first_flow->pattern & F_IP6_NEXT_HDR) { + key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP6; + 
key->proto_field[key->num_fields].field.ipv6 = + MV_NET_IP6_F_NEXT_HDR; + key->key_size += 1; + key->num_fields += 1; + } + + if (first_flow->pattern & F_TCP_SPORT) { + key->proto_field[key->num_fields].proto = MV_NET_PROTO_TCP; + key->proto_field[key->num_fields].field.tcp = MV_NET_TCP_F_SP; + key->key_size += 2; + key->num_fields += 1; + } + + if (first_flow->pattern & F_TCP_DPORT) { + key->proto_field[key->num_fields].proto = MV_NET_PROTO_TCP; + key->proto_field[key->num_fields].field.tcp = MV_NET_TCP_F_DP; + key->key_size += 2; + key->num_fields += 1; + } + + if (first_flow->pattern & F_UDP_SPORT) { + key->proto_field[key->num_fields].proto = MV_NET_PROTO_UDP; + key->proto_field[key->num_fields].field.udp = MV_NET_UDP_F_SP; + key->key_size += 2; + key->num_fields += 1; + } + + if (first_flow->pattern & F_UDP_DPORT) { + key->proto_field[key->num_fields].proto = MV_NET_PROTO_UDP; + key->proto_field[key->num_fields].field.udp = MV_NET_UDP_F_DP; + key->key_size += 2; + key->num_fields += 1; + } + + ret = pp2_cls_tbl_init(&priv->cls_tbl_params, &priv->cls_tbl); + if (!ret) + priv->cls_tbl_pattern = first_flow->pattern; + + return ret; +} + +/** + * Check whether new flow can be added to the table + * + * @param priv Pointer to the port's private data. + * @param flow Pointer to the new flow. + * @return 1 in case flow can be added, 0 otherwise. + */ +static inline int +mrvl_flow_can_be_added(struct mrvl_priv *priv, const struct rte_flow *flow) +{ + return flow->pattern == priv->cls_tbl_pattern && + mrvl_engine_type(flow) == priv->cls_tbl_params.type; +} + +/** + * DPDK flow create callback called when flow is to be created. + * + * @param dev Pointer to the device. + * @param attr Pointer to the flow attribute. + * @param pattern Pointer to the flow pattern. + * @param actions Pointer to the flow actions. + * @param error Pointer to the flow error. + * @returns Pointer to the created flow in case of success, NULL otherwise. + */ +static struct rte_flow * +mrvl_flow_create(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + struct mrvl_priv *priv = dev->data->dev_private; + struct rte_flow *flow, *first; + int ret; + + if (!dev->data->dev_started) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "Port must be started first\n"); + return NULL; + } + + flow = rte_zmalloc_socket(NULL, sizeof(*flow), 0, rte_socket_id()); + if (!flow) + return NULL; + + ret = mrvl_flow_parse(priv, attr, pattern, actions, flow, error); + if (ret) + goto out; + + /* + * Four cases here: + * + * 1. In case table does not exist - create one. + * 2. In case table exists, is empty and new flow cannot be added + * recreate table. + * 3. In case table is not empty and new flow matches table format + * add it. + * 4. Otherwise flow cannot be added. 
+ */ + first = LIST_FIRST(&priv->flows); + if (!priv->cls_tbl) { + ret = mrvl_create_cls_table(dev, flow); + } else if (!first && !mrvl_flow_can_be_added(priv, flow)) { + ret = mrvl_create_cls_table(dev, flow); + } else if (mrvl_flow_can_be_added(priv, flow)) { + ret = 0; + } else { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "Pattern does not match cls table format\n"); + goto out; + } + + if (ret) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "Failed to create cls table\n"); + goto out; + } + + ret = pp2_cls_tbl_add_rule(priv->cls_tbl, &flow->rule, &flow->action); + if (ret) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "Failed to add rule\n"); + goto out; + } + + LIST_INSERT_HEAD(&priv->flows, flow, next); + + return flow; +out: + rte_free(flow); + return NULL; +} + +/** + * Remove classifier rule associated with given flow. + * + * @param priv Pointer to the port's private data. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 in case of success, negative value otherwise. + */ +static int +mrvl_flow_remove(struct mrvl_priv *priv, struct rte_flow *flow, + struct rte_flow_error *error) +{ + int ret; + + if (!priv->cls_tbl) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "Classifier table not initialized"); + return -rte_errno; + } + + ret = pp2_cls_tbl_remove_rule(priv->cls_tbl, &flow->rule); + if (ret) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "Failed to remove rule"); + return -rte_errno; + } + + mrvl_free_all_key_mask(&flow->rule); + + if (flow->mtr) { + flow->mtr->refcnt--; + flow->mtr = NULL; + } + + return 0; +} + +/** + * DPDK flow destroy callback called when flow is to be removed. + * + * @param dev Pointer to the device. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 in case of success, negative value otherwise. + */ +static int +mrvl_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow, + struct rte_flow_error *error) +{ + struct mrvl_priv *priv = dev->data->dev_private; + struct rte_flow *f; + int ret; + + LIST_FOREACH(f, &priv->flows, next) { + if (f == flow) + break; + } + + if (!flow) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "Rule was not found"); + return -rte_errno; + } + + LIST_REMOVE(f, next); + + ret = mrvl_flow_remove(priv, flow, error); + if (ret) + return ret; + + rte_free(flow); + + return 0; +} + +/** + * DPDK flow callback called to verify given attribute, pattern and actions. + * + * @param dev Pointer to the device. + * @param attr Pointer to the flow attribute. + * @param pattern Pointer to the flow pattern. + * @param actions Pointer to the flow actions. + * @param error Pointer to the flow error. + * @returns 0 on success, negative value otherwise. + */ +static int +mrvl_flow_validate(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + static struct rte_flow *flow; + + flow = mrvl_flow_create(dev, attr, pattern, actions, error); + if (!flow) + return -rte_errno; + + mrvl_flow_destroy(dev, flow, error); + + return 0; +} + +/** + * DPDK flow flush callback called when flows are to be flushed. + * + * @param dev Pointer to the device. + * @param error Pointer to the flow error. 
+ * @returns 0 in case of success, negative value otherwise. + */ +static int +mrvl_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error) +{ + struct mrvl_priv *priv = dev->data->dev_private; + + while (!LIST_EMPTY(&priv->flows)) { + struct rte_flow *flow = LIST_FIRST(&priv->flows); + int ret = mrvl_flow_remove(priv, flow, error); + if (ret) + return ret; + + LIST_REMOVE(flow, next); + rte_free(flow); + } + + return 0; +} + +/** + * DPDK flow isolate callback called to isolate port. + * + * @param dev Pointer to the device. + * @param enable Pass 0/1 to disable/enable port isolation. + * @param error Pointer to the flow error. + * @returns 0 in case of success, negative value otherwise. + */ +static int +mrvl_flow_isolate(struct rte_eth_dev *dev, int enable, + struct rte_flow_error *error) +{ + struct mrvl_priv *priv = dev->data->dev_private; + + if (dev->data->dev_started) { + rte_flow_error_set(error, EBUSY, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, "Port must be stopped first\n"); + return -rte_errno; + } + + priv->isolated = enable; + + return 0; +} + +const struct rte_flow_ops mrvl_flow_ops = { + .validate = mrvl_flow_validate, + .create = mrvl_flow_create, + .destroy = mrvl_flow_destroy, + .flush = mrvl_flow_flush, + .isolate = mrvl_flow_isolate +}; + +/** + * Initialize flow resources. + * + * @param dev Pointer to the device. + */ +void +mrvl_flow_init(struct rte_eth_dev *dev) +{ + struct mrvl_priv *priv = dev->data->dev_private; + + LIST_INIT(&priv->flows); +} + +/** + * Cleanup flow resources. + * + * @param dev Pointer to the device. + */ +void +mrvl_flow_deinit(struct rte_eth_dev *dev) +{ + struct mrvl_priv *priv = dev->data->dev_private; + + mrvl_flow_flush(dev, NULL); + + if (priv->cls_tbl) { + pp2_cls_tbl_deinit(priv->cls_tbl); + priv->cls_tbl = NULL; + } +} diff --git a/src/spdk/dpdk/drivers/net/mvpp2/mrvl_flow.h b/src/spdk/dpdk/drivers/net/mvpp2/mrvl_flow.h new file mode 100644 index 000000000..f63747c11 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/mvpp2/mrvl_flow.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Marvell International Ltd. + * Copyright(c) 2018 Semihalf. + * All rights reserved. + */ + +#ifndef _MRVL_FLOW_H_ +#define _MRVL_FLOW_H_ + +#include "mrvl_ethdev.h" + +void mrvl_flow_init(struct rte_eth_dev *dev); +void mrvl_flow_deinit(struct rte_eth_dev *dev); + +#endif /* _MRVL_FLOW_H_ */ diff --git a/src/spdk/dpdk/drivers/net/mvpp2/mrvl_mtr.c b/src/spdk/dpdk/drivers/net/mvpp2/mrvl_mtr.c new file mode 100644 index 000000000..39272acea --- /dev/null +++ b/src/spdk/dpdk/drivers/net/mvpp2/mrvl_mtr.c @@ -0,0 +1,511 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Marvell International Ltd. + * Copyright(c) 2018 Semihalf. + * All rights reserved. + */ + +#include +#include + +#include "mrvl_mtr.h" + +/** Maximum meter rate */ +#define MRVL_SRTCM_RFC2697_CIR_MAX 1023000 + +/** Invalid plcr bit */ +#define MRVL_PLCR_BIT_INVALID -1 + +/** + * Return meter object capabilities. + * + * @param dev Pointer to the device (unused). + * @param cap Pointer to the meter object capabilities. + * @param error Pointer to the error (unused). + * @returns 0 always. 
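+ *
+ * Only srTCM RFC 2697 metering is reported and the number of meter objects
+ * is bounded by the number of hardware policers (PP2_CLS_PLCR_NUM).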
+ */ +static int +mrvl_capabilities_get(struct rte_eth_dev *dev __rte_unused, + struct rte_mtr_capabilities *cap, + struct rte_mtr_error *error __rte_unused) +{ + struct rte_mtr_capabilities capa = { + .n_max = PP2_CLS_PLCR_NUM, + .n_shared_max = PP2_CLS_PLCR_NUM, + .shared_n_flows_per_mtr_max = -1, + .meter_srtcm_rfc2697_n_max = PP2_CLS_PLCR_NUM, + .meter_rate_max = MRVL_SRTCM_RFC2697_CIR_MAX, + }; + + memcpy(cap, &capa, sizeof(capa)); + + return 0; +} + +/** + * Get profile using it's id. + * + * @param priv Pointer to the port's private data. + * @param meter_profile_id Profile id used by the meter. + * @returns Pointer to the profile if exists, NULL otherwise. + */ +static struct mrvl_mtr_profile * +mrvl_mtr_profile_from_id(struct mrvl_priv *priv, uint32_t meter_profile_id) +{ + struct mrvl_mtr_profile *profile = NULL; + + LIST_FOREACH(profile, &priv->profiles, next) + if (profile->profile_id == meter_profile_id) + break; + + return profile; +} + +/** + * Add profile to the list of profiles. + * + * @param dev Pointer to the device. + * @param meter_profile_id Id of the new profile. + * @param profile Pointer to the profile configuration. + * @param error Pointer to the error. + * @returns 0 on success, negative value otherwise. + */ +static int +mrvl_meter_profile_add(struct rte_eth_dev *dev, uint32_t meter_profile_id, + struct rte_mtr_meter_profile *profile, + struct rte_mtr_error *error) +{ + struct mrvl_priv *priv = dev->data->dev_private; + struct mrvl_mtr_profile *prof; + + if (!profile) + return -rte_mtr_error_set(error, EINVAL, + RTE_MTR_ERROR_TYPE_UNSPECIFIED, + NULL, NULL); + + if (profile->alg != RTE_MTR_SRTCM_RFC2697) + return -rte_mtr_error_set(error, EINVAL, + RTE_MTR_ERROR_TYPE_UNSPECIFIED, + NULL, + "Only srTCM RFC 2697 is supported\n"); + + prof = mrvl_mtr_profile_from_id(priv, meter_profile_id); + if (prof) + return -rte_mtr_error_set(error, EEXIST, + RTE_MTR_ERROR_TYPE_METER_PROFILE_ID, + NULL, "Profile id already exists\n"); + + prof = rte_zmalloc_socket(NULL, sizeof(*prof), 0, rte_socket_id()); + if (!prof) + return -rte_mtr_error_set(error, ENOMEM, + RTE_MTR_ERROR_TYPE_UNSPECIFIED, + NULL, NULL); + + prof->profile_id = meter_profile_id; + memcpy(&prof->profile, profile, sizeof(*profile)); + + LIST_INSERT_HEAD(&priv->profiles, prof, next); + + return 0; +} + +/** + * Remove profile from the list of profiles. + * + * @param dev Pointer to the device. + * @param meter_profile_id Id of the profile to remove. + * @param error Pointer to the error. + * @returns 0 on success, negative value otherwise. + */ +static int +mrvl_meter_profile_delete(struct rte_eth_dev *dev, + uint32_t meter_profile_id, + struct rte_mtr_error *error) +{ + struct mrvl_priv *priv = dev->data->dev_private; + struct mrvl_mtr_profile *profile; + + profile = mrvl_mtr_profile_from_id(priv, meter_profile_id); + if (!profile) + return -rte_mtr_error_set(error, ENODEV, + RTE_MTR_ERROR_TYPE_METER_PROFILE_ID, + NULL, "Profile id does not exist\n"); + + if (profile->refcnt) + return -rte_mtr_error_set(error, EPERM, + RTE_MTR_ERROR_TYPE_METER_PROFILE_ID, + NULL, "Profile is used\n"); + + LIST_REMOVE(profile, next); + rte_free(profile); + + return 0; +} + +/** + * Get meter using it's id. + * + * @param priv Pointer to port's private data. + * @param mtr_id Id of the meter. + * @returns Pointer to the meter if exists, NULL otherwise. 
+ */ +static struct mrvl_mtr * +mrvl_mtr_from_id(struct mrvl_priv *priv, uint32_t mtr_id) +{ + struct mrvl_mtr *mtr = NULL; + + LIST_FOREACH(mtr, &priv->mtrs, next) + if (mtr->mtr_id == mtr_id) + break; + + return mtr; +} + +/** + * Reserve a policer bit in a bitmap. + * + * @param plcrs Pointer to the policers bitmap. + * @returns Reserved bit number on success, negative value otherwise. + */ +static int +mrvl_reserve_plcr(uint32_t *plcrs) +{ + uint32_t i, num; + + num = PP2_CLS_PLCR_NUM; + if (num > sizeof(uint32_t) * 8) { + num = sizeof(uint32_t) * 8; + MRVL_LOG(WARNING, "Plcrs number was limited to 32."); + } + + for (i = 0; i < num; i++) { + uint32_t bit = BIT(i); + + if (!(*plcrs & bit)) { + *plcrs |= bit; + + return i; + } + } + + return -1; +} + +/** + * Enable meter object. + * + * @param dev Pointer to the device. + * @param mtr_id Id of the meter. + * @param error Pointer to the error. + * @returns 0 in success, negative value otherwise. + */ +static int +mrvl_meter_enable(struct rte_eth_dev *dev, uint32_t mtr_id, + struct rte_mtr_error *error) +{ + struct mrvl_priv *priv = dev->data->dev_private; + struct mrvl_mtr *mtr = mrvl_mtr_from_id(priv, mtr_id); + struct pp2_cls_plcr_params params; + char match[MRVL_MATCH_LEN]; + struct rte_flow *flow; + int ret; + + if (!priv->ppio) + return -rte_mtr_error_set(error, EPERM, + RTE_MTR_ERROR_TYPE_UNSPECIFIED, + NULL, "Port is uninitialized\n"); + + if (!mtr) + return -rte_mtr_error_set(error, ENODEV, + RTE_MTR_ERROR_TYPE_MTR_ID, NULL, + "Meter id does not exist\n"); + + if (mtr->plcr) + goto skip; + + mtr->plcr_bit = mrvl_reserve_plcr(&priv->used_plcrs); + if (mtr->plcr_bit < 0) + return -rte_mtr_error_set(error, ENOSPC, + RTE_MTR_ERROR_TYPE_UNSPECIFIED, + NULL, + "Failed to reserve plcr entry\n"); + + memset(¶ms, 0, sizeof(params)); + snprintf(match, sizeof(match), "policer-%d:%d", priv->pp_id, + mtr->plcr_bit); + params.match = match; + params.token_unit = PP2_CLS_PLCR_BYTES_TOKEN_UNIT; + params.color_mode = PP2_CLS_PLCR_COLOR_BLIND_MODE; + params.cir = mtr->profile->profile.srtcm_rfc2697.cir; + params.cbs = mtr->profile->profile.srtcm_rfc2697.cbs; + params.ebs = mtr->profile->profile.srtcm_rfc2697.ebs; + + ret = pp2_cls_plcr_init(¶ms, &mtr->plcr); + if (ret) { + priv->used_plcrs &= ~BIT(mtr->plcr_bit); + mtr->plcr_bit = MRVL_PLCR_BIT_INVALID; + + return -rte_mtr_error_set(error, -ret, + RTE_MTR_ERROR_TYPE_UNSPECIFIED, + NULL, "Failed to setup policer\n"); + } + + mtr->enabled = 1; +skip: + /* iterate over flows that have this mtr attached */ + LIST_FOREACH(flow, &priv->flows, next) { + if (flow->mtr != mtr) + continue; + + flow->action.plcr = mtr->plcr; + + ret = pp2_cls_tbl_modify_rule(priv->cls_tbl, &flow->rule, + &flow->action); + if (ret) + return -rte_mtr_error_set(error, -ret, + RTE_MTR_ERROR_TYPE_UNSPECIFIED, + NULL, "Failed to update cls rule\n"); + } + + return 0; +} + +/** + * Disable meter object. + * + * @param dev Pointer to the device. + * @param mtr Id of the meter. + * @param error Pointer to the error. + * @returns 0 on success, negative value otherwise. 
+ */ +static int +mrvl_meter_disable(struct rte_eth_dev *dev, uint32_t mtr_id, + struct rte_mtr_error *error) +{ + struct mrvl_priv *priv = dev->data->dev_private; + struct mrvl_mtr *mtr = mrvl_mtr_from_id(priv, mtr_id); + struct rte_flow *flow; + int ret; + + if (!mtr) + return -rte_mtr_error_set(error, ENODEV, + RTE_MTR_ERROR_TYPE_MTR_ID, NULL, + "Meter id does not exist\n"); + + LIST_FOREACH(flow, &priv->flows, next) { + if (flow->mtr != mtr) + continue; + + flow->action.plcr = NULL; + + ret = pp2_cls_tbl_modify_rule(priv->cls_tbl, &flow->rule, + &flow->action); + if (ret) + return -rte_mtr_error_set(error, -ret, + RTE_MTR_ERROR_TYPE_UNSPECIFIED, + NULL, "Failed to disable meter\n"); + } + + mtr->enabled = 0; + + return 0; +} + +/** + * Create new meter. + * + * @param dev Pointer to the device. + * @param mtr_id Id of the meter. + * @param params Pointer to the meter parameters. + * @param shared Flags indicating whether meter is shared. + * @param error Pointer to the error. + * @returns 0 on success, negative value otherwise. + */ +static int +mrvl_create(struct rte_eth_dev *dev, uint32_t mtr_id, + struct rte_mtr_params *params, int shared, + struct rte_mtr_error *error) +{ + struct mrvl_priv *priv = dev->data->dev_private; + struct mrvl_mtr_profile *profile; + struct mrvl_mtr *mtr; + + mtr = mrvl_mtr_from_id(priv, mtr_id); + if (mtr) + return -rte_mtr_error_set(error, EEXIST, + RTE_MTR_ERROR_TYPE_MTR_ID, NULL, + "Meter id already exists\n"); + + mtr = rte_zmalloc_socket(NULL, sizeof(*mtr), 0, rte_socket_id()); + if (!mtr) + return -rte_mtr_error_set(error, ENOMEM, + RTE_MTR_ERROR_TYPE_UNSPECIFIED, + NULL, NULL); + + profile = mrvl_mtr_profile_from_id(priv, params->meter_profile_id); + if (!profile) + return -rte_mtr_error_set(error, EINVAL, + RTE_MTR_ERROR_TYPE_METER_PROFILE_ID, + NULL, "Profile id does not exist\n"); + + mtr->shared = shared; + mtr->mtr_id = mtr_id; + mtr->plcr_bit = MRVL_PLCR_BIT_INVALID; + mtr->profile = profile; + profile->refcnt++; + LIST_INSERT_HEAD(&priv->mtrs, mtr, next); + + if (params->meter_enable) + return mrvl_meter_enable(dev, mtr_id, error); + + return 0; +} + +/** + * Destroy meter object. + * + * @param dev Pointer to the device. + * @param mtr_id Id of the meter object. + * @param error Pointer to the error. + * @returns 0 on success, negative value otherwise. + */ +static int +mrvl_destroy(struct rte_eth_dev *dev, uint32_t mtr_id, + struct rte_mtr_error *error) +{ + struct mrvl_priv *priv = dev->data->dev_private; + struct mrvl_mtr *mtr; + + if (!priv->ppio) + return -rte_mtr_error_set(error, EPERM, + RTE_MTR_ERROR_TYPE_UNSPECIFIED, + NULL, "Port is uninitialized\n"); + + mtr = mrvl_mtr_from_id(priv, mtr_id); + if (!mtr) + return -rte_mtr_error_set(error, EEXIST, + RTE_MTR_ERROR_TYPE_MTR_ID, NULL, + "Meter id does not exist\n"); + + if (mtr->refcnt) + return -rte_mtr_error_set(error, EPERM, + RTE_MTR_ERROR_TYPE_MTR_ID, NULL, + "Meter is used\n"); + + LIST_REMOVE(mtr, next); + mtr->profile->refcnt--; + + if (mtr->plcr_bit != MRVL_PLCR_BIT_INVALID) + priv->used_plcrs &= ~BIT(mtr->plcr_bit); + + if (mtr->plcr) + pp2_cls_plcr_deinit(mtr->plcr); + + rte_free(mtr); + + return 0; +} + +/** + * Update profile used by the meter. + * + * @param dev Pointer to the device. + * @param mtr_id Id of the meter object. + * @param error Pointer to the error. + * @returns 0 on success, negative value otherwise. 
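+ * @param meter_profile_id Id of the profile to attach to the meter.
+ *
+ * The meter is first disabled and its policer (if any) is deinitialized;
+ * the new profile is then attached and, if a policer had already been set
+ * up, the meter is enabled again with the new parameters.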
+ */ +static int +mrvl_meter_profile_update(struct rte_eth_dev *dev, uint32_t mtr_id, + uint32_t meter_profile_id, + struct rte_mtr_error *error) +{ + struct mrvl_priv *priv = dev->data->dev_private; + struct mrvl_mtr_profile *profile; + struct mrvl_mtr *mtr; + int ret, enabled = 0; + + if (!priv->ppio) + return -rte_mtr_error_set(error, EPERM, + RTE_MTR_ERROR_TYPE_UNSPECIFIED, + NULL, "Port is uninitialized\n"); + + mtr = mrvl_mtr_from_id(priv, mtr_id); + if (!mtr) + return -rte_mtr_error_set(error, EEXIST, + RTE_MTR_ERROR_TYPE_MTR_ID, NULL, + "Meter id does not exist\n"); + + profile = mrvl_mtr_profile_from_id(priv, meter_profile_id); + if (!profile) + return -rte_mtr_error_set(error, EINVAL, + RTE_MTR_ERROR_TYPE_METER_PROFILE_ID, + NULL, "Profile id does not exist\n"); + + ret = mrvl_meter_disable(dev, mtr_id, error); + if (ret) + return -rte_mtr_error_set(error, EPERM, + RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL, + NULL); + + if (mtr->plcr) { + enabled = 1; + pp2_cls_plcr_deinit(mtr->plcr); + mtr->plcr = NULL; + } + + mtr->profile->refcnt--; + mtr->profile = profile; + profile->refcnt++; + + if (enabled) + return mrvl_meter_enable(dev, mtr_id, error); + + return 0; +} + +const struct rte_mtr_ops mrvl_mtr_ops = { + .capabilities_get = mrvl_capabilities_get, + .meter_profile_add = mrvl_meter_profile_add, + .meter_profile_delete = mrvl_meter_profile_delete, + .create = mrvl_create, + .destroy = mrvl_destroy, + .meter_enable = mrvl_meter_enable, + .meter_disable = mrvl_meter_disable, + .meter_profile_update = mrvl_meter_profile_update, +}; + +/** + * Initialize metering resources. + * + * @param dev Pointer to the device. + */ +void +mrvl_mtr_init(struct rte_eth_dev *dev) +{ + struct mrvl_priv *priv = dev->data->dev_private; + + LIST_INIT(&priv->profiles); + LIST_INIT(&priv->mtrs); +} + +/** + * Cleanup metering resources. + * + * @param dev Pointer to the device. + */ +void +mrvl_mtr_deinit(struct rte_eth_dev *dev) +{ + struct mrvl_priv *priv = dev->data->dev_private; + struct mrvl_mtr_profile *profile, *tmp_profile; + struct mrvl_mtr *mtr, *tmp_mtr; + + for (mtr = LIST_FIRST(&priv->mtrs); + mtr && (tmp_mtr = LIST_NEXT(mtr, next), 1); + mtr = tmp_mtr) + mrvl_destroy(dev, mtr->mtr_id, NULL); + + for (profile = LIST_FIRST(&priv->profiles); + profile && (tmp_profile = LIST_NEXT(profile, next), 1); + profile = tmp_profile) + mrvl_meter_profile_delete(dev, profile->profile_id, NULL); +} diff --git a/src/spdk/dpdk/drivers/net/mvpp2/mrvl_mtr.h b/src/spdk/dpdk/drivers/net/mvpp2/mrvl_mtr.h new file mode 100644 index 000000000..302a20fbf --- /dev/null +++ b/src/spdk/dpdk/drivers/net/mvpp2/mrvl_mtr.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Marvell International Ltd. + * Copyright(c) 2018 Semihalf. + * All rights reserved. + */ + +#ifndef _MRVL_MTR_H_ +#define _MRVL_MTR_H_ + +#include "mrvl_ethdev.h" + +void mrvl_mtr_init(struct rte_eth_dev *dev); +void mrvl_mtr_deinit(struct rte_eth_dev *dev); + +#endif /* _MRVL_MTR_H_ */ diff --git a/src/spdk/dpdk/drivers/net/mvpp2/mrvl_qos.c b/src/spdk/dpdk/drivers/net/mvpp2/mrvl_qos.c new file mode 100644 index 000000000..7fd970309 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/mvpp2/mrvl_qos.c @@ -0,0 +1,912 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Marvell International Ltd. + * Copyright(c) 2017 Semihalf. + * All rights reserved. + */ + +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "mrvl_qos.h" + +/* Parsing tokens. 
Defined conveniently, so that any correction is easy. */ +#define MRVL_TOK_DEFAULT "default" +#define MRVL_TOK_DEFAULT_TC "default_tc" +#define MRVL_TOK_DSCP "dscp" +#define MRVL_TOK_MAPPING_PRIORITY "mapping_priority" +#define MRVL_TOK_IP "ip" +#define MRVL_TOK_IP_VLAN "ip/vlan" +#define MRVL_TOK_PCP "pcp" +#define MRVL_TOK_PORT "port" +#define MRVL_TOK_RXQ "rxq" +#define MRVL_TOK_TC "tc" +#define MRVL_TOK_TXQ "txq" +#define MRVL_TOK_VLAN "vlan" +#define MRVL_TOK_VLAN_IP "vlan/ip" + +/* egress specific configuration tokens */ +#define MRVL_TOK_BURST_SIZE "burst_size" +#define MRVL_TOK_RATE_LIMIT "rate_limit" +#define MRVL_TOK_RATE_LIMIT_ENABLE "rate_limit_enable" +#define MRVL_TOK_SCHED_MODE "sched_mode" +#define MRVL_TOK_SCHED_MODE_SP "sp" +#define MRVL_TOK_SCHED_MODE_WRR "wrr" +#define MRVL_TOK_WRR_WEIGHT "wrr_weight" + +/* policer specific configuration tokens */ +#define MRVL_TOK_PLCR "policer" +#define MRVL_TOK_PLCR_DEFAULT "default_policer" +#define MRVL_TOK_PLCR_UNIT "token_unit" +#define MRVL_TOK_PLCR_UNIT_BYTES "bytes" +#define MRVL_TOK_PLCR_UNIT_PACKETS "packets" +#define MRVL_TOK_PLCR_COLOR "color_mode" +#define MRVL_TOK_PLCR_COLOR_BLIND "blind" +#define MRVL_TOK_PLCR_COLOR_AWARE "aware" +#define MRVL_TOK_PLCR_CIR "cir" +#define MRVL_TOK_PLCR_CBS "cbs" +#define MRVL_TOK_PLCR_EBS "ebs" +#define MRVL_TOK_PLCR_DEFAULT_COLOR "default_color" +#define MRVL_TOK_PLCR_DEFAULT_COLOR_GREEN "green" +#define MRVL_TOK_PLCR_DEFAULT_COLOR_YELLOW "yellow" +#define MRVL_TOK_PLCR_DEFAULT_COLOR_RED "red" + +/** Number of tokens in range a-b = 2. */ +#define MAX_RNG_TOKENS 2 + +/** Maximum possible value of PCP. */ +#define MAX_PCP 7 + +/** Maximum possible value of DSCP. */ +#define MAX_DSCP 63 + +/** Global QoS configuration. */ +struct mrvl_qos_cfg *mrvl_qos_cfg; + +/** + * Convert string to uint32_t with extra checks for result correctness. + * + * @param string String to convert. + * @param val Conversion result. + * @returns 0 in case of success, negative value otherwise. + */ +static int +get_val_securely(const char *string, uint32_t *val) +{ + char *endptr; + size_t len = strlen(string); + + if (len == 0) + return -1; + + errno = 0; + *val = strtoul(string, &endptr, 0); + if (errno != 0 || RTE_PTR_DIFF(endptr, string) != len) + return -2; + + return 0; +} + +/** + * Read out-queue configuration from file. + * + * @param file Path to the configuration file. + * @param port Port number. + * @param outq Out queue number. + * @param cfg Pointer to the Marvell QoS configuration structure. + * @returns 0 in case of success, negative value otherwise. 
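+ *
+ * Illustrative configuration file fragment (values are only an example):
+ *
+ *   [port 0 txq 1]
+ *   sched_mode = wrr
+ *   wrr_weight = 10
+ *   rate_limit_enable = 1
+ *   burst_size = 64
+ *   rate_limit = 1000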
+ */ +static int +get_outq_cfg(struct rte_cfgfile *file, int port, int outq, + struct mrvl_qos_cfg *cfg) +{ + char sec_name[32]; + const char *entry; + uint32_t val; + + snprintf(sec_name, sizeof(sec_name), "%s %d %s %d", + MRVL_TOK_PORT, port, MRVL_TOK_TXQ, outq); + + /* Skip non-existing */ + if (rte_cfgfile_num_sections(file, sec_name, strlen(sec_name)) <= 0) + return 0; + + /* Read scheduling mode */ + entry = rte_cfgfile_get_entry(file, sec_name, MRVL_TOK_SCHED_MODE); + if (entry) { + if (!strncmp(entry, MRVL_TOK_SCHED_MODE_SP, + strlen(MRVL_TOK_SCHED_MODE_SP))) { + cfg->port[port].outq[outq].sched_mode = + PP2_PPIO_SCHED_M_SP; + } else if (!strncmp(entry, MRVL_TOK_SCHED_MODE_WRR, + strlen(MRVL_TOK_SCHED_MODE_WRR))) { + cfg->port[port].outq[outq].sched_mode = + PP2_PPIO_SCHED_M_WRR; + } else { + MRVL_LOG(ERR, "Unknown token: %s", entry); + return -1; + } + } + + /* Read wrr weight */ + if (cfg->port[port].outq[outq].sched_mode == PP2_PPIO_SCHED_M_WRR) { + entry = rte_cfgfile_get_entry(file, sec_name, + MRVL_TOK_WRR_WEIGHT); + if (entry) { + if (get_val_securely(entry, &val) < 0) + return -1; + cfg->port[port].outq[outq].weight = val; + } + } + + /* + * There's no point in setting rate limiting for specific outq as + * global port rate limiting has priority. + */ + if (cfg->port[port].rate_limit_enable) { + MRVL_LOG(WARNING, "Port %d rate limiting already enabled", + port); + return 0; + } + + entry = rte_cfgfile_get_entry(file, sec_name, + MRVL_TOK_RATE_LIMIT_ENABLE); + if (entry) { + if (get_val_securely(entry, &val) < 0) + return -1; + cfg->port[port].outq[outq].rate_limit_enable = val; + } + + if (!cfg->port[port].outq[outq].rate_limit_enable) + return 0; + + /* Read CBS (in kB) */ + entry = rte_cfgfile_get_entry(file, sec_name, MRVL_TOK_BURST_SIZE); + if (entry) { + if (get_val_securely(entry, &val) < 0) + return -1; + cfg->port[port].outq[outq].rate_limit_params.cbs = val; + } + + /* Read CIR (in kbps) */ + entry = rte_cfgfile_get_entry(file, sec_name, MRVL_TOK_RATE_LIMIT); + if (entry) { + if (get_val_securely(entry, &val) < 0) + return -1; + cfg->port[port].outq[outq].rate_limit_params.cir = val; + } + + return 0; +} + +/** + * Gets multiple-entry values and places them in table. + * + * Entry can be anything, e.g. "1 2-3 5 6 7-9". This needs to be converted to + * table entries, respectively: {1, 2, 3, 5, 6, 7, 8, 9}. + * As all result table's elements are always 1-byte long, we + * won't overcomplicate the function, but we'll keep API generic, + * check if someone hasn't changed element size and make it simple + * to extend to other sizes. + * + * This function is purely utilitary, it does not print any error, only returns + * different error numbers. + * + * @param entry[in] Values string to parse. + * @param tab[out] Results table. + * @param elem_sz[in] Element size (in bytes). + * @param max_elems[in] Number of results table elements available. + * @param max val[in] Maximum value allowed. + * @returns Number of correctly parsed elements in case of success. + * @retval -1 Wrong element size. + * @retval -2 More tokens than result table allows. + * @retval -3 Wrong range syntax. + * @retval -4 Wrong range values. + * @retval -5 Maximum value exceeded. + */ +static int +get_entry_values(const char *entry, uint8_t *tab, + size_t elem_sz, uint8_t max_elems, uint8_t max_val) +{ + /* There should not be more tokens than max elements. + * Add 1 for error trap. + */ + char *tokens[max_elems + 1]; + + /* Begin, End + error trap = 3. 
*/ + char *rng_tokens[MAX_RNG_TOKENS + 1]; + long beg, end; + uint32_t token_val; + int nb_tokens, nb_rng_tokens; + int i; + int values = 0; + char val; + char entry_cpy[CFG_VALUE_LEN]; + + if (elem_sz != 1) + return -1; + + /* Copy the entry to safely use rte_strsplit(). */ + strlcpy(entry_cpy, entry, RTE_DIM(entry_cpy)); + + /* + * If there are more tokens than array size, rte_strsplit will + * not return error, just array size. + */ + nb_tokens = rte_strsplit(entry_cpy, strlen(entry_cpy), + tokens, max_elems + 1, ' '); + + /* Quick check, will be refined later. */ + if (nb_tokens > max_elems) + return -2; + + for (i = 0; i < nb_tokens; ++i) { + if (strchr(tokens[i], '-') != NULL) { + /* + * Split to begin and end tokens. + * We want to catch error cases too, thus we leave + * option for number of tokens to be more than 2. + */ + nb_rng_tokens = rte_strsplit(tokens[i], + strlen(tokens[i]), rng_tokens, + RTE_DIM(rng_tokens), '-'); + if (nb_rng_tokens != 2) + return -3; + + /* Range and sanity checks. */ + if (get_val_securely(rng_tokens[0], &token_val) < 0) + return -4; + beg = (char)token_val; + if (get_val_securely(rng_tokens[1], &token_val) < 0) + return -4; + end = (char)token_val; + if (beg < 0 || beg > UCHAR_MAX || + end < 0 || end > UCHAR_MAX || end < beg) + return -4; + + for (val = beg; val <= end; ++val) { + if (val > max_val) + return -5; + + *tab = val; + tab = RTE_PTR_ADD(tab, elem_sz); + ++values; + if (values >= max_elems) + return -2; + } + } else { + /* Single values. */ + if (get_val_securely(tokens[i], &token_val) < 0) + return -5; + val = (char)token_val; + if (val > max_val) + return -5; + + *tab = val; + tab = RTE_PTR_ADD(tab, elem_sz); + ++values; + if (values >= max_elems) + return -2; + } + } + + return values; +} + +/** + * Parse Traffic Class'es mapping configuration. + * + * @param file Config file handle. + * @param port Which port to look for. + * @param tc Which Traffic Class to look for. + * @param cfg[out] Parsing results. + * @returns 0 in case of success, negative value otherwise. 
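+ *
+ * Illustrative configuration file fragment (values are only an example):
+ *
+ *   [port 0 tc 0]
+ *   rxq = 0 1 2-3
+ *   pcp = 0 1
+ *   dscp = 0-15
+ *   default_color = green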
+ */ +static int +parse_tc_cfg(struct rte_cfgfile *file, int port, int tc, + struct mrvl_qos_cfg *cfg) +{ + char sec_name[32]; + const char *entry; + int n; + + snprintf(sec_name, sizeof(sec_name), "%s %d %s %d", + MRVL_TOK_PORT, port, MRVL_TOK_TC, tc); + + /* Skip non-existing */ + if (rte_cfgfile_num_sections(file, sec_name, strlen(sec_name)) <= 0) + return 0; + + cfg->port[port].use_global_defaults = 0; + entry = rte_cfgfile_get_entry(file, sec_name, MRVL_TOK_RXQ); + if (entry) { + n = get_entry_values(entry, + cfg->port[port].tc[tc].inq, + sizeof(cfg->port[port].tc[tc].inq[0]), + RTE_DIM(cfg->port[port].tc[tc].inq), + MRVL_PP2_RXQ_MAX); + if (n < 0) { + MRVL_LOG(ERR, "Error %d while parsing: %s", + n, entry); + return n; + } + cfg->port[port].tc[tc].inqs = n; + } + + entry = rte_cfgfile_get_entry(file, sec_name, MRVL_TOK_PCP); + if (entry) { + n = get_entry_values(entry, + cfg->port[port].tc[tc].pcp, + sizeof(cfg->port[port].tc[tc].pcp[0]), + RTE_DIM(cfg->port[port].tc[tc].pcp), + MAX_PCP); + if (n < 0) { + MRVL_LOG(ERR, "Error %d while parsing: %s", + n, entry); + return n; + } + cfg->port[port].tc[tc].pcps = n; + } + + entry = rte_cfgfile_get_entry(file, sec_name, MRVL_TOK_DSCP); + if (entry) { + n = get_entry_values(entry, + cfg->port[port].tc[tc].dscp, + sizeof(cfg->port[port].tc[tc].dscp[0]), + RTE_DIM(cfg->port[port].tc[tc].dscp), + MAX_DSCP); + if (n < 0) { + MRVL_LOG(ERR, "Error %d while parsing: %s", + n, entry); + return n; + } + cfg->port[port].tc[tc].dscps = n; + } + + if (!cfg->port[port].setup_policer) + return 0; + + entry = rte_cfgfile_get_entry(file, sec_name, + MRVL_TOK_PLCR_DEFAULT_COLOR); + if (entry) { + if (!strncmp(entry, MRVL_TOK_PLCR_DEFAULT_COLOR_GREEN, + sizeof(MRVL_TOK_PLCR_DEFAULT_COLOR_GREEN))) { + cfg->port[port].tc[tc].color = PP2_PPIO_COLOR_GREEN; + } else if (!strncmp(entry, MRVL_TOK_PLCR_DEFAULT_COLOR_YELLOW, + sizeof(MRVL_TOK_PLCR_DEFAULT_COLOR_YELLOW))) { + cfg->port[port].tc[tc].color = PP2_PPIO_COLOR_YELLOW; + } else if (!strncmp(entry, MRVL_TOK_PLCR_DEFAULT_COLOR_RED, + sizeof(MRVL_TOK_PLCR_DEFAULT_COLOR_RED))) { + cfg->port[port].tc[tc].color = PP2_PPIO_COLOR_RED; + } else { + MRVL_LOG(ERR, "Error while parsing: %s", entry); + return -1; + } + } + + return 0; +} + +/** + * Parse default port policer. + * + * @param file Config file handle. + * @param sec_name Section name with policer configuration + * @param port Port number. + * @param cfg[out] Parsing results. + * @returns 0 in case of success, negative value otherwise. 
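+ *
+ * Illustrative configuration file fragment (values are only an example);
+ * the section is referenced from a port section via "default_policer = 0":
+ *
+ *   [policer 0]
+ *   token_unit = bytes
+ *   color_mode = blind
+ *   cir = 10000
+ *   cbs = 64
+ *   ebs = 64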
+ */ +static int +parse_policer(struct rte_cfgfile *file, int port, const char *sec_name, + struct mrvl_qos_cfg *cfg) +{ + const char *entry; + uint32_t val; + + /* Read policer token unit */ + entry = rte_cfgfile_get_entry(file, sec_name, MRVL_TOK_PLCR_UNIT); + if (entry) { + if (!strncmp(entry, MRVL_TOK_PLCR_UNIT_BYTES, + sizeof(MRVL_TOK_PLCR_UNIT_BYTES))) { + cfg->port[port].policer_params.token_unit = + PP2_CLS_PLCR_BYTES_TOKEN_UNIT; + } else if (!strncmp(entry, MRVL_TOK_PLCR_UNIT_PACKETS, + sizeof(MRVL_TOK_PLCR_UNIT_PACKETS))) { + cfg->port[port].policer_params.token_unit = + PP2_CLS_PLCR_PACKETS_TOKEN_UNIT; + } else { + MRVL_LOG(ERR, "Unknown token: %s", entry); + return -1; + } + } + + /* Read policer color mode */ + entry = rte_cfgfile_get_entry(file, sec_name, MRVL_TOK_PLCR_COLOR); + if (entry) { + if (!strncmp(entry, MRVL_TOK_PLCR_COLOR_BLIND, + sizeof(MRVL_TOK_PLCR_COLOR_BLIND))) { + cfg->port[port].policer_params.color_mode = + PP2_CLS_PLCR_COLOR_BLIND_MODE; + } else if (!strncmp(entry, MRVL_TOK_PLCR_COLOR_AWARE, + sizeof(MRVL_TOK_PLCR_COLOR_AWARE))) { + cfg->port[port].policer_params.color_mode = + PP2_CLS_PLCR_COLOR_AWARE_MODE; + } else { + MRVL_LOG(ERR, "Error in parsing: %s", entry); + return -1; + } + } + + /* Read policer cir */ + entry = rte_cfgfile_get_entry(file, sec_name, MRVL_TOK_PLCR_CIR); + if (entry) { + if (get_val_securely(entry, &val) < 0) + return -1; + cfg->port[port].policer_params.cir = val; + } + + /* Read policer cbs */ + entry = rte_cfgfile_get_entry(file, sec_name, MRVL_TOK_PLCR_CBS); + if (entry) { + if (get_val_securely(entry, &val) < 0) + return -1; + cfg->port[port].policer_params.cbs = val; + } + + /* Read policer ebs */ + entry = rte_cfgfile_get_entry(file, sec_name, MRVL_TOK_PLCR_EBS); + if (entry) { + if (get_val_securely(entry, &val) < 0) + return -1; + cfg->port[port].policer_params.ebs = val; + } + + cfg->port[port].setup_policer = 1; + + return 0; +} + +/** + * Parse QoS configuration - rte_kvargs_process handler. + * + * Opens configuration file and parses its content. + * + * @param key Unused. + * @param path Path to config file. + * @param extra_args Pointer to configuration structure. + * @returns 0 in case of success, exits otherwise. + */ +int +mrvl_get_qoscfg(const char *key __rte_unused, const char *path, + void *extra_args) +{ + struct mrvl_qos_cfg **cfg = extra_args; + struct rte_cfgfile *file = rte_cfgfile_load(path, 0); + uint32_t val; + int n, i, ret; + const char *entry; + char sec_name[32]; + + if (file == NULL) + rte_exit(EXIT_FAILURE, "Cannot load configuration %s\n", path); + + /* Create configuration. This is never accessed on the fast path, + * so we can ignore socket. + */ + *cfg = rte_zmalloc("mrvl_qos_cfg", sizeof(struct mrvl_qos_cfg), 0); + if (*cfg == NULL) + rte_exit(EXIT_FAILURE, "Cannot allocate configuration %s\n", + path); + + n = rte_cfgfile_num_sections(file, MRVL_TOK_PORT, + sizeof(MRVL_TOK_PORT) - 1); + + if (n == 0) { + /* This is weird, but not bad. */ + MRVL_LOG(WARNING, "Empty configuration file?"); + return 0; + } + + /* Use the number of ports given as vdev parameters. */ + for (n = 0; n < (PP2_NUM_ETH_PPIO * PP2_NUM_PKT_PROC); ++n) { + snprintf(sec_name, sizeof(sec_name), "%s %d %s", + MRVL_TOK_PORT, n, MRVL_TOK_DEFAULT); + + /* Use global defaults, unless an override occurs */ + (*cfg)->port[n].use_global_defaults = 1; + + /* Skip ports non-existing in configuration. */ + if (rte_cfgfile_num_sections(file, sec_name, + strlen(sec_name)) <= 0) { + continue; + } + + /* + * Read per-port rate limiting. 
Setting that will + * disable per-queue rate limiting. + */ + entry = rte_cfgfile_get_entry(file, sec_name, + MRVL_TOK_RATE_LIMIT_ENABLE); + if (entry) { + if (get_val_securely(entry, &val) < 0) + return -1; + (*cfg)->port[n].rate_limit_enable = val; + } + + if ((*cfg)->port[n].rate_limit_enable) { + entry = rte_cfgfile_get_entry(file, sec_name, + MRVL_TOK_BURST_SIZE); + if (entry) { + if (get_val_securely(entry, &val) < 0) + return -1; + (*cfg)->port[n].rate_limit_params.cbs = val; + } + + entry = rte_cfgfile_get_entry(file, sec_name, + MRVL_TOK_RATE_LIMIT); + if (entry) { + if (get_val_securely(entry, &val) < 0) + return -1; + (*cfg)->port[n].rate_limit_params.cir = val; + } + } + + entry = rte_cfgfile_get_entry(file, sec_name, + MRVL_TOK_MAPPING_PRIORITY); + if (entry) { + (*cfg)->port[n].use_global_defaults = 0; + if (!strncmp(entry, MRVL_TOK_VLAN_IP, + sizeof(MRVL_TOK_VLAN_IP))) + (*cfg)->port[n].mapping_priority = + PP2_CLS_QOS_TBL_VLAN_IP_PRI; + else if (!strncmp(entry, MRVL_TOK_IP_VLAN, + sizeof(MRVL_TOK_IP_VLAN))) + (*cfg)->port[n].mapping_priority = + PP2_CLS_QOS_TBL_IP_VLAN_PRI; + else if (!strncmp(entry, MRVL_TOK_IP, + sizeof(MRVL_TOK_IP))) + (*cfg)->port[n].mapping_priority = + PP2_CLS_QOS_TBL_IP_PRI; + else if (!strncmp(entry, MRVL_TOK_VLAN, + sizeof(MRVL_TOK_VLAN))) + (*cfg)->port[n].mapping_priority = + PP2_CLS_QOS_TBL_VLAN_PRI; + else + rte_exit(EXIT_FAILURE, + "Error in parsing %s value (%s)!\n", + MRVL_TOK_MAPPING_PRIORITY, entry); + } else { + (*cfg)->port[n].mapping_priority = + PP2_CLS_QOS_TBL_VLAN_IP_PRI; + } + + /* Parse policer configuration (if any) */ + entry = rte_cfgfile_get_entry(file, sec_name, + MRVL_TOK_PLCR_DEFAULT); + if (entry) { + (*cfg)->port[n].use_global_defaults = 0; + if (get_val_securely(entry, &val) < 0) + return -1; + + snprintf(sec_name, sizeof(sec_name), "%s %d", + MRVL_TOK_PLCR, val); + ret = parse_policer(file, n, sec_name, *cfg); + if (ret) + return -1; + } + + for (i = 0; i < MRVL_PP2_RXQ_MAX; ++i) { + ret = get_outq_cfg(file, n, i, *cfg); + if (ret < 0) + rte_exit(EXIT_FAILURE, + "Error %d parsing port %d outq %d!\n", + ret, n, i); + } + + for (i = 0; i < MRVL_PP2_TC_MAX; ++i) { + ret = parse_tc_cfg(file, n, i, *cfg); + if (ret < 0) + rte_exit(EXIT_FAILURE, + "Error %d parsing port %d tc %d!\n", + ret, n, i); + } + + entry = rte_cfgfile_get_entry(file, sec_name, + MRVL_TOK_DEFAULT_TC); + if (entry) { + if (get_val_securely(entry, &val) < 0 || + val > USHRT_MAX) + return -1; + (*cfg)->port[n].default_tc = (uint8_t)val; + } else { + if ((*cfg)->port[n].use_global_defaults == 0) { + MRVL_LOG(ERR, + "Default Traffic Class required in custom configuration!"); + return -1; + } + } + } + + return 0; +} + +/** + * Setup Traffic Class. + * + * Fill in TC parameters in single MUSDK TC config entry. + * @param param TC parameters entry. + * @param inqs Number of MUSDK in-queues in this TC. + * @param bpool Bpool for this TC. + * @param color Default color for this TC. + * @returns 0 in case of success, exits otherwise. + */ +static int +setup_tc(struct pp2_ppio_tc_params *param, uint8_t inqs, + struct pp2_bpool *bpool, enum pp2_ppio_color color) +{ + struct pp2_ppio_inq_params *inq_params; + + param->pkt_offset = MRVL_PKT_OFFS; + param->pools[0][0] = bpool; + param->default_color = color; + + inq_params = rte_zmalloc_socket("inq_params", + inqs * sizeof(*inq_params), + 0, rte_socket_id()); + if (!inq_params) + return -ENOMEM; + + param->num_in_qs = inqs; + + /* Release old config if necessary. 
*/ + if (param->inqs_params) + rte_free(param->inqs_params); + + param->inqs_params = inq_params; + + return 0; +} + +/** + * Setup ingress policer. + * + * @param priv Port's private data. + * @param params Pointer to the policer's configuration. + * @param plcr_id Policer id. + * @returns 0 in case of success, negative values otherwise. + */ +static int +setup_policer(struct mrvl_priv *priv, struct pp2_cls_plcr_params *params) +{ + char match[16]; + int ret; + + /* + * At this point no other policers are used which means + * any policer can be picked up and used as a default one. + * + * Lets use 0th then. + */ + sprintf(match, "policer-%d:%d\n", priv->pp_id, 0); + params->match = match; + + ret = pp2_cls_plcr_init(params, &priv->default_policer); + if (ret) { + MRVL_LOG(ERR, "Failed to setup %s", match); + return -1; + } + + priv->ppio_params.inqs_params.plcr = priv->default_policer; + priv->used_plcrs = BIT(0); + + return 0; +} + +/** + * Configure RX Queues in a given port. + * + * Sets up RX queues, their Traffic Classes and DPDK rxq->(TC,inq) mapping. + * + * @param priv Port's private data + * @param portid DPDK port ID + * @param max_queues Maximum number of queues to configure. + * @returns 0 in case of success, negative value otherwise. + */ +int +mrvl_configure_rxqs(struct mrvl_priv *priv, uint16_t portid, + uint16_t max_queues) +{ + size_t i, tc; + + if (mrvl_qos_cfg == NULL || + mrvl_qos_cfg->port[portid].use_global_defaults) { + /* + * No port configuration, use default: 1 TC, no QoS, + * TC color set to green. + */ + priv->ppio_params.inqs_params.num_tcs = 1; + setup_tc(&priv->ppio_params.inqs_params.tcs_params[0], + max_queues, priv->bpool, PP2_PPIO_COLOR_GREEN); + + /* Direct mapping of queues i.e. 0->0, 1->1 etc. */ + for (i = 0; i < max_queues; ++i) { + priv->rxq_map[i].tc = 0; + priv->rxq_map[i].inq = i; + } + return 0; + } + + /* We need only a subset of configuration. */ + struct port_cfg *port_cfg = &mrvl_qos_cfg->port[portid]; + + priv->qos_tbl_params.type = port_cfg->mapping_priority; + + /* + * We need to reverse mapping, from tc->pcp (better from usability + * point of view) to pcp->tc (configurable in MUSDK). + * First, set all map elements to "default". + */ + for (i = 0; i < RTE_DIM(priv->qos_tbl_params.pcp_cos_map); ++i) + priv->qos_tbl_params.pcp_cos_map[i].tc = port_cfg->default_tc; + + /* Then, fill in all known values. */ + for (tc = 0; tc < RTE_DIM(port_cfg->tc); ++tc) { + if (port_cfg->tc[tc].pcps > RTE_DIM(port_cfg->tc[0].pcp)) { + /* Better safe than sorry. */ + MRVL_LOG(ERR, + "Too many PCPs configured in TC %zu!", tc); + return -1; + } + for (i = 0; i < port_cfg->tc[tc].pcps; ++i) { + priv->qos_tbl_params.pcp_cos_map[ + port_cfg->tc[tc].pcp[i]].tc = tc; + } + } + + /* + * The same logic goes with DSCP. + * First, set all map elements to "default". + */ + for (i = 0; i < RTE_DIM(priv->qos_tbl_params.dscp_cos_map); ++i) + priv->qos_tbl_params.dscp_cos_map[i].tc = + port_cfg->default_tc; + + /* Fill in all known values. */ + for (tc = 0; tc < RTE_DIM(port_cfg->tc); ++tc) { + if (port_cfg->tc[tc].dscps > RTE_DIM(port_cfg->tc[0].dscp)) { + /* Better safe than sorry. */ + MRVL_LOG(ERR, + "Too many DSCPs configured in TC %zu!", tc); + return -1; + } + for (i = 0; i < port_cfg->tc[tc].dscps; ++i) { + priv->qos_tbl_params.dscp_cos_map[ + port_cfg->tc[tc].dscp[i]].tc = tc; + } + } + + /* + * Surprisingly, similar logic goes with queue mapping. + * We need only to store qid->tc mapping, + * to know TC when queue is read. 
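+ * For example, if DPDK rxq 5 is listed as the second inq of tc 1 in
+ * the configuration, rxq_map[5] ends up as { .tc = 1, .inq = 1 }.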
+ */ + for (i = 0; i < RTE_DIM(priv->rxq_map); ++i) + priv->rxq_map[i].tc = MRVL_UNKNOWN_TC; + + /* Set up DPDKq->(TC,inq) mapping. */ + for (tc = 0; tc < RTE_DIM(port_cfg->tc); ++tc) { + if (port_cfg->tc[tc].inqs > RTE_DIM(port_cfg->tc[0].inq)) { + /* Overflow. */ + MRVL_LOG(ERR, + "Too many RX queues configured per TC %zu!", + tc); + return -1; + } + for (i = 0; i < port_cfg->tc[tc].inqs; ++i) { + uint8_t idx = port_cfg->tc[tc].inq[i]; + + if (idx > RTE_DIM(priv->rxq_map)) { + MRVL_LOG(ERR, "Bad queue index %d!", idx); + return -1; + } + + priv->rxq_map[idx].tc = tc; + priv->rxq_map[idx].inq = i; + } + } + + /* + * Set up TC configuration. TCs need to be sequenced: 0, 1, 2 + * with no gaps. Empty TC means end of processing. + */ + for (i = 0; i < MRVL_PP2_TC_MAX; ++i) { + if (port_cfg->tc[i].inqs == 0) + break; + setup_tc(&priv->ppio_params.inqs_params.tcs_params[i], + port_cfg->tc[i].inqs, + priv->bpool, port_cfg->tc[i].color); + } + + priv->ppio_params.inqs_params.num_tcs = i; + + if (port_cfg->setup_policer) + return setup_policer(priv, &port_cfg->policer_params); + + return 0; +} + +/** + * Configure TX Queues in a given port. + * + * Sets up TX queues egress scheduler and limiter. + * + * @param priv Port's private data + * @param portid DPDK port ID + * @param max_queues Maximum number of queues to configure. + * @returns 0 in case of success, negative value otherwise. + */ +int +mrvl_configure_txqs(struct mrvl_priv *priv, uint16_t portid, + uint16_t max_queues) +{ + /* We need only a subset of configuration. */ + struct port_cfg *port_cfg = &mrvl_qos_cfg->port[portid]; + int i; + + if (mrvl_qos_cfg == NULL) + return 0; + + priv->ppio_params.rate_limit_enable = port_cfg->rate_limit_enable; + if (port_cfg->rate_limit_enable) + priv->ppio_params.rate_limit_params = + port_cfg->rate_limit_params; + + for (i = 0; i < max_queues; i++) { + struct pp2_ppio_outq_params *params = + &priv->ppio_params.outqs_params.outqs_params[i]; + + params->sched_mode = port_cfg->outq[i].sched_mode; + params->weight = port_cfg->outq[i].weight; + params->rate_limit_enable = port_cfg->outq[i].rate_limit_enable; + params->rate_limit_params = port_cfg->outq[i].rate_limit_params; + } + + return 0; +} + +/** + * Start QoS mapping. + * + * Finalize QoS table configuration and initialize it in SDK. It can be done + * only after port is started, so we have a valid ppio reference. + * + * @param priv Port's private (configuration) data. + * @returns 0 in case of success, exits otherwise. + */ +int +mrvl_start_qos_mapping(struct mrvl_priv *priv) +{ + size_t i; + + if (priv->ppio == NULL) { + MRVL_LOG(ERR, "ppio must not be NULL here!"); + return -1; + } + + for (i = 0; i < RTE_DIM(priv->qos_tbl_params.pcp_cos_map); ++i) + priv->qos_tbl_params.pcp_cos_map[i].ppio = priv->ppio; + + for (i = 0; i < RTE_DIM(priv->qos_tbl_params.dscp_cos_map); ++i) + priv->qos_tbl_params.dscp_cos_map[i].ppio = priv->ppio; + + /* Initialize Classifier QoS table. */ + + return pp2_cls_qos_tbl_init(&priv->qos_tbl_params, &priv->qos_tbl); +} diff --git a/src/spdk/dpdk/drivers/net/mvpp2/mrvl_qos.h b/src/spdk/dpdk/drivers/net/mvpp2/mrvl_qos.h new file mode 100644 index 000000000..f03e7731c --- /dev/null +++ b/src/spdk/dpdk/drivers/net/mvpp2/mrvl_qos.h @@ -0,0 +1,107 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Marvell International Ltd. + * Copyright(c) 2017 Semihalf. + * All rights reserved. + */ + +#ifndef _MRVL_QOS_H_ +#define _MRVL_QOS_H_ + +#include + +#include "mrvl_ethdev.h" + +/** Code Points per Traffic Class. 
Equals max(DSCP, PCP). */ +#define MRVL_CP_PER_TC (64) + +/** Value used as "unknown". */ +#define MRVL_UNKNOWN_TC (0xFF) + +/* QoS config. */ +struct mrvl_qos_cfg { + struct port_cfg { + int rate_limit_enable; + struct pp2_ppio_rate_limit_params rate_limit_params; + struct { + uint8_t inq[MRVL_PP2_RXQ_MAX]; + uint8_t dscp[MRVL_CP_PER_TC]; + uint8_t pcp[MRVL_CP_PER_TC]; + uint8_t inqs; + uint8_t dscps; + uint8_t pcps; + enum pp2_ppio_color color; + } tc[MRVL_PP2_TC_MAX]; + struct { + enum pp2_ppio_outq_sched_mode sched_mode; + uint8_t weight; + int rate_limit_enable; + struct pp2_ppio_rate_limit_params rate_limit_params; + } outq[MRVL_PP2_RXQ_MAX]; + enum pp2_cls_qos_tbl_type mapping_priority; + uint16_t inqs; + uint16_t outqs; + uint8_t default_tc; + uint8_t use_global_defaults; + struct pp2_cls_plcr_params policer_params; + uint8_t setup_policer; + } port[RTE_MAX_ETHPORTS]; +}; + +/** Global QoS configuration. */ +extern struct mrvl_qos_cfg *mrvl_qos_cfg; + +/** + * Parse QoS configuration - rte_kvargs_process handler. + * + * Opens configuration file and parses its content. + * + * @param key Unused. + * @param path Path to config file. + * @param extra_args Pointer to configuration structure. + * @returns 0 in case of success, exits otherwise. + */ +int +mrvl_get_qoscfg(const char *key __rte_unused, const char *path, + void *extra_args); + +/** + * Configure RX Queues in a given port. + * + * Sets up RX queues, their Traffic Classes and DPDK rxq->(TC,inq) mapping. + * + * @param priv Port's private data + * @param portid DPDK port ID + * @param max_queues Maximum number of queues to configure. + * @returns 0 in case of success, negative value otherwise. + */ +int +mrvl_configure_rxqs(struct mrvl_priv *priv, uint16_t portid, + uint16_t max_queues); + +/** + * Configure TX Queues in a given port. + * + * Sets up TX queues egress scheduler and limiter. + * + * @param priv Port's private data + * @param portid DPDK port ID + * @param max_queues Maximum number of queues to configure. + * @returns 0 in case of success, negative value otherwise. + */ +int +mrvl_configure_txqs(struct mrvl_priv *priv, uint16_t portid, + uint16_t max_queues); + +/** + * Start QoS mapping. + * + * Finalize QoS table configuration and initialize it in SDK. It can be done + * only after port is started, so we have a valid ppio reference. + * + * @param priv Port's private (configuration) data. + * @returns 0 in case of success, exits otherwise. + */ +int +mrvl_start_qos_mapping(struct mrvl_priv *priv); + +#endif /* _MRVL_QOS_H_ */ diff --git a/src/spdk/dpdk/drivers/net/mvpp2/mrvl_tm.c b/src/spdk/dpdk/drivers/net/mvpp2/mrvl_tm.c new file mode 100644 index 000000000..3de899703 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/mvpp2/mrvl_tm.c @@ -0,0 +1,1009 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Marvell International Ltd. + * Copyright(c) 2018 Semihalf. + * All rights reserved. + */ + +#include + +#include +#include +#include +#include + +#include "mrvl_tm.h" + +/** Minimum rate value in Bytes/s */ +#define MRVL_RATE_MIN (PP2_PPIO_MIN_CIR * 1000 / 8) + +/** Minimum burst size in Bytes */ +#define MRVL_BURST_MIN (PP2_PPIO_MIN_CBS * 1000) + +/** Maximum burst size in Bytes */ +#define MRVL_BURST_MAX 256000000 + +/** Maximum WRR weight */ +#define MRVL_WEIGHT_MAX 255 + +/** + * Get maximum port rate in Bytes/s. + * + * @param dev Pointer to the device. + * @param rate Pointer to the rate. + * @returns 0 on success, negative value otherwise. 
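+ *
+ * The value is derived from the link speed reported by the ETHTOOL_GSET
+ * ioctl (in Mbps) and converted to Bytes/s.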
+ */ +static int +mrvl_get_max_rate(struct rte_eth_dev *dev, uint64_t *rate) +{ + struct ethtool_cmd edata; + struct ifreq req; + int ret, fd; + + memset(&edata, 0, sizeof(edata)); + memset(&req, 0, sizeof(req)); + edata.cmd = ETHTOOL_GSET; + strcpy(req.ifr_name, dev->data->name); + req.ifr_data = (void *)&edata; + + fd = socket(AF_INET, SOCK_DGRAM, 0); + if (fd == -1) + return -1; + + ret = ioctl(fd, SIOCETHTOOL, &req); + if (ret == -1) { + close(fd); + return -1; + } + + close(fd); + + *rate = ethtool_cmd_speed(&edata) * 1000 * 1000 / 8; + + return 0; +} + +/** + * Initialize traffic manager related data. + * + * @param dev Pointer to the device. + * @returns 0 on success, failure otherwise. + */ +int +mrvl_tm_init(struct rte_eth_dev *dev) +{ + struct mrvl_priv *priv = dev->data->dev_private; + + LIST_INIT(&priv->shaper_profiles); + LIST_INIT(&priv->nodes); + + if (priv->rate_max) + return 0; + + return mrvl_get_max_rate(dev, &priv->rate_max); +} + +/** + * Cleanup traffic manager related data. + * + * @param dev Pointer to the device. + */ +void mrvl_tm_deinit(struct rte_eth_dev *dev) +{ + struct mrvl_priv *priv = dev->data->dev_private; + struct mrvl_tm_shaper_profile *profile = + LIST_FIRST(&priv->shaper_profiles); + struct mrvl_tm_node *node = LIST_FIRST(&priv->nodes); + + while (profile) { + struct mrvl_tm_shaper_profile *next = LIST_NEXT(profile, next); + + LIST_REMOVE(profile, next); + rte_free(profile); + profile = next; + } + + while (node) { + struct mrvl_tm_node *next = LIST_NEXT(node, next); + + LIST_REMOVE(node, next); + rte_free(node); + node = next; + } +} + +/** + * Get node using its id. + * + * @param priv Pointer to the port's private data. + * @param node_id Id used by this node. + * @returns Pointer to the node if exists, NULL otherwise. + */ +static struct mrvl_tm_node * +mrvl_node_from_id(struct mrvl_priv *priv, uint32_t node_id) +{ + struct mrvl_tm_node *node; + + LIST_FOREACH(node, &priv->nodes, next) + if (node->id == node_id) + return node; + + return NULL; +} + +/** + * Check whether node is leaf or root. + * + * @param dev Pointer to the device. + * @param node_id Id used by this node. + * @param is_leaf Pointer to flag indicating whether node is a leaf. + * @param error Pointer to the error. + * @returns 0 on success, negative value otherwise. + */ +static int +mrvl_node_type_get(struct rte_eth_dev *dev, uint32_t node_id, int *is_leaf, + struct rte_tm_error *error) +{ + struct mrvl_priv *priv = dev->data->dev_private; + struct mrvl_tm_node *node; + + if (!is_leaf) + return -rte_tm_error_set(error, EINVAL, + RTE_TM_ERROR_TYPE_UNSPECIFIED, + NULL, NULL); + + node = mrvl_node_from_id(priv, node_id); + if (!node) + return -rte_tm_error_set(error, ENODEV, + RTE_TM_ERROR_TYPE_NODE_ID, + NULL, "Node id does not exist\n"); + + *is_leaf = node->type == MRVL_NODE_QUEUE ? 1 : 0; + + return 0; +} + +/** + * Get traffic manager capabilities. + * + * @param dev Pointer to the device (unused). + * @param cap Pointer to the capabilities. + * @param error Pointer to the error. + * @returns 0 on success, negative value otherwise. 
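+ *
+ * Illustrative note: the capabilities describe a fixed two-level
+ * hierarchy, one non-leaf port node plus one leaf node per TX queue.
+ * For example, with dev->data->nb_tx_queues == 4 the function reports
+ * n_nodes_max = 5, n_levels_max = 2, sched_n_children_max = 4 and
+ * sched_wfq_n_groups_max = 1.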
+ */ +static int +mrvl_capabilities_get(struct rte_eth_dev *dev, + struct rte_tm_capabilities *cap, + struct rte_tm_error *error) +{ + struct mrvl_priv *priv = dev->data->dev_private; + + if (!cap) + return -rte_tm_error_set(error, EINVAL, + RTE_TM_ERROR_TYPE_UNSPECIFIED, + NULL, "Capabilities are missing\n"); + + memset(cap, 0, sizeof(*cap)); + + cap->n_nodes_max = 1 + dev->data->nb_tx_queues; /* port + txqs number */ + cap->n_levels_max = 2; /* port level + txqs level */ + cap->non_leaf_nodes_identical = 1; + cap->leaf_nodes_identical = 1; + + cap->shaper_n_max = cap->n_nodes_max; + cap->shaper_private_n_max = cap->shaper_n_max; + cap->shaper_private_rate_min = MRVL_RATE_MIN; + cap->shaper_private_rate_max = priv->rate_max; + + cap->sched_n_children_max = dev->data->nb_tx_queues; + cap->sched_sp_n_priorities_max = dev->data->nb_tx_queues; + cap->sched_wfq_n_children_per_group_max = dev->data->nb_tx_queues; + cap->sched_wfq_n_groups_max = 1; + cap->sched_wfq_weight_max = MRVL_WEIGHT_MAX; + + cap->dynamic_update_mask = RTE_TM_UPDATE_NODE_SUSPEND_RESUME | + RTE_TM_UPDATE_NODE_STATS; + cap->stats_mask = RTE_TM_STATS_N_PKTS | RTE_TM_STATS_N_BYTES; + + return 0; +} + +/** + * Get traffic manager hierarchy level capabilities. + * + * @param dev Pointer to the device. + * @param level_id Id of the level. + * @param cap Pointer to the level capabilities. + * @param error Pointer to the error. + * @returns 0 on success, negative value otherwise. + */ +static int +mrvl_level_capabilities_get(struct rte_eth_dev *dev, + uint32_t level_id, + struct rte_tm_level_capabilities *cap, + struct rte_tm_error *error) +{ + struct mrvl_priv *priv = dev->data->dev_private; + + if (!cap) + return -rte_tm_error_set(error, EINVAL, + RTE_TM_ERROR_TYPE_UNSPECIFIED, + NULL, NULL); + + memset(cap, 0, sizeof(*cap)); + + if (level_id != MRVL_NODE_PORT && level_id != MRVL_NODE_QUEUE) + return -rte_tm_error_set(error, EINVAL, + RTE_TM_ERROR_TYPE_LEVEL_ID, + NULL, "Wrong level id\n"); + + if (level_id == MRVL_NODE_PORT) { + cap->n_nodes_max = 1; + cap->n_nodes_nonleaf_max = 1; + cap->non_leaf_nodes_identical = 1; + + cap->nonleaf.shaper_private_supported = 1; + cap->nonleaf.shaper_private_rate_min = MRVL_RATE_MIN; + cap->nonleaf.shaper_private_rate_max = priv->rate_max; + + cap->nonleaf.sched_n_children_max = dev->data->nb_tx_queues; + cap->nonleaf.sched_sp_n_priorities_max = 1; + cap->nonleaf.sched_wfq_n_children_per_group_max = + dev->data->nb_tx_queues; + cap->nonleaf.sched_wfq_n_groups_max = 1; + cap->nonleaf.sched_wfq_weight_max = MRVL_WEIGHT_MAX; + cap->nonleaf.stats_mask = RTE_TM_STATS_N_PKTS | + RTE_TM_STATS_N_BYTES; + } else { /* level_id == MRVL_NODE_QUEUE */ + cap->n_nodes_max = dev->data->nb_tx_queues; + cap->n_nodes_leaf_max = dev->data->nb_tx_queues; + cap->leaf_nodes_identical = 1; + + cap->leaf.shaper_private_supported = 1; + cap->leaf.shaper_private_rate_min = MRVL_RATE_MIN; + cap->leaf.shaper_private_rate_max = priv->rate_max; + cap->leaf.stats_mask = RTE_TM_STATS_N_PKTS; + } + + return 0; +} + +/** + * Get node capabilities. + * + * @param dev Pointer to the device. + * @param node_id Id of the node. + * @param cap Pointer to the capabilities. + * @param error Pointer to the error. + * @returns 0 on success, negative value otherwise. 
+ */ +static int +mrvl_node_capabilities_get(struct rte_eth_dev *dev, uint32_t node_id, + struct rte_tm_node_capabilities *cap, + struct rte_tm_error *error) +{ + struct mrvl_priv *priv = dev->data->dev_private; + struct mrvl_tm_node *node; + + if (!cap) + return -rte_tm_error_set(error, EINVAL, + RTE_TM_ERROR_TYPE_UNSPECIFIED, + NULL, NULL); + + memset(cap, 0, sizeof(*cap)); + + node = mrvl_node_from_id(priv, node_id); + if (!node) + return -rte_tm_error_set(error, ENODEV, + RTE_TM_ERROR_TYPE_NODE_ID, + NULL, "Node id does not exist\n"); + + cap->shaper_private_supported = 1; + cap->shaper_private_rate_min = MRVL_RATE_MIN; + cap->shaper_private_rate_max = priv->rate_max; + + if (node->type == MRVL_NODE_PORT) { + cap->nonleaf.sched_n_children_max = dev->data->nb_tx_queues; + cap->nonleaf.sched_sp_n_priorities_max = 1; + cap->nonleaf.sched_wfq_n_children_per_group_max = + dev->data->nb_tx_queues; + cap->nonleaf.sched_wfq_n_groups_max = 1; + cap->nonleaf.sched_wfq_weight_max = MRVL_WEIGHT_MAX; + cap->stats_mask = RTE_TM_STATS_N_PKTS | RTE_TM_STATS_N_BYTES; + } else { + cap->stats_mask = RTE_TM_STATS_N_PKTS; + } + + return 0; +} + +/** + * Get shaper profile using its id. + * + * @param priv Pointer to the port's private data. + * @param shaper_profile_id Id used by the shaper. + * @returns Pointer to the shaper profile if exists, NULL otherwise. + */ +static struct mrvl_tm_shaper_profile * +mrvl_shaper_profile_from_id(struct mrvl_priv *priv, uint32_t shaper_profile_id) +{ + struct mrvl_tm_shaper_profile *profile; + + LIST_FOREACH(profile, &priv->shaper_profiles, next) + if (profile->id == shaper_profile_id) + return profile; + + return NULL; +} + +/** + * Add a new shaper profile. + * + * @param dev Pointer to the device. + * @param shaper_profile_id Id of the new profile. + * @param params Pointer to the shaper profile parameters. + * @param error Pointer to the error. + * @returns 0 on success, negative value otherwise. 
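+ *
+ * Illustrative sketch with example values (port_id and the profile id 1
+ * are placeholders): only the peak token bucket is accepted, the
+ * committed rate and size must stay 0. Assuming the port's maximum rate
+ * allows it, a profile of roughly 1 Gbit/s with a 1 MB burst could be
+ * created from an application as:
+ *
+ *	struct rte_tm_shaper_params sp = {
+ *		.peak = { .rate = 125000000, .size = 1000000 },
+ *	};
+ *	struct rte_tm_error err;
+ *	int ret = rte_tm_shaper_profile_add(port_id, 1, &sp, &err);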
+ */ +static int +mrvl_shaper_profile_add(struct rte_eth_dev *dev, uint32_t shaper_profile_id, + struct rte_tm_shaper_params *params, + struct rte_tm_error *error) +{ + struct mrvl_priv *priv = dev->data->dev_private; + struct mrvl_tm_shaper_profile *profile; + + if (!params) + return -rte_tm_error_set(error, EINVAL, + RTE_TM_ERROR_TYPE_UNSPECIFIED, + NULL, NULL); + + if (params->committed.rate) + return -rte_tm_error_set(error, EINVAL, + RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE, + NULL, "Committed rate not supported\n"); + + if (params->committed.size) + return -rte_tm_error_set(error, EINVAL, + RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_SIZE, + NULL, "Committed bucket size not supported\n"); + + if (params->peak.rate < MRVL_RATE_MIN || + params->peak.rate > priv->rate_max) + return -rte_tm_error_set(error, EINVAL, + RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_RATE, + NULL, "Peak rate is out of range\n"); + + if (params->peak.size < MRVL_BURST_MIN || + params->peak.size > MRVL_BURST_MAX) + return -rte_tm_error_set(error, EINVAL, + RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE, + NULL, "Peak size is out of range\n"); + + if (shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE) + return -rte_tm_error_set(error, EINVAL, + RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID, + NULL, "Wrong shaper profile id\n"); + + profile = mrvl_shaper_profile_from_id(priv, shaper_profile_id); + if (profile) + return -rte_tm_error_set(error, EEXIST, + RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID, + NULL, "Profile id already exists\n"); + + profile = rte_zmalloc_socket(NULL, sizeof(*profile), 0, + rte_socket_id()); + if (!profile) + return -rte_tm_error_set(error, ENOMEM, + RTE_TM_ERROR_TYPE_UNSPECIFIED, + NULL, NULL); + + profile->id = shaper_profile_id; + rte_memcpy(&profile->params, params, sizeof(profile->params)); + + LIST_INSERT_HEAD(&priv->shaper_profiles, profile, next); + + return 0; +} + +/** + * Remove a shaper profile. + * + * @param dev Pointer to the device. + * @param shaper_profile_id Id of the shaper profile. + * @param error Pointer to the error. + * @returns 0 on success, negative value otherwise. + */ +static int +mrvl_shaper_profile_delete(struct rte_eth_dev *dev, uint32_t shaper_profile_id, + struct rte_tm_error *error) +{ + struct mrvl_priv *priv = dev->data->dev_private; + struct mrvl_tm_shaper_profile *profile; + + profile = mrvl_shaper_profile_from_id(priv, shaper_profile_id); + if (!profile) + return -rte_tm_error_set(error, ENODEV, + RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID, + NULL, "Profile id does not exist\n"); + + if (profile->refcnt) + return -rte_tm_error_set(error, EPERM, + RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID, + NULL, "Profile is used\n"); + + LIST_REMOVE(profile, next); + rte_free(profile); + + return 0; +} + +/** + * Check node parameters. + * + * @param dev Pointer to the device. + * @param node_id Id used by the node. + * @param priority Priority value. + * @param weight Weight value. + * @param level_id Id of the level. + * @param params Pointer to the node parameters. + * @param error Pointer to the error. + * @returns 0 on success, negative value otherwise. 
+ */ +static int +mrvl_node_check_params(struct rte_eth_dev *dev, uint32_t node_id, + uint32_t priority, uint32_t weight, uint32_t level_id, + struct rte_tm_node_params *params, + struct rte_tm_error *error) +{ + if (node_id == RTE_TM_NODE_ID_NULL) + return -rte_tm_error_set(error, EINVAL, RTE_TM_NODE_ID_NULL, + NULL, "Node id is invalid\n"); + + if (priority) + return -rte_tm_error_set(error, EINVAL, + RTE_TM_ERROR_TYPE_NODE_PRIORITY, + NULL, "Priority should be 0\n"); + + if (weight > MRVL_WEIGHT_MAX) + return -rte_tm_error_set(error, EINVAL, + RTE_TM_ERROR_TYPE_NODE_WEIGHT, + NULL, "Weight is out of range\n"); + + if (level_id != MRVL_NODE_PORT && level_id != MRVL_NODE_QUEUE) + return -rte_tm_error_set(error, EINVAL, + RTE_TM_ERROR_TYPE_LEVEL_ID, + NULL, "Wrong level id\n"); + + if (!params) + return -rte_tm_error_set(error, EINVAL, + RTE_TM_ERROR_TYPE_UNSPECIFIED, + NULL, NULL); + + if (params->shared_shaper_id) + return -rte_tm_error_set(error, EINVAL, + RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID, + NULL, "Shared shaper is not supported\n"); + + if (params->n_shared_shapers) + return -rte_tm_error_set(error, EINVAL, + RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS, + NULL, "Shared shaper is not supported\n"); + + /* verify port (root node) settings */ + if (node_id >= dev->data->nb_tx_queues) { + if (params->nonleaf.wfq_weight_mode) + return -rte_tm_error_set(error, EINVAL, + RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE, + NULL, "WFQ is not supported\n"); + + if (params->nonleaf.n_sp_priorities != 1) + return -rte_tm_error_set(error, EINVAL, + RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES, + NULL, "SP is not supported\n"); + + if (params->stats_mask & ~(RTE_TM_STATS_N_PKTS | + RTE_TM_STATS_N_BYTES)) + return -rte_tm_error_set(error, EINVAL, + RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS, + NULL, + "Requested port stats are not supported\n"); + + return 0; + } + + /* verify txq (leaf node) settings */ + if (params->leaf.cman) + return -rte_tm_error_set(error, EINVAL, + RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN, + NULL, + "Congestion mngmt is not supported\n"); + + if (params->leaf.wred.wred_profile_id) + return -rte_tm_error_set(error, EINVAL, + RTE_TM_ERROR_TYPE_NODE_PARAMS_WRED_PROFILE_ID, + NULL, "WRED is not supported\n"); + + if (params->leaf.wred.shared_wred_context_id) + return -rte_tm_error_set(error, EINVAL, + RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_WRED_CONTEXT_ID, + NULL, "WRED is not supported\n"); + + if (params->leaf.wred.n_shared_wred_contexts) + return -rte_tm_error_set(error, EINVAL, + RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_WRED_CONTEXTS, + NULL, "WRED is not supported\n"); + + if (params->stats_mask & ~RTE_TM_STATS_N_PKTS) + return -rte_tm_error_set(error, EINVAL, + RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS, + NULL, + "Requested txq stats are not supported\n"); + + return 0; +} + +/** + * Add a new node. + * + * @param dev Pointer to the device. + * @param node_id Id of the node. + * @param parent_node_id Id of the parent node. + * @param priority Priority value. + * @param weight Weight value. + * @param level_id Id of the level. + * @param params Pointer to the node parameters. + * @param error Pointer to the error. + * @returns 0 on success, negative value otherwise. 
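+ *
+ * Illustrative sketch with example ids and weights: an application
+ * builds the two-level hierarchy by first adding a root node whose
+ * parent is RTE_TM_NODE_ID_NULL and whose id lies outside the TX queue
+ * range, then one leaf per TX queue whose id equals the queue id.
+ * With nb_txq TX queues this could look like:
+ *
+ *	struct rte_tm_node_params root_p = {
+ *		.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE,
+ *		.nonleaf = { .n_sp_priorities = 1 },
+ *	};
+ *	struct rte_tm_node_params leaf_p = {
+ *		.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE,
+ *	};
+ *	struct rte_tm_error err;
+ *
+ *	rte_tm_node_add(port_id, nb_txq, RTE_TM_NODE_ID_NULL, 0, 1,
+ *			MRVL_NODE_PORT, &root_p, &err);
+ *	rte_tm_node_add(port_id, 0, nb_txq, 0, 1,
+ *			MRVL_NODE_QUEUE, &leaf_p, &err);
+ *
+ * MRVL_NODE_PORT and MRVL_NODE_QUEUE are the driver's internal level
+ * ids; an application would pass the corresponding numeric levels
+ * reported by rte_tm_level_capabilities_get().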
+ */ +static int +mrvl_node_add(struct rte_eth_dev *dev, uint32_t node_id, + uint32_t parent_node_id, uint32_t priority, uint32_t weight, + uint32_t level_id, struct rte_tm_node_params *params, + struct rte_tm_error *error) +{ + struct mrvl_priv *priv = dev->data->dev_private; + struct mrvl_tm_shaper_profile *profile = NULL; + struct mrvl_tm_node *node, *parent = NULL; + int ret; + + if (priv->ppio) + return -rte_tm_error_set(error, EPERM, + RTE_TM_ERROR_TYPE_UNSPECIFIED, + NULL, "Port is already started\n"); + + ret = mrvl_node_check_params(dev, node_id, priority, weight, level_id, + params, error); + if (ret) + return ret; + + if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) { + profile = mrvl_shaper_profile_from_id(priv, + params->shaper_profile_id); + if (!profile) + return -rte_tm_error_set(error, ENODEV, + RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID, + NULL, "Shaper id does not exist\n"); + } + + if (parent_node_id == RTE_TM_NODE_ID_NULL) { + LIST_FOREACH(node, &priv->nodes, next) { + if (node->type != MRVL_NODE_PORT) + continue; + + return -rte_tm_error_set(error, EINVAL, + RTE_TM_ERROR_TYPE_UNSPECIFIED, + NULL, "Root node exists\n"); + } + } else { + parent = mrvl_node_from_id(priv, parent_node_id); + if (!parent) + return -rte_tm_error_set(error, EINVAL, + RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID, + NULL, "Node id does not exist\n"); + } + + node = mrvl_node_from_id(priv, node_id); + if (node) + return -rte_tm_error_set(error, ENODEV, + RTE_TM_ERROR_TYPE_NODE_ID, + NULL, "Node id already exists\n"); + + node = rte_zmalloc_socket(NULL, sizeof(*node), 0, rte_socket_id()); + if (!node) + return -rte_tm_error_set(error, ENOMEM, + RTE_TM_ERROR_TYPE_UNSPECIFIED, + NULL, NULL); + + node->id = node_id; + node->type = parent_node_id == RTE_TM_NODE_ID_NULL ? MRVL_NODE_PORT : + MRVL_NODE_QUEUE; + + if (parent) { + node->parent = parent; + parent->refcnt++; + } + + if (profile) { + node->profile = profile; + profile->refcnt++; + } + + node->weight = weight; + node->stats_mask = params->stats_mask; + + LIST_INSERT_HEAD(&priv->nodes, node, next); + + return 0; +} + +/** + * Delete a node. + * + * @param dev Pointer to the device. + * @param node_id Id of the node. + * @param error Pointer to the error. + * @returns 0 on success, negative value otherwise. + */ +static int +mrvl_node_delete(struct rte_eth_dev *dev, uint32_t node_id, + struct rte_tm_error *error) +{ + struct mrvl_priv *priv = dev->data->dev_private; + struct mrvl_tm_node *node; + + if (priv->ppio) { + return -rte_tm_error_set(error, EPERM, + RTE_TM_ERROR_TYPE_UNSPECIFIED, + NULL, "Port is already started\n"); + } + + node = mrvl_node_from_id(priv, node_id); + if (!node) + return -rte_tm_error_set(error, ENODEV, + RTE_TM_ERROR_TYPE_NODE_ID, + NULL, "Node id does not exist\n"); + + if (node->refcnt) + return -rte_tm_error_set(error, EPERM, + RTE_TM_ERROR_TYPE_NODE_ID, + NULL, "Node id is used\n"); + + if (node->parent) + node->parent->refcnt--; + + if (node->profile) + node->profile->refcnt--; + + LIST_REMOVE(node, next); + rte_free(node); + + return 0; +} + +/** + * Helper for suspending specific tx queue. + * + * @param dev Pointer to the device. + * @param node_id Id used by this node. + * @returns 0 on success, negative value otherwise. 
+ */ +static int mrvl_node_suspend_one(struct rte_eth_dev *dev, uint32_t node_id, + struct rte_tm_error *error) +{ + int ret = dev->dev_ops->tx_queue_stop(dev, node_id); + if (ret) + return -rte_tm_error_set(error, ret, + RTE_TM_ERROR_TYPE_UNSPECIFIED, + NULL, "Failed to suspend a txq\n"); + + return 0; +} + +/** + * Suspend a node. + * + * @param dev Pointer to the device. + * @param node_id Id of the node. + * @param error Pointer to the error. + * returns 0 on success, negative value otherwise. + */ +static int +mrvl_node_suspend(struct rte_eth_dev *dev, uint32_t node_id, + struct rte_tm_error *error) +{ + struct mrvl_priv *priv = dev->data->dev_private; + struct mrvl_tm_node *node, *tmp; + int ret; + + node = mrvl_node_from_id(priv, node_id); + if (!node) + return -rte_tm_error_set(error, ENODEV, + RTE_TM_ERROR_TYPE_NODE_ID, + NULL, "Node id does not exist\n"); + + if (!node->parent) { + LIST_FOREACH(tmp, &priv->nodes, next) { + if (!tmp->parent) + continue; + + if (node != tmp->parent) + continue; + + ret = mrvl_node_suspend_one(dev, tmp->id, error); + if (ret) + return ret; + } + + return 0; + } + + return mrvl_node_suspend_one(dev, node_id, error); +} + +/** + * Resume a node. + * + * @param dev Pointer to the device. + * @param node_id Id of the node. + * @param error Pointer to the error. + * returns 0 on success, negative value otherwise. + */ +static int +mrvl_node_resume(struct rte_eth_dev *dev, uint32_t node_id, + struct rte_tm_error *error) +{ + struct mrvl_priv *priv = dev->data->dev_private; + struct mrvl_tm_node *node; + int ret; + + node = mrvl_node_from_id(priv, node_id); + if (!node) + return -rte_tm_error_set(error, ENODEV, + RTE_TM_ERROR_TYPE_NODE_ID, + NULL, "Node id does not exist\n"); + + + if (!node->parent) + return -rte_tm_error_set(error, EPERM, + RTE_TM_ERROR_TYPE_NODE_ID, + NULL, "Cannot suspend a port\n"); + + ret = dev->dev_ops->tx_queue_start(dev, node_id); + if (ret) + return -rte_tm_error_set(error, ret, + RTE_TM_ERROR_TYPE_UNSPECIFIED, + NULL, "Failed to resume a txq\n"); + return 0; +} + +/** + * Apply traffic manager hierarchy. + * + * @param dev Pointer to the device. + * @param clear_on_fail Flag indicating whether to do cleanup on the failure. + * @param error Pointer to the error. + * @returns 0 on success, negative value otherwise. 
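+ *
+ * Illustrative note: peak shaper values supplied through rte_tm in
+ * Bytes/s and Bytes are translated below into the units expected by
+ * the SDK, cir = rate * 8 / 1000 and cbs = size / 1000 (apparently
+ * kbit/s and kB, judging from the conversion factors). For example a
+ * peak rate of 125000000 Bytes/s with a 1000000 Byte burst becomes
+ * cir = 1000000 and cbs = 1000.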
+ */ +static int +mrvl_hierarchy_commit(struct rte_eth_dev *dev, int clear_on_fail, + struct rte_tm_error *error) +{ + struct mrvl_priv *priv = dev->data->dev_private; + struct mrvl_tm_node *node; + int ret; + + if (priv->ppio) { + ret = -rte_tm_error_set(error, EPERM, + RTE_TM_ERROR_TYPE_UNSPECIFIED, + NULL, "Port is already started\n"); + goto out; + } + + LIST_FOREACH(node, &priv->nodes, next) { + struct pp2_ppio_outq_params *p; + + if (node->type == MRVL_NODE_PORT) { + if (!node->profile) + continue; + + priv->ppio_params.rate_limit_enable = 1; + priv->ppio_params.rate_limit_params.cir = + node->profile->params.peak.rate * 8 / 1000; + priv->ppio_params.rate_limit_params.cbs = + node->profile->params.peak.size / 1000; + + MRVL_LOG(INFO, + "Port rate limit overrides txqs rate limit"); + + continue; + } + + if (node->id >= dev->data->nb_tx_queues) { + ret = -rte_tm_error_set(error, EINVAL, + RTE_TM_ERROR_TYPE_NODE_ID, NULL, + "Not enough txqs are configured\n"); + goto out; + } + + p = &priv->ppio_params.outqs_params.outqs_params[node->id]; + + if (node->weight) { + p->sched_mode = PP2_PPIO_SCHED_M_WRR; + p->weight = node->weight; + } else { + p->sched_mode = PP2_PPIO_SCHED_M_SP; + p->weight = 0; + } + + if (node->profile) { + p->rate_limit_enable = 1; + /* convert Bytes/s to kilo bits/s */ + p->rate_limit_params.cir = + node->profile->params.peak.rate * 8 / 1000; + /* convert bits to kilo bits */ + p->rate_limit_params.cbs = + node->profile->params.peak.size / 1000; + } else { + p->rate_limit_enable = 0; + p->rate_limit_params.cir = 0; + p->rate_limit_params.cbs = 0; + } + } + + /* reset to defaults in case applied tm hierarchy is empty */ + if (LIST_EMPTY(&priv->nodes)) { + int i; + + for (i = 0; i < priv->ppio_params.outqs_params.num_outqs; i++) { + struct pp2_ppio_outq_params *p = + &priv->ppio_params.outqs_params.outqs_params[i]; + + p->sched_mode = PP2_PPIO_SCHED_M_WRR; + p->weight = 0; + p->rate_limit_enable = 0; + p->rate_limit_params.cir = 0; + p->rate_limit_params.cbs = 0; + } + } + + return 0; +out: + if (clear_on_fail) { + mrvl_tm_deinit(dev); + mrvl_tm_init(dev); + } + + return ret; +} + +/** + * Read statistics counters for current node. + * + * @param dev Pointer to the device. + * @param node_id Id of the node. + * @param stats Pointer to the statistics counters. + * @param stats_mask Pointer to mask of enabled statistics counters + * that are retrieved. + * @param clear Flag indicating whether to clear statistics. + * Non-zero value clears statistics. + * @param error Pointer to the error. + * @returns 0 on success, negative value otherwise. 
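+ *
+ * Illustrative usage sketch (example values): for the root node the
+ * counters come from pp2_ppio_get_statistics() (TX packets and bytes),
+ * while a leaf node only exposes the dequeued-descriptor count of its
+ * TX queue. Assuming node id 0 was added as the leaf for TX queue 0,
+ * an application would typically call:
+ *
+ *	struct rte_tm_node_stats st;
+ *	uint64_t mask;
+ *	struct rte_tm_error err;
+ *	int ret = rte_tm_node_stats_read(port_id, 0, &st, &mask, 0, &err);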
+ */ +static int +mrvl_node_stats_read(struct rte_eth_dev *dev, uint32_t node_id, + struct rte_tm_node_stats *stats, uint64_t *stats_mask, + int clear, struct rte_tm_error *error) +{ + struct mrvl_priv *priv = dev->data->dev_private; + struct mrvl_tm_node *node; + int ret; + + if (!priv->ppio) { + return -rte_tm_error_set(error, EPERM, + RTE_TM_ERROR_TYPE_UNSPECIFIED, + NULL, "Port is not started\n"); + } + + node = mrvl_node_from_id(priv, node_id); + if (!node) + return -rte_tm_error_set(error, ENODEV, + RTE_TM_ERROR_TYPE_NODE_ID, + NULL, "Node id does not exist\n"); + + if (stats_mask) + *stats_mask = node->stats_mask; + + if (!stats) + return 0; + + memset(stats, 0, sizeof(*stats)); + + if (!node->parent) { + struct pp2_ppio_statistics s; + + memset(&s, 0, sizeof(s)); + ret = pp2_ppio_get_statistics(priv->ppio, &s, clear); + if (ret) + return -rte_tm_error_set(error, -ret, + RTE_TM_ERROR_TYPE_UNSPECIFIED, NULL, + "Failed to read port statistics\n"); + + if (node->stats_mask & RTE_TM_STATS_N_PKTS) + stats->n_pkts = s.tx_packets; + + if (node->stats_mask & RTE_TM_STATS_N_BYTES) + stats->n_bytes = s.tx_bytes; + } else { + struct pp2_ppio_outq_statistics s; + + memset(&s, 0, sizeof(s)); + ret = pp2_ppio_outq_get_statistics(priv->ppio, node_id, &s, + clear); + if (ret) + return -rte_tm_error_set(error, -ret, + RTE_TM_ERROR_TYPE_UNSPECIFIED, NULL, + "Failed to read txq statistics\n"); + + if (node->stats_mask & RTE_TM_STATS_N_PKTS) + stats->n_pkts = s.deq_desc; + } + + return 0; +} + +/** + * Update node statistics. + * + * @param dev Pointer to the device. + * @param node_id Id of the node. + * @param stats_mask Bitmask of statistics counters to be enabled. + * @param error Pointer to the error. + * @returns 0 on success, negative value otherwise. + */ +static int +mrvl_node_stats_update(struct rte_eth_dev *dev, uint32_t node_id, + uint64_t stats_mask, struct rte_tm_error *error) +{ + struct mrvl_priv *priv = dev->data->dev_private; + struct mrvl_tm_node *node; + + node = mrvl_node_from_id(priv, node_id); + if (!node) + return -rte_tm_error_set(error, ENODEV, + RTE_TM_ERROR_TYPE_NODE_ID, + NULL, "Node id does not exist\n"); + + if (!node->parent) { + if (stats_mask & ~(RTE_TM_STATS_N_PKTS | RTE_TM_STATS_N_BYTES)) + return -rte_tm_error_set(error, EINVAL, + RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS, + NULL, + "Requested port stats are not supported\n"); + } else { + if (stats_mask & ~RTE_TM_STATS_N_PKTS) + return -rte_tm_error_set(error, EINVAL, + RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS, + NULL, + "Requested txq stats are not supported\n"); + } + + node->stats_mask = stats_mask; + + return 0; +} + +const struct rte_tm_ops mrvl_tm_ops = { + .node_type_get = mrvl_node_type_get, + .capabilities_get = mrvl_capabilities_get, + .level_capabilities_get = mrvl_level_capabilities_get, + .node_capabilities_get = mrvl_node_capabilities_get, + .shaper_profile_add = mrvl_shaper_profile_add, + .shaper_profile_delete = mrvl_shaper_profile_delete, + .node_add = mrvl_node_add, + .node_delete = mrvl_node_delete, + .node_suspend = mrvl_node_suspend, + .node_resume = mrvl_node_resume, + .hierarchy_commit = mrvl_hierarchy_commit, + .node_stats_update = mrvl_node_stats_update, + .node_stats_read = mrvl_node_stats_read, +}; diff --git a/src/spdk/dpdk/drivers/net/mvpp2/mrvl_tm.h b/src/spdk/dpdk/drivers/net/mvpp2/mrvl_tm.h new file mode 100644 index 000000000..9d81ede2b --- /dev/null +++ b/src/spdk/dpdk/drivers/net/mvpp2/mrvl_tm.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Marvell 
International Ltd. + * Copyright(c) 2018 Semihalf. + * All rights reserved. + */ + +#ifndef _MRVL_TM_H_ +#define _MRVL_TM_H_ + +#include "mrvl_ethdev.h" + +int mrvl_tm_init(struct rte_eth_dev *dev); +void mrvl_tm_deinit(struct rte_eth_dev *dev); + +#endif /* _MRVL_TM_H_ */ diff --git a/src/spdk/dpdk/drivers/net/mvpp2/rte_pmd_mvpp2_version.map b/src/spdk/dpdk/drivers/net/mvpp2/rte_pmd_mvpp2_version.map new file mode 100644 index 000000000..f9f17e4f6 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/mvpp2/rte_pmd_mvpp2_version.map @@ -0,0 +1,3 @@ +DPDK_20.0 { + local: *; +}; diff --git a/src/spdk/dpdk/drivers/net/netvsc/Makefile b/src/spdk/dpdk/drivers/net/netvsc/Makefile new file mode 100644 index 000000000..da8991584 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/netvsc/Makefile @@ -0,0 +1,21 @@ +# SPDX-License-Identifier: BSD-3-Clause + +include $(RTE_SDK)/mk/rte.vars.mk + +LIB = librte_pmd_netvsc.a + +CFLAGS += -O3 $(WERROR_FLAGS) + +EXPORT_MAP := rte_pmd_netvsc_version.map + +SRCS-$(CONFIG_RTE_LIBRTE_NETVSC_PMD) += hn_ethdev.c +SRCS-$(CONFIG_RTE_LIBRTE_NETVSC_PMD) += hn_rxtx.c +SRCS-$(CONFIG_RTE_LIBRTE_NETVSC_PMD) += hn_rndis.c +SRCS-$(CONFIG_RTE_LIBRTE_NETVSC_PMD) += hn_nvs.c +SRCS-$(CONFIG_RTE_LIBRTE_NETVSC_PMD) += hn_vf.c + +LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring +LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs +LDLIBS += -lrte_bus_vmbus + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/src/spdk/dpdk/drivers/net/netvsc/hn_ethdev.c b/src/spdk/dpdk/drivers/net/netvsc/hn_ethdev.c new file mode 100644 index 000000000..55b8a6380 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/netvsc/hn_ethdev.c @@ -0,0 +1,1129 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2016-2018 Microsoft Corporation + * Copyright(c) 2013-2016 Brocade Communications Systems, Inc. + * All rights reserved. 
+ */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "hn_logs.h" +#include "hn_var.h" +#include "hn_rndis.h" +#include "hn_nvs.h" +#include "ndis.h" + +#define HN_TX_OFFLOAD_CAPS (DEV_TX_OFFLOAD_IPV4_CKSUM | \ + DEV_TX_OFFLOAD_TCP_CKSUM | \ + DEV_TX_OFFLOAD_UDP_CKSUM | \ + DEV_TX_OFFLOAD_TCP_TSO | \ + DEV_TX_OFFLOAD_MULTI_SEGS | \ + DEV_TX_OFFLOAD_VLAN_INSERT) + +#define HN_RX_OFFLOAD_CAPS (DEV_RX_OFFLOAD_CHECKSUM | \ + DEV_RX_OFFLOAD_VLAN_STRIP | \ + DEV_RX_OFFLOAD_RSS_HASH) + +int hn_logtype_init; +int hn_logtype_driver; + +struct hn_xstats_name_off { + char name[RTE_ETH_XSTATS_NAME_SIZE]; + unsigned int offset; +}; + +static const struct hn_xstats_name_off hn_stat_strings[] = { + { "good_packets", offsetof(struct hn_stats, packets) }, + { "good_bytes", offsetof(struct hn_stats, bytes) }, + { "errors", offsetof(struct hn_stats, errors) }, + { "ring full", offsetof(struct hn_stats, ring_full) }, + { "multicast_packets", offsetof(struct hn_stats, multicast) }, + { "broadcast_packets", offsetof(struct hn_stats, broadcast) }, + { "undersize_packets", offsetof(struct hn_stats, size_bins[0]) }, + { "size_64_packets", offsetof(struct hn_stats, size_bins[1]) }, + { "size_65_127_packets", offsetof(struct hn_stats, size_bins[2]) }, + { "size_128_255_packets", offsetof(struct hn_stats, size_bins[3]) }, + { "size_256_511_packets", offsetof(struct hn_stats, size_bins[4]) }, + { "size_512_1023_packets", offsetof(struct hn_stats, size_bins[5]) }, + { "size_1024_1518_packets", offsetof(struct hn_stats, size_bins[6]) }, + { "size_1519_max_packets", offsetof(struct hn_stats, size_bins[7]) }, +}; + +/* The default RSS key. + * This value is the same as MLX5 so that flows will be + * received on same path for both VF and synthetic NIC. 
+ */ +static const uint8_t rss_default_key[NDIS_HASH_KEYSIZE_TOEPLITZ] = { + 0x2c, 0xc6, 0x81, 0xd1, 0x5b, 0xdb, 0xf4, 0xf7, + 0xfc, 0xa2, 0x83, 0x19, 0xdb, 0x1a, 0x3e, 0x94, + 0x6b, 0x9e, 0x38, 0xd9, 0x2c, 0x9c, 0x03, 0xd1, + 0xad, 0x99, 0x44, 0xa7, 0xd9, 0x56, 0x3d, 0x59, + 0x06, 0x3c, 0x25, 0xf3, 0xfc, 0x1f, 0xdc, 0x2a, +}; + +static struct rte_eth_dev * +eth_dev_vmbus_allocate(struct rte_vmbus_device *dev, size_t private_data_size) +{ + struct rte_eth_dev *eth_dev; + const char *name; + + if (!dev) + return NULL; + + name = dev->device.name; + + if (rte_eal_process_type() == RTE_PROC_PRIMARY) { + eth_dev = rte_eth_dev_allocate(name); + if (!eth_dev) { + PMD_DRV_LOG(NOTICE, "can not allocate rte ethdev"); + return NULL; + } + + if (private_data_size) { + eth_dev->data->dev_private = + rte_zmalloc_socket(name, private_data_size, + RTE_CACHE_LINE_SIZE, dev->device.numa_node); + if (!eth_dev->data->dev_private) { + PMD_DRV_LOG(NOTICE, "can not allocate driver data"); + rte_eth_dev_release_port(eth_dev); + return NULL; + } + } + } else { + eth_dev = rte_eth_dev_attach_secondary(name); + if (!eth_dev) { + PMD_DRV_LOG(NOTICE, "can not attach secondary"); + return NULL; + } + } + + eth_dev->device = &dev->device; + + /* interrupt is simulated */ + dev->intr_handle.type = RTE_INTR_HANDLE_EXT; + eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC; + eth_dev->intr_handle = &dev->intr_handle; + + /* allow ethdev to remove on close */ + eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE; + + return eth_dev; +} + +static void +eth_dev_vmbus_release(struct rte_eth_dev *eth_dev) +{ + /* free ether device */ + rte_eth_dev_release_port(eth_dev); + + eth_dev->device = NULL; + eth_dev->intr_handle = NULL; +} + +/* handle "latency=X" from devargs */ +static int hn_set_latency(const char *key, const char *value, void *opaque) +{ + struct hn_data *hv = opaque; + char *endp = NULL; + unsigned long lat; + + errno = 0; + lat = strtoul(value, &endp, 0); + + if (*value == '\0' || *endp != '\0') { + PMD_DRV_LOG(ERR, "invalid parameter %s=%s", key, value); + return -EINVAL; + } + + PMD_DRV_LOG(DEBUG, "set latency %lu usec", lat); + + hv->latency = lat * 1000; /* usec to nsec */ + return 0; +} + +/* Parse device arguments */ +static int hn_parse_args(const struct rte_eth_dev *dev) +{ + struct hn_data *hv = dev->data->dev_private; + struct rte_devargs *devargs = dev->device->devargs; + static const char * const valid_keys[] = { + "latency", + NULL + }; + struct rte_kvargs *kvlist; + int ret; + + if (!devargs) + return 0; + + PMD_INIT_LOG(DEBUG, "device args %s %s", + devargs->name, devargs->args); + + kvlist = rte_kvargs_parse(devargs->args, valid_keys); + if (!kvlist) { + PMD_DRV_LOG(NOTICE, "invalid parameters"); + return -EINVAL; + } + + ret = rte_kvargs_process(kvlist, "latency", hn_set_latency, hv); + if (ret) + PMD_DRV_LOG(ERR, "Unable to process latency arg\n"); + + rte_kvargs_free(kvlist); + return ret; +} + +/* Update link status. + * Note: the DPDK definition of "wait_to_complete" + * means block this call until link is up. + * which is not worth supporting. 
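+ *
+ * Illustrative note: hv->link_speed as filled in by
+ * hn_rndis_get_linkspeed() appears to be in units of 100 bit/s (the
+ * usual NDIS convention), so the division by 10000 below yields the
+ * Mbit/s value expected in rte_eth_link.link_speed; e.g. a 10 Gbit/s
+ * host link is reported as 100000000 units and becomes 10000 Mbit/s.
+ * This is an inference from the conversion factor, not confirmed here.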
+ */ +int +hn_dev_link_update(struct rte_eth_dev *dev, + int wait_to_complete) +{ + struct hn_data *hv = dev->data->dev_private; + struct rte_eth_link link, old; + int error; + + old = dev->data->dev_link; + + error = hn_rndis_get_linkstatus(hv); + if (error) + return error; + + hn_rndis_get_linkspeed(hv); + + hn_vf_link_update(dev, wait_to_complete); + + link = (struct rte_eth_link) { + .link_duplex = ETH_LINK_FULL_DUPLEX, + .link_autoneg = ETH_LINK_SPEED_FIXED, + .link_speed = hv->link_speed / 10000, + }; + + if (hv->link_status == NDIS_MEDIA_STATE_CONNECTED) + link.link_status = ETH_LINK_UP; + else + link.link_status = ETH_LINK_DOWN; + + if (old.link_status == link.link_status) + return 0; + + PMD_INIT_LOG(DEBUG, "Port %d is %s", dev->data->port_id, + (link.link_status == ETH_LINK_UP) ? "up" : "down"); + + return rte_eth_linkstatus_set(dev, &link); +} + +static int hn_dev_info_get(struct rte_eth_dev *dev, + struct rte_eth_dev_info *dev_info) +{ + struct hn_data *hv = dev->data->dev_private; + int rc; + + dev_info->speed_capa = ETH_LINK_SPEED_10G; + dev_info->min_rx_bufsize = HN_MIN_RX_BUF_SIZE; + dev_info->max_rx_pktlen = HN_MAX_XFER_LEN; + dev_info->max_mac_addrs = 1; + + dev_info->hash_key_size = NDIS_HASH_KEYSIZE_TOEPLITZ; + dev_info->flow_type_rss_offloads = hv->rss_offloads; + dev_info->reta_size = ETH_RSS_RETA_SIZE_128; + + dev_info->max_rx_queues = hv->max_queues; + dev_info->max_tx_queues = hv->max_queues; + + dev_info->tx_desc_lim.nb_min = 1; + dev_info->tx_desc_lim.nb_max = 4096; + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; + + /* fills in rx and tx offload capability */ + rc = hn_rndis_get_offload(hv, dev_info); + if (rc != 0) + return rc; + + /* merges the offload and queues of vf */ + return hn_vf_info_get(hv, dev_info); +} + +static int hn_rss_reta_update(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size) +{ + struct hn_data *hv = dev->data->dev_private; + unsigned int i; + int err; + + PMD_INIT_FUNC_TRACE(); + + if (reta_size != NDIS_HASH_INDCNT) { + PMD_DRV_LOG(ERR, "Hash lookup table size does not match NDIS"); + return -EINVAL; + } + + for (i = 0; i < NDIS_HASH_INDCNT; i++) { + uint16_t idx = i / RTE_RETA_GROUP_SIZE; + uint16_t shift = i % RTE_RETA_GROUP_SIZE; + uint64_t mask = (uint64_t)1 << shift; + + if (reta_conf[idx].mask & mask) + hv->rss_ind[i] = reta_conf[idx].reta[shift]; + } + + err = hn_rndis_conf_rss(hv, NDIS_RSS_FLAG_DISABLE); + if (err) { + PMD_DRV_LOG(NOTICE, + "rss disable failed"); + return err; + } + + err = hn_rndis_conf_rss(hv, 0); + if (err) { + PMD_DRV_LOG(NOTICE, + "reta reconfig failed"); + return err; + } + + return hn_vf_reta_hash_update(dev, reta_conf, reta_size); +} + +static int hn_rss_reta_query(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size) +{ + struct hn_data *hv = dev->data->dev_private; + unsigned int i; + + PMD_INIT_FUNC_TRACE(); + + if (reta_size != NDIS_HASH_INDCNT) { + PMD_DRV_LOG(ERR, "Hash lookup table size does not match NDIS"); + return -EINVAL; + } + + for (i = 0; i < NDIS_HASH_INDCNT; i++) { + uint16_t idx = i / RTE_RETA_GROUP_SIZE; + uint16_t shift = i % RTE_RETA_GROUP_SIZE; + uint64_t mask = (uint64_t)1 << shift; + + if (reta_conf[idx].mask & mask) + reta_conf[idx].reta[shift] = hv->rss_ind[i]; + } + return 0; +} + +static void hn_rss_hash_init(struct hn_data *hv, + const struct rte_eth_rss_conf *rss_conf) +{ + /* Convert from DPDK RSS hash flags to NDIS hash flags */ + hv->rss_hash = NDIS_HASH_FUNCTION_TOEPLITZ; + + 
if (rss_conf->rss_hf & ETH_RSS_IPV4) + hv->rss_hash |= NDIS_HASH_IPV4; + if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) + hv->rss_hash |= NDIS_HASH_TCP_IPV4; + if (rss_conf->rss_hf & ETH_RSS_IPV6) + hv->rss_hash |= NDIS_HASH_IPV6; + if (rss_conf->rss_hf & ETH_RSS_IPV6_EX) + hv->rss_hash |= NDIS_HASH_IPV6_EX; + if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) + hv->rss_hash |= NDIS_HASH_TCP_IPV6; + if (rss_conf->rss_hf & ETH_RSS_IPV6_TCP_EX) + hv->rss_hash |= NDIS_HASH_TCP_IPV6_EX; + + memcpy(hv->rss_key, rss_conf->rss_key ? : rss_default_key, + NDIS_HASH_KEYSIZE_TOEPLITZ); +} + +static int hn_rss_hash_update(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf) +{ + struct hn_data *hv = dev->data->dev_private; + int err; + + PMD_INIT_FUNC_TRACE(); + + err = hn_rndis_conf_rss(hv, NDIS_RSS_FLAG_DISABLE); + if (err) { + PMD_DRV_LOG(NOTICE, + "rss disable failed"); + return err; + } + + hn_rss_hash_init(hv, rss_conf); + + if (rss_conf->rss_hf != 0) { + err = hn_rndis_conf_rss(hv, 0); + if (err) { + PMD_DRV_LOG(NOTICE, + "rss reconfig failed (RSS disabled)"); + return err; + } + } + + return hn_vf_rss_hash_update(dev, rss_conf); +} + +static int hn_rss_hash_conf_get(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf) +{ + struct hn_data *hv = dev->data->dev_private; + + PMD_INIT_FUNC_TRACE(); + + if (hv->ndis_ver < NDIS_VERSION_6_20) { + PMD_DRV_LOG(DEBUG, "RSS not supported on this host"); + return -EOPNOTSUPP; + } + + rss_conf->rss_key_len = NDIS_HASH_KEYSIZE_TOEPLITZ; + if (rss_conf->rss_key) + memcpy(rss_conf->rss_key, hv->rss_key, + NDIS_HASH_KEYSIZE_TOEPLITZ); + + rss_conf->rss_hf = 0; + if (hv->rss_hash & NDIS_HASH_IPV4) + rss_conf->rss_hf |= ETH_RSS_IPV4; + + if (hv->rss_hash & NDIS_HASH_TCP_IPV4) + rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP; + + if (hv->rss_hash & NDIS_HASH_IPV6) + rss_conf->rss_hf |= ETH_RSS_IPV6; + + if (hv->rss_hash & NDIS_HASH_IPV6_EX) + rss_conf->rss_hf |= ETH_RSS_IPV6_EX; + + if (hv->rss_hash & NDIS_HASH_TCP_IPV6) + rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP; + + if (hv->rss_hash & NDIS_HASH_TCP_IPV6_EX) + rss_conf->rss_hf |= ETH_RSS_IPV6_TCP_EX; + + return 0; +} + +static int +hn_dev_promiscuous_enable(struct rte_eth_dev *dev) +{ + struct hn_data *hv = dev->data->dev_private; + + hn_rndis_set_rxfilter(hv, NDIS_PACKET_TYPE_PROMISCUOUS); + return hn_vf_promiscuous_enable(dev); +} + +static int +hn_dev_promiscuous_disable(struct rte_eth_dev *dev) +{ + struct hn_data *hv = dev->data->dev_private; + uint32_t filter; + + filter = NDIS_PACKET_TYPE_DIRECTED | NDIS_PACKET_TYPE_BROADCAST; + if (dev->data->all_multicast) + filter |= NDIS_PACKET_TYPE_ALL_MULTICAST; + hn_rndis_set_rxfilter(hv, filter); + return hn_vf_promiscuous_disable(dev); +} + +static int +hn_dev_allmulticast_enable(struct rte_eth_dev *dev) +{ + struct hn_data *hv = dev->data->dev_private; + + hn_rndis_set_rxfilter(hv, NDIS_PACKET_TYPE_DIRECTED | + NDIS_PACKET_TYPE_ALL_MULTICAST | + NDIS_PACKET_TYPE_BROADCAST); + return hn_vf_allmulticast_enable(dev); +} + +static int +hn_dev_allmulticast_disable(struct rte_eth_dev *dev) +{ + struct hn_data *hv = dev->data->dev_private; + + hn_rndis_set_rxfilter(hv, NDIS_PACKET_TYPE_DIRECTED | + NDIS_PACKET_TYPE_BROADCAST); + return hn_vf_allmulticast_disable(dev); +} + +static int +hn_dev_mc_addr_list(struct rte_eth_dev *dev, + struct rte_ether_addr *mc_addr_set, + uint32_t nb_mc_addr) +{ + /* No filtering on the synthetic path, but can do it on VF */ + return hn_vf_mc_addr_list(dev, mc_addr_set, nb_mc_addr); +} + +/* Setup shared rx/tx queue 
data */ +static int hn_subchan_configure(struct hn_data *hv, + uint32_t subchan) +{ + struct vmbus_channel *primary = hn_primary_chan(hv); + int err; + unsigned int retry = 0; + + PMD_DRV_LOG(DEBUG, + "open %u subchannels", subchan); + + /* Send create sub channels command */ + err = hn_nvs_alloc_subchans(hv, &subchan); + if (err) + return err; + + while (subchan > 0) { + struct vmbus_channel *new_sc; + uint16_t chn_index; + + err = rte_vmbus_subchan_open(primary, &new_sc); + if (err == -ENOENT && ++retry < 1000) { + /* This can happen if not ready yet */ + rte_delay_ms(10); + continue; + } + + if (err) { + PMD_DRV_LOG(ERR, + "open subchannel failed: %d", err); + return err; + } + + rte_vmbus_set_latency(hv->vmbus, new_sc, hv->latency); + + retry = 0; + chn_index = rte_vmbus_sub_channel_index(new_sc); + if (chn_index == 0 || chn_index > hv->max_queues) { + PMD_DRV_LOG(ERR, + "Invalid subchannel offermsg channel %u", + chn_index); + return -EIO; + } + + PMD_DRV_LOG(DEBUG, "new sub channel %u", chn_index); + hv->channels[chn_index] = new_sc; + --subchan; + } + + return err; +} + +static int hn_dev_configure(struct rte_eth_dev *dev) +{ + struct rte_eth_conf *dev_conf = &dev->data->dev_conf; + struct rte_eth_rss_conf *rss_conf = &dev_conf->rx_adv_conf.rss_conf; + const struct rte_eth_rxmode *rxmode = &dev_conf->rxmode; + const struct rte_eth_txmode *txmode = &dev_conf->txmode; + struct hn_data *hv = dev->data->dev_private; + uint64_t unsupported; + int i, err, subchan; + + PMD_INIT_FUNC_TRACE(); + + if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) + dev_conf->rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH; + + unsupported = txmode->offloads & ~HN_TX_OFFLOAD_CAPS; + if (unsupported) { + PMD_DRV_LOG(NOTICE, + "unsupported TX offload: %#" PRIx64, + unsupported); + return -EINVAL; + } + + unsupported = rxmode->offloads & ~HN_RX_OFFLOAD_CAPS; + if (unsupported) { + PMD_DRV_LOG(NOTICE, + "unsupported RX offload: %#" PRIx64, + rxmode->offloads); + return -EINVAL; + } + + hv->vlan_strip = !!(rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP); + + err = hn_rndis_conf_offload(hv, txmode->offloads, + rxmode->offloads); + if (err) { + PMD_DRV_LOG(NOTICE, + "offload configure failed"); + return err; + } + + hv->num_queues = RTE_MAX(dev->data->nb_rx_queues, + dev->data->nb_tx_queues); + + for (i = 0; i < NDIS_HASH_INDCNT; i++) + hv->rss_ind[i] = i % dev->data->nb_rx_queues; + + hn_rss_hash_init(hv, rss_conf); + + subchan = hv->num_queues - 1; + if (subchan > 0) { + err = hn_subchan_configure(hv, subchan); + if (err) { + PMD_DRV_LOG(NOTICE, + "subchannel configuration failed"); + return err; + } + + err = hn_rndis_conf_rss(hv, NDIS_RSS_FLAG_DISABLE); + if (err) { + PMD_DRV_LOG(NOTICE, + "rss disable failed"); + return err; + } + + if (rss_conf->rss_hf != 0) { + err = hn_rndis_conf_rss(hv, 0); + if (err) { + PMD_DRV_LOG(NOTICE, + "initial RSS config failed"); + return err; + } + } + } + + return hn_vf_configure(dev, dev_conf); +} + +static int hn_dev_stats_get(struct rte_eth_dev *dev, + struct rte_eth_stats *stats) +{ + unsigned int i; + + hn_vf_stats_get(dev, stats); + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + const struct hn_tx_queue *txq = dev->data->tx_queues[i]; + + if (!txq) + continue; + + stats->opackets += txq->stats.packets; + stats->obytes += txq->stats.bytes; + stats->oerrors += txq->stats.errors; + + if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) { + stats->q_opackets[i] = txq->stats.packets; + stats->q_obytes[i] = txq->stats.bytes; + } + } + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + const 
struct hn_rx_queue *rxq = dev->data->rx_queues[i]; + + if (!rxq) + continue; + + stats->ipackets += rxq->stats.packets; + stats->ibytes += rxq->stats.bytes; + stats->ierrors += rxq->stats.errors; + stats->imissed += rxq->stats.ring_full; + + if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) { + stats->q_ipackets[i] = rxq->stats.packets; + stats->q_ibytes[i] = rxq->stats.bytes; + } + } + + stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed; + return 0; +} + +static int +hn_dev_stats_reset(struct rte_eth_dev *dev) +{ + unsigned int i; + + PMD_INIT_FUNC_TRACE(); + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + struct hn_tx_queue *txq = dev->data->tx_queues[i]; + + if (!txq) + continue; + memset(&txq->stats, 0, sizeof(struct hn_stats)); + } + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + struct hn_rx_queue *rxq = dev->data->rx_queues[i]; + + if (!rxq) + continue; + + memset(&rxq->stats, 0, sizeof(struct hn_stats)); + } + + return 0; +} + +static int +hn_dev_xstats_reset(struct rte_eth_dev *dev) +{ + int ret; + + ret = hn_dev_stats_reset(dev); + if (ret != 0) + return 0; + + return hn_vf_xstats_reset(dev); +} + +static int +hn_dev_xstats_count(struct rte_eth_dev *dev) +{ + int ret, count; + + count = dev->data->nb_tx_queues * RTE_DIM(hn_stat_strings); + count += dev->data->nb_rx_queues * RTE_DIM(hn_stat_strings); + + ret = hn_vf_xstats_get_names(dev, NULL, 0); + if (ret < 0) + return ret; + + return count + ret; +} + +static int +hn_dev_xstats_get_names(struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, + unsigned int limit) +{ + unsigned int i, t, count = 0; + int ret; + + if (!xstats_names) + return hn_dev_xstats_count(dev); + + /* Note: limit checked in rte_eth_xstats_names() */ + for (i = 0; i < dev->data->nb_tx_queues; i++) { + const struct hn_tx_queue *txq = dev->data->tx_queues[i]; + + if (!txq) + continue; + + if (count >= limit) + break; + + for (t = 0; t < RTE_DIM(hn_stat_strings); t++) + snprintf(xstats_names[count++].name, + RTE_ETH_XSTATS_NAME_SIZE, + "tx_q%u_%s", i, hn_stat_strings[t].name); + } + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + const struct hn_rx_queue *rxq = dev->data->rx_queues[i]; + + if (!rxq) + continue; + + if (count >= limit) + break; + + for (t = 0; t < RTE_DIM(hn_stat_strings); t++) + snprintf(xstats_names[count++].name, + RTE_ETH_XSTATS_NAME_SIZE, + "rx_q%u_%s", i, + hn_stat_strings[t].name); + } + + ret = hn_vf_xstats_get_names(dev, xstats_names + count, + limit - count); + if (ret < 0) + return ret; + + return count + ret; +} + +static int +hn_dev_xstats_get(struct rte_eth_dev *dev, + struct rte_eth_xstat *xstats, + unsigned int n) +{ + unsigned int i, t, count = 0; + const unsigned int nstats = hn_dev_xstats_count(dev); + const char *stats; + int ret; + + PMD_INIT_FUNC_TRACE(); + + if (n < nstats) + return nstats; + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + const struct hn_tx_queue *txq = dev->data->tx_queues[i]; + + if (!txq) + continue; + + stats = (const char *)&txq->stats; + for (t = 0; t < RTE_DIM(hn_stat_strings); t++, count++) { + xstats[count].id = count; + xstats[count].value = *(const uint64_t *) + (stats + hn_stat_strings[t].offset); + } + } + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + const struct hn_rx_queue *rxq = dev->data->rx_queues[i]; + + if (!rxq) + continue; + + stats = (const char *)&rxq->stats; + for (t = 0; t < RTE_DIM(hn_stat_strings); t++, count++) { + xstats[count].id = count; + xstats[count].value = *(const uint64_t *) + (stats + hn_stat_strings[t].offset); + } + } + + ret = 
hn_vf_xstats_get(dev, xstats, count, n); + if (ret < 0) + return ret; + + return count + ret; +} + +static int +hn_dev_start(struct rte_eth_dev *dev) +{ + struct hn_data *hv = dev->data->dev_private; + int error; + + PMD_INIT_FUNC_TRACE(); + + error = hn_rndis_set_rxfilter(hv, + NDIS_PACKET_TYPE_BROADCAST | + NDIS_PACKET_TYPE_ALL_MULTICAST | + NDIS_PACKET_TYPE_DIRECTED); + if (error) + return error; + + error = hn_vf_start(dev); + if (error) + hn_rndis_set_rxfilter(hv, 0); + + /* Initialize Link state */ + if (error == 0) + hn_dev_link_update(dev, 0); + + return error; +} + +static void +hn_dev_stop(struct rte_eth_dev *dev) +{ + struct hn_data *hv = dev->data->dev_private; + + PMD_INIT_FUNC_TRACE(); + + hn_rndis_set_rxfilter(hv, 0); + hn_vf_stop(dev); +} + +static void +hn_dev_close(struct rte_eth_dev *dev) +{ + PMD_INIT_FUNC_TRACE(); + + hn_vf_close(dev); + hn_dev_free_queues(dev); +} + +static const struct eth_dev_ops hn_eth_dev_ops = { + .dev_configure = hn_dev_configure, + .dev_start = hn_dev_start, + .dev_stop = hn_dev_stop, + .dev_close = hn_dev_close, + .dev_infos_get = hn_dev_info_get, + .dev_supported_ptypes_get = hn_vf_supported_ptypes, + .promiscuous_enable = hn_dev_promiscuous_enable, + .promiscuous_disable = hn_dev_promiscuous_disable, + .allmulticast_enable = hn_dev_allmulticast_enable, + .allmulticast_disable = hn_dev_allmulticast_disable, + .set_mc_addr_list = hn_dev_mc_addr_list, + .reta_update = hn_rss_reta_update, + .reta_query = hn_rss_reta_query, + .rss_hash_update = hn_rss_hash_update, + .rss_hash_conf_get = hn_rss_hash_conf_get, + .tx_queue_setup = hn_dev_tx_queue_setup, + .tx_queue_release = hn_dev_tx_queue_release, + .tx_done_cleanup = hn_dev_tx_done_cleanup, + .rx_queue_setup = hn_dev_rx_queue_setup, + .rx_queue_release = hn_dev_rx_queue_release, + .link_update = hn_dev_link_update, + .stats_get = hn_dev_stats_get, + .stats_reset = hn_dev_stats_reset, + .xstats_get = hn_dev_xstats_get, + .xstats_get_names = hn_dev_xstats_get_names, + .xstats_reset = hn_dev_xstats_reset, +}; + +/* + * Setup connection between PMD and kernel. + */ +static int +hn_attach(struct hn_data *hv, unsigned int mtu) +{ + int error; + + /* Attach NVS */ + error = hn_nvs_attach(hv, mtu); + if (error) + goto failed_nvs; + + /* Attach RNDIS */ + error = hn_rndis_attach(hv); + if (error) + goto failed_rndis; + + /* + * NOTE: + * Under certain conditions on certain versions of Hyper-V, + * the RNDIS rxfilter is _not_ zero on the hypervisor side + * after the successful RNDIS initialization. + */ + hn_rndis_set_rxfilter(hv, NDIS_PACKET_TYPE_NONE); + return 0; +failed_rndis: + hn_nvs_detach(hv); +failed_nvs: + return error; +} + +static void +hn_detach(struct hn_data *hv) +{ + hn_nvs_detach(hv); + hn_rndis_detach(hv); +} + +static int +eth_hn_dev_init(struct rte_eth_dev *eth_dev) +{ + struct hn_data *hv = eth_dev->data->dev_private; + struct rte_device *device = eth_dev->device; + struct rte_vmbus_device *vmbus; + unsigned int rxr_cnt; + int err, max_chan; + + PMD_INIT_FUNC_TRACE(); + + vmbus = container_of(device, struct rte_vmbus_device, device); + eth_dev->dev_ops = &hn_eth_dev_ops; + eth_dev->tx_pkt_burst = &hn_xmit_pkts; + eth_dev->rx_pkt_burst = &hn_recv_pkts; + + /* + * for secondary processes, we don't initialize any further as primary + * has already done this work. 
+ */ + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; + + /* Since Hyper-V only supports one MAC address */ + eth_dev->data->mac_addrs = rte_calloc("hv_mac", HN_MAX_MAC_ADDRS, + sizeof(struct rte_ether_addr), 0); + if (eth_dev->data->mac_addrs == NULL) { + PMD_INIT_LOG(ERR, + "Failed to allocate memory store MAC addresses"); + return -ENOMEM; + } + + hv->vmbus = vmbus; + hv->rxbuf_res = &vmbus->resource[HV_RECV_BUF_MAP]; + hv->chim_res = &vmbus->resource[HV_SEND_BUF_MAP]; + hv->port_id = eth_dev->data->port_id; + hv->latency = HN_CHAN_LATENCY_NS; + hv->max_queues = 1; + rte_rwlock_init(&hv->vf_lock); + hv->vf_port = HN_INVALID_PORT; + + err = hn_parse_args(eth_dev); + if (err) + return err; + + strlcpy(hv->owner.name, eth_dev->device->name, + RTE_ETH_MAX_OWNER_NAME_LEN); + err = rte_eth_dev_owner_new(&hv->owner.id); + if (err) { + PMD_INIT_LOG(ERR, "Can not get owner id"); + return err; + } + + /* Initialize primary channel input for control operations */ + err = rte_vmbus_chan_open(vmbus, &hv->channels[0]); + if (err) + return err; + + rte_vmbus_set_latency(hv->vmbus, hv->channels[0], hv->latency); + + hv->primary = hn_rx_queue_alloc(hv, 0, + eth_dev->device->numa_node); + + if (!hv->primary) + return -ENOMEM; + + err = hn_attach(hv, RTE_ETHER_MTU); + if (err) + goto failed; + + err = hn_chim_init(eth_dev); + if (err) + goto failed; + + err = hn_rndis_get_eaddr(hv, eth_dev->data->mac_addrs->addr_bytes); + if (err) + goto failed; + + /* Multi queue requires later versions of windows server */ + if (hv->nvs_ver < NVS_VERSION_5) + return 0; + + max_chan = rte_vmbus_max_channels(vmbus); + PMD_INIT_LOG(DEBUG, "VMBus max channels %d", max_chan); + if (max_chan <= 0) + goto failed; + + if (hn_rndis_query_rsscaps(hv, &rxr_cnt) != 0) + rxr_cnt = 1; + + hv->max_queues = RTE_MIN(rxr_cnt, (unsigned int)max_chan); + + /* If VF was reported but not added, do it now */ + if (hv->vf_present && !hn_vf_attached(hv)) { + PMD_INIT_LOG(DEBUG, "Adding VF device"); + + err = hn_vf_add(eth_dev, hv); + if (err) + hv->vf_present = 0; + } + + return 0; + +failed: + PMD_INIT_LOG(NOTICE, "device init failed"); + + hn_chim_uninit(eth_dev); + hn_detach(hv); + return err; +} + +static int +eth_hn_dev_uninit(struct rte_eth_dev *eth_dev) +{ + struct hn_data *hv = eth_dev->data->dev_private; + int ret; + + PMD_INIT_FUNC_TRACE(); + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; + + hn_dev_stop(eth_dev); + hn_dev_close(eth_dev); + + eth_dev->dev_ops = NULL; + eth_dev->tx_pkt_burst = NULL; + eth_dev->rx_pkt_burst = NULL; + + hn_detach(hv); + hn_chim_uninit(eth_dev); + rte_vmbus_chan_close(hv->primary->chan); + rte_free(hv->primary); + ret = rte_eth_dev_owner_delete(hv->owner.id); + if (ret != 0) + return ret; + + return 0; +} + +static int eth_hn_probe(struct rte_vmbus_driver *drv __rte_unused, + struct rte_vmbus_device *dev) +{ + struct rte_eth_dev *eth_dev; + int ret; + + PMD_INIT_FUNC_TRACE(); + + eth_dev = eth_dev_vmbus_allocate(dev, sizeof(struct hn_data)); + if (!eth_dev) + return -ENOMEM; + + ret = eth_hn_dev_init(eth_dev); + if (ret) + eth_dev_vmbus_release(eth_dev); + else + rte_eth_dev_probing_finish(eth_dev); + + return ret; +} + +static int eth_hn_remove(struct rte_vmbus_device *dev) +{ + struct rte_eth_dev *eth_dev; + int ret; + + PMD_INIT_FUNC_TRACE(); + + eth_dev = rte_eth_dev_allocated(dev->device.name); + if (!eth_dev) + return -ENODEV; + + ret = eth_hn_dev_uninit(eth_dev); + if (ret) + return ret; + + eth_dev_vmbus_release(eth_dev); + return 0; +} + +/* Network device GUID */ 
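+/*
+ * Illustrative note: RTE_UUID_INIT() takes the five dash-separated
+ * fields of the canonical GUID string, so the entry below encodes
+ * f8615163-df3e-46c5-913f-f2d2f965ed0e, understood to be the VMBus
+ * class id of Hyper-V synthetic network devices that this PMD binds to.
+ */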
+static const rte_uuid_t hn_net_ids[] = { + /* f8615163-df3e-46c5-913f-f2d2f965ed0e */ + RTE_UUID_INIT(0xf8615163, 0xdf3e, 0x46c5, 0x913f, 0xf2d2f965ed0eULL), + { 0 } +}; + +static struct rte_vmbus_driver rte_netvsc_pmd = { + .id_table = hn_net_ids, + .probe = eth_hn_probe, + .remove = eth_hn_remove, +}; + +RTE_PMD_REGISTER_VMBUS(net_netvsc, rte_netvsc_pmd); +RTE_PMD_REGISTER_KMOD_DEP(net_netvsc, "* uio_hv_generic"); + +RTE_INIT(hn_init_log) +{ + hn_logtype_init = rte_log_register("pmd.net.netvsc.init"); + if (hn_logtype_init >= 0) + rte_log_set_level(hn_logtype_init, RTE_LOG_NOTICE); + hn_logtype_driver = rte_log_register("pmd.net.netvsc.driver"); + if (hn_logtype_driver >= 0) + rte_log_set_level(hn_logtype_driver, RTE_LOG_NOTICE); +} diff --git a/src/spdk/dpdk/drivers/net/netvsc/hn_logs.h b/src/spdk/dpdk/drivers/net/netvsc/hn_logs.h new file mode 100644 index 000000000..cddadef09 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/netvsc/hn_logs.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: BSD-3-Clause */ + +#ifndef _HN_LOGS_H_ +#define _HN_LOGS_H_ + +#include + +extern int hn_logtype_init; +extern int hn_logtype_driver; + +#define PMD_INIT_LOG(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, hn_logtype_init, "%s(): " fmt "\n",\ + __func__, ## args) +#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>") + +#ifdef RTE_LIBRTE_NETVSC_DEBUG_RX +#define PMD_RX_LOG(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, hn_logtype_driver, \ + "%s() rx: " fmt "\n", __func__, ## args) +#else +#define PMD_RX_LOG(level, fmt, args...) do { } while (0) +#endif + +#ifdef RTE_LIBRTE_NETVSC_DEBUG_TX +#define PMD_TX_LOG(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, hn_logtype_driver, \ + "%s() tx: " fmt "\n", __func__, ## args) +#else +#define PMD_TX_LOG(level, fmt, args...) do { } while (0) +#endif + +#define PMD_DRV_LOG(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, hn_logtype_driver, "%s(): " fmt "\n", \ + __func__, ## args) + +#endif /* _HN_LOGS_H_ */ diff --git a/src/spdk/dpdk/drivers/net/netvsc/hn_nvs.c b/src/spdk/dpdk/drivers/net/netvsc/hn_nvs.c new file mode 100644 index 000000000..477202b2a --- /dev/null +++ b/src/spdk/dpdk/drivers/net/netvsc/hn_nvs.c @@ -0,0 +1,587 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2018 Microsoft Corp. + * Copyright (c) 2010-2012 Citrix Inc. + * Copyright (c) 2012 NetApp Inc. + * All rights reserved. + */ + +/* + * Network Virtualization Service. 
+ */ + + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "hn_logs.h" +#include "hn_var.h" +#include "hn_nvs.h" + +static const uint32_t hn_nvs_version[] = { + NVS_VERSION_61, + NVS_VERSION_6, + NVS_VERSION_5, + NVS_VERSION_4, + NVS_VERSION_2, + NVS_VERSION_1 +}; + +static int hn_nvs_req_send(struct hn_data *hv, + void *req, uint32_t reqlen) +{ + return rte_vmbus_chan_send(hn_primary_chan(hv), + VMBUS_CHANPKT_TYPE_INBAND, + req, reqlen, 0, + VMBUS_CHANPKT_FLAG_NONE, NULL); +} + +static int +__hn_nvs_execute(struct hn_data *hv, + void *req, uint32_t reqlen, + void *resp, uint32_t resplen, + uint32_t type) +{ + struct vmbus_channel *chan = hn_primary_chan(hv); + char buffer[NVS_RESPSIZE_MAX]; + const struct hn_nvs_hdr *hdr; + uint64_t xactid; + uint32_t len; + int ret; + + /* Send request to ring buffer */ + ret = rte_vmbus_chan_send(chan, VMBUS_CHANPKT_TYPE_INBAND, + req, reqlen, 0, + VMBUS_CHANPKT_FLAG_RC, NULL); + + if (ret) { + PMD_DRV_LOG(ERR, "send request failed: %d", ret); + return ret; + } + + retry: + len = sizeof(buffer); + ret = rte_vmbus_chan_recv(chan, buffer, &len, &xactid); + if (ret == -EAGAIN) { + rte_delay_us(HN_CHAN_INTERVAL_US); + goto retry; + } + + if (ret < 0) { + PMD_DRV_LOG(ERR, "recv response failed: %d", ret); + return ret; + } + + if (len < sizeof(*hdr)) { + PMD_DRV_LOG(ERR, "response missing NVS header"); + return -EINVAL; + } + + hdr = (struct hn_nvs_hdr *)buffer; + + /* Silently drop received packets while waiting for response */ + if (hdr->type == NVS_TYPE_RNDIS) { + hn_nvs_ack_rxbuf(chan, xactid); + --hv->rxbuf_outstanding; + goto retry; + } + + if (hdr->type != type) { + PMD_DRV_LOG(ERR, "unexpected NVS resp %#x, expect %#x", + hdr->type, type); + return -EINVAL; + } + + if (len < resplen) { + PMD_DRV_LOG(ERR, + "invalid NVS resp len %u (expect %u)", + len, resplen); + return -EINVAL; + } + + memcpy(resp, buffer, resplen); + + /* All pass! */ + return 0; +} + + +/* + * Execute one control command and get the response. + * Only one command can be active on a channel at once + * Unlike BSD, DPDK does not have an interrupt context + * so the polling is required to wait for response. + */ +static int +hn_nvs_execute(struct hn_data *hv, + void *req, uint32_t reqlen, + void *resp, uint32_t resplen, + uint32_t type) +{ + struct hn_rx_queue *rxq = hv->primary; + int ret; + + rte_spinlock_lock(&rxq->ring_lock); + ret = __hn_nvs_execute(hv, req, reqlen, resp, resplen, type); + rte_spinlock_unlock(&rxq->ring_lock); + + return ret; +} + +static int +hn_nvs_doinit(struct hn_data *hv, uint32_t nvs_ver) +{ + struct hn_nvs_init init; + struct hn_nvs_init_resp resp; + uint32_t status; + int error; + + memset(&init, 0, sizeof(init)); + init.type = NVS_TYPE_INIT; + init.ver_min = nvs_ver; + init.ver_max = nvs_ver; + + error = hn_nvs_execute(hv, &init, sizeof(init), + &resp, sizeof(resp), + NVS_TYPE_INIT_RESP); + if (error) + return error; + + status = resp.status; + if (status != NVS_STATUS_OK) { + /* Not fatal, try other versions */ + PMD_INIT_LOG(DEBUG, "nvs init failed for ver 0x%x", + nvs_ver); + return -EINVAL; + } + + return 0; +} + +static int +hn_nvs_conn_rxbuf(struct hn_data *hv) +{ + struct hn_nvs_rxbuf_conn conn; + struct hn_nvs_rxbuf_connresp resp; + uint32_t status; + int error; + + /* Kernel has already setup RXBUF on primary channel. */ + + /* + * Connect RXBUF to NVS. 
+ */ + conn.type = NVS_TYPE_RXBUF_CONN; + conn.gpadl = hv->rxbuf_res->phys_addr; + conn.sig = NVS_RXBUF_SIG; + PMD_DRV_LOG(DEBUG, "connect rxbuff va=%p gpad=%#" PRIx64, + hv->rxbuf_res->addr, + hv->rxbuf_res->phys_addr); + + error = hn_nvs_execute(hv, &conn, sizeof(conn), + &resp, sizeof(resp), + NVS_TYPE_RXBUF_CONNRESP); + if (error) { + PMD_DRV_LOG(ERR, + "exec nvs rxbuf conn failed: %d", + error); + return error; + } + + status = resp.status; + if (status != NVS_STATUS_OK) { + PMD_DRV_LOG(ERR, + "nvs rxbuf conn failed: %x", status); + return -EIO; + } + if (resp.nsect != 1) { + PMD_DRV_LOG(ERR, + "nvs rxbuf response num sections %u != 1", + resp.nsect); + return -EIO; + } + + PMD_DRV_LOG(INFO, + "receive buffer size %u count %u", + resp.nvs_sect[0].slotsz, + resp.nvs_sect[0].slotcnt); + hv->rxbuf_section_cnt = resp.nvs_sect[0].slotcnt; + + hv->rxbuf_info = rte_calloc("HN_RXBUF_INFO", hv->rxbuf_section_cnt, + sizeof(*hv->rxbuf_info), RTE_CACHE_LINE_SIZE); + if (!hv->rxbuf_info) { + PMD_DRV_LOG(ERR, + "could not allocate rxbuf info"); + return -ENOMEM; + } + + return 0; +} + +static void +hn_nvs_disconn_rxbuf(struct hn_data *hv) +{ + struct hn_nvs_rxbuf_disconn disconn; + int error; + + /* + * Disconnect RXBUF from NVS. + */ + memset(&disconn, 0, sizeof(disconn)); + disconn.type = NVS_TYPE_RXBUF_DISCONN; + disconn.sig = NVS_RXBUF_SIG; + + /* NOTE: No response. */ + error = hn_nvs_req_send(hv, &disconn, sizeof(disconn)); + if (error) { + PMD_DRV_LOG(ERR, + "send nvs rxbuf disconn failed: %d", + error); + } + + rte_free(hv->rxbuf_info); + /* + * Linger long enough for NVS to disconnect RXBUF. + */ + rte_delay_ms(200); +} + +static void +hn_nvs_disconn_chim(struct hn_data *hv) +{ + int error; + + if (hv->chim_cnt != 0) { + struct hn_nvs_chim_disconn disconn; + + /* Disconnect chimney sending buffer from NVS. */ + memset(&disconn, 0, sizeof(disconn)); + disconn.type = NVS_TYPE_CHIM_DISCONN; + disconn.sig = NVS_CHIM_SIG; + + /* NOTE: No response. */ + error = hn_nvs_req_send(hv, &disconn, sizeof(disconn)); + + if (error) { + PMD_DRV_LOG(ERR, + "send nvs chim disconn failed: %d", error); + } + + hv->chim_cnt = 0; + /* + * Linger long enough for NVS to disconnect chimney + * sending buffer. + */ + rte_delay_ms(200); + } +} + +static int +hn_nvs_conn_chim(struct hn_data *hv) +{ + struct hn_nvs_chim_conn chim; + struct hn_nvs_chim_connresp resp; + uint32_t sectsz; + unsigned long len = hv->chim_res->len; + int error; + + /* Connect chimney sending buffer to NVS */ + memset(&chim, 0, sizeof(chim)); + chim.type = NVS_TYPE_CHIM_CONN; + chim.gpadl = hv->chim_res->phys_addr; + chim.sig = NVS_CHIM_SIG; + PMD_DRV_LOG(DEBUG, "connect send buf va=%p gpad=%#" PRIx64, + hv->chim_res->addr, + hv->chim_res->phys_addr); + + error = hn_nvs_execute(hv, &chim, sizeof(chim), + &resp, sizeof(resp), + NVS_TYPE_CHIM_CONNRESP); + if (error) { + PMD_DRV_LOG(ERR, "exec nvs chim conn failed"); + return error; + } + + if (resp.status != NVS_STATUS_OK) { + PMD_DRV_LOG(ERR, "nvs chim conn failed: %x", + resp.status); + return -EIO; + } + + sectsz = resp.sectsz; + if (sectsz == 0 || sectsz & (sizeof(uint32_t) - 1)) { + /* Can't use chimney sending buffer; done! */ + PMD_DRV_LOG(NOTICE, + "invalid chimney sending buffer section size: %u", + sectsz); + error = -EINVAL; + goto cleanup; + } + + hv->chim_szmax = sectsz; + hv->chim_cnt = len / sectsz; + + PMD_DRV_LOG(INFO, "send buffer %lu section size:%u, count:%u", + len, hv->chim_szmax, hv->chim_cnt); + + /* Done! 
*/ + return 0; + +cleanup: + hn_nvs_disconn_chim(hv); + return error; +} + +/* + * Configure MTU and enable VLAN. + */ +static int +hn_nvs_conf_ndis(struct hn_data *hv, unsigned int mtu) +{ + struct hn_nvs_ndis_conf conf; + int error; + + memset(&conf, 0, sizeof(conf)); + conf.type = NVS_TYPE_NDIS_CONF; + conf.mtu = mtu + RTE_ETHER_HDR_LEN; + conf.caps = NVS_NDIS_CONF_VLAN; + + /* enable SRIOV */ + if (hv->nvs_ver >= NVS_VERSION_5) + conf.caps |= NVS_NDIS_CONF_SRIOV; + + /* NOTE: No response. */ + error = hn_nvs_req_send(hv, &conf, sizeof(conf)); + if (error) { + PMD_DRV_LOG(ERR, + "send nvs ndis conf failed: %d", error); + return error; + } + + return 0; +} + +static int +hn_nvs_init_ndis(struct hn_data *hv) +{ + struct hn_nvs_ndis_init ndis; + int error; + + memset(&ndis, 0, sizeof(ndis)); + ndis.type = NVS_TYPE_NDIS_INIT; + ndis.ndis_major = NDIS_VERSION_MAJOR(hv->ndis_ver); + ndis.ndis_minor = NDIS_VERSION_MINOR(hv->ndis_ver); + + /* NOTE: No response. */ + error = hn_nvs_req_send(hv, &ndis, sizeof(ndis)); + if (error) + PMD_DRV_LOG(ERR, + "send nvs ndis init failed: %d", error); + + return error; +} + +static int +hn_nvs_init(struct hn_data *hv) +{ + unsigned int i; + int error; + + /* + * Find the supported NVS version and set NDIS version accordingly. + */ + for (i = 0; i < RTE_DIM(hn_nvs_version); ++i) { + error = hn_nvs_doinit(hv, hn_nvs_version[i]); + if (error) { + PMD_INIT_LOG(DEBUG, "version %#x error %d", + hn_nvs_version[i], error); + continue; + } + + hv->nvs_ver = hn_nvs_version[i]; + + /* Set NDIS version according to NVS version. */ + hv->ndis_ver = NDIS_VERSION_6_30; + if (hv->nvs_ver <= NVS_VERSION_4) + hv->ndis_ver = NDIS_VERSION_6_1; + + PMD_INIT_LOG(DEBUG, + "NVS version %#x, NDIS version %u.%u", + hv->nvs_ver, NDIS_VERSION_MAJOR(hv->ndis_ver), + NDIS_VERSION_MINOR(hv->ndis_ver)); + return 0; + } + + PMD_DRV_LOG(ERR, + "no NVS compatible version available"); + return -ENXIO; +} + +int +hn_nvs_attach(struct hn_data *hv, unsigned int mtu) +{ + int error; + + /* + * Initialize NVS. + */ + error = hn_nvs_init(hv); + if (error) + return error; + + /** Configure NDIS before initializing it. */ + if (hv->nvs_ver >= NVS_VERSION_2) { + error = hn_nvs_conf_ndis(hv, mtu); + if (error) + return error; + } + + /* + * Initialize NDIS. + */ + error = hn_nvs_init_ndis(hv); + if (error) + return error; + + /* + * Connect RXBUF. + */ + error = hn_nvs_conn_rxbuf(hv); + if (error) + return error; + + /* + * Connect chimney sending buffer. + */ + error = hn_nvs_conn_chim(hv); + if (error) { + hn_nvs_disconn_rxbuf(hv); + return error; + } + + return 0; +} + +void +hn_nvs_detach(struct hn_data *hv __rte_unused) +{ + PMD_INIT_FUNC_TRACE(); + + /* NOTE: there are no requests to stop the NVS. */ + hn_nvs_disconn_rxbuf(hv); + hn_nvs_disconn_chim(hv); +} + +/* + * Ack the consumed RXBUF associated w/ this channel packet, + * so that this RXBUF can be recycled by the hypervisor. + */ +void +hn_nvs_ack_rxbuf(struct vmbus_channel *chan, uint64_t tid) +{ + unsigned int retries = 0; + struct hn_nvs_rndis_ack ack = { + .type = NVS_TYPE_RNDIS_ACK, + .status = NVS_STATUS_OK, + }; + int error; + + PMD_RX_LOG(DEBUG, "ack RX id %" PRIu64, tid); + + again: + error = rte_vmbus_chan_send(chan, VMBUS_CHANPKT_TYPE_COMP, + &ack, sizeof(ack), tid, + VMBUS_CHANPKT_FLAG_NONE, NULL); + + if (error == 0) + return; + + if (error == -EAGAIN) { + /* + * NOTE: + * This should _not_ happen in real world, since the + * consumption of the TX bufring from the TX path is + * controlled. 
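+		 * The code below retries with a 1 ms delay, up to 10 send
+		 * attempts in total; if the completion still cannot be
+		 * posted, the receive buffer slot is leaked (see the error
+		 * log after the retry loop).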
+ */ + PMD_RX_LOG(NOTICE, "RXBUF ack retry"); + if (++retries < 10) { + rte_delay_ms(1); + goto again; + } + } + /* RXBUF leaks! */ + PMD_DRV_LOG(ERR, "RXBUF ack failed"); +} + +int +hn_nvs_alloc_subchans(struct hn_data *hv, uint32_t *nsubch) +{ + struct hn_nvs_subch_req req; + struct hn_nvs_subch_resp resp; + int error; + + memset(&req, 0, sizeof(req)); + req.type = NVS_TYPE_SUBCH_REQ; + req.op = NVS_SUBCH_OP_ALLOC; + req.nsubch = *nsubch; + + error = hn_nvs_execute(hv, &req, sizeof(req), + &resp, sizeof(resp), + NVS_TYPE_SUBCH_RESP); + if (error) + return error; + + if (resp.status != NVS_STATUS_OK) { + PMD_INIT_LOG(ERR, + "nvs subch alloc failed: %#x", + resp.status); + return -EIO; + } + + if (resp.nsubch > *nsubch) { + PMD_INIT_LOG(NOTICE, + "%u subchans are allocated, requested %u", + resp.nsubch, *nsubch); + } + *nsubch = resp.nsubch; + + return 0; +} + +void +hn_nvs_set_datapath(struct hn_data *hv, uint32_t path) +{ + struct hn_nvs_datapath dp; + int error; + + PMD_DRV_LOG(DEBUG, "set datapath %s", + path ? "VF" : "Synthetic"); + + memset(&dp, 0, sizeof(dp)); + dp.type = NVS_TYPE_SET_DATAPATH; + dp.active_path = path; + + error = hn_nvs_req_send(hv, &dp, sizeof(dp)); + if (error) { + PMD_DRV_LOG(ERR, + "send set datapath failed: %d", + error); + } +} diff --git a/src/spdk/dpdk/drivers/net/netvsc/hn_nvs.h b/src/spdk/dpdk/drivers/net/netvsc/hn_nvs.h new file mode 100644 index 000000000..015839e36 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/netvsc/hn_nvs.h @@ -0,0 +1,238 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2018 Microsoft Corp. + * All rights reserved. + */ + +/* + * The indirection table message is the largest message + * received from host, and that is 112 bytes. + */ +#define NVS_RESPSIZE_MAX 256 + +/* + * NDIS protocol version numbers + */ +#define NDIS_VERSION_6_1 0x00060001 +#define NDIS_VERSION_6_20 0x00060014 +#define NDIS_VERSION_6_30 0x0006001e +#define NDIS_VERSION_MAJOR(ver) (((ver) & 0xffff0000) >> 16) +#define NDIS_VERSION_MINOR(ver) ((ver) & 0xffff) + +/* + * NVS versions. + */ +#define NVS_VERSION_1 0x00002 +#define NVS_VERSION_2 0x30002 +#define NVS_VERSION_4 0x40000 +#define NVS_VERSION_5 0x50000 +#define NVS_VERSION_6 0x60000 +#define NVS_VERSION_61 0x60001 + +#define NVS_RXBUF_SIG 0xcafe +#define NVS_CHIM_SIG 0xface + +#define NVS_CHIM_IDX_INVALID 0xffffffff + +#define NVS_RNDIS_MTYPE_DATA 0 +#define NVS_RNDIS_MTYPE_CTRL 1 + +/* + * NVS message transaction status codes. + */ +#define NVS_STATUS_OK 1 +#define NVS_STATUS_FAILED 2 + +/* + * NVS request/response message types. 
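+ *
+ * hn_nvs_execute() matches a completion to its request by this type
+ * value.  Note that NVS_TYPE_SUBCH_RESP shares the value of
+ * NVS_TYPE_SUBCH_REQ, and the *_NOTE types are notifications rather
+ * than replies to a request.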
+ */ +#define NVS_TYPE_INIT 1 +#define NVS_TYPE_INIT_RESP 2 + +#define NVS_TYPE_NDIS_INIT 100 +#define NVS_TYPE_RXBUF_CONN 101 +#define NVS_TYPE_RXBUF_CONNRESP 102 +#define NVS_TYPE_RXBUF_DISCONN 103 +#define NVS_TYPE_CHIM_CONN 104 +#define NVS_TYPE_CHIM_CONNRESP 105 +#define NVS_TYPE_CHIM_DISCONN 106 +#define NVS_TYPE_RNDIS 107 +#define NVS_TYPE_RNDIS_ACK 108 + +#define NVS_TYPE_NDIS_CONF 125 +#define NVS_TYPE_VFASSOC_NOTE 128 /* notification */ +#define NVS_TYPE_SET_DATAPATH 129 +#define NVS_TYPE_SUBCH_REQ 133 +#define NVS_TYPE_SUBCH_RESP 133 /* same as SUBCH_REQ */ +#define NVS_TYPE_TXTBL_NOTE 134 /* notification */ + + +/* NVS message common header */ +struct hn_nvs_hdr { + uint32_t type; +} __rte_packed; + +struct hn_nvs_init { + uint32_t type; /* NVS_TYPE_INIT */ + uint32_t ver_min; + uint32_t ver_max; + uint8_t rsvd[28]; +} __rte_packed; + +struct hn_nvs_init_resp { + uint32_t type; /* NVS_TYPE_INIT_RESP */ + uint32_t ver; /* deprecated */ + uint32_t rsvd; + uint32_t status; /* NVS_STATUS_ */ +} __rte_packed; + +/* No response */ +struct hn_nvs_ndis_conf { + uint32_t type; /* NVS_TYPE_NDIS_CONF */ + uint32_t mtu; + uint32_t rsvd; + uint64_t caps; /* NVS_NDIS_CONF_ */ + uint8_t rsvd1[20]; +} __rte_packed; + +#define NVS_NDIS_CONF_SRIOV 0x0004 +#define NVS_NDIS_CONF_VLAN 0x0008 + +/* No response */ +struct hn_nvs_ndis_init { + uint32_t type; /* NVS_TYPE_NDIS_INIT */ + uint32_t ndis_major; /* NDIS_VERSION_MAJOR_ */ + uint32_t ndis_minor; /* NDIS_VERSION_MINOR_ */ + uint8_t rsvd[28]; +} __rte_packed; + +struct hn_nvs_vf_association { + uint32_t type; /* NVS_TYPE_VFASSOC_NOTE */ + uint32_t allocated; + uint32_t serial; +} __rte_packed; + +#define NVS_DATAPATH_SYNTHETIC 0 +#define NVS_DATAPATH_VF 1 + +/* No response */ +struct hn_nvs_datapath { + uint32_t type; /* NVS_TYPE_SET_DATAPATH */ + uint32_t active_path;/* NVS_DATAPATH_* */ + uint8_t rsvd[32]; +} __rte_packed; + +struct hn_nvs_rxbuf_conn { + uint32_t type; /* NVS_TYPE_RXBUF_CONN */ + uint32_t gpadl; /* RXBUF vmbus GPADL */ + uint16_t sig; /* NVS_RXBUF_SIG */ + uint8_t rsvd[30]; +} __rte_packed; + +struct hn_nvs_rxbuf_sect { + uint32_t start; + uint32_t slotsz; + uint32_t slotcnt; + uint32_t end; +} __rte_packed; + +struct hn_nvs_rxbuf_connresp { + uint32_t type; /* NVS_TYPE_RXBUF_CONNRESP */ + uint32_t status; /* NVS_STATUS_ */ + uint32_t nsect; /* # of elem in nvs_sect */ + struct hn_nvs_rxbuf_sect nvs_sect[1]; +} __rte_packed; + +/* No response */ +struct hn_nvs_rxbuf_disconn { + uint32_t type; /* NVS_TYPE_RXBUF_DISCONN */ + uint16_t sig; /* NVS_RXBUF_SIG */ + uint8_t rsvd[34]; +} __rte_packed; + +struct hn_nvs_chim_conn { + uint32_t type; /* NVS_TYPE_CHIM_CONN */ + uint32_t gpadl; /* chimney buf vmbus GPADL */ + uint16_t sig; /* NDIS_NVS_CHIM_SIG */ + uint8_t rsvd[30]; +} __rte_packed; + +struct hn_nvs_chim_connresp { + uint32_t type; /* NVS_TYPE_CHIM_CONNRESP */ + uint32_t status; /* NVS_STATUS_ */ + uint32_t sectsz; /* section size */ +} __rte_packed; + +/* No response */ +struct hn_nvs_chim_disconn { + uint32_t type; /* NVS_TYPE_CHIM_DISCONN */ + uint16_t sig; /* NVS_CHIM_SIG */ + uint8_t rsvd[34]; +} __rte_packed; + +#define NVS_SUBCH_OP_ALLOC 1 + +struct hn_nvs_subch_req { + uint32_t type; /* NVS_TYPE_SUBCH_REQ */ + uint32_t op; /* NVS_SUBCH_OP_ */ + uint32_t nsubch; + uint8_t rsvd[28]; +} __rte_packed; + +struct hn_nvs_subch_resp { + uint32_t type; /* NVS_TYPE_SUBCH_RESP */ + uint32_t status; /* NVS_STATUS_ */ + uint32_t nsubch; + uint8_t rsvd[28]; +} __rte_packed; + +struct hn_nvs_rndis { + uint32_t type; /* 
NVS_TYPE_RNDIS */ + uint32_t rndis_mtype;/* NVS_RNDIS_MTYPE_ */ + /* + * Chimney sending buffer index and size. + * + * NOTE: + * If nvs_chim_idx is set to NVS_CHIM_IDX_INVALID + * and nvs_chim_sz is set to 0, then chimney sending + * buffer is _not_ used by this RNDIS message. + */ + uint32_t chim_idx; + uint32_t chim_sz; + uint8_t rsvd[24]; +} __rte_packed; + +struct hn_nvs_rndis_ack { + uint32_t type; /* NVS_TYPE_RNDIS_ACK */ + uint32_t status; /* NVS_STATUS_ */ + uint8_t rsvd[32]; +} __rte_packed; + + +int hn_nvs_attach(struct hn_data *hv, unsigned int mtu); +void hn_nvs_detach(struct hn_data *hv); +void hn_nvs_ack_rxbuf(struct vmbus_channel *chan, uint64_t tid); +int hn_nvs_alloc_subchans(struct hn_data *hv, uint32_t *nsubch); +void hn_nvs_set_datapath(struct hn_data *hv, uint32_t path); +void hn_nvs_handle_vfassoc(struct rte_eth_dev *dev, + const struct vmbus_chanpkt_hdr *hdr, + const void *data); + +static inline int +hn_nvs_send(struct vmbus_channel *chan, uint16_t flags, + void *nvs_msg, int nvs_msglen, uintptr_t sndc, + bool *need_sig) +{ + return rte_vmbus_chan_send(chan, VMBUS_CHANPKT_TYPE_INBAND, + nvs_msg, nvs_msglen, (uint64_t)sndc, + flags, need_sig); +} + +static inline int +hn_nvs_send_sglist(struct vmbus_channel *chan, + struct vmbus_gpa sg[], unsigned int sglen, + void *nvs_msg, int nvs_msglen, + uintptr_t sndc, bool *need_sig) +{ + return rte_vmbus_chan_send_sglist(chan, sg, sglen, nvs_msg, nvs_msglen, + (uint64_t)sndc, need_sig); +} diff --git a/src/spdk/dpdk/drivers/net/netvsc/hn_rndis.c b/src/spdk/dpdk/drivers/net/netvsc/hn_rndis.c new file mode 100644 index 000000000..7947ca233 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/netvsc/hn_rndis.c @@ -0,0 +1,1106 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2009-2018 Microsoft Corp. + * Copyright (c) 2010-2012 Citrix Inc. + * Copyright (c) 2012 NetApp Inc. + * All rights reserved. 
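+ *
+ * Remote NDIS (RNDIS) control path over NVS.  Requests are sent with
+ * hn_nvs_send_rndis_ctrl() and the matching completion is collected by
+ * polling the primary channel (see hn_rndis_exec1()/hn_rndis_execute()
+ * below).  Illustrative sketch of a control sequence (variables such as
+ * "mac", "filter" and the offload masks are placeholders):
+ *
+ *	hn_rndis_attach(hv);
+ *	hn_rndis_get_eaddr(hv, mac);
+ *	hn_rndis_conf_offload(hv, tx_offloads, rx_offloads);
+ *	hn_rndis_set_rxfilter(hv, filter);
+ *	...
+ *	hn_rndis_detach(hv);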
+ */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "hn_logs.h" +#include "hn_var.h" +#include "hn_nvs.h" +#include "hn_rndis.h" +#include "ndis.h" + +#define HN_RNDIS_XFER_SIZE 0x4000 + +#define HN_NDIS_TXCSUM_CAP_IP4 \ + (NDIS_TXCSUM_CAP_IP4 | NDIS_TXCSUM_CAP_IP4OPT) +#define HN_NDIS_TXCSUM_CAP_TCP4 \ + (NDIS_TXCSUM_CAP_TCP4 | NDIS_TXCSUM_CAP_TCP4OPT) +#define HN_NDIS_TXCSUM_CAP_TCP6 \ + (NDIS_TXCSUM_CAP_TCP6 | NDIS_TXCSUM_CAP_TCP6OPT | \ + NDIS_TXCSUM_CAP_IP6EXT) +#define HN_NDIS_TXCSUM_CAP_UDP6 \ + (NDIS_TXCSUM_CAP_UDP6 | NDIS_TXCSUM_CAP_IP6EXT) +#define HN_NDIS_LSOV2_CAP_IP6 \ + (NDIS_LSOV2_CAP_IP6EXT | NDIS_LSOV2_CAP_TCP6OPT) + +/* Get unique request id */ +static inline uint32_t +hn_rndis_rid(struct hn_data *hv) +{ + uint32_t rid; + + do { + rid = rte_atomic32_add_return(&hv->rndis_req_id, 1); + } while (rid == 0); + + return rid; +} + +static void *hn_rndis_alloc(size_t size) +{ + return rte_zmalloc("RNDIS", size, PAGE_SIZE); +} + +#ifdef RTE_LIBRTE_NETVSC_DEBUG_DUMP +void hn_rndis_dump(const void *buf) +{ + const union { + struct rndis_msghdr hdr; + struct rndis_packet_msg pkt; + struct rndis_init_req init_request; + struct rndis_init_comp init_complete; + struct rndis_halt_req halt; + struct rndis_query_req query_request; + struct rndis_query_comp query_complete; + struct rndis_set_req set_request; + struct rndis_set_comp set_complete; + struct rndis_reset_req reset_request; + struct rndis_reset_comp reset_complete; + struct rndis_keepalive_req keepalive_request; + struct rndis_keepalive_comp keepalive_complete; + struct rndis_status_msg indicate_status; + } *rndis_msg = buf; + + switch (rndis_msg->hdr.type) { + case RNDIS_PACKET_MSG: { + const struct rndis_pktinfo *ppi; + unsigned int ppi_len; + + rte_log(RTE_LOG_DEBUG, hn_logtype_driver, + "RNDIS_MSG_PACKET (len %u, data %u:%u, # oob %u %u:%u, pkt %u:%u)\n", + rndis_msg->pkt.len, + rndis_msg->pkt.dataoffset, + rndis_msg->pkt.datalen, + rndis_msg->pkt.oobdataelements, + rndis_msg->pkt.oobdataoffset, + rndis_msg->pkt.oobdatalen, + rndis_msg->pkt.pktinfooffset, + rndis_msg->pkt.pktinfolen); + + ppi = (const struct rndis_pktinfo *) + ((const char *)buf + + RNDIS_PACKET_MSG_OFFSET_ABS(rndis_msg->pkt.pktinfooffset)); + + ppi_len = rndis_msg->pkt.pktinfolen; + while (ppi_len > 0) { + const void *ppi_data; + + ppi_data = ppi->data; + + rte_log(RTE_LOG_DEBUG, hn_logtype_driver, + " PPI (size %u, type %u, offs %u data %#x)\n", + ppi->size, ppi->type, ppi->offset, + *(const uint32_t *)ppi_data); + if (ppi->size == 0) + break; + ppi_len -= ppi->size; + ppi = (const struct rndis_pktinfo *) + ((const char *)ppi + ppi->size); + } + break; + } + case RNDIS_INITIALIZE_MSG: + rte_log(RTE_LOG_DEBUG, hn_logtype_driver, + "RNDIS_MSG_INIT (len %u id %#x, ver %u.%u max xfer %u)\n", + rndis_msg->init_request.len, + rndis_msg->init_request.rid, + rndis_msg->init_request.ver_major, + rndis_msg->init_request.ver_minor, + rndis_msg->init_request.max_xfersz); + break; + + case RNDIS_INITIALIZE_CMPLT: + rte_log(RTE_LOG_DEBUG, hn_logtype_driver, + "RNDIS_MSG_INIT_C (len %u, id %#x, status 0x%x, vers %u.%u, " + "flags %d, max xfer %u, max pkts %u, aligned %u)\n", + rndis_msg->init_complete.len, + rndis_msg->init_complete.rid, + rndis_msg->init_complete.status, + rndis_msg->init_complete.ver_major, + rndis_msg->init_complete.ver_minor, + rndis_msg->init_complete.devflags, + 
rndis_msg->init_complete.pktmaxsz, + rndis_msg->init_complete.pktmaxcnt, + rndis_msg->init_complete.align); + break; + + case RNDIS_HALT_MSG: + rte_log(RTE_LOG_DEBUG, hn_logtype_driver, + "RNDIS_HALT (len %u id %#x)\n", + rndis_msg->halt.len, rndis_msg->halt.rid); + break; + + case RNDIS_QUERY_MSG: + rte_log(RTE_LOG_DEBUG, hn_logtype_driver, + "RNDIS_QUERY (len %u, id %#x, oid %#x, info %u:%u)\n", + rndis_msg->query_request.len, + rndis_msg->query_request.rid, + rndis_msg->query_request.oid, + rndis_msg->query_request.infobuflen, + rndis_msg->query_request.infobufoffset); + break; + + case RNDIS_QUERY_CMPLT: + rte_log(RTE_LOG_DEBUG, hn_logtype_driver, + "RNDIS_MSG_QUERY_C (len %u, id %#x, status 0x%x, buf %u:%u)\n", + rndis_msg->query_complete.len, + rndis_msg->query_complete.rid, + rndis_msg->query_complete.status, + rndis_msg->query_complete.infobuflen, + rndis_msg->query_complete.infobufoffset); + break; + + case RNDIS_SET_MSG: + rte_log(RTE_LOG_DEBUG, hn_logtype_driver, + "RNDIS_SET (len %u, id %#x, oid %#x, info %u:%u)\n", + rndis_msg->set_request.len, + rndis_msg->set_request.rid, + rndis_msg->set_request.oid, + rndis_msg->set_request.infobuflen, + rndis_msg->set_request.infobufoffset); + break; + + case RNDIS_SET_CMPLT: + rte_log(RTE_LOG_DEBUG, hn_logtype_driver, + "RNDIS_MSG_SET_C (len %u, id 0x%x, status 0x%x)\n", + rndis_msg->set_complete.len, + rndis_msg->set_complete.rid, + rndis_msg->set_complete.status); + break; + + case RNDIS_INDICATE_STATUS_MSG: + rte_log(RTE_LOG_DEBUG, hn_logtype_driver, + "RNDIS_MSG_INDICATE (len %u, status %#x, buf len %u, buf offset %u)\n", + rndis_msg->indicate_status.len, + rndis_msg->indicate_status.status, + rndis_msg->indicate_status.stbuflen, + rndis_msg->indicate_status.stbufoffset); + break; + + case RNDIS_RESET_MSG: + rte_log(RTE_LOG_DEBUG, hn_logtype_driver, + "RNDIS_RESET (len %u, id %#x)\n", + rndis_msg->reset_request.len, + rndis_msg->reset_request.rid); + break; + + case RNDIS_RESET_CMPLT: + rte_log(RTE_LOG_DEBUG, hn_logtype_driver, + "RNDIS_RESET_C (len %u, status %#x address %#x)\n", + rndis_msg->reset_complete.len, + rndis_msg->reset_complete.status, + rndis_msg->reset_complete.adrreset); + break; + + case RNDIS_KEEPALIVE_MSG: + rte_log(RTE_LOG_DEBUG, hn_logtype_driver, + "RNDIS_KEEPALIVE (len %u, id %#x)\n", + rndis_msg->keepalive_request.len, + rndis_msg->keepalive_request.rid); + break; + + case RNDIS_KEEPALIVE_CMPLT: + rte_log(RTE_LOG_DEBUG, hn_logtype_driver, + "RNDIS_KEEPALIVE_C (len %u, id %#x address %#x)\n", + rndis_msg->keepalive_complete.len, + rndis_msg->keepalive_complete.rid, + rndis_msg->keepalive_complete.status); + break; + + default: + rte_log(RTE_LOG_DEBUG, hn_logtype_driver, + "RNDIS type %#x len %u\n", + rndis_msg->hdr.type, + rndis_msg->hdr.len); + break; + } +} +#endif + +static int hn_nvs_send_rndis_ctrl(struct vmbus_channel *chan, + const void *req, uint32_t reqlen) + +{ + struct hn_nvs_rndis nvs_rndis = { + .type = NVS_TYPE_RNDIS, + .rndis_mtype = NVS_RNDIS_MTYPE_CTRL, + .chim_idx = NVS_CHIM_IDX_INVALID, + .chim_sz = 0 + }; + struct vmbus_gpa sg; + rte_iova_t addr; + + addr = rte_malloc_virt2iova(req); + if (unlikely(addr == RTE_BAD_IOVA)) { + PMD_DRV_LOG(ERR, "RNDIS send request can not get iova"); + return -EINVAL; + } + + if (unlikely(reqlen > PAGE_SIZE)) { + PMD_DRV_LOG(ERR, "RNDIS request %u greater than page size", + reqlen); + return -EINVAL; + } + + sg.page = addr / PAGE_SIZE; + sg.ofs = addr & PAGE_MASK; + sg.len = reqlen; + + if (sg.ofs + reqlen > PAGE_SIZE) { + PMD_DRV_LOG(ERR, "RNDIS request 
crosses page bounary"); + return -EINVAL; + } + + hn_rndis_dump(req); + + return hn_nvs_send_sglist(chan, &sg, 1, + &nvs_rndis, sizeof(nvs_rndis), 0U, NULL); +} + +void hn_rndis_link_status(struct rte_eth_dev *dev, const void *msg) +{ + const struct rndis_status_msg *indicate = msg; + + hn_rndis_dump(msg); + + PMD_DRV_LOG(DEBUG, "link status %#x", indicate->status); + + switch (indicate->status) { + case RNDIS_STATUS_NETWORK_CHANGE: + case RNDIS_STATUS_TASK_OFFLOAD_CURRENT_CONFIG: + /* ignore not in DPDK API */ + break; + + case RNDIS_STATUS_LINK_SPEED_CHANGE: + case RNDIS_STATUS_MEDIA_CONNECT: + case RNDIS_STATUS_MEDIA_DISCONNECT: + if (dev->data->dev_conf.intr_conf.lsc && + hn_dev_link_update(dev, 0) == 0) + _rte_eth_dev_callback_process(dev, + RTE_ETH_EVENT_INTR_LSC, + NULL); + break; + default: + PMD_DRV_LOG(NOTICE, "unknown RNDIS indication: %#x", + indicate->status); + } +} + +/* Callback from hn_process_events when response is visible */ +void hn_rndis_receive_response(struct hn_data *hv, + const void *data, uint32_t len) +{ + const struct rndis_init_comp *hdr = data; + + hn_rndis_dump(data); + + if (len < sizeof(3 * sizeof(uint32_t))) { + PMD_DRV_LOG(ERR, + "missing RNDIS header %u", len); + return; + } + + if (len < hdr->len) { + PMD_DRV_LOG(ERR, + "truncated RNDIS response %u", len); + return; + } + + if (len > sizeof(hv->rndis_resp)) { + PMD_DRV_LOG(NOTICE, + "RNDIS response exceeds buffer"); + len = sizeof(hv->rndis_resp); + } + + if (hdr->rid == 0) { + PMD_DRV_LOG(NOTICE, + "RNDIS response id zero!"); + } + + memcpy(hv->rndis_resp, data, len); + + /* make sure response copied before update */ + rte_smp_wmb(); + + if (rte_atomic32_cmpset(&hv->rndis_pending, hdr->rid, 0) == 0) { + PMD_DRV_LOG(ERR, + "received id %#x pending id %#x", + hdr->rid, (uint32_t)hv->rndis_pending); + } +} + +/* Do request/response transaction */ +static int hn_rndis_exec1(struct hn_data *hv, + const void *req, uint32_t reqlen, + void *comp, uint32_t comp_len) +{ + const struct rndis_halt_req *hdr = req; + uint32_t rid = hdr->rid; + struct vmbus_channel *chan = hn_primary_chan(hv); + int error; + + if (comp_len > sizeof(hv->rndis_resp)) { + PMD_DRV_LOG(ERR, + "Expected completion size %u exceeds buffer %zu", + comp_len, sizeof(hv->rndis_resp)); + return -EIO; + } + + if (comp != NULL && + rte_atomic32_cmpset(&hv->rndis_pending, 0, rid) == 0) { + PMD_DRV_LOG(ERR, + "Request already pending"); + return -EBUSY; + } + + error = hn_nvs_send_rndis_ctrl(chan, req, reqlen); + if (error) { + PMD_DRV_LOG(ERR, "RNDIS ctrl send failed: %d", error); + return error; + } + + if (comp) { + /* Poll primary channel until response received */ + while (hv->rndis_pending == rid) + hn_process_events(hv, 0, 1); + + memcpy(comp, hv->rndis_resp, comp_len); + } + + return 0; +} + +/* Do transaction and validate response */ +static int hn_rndis_execute(struct hn_data *hv, uint32_t rid, + const void *req, uint32_t reqlen, + void *comp, uint32_t comp_len, uint32_t comp_type) +{ + const struct rndis_comp_hdr *hdr = comp; + int ret; + + memset(comp, 0, comp_len); + + ret = hn_rndis_exec1(hv, req, reqlen, comp, comp_len); + if (ret < 0) + return ret; + /* + * Check this RNDIS complete message. + */ + if (unlikely(hdr->type != comp_type)) { + PMD_DRV_LOG(ERR, + "unexpected RNDIS response complete %#x expect %#x", + hdr->type, comp_type); + + return -ENXIO; + } + if (unlikely(hdr->rid != rid)) { + PMD_DRV_LOG(ERR, + "RNDIS comp rid mismatch %#x, expect %#x", + hdr->rid, rid); + return -EINVAL; + } + + /* All pass! 
*/ + return 0; +} + +static int +hn_rndis_query(struct hn_data *hv, uint32_t oid, + const void *idata, uint32_t idlen, + void *odata, uint32_t odlen) +{ + struct rndis_query_req *req; + struct rndis_query_comp *comp; + uint32_t reqlen, comp_len; + int error = -EIO; + unsigned int ofs; + uint32_t rid; + + reqlen = sizeof(*req) + idlen; + req = hn_rndis_alloc(reqlen); + if (req == NULL) + return -ENOMEM; + + comp_len = sizeof(*comp) + odlen; + comp = rte_zmalloc("QUERY", comp_len, PAGE_SIZE); + if (!comp) { + error = -ENOMEM; + goto done; + } + comp->status = RNDIS_STATUS_PENDING; + + rid = hn_rndis_rid(hv); + + req->type = RNDIS_QUERY_MSG; + req->len = reqlen; + req->rid = rid; + req->oid = oid; + req->infobufoffset = RNDIS_QUERY_REQ_INFOBUFOFFSET; + req->infobuflen = idlen; + + /* Input data immediately follows RNDIS query. */ + memcpy(req + 1, idata, idlen); + + error = hn_rndis_execute(hv, rid, req, reqlen, + comp, comp_len, RNDIS_QUERY_CMPLT); + + if (error) + goto done; + + if (comp->status != RNDIS_STATUS_SUCCESS) { + PMD_DRV_LOG(ERR, "RNDIS query 0x%08x failed: status 0x%08x", + oid, comp->status); + error = -EINVAL; + goto done; + } + + if (comp->infobuflen == 0 || comp->infobufoffset == 0) { + /* No output data! */ + PMD_DRV_LOG(ERR, "RNDIS query 0x%08x, no data", oid); + error = 0; + goto done; + } + + /* + * Check output data length and offset. + */ + /* ofs is the offset from the beginning of comp. */ + ofs = RNDIS_QUERY_COMP_INFOBUFOFFSET_ABS(comp->infobufoffset); + if (ofs < sizeof(*comp) || ofs + comp->infobuflen > comp_len) { + PMD_DRV_LOG(ERR, "RNDIS query invalid comp ib off/len, %u/%u", + comp->infobufoffset, comp->infobuflen); + error = -EINVAL; + goto done; + } + + /* Save output data. */ + if (comp->infobuflen < odlen) + odlen = comp->infobuflen; + + /* ofs is the offset from the beginning of comp. */ + memcpy(odata, (const char *)comp + ofs, odlen); + + error = 0; +done: + rte_free(comp); + rte_free(req); + return error; +} + +static int +hn_rndis_halt(struct hn_data *hv) +{ + struct rndis_halt_req *halt; + + halt = hn_rndis_alloc(sizeof(*halt)); + if (halt == NULL) + return -ENOMEM; + + halt->type = RNDIS_HALT_MSG; + halt->len = sizeof(*halt); + halt->rid = hn_rndis_rid(hv); + + /* No RNDIS completion; rely on NVS message send completion */ + hn_rndis_exec1(hv, halt, sizeof(*halt), NULL, 0); + + rte_free(halt); + + PMD_INIT_LOG(DEBUG, "RNDIS halt done"); + return 0; +} + +static int +hn_rndis_query_hwcaps(struct hn_data *hv, struct ndis_offload *caps) +{ + struct ndis_offload in; + uint32_t caps_len, size; + int error; + + memset(caps, 0, sizeof(*caps)); + memset(&in, 0, sizeof(in)); + in.ndis_hdr.ndis_type = NDIS_OBJTYPE_OFFLOAD; + + if (hv->ndis_ver >= NDIS_VERSION_6_30) { + in.ndis_hdr.ndis_rev = NDIS_OFFLOAD_REV_3; + size = NDIS_OFFLOAD_SIZE; + } else if (hv->ndis_ver >= NDIS_VERSION_6_1) { + in.ndis_hdr.ndis_rev = NDIS_OFFLOAD_REV_2; + size = NDIS_OFFLOAD_SIZE_6_1; + } else { + in.ndis_hdr.ndis_rev = NDIS_OFFLOAD_REV_1; + size = NDIS_OFFLOAD_SIZE_6_0; + } + in.ndis_hdr.ndis_size = size; + + caps_len = NDIS_OFFLOAD_SIZE; + error = hn_rndis_query(hv, OID_TCP_OFFLOAD_HARDWARE_CAPABILITIES, + &in, size, caps, caps_len); + if (error) + return error; + + /* Preliminary verification. 
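+	 * The returned header must describe an NDIS_OBJTYPE_OFFLOAD object
+	 * of at least revision 1, and its size must fit in the reply buffer
+	 * while still covering at least the NDIS 6.0 layout.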
*/ + if (caps->ndis_hdr.ndis_type != NDIS_OBJTYPE_OFFLOAD) { + PMD_DRV_LOG(NOTICE, "invalid NDIS objtype 0x%02x", + caps->ndis_hdr.ndis_type); + return -EINVAL; + } + if (caps->ndis_hdr.ndis_rev < NDIS_OFFLOAD_REV_1) { + PMD_DRV_LOG(NOTICE, "invalid NDIS objrev 0x%02x", + caps->ndis_hdr.ndis_rev); + return -EINVAL; + } + if (caps->ndis_hdr.ndis_size > caps_len) { + PMD_DRV_LOG(NOTICE, "invalid NDIS objsize %u, data size %u", + caps->ndis_hdr.ndis_size, caps_len); + return -EINVAL; + } else if (caps->ndis_hdr.ndis_size < NDIS_OFFLOAD_SIZE_6_0) { + PMD_DRV_LOG(NOTICE, "invalid NDIS objsize %u", + caps->ndis_hdr.ndis_size); + return -EINVAL; + } + + return 0; +} + +int +hn_rndis_query_rsscaps(struct hn_data *hv, + unsigned int *rxr_cnt0) +{ + struct ndis_rss_caps in, caps; + unsigned int indsz, rxr_cnt; + uint32_t caps_len; + int error; + + *rxr_cnt0 = 0; + + if (hv->ndis_ver < NDIS_VERSION_6_20) { + PMD_DRV_LOG(DEBUG, "RSS not supported on this host"); + return -EOPNOTSUPP; + } + + memset(&in, 0, sizeof(in)); + in.ndis_hdr.ndis_type = NDIS_OBJTYPE_RSS_CAPS; + in.ndis_hdr.ndis_rev = NDIS_RSS_CAPS_REV_2; + in.ndis_hdr.ndis_size = NDIS_RSS_CAPS_SIZE; + + caps_len = NDIS_RSS_CAPS_SIZE; + error = hn_rndis_query(hv, OID_GEN_RECEIVE_SCALE_CAPABILITIES, + &in, NDIS_RSS_CAPS_SIZE, + &caps, caps_len); + if (error) + return error; + + PMD_INIT_LOG(DEBUG, "RX rings %u indirect %u caps %#x", + caps.ndis_nrxr, caps.ndis_nind, caps.ndis_caps); + /* + * Preliminary verification. + */ + if (caps.ndis_hdr.ndis_type != NDIS_OBJTYPE_RSS_CAPS) { + PMD_DRV_LOG(ERR, "invalid NDIS objtype 0x%02x", + caps.ndis_hdr.ndis_type); + return -EINVAL; + } + if (caps.ndis_hdr.ndis_rev < NDIS_RSS_CAPS_REV_1) { + PMD_DRV_LOG(ERR, "invalid NDIS objrev 0x%02x", + caps.ndis_hdr.ndis_rev); + return -EINVAL; + } + if (caps.ndis_hdr.ndis_size > caps_len) { + PMD_DRV_LOG(ERR, + "invalid NDIS objsize %u, data size %u", + caps.ndis_hdr.ndis_size, caps_len); + return -EINVAL; + } else if (caps.ndis_hdr.ndis_size < NDIS_RSS_CAPS_SIZE_6_0) { + PMD_DRV_LOG(ERR, "invalid NDIS objsize %u", + caps.ndis_hdr.ndis_size); + return -EINVAL; + } + + /* + * Save information for later RSS configuration. + */ + if (caps.ndis_nrxr == 0) { + PMD_DRV_LOG(ERR, "0 RX rings!?"); + return -EINVAL; + } + rxr_cnt = caps.ndis_nrxr; + + if (caps.ndis_hdr.ndis_size == NDIS_RSS_CAPS_SIZE && + caps.ndis_hdr.ndis_rev >= NDIS_RSS_CAPS_REV_2) { + if (caps.ndis_nind > NDIS_HASH_INDCNT) { + PMD_DRV_LOG(ERR, + "too many RSS indirect table entries %u", + caps.ndis_nind); + return -EOPNOTSUPP; + } + if (!rte_is_power_of_2(caps.ndis_nind)) { + PMD_DRV_LOG(ERR, + "RSS indirect table size is not power-of-2 %u", + caps.ndis_nind); + } + + indsz = caps.ndis_nind; + } else { + indsz = NDIS_HASH_INDCNT; + } + + if (indsz < rxr_cnt) { + PMD_DRV_LOG(NOTICE, + "# of RX rings (%d) > RSS indirect table size %d", + rxr_cnt, indsz); + rxr_cnt = indsz; + } + + hv->rss_offloads = 0; + if (caps.ndis_caps & NDIS_RSS_CAP_IPV4) + hv->rss_offloads |= ETH_RSS_IPV4 + | ETH_RSS_NONFRAG_IPV4_TCP + | ETH_RSS_NONFRAG_IPV4_UDP; + if (caps.ndis_caps & NDIS_RSS_CAP_IPV6) + hv->rss_offloads |= ETH_RSS_IPV6 + | ETH_RSS_NONFRAG_IPV6_TCP; + if (caps.ndis_caps & NDIS_RSS_CAP_IPV6_EX) + hv->rss_offloads |= ETH_RSS_IPV6_EX + | ETH_RSS_IPV6_TCP_EX; + + /* Commit! 
*/ + *rxr_cnt0 = rxr_cnt; + + return 0; +} + +static int +hn_rndis_set(struct hn_data *hv, uint32_t oid, const void *data, uint32_t dlen) +{ + struct rndis_set_req *req; + struct rndis_set_comp comp; + uint32_t reqlen, comp_len; + uint32_t rid; + int error; + + reqlen = sizeof(*req) + dlen; + req = rte_zmalloc("RNDIS_SET", reqlen, PAGE_SIZE); + if (!req) + return -ENOMEM; + + rid = hn_rndis_rid(hv); + req->type = RNDIS_SET_MSG; + req->len = reqlen; + req->rid = rid; + req->oid = oid; + req->infobuflen = dlen; + req->infobufoffset = RNDIS_SET_REQ_INFOBUFOFFSET; + + /* Data immediately follows RNDIS set. */ + memcpy(req + 1, data, dlen); + + comp_len = sizeof(comp); + error = hn_rndis_execute(hv, rid, req, reqlen, + &comp, comp_len, + RNDIS_SET_CMPLT); + if (error) { + PMD_DRV_LOG(ERR, "exec RNDIS set %#" PRIx32 " failed", + oid); + error = EIO; + goto done; + } + + if (comp.status != RNDIS_STATUS_SUCCESS) { + PMD_DRV_LOG(ERR, + "RNDIS set %#" PRIx32 " failed: status %#" PRIx32, + oid, comp.status); + error = EIO; + goto done; + } + +done: + rte_free(req); + return error; +} + +int hn_rndis_conf_offload(struct hn_data *hv, + uint64_t tx_offloads, uint64_t rx_offloads) +{ + struct ndis_offload_params params; + struct ndis_offload hwcaps; + int error; + + error = hn_rndis_query_hwcaps(hv, &hwcaps); + if (error) { + PMD_DRV_LOG(ERR, "hwcaps query failed: %d", error); + return error; + } + + /* NOTE: 0 means "no change" */ + memset(¶ms, 0, sizeof(params)); + + params.ndis_hdr.ndis_type = NDIS_OBJTYPE_DEFAULT; + if (hv->ndis_ver < NDIS_VERSION_6_30) { + params.ndis_hdr.ndis_rev = NDIS_OFFLOAD_PARAMS_REV_2; + params.ndis_hdr.ndis_size = NDIS_OFFLOAD_PARAMS_SIZE_6_1; + } else { + params.ndis_hdr.ndis_rev = NDIS_OFFLOAD_PARAMS_REV_3; + params.ndis_hdr.ndis_size = NDIS_OFFLOAD_PARAMS_SIZE; + } + + if (tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) { + if (hwcaps.ndis_csum.ndis_ip4_txcsum & NDIS_TXCSUM_CAP_TCP4) + params.ndis_tcp4csum = NDIS_OFFLOAD_PARAM_TX; + else + goto unsupported; + + if (hwcaps.ndis_csum.ndis_ip6_txcsum & NDIS_TXCSUM_CAP_TCP6) + params.ndis_tcp6csum = NDIS_OFFLOAD_PARAM_TX; + else + goto unsupported; + } + + if (rx_offloads & DEV_RX_OFFLOAD_TCP_CKSUM) { + if ((hwcaps.ndis_csum.ndis_ip4_rxcsum & NDIS_RXCSUM_CAP_TCP4) + == NDIS_RXCSUM_CAP_TCP4) + params.ndis_tcp4csum |= NDIS_OFFLOAD_PARAM_RX; + else + goto unsupported; + + if ((hwcaps.ndis_csum.ndis_ip6_rxcsum & NDIS_RXCSUM_CAP_TCP6) + == NDIS_RXCSUM_CAP_TCP6) + params.ndis_tcp6csum |= NDIS_OFFLOAD_PARAM_RX; + else + goto unsupported; + } + + if (tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) { + if (hwcaps.ndis_csum.ndis_ip4_txcsum & NDIS_TXCSUM_CAP_UDP4) + params.ndis_udp4csum = NDIS_OFFLOAD_PARAM_TX; + else + goto unsupported; + + if ((hwcaps.ndis_csum.ndis_ip6_txcsum & NDIS_TXCSUM_CAP_UDP6) + == NDIS_TXCSUM_CAP_UDP6) + params.ndis_udp6csum = NDIS_OFFLOAD_PARAM_TX; + else + goto unsupported; + } + + if (rx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) { + if (hwcaps.ndis_csum.ndis_ip4_rxcsum & NDIS_RXCSUM_CAP_UDP4) + params.ndis_udp4csum |= NDIS_OFFLOAD_PARAM_RX; + else + goto unsupported; + + if (hwcaps.ndis_csum.ndis_ip6_rxcsum & NDIS_RXCSUM_CAP_UDP6) + params.ndis_udp6csum |= NDIS_OFFLOAD_PARAM_RX; + else + goto unsupported; + } + + if (tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) { + if ((hwcaps.ndis_csum.ndis_ip4_txcsum & NDIS_TXCSUM_CAP_IP4) + == NDIS_TXCSUM_CAP_IP4) + params.ndis_ip4csum = NDIS_OFFLOAD_PARAM_TX; + else + goto unsupported; + } + if (rx_offloads & DEV_RX_OFFLOAD_IPV4_CKSUM) { + if (hwcaps.ndis_csum.ndis_ip4_rxcsum & 
NDIS_RXCSUM_CAP_IP4) + params.ndis_ip4csum |= NDIS_OFFLOAD_PARAM_RX; + else + goto unsupported; + } + + if (tx_offloads & DEV_TX_OFFLOAD_TCP_TSO) { + if (hwcaps.ndis_lsov2.ndis_ip4_encap & NDIS_OFFLOAD_ENCAP_8023) + params.ndis_lsov2_ip4 = NDIS_OFFLOAD_LSOV2_ON; + else + goto unsupported; + + if ((hwcaps.ndis_lsov2.ndis_ip6_opts & HN_NDIS_LSOV2_CAP_IP6) + == HN_NDIS_LSOV2_CAP_IP6) + params.ndis_lsov2_ip6 = NDIS_OFFLOAD_LSOV2_ON; + else + goto unsupported; + } + + error = hn_rndis_set(hv, OID_TCP_OFFLOAD_PARAMETERS, ¶ms, + params.ndis_hdr.ndis_size); + if (error) { + PMD_DRV_LOG(ERR, "offload config failed"); + return error; + } + + return 0; + unsupported: + PMD_DRV_LOG(NOTICE, + "offload tx:%" PRIx64 " rx:%" PRIx64 " not supported by this version", + tx_offloads, rx_offloads); + return -EINVAL; +} + +int hn_rndis_get_offload(struct hn_data *hv, + struct rte_eth_dev_info *dev_info) +{ + struct ndis_offload hwcaps; + int error; + + memset(&hwcaps, 0, sizeof(hwcaps)); + + error = hn_rndis_query_hwcaps(hv, &hwcaps); + if (error) { + PMD_DRV_LOG(ERR, "hwcaps query failed: %d", error); + return error; + } + + dev_info->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS | + DEV_TX_OFFLOAD_VLAN_INSERT; + + if ((hwcaps.ndis_csum.ndis_ip4_txcsum & HN_NDIS_TXCSUM_CAP_IP4) + == HN_NDIS_TXCSUM_CAP_IP4) + dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_IPV4_CKSUM; + + if ((hwcaps.ndis_csum.ndis_ip4_txcsum & HN_NDIS_TXCSUM_CAP_TCP4) + == HN_NDIS_TXCSUM_CAP_TCP4 && + (hwcaps.ndis_csum.ndis_ip6_txcsum & HN_NDIS_TXCSUM_CAP_TCP6) + == HN_NDIS_TXCSUM_CAP_TCP6) + dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_CKSUM; + + if ((hwcaps.ndis_csum.ndis_ip4_txcsum & NDIS_TXCSUM_CAP_UDP4) && + (hwcaps.ndis_csum.ndis_ip6_txcsum & NDIS_TXCSUM_CAP_UDP6)) + dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_UDP_CKSUM; + + if ((hwcaps.ndis_lsov2.ndis_ip4_encap & NDIS_OFFLOAD_ENCAP_8023) && + (hwcaps.ndis_lsov2.ndis_ip6_opts & HN_NDIS_LSOV2_CAP_IP6) + == HN_NDIS_LSOV2_CAP_IP6) + dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO; + + dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP | + DEV_RX_OFFLOAD_RSS_HASH; + + if (hwcaps.ndis_csum.ndis_ip4_rxcsum & NDIS_RXCSUM_CAP_IP4) + dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_IPV4_CKSUM; + + if ((hwcaps.ndis_csum.ndis_ip4_rxcsum & NDIS_RXCSUM_CAP_TCP4) && + (hwcaps.ndis_csum.ndis_ip6_rxcsum & NDIS_RXCSUM_CAP_TCP6)) + dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_CKSUM; + + if ((hwcaps.ndis_csum.ndis_ip4_rxcsum & NDIS_RXCSUM_CAP_UDP4) && + (hwcaps.ndis_csum.ndis_ip6_rxcsum & NDIS_RXCSUM_CAP_UDP6)) + dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_UDP_CKSUM; + + return 0; +} + +uint32_t +hn_rndis_get_ptypes(struct hn_data *hv) +{ + struct ndis_offload hwcaps; + uint32_t ptypes; + int error; + + memset(&hwcaps, 0, sizeof(hwcaps)); + + error = hn_rndis_query_hwcaps(hv, &hwcaps); + if (error) { + PMD_DRV_LOG(ERR, "hwcaps query failed: %d", error); + return RTE_PTYPE_L2_ETHER; + } + + ptypes = RTE_PTYPE_L2_ETHER; + + if (hwcaps.ndis_csum.ndis_ip4_rxcsum & NDIS_RXCSUM_CAP_IP4) + ptypes |= RTE_PTYPE_L3_IPV4; + + if ((hwcaps.ndis_csum.ndis_ip4_rxcsum & NDIS_RXCSUM_CAP_TCP4) || + (hwcaps.ndis_csum.ndis_ip6_rxcsum & NDIS_RXCSUM_CAP_TCP6)) + ptypes |= RTE_PTYPE_L4_TCP; + + if ((hwcaps.ndis_csum.ndis_ip4_rxcsum & NDIS_RXCSUM_CAP_UDP4) || + (hwcaps.ndis_csum.ndis_ip6_rxcsum & NDIS_RXCSUM_CAP_UDP6)) + ptypes |= RTE_PTYPE_L4_UDP; + + return ptypes; +} + +int +hn_rndis_set_rxfilter(struct hn_data *hv, uint32_t filter) +{ + int error; + + error = hn_rndis_set(hv, OID_GEN_CURRENT_PACKET_FILTER, + &filter, 
sizeof(filter)); + if (error) { + PMD_DRV_LOG(ERR, "set RX filter %#" PRIx32 " failed: %d", + filter, error); + } else { + PMD_DRV_LOG(DEBUG, "set RX filter %#" PRIx32 " done", filter); + } + + return error; +} + +int hn_rndis_conf_rss(struct hn_data *hv, uint32_t flags) +{ + struct ndis_rssprm_toeplitz rssp; + struct ndis_rss_params *prm = &rssp.rss_params; + unsigned int i; + int error; + + memset(&rssp, 0, sizeof(rssp)); + + prm->ndis_hdr.ndis_type = NDIS_OBJTYPE_RSS_PARAMS; + prm->ndis_hdr.ndis_rev = NDIS_RSS_PARAMS_REV_2; + prm->ndis_hdr.ndis_size = sizeof(*prm); + prm->ndis_flags = flags; + prm->ndis_hash = hv->rss_hash; + prm->ndis_indsize = sizeof(rssp.rss_ind[0]) * NDIS_HASH_INDCNT; + prm->ndis_indoffset = offsetof(struct ndis_rssprm_toeplitz, rss_ind[0]); + prm->ndis_keysize = NDIS_HASH_KEYSIZE_TOEPLITZ; + prm->ndis_keyoffset = offsetof(struct ndis_rssprm_toeplitz, rss_key[0]); + + for (i = 0; i < NDIS_HASH_INDCNT; i++) + rssp.rss_ind[i] = hv->rss_ind[i]; + + /* Set hask key values */ + memcpy(&rssp.rss_key, hv->rss_key, NDIS_HASH_KEYSIZE_TOEPLITZ); + + error = hn_rndis_set(hv, OID_GEN_RECEIVE_SCALE_PARAMETERS, + &rssp, sizeof(rssp)); + if (error != 0) { + PMD_DRV_LOG(ERR, + "RSS config num queues=%u failed: %d", + hv->num_queues, error); + } + return error; +} + +static int hn_rndis_init(struct hn_data *hv) +{ + struct rndis_init_req *req; + struct rndis_init_comp comp; + uint32_t comp_len, rid; + int error; + + req = hn_rndis_alloc(sizeof(*req)); + if (!req) { + PMD_DRV_LOG(ERR, "no memory for RNDIS init"); + return -ENXIO; + } + + rid = hn_rndis_rid(hv); + req->type = RNDIS_INITIALIZE_MSG; + req->len = sizeof(*req); + req->rid = rid; + req->ver_major = RNDIS_VERSION_MAJOR; + req->ver_minor = RNDIS_VERSION_MINOR; + req->max_xfersz = HN_RNDIS_XFER_SIZE; + + comp_len = RNDIS_INIT_COMP_SIZE_MIN; + error = hn_rndis_execute(hv, rid, req, sizeof(*req), + &comp, comp_len, + RNDIS_INITIALIZE_CMPLT); + if (error) + goto done; + + if (comp.status != RNDIS_STATUS_SUCCESS) { + PMD_DRV_LOG(ERR, "RNDIS init failed: status 0x%08x", + comp.status); + error = -EIO; + goto done; + } + + hv->rndis_agg_size = comp.pktmaxsz; + hv->rndis_agg_pkts = comp.pktmaxcnt; + hv->rndis_agg_align = 1U << comp.align; + + if (hv->rndis_agg_align < sizeof(uint32_t)) { + /* + * The RNDIS packet message encap assumes that the RNDIS + * packet message is at least 4 bytes aligned. Fix up the + * alignment here, if the remote side sets the alignment + * too low. 
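+		 *
+		 * The advertised value is an exponent (the code above uses
+		 * 1U << comp.align), so e.g. an advertised value of 1 means
+		 * 2-byte alignment and is bumped to sizeof(uint32_t) here.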
+ */ + PMD_DRV_LOG(NOTICE, + "fixup RNDIS aggpkt align: %u -> %zu", + hv->rndis_agg_align, sizeof(uint32_t)); + hv->rndis_agg_align = sizeof(uint32_t); + } + + PMD_INIT_LOG(INFO, + "RNDIS ver %u.%u, aggpkt size %u, aggpkt cnt %u, aggpkt align %u", + comp.ver_major, comp.ver_minor, + hv->rndis_agg_size, hv->rndis_agg_pkts, + hv->rndis_agg_align); + error = 0; +done: + rte_free(req); + return error; +} + +int +hn_rndis_get_eaddr(struct hn_data *hv, uint8_t *eaddr) +{ + uint32_t eaddr_len; + int error; + + eaddr_len = RTE_ETHER_ADDR_LEN; + error = hn_rndis_query(hv, OID_802_3_PERMANENT_ADDRESS, NULL, 0, + eaddr, eaddr_len); + if (error) + return error; + + PMD_DRV_LOG(INFO, "MAC address %02x:%02x:%02x:%02x:%02x:%02x", + eaddr[0], eaddr[1], eaddr[2], + eaddr[3], eaddr[4], eaddr[5]); + return 0; +} + +int +hn_rndis_get_linkstatus(struct hn_data *hv) +{ + return hn_rndis_query(hv, OID_GEN_MEDIA_CONNECT_STATUS, NULL, 0, + &hv->link_status, sizeof(uint32_t)); +} + +int +hn_rndis_get_linkspeed(struct hn_data *hv) +{ + return hn_rndis_query(hv, OID_GEN_LINK_SPEED, NULL, 0, + &hv->link_speed, sizeof(uint32_t)); +} + +int +hn_rndis_attach(struct hn_data *hv) +{ + /* Initialize RNDIS. */ + return hn_rndis_init(hv); +} + +void +hn_rndis_detach(struct hn_data *hv) +{ + /* Halt the RNDIS. */ + hn_rndis_halt(hv); +} diff --git a/src/spdk/dpdk/drivers/net/netvsc/hn_rndis.h b/src/spdk/dpdk/drivers/net/netvsc/hn_rndis.h new file mode 100644 index 000000000..9a8251fc2 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/netvsc/hn_rndis.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: BSD-3-Clause */ + +#include "rndis.h" + +struct hn_data; + +void hn_rndis_receive_response(struct hn_data *hv, + const void *data, uint32_t len); +void hn_rndis_link_status(struct rte_eth_dev *dev, const void *msg); +int hn_rndis_attach(struct hn_data *hv); +void hn_rndis_detach(struct hn_data *hv); +int hn_rndis_get_eaddr(struct hn_data *hv, uint8_t *eaddr); +int hn_rndis_get_linkstatus(struct hn_data *hv); +int hn_rndis_get_linkspeed(struct hn_data *hv); +int hn_rndis_set_rxfilter(struct hn_data *hv, uint32_t filter); +void hn_rndis_rx_ctrl(struct hn_data *hv, const void *data, + int dlen); +int hn_rndis_get_offload(struct hn_data *hv, + struct rte_eth_dev_info *dev_info); +int hn_rndis_conf_offload(struct hn_data *hv, + uint64_t tx_offloads, + uint64_t rx_offloads); +int hn_rndis_query_rsscaps(struct hn_data *hv, + unsigned int *rxr_cnt0); +int hn_rndis_query_rss(struct hn_data *hv, + struct rte_eth_rss_conf *rss_conf); +int hn_rndis_conf_rss(struct hn_data *hv, uint32_t flags); +uint32_t hn_rndis_get_ptypes(struct hn_data *hv); + +#ifdef RTE_LIBRTE_NETVSC_DEBUG_DUMP +void hn_rndis_dump(const void *buf); +#else +#define hn_rndis_dump(buf) +#endif diff --git a/src/spdk/dpdk/drivers/net/netvsc/hn_rxtx.c b/src/spdk/dpdk/drivers/net/netvsc/hn_rxtx.c new file mode 100644 index 000000000..31fae5597 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/netvsc/hn_rxtx.c @@ -0,0 +1,1535 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2016-2018 Microsoft Corporation + * Copyright(c) 2013-2016 Brocade Communications Systems, Inc. + * All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "hn_logs.h" +#include "hn_var.h" +#include "hn_rndis.h" +#include "hn_nvs.h" +#include "ndis.h" + +#define HN_NVS_SEND_MSG_SIZE \ + (sizeof(struct vmbus_chanpkt_hdr) + sizeof(struct hn_nvs_rndis)) + +#define HN_TXD_CACHE_SIZE 32 /* per cpu tx_descriptor pool cache */ +#define HN_TXCOPY_THRESHOLD 512 + +#define HN_RXCOPY_THRESHOLD 256 +#define HN_RXQ_EVENT_DEFAULT 2048 + +struct hn_rxinfo { + uint32_t vlan_info; + uint32_t csum_info; + uint32_t hash_info; + uint32_t hash_value; +}; + +#define HN_RXINFO_VLAN 0x0001 +#define HN_RXINFO_CSUM 0x0002 +#define HN_RXINFO_HASHINF 0x0004 +#define HN_RXINFO_HASHVAL 0x0008 +#define HN_RXINFO_ALL \ + (HN_RXINFO_VLAN | \ + HN_RXINFO_CSUM | \ + HN_RXINFO_HASHINF | \ + HN_RXINFO_HASHVAL) + +#define HN_NDIS_VLAN_INFO_INVALID 0xffffffff +#define HN_NDIS_RXCSUM_INFO_INVALID 0 +#define HN_NDIS_HASH_INFO_INVALID 0 + +/* + * Per-transmit book keeping. + * A slot in transmit ring (chim_index) is reserved for each transmit. + * + * There are two types of transmit: + * - buffered transmit where chimney buffer is used and RNDIS header + * is in the buffer. mbuf == NULL for this case. + * + * - direct transmit where RNDIS header is in the in rndis_pkt + * mbuf is freed after transmit. + * + * Descriptors come from per-port pool which is used + * to limit number of outstanding requests per device. + */ +struct hn_txdesc { + struct rte_mbuf *m; + + uint16_t queue_id; + uint32_t chim_index; + uint32_t chim_size; + uint32_t data_size; + uint32_t packets; + + struct rndis_packet_msg *rndis_pkt; +}; + +#define HN_RNDIS_PKT_LEN \ + (sizeof(struct rndis_packet_msg) + \ + RNDIS_PKTINFO_SIZE(NDIS_HASH_VALUE_SIZE) + \ + RNDIS_PKTINFO_SIZE(NDIS_VLAN_INFO_SIZE) + \ + RNDIS_PKTINFO_SIZE(NDIS_LSO2_INFO_SIZE) + \ + RNDIS_PKTINFO_SIZE(NDIS_TXCSUM_INFO_SIZE)) + +#define HN_RNDIS_PKT_ALIGNED RTE_ALIGN(HN_RNDIS_PKT_LEN, RTE_CACHE_LINE_SIZE) + +/* Minimum space required for a packet */ +#define HN_PKTSIZE_MIN(align) \ + RTE_ALIGN(RTE_ETHER_MIN_LEN + HN_RNDIS_PKT_LEN, align) + +#define DEFAULT_TX_FREE_THRESH 32 + +static void +hn_update_packet_stats(struct hn_stats *stats, const struct rte_mbuf *m) +{ + uint32_t s = m->pkt_len; + const struct rte_ether_addr *ea; + + if (s == 64) { + stats->size_bins[1]++; + } else if (s > 64 && s < 1024) { + uint32_t bin; + + /* count zeros, and offset into correct bin */ + bin = (sizeof(s) * 8) - __builtin_clz(s) - 5; + stats->size_bins[bin]++; + } else { + if (s < 64) + stats->size_bins[0]++; + else if (s < 1519) + stats->size_bins[6]++; + else + stats->size_bins[7]++; + } + + ea = rte_pktmbuf_mtod(m, const struct rte_ether_addr *); + if (rte_is_multicast_ether_addr(ea)) { + if (rte_is_broadcast_ether_addr(ea)) + stats->broadcast++; + else + stats->multicast++; + } +} + +static inline unsigned int hn_rndis_pktlen(const struct rndis_packet_msg *pkt) +{ + return pkt->pktinfooffset + pkt->pktinfolen; +} + +static inline uint32_t +hn_rndis_pktmsg_offset(uint32_t ofs) +{ + return ofs - offsetof(struct rndis_packet_msg, dataoffset); +} + +static void hn_txd_init(struct rte_mempool *mp __rte_unused, + void *opaque, void *obj, unsigned int idx) +{ + struct hn_tx_queue *txq = opaque; + struct hn_txdesc *txd = obj; + + memset(txd, 0, sizeof(*txd)); + + txd->queue_id = txq->queue_id; + txd->chim_index = 
NVS_CHIM_IDX_INVALID; + txd->rndis_pkt = (struct rndis_packet_msg *)(char *)txq->tx_rndis + + idx * HN_RNDIS_PKT_ALIGNED; +} + +int +hn_chim_init(struct rte_eth_dev *dev) +{ + struct hn_data *hv = dev->data->dev_private; + uint32_t i, chim_bmp_size; + + rte_spinlock_init(&hv->chim_lock); + chim_bmp_size = rte_bitmap_get_memory_footprint(hv->chim_cnt); + hv->chim_bmem = rte_zmalloc("hn_chim_bitmap", chim_bmp_size, + RTE_CACHE_LINE_SIZE); + if (hv->chim_bmem == NULL) { + PMD_INIT_LOG(ERR, "failed to allocate bitmap size %u", + chim_bmp_size); + return -1; + } + + hv->chim_bmap = rte_bitmap_init(hv->chim_cnt, + hv->chim_bmem, chim_bmp_size); + if (hv->chim_bmap == NULL) { + PMD_INIT_LOG(ERR, "failed to init chim bitmap"); + return -1; + } + + for (i = 0; i < hv->chim_cnt; i++) + rte_bitmap_set(hv->chim_bmap, i); + + return 0; +} + +void +hn_chim_uninit(struct rte_eth_dev *dev) +{ + struct hn_data *hv = dev->data->dev_private; + + rte_bitmap_free(hv->chim_bmap); + rte_free(hv->chim_bmem); + hv->chim_bmem = NULL; +} + +static uint32_t hn_chim_alloc(struct hn_data *hv) +{ + uint32_t index = NVS_CHIM_IDX_INVALID; + uint64_t slab; + + rte_spinlock_lock(&hv->chim_lock); + if (rte_bitmap_scan(hv->chim_bmap, &index, &slab)) + rte_bitmap_clear(hv->chim_bmap, index); + rte_spinlock_unlock(&hv->chim_lock); + + return index; +} + +static void hn_chim_free(struct hn_data *hv, uint32_t chim_idx) +{ + if (chim_idx >= hv->chim_cnt) { + PMD_DRV_LOG(ERR, "Invalid chimney index %u", chim_idx); + } else { + rte_spinlock_lock(&hv->chim_lock); + rte_bitmap_set(hv->chim_bmap, chim_idx); + rte_spinlock_unlock(&hv->chim_lock); + } +} + +static void hn_reset_txagg(struct hn_tx_queue *txq) +{ + txq->agg_szleft = txq->agg_szmax; + txq->agg_pktleft = txq->agg_pktmax; + txq->agg_txd = NULL; + txq->agg_prevpkt = NULL; +} + +int +hn_dev_tx_queue_setup(struct rte_eth_dev *dev, + uint16_t queue_idx, uint16_t nb_desc, + unsigned int socket_id, + const struct rte_eth_txconf *tx_conf) + +{ + struct hn_data *hv = dev->data->dev_private; + struct hn_tx_queue *txq; + char name[RTE_MEMPOOL_NAMESIZE]; + uint32_t tx_free_thresh; + int err = -ENOMEM; + + PMD_INIT_FUNC_TRACE(); + + txq = rte_zmalloc_socket("HN_TXQ", sizeof(*txq), RTE_CACHE_LINE_SIZE, + socket_id); + if (!txq) + return -ENOMEM; + + txq->hv = hv; + txq->chan = hv->channels[queue_idx]; + txq->port_id = dev->data->port_id; + txq->queue_id = queue_idx; + + tx_free_thresh = tx_conf->tx_free_thresh; + if (tx_free_thresh == 0) + tx_free_thresh = RTE_MIN(nb_desc / 4, + DEFAULT_TX_FREE_THRESH); + + if (tx_free_thresh + 3 >= nb_desc) { + PMD_INIT_LOG(ERR, + "tx_free_thresh must be less than the number of TX entries minus 3(%u)." 
+ " (tx_free_thresh=%u port=%u queue=%u)\n", + nb_desc - 3, + tx_free_thresh, dev->data->port_id, queue_idx); + return -EINVAL; + } + + txq->free_thresh = tx_free_thresh; + + snprintf(name, sizeof(name), + "hn_txd_%u_%u", dev->data->port_id, queue_idx); + + PMD_INIT_LOG(DEBUG, "TX descriptor pool %s n=%u size=%zu", + name, nb_desc, sizeof(struct hn_txdesc)); + + txq->tx_rndis = rte_calloc("hn_txq_rndis", nb_desc, + HN_RNDIS_PKT_ALIGNED, RTE_CACHE_LINE_SIZE); + if (txq->tx_rndis == NULL) + goto error; + + txq->txdesc_pool = rte_mempool_create(name, nb_desc, + sizeof(struct hn_txdesc), + 0, 0, NULL, NULL, + hn_txd_init, txq, + dev->device->numa_node, 0); + if (txq->txdesc_pool == NULL) { + PMD_DRV_LOG(ERR, + "mempool %s create failed: %d", name, rte_errno); + goto error; + } + + txq->agg_szmax = RTE_MIN(hv->chim_szmax, hv->rndis_agg_size); + txq->agg_pktmax = hv->rndis_agg_pkts; + txq->agg_align = hv->rndis_agg_align; + + hn_reset_txagg(txq); + + err = hn_vf_tx_queue_setup(dev, queue_idx, nb_desc, + socket_id, tx_conf); + if (err == 0) { + dev->data->tx_queues[queue_idx] = txq; + return 0; + } + +error: + if (txq->txdesc_pool) + rte_mempool_free(txq->txdesc_pool); + rte_free(txq->tx_rndis); + rte_free(txq); + return err; +} + + +static struct hn_txdesc *hn_txd_get(struct hn_tx_queue *txq) +{ + struct hn_txdesc *txd; + + if (rte_mempool_get(txq->txdesc_pool, (void **)&txd)) { + ++txq->stats.ring_full; + PMD_TX_LOG(DEBUG, "tx pool exhausted!"); + return NULL; + } + + txd->m = NULL; + txd->packets = 0; + txd->data_size = 0; + txd->chim_size = 0; + + return txd; +} + +static void hn_txd_put(struct hn_tx_queue *txq, struct hn_txdesc *txd) +{ + rte_mempool_put(txq->txdesc_pool, txd); +} + +void +hn_dev_tx_queue_release(void *arg) +{ + struct hn_tx_queue *txq = arg; + + PMD_INIT_FUNC_TRACE(); + + if (!txq) + return; + + if (txq->txdesc_pool) + rte_mempool_free(txq->txdesc_pool); + + rte_free(txq->tx_rndis); + rte_free(txq); +} + +static void +hn_nvs_send_completed(struct rte_eth_dev *dev, uint16_t queue_id, + unsigned long xactid, const struct hn_nvs_rndis_ack *ack) +{ + struct hn_data *hv = dev->data->dev_private; + struct hn_txdesc *txd = (struct hn_txdesc *)xactid; + struct hn_tx_queue *txq; + + /* Control packets are sent with xacid == 0 */ + if (!txd) + return; + + txq = dev->data->tx_queues[queue_id]; + if (likely(ack->status == NVS_STATUS_OK)) { + PMD_TX_LOG(DEBUG, "port %u:%u complete tx %u packets %u bytes %u", + txq->port_id, txq->queue_id, txd->chim_index, + txd->packets, txd->data_size); + txq->stats.bytes += txd->data_size; + txq->stats.packets += txd->packets; + } else { + PMD_TX_LOG(NOTICE, "port %u:%u complete tx %u failed status %u", + txq->port_id, txq->queue_id, txd->chim_index, ack->status); + ++txq->stats.errors; + } + + if (txd->chim_index != NVS_CHIM_IDX_INVALID) + hn_chim_free(hv, txd->chim_index); + + rte_pktmbuf_free(txd->m); + hn_txd_put(txq, txd); +} + +/* Handle transmit completion events */ +static void +hn_nvs_handle_comp(struct rte_eth_dev *dev, uint16_t queue_id, + const struct vmbus_chanpkt_hdr *pkt, + const void *data) +{ + const struct hn_nvs_hdr *hdr = data; + + switch (hdr->type) { + case NVS_TYPE_RNDIS_ACK: + hn_nvs_send_completed(dev, queue_id, pkt->xactid, data); + break; + + default: + PMD_TX_LOG(NOTICE, + "unexpected send completion type %u", + hdr->type); + } +} + +/* Parse per-packet info (meta data) */ +static int +hn_rndis_rxinfo(const void *info_data, unsigned int info_dlen, + struct hn_rxinfo *info) +{ + const struct rndis_pktinfo *pi = info_data; + 
uint32_t mask = 0; + + while (info_dlen != 0) { + const void *data; + uint32_t dlen; + + if (unlikely(info_dlen < sizeof(*pi))) + return -EINVAL; + + if (unlikely(info_dlen < pi->size)) + return -EINVAL; + info_dlen -= pi->size; + + if (unlikely(pi->size & RNDIS_PKTINFO_SIZE_ALIGNMASK)) + return -EINVAL; + if (unlikely(pi->size < pi->offset)) + return -EINVAL; + + dlen = pi->size - pi->offset; + data = pi->data; + + switch (pi->type) { + case NDIS_PKTINFO_TYPE_VLAN: + if (unlikely(dlen < NDIS_VLAN_INFO_SIZE)) + return -EINVAL; + info->vlan_info = *((const uint32_t *)data); + mask |= HN_RXINFO_VLAN; + break; + + case NDIS_PKTINFO_TYPE_CSUM: + if (unlikely(dlen < NDIS_RXCSUM_INFO_SIZE)) + return -EINVAL; + info->csum_info = *((const uint32_t *)data); + mask |= HN_RXINFO_CSUM; + break; + + case NDIS_PKTINFO_TYPE_HASHVAL: + if (unlikely(dlen < NDIS_HASH_VALUE_SIZE)) + return -EINVAL; + info->hash_value = *((const uint32_t *)data); + mask |= HN_RXINFO_HASHVAL; + break; + + case NDIS_PKTINFO_TYPE_HASHINF: + if (unlikely(dlen < NDIS_HASH_INFO_SIZE)) + return -EINVAL; + info->hash_info = *((const uint32_t *)data); + mask |= HN_RXINFO_HASHINF; + break; + + default: + goto next; + } + + if (mask == HN_RXINFO_ALL) + break; /* All found; done */ +next: + pi = (const struct rndis_pktinfo *) + ((const uint8_t *)pi + pi->size); + } + + /* + * Final fixup. + * - If there is no hash value, invalidate the hash info. + */ + if (!(mask & HN_RXINFO_HASHVAL)) + info->hash_info = HN_NDIS_HASH_INFO_INVALID; + return 0; +} + +/* + * Ack the consumed RXBUF associated w/ this channel packet, + * so that this RXBUF can be recycled by the hypervisor. + */ +static void hn_rx_buf_release(struct hn_rx_bufinfo *rxb) +{ + struct rte_mbuf_ext_shared_info *shinfo = &rxb->shinfo; + struct hn_data *hv = rxb->hv; + + if (rte_mbuf_ext_refcnt_update(shinfo, -1) == 0) { + hn_nvs_ack_rxbuf(rxb->chan, rxb->xactid); + --hv->rxbuf_outstanding; + } +} + +static void hn_rx_buf_free_cb(void *buf __rte_unused, void *opaque) +{ + hn_rx_buf_release(opaque); +} + +static struct hn_rx_bufinfo *hn_rx_buf_init(const struct hn_rx_queue *rxq, + const struct vmbus_chanpkt_rxbuf *pkt) +{ + struct hn_rx_bufinfo *rxb; + + rxb = rxq->hv->rxbuf_info + pkt->hdr.xactid; + rxb->chan = rxq->chan; + rxb->xactid = pkt->hdr.xactid; + rxb->hv = rxq->hv; + + rxb->shinfo.free_cb = hn_rx_buf_free_cb; + rxb->shinfo.fcb_opaque = rxb; + rte_mbuf_ext_refcnt_set(&rxb->shinfo, 1); + return rxb; +} + +static void hn_rxpkt(struct hn_rx_queue *rxq, struct hn_rx_bufinfo *rxb, + uint8_t *data, unsigned int headroom, unsigned int dlen, + const struct hn_rxinfo *info) +{ + struct hn_data *hv = rxq->hv; + struct rte_mbuf *m; + + m = rte_pktmbuf_alloc(rxq->mb_pool); + if (unlikely(!m)) { + struct rte_eth_dev *dev = + &rte_eth_devices[rxq->port_id]; + + dev->data->rx_mbuf_alloc_failed++; + return; + } + + /* + * For large packets, avoid copy if possible but need to keep + * some space available in receive area for later packets. + */ + if (dlen >= HN_RXCOPY_THRESHOLD && + hv->rxbuf_outstanding < hv->rxbuf_section_cnt / 2) { + struct rte_mbuf_ext_shared_info *shinfo; + const void *rxbuf; + rte_iova_t iova; + + /* + * Build an external mbuf that points to recveive area. + * Use refcount to handle multiple packets in same + * receive buffer section. 
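+		 *
+		 * hn_rx_buf_init() starts the refcount at 1, every mbuf
+		 * attached here adds a reference, and hn_rx_buf_release()
+		 * only acks the RXBUF back to the host once the count
+		 * drops to zero.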
+ */ + rxbuf = hv->rxbuf_res->addr; + iova = rte_mem_virt2iova(rxbuf) + RTE_PTR_DIFF(data, rxbuf); + shinfo = &rxb->shinfo; + + if (rte_mbuf_ext_refcnt_update(shinfo, 1) == 1) + ++hv->rxbuf_outstanding; + + rte_pktmbuf_attach_extbuf(m, data, iova, + dlen + headroom, shinfo); + m->data_off = headroom; + } else { + /* Mbuf's in pool must be large enough to hold small packets */ + if (unlikely(rte_pktmbuf_tailroom(m) < dlen)) { + rte_pktmbuf_free_seg(m); + ++rxq->stats.errors; + return; + } + rte_memcpy(rte_pktmbuf_mtod(m, void *), + data + headroom, dlen); + } + + m->port = rxq->port_id; + m->pkt_len = dlen; + m->data_len = dlen; + m->packet_type = rte_net_get_ptype(m, NULL, + RTE_PTYPE_L2_MASK | + RTE_PTYPE_L3_MASK | + RTE_PTYPE_L4_MASK); + + if (info->vlan_info != HN_NDIS_VLAN_INFO_INVALID) { + m->vlan_tci = info->vlan_info; + m->ol_flags |= PKT_RX_VLAN_STRIPPED | PKT_RX_VLAN; + + /* NDIS always strips tag, put it back if necessary */ + if (!hv->vlan_strip && rte_vlan_insert(&m)) { + PMD_DRV_LOG(DEBUG, "vlan insert failed"); + ++rxq->stats.errors; + rte_pktmbuf_free(m); + return; + } + } + + if (info->csum_info != HN_NDIS_RXCSUM_INFO_INVALID) { + if (info->csum_info & NDIS_RXCSUM_INFO_IPCS_OK) + m->ol_flags |= PKT_RX_IP_CKSUM_GOOD; + + if (info->csum_info & (NDIS_RXCSUM_INFO_UDPCS_OK + | NDIS_RXCSUM_INFO_TCPCS_OK)) + m->ol_flags |= PKT_RX_L4_CKSUM_GOOD; + else if (info->csum_info & (NDIS_RXCSUM_INFO_TCPCS_FAILED + | NDIS_RXCSUM_INFO_UDPCS_FAILED)) + m->ol_flags |= PKT_RX_L4_CKSUM_BAD; + } + + if (info->hash_info != HN_NDIS_HASH_INFO_INVALID) { + m->ol_flags |= PKT_RX_RSS_HASH; + m->hash.rss = info->hash_value; + } + + PMD_RX_LOG(DEBUG, + "port %u:%u RX id %"PRIu64" size %u type %#x ol_flags %#"PRIx64, + rxq->port_id, rxq->queue_id, rxb->xactid, + m->pkt_len, m->packet_type, m->ol_flags); + + ++rxq->stats.packets; + rxq->stats.bytes += m->pkt_len; + hn_update_packet_stats(&rxq->stats, m); + + if (unlikely(rte_ring_sp_enqueue(rxq->rx_ring, m) != 0)) { + ++rxq->stats.ring_full; + rte_pktmbuf_free(m); + } +} + +static void hn_rndis_rx_data(struct hn_rx_queue *rxq, + struct hn_rx_bufinfo *rxb, + void *data, uint32_t dlen) +{ + unsigned int data_off, data_len, pktinfo_off, pktinfo_len; + const struct rndis_packet_msg *pkt = data; + struct hn_rxinfo info = { + .vlan_info = HN_NDIS_VLAN_INFO_INVALID, + .csum_info = HN_NDIS_RXCSUM_INFO_INVALID, + .hash_info = HN_NDIS_HASH_INFO_INVALID, + }; + int err; + + hn_rndis_dump(pkt); + + if (unlikely(dlen < sizeof(*pkt))) + goto error; + + if (unlikely(dlen < pkt->len)) + goto error; /* truncated RNDIS from host */ + + if (unlikely(pkt->len < pkt->datalen + + pkt->oobdatalen + pkt->pktinfolen)) + goto error; + + if (unlikely(pkt->datalen == 0)) + goto error; + + /* Check offsets. 
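The zero-copy branch above attaches mbufs straight to the host receive area and relies on the rte_mbuf_ext_shared_info refcount so that hn_nvs_ack_rxbuf() is issued only when the last mbuf carved out of that buffer section is freed. A minimal model of that bookkeeping, with plain counters standing in for the DPDK shared-info and vmbus channel objects:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for one host receive-buffer section and its reference count. */
struct rxbuf_section {
	uint64_t xactid;
	int refcnt;		/* users currently pointing into this section */
};

static int acks_sent;

/* Models hn_nvs_ack_rxbuf(): tells the host the section may be recycled. */
static void ack_rxbuf(const struct rxbuf_section *s)
{
	printf("ack xactid %llu\n", (unsigned long long)s->xactid);
	acks_sent++;
}

static void rxbuf_get(struct rxbuf_section *s)	/* one more mbuf references it */
{
	s->refcnt++;
}

static void rxbuf_put(struct rxbuf_section *s)	/* a reference went away */
{
	if (--s->refcnt == 0)
		ack_rxbuf(s);
}

int main(void)
{
	/* hn_rx_buf_init() starts the section at refcount 1. */
	struct rxbuf_section s = { .xactid = 42, .refcnt = 1 };

	rxbuf_get(&s);		/* packet 1 attached */
	rxbuf_get(&s);		/* packet 2 attached */

	rxbuf_put(&s);		/* packet 1 freed by the application */
	rxbuf_put(&s);		/* packet 2 freed by the application */
	rxbuf_put(&s);		/* parsing finished: the final hn_rx_buf_release() */

	return acks_sent == 1 ? 0 : 1;	/* exactly one ack, after the last user */
}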
*/ + if (unlikely(pkt->dataoffset < RNDIS_PACKET_MSG_OFFSET_MIN)) + goto error; + + if (likely(pkt->pktinfooffset > 0) && + unlikely(pkt->pktinfooffset < RNDIS_PACKET_MSG_OFFSET_MIN || + (pkt->pktinfooffset & RNDIS_PACKET_MSG_OFFSET_ALIGNMASK))) + goto error; + + data_off = RNDIS_PACKET_MSG_OFFSET_ABS(pkt->dataoffset); + data_len = pkt->datalen; + pktinfo_off = RNDIS_PACKET_MSG_OFFSET_ABS(pkt->pktinfooffset); + pktinfo_len = pkt->pktinfolen; + + if (likely(pktinfo_len > 0)) { + err = hn_rndis_rxinfo((const uint8_t *)pkt + pktinfo_off, + pktinfo_len, &info); + if (err) + goto error; + } + + if (unlikely(data_off + data_len > pkt->len)) + goto error; + + if (unlikely(data_len < RTE_ETHER_HDR_LEN)) + goto error; + + hn_rxpkt(rxq, rxb, data, data_off, data_len, &info); + return; +error: + ++rxq->stats.errors; +} + +static void +hn_rndis_receive(struct rte_eth_dev *dev, struct hn_rx_queue *rxq, + struct hn_rx_bufinfo *rxb, void *buf, uint32_t len) +{ + const struct rndis_msghdr *hdr = buf; + + switch (hdr->type) { + case RNDIS_PACKET_MSG: + if (dev->data->dev_started) + hn_rndis_rx_data(rxq, rxb, buf, len); + break; + + case RNDIS_INDICATE_STATUS_MSG: + hn_rndis_link_status(dev, buf); + break; + + case RNDIS_INITIALIZE_CMPLT: + case RNDIS_QUERY_CMPLT: + case RNDIS_SET_CMPLT: + hn_rndis_receive_response(rxq->hv, buf, len); + break; + + default: + PMD_DRV_LOG(NOTICE, + "unexpected RNDIS message (type %#x len %u)", + hdr->type, len); + break; + } +} + +static void +hn_nvs_handle_rxbuf(struct rte_eth_dev *dev, + struct hn_data *hv, + struct hn_rx_queue *rxq, + const struct vmbus_chanpkt_hdr *hdr, + const void *buf) +{ + const struct vmbus_chanpkt_rxbuf *pkt; + const struct hn_nvs_hdr *nvs_hdr = buf; + uint32_t rxbuf_sz = hv->rxbuf_res->len; + char *rxbuf = hv->rxbuf_res->addr; + unsigned int i, hlen, count; + struct hn_rx_bufinfo *rxb; + + /* At minimum we need type header */ + if (unlikely(vmbus_chanpkt_datalen(hdr) < sizeof(*nvs_hdr))) { + PMD_RX_LOG(ERR, "invalid receive nvs RNDIS"); + return; + } + + /* Make sure that this is a RNDIS message. 
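hn_rndis_rx_data() converts dataoffset and pktinfooffset with RNDIS_PACKET_MSG_OFFSET_ABS() before indexing into the message, because RNDIS counts those offsets from the start of the dataoffset field rather than from the start of the message. The conversion in isolation; the macro body below is the conventional definition and should be read as an assumption, since the driver takes it from its RNDIS headers:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Same layout as struct rndis_packet_msg in rndis.h below. */
struct rndis_packet_msg {
	uint32_t type;
	uint32_t len;
	uint32_t dataoffset;
	uint32_t datalen;
	uint32_t oobdataoffset;
	uint32_t oobdatalen;
	uint32_t oobdataelements;
	uint32_t pktinfooffset;
	uint32_t pktinfolen;
	uint32_t vchandle;
	uint32_t reserved;
};

/* Offsets are relative to the dataoffset field, 8 bytes into the message. */
#define PACKET_MSG_OFFSET_ABS(ofs) \
	((ofs) + offsetof(struct rndis_packet_msg, dataoffset))

int main(void)
{
	/* Payload placed right after the 44-byte header: 44 - 8 = 36. */
	struct rndis_packet_msg pkt = {
		.type = 0x00000001,	/* RNDIS_PACKET_MSG */
		.len = 44 + 60,
		.dataoffset = 36,
		.datalen = 60,
	};

	printf("header is %zu bytes, data starts at absolute offset %zu\n",
	       sizeof(pkt), (size_t)PACKET_MSG_OFFSET_ABS(pkt.dataoffset));
	return 0;
}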
*/ + if (unlikely(nvs_hdr->type != NVS_TYPE_RNDIS)) { + PMD_RX_LOG(ERR, "nvs type %u, not RNDIS", + nvs_hdr->type); + return; + } + + hlen = vmbus_chanpkt_getlen(hdr->hlen); + if (unlikely(hlen < sizeof(*pkt))) { + PMD_RX_LOG(ERR, "invalid rxbuf chanpkt"); + return; + } + + pkt = container_of(hdr, const struct vmbus_chanpkt_rxbuf, hdr); + if (unlikely(pkt->rxbuf_id != NVS_RXBUF_SIG)) { + PMD_RX_LOG(ERR, "invalid rxbuf_id 0x%08x", + pkt->rxbuf_id); + return; + } + + count = pkt->rxbuf_cnt; + if (unlikely(hlen < offsetof(struct vmbus_chanpkt_rxbuf, + rxbuf[count]))) { + PMD_RX_LOG(ERR, "invalid rxbuf_cnt %u", count); + return; + } + + if (pkt->hdr.xactid > hv->rxbuf_section_cnt) { + PMD_RX_LOG(ERR, "invalid rxbuf section id %" PRIx64, + pkt->hdr.xactid); + return; + } + + /* Setup receive buffer info to allow for callback */ + rxb = hn_rx_buf_init(rxq, pkt); + + /* Each range represents 1 RNDIS pkt that contains 1 Ethernet frame */ + for (i = 0; i < count; ++i) { + unsigned int ofs, len; + + ofs = pkt->rxbuf[i].ofs; + len = pkt->rxbuf[i].len; + + if (unlikely(ofs + len > rxbuf_sz)) { + PMD_RX_LOG(ERR, + "%uth RNDIS msg overflow ofs %u, len %u", + i, ofs, len); + continue; + } + + if (unlikely(len == 0)) { + PMD_RX_LOG(ERR, "%uth RNDIS msg len %u", i, len); + continue; + } + + hn_rndis_receive(dev, rxq, rxb, + rxbuf + ofs, len); + } + + /* Send ACK now if external mbuf not used */ + hn_rx_buf_release(rxb); +} + +/* + * Called when NVS inband events are received. + * Send up a two part message with port_id and the NVS message + * to the pipe to the netvsc-vf-event control thread. + */ +static void hn_nvs_handle_notify(struct rte_eth_dev *dev, + const struct vmbus_chanpkt_hdr *pkt, + const void *data) +{ + const struct hn_nvs_hdr *hdr = data; + + switch (hdr->type) { + case NVS_TYPE_TXTBL_NOTE: + /* Transmit indirection table has locking problems + * in DPDK and therefore not implemented + */ + PMD_DRV_LOG(DEBUG, "host notify of transmit indirection table"); + break; + + case NVS_TYPE_VFASSOC_NOTE: + hn_nvs_handle_vfassoc(dev, pkt, data); + break; + + default: + PMD_DRV_LOG(INFO, + "got notify, nvs type %u", hdr->type); + } +} + +struct hn_rx_queue *hn_rx_queue_alloc(struct hn_data *hv, + uint16_t queue_id, + unsigned int socket_id) +{ + struct hn_rx_queue *rxq; + + rxq = rte_zmalloc_socket("HN_RXQ", sizeof(*rxq), + RTE_CACHE_LINE_SIZE, socket_id); + if (!rxq) + return NULL; + + rxq->hv = hv; + rxq->chan = hv->channels[queue_id]; + rte_spinlock_init(&rxq->ring_lock); + rxq->port_id = hv->port_id; + rxq->queue_id = queue_id; + rxq->event_sz = HN_RXQ_EVENT_DEFAULT; + rxq->event_buf = rte_malloc_socket("HN_EVENTS", HN_RXQ_EVENT_DEFAULT, + RTE_CACHE_LINE_SIZE, socket_id); + if (!rxq->event_buf) { + rte_free(rxq); + return NULL; + } + + return rxq; +} + +int +hn_dev_rx_queue_setup(struct rte_eth_dev *dev, + uint16_t queue_idx, uint16_t nb_desc, + unsigned int socket_id, + const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *mp) +{ + struct hn_data *hv = dev->data->dev_private; + char ring_name[RTE_RING_NAMESIZE]; + struct hn_rx_queue *rxq; + unsigned int count; + int error = -ENOMEM; + + PMD_INIT_FUNC_TRACE(); + + if (queue_idx == 0) { + rxq = hv->primary; + } else { + rxq = hn_rx_queue_alloc(hv, queue_idx, socket_id); + if (!rxq) + return -ENOMEM; + } + + rxq->mb_pool = mp; + count = rte_mempool_avail_count(mp) / dev->data->nb_rx_queues; + if (nb_desc == 0 || nb_desc > count) + nb_desc = count; + + /* + * Staging ring from receive event logic to rx_pkts. 
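hn_nvs_handle_rxbuf() validates the advertised rxbuf_cnt against the channel-packet header length with offsetof(struct vmbus_chanpkt_rxbuf, rxbuf[count]), which sizes the header plus count trailing array elements in a single expression. A stand-alone illustration of that idiom with a simplified header (the names here are placeholders, not the vmbus structures):

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

struct range {
	uint32_t ofs;
	uint32_t len;
};

/* Simplified header carrying a count-prefixed flexible array of ranges. */
struct rxbuf_hdr {
	uint32_t id;
	uint32_t count;
	struct range ranges[];
};

/* Returns 0 when 'hlen' bytes can hold the header plus every range the
 * header claims to carry; offsetof() with a runtime index does the
 * header-plus-array sizing, as the driver does. */
static int check_count(const struct rxbuf_hdr *h, size_t hlen)
{
	if (hlen < sizeof(*h))
		return -1;
	if (hlen < offsetof(struct rxbuf_hdr, ranges[h->count]))
		return -1;
	return 0;
}

int main(void)
{
	/* Room for the header and exactly two ranges. */
	uint32_t buf[(sizeof(struct rxbuf_hdr) + 2 * sizeof(struct range)) /
		     sizeof(uint32_t)] = { 0 };
	struct rxbuf_hdr *h = (struct rxbuf_hdr *)buf;

	h->count = 2;
	printf("count 2 in %zu bytes: %s\n", sizeof(buf),
	       check_count(h, sizeof(buf)) ? "short" : "ok");

	h->count = 5;	/* claims more ranges than the buffer really holds */
	printf("count 5 in %zu bytes: %s\n", sizeof(buf),
	       check_count(h, sizeof(buf)) ? "short" : "ok");
	return 0;
}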
+ * rx_pkts assumes caller is handling multi-thread issue. + * event logic has locking. + */ + snprintf(ring_name, sizeof(ring_name), + "hn_rx_%u_%u", dev->data->port_id, queue_idx); + rxq->rx_ring = rte_ring_create(ring_name, + rte_align32pow2(nb_desc), + socket_id, 0); + if (!rxq->rx_ring) + goto fail; + + error = hn_vf_rx_queue_setup(dev, queue_idx, nb_desc, + socket_id, rx_conf, mp); + if (error) + goto fail; + + dev->data->rx_queues[queue_idx] = rxq; + return 0; + +fail: + rte_ring_free(rxq->rx_ring); + rte_free(rxq->event_buf); + rte_free(rxq); + return error; +} + +static void +hn_rx_queue_free(struct hn_rx_queue *rxq, bool keep_primary) +{ + + if (!rxq) + return; + + rte_ring_free(rxq->rx_ring); + rxq->rx_ring = NULL; + rxq->mb_pool = NULL; + + hn_vf_rx_queue_release(rxq->hv, rxq->queue_id); + + /* Keep primary queue to allow for control operations */ + if (keep_primary && rxq == rxq->hv->primary) + return; + + rte_free(rxq->event_buf); + rte_free(rxq); +} + +void +hn_dev_rx_queue_release(void *arg) +{ + struct hn_rx_queue *rxq = arg; + + PMD_INIT_FUNC_TRACE(); + + hn_rx_queue_free(rxq, true); +} + +int +hn_dev_tx_done_cleanup(void *arg, uint32_t free_cnt) +{ + struct hn_tx_queue *txq = arg; + + return hn_process_events(txq->hv, txq->queue_id, free_cnt); +} + +/* + * Process pending events on the channel. + * Called from both Rx queue poll and Tx cleanup + */ +uint32_t hn_process_events(struct hn_data *hv, uint16_t queue_id, + uint32_t tx_limit) +{ + struct rte_eth_dev *dev = &rte_eth_devices[hv->port_id]; + struct hn_rx_queue *rxq; + uint32_t bytes_read = 0; + uint32_t tx_done = 0; + int ret = 0; + + rxq = queue_id == 0 ? hv->primary : dev->data->rx_queues[queue_id]; + + /* + * Since channel is shared between Rx and TX queue need to have a lock + * since DPDK does not force same CPU to be used for Rx/Tx. 
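hn_dev_rx_queue_setup() sizes the staging ring with rte_align32pow2(nb_desc) because rte_ring sizes must be a power of two (unless the exact-size flag is requested). The round-up itself is the usual bit-smearing trick; a stand-alone equivalent:

#include <stdint.h>
#include <stdio.h>

/* Round v up to the next power of two; v == 0 yields 0, so callers must
 * still reject an empty ring. Same idea as rte_align32pow2(). */
static uint32_t align32pow2(uint32_t v)
{
	v--;
	v |= v >> 1;
	v |= v >> 2;
	v |= v >> 4;
	v |= v >> 8;
	v |= v >> 16;
	return v + 1;
}

int main(void)
{
	const uint32_t nb_desc[] = { 1, 128, 300, 512, 1000 };

	for (unsigned int i = 0; i < sizeof(nb_desc) / sizeof(nb_desc[0]); i++)
		printf("nb_desc %4u -> ring size %4u\n",
		       nb_desc[i], align32pow2(nb_desc[i]));
	return 0;
}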
+ */ + if (unlikely(!rte_spinlock_trylock(&rxq->ring_lock))) + return 0; + + for (;;) { + const struct vmbus_chanpkt_hdr *pkt; + uint32_t len = rxq->event_sz; + const void *data; + +retry: + ret = rte_vmbus_chan_recv_raw(rxq->chan, rxq->event_buf, &len); + if (ret == -EAGAIN) + break; /* ring is empty */ + + if (unlikely(ret == -ENOBUFS)) { + /* event buffer not large enough to read ring */ + + PMD_DRV_LOG(DEBUG, + "event buffer expansion (need %u)", len); + rxq->event_sz = len + len / 4; + rxq->event_buf = rte_realloc(rxq->event_buf, rxq->event_sz, + RTE_CACHE_LINE_SIZE); + if (rxq->event_buf) + goto retry; + /* out of memory, no more events now */ + rxq->event_sz = 0; + break; + } + + if (unlikely(ret <= 0)) { + /* This indicates a failure to communicate (or worse) */ + rte_exit(EXIT_FAILURE, + "vmbus ring buffer error: %d", ret); + } + + bytes_read += ret; + pkt = (const struct vmbus_chanpkt_hdr *)rxq->event_buf; + data = (char *)rxq->event_buf + vmbus_chanpkt_getlen(pkt->hlen); + + switch (pkt->type) { + case VMBUS_CHANPKT_TYPE_COMP: + ++tx_done; + hn_nvs_handle_comp(dev, queue_id, pkt, data); + break; + + case VMBUS_CHANPKT_TYPE_RXBUF: + hn_nvs_handle_rxbuf(dev, hv, rxq, pkt, data); + break; + + case VMBUS_CHANPKT_TYPE_INBAND: + hn_nvs_handle_notify(dev, pkt, data); + break; + + default: + PMD_DRV_LOG(ERR, "unknown chan pkt %u", pkt->type); + break; + } + + if (tx_limit && tx_done >= tx_limit) + break; + } + + if (bytes_read > 0) + rte_vmbus_chan_signal_read(rxq->chan, bytes_read); + + rte_spinlock_unlock(&rxq->ring_lock); + + return tx_done; +} + +static void hn_append_to_chim(struct hn_tx_queue *txq, + struct rndis_packet_msg *pkt, + const struct rte_mbuf *m) +{ + struct hn_txdesc *txd = txq->agg_txd; + uint8_t *buf = (uint8_t *)pkt; + unsigned int data_offs; + + hn_rndis_dump(pkt); + + data_offs = RNDIS_PACKET_MSG_OFFSET_ABS(pkt->dataoffset); + txd->chim_size += pkt->len; + txd->data_size += m->pkt_len; + ++txd->packets; + hn_update_packet_stats(&txq->stats, m); + + for (; m; m = m->next) { + uint16_t len = rte_pktmbuf_data_len(m); + + rte_memcpy(buf + data_offs, + rte_pktmbuf_mtod(m, const char *), len); + data_offs += len; + } +} + +/* + * Send pending aggregated data in chimney buffer (if any). + * Returns error if send was unsuccessful because channel ring buffer + * was full. + */ +static int hn_flush_txagg(struct hn_tx_queue *txq, bool *need_sig) + +{ + struct hn_txdesc *txd = txq->agg_txd; + struct hn_nvs_rndis rndis; + int ret; + + if (!txd) + return 0; + + rndis = (struct hn_nvs_rndis) { + .type = NVS_TYPE_RNDIS, + .rndis_mtype = NVS_RNDIS_MTYPE_DATA, + .chim_idx = txd->chim_index, + .chim_sz = txd->chim_size, + }; + + PMD_TX_LOG(DEBUG, "port %u:%u tx %u size %u", + txq->port_id, txq->queue_id, txd->chim_index, txd->chim_size); + + ret = hn_nvs_send(txq->chan, VMBUS_CHANPKT_FLAG_RC, + &rndis, sizeof(rndis), (uintptr_t)txd, need_sig); + + if (likely(ret == 0)) + hn_reset_txagg(txq); + else + PMD_TX_LOG(NOTICE, "port %u:%u send failed: %d", + txq->port_id, txq->queue_id, ret); + + return ret; +} + +/* + * Try and find a place in a send chimney buffer to put + * the small packet. If space is available, this routine + * returns a pointer of where to place the data. + * If no space, caller should try direct transmit. 
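hn_process_events() grows rxq->event_buf with roughly 25% headroom (len + len / 4) whenever rte_vmbus_chan_recv_raw() returns -ENOBUFS, then retries the read. The sketch below keeps that sizing policy but goes through a temporary pointer for the reallocation, so the old buffer survives an allocation failure; that extra step is a common hardening of the pattern, not something the loop above does.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Fake channel read: the next packet is 'need' bytes; report -ENOBUFS and
 * the required length when the caller's buffer is too small. */
static int chan_recv(void *buf, size_t cap, size_t need, size_t *len)
{
	*len = need;
	if (cap < need)
		return -ENOBUFS;
	memset(buf, 0xab, need);
	return (int)need;
}

int main(void)
{
	size_t event_sz = 256;
	unsigned char *event_buf = malloc(event_sz);
	size_t need = 700;		/* pretend a 700-byte packet arrives */
	size_t len = 0;
	int ret;

	if (event_buf == NULL)
		return 1;

	for (;;) {
		ret = chan_recv(event_buf, event_sz, need, &len);
		if (ret != -ENOBUFS)
			break;

		/* Grow with ~25% headroom so the next oversized packet does
		 * not force yet another reallocation. */
		size_t new_sz = len + len / 4;
		unsigned char *tmp = realloc(event_buf, new_sz);

		if (tmp == NULL) {	/* keep the old buffer on failure */
			ret = -ENOMEM;
			break;
		}
		event_buf = tmp;
		event_sz = new_sz;
		printf("grew event buffer to %zu bytes\n", event_sz);
	}

	printf("final read returned %d\n", ret);
	free(event_buf);
	return ret > 0 ? 0 : 1;
}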
+ */ +static void * +hn_try_txagg(struct hn_data *hv, struct hn_tx_queue *txq, + struct hn_txdesc *txd, uint32_t pktsize) +{ + struct hn_txdesc *agg_txd = txq->agg_txd; + struct rndis_packet_msg *pkt; + void *chim; + + if (agg_txd) { + unsigned int padding, olen; + + /* + * Update the previous RNDIS packet's total length, + * it can be increased due to the mandatory alignment + * padding for this RNDIS packet. And update the + * aggregating txdesc's chimney sending buffer size + * accordingly. + * + * Zero-out the padding, as required by the RNDIS spec. + */ + pkt = txq->agg_prevpkt; + olen = pkt->len; + padding = RTE_ALIGN(olen, txq->agg_align) - olen; + if (padding > 0) { + agg_txd->chim_size += padding; + pkt->len += padding; + memset((uint8_t *)pkt + olen, 0, padding); + } + + chim = (uint8_t *)pkt + pkt->len; + txq->agg_prevpkt = chim; + txq->agg_pktleft--; + txq->agg_szleft -= pktsize; + if (txq->agg_szleft < HN_PKTSIZE_MIN(txq->agg_align)) { + /* + * Probably can't aggregate more packets, + * flush this aggregating txdesc proactively. + */ + txq->agg_pktleft = 0; + } + + hn_txd_put(txq, txd); + return chim; + } + + txd->chim_index = hn_chim_alloc(hv); + if (txd->chim_index == NVS_CHIM_IDX_INVALID) + return NULL; + + chim = (uint8_t *)hv->chim_res->addr + + txd->chim_index * hv->chim_szmax; + + txq->agg_txd = txd; + txq->agg_pktleft = txq->agg_pktmax - 1; + txq->agg_szleft = txq->agg_szmax - pktsize; + txq->agg_prevpkt = chim; + + return chim; +} + +static inline void * +hn_rndis_pktinfo_append(struct rndis_packet_msg *pkt, + uint32_t pi_dlen, uint32_t pi_type) +{ + const uint32_t pi_size = RNDIS_PKTINFO_SIZE(pi_dlen); + struct rndis_pktinfo *pi; + + /* + * Per-packet-info does not move; it only grows. + * + * NOTE: + * pktinfooffset in this phase counts from the beginning + * of rndis_packet_msg. + */ + pi = (struct rndis_pktinfo *)((uint8_t *)pkt + hn_rndis_pktlen(pkt)); + + pkt->pktinfolen += pi_size; + + pi->size = pi_size; + pi->type = pi_type; + pi->offset = RNDIS_PKTINFO_OFFSET; + + return pi->data; +} + +/* Put RNDIS header and packet info on packet */ +static void hn_encap(struct rndis_packet_msg *pkt, + uint16_t queue_id, + const struct rte_mbuf *m) +{ + unsigned int hlen = m->l2_len + m->l3_len; + uint32_t *pi_data; + uint32_t pkt_hlen; + + pkt->type = RNDIS_PACKET_MSG; + pkt->len = m->pkt_len; + pkt->dataoffset = 0; + pkt->datalen = m->pkt_len; + pkt->oobdataoffset = 0; + pkt->oobdatalen = 0; + pkt->oobdataelements = 0; + pkt->pktinfooffset = sizeof(*pkt); + pkt->pktinfolen = 0; + pkt->vchandle = 0; + pkt->reserved = 0; + + /* + * Set the hash value for this packet, to the queue_id to cause + * TX done event for this packet on the right channel. 
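hn_try_txagg() packs several small RNDIS packets into one chimney buffer section: the previous packet is padded up to the aggregation alignment, and two counters track how many more packets and bytes the section can take before a flush is needed. The budget arithmetic on its own (the section size, packet limit and alignment below are made up for the demonstration, and the model deliberately ignores the RNDIS headers themselves):

#include <stdint.h>
#include <stdio.h>

/* Round x up to a power-of-two alignment a. */
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((uint32_t)(a) - 1))

struct agg_state {
	uint32_t pktleft;	/* packets that may still be added */
	uint32_t szleft;	/* bytes that may still be added */
	uint32_t align;		/* per-packet alignment inside the section */
};

/* Try to add one packet of 'size' bytes; returns 0 on success, -1 when the
 * caller should flush the section and start a new one. */
static int agg_add(struct agg_state *st, uint32_t size)
{
	uint32_t padded = ALIGN_UP(size, st->align);

	if (st->pktleft == 0 || padded > st->szleft)
		return -1;

	st->pktleft--;
	st->szleft -= padded;
	return 0;
}

int main(void)
{
	/* e.g. a 4 KB section, at most 8 packets, 32-byte alignment */
	struct agg_state st = { .pktleft = 8, .szleft = 4096, .align = 32 };
	const uint32_t sizes[] = { 100, 250, 60, 3500, 900 };

	for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		if (agg_add(&st, sizes[i]))
			printf("packet %u (%u B): flush, then start new section\n",
			       i, sizes[i]);
		else
			printf("packet %u (%u B): aggregated, %u B left\n",
			       i, sizes[i], st.szleft);
	}
	return 0;
}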
+ */ + pi_data = hn_rndis_pktinfo_append(pkt, NDIS_HASH_VALUE_SIZE, + NDIS_PKTINFO_TYPE_HASHVAL); + *pi_data = queue_id; + + if (m->ol_flags & PKT_TX_VLAN_PKT) { + pi_data = hn_rndis_pktinfo_append(pkt, NDIS_VLAN_INFO_SIZE, + NDIS_PKTINFO_TYPE_VLAN); + *pi_data = m->vlan_tci; + } + + if (m->ol_flags & PKT_TX_TCP_SEG) { + pi_data = hn_rndis_pktinfo_append(pkt, NDIS_LSO2_INFO_SIZE, + NDIS_PKTINFO_TYPE_LSO); + + if (m->ol_flags & PKT_TX_IPV6) { + *pi_data = NDIS_LSO2_INFO_MAKEIPV6(hlen, + m->tso_segsz); + } else { + *pi_data = NDIS_LSO2_INFO_MAKEIPV4(hlen, + m->tso_segsz); + } + } else if (m->ol_flags & + (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM | PKT_TX_IP_CKSUM)) { + pi_data = hn_rndis_pktinfo_append(pkt, NDIS_TXCSUM_INFO_SIZE, + NDIS_PKTINFO_TYPE_CSUM); + *pi_data = 0; + + if (m->ol_flags & PKT_TX_IPV6) + *pi_data |= NDIS_TXCSUM_INFO_IPV6; + if (m->ol_flags & PKT_TX_IPV4) { + *pi_data |= NDIS_TXCSUM_INFO_IPV4; + + if (m->ol_flags & PKT_TX_IP_CKSUM) + *pi_data |= NDIS_TXCSUM_INFO_IPCS; + } + + if (m->ol_flags & PKT_TX_TCP_CKSUM) + *pi_data |= NDIS_TXCSUM_INFO_MKTCPCS(hlen); + else if (m->ol_flags & PKT_TX_UDP_CKSUM) + *pi_data |= NDIS_TXCSUM_INFO_MKUDPCS(hlen); + } + + pkt_hlen = pkt->pktinfooffset + pkt->pktinfolen; + /* Fixup RNDIS packet message total length */ + pkt->len += pkt_hlen; + + /* Convert RNDIS packet message offsets */ + pkt->dataoffset = hn_rndis_pktmsg_offset(pkt_hlen); + pkt->pktinfooffset = hn_rndis_pktmsg_offset(pkt->pktinfooffset); +} + +/* How many scatter gather list elements ar needed */ +static unsigned int hn_get_slots(const struct rte_mbuf *m) +{ + unsigned int slots = 1; /* for RNDIS header */ + + while (m) { + unsigned int size = rte_pktmbuf_data_len(m); + unsigned int offs = rte_mbuf_data_iova(m) & PAGE_MASK; + + slots += (offs + size + PAGE_SIZE - 1) / PAGE_SIZE; + m = m->next; + } + + return slots; +} + +/* Build scatter gather list from chained mbuf */ +static unsigned int hn_fill_sg(struct vmbus_gpa *sg, + const struct rte_mbuf *m) +{ + unsigned int segs = 0; + + while (m) { + rte_iova_t addr = rte_mbuf_data_iova(m); + unsigned int page = addr / PAGE_SIZE; + unsigned int offset = addr & PAGE_MASK; + unsigned int len = rte_pktmbuf_data_len(m); + + while (len > 0) { + unsigned int bytes = RTE_MIN(len, PAGE_SIZE - offset); + + sg[segs].page = page; + sg[segs].ofs = offset; + sg[segs].len = bytes; + segs++; + + ++page; + offset = 0; + len -= bytes; + } + m = m->next; + } + + return segs; +} + +/* Transmit directly from mbuf */ +static int hn_xmit_sg(struct hn_tx_queue *txq, + const struct hn_txdesc *txd, const struct rte_mbuf *m, + bool *need_sig) +{ + struct vmbus_gpa sg[hn_get_slots(m)]; + struct hn_nvs_rndis nvs_rndis = { + .type = NVS_TYPE_RNDIS, + .rndis_mtype = NVS_RNDIS_MTYPE_DATA, + .chim_sz = txd->chim_size, + }; + rte_iova_t addr; + unsigned int segs; + + /* attach aggregation data if present */ + if (txd->chim_size > 0) + nvs_rndis.chim_idx = txd->chim_index; + else + nvs_rndis.chim_idx = NVS_CHIM_IDX_INVALID; + + hn_rndis_dump(txd->rndis_pkt); + + /* pass IOVA of rndis header in first segment */ + addr = rte_malloc_virt2iova(txd->rndis_pkt); + if (unlikely(addr == RTE_BAD_IOVA)) { + PMD_DRV_LOG(ERR, "RNDIS transmit can not get iova"); + return -EINVAL; + } + + sg[0].page = addr / PAGE_SIZE; + sg[0].ofs = addr & PAGE_MASK; + sg[0].len = RNDIS_PACKET_MSG_OFFSET_ABS(hn_rndis_pktlen(txd->rndis_pkt)); + segs = 1; + + hn_update_packet_stats(&txq->stats, m); + + segs += hn_fill_sg(sg + 1, m); + + PMD_TX_LOG(DEBUG, "port %u:%u tx %u segs %u size %u", + 
txq->port_id, txq->queue_id, txd->chim_index, + segs, nvs_rndis.chim_sz); + + return hn_nvs_send_sglist(txq->chan, sg, segs, + &nvs_rndis, sizeof(nvs_rndis), + (uintptr_t)txd, need_sig); +} + +uint16_t +hn_xmit_pkts(void *ptxq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) +{ + struct hn_tx_queue *txq = ptxq; + uint16_t queue_id = txq->queue_id; + struct hn_data *hv = txq->hv; + struct rte_eth_dev *vf_dev; + bool need_sig = false; + uint16_t nb_tx, tx_thresh; + int ret; + + if (unlikely(hv->closed)) + return 0; + + /* + * Always check for events on the primary channel + * because that is where hotplug notifications occur. + */ + tx_thresh = RTE_MAX(txq->free_thresh, nb_pkts); + if (txq->queue_id == 0 || + rte_mempool_avail_count(txq->txdesc_pool) < tx_thresh) + hn_process_events(hv, txq->queue_id, 0); + + /* Transmit over VF if present and up */ + rte_rwlock_read_lock(&hv->vf_lock); + vf_dev = hn_get_vf_dev(hv); + if (vf_dev && vf_dev->data->dev_started) { + void *sub_q = vf_dev->data->tx_queues[queue_id]; + + nb_tx = (*vf_dev->tx_pkt_burst)(sub_q, tx_pkts, nb_pkts); + rte_rwlock_read_unlock(&hv->vf_lock); + return nb_tx; + } + rte_rwlock_read_unlock(&hv->vf_lock); + + for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) { + struct rte_mbuf *m = tx_pkts[nb_tx]; + uint32_t pkt_size = m->pkt_len + HN_RNDIS_PKT_LEN; + struct rndis_packet_msg *pkt; + struct hn_txdesc *txd; + + txd = hn_txd_get(txq); + if (txd == NULL) + break; + + /* For small packets aggregate them in chimney buffer */ + if (m->pkt_len < HN_TXCOPY_THRESHOLD && pkt_size <= txq->agg_szmax) { + /* If this packet will not fit, then flush */ + if (txq->agg_pktleft == 0 || + RTE_ALIGN(pkt_size, txq->agg_align) > txq->agg_szleft) { + if (hn_flush_txagg(txq, &need_sig)) + goto fail; + } + + + pkt = hn_try_txagg(hv, txq, txd, pkt_size); + if (unlikely(!pkt)) + break; + + hn_encap(pkt, queue_id, m); + hn_append_to_chim(txq, pkt, m); + + rte_pktmbuf_free(m); + + /* if buffer is full, flush */ + if (txq->agg_pktleft == 0 && + hn_flush_txagg(txq, &need_sig)) + goto fail; + } else { + /* Send any outstanding packets in buffer */ + if (txq->agg_txd && hn_flush_txagg(txq, &need_sig)) + goto fail; + + pkt = txd->rndis_pkt; + txd->m = m; + txd->data_size = m->pkt_len; + ++txd->packets; + + hn_encap(pkt, queue_id, m); + + ret = hn_xmit_sg(txq, txd, m, &need_sig); + if (unlikely(ret != 0)) { + PMD_TX_LOG(NOTICE, "sg send failed: %d", ret); + ++txq->stats.errors; + hn_txd_put(txq, txd); + goto fail; + } + } + } + + /* If partial buffer left, then try and send it. + * if that fails, then reuse it on next send. 
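hn_get_slots() and hn_fill_sg() above turn each mbuf segment into vmbus_gpa entries, one per 4 KB page the segment touches, because the host is handed guest page/offset/length triples rather than a flat buffer. The page-splitting arithmetic for a single buffer, with PAGE_SIZE fixed at 4096 as in hn_var.h:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u
#define PAGE_MASK (PAGE_SIZE - 1)

struct gpa {		/* simplified vmbus_gpa: page number, offset, length */
	uint64_t page;
	uint32_t ofs;
	uint32_t len;
};

/* Worst-case slot count for a buffer starting at address 'addr'. */
static unsigned int slots_needed(uint64_t addr, uint32_t size)
{
	uint32_t offs = addr & PAGE_MASK;

	return (offs + size + PAGE_SIZE - 1) / PAGE_SIZE;
}

/* One entry per page the buffer crosses; returns the number of entries. */
static unsigned int fill_sg(struct gpa *sg, uint64_t addr, uint32_t len)
{
	unsigned int segs = 0;
	uint64_t page = addr / PAGE_SIZE;
	uint32_t offset = addr & PAGE_MASK;

	while (len > 0) {
		uint32_t bytes = PAGE_SIZE - offset;

		if (bytes > len)
			bytes = len;
		sg[segs].page = page;
		sg[segs].ofs = offset;
		sg[segs].len = bytes;
		segs++;

		page++;
		offset = 0;
		len -= bytes;
	}
	return segs;
}

int main(void)
{
	uint64_t addr = 0x12345f00;	/* 256 bytes before the end of its page */
	uint32_t len = 6000;
	struct gpa sg[8];
	unsigned int n = fill_sg(sg, addr, len);

	printf("need %u slots, filled %u:\n", slots_needed(addr, len), n);
	for (unsigned int i = 0; i < n; i++)
		printf("  page %llu ofs %u len %u\n",
		       (unsigned long long)sg[i].page, sg[i].ofs, sg[i].len);
	return 0;
}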
+ */ + hn_flush_txagg(txq, &need_sig); + +fail: + if (need_sig) + rte_vmbus_chan_signal_tx(txq->chan); + + return nb_tx; +} + +static uint16_t +hn_recv_vf(uint16_t vf_port, const struct hn_rx_queue *rxq, + struct rte_mbuf **rx_pkts, uint16_t nb_pkts) +{ + uint16_t i, n; + + if (unlikely(nb_pkts == 0)) + return 0; + + n = rte_eth_rx_burst(vf_port, rxq->queue_id, rx_pkts, nb_pkts); + + /* relabel the received mbufs */ + for (i = 0; i < n; i++) + rx_pkts[i]->port = rxq->port_id; + + return n; +} + +uint16_t +hn_recv_pkts(void *prxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) +{ + struct hn_rx_queue *rxq = prxq; + struct hn_data *hv = rxq->hv; + struct rte_eth_dev *vf_dev; + uint16_t nb_rcv; + + if (unlikely(hv->closed)) + return 0; + + /* Check for new completions (and hotplug) */ + if (likely(rte_ring_count(rxq->rx_ring) < nb_pkts)) + hn_process_events(hv, rxq->queue_id, 0); + + /* Always check the vmbus path for multicast and new flows */ + nb_rcv = rte_ring_sc_dequeue_burst(rxq->rx_ring, + (void **)rx_pkts, nb_pkts, NULL); + + /* If VF is available, check that as well */ + rte_rwlock_read_lock(&hv->vf_lock); + vf_dev = hn_get_vf_dev(hv); + if (vf_dev && vf_dev->data->dev_started) + nb_rcv += hn_recv_vf(vf_dev->data->port_id, rxq, + rx_pkts + nb_rcv, nb_pkts - nb_rcv); + + rte_rwlock_read_unlock(&hv->vf_lock); + return nb_rcv; +} + +void +hn_dev_free_queues(struct rte_eth_dev *dev) +{ + unsigned int i; + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + struct hn_rx_queue *rxq = dev->data->rx_queues[i]; + + hn_rx_queue_free(rxq, false); + dev->data->rx_queues[i] = NULL; + } + dev->data->nb_rx_queues = 0; + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + hn_dev_tx_queue_release(dev->data->tx_queues[i]); + dev->data->tx_queues[i] = NULL; + } + dev->data->nb_tx_queues = 0; +} diff --git a/src/spdk/dpdk/drivers/net/netvsc/hn_var.h b/src/spdk/dpdk/drivers/net/netvsc/hn_var.h new file mode 100644 index 000000000..d1d38b459 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/netvsc/hn_var.h @@ -0,0 +1,252 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2009-2018 Microsoft Corp. + * Copyright (c) 2016 Brocade Communications Systems, Inc. + * Copyright (c) 2012 NetApp Inc. + * Copyright (c) 2012 Citrix Inc. + * All rights reserved. + */ + +/* + * Tunable ethdev params + */ +#define HN_MIN_RX_BUF_SIZE 1024 +#define HN_MAX_XFER_LEN 2048 +#define HN_MAX_MAC_ADDRS 1 +#define HN_MAX_CHANNELS 64 + +/* Claimed to be 12232B */ +#define HN_MTU_MAX (9 * 1024) + +/* Retry interval */ +#define HN_CHAN_INTERVAL_US 100 + +/* Host monitor interval */ +#define HN_CHAN_LATENCY_NS 50000 + +/* Buffers need to be aligned */ +#ifndef PAGE_SIZE +#define PAGE_SIZE 4096 +#endif + +#ifndef PAGE_MASK +#define PAGE_MASK (PAGE_SIZE - 1) +#endif + +struct hn_data; +struct hn_txdesc; + +struct hn_stats { + uint64_t packets; + uint64_t bytes; + uint64_t errors; + uint64_t ring_full; + uint64_t multicast; + uint64_t broadcast; + /* Size bins in array as RFC 2819, undersized [0], 64 [1], etc */ + uint64_t size_bins[8]; +}; + +struct hn_tx_queue { + struct hn_data *hv; + struct vmbus_channel *chan; + uint16_t port_id; + uint16_t queue_id; + uint32_t free_thresh; + struct rte_mempool *txdesc_pool; + void *tx_rndis; + + /* Applied packet transmission aggregation limits. 
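struct hn_stats above keeps byte and packet counters per queue plus the size_bins array, which its comment describes as RFC 2819 style (undersized in [0], exactly 64 in [1], and so on). The updater itself, hn_update_packet_stats(), sits earlier in hn_rxtx.c and is not part of this hunk, so the bin boundaries below are the conventional ones and should be read as an assumption:

#include <stdint.h>
#include <stdio.h>

/*
 * Map a frame length to one of eight RFC 2819 style bins:
 * [0] under 64, [1] exactly 64, [2] 65-127, [3] 128-255, [4] 256-511,
 * [5] 512-1023, [6] 1024-1518, [7] 1519 and larger.
 */
static unsigned int size_bin(uint32_t len)
{
	if (len < 64)
		return 0;
	if (len == 64)
		return 1;
	if (len < 128)
		return 2;
	if (len < 256)
		return 3;
	if (len < 512)
		return 4;
	if (len < 1024)
		return 5;
	if (len < 1519)
		return 6;
	return 7;
}

int main(void)
{
	uint64_t size_bins[8] = { 0 };
	const uint32_t lens[] = { 60, 64, 65, 512, 1500, 9000 };

	for (unsigned int i = 0; i < sizeof(lens) / sizeof(lens[0]); i++)
		size_bins[size_bin(lens[i])]++;

	for (unsigned int i = 0; i < 8; i++)
		printf("size_bins[%u] = %llu\n",
		       i, (unsigned long long)size_bins[i]);
	return 0;
}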
*/ + uint32_t agg_szmax; + uint32_t agg_pktmax; + uint32_t agg_align; + + /* Packet transmission aggregation states */ + struct hn_txdesc *agg_txd; + uint32_t agg_pktleft; + uint32_t agg_szleft; + struct rndis_packet_msg *agg_prevpkt; + + struct hn_stats stats; +}; + +struct hn_rx_queue { + struct hn_data *hv; + struct vmbus_channel *chan; + struct rte_mempool *mb_pool; + struct rte_ring *rx_ring; + + rte_spinlock_t ring_lock; + uint32_t event_sz; + uint16_t port_id; + uint16_t queue_id; + struct hn_stats stats; + + void *event_buf; +}; + + +/* multi-packet data from host */ +struct hn_rx_bufinfo { + struct vmbus_channel *chan; + struct hn_data *hv; + uint64_t xactid; + struct rte_mbuf_ext_shared_info shinfo; +} __rte_cache_aligned; + +#define HN_INVALID_PORT UINT16_MAX + +struct hn_data { + struct rte_vmbus_device *vmbus; + struct hn_rx_queue *primary; + rte_rwlock_t vf_lock; + uint16_t port_id; + uint16_t vf_port; + + uint8_t vf_present; + uint8_t closed; + uint8_t vlan_strip; + + uint32_t link_status; + uint32_t link_speed; + + struct rte_mem_resource *rxbuf_res; /* UIO resource for Rx */ + struct hn_rx_bufinfo *rxbuf_info; + uint32_t rxbuf_section_cnt; /* # of Rx sections */ + volatile uint32_t rxbuf_outstanding; + uint16_t max_queues; /* Max available queues */ + uint16_t num_queues; + uint64_t rss_offloads; + + rte_spinlock_t chim_lock; + struct rte_mem_resource *chim_res; /* UIO resource for Tx */ + struct rte_bitmap *chim_bmap; /* Send buffer map */ + void *chim_bmem; + uint32_t chim_szmax; /* Max size per buffer */ + uint32_t chim_cnt; /* Max packets per buffer */ + + uint32_t latency; + uint32_t nvs_ver; + uint32_t ndis_ver; + uint32_t rndis_agg_size; + uint32_t rndis_agg_pkts; + uint32_t rndis_agg_align; + + volatile uint32_t rndis_pending; + rte_atomic32_t rndis_req_id; + uint8_t rndis_resp[256]; + + uint32_t rss_hash; + uint8_t rss_key[40]; + uint16_t rss_ind[128]; + + struct rte_eth_dev_owner owner; + struct rte_intr_handle vf_intr; + + struct vmbus_channel *channels[HN_MAX_CHANNELS]; +}; + +static inline struct vmbus_channel * +hn_primary_chan(const struct hn_data *hv) +{ + return hv->channels[0]; +} + +uint32_t hn_process_events(struct hn_data *hv, uint16_t queue_id, + uint32_t tx_limit); + +uint16_t hn_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); +uint16_t hn_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); + +int hn_chim_init(struct rte_eth_dev *dev); +void hn_chim_uninit(struct rte_eth_dev *dev); +int hn_dev_link_update(struct rte_eth_dev *dev, int wait); +int hn_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, + uint16_t nb_desc, unsigned int socket_id, + const struct rte_eth_txconf *tx_conf); +void hn_dev_tx_queue_release(void *arg); +void hn_dev_tx_queue_info(struct rte_eth_dev *dev, uint16_t queue_idx, + struct rte_eth_txq_info *qinfo); +int hn_dev_tx_done_cleanup(void *arg, uint32_t free_cnt); + +struct hn_rx_queue *hn_rx_queue_alloc(struct hn_data *hv, + uint16_t queue_id, + unsigned int socket_id); +int hn_dev_rx_queue_setup(struct rte_eth_dev *dev, + uint16_t queue_idx, uint16_t nb_desc, + unsigned int socket_id, + const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *mp); +void hn_dev_rx_queue_release(void *arg); +void hn_dev_free_queues(struct rte_eth_dev *dev); + +/* Check if VF is attached */ +static inline bool +hn_vf_attached(const struct hn_data *hv) +{ + return hv->vf_port != HN_INVALID_PORT; +} + +/* + * Get VF device for existing netvsc device + * Assumes vf_lock is held. 
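struct hn_data above keeps the VF port number behind vf_lock: the transmit, receive and control wrappers take the lock for reading and fall back to the synthetic path while vf_port is HN_INVALID_PORT, and the hot-add/remove handling in hn_vf.c takes it for writing. A compact pthread model of that arrangement; the names and the made-up port number are illustrative only:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define INVALID_PORT UINT16_MAX	/* same sentinel idea as HN_INVALID_PORT */

static pthread_rwlock_t vf_lock = PTHREAD_RWLOCK_INITIALIZER;
static uint16_t vf_port = INVALID_PORT;

/* Datapath: read-lock, use the VF if one is attached, otherwise fall back
 * to the synthetic path. */
static const char *tx_path(void)
{
	const char *path;

	pthread_rwlock_rdlock(&vf_lock);
	path = (vf_port != INVALID_PORT) ? "VF" : "synthetic";
	pthread_rwlock_unlock(&vf_lock);
	return path;
}

/* Control path: write-lock while the VF is attached or detached. */
static void set_vf_port(uint16_t port)
{
	pthread_rwlock_wrlock(&vf_lock);
	vf_port = port;
	pthread_rwlock_unlock(&vf_lock);
}

int main(void)
{
	printf("before hot-add: %s path\n", tx_path());
	set_vf_port(3);			/* VF association arrives */
	printf("after hot-add:  %s path\n", tx_path());
	set_vf_port(INVALID_PORT);	/* VF removed again */
	printf("after removal:  %s path\n", tx_path());
	return 0;
}

(Build with -pthread.)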
+ */ +static inline struct rte_eth_dev * +hn_get_vf_dev(const struct hn_data *hv) +{ + uint16_t vf_port = hv->vf_port; + + if (vf_port == HN_INVALID_PORT) + return NULL; + else + return &rte_eth_devices[vf_port]; +} + +int hn_vf_info_get(struct hn_data *hv, + struct rte_eth_dev_info *info); +int hn_vf_add(struct rte_eth_dev *dev, struct hn_data *hv); +int hn_vf_configure(struct rte_eth_dev *dev, + const struct rte_eth_conf *dev_conf); +const uint32_t *hn_vf_supported_ptypes(struct rte_eth_dev *dev); +int hn_vf_start(struct rte_eth_dev *dev); +void hn_vf_reset(struct rte_eth_dev *dev); +void hn_vf_stop(struct rte_eth_dev *dev); +void hn_vf_close(struct rte_eth_dev *dev); + +int hn_vf_allmulticast_enable(struct rte_eth_dev *dev); +int hn_vf_allmulticast_disable(struct rte_eth_dev *dev); +int hn_vf_promiscuous_enable(struct rte_eth_dev *dev); +int hn_vf_promiscuous_disable(struct rte_eth_dev *dev); +int hn_vf_mc_addr_list(struct rte_eth_dev *dev, + struct rte_ether_addr *mc_addr_set, + uint32_t nb_mc_addr); + +int hn_vf_link_update(struct rte_eth_dev *dev, + int wait_to_complete); +int hn_vf_tx_queue_setup(struct rte_eth_dev *dev, + uint16_t queue_idx, uint16_t nb_desc, + unsigned int socket_id, + const struct rte_eth_txconf *tx_conf); +void hn_vf_tx_queue_release(struct hn_data *hv, uint16_t queue_id); +int hn_vf_rx_queue_setup(struct rte_eth_dev *dev, + uint16_t queue_idx, uint16_t nb_desc, + unsigned int socket_id, + const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *mp); +void hn_vf_rx_queue_release(struct hn_data *hv, uint16_t queue_id); + +int hn_vf_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats); +int hn_vf_stats_reset(struct rte_eth_dev *dev); +int hn_vf_xstats_get_names(struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, + unsigned int size); +int hn_vf_xstats_get(struct rte_eth_dev *dev, + struct rte_eth_xstat *xstats, + unsigned int offset, unsigned int n); +int hn_vf_xstats_reset(struct rte_eth_dev *dev); +int hn_vf_rss_hash_update(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf); +int hn_vf_reta_hash_update(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size); diff --git a/src/spdk/dpdk/drivers/net/netvsc/hn_vf.c b/src/spdk/dpdk/drivers/net/netvsc/hn_vf.c new file mode 100644 index 000000000..b7e3ba46b --- /dev/null +++ b/src/spdk/dpdk/drivers/net/netvsc/hn_vf.c @@ -0,0 +1,630 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2018 Microsoft Corp. + * All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "hn_logs.h" +#include "hn_var.h" +#include "hn_nvs.h" + +/* Search for VF with matching MAC address, return port id */ +static int hn_vf_match(const struct rte_eth_dev *dev) +{ + const struct rte_ether_addr *mac = dev->data->mac_addrs; + int i; + + RTE_ETH_FOREACH_DEV(i) { + const struct rte_eth_dev *vf_dev = &rte_eth_devices[i]; + const struct rte_ether_addr *vf_mac = vf_dev->data->mac_addrs; + + if (vf_dev == dev) + continue; + + if (rte_is_same_ether_addr(mac, vf_mac)) + return i; + } + return -ENOENT; +} + + +/* + * Attach new PCI VF device and return the port_id + */ +static int hn_vf_attach(struct hn_data *hv, uint16_t port_id) +{ + struct rte_eth_dev_owner owner = { .id = RTE_ETH_DEV_NO_OWNER }; + int ret; + + if (hn_vf_attached(hv)) { + PMD_DRV_LOG(ERR, "VF already attached"); + return -EEXIST; + } + + ret = rte_eth_dev_owner_get(port_id, &owner); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Can not find owner for port %d", port_id); + return ret; + } + + if (owner.id != RTE_ETH_DEV_NO_OWNER) { + PMD_DRV_LOG(ERR, "Port %u already owned by other device %s", + port_id, owner.name); + return -EBUSY; + } + + ret = rte_eth_dev_owner_set(port_id, &hv->owner); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Can set owner for port %d", port_id); + return ret; + } + + PMD_DRV_LOG(DEBUG, "Attach VF device %u", port_id); + hv->vf_port = port_id; + return 0; +} + +/* Add new VF device to synthetic device */ +int hn_vf_add(struct rte_eth_dev *dev, struct hn_data *hv) +{ + int port, err; + + port = hn_vf_match(dev); + if (port < 0) { + PMD_DRV_LOG(NOTICE, "No matching MAC found"); + return port; + } + + err = hn_vf_attach(hv, port); + if (err == 0) { + dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC; + hv->vf_intr = (struct rte_intr_handle) { + .fd = -1, + .type = RTE_INTR_HANDLE_EXT, + }; + dev->intr_handle = &hv->vf_intr; + hn_nvs_set_datapath(hv, NVS_DATAPATH_VF); + } + + return err; +} + +/* Remove new VF device */ +static void hn_vf_remove(struct hn_data *hv) +{ + + if (!hn_vf_attached(hv)) { + PMD_DRV_LOG(ERR, "VF path not active"); + } else { + /* Stop incoming packets from arriving on VF */ + hn_nvs_set_datapath(hv, NVS_DATAPATH_SYNTHETIC); + + /* Stop transmission over VF */ + hv->vf_port = HN_INVALID_PORT; + + /* Give back ownership */ + rte_eth_dev_owner_unset(hv->vf_port, hv->owner.id); + } +} + +/* Handle VF association message from host */ +void +hn_nvs_handle_vfassoc(struct rte_eth_dev *dev, + const struct vmbus_chanpkt_hdr *hdr, + const void *data) +{ + struct hn_data *hv = dev->data->dev_private; + const struct hn_nvs_vf_association *vf_assoc = data; + + if (unlikely(vmbus_chanpkt_datalen(hdr) < sizeof(*vf_assoc))) { + PMD_DRV_LOG(ERR, "invalid vf association NVS"); + return; + } + + PMD_DRV_LOG(DEBUG, "VF serial %u %s port %u", + vf_assoc->serial, + vf_assoc->allocated ? 
"add to" : "remove from", + dev->data->port_id); + + rte_rwlock_write_lock(&hv->vf_lock); + hv->vf_present = vf_assoc->allocated; + + if (dev->state == RTE_ETH_DEV_ATTACHED) { + if (vf_assoc->allocated) + hn_vf_add(dev, hv); + else + hn_vf_remove(hv); + } + rte_rwlock_write_unlock(&hv->vf_lock); +} + +static void +hn_vf_merge_desc_lim(struct rte_eth_desc_lim *lim, + const struct rte_eth_desc_lim *vf_lim) +{ + lim->nb_max = RTE_MIN(vf_lim->nb_max, lim->nb_max); + lim->nb_min = RTE_MAX(vf_lim->nb_min, lim->nb_min); + lim->nb_align = RTE_MAX(vf_lim->nb_align, lim->nb_align); + lim->nb_seg_max = RTE_MIN(vf_lim->nb_seg_max, lim->nb_seg_max); + lim->nb_mtu_seg_max = RTE_MIN(vf_lim->nb_seg_max, lim->nb_seg_max); +} + +/* + * Merge the info from the VF and synthetic path. + * use the default config of the VF + * and the minimum number of queues and buffer sizes. + */ +static int hn_vf_info_merge(struct rte_eth_dev *vf_dev, + struct rte_eth_dev_info *info) +{ + struct rte_eth_dev_info vf_info; + int ret; + + ret = rte_eth_dev_info_get(vf_dev->data->port_id, &vf_info); + if (ret != 0) + return ret; + + info->speed_capa = vf_info.speed_capa; + info->default_rxportconf = vf_info.default_rxportconf; + info->default_txportconf = vf_info.default_txportconf; + + info->max_rx_queues = RTE_MIN(vf_info.max_rx_queues, + info->max_rx_queues); + info->rx_offload_capa &= vf_info.rx_offload_capa; + info->rx_queue_offload_capa &= vf_info.rx_queue_offload_capa; + info->flow_type_rss_offloads &= vf_info.flow_type_rss_offloads; + + info->max_tx_queues = RTE_MIN(vf_info.max_tx_queues, + info->max_tx_queues); + info->tx_offload_capa &= vf_info.tx_offload_capa; + info->tx_queue_offload_capa &= vf_info.tx_queue_offload_capa; + hn_vf_merge_desc_lim(&info->tx_desc_lim, &vf_info.tx_desc_lim); + + info->min_rx_bufsize = RTE_MAX(vf_info.min_rx_bufsize, + info->min_rx_bufsize); + info->max_rx_pktlen = RTE_MAX(vf_info.max_rx_pktlen, + info->max_rx_pktlen); + hn_vf_merge_desc_lim(&info->rx_desc_lim, &vf_info.rx_desc_lim); + + return 0; +} + +int hn_vf_info_get(struct hn_data *hv, struct rte_eth_dev_info *info) +{ + struct rte_eth_dev *vf_dev; + int ret = 0; + + rte_rwlock_read_lock(&hv->vf_lock); + vf_dev = hn_get_vf_dev(hv); + if (vf_dev) + ret = hn_vf_info_merge(vf_dev, info); + rte_rwlock_read_unlock(&hv->vf_lock); + return ret; +} + +int hn_vf_link_update(struct rte_eth_dev *dev, + int wait_to_complete) +{ + struct hn_data *hv = dev->data->dev_private; + struct rte_eth_dev *vf_dev; + int ret = 0; + + rte_rwlock_read_lock(&hv->vf_lock); + vf_dev = hn_get_vf_dev(hv); + if (vf_dev && vf_dev->dev_ops->link_update) + ret = (*vf_dev->dev_ops->link_update)(vf_dev, wait_to_complete); + rte_rwlock_read_unlock(&hv->vf_lock); + + return ret; +} + +/* called when VF has link state interrupts enabled */ +static int hn_vf_lsc_event(uint16_t port_id __rte_unused, + enum rte_eth_event_type event, + void *cb_arg, void *out __rte_unused) +{ + struct rte_eth_dev *dev = cb_arg; + + if (event != RTE_ETH_EVENT_INTR_LSC) + return 0; + + /* if link state has changed pass on */ + if (hn_dev_link_update(dev, 0) == 0) + return 0; /* no change */ + + return _rte_eth_dev_callback_process(dev, + RTE_ETH_EVENT_INTR_LSC, + NULL); +} + +static int _hn_vf_configure(struct rte_eth_dev *dev, + uint16_t vf_port, + const struct rte_eth_conf *dev_conf) +{ + struct rte_eth_conf vf_conf = *dev_conf; + struct rte_eth_dev *vf_dev; + int ret; + + vf_dev = &rte_eth_devices[vf_port]; + if (dev_conf->intr_conf.lsc && + (vf_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)) { 
+ PMD_DRV_LOG(DEBUG, "enabling LSC for VF %u", + vf_port); + vf_conf.intr_conf.lsc = 1; + } else { + PMD_DRV_LOG(DEBUG, "disabling LSC for VF %u", + vf_port); + vf_conf.intr_conf.lsc = 0; + } + + ret = rte_eth_dev_configure(vf_port, + dev->data->nb_rx_queues, + dev->data->nb_tx_queues, + &vf_conf); + if (ret) { + PMD_DRV_LOG(ERR, + "VF configuration failed: %d", ret); + } else if (vf_conf.intr_conf.lsc) { + ret = rte_eth_dev_callback_register(vf_port, + RTE_ETH_DEV_INTR_LSC, + hn_vf_lsc_event, dev); + if (ret) + PMD_DRV_LOG(ERR, + "Failed to register LSC callback for VF %u", + vf_port); + } + return ret; +} + +/* + * Configure VF if present. + * Force VF to have same number of queues as synthetic device + */ +int hn_vf_configure(struct rte_eth_dev *dev, + const struct rte_eth_conf *dev_conf) +{ + struct hn_data *hv = dev->data->dev_private; + int ret = 0; + + rte_rwlock_read_lock(&hv->vf_lock); + if (hv->vf_port != HN_INVALID_PORT) + ret = _hn_vf_configure(dev, hv->vf_port, dev_conf); + rte_rwlock_read_unlock(&hv->vf_lock); + return ret; +} + +const uint32_t *hn_vf_supported_ptypes(struct rte_eth_dev *dev) +{ + struct hn_data *hv = dev->data->dev_private; + struct rte_eth_dev *vf_dev; + const uint32_t *ptypes = NULL; + + rte_rwlock_read_lock(&hv->vf_lock); + vf_dev = hn_get_vf_dev(hv); + if (vf_dev && vf_dev->dev_ops->dev_supported_ptypes_get) + ptypes = (*vf_dev->dev_ops->dev_supported_ptypes_get)(vf_dev); + rte_rwlock_read_unlock(&hv->vf_lock); + + return ptypes; +} + +int hn_vf_start(struct rte_eth_dev *dev) +{ + struct hn_data *hv = dev->data->dev_private; + struct rte_eth_dev *vf_dev; + int ret = 0; + + rte_rwlock_read_lock(&hv->vf_lock); + vf_dev = hn_get_vf_dev(hv); + if (vf_dev) + ret = rte_eth_dev_start(vf_dev->data->port_id); + rte_rwlock_read_unlock(&hv->vf_lock); + return ret; +} + +void hn_vf_stop(struct rte_eth_dev *dev) +{ + struct hn_data *hv = dev->data->dev_private; + struct rte_eth_dev *vf_dev; + + rte_rwlock_read_lock(&hv->vf_lock); + vf_dev = hn_get_vf_dev(hv); + if (vf_dev) + rte_eth_dev_stop(vf_dev->data->port_id); + rte_rwlock_read_unlock(&hv->vf_lock); +} + +/* If VF is present, then cascade configuration down */ +#define VF_ETHDEV_FUNC(dev, func) \ + { \ + struct hn_data *hv = (dev)->data->dev_private; \ + struct rte_eth_dev *vf_dev; \ + rte_rwlock_read_lock(&hv->vf_lock); \ + vf_dev = hn_get_vf_dev(hv); \ + if (vf_dev) \ + func(vf_dev->data->port_id); \ + rte_rwlock_read_unlock(&hv->vf_lock); \ + } + +/* If VF is present, then cascade configuration down */ +#define VF_ETHDEV_FUNC_RET_STATUS(dev, func) \ + { \ + struct hn_data *hv = (dev)->data->dev_private; \ + struct rte_eth_dev *vf_dev; \ + int ret = 0; \ + rte_rwlock_read_lock(&hv->vf_lock); \ + vf_dev = hn_get_vf_dev(hv); \ + if (vf_dev) \ + ret = func(vf_dev->data->port_id); \ + rte_rwlock_read_unlock(&hv->vf_lock); \ + return ret; \ + } + +void hn_vf_reset(struct rte_eth_dev *dev) +{ + VF_ETHDEV_FUNC(dev, rte_eth_dev_reset); +} + +void hn_vf_close(struct rte_eth_dev *dev) +{ + struct hn_data *hv = dev->data->dev_private; + uint16_t vf_port; + + rte_rwlock_read_lock(&hv->vf_lock); + vf_port = hv->vf_port; + if (vf_port != HN_INVALID_PORT) + rte_eth_dev_close(vf_port); + + hv->vf_port = HN_INVALID_PORT; + rte_rwlock_read_unlock(&hv->vf_lock); +} + +int hn_vf_stats_reset(struct rte_eth_dev *dev) +{ + VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_stats_reset); +} + +int hn_vf_allmulticast_enable(struct rte_eth_dev *dev) +{ + VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_allmulticast_enable); +} + +int 
hn_vf_allmulticast_disable(struct rte_eth_dev *dev) +{ + VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_allmulticast_disable); +} + +int hn_vf_promiscuous_enable(struct rte_eth_dev *dev) +{ + VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_promiscuous_enable); +} + +int hn_vf_promiscuous_disable(struct rte_eth_dev *dev) +{ + VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_promiscuous_disable); +} + +int hn_vf_mc_addr_list(struct rte_eth_dev *dev, + struct rte_ether_addr *mc_addr_set, + uint32_t nb_mc_addr) +{ + struct hn_data *hv = dev->data->dev_private; + struct rte_eth_dev *vf_dev; + int ret = 0; + + rte_rwlock_read_lock(&hv->vf_lock); + vf_dev = hn_get_vf_dev(hv); + if (vf_dev) + ret = rte_eth_dev_set_mc_addr_list(vf_dev->data->port_id, + mc_addr_set, nb_mc_addr); + rte_rwlock_read_unlock(&hv->vf_lock); + return ret; +} + +int hn_vf_tx_queue_setup(struct rte_eth_dev *dev, + uint16_t queue_idx, uint16_t nb_desc, + unsigned int socket_id, + const struct rte_eth_txconf *tx_conf) +{ + struct hn_data *hv = dev->data->dev_private; + struct rte_eth_dev *vf_dev; + int ret = 0; + + rte_rwlock_read_lock(&hv->vf_lock); + vf_dev = hn_get_vf_dev(hv); + if (vf_dev) + ret = rte_eth_tx_queue_setup(vf_dev->data->port_id, + queue_idx, nb_desc, + socket_id, tx_conf); + rte_rwlock_read_unlock(&hv->vf_lock); + return ret; +} + +void hn_vf_tx_queue_release(struct hn_data *hv, uint16_t queue_id) +{ + struct rte_eth_dev *vf_dev; + + rte_rwlock_read_lock(&hv->vf_lock); + vf_dev = hn_get_vf_dev(hv); + if (vf_dev && vf_dev->dev_ops->tx_queue_release) { + void *subq = vf_dev->data->tx_queues[queue_id]; + + (*vf_dev->dev_ops->tx_queue_release)(subq); + } + + rte_rwlock_read_unlock(&hv->vf_lock); +} + +int hn_vf_rx_queue_setup(struct rte_eth_dev *dev, + uint16_t queue_idx, uint16_t nb_desc, + unsigned int socket_id, + const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *mp) +{ + struct hn_data *hv = dev->data->dev_private; + struct rte_eth_dev *vf_dev; + int ret = 0; + + rte_rwlock_read_lock(&hv->vf_lock); + vf_dev = hn_get_vf_dev(hv); + if (vf_dev) + ret = rte_eth_rx_queue_setup(vf_dev->data->port_id, + queue_idx, nb_desc, + socket_id, rx_conf, mp); + rte_rwlock_read_unlock(&hv->vf_lock); + return ret; +} + +void hn_vf_rx_queue_release(struct hn_data *hv, uint16_t queue_id) +{ + struct rte_eth_dev *vf_dev; + + rte_rwlock_read_lock(&hv->vf_lock); + vf_dev = hn_get_vf_dev(hv); + if (vf_dev && vf_dev->dev_ops->rx_queue_release) { + void *subq = vf_dev->data->rx_queues[queue_id]; + + (*vf_dev->dev_ops->rx_queue_release)(subq); + } + rte_rwlock_read_unlock(&hv->vf_lock); +} + +int hn_vf_stats_get(struct rte_eth_dev *dev, + struct rte_eth_stats *stats) +{ + struct hn_data *hv = dev->data->dev_private; + struct rte_eth_dev *vf_dev; + int ret = 0; + + rte_rwlock_read_lock(&hv->vf_lock); + vf_dev = hn_get_vf_dev(hv); + if (vf_dev) + ret = rte_eth_stats_get(vf_dev->data->port_id, stats); + rte_rwlock_read_unlock(&hv->vf_lock); + return ret; +} + +int hn_vf_xstats_get_names(struct rte_eth_dev *dev, + struct rte_eth_xstat_name *names, + unsigned int n) +{ + struct hn_data *hv = dev->data->dev_private; + struct rte_eth_dev *vf_dev; + int i, count = 0; + + rte_rwlock_read_lock(&hv->vf_lock); + vf_dev = hn_get_vf_dev(hv); + if (vf_dev) + count = rte_eth_xstats_get_names(vf_dev->data->port_id, + names, n); + rte_rwlock_read_unlock(&hv->vf_lock); + + /* add vf_ prefix to xstat names */ + if (names) { + for (i = 0; i < count; i++) { + char tmp[RTE_ETH_XSTATS_NAME_SIZE]; + + snprintf(tmp, sizeof(tmp), "vf_%s", names[i].name); + strlcpy(names[i].name, 
tmp, sizeof(names[i].name)); + } + } + + return count; +} + +int hn_vf_xstats_get(struct rte_eth_dev *dev, + struct rte_eth_xstat *xstats, + unsigned int offset, + unsigned int n) +{ + struct hn_data *hv = dev->data->dev_private; + struct rte_eth_dev *vf_dev; + int i, count = 0; + + rte_rwlock_read_lock(&hv->vf_lock); + vf_dev = hn_get_vf_dev(hv); + if (vf_dev) + count = rte_eth_xstats_get(vf_dev->data->port_id, + xstats + offset, n - offset); + rte_rwlock_read_unlock(&hv->vf_lock); + + /* Offset id's for VF stats */ + if (count > 0) { + for (i = 0; i < count; i++) + xstats[i + offset].id += offset; + } + + return count; +} + +int hn_vf_xstats_reset(struct rte_eth_dev *dev) +{ + struct hn_data *hv = dev->data->dev_private; + struct rte_eth_dev *vf_dev; + int ret; + + rte_rwlock_read_lock(&hv->vf_lock); + vf_dev = hn_get_vf_dev(hv); + if (vf_dev) + ret = rte_eth_xstats_reset(vf_dev->data->port_id); + else + ret = -EINVAL; + rte_rwlock_read_unlock(&hv->vf_lock); + + return ret; +} + +int hn_vf_rss_hash_update(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf) +{ + struct hn_data *hv = dev->data->dev_private; + struct rte_eth_dev *vf_dev; + int ret = 0; + + rte_rwlock_read_lock(&hv->vf_lock); + vf_dev = hn_get_vf_dev(hv); + if (vf_dev && vf_dev->dev_ops->rss_hash_update) + ret = vf_dev->dev_ops->rss_hash_update(vf_dev, rss_conf); + rte_rwlock_read_unlock(&hv->vf_lock); + + return ret; +} + +int hn_vf_reta_hash_update(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size) +{ + struct hn_data *hv = dev->data->dev_private; + struct rte_eth_dev *vf_dev; + int ret = 0; + + rte_rwlock_read_lock(&hv->vf_lock); + vf_dev = hn_get_vf_dev(hv); + if (vf_dev && vf_dev->dev_ops->reta_update) + ret = vf_dev->dev_ops->reta_update(vf_dev, + reta_conf, reta_size); + rte_rwlock_read_unlock(&hv->vf_lock); + + return ret; +} diff --git a/src/spdk/dpdk/drivers/net/netvsc/meson.build b/src/spdk/dpdk/drivers/net/netvsc/meson.build new file mode 100644 index 000000000..e7f449302 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/netvsc/meson.build @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2018 Microsoft Corporation + +build = dpdk_conf.has('RTE_LIBRTE_VMBUS_BUS') +reason = 'missing dependency, DPDK VMBus driver' +sources = files('hn_ethdev.c', 'hn_rxtx.c', 'hn_rndis.c', 'hn_nvs.c', 'hn_vf.c') + +deps += ['bus_vmbus' ] diff --git a/src/spdk/dpdk/drivers/net/netvsc/ndis.h b/src/spdk/dpdk/drivers/net/netvsc/ndis.h new file mode 100644 index 000000000..d97a397a8 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/netvsc/ndis.h @@ -0,0 +1,378 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2018 Microsoft Corp. + * All rights reserved. 
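hn_vf_xstats_get_names() above rewrites each statistics name in place by formatting "vf_%s" into a temporary buffer and copying the result back; going through the temporary matters because snprintf() with overlapping source and destination is undefined. The renaming step on its own, with made-up names and plain snprintf() standing in for strlcpy():

#include <stdio.h>

#define NAME_SIZE 64	/* stands in for RTE_ETH_XSTATS_NAME_SIZE */

struct xstat_name {
	char name[NAME_SIZE];
};

/* Prepend "vf_" to every name, never formatting a buffer from itself. */
static void prefix_names(struct xstat_name *names, unsigned int count)
{
	for (unsigned int i = 0; i < count; i++) {
		char tmp[NAME_SIZE];

		snprintf(tmp, sizeof(tmp), "vf_%s", names[i].name);
		snprintf(names[i].name, sizeof(names[i].name), "%s", tmp);
	}
}

int main(void)
{
	struct xstat_name names[2];

	snprintf(names[0].name, NAME_SIZE, "rx_good_packets");
	snprintf(names[1].name, NAME_SIZE, "tx_good_packets");

	prefix_names(names, 2);
	for (unsigned int i = 0; i < 2; i++)
		printf("%s\n", names[i].name);
	return 0;
}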
+ */ + +#ifndef _NET_NDIS_H_ +#define _NET_NDIS_H_ + +#define NDIS_MEDIA_STATE_CONNECTED 0 +#define NDIS_MEDIA_STATE_DISCONNECTED 1 + +#define NDIS_NETCHANGE_TYPE_POSSIBLE 1 +#define NDIS_NETCHANGE_TYPE_DEFINITE 2 +#define NDIS_NETCHANGE_TYPE_FROMMEDIA 3 + +#define NDIS_OFFLOAD_SET_NOCHG 0 +#define NDIS_OFFLOAD_SET_ON 1 +#define NDIS_OFFLOAD_SET_OFF 2 + +/* a.k.a GRE MAC */ +#define NDIS_ENCAP_TYPE_NVGRE 0x00000001 + +#define NDIS_HASH_FUNCTION_MASK 0x000000FF /* see hash function */ +#define NDIS_HASH_TYPE_MASK 0x00FFFF00 /* see hash type */ + +/* hash function */ +#define NDIS_HASH_FUNCTION_TOEPLITZ 0x00000001 + +/* hash type */ +#define NDIS_HASH_IPV4 0x00000100 +#define NDIS_HASH_TCP_IPV4 0x00000200 +#define NDIS_HASH_IPV6 0x00000400 +#define NDIS_HASH_IPV6_EX 0x00000800 +#define NDIS_HASH_TCP_IPV6 0x00001000 +#define NDIS_HASH_TCP_IPV6_EX 0x00002000 + +#define NDIS_HASH_KEYSIZE_TOEPLITZ 40 +#define NDIS_HASH_INDCNT 128 + +#define NDIS_OBJTYPE_DEFAULT 0x80 +#define NDIS_OBJTYPE_RSS_CAPS 0x88 +#define NDIS_OBJTYPE_RSS_PARAMS 0x89 +#define NDIS_OBJTYPE_OFFLOAD 0xa7 + +struct ndis_object_hdr { + uint8_t ndis_type; /* NDIS_OBJTYPE_ */ + uint8_t ndis_rev; /* type specific */ + uint16_t ndis_size; /* incl. this hdr */ +} __rte_packed; + +/* + * OID_TCP_OFFLOAD_PARAMETERS + * ndis_type: NDIS_OBJTYPE_DEFAULT + */ +struct ndis_offload_params { + struct ndis_object_hdr ndis_hdr; + uint8_t ndis_ip4csum; /* NDIS_OFFLOAD_PARAM_ */ + uint8_t ndis_tcp4csum; /* NDIS_OFFLOAD_PARAM_ */ + uint8_t ndis_udp4csum; /* NDIS_OFFLOAD_PARAM_ */ + uint8_t ndis_tcp6csum; /* NDIS_OFFLOAD_PARAM_ */ + uint8_t ndis_udp6csum; /* NDIS_OFFLOAD_PARAM_ */ + uint8_t ndis_lsov1; /* NDIS_OFFLOAD_PARAM_ */ + uint8_t ndis_ipsecv1; /* NDIS_OFFLOAD_IPSECV1_ */ + uint8_t ndis_lsov2_ip4; /* NDIS_OFFLOAD_LSOV2_ */ + uint8_t ndis_lsov2_ip6; /* NDIS_OFFLOAD_LSOV2_ */ + uint8_t ndis_tcp4conn; /* 0 */ + uint8_t ndis_tcp6conn; /* 0 */ + uint32_t ndis_flags; /* 0 */ + /* NDIS >= 6.1 */ + uint8_t ndis_ipsecv2; /* NDIS_OFFLOAD_IPSECV2_ */ + uint8_t ndis_ipsecv2_ip4;/* NDIS_OFFLOAD_IPSECV2_ */ + /* NDIS >= 6.30 */ + uint8_t ndis_rsc_ip4; /* NDIS_OFFLOAD_RSC_ */ + uint8_t ndis_rsc_ip6; /* NDIS_OFFLOAD_RSC_ */ + uint8_t ndis_encap; /* NDIS_OFFLOAD_SET_ */ + uint8_t ndis_encap_types;/* NDIS_ENCAP_TYPE_ */ +}; + +#define NDIS_OFFLOAD_PARAMS_SIZE sizeof(struct ndis_offload_params) +#define NDIS_OFFLOAD_PARAMS_SIZE_6_1 \ + offsetof(struct ndis_offload_params, ndis_rsc_ip4) + +#define NDIS_OFFLOAD_PARAMS_REV_2 2 /* NDIS 6.1 */ +#define NDIS_OFFLOAD_PARAMS_REV_3 3 /* NDIS 6.30 */ + +#define NDIS_OFFLOAD_PARAM_NOCHG 0 /* common */ +#define NDIS_OFFLOAD_PARAM_OFF 1 +#define NDIS_OFFLOAD_PARAM_TX 2 +#define NDIS_OFFLOAD_PARAM_RX 3 +#define NDIS_OFFLOAD_PARAM_TXRX 4 + +/* NDIS_OFFLOAD_PARAM_NOCHG */ +#define NDIS_OFFLOAD_LSOV1_OFF 1 +#define NDIS_OFFLOAD_LSOV1_ON 2 + +/* NDIS_OFFLOAD_PARAM_NOCHG */ +#define NDIS_OFFLOAD_IPSECV1_OFF 1 +#define NDIS_OFFLOAD_IPSECV1_AH 2 +#define NDIS_OFFLOAD_IPSECV1_ESP 3 +#define NDIS_OFFLOAD_IPSECV1_AH_ESP 4 + +/* NDIS_OFFLOAD_PARAM_NOCHG */ +#define NDIS_OFFLOAD_LSOV2_OFF 1 +#define NDIS_OFFLOAD_LSOV2_ON 2 + +/* NDIS_OFFLOAD_PARAM_NOCHG */ +#define NDIS_OFFLOAD_IPSECV2_OFF 1 +#define NDIS_OFFLOAD_IPSECV2_AH 2 +#define NDIS_OFFLOAD_IPSECV2_ESP 3 +#define NDIS_OFFLOAD_IPSECV2_AH_ESP 4 + +/* NDIS_OFFLOAD_PARAM_NOCHG */ +#define NDIS_OFFLOAD_RSC_OFF 1 +#define NDIS_OFFLOAD_RSC_ON 2 + +/* + * OID_GEN_RECEIVE_SCALE_CAPABILITIES + * ndis_type: NDIS_OBJTYPE_RSS_CAPS + */ +struct ndis_rss_caps { + struct ndis_object_hdr 
ndis_hdr; + uint32_t ndis_caps; /* NDIS_RSS_CAP_ */ + uint32_t ndis_nmsi; /* # of MSIs */ + uint32_t ndis_nrxr; /* # of RX rings */ + /* NDIS >= 6.30 */ + uint16_t ndis_nind; /* # of indtbl ent. */ + uint16_t ndis_pad; +} __rte_packed; + +#define NDIS_RSS_CAPS_SIZE \ + offsetof(struct ndis_rss_caps, ndis_pad) +#define NDIS_RSS_CAPS_SIZE_6_0 \ + offsetof(struct ndis_rss_caps, ndis_nind) + +#define NDIS_RSS_CAPS_REV_1 1 /* NDIS 6.{0,1,20} */ +#define NDIS_RSS_CAPS_REV_2 2 /* NDIS 6.30 */ + +#define NDIS_RSS_CAP_MSI 0x01000000 +#define NDIS_RSS_CAP_CLASSIFY_ISR 0x02000000 +#define NDIS_RSS_CAP_CLASSIFY_DPC 0x04000000 +#define NDIS_RSS_CAP_MSIX 0x08000000 +#define NDIS_RSS_CAP_IPV4 0x00000100 +#define NDIS_RSS_CAP_IPV6 0x00000200 +#define NDIS_RSS_CAP_IPV6_EX 0x00000400 +#define NDIS_RSS_CAP_HASH_TOEPLITZ NDIS_HASH_FUNCTION_TOEPLITZ +#define NDIS_RSS_CAP_HASHFUNC_MASK NDIS_HASH_FUNCTION_MASK + +/* + * OID_GEN_RECEIVE_SCALE_PARAMETERS + * ndis_type: NDIS_OBJTYPE_RSS_PARAMS + */ +struct ndis_rss_params { + struct ndis_object_hdr ndis_hdr; + uint16_t ndis_flags; /* NDIS_RSS_FLAG_ */ + uint16_t ndis_bcpu; /* base cpu 0 */ + uint32_t ndis_hash; /* NDIS_HASH_ */ + uint16_t ndis_indsize; /* indirect table */ + uint32_t ndis_indoffset; + uint16_t ndis_keysize; /* hash key */ + uint32_t ndis_keyoffset; + /* NDIS >= 6.20 */ + uint32_t ndis_cpumaskoffset; + uint32_t ndis_cpumaskcnt; + uint32_t ndis_cpumaskentsz; +}; + +#define NDIS_RSS_PARAMS_SIZE sizeof(struct ndis_rss_params) +#define NDIS_RSS_PARAMS_SIZE_6_0 \ + offsetof(struct ndis_rss_params, ndis_cpumaskoffset) + +#define NDIS_RSS_PARAMS_REV_1 1 /* NDIS 6.0 */ +#define NDIS_RSS_PARAMS_REV_2 2 /* NDIS 6.20 */ + +#define NDIS_RSS_FLAG_NONE 0x0000 +#define NDIS_RSS_FLAG_BCPU_UNCHG 0x0001 +#define NDIS_RSS_FLAG_HASH_UNCHG 0x0002 +#define NDIS_RSS_FLAG_IND_UNCHG 0x0004 +#define NDIS_RSS_FLAG_KEY_UNCHG 0x0008 +#define NDIS_RSS_FLAG_DISABLE 0x0010 + +/* non-standard convenient struct */ +struct ndis_rssprm_toeplitz { + struct ndis_rss_params rss_params; + /* Indirect table */ + uint32_t rss_ind[NDIS_HASH_INDCNT]; + /* Toeplitz hash key */ + uint8_t rss_key[NDIS_HASH_KEYSIZE_TOEPLITZ]; +}; + +#define NDIS_RSSPRM_TOEPLITZ_SIZE(nind) \ + offsetof(struct ndis_rssprm_toeplitz, rss_ind[nind]) + +/* + * OID_TCP_OFFLOAD_HARDWARE_CAPABILITIES + * ndis_type: NDIS_OBJTYPE_OFFLOAD + */ + +#define NDIS_OFFLOAD_ENCAP_NONE 0x0000 +#define NDIS_OFFLOAD_ENCAP_NULL 0x0001 +#define NDIS_OFFLOAD_ENCAP_8023 0x0002 +#define NDIS_OFFLOAD_ENCAP_8023PQ 0x0004 +#define NDIS_OFFLOAD_ENCAP_8023PQ_OOB 0x0008 +#define NDIS_OFFLOAD_ENCAP_RFC1483 0x0010 + +struct ndis_csum_offload { + uint32_t ndis_ip4_txenc; /*NDIS_OFFLOAD_ENCAP_*/ + uint32_t ndis_ip4_txcsum; +#define NDIS_TXCSUM_CAP_IP4OPT 0x001 +#define NDIS_TXCSUM_CAP_TCP4OPT 0x004 +#define NDIS_TXCSUM_CAP_TCP4 0x010 +#define NDIS_TXCSUM_CAP_UDP4 0x040 +#define NDIS_TXCSUM_CAP_IP4 0x100 + uint32_t ndis_ip4_rxenc; /*NDIS_OFFLOAD_ENCAP_*/ + uint32_t ndis_ip4_rxcsum; +#define NDIS_RXCSUM_CAP_IP4OPT 0x001 +#define NDIS_RXCSUM_CAP_TCP4OPT 0x004 +#define NDIS_RXCSUM_CAP_TCP4 0x010 +#define NDIS_RXCSUM_CAP_UDP4 0x040 +#define NDIS_RXCSUM_CAP_IP4 0x100 + uint32_t ndis_ip6_txenc; /*NDIS_OFFLOAD_ENCAP_*/ + uint32_t ndis_ip6_txcsum; +#define NDIS_TXCSUM_CAP_IP6EXT 0x001 +#define NDIS_TXCSUM_CAP_TCP6OPT 0x004 +#define NDIS_TXCSUM_CAP_TCP6 0x010 +#define NDIS_TXCSUM_CAP_UDP6 0x040 + uint32_t ndis_ip6_rxenc; /*NDIS_OFFLOAD_ENCAP_*/ + uint32_t ndis_ip6_rxcsum; +#define NDIS_RXCSUM_CAP_IP6EXT 0x001 +#define NDIS_RXCSUM_CAP_TCP6OPT 0x004 +#define 
NDIS_RXCSUM_CAP_TCP6 0x010 +#define NDIS_RXCSUM_CAP_UDP6 0x040 +}; + +struct ndis_lsov1_offload { + uint32_t ndis_encap; /*NDIS_OFFLOAD_ENCAP_*/ + uint32_t ndis_maxsize; + uint32_t ndis_minsegs; + uint32_t ndis_opts; +}; + +struct ndis_ipsecv1_offload { + uint32_t ndis_encap; /*NDIS_OFFLOAD_ENCAP_*/ + uint32_t ndis_ah_esp; + uint32_t ndis_xport_tun; + uint32_t ndis_ip4_opts; + uint32_t ndis_flags; + uint32_t ndis_ip4_ah; + uint32_t ndis_ip4_esp; +}; + +struct ndis_lsov2_offload { + uint32_t ndis_ip4_encap; /*NDIS_OFFLOAD_ENCAP_*/ + uint32_t ndis_ip4_maxsz; + uint32_t ndis_ip4_minsg; + uint32_t ndis_ip6_encap; /*NDIS_OFFLOAD_ENCAP_*/ + uint32_t ndis_ip6_maxsz; + uint32_t ndis_ip6_minsg; + uint32_t ndis_ip6_opts; +#define NDIS_LSOV2_CAP_IP6EXT 0x001 +#define NDIS_LSOV2_CAP_TCP6OPT 0x004 +}; + +struct ndis_ipsecv2_offload { + uint32_t ndis_encap; /*NDIS_OFFLOAD_ENCAP_*/ + uint8_t ndis_ip6; + uint8_t ndis_ip4opt; + uint8_t ndis_ip6ext; + uint8_t ndis_ah; + uint8_t ndis_esp; + uint8_t ndis_ah_esp; + uint8_t ndis_xport; + uint8_t ndis_tun; + uint8_t ndis_xport_tun; + uint8_t ndis_lso; + uint8_t ndis_extseq; + uint32_t ndis_udp_esp; + uint32_t ndis_auth; + uint32_t ndis_crypto; + uint32_t ndis_sa_caps; +}; + +struct ndis_rsc_offload { + uint8_t ndis_ip4; + uint8_t ndis_ip6; +}; + +struct ndis_encap_offload { + uint32_t ndis_flags; + uint32_t ndis_maxhdr; +}; + +struct ndis_offload { + struct ndis_object_hdr ndis_hdr; + struct ndis_csum_offload ndis_csum; + struct ndis_lsov1_offload ndis_lsov1; + struct ndis_ipsecv1_offload ndis_ipsecv1; + struct ndis_lsov2_offload ndis_lsov2; + uint32_t ndis_flags; + /* NDIS >= 6.1 */ + struct ndis_ipsecv2_offload ndis_ipsecv2; + /* NDIS >= 6.30 */ + struct ndis_rsc_offload ndis_rsc; + struct ndis_encap_offload ndis_encap_gre; +}; + +#define NDIS_OFFLOAD_SIZE sizeof(struct ndis_offload) +#define NDIS_OFFLOAD_SIZE_6_0 offsetof(struct ndis_offload, ndis_ipsecv2) +#define NDIS_OFFLOAD_SIZE_6_1 offsetof(struct ndis_offload, ndis_rsc) + +#define NDIS_OFFLOAD_REV_1 1 /* NDIS 6.0 */ +#define NDIS_OFFLOAD_REV_2 2 /* NDIS 6.1 */ +#define NDIS_OFFLOAD_REV_3 3 /* NDIS 6.30 */ + +/* + * Per-packet-info + */ + +/* VLAN */ +#define NDIS_VLAN_INFO_SIZE sizeof(uint32_t) +#define NDIS_VLAN_INFO_PRI_MASK 0x0007 +#define NDIS_VLAN_INFO_CFI_MASK 0x0008 +#define NDIS_VLAN_INFO_ID_MASK 0xfff0 +#define NDIS_VLAN_INFO_MAKE(id, pri, cfi) \ + (((pri) & NDIS_VLAN_INFO_PRI_MASK) | \ + (((cfi) & 0x1) << 3) | (((id) & 0xfff) << 4)) +#define NDIS_VLAN_INFO_ID(inf) (((inf) & NDIS_VLAN_INFO_ID_MASK) >> 4) +#define NDIS_VLAN_INFO_CFI(inf) (((inf) & NDIS_VLAN_INFO_CFI_MASK) >> 3) +#define NDIS_VLAN_INFO_PRI(inf) ((inf) & NDIS_VLAN_INFO_PRI_MASK) + +/* Reception checksum */ +#define NDIS_RXCSUM_INFO_SIZE sizeof(uint32_t) +#define NDIS_RXCSUM_INFO_TCPCS_FAILED 0x0001 +#define NDIS_RXCSUM_INFO_UDPCS_FAILED 0x0002 +#define NDIS_RXCSUM_INFO_IPCS_FAILED 0x0004 +#define NDIS_RXCSUM_INFO_TCPCS_OK 0x0008 +#define NDIS_RXCSUM_INFO_UDPCS_OK 0x0010 +#define NDIS_RXCSUM_INFO_IPCS_OK 0x0020 +#define NDIS_RXCSUM_INFO_LOOPBACK 0x0040 +#define NDIS_RXCSUM_INFO_TCPCS_INVAL 0x0080 +#define NDIS_RXCSUM_INFO_IPCS_INVAL 0x0100 + +/* LSOv2 */ +#define NDIS_LSO2_INFO_SIZE sizeof(uint32_t) +#define NDIS_LSO2_INFO_MSS_MASK 0x000fffff +#define NDIS_LSO2_INFO_THOFF_MASK 0x3ff00000 +#define NDIS_LSO2_INFO_ISLSO2 0x40000000 +#define NDIS_LSO2_INFO_ISIPV6 0x80000000 + +#define NDIS_LSO2_INFO_MAKE(thoff, mss) \ + ((((uint32_t)(mss)) & NDIS_LSO2_INFO_MSS_MASK) | \ + ((((uint32_t)(thoff)) & 0x3ff) << 20) | \ + 
NDIS_LSO2_INFO_ISLSO2) + +#define NDIS_LSO2_INFO_MAKEIPV4(thoff, mss) \ + NDIS_LSO2_INFO_MAKE((thoff), (mss)) + +#define NDIS_LSO2_INFO_MAKEIPV6(thoff, mss) \ + (NDIS_LSO2_INFO_MAKE((thoff), (mss)) | NDIS_LSO2_INFO_ISIPV6) + +/* Transmission checksum */ +#define NDIS_TXCSUM_INFO_SIZE sizeof(uint32_t) +#define NDIS_TXCSUM_INFO_IPV4 0x00000001 +#define NDIS_TXCSUM_INFO_IPV6 0x00000002 +#define NDIS_TXCSUM_INFO_TCPCS 0x00000004 +#define NDIS_TXCSUM_INFO_UDPCS 0x00000008 +#define NDIS_TXCSUM_INFO_IPCS 0x00000010 +#define NDIS_TXCSUM_INFO_THOFF 0x03ff0000 + +#define NDIS_TXCSUM_INFO_MKL4CS(thoff, flag) \ + ((((uint32_t)(thoff)) << 16) | (flag)) + +#define NDIS_TXCSUM_INFO_MKTCPCS(thoff) \ + NDIS_TXCSUM_INFO_MKL4CS((thoff), NDIS_TXCSUM_INFO_TCPCS) + +#define NDIS_TXCSUM_INFO_MKUDPCS(thoff) \ + NDIS_TXCSUM_INFO_MKL4CS((thoff), NDIS_TXCSUM_INFO_UDPCS) + +#endif /* !_NET_NDIS_H_ */ diff --git a/src/spdk/dpdk/drivers/net/netvsc/rndis.h b/src/spdk/dpdk/drivers/net/netvsc/rndis.h new file mode 100644 index 000000000..eac9a99fd --- /dev/null +++ b/src/spdk/dpdk/drivers/net/netvsc/rndis.h @@ -0,0 +1,414 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2018 Microsoft Corp. + * Copyright (c) 2010 Jonathan Armani + * Copyright (c) 2010 Fabien Romano + * Copyright (c) 2010 Michael Knudsen + * All rights reserved. + */ + +#ifndef _NET_RNDIS_H_ +#define _NET_RNDIS_H_ + +/* Canonical major/minor version as of 22th Aug. 2016. */ +#define RNDIS_VERSION_MAJOR 0x00000001 +#define RNDIS_VERSION_MINOR 0x00000000 + +#define RNDIS_STATUS_SUCCESS 0x00000000 +#define RNDIS_STATUS_PENDING 0x00000103 + +#define RNDIS_STATUS_ONLINE 0x40010003 +#define RNDIS_STATUS_RESET_START 0x40010004 +#define RNDIS_STATUS_RESET_END 0x40010005 +#define RNDIS_STATUS_RING_STATUS 0x40010006 +#define RNDIS_STATUS_CLOSED 0x40010007 +#define RNDIS_STATUS_WAN_LINE_UP 0x40010008 +#define RNDIS_STATUS_WAN_LINE_DOWN 0x40010009 +#define RNDIS_STATUS_WAN_FRAGMENT 0x4001000A +#define RNDIS_STATUS_MEDIA_CONNECT 0x4001000B +#define RNDIS_STATUS_MEDIA_DISCONNECT 0x4001000C +#define RNDIS_STATUS_HARDWARE_LINE_UP 0x4001000D +#define RNDIS_STATUS_HARDWARE_LINE_DOWN 0x4001000E +#define RNDIS_STATUS_INTERFACE_UP 0x4001000F +#define RNDIS_STATUS_INTERFACE_DOWN 0x40010010 +#define RNDIS_STATUS_MEDIA_BUSY 0x40010011 +#define RNDIS_STATUS_MEDIA_SPECIFIC_INDICATION 0x40010012 +#define RNDIS_STATUS_WW_INDICATION RDIA_SPECIFIC_INDICATION +#define RNDIS_STATUS_LINK_SPEED_CHANGE 0x40010013 +#define RNDIS_STATUS_NETWORK_CHANGE 0x40010018 +#define RNDIS_STATUS_TASK_OFFLOAD_CURRENT_CONFIG 0x40020006 + +#define RNDIS_STATUS_FAILURE 0xC0000001 +#define RNDIS_STATUS_RESOURCES 0xC000009A +#define RNDIS_STATUS_NOT_SUPPORTED 0xC00000BB +#define RNDIS_STATUS_CLOSING 0xC0010002 +#define RNDIS_STATUS_BAD_VERSION 0xC0010004 +#define RNDIS_STATUS_BAD_CHARACTERISTICS 0xC0010005 +#define RNDIS_STATUS_ADAPTER_NOT_FOUND 0xC0010006 +#define RNDIS_STATUS_OPEN_FAILED 0xC0010007 +#define RNDIS_STATUS_DEVICE_FAILED 0xC0010008 +#define RNDIS_STATUS_MULTICAST_FULL 0xC0010009 +#define RNDIS_STATUS_MULTICAST_EXISTS 0xC001000A +#define RNDIS_STATUS_MULTICAST_NOT_FOUND 0xC001000B +#define RNDIS_STATUS_REQUEST_ABORTED 0xC001000C +#define RNDIS_STATUS_RESET_IN_PROGRESS 0xC001000D +#define RNDIS_STATUS_CLOSING_INDICATING 0xC001000E +#define RNDIS_STATUS_INVALID_PACKET 0xC001000F +#define RNDIS_STATUS_OPEN_LIST_FULL 0xC0010010 +#define RNDIS_STATUS_ADAPTER_NOT_READY 0xC0010011 +#define RNDIS_STATUS_ADAPTER_NOT_OPEN 0xC0010012 +#define RNDIS_STATUS_NOT_INDICATING 0xC0010013 +#define 
RNDIS_STATUS_INVALID_LENGTH 0xC0010014 +#define RNDIS_STATUS_INVALID_DATA 0xC0010015 +#define RNDIS_STATUS_BUFFER_TOO_SHORT 0xC0010016 +#define RNDIS_STATUS_INVALID_OID 0xC0010017 +#define RNDIS_STATUS_ADAPTER_REMOVED 0xC0010018 +#define RNDIS_STATUS_UNSUPPORTED_MEDIA 0xC0010019 +#define RNDIS_STATUS_GROUP_ADDRESS_IN_US 0xC001001A +#define RNDIS_STATUS_FILE_NOT_FOUND 0xC001001B +#define RNDIS_STATUS_ERROR_READING_FILE 0xC001001C +#define RNDIS_STATUS_ALREADY_MAPPED 0xC001001D +#define RNDIS_STATUS_RESOURCE_CONFLICT 0xC001001E +#define RNDIS_STATUS_NO_CABLE 0xC001001F + +#define OID_GEN_SUPPORTED_LIST 0x00010101 +#define OID_GEN_HARDWARE_STATUS 0x00010102 +#define OID_GEN_MEDIA_SUPPORTED 0x00010103 +#define OID_GEN_MEDIA_IN_USE 0x00010104 +#define OID_GEN_MAXIMUM_LOOKAHEAD 0x00010105 +#define OID_GEN_MAXIMUM_FRAME_SIZE 0x00010106 +#define OID_GEN_LINK_SPEED 0x00010107 +#define OID_GEN_TRANSMIT_BUFFER_SPACE 0x00010108 +#define OID_GEN_RECEIVE_BUFFER_SPACE 0x00010109 +#define OID_GEN_TRANSMIT_BLOCK_SIZE 0x0001010A +#define OID_GEN_RECEIVE_BLOCK_SIZE 0x0001010B +#define OID_GEN_VENDOR_ID 0x0001010C +#define OID_GEN_VENDOR_DESCRIPTION 0x0001010D +#define OID_GEN_CURRENT_PACKET_FILTER 0x0001010E +#define OID_GEN_CURRENT_LOOKAHEAD 0x0001010F +#define OID_GEN_DRIVER_VERSION 0x00010110 +#define OID_GEN_MAXIMUM_TOTAL_SIZE 0x00010111 +#define OID_GEN_PROTOCOL_OPTIONS 0x00010112 +#define OID_GEN_MAC_OPTIONS 0x00010113 +#define OID_GEN_MEDIA_CONNECT_STATUS 0x00010114 +#define OID_GEN_MAXIMUM_SEND_PACKETS 0x00010115 +#define OID_GEN_VENDOR_DRIVER_VERSION 0x00010116 +#define OID_GEN_SUPPORTED_GUIDS 0x00010117 +#define OID_GEN_NETWORK_LAYER_ADDRESSES 0x00010118 +#define OID_GEN_TRANSPORT_HEADER_OFFSET 0x00010119 +#define OID_GEN_RECEIVE_SCALE_CAPABILITIES 0x00010203 +#define OID_GEN_RECEIVE_SCALE_PARAMETERS 0x00010204 +#define OID_GEN_MACHINE_NAME 0x0001021A +#define OID_GEN_RNDIS_CONFIG_PARAMETER 0x0001021B +#define OID_GEN_VLAN_ID 0x0001021C + +#define OID_802_3_PERMANENT_ADDRESS 0x01010101 +#define OID_802_3_CURRENT_ADDRESS 0x01010102 +#define OID_802_3_MULTICAST_LIST 0x01010103 +#define OID_802_3_MAXIMUM_LIST_SIZE 0x01010104 +#define OID_802_3_MAC_OPTIONS 0x01010105 +#define OID_802_3_RCV_ERROR_ALIGNMENT 0x01020101 +#define OID_802_3_XMIT_ONE_COLLISION 0x01020102 +#define OID_802_3_XMIT_MORE_COLLISIONS 0x01020103 +#define OID_802_3_XMIT_DEFERRED 0x01020201 +#define OID_802_3_XMIT_MAX_COLLISIONS 0x01020202 +#define OID_802_3_RCV_OVERRUN 0x01020203 +#define OID_802_3_XMIT_UNDERRUN 0x01020204 +#define OID_802_3_XMIT_HEARTBEAT_FAILURE 0x01020205 +#define OID_802_3_XMIT_TIMES_CRS_LOST 0x01020206 +#define OID_802_3_XMIT_LATE_COLLISIONS 0x01020207 + +#define OID_TCP_OFFLOAD_PARAMETERS 0xFC01020C +#define OID_TCP_OFFLOAD_HARDWARE_CAPABILITIES 0xFC01020D + +#define RNDIS_MEDIUM_802_3 0x00000000 + +/* Device flags */ +#define RNDIS_DF_CONNECTIONLESS 0x00000001 +#define RNDIS_DF_CONNECTION_ORIENTED 0x00000002 + +/* + * Common RNDIS message header. + */ +struct rndis_msghdr { + uint32_t type; + uint32_t len; +}; + +/* + * RNDIS data message + */ +#define RNDIS_PACKET_MSG 0x00000001 + +struct rndis_packet_msg { + uint32_t type; + uint32_t len; + uint32_t dataoffset; + uint32_t datalen; + uint32_t oobdataoffset; + uint32_t oobdatalen; + uint32_t oobdataelements; + uint32_t pktinfooffset; + uint32_t pktinfolen; + uint32_t vchandle; + uint32_t reserved; +}; + +/* + * Minimum value for dataoffset, oobdataoffset, and + * pktinfooffset. 
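/*
 * Editor's illustrative sketch (not part of the original driver): frame a
 * single RNDIS data message whose payload immediately follows the header,
 * with no OOB data and no per-packet info.  Per the note above, the
 * offsets are relative to the dataoffset field; the helper macros used
 * here are defined just below.
 */
#include <stdint.h>
#include <string.h>

static uint32_t
example_fill_packet_msg(void *buf, const void *frame, uint32_t dlen)
{
	struct rndis_packet_msg *pkt = buf;

	memset(pkt, 0, sizeof(*pkt));
	pkt->type = RNDIS_PACKET_MSG;
	pkt->len = sizeof(*pkt) + dlen;

	/* Payload starts right after the fixed header. */
	pkt->dataoffset = RNDIS_PACKET_MSG_OFFSET_MIN;
	pkt->datalen = dlen;

	memcpy((uint8_t *)buf + sizeof(*pkt), frame, dlen);
	return pkt->len;	/* bytes to hand to the transport */
}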
+ */ +#define RNDIS_PACKET_MSG_OFFSET_MIN \ + (sizeof(struct rndis_packet_msg) - \ + offsetof(struct rndis_packet_msg, dataoffset)) + +/* Offset from the beginning of rndis_packet_msg. */ +#define RNDIS_PACKET_MSG_OFFSET_ABS(ofs) \ + ((ofs) + offsetof(struct rndis_packet_msg, dataoffset)) + +#define RNDIS_PACKET_MSG_OFFSET_ALIGN 4 +#define RNDIS_PACKET_MSG_OFFSET_ALIGNMASK \ + (RNDIS_PACKET_MSG_OFFSET_ALIGN - 1) + +/* Per-packet-info for RNDIS data message */ +struct rndis_pktinfo { + uint32_t size; + uint32_t type; /* NDIS_PKTINFO_TYPE_ */ + uint32_t offset; + uint8_t data[]; +}; + +#define RNDIS_PKTINFO_OFFSET \ + offsetof(struct rndis_pktinfo, data[0]) +#define RNDIS_PKTINFO_SIZE_ALIGN 4 +#define RNDIS_PKTINFO_SIZE_ALIGNMASK (RNDIS_PKTINFO_SIZE_ALIGN - 1) + +#define NDIS_PKTINFO_TYPE_CSUM 0 +#define NDIS_PKTINFO_TYPE_IPSEC 1 +#define NDIS_PKTINFO_TYPE_LSO 2 +#define NDIS_PKTINFO_TYPE_CLASSIFY 3 +/* reserved 4 */ +#define NDIS_PKTINFO_TYPE_SGLIST 5 +#define NDIS_PKTINFO_TYPE_VLAN 6 +#define NDIS_PKTINFO_TYPE_ORIG 7 +#define NDIS_PKTINFO_TYPE_PKT_CANCELID 8 +#define NDIS_PKTINFO_TYPE_ORIG_NBLIST 9 +#define NDIS_PKTINFO_TYPE_CACHE_NBLIST 10 +#define NDIS_PKTINFO_TYPE_PKT_PAD 11 + +/* RNDIS extension */ + +/* Per-packet hash info */ +#define NDIS_HASH_INFO_SIZE sizeof(uint32_t) +#define NDIS_PKTINFO_TYPE_HASHINF NDIS_PKTINFO_TYPE_ORIG_NBLIST +/* NDIS_HASH_ */ + +/* Per-packet hash value */ +#define NDIS_HASH_VALUE_SIZE sizeof(uint32_t) +#define NDIS_PKTINFO_TYPE_HASHVAL NDIS_PKTINFO_TYPE_PKT_CANCELID + +/* Per-packet-info size */ +#define RNDIS_PKTINFO_SIZE(dlen) offsetof(struct rndis_pktinfo, data[dlen]) + +/* + * RNDIS control messages + */ + +/* + * Common header for RNDIS completion messages. + * + * NOTE: It does not apply to RNDIS_RESET_CMPLT. + */ +struct rndis_comp_hdr { + uint32_t type; + uint32_t len; + uint32_t rid; + uint32_t status; +}; + +/* Initialize the device. */ +#define RNDIS_INITIALIZE_MSG 0x00000002 +#define RNDIS_INITIALIZE_CMPLT 0x80000002 + +struct rndis_init_req { + uint32_t type; + uint32_t len; + uint32_t rid; + uint32_t ver_major; + uint32_t ver_minor; + uint32_t max_xfersz; +}; + +struct rndis_init_comp { + uint32_t type; + uint32_t len; + uint32_t rid; + uint32_t status; + uint32_t ver_major; + uint32_t ver_minor; + uint32_t devflags; + uint32_t medium; + uint32_t pktmaxcnt; + uint32_t pktmaxsz; + uint32_t align; + uint32_t aflistoffset; + uint32_t aflistsz; +}; + +#define RNDIS_INIT_COMP_SIZE_MIN \ + offsetof(struct rndis_init_comp, aflistsz) + +/* Halt the device. No response sent. */ +#define RNDIS_HALT_MSG 0x00000003 + +struct rndis_halt_req { + uint32_t type; + uint32_t len; + uint32_t rid; +}; + +/* Send a query object. */ +#define RNDIS_QUERY_MSG 0x00000004 +#define RNDIS_QUERY_CMPLT 0x80000004 + +struct rndis_query_req { + uint32_t type; + uint32_t len; + uint32_t rid; + uint32_t oid; + uint32_t infobuflen; + uint32_t infobufoffset; + uint32_t devicevchdl; +}; + +#define RNDIS_QUERY_REQ_INFOBUFOFFSET \ + (sizeof(struct rndis_query_req) - \ + offsetof(struct rndis_query_req, rid)) + +struct rndis_query_comp { + uint32_t type; + uint32_t len; + uint32_t rid; + uint32_t status; + uint32_t infobuflen; + uint32_t infobufoffset; +}; + +/* infobuf offset from the beginning of rndis_query_comp. */ +#define RNDIS_QUERY_COMP_INFOBUFOFFSET_ABS(ofs) \ + ((ofs) + offsetof(struct rndis_query_comp, rid)) + +/* Send a set object request. 
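/*
 * Editor's illustrative sketch (not part of the original driver): walk the
 * per-packet-info area of a received RNDIS data message and pull out the
 * 802.1Q tag, using the rndis_pktinfo layout above and the
 * NDIS_VLAN_INFO_* helpers from ndis.h.  Bounds checking is kept minimal
 * for brevity.
 */
#include <stdint.h>
#include <string.h>

static int
example_find_vlan(const struct rndis_packet_msg *pkt, uint16_t *vlan_id)
{
	const uint8_t *base = (const uint8_t *)pkt;
	uint32_t off = RNDIS_PACKET_MSG_OFFSET_ABS(pkt->pktinfooffset);
	uint32_t left = pkt->pktinfolen;

	while (left >= sizeof(struct rndis_pktinfo)) {
		const struct rndis_pktinfo *pi =
		    (const struct rndis_pktinfo *)(base + off);

		if (pi->size < sizeof(*pi) || pi->size > left)
			return -1;	/* malformed record */

		if (pi->type == NDIS_PKTINFO_TYPE_VLAN &&
		    pi->size >= RNDIS_PKTINFO_SIZE(NDIS_VLAN_INFO_SIZE)) {
			uint32_t info;

			memcpy(&info, pi->data, sizeof(info));
			*vlan_id = NDIS_VLAN_INFO_ID(info);
			return 0;
		}
		off += pi->size;
		left -= pi->size;
	}
	return -1;	/* no VLAN record present */
}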
*/ +#define RNDIS_SET_MSG 0x00000005 +#define RNDIS_SET_CMPLT 0x80000005 + +struct rndis_set_req { + uint32_t type; + uint32_t len; + uint32_t rid; + uint32_t oid; + uint32_t infobuflen; + uint32_t infobufoffset; + uint32_t devicevchdl; +}; + +#define RNDIS_SET_REQ_INFOBUFOFFSET \ + (sizeof(struct rndis_set_req) - \ + offsetof(struct rndis_set_req, rid)) + +struct rndis_set_comp { + uint32_t type; + uint32_t len; + uint32_t rid; + uint32_t status; +}; + +/* + * Parameter used by OID_GEN_RNDIS_CONFIG_PARAMETER. + */ +#define RNDIS_SET_PARAM_NUMERIC 0x00000000 +#define RNDIS_SET_PARAM_STRING 0x00000002 + +struct rndis_set_parameter { + uint32_t nameoffset; + uint32_t namelen; + uint32_t type; + uint32_t valueoffset; + uint32_t valuelen; +}; + +/* Perform a soft reset on the device. */ +#define RNDIS_RESET_MSG 0x00000006 +#define RNDIS_RESET_CMPLT 0x80000006 + +struct rndis_reset_req { + uint32_t type; + uint32_t len; + uint32_t rid; +}; + +struct rndis_reset_comp { + uint32_t type; + uint32_t len; + uint32_t status; + uint32_t adrreset; +}; + +/* 802.3 link-state or undefined message error. Sent by device. */ +#define RNDIS_INDICATE_STATUS_MSG 0x00000007 + +struct rndis_status_msg { + uint32_t type; + uint32_t len; + uint32_t status; + uint32_t stbuflen; + uint32_t stbufoffset; + /* rndis_diag_info */ +}; + +/* stbuf offset from the beginning of rndis_status_msg. */ +#define RNDIS_STBUFOFFSET_ABS(ofs) \ + ((ofs) + offsetof(struct rndis_status_msg, status)) + +/* + * Immediately after rndis_status_msg.stbufoffset, if a control + * message is malformatted, or a packet message contains inappropriate + * content. + */ +struct rndis_diag_info { + uint32_t diagstatus; + uint32_t erroffset; +}; + +/* Keepalive message. May be sent by device. */ +#define RNDIS_KEEPALIVE_MSG 0x00000008 +#define RNDIS_KEEPALIVE_CMPLT 0x80000008 + +struct rndis_keepalive_req { + uint32_t type; + uint32_t len; + uint32_t rid; +}; + +struct rndis_keepalive_comp { + uint32_t type; + uint32_t len; + uint32_t rid; + uint32_t status; +}; + +/* Packet filter bits used by OID_GEN_CURRENT_PACKET_FILTER */ +#define NDIS_PACKET_TYPE_NONE 0x00000000 +#define NDIS_PACKET_TYPE_DIRECTED 0x00000001 +#define NDIS_PACKET_TYPE_MULTICAST 0x00000002 +#define NDIS_PACKET_TYPE_ALL_MULTICAST 0x00000004 +#define NDIS_PACKET_TYPE_BROADCAST 0x00000008 +#define NDIS_PACKET_TYPE_SOURCE_ROUTING 0x00000010 +#define NDIS_PACKET_TYPE_PROMISCUOUS 0x00000020 +#define NDIS_PACKET_TYPE_SMT 0x00000040 +#define NDIS_PACKET_TYPE_ALL_LOCAL 0x00000080 +#define NDIS_PACKET_TYPE_GROUP 0x00001000 +#define NDIS_PACKET_TYPE_ALL_FUNCTIONAL 0x00002000 +#define NDIS_PACKET_TYPE_FUNCTIONAL 0x00004000 +#define NDIS_PACKET_TYPE_MAC_FRAME 0x00008000 + +#endif /* !_NET_RNDIS_H_ */ diff --git a/src/spdk/dpdk/drivers/net/netvsc/rte_pmd_netvsc_version.map b/src/spdk/dpdk/drivers/net/netvsc/rte_pmd_netvsc_version.map new file mode 100644 index 000000000..f9f17e4f6 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/netvsc/rte_pmd_netvsc_version.map @@ -0,0 +1,3 @@ +DPDK_20.0 { + local: *; +}; diff --git a/src/spdk/dpdk/drivers/net/nfb/Makefile b/src/spdk/dpdk/drivers/net/nfb/Makefile new file mode 100644 index 000000000..e92d29dcd --- /dev/null +++ b/src/spdk/dpdk/drivers/net/nfb/Makefile @@ -0,0 +1,40 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2019 Cesnet +# Copyright(c) 2019 Netcope Technologies, a.s. +# All rights reserved. 
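/*
 * Editor's illustrative sketch (not part of the original driver), rounding
 * off the rndis.h control-message definitions above: build an
 * RNDIS_SET_MSG that programs OID_GEN_CURRENT_PACKET_FILTER for directed,
 * all-multicast and broadcast reception.  The info buffer (one uint32_t)
 * is appended right after the request header, with infobufoffset taken
 * relative to the rid field as RNDIS_SET_REQ_INFOBUFOFFSET requires.
 */
#include <stdint.h>
#include <string.h>

static uint32_t
example_fill_set_filter(void *buf, uint32_t rid)
{
	struct rndis_set_req *req = buf;
	uint32_t filter = NDIS_PACKET_TYPE_DIRECTED |
	    NDIS_PACKET_TYPE_ALL_MULTICAST | NDIS_PACKET_TYPE_BROADCAST;

	memset(req, 0, sizeof(*req));
	req->type = RNDIS_SET_MSG;
	req->len = sizeof(*req) + sizeof(filter);
	req->rid = rid;		/* echoed back in rndis_set_comp */
	req->oid = OID_GEN_CURRENT_PACKET_FILTER;
	req->infobuflen = sizeof(filter);
	req->infobufoffset = RNDIS_SET_REQ_INFOBUFOFFSET;

	memcpy((uint8_t *)buf + sizeof(*req), &filter, sizeof(filter));
	return req->len;	/* bytes to hand to the transport */
}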
+ +include $(RTE_SDK)/mk/rte.vars.mk + +# +# library name +# +LIB = librte_pmd_nfb.a + +INCLUDES :=-I$(SRCDIR) + + +CFLAGS += -O3 +CFLAGS += $(WERROR_FLAGS) +CFLAGS += $(shell command -v pkg-config > /dev/null 2>&1 && pkg-config --cflags netcope-common) +LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_kvargs +LDLIBS += -lrte_ethdev -lrte_net +LDLIBS += -lrte_bus_pci +LDLIBS += $(shell command -v pkg-config > /dev/null 2>&1 && pkg-config --libs netcope-common) + +EXPORT_MAP := rte_pmd_nfb_version.map + +# +# all source are stored in SRCS-y +# +SRCS-$(CONFIG_RTE_LIBRTE_NFB_PMD) += nfb_ethdev.c +SRCS-$(CONFIG_RTE_LIBRTE_NFB_PMD) += nfb_rx.c +SRCS-$(CONFIG_RTE_LIBRTE_NFB_PMD) += nfb_tx.c +SRCS-$(CONFIG_RTE_LIBRTE_NFB_PMD) += nfb_stats.c +SRCS-$(CONFIG_RTE_LIBRTE_NFB_PMD) += nfb_rxmode.c + +# +# Export include files +# +SYMLINK-y-include += + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/src/spdk/dpdk/drivers/net/nfb/meson.build b/src/spdk/dpdk/drivers/net/nfb/meson.build new file mode 100644 index 000000000..d53e8eca7 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/nfb/meson.build @@ -0,0 +1,11 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2019 Cesnet +# Copyright(c) 2019 Netcope Technologies, a.s. +# All rights reserved. + +dep = dependency('netcope-common', required: false) +reason = 'missing dependency, "libnfb"' +build = dep.found() +ext_deps += dep + +sources = files('nfb_rx.c', 'nfb_tx.c', 'nfb_stats.c', 'nfb_ethdev.c', 'nfb_rxmode.c') diff --git a/src/spdk/dpdk/drivers/net/nfb/nfb.h b/src/spdk/dpdk/drivers/net/nfb/nfb.h new file mode 100644 index 000000000..59d3ab498 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/nfb/nfb.h @@ -0,0 +1,57 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019 Cesnet + * Copyright(c) 2019 Netcope Technologies, a.s. + * All rights reserved. + */ + +#ifndef _NFB_H_ +#define _NFB_H_ + +#include +#include +#include +#include + +#include "nfb_rx.h" +#include "nfb_tx.h" + +/* PCI Vendor ID */ +#define PCI_VENDOR_ID_NETCOPE 0x1b26 +#define PCI_VENDOR_ID_SILICOM 0x1c2c + +/* PCI Device IDs */ +#define PCI_DEVICE_ID_NFB_40G2 0xcb80 +#define PCI_DEVICE_ID_NFB_100G2 0xc2c1 +#define PCI_DEVICE_ID_NFB_200G2QL 0xc250 +#define PCI_DEVICE_ID_FB2CGG3 0x00d0 +#define PCI_DEVICE_ID_FB2CGG3D 0xc240 + +/* Max index of ndp rx/tx queues */ +#define RTE_ETH_NDP_MAX_RX_QUEUES 32 +#define RTE_ETH_NDP_MAX_TX_QUEUES 32 + +/* Max index of rx/tx dmas */ +#define RTE_MAX_NC_RXMAC 256 +#define RTE_MAX_NC_TXMAC 256 + +#define RTE_NFB_DRIVER_NAME net_nfb + +/* Device arguments */ +#define TIMESTAMP_ARG "timestamp" +static const char * const VALID_KEYS[] = {TIMESTAMP_ARG, NULL}; + +struct pmd_internals { + uint16_t max_rxmac; + uint16_t max_txmac; + struct nc_rxmac *rxmac[RTE_MAX_NC_RXMAC]; + struct nc_txmac *txmac[RTE_MAX_NC_TXMAC]; + + char nfb_dev[PATH_MAX]; + struct nfb_device *nfb; + /* Place to remember if filter was promiscuous or filtering by table, + * when disabling allmulticast + */ + enum nc_rxmac_mac_filter rx_filter_original; +}; + +#endif /* _NFB_H_ */ diff --git a/src/spdk/dpdk/drivers/net/nfb/nfb_ethdev.c b/src/spdk/dpdk/drivers/net/nfb/nfb_ethdev.c new file mode 100644 index 000000000..b039ab6fc --- /dev/null +++ b/src/spdk/dpdk/drivers/net/nfb/nfb_ethdev.c @@ -0,0 +1,604 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019 Cesnet + * Copyright(c) 2019 Netcope Technologies, a.s. + * All rights reserved. 
+ */ + +#include +#include +#include +#include + +#include +#include + +#include "nfb_stats.h" +#include "nfb_rx.h" +#include "nfb_tx.h" +#include "nfb_rxmode.h" +#include "nfb.h" + +/** + * Default MAC addr + */ +static const struct rte_ether_addr eth_addr = { + .addr_bytes = { 0x00, 0x11, 0x17, 0x00, 0x00, 0x00 } +}; + +/** + * Open all RX DMA queues + * + * @param dev + * Pointer to nfb device. + * @param[out] rxmac + * Pointer to output array of nc_rxmac + * @param[out] max_rxmac + * Pointer to output max index of rxmac + */ +static void +nfb_nc_rxmac_init(struct nfb_device *nfb, + struct nc_rxmac *rxmac[RTE_MAX_NC_RXMAC], + uint16_t *max_rxmac) +{ + *max_rxmac = 0; + while ((rxmac[*max_rxmac] = nc_rxmac_open_index(nfb, *max_rxmac))) + ++(*max_rxmac); +} + +/** + * Open all TX DMA queues + * + * @param dev + * Pointer to nfb device. + * @param[out] txmac + * Pointer to output array of nc_txmac + * @param[out] max_rxmac + * Pointer to output max index of txmac + */ +static void +nfb_nc_txmac_init(struct nfb_device *nfb, + struct nc_txmac *txmac[RTE_MAX_NC_TXMAC], + uint16_t *max_txmac) +{ + *max_txmac = 0; + while ((txmac[*max_txmac] = nc_txmac_open_index(nfb, *max_txmac))) + ++(*max_txmac); +} + +/** + * Close all RX DMA queues + * + * @param rxmac + * Pointer to array of nc_rxmac + * @param max_rxmac + * Maximum index of rxmac + */ +static void +nfb_nc_rxmac_deinit(struct nc_rxmac *rxmac[RTE_MAX_NC_RXMAC], + uint16_t max_rxmac) +{ + for (; max_rxmac > 0; --max_rxmac) { + nc_rxmac_close(rxmac[max_rxmac]); + rxmac[max_rxmac] = NULL; + } +} + +/** + * Close all TX DMA queues + * + * @param txmac + * Pointer to array of nc_txmac + * @param max_txmac + * Maximum index of txmac + */ +static void +nfb_nc_txmac_deinit(struct nc_txmac *txmac[RTE_MAX_NC_TXMAC], + uint16_t max_txmac) +{ + for (; max_txmac > 0; --max_txmac) { + nc_txmac_close(txmac[max_txmac]); + txmac[max_txmac] = NULL; + } +} + +/** + * DPDK callback to start the device. + * + * Start device by starting all configured queues. + * + * @param dev + * Pointer to Ethernet device structure. + * + * @return + * 0 on success, a negative errno value otherwise. + */ +static int +nfb_eth_dev_start(struct rte_eth_dev *dev) +{ + int ret; + uint16_t i; + uint16_t nb_rx = dev->data->nb_rx_queues; + uint16_t nb_tx = dev->data->nb_tx_queues; + + for (i = 0; i < nb_rx; i++) { + ret = nfb_eth_rx_queue_start(dev, i); + if (ret != 0) + goto err_rx; + } + + for (i = 0; i < nb_tx; i++) { + ret = nfb_eth_tx_queue_start(dev, i); + if (ret != 0) + goto err_tx; + } + + return 0; + +err_tx: + for (i = 0; i < nb_tx; i++) + nfb_eth_tx_queue_stop(dev, i); +err_rx: + for (i = 0; i < nb_rx; i++) + nfb_eth_rx_queue_stop(dev, i); + return ret; +} + +/** + * DPDK callback to stop the device. + * + * Stop device by stopping all configured queues. + * + * @param dev + * Pointer to Ethernet device structure. + */ +static void +nfb_eth_dev_stop(struct rte_eth_dev *dev) +{ + uint16_t i; + uint16_t nb_rx = dev->data->nb_rx_queues; + uint16_t nb_tx = dev->data->nb_tx_queues; + + for (i = 0; i < nb_tx; i++) + nfb_eth_tx_queue_stop(dev, i); + + for (i = 0; i < nb_rx; i++) + nfb_eth_rx_queue_stop(dev, i); +} + +/** + * DPDK callback for Ethernet device configuration. + * + * @param dev + * Pointer to Ethernet device structure. + * + * @return + * 0 on success, a negative errno value otherwise. + */ +static int +nfb_eth_dev_configure(struct rte_eth_dev *dev __rte_unused) +{ + return 0; +} + +/** + * DPDK callback to get information about the device. 
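/*
 * Editor's minimal sketch (not part of the original driver) of the
 * application-side sequence that lands in the callbacks above:
 * rte_eth_dev_configure() -> queue setup -> rte_eth_dev_start().  Port id,
 * queue counts, descriptor counts and the mempool are placeholders.
 */
#include <string.h>
#include <rte_ethdev.h>
#include <rte_lcore.h>
#include <rte_mempool.h>

static int
example_port_bringup(uint16_t port_id, struct rte_mempool *mb_pool)
{
	struct rte_eth_conf conf;
	int ret;

	memset(&conf, 0, sizeof(conf));

	/* One RX and one TX queue; ends up in nfb_eth_dev_configure(). */
	ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
	if (ret != 0)
		return ret;

	/* nb_desc is ignored by this PMD's queue setup (further below). */
	ret = rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(),
				     NULL, mb_pool);
	if (ret != 0)
		return ret;
	ret = rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(), NULL);
	if (ret != 0)
		return ret;

	/* Starts every configured NDP queue via nfb_eth_dev_start(). */
	return rte_eth_dev_start(port_id);
}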
+ * + * @param dev + * Pointer to Ethernet device structure. + * @param[out] info + * Info structure output buffer. + */ +static int +nfb_eth_dev_info(struct rte_eth_dev *dev, + struct rte_eth_dev_info *dev_info) +{ + dev_info->max_mac_addrs = 1; + dev_info->max_rx_pktlen = (uint32_t)-1; + dev_info->max_rx_queues = dev->data->nb_rx_queues; + dev_info->max_tx_queues = dev->data->nb_tx_queues; + dev_info->speed_capa = ETH_LINK_SPEED_100G; + + return 0; +} + +/** + * DPDK callback to close the device. + * + * Destroy all queues and objects, free memory. + * + * @param dev + * Pointer to Ethernet device structure. + */ +static void +nfb_eth_dev_close(struct rte_eth_dev *dev) +{ + struct pmd_internals *internals = dev->data->dev_private; + uint16_t i; + uint16_t nb_rx = dev->data->nb_rx_queues; + uint16_t nb_tx = dev->data->nb_tx_queues; + + nfb_eth_dev_stop(dev); + + nfb_nc_rxmac_deinit(internals->rxmac, internals->max_rxmac); + nfb_nc_txmac_deinit(internals->txmac, internals->max_txmac); + + for (i = 0; i < nb_rx; i++) { + nfb_eth_rx_queue_release(dev->data->rx_queues[i]); + dev->data->rx_queues[i] = NULL; + } + dev->data->nb_rx_queues = 0; + for (i = 0; i < nb_tx; i++) { + nfb_eth_tx_queue_release(dev->data->tx_queues[i]); + dev->data->tx_queues[i] = NULL; + } + dev->data->nb_tx_queues = 0; + + rte_free(dev->data->mac_addrs); + dev->data->mac_addrs = NULL; +} + +/** + * DPDK callback to retrieve physical link information. + * + * @param dev + * Pointer to Ethernet device structure. + * @param[out] link + * Storage for current link status. + * + * @return + * 0 on success, a negative errno value otherwise. + */ +static int +nfb_eth_link_update(struct rte_eth_dev *dev, + int wait_to_complete __rte_unused) +{ + uint16_t i; + struct nc_rxmac_status status; + struct rte_eth_link link; + memset(&link, 0, sizeof(link)); + + struct pmd_internals *internals = dev->data->dev_private; + + status.speed = MAC_SPEED_UNKNOWN; + + link.link_speed = ETH_SPEED_NUM_NONE; + link.link_status = ETH_LINK_DOWN; + link.link_duplex = ETH_LINK_FULL_DUPLEX; + link.link_autoneg = ETH_LINK_SPEED_FIXED; + + if (internals->rxmac[0] != NULL) { + nc_rxmac_read_status(internals->rxmac[0], &status); + + switch (status.speed) { + case MAC_SPEED_10G: + link.link_speed = ETH_SPEED_NUM_10G; + break; + case MAC_SPEED_40G: + link.link_speed = ETH_SPEED_NUM_40G; + break; + case MAC_SPEED_100G: + link.link_speed = ETH_SPEED_NUM_100G; + break; + default: + link.link_speed = ETH_SPEED_NUM_NONE; + break; + } + } + + for (i = 0; i < internals->max_rxmac; ++i) { + nc_rxmac_read_status(internals->rxmac[i], &status); + + if (status.enabled && status.link_up) { + link.link_status = ETH_LINK_UP; + break; + } + } + + rte_eth_linkstatus_set(dev, &link); + + return 0; +} + +/** + * DPDK callback to bring the link UP. + * + * @param dev + * Pointer to Ethernet device structure. + * + * @return + * 0 on success, a negative errno value otherwise. + */ +static int +nfb_eth_dev_set_link_up(struct rte_eth_dev *dev) +{ + struct pmd_internals *internals = (struct pmd_internals *) + dev->data->dev_private; + + uint16_t i; + for (i = 0; i < internals->max_rxmac; ++i) + nc_rxmac_enable(internals->rxmac[i]); + + for (i = 0; i < internals->max_txmac; ++i) + nc_txmac_enable(internals->txmac[i]); + + return 0; +} + +/** + * DPDK callback to bring the link DOWN. + * + * @param dev + * Pointer to Ethernet device structure. + * + * @return + * 0 on success, a negative errno value otherwise. 
+ */ +static int +nfb_eth_dev_set_link_down(struct rte_eth_dev *dev) +{ + struct pmd_internals *internals = (struct pmd_internals *) + dev->data->dev_private; + + uint16_t i; + for (i = 0; i < internals->max_rxmac; ++i) + nc_rxmac_disable(internals->rxmac[i]); + + for (i = 0; i < internals->max_txmac; ++i) + nc_txmac_disable(internals->txmac[i]); + + return 0; +} + +/** + * DPDK callback to set primary MAC address. + * + * @param dev + * Pointer to Ethernet device structure. + * @param mac_addr + * MAC address to register. + * + * @return + * 0 on success, a negative errno value otherwise. + */ +static int +nfb_eth_mac_addr_set(struct rte_eth_dev *dev, + struct rte_ether_addr *mac_addr) +{ + unsigned int i; + uint64_t mac = 0; + struct rte_eth_dev_data *data = dev->data; + struct pmd_internals *internals = (struct pmd_internals *) + data->dev_private; + + if (!rte_is_valid_assigned_ether_addr(mac_addr)) + return -EINVAL; + + for (i = 0; i < RTE_ETHER_ADDR_LEN; i++) { + mac <<= 8; + mac |= mac_addr->addr_bytes[i] & 0xFF; + } + + for (i = 0; i < internals->max_rxmac; ++i) + nc_rxmac_set_mac(internals->rxmac[i], 0, mac, 1); + + rte_ether_addr_copy(mac_addr, data->mac_addrs); + return 0; +} + +static const struct eth_dev_ops ops = { + .dev_start = nfb_eth_dev_start, + .dev_stop = nfb_eth_dev_stop, + .dev_set_link_up = nfb_eth_dev_set_link_up, + .dev_set_link_down = nfb_eth_dev_set_link_down, + .dev_close = nfb_eth_dev_close, + .dev_configure = nfb_eth_dev_configure, + .dev_infos_get = nfb_eth_dev_info, + .promiscuous_enable = nfb_eth_promiscuous_enable, + .promiscuous_disable = nfb_eth_promiscuous_disable, + .allmulticast_enable = nfb_eth_allmulticast_enable, + .allmulticast_disable = nfb_eth_allmulticast_disable, + .rx_queue_start = nfb_eth_rx_queue_start, + .rx_queue_stop = nfb_eth_rx_queue_stop, + .tx_queue_start = nfb_eth_tx_queue_start, + .tx_queue_stop = nfb_eth_tx_queue_stop, + .rx_queue_setup = nfb_eth_rx_queue_setup, + .tx_queue_setup = nfb_eth_tx_queue_setup, + .rx_queue_release = nfb_eth_rx_queue_release, + .tx_queue_release = nfb_eth_tx_queue_release, + .link_update = nfb_eth_link_update, + .stats_get = nfb_eth_stats_get, + .stats_reset = nfb_eth_stats_reset, + .mac_addr_set = nfb_eth_mac_addr_set, +}; + +/** + * DPDK callback to initialize an ethernet device + * + * @param dev + * Pointer to ethernet device structure + * + * @return + * 0 on success, a negative errno value otherwise. 
+ */ +static int +nfb_eth_dev_init(struct rte_eth_dev *dev) +{ + struct rte_eth_dev_data *data = dev->data; + struct pmd_internals *internals = (struct pmd_internals *) + data->dev_private; + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_pci_addr *pci_addr = &pci_dev->addr; + struct rte_ether_addr eth_addr_init; + struct rte_kvargs *kvlist; + + RTE_LOG(INFO, PMD, "Initializing NFB device (" PCI_PRI_FMT ")\n", + pci_addr->domain, pci_addr->bus, pci_addr->devid, + pci_addr->function); + + snprintf(internals->nfb_dev, PATH_MAX, + "/dev/nfb/by-pci-slot/" PCI_PRI_FMT, + pci_addr->domain, pci_addr->bus, pci_addr->devid, + pci_addr->function); + + /* Check validity of device args */ + if (dev->device->devargs != NULL && + dev->device->devargs->args != NULL && + strlen(dev->device->devargs->args) > 0) { + kvlist = rte_kvargs_parse(dev->device->devargs->args, + VALID_KEYS); + if (kvlist == NULL) { + RTE_LOG(ERR, PMD, "Failed to parse device arguments %s", + dev->device->devargs->args); + rte_kvargs_free(kvlist); + return -EINVAL; + } + rte_kvargs_free(kvlist); + } + + /* Let rte_eth_dev_close() release the port resources */ + dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE; + + /* + * Get number of available DMA RX and TX queues, which is maximum + * number of queues that can be created and store it in private device + * data structure. + */ + internals->nfb = nfb_open(internals->nfb_dev); + if (internals->nfb == NULL) { + RTE_LOG(ERR, PMD, "nfb_open(): failed to open %s", + internals->nfb_dev); + return -EINVAL; + } + data->nb_rx_queues = ndp_get_rx_queue_available_count(internals->nfb); + data->nb_tx_queues = ndp_get_tx_queue_available_count(internals->nfb); + + RTE_LOG(INFO, PMD, "Available NDP queues RX: %u TX: %u\n", + data->nb_rx_queues, data->nb_tx_queues); + + nfb_nc_rxmac_init(internals->nfb, + internals->rxmac, + &internals->max_rxmac); + nfb_nc_txmac_init(internals->nfb, + internals->txmac, + &internals->max_txmac); + + /* Set rx, tx burst functions */ + dev->rx_pkt_burst = nfb_eth_ndp_rx; + dev->tx_pkt_burst = nfb_eth_ndp_tx; + + /* Set function callbacks for Ethernet API */ + dev->dev_ops = &ops; + + /* Get link state */ + nfb_eth_link_update(dev, 0); + + /* Allocate space for one mac address */ + data->mac_addrs = rte_zmalloc(data->name, sizeof(struct rte_ether_addr), + RTE_CACHE_LINE_SIZE); + if (data->mac_addrs == NULL) { + RTE_LOG(ERR, PMD, "Could not alloc space for MAC address!\n"); + nfb_close(internals->nfb); + return -EINVAL; + } + + rte_eth_random_addr(eth_addr_init.addr_bytes); + eth_addr_init.addr_bytes[0] = eth_addr.addr_bytes[0]; + eth_addr_init.addr_bytes[1] = eth_addr.addr_bytes[1]; + eth_addr_init.addr_bytes[2] = eth_addr.addr_bytes[2]; + + nfb_eth_mac_addr_set(dev, ð_addr_init); + + data->promiscuous = nfb_eth_promiscuous_get(dev); + data->all_multicast = nfb_eth_allmulticast_get(dev); + internals->rx_filter_original = data->promiscuous; + + RTE_LOG(INFO, PMD, "NFB device (" + PCI_PRI_FMT ") successfully initialized\n", + pci_addr->domain, pci_addr->bus, pci_addr->devid, + pci_addr->function); + + return 0; +} + +/** + * DPDK callback to uninitialize an ethernet device + * + * @param dev + * Pointer to ethernet device structure + * + * @return + * 0 on success, a negative errno value otherwise. 
+ */ +static int +nfb_eth_dev_uninit(struct rte_eth_dev *dev) +{ + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_pci_addr *pci_addr = &pci_dev->addr; + + nfb_eth_dev_close(dev); + + RTE_LOG(INFO, PMD, "NFB device (" + PCI_PRI_FMT ") successfully uninitialized\n", + pci_addr->domain, pci_addr->bus, pci_addr->devid, + pci_addr->function); + + return 0; +} + +static const struct rte_pci_id nfb_pci_id_table[] = { + { RTE_PCI_DEVICE(PCI_VENDOR_ID_NETCOPE, PCI_DEVICE_ID_NFB_40G2) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_NETCOPE, PCI_DEVICE_ID_NFB_100G2) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_NETCOPE, PCI_DEVICE_ID_NFB_200G2QL) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_SILICOM, PCI_DEVICE_ID_FB2CGG3) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_SILICOM, PCI_DEVICE_ID_FB2CGG3D) }, + { .vendor_id = 0, } +}; + +/** + * DPDK callback to register a PCI device. + * + * This function spawns Ethernet devices out of a given PCI device. + * + * @param[in] pci_drv + * PCI driver structure (nfb_driver). + * @param[in] pci_dev + * PCI device information. + * + * @return + * 0 on success, a negative errno value otherwise. + */ +static int +nfb_eth_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, + struct rte_pci_device *pci_dev) +{ + return rte_eth_dev_pci_generic_probe(pci_dev, + sizeof(struct pmd_internals), nfb_eth_dev_init); +} + +/** + * DPDK callback to remove a PCI device. + * + * This function removes all Ethernet devices belong to a given PCI device. + * + * @param[in] pci_dev + * Pointer to the PCI device. + * + * @return + * 0 on success, the function cannot fail. + */ +static int +nfb_eth_pci_remove(struct rte_pci_device *pci_dev) +{ + return rte_eth_dev_pci_generic_remove(pci_dev, nfb_eth_dev_uninit); +} + +static struct rte_pci_driver nfb_eth_driver = { + .id_table = nfb_pci_id_table, + .probe = nfb_eth_pci_probe, + .remove = nfb_eth_pci_remove, +}; + +RTE_PMD_REGISTER_PCI(RTE_NFB_DRIVER_NAME, nfb_eth_driver); +RTE_PMD_REGISTER_PCI_TABLE(RTE_NFB_DRIVER_NAME, nfb_pci_id_table); +RTE_PMD_REGISTER_KMOD_DEP(RTE_NFB_DRIVER_NAME, "* nfb"); +RTE_PMD_REGISTER_PARAM_STRING(RTE_NFB_DRIVER_NAME, TIMESTAMP_ARG "=<0|1>"); diff --git a/src/spdk/dpdk/drivers/net/nfb/nfb_rx.c b/src/spdk/dpdk/drivers/net/nfb/nfb_rx.c new file mode 100644 index 000000000..d97179f81 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/nfb/nfb_rx.c @@ -0,0 +1,174 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019 Cesnet + * Copyright(c) 2019 Netcope Technologies, a.s. + * All rights reserved. 
+ */ + +#include + +#include "nfb_rx.h" +#include "nfb.h" + +static int +timestamp_check_handler(__rte_unused const char *key, + const char *value, __rte_unused void *opaque) +{ + if (strcmp(value, "1")) + return -1; + + return 0; +} + + +static int +nfb_check_timestamp(struct rte_devargs *devargs) +{ + struct rte_kvargs *kvlist; + + if (devargs == NULL) + return 0; + + kvlist = rte_kvargs_parse(devargs->args, NULL); + if (kvlist == NULL) + return 0; + + if (!rte_kvargs_count(kvlist, TIMESTAMP_ARG)) { + rte_kvargs_free(kvlist); + return 0; + } + /* Timestamps are enabled when there is + * key-value pair: enable_timestamp=1 + */ + if (rte_kvargs_process(kvlist, TIMESTAMP_ARG, + timestamp_check_handler, NULL) < 0) { + rte_kvargs_free(kvlist); + return 0; + } + rte_kvargs_free(kvlist); + + return 1; +} + +int +nfb_eth_rx_queue_start(struct rte_eth_dev *dev, uint16_t rxq_id) +{ + struct ndp_rx_queue *rxq = dev->data->rx_queues[rxq_id]; + int ret; + + if (rxq->queue == NULL) { + RTE_LOG(ERR, PMD, "RX NDP queue is NULL!\n"); + return -EINVAL; + } + + ret = ndp_queue_start(rxq->queue); + if (ret != 0) + goto err; + dev->data->rx_queue_state[rxq_id] = RTE_ETH_QUEUE_STATE_STARTED; + return 0; + +err: + return -EINVAL; +} + +int +nfb_eth_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rxq_id) +{ + struct ndp_rx_queue *rxq = dev->data->rx_queues[rxq_id]; + int ret; + + if (rxq->queue == NULL) { + RTE_LOG(ERR, PMD, "RX NDP queue is NULL!\n"); + return -EINVAL; + } + + ret = ndp_queue_stop(rxq->queue); + if (ret != 0) + return -EINVAL; + + dev->data->rx_queue_state[rxq_id] = RTE_ETH_QUEUE_STATE_STOPPED; + return 0; +} + +int +nfb_eth_rx_queue_setup(struct rte_eth_dev *dev, + uint16_t rx_queue_id, + uint16_t nb_rx_desc __rte_unused, + unsigned int socket_id, + const struct rte_eth_rxconf *rx_conf __rte_unused, + struct rte_mempool *mb_pool) +{ + struct pmd_internals *internals = dev->data->dev_private; + + struct ndp_rx_queue *rxq; + int ret; + + rxq = rte_zmalloc_socket("ndp rx queue", + sizeof(struct ndp_rx_queue), + RTE_CACHE_LINE_SIZE, socket_id); + + if (rxq == NULL) { + RTE_LOG(ERR, PMD, "rte_zmalloc_socket() failed for rx queue id " + "%" PRIu16 "!\n", rx_queue_id); + return -ENOMEM; + } + + rxq->flags = 0; + + ret = nfb_eth_rx_queue_init(internals->nfb, + rx_queue_id, + dev->data->port_id, + mb_pool, + rxq); + + if (ret == 0) + dev->data->rx_queues[rx_queue_id] = rxq; + else + rte_free(rxq); + + if (nfb_check_timestamp(dev->device->devargs)) + rxq->flags |= NFB_TIMESTAMP_FLAG; + + return ret; +} + +int +nfb_eth_rx_queue_init(struct nfb_device *nfb, + uint16_t rx_queue_id, + uint16_t port_id, + struct rte_mempool *mb_pool, + struct ndp_rx_queue *rxq) +{ + const struct rte_pktmbuf_pool_private *mbp_priv = + rte_mempool_get_priv(mb_pool); + + if (nfb == NULL) + return -EINVAL; + + rxq->queue = ndp_open_rx_queue(nfb, rx_queue_id); + if (rxq->queue == NULL) + return -EINVAL; + + rxq->nfb = nfb; + rxq->rx_queue_id = rx_queue_id; + rxq->in_port = port_id; + rxq->mb_pool = mb_pool; + rxq->buf_size = (uint16_t)(mbp_priv->mbuf_data_room_size - + RTE_PKTMBUF_HEADROOM); + + rxq->rx_pkts = 0; + rxq->rx_bytes = 0; + rxq->err_pkts = 0; + + return 0; +} + +void +nfb_eth_rx_queue_release(void *q) +{ + struct ndp_rx_queue *rxq = (struct ndp_rx_queue *)q; + if (rxq->queue != NULL) { + ndp_close_rx_queue(rxq->queue); + rte_free(rxq); + rxq->queue = NULL; + } +} diff --git a/src/spdk/dpdk/drivers/net/nfb/nfb_rx.h b/src/spdk/dpdk/drivers/net/nfb/nfb_rx.h new file mode 100644 index 000000000..cf3899b2f --- /dev/null 
+++ b/src/spdk/dpdk/drivers/net/nfb/nfb_rx.h @@ -0,0 +1,223 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019 Cesnet + * Copyright(c) 2019 Netcope Technologies, a.s. + * All rights reserved. + */ + +#ifndef _NFB_RX_H_ +#define _NFB_RX_H_ + +#include +#include + +#include +#include + +#define NFB_TIMESTAMP_FLAG (1 << 0) + +struct ndp_rx_queue { + struct nfb_device *nfb; /* nfb dev structure */ + struct ndp_queue *queue; /* rx queue */ + uint16_t rx_queue_id; /* index */ + uint8_t in_port; /* port */ + uint8_t flags; /* setup flags */ + + struct rte_mempool *mb_pool; /* memory pool to allocate packets */ + uint16_t buf_size; /* mbuf size */ + + volatile uint64_t rx_pkts; /* packets read */ + volatile uint64_t rx_bytes; /* bytes read */ + volatile uint64_t err_pkts; /* erroneous packets */ +}; + +/** + * Initialize ndp_rx_queue structure + * + * @param nfb + * Pointer to nfb device structure. + * @param rx_queue_id + * RX queue index. + * @param port_id + * Device [external] port identifier. + * @param mb_pool + * Memory pool for buffer allocations. + * @param[out] rxq + * Pointer to ndp_rx_queue output structure + * @return + * 0 on success, a negative errno value otherwise. + */ +int +nfb_eth_rx_queue_init(struct nfb_device *nfb, + uint16_t rx_queue_id, + uint16_t port_id, + struct rte_mempool *mb_pool, + struct ndp_rx_queue *rxq); + +/** + * DPDK callback to setup a RX queue for use. + * + * @param dev + * Pointer to Ethernet device structure. + * @param idx + * RX queue index. + * @param desc + * Number of descriptors to configure in queue. + * @param socket + * NUMA socket on which memory must be allocated. + * @param[in] conf + * Thresholds parameters. + * @param mb_pool + * Memory pool for buffer allocations. + * + * @return + * 0 on success, a negative errno value otherwise. + */ +int +nfb_eth_rx_queue_setup(struct rte_eth_dev *dev, + uint16_t rx_queue_id, + uint16_t nb_rx_desc __rte_unused, + unsigned int socket_id, + const struct rte_eth_rxconf *rx_conf __rte_unused, + struct rte_mempool *mb_pool); + +/** + * DPDK callback to release a RX queue. + * + * @param dpdk_rxq + * Generic RX queue pointer. + */ +void +nfb_eth_rx_queue_release(void *q); + +/** + * Start traffic on Rx queue. + * + * @param dev + * Pointer to Ethernet device structure. + * @param txq_id + * RX queue index. + * @return + * 0 on success, a negative errno value otherwise. + */ +int +nfb_eth_rx_queue_start(struct rte_eth_dev *dev, uint16_t rxq_id); + +/** + * Stop traffic on Rx queue. + * + * @param dev + * Pointer to Ethernet device structure. + * @param txq_id + * RX queue index. + */ +int +nfb_eth_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rxq_id); + +/** + * DPDK callback for RX. + * + * @param dpdk_rxq + * Generic pointer to RX queue structure. + * @param[out] bufs + * Array to store received packets. + * @param nb_pkts + * Maximum number of packets in array. + * + * @return + * Number of packets successfully received (<= nb_pkts). 
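/*
 * Editor's minimal sketch (not part of the original driver): an
 * application-side receive loop for this PMD.  When the port was created
 * with the "timestamp=1" devarg, nfb_eth_ndp_rx() below fills
 * mbuf->timestamp (upper 32 bits nanoseconds, lower 32 bits seconds) and
 * sets PKT_RX_TIMESTAMP.  Port and queue ids and the burst size are
 * placeholders.
 */
#include <rte_ethdev.h>
#include <rte_mbuf.h>

static void
example_rx_poll(uint16_t port_id)
{
	struct rte_mbuf *bufs[32];
	uint16_t nb, i;

	nb = rte_eth_rx_burst(port_id, 0, bufs, 32);
	for (i = 0; i < nb; i++) {
		if (bufs[i]->ol_flags & PKT_RX_TIMESTAMP) {
			uint64_t ts = bufs[i]->timestamp;

			(void)ts;	/* consume or log as needed */
		}
		rte_pktmbuf_free(bufs[i]);
	}
}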
+ */ +static __rte_always_inline uint16_t +nfb_eth_ndp_rx(void *queue, + struct rte_mbuf **bufs, + uint16_t nb_pkts) +{ + struct ndp_rx_queue *ndp = queue; + uint8_t timestamping_enabled; + uint16_t packet_size; + uint64_t num_bytes = 0; + uint16_t num_rx; + unsigned int i; + + const uint16_t buf_size = ndp->buf_size; + + struct rte_mbuf *mbuf; + struct ndp_packet packets[nb_pkts]; + + struct rte_mbuf *mbufs[nb_pkts]; + + if (unlikely(ndp->queue == NULL || nb_pkts == 0)) { + RTE_LOG(ERR, PMD, "RX invalid arguments!\n"); + return 0; + } + + timestamping_enabled = ndp->flags & NFB_TIMESTAMP_FLAG; + + /* returns either all or nothing */ + i = rte_pktmbuf_alloc_bulk(ndp->mb_pool, mbufs, nb_pkts); + if (unlikely(i != 0)) + return 0; + + num_rx = ndp_rx_burst_get(ndp->queue, packets, nb_pkts); + + if (unlikely(num_rx != nb_pkts)) { + for (i = num_rx; i < nb_pkts; i++) + rte_pktmbuf_free(mbufs[i]); + } + + nb_pkts = num_rx; + + num_rx = 0; + /* + * Reads the given number of packets from NDP queue given + * by queue and copies the packet data into a newly allocated mbuf + * to return. + */ + for (i = 0; i < nb_pkts; ++i) { + mbuf = mbufs[i]; + + /* get the space available for data in the mbuf */ + packet_size = packets[i].data_length; + + if (likely(packet_size <= buf_size)) { + /* NDP packet will fit in one mbuf, go ahead and copy */ + rte_memcpy(rte_pktmbuf_mtod(mbuf, void *), + packets[i].data, packet_size); + + mbuf->data_len = (uint16_t)packet_size; + + mbuf->pkt_len = packet_size; + mbuf->port = ndp->in_port; + mbuf->ol_flags = 0; + + if (timestamping_enabled) { + /* nanoseconds */ + mbuf->timestamp = + rte_le_to_cpu_32(*((uint32_t *) + (packets[i].header + 4))); + mbuf->timestamp <<= 32; + /* seconds */ + mbuf->timestamp |= + rte_le_to_cpu_32(*((uint32_t *) + (packets[i].header + 8))); + mbuf->ol_flags |= PKT_RX_TIMESTAMP; + } + + bufs[num_rx++] = mbuf; + num_bytes += packet_size; + } else { + /* + * NDP packet will not fit in one mbuf, + * scattered mode is not enabled, drop packet + */ + rte_pktmbuf_free(mbuf); + } + } + + ndp_rx_burst_put(ndp->queue); + + ndp->rx_pkts += num_rx; + ndp->rx_bytes += num_bytes; + return num_rx; +} + +#endif /* _NFB_RX_H_ */ diff --git a/src/spdk/dpdk/drivers/net/nfb/nfb_rxmode.c b/src/spdk/dpdk/drivers/net/nfb/nfb_rxmode.c new file mode 100644 index 000000000..2d0b613d2 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/nfb/nfb_rxmode.c @@ -0,0 +1,111 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019 Cesnet + * Copyright(c) 2019 Netcope Technologies, a.s. + * All rights reserved. 
+ */ + +#include "nfb_rxmode.h" +#include "nfb.h" + +int +nfb_eth_promiscuous_enable(struct rte_eth_dev *dev) +{ + struct pmd_internals *internals = (struct pmd_internals *) + dev->data->dev_private; + uint16_t i; + + internals->rx_filter_original = RXMAC_MAC_FILTER_PROMISCUOUS; + + for (i = 0; i < internals->max_rxmac; ++i) { + nc_rxmac_mac_filter_enable(internals->rxmac[i], + RXMAC_MAC_FILTER_PROMISCUOUS); + } + + return 0; +} + +int +nfb_eth_promiscuous_disable(struct rte_eth_dev *dev) +{ + struct pmd_internals *internals = (struct pmd_internals *) + dev->data->dev_private; + uint16_t i; + + internals->rx_filter_original = RXMAC_MAC_FILTER_TABLE; + + /* if promisc is not enabled, do nothing */ + if (!nfb_eth_promiscuous_get(dev)) + return 0; + + for (i = 0; i < internals->max_rxmac; ++i) { + nc_rxmac_mac_filter_enable(internals->rxmac[i], + RXMAC_MAC_FILTER_TABLE); + } + + return 0; +} + +int +nfb_eth_promiscuous_get(struct rte_eth_dev *dev) +{ + struct pmd_internals *internals = (struct pmd_internals *) + dev->data->dev_private; + + struct nc_rxmac_status status; + status.mac_filter = RXMAC_MAC_FILTER_PROMISCUOUS; + + if (internals->max_rxmac > 0) + nc_rxmac_read_status(internals->rxmac[0], &status); + + return (status.mac_filter == RXMAC_MAC_FILTER_PROMISCUOUS); +} + +int +nfb_eth_allmulticast_enable(struct rte_eth_dev *dev) +{ + struct pmd_internals *internals = (struct pmd_internals *) + dev->data->dev_private; + + uint16_t i; + for (i = 0; i < internals->max_rxmac; ++i) { + nc_rxmac_mac_filter_enable(internals->rxmac[i], + RXMAC_MAC_FILTER_TABLE_BCAST_MCAST); + } + + return 0; +} + +int +nfb_eth_allmulticast_disable(struct rte_eth_dev *dev) +{ + struct pmd_internals *internals = (struct pmd_internals *) + dev->data->dev_private; + + uint16_t i; + + /* if multicast is not enabled do nothing */ + if (!nfb_eth_allmulticast_get(dev)) + return 0; + + for (i = 0; i < internals->max_rxmac; ++i) { + nc_rxmac_mac_filter_enable(internals->rxmac[i], + internals->rx_filter_original); + } + + return 0; +} + +int +nfb_eth_allmulticast_get(struct rte_eth_dev *dev) +{ + struct pmd_internals *internals = (struct pmd_internals *) + dev->data->dev_private; + + struct nc_rxmac_status status; + status.mac_filter = RXMAC_MAC_FILTER_PROMISCUOUS; + + if (internals->max_rxmac > 0) + nc_rxmac_read_status(internals->rxmac[0], &status); + + return (status.mac_filter == RXMAC_MAC_FILTER_TABLE_BCAST_MCAST); +} diff --git a/src/spdk/dpdk/drivers/net/nfb/nfb_rxmode.h b/src/spdk/dpdk/drivers/net/nfb/nfb_rxmode.h new file mode 100644 index 000000000..5a29e5ffe --- /dev/null +++ b/src/spdk/dpdk/drivers/net/nfb/nfb_rxmode.h @@ -0,0 +1,77 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019 Cesnet + * Copyright(c) 2019 Netcope Technologies, a.s. + * All rights reserved. + */ + +#ifndef _NFB_RXMODE_H_ +#define _NFB_RXMODE_H_ + +#include +#include + +#include + +/** + * Getter for promiscuous mode + * @param dev + * Pointer to Ethernet device structure. + * @return 1 if enabled 0 otherwise + */ +int +nfb_eth_promiscuous_get(struct rte_eth_dev *dev); + +/** + * DPDK callback to enable promiscuous mode. + * + * @param dev + * Pointer to Ethernet device structure. + * + * @return always 0 + */ +int +nfb_eth_promiscuous_enable(struct rte_eth_dev *dev); + +/** + * DPDK callback to disable promiscuous mode. + * + * @param dev + * Pointer to Ethernet device structure. 
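/*
 * Editor's minimal sketch (not part of the original driver): how an
 * application drives the filter controls above.  Note that disabling
 * allmulticast restores whichever mode (promiscuous or MAC table) was
 * remembered in rx_filter_original.  The port id is a placeholder.
 */
#include <rte_ethdev.h>

static void
example_rxmode(uint16_t port_id)
{
	/* Accept everything while debugging... */
	rte_eth_promiscuous_enable(port_id);

	/* ...then go back to the MAC table plus all multicast traffic. */
	rte_eth_promiscuous_disable(port_id);
	rte_eth_allmulticast_enable(port_id);
}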
+ * + * @return always 0 + */ +int +nfb_eth_promiscuous_disable(struct rte_eth_dev *dev); + +/** + * Getter for allmulticast mode + * @param dev + * Pointer to Ethernet device structure. + * @return 1 if enabled 0 otherwise + */ +int +nfb_eth_allmulticast_get(struct rte_eth_dev *dev); + +/** + * DPDK callback to enable allmulticast mode. + * + * @param dev + * Pointer to Ethernet device structure. + * + * @return always 0 + */ +int +nfb_eth_allmulticast_enable(struct rte_eth_dev *dev); + +/** + * DPDK callback to disable allmulticast mode. + * + * @param dev + * Pointer to Ethernet device structure. + * + * @return always 0 + */ +int +nfb_eth_allmulticast_disable(struct rte_eth_dev *dev); + +#endif /* _NFB_RXMODE_H_ */ diff --git a/src/spdk/dpdk/drivers/net/nfb/nfb_stats.c b/src/spdk/dpdk/drivers/net/nfb/nfb_stats.c new file mode 100644 index 000000000..e107dff4b --- /dev/null +++ b/src/spdk/dpdk/drivers/net/nfb/nfb_stats.c @@ -0,0 +1,79 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019 Cesnet + * Copyright(c) 2019 Netcope Technologies, a.s. + * All rights reserved. + */ + +#include "nfb_stats.h" +#include "nfb.h" + +int +nfb_eth_stats_get(struct rte_eth_dev *dev, + struct rte_eth_stats *stats) +{ + uint16_t i; + uint16_t nb_rx = dev->data->nb_rx_queues; + uint16_t nb_tx = dev->data->nb_tx_queues; + uint64_t rx_total = 0; + uint64_t tx_total = 0; + uint64_t tx_err_total = 0; + uint64_t rx_total_bytes = 0; + uint64_t tx_total_bytes = 0; + + struct ndp_rx_queue *rx_queue = *((struct ndp_rx_queue **) + dev->data->rx_queues); + struct ndp_tx_queue *tx_queue = *((struct ndp_tx_queue **) + dev->data->tx_queues); + + for (i = 0; i < nb_rx; i++) { + if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) { + stats->q_ipackets[i] = rx_queue[i].rx_pkts; + stats->q_ibytes[i] = rx_queue[i].rx_bytes; + } + rx_total += rx_queue[i].rx_pkts; + rx_total_bytes += rx_queue[i].rx_bytes; + } + + for (i = 0; i < nb_tx; i++) { + if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) { + stats->q_opackets[i] = tx_queue[i].tx_pkts; + stats->q_obytes[i] = tx_queue[i].tx_bytes; + } + tx_total += tx_queue[i].tx_pkts; + tx_total_bytes += tx_queue[i].tx_bytes; + tx_err_total += tx_queue[i].err_pkts; + } + + stats->ipackets = rx_total; + stats->opackets = tx_total; + stats->ibytes = rx_total_bytes; + stats->obytes = tx_total_bytes; + stats->oerrors = tx_err_total; + return 0; +} + +int +nfb_eth_stats_reset(struct rte_eth_dev *dev) +{ + uint16_t i; + uint16_t nb_rx = dev->data->nb_rx_queues; + uint16_t nb_tx = dev->data->nb_tx_queues; + + struct ndp_rx_queue *rx_queue = *((struct ndp_rx_queue **) + dev->data->rx_queues); + struct ndp_tx_queue *tx_queue = *((struct ndp_tx_queue **) + dev->data->tx_queues); + + for (i = 0; i < nb_rx; i++) { + rx_queue[i].rx_pkts = 0; + rx_queue[i].rx_bytes = 0; + rx_queue[i].err_pkts = 0; + } + for (i = 0; i < nb_tx; i++) { + tx_queue[i].tx_pkts = 0; + tx_queue[i].tx_bytes = 0; + tx_queue[i].err_pkts = 0; + } + + return 0; +} diff --git a/src/spdk/dpdk/drivers/net/nfb/nfb_stats.h b/src/spdk/dpdk/drivers/net/nfb/nfb_stats.h new file mode 100644 index 000000000..ad96ea29b --- /dev/null +++ b/src/spdk/dpdk/drivers/net/nfb/nfb_stats.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019 Cesnet + * Copyright(c) 2019 Netcope Technologies, a.s. + * All rights reserved. + */ + +#ifndef _NFB_STATS_H_ +#define _NFB_STATS_H_ + +#include +#include + +#include + +/** + * DPDK callback to get device statistics. + * + * @param dev + * Pointer to Ethernet device structure. 
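/*
 * Editor's minimal sketch (not part of the original driver): polling the
 * counters maintained above through the generic ethdev API.  The port id
 * is a placeholder.
 */
#include <inttypes.h>
#include <stdio.h>
#include <rte_ethdev.h>

static void
example_dump_stats(uint16_t port_id)
{
	struct rte_eth_stats st;

	if (rte_eth_stats_get(port_id, &st) != 0)
		return;

	printf("rx %" PRIu64 " pkts / %" PRIu64 " B, "
	       "tx %" PRIu64 " pkts / %" PRIu64 " B, tx errors %" PRIu64 "\n",
	       st.ipackets, st.ibytes, st.opackets, st.obytes, st.oerrors);

	/* Start a fresh measurement window. */
	rte_eth_stats_reset(port_id);
}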
+ * @param[out] stats + * Stats structure output buffer. + * + * @return + * 0 on success and stats is filled, negative errno value otherwise. + */ +int +nfb_eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats); + +/** + * DPDK callback to clear device statistics. + * + * @param dev + * Pointer to Ethernet device structure. + * + * @return + * 0 on success, negative errno value otherwise. + */ +int +nfb_eth_stats_reset(struct rte_eth_dev *dev); + +#endif /* _NFB_STATS_H_ */ diff --git a/src/spdk/dpdk/drivers/net/nfb/nfb_tx.c b/src/spdk/dpdk/drivers/net/nfb/nfb_tx.c new file mode 100644 index 000000000..9b912feb1 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/nfb/nfb_tx.c @@ -0,0 +1,113 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019 Cesnet + * Copyright(c) 2019 Netcope Technologies, a.s. + * All rights reserved. + */ + +#include "nfb_tx.h" +#include "nfb.h" + +int +nfb_eth_tx_queue_start(struct rte_eth_dev *dev, uint16_t txq_id) +{ + struct ndp_tx_queue *txq = dev->data->tx_queues[txq_id]; + int ret; + + if (txq->queue == NULL) { + RTE_LOG(ERR, PMD, "RX NDP queue is NULL!\n"); + return -EINVAL; + } + + ret = ndp_queue_start(txq->queue); + if (ret != 0) + goto err; + dev->data->tx_queue_state[txq_id] = RTE_ETH_QUEUE_STATE_STARTED; + return 0; + +err: + return -EINVAL; +} + +int +nfb_eth_tx_queue_stop(struct rte_eth_dev *dev, uint16_t txq_id) +{ + struct ndp_tx_queue *txq = dev->data->tx_queues[txq_id]; + int ret; + + if (txq->queue == NULL) { + RTE_LOG(ERR, PMD, "TX NDP queue is NULL!\n"); + return -EINVAL; + } + + ret = ndp_queue_stop(txq->queue); + if (ret != 0) + return -EINVAL; + dev->data->tx_queue_state[txq_id] = RTE_ETH_QUEUE_STATE_STOPPED; + return 0; +} + +int +nfb_eth_tx_queue_setup(struct rte_eth_dev *dev, + uint16_t tx_queue_id, + uint16_t nb_tx_desc __rte_unused, + unsigned int socket_id, + const struct rte_eth_txconf *tx_conf __rte_unused) +{ + struct pmd_internals *internals = dev->data->dev_private; + int ret; + struct ndp_tx_queue *txq; + + txq = rte_zmalloc_socket("ndp tx queue", + sizeof(struct ndp_tx_queue), + RTE_CACHE_LINE_SIZE, socket_id); + + if (txq == NULL) { + RTE_LOG(ERR, PMD, "rte_zmalloc_socket() failed for tx queue id " + "%" PRIu16 "!\n", tx_queue_id); + return -ENOMEM; + } + + ret = nfb_eth_tx_queue_init(internals->nfb, + tx_queue_id, + txq); + + if (ret == 0) + dev->data->tx_queues[tx_queue_id] = txq; + else + rte_free(txq); + + return ret; +} + +int +nfb_eth_tx_queue_init(struct nfb_device *nfb, + uint16_t tx_queue_id, + struct ndp_tx_queue *txq) +{ + if (nfb == NULL) + return -EINVAL; + + txq->queue = ndp_open_tx_queue(nfb, tx_queue_id); + if (txq->queue == NULL) + return -EINVAL; + + txq->nfb = nfb; + txq->tx_queue_id = tx_queue_id; + + txq->tx_pkts = 0; + txq->tx_bytes = 0; + txq->err_pkts = 0; + + return 0; +} + +void +nfb_eth_tx_queue_release(void *q) +{ + struct ndp_tx_queue *txq = (struct ndp_tx_queue *)q; + if (txq->queue != NULL) { + ndp_close_tx_queue(txq->queue); + rte_free(txq); + txq->queue = NULL; + } +} diff --git a/src/spdk/dpdk/drivers/net/nfb/nfb_tx.h b/src/spdk/dpdk/drivers/net/nfb/nfb_tx.h new file mode 100644 index 000000000..b6578cc38 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/nfb/nfb_tx.h @@ -0,0 +1,194 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019 Cesnet + * Copyright(c) 2019 Netcope Technologies, a.s. + * All rights reserved. 
+ */ + +#ifndef _NFB_TX_H_ +#define _NFB_TX_H_ + +#include +#include + +#include +#include +#include + +struct ndp_tx_queue { + struct nfb_device *nfb; /* nfb dev structure */ + struct ndp_queue *queue; /* tx queue */ + uint16_t tx_queue_id; /* index */ + volatile uint64_t tx_pkts; /* packets transmitted */ + volatile uint64_t tx_bytes; /* bytes transmitted */ + volatile uint64_t err_pkts; /* erroneous packets */ +}; + +/** + * DPDK callback to setup a TX queue for use. + * + * @param dev + * Pointer to Ethernet device structure. + * @param idx + * RX queue index. + * @param desc + * Number of descriptors to configure in queue. + * @param socket + * NUMA socket on which memory must be allocated. + * @param[in] conf + * Thresholds parameters. + * @param mp + * Memory pool for buffer allocations. + * + * @return + * 0 on success, a negative errno value otherwise. + */ +int +nfb_eth_tx_queue_setup(struct rte_eth_dev *dev, + uint16_t tx_queue_id, + uint16_t nb_tx_desc __rte_unused, + unsigned int socket_id, + const struct rte_eth_txconf *tx_conf __rte_unused); + +/** + * Initialize ndp_tx_queue structure + * + * @param nfb + * Pointer to nfb device structure. + * @param tx_queue_id + * TX queue index. + * @param[out] txq + * Pointer to ndp_tx_queue output structure + * + * @return + * 0 on success, a negative errno value otherwise. + */ +int +nfb_eth_tx_queue_init(struct nfb_device *nfb, + uint16_t tx_queue_id, + struct ndp_tx_queue *txq); + +/** + * DPDK callback to release a RX queue. + * + * @param dpdk_rxq + * Generic RX queue pointer. + */ +void +nfb_eth_tx_queue_release(void *q); + +/** + * Start traffic on Tx queue. + * + * @param dev + * Pointer to Ethernet device structure. + * @param txq_id + * TX queue index. + * + * @return + * 0 on success, a negative errno value otherwise. + */ +int +nfb_eth_tx_queue_start(struct rte_eth_dev *dev, uint16_t txq_id); + +/** + * Stop traffic on Tx queue. + * + * @param dev + * Pointer to Ethernet device structure. + * @param txq_id + * TX queue index. + * + * @return + * 0 on success, a negative errno value otherwise. + */ +int +nfb_eth_tx_queue_stop(struct rte_eth_dev *dev, uint16_t txq_id); + +/** + * DPDK callback for TX. + * + * @param dpdk_txq + * Generic pointer to TX queue structure. + * @param bufs + * Packets to transmit. + * @param nb_pkts + * Number of packets in array. + * + * @return + * Number of packets successfully transmitted (<= nb_pkts). 
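/*
 * Editor's minimal sketch (not part of the original driver): transmit one
 * raw frame through this PMD.  nfb_eth_ndp_tx() below copies the mbuf
 * contents into the NDP ring and frees the mbuf on success; on failure
 * nothing is consumed, so the caller frees it.  Pool, port and frame are
 * placeholders.
 */
#include <string.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>

static int
example_tx_one(uint16_t port_id, struct rte_mempool *mp,
	       const void *frame, uint16_t frame_len)
{
	struct rte_mbuf *m = rte_pktmbuf_alloc(mp);

	if (m == NULL)
		return -1;

	if (rte_pktmbuf_append(m, frame_len) == NULL) {
		rte_pktmbuf_free(m);
		return -1;	/* frame larger than the mbuf data room */
	}
	memcpy(rte_pktmbuf_mtod(m, void *), frame, frame_len);

	if (rte_eth_tx_burst(port_id, 0, &m, 1) != 1) {
		rte_pktmbuf_free(m);
		return -1;	/* ring full, nothing was sent */
	}
	return 0;
}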
+ */ +static __rte_always_inline uint16_t +nfb_eth_ndp_tx(void *queue, + struct rte_mbuf **bufs, + uint16_t nb_pkts) +{ + int i; + struct rte_mbuf *mbuf; + struct ndp_tx_queue *ndp = queue; + uint16_t num_tx = 0; + uint64_t num_bytes = 0; + + void *dst; + uint32_t pkt_len; + uint8_t mbuf_segs; + + struct ndp_packet packets[nb_pkts]; + + if (unlikely(ndp->queue == NULL || nb_pkts == 0)) { + RTE_LOG(ERR, PMD, "TX invalid arguments!\n"); + return 0; + } + + for (i = 0; i < nb_pkts; i++) { + packets[i].data_length = bufs[i]->pkt_len; + packets[i].header_length = 0; + } + + num_tx = ndp_tx_burst_get(ndp->queue, packets, nb_pkts); + + if (unlikely(num_tx != nb_pkts)) + return 0; + + for (i = 0; i < nb_pkts; ++i) { + mbuf = bufs[i]; + + pkt_len = mbuf->pkt_len; + mbuf_segs = mbuf->nb_segs; + + num_bytes += pkt_len; + if (mbuf_segs == 1) { + /* + * non-scattered packet, + * transmit from one mbuf + */ + rte_memcpy(packets[i].data, + rte_pktmbuf_mtod(mbuf, const void *), + pkt_len); + } else { + /* scattered packet, transmit from more mbufs */ + struct rte_mbuf *m = mbuf; + while (m) { + dst = packets[i].data; + + rte_memcpy(dst, + rte_pktmbuf_mtod(m, + const void *), + m->data_len); + dst = ((uint8_t *)(dst)) + + m->data_len; + m = m->next; + } + } + + rte_pktmbuf_free(mbuf); + } + + ndp_tx_burst_flush(ndp->queue); + + ndp->tx_pkts += num_tx; + ndp->err_pkts += nb_pkts - num_tx; + ndp->tx_bytes += num_bytes; + return num_tx; +} + +#endif /* _NFB_TX_H_ */ diff --git a/src/spdk/dpdk/drivers/net/nfb/rte_pmd_nfb_version.map b/src/spdk/dpdk/drivers/net/nfb/rte_pmd_nfb_version.map new file mode 100644 index 000000000..f9f17e4f6 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/nfb/rte_pmd_nfb_version.map @@ -0,0 +1,3 @@ +DPDK_20.0 { + local: *; +}; diff --git a/src/spdk/dpdk/drivers/net/nfp/Makefile b/src/spdk/dpdk/drivers/net/nfp/Makefile new file mode 100644 index 000000000..289b3a60e --- /dev/null +++ b/src/spdk/dpdk/drivers/net/nfp/Makefile @@ -0,0 +1,41 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2010-2014 Intel Corporation + +include $(RTE_SDK)/mk/rte.vars.mk + +# +# library name +# +LIB = librte_pmd_nfp.a + +CFLAGS += -O3 +CFLAGS += $(WERROR_FLAGS) + +LDLIBS += -lm +LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring +LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs +LDLIBS += -lrte_bus_pci + +EXPORT_MAP := rte_pmd_nfp_version.map + +VPATH += $(SRCDIR)/nfpcore + +SRCS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp_cppcore.c +SRCS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp_cpp_pcie_ops.c +SRCS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp_mutex.c +SRCS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp_resource.c +SRCS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp_crc.c +SRCS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp_mip.c +SRCS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp_nffw.c +SRCS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp_hwinfo.c +SRCS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp_rtsym.c +SRCS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp_nsp.c +SRCS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp_nsp_cmds.c +SRCS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp_nsp_eth.c + +# +# all source are stored in SRCS-y +# +SRCS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp_net.c + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/src/spdk/dpdk/drivers/net/nfp/meson.build b/src/spdk/dpdk/drivers/net/nfp/meson.build new file mode 100644 index 000000000..24a9a6da9 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/nfp/meson.build @@ -0,0 +1,20 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2018 Intel Corporation + +if not is_linux or not dpdk_conf.get('RTE_ARCH_64') + build = false + reason = 'only supported 
on 64-bit linux' +endif +sources = files('nfpcore/nfp_cpp_pcie_ops.c', + 'nfpcore/nfp_nsp.c', + 'nfpcore/nfp_cppcore.c', + 'nfpcore/nfp_resource.c', + 'nfpcore/nfp_mip.c', + 'nfpcore/nfp_nffw.c', + 'nfpcore/nfp_rtsym.c', + 'nfpcore/nfp_nsp_cmds.c', + 'nfpcore/nfp_crc.c', + 'nfpcore/nfp_mutex.c', + 'nfpcore/nfp_nsp_eth.c', + 'nfpcore/nfp_hwinfo.c', + 'nfp_net.c') diff --git a/src/spdk/dpdk/drivers/net/nfp/nfp_net.c b/src/spdk/dpdk/drivers/net/nfp/nfp_net.c new file mode 100644 index 000000000..2460ee199 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/nfp/nfp_net.c @@ -0,0 +1,3787 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2014-2018 Netronome Systems, Inc. + * All rights reserved. + * + * Small portions derived from code Copyright(c) 2010-2015 Intel Corporation. + */ + +/* + * vim:shiftwidth=8:noexpandtab + * + * @file dpdk/pmd/nfp_net.c + * + * Netronome vNIC DPDK Poll-Mode Driver: Main entry point + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "nfpcore/nfp_cpp.h" +#include "nfpcore/nfp_nffw.h" +#include "nfpcore/nfp_hwinfo.h" +#include "nfpcore/nfp_mip.h" +#include "nfpcore/nfp_rtsym.h" +#include "nfpcore/nfp_nsp.h" + +#include "nfp_net_pmd.h" +#include "nfp_net_logs.h" +#include "nfp_net_ctrl.h" + +#include +#include +#include +#include +#include +#include +#include + +/* Prototypes */ +static void nfp_net_close(struct rte_eth_dev *dev); +static int nfp_net_configure(struct rte_eth_dev *dev); +static void nfp_net_dev_interrupt_handler(void *param); +static void nfp_net_dev_interrupt_delayed_handler(void *param); +static int nfp_net_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu); +static int nfp_net_infos_get(struct rte_eth_dev *dev, + struct rte_eth_dev_info *dev_info); +static int nfp_net_init(struct rte_eth_dev *eth_dev); +static int nfp_net_link_update(struct rte_eth_dev *dev, int wait_to_complete); +static int nfp_net_promisc_enable(struct rte_eth_dev *dev); +static int nfp_net_promisc_disable(struct rte_eth_dev *dev); +static int nfp_net_rx_fill_freelist(struct nfp_net_rxq *rxq); +static uint32_t nfp_net_rx_queue_count(struct rte_eth_dev *dev, + uint16_t queue_idx); +static uint16_t nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); +static void nfp_net_rx_queue_release(void *rxq); +static int nfp_net_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, + uint16_t nb_desc, unsigned int socket_id, + const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *mp); +static int nfp_net_tx_free_bufs(struct nfp_net_txq *txq); +static void nfp_net_tx_queue_release(void *txq); +static int nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, + uint16_t nb_desc, unsigned int socket_id, + const struct rte_eth_txconf *tx_conf); +static int nfp_net_start(struct rte_eth_dev *dev); +static int nfp_net_stats_get(struct rte_eth_dev *dev, + struct rte_eth_stats *stats); +static int nfp_net_stats_reset(struct rte_eth_dev *dev); +static void nfp_net_stop(struct rte_eth_dev *dev); +static uint16_t nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); + +static int nfp_net_rss_config_default(struct rte_eth_dev *dev); +static int nfp_net_rss_hash_update(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf); +static int nfp_net_rss_reta_write(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size); +static int 
nfp_net_rss_hash_write(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf); +static int nfp_set_mac_addr(struct rte_eth_dev *dev, + struct rte_ether_addr *mac_addr); + +/* The offset of the queue controller queues in the PCIe Target */ +#define NFP_PCIE_QUEUE(_q) (0x80000 + (NFP_QCP_QUEUE_ADDR_SZ * ((_q) & 0xff))) + +/* Maximum value which can be added to a queue with one transaction */ +#define NFP_QCP_MAX_ADD 0x7f + +#define RTE_MBUF_DMA_ADDR_DEFAULT(mb) \ + (uint64_t)((mb)->buf_iova + RTE_PKTMBUF_HEADROOM) + +/* nfp_qcp_ptr - Read or Write Pointer of a queue */ +enum nfp_qcp_ptr { + NFP_QCP_READ_PTR = 0, + NFP_QCP_WRITE_PTR +}; + +/* + * nfp_qcp_ptr_add - Add the value to the selected pointer of a queue + * @q: Base address for queue structure + * @ptr: Add to the Read or Write pointer + * @val: Value to add to the queue pointer + * + * If @val is greater than @NFP_QCP_MAX_ADD multiple writes are performed. + */ +static inline void +nfp_qcp_ptr_add(uint8_t *q, enum nfp_qcp_ptr ptr, uint32_t val) +{ + uint32_t off; + + if (ptr == NFP_QCP_READ_PTR) + off = NFP_QCP_QUEUE_ADD_RPTR; + else + off = NFP_QCP_QUEUE_ADD_WPTR; + + while (val > NFP_QCP_MAX_ADD) { + nn_writel(rte_cpu_to_le_32(NFP_QCP_MAX_ADD), q + off); + val -= NFP_QCP_MAX_ADD; + } + + nn_writel(rte_cpu_to_le_32(val), q + off); +} + +/* + * nfp_qcp_read - Read the current Read/Write pointer value for a queue + * @q: Base address for queue structure + * @ptr: Read or Write pointer + */ +static inline uint32_t +nfp_qcp_read(uint8_t *q, enum nfp_qcp_ptr ptr) +{ + uint32_t off; + uint32_t val; + + if (ptr == NFP_QCP_READ_PTR) + off = NFP_QCP_QUEUE_STS_LO; + else + off = NFP_QCP_QUEUE_STS_HI; + + val = rte_cpu_to_le_32(nn_readl(q + off)); + + if (ptr == NFP_QCP_READ_PTR) + return val & NFP_QCP_QUEUE_STS_LO_READPTR_mask; + else + return val & NFP_QCP_QUEUE_STS_HI_WRITEPTR_mask; +} + +/* + * Functions to read/write from/to Config BAR + * Performs any endian conversion necessary. 
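+ *
+ * Minimal usage sketch (the offsets shown are the ones already used
+ * elsewhere in this file; hw is a struct nfp_net_hw pointer):
+ *
+ *   uint32_t sts = nn_cfg_readl(hw, NFP_NET_CFG_STS);
+ *   nn_cfg_writel(hw, NFP_NET_CFG_MTU, (uint32_t)mtu);
+ *
+ * The 32/64-bit accessors convert between the little-endian register
+ * layout and host byte order; the byte accessors need no conversion.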
+ */ +static inline uint8_t +nn_cfg_readb(struct nfp_net_hw *hw, int off) +{ + return nn_readb(hw->ctrl_bar + off); +} + +static inline void +nn_cfg_writeb(struct nfp_net_hw *hw, int off, uint8_t val) +{ + nn_writeb(val, hw->ctrl_bar + off); +} + +static inline uint32_t +nn_cfg_readl(struct nfp_net_hw *hw, int off) +{ + return rte_le_to_cpu_32(nn_readl(hw->ctrl_bar + off)); +} + +static inline void +nn_cfg_writel(struct nfp_net_hw *hw, int off, uint32_t val) +{ + nn_writel(rte_cpu_to_le_32(val), hw->ctrl_bar + off); +} + +static inline uint64_t +nn_cfg_readq(struct nfp_net_hw *hw, int off) +{ + return rte_le_to_cpu_64(nn_readq(hw->ctrl_bar + off)); +} + +static inline void +nn_cfg_writeq(struct nfp_net_hw *hw, int off, uint64_t val) +{ + nn_writeq(rte_cpu_to_le_64(val), hw->ctrl_bar + off); +} + +static void +nfp_net_rx_queue_release_mbufs(struct nfp_net_rxq *rxq) +{ + unsigned i; + + if (rxq->rxbufs == NULL) + return; + + for (i = 0; i < rxq->rx_count; i++) { + if (rxq->rxbufs[i].mbuf) { + rte_pktmbuf_free_seg(rxq->rxbufs[i].mbuf); + rxq->rxbufs[i].mbuf = NULL; + } + } +} + +static void +nfp_net_rx_queue_release(void *rx_queue) +{ + struct nfp_net_rxq *rxq = rx_queue; + + if (rxq) { + nfp_net_rx_queue_release_mbufs(rxq); + rte_free(rxq->rxbufs); + rte_free(rxq); + } +} + +static void +nfp_net_reset_rx_queue(struct nfp_net_rxq *rxq) +{ + nfp_net_rx_queue_release_mbufs(rxq); + rxq->rd_p = 0; + rxq->nb_rx_hold = 0; +} + +static void +nfp_net_tx_queue_release_mbufs(struct nfp_net_txq *txq) +{ + unsigned i; + + if (txq->txbufs == NULL) + return; + + for (i = 0; i < txq->tx_count; i++) { + if (txq->txbufs[i].mbuf) { + rte_pktmbuf_free_seg(txq->txbufs[i].mbuf); + txq->txbufs[i].mbuf = NULL; + } + } +} + +static void +nfp_net_tx_queue_release(void *tx_queue) +{ + struct nfp_net_txq *txq = tx_queue; + + if (txq) { + nfp_net_tx_queue_release_mbufs(txq); + rte_free(txq->txbufs); + rte_free(txq); + } +} + +static void +nfp_net_reset_tx_queue(struct nfp_net_txq *txq) +{ + nfp_net_tx_queue_release_mbufs(txq); + txq->wr_p = 0; + txq->rd_p = 0; +} + +static int +__nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t update) +{ + int cnt; + uint32_t new; + struct timespec wait; + + PMD_DRV_LOG(DEBUG, "Writing to the configuration queue (%p)...", + hw->qcp_cfg); + + if (hw->qcp_cfg == NULL) + rte_panic("Bad configuration queue pointer\n"); + + nfp_qcp_ptr_add(hw->qcp_cfg, NFP_QCP_WRITE_PTR, 1); + + wait.tv_sec = 0; + wait.tv_nsec = 1000000; + + PMD_DRV_LOG(DEBUG, "Polling for update ack..."); + + /* Poll update field, waiting for NFP to ack the config */ + for (cnt = 0; ; cnt++) { + new = nn_cfg_readl(hw, NFP_NET_CFG_UPDATE); + if (new == 0) + break; + if (new & NFP_NET_CFG_UPDATE_ERR) { + PMD_INIT_LOG(ERR, "Reconfig error: 0x%08x", new); + return -1; + } + if (cnt >= NFP_NET_POLL_TIMEOUT) { + PMD_INIT_LOG(ERR, "Reconfig timeout for 0x%08x after" + " %dms", update, cnt); + rte_panic("Exiting\n"); + } + nanosleep(&wait, 0); /* waiting for a 1ms */ + } + PMD_DRV_LOG(DEBUG, "Ack DONE"); + return 0; +} + +/* + * Reconfigure the NIC + * @nn: device to reconfigure + * @ctrl: The value for the ctrl field in the BAR config + * @update: The value for the update field in the BAR config + * + * Write the update word to the BAR and ping the reconfig queue. Then poll + * until the firmware has acknowledged the update by zeroing the update word. 
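+ *
+ * Call sites in this file follow the same pattern, e.g. when enabling
+ * promiscuous mode (the values shown are the ones used by
+ * nfp_net_promisc_enable() below):
+ *
+ *   new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_PROMISC;
+ *   if (nfp_net_reconfig(hw, new_ctrl, NFP_NET_CFG_UPDATE_GEN) == 0)
+ *           hw->ctrl = new_ctrl;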
+ */ +static int +nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t ctrl, uint32_t update) +{ + uint32_t err; + + PMD_DRV_LOG(DEBUG, "nfp_net_reconfig: ctrl=%08x update=%08x", + ctrl, update); + + rte_spinlock_lock(&hw->reconfig_lock); + + nn_cfg_writel(hw, NFP_NET_CFG_CTRL, ctrl); + nn_cfg_writel(hw, NFP_NET_CFG_UPDATE, update); + + rte_wmb(); + + err = __nfp_net_reconfig(hw, update); + + rte_spinlock_unlock(&hw->reconfig_lock); + + if (!err) + return 0; + + /* + * Reconfig errors imply situations where they can be handled. + * Otherwise, rte_panic is called inside __nfp_net_reconfig + */ + PMD_INIT_LOG(ERR, "Error nfp_net reconfig for ctrl: %x update: %x", + ctrl, update); + return -EIO; +} + +/* + * Configure an Ethernet device. This function must be invoked first + * before any other function in the Ethernet API. This function can + * also be re-invoked when a device is in the stopped state. + */ +static int +nfp_net_configure(struct rte_eth_dev *dev) +{ + struct rte_eth_conf *dev_conf; + struct rte_eth_rxmode *rxmode; + struct rte_eth_txmode *txmode; + struct nfp_net_hw *hw; + + hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* + * A DPDK app sends info about how many queues to use and how + * those queues need to be configured. This is used by the + * DPDK core and it makes sure no more queues than those + * advertised by the driver are requested. This function is + * called after that internal process + */ + + PMD_INIT_LOG(DEBUG, "Configure"); + + dev_conf = &dev->data->dev_conf; + rxmode = &dev_conf->rxmode; + txmode = &dev_conf->txmode; + + if (rxmode->mq_mode & ETH_MQ_RX_RSS_FLAG) + rxmode->offloads |= DEV_RX_OFFLOAD_RSS_HASH; + + /* Checking TX mode */ + if (txmode->mq_mode) { + PMD_INIT_LOG(INFO, "TX mq_mode DCB and VMDq not supported"); + return -EINVAL; + } + + /* Checking RX mode */ + if (rxmode->mq_mode & ETH_MQ_RX_RSS && + !(hw->cap & NFP_NET_CFG_CTRL_RSS)) { + PMD_INIT_LOG(INFO, "RSS not supported"); + return -EINVAL; + } + + return 0; +} + +static void +nfp_net_enable_queues(struct rte_eth_dev *dev) +{ + struct nfp_net_hw *hw; + uint64_t enabled_queues = 0; + int i; + + hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* Enabling the required TX queues in the device */ + for (i = 0; i < dev->data->nb_tx_queues; i++) + enabled_queues |= (1 << i); + + nn_cfg_writeq(hw, NFP_NET_CFG_TXRS_ENABLE, enabled_queues); + + enabled_queues = 0; + + /* Enabling the required RX queues in the device */ + for (i = 0; i < dev->data->nb_rx_queues; i++) + enabled_queues |= (1 << i); + + nn_cfg_writeq(hw, NFP_NET_CFG_RXRS_ENABLE, enabled_queues); +} + +static void +nfp_net_disable_queues(struct rte_eth_dev *dev) +{ + struct nfp_net_hw *hw; + uint32_t new_ctrl, update = 0; + + hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + nn_cfg_writeq(hw, NFP_NET_CFG_TXRS_ENABLE, 0); + nn_cfg_writeq(hw, NFP_NET_CFG_RXRS_ENABLE, 0); + + new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_ENABLE; + update = NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING | + NFP_NET_CFG_UPDATE_MSIX; + + if (hw->cap & NFP_NET_CFG_CTRL_RINGCFG) + new_ctrl &= ~NFP_NET_CFG_CTRL_RINGCFG; + + /* If an error when reconfig we avoid to change hw state */ + if (nfp_net_reconfig(hw, new_ctrl, update) < 0) + return; + + hw->ctrl = new_ctrl; +} + +static int +nfp_net_rx_freelist_setup(struct rte_eth_dev *dev) +{ + int i; + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + if (nfp_net_rx_fill_freelist(dev->data->rx_queues[i]) < 0) + return -1; + } + return 0; +} + +static void +nfp_net_params_setup(struct 
nfp_net_hw *hw) +{ + nn_cfg_writel(hw, NFP_NET_CFG_MTU, hw->mtu); + nn_cfg_writel(hw, NFP_NET_CFG_FLBUFSZ, hw->flbufsz); +} + +static void +nfp_net_cfg_queue_setup(struct nfp_net_hw *hw) +{ + hw->qcp_cfg = hw->tx_bar + NFP_QCP_QUEUE_ADDR_SZ; +} + +#define ETH_ADDR_LEN 6 + +static void +nfp_eth_copy_mac(uint8_t *dst, const uint8_t *src) +{ + int i; + + for (i = 0; i < ETH_ADDR_LEN; i++) + dst[i] = src[i]; +} + +static int +nfp_net_pf_read_mac(struct nfp_net_hw *hw, int port) +{ + struct nfp_eth_table *nfp_eth_table; + + nfp_eth_table = nfp_eth_read_ports(hw->cpp); + /* + * hw points to port0 private data. We need hw now pointing to + * right port. + */ + hw += port; + nfp_eth_copy_mac((uint8_t *)&hw->mac_addr, + (uint8_t *)&nfp_eth_table->ports[port].mac_addr); + + free(nfp_eth_table); + return 0; +} + +static void +nfp_net_vf_read_mac(struct nfp_net_hw *hw) +{ + uint32_t tmp; + + tmp = rte_be_to_cpu_32(nn_cfg_readl(hw, NFP_NET_CFG_MACADDR)); + memcpy(&hw->mac_addr[0], &tmp, 4); + + tmp = rte_be_to_cpu_32(nn_cfg_readl(hw, NFP_NET_CFG_MACADDR + 4)); + memcpy(&hw->mac_addr[4], &tmp, 2); +} + +static void +nfp_net_write_mac(struct nfp_net_hw *hw, uint8_t *mac) +{ + uint32_t mac0 = *(uint32_t *)mac; + uint16_t mac1; + + nn_writel(rte_cpu_to_be_32(mac0), hw->ctrl_bar + NFP_NET_CFG_MACADDR); + + mac += 4; + mac1 = *(uint16_t *)mac; + nn_writew(rte_cpu_to_be_16(mac1), + hw->ctrl_bar + NFP_NET_CFG_MACADDR + 6); +} + +int +nfp_set_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr) +{ + struct nfp_net_hw *hw; + uint32_t update, ctrl; + + hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); + if ((hw->ctrl & NFP_NET_CFG_CTRL_ENABLE) && + !(hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR)) { + PMD_INIT_LOG(INFO, "MAC address unable to change when" + " port enabled"); + return -EBUSY; + } + + if ((hw->ctrl & NFP_NET_CFG_CTRL_ENABLE) && + !(hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR)) + return -EBUSY; + + /* Writing new MAC to the specific port BAR address */ + nfp_net_write_mac(hw, (uint8_t *)mac_addr); + + /* Signal the NIC about the change */ + update = NFP_NET_CFG_UPDATE_MACADDR; + ctrl = hw->ctrl; + if ((hw->ctrl & NFP_NET_CFG_CTRL_ENABLE) && + (hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR)) + ctrl |= NFP_NET_CFG_CTRL_LIVE_ADDR; + if (nfp_net_reconfig(hw, ctrl, update) < 0) { + PMD_INIT_LOG(INFO, "MAC address update failed"); + return -EIO; + } + return 0; +} + +static int +nfp_configure_rx_interrupt(struct rte_eth_dev *dev, + struct rte_intr_handle *intr_handle) +{ + struct nfp_net_hw *hw; + int i; + + if (!intr_handle->intr_vec) { + intr_handle->intr_vec = + rte_zmalloc("intr_vec", + dev->data->nb_rx_queues * sizeof(int), 0); + if (!intr_handle->intr_vec) { + PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues" + " intr_vec", dev->data->nb_rx_queues); + return -ENOMEM; + } + } + + hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (intr_handle->type == RTE_INTR_HANDLE_UIO) { + PMD_INIT_LOG(INFO, "VF: enabling RX interrupt with UIO"); + /* UIO just supports one queue and no LSC*/ + nn_cfg_writeb(hw, NFP_NET_CFG_RXR_VEC(0), 0); + intr_handle->intr_vec[0] = 0; + } else { + PMD_INIT_LOG(INFO, "VF: enabling RX interrupt with VFIO"); + for (i = 0; i < dev->data->nb_rx_queues; i++) { + /* + * The first msix vector is reserved for non + * efd interrupts + */ + nn_cfg_writeb(hw, NFP_NET_CFG_RXR_VEC(i), i + 1); + intr_handle->intr_vec[i] = i + 1; + PMD_INIT_LOG(DEBUG, "intr_vec[%d]= %d", i, + intr_handle->intr_vec[i]); + } + } + + /* Avoiding TX interrupts */ + hw->ctrl |= NFP_NET_CFG_CTRL_MSIX_TX_OFF; 
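+
+	/*
+	 * Resulting mapping, illustrated for a hypothetical setup with
+	 * four RX queues under VFIO: vector 0 stays reserved for
+	 * non-queue events such as link state changes, vectors 1..4
+	 * service RX queues 0..3, and TX interrupts remain disabled via
+	 * NFP_NET_CFG_CTRL_MSIX_TX_OFF.
+	 */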
+ return 0; +} + +static uint32_t +nfp_check_offloads(struct rte_eth_dev *dev) +{ + struct nfp_net_hw *hw; + struct rte_eth_conf *dev_conf; + struct rte_eth_rxmode *rxmode; + struct rte_eth_txmode *txmode; + uint32_t ctrl = 0; + + hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + dev_conf = &dev->data->dev_conf; + rxmode = &dev_conf->rxmode; + txmode = &dev_conf->txmode; + + if (rxmode->offloads & DEV_RX_OFFLOAD_IPV4_CKSUM) { + if (hw->cap & NFP_NET_CFG_CTRL_RXCSUM) + ctrl |= NFP_NET_CFG_CTRL_RXCSUM; + } + + if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) { + if (hw->cap & NFP_NET_CFG_CTRL_RXVLAN) + ctrl |= NFP_NET_CFG_CTRL_RXVLAN; + } + + if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) + hw->mtu = rxmode->max_rx_pkt_len; + + if (txmode->offloads & DEV_TX_OFFLOAD_VLAN_INSERT) + ctrl |= NFP_NET_CFG_CTRL_TXVLAN; + + /* L2 broadcast */ + if (hw->cap & NFP_NET_CFG_CTRL_L2BC) + ctrl |= NFP_NET_CFG_CTRL_L2BC; + + /* L2 multicast */ + if (hw->cap & NFP_NET_CFG_CTRL_L2MC) + ctrl |= NFP_NET_CFG_CTRL_L2MC; + + /* TX checksum offload */ + if (txmode->offloads & DEV_TX_OFFLOAD_IPV4_CKSUM || + txmode->offloads & DEV_TX_OFFLOAD_UDP_CKSUM || + txmode->offloads & DEV_TX_OFFLOAD_TCP_CKSUM) + ctrl |= NFP_NET_CFG_CTRL_TXCSUM; + + /* LSO offload */ + if (txmode->offloads & DEV_TX_OFFLOAD_TCP_TSO) { + if (hw->cap & NFP_NET_CFG_CTRL_LSO) + ctrl |= NFP_NET_CFG_CTRL_LSO; + else + ctrl |= NFP_NET_CFG_CTRL_LSO2; + } + + /* RX gather */ + if (txmode->offloads & DEV_TX_OFFLOAD_MULTI_SEGS) + ctrl |= NFP_NET_CFG_CTRL_GATHER; + + return ctrl; +} + +static int +nfp_net_start(struct rte_eth_dev *dev) +{ + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + uint32_t new_ctrl, update = 0; + struct nfp_net_hw *hw; + struct rte_eth_conf *dev_conf; + struct rte_eth_rxmode *rxmode; + uint32_t intr_vector; + int ret; + + hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + PMD_INIT_LOG(DEBUG, "Start"); + + /* Disabling queues just in case... */ + nfp_net_disable_queues(dev); + + /* Enabling the required queues in the device */ + nfp_net_enable_queues(dev); + + /* check and configure queue intr-vector mapping */ + if (dev->data->dev_conf.intr_conf.rxq != 0) { + if (hw->pf_multiport_enabled) { + PMD_INIT_LOG(ERR, "PMD rx interrupt is not supported " + "with NFP multiport PF"); + return -EINVAL; + } + if (intr_handle->type == RTE_INTR_HANDLE_UIO) { + /* + * Better not to share LSC with RX interrupts. 
+ * Unregistering LSC interrupt handler + */ + rte_intr_callback_unregister(&pci_dev->intr_handle, + nfp_net_dev_interrupt_handler, (void *)dev); + + if (dev->data->nb_rx_queues > 1) { + PMD_INIT_LOG(ERR, "PMD rx interrupt only " + "supports 1 queue with UIO"); + return -EIO; + } + } + intr_vector = dev->data->nb_rx_queues; + if (rte_intr_efd_enable(intr_handle, intr_vector)) + return -1; + + nfp_configure_rx_interrupt(dev, intr_handle); + update = NFP_NET_CFG_UPDATE_MSIX; + } + + rte_intr_enable(intr_handle); + + new_ctrl = nfp_check_offloads(dev); + + /* Writing configuration parameters in the device */ + nfp_net_params_setup(hw); + + dev_conf = &dev->data->dev_conf; + rxmode = &dev_conf->rxmode; + + if (rxmode->mq_mode & ETH_MQ_RX_RSS) { + nfp_net_rss_config_default(dev); + update |= NFP_NET_CFG_UPDATE_RSS; + new_ctrl |= NFP_NET_CFG_CTRL_RSS; + } + + /* Enable device */ + new_ctrl |= NFP_NET_CFG_CTRL_ENABLE; + + update |= NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING; + + if (hw->cap & NFP_NET_CFG_CTRL_RINGCFG) + new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG; + + nn_cfg_writel(hw, NFP_NET_CFG_CTRL, new_ctrl); + if (nfp_net_reconfig(hw, new_ctrl, update) < 0) + return -EIO; + + /* + * Allocating rte mbufs for configured rx queues. + * This requires queues being enabled before + */ + if (nfp_net_rx_freelist_setup(dev) < 0) { + ret = -ENOMEM; + goto error; + } + + if (hw->is_pf) { + if (rte_eal_process_type() == RTE_PROC_PRIMARY) + /* Configure the physical port up */ + nfp_eth_set_configured(hw->cpp, hw->pf_port_idx, 1); + else + nfp_eth_set_configured(dev->process_private, + hw->pf_port_idx, 1); + } + + hw->ctrl = new_ctrl; + + return 0; + +error: + /* + * An error returned by this function should mean the app + * exiting and then the system releasing all the memory + * allocated even memory coming from hugepages. + * + * The device could be enabled at this point with some queues + * ready for getting packets. This is true if the call to + * nfp_net_rx_freelist_setup() succeeds for some queues but + * fails for subsequent queues. + * + * This should make the app exiting but better if we tell the + * device first. + */ + nfp_net_disable_queues(dev); + + return ret; +} + +/* Stop device: disable rx and tx functions to allow for reconfiguring. */ +static void +nfp_net_stop(struct rte_eth_dev *dev) +{ + int i; + struct nfp_net_hw *hw; + + PMD_INIT_LOG(DEBUG, "Stop"); + + hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + nfp_net_disable_queues(dev); + + /* Clear queues */ + for (i = 0; i < dev->data->nb_tx_queues; i++) { + nfp_net_reset_tx_queue( + (struct nfp_net_txq *)dev->data->tx_queues[i]); + } + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + nfp_net_reset_rx_queue( + (struct nfp_net_rxq *)dev->data->rx_queues[i]); + } + + if (hw->is_pf) { + if (rte_eal_process_type() == RTE_PROC_PRIMARY) + /* Configure the physical port down */ + nfp_eth_set_configured(hw->cpp, hw->pf_port_idx, 0); + else + nfp_eth_set_configured(dev->process_private, + hw->pf_port_idx, 0); + } +} + +/* Set the link up. */ +static int +nfp_net_set_link_up(struct rte_eth_dev *dev) +{ + struct nfp_net_hw *hw; + + PMD_DRV_LOG(DEBUG, "Set link up"); + + hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (!hw->is_pf) + return -ENOTSUP; + + if (rte_eal_process_type() == RTE_PROC_PRIMARY) + /* Configure the physical port down */ + return nfp_eth_set_configured(hw->cpp, hw->pf_port_idx, 1); + else + return nfp_eth_set_configured(dev->process_private, + hw->pf_port_idx, 1); +} + +/* Set the link down. 
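+ * Counterpart of nfp_net_set_link_up() above; an application call such
+ * as rte_eth_dev_set_link_down(port_id) is expected to land here, and
+ * only the PF supports it (VFs get -ENOTSUP).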
*/ +static int +nfp_net_set_link_down(struct rte_eth_dev *dev) +{ + struct nfp_net_hw *hw; + + PMD_DRV_LOG(DEBUG, "Set link down"); + + hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (!hw->is_pf) + return -ENOTSUP; + + if (rte_eal_process_type() == RTE_PROC_PRIMARY) + /* Configure the physical port down */ + return nfp_eth_set_configured(hw->cpp, hw->pf_port_idx, 0); + else + return nfp_eth_set_configured(dev->process_private, + hw->pf_port_idx, 0); +} + +/* Reset and stop device. The device can not be restarted. */ +static void +nfp_net_close(struct rte_eth_dev *dev) +{ + struct nfp_net_hw *hw; + struct rte_pci_device *pci_dev; + int i; + + PMD_INIT_LOG(DEBUG, "Close"); + + hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); + pci_dev = RTE_ETH_DEV_TO_PCI(dev); + + /* + * We assume that the DPDK application is stopping all the + * threads/queues before calling the device close function. + */ + + nfp_net_disable_queues(dev); + + /* Clear queues */ + for (i = 0; i < dev->data->nb_tx_queues; i++) { + nfp_net_reset_tx_queue( + (struct nfp_net_txq *)dev->data->tx_queues[i]); + } + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + nfp_net_reset_rx_queue( + (struct nfp_net_rxq *)dev->data->rx_queues[i]); + } + + rte_intr_disable(&pci_dev->intr_handle); + nn_cfg_writeb(hw, NFP_NET_CFG_LSC, 0xff); + + /* unregister callback func from eal lib */ + rte_intr_callback_unregister(&pci_dev->intr_handle, + nfp_net_dev_interrupt_handler, + (void *)dev); + + /* + * The ixgbe PMD driver disables the pcie master on the + * device. The i40e does not... + */ +} + +static int +nfp_net_promisc_enable(struct rte_eth_dev *dev) +{ + uint32_t new_ctrl, update = 0; + struct nfp_net_hw *hw; + int ret; + + PMD_DRV_LOG(DEBUG, "Promiscuous mode enable"); + + hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (!(hw->cap & NFP_NET_CFG_CTRL_PROMISC)) { + PMD_INIT_LOG(INFO, "Promiscuous mode not supported"); + return -ENOTSUP; + } + + if (hw->ctrl & NFP_NET_CFG_CTRL_PROMISC) { + PMD_DRV_LOG(INFO, "Promiscuous mode already enabled"); + return 0; + } + + new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_PROMISC; + update = NFP_NET_CFG_UPDATE_GEN; + + /* + * DPDK sets promiscuous mode on just after this call assuming + * it can not fail ... + */ + ret = nfp_net_reconfig(hw, new_ctrl, update); + if (ret < 0) + return ret; + + hw->ctrl = new_ctrl; + + return 0; +} + +static int +nfp_net_promisc_disable(struct rte_eth_dev *dev) +{ + uint32_t new_ctrl, update = 0; + struct nfp_net_hw *hw; + int ret; + + hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if ((hw->ctrl & NFP_NET_CFG_CTRL_PROMISC) == 0) { + PMD_DRV_LOG(INFO, "Promiscuous mode already disabled"); + return 0; + } + + new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_PROMISC; + update = NFP_NET_CFG_UPDATE_GEN; + + /* + * DPDK sets promiscuous mode off just before this call + * assuming it can not fail ... + */ + ret = nfp_net_reconfig(hw, new_ctrl, update); + if (ret < 0) + return ret; + + hw->ctrl = new_ctrl; + + return 0; +} + +/* + * return 0 means link status changed, -1 means not changed + * + * Wait to complete is needed as it can take up to 9 seconds to get the Link + * status. 
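+ *
+ * Illustrative application-side view (port_id is a placeholder):
+ *
+ *   struct rte_eth_link link;
+ *   rte_eth_link_get_nowait(port_id, &link);
+ *
+ * reaches this callback with wait_to_complete == 0, while
+ * rte_eth_link_get() passes wait_to_complete == 1.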
+ */ +static int +nfp_net_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete) +{ + struct nfp_net_hw *hw; + struct rte_eth_link link; + uint32_t nn_link_status; + int ret; + + static const uint32_t ls_to_ethtool[] = { + [NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED] = ETH_SPEED_NUM_NONE, + [NFP_NET_CFG_STS_LINK_RATE_UNKNOWN] = ETH_SPEED_NUM_NONE, + [NFP_NET_CFG_STS_LINK_RATE_1G] = ETH_SPEED_NUM_1G, + [NFP_NET_CFG_STS_LINK_RATE_10G] = ETH_SPEED_NUM_10G, + [NFP_NET_CFG_STS_LINK_RATE_25G] = ETH_SPEED_NUM_25G, + [NFP_NET_CFG_STS_LINK_RATE_40G] = ETH_SPEED_NUM_40G, + [NFP_NET_CFG_STS_LINK_RATE_50G] = ETH_SPEED_NUM_50G, + [NFP_NET_CFG_STS_LINK_RATE_100G] = ETH_SPEED_NUM_100G, + }; + + PMD_DRV_LOG(DEBUG, "Link update"); + + hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + nn_link_status = nn_cfg_readl(hw, NFP_NET_CFG_STS); + + memset(&link, 0, sizeof(struct rte_eth_link)); + + if (nn_link_status & NFP_NET_CFG_STS_LINK) + link.link_status = ETH_LINK_UP; + + link.link_duplex = ETH_LINK_FULL_DUPLEX; + + nn_link_status = (nn_link_status >> NFP_NET_CFG_STS_LINK_RATE_SHIFT) & + NFP_NET_CFG_STS_LINK_RATE_MASK; + + if (nn_link_status >= RTE_DIM(ls_to_ethtool)) + link.link_speed = ETH_SPEED_NUM_NONE; + else + link.link_speed = ls_to_ethtool[nn_link_status]; + + ret = rte_eth_linkstatus_set(dev, &link); + if (ret == 0) { + if (link.link_status) + PMD_DRV_LOG(INFO, "NIC Link is Up"); + else + PMD_DRV_LOG(INFO, "NIC Link is Down"); + } + return ret; +} + +static int +nfp_net_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) +{ + int i; + struct nfp_net_hw *hw; + struct rte_eth_stats nfp_dev_stats; + + hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* RTE_ETHDEV_QUEUE_STAT_CNTRS default value is 16 */ + + memset(&nfp_dev_stats, 0, sizeof(nfp_dev_stats)); + + /* reading per RX ring stats */ + for (i = 0; i < dev->data->nb_rx_queues; i++) { + if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS) + break; + + nfp_dev_stats.q_ipackets[i] = + nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i)); + + nfp_dev_stats.q_ipackets[i] -= + hw->eth_stats_base.q_ipackets[i]; + + nfp_dev_stats.q_ibytes[i] = + nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i) + 0x8); + + nfp_dev_stats.q_ibytes[i] -= + hw->eth_stats_base.q_ibytes[i]; + } + + /* reading per TX ring stats */ + for (i = 0; i < dev->data->nb_tx_queues; i++) { + if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS) + break; + + nfp_dev_stats.q_opackets[i] = + nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i)); + + nfp_dev_stats.q_opackets[i] -= + hw->eth_stats_base.q_opackets[i]; + + nfp_dev_stats.q_obytes[i] = + nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i) + 0x8); + + nfp_dev_stats.q_obytes[i] -= + hw->eth_stats_base.q_obytes[i]; + } + + nfp_dev_stats.ipackets = + nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_FRAMES); + + nfp_dev_stats.ipackets -= hw->eth_stats_base.ipackets; + + nfp_dev_stats.ibytes = + nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_OCTETS); + + nfp_dev_stats.ibytes -= hw->eth_stats_base.ibytes; + + nfp_dev_stats.opackets = + nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_FRAMES); + + nfp_dev_stats.opackets -= hw->eth_stats_base.opackets; + + nfp_dev_stats.obytes = + nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_OCTETS); + + nfp_dev_stats.obytes -= hw->eth_stats_base.obytes; + + /* reading general device stats */ + nfp_dev_stats.ierrors = + nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_ERRORS); + + nfp_dev_stats.ierrors -= hw->eth_stats_base.ierrors; + + nfp_dev_stats.oerrors = + nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_ERRORS); + + nfp_dev_stats.oerrors -= hw->eth_stats_base.oerrors; + + /* RX ring 
mbuf allocation failures */ + nfp_dev_stats.rx_nombuf = dev->data->rx_mbuf_alloc_failed; + + nfp_dev_stats.imissed = + nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_DISCARDS); + + nfp_dev_stats.imissed -= hw->eth_stats_base.imissed; + + if (stats) { + memcpy(stats, &nfp_dev_stats, sizeof(*stats)); + return 0; + } + return -EINVAL; +} + +static int +nfp_net_stats_reset(struct rte_eth_dev *dev) +{ + int i; + struct nfp_net_hw *hw; + + hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* + * hw->eth_stats_base records the per counter starting point. + * Lets update it now + */ + + /* reading per RX ring stats */ + for (i = 0; i < dev->data->nb_rx_queues; i++) { + if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS) + break; + + hw->eth_stats_base.q_ipackets[i] = + nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i)); + + hw->eth_stats_base.q_ibytes[i] = + nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i) + 0x8); + } + + /* reading per TX ring stats */ + for (i = 0; i < dev->data->nb_tx_queues; i++) { + if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS) + break; + + hw->eth_stats_base.q_opackets[i] = + nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i)); + + hw->eth_stats_base.q_obytes[i] = + nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i) + 0x8); + } + + hw->eth_stats_base.ipackets = + nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_FRAMES); + + hw->eth_stats_base.ibytes = + nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_OCTETS); + + hw->eth_stats_base.opackets = + nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_FRAMES); + + hw->eth_stats_base.obytes = + nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_OCTETS); + + /* reading general device stats */ + hw->eth_stats_base.ierrors = + nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_ERRORS); + + hw->eth_stats_base.oerrors = + nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_ERRORS); + + /* RX ring mbuf allocation failures */ + dev->data->rx_mbuf_alloc_failed = 0; + + hw->eth_stats_base.imissed = + nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_DISCARDS); + + return 0; +} + +static int +nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) +{ + struct nfp_net_hw *hw; + + hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + dev_info->max_rx_queues = (uint16_t)hw->max_rx_queues; + dev_info->max_tx_queues = (uint16_t)hw->max_tx_queues; + dev_info->min_rx_bufsize = RTE_ETHER_MIN_MTU; + dev_info->max_rx_pktlen = hw->max_mtu; + /* Next should change when PF support is implemented */ + dev_info->max_mac_addrs = 1; + + if (hw->cap & NFP_NET_CFG_CTRL_RXVLAN) + dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP; + + if (hw->cap & NFP_NET_CFG_CTRL_RXCSUM) + dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_IPV4_CKSUM | + DEV_RX_OFFLOAD_UDP_CKSUM | + DEV_RX_OFFLOAD_TCP_CKSUM; + + dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_JUMBO_FRAME | + DEV_RX_OFFLOAD_RSS_HASH; + + if (hw->cap & NFP_NET_CFG_CTRL_TXVLAN) + dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT; + + if (hw->cap & NFP_NET_CFG_CTRL_TXCSUM) + dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_IPV4_CKSUM | + DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_TCP_CKSUM; + + if (hw->cap & NFP_NET_CFG_CTRL_LSO_ANY) + dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO; + + if (hw->cap & NFP_NET_CFG_CTRL_GATHER) + dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_MULTI_SEGS; + + dev_info->default_rxconf = (struct rte_eth_rxconf) { + .rx_thresh = { + .pthresh = DEFAULT_RX_PTHRESH, + .hthresh = DEFAULT_RX_HTHRESH, + .wthresh = DEFAULT_RX_WTHRESH, + }, + .rx_free_thresh = DEFAULT_RX_FREE_THRESH, + .rx_drop_en = 0, + }; + + dev_info->default_txconf = (struct rte_eth_txconf) { + .tx_thresh = { + .pthresh = 
DEFAULT_TX_PTHRESH, + .hthresh = DEFAULT_TX_HTHRESH, + .wthresh = DEFAULT_TX_WTHRESH, + }, + .tx_free_thresh = DEFAULT_TX_FREE_THRESH, + .tx_rs_thresh = DEFAULT_TX_RSBIT_THRESH, + }; + + dev_info->flow_type_rss_offloads = ETH_RSS_IPV4 | + ETH_RSS_NONFRAG_IPV4_TCP | + ETH_RSS_NONFRAG_IPV4_UDP | + ETH_RSS_IPV6 | + ETH_RSS_NONFRAG_IPV6_TCP | + ETH_RSS_NONFRAG_IPV6_UDP; + + dev_info->reta_size = NFP_NET_CFG_RSS_ITBL_SZ; + dev_info->hash_key_size = NFP_NET_CFG_RSS_KEY_SZ; + + dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G | + ETH_LINK_SPEED_25G | ETH_LINK_SPEED_40G | + ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G; + + return 0; +} + +static const uint32_t * +nfp_net_supported_ptypes_get(struct rte_eth_dev *dev) +{ + static const uint32_t ptypes[] = { + /* refers to nfp_net_set_hash() */ + RTE_PTYPE_INNER_L3_IPV4, + RTE_PTYPE_INNER_L3_IPV6, + RTE_PTYPE_INNER_L3_IPV6_EXT, + RTE_PTYPE_INNER_L4_MASK, + RTE_PTYPE_UNKNOWN + }; + + if (dev->rx_pkt_burst == nfp_net_recv_pkts) + return ptypes; + return NULL; +} + +static uint32_t +nfp_net_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx) +{ + struct nfp_net_rxq *rxq; + struct nfp_net_rx_desc *rxds; + uint32_t idx; + uint32_t count; + + rxq = (struct nfp_net_rxq *)dev->data->rx_queues[queue_idx]; + + idx = rxq->rd_p; + + count = 0; + + /* + * Other PMDs are just checking the DD bit in intervals of 4 + * descriptors and counting all four if the first has the DD + * bit on. Of course, this is not accurate but can be good for + * performance. But ideally that should be done in descriptors + * chunks belonging to the same cache line + */ + + while (count < rxq->rx_count) { + rxds = &rxq->rxds[idx]; + if ((rxds->rxd.meta_len_dd & PCIE_DESC_RX_DD) == 0) + break; + + count++; + idx++; + + /* Wrapping? */ + if ((idx) == rxq->rx_count) + idx = 0; + } + + return count; +} + +static int +nfp_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) +{ + struct rte_pci_device *pci_dev; + struct nfp_net_hw *hw; + int base = 0; + + hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); + pci_dev = RTE_ETH_DEV_TO_PCI(dev); + + if (pci_dev->intr_handle.type != RTE_INTR_HANDLE_UIO) + base = 1; + + /* Make sure all updates are written before un-masking */ + rte_wmb(); + nn_cfg_writeb(hw, NFP_NET_CFG_ICR(base + queue_id), + NFP_NET_CFG_ICR_UNMASKED); + return 0; +} + +static int +nfp_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) +{ + struct rte_pci_device *pci_dev; + struct nfp_net_hw *hw; + int base = 0; + + hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); + pci_dev = RTE_ETH_DEV_TO_PCI(dev); + + if (pci_dev->intr_handle.type != RTE_INTR_HANDLE_UIO) + base = 1; + + /* Make sure all updates are written before un-masking */ + rte_wmb(); + nn_cfg_writeb(hw, NFP_NET_CFG_ICR(base + queue_id), 0x1); + return 0; +} + +static void +nfp_net_dev_link_status_print(struct rte_eth_dev *dev) +{ + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_eth_link link; + + rte_eth_linkstatus_get(dev, &link); + if (link.link_status) + PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s", + dev->data->port_id, link.link_speed, + link.link_duplex == ETH_LINK_FULL_DUPLEX + ? 
"full-duplex" : "half-duplex"); + else + PMD_DRV_LOG(INFO, " Port %d: Link Down", + dev->data->port_id); + + PMD_DRV_LOG(INFO, "PCI Address: " PCI_PRI_FMT, + pci_dev->addr.domain, pci_dev->addr.bus, + pci_dev->addr.devid, pci_dev->addr.function); +} + +/* Interrupt configuration and handling */ + +/* + * nfp_net_irq_unmask - Unmask an interrupt + * + * If MSI-X auto-masking is enabled clear the mask bit, otherwise + * clear the ICR for the entry. + */ +static void +nfp_net_irq_unmask(struct rte_eth_dev *dev) +{ + struct nfp_net_hw *hw; + struct rte_pci_device *pci_dev; + + hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); + pci_dev = RTE_ETH_DEV_TO_PCI(dev); + + if (hw->ctrl & NFP_NET_CFG_CTRL_MSIXAUTO) { + /* If MSI-X auto-masking is used, clear the entry */ + rte_wmb(); + rte_intr_ack(&pci_dev->intr_handle); + } else { + /* Make sure all updates are written before un-masking */ + rte_wmb(); + nn_cfg_writeb(hw, NFP_NET_CFG_ICR(NFP_NET_IRQ_LSC_IDX), + NFP_NET_CFG_ICR_UNMASKED); + } +} + +static void +nfp_net_dev_interrupt_handler(void *param) +{ + int64_t timeout; + struct rte_eth_link link; + struct rte_eth_dev *dev = (struct rte_eth_dev *)param; + + PMD_DRV_LOG(DEBUG, "We got a LSC interrupt!!!"); + + rte_eth_linkstatus_get(dev, &link); + + nfp_net_link_update(dev, 0); + + /* likely to up */ + if (!link.link_status) { + /* handle it 1 sec later, wait it being stable */ + timeout = NFP_NET_LINK_UP_CHECK_TIMEOUT; + /* likely to down */ + } else { + /* handle it 4 sec later, wait it being stable */ + timeout = NFP_NET_LINK_DOWN_CHECK_TIMEOUT; + } + + if (rte_eal_alarm_set(timeout * 1000, + nfp_net_dev_interrupt_delayed_handler, + (void *)dev) < 0) { + PMD_INIT_LOG(ERR, "Error setting alarm"); + /* Unmasking */ + nfp_net_irq_unmask(dev); + } +} + +/* + * Interrupt handler which shall be registered for alarm callback for delayed + * handling specific interrupt to wait for the stable nic state. As the NIC + * interrupt state is not stable for nfp after link is just down, it needs + * to wait 4 seconds to get the stable status. + * + * @param handle Pointer to interrupt handle. 
+ * @param param The address of parameter (struct rte_eth_dev *) + * + * @return void + */ +static void +nfp_net_dev_interrupt_delayed_handler(void *param) +{ + struct rte_eth_dev *dev = (struct rte_eth_dev *)param; + + nfp_net_link_update(dev, 0); + _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL); + + nfp_net_dev_link_status_print(dev); + + /* Unmasking */ + nfp_net_irq_unmask(dev); +} + +static int +nfp_net_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) +{ + struct nfp_net_hw *hw; + + hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* check that mtu is within the allowed range */ + if (mtu < RTE_ETHER_MIN_MTU || (uint32_t)mtu > hw->max_mtu) + return -EINVAL; + + /* mtu setting is forbidden if port is started */ + if (dev->data->dev_started) { + PMD_DRV_LOG(ERR, "port %d must be stopped before configuration", + dev->data->port_id); + return -EBUSY; + } + + /* switch to jumbo mode if needed */ + if ((uint32_t)mtu > RTE_ETHER_MAX_LEN) + dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME; + else + dev->data->dev_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME; + + /* update max frame size */ + dev->data->dev_conf.rxmode.max_rx_pkt_len = (uint32_t)mtu; + + /* writing to configuration space */ + nn_cfg_writel(hw, NFP_NET_CFG_MTU, (uint32_t)mtu); + + hw->mtu = mtu; + + return 0; +} + +static int +nfp_net_rx_queue_setup(struct rte_eth_dev *dev, + uint16_t queue_idx, uint16_t nb_desc, + unsigned int socket_id, + const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *mp) +{ + const struct rte_memzone *tz; + struct nfp_net_rxq *rxq; + struct nfp_net_hw *hw; + + hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + PMD_INIT_FUNC_TRACE(); + + /* Validating number of descriptors */ + if (((nb_desc * sizeof(struct nfp_net_rx_desc)) % 128) != 0 || + (nb_desc > NFP_NET_MAX_RX_DESC) || + (nb_desc < NFP_NET_MIN_RX_DESC)) { + PMD_DRV_LOG(ERR, "Wrong nb_desc value"); + return -EINVAL; + } + + /* + * Free memory prior to re-allocation if needed. This is the case after + * calling nfp_net_stop + */ + if (dev->data->rx_queues[queue_idx]) { + nfp_net_rx_queue_release(dev->data->rx_queues[queue_idx]); + dev->data->rx_queues[queue_idx] = NULL; + } + + /* Allocating rx queue data structure */ + rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct nfp_net_rxq), + RTE_CACHE_LINE_SIZE, socket_id); + if (rxq == NULL) + return -ENOMEM; + + /* Hw queues mapping based on firmware configuration */ + rxq->qidx = queue_idx; + rxq->fl_qcidx = queue_idx * hw->stride_rx; + rxq->rx_qcidx = rxq->fl_qcidx + (hw->stride_rx - 1); + rxq->qcp_fl = hw->rx_bar + NFP_QCP_QUEUE_OFF(rxq->fl_qcidx); + rxq->qcp_rx = hw->rx_bar + NFP_QCP_QUEUE_OFF(rxq->rx_qcidx); + + /* + * Tracking mbuf size for detecting a potential mbuf overflow due to + * RX offset + */ + rxq->mem_pool = mp; + rxq->mbuf_size = rxq->mem_pool->elt_size; + rxq->mbuf_size -= (sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM); + hw->flbufsz = rxq->mbuf_size; + + rxq->rx_count = nb_desc; + rxq->port_id = dev->data->port_id; + rxq->rx_free_thresh = rx_conf->rx_free_thresh; + rxq->drop_en = rx_conf->rx_drop_en; + + /* + * Allocate RX ring hardware descriptors. A memzone large enough to + * handle the maximum ring size is allocated in order to allow for + * resizing in later calls to the queue setup function. 
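+	 * (rte_eth_dma_zone_reserve() returns an already existing memzone
+	 * of the same name when one is found, so a later setup call with
+	 * a different nb_desc reuses this maximum-sized ring instead of
+	 * reserving a new one.)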
+ */ + tz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx, + sizeof(struct nfp_net_rx_desc) * + NFP_NET_MAX_RX_DESC, NFP_MEMZONE_ALIGN, + socket_id); + + if (tz == NULL) { + PMD_DRV_LOG(ERR, "Error allocating rx dma"); + nfp_net_rx_queue_release(rxq); + return -ENOMEM; + } + + /* Saving physical and virtual addresses for the RX ring */ + rxq->dma = (uint64_t)tz->iova; + rxq->rxds = (struct nfp_net_rx_desc *)tz->addr; + + /* mbuf pointers array for referencing mbufs linked to RX descriptors */ + rxq->rxbufs = rte_zmalloc_socket("rxq->rxbufs", + sizeof(*rxq->rxbufs) * nb_desc, + RTE_CACHE_LINE_SIZE, socket_id); + if (rxq->rxbufs == NULL) { + nfp_net_rx_queue_release(rxq); + return -ENOMEM; + } + + PMD_RX_LOG(DEBUG, "rxbufs=%p hw_ring=%p dma_addr=0x%" PRIx64, + rxq->rxbufs, rxq->rxds, (unsigned long int)rxq->dma); + + nfp_net_reset_rx_queue(rxq); + + dev->data->rx_queues[queue_idx] = rxq; + rxq->hw = hw; + + /* + * Telling the HW about the physical address of the RX ring and number + * of descriptors in log2 format + */ + nn_cfg_writeq(hw, NFP_NET_CFG_RXR_ADDR(queue_idx), rxq->dma); + nn_cfg_writeb(hw, NFP_NET_CFG_RXR_SZ(queue_idx), rte_log2_u32(nb_desc)); + + return 0; +} + +static int +nfp_net_rx_fill_freelist(struct nfp_net_rxq *rxq) +{ + struct nfp_net_rx_buff *rxe = rxq->rxbufs; + uint64_t dma_addr; + unsigned i; + + PMD_RX_LOG(DEBUG, "nfp_net_rx_fill_freelist for %u descriptors", + rxq->rx_count); + + for (i = 0; i < rxq->rx_count; i++) { + struct nfp_net_rx_desc *rxd; + struct rte_mbuf *mbuf = rte_pktmbuf_alloc(rxq->mem_pool); + + if (mbuf == NULL) { + PMD_DRV_LOG(ERR, "RX mbuf alloc failed queue_id=%u", + (unsigned)rxq->qidx); + return -ENOMEM; + } + + dma_addr = rte_cpu_to_le_64(RTE_MBUF_DMA_ADDR_DEFAULT(mbuf)); + + rxd = &rxq->rxds[i]; + rxd->fld.dd = 0; + rxd->fld.dma_addr_hi = (dma_addr >> 32) & 0xff; + rxd->fld.dma_addr_lo = dma_addr & 0xffffffff; + rxe[i].mbuf = mbuf; + PMD_RX_LOG(DEBUG, "[%d]: %" PRIx64, i, dma_addr); + } + + /* Make sure all writes are flushed before telling the hardware */ + rte_wmb(); + + /* Not advertising the whole ring as the firmware gets confused if so */ + PMD_RX_LOG(DEBUG, "Increment FL write pointer in %u", + rxq->rx_count - 1); + + nfp_qcp_ptr_add(rxq->qcp_fl, NFP_QCP_WRITE_PTR, rxq->rx_count - 1); + + return 0; +} + +static int +nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, + uint16_t nb_desc, unsigned int socket_id, + const struct rte_eth_txconf *tx_conf) +{ + const struct rte_memzone *tz; + struct nfp_net_txq *txq; + uint16_t tx_free_thresh; + struct nfp_net_hw *hw; + + hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + PMD_INIT_FUNC_TRACE(); + + /* Validating number of descriptors */ + if (((nb_desc * sizeof(struct nfp_net_tx_desc)) % 128) != 0 || + (nb_desc > NFP_NET_MAX_TX_DESC) || + (nb_desc < NFP_NET_MIN_TX_DESC)) { + PMD_DRV_LOG(ERR, "Wrong nb_desc value"); + return -EINVAL; + } + + tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ? + tx_conf->tx_free_thresh : + DEFAULT_TX_FREE_THRESH); + + if (tx_free_thresh > (nb_desc)) { + PMD_DRV_LOG(ERR, + "tx_free_thresh must be less than the number of TX " + "descriptors. (tx_free_thresh=%u port=%d " + "queue=%d)", (unsigned int)tx_free_thresh, + dev->data->port_id, (int)queue_idx); + return -(EINVAL); + } + + /* + * Free memory prior to re-allocation if needed. 
This is the case after + * calling nfp_net_stop + */ + if (dev->data->tx_queues[queue_idx]) { + PMD_TX_LOG(DEBUG, "Freeing memory prior to re-allocation %d", + queue_idx); + nfp_net_tx_queue_release(dev->data->tx_queues[queue_idx]); + dev->data->tx_queues[queue_idx] = NULL; + } + + /* Allocating tx queue data structure */ + txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct nfp_net_txq), + RTE_CACHE_LINE_SIZE, socket_id); + if (txq == NULL) { + PMD_DRV_LOG(ERR, "Error allocating tx dma"); + return -ENOMEM; + } + + /* + * Allocate TX ring hardware descriptors. A memzone large enough to + * handle the maximum ring size is allocated in order to allow for + * resizing in later calls to the queue setup function. + */ + tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx, + sizeof(struct nfp_net_tx_desc) * + NFP_NET_MAX_TX_DESC, NFP_MEMZONE_ALIGN, + socket_id); + if (tz == NULL) { + PMD_DRV_LOG(ERR, "Error allocating tx dma"); + nfp_net_tx_queue_release(txq); + return -ENOMEM; + } + + txq->tx_count = nb_desc; + txq->tx_free_thresh = tx_free_thresh; + txq->tx_pthresh = tx_conf->tx_thresh.pthresh; + txq->tx_hthresh = tx_conf->tx_thresh.hthresh; + txq->tx_wthresh = tx_conf->tx_thresh.wthresh; + + /* queue mapping based on firmware configuration */ + txq->qidx = queue_idx; + txq->tx_qcidx = queue_idx * hw->stride_tx; + txq->qcp_q = hw->tx_bar + NFP_QCP_QUEUE_OFF(txq->tx_qcidx); + + txq->port_id = dev->data->port_id; + + /* Saving physical and virtual addresses for the TX ring */ + txq->dma = (uint64_t)tz->iova; + txq->txds = (struct nfp_net_tx_desc *)tz->addr; + + /* mbuf pointers array for referencing mbufs linked to TX descriptors */ + txq->txbufs = rte_zmalloc_socket("txq->txbufs", + sizeof(*txq->txbufs) * nb_desc, + RTE_CACHE_LINE_SIZE, socket_id); + if (txq->txbufs == NULL) { + nfp_net_tx_queue_release(txq); + return -ENOMEM; + } + PMD_TX_LOG(DEBUG, "txbufs=%p hw_ring=%p dma_addr=0x%" PRIx64, + txq->txbufs, txq->txds, (unsigned long int)txq->dma); + + nfp_net_reset_tx_queue(txq); + + dev->data->tx_queues[queue_idx] = txq; + txq->hw = hw; + + /* + * Telling the HW about the physical address of the TX ring and number + * of descriptors in log2 format + */ + nn_cfg_writeq(hw, NFP_NET_CFG_TXR_ADDR(queue_idx), txq->dma); + nn_cfg_writeb(hw, NFP_NET_CFG_TXR_SZ(queue_idx), rte_log2_u32(nb_desc)); + + return 0; +} + +/* nfp_net_tx_tso - Set TX descriptor for TSO */ +static inline void +nfp_net_tx_tso(struct nfp_net_txq *txq, struct nfp_net_tx_desc *txd, + struct rte_mbuf *mb) +{ + uint64_t ol_flags; + struct nfp_net_hw *hw = txq->hw; + + if (!(hw->cap & NFP_NET_CFG_CTRL_LSO_ANY)) + goto clean_txd; + + ol_flags = mb->ol_flags; + + if (!(ol_flags & PKT_TX_TCP_SEG)) + goto clean_txd; + + txd->l3_offset = mb->l2_len; + txd->l4_offset = mb->l2_len + mb->l3_len; + txd->lso_hdrlen = mb->l2_len + mb->l3_len + mb->l4_len; + txd->mss = rte_cpu_to_le_16(mb->tso_segsz); + txd->flags = PCIE_DESC_TX_LSO; + return; + +clean_txd: + txd->flags = 0; + txd->l3_offset = 0; + txd->l4_offset = 0; + txd->lso_hdrlen = 0; + txd->mss = 0; +} + +/* nfp_net_tx_cksum - Set TX CSUM offload flags in TX descriptor */ +static inline void +nfp_net_tx_cksum(struct nfp_net_txq *txq, struct nfp_net_tx_desc *txd, + struct rte_mbuf *mb) +{ + uint64_t ol_flags; + struct nfp_net_hw *hw = txq->hw; + + if (!(hw->cap & NFP_NET_CFG_CTRL_TXCSUM)) + return; + + ol_flags = mb->ol_flags; + + /* IPv6 does not need checksum */ + if (ol_flags & PKT_TX_IP_CKSUM) + txd->flags |= PCIE_DESC_TX_IP4_CSUM; + + switch (ol_flags & PKT_TX_L4_MASK) { + 
case PKT_TX_UDP_CKSUM: + txd->flags |= PCIE_DESC_TX_UDP_CSUM; + break; + case PKT_TX_TCP_CKSUM: + txd->flags |= PCIE_DESC_TX_TCP_CSUM; + break; + } + + if (ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK)) + txd->flags |= PCIE_DESC_TX_CSUM; +} + +/* nfp_net_rx_cksum - set mbuf checksum flags based on RX descriptor flags */ +static inline void +nfp_net_rx_cksum(struct nfp_net_rxq *rxq, struct nfp_net_rx_desc *rxd, + struct rte_mbuf *mb) +{ + struct nfp_net_hw *hw = rxq->hw; + + if (!(hw->ctrl & NFP_NET_CFG_CTRL_RXCSUM)) + return; + + /* If IPv4 and IP checksum error, fail */ + if (unlikely((rxd->rxd.flags & PCIE_DESC_RX_IP4_CSUM) && + !(rxd->rxd.flags & PCIE_DESC_RX_IP4_CSUM_OK))) + mb->ol_flags |= PKT_RX_IP_CKSUM_BAD; + else + mb->ol_flags |= PKT_RX_IP_CKSUM_GOOD; + + /* If neither UDP nor TCP return */ + if (!(rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM) && + !(rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM)) + return; + + if (likely(rxd->rxd.flags & PCIE_DESC_RX_L4_CSUM_OK)) + mb->ol_flags |= PKT_RX_L4_CKSUM_GOOD; + else + mb->ol_flags |= PKT_RX_L4_CKSUM_BAD; +} + +#define NFP_HASH_OFFSET ((uint8_t *)mbuf->buf_addr + mbuf->data_off - 4) +#define NFP_HASH_TYPE_OFFSET ((uint8_t *)mbuf->buf_addr + mbuf->data_off - 8) + +#define NFP_DESC_META_LEN(d) (d->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK) + +/* + * nfp_net_set_hash - Set mbuf hash data + * + * The RSS hash and hash-type are pre-pended to the packet data. + * Extract and decode it and set the mbuf fields. + */ +static inline void +nfp_net_set_hash(struct nfp_net_rxq *rxq, struct nfp_net_rx_desc *rxd, + struct rte_mbuf *mbuf) +{ + struct nfp_net_hw *hw = rxq->hw; + uint8_t *meta_offset; + uint32_t meta_info; + uint32_t hash = 0; + uint32_t hash_type = 0; + + if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS)) + return; + + /* this is true for new firmwares */ + if (likely(((hw->cap & NFP_NET_CFG_CTRL_RSS2) || + (NFD_CFG_MAJOR_VERSION_of(hw->ver) == 4)) && + NFP_DESC_META_LEN(rxd))) { + /* + * new metadata api: + * <---- 32 bit -----> + * m field type word + * e data field #2 + * t data field #1 + * a data field #0 + * ==================== + * packet data + * + * Field type word contains up to 8 4bit field types + * A 4bit field type refers to a data field word + * A data field word can have several 4bit field types + */ + meta_offset = rte_pktmbuf_mtod(mbuf, uint8_t *); + meta_offset -= NFP_DESC_META_LEN(rxd); + meta_info = rte_be_to_cpu_32(*(uint32_t *)meta_offset); + meta_offset += 4; + /* NFP PMD just supports metadata for hashing */ + switch (meta_info & NFP_NET_META_FIELD_MASK) { + case NFP_NET_META_HASH: + /* next field type is about the hash type */ + meta_info >>= NFP_NET_META_FIELD_SIZE; + /* hash value is in the data field */ + hash = rte_be_to_cpu_32(*(uint32_t *)meta_offset); + hash_type = meta_info & NFP_NET_META_FIELD_MASK; + break; + default: + /* Unsupported metadata can be a performance issue */ + return; + } + } else { + if (!(rxd->rxd.flags & PCIE_DESC_RX_RSS)) + return; + + hash = rte_be_to_cpu_32(*(uint32_t *)NFP_HASH_OFFSET); + hash_type = rte_be_to_cpu_32(*(uint32_t *)NFP_HASH_TYPE_OFFSET); + } + + mbuf->hash.rss = hash; + mbuf->ol_flags |= PKT_RX_RSS_HASH; + + switch (hash_type) { + case NFP_NET_RSS_IPV4: + mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV4; + break; + case NFP_NET_RSS_IPV6: + mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV6; + break; + case NFP_NET_RSS_IPV6_EX: + mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV6_EXT; + break; + case NFP_NET_RSS_IPV4_TCP: + mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV6_EXT; + break; + case 
NFP_NET_RSS_IPV6_TCP: + mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV6_EXT; + break; + case NFP_NET_RSS_IPV4_UDP: + mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV6_EXT; + break; + case NFP_NET_RSS_IPV6_UDP: + mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV6_EXT; + break; + default: + mbuf->packet_type |= RTE_PTYPE_INNER_L4_MASK; + } +} + +static inline void +nfp_net_mbuf_alloc_failed(struct nfp_net_rxq *rxq) +{ + rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++; +} + +#define NFP_DESC_META_LEN(d) (d->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK) + +/* + * RX path design: + * + * There are some decisions to take: + * 1) How to check DD RX descriptors bit + * 2) How and when to allocate new mbufs + * + * Current implementation checks just one single DD bit each loop. As each + * descriptor is 8 bytes, it is likely a good idea to check descriptors in + * a single cache line instead. Tests with this change have not shown any + * performance improvement but it requires further investigation. For example, + * depending on which descriptor is next, the number of descriptors could be + * less than 8 for just checking those in the same cache line. This implies + * extra work which could be counterproductive by itself. Indeed, last firmware + * changes are just doing this: writing several descriptors with the DD bit + * for saving PCIe bandwidth and DMA operations from the NFP. + * + * Mbuf allocation is done when a new packet is received. Then the descriptor + * is automatically linked with the new mbuf and the old one is given to the + * user. The main drawback with this design is mbuf allocation is heavier than + * using bulk allocations allowed by DPDK with rte_mempool_get_bulk. From the + * cache point of view it does not seem allocating the mbuf early on as we are + * doing now have any benefit at all. Again, tests with this change have not + * shown any improvement. Also, rte_mempool_get_bulk returns all or nothing + * so looking at the implications of this type of allocation should be studied + * deeply + */ + +static uint16_t +nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) +{ + struct nfp_net_rxq *rxq; + struct nfp_net_rx_desc *rxds; + struct nfp_net_rx_buff *rxb; + struct nfp_net_hw *hw; + struct rte_mbuf *mb; + struct rte_mbuf *new_mb; + uint16_t nb_hold; + uint64_t dma_addr; + int avail; + + rxq = rx_queue; + if (unlikely(rxq == NULL)) { + /* + * DPDK just checks the queue is lower than max queues + * enabled. But the queue needs to be configured + */ + RTE_LOG_DP(ERR, PMD, "RX Bad queue\n"); + return -EINVAL; + } + + hw = rxq->hw; + avail = 0; + nb_hold = 0; + + while (avail < nb_pkts) { + rxb = &rxq->rxbufs[rxq->rd_p]; + if (unlikely(rxb == NULL)) { + RTE_LOG_DP(ERR, PMD, "rxb does not exist!\n"); + break; + } + + rxds = &rxq->rxds[rxq->rd_p]; + if ((rxds->rxd.meta_len_dd & PCIE_DESC_RX_DD) == 0) + break; + + /* + * Memory barrier to ensure that we won't do other + * reads before the DD bit. + */ + rte_rmb(); + + /* + * We got a packet. 
Let's alloc a new mbuf for refilling the + * free descriptor ring as soon as possible + */ + new_mb = rte_pktmbuf_alloc(rxq->mem_pool); + if (unlikely(new_mb == NULL)) { + RTE_LOG_DP(DEBUG, PMD, + "RX mbuf alloc failed port_id=%u queue_id=%u\n", + rxq->port_id, (unsigned int)rxq->qidx); + nfp_net_mbuf_alloc_failed(rxq); + break; + } + + nb_hold++; + + /* + * Grab the mbuf and refill the descriptor with the + * previously allocated mbuf + */ + mb = rxb->mbuf; + rxb->mbuf = new_mb; + + PMD_RX_LOG(DEBUG, "Packet len: %u, mbuf_size: %u", + rxds->rxd.data_len, rxq->mbuf_size); + + /* Size of this segment */ + mb->data_len = rxds->rxd.data_len - NFP_DESC_META_LEN(rxds); + /* Size of the whole packet. We just support 1 segment */ + mb->pkt_len = rxds->rxd.data_len - NFP_DESC_META_LEN(rxds); + + if (unlikely((mb->data_len + hw->rx_offset) > + rxq->mbuf_size)) { + /* + * This should not happen and the user has the + * responsibility of avoiding it. But we have + * to give some info about the error + */ + RTE_LOG_DP(ERR, PMD, + "mbuf overflow likely due to the RX offset.\n" + "\t\tYour mbuf size should have extra space for" + " RX offset=%u bytes.\n" + "\t\tCurrently you just have %u bytes available" + " but the received packet is %u bytes long", + hw->rx_offset, + rxq->mbuf_size - hw->rx_offset, + mb->data_len); + return -EINVAL; + } + + /* Filling the received mbuf with packet info */ + if (hw->rx_offset) + mb->data_off = RTE_PKTMBUF_HEADROOM + hw->rx_offset; + else + mb->data_off = RTE_PKTMBUF_HEADROOM + + NFP_DESC_META_LEN(rxds); + + /* No scatter mode supported */ + mb->nb_segs = 1; + mb->next = NULL; + + mb->port = rxq->port_id; + + /* Checking the RSS flag */ + nfp_net_set_hash(rxq, rxds, mb); + + /* Checking the checksum flag */ + nfp_net_rx_cksum(rxq, rxds, mb); + + if ((rxds->rxd.flags & PCIE_DESC_RX_VLAN) && + (hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN)) { + mb->vlan_tci = rte_cpu_to_le_32(rxds->rxd.vlan); + mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED; + } + + /* Adding the mbuf to the mbuf array passed by the app */ + rx_pkts[avail++] = mb; + + /* Now resetting and updating the descriptor */ + rxds->vals[0] = 0; + rxds->vals[1] = 0; + dma_addr = rte_cpu_to_le_64(RTE_MBUF_DMA_ADDR_DEFAULT(new_mb)); + rxds->fld.dd = 0; + rxds->fld.dma_addr_hi = (dma_addr >> 32) & 0xff; + rxds->fld.dma_addr_lo = dma_addr & 0xffffffff; + + rxq->rd_p++; + if (unlikely(rxq->rd_p == rxq->rx_count)) /* wrapping?*/ + rxq->rd_p = 0; + } + + if (nb_hold == 0) + return nb_hold; + + PMD_RX_LOG(DEBUG, "RX port_id=%u queue_id=%u, %d packets received", + rxq->port_id, (unsigned int)rxq->qidx, nb_hold); + + nb_hold += rxq->nb_rx_hold; + + /* + * FL descriptors needs to be written before incrementing the + * FL queue WR pointer + */ + rte_wmb(); + if (nb_hold > rxq->rx_free_thresh) { + PMD_RX_LOG(DEBUG, "port=%u queue=%u nb_hold=%u avail=%u", + rxq->port_id, (unsigned int)rxq->qidx, + (unsigned)nb_hold, (unsigned)avail); + nfp_qcp_ptr_add(rxq->qcp_fl, NFP_QCP_WRITE_PTR, nb_hold); + nb_hold = 0; + } + rxq->nb_rx_hold = nb_hold; + + return avail; +} + +/* + * nfp_net_tx_free_bufs - Check for descriptors with a complete + * status + * @txq: TX queue to work with + * Returns number of descriptors freed + */ +int +nfp_net_tx_free_bufs(struct nfp_net_txq *txq) +{ + uint32_t qcp_rd_p; + int todo; + + PMD_TX_LOG(DEBUG, "queue %u. 
Check for descriptor with a complete" + " status", txq->qidx); + + /* Work out how many packets have been sent */ + qcp_rd_p = nfp_qcp_read(txq->qcp_q, NFP_QCP_READ_PTR); + + if (qcp_rd_p == txq->rd_p) { + PMD_TX_LOG(DEBUG, "queue %u: It seems harrier is not sending " + "packets (%u, %u)", txq->qidx, + qcp_rd_p, txq->rd_p); + return 0; + } + + if (qcp_rd_p > txq->rd_p) + todo = qcp_rd_p - txq->rd_p; + else + todo = qcp_rd_p + txq->tx_count - txq->rd_p; + + PMD_TX_LOG(DEBUG, "qcp_rd_p %u, txq->rd_p: %u, qcp->rd_p: %u", + qcp_rd_p, txq->rd_p, txq->rd_p); + + if (todo == 0) + return todo; + + txq->rd_p += todo; + if (unlikely(txq->rd_p >= txq->tx_count)) + txq->rd_p -= txq->tx_count; + + return todo; +} + +/* Leaving always free descriptors for avoiding wrapping confusion */ +static inline +uint32_t nfp_free_tx_desc(struct nfp_net_txq *txq) +{ + if (txq->wr_p >= txq->rd_p) + return txq->tx_count - (txq->wr_p - txq->rd_p) - 8; + else + return txq->rd_p - txq->wr_p - 8; +} + +/* + * nfp_net_txq_full - Check if the TX queue free descriptors + * is below tx_free_threshold + * + * @txq: TX queue to check + * + * This function uses the host copy* of read/write pointers + */ +static inline +uint32_t nfp_net_txq_full(struct nfp_net_txq *txq) +{ + return (nfp_free_tx_desc(txq) < txq->tx_free_thresh); +} + +static uint16_t +nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) +{ + struct nfp_net_txq *txq; + struct nfp_net_hw *hw; + struct nfp_net_tx_desc *txds, txd; + struct rte_mbuf *pkt; + uint64_t dma_addr; + int pkt_size, dma_size; + uint16_t free_descs, issued_descs; + struct rte_mbuf **lmbuf; + int i; + + txq = tx_queue; + hw = txq->hw; + txds = &txq->txds[txq->wr_p]; + + PMD_TX_LOG(DEBUG, "working for queue %u at pos %d and %u packets", + txq->qidx, txq->wr_p, nb_pkts); + + if ((nfp_free_tx_desc(txq) < nb_pkts) || (nfp_net_txq_full(txq))) + nfp_net_tx_free_bufs(txq); + + free_descs = (uint16_t)nfp_free_tx_desc(txq); + if (unlikely(free_descs == 0)) + return 0; + + pkt = *tx_pkts; + + i = 0; + issued_descs = 0; + PMD_TX_LOG(DEBUG, "queue: %u. Sending %u packets", + txq->qidx, nb_pkts); + /* Sending packets */ + while ((i < nb_pkts) && free_descs) { + /* Grabbing the mbuf linked to the current descriptor */ + lmbuf = &txq->txbufs[txq->wr_p].mbuf; + /* Warming the cache for releasing the mbuf later on */ + RTE_MBUF_PREFETCH_TO_FREE(*lmbuf); + + pkt = *(tx_pkts + i); + + if (unlikely((pkt->nb_segs > 1) && + !(hw->cap & NFP_NET_CFG_CTRL_GATHER))) { + PMD_INIT_LOG(INFO, "NFP_NET_CFG_CTRL_GATHER not set"); + rte_panic("Multisegment packet unsupported\n"); + } + + /* Checking if we have enough descriptors */ + if (unlikely(pkt->nb_segs > free_descs)) + goto xmit_end; + + /* + * Checksum and VLAN flags just in the first descriptor for a + * multisegment packet, but TSO info needs to be in all of them. + */ + txd.data_len = pkt->pkt_len; + nfp_net_tx_tso(txq, &txd, pkt); + nfp_net_tx_cksum(txq, &txd, pkt); + + if ((pkt->ol_flags & PKT_TX_VLAN_PKT) && + (hw->cap & NFP_NET_CFG_CTRL_TXVLAN)) { + txd.flags |= PCIE_DESC_TX_VLAN; + txd.vlan = pkt->vlan_tci; + } + + /* + * mbuf data_len is the data in one segment and pkt_len data + * in the whole packet. 
When the packet is just one segment, + * then data_len = pkt_len + */ + pkt_size = pkt->pkt_len; + + while (pkt) { + /* Copying TSO, VLAN and cksum info */ + *txds = txd; + + /* Releasing mbuf used by this descriptor previously*/ + if (*lmbuf) + rte_pktmbuf_free_seg(*lmbuf); + + /* + * Linking mbuf with descriptor for being released + * next time descriptor is used + */ + *lmbuf = pkt; + + dma_size = pkt->data_len; + dma_addr = rte_mbuf_data_iova(pkt); + PMD_TX_LOG(DEBUG, "Working with mbuf at dma address:" + "%" PRIx64 "", dma_addr); + + /* Filling descriptors fields */ + txds->dma_len = dma_size; + txds->data_len = txd.data_len; + txds->dma_addr_hi = (dma_addr >> 32) & 0xff; + txds->dma_addr_lo = (dma_addr & 0xffffffff); + ASSERT(free_descs > 0); + free_descs--; + + txq->wr_p++; + if (unlikely(txq->wr_p == txq->tx_count)) /* wrapping?*/ + txq->wr_p = 0; + + pkt_size -= dma_size; + + /* + * Making the EOP, packets with just one segment + * the priority + */ + if (likely(!pkt_size)) + txds->offset_eop = PCIE_DESC_TX_EOP; + else + txds->offset_eop = 0; + + pkt = pkt->next; + /* Referencing next free TX descriptor */ + txds = &txq->txds[txq->wr_p]; + lmbuf = &txq->txbufs[txq->wr_p].mbuf; + issued_descs++; + } + i++; + } + +xmit_end: + /* Increment write pointers. Force memory write before we let HW know */ + rte_wmb(); + nfp_qcp_ptr_add(txq->qcp_q, NFP_QCP_WRITE_PTR, issued_descs); + + return i; +} + +static int +nfp_net_vlan_offload_set(struct rte_eth_dev *dev, int mask) +{ + uint32_t new_ctrl, update; + struct nfp_net_hw *hw; + int ret; + + hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); + new_ctrl = 0; + + if ((mask & ETH_VLAN_FILTER_OFFLOAD) || + (mask & ETH_VLAN_EXTEND_OFFLOAD)) + PMD_DRV_LOG(INFO, "No support for ETH_VLAN_FILTER_OFFLOAD or" + " ETH_VLAN_EXTEND_OFFLOAD"); + + /* Enable vlan strip if it is not configured yet */ + if ((mask & ETH_VLAN_STRIP_OFFLOAD) && + !(hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN)) + new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_RXVLAN; + + /* Disable vlan strip just if it is configured */ + if (!(mask & ETH_VLAN_STRIP_OFFLOAD) && + (hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN)) + new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_RXVLAN; + + if (new_ctrl == 0) + return 0; + + update = NFP_NET_CFG_UPDATE_GEN; + + ret = nfp_net_reconfig(hw, new_ctrl, update); + if (!ret) + hw->ctrl = new_ctrl; + + return ret; +} + +static int +nfp_net_rss_reta_write(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size) +{ + uint32_t reta, mask; + int i, j; + int idx, shift; + struct nfp_net_hw *hw = + NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (reta_size != NFP_NET_CFG_RSS_ITBL_SZ) { + PMD_DRV_LOG(ERR, "The size of hash lookup table configured " + "(%d) doesn't match the number hardware can supported " + "(%d)", reta_size, NFP_NET_CFG_RSS_ITBL_SZ); + return -EINVAL; + } + + /* + * Update Redirection Table. 
There are 128 8bit-entries which can be + * manage as 32 32bit-entries + */ + for (i = 0; i < reta_size; i += 4) { + /* Handling 4 RSS entries per loop */ + idx = i / RTE_RETA_GROUP_SIZE; + shift = i % RTE_RETA_GROUP_SIZE; + mask = (uint8_t)((reta_conf[idx].mask >> shift) & 0xF); + + if (!mask) + continue; + + reta = 0; + /* If all 4 entries were set, don't need read RETA register */ + if (mask != 0xF) + reta = nn_cfg_readl(hw, NFP_NET_CFG_RSS_ITBL + i); + + for (j = 0; j < 4; j++) { + if (!(mask & (0x1 << j))) + continue; + if (mask != 0xF) + /* Clearing the entry bits */ + reta &= ~(0xFF << (8 * j)); + reta |= reta_conf[idx].reta[shift + j] << (8 * j); + } + nn_cfg_writel(hw, NFP_NET_CFG_RSS_ITBL + (idx * 64) + shift, + reta); + } + return 0; +} + +/* Update Redirection Table(RETA) of Receive Side Scaling of Ethernet device */ +static int +nfp_net_reta_update(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size) +{ + struct nfp_net_hw *hw = + NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t update; + int ret; + + if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS)) + return -EINVAL; + + ret = nfp_net_rss_reta_write(dev, reta_conf, reta_size); + if (ret != 0) + return ret; + + update = NFP_NET_CFG_UPDATE_RSS; + + if (nfp_net_reconfig(hw, hw->ctrl, update) < 0) + return -EIO; + + return 0; +} + + /* Query Redirection Table(RETA) of Receive Side Scaling of Ethernet device. */ +static int +nfp_net_reta_query(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size) +{ + uint8_t i, j, mask; + int idx, shift; + uint32_t reta; + struct nfp_net_hw *hw; + + hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS)) + return -EINVAL; + + if (reta_size != NFP_NET_CFG_RSS_ITBL_SZ) { + PMD_DRV_LOG(ERR, "The size of hash lookup table configured " + "(%d) doesn't match the number hardware can supported " + "(%d)", reta_size, NFP_NET_CFG_RSS_ITBL_SZ); + return -EINVAL; + } + + /* + * Reading Redirection Table. 
There are 128 8bit-entries which can be + * manage as 32 32bit-entries + */ + for (i = 0; i < reta_size; i += 4) { + /* Handling 4 RSS entries per loop */ + idx = i / RTE_RETA_GROUP_SIZE; + shift = i % RTE_RETA_GROUP_SIZE; + mask = (uint8_t)((reta_conf[idx].mask >> shift) & 0xF); + + if (!mask) + continue; + + reta = nn_cfg_readl(hw, NFP_NET_CFG_RSS_ITBL + (idx * 64) + + shift); + for (j = 0; j < 4; j++) { + if (!(mask & (0x1 << j))) + continue; + reta_conf[idx].reta[shift + j] = + (uint8_t)((reta >> (8 * j)) & 0xF); + } + } + return 0; +} + +static int +nfp_net_rss_hash_write(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf) +{ + struct nfp_net_hw *hw; + uint64_t rss_hf; + uint32_t cfg_rss_ctrl = 0; + uint8_t key; + int i; + + hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* Writing the key byte a byte */ + for (i = 0; i < rss_conf->rss_key_len; i++) { + memcpy(&key, &rss_conf->rss_key[i], 1); + nn_cfg_writeb(hw, NFP_NET_CFG_RSS_KEY + i, key); + } + + rss_hf = rss_conf->rss_hf; + + if (rss_hf & ETH_RSS_IPV4) + cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4; + + if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) + cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4_TCP; + + if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) + cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4_UDP; + + if (rss_hf & ETH_RSS_IPV6) + cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6; + + if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) + cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6_TCP; + + if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) + cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6_UDP; + + cfg_rss_ctrl |= NFP_NET_CFG_RSS_MASK; + cfg_rss_ctrl |= NFP_NET_CFG_RSS_TOEPLITZ; + + /* configuring where to apply the RSS hash */ + nn_cfg_writel(hw, NFP_NET_CFG_RSS_CTRL, cfg_rss_ctrl); + + /* Writing the key size */ + nn_cfg_writeb(hw, NFP_NET_CFG_RSS_KEY_SZ, rss_conf->rss_key_len); + + return 0; +} + +static int +nfp_net_rss_hash_update(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf) +{ + uint32_t update; + uint64_t rss_hf; + struct nfp_net_hw *hw; + + hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + rss_hf = rss_conf->rss_hf; + + /* Checking if RSS is enabled */ + if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS)) { + if (rss_hf != 0) { /* Enable RSS? 
*/ + PMD_DRV_LOG(ERR, "RSS unsupported"); + return -EINVAL; + } + return 0; /* Nothing to do */ + } + + if (rss_conf->rss_key_len > NFP_NET_CFG_RSS_KEY_SZ) { + PMD_DRV_LOG(ERR, "hash key too long"); + return -EINVAL; + } + + nfp_net_rss_hash_write(dev, rss_conf); + + update = NFP_NET_CFG_UPDATE_RSS; + + if (nfp_net_reconfig(hw, hw->ctrl, update) < 0) + return -EIO; + + return 0; +} + +static int +nfp_net_rss_hash_conf_get(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf) +{ + uint64_t rss_hf; + uint32_t cfg_rss_ctrl; + uint8_t key; + int i; + struct nfp_net_hw *hw; + + hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS)) + return -EINVAL; + + rss_hf = rss_conf->rss_hf; + cfg_rss_ctrl = nn_cfg_readl(hw, NFP_NET_CFG_RSS_CTRL); + + if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4) + rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV4_UDP; + + if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4_TCP) + rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP; + + if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6_TCP) + rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP; + + if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4_UDP) + rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP; + + if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6_UDP) + rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP; + + if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6) + rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV6_UDP; + + /* Reading the key size */ + rss_conf->rss_key_len = nn_cfg_readl(hw, NFP_NET_CFG_RSS_KEY_SZ); + + /* Reading the key byte a byte */ + for (i = 0; i < rss_conf->rss_key_len; i++) { + key = nn_cfg_readb(hw, NFP_NET_CFG_RSS_KEY + i); + memcpy(&rss_conf->rss_key[i], &key, 1); + } + + return 0; +} + +static int +nfp_net_rss_config_default(struct rte_eth_dev *dev) +{ + struct rte_eth_conf *dev_conf; + struct rte_eth_rss_conf rss_conf; + struct rte_eth_rss_reta_entry64 nfp_reta_conf[2]; + uint16_t rx_queues = dev->data->nb_rx_queues; + uint16_t queue; + int i, j, ret; + + PMD_DRV_LOG(INFO, "setting default RSS conf for %u queues", + rx_queues); + + nfp_reta_conf[0].mask = ~0x0; + nfp_reta_conf[1].mask = ~0x0; + + queue = 0; + for (i = 0; i < 0x40; i += 8) { + for (j = i; j < (i + 8); j++) { + nfp_reta_conf[0].reta[j] = queue; + nfp_reta_conf[1].reta[j] = queue++; + queue %= rx_queues; + } + } + ret = nfp_net_rss_reta_write(dev, nfp_reta_conf, 0x80); + if (ret != 0) + return ret; + + dev_conf = &dev->data->dev_conf; + if (!dev_conf) { + PMD_DRV_LOG(INFO, "wrong rss conf"); + return -EINVAL; + } + rss_conf = dev_conf->rx_adv_conf.rss_conf; + + ret = nfp_net_rss_hash_write(dev, &rss_conf); + + return ret; +} + + +/* Initialise and register driver with DPDK Application */ +static const struct eth_dev_ops nfp_net_eth_dev_ops = { + .dev_configure = nfp_net_configure, + .dev_start = nfp_net_start, + .dev_stop = nfp_net_stop, + .dev_set_link_up = nfp_net_set_link_up, + .dev_set_link_down = nfp_net_set_link_down, + .dev_close = nfp_net_close, + .promiscuous_enable = nfp_net_promisc_enable, + .promiscuous_disable = nfp_net_promisc_disable, + .link_update = nfp_net_link_update, + .stats_get = nfp_net_stats_get, + .stats_reset = nfp_net_stats_reset, + .dev_infos_get = nfp_net_infos_get, + .dev_supported_ptypes_get = nfp_net_supported_ptypes_get, + .mtu_set = nfp_net_dev_mtu_set, + .mac_addr_set = nfp_set_mac_addr, + .vlan_offload_set = nfp_net_vlan_offload_set, + .reta_update = nfp_net_reta_update, + .reta_query = nfp_net_reta_query, + .rss_hash_update = nfp_net_rss_hash_update, + .rss_hash_conf_get = nfp_net_rss_hash_conf_get, + .rx_queue_setup = 
nfp_net_rx_queue_setup, + .rx_queue_release = nfp_net_rx_queue_release, + .rx_queue_count = nfp_net_rx_queue_count, + .tx_queue_setup = nfp_net_tx_queue_setup, + .tx_queue_release = nfp_net_tx_queue_release, + .rx_queue_intr_enable = nfp_rx_queue_intr_enable, + .rx_queue_intr_disable = nfp_rx_queue_intr_disable, +}; + +/* + * All eth_dev created got its private data, but before nfp_net_init, that + * private data is referencing private data for all the PF ports. This is due + * to how the vNIC bars are mapped based on first port, so all ports need info + * about port 0 private data. Inside nfp_net_init the private data pointer is + * changed to the right address for each port once the bars have been mapped. + * + * This functions helps to find out which port and therefore which offset + * inside the private data array to use. + */ +static int +get_pf_port_number(char *name) +{ + char *pf_str = name; + int size = 0; + + while ((*pf_str != '_') && (*pf_str != '\0') && (size++ < 30)) + pf_str++; + + if (size == 30) + /* + * This should not happen at all and it would mean major + * implementation fault. + */ + rte_panic("nfp_net: problem with pf device name\n"); + + /* Expecting _portX with X within [0,7] */ + pf_str += 5; + + return (int)strtol(pf_str, NULL, 10); +} + +static int +nfp_net_init(struct rte_eth_dev *eth_dev) +{ + struct rte_pci_device *pci_dev; + struct nfp_net_hw *hw, *hwport0; + + uint64_t tx_bar_off = 0, rx_bar_off = 0; + uint32_t start_q; + int stride = 4; + int port = 0; + int err; + + PMD_INIT_FUNC_TRACE(); + + pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); + + /* NFP can not handle DMA addresses requiring more than 40 bits */ + if (rte_mem_check_dma_mask(40)) { + RTE_LOG(ERR, PMD, "device %s can not be used:", + pci_dev->device.name); + RTE_LOG(ERR, PMD, "\trestricted dma mask to 40 bits!\n"); + return -ENODEV; + }; + + if ((pci_dev->id.device_id == PCI_DEVICE_ID_NFP4000_PF_NIC) || + (pci_dev->id.device_id == PCI_DEVICE_ID_NFP6000_PF_NIC)) { + port = get_pf_port_number(eth_dev->data->name); + if (port < 0 || port > 7) { + PMD_DRV_LOG(ERR, "Port value is wrong"); + return -ENODEV; + } + + PMD_INIT_LOG(DEBUG, "Working with PF port value %d", port); + + /* This points to port 0 private data */ + hwport0 = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); + + /* This points to the specific port private data */ + hw = &hwport0[port]; + } else { + hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); + hwport0 = 0; + } + + eth_dev->dev_ops = &nfp_net_eth_dev_ops; + eth_dev->rx_pkt_burst = &nfp_net_recv_pkts; + eth_dev->tx_pkt_burst = &nfp_net_xmit_pkts; + + /* For secondary processes, the primary has done all the work */ + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; + + rte_eth_copy_pci_info(eth_dev, pci_dev); + + hw->device_id = pci_dev->id.device_id; + hw->vendor_id = pci_dev->id.vendor_id; + hw->subsystem_device_id = pci_dev->id.subsystem_device_id; + hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id; + + PMD_INIT_LOG(DEBUG, "nfp_net: device (%u:%u) %u:%u:%u:%u", + pci_dev->id.vendor_id, pci_dev->id.device_id, + pci_dev->addr.domain, pci_dev->addr.bus, + pci_dev->addr.devid, pci_dev->addr.function); + + hw->ctrl_bar = (uint8_t *)pci_dev->mem_resource[0].addr; + if (hw->ctrl_bar == NULL) { + PMD_DRV_LOG(ERR, + "hw->ctrl_bar is NULL. 
BAR0 not configured"); + return -ENODEV; + } + + if (hw->is_pf && port == 0) { + hw->ctrl_bar = nfp_rtsym_map(hw->sym_tbl, "_pf0_net_bar0", + hw->total_ports * 32768, + &hw->ctrl_area); + if (!hw->ctrl_bar) { + printf("nfp_rtsym_map fails for _pf0_net_ctrl_bar"); + return -EIO; + } + + PMD_INIT_LOG(DEBUG, "ctrl bar: %p", hw->ctrl_bar); + } + + if (port > 0) { + if (!hwport0->ctrl_bar) + return -ENODEV; + + /* address based on port0 offset */ + hw->ctrl_bar = hwport0->ctrl_bar + + (port * NFP_PF_CSR_SLICE_SIZE); + } + + PMD_INIT_LOG(DEBUG, "ctrl bar: %p", hw->ctrl_bar); + + hw->max_rx_queues = nn_cfg_readl(hw, NFP_NET_CFG_MAX_RXRINGS); + hw->max_tx_queues = nn_cfg_readl(hw, NFP_NET_CFG_MAX_TXRINGS); + + /* Work out where in the BAR the queues start. */ + switch (pci_dev->id.device_id) { + case PCI_DEVICE_ID_NFP4000_PF_NIC: + case PCI_DEVICE_ID_NFP6000_PF_NIC: + case PCI_DEVICE_ID_NFP6000_VF_NIC: + start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_TXQ); + tx_bar_off = (uint64_t)start_q * NFP_QCP_QUEUE_ADDR_SZ; + start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_RXQ); + rx_bar_off = (uint64_t)start_q * NFP_QCP_QUEUE_ADDR_SZ; + break; + default: + PMD_DRV_LOG(ERR, "nfp_net: no device ID matching"); + err = -ENODEV; + goto dev_err_ctrl_map; + } + + PMD_INIT_LOG(DEBUG, "tx_bar_off: 0x%" PRIx64 "", tx_bar_off); + PMD_INIT_LOG(DEBUG, "rx_bar_off: 0x%" PRIx64 "", rx_bar_off); + + if (hw->is_pf && port == 0) { + /* configure access to tx/rx vNIC BARs */ + hwport0->hw_queues = nfp_cpp_map_area(hw->cpp, 0, 0, + NFP_PCIE_QUEUE(0), + NFP_QCP_QUEUE_AREA_SZ, + &hw->hwqueues_area); + + if (!hwport0->hw_queues) { + printf("nfp_rtsym_map fails for net.qc"); + err = -EIO; + goto dev_err_ctrl_map; + } + + PMD_INIT_LOG(DEBUG, "tx/rx bar address: 0x%p", + hwport0->hw_queues); + } + + if (hw->is_pf) { + hw->tx_bar = hwport0->hw_queues + tx_bar_off; + hw->rx_bar = hwport0->hw_queues + rx_bar_off; + eth_dev->data->dev_private = hw; + } else { + hw->tx_bar = (uint8_t *)pci_dev->mem_resource[2].addr + + tx_bar_off; + hw->rx_bar = (uint8_t *)pci_dev->mem_resource[2].addr + + rx_bar_off; + } + + PMD_INIT_LOG(DEBUG, "ctrl_bar: %p, tx_bar: %p, rx_bar: %p", + hw->ctrl_bar, hw->tx_bar, hw->rx_bar); + + nfp_net_cfg_queue_setup(hw); + + /* Get some of the read-only fields from the config BAR */ + hw->ver = nn_cfg_readl(hw, NFP_NET_CFG_VERSION); + hw->cap = nn_cfg_readl(hw, NFP_NET_CFG_CAP); + hw->max_mtu = nn_cfg_readl(hw, NFP_NET_CFG_MAX_MTU); + hw->mtu = RTE_ETHER_MTU; + + /* VLAN insertion is incompatible with LSOv2 */ + if (hw->cap & NFP_NET_CFG_CTRL_LSO2) + hw->cap &= ~NFP_NET_CFG_CTRL_TXVLAN; + + if (NFD_CFG_MAJOR_VERSION_of(hw->ver) < 2) + hw->rx_offset = NFP_NET_RX_OFFSET; + else + hw->rx_offset = nn_cfg_readl(hw, NFP_NET_CFG_RX_OFFSET_ADDR); + + PMD_INIT_LOG(INFO, "VER: %u.%u, Maximum supported MTU: %d", + NFD_CFG_MAJOR_VERSION_of(hw->ver), + NFD_CFG_MINOR_VERSION_of(hw->ver), hw->max_mtu); + + PMD_INIT_LOG(INFO, "CAP: %#x, %s%s%s%s%s%s%s%s%s%s%s%s%s%s", hw->cap, + hw->cap & NFP_NET_CFG_CTRL_PROMISC ? "PROMISC " : "", + hw->cap & NFP_NET_CFG_CTRL_L2BC ? "L2BCFILT " : "", + hw->cap & NFP_NET_CFG_CTRL_L2MC ? "L2MCFILT " : "", + hw->cap & NFP_NET_CFG_CTRL_RXCSUM ? "RXCSUM " : "", + hw->cap & NFP_NET_CFG_CTRL_TXCSUM ? "TXCSUM " : "", + hw->cap & NFP_NET_CFG_CTRL_RXVLAN ? "RXVLAN " : "", + hw->cap & NFP_NET_CFG_CTRL_TXVLAN ? "TXVLAN " : "", + hw->cap & NFP_NET_CFG_CTRL_SCATTER ? "SCATTER " : "", + hw->cap & NFP_NET_CFG_CTRL_GATHER ? "GATHER " : "", + hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR ? 
"LIVE_ADDR " : "", + hw->cap & NFP_NET_CFG_CTRL_LSO ? "TSO " : "", + hw->cap & NFP_NET_CFG_CTRL_LSO2 ? "TSOv2 " : "", + hw->cap & NFP_NET_CFG_CTRL_RSS ? "RSS " : "", + hw->cap & NFP_NET_CFG_CTRL_RSS2 ? "RSSv2 " : ""); + + hw->ctrl = 0; + + hw->stride_rx = stride; + hw->stride_tx = stride; + + PMD_INIT_LOG(INFO, "max_rx_queues: %u, max_tx_queues: %u", + hw->max_rx_queues, hw->max_tx_queues); + + /* Initializing spinlock for reconfigs */ + rte_spinlock_init(&hw->reconfig_lock); + + /* Allocating memory for mac addr */ + eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", + RTE_ETHER_ADDR_LEN, 0); + if (eth_dev->data->mac_addrs == NULL) { + PMD_INIT_LOG(ERR, "Failed to space for MAC address"); + err = -ENOMEM; + goto dev_err_queues_map; + } + + if (hw->is_pf) { + nfp_net_pf_read_mac(hwport0, port); + nfp_net_write_mac(hw, (uint8_t *)&hw->mac_addr); + } else { + nfp_net_vf_read_mac(hw); + } + + if (!rte_is_valid_assigned_ether_addr( + (struct rte_ether_addr *)&hw->mac_addr)) { + PMD_INIT_LOG(INFO, "Using random mac address for port %d", + port); + /* Using random mac addresses for VFs */ + rte_eth_random_addr(&hw->mac_addr[0]); + nfp_net_write_mac(hw, (uint8_t *)&hw->mac_addr); + } + + /* Copying mac address to DPDK eth_dev struct */ + rte_ether_addr_copy((struct rte_ether_addr *)hw->mac_addr, + ð_dev->data->mac_addrs[0]); + + if (!(hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR)) + eth_dev->data->dev_flags |= RTE_ETH_DEV_NOLIVE_MAC_ADDR; + + PMD_INIT_LOG(INFO, "port %d VendorID=0x%x DeviceID=0x%x " + "mac=%02x:%02x:%02x:%02x:%02x:%02x", + eth_dev->data->port_id, pci_dev->id.vendor_id, + pci_dev->id.device_id, + hw->mac_addr[0], hw->mac_addr[1], hw->mac_addr[2], + hw->mac_addr[3], hw->mac_addr[4], hw->mac_addr[5]); + + if (rte_eal_process_type() == RTE_PROC_PRIMARY) { + /* Registering LSC interrupt handler */ + rte_intr_callback_register(&pci_dev->intr_handle, + nfp_net_dev_interrupt_handler, + (void *)eth_dev); + /* Telling the firmware about the LSC interrupt entry */ + nn_cfg_writeb(hw, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX); + /* Recording current stats counters values */ + nfp_net_stats_reset(eth_dev); + } + + return 0; + +dev_err_queues_map: + nfp_cpp_area_free(hw->hwqueues_area); +dev_err_ctrl_map: + nfp_cpp_area_free(hw->ctrl_area); + + return err; +} + +#define NFP_CPP_MEMIO_BOUNDARY (1 << 20) + +/* + * Serving a write request to NFP from host programs. The request + * sends the write size and the CPP target. The bridge makes use + * of CPP interface handler configured by the PMD setup. 
+ */ +static int +nfp_cpp_bridge_serve_write(int sockfd, struct nfp_cpp *cpp) +{ + struct nfp_cpp_area *area; + off_t offset, nfp_offset; + uint32_t cpp_id, pos, len; + uint32_t tmpbuf[16]; + size_t count, curlen, totlen = 0; + int err = 0; + + PMD_CPP_LOG(DEBUG, "%s: offset size %zu, count_size: %zu\n", __func__, + sizeof(off_t), sizeof(size_t)); + + /* Reading the count param */ + err = recv(sockfd, &count, sizeof(off_t), 0); + if (err != sizeof(off_t)) + return -EINVAL; + + curlen = count; + + /* Reading the offset param */ + err = recv(sockfd, &offset, sizeof(off_t), 0); + if (err != sizeof(off_t)) + return -EINVAL; + + /* Obtain target's CPP ID and offset in target */ + cpp_id = (offset >> 40) << 8; + nfp_offset = offset & ((1ull << 40) - 1); + + PMD_CPP_LOG(DEBUG, "%s: count %zu and offset %jd\n", __func__, count, + offset); + PMD_CPP_LOG(DEBUG, "%s: cpp_id %08x and nfp_offset %jd\n", __func__, + cpp_id, nfp_offset); + + /* Adjust length if not aligned */ + if (((nfp_offset + (off_t)count - 1) & ~(NFP_CPP_MEMIO_BOUNDARY - 1)) != + (nfp_offset & ~(NFP_CPP_MEMIO_BOUNDARY - 1))) { + curlen = NFP_CPP_MEMIO_BOUNDARY - + (nfp_offset & (NFP_CPP_MEMIO_BOUNDARY - 1)); + } + + while (count > 0) { + /* configure a CPP PCIe2CPP BAR for mapping the CPP target */ + area = nfp_cpp_area_alloc_with_name(cpp, cpp_id, "nfp.cdev", + nfp_offset, curlen); + if (!area) { + RTE_LOG(ERR, PMD, "%s: area alloc fail\n", __func__); + return -EIO; + } + + /* mapping the target */ + err = nfp_cpp_area_acquire(area); + if (err < 0) { + RTE_LOG(ERR, PMD, "area acquire failed\n"); + nfp_cpp_area_free(area); + return -EIO; + } + + for (pos = 0; pos < curlen; pos += len) { + len = curlen - pos; + if (len > sizeof(tmpbuf)) + len = sizeof(tmpbuf); + + PMD_CPP_LOG(DEBUG, "%s: Receive %u of %zu\n", __func__, + len, count); + err = recv(sockfd, tmpbuf, len, MSG_WAITALL); + if (err != (int)len) { + RTE_LOG(ERR, PMD, + "%s: error when receiving, %d of %zu\n", + __func__, err, count); + nfp_cpp_area_release(area); + nfp_cpp_area_free(area); + return -EIO; + } + err = nfp_cpp_area_write(area, pos, tmpbuf, len); + if (err < 0) { + RTE_LOG(ERR, PMD, "nfp_cpp_area_write error\n"); + nfp_cpp_area_release(area); + nfp_cpp_area_free(area); + return -EIO; + } + } + + nfp_offset += pos; + totlen += pos; + nfp_cpp_area_release(area); + nfp_cpp_area_free(area); + + count -= pos; + curlen = (count > NFP_CPP_MEMIO_BOUNDARY) ? + NFP_CPP_MEMIO_BOUNDARY : count; + } + + return 0; +} + +/* + * Serving a read request to NFP from host programs. The request + * sends the read size and the CPP target. The bridge makes use + * of CPP interface handler configured by the PMD setup. The read + * data is sent to the requester using the same socket. 
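+ *
+ * From the client side the exchange mirrors the write case, except that
+ * the payload travels back over the same socket (illustrative sketch of
+ * a hypothetical host tool, not part of this patch):
+ *
+ *	send(fd, &count, sizeof(off_t), 0);
+ *	send(fd, &offset, sizeof(off_t), 0);
+ *	recv(fd, buf, count, MSG_WAITALL);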
+ */ +static int +nfp_cpp_bridge_serve_read(int sockfd, struct nfp_cpp *cpp) +{ + struct nfp_cpp_area *area; + off_t offset, nfp_offset; + uint32_t cpp_id, pos, len; + uint32_t tmpbuf[16]; + size_t count, curlen, totlen = 0; + int err = 0; + + PMD_CPP_LOG(DEBUG, "%s: offset size %zu, count_size: %zu\n", __func__, + sizeof(off_t), sizeof(size_t)); + + /* Reading the count param */ + err = recv(sockfd, &count, sizeof(off_t), 0); + if (err != sizeof(off_t)) + return -EINVAL; + + curlen = count; + + /* Reading the offset param */ + err = recv(sockfd, &offset, sizeof(off_t), 0); + if (err != sizeof(off_t)) + return -EINVAL; + + /* Obtain target's CPP ID and offset in target */ + cpp_id = (offset >> 40) << 8; + nfp_offset = offset & ((1ull << 40) - 1); + + PMD_CPP_LOG(DEBUG, "%s: count %zu and offset %jd\n", __func__, count, + offset); + PMD_CPP_LOG(DEBUG, "%s: cpp_id %08x and nfp_offset %jd\n", __func__, + cpp_id, nfp_offset); + + /* Adjust length if not aligned */ + if (((nfp_offset + (off_t)count - 1) & ~(NFP_CPP_MEMIO_BOUNDARY - 1)) != + (nfp_offset & ~(NFP_CPP_MEMIO_BOUNDARY - 1))) { + curlen = NFP_CPP_MEMIO_BOUNDARY - + (nfp_offset & (NFP_CPP_MEMIO_BOUNDARY - 1)); + } + + while (count > 0) { + area = nfp_cpp_area_alloc_with_name(cpp, cpp_id, "nfp.cdev", + nfp_offset, curlen); + if (!area) { + RTE_LOG(ERR, PMD, "%s: area alloc failed\n", __func__); + return -EIO; + } + + err = nfp_cpp_area_acquire(area); + if (err < 0) { + RTE_LOG(ERR, PMD, "area acquire failed\n"); + nfp_cpp_area_free(area); + return -EIO; + } + + for (pos = 0; pos < curlen; pos += len) { + len = curlen - pos; + if (len > sizeof(tmpbuf)) + len = sizeof(tmpbuf); + + err = nfp_cpp_area_read(area, pos, tmpbuf, len); + if (err < 0) { + RTE_LOG(ERR, PMD, "nfp_cpp_area_read error\n"); + nfp_cpp_area_release(area); + nfp_cpp_area_free(area); + return -EIO; + } + PMD_CPP_LOG(DEBUG, "%s: sending %u of %zu\n", __func__, + len, count); + + err = send(sockfd, tmpbuf, len, 0); + if (err != (int)len) { + RTE_LOG(ERR, PMD, + "%s: error when sending: %d of %zu\n", + __func__, err, count); + nfp_cpp_area_release(area); + nfp_cpp_area_free(area); + return -EIO; + } + } + + nfp_offset += pos; + totlen += pos; + nfp_cpp_area_release(area); + nfp_cpp_area_free(area); + + count -= pos; + curlen = (count > NFP_CPP_MEMIO_BOUNDARY) ? + NFP_CPP_MEMIO_BOUNDARY : count; + } + return 0; +} + +#define NFP_IOCTL 'n' +#define NFP_IOCTL_CPP_IDENTIFICATION _IOW(NFP_IOCTL, 0x8f, uint32_t) +/* + * Serving a ioctl command from host NFP tools. This usually goes to + * a kernel driver char driver but it is not available when the PF is + * bound to the PMD. Currently just one ioctl command is served and it + * does not require any CPP access at all. 
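+ *
+ * The whole exchange, as handled below, is a few fixed-size words. An
+ * illustrative client side (hypothetical host tool, not part of this
+ * patch) would be:
+ *
+ *	uint32_t op = NFP_BRIDGE_OP_IOCTL;
+ *	uint32_t cmd = NFP_IOCTL_CPP_IDENTIFICATION;
+ *	uint32_t size = 8, model, interface;
+ *
+ *	send(fd, &op, 4, 0);
+ *	send(fd, &cmd, 4, 0);
+ *	send(fd, &size, 4, 0);
+ *	recv(fd, &model, 4, 0);
+ *	recv(fd, &interface, 4, 0);
+ *
+ * where the two words received back are the NFP model and the CPP
+ * interface id written by the handler below.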
+ */ +static int +nfp_cpp_bridge_serve_ioctl(int sockfd, struct nfp_cpp *cpp) +{ + uint32_t cmd, ident_size, tmp; + int err; + + /* Reading now the IOCTL command */ + err = recv(sockfd, &cmd, 4, 0); + if (err != 4) { + RTE_LOG(ERR, PMD, "%s: read error from socket\n", __func__); + return -EIO; + } + + /* Only supporting NFP_IOCTL_CPP_IDENTIFICATION */ + if (cmd != NFP_IOCTL_CPP_IDENTIFICATION) { + RTE_LOG(ERR, PMD, "%s: unknown cmd %d\n", __func__, cmd); + return -EINVAL; + } + + err = recv(sockfd, &ident_size, 4, 0); + if (err != 4) { + RTE_LOG(ERR, PMD, "%s: read error from socket\n", __func__); + return -EIO; + } + + tmp = nfp_cpp_model(cpp); + + PMD_CPP_LOG(DEBUG, "%s: sending NFP model %08x\n", __func__, tmp); + + err = send(sockfd, &tmp, 4, 0); + if (err != 4) { + RTE_LOG(ERR, PMD, "%s: error writing to socket\n", __func__); + return -EIO; + } + + tmp = cpp->interface; + + PMD_CPP_LOG(DEBUG, "%s: sending NFP interface %08x\n", __func__, tmp); + + err = send(sockfd, &tmp, 4, 0); + if (err != 4) { + RTE_LOG(ERR, PMD, "%s: error writing to socket\n", __func__); + return -EIO; + } + + return 0; +} + +#define NFP_BRIDGE_OP_READ 20 +#define NFP_BRIDGE_OP_WRITE 30 +#define NFP_BRIDGE_OP_IOCTL 40 + +/* + * This is the code to be executed by a service core. The CPP bridge interface + * is based on a unix socket and requests usually received by a kernel char + * driver, read, write and ioctl, are handled by the CPP bridge. NFP host tools + * can be executed with a wrapper library and LD_LIBRARY being completely + * unaware of the CPP bridge performing the NFP kernel char driver for CPP + * accesses. + */ +static int32_t +nfp_cpp_bridge_service_func(void *args) +{ + struct sockaddr address; + struct nfp_cpp *cpp = args; + int sockfd, datafd, op, ret; + + unlink("/tmp/nfp_cpp"); + sockfd = socket(AF_UNIX, SOCK_STREAM, 0); + if (sockfd < 0) { + RTE_LOG(ERR, PMD, "%s: socket creation error. Service failed\n", + __func__); + return -EIO; + } + + memset(&address, 0, sizeof(struct sockaddr)); + + address.sa_family = AF_UNIX; + strcpy(address.sa_data, "/tmp/nfp_cpp"); + + ret = bind(sockfd, (const struct sockaddr *)&address, + sizeof(struct sockaddr)); + if (ret < 0) { + RTE_LOG(ERR, PMD, "%s: bind error (%d). Service failed\n", + __func__, errno); + close(sockfd); + return ret; + } + + ret = listen(sockfd, 20); + if (ret < 0) { + RTE_LOG(ERR, PMD, "%s: listen error(%d). 
Service failed\n", + __func__, errno); + close(sockfd); + return ret; + } + + for (;;) { + datafd = accept(sockfd, NULL, NULL); + if (datafd < 0) { + RTE_LOG(ERR, PMD, "%s: accept call error (%d)\n", + __func__, errno); + RTE_LOG(ERR, PMD, "%s: service failed\n", __func__); + close(sockfd); + return -EIO; + } + + while (1) { + ret = recv(datafd, &op, 4, 0); + if (ret <= 0) { + PMD_CPP_LOG(DEBUG, "%s: socket close\n", + __func__); + break; + } + + PMD_CPP_LOG(DEBUG, "%s: getting op %u\n", __func__, op); + + if (op == NFP_BRIDGE_OP_READ) + nfp_cpp_bridge_serve_read(datafd, cpp); + + if (op == NFP_BRIDGE_OP_WRITE) + nfp_cpp_bridge_serve_write(datafd, cpp); + + if (op == NFP_BRIDGE_OP_IOCTL) + nfp_cpp_bridge_serve_ioctl(datafd, cpp); + + if (op == 0) + break; + } + close(datafd); + } + close(sockfd); + + return 0; +} + +static int +nfp_pf_create_dev(struct rte_pci_device *dev, int port, int ports, + struct nfp_cpp *cpp, struct nfp_hwinfo *hwinfo, + int phys_port, struct nfp_rtsym_table *sym_tbl, void **priv) +{ + struct rte_eth_dev *eth_dev; + struct nfp_net_hw *hw = NULL; + char *port_name; + struct rte_service_spec service; + int retval; + + port_name = rte_zmalloc("nfp_pf_port_name", 100, 0); + if (!port_name) + return -ENOMEM; + + if (ports > 1) + snprintf(port_name, 100, "%s_port%d", dev->device.name, port); + else + strlcat(port_name, dev->device.name, 100); + + + if (rte_eal_process_type() == RTE_PROC_PRIMARY) { + eth_dev = rte_eth_dev_allocate(port_name); + if (!eth_dev) { + rte_free(port_name); + return -ENODEV; + } + if (port == 0) { + *priv = rte_zmalloc(port_name, + sizeof(struct nfp_net_adapter) * + ports, RTE_CACHE_LINE_SIZE); + if (!*priv) { + rte_free(port_name); + rte_eth_dev_release_port(eth_dev); + return -ENOMEM; + } + } + eth_dev->data->dev_private = *priv; + + /* + * dev_private pointing to port0 dev_private because we need + * to configure vNIC bars based on port0 at nfp_net_init. + * Then dev_private is adjusted per port. + */ + hw = (struct nfp_net_hw *)(eth_dev->data->dev_private) + port; + hw->cpp = cpp; + hw->hwinfo = hwinfo; + hw->sym_tbl = sym_tbl; + hw->pf_port_idx = phys_port; + hw->is_pf = 1; + if (ports > 1) + hw->pf_multiport_enabled = 1; + + hw->total_ports = ports; + } else { + eth_dev = rte_eth_dev_attach_secondary(port_name); + if (!eth_dev) { + RTE_LOG(ERR, EAL, "secondary process attach failed, " + "ethdev doesn't exist"); + rte_free(port_name); + return -ENODEV; + } + eth_dev->process_private = cpp; + } + + eth_dev->device = &dev->device; + rte_eth_copy_pci_info(eth_dev, dev); + + retval = nfp_net_init(eth_dev); + + if (retval) { + retval = -ENODEV; + goto probe_failed; + } else { + rte_eth_dev_probing_finish(eth_dev); + } + + rte_free(port_name); + + if (port == 0) { + /* + * The rte_service needs to be created just once per PMD. + * And the cpp handler needs to be linked to the service. + * Secondary processes will be used for debugging DPDK apps + * when requiring to use the CPP interface for accessing NFP + * components. And the cpp handler for secondary processes is + * available at this point. 
+ */ + memset(&service, 0, sizeof(struct rte_service_spec)); + snprintf(service.name, sizeof(service.name), "nfp_cpp_service"); + service.callback = nfp_cpp_bridge_service_func; + service.callback_userdata = (void *)cpp; + + hw = (struct nfp_net_hw *)(eth_dev->data->dev_private); + + if (rte_service_component_register(&service, + &hw->nfp_cpp_service_id)) + RTE_LOG(ERR, PMD, "NFP CPP bridge service register() failed"); + else + RTE_LOG(DEBUG, PMD, "NFP CPP bridge service registered"); + } + + return retval; + +probe_failed: + rte_free(port_name); + /* free ports private data if primary process */ + if (rte_eal_process_type() == RTE_PROC_PRIMARY) { + rte_free(eth_dev->data->dev_private); + eth_dev->data->dev_private = NULL; + } + rte_eth_dev_release_port(eth_dev); + + return retval; +} + +#define DEFAULT_FW_PATH "/lib/firmware/netronome" + +static int +nfp_fw_upload(struct rte_pci_device *dev, struct nfp_nsp *nsp, char *card) +{ + struct nfp_cpp *cpp = nsp->cpp; + int fw_f; + char *fw_buf; + char fw_name[125]; + char serial[40]; + struct stat file_stat; + off_t fsize, bytes; + + /* Looking for firmware file in order of priority */ + + /* First try to find a firmware image specific for this device */ + snprintf(serial, sizeof(serial), + "serial-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x", + cpp->serial[0], cpp->serial[1], cpp->serial[2], cpp->serial[3], + cpp->serial[4], cpp->serial[5], cpp->interface >> 8, + cpp->interface & 0xff); + + snprintf(fw_name, sizeof(fw_name), "%s/%s.nffw", DEFAULT_FW_PATH, + serial); + + PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name); + fw_f = open(fw_name, O_RDONLY); + if (fw_f >= 0) + goto read_fw; + + /* Then try the PCI name */ + snprintf(fw_name, sizeof(fw_name), "%s/pci-%s.nffw", DEFAULT_FW_PATH, + dev->device.name); + + PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name); + fw_f = open(fw_name, O_RDONLY); + if (fw_f >= 0) + goto read_fw; + + /* Finally try the card type and media */ + snprintf(fw_name, sizeof(fw_name), "%s/%s", DEFAULT_FW_PATH, card); + PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name); + fw_f = open(fw_name, O_RDONLY); + if (fw_f < 0) { + PMD_DRV_LOG(INFO, "Firmware file %s not found.", fw_name); + return -ENOENT; + } + +read_fw: + if (fstat(fw_f, &file_stat) < 0) { + PMD_DRV_LOG(INFO, "Firmware file %s size is unknown", fw_name); + close(fw_f); + return -ENOENT; + } + + fsize = file_stat.st_size; + PMD_DRV_LOG(INFO, "Firmware file found at %s with size: %" PRIu64 "", + fw_name, (uint64_t)fsize); + + fw_buf = malloc((size_t)fsize); + if (!fw_buf) { + PMD_DRV_LOG(INFO, "malloc failed for fw buffer"); + close(fw_f); + return -ENOMEM; + } + memset(fw_buf, 0, fsize); + + bytes = read(fw_f, fw_buf, fsize); + if (bytes != fsize) { + PMD_DRV_LOG(INFO, "Reading fw to buffer failed." 
+ "Just %" PRIu64 " of %" PRIu64 " bytes read", + (uint64_t)bytes, (uint64_t)fsize); + free(fw_buf); + close(fw_f); + return -EIO; + } + + PMD_DRV_LOG(INFO, "Uploading the firmware ..."); + nfp_nsp_load_fw(nsp, fw_buf, bytes); + PMD_DRV_LOG(INFO, "Done"); + + free(fw_buf); + close(fw_f); + + return 0; +} + +static int +nfp_fw_setup(struct rte_pci_device *dev, struct nfp_cpp *cpp, + struct nfp_eth_table *nfp_eth_table, struct nfp_hwinfo *hwinfo) +{ + struct nfp_nsp *nsp; + const char *nfp_fw_model; + char card_desc[100]; + int err = 0; + + nfp_fw_model = nfp_hwinfo_lookup(hwinfo, "assembly.partno"); + + if (nfp_fw_model) { + PMD_DRV_LOG(INFO, "firmware model found: %s", nfp_fw_model); + } else { + PMD_DRV_LOG(ERR, "firmware model NOT found"); + return -EIO; + } + + if (nfp_eth_table->count == 0 || nfp_eth_table->count > 8) { + PMD_DRV_LOG(ERR, "NFP ethernet table reports wrong ports: %u", + nfp_eth_table->count); + return -EIO; + } + + PMD_DRV_LOG(INFO, "NFP ethernet port table reports %u ports", + nfp_eth_table->count); + + PMD_DRV_LOG(INFO, "Port speed: %u", nfp_eth_table->ports[0].speed); + + snprintf(card_desc, sizeof(card_desc), "nic_%s_%dx%d.nffw", + nfp_fw_model, nfp_eth_table->count, + nfp_eth_table->ports[0].speed / 1000); + + nsp = nfp_nsp_open(cpp); + if (!nsp) { + PMD_DRV_LOG(ERR, "NFP error when obtaining NSP handle"); + return -EIO; + } + + nfp_nsp_device_soft_reset(nsp); + err = nfp_fw_upload(dev, nsp, card_desc); + + nfp_nsp_close(nsp); + return err; +} + +static int nfp_pf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, + struct rte_pci_device *dev) +{ + struct nfp_cpp *cpp; + struct nfp_hwinfo *hwinfo; + struct nfp_rtsym_table *sym_tbl; + struct nfp_eth_table *nfp_eth_table = NULL; + int total_ports; + void *priv = 0; + int ret = -ENODEV; + int err; + int i; + + if (!dev) + return ret; + + /* + * When device bound to UIO, the device could be used, by mistake, + * by two DPDK apps, and the UIO driver does not avoid it. This + * could lead to a serious problem when configuring the NFP CPP + * interface. Here we avoid this telling to the CPP init code to + * use a lock file if UIO is being used. 
+ */ + if (dev->kdrv == RTE_KDRV_VFIO) + cpp = nfp_cpp_from_device_name(dev, 0); + else + cpp = nfp_cpp_from_device_name(dev, 1); + + if (!cpp) { + PMD_DRV_LOG(ERR, "A CPP handle can not be obtained"); + ret = -EIO; + goto error; + } + + hwinfo = nfp_hwinfo_read(cpp); + if (!hwinfo) { + PMD_DRV_LOG(ERR, "Error reading hwinfo table"); + return -EIO; + } + + nfp_eth_table = nfp_eth_read_ports(cpp); + if (!nfp_eth_table) { + PMD_DRV_LOG(ERR, "Error reading NFP ethernet table"); + return -EIO; + } + + if (rte_eal_process_type() == RTE_PROC_PRIMARY) { + if (nfp_fw_setup(dev, cpp, nfp_eth_table, hwinfo)) { + PMD_DRV_LOG(INFO, "Error when uploading firmware"); + ret = -EIO; + goto error; + } + } + + /* Now the symbol table should be there */ + sym_tbl = nfp_rtsym_table_read(cpp); + if (!sym_tbl) { + PMD_DRV_LOG(ERR, "Something is wrong with the firmware" + " symbol table"); + ret = -EIO; + goto error; + } + + total_ports = nfp_rtsym_read_le(sym_tbl, "nfd_cfg_pf0_num_ports", &err); + if (total_ports != (int)nfp_eth_table->count) { + PMD_DRV_LOG(ERR, "Inconsistent number of ports"); + ret = -EIO; + goto error; + } + PMD_INIT_LOG(INFO, "Total pf ports: %d", total_ports); + + if (total_ports <= 0 || total_ports > 8) { + PMD_DRV_LOG(ERR, "nfd_cfg_pf0_num_ports symbol with wrong value"); + ret = -ENODEV; + goto error; + } + + for (i = 0; i < total_ports; i++) { + ret = nfp_pf_create_dev(dev, i, total_ports, cpp, hwinfo, + nfp_eth_table->ports[i].index, + sym_tbl, &priv); + if (ret) + break; + } + +error: + free(nfp_eth_table); + return ret; +} + +int nfp_logtype_init; +int nfp_logtype_driver; + +static const struct rte_pci_id pci_id_nfp_pf_net_map[] = { + { + RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME, + PCI_DEVICE_ID_NFP4000_PF_NIC) + }, + { + RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME, + PCI_DEVICE_ID_NFP6000_PF_NIC) + }, + { + .vendor_id = 0, + }, +}; + +static const struct rte_pci_id pci_id_nfp_vf_net_map[] = { + { + RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME, + PCI_DEVICE_ID_NFP6000_VF_NIC) + }, + { + .vendor_id = 0, + }, +}; + +static int eth_nfp_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, + struct rte_pci_device *pci_dev) +{ + return rte_eth_dev_pci_generic_probe(pci_dev, + sizeof(struct nfp_net_adapter), nfp_net_init); +} + +static int eth_nfp_pci_remove(struct rte_pci_device *pci_dev) +{ + struct rte_eth_dev *eth_dev; + struct nfp_net_hw *hw, *hwport0; + int port = 0; + + eth_dev = rte_eth_dev_allocated(pci_dev->device.name); + if ((pci_dev->id.device_id == PCI_DEVICE_ID_NFP4000_PF_NIC) || + (pci_dev->id.device_id == PCI_DEVICE_ID_NFP6000_PF_NIC)) { + port = get_pf_port_number(eth_dev->data->name); + /* + * hotplug is not possible with multiport PF although freeing + * data structures can be done for first port. 
+ */ + if (port != 0) + return -ENOTSUP; + hwport0 = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); + hw = &hwport0[port]; + nfp_cpp_area_free(hw->ctrl_area); + nfp_cpp_area_free(hw->hwqueues_area); + free(hw->hwinfo); + free(hw->sym_tbl); + nfp_cpp_free(hw->cpp); + } else { + hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); + } + /* hotplug is not possible with multiport PF */ + if (hw->pf_multiport_enabled) + return -ENOTSUP; + return rte_eth_dev_pci_generic_remove(pci_dev, NULL); +} + +static struct rte_pci_driver rte_nfp_net_pf_pmd = { + .id_table = pci_id_nfp_pf_net_map, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, + .probe = nfp_pf_pci_probe, + .remove = eth_nfp_pci_remove, +}; + +static struct rte_pci_driver rte_nfp_net_vf_pmd = { + .id_table = pci_id_nfp_vf_net_map, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, + .probe = eth_nfp_pci_probe, + .remove = eth_nfp_pci_remove, +}; + +RTE_PMD_REGISTER_PCI(net_nfp_pf, rte_nfp_net_pf_pmd); +RTE_PMD_REGISTER_PCI(net_nfp_vf, rte_nfp_net_vf_pmd); +RTE_PMD_REGISTER_PCI_TABLE(net_nfp_pf, pci_id_nfp_pf_net_map); +RTE_PMD_REGISTER_PCI_TABLE(net_nfp_vf, pci_id_nfp_vf_net_map); +RTE_PMD_REGISTER_KMOD_DEP(net_nfp_pf, "* igb_uio | uio_pci_generic | vfio"); +RTE_PMD_REGISTER_KMOD_DEP(net_nfp_vf, "* igb_uio | uio_pci_generic | vfio"); + +RTE_INIT(nfp_init_log) +{ + nfp_logtype_init = rte_log_register("pmd.net.nfp.init"); + if (nfp_logtype_init >= 0) + rte_log_set_level(nfp_logtype_init, RTE_LOG_NOTICE); + nfp_logtype_driver = rte_log_register("pmd.net.nfp.driver"); + if (nfp_logtype_driver >= 0) + rte_log_set_level(nfp_logtype_driver, RTE_LOG_NOTICE); +} +/* + * Local variables: + * c-file-style: "Linux" + * indent-tabs-mode: t + * End: + */ diff --git a/src/spdk/dpdk/drivers/net/nfp/nfp_net_ctrl.h b/src/spdk/dpdk/drivers/net/nfp/nfp_net_ctrl.h new file mode 100644 index 000000000..4f26ccf48 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/nfp/nfp_net_ctrl.h @@ -0,0 +1,326 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2014, 2015 Netronome Systems, Inc. + * All rights reserved. + */ + +/* + * vim:shiftwidth=8:noexpandtab + * + * Netronome network device driver: Control BAR layout + */ +#ifndef _NFP_NET_CTRL_H_ +#define _NFP_NET_CTRL_H_ + +/* + * Configuration BAR size. + * + * The configuration BAR is 8K in size, but on the NFP6000, due to + * THB-350, 32k needs to be reserved. 
+ */ +#ifdef __NFP_IS_6000 +#define NFP_NET_CFG_BAR_SZ (32 * 1024) +#else +#define NFP_NET_CFG_BAR_SZ (8 * 1024) +#endif + +/* Offset in Freelist buffer where packet starts on RX */ +#define NFP_NET_RX_OFFSET 32 + +/* working with metadata api (NFD version > 3.0) */ +#define NFP_NET_META_FIELD_SIZE 4 +#define NFP_NET_META_FIELD_MASK ((1 << NFP_NET_META_FIELD_SIZE) - 1) + +/* Prepend field types */ +#define NFP_NET_META_HASH 1 /* next field carries hash type */ + +/* Hash type pre-pended when a RSS hash was computed */ +#define NFP_NET_RSS_NONE 0 +#define NFP_NET_RSS_IPV4 1 +#define NFP_NET_RSS_IPV6 2 +#define NFP_NET_RSS_IPV6_EX 3 +#define NFP_NET_RSS_IPV4_TCP 4 +#define NFP_NET_RSS_IPV6_TCP 5 +#define NFP_NET_RSS_IPV6_EX_TCP 6 +#define NFP_NET_RSS_IPV4_UDP 7 +#define NFP_NET_RSS_IPV6_UDP 8 +#define NFP_NET_RSS_IPV6_EX_UDP 9 + +/* + * @NFP_NET_TXR_MAX: Maximum number of TX rings + * @NFP_NET_TXR_MASK: Mask for TX rings + * @NFP_NET_RXR_MAX: Maximum number of RX rings + * @NFP_NET_RXR_MASK: Mask for RX rings + */ +#define NFP_NET_TXR_MAX 64 +#define NFP_NET_TXR_MASK (NFP_NET_TXR_MAX - 1) +#define NFP_NET_RXR_MAX 64 +#define NFP_NET_RXR_MASK (NFP_NET_RXR_MAX - 1) + +/* + * Read/Write config words (0x0000 - 0x002c) + * @NFP_NET_CFG_CTRL: Global control + * @NFP_NET_CFG_UPDATE: Indicate which fields are updated + * @NFP_NET_CFG_TXRS_ENABLE: Bitmask of enabled TX rings + * @NFP_NET_CFG_RXRS_ENABLE: Bitmask of enabled RX rings + * @NFP_NET_CFG_MTU: Set MTU size + * @NFP_NET_CFG_FLBUFSZ: Set freelist buffer size (must be larger than MTU) + * @NFP_NET_CFG_EXN: MSI-X table entry for exceptions + * @NFP_NET_CFG_LSC: MSI-X table entry for link state changes + * @NFP_NET_CFG_MACADDR: MAC address + * + * TODO: + * - define Error details in UPDATE + */ +#define NFP_NET_CFG_CTRL 0x0000 +#define NFP_NET_CFG_CTRL_ENABLE (0x1 << 0) /* Global enable */ +#define NFP_NET_CFG_CTRL_PROMISC (0x1 << 1) /* Enable Promisc mode */ +#define NFP_NET_CFG_CTRL_L2BC (0x1 << 2) /* Allow L2 Broadcast */ +#define NFP_NET_CFG_CTRL_L2MC (0x1 << 3) /* Allow L2 Multicast */ +#define NFP_NET_CFG_CTRL_RXCSUM (0x1 << 4) /* Enable RX Checksum */ +#define NFP_NET_CFG_CTRL_TXCSUM (0x1 << 5) /* Enable TX Checksum */ +#define NFP_NET_CFG_CTRL_RXVLAN (0x1 << 6) /* Enable VLAN strip */ +#define NFP_NET_CFG_CTRL_TXVLAN (0x1 << 7) /* Enable VLAN insert */ +#define NFP_NET_CFG_CTRL_SCATTER (0x1 << 8) /* Scatter DMA */ +#define NFP_NET_CFG_CTRL_GATHER (0x1 << 9) /* Gather DMA */ +#define NFP_NET_CFG_CTRL_LSO (0x1 << 10) /* LSO/TSO */ +#define NFP_NET_CFG_CTRL_RINGCFG (0x1 << 16) /* Ring runtime changes */ +#define NFP_NET_CFG_CTRL_RSS (0x1 << 17) /* RSS */ +#define NFP_NET_CFG_CTRL_IRQMOD (0x1 << 18) /* Interrupt moderation */ +#define NFP_NET_CFG_CTRL_RINGPRIO (0x1 << 19) /* Ring priorities */ +#define NFP_NET_CFG_CTRL_MSIXAUTO (0x1 << 20) /* MSI-X auto-masking */ +#define NFP_NET_CFG_CTRL_TXRWB (0x1 << 21) /* Write-back of TX ring*/ +#define NFP_NET_CFG_CTRL_L2SWITCH (0x1 << 22) /* L2 Switch */ +#define NFP_NET_CFG_CTRL_L2SWITCH_LOCAL (0x1 << 23) /* Switch to local */ +#define NFP_NET_CFG_CTRL_VXLAN (0x1 << 24) /* Enable VXLAN */ +#define NFP_NET_CFG_CTRL_NVGRE (0x1 << 25) /* Enable NVGRE */ +#define NFP_NET_CFG_CTRL_MSIX_TX_OFF (0x1 << 26) /* Disable MSIX for TX */ +#define NFP_NET_CFG_CTRL_LSO2 (0x1 << 28) /* LSO/TSO (version 2) */ +#define NFP_NET_CFG_CTRL_RSS2 (0x1 << 29) /* RSS (version 2) */ +#define NFP_NET_CFG_CTRL_LIVE_ADDR (0x1U << 31)/* live MAC addr change */ +#define NFP_NET_CFG_UPDATE 0x0004 +#define NFP_NET_CFG_UPDATE_GEN 
(0x1 << 0) /* General update */ +#define NFP_NET_CFG_UPDATE_RING (0x1 << 1) /* Ring config change */ +#define NFP_NET_CFG_UPDATE_RSS (0x1 << 2) /* RSS config change */ +#define NFP_NET_CFG_UPDATE_TXRPRIO (0x1 << 3) /* TX Ring prio change */ +#define NFP_NET_CFG_UPDATE_RXRPRIO (0x1 << 4) /* RX Ring prio change */ +#define NFP_NET_CFG_UPDATE_MSIX (0x1 << 5) /* MSI-X change */ +#define NFP_NET_CFG_UPDATE_L2SWITCH (0x1 << 6) /* Switch changes */ +#define NFP_NET_CFG_UPDATE_RESET (0x1 << 7) /* Update due to FLR */ +#define NFP_NET_CFG_UPDATE_IRQMOD (0x1 << 8) /* IRQ mod change */ +#define NFP_NET_CFG_UPDATE_VXLAN (0x1 << 9) /* VXLAN port change */ +#define NFP_NET_CFG_UPDATE_MACADDR (0x1 << 11) /* MAC address change */ +#define NFP_NET_CFG_UPDATE_ERR (0x1U << 31) /* A error occurred */ +#define NFP_NET_CFG_TXRS_ENABLE 0x0008 +#define NFP_NET_CFG_RXRS_ENABLE 0x0010 +#define NFP_NET_CFG_MTU 0x0018 +#define NFP_NET_CFG_FLBUFSZ 0x001c +#define NFP_NET_CFG_EXN 0x001f +#define NFP_NET_CFG_LSC 0x0020 +#define NFP_NET_CFG_MACADDR 0x0024 + +#define NFP_NET_CFG_CTRL_LSO_ANY (NFP_NET_CFG_CTRL_LSO | NFP_NET_CFG_CTRL_LSO2) + +/* + * Read-only words (0x0030 - 0x0050): + * @NFP_NET_CFG_VERSION: Firmware version number + * @NFP_NET_CFG_STS: Status + * @NFP_NET_CFG_CAP: Capabilities (same bits as @NFP_NET_CFG_CTRL) + * @NFP_NET_MAX_TXRINGS: Maximum number of TX rings + * @NFP_NET_MAX_RXRINGS: Maximum number of RX rings + * @NFP_NET_MAX_MTU: Maximum support MTU + * @NFP_NET_CFG_START_TXQ: Start Queue Control Queue to use for TX (PF only) + * @NFP_NET_CFG_START_RXQ: Start Queue Control Queue to use for RX (PF only) + * + * TODO: + * - define more STS bits + */ +#define NFP_NET_CFG_VERSION 0x0030 +#define NFP_NET_CFG_VERSION_RESERVED_MASK (0xff << 24) +#define NFP_NET_CFG_VERSION_CLASS_MASK (0xff << 16) +#define NFP_NET_CFG_VERSION_CLASS(x) (((x) & 0xff) << 16) +#define NFP_NET_CFG_VERSION_CLASS_GENERIC 0 +#define NFP_NET_CFG_VERSION_MAJOR_MASK (0xff << 8) +#define NFP_NET_CFG_VERSION_MAJOR(x) (((x) & 0xff) << 8) +#define NFP_NET_CFG_VERSION_MINOR_MASK (0xff << 0) +#define NFP_NET_CFG_VERSION_MINOR(x) (((x) & 0xff) << 0) +#define NFP_NET_CFG_STS 0x0034 +#define NFP_NET_CFG_STS_LINK (0x1 << 0) /* Link up or down */ +/* Link rate */ +#define NFP_NET_CFG_STS_LINK_RATE_SHIFT 1 +#define NFP_NET_CFG_STS_LINK_RATE_MASK 0xF +#define NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED 0 +#define NFP_NET_CFG_STS_LINK_RATE_UNKNOWN 1 +#define NFP_NET_CFG_STS_LINK_RATE_1G 2 +#define NFP_NET_CFG_STS_LINK_RATE_10G 3 +#define NFP_NET_CFG_STS_LINK_RATE_25G 4 +#define NFP_NET_CFG_STS_LINK_RATE_40G 5 +#define NFP_NET_CFG_STS_LINK_RATE_50G 6 +#define NFP_NET_CFG_STS_LINK_RATE_100G 7 +#define NFP_NET_CFG_CAP 0x0038 +#define NFP_NET_CFG_MAX_TXRINGS 0x003c +#define NFP_NET_CFG_MAX_RXRINGS 0x0040 +#define NFP_NET_CFG_MAX_MTU 0x0044 +/* Next two words are being used by VFs for solving THB350 issue */ +#define NFP_NET_CFG_START_TXQ 0x0048 +#define NFP_NET_CFG_START_RXQ 0x004c + +/* + * NFP-3200 workaround (0x0050 - 0x0058) + * @NFP_NET_CFG_SPARE_ADDR: DMA address for ME code to use (e.g. YDS-155 fix) + */ +#define NFP_NET_CFG_SPARE_ADDR 0x0050 +/** + * NFP6000/NFP4000 - Prepend configuration + */ +#define NFP_NET_CFG_RX_OFFSET 0x0050 +#define NFP_NET_CFG_RX_OFFSET_DYNAMIC 0 /* Prepend mode */ + +/** + * Reuse spare address to contain the offset from the start of + * the host buffer where the first byte of the received frame + * will land. Any metadata will come prior to that offset. 
If the + * value in this field is 0, it means that that the metadata will + * always land starting at the first byte of the host buffer and + * packet data will immediately follow the metadata. As always, + * the RX descriptor indicates the presence or absence of metadata + * along with the length thereof. + */ +#define NFP_NET_CFG_RX_OFFSET_ADDR 0x0050 + +#define NFP_NET_CFG_VXLAN_PORT 0x0060 +#define NFP_NET_CFG_VXLAN_SZ 0x0008 + +/* Offload definitions */ +#define NFP_NET_N_VXLAN_PORTS (NFP_NET_CFG_VXLAN_SZ / sizeof(uint16_t)) + +/** + * 64B reserved for future use (0x0080 - 0x00c0) + */ +#define NFP_NET_CFG_RESERVED 0x0080 +#define NFP_NET_CFG_RESERVED_SZ 0x0040 + +/* + * RSS configuration (0x0100 - 0x01ac): + * Used only when NFP_NET_CFG_CTRL_RSS is enabled + * @NFP_NET_CFG_RSS_CFG: RSS configuration word + * @NFP_NET_CFG_RSS_KEY: RSS "secret" key + * @NFP_NET_CFG_RSS_ITBL: RSS indirection table + */ +#define NFP_NET_CFG_RSS_BASE 0x0100 +#define NFP_NET_CFG_RSS_CTRL NFP_NET_CFG_RSS_BASE +#define NFP_NET_CFG_RSS_MASK (0x7f) +#define NFP_NET_CFG_RSS_MASK_of(_x) ((_x) & 0x7f) +#define NFP_NET_CFG_RSS_IPV4 (1 << 8) /* RSS for IPv4 */ +#define NFP_NET_CFG_RSS_IPV6 (1 << 9) /* RSS for IPv6 */ +#define NFP_NET_CFG_RSS_IPV4_TCP (1 << 10) /* RSS for IPv4/TCP */ +#define NFP_NET_CFG_RSS_IPV4_UDP (1 << 11) /* RSS for IPv4/UDP */ +#define NFP_NET_CFG_RSS_IPV6_TCP (1 << 12) /* RSS for IPv6/TCP */ +#define NFP_NET_CFG_RSS_IPV6_UDP (1 << 13) /* RSS for IPv6/UDP */ +#define NFP_NET_CFG_RSS_TOEPLITZ (1 << 24) /* Use Toeplitz hash */ +#define NFP_NET_CFG_RSS_KEY (NFP_NET_CFG_RSS_BASE + 0x4) +#define NFP_NET_CFG_RSS_KEY_SZ 0x28 +#define NFP_NET_CFG_RSS_ITBL (NFP_NET_CFG_RSS_BASE + 0x4 + \ + NFP_NET_CFG_RSS_KEY_SZ) +#define NFP_NET_CFG_RSS_ITBL_SZ 0x80 + +/* + * TX ring configuration (0x200 - 0x800) + * @NFP_NET_CFG_TXR_BASE: Base offset for TX ring configuration + * @NFP_NET_CFG_TXR_ADDR: Per TX ring DMA address (8B entries) + * @NFP_NET_CFG_TXR_WB_ADDR: Per TX ring write back DMA address (8B entries) + * @NFP_NET_CFG_TXR_SZ: Per TX ring ring size (1B entries) + * @NFP_NET_CFG_TXR_VEC: Per TX ring MSI-X table entry (1B entries) + * @NFP_NET_CFG_TXR_PRIO: Per TX ring priority (1B entries) + * @NFP_NET_CFG_TXR_IRQ_MOD: Per TX ring interrupt moderation (4B entries) + */ +#define NFP_NET_CFG_TXR_BASE 0x0200 +#define NFP_NET_CFG_TXR_ADDR(_x) (NFP_NET_CFG_TXR_BASE + ((_x) * 0x8)) +#define NFP_NET_CFG_TXR_WB_ADDR(_x) (NFP_NET_CFG_TXR_BASE + 0x200 + \ + ((_x) * 0x8)) +#define NFP_NET_CFG_TXR_SZ(_x) (NFP_NET_CFG_TXR_BASE + 0x400 + (_x)) +#define NFP_NET_CFG_TXR_VEC(_x) (NFP_NET_CFG_TXR_BASE + 0x440 + (_x)) +#define NFP_NET_CFG_TXR_PRIO(_x) (NFP_NET_CFG_TXR_BASE + 0x480 + (_x)) +#define NFP_NET_CFG_TXR_IRQ_MOD(_x) (NFP_NET_CFG_TXR_BASE + 0x500 + \ + ((_x) * 0x4)) + +/* + * RX ring configuration (0x0800 - 0x0c00) + * @NFP_NET_CFG_RXR_BASE: Base offset for RX ring configuration + * @NFP_NET_CFG_RXR_ADDR: Per TX ring DMA address (8B entries) + * @NFP_NET_CFG_RXR_SZ: Per TX ring ring size (1B entries) + * @NFP_NET_CFG_RXR_VEC: Per TX ring MSI-X table entry (1B entries) + * @NFP_NET_CFG_RXR_PRIO: Per TX ring priority (1B entries) + * @NFP_NET_CFG_RXR_IRQ_MOD: Per TX ring interrupt moderation (4B entries) + */ +#define NFP_NET_CFG_RXR_BASE 0x0800 +#define NFP_NET_CFG_RXR_ADDR(_x) (NFP_NET_CFG_RXR_BASE + ((_x) * 0x8)) +#define NFP_NET_CFG_RXR_SZ(_x) (NFP_NET_CFG_RXR_BASE + 0x200 + (_x)) +#define NFP_NET_CFG_RXR_VEC(_x) (NFP_NET_CFG_RXR_BASE + 0x240 + (_x)) +#define NFP_NET_CFG_RXR_PRIO(_x) (NFP_NET_CFG_RXR_BASE + 0x280 
+ (_x)) +#define NFP_NET_CFG_RXR_IRQ_MOD(_x) (NFP_NET_CFG_RXR_BASE + 0x300 + \ + ((_x) * 0x4)) + +/* + * Interrupt Control/Cause registers (0x0c00 - 0x0d00) + * These registers are only used when MSI-X auto-masking is not + * enabled (@NFP_NET_CFG_CTRL_MSIXAUTO not set). The array is index + * by MSI-X entry and are 1B in size. If an entry is zero, the + * corresponding entry is enabled. If the FW generates an interrupt, + * it writes a cause into the corresponding field. This also masks + * the MSI-X entry and the host driver must clear the register to + * re-enable the interrupt. + */ +#define NFP_NET_CFG_ICR_BASE 0x0c00 +#define NFP_NET_CFG_ICR(_x) (NFP_NET_CFG_ICR_BASE + (_x)) +#define NFP_NET_CFG_ICR_UNMASKED 0x0 +#define NFP_NET_CFG_ICR_RXTX 0x1 +#define NFP_NET_CFG_ICR_LSC 0x2 + +/* + * General device stats (0x0d00 - 0x0d90) + * all counters are 64bit. + */ +#define NFP_NET_CFG_STATS_BASE 0x0d00 +#define NFP_NET_CFG_STATS_RX_DISCARDS (NFP_NET_CFG_STATS_BASE + 0x00) +#define NFP_NET_CFG_STATS_RX_ERRORS (NFP_NET_CFG_STATS_BASE + 0x08) +#define NFP_NET_CFG_STATS_RX_OCTETS (NFP_NET_CFG_STATS_BASE + 0x10) +#define NFP_NET_CFG_STATS_RX_UC_OCTETS (NFP_NET_CFG_STATS_BASE + 0x18) +#define NFP_NET_CFG_STATS_RX_MC_OCTETS (NFP_NET_CFG_STATS_BASE + 0x20) +#define NFP_NET_CFG_STATS_RX_BC_OCTETS (NFP_NET_CFG_STATS_BASE + 0x28) +#define NFP_NET_CFG_STATS_RX_FRAMES (NFP_NET_CFG_STATS_BASE + 0x30) +#define NFP_NET_CFG_STATS_RX_MC_FRAMES (NFP_NET_CFG_STATS_BASE + 0x38) +#define NFP_NET_CFG_STATS_RX_BC_FRAMES (NFP_NET_CFG_STATS_BASE + 0x40) + +#define NFP_NET_CFG_STATS_TX_DISCARDS (NFP_NET_CFG_STATS_BASE + 0x48) +#define NFP_NET_CFG_STATS_TX_ERRORS (NFP_NET_CFG_STATS_BASE + 0x50) +#define NFP_NET_CFG_STATS_TX_OCTETS (NFP_NET_CFG_STATS_BASE + 0x58) +#define NFP_NET_CFG_STATS_TX_UC_OCTETS (NFP_NET_CFG_STATS_BASE + 0x60) +#define NFP_NET_CFG_STATS_TX_MC_OCTETS (NFP_NET_CFG_STATS_BASE + 0x68) +#define NFP_NET_CFG_STATS_TX_BC_OCTETS (NFP_NET_CFG_STATS_BASE + 0x70) +#define NFP_NET_CFG_STATS_TX_FRAMES (NFP_NET_CFG_STATS_BASE + 0x78) +#define NFP_NET_CFG_STATS_TX_MC_FRAMES (NFP_NET_CFG_STATS_BASE + 0x80) +#define NFP_NET_CFG_STATS_TX_BC_FRAMES (NFP_NET_CFG_STATS_BASE + 0x88) + +/* + * Per ring stats (0x1000 - 0x1800) + * options, 64bit per entry + * @NFP_NET_CFG_TXR_STATS: TX ring statistics (Packet and Byte count) + * @NFP_NET_CFG_RXR_STATS: RX ring statistics (Packet and Byte count) + */ +#define NFP_NET_CFG_TXR_STATS_BASE 0x1000 +#define NFP_NET_CFG_TXR_STATS(_x) (NFP_NET_CFG_TXR_STATS_BASE + \ + ((_x) * 0x10)) +#define NFP_NET_CFG_RXR_STATS_BASE 0x1400 +#define NFP_NET_CFG_RXR_STATS(_x) (NFP_NET_CFG_RXR_STATS_BASE + \ + ((_x) * 0x10)) + +/* PF multiport offset */ +#define NFP_PF_CSR_SLICE_SIZE (32 * 1024) + +#endif /* _NFP_NET_CTRL_H_ */ +/* + * Local variables: + * c-file-style: "Linux" + * indent-tabs-mode: t + * End: + */ diff --git a/src/spdk/dpdk/drivers/net/nfp/nfp_net_logs.h b/src/spdk/dpdk/drivers/net/nfp/nfp_net_logs.h new file mode 100644 index 000000000..27dd87611 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/nfp/nfp_net_logs.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2014, 2015 Netronome Systems, Inc. + * All rights reserved. + */ + +#ifndef _NFP_NET_LOGS_H_ +#define _NFP_NET_LOGS_H_ + +#include + +extern int nfp_logtype_init; +#define PMD_INIT_LOG(level, fmt, args...) 
\ + rte_log(RTE_LOG_ ## level, nfp_logtype_init, \ + "%s(): " fmt "\n", __func__, ## args) +#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>") + +#ifdef RTE_LIBRTE_NFP_NET_DEBUG_RX +#define PMD_RX_LOG(level, fmt, args...) \ + RTE_LOG(level, PMD, "%s() rx: " fmt "\n", __func__, ## args) +#else +#define PMD_RX_LOG(level, fmt, args...) do { } while (0) +#endif + +#ifdef RTE_LIBRTE_NFP_NET_DEBUG_TX +#define PMD_TX_LOG(level, fmt, args...) \ + RTE_LOG(level, PMD, "%s() tx: " fmt "\n", __func__, ## args) +#define ASSERT(x) if (!(x)) rte_panic("NFP_NET: x") +#else +#define PMD_TX_LOG(level, fmt, args...) do { } while (0) +#define ASSERT(x) do { } while (0) +#endif + +#define RTE_LIBRTE_NFP_NET_DEBUG_CPP + +#ifdef RTE_LIBRTE_NFP_NET_DEBUG_CPP +#define PMD_CPP_LOG(level, fmt, args...) \ + RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args) +#else +#define PMD_CPP_LOG(level, fmt, args...) do { } while (0) +#endif + +extern int nfp_logtype_driver; +#define PMD_DRV_LOG(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, nfp_logtype_driver, \ + "%s(): " fmt "\n", __func__, ## args) + +#endif /* _NFP_NET_LOGS_H_ */ diff --git a/src/spdk/dpdk/drivers/net/nfp/nfp_net_pmd.h b/src/spdk/dpdk/drivers/net/nfp/nfp_net_pmd.h new file mode 100644 index 000000000..cb2d19afe --- /dev/null +++ b/src/spdk/dpdk/drivers/net/nfp/nfp_net_pmd.h @@ -0,0 +1,449 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2014-2018 Netronome Systems, Inc. + * All rights reserved. + */ + +/* + * vim:shiftwidth=8:noexpandtab + * + * @file dpdk/pmd/nfp_net_pmd.h + * + * Netronome NFP_NET PMD driver + */ + +#ifndef _NFP_NET_PMD_H_ +#define _NFP_NET_PMD_H_ + +#define NFP_NET_PMD_VERSION "0.1" +#define PCI_VENDOR_ID_NETRONOME 0x19ee +#define PCI_DEVICE_ID_NFP4000_PF_NIC 0x4000 +#define PCI_DEVICE_ID_NFP6000_PF_NIC 0x6000 +#define PCI_DEVICE_ID_NFP6000_VF_NIC 0x6003 + +/* Forward declaration */ +struct nfp_net_adapter; + +/* + * The maximum number of descriptors is limited by design as + * DPDK uses uint16_t variables for these values + */ +#define NFP_NET_MAX_TX_DESC (32 * 1024) +#define NFP_NET_MIN_TX_DESC 64 + +#define NFP_NET_MAX_RX_DESC (32 * 1024) +#define NFP_NET_MIN_RX_DESC 64 + +/* Bar allocation */ +#define NFP_NET_CRTL_BAR 0 +#define NFP_NET_TX_BAR 2 +#define NFP_NET_RX_BAR 2 +#define NFP_QCP_QUEUE_AREA_SZ 0x80000 + +/* Macros for accessing the Queue Controller Peripheral 'CSRs' */ +#define NFP_QCP_QUEUE_OFF(_x) ((_x) * 0x800) +#define NFP_QCP_QUEUE_ADD_RPTR 0x0000 +#define NFP_QCP_QUEUE_ADD_WPTR 0x0004 +#define NFP_QCP_QUEUE_STS_LO 0x0008 +#define NFP_QCP_QUEUE_STS_LO_READPTR_mask (0x3ffff) +#define NFP_QCP_QUEUE_STS_HI 0x000c +#define NFP_QCP_QUEUE_STS_HI_WRITEPTR_mask (0x3ffff) + +/* Interrupt definitions */ +#define NFP_NET_IRQ_LSC_IDX 0 + +/* Default values for RX/TX configuration */ +#define DEFAULT_RX_FREE_THRESH 32 +#define DEFAULT_RX_PTHRESH 8 +#define DEFAULT_RX_HTHRESH 8 +#define DEFAULT_RX_WTHRESH 0 + +#define DEFAULT_TX_RS_THRESH 32 +#define DEFAULT_TX_FREE_THRESH 32 +#define DEFAULT_TX_PTHRESH 32 +#define DEFAULT_TX_HTHRESH 0 +#define DEFAULT_TX_WTHRESH 0 +#define DEFAULT_TX_RSBIT_THRESH 32 + +/* Alignment for dma zones */ +#define NFP_MEMZONE_ALIGN 128 + +/* + * This is used by the reconfig protocol. It sets the maximum time waiting in + * milliseconds before a reconfig timeout happens. 
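+ *
+ * A minimal sketch of how this timeout is typically consumed when polling
+ * for a reconfig acknowledgement (illustration only: NFP_NET_CFG_UPDATE and
+ * the hw->ctrl_bar layout are assumed from nfp_net_ctrl.h and are not
+ * defined in this header):
+ *
+ *   int cnt;
+ *
+ *   for (cnt = 0; cnt < NFP_NET_POLL_TIMEOUT; cnt++) {
+ *           if (nn_readl(hw->ctrl_bar + NFP_NET_CFG_UPDATE) == 0)
+ *                   break;          // firmware acknowledged the update
+ *           rte_delay_ms(1);        // one poll per millisecond
+ *   }
+ *   // cnt reaching NFP_NET_POLL_TIMEOUT means the reconfig timed out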
+ */ +#define NFP_NET_POLL_TIMEOUT 5000 + +#define NFP_QCP_QUEUE_ADDR_SZ (0x800) + +#define NFP_NET_LINK_DOWN_CHECK_TIMEOUT 4000 /* ms */ +#define NFP_NET_LINK_UP_CHECK_TIMEOUT 1000 /* ms */ + +/* Version number helper defines */ +#define NFD_CFG_CLASS_VER_msk 0xff +#define NFD_CFG_CLASS_VER_shf 24 +#define NFD_CFG_CLASS_VER(x) (((x) & 0xff) << 24) +#define NFD_CFG_CLASS_VER_of(x) (((x) >> 24) & 0xff) +#define NFD_CFG_CLASS_TYPE_msk 0xff +#define NFD_CFG_CLASS_TYPE_shf 16 +#define NFD_CFG_CLASS_TYPE(x) (((x) & 0xff) << 16) +#define NFD_CFG_CLASS_TYPE_of(x) (((x) >> 16) & 0xff) +#define NFD_CFG_MAJOR_VERSION_msk 0xff +#define NFD_CFG_MAJOR_VERSION_shf 8 +#define NFD_CFG_MAJOR_VERSION(x) (((x) & 0xff) << 8) +#define NFD_CFG_MAJOR_VERSION_of(x) (((x) >> 8) & 0xff) +#define NFD_CFG_MINOR_VERSION_msk 0xff +#define NFD_CFG_MINOR_VERSION_shf 0 +#define NFD_CFG_MINOR_VERSION(x) (((x) & 0xff) << 0) +#define NFD_CFG_MINOR_VERSION_of(x) (((x) >> 0) & 0xff) + +#include +#include + +static inline uint8_t nn_readb(volatile const void *addr) +{ + return rte_read8(addr); +} + +static inline void nn_writeb(uint8_t val, volatile void *addr) +{ + rte_write8(val, addr); +} + +static inline uint32_t nn_readl(volatile const void *addr) +{ + return rte_read32(addr); +} + +static inline void nn_writel(uint32_t val, volatile void *addr) +{ + rte_write32(val, addr); +} + +static inline void nn_writew(uint16_t val, volatile void *addr) +{ + rte_write16(val, addr); +} + +static inline uint64_t nn_readq(volatile void *addr) +{ + const volatile uint32_t *p = addr; + uint32_t low, high; + + high = nn_readl((volatile const void *)(p + 1)); + low = nn_readl((volatile const void *)p); + + return low + ((uint64_t)high << 32); +} + +static inline void nn_writeq(uint64_t val, volatile void *addr) +{ + nn_writel(val >> 32, (volatile char *)addr + 4); + nn_writel(val, addr); +} + +/* TX descriptor format */ +#define PCIE_DESC_TX_EOP (1 << 7) +#define PCIE_DESC_TX_OFFSET_MASK (0x7f) + +/* Flags in the host TX descriptor */ +#define PCIE_DESC_TX_CSUM (1 << 7) +#define PCIE_DESC_TX_IP4_CSUM (1 << 6) +#define PCIE_DESC_TX_TCP_CSUM (1 << 5) +#define PCIE_DESC_TX_UDP_CSUM (1 << 4) +#define PCIE_DESC_TX_VLAN (1 << 3) +#define PCIE_DESC_TX_LSO (1 << 2) +#define PCIE_DESC_TX_ENCAP_NONE (0) +#define PCIE_DESC_TX_ENCAP_VXLAN (1 << 1) +#define PCIE_DESC_TX_ENCAP_GRE (1 << 0) + +struct nfp_net_tx_desc { + union { + struct { + uint8_t dma_addr_hi; /* High bits of host buf address */ + __le16 dma_len; /* Length to DMA for this desc */ + uint8_t offset_eop; /* Offset in buf where pkt starts + + * highest bit is eop flag. + */ + __le32 dma_addr_lo; /* Low 32bit of host buf addr */ + + __le16 mss; /* MSS to be used for LSO */ + uint8_t lso_hdrlen; /* LSO, where the data starts */ + uint8_t flags; /* TX Flags, see @PCIE_DESC_TX_* */ + + union { + struct { + /* + * L3 and L4 header offsets required + * for TSOv2 + */ + uint8_t l3_offset; + uint8_t l4_offset; + }; + __le16 vlan; /* VLAN tag to add if indicated */ + }; + __le16 data_len; /* Length of frame + meta data */ + } __rte_packed; + __le32 vals[4]; + }; +}; + +struct nfp_net_txq { + struct nfp_net_hw *hw; /* Backpointer to nfp_net structure */ + + /* + * Queue information: @qidx is the queue index from Linux's + * perspective. @tx_qcidx is the index of the Queue + * Controller Peripheral queue relative to the TX queue BAR. + * @cnt is the size of the queue in number of + * descriptors. 
@qcp_q is a pointer to the base of the queue + * structure on the NFP + */ + uint8_t *qcp_q; + + /* + * Read and Write pointers. @wr_p and @rd_p are host side pointer, + * they are free running and have little relation to the QCP pointers * + * @qcp_rd_p is a local copy queue controller peripheral read pointer + */ + + uint32_t wr_p; + uint32_t rd_p; + + uint32_t tx_count; + + uint32_t tx_free_thresh; + + /* + * For each descriptor keep a reference to the mbuf and + * DMA address used until completion is signalled. + */ + struct { + struct rte_mbuf *mbuf; + } *txbufs; + + /* + * Information about the host side queue location. @txds is + * the virtual address for the queue, @dma is the DMA address + * of the queue and @size is the size in bytes for the queue + * (needed for free) + */ + struct nfp_net_tx_desc *txds; + + /* + * At this point 48 bytes have been used for all the fields in the + * TX critical path. We have room for 8 bytes and still all placed + * in a cache line. We are not using the threshold values below but + * if we need to, we can add the most used in the remaining bytes. + */ + uint32_t tx_rs_thresh; /* not used by now. Future? */ + uint32_t tx_pthresh; /* not used by now. Future? */ + uint32_t tx_hthresh; /* not used by now. Future? */ + uint32_t tx_wthresh; /* not used by now. Future? */ + uint16_t port_id; + int qidx; + int tx_qcidx; + __le64 dma; +} __rte_aligned(64); + +/* RX and freelist descriptor format */ +#define PCIE_DESC_RX_DD (1 << 7) +#define PCIE_DESC_RX_META_LEN_MASK (0x7f) + +/* Flags in the RX descriptor */ +#define PCIE_DESC_RX_RSS (1 << 15) +#define PCIE_DESC_RX_I_IP4_CSUM (1 << 14) +#define PCIE_DESC_RX_I_IP4_CSUM_OK (1 << 13) +#define PCIE_DESC_RX_I_TCP_CSUM (1 << 12) +#define PCIE_DESC_RX_I_TCP_CSUM_OK (1 << 11) +#define PCIE_DESC_RX_I_UDP_CSUM (1 << 10) +#define PCIE_DESC_RX_I_UDP_CSUM_OK (1 << 9) +#define PCIE_DESC_RX_SPARE (1 << 8) +#define PCIE_DESC_RX_EOP (1 << 7) +#define PCIE_DESC_RX_IP4_CSUM (1 << 6) +#define PCIE_DESC_RX_IP4_CSUM_OK (1 << 5) +#define PCIE_DESC_RX_TCP_CSUM (1 << 4) +#define PCIE_DESC_RX_TCP_CSUM_OK (1 << 3) +#define PCIE_DESC_RX_UDP_CSUM (1 << 2) +#define PCIE_DESC_RX_UDP_CSUM_OK (1 << 1) +#define PCIE_DESC_RX_VLAN (1 << 0) + +#define PCIE_DESC_RX_L4_CSUM_OK (PCIE_DESC_RX_TCP_CSUM_OK | \ + PCIE_DESC_RX_UDP_CSUM_OK) +struct nfp_net_rx_desc { + union { + /* Freelist descriptor */ + struct { + uint8_t dma_addr_hi; + __le16 spare; + uint8_t dd; + + __le32 dma_addr_lo; + } __rte_packed fld; + + /* RX descriptor */ + struct { + __le16 data_len; + uint8_t reserved; + uint8_t meta_len_dd; + + __le16 flags; + __le16 vlan; + } __rte_packed rxd; + + __le32 vals[2]; + }; +}; + +struct nfp_net_rx_buff { + struct rte_mbuf *mbuf; +}; + +struct nfp_net_rxq { + struct nfp_net_hw *hw; /* Backpointer to nfp_net structure */ + + /* + * @qcp_fl and @qcp_rx are pointers to the base addresses of the + * freelist and RX queue controller peripheral queue structures on the + * NFP + */ + uint8_t *qcp_fl; + uint8_t *qcp_rx; + + /* + * Read and Write pointers. @wr_p and @rd_p are host side + * pointer, they are free running and have little relation to + * the QCP pointers. @wr_p is where the driver adds new + * freelist descriptors and @rd_p is where the driver start + * reading descriptors for newly arrive packets from. + */ + uint32_t rd_p; + + /* + * For each buffer placed on the freelist, record the + * associated SKB + */ + struct nfp_net_rx_buff *rxbufs; + + /* + * Information about the host side queue location. 
@rxds is + * the virtual address for the queue + */ + struct nfp_net_rx_desc *rxds; + + /* + * The mempool is created by the user specifying a mbuf size. + * We save here the reference of the mempool needed in the RX + * path and the mbuf size for checking received packets can be + * safely copied to the mbuf using the NFP_NET_RX_OFFSET + */ + struct rte_mempool *mem_pool; + uint16_t mbuf_size; + + /* + * Next two fields are used for giving more free descriptors + * to the NFP + */ + uint16_t rx_free_thresh; + uint16_t nb_rx_hold; + + /* the size of the queue in number of descriptors */ + uint16_t rx_count; + + /* + * Fields above this point fit in a single cache line and are all used + * in the RX critical path. Fields below this point are just used + * during queue configuration or not used at all (yet) + */ + + /* referencing dev->data->port_id */ + uint16_t port_id; + + uint8_t crc_len; /* Not used by now */ + uint8_t drop_en; /* Not used by now */ + + /* DMA address of the queue */ + __le64 dma; + + /* + * Queue information: @qidx is the queue index from Linux's + * perspective. @fl_qcidx is the index of the Queue + * Controller peripheral queue relative to the RX queue BAR + * used for the freelist and @rx_qcidx is the Queue Controller + * Peripheral index for the RX queue. + */ + int qidx; + int fl_qcidx; + int rx_qcidx; +} __rte_aligned(64); + +struct nfp_net_hw { + /* Info from the firmware */ + uint32_t ver; + uint32_t cap; + uint32_t max_mtu; + uint32_t mtu; + uint32_t rx_offset; + + /* Current values for control */ + uint32_t ctrl; + + uint8_t *ctrl_bar; + uint8_t *tx_bar; + uint8_t *rx_bar; + + int stride_rx; + int stride_tx; + + uint8_t *qcp_cfg; + rte_spinlock_t reconfig_lock; + + uint32_t max_tx_queues; + uint32_t max_rx_queues; + uint16_t flbufsz; + uint16_t device_id; + uint16_t vendor_id; + uint16_t subsystem_device_id; + uint16_t subsystem_vendor_id; +#if defined(DSTQ_SELECTION) +#if DSTQ_SELECTION + uint16_t device_function; +#endif +#endif + + uint8_t mac_addr[RTE_ETHER_ADDR_LEN]; + + /* Records starting point for counters */ + struct rte_eth_stats eth_stats_base; + + struct nfp_cpp *cpp; + struct nfp_cpp_area *ctrl_area; + struct nfp_cpp_area *hwqueues_area; + struct nfp_cpp_area *msix_area; + + uint8_t *hw_queues; + uint8_t is_pf; + uint8_t pf_port_idx; + uint8_t pf_multiport_enabled; + uint8_t total_ports; + + union eth_table_entry *eth_table; + + struct nfp_hwinfo *hwinfo; + struct nfp_rtsym_table *sym_tbl; + uint32_t nfp_cpp_service_id; +}; + +struct nfp_net_adapter { + struct nfp_net_hw hw; +}; + +#define NFP_NET_DEV_PRIVATE_TO_HW(adapter)\ + (&((struct nfp_net_adapter *)adapter)->hw) + +#endif /* _NFP_NET_PMD_H_ */ +/* + * Local variables: + * c-file-style: "Linux" + * indent-tabs-mode: t + * End: + */ diff --git a/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp-common/nfp_cppat.h b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp-common/nfp_cppat.h new file mode 100644 index 000000000..538f882bf --- /dev/null +++ b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp-common/nfp_cppat.h @@ -0,0 +1,725 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Netronome Systems, Inc. + * All rights reserved. 
+ */ + +#ifndef __NFP_CPPAT_H__ +#define __NFP_CPPAT_H__ + +#include "nfp_platform.h" +#include "nfp_resid.h" + +/* This file contains helpers for creating CPP commands + * + * All magic NFP-6xxx IMB 'mode' numbers here are from: + * Databook (1 August 2013) + * - System Overview and Connectivity + * -- Internal Connectivity + * --- Distributed Switch Fabric - Command Push/Pull (DSF-CPP) Bus + * ---- CPP addressing + * ----- Table 3.6. CPP Address Translation Mode Commands + */ + +#define _NIC_NFP6000_MU_LOCALITY_DIRECT 2 + +static inline int +_nfp6000_decode_basic(uint64_t addr, int *dest_island, int cpp_tgt, int mode, + int addr40, int isld1, int isld0); + +static uint64_t +_nic_mask64(int msb, int lsb, int at0) +{ + uint64_t v; + int w = msb - lsb + 1; + + if (w == 64) + return ~(uint64_t)0; + + if ((lsb + w) > 64) + return 0; + + v = (UINT64_C(1) << w) - 1; + + if (at0) + return v; + + return v << lsb; +} + +/* For VQDR, we may not modify the Channel bits, which might overlap + * with the Index bit. When it does, we need to ensure that isld0 == isld1. + */ +static inline int +_nfp6000_encode_basic(uint64_t *addr, int dest_island, int cpp_tgt, int mode, + int addr40, int isld1, int isld0) +{ + uint64_t _u64; + int iid_lsb, idx_lsb; + int i, v = 0; + int isld[2]; + + isld[0] = isld0; + isld[1] = isld1; + + switch (cpp_tgt) { + case NFP6000_CPPTGT_MU: + /* This function doesn't handle MU */ + return NFP_ERRNO(EINVAL); + case NFP6000_CPPTGT_CTXPB: + /* This function doesn't handle CTXPB */ + return NFP_ERRNO(EINVAL); + default: + break; + } + + switch (mode) { + case 0: + if (cpp_tgt == NFP6000_CPPTGT_VQDR && !addr40) { + /* + * In this specific mode we'd rather not modify the + * address but we can verify if the existing contents + * will point to a valid island. + */ + i = _nfp6000_decode_basic(*addr, &v, cpp_tgt, mode, + addr40, isld1, + isld0); + if (i != 0) + /* Full Island ID and channel bits overlap */ + return i; + + /* + * If dest_island is invalid, the current address won't + * go where expected. + */ + if (dest_island != -1 && dest_island != v) + return NFP_ERRNO(EINVAL); + + /* If dest_island was -1, we don't care */ + return 0; + } + + iid_lsb = (addr40) ? 34 : 26; + + /* <39:34> or <31:26> */ + _u64 = _nic_mask64((iid_lsb + 5), iid_lsb, 0); + *addr &= ~_u64; + *addr |= (((uint64_t)dest_island) << iid_lsb) & _u64; + return 0; + case 1: + if (cpp_tgt == NFP6000_CPPTGT_VQDR && !addr40) { + i = _nfp6000_decode_basic(*addr, &v, cpp_tgt, mode, + addr40, isld1, isld0); + if (i != 0) + /* Full Island ID and channel bits overlap */ + return i; + + /* + * If dest_island is invalid, the current address won't + * go where expected. + */ + if (dest_island != -1 && dest_island != v) + return NFP_ERRNO(EINVAL); + + /* If dest_island was -1, we don't care */ + return 0; + } + + idx_lsb = (addr40) ? 39 : 31; + if (dest_island == isld0) { + /* Only need to clear the Index bit */ + *addr &= ~_nic_mask64(idx_lsb, idx_lsb, 0); + return 0; + } + + if (dest_island == isld1) { + /* Only need to set the Index bit */ + *addr |= (UINT64_C(1) << idx_lsb); + return 0; + } + + return NFP_ERRNO(ENODEV); + case 2: + if (cpp_tgt == NFP6000_CPPTGT_VQDR && !addr40) { + /* iid<0> = addr<30> = channel<0> */ + /* channel<1> = addr<31> = Index */ + + /* + * Special case where we allow channel bits to be set + * before hand and with them select an island. + * So we need to confirm that it's at least plausible. 
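+ * For example (island numbers are illustrative only): if the caller asks
+ * for dest_island 33 but the channel/Index bits already present in *addr
+ * decode to island 34, the request is rejected below with EINVAL rather
+ * than silently redirecting the access.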
+ */ + i = _nfp6000_decode_basic(*addr, &v, cpp_tgt, mode, + addr40, isld1, isld0); + if (i != 0) + /* Full Island ID and channel bits overlap */ + return i; + + /* + * If dest_island is invalid, the current address won't + * go where expected. + */ + if (dest_island != -1 && dest_island != v) + return NFP_ERRNO(EINVAL); + + /* If dest_island was -1, we don't care */ + return 0; + } + + /* + * Make sure we compare against isldN values by clearing the + * LSB. This is what the silicon does. + **/ + isld[0] &= ~1; + isld[1] &= ~1; + + idx_lsb = (addr40) ? 39 : 31; + iid_lsb = idx_lsb - 1; + + /* + * Try each option, take first one that fits. Not sure if we + * would want to do some smarter searching and prefer 0 or non-0 + * island IDs. + */ + + for (i = 0; i < 2; i++) { + for (v = 0; v < 2; v++) { + if (dest_island != (isld[i] | v)) + continue; + *addr &= ~_nic_mask64(idx_lsb, iid_lsb, 0); + *addr |= (((uint64_t)i) << idx_lsb); + *addr |= (((uint64_t)v) << iid_lsb); + return 0; + } + } + + return NFP_ERRNO(ENODEV); + case 3: + if (cpp_tgt == NFP6000_CPPTGT_VQDR && !addr40) { + /* + * iid<0> = addr<29> = data + * iid<1> = addr<30> = channel<0> + * channel<1> = addr<31> = Index + */ + i = _nfp6000_decode_basic(*addr, &v, cpp_tgt, mode, + addr40, isld1, isld0); + if (i != 0) + /* Full Island ID and channel bits overlap */ + return i; + + if (dest_island != -1 && dest_island != v) + return NFP_ERRNO(EINVAL); + + /* If dest_island was -1, we don't care */ + return 0; + } + + isld[0] &= ~3; + isld[1] &= ~3; + + idx_lsb = (addr40) ? 39 : 31; + iid_lsb = idx_lsb - 2; + + for (i = 0; i < 2; i++) { + for (v = 0; v < 4; v++) { + if (dest_island != (isld[i] | v)) + continue; + *addr &= ~_nic_mask64(idx_lsb, iid_lsb, 0); + *addr |= (((uint64_t)i) << idx_lsb); + *addr |= (((uint64_t)v) << iid_lsb); + return 0; + } + } + return NFP_ERRNO(ENODEV); + default: + break; + } + + return NFP_ERRNO(EINVAL); +} + +static inline int +_nfp6000_decode_basic(uint64_t addr, int *dest_island, int cpp_tgt, int mode, + int addr40, int isld1, int isld0) +{ + int iid_lsb, idx_lsb; + + switch (cpp_tgt) { + case NFP6000_CPPTGT_MU: + /* This function doesn't handle MU */ + return NFP_ERRNO(EINVAL); + case NFP6000_CPPTGT_CTXPB: + /* This function doesn't handle CTXPB */ + return NFP_ERRNO(EINVAL); + default: + break; + } + + switch (mode) { + case 0: + /* + * For VQDR, in this mode for 32-bit addressing it would be + * islands 0, 16, 32 and 48 depending on channel and upper + * address bits. Since those are not all valid islands, most + * decode cases would result in bad island IDs, but we do them + * anyway since this is decoding an address that is already + * assumed to be used as-is to get to sram. + */ + iid_lsb = (addr40) ? 34 : 26; + *dest_island = (int)(addr >> iid_lsb) & 0x3F; + return 0; + case 1: + /* + * For VQDR 32-bit, this would decode as: + * Channel 0: island#0 + * Channel 1: island#0 + * Channel 2: island#1 + * Channel 3: island#1 + * + * That would be valid as long as both islands have VQDR. + * Let's allow this. + */ + + idx_lsb = (addr40) ? 39 : 31; + if (addr & _nic_mask64(idx_lsb, idx_lsb, 0)) + *dest_island = isld1; + else + *dest_island = isld0; + + return 0; + case 2: + /* + * For VQDR 32-bit: + * Channel 0: (island#0 | 0) + * Channel 1: (island#0 | 1) + * Channel 2: (island#1 | 0) + * Channel 3: (island#1 | 1) + * + * Make sure we compare against isldN values by clearing the + * LSB. This is what the silicon does. + */ + isld0 &= ~1; + isld1 &= ~1; + + idx_lsb = (addr40) ? 
39 : 31; + iid_lsb = idx_lsb - 1; + + if (addr & _nic_mask64(idx_lsb, idx_lsb, 0)) + *dest_island = isld1 | (int)((addr >> iid_lsb) & 1); + else + *dest_island = isld0 | (int)((addr >> iid_lsb) & 1); + + return 0; + case 3: + /* + * In this mode the data address starts to affect the island ID + * so rather not allow it. In some really specific case one + * could use this to send the upper half of the VQDR channel to + * another MU, but this is getting very specific. However, as + * above for mode 0, this is the decoder and the caller should + * validate the resulting IID. This blindly does what the + * silicon would do. + */ + + isld0 &= ~3; + isld1 &= ~3; + + idx_lsb = (addr40) ? 39 : 31; + iid_lsb = idx_lsb - 2; + + if (addr & _nic_mask64(idx_lsb, idx_lsb, 0)) + *dest_island = isld1 | (int)((addr >> iid_lsb) & 3); + else + *dest_island = isld0 | (int)((addr >> iid_lsb) & 3); + + return 0; + default: + break; + } + + return NFP_ERRNO(EINVAL); +} + +static inline int +_nfp6000_cppat_mu_locality_lsb(int mode, int addr40) +{ + switch (mode) { + case 0: + case 1: + case 2: + case 3: + return (addr40) ? 38 : 30; + default: + break; + } + return NFP_ERRNO(EINVAL); +} + +static inline int +_nfp6000_encode_mu(uint64_t *addr, int dest_island, int mode, int addr40, + int isld1, int isld0) +{ + uint64_t _u64; + int iid_lsb, idx_lsb, locality_lsb; + int i, v; + int isld[2]; + int da; + + isld[0] = isld0; + isld[1] = isld1; + locality_lsb = _nfp6000_cppat_mu_locality_lsb(mode, addr40); + + if (locality_lsb < 0) + return NFP_ERRNO(EINVAL); + + if (((*addr >> locality_lsb) & 3) == _NIC_NFP6000_MU_LOCALITY_DIRECT) + da = 1; + else + da = 0; + + switch (mode) { + case 0: + iid_lsb = (addr40) ? 32 : 24; + _u64 = _nic_mask64((iid_lsb + 5), iid_lsb, 0); + *addr &= ~_u64; + *addr |= (((uint64_t)dest_island) << iid_lsb) & _u64; + return 0; + case 1: + if (da) { + iid_lsb = (addr40) ? 32 : 24; + _u64 = _nic_mask64((iid_lsb + 5), iid_lsb, 0); + *addr &= ~_u64; + *addr |= (((uint64_t)dest_island) << iid_lsb) & _u64; + return 0; + } + + idx_lsb = (addr40) ? 37 : 29; + if (dest_island == isld0) { + *addr &= ~_nic_mask64(idx_lsb, idx_lsb, 0); + return 0; + } + + if (dest_island == isld1) { + *addr |= (UINT64_C(1) << idx_lsb); + return 0; + } + + return NFP_ERRNO(ENODEV); + case 2: + if (da) { + iid_lsb = (addr40) ? 32 : 24; + _u64 = _nic_mask64((iid_lsb + 5), iid_lsb, 0); + *addr &= ~_u64; + *addr |= (((uint64_t)dest_island) << iid_lsb) & _u64; + return 0; + } + + /* + * Make sure we compare against isldN values by clearing the + * LSB. This is what the silicon does. + */ + isld[0] &= ~1; + isld[1] &= ~1; + + idx_lsb = (addr40) ? 37 : 29; + iid_lsb = idx_lsb - 1; + + /* + * Try each option, take first one that fits. Not sure if we + * would want to do some smarter searching and prefer 0 or + * non-0 island IDs. + */ + + for (i = 0; i < 2; i++) { + for (v = 0; v < 2; v++) { + if (dest_island != (isld[i] | v)) + continue; + *addr &= ~_nic_mask64(idx_lsb, iid_lsb, 0); + *addr |= (((uint64_t)i) << idx_lsb); + *addr |= (((uint64_t)v) << iid_lsb); + return 0; + } + } + return NFP_ERRNO(ENODEV); + case 3: + /* + * Only the EMU will use 40 bit addressing. Silently set the + * direct locality bit for everyone else. The SDK toolchain + * uses dest_island <= 0 to test for atypical address encodings + * to support access to local-island CTM with a 32-but address + * (high-locality is effectively ignored and just used for + * routing to island #0). 
+ */ + if (dest_island > 0 && + (dest_island < 24 || dest_island > 26)) { + *addr |= ((uint64_t)_NIC_NFP6000_MU_LOCALITY_DIRECT) + << locality_lsb; + da = 1; + } + + if (da) { + iid_lsb = (addr40) ? 32 : 24; + _u64 = _nic_mask64((iid_lsb + 5), iid_lsb, 0); + *addr &= ~_u64; + *addr |= (((uint64_t)dest_island) << iid_lsb) & _u64; + return 0; + } + + isld[0] &= ~3; + isld[1] &= ~3; + + idx_lsb = (addr40) ? 37 : 29; + iid_lsb = idx_lsb - 2; + + for (i = 0; i < 2; i++) { + for (v = 0; v < 4; v++) { + if (dest_island != (isld[i] | v)) + continue; + *addr &= ~_nic_mask64(idx_lsb, iid_lsb, 0); + *addr |= (((uint64_t)i) << idx_lsb); + *addr |= (((uint64_t)v) << iid_lsb); + return 0; + } + } + + return NFP_ERRNO(ENODEV); + default: + break; + } + + return NFP_ERRNO(EINVAL); +} + +static inline int +_nfp6000_decode_mu(uint64_t addr, int *dest_island, int mode, int addr40, + int isld1, int isld0) +{ + int iid_lsb, idx_lsb, locality_lsb; + int da; + + locality_lsb = _nfp6000_cppat_mu_locality_lsb(mode, addr40); + + if (((addr >> locality_lsb) & 3) == _NIC_NFP6000_MU_LOCALITY_DIRECT) + da = 1; + else + da = 0; + + switch (mode) { + case 0: + iid_lsb = (addr40) ? 32 : 24; + *dest_island = (int)(addr >> iid_lsb) & 0x3F; + return 0; + case 1: + if (da) { + iid_lsb = (addr40) ? 32 : 24; + *dest_island = (int)(addr >> iid_lsb) & 0x3F; + return 0; + } + + idx_lsb = (addr40) ? 37 : 29; + + if (addr & _nic_mask64(idx_lsb, idx_lsb, 0)) + *dest_island = isld1; + else + *dest_island = isld0; + + return 0; + case 2: + if (da) { + iid_lsb = (addr40) ? 32 : 24; + *dest_island = (int)(addr >> iid_lsb) & 0x3F; + return 0; + } + /* + * Make sure we compare against isldN values by clearing the + * LSB. This is what the silicon does. + */ + isld0 &= ~1; + isld1 &= ~1; + + idx_lsb = (addr40) ? 37 : 29; + iid_lsb = idx_lsb - 1; + + if (addr & _nic_mask64(idx_lsb, idx_lsb, 0)) + *dest_island = isld1 | (int)((addr >> iid_lsb) & 1); + else + *dest_island = isld0 | (int)((addr >> iid_lsb) & 1); + + return 0; + case 3: + if (da) { + iid_lsb = (addr40) ? 32 : 24; + *dest_island = (int)(addr >> iid_lsb) & 0x3F; + return 0; + } + + isld0 &= ~3; + isld1 &= ~3; + + idx_lsb = (addr40) ? 
37 : 29; + iid_lsb = idx_lsb - 2; + + if (addr & _nic_mask64(idx_lsb, idx_lsb, 0)) + *dest_island = isld1 | (int)((addr >> iid_lsb) & 3); + else + *dest_island = isld0 | (int)((addr >> iid_lsb) & 3); + + return 0; + default: + break; + } + + return NFP_ERRNO(EINVAL); +} + +static inline int +_nfp6000_cppat_addr_encode(uint64_t *addr, int dest_island, int cpp_tgt, + int mode, int addr40, int isld1, int isld0) +{ + switch (cpp_tgt) { + case NFP6000_CPPTGT_NBI: + case NFP6000_CPPTGT_VQDR: + case NFP6000_CPPTGT_ILA: + case NFP6000_CPPTGT_PCIE: + case NFP6000_CPPTGT_ARM: + case NFP6000_CPPTGT_CRYPTO: + case NFP6000_CPPTGT_CLS: + return _nfp6000_encode_basic(addr, dest_island, cpp_tgt, mode, + addr40, isld1, isld0); + + case NFP6000_CPPTGT_MU: + return _nfp6000_encode_mu(addr, dest_island, mode, addr40, + isld1, isld0); + + case NFP6000_CPPTGT_CTXPB: + if (mode != 1 || addr40 != 0) + return NFP_ERRNO(EINVAL); + + *addr &= ~_nic_mask64(29, 24, 0); + *addr |= (((uint64_t)dest_island) << 24) & + _nic_mask64(29, 24, 0); + return 0; + default: + break; + } + + return NFP_ERRNO(EINVAL); +} + +static inline int +_nfp6000_cppat_addr_decode(uint64_t addr, int *dest_island, int cpp_tgt, + int mode, int addr40, int isld1, int isld0) +{ + switch (cpp_tgt) { + case NFP6000_CPPTGT_NBI: + case NFP6000_CPPTGT_VQDR: + case NFP6000_CPPTGT_ILA: + case NFP6000_CPPTGT_PCIE: + case NFP6000_CPPTGT_ARM: + case NFP6000_CPPTGT_CRYPTO: + case NFP6000_CPPTGT_CLS: + return _nfp6000_decode_basic(addr, dest_island, cpp_tgt, mode, + addr40, isld1, isld0); + + case NFP6000_CPPTGT_MU: + return _nfp6000_decode_mu(addr, dest_island, mode, addr40, + isld1, isld0); + + case NFP6000_CPPTGT_CTXPB: + if (mode != 1 || addr40 != 0) + return -EINVAL; + *dest_island = (int)(addr >> 24) & 0x3F; + return 0; + default: + break; + } + + return -EINVAL; +} + +static inline int +_nfp6000_cppat_addr_iid_clear(uint64_t *addr, int cpp_tgt, int mode, int addr40) +{ + int iid_lsb, locality_lsb, da; + + switch (cpp_tgt) { + case NFP6000_CPPTGT_NBI: + case NFP6000_CPPTGT_VQDR: + case NFP6000_CPPTGT_ILA: + case NFP6000_CPPTGT_PCIE: + case NFP6000_CPPTGT_ARM: + case NFP6000_CPPTGT_CRYPTO: + case NFP6000_CPPTGT_CLS: + switch (mode) { + case 0: + iid_lsb = (addr40) ? 34 : 26; + *addr &= ~(UINT64_C(0x3F) << iid_lsb); + return 0; + case 1: + iid_lsb = (addr40) ? 39 : 31; + *addr &= ~_nic_mask64(iid_lsb, iid_lsb, 0); + return 0; + case 2: + iid_lsb = (addr40) ? 38 : 30; + *addr &= ~_nic_mask64(iid_lsb + 1, iid_lsb, 0); + return 0; + case 3: + iid_lsb = (addr40) ? 37 : 29; + *addr &= ~_nic_mask64(iid_lsb + 2, iid_lsb, 0); + return 0; + default: + break; + } + case NFP6000_CPPTGT_MU: + locality_lsb = _nfp6000_cppat_mu_locality_lsb(mode, addr40); + da = (((*addr >> locality_lsb) & 3) == + _NIC_NFP6000_MU_LOCALITY_DIRECT); + switch (mode) { + case 0: + iid_lsb = (addr40) ? 32 : 24; + *addr &= ~(UINT64_C(0x3F) << iid_lsb); + return 0; + case 1: + if (da) { + iid_lsb = (addr40) ? 32 : 24; + *addr &= ~(UINT64_C(0x3F) << iid_lsb); + return 0; + } + iid_lsb = (addr40) ? 37 : 29; + *addr &= ~_nic_mask64(iid_lsb, iid_lsb, 0); + return 0; + case 2: + if (da) { + iid_lsb = (addr40) ? 32 : 24; + *addr &= ~(UINT64_C(0x3F) << iid_lsb); + return 0; + } + + iid_lsb = (addr40) ? 36 : 28; + *addr &= ~_nic_mask64(iid_lsb + 1, iid_lsb, 0); + return 0; + case 3: + if (da) { + iid_lsb = (addr40) ? 32 : 24; + *addr &= ~(UINT64_C(0x3F) << iid_lsb); + return 0; + } + + iid_lsb = (addr40) ? 
35 : 27; + *addr &= ~_nic_mask64(iid_lsb + 2, iid_lsb, 0); + return 0; + default: + break; + } + case NFP6000_CPPTGT_CTXPB: + if (mode != 1 || addr40 != 0) + return 0; + *addr &= ~(UINT64_C(0x3F) << 24); + return 0; + default: + break; + } + + return NFP_ERRNO(EINVAL); +} + +#endif /* __NFP_CPPAT_H__ */ diff --git a/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp-common/nfp_platform.h b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp-common/nfp_platform.h new file mode 100644 index 000000000..d46574b10 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp-common/nfp_platform.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Netronome Systems, Inc. + * All rights reserved. + */ + +#ifndef __NFP_PLATFORM_H__ +#define __NFP_PLATFORM_H__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifndef BIT_ULL +#define BIT(x) (1 << (x)) +#define BIT_ULL(x) (1ULL << (x)) +#endif + +#ifndef ARRAY_SIZE +#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) +#endif + +#define NFP_ERRNO(err) (errno = (err), -1) +#define NFP_ERRNO_RET(err, ret) (errno = (err), (ret)) +#define NFP_NOERR(errv) (errno) +#define NFP_ERRPTR(err) (errno = (err), NULL) +#define NFP_PTRERR(errv) (errno) + +#endif /* __NFP_PLATFORM_H__ */ diff --git a/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp-common/nfp_resid.h b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp-common/nfp_resid.h new file mode 100644 index 000000000..0e03948ec --- /dev/null +++ b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp-common/nfp_resid.h @@ -0,0 +1,592 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Netronome Systems, Inc. + * All rights reserved. + */ + +#ifndef __NFP_RESID_H__ +#define __NFP_RESID_H__ + +#if (!defined(_NFP_RESID_NO_C_FUNC) && \ + (defined(__NFP_TOOL_NFCC) || defined(__NFP_TOOL_NFAS))) +#define _NFP_RESID_NO_C_FUNC +#endif + +#ifndef _NFP_RESID_NO_C_FUNC +#include "nfp_platform.h" +#endif + +/* + * NFP Chip Architectures + * + * These are semi-arbitrary values to indicate an NFP architecture. + * They serve as a software view of a group of chip families, not necessarily a + * direct mapping to actual hardware design. + */ +#define NFP_CHIP_ARCH_YD 1 +#define NFP_CHIP_ARCH_TH 2 + +/* + * NFP Chip Families. + * + * These are not enums, because they need to be microcode compatible. + * They are also not maskable. + * + * Note: The NFP-4xxx family is handled as NFP-6xxx in most software + * components. + * + */ +#define NFP_CHIP_FAMILY_NFP6000 0x6000 /* ARCH_TH */ + +/* NFP Microengine/Flow Processing Core Versions */ +#define NFP_CHIP_ME_VERSION_2_7 0x0207 +#define NFP_CHIP_ME_VERSION_2_8 0x0208 +#define NFP_CHIP_ME_VERSION_2_9 0x0209 + +/* NFP Chip Base Revisions. Minor stepping can just be added to these */ +#define NFP_CHIP_REVISION_A0 0x00 +#define NFP_CHIP_REVISION_B0 0x10 +#define NFP_CHIP_REVISION_C0 0x20 +#define NFP_CHIP_REVISION_PF 0xff /* Maximum possible revision */ + +/* CPP Targets for each chip architecture */ +#define NFP6000_CPPTGT_NBI 1 +#define NFP6000_CPPTGT_VQDR 2 +#define NFP6000_CPPTGT_ILA 6 +#define NFP6000_CPPTGT_MU 7 +#define NFP6000_CPPTGT_PCIE 9 +#define NFP6000_CPPTGT_ARM 10 +#define NFP6000_CPPTGT_CRYPTO 12 +#define NFP6000_CPPTGT_CTXPB 14 +#define NFP6000_CPPTGT_CLS 15 + +/* + * Wildcard indicating a CPP read or write action + * + * The action used will be either read or write depending on whether a read or + * write instruction/call is performed on the NFP_CPP_ID. 
It is recomended that + * the RW action is used even if all actions to be performed on a NFP_CPP_ID are + * known to be only reads or writes. Doing so will in many cases save NFP CPP + * internal software resources. + */ +#define NFP_CPP_ACTION_RW 32 + +#define NFP_CPP_TARGET_ID_MASK 0x1f + +/* + * NFP_CPP_ID - pack target, token, and action into a CPP ID. + * + * Create a 32-bit CPP identifier representing the access to be made. + * These identifiers are used as parameters to other NFP CPP functions. Some + * CPP devices may allow wildcard identifiers to be specified. + * + * @param[in] target NFP CPP target id + * @param[in] action NFP CPP action id + * @param[in] token NFP CPP token id + * @return NFP CPP ID + */ +#define NFP_CPP_ID(target, action, token) \ + ((((target) & 0x7f) << 24) | (((token) & 0xff) << 16) | \ + (((action) & 0xff) << 8)) + +#define NFP_CPP_ISLAND_ID(target, action, token, island) \ + ((((target) & 0x7f) << 24) | (((token) & 0xff) << 16) | \ + (((action) & 0xff) << 8) | (((island) & 0xff) << 0)) + +#ifndef _NFP_RESID_NO_C_FUNC + +/** + * Return the NFP CPP target of a NFP CPP ID + * @param[in] id NFP CPP ID + * @return NFP CPP target + */ +static inline uint8_t +NFP_CPP_ID_TARGET_of(uint32_t id) +{ + return (id >> 24) & NFP_CPP_TARGET_ID_MASK; +} + +/* + * Return the NFP CPP token of a NFP CPP ID + * @param[in] id NFP CPP ID + * @return NFP CPP token + */ +static inline uint8_t +NFP_CPP_ID_TOKEN_of(uint32_t id) +{ + return (id >> 16) & 0xff; +} + +/* + * Return the NFP CPP action of a NFP CPP ID + * @param[in] id NFP CPP ID + * @return NFP CPP action + */ +static inline uint8_t +NFP_CPP_ID_ACTION_of(uint32_t id) +{ + return (id >> 8) & 0xff; +} + +/* + * Return the NFP CPP action of a NFP CPP ID + * @param[in] id NFP CPP ID + * @return NFP CPP action + */ +static inline uint8_t +NFP_CPP_ID_ISLAND_of(uint32_t id) +{ + return (id) & 0xff; +} + +#endif /* _NFP_RESID_NO_C_FUNC */ + +/* + * Check if @p chip_family is an ARCH_TH chip. + * @param chip_family One of NFP_CHIP_FAMILY_* + */ +#define NFP_FAMILY_IS_ARCH_TH(chip_family) \ + ((int)(chip_family) == (int)NFP_CHIP_FAMILY_NFP6000) + +/* + * Get the NFP_CHIP_ARCH_* of @p chip_family. + * @param chip_family One of NFP_CHIP_FAMILY_* + */ +#define NFP_FAMILY_ARCH(x) \ + (__extension__ ({ \ + typeof(x) _x = (x); \ + (NFP_FAMILY_IS_ARCH_TH(_x) ? NFP_CHIP_ARCH_TH : \ + NFP_FAMILY_IS_ARCH_YD(_x) ? NFP_CHIP_ARCH_YD : -1) \ + })) + +/* + * Check if @p chip_family is an NFP-6xxx chip. + * @param chip_family One of NFP_CHIP_FAMILY_* + */ +#define NFP_FAMILY_IS_NFP6000(chip_family) \ + ((int)(chip_family) == (int)NFP_CHIP_FAMILY_NFP6000) + +/* + * Make microengine ID for NFP-6xxx. + * @param island_id Island ID. + * @param menum ME number, 0 based, within island. + * + * NOTE: menum should really be unsigned - MSC compiler throws error (not + * warning) if a clause is always true i.e. menum >= 0 if cluster_num is type + * unsigned int hence the cast of the menum to an int in that particular clause + */ +#define NFP6000_MEID(a, b) \ + (__extension__ ({ \ + typeof(a) _a = (a); \ + typeof(b) _b = (b); \ + (((((int)(_a) & 0x3F) == (int)(_a)) && \ + (((int)(_b) >= 0) && ((int)(_b) < 12))) ? \ + (int)(((_a) << 4) | ((_b) + 4)) : -1) \ + })) + +/* + * Do a general sanity check on the ME ID. + * The check is on the highest possible island ID for the chip family and the + * microengine number must be a master ID. 
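+ *
+ * Example (values are illustrative only, following the convention of the
+ * other examples in this file):
+ *   int meid = NFP6000_MEID(32, 0);
+ *   // meid == 0x204, NFP6000_MEID_IS_VALID(meid) is non-zero
+ *   // NFP6000_MEID_ISLAND_of(meid) == 32, NFP6000_MEID_MENUM_of(meid) == 0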
+ * @param meid ME ID as created by NFP6000_MEID + */ +#define NFP6000_MEID_IS_VALID(meid) \ + (__extension__ ({ \ + typeof(meid) _a = (meid); \ + ((((_a) >> 4) < 64) && (((_a) >> 4) >= 0) && \ + (((_a) & 0xF) >= 4)) \ + })) + +/* + * Extract island ID from ME ID. + * @param meid ME ID as created by NFP6000_MEID + */ +#define NFP6000_MEID_ISLAND_of(meid) (((meid) >> 4) & 0x3F) + +/* + * Extract microengine number (0 based) from ME ID. + * @param meid ME ID as created by NFP6000_MEID + */ +#define NFP6000_MEID_MENUM_of(meid) (((meid) & 0xF) - 4) + +/* + * Extract microengine group number (0 based) from ME ID. + * The group is two code-sharing microengines, so group 0 refers to MEs 0,1, + * group 1 refers to MEs 2,3 etc. + * @param meid ME ID as created by NFP6000_MEID + */ +#define NFP6000_MEID_MEGRP_of(meid) (NFP6000_MEID_MENUM_of(meid) >> 1) + +#ifndef _NFP_RESID_NO_C_FUNC + +/* + * Convert a string to an ME ID. + * + * @param s A string of format iX.meY + * @param endptr If non-NULL, *endptr will point to the trailing string + * after the ME ID part of the string, which is either + * an empty string or the first character after the separating + * period. + * @return ME ID on success, -1 on error. + */ +int nfp6000_idstr2meid(const char *s, const char **endptr); + +/* + * Extract island ID from string. + * + * Example: + * char *c; + * int val = nfp6000_idstr2island("i32.me5", &c); + * // val == 32, c == "me5" + * val = nfp6000_idstr2island("i32", &c); + * // val == 32, c == "" + * + * @param s A string of format "iX.anything" or "iX" + * @param endptr If non-NULL, *endptr will point to the trailing string + * after the island part of the string, which is either + * an empty string or the first character after the separating + * period. + * @return If successful, the island ID, -1 on error. + */ +int nfp6000_idstr2island(const char *s, const char **endptr); + +/* + * Extract microengine number from string. + * + * Example: + * char *c; + * int menum = nfp6000_idstr2menum("me5.anything", &c); + * // menum == 5, c == "anything" + * menum = nfp6000_idstr2menum("me5", &c); + * // menum == 5, c == "" + * + * @param s A string of format "meX.anything" or "meX" + * @param endptr If non-NULL, *endptr will point to the trailing string + * after the ME number part of the string, which is either + * an empty string or the first character after the separating + * period. + * @return If successful, the ME number, -1 on error. + */ +int nfp6000_idstr2menum(const char *s, const char **endptr); + +/* + * Extract context number from string. + * + * Example: + * char *c; + * int val = nfp6000_idstr2ctxnum("ctx5.anything", &c); + * // val == 5, c == "anything" + * val = nfp6000_idstr2ctxnum("ctx5", &c); + * // val == 5, c == "" + * + * @param s A string of format "ctxN.anything" or "ctxN" + * @param endptr If non-NULL, *endptr will point to the trailing string + * after the context number part of the string, which is either + * an empty string or the first character after the separating + * period. + * @return If successful, the context number, -1 on error. + */ +int nfp6000_idstr2ctxnum(const char *s, const char **endptr); + +/* + * Extract microengine group number from string. 
+ * + * Example: + * char *c; + * int val = nfp6000_idstr2megrp("tg2.anything", &c); + * // val == 2, c == "anything" + * val = nfp6000_idstr2megrp("tg5", &c); + * // val == 2, c == "" + * + * @param s A string of format "tgX.anything" or "tgX" + * @param endptr If non-NULL, *endptr will point to the trailing string + * after the ME group part of the string, which is either + * an empty string or the first character after the separating + * period. + * @return If successful, the ME group number, -1 on error. + */ +int nfp6000_idstr2megrp(const char *s, const char **endptr); + +/* + * Create ME ID string of format "iX[.meY]". + * + * @param s Pointer to char buffer of size NFP_MEID_STR_SZ. + * The resulting string is output here. + * @param meid Microengine ID. + * @return Pointer to "s" on success, NULL on error. + */ +const char *nfp6000_meid2str(char *s, int meid); + +/* + * Create ME ID string of format "name[.meY]" or "iX[.meY]". + * + * @param s Pointer to char buffer of size NFP_MEID_STR_SZ. + * The resulting string is output here. + * @param meid Microengine ID. + * @return Pointer to "s" on success, NULL on error. + * + * Similar to nfp6000_meid2str() except use an alias instead of "iX" + * if one exists for the island. + */ +const char *nfp6000_meid2altstr(char *s, int meid); + +/* + * Create string of format "iX". + * + * @param s Pointer to char buffer of size NFP_MEID_STR_SZ. + * The resulting string is output here. + * @param island_id Island ID. + * @return Pointer to "s" on success, NULL on error. + */ +const char *nfp6000_island2str(char *s, int island_id); + +/* + * Create string of format "name", an island alias. + * + * @param s Pointer to char buffer of size NFP_MEID_STR_SZ. + * The resulting string is output here. + * @param island_id Island ID. + * @return Pointer to "s" on success, NULL on error. + */ +const char *nfp6000_island2altstr(char *s, int island_id); + +/* + * Create string of format "meY". + * + * @param s Pointer to char buffer of size NFP_MEID_STR_SZ. + * The resulting string is output here. + * @param menum Microengine number within island. + * @return Pointer to "s" on success, NULL on error. + */ +const char *nfp6000_menum2str(char *s, int menum); + +/* + * Create string of format "ctxY". + * + * @param s Pointer to char buffer of size NFP_MEID_STR_SZ. + * The resulting string is output here. + * @param ctxnum Context number within microengine. + * @return Pointer to "s" on success, NULL on error. + */ +const char *nfp6000_ctxnum2str(char *s, int ctxnum); + +/* + * Create string of format "tgY". + * + * @param s Pointer to char buffer of size NFP_MEID_STR_SZ. + * The resulting string is output here. + * @param megrp Microengine group number within cluster. + * @return Pointer to "s" on success, NULL on error. + */ +const char *nfp6000_megrp2str(char *s, int megrp); + +/* + * Convert a string to an ME ID. + * + * @param chip_family Chip family ID + * @param s A string of format iX.meY (or clX.meY) + * @param endptr If non-NULL, *endptr will point to the trailing + * string after the ME ID part of the string, which + * is either an empty string or the first character + * after the separating period. + * @return ME ID on success, -1 on error. + */ +int nfp_idstr2meid(int chip_family, const char *s, const char **endptr); + +/* + * Extract island ID from string. 
+ * + * Example: + * char *c; + * int val = nfp_idstr2island(chip, "i32.me5", &c); + * // val == 32, c == "me5" + * val = nfp_idstr2island(chip, "i32", &c); + * // val == 32, c == "" + * + * @param chip_family Chip family ID + * @param s A string of format "iX.anything" or "iX" + * @param endptr If non-NULL, *endptr will point to the trailing + * striong after the ME ID part of the string, which + * is either an empty string or the first character + * after the separating period. + * @return The island ID on succes, -1 on error. + */ +int nfp_idstr2island(int chip_family, const char *s, const char **endptr); + +/* + * Extract microengine number from string. + * + * Example: + * char *c; + * int menum = nfp_idstr2menum("me5.anything", &c); + * // menum == 5, c == "anything" + * menum = nfp_idstr2menum("me5", &c); + * // menum == 5, c == "" + * + * @param chip_family Chip family ID + * @param s A string of format "meX.anything" or "meX" + * @param endptr If non-NULL, *endptr will point to the trailing + * striong after the ME ID part of the string, which + * is either an empty string or the first character + * after the separating period. + * @return The ME number on succes, -1 on error. + */ +int nfp_idstr2menum(int chip_family, const char *s, const char **endptr); + +/* + * Extract context number from string. + * + * Example: + * char *c; + * int val = nfp_idstr2ctxnum("ctx5.anything", &c); + * // val == 5, c == "anything" + * val = nfp_idstr2ctxnum("ctx5", &c); + * // val == 5, c == "" + * + * @param s A string of format "ctxN.anything" or "ctxN" + * @param endptr If non-NULL, *endptr will point to the trailing string + * after the context number part of the string, which is either + * an empty string or the first character after the separating + * period. + * @return If successful, the context number, -1 on error. + */ +int nfp_idstr2ctxnum(int chip_family, const char *s, const char **endptr); + +/* + * Extract microengine group number from string. + * + * Example: + * char *c; + * int val = nfp_idstr2megrp("tg2.anything", &c); + * // val == 2, c == "anything" + * val = nfp_idstr2megrp("tg5", &c); + * // val == 5, c == "" + * + * @param s A string of format "tgX.anything" or "tgX" + * @param endptr If non-NULL, *endptr will point to the trailing string + * after the ME group part of the string, which is either + * an empty string or the first character after the separating + * period. + * @return If successful, the ME group number, -1 on error. + */ +int nfp_idstr2megrp(int chip_family, const char *s, const char **endptr); + +/* + * Create ME ID string of format "iX[.meY]". + * + * @param chip_family Chip family ID + * @param s Pointer to char buffer of size NFP_MEID_STR_SZ. + * The resulting string is output here. + * @param meid Microengine ID. + * @return Pointer to "s" on success, NULL on error. + */ +const char *nfp_meid2str(int chip_family, char *s, int meid); + +/* + * Create ME ID string of format "name[.meY]" or "iX[.meY]". + * + * @param chip_family Chip family ID + * @param s Pointer to char buffer of size NFP_MEID_STR_SZ. + * The resulting string is output here. + * @param meid Microengine ID. + * @return Pointer to "s" on success, NULL on error. + * + * Similar to nfp_meid2str() except use an alias instead of "iX" + * if one exists for the island. + */ +const char *nfp_meid2altstr(int chip_family, char *s, int meid); + +/* + * Create string of format "iX". + * + * @param chip_family Chip family ID + * @param s Pointer to char buffer of size NFP_MEID_STR_SZ. 
+ * The resulting string is output here. + * @param island_id Island ID. + * @return Pointer to "s" on success, NULL on error. + */ +const char *nfp_island2str(int chip_family, char *s, int island_id); + +/* + * Create string of format "name", an island alias. + * + * @param chip_family Chip family ID + * @param s Pointer to char buffer of size NFP_MEID_STR_SZ. + * The resulting string is output here. + * @param island_id Island ID. + * @return Pointer to "s" on success, NULL on error. + */ +const char *nfp_island2altstr(int chip_family, char *s, int island_id); + +/* + * Create string of format "meY". + * + * @param chip_family Chip family ID + * @param s Pointer to char buffer of size NFP_MEID_STR_SZ. + * The resulting string is output here. + * @param menum Microengine number within island. + * @return Pointer to "s" on success, NULL on error. + */ +const char *nfp_menum2str(int chip_family, char *s, int menum); + +/* + * Create string of format "ctxY". + * + * @param s Pointer to char buffer of size NFP_MEID_STR_SZ. + * The resulting string is output here. + * @param ctxnum Context number within microengine. + * @return Pointer to "s" on success, NULL on error. + */ +const char *nfp_ctxnum2str(int chip_family, char *s, int ctxnum); + +/* + * Create string of format "tgY". + * + * @param s Pointer to char buffer of size NFP_MEID_STR_SZ. + * The resulting string is output here. + * @param megrp Microengine group number within cluster. + * @return Pointer to "s" on success, NULL on error. + */ +const char *nfp_megrp2str(int chip_family, char *s, int megrp); + +/* + * Convert a two character string to revision number. + * + * Revision integer is 0x00 for A0, 0x11 for B1 etc. + * + * @param s Two character string. + * @return Revision number, -1 on error + */ +int nfp_idstr2rev(const char *s); + +/* + * Create string from revision number. + * + * String will be upper case. + * + * @param s Pointer to char buffer with size of at least 3 + * for 2 characters and string terminator. + * @param rev Revision number. + * @return Pointer to "s" on success, NULL on error. + */ +const char *nfp_rev2str(char *s, int rev); + +/* + * Get the NFP CPP address from a string + * + * String is in the format [island@]target[:[action:[token:]]address] + * + * @param chip_family Chip family ID + * @param tid Pointer to string to parse + * @param cpp_idp Pointer to CPP ID + * @param cpp_addrp Pointer to CPP address + * @return 0 on success, or -1 and errno + */ +int nfp_str2cpp(int chip_family, + const char *tid, + uint32_t *cpp_idp, + uint64_t *cpp_addrp); + + +#endif /* _NFP_RESID_NO_C_FUNC */ + +#endif /* __NFP_RESID_H__ */ diff --git a/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp6000/nfp6000.h b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp6000/nfp6000.h new file mode 100644 index 000000000..47e1ddaee --- /dev/null +++ b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp6000/nfp6000.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Netronome Systems, Inc. + * All rights reserved. 
+ */ + +#ifndef __NFP_NFP6000_H__ +#define __NFP_NFP6000_H__ + +/* CPP Target IDs */ +#define NFP_CPP_TARGET_INVALID 0 +#define NFP_CPP_TARGET_NBI 1 +#define NFP_CPP_TARGET_QDR 2 +#define NFP_CPP_TARGET_ILA 6 +#define NFP_CPP_TARGET_MU 7 +#define NFP_CPP_TARGET_PCIE 9 +#define NFP_CPP_TARGET_ARM 10 +#define NFP_CPP_TARGET_CRYPTO 12 +#define NFP_CPP_TARGET_ISLAND_XPB 14 /* Shared with CAP */ +#define NFP_CPP_TARGET_ISLAND_CAP 14 /* Shared with XPB */ +#define NFP_CPP_TARGET_CT_XPB 14 +#define NFP_CPP_TARGET_LOCAL_SCRATCH 15 +#define NFP_CPP_TARGET_CLS NFP_CPP_TARGET_LOCAL_SCRATCH + +#define NFP_ISL_EMEM0 24 + +#define NFP_MU_ADDR_ACCESS_TYPE_MASK 3ULL +#define NFP_MU_ADDR_ACCESS_TYPE_DIRECT 2ULL + +static inline int +nfp_cppat_mu_locality_lsb(int mode, int addr40) +{ + switch (mode) { + case 0 ... 3: + return addr40 ? 38 : 30; + default: + return -EINVAL; + } +} + +#endif /* NFP_NFP6000_H */ diff --git a/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp6000/nfp_xpb.h b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp6000/nfp_xpb.h new file mode 100644 index 000000000..7ada1bb2f --- /dev/null +++ b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp6000/nfp_xpb.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Netronome Systems, Inc. + * All rights reserved. + */ + +#ifndef __NFP_XPB_H__ +#define __NFP_XPB_H__ + +/* + * For use with NFP6000 Databook "XPB Addressing" section + */ +#define NFP_XPB_OVERLAY(island) (((island) & 0x3f) << 24) + +#define NFP_XPB_ISLAND(island) (NFP_XPB_OVERLAY(island) + 0x60000) + +#define NFP_XPB_ISLAND_of(offset) (((offset) >> 24) & 0x3F) + +/* + * For use with NFP6000 Databook "XPB Island and Device IDs" chapter + */ +#define NFP_XPB_DEVICE(island, slave, device) \ + (NFP_XPB_OVERLAY(island) | \ + (((slave) & 3) << 22) | \ + (((device) & 0x3f) << 16)) + +#endif /* NFP_XPB_H */ diff --git a/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_cpp.h b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_cpp.h new file mode 100644 index 000000000..1427954c1 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_cpp.h @@ -0,0 +1,781 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Netronome Systems, Inc. + * All rights reserved. + */ + +#ifndef __NFP_CPP_H__ +#define __NFP_CPP_H__ + +#include + +#include "nfp-common/nfp_platform.h" +#include "nfp-common/nfp_resid.h" + +struct nfp_cpp_mutex; + +/* + * NFP CPP handle + */ +struct nfp_cpp { + uint32_t model; + uint32_t interface; + uint8_t *serial; + int serial_len; + void *priv; + + /* Mutex cache */ + struct nfp_cpp_mutex *mutex_cache; + const struct nfp_cpp_operations *op; + + /* + * NFP-6xxx originating island IMB CPP Address Translation. CPP Target + * ID is index into array. Values are obtained at runtime from local + * island XPB CSRs. + */ + uint32_t imb_cat_table[16]; + + int driver_lock_needed; +}; + +/* + * NFP CPP device area handle + */ +struct nfp_cpp_area { + struct nfp_cpp *cpp; + char *name; + unsigned long long offset; + unsigned long size; + /* Here follows the 'priv' part of nfp_cpp_area. */ +}; + +/* + * NFP CPP operations structure + */ +struct nfp_cpp_operations { + /* Size of priv area in struct nfp_cpp_area */ + size_t area_priv_size; + + /* Instance an NFP CPP */ + int (*init)(struct nfp_cpp *cpp, struct rte_pci_device *dev); + + /* + * Free the bus. 
+ * Called only once, during nfp_cpp_unregister() + */ + void (*free)(struct nfp_cpp *cpp); + + /* + * Initialize a new NFP CPP area + * NOTE: This is _not_ serialized + */ + int (*area_init)(struct nfp_cpp_area *area, + uint32_t dest, + unsigned long long address, + unsigned long size); + /* + * Clean up a NFP CPP area before it is freed + * NOTE: This is _not_ serialized + */ + void (*area_cleanup)(struct nfp_cpp_area *area); + + /* + * Acquire resources for a NFP CPP area + * Serialized + */ + int (*area_acquire)(struct nfp_cpp_area *area); + /* + * Release resources for a NFP CPP area + * Serialized + */ + void (*area_release)(struct nfp_cpp_area *area); + /* + * Return a void IO pointer to a NFP CPP area + * NOTE: This is _not_ serialized + */ + + void *(*area_iomem)(struct nfp_cpp_area *area); + + void *(*area_mapped)(struct nfp_cpp_area *area); + /* + * Perform a read from a NFP CPP area + * Serialized + */ + int (*area_read)(struct nfp_cpp_area *area, + void *kernel_vaddr, + unsigned long offset, + unsigned int length); + /* + * Perform a write to a NFP CPP area + * Serialized + */ + int (*area_write)(struct nfp_cpp_area *area, + const void *kernel_vaddr, + unsigned long offset, + unsigned int length); +}; + +/* + * This should be the only external function the transport + * module supplies + */ +const struct nfp_cpp_operations *nfp_cpp_transport_operations(void); + +/* + * Set the model id + * + * @param cpp NFP CPP operations structure + * @param model Model ID + */ +void nfp_cpp_model_set(struct nfp_cpp *cpp, uint32_t model); + +/* + * Set the private instance owned data of a nfp_cpp struct + * + * @param cpp NFP CPP operations structure + * @param interface Interface ID + */ +void nfp_cpp_interface_set(struct nfp_cpp *cpp, uint32_t interface); + +/* + * Set the private instance owned data of a nfp_cpp struct + * + * @param cpp NFP CPP operations structure + * @param serial NFP serial byte array + * @param len Length of the serial byte array + */ +int nfp_cpp_serial_set(struct nfp_cpp *cpp, const uint8_t *serial, + size_t serial_len); + +/* + * Set the private data of the nfp_cpp instance + * + * @param cpp NFP CPP operations structure + * @return Opaque device pointer + */ +void nfp_cpp_priv_set(struct nfp_cpp *cpp, void *priv); + +/* + * Return the private data of the nfp_cpp instance + * + * @param cpp NFP CPP operations structure + * @return Opaque device pointer + */ +void *nfp_cpp_priv(struct nfp_cpp *cpp); + +/* + * Get the privately allocated portion of a NFP CPP area handle + * + * @param cpp_area NFP CPP area handle + * @return Pointer to the private area, or NULL on failure + */ +void *nfp_cpp_area_priv(struct nfp_cpp_area *cpp_area); + +uint32_t __nfp_cpp_model_autodetect(struct nfp_cpp *cpp); + +/* + * NFP CPP core interface for CPP clients. + */ + +/* + * Open a NFP CPP handle to a CPP device + * + * @param[in] id 0-based ID for the CPP interface to use + * + * @return NFP CPP handle, or NULL on failure (and set errno accordingly). + */ +struct nfp_cpp *nfp_cpp_from_device_name(struct rte_pci_device *dev, + int driver_lock_needed); + +/* + * Free a NFP CPP handle + * + * @param[in] cpp NFP CPP handle + */ +void nfp_cpp_free(struct nfp_cpp *cpp); + +#define NFP_CPP_MODEL_INVALID 0xffffffff + +/* + * NFP_CPP_MODEL_CHIP_of - retrieve the chip ID from the model ID + * + * The chip ID is a 16-bit BCD+A-F encoding for the chip type. 
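+ *
+ * Example (the model value is illustrative only): for a model ID of
+ * 0x62000010, NFP_CPP_MODEL_CHIP_of() yields 0x6200, and
+ * NFP_CPP_MODEL_IS_6000() holds, since 0x4000 <= 0x6200 < 0x7000.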
+ * + * @param[in] model NFP CPP model id + * @return NFP CPP chip id + */ +#define NFP_CPP_MODEL_CHIP_of(model) (((model) >> 16) & 0xffff) + +/* + * NFP_CPP_MODEL_IS_6000 - Check for the NFP6000 family of devices + * + * NOTE: The NFP4000 series is considered as a NFP6000 series variant. + * + * @param[in] model NFP CPP model id + * @return true if model is in the NFP6000 family, false otherwise. + */ +#define NFP_CPP_MODEL_IS_6000(model) \ + ((NFP_CPP_MODEL_CHIP_of(model) >= 0x4000) && \ + (NFP_CPP_MODEL_CHIP_of(model) < 0x7000)) + +/* + * nfp_cpp_model - Retrieve the Model ID of the NFP + * + * @param[in] cpp NFP CPP handle + * @return NFP CPP Model ID + */ +uint32_t nfp_cpp_model(struct nfp_cpp *cpp); + +/* + * NFP Interface types - logical interface for this CPP connection 4 bits are + * reserved for interface type. + */ +#define NFP_CPP_INTERFACE_TYPE_INVALID 0x0 +#define NFP_CPP_INTERFACE_TYPE_PCI 0x1 +#define NFP_CPP_INTERFACE_TYPE_ARM 0x2 +#define NFP_CPP_INTERFACE_TYPE_RPC 0x3 +#define NFP_CPP_INTERFACE_TYPE_ILA 0x4 + +/* + * Construct a 16-bit NFP Interface ID + * + * Interface IDs consists of 4 bits of interface type, 4 bits of unit + * identifier, and 8 bits of channel identifier. + * + * The NFP Interface ID is used in the implementation of NFP CPP API mutexes, + * which use the MU Atomic CompareAndWrite operation - hence the limit to 16 + * bits to be able to use the NFP Interface ID as a lock owner. + * + * @param[in] type NFP Interface Type + * @param[in] unit Unit identifier for the interface type + * @param[in] channel Channel identifier for the interface unit + * @return Interface ID + */ +#define NFP_CPP_INTERFACE(type, unit, channel) \ + ((((type) & 0xf) << 12) | \ + (((unit) & 0xf) << 8) | \ + (((channel) & 0xff) << 0)) + +/* + * Get the interface type of a NFP Interface ID + * @param[in] interface NFP Interface ID + * @return NFP Interface ID's type + */ +#define NFP_CPP_INTERFACE_TYPE_of(interface) (((interface) >> 12) & 0xf) + +/* + * Get the interface unit of a NFP Interface ID + * @param[in] interface NFP Interface ID + * @return NFP Interface ID's unit + */ +#define NFP_CPP_INTERFACE_UNIT_of(interface) (((interface) >> 8) & 0xf) + +/* + * Get the interface channel of a NFP Interface ID + * @param[in] interface NFP Interface ID + * @return NFP Interface ID's channel + */ +#define NFP_CPP_INTERFACE_CHANNEL_of(interface) (((interface) >> 0) & 0xff) + +/* + * Retrieve the Interface ID of the NFP + * @param[in] cpp NFP CPP handle + * @return NFP CPP Interface ID + */ +uint16_t nfp_cpp_interface(struct nfp_cpp *cpp); + +/* + * Retrieve the NFP Serial Number (unique per NFP) + * @param[in] cpp NFP CPP handle + * @param[out] serial Pointer to reference the serial number array + * + * @return size of the NFP6000 serial number, in bytes + */ +int nfp_cpp_serial(struct nfp_cpp *cpp, const uint8_t **serial); + +/* + * Allocate a NFP CPP area handle, as an offset into a CPP ID + * @param[in] cpp NFP CPP handle + * @param[in] cpp_id NFP CPP ID + * @param[in] address Offset into the NFP CPP ID address space + * @param[in] size Size of the area to reserve + * + * @return NFP CPP handle, or NULL on failure (and set errno accordingly). 
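+ *
+ * Example of the intended calling sequence (a sketch only; the CPP ID,
+ * address 0x8000 and size 4096 are illustrative):
+ *   uint32_t mu_id = NFP_CPP_ID(NFP_CPP_TARGET_MU, NFP_CPP_ACTION_RW, 0);
+ *   struct nfp_cpp_area *area;
+ *
+ *   area = nfp_cpp_area_alloc(cpp, mu_id, 0x8000, 4096);
+ *   if (area == NULL)
+ *           return -1;
+ *   if (nfp_cpp_area_acquire(area) == 0) {
+ *           // area is ready for nfp_cpp_area_read()/nfp_cpp_area_write()
+ *           nfp_cpp_area_release(area);
+ *   }
+ *   nfp_cpp_area_free(area);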
+ */ +struct nfp_cpp_area *nfp_cpp_area_alloc(struct nfp_cpp *cpp, uint32_t cpp_id, + unsigned long long address, + unsigned long size); + +/* + * Allocate a NFP CPP area handle, as an offset into a CPP ID, by a named owner + * @param[in] cpp NFP CPP handle + * @param[in] cpp_id NFP CPP ID + * @param[in] name Name of owner of the area + * @param[in] address Offset into the NFP CPP ID address space + * @param[in] size Size of the area to reserve + * + * @return NFP CPP handle, or NULL on failure (and set errno accordingly). + */ +struct nfp_cpp_area *nfp_cpp_area_alloc_with_name(struct nfp_cpp *cpp, + uint32_t cpp_id, + const char *name, + unsigned long long address, + unsigned long size); + +/* + * Free an allocated NFP CPP area handle + * @param[in] area NFP CPP area handle + */ +void nfp_cpp_area_free(struct nfp_cpp_area *area); + +/* + * Acquire the resources needed to access the NFP CPP area handle + * + * @param[in] area NFP CPP area handle + * + * @return 0 on success, -1 on failure (and set errno accordingly). + */ +int nfp_cpp_area_acquire(struct nfp_cpp_area *area); + +/* + * Release the resources needed to access the NFP CPP area handle + * + * @param[in] area NFP CPP area handle + */ +void nfp_cpp_area_release(struct nfp_cpp_area *area); + +/* + * Allocate, then acquire the resources needed to access the NFP CPP area handle + * @param[in] cpp NFP CPP handle + * @param[in] cpp_id NFP CPP ID + * @param[in] address Offset into the NFP CPP ID address space + * @param[in] size Size of the area to reserve + * + * @return NFP CPP handle, or NULL on failure (and set errno accordingly). + */ +struct nfp_cpp_area *nfp_cpp_area_alloc_acquire(struct nfp_cpp *cpp, + uint32_t cpp_id, + unsigned long long address, + unsigned long size); + +/* + * Release the resources, then free the NFP CPP area handle + * @param[in] area NFP CPP area handle + */ +void nfp_cpp_area_release_free(struct nfp_cpp_area *area); + +uint8_t *nfp_cpp_map_area(struct nfp_cpp *cpp, int domain, int target, + uint64_t addr, unsigned long size, + struct nfp_cpp_area **area); +/* + * Return an IO pointer to the beginning of the NFP CPP area handle. The area + * must be acquired with 'nfp_cpp_area_acquire()' before calling this operation. + * + * @param[in] area NFP CPP area handle + * + * @return Pointer to IO memory, or NULL on failure (and set errno accordingly). + */ +void *nfp_cpp_area_mapped(struct nfp_cpp_area *area); + +/* + * Read from a NFP CPP area handle into a buffer. The area must be acquired with + * 'nfp_cpp_area_acquire()' before calling this operation. + * + * @param[in] area NFP CPP area handle + * @param[in] offset Offset into the area + * @param[in] buffer Location of buffer to receive the data + * @param[in] length Length of the data to read + * + * @return bytes read on success, -1 on failure (and set errno accordingly). + * + */ +int nfp_cpp_area_read(struct nfp_cpp_area *area, unsigned long offset, + void *buffer, size_t length); + +/* + * Write to a NFP CPP area handle from a buffer. The area must be acquired with + * 'nfp_cpp_area_acquire()' before calling this operation. + * + * @param[in] area NFP CPP area handle + * @param[in] offset Offset into the area + * @param[in] buffer Location of buffer that holds the data + * @param[in] length Length of the data to read + * + * @return bytes written on success, -1 on failure (and set errno accordingly). 
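+ *
+ * Illustrative sketch (assumes 'area' was allocated and acquired as in the
+ * sketch above; the offset and buffer size are made up):
+ *
+ *   uint32_t buf[4] = { 0 };
+ *
+ *   if (nfp_cpp_area_write(area, 0, buf, sizeof(buf)) != sizeof(buf))
+ *           ... handle the error ...
+ *   if (nfp_cpp_area_read(area, 0, buf, sizeof(buf)) != sizeof(buf))
+ *           ... handle the error ...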
+ */ +int nfp_cpp_area_write(struct nfp_cpp_area *area, unsigned long offset, + const void *buffer, size_t length); + +/* + * nfp_cpp_area_iomem() - get IOMEM region for CPP area + * @area: CPP area handle + * + * Returns an iomem pointer for use with readl()/writel() style operations. + * + * NOTE: Area must have been locked down with an 'acquire'. + * + * Return: pointer to the area, or NULL + */ +void *nfp_cpp_area_iomem(struct nfp_cpp_area *area); + +/* + * Verify that IO can be performed on an offset in an area + * + * @param[in] area NFP CPP area handle + * @param[in] offset Offset into the area + * @param[in] size Size of region to validate + * + * @return 0 on success, -1 on failure (and set errno accordingly). + */ +int nfp_cpp_area_check_range(struct nfp_cpp_area *area, + unsigned long long offset, unsigned long size); + +/* + * Get the NFP CPP handle that is the parent of a NFP CPP area handle + * + * @param cpp_area NFP CPP area handle + * @return NFP CPP handle + */ +struct nfp_cpp *nfp_cpp_area_cpp(struct nfp_cpp_area *cpp_area); + +/* + * Get the name passed during allocation of the NFP CPP area handle + * + * @param cpp_area NFP CPP area handle + * @return Pointer to the area's name + */ +const char *nfp_cpp_area_name(struct nfp_cpp_area *cpp_area); + +/* + * Read a block of data from a NFP CPP ID + * + * @param[in] cpp NFP CPP handle + * @param[in] cpp_id NFP CPP ID + * @param[in] address Offset into the NFP CPP ID address space + * @param[in] kernel_vaddr Buffer to copy read data to + * @param[in] length Size of the area to reserve + * + * @return bytes read on success, -1 on failure (and set errno accordingly). + */ +int nfp_cpp_read(struct nfp_cpp *cpp, uint32_t cpp_id, + unsigned long long address, void *kernel_vaddr, size_t length); + +/* + * Write a block of data to a NFP CPP ID + * + * @param[in] cpp NFP CPP handle + * @param[in] cpp_id NFP CPP ID + * @param[in] address Offset into the NFP CPP ID address space + * @param[in] kernel_vaddr Buffer to copy write data from + * @param[in] length Size of the area to reserve + * + * @return bytes written on success, -1 on failure (and set errno accordingly). + */ +int nfp_cpp_write(struct nfp_cpp *cpp, uint32_t cpp_id, + unsigned long long address, const void *kernel_vaddr, + size_t length); + + + +/* + * Fill a NFP CPP area handle and offset with a value + * + * @param[in] area NFP CPP area handle + * @param[in] offset Offset into the NFP CPP ID address space + * @param[in] value 32-bit value to fill area with + * @param[in] length Size of the area to reserve + * + * @return bytes written on success, -1 on failure (and set errno accordingly). + */ +int nfp_cpp_area_fill(struct nfp_cpp_area *area, unsigned long offset, + uint32_t value, size_t length); + +/* + * Read a single 32-bit value from a NFP CPP area handle + * + * @param area NFP CPP area handle + * @param offset offset into NFP CPP area handle + * @param value output value + * + * The area must be acquired with 'nfp_cpp_area_acquire()' before calling this + * operation. + * + * NOTE: offset must be 32-bit aligned. + * + * @return 0 on success, or -1 on error (and set errno accordingly). + */ +int nfp_cpp_area_readl(struct nfp_cpp_area *area, unsigned long offset, + uint32_t *value); + +/* + * Write a single 32-bit value to a NFP CPP area handle + * + * @param area NFP CPP area handle + * @param offset offset into NFP CPP area handle + * @param value value to write + * + * The area must be acquired with 'nfp_cpp_area_acquire()' before calling this + * operation. 
+ * + * NOTE: offset must be 32-bit aligned. + * + * @return 0 on success, or -1 on error (and set errno accordingly). + */ +int nfp_cpp_area_writel(struct nfp_cpp_area *area, unsigned long offset, + uint32_t value); + +/* + * Read a single 64-bit value from a NFP CPP area handle + * + * @param area NFP CPP area handle + * @param offset offset into NFP CPP area handle + * @param value output value + * + * The area must be acquired with 'nfp_cpp_area_acquire()' before calling this + * operation. + * + * NOTE: offset must be 64-bit aligned. + * + * @return 0 on success, or -1 on error (and set errno accordingly). + */ +int nfp_cpp_area_readq(struct nfp_cpp_area *area, unsigned long offset, + uint64_t *value); + +/* + * Write a single 64-bit value to a NFP CPP area handle + * + * @param area NFP CPP area handle + * @param offset offset into NFP CPP area handle + * @param value value to write + * + * The area must be acquired with 'nfp_cpp_area_acquire()' before calling this + * operation. + * + * NOTE: offset must be 64-bit aligned. + * + * @return 0 on success, or -1 on error (and set errno accordingly). + */ +int nfp_cpp_area_writeq(struct nfp_cpp_area *area, unsigned long offset, + uint64_t value); + +/* + * Write a single 32-bit value on the XPB bus + * + * @param cpp NFP CPP device handle + * @param xpb_tgt XPB target and address + * @param value value to write + * + * @return 0 on success, or -1 on failure (and set errno accordingly). + */ +int nfp_xpb_writel(struct nfp_cpp *cpp, uint32_t xpb_tgt, uint32_t value); + +/* + * Read a single 32-bit value from the XPB bus + * + * @param cpp NFP CPP device handle + * @param xpb_tgt XPB target and address + * @param value output value + * + * @return 0 on success, or -1 on failure (and set errno accordingly). + */ +int nfp_xpb_readl(struct nfp_cpp *cpp, uint32_t xpb_tgt, uint32_t *value); + +/* + * Modify bits of a 32-bit value from the XPB bus + * + * @param cpp NFP CPP device handle + * @param xpb_tgt XPB target and address + * @param mask mask of bits to alter + * @param value value to modify + * + * @return 0 on success, or -1 on failure (and set errno accordingly). + */ +int nfp_xpb_writelm(struct nfp_cpp *cpp, uint32_t xpb_tgt, uint32_t mask, + uint32_t value); + +/* + * Modify bits of a 32-bit value from the XPB bus + * + * @param cpp NFP CPP device handle + * @param xpb_tgt XPB target and address + * @param mask mask of bits to alter + * @param value value to monitor for + * @param timeout_us maximum number of us to wait (-1 for forever) + * + * @return >= 0 on success, or -1 on failure (and set errno accordingly). + */ +int nfp_xpb_waitlm(struct nfp_cpp *cpp, uint32_t xpb_tgt, uint32_t mask, + uint32_t value, int timeout_us); + +/* + * Read a 32-bit word from a NFP CPP ID + * + * @param cpp NFP CPP handle + * @param cpp_id NFP CPP ID + * @param address offset into the NFP CPP ID address space + * @param value output value + * + * @return 0 on success, or -1 on failure (and set errno accordingly). + */ +int nfp_cpp_readl(struct nfp_cpp *cpp, uint32_t cpp_id, + unsigned long long address, uint32_t *value); + +/* + * Write a 32-bit value to a NFP CPP ID + * + * @param cpp NFP CPP handle + * @param cpp_id NFP CPP ID + * @param address offset into the NFP CPP ID address space + * @param value value to write + * + * @return 0 on success, or -1 on failure (and set errno accordingly). 
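+ *
+ * Illustrative 32-bit read-modify-write through the CPP bus (the CPP id
+ * and address are placeholders for this sketch only):
+ *
+ *   uint32_t val;
+ *   uint32_t id = NFP_CPP_ID(NFP_CPP_TARGET_ARM, NFP_CPP_ACTION_RW, 0);
+ *
+ *   if (nfp_cpp_readl(cpp, id, 0x100000, &val) == 0)
+ *           nfp_cpp_writel(cpp, id, 0x100000, val | 0x1);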
+ * + */ +int nfp_cpp_writel(struct nfp_cpp *cpp, uint32_t cpp_id, + unsigned long long address, uint32_t value); + +/* + * Read a 64-bit work from a NFP CPP ID + * + * @param cpp NFP CPP handle + * @param cpp_id NFP CPP ID + * @param address offset into the NFP CPP ID address space + * @param value output value + * + * @return 0 on success, or -1 on failure (and set errno accordingly). + */ +int nfp_cpp_readq(struct nfp_cpp *cpp, uint32_t cpp_id, + unsigned long long address, uint64_t *value); + +/* + * Write a 64-bit value to a NFP CPP ID + * + * @param cpp NFP CPP handle + * @param cpp_id NFP CPP ID + * @param address offset into the NFP CPP ID address space + * @param value value to write + * + * @return 0 on success, or -1 on failure (and set errno accordingly). + */ +int nfp_cpp_writeq(struct nfp_cpp *cpp, uint32_t cpp_id, + unsigned long long address, uint64_t value); + +/* + * Initialize a mutex location + + * The CPP target:address must point to a 64-bit aligned location, and will + * initialize 64 bits of data at the location. + * + * This creates the initial mutex state, as locked by this nfp_cpp_interface(). + * + * This function should only be called when setting up the initial lock state + * upon boot-up of the system. + * + * @param cpp NFP CPP handle + * @param target NFP CPP target ID + * @param address Offset into the address space of the NFP CPP target ID + * @param key_id Unique 32-bit value for this mutex + * + * @return 0 on success, or -1 on failure (and set errno accordingly). + */ +int nfp_cpp_mutex_init(struct nfp_cpp *cpp, int target, + unsigned long long address, uint32_t key_id); + +/* + * Create a mutex handle from an address controlled by a MU Atomic engine + * + * The CPP target:address must point to a 64-bit aligned location, and reserve + * 64 bits of data at the location for use by the handle. + * + * Only target/address pairs that point to entities that support the MU Atomic + * Engine's CmpAndSwap32 command are supported. + * + * @param cpp NFP CPP handle + * @param target NFP CPP target ID + * @param address Offset into the address space of the NFP CPP target ID + * @param key_id 32-bit unique key (must match the key at this location) + * + * @return A non-NULL struct nfp_cpp_mutex * on success, NULL on + * failure. + */ +struct nfp_cpp_mutex *nfp_cpp_mutex_alloc(struct nfp_cpp *cpp, int target, + unsigned long long address, + uint32_t key_id); + +/* + * Get the NFP CPP handle the mutex was created with + * + * @param mutex NFP mutex handle + * @return NFP CPP handle + */ +struct nfp_cpp *nfp_cpp_mutex_cpp(struct nfp_cpp_mutex *mutex); + +/* + * Get the mutex key + * + * @param mutex NFP mutex handle + * @return Mutex key + */ +uint32_t nfp_cpp_mutex_key(struct nfp_cpp_mutex *mutex); + +/* + * Get the mutex owner + * + * @param mutex NFP mutex handle + * @return Interface ID of the mutex owner + * + * NOTE: This is for debug purposes ONLY - the owner may change at any time, + * unless it has been locked by this NFP CPP handle. 
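+ *
+ * For reference, a typical (purely illustrative) locking sequence with the
+ * mutex calls declared in this header:
+ *
+ *   struct nfp_cpp_mutex *m;
+ *
+ *   m = nfp_cpp_mutex_alloc(cpp, target, address, key_id);
+ *   if (m != NULL && nfp_cpp_mutex_lock(m) == 0) {
+ *           ... critical section ...
+ *           nfp_cpp_mutex_unlock(m);
+ *   }
+ *   if (m != NULL)
+ *           nfp_cpp_mutex_free(m);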
+ */ +uint16_t nfp_cpp_mutex_owner(struct nfp_cpp_mutex *mutex); + +/* + * Get the mutex target + * + * @param mutex NFP mutex handle + * @return Mutex CPP target (ie NFP_CPP_TARGET_MU) + */ +int nfp_cpp_mutex_target(struct nfp_cpp_mutex *mutex); + +/* + * Get the mutex address + * + * @param mutex NFP mutex handle + * @return Mutex CPP address + */ +uint64_t nfp_cpp_mutex_address(struct nfp_cpp_mutex *mutex); + +/* + * Free a mutex handle - does not alter the lock state + * + * @param mutex NFP CPP Mutex handle + */ +void nfp_cpp_mutex_free(struct nfp_cpp_mutex *mutex); + +/* + * Lock a mutex handle, using the NFP MU Atomic Engine + * + * @param mutex NFP CPP Mutex handle + * + * @return 0 on success, or -1 on failure (and set errno accordingly). + */ +int nfp_cpp_mutex_lock(struct nfp_cpp_mutex *mutex); + +/* + * Unlock a mutex handle, using the NFP MU Atomic Engine + * + * @param mutex NFP CPP Mutex handle + * + * @return 0 on success, or -1 on failure (and set errno accordingly). + */ +int nfp_cpp_mutex_unlock(struct nfp_cpp_mutex *mutex); + +/* + * Attempt to lock a mutex handle, using the NFP MU Atomic Engine + * + * @param mutex NFP CPP Mutex handle + * @return 0 if the lock succeeded, -1 on failure (and errno set + * appropriately). + */ +int nfp_cpp_mutex_trylock(struct nfp_cpp_mutex *mutex); + +#endif /* !__NFP_CPP_H__ */ diff --git a/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_cpp_pcie_ops.c b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_cpp_pcie_ops.c new file mode 100644 index 000000000..0b9db974e --- /dev/null +++ b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_cpp_pcie_ops.c @@ -0,0 +1,937 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Netronome Systems, Inc. + * All rights reserved. + */ + +/* + * nfp_cpp_pcie_ops.c + * Authors: Vinayak Tammineedi + * + * Multiplexes the NFP BARs between NFP internal resources and + * implements the PCIe specific interface for generic CPP bus access. + * + * The BARs are managed and allocated if they are available. + * The generic CPP bus abstraction builds upon this BAR interface. + */ + +#include +#include +#if defined(RTE_BACKTRACE) +#include +#endif +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include + +#include "nfp_cpp.h" +#include "nfp_target.h" +#include "nfp6000/nfp6000.h" + +#define NFP_PCIE_BAR(_pf) (0x30000 + ((_pf) & 7) * 0xc0) + +#define NFP_PCIE_BAR_PCIE2CPP_ACTION_BASEADDRESS(_x) (((_x) & 0x1f) << 16) +#define NFP_PCIE_BAR_PCIE2CPP_BASEADDRESS(_x) (((_x) & 0xffff) << 0) +#define NFP_PCIE_BAR_PCIE2CPP_LENGTHSELECT(_x) (((_x) & 0x3) << 27) +#define NFP_PCIE_BAR_PCIE2CPP_LENGTHSELECT_32BIT 0 +#define NFP_PCIE_BAR_PCIE2CPP_LENGTHSELECT_64BIT 1 +#define NFP_PCIE_BAR_PCIE2CPP_LENGTHSELECT_0BYTE 3 +#define NFP_PCIE_BAR_PCIE2CPP_MAPTYPE(_x) (((_x) & 0x7) << 29) +#define NFP_PCIE_BAR_PCIE2CPP_MAPTYPE_OF(_x) (((_x) >> 29) & 0x7) +#define NFP_PCIE_BAR_PCIE2CPP_MAPTYPE_FIXED 0 +#define NFP_PCIE_BAR_PCIE2CPP_MAPTYPE_BULK 1 +#define NFP_PCIE_BAR_PCIE2CPP_MAPTYPE_TARGET 2 +#define NFP_PCIE_BAR_PCIE2CPP_MAPTYPE_GENERAL 3 +#define NFP_PCIE_BAR_PCIE2CPP_TARGET_BASEADDRESS(_x) (((_x) & 0xf) << 23) +#define NFP_PCIE_BAR_PCIE2CPP_TOKEN_BASEADDRESS(_x) (((_x) & 0x3) << 21) + +/* + * Minimal size of the PCIe cfg memory we depend on being mapped, + * queue controller and DMA controller don't have to be covered. 
+ */ +#define NFP_PCI_MIN_MAP_SIZE 0x080000 + +#define NFP_PCIE_P2C_FIXED_SIZE(bar) (1 << (bar)->bitsize) +#define NFP_PCIE_P2C_BULK_SIZE(bar) (1 << (bar)->bitsize) +#define NFP_PCIE_P2C_GENERAL_TARGET_OFFSET(bar, x) ((x) << ((bar)->bitsize - 2)) +#define NFP_PCIE_P2C_GENERAL_TOKEN_OFFSET(bar, x) ((x) << ((bar)->bitsize - 4)) +#define NFP_PCIE_P2C_GENERAL_SIZE(bar) (1 << ((bar)->bitsize - 4)) + +#define NFP_PCIE_CFG_BAR_PCIETOCPPEXPBAR(bar, slot) \ + (NFP_PCIE_BAR(0) + ((bar) * 8 + (slot)) * 4) + +#define NFP_PCIE_CPP_BAR_PCIETOCPPEXPBAR(bar, slot) \ + (((bar) * 8 + (slot)) * 4) + +/* + * Define to enable a bit more verbose debug output. + * Set to 1 to enable a bit more verbose debug output. + */ +struct nfp_pcie_user; +struct nfp6000_area_priv; + +/* + * struct nfp_bar - describes BAR configuration and usage + * @nfp: backlink to owner + * @barcfg: cached contents of BAR config CSR + * @base: the BAR's base CPP offset + * @mask: mask for the BAR aperture (read only) + * @bitsize: bitsize of BAR aperture (read only) + * @index: index of the BAR + * @lock: lock to specify if bar is in use + * @refcnt: number of current users + * @iomem: mapped IO memory + */ +#define NFP_BAR_MAX 7 +struct nfp_bar { + struct nfp_pcie_user *nfp; + uint32_t barcfg; + uint64_t base; /* CPP address base */ + uint64_t mask; /* Bit mask of the bar */ + uint32_t bitsize; /* Bit size of the bar */ + int index; + int lock; + + char *csr; + char *iomem; +}; + +#define BUSDEV_SZ 13 +struct nfp_pcie_user { + struct nfp_bar bar[NFP_BAR_MAX]; + + int device; + int lock; + int secondary_lock; + char busdev[BUSDEV_SZ]; + int barsz; + char *cfg; +}; + +static uint32_t +nfp_bar_maptype(struct nfp_bar *bar) +{ + return NFP_PCIE_BAR_PCIE2CPP_MAPTYPE_OF(bar->barcfg); +} + +#define TARGET_WIDTH_32 4 +#define TARGET_WIDTH_64 8 + +static int +nfp_compute_bar(const struct nfp_bar *bar, uint32_t *bar_config, + uint64_t *bar_base, int tgt, int act, int tok, + uint64_t offset, size_t size, int width) +{ + uint32_t bitsize; + uint32_t newcfg; + uint64_t mask; + + if (tgt >= 16) + return -EINVAL; + + switch (width) { + case 8: + newcfg = + NFP_PCIE_BAR_PCIE2CPP_LENGTHSELECT + (NFP_PCIE_BAR_PCIE2CPP_LENGTHSELECT_64BIT); + break; + case 4: + newcfg = + NFP_PCIE_BAR_PCIE2CPP_LENGTHSELECT + (NFP_PCIE_BAR_PCIE2CPP_LENGTHSELECT_32BIT); + break; + case 0: + newcfg = + NFP_PCIE_BAR_PCIE2CPP_LENGTHSELECT + (NFP_PCIE_BAR_PCIE2CPP_LENGTHSELECT_0BYTE); + break; + default: + return -EINVAL; + } + + if (act != NFP_CPP_ACTION_RW && act != 0) { + /* Fixed CPP mapping with specific action */ + mask = ~(NFP_PCIE_P2C_FIXED_SIZE(bar) - 1); + + newcfg |= + NFP_PCIE_BAR_PCIE2CPP_MAPTYPE + (NFP_PCIE_BAR_PCIE2CPP_MAPTYPE_FIXED); + newcfg |= NFP_PCIE_BAR_PCIE2CPP_TARGET_BASEADDRESS(tgt); + newcfg |= NFP_PCIE_BAR_PCIE2CPP_ACTION_BASEADDRESS(act); + newcfg |= NFP_PCIE_BAR_PCIE2CPP_TOKEN_BASEADDRESS(tok); + + if ((offset & mask) != ((offset + size - 1) & mask)) { + printf("BAR%d: Won't use for Fixed mapping\n", + bar->index); + printf("\t<%#llx,%#llx>, action=%d\n", + (unsigned long long)offset, + (unsigned long long)(offset + size), act); + printf("\tBAR too small (0x%llx).\n", + (unsigned long long)mask); + return -EINVAL; + } + offset &= mask; + +#ifdef DEBUG + printf("BAR%d: Created Fixed mapping\n", bar->index); + printf("\t%d:%d:%d:0x%#llx-0x%#llx>\n", tgt, act, tok, + (unsigned long long)offset, + (unsigned long long)(offset + mask)); +#endif + + bitsize = 40 - 16; + } else { + mask = ~(NFP_PCIE_P2C_BULK_SIZE(bar) - 1); + + /* Bulk mapping */ + newcfg |= + 
NFP_PCIE_BAR_PCIE2CPP_MAPTYPE + (NFP_PCIE_BAR_PCIE2CPP_MAPTYPE_BULK); + + newcfg |= NFP_PCIE_BAR_PCIE2CPP_TARGET_BASEADDRESS(tgt); + newcfg |= NFP_PCIE_BAR_PCIE2CPP_TOKEN_BASEADDRESS(tok); + + if ((offset & mask) != ((offset + size - 1) & mask)) { + printf("BAR%d: Won't use for bulk mapping\n", + bar->index); + printf("\t<%#llx,%#llx>\n", (unsigned long long)offset, + (unsigned long long)(offset + size)); + printf("\ttarget=%d, token=%d\n", tgt, tok); + printf("\tBAR too small (%#llx) - (%#llx != %#llx).\n", + (unsigned long long)mask, + (unsigned long long)(offset & mask), + (unsigned long long)(offset + size - 1) & mask); + + return -EINVAL; + } + + offset &= mask; + +#ifdef DEBUG + printf("BAR%d: Created bulk mapping %d:x:%d:%#llx-%#llx\n", + bar->index, tgt, tok, (unsigned long long)offset, + (unsigned long long)(offset + ~mask)); +#endif + + bitsize = 40 - 21; + } + + if (bar->bitsize < bitsize) { + printf("BAR%d: Too small for %d:%d:%d\n", bar->index, tgt, tok, + act); + return -EINVAL; + } + + newcfg |= offset >> bitsize; + + if (bar_base) + *bar_base = offset; + + if (bar_config) + *bar_config = newcfg; + + return 0; +} + +static int +nfp_bar_write(struct nfp_pcie_user *nfp, struct nfp_bar *bar, + uint32_t newcfg) +{ + int base, slot; + + base = bar->index >> 3; + slot = bar->index & 7; + + if (!nfp->cfg) + return (-ENOMEM); + + bar->csr = nfp->cfg + + NFP_PCIE_CFG_BAR_PCIETOCPPEXPBAR(base, slot); + + *(uint32_t *)(bar->csr) = newcfg; + + bar->barcfg = newcfg; +#ifdef DEBUG + printf("BAR%d: updated to 0x%08x\n", bar->index, newcfg); +#endif + + return 0; +} + +static int +nfp_reconfigure_bar(struct nfp_pcie_user *nfp, struct nfp_bar *bar, int tgt, + int act, int tok, uint64_t offset, size_t size, int width) +{ + uint64_t newbase; + uint32_t newcfg; + int err; + + err = nfp_compute_bar(bar, &newcfg, &newbase, tgt, act, tok, offset, + size, width); + if (err) + return err; + + bar->base = newbase; + + return nfp_bar_write(nfp, bar, newcfg); +} + +/* + * Map all PCI bars. We assume that the BAR with the PCIe config block is + * already mapped. + * + * BAR0.0: Reserved for General Mapping (for MSI-X access to PCIe SRAM) + * + * Halving PCItoCPPBars for primary and secondary processes. + * NFP PMD just requires two fixed slots, one for configuration BAR, + * and another for accessing the hw queues. Another slot is needed + * for setting the link up or down. Secondary processes do not need + * to map the first two slots again, but it requires one slot for + * accessing the link, even if it is not likely the secondary process + * starting the port. This implies a limit of secondary processes + * supported. Due to this requirement and future extensions requiring + * new slots per process, only one secondary process is supported by + * now. 
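+ *
+ * Concretely, with the loops in nfp_enable_bars()/nfp_alloc_bar() below,
+ * the primary process works with expansion BAR slots 2-4 while the single
+ * supported secondary process uses slots 5-7.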
+ */ +static int +nfp_enable_bars(struct nfp_pcie_user *nfp) +{ + struct nfp_bar *bar; + int x, start, end; + + if (rte_eal_process_type() == RTE_PROC_PRIMARY) { + start = 4; + end = 1; + } else { + start = 7; + end = 4; + } + for (x = start; x > end; x--) { + bar = &nfp->bar[x - 1]; + bar->barcfg = 0; + bar->nfp = nfp; + bar->index = x; + bar->mask = (1 << (nfp->barsz - 3)) - 1; + bar->bitsize = nfp->barsz - 3; + bar->base = 0; + bar->iomem = NULL; + bar->lock = 0; + bar->csr = nfp->cfg + + NFP_PCIE_CFG_BAR_PCIETOCPPEXPBAR(bar->index >> 3, + bar->index & 7); + + bar->iomem = nfp->cfg + (bar->index << bar->bitsize); + } + return 0; +} + +static struct nfp_bar * +nfp_alloc_bar(struct nfp_pcie_user *nfp) +{ + struct nfp_bar *bar; + int x, start, end; + + if (rte_eal_process_type() == RTE_PROC_PRIMARY) { + start = 4; + end = 1; + } else { + start = 7; + end = 4; + } + for (x = start; x > end; x--) { + bar = &nfp->bar[x - 1]; + if (!bar->lock) { + bar->lock = 1; + return bar; + } + } + return NULL; +} + +static void +nfp_disable_bars(struct nfp_pcie_user *nfp) +{ + struct nfp_bar *bar; + int x, start, end; + + if (rte_eal_process_type() == RTE_PROC_PRIMARY) { + start = 4; + end = 1; + } else { + start = 7; + end = 4; + } + + for (x = start; x > end; x--) { + bar = &nfp->bar[x - 1]; + if (bar->iomem) { + bar->iomem = NULL; + bar->lock = 0; + } + } +} + +/* + * Generic CPP bus access interface. + */ + +struct nfp6000_area_priv { + struct nfp_bar *bar; + uint32_t bar_offset; + + uint32_t target; + uint32_t action; + uint32_t token; + uint64_t offset; + struct { + int read; + int write; + int bar; + } width; + size_t size; + char *iomem; +}; + +static int +nfp6000_area_init(struct nfp_cpp_area *area, uint32_t dest, + unsigned long long address, unsigned long size) +{ + struct nfp_pcie_user *nfp = nfp_cpp_priv(nfp_cpp_area_cpp(area)); + struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area); + uint32_t target = NFP_CPP_ID_TARGET_of(dest); + uint32_t action = NFP_CPP_ID_ACTION_of(dest); + uint32_t token = NFP_CPP_ID_TOKEN_of(dest); + int pp, ret = 0; + + pp = nfp6000_target_pushpull(NFP_CPP_ID(target, action, token), + address); + if (pp < 0) + return pp; + + priv->width.read = PUSH_WIDTH(pp); + priv->width.write = PULL_WIDTH(pp); + + if (priv->width.read > 0 && + priv->width.write > 0 && priv->width.read != priv->width.write) + return -EINVAL; + + if (priv->width.read > 0) + priv->width.bar = priv->width.read; + else + priv->width.bar = priv->width.write; + + priv->bar = nfp_alloc_bar(nfp); + if (priv->bar == NULL) + return -ENOMEM; + + priv->target = target; + priv->action = action; + priv->token = token; + priv->offset = address; + priv->size = size; + + ret = nfp_reconfigure_bar(nfp, priv->bar, priv->target, priv->action, + priv->token, priv->offset, priv->size, + priv->width.bar); + + return ret; +} + +static int +nfp6000_area_acquire(struct nfp_cpp_area *area) +{ + struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area); + + /* Calculate offset into BAR. */ + if (nfp_bar_maptype(priv->bar) == + NFP_PCIE_BAR_PCIE2CPP_MAPTYPE_GENERAL) { + priv->bar_offset = priv->offset & + (NFP_PCIE_P2C_GENERAL_SIZE(priv->bar) - 1); + priv->bar_offset += + NFP_PCIE_P2C_GENERAL_TARGET_OFFSET(priv->bar, + priv->target); + priv->bar_offset += + NFP_PCIE_P2C_GENERAL_TOKEN_OFFSET(priv->bar, priv->token); + } else { + priv->bar_offset = priv->offset & priv->bar->mask; + } + + /* Must have been too big. Sub-allocate. 
*/ + if (!priv->bar->iomem) + return (-ENOMEM); + + priv->iomem = priv->bar->iomem + priv->bar_offset; + + return 0; +} + +static void * +nfp6000_area_mapped(struct nfp_cpp_area *area) +{ + struct nfp6000_area_priv *area_priv = nfp_cpp_area_priv(area); + + if (!area_priv->iomem) + return NULL; + + return area_priv->iomem; +} + +static void +nfp6000_area_release(struct nfp_cpp_area *area) +{ + struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area); + priv->bar->lock = 0; + priv->bar = NULL; + priv->iomem = NULL; +} + +static void * +nfp6000_area_iomem(struct nfp_cpp_area *area) +{ + struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area); + return priv->iomem; +} + +static int +nfp6000_area_read(struct nfp_cpp_area *area, void *kernel_vaddr, + unsigned long offset, unsigned int length) +{ + uint64_t *wrptr64 = kernel_vaddr; + const volatile uint64_t *rdptr64; + struct nfp6000_area_priv *priv; + uint32_t *wrptr32 = kernel_vaddr; + const volatile uint32_t *rdptr32; + int width; + unsigned int n; + bool is_64; + + priv = nfp_cpp_area_priv(area); + rdptr64 = (uint64_t *)(priv->iomem + offset); + rdptr32 = (uint32_t *)(priv->iomem + offset); + + if (offset + length > priv->size) + return -EFAULT; + + width = priv->width.read; + + if (width <= 0) + return -EINVAL; + + /* Unaligned? Translate to an explicit access */ + if ((priv->offset + offset) & (width - 1)) { + printf("aread_read unaligned!!!\n"); + return -EINVAL; + } + + is_64 = width == TARGET_WIDTH_64; + + /* MU reads via a PCIe2CPP BAR supports 32bit (and other) lengths */ + if (priv->target == (NFP_CPP_TARGET_ID_MASK & NFP_CPP_TARGET_MU) && + priv->action == NFP_CPP_ACTION_RW) { + is_64 = false; + } + + if (is_64) { + if (offset % sizeof(uint64_t) != 0 || + length % sizeof(uint64_t) != 0) + return -EINVAL; + } else { + if (offset % sizeof(uint32_t) != 0 || + length % sizeof(uint32_t) != 0) + return -EINVAL; + } + + if (!priv->bar) + return -EFAULT; + + if (is_64) + for (n = 0; n < length; n += sizeof(uint64_t)) { + *wrptr64 = *rdptr64; + wrptr64++; + rdptr64++; + } + else + for (n = 0; n < length; n += sizeof(uint32_t)) { + *wrptr32 = *rdptr32; + wrptr32++; + rdptr32++; + } + + return n; +} + +static int +nfp6000_area_write(struct nfp_cpp_area *area, const void *kernel_vaddr, + unsigned long offset, unsigned int length) +{ + const uint64_t *rdptr64 = kernel_vaddr; + uint64_t *wrptr64; + const uint32_t *rdptr32 = kernel_vaddr; + struct nfp6000_area_priv *priv; + uint32_t *wrptr32; + int width; + unsigned int n; + bool is_64; + + priv = nfp_cpp_area_priv(area); + wrptr64 = (uint64_t *)(priv->iomem + offset); + wrptr32 = (uint32_t *)(priv->iomem + offset); + + if (offset + length > priv->size) + return -EFAULT; + + width = priv->width.write; + + if (width <= 0) + return -EINVAL; + + /* Unaligned? 
Translate to an explicit access */ + if ((priv->offset + offset) & (width - 1)) + return -EINVAL; + + is_64 = width == TARGET_WIDTH_64; + + /* MU writes via a PCIe2CPP BAR supports 32bit (and other) lengths */ + if (priv->target == (NFP_CPP_TARGET_ID_MASK & NFP_CPP_TARGET_MU) && + priv->action == NFP_CPP_ACTION_RW) + is_64 = false; + + if (is_64) { + if (offset % sizeof(uint64_t) != 0 || + length % sizeof(uint64_t) != 0) + return -EINVAL; + } else { + if (offset % sizeof(uint32_t) != 0 || + length % sizeof(uint32_t) != 0) + return -EINVAL; + } + + if (!priv->bar) + return -EFAULT; + + if (is_64) + for (n = 0; n < length; n += sizeof(uint64_t)) { + *wrptr64 = *rdptr64; + wrptr64++; + rdptr64++; + } + else + for (n = 0; n < length; n += sizeof(uint32_t)) { + *wrptr32 = *rdptr32; + wrptr32++; + rdptr32++; + } + + return n; +} + +#define PCI_DEVICES "/sys/bus/pci/devices" + +static int +nfp_acquire_process_lock(struct nfp_pcie_user *desc) +{ + int rc; + struct flock lock; + char lockname[30]; + + memset(&lock, 0, sizeof(lock)); + + snprintf(lockname, sizeof(lockname), "/var/lock/nfp_%s", desc->busdev); + desc->lock = open(lockname, O_RDWR | O_CREAT, 0666); + if (desc->lock < 0) + return desc->lock; + + lock.l_type = F_WRLCK; + lock.l_whence = SEEK_SET; + rc = -1; + while (rc != 0) { + rc = fcntl(desc->lock, F_SETLKW, &lock); + if (rc < 0) { + if (errno != EAGAIN && errno != EACCES) { + close(desc->lock); + return rc; + } + } + } + + return 0; +} + +static int +nfp_acquire_secondary_process_lock(struct nfp_pcie_user *desc) +{ + int rc; + struct flock lock; + const char *lockname = "/.lock_nfp_secondary"; + char *home_path; + char *lockfile; + + memset(&lock, 0, sizeof(lock)); + + /* + * Using user's home directory. Note this can be called in a DPDK app + * being executed as non-root. This is not the case for the previous + * function nfp_acquire_process_lock which is invoked only when UIO + * driver is used because that implies root user. 
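+ *
+ * NOTE: the code below assumes HOME is set in the environment; a NULL
+ * return from getenv() is not handled here.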
+ */ + home_path = getenv("HOME"); + lockfile = calloc(strlen(home_path) + strlen(lockname) + 1, + sizeof(char)); + + if (!lockfile) + return -ENOMEM; + + strcat(lockfile, home_path); + strcat(lockfile, "/.lock_nfp_secondary"); + desc->secondary_lock = open(lockfile, O_RDWR | O_CREAT | O_NONBLOCK, + 0666); + if (desc->secondary_lock < 0) { + RTE_LOG(ERR, PMD, "NFP lock for secondary process failed\n"); + free(lockfile); + return desc->secondary_lock; + } + + lock.l_type = F_WRLCK; + lock.l_whence = SEEK_SET; + rc = fcntl(desc->secondary_lock, F_SETLK, &lock); + if (rc < 0) { + RTE_LOG(ERR, PMD, "NFP lock for secondary process failed\n"); + close(desc->secondary_lock); + } + + free(lockfile); + return rc; +} + +static int +nfp6000_set_model(struct rte_pci_device *dev, struct nfp_cpp *cpp) +{ + uint32_t model; + + if (rte_pci_read_config(dev, &model, 4, 0x2e) < 0) { + printf("nfp set model failed\n"); + return -1; + } + + model = model << 16; + nfp_cpp_model_set(cpp, model); + + return 0; +} + +static int +nfp6000_set_interface(struct rte_pci_device *dev, struct nfp_cpp *cpp) +{ + uint16_t interface; + + if (rte_pci_read_config(dev, &interface, 2, 0x154) < 0) { + printf("nfp set interface failed\n"); + return -1; + } + + nfp_cpp_interface_set(cpp, interface); + + return 0; +} + +#define PCI_CFG_SPACE_SIZE 256 +#define PCI_CFG_SPACE_EXP_SIZE 4096 +#define PCI_EXT_CAP_ID(header) (int)(header & 0x0000ffff) +#define PCI_EXT_CAP_NEXT(header) ((header >> 20) & 0xffc) +#define PCI_EXT_CAP_ID_DSN 0x03 +static int +nfp_pci_find_next_ext_capability(struct rte_pci_device *dev, int cap) +{ + uint32_t header; + int ttl; + int pos = PCI_CFG_SPACE_SIZE; + + /* minimum 8 bytes per capability */ + ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8; + + if (rte_pci_read_config(dev, &header, 4, pos) < 0) { + printf("nfp error reading extended capabilities\n"); + return -1; + } + + /* + * If we have no capabilities, this is indicated by cap ID, + * cap version and next pointer all being 0. + */ + if (header == 0) + return 0; + + while (ttl-- > 0) { + if (PCI_EXT_CAP_ID(header) == cap) + return pos; + + pos = PCI_EXT_CAP_NEXT(header); + if (pos < PCI_CFG_SPACE_SIZE) + break; + + if (rte_pci_read_config(dev, &header, 4, pos) < 0) { + printf("nfp error reading extended capabilities\n"); + return -1; + } + } + + return 0; +} + +static int +nfp6000_set_serial(struct rte_pci_device *dev, struct nfp_cpp *cpp) +{ + uint16_t tmp; + uint8_t serial[6]; + int serial_len = 6; + int pos; + + pos = nfp_pci_find_next_ext_capability(dev, PCI_EXT_CAP_ID_DSN); + if (pos <= 0) { + printf("PCI_EXT_CAP_ID_DSN not found. 
nfp set serial failed\n"); + return -1; + } else { + pos += 6; + } + + if (rte_pci_read_config(dev, &tmp, 2, pos) < 0) { + printf("nfp set serial failed\n"); + return -1; + } + + serial[4] = (uint8_t)((tmp >> 8) & 0xff); + serial[5] = (uint8_t)(tmp & 0xff); + + pos += 2; + if (rte_pci_read_config(dev, &tmp, 2, pos) < 0) { + printf("nfp set serial failed\n"); + return -1; + } + + serial[2] = (uint8_t)((tmp >> 8) & 0xff); + serial[3] = (uint8_t)(tmp & 0xff); + + pos += 2; + if (rte_pci_read_config(dev, &tmp, 2, pos) < 0) { + printf("nfp set serial failed\n"); + return -1; + } + + serial[0] = (uint8_t)((tmp >> 8) & 0xff); + serial[1] = (uint8_t)(tmp & 0xff); + + nfp_cpp_serial_set(cpp, serial, serial_len); + + return 0; +} + +static int +nfp6000_set_barsz(struct rte_pci_device *dev, struct nfp_pcie_user *desc) +{ + unsigned long tmp; + int i = 0; + + tmp = dev->mem_resource[0].len; + + while (tmp >>= 1) + i++; + + desc->barsz = i; + return 0; +} + +static int +nfp6000_init(struct nfp_cpp *cpp, struct rte_pci_device *dev) +{ + int ret = 0; + struct nfp_pcie_user *desc; + + desc = malloc(sizeof(*desc)); + if (!desc) + return -1; + + + memset(desc->busdev, 0, BUSDEV_SZ); + strlcpy(desc->busdev, dev->device.name, sizeof(desc->busdev)); + + if (rte_eal_process_type() == RTE_PROC_PRIMARY && + cpp->driver_lock_needed) { + ret = nfp_acquire_process_lock(desc); + if (ret) + goto error; + } + + /* Just support for one secondary process */ + if (rte_eal_process_type() != RTE_PROC_PRIMARY) { + ret = nfp_acquire_secondary_process_lock(desc); + if (ret) + goto error; + } + + if (nfp6000_set_model(dev, cpp) < 0) + goto error; + if (nfp6000_set_interface(dev, cpp) < 0) + goto error; + if (nfp6000_set_serial(dev, cpp) < 0) + goto error; + if (nfp6000_set_barsz(dev, desc) < 0) + goto error; + + desc->cfg = (char *)dev->mem_resource[0].addr; + + nfp_enable_bars(desc); + + nfp_cpp_priv_set(cpp, desc); + + return 0; + +error: + free(desc); + return -1; +} + +static void +nfp6000_free(struct nfp_cpp *cpp) +{ + struct nfp_pcie_user *desc = nfp_cpp_priv(cpp); + + nfp_disable_bars(desc); + if (cpp->driver_lock_needed) + close(desc->lock); + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + close(desc->secondary_lock); + close(desc->device); + free(desc); +} + +static const struct nfp_cpp_operations nfp6000_pcie_ops = { + .init = nfp6000_init, + .free = nfp6000_free, + + .area_priv_size = sizeof(struct nfp6000_area_priv), + .area_init = nfp6000_area_init, + .area_acquire = nfp6000_area_acquire, + .area_release = nfp6000_area_release, + .area_mapped = nfp6000_area_mapped, + .area_read = nfp6000_area_read, + .area_write = nfp6000_area_write, + .area_iomem = nfp6000_area_iomem, +}; + +const struct +nfp_cpp_operations *nfp_cpp_transport_operations(void) +{ + return &nfp6000_pcie_ops; +} diff --git a/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_cppcore.c b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_cppcore.c new file mode 100644 index 000000000..dec4a8b6d --- /dev/null +++ b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_cppcore.c @@ -0,0 +1,861 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Netronome Systems, Inc. + * All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "nfp_cpp.h" +#include "nfp_target.h" +#include "nfp6000/nfp6000.h" +#include "nfp6000/nfp_xpb.h" +#include "nfp_nffw.h" + +#define NFP_PL_DEVICE_ID 0x00000004 +#define NFP_PL_DEVICE_ID_MASK 0xff + +#define NFP6000_ARM_GCSR_SOFTMODEL0 0x00400144 + +void +nfp_cpp_priv_set(struct nfp_cpp *cpp, void *priv) +{ + cpp->priv = priv; +} + +void * +nfp_cpp_priv(struct nfp_cpp *cpp) +{ + return cpp->priv; +} + +void +nfp_cpp_model_set(struct nfp_cpp *cpp, uint32_t model) +{ + cpp->model = model; +} + +uint32_t +nfp_cpp_model(struct nfp_cpp *cpp) +{ + if (!cpp) + return NFP_CPP_MODEL_INVALID; + + if (cpp->model == 0) + cpp->model = __nfp_cpp_model_autodetect(cpp); + + return cpp->model; +} + +void +nfp_cpp_interface_set(struct nfp_cpp *cpp, uint32_t interface) +{ + cpp->interface = interface; +} + +int +nfp_cpp_serial(struct nfp_cpp *cpp, const uint8_t **serial) +{ + *serial = cpp->serial; + return cpp->serial_len; +} + +int +nfp_cpp_serial_set(struct nfp_cpp *cpp, const uint8_t *serial, + size_t serial_len) +{ + if (cpp->serial_len) + free(cpp->serial); + + cpp->serial = malloc(serial_len); + if (!cpp->serial) + return -1; + + memcpy(cpp->serial, serial, serial_len); + cpp->serial_len = serial_len; + + return 0; +} + +uint16_t +nfp_cpp_interface(struct nfp_cpp *cpp) +{ + if (!cpp) + return NFP_CPP_INTERFACE(NFP_CPP_INTERFACE_TYPE_INVALID, 0, 0); + + return cpp->interface; +} + +void * +nfp_cpp_area_priv(struct nfp_cpp_area *cpp_area) +{ + return &cpp_area[1]; +} + +struct nfp_cpp * +nfp_cpp_area_cpp(struct nfp_cpp_area *cpp_area) +{ + return cpp_area->cpp; +} + +const char * +nfp_cpp_area_name(struct nfp_cpp_area *cpp_area) +{ + return cpp_area->name; +} + +/* + * nfp_cpp_area_alloc - allocate a new CPP area + * @cpp: CPP handle + * @dest: CPP id + * @address: start address on CPP target + * @size: size of area in bytes + * + * Allocate and initialize a CPP area structure. The area must later + * be locked down with an 'acquire' before it can be safely accessed. + * + * NOTE: @address and @size must be 32-bit aligned values. + */ +struct nfp_cpp_area * +nfp_cpp_area_alloc_with_name(struct nfp_cpp *cpp, uint32_t dest, + const char *name, unsigned long long address, + unsigned long size) +{ + struct nfp_cpp_area *area; + uint64_t tmp64 = (uint64_t)address; + int tmp, err; + + if (!cpp) + return NULL; + + /* CPP bus uses only a 40-bit address */ + if ((address + size) > (1ULL << 40)) + return NFP_ERRPTR(EFAULT); + + /* Remap from cpp_island to cpp_target */ + err = nfp_target_cpp(dest, tmp64, &dest, &tmp64, cpp->imb_cat_table); + if (err < 0) + return NULL; + + address = (unsigned long long)tmp64; + + if (!name) + name = ""; + + area = calloc(1, sizeof(*area) + cpp->op->area_priv_size + + strlen(name) + 1); + if (!area) + return NULL; + + area->cpp = cpp; + area->name = ((char *)area) + sizeof(*area) + cpp->op->area_priv_size; + memcpy(area->name, name, strlen(name) + 1); + + /* + * Preserve errno around the call to area_init, since most + * implementations will blindly call nfp_target_action_width()for both + * read or write modes, and that will set errno to EINVAL. 
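+ * The save/restore of errno below keeps that spurious EINVAL from leaking
+ * out to the caller when the allocation itself succeeds.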
+ */ + tmp = errno; + + err = cpp->op->area_init(area, dest, address, size); + if (err < 0) { + free(area); + return NULL; + } + + /* Restore errno */ + errno = tmp; + + area->offset = address; + area->size = size; + + return area; +} + +struct nfp_cpp_area * +nfp_cpp_area_alloc(struct nfp_cpp *cpp, uint32_t dest, + unsigned long long address, unsigned long size) +{ + return nfp_cpp_area_alloc_with_name(cpp, dest, NULL, address, size); +} + +/* + * nfp_cpp_area_alloc_acquire - allocate a new CPP area and lock it down + * + * @cpp: CPP handle + * @dest: CPP id + * @address: start address on CPP target + * @size: size of area + * + * Allocate and initilizae a CPP area structure, and lock it down so + * that it can be accessed directly. + * + * NOTE: @address and @size must be 32-bit aligned values. + * + * NOTE: The area must also be 'released' when the structure is freed. + */ +struct nfp_cpp_area * +nfp_cpp_area_alloc_acquire(struct nfp_cpp *cpp, uint32_t destination, + unsigned long long address, unsigned long size) +{ + struct nfp_cpp_area *area; + + area = nfp_cpp_area_alloc(cpp, destination, address, size); + if (!area) + return NULL; + + if (nfp_cpp_area_acquire(area)) { + nfp_cpp_area_free(area); + return NULL; + } + + return area; +} + +/* + * nfp_cpp_area_free - free up the CPP area + * area: CPP area handle + * + * Frees up memory resources held by the CPP area. + */ +void +nfp_cpp_area_free(struct nfp_cpp_area *area) +{ + if (area->cpp->op->area_cleanup) + area->cpp->op->area_cleanup(area); + free(area); +} + +/* + * nfp_cpp_area_release_free - release CPP area and free it + * area: CPP area handle + * + * Releases CPP area and frees up memory resources held by the it. + */ +void +nfp_cpp_area_release_free(struct nfp_cpp_area *area) +{ + nfp_cpp_area_release(area); + nfp_cpp_area_free(area); +} + +/* + * nfp_cpp_area_acquire - lock down a CPP area for access + * @area: CPP area handle + * + * Locks down the CPP area for a potential long term activity. Area + * must always be locked down before being accessed. + */ +int +nfp_cpp_area_acquire(struct nfp_cpp_area *area) +{ + if (area->cpp->op->area_acquire) { + int err = area->cpp->op->area_acquire(area); + + if (err < 0) + return -1; + } + + return 0; +} + +/* + * nfp_cpp_area_release - release a locked down CPP area + * @area: CPP area handle + * + * Releases a previously locked down CPP area. + */ +void +nfp_cpp_area_release(struct nfp_cpp_area *area) +{ + if (area->cpp->op->area_release) + area->cpp->op->area_release(area); +} + +/* + * nfp_cpp_area_iomem() - get IOMEM region for CPP area + * + * @area: CPP area handle + * + * Returns an iomem pointer for use with readl()/writel() style operations. + * + * NOTE: Area must have been locked down with an 'acquire'. + * + * Return: pointer to the area, or NULL + */ +void * +nfp_cpp_area_iomem(struct nfp_cpp_area *area) +{ + void *iomem = NULL; + + if (area->cpp->op->area_iomem) + iomem = area->cpp->op->area_iomem(area); + + return iomem; +} + +/* + * nfp_cpp_area_read - read data from CPP area + * + * @area: CPP area handle + * @offset: offset into CPP area + * @kernel_vaddr: kernel address to put data into + * @length: number of bytes to read + * + * Read data from indicated CPP region. + * + * NOTE: @offset and @length must be 32-bit aligned values. + * + * NOTE: Area must have been locked down with an 'acquire'. 
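+ *
+ * NOTE: the PCIe transport (nfp6000_area_read()) copies in 32-bit or
+ * 64-bit units depending on the target's push/pull width, so callers
+ * should also honour the wider alignment where it applies.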
+ */ +int +nfp_cpp_area_read(struct nfp_cpp_area *area, unsigned long offset, + void *kernel_vaddr, size_t length) +{ + if ((offset + length) > area->size) + return NFP_ERRNO(EFAULT); + + return area->cpp->op->area_read(area, kernel_vaddr, offset, length); +} + +/* + * nfp_cpp_area_write - write data to CPP area + * + * @area: CPP area handle + * @offset: offset into CPP area + * @kernel_vaddr: kernel address to read data from + * @length: number of bytes to write + * + * Write data to indicated CPP region. + * + * NOTE: @offset and @length must be 32-bit aligned values. + * + * NOTE: Area must have been locked down with an 'acquire'. + */ +int +nfp_cpp_area_write(struct nfp_cpp_area *area, unsigned long offset, + const void *kernel_vaddr, size_t length) +{ + if ((offset + length) > area->size) + return NFP_ERRNO(EFAULT); + + return area->cpp->op->area_write(area, kernel_vaddr, offset, length); +} + +void * +nfp_cpp_area_mapped(struct nfp_cpp_area *area) +{ + if (area->cpp->op->area_mapped) + return area->cpp->op->area_mapped(area); + return NULL; +} + +/* + * nfp_cpp_area_check_range - check if address range fits in CPP area + * + * @area: CPP area handle + * @offset: offset into CPP area + * @length: size of address range in bytes + * + * Check if address range fits within CPP area. Return 0 if area fits + * or -1 on error. + */ +int +nfp_cpp_area_check_range(struct nfp_cpp_area *area, unsigned long long offset, + unsigned long length) +{ + if (((offset + length) > area->size)) + return NFP_ERRNO(EFAULT); + + return 0; +} + +/* + * Return the correct CPP address, and fixup xpb_addr as needed, + * based upon NFP model. + */ +static uint32_t +nfp_xpb_to_cpp(struct nfp_cpp *cpp, uint32_t *xpb_addr) +{ + uint32_t xpb; + int island; + + if (!NFP_CPP_MODEL_IS_6000(cpp->model)) + return 0; + + xpb = NFP_CPP_ID(14, NFP_CPP_ACTION_RW, 0); + + /* + * Ensure that non-local XPB accesses go out through the + * global XPBM bus. + */ + island = ((*xpb_addr) >> 24) & 0x3f; + + if (!island) + return xpb; + + if (island == 1) { + /* + * Accesses to the ARM Island overlay uses Island 0 + * Global Bit + */ + (*xpb_addr) &= ~0x7f000000; + if (*xpb_addr < 0x60000) + *xpb_addr |= (1 << 30); + else + /* And only non-ARM interfaces use island id = 1 */ + if (NFP_CPP_INTERFACE_TYPE_of(nfp_cpp_interface(cpp)) != + NFP_CPP_INTERFACE_TYPE_ARM) + *xpb_addr |= (1 << 24); + } else { + (*xpb_addr) |= (1 << 30); + } + + return xpb; +} + +int +nfp_cpp_area_readl(struct nfp_cpp_area *area, unsigned long offset, + uint32_t *value) +{ + int sz; + uint32_t tmp = 0; + + sz = nfp_cpp_area_read(area, offset, &tmp, sizeof(tmp)); + *value = rte_le_to_cpu_32(tmp); + + return (sz == sizeof(*value)) ? 0 : -1; +} + +int +nfp_cpp_area_writel(struct nfp_cpp_area *area, unsigned long offset, + uint32_t value) +{ + int sz; + + value = rte_cpu_to_le_32(value); + sz = nfp_cpp_area_write(area, offset, &value, sizeof(value)); + return (sz == sizeof(value)) ? 0 : -1; +} + +int +nfp_cpp_area_readq(struct nfp_cpp_area *area, unsigned long offset, + uint64_t *value) +{ + int sz; + uint64_t tmp = 0; + + sz = nfp_cpp_area_read(area, offset, &tmp, sizeof(tmp)); + *value = rte_le_to_cpu_64(tmp); + + return (sz == sizeof(*value)) ? 0 : -1; +} + +int +nfp_cpp_area_writeq(struct nfp_cpp_area *area, unsigned long offset, + uint64_t value) +{ + int sz; + + value = rte_cpu_to_le_64(value); + sz = nfp_cpp_area_write(area, offset, &value, sizeof(value)); + + return (sz == sizeof(value)) ? 
0 : -1; +} + +int +nfp_cpp_readl(struct nfp_cpp *cpp, uint32_t cpp_id, unsigned long long address, + uint32_t *value) +{ + int sz; + uint32_t tmp; + + sz = nfp_cpp_read(cpp, cpp_id, address, &tmp, sizeof(tmp)); + *value = rte_le_to_cpu_32(tmp); + + return (sz == sizeof(*value)) ? 0 : -1; +} + +int +nfp_cpp_writel(struct nfp_cpp *cpp, uint32_t cpp_id, unsigned long long address, + uint32_t value) +{ + int sz; + + value = rte_cpu_to_le_32(value); + sz = nfp_cpp_write(cpp, cpp_id, address, &value, sizeof(value)); + + return (sz == sizeof(value)) ? 0 : -1; +} + +int +nfp_cpp_readq(struct nfp_cpp *cpp, uint32_t cpp_id, unsigned long long address, + uint64_t *value) +{ + int sz; + uint64_t tmp; + + sz = nfp_cpp_read(cpp, cpp_id, address, &tmp, sizeof(tmp)); + *value = rte_le_to_cpu_64(tmp); + + return (sz == sizeof(*value)) ? 0 : -1; +} + +int +nfp_cpp_writeq(struct nfp_cpp *cpp, uint32_t cpp_id, unsigned long long address, + uint64_t value) +{ + int sz; + + value = rte_cpu_to_le_64(value); + sz = nfp_cpp_write(cpp, cpp_id, address, &value, sizeof(value)); + + return (sz == sizeof(value)) ? 0 : -1; +} + +int +nfp_xpb_writel(struct nfp_cpp *cpp, uint32_t xpb_addr, uint32_t value) +{ + uint32_t cpp_dest; + + cpp_dest = nfp_xpb_to_cpp(cpp, &xpb_addr); + + return nfp_cpp_writel(cpp, cpp_dest, xpb_addr, value); +} + +int +nfp_xpb_readl(struct nfp_cpp *cpp, uint32_t xpb_addr, uint32_t *value) +{ + uint32_t cpp_dest; + + cpp_dest = nfp_xpb_to_cpp(cpp, &xpb_addr); + + return nfp_cpp_readl(cpp, cpp_dest, xpb_addr, value); +} + +static struct nfp_cpp * +nfp_cpp_alloc(struct rte_pci_device *dev, int driver_lock_needed) +{ + const struct nfp_cpp_operations *ops; + struct nfp_cpp *cpp; + int err; + + ops = nfp_cpp_transport_operations(); + + if (!ops || !ops->init) + return NFP_ERRPTR(EINVAL); + + cpp = calloc(1, sizeof(*cpp)); + if (!cpp) + return NULL; + + cpp->op = ops; + cpp->driver_lock_needed = driver_lock_needed; + + if (cpp->op->init) { + err = cpp->op->init(cpp, dev); + if (err < 0) { + free(cpp); + return NULL; + } + } + + if (NFP_CPP_MODEL_IS_6000(nfp_cpp_model(cpp))) { + uint32_t xpbaddr; + size_t tgt; + + for (tgt = 0; tgt < ARRAY_SIZE(cpp->imb_cat_table); tgt++) { + /* Hardcoded XPB IMB Base, island 0 */ + xpbaddr = 0x000a0000 + (tgt * 4); + err = nfp_xpb_readl(cpp, xpbaddr, + (uint32_t *)&cpp->imb_cat_table[tgt]); + if (err < 0) { + free(cpp); + return NULL; + } + } + } + + return cpp; +} + +/* + * nfp_cpp_free - free the CPP handle + * @cpp: CPP handle + */ +void +nfp_cpp_free(struct nfp_cpp *cpp) +{ + if (cpp->op && cpp->op->free) + cpp->op->free(cpp); + + if (cpp->serial_len) + free(cpp->serial); + + free(cpp); +} + +struct nfp_cpp * +nfp_cpp_from_device_name(struct rte_pci_device *dev, int driver_lock_needed) +{ + return nfp_cpp_alloc(dev, driver_lock_needed); +} + +/* + * Modify bits of a 32-bit value from the XPB bus + * + * @param cpp NFP CPP device handle + * @param xpb_tgt XPB target and address + * @param mask mask of bits to alter + * @param value value to modify + * + * @return 0 on success, or -1 on failure (and set errno accordingly). 
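+ *
+ * For example, to set bit 4 of a CSR while leaving the other bits
+ * untouched (xpb_csr_addr stands in for a real XPB target/address):
+ *
+ *   nfp_xpb_writelm(cpp, xpb_csr_addr, 1 << 4, 1 << 4);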
+ */ +int +nfp_xpb_writelm(struct nfp_cpp *cpp, uint32_t xpb_tgt, uint32_t mask, + uint32_t value) +{ + int err; + uint32_t tmp; + + err = nfp_xpb_readl(cpp, xpb_tgt, &tmp); + if (err < 0) + return err; + + tmp &= ~mask; + tmp |= (mask & value); + return nfp_xpb_writel(cpp, xpb_tgt, tmp); +} + +/* + * Modify bits of a 32-bit value from the XPB bus + * + * @param cpp NFP CPP device handle + * @param xpb_tgt XPB target and address + * @param mask mask of bits to alter + * @param value value to monitor for + * @param timeout_us maximum number of us to wait (-1 for forever) + * + * @return >= 0 on success, or -1 on failure (and set errno accordingly). + */ +int +nfp_xpb_waitlm(struct nfp_cpp *cpp, uint32_t xpb_tgt, uint32_t mask, + uint32_t value, int timeout_us) +{ + uint32_t tmp; + int err; + + do { + err = nfp_xpb_readl(cpp, xpb_tgt, &tmp); + if (err < 0) + goto exit; + + if ((tmp & mask) == (value & mask)) { + if (timeout_us < 0) + timeout_us = 0; + break; + } + + if (timeout_us < 0) + continue; + + timeout_us -= 100; + usleep(100); + } while (timeout_us >= 0); + + if (timeout_us < 0) + err = NFP_ERRNO(ETIMEDOUT); + else + err = timeout_us; + +exit: + return err; +} + +/* + * nfp_cpp_read - read from CPP target + * @cpp: CPP handle + * @destination: CPP id + * @address: offset into CPP target + * @kernel_vaddr: kernel buffer for result + * @length: number of bytes to read + */ +int +nfp_cpp_read(struct nfp_cpp *cpp, uint32_t destination, + unsigned long long address, void *kernel_vaddr, size_t length) +{ + struct nfp_cpp_area *area; + int err; + + area = nfp_cpp_area_alloc_acquire(cpp, destination, address, length); + if (!area) { + printf("Area allocation/acquire failed\n"); + return -1; + } + + err = nfp_cpp_area_read(area, 0, kernel_vaddr, length); + + nfp_cpp_area_release_free(area); + return err; +} + +/* + * nfp_cpp_write - write to CPP target + * @cpp: CPP handle + * @destination: CPP id + * @address: offset into CPP target + * @kernel_vaddr: kernel buffer to read from + * @length: number of bytes to write + */ +int +nfp_cpp_write(struct nfp_cpp *cpp, uint32_t destination, + unsigned long long address, const void *kernel_vaddr, + size_t length) +{ + struct nfp_cpp_area *area; + int err; + + area = nfp_cpp_area_alloc_acquire(cpp, destination, address, length); + if (!area) + return -1; + + err = nfp_cpp_area_write(area, 0, kernel_vaddr, length); + + nfp_cpp_area_release_free(area); + return err; +} + +/* + * nfp_cpp_area_fill - fill a CPP area with a value + * @area: CPP area + * @offset: offset into CPP area + * @value: value to fill with + * @length: length of area to fill + */ +int +nfp_cpp_area_fill(struct nfp_cpp_area *area, unsigned long offset, + uint32_t value, size_t length) +{ + int err; + size_t i; + uint64_t value64; + + value = rte_cpu_to_le_32(value); + value64 = ((uint64_t)value << 32) | value; + + if ((offset + length) > area->size) + return NFP_ERRNO(EINVAL); + + if ((area->offset + offset) & 3) + return NFP_ERRNO(EINVAL); + + if (((area->offset + offset) & 7) == 4 && length >= 4) { + err = nfp_cpp_area_write(area, offset, &value, sizeof(value)); + if (err < 0) + return err; + if (err != sizeof(value)) + return NFP_ERRNO(ENOSPC); + offset += sizeof(value); + length -= sizeof(value); + } + + for (i = 0; (i + sizeof(value)) < length; i += sizeof(value64)) { + err = + nfp_cpp_area_write(area, offset + i, &value64, + sizeof(value64)); + if (err < 0) + return err; + if (err != sizeof(value64)) + return NFP_ERRNO(ENOSPC); + } + + if ((i + sizeof(value)) <= length) { + err 
= + nfp_cpp_area_write(area, offset + i, &value, sizeof(value)); + if (err < 0) + return err; + if (err != sizeof(value)) + return NFP_ERRNO(ENOSPC); + i += sizeof(value); + } + + return (int)i; +} + +/* + * NOTE: This code should not use nfp_xpb_* functions, + * as those are model-specific + */ +uint32_t +__nfp_cpp_model_autodetect(struct nfp_cpp *cpp) +{ + uint32_t arm_id = NFP_CPP_ID(NFP_CPP_TARGET_ARM, 0, 0); + uint32_t model = 0; + + if (nfp_cpp_readl(cpp, arm_id, NFP6000_ARM_GCSR_SOFTMODEL0, &model)) + return 0; + + if (NFP_CPP_MODEL_IS_6000(model)) { + uint32_t tmp; + + nfp_cpp_model_set(cpp, model); + + /* The PL's PluDeviceID revision code is authoratative */ + model &= ~0xff; + if (nfp_xpb_readl(cpp, NFP_XPB_DEVICE(1, 1, 16) + + NFP_PL_DEVICE_ID, &tmp)) + return 0; + + model |= (NFP_PL_DEVICE_ID_MASK & tmp) - 0x10; + } + + return model; +} + +/* + * nfp_cpp_map_area() - Helper function to map an area + * @cpp: NFP CPP handler + * @domain: CPP domain + * @target: CPP target + * @addr: CPP address + * @size: Size of the area + * @area: Area handle (output) + * + * Map an area of IOMEM access. To undo the effect of this function call + * @nfp_cpp_area_release_free(*area). + * + * Return: Pointer to memory mapped area or ERR_PTR + */ +uint8_t * +nfp_cpp_map_area(struct nfp_cpp *cpp, int domain, int target, uint64_t addr, + unsigned long size, struct nfp_cpp_area **area) +{ + uint8_t *res; + uint32_t dest; + + dest = NFP_CPP_ISLAND_ID(target, NFP_CPP_ACTION_RW, 0, domain); + + *area = nfp_cpp_area_alloc_acquire(cpp, dest, addr, size); + if (!*area) + goto err_eio; + + res = nfp_cpp_area_iomem(*area); + if (!res) + goto err_release_free; + + return res; + +err_release_free: + nfp_cpp_area_release_free(*area); +err_eio: + return NULL; +} diff --git a/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_crc.c b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_crc.c new file mode 100644 index 000000000..20431bf84 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_crc.c @@ -0,0 +1,49 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Netronome Systems, Inc. + * All rights reserved. + */ + +#include +#include + +#include "nfp_crc.h" + +static inline uint32_t +nfp_crc32_be_generic(uint32_t crc, unsigned char const *p, size_t len, + uint32_t polynomial) +{ + int i; + while (len--) { + crc ^= *p++ << 24; + for (i = 0; i < 8; i++) + crc = (crc << 1) ^ ((crc & 0x80000000) ? polynomial : + 0); + } + return crc; +} + +static inline uint32_t +nfp_crc32_be(uint32_t crc, unsigned char const *p, size_t len) +{ + return nfp_crc32_be_generic(crc, p, len, CRCPOLY_BE); +} + +static uint32_t +nfp_crc32_posix_end(uint32_t crc, size_t total_len) +{ + /* Extend with the length of the string. */ + while (total_len != 0) { + uint8_t c = total_len & 0xff; + + crc = nfp_crc32_be(crc, &c, 1); + total_len >>= 8; + } + + return ~crc; +} + +uint32_t +nfp_crc32_posix(const void *buff, size_t len) +{ + return nfp_crc32_posix_end(nfp_crc32_be(0, buff, len), len); +} diff --git a/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_crc.h b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_crc.h new file mode 100644 index 000000000..f99c89fca --- /dev/null +++ b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_crc.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Netronome Systems, Inc. + * All rights reserved. 
+ */ + +#ifndef __NFP_CRC_H__ +#define __NFP_CRC_H__ + +/* + * There are multiple 16-bit CRC polynomials in common use, but this is + * *the* standard CRC-32 polynomial, first popularized by Ethernet. + * x^32+x^26+x^23+x^22+x^16+x^12+x^11+x^10+x^8+x^7+x^5+x^4+x^2+x^1+x^0 + */ +#define CRCPOLY_LE 0xedb88320 +#define CRCPOLY_BE 0x04c11db7 + +uint32_t nfp_crc32_posix(const void *buff, size_t len); + +#endif diff --git a/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_hwinfo.c b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_hwinfo.c new file mode 100644 index 000000000..c0516bf8e --- /dev/null +++ b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_hwinfo.c @@ -0,0 +1,199 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Netronome Systems, Inc. + * All rights reserved. + */ + +/* Parse the hwinfo table that the ARM firmware builds in the ARM scratch SRAM + * after chip reset. + * + * Examples of the fields: + * me.count = 40 + * me.mask = 0x7f_ffff_ffff + * + * me.count is the total number of MEs on the system. + * me.mask is the bitmask of MEs that are available for application usage. + * + * (ie, in this example, ME 39 has been reserved by boardconfig.) + */ + +#include +#include + +#include "nfp_cpp.h" +#include "nfp6000/nfp6000.h" +#include "nfp_resource.h" +#include "nfp_hwinfo.h" +#include "nfp_crc.h" + +static int +nfp_hwinfo_is_updating(struct nfp_hwinfo *hwinfo) +{ + return hwinfo->version & NFP_HWINFO_VERSION_UPDATING; +} + +static int +nfp_hwinfo_db_walk(struct nfp_hwinfo *hwinfo, uint32_t size) +{ + const char *key, *val, *end = hwinfo->data + size; + + for (key = hwinfo->data; *key && key < end; + key = val + strlen(val) + 1) { + val = key + strlen(key) + 1; + if (val >= end) { + printf("Bad HWINFO - overflowing key\n"); + return -EINVAL; + } + + if (val + strlen(val) + 1 > end) { + printf("Bad HWINFO - overflowing value\n"); + return -EINVAL; + } + } + return 0; +} + +static int +nfp_hwinfo_db_validate(struct nfp_hwinfo *db, uint32_t len) +{ + uint32_t size, new_crc, *crc; + + size = db->size; + if (size > len) { + printf("Unsupported hwinfo size %u > %u\n", size, len); + return -EINVAL; + } + + size -= sizeof(uint32_t); + new_crc = nfp_crc32_posix((char *)db, size); + crc = (uint32_t *)(db->start + size); + if (new_crc != *crc) { + printf("Corrupt hwinfo table (CRC mismatch)\n"); + printf("\tcalculated 0x%x, expected 0x%x\n", new_crc, *crc); + return -EINVAL; + } + + return nfp_hwinfo_db_walk(db, size); +} + +static struct nfp_hwinfo * +nfp_hwinfo_try_fetch(struct nfp_cpp *cpp, size_t *cpp_size) +{ + struct nfp_hwinfo *header; + void *res; + uint64_t cpp_addr; + uint32_t cpp_id; + int err; + uint8_t *db; + + res = nfp_resource_acquire(cpp, NFP_RESOURCE_NFP_HWINFO); + if (res) { + cpp_id = nfp_resource_cpp_id(res); + cpp_addr = nfp_resource_address(res); + *cpp_size = nfp_resource_size(res); + + nfp_resource_release(res); + + if (*cpp_size < HWINFO_SIZE_MIN) + return NULL; + } else { + return NULL; + } + + db = malloc(*cpp_size + 1); + if (!db) + return NULL; + + err = nfp_cpp_read(cpp, cpp_id, cpp_addr, db, *cpp_size); + if (err != (int)*cpp_size) + goto exit_free; + + header = (void *)db; + printf("NFP HWINFO header: %08x\n", *(uint32_t *)header); + if (nfp_hwinfo_is_updating(header)) + goto exit_free; + + if (header->version != NFP_HWINFO_VERSION_2) { + printf("Unknown HWInfo version: 0x%08x\n", + header->version); + goto exit_free; + } + + /* NULL-terminate for safety */ + db[*cpp_size] = '\0'; + + return (void *)db; +exit_free: + free(db); + return NULL; +} + +static struct 
nfp_hwinfo * +nfp_hwinfo_fetch(struct nfp_cpp *cpp, size_t *hwdb_size) +{ + struct timespec wait; + struct nfp_hwinfo *db; + int count; + + wait.tv_sec = 0; + wait.tv_nsec = 10000000; + count = 0; + + for (;;) { + db = nfp_hwinfo_try_fetch(cpp, hwdb_size); + if (db) + return db; + + nanosleep(&wait, NULL); + if (count++ > 200) { + printf("NFP access error\n"); + return NULL; + } + } +} + +struct nfp_hwinfo * +nfp_hwinfo_read(struct nfp_cpp *cpp) +{ + struct nfp_hwinfo *db; + size_t hwdb_size = 0; + int err; + + db = nfp_hwinfo_fetch(cpp, &hwdb_size); + if (!db) + return NULL; + + err = nfp_hwinfo_db_validate(db, hwdb_size); + if (err) { + free(db); + return NULL; + } + return db; +} + +/* + * nfp_hwinfo_lookup() - Find a value in the HWInfo table by name + * @hwinfo: NFP HWinfo table + * @lookup: HWInfo name to search for + * + * Return: Value of the HWInfo name, or NULL + */ +const char * +nfp_hwinfo_lookup(struct nfp_hwinfo *hwinfo, const char *lookup) +{ + const char *key, *val, *end; + + if (!hwinfo || !lookup) + return NULL; + + end = hwinfo->data + hwinfo->size - sizeof(uint32_t); + + for (key = hwinfo->data; *key && key < end; + key = val + strlen(val) + 1) { + val = key + strlen(key) + 1; + + if (strcmp(key, lookup) == 0) + return val; + } + + return NULL; +} diff --git a/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_hwinfo.h b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_hwinfo.h new file mode 100644 index 000000000..ccc616321 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_hwinfo.h @@ -0,0 +1,85 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Netronome Systems, Inc. + * All rights reserved. + */ + +#ifndef __NFP_HWINFO_H__ +#define __NFP_HWINFO_H__ + +#include + +#define HWINFO_SIZE_MIN 0x100 + +/* + * The Hardware Info Table defines the properties of the system. + * + * HWInfo v1 Table (fixed size) + * + * 0x0000: uint32_t version Hardware Info Table version (1.0) + * 0x0004: uint32_t size Total size of the table, including the + * CRC32 (IEEE 802.3) + * 0x0008: uint32_t jumptab Offset of key/value table + * 0x000c: uint32_t keys Total number of keys in the key/value + * table + * NNNNNN: Key/value jump table and string data + * (size - 4): uint32_t crc32 CRC32 (same as IEEE 802.3, POSIX csum, etc) + * CRC32("",0) = ~0, CRC32("a",1) = 0x48C279FE + * + * HWInfo v2 Table (variable size) + * + * 0x0000: uint32_t version Hardware Info Table version (2.0) + * 0x0004: uint32_t size Current size of the data area, excluding + * CRC32 + * 0x0008: uint32_t limit Maximum size of the table + * 0x000c: uint32_t reserved Unused, set to zero + * NNNNNN: Key/value data + * (size - 4): uint32_t crc32 CRC32 (same as IEEE 802.3, POSIX csum, etc) + * CRC32("",0) = ~0, CRC32("a",1) = 0x48C279FE + * + * If the HWInfo table is in the process of being updated, the low bit of + * version will be set. + * + * HWInfo v1 Key/Value Table + * ------------------------- + * + * The key/value table is a set of offsets to ASCIIZ strings which have + * been strcmp(3) sorted (yes, please use bsearch(3) on the table). + * + * All keys are guaranteed to be unique. + * + * N+0: uint32_t key_1 Offset to the first key + * N+4: uint32_t val_1 Offset to the first value + * N+8: uint32_t key_2 Offset to the second key + * N+c: uint32_t val_2 Offset to the second value + * ... + * + * HWInfo v2 Key/Value Table + * ------------------------- + * + * Packed UTF8Z strings, ie 'key1\000value1\000key2\000value2\000' + * + * Unsorted. 
+ */ + +#define NFP_HWINFO_VERSION_1 ('H' << 24 | 'I' << 16 | 1 << 8 | 0 << 1 | 0) +#define NFP_HWINFO_VERSION_2 ('H' << 24 | 'I' << 16 | 2 << 8 | 0 << 1 | 0) +#define NFP_HWINFO_VERSION_UPDATING BIT(0) + +struct nfp_hwinfo { + uint8_t start[0]; + + uint32_t version; + uint32_t size; + + /* v2 specific fields */ + uint32_t limit; + uint32_t resv; + + char data[]; +}; + +struct nfp_hwinfo *nfp_hwinfo_read(struct nfp_cpp *cpp); + +const char *nfp_hwinfo_lookup(struct nfp_hwinfo *hwinfo, const char *lookup); + +#endif diff --git a/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_mip.c b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_mip.c new file mode 100644 index 000000000..c86966df8 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_mip.c @@ -0,0 +1,154 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Netronome Systems, Inc. + * All rights reserved. + */ + +#include +#include + +#include "nfp_cpp.h" +#include "nfp_mip.h" +#include "nfp_nffw.h" + +#define NFP_MIP_SIGNATURE rte_cpu_to_le_32(0x0050494d) /* "MIP\0" */ +#define NFP_MIP_VERSION rte_cpu_to_le_32(1) +#define NFP_MIP_MAX_OFFSET (256 * 1024) + +struct nfp_mip { + uint32_t signature; + uint32_t mip_version; + uint32_t mip_size; + uint32_t first_entry; + + uint32_t version; + uint32_t buildnum; + uint32_t buildtime; + uint32_t loadtime; + + uint32_t symtab_addr; + uint32_t symtab_size; + uint32_t strtab_addr; + uint32_t strtab_size; + + char name[16]; + char toolchain[32]; +}; + +/* Read memory and check if it could be a valid MIP */ +static int +nfp_mip_try_read(struct nfp_cpp *cpp, uint32_t cpp_id, uint64_t addr, + struct nfp_mip *mip) +{ + int ret; + + ret = nfp_cpp_read(cpp, cpp_id, addr, mip, sizeof(*mip)); + if (ret != sizeof(*mip)) { + printf("Failed to read MIP data (%d, %zu)\n", + ret, sizeof(*mip)); + return -EIO; + } + if (mip->signature != NFP_MIP_SIGNATURE) { + printf("Incorrect MIP signature (0x%08x)\n", + rte_le_to_cpu_32(mip->signature)); + return -EINVAL; + } + if (mip->mip_version != NFP_MIP_VERSION) { + printf("Unsupported MIP version (%d)\n", + rte_le_to_cpu_32(mip->mip_version)); + return -EINVAL; + } + + return 0; +} + +/* Try to locate MIP using the resource table */ +static int +nfp_mip_read_resource(struct nfp_cpp *cpp, struct nfp_mip *mip) +{ + struct nfp_nffw_info *nffw_info; + uint32_t cpp_id; + uint64_t addr; + int err; + + nffw_info = nfp_nffw_info_open(cpp); + if (!nffw_info) + return -ENODEV; + + err = nfp_nffw_info_mip_first(nffw_info, &cpp_id, &addr); + if (err) + goto exit_close_nffw; + + err = nfp_mip_try_read(cpp, cpp_id, addr, mip); +exit_close_nffw: + nfp_nffw_info_close(nffw_info); + return err; +} + +/* + * nfp_mip_open() - Get device MIP structure + * @cpp: NFP CPP Handle + * + * Copy MIP structure from NFP device and return it. The returned + * structure is handled internally by the library and should be + * freed by calling nfp_mip_close(). + * + * Return: pointer to mip, NULL on failure. 
+ */ +struct nfp_mip * +nfp_mip_open(struct nfp_cpp *cpp) +{ + struct nfp_mip *mip; + int err; + + mip = malloc(sizeof(*mip)); + if (!mip) + return NULL; + + err = nfp_mip_read_resource(cpp, mip); + if (err) { + free(mip); + return NULL; + } + + mip->name[sizeof(mip->name) - 1] = 0; + + return mip; +} + +void +nfp_mip_close(struct nfp_mip *mip) +{ + free(mip); +} + +const char * +nfp_mip_name(const struct nfp_mip *mip) +{ + return mip->name; +} + +/* + * nfp_mip_symtab() - Get the address and size of the MIP symbol table + * @mip: MIP handle + * @addr: Location for NFP DDR address of MIP symbol table + * @size: Location for size of MIP symbol table + */ +void +nfp_mip_symtab(const struct nfp_mip *mip, uint32_t *addr, uint32_t *size) +{ + *addr = rte_le_to_cpu_32(mip->symtab_addr); + *size = rte_le_to_cpu_32(mip->symtab_size); +} + +/* + * nfp_mip_strtab() - Get the address and size of the MIP symbol name table + * @mip: MIP handle + * @addr: Location for NFP DDR address of MIP symbol name table + * @size: Location for size of MIP symbol name table + */ +void +nfp_mip_strtab(const struct nfp_mip *mip, uint32_t *addr, uint32_t *size) +{ + *addr = rte_le_to_cpu_32(mip->strtab_addr); + *size = rte_le_to_cpu_32(mip->strtab_size); +} diff --git a/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_mip.h b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_mip.h new file mode 100644 index 000000000..d0919b58f --- /dev/null +++ b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_mip.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Netronome Systems, Inc. + * All rights reserved. + */ + +#ifndef __NFP_MIP_H__ +#define __NFP_MIP_H__ + +#include "nfp_nffw.h" + +struct nfp_mip; + +struct nfp_mip *nfp_mip_open(struct nfp_cpp *cpp); +void nfp_mip_close(struct nfp_mip *mip); + +const char *nfp_mip_name(const struct nfp_mip *mip); +void nfp_mip_symtab(const struct nfp_mip *mip, uint32_t *addr, uint32_t *size); +void nfp_mip_strtab(const struct nfp_mip *mip, uint32_t *addr, uint32_t *size); +int nfp_nffw_info_mip_first(struct nfp_nffw_info *state, uint32_t *cpp_id, + uint64_t *off); +#endif diff --git a/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_mutex.c b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_mutex.c new file mode 100644 index 000000000..318c5800d --- /dev/null +++ b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_mutex.c @@ -0,0 +1,424 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Netronome Systems, Inc. + * All rights reserved. + */ + +#include + +#include +#include +#include + +#include "nfp_cpp.h" +#include "nfp6000/nfp6000.h" + +#define MUTEX_LOCKED(interface) ((((uint32_t)(interface)) << 16) | 0x000f) +#define MUTEX_UNLOCK(interface) (0 | 0x0000) + +#define MUTEX_IS_LOCKED(value) (((value) & 0xffff) == 0x000f) +#define MUTEX_IS_UNLOCKED(value) (((value) & 0xffff) == 0x0000) +#define MUTEX_INTERFACE(value) (((value) >> 16) & 0xffff) + +/* + * If you need more than 65536 recursive locks, please + * rethink your code. 
+ */ +#define MUTEX_DEPTH_MAX 0xffff + +struct nfp_cpp_mutex { + struct nfp_cpp *cpp; + uint8_t target; + uint16_t depth; + unsigned long long address; + uint32_t key; + unsigned int usage; + struct nfp_cpp_mutex *prev, *next; +}; + +static int +_nfp_cpp_mutex_validate(uint32_t model, int *target, unsigned long long address) +{ + /* Address must be 64-bit aligned */ + if (address & 7) + return NFP_ERRNO(EINVAL); + + if (NFP_CPP_MODEL_IS_6000(model)) { + if (*target != NFP_CPP_TARGET_MU) + return NFP_ERRNO(EINVAL); + } else { + return NFP_ERRNO(EINVAL); + } + + return 0; +} + +/* + * Initialize a mutex location + * + * The CPP target:address must point to a 64-bit aligned location, and + * will initialize 64 bits of data at the location. + * + * This creates the initial mutex state, as locked by this + * nfp_cpp_interface(). + * + * This function should only be called when setting up + * the initial lock state upon boot-up of the system. + * + * @param mutex NFP CPP Mutex handle + * @param target NFP CPP target ID (ie NFP_CPP_TARGET_CLS or + * NFP_CPP_TARGET_MU) + * @param address Offset into the address space of the NFP CPP target ID + * @param key Unique 32-bit value for this mutex + * + * @return 0 on success, or -1 on failure (and set errno accordingly). + */ +int +nfp_cpp_mutex_init(struct nfp_cpp *cpp, int target, unsigned long long address, + uint32_t key) +{ + uint32_t model = nfp_cpp_model(cpp); + uint32_t muw = NFP_CPP_ID(target, 4, 0); /* atomic_write */ + int err; + + err = _nfp_cpp_mutex_validate(model, &target, address); + if (err < 0) + return err; + + err = nfp_cpp_writel(cpp, muw, address + 4, key); + if (err < 0) + return err; + + err = + nfp_cpp_writel(cpp, muw, address + 0, + MUTEX_LOCKED(nfp_cpp_interface(cpp))); + if (err < 0) + return err; + + return 0; +} + +/* + * Create a mutex handle from an address controlled by a MU Atomic engine + * + * The CPP target:address must point to a 64-bit aligned location, and + * reserve 64 bits of data at the location for use by the handle. + * + * Only target/address pairs that point to entities that support the + * MU Atomic Engine are supported. + * + * @param cpp NFP CPP handle + * @param target NFP CPP target ID (ie NFP_CPP_TARGET_CLS or + * NFP_CPP_TARGET_MU) + * @param address Offset into the address space of the NFP CPP target ID + * @param key 32-bit unique key (must match the key at this location) + * + * @return A non-NULL struct nfp_cpp_mutex * on success, NULL on failure. + */ +struct nfp_cpp_mutex * +nfp_cpp_mutex_alloc(struct nfp_cpp *cpp, int target, + unsigned long long address, uint32_t key) +{ + uint32_t model = nfp_cpp_model(cpp); + struct nfp_cpp_mutex *mutex; + uint32_t mur = NFP_CPP_ID(target, 3, 0); /* atomic_read */ + int err; + uint32_t tmp; + + /* Look for cached mutex */ + for (mutex = cpp->mutex_cache; mutex; mutex = mutex->next) { + if (mutex->target == target && mutex->address == address) + break; + } + + if (mutex) { + if (mutex->key == key) { + mutex->usage++; + return mutex; + } + + /* If the key doesn't match... 
*/ + return NFP_ERRPTR(EEXIST); + } + + err = _nfp_cpp_mutex_validate(model, &target, address); + if (err < 0) + return NULL; + + err = nfp_cpp_readl(cpp, mur, address + 4, &tmp); + if (err < 0) + return NULL; + + if (tmp != key) + return NFP_ERRPTR(EEXIST); + + mutex = calloc(sizeof(*mutex), 1); + if (!mutex) + return NFP_ERRPTR(ENOMEM); + + mutex->cpp = cpp; + mutex->target = target; + mutex->address = address; + mutex->key = key; + mutex->depth = 0; + mutex->usage = 1; + + /* Add mutex to the cache */ + if (cpp->mutex_cache) { + cpp->mutex_cache->prev = mutex; + mutex->next = cpp->mutex_cache; + cpp->mutex_cache = mutex; + } else { + cpp->mutex_cache = mutex; + } + + return mutex; +} + +struct nfp_cpp * +nfp_cpp_mutex_cpp(struct nfp_cpp_mutex *mutex) +{ + return mutex->cpp; +} + +uint32_t +nfp_cpp_mutex_key(struct nfp_cpp_mutex *mutex) +{ + return mutex->key; +} + +uint16_t +nfp_cpp_mutex_owner(struct nfp_cpp_mutex *mutex) +{ + uint32_t mur = NFP_CPP_ID(mutex->target, 3, 0); /* atomic_read */ + uint32_t value, key; + int err; + + err = nfp_cpp_readl(mutex->cpp, mur, mutex->address, &value); + if (err < 0) + return err; + + err = nfp_cpp_readl(mutex->cpp, mur, mutex->address + 4, &key); + if (err < 0) + return err; + + if (key != mutex->key) + return NFP_ERRNO(EPERM); + + if (!MUTEX_IS_LOCKED(value)) + return 0; + + return MUTEX_INTERFACE(value); +} + +int +nfp_cpp_mutex_target(struct nfp_cpp_mutex *mutex) +{ + return mutex->target; +} + +uint64_t +nfp_cpp_mutex_address(struct nfp_cpp_mutex *mutex) +{ + return mutex->address; +} + +/* + * Free a mutex handle - does not alter the lock state + * + * @param mutex NFP CPP Mutex handle + */ +void +nfp_cpp_mutex_free(struct nfp_cpp_mutex *mutex) +{ + mutex->usage--; + if (mutex->usage > 0) + return; + + /* Remove mutex from the cache */ + if (mutex->next) + mutex->next->prev = mutex->prev; + if (mutex->prev) + mutex->prev->next = mutex->next; + + /* If mutex->cpp == NULL, something broke */ + if (mutex->cpp && mutex == mutex->cpp->mutex_cache) + mutex->cpp->mutex_cache = mutex->next; + + free(mutex); +} + +/* + * Lock a mutex handle, using the NFP MU Atomic Engine + * + * @param mutex NFP CPP Mutex handle + * + * @return 0 on success, or -1 on failure (and set errno accordingly). + */ +int +nfp_cpp_mutex_lock(struct nfp_cpp_mutex *mutex) +{ + int err; + time_t warn_at = time(NULL) + 15; + + while ((err = nfp_cpp_mutex_trylock(mutex)) != 0) { + /* If errno != EBUSY, then the lock was damaged */ + if (err < 0 && errno != EBUSY) + return err; + if (time(NULL) >= warn_at) { + printf("Warning: waiting for NFP mutex\n"); + printf("\tusage:%u\n", mutex->usage); + printf("\tdepth:%hd]\n", mutex->depth); + printf("\ttarget:%d\n", mutex->target); + printf("\taddr:%llx\n", mutex->address); + printf("\tkey:%08x]\n", mutex->key); + warn_at = time(NULL) + 60; + } + sched_yield(); + } + return 0; +} + +/* + * Unlock a mutex handle, using the NFP MU Atomic Engine + * + * @param mutex NFP CPP Mutex handle + * + * @return 0 on success, or -1 on failure (and set errno accordingly). 
+ */ +int +nfp_cpp_mutex_unlock(struct nfp_cpp_mutex *mutex) +{ + uint32_t muw = NFP_CPP_ID(mutex->target, 4, 0); /* atomic_write */ + uint32_t mur = NFP_CPP_ID(mutex->target, 3, 0); /* atomic_read */ + struct nfp_cpp *cpp = mutex->cpp; + uint32_t key, value; + uint16_t interface = nfp_cpp_interface(cpp); + int err; + + if (mutex->depth > 1) { + mutex->depth--; + return 0; + } + + err = nfp_cpp_readl(mutex->cpp, mur, mutex->address, &value); + if (err < 0) + goto exit; + + err = nfp_cpp_readl(mutex->cpp, mur, mutex->address + 4, &key); + if (err < 0) + goto exit; + + if (key != mutex->key) { + err = NFP_ERRNO(EPERM); + goto exit; + } + + if (value != MUTEX_LOCKED(interface)) { + err = NFP_ERRNO(EACCES); + goto exit; + } + + err = nfp_cpp_writel(cpp, muw, mutex->address, MUTEX_UNLOCK(interface)); + if (err < 0) + goto exit; + + mutex->depth = 0; + +exit: + return err; +} + +/* + * Attempt to lock a mutex handle, using the NFP MU Atomic Engine + * + * Valid lock states: + * + * 0x....0000 - Unlocked + * 0x....000f - Locked + * + * @param mutex NFP CPP Mutex handle + * @return 0 if the lock succeeded, -1 on failure (and errno set + * appropriately). + */ +int +nfp_cpp_mutex_trylock(struct nfp_cpp_mutex *mutex) +{ + uint32_t mur = NFP_CPP_ID(mutex->target, 3, 0); /* atomic_read */ + uint32_t muw = NFP_CPP_ID(mutex->target, 4, 0); /* atomic_write */ + uint32_t mus = NFP_CPP_ID(mutex->target, 5, 3); /* test_set_imm */ + uint32_t key, value, tmp; + struct nfp_cpp *cpp = mutex->cpp; + int err; + + if (mutex->depth > 0) { + if (mutex->depth == MUTEX_DEPTH_MAX) + return NFP_ERRNO(E2BIG); + + mutex->depth++; + return 0; + } + + /* Verify that the lock marker is not damaged */ + err = nfp_cpp_readl(cpp, mur, mutex->address + 4, &key); + if (err < 0) + goto exit; + + if (key != mutex->key) { + err = NFP_ERRNO(EPERM); + goto exit; + } + + /* + * Compare against the unlocked state, and if true, + * write the interface id into the top 16 bits, and + * mark as locked. + */ + value = MUTEX_LOCKED(nfp_cpp_interface(cpp)); + + /* + * We use test_set_imm here, as it implies a read + * of the current state, and sets the bits in the + * bytemask of the command to 1s. Since the mutex + * is guaranteed to be 64-bit aligned, the bytemask + * of this 32-bit command is ensured to be 8'b00001111, + * which implies that the lower 4 bits will be set to + * ones regardless of the initial state. + * + * Since this is a 'Readback' operation, with no Pull + * data, we can treat this as a normal Push (read) + * atomic, which returns the original value. + */ + err = nfp_cpp_readl(cpp, mus, mutex->address, &tmp); + if (err < 0) + goto exit; + + /* Was it unlocked? */ + if (MUTEX_IS_UNLOCKED(tmp)) { + /* + * The read value can only be 0x....0000 in the unlocked state. + * If there was another contending for this lock, then + * the lock state would be 0x....000f + * + * Write our owner ID into the lock + * While not strictly necessary, this helps with + * debug and bookkeeping. + */ + err = nfp_cpp_writel(cpp, muw, mutex->address, value); + if (err < 0) + goto exit; + + mutex->depth = 1; + goto exit; + } + + /* Already locked by us? Success! */ + if (tmp == value) { + mutex->depth = 1; + goto exit; + } + + err = NFP_ERRNO(MUTEX_IS_LOCKED(tmp) ? 
EBUSY : EINVAL); + +exit: + return err; +} diff --git a/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_nffw.c b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_nffw.c new file mode 100644 index 000000000..8bec0e3c9 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_nffw.c @@ -0,0 +1,235 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Netronome Systems, Inc. + * All rights reserved. + */ + +#include "nfp_cpp.h" +#include "nfp_nffw.h" +#include "nfp_mip.h" +#include "nfp6000/nfp6000.h" +#include "nfp_resource.h" + +/* + * flg_info_version = flags[0]<27:16> + * This is a small version counter intended only to detect if the current + * implementation can read the current struct. Struct changes should be very + * rare and as such a 12-bit counter should cover large spans of time. By the + * time it wraps around, we don't expect to have 4096 versions of this struct + * to be in use at the same time. + */ +static uint32_t +nffw_res_info_version_get(const struct nfp_nffw_info_data *res) +{ + return (res->flags[0] >> 16) & 0xfff; +} + +/* flg_init = flags[0]<0> */ +static uint32_t +nffw_res_flg_init_get(const struct nfp_nffw_info_data *res) +{ + return (res->flags[0] >> 0) & 1; +} + +/* loaded = loaded__mu_da__mip_off_hi<31:31> */ +static uint32_t +nffw_fwinfo_loaded_get(const struct nffw_fwinfo *fi) +{ + return (fi->loaded__mu_da__mip_off_hi >> 31) & 1; +} + +/* mip_cppid = mip_cppid */ +static uint32_t +nffw_fwinfo_mip_cppid_get(const struct nffw_fwinfo *fi) +{ + return fi->mip_cppid; +} + +/* loaded = loaded__mu_da__mip_off_hi<8:8> */ +static uint32_t +nffw_fwinfo_mip_mu_da_get(const struct nffw_fwinfo *fi) +{ + return (fi->loaded__mu_da__mip_off_hi >> 8) & 1; +} + +/* mip_offset = (loaded__mu_da__mip_off_hi<7:0> << 8) | mip_offset_lo */ +static uint64_t +nffw_fwinfo_mip_offset_get(const struct nffw_fwinfo *fi) +{ + uint64_t mip_off_hi = fi->loaded__mu_da__mip_off_hi; + + return (mip_off_hi & 0xFF) << 32 | fi->mip_offset_lo; +} + +#define NFP_IMB_TGTADDRESSMODECFG_MODE_of(_x) (((_x) >> 13) & 0x7) +#define NFP_IMB_TGTADDRESSMODECFG_ADDRMODE BIT(12) +#define NFP_IMB_TGTADDRESSMODECFG_ADDRMODE_32_BIT 0 +#define NFP_IMB_TGTADDRESSMODECFG_ADDRMODE_40_BIT BIT(12) + +static int +nfp_mip_mu_locality_lsb(struct nfp_cpp *cpp) +{ + unsigned int mode, addr40; + uint32_t xpbaddr, imbcppat; + int err; + + /* Hardcoded XPB IMB Base, island 0 */ + xpbaddr = 0x000a0000 + NFP_CPP_TARGET_MU * 4; + err = nfp_xpb_readl(cpp, xpbaddr, &imbcppat); + if (err < 0) + return err; + + mode = NFP_IMB_TGTADDRESSMODECFG_MODE_of(imbcppat); + addr40 = !!(imbcppat & NFP_IMB_TGTADDRESSMODECFG_ADDRMODE); + + return nfp_cppat_mu_locality_lsb(mode, addr40); +} + +static unsigned int +nffw_res_fwinfos(struct nfp_nffw_info_data *fwinf, struct nffw_fwinfo **arr) +{ + /* + * For the this code, version 0 is most likely to be version 1 in this + * case. Since the kernel driver does not take responsibility for + * initialising the nfp.nffw resource, any previous code (CA firmware or + * userspace) that left the version 0 and did set the init flag is going + * to be version 1. 
+ */ + switch (nffw_res_info_version_get(fwinf)) { + case 0: + case 1: + *arr = &fwinf->info.v1.fwinfo[0]; + return NFFW_FWINFO_CNT_V1; + case 2: + *arr = &fwinf->info.v2.fwinfo[0]; + return NFFW_FWINFO_CNT_V2; + default: + *arr = NULL; + return 0; + } +} + +/* + * nfp_nffw_info_open() - Acquire the lock on the NFFW table + * @cpp: NFP CPP handle + * + * Return: 0, or -ERRNO + */ +struct nfp_nffw_info * +nfp_nffw_info_open(struct nfp_cpp *cpp) +{ + struct nfp_nffw_info_data *fwinf; + struct nfp_nffw_info *state; + uint32_t info_ver; + int err; + + state = malloc(sizeof(*state)); + if (!state) + return NULL; + + memset(state, 0, sizeof(*state)); + + state->res = nfp_resource_acquire(cpp, NFP_RESOURCE_NFP_NFFW); + if (!state->res) + goto err_free; + + fwinf = &state->fwinf; + + if (sizeof(*fwinf) > nfp_resource_size(state->res)) + goto err_release; + + err = nfp_cpp_read(cpp, nfp_resource_cpp_id(state->res), + nfp_resource_address(state->res), + fwinf, sizeof(*fwinf)); + if (err < (int)sizeof(*fwinf)) + goto err_release; + + if (!nffw_res_flg_init_get(fwinf)) + goto err_release; + + info_ver = nffw_res_info_version_get(fwinf); + if (info_ver > NFFW_INFO_VERSION_CURRENT) + goto err_release; + + state->cpp = cpp; + return state; + +err_release: + nfp_resource_release(state->res); +err_free: + free(state); + return NULL; +} + +/* + * nfp_nffw_info_release() - Release the lock on the NFFW table + * @state: NFP FW info state + * + * Return: 0, or -ERRNO + */ +void +nfp_nffw_info_close(struct nfp_nffw_info *state) +{ + nfp_resource_release(state->res); + free(state); +} + +/* + * nfp_nffw_info_fwid_first() - Return the first firmware ID in the NFFW + * @state: NFP FW info state + * + * Return: First NFFW firmware info, NULL on failure + */ +static struct nffw_fwinfo * +nfp_nffw_info_fwid_first(struct nfp_nffw_info *state) +{ + struct nffw_fwinfo *fwinfo; + unsigned int cnt, i; + + cnt = nffw_res_fwinfos(&state->fwinf, &fwinfo); + if (!cnt) + return NULL; + + for (i = 0; i < cnt; i++) + if (nffw_fwinfo_loaded_get(&fwinfo[i])) + return &fwinfo[i]; + + return NULL; +} + +/* + * nfp_nffw_info_mip_first() - Retrieve the location of the first FW's MIP + * @state: NFP FW info state + * @cpp_id: Pointer to the CPP ID of the MIP + * @off: Pointer to the CPP Address of the MIP + * + * Return: 0, or -ERRNO + */ +int +nfp_nffw_info_mip_first(struct nfp_nffw_info *state, uint32_t *cpp_id, + uint64_t *off) +{ + struct nffw_fwinfo *fwinfo; + + fwinfo = nfp_nffw_info_fwid_first(state); + if (!fwinfo) + return -EINVAL; + + *cpp_id = nffw_fwinfo_mip_cppid_get(fwinfo); + *off = nffw_fwinfo_mip_offset_get(fwinfo); + + if (nffw_fwinfo_mip_mu_da_get(fwinfo)) { + int locality_off; + + if (NFP_CPP_ID_TARGET_of(*cpp_id) != NFP_CPP_TARGET_MU) + return 0; + + locality_off = nfp_mip_mu_locality_lsb(state->cpp); + if (locality_off < 0) + return locality_off; + + *off &= ~(NFP_MU_ADDR_ACCESS_TYPE_MASK << locality_off); + *off |= NFP_MU_ADDR_ACCESS_TYPE_DIRECT << locality_off; + } + + return 0; +} diff --git a/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_nffw.h b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_nffw.h new file mode 100644 index 000000000..3bbdf1c13 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_nffw.h @@ -0,0 +1,86 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Netronome Systems, Inc. + * All rights reserved. 
+ */ + +#ifndef __NFP_NFFW_H__ +#define __NFP_NFFW_H__ + +#include "nfp-common/nfp_platform.h" +#include "nfp_cpp.h" + +/* + * Init-CSR owner IDs for firmware map to firmware IDs which start at 4. + * Lower IDs are reserved for target and loader IDs. + */ +#define NFFW_FWID_EXT 3 /* For active MEs that we didn't load. */ +#define NFFW_FWID_BASE 4 + +#define NFFW_FWID_ALL 255 + +/* Init-CSR owner IDs for firmware map to firmware IDs which start at 4. + * Lower IDs are reserved for target and loader IDs. + */ +#define NFFW_FWID_EXT 3 /* For active MEs that we didn't load. */ +#define NFFW_FWID_BASE 4 + +#define NFFW_FWID_ALL 255 + +/** + * NFFW_INFO_VERSION history: + * 0: This was never actually used (before versioning), but it refers to + * the previous struct which had FWINFO_CNT = MEINFO_CNT = 120 that later + * changed to 200. + * 1: First versioned struct, with + * FWINFO_CNT = 120 + * MEINFO_CNT = 120 + * 2: FWINFO_CNT = 200 + * MEINFO_CNT = 200 + */ +#define NFFW_INFO_VERSION_CURRENT 2 + +/* Enough for all current chip families */ +#define NFFW_MEINFO_CNT_V1 120 +#define NFFW_FWINFO_CNT_V1 120 +#define NFFW_MEINFO_CNT_V2 200 +#define NFFW_FWINFO_CNT_V2 200 + +struct nffw_meinfo { + uint32_t ctxmask__fwid__meid; +}; + +struct nffw_fwinfo { + uint32_t loaded__mu_da__mip_off_hi; + uint32_t mip_cppid; /* 0 means no MIP */ + uint32_t mip_offset_lo; +}; + +struct nfp_nffw_info_v1 { + struct nffw_meinfo meinfo[NFFW_MEINFO_CNT_V1]; + struct nffw_fwinfo fwinfo[NFFW_FWINFO_CNT_V1]; +}; + +struct nfp_nffw_info_v2 { + struct nffw_meinfo meinfo[NFFW_MEINFO_CNT_V2]; + struct nffw_fwinfo fwinfo[NFFW_FWINFO_CNT_V2]; +}; + +struct nfp_nffw_info_data { + uint32_t flags[2]; + union { + struct nfp_nffw_info_v1 v1; + struct nfp_nffw_info_v2 v2; + } info; +}; + +struct nfp_nffw_info { + struct nfp_cpp *cpp; + struct nfp_resource *res; + + struct nfp_nffw_info_data fwinf; +}; + +struct nfp_nffw_info *nfp_nffw_info_open(struct nfp_cpp *cpp); +void nfp_nffw_info_close(struct nfp_nffw_info *state); + +#endif diff --git a/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_nsp.c b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_nsp.c new file mode 100644 index 000000000..876a4017c --- /dev/null +++ b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_nsp.c @@ -0,0 +1,427 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Netronome Systems, Inc. + * All rights reserved. 
+ */ + +#define NFP_SUBSYS "nfp_nsp" + +#include +#include + +#include + +#include "nfp_cpp.h" +#include "nfp_nsp.h" +#include "nfp_resource.h" + +int +nfp_nsp_config_modified(struct nfp_nsp *state) +{ + return state->modified; +} + +void +nfp_nsp_config_set_modified(struct nfp_nsp *state, int modified) +{ + state->modified = modified; +} + +void * +nfp_nsp_config_entries(struct nfp_nsp *state) +{ + return state->entries; +} + +unsigned int +nfp_nsp_config_idx(struct nfp_nsp *state) +{ + return state->idx; +} + +void +nfp_nsp_config_set_state(struct nfp_nsp *state, void *entries, unsigned int idx) +{ + state->entries = entries; + state->idx = idx; +} + +void +nfp_nsp_config_clear_state(struct nfp_nsp *state) +{ + state->entries = NULL; + state->idx = 0; +} + +static void +nfp_nsp_print_extended_error(uint32_t ret_val) +{ + int i; + + if (!ret_val) + return; + + for (i = 0; i < (int)ARRAY_SIZE(nsp_errors); i++) + if (ret_val == (uint32_t)nsp_errors[i].code) + printf("err msg: %s\n", nsp_errors[i].msg); +} + +static int +nfp_nsp_check(struct nfp_nsp *state) +{ + struct nfp_cpp *cpp = state->cpp; + uint64_t nsp_status, reg; + uint32_t nsp_cpp; + int err; + + nsp_cpp = nfp_resource_cpp_id(state->res); + nsp_status = nfp_resource_address(state->res) + NSP_STATUS; + + err = nfp_cpp_readq(cpp, nsp_cpp, nsp_status, ®); + if (err < 0) + return err; + + if (FIELD_GET(NSP_STATUS_MAGIC, reg) != NSP_MAGIC) { + printf("Cannot detect NFP Service Processor\n"); + return -ENODEV; + } + + state->ver.major = FIELD_GET(NSP_STATUS_MAJOR, reg); + state->ver.minor = FIELD_GET(NSP_STATUS_MINOR, reg); + + if (state->ver.major != NSP_MAJOR || state->ver.minor < NSP_MINOR) { + printf("Unsupported ABI %hu.%hu\n", state->ver.major, + state->ver.minor); + return -EINVAL; + } + + if (reg & NSP_STATUS_BUSY) { + printf("Service processor busy!\n"); + return -EBUSY; + } + + return 0; +} + +/* + * nfp_nsp_open() - Prepare for communication and lock the NSP resource. + * @cpp: NFP CPP Handle + */ +struct nfp_nsp * +nfp_nsp_open(struct nfp_cpp *cpp) +{ + struct nfp_resource *res; + struct nfp_nsp *state; + int err; + + res = nfp_resource_acquire(cpp, NFP_RESOURCE_NSP); + if (!res) + return NULL; + + state = malloc(sizeof(*state)); + if (!state) { + nfp_resource_release(res); + return NULL; + } + memset(state, 0, sizeof(*state)); + state->cpp = cpp; + state->res = res; + + err = nfp_nsp_check(state); + if (err) { + nfp_nsp_close(state); + return NULL; + } + + return state; +} + +/* + * nfp_nsp_close() - Clean up and unlock the NSP resource. 
+ * @state: NFP SP state + */ +void +nfp_nsp_close(struct nfp_nsp *state) +{ + nfp_resource_release(state->res); + free(state); +} + +uint16_t +nfp_nsp_get_abi_ver_major(struct nfp_nsp *state) +{ + return state->ver.major; +} + +uint16_t +nfp_nsp_get_abi_ver_minor(struct nfp_nsp *state) +{ + return state->ver.minor; +} + +static int +nfp_nsp_wait_reg(struct nfp_cpp *cpp, uint64_t *reg, uint32_t nsp_cpp, + uint64_t addr, uint64_t mask, uint64_t val) +{ + struct timespec wait; + int count; + int err; + + wait.tv_sec = 0; + wait.tv_nsec = 25000000; + count = 0; + + for (;;) { + err = nfp_cpp_readq(cpp, nsp_cpp, addr, reg); + if (err < 0) + return err; + + if ((*reg & mask) == val) + return 0; + + nanosleep(&wait, 0); + if (count++ > 1000) + return -ETIMEDOUT; + } +} + +/* + * nfp_nsp_command() - Execute a command on the NFP Service Processor + * @state: NFP SP state + * @code: NFP SP Command Code + * @option: NFP SP Command Argument + * @buff_cpp: NFP SP Buffer CPP Address info + * @buff_addr: NFP SP Buffer Host address + * + * Return: 0 for success with no result + * + * positive value for NSP completion with a result code + * + * -EAGAIN if the NSP is not yet present + * -ENODEV if the NSP is not a supported model + * -EBUSY if the NSP is stuck + * -EINTR if interrupted while waiting for completion + * -ETIMEDOUT if the NSP took longer than 30 seconds to complete + */ +static int +nfp_nsp_command(struct nfp_nsp *state, uint16_t code, uint32_t option, + uint32_t buff_cpp, uint64_t buff_addr) +{ + uint64_t reg, ret_val, nsp_base, nsp_buffer, nsp_status, nsp_command; + struct nfp_cpp *cpp = state->cpp; + uint32_t nsp_cpp; + int err; + + nsp_cpp = nfp_resource_cpp_id(state->res); + nsp_base = nfp_resource_address(state->res); + nsp_status = nsp_base + NSP_STATUS; + nsp_command = nsp_base + NSP_COMMAND; + nsp_buffer = nsp_base + NSP_BUFFER; + + err = nfp_nsp_check(state); + if (err) + return err; + + if (!FIELD_FIT(NSP_BUFFER_CPP, buff_cpp >> 8) || + !FIELD_FIT(NSP_BUFFER_ADDRESS, buff_addr)) { + printf("Host buffer out of reach %08x %" PRIx64 "\n", + buff_cpp, buff_addr); + return -EINVAL; + } + + err = nfp_cpp_writeq(cpp, nsp_cpp, nsp_buffer, + FIELD_PREP(NSP_BUFFER_CPP, buff_cpp >> 8) | + FIELD_PREP(NSP_BUFFER_ADDRESS, buff_addr)); + if (err < 0) + return err; + + err = nfp_cpp_writeq(cpp, nsp_cpp, nsp_command, + FIELD_PREP(NSP_COMMAND_OPTION, option) | + FIELD_PREP(NSP_COMMAND_CODE, code) | + FIELD_PREP(NSP_COMMAND_START, 1)); + if (err < 0) + return err; + + /* Wait for NSP_COMMAND_START to go to 0 */ + err = nfp_nsp_wait_reg(cpp, ®, nsp_cpp, nsp_command, + NSP_COMMAND_START, 0); + if (err) { + printf("Error %d waiting for code 0x%04x to start\n", + err, code); + return err; + } + + /* Wait for NSP_STATUS_BUSY to go to 0 */ + err = nfp_nsp_wait_reg(cpp, ®, nsp_cpp, nsp_status, NSP_STATUS_BUSY, + 0); + if (err) { + printf("Error %d waiting for code 0x%04x to complete\n", + err, code); + return err; + } + + err = nfp_cpp_readq(cpp, nsp_cpp, nsp_command, &ret_val); + if (err < 0) + return err; + ret_val = FIELD_GET(NSP_COMMAND_OPTION, ret_val); + + err = FIELD_GET(NSP_STATUS_RESULT, reg); + if (err) { + printf("Result (error) code set: %d (%d) command: %d\n", + -err, (int)ret_val, code); + nfp_nsp_print_extended_error(ret_val); + return -err; + } + + return ret_val; +} + +#define SZ_1M 0x00100000 + +static int +nfp_nsp_command_buf(struct nfp_nsp *nsp, uint16_t code, uint32_t option, + const void *in_buf, unsigned int in_size, void *out_buf, + unsigned int out_size) +{ + struct nfp_cpp *cpp = 
nsp->cpp; + unsigned int max_size; + uint64_t reg, cpp_buf; + int ret, err; + uint32_t cpp_id; + + if (nsp->ver.minor < 13) { + printf("NSP: Code 0x%04x with buffer not supported\n", code); + printf("\t(ABI %hu.%hu)\n", nsp->ver.major, nsp->ver.minor); + return -EOPNOTSUPP; + } + + err = nfp_cpp_readq(cpp, nfp_resource_cpp_id(nsp->res), + nfp_resource_address(nsp->res) + + NSP_DFLT_BUFFER_CONFIG, + ®); + if (err < 0) + return err; + + max_size = RTE_MAX(in_size, out_size); + if (FIELD_GET(NSP_DFLT_BUFFER_SIZE_MB, reg) * SZ_1M < max_size) { + printf("NSP: default buffer too small for command 0x%04x\n", + code); + printf("\t(%llu < %u)\n", + FIELD_GET(NSP_DFLT_BUFFER_SIZE_MB, reg) * SZ_1M, + max_size); + return -EINVAL; + } + + err = nfp_cpp_readq(cpp, nfp_resource_cpp_id(nsp->res), + nfp_resource_address(nsp->res) + + NSP_DFLT_BUFFER, + ®); + if (err < 0) + return err; + + cpp_id = FIELD_GET(NSP_BUFFER_CPP, reg) << 8; + cpp_buf = FIELD_GET(NSP_BUFFER_ADDRESS, reg); + + if (in_buf && in_size) { + err = nfp_cpp_write(cpp, cpp_id, cpp_buf, in_buf, in_size); + if (err < 0) + return err; + } + /* Zero out remaining part of the buffer */ + if (out_buf && out_size && out_size > in_size) { + memset(out_buf, 0, out_size - in_size); + err = nfp_cpp_write(cpp, cpp_id, cpp_buf + in_size, out_buf, + out_size - in_size); + if (err < 0) + return err; + } + + ret = nfp_nsp_command(nsp, code, option, cpp_id, cpp_buf); + if (ret < 0) + return ret; + + if (out_buf && out_size) { + err = nfp_cpp_read(cpp, cpp_id, cpp_buf, out_buf, out_size); + if (err < 0) + return err; + } + + return ret; +} + +int +nfp_nsp_wait(struct nfp_nsp *state) +{ + struct timespec wait; + int count; + int err; + + wait.tv_sec = 0; + wait.tv_nsec = 25000000; + count = 0; + + for (;;) { + err = nfp_nsp_command(state, SPCODE_NOOP, 0, 0, 0); + if (err != -EAGAIN) + break; + + nanosleep(&wait, 0); + + if (count++ > 1000) { + err = -ETIMEDOUT; + break; + } + } + if (err) + printf("NSP failed to respond %d\n", err); + + return err; +} + +int +nfp_nsp_device_soft_reset(struct nfp_nsp *state) +{ + return nfp_nsp_command(state, SPCODE_SOFT_RESET, 0, 0, 0); +} + +int +nfp_nsp_mac_reinit(struct nfp_nsp *state) +{ + return nfp_nsp_command(state, SPCODE_MAC_INIT, 0, 0, 0); +} + +int +nfp_nsp_load_fw(struct nfp_nsp *state, void *buf, unsigned int size) +{ + return nfp_nsp_command_buf(state, SPCODE_FW_LOAD, size, buf, size, + NULL, 0); +} + +int +nfp_nsp_read_eth_table(struct nfp_nsp *state, void *buf, unsigned int size) +{ + return nfp_nsp_command_buf(state, SPCODE_ETH_RESCAN, size, NULL, 0, + buf, size); +} + +int +nfp_nsp_write_eth_table(struct nfp_nsp *state, const void *buf, + unsigned int size) +{ + return nfp_nsp_command_buf(state, SPCODE_ETH_CONTROL, size, buf, size, + NULL, 0); +} + +int +nfp_nsp_read_identify(struct nfp_nsp *state, void *buf, unsigned int size) +{ + return nfp_nsp_command_buf(state, SPCODE_NSP_IDENTIFY, size, NULL, 0, + buf, size); +} + +int +nfp_nsp_read_sensors(struct nfp_nsp *state, unsigned int sensor_mask, void *buf, + unsigned int size) +{ + return nfp_nsp_command_buf(state, SPCODE_NSP_SENSORS, sensor_mask, NULL, + 0, buf, size); +} diff --git a/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_nsp.h b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_nsp.h new file mode 100644 index 000000000..c9c7b0d0f --- /dev/null +++ b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_nsp.h @@ -0,0 +1,304 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Netronome Systems, Inc. + * All rights reserved. 
+ */ + +#ifndef NSP_NSP_H +#define NSP_NSP_H 1 + +#include "nfp_cpp.h" +#include "nfp_nsp.h" + +#define GENMASK_ULL(h, l) \ + (((~0ULL) - (1ULL << (l)) + 1) & \ + (~0ULL >> (64 - 1 - (h)))) + +#define __bf_shf(x) (__builtin_ffsll(x) - 1) + +#define FIELD_GET(_mask, _reg) \ + (__extension__ ({ \ + typeof(_mask) _x = (_mask); \ + (typeof(_x))(((_reg) & (_x)) >> __bf_shf(_x)); \ + })) + +#define FIELD_FIT(_mask, _val) \ + (__extension__ ({ \ + typeof(_mask) _x = (_mask); \ + !((((typeof(_x))_val) << __bf_shf(_x)) & ~(_x)); \ + })) + +#define FIELD_PREP(_mask, _val) \ + (__extension__ ({ \ + typeof(_mask) _x = (_mask); \ + ((typeof(_x))(_val) << __bf_shf(_x)) & (_x); \ + })) + +/* Offsets relative to the CSR base */ +#define NSP_STATUS 0x00 +#define NSP_STATUS_MAGIC GENMASK_ULL(63, 48) +#define NSP_STATUS_MAJOR GENMASK_ULL(47, 44) +#define NSP_STATUS_MINOR GENMASK_ULL(43, 32) +#define NSP_STATUS_CODE GENMASK_ULL(31, 16) +#define NSP_STATUS_RESULT GENMASK_ULL(15, 8) +#define NSP_STATUS_BUSY BIT_ULL(0) + +#define NSP_COMMAND 0x08 +#define NSP_COMMAND_OPTION GENMASK_ULL(63, 32) +#define NSP_COMMAND_CODE GENMASK_ULL(31, 16) +#define NSP_COMMAND_START BIT_ULL(0) + +/* CPP address to retrieve the data from */ +#define NSP_BUFFER 0x10 +#define NSP_BUFFER_CPP GENMASK_ULL(63, 40) +#define NSP_BUFFER_PCIE GENMASK_ULL(39, 38) +#define NSP_BUFFER_ADDRESS GENMASK_ULL(37, 0) + +#define NSP_DFLT_BUFFER 0x18 + +#define NSP_DFLT_BUFFER_CONFIG 0x20 +#define NSP_DFLT_BUFFER_SIZE_MB GENMASK_ULL(7, 0) + +#define NSP_MAGIC 0xab10 +#define NSP_MAJOR 0 +#define NSP_MINOR 8 + +#define NSP_CODE_MAJOR GENMASK(15, 12) +#define NSP_CODE_MINOR GENMASK(11, 0) + +enum nfp_nsp_cmd { + SPCODE_NOOP = 0, /* No operation */ + SPCODE_SOFT_RESET = 1, /* Soft reset the NFP */ + SPCODE_FW_DEFAULT = 2, /* Load default (UNDI) FW */ + SPCODE_PHY_INIT = 3, /* Initialize the PHY */ + SPCODE_MAC_INIT = 4, /* Initialize the MAC */ + SPCODE_PHY_RXADAPT = 5, /* Re-run PHY RX Adaptation */ + SPCODE_FW_LOAD = 6, /* Load fw from buffer, len in option */ + SPCODE_ETH_RESCAN = 7, /* Rescan ETHs, write ETH_TABLE to buf */ + SPCODE_ETH_CONTROL = 8, /* Update media config from buffer */ + SPCODE_NSP_SENSORS = 12, /* Read NSP sensor(s) */ + SPCODE_NSP_IDENTIFY = 13, /* Read NSP version */ +}; + +static const struct { + int code; + const char *msg; +} nsp_errors[] = { + { 6010, "could not map to phy for port" }, + { 6011, "not an allowed rate/lanes for port" }, + { 6012, "not an allowed rate/lanes for port" }, + { 6013, "high/low error, change other port first" }, + { 6014, "config not found in flash" }, +}; + +struct nfp_nsp { + struct nfp_cpp *cpp; + struct nfp_resource *res; + struct { + uint16_t major; + uint16_t minor; + } ver; + + /* Eth table config state */ + int modified; + unsigned int idx; + void *entries; +}; + +struct nfp_nsp *nfp_nsp_open(struct nfp_cpp *cpp); +void nfp_nsp_close(struct nfp_nsp *state); +uint16_t nfp_nsp_get_abi_ver_major(struct nfp_nsp *state); +uint16_t nfp_nsp_get_abi_ver_minor(struct nfp_nsp *state); +int nfp_nsp_wait(struct nfp_nsp *state); +int nfp_nsp_device_soft_reset(struct nfp_nsp *state); +int nfp_nsp_load_fw(struct nfp_nsp *state, void *buf, unsigned int size); +int nfp_nsp_mac_reinit(struct nfp_nsp *state); +int nfp_nsp_read_identify(struct nfp_nsp *state, void *buf, unsigned int size); +int nfp_nsp_read_sensors(struct nfp_nsp *state, unsigned int sensor_mask, + void *buf, unsigned int size); + +static inline int nfp_nsp_has_mac_reinit(struct nfp_nsp *state) +{ + return nfp_nsp_get_abi_ver_minor(state) > 20; 
+} + +enum nfp_eth_interface { + NFP_INTERFACE_NONE = 0, + NFP_INTERFACE_SFP = 1, + NFP_INTERFACE_SFPP = 10, + NFP_INTERFACE_SFP28 = 28, + NFP_INTERFACE_QSFP = 40, + NFP_INTERFACE_CXP = 100, + NFP_INTERFACE_QSFP28 = 112, +}; + +enum nfp_eth_media { + NFP_MEDIA_DAC_PASSIVE = 0, + NFP_MEDIA_DAC_ACTIVE, + NFP_MEDIA_FIBRE, +}; + +enum nfp_eth_aneg { + NFP_ANEG_AUTO = 0, + NFP_ANEG_SEARCH, + NFP_ANEG_25G_CONSORTIUM, + NFP_ANEG_25G_IEEE, + NFP_ANEG_DISABLED, +}; + +enum nfp_eth_fec { + NFP_FEC_AUTO_BIT = 0, + NFP_FEC_BASER_BIT, + NFP_FEC_REED_SOLOMON_BIT, + NFP_FEC_DISABLED_BIT, +}; + +#define NFP_FEC_AUTO BIT(NFP_FEC_AUTO_BIT) +#define NFP_FEC_BASER BIT(NFP_FEC_BASER_BIT) +#define NFP_FEC_REED_SOLOMON BIT(NFP_FEC_REED_SOLOMON_BIT) +#define NFP_FEC_DISABLED BIT(NFP_FEC_DISABLED_BIT) + +#define ETH_ALEN 6 + +/** + * struct nfp_eth_table - ETH table information + * @count: number of table entries + * @max_index: max of @index fields of all @ports + * @ports: table of ports + * + * @eth_index: port index according to legacy ethX numbering + * @index: chip-wide first channel index + * @nbi: NBI index + * @base: first channel index (within NBI) + * @lanes: number of channels + * @speed: interface speed (in Mbps) + * @interface: interface (module) plugged in + * @media: media type of the @interface + * @fec: forward error correction mode + * @aneg: auto negotiation mode + * @mac_addr: interface MAC address + * @label_port: port id + * @label_subport: id of interface within port (for split ports) + * @enabled: is enabled? + * @tx_enabled: is TX enabled? + * @rx_enabled: is RX enabled? + * @override_changed: is media reconfig pending? + * + * @port_type: one of %PORT_* defines for ethtool + * @port_lanes: total number of lanes on the port (sum of lanes of all subports) + * @is_split: is interface part of a split port + * @fec_modes_supported: bitmap of FEC modes supported + */ +struct nfp_eth_table { + unsigned int count; + unsigned int max_index; + struct nfp_eth_table_port { + unsigned int eth_index; + unsigned int index; + unsigned int nbi; + unsigned int base; + unsigned int lanes; + unsigned int speed; + + unsigned int interface; + enum nfp_eth_media media; + + enum nfp_eth_fec fec; + enum nfp_eth_aneg aneg; + + uint8_t mac_addr[ETH_ALEN]; + + uint8_t label_port; + uint8_t label_subport; + + int enabled; + int tx_enabled; + int rx_enabled; + + int override_changed; + + /* Computed fields */ + uint8_t port_type; + + unsigned int port_lanes; + + int is_split; + + unsigned int fec_modes_supported; + } ports[0]; +}; + +struct nfp_eth_table *nfp_eth_read_ports(struct nfp_cpp *cpp); + +int nfp_eth_set_mod_enable(struct nfp_cpp *cpp, unsigned int idx, int enable); +int nfp_eth_set_configured(struct nfp_cpp *cpp, unsigned int idx, + int configed); +int +nfp_eth_set_fec(struct nfp_cpp *cpp, unsigned int idx, enum nfp_eth_fec mode); + +int nfp_nsp_read_eth_table(struct nfp_nsp *state, void *buf, unsigned int size); +int nfp_nsp_write_eth_table(struct nfp_nsp *state, const void *buf, + unsigned int size); +void nfp_nsp_config_set_state(struct nfp_nsp *state, void *entries, + unsigned int idx); +void nfp_nsp_config_clear_state(struct nfp_nsp *state); +void nfp_nsp_config_set_modified(struct nfp_nsp *state, int modified); +void *nfp_nsp_config_entries(struct nfp_nsp *state); +int nfp_nsp_config_modified(struct nfp_nsp *state); +unsigned int nfp_nsp_config_idx(struct nfp_nsp *state); + +static inline int nfp_eth_can_support_fec(struct nfp_eth_table_port *eth_port) +{ + return !!eth_port->fec_modes_supported; +} 
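The nfp_nsp.h declarations above describe the NSP-managed port table: nfp_eth_read_ports() returns a heap-allocated struct nfp_eth_table whose ports[] entries expose speed, lane and FEC capability fields, and nfp_eth_can_support_fec() tests the per-port FEC bitmap. A minimal usage sketch of that API follows; it is an editorial illustration, not part of the upstream 18.2.2 sources, and the helper name nfp_eth_count_fec_capable_ports() is hypothetical. It assumes the caller already holds an open struct nfp_cpp handle and that <stdlib.h> is available for free(); in this userspace port the table is allocated with malloc(), so free() is the matching release call.

/*
 * Illustrative sketch (editorial, hedged): count the physical ports that
 * advertise any FEC mode, using only the declarations in this header.
 */
static inline unsigned int
nfp_eth_count_fec_capable_ports(struct nfp_cpp *cpp)
{
	struct nfp_eth_table *eth_tbl;
	unsigned int i, fec_capable = 0;

	eth_tbl = nfp_eth_read_ports(cpp);	/* NULL on NSP open/read failure */
	if (!eth_tbl)
		return 0;

	for (i = 0; i < eth_tbl->count; i++)
		if (nfp_eth_can_support_fec(&eth_tbl->ports[i]))
			fec_capable++;

	free(eth_tbl);				/* table is heap-allocated by the reader */
	return fec_capable;
}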
+ +static inline unsigned int +nfp_eth_supported_fec_modes(struct nfp_eth_table_port *eth_port) +{ + return eth_port->fec_modes_supported; +} + +struct nfp_nsp *nfp_eth_config_start(struct nfp_cpp *cpp, unsigned int idx); +int nfp_eth_config_commit_end(struct nfp_nsp *nsp); +void nfp_eth_config_cleanup_end(struct nfp_nsp *nsp); + +int __nfp_eth_set_aneg(struct nfp_nsp *nsp, enum nfp_eth_aneg mode); +int __nfp_eth_set_speed(struct nfp_nsp *nsp, unsigned int speed); +int __nfp_eth_set_split(struct nfp_nsp *nsp, unsigned int lanes); + +/** + * struct nfp_nsp_identify - NSP static information + * @version: opaque version string + * @flags: version flags + * @br_primary: branch id of primary bootloader + * @br_secondary: branch id of secondary bootloader + * @br_nsp: branch id of NSP + * @primary: version of primarary bootloader + * @secondary: version id of secondary bootloader + * @nsp: version id of NSP + * @sensor_mask: mask of present sensors available on NIC + */ +struct nfp_nsp_identify { + char version[40]; + uint8_t flags; + uint8_t br_primary; + uint8_t br_secondary; + uint8_t br_nsp; + uint16_t primary; + uint16_t secondary; + uint16_t nsp; + uint64_t sensor_mask; +}; + +struct nfp_nsp_identify *__nfp_nsp_identify(struct nfp_nsp *nsp); + +enum nfp_nsp_sensor_id { + NFP_SENSOR_CHIP_TEMPERATURE, + NFP_SENSOR_ASSEMBLY_POWER, + NFP_SENSOR_ASSEMBLY_12V_POWER, + NFP_SENSOR_ASSEMBLY_3V3_POWER, +}; + +int nfp_hwmon_read_sensor(struct nfp_cpp *cpp, enum nfp_nsp_sensor_id id, + long *val); + +#endif diff --git a/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_nsp_cmds.c b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_nsp_cmds.c new file mode 100644 index 000000000..bfd1eddb3 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_nsp_cmds.c @@ -0,0 +1,109 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Netronome Systems, Inc. + * All rights reserved. 
+ */ + +#include +#include +#include "nfp_cpp.h" +#include "nfp_nsp.h" +#include "nfp_nffw.h" + +struct nsp_identify { + uint8_t version[40]; + uint8_t flags; + uint8_t br_primary; + uint8_t br_secondary; + uint8_t br_nsp; + uint16_t primary; + uint16_t secondary; + uint16_t nsp; + uint8_t reserved[6]; + uint64_t sensor_mask; +}; + +struct nfp_nsp_identify * +__nfp_nsp_identify(struct nfp_nsp *nsp) +{ + struct nfp_nsp_identify *nspi = NULL; + struct nsp_identify *ni; + int ret; + + if (nfp_nsp_get_abi_ver_minor(nsp) < 15) + return NULL; + + ni = malloc(sizeof(*ni)); + if (!ni) + return NULL; + + memset(ni, 0, sizeof(*ni)); + ret = nfp_nsp_read_identify(nsp, ni, sizeof(*ni)); + if (ret < 0) { + printf("reading bsp version failed %d\n", + ret); + goto exit_free; + } + + nspi = malloc(sizeof(*nspi)); + if (!nspi) + goto exit_free; + + memset(nspi, 0, sizeof(*nspi)); + memcpy(nspi->version, ni->version, sizeof(nspi->version)); + nspi->version[sizeof(nspi->version) - 1] = '\0'; + nspi->flags = ni->flags; + nspi->br_primary = ni->br_primary; + nspi->br_secondary = ni->br_secondary; + nspi->br_nsp = ni->br_nsp; + nspi->primary = rte_le_to_cpu_16(ni->primary); + nspi->secondary = rte_le_to_cpu_16(ni->secondary); + nspi->nsp = rte_le_to_cpu_16(ni->nsp); + nspi->sensor_mask = rte_le_to_cpu_64(ni->sensor_mask); + +exit_free: + free(ni); + return nspi; +} + +struct nfp_sensors { + uint32_t chip_temp; + uint32_t assembly_power; + uint32_t assembly_12v_power; + uint32_t assembly_3v3_power; +}; + +int +nfp_hwmon_read_sensor(struct nfp_cpp *cpp, enum nfp_nsp_sensor_id id, long *val) +{ + struct nfp_sensors s; + struct nfp_nsp *nsp; + int ret; + + nsp = nfp_nsp_open(cpp); + if (!nsp) + return -EIO; + + ret = nfp_nsp_read_sensors(nsp, BIT(id), &s, sizeof(s)); + nfp_nsp_close(nsp); + + if (ret < 0) + return ret; + + switch (id) { + case NFP_SENSOR_CHIP_TEMPERATURE: + *val = rte_le_to_cpu_32(s.chip_temp); + break; + case NFP_SENSOR_ASSEMBLY_POWER: + *val = rte_le_to_cpu_32(s.assembly_power); + break; + case NFP_SENSOR_ASSEMBLY_12V_POWER: + *val = rte_le_to_cpu_32(s.assembly_12v_power); + break; + case NFP_SENSOR_ASSEMBLY_3V3_POWER: + *val = rte_le_to_cpu_32(s.assembly_3v3_power); + break; + default: + return -EINVAL; + } + return 0; +} diff --git a/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_nsp_eth.c b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_nsp_eth.c new file mode 100644 index 000000000..67946891a --- /dev/null +++ b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_nsp_eth.c @@ -0,0 +1,665 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Netronome Systems, Inc. + * All rights reserved. 
+ */ + +#include +#include +#include +#include "nfp_cpp.h" +#include "nfp_nsp.h" +#include "nfp6000/nfp6000.h" + +#define GENMASK_ULL(h, l) \ + (((~0ULL) - (1ULL << (l)) + 1) & \ + (~0ULL >> (64 - 1 - (h)))) + +#define __bf_shf(x) (__builtin_ffsll(x) - 1) + +#define FIELD_GET(_mask, _reg) \ + (__extension__ ({ \ + typeof(_mask) _x = (_mask); \ + (typeof(_x))(((_reg) & (_x)) >> __bf_shf(_x)); \ + })) + +#define FIELD_FIT(_mask, _val) \ + (__extension__ ({ \ + typeof(_mask) _x = (_mask); \ + !((((typeof(_x))_val) << __bf_shf(_x)) & ~(_x)); \ + })) + +#define FIELD_PREP(_mask, _val) \ + (__extension__ ({ \ + typeof(_mask) _x = (_mask); \ + ((typeof(_x))(_val) << __bf_shf(_x)) & (_x); \ + })) + +#define NSP_ETH_NBI_PORT_COUNT 24 +#define NSP_ETH_MAX_COUNT (2 * NSP_ETH_NBI_PORT_COUNT) +#define NSP_ETH_TABLE_SIZE (NSP_ETH_MAX_COUNT * \ + sizeof(union eth_table_entry)) + +#define NSP_ETH_PORT_LANES GENMASK_ULL(3, 0) +#define NSP_ETH_PORT_INDEX GENMASK_ULL(15, 8) +#define NSP_ETH_PORT_LABEL GENMASK_ULL(53, 48) +#define NSP_ETH_PORT_PHYLABEL GENMASK_ULL(59, 54) +#define NSP_ETH_PORT_FEC_SUPP_BASER BIT_ULL(60) +#define NSP_ETH_PORT_FEC_SUPP_RS BIT_ULL(61) + +#define NSP_ETH_PORT_LANES_MASK rte_cpu_to_le_64(NSP_ETH_PORT_LANES) + +#define NSP_ETH_STATE_CONFIGURED BIT_ULL(0) +#define NSP_ETH_STATE_ENABLED BIT_ULL(1) +#define NSP_ETH_STATE_TX_ENABLED BIT_ULL(2) +#define NSP_ETH_STATE_RX_ENABLED BIT_ULL(3) +#define NSP_ETH_STATE_RATE GENMASK_ULL(11, 8) +#define NSP_ETH_STATE_INTERFACE GENMASK_ULL(19, 12) +#define NSP_ETH_STATE_MEDIA GENMASK_ULL(21, 20) +#define NSP_ETH_STATE_OVRD_CHNG BIT_ULL(22) +#define NSP_ETH_STATE_ANEG GENMASK_ULL(25, 23) +#define NSP_ETH_STATE_FEC GENMASK_ULL(27, 26) + +#define NSP_ETH_CTRL_CONFIGURED BIT_ULL(0) +#define NSP_ETH_CTRL_ENABLED BIT_ULL(1) +#define NSP_ETH_CTRL_TX_ENABLED BIT_ULL(2) +#define NSP_ETH_CTRL_RX_ENABLED BIT_ULL(3) +#define NSP_ETH_CTRL_SET_RATE BIT_ULL(4) +#define NSP_ETH_CTRL_SET_LANES BIT_ULL(5) +#define NSP_ETH_CTRL_SET_ANEG BIT_ULL(6) +#define NSP_ETH_CTRL_SET_FEC BIT_ULL(7) + +/* Which connector port. 
*/ +#define PORT_TP 0x00 +#define PORT_AUI 0x01 +#define PORT_MII 0x02 +#define PORT_FIBRE 0x03 +#define PORT_BNC 0x04 +#define PORT_DA 0x05 +#define PORT_NONE 0xef +#define PORT_OTHER 0xff + +#define SPEED_10 10 +#define SPEED_100 100 +#define SPEED_1000 1000 +#define SPEED_2500 2500 +#define SPEED_5000 5000 +#define SPEED_10000 10000 +#define SPEED_14000 14000 +#define SPEED_20000 20000 +#define SPEED_25000 25000 +#define SPEED_40000 40000 +#define SPEED_50000 50000 +#define SPEED_56000 56000 +#define SPEED_100000 100000 + +enum nfp_eth_raw { + NSP_ETH_RAW_PORT = 0, + NSP_ETH_RAW_STATE, + NSP_ETH_RAW_MAC, + NSP_ETH_RAW_CONTROL, + + NSP_ETH_NUM_RAW +}; + +enum nfp_eth_rate { + RATE_INVALID = 0, + RATE_10M, + RATE_100M, + RATE_1G, + RATE_10G, + RATE_25G, +}; + +union eth_table_entry { + struct { + uint64_t port; + uint64_t state; + uint8_t mac_addr[6]; + uint8_t resv[2]; + uint64_t control; + }; + uint64_t raw[NSP_ETH_NUM_RAW]; +}; + +static const struct { + enum nfp_eth_rate rate; + unsigned int speed; +} nsp_eth_rate_tbl[] = { + { RATE_INVALID, 0, }, + { RATE_10M, SPEED_10, }, + { RATE_100M, SPEED_100, }, + { RATE_1G, SPEED_1000, }, + { RATE_10G, SPEED_10000, }, + { RATE_25G, SPEED_25000, }, +}; + +static unsigned int +nfp_eth_rate2speed(enum nfp_eth_rate rate) +{ + int i; + + for (i = 0; i < (int)ARRAY_SIZE(nsp_eth_rate_tbl); i++) + if (nsp_eth_rate_tbl[i].rate == rate) + return nsp_eth_rate_tbl[i].speed; + + return 0; +} + +static unsigned int +nfp_eth_speed2rate(unsigned int speed) +{ + int i; + + for (i = 0; i < (int)ARRAY_SIZE(nsp_eth_rate_tbl); i++) + if (nsp_eth_rate_tbl[i].speed == speed) + return nsp_eth_rate_tbl[i].rate; + + return RATE_INVALID; +} + +static void +nfp_eth_copy_mac_reverse(uint8_t *dst, const uint8_t *src) +{ + int i; + + for (i = 0; i < (int)ETH_ALEN; i++) + dst[ETH_ALEN - i - 1] = src[i]; +} + +static void +nfp_eth_port_translate(struct nfp_nsp *nsp, const union eth_table_entry *src, + unsigned int index, struct nfp_eth_table_port *dst) +{ + unsigned int rate; + unsigned int fec; + uint64_t port, state; + + port = rte_le_to_cpu_64(src->port); + state = rte_le_to_cpu_64(src->state); + + dst->eth_index = FIELD_GET(NSP_ETH_PORT_INDEX, port); + dst->index = index; + dst->nbi = index / NSP_ETH_NBI_PORT_COUNT; + dst->base = index % NSP_ETH_NBI_PORT_COUNT; + dst->lanes = FIELD_GET(NSP_ETH_PORT_LANES, port); + + dst->enabled = FIELD_GET(NSP_ETH_STATE_ENABLED, state); + dst->tx_enabled = FIELD_GET(NSP_ETH_STATE_TX_ENABLED, state); + dst->rx_enabled = FIELD_GET(NSP_ETH_STATE_RX_ENABLED, state); + + rate = nfp_eth_rate2speed(FIELD_GET(NSP_ETH_STATE_RATE, state)); + dst->speed = dst->lanes * rate; + + dst->interface = FIELD_GET(NSP_ETH_STATE_INTERFACE, state); + dst->media = FIELD_GET(NSP_ETH_STATE_MEDIA, state); + + nfp_eth_copy_mac_reverse(dst->mac_addr, src->mac_addr); + + dst->label_port = FIELD_GET(NSP_ETH_PORT_PHYLABEL, port); + dst->label_subport = FIELD_GET(NSP_ETH_PORT_LABEL, port); + + if (nfp_nsp_get_abi_ver_minor(nsp) < 17) + return; + + dst->override_changed = FIELD_GET(NSP_ETH_STATE_OVRD_CHNG, state); + dst->aneg = FIELD_GET(NSP_ETH_STATE_ANEG, state); + + if (nfp_nsp_get_abi_ver_minor(nsp) < 22) + return; + + fec = FIELD_GET(NSP_ETH_PORT_FEC_SUPP_BASER, port); + dst->fec_modes_supported |= fec << NFP_FEC_BASER_BIT; + fec = FIELD_GET(NSP_ETH_PORT_FEC_SUPP_RS, port); + dst->fec_modes_supported |= fec << NFP_FEC_REED_SOLOMON_BIT; + if (dst->fec_modes_supported) + dst->fec_modes_supported |= NFP_FEC_AUTO | NFP_FEC_DISABLED; + + dst->fec = 1 << 
FIELD_GET(NSP_ETH_STATE_FEC, state); +} + +static void +nfp_eth_calc_port_geometry(struct nfp_eth_table *table) +{ + unsigned int i, j; + + for (i = 0; i < table->count; i++) { + table->max_index = RTE_MAX(table->max_index, + table->ports[i].index); + + for (j = 0; j < table->count; j++) { + if (table->ports[i].label_port != + table->ports[j].label_port) + continue; + table->ports[i].port_lanes += table->ports[j].lanes; + + if (i == j) + continue; + if (table->ports[i].label_subport == + table->ports[j].label_subport) + printf("Port %d subport %d is a duplicate\n", + table->ports[i].label_port, + table->ports[i].label_subport); + + table->ports[i].is_split = 1; + } + } +} + +static void +nfp_eth_calc_port_type(struct nfp_eth_table_port *entry) +{ + if (entry->interface == NFP_INTERFACE_NONE) { + entry->port_type = PORT_NONE; + return; + } + + if (entry->media == NFP_MEDIA_FIBRE) + entry->port_type = PORT_FIBRE; + else + entry->port_type = PORT_DA; +} + +static struct nfp_eth_table * +__nfp_eth_read_ports(struct nfp_nsp *nsp) +{ + union eth_table_entry *entries; + struct nfp_eth_table *table; + uint32_t table_sz; + int i, j, ret, cnt = 0; + + entries = malloc(NSP_ETH_TABLE_SIZE); + if (!entries) + return NULL; + + memset(entries, 0, NSP_ETH_TABLE_SIZE); + ret = nfp_nsp_read_eth_table(nsp, entries, NSP_ETH_TABLE_SIZE); + if (ret < 0) { + printf("reading port table failed %d\n", ret); + goto err; + } + + for (i = 0; i < NSP_ETH_MAX_COUNT; i++) + if (entries[i].port & NSP_ETH_PORT_LANES_MASK) + cnt++; + + /* Some versions of flash will give us 0 instead of port count. For + * those that give a port count, verify it against the value calculated + * above. + */ + if (ret && ret != cnt) { + printf("table entry count (%d) unmatch entries present (%d)\n", + ret, cnt); + goto err; + } + + table_sz = sizeof(*table) + sizeof(struct nfp_eth_table_port) * cnt; + table = malloc(table_sz); + if (!table) + goto err; + + memset(table, 0, table_sz); + table->count = cnt; + for (i = 0, j = 0; i < NSP_ETH_MAX_COUNT; i++) + if (entries[i].port & NSP_ETH_PORT_LANES_MASK) + nfp_eth_port_translate(nsp, &entries[i], i, + &table->ports[j++]); + + nfp_eth_calc_port_geometry(table); + for (i = 0; i < (int)table->count; i++) + nfp_eth_calc_port_type(&table->ports[i]); + + free(entries); + + return table; + +err: + free(entries); + return NULL; +} + +/* + * nfp_eth_read_ports() - retrieve port information + * @cpp: NFP CPP handle + * + * Read the port information from the device. Returned structure should + * be freed with kfree() once no longer needed. + * + * Return: populated ETH table or NULL on error. 
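+ *
+ * Minimal usage sketch (illustrative only; the table is allocated with
+ * malloc(), so plain free() releases it):
+ *
+ *	struct nfp_eth_table *table = nfp_eth_read_ports(cpp);
+ *	unsigned int i;
+ *
+ *	if (table) {
+ *		for (i = 0; i < table->count; i++)
+ *			printf("eth%u: speed %u\n",
+ *			       table->ports[i].eth_index,
+ *			       table->ports[i].speed);
+ *		free(table);
+ *	}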
+ */ +struct nfp_eth_table * +nfp_eth_read_ports(struct nfp_cpp *cpp) +{ + struct nfp_eth_table *ret; + struct nfp_nsp *nsp; + + nsp = nfp_nsp_open(cpp); + if (!nsp) + return NULL; + + ret = __nfp_eth_read_ports(nsp); + nfp_nsp_close(nsp); + + return ret; +} + +struct nfp_nsp * +nfp_eth_config_start(struct nfp_cpp *cpp, unsigned int idx) +{ + union eth_table_entry *entries; + struct nfp_nsp *nsp; + int ret; + + entries = malloc(NSP_ETH_TABLE_SIZE); + if (!entries) + return NULL; + + memset(entries, 0, NSP_ETH_TABLE_SIZE); + nsp = nfp_nsp_open(cpp); + if (!nsp) { + free(entries); + return nsp; + } + + ret = nfp_nsp_read_eth_table(nsp, entries, NSP_ETH_TABLE_SIZE); + if (ret < 0) { + printf("reading port table failed %d\n", ret); + goto err; + } + + if (!(entries[idx].port & NSP_ETH_PORT_LANES_MASK)) { + printf("trying to set port state on disabled port %d\n", idx); + goto err; + } + + nfp_nsp_config_set_state(nsp, entries, idx); + return nsp; + +err: + nfp_nsp_close(nsp); + free(entries); + return NULL; +} + +void +nfp_eth_config_cleanup_end(struct nfp_nsp *nsp) +{ + union eth_table_entry *entries = nfp_nsp_config_entries(nsp); + + nfp_nsp_config_set_modified(nsp, 0); + nfp_nsp_config_clear_state(nsp); + nfp_nsp_close(nsp); + free(entries); +} + +/* + * nfp_eth_config_commit_end() - perform recorded configuration changes + * @nsp: NFP NSP handle returned from nfp_eth_config_start() + * + * Perform the configuration which was requested with __nfp_eth_set_*() + * helpers and recorded in @nsp state. If device was already configured + * as requested or no __nfp_eth_set_*() operations were made no NSP command + * will be performed. + * + * Return: + * 0 - configuration successful; + * 1 - no changes were needed; + * -ERRNO - configuration failed. + */ +int +nfp_eth_config_commit_end(struct nfp_nsp *nsp) +{ + union eth_table_entry *entries = nfp_nsp_config_entries(nsp); + int ret = 1; + + if (nfp_nsp_config_modified(nsp)) { + ret = nfp_nsp_write_eth_table(nsp, entries, NSP_ETH_TABLE_SIZE); + ret = ret < 0 ? ret : 0; + } + + nfp_eth_config_cleanup_end(nsp); + + return ret; +} + +/* + * nfp_eth_set_mod_enable() - set PHY module enable control bit + * @cpp: NFP CPP handle + * @idx: NFP chip-wide port index + * @enable: Desired state + * + * Enable or disable PHY module (this usually means setting the TX lanes + * disable bits). + * + * Return: + * 0 - configuration successful; + * 1 - no changes were needed; + * -ERRNO - configuration failed. + */ +int +nfp_eth_set_mod_enable(struct nfp_cpp *cpp, unsigned int idx, int enable) +{ + union eth_table_entry *entries; + struct nfp_nsp *nsp; + uint64_t reg; + + nsp = nfp_eth_config_start(cpp, idx); + if (!nsp) + return -1; + + entries = nfp_nsp_config_entries(nsp); + + /* Check if we are already in requested state */ + reg = rte_le_to_cpu_64(entries[idx].state); + if (enable != (int)FIELD_GET(NSP_ETH_CTRL_ENABLED, reg)) { + reg = rte_le_to_cpu_64(entries[idx].control); + reg &= ~NSP_ETH_CTRL_ENABLED; + reg |= FIELD_PREP(NSP_ETH_CTRL_ENABLED, enable); + entries[idx].control = rte_cpu_to_le_64(reg); + + nfp_nsp_config_set_modified(nsp, 1); + } + + return nfp_eth_config_commit_end(nsp); +} + +/* + * nfp_eth_set_configured() - set PHY module configured control bit + * @cpp: NFP CPP handle + * @idx: NFP chip-wide port index + * @configed: Desired state + * + * Set the ifup/ifdown state on the PHY. + * + * Return: + * 0 - configuration successful; + * 1 - no changes were needed; + * -ERRNO - configuration failed. 
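+ *
+ * Illustrative use (error handling trimmed; management FW older than
+ * ABI 20 reports -EOPNOTSUPP, which callers may want to tolerate):
+ *
+ *	err = nfp_eth_set_configured(cpp, idx, 0);
+ *	if (err < 0 && err != -EOPNOTSUPP)
+ *		printf("could not bring port %u down\n", idx);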
+ */ +int +nfp_eth_set_configured(struct nfp_cpp *cpp, unsigned int idx, int configed) +{ + union eth_table_entry *entries; + struct nfp_nsp *nsp; + uint64_t reg; + + nsp = nfp_eth_config_start(cpp, idx); + if (!nsp) + return -EIO; + + /* + * Older ABI versions did support this feature, however this has only + * been reliable since ABI 20. + */ + if (nfp_nsp_get_abi_ver_minor(nsp) < 20) { + nfp_eth_config_cleanup_end(nsp); + return -EOPNOTSUPP; + } + + entries = nfp_nsp_config_entries(nsp); + + /* Check if we are already in requested state */ + reg = rte_le_to_cpu_64(entries[idx].state); + if (configed != (int)FIELD_GET(NSP_ETH_STATE_CONFIGURED, reg)) { + reg = rte_le_to_cpu_64(entries[idx].control); + reg &= ~NSP_ETH_CTRL_CONFIGURED; + reg |= FIELD_PREP(NSP_ETH_CTRL_CONFIGURED, configed); + entries[idx].control = rte_cpu_to_le_64(reg); + + nfp_nsp_config_set_modified(nsp, 1); + } + + return nfp_eth_config_commit_end(nsp); +} + +static int +nfp_eth_set_bit_config(struct nfp_nsp *nsp, unsigned int raw_idx, + const uint64_t mask, const unsigned int shift, + unsigned int val, const uint64_t ctrl_bit) +{ + union eth_table_entry *entries = nfp_nsp_config_entries(nsp); + unsigned int idx = nfp_nsp_config_idx(nsp); + uint64_t reg; + + /* + * Note: set features were added in ABI 0.14 but the error + * codes were initially not populated correctly. + */ + if (nfp_nsp_get_abi_ver_minor(nsp) < 17) { + printf("set operations not supported, please update flash\n"); + return -EOPNOTSUPP; + } + + /* Check if we are already in requested state */ + reg = rte_le_to_cpu_64(entries[idx].raw[raw_idx]); + if (val == (reg & mask) >> shift) + return 0; + + reg &= ~mask; + reg |= (val << shift) & mask; + entries[idx].raw[raw_idx] = rte_cpu_to_le_64(reg); + + entries[idx].control |= rte_cpu_to_le_64(ctrl_bit); + + nfp_nsp_config_set_modified(nsp, 1); + + return 0; +} + +#define NFP_ETH_SET_BIT_CONFIG(nsp, raw_idx, mask, val, ctrl_bit) \ + (__extension__ ({ \ + typeof(mask) _x = (mask); \ + nfp_eth_set_bit_config(nsp, raw_idx, _x, __bf_shf(_x), \ + val, ctrl_bit); \ + })) + +/* + * __nfp_eth_set_aneg() - set PHY autonegotiation control bit + * @nsp: NFP NSP handle returned from nfp_eth_config_start() + * @mode: Desired autonegotiation mode + * + * Allow/disallow PHY module to advertise/perform autonegotiation. + * Will write to hwinfo overrides in the flash (persistent config). + * + * Return: 0 or -ERRNO. + */ +int +__nfp_eth_set_aneg(struct nfp_nsp *nsp, enum nfp_eth_aneg mode) +{ + return NFP_ETH_SET_BIT_CONFIG(nsp, NSP_ETH_RAW_STATE, + NSP_ETH_STATE_ANEG, mode, + NSP_ETH_CTRL_SET_ANEG); +} + +/* + * __nfp_eth_set_fec() - set PHY forward error correction control bit + * @nsp: NFP NSP handle returned from nfp_eth_config_start() + * @mode: Desired fec mode + * + * Set the PHY module forward error correction mode. + * Will write to hwinfo overrides in the flash (persistent config). + * + * Return: 0 or -ERRNO. + */ +static int +__nfp_eth_set_fec(struct nfp_nsp *nsp, enum nfp_eth_fec mode) +{ + return NFP_ETH_SET_BIT_CONFIG(nsp, NSP_ETH_RAW_STATE, + NSP_ETH_STATE_FEC, mode, + NSP_ETH_CTRL_SET_FEC); +} + +/* + * nfp_eth_set_fec() - set PHY forward error correction control mode + * @cpp: NFP CPP handle + * @idx: NFP chip-wide port index + * @mode: Desired fec mode + * + * Return: + * 0 - configuration successful; + * 1 - no changes were needed; + * -ERRNO - configuration failed. 
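+ *
+ * Illustrative call (the enum nfp_eth_fec constants come from nfp_nsp.h;
+ * NFP_FEC_REED_SOLOMON_BIT is picked purely as an example):
+ *
+ *	err = nfp_eth_set_fec(cpp, idx, NFP_FEC_REED_SOLOMON_BIT);
+ *	if (err < 0)
+ *		printf("FEC mode not accepted for port %u\n", idx);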
+ */ +int +nfp_eth_set_fec(struct nfp_cpp *cpp, unsigned int idx, enum nfp_eth_fec mode) +{ + struct nfp_nsp *nsp; + int err; + + nsp = nfp_eth_config_start(cpp, idx); + if (!nsp) + return -EIO; + + err = __nfp_eth_set_fec(nsp, mode); + if (err) { + nfp_eth_config_cleanup_end(nsp); + return err; + } + + return nfp_eth_config_commit_end(nsp); +} + +/* + * __nfp_eth_set_speed() - set interface speed/rate + * @nsp: NFP NSP handle returned from nfp_eth_config_start() + * @speed: Desired speed (per lane) + * + * Set lane speed. Provided @speed value should be subport speed divided + * by number of lanes this subport is spanning (i.e. 10000 for 40G, 25000 for + * 50G, etc.) + * Will write to hwinfo overrides in the flash (persistent config). + * + * Return: 0 or -ERRNO. + */ +int +__nfp_eth_set_speed(struct nfp_nsp *nsp, unsigned int speed) +{ + enum nfp_eth_rate rate; + + rate = nfp_eth_speed2rate(speed); + if (rate == RATE_INVALID) { + printf("could not find matching lane rate for speed %u\n", + speed); + return -EINVAL; + } + + return NFP_ETH_SET_BIT_CONFIG(nsp, NSP_ETH_RAW_STATE, + NSP_ETH_STATE_RATE, rate, + NSP_ETH_CTRL_SET_RATE); +} + +/* + * __nfp_eth_set_split() - set interface lane split + * @nsp: NFP NSP handle returned from nfp_eth_config_start() + * @lanes: Desired lanes per port + * + * Set number of lanes in the port. + * Will write to hwinfo overrides in the flash (persistent config). + * + * Return: 0 or -ERRNO. + */ +int +__nfp_eth_set_split(struct nfp_nsp *nsp, unsigned int lanes) +{ + return NFP_ETH_SET_BIT_CONFIG(nsp, NSP_ETH_RAW_PORT, NSP_ETH_PORT_LANES, + lanes, NSP_ETH_CTRL_SET_LANES); +} diff --git a/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_resource.c b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_resource.c new file mode 100644 index 000000000..dd41fa4de --- /dev/null +++ b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_resource.c @@ -0,0 +1,266 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Netronome Systems, Inc. + * All rights reserved. 
+ */ + +#include +#include +#include + +#include + +#include "nfp_cpp.h" +#include "nfp6000/nfp6000.h" +#include "nfp_resource.h" +#include "nfp_crc.h" + +#define NFP_RESOURCE_TBL_TARGET NFP_CPP_TARGET_MU +#define NFP_RESOURCE_TBL_BASE 0x8100000000ULL + +/* NFP Resource Table self-identifier */ +#define NFP_RESOURCE_TBL_NAME "nfp.res" +#define NFP_RESOURCE_TBL_KEY 0x00000000 /* Special key for entry 0 */ + +#define NFP_RESOURCE_ENTRY_NAME_SZ 8 + +/* + * struct nfp_resource_entry - Resource table entry + * @owner: NFP CPP Lock, interface owner + * @key: NFP CPP Lock, posix_crc32(name, 8) + * @region: Memory region descriptor + * @name: ASCII, zero padded name + * @reserved + * @cpp_action: CPP Action + * @cpp_token: CPP Token + * @cpp_target: CPP Target ID + * @page_offset: 256-byte page offset into target's CPP address + * @page_size: size, in 256-byte pages + */ +struct nfp_resource_entry { + struct nfp_resource_entry_mutex { + uint32_t owner; + uint32_t key; + } mutex; + struct nfp_resource_entry_region { + uint8_t name[NFP_RESOURCE_ENTRY_NAME_SZ]; + uint8_t reserved[5]; + uint8_t cpp_action; + uint8_t cpp_token; + uint8_t cpp_target; + uint32_t page_offset; + uint32_t page_size; + } region; +}; + +#define NFP_RESOURCE_TBL_SIZE 4096 +#define NFP_RESOURCE_TBL_ENTRIES (int)(NFP_RESOURCE_TBL_SIZE / \ + sizeof(struct nfp_resource_entry)) + +struct nfp_resource { + char name[NFP_RESOURCE_ENTRY_NAME_SZ + 1]; + uint32_t cpp_id; + uint64_t addr; + uint64_t size; + struct nfp_cpp_mutex *mutex; +}; + +static int +nfp_cpp_resource_find(struct nfp_cpp *cpp, struct nfp_resource *res) +{ + char name_pad[NFP_RESOURCE_ENTRY_NAME_SZ + 2]; + struct nfp_resource_entry entry; + uint32_t cpp_id, key; + int ret, i; + + cpp_id = NFP_CPP_ID(NFP_RESOURCE_TBL_TARGET, 3, 0); /* Atomic read */ + + memset(name_pad, 0, sizeof(name_pad)); + strlcpy(name_pad, res->name, sizeof(name_pad)); + + /* Search for a matching entry */ + if (!memcmp(name_pad, NFP_RESOURCE_TBL_NAME "\0\0\0\0\0\0\0\0", 8)) { + printf("Grabbing device lock not supported\n"); + return -EOPNOTSUPP; + } + key = nfp_crc32_posix(name_pad, NFP_RESOURCE_ENTRY_NAME_SZ); + + for (i = 0; i < NFP_RESOURCE_TBL_ENTRIES; i++) { + uint64_t addr = NFP_RESOURCE_TBL_BASE + + sizeof(struct nfp_resource_entry) * i; + + ret = nfp_cpp_read(cpp, cpp_id, addr, &entry, sizeof(entry)); + if (ret != sizeof(entry)) + return -EIO; + + if (entry.mutex.key != key) + continue; + + /* Found key! 
*/ + res->mutex = + nfp_cpp_mutex_alloc(cpp, + NFP_RESOURCE_TBL_TARGET, addr, key); + res->cpp_id = NFP_CPP_ID(entry.region.cpp_target, + entry.region.cpp_action, + entry.region.cpp_token); + res->addr = ((uint64_t)entry.region.page_offset) << 8; + res->size = (uint64_t)entry.region.page_size << 8; + return 0; + } + + return -ENOENT; +} + +static int +nfp_resource_try_acquire(struct nfp_cpp *cpp, struct nfp_resource *res, + struct nfp_cpp_mutex *dev_mutex) +{ + int err; + + if (nfp_cpp_mutex_lock(dev_mutex)) + return -EINVAL; + + err = nfp_cpp_resource_find(cpp, res); + if (err) + goto err_unlock_dev; + + err = nfp_cpp_mutex_trylock(res->mutex); + if (err) + goto err_res_mutex_free; + + nfp_cpp_mutex_unlock(dev_mutex); + + return 0; + +err_res_mutex_free: + nfp_cpp_mutex_free(res->mutex); +err_unlock_dev: + nfp_cpp_mutex_unlock(dev_mutex); + + return err; +} + +/* + * nfp_resource_acquire() - Acquire a resource handle + * @cpp: NFP CPP handle + * @name: Name of the resource + * + * NOTE: This function locks the acquired resource + * + * Return: NFP Resource handle, or ERR_PTR() + */ +struct nfp_resource * +nfp_resource_acquire(struct nfp_cpp *cpp, const char *name) +{ + struct nfp_cpp_mutex *dev_mutex; + struct nfp_resource *res; + int err; + struct timespec wait; + int count; + + res = malloc(sizeof(*res)); + if (!res) + return NULL; + + memset(res, 0, sizeof(*res)); + + strncpy(res->name, name, NFP_RESOURCE_ENTRY_NAME_SZ); + + dev_mutex = nfp_cpp_mutex_alloc(cpp, NFP_RESOURCE_TBL_TARGET, + NFP_RESOURCE_TBL_BASE, + NFP_RESOURCE_TBL_KEY); + if (!dev_mutex) { + free(res); + return NULL; + } + + wait.tv_sec = 0; + wait.tv_nsec = 1000000; + count = 0; + + for (;;) { + err = nfp_resource_try_acquire(cpp, res, dev_mutex); + if (!err) + break; + if (err != -EBUSY) + goto err_free; + + if (count++ > 1000) { + printf("Error: resource %s timed out\n", name); + err = -EBUSY; + goto err_free; + } + + nanosleep(&wait, NULL); + } + + nfp_cpp_mutex_free(dev_mutex); + + return res; + +err_free: + nfp_cpp_mutex_free(dev_mutex); + free(res); + return NULL; +} + +/* + * nfp_resource_release() - Release a NFP Resource handle + * @res: NFP Resource handle + * + * NOTE: This function implictly unlocks the resource handle + */ +void +nfp_resource_release(struct nfp_resource *res) +{ + nfp_cpp_mutex_unlock(res->mutex); + nfp_cpp_mutex_free(res->mutex); + free(res); +} + +/* + * nfp_resource_cpp_id() - Return the cpp_id of a resource handle + * @res: NFP Resource handle + * + * Return: NFP CPP ID + */ +uint32_t +nfp_resource_cpp_id(const struct nfp_resource *res) +{ + return res->cpp_id; +} + +/* + * nfp_resource_name() - Return the name of a resource handle + * @res: NFP Resource handle + * + * Return: const char pointer to the name of the resource + */ +const char +*nfp_resource_name(const struct nfp_resource *res) +{ + return res->name; +} + +/* + * nfp_resource_address() - Return the address of a resource handle + * @res: NFP Resource handle + * + * Return: Address of the resource + */ +uint64_t +nfp_resource_address(const struct nfp_resource *res) +{ + return res->addr; +} + +/* + * nfp_resource_size() - Return the size in bytes of a resource handle + * @res: NFP Resource handle + * + * Return: Size of the resource in bytes + */ +uint64_t +nfp_resource_size(const struct nfp_resource *res) +{ + return res->size; +} diff --git a/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_resource.h b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_resource.h new file mode 100644 index 000000000..06cc6f74f --- /dev/null +++ 
b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_resource.h @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Netronome Systems, Inc. + * All rights reserved. + */ + +#ifndef NFP_RESOURCE_H +#define NFP_RESOURCE_H + +#include "nfp_cpp.h" + +#define NFP_RESOURCE_NFP_NFFW "nfp.nffw" +#define NFP_RESOURCE_NFP_HWINFO "nfp.info" +#define NFP_RESOURCE_NSP "nfp.sp" + +/** + * Opaque handle to a NFP Resource + */ +struct nfp_resource; + +struct nfp_resource *nfp_resource_acquire(struct nfp_cpp *cpp, + const char *name); + +/** + * Release a NFP Resource, and free the handle + * @param[in] res NFP Resource handle + */ +void nfp_resource_release(struct nfp_resource *res); + +/** + * Return the CPP ID of a NFP Resource + * @param[in] res NFP Resource handle + * @return CPP ID of the NFP Resource + */ +uint32_t nfp_resource_cpp_id(const struct nfp_resource *res); + +/** + * Return the name of a NFP Resource + * @param[in] res NFP Resource handle + * @return Name of the NFP Resource + */ +const char *nfp_resource_name(const struct nfp_resource *res); + +/** + * Return the target address of a NFP Resource + * @param[in] res NFP Resource handle + * @return Address of the NFP Resource + */ +uint64_t nfp_resource_address(const struct nfp_resource *res); + +uint64_t nfp_resource_size(const struct nfp_resource *res); + +#endif /* NFP_RESOURCE_H */ diff --git a/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_rtsym.c b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_rtsym.c new file mode 100644 index 000000000..cb7d83db5 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_rtsym.c @@ -0,0 +1,327 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Netronome Systems, Inc. + * All rights reserved. + */ + +/* + * nfp_rtsym.c + * Interface for accessing run-time symbol table + */ + +#include +#include +#include "nfp_cpp.h" +#include "nfp_mip.h" +#include "nfp_rtsym.h" +#include "nfp6000/nfp6000.h" + +/* These need to match the linker */ +#define SYM_TGT_LMEM 0 +#define SYM_TGT_EMU_CACHE 0x17 + +struct nfp_rtsym_entry { + uint8_t type; + uint8_t target; + uint8_t island; + uint8_t addr_hi; + uint32_t addr_lo; + uint16_t name; + uint8_t menum; + uint8_t size_hi; + uint32_t size_lo; +}; + +struct nfp_rtsym_table { + struct nfp_cpp *cpp; + int num; + char *strtab; + struct nfp_rtsym symtab[]; +}; + +static int +nfp_meid(uint8_t island_id, uint8_t menum) +{ + return (island_id & 0x3F) == island_id && menum < 12 ? 
+ (island_id << 4) | (menum + 4) : -1; +} + +static void +nfp_rtsym_sw_entry_init(struct nfp_rtsym_table *cache, uint32_t strtab_size, + struct nfp_rtsym *sw, struct nfp_rtsym_entry *fw) +{ + sw->type = fw->type; + sw->name = cache->strtab + rte_le_to_cpu_16(fw->name) % strtab_size; + sw->addr = ((uint64_t)fw->addr_hi << 32) | + rte_le_to_cpu_32(fw->addr_lo); + sw->size = ((uint64_t)fw->size_hi << 32) | + rte_le_to_cpu_32(fw->size_lo); + +#ifdef DEBUG + printf("rtsym_entry_init\n"); + printf("\tname=%s, addr=%" PRIx64 ", size=%" PRIu64 ",target=%d\n", + sw->name, sw->addr, sw->size, sw->target); +#endif + switch (fw->target) { + case SYM_TGT_LMEM: + sw->target = NFP_RTSYM_TARGET_LMEM; + break; + case SYM_TGT_EMU_CACHE: + sw->target = NFP_RTSYM_TARGET_EMU_CACHE; + break; + default: + sw->target = fw->target; + break; + } + + if (fw->menum != 0xff) + sw->domain = nfp_meid(fw->island, fw->menum); + else if (fw->island != 0xff) + sw->domain = fw->island; + else + sw->domain = -1; +} + +struct nfp_rtsym_table * +nfp_rtsym_table_read(struct nfp_cpp *cpp) +{ + struct nfp_rtsym_table *rtbl; + struct nfp_mip *mip; + + mip = nfp_mip_open(cpp); + rtbl = __nfp_rtsym_table_read(cpp, mip); + nfp_mip_close(mip); + + return rtbl; +} + +/* + * This looks more complex than it should be. But we need to get the type for + * the ~ right in round_down (it needs to be as wide as the result!), and we + * want to evaluate the macro arguments just once each. + */ +#define __round_mask(x, y) ((__typeof__(x))((y) - 1)) + +#define round_up(x, y) \ + (__extension__ ({ \ + typeof(x) _x = (x); \ + ((((_x) - 1) | __round_mask(_x, y)) + 1); \ + })) + +#define round_down(x, y) \ + (__extension__ ({ \ + typeof(x) _x = (x); \ + ((_x) & ~__round_mask(_x, y)); \ + })) + +struct nfp_rtsym_table * +__nfp_rtsym_table_read(struct nfp_cpp *cpp, const struct nfp_mip *mip) +{ + uint32_t strtab_addr, symtab_addr, strtab_size, symtab_size; + struct nfp_rtsym_entry *rtsymtab; + struct nfp_rtsym_table *cache; + const uint32_t dram = + NFP_CPP_ID(NFP_CPP_TARGET_MU, NFP_CPP_ACTION_RW, 0) | + NFP_ISL_EMEM0; + int err, n, size; + + if (!mip) + return NULL; + + nfp_mip_strtab(mip, &strtab_addr, &strtab_size); + nfp_mip_symtab(mip, &symtab_addr, &symtab_size); + + if (!symtab_size || !strtab_size || symtab_size % sizeof(*rtsymtab)) + return NULL; + + /* Align to 64 bits */ + symtab_size = round_up(symtab_size, 8); + strtab_size = round_up(strtab_size, 8); + + rtsymtab = malloc(symtab_size); + if (!rtsymtab) + return NULL; + + size = sizeof(*cache); + size += symtab_size / sizeof(*rtsymtab) * sizeof(struct nfp_rtsym); + size += strtab_size + 1; + cache = malloc(size); + if (!cache) + goto exit_free_rtsym_raw; + + cache->cpp = cpp; + cache->num = symtab_size / sizeof(*rtsymtab); + cache->strtab = (void *)&cache->symtab[cache->num]; + + err = nfp_cpp_read(cpp, dram, symtab_addr, rtsymtab, symtab_size); + if (err != (int)symtab_size) + goto exit_free_cache; + + err = nfp_cpp_read(cpp, dram, strtab_addr, cache->strtab, strtab_size); + if (err != (int)strtab_size) + goto exit_free_cache; + cache->strtab[strtab_size] = '\0'; + + for (n = 0; n < cache->num; n++) + nfp_rtsym_sw_entry_init(cache, strtab_size, + &cache->symtab[n], &rtsymtab[n]); + + free(rtsymtab); + + return cache; + +exit_free_cache: + free(cache); +exit_free_rtsym_raw: + free(rtsymtab); + return NULL; +} + +/* + * nfp_rtsym_count() - Get the number of RTSYM descriptors + * @rtbl: NFP RTsym table + * + * Return: Number of RTSYM descriptors + */ +int +nfp_rtsym_count(struct 
nfp_rtsym_table *rtbl) +{ + if (!rtbl) + return -EINVAL; + + return rtbl->num; +} + +/* + * nfp_rtsym_get() - Get the Nth RTSYM descriptor + * @rtbl: NFP RTsym table + * @idx: Index (0-based) of the RTSYM descriptor + * + * Return: const pointer to a struct nfp_rtsym descriptor, or NULL + */ +const struct nfp_rtsym * +nfp_rtsym_get(struct nfp_rtsym_table *rtbl, int idx) +{ + if (!rtbl) + return NULL; + + if (idx >= rtbl->num) + return NULL; + + return &rtbl->symtab[idx]; +} + +/* + * nfp_rtsym_lookup() - Return the RTSYM descriptor for a symbol name + * @rtbl: NFP RTsym table + * @name: Symbol name + * + * Return: const pointer to a struct nfp_rtsym descriptor, or NULL + */ +const struct nfp_rtsym * +nfp_rtsym_lookup(struct nfp_rtsym_table *rtbl, const char *name) +{ + int n; + + if (!rtbl) + return NULL; + + for (n = 0; n < rtbl->num; n++) + if (strcmp(name, rtbl->symtab[n].name) == 0) + return &rtbl->symtab[n]; + + return NULL; +} + +/* + * nfp_rtsym_read_le() - Read a simple unsigned scalar value from symbol + * @rtbl: NFP RTsym table + * @name: Symbol name + * @error: Poniter to error code (optional) + * + * Lookup a symbol, map, read it and return it's value. Value of the symbol + * will be interpreted as a simple little-endian unsigned value. Symbol can + * be 4 or 8 bytes in size. + * + * Return: value read, on error sets the error and returns ~0ULL. + */ +uint64_t +nfp_rtsym_read_le(struct nfp_rtsym_table *rtbl, const char *name, int *error) +{ + const struct nfp_rtsym *sym; + uint32_t val32, id; + uint64_t val; + int err; + + sym = nfp_rtsym_lookup(rtbl, name); + if (!sym) { + err = -ENOENT; + goto exit; + } + + id = NFP_CPP_ISLAND_ID(sym->target, NFP_CPP_ACTION_RW, 0, sym->domain); + +#ifdef DEBUG + printf("Reading symbol %s with size %" PRIu64 " at %" PRIx64 "\n", + name, sym->size, sym->addr); +#endif + switch (sym->size) { + case 4: + err = nfp_cpp_readl(rtbl->cpp, id, sym->addr, &val32); + val = val32; + break; + case 8: + err = nfp_cpp_readq(rtbl->cpp, id, sym->addr, &val); + break; + default: + printf("rtsym '%s' unsupported size: %" PRId64 "\n", + name, sym->size); + err = -EINVAL; + break; + } + + if (err) + err = -EIO; +exit: + if (error) + *error = err; + + if (err) + return ~0ULL; + + return val; +} + +uint8_t * +nfp_rtsym_map(struct nfp_rtsym_table *rtbl, const char *name, + unsigned int min_size, struct nfp_cpp_area **area) +{ + const struct nfp_rtsym *sym; + uint8_t *mem; + +#ifdef DEBUG + printf("mapping symbol %s\n", name); +#endif + sym = nfp_rtsym_lookup(rtbl, name); + if (!sym) { + printf("symbol lookup fails for %s\n", name); + return NULL; + } + + if (sym->size < min_size) { + printf("Symbol %s too small (%" PRIu64 " < %u)\n", name, + sym->size, min_size); + return NULL; + } + + mem = nfp_cpp_map_area(rtbl->cpp, sym->domain, sym->target, sym->addr, + sym->size, area); + if (!mem) { + printf("Failed to map symbol %s\n", name); + return NULL; + } +#ifdef DEBUG + printf("symbol %s with address %p\n", name, mem); +#endif + + return mem; +} diff --git a/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_rtsym.h b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_rtsym.h new file mode 100644 index 000000000..8b494211b --- /dev/null +++ b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_rtsym.h @@ -0,0 +1,61 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Netronome Systems, Inc. + * All rights reserved. 
+ */ + +#ifndef __NFP_RTSYM_H__ +#define __NFP_RTSYM_H__ + +#define NFP_RTSYM_TYPE_NONE 0 +#define NFP_RTSYM_TYPE_OBJECT 1 +#define NFP_RTSYM_TYPE_FUNCTION 2 +#define NFP_RTSYM_TYPE_ABS 3 + +#define NFP_RTSYM_TARGET_NONE 0 +#define NFP_RTSYM_TARGET_LMEM -1 +#define NFP_RTSYM_TARGET_EMU_CACHE -7 + +/* + * Structure describing a run-time NFP symbol. + * + * The memory target of the symbol is generally the CPP target number and can be + * used directly by the nfp_cpp API calls. However, in some cases (i.e., for + * local memory or control store) the target is encoded using a negative number. + * + * When the target type can not be used to fully describe the location of a + * symbol the domain field is used to further specify the location (i.e., the + * specific ME or island number). + * + * For ME target resources, 'domain' is an MEID. + * For Island target resources, 'domain' is an island ID, with the one exception + * of "sram" symbols for backward compatibility, which are viewed as global. + */ +struct nfp_rtsym { + const char *name; + uint64_t addr; + uint64_t size; + int type; + int target; + int domain; +}; + +struct nfp_rtsym_table; + +struct nfp_rtsym_table *nfp_rtsym_table_read(struct nfp_cpp *cpp); + +struct nfp_rtsym_table * +__nfp_rtsym_table_read(struct nfp_cpp *cpp, const struct nfp_mip *mip); + +int nfp_rtsym_count(struct nfp_rtsym_table *rtbl); + +const struct nfp_rtsym *nfp_rtsym_get(struct nfp_rtsym_table *rtbl, int idx); + +const struct nfp_rtsym * +nfp_rtsym_lookup(struct nfp_rtsym_table *rtbl, const char *name); + +uint64_t nfp_rtsym_read_le(struct nfp_rtsym_table *rtbl, const char *name, + int *error); +uint8_t * +nfp_rtsym_map(struct nfp_rtsym_table *rtbl, const char *name, + unsigned int min_size, struct nfp_cpp_area **area); +#endif diff --git a/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_target.h b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_target.h new file mode 100644 index 000000000..2884a0034 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/nfp/nfpcore/nfp_target.h @@ -0,0 +1,579 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Netronome Systems, Inc. + * All rights reserved. 
+ */ + +#ifndef NFP_TARGET_H +#define NFP_TARGET_H + +#include "nfp-common/nfp_resid.h" +#include "nfp-common/nfp_cppat.h" +#include "nfp-common/nfp_platform.h" +#include "nfp_cpp.h" + +#define P32 1 +#define P64 2 + +#define PUSHPULL(_pull, _push) (((_pull) << 4) | ((_push) << 0)) + +#ifndef NFP_ERRNO +#include +#define NFP_ERRNO(x) (errno = (x), -1) +#endif + +static inline int +pushpull_width(int pp) +{ + pp &= 0xf; + + if (pp == 0) + return NFP_ERRNO(EINVAL); + return (2 << pp); +} + +#define PUSH_WIDTH(_pushpull) pushpull_width((_pushpull) >> 0) +#define PULL_WIDTH(_pushpull) pushpull_width((_pushpull) >> 4) + +static inline int +target_rw(uint32_t cpp_id, int pp, int start, int len) +{ + int island = NFP_CPP_ID_ISLAND_of(cpp_id); + + if (island && (island < start || island > (start + len))) + return NFP_ERRNO(EINVAL); + + switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) { + case NFP_CPP_ID(0, 0, 0): + return PUSHPULL(0, pp); + case NFP_CPP_ID(0, 1, 0): + return PUSHPULL(pp, 0); + case NFP_CPP_ID(0, NFP_CPP_ACTION_RW, 0): + return PUSHPULL(pp, pp); + default: + return NFP_ERRNO(EINVAL); + } +} + +static inline int +nfp6000_nbi_dma(uint32_t cpp_id) +{ + switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) { + case NFP_CPP_ID(0, 0, 0): /* ReadNbiDma */ + return PUSHPULL(0, P64); + case NFP_CPP_ID(0, 1, 0): /* WriteNbiDma */ + return PUSHPULL(P64, 0); + case NFP_CPP_ID(0, NFP_CPP_ACTION_RW, 0): + return PUSHPULL(P64, P64); + default: + return NFP_ERRNO(EINVAL); + } +} + +static inline int +nfp6000_nbi_stats(uint32_t cpp_id) +{ + switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) { + case NFP_CPP_ID(0, 0, 0): /* ReadNbiStats */ + return PUSHPULL(0, P64); + case NFP_CPP_ID(0, 1, 0): /* WriteNbiStats */ + return PUSHPULL(P64, 0); + case NFP_CPP_ID(0, NFP_CPP_ACTION_RW, 0): + return PUSHPULL(P64, P64); + default: + return NFP_ERRNO(EINVAL); + } +} + +static inline int +nfp6000_nbi_tm(uint32_t cpp_id) +{ + switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) { + case NFP_CPP_ID(0, 0, 0): /* ReadNbiTM */ + return PUSHPULL(0, P64); + case NFP_CPP_ID(0, 1, 0): /* WriteNbiTM */ + return PUSHPULL(P64, 0); + case NFP_CPP_ID(0, NFP_CPP_ACTION_RW, 0): + return PUSHPULL(P64, P64); + default: + return NFP_ERRNO(EINVAL); + } +} + +static inline int +nfp6000_nbi_ppc(uint32_t cpp_id) +{ + switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) { + case NFP_CPP_ID(0, 0, 0): /* ReadNbiPreclassifier */ + return PUSHPULL(0, P64); + case NFP_CPP_ID(0, 1, 0): /* WriteNbiPreclassifier */ + return PUSHPULL(P64, 0); + case NFP_CPP_ID(0, NFP_CPP_ACTION_RW, 0): + return PUSHPULL(P64, P64); + default: + return NFP_ERRNO(EINVAL); + } +} + +static inline int +nfp6000_nbi(uint32_t cpp_id, uint64_t address) +{ + int island = NFP_CPP_ID_ISLAND_of(cpp_id); + uint64_t rel_addr = address & 0x3fFFFF; + + if (island && (island < 8 || island > 9)) + return NFP_ERRNO(EINVAL); + + if (rel_addr < (1 << 20)) + return nfp6000_nbi_dma(cpp_id); + if (rel_addr < (2 << 20)) + return nfp6000_nbi_stats(cpp_id); + if (rel_addr < (3 << 20)) + return nfp6000_nbi_tm(cpp_id); + return nfp6000_nbi_ppc(cpp_id); +} + +/* + * This structure ONLY includes items that can be done with a read or write of + * 32-bit or 64-bit words. All others are not listed. 
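+ *
+ * The PUSHPULL() values returned below encode the pull width code in
+ * bits 7:4 and the push width code in bits 3:0; pushpull_width() maps a
+ * code back to bytes (P32 -> 4, P64 -> 8). For example the atomic_read
+ * action, NFP_CPP_ID(0, 3, 0), yields PUSHPULL(0, P32), so PUSH_WIDTH()
+ * reports 4 bytes while PULL_WIDTH() fails with EINVAL (nothing is
+ * pulled for a read).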
+ */ +static inline int +nfp6000_mu_common(uint32_t cpp_id) +{ + switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) { + case NFP_CPP_ID(0, NFP_CPP_ACTION_RW, 0): /* read_be/write_be */ + return PUSHPULL(P64, P64); + case NFP_CPP_ID(0, NFP_CPP_ACTION_RW, 1): /* read_le/write_le */ + return PUSHPULL(P64, P64); + case NFP_CPP_ID(0, NFP_CPP_ACTION_RW, 2): /* {read/write}_swap_be */ + return PUSHPULL(P64, P64); + case NFP_CPP_ID(0, NFP_CPP_ACTION_RW, 3): /* {read/write}_swap_le */ + return PUSHPULL(P64, P64); + case NFP_CPP_ID(0, 0, 0): /* read_be */ + return PUSHPULL(0, P64); + case NFP_CPP_ID(0, 0, 1): /* read_le */ + return PUSHPULL(0, P64); + case NFP_CPP_ID(0, 0, 2): /* read_swap_be */ + return PUSHPULL(0, P64); + case NFP_CPP_ID(0, 0, 3): /* read_swap_le */ + return PUSHPULL(0, P64); + case NFP_CPP_ID(0, 1, 0): /* write_be */ + return PUSHPULL(P64, 0); + case NFP_CPP_ID(0, 1, 1): /* write_le */ + return PUSHPULL(P64, 0); + case NFP_CPP_ID(0, 1, 2): /* write_swap_be */ + return PUSHPULL(P64, 0); + case NFP_CPP_ID(0, 1, 3): /* write_swap_le */ + return PUSHPULL(P64, 0); + case NFP_CPP_ID(0, 3, 0): /* atomic_read */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 3, 2): /* mask_compare_write */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 4, 0): /* atomic_write */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 4, 2): /* atomic_write_imm */ + return PUSHPULL(0, 0); + case NFP_CPP_ID(0, 4, 3): /* swap_imm */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 5, 0): /* set */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 5, 3): /* test_set_imm */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 6, 0): /* clr */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 6, 3): /* test_clr_imm */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 7, 0): /* add */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 7, 3): /* test_add_imm */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 8, 0): /* addsat */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 8, 3): /* test_subsat_imm */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 9, 0): /* sub */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 9, 3): /* test_sub_imm */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 10, 0): /* subsat */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 10, 3): /* test_subsat_imm */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 13, 0): /* microq128_get */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 13, 1): /* microq128_pop */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 13, 2): /* microq128_put */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 15, 0): /* xor */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 15, 3): /* test_xor_imm */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 28, 0): /* read32_be */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 28, 1): /* read32_le */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 28, 2): /* read32_swap_be */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 28, 3): /* read32_swap_le */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 31, 0): /* write32_be */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 31, 1): /* write32_le */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 31, 2): /* write32_swap_be */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 31, 3): /* write32_swap_le */ + return PUSHPULL(P32, 0); + default: + return NFP_ERRNO(EINVAL); + } +} + +static inline int +nfp6000_mu_ctm(uint32_t cpp_id) +{ + switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) { + case NFP_CPP_ID(0, 16, 1): /* packet_read_packet_status */ + return PUSHPULL(0, P32); + default: + return nfp6000_mu_common(cpp_id); + } +} 
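+
+/*
+ * The EMU variant below layers the external-MU queue engine actions
+ * (read_queue/write_queue, journal, get and pop) on top of the common
+ * action set; anything it does not recognise falls through to
+ * nfp6000_mu_common().
+ */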
+ +static inline int +nfp6000_mu_emu(uint32_t cpp_id) +{ + switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) { + case NFP_CPP_ID(0, 18, 0): /* read_queue */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 18, 1): /* read_queue_ring */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 18, 2): /* write_queue */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 18, 3): /* write_queue_ring */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 20, 2): /* journal */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 21, 0): /* get */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 21, 1): /* get_eop */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 21, 2): /* get_freely */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 22, 0): /* pop */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 22, 1): /* pop_eop */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 22, 2): /* pop_freely */ + return PUSHPULL(0, P32); + default: + return nfp6000_mu_common(cpp_id); + } +} + +static inline int +nfp6000_mu_imu(uint32_t cpp_id) +{ + return nfp6000_mu_common(cpp_id); +} + +static inline int +nfp6000_mu(uint32_t cpp_id, uint64_t address) +{ + int pp; + int island = NFP_CPP_ID_ISLAND_of(cpp_id); + + if (island == 0) { + if (address < 0x2000000000ULL) + pp = nfp6000_mu_ctm(cpp_id); + else if (address < 0x8000000000ULL) + pp = nfp6000_mu_emu(cpp_id); + else if (address < 0x9800000000ULL) + pp = nfp6000_mu_ctm(cpp_id); + else if (address < 0x9C00000000ULL) + pp = nfp6000_mu_emu(cpp_id); + else if (address < 0xA000000000ULL) + pp = nfp6000_mu_imu(cpp_id); + else + pp = nfp6000_mu_ctm(cpp_id); + } else if (island >= 24 && island <= 27) { + pp = nfp6000_mu_emu(cpp_id); + } else if (island >= 28 && island <= 31) { + pp = nfp6000_mu_imu(cpp_id); + } else if (island == 1 || + (island >= 4 && island <= 7) || + (island >= 12 && island <= 13) || + (island >= 32 && island <= 47) || + (island >= 48 && island <= 51)) { + pp = nfp6000_mu_ctm(cpp_id); + } else { + pp = NFP_ERRNO(EINVAL); + } + + return pp; +} + +static inline int +nfp6000_ila(uint32_t cpp_id) +{ + int island = NFP_CPP_ID_ISLAND_of(cpp_id); + + if (island && (island < 48 || island > 51)) + return NFP_ERRNO(EINVAL); + + switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) { + case NFP_CPP_ID(0, 0, 1): /* read_check_error */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 2, 0): /* read_int */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 3, 0): /* write_int */ + return PUSHPULL(P32, 0); + default: + return target_rw(cpp_id, P32, 48, 4); + } +} + +static inline int +nfp6000_pci(uint32_t cpp_id) +{ + int island = NFP_CPP_ID_ISLAND_of(cpp_id); + + if (island && (island < 4 || island > 7)) + return NFP_ERRNO(EINVAL); + + switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) { + case NFP_CPP_ID(0, 2, 0): + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 3, 0): + return PUSHPULL(P32, 0); + default: + return target_rw(cpp_id, P32, 4, 4); + } +} + +static inline int +nfp6000_crypto(uint32_t cpp_id) +{ + int island = NFP_CPP_ID_ISLAND_of(cpp_id); + + if (island && (island < 12 || island > 15)) + return NFP_ERRNO(EINVAL); + + switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) { + case NFP_CPP_ID(0, 2, 0): + return PUSHPULL(P64, 0); + default: + return target_rw(cpp_id, P64, 12, 4); + } +} + +static inline int +nfp6000_cap_xpb(uint32_t cpp_id) +{ + int island = NFP_CPP_ID_ISLAND_of(cpp_id); + + if (island && (island < 1 || island > 63)) + return NFP_ERRNO(EINVAL); + + switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) { + case NFP_CPP_ID(0, 0, 1): /* RingGet */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 0, 2): /* Interthread Signal */ + 
return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 1, 1): /* RingPut */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 1, 2): /* CTNNWr */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 2, 0): /* ReflectRd, signal none */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 2, 1): /* ReflectRd, signal self */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 2, 2): /* ReflectRd, signal remote */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 2, 3): /* ReflectRd, signal both */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 3, 0): /* ReflectWr, signal none */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 3, 1): /* ReflectWr, signal self */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 3, 2): /* ReflectWr, signal remote */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 3, 3): /* ReflectWr, signal both */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, NFP_CPP_ACTION_RW, 1): + return PUSHPULL(P32, P32); + default: + return target_rw(cpp_id, P32, 1, 63); + } +} + +static inline int +nfp6000_cls(uint32_t cpp_id) +{ + int island = NFP_CPP_ID_ISLAND_of(cpp_id); + + if (island && (island < 1 || island > 63)) + return NFP_ERRNO(EINVAL); + + switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) { + case NFP_CPP_ID(0, 0, 3): /* xor */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 2, 0): /* set */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 2, 1): /* clr */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 4, 0): /* add */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 4, 1): /* add64 */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 6, 0): /* sub */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 6, 1): /* sub64 */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 6, 2): /* subsat */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 8, 2): /* hash_mask */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 8, 3): /* hash_clear */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 9, 0): /* ring_get */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 9, 1): /* ring_pop */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 9, 2): /* ring_get_freely */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 9, 3): /* ring_pop_freely */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 10, 0): /* ring_put */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 10, 2): /* ring_journal */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 14, 0): /* reflect_write_sig_local */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 15, 1): /* reflect_read_sig_local */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 17, 2): /* statistic */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 24, 0): /* ring_read */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 24, 1): /* ring_write */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 25, 0): /* ring_workq_add_thread */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 25, 1): /* ring_workq_add_work */ + return PUSHPULL(P32, 0); + default: + return target_rw(cpp_id, P32, 0, 64); + } +} + +static inline int +nfp6000_target_pushpull(uint32_t cpp_id, uint64_t address) +{ + switch (NFP_CPP_ID_TARGET_of(cpp_id)) { + case NFP6000_CPPTGT_NBI: + return nfp6000_nbi(cpp_id, address); + case NFP6000_CPPTGT_VQDR: + return target_rw(cpp_id, P32, 24, 4); + case NFP6000_CPPTGT_ILA: + return nfp6000_ila(cpp_id); + case NFP6000_CPPTGT_MU: + return nfp6000_mu(cpp_id, address); + case NFP6000_CPPTGT_PCIE: + return nfp6000_pci(cpp_id); + case NFP6000_CPPTGT_ARM: + if (address < 0x10000) + return target_rw(cpp_id, P64, 1, 1); + else + return target_rw(cpp_id, P32, 1, 1); + case NFP6000_CPPTGT_CRYPTO: + return nfp6000_crypto(cpp_id); + 
case NFP6000_CPPTGT_CTXPB: + return nfp6000_cap_xpb(cpp_id); + case NFP6000_CPPTGT_CLS: + return nfp6000_cls(cpp_id); + case 0: + return target_rw(cpp_id, P32, 4, 4); + default: + return NFP_ERRNO(EINVAL); + } +} + +static inline int +nfp_target_pushpull_width(int pp, int write_not_read) +{ + if (pp < 0) + return pp; + + if (write_not_read) + return PULL_WIDTH(pp); + else + return PUSH_WIDTH(pp); +} + +static inline int +nfp6000_target_action_width(uint32_t cpp_id, uint64_t address, + int write_not_read) +{ + int pp; + + pp = nfp6000_target_pushpull(cpp_id, address); + + return nfp_target_pushpull_width(pp, write_not_read); +} + +static inline int +nfp_target_action_width(uint32_t model, uint32_t cpp_id, uint64_t address, + int write_not_read) +{ + if (NFP_CPP_MODEL_IS_6000(model)) { + return nfp6000_target_action_width(cpp_id, address, + write_not_read); + } else { + return NFP_ERRNO(EINVAL); + } +} + +static inline int +nfp_target_cpp(uint32_t cpp_island_id, uint64_t cpp_island_address, + uint32_t *cpp_target_id, uint64_t *cpp_target_address, + const uint32_t *imb_table) +{ + int err; + int island = NFP_CPP_ID_ISLAND_of(cpp_island_id); + int target = NFP_CPP_ID_TARGET_of(cpp_island_id); + uint32_t imb; + + if (target < 0 || target >= 16) + return NFP_ERRNO(EINVAL); + + if (island == 0) { + /* Already translated */ + *cpp_target_id = cpp_island_id; + *cpp_target_address = cpp_island_address; + return 0; + } + + if (!imb_table) { + /* CPP + Island only allowed on systems with IMB tables */ + return NFP_ERRNO(EINVAL); + } + + imb = imb_table[target]; + + *cpp_target_address = cpp_island_address; + err = _nfp6000_cppat_addr_encode(cpp_target_address, island, target, + ((imb >> 13) & 7), + ((imb >> 12) & 1), + ((imb >> 6) & 0x3f), + ((imb >> 0) & 0x3f)); + if (err == 0) { + *cpp_target_id = + NFP_CPP_ID(target, NFP_CPP_ID_ACTION_of(cpp_island_id), + NFP_CPP_ID_TOKEN_of(cpp_island_id)); + } + + return err; +} + +#endif /* NFP_TARGET_H */ diff --git a/src/spdk/dpdk/drivers/net/nfp/rte_pmd_nfp_version.map b/src/spdk/dpdk/drivers/net/nfp/rte_pmd_nfp_version.map new file mode 100644 index 000000000..f9f17e4f6 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/nfp/rte_pmd_nfp_version.map @@ -0,0 +1,3 @@ +DPDK_20.0 { + local: *; +}; diff --git a/src/spdk/dpdk/drivers/net/null/Makefile b/src/spdk/dpdk/drivers/net/null/Makefile new file mode 100644 index 000000000..f51150c13 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/null/Makefile @@ -0,0 +1,24 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2015 IGEL Co.,Ltd. 
+ +include $(RTE_SDK)/mk/rte.vars.mk + +# +# library name +# +LIB = librte_pmd_null.a + +CFLAGS += -O3 +CFLAGS += $(WERROR_FLAGS) +LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring +LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs +LDLIBS += -lrte_bus_vdev + +EXPORT_MAP := rte_pmd_null_version.map + +# +# all source are stored in SRCS-y +# +SRCS-$(CONFIG_RTE_LIBRTE_PMD_NULL) += rte_eth_null.c + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/src/spdk/dpdk/drivers/net/null/meson.build b/src/spdk/dpdk/drivers/net/null/meson.build new file mode 100644 index 000000000..68ac0d2ae --- /dev/null +++ b/src/spdk/dpdk/drivers/net/null/meson.build @@ -0,0 +1,4 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2017 Intel Corporation + +sources = files('rte_eth_null.c') diff --git a/src/spdk/dpdk/drivers/net/null/rte_eth_null.c b/src/spdk/dpdk/drivers/net/null/rte_eth_null.c new file mode 100644 index 000000000..11258ccea --- /dev/null +++ b/src/spdk/dpdk/drivers/net/null/rte_eth_null.c @@ -0,0 +1,738 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (C) IGEL Co.,Ltd. + * All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#define ETH_NULL_PACKET_SIZE_ARG "size" +#define ETH_NULL_PACKET_COPY_ARG "copy" +#define ETH_NULL_PACKET_NO_RX_ARG "no-rx" + +static unsigned int default_packet_size = 64; +static unsigned int default_packet_copy; +static unsigned int default_no_rx; + +static const char *valid_arguments[] = { + ETH_NULL_PACKET_SIZE_ARG, + ETH_NULL_PACKET_COPY_ARG, + ETH_NULL_PACKET_NO_RX_ARG, + NULL +}; + +struct pmd_internals; + +struct null_queue { + struct pmd_internals *internals; + + struct rte_mempool *mb_pool; + struct rte_mbuf *dummy_packet; + + rte_atomic64_t rx_pkts; + rte_atomic64_t tx_pkts; +}; + +struct pmd_options { + unsigned int packet_copy; + unsigned int packet_size; + unsigned int no_rx; +}; + +struct pmd_internals { + unsigned int packet_size; + unsigned int packet_copy; + unsigned int no_rx; + uint16_t port_id; + + struct null_queue rx_null_queues[RTE_MAX_QUEUES_PER_PORT]; + struct null_queue tx_null_queues[RTE_MAX_QUEUES_PER_PORT]; + + struct rte_ether_addr eth_addr; + /** Bit mask of RSS offloads, the bit offset also means flow type */ + uint64_t flow_type_rss_offloads; + + rte_spinlock_t rss_lock; + + uint16_t reta_size; + struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_128 / + RTE_RETA_GROUP_SIZE]; + + uint8_t rss_key[40]; /**< 40-byte hash key. */ +}; +static struct rte_eth_link pmd_link = { + .link_speed = ETH_SPEED_NUM_10G, + .link_duplex = ETH_LINK_FULL_DUPLEX, + .link_status = ETH_LINK_DOWN, + .link_autoneg = ETH_LINK_FIXED, +}; + +static int eth_null_logtype; + +#define PMD_LOG(level, fmt, args...) 
\ + rte_log(RTE_LOG_ ## level, eth_null_logtype, \ + "%s(): " fmt "\n", __func__, ##args) + +static uint16_t +eth_null_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs) +{ + int i; + struct null_queue *h = q; + unsigned int packet_size; + + if ((q == NULL) || (bufs == NULL)) + return 0; + + packet_size = h->internals->packet_size; + if (rte_pktmbuf_alloc_bulk(h->mb_pool, bufs, nb_bufs) != 0) + return 0; + + for (i = 0; i < nb_bufs; i++) { + bufs[i]->data_len = (uint16_t)packet_size; + bufs[i]->pkt_len = packet_size; + bufs[i]->port = h->internals->port_id; + } + + rte_atomic64_add(&(h->rx_pkts), i); + + return i; +} + +static uint16_t +eth_null_copy_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs) +{ + int i; + struct null_queue *h = q; + unsigned int packet_size; + + if ((q == NULL) || (bufs == NULL)) + return 0; + + packet_size = h->internals->packet_size; + if (rte_pktmbuf_alloc_bulk(h->mb_pool, bufs, nb_bufs) != 0) + return 0; + + for (i = 0; i < nb_bufs; i++) { + rte_memcpy(rte_pktmbuf_mtod(bufs[i], void *), h->dummy_packet, + packet_size); + bufs[i]->data_len = (uint16_t)packet_size; + bufs[i]->pkt_len = packet_size; + bufs[i]->port = h->internals->port_id; + } + + rte_atomic64_add(&(h->rx_pkts), i); + + return i; +} + +static uint16_t +eth_null_no_rx(void *q __rte_unused, struct rte_mbuf **bufs __rte_unused, + uint16_t nb_bufs __rte_unused) +{ + return 0; +} + +static uint16_t +eth_null_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs) +{ + int i; + struct null_queue *h = q; + + if ((q == NULL) || (bufs == NULL)) + return 0; + + for (i = 0; i < nb_bufs; i++) + rte_pktmbuf_free(bufs[i]); + + rte_atomic64_add(&(h->tx_pkts), i); + + return i; +} + +static uint16_t +eth_null_copy_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs) +{ + int i; + struct null_queue *h = q; + unsigned int packet_size; + + if ((q == NULL) || (bufs == NULL)) + return 0; + + packet_size = h->internals->packet_size; + for (i = 0; i < nb_bufs; i++) { + rte_memcpy(h->dummy_packet, rte_pktmbuf_mtod(bufs[i], void *), + packet_size); + rte_pktmbuf_free(bufs[i]); + } + + rte_atomic64_add(&(h->tx_pkts), i); + + return i; +} + +static int +eth_dev_configure(struct rte_eth_dev *dev __rte_unused) +{ + return 0; +} + +static int +eth_dev_start(struct rte_eth_dev *dev) +{ + if (dev == NULL) + return -EINVAL; + + dev->data->dev_link.link_status = ETH_LINK_UP; + return 0; +} + +static void +eth_dev_stop(struct rte_eth_dev *dev) +{ + if (dev == NULL) + return; + + dev->data->dev_link.link_status = ETH_LINK_DOWN; +} + +static int +eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id, + uint16_t nb_rx_desc __rte_unused, + unsigned int socket_id __rte_unused, + const struct rte_eth_rxconf *rx_conf __rte_unused, + struct rte_mempool *mb_pool) +{ + struct rte_mbuf *dummy_packet; + struct pmd_internals *internals; + unsigned int packet_size; + + if ((dev == NULL) || (mb_pool == NULL)) + return -EINVAL; + + internals = dev->data->dev_private; + + if (rx_queue_id >= dev->data->nb_rx_queues) + return -ENODEV; + + packet_size = internals->packet_size; + + internals->rx_null_queues[rx_queue_id].mb_pool = mb_pool; + dev->data->rx_queues[rx_queue_id] = + &internals->rx_null_queues[rx_queue_id]; + dummy_packet = rte_zmalloc_socket(NULL, + packet_size, 0, dev->data->numa_node); + if (dummy_packet == NULL) + return -ENOMEM; + + internals->rx_null_queues[rx_queue_id].internals = internals; + internals->rx_null_queues[rx_queue_id].dummy_packet = dummy_packet; + + return 0; +} + +static int +eth_tx_queue_setup(struct 
rte_eth_dev *dev, uint16_t tx_queue_id, + uint16_t nb_tx_desc __rte_unused, + unsigned int socket_id __rte_unused, + const struct rte_eth_txconf *tx_conf __rte_unused) +{ + struct rte_mbuf *dummy_packet; + struct pmd_internals *internals; + unsigned int packet_size; + + if (dev == NULL) + return -EINVAL; + + internals = dev->data->dev_private; + + if (tx_queue_id >= dev->data->nb_tx_queues) + return -ENODEV; + + packet_size = internals->packet_size; + + dev->data->tx_queues[tx_queue_id] = + &internals->tx_null_queues[tx_queue_id]; + dummy_packet = rte_zmalloc_socket(NULL, + packet_size, 0, dev->data->numa_node); + if (dummy_packet == NULL) + return -ENOMEM; + + internals->tx_null_queues[tx_queue_id].internals = internals; + internals->tx_null_queues[tx_queue_id].dummy_packet = dummy_packet; + + return 0; +} + +static int +eth_mtu_set(struct rte_eth_dev *dev __rte_unused, uint16_t mtu __rte_unused) +{ + return 0; +} + +static int +eth_dev_info(struct rte_eth_dev *dev, + struct rte_eth_dev_info *dev_info) +{ + struct pmd_internals *internals; + + if ((dev == NULL) || (dev_info == NULL)) + return -EINVAL; + + internals = dev->data->dev_private; + dev_info->max_mac_addrs = 1; + dev_info->max_rx_pktlen = (uint32_t)-1; + dev_info->max_rx_queues = RTE_DIM(internals->rx_null_queues); + dev_info->max_tx_queues = RTE_DIM(internals->tx_null_queues); + dev_info->min_rx_bufsize = 0; + dev_info->reta_size = internals->reta_size; + dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads; + + return 0; +} + +static int +eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats) +{ + unsigned int i, num_stats; + unsigned long rx_total = 0, tx_total = 0; + const struct pmd_internals *internal; + + if ((dev == NULL) || (igb_stats == NULL)) + return -EINVAL; + + internal = dev->data->dev_private; + num_stats = RTE_MIN((unsigned int)RTE_ETHDEV_QUEUE_STAT_CNTRS, + RTE_MIN(dev->data->nb_rx_queues, + RTE_DIM(internal->rx_null_queues))); + for (i = 0; i < num_stats; i++) { + igb_stats->q_ipackets[i] = + internal->rx_null_queues[i].rx_pkts.cnt; + rx_total += igb_stats->q_ipackets[i]; + } + + num_stats = RTE_MIN((unsigned int)RTE_ETHDEV_QUEUE_STAT_CNTRS, + RTE_MIN(dev->data->nb_tx_queues, + RTE_DIM(internal->tx_null_queues))); + for (i = 0; i < num_stats; i++) { + igb_stats->q_opackets[i] = + internal->tx_null_queues[i].tx_pkts.cnt; + tx_total += igb_stats->q_opackets[i]; + } + + igb_stats->ipackets = rx_total; + igb_stats->opackets = tx_total; + + return 0; +} + +static int +eth_stats_reset(struct rte_eth_dev *dev) +{ + unsigned int i; + struct pmd_internals *internal; + + if (dev == NULL) + return -EINVAL; + + internal = dev->data->dev_private; + for (i = 0; i < RTE_DIM(internal->rx_null_queues); i++) + internal->rx_null_queues[i].rx_pkts.cnt = 0; + for (i = 0; i < RTE_DIM(internal->tx_null_queues); i++) + internal->tx_null_queues[i].tx_pkts.cnt = 0; + + return 0; +} + +static void +eth_queue_release(void *q) +{ + struct null_queue *nq; + + if (q == NULL) + return; + + nq = q; + rte_free(nq->dummy_packet); +} + +static int +eth_link_update(struct rte_eth_dev *dev __rte_unused, + int wait_to_complete __rte_unused) { return 0; } + +static int +eth_rss_reta_update(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size) +{ + int i, j; + struct pmd_internals *internal = dev->data->dev_private; + + if (reta_size != internal->reta_size) + return -EINVAL; + + rte_spinlock_lock(&internal->rss_lock); + + /* Copy RETA table */ + for (i = 0; i < (internal->reta_size 
/ RTE_RETA_GROUP_SIZE); i++) { + internal->reta_conf[i].mask = reta_conf[i].mask; + for (j = 0; j < RTE_RETA_GROUP_SIZE; j++) + if ((reta_conf[i].mask >> j) & 0x01) + internal->reta_conf[i].reta[j] = reta_conf[i].reta[j]; + } + + rte_spinlock_unlock(&internal->rss_lock); + + return 0; +} + +static int +eth_rss_reta_query(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size) +{ + int i, j; + struct pmd_internals *internal = dev->data->dev_private; + + if (reta_size != internal->reta_size) + return -EINVAL; + + rte_spinlock_lock(&internal->rss_lock); + + /* Copy RETA table */ + for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) { + for (j = 0; j < RTE_RETA_GROUP_SIZE; j++) + if ((reta_conf[i].mask >> j) & 0x01) + reta_conf[i].reta[j] = internal->reta_conf[i].reta[j]; + } + + rte_spinlock_unlock(&internal->rss_lock); + + return 0; +} + +static int +eth_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf) +{ + struct pmd_internals *internal = dev->data->dev_private; + + rte_spinlock_lock(&internal->rss_lock); + + if ((rss_conf->rss_hf & internal->flow_type_rss_offloads) != 0) + dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf = + rss_conf->rss_hf & internal->flow_type_rss_offloads; + + if (rss_conf->rss_key) + rte_memcpy(internal->rss_key, rss_conf->rss_key, 40); + + rte_spinlock_unlock(&internal->rss_lock); + + return 0; +} + +static int +eth_rss_hash_conf_get(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf) +{ + struct pmd_internals *internal = dev->data->dev_private; + + rte_spinlock_lock(&internal->rss_lock); + + rss_conf->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf; + if (rss_conf->rss_key) + rte_memcpy(rss_conf->rss_key, internal->rss_key, 40); + + rte_spinlock_unlock(&internal->rss_lock); + + return 0; +} + +static int +eth_mac_address_set(__rte_unused struct rte_eth_dev *dev, + __rte_unused struct rte_ether_addr *addr) +{ + return 0; +} + +static const struct eth_dev_ops ops = { + .dev_start = eth_dev_start, + .dev_stop = eth_dev_stop, + .dev_configure = eth_dev_configure, + .dev_infos_get = eth_dev_info, + .rx_queue_setup = eth_rx_queue_setup, + .tx_queue_setup = eth_tx_queue_setup, + .rx_queue_release = eth_queue_release, + .tx_queue_release = eth_queue_release, + .mtu_set = eth_mtu_set, + .link_update = eth_link_update, + .mac_addr_set = eth_mac_address_set, + .stats_get = eth_stats_get, + .stats_reset = eth_stats_reset, + .reta_update = eth_rss_reta_update, + .reta_query = eth_rss_reta_query, + .rss_hash_update = eth_rss_hash_update, + .rss_hash_conf_get = eth_rss_hash_conf_get +}; + +static int +eth_dev_null_create(struct rte_vdev_device *dev, struct pmd_options *args) +{ + const unsigned int nb_rx_queues = 1; + const unsigned int nb_tx_queues = 1; + struct rte_eth_dev_data *data; + struct pmd_internals *internals = NULL; + struct rte_eth_dev *eth_dev = NULL; + + static const uint8_t default_rss_key[40] = { + 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D, + 0x43, 0xA3, 0x8F, 0xB0, 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4, + 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 0x6A, 0x42, 0xB7, 0x3B, + 0xBE, 0xAC, 0x01, 0xFA + }; + + if (dev->device.numa_node == SOCKET_ID_ANY) + dev->device.numa_node = rte_socket_id(); + + PMD_LOG(INFO, "Creating null ethdev on numa socket %u", + dev->device.numa_node); + + eth_dev = rte_eth_vdev_allocate(dev, sizeof(*internals)); + if (!eth_dev) + return -ENOMEM; + + /* now put it all together + * - store queue data in 
internals, + * - store numa_node info in ethdev data + * - point eth_dev_data to internals + * - and point eth_dev structure to new eth_dev_data structure + */ + /* NOTE: we'll replace the data element, of originally allocated eth_dev + * so the nulls are local per-process */ + + internals = eth_dev->data->dev_private; + internals->packet_size = args->packet_size; + internals->packet_copy = args->packet_copy; + internals->no_rx = args->no_rx; + internals->port_id = eth_dev->data->port_id; + rte_eth_random_addr(internals->eth_addr.addr_bytes); + + internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK; + internals->reta_size = RTE_DIM(internals->reta_conf) * RTE_RETA_GROUP_SIZE; + + rte_memcpy(internals->rss_key, default_rss_key, 40); + + data = eth_dev->data; + data->nb_rx_queues = (uint16_t)nb_rx_queues; + data->nb_tx_queues = (uint16_t)nb_tx_queues; + data->dev_link = pmd_link; + data->mac_addrs = &internals->eth_addr; + data->promiscuous = 1; + data->all_multicast = 1; + + eth_dev->dev_ops = &ops; + + /* finally assign rx and tx ops */ + if (internals->packet_copy) { + eth_dev->rx_pkt_burst = eth_null_copy_rx; + eth_dev->tx_pkt_burst = eth_null_copy_tx; + } else if (internals->no_rx) { + eth_dev->rx_pkt_burst = eth_null_no_rx; + eth_dev->tx_pkt_burst = eth_null_tx; + } else { + eth_dev->rx_pkt_burst = eth_null_rx; + eth_dev->tx_pkt_burst = eth_null_tx; + } + + rte_eth_dev_probing_finish(eth_dev); + return 0; +} + +static inline int +get_packet_size_arg(const char *key __rte_unused, + const char *value, void *extra_args) +{ + const char *a = value; + unsigned int *packet_size = extra_args; + + if ((value == NULL) || (extra_args == NULL)) + return -EINVAL; + + *packet_size = (unsigned int)strtoul(a, NULL, 0); + if (*packet_size == UINT_MAX) + return -1; + + return 0; +} + +static inline int +get_packet_copy_arg(const char *key __rte_unused, + const char *value, void *extra_args) +{ + const char *a = value; + unsigned int *packet_copy = extra_args; + + if ((value == NULL) || (extra_args == NULL)) + return -EINVAL; + + *packet_copy = (unsigned int)strtoul(a, NULL, 0); + if (*packet_copy == UINT_MAX) + return -1; + + return 0; +} + +static int +get_packet_no_rx_arg(const char *key __rte_unused, + const char *value, void *extra_args) +{ + const char *a = value; + unsigned int no_rx; + + if (value == NULL || extra_args == NULL) + return -EINVAL; + + no_rx = (unsigned int)strtoul(a, NULL, 0); + if (no_rx != 0 && no_rx != 1) + return -1; + + *(unsigned int *)extra_args = no_rx; + return 0; +} + +static int +rte_pmd_null_probe(struct rte_vdev_device *dev) +{ + const char *name, *params; + struct pmd_options args = { + .packet_copy = default_packet_copy, + .packet_size = default_packet_size, + .no_rx = default_no_rx, + }; + struct rte_kvargs *kvlist = NULL; + struct rte_eth_dev *eth_dev; + int ret; + + if (!dev) + return -EINVAL; + + name = rte_vdev_device_name(dev); + params = rte_vdev_device_args(dev); + PMD_LOG(INFO, "Initializing pmd_null for %s", name); + + if (rte_eal_process_type() == RTE_PROC_SECONDARY) { + struct pmd_internals *internals; + eth_dev = rte_eth_dev_attach_secondary(name); + if (!eth_dev) { + PMD_LOG(ERR, "Failed to probe %s", name); + return -1; + } + /* TODO: request info from primary to set up Rx and Tx */ + eth_dev->dev_ops = &ops; + eth_dev->device = &dev->device; + internals = eth_dev->data->dev_private; + if (internals->packet_copy) { + eth_dev->rx_pkt_burst = eth_null_copy_rx; + eth_dev->tx_pkt_burst = eth_null_copy_tx; + } else if (internals->no_rx) { + 
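/*
 * Illustrative usage sketch, not taken from the driver sources: creating a
 * null port at runtime instead of via the EAL --vdev option.  The device
 * name "net_null0" is arbitrary, and the devargs keys mirror the "size" and
 * "copy" arguments parsed by rte_pmd_null_probe(); treat both as assumptions.
 */
#include <rte_bus_vdev.h>
#include <rte_ethdev.h>

static int
example_create_null_port(uint16_t *port_id)
{
	int ret;

	/* Triggers rte_pmd_null_probe() -> eth_dev_null_create() */
	ret = rte_vdev_init("net_null0", "size=256,copy=1");
	if (ret != 0)
		return ret;

	/* The ethdev is registered under the vdev name */
	return rte_eth_dev_get_port_by_name("net_null0", port_id);
}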
eth_dev->rx_pkt_burst = eth_null_no_rx; + eth_dev->tx_pkt_burst = eth_null_tx; + } else { + eth_dev->rx_pkt_burst = eth_null_rx; + eth_dev->tx_pkt_burst = eth_null_tx; + } + rte_eth_dev_probing_finish(eth_dev); + return 0; + } + + if (params != NULL) { + kvlist = rte_kvargs_parse(params, valid_arguments); + if (kvlist == NULL) + return -1; + + ret = rte_kvargs_process(kvlist, + ETH_NULL_PACKET_SIZE_ARG, + &get_packet_size_arg, &args.packet_size); + if (ret < 0) + goto free_kvlist; + + + ret = rte_kvargs_process(kvlist, + ETH_NULL_PACKET_COPY_ARG, + &get_packet_copy_arg, &args.packet_copy); + if (ret < 0) + goto free_kvlist; + + ret = rte_kvargs_process(kvlist, + ETH_NULL_PACKET_NO_RX_ARG, + &get_packet_no_rx_arg, &args.no_rx); + if (ret < 0) + goto free_kvlist; + + if (args.no_rx && args.packet_copy) { + PMD_LOG(ERR, + "Both %s and %s arguments at the same time not supported", + ETH_NULL_PACKET_COPY_ARG, + ETH_NULL_PACKET_NO_RX_ARG); + goto free_kvlist; + } + } + + PMD_LOG(INFO, "Configure pmd_null: packet size is %d, " + "packet copy is %s", args.packet_size, + args.packet_copy ? "enabled" : "disabled"); + + ret = eth_dev_null_create(dev, &args); + +free_kvlist: + if (kvlist) + rte_kvargs_free(kvlist); + return ret; +} + +static int +rte_pmd_null_remove(struct rte_vdev_device *dev) +{ + struct rte_eth_dev *eth_dev = NULL; + + if (!dev) + return -EINVAL; + + PMD_LOG(INFO, "Closing null ethdev on numa socket %u", + rte_socket_id()); + + /* find the ethdev entry */ + eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev)); + if (eth_dev == NULL) + return -1; + + if (rte_eal_process_type() == RTE_PROC_PRIMARY) + /* mac_addrs must not be freed alone because part of dev_private */ + eth_dev->data->mac_addrs = NULL; + + rte_eth_dev_release_port(eth_dev); + + return 0; +} + +static struct rte_vdev_driver pmd_null_drv = { + .probe = rte_pmd_null_probe, + .remove = rte_pmd_null_remove, +}; + +RTE_PMD_REGISTER_VDEV(net_null, pmd_null_drv); +RTE_PMD_REGISTER_ALIAS(net_null, eth_null); +RTE_PMD_REGISTER_PARAM_STRING(net_null, + "size= " + "copy= " + ETH_NULL_PACKET_NO_RX_ARG "=0|1"); + +RTE_INIT(eth_null_init_log) +{ + eth_null_logtype = rte_log_register("pmd.net.null"); + if (eth_null_logtype >= 0) + rte_log_set_level(eth_null_logtype, RTE_LOG_NOTICE); +} diff --git a/src/spdk/dpdk/drivers/net/null/rte_pmd_null_version.map b/src/spdk/dpdk/drivers/net/null/rte_pmd_null_version.map new file mode 100644 index 000000000..f9f17e4f6 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/null/rte_pmd_null_version.map @@ -0,0 +1,3 @@ +DPDK_20.0 { + local: *; +}; diff --git a/src/spdk/dpdk/drivers/net/octeontx/Makefile b/src/spdk/dpdk/drivers/net/octeontx/Makefile new file mode 100644 index 000000000..c4db87800 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/octeontx/Makefile @@ -0,0 +1,53 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2017 Cavium, Inc +# + +include $(RTE_SDK)/mk/rte.vars.mk + +# +# library name +# +LIB = librte_pmd_octeontx.a + +CFLAGS += $(WERROR_FLAGS) +CFLAGS += -I$(RTE_SDK)/drivers/common/octeontx/ +CFLAGS += -I$(RTE_SDK)/drivers/mempool/octeontx/ + +EXPORT_MAP := rte_pmd_octeontx_version.map + +OBJS_BASE_DRIVER=$(patsubst %.c,%.o,$(notdir $(wildcard $(SRCDIR)/base/*.c))) +$(foreach obj, $(OBJS_BASE_DRIVER), $(eval CFLAGS_$(obj)+=$(CFLAGS_BASE_DRIVER))) + +VPATH += $(SRCDIR)/base + +# +# all source are stored in SRCS-y +# +SRCS-$(CONFIG_RTE_LIBRTE_OCTEONTX_PMD) += octeontx_rxtx.c +SRCS-$(CONFIG_RTE_LIBRTE_OCTEONTX_PMD) += octeontx_pkovf.c +SRCS-$(CONFIG_RTE_LIBRTE_OCTEONTX_PMD) += 
octeontx_pkivf.c +SRCS-$(CONFIG_RTE_LIBRTE_OCTEONTX_PMD) += octeontx_bgx.c +SRCS-$(CONFIG_RTE_LIBRTE_OCTEONTX_PMD) += octeontx_ethdev.c +SRCS-$(CONFIG_RTE_LIBRTE_OCTEONTX_PMD) += octeontx_ethdev_ops.c + +ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y) +CFLAGS_octeontx_rxtx.o += -fno-prefetch-loop-arrays + +ifeq ($(shell test $(GCC_VERSION) -ge 46 && echo 1), 1) +CFLAGS_octeontx_rxtx.o += -O3 -Ofast +else +CFLAGS_octeontx_rxtx.o += -O3 -ffast-math +endif + +else +CFLAGS_octeontx_rxtx.o += -O3 -Ofast +endif + +LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring +LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs -lrte_common_octeontx +LDLIBS += -lrte_mempool_octeontx +LDLIBS += -lrte_eventdev +LDLIBS += -lrte_bus_pci +LDLIBS += -lrte_bus_vdev + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/src/spdk/dpdk/drivers/net/octeontx/base/meson.build b/src/spdk/dpdk/drivers/net/octeontx/base/meson.build new file mode 100644 index 000000000..b8fe4b301 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/octeontx/base/meson.build @@ -0,0 +1,25 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2017 Cavium, Inc + +sources = [ + 'octeontx_pkovf.c', + 'octeontx_pkivf.c', + 'octeontx_bgx.c' +] + +depends = ['ethdev', 'mempool_octeontx'] +static_objs = [] +foreach d: depends + if not is_variable('shared_rte_' + d) + subdir_done() + endif + static_objs += get_variable('static_rte_' + d) +endforeach + +c_args = cflags +base_lib = static_library('octeontx_base', sources, + c_args: c_args, + dependencies: static_objs, +) + +base_objs = base_lib.extract_all_objects() diff --git a/src/spdk/dpdk/drivers/net/octeontx/base/octeontx_bgx.c b/src/spdk/dpdk/drivers/net/octeontx/base/octeontx_bgx.c new file mode 100644 index 000000000..ac856ff86 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/octeontx/base/octeontx_bgx.c @@ -0,0 +1,378 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Cavium, Inc + */ + +#include + +#include "octeontx_bgx.h" + +int +octeontx_bgx_port_open(int port, octeontx_mbox_bgx_port_conf_t *conf) +{ + struct octeontx_mbox_hdr hdr; + octeontx_mbox_bgx_port_conf_t bgx_conf; + int len = sizeof(octeontx_mbox_bgx_port_conf_t); + int res; + + memset(&bgx_conf, 0, sizeof(octeontx_mbox_bgx_port_conf_t)); + hdr.coproc = OCTEONTX_BGX_COPROC; + hdr.msg = MBOX_BGX_PORT_OPEN; + hdr.vfid = port; + + res = octeontx_mbox_send(&hdr, NULL, 0, &bgx_conf, len); + if (res < 0) + return -EACCES; + + conf->enable = bgx_conf.enable; + conf->promisc = bgx_conf.promisc; + conf->bpen = bgx_conf.bpen; + conf->node = bgx_conf.node; + conf->base_chan = bgx_conf.base_chan; + conf->num_chans = bgx_conf.num_chans; + conf->mtu = bgx_conf.mtu; + conf->bgx = bgx_conf.bgx; + conf->lmac = bgx_conf.lmac; + conf->mode = bgx_conf.mode; + conf->pkind = bgx_conf.pkind; + memcpy(conf->macaddr, bgx_conf.macaddr, 6); + + return res; +} + +int +octeontx_bgx_port_close(int port) +{ + struct octeontx_mbox_hdr hdr; + int res; + + hdr.coproc = OCTEONTX_BGX_COPROC; + hdr.msg = MBOX_BGX_PORT_CLOSE; + hdr.vfid = port; + + res = octeontx_mbox_send(&hdr, NULL, 0, NULL, 0); + if (res < 0) + return -EACCES; + + return res; +} + +int +octeontx_bgx_port_start(int port) +{ + struct octeontx_mbox_hdr hdr; + int res; + + hdr.coproc = OCTEONTX_BGX_COPROC; + hdr.msg = MBOX_BGX_PORT_START; + hdr.vfid = port; + + res = octeontx_mbox_send(&hdr, NULL, 0, NULL, 0); + if (res < 0) + return -EACCES; + + return res; +} + +int +octeontx_bgx_port_stop(int port) +{ + struct octeontx_mbox_hdr hdr; + int res; + + hdr.coproc = OCTEONTX_BGX_COPROC; + hdr.msg = 
MBOX_BGX_PORT_STOP; + hdr.vfid = port; + + res = octeontx_mbox_send(&hdr, NULL, 0, NULL, 0); + if (res < 0) + return -EACCES; + + return res; +} + +int +octeontx_bgx_port_get_config(int port, octeontx_mbox_bgx_port_conf_t *conf) +{ + struct octeontx_mbox_hdr hdr; + octeontx_mbox_bgx_port_conf_t bgx_conf; + int len = sizeof(octeontx_mbox_bgx_port_conf_t); + int res; + + hdr.coproc = OCTEONTX_BGX_COPROC; + hdr.msg = MBOX_BGX_PORT_GET_CONFIG; + hdr.vfid = port; + + memset(&bgx_conf, 0, sizeof(octeontx_mbox_bgx_port_conf_t)); + res = octeontx_mbox_send(&hdr, NULL, 0, &bgx_conf, len); + if (res < 0) + return -EACCES; + + conf->enable = bgx_conf.enable; + conf->promisc = bgx_conf.promisc; + conf->bpen = bgx_conf.bpen; + conf->node = bgx_conf.node; + conf->base_chan = bgx_conf.base_chan; + conf->num_chans = bgx_conf.num_chans; + conf->mtu = bgx_conf.mtu; + conf->bgx = bgx_conf.bgx; + conf->lmac = bgx_conf.lmac; + conf->mode = bgx_conf.mode; + conf->pkind = bgx_conf.pkind; + memcpy(conf->macaddr, bgx_conf.macaddr, 6); + + return res; +} + +int +octeontx_bgx_port_status(int port, octeontx_mbox_bgx_port_status_t *stat) +{ + struct octeontx_mbox_hdr hdr; + octeontx_mbox_bgx_port_status_t bgx_stat; + int len = sizeof(octeontx_mbox_bgx_port_status_t); + int res; + + hdr.coproc = OCTEONTX_BGX_COPROC; + hdr.msg = MBOX_BGX_PORT_GET_STATUS; + hdr.vfid = port; + + res = octeontx_mbox_send(&hdr, NULL, 0, &bgx_stat, len); + if (res < 0) + return -EACCES; + + stat->link_up = bgx_stat.link_up; + + return res; +} + +int +octeontx_bgx_port_stats(int port, octeontx_mbox_bgx_port_stats_t *stats) +{ + struct octeontx_mbox_hdr hdr; + octeontx_mbox_bgx_port_stats_t bgx_stats; + int len = sizeof(octeontx_mbox_bgx_port_stats_t); + int res; + + hdr.coproc = OCTEONTX_BGX_COPROC; + hdr.msg = MBOX_BGX_PORT_GET_STATS; + hdr.vfid = port; + + res = octeontx_mbox_send(&hdr, NULL, 0, &bgx_stats, len); + if (res < 0) + return -EACCES; + + stats->rx_packets = bgx_stats.rx_packets; + stats->rx_bytes = bgx_stats.rx_bytes; + stats->rx_dropped = bgx_stats.rx_dropped; + stats->rx_errors = bgx_stats.rx_errors; + stats->tx_packets = bgx_stats.tx_packets; + stats->tx_bytes = bgx_stats.tx_bytes; + stats->tx_dropped = bgx_stats.tx_dropped; + stats->tx_errors = bgx_stats.tx_errors; + return res; +} + +int +octeontx_bgx_port_stats_clr(int port) +{ + struct octeontx_mbox_hdr hdr; + int res; + + hdr.coproc = OCTEONTX_BGX_COPROC; + hdr.msg = MBOX_BGX_PORT_CLR_STATS; + hdr.vfid = port; + + res = octeontx_mbox_send(&hdr, NULL, 0, NULL, 0); + if (res < 0) + return -EACCES; + + return res; +} + +int +octeontx_bgx_port_link_status(int port) +{ + struct octeontx_mbox_hdr hdr; + uint8_t link; + int len = sizeof(uint8_t); + int res; + + hdr.coproc = OCTEONTX_BGX_COPROC; + hdr.msg = MBOX_BGX_PORT_GET_LINK_STATUS; + hdr.vfid = port; + + res = octeontx_mbox_send(&hdr, NULL, 0, &link, len); + if (res < 0) + return -EACCES; + + return link; +} + +int +octeontx_bgx_port_set_link_state(int port, bool enable) +{ + struct octeontx_mbox_hdr hdr; + + hdr.coproc = OCTEONTX_BGX_COPROC; + hdr.msg = MBOX_BGX_PORT_SET_LINK_STATE; + hdr.vfid = port; + + return octeontx_mbox_send(&hdr, &enable, sizeof(bool), NULL, 0); +} + +int +octeontx_bgx_port_promisc_set(int port, int en) +{ + struct octeontx_mbox_hdr hdr; + uint8_t prom; + int res; + + hdr.coproc = OCTEONTX_BGX_COPROC; + hdr.msg = MBOX_BGX_PORT_SET_PROMISC; + hdr.vfid = port; + prom = en ? 
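/*
 * Illustrative usage sketch, not taken from the driver sources: a typical
 * mailbox sequence built from the BGX helpers above -- open the port, enable
 * it, then read its counters.  The port index is a placeholder.
 */
#include "octeontx_bgx.h"

static int
example_bgx_bringup(int port)
{
	octeontx_mbox_bgx_port_conf_t conf;
	octeontx_mbox_bgx_port_stats_t stats;
	int res;

	res = octeontx_bgx_port_open(port, &conf);
	if (res < 0)
		return res;

	res = octeontx_bgx_port_start(port);
	if (res < 0)
		goto close;

	res = octeontx_bgx_port_stats(port, &stats);
	if (res < 0)
		goto stop;

	return 0;

stop:
	octeontx_bgx_port_stop(port);
close:
	octeontx_bgx_port_close(port);
	return res;
}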
1 : 0; + + res = octeontx_mbox_send(&hdr, &prom, sizeof(prom), NULL, 0); + if (res < 0) + return -EACCES; + + return res; +} + +int +octeontx_bgx_port_mtu_set(int port, int mtu) +{ + struct octeontx_mbox_hdr hdr; + int res; + + hdr.coproc = OCTEONTX_BGX_COPROC; + hdr.msg = MBOX_BGX_PORT_SET_MTU; + hdr.vfid = port; + + res = octeontx_mbox_send(&hdr, &mtu, sizeof(mtu), NULL, 0); + if (res < 0) + return -EACCES; + + return res; +} + +int +octeontx_bgx_port_mac_set(int port, uint8_t *mac_addr) +{ + struct octeontx_mbox_hdr hdr; + int len = 6; + int res = 0; + + hdr.coproc = OCTEONTX_BGX_COPROC; + hdr.msg = MBOX_BGX_PORT_SET_MACADDR; + hdr.vfid = port; + + res = octeontx_mbox_send(&hdr, mac_addr, len, NULL, 0); + if (res < 0) + return -EACCES; + + return res; +} + +int +octeontx_bgx_port_mac_add(int port, uint8_t *mac_addr, int index) +{ + struct octeontx_mbox_bgx_port_mac_filter filter; + struct octeontx_mbox_hdr hdr; + int len = 6; + + hdr.coproc = OCTEONTX_BGX_COPROC; + hdr.msg = MBOX_BGX_PORT_ADD_MACADDR; + hdr.vfid = port; + + memcpy(filter.mac_addr, mac_addr, len); + filter.index = index; + len = sizeof(struct octeontx_mbox_bgx_port_mac_filter); + + return octeontx_mbox_send(&hdr, &filter, len, NULL, 0); +} + +int +octeontx_bgx_port_mac_del(int port, uint32_t index) +{ + struct octeontx_mbox_hdr hdr; + int len = sizeof(uint32_t); + int res = 0; + + hdr.coproc = OCTEONTX_BGX_COPROC; + hdr.msg = MBOX_BGX_PORT_DEL_MACADDR; + hdr.vfid = port; + + res = octeontx_mbox_send(&hdr, &index, len, NULL, 0); + if (res < 0) + return -EACCES; + + return res; +} + +int +octeontx_bgx_port_mac_entries_get(int port) +{ + struct octeontx_mbox_hdr hdr; + int resp = 6; + int res = 0; + + hdr.coproc = OCTEONTX_BGX_COPROC; + hdr.msg = MBOX_BGX_PORT_GET_MACADDR_ENTRIES; + hdr.vfid = port; + + res = octeontx_mbox_send(&hdr, NULL, 0, &resp, sizeof(int)); + if (res < 0) + return -EACCES; + + return resp; +} + +int octeontx_bgx_port_get_fifo_cfg(int port, + octeontx_mbox_bgx_port_fifo_cfg_t *cfg) +{ + int len = sizeof(octeontx_mbox_bgx_port_fifo_cfg_t); + octeontx_mbox_bgx_port_fifo_cfg_t conf; + struct octeontx_mbox_hdr hdr; + + hdr.coproc = OCTEONTX_BGX_COPROC; + hdr.msg = MBOX_BGX_PORT_GET_FIFO_CFG; + hdr.vfid = port; + + if (octeontx_mbox_send(&hdr, NULL, 0, &conf, len) < 0) + return -EACCES; + + cfg->rx_fifosz = conf.rx_fifosz; + + return 0; +} + +int octeontx_bgx_port_flow_ctrl_cfg(int port, + octeontx_mbox_bgx_port_fc_cfg_t *cfg) +{ + int len = sizeof(octeontx_mbox_bgx_port_fc_cfg_t); + octeontx_mbox_bgx_port_fc_cfg_t conf; + struct octeontx_mbox_hdr hdr; + + hdr.coproc = OCTEONTX_BGX_COPROC; + hdr.msg = MBOX_BGX_PORT_FLOW_CTRL_CFG; + hdr.vfid = port; + + if (cfg->fc_cfg == BGX_PORT_FC_CFG_SET) + memcpy(&conf, cfg, len); + else + memset(&conf, 0, len); + + if (octeontx_mbox_send(&hdr, &conf, len, &conf, len) < 0) + return -EACCES; + + if (cfg->fc_cfg == BGX_PORT_FC_CFG_SET) + goto done; + + cfg->rx_pause = conf.rx_pause; + cfg->tx_pause = conf.tx_pause; + cfg->low_water = conf.low_water; + cfg->high_water = conf.high_water; + +done: + return 0; +} diff --git a/src/spdk/dpdk/drivers/net/octeontx/base/octeontx_bgx.h b/src/spdk/dpdk/drivers/net/octeontx/base/octeontx_bgx.h new file mode 100644 index 000000000..d126a0b7f --- /dev/null +++ b/src/spdk/dpdk/drivers/net/octeontx/base/octeontx_bgx.h @@ -0,0 +1,168 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Cavium, Inc + */ + +#ifndef __OCTEONTX_BGX_H__ +#define __OCTEONTX_BGX_H__ + +#include +#include +#include + +#include + +#define 
OCTEONTX_BGX_RSVD_RX_FIFOBYTES 0x40 + +#define OCTEONTX_BGX_COPROC 6 + +/* BGX messages */ +#define MBOX_BGX_PORT_OPEN 0 +#define MBOX_BGX_PORT_CLOSE 1 +#define MBOX_BGX_PORT_START 2 +#define MBOX_BGX_PORT_STOP 3 +#define MBOX_BGX_PORT_GET_CONFIG 4 +#define MBOX_BGX_PORT_GET_STATUS 5 +#define MBOX_BGX_PORT_GET_STATS 6 +#define MBOX_BGX_PORT_CLR_STATS 7 +#define MBOX_BGX_PORT_GET_LINK_STATUS 8 +#define MBOX_BGX_PORT_SET_PROMISC 9 +#define MBOX_BGX_PORT_SET_MACADDR 10 +#define MBOX_BGX_PORT_SET_BP 11 +#define MBOX_BGX_PORT_SET_BCAST 12 +#define MBOX_BGX_PORT_SET_MCAST 13 +#define MBOX_BGX_PORT_SET_MTU 14 +#define MBOX_BGX_PORT_ADD_MACADDR 15 +#define MBOX_BGX_PORT_DEL_MACADDR 16 +#define MBOX_BGX_PORT_GET_MACADDR_ENTRIES 17 +#define MBOX_BGX_PORT_GET_FIFO_CFG 18 +#define MBOX_BGX_PORT_FLOW_CTRL_CFG 19 +#define MBOX_BGX_PORT_SET_LINK_STATE 20 + +/* BGX port configuration parameters: */ +typedef struct octeontx_mbox_bgx_port_conf { + uint8_t enable; + uint8_t promisc; + uint8_t bpen; + uint8_t macaddr[6]; /* MAC address.*/ + uint8_t fcs_strip; + uint8_t bcast_mode; + uint8_t mcast_mode; + uint8_t node; /* CPU node */ + uint16_t base_chan; + uint16_t num_chans; + uint16_t mtu; + uint8_t bgx; + uint8_t lmac; + uint8_t mode; + uint8_t pkind; +} octeontx_mbox_bgx_port_conf_t; + +/* BGX port status: */ +typedef struct octeontx_mbox_bgx_port_status { + uint8_t link_up; + uint8_t bp; + uint8_t duplex; + uint32_t speed; +} octeontx_mbox_bgx_port_status_t; + +/* BGX port statistics: */ +typedef struct octeontx_mbox_bgx_port_stats { + uint64_t rx_packets; + uint64_t tx_packets; + uint64_t rx_bytes; + uint64_t tx_bytes; + uint64_t rx_errors; + uint64_t tx_errors; + uint64_t rx_dropped; + uint64_t tx_dropped; + uint64_t multicast; + uint64_t collisions; + + uint64_t rx_length_errors; + uint64_t rx_over_errors; + uint64_t rx_crc_errors; + uint64_t rx_frame_errors; + uint64_t rx_fifo_errors; + uint64_t rx_missed_errors; + + /* Detailed transmit errors. */ + uint64_t tx_aborted_errors; + uint64_t tx_carrier_errors; + uint64_t tx_fifo_errors; + uint64_t tx_heartbeat_errors; + uint64_t tx_window_errors; + + /* Extended statistics based on RFC2819. 
*/ + uint64_t rx_1_to_64_packets; + uint64_t rx_65_to_127_packets; + uint64_t rx_128_to_255_packets; + uint64_t rx_256_to_511_packets; + uint64_t rx_512_to_1023_packets; + uint64_t rx_1024_to_1522_packets; + uint64_t rx_1523_to_max_packets; + + uint64_t tx_1_to_64_packets; + uint64_t tx_65_to_127_packets; + uint64_t tx_128_to_255_packets; + uint64_t tx_256_to_511_packets; + uint64_t tx_512_to_1023_packets; + uint64_t tx_1024_to_1522_packets; + uint64_t tx_1523_to_max_packets; + + uint64_t tx_multicast_packets; + uint64_t rx_broadcast_packets; + uint64_t tx_broadcast_packets; + uint64_t rx_undersized_errors; + uint64_t rx_oversize_errors; + uint64_t rx_fragmented_errors; + uint64_t rx_jabber_errors; +} octeontx_mbox_bgx_port_stats_t; + +struct octeontx_mbox_bgx_port_mac_filter { + uint8_t mac_addr[6]; + int index; +}; + +/* BGX port fifo config: */ +typedef struct octeontx_mbox_bgx_port_fifo_cfg { + uint32_t rx_fifosz; /* in Bytes */ +} octeontx_mbox_bgx_port_fifo_cfg_t; + +typedef enum { + BGX_PORT_FC_CFG_GET = 0, + BGX_PORT_FC_CFG_SET = 1 +} bgx_port_fc_t; + +/* BGX port flow control config: */ +typedef struct octeontx_mbox_bgx_port_fc_cfg { + /* BP on/off threshold levels in Bytes, must be a multiple of 16 */ + uint16_t high_water; + uint16_t low_water; + uint8_t rx_pause; /* rx_pause = 1/0 to enable/disable fc on Tx */ + uint8_t tx_pause; /* tx_pause = 1/0 to enable/disable fc on Rx */ + bgx_port_fc_t fc_cfg; +} octeontx_mbox_bgx_port_fc_cfg_t; + +int octeontx_bgx_port_open(int port, octeontx_mbox_bgx_port_conf_t *conf); +int octeontx_bgx_port_close(int port); +int octeontx_bgx_port_start(int port); +int octeontx_bgx_port_stop(int port); +int octeontx_bgx_port_get_config(int port, octeontx_mbox_bgx_port_conf_t *conf); +int octeontx_bgx_port_status(int port, octeontx_mbox_bgx_port_status_t *stat); +int octeontx_bgx_port_stats(int port, octeontx_mbox_bgx_port_stats_t *stats); +int octeontx_bgx_port_stats_clr(int port); +int octeontx_bgx_port_link_status(int port); +int octeontx_bgx_port_promisc_set(int port, int en); +int octeontx_bgx_port_mac_set(int port, uint8_t *mac_addr); +int octeontx_bgx_port_mac_add(int port, uint8_t *mac_addr, int index); +int octeontx_bgx_port_mac_del(int port, uint32_t index); +int octeontx_bgx_port_mac_entries_get(int port); +int octeontx_bgx_port_mtu_set(int port, int mtu); +int octeontx_bgx_port_set_link_state(int port, bool en); +int octeontx_bgx_port_get_fifo_cfg(int port, + octeontx_mbox_bgx_port_fifo_cfg_t *cfg); +int octeontx_bgx_port_flow_ctrl_cfg(int port, + octeontx_mbox_bgx_port_fc_cfg_t *cfg); + +#endif /* __OCTEONTX_BGX_H__ */ + diff --git a/src/spdk/dpdk/drivers/net/octeontx/base/octeontx_io.h b/src/spdk/dpdk/drivers/net/octeontx/base/octeontx_io.h new file mode 100644 index 000000000..04b9ce191 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/octeontx/base/octeontx_io.h @@ -0,0 +1,128 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Cavium, Inc + */ + +#ifndef __OCTEONTX_IO_H__ +#define __OCTEONTX_IO_H__ + +#include +#include + +#include + +/* In Cavium OCTEON TX SoC, all accesses to the device registers are + * implicitly strongly ordered. So, The relaxed version of IO operation is + * safe to use with out any IO memory barriers. 
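/*
 * Illustrative usage sketch, not taken from the driver sources: calling
 * octeontx_bgx_port_flow_ctrl_cfg() first with BGX_PORT_FC_CFG_GET to read
 * the firmware's current watermarks, then with BGX_PORT_FC_CFG_SET to enable
 * pause frames in both directions.  The port index is a placeholder.
 */
#include <string.h>
#include "octeontx_bgx.h"

static int
example_bgx_enable_pause(int port)
{
	octeontx_mbox_bgx_port_fc_cfg_t fc;
	int res;

	memset(&fc, 0, sizeof(fc));
	fc.fc_cfg = BGX_PORT_FC_CFG_GET;
	res = octeontx_bgx_port_flow_ctrl_cfg(port, &fc);
	if (res < 0)
		return res;

	/* Keep the watermarks reported by the firmware, toggle pause only */
	fc.fc_cfg = BGX_PORT_FC_CFG_SET;
	fc.rx_pause = 1;
	fc.tx_pause = 1;
	return octeontx_bgx_port_flow_ctrl_cfg(port, &fc);
}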
+ */ +#define octeontx_read64 rte_read64_relaxed +#define octeontx_write64 rte_write64_relaxed + +/* ARM64 specific functions */ +#if defined(RTE_ARCH_ARM64) +#define octeontx_prefetch_store_keep(_ptr) ({\ + asm volatile("prfm pstl1keep, %a0\n" : : "p" (_ptr)); }) + +#define octeontx_load_pair(val0, val1, addr) ({ \ + asm volatile( \ + "ldp %x[x0], %x[x1], [%x[p1]]" \ + :[x0]"=r"(val0), [x1]"=r"(val1) \ + :[p1]"r"(addr) \ + ); }) + +#define octeontx_store_pair(val0, val1, addr) ({ \ + asm volatile( \ + "stp %x[x0], %x[x1], [%x[p1]]" \ + ::[x0]"r"(val0), [x1]"r"(val1), [p1]"r"(addr) \ + ); }) +#else /* Un optimized functions for building on non arm64 arch */ + +#define octeontx_prefetch_store_keep(_ptr) do {} while (0) + +#define octeontx_load_pair(val0, val1, addr) \ +do { \ + val0 = rte_read64(addr); \ + val1 = rte_read64(((uint8_t *)addr) + 8); \ +} while (0) + +#define octeontx_store_pair(val0, val1, addr) \ +do { \ + rte_write64(val0, addr); \ + rte_write64(val1, (((uint8_t *)addr) + 8)); \ +} while (0) +#endif + +#if defined(RTE_ARCH_ARM64) +/** + * Perform an atomic fetch-and-add operation. + */ +static inline uint64_t +octeontx_reg_ldadd_u64(void *addr, int64_t off) +{ + uint64_t old_val; + + __asm__ volatile( + " .cpu generic+lse\n" + " ldadd %1, %0, [%2]\n" + : "=r" (old_val) : "r" (off), "r" (addr) : "memory"); + + return old_val; +} + +/** + * Perform a LMTST operation - an atomic write of up to 128 byte to + * an I/O block that supports this operation type. + * + * @param lmtline_va is the address where LMTLINE is mapped + * @param ioreg_va is the virtual address of the device register + * @param cmdbuf is the array of peripheral commands to execute + * @param cmdsize is the number of 64-bit words in 'cmdbuf' + * + * @return N/A + */ +static inline void +octeontx_reg_lmtst(void *lmtline_va, void *ioreg_va, const uint64_t cmdbuf[], + uint64_t cmdsize) +{ + uint64_t result; + uint64_t word_count; + uint64_t *lmtline = lmtline_va; + + word_count = cmdsize; + + do { + /* Copy commands to LMTLINE */ + for (result = 0; result < word_count; result += 2) { + lmtline[result + 0] = cmdbuf[result + 0]; + lmtline[result + 1] = cmdbuf[result + 1]; + } + + /* LDEOR initiates atomic transfer to I/O device */ + __asm__ volatile( + " .cpu generic+lse\n" + " ldeor xzr, %0, [%1]\n" + : "=r" (result) : "r" (ioreg_va) : "memory"); + } while (!result); +} + +#else + +static inline uint64_t +octeontx_reg_ldadd_u64(void *addr, int64_t off) +{ + RTE_SET_USED(addr); + RTE_SET_USED(off); + return 0; +} + +static inline void +octeontx_reg_lmtst(void *lmtline_va, void *ioreg_va, const uint64_t cmdbuf[], + uint64_t cmdsize) +{ + RTE_SET_USED(lmtline_va); + RTE_SET_USED(ioreg_va); + RTE_SET_USED(cmdbuf); + RTE_SET_USED(cmdsize); +} + +#endif +#endif /* __OCTEONTX_IO_H__ */ diff --git a/src/spdk/dpdk/drivers/net/octeontx/base/octeontx_pki_var.h b/src/spdk/dpdk/drivers/net/octeontx/base/octeontx_pki_var.h new file mode 100644 index 000000000..4445369ce --- /dev/null +++ b/src/spdk/dpdk/drivers/net/octeontx/base/octeontx_pki_var.h @@ -0,0 +1,250 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Cavium, Inc + */ + +#ifndef __OCTEONTX_PKI_VAR_H__ +#define __OCTEONTX_PKI_VAR_H__ + +#include + +#define OCTTX_PACKET_WQE_SKIP 128 +#define OCTTX_PACKET_FIRST_SKIP_MAXREGVAL 496 +#define OCTTX_PACKET_FIRST_SKIP_MAXLEN 512 +#define OCTTX_PACKET_FIRST_SKIP_ADJUST(x) \ + (RTE_MIN(x, OCTTX_PACKET_FIRST_SKIP_MAXREGVAL)) +#define OCTTX_PACKET_FIRST_SKIP_SUM(p) \ + (OCTTX_PACKET_WQE_SKIP \ + + 
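/*
 * Illustrative usage sketch, not taken from the driver sources: submitting a
 * two-word command with octeontx_reg_lmtst().  The command words and the two
 * device addresses are placeholders supplied by the caller; in the PMD they
 * come from the PKO descriptor-queue setup later in this patch.
 */
#include <stdint.h>
#include "octeontx_io.h"

static inline void
example_lmtst_submit(void *lmtline_va, void *ioreg_va,
		     uint64_t word0, uint64_t word1)
{
	uint64_t cmd[2];

	cmd[0] = word0;		/* e.g. a send-header word */
	cmd[1] = word1;		/* e.g. a buffer-link word */

	/* Copies the command to the LMTLINE and issues LDEOR, retrying
	 * until the device accepts the transfer.
	 */
	octeontx_reg_lmtst(lmtline_va, ioreg_va, cmd, 2);
}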
rte_pktmbuf_priv_size(p) \ + + RTE_PKTMBUF_HEADROOM) +#define OCTTX_PACKET_FIRST_SKIP(p) \ + OCTTX_PACKET_FIRST_SKIP_ADJUST(OCTTX_PACKET_FIRST_SKIP_SUM(p)) +#define OCTTX_PACKET_LATER_SKIP 128 + +/* WQE descriptor */ +typedef union octtx_wqe_s { + uint64_t w[6]; + + struct { +#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN + struct { + uint64_t pknd : 6; + uint64_t rsvd0 : 10; + uint64_t style : 8; + uint64_t bufs : 8; + uint64_t chan : 12; + uint64_t apad : 3; + uint64_t rsvd1 : 1; + uint64_t aura : 12; + uint64_t rsvd2 : 4; + } w0; + + struct { + uint64_t tag : 32; + uint64_t tt : 2; + uint64_t grp : 10; + uint64_t rsvd0 : 2; + uint64_t rsvd1 : 2; + uint64_t len : 16; + } w1; + + struct { + uint64_t op_code : 8; + uint64_t err_lev : 3; + uint64_t raw : 1; + uint64_t l2m : 1; + uint64_t l2b : 1; + uint64_t l3m : 1; + uint64_t l3b : 1; + uint64_t l3fr : 1; + uint64_t pf1 : 1; + uint64_t pf2 : 1; + uint64_t pf3 : 1; + uint64_t pf4 : 1; + uint64_t sh : 1; + uint64_t vs : 1; + uint64_t vv : 1; + uint64_t rsvd0 : 8; + uint64_t lae : 1; + uint64_t lbty : 5; + uint64_t lcty : 5; + uint64_t ldty : 5; + uint64_t lety : 5; + uint64_t lfty : 5; + uint64_t lgty : 5; + uint64_t sw : 1; + } w2; + + struct { + uint64_t addr; /* Byte addr of start-of-pkt */ + } w3; + + struct { + uint64_t laptr : 8; + uint64_t lbptr : 8; + uint64_t lcptr : 8; + uint64_t ldprt : 8; + uint64_t leptr : 8; + uint64_t lfptr : 8; + uint64_t lgptr : 8; + uint64_t vlptr : 8; + } w4; + + struct { + uint64_t rsvd0 : 47; + uint64_t dwd : 1; + uint64_t size : 16; + } w5; +#else + struct { + uint64_t rsvd2 : 4; + uint64_t aura : 12; + uint64_t rsvd1 : 1; + uint64_t apad : 3; + uint64_t chan : 12; + uint64_t bufs : 8; + uint64_t style : 8; + uint64_t rsvd0 : 10; + uint64_t pknd : 6; + } w0; + + struct { + uint64_t len : 16; + uint64_t rsvd1 : 2; + uint64_t rsvd0 : 2; + uint64_t grp : 10; + uint64_t tt : 2; + uint64_t tag : 32; + } w1; + + struct { + uint64_t sw : 1; + uint64_t lgty : 5; + uint64_t lfty : 5; + uint64_t lety : 5; + uint64_t ldty : 5; + uint64_t lcty : 5; + uint64_t lbty : 5; + uint64_t lae : 1; + uint64_t rsvd0 : 8; + uint64_t vv : 1; + uint64_t vs : 1; + uint64_t sh : 1; + uint64_t pf4 : 1; + uint64_t pf3 : 1; + uint64_t pf2 : 1; + uint64_t pf1 : 1; + uint64_t l3fr : 1; + uint64_t l3b : 1; + uint64_t l3m : 1; + uint64_t l2b : 1; + uint64_t l2m : 1; + uint64_t raw : 1; + uint64_t err_lev : 3; + uint64_t op_code : 8; + } w2; + + struct { + uint64_t addr; /* Byte addr of start-of-pkt */ + } w3; + + struct { + uint64_t vlptr : 8; + uint64_t lgptr : 8; + uint64_t lfptr : 8; + uint64_t leptr : 8; + uint64_t ldprt : 8; + uint64_t lcptr : 8; + uint64_t lbptr : 8; + uint64_t laptr : 8; + } w4; +#endif + } s; + +} __rte_packed octtx_wqe_t; + +enum occtx_pki_ltype_e { + OCCTX_PKI_LTYPE_NONE = 0, + OCCTX_PKI_LTYPE_ENET = 1, + OCCTX_PKI_LTYPE_VLAN = 2, + OCCTX_PKI_LTYPE_SNAP_PAYLD = 5, + OCCTX_PKI_LTYPE_ARP = 6, + OCCTX_PKI_LTYPE_RARP = 7, + OCCTX_PKI_LTYPE_IP4 = 8, + OCCTX_PKI_LTYPE_IP4_OPT = 9, + OCCTX_PKI_LTYPE_IP6 = 0xa, + OCCTX_PKI_LTYPE_IP6_OPT = 0xb, + OCCTX_PKI_LTYPE_IPSEC_ESP = 0xc, + OCCTX_PKI_LTYPE_IPFRAG = 0xd, + OCCTX_PKI_LTYPE_IPCOMP = 0xe, + OCCTX_PKI_LTYPE_TCP = 0x10, + OCCTX_PKI_LTYPE_UDP = 0x11, + OCCTX_PKI_LTYPE_SCTP = 0x12, + OCCTX_PKI_LTYPE_UDP_VXLAN = 0x13, + OCCTX_PKI_LTYPE_GRE = 0x14, + OCCTX_PKI_LTYPE_NVGRE = 0x15, + OCCTX_PKI_LTYPE_GTP = 0x16, + OCCTX_PKI_LTYPE_UDP_GENEVE = 0x17, + OCCTX_PKI_LTYPE_SW28 = 0x1c, + OCCTX_PKI_LTYPE_SW29 = 0x1d, + OCCTX_PKI_LTYPE_SW30 = 0x1e, + OCCTX_PKI_LTYPE_SW31 = 0x1f, + 
OCCTX_PKI_LTYPE_LAST +}; + +enum lc_type_e { + LC_NONE = OCCTX_PKI_LTYPE_NONE, + LC_IPV4 = OCCTX_PKI_LTYPE_IP4, + LC_IPV4_OPT = OCCTX_PKI_LTYPE_IP4_OPT, + LC_IPV6 = OCCTX_PKI_LTYPE_IP6, + LC_IPV6_OPT = OCCTX_PKI_LTYPE_IP6_OPT, +}; + +enum le_type_e { + LE_NONE = OCCTX_PKI_LTYPE_NONE, +}; + +enum lf_type_e { + LF_NONE = OCCTX_PKI_LTYPE_NONE, + LF_IPSEC_ESP = OCCTX_PKI_LTYPE_IPSEC_ESP, + LF_IPFRAG = OCCTX_PKI_LTYPE_IPFRAG, + LF_IPCOMP = OCCTX_PKI_LTYPE_IPCOMP, + LF_TCP = OCCTX_PKI_LTYPE_TCP, + LF_UDP = OCCTX_PKI_LTYPE_UDP, + LF_GRE = OCCTX_PKI_LTYPE_GRE, + LF_UDP_GENEVE = OCCTX_PKI_LTYPE_UDP_GENEVE, + LF_UDP_VXLAN = OCCTX_PKI_LTYPE_UDP_VXLAN, + LF_NVGRE = OCCTX_PKI_LTYPE_NVGRE, +}; + +/* Word 0 of HW segment buflink structure */ +typedef union octtx_pki_buflink_w0_u { + uint64_t v; + struct { + uint64_t size:16; + uint64_t rsvd1:15; + uint64_t invfree:1; + /** Aura number of the next segment */ + uint64_t aura:16; + uint64_t sw:9; + uint64_t later_invfree:1; + uint64_t rsvd2:5; + /** 1 if aura number is set */ + uint64_t has_aura:1; + } s; +} octtx_pki_buflink_w0_t; + +/* Word 1 of HW segment buflink structure */ +typedef union octtx_pki_buflink_w1_u { + uint64_t v; + struct { + uint64_t addr; + } s; +} octtx_pki_buflink_w1_t; + +/* HW structure linking packet segments into singly linked list */ +typedef struct octtx_pki_buflink_s { + octtx_pki_buflink_w0_t w0; /* Word 0 of the buflink */ + octtx_pki_buflink_w1_t w1; /* Word 1 of the buflink */ +} octtx_pki_buflink_t; + +#endif /* __OCTEONTX_PKI_VAR_H__ */ diff --git a/src/spdk/dpdk/drivers/net/octeontx/base/octeontx_pkivf.c b/src/spdk/dpdk/drivers/net/octeontx/base/octeontx_pkivf.c new file mode 100644 index 000000000..0ddff5488 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/octeontx/base/octeontx_pkivf.c @@ -0,0 +1,239 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Cavium, Inc + */ + +#include + +#include +#include + +#include "../octeontx_logs.h" +#include "octeontx_io.h" +#include "octeontx_pkivf.h" + + +struct octeontx_pkivf { + uint8_t *bar0; + uint8_t status; + uint16_t domain; + uint16_t vfid; +}; + +struct octeontx_pki_vf_ctl_s { + struct octeontx_pkivf pki[PKI_VF_MAX]; +}; + +static struct octeontx_pki_vf_ctl_s pki_vf_ctl; + +int +octeontx_pki_port_open(int port) +{ + uint16_t global_domain = octeontx_get_global_domain(); + struct octeontx_mbox_hdr hdr; + pki_port_type_t port_type; + int i, res; + + /* Check if atleast one PKI vf is in application domain. 
*/ + for (i = 0; i < PKI_VF_MAX; i++) { + if (pki_vf_ctl.pki[i].domain != global_domain) + continue; + break; + } + + if (i == PKI_VF_MAX) + return -ENODEV; + + port_type.port_type = OCTTX_PORT_TYPE_NET; + hdr.coproc = OCTEONTX_PKI_COPROC; + hdr.msg = MBOX_PKI_PORT_OPEN; + hdr.vfid = port; + + res = octeontx_mbox_send(&hdr, &port_type, sizeof(pki_port_type_t), + NULL, 0); + if (res < 0) + return -EACCES; + return res; +} + +int +octeontx_pki_port_hash_config(int port, pki_hash_cfg_t *hash_cfg) +{ + struct octeontx_mbox_hdr hdr; + int res; + + pki_hash_cfg_t h_cfg = *(pki_hash_cfg_t *)hash_cfg; + int len = sizeof(pki_hash_cfg_t); + + hdr.coproc = OCTEONTX_PKI_COPROC; + hdr.msg = MBOX_PKI_PORT_HASH_CONFIG; + hdr.vfid = port; + + res = octeontx_mbox_send(&hdr, &h_cfg, len, NULL, 0); + if (res < 0) + return -EACCES; + + return res; +} + +int +octeontx_pki_port_pktbuf_config(int port, pki_pktbuf_cfg_t *buf_cfg) +{ + struct octeontx_mbox_hdr hdr; + int res; + + pki_pktbuf_cfg_t b_cfg = *(pki_pktbuf_cfg_t *)buf_cfg; + int len = sizeof(pki_pktbuf_cfg_t); + + hdr.coproc = OCTEONTX_PKI_COPROC; + hdr.msg = MBOX_PKI_PORT_PKTBUF_CONFIG; + hdr.vfid = port; + + res = octeontx_mbox_send(&hdr, &b_cfg, len, NULL, 0); + if (res < 0) + return -EACCES; + return res; +} + +int +octeontx_pki_port_create_qos(int port, pki_qos_cfg_t *qos_cfg) +{ + struct octeontx_mbox_hdr hdr; + int res; + + pki_qos_cfg_t q_cfg = *(pki_qos_cfg_t *)qos_cfg; + int len = sizeof(pki_qos_cfg_t); + + hdr.coproc = OCTEONTX_PKI_COPROC; + hdr.msg = MBOX_PKI_PORT_CREATE_QOS; + hdr.vfid = port; + + res = octeontx_mbox_send(&hdr, &q_cfg, len, NULL, 0); + if (res < 0) + return -EACCES; + + return res; +} + + +int +octeontx_pki_port_errchk_config(int port, pki_errchk_cfg_t *cfg) +{ + struct octeontx_mbox_hdr hdr; + int res; + + pki_errchk_cfg_t e_cfg; + e_cfg = *((pki_errchk_cfg_t *)(cfg)); + int len = sizeof(pki_errchk_cfg_t); + + hdr.coproc = OCTEONTX_PKI_COPROC; + hdr.msg = MBOX_PKI_PORT_ERRCHK_CONFIG; + hdr.vfid = port; + + res = octeontx_mbox_send(&hdr, &e_cfg, len, NULL, 0); + if (res < 0) + return -EACCES; + + return res; +} + +int +octeontx_pki_port_vlan_fltr_config(int port, + pki_port_vlan_filter_config_t *fltr_cfg) +{ + struct octeontx_mbox_hdr hdr; + int res; + + pki_port_vlan_filter_config_t cfg = *fltr_cfg; + int len = sizeof(pki_port_vlan_filter_config_t); + + hdr.coproc = OCTEONTX_PKI_COPROC; + hdr.msg = MBOX_PKI_PORT_VLAN_FILTER_CONFIG; + hdr.vfid = port; + + res = octeontx_mbox_send(&hdr, &cfg, len, NULL, 0); + if (res < 0) + return -EACCES; + return res; +} + +int +octeontx_pki_port_vlan_fltr_entry_config(int port, + pki_port_vlan_filter_entry_config_t *e_cfg) +{ + struct octeontx_mbox_hdr hdr; + int res; + + pki_port_vlan_filter_entry_config_t cfg = *e_cfg; + int len = sizeof(pki_port_vlan_filter_entry_config_t); + + hdr.coproc = OCTEONTX_PKI_COPROC; + hdr.msg = MBOX_PKI_PORT_VLAN_FILTER_ENTRY_CONFIG; + hdr.vfid = port; + + res = octeontx_mbox_send(&hdr, &cfg, len, NULL, 0); + if (res < 0) + return -EACCES; + return res; +} + +#define PCI_VENDOR_ID_CAVIUM 0x177D +#define PCI_DEVICE_ID_OCTEONTX_PKI_VF 0xA0DD + +/* PKIVF pcie device */ +static int +pkivf_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) +{ + struct octeontx_pkivf *res; + static uint8_t vf_cnt; + uint16_t domain; + uint16_t vfid; + uint8_t *bar0; + uint64_t val; + + RTE_SET_USED(pci_drv); + /* For secondary processes, the primary has done all the work */ + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; + + if 
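/*
 * Illustrative usage sketch, not taken from the driver sources: a minimal
 * PKI setup -- open the port, then describe how received packets must be
 * laid out in mbufs.  The buffer size and skip values are placeholders
 * supplied by the caller, not recommended settings.
 */
#include <string.h>
#include "octeontx_pkivf.h"

static int
example_pki_setup(int port, uint16_t mbuf_data_room, uint16_t first_skip)
{
	pki_pktbuf_cfg_t pktbuf;
	int res;

	res = octeontx_pki_port_open(port);
	if (res < 0)
		return res;

	memset(&pktbuf, 0, sizeof(pktbuf));
	pktbuf.port_type = OCTTX_PORT_TYPE_NET;
	pktbuf.mmask.f_mbuff_size = 1;
	pktbuf.mmask.f_first_skip = 1;
	pktbuf.mbuff_size = mbuf_data_room;
	pktbuf.first_skip = first_skip;

	return octeontx_pki_port_pktbuf_config(port, &pktbuf);
}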
(pci_dev->mem_resource[0].addr == NULL) { + octeontx_log_err("PKI Empty bar[0] %p", + pci_dev->mem_resource[0].addr); + return -ENODEV; + } + + bar0 = pci_dev->mem_resource[0].addr; + val = octeontx_read64(bar0); + domain = val & 0xffff; + vfid = (val >> 16) & 0xffff; + + if (unlikely(vfid >= PKI_VF_MAX)) { + octeontx_log_err("pki: Invalid vfid %d", vfid); + return -EINVAL; + } + + res = &pki_vf_ctl.pki[vf_cnt++]; + res->vfid = vfid; + res->domain = domain; + res->bar0 = bar0; + + octeontx_log_dbg("PKI Domain=%d vfid=%d", res->domain, res->vfid); + return 0; +} + +static const struct rte_pci_id pci_pkivf_map[] = { + { + RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, + PCI_DEVICE_ID_OCTEONTX_PKI_VF) + }, + { + .vendor_id = 0, + }, +}; + +static struct rte_pci_driver pci_pkivf = { + .id_table = pci_pkivf_map, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING, + .probe = pkivf_probe, +}; + +RTE_PMD_REGISTER_PCI(octeontx_pkivf, pci_pkivf); diff --git a/src/spdk/dpdk/drivers/net/octeontx/base/octeontx_pkivf.h b/src/spdk/dpdk/drivers/net/octeontx/base/octeontx_pkivf.h new file mode 100644 index 000000000..d41eaa57e --- /dev/null +++ b/src/spdk/dpdk/drivers/net/octeontx/base/octeontx_pkivf.h @@ -0,0 +1,372 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Cavium, Inc + */ + +#ifndef __OCTEONTX_PKI_H__ +#define __OCTEONTX_PKI_H__ + +#include + +#include + +#define OCTEONTX_PKI_COPROC 5 + +/* PKI messages */ + +#define MBOX_PKI_PORT_OPEN 1 +#define MBOX_PKI_PORT_START 2 +#define MBOX_PKI_PORT_STOP 3 +#define MBOX_PKI_PORT_CLOSE 4 +#define MBOX_PKI_PORT_CONFIG 5 +#define MBOX_PKI_PORT_OPT_PARSER_CONFIG 6 +#define MBOX_PKI_PORT_CUSTOM_PARSER_CONFIG 7 +#define MBOX_PKI_PORT_PKTBUF_CONFIG 8 +#define MBOX_PKI_PORT_HASH_CONFIG 9 +#define MBOX_PKI_PORT_ERRCHK_CONFIG 10 +#define MBOX_PKI_PORT_CREATE_QOS 11 +#define MBOX_PKI_PORT_MODIFY_QOS 12 +#define MBOX_PKI_PORT_DELETE_QOS 13 +#define MBOX_PKI_PORT_PKTDROP_CONFIG 14 +#define MBOX_PKI_PORT_WQE_GEN_CONFIG 15 +#define MBOX_PKI_BACKPRESSURE_CONFIG 16 +#define MBOX_PKI_PORT_GET_STATS 17 +#define MBOX_PKI_PORT_RESET_STATS 18 +#define MBOX_PKI_GET_PORT_CONFIG 19 +#define MBOX_PKI_GET_PORT_QOS_CONFIG 20 +#define MBOX_PKI_PORT_ALLOC_QPG 21 +#define MBOX_PKI_PORT_FREE_QPG 22 +#define MBOX_PKI_SET_PORT_CONFIG 23 +#define MBOX_PKI_PORT_VLAN_FILTER_CONFIG 24 +#define MBOX_PKI_PORT_VLAN_FILTER_ENTRY_CONFIG 25 + +#define MBOX_PKI_MAX_QOS_ENTRY 64 + +/* PKI maximum constants */ +#define PKI_VF_MAX (32) +#define PKI_MAX_PKTLEN (32768) + +/* Interface types: */ +enum { + OCTTX_PORT_TYPE_NET, /* Network interface ports */ + OCTTX_PORT_TYPE_INT, /* CPU internal interface ports */ + OCTTX_PORT_TYPE_PCI, /* DPI/PCIe interface ports */ + OCTTX_PORT_TYPE_MAX +}; + +/* pki pkind parse mode */ +enum { + PKI_PARSE_LA_TO_LG = 0, + PKI_PARSE_LB_TO_LG = 1, + PKI_PARSE_LC_TO_LG = 3, + PKI_PARSE_LG = 0x3f, + PKI_PARSE_NOTHING = 0x7f +}; + +/* CACHE MODE*/ +enum { + PKI_OPC_MODE_STT = 0LL, + PKI_OPC_MODE_STF = 1LL, + PKI_OPC_MODE_STF1_STT = 2LL, + PKI_OPC_MODE_STF2_STT = 3LL +}; + +/* PKI QPG QOS*/ +enum { + PKI_QPG_QOS_NONE = 0, + PKI_QPG_QOS_VLAN, + PKI_QPG_QOS_MPLS, + PKI_QPG_QOS_DSA_SRC, + PKI_QPG_QOS_DIFFSERV, + PKI_QPG_QOS_HIGIG, +}; + +/* pki port config */ +typedef struct pki_port_type { + uint8_t port_type; +} pki_port_type_t; + +/* pki port config */ +typedef struct pki_port_cfg { + uint8_t port_type; + struct { + uint8_t fcs_pres:1; + uint8_t fcs_skip:1; + uint8_t parse_mode:1; + uint8_t mpls_parse:1; + uint8_t inst_hdr_parse:1; + uint8_t fulc_parse:1; + uint8_t dsa_parse:1; + 
uint8_t hg2_parse:1; + uint8_t hg_parse:1; + } mmask; + uint8_t fcs_pres; + uint8_t fcs_skip; + uint8_t parse_mode; + uint8_t mpls_parse; + uint8_t inst_hdr_parse; + uint8_t fulc_parse; + uint8_t dsa_parse; + uint8_t hg2_parse; + uint8_t hg_parse; +} pki_prt_cfg_t; + + +/* pki Flow/style packet buffer config */ +typedef struct pki_port_pktbuf_cfg { + uint8_t port_type; + struct { + uint16_t f_mbuff_size:1; + uint16_t f_wqe_skip:1; + uint16_t f_first_skip:1; + uint16_t f_later_skip:1; + uint16_t f_pkt_outside_wqe:1; + uint16_t f_wqe_endian:1; + uint16_t f_cache_mode:1; + } mmask; + uint16_t mbuff_size; + uint16_t wqe_skip; + uint16_t first_skip; + uint16_t later_skip; + uint8_t pkt_outside_wqe; + uint8_t wqe_endian; + uint8_t cache_mode; +} pki_pktbuf_cfg_t; + +/* pki flow/style tag config */ +typedef struct pki_port_hash_cfg { + uint8_t port_type; + uint32_t tag_slf:1; + uint32_t tag_sle:1; + uint32_t tag_sld:1; + uint32_t tag_slc:1; + uint32_t tag_dlf:1; + uint32_t tag_dle:1; + uint32_t tag_dld:1; + uint32_t tag_dlc:1; + uint32_t tag_prt:1; + uint32_t tag_vlan0:1; + uint32_t tag_vlan1:1; + uint32_t tag_ip_pctl:1; + uint32_t tag_sync:1; + uint32_t tag_spi:1; + uint32_t tag_gtp:1; + uint32_t tag_vni:1; +} pki_hash_cfg_t; + +/* pki flow/style errcheck config */ +typedef struct pki_port_errcheck_cfg { + uint8_t port_type; + struct { + uint32_t f_ip6_udp_opt:1; + uint32_t f_lenerr_en:1; + uint32_t f_maxerr_en:1; + uint32_t f_minerr_en:1; + uint32_t f_fcs_chk:1; + uint32_t f_fcs_strip:1; + uint32_t f_len_lf:1; + uint32_t f_len_le:1; + uint32_t f_len_ld:1; + uint32_t f_len_lc:1; + uint32_t f_csum_lf:1; + uint32_t f_csum_le:1; + uint32_t f_csum_ld:1; + uint32_t f_csum_lc:1; + uint32_t f_min_frame_len; + uint32_t f_max_frame_len; + } mmask; + uint64_t ip6_udp_opt:1; + uint64_t lenerr_en:1; + uint64_t maxerr_en:1; + uint64_t minerr_en:1; + uint64_t fcs_chk:1; + uint64_t fcs_strip:1; + uint64_t len_lf:1; + uint64_t len_le:1; + uint64_t len_ld:1; + uint64_t len_lc:1; + uint64_t csum_lf:1; + uint64_t csum_le:1; + uint64_t csum_ld:1; + uint64_t csum_lc:1; + uint64_t min_frame_len; + uint64_t max_frame_len; +} pki_errchk_cfg_t; + +struct pki_qos_entry { + uint16_t port_add; + uint16_t ggrp_ok; + uint16_t ggrp_bad; + uint16_t gaura; + uint8_t grptag_ok; + uint8_t grptag_bad; + uint8_t ena_red; + uint8_t ena_drop; + uint8_t tag_type; +}; + +#define PKO_MAX_QOS_ENTRY 64 + +/* pki flow/style enable qos */ +typedef struct pki_port_create_qos { + uint8_t port_type; + uint8_t qpg_qos; + uint8_t num_entry; + uint8_t tag_type; + uint8_t drop_policy; + struct pki_qos_entry qos_entry[PKO_MAX_QOS_ENTRY]; +} pki_qos_cfg_t; + +/* pki flow/style enable qos */ +typedef struct pki_port_delete_qos_entry { + uint8_t port_type; + uint16_t index; +} pki_del_qos_t; + +/* pki flow/style enable qos */ +typedef struct pki_port_modify_qos_entry { + uint8_t port_type; + uint16_t index; + struct { + uint8_t f_port_add:1; + uint8_t f_grp_ok:1; + uint8_t f_grp_bad:1; + uint8_t f_gaura:1; + uint8_t f_grptag_ok:1; + uint8_t f_grptag_bad:1; + uint8_t f_tag_type:1; + } mmask; + struct pki_qos_entry qos_entry; +} pki_mod_qos_t; + +/* pki port VLAN filter config */ +typedef struct pki_port_vlan_filter_config { + uint8_t port_type; /* OCTTX_PORT_TYPE_[NET/INT/PCI] */ + uint8_t fltr_conf; /* '1' to enable & '0' to disable */ +} pki_port_vlan_filter_config_t; + +/* pki port VLAN filter entry config */ +typedef struct pki_port_vlan_filter_entry_config { + uint8_t port_type; /* OCTTX_PORT_TYPE_[NET/INT/PCI] */ + uint8_t entry_conf; /* '1' to 
add & '0' to remove */ + uint16_t vlan_tpid; /* in host byte-order */ + uint16_t vlan_id; /* in host byte-order */ +} pki_port_vlan_filter_entry_config_t; + +static inline int +octeontx_pki_port_modify_qos(int port, pki_mod_qos_t *qos_cfg) +{ + struct octeontx_mbox_hdr hdr; + int res; + + pki_mod_qos_t q_cfg = *(pki_mod_qos_t *)qos_cfg; + int len = sizeof(pki_mod_qos_t); + + hdr.coproc = OCTEONTX_PKI_COPROC; + hdr.msg = MBOX_PKI_PORT_MODIFY_QOS; + hdr.vfid = port; + + res = octeontx_mbox_send(&hdr, &q_cfg, len, NULL, 0); + if (res < 0) + return -EACCES; + + return res; +} + +static inline int +octeontx_pki_port_delete_qos(int port, pki_del_qos_t *qos_cfg) +{ + struct octeontx_mbox_hdr hdr; + int res; + + pki_del_qos_t q_cfg = *(pki_del_qos_t *)qos_cfg; + int len = sizeof(pki_del_qos_t); + + hdr.coproc = OCTEONTX_PKI_COPROC; + hdr.msg = MBOX_PKI_PORT_DELETE_QOS; + hdr.vfid = port; + + res = octeontx_mbox_send(&hdr, &q_cfg, len, NULL, 0); + if (res < 0) + return -EACCES; + + return res; +} + +static inline int +octeontx_pki_port_close(int port) +{ + struct octeontx_mbox_hdr hdr; + int res; + + pki_port_type_t ptype; + int len = sizeof(pki_port_type_t); + memset(&ptype, 0, len); + ptype.port_type = OCTTX_PORT_TYPE_NET; + + hdr.coproc = OCTEONTX_PKI_COPROC; + hdr.msg = MBOX_PKI_PORT_CLOSE; + hdr.vfid = port; + + res = octeontx_mbox_send(&hdr, &ptype, len, NULL, 0); + if (res < 0) + return -EACCES; + + return res; +} + +static inline int +octeontx_pki_port_start(int port) +{ + struct octeontx_mbox_hdr hdr; + int res; + + pki_port_type_t ptype; + int len = sizeof(pki_port_type_t); + memset(&ptype, 0, len); + ptype.port_type = OCTTX_PORT_TYPE_NET; + + hdr.coproc = OCTEONTX_PKI_COPROC; + hdr.msg = MBOX_PKI_PORT_START; + hdr.vfid = port; + + res = octeontx_mbox_send(&hdr, &ptype, len, NULL, 0); + if (res < 0) + return -EACCES; + + return res; +} + +static inline int +octeontx_pki_port_stop(int port) +{ + struct octeontx_mbox_hdr hdr; + int res; + + pki_port_type_t ptype; + int len = sizeof(pki_port_type_t); + memset(&ptype, 0, len); + ptype.port_type = OCTTX_PORT_TYPE_NET; + + hdr.coproc = OCTEONTX_PKI_COPROC; + hdr.msg = MBOX_PKI_PORT_STOP; + hdr.vfid = port; + + res = octeontx_mbox_send(&hdr, &ptype, len, NULL, 0); + if (res < 0) + return -EACCES; + + return res; +} + +int octeontx_pki_port_open(int port); +int octeontx_pki_port_hash_config(int port, pki_hash_cfg_t *hash_cfg); +int octeontx_pki_port_pktbuf_config(int port, pki_pktbuf_cfg_t *buf_cfg); +int octeontx_pki_port_create_qos(int port, pki_qos_cfg_t *qos_cfg); +int octeontx_pki_port_close(int port); +int octeontx_pki_port_errchk_config(int port, pki_errchk_cfg_t *cfg); +int octeontx_pki_port_vlan_fltr_config(int port, + pki_port_vlan_filter_config_t *fltr_cfg); +int octeontx_pki_port_vlan_fltr_entry_config(int port, + pki_port_vlan_filter_entry_config_t *entry_cfg); + +#endif /* __OCTEONTX_PKI_H__ */ diff --git a/src/spdk/dpdk/drivers/net/octeontx/base/octeontx_pkovf.c b/src/spdk/dpdk/drivers/net/octeontx/base/octeontx_pkovf.c new file mode 100644 index 000000000..bf28bc799 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/octeontx/base/octeontx_pkovf.c @@ -0,0 +1,640 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Cavium, Inc + */ + +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "../octeontx_logs.h" +#include "octeontx_io.h" +#include "octeontx_pkovf.h" + +struct octeontx_pko_iomem { + uint8_t *va; + rte_iova_t iova; + size_t size; +}; + +#define PKO_IOMEM_NULL 
(struct octeontx_pko_iomem){0, 0, 0} +#define PKO_VALID 0x1 +#define PKO_INUSE 0x2 + +struct octeontx_pko_fc_ctl_s { + int64_t buf_cnt; + int64_t padding[(PKO_DQ_FC_STRIDE / 8) - 1]; +}; + +struct octeontx_pkovf { + uint8_t *bar0; + uint8_t *bar2; + uint8_t status; + uint16_t domain; + uint16_t vfid; +}; + +struct octeontx_pko_vf_ctl_s { + rte_spinlock_t lock; + uint16_t global_domain; + struct octeontx_pko_iomem fc_iomem; + struct octeontx_pko_fc_ctl_s *fc_ctl; + struct octeontx_pkovf pko[PKO_VF_MAX]; + struct { + uint64_t chanid; + } dq_map[PKO_VF_MAX * PKO_VF_NUM_DQ]; +}; + +static struct octeontx_pko_vf_ctl_s pko_vf_ctl; + +static void * +octeontx_pko_dq_vf_bar0(uint16_t txq) +{ + int vf_ix; + + vf_ix = txq / PKO_VF_NUM_DQ; + return pko_vf_ctl.pko[vf_ix].bar0; +} + +static int +octeontx_pko_dq_gdq(uint16_t txq) +{ + return txq % PKO_VF_NUM_DQ; +} + +/** + * Open a PKO DQ. + */ +static inline +int octeontx_pko_dq_open(uint16_t txq) +{ + unsigned int reg_off; + uint8_t *vf_bar0; + uint64_t rtn; + int gdq; + + vf_bar0 = octeontx_pko_dq_vf_bar0(txq); + gdq = octeontx_pko_dq_gdq(txq); + + if (unlikely(gdq < 0 || vf_bar0 == NULL)) + return -EINVAL; + *(volatile int64_t*)(pko_vf_ctl.fc_ctl + txq) = + PKO_DQ_FC_DEPTH_PAGES - PKO_DQ_FC_SKID; + + rte_wmb(); + + octeontx_write64(PKO_DQ_FC_DEPTH_PAGES, + vf_bar0 + PKO_VF_DQ_FC_STATUS(gdq)); + + /* Set the register to return descriptor (packet) count as DEPTH */ + /* KIND=1, NCB_QUERY_RSP=0 */ + octeontx_write64(1ull << PKO_DQ_KIND_BIT, + vf_bar0 + PKO_VF_DQ_WM_CTL(gdq)); + reg_off = PKO_VF_DQ_OP_OPEN(gdq); + + rtn = octeontx_reg_ldadd_u64(vf_bar0 + reg_off, 0); + + /* PKO_DQOP_E::OPEN */ + if (((rtn >> PKO_DQ_OP_BIT) & 0x3) != 0x1) + return -EIO; + + switch (rtn >> PKO_DQ_STATUS_BIT) { + case 0xC: /* DQALREADYCREATED */ + case 0x0: /* PASS */ + break; + default: + return -EIO; + } + + /* DRAIN=0, DRAIN_NULL_LINK=0, SW_XOFF=0 */ + octeontx_write64(0, vf_bar0 + PKO_VF_DQ_SW_XOFF(gdq)); + + return rtn & ((1ull << PKO_DQ_OP_BIT) - 1); +} + +/** + * Close a PKO DQ + * Flush all packets pending. 
+ */ +static inline +int octeontx_pko_dq_close(uint16_t txq) +{ + unsigned int reg_off; + uint8_t *vf_bar0; + uint64_t rtn; + int res; + + vf_bar0 = octeontx_pko_dq_vf_bar0(txq); + res = octeontx_pko_dq_gdq(txq); + + if (unlikely(res < 0 || vf_bar0 == NULL)) + return -EINVAL; + + reg_off = PKO_VF_DQ_OP_CLOSE(res); + + rtn = octeontx_reg_ldadd_u64(vf_bar0 + reg_off, 0); + + /* PKO_DQOP_E::CLOSE */ + if (((rtn >> PKO_DQ_OP_BIT) & 0x3) != 0x2) + return -EIO; + + switch (rtn >> PKO_DQ_STATUS_BIT) { + case 0xD: /* DQNOTCREATED */ + case 0x0: /* PASS */ + break; + default: + return -EIO; + } + + res = rtn & ((1ull << PKO_DQ_OP_BIT) - 1); /* DEPTH */ + return res; +} + +/* Flush all packets pending on a DQ */ +static inline +int octeontx_pko_dq_drain(uint16_t txq) +{ + unsigned int gdq; + uint8_t *vf_bar0; + uint64_t reg; + int res, timo = PKO_DQ_DRAIN_TO; + + vf_bar0 = octeontx_pko_dq_vf_bar0(txq); + res = octeontx_pko_dq_gdq(txq); + gdq = res; + + /* DRAIN=1, DRAIN_NULL_LINK=0, SW_XOFF=1 */ + octeontx_write64(0x3, vf_bar0 + PKO_VF_DQ_SW_XOFF(gdq)); + /* Wait until buffers leave DQs */ + reg = octeontx_read64(vf_bar0 + PKO_VF_DQ_WM_CNT(gdq)); + while (reg && timo > 0) { + rte_delay_us(100); + timo--; + reg = octeontx_read64(vf_bar0 + PKO_VF_DQ_WM_CNT(gdq)); + } + /* DRAIN=0, DRAIN_NULL_LINK=0, SW_XOFF=0 */ + octeontx_write64(0, vf_bar0 + PKO_VF_DQ_SW_XOFF(gdq)); + + return reg; +} + +static inline int +octeontx_pko_dq_range_lookup(struct octeontx_pko_vf_ctl_s *ctl, uint64_t chanid, + unsigned int dq_num, unsigned int dq_from) +{ + unsigned int dq, dq_cnt; + unsigned int dq_base; + + dq_cnt = 0; + dq = dq_from; + while (dq < RTE_DIM(ctl->dq_map)) { + dq_base = dq; + dq_cnt = 0; + while (ctl->dq_map[dq].chanid == ~chanid && + dq < RTE_DIM(ctl->dq_map)) { + dq_cnt++; + if (dq_cnt == dq_num) + return dq_base; + dq++; + } + dq++; + } + return -1; +} + +static inline void +octeontx_pko_dq_range_assign(struct octeontx_pko_vf_ctl_s *ctl, uint64_t chanid, + unsigned int dq_base, unsigned int dq_num) +{ + unsigned int dq, dq_cnt; + + dq_cnt = 0; + while (dq_cnt < dq_num) { + dq = dq_base + dq_cnt; + + octeontx_log_dbg("DQ# %u assigned to CHAN# %" PRIx64 "", dq, + chanid); + + ctl->dq_map[dq].chanid = ~chanid; + dq_cnt++; + } +} + +static inline int +octeontx_pko_dq_claim(struct octeontx_pko_vf_ctl_s *ctl, unsigned int dq_base, + unsigned int dq_num, uint64_t chanid) +{ + const uint64_t null_chanid = ~0ull; + int dq; + + rte_spinlock_lock(&ctl->lock); + + dq = octeontx_pko_dq_range_lookup(ctl, null_chanid, dq_num, dq_base); + if (dq < 0 || (unsigned int)dq != dq_base) { + rte_spinlock_unlock(&ctl->lock); + return -1; + } + octeontx_pko_dq_range_assign(ctl, chanid, dq_base, dq_num); + + rte_spinlock_unlock(&ctl->lock); + + return 0; +} + +static inline int +octeontx_pko_dq_free(struct octeontx_pko_vf_ctl_s *ctl, uint64_t chanid) +{ + const uint64_t null_chanid = ~0ull; + unsigned int dq = 0, dq_cnt = 0; + + rte_spinlock_lock(&ctl->lock); + while (dq < RTE_DIM(ctl->dq_map)) { + if (ctl->dq_map[dq].chanid == ~chanid) { + ctl->dq_map[dq].chanid = ~null_chanid; + dq_cnt++; + } + dq++; + } + rte_spinlock_unlock(&ctl->lock); + + return dq_cnt > 0 ? 
0 : -EINVAL; +} + +int +octeontx_pko_channel_open(int dq_base, int dq_num, int chanid) +{ + struct octeontx_pko_vf_ctl_s *ctl = &pko_vf_ctl; + int res; + + res = octeontx_pko_dq_claim(ctl, dq_base, dq_num, chanid); + if (res < 0) + return -1; + + return 0; +} + +int +octeontx_pko_channel_close(int chanid) +{ + struct octeontx_pko_vf_ctl_s *ctl = &pko_vf_ctl; + int res; + + res = octeontx_pko_dq_free(ctl, chanid); + if (res < 0) + return -1; + + return 0; +} + +static inline int +octeontx_pko_chan_start(struct octeontx_pko_vf_ctl_s *ctl, uint64_t chanid) +{ + unsigned int dq_vf; + unsigned int dq, dq_cnt; + + dq_cnt = 0; + dq = 0; + while (dq < RTE_DIM(ctl->dq_map)) { + dq_vf = dq / PKO_VF_NUM_DQ; + + if (!ctl->pko[dq_vf].bar0) { + dq += PKO_VF_NUM_DQ; + continue; + } + + if (ctl->dq_map[dq].chanid != ~chanid) { + dq++; + continue; + } + + if (octeontx_pko_dq_open(dq) < 0) + break; + + dq_cnt++; + dq++; + } + + return dq_cnt; +} + +int +octeontx_pko_channel_start(int chanid) +{ + struct octeontx_pko_vf_ctl_s *ctl = &pko_vf_ctl; + int dq_cnt; + + dq_cnt = octeontx_pko_chan_start(ctl, chanid); + if (dq_cnt < 0) + return -1; + + return dq_cnt; +} + +static inline int +octeontx_pko_chan_stop(struct octeontx_pko_vf_ctl_s *ctl, uint64_t chanid) +{ + unsigned int dq, dq_cnt, dq_vf; + int res; + + dq_cnt = 0; + dq = 0; + while (dq < RTE_DIM(ctl->dq_map)) { + dq_vf = dq / PKO_VF_NUM_DQ; + + if (!ctl->pko[dq_vf].bar0) { + dq += PKO_VF_NUM_DQ; + continue; + } + + if (ctl->dq_map[dq].chanid != ~chanid) { + dq++; + continue; + } + + res = octeontx_pko_dq_drain(dq); + if (res > 0) + octeontx_log_err("draining DQ%d, buffers left: %x", + dq, res); + + res = octeontx_pko_dq_close(dq); + if (res < 0) + octeontx_log_err("closing DQ%d failed\n", dq); + + dq_cnt++; + dq++; + } + return dq_cnt; +} + +int +octeontx_pko_channel_stop(int chanid) +{ + struct octeontx_pko_vf_ctl_s *ctl = &pko_vf_ctl; + + octeontx_pko_chan_stop(ctl, chanid); + return 0; +} + +static inline int +octeontx_pko_channel_query(struct octeontx_pko_vf_ctl_s *ctl, uint64_t chanid, + void *out, size_t out_elem_size, + size_t dq_num, octeontx_pko_dq_getter_t getter) +{ + octeontx_dq_t curr; + unsigned int dq_vf; + unsigned int dq; + + RTE_SET_USED(out_elem_size); + memset(&curr, 0, sizeof(octeontx_dq_t)); + + dq_vf = dq_num / PKO_VF_NUM_DQ; + dq = dq_num % PKO_VF_NUM_DQ; + + if (!ctl->pko[dq_vf].bar0) + return -EINVAL; + + if (ctl->dq_map[dq_num].chanid != ~chanid) + return -EINVAL; + + uint8_t *iter = (uint8_t *)out; + curr.lmtline_va = ctl->pko[dq_vf].bar2; + curr.ioreg_va = (void *)((uintptr_t)ctl->pko[dq_vf].bar0 + + PKO_VF_DQ_OP_SEND((dq), 0)); + curr.fc_status_va = ctl->fc_ctl + dq_num; + + octeontx_log_dbg("lmtline=%p ioreg_va=%p fc_status_va=%p", + curr.lmtline_va, curr.ioreg_va, + curr.fc_status_va); + + getter(&curr, (void *)iter); + return 0; +} + +int +octeontx_pko_channel_query_dqs(int chanid, void *out, size_t out_elem_size, + size_t dq_num, octeontx_pko_dq_getter_t getter) +{ + struct octeontx_pko_vf_ctl_s *ctl = &pko_vf_ctl; + int dq_cnt; + + dq_cnt = octeontx_pko_channel_query(ctl, chanid, out, out_elem_size, + dq_num, getter); + if (dq_cnt < 0) + return -1; + + return dq_cnt; +} + +int +octeontx_pko_vf_count(void) +{ + uint16_t global_domain = octeontx_get_global_domain(); + int vf_cnt; + + pko_vf_ctl.global_domain = global_domain; + vf_cnt = 0; + while (pko_vf_ctl.pko[vf_cnt].bar0) + vf_cnt++; + + return vf_cnt; +} + +size_t +octeontx_pko_get_vfid(void) +{ + size_t vf_cnt = octeontx_pko_vf_count(); + size_t vf_idx; + + + for 
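/*
 * Illustrative usage sketch, not taken from the driver sources: the PKO
 * channel lifecycle built from the helpers above.  The dq_base, dq_num and
 * chanid values are placeholders; in the driver they come from the BGX
 * channel mapping.
 */
#include "octeontx_pkovf.h"

static int
example_pko_channel_lifecycle(int dq_base, int dq_num, int chanid)
{
	int started;

	if (octeontx_pko_channel_open(dq_base, dq_num, chanid) < 0)
		return -1;

	started = octeontx_pko_channel_start(chanid);
	if (started <= 0) {
		octeontx_pko_channel_close(chanid);
		return -1;
	}

	/* ... transmit via the opened descriptor queues ... */

	octeontx_pko_channel_stop(chanid);
	return octeontx_pko_channel_close(chanid);
}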
(vf_idx = 0; vf_idx < vf_cnt; vf_idx++) { + if (!(pko_vf_ctl.pko[vf_idx].status & PKO_VALID)) + continue; + if (pko_vf_ctl.pko[vf_idx].status & PKO_INUSE) + continue; + + pko_vf_ctl.pko[vf_idx].status |= PKO_INUSE; + return pko_vf_ctl.pko[vf_idx].vfid; + } + + return SIZE_MAX; +} + +int +octeontx_pko_send_mtu(int port, int mtu) +{ + struct octeontx_mbox_hdr hdr; + int res; + mbox_pko_mtu_cfg_t cfg; + + cfg.mtu = mtu; + + hdr.coproc = OCTEONTX_PKO_COPROC; + hdr.msg = MBOX_PKO_MTU_CONFIG; + hdr.vfid = port; + + res = octeontx_mbox_send(&hdr, &cfg, sizeof(mbox_pko_mtu_cfg_t), + NULL, 0); + if (res < 0) + return -EACCES; + + return res; +} + +int +octeontx_pko_init_fc(const size_t pko_vf_count) +{ + int dq_ix; + uint64_t reg; + uint8_t *vf_bar0; + size_t vf_idx; + size_t fc_mem_size; + + fc_mem_size = sizeof(struct octeontx_pko_fc_ctl_s) * + pko_vf_count * PKO_VF_NUM_DQ; + + pko_vf_ctl.fc_iomem.va = rte_malloc(NULL, fc_mem_size, 128); + if (unlikely(!pko_vf_ctl.fc_iomem.va)) { + octeontx_log_err("fc_iomem: not enough memory"); + return -ENOMEM; + } + + pko_vf_ctl.fc_iomem.iova = rte_malloc_virt2iova((void *) + pko_vf_ctl.fc_iomem.va); + pko_vf_ctl.fc_iomem.size = fc_mem_size; + + pko_vf_ctl.fc_ctl = + (struct octeontx_pko_fc_ctl_s *)pko_vf_ctl.fc_iomem.va; + + /* Configure Flow-Control feature for all DQs of open VFs */ + for (vf_idx = 0; vf_idx < pko_vf_count; vf_idx++) { + if (pko_vf_ctl.pko[vf_idx].domain != pko_vf_ctl.global_domain) + continue; + + dq_ix = pko_vf_ctl.pko[vf_idx].vfid * PKO_VF_NUM_DQ; + vf_bar0 = pko_vf_ctl.pko[vf_idx].bar0; + + reg = (pko_vf_ctl.fc_iomem.iova + + (sizeof(struct octeontx_pko_fc_ctl_s) * dq_ix)) & ~0x7F; + reg |= /* BASE */ + (0x2 << 3) | /* HYST_BITS */ + (((PKO_DQ_FC_STRIDE == PKO_DQ_FC_STRIDE_16) ? 1 : 0) << 2) | + (0x1 << 0); /* ENABLE */ + + octeontx_write64(reg, vf_bar0 + PKO_VF_DQ_FC_CONFIG); + pko_vf_ctl.pko[vf_idx].status = PKO_VALID; + + octeontx_log_dbg("PKO: bar0 %p VF_idx %d DQ_FC_CFG=%" PRIx64 "", + vf_bar0, (int)vf_idx, reg); + } + return 0; +} + +void +octeontx_pko_fc_free(void) +{ + rte_free(pko_vf_ctl.fc_iomem.va); +} + +static void +octeontx_pkovf_setup(void) +{ + static bool init_once; + + if (!init_once) { + unsigned int i; + + rte_spinlock_init(&pko_vf_ctl.lock); + + pko_vf_ctl.fc_iomem = PKO_IOMEM_NULL; + pko_vf_ctl.fc_ctl = NULL; + + for (i = 0; i < PKO_VF_MAX; i++) { + pko_vf_ctl.pko[i].bar0 = NULL; + pko_vf_ctl.pko[i].bar2 = NULL; + pko_vf_ctl.pko[i].domain = ~(uint16_t)0; + pko_vf_ctl.pko[i].vfid = ~(uint16_t)0; + } + + for (i = 0; i < (PKO_VF_MAX * PKO_VF_NUM_DQ); i++) + pko_vf_ctl.dq_map[i].chanid = 0; + + init_once = true; + } +} + +/* PKOVF pcie device*/ +static int +pkovf_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) +{ + uint64_t val; + uint16_t vfid; + uint16_t domain; + uint8_t *bar0; + uint8_t *bar2; + static uint8_t vf_cnt; + struct octeontx_pkovf *res; + + RTE_SET_USED(pci_drv); + + /* For secondary processes, the primary has done all the work */ + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; + + if (pci_dev->mem_resource[0].addr == NULL || + pci_dev->mem_resource[2].addr == NULL) { + octeontx_log_err("Empty bars %p %p", + pci_dev->mem_resource[0].addr, + pci_dev->mem_resource[2].addr); + return -ENODEV; + } + bar0 = pci_dev->mem_resource[0].addr; + bar2 = pci_dev->mem_resource[2].addr; + + octeontx_pkovf_setup(); + + /* get vfid and domain */ + val = octeontx_read64(bar0 + PKO_VF_DQ_FC_CONFIG); + domain = (val >> 7) & 0xffff; + vfid = (val >> 23) & 0xffff; + + if (unlikely(vfid >= 
PKO_VF_MAX)) { + octeontx_log_err("pko: Invalid vfid %d", vfid); + return -EINVAL; + } + + res = &pko_vf_ctl.pko[vf_cnt++]; + res->vfid = vfid; + res->domain = domain; + res->bar0 = bar0; + res->bar2 = bar2; + + octeontx_log_dbg("Domain=%d group=%d", res->domain, res->vfid); + return 0; +} + +#define PCI_VENDOR_ID_CAVIUM 0x177D +#define PCI_DEVICE_ID_OCTEONTX_PKO_VF 0xA049 + +static const struct rte_pci_id pci_pkovf_map[] = { + { + RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, + PCI_DEVICE_ID_OCTEONTX_PKO_VF) + }, + { + .vendor_id = 0, + }, +}; + +static struct rte_pci_driver pci_pkovf = { + .id_table = pci_pkovf_map, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING, + .probe = pkovf_probe, +}; + +RTE_PMD_REGISTER_PCI(octeontx_pkovf, pci_pkovf); diff --git a/src/spdk/dpdk/drivers/net/octeontx/base/octeontx_pkovf.h b/src/spdk/dpdk/drivers/net/octeontx/base/octeontx_pkovf.h new file mode 100644 index 000000000..7e1aba3e3 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/octeontx/base/octeontx_pkovf.h @@ -0,0 +1,83 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Cavium, Inc + */ + +#ifndef __OCTEONTX_PKO_H__ +#define __OCTEONTX_PKO_H__ + +#include + +/* PKO maximum constants */ +#define PKO_VF_MAX (32) +#define PKO_VF_NUM_DQ (8) +#define PKO_MAX_NUM_DQ (8) +#define PKO_DQ_DRAIN_TO (1000) + +#define PKO_DQ_FC_SKID (4) +#define PKO_DQ_FC_DEPTH_PAGES (2048) +#define PKO_DQ_FC_STRIDE_16 (16) +#define PKO_DQ_FC_STRIDE_128 (128) +#define PKO_DQ_FC_STRIDE PKO_DQ_FC_STRIDE_16 + +#define PKO_DQ_KIND_BIT 49 +#define PKO_DQ_STATUS_BIT 60 +#define PKO_DQ_OP_BIT 48 + +/* PKO VF register offsets from VF_BAR0 */ +#define PKO_VF_DQ_SW_XOFF(gdq) (0x000100 | (gdq) << 17) +#define PKO_VF_DQ_WM_CTL(gdq) (0x000130 | (gdq) << 17) +#define PKO_VF_DQ_WM_CNT(gdq) (0x000150 | (gdq) << 17) +#define PKO_VF_DQ_FC_CONFIG (0x000160) +#define PKO_VF_DQ_FC_STATUS(gdq) (0x000168 | (gdq) << 17) +#define PKO_VF_DQ_OP_SEND(gdq, op) (0x001000 | (gdq) << 17 | (op) << 3) +#define PKO_VF_DQ_OP_OPEN(gdq) (0x001100 | (gdq) << 17) +#define PKO_VF_DQ_OP_CLOSE(gdq) (0x001200 | (gdq) << 17) +#define PKO_VF_DQ_OP_QUERY(gdq) (0x001300 | (gdq) << 17) + +/* pko_send_hdr_s + pko_send_link */ +#define PKO_CMD_SZ (2 << 1) +#define PKO_SEND_BUFLINK_SUBDC (0x0ull << 60) +#define PKO_SEND_BUFLINK_LDTYPE(x) ((x) << 58) +#define PKO_SEND_BUFLINK_GAUAR(x) ((x) << 24) +#define PKO_SEND_GATHER_SUBDC (0x2ull << 60) +#define PKO_SEND_GATHER_LDTYPE(x) ((x) << 58) +#define PKO_SEND_GATHER_GAUAR(x) ((x) << 24) + +#define OCTEONTX_PKO_COPROC 4 +#define MBOX_PKO_MTU_CONFIG 1 + +typedef struct mbox_pko_mtu_cfg { + uint32_t mtu; +} mbox_pko_mtu_cfg_t; + +typedef struct octeontx_dq_s { + void *lmtline_va; + void *ioreg_va; + void *fc_status_va; +} octeontx_dq_t; + +/** + * Function for extracting information out of a given DQ. + * + * It is intended to be used in slow path (configuration) in + * octeontx_pko_channel_query(). + * + * @param dq The DQ to extract information from. + * @param out Pointer to the user's structure he wants to fill. 
+ */ +typedef void (*octeontx_pko_dq_getter_t)(octeontx_dq_t *dq, void *out); + +int +octeontx_pko_channel_query_dqs(int chanid, void *out, size_t out_elem_size, + size_t dq_num, octeontx_pko_dq_getter_t getter); +int octeontx_pko_channel_open(int dq_base, int dq_num, int chanid); +int octeontx_pko_channel_close(int chanid); +int octeontx_pko_channel_start(int chanid); +int octeontx_pko_channel_stop(int chanid); +int octeontx_pko_vf_count(void); +size_t octeontx_pko_get_vfid(void); +int octeontx_pko_init_fc(const size_t pko_vf_count); +void octeontx_pko_fc_free(void); +int octeontx_pko_send_mtu(int port, int mtu); + +#endif /* __OCTEONTX_PKO_H__ */ diff --git a/src/spdk/dpdk/drivers/net/octeontx/meson.build b/src/spdk/dpdk/drivers/net/octeontx/meson.build new file mode 100644 index 000000000..e8d3ff4a3 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/octeontx/meson.build @@ -0,0 +1,14 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2017 Cavium, Inc + +subdir('base') +objs = [base_objs] + +sources = files('octeontx_rxtx.c', + 'octeontx_ethdev.c', + 'octeontx_ethdev_ops.c' + ) + +deps += ['mempool_octeontx', 'eventdev'] + +includes += include_directories('base') diff --git a/src/spdk/dpdk/drivers/net/octeontx/octeontx_ethdev.c b/src/spdk/dpdk/drivers/net/octeontx/octeontx_ethdev.c new file mode 100644 index 000000000..d5371ae07 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/octeontx/octeontx_ethdev.c @@ -0,0 +1,1672 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Cavium, Inc + */ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "octeontx_ethdev.h" +#include "octeontx_rxtx.h" +#include "octeontx_logs.h" + +struct evdev_priv_data { + OFFLOAD_FLAGS; /*Sequence should not be changed */ +} __rte_cache_aligned; + +struct octeontx_vdev_init_params { + uint8_t nr_port; +}; + +uint16_t +rte_octeontx_pchan_map[OCTEONTX_MAX_BGX_PORTS][OCTEONTX_MAX_LMAC_PER_BGX]; + +enum octeontx_link_speed { + OCTEONTX_LINK_SPEED_SGMII, + OCTEONTX_LINK_SPEED_XAUI, + OCTEONTX_LINK_SPEED_RXAUI, + OCTEONTX_LINK_SPEED_10G_R, + OCTEONTX_LINK_SPEED_40G_R, + OCTEONTX_LINK_SPEED_RESERVE1, + OCTEONTX_LINK_SPEED_QSGMII, + OCTEONTX_LINK_SPEED_RESERVE2 +}; + +int otx_net_logtype_mbox; +int otx_net_logtype_init; +int otx_net_logtype_driver; + +RTE_INIT(otx_net_init_log) +{ + otx_net_logtype_mbox = rte_log_register("pmd.net.octeontx.mbox"); + if (otx_net_logtype_mbox >= 0) + rte_log_set_level(otx_net_logtype_mbox, RTE_LOG_NOTICE); + + otx_net_logtype_init = rte_log_register("pmd.net.octeontx.init"); + if (otx_net_logtype_init >= 0) + rte_log_set_level(otx_net_logtype_init, RTE_LOG_NOTICE); + + otx_net_logtype_driver = rte_log_register("pmd.net.octeontx.driver"); + if (otx_net_logtype_driver >= 0) + rte_log_set_level(otx_net_logtype_driver, RTE_LOG_NOTICE); +} + +/* Parse integer from integer argument */ +static int +parse_integer_arg(const char *key __rte_unused, + const char *value, void *extra_args) +{ + int *i = (int *)extra_args; + + *i = atoi(value); + if (*i < 0) { + octeontx_log_err("argument has to be positive."); + return -1; + } + + return 0; +} + +static int +octeontx_parse_vdev_init_params(struct octeontx_vdev_init_params *params, + struct rte_vdev_device *dev) +{ + struct rte_kvargs *kvlist = NULL; + int ret = 0; + + static const char * const octeontx_vdev_valid_params[] = { + OCTEONTX_VDEV_NR_PORT_ARG, + NULL + }; + + const char *input_args = 
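+			/* devargs string from the EAL command line,
+			 * e.g. --vdev="net_octeontx,nr_port=2"
+			 */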
rte_vdev_device_args(dev); + if (params == NULL) + return -EINVAL; + + + if (input_args) { + kvlist = rte_kvargs_parse(input_args, + octeontx_vdev_valid_params); + if (kvlist == NULL) + return -1; + + ret = rte_kvargs_process(kvlist, + OCTEONTX_VDEV_NR_PORT_ARG, + &parse_integer_arg, + ¶ms->nr_port); + if (ret < 0) + goto free_kvlist; + } + +free_kvlist: + rte_kvargs_free(kvlist); + return ret; +} + +static int +octeontx_port_open(struct octeontx_nic *nic) +{ + octeontx_mbox_bgx_port_conf_t bgx_port_conf; + octeontx_mbox_bgx_port_fifo_cfg_t fifo_cfg; + int res; + + res = 0; + memset(&bgx_port_conf, 0x0, sizeof(bgx_port_conf)); + PMD_INIT_FUNC_TRACE(); + + res = octeontx_bgx_port_open(nic->port_id, &bgx_port_conf); + if (res < 0) { + octeontx_log_err("failed to open port %d", res); + return res; + } + + nic->node = bgx_port_conf.node; + nic->port_ena = bgx_port_conf.enable; + nic->base_ichan = bgx_port_conf.base_chan; + nic->base_ochan = bgx_port_conf.base_chan; + nic->num_ichans = bgx_port_conf.num_chans; + nic->num_ochans = bgx_port_conf.num_chans; + nic->bgx_mtu = bgx_port_conf.mtu; + nic->bpen = bgx_port_conf.bpen; + nic->fcs_strip = bgx_port_conf.fcs_strip; + nic->bcast_mode = bgx_port_conf.bcast_mode; + nic->mcast_mode = bgx_port_conf.mcast_mode; + nic->speed = bgx_port_conf.mode; + + memset(&fifo_cfg, 0x0, sizeof(fifo_cfg)); + + res = octeontx_bgx_port_get_fifo_cfg(nic->port_id, &fifo_cfg); + if (res < 0) { + octeontx_log_err("failed to get port %d fifo cfg", res); + return res; + } + + nic->fc.rx_fifosz = fifo_cfg.rx_fifosz; + + memcpy(&nic->mac_addr[0], &bgx_port_conf.macaddr[0], + RTE_ETHER_ADDR_LEN); + + octeontx_log_dbg("port opened %d", nic->port_id); + return res; +} + +static void +octeontx_link_status_print(struct rte_eth_dev *eth_dev, + struct rte_eth_link *link) +{ + if (link && link->link_status) + octeontx_log_info("Port %u: Link Up - speed %u Mbps - %s", + (eth_dev->data->port_id), + link->link_speed, + link->link_duplex == ETH_LINK_FULL_DUPLEX ? + "full-duplex" : "half-duplex"); + else + octeontx_log_info("Port %d: Link Down", + (int)(eth_dev->data->port_id)); +} + +static void +octeontx_link_status_update(struct octeontx_nic *nic, + struct rte_eth_link *link) +{ + memset(link, 0, sizeof(*link)); + + link->link_status = nic->link_up ? 
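+			    /* BGX only reports up/down; speed and duplex are
+			     * filled in from the configured mode below.
+			     */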
ETH_LINK_UP : ETH_LINK_DOWN; + + switch (nic->speed) { + case OCTEONTX_LINK_SPEED_SGMII: + link->link_speed = ETH_SPEED_NUM_1G; + break; + + case OCTEONTX_LINK_SPEED_XAUI: + link->link_speed = ETH_SPEED_NUM_10G; + break; + + case OCTEONTX_LINK_SPEED_RXAUI: + case OCTEONTX_LINK_SPEED_10G_R: + link->link_speed = ETH_SPEED_NUM_10G; + break; + case OCTEONTX_LINK_SPEED_QSGMII: + link->link_speed = ETH_SPEED_NUM_5G; + break; + case OCTEONTX_LINK_SPEED_40G_R: + link->link_speed = ETH_SPEED_NUM_40G; + break; + + case OCTEONTX_LINK_SPEED_RESERVE1: + case OCTEONTX_LINK_SPEED_RESERVE2: + default: + link->link_speed = ETH_SPEED_NUM_NONE; + octeontx_log_err("incorrect link speed %d", nic->speed); + break; + } + + link->link_duplex = ETH_LINK_FULL_DUPLEX; + link->link_autoneg = ETH_LINK_AUTONEG; +} + +static void +octeontx_link_status_poll(void *arg) +{ + struct octeontx_nic *nic = arg; + struct rte_eth_link link; + struct rte_eth_dev *dev; + int res; + + PMD_INIT_FUNC_TRACE(); + + dev = nic->dev; + + res = octeontx_bgx_port_link_status(nic->port_id); + if (res < 0) { + octeontx_log_err("Failed to get port %d link status", + nic->port_id); + } else { + if (nic->link_up != (uint8_t)res) { + nic->link_up = (uint8_t)res; + octeontx_link_status_update(nic, &link); + octeontx_link_status_print(dev, &link); + rte_eth_linkstatus_set(dev, &link); + _rte_eth_dev_callback_process(dev, + RTE_ETH_EVENT_INTR_LSC, + NULL); + } + } + + res = rte_eal_alarm_set(OCCTX_INTR_POLL_INTERVAL_MS * 1000, + octeontx_link_status_poll, nic); + if (res < 0) + octeontx_log_err("Failed to restart alarm for port %d, err: %d", + nic->port_id, res); +} + +static void +octeontx_port_close(struct octeontx_nic *nic) +{ + PMD_INIT_FUNC_TRACE(); + + rte_eal_alarm_cancel(octeontx_link_status_poll, nic); + octeontx_bgx_port_close(nic->port_id); + octeontx_log_dbg("port closed %d", nic->port_id); +} + +static int +octeontx_port_start(struct octeontx_nic *nic) +{ + PMD_INIT_FUNC_TRACE(); + + return octeontx_bgx_port_start(nic->port_id); +} + +static int +octeontx_port_stop(struct octeontx_nic *nic) +{ + PMD_INIT_FUNC_TRACE(); + + return octeontx_bgx_port_stop(nic->port_id); +} + +static int +octeontx_port_promisc_set(struct octeontx_nic *nic, int en) +{ + struct rte_eth_dev *dev; + int res; + + res = 0; + PMD_INIT_FUNC_TRACE(); + dev = nic->dev; + + res = octeontx_bgx_port_promisc_set(nic->port_id, en); + if (res < 0) { + octeontx_log_err("failed to set promiscuous mode %d", + nic->port_id); + return res; + } + + /* Set proper flag for the mode */ + dev->data->promiscuous = (en != 0) ? 1 : 0; + + octeontx_log_dbg("port %d : promiscuous mode %s", + nic->port_id, en ? 
"set" : "unset"); + + return 0; +} + +static int +octeontx_port_stats(struct octeontx_nic *nic, struct rte_eth_stats *stats) +{ + octeontx_mbox_bgx_port_stats_t bgx_stats; + int res; + + PMD_INIT_FUNC_TRACE(); + + res = octeontx_bgx_port_stats(nic->port_id, &bgx_stats); + if (res < 0) { + octeontx_log_err("failed to get port stats %d", nic->port_id); + return res; + } + + stats->ipackets = bgx_stats.rx_packets; + stats->ibytes = bgx_stats.rx_bytes; + stats->imissed = bgx_stats.rx_dropped; + stats->ierrors = bgx_stats.rx_errors; + stats->opackets = bgx_stats.tx_packets; + stats->obytes = bgx_stats.tx_bytes; + stats->oerrors = bgx_stats.tx_errors; + + octeontx_log_dbg("port%d stats inpkts=%" PRIx64 " outpkts=%" PRIx64 "", + nic->port_id, stats->ipackets, stats->opackets); + + return 0; +} + +static int +octeontx_port_stats_clr(struct octeontx_nic *nic) +{ + PMD_INIT_FUNC_TRACE(); + + return octeontx_bgx_port_stats_clr(nic->port_id); +} + +static inline void +devconf_set_default_sane_values(struct rte_event_dev_config *dev_conf, + struct rte_event_dev_info *info) +{ + memset(dev_conf, 0, sizeof(struct rte_event_dev_config)); + dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns; + + dev_conf->nb_event_ports = info->max_event_ports; + dev_conf->nb_event_queues = info->max_event_queues; + + dev_conf->nb_event_queue_flows = info->max_event_queue_flows; + dev_conf->nb_event_port_dequeue_depth = + info->max_event_port_dequeue_depth; + dev_conf->nb_event_port_enqueue_depth = + info->max_event_port_enqueue_depth; + dev_conf->nb_event_port_enqueue_depth = + info->max_event_port_enqueue_depth; + dev_conf->nb_events_limit = + info->max_num_events; +} + +static uint16_t +octeontx_tx_offload_flags(struct rte_eth_dev *eth_dev) +{ + struct octeontx_nic *nic = octeontx_pmd_priv(eth_dev); + uint16_t flags = 0; + + if (nic->tx_offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM || + nic->tx_offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) + flags |= OCCTX_TX_OFFLOAD_OL3_OL4_CSUM_F; + + if (nic->tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM || + nic->tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM || + nic->tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM || + nic->tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) + flags |= OCCTX_TX_OFFLOAD_L3_L4_CSUM_F; + + if (!(nic->tx_offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)) + flags |= OCCTX_TX_OFFLOAD_MBUF_NOFF_F; + + if (nic->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS) + flags |= OCCTX_TX_MULTI_SEG_F; + + return flags; +} + +static uint16_t +octeontx_rx_offload_flags(struct rte_eth_dev *eth_dev) +{ + struct octeontx_nic *nic = octeontx_pmd_priv(eth_dev); + uint16_t flags = 0; + + if (nic->rx_offloads & (DEV_RX_OFFLOAD_TCP_CKSUM | + DEV_RX_OFFLOAD_UDP_CKSUM)) + flags |= OCCTX_RX_OFFLOAD_CSUM_F; + + if (nic->rx_offloads & (DEV_RX_OFFLOAD_IPV4_CKSUM | + DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM)) + flags |= OCCTX_RX_OFFLOAD_CSUM_F; + + if (nic->rx_offloads & DEV_RX_OFFLOAD_SCATTER) { + flags |= OCCTX_RX_MULTI_SEG_F; + eth_dev->data->scattered_rx = 1; + /* If scatter mode is enabled, TX should also be in multi + * seg mode, else memory leak will occur + */ + nic->tx_offloads |= DEV_TX_OFFLOAD_MULTI_SEGS; + } + + return flags; +} + +static int +octeontx_dev_configure(struct rte_eth_dev *dev) +{ + struct rte_eth_dev_data *data = dev->data; + struct rte_eth_conf *conf = &data->dev_conf; + struct rte_eth_rxmode *rxmode = &conf->rxmode; + struct rte_eth_txmode *txmode = &conf->txmode; + struct octeontx_nic *nic = octeontx_pmd_priv(dev); + int ret; + + PMD_INIT_FUNC_TRACE(); + RTE_SET_USED(conf); + + if (!rte_eal_has_hugepages()) 
{ + octeontx_log_err("huge page is not configured"); + return -EINVAL; + } + + if (txmode->mq_mode) { + octeontx_log_err("tx mq_mode DCB or VMDq not supported"); + return -EINVAL; + } + + if (rxmode->mq_mode != ETH_MQ_RX_NONE && + rxmode->mq_mode != ETH_MQ_RX_RSS) { + octeontx_log_err("unsupported rx qmode %d", rxmode->mq_mode); + return -EINVAL; + } + + if (!(txmode->offloads & DEV_TX_OFFLOAD_MT_LOCKFREE)) { + PMD_INIT_LOG(NOTICE, "cant disable lockfree tx"); + txmode->offloads |= DEV_TX_OFFLOAD_MT_LOCKFREE; + } + + if (conf->link_speeds & ETH_LINK_SPEED_FIXED) { + octeontx_log_err("setting link speed/duplex not supported"); + return -EINVAL; + } + + if (conf->dcb_capability_en) { + octeontx_log_err("DCB enable not supported"); + return -EINVAL; + } + + if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) { + octeontx_log_err("flow director not supported"); + return -EINVAL; + } + + nic->num_tx_queues = dev->data->nb_tx_queues; + + ret = octeontx_pko_channel_open(nic->pko_vfid * PKO_VF_NUM_DQ, + nic->num_tx_queues, + nic->base_ochan); + if (ret) { + octeontx_log_err("failed to open channel %d no-of-txq %d", + nic->base_ochan, nic->num_tx_queues); + return -EFAULT; + } + + ret = octeontx_dev_vlan_offload_init(dev); + if (ret) { + octeontx_log_err("failed to initialize vlan offload"); + return -EFAULT; + } + + nic->pki.classifier_enable = false; + nic->pki.hash_enable = true; + nic->pki.initialized = false; + + nic->rx_offloads |= rxmode->offloads; + nic->tx_offloads |= txmode->offloads; + nic->rx_offload_flags |= octeontx_rx_offload_flags(dev); + nic->tx_offload_flags |= octeontx_tx_offload_flags(dev); + + return 0; +} + +static void +octeontx_dev_close(struct rte_eth_dev *dev) +{ + struct octeontx_txq *txq = NULL; + struct octeontx_nic *nic = octeontx_pmd_priv(dev); + unsigned int i; + int ret; + + PMD_INIT_FUNC_TRACE(); + + rte_event_dev_close(nic->evdev); + + octeontx_dev_flow_ctrl_fini(dev); + + octeontx_dev_vlan_offload_fini(dev); + + ret = octeontx_pko_channel_close(nic->base_ochan); + if (ret < 0) { + octeontx_log_err("failed to close channel %d VF%d %d %d", + nic->base_ochan, nic->port_id, nic->num_tx_queues, + ret); + } + /* Free txq resources for this port */ + for (i = 0; i < nic->num_tx_queues; i++) { + txq = dev->data->tx_queues[i]; + if (!txq) + continue; + + rte_free(txq); + } + + /* Free MAC address table */ + rte_free(dev->data->mac_addrs); + dev->data->mac_addrs = NULL; + + octeontx_port_close(nic); + + dev->tx_pkt_burst = NULL; + dev->rx_pkt_burst = NULL; +} + +static int +octeontx_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu) +{ + uint32_t buffsz, frame_size = mtu + OCCTX_L2_OVERHEAD; + struct octeontx_nic *nic = octeontx_pmd_priv(eth_dev); + struct rte_eth_dev_data *data = eth_dev->data; + int rc = 0; + + /* Check if MTU is within the allowed range */ + if (frame_size < OCCTX_MIN_FRS || frame_size > OCCTX_MAX_FRS) + return -EINVAL; + + buffsz = data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM; + + /* Refuse MTU that requires the support of scattered packets + * when this feature has not been enabled before. 
+ */ + if (data->dev_started && frame_size > buffsz && + !(nic->rx_offloads & DEV_RX_OFFLOAD_SCATTER)) { + octeontx_log_err("Scatter mode is disabled"); + return -EINVAL; + } + + /* Check * >= max_frame */ + if ((nic->rx_offloads & DEV_RX_OFFLOAD_SCATTER) && + (frame_size > buffsz * OCCTX_RX_NB_SEG_MAX)) + return -EINVAL; + + rc = octeontx_pko_send_mtu(nic->port_id, frame_size); + if (rc) + return rc; + + rc = octeontx_bgx_port_mtu_set(nic->port_id, frame_size); + if (rc) + return rc; + + if (frame_size > RTE_ETHER_MAX_LEN) + nic->rx_offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME; + else + nic->rx_offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME; + + /* Update max_rx_pkt_len */ + data->dev_conf.rxmode.max_rx_pkt_len = frame_size; + octeontx_log_info("Received pkt beyond maxlen %d will be dropped", + frame_size); + + return rc; +} + +static int +octeontx_recheck_rx_offloads(struct octeontx_rxq *rxq) +{ + struct rte_eth_dev *eth_dev = rxq->eth_dev; + struct octeontx_nic *nic = octeontx_pmd_priv(eth_dev); + struct rte_eth_dev_data *data = eth_dev->data; + struct rte_pktmbuf_pool_private *mbp_priv; + struct evdev_priv_data *evdev_priv; + struct rte_eventdev *dev; + uint32_t buffsz; + + /* Get rx buffer size */ + mbp_priv = rte_mempool_get_priv(rxq->pool); + buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM; + + /* Setup scatter mode if needed by jumbo */ + if (data->dev_conf.rxmode.max_rx_pkt_len > buffsz) { + nic->rx_offloads |= DEV_RX_OFFLOAD_SCATTER; + nic->rx_offload_flags |= octeontx_rx_offload_flags(eth_dev); + nic->tx_offload_flags |= octeontx_tx_offload_flags(eth_dev); + } + + /* Sharing offload flags via eventdev priv region */ + dev = &rte_eventdevs[rxq->evdev]; + evdev_priv = dev->data->dev_private; + evdev_priv->rx_offload_flags = nic->rx_offload_flags; + evdev_priv->tx_offload_flags = nic->tx_offload_flags; + + /* Setup MTU based on max_rx_pkt_len */ + nic->mtu = data->dev_conf.rxmode.max_rx_pkt_len - OCCTX_L2_OVERHEAD; + + return 0; +} + +static int +octeontx_dev_start(struct rte_eth_dev *dev) +{ + struct octeontx_nic *nic = octeontx_pmd_priv(dev); + struct octeontx_rxq *rxq; + int ret, i; + + PMD_INIT_FUNC_TRACE(); + /* Rechecking if any new offload set to update + * rx/tx burst function pointer accordingly. + */ + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = dev->data->rx_queues[i]; + octeontx_recheck_rx_offloads(rxq); + } + + /* Setting up the mtu based on max_rx_pkt_len */ + ret = octeontx_dev_mtu_set(dev, nic->mtu); + if (ret) { + octeontx_log_err("Failed to set default MTU size %d", ret); + goto error; + } + + /* + * Tx start + */ + octeontx_set_tx_function(dev); + ret = octeontx_pko_channel_start(nic->base_ochan); + if (ret < 0) { + octeontx_log_err("fail to conf VF%d no. 
txq %d chan %d ret %d", + nic->port_id, nic->num_tx_queues, nic->base_ochan, + ret); + goto error; + } + + /* + * Rx start + */ + dev->rx_pkt_burst = octeontx_recv_pkts; + ret = octeontx_pki_port_start(nic->port_id); + if (ret < 0) { + octeontx_log_err("fail to start Rx on port %d", nic->port_id); + goto channel_stop_error; + } + + /* + * Start port + */ + ret = octeontx_port_start(nic); + if (ret < 0) { + octeontx_log_err("failed start port %d", ret); + goto pki_port_stop_error; + } + + PMD_TX_LOG(DEBUG, "pko: start channel %d no.of txq %d port %d", + nic->base_ochan, nic->num_tx_queues, nic->port_id); + + ret = rte_event_dev_start(nic->evdev); + if (ret < 0) { + octeontx_log_err("failed to start evdev: ret (%d)", ret); + goto pki_port_stop_error; + } + + /* Success */ + return ret; + +pki_port_stop_error: + octeontx_pki_port_stop(nic->port_id); +channel_stop_error: + octeontx_pko_channel_stop(nic->base_ochan); +error: + return ret; +} + +static void +octeontx_dev_stop(struct rte_eth_dev *dev) +{ + struct octeontx_nic *nic = octeontx_pmd_priv(dev); + int ret; + + PMD_INIT_FUNC_TRACE(); + + rte_event_dev_stop(nic->evdev); + + ret = octeontx_port_stop(nic); + if (ret < 0) { + octeontx_log_err("failed to req stop port %d res=%d", + nic->port_id, ret); + return; + } + + ret = octeontx_pki_port_stop(nic->port_id); + if (ret < 0) { + octeontx_log_err("failed to stop pki port %d res=%d", + nic->port_id, ret); + return; + } + + ret = octeontx_pko_channel_stop(nic->base_ochan); + if (ret < 0) { + octeontx_log_err("failed to stop channel %d VF%d %d %d", + nic->base_ochan, nic->port_id, nic->num_tx_queues, + ret); + return; + } +} + +static int +octeontx_dev_promisc_enable(struct rte_eth_dev *dev) +{ + struct octeontx_nic *nic = octeontx_pmd_priv(dev); + + PMD_INIT_FUNC_TRACE(); + return octeontx_port_promisc_set(nic, 1); +} + +static int +octeontx_dev_promisc_disable(struct rte_eth_dev *dev) +{ + struct octeontx_nic *nic = octeontx_pmd_priv(dev); + + PMD_INIT_FUNC_TRACE(); + return octeontx_port_promisc_set(nic, 0); +} + +static int +octeontx_port_link_status(struct octeontx_nic *nic) +{ + int res; + + PMD_INIT_FUNC_TRACE(); + res = octeontx_bgx_port_link_status(nic->port_id); + if (res < 0) { + octeontx_log_err("failed to get port %d link status", + nic->port_id); + return res; + } + + if (nic->link_up != (uint8_t)res || nic->print_flag == -1) { + nic->link_up = (uint8_t)res; + nic->print_flag = 1; + } + octeontx_log_dbg("port %d link status %d", nic->port_id, nic->link_up); + + return res; +} + +/* + * Return 0 means link status changed, -1 means not changed + */ +static int +octeontx_dev_link_update(struct rte_eth_dev *dev, + int wait_to_complete __rte_unused) +{ + struct octeontx_nic *nic = octeontx_pmd_priv(dev); + struct rte_eth_link link; + int res; + + PMD_INIT_FUNC_TRACE(); + + res = octeontx_port_link_status(nic); + if (res < 0) { + octeontx_log_err("failed to request link status %d", res); + return res; + } + + octeontx_link_status_update(nic, &link); + if (nic->print_flag) { + octeontx_link_status_print(nic->dev, &link); + nic->print_flag = 0; + } + + return rte_eth_linkstatus_set(dev, &link); +} + +static int +octeontx_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) +{ + struct octeontx_nic *nic = octeontx_pmd_priv(dev); + + PMD_INIT_FUNC_TRACE(); + return octeontx_port_stats(nic, stats); +} + +static int +octeontx_dev_stats_reset(struct rte_eth_dev *dev) +{ + struct octeontx_nic *nic = octeontx_pmd_priv(dev); + + PMD_INIT_FUNC_TRACE(); + return 
octeontx_port_stats_clr(nic); +} + +static void +octeontx_dev_mac_addr_del(struct rte_eth_dev *dev, uint32_t index) +{ + struct octeontx_nic *nic = octeontx_pmd_priv(dev); + int ret; + + ret = octeontx_bgx_port_mac_del(nic->port_id, index); + if (ret != 0) + octeontx_log_err("failed to del MAC address filter on port %d", + nic->port_id); +} + +static int +octeontx_dev_mac_addr_add(struct rte_eth_dev *dev, + struct rte_ether_addr *mac_addr, + uint32_t index, + __rte_unused uint32_t vmdq) +{ + struct octeontx_nic *nic = octeontx_pmd_priv(dev); + int ret; + + ret = octeontx_bgx_port_mac_add(nic->port_id, mac_addr->addr_bytes, + index); + if (ret < 0) { + octeontx_log_err("failed to add MAC address filter on port %d", + nic->port_id); + return ret; + } + + return 0; +} + +static int +octeontx_dev_default_mac_addr_set(struct rte_eth_dev *dev, + struct rte_ether_addr *addr) +{ + struct octeontx_nic *nic = octeontx_pmd_priv(dev); + int ret; + + ret = octeontx_bgx_port_mac_set(nic->port_id, addr->addr_bytes); + if (ret == 0) { + /* Update same mac address to BGX CAM table */ + ret = octeontx_bgx_port_mac_add(nic->port_id, addr->addr_bytes, + 0); + } + if (ret < 0) { + octeontx_log_err("failed to set MAC address on port %d", + nic->port_id); + } + + return ret; +} + +static int +octeontx_dev_info(struct rte_eth_dev *dev, + struct rte_eth_dev_info *dev_info) +{ + struct octeontx_nic *nic = octeontx_pmd_priv(dev); + + /* Autonegotiation may be disabled */ + dev_info->speed_capa = ETH_LINK_SPEED_FIXED; + dev_info->speed_capa |= ETH_LINK_SPEED_10M | ETH_LINK_SPEED_100M | + ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G | + ETH_LINK_SPEED_40G; + + /* Min/Max MTU supported */ + dev_info->min_rx_bufsize = OCCTX_MIN_FRS; + dev_info->max_rx_pktlen = OCCTX_MAX_FRS; + dev_info->max_mtu = dev_info->max_rx_pktlen - OCCTX_L2_OVERHEAD; + dev_info->min_mtu = dev_info->min_rx_bufsize - OCCTX_L2_OVERHEAD; + + dev_info->max_mac_addrs = + octeontx_bgx_port_mac_entries_get(nic->port_id); + dev_info->max_rx_pktlen = PKI_MAX_PKTLEN; + dev_info->max_rx_queues = 1; + dev_info->max_tx_queues = PKO_MAX_NUM_DQ; + dev_info->min_rx_bufsize = 0; + + dev_info->default_rxconf = (struct rte_eth_rxconf) { + .rx_free_thresh = 0, + .rx_drop_en = 0, + .offloads = OCTEONTX_RX_OFFLOADS, + }; + + dev_info->default_txconf = (struct rte_eth_txconf) { + .tx_free_thresh = 0, + .offloads = OCTEONTX_TX_OFFLOADS, + }; + + dev_info->rx_offload_capa = OCTEONTX_RX_OFFLOADS; + dev_info->tx_offload_capa = OCTEONTX_TX_OFFLOADS; + dev_info->rx_queue_offload_capa = OCTEONTX_RX_OFFLOADS; + dev_info->tx_queue_offload_capa = OCTEONTX_TX_OFFLOADS; + + return 0; +} + +static void +octeontx_dq_info_getter(octeontx_dq_t *dq, void *out) +{ + ((octeontx_dq_t *)out)->lmtline_va = dq->lmtline_va; + ((octeontx_dq_t *)out)->ioreg_va = dq->ioreg_va; + ((octeontx_dq_t *)out)->fc_status_va = dq->fc_status_va; +} + +static int +octeontx_vf_start_tx_queue(struct rte_eth_dev *dev, struct octeontx_nic *nic, + uint16_t qidx) +{ + struct octeontx_txq *txq; + int res; + + PMD_INIT_FUNC_TRACE(); + + if (dev->data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STARTED) + return 0; + + txq = dev->data->tx_queues[qidx]; + + res = octeontx_pko_channel_query_dqs(nic->base_ochan, + &txq->dq, + sizeof(octeontx_dq_t), + txq->queue_id, + octeontx_dq_info_getter); + if (res < 0) { + res = -EFAULT; + goto close_port; + } + + dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED; + return res; + +close_port: + (void)octeontx_port_stop(nic); + octeontx_pko_channel_stop(nic->base_ochan); + 
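+	/* Error path: the DQ query failed, so stop and close the channel
+	 * that was opened in dev_configure before marking the queue stopped.
+	 */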
octeontx_pko_channel_close(nic->base_ochan); + dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED; + return res; +} + +int +octeontx_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t qidx) +{ + struct octeontx_nic *nic = octeontx_pmd_priv(dev); + + PMD_INIT_FUNC_TRACE(); + qidx = qidx % PKO_VF_NUM_DQ; + return octeontx_vf_start_tx_queue(dev, nic, qidx); +} + +static inline int +octeontx_vf_stop_tx_queue(struct rte_eth_dev *dev, struct octeontx_nic *nic, + uint16_t qidx) +{ + int ret = 0; + + RTE_SET_USED(nic); + PMD_INIT_FUNC_TRACE(); + + if (dev->data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STOPPED) + return 0; + + dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED; + return ret; +} + +int +octeontx_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx) +{ + struct octeontx_nic *nic = octeontx_pmd_priv(dev); + + PMD_INIT_FUNC_TRACE(); + qidx = qidx % PKO_VF_NUM_DQ; + + return octeontx_vf_stop_tx_queue(dev, nic, qidx); +} + +static void +octeontx_dev_tx_queue_release(void *tx_queue) +{ + struct octeontx_txq *txq = tx_queue; + int res; + + PMD_INIT_FUNC_TRACE(); + + if (txq) { + res = octeontx_dev_tx_queue_stop(txq->eth_dev, txq->queue_id); + if (res < 0) + octeontx_log_err("failed stop tx_queue(%d)\n", + txq->queue_id); + + rte_free(txq); + } +} + +static int +octeontx_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx, + uint16_t nb_desc, unsigned int socket_id, + const struct rte_eth_txconf *tx_conf __rte_unused) +{ + struct octeontx_nic *nic = octeontx_pmd_priv(dev); + struct octeontx_txq *txq = NULL; + uint16_t dq_num; + int res = 0; + + RTE_SET_USED(nb_desc); + RTE_SET_USED(socket_id); + + dq_num = (nic->pko_vfid * PKO_VF_NUM_DQ) + qidx; + + /* Socket id check */ + if (socket_id != (unsigned int)SOCKET_ID_ANY && + socket_id != (unsigned int)nic->node) + PMD_TX_LOG(INFO, "socket_id expected %d, configured %d", + socket_id, nic->node); + + /* Free memory prior to re-allocation if needed. 
*/ + if (dev->data->tx_queues[qidx] != NULL) { + PMD_TX_LOG(DEBUG, "freeing memory prior to re-allocation %d", + qidx); + octeontx_dev_tx_queue_release(dev->data->tx_queues[qidx]); + dev->data->tx_queues[qidx] = NULL; + } + + /* Allocating tx queue data structure */ + txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct octeontx_txq), + RTE_CACHE_LINE_SIZE, nic->node); + if (txq == NULL) { + octeontx_log_err("failed to allocate txq=%d", qidx); + res = -ENOMEM; + goto err; + } + + txq->eth_dev = dev; + txq->queue_id = dq_num; + dev->data->tx_queues[qidx] = txq; + dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED; + + res = octeontx_pko_channel_query_dqs(nic->base_ochan, + &txq->dq, + sizeof(octeontx_dq_t), + txq->queue_id, + octeontx_dq_info_getter); + if (res < 0) { + res = -EFAULT; + goto err; + } + + PMD_TX_LOG(DEBUG, "[%d]:[%d] txq=%p nb_desc=%d lmtline=%p ioreg_va=%p fc_status_va=%p", + qidx, txq->queue_id, txq, nb_desc, txq->dq.lmtline_va, + txq->dq.ioreg_va, + txq->dq.fc_status_va); + + return res; + +err: + if (txq) + rte_free(txq); + + return res; +} + +static int +octeontx_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx, + uint16_t nb_desc, unsigned int socket_id, + const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *mb_pool) +{ + struct octeontx_nic *nic = octeontx_pmd_priv(dev); + struct rte_mempool_ops *mp_ops = NULL; + struct octeontx_rxq *rxq = NULL; + pki_pktbuf_cfg_t pktbuf_conf; + pki_hash_cfg_t pki_hash; + pki_qos_cfg_t pki_qos; + uintptr_t pool; + int ret, port; + uint16_t gaura; + unsigned int ev_queues = (nic->ev_queues * nic->port_id) + qidx; + unsigned int ev_ports = (nic->ev_ports * nic->port_id) + qidx; + + RTE_SET_USED(nb_desc); + + memset(&pktbuf_conf, 0, sizeof(pktbuf_conf)); + memset(&pki_hash, 0, sizeof(pki_hash)); + memset(&pki_qos, 0, sizeof(pki_qos)); + + mp_ops = rte_mempool_get_ops(mb_pool->ops_index); + if (strcmp(mp_ops->name, "octeontx_fpavf")) { + octeontx_log_err("failed to find octeontx_fpavf mempool"); + return -ENOTSUP; + } + + /* Handle forbidden configurations */ + if (nic->pki.classifier_enable) { + octeontx_log_err("cannot setup queue %d. 
" + "Classifier option unsupported", qidx); + return -EINVAL; + } + + port = nic->port_id; + + /* Rx deferred start is not supported */ + if (rx_conf->rx_deferred_start) { + octeontx_log_err("rx deferred start not supported"); + return -EINVAL; + } + + /* Verify queue index */ + if (qidx >= dev->data->nb_rx_queues) { + octeontx_log_err("QID %d not supporteded (0 - %d available)\n", + qidx, (dev->data->nb_rx_queues - 1)); + return -ENOTSUP; + } + + /* Socket id check */ + if (socket_id != (unsigned int)SOCKET_ID_ANY && + socket_id != (unsigned int)nic->node) + PMD_RX_LOG(INFO, "socket_id expected %d, configured %d", + socket_id, nic->node); + + /* Allocating rx queue data structure */ + rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct octeontx_rxq), + RTE_CACHE_LINE_SIZE, nic->node); + if (rxq == NULL) { + octeontx_log_err("failed to allocate rxq=%d", qidx); + return -ENOMEM; + } + + if (!nic->pki.initialized) { + pktbuf_conf.port_type = 0; + pki_hash.port_type = 0; + pki_qos.port_type = 0; + + pktbuf_conf.mmask.f_wqe_skip = 1; + pktbuf_conf.mmask.f_first_skip = 1; + pktbuf_conf.mmask.f_later_skip = 1; + pktbuf_conf.mmask.f_mbuff_size = 1; + pktbuf_conf.mmask.f_cache_mode = 1; + + pktbuf_conf.wqe_skip = OCTTX_PACKET_WQE_SKIP; + pktbuf_conf.first_skip = OCTTX_PACKET_FIRST_SKIP(mb_pool); + pktbuf_conf.later_skip = OCTTX_PACKET_LATER_SKIP; + pktbuf_conf.mbuff_size = (mb_pool->elt_size - + RTE_PKTMBUF_HEADROOM - + rte_pktmbuf_priv_size(mb_pool) - + sizeof(struct rte_mbuf)); + + pktbuf_conf.cache_mode = PKI_OPC_MODE_STF2_STT; + + ret = octeontx_pki_port_pktbuf_config(port, &pktbuf_conf); + if (ret != 0) { + octeontx_log_err("fail to configure pktbuf for port %d", + port); + rte_free(rxq); + return ret; + } + PMD_RX_LOG(DEBUG, "Port %d Rx pktbuf configured:\n" + "\tmbuf_size:\t0x%0x\n" + "\twqe_skip:\t0x%0x\n" + "\tfirst_skip:\t0x%0x\n" + "\tlater_skip:\t0x%0x\n" + "\tcache_mode:\t%s\n", + port, + pktbuf_conf.mbuff_size, + pktbuf_conf.wqe_skip, + pktbuf_conf.first_skip, + pktbuf_conf.later_skip, + (pktbuf_conf.cache_mode == + PKI_OPC_MODE_STT) ? + "STT" : + (pktbuf_conf.cache_mode == + PKI_OPC_MODE_STF) ? + "STF" : + (pktbuf_conf.cache_mode == + PKI_OPC_MODE_STF1_STT) ? 
+ "STF1_STT" : "STF2_STT"); + + if (nic->pki.hash_enable) { + pki_hash.tag_dlc = 1; + pki_hash.tag_slc = 1; + pki_hash.tag_dlf = 1; + pki_hash.tag_slf = 1; + pki_hash.tag_prt = 1; + octeontx_pki_port_hash_config(port, &pki_hash); + } + + pool = (uintptr_t)mb_pool->pool_id; + + /* Get the gaura Id */ + gaura = octeontx_fpa_bufpool_gaura(pool); + + pki_qos.qpg_qos = PKI_QPG_QOS_NONE; + pki_qos.num_entry = 1; + pki_qos.drop_policy = 0; + pki_qos.tag_type = 0L; + pki_qos.qos_entry[0].port_add = 0; + pki_qos.qos_entry[0].gaura = gaura; + pki_qos.qos_entry[0].ggrp_ok = ev_queues; + pki_qos.qos_entry[0].ggrp_bad = ev_queues; + pki_qos.qos_entry[0].grptag_bad = 0; + pki_qos.qos_entry[0].grptag_ok = 0; + + ret = octeontx_pki_port_create_qos(port, &pki_qos); + if (ret < 0) { + octeontx_log_err("failed to create QOS port=%d, q=%d", + port, qidx); + rte_free(rxq); + return ret; + } + nic->pki.initialized = true; + } + + rxq->port_id = nic->port_id; + rxq->eth_dev = dev; + rxq->queue_id = qidx; + rxq->evdev = nic->evdev; + rxq->ev_queues = ev_queues; + rxq->ev_ports = ev_ports; + rxq->pool = mb_pool; + + octeontx_recheck_rx_offloads(rxq); + dev->data->rx_queues[qidx] = rxq; + dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED; + + return 0; +} + +static void +octeontx_dev_rx_queue_release(void *rxq) +{ + rte_free(rxq); +} + +static const uint32_t * +octeontx_dev_supported_ptypes_get(struct rte_eth_dev *dev) +{ + static const uint32_t ptypes[] = { + RTE_PTYPE_L3_IPV4, + RTE_PTYPE_L3_IPV4_EXT, + RTE_PTYPE_L3_IPV6, + RTE_PTYPE_L3_IPV6_EXT, + RTE_PTYPE_L4_TCP, + RTE_PTYPE_L4_UDP, + RTE_PTYPE_L4_FRAG, + RTE_PTYPE_UNKNOWN + }; + + if (dev->rx_pkt_burst == octeontx_recv_pkts) + return ptypes; + + return NULL; +} + +static int +octeontx_pool_ops(struct rte_eth_dev *dev, const char *pool) +{ + RTE_SET_USED(dev); + + if (!strcmp(pool, "octeontx_fpavf")) + return 0; + + return -ENOTSUP; +} + +/* Initialize and register driver with DPDK Application */ +static const struct eth_dev_ops octeontx_dev_ops = { + .dev_configure = octeontx_dev_configure, + .dev_infos_get = octeontx_dev_info, + .dev_close = octeontx_dev_close, + .dev_start = octeontx_dev_start, + .dev_stop = octeontx_dev_stop, + .promiscuous_enable = octeontx_dev_promisc_enable, + .promiscuous_disable = octeontx_dev_promisc_disable, + .link_update = octeontx_dev_link_update, + .stats_get = octeontx_dev_stats_get, + .stats_reset = octeontx_dev_stats_reset, + .mac_addr_remove = octeontx_dev_mac_addr_del, + .mac_addr_add = octeontx_dev_mac_addr_add, + .mac_addr_set = octeontx_dev_default_mac_addr_set, + .vlan_offload_set = octeontx_dev_vlan_offload_set, + .vlan_filter_set = octeontx_dev_vlan_filter_set, + .tx_queue_start = octeontx_dev_tx_queue_start, + .tx_queue_stop = octeontx_dev_tx_queue_stop, + .tx_queue_setup = octeontx_dev_tx_queue_setup, + .tx_queue_release = octeontx_dev_tx_queue_release, + .rx_queue_setup = octeontx_dev_rx_queue_setup, + .rx_queue_release = octeontx_dev_rx_queue_release, + .dev_set_link_up = octeontx_dev_set_link_up, + .dev_set_link_down = octeontx_dev_set_link_down, + .dev_supported_ptypes_get = octeontx_dev_supported_ptypes_get, + .mtu_set = octeontx_dev_mtu_set, + .pool_ops_supported = octeontx_pool_ops, + .flow_ctrl_get = octeontx_dev_flow_ctrl_get, + .flow_ctrl_set = octeontx_dev_flow_ctrl_set, +}; + +/* Create Ethdev interface per BGX LMAC ports */ +static int +octeontx_create(struct rte_vdev_device *dev, int port, uint8_t evdev, + int socket_id) +{ + int res; + size_t pko_vfid; + char 
octtx_name[OCTEONTX_MAX_NAME_LEN]; + struct octeontx_nic *nic = NULL; + struct rte_eth_dev *eth_dev = NULL; + struct rte_eth_dev_data *data; + const char *name = rte_vdev_device_name(dev); + int max_entries; + + PMD_INIT_FUNC_TRACE(); + + sprintf(octtx_name, "%s_%d", name, port); + if (rte_eal_process_type() != RTE_PROC_PRIMARY) { + eth_dev = rte_eth_dev_attach_secondary(octtx_name); + if (eth_dev == NULL) + return -ENODEV; + + eth_dev->dev_ops = &octeontx_dev_ops; + eth_dev->device = &dev->device; + octeontx_set_tx_function(eth_dev); + eth_dev->rx_pkt_burst = octeontx_recv_pkts; + rte_eth_dev_probing_finish(eth_dev); + return 0; + } + + /* Reserve an ethdev entry */ + eth_dev = rte_eth_dev_allocate(octtx_name); + if (eth_dev == NULL) { + octeontx_log_err("failed to allocate rte_eth_dev"); + res = -ENOMEM; + goto err; + } + data = eth_dev->data; + + nic = rte_zmalloc_socket(octtx_name, sizeof(*nic), 0, socket_id); + if (nic == NULL) { + octeontx_log_err("failed to allocate nic structure"); + res = -ENOMEM; + goto err; + } + data->dev_private = nic; + pko_vfid = octeontx_pko_get_vfid(); + + if (pko_vfid == SIZE_MAX) { + octeontx_log_err("failed to get pko vfid"); + res = -ENODEV; + goto err; + } + + nic->pko_vfid = pko_vfid; + nic->port_id = port; + nic->evdev = evdev; + + res = octeontx_port_open(nic); + if (res < 0) + goto err; + + /* Rx side port configuration */ + res = octeontx_pki_port_open(port); + if (res != 0) { + octeontx_log_err("failed to open PKI port %d", port); + res = -ENODEV; + goto err; + } + + eth_dev->device = &dev->device; + eth_dev->intr_handle = NULL; + eth_dev->data->kdrv = RTE_KDRV_NONE; + eth_dev->data->numa_node = dev->device.numa_node; + + data->port_id = eth_dev->data->port_id; + + nic->ev_queues = 1; + nic->ev_ports = 1; + nic->print_flag = -1; + + data->dev_link.link_status = ETH_LINK_DOWN; + data->dev_started = 0; + data->promiscuous = 0; + data->all_multicast = 0; + data->scattered_rx = 0; + + /* Get maximum number of supported MAC entries */ + max_entries = octeontx_bgx_port_mac_entries_get(nic->port_id); + if (max_entries < 0) { + octeontx_log_err("Failed to get max entries for mac addr"); + res = -ENOTSUP; + goto err; + } + + data->mac_addrs = rte_zmalloc_socket(octtx_name, max_entries * + RTE_ETHER_ADDR_LEN, 0, + socket_id); + if (data->mac_addrs == NULL) { + octeontx_log_err("failed to allocate memory for mac_addrs"); + res = -ENOMEM; + goto err; + } + + eth_dev->dev_ops = &octeontx_dev_ops; + + /* Finally save ethdev pointer to the NIC structure */ + nic->dev = eth_dev; + + if (nic->port_id != data->port_id) { + octeontx_log_err("eth_dev->port_id (%d) is diff to orig (%d)", + data->port_id, nic->port_id); + res = -EINVAL; + goto free_mac_addrs; + } + + res = rte_eal_alarm_set(OCCTX_INTR_POLL_INTERVAL_MS * 1000, + octeontx_link_status_poll, nic); + if (res) { + octeontx_log_err("Failed to start link polling alarm"); + goto err; + } + + /* Update port_id mac to eth_dev */ + memcpy(data->mac_addrs, nic->mac_addr, RTE_ETHER_ADDR_LEN); + + /* Update same mac address to BGX CAM table at index 0 */ + octeontx_bgx_port_mac_add(nic->port_id, nic->mac_addr, 0); + + res = octeontx_dev_flow_ctrl_init(eth_dev); + if (res < 0) + goto err; + + PMD_INIT_LOG(DEBUG, "ethdev info: "); + PMD_INIT_LOG(DEBUG, "port %d, port_ena %d ochan %d num_ochan %d tx_q %d", + nic->port_id, nic->port_ena, + nic->base_ochan, nic->num_ochans, + nic->num_tx_queues); + PMD_INIT_LOG(DEBUG, "speed %d mtu %d", nic->speed, nic->bgx_mtu); + + rte_octeontx_pchan_map[(nic->base_ochan >> 8) & 0x7] 
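+	/* base_ochan bits [10:8] select the BGX block and bits [7:4] the
+	 * LMAC within it, e.g. base_ochan 0x240 -> BGX2/LMAC4.
+	 */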
+ [(nic->base_ochan >> 4) & 0xF] = data->port_id; + + rte_eth_dev_probing_finish(eth_dev); + return data->port_id; + +free_mac_addrs: + rte_free(data->mac_addrs); + data->mac_addrs = NULL; +err: + if (nic) + octeontx_port_close(nic); + + rte_eth_dev_release_port(eth_dev); + + return res; +} + +/* Un initialize octeontx device */ +static int +octeontx_remove(struct rte_vdev_device *dev) +{ + char octtx_name[OCTEONTX_MAX_NAME_LEN]; + struct rte_eth_dev *eth_dev = NULL; + struct octeontx_nic *nic = NULL; + int i; + + if (dev == NULL) + return -EINVAL; + + for (i = 0; i < OCTEONTX_VDEV_DEFAULT_MAX_NR_PORT; i++) { + sprintf(octtx_name, "eth_octeontx_%d", i); + + /* reserve an ethdev entry */ + eth_dev = rte_eth_dev_allocated(octtx_name); + if (eth_dev == NULL) + return -ENODEV; + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) { + rte_eth_dev_release_port(eth_dev); + continue; + } + + nic = octeontx_pmd_priv(eth_dev); + rte_event_dev_stop(nic->evdev); + PMD_INIT_LOG(INFO, "Closing octeontx device %s", octtx_name); + + rte_eth_dev_release_port(eth_dev); + rte_event_dev_close(nic->evdev); + } + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; + + /* Free FC resource */ + octeontx_pko_fc_free(); + + return 0; +} + +/* Initialize octeontx device */ +static int +octeontx_probe(struct rte_vdev_device *dev) +{ + const char *dev_name; + static int probe_once; + uint8_t socket_id, qlist; + int tx_vfcnt, port_id, evdev, qnum, pnum, res, i; + struct rte_event_dev_config dev_conf; + const char *eventdev_name = "event_octeontx"; + struct rte_event_dev_info info; + struct rte_eth_dev *eth_dev; + + struct octeontx_vdev_init_params init_params = { + OCTEONTX_VDEV_DEFAULT_MAX_NR_PORT + }; + + dev_name = rte_vdev_device_name(dev); + + if (rte_eal_process_type() == RTE_PROC_SECONDARY && + strlen(rte_vdev_device_args(dev)) == 0) { + eth_dev = rte_eth_dev_attach_secondary(dev_name); + if (!eth_dev) { + PMD_INIT_LOG(ERR, "Failed to probe %s", dev_name); + return -1; + } + /* TODO: request info from primary to set up Rx and Tx */ + eth_dev->dev_ops = &octeontx_dev_ops; + eth_dev->device = &dev->device; + rte_eth_dev_probing_finish(eth_dev); + return 0; + } + + res = octeontx_parse_vdev_init_params(&init_params, dev); + if (res < 0) + return -EINVAL; + + if (init_params.nr_port > OCTEONTX_VDEV_DEFAULT_MAX_NR_PORT) { + octeontx_log_err("nr_port (%d) > max (%d)", init_params.nr_port, + OCTEONTX_VDEV_DEFAULT_MAX_NR_PORT); + return -ENOTSUP; + } + + PMD_INIT_LOG(DEBUG, "initializing %s pmd", dev_name); + + socket_id = rte_socket_id(); + + tx_vfcnt = octeontx_pko_vf_count(); + + if (tx_vfcnt < init_params.nr_port) { + octeontx_log_err("not enough PKO (%d) for port number (%d)", + tx_vfcnt, init_params.nr_port); + return -EINVAL; + } + evdev = rte_event_dev_get_dev_id(eventdev_name); + if (evdev < 0) { + octeontx_log_err("eventdev %s not found", eventdev_name); + return -ENODEV; + } + + res = rte_event_dev_info_get(evdev, &info); + if (res < 0) { + octeontx_log_err("failed to eventdev info %d", res); + return -EINVAL; + } + + PMD_INIT_LOG(DEBUG, "max_queue %d max_port %d", + info.max_event_queues, info.max_event_ports); + + if (octeontx_pko_init_fc(tx_vfcnt)) + return -ENOMEM; + + devconf_set_default_sane_values(&dev_conf, &info); + res = rte_event_dev_configure(evdev, &dev_conf); + if (res < 0) + goto parse_error; + + rte_event_dev_attr_get(evdev, RTE_EVENT_DEV_ATTR_PORT_COUNT, + (uint32_t *)&pnum); + rte_event_dev_attr_get(evdev, RTE_EVENT_DEV_ATTR_QUEUE_COUNT, + (uint32_t *)&qnum); + if (pnum < qnum) { + 
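+		/* Queues are linked 1:1 to event ports below, so there must
+		 * be at least as many ports as queues.
+		 */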
octeontx_log_err("too few event ports (%d) for event_q(%d)", + pnum, qnum); + res = -EINVAL; + goto parse_error; + } + + /* Enable all queues available */ + for (i = 0; i < qnum; i++) { + res = rte_event_queue_setup(evdev, i, NULL); + if (res < 0) { + octeontx_log_err("failed to setup event_q(%d): res %d", + i, res); + goto parse_error; + } + } + + /* Enable all ports available */ + for (i = 0; i < pnum; i++) { + res = rte_event_port_setup(evdev, i, NULL); + if (res < 0) { + res = -ENODEV; + octeontx_log_err("failed to setup ev port(%d) res=%d", + i, res); + goto parse_error; + } + } + + /* + * Do 1:1 links for ports & queues. All queues would be mapped to + * one port. If there are more ports than queues, then some ports + * won't be linked to any queue. + */ + for (i = 0; i < qnum; i++) { + /* Link one queue to one event port */ + qlist = i; + res = rte_event_port_link(evdev, i, &qlist, NULL, 1); + if (res < 0) { + res = -ENODEV; + octeontx_log_err("failed to link port (%d): res=%d", + i, res); + goto parse_error; + } + } + + /* Create ethdev interface */ + for (i = 0; i < init_params.nr_port; i++) { + port_id = octeontx_create(dev, i, evdev, socket_id); + if (port_id < 0) { + octeontx_log_err("failed to create device %s", + dev_name); + res = -ENODEV; + goto parse_error; + } + + PMD_INIT_LOG(INFO, "created ethdev %s for port %d", dev_name, + port_id); + } + + if (probe_once) { + octeontx_log_err("interface %s not supported", dev_name); + octeontx_remove(dev); + res = -ENOTSUP; + goto parse_error; + } + rte_mbuf_set_platform_mempool_ops("octeontx_fpavf"); + probe_once = 1; + + return 0; + +parse_error: + octeontx_pko_fc_free(); + return res; +} + +static struct rte_vdev_driver octeontx_pmd_drv = { + .probe = octeontx_probe, + .remove = octeontx_remove, +}; + +RTE_PMD_REGISTER_VDEV(OCTEONTX_PMD, octeontx_pmd_drv); +RTE_PMD_REGISTER_ALIAS(OCTEONTX_PMD, eth_octeontx); +RTE_PMD_REGISTER_PARAM_STRING(OCTEONTX_PMD, "nr_port= "); diff --git a/src/spdk/dpdk/drivers/net/octeontx/octeontx_ethdev.h b/src/spdk/dpdk/drivers/net/octeontx/octeontx_ethdev.h new file mode 100644 index 000000000..7246fb6d1 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/octeontx/octeontx_ethdev.h @@ -0,0 +1,187 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Cavium, Inc + */ + +#ifndef __OCTEONTX_ETHDEV_H__ +#define __OCTEONTX_ETHDEV_H__ + +#include + +#include +#include +#include +#include +#include + +#include + +#include "base/octeontx_bgx.h" +#include "base/octeontx_pki_var.h" +#include "base/octeontx_pkivf.h" +#include "base/octeontx_pkovf.h" +#include "base/octeontx_io.h" + +#define OCTEONTX_PMD net_octeontx +#define OCTEONTX_VDEV_DEFAULT_MAX_NR_PORT 12 +#define OCTEONTX_VDEV_NR_PORT_ARG ("nr_port") +#define OCTEONTX_MAX_NAME_LEN 32 + +#define OCTEONTX_MAX_BGX_PORTS 4 +#define OCTEONTX_MAX_LMAC_PER_BGX 4 + +#define OCCTX_RX_NB_SEG_MAX 6 +#define OCCTX_INTR_POLL_INTERVAL_MS 1000 +/* VLAN tag inserted by OCCTX_TX_VTAG_ACTION. + * In Tx space is always reserved for this in FRS. 
+ */ +#define OCCTX_MAX_VTAG_INS 2 +#define OCCTX_MAX_VTAG_ACT_SIZE (4 * OCCTX_MAX_VTAG_INS) + +/* HW config of frame size doesn't include FCS */ +#define OCCTX_MAX_HW_FRS 9212 +#define OCCTX_MIN_HW_FRS 60 + +/* ETH_HLEN+ETH_FCS+2*VLAN_HLEN */ +#define OCCTX_L2_OVERHEAD (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + \ + OCCTX_MAX_VTAG_ACT_SIZE) + +/* Since HW FRS includes NPC VTAG insertion space, user has reduced FRS */ +#define OCCTX_MAX_FRS \ + (OCCTX_MAX_HW_FRS + RTE_ETHER_CRC_LEN - OCCTX_MAX_VTAG_ACT_SIZE) + +#define OCCTX_MIN_FRS (OCCTX_MIN_HW_FRS + RTE_ETHER_CRC_LEN) + +#define OCCTX_MAX_MTU (OCCTX_MAX_FRS - OCCTX_L2_OVERHEAD) + +#define OCTEONTX_RX_OFFLOADS ( \ + DEV_RX_OFFLOAD_CHECKSUM | \ + DEV_RX_OFFLOAD_SCTP_CKSUM | \ + DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | \ + DEV_RX_OFFLOAD_SCATTER | \ + DEV_RX_OFFLOAD_SCATTER | \ + DEV_RX_OFFLOAD_JUMBO_FRAME | \ + DEV_RX_OFFLOAD_VLAN_FILTER) + +#define OCTEONTX_TX_OFFLOADS ( \ + DEV_TX_OFFLOAD_MBUF_FAST_FREE | \ + DEV_TX_OFFLOAD_MT_LOCKFREE | \ + DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | \ + DEV_TX_OFFLOAD_OUTER_UDP_CKSUM | \ + DEV_TX_OFFLOAD_IPV4_CKSUM | \ + DEV_TX_OFFLOAD_TCP_CKSUM | \ + DEV_TX_OFFLOAD_UDP_CKSUM | \ + DEV_TX_OFFLOAD_SCTP_CKSUM | \ + DEV_TX_OFFLOAD_MULTI_SEGS) + +static inline struct octeontx_nic * +octeontx_pmd_priv(struct rte_eth_dev *dev) +{ + return dev->data->dev_private; +} + +extern uint16_t +rte_octeontx_pchan_map[OCTEONTX_MAX_BGX_PORTS][OCTEONTX_MAX_LMAC_PER_BGX]; + +struct vlan_entry { + TAILQ_ENTRY(vlan_entry) next; + uint16_t vlan_id; +}; + +TAILQ_HEAD(octeontx_vlan_filter_tbl, vlan_entry); + +struct octeontx_vlan_info { + struct octeontx_vlan_filter_tbl fltr_tbl; + uint8_t filter_on; +}; + +struct octeontx_fc_info { + enum rte_eth_fc_mode mode; /**< Link flow control mode */ + enum rte_eth_fc_mode def_mode; + uint16_t high_water; + uint16_t low_water; + uint16_t def_highmark; + uint16_t def_lowmark; + uint32_t rx_fifosz; +}; + +/* Octeontx ethdev nic */ +struct octeontx_nic { + struct rte_eth_dev *dev; + int node; + int port_id; + int port_ena; + int base_ichan; + int num_ichans; + int base_ochan; + int num_ochans; + uint8_t evdev; + uint8_t bpen; + uint8_t fcs_strip; + uint8_t bcast_mode; + uint8_t mcast_mode; + uint16_t num_tx_queues; + uint64_t hwcap; + uint8_t pko_vfid; + uint8_t link_up; + uint8_t duplex; + uint8_t speed; + uint16_t bgx_mtu; + uint16_t mtu; + uint8_t mac_addr[RTE_ETHER_ADDR_LEN]; + /* Rx port parameters */ + struct { + bool classifier_enable; + bool hash_enable; + bool initialized; + } pki; + + uint16_t ev_queues; + uint16_t ev_ports; + uint64_t rx_offloads; + uint16_t rx_offload_flags; + uint64_t tx_offloads; + uint16_t tx_offload_flags; + struct octeontx_vlan_info vlan_info; + int print_flag; + struct octeontx_fc_info fc; +} __rte_cache_aligned; + +struct octeontx_txq { + uint16_t queue_id; + octeontx_dq_t dq; + struct rte_eth_dev *eth_dev; +} __rte_cache_aligned; + +struct octeontx_rxq { + uint16_t queue_id; + uint16_t port_id; + uint8_t evdev; + struct rte_eth_dev *eth_dev; + uint16_t ev_queues; + uint16_t ev_ports; + struct rte_mempool *pool; +} __rte_cache_aligned; + +void +octeontx_set_tx_function(struct rte_eth_dev *dev); + +/* VLAN */ +int octeontx_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t qidx); +int octeontx_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx); +int octeontx_dev_vlan_offload_init(struct rte_eth_dev *dev); +int octeontx_dev_vlan_offload_fini(struct rte_eth_dev *eth_dev); +int octeontx_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask); +int 
octeontx_dev_vlan_filter_set(struct rte_eth_dev *dev, + uint16_t vlan_id, int on); +int octeontx_dev_set_link_up(struct rte_eth_dev *eth_dev); +int octeontx_dev_set_link_down(struct rte_eth_dev *eth_dev); + +/* Flow control */ +int octeontx_dev_flow_ctrl_init(struct rte_eth_dev *dev); +int octeontx_dev_flow_ctrl_fini(struct rte_eth_dev *dev); +int octeontx_dev_flow_ctrl_get(struct rte_eth_dev *dev, + struct rte_eth_fc_conf *fc_conf); +int octeontx_dev_flow_ctrl_set(struct rte_eth_dev *dev, + struct rte_eth_fc_conf *fc_conf); + +#endif /* __OCTEONTX_ETHDEV_H__ */ diff --git a/src/spdk/dpdk/drivers/net/octeontx/octeontx_ethdev_ops.c b/src/spdk/dpdk/drivers/net/octeontx/octeontx_ethdev_ops.c new file mode 100644 index 000000000..ff627a68e --- /dev/null +++ b/src/spdk/dpdk/drivers/net/octeontx/octeontx_ethdev_ops.c @@ -0,0 +1,343 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(C) 2020 Marvell International Ltd. + */ + +#include + +#include "octeontx_ethdev.h" +#include "octeontx_logs.h" +#include "octeontx_rxtx.h" + +static int +octeontx_vlan_hw_filter(struct octeontx_nic *nic, uint8_t flag) +{ + struct octeontx_vlan_info *vlan = &nic->vlan_info; + pki_port_vlan_filter_config_t fltr_conf; + int rc = 0; + + if (vlan->filter_on == flag) + return rc; + + fltr_conf.port_type = OCTTX_PORT_TYPE_NET; + fltr_conf.fltr_conf = flag; + + rc = octeontx_pki_port_vlan_fltr_config(nic->port_id, &fltr_conf); + if (rc != 0) { + octeontx_log_err("Fail to configure vlan hw filter for port %d", + nic->port_id); + goto done; + } + + vlan->filter_on = flag; + +done: + return rc; +} + +int +octeontx_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask) +{ + struct octeontx_nic *nic = octeontx_pmd_priv(dev); + struct rte_eth_rxmode *rxmode; + int rc = 0; + + rxmode = &dev->data->dev_conf.rxmode; + + if (mask & ETH_VLAN_EXTEND_MASK) { + octeontx_log_err("Extend offload not supported"); + return -ENOTSUP; + } + + if (mask & ETH_VLAN_STRIP_MASK) { + octeontx_log_err("VLAN strip offload not supported"); + return -ENOTSUP; + } + + if (mask & ETH_VLAN_FILTER_MASK) { + if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) { + rc = octeontx_vlan_hw_filter(nic, true); + if (rc) + goto done; + + nic->rx_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER; + nic->rx_offload_flags |= OCCTX_RX_VLAN_FLTR_F; + } else { + rc = octeontx_vlan_hw_filter(nic, false); + if (rc) + goto done; + + nic->rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER; + nic->rx_offload_flags &= ~OCCTX_RX_VLAN_FLTR_F; + } + } + +done: + return rc; +} + +int +octeontx_dev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) +{ + struct octeontx_nic *nic = octeontx_pmd_priv(dev); + struct octeontx_vlan_info *vlan = &nic->vlan_info; + pki_port_vlan_filter_entry_config_t fltr_entry; + struct vlan_entry *entry = NULL; + int entry_count = 0; + int rc = -EINVAL; + + if (on) { + TAILQ_FOREACH(entry, &vlan->fltr_tbl, next) + if (entry->vlan_id == vlan_id) { + octeontx_log_dbg("Vlan Id is already set"); + return 0; + } + } else { + TAILQ_FOREACH(entry, &vlan->fltr_tbl, next) + entry_count++; + + if (!entry_count) + return 0; + } + + fltr_entry.port_type = OCTTX_PORT_TYPE_NET; + fltr_entry.vlan_tpid = RTE_ETHER_TYPE_VLAN; + fltr_entry.vlan_id = vlan_id; + fltr_entry.entry_conf = on; + + if (on) { + entry = rte_zmalloc("octeontx_nic_vlan_entry", + sizeof(struct vlan_entry), 0); + if (!entry) { + octeontx_log_err("Failed to allocate memory"); + return -ENOMEM; + } + } + + rc = octeontx_pki_port_vlan_fltr_entry_config(nic->port_id, + &fltr_entry); + if (rc != 0) { + 
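+		/* Mailbox rejected the filter entry; free the node allocated
+		 * above (if any) before returning.
+		 */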
octeontx_log_err("Fail to configure vlan filter entry " + "for port %d", nic->port_id); + if (entry) + rte_free(entry); + + goto done; + } + + if (on) { + entry->vlan_id = vlan_id; + TAILQ_INSERT_HEAD(&vlan->fltr_tbl, entry, next); + } else { + TAILQ_FOREACH(entry, &vlan->fltr_tbl, next) { + if (entry->vlan_id == vlan_id) { + TAILQ_REMOVE(&vlan->fltr_tbl, entry, next); + rte_free(entry); + break; + } + } + } + +done: + return rc; +} + +int +octeontx_dev_vlan_offload_init(struct rte_eth_dev *dev) +{ + struct octeontx_nic *nic = octeontx_pmd_priv(dev); + int rc; + + TAILQ_INIT(&nic->vlan_info.fltr_tbl); + + rc = octeontx_dev_vlan_offload_set(dev, ETH_VLAN_FILTER_MASK); + if (rc) + octeontx_log_err("Failed to set vlan offload rc=%d", rc); + + return rc; +} + +int +octeontx_dev_vlan_offload_fini(struct rte_eth_dev *dev) +{ + struct octeontx_nic *nic = octeontx_pmd_priv(dev); + struct octeontx_vlan_info *vlan = &nic->vlan_info; + pki_port_vlan_filter_entry_config_t fltr_entry; + struct vlan_entry *entry; + int rc = 0; + + TAILQ_FOREACH(entry, &vlan->fltr_tbl, next) { + fltr_entry.port_type = OCTTX_PORT_TYPE_NET; + fltr_entry.vlan_tpid = RTE_ETHER_TYPE_VLAN; + fltr_entry.vlan_id = entry->vlan_id; + fltr_entry.entry_conf = 0; + + rc = octeontx_pki_port_vlan_fltr_entry_config(nic->port_id, + &fltr_entry); + if (rc != 0) { + octeontx_log_err("Fail to configure vlan filter entry " + "for port %d", nic->port_id); + break; + } + } + + return rc; +} + +int +octeontx_dev_set_link_up(struct rte_eth_dev *eth_dev) +{ + struct octeontx_nic *nic = octeontx_pmd_priv(eth_dev); + int rc, i; + + rc = octeontx_bgx_port_set_link_state(nic->port_id, true); + if (rc) + goto done; + + /* Start tx queues */ + for (i = 0; i < eth_dev->data->nb_tx_queues; i++) + octeontx_dev_tx_queue_start(eth_dev, i); + +done: + return rc; +} + +int +octeontx_dev_set_link_down(struct rte_eth_dev *eth_dev) +{ + struct octeontx_nic *nic = octeontx_pmd_priv(eth_dev); + int i; + + /* Stop tx queues */ + for (i = 0; i < eth_dev->data->nb_tx_queues; i++) + octeontx_dev_tx_queue_stop(eth_dev, i); + + return octeontx_bgx_port_set_link_state(nic->port_id, false); +} + +int +octeontx_dev_flow_ctrl_get(struct rte_eth_dev *dev, + struct rte_eth_fc_conf *fc_conf) +{ + struct octeontx_nic *nic = octeontx_pmd_priv(dev); + octeontx_mbox_bgx_port_fc_cfg_t conf; + int rc; + + memset(&conf, 0, sizeof(octeontx_mbox_bgx_port_fc_cfg_t)); + + rc = octeontx_bgx_port_flow_ctrl_cfg(nic->port_id, &conf); + if (rc) + return rc; + + if (conf.rx_pause && conf.tx_pause) + fc_conf->mode = RTE_FC_FULL; + else if (conf.rx_pause) + fc_conf->mode = RTE_FC_RX_PAUSE; + else if (conf.tx_pause) + fc_conf->mode = RTE_FC_TX_PAUSE; + else + fc_conf->mode = RTE_FC_NONE; + + /* low_water & high_water values are in Bytes */ + fc_conf->low_water = conf.low_water; + fc_conf->high_water = conf.high_water; + + return rc; +} + +int +octeontx_dev_flow_ctrl_set(struct rte_eth_dev *dev, + struct rte_eth_fc_conf *fc_conf) +{ + struct octeontx_nic *nic = octeontx_pmd_priv(dev); + struct octeontx_fc_info *fc = &nic->fc; + octeontx_mbox_bgx_port_fc_cfg_t conf; + uint8_t tx_pause, rx_pause; + uint16_t max_high_water; + int rc; + + if (fc_conf->pause_time || fc_conf->mac_ctrl_frame_fwd || + fc_conf->autoneg) { + octeontx_log_err("Below flowctrl parameters are not supported " + "pause_time, mac_ctrl_frame_fwd and autoneg"); + return -EINVAL; + } + + if (fc_conf->high_water == fc->high_water && + fc_conf->low_water == fc->low_water && + fc_conf->mode == fc->mode) + return 0; + + max_high_water = 
fc->rx_fifosz - OCTEONTX_BGX_RSVD_RX_FIFOBYTES; + + if (fc_conf->high_water > max_high_water || + fc_conf->high_water < fc_conf->low_water) { + octeontx_log_err("Invalid high/low water values " + "High_water(in Bytes) must <= 0x%x ", + max_high_water); + return -EINVAL; + } + + if (fc_conf->high_water % BIT(4) || fc_conf->low_water % BIT(4)) { + octeontx_log_err("High/low water value must be multiple of 16"); + return -EINVAL; + } + + rx_pause = (fc_conf->mode == RTE_FC_FULL) || + (fc_conf->mode == RTE_FC_RX_PAUSE); + tx_pause = (fc_conf->mode == RTE_FC_FULL) || + (fc_conf->mode == RTE_FC_TX_PAUSE); + + conf.high_water = fc_conf->high_water; + conf.low_water = fc_conf->low_water; + conf.fc_cfg = BGX_PORT_FC_CFG_SET; + conf.rx_pause = rx_pause; + conf.tx_pause = tx_pause; + + rc = octeontx_bgx_port_flow_ctrl_cfg(nic->port_id, &conf); + if (rc) + return rc; + + fc->high_water = fc_conf->high_water; + fc->low_water = fc_conf->low_water; + fc->mode = fc_conf->mode; + + return rc; +} + +int +octeontx_dev_flow_ctrl_init(struct rte_eth_dev *dev) +{ + struct octeontx_nic *nic = octeontx_pmd_priv(dev); + struct octeontx_fc_info *fc = &nic->fc; + struct rte_eth_fc_conf fc_conf; + int rc; + + rc = octeontx_dev_flow_ctrl_get(dev, &fc_conf); + if (rc) { + octeontx_log_err("Failed to get flow control info"); + return rc; + } + + fc->def_highmark = fc_conf.high_water; + fc->def_lowmark = fc_conf.low_water; + fc->def_mode = fc_conf.mode; + + return rc; +} + +int +octeontx_dev_flow_ctrl_fini(struct rte_eth_dev *dev) +{ + struct octeontx_nic *nic = octeontx_pmd_priv(dev); + struct octeontx_fc_info *fc = &nic->fc; + struct rte_eth_fc_conf fc_conf; + + memset(&fc_conf, 0, sizeof(struct rte_eth_fc_conf)); + + /* Restore flow control parameters with default values */ + fc_conf.high_water = fc->def_highmark; + fc_conf.low_water = fc->def_lowmark; + fc_conf.mode = fc->def_mode; + + return octeontx_dev_flow_ctrl_set(dev, &fc_conf); +} diff --git a/src/spdk/dpdk/drivers/net/octeontx/octeontx_logs.h b/src/spdk/dpdk/drivers/net/octeontx/octeontx_logs.h new file mode 100644 index 000000000..dec8042c6 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/octeontx/octeontx_logs.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Cavium, Inc + */ + +#ifndef __OCTEONTX_LOGS_H__ +#define __OCTEONTX_LOGS_H__ + +#define PMD_INIT_LOG(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, otx_net_logtype_init, \ + "%s(): " fmt "\n", __func__, ## args) + +#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, ">>") + +#define PMD_DRV_LOG(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, otx_net_logtype_driver, \ + "%s(): " fmt "\n", __func__, ## args) + +#define PMD_MBOX_LOG(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, otx_net_logtype_mbox, \ + "%s(): " fmt "\n", __func__, ## args) + +#define octeontx_log_info(fmt, args...) \ + RTE_LOG(INFO, PMD, fmt "\n", ## args) + +#define octeontx_log_err(s, ...) PMD_INIT_LOG(ERR, s, ##__VA_ARGS__) +#define octeontx_log_dbg(s, ...) PMD_DRV_LOG(DEBUG, s, ##__VA_ARGS__) +#define octeontx_mbox_log(s, ...) 
PMD_MBOX_LOG(DEBUG, s, ##__VA_ARGS__) + +#define PMD_RX_LOG PMD_DRV_LOG +#define PMD_TX_LOG PMD_DRV_LOG + +extern int otx_net_logtype_init; +extern int otx_net_logtype_driver; +extern int otx_net_logtype_mbox; + +#endif /* __OCTEONTX_LOGS_H__*/ diff --git a/src/spdk/dpdk/drivers/net/octeontx/octeontx_rxtx.c b/src/spdk/dpdk/drivers/net/octeontx/octeontx_rxtx.c new file mode 100644 index 000000000..bbe43a874 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/octeontx/octeontx_rxtx.c @@ -0,0 +1,76 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Cavium, Inc + */ + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "octeontx_ethdev.h" +#include "octeontx_rxtx.h" +#include "octeontx_logs.h" + +uint16_t __rte_hot +octeontx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) +{ + struct octeontx_rxq *rxq; + struct rte_event ev; + size_t count; + uint16_t valid_event; + + rxq = rx_queue; + count = 0; + while (count < nb_pkts) { + valid_event = rte_event_dequeue_burst(rxq->evdev, + rxq->ev_ports, &ev, + 1, 0); + if (!valid_event) + break; + rx_pkts[count++] = ev.mbuf; + } + + return count; /* return number of pkts received */ +} + +#define T(name, f3, f2, f1, f0, sz, flags) \ +static uint16_t __rte_noinline __rte_hot \ +octeontx_xmit_pkts_ ##name(void *tx_queue, \ + struct rte_mbuf **tx_pkts, uint16_t pkts) \ +{ \ + uint64_t cmd[(sz)]; \ + \ + return __octeontx_xmit_pkts(tx_queue, tx_pkts, pkts, cmd, \ + flags); \ +} + +OCCTX_TX_FASTPATH_MODES +#undef T + +void __rte_hot +octeontx_set_tx_function(struct rte_eth_dev *dev) +{ + struct octeontx_nic *nic = octeontx_pmd_priv(dev); + + const eth_tx_burst_t tx_burst_func[2][2][2][2] = { +#define T(name, f3, f2, f1, f0, sz, flags) \ + [f3][f2][f1][f0] = octeontx_xmit_pkts_ ##name, + +OCCTX_TX_FASTPATH_MODES +#undef T + }; + + dev->tx_pkt_burst = tx_burst_func + [!!(nic->tx_offload_flags & OCCTX_TX_OFFLOAD_MBUF_NOFF_F)] + [!!(nic->tx_offload_flags & OCCTX_TX_OFFLOAD_OL3_OL4_CSUM_F)] + [!!(nic->tx_offload_flags & OCCTX_TX_OFFLOAD_L3_L4_CSUM_F)] + [!!(nic->tx_offload_flags & OCCTX_TX_MULTI_SEG_F)]; +} diff --git a/src/spdk/dpdk/drivers/net/octeontx/octeontx_rxtx.h b/src/spdk/dpdk/drivers/net/octeontx/octeontx_rxtx.h new file mode 100644 index 000000000..8b46105b6 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/octeontx/octeontx_rxtx.h @@ -0,0 +1,504 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Cavium, Inc + */ + +#ifndef __OCTEONTX_RXTX_H__ +#define __OCTEONTX_RXTX_H__ + +#include + +#define OFFLOAD_FLAGS \ + uint16_t rx_offload_flags; \ + uint16_t tx_offload_flags + +#define BIT(nr) (1UL << (nr)) + +#define OCCTX_RX_OFFLOAD_NONE (0) +#define OCCTX_RX_MULTI_SEG_F BIT(0) +#define OCCTX_RX_OFFLOAD_CSUM_F BIT(1) +#define OCCTX_RX_VLAN_FLTR_F BIT(2) + +#define OCCTX_TX_OFFLOAD_NONE (0) +#define OCCTX_TX_MULTI_SEG_F BIT(0) +#define OCCTX_TX_OFFLOAD_L3_L4_CSUM_F BIT(1) +#define OCCTX_TX_OFFLOAD_OL3_OL4_CSUM_F BIT(2) +#define OCCTX_TX_OFFLOAD_MBUF_NOFF_F BIT(3) + +/* Packet type table */ +#define PTYPE_SIZE OCCTX_PKI_LTYPE_LAST + +/* octeontx send header sub descriptor structure */ +RTE_STD_C11 +union octeontx_send_hdr_w0_u { + uint64_t u; + struct { + uint64_t total : 16; + uint64_t markptr : 8; + uint64_t l3ptr : 8; + uint64_t l4ptr : 8; + uint64_t ii : 1; + uint64_t shp_dis : 1; + uint64_t ckle : 1; + uint64_t cklf : 2; + uint64_t ckl3 : 1; + uint64_t ckl4 : 2; + uint64_t p : 1; + uint64_t format : 7; + uint64_t tstamp : 1; + uint64_t 
tso_eom : 1; + uint64_t df : 1; + uint64_t tso : 1; + uint64_t n2 : 1; + uint64_t scntn1 : 3; + }; +}; + +RTE_STD_C11 +union octeontx_send_hdr_w1_u { + uint64_t u; + struct { + uint64_t tso_mss : 14; + uint64_t shp_ra : 2; + uint64_t tso_sb : 8; + uint64_t leptr : 8; + uint64_t lfptr : 8; + uint64_t shp_chg : 9; + uint64_t tso_fn : 7; + uint64_t l2len : 8; + }; +}; + +struct octeontx_send_hdr_s { + union octeontx_send_hdr_w0_u w0; + union octeontx_send_hdr_w1_u w1; +}; + +static const uint32_t __rte_cache_aligned +ptype_table[PTYPE_SIZE][PTYPE_SIZE][PTYPE_SIZE] = { + [LC_NONE][LE_NONE][LF_NONE] = RTE_PTYPE_UNKNOWN, + [LC_NONE][LE_NONE][LF_IPSEC_ESP] = RTE_PTYPE_UNKNOWN, + [LC_NONE][LE_NONE][LF_IPFRAG] = RTE_PTYPE_L4_FRAG, + [LC_NONE][LE_NONE][LF_IPCOMP] = RTE_PTYPE_UNKNOWN, + [LC_NONE][LE_NONE][LF_TCP] = RTE_PTYPE_L4_TCP, + [LC_NONE][LE_NONE][LF_UDP] = RTE_PTYPE_L4_UDP, + [LC_NONE][LE_NONE][LF_GRE] = RTE_PTYPE_TUNNEL_GRE, + [LC_NONE][LE_NONE][LF_UDP_GENEVE] = RTE_PTYPE_TUNNEL_GENEVE, + [LC_NONE][LE_NONE][LF_UDP_VXLAN] = RTE_PTYPE_TUNNEL_VXLAN, + [LC_NONE][LE_NONE][LF_NVGRE] = RTE_PTYPE_TUNNEL_NVGRE, + + [LC_IPV4][LE_NONE][LF_NONE] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_UNKNOWN, + [LC_IPV4][LE_NONE][LF_IPSEC_ESP] = + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L3_IPV4, + [LC_IPV4][LE_NONE][LF_IPFRAG] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_FRAG, + [LC_IPV4][LE_NONE][LF_IPCOMP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_UNKNOWN, + [LC_IPV4][LE_NONE][LF_TCP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP, + [LC_IPV4][LE_NONE][LF_UDP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP, + [LC_IPV4][LE_NONE][LF_GRE] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_GRE, + [LC_IPV4][LE_NONE][LF_UDP_GENEVE] = + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_GENEVE, + [LC_IPV4][LE_NONE][LF_UDP_VXLAN] = + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_VXLAN, + [LC_IPV4][LE_NONE][LF_NVGRE] = + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_NVGRE, + + [LC_IPV4_OPT][LE_NONE][LF_NONE] = + RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_UNKNOWN, + [LC_IPV4_OPT][LE_NONE][LF_IPSEC_ESP] = + RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L3_IPV4, + [LC_IPV4_OPT][LE_NONE][LF_IPFRAG] = + RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_FRAG, + [LC_IPV4_OPT][LE_NONE][LF_IPCOMP] = + RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_UNKNOWN, + [LC_IPV4_OPT][LE_NONE][LF_TCP] = + RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_TCP, + [LC_IPV4_OPT][LE_NONE][LF_UDP] = + RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_UDP, + [LC_IPV4_OPT][LE_NONE][LF_GRE] = + RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_GRE, + [LC_IPV4_OPT][LE_NONE][LF_UDP_GENEVE] = + RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_GENEVE, + [LC_IPV4_OPT][LE_NONE][LF_UDP_VXLAN] = + RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_VXLAN, + [LC_IPV4_OPT][LE_NONE][LF_NVGRE] = + RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_NVGRE, + + [LC_IPV6][LE_NONE][LF_NONE] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_UNKNOWN, + [LC_IPV6][LE_NONE][LF_IPSEC_ESP] = + RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L3_IPV4, + [LC_IPV6][LE_NONE][LF_IPFRAG] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_FRAG, + [LC_IPV6][LE_NONE][LF_IPCOMP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_UNKNOWN, + [LC_IPV6][LE_NONE][LF_TCP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP, + [LC_IPV6][LE_NONE][LF_UDP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP, + [LC_IPV6][LE_NONE][LF_GRE] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_GRE, + [LC_IPV6][LE_NONE][LF_UDP_GENEVE] = + RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_GENEVE, + [LC_IPV6][LE_NONE][LF_UDP_VXLAN] = + RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_VXLAN, + [LC_IPV6][LE_NONE][LF_NVGRE] = + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_NVGRE, + [LC_IPV6_OPT][LE_NONE][LF_NONE] = + RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_UNKNOWN, 
+ [LC_IPV6_OPT][LE_NONE][LF_IPSEC_ESP] = + RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L3_IPV4, + [LC_IPV6_OPT][LE_NONE][LF_IPFRAG] = + RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_FRAG, + [LC_IPV6_OPT][LE_NONE][LF_IPCOMP] = + RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_UNKNOWN, + [LC_IPV6_OPT][LE_NONE][LF_TCP] = + RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP, + [LC_IPV6_OPT][LE_NONE][LF_UDP] = + RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP, + [LC_IPV6_OPT][LE_NONE][LF_GRE] = + RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_GRE, + [LC_IPV6_OPT][LE_NONE][LF_UDP_GENEVE] = + RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_GENEVE, + [LC_IPV6_OPT][LE_NONE][LF_UDP_VXLAN] = + RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_VXLAN, + [LC_IPV6_OPT][LE_NONE][LF_NVGRE] = + RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_NVGRE, + +}; + + +static __rte_always_inline uint64_t +octeontx_pktmbuf_detach(struct rte_mbuf *m) +{ + struct rte_mempool *mp = m->pool; + uint32_t mbuf_size, buf_len; + struct rte_mbuf *md; + uint16_t priv_size; + uint16_t refcount; + + /* Update refcount of direct mbuf */ + md = rte_mbuf_from_indirect(m); + refcount = rte_mbuf_refcnt_update(md, -1); + + priv_size = rte_pktmbuf_priv_size(mp); + mbuf_size = (uint32_t)(sizeof(struct rte_mbuf) + priv_size); + buf_len = rte_pktmbuf_data_room_size(mp); + + m->priv_size = priv_size; + m->buf_addr = (char *)m + mbuf_size; + m->buf_iova = rte_mempool_virt2iova(m) + mbuf_size; + m->buf_len = (uint16_t)buf_len; + rte_pktmbuf_reset_headroom(m); + m->data_len = 0; + m->ol_flags = 0; + m->next = NULL; + m->nb_segs = 1; + + /* Now indirect mbuf is safe to free */ + rte_pktmbuf_free(m); + + if (refcount == 0) { + rte_mbuf_refcnt_set(md, 1); + md->data_len = 0; + md->ol_flags = 0; + md->next = NULL; + md->nb_segs = 1; + return 0; + } else { + return 1; + } +} + +static __rte_always_inline uint64_t +octeontx_prefree_seg(struct rte_mbuf *m) +{ + if (likely(rte_mbuf_refcnt_read(m) == 1)) { + if (!RTE_MBUF_DIRECT(m)) + return octeontx_pktmbuf_detach(m); + + m->next = NULL; + m->nb_segs = 1; + return 0; + } else if (rte_mbuf_refcnt_update(m, -1) == 0) { + if (!RTE_MBUF_DIRECT(m)) + return octeontx_pktmbuf_detach(m); + + rte_mbuf_refcnt_set(m, 1); + m->next = NULL; + m->nb_segs = 1; + return 0; + } + + /* Mbuf is having refcount more than 1 so need not to be freed */ + return 1; +} + +static __rte_always_inline void +octeontx_tx_checksum_offload(uint64_t *cmd_buf, const uint16_t flags, + struct rte_mbuf *m) +{ + struct octeontx_send_hdr_s *send_hdr = + (struct octeontx_send_hdr_s *)cmd_buf; + uint64_t ol_flags = m->ol_flags; + + /* PKO Checksum L4 Algorithm Enumeration + * 0x0 - No checksum + * 0x1 - UDP L4 checksum + * 0x2 - TCP L4 checksum + * 0x3 - SCTP L4 checksum + */ + const uint8_t csum = (!(((ol_flags ^ PKT_TX_UDP_CKSUM) >> 52) & 0x3) + + (!(((ol_flags ^ PKT_TX_TCP_CKSUM) >> 52) & 0x3) * 2) + + (!(((ol_flags ^ PKT_TX_SCTP_CKSUM) >> 52) & 0x3) * 3)); + + const uint8_t is_tunnel_parsed = (!!(ol_flags & PKT_TX_TUNNEL_GTP) || + !!(ol_flags & PKT_TX_TUNNEL_VXLAN_GPE) || + !!(ol_flags & PKT_TX_TUNNEL_VXLAN) || + !!(ol_flags & PKT_TX_TUNNEL_GRE) || + !!(ol_flags & PKT_TX_TUNNEL_GENEVE) || + !!(ol_flags & PKT_TX_TUNNEL_IP) || + !!(ol_flags & PKT_TX_TUNNEL_IPIP)); + + const uint8_t csum_outer = (!!(ol_flags & PKT_TX_OUTER_UDP_CKSUM) || + !!(ol_flags & PKT_TX_TUNNEL_UDP)); + const uint8_t outer_l2_len = m->outer_l2_len; + const uint8_t l2_len = m->l2_len; + + if ((flags & OCCTX_TX_OFFLOAD_OL3_OL4_CSUM_F) && + (flags & OCCTX_TX_OFFLOAD_L3_L4_CSUM_F)) { + if (is_tunnel_parsed) { + /* Outer L3 */ + send_hdr->w0.l3ptr = 
outer_l2_len; + send_hdr->w0.l4ptr = outer_l2_len + m->outer_l3_len; + /* Set clk3 for PKO to calculate IPV4 header checksum */ + send_hdr->w0.ckl3 = !!(ol_flags & PKT_TX_OUTER_IPV4); + + /* Outer L4 */ + send_hdr->w0.ckl4 = csum_outer; + + /* Inner L3 */ + send_hdr->w1.leptr = send_hdr->w0.l4ptr + l2_len; + send_hdr->w1.lfptr = send_hdr->w1.leptr + m->l3_len; + /* Set clke for PKO to calculate inner IPV4 header + * checksum. + */ + send_hdr->w0.ckle = !!(ol_flags & PKT_TX_IPV4); + + /* Inner L4 */ + send_hdr->w0.cklf = csum; + } else { + /* Inner L3 */ + send_hdr->w0.l3ptr = l2_len; + send_hdr->w0.l4ptr = l2_len + m->l3_len; + /* Set clk3 for PKO to calculate IPV4 header checksum */ + send_hdr->w0.ckl3 = !!(ol_flags & PKT_TX_IPV4); + + /* Inner L4 */ + send_hdr->w0.ckl4 = csum; + } + } else if (flags & OCCTX_TX_OFFLOAD_OL3_OL4_CSUM_F) { + /* Outer L3 */ + send_hdr->w0.l3ptr = outer_l2_len; + send_hdr->w0.l4ptr = outer_l2_len + m->outer_l3_len; + /* Set clk3 for PKO to calculate IPV4 header checksum */ + send_hdr->w0.ckl3 = !!(ol_flags & PKT_TX_OUTER_IPV4); + + /* Outer L4 */ + send_hdr->w0.ckl4 = csum_outer; + } else if (flags & OCCTX_TX_OFFLOAD_L3_L4_CSUM_F) { + /* Inner L3 */ + send_hdr->w0.l3ptr = l2_len; + send_hdr->w0.l4ptr = l2_len + m->l3_len; + /* Set clk3 for PKO to calculate IPV4 header checksum */ + send_hdr->w0.ckl3 = !!(ol_flags & PKT_TX_IPV4); + + /* Inner L4 */ + send_hdr->w0.ckl4 = csum; + } +} + +static __rte_always_inline uint16_t +__octeontx_xmit_prepare(struct rte_mbuf *tx_pkt, uint64_t *cmd_buf, + const uint16_t flag) +{ + uint16_t gaura_id, nb_desc = 0; + + /* Setup PKO_SEND_HDR_S */ + cmd_buf[nb_desc++] = tx_pkt->data_len & 0xffff; + cmd_buf[nb_desc++] = 0x0; + + /* Enable tx checksum offload */ + if ((flag & OCCTX_TX_OFFLOAD_OL3_OL4_CSUM_F) || + (flag & OCCTX_TX_OFFLOAD_L3_L4_CSUM_F)) + octeontx_tx_checksum_offload(cmd_buf, flag, tx_pkt); + + /* SEND_HDR[DF] bit controls if buffer is to be freed or + * not, as SG_DESC[I] and SEND_HDR[II] are clear. 
+ */ + if (flag & OCCTX_TX_OFFLOAD_MBUF_NOFF_F) + cmd_buf[0] |= (octeontx_prefree_seg(tx_pkt) << + 58); + + /* Mark mempool object as "put" since it is freed by PKO */ + if (!(cmd_buf[0] & (1ULL << 58))) + __mempool_check_cookies(tx_pkt->pool, (void **)&tx_pkt, + 1, 0); + /* Get the gaura Id */ + gaura_id = octeontx_fpa_bufpool_gpool((uintptr_t) + tx_pkt->pool->pool_id); + + /* Setup PKO_SEND_BUFLINK_S */ + cmd_buf[nb_desc++] = PKO_SEND_BUFLINK_SUBDC | + PKO_SEND_BUFLINK_LDTYPE(0x1ull) | + PKO_SEND_BUFLINK_GAUAR((long)gaura_id) | + tx_pkt->data_len; + cmd_buf[nb_desc++] = rte_mbuf_data_iova(tx_pkt); + + return nb_desc; +} + +static __rte_always_inline uint16_t +__octeontx_xmit_mseg_prepare(struct rte_mbuf *tx_pkt, uint64_t *cmd_buf, + const uint16_t flag) +{ + uint16_t nb_segs, nb_desc = 0; + uint16_t gaura_id, len = 0; + struct rte_mbuf *m_next = NULL; + + nb_segs = tx_pkt->nb_segs; + /* Setup PKO_SEND_HDR_S */ + cmd_buf[nb_desc++] = tx_pkt->pkt_len & 0xffff; + cmd_buf[nb_desc++] = 0x0; + + /* Enable tx checksum offload */ + if ((flag & OCCTX_TX_OFFLOAD_OL3_OL4_CSUM_F) || + (flag & OCCTX_TX_OFFLOAD_L3_L4_CSUM_F)) + octeontx_tx_checksum_offload(cmd_buf, flag, tx_pkt); + + do { + m_next = tx_pkt->next; + /* To handle case where mbufs belong to diff pools, like + * fragmentation + */ + gaura_id = octeontx_fpa_bufpool_gpool((uintptr_t) + tx_pkt->pool->pool_id); + + /* Setup PKO_SEND_GATHER_S */ + cmd_buf[nb_desc] = PKO_SEND_GATHER_SUBDC | + PKO_SEND_GATHER_LDTYPE(0x1ull) | + PKO_SEND_GATHER_GAUAR((long)gaura_id) | + tx_pkt->data_len; + + /* SG_DESC[I] bit controls if buffer is to be freed or + * not, as SEND_HDR[DF] and SEND_HDR[II] are clear. + */ + if (flag & OCCTX_TX_OFFLOAD_MBUF_NOFF_F) { + cmd_buf[nb_desc] |= + (octeontx_prefree_seg(tx_pkt) << 57); + } + + /* Mark mempool object as "put" since it is freed by + * PKO. 
+ */ + if (!(cmd_buf[nb_desc] & (1ULL << 57))) { + tx_pkt->next = NULL; + __mempool_check_cookies(tx_pkt->pool, + (void **)&tx_pkt, 1, 0); + } + nb_desc++; + + cmd_buf[nb_desc++] = rte_mbuf_data_iova(tx_pkt); + + nb_segs--; + len += tx_pkt->data_len; + tx_pkt = m_next; + } while (nb_segs); + + return nb_desc; +} + +static __rte_always_inline uint16_t +__octeontx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts, uint64_t *cmd_buf, + const uint16_t flags) +{ + struct octeontx_txq *txq = tx_queue; + octeontx_dq_t *dq = &txq->dq; + uint16_t count = 0, nb_desc; + rte_cio_wmb(); + + while (count < nb_pkts) { + if (unlikely(*((volatile int64_t *)dq->fc_status_va) < 0)) + break; + + if (flags & OCCTX_TX_MULTI_SEG_F) { + nb_desc = __octeontx_xmit_mseg_prepare(tx_pkts[count], + cmd_buf, flags); + } else { + nb_desc = __octeontx_xmit_prepare(tx_pkts[count], + cmd_buf, flags); + } + + octeontx_reg_lmtst(dq->lmtline_va, dq->ioreg_va, cmd_buf, + nb_desc); + + count++; + } + return count; +} + +uint16_t +octeontx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts); + +#define L3L4CSUM_F OCCTX_TX_OFFLOAD_L3_L4_CSUM_F +#define OL3OL4CSUM_F OCCTX_TX_OFFLOAD_OL3_OL4_CSUM_F +#define NOFF_F OCCTX_TX_OFFLOAD_MBUF_NOFF_F +#define MULT_F OCCTX_TX_MULTI_SEG_F + +/* [L3L4CSUM_F] [OL3OL4CSUM_F] [NOFF] [MULTI_SEG] */ +#define OCCTX_TX_FASTPATH_MODES \ +T(no_offload, 0, 0, 0, 0, 4, \ + OCCTX_TX_OFFLOAD_NONE) \ +T(mseg, 0, 0, 0, 1, 14, \ + MULT_F) \ +T(l3l4csum, 0, 0, 1, 0, 4, \ + L3L4CSUM_F) \ +T(l3l4csum_mseg, 0, 0, 1, 1, 14, \ + L3L4CSUM_F | MULT_F) \ +T(ol3ol4csum, 0, 1, 0, 0, 4, \ + OL3OL4CSUM_F) \ +T(ol3l4csum_mseg, 0, 1, 0, 1, 14, \ + OL3OL4CSUM_F | MULT_F) \ +T(ol3l4csum_l3l4csum, 0, 1, 1, 0, 4, \ + OL3OL4CSUM_F | L3L4CSUM_F) \ +T(ol3l4csum_l3l4csum_mseg, 0, 1, 1, 1, 14, \ + OL3OL4CSUM_F | L3L4CSUM_F | MULT_F) \ +T(noff, 1, 0, 0, 0, 4, \ + NOFF_F) \ +T(noff_mseg, 1, 0, 0, 1, 14, \ + NOFF_F | MULT_F) \ +T(noff_l3l4csum, 1, 0, 1, 0, 4, \ + NOFF_F | L3L4CSUM_F) \ +T(noff_l3l4csum_mseg, 1, 0, 1, 1, 14, \ + NOFF_F | L3L4CSUM_F | MULT_F) \ +T(noff_ol3ol4csum, 1, 1, 0, 0, 4, \ + NOFF_F | OL3OL4CSUM_F) \ +T(noff_ol3ol4csum_mseg, 1, 1, 0, 1, 14, \ + NOFF_F | OL3OL4CSUM_F | MULT_F) \ +T(noff_ol3ol4csum_l3l4csum, 1, 1, 1, 0, 4, \ + NOFF_F | OL3OL4CSUM_F | L3L4CSUM_F) \ +T(noff_ol3ol4csum_l3l4csum_mseg, 1, 1, 1, 1, 14, \ + NOFF_F | OL3OL4CSUM_F | L3L4CSUM_F | \ + MULT_F) + +/* RX offload macros */ +#define VLAN_FLTR_F OCCTX_RX_VLAN_FLTR_F +#define CSUM_F OCCTX_RX_OFFLOAD_CSUM_F +#define MULT_RX_F OCCTX_RX_MULTI_SEG_F + +/* [VLAN_FLTR] [CSUM_F] [MULTI_SEG] */ +#define OCCTX_RX_FASTPATH_MODES \ +R(no_offload, 0, 0, 0, OCCTX_RX_OFFLOAD_NONE) \ +R(mseg, 0, 0, 1, MULT_RX_F) \ +R(csum, 0, 1, 0, CSUM_F) \ +R(csum_mseg, 0, 1, 1, CSUM_F | MULT_RX_F) \ +R(vlan, 1, 0, 0, VLAN_FLTR_F) \ +R(vlan_mseg, 1, 0, 1, VLAN_FLTR_F | MULT_RX_F) \ +R(vlan_csum, 1, 1, 0, VLAN_FLTR_F | CSUM_F) \ +R(vlan_csum_mseg, 1, 1, 1, CSUM_F | VLAN_FLTR_F | \ + MULT_RX_F) + + #endif /* __OCTEONTX_RXTX_H__ */ diff --git a/src/spdk/dpdk/drivers/net/octeontx/rte_pmd_octeontx_version.map b/src/spdk/dpdk/drivers/net/octeontx/rte_pmd_octeontx_version.map new file mode 100644 index 000000000..f7cae02fa --- /dev/null +++ b/src/spdk/dpdk/drivers/net/octeontx/rte_pmd_octeontx_version.map @@ -0,0 +1,7 @@ +DPDK_20.0 { + global: + + rte_octeontx_pchan_map; + + local: *; +}; diff --git a/src/spdk/dpdk/drivers/net/octeontx2/Makefile b/src/spdk/dpdk/drivers/net/octeontx2/Makefile new file mode 100644 index 000000000..0de43e36a --- 
/dev/null +++ b/src/spdk/dpdk/drivers/net/octeontx2/Makefile @@ -0,0 +1,63 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(C) 2019 Marvell International Ltd. +# + +include $(RTE_SDK)/mk/rte.vars.mk + +# +# library name +# +LIB = librte_pmd_octeontx2.a + +CFLAGS += $(WERROR_FLAGS) +CFLAGS += -I$(RTE_SDK)/drivers/common/cpt +CFLAGS += -I$(RTE_SDK)/drivers/common/octeontx2 +CFLAGS += -I$(RTE_SDK)/drivers/crypto/octeontx2 +CFLAGS += -I$(RTE_SDK)/drivers/mempool/octeontx2 +CFLAGS += -I$(RTE_SDK)/drivers/net/octeontx2 +CFLAGS += -O3 +ifneq ($(CONFIG_RTE_TOOLCHAIN_ICC),y) +CFLAGS += -flax-vector-conversions +endif + +ifneq ($(CONFIG_RTE_ARCH_64),y) +CFLAGS += -Wno-int-to-pointer-cast +CFLAGS += -Wno-pointer-to-int-cast +ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y) +CFLAGS += -diag-disable 2259 +endif +endif + +EXPORT_MAP := rte_pmd_octeontx2_version.map + +# +# all source are stored in SRCS-y +# +SRCS-$(CONFIG_RTE_LIBRTE_OCTEONTX2_PMD) += \ + otx2_rx.c \ + otx2_tx.c \ + otx2_tm.c \ + otx2_rss.c \ + otx2_mac.c \ + otx2_ptp.c \ + otx2_flow.c \ + otx2_link.c \ + otx2_vlan.c \ + otx2_stats.c \ + otx2_mcast.c \ + otx2_lookup.c \ + otx2_ethdev.c \ + otx2_flow_ctrl.c \ + otx2_flow_parse.c \ + otx2_flow_utils.c \ + otx2_ethdev_irq.c \ + otx2_ethdev_ops.c \ + otx2_ethdev_sec.c \ + otx2_ethdev_debug.c \ + otx2_ethdev_devargs.c + +LDLIBS += -lrte_common_octeontx2 -lrte_mempool_octeontx2 -lrte_eal -lrte_net +LDLIBS += -lrte_ethdev -lrte_bus_pci -lrte_kvargs -lrte_mbuf -lrte_mempool -lm +LDLIBS += -lrte_cryptodev -lrte_eventdev -lrte_security + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/src/spdk/dpdk/drivers/net/octeontx2/meson.build b/src/spdk/dpdk/drivers/net/octeontx2/meson.build new file mode 100644 index 000000000..599ade672 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/octeontx2/meson.build @@ -0,0 +1,44 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(C) 2019 Marvell International Ltd. +# + +sources = files('otx2_rx.c', + 'otx2_tx.c', + 'otx2_tm.c', + 'otx2_rss.c', + 'otx2_mac.c', + 'otx2_ptp.c', + 'otx2_flow.c', + 'otx2_link.c', + 'otx2_vlan.c', + 'otx2_stats.c', + 'otx2_mcast.c', + 'otx2_lookup.c', + 'otx2_ethdev.c', + 'otx2_flow_ctrl.c', + 'otx2_flow_parse.c', + 'otx2_flow_utils.c', + 'otx2_ethdev_irq.c', + 'otx2_ethdev_ops.c', + 'otx2_ethdev_sec.c', + 'otx2_ethdev_debug.c', + 'otx2_ethdev_devargs.c' + ) + +deps += ['bus_pci', 'cryptodev', 'eventdev', 'security'] +deps += ['common_octeontx2', 'mempool_octeontx2'] + +extra_flags = ['-flax-vector-conversions'] +# This integrated controller runs only on a arm64 machine, remove 32bit warnings +if not dpdk_conf.get('RTE_ARCH_64') + extra_flags += ['-Wno-int-to-pointer-cast', '-Wno-pointer-to-int-cast'] +endif + +foreach flag: extra_flags + if cc.has_argument(flag) + cflags += flag + endif +endforeach + +includes += include_directories('../../common/cpt') +includes += include_directories('../../crypto/octeontx2') diff --git a/src/spdk/dpdk/drivers/net/octeontx2/otx2_ethdev.c b/src/spdk/dpdk/drivers/net/octeontx2/otx2_ethdev.c new file mode 100644 index 000000000..3f3f0a693 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/octeontx2/otx2_ethdev.c @@ -0,0 +1,2553 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(C) 2019 Marvell International Ltd. 
+ */ + +#include + +#include +#include +#include +#include +#include +#include + +#include "otx2_ethdev.h" +#include "otx2_ethdev_sec.h" + +static inline uint64_t +nix_get_rx_offload_capa(struct otx2_eth_dev *dev) +{ + uint64_t capa = NIX_RX_OFFLOAD_CAPA; + + if (otx2_dev_is_vf(dev) || + dev->npc_flow.switch_header_type == OTX2_PRIV_FLAGS_HIGIG) + capa &= ~DEV_RX_OFFLOAD_TIMESTAMP; + + return capa; +} + +static inline uint64_t +nix_get_tx_offload_capa(struct otx2_eth_dev *dev) +{ + uint64_t capa = NIX_TX_OFFLOAD_CAPA; + + /* TSO not supported for earlier chip revisions */ + if (otx2_dev_is_96xx_A0(dev) || otx2_dev_is_95xx_Ax(dev)) + capa &= ~(DEV_TX_OFFLOAD_TCP_TSO | + DEV_TX_OFFLOAD_VXLAN_TNL_TSO | + DEV_TX_OFFLOAD_GENEVE_TNL_TSO | + DEV_TX_OFFLOAD_GRE_TNL_TSO); + return capa; +} + +static const struct otx2_dev_ops otx2_dev_ops = { + .link_status_update = otx2_eth_dev_link_status_update, + .ptp_info_update = otx2_eth_dev_ptp_info_update +}; + +static int +nix_lf_alloc(struct otx2_eth_dev *dev, uint32_t nb_rxq, uint32_t nb_txq) +{ + struct otx2_mbox *mbox = dev->mbox; + struct nix_lf_alloc_req *req; + struct nix_lf_alloc_rsp *rsp; + int rc; + + req = otx2_mbox_alloc_msg_nix_lf_alloc(mbox); + req->rq_cnt = nb_rxq; + req->sq_cnt = nb_txq; + req->cq_cnt = nb_rxq; + /* XQE_SZ should be in Sync with NIX_CQ_ENTRY_SZ */ + RTE_BUILD_BUG_ON(NIX_CQ_ENTRY_SZ != 128); + req->xqe_sz = NIX_XQESZ_W16; + req->rss_sz = dev->rss_info.rss_size; + req->rss_grps = NIX_RSS_GRPS; + req->npa_func = otx2_npa_pf_func_get(); + req->sso_func = otx2_sso_pf_func_get(); + req->rx_cfg = BIT_ULL(35 /* DIS_APAD */); + if (dev->rx_offloads & (DEV_RX_OFFLOAD_TCP_CKSUM | + DEV_RX_OFFLOAD_UDP_CKSUM)) { + req->rx_cfg |= BIT_ULL(37 /* CSUM_OL4 */); + req->rx_cfg |= BIT_ULL(36 /* CSUM_IL4 */); + } + req->rx_cfg |= (BIT_ULL(32 /* DROP_RE */) | + BIT_ULL(33 /* Outer L2 Length */) | + BIT_ULL(38 /* Inner L4 UDP Length */) | + BIT_ULL(39 /* Inner L3 Length */) | + BIT_ULL(40 /* Outer L4 UDP Length */) | + BIT_ULL(41 /* Outer L3 Length */)); + + if (dev->rss_tag_as_xor == 0) + req->flags = NIX_LF_RSS_TAG_LSB_AS_ADDER; + + rc = otx2_mbox_process_msg(mbox, (void *)&rsp); + if (rc) + return rc; + + dev->sqb_size = rsp->sqb_size; + dev->tx_chan_base = rsp->tx_chan_base; + dev->rx_chan_base = rsp->rx_chan_base; + dev->rx_chan_cnt = rsp->rx_chan_cnt; + dev->tx_chan_cnt = rsp->tx_chan_cnt; + dev->lso_tsov4_idx = rsp->lso_tsov4_idx; + dev->lso_tsov6_idx = rsp->lso_tsov6_idx; + dev->lf_tx_stats = rsp->lf_tx_stats; + dev->lf_rx_stats = rsp->lf_rx_stats; + dev->cints = rsp->cints; + dev->qints = rsp->qints; + dev->npc_flow.channel = dev->rx_chan_base; + dev->ptp_en = rsp->hw_rx_tstamp_en; + + return 0; +} + +static int +nix_lf_switch_header_type_enable(struct otx2_eth_dev *dev, bool enable) +{ + struct otx2_mbox *mbox = dev->mbox; + struct npc_set_pkind *req; + struct msg_resp *rsp; + int rc; + + if (dev->npc_flow.switch_header_type == 0) + return 0; + + if (dev->npc_flow.switch_header_type == OTX2_PRIV_FLAGS_LEN_90B && + !otx2_dev_is_sdp(dev)) { + otx2_err("chlen90b is not supported on non-SDP device"); + return -EINVAL; + } + + /* Notify AF about higig2 config */ + req = otx2_mbox_alloc_msg_npc_set_pkind(mbox); + req->mode = dev->npc_flow.switch_header_type; + if (enable == 0) + req->mode = OTX2_PRIV_FLAGS_DEFAULT; + req->dir = PKIND_RX; + rc = otx2_mbox_process_msg(mbox, (void *)&rsp); + if (rc) + return rc; + req = otx2_mbox_alloc_msg_npc_set_pkind(mbox); + req->mode = dev->npc_flow.switch_header_type; + if (enable == 0) + req->mode = 
OTX2_PRIV_FLAGS_DEFAULT; + req->dir = PKIND_TX; + return otx2_mbox_process_msg(mbox, (void *)&rsp); +} + +static int +nix_lf_free(struct otx2_eth_dev *dev) +{ + struct otx2_mbox *mbox = dev->mbox; + struct nix_lf_free_req *req; + struct ndc_sync_op *ndc_req; + int rc; + + /* Sync NDC-NIX for LF */ + ndc_req = otx2_mbox_alloc_msg_ndc_sync_op(mbox); + ndc_req->nix_lf_tx_sync = 1; + ndc_req->nix_lf_rx_sync = 1; + rc = otx2_mbox_process(mbox); + if (rc) + otx2_err("Error on NDC-NIX-[TX, RX] LF sync, rc %d", rc); + + req = otx2_mbox_alloc_msg_nix_lf_free(mbox); + /* Let AF driver free all this nix lf's + * NPC entries allocated using NPC MBOX. + */ + req->flags = 0; + + return otx2_mbox_process(mbox); +} + +int +otx2_cgx_rxtx_start(struct otx2_eth_dev *dev) +{ + struct otx2_mbox *mbox = dev->mbox; + + if (otx2_dev_is_vf_or_sdp(dev)) + return 0; + + otx2_mbox_alloc_msg_cgx_start_rxtx(mbox); + + return otx2_mbox_process(mbox); +} + +int +otx2_cgx_rxtx_stop(struct otx2_eth_dev *dev) +{ + struct otx2_mbox *mbox = dev->mbox; + + if (otx2_dev_is_vf_or_sdp(dev)) + return 0; + + otx2_mbox_alloc_msg_cgx_stop_rxtx(mbox); + + return otx2_mbox_process(mbox); +} + +static int +npc_rx_enable(struct otx2_eth_dev *dev) +{ + struct otx2_mbox *mbox = dev->mbox; + + otx2_mbox_alloc_msg_nix_lf_start_rx(mbox); + + return otx2_mbox_process(mbox); +} + +static int +npc_rx_disable(struct otx2_eth_dev *dev) +{ + struct otx2_mbox *mbox = dev->mbox; + + otx2_mbox_alloc_msg_nix_lf_stop_rx(mbox); + + return otx2_mbox_process(mbox); +} + +static int +nix_cgx_start_link_event(struct otx2_eth_dev *dev) +{ + struct otx2_mbox *mbox = dev->mbox; + + if (otx2_dev_is_vf_or_sdp(dev)) + return 0; + + otx2_mbox_alloc_msg_cgx_start_linkevents(mbox); + + return otx2_mbox_process(mbox); +} + +static int +cgx_intlbk_enable(struct otx2_eth_dev *dev, bool en) +{ + struct otx2_mbox *mbox = dev->mbox; + + if (en && otx2_dev_is_vf_or_sdp(dev)) + return -ENOTSUP; + + if (en) + otx2_mbox_alloc_msg_cgx_intlbk_enable(mbox); + else + otx2_mbox_alloc_msg_cgx_intlbk_disable(mbox); + + return otx2_mbox_process(mbox); +} + +static int +nix_cgx_stop_link_event(struct otx2_eth_dev *dev) +{ + struct otx2_mbox *mbox = dev->mbox; + + if (otx2_dev_is_vf_or_sdp(dev)) + return 0; + + otx2_mbox_alloc_msg_cgx_stop_linkevents(mbox); + + return otx2_mbox_process(mbox); +} + +static inline void +nix_rx_queue_reset(struct otx2_eth_rxq *rxq) +{ + rxq->head = 0; + rxq->available = 0; +} + +static inline uint32_t +nix_qsize_to_val(enum nix_q_size_e qsize) +{ + return (16UL << (qsize * 2)); +} + +static inline enum nix_q_size_e +nix_qsize_clampup_get(struct otx2_eth_dev *dev, uint32_t val) +{ + int i; + + if (otx2_ethdev_fixup_is_min_4k_q(dev)) + i = nix_q_size_4K; + else + i = nix_q_size_16; + + for (; i < nix_q_size_max; i++) + if (val <= nix_qsize_to_val(i)) + break; + + if (i >= nix_q_size_max) + i = nix_q_size_max - 1; + + return i; +} + +static int +nix_cq_rq_init(struct rte_eth_dev *eth_dev, struct otx2_eth_dev *dev, + uint16_t qid, struct otx2_eth_rxq *rxq, struct rte_mempool *mp) +{ + struct otx2_mbox *mbox = dev->mbox; + const struct rte_memzone *rz; + uint32_t ring_size, cq_size; + struct nix_aq_enq_req *aq; + uint16_t first_skip; + int rc; + + cq_size = rxq->qlen; + ring_size = cq_size * NIX_CQ_ENTRY_SZ; + rz = rte_eth_dma_zone_reserve(eth_dev, "cq", qid, ring_size, + NIX_CQ_ALIGN, dev->node); + if (rz == NULL) { + otx2_err("Failed to allocate mem for cq hw ring"); + rc = -ENOMEM; + goto fail; + } + memset(rz->addr, 0, rz->len); + rxq->desc = 
(uintptr_t)rz->addr; + rxq->qmask = cq_size - 1; + + aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox); + aq->qidx = qid; + aq->ctype = NIX_AQ_CTYPE_CQ; + aq->op = NIX_AQ_INSTOP_INIT; + + aq->cq.ena = 1; + aq->cq.caching = 1; + aq->cq.qsize = rxq->qsize; + aq->cq.base = rz->iova; + aq->cq.avg_level = 0xff; + aq->cq.cq_err_int_ena = BIT(NIX_CQERRINT_CQE_FAULT); + aq->cq.cq_err_int_ena |= BIT(NIX_CQERRINT_DOOR_ERR); + + /* Many to one reduction */ + aq->cq.qint_idx = qid % dev->qints; + /* Map CQ0 [RQ0] to CINT0 and so on till max 64 irqs */ + aq->cq.cint_idx = qid; + + if (otx2_ethdev_fixup_is_limit_cq_full(dev)) { + const float rx_cq_skid = NIX_CQ_FULL_ERRATA_SKID; + uint16_t min_rx_drop; + + min_rx_drop = ceil(rx_cq_skid / (float)cq_size); + aq->cq.drop = min_rx_drop; + aq->cq.drop_ena = 1; + rxq->cq_drop = min_rx_drop; + } else { + rxq->cq_drop = NIX_CQ_THRESH_LEVEL; + aq->cq.drop = rxq->cq_drop; + aq->cq.drop_ena = 1; + } + + /* TX pause frames enable flowctrl on RX side */ + if (dev->fc_info.tx_pause) { + /* Single bpid is allocated for all rx channels for now */ + aq->cq.bpid = dev->fc_info.bpid[0]; + aq->cq.bp = rxq->cq_drop; + aq->cq.bp_ena = 1; + } + + rc = otx2_mbox_process(mbox); + if (rc) { + otx2_err("Failed to init cq context"); + goto fail; + } + + aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox); + aq->qidx = qid; + aq->ctype = NIX_AQ_CTYPE_RQ; + aq->op = NIX_AQ_INSTOP_INIT; + + aq->rq.sso_ena = 0; + + if (rxq->offloads & DEV_RX_OFFLOAD_SECURITY) + aq->rq.ipsech_ena = 1; + + aq->rq.cq = qid; /* RQ to CQ 1:1 mapped */ + aq->rq.spb_ena = 0; + aq->rq.lpb_aura = npa_lf_aura_handle_to_aura(mp->pool_id); + first_skip = (sizeof(struct rte_mbuf)); + first_skip += RTE_PKTMBUF_HEADROOM; + first_skip += rte_pktmbuf_priv_size(mp); + rxq->data_off = first_skip; + + first_skip /= 8; /* Expressed in number of dwords */ + aq->rq.first_skip = first_skip; + aq->rq.later_skip = (sizeof(struct rte_mbuf) / 8); + aq->rq.flow_tagw = 32; /* 32-bits */ + aq->rq.lpb_sizem1 = mp->elt_size / 8; + aq->rq.lpb_sizem1 -= 1; /* Expressed in size minus one */ + aq->rq.ena = 1; + aq->rq.pb_caching = 0x2; /* First cache aligned block to LLC */ + aq->rq.xqe_imm_size = 0; /* No pkt data copy to CQE */ + aq->rq.rq_int_ena = 0; + /* Many to one reduction */ + aq->rq.qint_idx = qid % dev->qints; + + aq->rq.xqe_drop_ena = 1; + + rc = otx2_mbox_process(mbox); + if (rc) { + otx2_err("Failed to init rq context"); + goto fail; + } + + return 0; +fail: + return rc; +} + +static int +nix_rq_enb_dis(struct rte_eth_dev *eth_dev, + struct otx2_eth_rxq *rxq, const bool enb) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct otx2_mbox *mbox = dev->mbox; + struct nix_aq_enq_req *aq; + + /* Pkts will be dropped silently if RQ is disabled */ + aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox); + aq->qidx = rxq->rq; + aq->ctype = NIX_AQ_CTYPE_RQ; + aq->op = NIX_AQ_INSTOP_WRITE; + + aq->rq.ena = enb; + aq->rq_mask.ena = ~(aq->rq_mask.ena); + + return otx2_mbox_process(mbox); +} + +static int +nix_cq_rq_uninit(struct rte_eth_dev *eth_dev, struct otx2_eth_rxq *rxq) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct otx2_mbox *mbox = dev->mbox; + struct nix_aq_enq_req *aq; + int rc; + + /* RQ is already disabled */ + /* Disable CQ */ + aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox); + aq->qidx = rxq->rq; + aq->ctype = NIX_AQ_CTYPE_CQ; + aq->op = NIX_AQ_INSTOP_WRITE; + + aq->cq.ena = 0; + aq->cq_mask.ena = ~(aq->cq_mask.ena); + + rc = otx2_mbox_process(mbox); + if (rc < 0) { + otx2_err("Failed to disable cq context"); + return 
rc; + } + + return 0; +} + +static inline int +nix_get_data_off(struct otx2_eth_dev *dev) +{ + return otx2_ethdev_is_ptp_en(dev) ? NIX_TIMESYNC_RX_OFFSET : 0; +} + +uint64_t +otx2_nix_rxq_mbuf_setup(struct otx2_eth_dev *dev, uint16_t port_id) +{ + struct rte_mbuf mb_def; + uint64_t *tmp; + + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) % 8 != 0); + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, refcnt) - + offsetof(struct rte_mbuf, data_off) != 2); + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, nb_segs) - + offsetof(struct rte_mbuf, data_off) != 4); + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, port) - + offsetof(struct rte_mbuf, data_off) != 6); + mb_def.nb_segs = 1; + mb_def.data_off = RTE_PKTMBUF_HEADROOM + nix_get_data_off(dev); + mb_def.port = port_id; + rte_mbuf_refcnt_set(&mb_def, 1); + + /* Prevent compiler reordering: rearm_data covers previous fields */ + rte_compiler_barrier(); + tmp = (uint64_t *)&mb_def.rearm_data; + + return *tmp; +} + +static void +otx2_nix_rx_queue_release(void *rx_queue) +{ + struct otx2_eth_rxq *rxq = rx_queue; + + if (!rxq) + return; + + otx2_nix_dbg("Releasing rxq %u", rxq->rq); + nix_cq_rq_uninit(rxq->eth_dev, rxq); + rte_free(rx_queue); +} + +static int +otx2_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t rq, + uint16_t nb_desc, unsigned int socket, + const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *mp) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct rte_mempool_ops *ops; + struct otx2_eth_rxq *rxq; + const char *platform_ops; + enum nix_q_size_e qsize; + uint64_t offloads; + int rc; + + rc = -EINVAL; + + /* Compile time check to make sure all fast path elements in a CL */ + RTE_BUILD_BUG_ON(offsetof(struct otx2_eth_rxq, slow_path_start) >= 128); + + /* Sanity checks */ + if (rx_conf->rx_deferred_start == 1) { + otx2_err("Deferred Rx start is not supported"); + goto fail; + } + + platform_ops = rte_mbuf_platform_mempool_ops(); + /* This driver needs octeontx2_npa mempool ops to work */ + ops = rte_mempool_get_ops(mp->ops_index); + if (strncmp(ops->name, platform_ops, RTE_MEMPOOL_OPS_NAMESIZE)) { + otx2_err("mempool ops should be of octeontx2_npa type"); + goto fail; + } + + if (mp->pool_id == 0) { + otx2_err("Invalid pool_id"); + goto fail; + } + + /* Free memory prior to re-allocation if needed */ + if (eth_dev->data->rx_queues[rq] != NULL) { + otx2_nix_dbg("Freeing memory prior to re-allocation %d", rq); + otx2_nix_rx_queue_release(eth_dev->data->rx_queues[rq]); + eth_dev->data->rx_queues[rq] = NULL; + } + + offloads = rx_conf->offloads | eth_dev->data->dev_conf.rxmode.offloads; + dev->rx_offloads |= offloads; + + /* Find the CQ queue size */ + qsize = nix_qsize_clampup_get(dev, nb_desc); + /* Allocate rxq memory */ + rxq = rte_zmalloc_socket("otx2 rxq", sizeof(*rxq), OTX2_ALIGN, socket); + if (rxq == NULL) { + otx2_err("Failed to allocate rq=%d", rq); + rc = -ENOMEM; + goto fail; + } + + rxq->eth_dev = eth_dev; + rxq->rq = rq; + rxq->cq_door = dev->base + NIX_LF_CQ_OP_DOOR; + rxq->cq_status = (int64_t *)(dev->base + NIX_LF_CQ_OP_STATUS); + rxq->wdata = (uint64_t)rq << 32; + rxq->aura = npa_lf_aura_handle_to_aura(mp->pool_id); + rxq->mbuf_initializer = otx2_nix_rxq_mbuf_setup(dev, + eth_dev->data->port_id); + rxq->offloads = offloads; + rxq->pool = mp; + rxq->qlen = nix_qsize_to_val(qsize); + rxq->qsize = qsize; + rxq->lookup_mem = otx2_nix_fastpath_lookup_mem_get(); + rxq->tstamp = &dev->tstamp; + + /* Alloc completion queue */ + rc = nix_cq_rq_init(eth_dev, dev, rq, rxq, mp); + if (rc) { + 
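+		/* nix_cq_rq_init() programs the CQ and RQ contexts via the AF mailbox; on failure, log it and release the rxq allocated above */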
otx2_err("Failed to allocate rxq=%u", rq); + goto free_rxq; + } + + rxq->qconf.socket_id = socket; + rxq->qconf.nb_desc = nb_desc; + rxq->qconf.mempool = mp; + memcpy(&rxq->qconf.conf.rx, rx_conf, sizeof(struct rte_eth_rxconf)); + + nix_rx_queue_reset(rxq); + otx2_nix_dbg("rq=%d pool=%s qsize=%d nb_desc=%d->%d", + rq, mp->name, qsize, nb_desc, rxq->qlen); + + eth_dev->data->rx_queues[rq] = rxq; + eth_dev->data->rx_queue_state[rq] = RTE_ETH_QUEUE_STATE_STOPPED; + + /* Calculating delta and freq mult between PTP HI clock and tsc. + * These are needed in deriving raw clock value from tsc counter. + * read_clock eth op returns raw clock value. + */ + if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) || + otx2_ethdev_is_ptp_en(dev)) { + rc = otx2_nix_raw_clock_tsc_conv(dev); + if (rc) { + otx2_err("Failed to calculate delta and freq mult"); + goto fail; + } + } + + return 0; + +free_rxq: + otx2_nix_rx_queue_release(rxq); +fail: + return rc; +} + +static inline uint8_t +nix_sq_max_sqe_sz(struct otx2_eth_txq *txq) +{ + /* + * Maximum three segments can be supported with W8, Choose + * NIX_MAXSQESZ_W16 for multi segment offload. + */ + if (txq->offloads & DEV_TX_OFFLOAD_MULTI_SEGS) + return NIX_MAXSQESZ_W16; + else + return NIX_MAXSQESZ_W8; +} + +static uint16_t +nix_rx_offload_flags(struct rte_eth_dev *eth_dev) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct rte_eth_dev_data *data = eth_dev->data; + struct rte_eth_conf *conf = &data->dev_conf; + struct rte_eth_rxmode *rxmode = &conf->rxmode; + uint16_t flags = 0; + + if (rxmode->mq_mode == ETH_MQ_RX_RSS && + (dev->rx_offloads & DEV_RX_OFFLOAD_RSS_HASH)) + flags |= NIX_RX_OFFLOAD_RSS_F; + + if (dev->rx_offloads & (DEV_RX_OFFLOAD_TCP_CKSUM | + DEV_RX_OFFLOAD_UDP_CKSUM)) + flags |= NIX_RX_OFFLOAD_CHECKSUM_F; + + if (dev->rx_offloads & (DEV_RX_OFFLOAD_IPV4_CKSUM | + DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM)) + flags |= NIX_RX_OFFLOAD_CHECKSUM_F; + + if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER) + flags |= NIX_RX_MULTI_SEG_F; + + if (dev->rx_offloads & (DEV_RX_OFFLOAD_VLAN_STRIP | + DEV_RX_OFFLOAD_QINQ_STRIP)) + flags |= NIX_RX_OFFLOAD_VLAN_STRIP_F; + + if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP)) + flags |= NIX_RX_OFFLOAD_TSTAMP_F; + + if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) + flags |= NIX_RX_OFFLOAD_SECURITY_F; + + if (!dev->ptype_disable) + flags |= NIX_RX_OFFLOAD_PTYPE_F; + + return flags; +} + +static uint16_t +nix_tx_offload_flags(struct rte_eth_dev *eth_dev) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + uint64_t conf = dev->tx_offloads; + uint16_t flags = 0; + + /* Fastpath is dependent on these enums */ + RTE_BUILD_BUG_ON(PKT_TX_TCP_CKSUM != (1ULL << 52)); + RTE_BUILD_BUG_ON(PKT_TX_SCTP_CKSUM != (2ULL << 52)); + RTE_BUILD_BUG_ON(PKT_TX_UDP_CKSUM != (3ULL << 52)); + RTE_BUILD_BUG_ON(PKT_TX_IP_CKSUM != (1ULL << 54)); + RTE_BUILD_BUG_ON(PKT_TX_IPV4 != (1ULL << 55)); + RTE_BUILD_BUG_ON(PKT_TX_OUTER_IP_CKSUM != (1ULL << 58)); + RTE_BUILD_BUG_ON(PKT_TX_OUTER_IPV4 != (1ULL << 59)); + RTE_BUILD_BUG_ON(PKT_TX_OUTER_IPV6 != (1ULL << 60)); + RTE_BUILD_BUG_ON(PKT_TX_OUTER_UDP_CKSUM != (1ULL << 41)); + RTE_BUILD_BUG_ON(RTE_MBUF_L2_LEN_BITS != 7); + RTE_BUILD_BUG_ON(RTE_MBUF_L3_LEN_BITS != 9); + RTE_BUILD_BUG_ON(RTE_MBUF_OUTL2_LEN_BITS != 7); + RTE_BUILD_BUG_ON(RTE_MBUF_OUTL3_LEN_BITS != 9); + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) != + offsetof(struct rte_mbuf, buf_iova) + 8); + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) != + offsetof(struct rte_mbuf, buf_iova) + 16); + 
RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) != + offsetof(struct rte_mbuf, ol_flags) + 12); + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, tx_offload) != + offsetof(struct rte_mbuf, pool) + 2 * sizeof(void *)); + + if (conf & DEV_TX_OFFLOAD_VLAN_INSERT || + conf & DEV_TX_OFFLOAD_QINQ_INSERT) + flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F; + + if (conf & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM || + conf & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) + flags |= NIX_TX_OFFLOAD_OL3_OL4_CSUM_F; + + if (conf & DEV_TX_OFFLOAD_IPV4_CKSUM || + conf & DEV_TX_OFFLOAD_TCP_CKSUM || + conf & DEV_TX_OFFLOAD_UDP_CKSUM || + conf & DEV_TX_OFFLOAD_SCTP_CKSUM) + flags |= NIX_TX_OFFLOAD_L3_L4_CSUM_F; + + if (!(conf & DEV_TX_OFFLOAD_MBUF_FAST_FREE)) + flags |= NIX_TX_OFFLOAD_MBUF_NOFF_F; + + if (conf & DEV_TX_OFFLOAD_MULTI_SEGS) + flags |= NIX_TX_MULTI_SEG_F; + + /* Enable Inner checksum for TSO */ + if (conf & DEV_TX_OFFLOAD_TCP_TSO) + flags |= (NIX_TX_OFFLOAD_TSO_F | + NIX_TX_OFFLOAD_L3_L4_CSUM_F); + + /* Enable Inner and Outer checksum for Tunnel TSO */ + if (conf & (DEV_TX_OFFLOAD_VXLAN_TNL_TSO | + DEV_TX_OFFLOAD_GENEVE_TNL_TSO | + DEV_TX_OFFLOAD_GRE_TNL_TSO)) + flags |= (NIX_TX_OFFLOAD_TSO_F | + NIX_TX_OFFLOAD_OL3_OL4_CSUM_F | + NIX_TX_OFFLOAD_L3_L4_CSUM_F); + + if (conf & DEV_TX_OFFLOAD_SECURITY) + flags |= NIX_TX_OFFLOAD_SECURITY_F; + + if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP)) + flags |= NIX_TX_OFFLOAD_TSTAMP_F; + + return flags; +} + +static int +nix_sq_init(struct otx2_eth_txq *txq) +{ + struct otx2_eth_dev *dev = txq->dev; + struct otx2_mbox *mbox = dev->mbox; + struct nix_aq_enq_req *sq; + uint32_t rr_quantum; + uint16_t smq; + int rc; + + if (txq->sqb_pool->pool_id == 0) + return -EINVAL; + + rc = otx2_nix_tm_get_leaf_data(dev, txq->sq, &rr_quantum, &smq); + if (rc) { + otx2_err("Failed to get sq->smq(leaf node), rc=%d", rc); + return rc; + } + + sq = otx2_mbox_alloc_msg_nix_aq_enq(mbox); + sq->qidx = txq->sq; + sq->ctype = NIX_AQ_CTYPE_SQ; + sq->op = NIX_AQ_INSTOP_INIT; + sq->sq.max_sqe_size = nix_sq_max_sqe_sz(txq); + + sq->sq.smq = smq; + sq->sq.smq_rr_quantum = rr_quantum; + sq->sq.default_chan = dev->tx_chan_base; + sq->sq.sqe_stype = NIX_STYPE_STF; + sq->sq.ena = 1; + if (sq->sq.max_sqe_size == NIX_MAXSQESZ_W8) + sq->sq.sqe_stype = NIX_STYPE_STP; + sq->sq.sqb_aura = + npa_lf_aura_handle_to_aura(txq->sqb_pool->pool_id); + sq->sq.sq_int_ena = BIT(NIX_SQINT_LMT_ERR); + sq->sq.sq_int_ena |= BIT(NIX_SQINT_SQB_ALLOC_FAIL); + sq->sq.sq_int_ena |= BIT(NIX_SQINT_SEND_ERR); + sq->sq.sq_int_ena |= BIT(NIX_SQINT_MNQ_ERR); + + /* Many to one reduction */ + sq->sq.qint_idx = txq->sq % dev->qints; + + return otx2_mbox_process(mbox); +} + +static int +nix_sq_uninit(struct otx2_eth_txq *txq) +{ + struct otx2_eth_dev *dev = txq->dev; + struct otx2_mbox *mbox = dev->mbox; + struct ndc_sync_op *ndc_req; + struct nix_aq_enq_rsp *rsp; + struct nix_aq_enq_req *aq; + uint16_t sqes_per_sqb; + void *sqb_buf; + int rc, count; + + otx2_nix_dbg("Cleaning up sq %u", txq->sq); + + aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox); + aq->qidx = txq->sq; + aq->ctype = NIX_AQ_CTYPE_SQ; + aq->op = NIX_AQ_INSTOP_READ; + + rc = otx2_mbox_process_msg(mbox, (void *)&rsp); + if (rc) + return rc; + + /* Check if sq is already cleaned up */ + if (!rsp->sq.ena) + return 0; + + /* Disable sq */ + aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox); + aq->qidx = txq->sq; + aq->ctype = NIX_AQ_CTYPE_SQ; + aq->op = NIX_AQ_INSTOP_WRITE; + + aq->sq_mask.ena = ~aq->sq_mask.ena; + aq->sq.ena = 0; + + rc = otx2_mbox_process(mbox); + if (rc) + return rc; + + /* Read SQ and free 
sqb's */ + aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox); + aq->qidx = txq->sq; + aq->ctype = NIX_AQ_CTYPE_SQ; + aq->op = NIX_AQ_INSTOP_READ; + + rc = otx2_mbox_process_msg(mbox, (void *)&rsp); + if (rc) + return rc; + + if (aq->sq.smq_pend) + otx2_err("SQ has pending sqe's"); + + count = aq->sq.sqb_count; + sqes_per_sqb = 1 << txq->sqes_per_sqb_log2; + /* Free SQB's that are used */ + sqb_buf = (void *)rsp->sq.head_sqb; + while (count) { + void *next_sqb; + + next_sqb = *(void **)((uintptr_t)sqb_buf + (uint32_t) + ((sqes_per_sqb - 1) * + nix_sq_max_sqe_sz(txq))); + npa_lf_aura_op_free(txq->sqb_pool->pool_id, 1, + (uint64_t)sqb_buf); + sqb_buf = next_sqb; + count--; + } + + /* Free next to use sqb */ + if (rsp->sq.next_sqb) + npa_lf_aura_op_free(txq->sqb_pool->pool_id, 1, + rsp->sq.next_sqb); + + /* Sync NDC-NIX-TX for LF */ + ndc_req = otx2_mbox_alloc_msg_ndc_sync_op(mbox); + ndc_req->nix_lf_tx_sync = 1; + rc = otx2_mbox_process(mbox); + if (rc) + otx2_err("Error on NDC-NIX-TX LF sync, rc %d", rc); + + return rc; +} + +static int +nix_sqb_aura_limit_cfg(struct rte_mempool *mp, uint16_t nb_sqb_bufs) +{ + struct otx2_npa_lf *npa_lf = otx2_intra_dev_get_cfg()->npa_lf; + struct npa_aq_enq_req *aura_req; + + aura_req = otx2_mbox_alloc_msg_npa_aq_enq(npa_lf->mbox); + aura_req->aura_id = npa_lf_aura_handle_to_aura(mp->pool_id); + aura_req->ctype = NPA_AQ_CTYPE_AURA; + aura_req->op = NPA_AQ_INSTOP_WRITE; + + aura_req->aura.limit = nb_sqb_bufs; + aura_req->aura_mask.limit = ~(aura_req->aura_mask.limit); + + return otx2_mbox_process(npa_lf->mbox); +} + +static int +nix_alloc_sqb_pool(int port, struct otx2_eth_txq *txq, uint16_t nb_desc) +{ + struct otx2_eth_dev *dev = txq->dev; + uint16_t sqes_per_sqb, nb_sqb_bufs; + char name[RTE_MEMPOOL_NAMESIZE]; + struct rte_mempool_objsz sz; + struct npa_aura_s *aura; + uint32_t tmp, blk_sz; + + aura = (struct npa_aura_s *)((uintptr_t)txq->fc_mem + OTX2_ALIGN); + snprintf(name, sizeof(name), "otx2_sqb_pool_%d_%d", port, txq->sq); + blk_sz = dev->sqb_size; + + if (nix_sq_max_sqe_sz(txq) == NIX_MAXSQESZ_W16) + sqes_per_sqb = (dev->sqb_size / 8) / 16; + else + sqes_per_sqb = (dev->sqb_size / 8) / 8; + + nb_sqb_bufs = nb_desc / sqes_per_sqb; + /* Clamp up to devarg passed SQB count */ + nb_sqb_bufs = RTE_MIN(dev->max_sqb_count, RTE_MAX(NIX_DEF_SQB, + nb_sqb_bufs + NIX_SQB_LIST_SPACE)); + + txq->sqb_pool = rte_mempool_create_empty(name, NIX_MAX_SQB, blk_sz, + 0, 0, dev->node, + MEMPOOL_F_NO_SPREAD); + txq->nb_sqb_bufs = nb_sqb_bufs; + txq->sqes_per_sqb_log2 = (uint16_t)rte_log2_u32(sqes_per_sqb); + txq->nb_sqb_bufs_adj = nb_sqb_bufs - + RTE_ALIGN_MUL_CEIL(nb_sqb_bufs, sqes_per_sqb) / sqes_per_sqb; + txq->nb_sqb_bufs_adj = + (NIX_SQB_LOWER_THRESH * txq->nb_sqb_bufs_adj) / 100; + + if (txq->sqb_pool == NULL) { + otx2_err("Failed to allocate sqe mempool"); + goto fail; + } + + memset(aura, 0, sizeof(*aura)); + aura->fc_ena = 1; + aura->fc_addr = txq->fc_iova; + aura->fc_hyst_bits = 0; /* Store count on all updates */ + if (rte_mempool_set_ops_byname(txq->sqb_pool, "octeontx2_npa", aura)) { + otx2_err("Failed to set ops for sqe mempool"); + goto fail; + } + if (rte_mempool_populate_default(txq->sqb_pool) < 0) { + otx2_err("Failed to populate sqe mempool"); + goto fail; + } + + tmp = rte_mempool_calc_obj_size(blk_sz, MEMPOOL_F_NO_SPREAD, &sz); + if (dev->sqb_size != sz.elt_size) { + otx2_err("sqe pool block size is not expected %d != %d", + dev->sqb_size, tmp); + goto fail; + } + + nix_sqb_aura_limit_cfg(txq->sqb_pool, txq->nb_sqb_bufs); + + return 0; +fail: + return 
-ENOMEM; +} + +void +otx2_nix_form_default_desc(struct otx2_eth_txq *txq) +{ + struct nix_send_ext_s *send_hdr_ext; + struct nix_send_hdr_s *send_hdr; + struct nix_send_mem_s *send_mem; + union nix_send_sg_s *sg; + + /* Initialize the fields based on basic single segment packet */ + memset(&txq->cmd, 0, sizeof(txq->cmd)); + + if (txq->dev->tx_offload_flags & NIX_TX_NEED_EXT_HDR) { + send_hdr = (struct nix_send_hdr_s *)&txq->cmd[0]; + /* 2(HDR) + 2(EXT_HDR) + 1(SG) + 1(IOVA) = 6/2 - 1 = 2 */ + send_hdr->w0.sizem1 = 2; + + send_hdr_ext = (struct nix_send_ext_s *)&txq->cmd[2]; + send_hdr_ext->w0.subdc = NIX_SUBDC_EXT; + if (txq->dev->tx_offload_flags & NIX_TX_OFFLOAD_TSTAMP_F) { + /* Default: one seg packet would have: + * 2(HDR) + 2(EXT) + 1(SG) + 1(IOVA) + 2(MEM) + * => 8/2 - 1 = 3 + */ + send_hdr->w0.sizem1 = 3; + send_hdr_ext->w0.tstmp = 1; + + /* To calculate the offset for send_mem, + * send_hdr->w0.sizem1 * 2 + */ + send_mem = (struct nix_send_mem_s *)(txq->cmd + + (send_hdr->w0.sizem1 << 1)); + send_mem->subdc = NIX_SUBDC_MEM; + send_mem->alg = NIX_SENDMEMALG_SETTSTMP; + send_mem->addr = txq->dev->tstamp.tx_tstamp_iova; + } + sg = (union nix_send_sg_s *)&txq->cmd[4]; + } else { + send_hdr = (struct nix_send_hdr_s *)&txq->cmd[0]; + /* 2(HDR) + 1(SG) + 1(IOVA) = 4/2 - 1 = 1 */ + send_hdr->w0.sizem1 = 1; + sg = (union nix_send_sg_s *)&txq->cmd[2]; + } + + send_hdr->w0.sq = txq->sq; + sg->subdc = NIX_SUBDC_SG; + sg->segs = 1; + sg->ld_type = NIX_SENDLDTYPE_LDD; + + rte_smp_wmb(); +} + +static void +otx2_nix_tx_queue_release(void *_txq) +{ + struct otx2_eth_txq *txq = _txq; + struct rte_eth_dev *eth_dev; + + if (!txq) + return; + + eth_dev = txq->dev->eth_dev; + + otx2_nix_dbg("Releasing txq %u", txq->sq); + + /* Flush and disable tm */ + otx2_nix_sq_flush_pre(txq, eth_dev->data->dev_started); + + /* Free sqb's and disable sq */ + nix_sq_uninit(txq); + + if (txq->sqb_pool) { + rte_mempool_free(txq->sqb_pool); + txq->sqb_pool = NULL; + } + otx2_nix_sq_flush_post(txq); + rte_free(txq); +} + + +static int +otx2_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t sq, + uint16_t nb_desc, unsigned int socket_id, + const struct rte_eth_txconf *tx_conf) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + const struct rte_memzone *fc; + struct otx2_eth_txq *txq; + uint64_t offloads; + int rc; + + rc = -EINVAL; + + /* Compile time check to make sure all fast path elements in a CL */ + RTE_BUILD_BUG_ON(offsetof(struct otx2_eth_txq, slow_path_start) >= 128); + + if (tx_conf->tx_deferred_start) { + otx2_err("Tx deferred start is not supported"); + goto fail; + } + + /* Free memory prior to re-allocation if needed. */ + if (eth_dev->data->tx_queues[sq] != NULL) { + otx2_nix_dbg("Freeing memory prior to re-allocation %d", sq); + otx2_nix_tx_queue_release(eth_dev->data->tx_queues[sq]); + eth_dev->data->tx_queues[sq] = NULL; + } + + /* Find the expected offloads for this queue */ + offloads = tx_conf->offloads | eth_dev->data->dev_conf.txmode.offloads; + + /* Allocating tx queue data structure */ + txq = rte_zmalloc_socket("otx2_ethdev TX queue", sizeof(*txq), + OTX2_ALIGN, socket_id); + if (txq == NULL) { + otx2_err("Failed to alloc txq=%d", sq); + rc = -ENOMEM; + goto fail; + } + txq->sq = sq; + txq->dev = dev; + txq->sqb_pool = NULL; + txq->offloads = offloads; + dev->tx_offloads |= offloads; + + /* + * Allocate memory for flow control updates from HW. + * Alloc one cache line, so that fits all FC_STYPE modes. 
+ */ + fc = rte_eth_dma_zone_reserve(eth_dev, "fcmem", sq, + OTX2_ALIGN + sizeof(struct npa_aura_s), + OTX2_ALIGN, dev->node); + if (fc == NULL) { + otx2_err("Failed to allocate mem for fcmem"); + rc = -ENOMEM; + goto free_txq; + } + txq->fc_iova = fc->iova; + txq->fc_mem = fc->addr; + + /* Initialize the aura sqb pool */ + rc = nix_alloc_sqb_pool(eth_dev->data->port_id, txq, nb_desc); + if (rc) { + otx2_err("Failed to alloc sqe pool rc=%d", rc); + goto free_txq; + } + + /* Initialize the SQ */ + rc = nix_sq_init(txq); + if (rc) { + otx2_err("Failed to init sq=%d context", sq); + goto free_txq; + } + + txq->fc_cache_pkts = 0; + txq->io_addr = dev->base + NIX_LF_OP_SENDX(0); + /* Evenly distribute LMT slot for each sq */ + txq->lmt_addr = (void *)(dev->lmt_addr + ((sq & LMT_SLOT_MASK) << 12)); + + txq->qconf.socket_id = socket_id; + txq->qconf.nb_desc = nb_desc; + memcpy(&txq->qconf.conf.tx, tx_conf, sizeof(struct rte_eth_txconf)); + + otx2_nix_form_default_desc(txq); + + otx2_nix_dbg("sq=%d fc=%p offload=0x%" PRIx64 " sqb=0x%" PRIx64 "" + " lmt_addr=%p nb_sqb_bufs=%d sqes_per_sqb_log2=%d", sq, + fc->addr, offloads, txq->sqb_pool->pool_id, txq->lmt_addr, + txq->nb_sqb_bufs, txq->sqes_per_sqb_log2); + eth_dev->data->tx_queues[sq] = txq; + eth_dev->data->tx_queue_state[sq] = RTE_ETH_QUEUE_STATE_STOPPED; + return 0; + +free_txq: + otx2_nix_tx_queue_release(txq); +fail: + return rc; +} + +static int +nix_store_queue_cfg_and_then_release(struct rte_eth_dev *eth_dev) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct otx2_eth_qconf *tx_qconf = NULL; + struct otx2_eth_qconf *rx_qconf = NULL; + struct otx2_eth_txq **txq; + struct otx2_eth_rxq **rxq; + int i, nb_rxq, nb_txq; + + nb_rxq = RTE_MIN(dev->configured_nb_rx_qs, eth_dev->data->nb_rx_queues); + nb_txq = RTE_MIN(dev->configured_nb_tx_qs, eth_dev->data->nb_tx_queues); + + tx_qconf = malloc(nb_txq * sizeof(*tx_qconf)); + if (tx_qconf == NULL) { + otx2_err("Failed to allocate memory for tx_qconf"); + goto fail; + } + + rx_qconf = malloc(nb_rxq * sizeof(*rx_qconf)); + if (rx_qconf == NULL) { + otx2_err("Failed to allocate memory for rx_qconf"); + goto fail; + } + + txq = (struct otx2_eth_txq **)eth_dev->data->tx_queues; + for (i = 0; i < nb_txq; i++) { + if (txq[i] == NULL) { + tx_qconf[i].valid = false; + otx2_info("txq[%d] is already released", i); + continue; + } + memcpy(&tx_qconf[i], &txq[i]->qconf, sizeof(*tx_qconf)); + tx_qconf[i].valid = true; + otx2_nix_tx_queue_release(txq[i]); + eth_dev->data->tx_queues[i] = NULL; + } + + rxq = (struct otx2_eth_rxq **)eth_dev->data->rx_queues; + for (i = 0; i < nb_rxq; i++) { + if (rxq[i] == NULL) { + rx_qconf[i].valid = false; + otx2_info("rxq[%d] is already released", i); + continue; + } + memcpy(&rx_qconf[i], &rxq[i]->qconf, sizeof(*rx_qconf)); + rx_qconf[i].valid = true; + otx2_nix_rx_queue_release(rxq[i]); + eth_dev->data->rx_queues[i] = NULL; + } + + dev->tx_qconf = tx_qconf; + dev->rx_qconf = rx_qconf; + return 0; + +fail: + if (tx_qconf) + free(tx_qconf); + if (rx_qconf) + free(rx_qconf); + + return -ENOMEM; +} + +static int +nix_restore_queue_cfg(struct rte_eth_dev *eth_dev) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct otx2_eth_qconf *tx_qconf = dev->tx_qconf; + struct otx2_eth_qconf *rx_qconf = dev->rx_qconf; + struct otx2_eth_txq **txq; + struct otx2_eth_rxq **rxq; + int rc, i, nb_rxq, nb_txq; + + nb_rxq = RTE_MIN(dev->configured_nb_rx_qs, eth_dev->data->nb_rx_queues); + nb_txq = RTE_MIN(dev->configured_nb_tx_qs, eth_dev->data->nb_tx_queues); + + rc 
= -ENOMEM; + /* Setup tx & rx queues with previous configuration so + * that the queues can be functional in cases like ports + * are started without re configuring queues. + * + * Usual re config sequence is like below: + * port_configure() { + * if(reconfigure) { + * queue_release() + * queue_setup() + * } + * queue_configure() { + * queue_release() + * queue_setup() + * } + * } + * port_start() + * + * In some application's control path, queue_configure() would + * NOT be invoked for TXQs/RXQs in port_configure(). + * In such cases, queues can be functional after start as the + * queues are already setup in port_configure(). + */ + for (i = 0; i < nb_txq; i++) { + if (!tx_qconf[i].valid) + continue; + rc = otx2_nix_tx_queue_setup(eth_dev, i, tx_qconf[i].nb_desc, + tx_qconf[i].socket_id, + &tx_qconf[i].conf.tx); + if (rc) { + otx2_err("Failed to setup tx queue rc=%d", rc); + txq = (struct otx2_eth_txq **)eth_dev->data->tx_queues; + for (i -= 1; i >= 0; i--) + otx2_nix_tx_queue_release(txq[i]); + goto fail; + } + } + + free(tx_qconf); tx_qconf = NULL; + + for (i = 0; i < nb_rxq; i++) { + if (!rx_qconf[i].valid) + continue; + rc = otx2_nix_rx_queue_setup(eth_dev, i, rx_qconf[i].nb_desc, + rx_qconf[i].socket_id, + &rx_qconf[i].conf.rx, + rx_qconf[i].mempool); + if (rc) { + otx2_err("Failed to setup rx queue rc=%d", rc); + rxq = (struct otx2_eth_rxq **)eth_dev->data->rx_queues; + for (i -= 1; i >= 0; i--) + otx2_nix_rx_queue_release(rxq[i]); + goto release_tx_queues; + } + } + + free(rx_qconf); rx_qconf = NULL; + + return 0; + +release_tx_queues: + txq = (struct otx2_eth_txq **)eth_dev->data->tx_queues; + for (i = 0; i < eth_dev->data->nb_tx_queues; i++) + otx2_nix_tx_queue_release(txq[i]); +fail: + if (tx_qconf) + free(tx_qconf); + if (rx_qconf) + free(rx_qconf); + + return rc; +} + +static uint16_t +nix_eth_nop_burst(void *queue, struct rte_mbuf **mbufs, uint16_t pkts) +{ + RTE_SET_USED(queue); + RTE_SET_USED(mbufs); + RTE_SET_USED(pkts); + + return 0; +} + +static void +nix_set_nop_rxtx_function(struct rte_eth_dev *eth_dev) +{ + /* These dummy functions are required for supporting + * some applications which reconfigure queues without + * stopping tx burst and rx burst threads(eg kni app) + * When the queues context is saved, txq/rxqs are released + * which caused app crash since rx/tx burst is still + * on different lcores + */ + eth_dev->tx_pkt_burst = nix_eth_nop_burst; + eth_dev->rx_pkt_burst = nix_eth_nop_burst; + rte_mb(); +} + +static void +nix_lso_tcp(struct nix_lso_format_cfg *req, bool v4) +{ + volatile struct nix_lso_format *field; + + /* Format works only with TCP packet marked by OL3/OL4 */ + field = (volatile struct nix_lso_format *)&req->fields[0]; + req->field_mask = NIX_LSO_FIELD_MASK; + /* Outer IPv4/IPv6 */ + field->layer = NIX_TXLAYER_OL3; + field->offset = v4 ? 
2 : 4; + field->sizem1 = 1; /* 2B */ + field->alg = NIX_LSOALG_ADD_PAYLEN; + field++; + if (v4) { + /* IPID field */ + field->layer = NIX_TXLAYER_OL3; + field->offset = 4; + field->sizem1 = 1; + /* Incremented linearly per segment */ + field->alg = NIX_LSOALG_ADD_SEGNUM; + field++; + } + + /* TCP sequence number update */ + field->layer = NIX_TXLAYER_OL4; + field->offset = 4; + field->sizem1 = 3; /* 4 bytes */ + field->alg = NIX_LSOALG_ADD_OFFSET; + field++; + /* TCP flags field */ + field->layer = NIX_TXLAYER_OL4; + field->offset = 12; + field->sizem1 = 1; + field->alg = NIX_LSOALG_TCP_FLAGS; + field++; +} + +static void +nix_lso_udp_tun_tcp(struct nix_lso_format_cfg *req, + bool outer_v4, bool inner_v4) +{ + volatile struct nix_lso_format *field; + + field = (volatile struct nix_lso_format *)&req->fields[0]; + req->field_mask = NIX_LSO_FIELD_MASK; + /* Outer IPv4/IPv6 len */ + field->layer = NIX_TXLAYER_OL3; + field->offset = outer_v4 ? 2 : 4; + field->sizem1 = 1; /* 2B */ + field->alg = NIX_LSOALG_ADD_PAYLEN; + field++; + if (outer_v4) { + /* IPID */ + field->layer = NIX_TXLAYER_OL3; + field->offset = 4; + field->sizem1 = 1; + /* Incremented linearly per segment */ + field->alg = NIX_LSOALG_ADD_SEGNUM; + field++; + } + + /* Outer UDP length */ + field->layer = NIX_TXLAYER_OL4; + field->offset = 4; + field->sizem1 = 1; + field->alg = NIX_LSOALG_ADD_PAYLEN; + field++; + + /* Inner IPv4/IPv6 */ + field->layer = NIX_TXLAYER_IL3; + field->offset = inner_v4 ? 2 : 4; + field->sizem1 = 1; /* 2B */ + field->alg = NIX_LSOALG_ADD_PAYLEN; + field++; + if (inner_v4) { + /* IPID field */ + field->layer = NIX_TXLAYER_IL3; + field->offset = 4; + field->sizem1 = 1; + /* Incremented linearly per segment */ + field->alg = NIX_LSOALG_ADD_SEGNUM; + field++; + } + + /* TCP sequence number update */ + field->layer = NIX_TXLAYER_IL4; + field->offset = 4; + field->sizem1 = 3; /* 4 bytes */ + field->alg = NIX_LSOALG_ADD_OFFSET; + field++; + + /* TCP flags field */ + field->layer = NIX_TXLAYER_IL4; + field->offset = 12; + field->sizem1 = 1; + field->alg = NIX_LSOALG_TCP_FLAGS; + field++; +} + +static void +nix_lso_tun_tcp(struct nix_lso_format_cfg *req, + bool outer_v4, bool inner_v4) +{ + volatile struct nix_lso_format *field; + + field = (volatile struct nix_lso_format *)&req->fields[0]; + req->field_mask = NIX_LSO_FIELD_MASK; + /* Outer IPv4/IPv6 len */ + field->layer = NIX_TXLAYER_OL3; + field->offset = outer_v4 ? 2 : 4; + field->sizem1 = 1; /* 2B */ + field->alg = NIX_LSOALG_ADD_PAYLEN; + field++; + if (outer_v4) { + /* IPID */ + field->layer = NIX_TXLAYER_OL3; + field->offset = 4; + field->sizem1 = 1; + /* Incremented linearly per segment */ + field->alg = NIX_LSOALG_ADD_SEGNUM; + field++; + } + + /* Inner IPv4/IPv6 */ + field->layer = NIX_TXLAYER_IL3; + field->offset = inner_v4 ? 
2 : 4; + field->sizem1 = 1; /* 2B */ + field->alg = NIX_LSOALG_ADD_PAYLEN; + field++; + if (inner_v4) { + /* IPID field */ + field->layer = NIX_TXLAYER_IL3; + field->offset = 4; + field->sizem1 = 1; + /* Incremented linearly per segment */ + field->alg = NIX_LSOALG_ADD_SEGNUM; + field++; + } + + /* TCP sequence number update */ + field->layer = NIX_TXLAYER_IL4; + field->offset = 4; + field->sizem1 = 3; /* 4 bytes */ + field->alg = NIX_LSOALG_ADD_OFFSET; + field++; + + /* TCP flags field */ + field->layer = NIX_TXLAYER_IL4; + field->offset = 12; + field->sizem1 = 1; + field->alg = NIX_LSOALG_TCP_FLAGS; + field++; +} + +static int +nix_setup_lso_formats(struct otx2_eth_dev *dev) +{ + struct otx2_mbox *mbox = dev->mbox; + struct nix_lso_format_cfg_rsp *rsp; + struct nix_lso_format_cfg *req; + uint8_t base; + int rc; + + /* Skip if TSO was not requested */ + if (!(dev->tx_offload_flags & NIX_TX_OFFLOAD_TSO_F)) + return 0; + /* + * IPv4/TCP LSO + */ + req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox); + nix_lso_tcp(req, true); + rc = otx2_mbox_process_msg(mbox, (void *)&rsp); + if (rc) + return rc; + + base = rsp->lso_format_idx; + if (base != NIX_LSO_FORMAT_IDX_TSOV4) + return -EFAULT; + dev->lso_base_idx = base; + otx2_nix_dbg("tcpv4 lso fmt=%u", base); + + + /* + * IPv6/TCP LSO + */ + req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox); + nix_lso_tcp(req, false); + rc = otx2_mbox_process_msg(mbox, (void *)&rsp); + if (rc) + return rc; + + if (rsp->lso_format_idx != base + 1) + return -EFAULT; + otx2_nix_dbg("tcpv6 lso fmt=%u\n", base + 1); + + /* + * IPv4/UDP/TUN HDR/IPv4/TCP LSO + */ + req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox); + nix_lso_udp_tun_tcp(req, true, true); + rc = otx2_mbox_process_msg(mbox, (void *)&rsp); + if (rc) + return rc; + + if (rsp->lso_format_idx != base + 2) + return -EFAULT; + otx2_nix_dbg("udp tun v4v4 fmt=%u\n", base + 2); + + /* + * IPv4/UDP/TUN HDR/IPv6/TCP LSO + */ + req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox); + nix_lso_udp_tun_tcp(req, true, false); + rc = otx2_mbox_process_msg(mbox, (void *)&rsp); + if (rc) + return rc; + + if (rsp->lso_format_idx != base + 3) + return -EFAULT; + otx2_nix_dbg("udp tun v4v6 fmt=%u\n", base + 3); + + /* + * IPv6/UDP/TUN HDR/IPv4/TCP LSO + */ + req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox); + nix_lso_udp_tun_tcp(req, false, true); + rc = otx2_mbox_process_msg(mbox, (void *)&rsp); + if (rc) + return rc; + + if (rsp->lso_format_idx != base + 4) + return -EFAULT; + otx2_nix_dbg("udp tun v6v4 fmt=%u\n", base + 4); + + /* + * IPv6/UDP/TUN HDR/IPv6/TCP LSO + */ + req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox); + nix_lso_udp_tun_tcp(req, false, false); + rc = otx2_mbox_process_msg(mbox, (void *)&rsp); + if (rc) + return rc; + if (rsp->lso_format_idx != base + 5) + return -EFAULT; + otx2_nix_dbg("udp tun v6v6 fmt=%u\n", base + 5); + + /* + * IPv4/TUN HDR/IPv4/TCP LSO + */ + req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox); + nix_lso_tun_tcp(req, true, true); + rc = otx2_mbox_process_msg(mbox, (void *)&rsp); + if (rc) + return rc; + + if (rsp->lso_format_idx != base + 6) + return -EFAULT; + otx2_nix_dbg("tun v4v4 fmt=%u\n", base + 6); + + /* + * IPv4/TUN HDR/IPv6/TCP LSO + */ + req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox); + nix_lso_tun_tcp(req, true, false); + rc = otx2_mbox_process_msg(mbox, (void *)&rsp); + if (rc) + return rc; + + if (rsp->lso_format_idx != base + 7) + return -EFAULT; + otx2_nix_dbg("tun v4v6 fmt=%u\n", base + 7); + + /* + * IPv6/TUN HDR/IPv4/TCP LSO + */ + req = 
otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox); + nix_lso_tun_tcp(req, false, true); + rc = otx2_mbox_process_msg(mbox, (void *)&rsp); + if (rc) + return rc; + + if (rsp->lso_format_idx != base + 8) + return -EFAULT; + otx2_nix_dbg("tun v6v4 fmt=%u\n", base + 8); + + /* + * IPv6/TUN HDR/IPv6/TCP LSO + */ + req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox); + nix_lso_tun_tcp(req, false, false); + rc = otx2_mbox_process_msg(mbox, (void *)&rsp); + if (rc) + return rc; + if (rsp->lso_format_idx != base + 9) + return -EFAULT; + otx2_nix_dbg("tun v6v6 fmt=%u\n", base + 9); + return 0; +} + +static int +otx2_nix_configure(struct rte_eth_dev *eth_dev) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct rte_eth_dev_data *data = eth_dev->data; + struct rte_eth_conf *conf = &data->dev_conf; + struct rte_eth_rxmode *rxmode = &conf->rxmode; + struct rte_eth_txmode *txmode = &conf->txmode; + char ea_fmt[RTE_ETHER_ADDR_FMT_SIZE]; + struct rte_ether_addr *ea; + uint8_t nb_rxq, nb_txq; + int rc; + + rc = -EINVAL; + + /* Sanity checks */ + if (rte_eal_has_hugepages() == 0) { + otx2_err("Huge page is not configured"); + goto fail_configure; + } + + if (conf->dcb_capability_en == 1) { + otx2_err("dcb enable is not supported"); + goto fail_configure; + } + + if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) { + otx2_err("Flow director is not supported"); + goto fail_configure; + } + + if (rxmode->mq_mode != ETH_MQ_RX_NONE && + rxmode->mq_mode != ETH_MQ_RX_RSS) { + otx2_err("Unsupported mq rx mode %d", rxmode->mq_mode); + goto fail_configure; + } + + if (txmode->mq_mode != ETH_MQ_TX_NONE) { + otx2_err("Unsupported mq tx mode %d", txmode->mq_mode); + goto fail_configure; + } + + if (otx2_dev_is_Ax(dev) && + (txmode->offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) && + ((txmode->offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) || + (txmode->offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM))) { + otx2_err("Outer IP and SCTP checksum unsupported"); + goto fail_configure; + } + + /* Free the resources allocated from the previous configure */ + if (dev->configured == 1) { + otx2_eth_sec_fini(eth_dev); + otx2_nix_rxchan_bpid_cfg(eth_dev, false); + otx2_nix_vlan_fini(eth_dev); + otx2_nix_mc_addr_list_uninstall(eth_dev); + otx2_flow_free_all_resources(dev); + oxt2_nix_unregister_queue_irqs(eth_dev); + if (eth_dev->data->dev_conf.intr_conf.rxq) + oxt2_nix_unregister_cq_irqs(eth_dev); + nix_set_nop_rxtx_function(eth_dev); + rc = nix_store_queue_cfg_and_then_release(eth_dev); + if (rc) + goto fail_configure; + otx2_nix_tm_fini(eth_dev); + nix_lf_free(dev); + } + + dev->rx_offloads = rxmode->offloads; + dev->tx_offloads = txmode->offloads; + dev->rx_offload_flags |= nix_rx_offload_flags(eth_dev); + dev->tx_offload_flags |= nix_tx_offload_flags(eth_dev); + dev->rss_info.rss_grps = NIX_RSS_GRPS; + + nb_rxq = RTE_MAX(data->nb_rx_queues, 1); + nb_txq = RTE_MAX(data->nb_tx_queues, 1); + + /* Alloc a nix lf */ + rc = nix_lf_alloc(dev, nb_rxq, nb_txq); + if (rc) { + otx2_err("Failed to init nix_lf rc=%d", rc); + goto fail_offloads; + } + + otx2_nix_err_intr_enb_dis(eth_dev, true); + otx2_nix_ras_intr_enb_dis(eth_dev, true); + + if (dev->ptp_en && + dev->npc_flow.switch_header_type == OTX2_PRIV_FLAGS_HIGIG) { + otx2_err("Both PTP and switch header enabled"); + goto free_nix_lf; + } + + rc = nix_lf_switch_header_type_enable(dev, true); + if (rc) { + otx2_err("Failed to enable switch type nix_lf rc=%d", rc); + goto free_nix_lf; + } + + rc = nix_setup_lso_formats(dev); + if (rc) { + otx2_err("failed to setup nix lso format fields, rc=%d", 
rc); + goto free_nix_lf; + } + + /* Configure RSS */ + rc = otx2_nix_rss_config(eth_dev); + if (rc) { + otx2_err("Failed to configure rss rc=%d", rc); + goto free_nix_lf; + } + + /* Init the default TM scheduler hierarchy */ + rc = otx2_nix_tm_init_default(eth_dev); + if (rc) { + otx2_err("Failed to init traffic manager rc=%d", rc); + goto free_nix_lf; + } + + rc = otx2_nix_vlan_offload_init(eth_dev); + if (rc) { + otx2_err("Failed to init vlan offload rc=%d", rc); + goto tm_fini; + } + + /* Register queue IRQs */ + rc = oxt2_nix_register_queue_irqs(eth_dev); + if (rc) { + otx2_err("Failed to register queue interrupts rc=%d", rc); + goto vlan_fini; + } + + /* Register cq IRQs */ + if (eth_dev->data->dev_conf.intr_conf.rxq) { + if (eth_dev->data->nb_rx_queues > dev->cints) { + otx2_err("Rx interrupt cannot be enabled, rxq > %d", + dev->cints); + goto q_irq_fini; + } + /* Rx interrupt feature cannot work with vector mode because, + * vector mode doesn't process packets unless min 4 pkts are + * received, while cq interrupts are generated even for 1 pkt + * in the CQ. + */ + dev->scalar_ena = true; + + rc = oxt2_nix_register_cq_irqs(eth_dev); + if (rc) { + otx2_err("Failed to register CQ interrupts rc=%d", rc); + goto q_irq_fini; + } + } + + /* Configure loop back mode */ + rc = cgx_intlbk_enable(dev, eth_dev->data->dev_conf.lpbk_mode); + if (rc) { + otx2_err("Failed to configure cgx loop back mode rc=%d", rc); + goto cq_fini; + } + + rc = otx2_nix_rxchan_bpid_cfg(eth_dev, true); + if (rc) { + otx2_err("Failed to configure nix rx chan bpid cfg rc=%d", rc); + goto cq_fini; + } + + /* Enable security */ + rc = otx2_eth_sec_init(eth_dev); + if (rc) + goto cq_fini; + + rc = otx2_nix_flow_ctrl_init(eth_dev); + if (rc) { + otx2_err("Failed to init flow ctrl mode %d", rc); + goto cq_fini; + } + + rc = otx2_nix_mc_addr_list_install(eth_dev); + if (rc < 0) { + otx2_err("Failed to install mc address list rc=%d", rc); + goto sec_fini; + } + + /* + * Restore queue config when reconfigure followed by + * reconfigure and no queue configure invoked from application case. 
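+ * The queue state saved earlier by nix_store_queue_cfg_and_then_release()
+ * is replayed by nix_restore_queue_cfg() below, so the queues stay usable
+ * when the port is restarted without an explicit queue re-setup.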
+ */ + if (dev->configured == 1) { + rc = nix_restore_queue_cfg(eth_dev); + if (rc) + goto uninstall_mc_list; + } + + /* Update the mac address */ + ea = eth_dev->data->mac_addrs; + memcpy(ea, dev->mac_addr, RTE_ETHER_ADDR_LEN); + if (rte_is_zero_ether_addr(ea)) + rte_eth_random_addr((uint8_t *)ea); + + rte_ether_format_addr(ea_fmt, RTE_ETHER_ADDR_FMT_SIZE, ea); + + /* Apply new link configurations if changed */ + rc = otx2_apply_link_speed(eth_dev); + if (rc) { + otx2_err("Failed to set link configuration"); + goto uninstall_mc_list; + } + + otx2_nix_dbg("Configured port%d mac=%s nb_rxq=%d nb_txq=%d" + " rx_offloads=0x%" PRIx64 " tx_offloads=0x%" PRIx64 "" + " rx_flags=0x%x tx_flags=0x%x", + eth_dev->data->port_id, ea_fmt, nb_rxq, + nb_txq, dev->rx_offloads, dev->tx_offloads, + dev->rx_offload_flags, dev->tx_offload_flags); + + /* All good */ + dev->configured = 1; + dev->configured_nb_rx_qs = data->nb_rx_queues; + dev->configured_nb_tx_qs = data->nb_tx_queues; + return 0; + +uninstall_mc_list: + otx2_nix_mc_addr_list_uninstall(eth_dev); +sec_fini: + otx2_eth_sec_fini(eth_dev); +cq_fini: + oxt2_nix_unregister_cq_irqs(eth_dev); +q_irq_fini: + oxt2_nix_unregister_queue_irqs(eth_dev); +vlan_fini: + otx2_nix_vlan_fini(eth_dev); +tm_fini: + otx2_nix_tm_fini(eth_dev); +free_nix_lf: + nix_lf_free(dev); +fail_offloads: + dev->rx_offload_flags &= ~nix_rx_offload_flags(eth_dev); + dev->tx_offload_flags &= ~nix_tx_offload_flags(eth_dev); +fail_configure: + dev->configured = 0; + return rc; +} + +int +otx2_nix_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qidx) +{ + struct rte_eth_dev_data *data = eth_dev->data; + struct otx2_eth_txq *txq; + int rc = -EINVAL; + + txq = eth_dev->data->tx_queues[qidx]; + + if (data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STARTED) + return 0; + + rc = otx2_nix_sq_sqb_aura_fc(txq, true); + if (rc) { + otx2_err("Failed to enable sqb aura fc, txq=%u, rc=%d", + qidx, rc); + goto done; + } + + data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED; + +done: + return rc; +} + +int +otx2_nix_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qidx) +{ + struct rte_eth_dev_data *data = eth_dev->data; + struct otx2_eth_txq *txq; + int rc; + + txq = eth_dev->data->tx_queues[qidx]; + + if (data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STOPPED) + return 0; + + txq->fc_cache_pkts = 0; + + rc = otx2_nix_sq_sqb_aura_fc(txq, false); + if (rc) { + otx2_err("Failed to disable sqb aura fc, txq=%u, rc=%d", + qidx, rc); + goto done; + } + + data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED; + +done: + return rc; +} + +static int +otx2_nix_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qidx) +{ + struct otx2_eth_rxq *rxq = eth_dev->data->rx_queues[qidx]; + struct rte_eth_dev_data *data = eth_dev->data; + int rc; + + if (data->rx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STARTED) + return 0; + + rc = nix_rq_enb_dis(rxq->eth_dev, rxq, true); + if (rc) { + otx2_err("Failed to enable rxq=%u, rc=%d", qidx, rc); + goto done; + } + + data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED; + +done: + return rc; +} + +static int +otx2_nix_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qidx) +{ + struct otx2_eth_rxq *rxq = eth_dev->data->rx_queues[qidx]; + struct rte_eth_dev_data *data = eth_dev->data; + int rc; + + if (data->rx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STOPPED) + return 0; + + rc = nix_rq_enb_dis(rxq->eth_dev, rxq, false); + if (rc) { + otx2_err("Failed to disable rxq=%u, rc=%d", qidx, rc); + goto done; + } + + data->rx_queue_state[qidx] = 
RTE_ETH_QUEUE_STATE_STOPPED; + +done: + return rc; +} + +static void +otx2_nix_dev_stop(struct rte_eth_dev *eth_dev) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct rte_mbuf *rx_pkts[32]; + struct otx2_eth_rxq *rxq; + int count, i, j, rc; + + nix_lf_switch_header_type_enable(dev, false); + nix_cgx_stop_link_event(dev); + npc_rx_disable(dev); + + /* Stop rx queues and free up pkts pending */ + for (i = 0; i < eth_dev->data->nb_rx_queues; i++) { + rc = otx2_nix_rx_queue_stop(eth_dev, i); + if (rc) + continue; + + rxq = eth_dev->data->rx_queues[i]; + count = dev->rx_pkt_burst_no_offload(rxq, rx_pkts, 32); + while (count) { + for (j = 0; j < count; j++) + rte_pktmbuf_free(rx_pkts[j]); + count = dev->rx_pkt_burst_no_offload(rxq, rx_pkts, 32); + } + } + + /* Stop tx queues */ + for (i = 0; i < eth_dev->data->nb_tx_queues; i++) + otx2_nix_tx_queue_stop(eth_dev, i); +} + +static int +otx2_nix_dev_start(struct rte_eth_dev *eth_dev) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + int rc, i; + + /* MTU recalculate should be avoided here if PTP is enabled by PF, as + * otx2_nix_recalc_mtu would be invoked during otx2_nix_ptp_enable_vf + * call below. + */ + if (eth_dev->data->nb_rx_queues != 0 && !otx2_ethdev_is_ptp_en(dev)) { + rc = otx2_nix_recalc_mtu(eth_dev); + if (rc) + return rc; + } + + /* Start rx queues */ + for (i = 0; i < eth_dev->data->nb_rx_queues; i++) { + rc = otx2_nix_rx_queue_start(eth_dev, i); + if (rc) + return rc; + } + + /* Start tx queues */ + for (i = 0; i < eth_dev->data->nb_tx_queues; i++) { + rc = otx2_nix_tx_queue_start(eth_dev, i); + if (rc) + return rc; + } + + rc = otx2_nix_update_flow_ctrl_mode(eth_dev); + if (rc) { + otx2_err("Failed to update flow ctrl mode %d", rc); + return rc; + } + + /* Enable PTP if it was requested by the app or if it is already + * enabled in PF owning this VF + */ + memset(&dev->tstamp, 0, sizeof(struct otx2_timesync_info)); + if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) || + otx2_ethdev_is_ptp_en(dev)) + otx2_nix_timesync_enable(eth_dev); + else + otx2_nix_timesync_disable(eth_dev); + + /* Update VF about data off shifted by 8 bytes if PTP already + * enabled in PF owning this VF + */ + if (otx2_ethdev_is_ptp_en(dev) && otx2_dev_is_vf(dev)) + otx2_nix_ptp_enable_vf(eth_dev); + + rc = npc_rx_enable(dev); + if (rc) { + otx2_err("Failed to enable NPC rx %d", rc); + return rc; + } + + otx2_nix_toggle_flag_link_cfg(dev, true); + + rc = nix_cgx_start_link_event(dev); + if (rc) { + otx2_err("Failed to start cgx link event %d", rc); + goto rx_disable; + } + + otx2_nix_toggle_flag_link_cfg(dev, false); + otx2_eth_set_tx_function(eth_dev); + otx2_eth_set_rx_function(eth_dev); + + return 0; + +rx_disable: + npc_rx_disable(dev); + otx2_nix_toggle_flag_link_cfg(dev, false); + return rc; +} + +static int otx2_nix_dev_reset(struct rte_eth_dev *eth_dev); +static void otx2_nix_dev_close(struct rte_eth_dev *eth_dev); + +/* Initialize and register driver with DPDK Application */ +static const struct eth_dev_ops otx2_eth_dev_ops = { + .dev_infos_get = otx2_nix_info_get, + .dev_configure = otx2_nix_configure, + .link_update = otx2_nix_link_update, + .tx_queue_setup = otx2_nix_tx_queue_setup, + .tx_queue_release = otx2_nix_tx_queue_release, + .tm_ops_get = otx2_nix_tm_ops_get, + .rx_queue_setup = otx2_nix_rx_queue_setup, + .rx_queue_release = otx2_nix_rx_queue_release, + .dev_start = otx2_nix_dev_start, + .dev_stop = otx2_nix_dev_stop, + .dev_close = otx2_nix_dev_close, + .tx_queue_start = otx2_nix_tx_queue_start, + 
.tx_queue_stop = otx2_nix_tx_queue_stop, + .rx_queue_start = otx2_nix_rx_queue_start, + .rx_queue_stop = otx2_nix_rx_queue_stop, + .dev_set_link_up = otx2_nix_dev_set_link_up, + .dev_set_link_down = otx2_nix_dev_set_link_down, + .dev_supported_ptypes_get = otx2_nix_supported_ptypes_get, + .dev_ptypes_set = otx2_nix_ptypes_set, + .dev_reset = otx2_nix_dev_reset, + .stats_get = otx2_nix_dev_stats_get, + .stats_reset = otx2_nix_dev_stats_reset, + .get_reg = otx2_nix_dev_get_reg, + .mtu_set = otx2_nix_mtu_set, + .mac_addr_add = otx2_nix_mac_addr_add, + .mac_addr_remove = otx2_nix_mac_addr_del, + .mac_addr_set = otx2_nix_mac_addr_set, + .set_mc_addr_list = otx2_nix_set_mc_addr_list, + .promiscuous_enable = otx2_nix_promisc_enable, + .promiscuous_disable = otx2_nix_promisc_disable, + .allmulticast_enable = otx2_nix_allmulticast_enable, + .allmulticast_disable = otx2_nix_allmulticast_disable, + .queue_stats_mapping_set = otx2_nix_queue_stats_mapping, + .reta_update = otx2_nix_dev_reta_update, + .reta_query = otx2_nix_dev_reta_query, + .rss_hash_update = otx2_nix_rss_hash_update, + .rss_hash_conf_get = otx2_nix_rss_hash_conf_get, + .xstats_get = otx2_nix_xstats_get, + .xstats_get_names = otx2_nix_xstats_get_names, + .xstats_reset = otx2_nix_xstats_reset, + .xstats_get_by_id = otx2_nix_xstats_get_by_id, + .xstats_get_names_by_id = otx2_nix_xstats_get_names_by_id, + .rxq_info_get = otx2_nix_rxq_info_get, + .txq_info_get = otx2_nix_txq_info_get, + .rx_burst_mode_get = otx2_rx_burst_mode_get, + .tx_burst_mode_get = otx2_tx_burst_mode_get, + .rx_queue_count = otx2_nix_rx_queue_count, + .rx_descriptor_done = otx2_nix_rx_descriptor_done, + .rx_descriptor_status = otx2_nix_rx_descriptor_status, + .tx_descriptor_status = otx2_nix_tx_descriptor_status, + .tx_done_cleanup = otx2_nix_tx_done_cleanup, + .set_queue_rate_limit = otx2_nix_tm_set_queue_rate_limit, + .pool_ops_supported = otx2_nix_pool_ops_supported, + .filter_ctrl = otx2_nix_dev_filter_ctrl, + .get_module_info = otx2_nix_get_module_info, + .get_module_eeprom = otx2_nix_get_module_eeprom, + .fw_version_get = otx2_nix_fw_version_get, + .flow_ctrl_get = otx2_nix_flow_ctrl_get, + .flow_ctrl_set = otx2_nix_flow_ctrl_set, + .timesync_enable = otx2_nix_timesync_enable, + .timesync_disable = otx2_nix_timesync_disable, + .timesync_read_rx_timestamp = otx2_nix_timesync_read_rx_timestamp, + .timesync_read_tx_timestamp = otx2_nix_timesync_read_tx_timestamp, + .timesync_adjust_time = otx2_nix_timesync_adjust_time, + .timesync_read_time = otx2_nix_timesync_read_time, + .timesync_write_time = otx2_nix_timesync_write_time, + .vlan_offload_set = otx2_nix_vlan_offload_set, + .vlan_filter_set = otx2_nix_vlan_filter_set, + .vlan_strip_queue_set = otx2_nix_vlan_strip_queue_set, + .vlan_tpid_set = otx2_nix_vlan_tpid_set, + .vlan_pvid_set = otx2_nix_vlan_pvid_set, + .rx_queue_intr_enable = otx2_nix_rx_queue_intr_enable, + .rx_queue_intr_disable = otx2_nix_rx_queue_intr_disable, + .read_clock = otx2_nix_read_clock, +}; + +static inline int +nix_lf_attach(struct otx2_eth_dev *dev) +{ + struct otx2_mbox *mbox = dev->mbox; + struct rsrc_attach_req *req; + + /* Attach NIX(lf) */ + req = otx2_mbox_alloc_msg_attach_resources(mbox); + req->modify = true; + req->nixlf = true; + + return otx2_mbox_process(mbox); +} + +static inline int +nix_lf_get_msix_offset(struct otx2_eth_dev *dev) +{ + struct otx2_mbox *mbox = dev->mbox; + struct msix_offset_rsp *msix_rsp; + int rc; + + /* Get NPA and NIX MSIX vector offsets */ + otx2_mbox_alloc_msg_msix_offset(mbox); + + rc = 
otx2_mbox_process_msg(mbox, (void *)&msix_rsp); + + dev->nix_msixoff = msix_rsp->nix_msixoff; + + return rc; +} + +static inline int +otx2_eth_dev_lf_detach(struct otx2_mbox *mbox) +{ + struct rsrc_detach_req *req; + + req = otx2_mbox_alloc_msg_detach_resources(mbox); + + /* Detach all except npa lf */ + req->partial = true; + req->nixlf = true; + req->sso = true; + req->ssow = true; + req->timlfs = true; + req->cptlfs = true; + + return otx2_mbox_process(mbox); +} + +static bool +otx2_eth_dev_is_sdp(struct rte_pci_device *pci_dev) +{ + if (pci_dev->id.device_id == PCI_DEVID_OCTEONTX2_RVU_SDP_PF || + pci_dev->id.device_id == PCI_DEVID_OCTEONTX2_RVU_SDP_VF) + return true; + return false; +} + +static int +otx2_eth_dev_init(struct rte_eth_dev *eth_dev) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct rte_pci_device *pci_dev; + int rc, max_entries; + + eth_dev->dev_ops = &otx2_eth_dev_ops; + + /* For secondary processes, the primary has done all the work */ + if (rte_eal_process_type() != RTE_PROC_PRIMARY) { + /* Setup callbacks for secondary process */ + otx2_eth_set_tx_function(eth_dev); + otx2_eth_set_rx_function(eth_dev); + return 0; + } + + pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); + + rte_eth_copy_pci_info(eth_dev, pci_dev); + eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE; + + /* Zero out everything after OTX2_DEV to allow proper dev_reset() */ + memset(&dev->otx2_eth_dev_data_start, 0, sizeof(*dev) - + offsetof(struct otx2_eth_dev, otx2_eth_dev_data_start)); + + /* Parse devargs string */ + rc = otx2_ethdev_parse_devargs(eth_dev->device->devargs, dev); + if (rc) { + otx2_err("Failed to parse devargs rc=%d", rc); + goto error; + } + + if (!dev->mbox_active) { + /* Initialize the base otx2_dev object + * only if already present + */ + rc = otx2_dev_init(pci_dev, dev); + if (rc) { + otx2_err("Failed to initialize otx2_dev rc=%d", rc); + goto error; + } + } + if (otx2_eth_dev_is_sdp(pci_dev)) + dev->sdp_link = true; + else + dev->sdp_link = false; + /* Device generic callbacks */ + dev->ops = &otx2_dev_ops; + dev->eth_dev = eth_dev; + + /* Grab the NPA LF if required */ + rc = otx2_npa_lf_init(pci_dev, dev); + if (rc) + goto otx2_dev_uninit; + + dev->configured = 0; + dev->drv_inited = true; + dev->ptype_disable = 0; + dev->base = dev->bar2 + (RVU_BLOCK_ADDR_NIX0 << 20); + dev->lmt_addr = dev->bar2 + (RVU_BLOCK_ADDR_LMT << 20); + + /* Attach NIX LF */ + rc = nix_lf_attach(dev); + if (rc) + goto otx2_npa_uninit; + + /* Get NIX MSIX offset */ + rc = nix_lf_get_msix_offset(dev); + if (rc) + goto otx2_npa_uninit; + + /* Register LF irq handlers */ + rc = otx2_nix_register_irqs(eth_dev); + if (rc) + goto mbox_detach; + + /* Get maximum number of supported MAC entries */ + max_entries = otx2_cgx_mac_max_entries_get(dev); + if (max_entries < 0) { + otx2_err("Failed to get max entries for mac addr"); + rc = -ENOTSUP; + goto unregister_irq; + } + + /* For VFs, returned max_entries will be 0. But to keep default MAC + * address, one entry must be allocated. So setting up to 1. 
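+ * The MAC address table allocated below is sized as
+ * max_entries * RTE_ETHER_ADDR_LEN bytes.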
+ */ + if (max_entries == 0) + max_entries = 1; + + eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", max_entries * + RTE_ETHER_ADDR_LEN, 0); + if (eth_dev->data->mac_addrs == NULL) { + otx2_err("Failed to allocate memory for mac addr"); + rc = -ENOMEM; + goto unregister_irq; + } + + dev->max_mac_entries = max_entries; + + rc = otx2_nix_mac_addr_get(eth_dev, dev->mac_addr); + if (rc) + goto free_mac_addrs; + + /* Update the mac address */ + memcpy(eth_dev->data->mac_addrs, dev->mac_addr, RTE_ETHER_ADDR_LEN); + + /* Also sync same MAC address to CGX table */ + otx2_cgx_mac_addr_set(eth_dev, ð_dev->data->mac_addrs[0]); + + /* Initialize the tm data structures */ + otx2_nix_tm_conf_init(eth_dev); + + dev->tx_offload_capa = nix_get_tx_offload_capa(dev); + dev->rx_offload_capa = nix_get_rx_offload_capa(dev); + + if (otx2_dev_is_96xx_A0(dev) || + otx2_dev_is_95xx_Ax(dev)) { + dev->hwcap |= OTX2_FIXUP_F_MIN_4K_Q; + dev->hwcap |= OTX2_FIXUP_F_LIMIT_CQ_FULL; + } + + /* Create security ctx */ + rc = otx2_eth_sec_ctx_create(eth_dev); + if (rc) + goto free_mac_addrs; + dev->tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY; + dev->rx_offload_capa |= DEV_RX_OFFLOAD_SECURITY; + + /* Initialize rte-flow */ + rc = otx2_flow_init(dev); + if (rc) + goto sec_ctx_destroy; + + otx2_nix_mc_filter_init(dev); + + otx2_nix_dbg("Port=%d pf=%d vf=%d ver=%s msix_off=%d hwcap=0x%" PRIx64 + " rxoffload_capa=0x%" PRIx64 " txoffload_capa=0x%" PRIx64, + eth_dev->data->port_id, dev->pf, dev->vf, + OTX2_ETH_DEV_PMD_VERSION, dev->nix_msixoff, dev->hwcap, + dev->rx_offload_capa, dev->tx_offload_capa); + return 0; + +sec_ctx_destroy: + otx2_eth_sec_ctx_destroy(eth_dev); +free_mac_addrs: + rte_free(eth_dev->data->mac_addrs); +unregister_irq: + otx2_nix_unregister_irqs(eth_dev); +mbox_detach: + otx2_eth_dev_lf_detach(dev->mbox); +otx2_npa_uninit: + otx2_npa_lf_fini(); +otx2_dev_uninit: + otx2_dev_fini(pci_dev, dev); +error: + otx2_err("Failed to init nix eth_dev rc=%d", rc); + return rc; +} + +static int +otx2_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool mbox_close) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct rte_pci_device *pci_dev; + int rc, i; + + /* Nothing to be done for secondary processes */ + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; + + /* Clear the flag since we are closing down */ + dev->configured = 0; + + /* Disable nix bpid config */ + otx2_nix_rxchan_bpid_cfg(eth_dev, false); + + npc_rx_disable(dev); + + /* Disable vlan offloads */ + otx2_nix_vlan_fini(eth_dev); + + /* Disable other rte_flow entries */ + otx2_flow_fini(dev); + + /* Free multicast filter list */ + otx2_nix_mc_filter_fini(dev); + + /* Disable PTP if already enabled */ + if (otx2_ethdev_is_ptp_en(dev)) + otx2_nix_timesync_disable(eth_dev); + + nix_cgx_stop_link_event(dev); + + /* Free up SQs */ + for (i = 0; i < eth_dev->data->nb_tx_queues; i++) { + otx2_nix_tx_queue_release(eth_dev->data->tx_queues[i]); + eth_dev->data->tx_queues[i] = NULL; + } + eth_dev->data->nb_tx_queues = 0; + + /* Free up RQ's and CQ's */ + for (i = 0; i < eth_dev->data->nb_rx_queues; i++) { + otx2_nix_rx_queue_release(eth_dev->data->rx_queues[i]); + eth_dev->data->rx_queues[i] = NULL; + } + eth_dev->data->nb_rx_queues = 0; + + /* Free tm resources */ + rc = otx2_nix_tm_fini(eth_dev); + if (rc) + otx2_err("Failed to cleanup tm, rc=%d", rc); + + /* Unregister queue irqs */ + oxt2_nix_unregister_queue_irqs(eth_dev); + + /* Unregister cq irqs */ + if (eth_dev->data->dev_conf.intr_conf.rxq) + oxt2_nix_unregister_cq_irqs(eth_dev); + + rc = 
nix_lf_free(dev); + if (rc) + otx2_err("Failed to free nix lf, rc=%d", rc); + + rc = otx2_npa_lf_fini(); + if (rc) + otx2_err("Failed to cleanup npa lf, rc=%d", rc); + + /* Disable security */ + otx2_eth_sec_fini(eth_dev); + + /* Destroy security ctx */ + otx2_eth_sec_ctx_destroy(eth_dev); + + rte_free(eth_dev->data->mac_addrs); + eth_dev->data->mac_addrs = NULL; + dev->drv_inited = false; + + pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); + otx2_nix_unregister_irqs(eth_dev); + + rc = otx2_eth_dev_lf_detach(dev->mbox); + if (rc) + otx2_err("Failed to detach resources, rc=%d", rc); + + /* Check if mbox close is needed */ + if (!mbox_close) + return 0; + + if (otx2_npa_lf_active(dev) || otx2_dev_active_vfs(dev)) { + /* Will be freed later by PMD */ + eth_dev->data->dev_private = NULL; + return 0; + } + + otx2_dev_fini(pci_dev, dev); + return 0; +} + +static void +otx2_nix_dev_close(struct rte_eth_dev *eth_dev) +{ + otx2_eth_dev_uninit(eth_dev, true); +} + +static int +otx2_nix_dev_reset(struct rte_eth_dev *eth_dev) +{ + int rc; + + rc = otx2_eth_dev_uninit(eth_dev, false); + if (rc) + return rc; + + return otx2_eth_dev_init(eth_dev); +} + +static int +nix_remove(struct rte_pci_device *pci_dev) +{ + struct rte_eth_dev *eth_dev; + struct otx2_idev_cfg *idev; + struct otx2_dev *otx2_dev; + int rc; + + eth_dev = rte_eth_dev_allocated(pci_dev->device.name); + if (eth_dev) { + /* Cleanup eth dev */ + rc = otx2_eth_dev_uninit(eth_dev, true); + if (rc) + return rc; + + rte_eth_dev_pci_release(eth_dev); + } + + /* Nothing to be done for secondary processes */ + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; + + /* Check for common resources */ + idev = otx2_intra_dev_get_cfg(); + if (!idev || !idev->npa_lf || idev->npa_lf->pci_dev != pci_dev) + return 0; + + otx2_dev = container_of(idev->npa_lf, struct otx2_dev, npalf); + + if (otx2_npa_lf_active(otx2_dev) || otx2_dev_active_vfs(otx2_dev)) + goto exit; + + /* Safe to cleanup mbox as no more users */ + otx2_dev_fini(pci_dev, otx2_dev); + rte_free(otx2_dev); + return 0; + +exit: + otx2_info("%s: common resource in use by other devices", pci_dev->name); + return -EAGAIN; +} + +static int +nix_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) +{ + int rc; + + RTE_SET_USED(pci_drv); + + rc = rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct otx2_eth_dev), + otx2_eth_dev_init); + + /* On error on secondary, recheck if port exists in primary or + * in mid of detach state. 
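+ * If the port is no longer allocated in the primary, the secondary
+ * probe is treated as a no-op and success is returned.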
+ */ + if (rte_eal_process_type() != RTE_PROC_PRIMARY && rc) + if (!rte_eth_dev_allocated(pci_dev->device.name)) + return 0; + return rc; +} + +static const struct rte_pci_id pci_nix_map[] = { + { + RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_PF) + }, + { + RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_VF) + }, + { + RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, + PCI_DEVID_OCTEONTX2_RVU_AF_VF) + }, + { + RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, + PCI_DEVID_OCTEONTX2_RVU_SDP_PF) + }, + { + RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, + PCI_DEVID_OCTEONTX2_RVU_SDP_VF) + }, + { + .vendor_id = 0, + }, +}; + +static struct rte_pci_driver pci_nix = { + .id_table = pci_nix_map, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA | + RTE_PCI_DRV_INTR_LSC, + .probe = nix_probe, + .remove = nix_remove, +}; + +RTE_PMD_REGISTER_PCI(net_octeontx2, pci_nix); +RTE_PMD_REGISTER_PCI_TABLE(net_octeontx2, pci_nix_map); +RTE_PMD_REGISTER_KMOD_DEP(net_octeontx2, "vfio-pci"); diff --git a/src/spdk/dpdk/drivers/net/octeontx2/otx2_ethdev.h b/src/spdk/dpdk/drivers/net/octeontx2/otx2_ethdev.h new file mode 100644 index 000000000..0fbf68b8e --- /dev/null +++ b/src/spdk/dpdk/drivers/net/octeontx2/otx2_ethdev.h @@ -0,0 +1,592 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(C) 2019 Marvell International Ltd. + */ + +#ifndef __OTX2_ETHDEV_H__ +#define __OTX2_ETHDEV_H__ + +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "otx2_common.h" +#include "otx2_dev.h" +#include "otx2_flow.h" +#include "otx2_irq.h" +#include "otx2_mempool.h" +#include "otx2_rx.h" +#include "otx2_tm.h" +#include "otx2_tx.h" + +#define OTX2_ETH_DEV_PMD_VERSION "1.0" + +/* Ethdev HWCAP and Fixup flags. Use from MSB bits to avoid conflict with dev */ + +/* Minimum CQ size should be 4K */ +#define OTX2_FIXUP_F_MIN_4K_Q BIT_ULL(63) +#define otx2_ethdev_fixup_is_min_4k_q(dev) \ + ((dev)->hwcap & OTX2_FIXUP_F_MIN_4K_Q) +/* Limit CQ being full */ +#define OTX2_FIXUP_F_LIMIT_CQ_FULL BIT_ULL(62) +#define otx2_ethdev_fixup_is_limit_cq_full(dev) \ + ((dev)->hwcap & OTX2_FIXUP_F_LIMIT_CQ_FULL) + +/* Used for struct otx2_eth_dev::flags */ +#define OTX2_LINK_CFG_IN_PROGRESS_F BIT_ULL(0) + +/* VLAN tag inserted by NIX_TX_VTAG_ACTION. + * In Tx space is always reserved for this in FRS. 
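+ * Up to NIX_MAX_VTAG_INS tags (NIX_MAX_VTAG_ACT_SIZE bytes) are
+ * accounted for when deriving NIX_MAX_FRS below.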
+ */ +#define NIX_MAX_VTAG_INS 2 +#define NIX_MAX_VTAG_ACT_SIZE (4 * NIX_MAX_VTAG_INS) + +/* ETH_HLEN+ETH_FCS+2*VLAN_HLEN */ +#define NIX_L2_OVERHEAD \ + (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + 8) + +/* HW config of frame size doesn't include FCS */ +#define NIX_MAX_HW_FRS 9212 +#define NIX_MIN_HW_FRS 60 + +/* Since HW FRS includes NPC VTAG insertion space, user has reduced FRS */ +#define NIX_MAX_FRS \ + (NIX_MAX_HW_FRS + RTE_ETHER_CRC_LEN - NIX_MAX_VTAG_ACT_SIZE) + +#define NIX_MIN_FRS \ + (NIX_MIN_HW_FRS + RTE_ETHER_CRC_LEN) + +#define NIX_MAX_MTU \ + (NIX_MAX_FRS - NIX_L2_OVERHEAD) + +#define NIX_MAX_SQB 512 +#define NIX_DEF_SQB 16 +#define NIX_MIN_SQB 8 +#define NIX_SQB_LIST_SPACE 2 +#define NIX_RSS_RETA_SIZE_MAX 256 +/* Group 0 will be used for RSS, 1 -7 will be used for rte_flow RSS action*/ +#define NIX_RSS_GRPS 8 +#define NIX_HASH_KEY_SIZE 48 /* 352 Bits */ +#define NIX_RSS_RETA_SIZE 64 +#define NIX_RX_MIN_DESC 16 +#define NIX_RX_MIN_DESC_ALIGN 16 +#define NIX_RX_NB_SEG_MAX 6 +#define NIX_CQ_ENTRY_SZ 128 +#define NIX_CQ_ALIGN 512 +#define NIX_SQB_LOWER_THRESH 70 +#define LMT_SLOT_MASK 0x7f +#define NIX_RX_DEFAULT_RING_SZ 4096 + +/* If PTP is enabled additional SEND MEM DESC is required which + * takes 2 words, hence max 7 iova address are possible + */ +#if defined(RTE_LIBRTE_IEEE1588) +#define NIX_TX_NB_SEG_MAX 7 +#else +#define NIX_TX_NB_SEG_MAX 9 +#endif + +#define NIX_TX_MSEG_SG_DWORDS \ + ((RTE_ALIGN_MUL_CEIL(NIX_TX_NB_SEG_MAX, 3) / 3) \ + + NIX_TX_NB_SEG_MAX) + +/* Apply BP/DROP when CQ is 95% full */ +#define NIX_CQ_THRESH_LEVEL (5 * 256 / 100) +#define NIX_CQ_FULL_ERRATA_SKID (1024ull * 256) + +#define CQ_OP_STAT_OP_ERR 63 +#define CQ_OP_STAT_CQ_ERR 46 + +#define OP_ERR BIT_ULL(CQ_OP_STAT_OP_ERR) +#define CQ_ERR BIT_ULL(CQ_OP_STAT_CQ_ERR) + +#define CQ_CQE_THRESH_DEFAULT 0x1ULL /* IRQ triggered when + * NIX_LF_CINTX_CNT[QCOUNT] + * crosses this value + */ +#define CQ_TIMER_THRESH_DEFAULT 0xAULL /* ~1usec i.e (0xA * 100nsec) */ +#define CQ_TIMER_THRESH_MAX 255 + +#define NIX_RSS_L3_L4_SRC_DST (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY \ + | ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY) + +#define NIX_RSS_OFFLOAD (ETH_RSS_PORT | ETH_RSS_IP | ETH_RSS_UDP |\ + ETH_RSS_TCP | ETH_RSS_SCTP | \ + ETH_RSS_TUNNEL | ETH_RSS_L2_PAYLOAD | \ + NIX_RSS_L3_L4_SRC_DST) + +#define NIX_TX_OFFLOAD_CAPA ( \ + DEV_TX_OFFLOAD_MBUF_FAST_FREE | \ + DEV_TX_OFFLOAD_MT_LOCKFREE | \ + DEV_TX_OFFLOAD_VLAN_INSERT | \ + DEV_TX_OFFLOAD_QINQ_INSERT | \ + DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | \ + DEV_TX_OFFLOAD_OUTER_UDP_CKSUM | \ + DEV_TX_OFFLOAD_TCP_CKSUM | \ + DEV_TX_OFFLOAD_UDP_CKSUM | \ + DEV_TX_OFFLOAD_SCTP_CKSUM | \ + DEV_TX_OFFLOAD_TCP_TSO | \ + DEV_TX_OFFLOAD_VXLAN_TNL_TSO | \ + DEV_TX_OFFLOAD_GENEVE_TNL_TSO | \ + DEV_TX_OFFLOAD_GRE_TNL_TSO | \ + DEV_TX_OFFLOAD_MULTI_SEGS | \ + DEV_TX_OFFLOAD_IPV4_CKSUM) + +#define NIX_RX_OFFLOAD_CAPA ( \ + DEV_RX_OFFLOAD_CHECKSUM | \ + DEV_RX_OFFLOAD_SCTP_CKSUM | \ + DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | \ + DEV_RX_OFFLOAD_SCATTER | \ + DEV_RX_OFFLOAD_JUMBO_FRAME | \ + DEV_RX_OFFLOAD_OUTER_UDP_CKSUM | \ + DEV_RX_OFFLOAD_VLAN_STRIP | \ + DEV_RX_OFFLOAD_VLAN_FILTER | \ + DEV_RX_OFFLOAD_QINQ_STRIP | \ + DEV_RX_OFFLOAD_TIMESTAMP | \ + DEV_RX_OFFLOAD_RSS_HASH) + +#define NIX_DEFAULT_RSS_CTX_GROUP 0 +#define NIX_DEFAULT_RSS_MCAM_IDX -1 + +#define otx2_ethdev_is_ptp_en(dev) ((dev)->ptp_en) + +#define NIX_TIMESYNC_TX_CMD_LEN 8 +/* Additional timesync values. 
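+ * OTX2_CYCLECOUNTER_MASK covers the full 64-bit raw cycle counter.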
*/ +#define OTX2_CYCLECOUNTER_MASK 0xffffffffffffffffULL + +enum nix_q_size_e { + nix_q_size_16, /* 16 entries */ + nix_q_size_64, /* 64 entries */ + nix_q_size_256, + nix_q_size_1K, + nix_q_size_4K, + nix_q_size_16K, + nix_q_size_64K, + nix_q_size_256K, + nix_q_size_1M, /* Million entries */ + nix_q_size_max +}; + +struct otx2_qint { + struct rte_eth_dev *eth_dev; + uint8_t qintx; +}; + +struct otx2_rss_info { + uint64_t nix_rss; + uint32_t flowkey_cfg; + uint16_t rss_size; + uint8_t rss_grps; + uint8_t alg_idx; /* Selected algo index */ + uint16_t ind_tbl[NIX_RSS_RETA_SIZE_MAX]; + uint8_t key[NIX_HASH_KEY_SIZE]; +}; + +struct otx2_eth_qconf { + union { + struct rte_eth_txconf tx; + struct rte_eth_rxconf rx; + } conf; + void *mempool; + uint32_t socket_id; + uint16_t nb_desc; + uint8_t valid; +}; + +struct otx2_fc_info { + enum rte_eth_fc_mode mode; /**< Link flow control mode */ + uint8_t rx_pause; + uint8_t tx_pause; + uint8_t chan_cnt; + uint16_t bpid[NIX_MAX_CHAN]; +}; + +struct vlan_mkex_info { + struct npc_xtract_info la_xtract; + struct npc_xtract_info lb_xtract; + uint64_t lb_lt_offset; +}; + +struct mcast_entry { + struct rte_ether_addr mcast_mac; + uint16_t mcam_index; + TAILQ_ENTRY(mcast_entry) next; +}; + +TAILQ_HEAD(otx2_nix_mc_filter_tbl, mcast_entry); + +struct vlan_entry { + uint32_t mcam_idx; + uint16_t vlan_id; + TAILQ_ENTRY(vlan_entry) next; +}; + +TAILQ_HEAD(otx2_vlan_filter_tbl, vlan_entry); + +struct otx2_vlan_info { + struct otx2_vlan_filter_tbl fltr_tbl; + /* MKEX layer info */ + struct mcam_entry def_tx_mcam_ent; + struct mcam_entry def_rx_mcam_ent; + struct vlan_mkex_info mkex; + /* Default mcam entry that matches vlan packets */ + uint32_t def_rx_mcam_idx; + uint32_t def_tx_mcam_idx; + /* MCAM entry that matches double vlan packets */ + uint32_t qinq_mcam_idx; + /* Indices of tx_vtag def registers */ + uint32_t outer_vlan_idx; + uint32_t inner_vlan_idx; + uint16_t outer_vlan_tpid; + uint16_t inner_vlan_tpid; + uint16_t pvid; + /* QinQ entry allocated before default one */ + uint8_t qinq_before_def; + uint8_t pvid_insert_on; + /* Rx vtag action type */ + uint8_t vtag_type_idx; + uint8_t filter_on; + uint8_t strip_on; + uint8_t qinq_on; + uint8_t promisc_on; +}; + +struct otx2_eth_dev { + OTX2_DEV; /* Base class */ + RTE_MARKER otx2_eth_dev_data_start; + uint16_t sqb_size; + uint16_t rx_chan_base; + uint16_t tx_chan_base; + uint8_t rx_chan_cnt; + uint8_t tx_chan_cnt; + uint8_t lso_tsov4_idx; + uint8_t lso_tsov6_idx; + uint8_t lso_base_idx; + uint8_t mac_addr[RTE_ETHER_ADDR_LEN]; + uint8_t mkex_pfl_name[MKEX_NAME_LEN]; + uint8_t max_mac_entries; + uint8_t lf_tx_stats; + uint8_t lf_rx_stats; + uint16_t flags; + uint16_t cints; + uint16_t qints; + uint8_t configured; + uint8_t configured_qints; + uint8_t configured_cints; + uint8_t configured_nb_rx_qs; + uint8_t configured_nb_tx_qs; + uint8_t ptype_disable; + uint16_t nix_msixoff; + uintptr_t base; + uintptr_t lmt_addr; + uint16_t scalar_ena; + uint16_t rss_tag_as_xor; + uint16_t max_sqb_count; + uint16_t rx_offload_flags; /* Selected Rx offload flags(NIX_RX_*_F) */ + uint64_t rx_offloads; + uint16_t tx_offload_flags; /* Selected Tx offload flags(NIX_TX_*_F) */ + uint64_t tx_offloads; + uint64_t rx_offload_capa; + uint64_t tx_offload_capa; + struct otx2_qint qints_mem[RTE_MAX_QUEUES_PER_PORT]; + struct otx2_qint cints_mem[RTE_MAX_QUEUES_PER_PORT]; + uint16_t txschq[NIX_TXSCH_LVL_CNT]; + uint16_t txschq_contig[NIX_TXSCH_LVL_CNT]; + uint16_t txschq_index[NIX_TXSCH_LVL_CNT]; + uint16_t 
txschq_contig_index[NIX_TXSCH_LVL_CNT]; + /* Dis-contiguous queues */ + uint16_t txschq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC]; + /* Contiguous queues */ + uint16_t txschq_contig_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC]; + uint16_t otx2_tm_root_lvl; + uint16_t link_cfg_lvl; + uint16_t tm_flags; + uint16_t tm_leaf_cnt; + uint64_t tm_rate_min; + struct otx2_nix_tm_node_list node_list; + struct otx2_nix_tm_shaper_profile_list shaper_profile_list; + struct otx2_rss_info rss_info; + struct otx2_fc_info fc_info; + uint32_t txmap[RTE_ETHDEV_QUEUE_STAT_CNTRS]; + uint32_t rxmap[RTE_ETHDEV_QUEUE_STAT_CNTRS]; + struct otx2_npc_flow_info npc_flow; + struct otx2_vlan_info vlan_info; + struct otx2_eth_qconf *tx_qconf; + struct otx2_eth_qconf *rx_qconf; + struct rte_eth_dev *eth_dev; + eth_rx_burst_t rx_pkt_burst_no_offload; + /* PTP counters */ + bool ptp_en; + struct otx2_timesync_info tstamp; + struct rte_timecounter systime_tc; + struct rte_timecounter rx_tstamp_tc; + struct rte_timecounter tx_tstamp_tc; + double clk_freq_mult; + uint64_t clk_delta; + bool mc_tbl_set; + struct otx2_nix_mc_filter_tbl mc_fltr_tbl; + bool sdp_link; /* SDP flag */ + /* Inline IPsec params */ + uint16_t ipsec_in_max_spi; + uint8_t duplex; + uint32_t speed; +} __rte_cache_aligned; + +struct otx2_eth_txq { + uint64_t cmd[8]; + int64_t fc_cache_pkts; + uint64_t *fc_mem; + void *lmt_addr; + rte_iova_t io_addr; + rte_iova_t fc_iova; + uint16_t sqes_per_sqb_log2; + int16_t nb_sqb_bufs_adj; + RTE_MARKER slow_path_start; + uint16_t nb_sqb_bufs; + uint16_t sq; + uint64_t offloads; + struct otx2_eth_dev *dev; + struct rte_mempool *sqb_pool; + struct otx2_eth_qconf qconf; +} __rte_cache_aligned; + +struct otx2_eth_rxq { + uint64_t mbuf_initializer; + uint64_t data_off; + uintptr_t desc; + void *lookup_mem; + uintptr_t cq_door; + uint64_t wdata; + int64_t *cq_status; + uint32_t head; + uint32_t qmask; + uint32_t available; + uint16_t rq; + struct otx2_timesync_info *tstamp; + RTE_MARKER slow_path_start; + uint64_t aura; + uint64_t offloads; + uint32_t qlen; + struct rte_mempool *pool; + enum nix_q_size_e qsize; + struct rte_eth_dev *eth_dev; + struct otx2_eth_qconf qconf; + uint16_t cq_drop; +} __rte_cache_aligned; + +static inline struct otx2_eth_dev * +otx2_eth_pmd_priv(struct rte_eth_dev *eth_dev) +{ + return eth_dev->data->dev_private; +} + +/* Ops */ +int otx2_nix_info_get(struct rte_eth_dev *eth_dev, + struct rte_eth_dev_info *dev_info); +int otx2_nix_dev_filter_ctrl(struct rte_eth_dev *eth_dev, + enum rte_filter_type filter_type, + enum rte_filter_op filter_op, void *arg); +int otx2_nix_fw_version_get(struct rte_eth_dev *eth_dev, char *fw_version, + size_t fw_size); +int otx2_nix_get_module_info(struct rte_eth_dev *eth_dev, + struct rte_eth_dev_module_info *modinfo); +int otx2_nix_get_module_eeprom(struct rte_eth_dev *eth_dev, + struct rte_dev_eeprom_info *info); +int otx2_nix_pool_ops_supported(struct rte_eth_dev *eth_dev, const char *pool); +void otx2_nix_rxq_info_get(struct rte_eth_dev *eth_dev, uint16_t queue_id, + struct rte_eth_rxq_info *qinfo); +void otx2_nix_txq_info_get(struct rte_eth_dev *eth_dev, uint16_t queue_id, + struct rte_eth_txq_info *qinfo); +int otx2_rx_burst_mode_get(struct rte_eth_dev *dev, uint16_t queue_id, + struct rte_eth_burst_mode *mode); +int otx2_tx_burst_mode_get(struct rte_eth_dev *dev, uint16_t queue_id, + struct rte_eth_burst_mode *mode); +uint32_t otx2_nix_rx_queue_count(struct rte_eth_dev *eth_dev, uint16_t qidx); +int otx2_nix_tx_done_cleanup(void *txq, uint32_t free_cnt); +int 
otx2_nix_rx_descriptor_done(void *rxq, uint16_t offset); +int otx2_nix_rx_descriptor_status(void *rx_queue, uint16_t offset); +int otx2_nix_tx_descriptor_status(void *tx_queue, uint16_t offset); + +void otx2_nix_promisc_config(struct rte_eth_dev *eth_dev, int en); +int otx2_nix_promisc_enable(struct rte_eth_dev *eth_dev); +int otx2_nix_promisc_disable(struct rte_eth_dev *eth_dev); +int otx2_nix_allmulticast_enable(struct rte_eth_dev *eth_dev); +int otx2_nix_allmulticast_disable(struct rte_eth_dev *eth_dev); +int otx2_nix_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qidx); +int otx2_nix_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qidx); +uint64_t otx2_nix_rxq_mbuf_setup(struct otx2_eth_dev *dev, uint16_t port_id); + +/* Multicast filter APIs */ +void otx2_nix_mc_filter_init(struct otx2_eth_dev *dev); +void otx2_nix_mc_filter_fini(struct otx2_eth_dev *dev); +int otx2_nix_mc_addr_list_install(struct rte_eth_dev *eth_dev); +int otx2_nix_mc_addr_list_uninstall(struct rte_eth_dev *eth_dev); +int otx2_nix_set_mc_addr_list(struct rte_eth_dev *eth_dev, + struct rte_ether_addr *mc_addr_set, + uint32_t nb_mc_addr); + +/* MTU */ +int otx2_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu); +int otx2_nix_recalc_mtu(struct rte_eth_dev *eth_dev); + +/* Link */ +void otx2_nix_toggle_flag_link_cfg(struct otx2_eth_dev *dev, bool set); +int otx2_nix_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete); +void otx2_eth_dev_link_status_update(struct otx2_dev *dev, + struct cgx_link_user_info *link); +int otx2_nix_dev_set_link_up(struct rte_eth_dev *eth_dev); +int otx2_nix_dev_set_link_down(struct rte_eth_dev *eth_dev); +int otx2_apply_link_speed(struct rte_eth_dev *eth_dev); + +/* IRQ */ +int otx2_nix_register_irqs(struct rte_eth_dev *eth_dev); +int oxt2_nix_register_queue_irqs(struct rte_eth_dev *eth_dev); +int oxt2_nix_register_cq_irqs(struct rte_eth_dev *eth_dev); +void otx2_nix_unregister_irqs(struct rte_eth_dev *eth_dev); +void oxt2_nix_unregister_queue_irqs(struct rte_eth_dev *eth_dev); +void oxt2_nix_unregister_cq_irqs(struct rte_eth_dev *eth_dev); +void otx2_nix_err_intr_enb_dis(struct rte_eth_dev *eth_dev, bool enb); +void otx2_nix_ras_intr_enb_dis(struct rte_eth_dev *eth_dev, bool enb); + +int otx2_nix_rx_queue_intr_enable(struct rte_eth_dev *eth_dev, + uint16_t rx_queue_id); +int otx2_nix_rx_queue_intr_disable(struct rte_eth_dev *eth_dev, + uint16_t rx_queue_id); + +/* Debug */ +int otx2_nix_reg_dump(struct otx2_eth_dev *dev, uint64_t *data); +int otx2_nix_dev_get_reg(struct rte_eth_dev *eth_dev, + struct rte_dev_reg_info *regs); +int otx2_nix_queues_ctx_dump(struct rte_eth_dev *eth_dev); +void otx2_nix_cqe_dump(const struct nix_cqe_hdr_s *cq); +void otx2_nix_tm_dump(struct otx2_eth_dev *dev); + +/* Stats */ +int otx2_nix_dev_stats_get(struct rte_eth_dev *eth_dev, + struct rte_eth_stats *stats); +int otx2_nix_dev_stats_reset(struct rte_eth_dev *eth_dev); + +int otx2_nix_queue_stats_mapping(struct rte_eth_dev *dev, + uint16_t queue_id, uint8_t stat_idx, + uint8_t is_rx); +int otx2_nix_xstats_get(struct rte_eth_dev *eth_dev, + struct rte_eth_xstat *xstats, unsigned int n); +int otx2_nix_xstats_get_names(struct rte_eth_dev *eth_dev, + struct rte_eth_xstat_name *xstats_names, + unsigned int limit); +int otx2_nix_xstats_reset(struct rte_eth_dev *eth_dev); + +int otx2_nix_xstats_get_by_id(struct rte_eth_dev *eth_dev, + const uint64_t *ids, + uint64_t *values, unsigned int n); +int otx2_nix_xstats_get_names_by_id(struct rte_eth_dev *eth_dev, + struct rte_eth_xstat_name 
*xstats_names, + const uint64_t *ids, unsigned int limit); + +/* RSS */ +void otx2_nix_rss_set_key(struct otx2_eth_dev *dev, + uint8_t *key, uint32_t key_len); +uint32_t otx2_rss_ethdev_to_nix(struct otx2_eth_dev *dev, + uint64_t ethdev_rss, uint8_t rss_level); +int otx2_rss_set_hf(struct otx2_eth_dev *dev, + uint32_t flowkey_cfg, uint8_t *alg_idx, + uint8_t group, int mcam_index); +int otx2_nix_rss_tbl_init(struct otx2_eth_dev *dev, uint8_t group, + uint16_t *ind_tbl); +int otx2_nix_rss_config(struct rte_eth_dev *eth_dev); + +int otx2_nix_dev_reta_update(struct rte_eth_dev *eth_dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size); +int otx2_nix_dev_reta_query(struct rte_eth_dev *eth_dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size); +int otx2_nix_rss_hash_update(struct rte_eth_dev *eth_dev, + struct rte_eth_rss_conf *rss_conf); + +int otx2_nix_rss_hash_conf_get(struct rte_eth_dev *eth_dev, + struct rte_eth_rss_conf *rss_conf); + +/* CGX */ +int otx2_cgx_rxtx_start(struct otx2_eth_dev *dev); +int otx2_cgx_rxtx_stop(struct otx2_eth_dev *dev); +int otx2_cgx_mac_addr_set(struct rte_eth_dev *eth_dev, + struct rte_ether_addr *addr); + +/* Flow Control */ +int otx2_nix_flow_ctrl_init(struct rte_eth_dev *eth_dev); + +int otx2_nix_flow_ctrl_get(struct rte_eth_dev *eth_dev, + struct rte_eth_fc_conf *fc_conf); + +int otx2_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev, + struct rte_eth_fc_conf *fc_conf); + +int otx2_nix_rxchan_bpid_cfg(struct rte_eth_dev *eth_dev, bool enb); + +int otx2_nix_update_flow_ctrl_mode(struct rte_eth_dev *eth_dev); + +/* VLAN */ +int otx2_nix_vlan_offload_init(struct rte_eth_dev *eth_dev); +int otx2_nix_vlan_fini(struct rte_eth_dev *eth_dev); +int otx2_nix_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask); +void otx2_nix_vlan_update_promisc(struct rte_eth_dev *eth_dev, int enable); +int otx2_nix_vlan_filter_set(struct rte_eth_dev *eth_dev, uint16_t vlan_id, + int on); +void otx2_nix_vlan_strip_queue_set(struct rte_eth_dev *dev, + uint16_t queue, int on); +int otx2_nix_vlan_tpid_set(struct rte_eth_dev *eth_dev, + enum rte_vlan_type type, uint16_t tpid); +int otx2_nix_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on); + +/* Lookup configuration */ +void *otx2_nix_fastpath_lookup_mem_get(void); + +/* PTYPES */ +const uint32_t *otx2_nix_supported_ptypes_get(struct rte_eth_dev *dev); +int otx2_nix_ptypes_set(struct rte_eth_dev *eth_dev, uint32_t ptype_mask); + +/* Mac address handling */ +int otx2_nix_mac_addr_set(struct rte_eth_dev *eth_dev, + struct rte_ether_addr *addr); +int otx2_nix_mac_addr_get(struct rte_eth_dev *eth_dev, uint8_t *addr); +int otx2_nix_mac_addr_add(struct rte_eth_dev *eth_dev, + struct rte_ether_addr *addr, + uint32_t index, uint32_t pool); +void otx2_nix_mac_addr_del(struct rte_eth_dev *eth_dev, uint32_t index); +int otx2_cgx_mac_max_entries_get(struct otx2_eth_dev *dev); + +/* Devargs */ +int otx2_ethdev_parse_devargs(struct rte_devargs *devargs, + struct otx2_eth_dev *dev); + +/* Rx and Tx routines */ +void otx2_eth_set_rx_function(struct rte_eth_dev *eth_dev); +void otx2_eth_set_tx_function(struct rte_eth_dev *eth_dev); +void otx2_nix_form_default_desc(struct otx2_eth_txq *txq); + +/* Timesync - PTP routines */ +int otx2_nix_timesync_enable(struct rte_eth_dev *eth_dev); +int otx2_nix_timesync_disable(struct rte_eth_dev *eth_dev); +int otx2_nix_timesync_read_rx_timestamp(struct rte_eth_dev *eth_dev, + struct timespec *timestamp, + uint32_t flags); +int 
otx2_nix_timesync_read_tx_timestamp(struct rte_eth_dev *eth_dev, + struct timespec *timestamp); +int otx2_nix_timesync_adjust_time(struct rte_eth_dev *eth_dev, int64_t delta); +int otx2_nix_timesync_write_time(struct rte_eth_dev *eth_dev, + const struct timespec *ts); +int otx2_nix_timesync_read_time(struct rte_eth_dev *eth_dev, + struct timespec *ts); +int otx2_eth_dev_ptp_info_update(struct otx2_dev *dev, bool ptp_en); +int otx2_nix_read_clock(struct rte_eth_dev *eth_dev, uint64_t *time); +int otx2_nix_raw_clock_tsc_conv(struct otx2_eth_dev *dev); +void otx2_nix_ptp_enable_vf(struct rte_eth_dev *eth_dev); + +#endif /* __OTX2_ETHDEV_H__ */ diff --git a/src/spdk/dpdk/drivers/net/octeontx2/otx2_ethdev_debug.c b/src/spdk/dpdk/drivers/net/octeontx2/otx2_ethdev_debug.c new file mode 100644 index 000000000..6d951bc7e --- /dev/null +++ b/src/spdk/dpdk/drivers/net/octeontx2/otx2_ethdev_debug.c @@ -0,0 +1,811 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(C) 2019 Marvell International Ltd. + */ + +#include "otx2_ethdev.h" + +#define nix_dump(fmt, ...) fprintf(stderr, fmt "\n", ##__VA_ARGS__) +#define NIX_REG_INFO(reg) {reg, #reg} +#define NIX_REG_NAME_SZ 48 + +struct nix_lf_reg_info { + uint32_t offset; + const char *name; +}; + +static const struct +nix_lf_reg_info nix_lf_reg[] = { + NIX_REG_INFO(NIX_LF_RX_SECRETX(0)), + NIX_REG_INFO(NIX_LF_RX_SECRETX(1)), + NIX_REG_INFO(NIX_LF_RX_SECRETX(2)), + NIX_REG_INFO(NIX_LF_RX_SECRETX(3)), + NIX_REG_INFO(NIX_LF_RX_SECRETX(4)), + NIX_REG_INFO(NIX_LF_RX_SECRETX(5)), + NIX_REG_INFO(NIX_LF_CFG), + NIX_REG_INFO(NIX_LF_GINT), + NIX_REG_INFO(NIX_LF_GINT_W1S), + NIX_REG_INFO(NIX_LF_GINT_ENA_W1C), + NIX_REG_INFO(NIX_LF_GINT_ENA_W1S), + NIX_REG_INFO(NIX_LF_ERR_INT), + NIX_REG_INFO(NIX_LF_ERR_INT_W1S), + NIX_REG_INFO(NIX_LF_ERR_INT_ENA_W1C), + NIX_REG_INFO(NIX_LF_ERR_INT_ENA_W1S), + NIX_REG_INFO(NIX_LF_RAS), + NIX_REG_INFO(NIX_LF_RAS_W1S), + NIX_REG_INFO(NIX_LF_RAS_ENA_W1C), + NIX_REG_INFO(NIX_LF_RAS_ENA_W1S), + NIX_REG_INFO(NIX_LF_SQ_OP_ERR_DBG), + NIX_REG_INFO(NIX_LF_MNQ_ERR_DBG), + NIX_REG_INFO(NIX_LF_SEND_ERR_DBG), +}; + +static int +nix_lf_get_reg_count(struct otx2_eth_dev *dev) +{ + int reg_count = 0; + + reg_count = RTE_DIM(nix_lf_reg); + /* NIX_LF_TX_STATX */ + reg_count += dev->lf_tx_stats; + /* NIX_LF_RX_STATX */ + reg_count += dev->lf_rx_stats; + /* NIX_LF_QINTX_CNT*/ + reg_count += dev->qints; + /* NIX_LF_QINTX_INT */ + reg_count += dev->qints; + /* NIX_LF_QINTX_ENA_W1S */ + reg_count += dev->qints; + /* NIX_LF_QINTX_ENA_W1C */ + reg_count += dev->qints; + /* NIX_LF_CINTX_CNT */ + reg_count += dev->cints; + /* NIX_LF_CINTX_WAIT */ + reg_count += dev->cints; + /* NIX_LF_CINTX_INT */ + reg_count += dev->cints; + /* NIX_LF_CINTX_INT_W1S */ + reg_count += dev->cints; + /* NIX_LF_CINTX_ENA_W1S */ + reg_count += dev->cints; + /* NIX_LF_CINTX_ENA_W1C */ + reg_count += dev->cints; + + return reg_count; +} + +int +otx2_nix_reg_dump(struct otx2_eth_dev *dev, uint64_t *data) +{ + uintptr_t nix_lf_base = dev->base; + bool dump_stdout; + uint64_t reg; + uint32_t i; + + dump_stdout = data ? 
0 : 1; + + for (i = 0; i < RTE_DIM(nix_lf_reg); i++) { + reg = otx2_read64(nix_lf_base + nix_lf_reg[i].offset); + if (dump_stdout && reg) + nix_dump("%32s = 0x%" PRIx64, + nix_lf_reg[i].name, reg); + if (data) + *data++ = reg; + } + + /* NIX_LF_TX_STATX */ + for (i = 0; i < dev->lf_tx_stats; i++) { + reg = otx2_read64(nix_lf_base + NIX_LF_TX_STATX(i)); + if (dump_stdout && reg) + nix_dump("%32s_%d = 0x%" PRIx64, + "NIX_LF_TX_STATX", i, reg); + if (data) + *data++ = reg; + } + + /* NIX_LF_RX_STATX */ + for (i = 0; i < dev->lf_rx_stats; i++) { + reg = otx2_read64(nix_lf_base + NIX_LF_RX_STATX(i)); + if (dump_stdout && reg) + nix_dump("%32s_%d = 0x%" PRIx64, + "NIX_LF_RX_STATX", i, reg); + if (data) + *data++ = reg; + } + + /* NIX_LF_QINTX_CNT*/ + for (i = 0; i < dev->qints; i++) { + reg = otx2_read64(nix_lf_base + NIX_LF_QINTX_CNT(i)); + if (dump_stdout && reg) + nix_dump("%32s_%d = 0x%" PRIx64, + "NIX_LF_QINTX_CNT", i, reg); + if (data) + *data++ = reg; + } + + /* NIX_LF_QINTX_INT */ + for (i = 0; i < dev->qints; i++) { + reg = otx2_read64(nix_lf_base + NIX_LF_QINTX_INT(i)); + if (dump_stdout && reg) + nix_dump("%32s_%d = 0x%" PRIx64, + "NIX_LF_QINTX_INT", i, reg); + if (data) + *data++ = reg; + } + + /* NIX_LF_QINTX_ENA_W1S */ + for (i = 0; i < dev->qints; i++) { + reg = otx2_read64(nix_lf_base + NIX_LF_QINTX_ENA_W1S(i)); + if (dump_stdout && reg) + nix_dump("%32s_%d = 0x%" PRIx64, + "NIX_LF_QINTX_ENA_W1S", i, reg); + if (data) + *data++ = reg; + } + + /* NIX_LF_QINTX_ENA_W1C */ + for (i = 0; i < dev->qints; i++) { + reg = otx2_read64(nix_lf_base + NIX_LF_QINTX_ENA_W1C(i)); + if (dump_stdout && reg) + nix_dump("%32s_%d = 0x%" PRIx64, + "NIX_LF_QINTX_ENA_W1C", i, reg); + if (data) + *data++ = reg; + } + + /* NIX_LF_CINTX_CNT */ + for (i = 0; i < dev->cints; i++) { + reg = otx2_read64(nix_lf_base + NIX_LF_CINTX_CNT(i)); + if (dump_stdout && reg) + nix_dump("%32s_%d = 0x%" PRIx64, + "NIX_LF_CINTX_CNT", i, reg); + if (data) + *data++ = reg; + } + + /* NIX_LF_CINTX_WAIT */ + for (i = 0; i < dev->cints; i++) { + reg = otx2_read64(nix_lf_base + NIX_LF_CINTX_WAIT(i)); + if (dump_stdout && reg) + nix_dump("%32s_%d = 0x%" PRIx64, + "NIX_LF_CINTX_WAIT", i, reg); + if (data) + *data++ = reg; + } + + /* NIX_LF_CINTX_INT */ + for (i = 0; i < dev->cints; i++) { + reg = otx2_read64(nix_lf_base + NIX_LF_CINTX_INT(i)); + if (dump_stdout && reg) + nix_dump("%32s_%d = 0x%" PRIx64, + "NIX_LF_CINTX_INT", i, reg); + if (data) + *data++ = reg; + } + + /* NIX_LF_CINTX_INT_W1S */ + for (i = 0; i < dev->cints; i++) { + reg = otx2_read64(nix_lf_base + NIX_LF_CINTX_INT_W1S(i)); + if (dump_stdout && reg) + nix_dump("%32s_%d = 0x%" PRIx64, + "NIX_LF_CINTX_INT_W1S", i, reg); + if (data) + *data++ = reg; + } + + /* NIX_LF_CINTX_ENA_W1S */ + for (i = 0; i < dev->cints; i++) { + reg = otx2_read64(nix_lf_base + NIX_LF_CINTX_ENA_W1S(i)); + if (dump_stdout && reg) + nix_dump("%32s_%d = 0x%" PRIx64, + "NIX_LF_CINTX_ENA_W1S", i, reg); + if (data) + *data++ = reg; + } + + /* NIX_LF_CINTX_ENA_W1C */ + for (i = 0; i < dev->cints; i++) { + reg = otx2_read64(nix_lf_base + NIX_LF_CINTX_ENA_W1C(i)); + if (dump_stdout && reg) + nix_dump("%32s_%d = 0x%" PRIx64, + "NIX_LF_CINTX_ENA_W1C", i, reg); + if (data) + *data++ = reg; + } + return 0; +} + +int +otx2_nix_dev_get_reg(struct rte_eth_dev *eth_dev, struct rte_dev_reg_info *regs) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + uint64_t *data = regs->data; + + if (data == NULL) { + regs->length = nix_lf_get_reg_count(dev); + regs->width = 8; + return 0; + } + + if 
(!regs->length || + regs->length == (uint32_t)nix_lf_get_reg_count(dev)) { + otx2_nix_reg_dump(dev, data); + return 0; + } + + return -ENOTSUP; +} + +static inline void +nix_lf_sq_dump(__otx2_io struct nix_sq_ctx_s *ctx) +{ + nix_dump("W0: sqe_way_mask \t\t%d\nW0: cq \t\t\t\t%d", + ctx->sqe_way_mask, ctx->cq); + nix_dump("W0: sdp_mcast \t\t\t%d\nW0: substream \t\t\t0x%03x", + ctx->sdp_mcast, ctx->substream); + nix_dump("W0: qint_idx \t\t\t%d\nW0: ena \t\t\t%d\n", + ctx->qint_idx, ctx->ena); + + nix_dump("W1: sqb_count \t\t\t%d\nW1: default_chan \t\t%d", + ctx->sqb_count, ctx->default_chan); + nix_dump("W1: smq_rr_quantum \t\t%d\nW1: sso_ena \t\t\t%d", + ctx->smq_rr_quantum, ctx->sso_ena); + nix_dump("W1: xoff \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: smq\t\t\t\t%d\n", + ctx->xoff, ctx->cq_ena, ctx->smq); + + nix_dump("W2: sqe_stype \t\t\t%d\nW2: sq_int_ena \t\t\t%d", + ctx->sqe_stype, ctx->sq_int_ena); + nix_dump("W2: sq_int \t\t\t%d\nW2: sqb_aura \t\t\t%d", + ctx->sq_int, ctx->sqb_aura); + nix_dump("W2: smq_rr_count \t\t%d\n", ctx->smq_rr_count); + + nix_dump("W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d", + ctx->smq_next_sq_vld, ctx->smq_pend); + nix_dump("W3: smenq_next_sqb_vld \t%d\nW3: head_offset\t\t\t%d", + ctx->smenq_next_sqb_vld, ctx->head_offset); + nix_dump("W3: smenq_offset\t\t%d\nW3: tail_offset \t\t%d", + ctx->smenq_offset, ctx->tail_offset); + nix_dump("W3: smq_lso_segnum \t\t%d\nW3: smq_next_sq \t\t%d", + ctx->smq_lso_segnum, ctx->smq_next_sq); + nix_dump("W3: mnq_dis \t\t\t%d\nW3: lmt_dis \t\t\t%d", + ctx->mnq_dis, ctx->lmt_dis); + nix_dump("W3: cq_limit\t\t\t%d\nW3: max_sqe_size\t\t%d\n", + ctx->cq_limit, ctx->max_sqe_size); + + nix_dump("W4: next_sqb \t\t\t0x%" PRIx64 "", ctx->next_sqb); + nix_dump("W5: tail_sqb \t\t\t0x%" PRIx64 "", ctx->tail_sqb); + nix_dump("W6: smenq_sqb \t\t\t0x%" PRIx64 "", ctx->smenq_sqb); + nix_dump("W7: smenq_next_sqb \t\t0x%" PRIx64 "", ctx->smenq_next_sqb); + nix_dump("W8: head_sqb \t\t\t0x%" PRIx64 "", ctx->head_sqb); + + nix_dump("W9: vfi_lso_vld \t\t%d\nW9: vfi_lso_vlan1_ins_ena\t%d", + ctx->vfi_lso_vld, ctx->vfi_lso_vlan1_ins_ena); + nix_dump("W9: vfi_lso_vlan0_ins_ena\t%d\nW9: vfi_lso_mps\t\t\t%d", + ctx->vfi_lso_vlan0_ins_ena, ctx->vfi_lso_mps); + nix_dump("W9: vfi_lso_sb \t\t\t%d\nW9: vfi_lso_sizem1\t\t%d", + ctx->vfi_lso_sb, ctx->vfi_lso_sizem1); + nix_dump("W9: vfi_lso_total\t\t%d", ctx->vfi_lso_total); + + nix_dump("W10: scm_lso_rem \t\t0x%" PRIx64 "", + (uint64_t)ctx->scm_lso_rem); + nix_dump("W11: octs \t\t\t0x%" PRIx64 "", (uint64_t)ctx->octs); + nix_dump("W12: pkts \t\t\t0x%" PRIx64 "", (uint64_t)ctx->pkts); + nix_dump("W14: dropped_octs \t\t0x%" PRIx64 "", + (uint64_t)ctx->drop_octs); + nix_dump("W15: dropped_pkts \t\t0x%" PRIx64 "", + (uint64_t)ctx->drop_pkts); +} + +static inline void +nix_lf_rq_dump(__otx2_io struct nix_rq_ctx_s *ctx) +{ + nix_dump("W0: wqe_aura \t\t\t%d\nW0: substream \t\t\t0x%03x", + ctx->wqe_aura, ctx->substream); + nix_dump("W0: cq \t\t\t\t%d\nW0: ena_wqwd \t\t\t%d", + ctx->cq, ctx->ena_wqwd); + nix_dump("W0: ipsech_ena \t\t\t%d\nW0: sso_ena \t\t\t%d", + ctx->ipsech_ena, ctx->sso_ena); + nix_dump("W0: ena \t\t\t%d\n", ctx->ena); + + nix_dump("W1: lpb_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d", + ctx->lpb_drop_ena, ctx->spb_drop_ena); + nix_dump("W1: xqe_drop_ena \t\t%d\nW1: wqe_caching \t\t%d", + ctx->xqe_drop_ena, ctx->wqe_caching); + nix_dump("W1: pb_caching \t\t\t%d\nW1: sso_tt \t\t\t%d", + ctx->pb_caching, ctx->sso_tt); + nix_dump("W1: sso_grp \t\t\t%d\nW1: lpb_aura \t\t\t%d", + ctx->sso_grp, 
ctx->lpb_aura); + nix_dump("W1: spb_aura \t\t\t%d\n", ctx->spb_aura); + + nix_dump("W2: xqe_hdr_split \t\t%d\nW2: xqe_imm_copy \t\t%d", + ctx->xqe_hdr_split, ctx->xqe_imm_copy); + nix_dump("W2: xqe_imm_size \t\t%d\nW2: later_skip \t\t\t%d", + ctx->xqe_imm_size, ctx->later_skip); + nix_dump("W2: first_skip \t\t\t%d\nW2: lpb_sizem1 \t\t\t%d", + ctx->first_skip, ctx->lpb_sizem1); + nix_dump("W2: spb_ena \t\t\t%d\nW2: wqe_skip \t\t\t%d", + ctx->spb_ena, ctx->wqe_skip); + nix_dump("W2: spb_sizem1 \t\t\t%d\n", ctx->spb_sizem1); + + nix_dump("W3: spb_pool_pass \t\t%d\nW3: spb_pool_drop \t\t%d", + ctx->spb_pool_pass, ctx->spb_pool_drop); + nix_dump("W3: spb_aura_pass \t\t%d\nW3: spb_aura_drop \t\t%d", + ctx->spb_aura_pass, ctx->spb_aura_drop); + nix_dump("W3: wqe_pool_pass \t\t%d\nW3: wqe_pool_drop \t\t%d", + ctx->wqe_pool_pass, ctx->wqe_pool_drop); + nix_dump("W3: xqe_pass \t\t\t%d\nW3: xqe_drop \t\t\t%d\n", + ctx->xqe_pass, ctx->xqe_drop); + + nix_dump("W4: qint_idx \t\t\t%d\nW4: rq_int_ena \t\t\t%d", + ctx->qint_idx, ctx->rq_int_ena); + nix_dump("W4: rq_int \t\t\t%d\nW4: lpb_pool_pass \t\t%d", + ctx->rq_int, ctx->lpb_pool_pass); + nix_dump("W4: lpb_pool_drop \t\t%d\nW4: lpb_aura_pass \t\t%d", + ctx->lpb_pool_drop, ctx->lpb_aura_pass); + nix_dump("W4: lpb_aura_drop \t\t%d\n", ctx->lpb_aura_drop); + + nix_dump("W5: flow_tagw \t\t\t%d\nW5: bad_utag \t\t\t%d", + ctx->flow_tagw, ctx->bad_utag); + nix_dump("W5: good_utag \t\t\t%d\nW5: ltag \t\t\t%d\n", + ctx->good_utag, ctx->ltag); + + nix_dump("W6: octs \t\t\t0x%" PRIx64 "", (uint64_t)ctx->octs); + nix_dump("W7: pkts \t\t\t0x%" PRIx64 "", (uint64_t)ctx->pkts); + nix_dump("W8: drop_octs \t\t\t0x%" PRIx64 "", (uint64_t)ctx->drop_octs); + nix_dump("W9: drop_pkts \t\t\t0x%" PRIx64 "", (uint64_t)ctx->drop_pkts); + nix_dump("W10: re_pkts \t\t\t0x%" PRIx64 "\n", (uint64_t)ctx->re_pkts); +} + +static inline void +nix_lf_cq_dump(__otx2_io struct nix_cq_ctx_s *ctx) +{ + nix_dump("W0: base \t\t\t0x%" PRIx64 "\n", ctx->base); + + nix_dump("W1: wrptr \t\t\t%" PRIx64 "", (uint64_t)ctx->wrptr); + nix_dump("W1: avg_con \t\t\t%d\nW1: cint_idx \t\t\t%d", + ctx->avg_con, ctx->cint_idx); + nix_dump("W1: cq_err \t\t\t%d\nW1: qint_idx \t\t\t%d", + ctx->cq_err, ctx->qint_idx); + nix_dump("W1: bpid \t\t\t%d\nW1: bp_ena \t\t\t%d\n", + ctx->bpid, ctx->bp_ena); + + nix_dump("W2: update_time \t\t%d\nW2: avg_level \t\t\t%d", + ctx->update_time, ctx->avg_level); + nix_dump("W2: head \t\t\t%d\nW2: tail \t\t\t%d\n", + ctx->head, ctx->tail); + + nix_dump("W3: cq_err_int_ena \t\t%d\nW3: cq_err_int \t\t\t%d", + ctx->cq_err_int_ena, ctx->cq_err_int); + nix_dump("W3: qsize \t\t\t%d\nW3: caching \t\t\t%d", + ctx->qsize, ctx->caching); + nix_dump("W3: substream \t\t\t0x%03x\nW3: ena \t\t\t%d", + ctx->substream, ctx->ena); + nix_dump("W3: drop_ena \t\t\t%d\nW3: drop \t\t\t%d", + ctx->drop_ena, ctx->drop); + nix_dump("W3: bp \t\t\t\t%d\n", ctx->bp); +} + +int +otx2_nix_queues_ctx_dump(struct rte_eth_dev *eth_dev) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + int rc, q, rq = eth_dev->data->nb_rx_queues; + int sq = eth_dev->data->nb_tx_queues; + struct otx2_mbox *mbox = dev->mbox; + struct npa_aq_enq_rsp *npa_rsp; + struct npa_aq_enq_req *npa_aq; + struct otx2_npa_lf *npa_lf; + struct nix_aq_enq_rsp *rsp; + struct nix_aq_enq_req *aq; + + npa_lf = otx2_npa_lf_obj_get(); + + for (q = 0; q < rq; q++) { + aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox); + aq->qidx = q; + aq->ctype = NIX_AQ_CTYPE_CQ; + aq->op = NIX_AQ_INSTOP_READ; + + rc = otx2_mbox_process_msg(mbox, (void *)&rsp); + if 
(rc) { + otx2_err("Failed to get cq context"); + goto fail; + } + nix_dump("============== port=%d cq=%d ===============", + eth_dev->data->port_id, q); + nix_lf_cq_dump(&rsp->cq); + } + + for (q = 0; q < rq; q++) { + aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox); + aq->qidx = q; + aq->ctype = NIX_AQ_CTYPE_RQ; + aq->op = NIX_AQ_INSTOP_READ; + + rc = otx2_mbox_process_msg(mbox, (void **)&rsp); + if (rc) { + otx2_err("Failed to get rq context"); + goto fail; + } + nix_dump("============== port=%d rq=%d ===============", + eth_dev->data->port_id, q); + nix_lf_rq_dump(&rsp->rq); + } + for (q = 0; q < sq; q++) { + aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox); + aq->qidx = q; + aq->ctype = NIX_AQ_CTYPE_SQ; + aq->op = NIX_AQ_INSTOP_READ; + + rc = otx2_mbox_process_msg(mbox, (void *)&rsp); + if (rc) { + otx2_err("Failed to get sq context"); + goto fail; + } + nix_dump("============== port=%d sq=%d ===============", + eth_dev->data->port_id, q); + nix_lf_sq_dump(&rsp->sq); + + if (!npa_lf) { + otx2_err("NPA LF doesn't exist"); + continue; + } + + /* Dump SQB Aura minimal info */ + npa_aq = otx2_mbox_alloc_msg_npa_aq_enq(npa_lf->mbox); + npa_aq->aura_id = rsp->sq.sqb_aura; + npa_aq->ctype = NPA_AQ_CTYPE_AURA; + npa_aq->op = NPA_AQ_INSTOP_READ; + + rc = otx2_mbox_process_msg(npa_lf->mbox, (void *)&npa_rsp); + if (rc) { + otx2_err("Failed to get sq's sqb_aura context"); + continue; + } + + nix_dump("\nSQB Aura W0: Pool addr\t\t0x%"PRIx64"", + npa_rsp->aura.pool_addr); + nix_dump("SQB Aura W1: ena\t\t\t%d", + npa_rsp->aura.ena); + nix_dump("SQB Aura W2: count\t\t%"PRIx64"", + (uint64_t)npa_rsp->aura.count); + nix_dump("SQB Aura W3: limit\t\t%"PRIx64"", + (uint64_t)npa_rsp->aura.limit); + nix_dump("SQB Aura W3: fc_ena\t\t%d", + npa_rsp->aura.fc_ena); + nix_dump("SQB Aura W4: fc_addr\t\t0x%"PRIx64"\n", + npa_rsp->aura.fc_addr); + } + +fail: + return rc; +} + +/* Dumps struct nix_cqe_hdr_s and struct nix_rx_parse_s */ +void +otx2_nix_cqe_dump(const struct nix_cqe_hdr_s *cq) +{ + const struct nix_rx_parse_s *rx = + (const struct nix_rx_parse_s *)((const uint64_t *)cq + 1); + + nix_dump("tag \t\t0x%x\tq \t\t%d\t\tnode \t\t%d\tcqe_type \t%d", + cq->tag, cq->q, cq->node, cq->cqe_type); + + nix_dump("W0: chan \t%d\t\tdesc_sizem1 \t%d", + rx->chan, rx->desc_sizem1); + nix_dump("W0: imm_copy \t%d\t\texpress \t%d", + rx->imm_copy, rx->express); + nix_dump("W0: wqwd \t%d\t\terrlev \t\t%d\t\terrcode \t%d", + rx->wqwd, rx->errlev, rx->errcode); + nix_dump("W0: latype \t%d\t\tlbtype \t\t%d\t\tlctype \t\t%d", + rx->latype, rx->lbtype, rx->lctype); + nix_dump("W0: ldtype \t%d\t\tletype \t\t%d\t\tlftype \t\t%d", + rx->ldtype, rx->letype, rx->lftype); + nix_dump("W0: lgtype \t%d \t\tlhtype \t\t%d", + rx->lgtype, rx->lhtype); + + nix_dump("W1: pkt_lenm1 \t%d", rx->pkt_lenm1); + nix_dump("W1: l2m \t%d\t\tl2b \t\t%d\t\tl3m \t\t%d\tl3b \t\t%d", + rx->l2m, rx->l2b, rx->l3m, rx->l3b); + nix_dump("W1: vtag0_valid %d\t\tvtag0_gone \t%d", + rx->vtag0_valid, rx->vtag0_gone); + nix_dump("W1: vtag1_valid %d\t\tvtag1_gone \t%d", + rx->vtag1_valid, rx->vtag1_gone); + nix_dump("W1: pkind \t%d", rx->pkind); + nix_dump("W1: vtag0_tci \t%d\t\tvtag1_tci \t%d", + rx->vtag0_tci, rx->vtag1_tci); + + nix_dump("W2: laflags \t%d\t\tlbflags\t\t%d\t\tlcflags \t%d", + rx->laflags, rx->lbflags, rx->lcflags); + nix_dump("W2: ldflags \t%d\t\tleflags\t\t%d\t\tlfflags \t%d", + rx->ldflags, rx->leflags, rx->lfflags); + nix_dump("W2: lgflags \t%d\t\tlhflags \t%d", + rx->lgflags, rx->lhflags); + + nix_dump("W3: eoh_ptr \t%d\t\twqe_aura \t%d\t\tpb_aura \t%d", + 
rx->eoh_ptr, rx->wqe_aura, rx->pb_aura); + nix_dump("W3: match_id \t%d", rx->match_id); + + nix_dump("W4: laptr \t%d\t\tlbptr \t\t%d\t\tlcptr \t\t%d", + rx->laptr, rx->lbptr, rx->lcptr); + nix_dump("W4: ldptr \t%d\t\tleptr \t\t%d\t\tlfptr \t\t%d", + rx->ldptr, rx->leptr, rx->lfptr); + nix_dump("W4: lgptr \t%d\t\tlhptr \t\t%d", rx->lgptr, rx->lhptr); + + nix_dump("W5: vtag0_ptr \t%d\t\tvtag1_ptr \t%d\t\tflow_key_alg \t%d", + rx->vtag0_ptr, rx->vtag1_ptr, rx->flow_key_alg); +} + +static uint8_t +prepare_nix_tm_reg_dump(uint16_t hw_lvl, uint16_t schq, uint16_t link, + uint64_t *reg, char regstr[][NIX_REG_NAME_SZ]) +{ + uint8_t k = 0; + + switch (hw_lvl) { + case NIX_TXSCH_LVL_SMQ: + reg[k] = NIX_AF_SMQX_CFG(schq); + snprintf(regstr[k++], NIX_REG_NAME_SZ, + "NIX_AF_SMQ[%u]_CFG", schq); + + reg[k] = NIX_AF_MDQX_PARENT(schq); + snprintf(regstr[k++], NIX_REG_NAME_SZ, + "NIX_AF_MDQ[%u]_PARENT", schq); + + reg[k] = NIX_AF_MDQX_SCHEDULE(schq); + snprintf(regstr[k++], NIX_REG_NAME_SZ, + "NIX_AF_MDQ[%u]_SCHEDULE", schq); + + reg[k] = NIX_AF_MDQX_PIR(schq); + snprintf(regstr[k++], NIX_REG_NAME_SZ, + "NIX_AF_MDQ[%u]_PIR", schq); + + reg[k] = NIX_AF_MDQX_CIR(schq); + snprintf(regstr[k++], NIX_REG_NAME_SZ, + "NIX_AF_MDQ[%u]_CIR", schq); + + reg[k] = NIX_AF_MDQX_SHAPE(schq); + snprintf(regstr[k++], NIX_REG_NAME_SZ, + "NIX_AF_MDQ[%u]_SHAPE", schq); + + reg[k] = NIX_AF_MDQX_SW_XOFF(schq); + snprintf(regstr[k++], NIX_REG_NAME_SZ, + "NIX_AF_MDQ[%u]_SW_XOFF", schq); + break; + case NIX_TXSCH_LVL_TL4: + reg[k] = NIX_AF_TL4X_PARENT(schq); + snprintf(regstr[k++], NIX_REG_NAME_SZ, + "NIX_AF_TL4[%u]_PARENT", schq); + + reg[k] = NIX_AF_TL4X_TOPOLOGY(schq); + snprintf(regstr[k++], NIX_REG_NAME_SZ, + "NIX_AF_TL4[%u]_TOPOLOGY", schq); + + reg[k] = NIX_AF_TL4X_SDP_LINK_CFG(schq); + snprintf(regstr[k++], NIX_REG_NAME_SZ, + "NIX_AF_TL4[%u]_SDP_LINK_CFG", schq); + + reg[k] = NIX_AF_TL4X_SCHEDULE(schq); + snprintf(regstr[k++], NIX_REG_NAME_SZ, + "NIX_AF_TL4[%u]_SCHEDULE", schq); + + reg[k] = NIX_AF_TL4X_PIR(schq); + snprintf(regstr[k++], NIX_REG_NAME_SZ, + "NIX_AF_TL4[%u]_PIR", schq); + + reg[k] = NIX_AF_TL4X_CIR(schq); + snprintf(regstr[k++], NIX_REG_NAME_SZ, + "NIX_AF_TL4[%u]_CIR", schq); + + reg[k] = NIX_AF_TL4X_SHAPE(schq); + snprintf(regstr[k++], NIX_REG_NAME_SZ, + "NIX_AF_TL4[%u]_SHAPE", schq); + + reg[k] = NIX_AF_TL4X_SW_XOFF(schq); + snprintf(regstr[k++], NIX_REG_NAME_SZ, + "NIX_AF_TL4[%u]_SW_XOFF", schq); + break; + case NIX_TXSCH_LVL_TL3: + reg[k] = NIX_AF_TL3X_PARENT(schq); + snprintf(regstr[k++], NIX_REG_NAME_SZ, + "NIX_AF_TL3[%u]_PARENT", schq); + + reg[k] = NIX_AF_TL3X_TOPOLOGY(schq); + snprintf(regstr[k++], NIX_REG_NAME_SZ, + "NIX_AF_TL3[%u]_TOPOLOGY", schq); + + reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, link); + snprintf(regstr[k++], NIX_REG_NAME_SZ, + "NIX_AF_TL3_TL2[%u]_LINK[%u]_CFG", schq, link); + + reg[k] = NIX_AF_TL3X_SCHEDULE(schq); + snprintf(regstr[k++], NIX_REG_NAME_SZ, + "NIX_AF_TL3[%u]_SCHEDULE", schq); + + reg[k] = NIX_AF_TL3X_PIR(schq); + snprintf(regstr[k++], NIX_REG_NAME_SZ, + "NIX_AF_TL3[%u]_PIR", schq); + + reg[k] = NIX_AF_TL3X_CIR(schq); + snprintf(regstr[k++], NIX_REG_NAME_SZ, + "NIX_AF_TL3[%u]_CIR", schq); + + reg[k] = NIX_AF_TL3X_SHAPE(schq); + snprintf(regstr[k++], NIX_REG_NAME_SZ, + "NIX_AF_TL3[%u]_SHAPE", schq); + + reg[k] = NIX_AF_TL3X_SW_XOFF(schq); + snprintf(regstr[k++], NIX_REG_NAME_SZ, + "NIX_AF_TL3[%u]_SW_XOFF", schq); + break; + case NIX_TXSCH_LVL_TL2: + reg[k] = NIX_AF_TL2X_PARENT(schq); + snprintf(regstr[k++], NIX_REG_NAME_SZ, + "NIX_AF_TL2[%u]_PARENT", schq); + + reg[k] = 
NIX_AF_TL2X_TOPOLOGY(schq); + snprintf(regstr[k++], NIX_REG_NAME_SZ, + "NIX_AF_TL2[%u]_TOPOLOGY", schq); + + reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, link); + snprintf(regstr[k++], NIX_REG_NAME_SZ, + "NIX_AF_TL3_TL2[%u]_LINK[%u]_CFG", schq, link); + + reg[k] = NIX_AF_TL2X_SCHEDULE(schq); + snprintf(regstr[k++], NIX_REG_NAME_SZ, + "NIX_AF_TL2[%u]_SCHEDULE", schq); + + reg[k] = NIX_AF_TL2X_PIR(schq); + snprintf(regstr[k++], NIX_REG_NAME_SZ, + "NIX_AF_TL2[%u]_PIR", schq); + + reg[k] = NIX_AF_TL2X_CIR(schq); + snprintf(regstr[k++], NIX_REG_NAME_SZ, + "NIX_AF_TL2[%u]_CIR", schq); + + reg[k] = NIX_AF_TL2X_SHAPE(schq); + snprintf(regstr[k++], NIX_REG_NAME_SZ, + "NIX_AF_TL2[%u]_SHAPE", schq); + + reg[k] = NIX_AF_TL2X_SW_XOFF(schq); + snprintf(regstr[k++], NIX_REG_NAME_SZ, + "NIX_AF_TL2[%u]_SW_XOFF", schq); + break; + case NIX_TXSCH_LVL_TL1: + + reg[k] = NIX_AF_TL1X_TOPOLOGY(schq); + snprintf(regstr[k++], NIX_REG_NAME_SZ, + "NIX_AF_TL1[%u]_TOPOLOGY", schq); + + reg[k] = NIX_AF_TL1X_SCHEDULE(schq); + snprintf(regstr[k++], NIX_REG_NAME_SZ, + "NIX_AF_TL1[%u]_SCHEDULE", schq); + + reg[k] = NIX_AF_TL1X_CIR(schq); + snprintf(regstr[k++], NIX_REG_NAME_SZ, + "NIX_AF_TL1[%u]_CIR", schq); + + reg[k] = NIX_AF_TL1X_SW_XOFF(schq); + snprintf(regstr[k++], NIX_REG_NAME_SZ, + "NIX_AF_TL1[%u]_SW_XOFF", schq); + + reg[k] = NIX_AF_TL1X_DROPPED_PACKETS(schq); + snprintf(regstr[k++], NIX_REG_NAME_SZ, + "NIX_AF_TL1[%u]_DROPPED_PACKETS", schq); + break; + default: + break; + } + + if (k > MAX_REGS_PER_MBOX_MSG) { + nix_dump("\t!!!NIX TM Registers request overflow!!!"); + return 0; + } + return k; +} + +/* Dump TM hierarchy and registers */ +void +otx2_nix_tm_dump(struct otx2_eth_dev *dev) +{ + char regstr[MAX_REGS_PER_MBOX_MSG * 2][NIX_REG_NAME_SZ]; + struct otx2_nix_tm_node *tm_node, *root_node, *parent; + uint64_t reg[MAX_REGS_PER_MBOX_MSG * 2]; + struct nix_txschq_config *req; + const char *lvlstr, *parent_lvlstr; + struct nix_txschq_config *rsp; + uint32_t schq, parent_schq; + int hw_lvl, j, k, rc; + + nix_dump("===TM hierarchy and registers dump of %s===", + dev->eth_dev->data->name); + + root_node = NULL; + + for (hw_lvl = 0; hw_lvl <= NIX_TXSCH_LVL_CNT; hw_lvl++) { + TAILQ_FOREACH(tm_node, &dev->node_list, node) { + if (tm_node->hw_lvl != hw_lvl) + continue; + + parent = tm_node->parent; + if (hw_lvl == NIX_TXSCH_LVL_CNT) { + lvlstr = "SQ"; + schq = tm_node->id; + } else { + lvlstr = nix_hwlvl2str(tm_node->hw_lvl); + schq = tm_node->hw_id; + } + + if (parent) { + parent_schq = parent->hw_id; + parent_lvlstr = + nix_hwlvl2str(parent->hw_lvl); + } else if (tm_node->hw_lvl == NIX_TXSCH_LVL_TL1) { + parent_schq = otx2_nix_get_link(dev); + parent_lvlstr = "LINK"; + } else { + parent_schq = tm_node->parent_hw_id; + parent_lvlstr = + nix_hwlvl2str(tm_node->hw_lvl + 1); + } + + nix_dump("%s_%d->%s_%d", lvlstr, schq, + parent_lvlstr, parent_schq); + + if (!(tm_node->flags & NIX_TM_NODE_HWRES)) + continue; + + /* Need to dump TL1 when root is TL2 */ + if (tm_node->hw_lvl == dev->otx2_tm_root_lvl) + root_node = tm_node; + + /* Dump registers only when HWRES is present */ + k = prepare_nix_tm_reg_dump(tm_node->hw_lvl, schq, + otx2_nix_get_link(dev), reg, + regstr); + if (!k) + continue; + + req = otx2_mbox_alloc_msg_nix_txschq_cfg(dev->mbox); + req->read = 1; + req->lvl = tm_node->hw_lvl; + req->num_regs = k; + otx2_mbox_memcpy(req->reg, reg, sizeof(uint64_t) * k); + rc = otx2_mbox_process_msg(dev->mbox, (void **)&rsp); + if (!rc) { + for (j = 0; j < k; j++) + nix_dump("\t%s=0x%016"PRIx64, + regstr[j], rsp->regval[j]); + } 
else { + nix_dump("\t!!!Failed to dump registers!!!"); + } + } + nix_dump("\n"); + } + + /* Dump TL1 node data when root level is TL2 */ + if (root_node && root_node->hw_lvl == NIX_TXSCH_LVL_TL2) { + k = prepare_nix_tm_reg_dump(NIX_TXSCH_LVL_TL1, + root_node->parent_hw_id, + otx2_nix_get_link(dev), + reg, regstr); + if (!k) + return; + + + req = otx2_mbox_alloc_msg_nix_txschq_cfg(dev->mbox); + req->read = 1; + req->lvl = NIX_TXSCH_LVL_TL1; + req->num_regs = k; + otx2_mbox_memcpy(req->reg, reg, sizeof(uint64_t) * k); + rc = otx2_mbox_process_msg(dev->mbox, (void **)&rsp); + if (!rc) { + for (j = 0; j < k; j++) + nix_dump("\t%s=0x%016"PRIx64, + regstr[j], rsp->regval[j]); + } else { + nix_dump("\t!!!Failed to dump registers!!!"); + } + } + + otx2_nix_queues_ctx_dump(dev->eth_dev); +} diff --git a/src/spdk/dpdk/drivers/net/octeontx2/otx2_ethdev_devargs.c b/src/spdk/dpdk/drivers/net/octeontx2/otx2_ethdev_devargs.c new file mode 100644 index 000000000..e8ddaa69f --- /dev/null +++ b/src/spdk/dpdk/drivers/net/octeontx2/otx2_ethdev_devargs.c @@ -0,0 +1,193 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(C) 2019 Marvell International Ltd. + */ + +#include +#include + +#include "otx2_ethdev.h" + +static int +parse_flow_max_priority(const char *key, const char *value, void *extra_args) +{ + RTE_SET_USED(key); + uint16_t val; + + val = atoi(value); + + /* Limit the max priority to 32 */ + if (val < 1 || val > 32) + return -EINVAL; + + *(uint16_t *)extra_args = val; + + return 0; +} + +static int +parse_flow_prealloc_size(const char *key, const char *value, void *extra_args) +{ + RTE_SET_USED(key); + uint16_t val; + + val = atoi(value); + + /* Limit the prealloc size to 32 */ + if (val < 1 || val > 32) + return -EINVAL; + + *(uint16_t *)extra_args = val; + + return 0; +} + +static int +parse_reta_size(const char *key, const char *value, void *extra_args) +{ + RTE_SET_USED(key); + uint32_t val; + + val = atoi(value); + + if (val <= ETH_RSS_RETA_SIZE_64) + val = ETH_RSS_RETA_SIZE_64; + else if (val > ETH_RSS_RETA_SIZE_64 && val <= ETH_RSS_RETA_SIZE_128) + val = ETH_RSS_RETA_SIZE_128; + else if (val > ETH_RSS_RETA_SIZE_128 && val <= ETH_RSS_RETA_SIZE_256) + val = ETH_RSS_RETA_SIZE_256; + else + val = NIX_RSS_RETA_SIZE; + + *(uint16_t *)extra_args = val; + + return 0; +} + +static int +parse_ipsec_in_max_spi(const char *key, const char *value, void *extra_args) +{ + RTE_SET_USED(key); + uint32_t val; + + val = atoi(value); + + *(uint16_t *)extra_args = val; + + return 0; +} + +static int +parse_flag(const char *key, const char *value, void *extra_args) +{ + RTE_SET_USED(key); + + *(uint16_t *)extra_args = atoi(value); + + return 0; +} + +static int +parse_sqb_count(const char *key, const char *value, void *extra_args) +{ + RTE_SET_USED(key); + uint32_t val; + + val = atoi(value); + + if (val < NIX_MIN_SQB || val > NIX_MAX_SQB) + return -EINVAL; + + *(uint16_t *)extra_args = val; + + return 0; +} + +static int +parse_switch_header_type(const char *key, const char *value, void *extra_args) +{ + RTE_SET_USED(key); + + if (strcmp(value, "higig2") == 0) + *(uint16_t *)extra_args = OTX2_PRIV_FLAGS_HIGIG; + + if (strcmp(value, "dsa") == 0) + *(uint16_t *)extra_args = OTX2_PRIV_FLAGS_EDSA; + + if (strcmp(value, "chlen90b") == 0) + *(uint16_t *)extra_args = OTX2_PRIV_FLAGS_LEN_90B; + return 0; +} + +#define OTX2_RSS_RETA_SIZE "reta_size" +#define OTX2_IPSEC_IN_MAX_SPI "ipsec_in_max_spi" +#define OTX2_SCL_ENABLE "scalar_enable" +#define OTX2_MAX_SQB_COUNT "max_sqb_count" +#define OTX2_FLOW_PREALLOC_SIZE 
"flow_prealloc_size" +#define OTX2_FLOW_MAX_PRIORITY "flow_max_priority" +#define OTX2_SWITCH_HEADER_TYPE "switch_header" +#define OTX2_RSS_TAG_AS_XOR "tag_as_xor" + +int +otx2_ethdev_parse_devargs(struct rte_devargs *devargs, struct otx2_eth_dev *dev) +{ + uint16_t rss_size = NIX_RSS_RETA_SIZE; + uint16_t sqb_count = NIX_MAX_SQB; + uint16_t flow_prealloc_size = 8; + uint16_t switch_header_type = 0; + uint16_t flow_max_priority = 3; + uint16_t ipsec_in_max_spi = 1; + uint16_t scalar_enable = 0; + uint16_t rss_tag_as_xor = 0; + struct rte_kvargs *kvlist; + + if (devargs == NULL) + goto null_devargs; + + kvlist = rte_kvargs_parse(devargs->args, NULL); + if (kvlist == NULL) + goto exit; + + rte_kvargs_process(kvlist, OTX2_RSS_RETA_SIZE, + &parse_reta_size, &rss_size); + rte_kvargs_process(kvlist, OTX2_IPSEC_IN_MAX_SPI, + &parse_ipsec_in_max_spi, &ipsec_in_max_spi); + rte_kvargs_process(kvlist, OTX2_SCL_ENABLE, + &parse_flag, &scalar_enable); + rte_kvargs_process(kvlist, OTX2_MAX_SQB_COUNT, + &parse_sqb_count, &sqb_count); + rte_kvargs_process(kvlist, OTX2_FLOW_PREALLOC_SIZE, + &parse_flow_prealloc_size, &flow_prealloc_size); + rte_kvargs_process(kvlist, OTX2_FLOW_MAX_PRIORITY, + &parse_flow_max_priority, &flow_max_priority); + rte_kvargs_process(kvlist, OTX2_SWITCH_HEADER_TYPE, + &parse_switch_header_type, &switch_header_type); + rte_kvargs_process(kvlist, OTX2_RSS_TAG_AS_XOR, + &parse_flag, &rss_tag_as_xor); + otx2_parse_common_devargs(kvlist); + rte_kvargs_free(kvlist); + +null_devargs: + dev->ipsec_in_max_spi = ipsec_in_max_spi; + dev->scalar_ena = scalar_enable; + dev->rss_tag_as_xor = rss_tag_as_xor; + dev->max_sqb_count = sqb_count; + dev->rss_info.rss_size = rss_size; + dev->npc_flow.flow_prealloc_size = flow_prealloc_size; + dev->npc_flow.flow_max_priority = flow_max_priority; + dev->npc_flow.switch_header_type = switch_header_type; + return 0; + +exit: + return -EINVAL; +} + +RTE_PMD_REGISTER_PARAM_STRING(net_octeontx2, + OTX2_RSS_RETA_SIZE "=<64|128|256>" + OTX2_IPSEC_IN_MAX_SPI "=<1-65535>" + OTX2_SCL_ENABLE "=1" + OTX2_MAX_SQB_COUNT "=<8-512>" + OTX2_FLOW_PREALLOC_SIZE "=<1-32>" + OTX2_FLOW_MAX_PRIORITY "=<1-32>" + OTX2_SWITCH_HEADER_TYPE "=" + OTX2_RSS_TAG_AS_XOR "=1" + OTX2_NPA_LOCK_MASK "=<1-65535>"); diff --git a/src/spdk/dpdk/drivers/net/octeontx2/otx2_ethdev_irq.c b/src/spdk/dpdk/drivers/net/octeontx2/otx2_ethdev_irq.c new file mode 100644 index 000000000..b121488fa --- /dev/null +++ b/src/spdk/dpdk/drivers/net/octeontx2/otx2_ethdev_irq.c @@ -0,0 +1,494 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(C) 2019 Marvell International Ltd. 
+ */ + +#include + +#include +#include + +#include "otx2_ethdev.h" + +static void +nix_lf_err_irq(void *param) +{ + struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param; + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + uint64_t intr; + + intr = otx2_read64(dev->base + NIX_LF_ERR_INT); + if (intr == 0) + return; + + otx2_err("Err_intr=0x%" PRIx64 " pf=%d, vf=%d", intr, dev->pf, dev->vf); + + /* Clear interrupt */ + otx2_write64(intr, dev->base + NIX_LF_ERR_INT); + + /* Dump registers to std out */ + otx2_nix_reg_dump(dev, NULL); + otx2_nix_queues_ctx_dump(eth_dev); +} + +static int +nix_lf_register_err_irq(struct rte_eth_dev *eth_dev) +{ + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); + struct rte_intr_handle *handle = &pci_dev->intr_handle; + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + int rc, vec; + + vec = dev->nix_msixoff + NIX_LF_INT_VEC_ERR_INT; + + /* Clear err interrupt */ + otx2_nix_err_intr_enb_dis(eth_dev, false); + /* Set used interrupt vectors */ + rc = otx2_register_irq(handle, nix_lf_err_irq, eth_dev, vec); + /* Enable all dev interrupt except for RQ_DISABLED */ + otx2_nix_err_intr_enb_dis(eth_dev, true); + + return rc; +} + +static void +nix_lf_unregister_err_irq(struct rte_eth_dev *eth_dev) +{ + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); + struct rte_intr_handle *handle = &pci_dev->intr_handle; + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + int vec; + + vec = dev->nix_msixoff + NIX_LF_INT_VEC_ERR_INT; + + /* Clear err interrupt */ + otx2_nix_err_intr_enb_dis(eth_dev, false); + otx2_unregister_irq(handle, nix_lf_err_irq, eth_dev, vec); +} + +static void +nix_lf_ras_irq(void *param) +{ + struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param; + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + uint64_t intr; + + intr = otx2_read64(dev->base + NIX_LF_RAS); + if (intr == 0) + return; + + otx2_err("Ras_intr=0x%" PRIx64 " pf=%d, vf=%d", intr, dev->pf, dev->vf); + + /* Clear interrupt */ + otx2_write64(intr, dev->base + NIX_LF_RAS); + + /* Dump registers to std out */ + otx2_nix_reg_dump(dev, NULL); + otx2_nix_queues_ctx_dump(eth_dev); +} + +static int +nix_lf_register_ras_irq(struct rte_eth_dev *eth_dev) +{ + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); + struct rte_intr_handle *handle = &pci_dev->intr_handle; + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + int rc, vec; + + vec = dev->nix_msixoff + NIX_LF_INT_VEC_POISON; + + /* Clear err interrupt */ + otx2_nix_ras_intr_enb_dis(eth_dev, false); + /* Set used interrupt vectors */ + rc = otx2_register_irq(handle, nix_lf_ras_irq, eth_dev, vec); + /* Enable dev interrupt */ + otx2_nix_ras_intr_enb_dis(eth_dev, true); + + return rc; +} + +static void +nix_lf_unregister_ras_irq(struct rte_eth_dev *eth_dev) +{ + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); + struct rte_intr_handle *handle = &pci_dev->intr_handle; + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + int vec; + + vec = dev->nix_msixoff + NIX_LF_INT_VEC_POISON; + + /* Clear err interrupt */ + otx2_nix_ras_intr_enb_dis(eth_dev, false); + otx2_unregister_irq(handle, nix_lf_ras_irq, eth_dev, vec); +} + +static inline uint8_t +nix_lf_q_irq_get_and_clear(struct otx2_eth_dev *dev, uint16_t q, + uint32_t off, uint64_t mask) +{ + uint64_t reg, wdata; + uint8_t qint; + + wdata = (uint64_t)q << 44; + reg = otx2_atomic64_add_nosync(wdata, (int64_t *)(dev->base + off)); + + if (reg & BIT_ULL(42) /* OP_ERR */) { + otx2_err("Failed execute irq get 
off=0x%x", off); + return 0; + } + + qint = reg & 0xff; + wdata &= mask; + otx2_write64(wdata | qint, dev->base + off); + + return qint; +} + +static inline uint8_t +nix_lf_rq_irq_get_and_clear(struct otx2_eth_dev *dev, uint16_t rq) +{ + return nix_lf_q_irq_get_and_clear(dev, rq, NIX_LF_RQ_OP_INT, ~0xff00); +} + +static inline uint8_t +nix_lf_cq_irq_get_and_clear(struct otx2_eth_dev *dev, uint16_t cq) +{ + return nix_lf_q_irq_get_and_clear(dev, cq, NIX_LF_CQ_OP_INT, ~0xff00); +} + +static inline uint8_t +nix_lf_sq_irq_get_and_clear(struct otx2_eth_dev *dev, uint16_t sq) +{ + return nix_lf_q_irq_get_and_clear(dev, sq, NIX_LF_SQ_OP_INT, ~0x1ff00); +} + +static inline void +nix_lf_sq_debug_reg(struct otx2_eth_dev *dev, uint32_t off) +{ + uint64_t reg; + + reg = otx2_read64(dev->base + off); + if (reg & BIT_ULL(44)) + otx2_err("SQ=%d err_code=0x%x", + (int)((reg >> 8) & 0xfffff), (uint8_t)(reg & 0xff)); +} + +static void +nix_lf_cq_irq(void *param) +{ + struct otx2_qint *cint = (struct otx2_qint *)param; + struct rte_eth_dev *eth_dev = cint->eth_dev; + struct otx2_eth_dev *dev; + + dev = otx2_eth_pmd_priv(eth_dev); + /* Clear interrupt */ + otx2_write64(BIT_ULL(0), dev->base + NIX_LF_CINTX_INT(cint->qintx)); +} + +static void +nix_lf_q_irq(void *param) +{ + struct otx2_qint *qint = (struct otx2_qint *)param; + struct rte_eth_dev *eth_dev = qint->eth_dev; + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + uint8_t irq, qintx = qint->qintx; + int q, cq, rq, sq; + uint64_t intr; + + intr = otx2_read64(dev->base + NIX_LF_QINTX_INT(qintx)); + if (intr == 0) + return; + + otx2_err("Queue_intr=0x%" PRIx64 " qintx=%d pf=%d, vf=%d", + intr, qintx, dev->pf, dev->vf); + + /* Handle RQ interrupts */ + for (q = 0; q < eth_dev->data->nb_rx_queues; q++) { + rq = q % dev->qints; + irq = nix_lf_rq_irq_get_and_clear(dev, rq); + + if (irq & BIT_ULL(NIX_RQINT_DROP)) + otx2_err("RQ=%d NIX_RQINT_DROP", rq); + + if (irq & BIT_ULL(NIX_RQINT_RED)) + otx2_err("RQ=%d NIX_RQINT_RED", rq); + } + + /* Handle CQ interrupts */ + for (q = 0; q < eth_dev->data->nb_rx_queues; q++) { + cq = q % dev->qints; + irq = nix_lf_cq_irq_get_and_clear(dev, cq); + + if (irq & BIT_ULL(NIX_CQERRINT_DOOR_ERR)) + otx2_err("CQ=%d NIX_CQERRINT_DOOR_ERR", cq); + + if (irq & BIT_ULL(NIX_CQERRINT_WR_FULL)) + otx2_err("CQ=%d NIX_CQERRINT_WR_FULL", cq); + + if (irq & BIT_ULL(NIX_CQERRINT_CQE_FAULT)) + otx2_err("CQ=%d NIX_CQERRINT_CQE_FAULT", cq); + } + + /* Handle SQ interrupts */ + for (q = 0; q < eth_dev->data->nb_tx_queues; q++) { + sq = q % dev->qints; + irq = nix_lf_sq_irq_get_and_clear(dev, sq); + + if (irq & BIT_ULL(NIX_SQINT_LMT_ERR)) { + otx2_err("SQ=%d NIX_SQINT_LMT_ERR", sq); + nix_lf_sq_debug_reg(dev, NIX_LF_SQ_OP_ERR_DBG); + } + if (irq & BIT_ULL(NIX_SQINT_MNQ_ERR)) { + otx2_err("SQ=%d NIX_SQINT_MNQ_ERR", sq); + nix_lf_sq_debug_reg(dev, NIX_LF_MNQ_ERR_DBG); + } + if (irq & BIT_ULL(NIX_SQINT_SEND_ERR)) { + otx2_err("SQ=%d NIX_SQINT_SEND_ERR", sq); + nix_lf_sq_debug_reg(dev, NIX_LF_SEND_ERR_DBG); + } + if (irq & BIT_ULL(NIX_SQINT_SQB_ALLOC_FAIL)) { + otx2_err("SQ=%d NIX_SQINT_SQB_ALLOC_FAIL", sq); + nix_lf_sq_debug_reg(dev, NIX_LF_SEND_ERR_DBG); + } + } + + /* Clear interrupt */ + otx2_write64(intr, dev->base + NIX_LF_QINTX_INT(qintx)); + + /* Dump registers to std out */ + otx2_nix_reg_dump(dev, NULL); + otx2_nix_queues_ctx_dump(eth_dev); +} + +int +oxt2_nix_register_queue_irqs(struct rte_eth_dev *eth_dev) +{ + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); + struct rte_intr_handle *handle = &pci_dev->intr_handle; + 
struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + int vec, q, sqs, rqs, qs, rc = 0; + + /* Figure out max qintx required */ + rqs = RTE_MIN(dev->qints, eth_dev->data->nb_rx_queues); + sqs = RTE_MIN(dev->qints, eth_dev->data->nb_tx_queues); + qs = RTE_MAX(rqs, sqs); + + dev->configured_qints = qs; + + for (q = 0; q < qs; q++) { + vec = dev->nix_msixoff + NIX_LF_INT_VEC_QINT_START + q; + + /* Clear QINT CNT */ + otx2_write64(0, dev->base + NIX_LF_QINTX_CNT(q)); + + /* Clear interrupt */ + otx2_write64(~0ull, dev->base + NIX_LF_QINTX_ENA_W1C(q)); + + dev->qints_mem[q].eth_dev = eth_dev; + dev->qints_mem[q].qintx = q; + + /* Sync qints_mem update */ + rte_smp_wmb(); + + /* Register queue irq vector */ + rc = otx2_register_irq(handle, nix_lf_q_irq, + &dev->qints_mem[q], vec); + if (rc) + break; + + otx2_write64(0, dev->base + NIX_LF_QINTX_CNT(q)); + otx2_write64(0, dev->base + NIX_LF_QINTX_INT(q)); + /* Enable QINT interrupt */ + otx2_write64(~0ull, dev->base + NIX_LF_QINTX_ENA_W1S(q)); + } + + return rc; +} + +void +oxt2_nix_unregister_queue_irqs(struct rte_eth_dev *eth_dev) +{ + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); + struct rte_intr_handle *handle = &pci_dev->intr_handle; + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + int vec, q; + + for (q = 0; q < dev->configured_qints; q++) { + vec = dev->nix_msixoff + NIX_LF_INT_VEC_QINT_START + q; + + /* Clear QINT CNT */ + otx2_write64(0, dev->base + NIX_LF_QINTX_CNT(q)); + otx2_write64(0, dev->base + NIX_LF_QINTX_INT(q)); + + /* Clear interrupt */ + otx2_write64(~0ull, dev->base + NIX_LF_QINTX_ENA_W1C(q)); + + /* Unregister queue irq vector */ + otx2_unregister_irq(handle, nix_lf_q_irq, + &dev->qints_mem[q], vec); + } +} + +int +oxt2_nix_register_cq_irqs(struct rte_eth_dev *eth_dev) +{ + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); + struct rte_intr_handle *handle = &pci_dev->intr_handle; + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + uint8_t rc = 0, vec, q; + + dev->configured_cints = RTE_MIN(dev->cints, + eth_dev->data->nb_rx_queues); + + for (q = 0; q < dev->configured_cints; q++) { + vec = dev->nix_msixoff + NIX_LF_INT_VEC_CINT_START + q; + + /* Clear CINT CNT */ + otx2_write64(0, dev->base + NIX_LF_CINTX_CNT(q)); + + /* Clear interrupt */ + otx2_write64(BIT_ULL(0), dev->base + NIX_LF_CINTX_ENA_W1C(q)); + + dev->cints_mem[q].eth_dev = eth_dev; + dev->cints_mem[q].qintx = q; + + /* Sync cints_mem update */ + rte_smp_wmb(); + + /* Register queue irq vector */ + rc = otx2_register_irq(handle, nix_lf_cq_irq, + &dev->cints_mem[q], vec); + if (rc) { + otx2_err("Fail to register CQ irq, rc=%d", rc); + return rc; + } + + if (!handle->intr_vec) { + handle->intr_vec = rte_zmalloc("intr_vec", + dev->configured_cints * + sizeof(int), 0); + if (!handle->intr_vec) { + otx2_err("Failed to allocate %d rx intr_vec", + dev->configured_cints); + return -ENOMEM; + } + } + /* VFIO vector zero is resereved for misc interrupt so + * doing required adjustment. (b13bfab4cd) + */ + handle->intr_vec[q] = RTE_INTR_VEC_RXTX_OFFSET + vec; + + /* Configure CQE interrupt coalescing parameters */ + otx2_write64(((CQ_CQE_THRESH_DEFAULT) | + (CQ_CQE_THRESH_DEFAULT << 32) | + (CQ_TIMER_THRESH_DEFAULT << 48)), + dev->base + NIX_LF_CINTX_WAIT((q))); + + /* Keeping the CQ interrupt disabled as the rx interrupt + * feature needs to be enabled/disabled on demand. 
+ */ + } + + return rc; +} + +void +oxt2_nix_unregister_cq_irqs(struct rte_eth_dev *eth_dev) +{ + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); + struct rte_intr_handle *handle = &pci_dev->intr_handle; + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + int vec, q; + + for (q = 0; q < dev->configured_cints; q++) { + vec = dev->nix_msixoff + NIX_LF_INT_VEC_CINT_START + q; + + /* Clear CINT CNT */ + otx2_write64(0, dev->base + NIX_LF_CINTX_CNT(q)); + + /* Clear interrupt */ + otx2_write64(BIT_ULL(0), dev->base + NIX_LF_CINTX_ENA_W1C(q)); + + /* Unregister queue irq vector */ + otx2_unregister_irq(handle, nix_lf_cq_irq, + &dev->cints_mem[q], vec); + } +} + +int +otx2_nix_register_irqs(struct rte_eth_dev *eth_dev) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + int rc; + + if (dev->nix_msixoff == MSIX_VECTOR_INVALID) { + otx2_err("Invalid NIXLF MSIX vector offset vector: 0x%x", + dev->nix_msixoff); + return -EINVAL; + } + + /* Register lf err interrupt */ + rc = nix_lf_register_err_irq(eth_dev); + /* Register RAS interrupt */ + rc |= nix_lf_register_ras_irq(eth_dev); + + return rc; +} + +void +otx2_nix_unregister_irqs(struct rte_eth_dev *eth_dev) +{ + nix_lf_unregister_err_irq(eth_dev); + nix_lf_unregister_ras_irq(eth_dev); +} + +int +otx2_nix_rx_queue_intr_enable(struct rte_eth_dev *eth_dev, + uint16_t rx_queue_id) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + + /* Enable CINT interrupt */ + otx2_write64(BIT_ULL(0), dev->base + + NIX_LF_CINTX_ENA_W1S(rx_queue_id)); + + return 0; +} + +int +otx2_nix_rx_queue_intr_disable(struct rte_eth_dev *eth_dev, + uint16_t rx_queue_id) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + + /* Clear and disable CINT interrupt */ + otx2_write64(BIT_ULL(0), dev->base + + NIX_LF_CINTX_ENA_W1C(rx_queue_id)); + + return 0; +} + +void +otx2_nix_err_intr_enb_dis(struct rte_eth_dev *eth_dev, bool enb) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + + /* Enable all nix lf error interrupts except + * RQ_DISABLED and CQ_DISABLED. + */ + if (enb) + otx2_write64(~(BIT_ULL(11) | BIT_ULL(24)), + dev->base + NIX_LF_ERR_INT_ENA_W1S); + else + otx2_write64(~0ull, dev->base + NIX_LF_ERR_INT_ENA_W1C); +} + +void +otx2_nix_ras_intr_enb_dis(struct rte_eth_dev *eth_dev, bool enb) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + + if (enb) + otx2_write64(~0ull, dev->base + NIX_LF_RAS_ENA_W1S); + else + otx2_write64(~0ull, dev->base + NIX_LF_RAS_ENA_W1C); +} diff --git a/src/spdk/dpdk/drivers/net/octeontx2/otx2_ethdev_ops.c b/src/spdk/dpdk/drivers/net/octeontx2/otx2_ethdev_ops.c new file mode 100644 index 000000000..80ac2b96e --- /dev/null +++ b/src/spdk/dpdk/drivers/net/octeontx2/otx2_ethdev_ops.c @@ -0,0 +1,629 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(C) 2019 Marvell International Ltd. 
+ */ + +#include +#include + +#include "otx2_ethdev.h" + +int +otx2_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu) +{ + uint32_t buffsz, frame_size = mtu + NIX_L2_OVERHEAD; + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct rte_eth_dev_data *data = eth_dev->data; + struct otx2_mbox *mbox = dev->mbox; + struct nix_frs_cfg *req; + int rc; + + frame_size += NIX_TIMESYNC_RX_OFFSET * otx2_ethdev_is_ptp_en(dev); + + /* Check if MTU is within the allowed range */ + if (frame_size < NIX_MIN_FRS || frame_size > NIX_MAX_FRS) + return -EINVAL; + + buffsz = data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM; + + /* Refuse MTU that requires the support of scattered packets + * when this feature has not been enabled before. + */ + if (data->dev_started && frame_size > buffsz && + !(dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)) + return -EINVAL; + + /* Check * >= max_frame */ + if ((dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER) && + (frame_size > buffsz * NIX_RX_NB_SEG_MAX)) + return -EINVAL; + + req = otx2_mbox_alloc_msg_nix_set_hw_frs(mbox); + req->update_smq = true; + if (otx2_dev_is_sdp(dev)) + req->sdp_link = true; + /* FRS HW config should exclude FCS but include NPC VTAG insert size */ + req->maxlen = frame_size - RTE_ETHER_CRC_LEN + NIX_MAX_VTAG_ACT_SIZE; + + rc = otx2_mbox_process(mbox); + if (rc) + return rc; + + /* Now just update Rx MAXLEN */ + req = otx2_mbox_alloc_msg_nix_set_hw_frs(mbox); + req->maxlen = frame_size - RTE_ETHER_CRC_LEN; + if (otx2_dev_is_sdp(dev)) + req->sdp_link = true; + + rc = otx2_mbox_process(mbox); + if (rc) + return rc; + + if (frame_size > RTE_ETHER_MAX_LEN) + dev->rx_offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME; + else + dev->rx_offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME; + + /* Update max_rx_pkt_len */ + data->dev_conf.rxmode.max_rx_pkt_len = frame_size; + + return rc; +} + +int +otx2_nix_recalc_mtu(struct rte_eth_dev *eth_dev) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct rte_eth_dev_data *data = eth_dev->data; + struct rte_pktmbuf_pool_private *mbp_priv; + struct otx2_eth_rxq *rxq; + uint32_t buffsz; + uint16_t mtu; + int rc; + + /* Get rx buffer size */ + rxq = data->rx_queues[0]; + mbp_priv = rte_mempool_get_priv(rxq->pool); + buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM; + + /* Setup scatter mode if needed by jumbo */ + if (data->dev_conf.rxmode.max_rx_pkt_len > buffsz) + dev->rx_offloads |= DEV_RX_OFFLOAD_SCATTER; + + /* Setup MTU based on max_rx_pkt_len */ + mtu = data->dev_conf.rxmode.max_rx_pkt_len - NIX_L2_OVERHEAD; + + rc = otx2_nix_mtu_set(eth_dev, mtu); + if (rc) + otx2_err("Failed to set default MTU size %d", rc); + + return rc; +} + +static void +nix_cgx_promisc_config(struct rte_eth_dev *eth_dev, int en) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct otx2_mbox *mbox = dev->mbox; + + if (otx2_dev_is_vf_or_sdp(dev)) + return; + + if (en) + otx2_mbox_alloc_msg_cgx_promisc_enable(mbox); + else + otx2_mbox_alloc_msg_cgx_promisc_disable(mbox); + + otx2_mbox_process(mbox); +} + +void +otx2_nix_promisc_config(struct rte_eth_dev *eth_dev, int en) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct otx2_mbox *mbox = dev->mbox; + struct nix_rx_mode *req; + + if (otx2_dev_is_vf(dev)) + return; + + req = otx2_mbox_alloc_msg_nix_set_rx_mode(mbox); + + if (en) + req->mode = NIX_RX_MODE_UCAST | NIX_RX_MODE_PROMISC; + + otx2_mbox_process(mbox); + eth_dev->data->promiscuous = en; + otx2_nix_vlan_update_promisc(eth_dev, en); +} + +int +otx2_nix_promisc_enable(struct rte_eth_dev 
*eth_dev) +{ + otx2_nix_promisc_config(eth_dev, 1); + nix_cgx_promisc_config(eth_dev, 1); + + return 0; +} + +int +otx2_nix_promisc_disable(struct rte_eth_dev *eth_dev) +{ + otx2_nix_promisc_config(eth_dev, 0); + nix_cgx_promisc_config(eth_dev, 0); + + return 0; +} + +static void +nix_allmulticast_config(struct rte_eth_dev *eth_dev, int en) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct otx2_mbox *mbox = dev->mbox; + struct nix_rx_mode *req; + + if (otx2_dev_is_vf(dev)) + return; + + req = otx2_mbox_alloc_msg_nix_set_rx_mode(mbox); + + if (en) + req->mode = NIX_RX_MODE_UCAST | NIX_RX_MODE_ALLMULTI; + else if (eth_dev->data->promiscuous) + req->mode = NIX_RX_MODE_UCAST | NIX_RX_MODE_PROMISC; + + otx2_mbox_process(mbox); +} + +int +otx2_nix_allmulticast_enable(struct rte_eth_dev *eth_dev) +{ + nix_allmulticast_config(eth_dev, 1); + + return 0; +} + +int +otx2_nix_allmulticast_disable(struct rte_eth_dev *eth_dev) +{ + nix_allmulticast_config(eth_dev, 0); + + return 0; +} + +void +otx2_nix_rxq_info_get(struct rte_eth_dev *eth_dev, uint16_t queue_id, + struct rte_eth_rxq_info *qinfo) +{ + struct otx2_eth_rxq *rxq; + + rxq = eth_dev->data->rx_queues[queue_id]; + + qinfo->mp = rxq->pool; + qinfo->scattered_rx = eth_dev->data->scattered_rx; + qinfo->nb_desc = rxq->qconf.nb_desc; + + qinfo->conf.rx_free_thresh = 0; + qinfo->conf.rx_drop_en = 0; + qinfo->conf.rx_deferred_start = 0; + qinfo->conf.offloads = rxq->offloads; +} + +void +otx2_nix_txq_info_get(struct rte_eth_dev *eth_dev, uint16_t queue_id, + struct rte_eth_txq_info *qinfo) +{ + struct otx2_eth_txq *txq; + + txq = eth_dev->data->tx_queues[queue_id]; + + qinfo->nb_desc = txq->qconf.nb_desc; + + qinfo->conf.tx_thresh.pthresh = 0; + qinfo->conf.tx_thresh.hthresh = 0; + qinfo->conf.tx_thresh.wthresh = 0; + + qinfo->conf.tx_free_thresh = 0; + qinfo->conf.tx_rs_thresh = 0; + qinfo->conf.offloads = txq->offloads; + qinfo->conf.tx_deferred_start = 0; +} + +int +otx2_rx_burst_mode_get(struct rte_eth_dev *eth_dev, + __rte_unused uint16_t queue_id, + struct rte_eth_burst_mode *mode) +{ + ssize_t bytes = 0, str_size = RTE_ETH_BURST_MODE_INFO_SIZE, rc; + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + const struct burst_info { + uint16_t flags; + const char *output; + } rx_offload_map[] = { + {NIX_RX_OFFLOAD_RSS_F, "RSS,"}, + {NIX_RX_OFFLOAD_PTYPE_F, " Ptype,"}, + {NIX_RX_OFFLOAD_CHECKSUM_F, " Checksum,"}, + {NIX_RX_OFFLOAD_VLAN_STRIP_F, " VLAN Strip,"}, + {NIX_RX_OFFLOAD_MARK_UPDATE_F, " Mark Update,"}, + {NIX_RX_OFFLOAD_TSTAMP_F, " Timestamp,"}, + {NIX_RX_MULTI_SEG_F, " Scattered,"} + }; + static const char *const burst_mode[] = {"Vector Neon, Rx Offloads:", + "Scalar, Rx Offloads:" + }; + uint32_t i; + + /* Update burst mode info */ + rc = rte_strscpy(mode->info + bytes, burst_mode[dev->scalar_ena], + str_size - bytes); + if (rc < 0) + goto done; + + bytes += rc; + + /* Update Rx offload info */ + for (i = 0; i < RTE_DIM(rx_offload_map); i++) { + if (dev->rx_offload_flags & rx_offload_map[i].flags) { + rc = rte_strscpy(mode->info + bytes, + rx_offload_map[i].output, + str_size - bytes); + if (rc < 0) + goto done; + + bytes += rc; + } + } + +done: + return 0; +} + +int +otx2_tx_burst_mode_get(struct rte_eth_dev *eth_dev, + __rte_unused uint16_t queue_id, + struct rte_eth_burst_mode *mode) +{ + ssize_t bytes = 0, str_size = RTE_ETH_BURST_MODE_INFO_SIZE, rc; + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + const struct burst_info { + uint16_t flags; + const char *output; + } tx_offload_map[] = { + 
{NIX_TX_OFFLOAD_L3_L4_CSUM_F, " Inner L3/L4 csum,"}, + {NIX_TX_OFFLOAD_OL3_OL4_CSUM_F, " Outer L3/L4 csum,"}, + {NIX_TX_OFFLOAD_VLAN_QINQ_F, " VLAN Insertion,"}, + {NIX_TX_OFFLOAD_MBUF_NOFF_F, " MBUF free disable,"}, + {NIX_TX_OFFLOAD_TSTAMP_F, " Timestamp,"}, + {NIX_TX_OFFLOAD_TSO_F, " TSO,"}, + {NIX_TX_MULTI_SEG_F, " Scattered,"} + }; + static const char *const burst_mode[] = {"Vector Neon, Tx Offloads:", + "Scalar, Tx Offloads:" + }; + uint32_t i; + + /* Update burst mode info */ + rc = rte_strscpy(mode->info + bytes, burst_mode[dev->scalar_ena], + str_size - bytes); + if (rc < 0) + goto done; + + bytes += rc; + + /* Update Tx offload info */ + for (i = 0; i < RTE_DIM(tx_offload_map); i++) { + if (dev->tx_offload_flags & tx_offload_map[i].flags) { + rc = rte_strscpy(mode->info + bytes, + tx_offload_map[i].output, + str_size - bytes); + if (rc < 0) + goto done; + + bytes += rc; + } + } + +done: + return 0; +} + +static void +nix_rx_head_tail_get(struct otx2_eth_dev *dev, + uint32_t *head, uint32_t *tail, uint16_t queue_idx) +{ + uint64_t reg, val; + + if (head == NULL || tail == NULL) + return; + + reg = (((uint64_t)queue_idx) << 32); + val = otx2_atomic64_add_nosync(reg, (int64_t *) + (dev->base + NIX_LF_CQ_OP_STATUS)); + if (val & (OP_ERR | CQ_ERR)) + val = 0; + + *tail = (uint32_t)(val & 0xFFFFF); + *head = (uint32_t)((val >> 20) & 0xFFFFF); +} + +uint32_t +otx2_nix_rx_queue_count(struct rte_eth_dev *eth_dev, uint16_t queue_idx) +{ + struct otx2_eth_rxq *rxq = eth_dev->data->rx_queues[queue_idx]; + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + uint32_t head, tail; + + nix_rx_head_tail_get(dev, &head, &tail, queue_idx); + return (tail - head) % rxq->qlen; +} + +static inline int +nix_offset_has_packet(uint32_t head, uint32_t tail, uint16_t offset) +{ + /* Check given offset(queue index) has packet filled by HW */ + if (tail > head && offset <= tail && offset >= head) + return 1; + /* Wrap around case */ + if (head > tail && (offset >= head || offset <= tail)) + return 1; + + return 0; +} + +int +otx2_nix_rx_descriptor_done(void *rx_queue, uint16_t offset) +{ + struct otx2_eth_rxq *rxq = rx_queue; + uint32_t head, tail; + + nix_rx_head_tail_get(otx2_eth_pmd_priv(rxq->eth_dev), + &head, &tail, rxq->rq); + + return nix_offset_has_packet(head, tail, offset); +} + +int +otx2_nix_rx_descriptor_status(void *rx_queue, uint16_t offset) +{ + struct otx2_eth_rxq *rxq = rx_queue; + uint32_t head, tail; + + if (rxq->qlen <= offset) + return -EINVAL; + + nix_rx_head_tail_get(otx2_eth_pmd_priv(rxq->eth_dev), + &head, &tail, rxq->rq); + + if (nix_offset_has_packet(head, tail, offset)) + return RTE_ETH_RX_DESC_DONE; + else + return RTE_ETH_RX_DESC_AVAIL; +} + +static void +nix_tx_head_tail_get(struct otx2_eth_dev *dev, + uint32_t *head, uint32_t *tail, uint16_t queue_idx) +{ + uint64_t reg, val; + + if (head == NULL || tail == NULL) + return; + + reg = (((uint64_t)queue_idx) << 32); + val = otx2_atomic64_add_nosync(reg, (int64_t *) + (dev->base + NIX_LF_SQ_OP_STATUS)); + if (val & OP_ERR) + val = 0; + + *tail = (uint32_t)((val >> 28) & 0x3F); + *head = (uint32_t)((val >> 20) & 0x3F); +} + +int +otx2_nix_tx_descriptor_status(void *tx_queue, uint16_t offset) +{ + struct otx2_eth_txq *txq = tx_queue; + uint32_t head, tail; + + if (txq->qconf.nb_desc <= offset) + return -EINVAL; + + nix_tx_head_tail_get(txq->dev, &head, &tail, txq->sq); + + if (nix_offset_has_packet(head, tail, offset)) + return RTE_ETH_TX_DESC_DONE; + else + return RTE_ETH_TX_DESC_FULL; +} + +/* It is a NOP for octeontx2 as HW 
frees the buffer on xmit */ +int +otx2_nix_tx_done_cleanup(void *txq, uint32_t free_cnt) +{ + RTE_SET_USED(txq); + RTE_SET_USED(free_cnt); + + return 0; +} + +int +otx2_nix_fw_version_get(struct rte_eth_dev *eth_dev, char *fw_version, + size_t fw_size) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + int rc = (int)fw_size; + + if (fw_size > sizeof(dev->mkex_pfl_name)) + rc = sizeof(dev->mkex_pfl_name); + + rc = strlcpy(fw_version, (char *)dev->mkex_pfl_name, rc); + + rc += 1; /* Add the size of '\0' */ + if (fw_size < (uint32_t)rc) + return rc; + + return 0; +} + +int +otx2_nix_pool_ops_supported(struct rte_eth_dev *eth_dev, const char *pool) +{ + RTE_SET_USED(eth_dev); + + if (!strcmp(pool, rte_mbuf_platform_mempool_ops())) + return 0; + + return -ENOTSUP; +} + +int +otx2_nix_dev_filter_ctrl(struct rte_eth_dev *eth_dev, + enum rte_filter_type filter_type, + enum rte_filter_op filter_op, void *arg) +{ + RTE_SET_USED(eth_dev); + + if (filter_type != RTE_ETH_FILTER_GENERIC) { + otx2_err("Unsupported filter type %d", filter_type); + return -ENOTSUP; + } + + if (filter_op == RTE_ETH_FILTER_GET) { + *(const void **)arg = &otx2_flow_ops; + return 0; + } + + otx2_err("Invalid filter_op %d", filter_op); + return -EINVAL; +} + +static struct cgx_fw_data * +nix_get_fwdata(struct otx2_eth_dev *dev) +{ + struct otx2_mbox *mbox = dev->mbox; + struct cgx_fw_data *rsp = NULL; + int rc; + + otx2_mbox_alloc_msg_cgx_get_aux_link_info(mbox); + + rc = otx2_mbox_process_msg(mbox, (void *)&rsp); + if (rc) { + otx2_err("Failed to get fw data: %d", rc); + return NULL; + } + + return rsp; +} + +int +otx2_nix_get_module_info(struct rte_eth_dev *eth_dev, + struct rte_eth_dev_module_info *modinfo) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct cgx_fw_data *rsp; + + rsp = nix_get_fwdata(dev); + if (rsp == NULL) + return -EIO; + + modinfo->type = rsp->fwdata.sfp_eeprom.sff_id; + modinfo->eeprom_len = SFP_EEPROM_SIZE; + + return 0; +} + +int +otx2_nix_get_module_eeprom(struct rte_eth_dev *eth_dev, + struct rte_dev_eeprom_info *info) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct cgx_fw_data *rsp; + + if (!info->data || !info->length || + (info->offset + info->length > SFP_EEPROM_SIZE)) + return -EINVAL; + + rsp = nix_get_fwdata(dev); + if (rsp == NULL) + return -EIO; + + otx2_mbox_memcpy(info->data, rsp->fwdata.sfp_eeprom.buf + info->offset, + info->length); + + return 0; +} + +int +otx2_nix_info_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *devinfo) +{ + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + + devinfo->min_rx_bufsize = NIX_MIN_FRS; + devinfo->max_rx_pktlen = NIX_MAX_FRS; + devinfo->max_rx_queues = RTE_MAX_QUEUES_PER_PORT; + devinfo->max_tx_queues = RTE_MAX_QUEUES_PER_PORT; + devinfo->max_mac_addrs = dev->max_mac_entries; + devinfo->max_vfs = pci_dev->max_vfs; + devinfo->max_mtu = devinfo->max_rx_pktlen - NIX_L2_OVERHEAD; + devinfo->min_mtu = devinfo->min_rx_bufsize - NIX_L2_OVERHEAD; + + devinfo->rx_offload_capa = dev->rx_offload_capa; + devinfo->tx_offload_capa = dev->tx_offload_capa; + devinfo->rx_queue_offload_capa = 0; + devinfo->tx_queue_offload_capa = 0; + + devinfo->reta_size = dev->rss_info.rss_size; + devinfo->hash_key_size = NIX_HASH_KEY_SIZE; + devinfo->flow_type_rss_offloads = NIX_RSS_OFFLOAD; + + devinfo->default_rxconf = (struct rte_eth_rxconf) { + .rx_drop_en = 0, + .offloads = 0, + }; + + devinfo->default_txconf = (struct rte_eth_txconf) { + .offloads = 
0, + }; + + devinfo->default_rxportconf = (struct rte_eth_dev_portconf) { + .ring_size = NIX_RX_DEFAULT_RING_SZ, + }; + + devinfo->rx_desc_lim = (struct rte_eth_desc_lim) { + .nb_max = UINT16_MAX, + .nb_min = NIX_RX_MIN_DESC, + .nb_align = NIX_RX_MIN_DESC_ALIGN, + .nb_seg_max = NIX_RX_NB_SEG_MAX, + .nb_mtu_seg_max = NIX_RX_NB_SEG_MAX, + }; + devinfo->rx_desc_lim.nb_max = + RTE_ALIGN_MUL_FLOOR(devinfo->rx_desc_lim.nb_max, + NIX_RX_MIN_DESC_ALIGN); + + devinfo->tx_desc_lim = (struct rte_eth_desc_lim) { + .nb_max = UINT16_MAX, + .nb_min = 1, + .nb_align = 1, + .nb_seg_max = NIX_TX_NB_SEG_MAX, + .nb_mtu_seg_max = NIX_TX_NB_SEG_MAX, + }; + + /* Auto negotiation disabled */ + devinfo->speed_capa = ETH_LINK_SPEED_FIXED; + if (!otx2_dev_is_vf_or_sdp(dev) && !otx2_dev_is_lbk(dev)) { + devinfo->speed_capa |= ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G | + ETH_LINK_SPEED_25G | ETH_LINK_SPEED_40G; + + /* 50G and 100G to be supported for board version C0 + * and above. + */ + if (!otx2_dev_is_Ax(dev)) + devinfo->speed_capa |= ETH_LINK_SPEED_50G | + ETH_LINK_SPEED_100G; + } + + devinfo->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP | + RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP; + + return 0; +} diff --git a/src/spdk/dpdk/drivers/net/octeontx2/otx2_ethdev_sec.c b/src/spdk/dpdk/drivers/net/octeontx2/otx2_ethdev_sec.c new file mode 100644 index 000000000..5f6140f70 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/octeontx2/otx2_ethdev_sec.c @@ -0,0 +1,842 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (C) 2020 Marvell International Ltd. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "otx2_common.h" +#include "otx2_cryptodev_qp.h" +#include "otx2_ethdev.h" +#include "otx2_ethdev_sec.h" +#include "otx2_ipsec_fp.h" +#include "otx2_sec_idev.h" + +#define AH_HDR_LEN 12 +#define AES_GCM_IV_LEN 8 +#define AES_GCM_MAC_LEN 16 +#define AES_CBC_IV_LEN 16 +#define SHA1_HMAC_LEN 12 + +#define AES_GCM_ROUNDUP_BYTE_LEN 4 +#define AES_CBC_ROUNDUP_BYTE_LEN 16 + +struct eth_sec_tag_const { + RTE_STD_C11 + union { + struct { + uint32_t rsvd_11_0 : 12; + uint32_t port : 8; + uint32_t event_type : 4; + uint32_t rsvd_31_24 : 8; + }; + uint32_t u32; + }; +}; + +static struct rte_cryptodev_capabilities otx2_eth_sec_crypto_caps[] = { + { /* AES GCM */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD, + {.aead = { + .algo = RTE_CRYPTO_AEAD_AES_GCM, + .block_size = 16, + .key_size = { + .min = 16, + .max = 32, + .increment = 8 + }, + .digest_size = { + .min = 16, + .max = 16, + .increment = 0 + }, + .aad_size = { + .min = 8, + .max = 12, + .increment = 4 + }, + .iv_size = { + .min = 12, + .max = 12, + .increment = 0 + } + }, } + }, } + }, + { /* AES CBC */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, + {.cipher = { + .algo = RTE_CRYPTO_CIPHER_AES_CBC, + .block_size = 16, + .key_size = { + .min = 16, + .max = 32, + .increment = 8 + }, + .iv_size = { + .min = 16, + .max = 16, + .increment = 0 + } + }, } + }, } + }, + { /* SHA1 HMAC */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, + {.auth = { + .algo = RTE_CRYPTO_AUTH_SHA1_HMAC, + .block_size = 64, + .key_size = { + .min = 20, + .max = 64, + .increment = 1 + }, + .digest_size = { + .min = 12, + .max = 12, + .increment = 0 + }, + }, } + }, } + }, + RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST() +}; + +static const struct rte_security_capability otx2_eth_sec_capabilities[] = { 
+ { /* IPsec Inline Protocol ESP Tunnel Ingress */ + .action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL, + .protocol = RTE_SECURITY_PROTOCOL_IPSEC, + .ipsec = { + .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP, + .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL, + .direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS, + .options = { 0 } + }, + .crypto_capabilities = otx2_eth_sec_crypto_caps, + .ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA + }, + { /* IPsec Inline Protocol ESP Tunnel Egress */ + .action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL, + .protocol = RTE_SECURITY_PROTOCOL_IPSEC, + .ipsec = { + .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP, + .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL, + .direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS, + .options = { 0 } + }, + .crypto_capabilities = otx2_eth_sec_crypto_caps, + .ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA + }, + { + .action = RTE_SECURITY_ACTION_TYPE_NONE + } +}; + +static void +lookup_mem_sa_tbl_clear(struct rte_eth_dev *eth_dev) +{ + static const char name[] = OTX2_NIX_FASTPATH_LOOKUP_MEM; + uint16_t port = eth_dev->data->port_id; + const struct rte_memzone *mz; + uint64_t **sa_tbl; + uint8_t *mem; + + mz = rte_memzone_lookup(name); + if (mz == NULL) + return; + + mem = mz->addr; + + sa_tbl = (uint64_t **)RTE_PTR_ADD(mem, OTX2_NIX_SA_TBL_START); + if (sa_tbl[port] == NULL) + return; + + rte_free(sa_tbl[port]); + sa_tbl[port] = NULL; +} + +static int +lookup_mem_sa_index_update(struct rte_eth_dev *eth_dev, int spi, void *sa) +{ + static const char name[] = OTX2_NIX_FASTPATH_LOOKUP_MEM; + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + uint16_t port = eth_dev->data->port_id; + const struct rte_memzone *mz; + uint64_t **sa_tbl; + uint8_t *mem; + + mz = rte_memzone_lookup(name); + if (mz == NULL) { + otx2_err("Could not find fastpath lookup table"); + return -EINVAL; + } + + mem = mz->addr; + + sa_tbl = (uint64_t **)RTE_PTR_ADD(mem, OTX2_NIX_SA_TBL_START); + + if (sa_tbl[port] == NULL) { + sa_tbl[port] = rte_malloc(NULL, dev->ipsec_in_max_spi * + sizeof(uint64_t), 0); + } + + sa_tbl[port][spi] = (uint64_t)sa; + + return 0; +} + +static inline void +in_sa_mz_name_get(char *name, int size, uint16_t port) +{ + snprintf(name, size, "otx2_ipsec_in_sadb_%u", port); +} + +static struct otx2_ipsec_fp_in_sa * +in_sa_get(uint16_t port, int sa_index) +{ + char name[RTE_MEMZONE_NAMESIZE]; + struct otx2_ipsec_fp_in_sa *sa; + const struct rte_memzone *mz; + + in_sa_mz_name_get(name, RTE_MEMZONE_NAMESIZE, port); + mz = rte_memzone_lookup(name); + if (mz == NULL) { + otx2_err("Could not get the memzone reserved for IN SA DB"); + return NULL; + } + + sa = mz->addr; + + return sa + sa_index; +} + +static int +ipsec_sa_const_set(struct rte_security_ipsec_xform *ipsec, + struct rte_crypto_sym_xform *xform, + struct otx2_sec_session_ipsec_ip *sess) +{ + struct rte_crypto_sym_xform *cipher_xform, *auth_xform; + + sess->partial_len = sizeof(struct rte_ipv4_hdr); + + if (ipsec->proto == RTE_SECURITY_IPSEC_SA_PROTO_ESP) { + sess->partial_len += sizeof(struct rte_esp_hdr); + sess->roundup_len = sizeof(struct rte_esp_tail); + } else if (ipsec->proto == RTE_SECURITY_IPSEC_SA_PROTO_AH) { + sess->partial_len += AH_HDR_LEN; + } else { + return -EINVAL; + } + + if (ipsec->options.udp_encap) + sess->partial_len += sizeof(struct rte_udp_hdr); + + if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) { + if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM) { + sess->partial_len += AES_GCM_IV_LEN; + sess->partial_len += AES_GCM_MAC_LEN; + sess->roundup_byte = AES_GCM_ROUNDUP_BYTE_LEN; + } + return 0; 
+ } + + if (ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) { + cipher_xform = xform; + auth_xform = xform->next; + } else if (ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) { + auth_xform = xform; + cipher_xform = xform->next; + } else { + return -EINVAL; + } + if (cipher_xform->cipher.algo == RTE_CRYPTO_CIPHER_AES_CBC) { + sess->partial_len += AES_CBC_IV_LEN; + sess->roundup_byte = AES_CBC_ROUNDUP_BYTE_LEN; + } else { + return -EINVAL; + } + + if (auth_xform->auth.algo == RTE_CRYPTO_AUTH_SHA1_HMAC) + sess->partial_len += SHA1_HMAC_LEN; + else + return -EINVAL; + + return 0; +} + +static int +hmac_init(struct otx2_ipsec_fp_sa_ctl *ctl, struct otx2_cpt_qp *qp, + const uint8_t *auth_key, int len, uint8_t *hmac_key) +{ + struct inst_data { + struct otx2_cpt_res cpt_res; + uint8_t buffer[64]; + } *md; + + volatile struct otx2_cpt_res *res; + uint64_t timeout, lmt_status; + struct otx2_cpt_inst_s inst; + rte_iova_t md_iova; + int ret; + + memset(&inst, 0, sizeof(struct otx2_cpt_inst_s)); + + md = rte_zmalloc(NULL, sizeof(struct inst_data), OTX2_CPT_RES_ALIGN); + if (md == NULL) + return -ENOMEM; + + memcpy(md->buffer, auth_key, len); + + md_iova = rte_malloc_virt2iova(md); + if (md_iova == RTE_BAD_IOVA) { + ret = -EINVAL; + goto free_md; + } + + inst.res_addr = md_iova + offsetof(struct inst_data, cpt_res); + inst.opcode = OTX2_CPT_OP_WRITE_HMAC_IPAD_OPAD; + inst.param2 = ctl->auth_type; + inst.dlen = len; + inst.dptr = md_iova + offsetof(struct inst_data, buffer); + inst.rptr = inst.dptr; + inst.egrp = OTX2_CPT_EGRP_INLINE_IPSEC; + + md->cpt_res.compcode = 0; + md->cpt_res.uc_compcode = 0xff; + + timeout = rte_get_timer_cycles() + 5 * rte_get_timer_hz(); + + rte_cio_wmb(); + + do { + otx2_lmt_mov(qp->lmtline, &inst, 2); + lmt_status = otx2_lmt_submit(qp->lf_nq_reg); + } while (lmt_status == 0); + + res = (volatile struct otx2_cpt_res *)&md->cpt_res; + + /* Wait until instruction completes or times out */ + while (res->uc_compcode == 0xff) { + if (rte_get_timer_cycles() > timeout) + break; + } + + if (res->u16[0] != OTX2_SEC_COMP_GOOD) { + ret = -EIO; + goto free_md; + } + + /* Retrieve the ipad and opad from rptr */ + memcpy(hmac_key, md->buffer, 48); + + ret = 0; + +free_md: + rte_free(md); + return ret; +} + +static int +eth_sec_ipsec_out_sess_create(struct rte_eth_dev *eth_dev, + struct rte_security_ipsec_xform *ipsec, + struct rte_crypto_sym_xform *crypto_xform, + struct rte_security_session *sec_sess) +{ + struct rte_crypto_sym_xform *auth_xform, *cipher_xform; + struct otx2_sec_session_ipsec_ip *sess; + uint16_t port = eth_dev->data->port_id; + int cipher_key_len, auth_key_len, ret; + const uint8_t *cipher_key, *auth_key; + struct otx2_ipsec_fp_sa_ctl *ctl; + struct otx2_ipsec_fp_out_sa *sa; + struct otx2_sec_session *priv; + struct otx2_cpt_inst_s inst; + struct otx2_cpt_qp *qp; + + priv = get_sec_session_private_data(sec_sess); + sess = &priv->ipsec.ip; + + sa = &sess->out_sa; + ctl = &sa->ctl; + if (ctl->valid) { + otx2_err("SA already registered"); + return -EINVAL; + } + + memset(sess, 0, sizeof(struct otx2_sec_session_ipsec_ip)); + + sess->seq = 1; + + ret = ipsec_sa_const_set(ipsec, crypto_xform, sess); + if (ret < 0) + return ret; + + if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) + memcpy(sa->nonce, &ipsec->salt, 4); + + if (ipsec->options.udp_encap == 1) { + sa->udp_src = 4500; + sa->udp_dst = 4500; + } + + if (ipsec->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) { + /* Start ip id from 1 */ + sess->ip_id = 1; + + if (ipsec->tunnel.type == 
RTE_SECURITY_IPSEC_TUNNEL_IPV4) { + memcpy(&sa->ip_src, &ipsec->tunnel.ipv4.src_ip, + sizeof(struct in_addr)); + memcpy(&sa->ip_dst, &ipsec->tunnel.ipv4.dst_ip, + sizeof(struct in_addr)); + } else { + return -EINVAL; + } + } else { + return -EINVAL; + } + + cipher_xform = crypto_xform; + auth_xform = crypto_xform->next; + + cipher_key_len = 0; + auth_key_len = 0; + auth_key = NULL; + + if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) { + cipher_key = crypto_xform->aead.key.data; + cipher_key_len = crypto_xform->aead.key.length; + } else { + cipher_key = cipher_xform->cipher.key.data; + cipher_key_len = cipher_xform->cipher.key.length; + auth_key = auth_xform->auth.key.data; + auth_key_len = auth_xform->auth.key.length; + } + + if (cipher_key_len != 0) + memcpy(sa->cipher_key, cipher_key, cipher_key_len); + else + return -EINVAL; + + /* Determine word 7 of CPT instruction */ + inst.u64[7] = 0; + inst.egrp = OTX2_CPT_EGRP_INLINE_IPSEC; + inst.cptr = rte_mempool_virt2iova(sa); + sess->inst_w7 = inst.u64[7]; + + /* Get CPT QP to be used for this SA */ + ret = otx2_sec_idev_tx_cpt_qp_get(port, &qp); + if (ret) + return ret; + + sess->qp = qp; + + sess->cpt_lmtline = qp->lmtline; + sess->cpt_nq_reg = qp->lf_nq_reg; + + /* Populate control word */ + ret = ipsec_fp_sa_ctl_set(ipsec, crypto_xform, ctl); + if (ret) + goto cpt_put; + + if (auth_key_len && auth_key) { + ret = hmac_init(ctl, qp, auth_key, auth_key_len, sa->hmac_key); + if (ret) + goto cpt_put; + } + + return 0; +cpt_put: + otx2_sec_idev_tx_cpt_qp_put(sess->qp); + return ret; +} + +static int +eth_sec_ipsec_in_sess_create(struct rte_eth_dev *eth_dev, + struct rte_security_ipsec_xform *ipsec, + struct rte_crypto_sym_xform *crypto_xform, + struct rte_security_session *sec_sess) +{ + struct rte_crypto_sym_xform *auth_xform, *cipher_xform; + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct otx2_sec_session_ipsec_ip *sess; + uint16_t port = eth_dev->data->port_id; + int cipher_key_len, auth_key_len, ret; + const uint8_t *cipher_key, *auth_key; + struct otx2_ipsec_fp_sa_ctl *ctl; + struct otx2_ipsec_fp_in_sa *sa; + struct otx2_sec_session *priv; + struct otx2_cpt_qp *qp; + + if (ipsec->spi >= dev->ipsec_in_max_spi) { + otx2_err("SPI exceeds max supported"); + return -EINVAL; + } + + sa = in_sa_get(port, ipsec->spi); + ctl = &sa->ctl; + + priv = get_sec_session_private_data(sec_sess); + sess = &priv->ipsec.ip; + + if (ctl->valid) { + otx2_err("SA already registered"); + return -EINVAL; + } + + memset(sa, 0, sizeof(struct otx2_ipsec_fp_in_sa)); + + auth_xform = crypto_xform; + cipher_xform = crypto_xform->next; + + cipher_key_len = 0; + auth_key_len = 0; + auth_key = NULL; + + if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) { + if (crypto_xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM) + memcpy(sa->nonce, &ipsec->salt, 4); + cipher_key = crypto_xform->aead.key.data; + cipher_key_len = crypto_xform->aead.key.length; + } else { + cipher_key = cipher_xform->cipher.key.data; + cipher_key_len = cipher_xform->cipher.key.length; + auth_key = auth_xform->auth.key.data; + auth_key_len = auth_xform->auth.key.length; + } + + if (cipher_key_len != 0) + memcpy(sa->cipher_key, cipher_key, cipher_key_len); + else + return -EINVAL; + + sess->in_sa = sa; + + sa->userdata = priv->userdata; + + if (lookup_mem_sa_index_update(eth_dev, ipsec->spi, sa)) + return -EINVAL; + + ret = ipsec_fp_sa_ctl_set(ipsec, crypto_xform, ctl); + if (ret) + return ret; + + if (auth_key_len && auth_key) { + /* Get a queue pair for HMAC init */ + ret = 
otx2_sec_idev_tx_cpt_qp_get(port, &qp); + if (ret) + return ret; + ret = hmac_init(ctl, qp, auth_key, auth_key_len, sa->hmac_key); + otx2_sec_idev_tx_cpt_qp_put(qp); + } + return ret; +} + +static int +eth_sec_ipsec_sess_create(struct rte_eth_dev *eth_dev, + struct rte_security_ipsec_xform *ipsec, + struct rte_crypto_sym_xform *crypto_xform, + struct rte_security_session *sess) +{ + int ret; + + ret = ipsec_fp_xform_verify(ipsec, crypto_xform); + if (ret) + return ret; + + if (ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) + return eth_sec_ipsec_in_sess_create(eth_dev, ipsec, + crypto_xform, sess); + else + return eth_sec_ipsec_out_sess_create(eth_dev, ipsec, + crypto_xform, sess); +} + +static int +otx2_eth_sec_session_create(void *device, + struct rte_security_session_conf *conf, + struct rte_security_session *sess, + struct rte_mempool *mempool) +{ + struct otx2_sec_session *priv; + int ret; + + if (conf->action_type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) + return -ENOTSUP; + + if (rte_mempool_get(mempool, (void **)&priv)) { + otx2_err("Could not allocate security session private data"); + return -ENOMEM; + } + + set_sec_session_private_data(sess, priv); + + /* + * Save userdata provided by the application. For ingress packets, this + * could be used to identify the SA. + */ + priv->userdata = conf->userdata; + + if (conf->protocol == RTE_SECURITY_PROTOCOL_IPSEC) + ret = eth_sec_ipsec_sess_create(device, &conf->ipsec, + conf->crypto_xform, + sess); + else + ret = -ENOTSUP; + + if (ret) + goto mempool_put; + + return 0; + +mempool_put: + rte_mempool_put(mempool, priv); + set_sec_session_private_data(sess, NULL); + return ret; +} + +static int +otx2_eth_sec_session_destroy(void *device __rte_unused, + struct rte_security_session *sess) +{ + struct otx2_sec_session_ipsec_ip *sess_ip; + struct otx2_sec_session *priv; + struct rte_mempool *sess_mp; + int ret; + + priv = get_sec_session_private_data(sess); + if (priv == NULL) + return -EINVAL; + + sess_ip = &priv->ipsec.ip; + + /* Release CPT LF used for this session */ + if (sess_ip->qp != NULL) { + ret = otx2_sec_idev_tx_cpt_qp_put(sess_ip->qp); + if (ret) + return ret; + } + + sess_mp = rte_mempool_from_obj(priv); + + set_sec_session_private_data(sess, NULL); + rte_mempool_put(sess_mp, priv); + + return 0; +} + +static unsigned int +otx2_eth_sec_session_get_size(void *device __rte_unused) +{ + return sizeof(struct otx2_sec_session); +} + +static int +otx2_eth_sec_set_pkt_mdata(void *device __rte_unused, + struct rte_security_session *session, + struct rte_mbuf *m, void *params __rte_unused) +{ + /* Set security session as the pkt metadata */ + m->udata64 = (uint64_t)session; + + return 0; +} + +static int +otx2_eth_sec_get_userdata(void *device __rte_unused, uint64_t md, + void **userdata) +{ + /* Retrieve userdata */ + *userdata = (void *)md; + + return 0; +} + +static const struct rte_security_capability * +otx2_eth_sec_capabilities_get(void *device __rte_unused) +{ + return otx2_eth_sec_capabilities; +} + +static struct rte_security_ops otx2_eth_sec_ops = { + .session_create = otx2_eth_sec_session_create, + .session_destroy = otx2_eth_sec_session_destroy, + .session_get_size = otx2_eth_sec_session_get_size, + .set_pkt_metadata = otx2_eth_sec_set_pkt_mdata, + .get_userdata = otx2_eth_sec_get_userdata, + .capabilities_get = otx2_eth_sec_capabilities_get +}; + +int +otx2_eth_sec_ctx_create(struct rte_eth_dev *eth_dev) +{ + struct rte_security_ctx *ctx; + int ret; + + ctx = rte_malloc("otx2_eth_sec_ctx", + sizeof(struct 
rte_security_ctx), 0); + if (ctx == NULL) + return -ENOMEM; + + ret = otx2_sec_idev_cfg_init(eth_dev->data->port_id); + if (ret) { + rte_free(ctx); + return ret; + } + + /* Populate ctx */ + + ctx->device = eth_dev; + ctx->ops = &otx2_eth_sec_ops; + ctx->sess_cnt = 0; + + eth_dev->security_ctx = ctx; + + return 0; +} + +void +otx2_eth_sec_ctx_destroy(struct rte_eth_dev *eth_dev) +{ + rte_free(eth_dev->security_ctx); +} + +static int +eth_sec_ipsec_cfg(struct rte_eth_dev *eth_dev, uint8_t tt) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + uint16_t port = eth_dev->data->port_id; + struct nix_inline_ipsec_lf_cfg *req; + struct otx2_mbox *mbox = dev->mbox; + struct eth_sec_tag_const tag_const; + char name[RTE_MEMZONE_NAMESIZE]; + const struct rte_memzone *mz; + + in_sa_mz_name_get(name, RTE_MEMZONE_NAMESIZE, port); + mz = rte_memzone_lookup(name); + if (mz == NULL) + return -EINVAL; + + req = otx2_mbox_alloc_msg_nix_inline_ipsec_lf_cfg(mbox); + req->enable = 1; + req->sa_base_addr = mz->iova; + + req->ipsec_cfg0.tt = tt; + + tag_const.u32 = 0; + tag_const.event_type = RTE_EVENT_TYPE_ETHDEV; + tag_const.port = port; + req->ipsec_cfg0.tag_const = tag_const.u32; + + req->ipsec_cfg0.sa_pow2_size = + rte_log2_u32(sizeof(struct otx2_ipsec_fp_in_sa)); + req->ipsec_cfg0.lenm1_max = NIX_MAX_FRS - 1; + + req->ipsec_cfg1.sa_idx_w = rte_log2_u32(dev->ipsec_in_max_spi); + req->ipsec_cfg1.sa_idx_max = dev->ipsec_in_max_spi - 1; + + return otx2_mbox_process(mbox); +} + +int +otx2_eth_sec_update_tag_type(struct rte_eth_dev *eth_dev) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct otx2_mbox *mbox = dev->mbox; + struct nix_aq_enq_rsp *rsp; + struct nix_aq_enq_req *aq; + int ret; + + aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox); + aq->qidx = 0; /* Read RQ:0 context */ + aq->ctype = NIX_AQ_CTYPE_RQ; + aq->op = NIX_AQ_INSTOP_READ; + + ret = otx2_mbox_process_msg(mbox, (void *)&rsp); + if (ret < 0) { + otx2_err("Could not read RQ context"); + return ret; + } + + /* Update tag type */ + ret = eth_sec_ipsec_cfg(eth_dev, rsp->rq.sso_tt); + if (ret < 0) + otx2_err("Could not update sec eth tag type"); + + return ret; +} + +int +otx2_eth_sec_init(struct rte_eth_dev *eth_dev) +{ + const size_t sa_width = sizeof(struct otx2_ipsec_fp_in_sa); + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + uint16_t port = eth_dev->data->port_id; + char name[RTE_MEMZONE_NAMESIZE]; + const struct rte_memzone *mz; + int mz_sz, ret; + uint16_t nb_sa; + + RTE_BUILD_BUG_ON(sa_width < 32 || sa_width > 512 || + !RTE_IS_POWER_OF_2(sa_width)); + + if (!(dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY) && + !(dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY)) + return 0; + + nb_sa = dev->ipsec_in_max_spi; + mz_sz = nb_sa * sa_width; + in_sa_mz_name_get(name, RTE_MEMZONE_NAMESIZE, port); + mz = rte_memzone_reserve_aligned(name, mz_sz, rte_socket_id(), + RTE_MEMZONE_IOVA_CONTIG, OTX2_ALIGN); + + if (mz == NULL) { + otx2_err("Could not allocate inbound SA DB"); + return -ENOMEM; + } + + memset(mz->addr, 0, mz_sz); + + ret = eth_sec_ipsec_cfg(eth_dev, SSO_TT_ORDERED); + if (ret < 0) { + otx2_err("Could not configure inline IPsec"); + goto sec_fini; + } + + return 0; + +sec_fini: + otx2_err("Could not configure device for security"); + otx2_eth_sec_fini(eth_dev); + return ret; +} + +void +otx2_eth_sec_fini(struct rte_eth_dev *eth_dev) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + uint16_t port = eth_dev->data->port_id; + char name[RTE_MEMZONE_NAMESIZE]; + + if (!(dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY) 
&& + !(dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY)) + return; + + lookup_mem_sa_tbl_clear(eth_dev); + + in_sa_mz_name_get(name, RTE_MEMZONE_NAMESIZE, port); + rte_memzone_free(rte_memzone_lookup(name)); +} diff --git a/src/spdk/dpdk/drivers/net/octeontx2/otx2_ethdev_sec.h b/src/spdk/dpdk/drivers/net/octeontx2/otx2_ethdev_sec.h new file mode 100644 index 000000000..e24358a05 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/octeontx2/otx2_ethdev_sec.h @@ -0,0 +1,139 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (C) 2020 Marvell International Ltd. + */ + +#ifndef __OTX2_ETHDEV_SEC_H__ +#define __OTX2_ETHDEV_SEC_H__ + +#include + +#include "otx2_ipsec_fp.h" + +#define OTX2_CPT_RES_ALIGN 16 +#define OTX2_NIX_SEND_DESC_ALIGN 16 +#define OTX2_CPT_INST_SIZE 64 + +#define OTX2_CPT_EGRP_INLINE_IPSEC 1 + +#define OTX2_CPT_OP_INLINE_IPSEC_OUTB (0x40 | 0x25) +#define OTX2_CPT_OP_INLINE_IPSEC_INB (0x40 | 0x26) +#define OTX2_CPT_OP_WRITE_HMAC_IPAD_OPAD (0x40 | 0x27) + +#define OTX2_SEC_CPT_COMP_GOOD 0x1 +#define OTX2_SEC_UC_COMP_GOOD 0x0 +#define OTX2_SEC_COMP_GOOD (OTX2_SEC_UC_COMP_GOOD << 8 | \ + OTX2_SEC_CPT_COMP_GOOD) + +/* CPT Result */ +struct otx2_cpt_res { + union { + struct { + uint64_t compcode:8; + uint64_t uc_compcode:8; + uint64_t doneint:1; + uint64_t reserved_17_63:47; + uint64_t reserved_64_127; + }; + uint16_t u16[8]; + }; +}; + +struct otx2_cpt_inst_s { + union { + struct { + /* W0 */ + uint64_t nixtxl : 3; + uint64_t doneint : 1; + uint64_t nixtx_addr : 60; + /* W1 */ + uint64_t res_addr : 64; + /* W2 */ + uint64_t tag : 32; + uint64_t tt : 2; + uint64_t grp : 10; + uint64_t rsvd_175_172 : 4; + uint64_t rvu_pf_func : 16; + /* W3 */ + uint64_t qord : 1; + uint64_t rsvd_194_193 : 2; + uint64_t wqe_ptr : 61; + /* W4 */ + uint64_t dlen : 16; + uint64_t param2 : 16; + uint64_t param1 : 16; + uint64_t opcode : 16; + /* W5 */ + uint64_t dptr : 64; + /* W6 */ + uint64_t rptr : 64; + /* W7 */ + uint64_t cptr : 61; + uint64_t egrp : 3; + }; + uint64_t u64[8]; + }; +}; + +/* + * Security session for inline IPsec protocol offload. This is private data of + * inline capable PMD. + */ +struct otx2_sec_session_ipsec_ip { + RTE_STD_C11 + union { + /* + * Inbound SA would accessed by crypto block. And so the memory + * is allocated differently and shared with the h/w. Only + * holding a pointer to this memory in the session private + * space. 
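+	 * The outbound SA, in contrast, is owned by the session itself
+	 * and is stored inline in out_sa below.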
+ */ + void *in_sa; + /* Outbound SA */ + struct otx2_ipsec_fp_out_sa out_sa; + }; + + /* Address of CPT LMTLINE */ + void *cpt_lmtline; + /* CPT LF enqueue register address */ + rte_iova_t cpt_nq_reg; + + /* Pre calculated lengths and data for a session */ + uint8_t partial_len; + uint8_t roundup_len; + uint8_t roundup_byte; + uint16_t ip_id; + union { + uint64_t esn; + struct { + uint32_t seq; + uint32_t esn_hi; + }; + }; + + uint64_t inst_w7; + + /* CPT QP used by SA */ + struct otx2_cpt_qp *qp; +}; + +struct otx2_sec_session_ipsec { + struct otx2_sec_session_ipsec_ip ip; +}; + +struct otx2_sec_session { + struct otx2_sec_session_ipsec ipsec; + void *userdata; + /**< Userdata registered by the application */ +} __rte_cache_aligned; + +int otx2_eth_sec_ctx_create(struct rte_eth_dev *eth_dev); + +void otx2_eth_sec_ctx_destroy(struct rte_eth_dev *eth_dev); + +int otx2_eth_sec_update_tag_type(struct rte_eth_dev *eth_dev); + +int otx2_eth_sec_init(struct rte_eth_dev *eth_dev); + +void otx2_eth_sec_fini(struct rte_eth_dev *eth_dev); + +#endif /* __OTX2_ETHDEV_SEC_H__ */ diff --git a/src/spdk/dpdk/drivers/net/octeontx2/otx2_ethdev_sec_tx.h b/src/spdk/dpdk/drivers/net/octeontx2/otx2_ethdev_sec_tx.h new file mode 100644 index 000000000..2e35a8c77 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/octeontx2/otx2_ethdev_sec_tx.h @@ -0,0 +1,181 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(C) 2020 Marvell International Ltd. + */ + +#ifndef __OTX2_ETHDEV_SEC_TX_H__ +#define __OTX2_ETHDEV_SEC_TX_H__ + +#include +#include + +#include "otx2_ethdev_sec.h" + +struct otx2_ipsec_fp_out_hdr { + uint32_t ip_id; + uint32_t seq; + uint8_t iv[16]; +}; + +static __rte_always_inline int32_t +otx2_ipsec_fp_out_rlen_get(struct otx2_sec_session_ipsec_ip *sess, + uint32_t plen) +{ + uint32_t enc_payload_len; + + enc_payload_len = RTE_ALIGN_CEIL(plen + sess->roundup_len, + sess->roundup_byte); + + return sess->partial_len + enc_payload_len; +} + +static __rte_always_inline void +otx2_ssogws_head_wait(struct otx2_ssogws *ws); + +static __rte_always_inline int +otx2_sec_event_tx(struct otx2_ssogws *ws, struct rte_event *ev, + struct rte_mbuf *m, const struct otx2_eth_txq *txq, + const uint32_t offload_flags) +{ + uint32_t dlen, rlen, desc_headroom, extend_head, extend_tail; + struct otx2_sec_session_ipsec_ip *sess; + struct otx2_ipsec_fp_out_hdr *hdr; + struct otx2_ipsec_fp_out_sa *sa; + uint64_t data_addr, desc_addr; + struct otx2_sec_session *priv; + struct otx2_cpt_inst_s inst; + uint64_t lmt_status; + char *data; + + struct desc { + struct otx2_cpt_res cpt_res __rte_aligned(OTX2_CPT_RES_ALIGN); + struct nix_send_hdr_s nix_hdr + __rte_aligned(OTX2_NIX_SEND_DESC_ALIGN); + union nix_send_sg_s nix_sg; + struct nix_iova_s nix_iova; + } *sd; + + priv = get_sec_session_private_data((void *)(m->udata64)); + sess = &priv->ipsec.ip; + sa = &sess->out_sa; + + RTE_ASSERT(sess->cpt_lmtline != NULL); + RTE_ASSERT(!(offload_flags & (NIX_TX_OFFLOAD_MBUF_NOFF_F | + NIX_TX_OFFLOAD_VLAN_QINQ_F))); + + dlen = rte_pktmbuf_pkt_len(m) + sizeof(*hdr) - RTE_ETHER_HDR_LEN; + rlen = otx2_ipsec_fp_out_rlen_get(sess, dlen - sizeof(*hdr)); + + RTE_BUILD_BUG_ON(OTX2_CPT_RES_ALIGN % OTX2_NIX_SEND_DESC_ALIGN); + RTE_BUILD_BUG_ON(sizeof(sd->cpt_res) % OTX2_NIX_SEND_DESC_ALIGN); + + extend_head = sizeof(*hdr); + extend_tail = rlen - dlen; + + desc_headroom = (OTX2_CPT_RES_ALIGN - 1) + sizeof(*sd); + + if (unlikely(!rte_pktmbuf_is_contiguous(m)) || + unlikely(rte_pktmbuf_headroom(m) < extend_head + desc_headroom) || + 
unlikely(rte_pktmbuf_tailroom(m) < extend_tail)) { + goto drop; + } + + /* + * Extend mbuf data to point to the expected packet buffer for NIX. + * This includes the Ethernet header followed by the encrypted IPsec + * payload + */ + rte_pktmbuf_append(m, extend_tail); + data = rte_pktmbuf_prepend(m, extend_head); + data_addr = rte_pktmbuf_mtophys(m); + + /* + * Move the Ethernet header, to insert otx2_ipsec_fp_out_hdr prior + * to the IP header + */ + memcpy(data, data + sizeof(*hdr), RTE_ETHER_HDR_LEN); + + hdr = (struct otx2_ipsec_fp_out_hdr *)(data + RTE_ETHER_HDR_LEN); + + if (sa->ctl.enc_type == OTX2_IPSEC_FP_SA_ENC_AES_GCM) { + /* AES-128-GCM */ + memcpy(hdr->iv, &sa->nonce, 4); + memset(hdr->iv + 4, 0, 12); //TODO: make it random + } else { + /* AES-128-[CBC] + [SHA1] */ + memset(hdr->iv, 0, 16); //TODO: make it random + } + + /* Keep CPT result and NIX send descriptors in headroom */ + sd = (void *)RTE_PTR_ALIGN(data - desc_headroom, OTX2_CPT_RES_ALIGN); + desc_addr = data_addr - RTE_PTR_DIFF(data, sd); + + /* Prepare CPT instruction */ + + inst.nixtx_addr = (desc_addr + offsetof(struct desc, nix_hdr)) >> 4; + inst.doneint = 0; + inst.nixtxl = 1; + inst.res_addr = desc_addr + offsetof(struct desc, cpt_res); + inst.u64[2] = 0; + inst.u64[3] = 0; + inst.wqe_ptr = desc_addr >> 3; /* FIXME: Handle errors */ + inst.qord = 1; + inst.opcode = OTX2_CPT_OP_INLINE_IPSEC_OUTB; + inst.dlen = dlen; + inst.dptr = data_addr + RTE_ETHER_HDR_LEN; + inst.u64[7] = sess->inst_w7; + + /* First word contains 8 bit completion code & 8 bit uc comp code */ + sd->cpt_res.u16[0] = 0; + + /* Prepare NIX send descriptors for output expected from CPT */ + + sd->nix_hdr.w0.u = 0; + sd->nix_hdr.w1.u = 0; + sd->nix_hdr.w0.sq = txq->sq; + sd->nix_hdr.w0.sizem1 = 1; + sd->nix_hdr.w0.total = rte_pktmbuf_data_len(m); + sd->nix_hdr.w0.aura = npa_lf_aura_handle_to_aura(m->pool->pool_id); + + sd->nix_sg.u = 0; + sd->nix_sg.subdc = NIX_SUBDC_SG; + sd->nix_sg.ld_type = NIX_SENDLDTYPE_LDD; + sd->nix_sg.segs = 1; + sd->nix_sg.seg1_size = rte_pktmbuf_data_len(m); + + sd->nix_iova.addr = rte_mbuf_data_iova(m); + + /* Mark mempool object as "put" since it is freed by NIX */ + __mempool_check_cookies(m->pool, (void **)&m, 1, 0); + + if (!ev->sched_type) + otx2_ssogws_head_wait(ws); + + inst.param1 = sess->esn_hi >> 16; + inst.param2 = sess->esn_hi & 0xffff; + + hdr->seq = rte_cpu_to_be_32(sess->seq); + hdr->ip_id = rte_cpu_to_be_32(sess->ip_id); + + sess->ip_id++; + sess->esn++; + + rte_cio_wmb(); + + do { + otx2_lmt_mov(sess->cpt_lmtline, &inst, 2); + lmt_status = otx2_lmt_submit(sess->cpt_nq_reg); + } while (lmt_status == 0); + + return 1; + +drop: + if (offload_flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) { + /* Don't free if reference count > 1 */ + if (rte_pktmbuf_prefree_seg(m) == NULL) + return 0; + } + rte_pktmbuf_free(m); + return 0; +} + +#endif /* __OTX2_ETHDEV_SEC_TX_H__ */ diff --git a/src/spdk/dpdk/drivers/net/octeontx2/otx2_flow.c b/src/spdk/dpdk/drivers/net/octeontx2/otx2_flow.c new file mode 100644 index 000000000..13a76e441 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/octeontx2/otx2_flow.c @@ -0,0 +1,1007 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(C) 2019 Marvell International Ltd. 
+ */ + +#include "otx2_ethdev.h" +#include "otx2_ethdev_sec.h" +#include "otx2_flow.h" + +int +otx2_flow_free_all_resources(struct otx2_eth_dev *hw) +{ + struct otx2_npc_flow_info *npc = &hw->npc_flow; + struct otx2_mbox *mbox = hw->mbox; + struct otx2_mcam_ents_info *info; + struct rte_bitmap *bmap; + struct rte_flow *flow; + int entry_count = 0; + int rc, idx; + + for (idx = 0; idx < npc->flow_max_priority; idx++) { + info = &npc->flow_entry_info[idx]; + entry_count += info->live_ent; + } + + if (entry_count == 0) + return 0; + + /* Free all MCAM entries allocated */ + rc = otx2_flow_mcam_free_all_entries(mbox); + + /* Free any MCAM counters and delete flow list */ + for (idx = 0; idx < npc->flow_max_priority; idx++) { + while ((flow = TAILQ_FIRST(&npc->flow_list[idx])) != NULL) { + if (flow->ctr_id != NPC_COUNTER_NONE) + rc |= otx2_flow_mcam_free_counter(mbox, + flow->ctr_id); + + TAILQ_REMOVE(&npc->flow_list[idx], flow, next); + rte_free(flow); + bmap = npc->live_entries[flow->priority]; + rte_bitmap_clear(bmap, flow->mcam_id); + } + info = &npc->flow_entry_info[idx]; + info->free_ent = 0; + info->live_ent = 0; + } + return rc; +} + + +static int +flow_program_npc(struct otx2_parse_state *pst, struct otx2_mbox *mbox, + struct otx2_npc_flow_info *flow_info) +{ + /* This is non-LDATA part in search key */ + uint64_t key_data[2] = {0ULL, 0ULL}; + uint64_t key_mask[2] = {0ULL, 0ULL}; + int intf = pst->flow->nix_intf; + int key_len, bit = 0, index; + int off, idx, data_off = 0; + uint8_t lid, mask, data; + uint16_t layer_info; + uint64_t lt, flags; + + + /* Skip till Layer A data start */ + while (bit < NPC_PARSE_KEX_S_LA_OFFSET) { + if (flow_info->keyx_supp_nmask[intf] & (1 << bit)) + data_off++; + bit++; + } + + /* Each bit represents 1 nibble */ + data_off *= 4; + + index = 0; + for (lid = 0; lid < NPC_MAX_LID; lid++) { + /* Offset in key */ + off = NPC_PARSE_KEX_S_LID_OFFSET(lid); + lt = pst->lt[lid] & 0xf; + flags = pst->flags[lid] & 0xff; + + /* NPC_LAYER_KEX_S */ + layer_info = ((flow_info->keyx_supp_nmask[intf] >> off) & 0x7); + + if (layer_info) { + for (idx = 0; idx <= 2 ; idx++) { + if (layer_info & (1 << idx)) { + if (idx == 2) + data = lt; + else if (idx == 1) + data = ((flags >> 4) & 0xf); + else + data = (flags & 0xf); + + if (data_off >= 64) { + data_off = 0; + index++; + } + key_data[index] |= ((uint64_t)data << + data_off); + mask = 0xf; + if (lt == 0) + mask = 0; + key_mask[index] |= ((uint64_t)mask << + data_off); + data_off += 4; + } + } + } + } + + otx2_npc_dbg("Npc prog key data0: 0x%" PRIx64 ", data1: 0x%" PRIx64, + key_data[0], key_data[1]); + + /* Copy this into mcam string */ + key_len = (pst->npc->keyx_len[intf] + 7) / 8; + otx2_npc_dbg("Key_len = %d", key_len); + memcpy(pst->flow->mcam_data, key_data, key_len); + memcpy(pst->flow->mcam_mask, key_mask, key_len); + + otx2_npc_dbg("Final flow data"); + for (idx = 0; idx < OTX2_MAX_MCAM_WIDTH_DWORDS; idx++) { + otx2_npc_dbg("data[%d]: 0x%" PRIx64 ", mask[%d]: 0x%" PRIx64, + idx, pst->flow->mcam_data[idx], + idx, pst->flow->mcam_mask[idx]); + } + + /* + * Now we have mcam data and mask formatted as + * [Key_len/4 nibbles][0 or 1 nibble hole][data] + * hole is present if key_len is odd number of nibbles. + * mcam data must be split into 64 bits + 48 bits segments + * for each back W0, W1. 
+ */ + + return otx2_flow_mcam_alloc_and_write(pst->flow, mbox, pst, flow_info); +} + +static int +flow_parse_attr(struct rte_eth_dev *eth_dev, + const struct rte_flow_attr *attr, + struct rte_flow_error *error, + struct rte_flow *flow) +{ + struct otx2_eth_dev *dev = eth_dev->data->dev_private; + const char *errmsg = NULL; + + if (attr == NULL) + errmsg = "Attribute can't be empty"; + else if (attr->group) + errmsg = "Groups are not supported"; + else if (attr->priority >= dev->npc_flow.flow_max_priority) + errmsg = "Priority should be with in specified range"; + else if ((!attr->egress && !attr->ingress) || + (attr->egress && attr->ingress)) + errmsg = "Exactly one of ingress or egress must be set"; + + if (errmsg != NULL) { + rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR, + attr, errmsg); + return -ENOTSUP; + } + + if (attr->ingress) + flow->nix_intf = OTX2_INTF_RX; + else + flow->nix_intf = OTX2_INTF_TX; + + flow->priority = attr->priority; + return 0; +} + +static inline int +flow_get_free_rss_grp(struct rte_bitmap *bmap, + uint32_t size, uint32_t *pos) +{ + for (*pos = 0; *pos < size; ++*pos) { + if (!rte_bitmap_get(bmap, *pos)) + break; + } + + return *pos < size ? 0 : -1; +} + +static int +flow_configure_rss_action(struct otx2_eth_dev *dev, + const struct rte_flow_action_rss *rss, + uint8_t *alg_idx, uint32_t *rss_grp, + int mcam_index) +{ + struct otx2_npc_flow_info *flow_info = &dev->npc_flow; + uint16_t reta[NIX_RSS_RETA_SIZE_MAX]; + uint32_t flowkey_cfg, grp_aval, i; + uint16_t *ind_tbl = NULL; + uint8_t flowkey_algx; + int rc; + + rc = flow_get_free_rss_grp(flow_info->rss_grp_entries, + flow_info->rss_grps, &grp_aval); + /* RSS group :0 is not usable for flow rss action */ + if (rc < 0 || grp_aval == 0) + return -ENOSPC; + + *rss_grp = grp_aval; + + otx2_nix_rss_set_key(dev, (uint8_t *)(uintptr_t)rss->key, + rss->key_len); + + /* If queue count passed in the rss action is less than + * HW configured reta size, replicate rss action reta + * across HW reta table. 
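+	 * For example, with an RSS table size of 64 and a 4-entry queue
+	 * list, the list is repeated 16 times; any remainder slots are
+	 * filled from the start of the queue list.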
+ */ + if (dev->rss_info.rss_size > rss->queue_num) { + ind_tbl = reta; + + for (i = 0; i < (dev->rss_info.rss_size / rss->queue_num); i++) + memcpy(reta + i * rss->queue_num, rss->queue, + sizeof(uint16_t) * rss->queue_num); + + i = dev->rss_info.rss_size % rss->queue_num; + if (i) + memcpy(&reta[dev->rss_info.rss_size] - i, + rss->queue, i * sizeof(uint16_t)); + } else { + ind_tbl = (uint16_t *)(uintptr_t)rss->queue; + } + + rc = otx2_nix_rss_tbl_init(dev, *rss_grp, ind_tbl); + if (rc) { + otx2_err("Failed to init rss table rc = %d", rc); + return rc; + } + + flowkey_cfg = otx2_rss_ethdev_to_nix(dev, rss->types, rss->level); + + rc = otx2_rss_set_hf(dev, flowkey_cfg, &flowkey_algx, + *rss_grp, mcam_index); + if (rc) { + otx2_err("Failed to set rss hash function rc = %d", rc); + return rc; + } + + *alg_idx = flowkey_algx; + + rte_bitmap_set(flow_info->rss_grp_entries, *rss_grp); + + return 0; +} + + +static int +flow_program_rss_action(struct rte_eth_dev *eth_dev, + const struct rte_flow_action actions[], + struct rte_flow *flow) +{ + struct otx2_eth_dev *dev = eth_dev->data->dev_private; + const struct rte_flow_action_rss *rss; + uint32_t rss_grp; + uint8_t alg_idx; + int rc; + + for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { + if (actions->type == RTE_FLOW_ACTION_TYPE_RSS) { + rss = (const struct rte_flow_action_rss *)actions->conf; + + rc = flow_configure_rss_action(dev, + rss, &alg_idx, &rss_grp, + flow->mcam_id); + if (rc) + return rc; + + flow->npc_action |= + ((uint64_t)(alg_idx & NIX_RSS_ACT_ALG_MASK) << + NIX_RSS_ACT_ALG_OFFSET) | + ((uint64_t)(rss_grp & NIX_RSS_ACT_GRP_MASK) << + NIX_RSS_ACT_GRP_OFFSET); + } + } + return 0; +} + +static int +flow_free_rss_action(struct rte_eth_dev *eth_dev, + struct rte_flow *flow) +{ + struct otx2_eth_dev *dev = eth_dev->data->dev_private; + struct otx2_npc_flow_info *npc = &dev->npc_flow; + uint32_t rss_grp; + + if (flow->npc_action & NIX_RX_ACTIONOP_RSS) { + rss_grp = (flow->npc_action >> NIX_RSS_ACT_GRP_OFFSET) & + NIX_RSS_ACT_GRP_MASK; + if (rss_grp == 0 || rss_grp >= npc->rss_grps) + return -EINVAL; + + rte_bitmap_clear(npc->rss_grp_entries, rss_grp); + } + + return 0; +} + +static int +flow_update_sec_tt(struct rte_eth_dev *eth_dev, + const struct rte_flow_action actions[]) +{ + int rc = 0; + + for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { + if (actions->type == RTE_FLOW_ACTION_TYPE_SECURITY) { + rc = otx2_eth_sec_update_tag_type(eth_dev); + break; + } + } + + return rc; +} + +static int +flow_parse_meta_items(__rte_unused struct otx2_parse_state *pst) +{ + otx2_npc_dbg("Meta Item"); + return 0; +} + +/* + * Parse function of each layer: + * - Consume one or more patterns that are relevant. + * - Update parse_state + * - Set parse_state.pattern = last item consumed + * - Set appropriate error code/message when returning error. 
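+ * flow_parse_pattern() below invokes these stage functions in order and
+ * stops when the pattern ends or a stage marks the parse state as
+ * terminated.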
+ */ +typedef int (*flow_parse_stage_func_t)(struct otx2_parse_state *pst); + +static int +flow_parse_pattern(struct rte_eth_dev *dev, + const struct rte_flow_item pattern[], + struct rte_flow_error *error, + struct rte_flow *flow, + struct otx2_parse_state *pst) +{ + flow_parse_stage_func_t parse_stage_funcs[] = { + flow_parse_meta_items, + otx2_flow_parse_higig2_hdr, + otx2_flow_parse_la, + otx2_flow_parse_lb, + otx2_flow_parse_lc, + otx2_flow_parse_ld, + otx2_flow_parse_le, + otx2_flow_parse_lf, + otx2_flow_parse_lg, + otx2_flow_parse_lh, + }; + struct otx2_eth_dev *hw = dev->data->dev_private; + uint8_t layer = 0; + int key_offset; + int rc; + + if (pattern == NULL) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL, + "pattern is NULL"); + return -EINVAL; + } + + memset(pst, 0, sizeof(*pst)); + pst->npc = &hw->npc_flow; + pst->error = error; + pst->flow = flow; + + /* Use integral byte offset */ + key_offset = pst->npc->keyx_len[flow->nix_intf]; + key_offset = (key_offset + 7) / 8; + + /* Location where LDATA would begin */ + pst->mcam_data = (uint8_t *)flow->mcam_data; + pst->mcam_mask = (uint8_t *)flow->mcam_mask; + + while (pattern->type != RTE_FLOW_ITEM_TYPE_END && + layer < RTE_DIM(parse_stage_funcs)) { + otx2_npc_dbg("Pattern type = %d", pattern->type); + + /* Skip place-holders */ + pattern = otx2_flow_skip_void_and_any_items(pattern); + + pst->pattern = pattern; + otx2_npc_dbg("Is tunnel = %d, layer = %d", pst->tunnel, layer); + rc = parse_stage_funcs[layer](pst); + if (rc != 0) + return -rte_errno; + + layer++; + + /* + * Parse stage function sets pst->pattern to + * 1 past the last item it consumed. + */ + pattern = pst->pattern; + + if (pst->terminate) + break; + } + + /* Skip trailing place-holders */ + pattern = otx2_flow_skip_void_and_any_items(pattern); + + /* Are there more items than what we can handle? */ + if (pattern->type != RTE_FLOW_ITEM_TYPE_END) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, pattern, + "unsupported item in the sequence"); + return -ENOTSUP; + } + + return 0; +} + +static int +flow_parse_rule(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error, + struct rte_flow *flow, + struct otx2_parse_state *pst) +{ + int err; + + /* Check attributes */ + err = flow_parse_attr(dev, attr, error, flow); + if (err) + return err; + + /* Check actions */ + err = otx2_flow_parse_actions(dev, attr, actions, error, flow); + if (err) + return err; + + /* Check pattern */ + err = flow_parse_pattern(dev, pattern, error, flow, pst); + if (err) + return err; + + /* Check for overlaps? 
*/ + return 0; +} + +static int +otx2_flow_validate(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + struct otx2_parse_state parse_state; + struct rte_flow flow; + + memset(&flow, 0, sizeof(flow)); + return flow_parse_rule(dev, attr, pattern, actions, error, &flow, + &parse_state); +} + +static struct rte_flow * +otx2_flow_create(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + struct otx2_eth_dev *hw = dev->data->dev_private; + struct otx2_parse_state parse_state; + struct otx2_mbox *mbox = hw->mbox; + struct rte_flow *flow, *flow_iter; + struct otx2_flow_list *list; + int rc; + + flow = rte_zmalloc("otx2_rte_flow", sizeof(*flow), 0); + if (flow == NULL) { + rte_flow_error_set(error, ENOMEM, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "Memory allocation failed"); + return NULL; + } + memset(flow, 0, sizeof(*flow)); + + rc = flow_parse_rule(dev, attr, pattern, actions, error, flow, + &parse_state); + if (rc != 0) + goto err_exit; + + rc = flow_program_npc(&parse_state, mbox, &hw->npc_flow); + if (rc != 0) { + rte_flow_error_set(error, EIO, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "Failed to insert filter"); + goto err_exit; + } + + rc = flow_program_rss_action(dev, actions, flow); + if (rc != 0) { + rte_flow_error_set(error, EIO, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "Failed to program rss action"); + goto err_exit; + } + + if (hw->rx_offloads & DEV_RX_OFFLOAD_SECURITY) { + rc = flow_update_sec_tt(dev, actions); + if (rc != 0) { + rte_flow_error_set(error, EIO, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "Failed to update tt with sec act"); + goto err_exit; + } + } + + list = &hw->npc_flow.flow_list[flow->priority]; + /* List in ascending order of mcam entries */ + TAILQ_FOREACH(flow_iter, list, next) { + if (flow_iter->mcam_id > flow->mcam_id) { + TAILQ_INSERT_BEFORE(flow_iter, flow, next); + return flow; + } + } + + TAILQ_INSERT_TAIL(list, flow, next); + return flow; + +err_exit: + rte_free(flow); + return NULL; +} + +static int +otx2_flow_destroy(struct rte_eth_dev *dev, + struct rte_flow *flow, + struct rte_flow_error *error) +{ + struct otx2_eth_dev *hw = dev->data->dev_private; + struct otx2_npc_flow_info *npc = &hw->npc_flow; + struct otx2_mbox *mbox = hw->mbox; + struct rte_bitmap *bmap; + uint16_t match_id; + int rc; + + match_id = (flow->npc_action >> NIX_RX_ACT_MATCH_OFFSET) & + NIX_RX_ACT_MATCH_MASK; + + if (match_id && match_id < OTX2_FLOW_ACTION_FLAG_DEFAULT) { + if (rte_atomic32_read(&npc->mark_actions) == 0) + return -EINVAL; + + /* Clear mark offload flag if there are no more mark actions */ + if (rte_atomic32_sub_return(&npc->mark_actions, 1) == 0) { + hw->rx_offload_flags &= ~NIX_RX_OFFLOAD_MARK_UPDATE_F; + otx2_eth_set_rx_function(dev); + } + } + + rc = flow_free_rss_action(dev, flow); + if (rc != 0) { + rte_flow_error_set(error, EIO, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "Failed to free rss action"); + } + + rc = otx2_flow_mcam_free_entry(mbox, flow->mcam_id); + if (rc != 0) { + rte_flow_error_set(error, EIO, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "Failed to destroy filter"); + } + + TAILQ_REMOVE(&npc->flow_list[flow->priority], flow, next); + + bmap = npc->live_entries[flow->priority]; + rte_bitmap_clear(bmap, flow->mcam_id); + + rte_free(flow); + return 0; +} + +static int 
+otx2_flow_flush(struct rte_eth_dev *dev, + struct rte_flow_error *error) +{ + struct otx2_eth_dev *hw = dev->data->dev_private; + int rc; + + rc = otx2_flow_free_all_resources(hw); + if (rc) { + otx2_err("Error when deleting NPC MCAM entries " + ", counters"); + rte_flow_error_set(error, EIO, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "Failed to flush filter"); + return -rte_errno; + } + + return 0; +} + +static int +otx2_flow_isolate(struct rte_eth_dev *dev __rte_unused, + int enable __rte_unused, + struct rte_flow_error *error) +{ + /* + * If we support, we need to un-install the default mcam + * entry for this port. + */ + + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "Flow isolation not supported"); + + return -rte_errno; +} + +static int +otx2_flow_query(struct rte_eth_dev *dev, + struct rte_flow *flow, + const struct rte_flow_action *action, + void *data, + struct rte_flow_error *error) +{ + struct otx2_eth_dev *hw = dev->data->dev_private; + struct rte_flow_query_count *query = data; + struct otx2_mbox *mbox = hw->mbox; + const char *errmsg = NULL; + int errcode = ENOTSUP; + int rc; + + if (action->type != RTE_FLOW_ACTION_TYPE_COUNT) { + errmsg = "Only COUNT is supported in query"; + goto err_exit; + } + + if (flow->ctr_id == NPC_COUNTER_NONE) { + errmsg = "Counter is not available"; + goto err_exit; + } + + rc = otx2_flow_mcam_read_counter(mbox, flow->ctr_id, &query->hits); + if (rc != 0) { + errcode = EIO; + errmsg = "Error reading flow counter"; + goto err_exit; + } + query->hits_set = 1; + query->bytes_set = 0; + + if (query->reset) + rc = otx2_flow_mcam_clear_counter(mbox, flow->ctr_id); + if (rc != 0) { + errcode = EIO; + errmsg = "Error clearing flow counter"; + goto err_exit; + } + + return 0; + +err_exit: + rte_flow_error_set(error, errcode, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + errmsg); + return -rte_errno; +} + +const struct rte_flow_ops otx2_flow_ops = { + .validate = otx2_flow_validate, + .create = otx2_flow_create, + .destroy = otx2_flow_destroy, + .flush = otx2_flow_flush, + .query = otx2_flow_query, + .isolate = otx2_flow_isolate, +}; + +static int +flow_supp_key_len(uint32_t supp_mask) +{ + int nib_count = 0; + while (supp_mask) { + nib_count++; + supp_mask &= (supp_mask - 1); + } + return nib_count * 4; +} + +/* Refer HRM register: + * NPC_AF_INTF(0..1)_LID(0..7)_LT(0..15)_LD(0..1)_CFG + * and + * NPC_AF_INTF(0..1)_LDATA(0..1)_FLAGS(0..15)_CFG + **/ +#define BYTESM1_SHIFT 16 +#define HDR_OFF_SHIFT 8 +static void +flow_update_kex_info(struct npc_xtract_info *xtract_info, + uint64_t val) +{ + xtract_info->len = ((val >> BYTESM1_SHIFT) & 0xf) + 1; + xtract_info->hdr_off = (val >> HDR_OFF_SHIFT) & 0xff; + xtract_info->key_off = val & 0x3f; + xtract_info->enable = ((val >> 7) & 0x1); + xtract_info->flags_enable = ((val >> 6) & 0x1); +} + +static void +flow_process_mkex_cfg(struct otx2_npc_flow_info *npc, + struct npc_get_kex_cfg_rsp *kex_rsp) +{ + volatile uint64_t (*q)[NPC_MAX_INTF][NPC_MAX_LID][NPC_MAX_LT] + [NPC_MAX_LD]; + struct npc_xtract_info *x_info = NULL; + int lid, lt, ld, fl, ix; + otx2_dxcfg_t *p; + uint64_t keyw; + uint64_t val; + + npc->keyx_supp_nmask[NPC_MCAM_RX] = + kex_rsp->rx_keyx_cfg & 0x7fffffffULL; + npc->keyx_supp_nmask[NPC_MCAM_TX] = + kex_rsp->tx_keyx_cfg & 0x7fffffffULL; + npc->keyx_len[NPC_MCAM_RX] = + flow_supp_key_len(npc->keyx_supp_nmask[NPC_MCAM_RX]); + npc->keyx_len[NPC_MCAM_TX] = + flow_supp_key_len(npc->keyx_supp_nmask[NPC_MCAM_TX]); + + keyw = (kex_rsp->rx_keyx_cfg >> 32) & 0x7ULL; + 
npc->keyw[NPC_MCAM_RX] = keyw; + keyw = (kex_rsp->tx_keyx_cfg >> 32) & 0x7ULL; + npc->keyw[NPC_MCAM_TX] = keyw; + + /* Update KEX_LD_FLAG */ + for (ix = 0; ix < NPC_MAX_INTF; ix++) { + for (ld = 0; ld < NPC_MAX_LD; ld++) { + for (fl = 0; fl < NPC_MAX_LFL; fl++) { + x_info = + &npc->prx_fxcfg[ix][ld][fl].xtract[0]; + val = kex_rsp->intf_ld_flags[ix][ld][fl]; + flow_update_kex_info(x_info, val); + } + } + } + + /* Update LID, LT and LDATA cfg */ + p = &npc->prx_dxcfg; + q = (volatile uint64_t (*)[][NPC_MAX_LID][NPC_MAX_LT][NPC_MAX_LD]) + (&kex_rsp->intf_lid_lt_ld); + for (ix = 0; ix < NPC_MAX_INTF; ix++) { + for (lid = 0; lid < NPC_MAX_LID; lid++) { + for (lt = 0; lt < NPC_MAX_LT; lt++) { + for (ld = 0; ld < NPC_MAX_LD; ld++) { + x_info = &(*p)[ix][lid][lt].xtract[ld]; + val = (*q)[ix][lid][lt][ld]; + flow_update_kex_info(x_info, val); + } + } + } + } + /* Update LDATA Flags cfg */ + npc->prx_lfcfg[0].i = kex_rsp->kex_ld_flags[0]; + npc->prx_lfcfg[1].i = kex_rsp->kex_ld_flags[1]; +} + +static struct otx2_idev_kex_cfg * +flow_intra_dev_kex_cfg(void) +{ + static const char name[] = "octeontx2_intra_device_kex_conf"; + struct otx2_idev_kex_cfg *idev; + const struct rte_memzone *mz; + + mz = rte_memzone_lookup(name); + if (mz) + return mz->addr; + + /* Request for the first time */ + mz = rte_memzone_reserve_aligned(name, sizeof(struct otx2_idev_kex_cfg), + SOCKET_ID_ANY, 0, OTX2_ALIGN); + if (mz) { + idev = mz->addr; + rte_atomic16_set(&idev->kex_refcnt, 0); + return idev; + } + return NULL; +} + +static int +flow_fetch_kex_cfg(struct otx2_eth_dev *dev) +{ + struct otx2_npc_flow_info *npc = &dev->npc_flow; + struct npc_get_kex_cfg_rsp *kex_rsp; + struct otx2_mbox *mbox = dev->mbox; + char mkex_pfl_name[MKEX_NAME_LEN]; + struct otx2_idev_kex_cfg *idev; + int rc = 0; + + idev = flow_intra_dev_kex_cfg(); + if (!idev) + return -ENOMEM; + + /* Is kex_cfg read by any another driver? 
*/ + if (rte_atomic16_add_return(&idev->kex_refcnt, 1) == 1) { + /* Call mailbox to get key & data size */ + (void)otx2_mbox_alloc_msg_npc_get_kex_cfg(mbox); + otx2_mbox_msg_send(mbox, 0); + rc = otx2_mbox_get_rsp(mbox, 0, (void *)&kex_rsp); + if (rc) { + otx2_err("Failed to fetch NPC keyx config"); + goto done; + } + memcpy(&idev->kex_cfg, kex_rsp, + sizeof(struct npc_get_kex_cfg_rsp)); + } + + otx2_mbox_memcpy(mkex_pfl_name, + idev->kex_cfg.mkex_pfl_name, MKEX_NAME_LEN); + + strlcpy((char *)dev->mkex_pfl_name, + mkex_pfl_name, sizeof(dev->mkex_pfl_name)); + + flow_process_mkex_cfg(npc, &idev->kex_cfg); + +done: + return rc; +} + +int +otx2_flow_init(struct otx2_eth_dev *hw) +{ + uint8_t *mem = NULL, *nix_mem = NULL, *npc_mem = NULL; + struct otx2_npc_flow_info *npc = &hw->npc_flow; + uint32_t bmap_sz; + int rc = 0, idx; + + rc = flow_fetch_kex_cfg(hw); + if (rc) { + otx2_err("Failed to fetch NPC keyx config from idev"); + return rc; + } + + rte_atomic32_init(&npc->mark_actions); + + npc->mcam_entries = NPC_MCAM_TOT_ENTRIES >> npc->keyw[NPC_MCAM_RX]; + /* Free, free_rev, live and live_rev entries */ + bmap_sz = rte_bitmap_get_memory_footprint(npc->mcam_entries); + mem = rte_zmalloc(NULL, 4 * bmap_sz * npc->flow_max_priority, + RTE_CACHE_LINE_SIZE); + if (mem == NULL) { + otx2_err("Bmap alloc failed"); + rc = -ENOMEM; + return rc; + } + + npc->flow_entry_info = rte_zmalloc(NULL, npc->flow_max_priority + * sizeof(struct otx2_mcam_ents_info), + 0); + if (npc->flow_entry_info == NULL) { + otx2_err("flow_entry_info alloc failed"); + rc = -ENOMEM; + goto err; + } + + npc->free_entries = rte_zmalloc(NULL, npc->flow_max_priority + * sizeof(struct rte_bitmap *), + 0); + if (npc->free_entries == NULL) { + otx2_err("free_entries alloc failed"); + rc = -ENOMEM; + goto err; + } + + npc->free_entries_rev = rte_zmalloc(NULL, npc->flow_max_priority + * sizeof(struct rte_bitmap *), + 0); + if (npc->free_entries_rev == NULL) { + otx2_err("free_entries_rev alloc failed"); + rc = -ENOMEM; + goto err; + } + + npc->live_entries = rte_zmalloc(NULL, npc->flow_max_priority + * sizeof(struct rte_bitmap *), + 0); + if (npc->live_entries == NULL) { + otx2_err("live_entries alloc failed"); + rc = -ENOMEM; + goto err; + } + + npc->live_entries_rev = rte_zmalloc(NULL, npc->flow_max_priority + * sizeof(struct rte_bitmap *), + 0); + if (npc->live_entries_rev == NULL) { + otx2_err("live_entries_rev alloc failed"); + rc = -ENOMEM; + goto err; + } + + npc->flow_list = rte_zmalloc(NULL, npc->flow_max_priority + * sizeof(struct otx2_flow_list), + 0); + if (npc->flow_list == NULL) { + otx2_err("flow_list alloc failed"); + rc = -ENOMEM; + goto err; + } + + npc_mem = mem; + for (idx = 0; idx < npc->flow_max_priority; idx++) { + TAILQ_INIT(&npc->flow_list[idx]); + + npc->free_entries[idx] = + rte_bitmap_init(npc->mcam_entries, mem, bmap_sz); + mem += bmap_sz; + + npc->free_entries_rev[idx] = + rte_bitmap_init(npc->mcam_entries, mem, bmap_sz); + mem += bmap_sz; + + npc->live_entries[idx] = + rte_bitmap_init(npc->mcam_entries, mem, bmap_sz); + mem += bmap_sz; + + npc->live_entries_rev[idx] = + rte_bitmap_init(npc->mcam_entries, mem, bmap_sz); + mem += bmap_sz; + + npc->flow_entry_info[idx].free_ent = 0; + npc->flow_entry_info[idx].live_ent = 0; + npc->flow_entry_info[idx].max_id = 0; + npc->flow_entry_info[idx].min_id = ~(0); + } + + npc->rss_grps = NIX_RSS_GRPS; + + bmap_sz = rte_bitmap_get_memory_footprint(npc->rss_grps); + nix_mem = rte_zmalloc(NULL, bmap_sz, RTE_CACHE_LINE_SIZE); + if (nix_mem == NULL) { + otx2_err("Bmap alloc 
failed"); + rc = -ENOMEM; + goto err; + } + + npc->rss_grp_entries = rte_bitmap_init(npc->rss_grps, nix_mem, bmap_sz); + + /* Group 0 will be used for RSS, + * 1 -7 will be used for rte_flow RSS action + */ + rte_bitmap_set(npc->rss_grp_entries, 0); + + return 0; + +err: + if (npc->flow_list) + rte_free(npc->flow_list); + if (npc->live_entries_rev) + rte_free(npc->live_entries_rev); + if (npc->live_entries) + rte_free(npc->live_entries); + if (npc->free_entries_rev) + rte_free(npc->free_entries_rev); + if (npc->free_entries) + rte_free(npc->free_entries); + if (npc->flow_entry_info) + rte_free(npc->flow_entry_info); + if (npc_mem) + rte_free(npc_mem); + return rc; +} + +int +otx2_flow_fini(struct otx2_eth_dev *hw) +{ + struct otx2_npc_flow_info *npc = &hw->npc_flow; + int rc; + + rc = otx2_flow_free_all_resources(hw); + if (rc) { + otx2_err("Error when deleting NPC MCAM entries, counters"); + return rc; + } + + if (npc->flow_list) + rte_free(npc->flow_list); + if (npc->live_entries_rev) + rte_free(npc->live_entries_rev); + if (npc->live_entries) + rte_free(npc->live_entries); + if (npc->free_entries_rev) + rte_free(npc->free_entries_rev); + if (npc->free_entries) + rte_free(npc->free_entries); + if (npc->flow_entry_info) + rte_free(npc->flow_entry_info); + + return 0; +} diff --git a/src/spdk/dpdk/drivers/net/octeontx2/otx2_flow.h b/src/spdk/dpdk/drivers/net/octeontx2/otx2_flow.h new file mode 100644 index 000000000..df78f41d3 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/octeontx2/otx2_flow.h @@ -0,0 +1,397 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(C) 2019 Marvell International Ltd. + */ + +#ifndef __OTX2_FLOW_H__ +#define __OTX2_FLOW_H__ + +#include + +#include +#include +#include + +#include "otx2_common.h" +#include "otx2_ethdev.h" +#include "otx2_mbox.h" + +struct otx2_eth_dev; + +int otx2_flow_init(struct otx2_eth_dev *hw); +int otx2_flow_fini(struct otx2_eth_dev *hw); +extern const struct rte_flow_ops otx2_flow_ops; + +enum { + OTX2_INTF_RX = 0, + OTX2_INTF_TX = 1, + OTX2_INTF_MAX = 2, +}; + +#define NPC_IH_LENGTH 8 +#define NPC_TPID_LENGTH 2 +#define NPC_HIGIG2_LENGTH 16 +#define NPC_COUNTER_NONE (-1) +/* 32 bytes from LDATA_CFG & 32 bytes from FLAGS_CFG */ +#define NPC_MAX_EXTRACT_DATA_LEN (64) +#define NPC_LDATA_LFLAG_LEN (16) +#define NPC_MCAM_TOT_ENTRIES (4096) +#define NPC_MAX_KEY_NIBBLES (31) +/* Nibble offsets */ +#define NPC_LAYER_KEYX_SZ (3) +#define NPC_PARSE_KEX_S_LA_OFFSET (7) +#define NPC_PARSE_KEX_S_LID_OFFSET(lid) \ + ((((lid) - NPC_LID_LA) * NPC_LAYER_KEYX_SZ) \ + + NPC_PARSE_KEX_S_LA_OFFSET) + + +/* supported flow actions flags */ +#define OTX2_FLOW_ACT_MARK (1 << 0) +#define OTX2_FLOW_ACT_FLAG (1 << 1) +#define OTX2_FLOW_ACT_DROP (1 << 2) +#define OTX2_FLOW_ACT_QUEUE (1 << 3) +#define OTX2_FLOW_ACT_RSS (1 << 4) +#define OTX2_FLOW_ACT_DUP (1 << 5) +#define OTX2_FLOW_ACT_SEC (1 << 6) +#define OTX2_FLOW_ACT_COUNT (1 << 7) +#define OTX2_FLOW_ACT_PF (1 << 8) +#define OTX2_FLOW_ACT_VF (1 << 9) + +/* terminating actions */ +#define OTX2_FLOW_ACT_TERM (OTX2_FLOW_ACT_DROP | \ + OTX2_FLOW_ACT_QUEUE | \ + OTX2_FLOW_ACT_RSS | \ + OTX2_FLOW_ACT_DUP | \ + OTX2_FLOW_ACT_SEC) + +/* This mark value indicates flag action */ +#define OTX2_FLOW_FLAG_VAL (0xffff) + +#define NIX_RX_ACT_MATCH_OFFSET (40) +#define NIX_RX_ACT_MATCH_MASK (0xFFFF) + +#define NIX_RSS_ACT_GRP_OFFSET (20) +#define NIX_RSS_ACT_ALG_OFFSET (56) +#define NIX_RSS_ACT_GRP_MASK (0xFFFFF) +#define NIX_RSS_ACT_ALG_MASK (0x1F) + +/* PMD-specific definition of the opaque struct rte_flow */ +#define 
OTX2_MAX_MCAM_WIDTH_DWORDS 7 + +enum npc_mcam_intf { + NPC_MCAM_RX, + NPC_MCAM_TX +}; + +struct npc_xtract_info { + /* Length in bytes of pkt data extracted. len = 0 + * indicates that extraction is disabled. + */ + uint8_t len; + uint8_t hdr_off; /* Byte offset of proto hdr: extract_src */ + uint8_t key_off; /* Byte offset in MCAM key where data is placed */ + uint8_t enable; /* Extraction enabled or disabled */ + uint8_t flags_enable; /* Flags extraction enabled */ +}; + +/* Information for a given {LAYER, LTYPE} */ +struct npc_lid_lt_xtract_info { + /* Info derived from parser configuration */ + uint16_t npc_proto; /* Network protocol identified */ + uint8_t valid_flags_mask; /* Flags applicable */ + uint8_t is_terminating:1; /* No more parsing */ + struct npc_xtract_info xtract[NPC_MAX_LD]; +}; + +union npc_kex_ldata_flags_cfg { + struct { + #if defined(__BIG_ENDIAN_BITFIELD) + uint64_t rvsd_62_1 : 61; + uint64_t lid : 3; + #else + uint64_t lid : 3; + uint64_t rvsd_62_1 : 61; + #endif + } s; + + uint64_t i; +}; + +typedef struct npc_lid_lt_xtract_info + otx2_dxcfg_t[NPC_MAX_INTF][NPC_MAX_LID][NPC_MAX_LT]; +typedef struct npc_lid_lt_xtract_info + otx2_fxcfg_t[NPC_MAX_INTF][NPC_MAX_LD][NPC_MAX_LFL]; +typedef union npc_kex_ldata_flags_cfg otx2_ld_flags_t[NPC_MAX_LD]; + + +/* MBOX_MSG_NPC_GET_DATAX_CFG Response */ +struct npc_get_datax_cfg { + /* NPC_AF_KEX_LDATA(0..1)_FLAGS_CFG */ + union npc_kex_ldata_flags_cfg ld_flags[NPC_MAX_LD]; + /* Extract information indexed with [LID][LTYPE] */ + struct npc_lid_lt_xtract_info lid_lt_xtract[NPC_MAX_LID][NPC_MAX_LT]; + /* Flags based extract indexed with [LDATA][FLAGS_LOWER_NIBBLE] + * Fields flags_ena_ld0, flags_ena_ld1 in + * struct npc_lid_lt_xtract_info indicate if this is applicable + * for a given {LAYER, LTYPE} + */ + struct npc_xtract_info flag_xtract[NPC_MAX_LD][NPC_MAX_LT]; +}; + +struct otx2_mcam_ents_info { + /* Current max & min values of mcam index */ + uint32_t max_id; + uint32_t min_id; + uint32_t free_ent; + uint32_t live_ent; +}; + +struct rte_flow { + uint8_t nix_intf; + uint32_t mcam_id; + int32_t ctr_id; + uint32_t priority; + /* Contiguous match string */ + uint64_t mcam_data[OTX2_MAX_MCAM_WIDTH_DWORDS]; + uint64_t mcam_mask[OTX2_MAX_MCAM_WIDTH_DWORDS]; + uint64_t npc_action; + TAILQ_ENTRY(rte_flow) next; +}; + +TAILQ_HEAD(otx2_flow_list, rte_flow); + +/* Accessed from ethdev private - otx2_eth_dev */ +struct otx2_npc_flow_info { + rte_atomic32_t mark_actions; + uint32_t keyx_supp_nmask[NPC_MAX_INTF];/* nibble mask */ + uint32_t keyx_len[NPC_MAX_INTF]; /* per intf key len in bits */ + uint32_t datax_len[NPC_MAX_INTF]; /* per intf data len in bits */ + uint32_t keyw[NPC_MAX_INTF]; /* max key + data len bits */ + uint32_t mcam_entries; /* mcam entries supported */ + otx2_dxcfg_t prx_dxcfg; /* intf, lid, lt, extract */ + otx2_fxcfg_t prx_fxcfg; /* Flag extract */ + otx2_ld_flags_t prx_lfcfg; /* KEX LD_Flags CFG */ + /* mcam entry info per priority level: both free & in-use */ + struct otx2_mcam_ents_info *flow_entry_info; + /* Bitmap of free preallocated entries in ascending index & + * descending priority + */ + struct rte_bitmap **free_entries; + /* Bitmap of free preallocated entries in descending index & + * ascending priority + */ + struct rte_bitmap **free_entries_rev; + /* Bitmap of live entries in ascending index & descending priority */ + struct rte_bitmap **live_entries; + /* Bitmap of live entries in descending index & ascending priority */ + struct rte_bitmap **live_entries_rev; + /* Priority bucket wise tail queue of 
all rte_flow resources */ + struct otx2_flow_list *flow_list; + uint32_t rss_grps; /* rss groups supported */ + struct rte_bitmap *rss_grp_entries; + uint16_t channel; /*rx channel */ + uint16_t flow_prealloc_size; + uint16_t flow_max_priority; + uint16_t switch_header_type; +}; + +struct otx2_parse_state { + struct otx2_npc_flow_info *npc; + const struct rte_flow_item *pattern; + const struct rte_flow_item *last_pattern; /* Temp usage */ + struct rte_flow_error *error; + struct rte_flow *flow; + uint8_t tunnel; + uint8_t terminate; + uint8_t layer_mask; + uint8_t lt[NPC_MAX_LID]; + uint8_t flags[NPC_MAX_LID]; + uint8_t *mcam_data; /* point to flow->mcam_data + key_len */ + uint8_t *mcam_mask; /* point to flow->mcam_mask + key_len */ +}; + +struct otx2_flow_item_info { + const void *def_mask; /* rte_flow default mask */ + void *hw_mask; /* hardware supported mask */ + int len; /* length of item */ + const void *spec; /* spec to use, NULL implies match any */ + const void *mask; /* mask to use */ + uint8_t hw_hdr_len; /* Extra data len at each layer*/ +}; + +struct otx2_idev_kex_cfg { + struct npc_get_kex_cfg_rsp kex_cfg; + rte_atomic16_t kex_refcnt; +}; + +enum npc_kpu_parser_flag { + NPC_F_NA = 0, + NPC_F_PKI, + NPC_F_PKI_VLAN, + NPC_F_PKI_ETAG, + NPC_F_PKI_ITAG, + NPC_F_PKI_MPLS, + NPC_F_PKI_NSH, + NPC_F_ETYPE_UNK, + NPC_F_ETHER_VLAN, + NPC_F_ETHER_ETAG, + NPC_F_ETHER_ITAG, + NPC_F_ETHER_MPLS, + NPC_F_ETHER_NSH, + NPC_F_STAG_CTAG, + NPC_F_STAG_CTAG_UNK, + NPC_F_STAG_STAG_CTAG, + NPC_F_STAG_STAG_STAG, + NPC_F_QINQ_CTAG, + NPC_F_QINQ_CTAG_UNK, + NPC_F_QINQ_QINQ_CTAG, + NPC_F_QINQ_QINQ_QINQ, + NPC_F_BTAG_ITAG, + NPC_F_BTAG_ITAG_STAG, + NPC_F_BTAG_ITAG_CTAG, + NPC_F_BTAG_ITAG_UNK, + NPC_F_ETAG_CTAG, + NPC_F_ETAG_BTAG_ITAG, + NPC_F_ETAG_STAG, + NPC_F_ETAG_QINQ, + NPC_F_ETAG_ITAG, + NPC_F_ETAG_ITAG_STAG, + NPC_F_ETAG_ITAG_CTAG, + NPC_F_ETAG_ITAG_UNK, + NPC_F_ITAG_STAG_CTAG, + NPC_F_ITAG_STAG, + NPC_F_ITAG_CTAG, + NPC_F_MPLS_4_LABELS, + NPC_F_MPLS_3_LABELS, + NPC_F_MPLS_2_LABELS, + NPC_F_IP_HAS_OPTIONS, + NPC_F_IP_IP_IN_IP, + NPC_F_IP_6TO4, + NPC_F_IP_MPLS_IN_IP, + NPC_F_IP_UNK_PROTO, + NPC_F_IP_IP_IN_IP_HAS_OPTIONS, + NPC_F_IP_6TO4_HAS_OPTIONS, + NPC_F_IP_MPLS_IN_IP_HAS_OPTIONS, + NPC_F_IP_UNK_PROTO_HAS_OPTIONS, + NPC_F_IP6_HAS_EXT, + NPC_F_IP6_TUN_IP6, + NPC_F_IP6_MPLS_IN_IP, + NPC_F_TCP_HAS_OPTIONS, + NPC_F_TCP_HTTP, + NPC_F_TCP_HTTPS, + NPC_F_TCP_PPTP, + NPC_F_TCP_UNK_PORT, + NPC_F_TCP_HTTP_HAS_OPTIONS, + NPC_F_TCP_HTTPS_HAS_OPTIONS, + NPC_F_TCP_PPTP_HAS_OPTIONS, + NPC_F_TCP_UNK_PORT_HAS_OPTIONS, + NPC_F_UDP_VXLAN, + NPC_F_UDP_VXLAN_NOVNI, + NPC_F_UDP_VXLAN_NOVNI_NSH, + NPC_F_UDP_VXLANGPE, + NPC_F_UDP_VXLANGPE_NSH, + NPC_F_UDP_VXLANGPE_MPLS, + NPC_F_UDP_VXLANGPE_NOVNI, + NPC_F_UDP_VXLANGPE_NOVNI_NSH, + NPC_F_UDP_VXLANGPE_NOVNI_MPLS, + NPC_F_UDP_VXLANGPE_UNK, + NPC_F_UDP_VXLANGPE_NONP, + NPC_F_UDP_GTP_GTPC, + NPC_F_UDP_GTP_GTPU_G_PDU, + NPC_F_UDP_GTP_GTPU_UNK, + NPC_F_UDP_UNK_PORT, + NPC_F_UDP_GENEVE, + NPC_F_UDP_GENEVE_OAM, + NPC_F_UDP_GENEVE_CRI_OPT, + NPC_F_UDP_GENEVE_OAM_CRI_OPT, + NPC_F_GRE_NVGRE, + NPC_F_GRE_HAS_SRE, + NPC_F_GRE_HAS_CSUM, + NPC_F_GRE_HAS_KEY, + NPC_F_GRE_HAS_SEQ, + NPC_F_GRE_HAS_CSUM_KEY, + NPC_F_GRE_HAS_CSUM_SEQ, + NPC_F_GRE_HAS_KEY_SEQ, + NPC_F_GRE_HAS_CSUM_KEY_SEQ, + NPC_F_GRE_HAS_ROUTE, + NPC_F_GRE_UNK_PROTO, + NPC_F_GRE_VER1, + NPC_F_GRE_VER1_HAS_SEQ, + NPC_F_GRE_VER1_HAS_ACK, + NPC_F_GRE_VER1_HAS_SEQ_ACK, + NPC_F_GRE_VER1_UNK_PROTO, + NPC_F_TU_ETHER_UNK, + NPC_F_TU_ETHER_CTAG, + NPC_F_TU_ETHER_CTAG_UNK, + NPC_F_TU_ETHER_STAG_CTAG, + NPC_F_TU_ETHER_STAG_CTAG_UNK, 
+ NPC_F_TU_ETHER_STAG, + NPC_F_TU_ETHER_STAG_UNK, + NPC_F_TU_ETHER_QINQ_CTAG, + NPC_F_TU_ETHER_QINQ_CTAG_UNK, + NPC_F_TU_ETHER_QINQ, + NPC_F_TU_ETHER_QINQ_UNK, + NPC_F_LAST /* has to be the last item */ +}; + + +int otx2_flow_mcam_free_counter(struct otx2_mbox *mbox, uint16_t ctr_id); + +int otx2_flow_mcam_read_counter(struct otx2_mbox *mbox, uint32_t ctr_id, + uint64_t *count); + +int otx2_flow_mcam_clear_counter(struct otx2_mbox *mbox, uint32_t ctr_id); + +int otx2_flow_mcam_free_entry(struct otx2_mbox *mbox, uint32_t entry); + +int otx2_flow_mcam_free_all_entries(struct otx2_mbox *mbox); + +int otx2_flow_update_parse_state(struct otx2_parse_state *pst, + struct otx2_flow_item_info *info, + int lid, int lt, uint8_t flags); + +int otx2_flow_parse_item_basic(const struct rte_flow_item *item, + struct otx2_flow_item_info *info, + struct rte_flow_error *error); + +void otx2_flow_keyx_compress(uint64_t *data, uint32_t nibble_mask); + +int otx2_flow_mcam_alloc_and_write(struct rte_flow *flow, + struct otx2_mbox *mbox, + struct otx2_parse_state *pst, + struct otx2_npc_flow_info *flow_info); + +void otx2_flow_get_hw_supp_mask(struct otx2_parse_state *pst, + struct otx2_flow_item_info *info, + int lid, int lt); + +const struct rte_flow_item * +otx2_flow_skip_void_and_any_items(const struct rte_flow_item *pattern); + +int otx2_flow_parse_lh(struct otx2_parse_state *pst); + +int otx2_flow_parse_lg(struct otx2_parse_state *pst); + +int otx2_flow_parse_lf(struct otx2_parse_state *pst); + +int otx2_flow_parse_le(struct otx2_parse_state *pst); + +int otx2_flow_parse_ld(struct otx2_parse_state *pst); + +int otx2_flow_parse_lc(struct otx2_parse_state *pst); + +int otx2_flow_parse_lb(struct otx2_parse_state *pst); + +int otx2_flow_parse_la(struct otx2_parse_state *pst); + +int otx2_flow_parse_higig2_hdr(struct otx2_parse_state *pst); + +int otx2_flow_parse_actions(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_action actions[], + struct rte_flow_error *error, + struct rte_flow *flow); + +int otx2_flow_free_all_resources(struct otx2_eth_dev *hw); + +int otx2_flow_parse_mpls(struct otx2_parse_state *pst, int lid); +#endif /* __OTX2_FLOW_H__ */ diff --git a/src/spdk/dpdk/drivers/net/octeontx2/otx2_flow_ctrl.c b/src/spdk/dpdk/drivers/net/octeontx2/otx2_flow_ctrl.c new file mode 100644 index 000000000..76bf48100 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/octeontx2/otx2_flow_ctrl.c @@ -0,0 +1,252 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(C) 2019 Marvell International Ltd. 
+ */ + +#include "otx2_ethdev.h" + +int +otx2_nix_rxchan_bpid_cfg(struct rte_eth_dev *eth_dev, bool enb) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct otx2_fc_info *fc = &dev->fc_info; + struct otx2_mbox *mbox = dev->mbox; + struct nix_bp_cfg_req *req; + struct nix_bp_cfg_rsp *rsp; + int rc; + + if (otx2_dev_is_sdp(dev)) + return 0; + + if (enb) { + req = otx2_mbox_alloc_msg_nix_bp_enable(mbox); + req->chan_base = 0; + req->chan_cnt = 1; + req->bpid_per_chan = 0; + + rc = otx2_mbox_process_msg(mbox, (void *)&rsp); + if (rc || req->chan_cnt != rsp->chan_cnt) { + otx2_err("Insufficient BPIDs, alloc=%u < req=%u rc=%d", + rsp->chan_cnt, req->chan_cnt, rc); + return rc; + } + + fc->bpid[0] = rsp->chan_bpid[0]; + } else { + req = otx2_mbox_alloc_msg_nix_bp_disable(mbox); + req->chan_base = 0; + req->chan_cnt = 1; + + rc = otx2_mbox_process(mbox); + + memset(fc->bpid, 0, sizeof(uint16_t) * NIX_MAX_CHAN); + } + + return rc; +} + +int +otx2_nix_flow_ctrl_get(struct rte_eth_dev *eth_dev, + struct rte_eth_fc_conf *fc_conf) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct cgx_pause_frm_cfg *req, *rsp; + struct otx2_mbox *mbox = dev->mbox; + int rc; + + if (otx2_dev_is_lbk(dev)) { + fc_conf->mode = RTE_FC_NONE; + return 0; + } + + req = otx2_mbox_alloc_msg_cgx_cfg_pause_frm(mbox); + req->set = 0; + + rc = otx2_mbox_process_msg(mbox, (void *)&rsp); + if (rc) + goto done; + + if (rsp->rx_pause && rsp->tx_pause) + fc_conf->mode = RTE_FC_FULL; + else if (rsp->rx_pause) + fc_conf->mode = RTE_FC_RX_PAUSE; + else if (rsp->tx_pause) + fc_conf->mode = RTE_FC_TX_PAUSE; + else + fc_conf->mode = RTE_FC_NONE; + +done: + return rc; +} + +static int +otx2_nix_cq_bp_cfg(struct rte_eth_dev *eth_dev, bool enb) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct otx2_fc_info *fc = &dev->fc_info; + struct otx2_mbox *mbox = dev->mbox; + struct nix_aq_enq_req *aq; + struct otx2_eth_rxq *rxq; + int i, rc; + + for (i = 0; i < eth_dev->data->nb_rx_queues; i++) { + rxq = eth_dev->data->rx_queues[i]; + + aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox); + if (!aq) { + /* The shared memory buffer can be full. 
+ * flush it and retry + */ + otx2_mbox_msg_send(mbox, 0); + rc = otx2_mbox_wait_for_rsp(mbox, 0); + if (rc < 0) + return rc; + + aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox); + if (!aq) + return -ENOMEM; + } + aq->qidx = rxq->rq; + aq->ctype = NIX_AQ_CTYPE_CQ; + aq->op = NIX_AQ_INSTOP_WRITE; + + if (enb) { + aq->cq.bpid = fc->bpid[0]; + aq->cq_mask.bpid = ~(aq->cq_mask.bpid); + aq->cq.bp = rxq->cq_drop; + aq->cq_mask.bp = ~(aq->cq_mask.bp); + } + + aq->cq.bp_ena = !!enb; + aq->cq_mask.bp_ena = ~(aq->cq_mask.bp_ena); + } + + otx2_mbox_msg_send(mbox, 0); + rc = otx2_mbox_wait_for_rsp(mbox, 0); + if (rc < 0) + return rc; + + return 0; +} + +static int +otx2_nix_rx_fc_cfg(struct rte_eth_dev *eth_dev, bool enb) +{ + return otx2_nix_cq_bp_cfg(eth_dev, enb); +} + +int +otx2_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev, + struct rte_eth_fc_conf *fc_conf) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct otx2_fc_info *fc = &dev->fc_info; + struct otx2_mbox *mbox = dev->mbox; + struct cgx_pause_frm_cfg *req; + uint8_t tx_pause, rx_pause; + int rc = 0; + + if (otx2_dev_is_lbk(dev)) { + otx2_info("No flow control support for LBK bound ethports"); + return -ENOTSUP; + } + + if (fc_conf->high_water || fc_conf->low_water || fc_conf->pause_time || + fc_conf->mac_ctrl_frame_fwd || fc_conf->autoneg) { + otx2_info("Flowctrl parameter is not supported"); + return -EINVAL; + } + + if (fc_conf->mode == fc->mode) + return 0; + + rx_pause = (fc_conf->mode == RTE_FC_FULL) || + (fc_conf->mode == RTE_FC_RX_PAUSE); + tx_pause = (fc_conf->mode == RTE_FC_FULL) || + (fc_conf->mode == RTE_FC_TX_PAUSE); + + /* Check if TX pause frame is already enabled or not */ + if (fc->tx_pause ^ tx_pause) { + if (otx2_dev_is_Ax(dev) && eth_dev->data->dev_started) { + /* on Ax, CQ should be in disabled state + * while setting flow control configuration. + */ + otx2_info("Stop the port=%d for setting flow control\n", + eth_dev->data->port_id); + return 0; + } + /* TX pause frames, enable/disable flowctrl on RX side. */ + rc = otx2_nix_rx_fc_cfg(eth_dev, tx_pause); + if (rc) + return rc; + } + + req = otx2_mbox_alloc_msg_cgx_cfg_pause_frm(mbox); + req->set = 1; + req->rx_pause = rx_pause; + req->tx_pause = tx_pause; + + rc = otx2_mbox_process(mbox); + if (rc) + return rc; + + fc->tx_pause = tx_pause; + fc->rx_pause = rx_pause; + fc->mode = fc_conf->mode; + + return rc; +} + +int +otx2_nix_update_flow_ctrl_mode(struct rte_eth_dev *eth_dev) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct otx2_fc_info *fc = &dev->fc_info; + struct rte_eth_fc_conf fc_conf; + + if (otx2_dev_is_lbk(dev) || otx2_dev_is_sdp(dev)) + return 0; + + memset(&fc_conf, 0, sizeof(struct rte_eth_fc_conf)); + fc_conf.mode = fc->mode; + + /* To avoid Link credit deadlock on Ax, disable Tx FC if it's enabled */ + if (otx2_dev_is_Ax(dev) && + (dev->npc_flow.switch_header_type != OTX2_PRIV_FLAGS_HIGIG) && + (fc_conf.mode == RTE_FC_FULL || fc_conf.mode == RTE_FC_RX_PAUSE)) { + fc_conf.mode = + (fc_conf.mode == RTE_FC_FULL || + fc_conf.mode == RTE_FC_TX_PAUSE) ? 
+ RTE_FC_TX_PAUSE : RTE_FC_NONE; + } + + return otx2_nix_flow_ctrl_set(eth_dev, &fc_conf); +} + +int +otx2_nix_flow_ctrl_init(struct rte_eth_dev *eth_dev) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct otx2_fc_info *fc = &dev->fc_info; + struct rte_eth_fc_conf fc_conf; + int rc; + + if (otx2_dev_is_lbk(dev) || otx2_dev_is_sdp(dev)) + return 0; + + memset(&fc_conf, 0, sizeof(struct rte_eth_fc_conf)); + /* Both Rx & Tx flow ctrl get enabled(RTE_FC_FULL) in HW + * by AF driver, update those info in PMD structure. + */ + rc = otx2_nix_flow_ctrl_get(eth_dev, &fc_conf); + if (rc) + goto exit; + + fc->mode = fc_conf.mode; + fc->rx_pause = (fc_conf.mode == RTE_FC_FULL) || + (fc_conf.mode == RTE_FC_RX_PAUSE); + fc->tx_pause = (fc_conf.mode == RTE_FC_FULL) || + (fc_conf.mode == RTE_FC_TX_PAUSE); + +exit: + return rc; +} diff --git a/src/spdk/dpdk/drivers/net/octeontx2/otx2_flow_parse.c b/src/spdk/dpdk/drivers/net/octeontx2/otx2_flow_parse.c new file mode 100644 index 000000000..2d9a5857c --- /dev/null +++ b/src/spdk/dpdk/drivers/net/octeontx2/otx2_flow_parse.c @@ -0,0 +1,1046 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(C) 2019 Marvell International Ltd. + */ + +#include "otx2_ethdev.h" +#include "otx2_flow.h" + +const struct rte_flow_item * +otx2_flow_skip_void_and_any_items(const struct rte_flow_item *pattern) +{ + while ((pattern->type == RTE_FLOW_ITEM_TYPE_VOID) || + (pattern->type == RTE_FLOW_ITEM_TYPE_ANY)) + pattern++; + + return pattern; +} + +/* + * Tunnel+ESP, Tunnel+ICMP4/6, Tunnel+TCP, Tunnel+UDP, + * Tunnel+SCTP + */ +int +otx2_flow_parse_lh(struct otx2_parse_state *pst) +{ + struct otx2_flow_item_info info; + char hw_mask[64]; + int lid, lt; + int rc; + + if (!pst->tunnel) + return 0; + + info.hw_mask = &hw_mask; + info.spec = NULL; + info.mask = NULL; + info.hw_hdr_len = 0; + lid = NPC_LID_LH; + + switch (pst->pattern->type) { + case RTE_FLOW_ITEM_TYPE_UDP: + lt = NPC_LT_LH_TU_UDP; + info.def_mask = &rte_flow_item_udp_mask; + info.len = sizeof(struct rte_flow_item_udp); + break; + case RTE_FLOW_ITEM_TYPE_TCP: + lt = NPC_LT_LH_TU_TCP; + info.def_mask = &rte_flow_item_tcp_mask; + info.len = sizeof(struct rte_flow_item_tcp); + break; + case RTE_FLOW_ITEM_TYPE_SCTP: + lt = NPC_LT_LH_TU_SCTP; + info.def_mask = &rte_flow_item_sctp_mask; + info.len = sizeof(struct rte_flow_item_sctp); + break; + case RTE_FLOW_ITEM_TYPE_ESP: + lt = NPC_LT_LH_TU_ESP; + info.def_mask = &rte_flow_item_esp_mask; + info.len = sizeof(struct rte_flow_item_esp); + break; + default: + return 0; + } + + otx2_flow_get_hw_supp_mask(pst, &info, lid, lt); + rc = otx2_flow_parse_item_basic(pst->pattern, &info, pst->error); + if (rc != 0) + return rc; + + return otx2_flow_update_parse_state(pst, &info, lid, lt, 0); +} + +/* Tunnel+IPv4, Tunnel+IPv6 */ +int +otx2_flow_parse_lg(struct otx2_parse_state *pst) +{ + struct otx2_flow_item_info info; + char hw_mask[64]; + int lid, lt; + int rc; + + if (!pst->tunnel) + return 0; + + info.hw_mask = &hw_mask; + info.spec = NULL; + info.mask = NULL; + info.hw_hdr_len = 0; + lid = NPC_LID_LG; + + if (pst->pattern->type == RTE_FLOW_ITEM_TYPE_IPV4) { + lt = NPC_LT_LG_TU_IP; + info.def_mask = &rte_flow_item_ipv4_mask; + info.len = sizeof(struct rte_flow_item_ipv4); + } else if (pst->pattern->type == RTE_FLOW_ITEM_TYPE_IPV6) { + lt = NPC_LT_LG_TU_IP6; + info.def_mask = &rte_flow_item_ipv6_mask; + info.len = sizeof(struct rte_flow_item_ipv6); + } else { + /* There is no tunneled IP header */ + return 0; + } + + otx2_flow_get_hw_supp_mask(pst, &info, lid, 
lt); + rc = otx2_flow_parse_item_basic(pst->pattern, &info, pst->error); + if (rc != 0) + return rc; + + return otx2_flow_update_parse_state(pst, &info, lid, lt, 0); +} + +/* Tunnel+Ether */ +int +otx2_flow_parse_lf(struct otx2_parse_state *pst) +{ + const struct rte_flow_item *pattern, *last_pattern; + struct rte_flow_item_eth hw_mask; + struct otx2_flow_item_info info; + int lid, lt, lflags; + int nr_vlans = 0; + int rc; + + /* We hit this layer if there is a tunneling protocol */ + if (!pst->tunnel) + return 0; + + if (pst->pattern->type != RTE_FLOW_ITEM_TYPE_ETH) + return 0; + + lid = NPC_LID_LF; + lt = NPC_LT_LF_TU_ETHER; + lflags = 0; + + info.def_mask = &rte_flow_item_vlan_mask; + /* No match support for vlan tags */ + info.hw_mask = NULL; + info.len = sizeof(struct rte_flow_item_vlan); + info.spec = NULL; + info.mask = NULL; + info.hw_hdr_len = 0; + + /* Look ahead and find out any VLAN tags. These can be + * detected but no data matching is available. + */ + last_pattern = pst->pattern; + pattern = pst->pattern + 1; + pattern = otx2_flow_skip_void_and_any_items(pattern); + while (pattern->type == RTE_FLOW_ITEM_TYPE_VLAN) { + nr_vlans++; + rc = otx2_flow_parse_item_basic(pattern, &info, pst->error); + if (rc != 0) + return rc; + last_pattern = pattern; + pattern++; + pattern = otx2_flow_skip_void_and_any_items(pattern); + } + otx2_npc_dbg("Nr_vlans = %d", nr_vlans); + switch (nr_vlans) { + case 0: + break; + case 1: + lflags = NPC_F_TU_ETHER_CTAG; + break; + case 2: + lflags = NPC_F_TU_ETHER_STAG_CTAG; + break; + default: + rte_flow_error_set(pst->error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + last_pattern, + "more than 2 vlans with tunneled Ethernet " + "not supported"); + return -rte_errno; + } + + info.def_mask = &rte_flow_item_eth_mask; + info.hw_mask = &hw_mask; + info.len = sizeof(struct rte_flow_item_eth); + info.hw_hdr_len = 0; + otx2_flow_get_hw_supp_mask(pst, &info, lid, lt); + info.spec = NULL; + info.mask = NULL; + + rc = otx2_flow_parse_item_basic(pst->pattern, &info, pst->error); + if (rc != 0) + return rc; + + pst->pattern = last_pattern; + + return otx2_flow_update_parse_state(pst, &info, lid, lt, lflags); +} + +int +otx2_flow_parse_le(struct otx2_parse_state *pst) +{ + /* + * We are positioned at UDP. Scan ahead and look for + * UDP encapsulated tunnel protocols. If available, + * parse them. In that case handle this: + * - RTE spec assumes we point to tunnel header. + * - NPC parser provides offset from UDP header. 
+ */ + + /* + * Note: Add support to GENEVE, VXLAN_GPE when we + * upgrade DPDK + * + * Note: Better to split flags into two nibbles: + * - Higher nibble can have flags + * - Lower nibble to further enumerate protocols + * and have flags based extraction + */ + const struct rte_flow_item *pattern = pst->pattern; + struct otx2_flow_item_info info; + int lid, lt, lflags; + char hw_mask[64]; + int rc; + + if (pst->tunnel) + return 0; + + if (pst->pattern->type == RTE_FLOW_ITEM_TYPE_MPLS) + return otx2_flow_parse_mpls(pst, NPC_LID_LE); + + info.spec = NULL; + info.mask = NULL; + info.hw_mask = NULL; + info.def_mask = NULL; + info.len = 0; + info.hw_hdr_len = 0; + lid = NPC_LID_LE; + lflags = 0; + + /* Ensure we are not matching anything in UDP */ + rc = otx2_flow_parse_item_basic(pattern, &info, pst->error); + if (rc) + return rc; + + info.hw_mask = &hw_mask; + pattern = otx2_flow_skip_void_and_any_items(pattern); + otx2_npc_dbg("Pattern->type = %d", pattern->type); + switch (pattern->type) { + case RTE_FLOW_ITEM_TYPE_VXLAN: + lflags = NPC_F_UDP_VXLAN; + info.def_mask = &rte_flow_item_vxlan_mask; + info.len = sizeof(struct rte_flow_item_vxlan); + lt = NPC_LT_LE_VXLAN; + break; + case RTE_FLOW_ITEM_TYPE_GTPC: + lflags = NPC_F_UDP_GTP_GTPC; + info.def_mask = &rte_flow_item_gtp_mask; + info.len = sizeof(struct rte_flow_item_gtp); + lt = NPC_LT_LE_GTPC; + break; + case RTE_FLOW_ITEM_TYPE_GTPU: + lflags = NPC_F_UDP_GTP_GTPU_G_PDU; + info.def_mask = &rte_flow_item_gtp_mask; + info.len = sizeof(struct rte_flow_item_gtp); + lt = NPC_LT_LE_GTPU; + break; + case RTE_FLOW_ITEM_TYPE_GENEVE: + lflags = NPC_F_UDP_GENEVE; + info.def_mask = &rte_flow_item_geneve_mask; + info.len = sizeof(struct rte_flow_item_geneve); + lt = NPC_LT_LE_GENEVE; + break; + case RTE_FLOW_ITEM_TYPE_VXLAN_GPE: + lflags = NPC_F_UDP_VXLANGPE; + info.def_mask = &rte_flow_item_vxlan_gpe_mask; + info.len = sizeof(struct rte_flow_item_vxlan_gpe); + lt = NPC_LT_LE_VXLANGPE; + break; + default: + return 0; + } + + pst->tunnel = 1; + + otx2_flow_get_hw_supp_mask(pst, &info, lid, lt); + rc = otx2_flow_parse_item_basic(pattern, &info, pst->error); + if (rc != 0) + return rc; + + return otx2_flow_update_parse_state(pst, &info, lid, lt, lflags); +} + +static int +flow_parse_mpls_label_stack(struct otx2_parse_state *pst, int *flag) +{ + int nr_labels = 0; + const struct rte_flow_item *pattern = pst->pattern; + struct otx2_flow_item_info info; + int rc; + uint8_t flag_list[] = {0, NPC_F_MPLS_2_LABELS, + NPC_F_MPLS_3_LABELS, NPC_F_MPLS_4_LABELS}; + + /* + * pst->pattern points to first MPLS label. We only check + * that subsequent labels do not have anything to match. 
+ */ + info.def_mask = &rte_flow_item_mpls_mask; + info.hw_mask = NULL; + info.len = sizeof(struct rte_flow_item_mpls); + info.spec = NULL; + info.mask = NULL; + info.hw_hdr_len = 0; + + while (pattern->type == RTE_FLOW_ITEM_TYPE_MPLS) { + nr_labels++; + + /* Basic validation of 2nd/3rd/4th mpls item */ + if (nr_labels > 1) { + rc = otx2_flow_parse_item_basic(pattern, &info, + pst->error); + if (rc != 0) + return rc; + } + pst->last_pattern = pattern; + pattern++; + pattern = otx2_flow_skip_void_and_any_items(pattern); + } + + if (nr_labels > 4) { + rte_flow_error_set(pst->error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + pst->last_pattern, + "more than 4 mpls labels not supported"); + return -rte_errno; + } + + *flag = flag_list[nr_labels - 1]; + return 0; +} + +int +otx2_flow_parse_mpls(struct otx2_parse_state *pst, int lid) +{ + /* Find number of MPLS labels */ + struct rte_flow_item_mpls hw_mask; + struct otx2_flow_item_info info; + int lt, lflags; + int rc; + + lflags = 0; + + if (lid == NPC_LID_LC) + lt = NPC_LT_LC_MPLS; + else if (lid == NPC_LID_LD) + lt = NPC_LT_LD_TU_MPLS_IN_IP; + else + lt = NPC_LT_LE_TU_MPLS_IN_UDP; + + /* Prepare for parsing the first item */ + info.def_mask = &rte_flow_item_mpls_mask; + info.hw_mask = &hw_mask; + info.len = sizeof(struct rte_flow_item_mpls); + info.spec = NULL; + info.mask = NULL; + info.hw_hdr_len = 0; + + otx2_flow_get_hw_supp_mask(pst, &info, lid, lt); + rc = otx2_flow_parse_item_basic(pst->pattern, &info, pst->error); + if (rc != 0) + return rc; + + /* + * Parse for more labels. + * This sets lflags and pst->last_pattern correctly. + */ + rc = flow_parse_mpls_label_stack(pst, &lflags); + if (rc != 0) + return rc; + + pst->tunnel = 1; + pst->pattern = pst->last_pattern; + + return otx2_flow_update_parse_state(pst, &info, lid, lt, lflags); +} + +/* + * ICMP, ICMP6, UDP, TCP, SCTP, VXLAN, GRE, NVGRE, + * GTP, GTPC, GTPU, ESP + * + * Note: UDP tunnel protocols are identified by flags. + * LPTR for these protocol still points to UDP + * header. Need flag based extraction to support + * this. + */ +int +otx2_flow_parse_ld(struct otx2_parse_state *pst) +{ + char hw_mask[NPC_MAX_EXTRACT_DATA_LEN]; + uint32_t gre_key_mask = 0xffffffff; + struct otx2_flow_item_info info; + int lid, lt, lflags; + int rc; + + if (pst->tunnel) { + /* We have already parsed MPLS or IPv4/v6 followed + * by MPLS or IPv4/v6. Subsequent TCP/UDP etc + * would be parsed as tunneled versions. Skip + * this layer, except for tunneled MPLS. If LC is + * MPLS, we have anyway skipped all stacked MPLS + * labels. 
+ */ + if (pst->pattern->type == RTE_FLOW_ITEM_TYPE_MPLS) + return otx2_flow_parse_mpls(pst, NPC_LID_LD); + return 0; + } + info.hw_mask = &hw_mask; + info.spec = NULL; + info.mask = NULL; + info.def_mask = NULL; + info.len = 0; + info.hw_hdr_len = 0; + + lid = NPC_LID_LD; + lflags = 0; + + otx2_npc_dbg("Pst->pattern->type = %d", pst->pattern->type); + switch (pst->pattern->type) { + case RTE_FLOW_ITEM_TYPE_ICMP: + if (pst->lt[NPC_LID_LC] == NPC_LT_LC_IP6) + lt = NPC_LT_LD_ICMP6; + else + lt = NPC_LT_LD_ICMP; + info.def_mask = &rte_flow_item_icmp_mask; + info.len = sizeof(struct rte_flow_item_icmp); + break; + case RTE_FLOW_ITEM_TYPE_UDP: + lt = NPC_LT_LD_UDP; + info.def_mask = &rte_flow_item_udp_mask; + info.len = sizeof(struct rte_flow_item_udp); + break; + case RTE_FLOW_ITEM_TYPE_TCP: + lt = NPC_LT_LD_TCP; + info.def_mask = &rte_flow_item_tcp_mask; + info.len = sizeof(struct rte_flow_item_tcp); + break; + case RTE_FLOW_ITEM_TYPE_SCTP: + lt = NPC_LT_LD_SCTP; + info.def_mask = &rte_flow_item_sctp_mask; + info.len = sizeof(struct rte_flow_item_sctp); + break; + case RTE_FLOW_ITEM_TYPE_ESP: + lt = NPC_LT_LD_ESP; + info.def_mask = &rte_flow_item_esp_mask; + info.len = sizeof(struct rte_flow_item_esp); + break; + case RTE_FLOW_ITEM_TYPE_GRE: + lt = NPC_LT_LD_GRE; + info.def_mask = &rte_flow_item_gre_mask; + info.len = sizeof(struct rte_flow_item_gre); + break; + case RTE_FLOW_ITEM_TYPE_GRE_KEY: + lt = NPC_LT_LD_GRE; + info.def_mask = &gre_key_mask; + info.len = sizeof(gre_key_mask); + info.hw_hdr_len = 4; + break; + case RTE_FLOW_ITEM_TYPE_NVGRE: + lt = NPC_LT_LD_NVGRE; + lflags = NPC_F_GRE_NVGRE; + info.def_mask = &rte_flow_item_nvgre_mask; + info.len = sizeof(struct rte_flow_item_nvgre); + /* Further IP/Ethernet are parsed as tunneled */ + pst->tunnel = 1; + break; + default: + return 0; + } + + otx2_flow_get_hw_supp_mask(pst, &info, lid, lt); + rc = otx2_flow_parse_item_basic(pst->pattern, &info, pst->error); + if (rc != 0) + return rc; + + return otx2_flow_update_parse_state(pst, &info, lid, lt, lflags); +} + +static inline void +flow_check_lc_ip_tunnel(struct otx2_parse_state *pst) +{ + const struct rte_flow_item *pattern = pst->pattern + 1; + + pattern = otx2_flow_skip_void_and_any_items(pattern); + if (pattern->type == RTE_FLOW_ITEM_TYPE_MPLS || + pattern->type == RTE_FLOW_ITEM_TYPE_IPV4 || + pattern->type == RTE_FLOW_ITEM_TYPE_IPV6) + pst->tunnel = 1; +} + +/* Outer IPv4, Outer IPv6, MPLS, ARP */ +int +otx2_flow_parse_lc(struct otx2_parse_state *pst) +{ + uint8_t hw_mask[NPC_MAX_EXTRACT_DATA_LEN]; + struct otx2_flow_item_info info; + int lid, lt; + int rc; + + if (pst->pattern->type == RTE_FLOW_ITEM_TYPE_MPLS) + return otx2_flow_parse_mpls(pst, NPC_LID_LC); + + info.hw_mask = &hw_mask; + info.spec = NULL; + info.mask = NULL; + info.hw_hdr_len = 0; + lid = NPC_LID_LC; + + switch (pst->pattern->type) { + case RTE_FLOW_ITEM_TYPE_IPV4: + lt = NPC_LT_LC_IP; + info.def_mask = &rte_flow_item_ipv4_mask; + info.len = sizeof(struct rte_flow_item_ipv4); + break; + case RTE_FLOW_ITEM_TYPE_IPV6: + lid = NPC_LID_LC; + lt = NPC_LT_LC_IP6; + info.def_mask = &rte_flow_item_ipv6_mask; + info.len = sizeof(struct rte_flow_item_ipv6); + break; + case RTE_FLOW_ITEM_TYPE_ARP_ETH_IPV4: + lt = NPC_LT_LC_ARP; + info.def_mask = &rte_flow_item_arp_eth_ipv4_mask; + info.len = sizeof(struct rte_flow_item_arp_eth_ipv4); + break; + case RTE_FLOW_ITEM_TYPE_IPV6_EXT: + lid = NPC_LID_LC; + lt = NPC_LT_LC_IP6_EXT; + info.def_mask = &rte_flow_item_ipv6_ext_mask; + info.len = sizeof(struct rte_flow_item_ipv6_ext); + 
info.hw_hdr_len = 40; + break; + default: + /* No match at this layer */ + return 0; + } + + /* Identify if IP tunnels MPLS or IPv4/v6 */ + flow_check_lc_ip_tunnel(pst); + + otx2_flow_get_hw_supp_mask(pst, &info, lid, lt); + rc = otx2_flow_parse_item_basic(pst->pattern, &info, pst->error); + if (rc != 0) + return rc; + + return otx2_flow_update_parse_state(pst, &info, lid, lt, 0); +} + +/* VLAN, ETAG */ +int +otx2_flow_parse_lb(struct otx2_parse_state *pst) +{ + const struct rte_flow_item *pattern = pst->pattern; + const struct rte_flow_item *last_pattern; + char hw_mask[NPC_MAX_EXTRACT_DATA_LEN]; + struct otx2_flow_item_info info; + int lid, lt, lflags; + int nr_vlans = 0; + int rc; + + info.spec = NULL; + info.mask = NULL; + info.hw_hdr_len = NPC_TPID_LENGTH; + + lid = NPC_LID_LB; + lflags = 0; + last_pattern = pattern; + + if (pst->pattern->type == RTE_FLOW_ITEM_TYPE_VLAN) { + /* RTE vlan is either 802.1q or 802.1ad, + * this maps to either CTAG/STAG. We need to decide + * based on number of VLANS present. Matching is + * supported on first tag only. + */ + info.def_mask = &rte_flow_item_vlan_mask; + info.hw_mask = NULL; + info.len = sizeof(struct rte_flow_item_vlan); + + pattern = pst->pattern; + while (pattern->type == RTE_FLOW_ITEM_TYPE_VLAN) { + nr_vlans++; + + /* Basic validation of 2nd/3rd vlan item */ + if (nr_vlans > 1) { + otx2_npc_dbg("Vlans = %d", nr_vlans); + rc = otx2_flow_parse_item_basic(pattern, &info, + pst->error); + if (rc != 0) + return rc; + } + last_pattern = pattern; + pattern++; + pattern = otx2_flow_skip_void_and_any_items(pattern); + } + + switch (nr_vlans) { + case 1: + lt = NPC_LT_LB_CTAG; + break; + case 2: + lt = NPC_LT_LB_STAG_QINQ; + lflags = NPC_F_STAG_CTAG; + break; + case 3: + lt = NPC_LT_LB_STAG_QINQ; + lflags = NPC_F_STAG_STAG_CTAG; + break; + default: + rte_flow_error_set(pst->error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + last_pattern, + "more than 3 vlans not supported"); + return -rte_errno; + } + } else if (pst->pattern->type == RTE_FLOW_ITEM_TYPE_E_TAG) { + /* we can support ETAG and match a subsequent CTAG + * without any matching support. 
+ */ + lt = NPC_LT_LB_ETAG; + lflags = 0; + + last_pattern = pst->pattern; + pattern = otx2_flow_skip_void_and_any_items(pst->pattern + 1); + if (pattern->type == RTE_FLOW_ITEM_TYPE_VLAN) { + info.def_mask = &rte_flow_item_vlan_mask; + /* set supported mask to NULL for vlan tag */ + info.hw_mask = NULL; + info.len = sizeof(struct rte_flow_item_vlan); + rc = otx2_flow_parse_item_basic(pattern, &info, + pst->error); + if (rc != 0) + return rc; + + lflags = NPC_F_ETAG_CTAG; + last_pattern = pattern; + } + + info.def_mask = &rte_flow_item_e_tag_mask; + info.len = sizeof(struct rte_flow_item_e_tag); + } else { + return 0; + } + + info.hw_mask = &hw_mask; + info.spec = NULL; + info.mask = NULL; + otx2_flow_get_hw_supp_mask(pst, &info, lid, lt); + + rc = otx2_flow_parse_item_basic(pst->pattern, &info, pst->error); + if (rc != 0) + return rc; + + /* Point pattern to last item consumed */ + pst->pattern = last_pattern; + return otx2_flow_update_parse_state(pst, &info, lid, lt, lflags); +} + +int +otx2_flow_parse_la(struct otx2_parse_state *pst) +{ + struct rte_flow_item_eth hw_mask; + struct otx2_flow_item_info info; + int lid, lt; + int rc; + + /* Identify the pattern type into lid, lt */ + if (pst->pattern->type != RTE_FLOW_ITEM_TYPE_ETH) + return 0; + + lid = NPC_LID_LA; + lt = NPC_LT_LA_ETHER; + info.hw_hdr_len = 0; + + if (pst->flow->nix_intf == NIX_INTF_TX) { + lt = NPC_LT_LA_IH_NIX_ETHER; + info.hw_hdr_len = NPC_IH_LENGTH; + if (pst->npc->switch_header_type == OTX2_PRIV_FLAGS_HIGIG) { + lt = NPC_LT_LA_IH_NIX_HIGIG2_ETHER; + info.hw_hdr_len += NPC_HIGIG2_LENGTH; + } + } else { + if (pst->npc->switch_header_type == OTX2_PRIV_FLAGS_HIGIG) { + lt = NPC_LT_LA_HIGIG2_ETHER; + info.hw_hdr_len = NPC_HIGIG2_LENGTH; + } + } + + /* Prepare for parsing the item */ + info.def_mask = &rte_flow_item_eth_mask; + info.hw_mask = &hw_mask; + info.len = sizeof(struct rte_flow_item_eth); + otx2_flow_get_hw_supp_mask(pst, &info, lid, lt); + info.spec = NULL; + info.mask = NULL; + + /* Basic validation of item parameters */ + rc = otx2_flow_parse_item_basic(pst->pattern, &info, pst->error); + if (rc) + return rc; + + /* Update pst if not validate only? clash check? */ + return otx2_flow_update_parse_state(pst, &info, lid, lt, 0); +} + +int +otx2_flow_parse_higig2_hdr(struct otx2_parse_state *pst) +{ + struct rte_flow_item_higig2_hdr hw_mask; + struct otx2_flow_item_info info; + int lid, lt; + int rc; + + /* Identify the pattern type into lid, lt */ + if (pst->pattern->type != RTE_FLOW_ITEM_TYPE_HIGIG2) + return 0; + + lid = NPC_LID_LA; + lt = NPC_LT_LA_HIGIG2_ETHER; + info.hw_hdr_len = 0; + + if (pst->flow->nix_intf == NIX_INTF_TX) { + lt = NPC_LT_LA_IH_NIX_HIGIG2_ETHER; + info.hw_hdr_len = NPC_IH_LENGTH; + } + + /* Prepare for parsing the item */ + info.def_mask = &rte_flow_item_higig2_hdr_mask; + info.hw_mask = &hw_mask; + info.len = sizeof(struct rte_flow_item_higig2_hdr); + otx2_flow_get_hw_supp_mask(pst, &info, lid, lt); + info.spec = NULL; + info.mask = NULL; + + /* Basic validation of item parameters */ + rc = otx2_flow_parse_item_basic(pst->pattern, &info, pst->error); + if (rc) + return rc; + + /* Update pst if not validate only? clash check? 
*/ + return otx2_flow_update_parse_state(pst, &info, lid, lt, 0); +} + +static int +parse_rss_action(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_action *act, + struct rte_flow_error *error) +{ + struct otx2_eth_dev *hw = dev->data->dev_private; + struct otx2_rss_info *rss_info = &hw->rss_info; + const struct rte_flow_action_rss *rss; + uint32_t i; + + rss = (const struct rte_flow_action_rss *)act->conf; + + /* Not supported */ + if (attr->egress) { + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, + attr, "No support of RSS in egress"); + } + + if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + act, "multi-queue mode is disabled"); + + /* Parse RSS related parameters from configuration */ + if (!rss || !rss->queue_num) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, "no valid queues"); + + if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, act, + "non-default RSS hash functions" + " are not supported"); + + if (rss->key_len && rss->key_len > RTE_DIM(rss_info->key)) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, act, + "RSS hash key too large"); + + if (rss->queue_num > rss_info->rss_size) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act, + "too many queues for RSS context"); + + for (i = 0; i < rss->queue_num; i++) { + if (rss->queue[i] >= dev->data->nb_rx_queues) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, + "queue id > max number" + " of queues"); + } + + return 0; +} + +int +otx2_flow_parse_actions(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_action actions[], + struct rte_flow_error *error, + struct rte_flow *flow) +{ + struct otx2_eth_dev *hw = dev->data->dev_private; + struct otx2_npc_flow_info *npc = &hw->npc_flow; + const struct rte_flow_action_count *act_count; + const struct rte_flow_action_mark *act_mark; + const struct rte_flow_action_queue *act_q; + const struct rte_flow_action_vf *vf_act; + const char *errmsg = NULL; + int sel_act, req_act = 0; + uint16_t pf_func, vf_id; + int errcode = 0; + int mark = 0; + int rq = 0; + + /* Initialize actions */ + flow->ctr_id = NPC_COUNTER_NONE; + pf_func = otx2_pfvf_func(hw->pf, hw->vf); + + for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { + otx2_npc_dbg("Action type = %d", actions->type); + + switch (actions->type) { + case RTE_FLOW_ACTION_TYPE_VOID: + break; + case RTE_FLOW_ACTION_TYPE_MARK: + act_mark = + (const struct rte_flow_action_mark *)actions->conf; + + /* We have only 16 bits. 
Use highest val for flag */ + if (act_mark->id > (OTX2_FLOW_FLAG_VAL - 2)) { + errmsg = "mark value must be < 0xfffe"; + errcode = ENOTSUP; + goto err_exit; + } + mark = act_mark->id + 1; + req_act |= OTX2_FLOW_ACT_MARK; + rte_atomic32_inc(&npc->mark_actions); + break; + + case RTE_FLOW_ACTION_TYPE_FLAG: + mark = OTX2_FLOW_FLAG_VAL; + req_act |= OTX2_FLOW_ACT_FLAG; + rte_atomic32_inc(&npc->mark_actions); + break; + + case RTE_FLOW_ACTION_TYPE_COUNT: + act_count = + (const struct rte_flow_action_count *) + actions->conf; + + if (act_count->shared == 1) { + errmsg = "Shared Counters not supported"; + errcode = ENOTSUP; + goto err_exit; + } + /* Indicates, need a counter */ + flow->ctr_id = 1; + req_act |= OTX2_FLOW_ACT_COUNT; + break; + + case RTE_FLOW_ACTION_TYPE_DROP: + req_act |= OTX2_FLOW_ACT_DROP; + break; + + case RTE_FLOW_ACTION_TYPE_PF: + req_act |= OTX2_FLOW_ACT_PF; + pf_func &= (0xfc00); + break; + + case RTE_FLOW_ACTION_TYPE_VF: + vf_act = (const struct rte_flow_action_vf *) + actions->conf; + req_act |= OTX2_FLOW_ACT_VF; + if (vf_act->original == 0) { + vf_id = vf_act->id & RVU_PFVF_FUNC_MASK; + if (vf_id >= hw->maxvf) { + errmsg = "invalid vf specified"; + errcode = EINVAL; + goto err_exit; + } + pf_func &= (0xfc00); + pf_func = (pf_func | (vf_id + 1)); + } + break; + + case RTE_FLOW_ACTION_TYPE_QUEUE: + /* Applicable only to ingress flow */ + act_q = (const struct rte_flow_action_queue *) + actions->conf; + rq = act_q->index; + if (rq >= dev->data->nb_rx_queues) { + errmsg = "invalid queue index"; + errcode = EINVAL; + goto err_exit; + } + req_act |= OTX2_FLOW_ACT_QUEUE; + break; + + case RTE_FLOW_ACTION_TYPE_RSS: + errcode = parse_rss_action(dev, attr, actions, error); + if (errcode) + return -rte_errno; + + req_act |= OTX2_FLOW_ACT_RSS; + break; + + case RTE_FLOW_ACTION_TYPE_SECURITY: + /* Assumes user has already configured security + * session for this flow. Associated conf is + * opaque. When RTE security is implemented for otx2, + * we need to verify that for specified security + * session: + * action_type == + * RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL && + * session_protocol == + * RTE_SECURITY_PROTOCOL_IPSEC + * + * RSS is not supported with inline ipsec. Get the + * rq from associated conf, or make + * RTE_FLOW_ACTION_TYPE_QUEUE compulsory with this + * action. + * Currently, rq = 0 is assumed. + */ + req_act |= OTX2_FLOW_ACT_SEC; + rq = 0; + break; + default: + errmsg = "Unsupported action specified"; + errcode = ENOTSUP; + goto err_exit; + } + } + + /* Check if actions specified are compatible */ + if (attr->egress) { + /* Only DROP/COUNT is supported */ + if (!(req_act & OTX2_FLOW_ACT_DROP)) { + errmsg = "DROP is required action for egress"; + errcode = EINVAL; + goto err_exit; + } else if (req_act & ~(OTX2_FLOW_ACT_DROP | + OTX2_FLOW_ACT_COUNT)) { + errmsg = "Unsupported action specified"; + errcode = ENOTSUP; + goto err_exit; + } + flow->npc_action = NIX_TX_ACTIONOP_DROP; + goto set_pf_func; + } + + /* We have already verified the attr, this is ingress. + * - Exactly one terminating action is supported + * - Exactly one of MARK or FLAG is supported + * - If terminating action is DROP, only count is valid. 
+ */ + sel_act = req_act & OTX2_FLOW_ACT_TERM; + if ((sel_act & (sel_act - 1)) != 0) { + errmsg = "Only one terminating action supported"; + errcode = EINVAL; + goto err_exit; + } + + if (req_act & OTX2_FLOW_ACT_DROP) { + sel_act = req_act & ~OTX2_FLOW_ACT_COUNT; + if ((sel_act & (sel_act - 1)) != 0) { + errmsg = "Only COUNT action is supported " + "with DROP ingress action"; + errcode = ENOTSUP; + goto err_exit; + } + } + + if ((req_act & (OTX2_FLOW_ACT_FLAG | OTX2_FLOW_ACT_MARK)) + == (OTX2_FLOW_ACT_FLAG | OTX2_FLOW_ACT_MARK)) { + errmsg = "Only one of FLAG or MARK action is supported"; + errcode = ENOTSUP; + goto err_exit; + } + + /* Set NIX_RX_ACTIONOP */ + if (req_act & (OTX2_FLOW_ACT_PF | OTX2_FLOW_ACT_VF)) { + flow->npc_action = NIX_RX_ACTIONOP_UCAST; + if (req_act & OTX2_FLOW_ACT_QUEUE) + flow->npc_action |= (uint64_t)rq << 20; + } else if (req_act & OTX2_FLOW_ACT_DROP) { + flow->npc_action = NIX_RX_ACTIONOP_DROP; + } else if (req_act & OTX2_FLOW_ACT_QUEUE) { + flow->npc_action = NIX_RX_ACTIONOP_UCAST; + flow->npc_action |= (uint64_t)rq << 20; + } else if (req_act & OTX2_FLOW_ACT_RSS) { + /* When user added a rule for rss, first we will add the + *rule in MCAM and then update the action, once if we have + *FLOW_KEY_ALG index. So, till we update the action with + *flow_key_alg index, set the action to drop. + */ + if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) + flow->npc_action = NIX_RX_ACTIONOP_DROP; + else + flow->npc_action = NIX_RX_ACTIONOP_UCAST; + } else if (req_act & OTX2_FLOW_ACT_SEC) { + flow->npc_action = NIX_RX_ACTIONOP_UCAST_IPSEC; + flow->npc_action |= (uint64_t)rq << 20; + } else if (req_act & (OTX2_FLOW_ACT_FLAG | OTX2_FLOW_ACT_MARK)) { + flow->npc_action = NIX_RX_ACTIONOP_UCAST; + } else if (req_act & OTX2_FLOW_ACT_COUNT) { + /* Keep OTX2_FLOW_ACT_COUNT always at the end + * This is default action, when user specify only + * COUNT ACTION + */ + flow->npc_action = NIX_RX_ACTIONOP_UCAST; + } else { + /* Should never reach here */ + errmsg = "Invalid action specified"; + errcode = EINVAL; + goto err_exit; + } + + if (mark) + flow->npc_action |= (uint64_t)mark << 40; + + if (rte_atomic32_read(&npc->mark_actions) == 1) { + hw->rx_offload_flags |= + NIX_RX_OFFLOAD_MARK_UPDATE_F; + otx2_eth_set_rx_function(dev); + } + +set_pf_func: + /* Ideally AF must ensure that correct pf_func is set */ + flow->npc_action |= (uint64_t)pf_func << 4; + + return 0; + +err_exit: + rte_flow_error_set(error, errcode, + RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL, + errmsg); + return -rte_errno; +} diff --git a/src/spdk/dpdk/drivers/net/octeontx2/otx2_flow_utils.c b/src/spdk/dpdk/drivers/net/octeontx2/otx2_flow_utils.c new file mode 100644 index 000000000..14625c9ad --- /dev/null +++ b/src/spdk/dpdk/drivers/net/octeontx2/otx2_flow_utils.c @@ -0,0 +1,959 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(C) 2019 Marvell International Ltd. 
+ */ + +#include "otx2_ethdev.h" +#include "otx2_flow.h" + +static int +flow_mcam_alloc_counter(struct otx2_mbox *mbox, uint16_t *ctr) +{ + struct npc_mcam_alloc_counter_req *req; + struct npc_mcam_alloc_counter_rsp *rsp; + int rc; + + req = otx2_mbox_alloc_msg_npc_mcam_alloc_counter(mbox); + req->count = 1; + otx2_mbox_msg_send(mbox, 0); + rc = otx2_mbox_get_rsp(mbox, 0, (void *)&rsp); + + *ctr = rsp->cntr_list[0]; + return rc; +} + +int +otx2_flow_mcam_free_counter(struct otx2_mbox *mbox, uint16_t ctr_id) +{ + struct npc_mcam_oper_counter_req *req; + int rc; + + req = otx2_mbox_alloc_msg_npc_mcam_free_counter(mbox); + req->cntr = ctr_id; + otx2_mbox_msg_send(mbox, 0); + rc = otx2_mbox_get_rsp(mbox, 0, NULL); + + return rc; +} + +int +otx2_flow_mcam_read_counter(struct otx2_mbox *mbox, uint32_t ctr_id, + uint64_t *count) +{ + struct npc_mcam_oper_counter_req *req; + struct npc_mcam_oper_counter_rsp *rsp; + int rc; + + req = otx2_mbox_alloc_msg_npc_mcam_counter_stats(mbox); + req->cntr = ctr_id; + otx2_mbox_msg_send(mbox, 0); + rc = otx2_mbox_get_rsp(mbox, 0, (void *)&rsp); + + *count = rsp->stat; + return rc; +} + +int +otx2_flow_mcam_clear_counter(struct otx2_mbox *mbox, uint32_t ctr_id) +{ + struct npc_mcam_oper_counter_req *req; + int rc; + + req = otx2_mbox_alloc_msg_npc_mcam_clear_counter(mbox); + req->cntr = ctr_id; + otx2_mbox_msg_send(mbox, 0); + rc = otx2_mbox_get_rsp(mbox, 0, NULL); + + return rc; +} + +int +otx2_flow_mcam_free_entry(struct otx2_mbox *mbox, uint32_t entry) +{ + struct npc_mcam_free_entry_req *req; + int rc; + + req = otx2_mbox_alloc_msg_npc_mcam_free_entry(mbox); + req->entry = entry; + otx2_mbox_msg_send(mbox, 0); + rc = otx2_mbox_get_rsp(mbox, 0, NULL); + + return rc; +} + +int +otx2_flow_mcam_free_all_entries(struct otx2_mbox *mbox) +{ + struct npc_mcam_free_entry_req *req; + int rc; + + req = otx2_mbox_alloc_msg_npc_mcam_free_entry(mbox); + req->all = 1; + otx2_mbox_msg_send(mbox, 0); + rc = otx2_mbox_get_rsp(mbox, 0, NULL); + + return rc; +} + +static void +flow_prep_mcam_ldata(uint8_t *ptr, const uint8_t *data, int len) +{ + int idx; + + for (idx = 0; idx < len; idx++) + ptr[idx] = data[len - 1 - idx]; +} + +static int +flow_check_copysz(size_t size, size_t len) +{ + if (len <= size) + return len; + return -1; +} + +static inline int +flow_mem_is_zero(const void *mem, int len) +{ + const char *m = mem; + int i; + + for (i = 0; i < len; i++) { + if (m[i] != 0) + return 0; + } + return 1; +} + +static void +flow_set_hw_mask(struct otx2_flow_item_info *info, + struct npc_xtract_info *xinfo, + char *hw_mask) +{ + int max_off, offset; + int j; + + if (xinfo->enable == 0) + return; + + if (xinfo->hdr_off < info->hw_hdr_len) + return; + + max_off = xinfo->hdr_off + xinfo->len - info->hw_hdr_len; + + if (max_off > info->len) + max_off = info->len; + + offset = xinfo->hdr_off - info->hw_hdr_len; + for (j = offset; j < max_off; j++) + hw_mask[j] = 0xff; +} + +void +otx2_flow_get_hw_supp_mask(struct otx2_parse_state *pst, + struct otx2_flow_item_info *info, int lid, int lt) +{ + struct npc_xtract_info *xinfo, *lfinfo; + char *hw_mask = info->hw_mask; + int lf_cfg; + int i, j; + int intf; + + intf = pst->flow->nix_intf; + xinfo = pst->npc->prx_dxcfg[intf][lid][lt].xtract; + memset(hw_mask, 0, info->len); + + for (i = 0; i < NPC_MAX_LD; i++) { + flow_set_hw_mask(info, &xinfo[i], hw_mask); + } + + for (i = 0; i < NPC_MAX_LD; i++) { + + if (xinfo[i].flags_enable == 0) + continue; + + lf_cfg = pst->npc->prx_lfcfg[i].i; + if (lf_cfg == lid) { + for (j = 0; j < NPC_MAX_LFL; 
j++) { + lfinfo = pst->npc->prx_fxcfg[intf] + [i][j].xtract; + flow_set_hw_mask(info, &lfinfo[0], hw_mask); + } + } + } +} + +static int +flow_update_extraction_data(struct otx2_parse_state *pst, + struct otx2_flow_item_info *info, + struct npc_xtract_info *xinfo) +{ + uint8_t int_info_mask[NPC_MAX_EXTRACT_DATA_LEN]; + uint8_t int_info[NPC_MAX_EXTRACT_DATA_LEN]; + struct npc_xtract_info *x; + int k, idx, hdr_off; + int len = 0; + + x = xinfo; + len = x->len; + hdr_off = x->hdr_off; + + if (hdr_off < info->hw_hdr_len) + return 0; + + if (x->enable == 0) + return 0; + + otx2_npc_dbg("x->hdr_off = %d, len = %d, info->len = %d," + "x->key_off = %d", x->hdr_off, len, info->len, + x->key_off); + + hdr_off -= info->hw_hdr_len; + + if (hdr_off + len > info->len) + len = info->len - hdr_off; + + /* Check for over-write of previous layer */ + if (!flow_mem_is_zero(pst->mcam_mask + x->key_off, + len)) { + /* Cannot support this data match */ + rte_flow_error_set(pst->error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + pst->pattern, + "Extraction unsupported"); + return -rte_errno; + } + + len = flow_check_copysz((OTX2_MAX_MCAM_WIDTH_DWORDS * 8) + - x->key_off, + len); + if (len < 0) { + rte_flow_error_set(pst->error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + pst->pattern, + "Internal Error"); + return -rte_errno; + } + + /* Need to reverse complete structure so that dest addr is at + * MSB so as to program the MCAM using mcam_data & mcam_mask + * arrays + */ + flow_prep_mcam_ldata(int_info, + (const uint8_t *)info->spec + hdr_off, + x->len); + flow_prep_mcam_ldata(int_info_mask, + (const uint8_t *)info->mask + hdr_off, + x->len); + + otx2_npc_dbg("Spec: "); + for (k = 0; k < info->len; k++) + otx2_npc_dbg("0x%.2x ", + ((const uint8_t *)info->spec)[k]); + + otx2_npc_dbg("Int_info: "); + for (k = 0; k < info->len; k++) + otx2_npc_dbg("0x%.2x ", int_info[k]); + + memcpy(pst->mcam_mask + x->key_off, int_info_mask, len); + memcpy(pst->mcam_data + x->key_off, int_info, len); + + otx2_npc_dbg("Parse state mcam data & mask"); + for (idx = 0; idx < len ; idx++) + otx2_npc_dbg("data[%d]: 0x%x, mask[%d]: 0x%x", idx, + *(pst->mcam_data + idx + x->key_off), idx, + *(pst->mcam_mask + idx + x->key_off)); + return 0; +} + +int +otx2_flow_update_parse_state(struct otx2_parse_state *pst, + struct otx2_flow_item_info *info, int lid, int lt, + uint8_t flags) +{ + struct npc_lid_lt_xtract_info *xinfo; + struct npc_xtract_info *lfinfo; + int intf, lf_cfg; + int i, j, rc = 0; + + otx2_npc_dbg("Parse state function info mask total %s", + (const uint8_t *)info->mask); + + pst->layer_mask |= lid; + pst->lt[lid] = lt; + pst->flags[lid] = flags; + + intf = pst->flow->nix_intf; + xinfo = &pst->npc->prx_dxcfg[intf][lid][lt]; + otx2_npc_dbg("Is_terminating = %d", xinfo->is_terminating); + if (xinfo->is_terminating) + pst->terminate = 1; + + if (info->spec == NULL) { + otx2_npc_dbg("Info spec NULL"); + goto done; + } + + for (i = 0; i < NPC_MAX_LD; i++) { + rc = flow_update_extraction_data(pst, info, &xinfo->xtract[i]); + if (rc != 0) + return rc; + } + + for (i = 0; i < NPC_MAX_LD; i++) { + if (xinfo->xtract[i].flags_enable == 0) + continue; + + lf_cfg = pst->npc->prx_lfcfg[i].i; + if (lf_cfg == lid) { + for (j = 0; j < NPC_MAX_LFL; j++) { + lfinfo = pst->npc->prx_fxcfg[intf] + [i][j].xtract; + rc = flow_update_extraction_data(pst, info, + &lfinfo[0]); + if (rc != 0) + return rc; + + if (lfinfo[0].enable) + pst->flags[lid] = j; + } + } + } + +done: + /* Next pattern to parse by subsequent layers */ + pst->pattern++; + return 0; +} + 
+static inline int +flow_range_is_valid(const char *spec, const char *last, const char *mask, + int len) +{ + /* Mask must be zero or equal to spec as we do not support + * non-contiguous ranges. + */ + while (len--) { + if (last[len] && + (spec[len] & mask[len]) != (last[len] & mask[len])) + return 0; /* False */ + } + return 1; +} + + +static inline int +flow_mask_is_supported(const char *mask, const char *hw_mask, int len) +{ + /* + * If no hw_mask, assume nothing is supported. + * mask is never NULL + */ + if (hw_mask == NULL) + return flow_mem_is_zero(mask, len); + + while (len--) { + if ((mask[len] | hw_mask[len]) != hw_mask[len]) + return 0; /* False */ + } + return 1; +} + +int +otx2_flow_parse_item_basic(const struct rte_flow_item *item, + struct otx2_flow_item_info *info, + struct rte_flow_error *error) +{ + /* Item must not be NULL */ + if (item == NULL) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, NULL, + "Item is NULL"); + return -rte_errno; + } + /* If spec is NULL, both mask and last must be NULL, this + * makes it to match ANY value (eq to mask = 0). + * Setting either mask or last without spec is an error + */ + if (item->spec == NULL) { + if (item->last == NULL && item->mask == NULL) { + info->spec = NULL; + return 0; + } + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "mask or last set without spec"); + return -rte_errno; + } + + /* We have valid spec */ + info->spec = item->spec; + + /* If mask is not set, use default mask, err if default mask is + * also NULL. + */ + if (item->mask == NULL) { + otx2_npc_dbg("Item mask null, using default mask"); + if (info->def_mask == NULL) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "No mask or default mask given"); + return -rte_errno; + } + info->mask = info->def_mask; + } else { + info->mask = item->mask; + } + + /* mask specified must be subset of hw supported mask + * mask | hw_mask == hw_mask + */ + if (!flow_mask_is_supported(info->mask, info->hw_mask, info->len)) { + rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, + item, "Unsupported field in the mask"); + return -rte_errno; + } + + /* Now we have spec and mask. OTX2 does not support non-contiguous + * range. 
We should have either: + * - spec & mask == last & mask or, + * - last == 0 or, + * - last == NULL + */ + if (item->last != NULL && !flow_mem_is_zero(item->last, info->len)) { + if (!flow_range_is_valid(item->spec, item->last, info->mask, + info->len)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Unsupported range for match"); + return -rte_errno; + } + } + + return 0; +} + +void +otx2_flow_keyx_compress(uint64_t *data, uint32_t nibble_mask) +{ + uint64_t cdata[2] = {0ULL, 0ULL}, nibble; + int i, j = 0; + + for (i = 0; i < NPC_MAX_KEY_NIBBLES; i++) { + if (nibble_mask & (1 << i)) { + nibble = (data[i / 16] >> ((i & 0xf) * 4)) & 0xf; + cdata[j / 16] |= (nibble << ((j & 0xf) * 4)); + j += 1; + } + } + + data[0] = cdata[0]; + data[1] = cdata[1]; +} + +static int +flow_first_set_bit(uint64_t slab) +{ + int num = 0; + + if ((slab & 0xffffffff) == 0) { + num += 32; + slab >>= 32; + } + if ((slab & 0xffff) == 0) { + num += 16; + slab >>= 16; + } + if ((slab & 0xff) == 0) { + num += 8; + slab >>= 8; + } + if ((slab & 0xf) == 0) { + num += 4; + slab >>= 4; + } + if ((slab & 0x3) == 0) { + num += 2; + slab >>= 2; + } + if ((slab & 0x1) == 0) + num += 1; + + return num; +} + +static int +flow_shift_lv_ent(struct otx2_mbox *mbox, struct rte_flow *flow, + struct otx2_npc_flow_info *flow_info, + uint32_t old_ent, uint32_t new_ent) +{ + struct npc_mcam_shift_entry_req *req; + struct npc_mcam_shift_entry_rsp *rsp; + struct otx2_flow_list *list; + struct rte_flow *flow_iter; + int rc = 0; + + otx2_npc_dbg("Old ent:%u new ent:%u priority:%u", old_ent, new_ent, + flow->priority); + + list = &flow_info->flow_list[flow->priority]; + + /* Old entry is disabled & it's contents are moved to new_entry, + * new entry is enabled finally. + */ + req = otx2_mbox_alloc_msg_npc_mcam_shift_entry(mbox); + req->curr_entry[0] = old_ent; + req->new_entry[0] = new_ent; + req->shift_count = 1; + + otx2_mbox_msg_send(mbox, 0); + rc = otx2_mbox_get_rsp(mbox, 0, (void *)&rsp); + if (rc) + return rc; + + /* Remove old node from list */ + TAILQ_FOREACH(flow_iter, list, next) { + if (flow_iter->mcam_id == old_ent) + TAILQ_REMOVE(list, flow_iter, next); + } + + /* Insert node with new mcam id at right place */ + TAILQ_FOREACH(flow_iter, list, next) { + if (flow_iter->mcam_id > new_ent) + TAILQ_INSERT_BEFORE(flow_iter, flow, next); + } + return rc; +} + +/* Exchange all required entries with a given priority level */ +static int +flow_shift_ent(struct otx2_mbox *mbox, struct rte_flow *flow, + struct otx2_npc_flow_info *flow_info, + struct npc_mcam_alloc_entry_rsp *rsp, int dir, int prio_lvl) +{ + struct rte_bitmap *fr_bmp, *fr_bmp_rev, *lv_bmp, *lv_bmp_rev, *bmp; + uint32_t e_fr = 0, e_lv = 0, e, e_id = 0, mcam_entries; + uint64_t fr_bit_pos = 0, lv_bit_pos = 0, bit_pos = 0; + /* Bit position within the slab */ + uint32_t sl_fr_bit_off = 0, sl_lv_bit_off = 0; + /* Overall bit position of the start of slab */ + /* free & live entry index */ + int rc_fr = 0, rc_lv = 0, rc = 0, idx = 0; + struct otx2_mcam_ents_info *ent_info; + /* free & live bitmap slab */ + uint64_t sl_fr = 0, sl_lv = 0, *sl; + + fr_bmp = flow_info->free_entries[prio_lvl]; + fr_bmp_rev = flow_info->free_entries_rev[prio_lvl]; + lv_bmp = flow_info->live_entries[prio_lvl]; + lv_bmp_rev = flow_info->live_entries_rev[prio_lvl]; + ent_info = &flow_info->flow_entry_info[prio_lvl]; + mcam_entries = flow_info->mcam_entries; + + + /* New entries allocated are always contiguous, but older entries + * already in free/live bitmap can be 
non-contiguous: so return + * shifted entries should be in non-contiguous format. + */ + while (idx <= rsp->count) { + if (!sl_fr && !sl_lv) { + /* Lower index elements to be exchanged */ + if (dir < 0) { + rc_fr = rte_bitmap_scan(fr_bmp, &e_fr, &sl_fr); + rc_lv = rte_bitmap_scan(lv_bmp, &e_lv, &sl_lv); + otx2_npc_dbg("Fwd slab rc fr %u rc lv %u " + "e_fr %u e_lv %u", rc_fr, rc_lv, + e_fr, e_lv); + } else { + rc_fr = rte_bitmap_scan(fr_bmp_rev, + &sl_fr_bit_off, + &sl_fr); + rc_lv = rte_bitmap_scan(lv_bmp_rev, + &sl_lv_bit_off, + &sl_lv); + + otx2_npc_dbg("Rev slab rc fr %u rc lv %u " + "e_fr %u e_lv %u", rc_fr, rc_lv, + e_fr, e_lv); + } + } + + if (rc_fr) { + fr_bit_pos = flow_first_set_bit(sl_fr); + e_fr = sl_fr_bit_off + fr_bit_pos; + otx2_npc_dbg("Fr_bit_pos 0x%" PRIx64, fr_bit_pos); + } else { + e_fr = ~(0); + } + + if (rc_lv) { + lv_bit_pos = flow_first_set_bit(sl_lv); + e_lv = sl_lv_bit_off + lv_bit_pos; + otx2_npc_dbg("Lv_bit_pos 0x%" PRIx64, lv_bit_pos); + } else { + e_lv = ~(0); + } + + /* First entry is from free_bmap */ + if (e_fr < e_lv) { + bmp = fr_bmp; + e = e_fr; + sl = &sl_fr; + bit_pos = fr_bit_pos; + if (dir > 0) + e_id = mcam_entries - e - 1; + else + e_id = e; + otx2_npc_dbg("Fr e %u e_id %u", e, e_id); + } else { + bmp = lv_bmp; + e = e_lv; + sl = &sl_lv; + bit_pos = lv_bit_pos; + if (dir > 0) + e_id = mcam_entries - e - 1; + else + e_id = e; + + otx2_npc_dbg("Lv e %u e_id %u", e, e_id); + if (idx < rsp->count) + rc = + flow_shift_lv_ent(mbox, flow, + flow_info, e_id, + rsp->entry + idx); + } + + rte_bitmap_clear(bmp, e); + rte_bitmap_set(bmp, rsp->entry + idx); + /* Update entry list, use non-contiguous + * list now. + */ + rsp->entry_list[idx] = e_id; + *sl &= ~(1 << bit_pos); + + /* Update min & max entry identifiers in current + * priority level. + */ + if (dir < 0) { + ent_info->max_id = rsp->entry + idx; + ent_info->min_id = e_id; + } else { + ent_info->max_id = e_id; + ent_info->min_id = rsp->entry; + } + + idx++; + } + return rc; +} + +/* Validate if newly allocated entries lie in the correct priority zone + * since NPC_MCAM_LOWER_PRIO & NPC_MCAM_HIGHER_PRIO don't ensure zone accuracy. + * If not properly aligned, shift entries to do so + */ +static int +flow_validate_and_shift_prio_ent(struct otx2_mbox *mbox, struct rte_flow *flow, + struct otx2_npc_flow_info *flow_info, + struct npc_mcam_alloc_entry_rsp *rsp, + int req_prio) +{ + int prio_idx = 0, rc = 0, needs_shift = 0, idx, prio = flow->priority; + struct otx2_mcam_ents_info *info = flow_info->flow_entry_info; + int dir = (req_prio == NPC_MCAM_HIGHER_PRIO) ? 1 : -1; + uint32_t tot_ent = 0; + + otx2_npc_dbg("Dir %d, priority = %d", dir, prio); + + if (dir < 0) + prio_idx = flow_info->flow_max_priority - 1; + + /* Only live entries needs to be shifted, free entries can just be + * moved by bits manipulation. + */ + + /* For dir = -1(NPC_MCAM_LOWER_PRIO), when shifting, + * NPC_MAX_PREALLOC_ENT are exchanged with adjoining higher priority + * level entries(lower indexes). + * + * For dir = +1(NPC_MCAM_HIGHER_PRIO), during shift, + * NPC_MAX_PREALLOC_ENT are exchanged with adjoining lower priority + * level entries(higher indexes) with highest indexes. 
+ */ + do { + tot_ent = info[prio_idx].free_ent + info[prio_idx].live_ent; + + if (dir < 0 && prio_idx != prio && + rsp->entry > info[prio_idx].max_id && tot_ent) { + otx2_npc_dbg("Rsp entry %u prio idx %u " + "max id %u", rsp->entry, prio_idx, + info[prio_idx].max_id); + + needs_shift = 1; + } else if ((dir > 0) && (prio_idx != prio) && + (rsp->entry < info[prio_idx].min_id) && tot_ent) { + otx2_npc_dbg("Rsp entry %u prio idx %u " + "min id %u", rsp->entry, prio_idx, + info[prio_idx].min_id); + needs_shift = 1; + } + + otx2_npc_dbg("Needs_shift = %d", needs_shift); + if (needs_shift) { + needs_shift = 0; + rc = flow_shift_ent(mbox, flow, flow_info, rsp, dir, + prio_idx); + } else { + for (idx = 0; idx < rsp->count; idx++) + rsp->entry_list[idx] = rsp->entry + idx; + } + } while ((prio_idx != prio) && (prio_idx += dir)); + + return rc; +} + +static int +flow_find_ref_entry(struct otx2_npc_flow_info *flow_info, int *prio, + int prio_lvl) +{ + struct otx2_mcam_ents_info *info = flow_info->flow_entry_info; + int step = 1; + + while (step < flow_info->flow_max_priority) { + if (((prio_lvl + step) < flow_info->flow_max_priority) && + info[prio_lvl + step].live_ent) { + *prio = NPC_MCAM_HIGHER_PRIO; + return info[prio_lvl + step].min_id; + } + + if (((prio_lvl - step) >= 0) && + info[prio_lvl - step].live_ent) { + otx2_npc_dbg("Prio_lvl %u live %u", prio_lvl - step, + info[prio_lvl - step].live_ent); + *prio = NPC_MCAM_LOWER_PRIO; + return info[prio_lvl - step].max_id; + } + step++; + } + *prio = NPC_MCAM_ANY_PRIO; + return 0; +} + +static int +flow_fill_entry_cache(struct otx2_mbox *mbox, struct rte_flow *flow, + struct otx2_npc_flow_info *flow_info, uint32_t *free_ent) +{ + struct rte_bitmap *free_bmp, *free_bmp_rev, *live_bmp, *live_bmp_rev; + struct npc_mcam_alloc_entry_rsp rsp_local; + struct npc_mcam_alloc_entry_rsp *rsp_cmd; + struct npc_mcam_alloc_entry_req *req; + struct npc_mcam_alloc_entry_rsp *rsp; + struct otx2_mcam_ents_info *info; + uint16_t ref_ent, idx; + int rc, prio; + + info = &flow_info->flow_entry_info[flow->priority]; + free_bmp = flow_info->free_entries[flow->priority]; + free_bmp_rev = flow_info->free_entries_rev[flow->priority]; + live_bmp = flow_info->live_entries[flow->priority]; + live_bmp_rev = flow_info->live_entries_rev[flow->priority]; + + ref_ent = flow_find_ref_entry(flow_info, &prio, flow->priority); + + req = otx2_mbox_alloc_msg_npc_mcam_alloc_entry(mbox); + req->contig = 1; + req->count = flow_info->flow_prealloc_size; + req->priority = prio; + req->ref_entry = ref_ent; + + otx2_npc_dbg("Fill cache ref entry %u prio %u", ref_ent, prio); + + otx2_mbox_msg_send(mbox, 0); + rc = otx2_mbox_get_rsp(mbox, 0, (void *)&rsp_cmd); + if (rc) + return rc; + + rsp = &rsp_local; + memcpy(rsp, rsp_cmd, sizeof(*rsp)); + + otx2_npc_dbg("Alloc entry %u count %u , prio = %d", rsp->entry, + rsp->count, prio); + + /* Non-first ent cache fill */ + if (prio != NPC_MCAM_ANY_PRIO) { + flow_validate_and_shift_prio_ent(mbox, flow, flow_info, rsp, + prio); + } else { + /* Copy into response entry list */ + for (idx = 0; idx < rsp->count; idx++) + rsp->entry_list[idx] = rsp->entry + idx; + } + + otx2_npc_dbg("Fill entry cache rsp count %u", rsp->count); + /* Update free entries, reverse free entries list, + * min & max entry ids. 
+ */ + for (idx = 0; idx < rsp->count; idx++) { + if (unlikely(rsp->entry_list[idx] < info->min_id)) + info->min_id = rsp->entry_list[idx]; + + if (unlikely(rsp->entry_list[idx] > info->max_id)) + info->max_id = rsp->entry_list[idx]; + + /* Skip entry to be returned, not to be part of free + * list. + */ + if (prio == NPC_MCAM_HIGHER_PRIO) { + if (unlikely(idx == (rsp->count - 1))) { + *free_ent = rsp->entry_list[idx]; + continue; + } + } else { + if (unlikely(!idx)) { + *free_ent = rsp->entry_list[idx]; + continue; + } + } + info->free_ent++; + rte_bitmap_set(free_bmp, rsp->entry_list[idx]); + rte_bitmap_set(free_bmp_rev, flow_info->mcam_entries - + rsp->entry_list[idx] - 1); + + otx2_npc_dbg("Final rsp entry %u rsp entry rev %u", + rsp->entry_list[idx], + flow_info->mcam_entries - rsp->entry_list[idx] - 1); + } + + otx2_npc_dbg("Cache free entry %u, rev = %u", *free_ent, + flow_info->mcam_entries - *free_ent - 1); + info->live_ent++; + rte_bitmap_set(live_bmp, *free_ent); + rte_bitmap_set(live_bmp_rev, flow_info->mcam_entries - *free_ent - 1); + + return 0; +} + +static int +flow_check_preallocated_entry_cache(struct otx2_mbox *mbox, + struct rte_flow *flow, + struct otx2_npc_flow_info *flow_info) +{ + struct rte_bitmap *free, *free_rev, *live, *live_rev; + uint32_t pos = 0, free_ent = 0, mcam_entries; + struct otx2_mcam_ents_info *info; + uint64_t slab = 0; + int rc; + + otx2_npc_dbg("Flow priority %u", flow->priority); + + info = &flow_info->flow_entry_info[flow->priority]; + + free_rev = flow_info->free_entries_rev[flow->priority]; + free = flow_info->free_entries[flow->priority]; + live_rev = flow_info->live_entries_rev[flow->priority]; + live = flow_info->live_entries[flow->priority]; + mcam_entries = flow_info->mcam_entries; + + if (info->free_ent) { + rc = rte_bitmap_scan(free, &pos, &slab); + if (rc) { + /* Get free_ent from free entry bitmap */ + free_ent = pos + __builtin_ctzll(slab); + otx2_npc_dbg("Allocated from cache entry %u", free_ent); + /* Remove from free bitmaps and add to live ones */ + rte_bitmap_clear(free, free_ent); + rte_bitmap_set(live, free_ent); + rte_bitmap_clear(free_rev, + mcam_entries - free_ent - 1); + rte_bitmap_set(live_rev, + mcam_entries - free_ent - 1); + + info->free_ent--; + info->live_ent++; + return free_ent; + } + + otx2_npc_dbg("No free entry:its a mess"); + return -1; + } + + rc = flow_fill_entry_cache(mbox, flow, flow_info, &free_ent); + if (rc) + return rc; + + return free_ent; +} + +int +otx2_flow_mcam_alloc_and_write(struct rte_flow *flow, struct otx2_mbox *mbox, + __rte_unused struct otx2_parse_state *pst, + struct otx2_npc_flow_info *flow_info) +{ + int use_ctr = (flow->ctr_id == NPC_COUNTER_NONE ? 0 : 1); + struct npc_mcam_write_entry_req *req; + struct mbox_msghdr *rsp; + uint16_t ctr = ~(0); + int rc, idx; + int entry; + + if (use_ctr) { + rc = flow_mcam_alloc_counter(mbox, &ctr); + if (rc) + return rc; + } + + entry = flow_check_preallocated_entry_cache(mbox, flow, flow_info); + if (entry < 0) { + otx2_err("Prealloc failed"); + otx2_flow_mcam_free_counter(mbox, ctr); + return NPC_MCAM_ALLOC_FAILED; + } + req = otx2_mbox_alloc_msg_npc_mcam_write_entry(mbox); + req->set_cntr = use_ctr; + req->cntr = ctr; + req->entry = entry; + otx2_npc_dbg("Alloc & write entry %u", entry); + + req->intf = + (flow->nix_intf == OTX2_INTF_RX) ? NPC_MCAM_RX : NPC_MCAM_TX; + req->enable_entry = 1; + req->entry_data.action = flow->npc_action; + + /* + * DPDK sets vtag action on per interface basis, not + * per flow basis. 
It is a matter of how we decide to support + * this pmd specific behavior. There are two ways: + * 1. Inherit the vtag action from the one configured + * for this interface. This can be read from the + * vtag_action configured for default mcam entry of + * this pf_func. + * 2. Do not support vtag action with rte_flow. + * + * Second approach is used now. + */ + req->entry_data.vtag_action = 0ULL; + + for (idx = 0; idx < OTX2_MAX_MCAM_WIDTH_DWORDS; idx++) { + req->entry_data.kw[idx] = flow->mcam_data[idx]; + req->entry_data.kw_mask[idx] = flow->mcam_mask[idx]; + } + + if (flow->nix_intf == OTX2_INTF_RX) { + req->entry_data.kw[0] |= flow_info->channel; + req->entry_data.kw_mask[0] |= (BIT_ULL(12) - 1); + } else { + uint16_t pf_func = (flow->npc_action >> 4) & 0xffff; + + pf_func = htons(pf_func); + req->entry_data.kw[0] |= ((uint64_t)pf_func << 32); + req->entry_data.kw_mask[0] |= ((uint64_t)0xffff << 32); + } + + otx2_mbox_msg_send(mbox, 0); + rc = otx2_mbox_get_rsp(mbox, 0, (void *)&rsp); + if (rc != 0) + return rc; + + flow->mcam_id = entry; + if (use_ctr) + flow->ctr_id = ctr; + return 0; +} diff --git a/src/spdk/dpdk/drivers/net/octeontx2/otx2_link.c b/src/spdk/dpdk/drivers/net/octeontx2/otx2_link.c new file mode 100644 index 000000000..12bf6c323 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/octeontx2/otx2_link.c @@ -0,0 +1,264 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(C) 2019 Marvell International Ltd. + */ + +#include +#include + +#include "otx2_ethdev.h" + +void +otx2_nix_toggle_flag_link_cfg(struct otx2_eth_dev *dev, bool set) +{ + if (set) + dev->flags |= OTX2_LINK_CFG_IN_PROGRESS_F; + else + dev->flags &= ~OTX2_LINK_CFG_IN_PROGRESS_F; + + rte_wmb(); +} + +static inline int +nix_wait_for_link_cfg(struct otx2_eth_dev *dev) +{ + uint16_t wait = 1000; + + do { + rte_rmb(); + if (!(dev->flags & OTX2_LINK_CFG_IN_PROGRESS_F)) + break; + wait--; + rte_delay_ms(1); + } while (wait); + + return wait ? 0 : -1; +} + +static void +nix_link_status_print(struct rte_eth_dev *eth_dev, struct rte_eth_link *link) +{ + if (link && link->link_status) + otx2_info("Port %d: Link Up - speed %u Mbps - %s", + (int)(eth_dev->data->port_id), + (uint32_t)link->link_speed, + link->link_duplex == ETH_LINK_FULL_DUPLEX ? 
+ "full-duplex" : "half-duplex"); + else + otx2_info("Port %d: Link Down", (int)(eth_dev->data->port_id)); +} + +void +otx2_eth_dev_link_status_update(struct otx2_dev *dev, + struct cgx_link_user_info *link) +{ + struct otx2_eth_dev *otx2_dev = (struct otx2_eth_dev *)dev; + struct rte_eth_link eth_link; + struct rte_eth_dev *eth_dev; + + if (!link || !dev) + return; + + eth_dev = otx2_dev->eth_dev; + if (!eth_dev || !eth_dev->data->dev_conf.intr_conf.lsc) + return; + + if (nix_wait_for_link_cfg(otx2_dev)) { + otx2_err("Timeout waiting for link_cfg to complete"); + return; + } + + eth_link.link_status = link->link_up; + eth_link.link_speed = link->speed; + eth_link.link_autoneg = ETH_LINK_AUTONEG; + eth_link.link_duplex = link->full_duplex; + + otx2_dev->speed = link->speed; + otx2_dev->duplex = link->full_duplex; + + /* Print link info */ + nix_link_status_print(eth_dev, ð_link); + + /* Update link info */ + rte_eth_linkstatus_set(eth_dev, ð_link); + + /* Set the flag and execute application callbacks */ + _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL); +} + +static int +lbk_link_update(struct rte_eth_link *link) +{ + link->link_status = ETH_LINK_UP; + link->link_speed = ETH_SPEED_NUM_100G; + link->link_autoneg = ETH_LINK_FIXED; + link->link_duplex = ETH_LINK_FULL_DUPLEX; + return 0; +} + +static int +cgx_link_update(struct otx2_eth_dev *dev, struct rte_eth_link *link) +{ + struct otx2_mbox *mbox = dev->mbox; + struct cgx_link_info_msg *rsp; + int rc; + otx2_mbox_alloc_msg_cgx_get_linkinfo(mbox); + rc = otx2_mbox_process_msg(mbox, (void *)&rsp); + if (rc) + return rc; + + link->link_status = rsp->link_info.link_up; + link->link_speed = rsp->link_info.speed; + link->link_autoneg = ETH_LINK_AUTONEG; + + if (rsp->link_info.full_duplex) + link->link_duplex = rsp->link_info.full_duplex; + return 0; +} + +int +otx2_nix_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct rte_eth_link link; + int rc; + + RTE_SET_USED(wait_to_complete); + memset(&link, 0, sizeof(struct rte_eth_link)); + + if (otx2_dev_is_sdp(dev)) + return 0; + + if (otx2_dev_is_lbk(dev)) + rc = lbk_link_update(&link); + else + rc = cgx_link_update(dev, &link); + + if (rc) + return rc; + + return rte_eth_linkstatus_set(eth_dev, &link); +} + +static int +nix_dev_set_link_state(struct rte_eth_dev *eth_dev, uint8_t enable) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct otx2_mbox *mbox = dev->mbox; + struct cgx_set_link_state_msg *req; + + req = otx2_mbox_alloc_msg_cgx_set_link_state(mbox); + req->enable = enable; + return otx2_mbox_process(mbox); +} + +int +otx2_nix_dev_set_link_up(struct rte_eth_dev *eth_dev) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + int rc, i; + + if (otx2_dev_is_vf_or_sdp(dev)) + return -ENOTSUP; + + rc = nix_dev_set_link_state(eth_dev, 1); + if (rc) + goto done; + + /* Start tx queues */ + for (i = 0; i < eth_dev->data->nb_tx_queues; i++) + otx2_nix_tx_queue_start(eth_dev, i); + +done: + return rc; +} + +int +otx2_nix_dev_set_link_down(struct rte_eth_dev *eth_dev) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + int i; + + if (otx2_dev_is_vf_or_sdp(dev)) + return -ENOTSUP; + + /* Stop tx queues */ + for (i = 0; i < eth_dev->data->nb_tx_queues; i++) + otx2_nix_tx_queue_stop(eth_dev, i); + + return nix_dev_set_link_state(eth_dev, 0); +} + +static int +cgx_change_mode(struct otx2_eth_dev *dev, struct cgx_set_link_mode_args *cfg) +{ + struct otx2_mbox *mbox = 
dev->mbox; + struct cgx_set_link_mode_req *req; + + req = otx2_mbox_alloc_msg_cgx_set_link_mode(mbox); + req->args.speed = cfg->speed; + req->args.duplex = cfg->duplex; + req->args.an = cfg->an; + + return otx2_mbox_process(mbox); +} + +#define SPEED_NONE 0 +static inline uint32_t +nix_parse_link_speeds(struct otx2_eth_dev *dev, uint32_t link_speeds) +{ + uint32_t link_speed = SPEED_NONE; + + /* 50G and 100G to be supported for board version C0 and above */ + if (!otx2_dev_is_Ax(dev)) { + if (link_speeds & ETH_LINK_SPEED_100G) + link_speed = 100000; + if (link_speeds & ETH_LINK_SPEED_50G) + link_speed = 50000; + } + if (link_speeds & ETH_LINK_SPEED_40G) + link_speed = 40000; + if (link_speeds & ETH_LINK_SPEED_25G) + link_speed = 25000; + if (link_speeds & ETH_LINK_SPEED_20G) + link_speed = 20000; + if (link_speeds & ETH_LINK_SPEED_10G) + link_speed = 10000; + if (link_speeds & ETH_LINK_SPEED_5G) + link_speed = 5000; + if (link_speeds & ETH_LINK_SPEED_1G) + link_speed = 1000; + + return link_speed; +} + +static inline uint8_t +nix_parse_eth_link_duplex(uint32_t link_speeds) +{ + if ((link_speeds & ETH_LINK_SPEED_10M_HD) || + (link_speeds & ETH_LINK_SPEED_100M_HD)) + return ETH_LINK_HALF_DUPLEX; + else + return ETH_LINK_FULL_DUPLEX; +} + +int +otx2_apply_link_speed(struct rte_eth_dev *eth_dev) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct rte_eth_conf *conf = ð_dev->data->dev_conf; + struct cgx_set_link_mode_args cfg; + + /* If VF/SDP/LBK, link attributes cannot be changed */ + if (otx2_dev_is_vf_or_sdp(dev) || otx2_dev_is_lbk(dev)) + return 0; + + memset(&cfg, 0, sizeof(struct cgx_set_link_mode_args)); + cfg.speed = nix_parse_link_speeds(dev, conf->link_speeds); + if (cfg.speed != SPEED_NONE && cfg.speed != dev->speed) { + cfg.duplex = nix_parse_eth_link_duplex(conf->link_speeds); + cfg.an = (conf->link_speeds & ETH_LINK_SPEED_FIXED) == 0; + + return cgx_change_mode(dev, &cfg); + } + return 0; +} diff --git a/src/spdk/dpdk/drivers/net/octeontx2/otx2_lookup.c b/src/spdk/dpdk/drivers/net/octeontx2/otx2_lookup.c new file mode 100644 index 000000000..10944bc17 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/octeontx2/otx2_lookup.c @@ -0,0 +1,352 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(C) 2019 Marvell International Ltd. 
+ */ + +#include +#include + +#include "otx2_common.h" +#include "otx2_ethdev.h" + +/* NIX_RX_PARSE_S's ERRCODE + ERRLEV (12 bits) */ +#define ERRCODE_ERRLEN_WIDTH 12 +#define ERR_ARRAY_SZ ((BIT(ERRCODE_ERRLEN_WIDTH)) *\ + sizeof(uint32_t)) + +#define SA_TBL_SZ (RTE_MAX_ETHPORTS * sizeof(uint64_t)) +#define LOOKUP_ARRAY_SZ (PTYPE_ARRAY_SZ + ERR_ARRAY_SZ +\ + SA_TBL_SZ) + +const uint32_t * +otx2_nix_supported_ptypes_get(struct rte_eth_dev *eth_dev) +{ + RTE_SET_USED(eth_dev); + + static const uint32_t ptypes[] = { + RTE_PTYPE_L2_ETHER_QINQ, /* LB */ + RTE_PTYPE_L2_ETHER_VLAN, /* LB */ + RTE_PTYPE_L2_ETHER_TIMESYNC, /* LB */ + RTE_PTYPE_L2_ETHER_ARP, /* LC */ + RTE_PTYPE_L2_ETHER_NSH, /* LC */ + RTE_PTYPE_L2_ETHER_FCOE, /* LC */ + RTE_PTYPE_L2_ETHER_MPLS, /* LC */ + RTE_PTYPE_L3_IPV4, /* LC */ + RTE_PTYPE_L3_IPV4_EXT, /* LC */ + RTE_PTYPE_L3_IPV6, /* LC */ + RTE_PTYPE_L3_IPV6_EXT, /* LC */ + RTE_PTYPE_L4_TCP, /* LD */ + RTE_PTYPE_L4_UDP, /* LD */ + RTE_PTYPE_L4_SCTP, /* LD */ + RTE_PTYPE_L4_ICMP, /* LD */ + RTE_PTYPE_L4_IGMP, /* LD */ + RTE_PTYPE_TUNNEL_GRE, /* LD */ + RTE_PTYPE_TUNNEL_ESP, /* LD */ + RTE_PTYPE_TUNNEL_NVGRE, /* LD */ + RTE_PTYPE_TUNNEL_VXLAN, /* LE */ + RTE_PTYPE_TUNNEL_GENEVE, /* LE */ + RTE_PTYPE_TUNNEL_GTPC, /* LE */ + RTE_PTYPE_TUNNEL_GTPU, /* LE */ + RTE_PTYPE_TUNNEL_VXLAN_GPE, /* LE */ + RTE_PTYPE_TUNNEL_MPLS_IN_GRE, /* LE */ + RTE_PTYPE_TUNNEL_MPLS_IN_UDP, /* LE */ + RTE_PTYPE_INNER_L2_ETHER,/* LF */ + RTE_PTYPE_INNER_L3_IPV4, /* LG */ + RTE_PTYPE_INNER_L3_IPV6, /* LG */ + RTE_PTYPE_INNER_L4_TCP, /* LH */ + RTE_PTYPE_INNER_L4_UDP, /* LH */ + RTE_PTYPE_INNER_L4_SCTP, /* LH */ + RTE_PTYPE_INNER_L4_ICMP, /* LH */ + RTE_PTYPE_UNKNOWN, + }; + + return ptypes; +} + +int +otx2_nix_ptypes_set(struct rte_eth_dev *eth_dev, uint32_t ptype_mask) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + + if (ptype_mask) { + dev->rx_offload_flags |= NIX_RX_OFFLOAD_PTYPE_F; + dev->ptype_disable = 0; + } else { + dev->rx_offload_flags &= ~NIX_RX_OFFLOAD_PTYPE_F; + dev->ptype_disable = 1; + } + + otx2_eth_set_rx_function(eth_dev); + + return 0; +} + +/* + * +------------------ +------------------ + + * | | IL4 | IL3| IL2 | TU | L4 | L3 | L2 | + * +-------------------+-------------------+ + * + * +-------------------+------------------ + + * | | LH | LG | LF | LE | LD | LC | LB | + * +-------------------+-------------------+ + * + * ptype [LE - LD - LC - LB] = TU - L4 - L3 - T2 + * ptype_tunnel[LH - LG - LF] = IL4 - IL3 - IL2 - TU + * + */ +static void +nix_create_non_tunnel_ptype_array(uint16_t *ptype) +{ + uint8_t lb, lc, ld, le; + uint16_t val; + uint32_t idx; + + for (idx = 0; idx < PTYPE_NON_TUNNEL_ARRAY_SZ; idx++) { + lb = idx & 0xF; + lc = (idx & 0xF0) >> 4; + ld = (idx & 0xF00) >> 8; + le = (idx & 0xF000) >> 12; + val = RTE_PTYPE_UNKNOWN; + + switch (lb) { + case NPC_LT_LB_STAG_QINQ: + val |= RTE_PTYPE_L2_ETHER_QINQ; + break; + case NPC_LT_LB_CTAG: + val |= RTE_PTYPE_L2_ETHER_VLAN; + break; + } + + switch (lc) { + case NPC_LT_LC_ARP: + val |= RTE_PTYPE_L2_ETHER_ARP; + break; + case NPC_LT_LC_NSH: + val |= RTE_PTYPE_L2_ETHER_NSH; + break; + case NPC_LT_LC_FCOE: + val |= RTE_PTYPE_L2_ETHER_FCOE; + break; + case NPC_LT_LC_MPLS: + val |= RTE_PTYPE_L2_ETHER_MPLS; + break; + case NPC_LT_LC_IP: + val |= RTE_PTYPE_L3_IPV4; + break; + case NPC_LT_LC_IP_OPT: + val |= RTE_PTYPE_L3_IPV4_EXT; + break; + case NPC_LT_LC_IP6: + val |= RTE_PTYPE_L3_IPV6; + break; + case NPC_LT_LC_IP6_EXT: + val |= RTE_PTYPE_L3_IPV6_EXT; + break; + case NPC_LT_LC_PTP: + val |= RTE_PTYPE_L2_ETHER_TIMESYNC; + 
break; + } + + switch (ld) { + case NPC_LT_LD_TCP: + val |= RTE_PTYPE_L4_TCP; + break; + case NPC_LT_LD_UDP: + val |= RTE_PTYPE_L4_UDP; + break; + case NPC_LT_LD_SCTP: + val |= RTE_PTYPE_L4_SCTP; + break; + case NPC_LT_LD_ICMP: + case NPC_LT_LD_ICMP6: + val |= RTE_PTYPE_L4_ICMP; + break; + case NPC_LT_LD_IGMP: + val |= RTE_PTYPE_L4_IGMP; + break; + case NPC_LT_LD_GRE: + val |= RTE_PTYPE_TUNNEL_GRE; + break; + case NPC_LT_LD_NVGRE: + val |= RTE_PTYPE_TUNNEL_NVGRE; + break; + case NPC_LT_LD_ESP: + val |= RTE_PTYPE_TUNNEL_ESP; + break; + } + + switch (le) { + case NPC_LT_LE_VXLAN: + val |= RTE_PTYPE_TUNNEL_VXLAN; + break; + case NPC_LT_LE_VXLANGPE: + val |= RTE_PTYPE_TUNNEL_VXLAN_GPE; + break; + case NPC_LT_LE_GENEVE: + val |= RTE_PTYPE_TUNNEL_GENEVE; + break; + case NPC_LT_LE_GTPC: + val |= RTE_PTYPE_TUNNEL_GTPC; + break; + case NPC_LT_LE_GTPU: + val |= RTE_PTYPE_TUNNEL_GTPU; + break; + case NPC_LT_LE_TU_MPLS_IN_GRE: + val |= RTE_PTYPE_TUNNEL_MPLS_IN_GRE; + break; + case NPC_LT_LE_TU_MPLS_IN_UDP: + val |= RTE_PTYPE_TUNNEL_MPLS_IN_UDP; + break; + } + ptype[idx] = val; + } +} + +#define TU_SHIFT(x) ((x) >> PTYPE_NON_TUNNEL_WIDTH) +static void +nix_create_tunnel_ptype_array(uint16_t *ptype) +{ + uint8_t lf, lg, lh; + uint16_t val; + uint32_t idx; + + /* Skip non tunnel ptype array memory */ + ptype = ptype + PTYPE_NON_TUNNEL_ARRAY_SZ; + + for (idx = 0; idx < PTYPE_TUNNEL_ARRAY_SZ; idx++) { + lf = idx & 0xF; + lg = (idx & 0xF0) >> 4; + lh = (idx & 0xF00) >> 8; + val = RTE_PTYPE_UNKNOWN; + + switch (lf) { + case NPC_LT_LF_TU_ETHER: + val |= TU_SHIFT(RTE_PTYPE_INNER_L2_ETHER); + break; + } + switch (lg) { + case NPC_LT_LG_TU_IP: + val |= TU_SHIFT(RTE_PTYPE_INNER_L3_IPV4); + break; + case NPC_LT_LG_TU_IP6: + val |= TU_SHIFT(RTE_PTYPE_INNER_L3_IPV6); + break; + } + switch (lh) { + case NPC_LT_LH_TU_TCP: + val |= TU_SHIFT(RTE_PTYPE_INNER_L4_TCP); + break; + case NPC_LT_LH_TU_UDP: + val |= TU_SHIFT(RTE_PTYPE_INNER_L4_UDP); + break; + case NPC_LT_LH_TU_SCTP: + val |= TU_SHIFT(RTE_PTYPE_INNER_L4_SCTP); + break; + case NPC_LT_LH_TU_ICMP: + case NPC_LT_LH_TU_ICMP6: + val |= TU_SHIFT(RTE_PTYPE_INNER_L4_ICMP); + break; + } + + ptype[idx] = val; + } +} + +static void +nix_create_rx_ol_flags_array(void *mem) +{ + uint16_t idx, errcode, errlev; + uint32_t val, *ol_flags; + + /* Skip ptype array memory */ + ol_flags = (uint32_t *)((uint8_t *)mem + PTYPE_ARRAY_SZ); + + for (idx = 0; idx < BIT(ERRCODE_ERRLEN_WIDTH); idx++) { + errlev = idx & 0xf; + errcode = (idx & 0xff0) >> 4; + + val = PKT_RX_IP_CKSUM_UNKNOWN; + val |= PKT_RX_L4_CKSUM_UNKNOWN; + val |= PKT_RX_OUTER_L4_CKSUM_UNKNOWN; + + switch (errlev) { + case NPC_ERRLEV_RE: + /* Mark all errors as BAD checksum errors + * including Outer L2 length mismatch error + */ + if (errcode) { + val |= PKT_RX_IP_CKSUM_BAD; + val |= PKT_RX_L4_CKSUM_BAD; + } else { + val |= PKT_RX_IP_CKSUM_GOOD; + val |= PKT_RX_L4_CKSUM_GOOD; + } + break; + case NPC_ERRLEV_LC: + if (errcode == NPC_EC_OIP4_CSUM || + errcode == NPC_EC_IP_FRAG_OFFSET_1) { + val |= PKT_RX_IP_CKSUM_BAD; + val |= PKT_RX_EIP_CKSUM_BAD; + } else { + val |= PKT_RX_IP_CKSUM_GOOD; + } + break; + case NPC_ERRLEV_LG: + if (errcode == NPC_EC_IIP4_CSUM) + val |= PKT_RX_IP_CKSUM_BAD; + else + val |= PKT_RX_IP_CKSUM_GOOD; + break; + case NPC_ERRLEV_NIX: + if (errcode == NIX_RX_PERRCODE_OL4_CHK || + errcode == NIX_RX_PERRCODE_OL4_LEN || + errcode == NIX_RX_PERRCODE_OL4_PORT) { + val |= PKT_RX_IP_CKSUM_GOOD; + val |= PKT_RX_L4_CKSUM_BAD; + val |= PKT_RX_OUTER_L4_CKSUM_BAD; + } else if (errcode == NIX_RX_PERRCODE_IL4_CHK || 
+ errcode == NIX_RX_PERRCODE_IL4_LEN || + errcode == NIX_RX_PERRCODE_IL4_PORT) { + val |= PKT_RX_IP_CKSUM_GOOD; + val |= PKT_RX_L4_CKSUM_BAD; + } else if (errcode == NIX_RX_PERRCODE_IL3_LEN || + errcode == NIX_RX_PERRCODE_OL3_LEN) { + val |= PKT_RX_IP_CKSUM_BAD; + } else { + val |= PKT_RX_IP_CKSUM_GOOD; + val |= PKT_RX_L4_CKSUM_GOOD; + } + break; + } + ol_flags[idx] = val; + } +} + +void * +otx2_nix_fastpath_lookup_mem_get(void) +{ + const char name[] = OTX2_NIX_FASTPATH_LOOKUP_MEM; + const struct rte_memzone *mz; + void *mem; + + /* SA_TBL starts after PTYPE_ARRAY & ERR_ARRAY */ + RTE_BUILD_BUG_ON(OTX2_NIX_SA_TBL_START != (PTYPE_ARRAY_SZ + + ERR_ARRAY_SZ)); + + mz = rte_memzone_lookup(name); + if (mz != NULL) + return mz->addr; + + /* Request for the first time */ + mz = rte_memzone_reserve_aligned(name, LOOKUP_ARRAY_SZ, + SOCKET_ID_ANY, 0, OTX2_ALIGN); + if (mz != NULL) { + mem = mz->addr; + /* Form the ptype array lookup memory */ + nix_create_non_tunnel_ptype_array(mem); + nix_create_tunnel_ptype_array(mem); + /* Form the rx ol_flags based on errcode */ + nix_create_rx_ol_flags_array(mem); + return mem; + } + return NULL; +} diff --git a/src/spdk/dpdk/drivers/net/octeontx2/otx2_mac.c b/src/spdk/dpdk/drivers/net/octeontx2/otx2_mac.c new file mode 100644 index 000000000..262d185e5 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/octeontx2/otx2_mac.c @@ -0,0 +1,149 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(C) 2019 Marvell International Ltd. + */ + +#include + +#include "otx2_dev.h" +#include "otx2_ethdev.h" + +int +otx2_cgx_mac_addr_set(struct rte_eth_dev *eth_dev, struct rte_ether_addr *addr) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct cgx_mac_addr_set_or_get *req; + struct otx2_mbox *mbox = dev->mbox; + int rc; + + if (otx2_dev_is_vf_or_sdp(dev)) + return -ENOTSUP; + + if (otx2_dev_active_vfs(dev)) + return -ENOTSUP; + + req = otx2_mbox_alloc_msg_cgx_mac_addr_set(mbox); + otx2_mbox_memcpy(req->mac_addr, addr->addr_bytes, RTE_ETHER_ADDR_LEN); + + rc = otx2_mbox_process(mbox); + if (rc) + otx2_err("Failed to set mac address in CGX, rc=%d", rc); + + return 0; +} + +int +otx2_cgx_mac_max_entries_get(struct otx2_eth_dev *dev) +{ + struct cgx_max_dmac_entries_get_rsp *rsp; + struct otx2_mbox *mbox = dev->mbox; + int rc; + + if (otx2_dev_is_vf_or_sdp(dev)) + return 0; + + otx2_mbox_alloc_msg_cgx_mac_max_entries_get(mbox); + rc = otx2_mbox_process_msg(mbox, (void *)&rsp); + if (rc) + return rc; + + return rsp->max_dmac_filters; +} + +int +otx2_nix_mac_addr_add(struct rte_eth_dev *eth_dev, struct rte_ether_addr *addr, + uint32_t index __rte_unused, uint32_t pool __rte_unused) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct otx2_mbox *mbox = dev->mbox; + struct cgx_mac_addr_add_req *req; + struct cgx_mac_addr_add_rsp *rsp; + int rc; + + if (otx2_dev_is_vf_or_sdp(dev)) + return -ENOTSUP; + + if (otx2_dev_active_vfs(dev)) + return -ENOTSUP; + + req = otx2_mbox_alloc_msg_cgx_mac_addr_add(mbox); + otx2_mbox_memcpy(req->mac_addr, addr->addr_bytes, RTE_ETHER_ADDR_LEN); + + rc = otx2_mbox_process_msg(mbox, (void *)&rsp); + if (rc) { + otx2_err("Failed to add mac address, rc=%d", rc); + goto done; + } + + /* Enable promiscuous mode at NIX level */ + otx2_nix_promisc_config(eth_dev, 1); + +done: + return rc; +} + +void +otx2_nix_mac_addr_del(struct rte_eth_dev *eth_dev, uint32_t index) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct otx2_mbox *mbox = dev->mbox; + struct cgx_mac_addr_del_req *req; + int rc; + + if 
(otx2_dev_is_vf_or_sdp(dev)) + return; + + req = otx2_mbox_alloc_msg_cgx_mac_addr_del(mbox); + req->index = index; + + rc = otx2_mbox_process(mbox); + if (rc) + otx2_err("Failed to delete mac address, rc=%d", rc); +} + +int +otx2_nix_mac_addr_set(struct rte_eth_dev *eth_dev, struct rte_ether_addr *addr) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct otx2_mbox *mbox = dev->mbox; + struct nix_set_mac_addr *req; + int rc; + + req = otx2_mbox_alloc_msg_nix_set_mac_addr(mbox); + otx2_mbox_memcpy(req->mac_addr, addr->addr_bytes, RTE_ETHER_ADDR_LEN); + + rc = otx2_mbox_process(mbox); + if (rc) { + otx2_err("Failed to set mac address, rc=%d", rc); + goto done; + } + + otx2_mbox_memcpy(dev->mac_addr, addr->addr_bytes, RTE_ETHER_ADDR_LEN); + + /* Install the same entry into CGX DMAC filter table too. */ + otx2_cgx_mac_addr_set(eth_dev, addr); + +done: + return rc; +} + +int +otx2_nix_mac_addr_get(struct rte_eth_dev *eth_dev, uint8_t *addr) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct otx2_mbox *mbox = dev->mbox; + struct nix_get_mac_addr_rsp *rsp; + int rc; + + otx2_mbox_alloc_msg_nix_get_mac_addr(mbox); + otx2_mbox_msg_send(mbox, 0); + rc = otx2_mbox_get_rsp(mbox, 0, (void *)&rsp); + if (rc) { + otx2_err("Failed to get mac address, rc=%d", rc); + goto done; + } + + otx2_mbox_memcpy(addr, rsp->mac_addr, RTE_ETHER_ADDR_LEN); + +done: + return rc; +} diff --git a/src/spdk/dpdk/drivers/net/octeontx2/otx2_mcast.c b/src/spdk/dpdk/drivers/net/octeontx2/otx2_mcast.c new file mode 100644 index 000000000..f84aa1bf5 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/octeontx2/otx2_mcast.c @@ -0,0 +1,339 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(C) 2019 Marvell International Ltd. + */ + +#include "otx2_ethdev.h" + +static int +nix_mc_addr_list_free(struct otx2_eth_dev *dev, uint32_t entry_count) +{ + struct npc_mcam_free_entry_req *req; + struct otx2_mbox *mbox = dev->mbox; + struct mcast_entry *entry; + int rc = 0; + + if (entry_count == 0) + goto exit; + + TAILQ_FOREACH(entry, &dev->mc_fltr_tbl, next) { + req = otx2_mbox_alloc_msg_npc_mcam_free_entry(mbox); + req->entry = entry->mcam_index; + + rc = otx2_mbox_process_msg(mbox, NULL); + if (rc < 0) + goto exit; + + TAILQ_REMOVE(&dev->mc_fltr_tbl, entry, next); + rte_free(entry); + entry_count--; + + if (entry_count == 0) + break; + } + + if (entry == NULL) + dev->mc_tbl_set = false; + +exit: + return rc; +} + +static int +nix_hw_update_mc_addr_list(struct rte_eth_dev *eth_dev) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct otx2_npc_flow_info *npc = &dev->npc_flow; + volatile uint8_t *key_data, *key_mask; + struct npc_mcam_write_entry_req *req; + struct otx2_mbox *mbox = dev->mbox; + struct npc_xtract_info *x_info; + uint64_t mcam_data, mcam_mask; + struct mcast_entry *entry; + otx2_dxcfg_t *ld_cfg; + uint8_t *mac_addr; + uint64_t action; + int idx, rc = 0; + + ld_cfg = &npc->prx_dxcfg; + /* Get ETH layer profile info for populating mcam entries */ + x_info = &(*ld_cfg)[NPC_MCAM_RX][NPC_LID_LA][NPC_LT_LA_ETHER].xtract[0]; + + TAILQ_FOREACH(entry, &dev->mc_fltr_tbl, next) { + req = otx2_mbox_alloc_msg_npc_mcam_write_entry(mbox); + if (req == NULL) { + /* The mbox memory buffer can be full. 
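+ * One write-entry request is queued per multicast entry and the whole
+ * batch is sent only after the loop, so a long list can exhaust the
+ * shared region.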
+ * Flush it and retry + */ + otx2_mbox_msg_send(mbox, 0); + rc = otx2_mbox_wait_for_rsp(mbox, 0); + if (rc < 0) + goto exit; + + req = otx2_mbox_alloc_msg_npc_mcam_write_entry(mbox); + if (req == NULL) { + rc = -ENOMEM; + goto exit; + } + } + req->entry = entry->mcam_index; + req->intf = NPC_MCAM_RX; + req->enable_entry = 1; + + /* Channel base extracted to KW0[11:0] */ + req->entry_data.kw[0] = dev->rx_chan_base; + req->entry_data.kw_mask[0] = RTE_LEN2MASK(12, uint64_t); + + /* Update mcam address */ + key_data = (volatile uint8_t *)req->entry_data.kw; + key_mask = (volatile uint8_t *)req->entry_data.kw_mask; + + mcam_data = 0ull; + mcam_mask = RTE_LEN2MASK(48, uint64_t); + mac_addr = &entry->mcast_mac.addr_bytes[0]; + for (idx = RTE_ETHER_ADDR_LEN - 1; idx >= 0; idx--) + mcam_data |= ((uint64_t)*mac_addr++) << (8 * idx); + + otx2_mbox_memcpy(key_data + x_info->key_off, + &mcam_data, x_info->len); + otx2_mbox_memcpy(key_mask + x_info->key_off, + &mcam_mask, x_info->len); + + action = NIX_RX_ACTIONOP_UCAST; + + if (eth_dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) { + action = NIX_RX_ACTIONOP_RSS; + action |= (uint64_t)(dev->rss_info.alg_idx) << 56; + } + + action |= ((uint64_t)otx2_pfvf_func(dev->pf, dev->vf)) << 4; + req->entry_data.action = action; + } + + otx2_mbox_msg_send(mbox, 0); + rc = otx2_mbox_wait_for_rsp(mbox, 0); + +exit: + return rc; +} + +int +otx2_nix_mc_addr_list_install(struct rte_eth_dev *eth_dev) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct npc_mcam_alloc_entry_req *req; + struct npc_mcam_alloc_entry_rsp *rsp; + struct otx2_mbox *mbox = dev->mbox; + uint32_t entry_count = 0, idx = 0; + struct mcast_entry *entry; + int rc = 0; + + if (!dev->mc_tbl_set) + return 0; + + TAILQ_FOREACH(entry, &dev->mc_fltr_tbl, next) + entry_count++; + + req = otx2_mbox_alloc_msg_npc_mcam_alloc_entry(mbox); + req->priority = NPC_MCAM_ANY_PRIO; + req->count = entry_count; + + rc = otx2_mbox_process_msg(mbox, (void *)&rsp); + if (rc || rsp->count < entry_count) { + otx2_err("Failed to allocate required mcam entries"); + goto exit; + } + + TAILQ_FOREACH(entry, &dev->mc_fltr_tbl, next) + entry->mcam_index = rsp->entry_list[idx]; + + rc = nix_hw_update_mc_addr_list(eth_dev); + +exit: + return rc; +} + +int +otx2_nix_mc_addr_list_uninstall(struct rte_eth_dev *eth_dev) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct npc_mcam_free_entry_req *req; + struct otx2_mbox *mbox = dev->mbox; + struct mcast_entry *entry; + int rc = 0; + + if (!dev->mc_tbl_set) + return 0; + + TAILQ_FOREACH(entry, &dev->mc_fltr_tbl, next) { + req = otx2_mbox_alloc_msg_npc_mcam_free_entry(mbox); + if (req == NULL) { + otx2_mbox_msg_send(mbox, 0); + rc = otx2_mbox_wait_for_rsp(mbox, 0); + if (rc < 0) + goto exit; + + req = otx2_mbox_alloc_msg_npc_mcam_free_entry(mbox); + if (req == NULL) { + rc = -ENOMEM; + goto exit; + } + } + req->entry = entry->mcam_index; + } + + otx2_mbox_msg_send(mbox, 0); + rc = otx2_mbox_wait_for_rsp(mbox, 0); + +exit: + return rc; +} + +static int +nix_setup_mc_addr_list(struct otx2_eth_dev *dev, + struct rte_ether_addr *mc_addr_set) +{ + struct npc_mcam_ena_dis_entry_req *req; + struct otx2_mbox *mbox = dev->mbox; + struct mcast_entry *entry; + uint32_t idx = 0; + int rc = 0; + + /* Populate PMD's mcast list with given mcast mac addresses and + * disable all mcam entries pertaining to the mcast list. 
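+ * The disable requests are batched in the shared mailbox region and
+ * flushed with a single send once the list has been walked (or earlier
+ * if the region fills up mid-loop).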
+ */ + TAILQ_FOREACH(entry, &dev->mc_fltr_tbl, next) { + rte_memcpy(&entry->mcast_mac, &mc_addr_set[idx++], + RTE_ETHER_ADDR_LEN); + + req = otx2_mbox_alloc_msg_npc_mcam_dis_entry(mbox); + if (req == NULL) { + otx2_mbox_msg_send(mbox, 0); + rc = otx2_mbox_wait_for_rsp(mbox, 0); + if (rc < 0) + goto exit; + + req = otx2_mbox_alloc_msg_npc_mcam_dis_entry(mbox); + if (req == NULL) { + rc = -ENOMEM; + goto exit; + } + } + req->entry = entry->mcam_index; + } + + otx2_mbox_msg_send(mbox, 0); + rc = otx2_mbox_wait_for_rsp(mbox, 0); + +exit: + return rc; +} + +int +otx2_nix_set_mc_addr_list(struct rte_eth_dev *eth_dev, + struct rte_ether_addr *mc_addr_set, + uint32_t nb_mc_addr) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct npc_mcam_alloc_entry_req *req; + struct npc_mcam_alloc_entry_rsp *rsp; + struct otx2_mbox *mbox = dev->mbox; + uint32_t idx, priv_count = 0; + struct mcast_entry *entry; + int rc = 0; + + if (otx2_dev_is_vf(dev)) + return -ENOTSUP; + + TAILQ_FOREACH(entry, &dev->mc_fltr_tbl, next) + priv_count++; + + if (nb_mc_addr == 0 || mc_addr_set == NULL) { + /* Free existing list if new list is null */ + nb_mc_addr = priv_count; + goto exit; + } + + for (idx = 0; idx < nb_mc_addr; idx++) { + if (!rte_is_multicast_ether_addr(&mc_addr_set[idx])) + return -EINVAL; + } + + /* New list is bigger than the existing list, + * allocate mcam entries for the extra entries. + */ + if (nb_mc_addr > priv_count) { + req = otx2_mbox_alloc_msg_npc_mcam_alloc_entry(mbox); + req->priority = NPC_MCAM_ANY_PRIO; + req->count = nb_mc_addr - priv_count; + + rc = otx2_mbox_process_msg(mbox, (void *)&rsp); + if (rc || (rsp->count + priv_count < nb_mc_addr)) { + otx2_err("Failed to allocate required entries"); + nb_mc_addr = priv_count; + goto exit; + } + + /* Append new mcam entries to the existing mc list */ + for (idx = 0; idx < rsp->count; idx++) { + entry = rte_zmalloc("otx2_nix_mc_entry", + sizeof(struct mcast_entry), 0); + if (!entry) { + otx2_err("Failed to allocate memory"); + nb_mc_addr = priv_count; + rc = -ENOMEM; + goto exit; + } + entry->mcam_index = rsp->entry_list[idx]; + TAILQ_INSERT_HEAD(&dev->mc_fltr_tbl, entry, next); + } + } else { + /* Free the extra mcam entries if the new list is smaller + * than exiting list. + */ + nix_mc_addr_list_free(dev, priv_count - nb_mc_addr); + } + + + /* Now mc_fltr_tbl has the required number of mcam entries, + * Traverse through it and add new multicast filter table entries. + */ + rc = nix_setup_mc_addr_list(dev, mc_addr_set); + if (rc < 0) + goto exit; + + rc = nix_hw_update_mc_addr_list(eth_dev); + if (rc < 0) + goto exit; + + dev->mc_tbl_set = true; + + return 0; + +exit: + nix_mc_addr_list_free(dev, nb_mc_addr); + return rc; +} + +void +otx2_nix_mc_filter_init(struct otx2_eth_dev *dev) +{ + if (otx2_dev_is_vf(dev)) + return; + + TAILQ_INIT(&dev->mc_fltr_tbl); +} + +void +otx2_nix_mc_filter_fini(struct otx2_eth_dev *dev) +{ + struct mcast_entry *entry; + uint32_t count = 0; + + if (otx2_dev_is_vf(dev)) + return; + + TAILQ_FOREACH(entry, &dev->mc_fltr_tbl, next) + count++; + + nix_mc_addr_list_free(dev, count); +} diff --git a/src/spdk/dpdk/drivers/net/octeontx2/otx2_ptp.c b/src/spdk/dpdk/drivers/net/octeontx2/otx2_ptp.c new file mode 100644 index 000000000..ae5a2b7cd --- /dev/null +++ b/src/spdk/dpdk/drivers/net/octeontx2/otx2_ptp.c @@ -0,0 +1,442 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(C) 2019 Marvell International Ltd. 
+ */ + +#include + +#include "otx2_ethdev.h" + +#define PTP_FREQ_ADJUST (1 << 9) + +/* Function to enable ptp config for VFs */ +void +otx2_nix_ptp_enable_vf(struct rte_eth_dev *eth_dev) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + + if (otx2_nix_recalc_mtu(eth_dev)) + otx2_err("Failed to set MTU size for ptp"); + + dev->scalar_ena = true; + dev->rx_offload_flags |= NIX_RX_OFFLOAD_TSTAMP_F; + + /* Setting up the function pointers as per new offload flags */ + otx2_eth_set_rx_function(eth_dev); + otx2_eth_set_tx_function(eth_dev); +} + +static uint16_t +nix_eth_ptp_vf_burst(void *queue, struct rte_mbuf **mbufs, uint16_t pkts) +{ + struct otx2_eth_rxq *rxq = queue; + struct rte_eth_dev *eth_dev; + + RTE_SET_USED(mbufs); + RTE_SET_USED(pkts); + + eth_dev = rxq->eth_dev; + otx2_nix_ptp_enable_vf(eth_dev); + + return 0; +} + +static int +nix_read_raw_clock(struct otx2_eth_dev *dev, uint64_t *clock, uint64_t *tsc, + uint8_t is_pmu) +{ + struct otx2_mbox *mbox = dev->mbox; + struct ptp_req *req; + struct ptp_rsp *rsp; + int rc; + + req = otx2_mbox_alloc_msg_ptp_op(mbox); + req->op = PTP_OP_GET_CLOCK; + req->is_pmu = is_pmu; + rc = otx2_mbox_process_msg(mbox, (void *)&rsp); + if (rc) + goto fail; + + if (clock) + *clock = rsp->clk; + if (tsc) + *tsc = rsp->tsc; + +fail: + return rc; +} + +/* This function calculates two parameters "clk_freq_mult" and + * "clk_delta" which is useful in deriving PTP HI clock from + * timestamp counter (tsc) value. + */ +int +otx2_nix_raw_clock_tsc_conv(struct otx2_eth_dev *dev) +{ + uint64_t ticks_base = 0, ticks = 0, tsc = 0, t_freq; + int rc, val; + + /* Calculating the frequency at which PTP HI clock is running */ + rc = nix_read_raw_clock(dev, &ticks_base, &tsc, false); + if (rc) { + otx2_err("Failed to read the raw clock value: %d", rc); + goto fail; + } + + rte_delay_ms(100); + + rc = nix_read_raw_clock(dev, &ticks, &tsc, false); + if (rc) { + otx2_err("Failed to read the raw clock value: %d", rc); + goto fail; + } + + t_freq = (ticks - ticks_base) * 10; + + /* Calculating the freq multiplier viz the ratio between the + * frequency at which PTP HI clock works and tsc clock runs + */ + dev->clk_freq_mult = + (double)pow(10, floor(log10(t_freq))) / rte_get_timer_hz(); + + val = false; +#ifdef RTE_ARM_EAL_RDTSC_USE_PMU + val = true; +#endif + rc = nix_read_raw_clock(dev, &ticks, &tsc, val); + if (rc) { + otx2_err("Failed to read the raw clock value: %d", rc); + goto fail; + } + + /* Calculating delta between PTP HI clock and tsc */ + dev->clk_delta = ((uint64_t)(ticks / dev->clk_freq_mult) - tsc); + +fail: + return rc; +} + +static void +nix_start_timecounters(struct rte_eth_dev *eth_dev) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + + memset(&dev->systime_tc, 0, sizeof(struct rte_timecounter)); + memset(&dev->rx_tstamp_tc, 0, sizeof(struct rte_timecounter)); + memset(&dev->tx_tstamp_tc, 0, sizeof(struct rte_timecounter)); + + dev->systime_tc.cc_mask = OTX2_CYCLECOUNTER_MASK; + dev->rx_tstamp_tc.cc_mask = OTX2_CYCLECOUNTER_MASK; + dev->tx_tstamp_tc.cc_mask = OTX2_CYCLECOUNTER_MASK; +} + +static int +nix_ptp_config(struct rte_eth_dev *eth_dev, int en) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct otx2_mbox *mbox = dev->mbox; + uint8_t rc = -EINVAL; + + if (otx2_dev_is_vf_or_sdp(dev) || otx2_dev_is_lbk(dev)) + return rc; + + if (en) { + /* Enable time stamping of sent PTP packets. 
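+ * Tx timestamping is toggled at the NIX LF level while Rx timestamping
+ * below is toggled at the CGX level, hence the two separate mailbox
+ * requests.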
*/ + otx2_mbox_alloc_msg_nix_lf_ptp_tx_enable(mbox); + rc = otx2_mbox_process(mbox); + if (rc) { + otx2_err("MBOX ptp tx conf enable failed: err %d", rc); + return rc; + } + /* Enable time stamping of received PTP packets. */ + otx2_mbox_alloc_msg_cgx_ptp_rx_enable(mbox); + } else { + /* Disable time stamping of sent PTP packets. */ + otx2_mbox_alloc_msg_nix_lf_ptp_tx_disable(mbox); + rc = otx2_mbox_process(mbox); + if (rc) { + otx2_err("MBOX ptp tx conf disable failed: err %d", rc); + return rc; + } + /* Disable time stamping of received PTP packets. */ + otx2_mbox_alloc_msg_cgx_ptp_rx_disable(mbox); + } + + return otx2_mbox_process(mbox); +} + +int +otx2_eth_dev_ptp_info_update(struct otx2_dev *dev, bool ptp_en) +{ + struct otx2_eth_dev *otx2_dev = (struct otx2_eth_dev *)dev; + struct rte_eth_dev *eth_dev; + int i; + + if (!dev) + return -EINVAL; + + eth_dev = otx2_dev->eth_dev; + if (!eth_dev) + return -EINVAL; + + otx2_dev->ptp_en = ptp_en; + for (i = 0; i < eth_dev->data->nb_rx_queues; i++) { + struct otx2_eth_rxq *rxq = eth_dev->data->rx_queues[i]; + rxq->mbuf_initializer = + otx2_nix_rxq_mbuf_setup(otx2_dev, + eth_dev->data->port_id); + } + if (otx2_dev_is_vf(otx2_dev) && !(otx2_dev_is_sdp(otx2_dev)) && + !(otx2_dev_is_lbk(otx2_dev))) { + /* In case of VF, setting of MTU cant be done directly in this + * function as this is running as part of MBOX request(PF->VF) + * and MTU setting also requires MBOX message to be + * sent(VF->PF) + */ + eth_dev->rx_pkt_burst = nix_eth_ptp_vf_burst; + rte_mb(); + } + + return 0; +} + +int +otx2_nix_timesync_enable(struct rte_eth_dev *eth_dev) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + int i, rc = 0; + + /* If we are VF/SDP/LBK, ptp cannot not be enabled */ + if (otx2_dev_is_vf_or_sdp(dev) || otx2_dev_is_lbk(dev)) { + otx2_info("PTP cannot be enabled in case of VF/SDP/LBK"); + return -EINVAL; + } + + if (otx2_ethdev_is_ptp_en(dev)) { + otx2_info("PTP mode is already enabled"); + return -EINVAL; + } + + if (!(dev->rx_offload_flags & NIX_RX_OFFLOAD_PTYPE_F)) { + otx2_err("Ptype offload is disabled, it should be enabled"); + return -EINVAL; + } + + if (dev->npc_flow.switch_header_type == OTX2_PRIV_FLAGS_HIGIG) { + otx2_err("Both PTP and switch header enabled"); + return -EINVAL; + } + + /* Allocating a iova address for tx tstamp */ + const struct rte_memzone *ts; + ts = rte_eth_dma_zone_reserve(eth_dev, "otx2_ts", + 0, OTX2_ALIGN, OTX2_ALIGN, + dev->node); + if (ts == NULL) { + otx2_err("Failed to allocate mem for tx tstamp addr"); + return -ENOMEM; + } + + dev->tstamp.tx_tstamp_iova = ts->iova; + dev->tstamp.tx_tstamp = ts->addr; + + /* System time should be already on by default */ + nix_start_timecounters(eth_dev); + + dev->rx_offloads |= DEV_RX_OFFLOAD_TIMESTAMP; + dev->rx_offload_flags |= NIX_RX_OFFLOAD_TSTAMP_F; + dev->tx_offload_flags |= NIX_TX_OFFLOAD_TSTAMP_F; + + rc = nix_ptp_config(eth_dev, 1); + if (!rc) { + for (i = 0; i < eth_dev->data->nb_tx_queues; i++) { + struct otx2_eth_txq *txq = eth_dev->data->tx_queues[i]; + otx2_nix_form_default_desc(txq); + } + + /* Setting up the function pointers as per new offload flags */ + otx2_eth_set_rx_function(eth_dev); + otx2_eth_set_tx_function(eth_dev); + } + + rc = otx2_nix_recalc_mtu(eth_dev); + if (rc) + otx2_err("Failed to set MTU size for ptp"); + + return rc; +} + +int +otx2_nix_timesync_disable(struct rte_eth_dev *eth_dev) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + int i, rc = 0; + + if (!otx2_ethdev_is_ptp_en(dev)) { + otx2_nix_dbg("PTP mode is 
disabled"); + return -EINVAL; + } + + if (otx2_dev_is_vf_or_sdp(dev) || otx2_dev_is_lbk(dev)) + return -EINVAL; + + dev->rx_offloads &= ~DEV_RX_OFFLOAD_TIMESTAMP; + dev->rx_offload_flags &= ~NIX_RX_OFFLOAD_TSTAMP_F; + dev->tx_offload_flags &= ~NIX_TX_OFFLOAD_TSTAMP_F; + + rc = nix_ptp_config(eth_dev, 0); + if (!rc) { + for (i = 0; i < eth_dev->data->nb_tx_queues; i++) { + struct otx2_eth_txq *txq = eth_dev->data->tx_queues[i]; + otx2_nix_form_default_desc(txq); + } + + /* Setting up the function pointers as per new offload flags */ + otx2_eth_set_rx_function(eth_dev); + otx2_eth_set_tx_function(eth_dev); + } + + rc = otx2_nix_recalc_mtu(eth_dev); + if (rc) + otx2_err("Failed to set MTU size for ptp"); + + return rc; +} + +int +otx2_nix_timesync_read_rx_timestamp(struct rte_eth_dev *eth_dev, + struct timespec *timestamp, + uint32_t __rte_unused flags) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct otx2_timesync_info *tstamp = &dev->tstamp; + uint64_t ns; + + if (!tstamp->rx_ready) + return -EINVAL; + + ns = rte_timecounter_update(&dev->rx_tstamp_tc, tstamp->rx_tstamp); + *timestamp = rte_ns_to_timespec(ns); + tstamp->rx_ready = 0; + + otx2_nix_dbg("rx timestamp: %"PRIu64" sec: %"PRIu64" nsec %"PRIu64"", + (uint64_t)tstamp->rx_tstamp, (uint64_t)timestamp->tv_sec, + (uint64_t)timestamp->tv_nsec); + + return 0; +} + +int +otx2_nix_timesync_read_tx_timestamp(struct rte_eth_dev *eth_dev, + struct timespec *timestamp) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct otx2_timesync_info *tstamp = &dev->tstamp; + uint64_t ns; + + if (*tstamp->tx_tstamp == 0) + return -EINVAL; + + ns = rte_timecounter_update(&dev->tx_tstamp_tc, *tstamp->tx_tstamp); + *timestamp = rte_ns_to_timespec(ns); + + otx2_nix_dbg("tx timestamp: %"PRIu64" sec: %"PRIu64" nsec %"PRIu64"", + *tstamp->tx_tstamp, (uint64_t)timestamp->tv_sec, + (uint64_t)timestamp->tv_nsec); + + *tstamp->tx_tstamp = 0; + rte_wmb(); + + return 0; +} + +int +otx2_nix_timesync_adjust_time(struct rte_eth_dev *eth_dev, int64_t delta) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct otx2_mbox *mbox = dev->mbox; + struct ptp_req *req; + struct ptp_rsp *rsp; + int rc; + + /* Adjust the frequent to make tics increments in 10^9 tics per sec */ + if (delta < PTP_FREQ_ADJUST && delta > -PTP_FREQ_ADJUST) { + req = otx2_mbox_alloc_msg_ptp_op(mbox); + req->op = PTP_OP_ADJFINE; + req->scaled_ppm = delta; + + rc = otx2_mbox_process_msg(mbox, (void *)&rsp); + if (rc) + return rc; + /* Since the frequency of PTP comp register is tuned, delta and + * freq mult calculation for deriving PTP_HI from timestamp + * counter should be done again. + */ + rc = otx2_nix_raw_clock_tsc_conv(dev); + if (rc) + otx2_err("Failed to calculate delta and freq mult"); + } + dev->systime_tc.nsec += delta; + dev->rx_tstamp_tc.nsec += delta; + dev->tx_tstamp_tc.nsec += delta; + + return 0; +} + +int +otx2_nix_timesync_write_time(struct rte_eth_dev *eth_dev, + const struct timespec *ts) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + uint64_t ns; + + ns = rte_timespec_to_ns(ts); + /* Set the time counters to a new value. 
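+ * Only the software timecounters (system time, Rx and Tx timestamp)
+ * are reseeded from the supplied timespec; the hardware PTP clock
+ * itself is not reprogrammed here.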
*/ + dev->systime_tc.nsec = ns; + dev->rx_tstamp_tc.nsec = ns; + dev->tx_tstamp_tc.nsec = ns; + + return 0; +} + +int +otx2_nix_timesync_read_time(struct rte_eth_dev *eth_dev, struct timespec *ts) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct otx2_mbox *mbox = dev->mbox; + struct ptp_req *req; + struct ptp_rsp *rsp; + uint64_t ns; + int rc; + + req = otx2_mbox_alloc_msg_ptp_op(mbox); + req->op = PTP_OP_GET_CLOCK; + rc = otx2_mbox_process_msg(mbox, (void *)&rsp); + if (rc) + return rc; + + ns = rte_timecounter_update(&dev->systime_tc, rsp->clk); + *ts = rte_ns_to_timespec(ns); + + otx2_nix_dbg("PTP time read: %"PRIu64" .%09"PRIu64"", + (uint64_t)ts->tv_sec, (uint64_t)ts->tv_nsec); + + return 0; +} + + +int +otx2_nix_read_clock(struct rte_eth_dev *eth_dev, uint64_t *clock) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + + /* This API returns the raw PTP HI clock value. Since LFs doesn't + * have direct access to PTP registers and it requires mbox msg + * to AF for this value. In fastpath reading this value for every + * packet (which involes mbox call) becomes very expensive, hence + * we should be able to derive PTP HI clock value from tsc by + * using freq_mult and clk_delta calculated during configure stage. + */ + *clock = (rte_get_tsc_cycles() + dev->clk_delta) * dev->clk_freq_mult; + + return 0; +} diff --git a/src/spdk/dpdk/drivers/net/octeontx2/otx2_rss.c b/src/spdk/dpdk/drivers/net/octeontx2/otx2_rss.c new file mode 100644 index 000000000..5e3f86681 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/octeontx2/otx2_rss.c @@ -0,0 +1,392 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(C) 2019 Marvell International Ltd. + */ + +#include "otx2_ethdev.h" + +int +otx2_nix_rss_tbl_init(struct otx2_eth_dev *dev, + uint8_t group, uint16_t *ind_tbl) +{ + struct otx2_rss_info *rss = &dev->rss_info; + struct otx2_mbox *mbox = dev->mbox; + struct nix_aq_enq_req *req; + int rc, idx; + + for (idx = 0; idx < rss->rss_size; idx++) { + req = otx2_mbox_alloc_msg_nix_aq_enq(mbox); + if (!req) { + /* The shared memory buffer can be full. 
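+ * Up to rss_size AQ INIT requests are queued back to back before the
+ * final send, which can exhaust the region.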
+ * Flush it and retry + */ + otx2_mbox_msg_send(mbox, 0); + rc = otx2_mbox_wait_for_rsp(mbox, 0); + if (rc < 0) + return rc; + + req = otx2_mbox_alloc_msg_nix_aq_enq(mbox); + if (!req) + return -ENOMEM; + } + req->rss.rq = ind_tbl[idx]; + /* Fill AQ info */ + req->qidx = (group * rss->rss_size) + idx; + req->ctype = NIX_AQ_CTYPE_RSS; + req->op = NIX_AQ_INSTOP_INIT; + } + + otx2_mbox_msg_send(mbox, 0); + rc = otx2_mbox_wait_for_rsp(mbox, 0); + if (rc < 0) + return rc; + + return 0; +} + +int +otx2_nix_dev_reta_update(struct rte_eth_dev *eth_dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct otx2_rss_info *rss = &dev->rss_info; + int rc, i, j; + int idx = 0; + + rc = -EINVAL; + if (reta_size != dev->rss_info.rss_size) { + otx2_err("Size of hash lookup table configured " + "(%d) doesn't match the number hardware can supported " + "(%d)", reta_size, dev->rss_info.rss_size); + goto fail; + } + + /* Copy RETA table */ + for (i = 0; i < (dev->rss_info.rss_size / RTE_RETA_GROUP_SIZE); i++) { + for (j = 0; j < RTE_RETA_GROUP_SIZE; j++) { + if ((reta_conf[i].mask >> j) & 0x01) + rss->ind_tbl[idx] = reta_conf[i].reta[j]; + idx++; + } + } + + return otx2_nix_rss_tbl_init(dev, 0, dev->rss_info.ind_tbl); + +fail: + return rc; +} + +int +otx2_nix_dev_reta_query(struct rte_eth_dev *eth_dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct otx2_rss_info *rss = &dev->rss_info; + int rc, i, j; + + rc = -EINVAL; + + if (reta_size != dev->rss_info.rss_size) { + otx2_err("Size of hash lookup table configured " + "(%d) doesn't match the number hardware can supported " + "(%d)", reta_size, dev->rss_info.rss_size); + goto fail; + } + + /* Copy RETA table */ + for (i = 0; i < (dev->rss_info.rss_size / RTE_RETA_GROUP_SIZE); i++) { + for (j = 0; j < RTE_RETA_GROUP_SIZE; j++) + if ((reta_conf[i].mask >> j) & 0x01) + reta_conf[i].reta[j] = rss->ind_tbl[j]; + } + + return 0; + +fail: + return rc; +} + +void +otx2_nix_rss_set_key(struct otx2_eth_dev *dev, uint8_t *key, + uint32_t key_len) +{ + const uint8_t default_key[NIX_HASH_KEY_SIZE] = { + 0xFE, 0xED, 0x0B, 0xAD, 0xFE, 0xED, 0x0B, 0xAD, + 0xFE, 0xED, 0x0B, 0xAD, 0xFE, 0xED, 0x0B, 0xAD, + 0xFE, 0xED, 0x0B, 0xAD, 0xFE, 0xED, 0x0B, 0xAD, + 0xFE, 0xED, 0x0B, 0xAD, 0xFE, 0xED, 0x0B, 0xAD, + 0xFE, 0xED, 0x0B, 0xAD, 0xFE, 0xED, 0x0B, 0xAD, + 0xFE, 0xED, 0x0B, 0xAD, 0xFE, 0xED, 0x0B, 0xAD + }; + struct otx2_rss_info *rss = &dev->rss_info; + uint64_t *keyptr; + uint64_t val; + uint32_t idx; + + if (key == NULL || key == 0) { + keyptr = (uint64_t *)(uintptr_t)default_key; + key_len = NIX_HASH_KEY_SIZE; + memset(rss->key, 0, key_len); + } else { + memcpy(rss->key, key, key_len); + keyptr = (uint64_t *)rss->key; + } + + for (idx = 0; idx < (key_len >> 3); idx++) { + val = rte_cpu_to_be_64(*keyptr); + otx2_write64(val, dev->base + NIX_LF_RX_SECRETX(idx)); + keyptr++; + } +} + +static void +rss_get_key(struct otx2_eth_dev *dev, uint8_t *key) +{ + uint64_t *keyptr = (uint64_t *)key; + uint64_t val; + int idx; + + for (idx = 0; idx < (NIX_HASH_KEY_SIZE >> 3); idx++) { + val = otx2_read64(dev->base + NIX_LF_RX_SECRETX(idx)); + *keyptr = rte_be_to_cpu_64(val); + keyptr++; + } +} + +#define RSS_IPV4_ENABLE ( \ + ETH_RSS_IPV4 | \ + ETH_RSS_FRAG_IPV4 | \ + ETH_RSS_NONFRAG_IPV4_UDP | \ + ETH_RSS_NONFRAG_IPV4_TCP | \ + ETH_RSS_NONFRAG_IPV4_SCTP) + +#define RSS_IPV6_ENABLE ( \ + ETH_RSS_IPV6 | \ + 
ETH_RSS_FRAG_IPV6 | \ + ETH_RSS_NONFRAG_IPV6_UDP | \ + ETH_RSS_NONFRAG_IPV6_TCP | \ + ETH_RSS_NONFRAG_IPV6_SCTP) + +#define RSS_IPV6_EX_ENABLE ( \ + ETH_RSS_IPV6_EX | \ + ETH_RSS_IPV6_TCP_EX | \ + ETH_RSS_IPV6_UDP_EX) + +#define RSS_MAX_LEVELS 3 + +#define RSS_IPV4_INDEX 0 +#define RSS_IPV6_INDEX 1 +#define RSS_TCP_INDEX 2 +#define RSS_UDP_INDEX 3 +#define RSS_SCTP_INDEX 4 +#define RSS_DMAC_INDEX 5 + +uint32_t +otx2_rss_ethdev_to_nix(struct otx2_eth_dev *dev, uint64_t ethdev_rss, + uint8_t rss_level) +{ + uint32_t flow_key_type[RSS_MAX_LEVELS][6] = { + { + FLOW_KEY_TYPE_IPV4, FLOW_KEY_TYPE_IPV6, + FLOW_KEY_TYPE_TCP, FLOW_KEY_TYPE_UDP, + FLOW_KEY_TYPE_SCTP, FLOW_KEY_TYPE_ETH_DMAC + }, + { + FLOW_KEY_TYPE_INNR_IPV4, FLOW_KEY_TYPE_INNR_IPV6, + FLOW_KEY_TYPE_INNR_TCP, FLOW_KEY_TYPE_INNR_UDP, + FLOW_KEY_TYPE_INNR_SCTP, FLOW_KEY_TYPE_INNR_ETH_DMAC + }, + { + FLOW_KEY_TYPE_IPV4 | FLOW_KEY_TYPE_INNR_IPV4, + FLOW_KEY_TYPE_IPV6 | FLOW_KEY_TYPE_INNR_IPV6, + FLOW_KEY_TYPE_TCP | FLOW_KEY_TYPE_INNR_TCP, + FLOW_KEY_TYPE_UDP | FLOW_KEY_TYPE_INNR_UDP, + FLOW_KEY_TYPE_SCTP | FLOW_KEY_TYPE_INNR_SCTP, + FLOW_KEY_TYPE_ETH_DMAC | FLOW_KEY_TYPE_INNR_ETH_DMAC + } + }; + uint32_t flowkey_cfg = 0; + + dev->rss_info.nix_rss = ethdev_rss; + + if (ethdev_rss & ETH_RSS_L2_PAYLOAD && + dev->npc_flow.switch_header_type == OTX2_PRIV_FLAGS_LEN_90B) { + flowkey_cfg |= FLOW_KEY_TYPE_CH_LEN_90B; + } + + if (ethdev_rss & ETH_RSS_L3_SRC_ONLY) + flowkey_cfg |= FLOW_KEY_TYPE_L3_SRC; + + if (ethdev_rss & ETH_RSS_L3_DST_ONLY) + flowkey_cfg |= FLOW_KEY_TYPE_L3_DST; + + if (ethdev_rss & ETH_RSS_L4_SRC_ONLY) + flowkey_cfg |= FLOW_KEY_TYPE_L4_SRC; + + if (ethdev_rss & ETH_RSS_L4_DST_ONLY) + flowkey_cfg |= FLOW_KEY_TYPE_L4_DST; + + if (ethdev_rss & RSS_IPV4_ENABLE) + flowkey_cfg |= flow_key_type[rss_level][RSS_IPV4_INDEX]; + + if (ethdev_rss & RSS_IPV6_ENABLE) + flowkey_cfg |= flow_key_type[rss_level][RSS_IPV6_INDEX]; + + if (ethdev_rss & ETH_RSS_TCP) + flowkey_cfg |= flow_key_type[rss_level][RSS_TCP_INDEX]; + + if (ethdev_rss & ETH_RSS_UDP) + flowkey_cfg |= flow_key_type[rss_level][RSS_UDP_INDEX]; + + if (ethdev_rss & ETH_RSS_SCTP) + flowkey_cfg |= flow_key_type[rss_level][RSS_SCTP_INDEX]; + + if (ethdev_rss & ETH_RSS_L2_PAYLOAD) + flowkey_cfg |= flow_key_type[rss_level][RSS_DMAC_INDEX]; + + if (ethdev_rss & RSS_IPV6_EX_ENABLE) + flowkey_cfg |= FLOW_KEY_TYPE_IPV6_EXT; + + if (ethdev_rss & ETH_RSS_PORT) + flowkey_cfg |= FLOW_KEY_TYPE_PORT; + + if (ethdev_rss & ETH_RSS_NVGRE) + flowkey_cfg |= FLOW_KEY_TYPE_NVGRE; + + if (ethdev_rss & ETH_RSS_VXLAN) + flowkey_cfg |= FLOW_KEY_TYPE_VXLAN; + + if (ethdev_rss & ETH_RSS_GENEVE) + flowkey_cfg |= FLOW_KEY_TYPE_GENEVE; + + if (ethdev_rss & ETH_RSS_GTPU) + flowkey_cfg |= FLOW_KEY_TYPE_GTPU; + + return flowkey_cfg; +} + +int +otx2_rss_set_hf(struct otx2_eth_dev *dev, uint32_t flowkey_cfg, + uint8_t *alg_idx, uint8_t group, int mcam_index) +{ + struct nix_rss_flowkey_cfg_rsp *rss_rsp; + struct otx2_mbox *mbox = dev->mbox; + struct nix_rss_flowkey_cfg *cfg; + int rc; + + rc = -EINVAL; + + dev->rss_info.flowkey_cfg = flowkey_cfg; + + cfg = otx2_mbox_alloc_msg_nix_rss_flowkey_cfg(mbox); + + cfg->flowkey_cfg = flowkey_cfg; + cfg->mcam_index = mcam_index; /* -1 indicates default group */ + cfg->group = group; /* 0 is default group */ + + rc = otx2_mbox_process_msg(mbox, (void *)&rss_rsp); + if (rc) + return rc; + + if (alg_idx) + *alg_idx = rss_rsp->alg_idx; + + return rc; +} + +int +otx2_nix_rss_hash_update(struct rte_eth_dev *eth_dev, + struct rte_eth_rss_conf *rss_conf) +{ + struct otx2_eth_dev *dev = 
otx2_eth_pmd_priv(eth_dev); + uint32_t flowkey_cfg; + uint8_t alg_idx; + int rc; + + rc = -EINVAL; + + if (rss_conf->rss_key && rss_conf->rss_key_len != NIX_HASH_KEY_SIZE) { + otx2_err("Hash key size mismatch %d vs %d", + rss_conf->rss_key_len, NIX_HASH_KEY_SIZE); + goto fail; + } + + if (rss_conf->rss_key) + otx2_nix_rss_set_key(dev, rss_conf->rss_key, + (uint32_t)rss_conf->rss_key_len); + + flowkey_cfg = otx2_rss_ethdev_to_nix(dev, rss_conf->rss_hf, 0); + + rc = otx2_rss_set_hf(dev, flowkey_cfg, &alg_idx, + NIX_DEFAULT_RSS_CTX_GROUP, + NIX_DEFAULT_RSS_MCAM_IDX); + if (rc) { + otx2_err("Failed to set RSS hash function rc=%d", rc); + return rc; + } + + dev->rss_info.alg_idx = alg_idx; + +fail: + return rc; +} + +int +otx2_nix_rss_hash_conf_get(struct rte_eth_dev *eth_dev, + struct rte_eth_rss_conf *rss_conf) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + + if (rss_conf->rss_key) + rss_get_key(dev, rss_conf->rss_key); + + rss_conf->rss_key_len = NIX_HASH_KEY_SIZE; + rss_conf->rss_hf = dev->rss_info.nix_rss; + + return 0; +} + +int +otx2_nix_rss_config(struct rte_eth_dev *eth_dev) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + uint32_t idx, qcnt = eth_dev->data->nb_rx_queues; + uint32_t flowkey_cfg; + uint64_t rss_hf; + uint8_t alg_idx; + int rc; + + /* Skip further configuration if selected mode is not RSS */ + if (eth_dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS || !qcnt) + return 0; + + /* Update default RSS key and cfg */ + otx2_nix_rss_set_key(dev, NULL, 0); + + /* Update default RSS RETA */ + for (idx = 0; idx < dev->rss_info.rss_size; idx++) + dev->rss_info.ind_tbl[idx] = idx % qcnt; + + /* Init RSS table context */ + rc = otx2_nix_rss_tbl_init(dev, 0, dev->rss_info.ind_tbl); + if (rc) { + otx2_err("Failed to init RSS table rc=%d", rc); + return rc; + } + + rss_hf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf; + flowkey_cfg = otx2_rss_ethdev_to_nix(dev, rss_hf, 0); + + rc = otx2_rss_set_hf(dev, flowkey_cfg, &alg_idx, + NIX_DEFAULT_RSS_CTX_GROUP, + NIX_DEFAULT_RSS_MCAM_IDX); + if (rc) { + otx2_err("Failed to set RSS hash function rc=%d", rc); + return rc; + } + + dev->rss_info.alg_idx = alg_idx; + + return 0; +} diff --git a/src/spdk/dpdk/drivers/net/octeontx2/otx2_rx.c b/src/spdk/dpdk/drivers/net/octeontx2/otx2_rx.c new file mode 100644 index 000000000..ac40704b6 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/octeontx2/otx2_rx.c @@ -0,0 +1,424 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(C) 2019 Marvell International Ltd. 
+ */ + +#include + +#include "otx2_ethdev.h" +#include "otx2_rx.h" + +#define NIX_DESCS_PER_LOOP 4 +#define CQE_CAST(x) ((struct nix_cqe_hdr_s *)(x)) +#define CQE_SZ(x) ((x) * NIX_CQ_ENTRY_SZ) + +static inline uint16_t +nix_rx_nb_pkts(struct otx2_eth_rxq *rxq, const uint64_t wdata, + const uint16_t pkts, const uint32_t qmask) +{ + uint32_t available = rxq->available; + + /* Update the available count if cached value is not enough */ + if (unlikely(available < pkts)) { + uint64_t reg, head, tail; + + /* Use LDADDA version to avoid reorder */ + reg = otx2_atomic64_add_sync(wdata, rxq->cq_status); + /* CQ_OP_STATUS operation error */ + if (reg & BIT_ULL(CQ_OP_STAT_OP_ERR) || + reg & BIT_ULL(CQ_OP_STAT_CQ_ERR)) + return 0; + + tail = reg & 0xFFFFF; + head = (reg >> 20) & 0xFFFFF; + if (tail < head) + available = tail - head + qmask + 1; + else + available = tail - head; + + rxq->available = available; + } + + return RTE_MIN(pkts, available); +} + +static __rte_always_inline uint16_t +nix_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t pkts, const uint16_t flags) +{ + struct otx2_eth_rxq *rxq = rx_queue; + const uint64_t mbuf_init = rxq->mbuf_initializer; + const void *lookup_mem = rxq->lookup_mem; + const uint64_t data_off = rxq->data_off; + const uintptr_t desc = rxq->desc; + const uint64_t wdata = rxq->wdata; + const uint32_t qmask = rxq->qmask; + uint16_t packets = 0, nb_pkts; + uint32_t head = rxq->head; + struct nix_cqe_hdr_s *cq; + struct rte_mbuf *mbuf; + + nb_pkts = nix_rx_nb_pkts(rxq, wdata, pkts, qmask); + + while (packets < nb_pkts) { + /* Prefetch N desc ahead */ + rte_prefetch_non_temporal((void *)(desc + + (CQE_SZ((head + 2) & qmask)))); + cq = (struct nix_cqe_hdr_s *)(desc + CQE_SZ(head)); + + mbuf = nix_get_mbuf_from_cqe(cq, data_off); + + otx2_nix_cqe_to_mbuf(cq, cq->tag, mbuf, lookup_mem, mbuf_init, + flags); + otx2_nix_mbuf_to_tstamp(mbuf, rxq->tstamp, flags, + (uint64_t *)((uint8_t *)mbuf + data_off)); + rx_pkts[packets++] = mbuf; + otx2_prefetch_store_keep(mbuf); + head++; + head &= qmask; + } + + rxq->head = head; + rxq->available -= nb_pkts; + + /* Free all the CQs that we've processed */ + otx2_write64((wdata | nb_pkts), rxq->cq_door); + + return nb_pkts; +} + +#if defined(RTE_ARCH_ARM64) + +static __rte_always_inline uint64_t +nix_vlan_update(const uint64_t w2, uint64_t ol_flags, uint8x16_t *f) +{ + if (w2 & BIT_ULL(21) /* vtag0_gone */) { + ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED; + *f = vsetq_lane_u16((uint16_t)(w2 >> 32), *f, 5); + } + + return ol_flags; +} + +static __rte_always_inline uint64_t +nix_qinq_update(const uint64_t w2, uint64_t ol_flags, struct rte_mbuf *mbuf) +{ + if (w2 & BIT_ULL(23) /* vtag1_gone */) { + ol_flags |= PKT_RX_QINQ | PKT_RX_QINQ_STRIPPED; + mbuf->vlan_tci_outer = (uint16_t)(w2 >> 48); + } + + return ol_flags; +} + +static __rte_always_inline uint16_t +nix_recv_pkts_vector(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t pkts, const uint16_t flags) +{ + struct otx2_eth_rxq *rxq = rx_queue; uint16_t packets = 0; + uint64x2_t cq0_w8, cq1_w8, cq2_w8, cq3_w8, mbuf01, mbuf23; + const uint64_t mbuf_initializer = rxq->mbuf_initializer; + const uint64x2_t data_off = vdupq_n_u64(rxq->data_off); + uint64_t ol_flags0, ol_flags1, ol_flags2, ol_flags3; + uint64x2_t rearm0 = vdupq_n_u64(mbuf_initializer); + uint64x2_t rearm1 = vdupq_n_u64(mbuf_initializer); + uint64x2_t rearm2 = vdupq_n_u64(mbuf_initializer); + uint64x2_t rearm3 = vdupq_n_u64(mbuf_initializer); + struct rte_mbuf *mbuf0, *mbuf1, *mbuf2, *mbuf3; + const 
uint16_t *lookup_mem = rxq->lookup_mem; + const uint32_t qmask = rxq->qmask; + const uint64_t wdata = rxq->wdata; + const uintptr_t desc = rxq->desc; + uint8x16_t f0, f1, f2, f3; + uint32_t head = rxq->head; + uint16_t pkts_left; + + pkts = nix_rx_nb_pkts(rxq, wdata, pkts, qmask); + pkts_left = pkts & (NIX_DESCS_PER_LOOP - 1); + + /* Packets has to be floor-aligned to NIX_DESCS_PER_LOOP */ + pkts = RTE_ALIGN_FLOOR(pkts, NIX_DESCS_PER_LOOP); + + while (packets < pkts) { + /* Exit loop if head is about to wrap and become unaligned */ + if (((head + NIX_DESCS_PER_LOOP - 1) & qmask) < + NIX_DESCS_PER_LOOP) { + pkts_left += (pkts - packets); + break; + } + + const uintptr_t cq0 = desc + CQE_SZ(head); + + /* Prefetch N desc ahead */ + rte_prefetch_non_temporal((void *)(cq0 + CQE_SZ(8))); + rte_prefetch_non_temporal((void *)(cq0 + CQE_SZ(9))); + rte_prefetch_non_temporal((void *)(cq0 + CQE_SZ(10))); + rte_prefetch_non_temporal((void *)(cq0 + CQE_SZ(11))); + + /* Get NIX_RX_SG_S for size and buffer pointer */ + cq0_w8 = vld1q_u64((uint64_t *)(cq0 + CQE_SZ(0) + 64)); + cq1_w8 = vld1q_u64((uint64_t *)(cq0 + CQE_SZ(1) + 64)); + cq2_w8 = vld1q_u64((uint64_t *)(cq0 + CQE_SZ(2) + 64)); + cq3_w8 = vld1q_u64((uint64_t *)(cq0 + CQE_SZ(3) + 64)); + + /* Extract mbuf from NIX_RX_SG_S */ + mbuf01 = vzip2q_u64(cq0_w8, cq1_w8); + mbuf23 = vzip2q_u64(cq2_w8, cq3_w8); + mbuf01 = vqsubq_u64(mbuf01, data_off); + mbuf23 = vqsubq_u64(mbuf23, data_off); + + /* Move mbufs to scalar registers for future use */ + mbuf0 = (struct rte_mbuf *)vgetq_lane_u64(mbuf01, 0); + mbuf1 = (struct rte_mbuf *)vgetq_lane_u64(mbuf01, 1); + mbuf2 = (struct rte_mbuf *)vgetq_lane_u64(mbuf23, 0); + mbuf3 = (struct rte_mbuf *)vgetq_lane_u64(mbuf23, 1); + + /* Mask to get packet len from NIX_RX_SG_S */ + const uint8x16_t shuf_msk = { + 0xFF, 0xFF, /* pkt_type set as unknown */ + 0xFF, 0xFF, /* pkt_type set as unknown */ + 0, 1, /* octet 1~0, low 16 bits pkt_len */ + 0xFF, 0xFF, /* skip high 16 bits pkt_len, zero out */ + 0, 1, /* octet 1~0, 16 bits data_len */ + 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF + }; + + /* Form the rx_descriptor_fields1 with pkt_len and data_len */ + f0 = vqtbl1q_u8(cq0_w8, shuf_msk); + f1 = vqtbl1q_u8(cq1_w8, shuf_msk); + f2 = vqtbl1q_u8(cq2_w8, shuf_msk); + f3 = vqtbl1q_u8(cq3_w8, shuf_msk); + + /* Load CQE word0 and word 1 */ + uint64_t cq0_w0 = ((uint64_t *)(cq0 + CQE_SZ(0)))[0]; + uint64_t cq0_w1 = ((uint64_t *)(cq0 + CQE_SZ(0)))[1]; + uint64_t cq1_w0 = ((uint64_t *)(cq0 + CQE_SZ(1)))[0]; + uint64_t cq1_w1 = ((uint64_t *)(cq0 + CQE_SZ(1)))[1]; + uint64_t cq2_w0 = ((uint64_t *)(cq0 + CQE_SZ(2)))[0]; + uint64_t cq2_w1 = ((uint64_t *)(cq0 + CQE_SZ(2)))[1]; + uint64_t cq3_w0 = ((uint64_t *)(cq0 + CQE_SZ(3)))[0]; + uint64_t cq3_w1 = ((uint64_t *)(cq0 + CQE_SZ(3)))[1]; + + if (flags & NIX_RX_OFFLOAD_RSS_F) { + /* Fill rss in the rx_descriptor_fields1 */ + f0 = vsetq_lane_u32(cq0_w0, f0, 3); + f1 = vsetq_lane_u32(cq1_w0, f1, 3); + f2 = vsetq_lane_u32(cq2_w0, f2, 3); + f3 = vsetq_lane_u32(cq3_w0, f3, 3); + ol_flags0 = PKT_RX_RSS_HASH; + ol_flags1 = PKT_RX_RSS_HASH; + ol_flags2 = PKT_RX_RSS_HASH; + ol_flags3 = PKT_RX_RSS_HASH; + } else { + ol_flags0 = 0; ol_flags1 = 0; + ol_flags2 = 0; ol_flags3 = 0; + } + + if (flags & NIX_RX_OFFLOAD_PTYPE_F) { + /* Fill packet_type in the rx_descriptor_fields1 */ + f0 = vsetq_lane_u32(nix_ptype_get(lookup_mem, cq0_w1), + f0, 0); + f1 = vsetq_lane_u32(nix_ptype_get(lookup_mem, cq1_w1), + f1, 0); + f2 = vsetq_lane_u32(nix_ptype_get(lookup_mem, cq2_w1), + f2, 0); + f3 = 
vsetq_lane_u32(nix_ptype_get(lookup_mem, cq3_w1), + f3, 0); + } + + if (flags & NIX_RX_OFFLOAD_CHECKSUM_F) { + ol_flags0 |= nix_rx_olflags_get(lookup_mem, cq0_w1); + ol_flags1 |= nix_rx_olflags_get(lookup_mem, cq1_w1); + ol_flags2 |= nix_rx_olflags_get(lookup_mem, cq2_w1); + ol_flags3 |= nix_rx_olflags_get(lookup_mem, cq3_w1); + } + + if (flags & NIX_RX_OFFLOAD_VLAN_STRIP_F) { + uint64_t cq0_w2 = *(uint64_t *)(cq0 + CQE_SZ(0) + 16); + uint64_t cq1_w2 = *(uint64_t *)(cq0 + CQE_SZ(1) + 16); + uint64_t cq2_w2 = *(uint64_t *)(cq0 + CQE_SZ(2) + 16); + uint64_t cq3_w2 = *(uint64_t *)(cq0 + CQE_SZ(3) + 16); + + ol_flags0 = nix_vlan_update(cq0_w2, ol_flags0, &f0); + ol_flags1 = nix_vlan_update(cq1_w2, ol_flags1, &f1); + ol_flags2 = nix_vlan_update(cq2_w2, ol_flags2, &f2); + ol_flags3 = nix_vlan_update(cq3_w2, ol_flags3, &f3); + + ol_flags0 = nix_qinq_update(cq0_w2, ol_flags0, mbuf0); + ol_flags1 = nix_qinq_update(cq1_w2, ol_flags1, mbuf1); + ol_flags2 = nix_qinq_update(cq2_w2, ol_flags2, mbuf2); + ol_flags3 = nix_qinq_update(cq3_w2, ol_flags3, mbuf3); + } + + if (flags & NIX_RX_OFFLOAD_MARK_UPDATE_F) { + ol_flags0 = nix_update_match_id(*(uint16_t *) + (cq0 + CQE_SZ(0) + 38), ol_flags0, mbuf0); + ol_flags1 = nix_update_match_id(*(uint16_t *) + (cq0 + CQE_SZ(1) + 38), ol_flags1, mbuf1); + ol_flags2 = nix_update_match_id(*(uint16_t *) + (cq0 + CQE_SZ(2) + 38), ol_flags2, mbuf2); + ol_flags3 = nix_update_match_id(*(uint16_t *) + (cq0 + CQE_SZ(3) + 38), ol_flags3, mbuf3); + } + + /* Form rearm_data with ol_flags */ + rearm0 = vsetq_lane_u64(ol_flags0, rearm0, 1); + rearm1 = vsetq_lane_u64(ol_flags1, rearm1, 1); + rearm2 = vsetq_lane_u64(ol_flags2, rearm2, 1); + rearm3 = vsetq_lane_u64(ol_flags3, rearm3, 1); + + /* Update rx_descriptor_fields1 */ + vst1q_u64((uint64_t *)mbuf0->rx_descriptor_fields1, f0); + vst1q_u64((uint64_t *)mbuf1->rx_descriptor_fields1, f1); + vst1q_u64((uint64_t *)mbuf2->rx_descriptor_fields1, f2); + vst1q_u64((uint64_t *)mbuf3->rx_descriptor_fields1, f3); + + /* Update rearm_data */ + vst1q_u64((uint64_t *)mbuf0->rearm_data, rearm0); + vst1q_u64((uint64_t *)mbuf1->rearm_data, rearm1); + vst1q_u64((uint64_t *)mbuf2->rearm_data, rearm2); + vst1q_u64((uint64_t *)mbuf3->rearm_data, rearm3); + + /* Store the mbufs to rx_pkts */ + vst1q_u64((uint64_t *)&rx_pkts[packets], mbuf01); + vst1q_u64((uint64_t *)&rx_pkts[packets + 2], mbuf23); + + /* Prefetch mbufs */ + otx2_prefetch_store_keep(mbuf0); + otx2_prefetch_store_keep(mbuf1); + otx2_prefetch_store_keep(mbuf2); + otx2_prefetch_store_keep(mbuf3); + + /* Mark mempool obj as "get" as it is alloc'ed by NIX */ + __mempool_check_cookies(mbuf0->pool, (void **)&mbuf0, 1, 1); + __mempool_check_cookies(mbuf1->pool, (void **)&mbuf1, 1, 1); + __mempool_check_cookies(mbuf2->pool, (void **)&mbuf2, 1, 1); + __mempool_check_cookies(mbuf3->pool, (void **)&mbuf3, 1, 1); + + /* Advance head pointer and packets */ + head += NIX_DESCS_PER_LOOP; head &= qmask; + packets += NIX_DESCS_PER_LOOP; + } + + rxq->head = head; + rxq->available -= packets; + + rte_cio_wmb(); + /* Free all the CQs that we've processed */ + otx2_write64((rxq->wdata | packets), rxq->cq_door); + + if (unlikely(pkts_left)) + packets += nix_recv_pkts(rx_queue, &rx_pkts[packets], + pkts_left, flags); + + return packets; +} + +#else + +static inline uint16_t +nix_recv_pkts_vector(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t pkts, const uint16_t flags) +{ + RTE_SET_USED(rx_queue); + RTE_SET_USED(rx_pkts); + RTE_SET_USED(pkts); + RTE_SET_USED(flags); + + return 0; +} + +#endif + 
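The scalar and vector receive paths above both depend on nix_rx_nb_pkts() to turn one CQ_OP_STATUS read into a count of pending completion-queue entries: the tail index is taken from bits 19:0 of the returned status word, the head index from bits 39:20, and a tail that has wrapped behind the head is corrected by adding the ring size (qmask + 1). Below is a minimal standalone sketch of just that arithmetic; cq_available() and the example status value are hypothetical names invented for illustration and are not part of the driver.

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative sketch only (not driver code): recompute the number of
 * pending CQ entries from the 20-bit tail (bits 19:0) and head (bits 39:20)
 * fields of a CQ_OP_STATUS result, the way nix_rx_nb_pkts() does above.
 * qmask is the ring depth minus one.
 */
static uint32_t
cq_available(uint64_t status, uint32_t qmask)
{
        uint64_t tail = status & 0xFFFFF;
        uint64_t head = (status >> 20) & 0xFFFFF;

        /* Tail numerically behind head means the ring index wrapped */
        if (tail < head)
                return (uint32_t)(tail - head + qmask + 1);

        return (uint32_t)(tail - head);
}

int
main(void)
{
        /* head = 1020, tail = 4 on a 1024-entry ring: 8 entries pending */
        uint64_t status = (1020ULL << 20) | 4ULL;

        printf("available = %u\n", cq_available(status, 1023));
        return 0;
}

The R() macros that follow expand NIX_RX_FASTPATH_MODES into one receive function per offload-flag combination, and pick_rx_func() then selects one of them by using each flag bit as an index into a seven-dimensional function-pointer table. The sketch below shows the same dispatch idea reduced to two flags; the EX_*/ex_* names are invented for illustration and the table is not the driver's actual nix_eth_rx_burst[] layout.

#include <stdint.h>
#include <stdio.h>

#define EX_RSS_F        (1 << 0)
#define EX_PTYPE_F      (1 << 1)

typedef const char *(*ex_burst_t)(void);

static const char *ex_none(void)      { return "no_offload"; }
static const char *ex_rss(void)       { return "rss"; }
static const char *ex_ptype(void)     { return "ptype"; }
static const char *ex_ptype_rss(void) { return "ptype_rss"; }

/* [PTYPE][RSS]: each offload flag contributes one array dimension */
static const ex_burst_t ex_tbl[2][2] = {
        [0][0] = ex_none,  [0][1] = ex_rss,
        [1][0] = ex_ptype, [1][1] = ex_ptype_rss,
};

int
main(void)
{
        uint16_t flags = EX_RSS_F | EX_PTYPE_F;
        ex_burst_t f = ex_tbl[!!(flags & EX_PTYPE_F)][!!(flags & EX_RSS_F)];

        printf("picked %s\n", f());     /* picked ptype_rss */
        return 0;
}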
+#define R(name, f6, f5, f4, f3, f2, f1, f0, flags) \ +static uint16_t __rte_noinline __rte_hot \ +otx2_nix_recv_pkts_ ## name(void *rx_queue, \ + struct rte_mbuf **rx_pkts, uint16_t pkts) \ +{ \ + return nix_recv_pkts(rx_queue, rx_pkts, pkts, (flags)); \ +} \ + \ +static uint16_t __rte_noinline __rte_hot \ +otx2_nix_recv_pkts_mseg_ ## name(void *rx_queue, \ + struct rte_mbuf **rx_pkts, uint16_t pkts) \ +{ \ + return nix_recv_pkts(rx_queue, rx_pkts, pkts, \ + (flags) | NIX_RX_MULTI_SEG_F); \ +} \ + \ +static uint16_t __rte_noinline __rte_hot \ +otx2_nix_recv_pkts_vec_ ## name(void *rx_queue, \ + struct rte_mbuf **rx_pkts, uint16_t pkts) \ +{ \ + /* TSTMP is not supported by vector */ \ + if ((flags) & NIX_RX_OFFLOAD_TSTAMP_F) \ + return 0; \ + return nix_recv_pkts_vector(rx_queue, rx_pkts, pkts, (flags)); \ +} \ + +NIX_RX_FASTPATH_MODES +#undef R + +static inline void +pick_rx_func(struct rte_eth_dev *eth_dev, + const eth_rx_burst_t rx_burst[2][2][2][2][2][2][2]) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + + /* [SEC] [TSTMP] [MARK] [VLAN] [CKSUM] [PTYPE] [RSS] */ + eth_dev->rx_pkt_burst = rx_burst + [!!(dev->rx_offload_flags & NIX_RX_OFFLOAD_SECURITY_F)] + [!!(dev->rx_offload_flags & NIX_RX_OFFLOAD_TSTAMP_F)] + [!!(dev->rx_offload_flags & NIX_RX_OFFLOAD_MARK_UPDATE_F)] + [!!(dev->rx_offload_flags & NIX_RX_OFFLOAD_VLAN_STRIP_F)] + [!!(dev->rx_offload_flags & NIX_RX_OFFLOAD_CHECKSUM_F)] + [!!(dev->rx_offload_flags & NIX_RX_OFFLOAD_PTYPE_F)] + [!!(dev->rx_offload_flags & NIX_RX_OFFLOAD_RSS_F)]; +} + +void +otx2_eth_set_rx_function(struct rte_eth_dev *eth_dev) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + + const eth_rx_burst_t nix_eth_rx_burst[2][2][2][2][2][2][2] = { +#define R(name, f6, f5, f4, f3, f2, f1, f0, flags) \ + [f6][f5][f4][f3][f2][f1][f0] = otx2_nix_recv_pkts_ ## name, + +NIX_RX_FASTPATH_MODES +#undef R + }; + + const eth_rx_burst_t nix_eth_rx_burst_mseg[2][2][2][2][2][2][2] = { +#define R(name, f6, f5, f4, f3, f2, f1, f0, flags) \ + [f6][f5][f4][f3][f2][f1][f0] = otx2_nix_recv_pkts_mseg_ ## name, + +NIX_RX_FASTPATH_MODES +#undef R + }; + + const eth_rx_burst_t nix_eth_rx_vec_burst[2][2][2][2][2][2][2] = { +#define R(name, f6, f5, f4, f3, f2, f1, f0, flags) \ + [f6][f5][f4][f3][f2][f1][f0] = otx2_nix_recv_pkts_vec_ ## name, + +NIX_RX_FASTPATH_MODES +#undef R + }; + + /* For PTP enabled, scalar rx function should be chosen as most of the + * PTP apps are implemented to rx burst 1 pkt. + */ + if (dev->scalar_ena || dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) + pick_rx_func(eth_dev, nix_eth_rx_burst); + else + pick_rx_func(eth_dev, nix_eth_rx_vec_burst); + + if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER) + pick_rx_func(eth_dev, nix_eth_rx_burst_mseg); + + /* Copy multi seg version with no offload for tear down sequence */ + if (rte_eal_process_type() == RTE_PROC_PRIMARY) + dev->rx_pkt_burst_no_offload = + nix_eth_rx_burst_mseg[0][0][0][0][0][0][0]; + rte_mb(); +} diff --git a/src/spdk/dpdk/drivers/net/octeontx2/otx2_rx.h b/src/spdk/dpdk/drivers/net/octeontx2/otx2_rx.h new file mode 100644 index 000000000..d8648b692 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/octeontx2/otx2_rx.h @@ -0,0 +1,541 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(C) 2019 Marvell International Ltd. + */ + +#ifndef __OTX2_RX_H__ +#define __OTX2_RX_H__ + +#include + +#include "otx2_common.h" +#include "otx2_ethdev_sec.h" +#include "otx2_ipsec_fp.h" + +/* Default mark value used when none is provided. 
*/ +#define OTX2_FLOW_ACTION_FLAG_DEFAULT 0xffff + +#define PTYPE_NON_TUNNEL_WIDTH 16 +#define PTYPE_TUNNEL_WIDTH 12 +#define PTYPE_NON_TUNNEL_ARRAY_SZ BIT(PTYPE_NON_TUNNEL_WIDTH) +#define PTYPE_TUNNEL_ARRAY_SZ BIT(PTYPE_TUNNEL_WIDTH) +#define PTYPE_ARRAY_SZ ((PTYPE_NON_TUNNEL_ARRAY_SZ +\ + PTYPE_TUNNEL_ARRAY_SZ) *\ + sizeof(uint16_t)) + +#define NIX_RX_OFFLOAD_NONE (0) +#define NIX_RX_OFFLOAD_RSS_F BIT(0) +#define NIX_RX_OFFLOAD_PTYPE_F BIT(1) +#define NIX_RX_OFFLOAD_CHECKSUM_F BIT(2) +#define NIX_RX_OFFLOAD_VLAN_STRIP_F BIT(3) +#define NIX_RX_OFFLOAD_MARK_UPDATE_F BIT(4) +#define NIX_RX_OFFLOAD_TSTAMP_F BIT(5) +#define NIX_RX_OFFLOAD_SECURITY_F BIT(6) + +/* Flags to control cqe_to_mbuf conversion function. + * Defining it from backwards to denote its been + * not used as offload flags to pick function + */ +#define NIX_RX_MULTI_SEG_F BIT(15) +#define NIX_TIMESYNC_RX_OFFSET 8 + +/* Inline IPsec offsets */ + +#define INLINE_INB_RPTR_HDR 16 +/* nix_cqe_hdr_s + nix_rx_parse_s + nix_rx_sg_s + nix_iova_s */ +#define INLINE_CPT_RESULT_OFFSET 80 + +struct otx2_timesync_info { + uint64_t rx_tstamp; + rte_iova_t tx_tstamp_iova; + uint64_t *tx_tstamp; + uint8_t tx_ready; + uint8_t rx_ready; +} __rte_cache_aligned; + +union mbuf_initializer { + struct { + uint16_t data_off; + uint16_t refcnt; + uint16_t nb_segs; + uint16_t port; + } fields; + uint64_t value; +}; + +static __rte_always_inline void +otx2_nix_mbuf_to_tstamp(struct rte_mbuf *mbuf, + struct otx2_timesync_info *tstamp, const uint16_t flag, + uint64_t *tstamp_ptr) +{ + if ((flag & NIX_RX_OFFLOAD_TSTAMP_F) && + (mbuf->data_off == RTE_PKTMBUF_HEADROOM + + NIX_TIMESYNC_RX_OFFSET)) { + + mbuf->pkt_len -= NIX_TIMESYNC_RX_OFFSET; + + /* Reading the rx timestamp inserted by CGX, viz at + * starting of the packet data. + */ + mbuf->timestamp = rte_be_to_cpu_64(*tstamp_ptr); + /* PKT_RX_IEEE1588_TMST flag needs to be set only in case + * PTP packets are received. + */ + if (mbuf->packet_type == RTE_PTYPE_L2_ETHER_TIMESYNC) { + tstamp->rx_tstamp = mbuf->timestamp; + tstamp->rx_ready = 1; + mbuf->ol_flags |= PKT_RX_IEEE1588_PTP | + PKT_RX_IEEE1588_TMST | PKT_RX_TIMESTAMP; + } + } +} + +static __rte_always_inline uint64_t +nix_clear_data_off(uint64_t oldval) +{ + union mbuf_initializer mbuf_init = { .value = oldval }; + + mbuf_init.fields.data_off = 0; + return mbuf_init.value; +} + +static __rte_always_inline struct rte_mbuf * +nix_get_mbuf_from_cqe(void *cq, const uint64_t data_off) +{ + rte_iova_t buff; + + /* Skip CQE, NIX_RX_PARSE_S and SG HDR(9 DWORDs) and peek buff addr */ + buff = *((rte_iova_t *)((uint64_t *)cq + 9)); + return (struct rte_mbuf *)(buff - data_off); +} + + +static __rte_always_inline uint32_t +nix_ptype_get(const void * const lookup_mem, const uint64_t in) +{ + const uint16_t * const ptype = lookup_mem; + const uint16_t lh_lg_lf = (in & 0xFFF0000000000000) >> 52; + const uint16_t tu_l2 = ptype[(in & 0x000FFFF000000000) >> 36]; + const uint16_t il4_tu = ptype[PTYPE_NON_TUNNEL_ARRAY_SZ + lh_lg_lf]; + + return (il4_tu << PTYPE_NON_TUNNEL_WIDTH) | tu_l2; +} + +static __rte_always_inline uint32_t +nix_rx_olflags_get(const void * const lookup_mem, const uint64_t in) +{ + const uint32_t * const ol_flags = (const uint32_t *) + ((const uint8_t *)lookup_mem + PTYPE_ARRAY_SZ); + + return ol_flags[(in & 0xfff00000) >> 20]; +} + +static inline uint64_t +nix_update_match_id(const uint16_t match_id, uint64_t ol_flags, + struct rte_mbuf *mbuf) +{ + /* There is no separate bit to check match_id + * is valid or not? 
and no flag to identify it is an + * RTE_FLOW_ACTION_TYPE_FLAG vs RTE_FLOW_ACTION_TYPE_MARK + * action. The former case addressed through 0 being invalid + * value and inc/dec match_id pair when MARK is activated. + * The later case addressed through defining + * OTX2_FLOW_MARK_DEFAULT as value for + * RTE_FLOW_ACTION_TYPE_MARK. + * This would translate to not use + * OTX2_FLOW_ACTION_FLAG_DEFAULT - 1 and + * OTX2_FLOW_ACTION_FLAG_DEFAULT for match_id. + * i.e valid mark_id's are from + * 0 to OTX2_FLOW_ACTION_FLAG_DEFAULT - 2 + */ + if (likely(match_id)) { + ol_flags |= PKT_RX_FDIR; + if (match_id != OTX2_FLOW_ACTION_FLAG_DEFAULT) { + ol_flags |= PKT_RX_FDIR_ID; + mbuf->hash.fdir.hi = match_id - 1; + } + } + + return ol_flags; +} + +static __rte_always_inline void +nix_cqe_xtract_mseg(const struct nix_rx_parse_s *rx, + struct rte_mbuf *mbuf, uint64_t rearm) +{ + const rte_iova_t *iova_list; + struct rte_mbuf *head; + const rte_iova_t *eol; + uint8_t nb_segs; + uint64_t sg; + + sg = *(const uint64_t *)(rx + 1); + nb_segs = (sg >> 48) & 0x3; + mbuf->nb_segs = nb_segs; + mbuf->data_len = sg & 0xFFFF; + sg = sg >> 16; + + eol = ((const rte_iova_t *)(rx + 1) + ((rx->desc_sizem1 + 1) << 1)); + /* Skip SG_S and first IOVA*/ + iova_list = ((const rte_iova_t *)(rx + 1)) + 2; + nb_segs--; + + rearm = rearm & ~0xFFFF; + + head = mbuf; + while (nb_segs) { + mbuf->next = ((struct rte_mbuf *)*iova_list) - 1; + mbuf = mbuf->next; + + __mempool_check_cookies(mbuf->pool, (void **)&mbuf, 1, 1); + + mbuf->data_len = sg & 0xFFFF; + sg = sg >> 16; + *(uint64_t *)(&mbuf->rearm_data) = rearm; + nb_segs--; + iova_list++; + + if (!nb_segs && (iova_list + 1 < eol)) { + sg = *(const uint64_t *)(iova_list); + nb_segs = (sg >> 48) & 0x3; + head->nb_segs += nb_segs; + iova_list = (const rte_iova_t *)(iova_list + 1); + } + } +} + +static __rte_always_inline uint16_t +nix_rx_sec_cptres_get(const void *cq) +{ + volatile const struct otx2_cpt_res *res; + + res = (volatile const struct otx2_cpt_res *)((const char *)cq + + INLINE_CPT_RESULT_OFFSET); + + return res->u16[0]; +} + +static __rte_always_inline void * +nix_rx_sec_sa_get(const void * const lookup_mem, int spi, uint16_t port) +{ + const uint64_t *const *sa_tbl = (const uint64_t * const *) + ((const uint8_t *)lookup_mem + OTX2_NIX_SA_TBL_START); + + return (void *)sa_tbl[port][spi]; +} + +static __rte_always_inline uint64_t +nix_rx_sec_mbuf_update(const struct nix_cqe_hdr_s *cq, struct rte_mbuf *m, + const void * const lookup_mem) +{ + struct otx2_ipsec_fp_in_sa *sa; + struct rte_ipv4_hdr *ipv4; + uint16_t m_len; + uint32_t spi; + char *data; + + if (unlikely(nix_rx_sec_cptres_get(cq) != OTX2_SEC_COMP_GOOD)) + return PKT_RX_SEC_OFFLOAD | PKT_RX_SEC_OFFLOAD_FAILED; + + /* 20 bits of tag would have the SPI */ + spi = cq->tag & 0xFFFFF; + + sa = nix_rx_sec_sa_get(lookup_mem, spi, m->port); + m->udata64 = (uint64_t)sa->userdata; + + data = rte_pktmbuf_mtod(m, char *); + memcpy(data + INLINE_INB_RPTR_HDR, data, RTE_ETHER_HDR_LEN); + + m->data_off += INLINE_INB_RPTR_HDR; + + ipv4 = (struct rte_ipv4_hdr *)(data + INLINE_INB_RPTR_HDR + + RTE_ETHER_HDR_LEN); + + m_len = rte_be_to_cpu_16(ipv4->total_length) + RTE_ETHER_HDR_LEN; + + m->data_len = m_len; + m->pkt_len = m_len; + return PKT_RX_SEC_OFFLOAD; +} + +static __rte_always_inline void +otx2_nix_cqe_to_mbuf(const struct nix_cqe_hdr_s *cq, const uint32_t tag, + struct rte_mbuf *mbuf, const void *lookup_mem, + const uint64_t val, const uint16_t flag) +{ + const struct nix_rx_parse_s *rx = + (const struct nix_rx_parse_s 
*)((const uint64_t *)cq + 1); + const uint64_t w1 = *(const uint64_t *)rx; + const uint16_t len = rx->pkt_lenm1 + 1; + uint64_t ol_flags = 0; + + /* Mark mempool obj as "get" as it is alloc'ed by NIX */ + __mempool_check_cookies(mbuf->pool, (void **)&mbuf, 1, 1); + + if (flag & NIX_RX_OFFLOAD_PTYPE_F) + mbuf->packet_type = nix_ptype_get(lookup_mem, w1); + else + mbuf->packet_type = 0; + + if (flag & NIX_RX_OFFLOAD_RSS_F) { + mbuf->hash.rss = tag; + ol_flags |= PKT_RX_RSS_HASH; + } + + if (flag & NIX_RX_OFFLOAD_CHECKSUM_F) + ol_flags |= nix_rx_olflags_get(lookup_mem, w1); + + if (flag & NIX_RX_OFFLOAD_VLAN_STRIP_F) { + if (rx->vtag0_gone) { + ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED; + mbuf->vlan_tci = rx->vtag0_tci; + } + if (rx->vtag1_gone) { + ol_flags |= PKT_RX_QINQ | PKT_RX_QINQ_STRIPPED; + mbuf->vlan_tci_outer = rx->vtag1_tci; + } + } + + if (flag & NIX_RX_OFFLOAD_MARK_UPDATE_F) + ol_flags = nix_update_match_id(rx->match_id, ol_flags, mbuf); + + if ((flag & NIX_RX_OFFLOAD_SECURITY_F) && + cq->cqe_type == NIX_XQE_TYPE_RX_IPSECH) { + *(uint64_t *)(&mbuf->rearm_data) = val; + ol_flags |= nix_rx_sec_mbuf_update(cq, mbuf, lookup_mem); + mbuf->ol_flags = ol_flags; + return; + } + + mbuf->ol_flags = ol_flags; + *(uint64_t *)(&mbuf->rearm_data) = val; + mbuf->pkt_len = len; + + if (flag & NIX_RX_MULTI_SEG_F) + nix_cqe_xtract_mseg(rx, mbuf, val); + else + mbuf->data_len = len; +} + +#define CKSUM_F NIX_RX_OFFLOAD_CHECKSUM_F +#define PTYPE_F NIX_RX_OFFLOAD_PTYPE_F +#define RSS_F NIX_RX_OFFLOAD_RSS_F +#define RX_VLAN_F NIX_RX_OFFLOAD_VLAN_STRIP_F +#define MARK_F NIX_RX_OFFLOAD_MARK_UPDATE_F +#define TS_F NIX_RX_OFFLOAD_TSTAMP_F +#define RX_SEC_F NIX_RX_OFFLOAD_SECURITY_F + +/* [SEC] [TSMP] [MARK] [VLAN] [CKSUM] [PTYPE] [RSS] */ +#define NIX_RX_FASTPATH_MODES \ +R(no_offload, 0, 0, 0, 0, 0, 0, 0, NIX_RX_OFFLOAD_NONE) \ +R(rss, 0, 0, 0, 0, 0, 0, 1, RSS_F) \ +R(ptype, 0, 0, 0, 0, 0, 1, 0, PTYPE_F) \ +R(ptype_rss, 0, 0, 0, 0, 0, 1, 1, PTYPE_F | RSS_F) \ +R(cksum, 0, 0, 0, 0, 1, 0, 0, CKSUM_F) \ +R(cksum_rss, 0, 0, 0, 0, 1, 0, 1, CKSUM_F | RSS_F) \ +R(cksum_ptype, 0, 0, 0, 0, 1, 1, 0, CKSUM_F | PTYPE_F) \ +R(cksum_ptype_rss, 0, 0, 0, 0, 1, 1, 1, CKSUM_F | PTYPE_F | RSS_F)\ +R(vlan, 0, 0, 0, 1, 0, 0, 0, RX_VLAN_F) \ +R(vlan_rss, 0, 0, 0, 1, 0, 0, 1, RX_VLAN_F | RSS_F) \ +R(vlan_ptype, 0, 0, 0, 1, 0, 1, 0, RX_VLAN_F | PTYPE_F) \ +R(vlan_ptype_rss, 0, 0, 0, 1, 0, 1, 1, \ + RX_VLAN_F | PTYPE_F | RSS_F) \ +R(vlan_cksum, 0, 0, 0, 1, 1, 0, 0, RX_VLAN_F | CKSUM_F) \ +R(vlan_cksum_rss, 0, 0, 0, 1, 1, 0, 1, \ + RX_VLAN_F | CKSUM_F | RSS_F) \ +R(vlan_cksum_ptype, 0, 0, 0, 1, 1, 1, 0, \ + RX_VLAN_F | CKSUM_F | PTYPE_F) \ +R(vlan_cksum_ptype_rss, 0, 0, 0, 1, 1, 1, 1, \ + RX_VLAN_F | CKSUM_F | PTYPE_F | RSS_F) \ +R(mark, 0, 0, 1, 0, 0, 0, 0, MARK_F) \ +R(mark_rss, 0, 0, 1, 0, 0, 0, 1, MARK_F | RSS_F) \ +R(mark_ptype, 0, 0, 1, 0, 0, 1, 0, MARK_F | PTYPE_F) \ +R(mark_ptype_rss, 0, 0, 1, 0, 0, 1, 1, MARK_F | PTYPE_F | RSS_F) \ +R(mark_cksum, 0, 0, 1, 0, 1, 0, 0, MARK_F | CKSUM_F) \ +R(mark_cksum_rss, 0, 0, 1, 0, 1, 0, 1, MARK_F | CKSUM_F | RSS_F) \ +R(mark_cksum_ptype, 0, 0, 1, 0, 1, 1, 0, \ + MARK_F | CKSUM_F | PTYPE_F) \ +R(mark_cksum_ptype_rss, 0, 0, 1, 0, 1, 1, 1, \ + MARK_F | CKSUM_F | PTYPE_F | RSS_F) \ +R(mark_vlan, 0, 0, 1, 1, 0, 0, 0, MARK_F | RX_VLAN_F) \ +R(mark_vlan_rss, 0, 0, 1, 1, 0, 0, 1, \ + MARK_F | RX_VLAN_F | RSS_F) \ +R(mark_vlan_ptype, 0, 0, 1, 1, 0, 1, 0, \ + MARK_F | RX_VLAN_F | PTYPE_F) \ +R(mark_vlan_ptype_rss, 0, 0, 1, 1, 0, 1, 1, \ + MARK_F | RX_VLAN_F | PTYPE_F | RSS_F) \ 
+R(mark_vlan_cksum, 0, 0, 1, 1, 1, 0, 0, \ + MARK_F | RX_VLAN_F | CKSUM_F) \ +R(mark_vlan_cksum_rss, 0, 0, 1, 1, 1, 0, 1, \ + MARK_F | RX_VLAN_F | CKSUM_F | RSS_F) \ +R(mark_vlan_cksum_ptype, 0, 0, 1, 1, 1, 1, 0, \ + MARK_F | RX_VLAN_F | CKSUM_F | PTYPE_F) \ +R(mark_vlan_cksum_ptype_rss, 0, 0, 1, 1, 1, 1, 1, \ + MARK_F | RX_VLAN_F | CKSUM_F | PTYPE_F | RSS_F) \ +R(ts, 0, 1, 0, 0, 0, 0, 0, TS_F) \ +R(ts_rss, 0, 1, 0, 0, 0, 0, 1, TS_F | RSS_F) \ +R(ts_ptype, 0, 1, 0, 0, 0, 1, 0, TS_F | PTYPE_F) \ +R(ts_ptype_rss, 0, 1, 0, 0, 0, 1, 1, TS_F | PTYPE_F | RSS_F) \ +R(ts_cksum, 0, 1, 0, 0, 1, 0, 0, TS_F | CKSUM_F) \ +R(ts_cksum_rss, 0, 1, 0, 0, 1, 0, 1, TS_F | CKSUM_F | RSS_F) \ +R(ts_cksum_ptype, 0, 1, 0, 0, 1, 1, 0, TS_F | CKSUM_F | PTYPE_F) \ +R(ts_cksum_ptype_rss, 0, 1, 0, 0, 1, 1, 1, \ + TS_F | CKSUM_F | PTYPE_F | RSS_F) \ +R(ts_vlan, 0, 1, 0, 1, 0, 0, 0, TS_F | RX_VLAN_F) \ +R(ts_vlan_rss, 0, 1, 0, 1, 0, 0, 1, TS_F | RX_VLAN_F | RSS_F) \ +R(ts_vlan_ptype, 0, 1, 0, 1, 0, 1, 0, \ + TS_F | RX_VLAN_F | PTYPE_F) \ +R(ts_vlan_ptype_rss, 0, 1, 0, 1, 0, 1, 1, \ + TS_F | RX_VLAN_F | PTYPE_F | RSS_F) \ +R(ts_vlan_cksum, 0, 1, 0, 1, 1, 0, 0, \ + TS_F | RX_VLAN_F | CKSUM_F) \ +R(ts_vlan_cksum_rss, 0, 1, 0, 1, 1, 0, 1, \ + MARK_F | RX_VLAN_F | CKSUM_F | RSS_F) \ +R(ts_vlan_cksum_ptype, 0, 1, 0, 1, 1, 1, 0, \ + TS_F | RX_VLAN_F | CKSUM_F | PTYPE_F) \ +R(ts_vlan_cksum_ptype_rss, 0, 1, 0, 1, 1, 1, 1, \ + TS_F | RX_VLAN_F | CKSUM_F | PTYPE_F | RSS_F) \ +R(ts_mark, 0, 1, 1, 0, 0, 0, 0, TS_F | MARK_F) \ +R(ts_mark_rss, 0, 1, 1, 0, 0, 0, 1, TS_F | MARK_F | RSS_F) \ +R(ts_mark_ptype, 0, 1, 1, 0, 0, 1, 0, TS_F | MARK_F | PTYPE_F) \ +R(ts_mark_ptype_rss, 0, 1, 1, 0, 0, 1, 1, \ + TS_F | MARK_F | PTYPE_F | RSS_F) \ +R(ts_mark_cksum, 0, 1, 1, 0, 1, 0, 0, TS_F | MARK_F | CKSUM_F) \ +R(ts_mark_cksum_rss, 0, 1, 1, 0, 1, 0, 1, \ + TS_F | MARK_F | CKSUM_F | RSS_F) \ +R(ts_mark_cksum_ptype, 0, 1, 1, 0, 1, 1, 0, \ + TS_F | MARK_F | CKSUM_F | PTYPE_F) \ +R(ts_mark_cksum_ptype_rss, 0, 1, 1, 0, 1, 1, 1, \ + TS_F | MARK_F | CKSUM_F | PTYPE_F | RSS_F) \ +R(ts_mark_vlan, 0, 1, 1, 1, 0, 0, 0, TS_F | MARK_F | RX_VLAN_F)\ +R(ts_mark_vlan_rss, 0, 1, 1, 1, 0, 0, 1, \ + TS_F | MARK_F | RX_VLAN_F | RSS_F) \ +R(ts_mark_vlan_ptype, 0, 1, 1, 1, 0, 1, 0, \ + TS_F | MARK_F | RX_VLAN_F | PTYPE_F) \ +R(ts_mark_vlan_ptype_rss, 0, 1, 1, 1, 0, 1, 1, \ + TS_F | MARK_F | RX_VLAN_F | PTYPE_F | RSS_F) \ +R(ts_mark_vlan_cksum_ptype, 0, 1, 1, 1, 1, 1, 0, \ + TS_F | MARK_F | RX_VLAN_F | CKSUM_F | PTYPE_F) \ +R(ts_mark_vlan_cksum_ptype_rss, 0, 1, 1, 1, 1, 1, 1, \ + TS_F | MARK_F | RX_VLAN_F | CKSUM_F | PTYPE_F | RSS_F) \ +R(sec, 1, 0, 0, 0, 0, 0, 0, RX_SEC_F) \ +R(sec_rss, 1, 0, 0, 0, 0, 0, 1, RX_SEC_F | RSS_F) \ +R(sec_ptype, 1, 0, 0, 0, 0, 1, 0, RX_SEC_F | PTYPE_F) \ +R(sec_ptype_rss, 1, 0, 0, 0, 0, 1, 1, \ + RX_SEC_F | PTYPE_F | RSS_F) \ +R(sec_cksum, 1, 0, 0, 0, 1, 0, 0, RX_SEC_F | CKSUM_F) \ +R(sec_cksum_rss, 1, 0, 0, 0, 1, 0, 1, \ + RX_SEC_F | CKSUM_F | RSS_F) \ +R(sec_cksum_ptype, 1, 0, 0, 0, 1, 1, 0, \ + RX_SEC_F | CKSUM_F | PTYPE_F) \ +R(sec_cksum_ptype_rss, 1, 0, 0, 0, 1, 1, 1, \ + RX_SEC_F | CKSUM_F | PTYPE_F | RSS_F) \ +R(sec_vlan, 1, 0, 0, 1, 0, 0, 0, RX_SEC_F | RX_VLAN_F) \ +R(sec_vlan_rss, 1, 0, 0, 1, 0, 0, 1, \ + RX_SEC_F | RX_VLAN_F | RSS_F) \ +R(sec_vlan_ptype, 1, 0, 0, 1, 0, 1, 0, \ + RX_SEC_F | RX_VLAN_F | PTYPE_F) \ +R(sec_vlan_ptype_rss, 1, 0, 0, 1, 0, 1, 1, \ + RX_SEC_F | RX_VLAN_F | PTYPE_F | RSS_F) \ +R(sec_vlan_cksum, 1, 0, 0, 1, 1, 0, 0, \ + RX_SEC_F | RX_VLAN_F | CKSUM_F) \ +R(sec_vlan_cksum_rss, 1, 0, 0, 1, 1, 0, 1, \ + 
RX_SEC_F | RX_VLAN_F | CKSUM_F | RSS_F) \ +R(sec_vlan_cksum_ptype, 1, 0, 0, 1, 1, 1, 0, \ + RX_SEC_F | RX_VLAN_F | CKSUM_F | PTYPE_F) \ +R(sec_vlan_cksum_ptype_rss, 1, 0, 0, 1, 1, 1, 1, \ + RX_SEC_F | RX_VLAN_F | CKSUM_F | PTYPE_F | RSS_F) \ +R(sec_mark, 1, 0, 1, 0, 0, 0, 0, RX_SEC_F | MARK_F) \ +R(sec_mark_rss, 1, 0, 1, 0, 0, 0, 1, RX_SEC_F | MARK_F | RSS_F)\ +R(sec_mark_ptype, 1, 0, 1, 0, 0, 1, 0, \ + RX_SEC_F | MARK_F | PTYPE_F) \ +R(sec_mark_ptype_rss, 1, 0, 1, 0, 0, 1, 1, \ + RX_SEC_F | MARK_F | PTYPE_F | RSS_F) \ +R(sec_mark_cksum, 1, 0, 1, 0, 1, 0, 0, \ + RX_SEC_F | MARK_F | CKSUM_F) \ +R(sec_mark_cksum_rss, 1, 0, 1, 0, 1, 0, 1, \ + RX_SEC_F | MARK_F | CKSUM_F | RSS_F) \ +R(sec_mark_cksum_ptype, 1, 0, 1, 0, 1, 1, 0, \ + RX_SEC_F | MARK_F | CKSUM_F | PTYPE_F) \ +R(sec_mark_cksum_ptype_rss, 1, 0, 1, 0, 1, 1, 1, \ + RX_SEC_F | MARK_F | CKSUM_F | PTYPE_F | RSS_F) \ +R(sec_mark_vlan, 1, 0, 1, 1, 0, 0, 0, RX_SEC_F | RX_VLAN_F) \ +R(sec_mark_vlan_rss, 1, 0, 1, 1, 0, 0, 1, \ + RX_SEC_F | MARK_F | RX_VLAN_F | RSS_F) \ +R(sec_mark_vlan_ptype, 1, 0, 1, 1, 0, 1, 0, \ + RX_SEC_F | MARK_F | RX_VLAN_F | PTYPE_F) \ +R(sec_mark_vlan_ptype_rss, 1, 0, 1, 1, 0, 1, 1, \ + RX_SEC_F | MARK_F | RX_VLAN_F | PTYPE_F | RSS_F) \ +R(sec_mark_vlan_cksum, 1, 0, 1, 1, 1, 0, 0, \ + RX_SEC_F | MARK_F | RX_VLAN_F | CKSUM_F) \ +R(sec_mark_vlan_cksum_rss, 1, 0, 1, 1, 1, 0, 1, \ + RX_SEC_F | MARK_F | RX_VLAN_F | CKSUM_F | RSS_F) \ +R(sec_mark_vlan_cksum_ptype, 1, 0, 1, 1, 1, 1, 0, \ + RX_SEC_F | MARK_F | RX_VLAN_F | CKSUM_F | PTYPE_F) \ +R(sec_mark_vlan_cksum_ptype_rss, \ + 1, 0, 1, 1, 1, 1, 1, \ + RX_SEC_F | MARK_F | RX_VLAN_F | CKSUM_F | PTYPE_F | \ + RSS_F) \ +R(sec_ts, 1, 1, 0, 0, 0, 0, 0, RX_SEC_F | TS_F) \ +R(sec_ts_rss, 1, 1, 0, 0, 0, 0, 1, RX_SEC_F | TS_F | RSS_F) \ +R(sec_ts_ptype, 1, 1, 0, 0, 0, 1, 0, RX_SEC_F | TS_F | PTYPE_F)\ +R(sec_ts_ptype_rss, 1, 1, 0, 0, 0, 1, 1, \ + RX_SEC_F | TS_F | PTYPE_F | RSS_F) \ +R(sec_ts_cksum, 1, 1, 0, 0, 1, 0, 0, RX_SEC_F | TS_F | CKSUM_F)\ +R(sec_ts_cksum_rss, 1, 1, 0, 0, 1, 0, 1, \ + RX_SEC_F | TS_F | CKSUM_F | RSS_F) \ +R(sec_ts_cksum_ptype, 1, 1, 0, 0, 1, 1, 0, \ + RX_SEC_F | CKSUM_F | PTYPE_F) \ +R(sec_ts_cksum_ptype_rss, 1, 1, 0, 0, 1, 1, 1, \ + RX_SEC_F | TS_F | CKSUM_F | PTYPE_F | RSS_F) \ +R(sec_ts_vlan, 1, 1, 0, 1, 0, 0, 0, \ + RX_SEC_F | TS_F | RX_VLAN_F) \ +R(sec_ts_vlan_rss, 1, 1, 0, 1, 0, 0, 1, \ + RX_SEC_F | TS_F | RX_VLAN_F | RSS_F) \ +R(sec_ts_vlan_ptype, 1, 1, 0, 1, 0, 1, 0, \ + RX_SEC_F | TS_F | RX_VLAN_F | PTYPE_F) \ +R(sec_ts_vlan_ptype_rss, 1, 1, 0, 1, 0, 1, 1, \ + RX_SEC_F | TS_F | RX_VLAN_F | PTYPE_F | RSS_F) \ +R(sec_ts_vlan_cksum, 1, 1, 0, 1, 1, 0, 0, \ + RX_SEC_F | TS_F | RX_VLAN_F | CKSUM_F) \ +R(sec_ts_vlan_cksum_rss, 1, 1, 0, 1, 1, 0, 1, \ + RX_SEC_F | TS_F | RX_VLAN_F | CKSUM_F | RSS_F) \ +R(sec_ts_vlan_cksum_ptype, 1, 1, 0, 1, 1, 1, 0, \ + RX_SEC_F | TS_F | RX_VLAN_F | CKSUM_F | PTYPE_F) \ +R(sec_ts_vlan_cksum_ptype_rss, 1, 1, 0, 1, 1, 1, 1, \ + RX_SEC_F | TS_F | RX_VLAN_F | CKSUM_F | PTYPE_F | \ + RSS_F) \ +R(sec_ts_mark, 1, 1, 1, 0, 0, 0, 0, RX_SEC_F | TS_F | MARK_F) \ +R(sec_ts_mark_rss, 1, 1, 1, 0, 0, 0, 1, \ + RX_SEC_F | TS_F | MARK_F | RSS_F) \ +R(sec_ts_mark_ptype, 1, 1, 1, 0, 0, 1, 0, \ + RX_SEC_F | TS_F | MARK_F | PTYPE_F) \ +R(sec_ts_mark_ptype_rss, 1, 1, 1, 0, 0, 1, 1, \ + RX_SEC_F | TS_F | MARK_F | PTYPE_F | RSS_F) \ +R(sec_ts_mark_cksum, 1, 1, 1, 0, 1, 0, 0, \ + RX_SEC_F | TS_F | MARK_F | CKSUM_F) \ +R(sec_ts_mark_cksum_rss, 1, 1, 1, 0, 1, 0, 1, \ + RX_SEC_F | TS_F | MARK_F | CKSUM_F | RSS_F) \ +R(sec_ts_mark_cksum_ptype, 1, 1, 1, 
0, 1, 1, 0, \ + RX_SEC_F | TS_F | MARK_F | CKSUM_F | PTYPE_F) \ +R(sec_ts_mark_cksum_ptype_rss, 1, 1, 1, 0, 1, 1, 1, \ + RX_SEC_F | TS_F | MARK_F | CKSUM_F | PTYPE_F | RSS_F) \ +R(sec_ts_mark_vlan, 1, 1, 1, 1, 0, 0, 0, \ + RX_SEC_F | TS_F | MARK_F | RX_VLAN_F) \ +R(sec_ts_mark_vlan_rss, 1, 1, 1, 1, 0, 0, 1, \ + RX_SEC_F | RX_VLAN_F | RSS_F) \ +R(sec_ts_mark_vlan_ptype, 1, 1, 1, 1, 0, 1, 0, \ + RX_SEC_F | TS_F | MARK_F | RX_VLAN_F | PTYPE_F) \ +R(sec_ts_mark_vlan_ptype_rss, 1, 1, 1, 1, 0, 1, 1, \ + RX_SEC_F | TS_F | MARK_F | RX_VLAN_F | PTYPE_F | RSS_F)\ +R(sec_ts_mark_vlan_cksum, 1, 1, 1, 1, 1, 0, 0, \ + RX_SEC_F | TS_F | MARK_F | RX_VLAN_F | CKSUM_F) \ +R(sec_ts_mark_vlan_cksum_rss, 1, 1, 1, 1, 1, 0, 1, \ + RX_SEC_F | TS_F | MARK_F | RX_VLAN_F | CKSUM_F | RSS_F)\ +R(sec_ts_mark_vlan_cksum_ptype, 1, 1, 1, 1, 1, 1, 0, \ + RX_SEC_F | TS_F | MARK_F | RX_VLAN_F | CKSUM_F | \ + PTYPE_F) \ +R(sec_ts_mark_vlan_cksum_ptype_rss, \ + 1, 1, 1, 1, 1, 1, 1, \ + RX_SEC_F | TS_F | MARK_F | RX_VLAN_F | CKSUM_F | \ + PTYPE_F | RSS_F) +#endif /* __OTX2_RX_H__ */ diff --git a/src/spdk/dpdk/drivers/net/octeontx2/otx2_stats.c b/src/spdk/dpdk/drivers/net/octeontx2/otx2_stats.c new file mode 100644 index 000000000..8aaf270a7 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/octeontx2/otx2_stats.c @@ -0,0 +1,396 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(C) 2019 Marvell International Ltd. + */ + +#include + +#include "otx2_ethdev.h" + +struct otx2_nix_xstats_name { + char name[RTE_ETH_XSTATS_NAME_SIZE]; + uint32_t offset; +}; + +static const struct otx2_nix_xstats_name nix_tx_xstats[] = { + {"tx_ucast", NIX_STAT_LF_TX_TX_UCAST}, + {"tx_bcast", NIX_STAT_LF_TX_TX_BCAST}, + {"tx_mcast", NIX_STAT_LF_TX_TX_MCAST}, + {"tx_drop", NIX_STAT_LF_TX_TX_DROP}, + {"tx_octs", NIX_STAT_LF_TX_TX_OCTS}, +}; + +static const struct otx2_nix_xstats_name nix_rx_xstats[] = { + {"rx_octs", NIX_STAT_LF_RX_RX_OCTS}, + {"rx_ucast", NIX_STAT_LF_RX_RX_UCAST}, + {"rx_bcast", NIX_STAT_LF_RX_RX_BCAST}, + {"rx_mcast", NIX_STAT_LF_RX_RX_MCAST}, + {"rx_drop", NIX_STAT_LF_RX_RX_DROP}, + {"rx_drop_octs", NIX_STAT_LF_RX_RX_DROP_OCTS}, + {"rx_fcs", NIX_STAT_LF_RX_RX_FCS}, + {"rx_err", NIX_STAT_LF_RX_RX_ERR}, + {"rx_drp_bcast", NIX_STAT_LF_RX_RX_DRP_BCAST}, + {"rx_drp_mcast", NIX_STAT_LF_RX_RX_DRP_MCAST}, + {"rx_drp_l3bcast", NIX_STAT_LF_RX_RX_DRP_L3BCAST}, + {"rx_drp_l3mcast", NIX_STAT_LF_RX_RX_DRP_L3MCAST}, +}; + +static const struct otx2_nix_xstats_name nix_q_xstats[] = { + {"rq_op_re_pkts", NIX_LF_RQ_OP_RE_PKTS}, +}; + +#define OTX2_NIX_NUM_RX_XSTATS RTE_DIM(nix_rx_xstats) +#define OTX2_NIX_NUM_TX_XSTATS RTE_DIM(nix_tx_xstats) +#define OTX2_NIX_NUM_QUEUE_XSTATS RTE_DIM(nix_q_xstats) + +#define OTX2_NIX_NUM_XSTATS_REG (OTX2_NIX_NUM_RX_XSTATS + \ + OTX2_NIX_NUM_TX_XSTATS + OTX2_NIX_NUM_QUEUE_XSTATS) + +int +otx2_nix_dev_stats_get(struct rte_eth_dev *eth_dev, + struct rte_eth_stats *stats) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + uint64_t reg, val; + uint32_t qidx, i; + int64_t *addr; + + stats->opackets = otx2_read64(dev->base + + NIX_LF_TX_STATX(NIX_STAT_LF_TX_TX_UCAST)); + stats->opackets += otx2_read64(dev->base + + NIX_LF_TX_STATX(NIX_STAT_LF_TX_TX_MCAST)); + stats->opackets += otx2_read64(dev->base + + NIX_LF_TX_STATX(NIX_STAT_LF_TX_TX_BCAST)); + stats->oerrors = otx2_read64(dev->base + + NIX_LF_TX_STATX(NIX_STAT_LF_TX_TX_DROP)); + stats->obytes = otx2_read64(dev->base + + NIX_LF_TX_STATX(NIX_STAT_LF_TX_TX_OCTS)); + + stats->ipackets = otx2_read64(dev->base + + NIX_LF_RX_STATX(NIX_STAT_LF_RX_RX_UCAST)); + 
stats->ipackets += otx2_read64(dev->base + + NIX_LF_RX_STATX(NIX_STAT_LF_RX_RX_MCAST)); + stats->ipackets += otx2_read64(dev->base + + NIX_LF_RX_STATX(NIX_STAT_LF_RX_RX_BCAST)); + stats->imissed = otx2_read64(dev->base + + NIX_LF_RX_STATX(NIX_STAT_LF_RX_RX_DROP)); + stats->ibytes = otx2_read64(dev->base + + NIX_LF_RX_STATX(NIX_STAT_LF_RX_RX_OCTS)); + stats->ierrors = otx2_read64(dev->base + + NIX_LF_RX_STATX(NIX_STAT_LF_RX_RX_ERR)); + + for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { + if (dev->txmap[i] & (1U << 31)) { + qidx = dev->txmap[i] & 0xFFFF; + reg = (((uint64_t)qidx) << 32); + + addr = (int64_t *)(dev->base + NIX_LF_SQ_OP_PKTS); + val = otx2_atomic64_add_nosync(reg, addr); + if (val & OP_ERR) + val = 0; + stats->q_opackets[i] = val; + + addr = (int64_t *)(dev->base + NIX_LF_SQ_OP_OCTS); + val = otx2_atomic64_add_nosync(reg, addr); + if (val & OP_ERR) + val = 0; + stats->q_obytes[i] = val; + + addr = (int64_t *)(dev->base + NIX_LF_SQ_OP_DROP_PKTS); + val = otx2_atomic64_add_nosync(reg, addr); + if (val & OP_ERR) + val = 0; + stats->q_errors[i] = val; + } + } + + for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { + if (dev->rxmap[i] & (1U << 31)) { + qidx = dev->rxmap[i] & 0xFFFF; + reg = (((uint64_t)qidx) << 32); + + addr = (int64_t *)(dev->base + NIX_LF_RQ_OP_PKTS); + val = otx2_atomic64_add_nosync(reg, addr); + if (val & OP_ERR) + val = 0; + stats->q_ipackets[i] = val; + + addr = (int64_t *)(dev->base + NIX_LF_RQ_OP_OCTS); + val = otx2_atomic64_add_nosync(reg, addr); + if (val & OP_ERR) + val = 0; + stats->q_ibytes[i] = val; + + addr = (int64_t *)(dev->base + NIX_LF_RQ_OP_DROP_PKTS); + val = otx2_atomic64_add_nosync(reg, addr); + if (val & OP_ERR) + val = 0; + stats->q_errors[i] += val; + } + } + + return 0; +} + +int +otx2_nix_dev_stats_reset(struct rte_eth_dev *eth_dev) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct otx2_mbox *mbox = dev->mbox; + + if (otx2_mbox_alloc_msg_nix_stats_rst(mbox) == NULL) + return -ENOMEM; + + return otx2_mbox_process(mbox); +} + +int +otx2_nix_queue_stats_mapping(struct rte_eth_dev *eth_dev, uint16_t queue_id, + uint8_t stat_idx, uint8_t is_rx) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + + if (is_rx) + dev->rxmap[stat_idx] = ((1U << 31) | queue_id); + else + dev->txmap[stat_idx] = ((1U << 31) | queue_id); + + return 0; +} + +int +otx2_nix_xstats_get(struct rte_eth_dev *eth_dev, + struct rte_eth_xstat *xstats, + unsigned int n) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + unsigned int i, count = 0; + uint64_t reg, val; + + if (n < OTX2_NIX_NUM_XSTATS_REG) + return OTX2_NIX_NUM_XSTATS_REG; + + if (xstats == NULL) + return 0; + + for (i = 0; i < OTX2_NIX_NUM_TX_XSTATS; i++) { + xstats[count].value = otx2_read64(dev->base + + NIX_LF_TX_STATX(nix_tx_xstats[i].offset)); + xstats[count].id = count; + count++; + } + + for (i = 0; i < OTX2_NIX_NUM_RX_XSTATS; i++) { + xstats[count].value = otx2_read64(dev->base + + NIX_LF_RX_STATX(nix_rx_xstats[i].offset)); + xstats[count].id = count; + count++; + } + + for (i = 0; i < eth_dev->data->nb_rx_queues; i++) { + reg = (((uint64_t)i) << 32); + val = otx2_atomic64_add_nosync(reg, (int64_t *)(dev->base + + nix_q_xstats[0].offset)); + if (val & OP_ERR) + val = 0; + xstats[count].value += val; + } + xstats[count].id = count; + count++; + + return count; +} + +int +otx2_nix_xstats_get_names(struct rte_eth_dev *eth_dev, + struct rte_eth_xstat_name *xstats_names, + unsigned int limit) +{ + unsigned int i, count = 0; + + RTE_SET_USED(eth_dev); + + if (limit < 
OTX2_NIX_NUM_XSTATS_REG && xstats_names != NULL) + return -ENOMEM; + + if (xstats_names) { + for (i = 0; i < OTX2_NIX_NUM_TX_XSTATS; i++) { + snprintf(xstats_names[count].name, + sizeof(xstats_names[count].name), + "%s", nix_tx_xstats[i].name); + count++; + } + + for (i = 0; i < OTX2_NIX_NUM_RX_XSTATS; i++) { + snprintf(xstats_names[count].name, + sizeof(xstats_names[count].name), + "%s", nix_rx_xstats[i].name); + count++; + } + + for (i = 0; i < OTX2_NIX_NUM_QUEUE_XSTATS; i++) { + snprintf(xstats_names[count].name, + sizeof(xstats_names[count].name), + "%s", nix_q_xstats[i].name); + count++; + } + } + + return OTX2_NIX_NUM_XSTATS_REG; +} + +int +otx2_nix_xstats_get_names_by_id(struct rte_eth_dev *eth_dev, + struct rte_eth_xstat_name *xstats_names, + const uint64_t *ids, unsigned int limit) +{ + struct rte_eth_xstat_name xstats_names_copy[OTX2_NIX_NUM_XSTATS_REG]; + uint16_t i; + + if (limit < OTX2_NIX_NUM_XSTATS_REG && ids == NULL) + return OTX2_NIX_NUM_XSTATS_REG; + + if (limit > OTX2_NIX_NUM_XSTATS_REG) + return -EINVAL; + + if (xstats_names == NULL) + return -ENOMEM; + + otx2_nix_xstats_get_names(eth_dev, xstats_names_copy, limit); + + for (i = 0; i < OTX2_NIX_NUM_XSTATS_REG; i++) { + if (ids[i] >= OTX2_NIX_NUM_XSTATS_REG) { + otx2_err("Invalid id value"); + return -EINVAL; + } + strncpy(xstats_names[i].name, xstats_names_copy[ids[i]].name, + sizeof(xstats_names[i].name)); + } + + return limit; +} + +int +otx2_nix_xstats_get_by_id(struct rte_eth_dev *eth_dev, const uint64_t *ids, + uint64_t *values, unsigned int n) +{ + struct rte_eth_xstat xstats[OTX2_NIX_NUM_XSTATS_REG]; + uint16_t i; + + if (n < OTX2_NIX_NUM_XSTATS_REG && ids == NULL) + return OTX2_NIX_NUM_XSTATS_REG; + + if (n > OTX2_NIX_NUM_XSTATS_REG) + return -EINVAL; + + if (values == NULL) + return -ENOMEM; + + otx2_nix_xstats_get(eth_dev, xstats, n); + + for (i = 0; i < OTX2_NIX_NUM_XSTATS_REG; i++) { + if (ids[i] >= OTX2_NIX_NUM_XSTATS_REG) { + otx2_err("Invalid id value"); + return -EINVAL; + } + values[i] = xstats[ids[i]].value; + } + + return n; +} + +static int +nix_queue_stats_reset(struct rte_eth_dev *eth_dev) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct otx2_mbox *mbox = dev->mbox; + struct nix_aq_enq_rsp *rsp; + struct nix_aq_enq_req *aq; + uint32_t i; + int rc; + + for (i = 0; i < eth_dev->data->nb_rx_queues; i++) { + aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox); + aq->qidx = i; + aq->ctype = NIX_AQ_CTYPE_RQ; + aq->op = NIX_AQ_INSTOP_READ; + rc = otx2_mbox_process_msg(mbox, (void *)&rsp); + if (rc) { + otx2_err("Failed to read rq context"); + return rc; + } + aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox); + aq->qidx = i; + aq->ctype = NIX_AQ_CTYPE_RQ; + aq->op = NIX_AQ_INSTOP_WRITE; + otx2_mbox_memcpy(&aq->rq, &rsp->rq, sizeof(rsp->rq)); + otx2_mbox_memset(&aq->rq_mask, 0, sizeof(aq->rq_mask)); + aq->rq.octs = 0; + aq->rq.pkts = 0; + aq->rq.drop_octs = 0; + aq->rq.drop_pkts = 0; + aq->rq.re_pkts = 0; + + aq->rq_mask.octs = ~(aq->rq_mask.octs); + aq->rq_mask.pkts = ~(aq->rq_mask.pkts); + aq->rq_mask.drop_octs = ~(aq->rq_mask.drop_octs); + aq->rq_mask.drop_pkts = ~(aq->rq_mask.drop_pkts); + aq->rq_mask.re_pkts = ~(aq->rq_mask.re_pkts); + rc = otx2_mbox_process(mbox); + if (rc) { + otx2_err("Failed to write rq context"); + return rc; + } + } + + for (i = 0; i < eth_dev->data->nb_tx_queues; i++) { + aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox); + aq->qidx = i; + aq->ctype = NIX_AQ_CTYPE_SQ; + aq->op = NIX_AQ_INSTOP_READ; + rc = otx2_mbox_process_msg(mbox, (void *)&rsp); + if (rc) { + otx2_err("Failed to 
read sq context"); + return rc; + } + aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox); + aq->qidx = i; + aq->ctype = NIX_AQ_CTYPE_SQ; + aq->op = NIX_AQ_INSTOP_WRITE; + otx2_mbox_memcpy(&aq->sq, &rsp->sq, sizeof(rsp->sq)); + otx2_mbox_memset(&aq->sq_mask, 0, sizeof(aq->sq_mask)); + aq->sq.octs = 0; + aq->sq.pkts = 0; + aq->sq.drop_octs = 0; + aq->sq.drop_pkts = 0; + + aq->sq_mask.octs = ~(aq->sq_mask.octs); + aq->sq_mask.pkts = ~(aq->sq_mask.pkts); + aq->sq_mask.drop_octs = ~(aq->sq_mask.drop_octs); + aq->sq_mask.drop_pkts = ~(aq->sq_mask.drop_pkts); + rc = otx2_mbox_process(mbox); + if (rc) { + otx2_err("Failed to write sq context"); + return rc; + } + } + + return 0; +} + +int +otx2_nix_xstats_reset(struct rte_eth_dev *eth_dev) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct otx2_mbox *mbox = dev->mbox; + int ret; + + if (otx2_mbox_alloc_msg_nix_stats_rst(mbox) == NULL) + return -ENOMEM; + + ret = otx2_mbox_process(mbox); + if (ret != 0) + return ret; + + /* Reset queue stats */ + return nix_queue_stats_reset(eth_dev); +} diff --git a/src/spdk/dpdk/drivers/net/octeontx2/otx2_tm.c b/src/spdk/dpdk/drivers/net/octeontx2/otx2_tm.c new file mode 100644 index 000000000..8ed059549 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/octeontx2/otx2_tm.c @@ -0,0 +1,3216 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(C) 2019 Marvell International Ltd. + */ + +#include + +#include "otx2_ethdev.h" +#include "otx2_tm.h" + +/* Use last LVL_CNT nodes as default nodes */ +#define NIX_DEFAULT_NODE_ID_START (RTE_TM_NODE_ID_NULL - NIX_TXSCH_LVL_CNT) + +enum otx2_tm_node_level { + OTX2_TM_LVL_ROOT = 0, + OTX2_TM_LVL_SCH1, + OTX2_TM_LVL_SCH2, + OTX2_TM_LVL_SCH3, + OTX2_TM_LVL_SCH4, + OTX2_TM_LVL_QUEUE, + OTX2_TM_LVL_MAX, +}; + +static inline +uint64_t shaper2regval(struct shaper_params *shaper) +{ + return (shaper->burst_exponent << 37) | (shaper->burst_mantissa << 29) | + (shaper->div_exp << 13) | (shaper->exponent << 9) | + (shaper->mantissa << 1); +} + +int +otx2_nix_get_link(struct otx2_eth_dev *dev) +{ + int link = 13 /* SDP */; + uint16_t lmac_chan; + uint16_t map; + + lmac_chan = dev->tx_chan_base; + + /* CGX lmac link */ + if (lmac_chan >= 0x800) { + map = lmac_chan & 0x7FF; + link = 4 * ((map >> 8) & 0xF) + ((map >> 4) & 0xF); + } else if (lmac_chan < 0x700) { + /* LBK channel */ + link = 12; + } + + return link; +} + +static uint8_t +nix_get_relchan(struct otx2_eth_dev *dev) +{ + return dev->tx_chan_base & 0xff; +} + +static bool +nix_tm_have_tl1_access(struct otx2_eth_dev *dev) +{ + bool is_lbk = otx2_dev_is_lbk(dev); + return otx2_dev_is_pf(dev) && !otx2_dev_is_Ax(dev) && !is_lbk; +} + +static bool +nix_tm_is_leaf(struct otx2_eth_dev *dev, int lvl) +{ + if (nix_tm_have_tl1_access(dev)) + return (lvl == OTX2_TM_LVL_QUEUE); + + return (lvl == OTX2_TM_LVL_SCH4); +} + +static int +find_prio_anchor(struct otx2_eth_dev *dev, uint32_t node_id) +{ + struct otx2_nix_tm_node *child_node; + + TAILQ_FOREACH(child_node, &dev->node_list, node) { + if (!child_node->parent) + continue; + if (!(child_node->parent->id == node_id)) + continue; + if (child_node->priority == child_node->parent->rr_prio) + continue; + return child_node->hw_id - child_node->priority; + } + return 0; +} + + +static struct otx2_nix_tm_shaper_profile * +nix_tm_shaper_profile_search(struct otx2_eth_dev *dev, uint32_t shaper_id) +{ + struct otx2_nix_tm_shaper_profile *tm_shaper_profile; + + TAILQ_FOREACH(tm_shaper_profile, &dev->shaper_profile_list, shaper) { + if (tm_shaper_profile->shaper_profile_id == shaper_id) + 
return tm_shaper_profile; + } + return NULL; +} + +static inline uint64_t +shaper_rate_to_nix(uint64_t value, uint64_t *exponent_p, + uint64_t *mantissa_p, uint64_t *div_exp_p) +{ + uint64_t div_exp, exponent, mantissa; + + /* Boundary checks */ + if (value < MIN_SHAPER_RATE || + value > MAX_SHAPER_RATE) + return 0; + + if (value <= SHAPER_RATE(0, 0, 0)) { + /* Calculate rate div_exp and mantissa using + * the following formula: + * + * value = (2E6 * (256 + mantissa) + * / ((1 << div_exp) * 256)) + */ + div_exp = 0; + exponent = 0; + mantissa = MAX_RATE_MANTISSA; + + while (value < (NIX_SHAPER_RATE_CONST / (1 << div_exp))) + div_exp += 1; + + while (value < + ((NIX_SHAPER_RATE_CONST * (256 + mantissa)) / + ((1 << div_exp) * 256))) + mantissa -= 1; + } else { + /* Calculate rate exponent and mantissa using + * the following formula: + * + * value = (2E6 * ((256 + mantissa) << exponent)) / 256 + * + */ + div_exp = 0; + exponent = MAX_RATE_EXPONENT; + mantissa = MAX_RATE_MANTISSA; + + while (value < (NIX_SHAPER_RATE_CONST * (1 << exponent))) + exponent -= 1; + + while (value < ((NIX_SHAPER_RATE_CONST * + ((256 + mantissa) << exponent)) / 256)) + mantissa -= 1; + } + + if (div_exp > MAX_RATE_DIV_EXP || + exponent > MAX_RATE_EXPONENT || mantissa > MAX_RATE_MANTISSA) + return 0; + + if (div_exp_p) + *div_exp_p = div_exp; + if (exponent_p) + *exponent_p = exponent; + if (mantissa_p) + *mantissa_p = mantissa; + + /* Calculate real rate value */ + return SHAPER_RATE(exponent, mantissa, div_exp); +} + +static inline uint64_t +shaper_burst_to_nix(uint64_t value, uint64_t *exponent_p, + uint64_t *mantissa_p) +{ + uint64_t exponent, mantissa; + + if (value < MIN_SHAPER_BURST || value > MAX_SHAPER_BURST) + return 0; + + /* Calculate burst exponent and mantissa using + * the following formula: + * + * value = (((256 + mantissa) << (exponent + 1) + / 256) + * + */ + exponent = MAX_BURST_EXPONENT; + mantissa = MAX_BURST_MANTISSA; + + while (value < (1ull << (exponent + 1))) + exponent -= 1; + + while (value < ((256 + mantissa) << (exponent + 1)) / 256) + mantissa -= 1; + + if (exponent > MAX_BURST_EXPONENT || mantissa > MAX_BURST_MANTISSA) + return 0; + + if (exponent_p) + *exponent_p = exponent; + if (mantissa_p) + *mantissa_p = mantissa; + + return SHAPER_BURST(exponent, mantissa); +} + +static void +shaper_config_to_nix(struct otx2_nix_tm_shaper_profile *profile, + struct shaper_params *cir, + struct shaper_params *pir) +{ + struct rte_tm_shaper_params *param = &profile->params; + + if (!profile) + return; + + /* Calculate CIR exponent and mantissa */ + if (param->committed.rate) + cir->rate = shaper_rate_to_nix(param->committed.rate, + &cir->exponent, + &cir->mantissa, + &cir->div_exp); + + /* Calculate PIR exponent and mantissa */ + if (param->peak.rate) + pir->rate = shaper_rate_to_nix(param->peak.rate, + &pir->exponent, + &pir->mantissa, + &pir->div_exp); + + /* Calculate CIR burst exponent and mantissa */ + if (param->committed.size) + cir->burst = shaper_burst_to_nix(param->committed.size, + &cir->burst_exponent, + &cir->burst_mantissa); + + /* Calculate PIR burst exponent and mantissa */ + if (param->peak.size) + pir->burst = shaper_burst_to_nix(param->peak.size, + &pir->burst_exponent, + &pir->burst_mantissa); +} + +static void +shaper_default_red_algo(struct otx2_eth_dev *dev, + struct otx2_nix_tm_node *tm_node, + struct otx2_nix_tm_shaper_profile *profile) +{ + struct shaper_params cir, pir; + + /* C0 doesn't support STALL when both PIR & CIR are enabled */ + if (profile && 
otx2_dev_is_96xx_Cx(dev)) { + memset(&cir, 0, sizeof(cir)); + memset(&pir, 0, sizeof(pir)); + shaper_config_to_nix(profile, &cir, &pir); + + if (pir.rate && cir.rate) { + tm_node->red_algo = NIX_REDALG_DISCARD; + tm_node->flags |= NIX_TM_NODE_RED_DISCARD; + return; + } + } + + tm_node->red_algo = NIX_REDALG_STD; + tm_node->flags &= ~NIX_TM_NODE_RED_DISCARD; +} + +static int +populate_tm_tl1_default(struct otx2_eth_dev *dev, uint32_t schq) +{ + struct otx2_mbox *mbox = dev->mbox; + struct nix_txschq_config *req; + + /* + * Default config for TL1. + * For VF this is always ignored. + */ + + req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox); + req->lvl = NIX_TXSCH_LVL_TL1; + + /* Set DWRR quantum */ + req->reg[0] = NIX_AF_TL1X_SCHEDULE(schq); + req->regval[0] = TXSCH_TL1_DFLT_RR_QTM; + req->num_regs++; + + req->reg[1] = NIX_AF_TL1X_TOPOLOGY(schq); + req->regval[1] = (TXSCH_TL1_DFLT_RR_PRIO << 1); + req->num_regs++; + + req->reg[2] = NIX_AF_TL1X_CIR(schq); + req->regval[2] = 0; + req->num_regs++; + + return otx2_mbox_process(mbox); +} + +static uint8_t +prepare_tm_sched_reg(struct otx2_eth_dev *dev, + struct otx2_nix_tm_node *tm_node, + volatile uint64_t *reg, volatile uint64_t *regval) +{ + uint64_t strict_prio = tm_node->priority; + uint32_t hw_lvl = tm_node->hw_lvl; + uint32_t schq = tm_node->hw_id; + uint64_t rr_quantum; + uint8_t k = 0; + + rr_quantum = NIX_TM_WEIGHT_TO_RR_QUANTUM(tm_node->weight); + + /* For children to root, strict prio is default if either + * device root is TL2 or TL1 Static Priority is disabled. + */ + if (hw_lvl == NIX_TXSCH_LVL_TL2 && + (dev->otx2_tm_root_lvl == NIX_TXSCH_LVL_TL2 || + dev->tm_flags & NIX_TM_TL1_NO_SP)) + strict_prio = TXSCH_TL1_DFLT_RR_PRIO; + + otx2_tm_dbg("Schedule config node %s(%u) lvl %u id %u, " + "prio 0x%" PRIx64 ", rr_quantum 0x%" PRIx64 " (%p)", + nix_hwlvl2str(tm_node->hw_lvl), schq, tm_node->lvl, + tm_node->id, strict_prio, rr_quantum, tm_node); + + switch (hw_lvl) { + case NIX_TXSCH_LVL_SMQ: + reg[k] = NIX_AF_MDQX_SCHEDULE(schq); + regval[k] = (strict_prio << 24) | rr_quantum; + k++; + + break; + case NIX_TXSCH_LVL_TL4: + reg[k] = NIX_AF_TL4X_SCHEDULE(schq); + regval[k] = (strict_prio << 24) | rr_quantum; + k++; + + break; + case NIX_TXSCH_LVL_TL3: + reg[k] = NIX_AF_TL3X_SCHEDULE(schq); + regval[k] = (strict_prio << 24) | rr_quantum; + k++; + + break; + case NIX_TXSCH_LVL_TL2: + reg[k] = NIX_AF_TL2X_SCHEDULE(schq); + regval[k] = (strict_prio << 24) | rr_quantum; + k++; + + break; + case NIX_TXSCH_LVL_TL1: + reg[k] = NIX_AF_TL1X_SCHEDULE(schq); + regval[k] = rr_quantum; + k++; + + break; + } + + return k; +} + +static uint8_t +prepare_tm_shaper_reg(struct otx2_nix_tm_node *tm_node, + struct otx2_nix_tm_shaper_profile *profile, + volatile uint64_t *reg, volatile uint64_t *regval) +{ + struct shaper_params cir, pir; + uint32_t schq = tm_node->hw_id; + uint8_t k = 0; + + memset(&cir, 0, sizeof(cir)); + memset(&pir, 0, sizeof(pir)); + shaper_config_to_nix(profile, &cir, &pir); + + otx2_tm_dbg("Shaper config node %s(%u) lvl %u id %u, " + "pir %" PRIu64 "(%" PRIu64 "B)," + " cir %" PRIu64 "(%" PRIu64 "B) (%p)", + nix_hwlvl2str(tm_node->hw_lvl), schq, tm_node->lvl, + tm_node->id, pir.rate, pir.burst, + cir.rate, cir.burst, tm_node); + + switch (tm_node->hw_lvl) { + case NIX_TXSCH_LVL_SMQ: + /* Configure PIR, CIR */ + reg[k] = NIX_AF_MDQX_PIR(schq); + regval[k] = (pir.rate && pir.burst) ? + (shaper2regval(&pir) | 1) : 0; + k++; + + reg[k] = NIX_AF_MDQX_CIR(schq); + regval[k] = (cir.rate && cir.burst) ? 
+ (shaper2regval(&cir) | 1) : 0; + k++; + + /* Configure RED ALG */ + reg[k] = NIX_AF_MDQX_SHAPE(schq); + regval[k] = ((uint64_t)tm_node->red_algo << 9); + k++; + break; + case NIX_TXSCH_LVL_TL4: + /* Configure PIR, CIR */ + reg[k] = NIX_AF_TL4X_PIR(schq); + regval[k] = (pir.rate && pir.burst) ? + (shaper2regval(&pir) | 1) : 0; + k++; + + reg[k] = NIX_AF_TL4X_CIR(schq); + regval[k] = (cir.rate && cir.burst) ? + (shaper2regval(&cir) | 1) : 0; + k++; + + /* Configure RED algo */ + reg[k] = NIX_AF_TL4X_SHAPE(schq); + regval[k] = ((uint64_t)tm_node->red_algo << 9); + k++; + break; + case NIX_TXSCH_LVL_TL3: + /* Configure PIR, CIR */ + reg[k] = NIX_AF_TL3X_PIR(schq); + regval[k] = (pir.rate && pir.burst) ? + (shaper2regval(&pir) | 1) : 0; + k++; + + reg[k] = NIX_AF_TL3X_CIR(schq); + regval[k] = (cir.rate && cir.burst) ? + (shaper2regval(&cir) | 1) : 0; + k++; + + /* Configure RED algo */ + reg[k] = NIX_AF_TL3X_SHAPE(schq); + regval[k] = ((uint64_t)tm_node->red_algo << 9); + k++; + + break; + case NIX_TXSCH_LVL_TL2: + /* Configure PIR, CIR */ + reg[k] = NIX_AF_TL2X_PIR(schq); + regval[k] = (pir.rate && pir.burst) ? + (shaper2regval(&pir) | 1) : 0; + k++; + + reg[k] = NIX_AF_TL2X_CIR(schq); + regval[k] = (cir.rate && cir.burst) ? + (shaper2regval(&cir) | 1) : 0; + k++; + + /* Configure RED algo */ + reg[k] = NIX_AF_TL2X_SHAPE(schq); + regval[k] = ((uint64_t)tm_node->red_algo << 9); + k++; + + break; + case NIX_TXSCH_LVL_TL1: + /* Configure CIR */ + reg[k] = NIX_AF_TL1X_CIR(schq); + regval[k] = (cir.rate && cir.burst) ? + (shaper2regval(&cir) | 1) : 0; + k++; + break; + } + + return k; +} + +static uint8_t +prepare_tm_sw_xoff(struct otx2_nix_tm_node *tm_node, bool enable, + volatile uint64_t *reg, volatile uint64_t *regval) +{ + uint32_t hw_lvl = tm_node->hw_lvl; + uint32_t schq = tm_node->hw_id; + uint8_t k = 0; + + otx2_tm_dbg("sw xoff config node %s(%u) lvl %u id %u, enable %u (%p)", + nix_hwlvl2str(hw_lvl), schq, tm_node->lvl, + tm_node->id, enable, tm_node); + + regval[k] = enable; + + switch (hw_lvl) { + case NIX_TXSCH_LVL_MDQ: + reg[k] = NIX_AF_MDQX_SW_XOFF(schq); + k++; + break; + case NIX_TXSCH_LVL_TL4: + reg[k] = NIX_AF_TL4X_SW_XOFF(schq); + k++; + break; + case NIX_TXSCH_LVL_TL3: + reg[k] = NIX_AF_TL3X_SW_XOFF(schq); + k++; + break; + case NIX_TXSCH_LVL_TL2: + reg[k] = NIX_AF_TL2X_SW_XOFF(schq); + k++; + break; + case NIX_TXSCH_LVL_TL1: + reg[k] = NIX_AF_TL1X_SW_XOFF(schq); + k++; + break; + default: + break; + } + + return k; +} + +static int +populate_tm_reg(struct otx2_eth_dev *dev, + struct otx2_nix_tm_node *tm_node) +{ + struct otx2_nix_tm_shaper_profile *profile; + uint64_t regval_mask[MAX_REGS_PER_MBOX_MSG]; + uint64_t regval[MAX_REGS_PER_MBOX_MSG]; + uint64_t reg[MAX_REGS_PER_MBOX_MSG]; + struct otx2_mbox *mbox = dev->mbox; + uint64_t parent = 0, child = 0; + uint32_t hw_lvl, rr_prio, schq; + struct nix_txschq_config *req; + int rc = -EFAULT; + uint8_t k = 0; + + memset(regval_mask, 0, sizeof(regval_mask)); + profile = nix_tm_shaper_profile_search(dev, + tm_node->params.shaper_profile_id); + rr_prio = tm_node->rr_prio; + hw_lvl = tm_node->hw_lvl; + schq = tm_node->hw_id; + + /* Root node will not have a parent node */ + if (hw_lvl == dev->otx2_tm_root_lvl) + parent = tm_node->parent_hw_id; + else + parent = tm_node->parent->hw_id; + + /* Do we need this trigger to configure TL1 */ + if (dev->otx2_tm_root_lvl == NIX_TXSCH_LVL_TL2 && + hw_lvl == dev->otx2_tm_root_lvl) { + rc = populate_tm_tl1_default(dev, parent); + if (rc) + goto error; + } + + if (hw_lvl != NIX_TXSCH_LVL_SMQ) + 
child = find_prio_anchor(dev, tm_node->id); + + /* Override default rr_prio when TL1 + * Static Priority is disabled + */ + if (hw_lvl == NIX_TXSCH_LVL_TL1 && + dev->tm_flags & NIX_TM_TL1_NO_SP) { + rr_prio = TXSCH_TL1_DFLT_RR_PRIO; + child = 0; + } + + otx2_tm_dbg("Topology config node %s(%u)->%s(%"PRIu64") lvl %u, id %u" + " prio_anchor %"PRIu64" rr_prio %u (%p)", + nix_hwlvl2str(hw_lvl), schq, nix_hwlvl2str(hw_lvl + 1), + parent, tm_node->lvl, tm_node->id, child, rr_prio, tm_node); + + /* Prepare Topology and Link config */ + switch (hw_lvl) { + case NIX_TXSCH_LVL_SMQ: + + /* Set xoff which will be cleared later and minimum length + * which will be used for zero padding if packet length is + * smaller + */ + reg[k] = NIX_AF_SMQX_CFG(schq); + regval[k] = BIT_ULL(50) | NIX_MIN_HW_FRS; + regval_mask[k] = ~(BIT_ULL(50) | 0x7f); + k++; + + /* Parent and schedule conf */ + reg[k] = NIX_AF_MDQX_PARENT(schq); + regval[k] = parent << 16; + k++; + + break; + case NIX_TXSCH_LVL_TL4: + /* Parent and schedule conf */ + reg[k] = NIX_AF_TL4X_PARENT(schq); + regval[k] = parent << 16; + k++; + + reg[k] = NIX_AF_TL4X_TOPOLOGY(schq); + regval[k] = (child << 32) | (rr_prio << 1); + k++; + + /* Configure TL4 to send to SDP channel instead of CGX/LBK */ + if (otx2_dev_is_sdp(dev)) { + reg[k] = NIX_AF_TL4X_SDP_LINK_CFG(schq); + regval[k] = BIT_ULL(12); + k++; + } + break; + case NIX_TXSCH_LVL_TL3: + /* Parent and schedule conf */ + reg[k] = NIX_AF_TL3X_PARENT(schq); + regval[k] = parent << 16; + k++; + + reg[k] = NIX_AF_TL3X_TOPOLOGY(schq); + regval[k] = (child << 32) | (rr_prio << 1); + k++; + + /* Link configuration */ + if (!otx2_dev_is_sdp(dev) && + dev->link_cfg_lvl == NIX_TXSCH_LVL_TL3) { + reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, + otx2_nix_get_link(dev)); + regval[k] = BIT_ULL(12) | nix_get_relchan(dev); + k++; + } + + break; + case NIX_TXSCH_LVL_TL2: + /* Parent and schedule conf */ + reg[k] = NIX_AF_TL2X_PARENT(schq); + regval[k] = parent << 16; + k++; + + reg[k] = NIX_AF_TL2X_TOPOLOGY(schq); + regval[k] = (child << 32) | (rr_prio << 1); + k++; + + /* Link configuration */ + if (!otx2_dev_is_sdp(dev) && + dev->link_cfg_lvl == NIX_TXSCH_LVL_TL2) { + reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, + otx2_nix_get_link(dev)); + regval[k] = BIT_ULL(12) | nix_get_relchan(dev); + k++; + } + + break; + case NIX_TXSCH_LVL_TL1: + reg[k] = NIX_AF_TL1X_TOPOLOGY(schq); + regval[k] = (child << 32) | (rr_prio << 1 /*RR_PRIO*/); + k++; + + break; + } + + /* Prepare schedule config */ + k += prepare_tm_sched_reg(dev, tm_node, ®[k], ®val[k]); + + /* Prepare shaping config */ + k += prepare_tm_shaper_reg(tm_node, profile, ®[k], ®val[k]); + + if (!k) + return 0; + + /* Copy and send config mbox */ + req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox); + req->lvl = hw_lvl; + req->num_regs = k; + + otx2_mbox_memcpy(req->reg, reg, sizeof(uint64_t) * k); + otx2_mbox_memcpy(req->regval, regval, sizeof(uint64_t) * k); + otx2_mbox_memcpy(req->regval_mask, regval_mask, sizeof(uint64_t) * k); + + rc = otx2_mbox_process(mbox); + if (rc) + goto error; + + return 0; +error: + otx2_err("Txschq cfg request failed for node %p, rc=%d", tm_node, rc); + return rc; +} + + +static int +nix_tm_txsch_reg_config(struct otx2_eth_dev *dev) +{ + struct otx2_nix_tm_node *tm_node; + uint32_t hw_lvl; + int rc = 0; + + for (hw_lvl = 0; hw_lvl <= dev->otx2_tm_root_lvl; hw_lvl++) { + TAILQ_FOREACH(tm_node, &dev->node_list, node) { + if (tm_node->hw_lvl == hw_lvl && + tm_node->hw_lvl != NIX_TXSCH_LVL_CNT) { + rc = populate_tm_reg(dev, tm_node); + if (rc) + goto 
exit; + } + } + } +exit: + return rc; +} + +static struct otx2_nix_tm_node * +nix_tm_node_search(struct otx2_eth_dev *dev, + uint32_t node_id, bool user) +{ + struct otx2_nix_tm_node *tm_node; + + TAILQ_FOREACH(tm_node, &dev->node_list, node) { + if (tm_node->id == node_id && + (user == !!(tm_node->flags & NIX_TM_NODE_USER))) + return tm_node; + } + return NULL; +} + +static uint32_t +check_rr(struct otx2_eth_dev *dev, uint32_t priority, uint32_t parent_id) +{ + struct otx2_nix_tm_node *tm_node; + uint32_t rr_num = 0; + + TAILQ_FOREACH(tm_node, &dev->node_list, node) { + if (!tm_node->parent) + continue; + + if (!(tm_node->parent->id == parent_id)) + continue; + + if (tm_node->priority == priority) + rr_num++; + } + return rr_num; +} + +static int +nix_tm_update_parent_info(struct otx2_eth_dev *dev) +{ + struct otx2_nix_tm_node *tm_node_child; + struct otx2_nix_tm_node *tm_node; + struct otx2_nix_tm_node *parent; + uint32_t rr_num = 0; + uint32_t priority; + + TAILQ_FOREACH(tm_node, &dev->node_list, node) { + if (!tm_node->parent) + continue; + /* Count group of children of same priority i.e are RR */ + parent = tm_node->parent; + priority = tm_node->priority; + rr_num = check_rr(dev, priority, parent->id); + + /* Assuming that multiple RR groups are + * not configured based on capability. + */ + if (rr_num > 1) { + parent->rr_prio = priority; + parent->rr_num = rr_num; + } + + /* Find out static priority children that are not in RR */ + TAILQ_FOREACH(tm_node_child, &dev->node_list, node) { + if (!tm_node_child->parent) + continue; + if (parent->id != tm_node_child->parent->id) + continue; + if (parent->max_prio == UINT32_MAX && + tm_node_child->priority != parent->rr_prio) + parent->max_prio = 0; + + if (parent->max_prio < tm_node_child->priority && + parent->rr_prio != tm_node_child->priority) + parent->max_prio = tm_node_child->priority; + } + } + + return 0; +} + +static int +nix_tm_node_add_to_list(struct otx2_eth_dev *dev, uint32_t node_id, + uint32_t parent_node_id, uint32_t priority, + uint32_t weight, uint16_t hw_lvl, + uint16_t lvl, bool user, + struct rte_tm_node_params *params) +{ + struct otx2_nix_tm_shaper_profile *profile; + struct otx2_nix_tm_node *tm_node, *parent_node; + uint32_t profile_id; + + profile_id = params->shaper_profile_id; + profile = nix_tm_shaper_profile_search(dev, profile_id); + + parent_node = nix_tm_node_search(dev, parent_node_id, user); + + tm_node = rte_zmalloc("otx2_nix_tm_node", + sizeof(struct otx2_nix_tm_node), 0); + if (!tm_node) + return -ENOMEM; + + tm_node->lvl = lvl; + tm_node->hw_lvl = hw_lvl; + + /* Maintain minimum weight */ + if (!weight) + weight = 1; + + tm_node->id = node_id; + tm_node->priority = priority; + tm_node->weight = weight; + tm_node->rr_prio = 0xf; + tm_node->max_prio = UINT32_MAX; + tm_node->hw_id = UINT32_MAX; + tm_node->flags = 0; + if (user) + tm_node->flags = NIX_TM_NODE_USER; + rte_memcpy(&tm_node->params, params, sizeof(struct rte_tm_node_params)); + + if (profile) + profile->reference_count++; + + tm_node->parent = parent_node; + tm_node->parent_hw_id = UINT32_MAX; + shaper_default_red_algo(dev, tm_node, profile); + + TAILQ_INSERT_TAIL(&dev->node_list, tm_node, node); + + return 0; +} + +static int +nix_tm_clear_shaper_profiles(struct otx2_eth_dev *dev) +{ + struct otx2_nix_tm_shaper_profile *shaper_profile; + + while ((shaper_profile = TAILQ_FIRST(&dev->shaper_profile_list))) { + if (shaper_profile->reference_count) + otx2_tm_dbg("Shaper profile %u has non zero references", + shaper_profile->shaper_profile_id); + 
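+		/* Note: the profile is unlinked and freed below even when
+		 * references remain; a non-zero reference count only triggers
+		 * the debug message above.
+		 */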
TAILQ_REMOVE(&dev->shaper_profile_list, shaper_profile, shaper); + rte_free(shaper_profile); + } + + return 0; +} + +static int +nix_clear_path_xoff(struct otx2_eth_dev *dev, + struct otx2_nix_tm_node *tm_node) +{ + struct nix_txschq_config *req; + struct otx2_nix_tm_node *p; + int rc; + + /* Manipulating SW_XOFF not supported on Ax */ + if (otx2_dev_is_Ax(dev)) + return 0; + + /* Enable nodes in path for flush to succeed */ + if (!nix_tm_is_leaf(dev, tm_node->lvl)) + p = tm_node; + else + p = tm_node->parent; + while (p) { + if (!(p->flags & NIX_TM_NODE_ENABLED) && + (p->flags & NIX_TM_NODE_HWRES)) { + req = otx2_mbox_alloc_msg_nix_txschq_cfg(dev->mbox); + req->lvl = p->hw_lvl; + req->num_regs = prepare_tm_sw_xoff(p, false, req->reg, + req->regval); + rc = otx2_mbox_process(dev->mbox); + if (rc) + return rc; + + p->flags |= NIX_TM_NODE_ENABLED; + } + p = p->parent; + } + + return 0; +} + +static int +nix_smq_xoff(struct otx2_eth_dev *dev, + struct otx2_nix_tm_node *tm_node, + bool enable) +{ + struct otx2_mbox *mbox = dev->mbox; + struct nix_txschq_config *req; + uint16_t smq; + int rc; + + smq = tm_node->hw_id; + otx2_tm_dbg("Setting SMQ %u XOFF/FLUSH to %s", smq, + enable ? "enable" : "disable"); + + rc = nix_clear_path_xoff(dev, tm_node); + if (rc) + return rc; + + req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox); + req->lvl = NIX_TXSCH_LVL_SMQ; + req->num_regs = 1; + + req->reg[0] = NIX_AF_SMQX_CFG(smq); + req->regval[0] = enable ? (BIT_ULL(50) | BIT_ULL(49)) : 0; + req->regval_mask[0] = enable ? + ~(BIT_ULL(50) | BIT_ULL(49)) : ~BIT_ULL(50); + + return otx2_mbox_process(mbox); +} + +int +otx2_nix_sq_sqb_aura_fc(void *__txq, bool enable) +{ + struct otx2_eth_txq *txq = __txq; + struct npa_aq_enq_req *req; + struct npa_aq_enq_rsp *rsp; + struct otx2_npa_lf *lf; + struct otx2_mbox *mbox; + uint64_t aura_handle; + int rc; + + otx2_tm_dbg("Setting SQ %u SQB aura FC to %s", txq->sq, + enable ? 
"enable" : "disable"); + + lf = otx2_npa_lf_obj_get(); + if (!lf) + return -EFAULT; + mbox = lf->mbox; + /* Set/clear sqb aura fc_ena */ + aura_handle = txq->sqb_pool->pool_id; + req = otx2_mbox_alloc_msg_npa_aq_enq(mbox); + + req->aura_id = npa_lf_aura_handle_to_aura(aura_handle); + req->ctype = NPA_AQ_CTYPE_AURA; + req->op = NPA_AQ_INSTOP_WRITE; + /* Below is not needed for aura writes but AF driver needs it */ + /* AF will translate to associated poolctx */ + req->aura.pool_addr = req->aura_id; + + req->aura.fc_ena = enable; + req->aura_mask.fc_ena = 1; + + rc = otx2_mbox_process(mbox); + if (rc) + return rc; + + /* Read back npa aura ctx */ + req = otx2_mbox_alloc_msg_npa_aq_enq(mbox); + + req->aura_id = npa_lf_aura_handle_to_aura(aura_handle); + req->ctype = NPA_AQ_CTYPE_AURA; + req->op = NPA_AQ_INSTOP_READ; + + rc = otx2_mbox_process_msg(mbox, (void *)&rsp); + if (rc) + return rc; + + /* Init when enabled as there might be no triggers */ + if (enable) + *(volatile uint64_t *)txq->fc_mem = rsp->aura.count; + else + *(volatile uint64_t *)txq->fc_mem = txq->nb_sqb_bufs; + /* Sync write barrier */ + rte_wmb(); + + return 0; +} + +static int +nix_txq_flush_sq_spin(struct otx2_eth_txq *txq) +{ + uint16_t sqb_cnt, head_off, tail_off; + struct otx2_eth_dev *dev = txq->dev; + uint64_t wdata, val, prev; + uint16_t sq = txq->sq; + int64_t *regaddr; + uint64_t timeout;/* 10's of usec */ + + /* Wait for enough time based on shaper min rate */ + timeout = (txq->qconf.nb_desc * NIX_MAX_HW_FRS * 8 * 1E5); + timeout = timeout / dev->tm_rate_min; + if (!timeout) + timeout = 10000; + + wdata = ((uint64_t)sq << 32); + regaddr = (int64_t *)(dev->base + NIX_LF_SQ_OP_STATUS); + val = otx2_atomic64_add_nosync(wdata, regaddr); + + /* Spin multiple iterations as "txq->fc_cache_pkts" can still + * have space to send pkts even though fc_mem is disabled + */ + + while (true) { + prev = val; + rte_delay_us(10); + val = otx2_atomic64_add_nosync(wdata, regaddr); + /* Continue on error */ + if (val & BIT_ULL(63)) + continue; + + if (prev != val) + continue; + + sqb_cnt = val & 0xFFFF; + head_off = (val >> 20) & 0x3F; + tail_off = (val >> 28) & 0x3F; + + /* SQ reached quiescent state */ + if (sqb_cnt <= 1 && head_off == tail_off && + (*txq->fc_mem == txq->nb_sqb_bufs)) { + break; + } + + /* Timeout */ + if (!timeout) + goto exit; + timeout--; + } + + return 0; +exit: + otx2_nix_tm_dump(dev); + return -EFAULT; +} + +/* Flush and disable tx queue and its parent SMQ */ +int otx2_nix_sq_flush_pre(void *_txq, bool dev_started) +{ + struct otx2_nix_tm_node *tm_node, *sibling; + struct otx2_eth_txq *txq; + struct otx2_eth_dev *dev; + uint16_t sq; + bool user; + int rc; + + txq = _txq; + dev = txq->dev; + sq = txq->sq; + + user = !!(dev->tm_flags & NIX_TM_COMMITTED); + + /* Find the node for this SQ */ + tm_node = nix_tm_node_search(dev, sq, user); + if (!tm_node || !(tm_node->flags & NIX_TM_NODE_ENABLED)) { + otx2_err("Invalid node/state for sq %u", sq); + return -EFAULT; + } + + /* Enable CGX RXTX to drain pkts */ + if (!dev_started) { + /* Though it enables both RX MCAM Entries and CGX Link + * we assume all the rx queues are stopped way back. 
+ */ + otx2_mbox_alloc_msg_nix_lf_start_rx(dev->mbox); + rc = otx2_mbox_process(dev->mbox); + if (rc) { + otx2_err("cgx start failed, rc=%d", rc); + return rc; + } + } + + /* Disable smq xoff for case it was enabled earlier */ + rc = nix_smq_xoff(dev, tm_node->parent, false); + if (rc) { + otx2_err("Failed to enable smq %u, rc=%d", + tm_node->parent->hw_id, rc); + return rc; + } + + /* As per HRM, to disable an SQ, all other SQ's + * that feed to same SMQ must be paused before SMQ flush. + */ + TAILQ_FOREACH(sibling, &dev->node_list, node) { + if (sibling->parent != tm_node->parent) + continue; + if (!(sibling->flags & NIX_TM_NODE_ENABLED)) + continue; + + sq = sibling->id; + txq = dev->eth_dev->data->tx_queues[sq]; + if (!txq) + continue; + + rc = otx2_nix_sq_sqb_aura_fc(txq, false); + if (rc) { + otx2_err("Failed to disable sqb aura fc, rc=%d", rc); + goto cleanup; + } + + /* Wait for sq entries to be flushed */ + rc = nix_txq_flush_sq_spin(txq); + if (rc) { + otx2_err("Failed to drain sq %u, rc=%d\n", txq->sq, rc); + return rc; + } + } + + tm_node->flags &= ~NIX_TM_NODE_ENABLED; + + /* Disable and flush */ + rc = nix_smq_xoff(dev, tm_node->parent, true); + if (rc) { + otx2_err("Failed to disable smq %u, rc=%d", + tm_node->parent->hw_id, rc); + goto cleanup; + } +cleanup: + /* Restore cgx state */ + if (!dev_started) { + otx2_mbox_alloc_msg_nix_lf_stop_rx(dev->mbox); + rc |= otx2_mbox_process(dev->mbox); + } + + return rc; +} + +int otx2_nix_sq_flush_post(void *_txq) +{ + struct otx2_nix_tm_node *tm_node, *sibling; + struct otx2_eth_txq *txq = _txq; + struct otx2_eth_txq *s_txq; + struct otx2_eth_dev *dev; + bool once = false; + uint16_t sq, s_sq; + bool user; + int rc; + + dev = txq->dev; + sq = txq->sq; + user = !!(dev->tm_flags & NIX_TM_COMMITTED); + + /* Find the node for this SQ */ + tm_node = nix_tm_node_search(dev, sq, user); + if (!tm_node) { + otx2_err("Invalid node for sq %u", sq); + return -EFAULT; + } + + /* Enable all the siblings back */ + TAILQ_FOREACH(sibling, &dev->node_list, node) { + if (sibling->parent != tm_node->parent) + continue; + + if (sibling->id == sq) + continue; + + if (!(sibling->flags & NIX_TM_NODE_ENABLED)) + continue; + + s_sq = sibling->id; + s_txq = dev->eth_dev->data->tx_queues[s_sq]; + if (!s_txq) + continue; + + if (!once) { + /* Enable back if any SQ is still present */ + rc = nix_smq_xoff(dev, tm_node->parent, false); + if (rc) { + otx2_err("Failed to enable smq %u, rc=%d", + tm_node->parent->hw_id, rc); + return rc; + } + once = true; + } + + rc = otx2_nix_sq_sqb_aura_fc(s_txq, true); + if (rc) { + otx2_err("Failed to enable sqb aura fc, rc=%d", rc); + return rc; + } + } + + return 0; +} + +static int +nix_sq_sched_data(struct otx2_eth_dev *dev, + struct otx2_nix_tm_node *tm_node, + bool rr_quantum_only) +{ + struct rte_eth_dev *eth_dev = dev->eth_dev; + struct otx2_mbox *mbox = dev->mbox; + uint16_t sq = tm_node->id, smq; + struct nix_aq_enq_req *req; + uint64_t rr_quantum; + int rc; + + smq = tm_node->parent->hw_id; + rr_quantum = NIX_TM_WEIGHT_TO_RR_QUANTUM(tm_node->weight); + + if (rr_quantum_only) + otx2_tm_dbg("Update sq(%u) rr_quantum 0x%"PRIx64, sq, rr_quantum); + else + otx2_tm_dbg("Enabling sq(%u)->smq(%u), rr_quantum 0x%"PRIx64, + sq, smq, rr_quantum); + + if (sq > eth_dev->data->nb_tx_queues) + return -EFAULT; + + req = otx2_mbox_alloc_msg_nix_aq_enq(mbox); + req->qidx = sq; + req->ctype = NIX_AQ_CTYPE_SQ; + req->op = NIX_AQ_INSTOP_WRITE; + + /* smq update only when needed */ + if (!rr_quantum_only) { + req->sq.smq = smq; + 
req->sq_mask.smq = ~req->sq_mask.smq; + } + req->sq.smq_rr_quantum = rr_quantum; + req->sq_mask.smq_rr_quantum = ~req->sq_mask.smq_rr_quantum; + + rc = otx2_mbox_process(mbox); + if (rc) + otx2_err("Failed to set smq, rc=%d", rc); + return rc; +} + +int otx2_nix_sq_enable(void *_txq) +{ + struct otx2_eth_txq *txq = _txq; + int rc; + + /* Enable sqb_aura fc */ + rc = otx2_nix_sq_sqb_aura_fc(txq, true); + if (rc) { + otx2_err("Failed to enable sqb aura fc, rc=%d", rc); + return rc; + } + + return 0; +} + +static int +nix_tm_free_resources(struct otx2_eth_dev *dev, uint32_t flags_mask, + uint32_t flags, bool hw_only) +{ + struct otx2_nix_tm_shaper_profile *profile; + struct otx2_nix_tm_node *tm_node, *next_node; + struct otx2_mbox *mbox = dev->mbox; + struct nix_txsch_free_req *req; + uint32_t profile_id; + int rc = 0; + + next_node = TAILQ_FIRST(&dev->node_list); + while (next_node) { + tm_node = next_node; + next_node = TAILQ_NEXT(tm_node, node); + + /* Check for only requested nodes */ + if ((tm_node->flags & flags_mask) != flags) + continue; + + if (!nix_tm_is_leaf(dev, tm_node->lvl) && + tm_node->hw_lvl != NIX_TXSCH_LVL_TL1 && + tm_node->flags & NIX_TM_NODE_HWRES) { + /* Free specific HW resource */ + otx2_tm_dbg("Free hwres %s(%u) lvl %u id %u (%p)", + nix_hwlvl2str(tm_node->hw_lvl), + tm_node->hw_id, tm_node->lvl, + tm_node->id, tm_node); + + rc = nix_clear_path_xoff(dev, tm_node); + if (rc) + return rc; + + req = otx2_mbox_alloc_msg_nix_txsch_free(mbox); + req->flags = 0; + req->schq_lvl = tm_node->hw_lvl; + req->schq = tm_node->hw_id; + rc = otx2_mbox_process(mbox); + if (rc) + return rc; + tm_node->flags &= ~NIX_TM_NODE_HWRES; + } + + /* Leave software elements if needed */ + if (hw_only) + continue; + + otx2_tm_dbg("Free node lvl %u id %u (%p)", + tm_node->lvl, tm_node->id, tm_node); + + profile_id = tm_node->params.shaper_profile_id; + profile = nix_tm_shaper_profile_search(dev, profile_id); + if (profile) + profile->reference_count--; + + TAILQ_REMOVE(&dev->node_list, tm_node, node); + rte_free(tm_node); + } + + if (!flags_mask) { + /* Free all hw resources */ + req = otx2_mbox_alloc_msg_nix_txsch_free(mbox); + req->flags = TXSCHQ_FREE_ALL; + + return otx2_mbox_process(mbox); + } + + return rc; +} + +static uint8_t +nix_tm_copy_rsp_to_dev(struct otx2_eth_dev *dev, + struct nix_txsch_alloc_rsp *rsp) +{ + uint16_t schq; + uint8_t lvl; + + for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { + for (schq = 0; schq < MAX_TXSCHQ_PER_FUNC; schq++) { + dev->txschq_list[lvl][schq] = rsp->schq_list[lvl][schq]; + dev->txschq_contig_list[lvl][schq] = + rsp->schq_contig_list[lvl][schq]; + } + + dev->txschq[lvl] = rsp->schq[lvl]; + dev->txschq_contig[lvl] = rsp->schq_contig[lvl]; + } + return 0; +} + +static int +nix_tm_assign_id_to_node(struct otx2_eth_dev *dev, + struct otx2_nix_tm_node *child, + struct otx2_nix_tm_node *parent) +{ + uint32_t hw_id, schq_con_index, prio_offset; + uint32_t l_id, schq_index; + + otx2_tm_dbg("Assign hw id for child node %s lvl %u id %u (%p)", + nix_hwlvl2str(child->hw_lvl), child->lvl, child->id, child); + + child->flags |= NIX_TM_NODE_HWRES; + + /* Process root nodes */ + if (dev->otx2_tm_root_lvl == NIX_TXSCH_LVL_TL2 && + child->hw_lvl == dev->otx2_tm_root_lvl && !parent) { + int idx = 0; + uint32_t tschq_con_index; + + l_id = child->hw_lvl; + tschq_con_index = dev->txschq_contig_index[l_id]; + hw_id = dev->txschq_contig_list[l_id][tschq_con_index]; + child->hw_id = hw_id; + dev->txschq_contig_index[l_id]++; + /* Update TL1 hw_id for its parent for config purpose 
*/ + idx = dev->txschq_index[NIX_TXSCH_LVL_TL1]++; + hw_id = dev->txschq_list[NIX_TXSCH_LVL_TL1][idx]; + child->parent_hw_id = hw_id; + return 0; + } + if (dev->otx2_tm_root_lvl == NIX_TXSCH_LVL_TL1 && + child->hw_lvl == dev->otx2_tm_root_lvl && !parent) { + uint32_t tschq_con_index; + + l_id = child->hw_lvl; + tschq_con_index = dev->txschq_index[l_id]; + hw_id = dev->txschq_list[l_id][tschq_con_index]; + child->hw_id = hw_id; + dev->txschq_index[l_id]++; + return 0; + } + + /* Process children with parents */ + l_id = child->hw_lvl; + schq_index = dev->txschq_index[l_id]; + schq_con_index = dev->txschq_contig_index[l_id]; + + if (child->priority == parent->rr_prio) { + hw_id = dev->txschq_list[l_id][schq_index]; + child->hw_id = hw_id; + child->parent_hw_id = parent->hw_id; + dev->txschq_index[l_id]++; + } else { + prio_offset = schq_con_index + child->priority; + hw_id = dev->txschq_contig_list[l_id][prio_offset]; + child->hw_id = hw_id; + } + return 0; +} + +static int +nix_tm_assign_hw_id(struct otx2_eth_dev *dev) +{ + struct otx2_nix_tm_node *parent, *child; + uint32_t child_hw_lvl, con_index_inc, i; + + for (i = NIX_TXSCH_LVL_TL1; i > 0; i--) { + TAILQ_FOREACH(parent, &dev->node_list, node) { + child_hw_lvl = parent->hw_lvl - 1; + if (parent->hw_lvl != i) + continue; + TAILQ_FOREACH(child, &dev->node_list, node) { + if (!child->parent) + continue; + if (child->parent->id != parent->id) + continue; + nix_tm_assign_id_to_node(dev, child, parent); + } + + con_index_inc = parent->max_prio + 1; + dev->txschq_contig_index[child_hw_lvl] += con_index_inc; + + /* + * Explicitly assign id to parent node if it + * doesn't have a parent + */ + if (parent->hw_lvl == dev->otx2_tm_root_lvl) + nix_tm_assign_id_to_node(dev, parent, NULL); + } + } + return 0; +} + +static uint8_t +nix_tm_count_req_schq(struct otx2_eth_dev *dev, + struct nix_txsch_alloc_req *req, uint8_t lvl) +{ + struct otx2_nix_tm_node *tm_node; + uint8_t contig_count; + + TAILQ_FOREACH(tm_node, &dev->node_list, node) { + if (lvl == tm_node->hw_lvl) { + req->schq[lvl - 1] += tm_node->rr_num; + if (tm_node->max_prio != UINT32_MAX) { + contig_count = tm_node->max_prio + 1; + req->schq_contig[lvl - 1] += contig_count; + } + } + if (lvl == dev->otx2_tm_root_lvl && + dev->otx2_tm_root_lvl && lvl == NIX_TXSCH_LVL_TL2 && + tm_node->hw_lvl == dev->otx2_tm_root_lvl) { + req->schq_contig[dev->otx2_tm_root_lvl]++; + } + } + + req->schq[NIX_TXSCH_LVL_TL1] = 1; + req->schq_contig[NIX_TXSCH_LVL_TL1] = 0; + + return 0; +} + +static int +nix_tm_prepare_txschq_req(struct otx2_eth_dev *dev, + struct nix_txsch_alloc_req *req) +{ + uint8_t i; + + for (i = NIX_TXSCH_LVL_TL1; i > 0; i--) + nix_tm_count_req_schq(dev, req, i); + + for (i = 0; i < NIX_TXSCH_LVL_CNT; i++) { + dev->txschq_index[i] = 0; + dev->txschq_contig_index[i] = 0; + } + return 0; +} + +static int +nix_tm_send_txsch_alloc_msg(struct otx2_eth_dev *dev) +{ + struct otx2_mbox *mbox = dev->mbox; + struct nix_txsch_alloc_req *req; + struct nix_txsch_alloc_rsp *rsp; + int rc; + + req = otx2_mbox_alloc_msg_nix_txsch_alloc(mbox); + + rc = nix_tm_prepare_txschq_req(dev, req); + if (rc) + return rc; + + rc = otx2_mbox_process_msg(mbox, (void *)&rsp); + if (rc) + return rc; + + nix_tm_copy_rsp_to_dev(dev, rsp); + dev->link_cfg_lvl = rsp->link_cfg_lvl; + + nix_tm_assign_hw_id(dev); + return 0; +} + +static int +nix_tm_alloc_resources(struct rte_eth_dev *eth_dev, bool xmit_enable) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct otx2_nix_tm_node *tm_node; + struct otx2_eth_txq 
*txq; + uint16_t sq; + int rc; + + nix_tm_update_parent_info(dev); + + rc = nix_tm_send_txsch_alloc_msg(dev); + if (rc) { + otx2_err("TM failed to alloc tm resources=%d", rc); + return rc; + } + + rc = nix_tm_txsch_reg_config(dev); + if (rc) { + otx2_err("TM failed to configure sched registers=%d", rc); + return rc; + } + + /* Trigger MTU recalculate as SMQ needs MTU conf */ + if (eth_dev->data->dev_started && eth_dev->data->nb_rx_queues) { + rc = otx2_nix_recalc_mtu(eth_dev); + if (rc) { + otx2_err("TM MTU update failed, rc=%d", rc); + return rc; + } + } + + /* Mark all non-leaf's as enabled */ + TAILQ_FOREACH(tm_node, &dev->node_list, node) { + if (!nix_tm_is_leaf(dev, tm_node->lvl)) + tm_node->flags |= NIX_TM_NODE_ENABLED; + } + + if (!xmit_enable) + return 0; + + /* Update SQ Sched Data while SQ is idle */ + TAILQ_FOREACH(tm_node, &dev->node_list, node) { + if (!nix_tm_is_leaf(dev, tm_node->lvl)) + continue; + + rc = nix_sq_sched_data(dev, tm_node, false); + if (rc) { + otx2_err("SQ %u sched update failed, rc=%d", + tm_node->id, rc); + return rc; + } + } + + /* Finally XON all SMQ's */ + TAILQ_FOREACH(tm_node, &dev->node_list, node) { + if (tm_node->hw_lvl != NIX_TXSCH_LVL_SMQ) + continue; + + rc = nix_smq_xoff(dev, tm_node, false); + if (rc) { + otx2_err("Failed to enable smq %u, rc=%d", + tm_node->hw_id, rc); + return rc; + } + } + + /* Enable xmit as all the topology is ready */ + TAILQ_FOREACH(tm_node, &dev->node_list, node) { + if (!nix_tm_is_leaf(dev, tm_node->lvl)) + continue; + + sq = tm_node->id; + txq = eth_dev->data->tx_queues[sq]; + + rc = otx2_nix_sq_enable(txq); + if (rc) { + otx2_err("TM sw xon failed on SQ %u, rc=%d", + tm_node->id, rc); + return rc; + } + tm_node->flags |= NIX_TM_NODE_ENABLED; + } + + return 0; +} + +static int +send_tm_reqval(struct otx2_mbox *mbox, + struct nix_txschq_config *req, + struct rte_tm_error *error) +{ + int rc; + + if (!req->num_regs || + req->num_regs > MAX_REGS_PER_MBOX_MSG) { + error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED; + error->message = "invalid config"; + return -EIO; + } + + rc = otx2_mbox_process(mbox); + if (rc) { + error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED; + error->message = "unexpected fatal error"; + } + return rc; +} + +static uint16_t +nix_tm_lvl2nix(struct otx2_eth_dev *dev, uint32_t lvl) +{ + if (nix_tm_have_tl1_access(dev)) { + switch (lvl) { + case OTX2_TM_LVL_ROOT: + return NIX_TXSCH_LVL_TL1; + case OTX2_TM_LVL_SCH1: + return NIX_TXSCH_LVL_TL2; + case OTX2_TM_LVL_SCH2: + return NIX_TXSCH_LVL_TL3; + case OTX2_TM_LVL_SCH3: + return NIX_TXSCH_LVL_TL4; + case OTX2_TM_LVL_SCH4: + return NIX_TXSCH_LVL_SMQ; + default: + return NIX_TXSCH_LVL_CNT; + } + } else { + switch (lvl) { + case OTX2_TM_LVL_ROOT: + return NIX_TXSCH_LVL_TL2; + case OTX2_TM_LVL_SCH1: + return NIX_TXSCH_LVL_TL3; + case OTX2_TM_LVL_SCH2: + return NIX_TXSCH_LVL_TL4; + case OTX2_TM_LVL_SCH3: + return NIX_TXSCH_LVL_SMQ; + default: + return NIX_TXSCH_LVL_CNT; + } + } +} + +static uint16_t +nix_max_prio(struct otx2_eth_dev *dev, uint16_t hw_lvl) +{ + if (hw_lvl >= NIX_TXSCH_LVL_CNT) + return 0; + + /* MDQ doesn't support SP */ + if (hw_lvl == NIX_TXSCH_LVL_MDQ) + return 0; + + /* PF's TL1 with VF's enabled doesn't support SP */ + if (hw_lvl == NIX_TXSCH_LVL_TL1 && + (dev->otx2_tm_root_lvl == NIX_TXSCH_LVL_TL2 || + (dev->tm_flags & NIX_TM_TL1_NO_SP))) + return 0; + + return TXSCH_TLX_SP_PRIO_MAX - 1; +} + + +static int +validate_prio(struct otx2_eth_dev *dev, uint32_t lvl, + uint32_t parent_id, uint32_t priority, + struct rte_tm_error *error) +{ + uint8_t 
priorities[TXSCH_TLX_SP_PRIO_MAX]; + struct otx2_nix_tm_node *tm_node; + uint32_t rr_num = 0; + int i; + + /* Validate priority against max */ + if (priority > nix_max_prio(dev, nix_tm_lvl2nix(dev, lvl - 1))) { + error->type = RTE_TM_ERROR_TYPE_CAPABILITIES; + error->message = "unsupported priority value"; + return -EINVAL; + } + + if (parent_id == RTE_TM_NODE_ID_NULL) + return 0; + + memset(priorities, 0, TXSCH_TLX_SP_PRIO_MAX); + priorities[priority] = 1; + + TAILQ_FOREACH(tm_node, &dev->node_list, node) { + if (!tm_node->parent) + continue; + + if (!(tm_node->flags & NIX_TM_NODE_USER)) + continue; + + if (tm_node->parent->id != parent_id) + continue; + + priorities[tm_node->priority]++; + } + + for (i = 0; i < TXSCH_TLX_SP_PRIO_MAX; i++) + if (priorities[i] > 1) + rr_num++; + + /* At max, one rr groups per parent */ + if (rr_num > 1) { + error->type = RTE_TM_ERROR_TYPE_NODE_PRIORITY; + error->message = "multiple DWRR node priority"; + return -EINVAL; + } + + /* Check for previous priority to avoid holes in priorities */ + if (priority && !priorities[priority - 1]) { + error->type = RTE_TM_ERROR_TYPE_NODE_PRIORITY; + error->message = "priority not in order"; + return -EINVAL; + } + + return 0; +} + +static int +read_tm_reg(struct otx2_mbox *mbox, uint64_t reg, + uint64_t *regval, uint32_t hw_lvl) +{ + volatile struct nix_txschq_config *req; + struct nix_txschq_config *rsp; + int rc; + + req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox); + req->read = 1; + req->lvl = hw_lvl; + req->reg[0] = reg; + req->num_regs = 1; + + rc = otx2_mbox_process_msg(mbox, (void **)&rsp); + if (rc) + return rc; + *regval = rsp->regval[0]; + return 0; +} + +/* Search for min rate in topology */ +static void +nix_tm_shaper_profile_update_min(struct otx2_eth_dev *dev) +{ + struct otx2_nix_tm_shaper_profile *profile; + uint64_t rate_min = 1E9; /* 1 Gbps */ + + TAILQ_FOREACH(profile, &dev->shaper_profile_list, shaper) { + if (profile->params.peak.rate && + profile->params.peak.rate < rate_min) + rate_min = profile->params.peak.rate; + + if (profile->params.committed.rate && + profile->params.committed.rate < rate_min) + rate_min = profile->params.committed.rate; + } + + dev->tm_rate_min = rate_min; +} + +static int +nix_xmit_disable(struct rte_eth_dev *eth_dev) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + uint16_t sq_cnt = eth_dev->data->nb_tx_queues; + uint16_t sqb_cnt, head_off, tail_off; + struct otx2_nix_tm_node *tm_node; + struct otx2_eth_txq *txq; + uint64_t wdata, val; + int i, rc; + + otx2_tm_dbg("Disabling xmit on %s", eth_dev->data->name); + + /* Enable CGX RXTX to drain pkts */ + if (!eth_dev->data->dev_started) { + otx2_mbox_alloc_msg_nix_lf_start_rx(dev->mbox); + rc = otx2_mbox_process(dev->mbox); + if (rc) + return rc; + } + + /* XON all SMQ's */ + TAILQ_FOREACH(tm_node, &dev->node_list, node) { + if (tm_node->hw_lvl != NIX_TXSCH_LVL_SMQ) + continue; + if (!(tm_node->flags & NIX_TM_NODE_HWRES)) + continue; + + rc = nix_smq_xoff(dev, tm_node, false); + if (rc) { + otx2_err("Failed to enable smq %u, rc=%d", + tm_node->hw_id, rc); + goto cleanup; + } + } + + /* Flush all tx queues */ + for (i = 0; i < sq_cnt; i++) { + txq = eth_dev->data->tx_queues[i]; + + rc = otx2_nix_sq_sqb_aura_fc(txq, false); + if (rc) { + otx2_err("Failed to disable sqb aura fc, rc=%d", rc); + goto cleanup; + } + + /* Wait for sq entries to be flushed */ + rc = nix_txq_flush_sq_spin(txq); + if (rc) { + otx2_err("Failed to drain sq, rc=%d\n", rc); + goto cleanup; + } + } + + /* XOFF & Flush all SMQ's. 
HRM mandates
+	 * all SQ's empty before SMQ flush is issued.
+	 */
+	TAILQ_FOREACH(tm_node, &dev->node_list, node) {
+		if (tm_node->hw_lvl != NIX_TXSCH_LVL_SMQ)
+			continue;
+		if (!(tm_node->flags & NIX_TM_NODE_HWRES))
+			continue;
+
+		rc = nix_smq_xoff(dev, tm_node, true);
+		if (rc) {
+			otx2_err("Failed to disable smq %u, rc=%d",
+				 tm_node->hw_id, rc);
+			goto cleanup;
+		}
+	}
+
+	/* Verify sanity of all tx queues */
+	for (i = 0; i < sq_cnt; i++) {
+		txq = eth_dev->data->tx_queues[i];
+
+		wdata = ((uint64_t)txq->sq << 32);
+		val = otx2_atomic64_add_nosync(wdata,
+			(int64_t *)(dev->base + NIX_LF_SQ_OP_STATUS));
+
+		sqb_cnt = val & 0xFFFF;
+		head_off = (val >> 20) & 0x3F;
+		tail_off = (val >> 28) & 0x3F;
+
+		if (sqb_cnt > 1 || head_off != tail_off ||
+		    (*txq->fc_mem != txq->nb_sqb_bufs))
+			otx2_err("Failed to gracefully flush sq %u", txq->sq);
+	}
+
+cleanup:
+	/* restore cgx state */
+	if (!eth_dev->data->dev_started) {
+		otx2_mbox_alloc_msg_nix_lf_stop_rx(dev->mbox);
+		rc |= otx2_mbox_process(dev->mbox);
+	}
+
+	return rc;
+}
+
+static int
+otx2_nix_tm_node_type_get(struct rte_eth_dev *eth_dev, uint32_t node_id,
+			  int *is_leaf, struct rte_tm_error *error)
+{
+	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+	struct otx2_nix_tm_node *tm_node;
+
+	if (is_leaf == NULL) {
+		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+		return -EINVAL;
+	}
+
+	tm_node = nix_tm_node_search(dev, node_id, true);
+	if (node_id == RTE_TM_NODE_ID_NULL || !tm_node) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+		return -EINVAL;
+	}
+	if (nix_tm_is_leaf(dev, tm_node->lvl))
+		*is_leaf = true;
+	else
+		*is_leaf = false;
+	return 0;
+}
+
+static int
+otx2_nix_tm_capa_get(struct rte_eth_dev *eth_dev,
+		     struct rte_tm_capabilities *cap,
+		     struct rte_tm_error *error)
+{
+	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+	struct otx2_mbox *mbox = dev->mbox;
+	int rc, max_nr_nodes = 0, i;
+	struct free_rsrcs_rsp *rsp;
+
+	memset(cap, 0, sizeof(*cap));
+
+	otx2_mbox_alloc_msg_free_rsrc_cnt(mbox);
+	rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+	if (rc) {
+		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+		error->message = "unexpected fatal error";
+		return rc;
+	}
+
+	for (i = 0; i < NIX_TXSCH_LVL_TL1; i++)
+		max_nr_nodes += rsp->schq[i];
+
+	cap->n_nodes_max = max_nr_nodes + dev->tm_leaf_cnt;
+	/* TL1 level is reserved for PF */
+	cap->n_levels_max = nix_tm_have_tl1_access(dev) ?
+ OTX2_TM_LVL_MAX : OTX2_TM_LVL_MAX - 1; + cap->non_leaf_nodes_identical = 1; + cap->leaf_nodes_identical = 1; + + /* Shaper Capabilities */ + cap->shaper_private_n_max = max_nr_nodes; + cap->shaper_n_max = max_nr_nodes; + cap->shaper_private_dual_rate_n_max = max_nr_nodes; + cap->shaper_private_rate_min = MIN_SHAPER_RATE / 8; + cap->shaper_private_rate_max = MAX_SHAPER_RATE / 8; + cap->shaper_pkt_length_adjust_min = 0; + cap->shaper_pkt_length_adjust_max = 0; + + /* Schedule Capabilities */ + cap->sched_n_children_max = rsp->schq[NIX_TXSCH_LVL_MDQ]; + cap->sched_sp_n_priorities_max = TXSCH_TLX_SP_PRIO_MAX; + cap->sched_wfq_n_children_per_group_max = cap->sched_n_children_max; + cap->sched_wfq_n_groups_max = 1; + cap->sched_wfq_weight_max = MAX_SCHED_WEIGHT; + + cap->dynamic_update_mask = + RTE_TM_UPDATE_NODE_PARENT_KEEP_LEVEL | + RTE_TM_UPDATE_NODE_SUSPEND_RESUME; + cap->stats_mask = + RTE_TM_STATS_N_PKTS | + RTE_TM_STATS_N_BYTES | + RTE_TM_STATS_N_PKTS_RED_DROPPED | + RTE_TM_STATS_N_BYTES_RED_DROPPED; + + for (i = 0; i < RTE_COLORS; i++) { + cap->mark_vlan_dei_supported[i] = false; + cap->mark_ip_ecn_tcp_supported[i] = false; + cap->mark_ip_dscp_supported[i] = false; + } + + return 0; +} + +static int +otx2_nix_tm_level_capa_get(struct rte_eth_dev *eth_dev, uint32_t lvl, + struct rte_tm_level_capabilities *cap, + struct rte_tm_error *error) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct otx2_mbox *mbox = dev->mbox; + struct free_rsrcs_rsp *rsp; + uint16_t hw_lvl; + int rc; + + memset(cap, 0, sizeof(*cap)); + + otx2_mbox_alloc_msg_free_rsrc_cnt(mbox); + rc = otx2_mbox_process_msg(mbox, (void *)&rsp); + if (rc) { + error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED; + error->message = "unexpected fatal error"; + return rc; + } + + hw_lvl = nix_tm_lvl2nix(dev, lvl); + + if (nix_tm_is_leaf(dev, lvl)) { + /* Leaf */ + cap->n_nodes_max = dev->tm_leaf_cnt; + cap->n_nodes_leaf_max = dev->tm_leaf_cnt; + cap->leaf_nodes_identical = 1; + cap->leaf.stats_mask = + RTE_TM_STATS_N_PKTS | + RTE_TM_STATS_N_BYTES; + + } else if (lvl == OTX2_TM_LVL_ROOT) { + /* Root node, aka TL2(vf)/TL1(pf) */ + cap->n_nodes_max = 1; + cap->n_nodes_nonleaf_max = 1; + cap->non_leaf_nodes_identical = 1; + + cap->nonleaf.shaper_private_supported = true; + cap->nonleaf.shaper_private_dual_rate_supported = + nix_tm_have_tl1_access(dev) ? 
false : true; + cap->nonleaf.shaper_private_rate_min = MIN_SHAPER_RATE / 8; + cap->nonleaf.shaper_private_rate_max = MAX_SHAPER_RATE / 8; + + cap->nonleaf.sched_n_children_max = rsp->schq[hw_lvl - 1]; + cap->nonleaf.sched_sp_n_priorities_max = + nix_max_prio(dev, hw_lvl) + 1; + cap->nonleaf.sched_wfq_n_groups_max = 1; + cap->nonleaf.sched_wfq_weight_max = MAX_SCHED_WEIGHT; + + if (nix_tm_have_tl1_access(dev)) + cap->nonleaf.stats_mask = + RTE_TM_STATS_N_PKTS_RED_DROPPED | + RTE_TM_STATS_N_BYTES_RED_DROPPED; + } else if ((lvl < OTX2_TM_LVL_MAX) && + (hw_lvl < NIX_TXSCH_LVL_CNT)) { + /* TL2, TL3, TL4, MDQ */ + cap->n_nodes_max = rsp->schq[hw_lvl]; + cap->n_nodes_nonleaf_max = cap->n_nodes_max; + cap->non_leaf_nodes_identical = 1; + + cap->nonleaf.shaper_private_supported = true; + cap->nonleaf.shaper_private_dual_rate_supported = true; + cap->nonleaf.shaper_private_rate_min = MIN_SHAPER_RATE / 8; + cap->nonleaf.shaper_private_rate_max = MAX_SHAPER_RATE / 8; + + /* MDQ doesn't support Strict Priority */ + if (hw_lvl == NIX_TXSCH_LVL_MDQ) + cap->nonleaf.sched_n_children_max = dev->tm_leaf_cnt; + else + cap->nonleaf.sched_n_children_max = + rsp->schq[hw_lvl - 1]; + cap->nonleaf.sched_sp_n_priorities_max = + nix_max_prio(dev, hw_lvl) + 1; + cap->nonleaf.sched_wfq_n_groups_max = 1; + cap->nonleaf.sched_wfq_weight_max = MAX_SCHED_WEIGHT; + } else { + /* unsupported level */ + error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED; + return rc; + } + return 0; +} + +static int +otx2_nix_tm_node_capa_get(struct rte_eth_dev *eth_dev, uint32_t node_id, + struct rte_tm_node_capabilities *cap, + struct rte_tm_error *error) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct otx2_mbox *mbox = dev->mbox; + struct otx2_nix_tm_node *tm_node; + struct free_rsrcs_rsp *rsp; + int rc, hw_lvl, lvl; + + memset(cap, 0, sizeof(*cap)); + + tm_node = nix_tm_node_search(dev, node_id, true); + if (!tm_node) { + error->type = RTE_TM_ERROR_TYPE_NODE_ID; + error->message = "no such node"; + return -EINVAL; + } + + hw_lvl = tm_node->hw_lvl; + lvl = tm_node->lvl; + + /* Leaf node */ + if (nix_tm_is_leaf(dev, lvl)) { + cap->stats_mask = RTE_TM_STATS_N_PKTS | + RTE_TM_STATS_N_BYTES; + return 0; + } + + otx2_mbox_alloc_msg_free_rsrc_cnt(mbox); + rc = otx2_mbox_process_msg(mbox, (void *)&rsp); + if (rc) { + error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED; + error->message = "unexpected fatal error"; + return rc; + } + + /* Non Leaf Shaper */ + cap->shaper_private_supported = true; + cap->shaper_private_dual_rate_supported = + (hw_lvl == NIX_TXSCH_LVL_TL1) ? 
false : true;
+	cap->shaper_private_rate_min = MIN_SHAPER_RATE / 8;
+	cap->shaper_private_rate_max = MAX_SHAPER_RATE / 8;
+
+	/* Non Leaf Scheduler */
+	if (hw_lvl == NIX_TXSCH_LVL_MDQ)
+		cap->nonleaf.sched_n_children_max = dev->tm_leaf_cnt;
+	else
+		cap->nonleaf.sched_n_children_max = rsp->schq[hw_lvl - 1];
+
+	cap->nonleaf.sched_sp_n_priorities_max = nix_max_prio(dev, hw_lvl) + 1;
+	cap->nonleaf.sched_wfq_n_children_per_group_max =
+		cap->nonleaf.sched_n_children_max;
+	cap->nonleaf.sched_wfq_n_groups_max = 1;
+	cap->nonleaf.sched_wfq_weight_max = MAX_SCHED_WEIGHT;
+
+	if (hw_lvl == NIX_TXSCH_LVL_TL1)
+		cap->stats_mask = RTE_TM_STATS_N_PKTS_RED_DROPPED |
+			RTE_TM_STATS_N_BYTES_RED_DROPPED;
+	return 0;
+}
+
+static int
+otx2_nix_tm_shaper_profile_add(struct rte_eth_dev *eth_dev,
+			       uint32_t profile_id,
+			       struct rte_tm_shaper_params *params,
+			       struct rte_tm_error *error)
+{
+	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+	struct otx2_nix_tm_shaper_profile *profile;
+
+	profile = nix_tm_shaper_profile_search(dev, profile_id);
+	if (profile) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
+		error->message = "shaper profile ID already exists";
+		return -EINVAL;
+	}
+
+	/* Committed rate and burst size can be enabled/disabled */
+	if (params->committed.size || params->committed.rate) {
+		if (params->committed.size < MIN_SHAPER_BURST ||
+		    params->committed.size > MAX_SHAPER_BURST) {
+			error->type =
+				RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_SIZE;
+			return -EINVAL;
+		} else if (!shaper_rate_to_nix(params->committed.rate * 8,
+					       NULL, NULL, NULL)) {
+			error->type =
+				RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE;
+			error->message = "shaper committed rate invalid";
+			return -EINVAL;
+		}
+	}
+
+	/* Peak rate and burst size can be enabled/disabled */
+	if (params->peak.size || params->peak.rate) {
+		if (params->peak.size < MIN_SHAPER_BURST ||
+		    params->peak.size > MAX_SHAPER_BURST) {
+			error->type =
+				RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE;
+			return -EINVAL;
+		} else if (!shaper_rate_to_nix(params->peak.rate * 8,
+					       NULL, NULL, NULL)) {
+			error->type =
+				RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_RATE;
+			error->message = "shaper peak rate invalid";
+			return -EINVAL;
+		}
+	}
+
+	profile = rte_zmalloc("otx2_nix_tm_shaper_profile",
+			      sizeof(struct otx2_nix_tm_shaper_profile), 0);
+	if (!profile)
+		return -ENOMEM;
+
+	profile->shaper_profile_id = profile_id;
+	rte_memcpy(&profile->params, params,
+		   sizeof(struct rte_tm_shaper_params));
+	TAILQ_INSERT_TAIL(&dev->shaper_profile_list, profile, shaper);
+
+	otx2_tm_dbg("Added TM shaper profile %u, "
+		    " pir %" PRIu64 " , pbs %" PRIu64 ", cir %" PRIu64
+		    ", cbs %" PRIu64 " , adj %u",
+		    profile_id,
+		    params->peak.rate * 8,
+		    params->peak.size,
+		    params->committed.rate * 8,
+		    params->committed.size,
+		    params->pkt_length_adjust);
+
+	/* Translate rate as bits per second */
+	profile->params.peak.rate = profile->params.peak.rate * 8;
+	profile->params.committed.rate = profile->params.committed.rate * 8;
+	/* Always use PIR for single rate shaping */
+	if (!params->peak.rate && params->committed.rate) {
+		profile->params.peak = profile->params.committed;
+		memset(&profile->params.committed, 0,
+		       sizeof(profile->params.committed));
+	}
+
+	/* update min rate */
+	nix_tm_shaper_profile_update_min(dev);
+	return 0;
+}
+
+static int
+otx2_nix_tm_shaper_profile_delete(struct rte_eth_dev *eth_dev,
+				  uint32_t profile_id,
+				  struct rte_tm_error *error)
+{
+	struct otx2_nix_tm_shaper_profile *profile;
+	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+
+	profile = nix_tm_shaper_profile_search(dev, profile_id);
+
+	if (!profile) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
+		error->message = "shaper profile ID does not exist";
+		return -EINVAL;
+	}
+
+	if (profile->reference_count) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
+		error->message = "shaper profile in use";
+		return -EINVAL;
+	}
+
+	otx2_tm_dbg("Removing TM shaper profile %u", profile_id);
+	TAILQ_REMOVE(&dev->shaper_profile_list, profile, shaper);
+	rte_free(profile);
+
+	/* update min rate */
+	nix_tm_shaper_profile_update_min(dev);
+	return 0;
+}
+
+static int
+otx2_nix_tm_node_add(struct rte_eth_dev *eth_dev, uint32_t node_id,
+		     uint32_t parent_node_id, uint32_t priority,
+		     uint32_t weight, uint32_t lvl,
+		     struct rte_tm_node_params *params,
+		     struct rte_tm_error *error)
+{
+	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+	struct otx2_nix_tm_node *parent_node;
+	int rc, clear_on_fail = 0;
+	uint32_t exp_next_lvl;
+	uint16_t hw_lvl;
+
+	/* we don't support dynamic updates */
+	if (dev->tm_flags & NIX_TM_COMMITTED) {
+		error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
+		error->message = "dynamic update not supported";
+		return -EIO;
+	}
+
+	/* Leaf nodes have to be of the same priority */
+	if (nix_tm_is_leaf(dev, lvl) && priority != 0) {
+		error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
+		error->message = "queue shapers must be priority 0";
+		return -EIO;
+	}
+
+	parent_node = nix_tm_node_search(dev, parent_node_id, true);
+
+	/* find the right level */
+	if (lvl == RTE_TM_NODE_LEVEL_ID_ANY) {
+		if (parent_node_id == RTE_TM_NODE_ID_NULL) {
+			lvl = OTX2_TM_LVL_ROOT;
+		} else if (parent_node) {
+			lvl = parent_node->lvl + 1;
+		} else {
+			/* Neither a proper parent nor a proper level id given */
+			error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
+			error->message = "invalid parent node id";
+			return -ERANGE;
+		}
+	}
+
+	/* Translate rte_tm level id's to nix hw level id's */
+	hw_lvl = nix_tm_lvl2nix(dev, lvl);
+	if (hw_lvl == NIX_TXSCH_LVL_CNT &&
+	    !nix_tm_is_leaf(dev, lvl)) {
+		error->type = RTE_TM_ERROR_TYPE_LEVEL_ID;
+		error->message = "invalid level id";
+		return -ERANGE;
+	}
+
+	if (node_id < dev->tm_leaf_cnt)
+		exp_next_lvl = NIX_TXSCH_LVL_SMQ;
+	else
+		exp_next_lvl = hw_lvl + 1;
+
+	/* Check if there is no parent node yet */
+	if (hw_lvl != dev->otx2_tm_root_lvl &&
+	    (!parent_node || parent_node->hw_lvl != exp_next_lvl)) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
+		error->message = "invalid parent node id";
+		return -EINVAL;
+	}
+
+	/* Check if a node already exists */
+	if (nix_tm_node_search(dev, node_id, true)) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+		error->message = "node already exists";
+		return -EINVAL;
+	}
+
+	/* Check if shaper profile exists for non leaf node */
+	if (!nix_tm_is_leaf(dev, lvl) &&
+	    params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE &&
+	    !nix_tm_shaper_profile_search(dev, params->shaper_profile_id)) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
+		error->message = "invalid shaper profile";
+		return -EINVAL;
+	}
+
+	/* Check if there is second DWRR already in siblings or holes in prio */
+	if (validate_prio(dev, lvl, parent_node_id, priority, error))
+		return -EINVAL;
+
+	if (weight > MAX_SCHED_WEIGHT) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_WEIGHT;
+		error->message = "max weight exceeded";
+		return -EINVAL;
+	}
+
+	rc = nix_tm_node_add_to_list(dev, node_id, parent_node_id,
+				     priority, weight, hw_lvl,
+				     lvl, true, params);
+	if (rc) {
+		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+		/* cleanup user
added nodes */ + if (clear_on_fail) + nix_tm_free_resources(dev, NIX_TM_NODE_USER, + NIX_TM_NODE_USER, false); + error->message = "failed to add node"; + return rc; + } + error->type = RTE_TM_ERROR_TYPE_NONE; + return 0; +} + +static int +otx2_nix_tm_node_delete(struct rte_eth_dev *eth_dev, uint32_t node_id, + struct rte_tm_error *error) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct otx2_nix_tm_node *tm_node, *child_node; + struct otx2_nix_tm_shaper_profile *profile; + uint32_t profile_id; + + /* we don't support dynamic updates yet */ + if (dev->tm_flags & NIX_TM_COMMITTED) { + error->type = RTE_TM_ERROR_TYPE_CAPABILITIES; + error->message = "hierarchy exists"; + return -EIO; + } + + if (node_id == RTE_TM_NODE_ID_NULL) { + error->type = RTE_TM_ERROR_TYPE_NODE_ID; + error->message = "invalid node id"; + return -EINVAL; + } + + tm_node = nix_tm_node_search(dev, node_id, true); + if (!tm_node) { + error->type = RTE_TM_ERROR_TYPE_NODE_ID; + error->message = "no such node"; + return -EINVAL; + } + + /* Check for any existing children */ + TAILQ_FOREACH(child_node, &dev->node_list, node) { + if (child_node->parent == tm_node) { + error->type = RTE_TM_ERROR_TYPE_NODE_ID; + error->message = "children exist"; + return -EINVAL; + } + } + + /* Remove shaper profile reference */ + profile_id = tm_node->params.shaper_profile_id; + profile = nix_tm_shaper_profile_search(dev, profile_id); + profile->reference_count--; + + TAILQ_REMOVE(&dev->node_list, tm_node, node); + rte_free(tm_node); + return 0; +} + +static int +nix_tm_node_suspend_resume(struct rte_eth_dev *eth_dev, uint32_t node_id, + struct rte_tm_error *error, bool suspend) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct otx2_mbox *mbox = dev->mbox; + struct otx2_nix_tm_node *tm_node; + struct nix_txschq_config *req; + uint16_t flags; + int rc; + + tm_node = nix_tm_node_search(dev, node_id, true); + if (!tm_node) { + error->type = RTE_TM_ERROR_TYPE_NODE_ID; + error->message = "no such node"; + return -EINVAL; + } + + if (!(dev->tm_flags & NIX_TM_COMMITTED)) { + error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED; + error->message = "hierarchy doesn't exist"; + return -EINVAL; + } + + flags = tm_node->flags; + flags = suspend ? 
(flags & ~NIX_TM_NODE_ENABLED) : + (flags | NIX_TM_NODE_ENABLED); + + if (tm_node->flags == flags) + return 0; + + /* send mbox for state change */ + req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox); + + req->lvl = tm_node->hw_lvl; + req->num_regs = prepare_tm_sw_xoff(tm_node, suspend, + req->reg, req->regval); + rc = send_tm_reqval(mbox, req, error); + if (!rc) + tm_node->flags = flags; + return rc; +} + +static int +otx2_nix_tm_node_suspend(struct rte_eth_dev *eth_dev, uint32_t node_id, + struct rte_tm_error *error) +{ + return nix_tm_node_suspend_resume(eth_dev, node_id, error, true); +} + +static int +otx2_nix_tm_node_resume(struct rte_eth_dev *eth_dev, uint32_t node_id, + struct rte_tm_error *error) +{ + return nix_tm_node_suspend_resume(eth_dev, node_id, error, false); +} + +static int +otx2_nix_tm_hierarchy_commit(struct rte_eth_dev *eth_dev, + int clear_on_fail, + struct rte_tm_error *error) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct otx2_nix_tm_node *tm_node; + uint32_t leaf_cnt = 0; + int rc; + + if (dev->tm_flags & NIX_TM_COMMITTED) { + error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED; + error->message = "hierarchy exists"; + return -EINVAL; + } + + /* Check if we have all the leaf nodes */ + TAILQ_FOREACH(tm_node, &dev->node_list, node) { + if (tm_node->flags & NIX_TM_NODE_USER && + tm_node->id < dev->tm_leaf_cnt) + leaf_cnt++; + } + + if (leaf_cnt != dev->tm_leaf_cnt) { + error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED; + error->message = "incomplete hierarchy"; + return -EINVAL; + } + + /* + * Disable xmit will be enabled when + * new topology is available. + */ + rc = nix_xmit_disable(eth_dev); + if (rc) { + otx2_err("failed to disable TX, rc=%d", rc); + return -EIO; + } + + /* Delete default/ratelimit tree */ + if (dev->tm_flags & (NIX_TM_DEFAULT_TREE | NIX_TM_RATE_LIMIT_TREE)) { + rc = nix_tm_free_resources(dev, NIX_TM_NODE_USER, 0, false); + if (rc) { + error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED; + error->message = "failed to free default resources"; + return rc; + } + dev->tm_flags &= ~(NIX_TM_DEFAULT_TREE | + NIX_TM_RATE_LIMIT_TREE); + } + + /* Free up user alloc'ed resources */ + rc = nix_tm_free_resources(dev, NIX_TM_NODE_USER, + NIX_TM_NODE_USER, true); + if (rc) { + error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED; + error->message = "failed to free user resources"; + return rc; + } + + rc = nix_tm_alloc_resources(eth_dev, true); + if (rc) { + error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED; + error->message = "alloc resources failed"; + /* TODO should we restore default config ? 
*/ + if (clear_on_fail) + nix_tm_free_resources(dev, 0, 0, false); + return rc; + } + + error->type = RTE_TM_ERROR_TYPE_NONE; + dev->tm_flags |= NIX_TM_COMMITTED; + return 0; +} + +static int +otx2_nix_tm_node_shaper_update(struct rte_eth_dev *eth_dev, + uint32_t node_id, + uint32_t profile_id, + struct rte_tm_error *error) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct otx2_nix_tm_shaper_profile *profile = NULL; + struct otx2_mbox *mbox = dev->mbox; + struct otx2_nix_tm_node *tm_node; + struct nix_txschq_config *req; + uint8_t k; + int rc; + + tm_node = nix_tm_node_search(dev, node_id, true); + if (!tm_node || nix_tm_is_leaf(dev, tm_node->lvl)) { + error->type = RTE_TM_ERROR_TYPE_NODE_ID; + error->message = "invalid node"; + return -EINVAL; + } + + if (profile_id == tm_node->params.shaper_profile_id) + return 0; + + if (profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) { + profile = nix_tm_shaper_profile_search(dev, profile_id); + if (!profile) { + error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID; + error->message = "shaper profile ID not exist"; + return -EINVAL; + } + } + + tm_node->params.shaper_profile_id = profile_id; + + /* Nothing to do if not yet committed */ + if (!(dev->tm_flags & NIX_TM_COMMITTED)) + return 0; + + tm_node->flags &= ~NIX_TM_NODE_ENABLED; + + /* Flush the specific node with SW_XOFF */ + req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox); + req->lvl = tm_node->hw_lvl; + k = prepare_tm_sw_xoff(tm_node, true, req->reg, req->regval); + req->num_regs = k; + + rc = send_tm_reqval(mbox, req, error); + if (rc) + return rc; + + shaper_default_red_algo(dev, tm_node, profile); + + /* Update the PIR/CIR and clear SW XOFF */ + req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox); + req->lvl = tm_node->hw_lvl; + + k = prepare_tm_shaper_reg(tm_node, profile, req->reg, req->regval); + + k += prepare_tm_sw_xoff(tm_node, false, &req->reg[k], &req->regval[k]); + + req->num_regs = k; + rc = send_tm_reqval(mbox, req, error); + if (!rc) + tm_node->flags |= NIX_TM_NODE_ENABLED; + return rc; +} + +static int +otx2_nix_tm_node_parent_update(struct rte_eth_dev *eth_dev, + uint32_t node_id, uint32_t new_parent_id, + uint32_t priority, uint32_t weight, + struct rte_tm_error *error) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct otx2_nix_tm_node *tm_node, *sibling; + struct otx2_nix_tm_node *new_parent; + struct nix_txschq_config *req; + uint8_t k; + int rc; + + if (!(dev->tm_flags & NIX_TM_COMMITTED)) { + error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED; + error->message = "hierarchy doesn't exist"; + return -EINVAL; + } + + tm_node = nix_tm_node_search(dev, node_id, true); + if (!tm_node) { + error->type = RTE_TM_ERROR_TYPE_NODE_ID; + error->message = "no such node"; + return -EINVAL; + } + + /* Parent id valid only for non root nodes */ + if (tm_node->hw_lvl != dev->otx2_tm_root_lvl) { + new_parent = nix_tm_node_search(dev, new_parent_id, true); + if (!new_parent) { + error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID; + error->message = "no such parent node"; + return -EINVAL; + } + + /* Current support is only for dynamic weight update */ + if (tm_node->parent != new_parent || + tm_node->priority != priority) { + error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID; + error->message = "only weight update supported"; + return -EINVAL; + } + } + + /* Skip if no change */ + if (tm_node->weight == weight) + return 0; + + tm_node->weight = weight; + + /* For leaf nodes, SQ CTX needs update */ + if (nix_tm_is_leaf(dev, tm_node->lvl)) { + /* Update SQ quantum data on 
the fly */ + rc = nix_sq_sched_data(dev, tm_node, true); + if (rc) { + error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED; + error->message = "sq sched data update failed"; + return rc; + } + } else { + /* XOFF Parent node */ + req = otx2_mbox_alloc_msg_nix_txschq_cfg(dev->mbox); + req->lvl = tm_node->parent->hw_lvl; + req->num_regs = prepare_tm_sw_xoff(tm_node->parent, true, + req->reg, req->regval); + rc = send_tm_reqval(dev->mbox, req, error); + if (rc) + return rc; + + /* XOFF this node and all other siblings */ + req = otx2_mbox_alloc_msg_nix_txschq_cfg(dev->mbox); + req->lvl = tm_node->hw_lvl; + + k = 0; + TAILQ_FOREACH(sibling, &dev->node_list, node) { + if (sibling->parent != tm_node->parent) + continue; + k += prepare_tm_sw_xoff(sibling, true, &req->reg[k], + &req->regval[k]); + } + req->num_regs = k; + rc = send_tm_reqval(dev->mbox, req, error); + if (rc) + return rc; + + /* Update new weight for current node */ + req = otx2_mbox_alloc_msg_nix_txschq_cfg(dev->mbox); + req->lvl = tm_node->hw_lvl; + req->num_regs = prepare_tm_sched_reg(dev, tm_node, + req->reg, req->regval); + rc = send_tm_reqval(dev->mbox, req, error); + if (rc) + return rc; + + /* XON this node and all other siblings */ + req = otx2_mbox_alloc_msg_nix_txschq_cfg(dev->mbox); + req->lvl = tm_node->hw_lvl; + + k = 0; + TAILQ_FOREACH(sibling, &dev->node_list, node) { + if (sibling->parent != tm_node->parent) + continue; + k += prepare_tm_sw_xoff(sibling, false, &req->reg[k], + &req->regval[k]); + } + req->num_regs = k; + rc = send_tm_reqval(dev->mbox, req, error); + if (rc) + return rc; + + /* XON Parent node */ + req = otx2_mbox_alloc_msg_nix_txschq_cfg(dev->mbox); + req->lvl = tm_node->parent->hw_lvl; + req->num_regs = prepare_tm_sw_xoff(tm_node->parent, false, + req->reg, req->regval); + rc = send_tm_reqval(dev->mbox, req, error); + if (rc) + return rc; + } + return 0; +} + +static int +otx2_nix_tm_node_stats_read(struct rte_eth_dev *eth_dev, uint32_t node_id, + struct rte_tm_node_stats *stats, + uint64_t *stats_mask, int clear, + struct rte_tm_error *error) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct otx2_nix_tm_node *tm_node; + uint64_t reg, val; + int64_t *addr; + int rc = 0; + + tm_node = nix_tm_node_search(dev, node_id, true); + if (!tm_node) { + error->type = RTE_TM_ERROR_TYPE_NODE_ID; + error->message = "no such node"; + return -EINVAL; + } + + /* Stats support only for leaf node or TL1 root */ + if (nix_tm_is_leaf(dev, tm_node->lvl)) { + reg = (((uint64_t)tm_node->id) << 32); + + /* Packets */ + addr = (int64_t *)(dev->base + NIX_LF_SQ_OP_PKTS); + val = otx2_atomic64_add_nosync(reg, addr); + if (val & OP_ERR) + val = 0; + stats->n_pkts = val - tm_node->last_pkts; + + /* Bytes */ + addr = (int64_t *)(dev->base + NIX_LF_SQ_OP_OCTS); + val = otx2_atomic64_add_nosync(reg, addr); + if (val & OP_ERR) + val = 0; + stats->n_bytes = val - tm_node->last_bytes; + + if (clear) { + tm_node->last_pkts = stats->n_pkts; + tm_node->last_bytes = stats->n_bytes; + } + + *stats_mask = RTE_TM_STATS_N_PKTS | RTE_TM_STATS_N_BYTES; + + } else if (tm_node->hw_lvl == NIX_TXSCH_LVL_TL1) { + error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED; + error->message = "stats read error"; + + /* RED Drop packets */ + reg = NIX_AF_TL1X_DROPPED_PACKETS(tm_node->hw_id); + rc = read_tm_reg(dev->mbox, reg, &val, NIX_TXSCH_LVL_TL1); + if (rc) + goto exit; + stats->leaf.n_pkts_dropped[RTE_COLOR_RED] = + val - tm_node->last_pkts; + + /* RED Drop bytes */ + reg = NIX_AF_TL1X_DROPPED_BYTES(tm_node->hw_id); + rc = read_tm_reg(dev->mbox, reg, 
&val, NIX_TXSCH_LVL_TL1); + if (rc) + goto exit; + stats->leaf.n_bytes_dropped[RTE_COLOR_RED] = + val - tm_node->last_bytes; + + /* Clear stats */ + if (clear) { + tm_node->last_pkts = + stats->leaf.n_pkts_dropped[RTE_COLOR_RED]; + tm_node->last_bytes = + stats->leaf.n_bytes_dropped[RTE_COLOR_RED]; + } + + *stats_mask = RTE_TM_STATS_N_PKTS_RED_DROPPED | + RTE_TM_STATS_N_BYTES_RED_DROPPED; + + } else { + error->type = RTE_TM_ERROR_TYPE_NODE_ID; + error->message = "unsupported node"; + rc = -EINVAL; + } + +exit: + return rc; +} + +const struct rte_tm_ops otx2_tm_ops = { + .node_type_get = otx2_nix_tm_node_type_get, + + .capabilities_get = otx2_nix_tm_capa_get, + .level_capabilities_get = otx2_nix_tm_level_capa_get, + .node_capabilities_get = otx2_nix_tm_node_capa_get, + + .shaper_profile_add = otx2_nix_tm_shaper_profile_add, + .shaper_profile_delete = otx2_nix_tm_shaper_profile_delete, + + .node_add = otx2_nix_tm_node_add, + .node_delete = otx2_nix_tm_node_delete, + .node_suspend = otx2_nix_tm_node_suspend, + .node_resume = otx2_nix_tm_node_resume, + .hierarchy_commit = otx2_nix_tm_hierarchy_commit, + + .node_shaper_update = otx2_nix_tm_node_shaper_update, + .node_parent_update = otx2_nix_tm_node_parent_update, + .node_stats_read = otx2_nix_tm_node_stats_read, +}; + +static int +nix_tm_prepare_default_tree(struct rte_eth_dev *eth_dev) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + uint32_t def = eth_dev->data->nb_tx_queues; + struct rte_tm_node_params params; + uint32_t leaf_parent, i; + int rc = 0, leaf_level; + + /* Default params */ + memset(¶ms, 0, sizeof(params)); + params.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE; + + if (nix_tm_have_tl1_access(dev)) { + dev->otx2_tm_root_lvl = NIX_TXSCH_LVL_TL1; + rc = nix_tm_node_add_to_list(dev, def, RTE_TM_NODE_ID_NULL, 0, + DEFAULT_RR_WEIGHT, + NIX_TXSCH_LVL_TL1, + OTX2_TM_LVL_ROOT, false, ¶ms); + if (rc) + goto exit; + rc = nix_tm_node_add_to_list(dev, def + 1, def, 0, + DEFAULT_RR_WEIGHT, + NIX_TXSCH_LVL_TL2, + OTX2_TM_LVL_SCH1, false, ¶ms); + if (rc) + goto exit; + + rc = nix_tm_node_add_to_list(dev, def + 2, def + 1, 0, + DEFAULT_RR_WEIGHT, + NIX_TXSCH_LVL_TL3, + OTX2_TM_LVL_SCH2, false, ¶ms); + if (rc) + goto exit; + + rc = nix_tm_node_add_to_list(dev, def + 3, def + 2, 0, + DEFAULT_RR_WEIGHT, + NIX_TXSCH_LVL_TL4, + OTX2_TM_LVL_SCH3, false, ¶ms); + if (rc) + goto exit; + + rc = nix_tm_node_add_to_list(dev, def + 4, def + 3, 0, + DEFAULT_RR_WEIGHT, + NIX_TXSCH_LVL_SMQ, + OTX2_TM_LVL_SCH4, false, ¶ms); + if (rc) + goto exit; + + leaf_parent = def + 4; + leaf_level = OTX2_TM_LVL_QUEUE; + } else { + dev->otx2_tm_root_lvl = NIX_TXSCH_LVL_TL2; + rc = nix_tm_node_add_to_list(dev, def, RTE_TM_NODE_ID_NULL, 0, + DEFAULT_RR_WEIGHT, + NIX_TXSCH_LVL_TL2, + OTX2_TM_LVL_ROOT, false, ¶ms); + if (rc) + goto exit; + + rc = nix_tm_node_add_to_list(dev, def + 1, def, 0, + DEFAULT_RR_WEIGHT, + NIX_TXSCH_LVL_TL3, + OTX2_TM_LVL_SCH1, false, ¶ms); + if (rc) + goto exit; + + rc = nix_tm_node_add_to_list(dev, def + 2, def + 1, 0, + DEFAULT_RR_WEIGHT, + NIX_TXSCH_LVL_TL4, + OTX2_TM_LVL_SCH2, false, ¶ms); + if (rc) + goto exit; + + rc = nix_tm_node_add_to_list(dev, def + 3, def + 2, 0, + DEFAULT_RR_WEIGHT, + NIX_TXSCH_LVL_SMQ, + OTX2_TM_LVL_SCH3, false, ¶ms); + if (rc) + goto exit; + + leaf_parent = def + 3; + leaf_level = OTX2_TM_LVL_SCH4; + } + + /* Add leaf nodes */ + for (i = 0; i < eth_dev->data->nb_tx_queues; i++) { + rc = nix_tm_node_add_to_list(dev, i, leaf_parent, 0, + DEFAULT_RR_WEIGHT, + NIX_TXSCH_LVL_CNT, + leaf_level, false, ¶ms); + 
if (rc) + break; + } + +exit: + return rc; +} + +void otx2_nix_tm_conf_init(struct rte_eth_dev *eth_dev) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + + TAILQ_INIT(&dev->node_list); + TAILQ_INIT(&dev->shaper_profile_list); + dev->tm_rate_min = 1E9; /* 1Gbps */ +} + +int otx2_nix_tm_init_default(struct rte_eth_dev *eth_dev) +{ + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + uint16_t sq_cnt = eth_dev->data->nb_tx_queues; + int rc; + + /* Free up all resources already held */ + rc = nix_tm_free_resources(dev, 0, 0, false); + if (rc) { + otx2_err("Failed to freeup existing resources,rc=%d", rc); + return rc; + } + + /* Clear shaper profiles */ + nix_tm_clear_shaper_profiles(dev); + dev->tm_flags = NIX_TM_DEFAULT_TREE; + + /* Disable TL1 Static Priority when VF's are enabled + * as otherwise VF's TL2 reallocation will be needed + * runtime to support a specific topology of PF. + */ + if (pci_dev->max_vfs) + dev->tm_flags |= NIX_TM_TL1_NO_SP; + + rc = nix_tm_prepare_default_tree(eth_dev); + if (rc != 0) + return rc; + + rc = nix_tm_alloc_resources(eth_dev, false); + if (rc != 0) + return rc; + dev->tm_leaf_cnt = sq_cnt; + + return 0; +} + +static int +nix_tm_prepare_rate_limited_tree(struct rte_eth_dev *eth_dev) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + uint32_t def = eth_dev->data->nb_tx_queues; + struct rte_tm_node_params params; + uint32_t leaf_parent, i, rc = 0; + + memset(¶ms, 0, sizeof(params)); + + if (nix_tm_have_tl1_access(dev)) { + dev->otx2_tm_root_lvl = NIX_TXSCH_LVL_TL1; + rc = nix_tm_node_add_to_list(dev, def, RTE_TM_NODE_ID_NULL, 0, + DEFAULT_RR_WEIGHT, + NIX_TXSCH_LVL_TL1, + OTX2_TM_LVL_ROOT, false, ¶ms); + if (rc) + goto error; + rc = nix_tm_node_add_to_list(dev, def + 1, def, 0, + DEFAULT_RR_WEIGHT, + NIX_TXSCH_LVL_TL2, + OTX2_TM_LVL_SCH1, false, ¶ms); + if (rc) + goto error; + rc = nix_tm_node_add_to_list(dev, def + 2, def + 1, 0, + DEFAULT_RR_WEIGHT, + NIX_TXSCH_LVL_TL3, + OTX2_TM_LVL_SCH2, false, ¶ms); + if (rc) + goto error; + rc = nix_tm_node_add_to_list(dev, def + 3, def + 2, 0, + DEFAULT_RR_WEIGHT, + NIX_TXSCH_LVL_TL4, + OTX2_TM_LVL_SCH3, false, ¶ms); + if (rc) + goto error; + leaf_parent = def + 3; + + /* Add per queue SMQ nodes */ + for (i = 0; i < eth_dev->data->nb_tx_queues; i++) { + rc = nix_tm_node_add_to_list(dev, leaf_parent + 1 + i, + leaf_parent, + 0, DEFAULT_RR_WEIGHT, + NIX_TXSCH_LVL_SMQ, + OTX2_TM_LVL_SCH4, + false, ¶ms); + if (rc) + goto error; + } + + /* Add leaf nodes */ + for (i = 0; i < eth_dev->data->nb_tx_queues; i++) { + rc = nix_tm_node_add_to_list(dev, i, + leaf_parent + 1 + i, 0, + DEFAULT_RR_WEIGHT, + NIX_TXSCH_LVL_CNT, + OTX2_TM_LVL_QUEUE, + false, ¶ms); + if (rc) + goto error; + } + + return 0; + } + + dev->otx2_tm_root_lvl = NIX_TXSCH_LVL_TL2; + rc = nix_tm_node_add_to_list(dev, def, RTE_TM_NODE_ID_NULL, 0, + DEFAULT_RR_WEIGHT, NIX_TXSCH_LVL_TL2, + OTX2_TM_LVL_ROOT, false, ¶ms); + if (rc) + goto error; + rc = nix_tm_node_add_to_list(dev, def + 1, def, 0, + DEFAULT_RR_WEIGHT, NIX_TXSCH_LVL_TL3, + OTX2_TM_LVL_SCH1, false, ¶ms); + if (rc) + goto error; + rc = nix_tm_node_add_to_list(dev, def + 2, def + 1, 0, + DEFAULT_RR_WEIGHT, NIX_TXSCH_LVL_TL4, + OTX2_TM_LVL_SCH2, false, ¶ms); + if (rc) + goto error; + leaf_parent = def + 2; + + /* Add per queue SMQ nodes */ + for (i = 0; i < eth_dev->data->nb_tx_queues; i++) { + rc = nix_tm_node_add_to_list(dev, leaf_parent + 1 + i, + leaf_parent, + 0, DEFAULT_RR_WEIGHT, + NIX_TXSCH_LVL_SMQ, + 
OTX2_TM_LVL_SCH3, + false, ¶ms); + if (rc) + goto error; + } + + /* Add leaf nodes */ + for (i = 0; i < eth_dev->data->nb_tx_queues; i++) { + rc = nix_tm_node_add_to_list(dev, i, leaf_parent + 1 + i, 0, + DEFAULT_RR_WEIGHT, + NIX_TXSCH_LVL_CNT, + OTX2_TM_LVL_SCH4, + false, ¶ms); + if (rc) + break; + } +error: + return rc; +} + +static int +otx2_nix_tm_rate_limit_mdq(struct rte_eth_dev *eth_dev, + struct otx2_nix_tm_node *tm_node, + uint64_t tx_rate) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct otx2_nix_tm_shaper_profile profile; + struct otx2_mbox *mbox = dev->mbox; + volatile uint64_t *reg, *regval; + struct nix_txschq_config *req; + uint16_t flags; + uint8_t k = 0; + int rc; + + flags = tm_node->flags; + + req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox); + req->lvl = NIX_TXSCH_LVL_MDQ; + reg = req->reg; + regval = req->regval; + + if (tx_rate == 0) { + k += prepare_tm_sw_xoff(tm_node, true, ®[k], ®val[k]); + flags &= ~NIX_TM_NODE_ENABLED; + goto exit; + } + + if (!(flags & NIX_TM_NODE_ENABLED)) { + k += prepare_tm_sw_xoff(tm_node, false, ®[k], ®val[k]); + flags |= NIX_TM_NODE_ENABLED; + } + + /* Use only PIR for rate limit */ + memset(&profile, 0, sizeof(profile)); + profile.params.peak.rate = tx_rate; + /* Minimum burst of ~4us Bytes of Tx */ + profile.params.peak.size = RTE_MAX(NIX_MAX_HW_FRS, + (4ull * tx_rate) / (1E6 * 8)); + if (!dev->tm_rate_min || dev->tm_rate_min > tx_rate) + dev->tm_rate_min = tx_rate; + + k += prepare_tm_shaper_reg(tm_node, &profile, ®[k], ®val[k]); +exit: + req->num_regs = k; + rc = otx2_mbox_process(mbox); + if (rc) + return rc; + + tm_node->flags = flags; + return 0; +} + +int +otx2_nix_tm_set_queue_rate_limit(struct rte_eth_dev *eth_dev, + uint16_t queue_idx, uint16_t tx_rate_mbps) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + uint64_t tx_rate = tx_rate_mbps * (uint64_t)1E6; + struct otx2_nix_tm_node *tm_node; + int rc; + + /* Check for supported revisions */ + if (otx2_dev_is_95xx_Ax(dev) || + otx2_dev_is_96xx_Ax(dev)) + return -EINVAL; + + if (queue_idx >= eth_dev->data->nb_tx_queues) + return -EINVAL; + + if (!(dev->tm_flags & NIX_TM_DEFAULT_TREE) && + !(dev->tm_flags & NIX_TM_RATE_LIMIT_TREE)) + goto error; + + if ((dev->tm_flags & NIX_TM_DEFAULT_TREE) && + eth_dev->data->nb_tx_queues > 1) { + /* For TM topology change ethdev needs to be stopped */ + if (eth_dev->data->dev_started) + return -EBUSY; + + /* + * Disable xmit will be enabled when + * new topology is available. 
+ */ + rc = nix_xmit_disable(eth_dev); + if (rc) { + otx2_err("failed to disable TX, rc=%d", rc); + return -EIO; + } + + rc = nix_tm_free_resources(dev, 0, 0, false); + if (rc < 0) { + otx2_tm_dbg("failed to free default resources, rc %d", + rc); + return -EIO; + } + + rc = nix_tm_prepare_rate_limited_tree(eth_dev); + if (rc < 0) { + otx2_tm_dbg("failed to prepare tm tree, rc=%d", rc); + return rc; + } + + rc = nix_tm_alloc_resources(eth_dev, true); + if (rc != 0) { + otx2_tm_dbg("failed to allocate tm tree, rc=%d", rc); + return rc; + } + + dev->tm_flags &= ~NIX_TM_DEFAULT_TREE; + dev->tm_flags |= NIX_TM_RATE_LIMIT_TREE; + } + + tm_node = nix_tm_node_search(dev, queue_idx, false); + + /* check if we found a valid leaf node */ + if (!tm_node || + !nix_tm_is_leaf(dev, tm_node->lvl) || + !tm_node->parent || + tm_node->parent->hw_id == UINT32_MAX) + return -EIO; + + return otx2_nix_tm_rate_limit_mdq(eth_dev, tm_node->parent, tx_rate); +error: + otx2_tm_dbg("Unsupported TM tree 0x%0x", dev->tm_flags); + return -EINVAL; +} + +int +otx2_nix_tm_ops_get(struct rte_eth_dev *eth_dev, void *arg) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + + if (!arg) + return -EINVAL; + + /* Check for supported revisions */ + if (otx2_dev_is_95xx_Ax(dev) || + otx2_dev_is_96xx_Ax(dev)) + return -EINVAL; + + *(const void **)arg = &otx2_tm_ops; + + return 0; +} + +int +otx2_nix_tm_fini(struct rte_eth_dev *eth_dev) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + int rc; + + /* Xmit is assumed to be disabled */ + /* Free up resources already held */ + rc = nix_tm_free_resources(dev, 0, 0, false); + if (rc) { + otx2_err("Failed to freeup existing resources,rc=%d", rc); + return rc; + } + + /* Clear shaper profiles */ + nix_tm_clear_shaper_profiles(dev); + + dev->tm_flags = 0; + return 0; +} + +int +otx2_nix_tm_get_leaf_data(struct otx2_eth_dev *dev, uint16_t sq, + uint32_t *rr_quantum, uint16_t *smq) +{ + struct otx2_nix_tm_node *tm_node; + int rc; + + /* 0..sq_cnt-1 are leaf nodes */ + if (sq >= dev->tm_leaf_cnt) + return -EINVAL; + + /* Search for internal node first */ + tm_node = nix_tm_node_search(dev, sq, false); + if (!tm_node) + tm_node = nix_tm_node_search(dev, sq, true); + + /* Check if we found a valid leaf node */ + if (!tm_node || !nix_tm_is_leaf(dev, tm_node->lvl) || + !tm_node->parent || tm_node->parent->hw_id == UINT32_MAX) { + return -EIO; + } + + /* Get SMQ Id of leaf node's parent */ + *smq = tm_node->parent->hw_id; + *rr_quantum = NIX_TM_WEIGHT_TO_RR_QUANTUM(tm_node->weight); + + rc = nix_smq_xoff(dev, tm_node->parent, false); + if (rc) + return rc; + tm_node->flags |= NIX_TM_NODE_ENABLED; + + return 0; +} diff --git a/src/spdk/dpdk/drivers/net/octeontx2/otx2_tm.h b/src/spdk/dpdk/drivers/net/octeontx2/otx2_tm.h new file mode 100644 index 000000000..4a80c234e --- /dev/null +++ b/src/spdk/dpdk/drivers/net/octeontx2/otx2_tm.h @@ -0,0 +1,171 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(C) 2019 Marvell International Ltd. 
+ */ + +#ifndef __OTX2_TM_H__ +#define __OTX2_TM_H__ + +#include + +#include + +#define NIX_TM_DEFAULT_TREE BIT_ULL(0) +#define NIX_TM_COMMITTED BIT_ULL(1) +#define NIX_TM_RATE_LIMIT_TREE BIT_ULL(2) +#define NIX_TM_TL1_NO_SP BIT_ULL(3) + +struct otx2_eth_dev; + +void otx2_nix_tm_conf_init(struct rte_eth_dev *eth_dev); +int otx2_nix_tm_init_default(struct rte_eth_dev *eth_dev); +int otx2_nix_tm_fini(struct rte_eth_dev *eth_dev); +int otx2_nix_tm_ops_get(struct rte_eth_dev *eth_dev, void *ops); +int otx2_nix_tm_get_leaf_data(struct otx2_eth_dev *dev, uint16_t sq, + uint32_t *rr_quantum, uint16_t *smq); +int otx2_nix_tm_set_queue_rate_limit(struct rte_eth_dev *eth_dev, + uint16_t queue_idx, uint16_t tx_rate); +int otx2_nix_sq_flush_pre(void *_txq, bool dev_started); +int otx2_nix_sq_flush_post(void *_txq); +int otx2_nix_sq_enable(void *_txq); +int otx2_nix_get_link(struct otx2_eth_dev *dev); +int otx2_nix_sq_sqb_aura_fc(void *_txq, bool enable); + +struct otx2_nix_tm_node { + TAILQ_ENTRY(otx2_nix_tm_node) node; + uint32_t id; + uint32_t hw_id; + uint32_t priority; + uint32_t weight; + uint16_t lvl; + uint16_t hw_lvl; + uint32_t rr_prio; + uint32_t rr_num; + uint32_t max_prio; + uint32_t parent_hw_id; + uint32_t flags:16; +#define NIX_TM_NODE_HWRES BIT_ULL(0) +#define NIX_TM_NODE_ENABLED BIT_ULL(1) +#define NIX_TM_NODE_USER BIT_ULL(2) +#define NIX_TM_NODE_RED_DISCARD BIT_ULL(3) + /* Shaper algorithm for RED state @NIX_REDALG_E */ + uint32_t red_algo:2; + + struct otx2_nix_tm_node *parent; + struct rte_tm_node_params params; + + /* Last stats */ + uint64_t last_pkts; + uint64_t last_bytes; +}; + +struct otx2_nix_tm_shaper_profile { + TAILQ_ENTRY(otx2_nix_tm_shaper_profile) shaper; + uint32_t shaper_profile_id; + uint32_t reference_count; + struct rte_tm_shaper_params params; /* Rate in bits/sec */ +}; + +struct shaper_params { + uint64_t burst_exponent; + uint64_t burst_mantissa; + uint64_t div_exp; + uint64_t exponent; + uint64_t mantissa; + uint64_t burst; + uint64_t rate; +}; + +TAILQ_HEAD(otx2_nix_tm_node_list, otx2_nix_tm_node); +TAILQ_HEAD(otx2_nix_tm_shaper_profile_list, otx2_nix_tm_shaper_profile); + +#define MAX_SCHED_WEIGHT ((uint8_t)~0) +#define NIX_TM_RR_QUANTUM_MAX (BIT_ULL(24) - 1) +#define NIX_TM_WEIGHT_TO_RR_QUANTUM(__weight) \ + ((((__weight) & MAX_SCHED_WEIGHT) * \ + NIX_TM_RR_QUANTUM_MAX) / MAX_SCHED_WEIGHT) + +/* DEFAULT_RR_WEIGHT * NIX_TM_RR_QUANTUM_MAX / MAX_SCHED_WEIGHT */ +/* = NIX_MAX_HW_MTU */ +#define DEFAULT_RR_WEIGHT 71 + +/** NIX rate limits */ +#define MAX_RATE_DIV_EXP 12 +#define MAX_RATE_EXPONENT 0xf +#define MAX_RATE_MANTISSA 0xff + +#define NIX_SHAPER_RATE_CONST ((uint64_t)2E6) + +/* NIX rate calculation in Bits/Sec + * PIR_ADD = ((256 + NIX_*_PIR[RATE_MANTISSA]) + * << NIX_*_PIR[RATE_EXPONENT]) / 256 + * PIR = (2E6 * PIR_ADD / (1 << NIX_*_PIR[RATE_DIVIDER_EXPONENT])) + * + * CIR_ADD = ((256 + NIX_*_CIR[RATE_MANTISSA]) + * << NIX_*_CIR[RATE_EXPONENT]) / 256 + * CIR = (2E6 * CIR_ADD / (CCLK_TICKS << NIX_*_CIR[RATE_DIVIDER_EXPONENT])) + */ +#define SHAPER_RATE(exponent, mantissa, div_exp) \ + ((NIX_SHAPER_RATE_CONST * ((256 + (mantissa)) << (exponent)))\ + / (((1ull << (div_exp)) * 256))) + +/* 96xx rate limits in Bits/Sec */ +#define MIN_SHAPER_RATE \ + SHAPER_RATE(0, 0, MAX_RATE_DIV_EXP) + +#define MAX_SHAPER_RATE \ + SHAPER_RATE(MAX_RATE_EXPONENT, MAX_RATE_MANTISSA, 0) + +/** TM Shaper - low level operations */ + +/** NIX burst limits */ +#define MAX_BURST_EXPONENT 0xf +#define MAX_BURST_MANTISSA 0xff + +/* NIX burst calculation + * PIR_BURST = ((256 + 
NIX_*_PIR[BURST_MANTISSA]) + * << (NIX_*_PIR[BURST_EXPONENT] + 1)) + * / 256 + * + * CIR_BURST = ((256 + NIX_*_CIR[BURST_MANTISSA]) + * << (NIX_*_CIR[BURST_EXPONENT] + 1)) + * / 256 + */ +#define SHAPER_BURST(exponent, mantissa) \ + (((256 + (mantissa)) << ((exponent) + 1)) / 256) + +/** Shaper burst limits */ +#define MIN_SHAPER_BURST \ + SHAPER_BURST(0, 0) + +#define MAX_SHAPER_BURST \ + SHAPER_BURST(MAX_BURST_EXPONENT,\ + MAX_BURST_MANTISSA) + +/* Default TL1 priority and Quantum from AF */ +#define TXSCH_TL1_DFLT_RR_QTM ((1 << 24) - 1) +#define TXSCH_TL1_DFLT_RR_PRIO 1 + +#define TXSCH_TLX_SP_PRIO_MAX 10 + +static inline const char * +nix_hwlvl2str(uint32_t hw_lvl) +{ + switch (hw_lvl) { + case NIX_TXSCH_LVL_MDQ: + return "SMQ/MDQ"; + case NIX_TXSCH_LVL_TL4: + return "TL4"; + case NIX_TXSCH_LVL_TL3: + return "TL3"; + case NIX_TXSCH_LVL_TL2: + return "TL2"; + case NIX_TXSCH_LVL_TL1: + return "TL1"; + default: + break; + } + + return "???"; +} + +#endif /* __OTX2_TM_H__ */ diff --git a/src/spdk/dpdk/drivers/net/octeontx2/otx2_tx.c b/src/spdk/dpdk/drivers/net/octeontx2/otx2_tx.c new file mode 100644 index 000000000..1af6fa649 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/octeontx2/otx2_tx.c @@ -0,0 +1,1060 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(C) 2019 Marvell International Ltd. + */ + +#include + +#include "otx2_ethdev.h" + +#define NIX_XMIT_FC_OR_RETURN(txq, pkts) do { \ + /* Cached value is low, Update the fc_cache_pkts */ \ + if (unlikely((txq)->fc_cache_pkts < (pkts))) { \ + /* Multiply with sqe_per_sqb to express in pkts */ \ + (txq)->fc_cache_pkts = \ + ((txq)->nb_sqb_bufs_adj - *(txq)->fc_mem) << \ + (txq)->sqes_per_sqb_log2; \ + /* Check it again for the room */ \ + if (unlikely((txq)->fc_cache_pkts < (pkts))) \ + return 0; \ + } \ +} while (0) + + +static __rte_always_inline uint16_t +nix_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t pkts, uint64_t *cmd, const uint16_t flags) +{ + struct otx2_eth_txq *txq = tx_queue; uint16_t i; + const rte_iova_t io_addr = txq->io_addr; + void *lmt_addr = txq->lmt_addr; + + NIX_XMIT_FC_OR_RETURN(txq, pkts); + + otx2_lmt_mov(cmd, &txq->cmd[0], otx2_nix_tx_ext_subs(flags)); + + /* Perform header writes before barrier for TSO */ + if (flags & NIX_TX_OFFLOAD_TSO_F) { + for (i = 0; i < pkts; i++) + otx2_nix_xmit_prepare_tso(tx_pkts[i], flags); + } + + /* Lets commit any changes in the packet */ + rte_cio_wmb(); + + for (i = 0; i < pkts; i++) { + otx2_nix_xmit_prepare(tx_pkts[i], cmd, flags); + /* Passing no of segdw as 4: HDR + EXT + SG + SMEM */ + otx2_nix_xmit_prepare_tstamp(cmd, &txq->cmd[0], + tx_pkts[i]->ol_flags, 4, flags); + otx2_nix_xmit_one(cmd, lmt_addr, io_addr, flags); + } + + /* Reduce the cached count */ + txq->fc_cache_pkts -= pkts; + + return pkts; +} + +static __rte_always_inline uint16_t +nix_xmit_pkts_mseg(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t pkts, uint64_t *cmd, const uint16_t flags) +{ + struct otx2_eth_txq *txq = tx_queue; uint64_t i; + const rte_iova_t io_addr = txq->io_addr; + void *lmt_addr = txq->lmt_addr; + uint16_t segdw; + + NIX_XMIT_FC_OR_RETURN(txq, pkts); + + otx2_lmt_mov(cmd, &txq->cmd[0], otx2_nix_tx_ext_subs(flags)); + + /* Perform header writes before barrier for TSO */ + if (flags & NIX_TX_OFFLOAD_TSO_F) { + for (i = 0; i < pkts; i++) + otx2_nix_xmit_prepare_tso(tx_pkts[i], flags); + } + + /* Lets commit any changes in the packet */ + rte_cio_wmb(); + + for (i = 0; i < pkts; i++) { + otx2_nix_xmit_prepare(tx_pkts[i], cmd, flags); + segdw = 
otx2_nix_prepare_mseg(tx_pkts[i], cmd, flags); + otx2_nix_xmit_prepare_tstamp(cmd, &txq->cmd[0], + tx_pkts[i]->ol_flags, segdw, + flags); + otx2_nix_xmit_mseg_one(cmd, lmt_addr, io_addr, segdw); + } + + /* Reduce the cached count */ + txq->fc_cache_pkts -= pkts; + + return pkts; +} + +#if defined(RTE_ARCH_ARM64) + +#define NIX_DESCS_PER_LOOP 4 +static __rte_always_inline uint16_t +nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t pkts, uint64_t *cmd, const uint16_t flags) +{ + uint64x2_t dataoff_iova0, dataoff_iova1, dataoff_iova2, dataoff_iova3; + uint64x2_t len_olflags0, len_olflags1, len_olflags2, len_olflags3; + uint64_t *mbuf0, *mbuf1, *mbuf2, *mbuf3; + uint64x2_t senddesc01_w0, senddesc23_w0; + uint64x2_t senddesc01_w1, senddesc23_w1; + uint64x2_t sgdesc01_w0, sgdesc23_w0; + uint64x2_t sgdesc01_w1, sgdesc23_w1; + struct otx2_eth_txq *txq = tx_queue; + uint64_t *lmt_addr = txq->lmt_addr; + rte_iova_t io_addr = txq->io_addr; + uint64x2_t ltypes01, ltypes23; + uint64x2_t xtmp128, ytmp128; + uint64x2_t xmask01, xmask23; + uint64x2_t cmd00, cmd01; + uint64x2_t cmd10, cmd11; + uint64x2_t cmd20, cmd21; + uint64x2_t cmd30, cmd31; + uint64_t lmt_status, i; + uint16_t pkts_left; + + NIX_XMIT_FC_OR_RETURN(txq, pkts); + + pkts_left = pkts & (NIX_DESCS_PER_LOOP - 1); + pkts = RTE_ALIGN_FLOOR(pkts, NIX_DESCS_PER_LOOP); + + /* Reduce the cached count */ + txq->fc_cache_pkts -= pkts; + + /* Lets commit any changes in the packet */ + rte_cio_wmb(); + + senddesc01_w0 = vld1q_dup_u64(&txq->cmd[0]); + senddesc23_w0 = senddesc01_w0; + senddesc01_w1 = vdupq_n_u64(0); + senddesc23_w1 = senddesc01_w1; + sgdesc01_w0 = vld1q_dup_u64(&txq->cmd[2]); + sgdesc23_w0 = sgdesc01_w0; + + for (i = 0; i < pkts; i += NIX_DESCS_PER_LOOP) { + /* Clear lower 32bit of SEND_HDR_W0 and SEND_SG_W0 */ + senddesc01_w0 = vbicq_u64(senddesc01_w0, + vdupq_n_u64(0xFFFFFFFF)); + sgdesc01_w0 = vbicq_u64(sgdesc01_w0, + vdupq_n_u64(0xFFFFFFFF)); + + senddesc23_w0 = senddesc01_w0; + sgdesc23_w0 = sgdesc01_w0; + + /* Move mbufs to iova */ + mbuf0 = (uint64_t *)tx_pkts[0]; + mbuf1 = (uint64_t *)tx_pkts[1]; + mbuf2 = (uint64_t *)tx_pkts[2]; + mbuf3 = (uint64_t *)tx_pkts[3]; + + mbuf0 = (uint64_t *)((uintptr_t)mbuf0 + + offsetof(struct rte_mbuf, buf_iova)); + mbuf1 = (uint64_t *)((uintptr_t)mbuf1 + + offsetof(struct rte_mbuf, buf_iova)); + mbuf2 = (uint64_t *)((uintptr_t)mbuf2 + + offsetof(struct rte_mbuf, buf_iova)); + mbuf3 = (uint64_t *)((uintptr_t)mbuf3 + + offsetof(struct rte_mbuf, buf_iova)); + /* + * Get mbuf's, olflags, iova, pktlen, dataoff + * dataoff_iovaX.D[0] = iova, + * dataoff_iovaX.D[1](15:0) = mbuf->dataoff + * len_olflagsX.D[0] = ol_flags, + * len_olflagsX.D[1](63:32) = mbuf->pkt_len + */ + dataoff_iova0 = vld1q_u64(mbuf0); + len_olflags0 = vld1q_u64(mbuf0 + 2); + dataoff_iova1 = vld1q_u64(mbuf1); + len_olflags1 = vld1q_u64(mbuf1 + 2); + dataoff_iova2 = vld1q_u64(mbuf2); + len_olflags2 = vld1q_u64(mbuf2 + 2); + dataoff_iova3 = vld1q_u64(mbuf3); + len_olflags3 = vld1q_u64(mbuf3 + 2); + + if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) { + struct rte_mbuf *mbuf; + /* Set don't free bit if reference count > 1 */ + xmask01 = vdupq_n_u64(0); + xmask23 = xmask01; + + mbuf = (struct rte_mbuf *)((uintptr_t)mbuf0 - + offsetof(struct rte_mbuf, buf_iova)); + + if (otx2_nix_prefree_seg(mbuf)) + vsetq_lane_u64(0x80000, xmask01, 0); + else + __mempool_check_cookies(mbuf->pool, + (void **)&mbuf, + 1, 0); + + mbuf = (struct rte_mbuf *)((uintptr_t)mbuf1 - + offsetof(struct rte_mbuf, buf_iova)); + if 
(otx2_nix_prefree_seg(mbuf)) + vsetq_lane_u64(0x80000, xmask01, 1); + else + __mempool_check_cookies(mbuf->pool, + (void **)&mbuf, + 1, 0); + + mbuf = (struct rte_mbuf *)((uintptr_t)mbuf2 - + offsetof(struct rte_mbuf, buf_iova)); + if (otx2_nix_prefree_seg(mbuf)) + vsetq_lane_u64(0x80000, xmask23, 0); + else + __mempool_check_cookies(mbuf->pool, + (void **)&mbuf, + 1, 0); + + mbuf = (struct rte_mbuf *)((uintptr_t)mbuf3 - + offsetof(struct rte_mbuf, buf_iova)); + if (otx2_nix_prefree_seg(mbuf)) + vsetq_lane_u64(0x80000, xmask23, 1); + else + __mempool_check_cookies(mbuf->pool, + (void **)&mbuf, + 1, 0); + senddesc01_w0 = vorrq_u64(senddesc01_w0, xmask01); + senddesc23_w0 = vorrq_u64(senddesc23_w0, xmask23); + } else { + struct rte_mbuf *mbuf; + /* Mark mempool object as "put" since + * it is freed by NIX + */ + mbuf = (struct rte_mbuf *)((uintptr_t)mbuf0 - + offsetof(struct rte_mbuf, buf_iova)); + __mempool_check_cookies(mbuf->pool, (void **)&mbuf, + 1, 0); + + mbuf = (struct rte_mbuf *)((uintptr_t)mbuf1 - + offsetof(struct rte_mbuf, buf_iova)); + __mempool_check_cookies(mbuf->pool, (void **)&mbuf, + 1, 0); + + mbuf = (struct rte_mbuf *)((uintptr_t)mbuf2 - + offsetof(struct rte_mbuf, buf_iova)); + __mempool_check_cookies(mbuf->pool, (void **)&mbuf, + 1, 0); + + mbuf = (struct rte_mbuf *)((uintptr_t)mbuf3 - + offsetof(struct rte_mbuf, buf_iova)); + __mempool_check_cookies(mbuf->pool, (void **)&mbuf, + 1, 0); + RTE_SET_USED(mbuf); + } + + /* Move mbufs to point pool */ + mbuf0 = (uint64_t *)((uintptr_t)mbuf0 + + offsetof(struct rte_mbuf, pool) - + offsetof(struct rte_mbuf, buf_iova)); + mbuf1 = (uint64_t *)((uintptr_t)mbuf1 + + offsetof(struct rte_mbuf, pool) - + offsetof(struct rte_mbuf, buf_iova)); + mbuf2 = (uint64_t *)((uintptr_t)mbuf2 + + offsetof(struct rte_mbuf, pool) - + offsetof(struct rte_mbuf, buf_iova)); + mbuf3 = (uint64_t *)((uintptr_t)mbuf3 + + offsetof(struct rte_mbuf, pool) - + offsetof(struct rte_mbuf, buf_iova)); + + if (flags & + (NIX_TX_OFFLOAD_OL3_OL4_CSUM_F | + NIX_TX_OFFLOAD_L3_L4_CSUM_F)) { + /* Get tx_offload for ol2, ol3, l2, l3 lengths */ + /* + * E(8):OL2_LEN(7):OL3_LEN(9):E(24):L3_LEN(9):L2_LEN(7) + * E(8):OL2_LEN(7):OL3_LEN(9):E(24):L3_LEN(9):L2_LEN(7) + */ + + asm volatile ("LD1 {%[a].D}[0],[%[in]]\n\t" : + [a]"+w"(senddesc01_w1) : + [in]"r"(mbuf0 + 2) : "memory"); + + asm volatile ("LD1 {%[a].D}[1],[%[in]]\n\t" : + [a]"+w"(senddesc01_w1) : + [in]"r"(mbuf1 + 2) : "memory"); + + asm volatile ("LD1 {%[b].D}[0],[%[in]]\n\t" : + [b]"+w"(senddesc23_w1) : + [in]"r"(mbuf2 + 2) : "memory"); + + asm volatile ("LD1 {%[b].D}[1],[%[in]]\n\t" : + [b]"+w"(senddesc23_w1) : + [in]"r"(mbuf3 + 2) : "memory"); + + /* Get pool pointer alone */ + mbuf0 = (uint64_t *)*mbuf0; + mbuf1 = (uint64_t *)*mbuf1; + mbuf2 = (uint64_t *)*mbuf2; + mbuf3 = (uint64_t *)*mbuf3; + } else { + /* Get pool pointer alone */ + mbuf0 = (uint64_t *)*mbuf0; + mbuf1 = (uint64_t *)*mbuf1; + mbuf2 = (uint64_t *)*mbuf2; + mbuf3 = (uint64_t *)*mbuf3; + } + + const uint8x16_t shuf_mask2 = { + 0x4, 0x5, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xc, 0xd, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + }; + xtmp128 = vzip2q_u64(len_olflags0, len_olflags1); + ytmp128 = vzip2q_u64(len_olflags2, len_olflags3); + + /* Clear dataoff_iovaX.D[1] bits other than dataoff(15:0) */ + const uint64x2_t and_mask0 = { + 0xFFFFFFFFFFFFFFFF, + 0x000000000000FFFF, + }; + + dataoff_iova0 = vandq_u64(dataoff_iova0, and_mask0); + dataoff_iova1 = vandq_u64(dataoff_iova1, and_mask0); + dataoff_iova2 = vandq_u64(dataoff_iova2, and_mask0); + 
dataoff_iova3 = vandq_u64(dataoff_iova3, and_mask0); + + /* + * Pick only 16 bits of pktlen preset at bits 63:32 + * and place them at bits 15:0. + */ + xtmp128 = vqtbl1q_u8(xtmp128, shuf_mask2); + ytmp128 = vqtbl1q_u8(ytmp128, shuf_mask2); + + /* Add pairwise to get dataoff + iova in sgdesc_w1 */ + sgdesc01_w1 = vpaddq_u64(dataoff_iova0, dataoff_iova1); + sgdesc23_w1 = vpaddq_u64(dataoff_iova2, dataoff_iova3); + + /* Orr both sgdesc_w0 and senddesc_w0 with 16 bits of + * pktlen at 15:0 position. + */ + sgdesc01_w0 = vorrq_u64(sgdesc01_w0, xtmp128); + sgdesc23_w0 = vorrq_u64(sgdesc23_w0, ytmp128); + senddesc01_w0 = vorrq_u64(senddesc01_w0, xtmp128); + senddesc23_w0 = vorrq_u64(senddesc23_w0, ytmp128); + + if ((flags & NIX_TX_OFFLOAD_L3_L4_CSUM_F) && + !(flags & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)) { + /* + * Lookup table to translate ol_flags to + * il3/il4 types. But we still use ol3/ol4 types in + * senddesc_w1 as only one header processing is enabled. + */ + const uint8x16_t tbl = { + /* [0-15] = il4type:il3type */ + 0x04, /* none (IPv6 assumed) */ + 0x14, /* PKT_TX_TCP_CKSUM (IPv6 assumed) */ + 0x24, /* PKT_TX_SCTP_CKSUM (IPv6 assumed) */ + 0x34, /* PKT_TX_UDP_CKSUM (IPv6 assumed) */ + 0x03, /* PKT_TX_IP_CKSUM */ + 0x13, /* PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM */ + 0x23, /* PKT_TX_IP_CKSUM | PKT_TX_SCTP_CKSUM */ + 0x33, /* PKT_TX_IP_CKSUM | PKT_TX_UDP_CKSUM */ + 0x02, /* PKT_TX_IPV4 */ + 0x12, /* PKT_TX_IPV4 | PKT_TX_TCP_CKSUM */ + 0x22, /* PKT_TX_IPV4 | PKT_TX_SCTP_CKSUM */ + 0x32, /* PKT_TX_IPV4 | PKT_TX_UDP_CKSUM */ + 0x03, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM */ + 0x13, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM | + * PKT_TX_TCP_CKSUM + */ + 0x23, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM | + * PKT_TX_SCTP_CKSUM + */ + 0x33, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM | + * PKT_TX_UDP_CKSUM + */ + }; + + /* Extract olflags to translate to iltypes */ + xtmp128 = vzip1q_u64(len_olflags0, len_olflags1); + ytmp128 = vzip1q_u64(len_olflags2, len_olflags3); + + /* + * E(47):L3_LEN(9):L2_LEN(7+z) + * E(47):L3_LEN(9):L2_LEN(7+z) + */ + senddesc01_w1 = vshlq_n_u64(senddesc01_w1, 1); + senddesc23_w1 = vshlq_n_u64(senddesc23_w1, 1); + + /* Move OLFLAGS bits 55:52 to 51:48 + * with zeros preprended on the byte and rest + * don't care + */ + xtmp128 = vshrq_n_u8(xtmp128, 4); + ytmp128 = vshrq_n_u8(ytmp128, 4); + /* + * E(48):L3_LEN(8):L2_LEN(z+7) + * E(48):L3_LEN(8):L2_LEN(z+7) + */ + const int8x16_t tshft3 = { + -1, 0, 8, 8, 8, 8, 8, 8, + -1, 0, 8, 8, 8, 8, 8, 8, + }; + + senddesc01_w1 = vshlq_u8(senddesc01_w1, tshft3); + senddesc23_w1 = vshlq_u8(senddesc23_w1, tshft3); + + /* Do the lookup */ + ltypes01 = vqtbl1q_u8(tbl, xtmp128); + ltypes23 = vqtbl1q_u8(tbl, ytmp128); + + /* Just use ld1q to retrieve aura + * when we don't need tx_offload + */ + mbuf0 = (uint64_t *)((uintptr_t)mbuf0 + + offsetof(struct rte_mempool, pool_id)); + mbuf1 = (uint64_t *)((uintptr_t)mbuf1 + + offsetof(struct rte_mempool, pool_id)); + mbuf2 = (uint64_t *)((uintptr_t)mbuf2 + + offsetof(struct rte_mempool, pool_id)); + mbuf3 = (uint64_t *)((uintptr_t)mbuf3 + + offsetof(struct rte_mempool, pool_id)); + + /* Pick only relevant fields i.e Bit 48:55 of iltype + * and place it in ol3/ol4type of senddesc_w1 + */ + const uint8x16_t shuf_mask0 = { + 0xFF, 0xFF, 0xFF, 0xFF, 0x6, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xE, 0xFF, 0xFF, 0xFF, + }; + + ltypes01 = vqtbl1q_u8(ltypes01, shuf_mask0); + ltypes23 = vqtbl1q_u8(ltypes23, shuf_mask0); + + /* Prepare ol4ptr, ol3ptr from ol3len, ol2len. 
+ * a [E(32):E(16):OL3(8):OL2(8)] + * a = a + (a << 8) + * a [E(32):E(16):(OL3+OL2):OL2] + * => E(32):E(16)::OL4PTR(8):OL3PTR(8) + */ + senddesc01_w1 = vaddq_u8(senddesc01_w1, + vshlq_n_u16(senddesc01_w1, 8)); + senddesc23_w1 = vaddq_u8(senddesc23_w1, + vshlq_n_u16(senddesc23_w1, 8)); + + /* Create first half of 4W cmd for 4 mbufs (sgdesc) */ + cmd01 = vzip1q_u64(sgdesc01_w0, sgdesc01_w1); + cmd11 = vzip2q_u64(sgdesc01_w0, sgdesc01_w1); + cmd21 = vzip1q_u64(sgdesc23_w0, sgdesc23_w1); + cmd31 = vzip2q_u64(sgdesc23_w0, sgdesc23_w1); + + xmask01 = vdupq_n_u64(0); + xmask23 = xmask01; + asm volatile ("LD1 {%[a].H}[0],[%[in]]\n\t" : + [a]"+w"(xmask01) : [in]"r"(mbuf0) : "memory"); + + asm volatile ("LD1 {%[a].H}[4],[%[in]]\n\t" : + [a]"+w"(xmask01) : [in]"r"(mbuf1) : "memory"); + + asm volatile ("LD1 {%[b].H}[0],[%[in]]\n\t" : + [b]"+w"(xmask23) : [in]"r"(mbuf2) : "memory"); + + asm volatile ("LD1 {%[b].H}[4],[%[in]]\n\t" : + [b]"+w"(xmask23) : [in]"r"(mbuf3) : "memory"); + xmask01 = vshlq_n_u64(xmask01, 20); + xmask23 = vshlq_n_u64(xmask23, 20); + + senddesc01_w0 = vorrq_u64(senddesc01_w0, xmask01); + senddesc23_w0 = vorrq_u64(senddesc23_w0, xmask23); + /* Move ltypes to senddesc*_w1 */ + senddesc01_w1 = vorrq_u64(senddesc01_w1, ltypes01); + senddesc23_w1 = vorrq_u64(senddesc23_w1, ltypes23); + + /* Create first half of 4W cmd for 4 mbufs (sendhdr) */ + cmd00 = vzip1q_u64(senddesc01_w0, senddesc01_w1); + cmd10 = vzip2q_u64(senddesc01_w0, senddesc01_w1); + cmd20 = vzip1q_u64(senddesc23_w0, senddesc23_w1); + cmd30 = vzip2q_u64(senddesc23_w0, senddesc23_w1); + + } else if (!(flags & NIX_TX_OFFLOAD_L3_L4_CSUM_F) && + (flags & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)) { + /* + * Lookup table to translate ol_flags to + * ol3/ol4 types. + */ + + const uint8x16_t tbl = { + /* [0-15] = ol4type:ol3type */ + 0x00, /* none */ + 0x03, /* OUTER_IP_CKSUM */ + 0x02, /* OUTER_IPV4 */ + 0x03, /* OUTER_IPV4 | OUTER_IP_CKSUM */ + 0x04, /* OUTER_IPV6 */ + 0x00, /* OUTER_IPV6 | OUTER_IP_CKSUM */ + 0x00, /* OUTER_IPV6 | OUTER_IPV4 */ + 0x00, /* OUTER_IPV6 | OUTER_IPV4 | + * OUTER_IP_CKSUM + */ + 0x00, /* OUTER_UDP_CKSUM */ + 0x33, /* OUTER_UDP_CKSUM | OUTER_IP_CKSUM */ + 0x32, /* OUTER_UDP_CKSUM | OUTER_IPV4 */ + 0x33, /* OUTER_UDP_CKSUM | OUTER_IPV4 | + * OUTER_IP_CKSUM + */ + 0x34, /* OUTER_UDP_CKSUM | OUTER_IPV6 */ + 0x00, /* OUTER_UDP_CKSUM | OUTER_IPV6 | + * OUTER_IP_CKSUM + */ + 0x00, /* OUTER_UDP_CKSUM | OUTER_IPV6 | + * OUTER_IPV4 + */ + 0x00, /* OUTER_UDP_CKSUM | OUTER_IPV6 | + * OUTER_IPV4 | OUTER_IP_CKSUM + */ + }; + + /* Extract olflags to translate to iltypes */ + xtmp128 = vzip1q_u64(len_olflags0, len_olflags1); + ytmp128 = vzip1q_u64(len_olflags2, len_olflags3); + + /* + * E(47):OL3_LEN(9):OL2_LEN(7+z) + * E(47):OL3_LEN(9):OL2_LEN(7+z) + */ + const uint8x16_t shuf_mask5 = { + 0x6, 0x5, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xE, 0xD, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + }; + senddesc01_w1 = vqtbl1q_u8(senddesc01_w1, shuf_mask5); + senddesc23_w1 = vqtbl1q_u8(senddesc23_w1, shuf_mask5); + + /* Extract outer ol flags only */ + const uint64x2_t o_cksum_mask = { + 0x1C00020000000000, + 0x1C00020000000000, + }; + + xtmp128 = vandq_u64(xtmp128, o_cksum_mask); + ytmp128 = vandq_u64(ytmp128, o_cksum_mask); + + /* Extract OUTER_UDP_CKSUM bit 41 and + * move it to bit 61 + */ + + xtmp128 = xtmp128 | vshlq_n_u64(xtmp128, 20); + ytmp128 = ytmp128 | vshlq_n_u64(ytmp128, 20); + + /* Shift oltype by 2 to start nibble from BIT(56) + * instead of BIT(58) + */ + xtmp128 = vshrq_n_u8(xtmp128, 2); + ytmp128 = vshrq_n_u8(ytmp128, 
2); + /* + * E(48):L3_LEN(8):L2_LEN(z+7) + * E(48):L3_LEN(8):L2_LEN(z+7) + */ + const int8x16_t tshft3 = { + -1, 0, 8, 8, 8, 8, 8, 8, + -1, 0, 8, 8, 8, 8, 8, 8, + }; + + senddesc01_w1 = vshlq_u8(senddesc01_w1, tshft3); + senddesc23_w1 = vshlq_u8(senddesc23_w1, tshft3); + + /* Do the lookup */ + ltypes01 = vqtbl1q_u8(tbl, xtmp128); + ltypes23 = vqtbl1q_u8(tbl, ytmp128); + + /* Just use ld1q to retrieve aura + * when we don't need tx_offload + */ + mbuf0 = (uint64_t *)((uintptr_t)mbuf0 + + offsetof(struct rte_mempool, pool_id)); + mbuf1 = (uint64_t *)((uintptr_t)mbuf1 + + offsetof(struct rte_mempool, pool_id)); + mbuf2 = (uint64_t *)((uintptr_t)mbuf2 + + offsetof(struct rte_mempool, pool_id)); + mbuf3 = (uint64_t *)((uintptr_t)mbuf3 + + offsetof(struct rte_mempool, pool_id)); + + /* Pick only relevant fields i.e Bit 56:63 of oltype + * and place it in ol3/ol4type of senddesc_w1 + */ + const uint8x16_t shuf_mask0 = { + 0xFF, 0xFF, 0xFF, 0xFF, 0x7, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xF, 0xFF, 0xFF, 0xFF, + }; + + ltypes01 = vqtbl1q_u8(ltypes01, shuf_mask0); + ltypes23 = vqtbl1q_u8(ltypes23, shuf_mask0); + + /* Prepare ol4ptr, ol3ptr from ol3len, ol2len. + * a [E(32):E(16):OL3(8):OL2(8)] + * a = a + (a << 8) + * a [E(32):E(16):(OL3+OL2):OL2] + * => E(32):E(16)::OL4PTR(8):OL3PTR(8) + */ + senddesc01_w1 = vaddq_u8(senddesc01_w1, + vshlq_n_u16(senddesc01_w1, 8)); + senddesc23_w1 = vaddq_u8(senddesc23_w1, + vshlq_n_u16(senddesc23_w1, 8)); + + /* Create second half of 4W cmd for 4 mbufs (sgdesc) */ + cmd01 = vzip1q_u64(sgdesc01_w0, sgdesc01_w1); + cmd11 = vzip2q_u64(sgdesc01_w0, sgdesc01_w1); + cmd21 = vzip1q_u64(sgdesc23_w0, sgdesc23_w1); + cmd31 = vzip2q_u64(sgdesc23_w0, sgdesc23_w1); + + xmask01 = vdupq_n_u64(0); + xmask23 = xmask01; + asm volatile ("LD1 {%[a].H}[0],[%[in]]\n\t" : + [a]"+w"(xmask01) : [in]"r"(mbuf0) : "memory"); + + asm volatile ("LD1 {%[a].H}[4],[%[in]]\n\t" : + [a]"+w"(xmask01) : [in]"r"(mbuf1) : "memory"); + + asm volatile ("LD1 {%[b].H}[0],[%[in]]\n\t" : + [b]"+w"(xmask23) : [in]"r"(mbuf2) : "memory"); + + asm volatile ("LD1 {%[b].H}[4],[%[in]]\n\t" : + [b]"+w"(xmask23) : [in]"r"(mbuf3) : "memory"); + xmask01 = vshlq_n_u64(xmask01, 20); + xmask23 = vshlq_n_u64(xmask23, 20); + + senddesc01_w0 = vorrq_u64(senddesc01_w0, xmask01); + senddesc23_w0 = vorrq_u64(senddesc23_w0, xmask23); + /* Move ltypes to senddesc*_w1 */ + senddesc01_w1 = vorrq_u64(senddesc01_w1, ltypes01); + senddesc23_w1 = vorrq_u64(senddesc23_w1, ltypes23); + + /* Create first half of 4W cmd for 4 mbufs (sendhdr) */ + cmd00 = vzip1q_u64(senddesc01_w0, senddesc01_w1); + cmd10 = vzip2q_u64(senddesc01_w0, senddesc01_w1); + cmd20 = vzip1q_u64(senddesc23_w0, senddesc23_w1); + cmd30 = vzip2q_u64(senddesc23_w0, senddesc23_w1); + + } else if ((flags & NIX_TX_OFFLOAD_L3_L4_CSUM_F) && + (flags & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)) { + /* Lookup table to translate ol_flags to + * ol4type, ol3type, il4type, il3type of senddesc_w1 + */ + const uint8x16x2_t tbl = { + { + { + /* [0-15] = il4type:il3type */ + 0x04, /* none (IPv6) */ + 0x14, /* PKT_TX_TCP_CKSUM (IPv6) */ + 0x24, /* PKT_TX_SCTP_CKSUM (IPv6) */ + 0x34, /* PKT_TX_UDP_CKSUM (IPv6) */ + 0x03, /* PKT_TX_IP_CKSUM */ + 0x13, /* PKT_TX_IP_CKSUM | + * PKT_TX_TCP_CKSUM + */ + 0x23, /* PKT_TX_IP_CKSUM | + * PKT_TX_SCTP_CKSUM + */ + 0x33, /* PKT_TX_IP_CKSUM | + * PKT_TX_UDP_CKSUM + */ + 0x02, /* PKT_TX_IPV4 */ + 0x12, /* PKT_TX_IPV4 | + * PKT_TX_TCP_CKSUM + */ + 0x22, /* PKT_TX_IPV4 | + * PKT_TX_SCTP_CKSUM + */ + 0x32, /* PKT_TX_IPV4 | + * PKT_TX_UDP_CKSUM + */ + 0x03, 
/* PKT_TX_IPV4 | + * PKT_TX_IP_CKSUM + */ + 0x13, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM | + * PKT_TX_TCP_CKSUM + */ + 0x23, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM | + * PKT_TX_SCTP_CKSUM + */ + 0x33, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM | + * PKT_TX_UDP_CKSUM + */ + }, + + { + /* [16-31] = ol4type:ol3type */ + 0x00, /* none */ + 0x03, /* OUTER_IP_CKSUM */ + 0x02, /* OUTER_IPV4 */ + 0x03, /* OUTER_IPV4 | OUTER_IP_CKSUM */ + 0x04, /* OUTER_IPV6 */ + 0x00, /* OUTER_IPV6 | OUTER_IP_CKSUM */ + 0x00, /* OUTER_IPV6 | OUTER_IPV4 */ + 0x00, /* OUTER_IPV6 | OUTER_IPV4 | + * OUTER_IP_CKSUM + */ + 0x00, /* OUTER_UDP_CKSUM */ + 0x33, /* OUTER_UDP_CKSUM | + * OUTER_IP_CKSUM + */ + 0x32, /* OUTER_UDP_CKSUM | + * OUTER_IPV4 + */ + 0x33, /* OUTER_UDP_CKSUM | + * OUTER_IPV4 | OUTER_IP_CKSUM + */ + 0x34, /* OUTER_UDP_CKSUM | + * OUTER_IPV6 + */ + 0x00, /* OUTER_UDP_CKSUM | OUTER_IPV6 | + * OUTER_IP_CKSUM + */ + 0x00, /* OUTER_UDP_CKSUM | OUTER_IPV6 | + * OUTER_IPV4 + */ + 0x00, /* OUTER_UDP_CKSUM | OUTER_IPV6 | + * OUTER_IPV4 | OUTER_IP_CKSUM + */ + }, + } + }; + + /* Extract olflags to translate to oltype & iltype */ + xtmp128 = vzip1q_u64(len_olflags0, len_olflags1); + ytmp128 = vzip1q_u64(len_olflags2, len_olflags3); + + /* + * E(8):OL2_LN(7):OL3_LN(9):E(23):L3_LN(9):L2_LN(7+z) + * E(8):OL2_LN(7):OL3_LN(9):E(23):L3_LN(9):L2_LN(7+z) + */ + const uint32x4_t tshft_4 = { + 1, 0, + 1, 0, + }; + senddesc01_w1 = vshlq_u32(senddesc01_w1, tshft_4); + senddesc23_w1 = vshlq_u32(senddesc23_w1, tshft_4); + + /* + * E(32):L3_LEN(8):L2_LEN(7+Z):OL3_LEN(8):OL2_LEN(7+Z) + * E(32):L3_LEN(8):L2_LEN(7+Z):OL3_LEN(8):OL2_LEN(7+Z) + */ + const uint8x16_t shuf_mask5 = { + 0x6, 0x5, 0x0, 0x1, 0xFF, 0xFF, 0xFF, 0xFF, + 0xE, 0xD, 0x8, 0x9, 0xFF, 0xFF, 0xFF, 0xFF, + }; + senddesc01_w1 = vqtbl1q_u8(senddesc01_w1, shuf_mask5); + senddesc23_w1 = vqtbl1q_u8(senddesc23_w1, shuf_mask5); + + /* Extract outer and inner header ol_flags */ + const uint64x2_t oi_cksum_mask = { + 0x1CF0020000000000, + 0x1CF0020000000000, + }; + + xtmp128 = vandq_u64(xtmp128, oi_cksum_mask); + ytmp128 = vandq_u64(ytmp128, oi_cksum_mask); + + /* Extract OUTER_UDP_CKSUM bit 41 and + * move it to bit 61 + */ + + xtmp128 = xtmp128 | vshlq_n_u64(xtmp128, 20); + ytmp128 = ytmp128 | vshlq_n_u64(ytmp128, 20); + + /* Shift right oltype by 2 and iltype by 4 + * to start oltype nibble from BIT(58) + * instead of BIT(56) and iltype nibble from BIT(48) + * instead of BIT(52). 
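+ * (vshlq_u8 applies a per-lane variable shift: negative lane
+ * counts shift right, while lanes given a count of 8 are
+ * shifted out to zero.)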
+ */ + const int8x16_t tshft5 = { + 8, 8, 8, 8, 8, 8, -4, -2, + 8, 8, 8, 8, 8, 8, -4, -2, + }; + + xtmp128 = vshlq_u8(xtmp128, tshft5); + ytmp128 = vshlq_u8(ytmp128, tshft5); + /* + * E(32):L3_LEN(8):L2_LEN(8):OL3_LEN(8):OL2_LEN(8) + * E(32):L3_LEN(8):L2_LEN(8):OL3_LEN(8):OL2_LEN(8) + */ + const int8x16_t tshft3 = { + -1, 0, -1, 0, 0, 0, 0, 0, + -1, 0, -1, 0, 0, 0, 0, 0, + }; + + senddesc01_w1 = vshlq_u8(senddesc01_w1, tshft3); + senddesc23_w1 = vshlq_u8(senddesc23_w1, tshft3); + + /* Mark Bit(4) of oltype */ + const uint64x2_t oi_cksum_mask2 = { + 0x1000000000000000, + 0x1000000000000000, + }; + + xtmp128 = vorrq_u64(xtmp128, oi_cksum_mask2); + ytmp128 = vorrq_u64(ytmp128, oi_cksum_mask2); + + /* Do the lookup */ + ltypes01 = vqtbl2q_u8(tbl, xtmp128); + ltypes23 = vqtbl2q_u8(tbl, ytmp128); + + /* Just use ld1q to retrieve aura + * when we don't need tx_offload + */ + mbuf0 = (uint64_t *)((uintptr_t)mbuf0 + + offsetof(struct rte_mempool, pool_id)); + mbuf1 = (uint64_t *)((uintptr_t)mbuf1 + + offsetof(struct rte_mempool, pool_id)); + mbuf2 = (uint64_t *)((uintptr_t)mbuf2 + + offsetof(struct rte_mempool, pool_id)); + mbuf3 = (uint64_t *)((uintptr_t)mbuf3 + + offsetof(struct rte_mempool, pool_id)); + + /* Pick only relevant fields i.e Bit 48:55 of iltype and + * Bit 56:63 of oltype and place it in corresponding + * place in senddesc_w1. + */ + const uint8x16_t shuf_mask0 = { + 0xFF, 0xFF, 0xFF, 0xFF, 0x7, 0x6, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xF, 0xE, 0xFF, 0xFF, + }; + + ltypes01 = vqtbl1q_u8(ltypes01, shuf_mask0); + ltypes23 = vqtbl1q_u8(ltypes23, shuf_mask0); + + /* Prepare l4ptr, l3ptr, ol4ptr, ol3ptr from + * l3len, l2len, ol3len, ol2len. + * a [E(32):L3(8):L2(8):OL3(8):OL2(8)] + * a = a + (a << 8) + * a [E:(L3+L2):(L2+OL3):(OL3+OL2):OL2] + * a = a + (a << 16) + * a [E:(L3+L2+OL3+OL2):(L2+OL3+OL2):(OL3+OL2):OL2] + * => E(32):IL4PTR(8):IL3PTR(8):OL4PTR(8):OL3PTR(8) + */ + senddesc01_w1 = vaddq_u8(senddesc01_w1, + vshlq_n_u32(senddesc01_w1, 8)); + senddesc23_w1 = vaddq_u8(senddesc23_w1, + vshlq_n_u32(senddesc23_w1, 8)); + + /* Create second half of 4W cmd for 4 mbufs (sgdesc) */ + cmd01 = vzip1q_u64(sgdesc01_w0, sgdesc01_w1); + cmd11 = vzip2q_u64(sgdesc01_w0, sgdesc01_w1); + cmd21 = vzip1q_u64(sgdesc23_w0, sgdesc23_w1); + cmd31 = vzip2q_u64(sgdesc23_w0, sgdesc23_w1); + + /* Continue preparing l4ptr, l3ptr, ol4ptr, ol3ptr */ + senddesc01_w1 = vaddq_u8(senddesc01_w1, + vshlq_n_u32(senddesc01_w1, 16)); + senddesc23_w1 = vaddq_u8(senddesc23_w1, + vshlq_n_u32(senddesc23_w1, 16)); + + xmask01 = vdupq_n_u64(0); + xmask23 = xmask01; + asm volatile ("LD1 {%[a].H}[0],[%[in]]\n\t" : + [a]"+w"(xmask01) : [in]"r"(mbuf0) : "memory"); + + asm volatile ("LD1 {%[a].H}[4],[%[in]]\n\t" : + [a]"+w"(xmask01) : [in]"r"(mbuf1) : "memory"); + + asm volatile ("LD1 {%[b].H}[0],[%[in]]\n\t" : + [b]"+w"(xmask23) : [in]"r"(mbuf2) : "memory"); + + asm volatile ("LD1 {%[b].H}[4],[%[in]]\n\t" : + [b]"+w"(xmask23) : [in]"r"(mbuf3) : "memory"); + xmask01 = vshlq_n_u64(xmask01, 20); + xmask23 = vshlq_n_u64(xmask23, 20); + + senddesc01_w0 = vorrq_u64(senddesc01_w0, xmask01); + senddesc23_w0 = vorrq_u64(senddesc23_w0, xmask23); + /* Move ltypes to senddesc*_w1 */ + senddesc01_w1 = vorrq_u64(senddesc01_w1, ltypes01); + senddesc23_w1 = vorrq_u64(senddesc23_w1, ltypes23); + + /* Create first half of 4W cmd for 4 mbufs (sendhdr) */ + cmd00 = vzip1q_u64(senddesc01_w0, senddesc01_w1); + cmd10 = vzip2q_u64(senddesc01_w0, senddesc01_w1); + cmd20 = vzip1q_u64(senddesc23_w0, senddesc23_w1); + cmd30 = vzip2q_u64(senddesc23_w0, 
senddesc23_w1); + } else { + /* Just use ld1q to retrieve aura + * when we don't need tx_offload + */ + mbuf0 = (uint64_t *)((uintptr_t)mbuf0 + + offsetof(struct rte_mempool, pool_id)); + mbuf1 = (uint64_t *)((uintptr_t)mbuf1 + + offsetof(struct rte_mempool, pool_id)); + mbuf2 = (uint64_t *)((uintptr_t)mbuf2 + + offsetof(struct rte_mempool, pool_id)); + mbuf3 = (uint64_t *)((uintptr_t)mbuf3 + + offsetof(struct rte_mempool, pool_id)); + xmask01 = vdupq_n_u64(0); + xmask23 = xmask01; + asm volatile ("LD1 {%[a].H}[0],[%[in]]\n\t" : + [a]"+w"(xmask01) : [in]"r"(mbuf0) : "memory"); + + asm volatile ("LD1 {%[a].H}[4],[%[in]]\n\t" : + [a]"+w"(xmask01) : [in]"r"(mbuf1) : "memory"); + + asm volatile ("LD1 {%[b].H}[0],[%[in]]\n\t" : + [b]"+w"(xmask23) : [in]"r"(mbuf2) : "memory"); + + asm volatile ("LD1 {%[b].H}[4],[%[in]]\n\t" : + [b]"+w"(xmask23) : [in]"r"(mbuf3) : "memory"); + xmask01 = vshlq_n_u64(xmask01, 20); + xmask23 = vshlq_n_u64(xmask23, 20); + + senddesc01_w0 = vorrq_u64(senddesc01_w0, xmask01); + senddesc23_w0 = vorrq_u64(senddesc23_w0, xmask23); + + /* Create 4W cmd for 4 mbufs (sendhdr, sgdesc) */ + cmd00 = vzip1q_u64(senddesc01_w0, senddesc01_w1); + cmd01 = vzip1q_u64(sgdesc01_w0, sgdesc01_w1); + cmd10 = vzip2q_u64(senddesc01_w0, senddesc01_w1); + cmd11 = vzip2q_u64(sgdesc01_w0, sgdesc01_w1); + cmd20 = vzip1q_u64(senddesc23_w0, senddesc23_w1); + cmd21 = vzip1q_u64(sgdesc23_w0, sgdesc23_w1); + cmd30 = vzip2q_u64(senddesc23_w0, senddesc23_w1); + cmd31 = vzip2q_u64(sgdesc23_w0, sgdesc23_w1); + } + + do { + vst1q_u64(lmt_addr, cmd00); + vst1q_u64(lmt_addr + 2, cmd01); + vst1q_u64(lmt_addr + 4, cmd10); + vst1q_u64(lmt_addr + 6, cmd11); + vst1q_u64(lmt_addr + 8, cmd20); + vst1q_u64(lmt_addr + 10, cmd21); + vst1q_u64(lmt_addr + 12, cmd30); + vst1q_u64(lmt_addr + 14, cmd31); + lmt_status = otx2_lmt_submit(io_addr); + + } while (lmt_status == 0); + tx_pkts = tx_pkts + NIX_DESCS_PER_LOOP; + } + + if (unlikely(pkts_left)) + pkts += nix_xmit_pkts(tx_queue, tx_pkts, pkts_left, cmd, flags); + + return pkts; +} + +#else +static __rte_always_inline uint16_t +nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t pkts, uint64_t *cmd, const uint16_t flags) +{ + RTE_SET_USED(tx_queue); + RTE_SET_USED(tx_pkts); + RTE_SET_USED(pkts); + RTE_SET_USED(cmd); + RTE_SET_USED(flags); + return 0; +} +#endif + +#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags) \ +static uint16_t __rte_noinline __rte_hot \ +otx2_nix_xmit_pkts_ ## name(void *tx_queue, \ + struct rte_mbuf **tx_pkts, uint16_t pkts) \ +{ \ + uint64_t cmd[sz]; \ + \ + /* For TSO inner checksum is a must */ \ + if (((flags) & NIX_TX_OFFLOAD_TSO_F) && \ + !((flags) & NIX_TX_OFFLOAD_L3_L4_CSUM_F)) \ + return 0; \ + return nix_xmit_pkts(tx_queue, tx_pkts, pkts, cmd, flags); \ +} + +NIX_TX_FASTPATH_MODES +#undef T + +#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags) \ +static uint16_t __rte_noinline __rte_hot \ +otx2_nix_xmit_pkts_mseg_ ## name(void *tx_queue, \ + struct rte_mbuf **tx_pkts, uint16_t pkts) \ +{ \ + uint64_t cmd[(sz) + NIX_TX_MSEG_SG_DWORDS - 2]; \ + \ + /* For TSO inner checksum is a must */ \ + if (((flags) & NIX_TX_OFFLOAD_TSO_F) && \ + !((flags) & NIX_TX_OFFLOAD_L3_L4_CSUM_F)) \ + return 0; \ + return nix_xmit_pkts_mseg(tx_queue, tx_pkts, pkts, cmd, \ + (flags) | NIX_TX_MULTI_SEG_F); \ +} + +NIX_TX_FASTPATH_MODES +#undef T + +#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags) \ +static uint16_t __rte_noinline __rte_hot \ +otx2_nix_xmit_pkts_vec_ ## name(void *tx_queue, \ + struct rte_mbuf **tx_pkts, 
uint16_t pkts) \ +{ \ + uint64_t cmd[sz]; \ + \ + /* VLAN, TSTMP, TSO is not supported by vec */ \ + if ((flags) & NIX_TX_OFFLOAD_VLAN_QINQ_F || \ + (flags) & NIX_TX_OFFLOAD_TSTAMP_F || \ + (flags) & NIX_TX_OFFLOAD_TSO_F) \ + return 0; \ + return nix_xmit_pkts_vector(tx_queue, tx_pkts, pkts, cmd, (flags)); \ +} + +NIX_TX_FASTPATH_MODES +#undef T + +static inline void +pick_tx_func(struct rte_eth_dev *eth_dev, + const eth_tx_burst_t tx_burst[2][2][2][2][2][2][2]) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + + /* [SEC] [TSTMP] [NOFF] [VLAN] [OL3_OL4_CSUM] [IL3_IL4_CSUM] */ + eth_dev->tx_pkt_burst = tx_burst + [!!(dev->tx_offload_flags & NIX_TX_OFFLOAD_SECURITY_F)] + [!!(dev->tx_offload_flags & NIX_TX_OFFLOAD_TSO_F)] + [!!(dev->tx_offload_flags & NIX_TX_OFFLOAD_TSTAMP_F)] + [!!(dev->tx_offload_flags & NIX_TX_OFFLOAD_MBUF_NOFF_F)] + [!!(dev->tx_offload_flags & NIX_TX_OFFLOAD_VLAN_QINQ_F)] + [!!(dev->tx_offload_flags & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)] + [!!(dev->tx_offload_flags & NIX_TX_OFFLOAD_L3_L4_CSUM_F)]; +} + +void +otx2_eth_set_tx_function(struct rte_eth_dev *eth_dev) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + + const eth_tx_burst_t nix_eth_tx_burst[2][2][2][2][2][2][2] = { +#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags) \ + [f6][f5][f4][f3][f2][f1][f0] = otx2_nix_xmit_pkts_ ## name, + +NIX_TX_FASTPATH_MODES +#undef T + }; + + const eth_tx_burst_t nix_eth_tx_burst_mseg[2][2][2][2][2][2][2] = { +#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags) \ + [f6][f5][f4][f3][f2][f1][f0] = otx2_nix_xmit_pkts_mseg_ ## name, + +NIX_TX_FASTPATH_MODES +#undef T + }; + + const eth_tx_burst_t nix_eth_tx_vec_burst[2][2][2][2][2][2][2] = { +#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags) \ + [f6][f5][f4][f3][f2][f1][f0] = otx2_nix_xmit_pkts_vec_ ## name, + +NIX_TX_FASTPATH_MODES +#undef T + }; + + if (dev->scalar_ena || + (dev->tx_offload_flags & + (NIX_TX_OFFLOAD_VLAN_QINQ_F | NIX_TX_OFFLOAD_TSTAMP_F | + NIX_TX_OFFLOAD_TSO_F))) + pick_tx_func(eth_dev, nix_eth_tx_burst); + else + pick_tx_func(eth_dev, nix_eth_tx_vec_burst); + + if (dev->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS) + pick_tx_func(eth_dev, nix_eth_tx_burst_mseg); + + rte_mb(); +} diff --git a/src/spdk/dpdk/drivers/net/octeontx2/otx2_tx.h b/src/spdk/dpdk/drivers/net/octeontx2/otx2_tx.h new file mode 100644 index 000000000..3c4317092 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/octeontx2/otx2_tx.h @@ -0,0 +1,744 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(C) 2019 Marvell International Ltd. + */ + +#ifndef __OTX2_TX_H__ +#define __OTX2_TX_H__ + +#define NIX_TX_OFFLOAD_NONE (0) +#define NIX_TX_OFFLOAD_L3_L4_CSUM_F BIT(0) +#define NIX_TX_OFFLOAD_OL3_OL4_CSUM_F BIT(1) +#define NIX_TX_OFFLOAD_VLAN_QINQ_F BIT(2) +#define NIX_TX_OFFLOAD_MBUF_NOFF_F BIT(3) +#define NIX_TX_OFFLOAD_TSTAMP_F BIT(4) +#define NIX_TX_OFFLOAD_TSO_F BIT(5) +#define NIX_TX_OFFLOAD_SECURITY_F BIT(6) + +/* Flags to control xmit_prepare function. 
+ * It is defined from the end (backwards) to denote that it is
+ * not used as an offload flag when picking the Tx function.
+ */
+#define NIX_TX_MULTI_SEG_F BIT(15)
+
+#define NIX_TX_NEED_SEND_HDR_W1 \
+ (NIX_TX_OFFLOAD_L3_L4_CSUM_F | NIX_TX_OFFLOAD_OL3_OL4_CSUM_F | \
+ NIX_TX_OFFLOAD_VLAN_QINQ_F | NIX_TX_OFFLOAD_TSO_F)
+
+#define NIX_TX_NEED_EXT_HDR \
+ (NIX_TX_OFFLOAD_VLAN_QINQ_F | NIX_TX_OFFLOAD_TSTAMP_F | \
+ NIX_TX_OFFLOAD_TSO_F)
+
+#define NIX_UDP_TUN_BITMASK \
+ ((1ull << (PKT_TX_TUNNEL_VXLAN >> 45)) | \
+ (1ull << (PKT_TX_TUNNEL_GENEVE >> 45)))
+
+#define NIX_LSO_FORMAT_IDX_TSOV4 (0)
+#define NIX_LSO_FORMAT_IDX_TSOV6 (1)
+
+/* Function to determine the number of Tx subdescriptors required
+ * when the extended sub descriptor is enabled.
+ */
+static __rte_always_inline int
+otx2_nix_tx_ext_subs(const uint16_t flags)
+{
+ return (flags & NIX_TX_OFFLOAD_TSTAMP_F) ? 2 :
+ ((flags & (NIX_TX_OFFLOAD_VLAN_QINQ_F | NIX_TX_OFFLOAD_TSO_F)) ?
+ 1 : 0);
+}
+
+static __rte_always_inline void
+otx2_nix_xmit_prepare_tstamp(uint64_t *cmd, const uint64_t *send_mem_desc,
+ const uint64_t ol_flags, const uint16_t no_segdw,
+ const uint16_t flags)
+{
+ if (flags & NIX_TX_OFFLOAD_TSTAMP_F) {
+ struct nix_send_mem_s *send_mem;
+ uint16_t off = (no_segdw - 1) << 1;
+ const uint8_t is_ol_tstamp = !(ol_flags & PKT_TX_IEEE1588_TMST);
+
+ send_mem = (struct nix_send_mem_s *)(cmd + off);
+ if (flags & NIX_TX_MULTI_SEG_F) {
+ /* Retrieving the default desc values */
+ cmd[off] = send_mem_desc[6];
+
+ /* Use a compiler barrier to avoid violation of C
+ * aliasing rules.
+ */
+ rte_compiler_barrier();
+ }
+
+ /* For packets for which PKT_TX_IEEE1588_TMST is not set, the tx
+ * tstamp should not be recorded; hence change the alg type to
+ * NIX_SENDMEMALG_SET and also move the send mem addr field to
+ * the next 8 bytes so that the actual registered tx tstamp
+ * address is not corrupted.
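+ * (send_mem_desc[7] is treated as a uint64_t pointer, so adding
+ * is_ol_tstamp advances the programmed address by 8 bytes.)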
+ */ + send_mem->alg = NIX_SENDMEMALG_SETTSTMP - (is_ol_tstamp); + + send_mem->addr = (rte_iova_t)((uint64_t *)send_mem_desc[7] + + (is_ol_tstamp)); + } +} + +static __rte_always_inline uint64_t +otx2_pktmbuf_detach(struct rte_mbuf *m) +{ + struct rte_mempool *mp = m->pool; + uint32_t mbuf_size, buf_len; + struct rte_mbuf *md; + uint16_t priv_size; + uint16_t refcount; + + /* Update refcount of direct mbuf */ + md = rte_mbuf_from_indirect(m); + refcount = rte_mbuf_refcnt_update(md, -1); + + priv_size = rte_pktmbuf_priv_size(mp); + mbuf_size = (uint32_t)(sizeof(struct rte_mbuf) + priv_size); + buf_len = rte_pktmbuf_data_room_size(mp); + + m->priv_size = priv_size; + m->buf_addr = (char *)m + mbuf_size; + m->buf_iova = rte_mempool_virt2iova(m) + mbuf_size; + m->buf_len = (uint16_t)buf_len; + rte_pktmbuf_reset_headroom(m); + m->data_len = 0; + m->ol_flags = 0; + m->next = NULL; + m->nb_segs = 1; + + /* Now indirect mbuf is safe to free */ + rte_pktmbuf_free(m); + + if (refcount == 0) { + rte_mbuf_refcnt_set(md, 1); + md->data_len = 0; + md->ol_flags = 0; + md->next = NULL; + md->nb_segs = 1; + return 0; + } else { + return 1; + } +} + +static __rte_always_inline uint64_t +otx2_nix_prefree_seg(struct rte_mbuf *m) +{ + if (likely(rte_mbuf_refcnt_read(m) == 1)) { + if (!RTE_MBUF_DIRECT(m)) + return otx2_pktmbuf_detach(m); + + m->next = NULL; + m->nb_segs = 1; + return 0; + } else if (rte_mbuf_refcnt_update(m, -1) == 0) { + if (!RTE_MBUF_DIRECT(m)) + return otx2_pktmbuf_detach(m); + + rte_mbuf_refcnt_set(m, 1); + m->next = NULL; + m->nb_segs = 1; + return 0; + } + + /* Mbuf is having refcount more than 1 so need not to be freed */ + return 1; +} + +static __rte_always_inline void +otx2_nix_xmit_prepare_tso(struct rte_mbuf *m, const uint64_t flags) +{ + uint64_t mask, ol_flags = m->ol_flags; + + if (flags & NIX_TX_OFFLOAD_TSO_F && + (ol_flags & PKT_TX_TCP_SEG)) { + uintptr_t mdata = rte_pktmbuf_mtod(m, uintptr_t); + uint16_t *iplen, *oiplen, *oudplen; + uint16_t lso_sb, paylen; + + mask = -!!(ol_flags & (PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IPV6)); + lso_sb = (mask & (m->outer_l2_len + m->outer_l3_len)) + + m->l2_len + m->l3_len + m->l4_len; + + /* Reduce payload len from base headers */ + paylen = m->pkt_len - lso_sb; + + /* Get iplen position assuming no tunnel hdr */ + iplen = (uint16_t *)(mdata + m->l2_len + + (2 << !!(ol_flags & PKT_TX_IPV6))); + /* Handle tunnel tso */ + if ((flags & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F) && + (ol_flags & PKT_TX_TUNNEL_MASK)) { + const uint8_t is_udp_tun = (NIX_UDP_TUN_BITMASK >> + ((ol_flags & PKT_TX_TUNNEL_MASK) >> 45)) & 0x1; + + oiplen = (uint16_t *)(mdata + m->outer_l2_len + + (2 << !!(ol_flags & PKT_TX_OUTER_IPV6))); + *oiplen = rte_cpu_to_be_16(rte_be_to_cpu_16(*oiplen) - + paylen); + + /* Update format for UDP tunneled packet */ + if (is_udp_tun) { + oudplen = (uint16_t *)(mdata + m->outer_l2_len + + m->outer_l3_len + 4); + *oudplen = + rte_cpu_to_be_16(rte_be_to_cpu_16(*oudplen) - + paylen); + } + + /* Update iplen position to inner ip hdr */ + iplen = (uint16_t *)(mdata + lso_sb - m->l3_len - + m->l4_len + (2 << !!(ol_flags & PKT_TX_IPV6))); + } + + *iplen = rte_cpu_to_be_16(rte_be_to_cpu_16(*iplen) - paylen); + } +} + +static __rte_always_inline void +otx2_nix_xmit_prepare(struct rte_mbuf *m, uint64_t *cmd, const uint16_t flags) +{ + struct nix_send_ext_s *send_hdr_ext; + struct nix_send_hdr_s *send_hdr; + uint64_t ol_flags = 0, mask; + union nix_send_hdr_w1_u w1; + union nix_send_sg_s *sg; + + send_hdr = (struct nix_send_hdr_s *)cmd; + if (flags & 
NIX_TX_NEED_EXT_HDR) { + send_hdr_ext = (struct nix_send_ext_s *)(cmd + 2); + sg = (union nix_send_sg_s *)(cmd + 4); + /* Clear previous markings */ + send_hdr_ext->w0.lso = 0; + send_hdr_ext->w1.u = 0; + } else { + sg = (union nix_send_sg_s *)(cmd + 2); + } + + if (flags & NIX_TX_NEED_SEND_HDR_W1) { + ol_flags = m->ol_flags; + w1.u = 0; + } + + if (!(flags & NIX_TX_MULTI_SEG_F)) { + send_hdr->w0.total = m->data_len; + send_hdr->w0.aura = + npa_lf_aura_handle_to_aura(m->pool->pool_id); + } + + /* + * L3type: 2 => IPV4 + * 3 => IPV4 with csum + * 4 => IPV6 + * L3type and L3ptr needs to be set for either + * L3 csum or L4 csum or LSO + * + */ + + if ((flags & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F) && + (flags & NIX_TX_OFFLOAD_L3_L4_CSUM_F)) { + const uint8_t csum = !!(ol_flags & PKT_TX_OUTER_UDP_CKSUM); + const uint8_t ol3type = + ((!!(ol_flags & PKT_TX_OUTER_IPV4)) << 1) + + ((!!(ol_flags & PKT_TX_OUTER_IPV6)) << 2) + + !!(ol_flags & PKT_TX_OUTER_IP_CKSUM); + + /* Outer L3 */ + w1.ol3type = ol3type; + mask = 0xffffull << ((!!ol3type) << 4); + w1.ol3ptr = ~mask & m->outer_l2_len; + w1.ol4ptr = ~mask & (w1.ol3ptr + m->outer_l3_len); + + /* Outer L4 */ + w1.ol4type = csum + (csum << 1); + + /* Inner L3 */ + w1.il3type = ((!!(ol_flags & PKT_TX_IPV4)) << 1) + + ((!!(ol_flags & PKT_TX_IPV6)) << 2); + w1.il3ptr = w1.ol4ptr + m->l2_len; + w1.il4ptr = w1.il3ptr + m->l3_len; + /* Increment it by 1 if it is IPV4 as 3 is with csum */ + w1.il3type = w1.il3type + !!(ol_flags & PKT_TX_IP_CKSUM); + + /* Inner L4 */ + w1.il4type = (ol_flags & PKT_TX_L4_MASK) >> 52; + + /* In case of no tunnel header use only + * shift IL3/IL4 fields a bit to use + * OL3/OL4 for header checksum + */ + mask = !ol3type; + w1.u = ((w1.u & 0xFFFFFFFF00000000) >> (mask << 3)) | + ((w1.u & 0X00000000FFFFFFFF) >> (mask << 4)); + + } else if (flags & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F) { + const uint8_t csum = !!(ol_flags & PKT_TX_OUTER_UDP_CKSUM); + const uint8_t outer_l2_len = m->outer_l2_len; + + /* Outer L3 */ + w1.ol3ptr = outer_l2_len; + w1.ol4ptr = outer_l2_len + m->outer_l3_len; + /* Increment it by 1 if it is IPV4 as 3 is with csum */ + w1.ol3type = ((!!(ol_flags & PKT_TX_OUTER_IPV4)) << 1) + + ((!!(ol_flags & PKT_TX_OUTER_IPV6)) << 2) + + !!(ol_flags & PKT_TX_OUTER_IP_CKSUM); + + /* Outer L4 */ + w1.ol4type = csum + (csum << 1); + + } else if (flags & NIX_TX_OFFLOAD_L3_L4_CSUM_F) { + const uint8_t l2_len = m->l2_len; + + /* Always use OLXPTR and OLXTYPE when only + * when one header is present + */ + + /* Inner L3 */ + w1.ol3ptr = l2_len; + w1.ol4ptr = l2_len + m->l3_len; + /* Increment it by 1 if it is IPV4 as 3 is with csum */ + w1.ol3type = ((!!(ol_flags & PKT_TX_IPV4)) << 1) + + ((!!(ol_flags & PKT_TX_IPV6)) << 2) + + !!(ol_flags & PKT_TX_IP_CKSUM); + + /* Inner L4 */ + w1.ol4type = (ol_flags & PKT_TX_L4_MASK) >> 52; + } + + if (flags & NIX_TX_NEED_EXT_HDR && + flags & NIX_TX_OFFLOAD_VLAN_QINQ_F) { + send_hdr_ext->w1.vlan1_ins_ena = !!(ol_flags & PKT_TX_VLAN); + /* HW will update ptr after vlan0 update */ + send_hdr_ext->w1.vlan1_ins_ptr = 12; + send_hdr_ext->w1.vlan1_ins_tci = m->vlan_tci; + + send_hdr_ext->w1.vlan0_ins_ena = !!(ol_flags & PKT_TX_QINQ); + /* 2B before end of l2 header */ + send_hdr_ext->w1.vlan0_ins_ptr = 12; + send_hdr_ext->w1.vlan0_ins_tci = m->vlan_tci_outer; + } + + if (flags & NIX_TX_OFFLOAD_TSO_F && + (ol_flags & PKT_TX_TCP_SEG)) { + uint16_t lso_sb; + uint64_t mask; + + mask = -(!w1.il3type); + lso_sb = (mask & w1.ol4ptr) + (~mask & w1.il4ptr) + m->l4_len; + + send_hdr_ext->w0.lso_sb = lso_sb; + 
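/* lso_sb is the LSO start byte: the length of all headers, up to and including the (inner) L4 header, that precede the TCP payload to be segmented. */ +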
send_hdr_ext->w0.lso = 1; + send_hdr_ext->w0.lso_mps = m->tso_segsz; + send_hdr_ext->w0.lso_format = + NIX_LSO_FORMAT_IDX_TSOV4 + !!(ol_flags & PKT_TX_IPV6); + w1.ol4type = NIX_SENDL4TYPE_TCP_CKSUM; + + /* Handle tunnel tso */ + if ((flags & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F) && + (ol_flags & PKT_TX_TUNNEL_MASK)) { + const uint8_t is_udp_tun = (NIX_UDP_TUN_BITMASK >> + ((ol_flags & PKT_TX_TUNNEL_MASK) >> 45)) & 0x1; + + w1.il4type = NIX_SENDL4TYPE_TCP_CKSUM; + w1.ol4type = is_udp_tun ? NIX_SENDL4TYPE_UDP_CKSUM : 0; + /* Update format for UDP tunneled packet */ + send_hdr_ext->w0.lso_format += is_udp_tun ? 2 : 6; + + send_hdr_ext->w0.lso_format += + !!(ol_flags & PKT_TX_OUTER_IPV6) << 1; + } + } + + if (flags & NIX_TX_NEED_SEND_HDR_W1) + send_hdr->w1.u = w1.u; + + if (!(flags & NIX_TX_MULTI_SEG_F)) { + sg->seg1_size = m->data_len; + *(rte_iova_t *)(++sg) = rte_mbuf_data_iova(m); + + if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) { + /* DF bit = 1 if refcount of current mbuf or parent mbuf + * is greater than 1 + * DF bit = 0 otherwise + */ + send_hdr->w0.df = otx2_nix_prefree_seg(m); + } + /* Mark mempool object as "put" since it is freed by NIX */ + if (!send_hdr->w0.df) + __mempool_check_cookies(m->pool, (void **)&m, 1, 0); + } +} + + +static __rte_always_inline void +otx2_nix_xmit_one(uint64_t *cmd, void *lmt_addr, + const rte_iova_t io_addr, const uint32_t flags) +{ + uint64_t lmt_status; + + do { + otx2_lmt_mov(lmt_addr, cmd, otx2_nix_tx_ext_subs(flags)); + lmt_status = otx2_lmt_submit(io_addr); + } while (lmt_status == 0); +} + +static __rte_always_inline uint16_t +otx2_nix_prepare_mseg(struct rte_mbuf *m, uint64_t *cmd, const uint16_t flags) +{ + struct nix_send_hdr_s *send_hdr; + union nix_send_sg_s *sg; + struct rte_mbuf *m_next; + uint64_t *slist, sg_u; + uint64_t nb_segs; + uint64_t segdw; + uint8_t off, i; + + send_hdr = (struct nix_send_hdr_s *)cmd; + send_hdr->w0.total = m->pkt_len; + send_hdr->w0.aura = npa_lf_aura_handle_to_aura(m->pool->pool_id); + + if (flags & NIX_TX_NEED_EXT_HDR) + off = 2; + else + off = 0; + + sg = (union nix_send_sg_s *)&cmd[2 + off]; + /* Clear sg->u header before use */ + sg->u &= 0xFC00000000000000; + sg_u = sg->u; + slist = &cmd[3 + off]; + + i = 0; + nb_segs = m->nb_segs; + + /* Fill mbuf segments */ + do { + m_next = m->next; + sg_u = sg_u | ((uint64_t)m->data_len << (i << 4)); + *slist = rte_mbuf_data_iova(m); + /* Set invert df if buffer is not to be freed by H/W */ + if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) + sg_u |= (otx2_nix_prefree_seg(m) << (i + 55)); + /* Mark mempool object as "put" since it is freed by NIX */ + if (!(sg_u & (1ULL << (i + 55)))) { + m->next = NULL; + __mempool_check_cookies(m->pool, (void **)&m, 1, 0); + } + slist++; + i++; + nb_segs--; + if (i > 2 && nb_segs) { + i = 0; + /* Next SG subdesc */ + *(uint64_t *)slist = sg_u & 0xFC00000000000000; + sg->u = sg_u; + sg->segs = 3; + sg = (union nix_send_sg_s *)slist; + sg_u = sg->u; + slist++; + } + m = m_next; + } while (nb_segs); + + sg->u = sg_u; + sg->segs = i; + segdw = (uint64_t *)slist - (uint64_t *)&cmd[2 + off]; + /* Roundup extra dwords to multiple of 2 */ + segdw = (segdw >> 1) + (segdw & 0x1); + /* Default dwords */ + segdw += (off >> 1) + 1 + !!(flags & NIX_TX_OFFLOAD_TSTAMP_F); + send_hdr->w0.sizem1 = segdw - 1; + + return segdw; +} + +static __rte_always_inline void +otx2_nix_xmit_mseg_one(uint64_t *cmd, void *lmt_addr, + rte_iova_t io_addr, uint16_t segdw) +{ + uint64_t lmt_status; + + do { + otx2_lmt_mov_seg(lmt_addr, (const void *)cmd, segdw); + lmt_status = 
otx2_lmt_submit(io_addr); + } while (lmt_status == 0); +} + +#define L3L4CSUM_F NIX_TX_OFFLOAD_L3_L4_CSUM_F +#define OL3OL4CSUM_F NIX_TX_OFFLOAD_OL3_OL4_CSUM_F +#define VLAN_F NIX_TX_OFFLOAD_VLAN_QINQ_F +#define NOFF_F NIX_TX_OFFLOAD_MBUF_NOFF_F +#define TSP_F NIX_TX_OFFLOAD_TSTAMP_F +#define TSO_F NIX_TX_OFFLOAD_TSO_F +#define TX_SEC_F NIX_TX_OFFLOAD_SECURITY_F + +/* [SEC] [TSO] [TSTMP] [NOFF] [VLAN] [OL3OL4CSUM] [L3L4CSUM] */ +#define NIX_TX_FASTPATH_MODES \ +T(no_offload, 0, 0, 0, 0, 0, 0, 0, 4, \ + NIX_TX_OFFLOAD_NONE) \ +T(l3l4csum, 0, 0, 0, 0, 0, 0, 1, 4, \ + L3L4CSUM_F) \ +T(ol3ol4csum, 0, 0, 0, 0, 0, 1, 0, 4, \ + OL3OL4CSUM_F) \ +T(ol3ol4csum_l3l4csum, 0, 0, 0, 0, 0, 1, 1, 4, \ + OL3OL4CSUM_F | L3L4CSUM_F) \ +T(vlan, 0, 0, 0, 0, 1, 0, 0, 6, \ + VLAN_F) \ +T(vlan_l3l4csum, 0, 0, 0, 0, 1, 0, 1, 6, \ + VLAN_F | L3L4CSUM_F) \ +T(vlan_ol3ol4csum, 0, 0, 0, 0, 1, 1, 0, 6, \ + VLAN_F | OL3OL4CSUM_F) \ +T(vlan_ol3ol4csum_l3l4csum, 0, 0, 0, 0, 1, 1, 1, 6, \ + VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F) \ +T(noff, 0, 0, 0, 1, 0, 0, 0, 4, \ + NOFF_F) \ +T(noff_l3l4csum, 0, 0, 0, 1, 0, 0, 1, 4, \ + NOFF_F | L3L4CSUM_F) \ +T(noff_ol3ol4csum, 0, 0, 0, 1, 0, 1, 0, 4, \ + NOFF_F | OL3OL4CSUM_F) \ +T(noff_ol3ol4csum_l3l4csum, 0, 0, 0, 1, 0, 1, 1, 4, \ + NOFF_F | OL3OL4CSUM_F | L3L4CSUM_F) \ +T(noff_vlan, 0, 0, 0, 1, 1, 0, 0, 6, \ + NOFF_F | VLAN_F) \ +T(noff_vlan_l3l4csum, 0, 0, 0, 1, 1, 0, 1, 6, \ + NOFF_F | VLAN_F | L3L4CSUM_F) \ +T(noff_vlan_ol3ol4csum, 0, 0, 0, 1, 1, 1, 0, 6, \ + NOFF_F | VLAN_F | OL3OL4CSUM_F) \ +T(noff_vlan_ol3ol4csum_l3l4csum, 0, 0, 0, 1, 1, 1, 1, 6, \ + NOFF_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F) \ +T(ts, 0, 0, 1, 0, 0, 0, 0, 8, \ + TSP_F) \ +T(ts_l3l4csum, 0, 0, 1, 0, 0, 0, 1, 8, \ + TSP_F | L3L4CSUM_F) \ +T(ts_ol3ol4csum, 0, 0, 1, 0, 0, 1, 0, 8, \ + TSP_F | OL3OL4CSUM_F) \ +T(ts_ol3ol4csum_l3l4csum, 0, 0, 1, 0, 0, 1, 1, 8, \ + TSP_F | OL3OL4CSUM_F | L3L4CSUM_F) \ +T(ts_vlan, 0, 0, 1, 0, 1, 0, 0, 8, \ + TSP_F | VLAN_F) \ +T(ts_vlan_l3l4csum, 0, 0, 1, 0, 1, 0, 1, 8, \ + TSP_F | VLAN_F | L3L4CSUM_F) \ +T(ts_vlan_ol3ol4csum, 0, 0, 1, 0, 1, 1, 0, 8, \ + TSP_F | VLAN_F | OL3OL4CSUM_F) \ +T(ts_vlan_ol3ol4csum_l3l4csum, 0, 0, 1, 0, 1, 1, 1, 8, \ + TSP_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F) \ +T(ts_noff, 0, 0, 1, 1, 0, 0, 0, 8, \ + TSP_F | NOFF_F) \ +T(ts_noff_l3l4csum, 0, 0, 1, 1, 0, 0, 1, 8, \ + TSP_F | NOFF_F | L3L4CSUM_F) \ +T(ts_noff_ol3ol4csum, 0, 0, 1, 1, 0, 1, 0, 8, \ + TSP_F | NOFF_F | OL3OL4CSUM_F) \ +T(ts_noff_ol3ol4csum_l3l4csum, 0, 0, 1, 1, 0, 1, 1, 8, \ + TSP_F | NOFF_F | OL3OL4CSUM_F | L3L4CSUM_F) \ +T(ts_noff_vlan, 0, 0, 1, 1, 1, 0, 0, 8, \ + TSP_F | NOFF_F | VLAN_F) \ +T(ts_noff_vlan_l3l4csum, 0, 0, 1, 1, 1, 0, 1, 8, \ + TSP_F | NOFF_F | VLAN_F | L3L4CSUM_F) \ +T(ts_noff_vlan_ol3ol4csum, 0, 0, 1, 1, 1, 1, 0, 8, \ + TSP_F | NOFF_F | VLAN_F | OL3OL4CSUM_F) \ +T(ts_noff_vlan_ol3ol4csum_l3l4csum, 0, 0, 1, 1, 1, 1, 1, 8, \ + TSP_F | NOFF_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F) \ + \ +T(tso, 0, 1, 0, 0, 0, 0, 0, 6, \ + TSO_F) \ +T(tso_l3l4csum, 0, 1, 0, 0, 0, 0, 1, 6, \ + TSO_F | L3L4CSUM_F) \ +T(tso_ol3ol4csum, 0, 1, 0, 0, 0, 1, 0, 6, \ + TSO_F | OL3OL4CSUM_F) \ +T(tso_ol3ol4csum_l3l4csum, 0, 1, 0, 0, 0, 1, 1, 6, \ + TSO_F | OL3OL4CSUM_F | L3L4CSUM_F) \ +T(tso_vlan, 0, 1, 0, 0, 1, 0, 0, 6, \ + TSO_F | VLAN_F) \ +T(tso_vlan_l3l4csum, 0, 1, 0, 0, 1, 0, 1, 6, \ + TSO_F | VLAN_F | L3L4CSUM_F) \ +T(tso_vlan_ol3ol4csum, 0, 1, 0, 0, 1, 1, 0, 6, \ + TSO_F | VLAN_F | OL3OL4CSUM_F) \ +T(tso_vlan_ol3ol4csum_l3l4csum, 0, 1, 0, 0, 1, 1, 1, 6, \ + TSO_F | VLAN_F | OL3OL4CSUM_F | 
L3L4CSUM_F) \ +T(tso_noff, 0, 1, 0, 1, 0, 0, 0, 6, \ + TSO_F | NOFF_F) \ +T(tso_noff_l3l4csum, 0, 1, 0, 1, 0, 0, 1, 6, \ + TSO_F | NOFF_F | L3L4CSUM_F) \ +T(tso_noff_ol3ol4csum, 0, 1, 0, 1, 0, 1, 0, 6, \ + TSO_F | NOFF_F | OL3OL4CSUM_F) \ +T(tso_noff_ol3ol4csum_l3l4csum, 0, 1, 0, 1, 0, 1, 1, 6, \ + TSO_F | NOFF_F | OL3OL4CSUM_F | L3L4CSUM_F) \ +T(tso_noff_vlan, 0, 1, 0, 1, 1, 0, 0, 6, \ + TSO_F | NOFF_F | VLAN_F) \ +T(tso_noff_vlan_l3l4csum, 0, 1, 0, 1, 1, 0, 1, 6, \ + TSO_F | NOFF_F | VLAN_F | L3L4CSUM_F) \ +T(tso_noff_vlan_ol3ol4csum, 0, 1, 0, 1, 1, 1, 0, 6, \ + TSO_F | NOFF_F | VLAN_F | OL3OL4CSUM_F) \ +T(tso_noff_vlan_ol3ol4csum_l3l4csum, 0, 1, 0, 1, 1, 1, 1, 6, \ + TSO_F | NOFF_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F) \ +T(tso_ts, 0, 1, 1, 0, 0, 0, 0, 8, \ + TSO_F | TSP_F) \ +T(tso_ts_l3l4csum, 0, 1, 1, 0, 0, 0, 1, 8, \ + TSO_F | TSP_F | L3L4CSUM_F) \ +T(tso_ts_ol3ol4csum, 0, 1, 1, 0, 0, 1, 0, 8, \ + TSO_F | TSP_F | OL3OL4CSUM_F) \ +T(tso_ts_ol3ol4csum_l3l4csum, 0, 1, 1, 0, 0, 1, 1, 8, \ + TSO_F | TSP_F | OL3OL4CSUM_F | L3L4CSUM_F) \ +T(tso_ts_vlan, 0, 1, 1, 0, 1, 0, 0, 8, \ + TSO_F | TSP_F | VLAN_F) \ +T(tso_ts_vlan_l3l4csum, 0, 1, 1, 0, 1, 0, 1, 8, \ + TSO_F | TSP_F | VLAN_F | L3L4CSUM_F) \ +T(tso_ts_vlan_ol3ol4csum, 0, 1, 1, 0, 1, 1, 0, 8, \ + TSO_F | TSP_F | VLAN_F | OL3OL4CSUM_F) \ +T(tso_ts_vlan_ol3ol4csum_l3l4csum, 0, 1, 1, 0, 1, 1, 1, 8, \ + TSO_F | TSP_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F) \ +T(tso_ts_noff, 0, 1, 1, 1, 0, 0, 0, 8, \ + TSO_F | TSP_F | NOFF_F) \ +T(tso_ts_noff_l3l4csum, 0, 1, 1, 1, 0, 0, 1, 8, \ + TSO_F | TSP_F | NOFF_F | L3L4CSUM_F) \ +T(tso_ts_noff_ol3ol4csum, 0, 1, 1, 1, 0, 1, 0, 8, \ + TSO_F | TSP_F | NOFF_F | OL3OL4CSUM_F) \ +T(tso_ts_noff_ol3ol4csum_l3l4csum, 0, 1, 1, 1, 0, 1, 1, 8, \ + TSO_F | TSP_F | NOFF_F | OL3OL4CSUM_F | L3L4CSUM_F) \ +T(tso_ts_noff_vlan, 0, 1, 1, 1, 1, 0, 0, 8, \ + TSO_F | TSP_F | NOFF_F | VLAN_F) \ +T(tso_ts_noff_vlan_l3l4csum, 0, 1, 1, 1, 1, 0, 1, 8, \ + TSO_F | TSP_F | NOFF_F | VLAN_F | L3L4CSUM_F) \ +T(tso_ts_noff_vlan_ol3ol4csum, 0, 1, 1, 1, 1, 1, 0, 8, \ + TSO_F | TSP_F | NOFF_F | VLAN_F | OL3OL4CSUM_F) \ +T(tso_ts_noff_vlan_ol3ol4csum_l3l4csum, 0, 1, 1, 1, 1, 1, 1, 8, \ + TSO_F | TSP_F | NOFF_F | VLAN_F | OL3OL4CSUM_F | \ + L3L4CSUM_F) \ +T(sec, 1, 0, 0, 0, 0, 0, 0, 8, \ + TX_SEC_F) \ +T(sec_l3l4csum, 1, 0, 0, 0, 0, 0, 1, 8, \ + TX_SEC_F | L3L4CSUM_F) \ +T(sec_ol3ol4csum, 1, 0, 0, 0, 0, 1, 0, 8, \ + TX_SEC_F | OL3OL4CSUM_F) \ +T(sec_ol3ol4csum_l3l4csum, 1, 0, 0, 0, 0, 1, 1, 8, \ + TX_SEC_F | OL3OL4CSUM_F | L3L4CSUM_F) \ +T(sec_vlan, 1, 0, 0, 0, 1, 0, 0, 8, \ + TX_SEC_F | VLAN_F) \ +T(sec_vlan_l3l4csum, 1, 0, 0, 0, 1, 0, 1, 8, \ + TX_SEC_F | VLAN_F | L3L4CSUM_F) \ +T(sec_vlan_ol3ol4csum, 1, 0, 0, 0, 1, 1, 0, 8, \ + TX_SEC_F | VLAN_F | OL3OL4CSUM_F) \ +T(sec_vlan_ol3ol4csum_l3l4csum, 1, 0, 0, 0, 1, 1, 1, 8, \ + TX_SEC_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F) \ +T(sec_noff, 1, 0, 0, 1, 0, 0, 0, 8, \ + TX_SEC_F | NOFF_F) \ +T(sec_noff_l3l4csum, 1, 0, 0, 1, 0, 0, 1, 8, \ + TX_SEC_F | NOFF_F | L3L4CSUM_F) \ +T(sec_noff_ol3ol4csum, 1, 0, 0, 1, 0, 1, 0, 8, \ + TX_SEC_F | NOFF_F | OL3OL4CSUM_F) \ +T(sec_noff_ol3ol4csum_l3l4csum, 1, 0, 0, 1, 0, 1, 1, 8, \ + TX_SEC_F | NOFF_F | OL3OL4CSUM_F | L3L4CSUM_F) \ +T(sec_noff_vlan, 1, 0, 0, 1, 1, 0, 0, 8, \ + TX_SEC_F | NOFF_F | VLAN_F) \ +T(sec_noff_vlan_l3l4csum, 1, 0, 0, 1, 1, 0, 1, 8, \ + TX_SEC_F | NOFF_F | VLAN_F | L3L4CSUM_F) \ +T(sec_noff_vlan_ol3ol4csum, 1, 0, 0, 1, 1, 1, 0, 8, \ + TX_SEC_F | NOFF_F | VLAN_F | OL3OL4CSUM_F) \ +T(sec_noff_vlan_ol3ol4csum_l3l4csum, 1, 0, 0, 1, 1, 1, 1, 
8, \ + TX_SEC_F | NOFF_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F) \ +T(sec_ts, 1, 0, 1, 0, 0, 0, 0, 8, \ + TX_SEC_F | TSP_F) \ +T(sec_ts_l3l4csum, 1, 0, 1, 0, 0, 0, 1, 8, \ + TX_SEC_F | TSP_F | L3L4CSUM_F) \ +T(sec_ts_ol3ol4csum, 1, 0, 1, 0, 0, 1, 0, 8, \ + TX_SEC_F | TSP_F | OL3OL4CSUM_F) \ +T(sec_ts_ol3ol4csum_l3l4csum, 1, 0, 1, 0, 0, 1, 1, 8, \ + TX_SEC_F | TSP_F | OL3OL4CSUM_F | L3L4CSUM_F) \ +T(sec_ts_vlan, 1, 0, 1, 0, 1, 0, 0, 8, \ + TX_SEC_F | TSP_F | VLAN_F) \ +T(sec_ts_vlan_l3l4csum, 1, 0, 1, 0, 1, 0, 1, 8, \ + TX_SEC_F | TSP_F | VLAN_F | L3L4CSUM_F) \ +T(sec_ts_vlan_ol3ol4csum, 1, 0, 1, 0, 1, 1, 0, 8, \ + TX_SEC_F | TSP_F | VLAN_F | OL3OL4CSUM_F) \ +T(sec_ts_vlan_ol3ol4csum_l3l4csum, 1, 0, 1, 0, 1, 1, 1, 8, \ + TX_SEC_F | TSP_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F) \ +T(sec_ts_noff, 1, 0, 1, 1, 0, 0, 0, 8, \ + TX_SEC_F | TSP_F | NOFF_F) \ +T(sec_ts_noff_l3l4csum, 1, 0, 1, 1, 0, 0, 1, 8, \ + TX_SEC_F | TSP_F | NOFF_F | L3L4CSUM_F) \ +T(sec_ts_noff_ol3ol4csum, 1, 0, 1, 1, 0, 1, 0, 8, \ + TX_SEC_F | TSP_F | NOFF_F | OL3OL4CSUM_F) \ +T(sec_ts_noff_ol3ol4csum_l3l4csum, 1, 0, 1, 1, 0, 1, 1, 8, \ + TX_SEC_F | TSP_F | NOFF_F | OL3OL4CSUM_F | L3L4CSUM_F) \ +T(sec_ts_noff_vlan, 1, 0, 1, 1, 1, 0, 0, 8, \ + TX_SEC_F | TSP_F | NOFF_F | VLAN_F) \ +T(sec_ts_noff_vlan_l3l4csum, 1, 0, 1, 1, 1, 0, 1, 8, \ + TX_SEC_F | TSP_F | NOFF_F | VLAN_F | L3L4CSUM_F) \ +T(sec_ts_noff_vlan_ol3ol4csum, 1, 0, 1, 1, 1, 1, 0, 8, \ + TX_SEC_F | TSP_F | NOFF_F | VLAN_F | OL3OL4CSUM_F) \ +T(sec_ts_noff_vlan_ol3ol4csum_l3l4csum, 1, 0, 1, 1, 1, 1, 1, 8, \ + TX_SEC_F | TSP_F | NOFF_F | VLAN_F | OL3OL4CSUM_F | \ + L3L4CSUM_F) \ +T(sec_tso, 1, 1, 0, 0, 0, 0, 0, 8, \ + TX_SEC_F | TSO_F) \ +T(sec_tso_l3l4csum, 1, 1, 0, 0, 0, 0, 1, 8, \ + TX_SEC_F | TSO_F | L3L4CSUM_F) \ +T(sec_tso_ol3ol4csum, 1, 1, 0, 0, 0, 1, 0, 8, \ + TX_SEC_F | TSO_F | OL3OL4CSUM_F) \ +T(sec_tso_ol3ol4csum_l3l4csum, 1, 1, 0, 0, 0, 1, 1, 8, \ + TX_SEC_F | TSO_F | OL3OL4CSUM_F | L3L4CSUM_F) \ +T(sec_tso_vlan, 1, 1, 0, 0, 1, 0, 0, 8, \ + TX_SEC_F | TSO_F | VLAN_F) \ +T(sec_tso_vlan_l3l4csum, 1, 1, 0, 0, 1, 0, 1, 8, \ + TX_SEC_F | TSO_F | VLAN_F | L3L4CSUM_F) \ +T(sec_tso_vlan_ol3ol4csum, 1, 1, 0, 0, 1, 1, 0, 8, \ + TX_SEC_F | TSO_F | VLAN_F | OL3OL4CSUM_F) \ +T(sec_tso_vlan_ol3ol4csum_l3l4csum, 1, 1, 0, 0, 1, 1, 1, 8, \ + TX_SEC_F | TSO_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F) \ +T(sec_tso_noff, 1, 1, 0, 1, 0, 0, 0, 8, \ + TX_SEC_F | TSO_F | NOFF_F) \ +T(sec_tso_noff_l3l4csum, 1, 1, 0, 1, 0, 0, 1, 8, \ + TX_SEC_F | TSO_F | NOFF_F | L3L4CSUM_F) \ +T(sec_tso_noff_ol3ol4csum, 1, 1, 0, 1, 0, 1, 0, 8, \ + TX_SEC_F | TSO_F | NOFF_F | OL3OL4CSUM_F) \ +T(sec_tso_noff_ol3ol4csum_l3l4csum, 1, 1, 0, 1, 0, 1, 1, 8, \ + TX_SEC_F | TSO_F | NOFF_F | OL3OL4CSUM_F | L3L4CSUM_F) \ +T(sec_tso_noff_vlan, 1, 1, 0, 1, 1, 0, 0, 8, \ + TX_SEC_F | TSO_F | NOFF_F | VLAN_F) \ +T(sec_tso_noff_vlan_l3l4csum, 1, 1, 0, 1, 1, 0, 1, 8, \ + TX_SEC_F | TSO_F | NOFF_F | VLAN_F | L3L4CSUM_F) \ +T(sec_tso_noff_vlan_ol3ol4csum, 1, 1, 0, 1, 1, 1, 0, 8, \ + TX_SEC_F | TSO_F | NOFF_F | VLAN_F | OL3OL4CSUM_F) \ +T(sec_tso_noff_vlan_ol3ol4csum_l3l4csum, \ + 1, 1, 0, 1, 1, 1, 1, 8, \ + TX_SEC_F | TSO_F | NOFF_F | VLAN_F | OL3OL4CSUM_F | \ + L3L4CSUM_F) \ +T(sec_tso_ts, 1, 1, 1, 0, 0, 0, 0, 8, \ + TX_SEC_F | TSO_F | TSP_F) \ +T(sec_tso_ts_l3l4csum, 1, 1, 1, 0, 0, 0, 1, 8, \ + TX_SEC_F | TSO_F | TSP_F | L3L4CSUM_F) \ +T(sec_tso_ts_ol3ol4csum, 1, 1, 1, 0, 0, 1, 0, 8, \ + TX_SEC_F | TSO_F | TSP_F | OL3OL4CSUM_F) \ +T(sec_tso_ts_ol3ol4csum_l3l4csum, 1, 1, 1, 0, 0, 1, 1, 8, \ + TX_SEC_F | TSO_F | TSP_F | 
OL3OL4CSUM_F | L3L4CSUM_F) \ +T(sec_tso_ts_vlan, 1, 1, 1, 0, 1, 0, 0, 8, \ + TX_SEC_F | TSO_F | TSP_F | VLAN_F) \ +T(sec_tso_ts_vlan_l3l4csum, 1, 1, 1, 0, 1, 0, 1, 8, \ + TX_SEC_F | TSO_F | TSP_F | VLAN_F | L3L4CSUM_F) \ +T(sec_tso_ts_vlan_ol3ol4csum, 1, 1, 1, 0, 1, 1, 0, 8, \ + TX_SEC_F | TSO_F | TSP_F | VLAN_F | OL3OL4CSUM_F) \ +T(sec_tso_ts_vlan_ol3ol4csum_l3l4csum, 1, 1, 1, 0, 1, 1, 1, 8, \ + TX_SEC_F | TSO_F | TSP_F | VLAN_F | OL3OL4CSUM_F | \ + L3L4CSUM_F) \ +T(sec_tso_ts_noff, 1, 1, 1, 1, 0, 0, 0, 8, \ + TX_SEC_F | TSO_F | TSP_F | NOFF_F) \ +T(sec_tso_ts_noff_l3l4csum, 1, 1, 1, 1, 0, 0, 1, 8, \ + TX_SEC_F | TSO_F | TSP_F | NOFF_F | L3L4CSUM_F) \ +T(sec_tso_ts_noff_ol3ol4csum, 1, 1, 1, 1, 0, 1, 0, 8, \ + TX_SEC_F | TSO_F | TSP_F | NOFF_F | OL3OL4CSUM_F) \ +T(sec_tso_ts_noff_ol3ol4csum_l3l4csum, 1, 1, 1, 1, 0, 1, 1, 8, \ + TX_SEC_F | TSO_F | TSP_F | NOFF_F | OL3OL4CSUM_F | \ + L3L4CSUM_F) \ +T(sec_tso_ts_noff_vlan, 1, 1, 1, 1, 1, 0, 0, 8, \ + TX_SEC_F | TSO_F | TSP_F | NOFF_F | VLAN_F) \ +T(sec_tso_ts_noff_vlan_l3l4csum, 1, 1, 1, 1, 1, 0, 1, 8, \ + TX_SEC_F | TSO_F | TSP_F | NOFF_F | VLAN_F | L3L4CSUM_F)\ +T(sec_tso_ts_noff_vlan_ol3ol4csum, 1, 1, 1, 1, 1, 1, 0, 8, \ + TX_SEC_F | TSO_F | TSP_F | NOFF_F | VLAN_F | \ + OL3OL4CSUM_F) \ +T(sec_tso_ts_noff_vlan_ol3ol4csum_l3l4csum, \ + 1, 1, 1, 1, 1, 1, 1, 8, \ + TX_SEC_F | TSO_F | TSP_F | NOFF_F | VLAN_F | \ + OL3OL4CSUM_F | L3L4CSUM_F) +#endif /* __OTX2_TX_H__ */ diff --git a/src/spdk/dpdk/drivers/net/octeontx2/otx2_vlan.c b/src/spdk/dpdk/drivers/net/octeontx2/otx2_vlan.c new file mode 100644 index 000000000..322a565b3 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/octeontx2/otx2_vlan.c @@ -0,0 +1,1040 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(C) 2019 Marvell International Ltd. + */ + +#include +#include + +#include "otx2_ethdev.h" +#include "otx2_flow.h" + + +#define VLAN_ID_MATCH 0x1 +#define VTAG_F_MATCH 0x2 +#define MAC_ADDR_MATCH 0x4 +#define QINQ_F_MATCH 0x8 +#define VLAN_DROP 0x10 +#define DEF_F_ENTRY 0x20 + +enum vtag_cfg_dir { + VTAG_TX, + VTAG_RX +}; + +static int +nix_vlan_mcam_enb_dis(struct otx2_eth_dev *dev, + uint32_t entry, const int enable) +{ + struct npc_mcam_ena_dis_entry_req *req; + struct otx2_mbox *mbox = dev->mbox; + int rc = -EINVAL; + + if (enable) + req = otx2_mbox_alloc_msg_npc_mcam_ena_entry(mbox); + else + req = otx2_mbox_alloc_msg_npc_mcam_dis_entry(mbox); + + req->entry = entry; + + rc = otx2_mbox_process_msg(mbox, NULL); + return rc; +} + +static void +nix_set_rx_vlan_action(struct rte_eth_dev *eth_dev, + struct mcam_entry *entry, bool qinq, bool drop) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + int pcifunc = otx2_pfvf_func(dev->pf, dev->vf); + uint64_t action = 0, vtag_action = 0; + + action = NIX_RX_ACTIONOP_UCAST; + + if (eth_dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) { + action = NIX_RX_ACTIONOP_RSS; + action |= (uint64_t)(dev->rss_info.alg_idx) << 56; + } + + action |= (uint64_t)pcifunc << 4; + entry->action = action; + + if (drop) { + entry->action &= ~((uint64_t)0xF); + entry->action |= NIX_RX_ACTIONOP_DROP; + return; + } + + if (!qinq) { + /* VTAG0 fields denote CTAG in single vlan case */ + vtag_action |= (NIX_RX_VTAGACTION_VTAG_VALID << 15); + vtag_action |= (NPC_LID_LB << 8); + vtag_action |= NIX_RX_VTAGACTION_VTAG0_RELPTR; + } else { + /* VTAG0 & VTAG1 fields denote CTAG & STAG respectively */ + vtag_action |= (NIX_RX_VTAGACTION_VTAG_VALID << 15); + vtag_action |= (NPC_LID_LB << 8); + vtag_action |= NIX_RX_VTAGACTION_VTAG1_RELPTR; + vtag_action |= 
(NIX_RX_VTAGACTION_VTAG_VALID << 47); + vtag_action |= ((uint64_t)(NPC_LID_LB) << 40); + vtag_action |= (NIX_RX_VTAGACTION_VTAG0_RELPTR << 32); + } + + entry->vtag_action = vtag_action; +} + +static void +nix_set_tx_vlan_action(struct mcam_entry *entry, enum rte_vlan_type type, + int vtag_index) +{ + union { + uint64_t reg; + struct nix_tx_vtag_action_s act; + } vtag_action; + + uint64_t action; + + action = NIX_TX_ACTIONOP_UCAST_DEFAULT; + + /* + * Take offset from LA since in case of untagged packet, + * lbptr is zero. + */ + if (type == ETH_VLAN_TYPE_OUTER) { + vtag_action.act.vtag0_def = vtag_index; + vtag_action.act.vtag0_lid = NPC_LID_LA; + vtag_action.act.vtag0_op = NIX_TX_VTAGOP_INSERT; + vtag_action.act.vtag0_relptr = NIX_TX_VTAGACTION_VTAG0_RELPTR; + } else { + vtag_action.act.vtag1_def = vtag_index; + vtag_action.act.vtag1_lid = NPC_LID_LA; + vtag_action.act.vtag1_op = NIX_TX_VTAGOP_INSERT; + vtag_action.act.vtag1_relptr = NIX_TX_VTAGACTION_VTAG1_RELPTR; + } + + entry->action = action; + entry->vtag_action = vtag_action.reg; +} + +static int +nix_vlan_mcam_free(struct otx2_eth_dev *dev, uint32_t entry) +{ + struct npc_mcam_free_entry_req *req; + struct otx2_mbox *mbox = dev->mbox; + int rc = -EINVAL; + + req = otx2_mbox_alloc_msg_npc_mcam_free_entry(mbox); + req->entry = entry; + + rc = otx2_mbox_process_msg(mbox, NULL); + return rc; +} + +static int +nix_vlan_mcam_write(struct rte_eth_dev *eth_dev, uint16_t ent_idx, + struct mcam_entry *entry, uint8_t intf, uint8_t ena) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct npc_mcam_write_entry_req *req; + struct otx2_mbox *mbox = dev->mbox; + struct msghdr *rsp; + int rc = -EINVAL; + + req = otx2_mbox_alloc_msg_npc_mcam_write_entry(mbox); + + req->entry = ent_idx; + req->intf = intf; + req->enable_entry = ena; + memcpy(&req->entry_data, entry, sizeof(struct mcam_entry)); + + rc = otx2_mbox_process_msg(mbox, (void *)&rsp); + return rc; +} + +static int +nix_vlan_mcam_alloc_and_write(struct rte_eth_dev *eth_dev, + struct mcam_entry *entry, + uint8_t intf, bool drop) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct npc_mcam_alloc_and_write_entry_req *req; + struct npc_mcam_alloc_and_write_entry_rsp *rsp; + struct otx2_mbox *mbox = dev->mbox; + int rc = -EINVAL; + + req = otx2_mbox_alloc_msg_npc_mcam_alloc_and_write_entry(mbox); + + if (intf == NPC_MCAM_RX) { + if (!drop && dev->vlan_info.def_rx_mcam_idx) { + req->priority = NPC_MCAM_HIGHER_PRIO; + req->ref_entry = dev->vlan_info.def_rx_mcam_idx; + } else if (drop && dev->vlan_info.qinq_mcam_idx) { + req->priority = NPC_MCAM_LOWER_PRIO; + req->ref_entry = dev->vlan_info.qinq_mcam_idx; + } else { + req->priority = NPC_MCAM_ANY_PRIO; + req->ref_entry = 0; + } + } else { + req->priority = NPC_MCAM_ANY_PRIO; + req->ref_entry = 0; + } + + req->intf = intf; + req->enable_entry = 1; + memcpy(&req->entry_data, entry, sizeof(struct mcam_entry)); + + rc = otx2_mbox_process_msg(mbox, (void *)&rsp); + if (rc) + return rc; + + return rsp->entry; +} + +static void +nix_vlan_update_mac(struct rte_eth_dev *eth_dev, int mcam_index, + int enable) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct vlan_mkex_info *mkex = &dev->vlan_info.mkex; + volatile uint8_t *key_data, *key_mask; + struct npc_mcam_read_entry_req *req; + struct npc_mcam_read_entry_rsp *rsp; + struct otx2_mbox *mbox = dev->mbox; + uint64_t mcam_data, mcam_mask; + struct mcam_entry entry; + uint8_t intf, mcam_ena; + int idx, rc = -EINVAL; + uint8_t *mac_addr; + + memset(&entry, 0, 
sizeof(struct mcam_entry)); + + /* Read entry first */ + req = otx2_mbox_alloc_msg_npc_mcam_read_entry(mbox); + + req->entry = mcam_index; + + rc = otx2_mbox_process_msg(mbox, (void *)&rsp); + if (rc) { + otx2_err("Failed to read entry %d", mcam_index); + return; + } + + entry = rsp->entry_data; + intf = rsp->intf; + mcam_ena = rsp->enable; + + /* Update mcam address */ + key_data = (volatile uint8_t *)entry.kw; + key_mask = (volatile uint8_t *)entry.kw_mask; + + if (enable) { + mcam_mask = 0; + otx2_mbox_memcpy(key_mask + mkex->la_xtract.key_off, + &mcam_mask, mkex->la_xtract.len + 1); + + } else { + mcam_data = 0ULL; + mac_addr = dev->mac_addr; + for (idx = RTE_ETHER_ADDR_LEN - 1; idx >= 0; idx--) + mcam_data |= ((uint64_t)*mac_addr++) << (8 * idx); + + mcam_mask = BIT_ULL(48) - 1; + + otx2_mbox_memcpy(key_data + mkex->la_xtract.key_off, + &mcam_data, mkex->la_xtract.len + 1); + otx2_mbox_memcpy(key_mask + mkex->la_xtract.key_off, + &mcam_mask, mkex->la_xtract.len + 1); + } + + /* Write back the mcam entry */ + rc = nix_vlan_mcam_write(eth_dev, mcam_index, + &entry, intf, mcam_ena); + if (rc) { + otx2_err("Failed to write entry %d", mcam_index); + return; + } +} + +void +otx2_nix_vlan_update_promisc(struct rte_eth_dev *eth_dev, int enable) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct otx2_vlan_info *vlan = &dev->vlan_info; + struct vlan_entry *entry; + + /* Already in required mode */ + if (enable == vlan->promisc_on) + return; + + /* Update default rx entry */ + if (vlan->def_rx_mcam_idx) + nix_vlan_update_mac(eth_dev, vlan->def_rx_mcam_idx, enable); + + /* Update all other rx filter entries */ + TAILQ_FOREACH(entry, &vlan->fltr_tbl, next) + nix_vlan_update_mac(eth_dev, entry->mcam_idx, enable); + + vlan->promisc_on = enable; +} + +/* Configure mcam entry with required MCAM search rules */ +static int +nix_vlan_mcam_config(struct rte_eth_dev *eth_dev, + uint16_t vlan_id, uint16_t flags) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct vlan_mkex_info *mkex = &dev->vlan_info.mkex; + volatile uint8_t *key_data, *key_mask; + uint64_t mcam_data, mcam_mask; + struct mcam_entry entry; + uint8_t *mac_addr; + int idx, kwi = 0; + + memset(&entry, 0, sizeof(struct mcam_entry)); + key_data = (volatile uint8_t *)entry.kw; + key_mask = (volatile uint8_t *)entry.kw_mask; + + /* Channel base extracted to KW0[11:0] */ + entry.kw[kwi] = dev->rx_chan_base; + entry.kw_mask[kwi] = BIT_ULL(12) - 1; + + /* Adds vlan_id & LB CTAG flag to MCAM KW */ + if (flags & VLAN_ID_MATCH) { + entry.kw[kwi] |= (NPC_LT_LB_CTAG | NPC_LT_LB_STAG_QINQ) + << mkex->lb_lt_offset; + entry.kw_mask[kwi] |= + (0xF & ~(NPC_LT_LB_CTAG ^ NPC_LT_LB_STAG_QINQ)) + << mkex->lb_lt_offset; + + mcam_data = ((uint32_t)vlan_id << 16); + mcam_mask = (BIT_ULL(16) - 1) << 16; + otx2_mbox_memcpy(key_data + mkex->lb_xtract.key_off, + &mcam_data, mkex->lb_xtract.len + 1); + otx2_mbox_memcpy(key_mask + mkex->lb_xtract.key_off, + &mcam_mask, mkex->lb_xtract.len + 1); + } + + /* Adds LB STAG flag to MCAM KW */ + if (flags & QINQ_F_MATCH) { + entry.kw[kwi] |= NPC_LT_LB_STAG_QINQ << mkex->lb_lt_offset; + entry.kw_mask[kwi] |= 0xFULL << mkex->lb_lt_offset; + } + + /* Adds LB CTAG & LB STAG flags to MCAM KW */ + if (flags & VTAG_F_MATCH) { + entry.kw[kwi] |= (NPC_LT_LB_CTAG | NPC_LT_LB_STAG_QINQ) + << mkex->lb_lt_offset; + entry.kw_mask[kwi] |= + (0xF & ~(NPC_LT_LB_CTAG ^ NPC_LT_LB_STAG_QINQ)) + << mkex->lb_lt_offset; + } + + /* Adds port MAC address to MCAM KW */ + if (flags & MAC_ADDR_MATCH) { + mcam_data = 0ULL; 
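+		/* Fold the 6-byte port MAC into a 48-bit key value, first address byte in the most significant position */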
+ mac_addr = dev->mac_addr; + for (idx = RTE_ETHER_ADDR_LEN - 1; idx >= 0; idx--) + mcam_data |= ((uint64_t)*mac_addr++) << (8 * idx); + + mcam_mask = BIT_ULL(48) - 1; + otx2_mbox_memcpy(key_data + mkex->la_xtract.key_off, + &mcam_data, mkex->la_xtract.len + 1); + otx2_mbox_memcpy(key_mask + mkex->la_xtract.key_off, + &mcam_mask, mkex->la_xtract.len + 1); + } + + /* VLAN_DROP: for drop action for all vlan packets when filter is on. + * For QinQ, enable vtag action for both outer & inner tags + */ + if (flags & VLAN_DROP) + nix_set_rx_vlan_action(eth_dev, &entry, false, true); + else if (flags & QINQ_F_MATCH) + nix_set_rx_vlan_action(eth_dev, &entry, true, false); + else + nix_set_rx_vlan_action(eth_dev, &entry, false, false); + + if (flags & DEF_F_ENTRY) + dev->vlan_info.def_rx_mcam_ent = entry; + + return nix_vlan_mcam_alloc_and_write(eth_dev, &entry, NIX_INTF_RX, + flags & VLAN_DROP); +} + +/* Installs/Removes/Modifies default rx entry */ +static int +nix_vlan_handle_default_rx_entry(struct rte_eth_dev *eth_dev, bool strip, + bool filter, bool enable) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct otx2_vlan_info *vlan = &dev->vlan_info; + uint16_t flags = 0; + int mcam_idx, rc; + + /* Use default mcam entry to either drop vlan traffic when + * vlan filter is on or strip vtag when strip is enabled. + * Allocate default entry which matches port mac address + * and vtag(ctag/stag) flags with drop action. + */ + if (!vlan->def_rx_mcam_idx) { + if (!eth_dev->data->promiscuous) + flags = MAC_ADDR_MATCH; + + if (filter && enable) + flags |= VTAG_F_MATCH | VLAN_DROP; + else if (strip && enable) + flags |= VTAG_F_MATCH; + else + return 0; + + flags |= DEF_F_ENTRY; + + mcam_idx = nix_vlan_mcam_config(eth_dev, 0, flags); + if (mcam_idx < 0) { + otx2_err("Failed to config vlan mcam"); + return -mcam_idx; + } + + vlan->def_rx_mcam_idx = mcam_idx; + return 0; + } + + /* Filter is already enabled, so packets would be dropped anyways. No + * processing needed for enabling strip wrt mcam entry. + */ + + /* Filter disable request */ + if (vlan->filter_on && filter && !enable) { + vlan->def_rx_mcam_ent.action &= ~((uint64_t)0xF); + + /* Free default rx entry only when + * 1. strip is not on and + * 2. qinq entry is allocated before default entry. 
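+	 * Otherwise keep the entry and restore its unicast/RSS forward action.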
+ */ + if (vlan->strip_on || + (vlan->qinq_on && !vlan->qinq_before_def)) { + if (eth_dev->data->dev_conf.rxmode.mq_mode == + ETH_MQ_RX_RSS) + vlan->def_rx_mcam_ent.action |= + NIX_RX_ACTIONOP_RSS; + else + vlan->def_rx_mcam_ent.action |= + NIX_RX_ACTIONOP_UCAST; + return nix_vlan_mcam_write(eth_dev, + vlan->def_rx_mcam_idx, + &vlan->def_rx_mcam_ent, + NIX_INTF_RX, 1); + } else { + rc = nix_vlan_mcam_free(dev, vlan->def_rx_mcam_idx); + if (rc) + return rc; + vlan->def_rx_mcam_idx = 0; + } + } + + /* Filter enable request */ + if (!vlan->filter_on && filter && enable) { + vlan->def_rx_mcam_ent.action &= ~((uint64_t)0xF); + vlan->def_rx_mcam_ent.action |= NIX_RX_ACTIONOP_DROP; + return nix_vlan_mcam_write(eth_dev, vlan->def_rx_mcam_idx, + &vlan->def_rx_mcam_ent, NIX_INTF_RX, 1); + } + + /* Strip disable request */ + if (vlan->strip_on && strip && !enable) { + if (!vlan->filter_on && + !(vlan->qinq_on && !vlan->qinq_before_def)) { + rc = nix_vlan_mcam_free(dev, vlan->def_rx_mcam_idx); + if (rc) + return rc; + vlan->def_rx_mcam_idx = 0; + } + } + + return 0; +} + +/* Installs/Removes default tx entry */ +static int +nix_vlan_handle_default_tx_entry(struct rte_eth_dev *eth_dev, + enum rte_vlan_type type, int vtag_index, + int enable) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct otx2_vlan_info *vlan = &dev->vlan_info; + struct mcam_entry entry; + uint16_t pf_func; + int rc; + + if (!vlan->def_tx_mcam_idx && enable) { + memset(&entry, 0, sizeof(struct mcam_entry)); + + /* Only pf_func is matched, swap it's bytes */ + pf_func = (dev->pf_func & 0xff) << 8; + pf_func |= (dev->pf_func >> 8) & 0xff; + + /* PF Func extracted to KW1[47:32] */ + entry.kw[0] = (uint64_t)pf_func << 32; + entry.kw_mask[0] = (BIT_ULL(16) - 1) << 32; + + nix_set_tx_vlan_action(&entry, type, vtag_index); + vlan->def_tx_mcam_ent = entry; + + return nix_vlan_mcam_alloc_and_write(eth_dev, &entry, + NIX_INTF_TX, 0); + } + + if (vlan->def_tx_mcam_idx && !enable) { + rc = nix_vlan_mcam_free(dev, vlan->def_tx_mcam_idx); + if (rc) + return rc; + vlan->def_rx_mcam_idx = 0; + } + + return 0; +} + +/* Configure vlan stripping on or off */ +static int +nix_vlan_hw_strip(struct rte_eth_dev *eth_dev, const uint8_t enable) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct otx2_mbox *mbox = dev->mbox; + struct nix_vtag_config *vtag_cfg; + int rc = -EINVAL; + + rc = nix_vlan_handle_default_rx_entry(eth_dev, true, false, enable); + if (rc) { + otx2_err("Failed to config default rx entry"); + return rc; + } + + vtag_cfg = otx2_mbox_alloc_msg_nix_vtag_cfg(mbox); + /* cfg_type = 1 for rx vlan cfg */ + vtag_cfg->cfg_type = VTAG_RX; + + if (enable) + vtag_cfg->rx.strip_vtag = 1; + else + vtag_cfg->rx.strip_vtag = 0; + + /* Always capture */ + vtag_cfg->rx.capture_vtag = 1; + vtag_cfg->vtag_size = NIX_VTAGSIZE_T4; + /* Use rx vtag type index[0] for now */ + vtag_cfg->rx.vtag_type = 0; + + rc = otx2_mbox_process(mbox); + if (rc) + return rc; + + dev->vlan_info.strip_on = enable; + return rc; +} + +/* Configure vlan filtering on or off for all vlans if vlan_id == 0 */ +static int +nix_vlan_hw_filter(struct rte_eth_dev *eth_dev, const uint8_t enable, + uint16_t vlan_id) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct otx2_vlan_info *vlan = &dev->vlan_info; + struct vlan_entry *entry; + int rc = -EINVAL; + + if (!vlan_id && enable) { + rc = nix_vlan_handle_default_rx_entry(eth_dev, false, true, + enable); + if (rc) { + otx2_err("Failed to config vlan mcam"); + return rc; + } + 
dev->vlan_info.filter_on = enable; + return 0; + } + + /* Enable/disable existing vlan filter entries */ + TAILQ_FOREACH(entry, &vlan->fltr_tbl, next) { + if (vlan_id) { + if (entry->vlan_id == vlan_id) { + rc = nix_vlan_mcam_enb_dis(dev, + entry->mcam_idx, + enable); + if (rc) + return rc; + } + } else { + rc = nix_vlan_mcam_enb_dis(dev, entry->mcam_idx, + enable); + if (rc) + return rc; + } + } + + if (!vlan_id && !enable) { + rc = nix_vlan_handle_default_rx_entry(eth_dev, false, true, + enable); + if (rc) { + otx2_err("Failed to config vlan mcam"); + return rc; + } + dev->vlan_info.filter_on = enable; + return 0; + } + + return 0; +} + +/* Enable/disable vlan filtering for the given vlan_id */ +int +otx2_nix_vlan_filter_set(struct rte_eth_dev *eth_dev, uint16_t vlan_id, + int on) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct otx2_vlan_info *vlan = &dev->vlan_info; + struct vlan_entry *entry; + int entry_exists = 0; + int rc = -EINVAL; + int mcam_idx; + + if (!vlan_id) { + otx2_err("Vlan Id can't be zero"); + return rc; + } + + if (!vlan->def_rx_mcam_idx) { + otx2_err("Vlan Filtering is disabled, enable it first"); + return rc; + } + + if (on) { + TAILQ_FOREACH(entry, &vlan->fltr_tbl, next) { + if (entry->vlan_id == vlan_id) { + /* Vlan entry already exists */ + entry_exists = 1; + /* Mcam entry already allocated */ + if (entry->mcam_idx) { + rc = nix_vlan_hw_filter(eth_dev, on, + vlan_id); + return rc; + } + break; + } + } + + if (!entry_exists) { + entry = rte_zmalloc("otx2_nix_vlan_entry", + sizeof(struct vlan_entry), 0); + if (!entry) { + otx2_err("Failed to allocate memory"); + return -ENOMEM; + } + } + + /* Enables vlan_id & mac address based filtering */ + if (eth_dev->data->promiscuous) + mcam_idx = nix_vlan_mcam_config(eth_dev, vlan_id, + VLAN_ID_MATCH); + else + mcam_idx = nix_vlan_mcam_config(eth_dev, vlan_id, + VLAN_ID_MATCH | + MAC_ADDR_MATCH); + if (mcam_idx < 0) { + otx2_err("Failed to config vlan mcam"); + TAILQ_REMOVE(&vlan->fltr_tbl, entry, next); + rte_free(entry); + return mcam_idx; + } + + entry->mcam_idx = mcam_idx; + if (!entry_exists) { + entry->vlan_id = vlan_id; + TAILQ_INSERT_HEAD(&vlan->fltr_tbl, entry, next); + } + } else { + TAILQ_FOREACH(entry, &vlan->fltr_tbl, next) { + if (entry->vlan_id == vlan_id) { + rc = nix_vlan_mcam_free(dev, entry->mcam_idx); + if (rc) + return rc; + TAILQ_REMOVE(&vlan->fltr_tbl, entry, next); + rte_free(entry); + break; + } + } + } + return 0; +} + +/* Configure double vlan(qinq) on or off */ +static int +otx2_nix_config_double_vlan(struct rte_eth_dev *eth_dev, + const uint8_t enable) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct otx2_vlan_info *vlan_info; + int mcam_idx; + int rc; + + vlan_info = &dev->vlan_info; + + if (!enable) { + if (!vlan_info->qinq_mcam_idx) + return 0; + + rc = nix_vlan_mcam_free(dev, vlan_info->qinq_mcam_idx); + if (rc) + return rc; + + vlan_info->qinq_mcam_idx = 0; + dev->vlan_info.qinq_on = 0; + vlan_info->qinq_before_def = 0; + return 0; + } + + if (eth_dev->data->promiscuous) + mcam_idx = nix_vlan_mcam_config(eth_dev, 0, QINQ_F_MATCH); + else + mcam_idx = nix_vlan_mcam_config(eth_dev, 0, + QINQ_F_MATCH | MAC_ADDR_MATCH); + if (mcam_idx < 0) + return mcam_idx; + + if (!vlan_info->def_rx_mcam_idx) + vlan_info->qinq_before_def = 1; + + vlan_info->qinq_mcam_idx = mcam_idx; + dev->vlan_info.qinq_on = 1; + return 0; +} + +int +otx2_nix_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + uint64_t 
offloads = dev->rx_offloads; + struct rte_eth_rxmode *rxmode; + int rc = 0; + + rxmode = ð_dev->data->dev_conf.rxmode; + + if (mask & ETH_VLAN_EXTEND_MASK) { + otx2_err("Extend offload not supported"); + return -ENOTSUP; + } + + if (mask & ETH_VLAN_STRIP_MASK) { + if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) { + offloads |= DEV_RX_OFFLOAD_VLAN_STRIP; + rc = nix_vlan_hw_strip(eth_dev, true); + } else { + offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP; + rc = nix_vlan_hw_strip(eth_dev, false); + } + if (rc) + goto done; + } + + if (mask & ETH_VLAN_FILTER_MASK) { + if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) { + offloads |= DEV_RX_OFFLOAD_VLAN_FILTER; + rc = nix_vlan_hw_filter(eth_dev, true, 0); + } else { + offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER; + rc = nix_vlan_hw_filter(eth_dev, false, 0); + } + if (rc) + goto done; + } + + if (rxmode->offloads & DEV_RX_OFFLOAD_QINQ_STRIP) { + if (!dev->vlan_info.qinq_on) { + offloads |= DEV_RX_OFFLOAD_QINQ_STRIP; + rc = otx2_nix_config_double_vlan(eth_dev, true); + if (rc) + goto done; + } + } else { + if (dev->vlan_info.qinq_on) { + offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP; + rc = otx2_nix_config_double_vlan(eth_dev, false); + if (rc) + goto done; + } + } + + if (offloads & (DEV_RX_OFFLOAD_VLAN_STRIP | + DEV_RX_OFFLOAD_QINQ_STRIP)) { + dev->rx_offloads |= offloads; + dev->rx_offload_flags |= NIX_RX_OFFLOAD_VLAN_STRIP_F; + otx2_eth_set_rx_function(eth_dev); + } + +done: + return rc; +} + +int +otx2_nix_vlan_tpid_set(struct rte_eth_dev *eth_dev, + enum rte_vlan_type type, uint16_t tpid) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct nix_set_vlan_tpid *tpid_cfg; + struct otx2_mbox *mbox = dev->mbox; + int rc; + + tpid_cfg = otx2_mbox_alloc_msg_nix_set_vlan_tpid(mbox); + + tpid_cfg->tpid = tpid; + if (type == ETH_VLAN_TYPE_OUTER) + tpid_cfg->vlan_type = NIX_VLAN_TYPE_OUTER; + else + tpid_cfg->vlan_type = NIX_VLAN_TYPE_INNER; + + rc = otx2_mbox_process(mbox); + if (rc) + return rc; + + if (type == ETH_VLAN_TYPE_OUTER) + dev->vlan_info.outer_vlan_tpid = tpid; + else + dev->vlan_info.inner_vlan_tpid = tpid; + return 0; +} + +int +otx2_nix_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) +{ + struct otx2_eth_dev *otx2_dev = otx2_eth_pmd_priv(dev); + struct otx2_mbox *mbox = otx2_dev->mbox; + struct nix_vtag_config *vtag_cfg; + struct nix_vtag_config_rsp *rsp; + struct otx2_vlan_info *vlan; + int rc, rc1, vtag_index = 0; + + if (vlan_id == 0) { + otx2_err("vlan id can't be zero"); + return -EINVAL; + } + + vlan = &otx2_dev->vlan_info; + + if (on && vlan->pvid_insert_on && vlan->pvid == vlan_id) { + otx2_err("pvid %d is already enabled", vlan_id); + return -EINVAL; + } + + if (on && vlan->pvid_insert_on && vlan->pvid != vlan_id) { + otx2_err("another pvid is enabled, disable that first"); + return -EINVAL; + } + + /* No pvid active */ + if (!on && !vlan->pvid_insert_on) + return 0; + + /* Given pvid already disabled */ + if (!on && vlan->pvid != vlan_id) + return 0; + + vtag_cfg = otx2_mbox_alloc_msg_nix_vtag_cfg(mbox); + + if (on) { + vtag_cfg->cfg_type = VTAG_TX; + vtag_cfg->vtag_size = NIX_VTAGSIZE_T4; + + if (vlan->outer_vlan_tpid) + vtag_cfg->tx.vtag0 = ((uint32_t)vlan->outer_vlan_tpid + << 16) | vlan_id; + else + vtag_cfg->tx.vtag0 = + ((RTE_ETHER_TYPE_VLAN << 16) | vlan_id); + vtag_cfg->tx.cfg_vtag0 = 1; + } else { + vtag_cfg->cfg_type = VTAG_TX; + vtag_cfg->vtag_size = NIX_VTAGSIZE_T4; + + vtag_cfg->tx.vtag0_idx = vlan->outer_vlan_idx; + vtag_cfg->tx.free_vtag0 = 1; + } + + rc = otx2_mbox_process_msg(mbox, (void 
*)&rsp); + if (rc) + return rc; + + if (on) { + vtag_index = rsp->vtag0_idx; + } else { + vlan->pvid = 0; + vlan->pvid_insert_on = 0; + vlan->outer_vlan_idx = 0; + } + + rc = nix_vlan_handle_default_tx_entry(dev, ETH_VLAN_TYPE_OUTER, + vtag_index, on); + if (rc < 0) { + printf("Default tx entry failed with rc %d\n", rc); + vtag_cfg->tx.vtag0_idx = vtag_index; + vtag_cfg->tx.free_vtag0 = 1; + vtag_cfg->tx.cfg_vtag0 = 0; + + rc1 = otx2_mbox_process_msg(mbox, (void *)&rsp); + if (rc1) + otx2_err("Vtag free failed"); + + return rc; + } + + if (on) { + vlan->pvid = vlan_id; + vlan->pvid_insert_on = 1; + vlan->outer_vlan_idx = vtag_index; + } + + return 0; +} + +void otx2_nix_vlan_strip_queue_set(__rte_unused struct rte_eth_dev *dev, + __rte_unused uint16_t queue, + __rte_unused int on) +{ + otx2_err("Not Supported"); +} + +static int +nix_vlan_rx_mkex_offset(uint64_t mask) +{ + int nib_count = 0; + + while (mask) { + nib_count += mask & 1; + mask >>= 1; + } + + return nib_count * 4; +} + +static int +nix_vlan_get_mkex_info(struct otx2_eth_dev *dev) +{ + struct vlan_mkex_info *mkex = &dev->vlan_info.mkex; + struct otx2_npc_flow_info *npc = &dev->npc_flow; + struct npc_xtract_info *x_info = NULL; + uint64_t rx_keyx; + otx2_dxcfg_t *p; + int rc = -EINVAL; + + if (npc == NULL) { + otx2_err("Missing npc mkex configuration"); + return rc; + } + +#define NPC_KEX_CHAN_NIBBLE_ENA 0x7ULL +#define NPC_KEX_LB_LTYPE_NIBBLE_ENA 0x1000ULL +#define NPC_KEX_LB_LTYPE_NIBBLE_MASK 0xFFFULL + + rx_keyx = npc->keyx_supp_nmask[NPC_MCAM_RX]; + if ((rx_keyx & NPC_KEX_CHAN_NIBBLE_ENA) != NPC_KEX_CHAN_NIBBLE_ENA) + return rc; + + if ((rx_keyx & NPC_KEX_LB_LTYPE_NIBBLE_ENA) != + NPC_KEX_LB_LTYPE_NIBBLE_ENA) + return rc; + + mkex->lb_lt_offset = + nix_vlan_rx_mkex_offset(rx_keyx & NPC_KEX_LB_LTYPE_NIBBLE_MASK); + + p = &npc->prx_dxcfg; + x_info = &(*p)[NPC_MCAM_RX][NPC_LID_LA][NPC_LT_LA_ETHER].xtract[0]; + memcpy(&mkex->la_xtract, x_info, sizeof(struct npc_xtract_info)); + x_info = &(*p)[NPC_MCAM_RX][NPC_LID_LB][NPC_LT_LB_CTAG].xtract[0]; + memcpy(&mkex->lb_xtract, x_info, sizeof(struct npc_xtract_info)); + + return 0; +} + +static void nix_vlan_reinstall_vlan_filters(struct rte_eth_dev *eth_dev) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct vlan_entry *entry; + int rc; + + /* VLAN filters can't be set without setting filtern on */ + rc = nix_vlan_handle_default_rx_entry(eth_dev, false, true, true); + if (rc) { + otx2_err("Failed to reinstall vlan filters"); + return; + } + + TAILQ_FOREACH(entry, &dev->vlan_info.fltr_tbl, next) { + rc = otx2_nix_vlan_filter_set(eth_dev, entry->vlan_id, true); + if (rc) + otx2_err("Failed to reinstall filter for vlan:%d", + entry->vlan_id); + } +} + +int +otx2_nix_vlan_offload_init(struct rte_eth_dev *eth_dev) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + int rc, mask; + + /* Port initialized for first time or restarted */ + if (!dev->configured) { + rc = nix_vlan_get_mkex_info(dev); + if (rc) { + otx2_err("Failed to get vlan mkex info rc=%d", rc); + return rc; + } + + TAILQ_INIT(&dev->vlan_info.fltr_tbl); + } else { + /* Reinstall all mcam entries now if filter offload is set */ + if (eth_dev->data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_VLAN_FILTER) + nix_vlan_reinstall_vlan_filters(eth_dev); + } + + mask = + ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK; + rc = otx2_nix_vlan_offload_set(eth_dev, mask); + if (rc) { + otx2_err("Failed to set vlan offload rc=%d", rc); + return rc; + } + + return 0; +} + +int +otx2_nix_vlan_fini(struct rte_eth_dev 
*eth_dev) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct otx2_vlan_info *vlan = &dev->vlan_info; + struct vlan_entry *entry; + int rc; + + TAILQ_FOREACH(entry, &vlan->fltr_tbl, next) { + if (!dev->configured) { + TAILQ_REMOVE(&vlan->fltr_tbl, entry, next); + rte_free(entry); + } else { + /* MCAM entries freed by flow_fini & lf_free on + * port stop. + */ + entry->mcam_idx = 0; + } + } + + if (!dev->configured) { + if (vlan->def_rx_mcam_idx) { + rc = nix_vlan_mcam_free(dev, vlan->def_rx_mcam_idx); + if (rc) + return rc; + } + } + + otx2_nix_config_double_vlan(eth_dev, false); + vlan->def_rx_mcam_idx = 0; + return 0; +} diff --git a/src/spdk/dpdk/drivers/net/octeontx2/rte_pmd_octeontx2_version.map b/src/spdk/dpdk/drivers/net/octeontx2/rte_pmd_octeontx2_version.map new file mode 100644 index 000000000..f9f17e4f6 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/octeontx2/rte_pmd_octeontx2_version.map @@ -0,0 +1,3 @@ +DPDK_20.0 { + local: *; +}; diff --git a/src/spdk/dpdk/drivers/net/pcap/Makefile b/src/spdk/dpdk/drivers/net/pcap/Makefile new file mode 100644 index 000000000..f243d1a0f --- /dev/null +++ b/src/spdk/dpdk/drivers/net/pcap/Makefile @@ -0,0 +1,32 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2010-2014 Intel Corporation. +# Copyright(c) 2014 6WIND S.A. +# All rights reserved. + +include $(RTE_SDK)/mk/rte.vars.mk + +# +# library name +# +LIB = librte_pmd_pcap.a + +CFLAGS += -O3 +CFLAGS += $(WERROR_FLAGS) +LDLIBS += -lpcap +LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring +LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs +LDLIBS += -lrte_bus_vdev + +EXPORT_MAP := rte_pmd_pcap_version.map + +# +# all source are stored in SRCS-y +# +SRCS-$(CONFIG_RTE_LIBRTE_PMD_PCAP) += rte_eth_pcap.c + +# +# Export include files +# +SYMLINK-y-include += + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/src/spdk/dpdk/drivers/net/pcap/meson.build b/src/spdk/dpdk/drivers/net/pcap/meson.build new file mode 100644 index 000000000..b680710aa --- /dev/null +++ b/src/spdk/dpdk/drivers/net/pcap/meson.build @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2017 Intel Corporation + +if not dpdk_conf.has('RTE_PORT_PCAP') + build = false + reason = 'missing dependency, "libpcap"' +endif +sources = files('rte_eth_pcap.c') +ext_deps += pcap_dep diff --git a/src/spdk/dpdk/drivers/net/pcap/rte_eth_pcap.c b/src/spdk/dpdk/drivers/net/pcap/rte_eth_pcap.c new file mode 100644 index 000000000..b4c79d174 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/pcap/rte_eth_pcap.c @@ -0,0 +1,1588 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2016 Intel Corporation. + * Copyright(c) 2014 6WIND S.A. + * All rights reserved. 
+ */ + +#include + +#include +#include +#include +#include + +#if defined(RTE_EXEC_ENV_FREEBSD) +#include +#include +#endif + +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#define RTE_ETH_PCAP_SNAPSHOT_LEN 65535 +#define RTE_ETH_PCAP_SNAPLEN RTE_ETHER_MAX_JUMBO_FRAME_LEN +#define RTE_ETH_PCAP_PROMISC 1 +#define RTE_ETH_PCAP_TIMEOUT -1 + +#define ETH_PCAP_RX_PCAP_ARG "rx_pcap" +#define ETH_PCAP_TX_PCAP_ARG "tx_pcap" +#define ETH_PCAP_RX_IFACE_ARG "rx_iface" +#define ETH_PCAP_RX_IFACE_IN_ARG "rx_iface_in" +#define ETH_PCAP_TX_IFACE_ARG "tx_iface" +#define ETH_PCAP_IFACE_ARG "iface" +#define ETH_PCAP_PHY_MAC_ARG "phy_mac" +#define ETH_PCAP_INFINITE_RX_ARG "infinite_rx" + +#define ETH_PCAP_ARG_MAXLEN 64 + +#define RTE_PMD_PCAP_MAX_QUEUES 16 + +static char errbuf[PCAP_ERRBUF_SIZE]; +static struct timeval start_time; +static uint64_t start_cycles; +static uint64_t hz; +static uint8_t iface_idx; + +struct queue_stat { + volatile unsigned long pkts; + volatile unsigned long bytes; + volatile unsigned long err_pkts; +}; + +struct pcap_rx_queue { + uint16_t port_id; + uint16_t queue_id; + struct rte_mempool *mb_pool; + struct queue_stat rx_stat; + char name[PATH_MAX]; + char type[ETH_PCAP_ARG_MAXLEN]; + + /* Contains pre-generated packets to be looped through */ + struct rte_ring *pkts; +}; + +struct pcap_tx_queue { + uint16_t port_id; + uint16_t queue_id; + struct queue_stat tx_stat; + char name[PATH_MAX]; + char type[ETH_PCAP_ARG_MAXLEN]; +}; + +struct pmd_internals { + struct pcap_rx_queue rx_queue[RTE_PMD_PCAP_MAX_QUEUES]; + struct pcap_tx_queue tx_queue[RTE_PMD_PCAP_MAX_QUEUES]; + char devargs[ETH_PCAP_ARG_MAXLEN]; + struct rte_ether_addr eth_addr; + int if_index; + int single_iface; + int phy_mac; + unsigned int infinite_rx; +}; + +struct pmd_process_private { + pcap_t *rx_pcap[RTE_PMD_PCAP_MAX_QUEUES]; + pcap_t *tx_pcap[RTE_PMD_PCAP_MAX_QUEUES]; + pcap_dumper_t *tx_dumper[RTE_PMD_PCAP_MAX_QUEUES]; +}; + +struct pmd_devargs { + unsigned int num_of_queue; + struct devargs_queue { + pcap_dumper_t *dumper; + pcap_t *pcap; + const char *name; + const char *type; + } queue[RTE_PMD_PCAP_MAX_QUEUES]; + int phy_mac; +}; + +struct pmd_devargs_all { + struct pmd_devargs rx_queues; + struct pmd_devargs tx_queues; + int single_iface; + unsigned int is_tx_pcap; + unsigned int is_tx_iface; + unsigned int is_rx_pcap; + unsigned int is_rx_iface; + unsigned int infinite_rx; +}; + +static const char *valid_arguments[] = { + ETH_PCAP_RX_PCAP_ARG, + ETH_PCAP_TX_PCAP_ARG, + ETH_PCAP_RX_IFACE_ARG, + ETH_PCAP_RX_IFACE_IN_ARG, + ETH_PCAP_TX_IFACE_ARG, + ETH_PCAP_IFACE_ARG, + ETH_PCAP_PHY_MAC_ARG, + ETH_PCAP_INFINITE_RX_ARG, + NULL +}; + +static struct rte_eth_link pmd_link = { + .link_speed = ETH_SPEED_NUM_10G, + .link_duplex = ETH_LINK_FULL_DUPLEX, + .link_status = ETH_LINK_DOWN, + .link_autoneg = ETH_LINK_FIXED, +}; + +static int eth_pcap_logtype; + +#define PMD_LOG(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, eth_pcap_logtype, \ + "%s(): " fmt "\n", __func__, ##args) + +static int +eth_pcap_rx_jumbo(struct rte_mempool *mb_pool, struct rte_mbuf *mbuf, + const u_char *data, uint16_t data_len) +{ + /* Copy the first segment. */ + uint16_t len = rte_pktmbuf_tailroom(mbuf); + struct rte_mbuf *m = mbuf; + + rte_memcpy(rte_pktmbuf_append(mbuf, len), data, len); + data_len -= len; + data += len; + + while (data_len > 0) { + /* Allocate next mbuf and point to that. 
*/ + m->next = rte_pktmbuf_alloc(mb_pool); + + if (unlikely(!m->next)) + return -1; + + m = m->next; + + /* Headroom is not needed in chained mbufs. */ + rte_pktmbuf_prepend(m, rte_pktmbuf_headroom(m)); + m->pkt_len = 0; + m->data_len = 0; + + /* Copy next segment. */ + len = RTE_MIN(rte_pktmbuf_tailroom(m), data_len); + rte_memcpy(rte_pktmbuf_append(m, len), data, len); + + mbuf->nb_segs++; + data_len -= len; + data += len; + } + + return mbuf->nb_segs; +} + +static uint16_t +eth_pcap_rx_infinite(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) +{ + int i; + struct pcap_rx_queue *pcap_q = queue; + uint32_t rx_bytes = 0; + + if (unlikely(nb_pkts == 0)) + return 0; + + if (rte_pktmbuf_alloc_bulk(pcap_q->mb_pool, bufs, nb_pkts) != 0) + return 0; + + for (i = 0; i < nb_pkts; i++) { + struct rte_mbuf *pcap_buf; + int err = rte_ring_dequeue(pcap_q->pkts, (void **)&pcap_buf); + if (err) + return i; + + rte_memcpy(rte_pktmbuf_mtod(bufs[i], void *), + rte_pktmbuf_mtod(pcap_buf, void *), + pcap_buf->data_len); + bufs[i]->data_len = pcap_buf->data_len; + bufs[i]->pkt_len = pcap_buf->pkt_len; + bufs[i]->port = pcap_q->port_id; + rx_bytes += pcap_buf->data_len; + + /* Enqueue packet back on ring to allow infinite rx. */ + rte_ring_enqueue(pcap_q->pkts, pcap_buf); + } + + pcap_q->rx_stat.pkts += i; + pcap_q->rx_stat.bytes += rx_bytes; + + return i; +} + +static uint16_t +eth_pcap_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) +{ + unsigned int i; + struct pcap_pkthdr header; + struct pmd_process_private *pp; + const u_char *packet; + struct rte_mbuf *mbuf; + struct pcap_rx_queue *pcap_q = queue; + uint16_t num_rx = 0; + uint32_t rx_bytes = 0; + pcap_t *pcap; + + pp = rte_eth_devices[pcap_q->port_id].process_private; + pcap = pp->rx_pcap[pcap_q->queue_id]; + + if (unlikely(pcap == NULL || nb_pkts == 0)) + return 0; + + /* Reads the given number of packets from the pcap file one by one + * and copies the packet data into a newly allocated mbuf to return. + */ + for (i = 0; i < nb_pkts; i++) { + /* Get the next PCAP packet */ + packet = pcap_next(pcap, &header); + if (unlikely(packet == NULL)) + break; + + mbuf = rte_pktmbuf_alloc(pcap_q->mb_pool); + if (unlikely(mbuf == NULL)) + break; + + if (header.caplen <= rte_pktmbuf_tailroom(mbuf)) { + /* pcap packet will fit in the mbuf, can copy it */ + rte_memcpy(rte_pktmbuf_mtod(mbuf, void *), packet, + header.caplen); + mbuf->data_len = (uint16_t)header.caplen; + } else { + /* Try read jumbo frame into multi mbufs. */ + if (unlikely(eth_pcap_rx_jumbo(pcap_q->mb_pool, + mbuf, + packet, + header.caplen) == -1)) { + rte_pktmbuf_free(mbuf); + break; + } + } + + mbuf->pkt_len = (uint16_t)header.caplen; + mbuf->timestamp = (uint64_t)header.ts.tv_sec * 1000000 + + header.ts.tv_usec; + mbuf->ol_flags |= PKT_RX_TIMESTAMP; + mbuf->port = pcap_q->port_id; + bufs[num_rx] = mbuf; + num_rx++; + rx_bytes += header.caplen; + } + pcap_q->rx_stat.pkts += num_rx; + pcap_q->rx_stat.bytes += rx_bytes; + + return num_rx; +} + +static uint16_t +eth_null_rx(void *queue __rte_unused, + struct rte_mbuf **bufs __rte_unused, + uint16_t nb_pkts __rte_unused) +{ + return 0; +} + +static inline void +calculate_timestamp(struct timeval *ts) { + uint64_t cycles; + struct timeval cur_time; + + cycles = rte_get_timer_cycles() - start_cycles; + cur_time.tv_sec = cycles / hz; + cur_time.tv_usec = (cycles % hz) * 1e6 / hz; + timeradd(&start_time, &cur_time, ts); +} + +/* + * Callback to handle writing packets to a pcap file. 
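+ * Multi-segment packets larger than RTE_ETH_PCAP_SNAPLEN are dumped truncated (caplen < len).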
+ */ +static uint16_t +eth_pcap_tx_dumper(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) +{ + unsigned int i; + struct rte_mbuf *mbuf; + struct pmd_process_private *pp; + struct pcap_tx_queue *dumper_q = queue; + uint16_t num_tx = 0; + uint32_t tx_bytes = 0; + struct pcap_pkthdr header; + pcap_dumper_t *dumper; + unsigned char temp_data[RTE_ETH_PCAP_SNAPLEN]; + size_t len, caplen; + + pp = rte_eth_devices[dumper_q->port_id].process_private; + dumper = pp->tx_dumper[dumper_q->queue_id]; + + if (dumper == NULL || nb_pkts == 0) + return 0; + + /* writes the nb_pkts packets to the previously opened pcap file + * dumper */ + for (i = 0; i < nb_pkts; i++) { + mbuf = bufs[i]; + len = caplen = rte_pktmbuf_pkt_len(mbuf); + if (unlikely(!rte_pktmbuf_is_contiguous(mbuf) && + len > sizeof(temp_data))) { + caplen = sizeof(temp_data); + } + + calculate_timestamp(&header.ts); + header.len = len; + header.caplen = caplen; + /* rte_pktmbuf_read() returns a pointer to the data directly + * in the mbuf (when the mbuf is contiguous) or, otherwise, + * a pointer to temp_data after copying into it. + */ + pcap_dump((u_char *)dumper, &header, + rte_pktmbuf_read(mbuf, 0, caplen, temp_data)); + + num_tx++; + tx_bytes += caplen; + rte_pktmbuf_free(mbuf); + } + + /* + * Since there's no place to hook a callback when the forwarding + * process stops and to make sure the pcap file is actually written, + * we flush the pcap dumper within each burst. + */ + pcap_dump_flush(dumper); + dumper_q->tx_stat.pkts += num_tx; + dumper_q->tx_stat.bytes += tx_bytes; + dumper_q->tx_stat.err_pkts += nb_pkts - num_tx; + + return nb_pkts; +} + +/* + * Callback to handle dropping packets in the infinite rx case. + */ +static uint16_t +eth_tx_drop(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) +{ + unsigned int i; + uint32_t tx_bytes = 0; + struct pcap_tx_queue *tx_queue = queue; + + if (unlikely(nb_pkts == 0)) + return 0; + + for (i = 0; i < nb_pkts; i++) { + tx_bytes += bufs[i]->data_len; + rte_pktmbuf_free(bufs[i]); + } + + tx_queue->tx_stat.pkts += nb_pkts; + tx_queue->tx_stat.bytes += tx_bytes; + + return i; +} + +/* + * Callback to handle sending packets through a real NIC. + */ +static uint16_t +eth_pcap_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) +{ + unsigned int i; + int ret; + struct rte_mbuf *mbuf; + struct pmd_process_private *pp; + struct pcap_tx_queue *tx_queue = queue; + uint16_t num_tx = 0; + uint32_t tx_bytes = 0; + pcap_t *pcap; + unsigned char temp_data[RTE_ETH_PCAP_SNAPLEN]; + size_t len; + + pp = rte_eth_devices[tx_queue->port_id].process_private; + pcap = pp->tx_pcap[tx_queue->queue_id]; + + if (unlikely(nb_pkts == 0 || pcap == NULL)) + return 0; + + for (i = 0; i < nb_pkts; i++) { + mbuf = bufs[i]; + len = rte_pktmbuf_pkt_len(mbuf); + if (unlikely(!rte_pktmbuf_is_contiguous(mbuf) && + len > sizeof(temp_data))) { + PMD_LOG(ERR, + "Dropping multi segment PCAP packet. Size (%zd) > max size (%zd).", + len, sizeof(temp_data)); + rte_pktmbuf_free(mbuf); + continue; + } + + /* rte_pktmbuf_read() returns a pointer to the data directly + * in the mbuf (when the mbuf is contiguous) or, otherwise, + * a pointer to temp_data after copying into it. 
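+		 * Oversized multi-segment packets were already dropped above, so the read always fits.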
+ */ + ret = pcap_sendpacket(pcap, + rte_pktmbuf_read(mbuf, 0, len, temp_data), len); + if (unlikely(ret != 0)) + break; + num_tx++; + tx_bytes += len; + rte_pktmbuf_free(mbuf); + } + + tx_queue->tx_stat.pkts += num_tx; + tx_queue->tx_stat.bytes += tx_bytes; + tx_queue->tx_stat.err_pkts += i - num_tx; + + return i; +} + +/* + * pcap_open_live wrapper function + */ +static inline int +open_iface_live(const char *iface, pcap_t **pcap) { + *pcap = pcap_open_live(iface, RTE_ETH_PCAP_SNAPLEN, + RTE_ETH_PCAP_PROMISC, RTE_ETH_PCAP_TIMEOUT, errbuf); + + if (*pcap == NULL) { + PMD_LOG(ERR, "Couldn't open %s: %s", iface, errbuf); + return -1; + } + + return 0; +} + +static int +open_single_iface(const char *iface, pcap_t **pcap) +{ + if (open_iface_live(iface, pcap) < 0) { + PMD_LOG(ERR, "Couldn't open interface %s", iface); + return -1; + } + + return 0; +} + +static int +open_single_tx_pcap(const char *pcap_filename, pcap_dumper_t **dumper) +{ + pcap_t *tx_pcap; + + /* + * We need to create a dummy empty pcap_t to use it + * with pcap_dump_open(). We create big enough an Ethernet + * pcap holder. + */ + tx_pcap = pcap_open_dead(DLT_EN10MB, RTE_ETH_PCAP_SNAPSHOT_LEN); + if (tx_pcap == NULL) { + PMD_LOG(ERR, "Couldn't create dead pcap"); + return -1; + } + + /* The dumper is created using the previous pcap_t reference */ + *dumper = pcap_dump_open(tx_pcap, pcap_filename); + if (*dumper == NULL) { + pcap_close(tx_pcap); + PMD_LOG(ERR, "Couldn't open %s for writing.", + pcap_filename); + return -1; + } + + pcap_close(tx_pcap); + return 0; +} + +static int +open_single_rx_pcap(const char *pcap_filename, pcap_t **pcap) +{ + *pcap = pcap_open_offline(pcap_filename, errbuf); + if (*pcap == NULL) { + PMD_LOG(ERR, "Couldn't open %s: %s", pcap_filename, + errbuf); + return -1; + } + + return 0; +} + +static uint64_t +count_packets_in_pcap(pcap_t **pcap, struct pcap_rx_queue *pcap_q) +{ + const u_char *packet; + struct pcap_pkthdr header; + uint64_t pcap_pkt_count = 0; + + while ((packet = pcap_next(*pcap, &header))) + pcap_pkt_count++; + + /* The pcap is reopened so it can be used as normal later. */ + pcap_close(*pcap); + *pcap = NULL; + open_single_rx_pcap(pcap_q->name, pcap); + + return pcap_pkt_count; +} + +static int +eth_dev_start(struct rte_eth_dev *dev) +{ + unsigned int i; + struct pmd_internals *internals = dev->data->dev_private; + struct pmd_process_private *pp = dev->process_private; + struct pcap_tx_queue *tx; + struct pcap_rx_queue *rx; + + /* Special iface case. Single pcap is open and shared between tx/rx. 
*/ + if (internals->single_iface) { + tx = &internals->tx_queue[0]; + rx = &internals->rx_queue[0]; + + if (!pp->tx_pcap[0] && + strcmp(tx->type, ETH_PCAP_IFACE_ARG) == 0) { + if (open_single_iface(tx->name, &pp->tx_pcap[0]) < 0) + return -1; + pp->rx_pcap[0] = pp->tx_pcap[0]; + } + + goto status_up; + } + + /* If not open already, open tx pcaps/dumpers */ + for (i = 0; i < dev->data->nb_tx_queues; i++) { + tx = &internals->tx_queue[i]; + + if (!pp->tx_dumper[i] && + strcmp(tx->type, ETH_PCAP_TX_PCAP_ARG) == 0) { + if (open_single_tx_pcap(tx->name, + &pp->tx_dumper[i]) < 0) + return -1; + } else if (!pp->tx_pcap[i] && + strcmp(tx->type, ETH_PCAP_TX_IFACE_ARG) == 0) { + if (open_single_iface(tx->name, &pp->tx_pcap[i]) < 0) + return -1; + } + } + + /* If not open already, open rx pcaps */ + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rx = &internals->rx_queue[i]; + + if (pp->rx_pcap[i] != NULL) + continue; + + if (strcmp(rx->type, ETH_PCAP_RX_PCAP_ARG) == 0) { + if (open_single_rx_pcap(rx->name, &pp->rx_pcap[i]) < 0) + return -1; + } else if (strcmp(rx->type, ETH_PCAP_RX_IFACE_ARG) == 0) { + if (open_single_iface(rx->name, &pp->rx_pcap[i]) < 0) + return -1; + } + } + +status_up: + for (i = 0; i < dev->data->nb_rx_queues; i++) + dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + + for (i = 0; i < dev->data->nb_tx_queues; i++) + dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + + dev->data->dev_link.link_status = ETH_LINK_UP; + + return 0; +} + +/* + * This function gets called when the current port gets stopped. + * Is the only place for us to close all the tx streams dumpers. + * If not called the dumpers will be flushed within each tx burst. + */ +static void +eth_dev_stop(struct rte_eth_dev *dev) +{ + unsigned int i; + struct pmd_internals *internals = dev->data->dev_private; + struct pmd_process_private *pp = dev->process_private; + + /* Special iface case. Single pcap is open and shared between tx/rx. 
*/ + if (internals->single_iface) { + pcap_close(pp->tx_pcap[0]); + pp->tx_pcap[0] = NULL; + pp->rx_pcap[0] = NULL; + goto status_down; + } + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + if (pp->tx_dumper[i] != NULL) { + pcap_dump_close(pp->tx_dumper[i]); + pp->tx_dumper[i] = NULL; + } + + if (pp->tx_pcap[i] != NULL) { + pcap_close(pp->tx_pcap[i]); + pp->tx_pcap[i] = NULL; + } + } + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + if (pp->rx_pcap[i] != NULL) { + pcap_close(pp->rx_pcap[i]); + pp->rx_pcap[i] = NULL; + } + } + +status_down: + for (i = 0; i < dev->data->nb_rx_queues; i++) + dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + + for (i = 0; i < dev->data->nb_tx_queues; i++) + dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + + dev->data->dev_link.link_status = ETH_LINK_DOWN; +} + +static int +eth_dev_configure(struct rte_eth_dev *dev __rte_unused) +{ + return 0; +} + +static int +eth_dev_info(struct rte_eth_dev *dev, + struct rte_eth_dev_info *dev_info) +{ + struct pmd_internals *internals = dev->data->dev_private; + + dev_info->if_index = internals->if_index; + dev_info->max_mac_addrs = 1; + dev_info->max_rx_pktlen = (uint32_t) -1; + dev_info->max_rx_queues = dev->data->nb_rx_queues; + dev_info->max_tx_queues = dev->data->nb_tx_queues; + dev_info->min_rx_bufsize = 0; + + return 0; +} + +static int +eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) +{ + unsigned int i; + unsigned long rx_packets_total = 0, rx_bytes_total = 0; + unsigned long tx_packets_total = 0, tx_bytes_total = 0; + unsigned long tx_packets_err_total = 0; + const struct pmd_internals *internal = dev->data->dev_private; + + for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS && + i < dev->data->nb_rx_queues; i++) { + stats->q_ipackets[i] = internal->rx_queue[i].rx_stat.pkts; + stats->q_ibytes[i] = internal->rx_queue[i].rx_stat.bytes; + rx_packets_total += stats->q_ipackets[i]; + rx_bytes_total += stats->q_ibytes[i]; + } + + for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS && + i < dev->data->nb_tx_queues; i++) { + stats->q_opackets[i] = internal->tx_queue[i].tx_stat.pkts; + stats->q_obytes[i] = internal->tx_queue[i].tx_stat.bytes; + tx_packets_total += stats->q_opackets[i]; + tx_bytes_total += stats->q_obytes[i]; + tx_packets_err_total += internal->tx_queue[i].tx_stat.err_pkts; + } + + stats->ipackets = rx_packets_total; + stats->ibytes = rx_bytes_total; + stats->opackets = tx_packets_total; + stats->obytes = tx_bytes_total; + stats->oerrors = tx_packets_err_total; + + return 0; +} + +static int +eth_stats_reset(struct rte_eth_dev *dev) +{ + unsigned int i; + struct pmd_internals *internal = dev->data->dev_private; + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + internal->rx_queue[i].rx_stat.pkts = 0; + internal->rx_queue[i].rx_stat.bytes = 0; + } + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + internal->tx_queue[i].tx_stat.pkts = 0; + internal->tx_queue[i].tx_stat.bytes = 0; + internal->tx_queue[i].tx_stat.err_pkts = 0; + } + + return 0; +} + +static void +eth_dev_close(struct rte_eth_dev *dev) +{ + unsigned int i; + struct pmd_internals *internals = dev->data->dev_private; + + /* Device wide flag, but cleanup must be performed per queue. 
*/ + if (internals->infinite_rx) { + for (i = 0; i < dev->data->nb_rx_queues; i++) { + struct pcap_rx_queue *pcap_q = &internals->rx_queue[i]; + struct rte_mbuf *pcap_buf; + + while (!rte_ring_dequeue(pcap_q->pkts, + (void **)&pcap_buf)) + rte_pktmbuf_free(pcap_buf); + + rte_ring_free(pcap_q->pkts); + } + } + +} + +static void +eth_queue_release(void *q __rte_unused) +{ +} + +static int +eth_link_update(struct rte_eth_dev *dev __rte_unused, + int wait_to_complete __rte_unused) +{ + return 0; +} + +static int +eth_rx_queue_setup(struct rte_eth_dev *dev, + uint16_t rx_queue_id, + uint16_t nb_rx_desc __rte_unused, + unsigned int socket_id __rte_unused, + const struct rte_eth_rxconf *rx_conf __rte_unused, + struct rte_mempool *mb_pool) +{ + struct pmd_internals *internals = dev->data->dev_private; + struct pcap_rx_queue *pcap_q = &internals->rx_queue[rx_queue_id]; + + pcap_q->mb_pool = mb_pool; + pcap_q->port_id = dev->data->port_id; + pcap_q->queue_id = rx_queue_id; + dev->data->rx_queues[rx_queue_id] = pcap_q; + + if (internals->infinite_rx) { + struct pmd_process_private *pp; + char ring_name[NAME_MAX]; + static uint32_t ring_number; + uint64_t pcap_pkt_count = 0; + struct rte_mbuf *bufs[1]; + pcap_t **pcap; + + pp = rte_eth_devices[pcap_q->port_id].process_private; + pcap = &pp->rx_pcap[pcap_q->queue_id]; + + if (unlikely(*pcap == NULL)) + return -ENOENT; + + pcap_pkt_count = count_packets_in_pcap(pcap, pcap_q); + + snprintf(ring_name, sizeof(ring_name), "PCAP_RING%" PRIu16, + ring_number); + + pcap_q->pkts = rte_ring_create(ring_name, + rte_align64pow2(pcap_pkt_count + 1), 0, + RING_F_SP_ENQ | RING_F_SC_DEQ); + ring_number++; + if (!pcap_q->pkts) + return -ENOENT; + + /* Fill ring with packets from PCAP file one by one. */ + while (eth_pcap_rx(pcap_q, bufs, 1)) { + /* Check for multiseg mbufs. */ + if (bufs[0]->nb_segs != 1) { + rte_pktmbuf_free(*bufs); + + while (!rte_ring_dequeue(pcap_q->pkts, + (void **)bufs)) + rte_pktmbuf_free(*bufs); + + rte_ring_free(pcap_q->pkts); + PMD_LOG(ERR, "Multiseg mbufs are not supported in infinite_rx " + "mode."); + return -EINVAL; + } + + rte_ring_enqueue_bulk(pcap_q->pkts, + (void * const *)bufs, 1, NULL); + } + /* + * Reset the stats for this queue since eth_pcap_rx calls above + * didn't result in the application receiving packets. 
+ */ + pcap_q->rx_stat.pkts = 0; + pcap_q->rx_stat.bytes = 0; + } + + return 0; +} + +static int +eth_tx_queue_setup(struct rte_eth_dev *dev, + uint16_t tx_queue_id, + uint16_t nb_tx_desc __rte_unused, + unsigned int socket_id __rte_unused, + const struct rte_eth_txconf *tx_conf __rte_unused) +{ + struct pmd_internals *internals = dev->data->dev_private; + struct pcap_tx_queue *pcap_q = &internals->tx_queue[tx_queue_id]; + + pcap_q->port_id = dev->data->port_id; + pcap_q->queue_id = tx_queue_id; + dev->data->tx_queues[tx_queue_id] = pcap_q; + + return 0; +} + +static int +eth_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) +{ + dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; + + return 0; +} + +static int +eth_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id) +{ + dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; + + return 0; +} + +static int +eth_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id) +{ + dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; + + return 0; +} + +static int +eth_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id) +{ + dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; + + return 0; +} + +static const struct eth_dev_ops ops = { + .dev_start = eth_dev_start, + .dev_stop = eth_dev_stop, + .dev_close = eth_dev_close, + .dev_configure = eth_dev_configure, + .dev_infos_get = eth_dev_info, + .rx_queue_setup = eth_rx_queue_setup, + .tx_queue_setup = eth_tx_queue_setup, + .rx_queue_start = eth_rx_queue_start, + .tx_queue_start = eth_tx_queue_start, + .rx_queue_stop = eth_rx_queue_stop, + .tx_queue_stop = eth_tx_queue_stop, + .rx_queue_release = eth_queue_release, + .tx_queue_release = eth_queue_release, + .link_update = eth_link_update, + .stats_get = eth_stats_get, + .stats_reset = eth_stats_reset, +}; + +static int +add_queue(struct pmd_devargs *pmd, const char *name, const char *type, + pcap_t *pcap, pcap_dumper_t *dumper) +{ + if (pmd->num_of_queue >= RTE_PMD_PCAP_MAX_QUEUES) + return -1; + if (pcap) + pmd->queue[pmd->num_of_queue].pcap = pcap; + if (dumper) + pmd->queue[pmd->num_of_queue].dumper = dumper; + pmd->queue[pmd->num_of_queue].name = name; + pmd->queue[pmd->num_of_queue].type = type; + pmd->num_of_queue++; + return 0; +} + +/* + * Function handler that opens the pcap file for reading a stores a + * reference of it for use it later on. + */ +static int +open_rx_pcap(const char *key, const char *value, void *extra_args) +{ + const char *pcap_filename = value; + struct pmd_devargs *rx = extra_args; + pcap_t *pcap = NULL; + + if (open_single_rx_pcap(pcap_filename, &pcap) < 0) + return -1; + + if (add_queue(rx, pcap_filename, key, pcap, NULL) < 0) { + pcap_close(pcap); + return -1; + } + + return 0; +} + +/* + * Opens a pcap file for writing and stores a reference to it + * for use it later on. 
+ */ +static int +open_tx_pcap(const char *key, const char *value, void *extra_args) +{ + const char *pcap_filename = value; + struct pmd_devargs *dumpers = extra_args; + pcap_dumper_t *dumper; + + if (open_single_tx_pcap(pcap_filename, &dumper) < 0) + return -1; + + if (add_queue(dumpers, pcap_filename, key, NULL, dumper) < 0) { + pcap_dump_close(dumper); + return -1; + } + + return 0; +} + +/* + * Opens an interface for reading and writing + */ +static inline int +open_rx_tx_iface(const char *key, const char *value, void *extra_args) +{ + const char *iface = value; + struct pmd_devargs *tx = extra_args; + pcap_t *pcap = NULL; + + if (open_single_iface(iface, &pcap) < 0) + return -1; + + tx->queue[0].pcap = pcap; + tx->queue[0].name = iface; + tx->queue[0].type = key; + + return 0; +} + +static inline int +set_iface_direction(const char *iface, pcap_t *pcap, + pcap_direction_t direction) +{ + const char *direction_str = (direction == PCAP_D_IN) ? "IN" : "OUT"; + if (pcap_setdirection(pcap, direction) < 0) { + PMD_LOG(ERR, "Setting %s pcap direction %s failed - %s\n", + iface, direction_str, pcap_geterr(pcap)); + return -1; + } + PMD_LOG(INFO, "Setting %s pcap direction %s\n", + iface, direction_str); + return 0; +} + +static inline int +open_iface(const char *key, const char *value, void *extra_args) +{ + const char *iface = value; + struct pmd_devargs *pmd = extra_args; + pcap_t *pcap = NULL; + + if (open_single_iface(iface, &pcap) < 0) + return -1; + if (add_queue(pmd, iface, key, pcap, NULL) < 0) { + pcap_close(pcap); + return -1; + } + + return 0; +} + +/* + * Opens a NIC for reading packets from it + */ +static inline int +open_rx_iface(const char *key, const char *value, void *extra_args) +{ + int ret = open_iface(key, value, extra_args); + if (ret < 0) + return ret; + if (strcmp(key, ETH_PCAP_RX_IFACE_IN_ARG) == 0) { + struct pmd_devargs *pmd = extra_args; + unsigned int qid = pmd->num_of_queue - 1; + + set_iface_direction(pmd->queue[qid].name, + pmd->queue[qid].pcap, + PCAP_D_IN); + } + + return 0; +} + +static inline int +rx_iface_args_process(const char *key, const char *value, void *extra_args) +{ + if (strcmp(key, ETH_PCAP_RX_IFACE_ARG) == 0 || + strcmp(key, ETH_PCAP_RX_IFACE_IN_ARG) == 0) + return open_rx_iface(key, value, extra_args); + + return 0; +} + +/* + * Opens a NIC for writing packets to it + */ +static int +open_tx_iface(const char *key, const char *value, void *extra_args) +{ + return open_iface(key, value, extra_args); +} + +static int +select_phy_mac(const char *key __rte_unused, const char *value, + void *extra_args) +{ + if (extra_args) { + const int phy_mac = atoi(value); + int *enable_phy_mac = extra_args; + + if (phy_mac) + *enable_phy_mac = 1; + } + return 0; +} + +static int +get_infinite_rx_arg(const char *key __rte_unused, + const char *value, void *extra_args) +{ + if (extra_args) { + const int infinite_rx = atoi(value); + int *enable_infinite_rx = extra_args; + + if (infinite_rx > 0) + *enable_infinite_rx = 1; + } + return 0; +} + +static int +pmd_init_internals(struct rte_vdev_device *vdev, + const unsigned int nb_rx_queues, + const unsigned int nb_tx_queues, + struct pmd_internals **internals, + struct rte_eth_dev **eth_dev) +{ + struct rte_eth_dev_data *data; + struct pmd_process_private *pp; + unsigned int numa_node = vdev->device.numa_node; + + PMD_LOG(INFO, "Creating pcap-backed ethdev on numa socket %d", + numa_node); + + pp = (struct pmd_process_private *) + rte_zmalloc(NULL, sizeof(struct pmd_process_private), + RTE_CACHE_LINE_SIZE); + + if 
(pp == NULL) { + PMD_LOG(ERR, + "Failed to allocate memory for process private"); + return -1; + } + + /* reserve an ethdev entry */ + *eth_dev = rte_eth_vdev_allocate(vdev, sizeof(**internals)); + if (!(*eth_dev)) { + rte_free(pp); + return -1; + } + (*eth_dev)->process_private = pp; + /* now put it all together + * - store queue data in internals, + * - store numa_node info in eth_dev + * - point eth_dev_data to internals + * - and point eth_dev structure to new eth_dev_data structure + */ + *internals = (*eth_dev)->data->dev_private; + /* + * Interface MAC = 02:70:63:61:70: + * derived from: 'locally administered':'p':'c':'a':'p':'iface_idx' + * where the middle 4 characters are converted to hex. + */ + (*internals)->eth_addr = (struct rte_ether_addr) { + .addr_bytes = { 0x02, 0x70, 0x63, 0x61, 0x70, iface_idx++ } + }; + (*internals)->phy_mac = 0; + data = (*eth_dev)->data; + data->nb_rx_queues = (uint16_t)nb_rx_queues; + data->nb_tx_queues = (uint16_t)nb_tx_queues; + data->dev_link = pmd_link; + data->mac_addrs = &(*internals)->eth_addr; + data->promiscuous = 1; + data->all_multicast = 1; + + /* + * NOTE: we'll replace the data element, of originally allocated + * eth_dev so the rings are local per-process + */ + (*eth_dev)->dev_ops = &ops; + + strlcpy((*internals)->devargs, rte_vdev_device_args(vdev), + ETH_PCAP_ARG_MAXLEN); + + return 0; +} + +static int +eth_pcap_update_mac(const char *if_name, struct rte_eth_dev *eth_dev, + const unsigned int numa_node) +{ +#if defined(RTE_EXEC_ENV_LINUX) + void *mac_addrs; + struct ifreq ifr; + int if_fd = socket(AF_INET, SOCK_DGRAM, 0); + + if (if_fd == -1) + return -1; + + rte_strscpy(ifr.ifr_name, if_name, sizeof(ifr.ifr_name)); + if (ioctl(if_fd, SIOCGIFHWADDR, &ifr)) { + close(if_fd); + return -1; + } + + mac_addrs = rte_zmalloc_socket(NULL, RTE_ETHER_ADDR_LEN, 0, numa_node); + if (!mac_addrs) { + close(if_fd); + return -1; + } + + PMD_LOG(INFO, "Setting phy MAC for %s", if_name); + eth_dev->data->mac_addrs = mac_addrs; + rte_memcpy(eth_dev->data->mac_addrs[0].addr_bytes, + ifr.ifr_hwaddr.sa_data, RTE_ETHER_ADDR_LEN); + + close(if_fd); + + return 0; + +#elif defined(RTE_EXEC_ENV_FREEBSD) + void *mac_addrs; + struct if_msghdr *ifm; + struct sockaddr_dl *sdl; + int mib[6]; + size_t len = 0; + char *buf; + + mib[0] = CTL_NET; + mib[1] = AF_ROUTE; + mib[2] = 0; + mib[3] = AF_LINK; + mib[4] = NET_RT_IFLIST; + mib[5] = if_nametoindex(if_name); + + if (sysctl(mib, 6, NULL, &len, NULL, 0) < 0) + return -1; + + if (len == 0) + return -1; + + buf = rte_malloc(NULL, len, 0); + if (!buf) + return -1; + + if (sysctl(mib, 6, buf, &len, NULL, 0) < 0) { + rte_free(buf); + return -1; + } + ifm = (struct if_msghdr *)buf; + sdl = (struct sockaddr_dl *)(ifm + 1); + + mac_addrs = rte_zmalloc_socket(NULL, RTE_ETHER_ADDR_LEN, 0, numa_node); + if (!mac_addrs) { + rte_free(buf); + return -1; + } + + PMD_LOG(INFO, "Setting phy MAC for %s", if_name); + eth_dev->data->mac_addrs = mac_addrs; + rte_memcpy(eth_dev->data->mac_addrs[0].addr_bytes, + LLADDR(sdl), RTE_ETHER_ADDR_LEN); + + rte_free(buf); + + return 0; +#else + return -1; +#endif +} + +static int +eth_from_pcaps_common(struct rte_vdev_device *vdev, + struct pmd_devargs_all *devargs_all, + struct pmd_internals **internals, struct rte_eth_dev **eth_dev) +{ + struct pmd_process_private *pp; + struct pmd_devargs *rx_queues = &devargs_all->rx_queues; + struct pmd_devargs *tx_queues = &devargs_all->tx_queues; + const unsigned int nb_rx_queues = rx_queues->num_of_queue; + const unsigned int nb_tx_queues = 
tx_queues->num_of_queue; + unsigned int i; + + if (pmd_init_internals(vdev, nb_rx_queues, nb_tx_queues, internals, + eth_dev) < 0) + return -1; + + pp = (*eth_dev)->process_private; + for (i = 0; i < nb_rx_queues; i++) { + struct pcap_rx_queue *rx = &(*internals)->rx_queue[i]; + struct devargs_queue *queue = &rx_queues->queue[i]; + + pp->rx_pcap[i] = queue->pcap; + strlcpy(rx->name, queue->name, sizeof(rx->name)); + strlcpy(rx->type, queue->type, sizeof(rx->type)); + } + + for (i = 0; i < nb_tx_queues; i++) { + struct pcap_tx_queue *tx = &(*internals)->tx_queue[i]; + struct devargs_queue *queue = &tx_queues->queue[i]; + + pp->tx_dumper[i] = queue->dumper; + pp->tx_pcap[i] = queue->pcap; + strlcpy(tx->name, queue->name, sizeof(tx->name)); + strlcpy(tx->type, queue->type, sizeof(tx->type)); + } + + return 0; +} + +static int +eth_from_pcaps(struct rte_vdev_device *vdev, + struct pmd_devargs_all *devargs_all) +{ + struct pmd_internals *internals = NULL; + struct rte_eth_dev *eth_dev = NULL; + struct pmd_devargs *rx_queues = &devargs_all->rx_queues; + int single_iface = devargs_all->single_iface; + unsigned int infinite_rx = devargs_all->infinite_rx; + int ret; + + ret = eth_from_pcaps_common(vdev, devargs_all, &internals, &eth_dev); + + if (ret < 0) + return ret; + + /* store whether we are using a single interface for rx/tx or not */ + internals->single_iface = single_iface; + + if (single_iface) { + internals->if_index = if_nametoindex(rx_queues->queue[0].name); + + /* phy_mac arg is applied only if "iface" devarg is provided */ + if (rx_queues->phy_mac) { + int ret = eth_pcap_update_mac(rx_queues->queue[0].name, + eth_dev, vdev->device.numa_node); + if (ret == 0) + internals->phy_mac = 1; + } + } + + internals->infinite_rx = infinite_rx; + /* Assign rx ops. */ + if (infinite_rx) + eth_dev->rx_pkt_burst = eth_pcap_rx_infinite; + else if (devargs_all->is_rx_pcap || devargs_all->is_rx_iface || + single_iface) + eth_dev->rx_pkt_burst = eth_pcap_rx; + else + eth_dev->rx_pkt_burst = eth_null_rx; + + /* Assign tx ops. 
*/ + if (devargs_all->is_tx_pcap) + eth_dev->tx_pkt_burst = eth_pcap_tx_dumper; + else if (devargs_all->is_tx_iface || single_iface) + eth_dev->tx_pkt_burst = eth_pcap_tx; + else + eth_dev->tx_pkt_burst = eth_tx_drop; + + rte_eth_dev_probing_finish(eth_dev); + return 0; +} + +static int +pmd_pcap_probe(struct rte_vdev_device *dev) +{ + const char *name; + struct rte_kvargs *kvlist; + struct pmd_devargs pcaps = {0}; + struct pmd_devargs dumpers = {0}; + struct rte_eth_dev *eth_dev = NULL; + struct pmd_internals *internal; + int ret = 0; + + struct pmd_devargs_all devargs_all = { + .single_iface = 0, + .is_tx_pcap = 0, + .is_tx_iface = 0, + .infinite_rx = 0, + }; + + name = rte_vdev_device_name(dev); + PMD_LOG(INFO, "Initializing pmd_pcap for %s", name); + + gettimeofday(&start_time, NULL); + start_cycles = rte_get_timer_cycles(); + hz = rte_get_timer_hz(); + + if (rte_eal_process_type() == RTE_PROC_SECONDARY) { + eth_dev = rte_eth_dev_attach_secondary(name); + if (!eth_dev) { + PMD_LOG(ERR, "Failed to probe %s", name); + return -1; + } + + internal = eth_dev->data->dev_private; + + kvlist = rte_kvargs_parse(internal->devargs, valid_arguments); + if (kvlist == NULL) + return -1; + } else { + kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), + valid_arguments); + if (kvlist == NULL) + return -1; + } + + /* + * If iface argument is passed we open the NICs and use them for + * reading / writing + */ + if (rte_kvargs_count(kvlist, ETH_PCAP_IFACE_ARG) == 1) { + + ret = rte_kvargs_process(kvlist, ETH_PCAP_IFACE_ARG, + &open_rx_tx_iface, &pcaps); + if (ret < 0) + goto free_kvlist; + + dumpers.queue[0] = pcaps.queue[0]; + + ret = rte_kvargs_process(kvlist, ETH_PCAP_PHY_MAC_ARG, + &select_phy_mac, &pcaps.phy_mac); + if (ret < 0) + goto free_kvlist; + + dumpers.phy_mac = pcaps.phy_mac; + + devargs_all.single_iface = 1; + pcaps.num_of_queue = 1; + dumpers.num_of_queue = 1; + + goto create_eth; + } + + /* + * We check whether we want to open a RX stream from a real NIC, a + * pcap file or open a dummy RX stream + */ + devargs_all.is_rx_pcap = + rte_kvargs_count(kvlist, ETH_PCAP_RX_PCAP_ARG) ? 1 : 0; + devargs_all.is_rx_iface = + rte_kvargs_count(kvlist, ETH_PCAP_RX_IFACE_ARG) ? 1 : 0; + pcaps.num_of_queue = 0; + + devargs_all.is_tx_pcap = + rte_kvargs_count(kvlist, ETH_PCAP_TX_PCAP_ARG) ? 1 : 0; + devargs_all.is_tx_iface = + rte_kvargs_count(kvlist, ETH_PCAP_TX_IFACE_ARG) ? 1 : 0; + dumpers.num_of_queue = 0; + + if (devargs_all.is_rx_pcap) { + /* + * We check whether we want to infinitely rx the pcap file. + */ + unsigned int infinite_rx_arg_cnt = rte_kvargs_count(kvlist, + ETH_PCAP_INFINITE_RX_ARG); + + if (infinite_rx_arg_cnt == 1) { + ret = rte_kvargs_process(kvlist, + ETH_PCAP_INFINITE_RX_ARG, + &get_infinite_rx_arg, + &devargs_all.infinite_rx); + if (ret < 0) + goto free_kvlist; + PMD_LOG(INFO, "infinite_rx has been %s for %s", + devargs_all.infinite_rx ? 
"enabled" : "disabled", + name); + + } else if (infinite_rx_arg_cnt > 1) { + PMD_LOG(WARNING, "infinite_rx has not been enabled since the " + "argument has been provided more than once " + "for %s", name); + } + + ret = rte_kvargs_process(kvlist, ETH_PCAP_RX_PCAP_ARG, + &open_rx_pcap, &pcaps); + } else if (devargs_all.is_rx_iface) { + ret = rte_kvargs_process(kvlist, NULL, + &rx_iface_args_process, &pcaps); + } else if (devargs_all.is_tx_iface || devargs_all.is_tx_pcap) { + unsigned int i; + + /* Count number of tx queue args passed before dummy rx queue + * creation so a dummy rx queue can be created for each tx queue + */ + unsigned int num_tx_queues = + (rte_kvargs_count(kvlist, ETH_PCAP_TX_PCAP_ARG) + + rte_kvargs_count(kvlist, ETH_PCAP_TX_IFACE_ARG)); + + PMD_LOG(INFO, "Creating null rx queue since no rx queues were provided."); + + /* Creating a dummy rx queue for each tx queue passed */ + for (i = 0; i < num_tx_queues; i++) + ret = add_queue(&pcaps, "dummy_rx", "rx_null", NULL, + NULL); + } else { + PMD_LOG(ERR, "Error - No rx or tx queues provided"); + ret = -ENOENT; + } + if (ret < 0) + goto free_kvlist; + + /* + * We check whether we want to open a TX stream to a real NIC, + * a pcap file, or drop packets on tx + */ + if (devargs_all.is_tx_pcap) { + ret = rte_kvargs_process(kvlist, ETH_PCAP_TX_PCAP_ARG, + &open_tx_pcap, &dumpers); + } else if (devargs_all.is_tx_iface) { + ret = rte_kvargs_process(kvlist, ETH_PCAP_TX_IFACE_ARG, + &open_tx_iface, &dumpers); + } else { + unsigned int i; + + PMD_LOG(INFO, "Dropping packets on tx since no tx queues were provided."); + + /* Add 1 dummy queue per rxq which counts and drops packets. */ + for (i = 0; i < pcaps.num_of_queue; i++) + ret = add_queue(&dumpers, "dummy_tx", "tx_drop", NULL, + NULL); + } + + if (ret < 0) + goto free_kvlist; + +create_eth: + if (rte_eal_process_type() == RTE_PROC_SECONDARY) { + struct pmd_process_private *pp; + unsigned int i; + + internal = eth_dev->data->dev_private; + pp = (struct pmd_process_private *) + rte_zmalloc(NULL, + sizeof(struct pmd_process_private), + RTE_CACHE_LINE_SIZE); + + if (pp == NULL) { + PMD_LOG(ERR, + "Failed to allocate memory for process private"); + ret = -1; + goto free_kvlist; + } + + eth_dev->dev_ops = &ops; + eth_dev->device = &dev->device; + + /* setup process private */ + for (i = 0; i < pcaps.num_of_queue; i++) + pp->rx_pcap[i] = pcaps.queue[i].pcap; + + for (i = 0; i < dumpers.num_of_queue; i++) { + pp->tx_dumper[i] = dumpers.queue[i].dumper; + pp->tx_pcap[i] = dumpers.queue[i].pcap; + } + + eth_dev->process_private = pp; + eth_dev->rx_pkt_burst = eth_pcap_rx; + if (devargs_all.is_tx_pcap) + eth_dev->tx_pkt_burst = eth_pcap_tx_dumper; + else + eth_dev->tx_pkt_burst = eth_pcap_tx; + + rte_eth_dev_probing_finish(eth_dev); + goto free_kvlist; + } + + devargs_all.rx_queues = pcaps; + devargs_all.tx_queues = dumpers; + + ret = eth_from_pcaps(dev, &devargs_all); + +free_kvlist: + rte_kvargs_free(kvlist); + + return ret; +} + +static int +pmd_pcap_remove(struct rte_vdev_device *dev) +{ + struct pmd_internals *internals = NULL; + struct rte_eth_dev *eth_dev = NULL; + + PMD_LOG(INFO, "Closing pcap ethdev on numa socket %d", + rte_socket_id()); + + if (!dev) + return -1; + + /* reserve an ethdev entry */ + eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev)); + if (eth_dev == NULL) + return -1; + + if (rte_eal_process_type() == RTE_PROC_PRIMARY) { + internals = eth_dev->data->dev_private; + if (internals != NULL && internals->phy_mac == 0) + /* not dynamically allocated, must not be 
freed */ + eth_dev->data->mac_addrs = NULL; + } + + eth_dev_close(eth_dev); + + rte_free(eth_dev->process_private); + rte_eth_dev_release_port(eth_dev); + + return 0; +} + +static struct rte_vdev_driver pmd_pcap_drv = { + .probe = pmd_pcap_probe, + .remove = pmd_pcap_remove, +}; + +RTE_PMD_REGISTER_VDEV(net_pcap, pmd_pcap_drv); +RTE_PMD_REGISTER_ALIAS(net_pcap, eth_pcap); +RTE_PMD_REGISTER_PARAM_STRING(net_pcap, + ETH_PCAP_RX_PCAP_ARG "= " + ETH_PCAP_TX_PCAP_ARG "= " + ETH_PCAP_RX_IFACE_ARG "= " + ETH_PCAP_RX_IFACE_IN_ARG "= " + ETH_PCAP_TX_IFACE_ARG "= " + ETH_PCAP_IFACE_ARG "= " + ETH_PCAP_PHY_MAC_ARG "=" + ETH_PCAP_INFINITE_RX_ARG "=<0|1>"); + +RTE_INIT(eth_pcap_init_log) +{ + eth_pcap_logtype = rte_log_register("pmd.net.pcap"); + if (eth_pcap_logtype >= 0) + rte_log_set_level(eth_pcap_logtype, RTE_LOG_NOTICE); +} diff --git a/src/spdk/dpdk/drivers/net/pcap/rte_pmd_pcap_version.map b/src/spdk/dpdk/drivers/net/pcap/rte_pmd_pcap_version.map new file mode 100644 index 000000000..f9f17e4f6 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/pcap/rte_pmd_pcap_version.map @@ -0,0 +1,3 @@ +DPDK_20.0 { + local: *; +}; diff --git a/src/spdk/dpdk/drivers/net/pfe/Makefile b/src/spdk/dpdk/drivers/net/pfe/Makefile new file mode 100644 index 000000000..75d30b01a --- /dev/null +++ b/src/spdk/dpdk/drivers/net/pfe/Makefile @@ -0,0 +1,31 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright 2018-2019 NXP +# + +include $(RTE_SDK)/mk/rte.vars.mk + +# +# library name +# +LIB = librte_pmd_pfe.a + +CFLAGS += -O3 $(WERROR_FLAGS) +CFLAGS += -Wno-pointer-arith +CFLAGS += -I$(RTE_SDK)/drivers/net/pfe/base/ +CFLAGS += -I$(RTE_SDK)/drivers/common/dpaax + +EXPORT_MAP := rte_pmd_pfe_version.map + +# Interfaces with DPDK +SRCS-$(CONFIG_RTE_LIBRTE_PFE_PMD) += pfe_ethdev.c +SRCS-$(CONFIG_RTE_LIBRTE_PFE_PMD) += pfe_hal.c +SRCS-$(CONFIG_RTE_LIBRTE_PFE_PMD) += pfe_hif_lib.c +SRCS-$(CONFIG_RTE_LIBRTE_PFE_PMD) += pfe_hif.c + +LDLIBS += -lrte_bus_vdev +LDLIBS += -lrte_bus_dpaa +LDLIBS += -lrte_common_dpaax +LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool +LDLIBS += -lrte_ethdev -lrte_kvargs + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/src/spdk/dpdk/drivers/net/pfe/base/cbus.h b/src/spdk/dpdk/drivers/net/pfe/base/cbus.h new file mode 100644 index 000000000..fe7ea6006 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/pfe/base/cbus.h @@ -0,0 +1,66 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2018-2019 NXP + */ + +#ifndef _CBUS_H_ +#define _CBUS_H_ + +#include + +#define EMAC1_BASE_ADDR (CBUS_BASE_ADDR + 0x200000) +#define EGPI1_BASE_ADDR (CBUS_BASE_ADDR + 0x210000) +#define EMAC2_BASE_ADDR (CBUS_BASE_ADDR + 0x220000) +#define EGPI2_BASE_ADDR (CBUS_BASE_ADDR + 0x230000) +#define BMU1_BASE_ADDR (CBUS_BASE_ADDR + 0x240000) +#define BMU2_BASE_ADDR (CBUS_BASE_ADDR + 0x250000) +#define ARB_BASE_ADDR (CBUS_BASE_ADDR + 0x260000) +#define DDR_CONFIG_BASE_ADDR (CBUS_BASE_ADDR + 0x270000) +#define HIF_BASE_ADDR (CBUS_BASE_ADDR + 0x280000) +#define HGPI_BASE_ADDR (CBUS_BASE_ADDR + 0x290000) +#define LMEM_BASE_ADDR (CBUS_BASE_ADDR + 0x300000) +#define LMEM_SIZE 0x10000 +#define LMEM_END (LMEM_BASE_ADDR + LMEM_SIZE) +#define TMU_CSR_BASE_ADDR (CBUS_BASE_ADDR + 0x310000) +#define CLASS_CSR_BASE_ADDR (CBUS_BASE_ADDR + 0x320000) +#define HIF_NOCPY_BASE_ADDR (CBUS_BASE_ADDR + 0x350000) +#define UTIL_CSR_BASE_ADDR (CBUS_BASE_ADDR + 0x360000) +#define CBUS_GPT_BASE_ADDR (CBUS_BASE_ADDR + 0x370000) + +/* + * defgroup XXX_MEM_ACCESS_ADDR PE memory access through CSR + * XXX_MEM_ACCESS_ADDR register bit definitions. 
+ */ +#define PE_MEM_ACCESS_WRITE BIT(31) /* Internal Memory Write. */ +#define PE_MEM_ACCESS_IMEM BIT(15) +#define PE_MEM_ACCESS_DMEM BIT(16) + +/* Byte Enables of the Internal memory access. These are interpred in BE */ +#define PE_MEM_ACCESS_BYTE_ENABLE(offset, size) \ + ({ typeof(size) size_ = (size); \ + (((BIT(size_) - 1) << (4 - (offset) - (size_))) & 0xf) << 24; }) + +#include "cbus/emac_mtip.h" +#include "cbus/gpi.h" +#include "cbus/bmu.h" +#include "cbus/hif.h" +#include "cbus/tmu_csr.h" +#include "cbus/class_csr.h" +#include "cbus/hif_nocpy.h" +#include "cbus/util_csr.h" + +/* PFE cores states */ +#define CORE_DISABLE 0x00000000 +#define CORE_ENABLE 0x00000001 +#define CORE_SW_RESET 0x00000002 + +/* LMEM defines */ +#define LMEM_HDR_SIZE 0x0010 +#define LMEM_BUF_SIZE_LN2 0x7 +#define LMEM_BUF_SIZE BIT(LMEM_BUF_SIZE_LN2) + +/* DDR defines */ +#define DDR_HDR_SIZE 0x0100 +#define DDR_BUF_SIZE_LN2 0xb +#define DDR_BUF_SIZE BIT(DDR_BUF_SIZE_LN2) + +#endif /* _CBUS_H_ */ diff --git a/src/spdk/dpdk/drivers/net/pfe/base/cbus/bmu.h b/src/spdk/dpdk/drivers/net/pfe/base/cbus/bmu.h new file mode 100644 index 000000000..4821fd1f2 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/pfe/base/cbus/bmu.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2018-2019 NXP + */ + +#ifndef _BMU_H_ +#define _BMU_H_ + +#define BMU_VERSION 0x000 +#define BMU_CTRL 0x004 +#define BMU_UCAST_CONFIG 0x008 +#define BMU_UCAST_BASE_ADDR 0x00c +#define BMU_BUF_SIZE 0x010 +#define BMU_BUF_CNT 0x014 +#define BMU_THRES 0x018 +#define BMU_INT_SRC 0x020 +#define BMU_INT_ENABLE 0x024 +#define BMU_ALLOC_CTRL 0x030 +#define BMU_FREE_CTRL 0x034 +#define BMU_FREE_ERR_ADDR 0x038 +#define BMU_CURR_BUF_CNT 0x03c +#define BMU_MCAST_CNT 0x040 +#define BMU_MCAST_ALLOC_CTRL 0x044 +#define BMU_REM_BUF_CNT 0x048 +#define BMU_LOW_WATERMARK 0x050 +#define BMU_HIGH_WATERMARK 0x054 +#define BMU_INT_MEM_ACCESS 0x100 + +struct BMU_CFG { + unsigned long baseaddr; + u32 count; + u32 size; + u32 low_watermark; + u32 high_watermark; +}; + +#define BMU1_BUF_SIZE LMEM_BUF_SIZE_LN2 +#define BMU2_BUF_SIZE DDR_BUF_SIZE_LN2 + +#define BMU2_MCAST_ALLOC_CTRL (BMU2_BASE_ADDR + BMU_MCAST_ALLOC_CTRL) + +#endif /* _BMU_H_ */ diff --git a/src/spdk/dpdk/drivers/net/pfe/base/cbus/class_csr.h b/src/spdk/dpdk/drivers/net/pfe/base/cbus/class_csr.h new file mode 100644 index 000000000..a3f51c3a3 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/pfe/base/cbus/class_csr.h @@ -0,0 +1,277 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2018-2019 NXP + */ + +#ifndef _CLASS_CSR_H_ +#define _CLASS_CSR_H_ + +#include + +/* @file class_csr.h. + * class_csr - block containing all the classifier control and status register. + * Mapped on CBUS and accessible from all PE's and ARM. + */ +#define CLASS_VERSION (CLASS_CSR_BASE_ADDR + 0x000) +#define CLASS_TX_CTRL (CLASS_CSR_BASE_ADDR + 0x004) +#define CLASS_INQ_PKTPTR (CLASS_CSR_BASE_ADDR + 0x010) + +/* (ddr_hdr_size[24:16], lmem_hdr_size[5:0]) */ +#define CLASS_HDR_SIZE (CLASS_CSR_BASE_ADDR + 0x014) + +/* LMEM header size for the Classifier block.\ Data in the LMEM + * is written from this offset. + */ +#define CLASS_HDR_SIZE_LMEM(off) ((off) & 0x3f) + +/* DDR header size for the Classifier block.\ Data in the DDR + * is written from this offset. + */ +#define CLASS_HDR_SIZE_DDR(off) (((off) & 0x1ff) << 16) + +#define CLASS_PE0_QB_DM_ADDR0 (CLASS_CSR_BASE_ADDR + 0x020) + +/* DMEM address of first [15:0] and second [31:16] buffers on QB side. 
*/ +#define CLASS_PE0_QB_DM_ADDR1 (CLASS_CSR_BASE_ADDR + 0x024) + +/* DMEM address of third [15:0] and fourth [31:16] buffers on QB side. */ +#define CLASS_PE0_RO_DM_ADDR0 (CLASS_CSR_BASE_ADDR + 0x060) + +/* DMEM address of first [15:0] and second [31:16] buffers on RO side. */ +#define CLASS_PE0_RO_DM_ADDR1 (CLASS_CSR_BASE_ADDR + 0x064) + +/* DMEM address of third [15:0] and fourth [31:16] buffers on RO side. */ + +/* @name Class PE memory access. Allows external PE's and HOST to + * read/write PMEM/DMEM memory ranges for each classifier PE. + */ +/* {sr_pe_mem_cmd[31], csr_pe_mem_wren[27:24], csr_pe_mem_addr[23:0]}, + * See \ref XXX_MEM_ACCESS_ADDR for details. + */ +#define CLASS_MEM_ACCESS_ADDR (CLASS_CSR_BASE_ADDR + 0x100) + +/* Internal Memory Access Write Data [31:0] */ +#define CLASS_MEM_ACCESS_WDATA (CLASS_CSR_BASE_ADDR + 0x104) + +/* Internal Memory Access Read Data [31:0] */ +#define CLASS_MEM_ACCESS_RDATA (CLASS_CSR_BASE_ADDR + 0x108) +#define CLASS_TM_INQ_ADDR (CLASS_CSR_BASE_ADDR + 0x114) +#define CLASS_PE_STATUS (CLASS_CSR_BASE_ADDR + 0x118) + +#define CLASS_PHY1_RX_PKTS (CLASS_CSR_BASE_ADDR + 0x11c) +#define CLASS_PHY1_TX_PKTS (CLASS_CSR_BASE_ADDR + 0x120) +#define CLASS_PHY1_LP_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x124) +#define CLASS_PHY1_INTF_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x128) +#define CLASS_PHY1_INTF_MATCH_PKTS (CLASS_CSR_BASE_ADDR + 0x12c) +#define CLASS_PHY1_L3_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x130) +#define CLASS_PHY1_V4_PKTS (CLASS_CSR_BASE_ADDR + 0x134) +#define CLASS_PHY1_V6_PKTS (CLASS_CSR_BASE_ADDR + 0x138) +#define CLASS_PHY1_CHKSUM_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x13c) +#define CLASS_PHY1_TTL_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x140) +#define CLASS_PHY2_RX_PKTS (CLASS_CSR_BASE_ADDR + 0x144) +#define CLASS_PHY2_TX_PKTS (CLASS_CSR_BASE_ADDR + 0x148) +#define CLASS_PHY2_LP_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x14c) +#define CLASS_PHY2_INTF_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x150) +#define CLASS_PHY2_INTF_MATCH_PKTS (CLASS_CSR_BASE_ADDR + 0x154) +#define CLASS_PHY2_L3_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x158) +#define CLASS_PHY2_V4_PKTS (CLASS_CSR_BASE_ADDR + 0x15c) +#define CLASS_PHY2_V6_PKTS (CLASS_CSR_BASE_ADDR + 0x160) +#define CLASS_PHY2_CHKSUM_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x164) +#define CLASS_PHY2_TTL_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x168) +#define CLASS_PHY3_RX_PKTS (CLASS_CSR_BASE_ADDR + 0x16c) +#define CLASS_PHY3_TX_PKTS (CLASS_CSR_BASE_ADDR + 0x170) +#define CLASS_PHY3_LP_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x174) +#define CLASS_PHY3_INTF_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x178) +#define CLASS_PHY3_INTF_MATCH_PKTS (CLASS_CSR_BASE_ADDR + 0x17c) +#define CLASS_PHY3_L3_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x180) +#define CLASS_PHY3_V4_PKTS (CLASS_CSR_BASE_ADDR + 0x184) +#define CLASS_PHY3_V6_PKTS (CLASS_CSR_BASE_ADDR + 0x188) +#define CLASS_PHY3_CHKSUM_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x18c) +#define CLASS_PHY3_TTL_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x190) +#define CLASS_PHY1_ICMP_PKTS (CLASS_CSR_BASE_ADDR + 0x194) +#define CLASS_PHY1_IGMP_PKTS (CLASS_CSR_BASE_ADDR + 0x198) +#define CLASS_PHY1_TCP_PKTS (CLASS_CSR_BASE_ADDR + 0x19c) +#define CLASS_PHY1_UDP_PKTS (CLASS_CSR_BASE_ADDR + 0x1a0) +#define CLASS_PHY2_ICMP_PKTS (CLASS_CSR_BASE_ADDR + 0x1a4) +#define CLASS_PHY2_IGMP_PKTS (CLASS_CSR_BASE_ADDR + 0x1a8) +#define CLASS_PHY2_TCP_PKTS (CLASS_CSR_BASE_ADDR + 0x1ac) +#define CLASS_PHY2_UDP_PKTS (CLASS_CSR_BASE_ADDR + 0x1b0) +#define CLASS_PHY3_ICMP_PKTS (CLASS_CSR_BASE_ADDR + 0x1b4) +#define CLASS_PHY3_IGMP_PKTS (CLASS_CSR_BASE_ADDR + 0x1b8) +#define CLASS_PHY3_TCP_PKTS 
(CLASS_CSR_BASE_ADDR + 0x1bc) +#define CLASS_PHY3_UDP_PKTS (CLASS_CSR_BASE_ADDR + 0x1c0) +#define CLASS_PHY4_ICMP_PKTS (CLASS_CSR_BASE_ADDR + 0x1c4) +#define CLASS_PHY4_IGMP_PKTS (CLASS_CSR_BASE_ADDR + 0x1c8) +#define CLASS_PHY4_TCP_PKTS (CLASS_CSR_BASE_ADDR + 0x1cc) +#define CLASS_PHY4_UDP_PKTS (CLASS_CSR_BASE_ADDR + 0x1d0) +#define CLASS_PHY4_RX_PKTS (CLASS_CSR_BASE_ADDR + 0x1d4) +#define CLASS_PHY4_TX_PKTS (CLASS_CSR_BASE_ADDR + 0x1d8) +#define CLASS_PHY4_LP_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x1dc) +#define CLASS_PHY4_INTF_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x1e0) +#define CLASS_PHY4_INTF_MATCH_PKTS (CLASS_CSR_BASE_ADDR + 0x1e4) +#define CLASS_PHY4_L3_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x1e8) +#define CLASS_PHY4_V4_PKTS (CLASS_CSR_BASE_ADDR + 0x1ec) +#define CLASS_PHY4_V6_PKTS (CLASS_CSR_BASE_ADDR + 0x1f0) +#define CLASS_PHY4_CHKSUM_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x1f4) +#define CLASS_PHY4_TTL_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x1f8) + +#define CLASS_PE_SYS_CLK_RATIO (CLASS_CSR_BASE_ADDR + 0x200) +#define CLASS_AFULL_THRES (CLASS_CSR_BASE_ADDR + 0x204) +#define CLASS_GAP_BETWEEN_READS (CLASS_CSR_BASE_ADDR + 0x208) +#define CLASS_MAX_BUF_CNT (CLASS_CSR_BASE_ADDR + 0x20c) +#define CLASS_TSQ_FIFO_THRES (CLASS_CSR_BASE_ADDR + 0x210) +#define CLASS_TSQ_MAX_CNT (CLASS_CSR_BASE_ADDR + 0x214) +#define CLASS_IRAM_DATA_0 (CLASS_CSR_BASE_ADDR + 0x218) +#define CLASS_IRAM_DATA_1 (CLASS_CSR_BASE_ADDR + 0x21c) +#define CLASS_IRAM_DATA_2 (CLASS_CSR_BASE_ADDR + 0x220) +#define CLASS_IRAM_DATA_3 (CLASS_CSR_BASE_ADDR + 0x224) + +#define CLASS_BUS_ACCESS_ADDR (CLASS_CSR_BASE_ADDR + 0x228) + +#define CLASS_BUS_ACCESS_WDATA (CLASS_CSR_BASE_ADDR + 0x22c) +#define CLASS_BUS_ACCESS_RDATA (CLASS_CSR_BASE_ADDR + 0x230) + +/* (route_entry_size[9:0], route_hash_size[23:16] + * (this is actually ln2(size))) + */ +#define CLASS_ROUTE_HASH_ENTRY_SIZE (CLASS_CSR_BASE_ADDR + 0x234) + +#define CLASS_ROUTE_ENTRY_SIZE(size) ((size) & 0x1ff) +#define CLASS_ROUTE_HASH_SIZE(hash_bits) (((hash_bits) & 0xff) << 16) + +#define CLASS_ROUTE_TABLE_BASE (CLASS_CSR_BASE_ADDR + 0x238) + +#define CLASS_ROUTE_MULTI (CLASS_CSR_BASE_ADDR + 0x23c) +#define CLASS_SMEM_OFFSET (CLASS_CSR_BASE_ADDR + 0x240) +#define CLASS_LMEM_BUF_SIZE (CLASS_CSR_BASE_ADDR + 0x244) +#define CLASS_VLAN_ID (CLASS_CSR_BASE_ADDR + 0x248) +#define CLASS_BMU1_BUF_FREE (CLASS_CSR_BASE_ADDR + 0x24c) +#define CLASS_USE_TMU_INQ (CLASS_CSR_BASE_ADDR + 0x250) +#define CLASS_VLAN_ID1 (CLASS_CSR_BASE_ADDR + 0x254) + +#define CLASS_BUS_ACCESS_BASE (CLASS_CSR_BASE_ADDR + 0x258) +#define CLASS_BUS_ACCESS_BASE_MASK (0xFF000000) +/* bit 31:24 of PE peripheral address are stored in CLASS_BUS_ACCESS_BASE */ + +#define CLASS_HIF_PARSE (CLASS_CSR_BASE_ADDR + 0x25c) + +#define CLASS_HOST_PE0_GP (CLASS_CSR_BASE_ADDR + 0x260) +#define CLASS_PE0_GP (CLASS_CSR_BASE_ADDR + 0x264) +#define CLASS_HOST_PE1_GP (CLASS_CSR_BASE_ADDR + 0x268) +#define CLASS_PE1_GP (CLASS_CSR_BASE_ADDR + 0x26c) +#define CLASS_HOST_PE2_GP (CLASS_CSR_BASE_ADDR + 0x270) +#define CLASS_PE2_GP (CLASS_CSR_BASE_ADDR + 0x274) +#define CLASS_HOST_PE3_GP (CLASS_CSR_BASE_ADDR + 0x278) +#define CLASS_PE3_GP (CLASS_CSR_BASE_ADDR + 0x27c) +#define CLASS_HOST_PE4_GP (CLASS_CSR_BASE_ADDR + 0x280) +#define CLASS_PE4_GP (CLASS_CSR_BASE_ADDR + 0x284) +#define CLASS_HOST_PE5_GP (CLASS_CSR_BASE_ADDR + 0x288) +#define CLASS_PE5_GP (CLASS_CSR_BASE_ADDR + 0x28c) + +#define CLASS_PE_INT_SRC (CLASS_CSR_BASE_ADDR + 0x290) +#define CLASS_PE_INT_ENABLE (CLASS_CSR_BASE_ADDR + 0x294) + +#define CLASS_TPID0_TPID1 (CLASS_CSR_BASE_ADDR + 0x298) 
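+
+/*
+ * Editor's illustrative sketch, not part of the upstream header: the
+ * CLASS_ROUTE_HASH_ENTRY_SIZE register described above takes the route
+ * entry size and ln2(hash size) fields OR-ed together.  The helper name
+ * below is hypothetical and only shows how the CLASS_ROUTE_ENTRY_SIZE()
+ * and CLASS_ROUTE_HASH_SIZE() field macros are meant to compose; the
+ * driver's real programming sequence lives in the PFE HAL and may differ.
+ */
+static inline u32
+class_route_hash_entry_size_val(u32 entry_size_bytes, u32 hash_size_ln2)
+{
+	/* e.g. 128-byte route entries with 2^20 hash buckets, matching the
+	 * CLASS_ROUTE_SIZE and CLASS_ROUTE_HASH_BITS defaults further down.
+	 */
+	return CLASS_ROUTE_ENTRY_SIZE(entry_size_bytes) |
+	       CLASS_ROUTE_HASH_SIZE(hash_size_ln2);
+}
+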
+#define CLASS_TPID2 (CLASS_CSR_BASE_ADDR + 0x29c) + +#define CLASS_L4_CHKSUM_ADDR (CLASS_CSR_BASE_ADDR + 0x2a0) + +#define CLASS_PE0_DEBUG (CLASS_CSR_BASE_ADDR + 0x2a4) +#define CLASS_PE1_DEBUG (CLASS_CSR_BASE_ADDR + 0x2a8) +#define CLASS_PE2_DEBUG (CLASS_CSR_BASE_ADDR + 0x2ac) +#define CLASS_PE3_DEBUG (CLASS_CSR_BASE_ADDR + 0x2b0) +#define CLASS_PE4_DEBUG (CLASS_CSR_BASE_ADDR + 0x2b4) +#define CLASS_PE5_DEBUG (CLASS_CSR_BASE_ADDR + 0x2b8) + +#define CLASS_STATE (CLASS_CSR_BASE_ADDR + 0x2bc) + +/* CLASS defines */ +#define CLASS_PBUF_SIZE 0x100 /* Fixed by hardware */ +#define CLASS_PBUF_HEADER_OFFSET 0x80 /* Can be configured */ + +/* Can be configured */ +#define CLASS_PBUF0_BASE_ADDR 0x000 +/* Can be configured */ +#define CLASS_PBUF1_BASE_ADDR (CLASS_PBUF0_BASE_ADDR + CLASS_PBUF_SIZE) +/* Can be configured */ +#define CLASS_PBUF2_BASE_ADDR (CLASS_PBUF1_BASE_ADDR + CLASS_PBUF_SIZE) +/* Can be configured */ +#define CLASS_PBUF3_BASE_ADDR (CLASS_PBUF2_BASE_ADDR + CLASS_PBUF_SIZE) + +#define CLASS_PBUF0_HEADER_BASE_ADDR (CLASS_PBUF0_BASE_ADDR + \ + CLASS_PBUF_HEADER_OFFSET) +#define CLASS_PBUF1_HEADER_BASE_ADDR (CLASS_PBUF1_BASE_ADDR + \ + CLASS_PBUF_HEADER_OFFSET) +#define CLASS_PBUF2_HEADER_BASE_ADDR (CLASS_PBUF2_BASE_ADDR + \ + CLASS_PBUF_HEADER_OFFSET) +#define CLASS_PBUF3_HEADER_BASE_ADDR (CLASS_PBUF3_BASE_ADDR + \ + CLASS_PBUF_HEADER_OFFSET) + +#define CLASS_PE0_RO_DM_ADDR0_VAL ((CLASS_PBUF1_BASE_ADDR << 16) | \ + CLASS_PBUF0_BASE_ADDR) +#define CLASS_PE0_RO_DM_ADDR1_VAL ((CLASS_PBUF3_BASE_ADDR << 16) | \ + CLASS_PBUF2_BASE_ADDR) + +#define CLASS_PE0_QB_DM_ADDR0_VAL ((CLASS_PBUF1_HEADER_BASE_ADDR << 16) |\ + CLASS_PBUF0_HEADER_BASE_ADDR) +#define CLASS_PE0_QB_DM_ADDR1_VAL ((CLASS_PBUF3_HEADER_BASE_ADDR << 16) |\ + CLASS_PBUF2_HEADER_BASE_ADDR) + +#define CLASS_ROUTE_SIZE 128 +#define CLASS_MAX_ROUTE_SIZE 256 +#define CLASS_ROUTE_HASH_BITS 20 +#define CLASS_ROUTE_HASH_MASK (BIT(CLASS_ROUTE_HASH_BITS) - 1) + +/* Can be configured */ +#define CLASS_ROUTE0_BASE_ADDR 0x400 +/* Can be configured */ +#define CLASS_ROUTE1_BASE_ADDR (CLASS_ROUTE0_BASE_ADDR + CLASS_ROUTE_SIZE) +/* Can be configured */ +#define CLASS_ROUTE2_BASE_ADDR (CLASS_ROUTE1_BASE_ADDR + CLASS_ROUTE_SIZE) +/* Can be configured */ +#define CLASS_ROUTE3_BASE_ADDR (CLASS_ROUTE2_BASE_ADDR + CLASS_ROUTE_SIZE) + +#define CLASS_SA_SIZE 128 +#define CLASS_IPSEC_SA0_BASE_ADDR 0x600 +/* not used */ +#define CLASS_IPSEC_SA1_BASE_ADDR (CLASS_IPSEC_SA0_BASE_ADDR + CLASS_SA_SIZE) +/* not used */ +#define CLASS_IPSEC_SA2_BASE_ADDR (CLASS_IPSEC_SA1_BASE_ADDR + CLASS_SA_SIZE) +/* not used */ +#define CLASS_IPSEC_SA3_BASE_ADDR (CLASS_IPSEC_SA2_BASE_ADDR + CLASS_SA_SIZE) + +/* generic purpose free dmem buffer, last portion of 2K dmem pbuf */ +#define CLASS_GP_DMEM_BUF_SIZE (2048 - (CLASS_PBUF_SIZE * 4) - \ + (CLASS_ROUTE_SIZE * 4) - (CLASS_SA_SIZE)) +#define CLASS_GP_DMEM_BUF ((void *)(CLASS_IPSEC_SA0_BASE_ADDR + \ + CLASS_SA_SIZE)) + +#define TWO_LEVEL_ROUTE BIT(0) +#define PHYNO_IN_HASH BIT(1) +#define HW_ROUTE_FETCH BIT(3) +#define HW_BRIDGE_FETCH BIT(5) +#define IP_ALIGNED BIT(6) +#define ARC_HIT_CHECK_EN BIT(7) +#define CLASS_TOE BIT(11) +#define HASH_NORMAL (0 << 12) +#define HASH_CRC_PORT BIT(12) +#define HASH_CRC_IP (2 << 12) +#define HASH_CRC_PORT_IP (3 << 12) +#define QB2BUS_LE BIT(15) + +#define TCP_CHKSUM_DROP BIT(0) +#define UDP_CHKSUM_DROP BIT(1) +#define IPV4_CHKSUM_DROP BIT(9) + +/*CLASS_HIF_PARSE bits*/ +#define HIF_PKT_CLASS_EN BIT(0) +#define HIF_PKT_OFFSET(ofst) (((ofst) & 0xF) << 1) + +struct class_cfg { + u32 toe_mode; 
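+	/* Editor's note (assumption, not upstream documentation): the two
+	 * fields below presumably feed CLASS_ROUTE_TABLE_BASE and the
+	 * CLASS_ROUTE_HASH_SIZE() field of CLASS_ROUTE_HASH_ENTRY_SIZE.
+	 */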
+ unsigned long route_table_baseaddr; + u32 route_table_hash_bits; + u32 pe_sys_clk_ratio; + u32 resume; +}; + +#endif /* _CLASS_CSR_H_ */ diff --git a/src/spdk/dpdk/drivers/net/pfe/base/cbus/emac_mtip.h b/src/spdk/dpdk/drivers/net/pfe/base/cbus/emac_mtip.h new file mode 100644 index 000000000..e1afc3148 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/pfe/base/cbus/emac_mtip.h @@ -0,0 +1,231 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2018-2019 NXP + */ + +#ifndef _EMAC_H_ +#define _EMAC_H_ + +/* This file is for Ethernet MAC registers and offsets + */ + +#include + +#define EMAC_IEVENT_REG 0x004 +#define EMAC_IMASK_REG 0x008 +#define EMAC_R_DES_ACTIVE_REG 0x010 +#define EMAC_X_DES_ACTIVE_REG 0x014 +#define EMAC_ECNTRL_REG 0x024 +#define EMAC_MII_DATA_REG 0x040 +#define EMAC_MII_CTRL_REG 0x044 +#define EMAC_MIB_CTRL_STS_REG 0x064 +#define EMAC_RCNTRL_REG 0x084 +#define EMAC_TCNTRL_REG 0x0C4 +#define EMAC_PHY_ADDR_LOW 0x0E4 +#define EMAC_PHY_ADDR_HIGH 0x0E8 +#define EMAC_GAUR 0x120 +#define EMAC_GALR 0x124 +#define EMAC_TFWR_STR_FWD 0x144 +#define EMAC_RX_SECTION_FULL 0x190 +#define EMAC_RX_SECTION_EMPTY 0x194 +#define EMAC_TX_SECTION_EMPTY 0x1A0 +#define EMAC_TRUNC_FL 0x1B0 + +#define RMON_T_DROP 0x200 /* Count of frames not cntd correctly */ +#define RMON_T_PACKETS 0x204 /* RMON TX packet count */ +#define RMON_T_BC_PKT 0x208 /* RMON TX broadcast pkts */ +#define RMON_T_MC_PKT 0x20c /* RMON TX multicast pkts */ +#define RMON_T_CRC_ALIGN 0x210 /* RMON TX pkts with CRC align err */ +#define RMON_T_UNDERSIZE 0x214 /* RMON TX pkts < 64 bytes, good CRC */ +#define RMON_T_OVERSIZE 0x218 /* RMON TX pkts > MAX_FL bytes good CRC */ +#define RMON_T_FRAG 0x21c /* RMON TX pkts < 64 bytes, bad CRC */ +#define RMON_T_JAB 0x220 /* RMON TX pkts > MAX_FL bytes, bad CRC */ +#define RMON_T_COL 0x224 /* RMON TX collision count */ +#define RMON_T_P64 0x228 /* RMON TX 64 byte pkts */ +#define RMON_T_P65TO127 0x22c /* RMON TX 65 to 127 byte pkts */ +#define RMON_T_P128TO255 0x230 /* RMON TX 128 to 255 byte pkts */ +#define RMON_T_P256TO511 0x234 /* RMON TX 256 to 511 byte pkts */ +#define RMON_T_P512TO1023 0x238 /* RMON TX 512 to 1023 byte pkts */ +#define RMON_T_P1024TO2047 0x23c /* RMON TX 1024 to 2047 byte pkts */ +#define RMON_T_P_GTE2048 0x240 /* RMON TX pkts > 2048 bytes */ +#define RMON_T_OCTETS 0x244 /* RMON TX octets */ +#define IEEE_T_DROP 0x248 /* Count of frames not counted crtly */ +#define IEEE_T_FRAME_OK 0x24c /* Frames tx'd OK */ +#define IEEE_T_1COL 0x250 /* Frames tx'd with single collision */ +#define IEEE_T_MCOL 0x254 /* Frames tx'd with multiple collision */ +#define IEEE_T_DEF 0x258 /* Frames tx'd after deferral delay */ +#define IEEE_T_LCOL 0x25c /* Frames tx'd with late collision */ +#define IEEE_T_EXCOL 0x260 /* Frames tx'd with excesv collisions */ +#define IEEE_T_MACERR 0x264 /* Frames tx'd with TX FIFO underrun */ +#define IEEE_T_CSERR 0x268 /* Frames tx'd with carrier sense err */ +#define IEEE_T_SQE 0x26c /* Frames tx'd with SQE err */ +#define IEEE_T_FDXFC 0x270 /* Flow control pause frames tx'd */ +#define IEEE_T_OCTETS_OK 0x274 /* Octet count for frames tx'd w/o err */ +#define RMON_R_PACKETS 0x284 /* RMON RX packet count */ +#define RMON_R_BC_PKT 0x288 /* RMON RX broadcast pkts */ +#define RMON_R_MC_PKT 0x28c /* RMON RX multicast pkts */ +#define RMON_R_CRC_ALIGN 0x290 /* RMON RX pkts with CRC alignment err */ +#define RMON_R_UNDERSIZE 0x294 /* RMON RX pkts < 64 bytes, good CRC */ +#define RMON_R_OVERSIZE 0x298 /* RMON RX pkts > MAX_FL bytes good CRC */ +#define 
RMON_R_FRAG 0x29c /* RMON RX pkts < 64 bytes, bad CRC */ +#define RMON_R_JAB 0x2a0 /* RMON RX pkts > MAX_FL bytes, bad CRC */ +#define RMON_R_RESVD_O 0x2a4 /* Reserved */ +#define RMON_R_P64 0x2a8 /* RMON RX 64 byte pkts */ +#define RMON_R_P65TO127 0x2ac /* RMON RX 65 to 127 byte pkts */ +#define RMON_R_P128TO255 0x2b0 /* RMON RX 128 to 255 byte pkts */ +#define RMON_R_P256TO511 0x2b4 /* RMON RX 256 to 511 byte pkts */ +#define RMON_R_P512TO1023 0x2b8 /* RMON RX 512 to 1023 byte pkts */ +#define RMON_R_P1024TO2047 0x2bc /* RMON RX 1024 to 2047 byte pkts */ +#define RMON_R_P_GTE2048 0x2c0 /* RMON RX pkts > 2048 bytes */ +#define RMON_R_OCTETS 0x2c4 /* RMON RX octets */ +#define IEEE_R_DROP 0x2c8 /* Count frames not counted correctly */ +#define IEEE_R_FRAME_OK 0x2cc /* Frames rx'd OK */ +#define IEEE_R_CRC 0x2d0 /* Frames rx'd with CRC err */ +#define IEEE_R_ALIGN 0x2d4 /* Frames rx'd with alignment err */ +#define IEEE_R_MACERR 0x2d8 /* Receive FIFO overflow count */ +#define IEEE_R_FDXFC 0x2dc /* Flow control pause frames rx'd */ +#define IEEE_R_OCTETS_OK 0x2e0 /* Octet cnt for frames rx'd w/o err */ + +#define EMAC_SMAC_0_0 0x500 /*Supplemental MAC Address 0 (RW).*/ +#define EMAC_SMAC_0_1 0x504 /*Supplemental MAC Address 0 (RW).*/ + +/* GEMAC definitions and settings */ + +#define EMAC_PORT_0 0 +#define EMAC_PORT_1 1 + +/* GEMAC Bit definitions */ +#define EMAC_IEVENT_HBERR 0x80000000 +#define EMAC_IEVENT_BABR 0x40000000 +#define EMAC_IEVENT_BABT 0x20000000 +#define EMAC_IEVENT_GRA 0x10000000 +#define EMAC_IEVENT_TXF 0x08000000 +#define EMAC_IEVENT_TXB 0x04000000 +#define EMAC_IEVENT_RXF 0x02000000 +#define EMAC_IEVENT_RXB 0x01000000 +#define EMAC_IEVENT_MII 0x00800000 +#define EMAC_IEVENT_EBERR 0x00400000 +#define EMAC_IEVENT_LC 0x00200000 +#define EMAC_IEVENT_RL 0x00100000 +#define EMAC_IEVENT_UN 0x00080000 + +#define EMAC_IMASK_HBERR 0x80000000 +#define EMAC_IMASK_BABR 0x40000000 +#define EMAC_IMASKT_BABT 0x20000000 +#define EMAC_IMASK_GRA 0x10000000 +#define EMAC_IMASKT_TXF 0x08000000 +#define EMAC_IMASK_TXB 0x04000000 +#define EMAC_IMASKT_RXF 0x02000000 +#define EMAC_IMASK_RXB 0x01000000 +#define EMAC_IMASK_MII 0x00800000 +#define EMAC_IMASK_EBERR 0x00400000 +#define EMAC_IMASK_LC 0x00200000 +#define EMAC_IMASKT_RL 0x00100000 +#define EMAC_IMASK_UN 0x00080000 + +#define EMAC_RCNTRL_MAX_FL_SHIFT 16 +#define EMAC_RCNTRL_LOOP 0x00000001 +#define EMAC_RCNTRL_DRT 0x00000002 +#define EMAC_RCNTRL_MII_MODE 0x00000004 +#define EMAC_RCNTRL_PROM 0x00000008 +#define EMAC_RCNTRL_BC_REJ 0x00000010 +#define EMAC_RCNTRL_FCE 0x00000020 +#define EMAC_RCNTRL_RGMII 0x00000040 +#define EMAC_RCNTRL_SGMII 0x00000080 +#define EMAC_RCNTRL_RMII 0x00000100 +#define EMAC_RCNTRL_RMII_10T 0x00000200 +#define EMAC_RCNTRL_CRC_FWD 0x00004000 + +#define EMAC_TCNTRL_GTS 0x00000001 +#define EMAC_TCNTRL_HBC 0x00000002 +#define EMAC_TCNTRL_FDEN 0x00000004 +#define EMAC_TCNTRL_TFC_PAUSE 0x00000008 +#define EMAC_TCNTRL_RFC_PAUSE 0x00000010 + +#define EMAC_ECNTRL_RESET 0x00000001 /* reset the EMAC */ +#define EMAC_ECNTRL_ETHER_EN 0x00000002 /* enable the EMAC */ +#define EMAC_ECNTRL_MAGIC_ENA 0x00000004 +#define EMAC_ECNTRL_SLEEP 0x00000008 +#define EMAC_ECNTRL_SPEED 0x00000020 +#define EMAC_ECNTRL_DBSWAP 0x00000100 + +#define EMAC_X_WMRK_STRFWD 0x00000100 + +#define EMAC_X_DES_ACTIVE_TDAR 0x01000000 +#define EMAC_R_DES_ACTIVE_RDAR 0x01000000 + +#define EMAC_RX_SECTION_EMPTY_V 0x00010006 +/* + * The possible operating speeds of the MAC, currently supporting 10, 100 and + * 1000Mb modes. 
+ */ +enum mac_speed {SPEED_10M, SPEED_100M, SPEED_1000M, SPEED_1000M_PCS}; + +/* MII-related definitios */ +#define EMAC_MII_DATA_ST 0x40000000 /* Start of frame delimiter */ +#define EMAC_MII_DATA_OP_RD 0x20000000 /* Perform a read operation */ +#define EMAC_MII_DATA_OP_CL45_RD 0x30000000 /* Perform a read operation */ +#define EMAC_MII_DATA_OP_WR 0x10000000 /* Perform a write operation */ +#define EMAC_MII_DATA_OP_CL45_WR 0x10000000 /* Perform a write operation */ +#define EMAC_MII_DATA_PA_MSK 0x0f800000 /* PHY Address field mask */ +#define EMAC_MII_DATA_RA_MSK 0x007c0000 /* PHY Register field mask */ +#define EMAC_MII_DATA_TA 0x00020000 /* Turnaround */ +#define EMAC_MII_DATA_DATAMSK 0x0000ffff /* PHY data field */ + +#define EMAC_MII_DATA_RA_SHIFT 18 /* MII Register address bits */ +#define EMAC_MII_DATA_RA_MASK 0x1F /* MII Register address mask */ +#define EMAC_MII_DATA_PA_SHIFT 23 /* MII PHY address bits */ +#define EMAC_MII_DATA_PA_MASK 0x1F /* MII PHY address mask */ + +#define EMAC_MII_DATA_RA(v) (((v) & EMAC_MII_DATA_RA_MASK) << \ + EMAC_MII_DATA_RA_SHIFT) +#define EMAC_MII_DATA_PA(v) (((v) & EMAC_MII_DATA_RA_MASK) << \ + EMAC_MII_DATA_PA_SHIFT) +#define EMAC_MII_DATA(v) ((v) & 0xffff) + +#define EMAC_MII_SPEED_SHIFT 1 +#define EMAC_HOLDTIME_SHIFT 8 +#define EMAC_HOLDTIME_MASK 0x7 +#define EMAC_HOLDTIME(v) (((v) & EMAC_HOLDTIME_MASK) << \ + EMAC_HOLDTIME_SHIFT) + +/* + * The Address organisation for the MAC device. All addresses are split into + * two 32-bit register fields. The first one (bottom) is the lower 32-bits of + * the address and the other field are the high order bits - this may be 16-bits + * in the case of MAC addresses, or 32-bits for the hash address. + * In terms of memory storage, the first item (bottom) is assumed to be at a + * lower address location than 'top'. i.e. top should be at address location of + * 'bottom' + 4 bytes. + */ +struct pfe_mac_addr { + u32 bottom; /* Lower 32-bits of address. */ + u32 top; /* Upper 32-bits of address. */ +}; + +/* + * The following is the organisation of the address filters section of the MAC + * registers. The Cadence MAC contains four possible specific address match + * addresses, if an incoming frame corresponds to any one of these four + * addresses then the frame will be copied to memory. + * It is not necessary for all four of the address match registers to be + * programmed, this is application dependent. + */ +struct spec_addr { + struct pfe_mac_addr one; /* Specific address register 1. */ + struct pfe_mac_addr two; /* Specific address register 2. */ + struct pfe_mac_addr three; /* Specific address register 3. */ + struct pfe_mac_addr four; /* Specific address register 4. 
*/ +}; + +struct gemac_cfg { + u32 mode; + u32 speed; + u32 duplex; +}; + +/* EMAC Hash size */ +#define EMAC_HASH_REG_BITS 64 + +#define EMAC_SPEC_ADDR_MAX 4 + +#endif /* _EMAC_H_ */ diff --git a/src/spdk/dpdk/drivers/net/pfe/base/cbus/gpi.h b/src/spdk/dpdk/drivers/net/pfe/base/cbus/gpi.h new file mode 100644 index 000000000..3ebdef926 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/pfe/base/cbus/gpi.h @@ -0,0 +1,77 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2018-2019 NXP + */ + +#ifndef _GPI_H_ +#define _GPI_H_ + +/* Generic Packet Interface:The generic packet interface block interfaces + * to block like ethernet, Host interfac and covert data into WSP internal + * structures + */ + +#define GPI_VERSION 0x00 +#define GPI_CTRL 0x04 +#define GPI_RX_CONFIG 0x08 +#define GPI_HDR_SIZE 0x0c +#define GPI_BUF_SIZE 0x10 +#define GPI_LMEM_ALLOC_ADDR 0x14 +#define GPI_LMEM_FREE_ADDR 0x18 +#define GPI_DDR_ALLOC_ADDR 0x1c +#define GPI_DDR_FREE_ADDR 0x20 +#define GPI_CLASS_ADDR 0x24 +#define GPI_DRX_FIFO 0x28 +#define GPI_TRX_FIFO 0x2c +#define GPI_INQ_PKTPTR 0x30 +#define GPI_DDR_DATA_OFFSET 0x34 +#define GPI_LMEM_DATA_OFFSET 0x38 +#define GPI_TMLF_TX 0x4c +#define GPI_DTX_ASEQ 0x50 +#define GPI_FIFO_STATUS 0x54 +#define GPI_FIFO_DEBUG 0x58 +#define GPI_TX_PAUSE_TIME 0x5c +#define GPI_LMEM_SEC_BUF_DATA_OFFSET 0x60 +#define GPI_DDR_SEC_BUF_DATA_OFFSET 0x64 +#define GPI_TOE_CHKSUM_EN 0x68 +#define GPI_OVERRUN_DROPCNT 0x6c +#define GPI_CSR_MTIP_PAUSE_REG 0x74 +#define GPI_CSR_MTIP_PAUSE_QUANTUM 0x78 +#define GPI_CSR_RX_CNT 0x7c +#define GPI_CSR_TX_CNT 0x80 +#define GPI_CSR_DEBUG1 0x84 +#define GPI_CSR_DEBUG2 0x88 + +struct gpi_cfg { + u32 lmem_rtry_cnt; + u32 tmlf_txthres; + u32 aseq_len; + u32 mtip_pause_reg; +}; + +/* GPI commons defines */ +#define GPI_LMEM_BUF_EN 0x1 +#define GPI_DDR_BUF_EN 0x1 + +/* EGPI 1 defines */ +#define EGPI1_LMEM_RTRY_CNT 0x40 +#define EGPI1_TMLF_TXTHRES 0xBC +#define EGPI1_ASEQ_LEN 0x50 + +/* EGPI 2 defines */ +#define EGPI2_LMEM_RTRY_CNT 0x40 +#define EGPI2_TMLF_TXTHRES 0xBC +#define EGPI2_ASEQ_LEN 0x40 + +/* EGPI 3 defines */ +#define EGPI3_LMEM_RTRY_CNT 0x40 +#define EGPI3_TMLF_TXTHRES 0xBC +#define EGPI3_ASEQ_LEN 0x40 + +/* HGPI defines */ +#define HGPI_LMEM_RTRY_CNT 0x40 +#define HGPI_TMLF_TXTHRES 0xBC +#define HGPI_ASEQ_LEN 0x40 + +#define EGPI_PAUSE_TIME 0x000007D0 +#define EGPI_PAUSE_ENABLE 0x40000000 +#endif /* _GPI_H_ */ diff --git a/src/spdk/dpdk/drivers/net/pfe/base/cbus/hif.h b/src/spdk/dpdk/drivers/net/pfe/base/cbus/hif.h new file mode 100644 index 000000000..be821c2db --- /dev/null +++ b/src/spdk/dpdk/drivers/net/pfe/base/cbus/hif.h @@ -0,0 +1,86 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2018-2019 NXP + */ + +#ifndef _HIF_H_ +#define _HIF_H_ + +/* @file hif.h. + * hif - PFE hif block control and status register. + * Mapped on CBUS and accessible from all PE's and ARM. 
+ */ +#define HIF_VERSION (HIF_BASE_ADDR + 0x00) +#define HIF_TX_CTRL (HIF_BASE_ADDR + 0x04) +#define HIF_TX_CURR_BD_ADDR (HIF_BASE_ADDR + 0x08) +#define HIF_TX_ALLOC (HIF_BASE_ADDR + 0x0c) +#define HIF_TX_BDP_ADDR (HIF_BASE_ADDR + 0x10) +#define HIF_TX_STATUS (HIF_BASE_ADDR + 0x14) +#define HIF_RX_CTRL (HIF_BASE_ADDR + 0x20) +#define HIF_RX_BDP_ADDR (HIF_BASE_ADDR + 0x24) +#define HIF_RX_STATUS (HIF_BASE_ADDR + 0x30) +#define HIF_INT_SRC (HIF_BASE_ADDR + 0x34) +#define HIF_INT_ENABLE (HIF_BASE_ADDR + 0x38) +#define HIF_POLL_CTRL (HIF_BASE_ADDR + 0x3c) +#define HIF_RX_CURR_BD_ADDR (HIF_BASE_ADDR + 0x40) +#define HIF_RX_ALLOC (HIF_BASE_ADDR + 0x44) +#define HIF_TX_DMA_STATUS (HIF_BASE_ADDR + 0x48) +#define HIF_RX_DMA_STATUS (HIF_BASE_ADDR + 0x4c) +#define HIF_INT_COAL (HIF_BASE_ADDR + 0x50) + +/* HIF_INT_SRC/ HIF_INT_ENABLE control bits */ +#define HIF_INT BIT(0) +#define HIF_RXBD_INT BIT(1) +#define HIF_RXPKT_INT BIT(2) +#define HIF_TXBD_INT BIT(3) +#define HIF_TXPKT_INT BIT(4) + +/* HIF_TX_CTRL bits */ +#define HIF_CTRL_DMA_EN BIT(0) +#define HIF_CTRL_BDP_POLL_CTRL_EN BIT(1) +#define HIF_CTRL_BDP_CH_START_WSTB BIT(2) + +/* HIF_RX_STATUS bits */ +#define BDP_CSR_RX_DMA_ACTV BIT(16) + +/* HIF_INT_ENABLE bits */ +#define HIF_INT_EN BIT(0) +#define HIF_RXBD_INT_EN BIT(1) +#define HIF_RXPKT_INT_EN BIT(2) +#define HIF_TXBD_INT_EN BIT(3) +#define HIF_TXPKT_INT_EN BIT(4) + +/* HIF_POLL_CTRL bits*/ +#define HIF_RX_POLL_CTRL_CYCLE 0x0400 +#define HIF_TX_POLL_CTRL_CYCLE 0x0400 + +/* HIF_INT_COAL bits*/ +#define HIF_INT_COAL_ENABLE BIT(31) + +/* Buffer descriptor control bits */ +#define BD_CTRL_BUFLEN_MASK 0x3fff +#define BD_BUF_LEN(x) ((x) & BD_CTRL_BUFLEN_MASK) +#define BD_CTRL_CBD_INT_EN BIT(16) +#define BD_CTRL_PKT_INT_EN BIT(17) +#define BD_CTRL_LIFM BIT(18) +#define BD_CTRL_LAST_BD BIT(19) +#define BD_CTRL_DIR BIT(20) +#define BD_CTRL_LMEM_CPY BIT(21) /* Valid only for HIF_NOCPY */ +#define BD_CTRL_PKT_XFER BIT(24) +#define BD_CTRL_DESC_EN BIT(31) +#define BD_CTRL_PARSE_DISABLE BIT(25) +#define BD_CTRL_BRFETCH_DISABLE BIT(26) +#define BD_CTRL_RTFETCH_DISABLE BIT(27) + +/* Buffer descriptor status bits*/ +#define BD_STATUS_CONN_ID(x) ((x) & 0xffff) +#define BD_STATUS_DIR_PROC_ID BIT(16) +#define BD_STATUS_CONN_ID_EN BIT(17) +#define BD_STATUS_PE2PROC_ID(x) (((x) & 7) << 18) +#define BD_STATUS_LE_DATA BIT(21) +#define BD_STATUS_CHKSUM_EN BIT(22) + +/* HIF Buffer descriptor status bits */ +#define DIR_PROC_ID BIT(16) +#define PROC_ID(id) ((id) << 18) + +#endif /* _HIF_H_ */ diff --git a/src/spdk/dpdk/drivers/net/pfe/base/cbus/hif_nocpy.h b/src/spdk/dpdk/drivers/net/pfe/base/cbus/hif_nocpy.h new file mode 100644 index 000000000..f98f1a6be --- /dev/null +++ b/src/spdk/dpdk/drivers/net/pfe/base/cbus/hif_nocpy.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2018-2019 NXP + */ + +#ifndef _HIF_NOCPY_H_ +#define _HIF_NOCPY_H_ + +#define HIF_NOCPY_VERSION (HIF_NOCPY_BASE_ADDR + 0x00) +#define HIF_NOCPY_TX_CTRL (HIF_NOCPY_BASE_ADDR + 0x04) +#define HIF_NOCPY_TX_CURR_BD_ADDR (HIF_NOCPY_BASE_ADDR + 0x08) +#define HIF_NOCPY_TX_ALLOC (HIF_NOCPY_BASE_ADDR + 0x0c) +#define HIF_NOCPY_TX_BDP_ADDR (HIF_NOCPY_BASE_ADDR + 0x10) +#define HIF_NOCPY_TX_STATUS (HIF_NOCPY_BASE_ADDR + 0x14) +#define HIF_NOCPY_RX_CTRL (HIF_NOCPY_BASE_ADDR + 0x20) +#define HIF_NOCPY_RX_BDP_ADDR (HIF_NOCPY_BASE_ADDR + 0x24) +#define HIF_NOCPY_RX_STATUS (HIF_NOCPY_BASE_ADDR + 0x30) +#define HIF_NOCPY_INT_SRC (HIF_NOCPY_BASE_ADDR + 0x34) +#define HIF_NOCPY_INT_ENABLE (HIF_NOCPY_BASE_ADDR + 0x38) +#define 
HIF_NOCPY_POLL_CTRL (HIF_NOCPY_BASE_ADDR + 0x3c) +#define HIF_NOCPY_RX_CURR_BD_ADDR (HIF_NOCPY_BASE_ADDR + 0x40) +#define HIF_NOCPY_RX_ALLOC (HIF_NOCPY_BASE_ADDR + 0x44) +#define HIF_NOCPY_TX_DMA_STATUS (HIF_NOCPY_BASE_ADDR + 0x48) +#define HIF_NOCPY_RX_DMA_STATUS (HIF_NOCPY_BASE_ADDR + 0x4c) +#define HIF_NOCPY_RX_INQ0_PKTPTR (HIF_NOCPY_BASE_ADDR + 0x50) +#define HIF_NOCPY_RX_INQ1_PKTPTR (HIF_NOCPY_BASE_ADDR + 0x54) +#define HIF_NOCPY_TX_PORT_NO (HIF_NOCPY_BASE_ADDR + 0x60) +#define HIF_NOCPY_LMEM_ALLOC_ADDR (HIF_NOCPY_BASE_ADDR + 0x64) +#define HIF_NOCPY_CLASS_ADDR (HIF_NOCPY_BASE_ADDR + 0x68) +#define HIF_NOCPY_TMU_PORT0_ADDR (HIF_NOCPY_BASE_ADDR + 0x70) +#define HIF_NOCPY_TMU_PORT1_ADDR (HIF_NOCPY_BASE_ADDR + 0x74) +#define HIF_NOCPY_TMU_PORT2_ADDR (HIF_NOCPY_BASE_ADDR + 0x7c) +#define HIF_NOCPY_TMU_PORT3_ADDR (HIF_NOCPY_BASE_ADDR + 0x80) +#define HIF_NOCPY_TMU_PORT4_ADDR (HIF_NOCPY_BASE_ADDR + 0x84) +#define HIF_NOCPY_INT_COAL (HIF_NOCPY_BASE_ADDR + 0x90) + +#endif /* _HIF_NOCPY_H_ */ diff --git a/src/spdk/dpdk/drivers/net/pfe/base/cbus/tmu_csr.h b/src/spdk/dpdk/drivers/net/pfe/base/cbus/tmu_csr.h new file mode 100644 index 000000000..fd25a166f --- /dev/null +++ b/src/spdk/dpdk/drivers/net/pfe/base/cbus/tmu_csr.h @@ -0,0 +1,154 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2018-2019 NXP + */ + +#ifndef _TMU_CSR_H_ +#define _TMU_CSR_H_ + +#define TMU_VERSION (TMU_CSR_BASE_ADDR + 0x000) +#define TMU_INQ_WATERMARK (TMU_CSR_BASE_ADDR + 0x004) +#define TMU_PHY_INQ_PKTPTR (TMU_CSR_BASE_ADDR + 0x008) +#define TMU_PHY_INQ_PKTINFO (TMU_CSR_BASE_ADDR + 0x00c) +#define TMU_PHY_INQ_FIFO_CNT (TMU_CSR_BASE_ADDR + 0x010) +#define TMU_SYS_GENERIC_CONTROL (TMU_CSR_BASE_ADDR + 0x014) +#define TMU_SYS_GENERIC_STATUS (TMU_CSR_BASE_ADDR + 0x018) +#define TMU_SYS_GEN_CON0 (TMU_CSR_BASE_ADDR + 0x01c) +#define TMU_SYS_GEN_CON1 (TMU_CSR_BASE_ADDR + 0x020) +#define TMU_SYS_GEN_CON2 (TMU_CSR_BASE_ADDR + 0x024) +#define TMU_SYS_GEN_CON3 (TMU_CSR_BASE_ADDR + 0x028) +#define TMU_SYS_GEN_CON4 (TMU_CSR_BASE_ADDR + 0x02c) +#define TMU_TEQ_DISABLE_DROPCHK (TMU_CSR_BASE_ADDR + 0x030) +#define TMU_TEQ_CTRL (TMU_CSR_BASE_ADDR + 0x034) +#define TMU_TEQ_QCFG (TMU_CSR_BASE_ADDR + 0x038) +#define TMU_TEQ_DROP_STAT (TMU_CSR_BASE_ADDR + 0x03c) +#define TMU_TEQ_QAVG (TMU_CSR_BASE_ADDR + 0x040) +#define TMU_TEQ_WREG_PROB (TMU_CSR_BASE_ADDR + 0x044) +#define TMU_TEQ_TRANS_STAT (TMU_CSR_BASE_ADDR + 0x048) +#define TMU_TEQ_HW_PROB_CFG0 (TMU_CSR_BASE_ADDR + 0x04c) +#define TMU_TEQ_HW_PROB_CFG1 (TMU_CSR_BASE_ADDR + 0x050) +#define TMU_TEQ_HW_PROB_CFG2 (TMU_CSR_BASE_ADDR + 0x054) +#define TMU_TEQ_HW_PROB_CFG3 (TMU_CSR_BASE_ADDR + 0x058) +#define TMU_TEQ_HW_PROB_CFG4 (TMU_CSR_BASE_ADDR + 0x05c) +#define TMU_TEQ_HW_PROB_CFG5 (TMU_CSR_BASE_ADDR + 0x060) +#define TMU_TEQ_HW_PROB_CFG6 (TMU_CSR_BASE_ADDR + 0x064) +#define TMU_TEQ_HW_PROB_CFG7 (TMU_CSR_BASE_ADDR + 0x068) +#define TMU_TEQ_HW_PROB_CFG8 (TMU_CSR_BASE_ADDR + 0x06c) +#define TMU_TEQ_HW_PROB_CFG9 (TMU_CSR_BASE_ADDR + 0x070) +#define TMU_TEQ_HW_PROB_CFG10 (TMU_CSR_BASE_ADDR + 0x074) +#define TMU_TEQ_HW_PROB_CFG11 (TMU_CSR_BASE_ADDR + 0x078) +#define TMU_TEQ_HW_PROB_CFG12 (TMU_CSR_BASE_ADDR + 0x07c) +#define TMU_TEQ_HW_PROB_CFG13 (TMU_CSR_BASE_ADDR + 0x080) +#define TMU_TEQ_HW_PROB_CFG14 (TMU_CSR_BASE_ADDR + 0x084) +#define TMU_TEQ_HW_PROB_CFG15 (TMU_CSR_BASE_ADDR + 0x088) +#define TMU_TEQ_HW_PROB_CFG16 (TMU_CSR_BASE_ADDR + 0x08c) +#define TMU_TEQ_HW_PROB_CFG17 (TMU_CSR_BASE_ADDR + 0x090) +#define TMU_TEQ_HW_PROB_CFG18 (TMU_CSR_BASE_ADDR + 0x094) +#define 
TMU_TEQ_HW_PROB_CFG19 (TMU_CSR_BASE_ADDR + 0x098) +#define TMU_TEQ_HW_PROB_CFG20 (TMU_CSR_BASE_ADDR + 0x09c) +#define TMU_TEQ_HW_PROB_CFG21 (TMU_CSR_BASE_ADDR + 0x0a0) +#define TMU_TEQ_HW_PROB_CFG22 (TMU_CSR_BASE_ADDR + 0x0a4) +#define TMU_TEQ_HW_PROB_CFG23 (TMU_CSR_BASE_ADDR + 0x0a8) +#define TMU_TEQ_HW_PROB_CFG24 (TMU_CSR_BASE_ADDR + 0x0ac) +#define TMU_TEQ_HW_PROB_CFG25 (TMU_CSR_BASE_ADDR + 0x0b0) +#define TMU_TDQ_IIFG_CFG (TMU_CSR_BASE_ADDR + 0x0b4) +/* [9:0] Scheduler Enable for each of the scheduler in the TDQ. + * This is a global Enable for all schedulers in PHY0 + */ +#define TMU_TDQ0_SCH_CTRL (TMU_CSR_BASE_ADDR + 0x0b8) + +#define TMU_LLM_CTRL (TMU_CSR_BASE_ADDR + 0x0bc) +#define TMU_LLM_BASE_ADDR (TMU_CSR_BASE_ADDR + 0x0c0) +#define TMU_LLM_QUE_LEN (TMU_CSR_BASE_ADDR + 0x0c4) +#define TMU_LLM_QUE_HEADPTR (TMU_CSR_BASE_ADDR + 0x0c8) +#define TMU_LLM_QUE_TAILPTR (TMU_CSR_BASE_ADDR + 0x0cc) +#define TMU_LLM_QUE_DROPCNT (TMU_CSR_BASE_ADDR + 0x0d0) +#define TMU_INT_EN (TMU_CSR_BASE_ADDR + 0x0d4) +#define TMU_INT_SRC (TMU_CSR_BASE_ADDR + 0x0d8) +#define TMU_INQ_STAT (TMU_CSR_BASE_ADDR + 0x0dc) +#define TMU_CTRL (TMU_CSR_BASE_ADDR + 0x0e0) + +/* [31] Mem Access Command. 0 = Internal Memory Read, 1 = Internal memory + * Write [27:24] Byte Enables of the Internal memory access [23:0] Address of + * the internal memory. This address is used to access both the PM and DM of + * all the PE's + */ +#define TMU_MEM_ACCESS_ADDR (TMU_CSR_BASE_ADDR + 0x0e4) + +/* Internal Memory Access Write Data */ +#define TMU_MEM_ACCESS_WDATA (TMU_CSR_BASE_ADDR + 0x0e8) +/* Internal Memory Access Read Data. The commands are blocked + * at the mem_access only + */ +#define TMU_MEM_ACCESS_RDATA (TMU_CSR_BASE_ADDR + 0x0ec) + +/* [31:0] PHY0 in queue address (must be initialized with one of the + * xxx_INQ_PKTPTR cbus addresses) + */ +#define TMU_PHY0_INQ_ADDR (TMU_CSR_BASE_ADDR + 0x0f0) +/* [31:0] PHY1 in queue address (must be initialized with one of the + * xxx_INQ_PKTPTR cbus addresses) + */ +#define TMU_PHY1_INQ_ADDR (TMU_CSR_BASE_ADDR + 0x0f4) +/* [31:0] PHY2 in queue address (must be initialized with one of the + * xxx_INQ_PKTPTR cbus addresses) + */ +#define TMU_PHY2_INQ_ADDR (TMU_CSR_BASE_ADDR + 0x0f8) +/* [31:0] PHY3 in queue address (must be initialized with one of the + * xxx_INQ_PKTPTR cbus addresses) + */ +#define TMU_PHY3_INQ_ADDR (TMU_CSR_BASE_ADDR + 0x0fc) +#define TMU_BMU_INQ_ADDR (TMU_CSR_BASE_ADDR + 0x100) +#define TMU_TX_CTRL (TMU_CSR_BASE_ADDR + 0x104) + +#define TMU_BUS_ACCESS_WDATA (TMU_CSR_BASE_ADDR + 0x108) +#define TMU_BUS_ACCESS (TMU_CSR_BASE_ADDR + 0x10c) +#define TMU_BUS_ACCESS_RDATA (TMU_CSR_BASE_ADDR + 0x110) + +#define TMU_PE_SYS_CLK_RATIO (TMU_CSR_BASE_ADDR + 0x114) +#define TMU_PE_STATUS (TMU_CSR_BASE_ADDR + 0x118) +#define TMU_TEQ_MAX_THRESHOLD (TMU_CSR_BASE_ADDR + 0x11c) +/* [31:0] PHY4 in queue address (must be initialized with one of the + * xxx_INQ_PKTPTR cbus addresses) + */ +#define TMU_PHY4_INQ_ADDR (TMU_CSR_BASE_ADDR + 0x134) +/* [9:0] Scheduler Enable for each of the scheduler in the TDQ. + * This is a global Enable for all schedulers in PHY1 + */ +#define TMU_TDQ1_SCH_CTRL (TMU_CSR_BASE_ADDR + 0x138) +/* [9:0] Scheduler Enable for each of the scheduler in the TDQ. + * This is a global Enable for all schedulers in PHY2 + */ +#define TMU_TDQ2_SCH_CTRL (TMU_CSR_BASE_ADDR + 0x13c) +/* [9:0] Scheduler Enable for each of the scheduler in the TDQ. 
+ * This is a global Enable for all schedulers in PHY3 + */ +#define TMU_TDQ3_SCH_CTRL (TMU_CSR_BASE_ADDR + 0x140) +#define TMU_BMU_BUF_SIZE (TMU_CSR_BASE_ADDR + 0x144) +/* [31:0] PHY5 in queue address (must be initialized with one of the + * xxx_INQ_PKTPTR cbus addresses) + */ +#define TMU_PHY5_INQ_ADDR (TMU_CSR_BASE_ADDR + 0x148) + +#define SW_RESET BIT(0) /* Global software reset */ +#define INQ_RESET BIT(2) +#define TEQ_RESET BIT(3) +#define TDQ_RESET BIT(4) +#define PE_RESET BIT(5) +#define MEM_INIT BIT(6) +#define MEM_INIT_DONE BIT(7) +#define LLM_INIT BIT(8) +#define LLM_INIT_DONE BIT(9) +#define ECC_MEM_INIT_DONE BIT(10) + +struct tmu_cfg { + u32 pe_sys_clk_ratio; + unsigned long llm_base_addr; + u32 llm_queue_len; +}; + +/* Not HW related for pfe_ctrl / pfe common defines */ +#define DEFAULT_MAX_QDEPTH 80 +#define DEFAULT_Q0_QDEPTH 511 /*We keep one large queue for host tx qos */ +#define DEFAULT_TMU3_QDEPTH 127 + +#endif /* _TMU_CSR_H_ */ diff --git a/src/spdk/dpdk/drivers/net/pfe/base/cbus/util_csr.h b/src/spdk/dpdk/drivers/net/pfe/base/cbus/util_csr.h new file mode 100644 index 000000000..7a4124ab6 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/pfe/base/cbus/util_csr.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2018-2019 NXP + */ + +#ifndef _UTIL_CSR_H_ +#define _UTIL_CSR_H_ + +#define UTIL_VERSION (UTIL_CSR_BASE_ADDR + 0x000) +#define UTIL_TX_CTRL (UTIL_CSR_BASE_ADDR + 0x004) +#define UTIL_INQ_PKTPTR (UTIL_CSR_BASE_ADDR + 0x010) + +#define UTIL_HDR_SIZE (UTIL_CSR_BASE_ADDR + 0x014) + +#define UTIL_PE0_QB_DM_ADDR0 (UTIL_CSR_BASE_ADDR + 0x020) +#define UTIL_PE0_QB_DM_ADDR1 (UTIL_CSR_BASE_ADDR + 0x024) +#define UTIL_PE0_RO_DM_ADDR0 (UTIL_CSR_BASE_ADDR + 0x060) +#define UTIL_PE0_RO_DM_ADDR1 (UTIL_CSR_BASE_ADDR + 0x064) + +#define UTIL_MEM_ACCESS_ADDR (UTIL_CSR_BASE_ADDR + 0x100) +#define UTIL_MEM_ACCESS_WDATA (UTIL_CSR_BASE_ADDR + 0x104) +#define UTIL_MEM_ACCESS_RDATA (UTIL_CSR_BASE_ADDR + 0x108) + +#define UTIL_TM_INQ_ADDR (UTIL_CSR_BASE_ADDR + 0x114) +#define UTIL_PE_STATUS (UTIL_CSR_BASE_ADDR + 0x118) + +#define UTIL_PE_SYS_CLK_RATIO (UTIL_CSR_BASE_ADDR + 0x200) +#define UTIL_AFULL_THRES (UTIL_CSR_BASE_ADDR + 0x204) +#define UTIL_GAP_BETWEEN_READS (UTIL_CSR_BASE_ADDR + 0x208) +#define UTIL_MAX_BUF_CNT (UTIL_CSR_BASE_ADDR + 0x20c) +#define UTIL_TSQ_FIFO_THRES (UTIL_CSR_BASE_ADDR + 0x210) +#define UTIL_TSQ_MAX_CNT (UTIL_CSR_BASE_ADDR + 0x214) +#define UTIL_IRAM_DATA_0 (UTIL_CSR_BASE_ADDR + 0x218) +#define UTIL_IRAM_DATA_1 (UTIL_CSR_BASE_ADDR + 0x21c) +#define UTIL_IRAM_DATA_2 (UTIL_CSR_BASE_ADDR + 0x220) +#define UTIL_IRAM_DATA_3 (UTIL_CSR_BASE_ADDR + 0x224) + +#define UTIL_BUS_ACCESS_ADDR (UTIL_CSR_BASE_ADDR + 0x228) +#define UTIL_BUS_ACCESS_WDATA (UTIL_CSR_BASE_ADDR + 0x22c) +#define UTIL_BUS_ACCESS_RDATA (UTIL_CSR_BASE_ADDR + 0x230) + +#define UTIL_INQ_AFULL_THRES (UTIL_CSR_BASE_ADDR + 0x234) + +struct util_cfg { + u32 pe_sys_clk_ratio; +}; + +#endif /* _UTIL_CSR_H_ */ diff --git a/src/spdk/dpdk/drivers/net/pfe/base/pfe.h b/src/spdk/dpdk/drivers/net/pfe/base/pfe.h new file mode 100644 index 000000000..72741ba4a --- /dev/null +++ b/src/spdk/dpdk/drivers/net/pfe/base/pfe.h @@ -0,0 +1,422 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2018-2019 NXP + */ + +#ifndef _PFE_H_ +#define _PFE_H_ + +#include "cbus.h" + +/* + * WARNING: non atomic version. 
+ */ +static inline void +set_bit(unsigned long nr, void *addr) +{ + int *m = ((int *)addr) + (nr >> 5); + *m |= 1 << (nr & 31); +} + +static inline int +test_bit(int nr, const void *addr) +{ + return (1UL & (((const int *)addr)[nr >> 5] >> (nr & 31))) != 0UL; +} + +/* + * WARNING: non atomic version. + */ +static inline void +clear_bit(unsigned long nr, void *addr) +{ + int *m = ((int *)addr) + (nr >> 5); + *m &= ~(1 << (nr & 31)); +} + +/* + * WARNING: non atomic version. + */ +static inline int +test_and_clear_bit(unsigned long nr, void *addr) +{ + unsigned long mask = 1 << (nr & 0x1f); + int *m = ((int *)addr) + (nr >> 5); + int old = *m; + + *m = old & ~mask; + return (old & mask) != 0; +} + +/* + * WARNING: non atomic version. + */ +static inline int +test_and_set_bit(unsigned long nr, void *addr) +{ + unsigned long mask = 1 << (nr & 0x1f); + int *m = ((int *)addr) + (nr >> 5); + int old = *m; + + *m = old | mask; + return (old & mask) != 0; +} + +#ifndef BIT +#define BIT(nr) (1UL << (nr)) +#endif +#define CLASS_DMEM_BASE_ADDR(i) (0x00000000 | ((i) << 20)) +/* + * Only valid for mem access register interface + */ +#define CLASS_IMEM_BASE_ADDR(i) (0x00000000 | ((i) << 20)) +#define CLASS_DMEM_SIZE 0x00002000 +#define CLASS_IMEM_SIZE 0x00008000 + +#define TMU_DMEM_BASE_ADDR(i) (0x00000000 + ((i) << 20)) +/* + * Only valid for mem access register interface + */ +#define TMU_IMEM_BASE_ADDR(i) (0x00000000 + ((i) << 20)) +#define TMU_DMEM_SIZE 0x00000800 +#define TMU_IMEM_SIZE 0x00002000 + +#define UTIL_DMEM_BASE_ADDR 0x00000000 +#define UTIL_DMEM_SIZE 0x00002000 + +#define PE_LMEM_BASE_ADDR 0xc3010000 +#define PE_LMEM_SIZE 0x8000 +#define PE_LMEM_END (PE_LMEM_BASE_ADDR + PE_LMEM_SIZE) + +#define DMEM_BASE_ADDR 0x00000000 +#define DMEM_SIZE 0x2000 /* TMU has less... */ +#define DMEM_END (DMEM_BASE_ADDR + DMEM_SIZE) + +#define PMEM_BASE_ADDR 0x00010000 +#define PMEM_SIZE 0x8000 /* TMU has less... 
*/ +#define PMEM_END (PMEM_BASE_ADDR + PMEM_SIZE) + +#define writel(v, p) ({*(volatile unsigned int *)(p) = (v); }) +#define readl(p) (*(const volatile unsigned int *)(p)) + +/* These check memory ranges from PE point of view/memory map */ +#define IS_DMEM(addr, len) \ + ({ typeof(addr) addr_ = (addr); \ + ((unsigned long)(addr_) >= DMEM_BASE_ADDR) && \ + (((unsigned long)(addr_) + (len)) <= DMEM_END); }) + +#define IS_PMEM(addr, len) \ + ({ typeof(addr) addr_ = (addr); \ + ((unsigned long)(addr_) >= PMEM_BASE_ADDR) && \ + (((unsigned long)(addr_) + (len)) <= PMEM_END); }) + +#define IS_PE_LMEM(addr, len) \ + ({ typeof(addr) addr_ = (addr); \ + ((unsigned long)(addr_) >= \ + PE_LMEM_BASE_ADDR) && \ + (((unsigned long)(addr_) + \ + (len)) <= PE_LMEM_END); }) + +#define IS_PFE_LMEM(addr, len) \ + ({ typeof(addr) addr_ = (addr); \ + ((unsigned long)(addr_) >= \ + CBUS_VIRT_TO_PFE(LMEM_BASE_ADDR)) && \ + (((unsigned long)(addr_) + (len)) <= \ + CBUS_VIRT_TO_PFE(LMEM_END)); }) + +#define __IS_PHYS_DDR(addr, len) \ + ({ typeof(addr) addr_ = (addr); \ + ((unsigned long)(addr_) >= \ + DDR_PHYS_BASE_ADDR) && \ + (((unsigned long)(addr_) + (len)) <= \ + DDR_PHYS_END); }) + +#define IS_PHYS_DDR(addr, len) __IS_PHYS_DDR(DDR_PFE_TO_PHYS(addr), len) + +/* + * If using a run-time virtual address for the cbus base address use this code + */ +extern void *cbus_base_addr; +extern void *ddr_base_addr; +extern unsigned long ddr_phys_base_addr; +extern unsigned int ddr_size; + +#define CBUS_BASE_ADDR cbus_base_addr +#define DDR_PHYS_BASE_ADDR ddr_phys_base_addr +#define DDR_BASE_ADDR ddr_base_addr +#define DDR_SIZE ddr_size + +#define DDR_PHYS_END (DDR_PHYS_BASE_ADDR + DDR_SIZE) + +#define LS1012A_PFE_RESET_WA /* + * PFE doesn't have global reset and re-init + * should takecare few things to make PFE + * functional after reset + */ +#define PFE_CBUS_PHYS_BASE_ADDR 0xc0000000 /* CBUS physical base address + * as seen by PE's. + */ +/* CBUS physical base address as seen by PE's. 
*/ +#define PFE_CBUS_PHYS_BASE_ADDR_FROM_PFE 0xc0000000 + +#define DDR_PHYS_TO_PFE(p) (((unsigned long)(p)) & 0x7FFFFFFF) +#define DDR_PFE_TO_PHYS(p) (((unsigned long)(p)) | 0x80000000) +#define CBUS_PHYS_TO_PFE(p) (((p) - PFE_CBUS_PHYS_BASE_ADDR) + \ + PFE_CBUS_PHYS_BASE_ADDR_FROM_PFE) +/* Translates to PFE address map */ + +#define DDR_PHYS_TO_VIRT(p) (((p) - DDR_PHYS_BASE_ADDR) + DDR_BASE_ADDR) +#define DDR_VIRT_TO_PHYS(v) (((v) - DDR_BASE_ADDR) + DDR_PHYS_BASE_ADDR) +#define DDR_VIRT_TO_PFE(p) (DDR_PHYS_TO_PFE(DDR_VIRT_TO_PHYS(p))) + +#define CBUS_VIRT_TO_PFE(v) (((v) - CBUS_BASE_ADDR) + \ + PFE_CBUS_PHYS_BASE_ADDR) +#define CBUS_PFE_TO_VIRT(p) (((unsigned long)(p) - \ + PFE_CBUS_PHYS_BASE_ADDR) + CBUS_BASE_ADDR) + +/* The below part of the code is used in QOS control driver from host */ +#define TMU_APB_BASE_ADDR 0xc1000000 /* TMU base address seen by + * pe's + */ + +enum { + CLASS0_ID = 0, + CLASS1_ID, + CLASS2_ID, + CLASS3_ID, + CLASS4_ID, + CLASS5_ID, + TMU0_ID, + TMU1_ID, + TMU2_ID, + TMU3_ID, +#if !defined(CONFIG_FSL_PFE_UTIL_DISABLED) + UTIL_ID, +#endif + MAX_PE +}; + +#define CLASS_MASK (BIT(CLASS0_ID) | BIT(CLASS1_ID) |\ + BIT(CLASS2_ID) | BIT(CLASS3_ID) |\ + BIT(CLASS4_ID) | BIT(CLASS5_ID)) +#define CLASS_MAX_ID CLASS5_ID + +#define TMU_MASK (BIT(TMU0_ID) | BIT(TMU1_ID) |\ + BIT(TMU3_ID)) + +#define TMU_MAX_ID TMU3_ID + +#if !defined(CONFIG_FSL_PFE_UTIL_DISABLED) +#define UTIL_MASK BIT(UTIL_ID) +#endif + +struct pe_status { + u32 cpu_state; + u32 activity_counter; + u32 rx; + union { + u32 tx; + u32 tmu_qstatus; + }; + u32 drop; +#if defined(CFG_PE_DEBUG) + u32 debug_indicator; + u32 debug[16]; +#endif +} __rte_aligned(16); + +struct pe_sync_mailbox { + u32 stop; + u32 stopped; +}; + +/* Drop counter definitions */ + +#define CLASS_NUM_DROP_COUNTERS 13 +#define UTIL_NUM_DROP_COUNTERS 8 + +/* PE information. + * Structure containing PE's specific information. It is used to create + * generic C functions common to all PE's. + * Before using the library functions this structure needs to be initialized + * with the different registers virtual addresses + * (according to the ARM MMU mmaping). The default initialization supports a + * virtual == physical mapping. 
+ */ +struct pe_info { + u32 dmem_base_addr; /* PE's dmem base address */ + u32 pmem_base_addr; /* PE's pmem base address */ + u32 pmem_size; /* PE's pmem size */ + + void *mem_access_wdata; /* PE's _MEM_ACCESS_WDATA register + * address + */ + void *mem_access_addr; /* PE's _MEM_ACCESS_ADDR register + * address + */ + void *mem_access_rdata; /* PE's _MEM_ACCESS_RDATA register + * address + */ +}; + +void pe_lmem_read(u32 *dst, u32 len, u32 offset); +void pe_lmem_write(u32 *src, u32 len, u32 offset); + +void pe_dmem_memcpy_to32(int id, u32 dst, const void *src, unsigned int len); +void pe_pmem_memcpy_to32(int id, u32 dst, const void *src, unsigned int len); + +u32 pe_pmem_read(int id, u32 addr, u8 size); + +void pe_dmem_write(int id, u32 val, u32 addr, u8 size); +u32 pe_dmem_read(int id, u32 addr, u8 size); +void class_pe_lmem_memcpy_to32(u32 dst, const void *src, unsigned int len); +void class_pe_lmem_memset(u32 dst, int val, unsigned int len); +void class_bus_write(u32 val, u32 addr, u8 size); +u32 class_bus_read(u32 addr, u8 size); + +#define class_bus_readl(addr) class_bus_read(addr, 4) +#define class_bus_readw(addr) class_bus_read(addr, 2) +#define class_bus_readb(addr) class_bus_read(addr, 1) + +#define class_bus_writel(val, addr) class_bus_write(val, addr, 4) +#define class_bus_writew(val, addr) class_bus_write(val, addr, 2) +#define class_bus_writeb(val, addr) class_bus_write(val, addr, 1) + +#define pe_dmem_readl(id, addr) pe_dmem_read(id, addr, 4) +#define pe_dmem_readw(id, addr) pe_dmem_read(id, addr, 2) +#define pe_dmem_readb(id, addr) pe_dmem_read(id, addr, 1) + +#define pe_dmem_writel(id, val, addr) pe_dmem_write(id, val, addr, 4) +#define pe_dmem_writew(id, val, addr) pe_dmem_write(id, val, addr, 2) +#define pe_dmem_writeb(id, val, addr) pe_dmem_write(id, val, addr, 1) + +/*int pe_load_elf_section(int id, const void *data, elf32_shdr *shdr); */ +//int pe_load_elf_section(int id, const void *data, struct elf32_shdr *shdr, +// struct device *dev); + +void pfe_lib_init(void *cbus_base, void *ddr_base, unsigned long ddr_phys_base, + unsigned int ddr_size); +void bmu_init(void *base, struct BMU_CFG *cfg); +void bmu_reset(void *base); +void bmu_enable(void *base); +void bmu_disable(void *base); +void bmu_set_config(void *base, struct BMU_CFG *cfg); + +/* + * An enumerated type for loopback values. This can be one of three values, no + * loopback -normal operation, local loopback with internal loopback module of + * MAC or PHY loopback which is through the external PHY. 
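struct pe_info records, per PE, the three indirect-access registers (_MEM_ACCESS_WDATA/ADDR/RDATA) through which the host reaches DMEM and PMEM, and the pe_dmem_readl()/pe_dmem_writel() wrappers are just fixed-width calls into pe_dmem_read()/pe_dmem_write(). The toy model below mimics that stage-data, latch-address, read-data sequence against a plain array standing in for PE memory; the command bit, offsets and register behaviour are invented for illustration and do not match the real block.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t fake_dmem[64];          /* pretend PE data memory (words)   */
static uint32_t reg_wdata, reg_rdata;   /* pretend WDATA/RDATA registers    */

#define EX_CMD_WRITE 0x80000000u        /* invented "write" command bit     */

/* Writing the ADDR register latches the transaction, as on the real block. */
static void fake_mem_access(uint32_t addr_and_cmd)
{
	uint32_t idx = (addr_and_cmd & 0xffu) / 4;

	if (addr_and_cmd & EX_CMD_WRITE)
		fake_dmem[idx] = reg_wdata;
	else
		reg_rdata = fake_dmem[idx];
}

static void ex_pe_dmem_writel(uint32_t val, uint32_t addr)
{
	reg_wdata = val;                      /* stage data ...                 */
	fake_mem_access(addr | EX_CMD_WRITE); /* ... then latch address + write */
}

static uint32_t ex_pe_dmem_readl(uint32_t addr)
{
	fake_mem_access(addr);                /* latch address, read completes  */
	return reg_rdata;
}

int main(void)
{
	ex_pe_dmem_writel(0xdeadbeef, 0x10);
	printf("dmem[0x10] = 0x%" PRIx32 "\n", ex_pe_dmem_readl(0x10));
	return 0;
}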
+ */ +#ifndef __MAC_LOOP_ENUM__ +#define __MAC_LOOP_ENUM__ +enum mac_loop {LB_NONE, LB_EXT, LB_LOCAL}; +#endif + +void gemac_init(void *base, void *config); +void gemac_disable_rx_checksum_offload(void *base); +void gemac_enable_rx_checksum_offload(void *base); +void gemac_set_mdc_div(void *base, int mdc_div); +void gemac_set_speed(void *base, enum mac_speed gem_speed); +void gemac_set_duplex(void *base, int duplex); +void gemac_set_mode(void *base, int mode); +void gemac_enable(void *base); +void gemac_tx_disable(void *base); +void gemac_tx_enable(void *base); +void gemac_disable(void *base); +void gemac_reset(void *base); +void gemac_set_address(void *base, struct spec_addr *addr); +struct spec_addr gemac_get_address(void *base); +void gemac_set_loop(void *base, enum mac_loop gem_loop); +void gemac_set_laddr1(void *base, struct pfe_mac_addr *address); +void gemac_set_laddr2(void *base, struct pfe_mac_addr *address); +void gemac_set_laddr3(void *base, struct pfe_mac_addr *address); +void gemac_set_laddr4(void *base, struct pfe_mac_addr *address); +void gemac_set_laddrN(void *base, struct pfe_mac_addr *address, + unsigned int entry_index); +void gemac_clear_laddr1(void *base); +void gemac_clear_laddr2(void *base); +void gemac_clear_laddr3(void *base); +void gemac_clear_laddr4(void *base); +void gemac_clear_laddrN(void *base, unsigned int entry_index); +struct pfe_mac_addr gemac_get_hash(void *base); +void gemac_set_hash(void *base, struct pfe_mac_addr *hash); +struct pfe_mac_addr gem_get_laddr1(void *base); +struct pfe_mac_addr gem_get_laddr2(void *base); +struct pfe_mac_addr gem_get_laddr3(void *base); +struct pfe_mac_addr gem_get_laddr4(void *base); +struct pfe_mac_addr gem_get_laddrN(void *base, unsigned int entry_index); +void gemac_set_config(void *base, struct gemac_cfg *cfg); +void gemac_allow_broadcast(void *base); +void gemac_no_broadcast(void *base); +void gemac_enable_1536_rx(void *base); +void gemac_disable_1536_rx(void *base); +int gemac_set_rx(void *base, int mtu); +void gemac_enable_rx_jmb(void *base); +void gemac_disable_rx_jmb(void *base); +void gemac_enable_stacked_vlan(void *base); +void gemac_disable_stacked_vlan(void *base); +void gemac_enable_pause_rx(void *base); +void gemac_disable_pause_rx(void *base); +void gemac_enable_pause_tx(void *base); +void gemac_disable_pause_tx(void *base); +void gemac_enable_copy_all(void *base); +void gemac_disable_copy_all(void *base); +void gemac_set_bus_width(void *base, int width); +void gemac_set_wol(void *base, u32 wol_conf); + +void gpi_init(void *base, struct gpi_cfg *cfg); +void gpi_reset(void *base); +void gpi_enable(void *base); +void gpi_disable(void *base); +void gpi_set_config(void *base, struct gpi_cfg *cfg); + +void hif_init(void); +void hif_tx_enable(void); +void hif_tx_disable(void); +void hif_rx_enable(void); +void hif_rx_disable(void); + +/* Get Chip Revision level + * + */ +static inline unsigned int CHIP_REVISION(void) +{ + /*For LS1012A return always 1 */ + return 1; +} + +/* Start HIF rx DMA + * + */ +static inline void hif_rx_dma_start(void) +{ + writel(HIF_CTRL_DMA_EN | HIF_CTRL_BDP_CH_START_WSTB, HIF_RX_CTRL); +} + +/* Start HIF tx DMA + * + */ +static inline void hif_tx_dma_start(void) +{ + writel(HIF_CTRL_DMA_EN | HIF_CTRL_BDP_CH_START_WSTB, HIF_TX_CTRL); +} + + +static inline void *pfe_mem_ptov(phys_addr_t paddr) +{ + return rte_mem_iova2virt(paddr); +} + +static phys_addr_t pfe_mem_vtop(uint64_t vaddr) __rte_unused; + +static inline phys_addr_t pfe_mem_vtop(uint64_t vaddr) +{ + const struct rte_memseg 
*memseg; + + memseg = rte_mem_virt2memseg((void *)(uintptr_t)vaddr, NULL); + if (memseg) + return memseg->phys_addr + RTE_PTR_DIFF(vaddr, memseg->addr); + + return (size_t)NULL; +} + +#endif /* _PFE_H_ */ diff --git a/src/spdk/dpdk/drivers/net/pfe/meson.build b/src/spdk/dpdk/drivers/net/pfe/meson.build new file mode 100644 index 000000000..3e1a228a3 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/pfe/meson.build @@ -0,0 +1,18 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright 2019 NXP + +if host_machine.system() != 'linux' + build = false +endif +deps += ['common_dpaax'] + +sources = files('pfe_ethdev.c', + 'pfe_hal.c', + 'pfe_hif_lib.c', + 'pfe_hif.c') + +if cc.has_argument('-Wno-pointer-arith') + cflags += '-Wno-pointer-arith' +endif + +includes += include_directories('base') diff --git a/src/spdk/dpdk/drivers/net/pfe/pfe_eth.h b/src/spdk/dpdk/drivers/net/pfe/pfe_eth.h new file mode 100644 index 000000000..9820d7bf4 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/pfe/pfe_eth.h @@ -0,0 +1,76 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2018-2019 NXP + */ + +#ifndef _PFE_ETH_H_ +#define _PFE_ETH_H_ + +#include +#include +#include + +#define ETH_ALEN 6 +#define GEMAC_NO_PHY BIT(0) + +#define PFE_SOC_ID_FILE "/sys/devices/soc0/soc_id" +extern unsigned int pfe_svr; +#define SVR_LS1012A_REV2 0x87040020 +#define SVR_LS1012A_REV1 0x87040010 + +#define PFE_ETH_OVERHEAD (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN) +#define MAX_MTU_ON_REV1 1878 +struct ls1012a_eth_platform_data { + /* device specific information */ + u32 device_flags; + char name[16]; + + /* board specific information */ + u32 mii_config; + u32 phy_flags; + u32 gem_id; + u32 bus_id; + u32 phy_id; + u32 mdio_muxval; + u8 mac_addr[ETH_ALEN]; +}; + +struct ls1012a_mdio_platform_data { + int enabled; + int irq[32]; + u32 phy_mask; + int mdc_div; +}; + +struct ls1012a_pfe_platform_data { + struct ls1012a_eth_platform_data ls1012a_eth_pdata[3]; + struct ls1012a_mdio_platform_data ls1012a_mdio_pdata[3]; +}; + +#define EMAC_TXQ_CNT 16 +#define EMAC_TXQ_DEPTH (HIF_TX_DESC_NT) + +#define JUMBO_FRAME_SIZE 10258 +#define EMAC_RXQ_CNT 1 +#define EMAC_RXQ_DEPTH HIF_RX_DESC_NT + +struct pfe_eth_priv_s { + struct pfe *pfe; + struct hif_client_s client; + int low_tmu_q; + int high_tmu_q; + struct rte_eth_dev *ndev; + struct rte_eth_stats stats; + int id; + int promisc; + int link_fd; + + spinlock_t lock; /* protect member variables */ + void *EMAC_baseaddr; + /* This points to the EMAC base from where we access PHY */ + void *PHY_baseaddr; + void *GPI_baseaddr; + + struct ls1012a_eth_platform_data *einfo; +}; + +#endif /* _PFE_ETH_H_ */ diff --git a/src/spdk/dpdk/drivers/net/pfe/pfe_ethdev.c b/src/spdk/dpdk/drivers/net/pfe/pfe_ethdev.c new file mode 100644 index 000000000..b1de866d3 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/pfe/pfe_ethdev.c @@ -0,0 +1,1190 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2018-2019 NXP + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "pfe_logs.h" +#include "pfe_mod.h" + +#define PFE_MAX_MACS 1 /* we can support up to 4 MACs per IF */ +#define PFE_VDEV_GEM_ID_ARG "intf" + +struct pfe_vdev_init_params { + int8_t gem_id; +}; +static struct pfe *g_pfe; +/* Supported Rx offloads */ +static uint64_t dev_rx_offloads_sup = + DEV_RX_OFFLOAD_IPV4_CKSUM | + DEV_RX_OFFLOAD_UDP_CKSUM | + DEV_RX_OFFLOAD_TCP_CKSUM; + +/* Supported Tx offloads */ +static uint64_t dev_tx_offloads_sup = + DEV_TX_OFFLOAD_IPV4_CKSUM | + DEV_TX_OFFLOAD_UDP_CKSUM | + 
DEV_TX_OFFLOAD_TCP_CKSUM; + +/* TODO: make pfe_svr a runtime option. + * Driver should be able to get the SVR + * information from HW. + */ +unsigned int pfe_svr = SVR_LS1012A_REV1; +static void *cbus_emac_base[3]; +static void *cbus_gpi_base[3]; + +int pfe_logtype_pmd; + +/* pfe_gemac_init + */ +static int +pfe_gemac_init(struct pfe_eth_priv_s *priv) +{ + struct gemac_cfg cfg; + + cfg.speed = SPEED_1000M; + cfg.duplex = DUPLEX_FULL; + + gemac_set_config(priv->EMAC_baseaddr, &cfg); + gemac_allow_broadcast(priv->EMAC_baseaddr); + gemac_enable_1536_rx(priv->EMAC_baseaddr); + gemac_enable_stacked_vlan(priv->EMAC_baseaddr); + gemac_enable_pause_rx(priv->EMAC_baseaddr); + gemac_set_bus_width(priv->EMAC_baseaddr, 64); + gemac_enable_rx_checksum_offload(priv->EMAC_baseaddr); + + return 0; +} + +static void +pfe_soc_version_get(void) +{ + FILE *svr_file = NULL; + unsigned int svr_ver = 0; + + PMD_INIT_FUNC_TRACE(); + + svr_file = fopen(PFE_SOC_ID_FILE, "r"); + if (!svr_file) { + PFE_PMD_ERR("Unable to open SoC device"); + return; /* Not supported on this infra */ + } + + if (fscanf(svr_file, "svr:%x", &svr_ver) > 0) + pfe_svr = svr_ver; + else + PFE_PMD_ERR("Unable to read SoC device"); + + fclose(svr_file); +} + +static int pfe_eth_start(struct pfe_eth_priv_s *priv) +{ + gpi_enable(priv->GPI_baseaddr); + gemac_enable(priv->EMAC_baseaddr); + + return 0; +} + +static void +pfe_eth_flush_txQ(struct pfe_eth_priv_s *priv, int tx_q_num, int + __rte_unused from_tx, __rte_unused int n_desc) +{ + struct rte_mbuf *mbuf; + unsigned int flags; + + /* Clean HIF and client queue */ + while ((mbuf = hif_lib_tx_get_next_complete(&priv->client, + tx_q_num, &flags, + HIF_TX_DESC_NT))) { + if (mbuf) { + mbuf->next = NULL; + mbuf->nb_segs = 1; + rte_pktmbuf_free(mbuf); + } + } +} + + +static void +pfe_eth_flush_tx(struct pfe_eth_priv_s *priv) +{ + unsigned int ii; + + for (ii = 0; ii < emac_txq_cnt; ii++) + pfe_eth_flush_txQ(priv, ii, 0, 0); +} + +static int +pfe_eth_event_handler(void *data, int event, __rte_unused int qno) +{ + struct pfe_eth_priv_s *priv = data; + + switch (event) { + case EVENT_TXDONE_IND: + pfe_eth_flush_tx(priv); + hif_lib_event_handler_start(&priv->client, EVENT_TXDONE_IND, 0); + break; + case EVENT_HIGH_RX_WM: + default: + break; + } + + return 0; +} + +static uint16_t +pfe_recv_pkts_on_intr(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) +{ + struct hif_client_rx_queue *queue = rxq; + struct pfe_eth_priv_s *priv = queue->priv; + struct epoll_event epoll_ev; + uint64_t ticks = 1; /* 1 msec */ + int ret; + int have_something, work_done; + +#define RESET_STATUS (HIF_INT | HIF_RXPKT_INT) + + /*TODO can we remove this cleanup from here?*/ + pfe_tx_do_cleanup(priv->pfe); + have_something = pfe_hif_rx_process(priv->pfe, nb_pkts); + work_done = hif_lib_receive_pkt(rxq, priv->pfe->hif.shm->pool, + rx_pkts, nb_pkts); + + if (!have_something || !work_done) { + writel(RESET_STATUS, HIF_INT_SRC); + writel(readl(HIF_INT_ENABLE) | HIF_RXPKT_INT, HIF_INT_ENABLE); + ret = epoll_wait(priv->pfe->hif.epoll_fd, &epoll_ev, 1, ticks); + if (ret < 0 && errno != EINTR) + PFE_PMD_ERR("epoll_wait fails with %d\n", errno); + } + + return work_done; +} + +static uint16_t +pfe_recv_pkts(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) +{ + struct hif_client_rx_queue *queue = rxq; + struct pfe_eth_priv_s *priv = queue->priv; + struct rte_mempool *pool; + + /*TODO can we remove this cleanup from here?*/ + pfe_tx_do_cleanup(priv->pfe); + pfe_hif_rx_process(priv->pfe, nb_pkts); + pool = 
priv->pfe->hif.shm->pool; + + return hif_lib_receive_pkt(rxq, pool, rx_pkts, nb_pkts); +} + +static uint16_t +pfe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) +{ + struct hif_client_tx_queue *queue = tx_queue; + struct pfe_eth_priv_s *priv = queue->priv; + struct rte_eth_stats *stats = &priv->stats; + int i; + + for (i = 0; i < nb_pkts; i++) { + if (tx_pkts[i]->nb_segs > 1) { + struct rte_mbuf *mbuf; + int j; + + hif_lib_xmit_pkt(&priv->client, queue->queue_id, + (void *)(size_t)rte_pktmbuf_iova(tx_pkts[i]), + tx_pkts[i]->buf_addr + tx_pkts[i]->data_off, + tx_pkts[i]->data_len, 0x0, HIF_FIRST_BUFFER, + tx_pkts[i]); + + mbuf = tx_pkts[i]->next; + for (j = 0; j < (tx_pkts[i]->nb_segs - 2); j++) { + hif_lib_xmit_pkt(&priv->client, queue->queue_id, + (void *)(size_t)rte_pktmbuf_iova(mbuf), + mbuf->buf_addr + mbuf->data_off, + mbuf->data_len, + 0x0, 0x0, mbuf); + mbuf = mbuf->next; + } + + hif_lib_xmit_pkt(&priv->client, queue->queue_id, + (void *)(size_t)rte_pktmbuf_iova(mbuf), + mbuf->buf_addr + mbuf->data_off, + mbuf->data_len, + 0x0, HIF_LAST_BUFFER | HIF_DATA_VALID, + mbuf); + } else { + hif_lib_xmit_pkt(&priv->client, queue->queue_id, + (void *)(size_t)rte_pktmbuf_iova(tx_pkts[i]), + tx_pkts[i]->buf_addr + tx_pkts[i]->data_off, + tx_pkts[i]->pkt_len, 0 /*ctrl*/, + HIF_FIRST_BUFFER | HIF_LAST_BUFFER | + HIF_DATA_VALID, + tx_pkts[i]); + } + stats->obytes += tx_pkts[i]->pkt_len; + hif_tx_dma_start(); + } + stats->opackets += nb_pkts; + pfe_tx_do_cleanup(priv->pfe); + + return nb_pkts; +} + +static uint16_t +pfe_dummy_xmit_pkts(__rte_unused void *tx_queue, + __rte_unused struct rte_mbuf **tx_pkts, + __rte_unused uint16_t nb_pkts) +{ + return 0; +} + +static uint16_t +pfe_dummy_recv_pkts(__rte_unused void *rxq, + __rte_unused struct rte_mbuf **rx_pkts, + __rte_unused uint16_t nb_pkts) +{ + return 0; +} + +static int +pfe_eth_open(struct rte_eth_dev *dev) +{ + struct pfe_eth_priv_s *priv = dev->data->dev_private; + struct hif_client_s *client; + struct hif_shm *hif_shm; + int rc; + + /* Register client driver with HIF */ + client = &priv->client; + + if (client->pfe) { + hif_shm = client->pfe->hif.shm; + /* TODO please remove the below code of if block, once we add + * the proper cleanup in eth_close + */ + if (!test_bit(PFE_CL_GEM0 + priv->id, + &hif_shm->g_client_status[0])) { + /* Register client driver with HIF */ + memset(client, 0, sizeof(*client)); + client->id = PFE_CL_GEM0 + priv->id; + client->tx_qn = emac_txq_cnt; + client->rx_qn = EMAC_RXQ_CNT; + client->priv = priv; + client->pfe = priv->pfe; + client->port_id = dev->data->port_id; + client->event_handler = pfe_eth_event_handler; + + client->tx_qsize = EMAC_TXQ_DEPTH; + client->rx_qsize = EMAC_RXQ_DEPTH; + + rc = hif_lib_client_register(client); + if (rc) { + PFE_PMD_ERR("hif_lib_client_register(%d)" + " failed", client->id); + goto err0; + } + } else { + /* Freeing the packets if already exists */ + int ret = 0; + struct rte_mbuf *rx_pkts[32]; + /* TODO multiqueue support */ + ret = hif_lib_receive_pkt(&client->rx_q[0], + hif_shm->pool, rx_pkts, 32); + while (ret) { + int i; + for (i = 0; i < ret; i++) + rte_pktmbuf_free(rx_pkts[i]); + ret = hif_lib_receive_pkt(&client->rx_q[0], + hif_shm->pool, + rx_pkts, 32); + } + } + } else { + /* Register client driver with HIF */ + memset(client, 0, sizeof(*client)); + client->id = PFE_CL_GEM0 + priv->id; + client->tx_qn = emac_txq_cnt; + client->rx_qn = EMAC_RXQ_CNT; + client->priv = priv; + client->pfe = priv->pfe; + client->port_id = dev->data->port_id; + 
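pfe_xmit_pkts() above pushes one HIF buffer per mbuf segment and tags the first with HIF_FIRST_BUFFER and the last with HIF_LAST_BUFFER | HIF_DATA_VALID. A stripped-down sketch of that flag assignment over a plain linked list; the EX_ flag values and struct seg are placeholders, not the HIF definitions.

#include <stdio.h>

#define EX_FIRST_BUFFER 0x1u    /* placeholder, not the HIF flag value */
#define EX_LAST_BUFFER  0x2u
#define EX_DATA_VALID   0x4u

struct seg {
	int len;
	struct seg *next;
};

static void ex_xmit(const struct seg *head)
{
	for (const struct seg *s = head; s; s = s->next) {
		unsigned int flags = 0;

		if (s == head)
			flags |= EX_FIRST_BUFFER;                /* first segment */
		if (!s->next)
			flags |= EX_LAST_BUFFER | EX_DATA_VALID; /* last segment  */
		printf("seg len=%d flags=0x%x\n", s->len, flags);
	}
}

int main(void)
{
	struct seg c = { 100, NULL };
	struct seg b = { 256, &c };
	struct seg a = { 64, &b };

	ex_xmit(&a);    /* prints 0x1, 0x0, 0x6; a lone segment would get 0x7 */
	return 0;
}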
client->event_handler = pfe_eth_event_handler; + + client->tx_qsize = EMAC_TXQ_DEPTH; + client->rx_qsize = EMAC_RXQ_DEPTH; + + rc = hif_lib_client_register(client); + if (rc) { + PFE_PMD_ERR("hif_lib_client_register(%d) failed", + client->id); + goto err0; + } + } + rc = pfe_eth_start(priv); + dev->rx_pkt_burst = &pfe_recv_pkts; + dev->tx_pkt_burst = &pfe_xmit_pkts; + /* If no prefetch is configured. */ + if (getenv("PFE_INTR_SUPPORT")) { + dev->rx_pkt_burst = &pfe_recv_pkts_on_intr; + PFE_PMD_INFO("PFE INTERRUPT Mode enabled"); + } + + +err0: + return rc; +} + +static int +pfe_eth_open_cdev(struct pfe_eth_priv_s *priv) +{ + int pfe_cdev_fd; + + if (priv == NULL) + return -1; + + pfe_cdev_fd = open(PFE_CDEV_PATH, O_RDONLY); + if (pfe_cdev_fd < 0) { + PFE_PMD_WARN("Unable to open PFE device file (%s).\n", + PFE_CDEV_PATH); + PFE_PMD_WARN("Link status update will not be available.\n"); + priv->link_fd = PFE_CDEV_INVALID_FD; + return -1; + } + + priv->link_fd = pfe_cdev_fd; + + return 0; +} + +static void +pfe_eth_close_cdev(struct pfe_eth_priv_s *priv) +{ + if (priv == NULL) + return; + + if (priv->link_fd != PFE_CDEV_INVALID_FD) { + close(priv->link_fd); + priv->link_fd = PFE_CDEV_INVALID_FD; + } +} + +static void +pfe_eth_stop(struct rte_eth_dev *dev/*, int wake*/) +{ + struct pfe_eth_priv_s *priv = dev->data->dev_private; + + gemac_disable(priv->EMAC_baseaddr); + gpi_disable(priv->GPI_baseaddr); + + dev->rx_pkt_burst = &pfe_dummy_recv_pkts; + dev->tx_pkt_burst = &pfe_dummy_xmit_pkts; +} + +static void +pfe_eth_exit(struct rte_eth_dev *dev, struct pfe *pfe) +{ + PMD_INIT_FUNC_TRACE(); + + pfe_eth_stop(dev); + /* Close the device file for link status */ + pfe_eth_close_cdev(dev->data->dev_private); + + rte_eth_dev_release_port(dev); + pfe->nb_devs--; +} + +static void +pfe_eth_close(struct rte_eth_dev *dev) +{ + if (!dev) + return; + + if (!g_pfe) + return; + + pfe_eth_exit(dev, g_pfe); + + if (g_pfe->nb_devs == 0) { + pfe_hif_exit(g_pfe); + pfe_hif_lib_exit(g_pfe); + rte_free(g_pfe); + g_pfe = NULL; + } +} + +static int +pfe_eth_configure(struct rte_eth_dev *dev __rte_unused) +{ + return 0; +} + +static int +pfe_eth_info(struct rte_eth_dev *dev, + struct rte_eth_dev_info *dev_info) +{ + struct pfe_eth_priv_s *internals = dev->data->dev_private; + + dev_info->if_index = internals->id; + dev_info->max_mac_addrs = PFE_MAX_MACS; + dev_info->max_rx_queues = dev->data->nb_rx_queues; + dev_info->max_tx_queues = dev->data->nb_tx_queues; + dev_info->min_rx_bufsize = HIF_RX_PKT_MIN_SIZE; + dev_info->min_mtu = RTE_ETHER_MIN_MTU; + dev_info->rx_offload_capa = dev_rx_offloads_sup; + dev_info->tx_offload_capa = dev_tx_offloads_sup; + if (pfe_svr == SVR_LS1012A_REV1) { + dev_info->max_rx_pktlen = MAX_MTU_ON_REV1 + PFE_ETH_OVERHEAD; + dev_info->max_mtu = MAX_MTU_ON_REV1; + } else { + dev_info->max_rx_pktlen = JUMBO_FRAME_SIZE; + dev_info->max_mtu = JUMBO_FRAME_SIZE - PFE_ETH_OVERHEAD; + } + + return 0; +} + +/* Only first mb_pool given on first call of this API will be used + * in whole system, also nb_rx_desc and rx_conf are unused params + */ +static int +pfe_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, + __rte_unused uint16_t nb_rx_desc, + __rte_unused unsigned int socket_id, + __rte_unused const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *mb_pool) +{ + int rc = 0; + struct pfe *pfe; + struct pfe_eth_priv_s *priv = dev->data->dev_private; + + pfe = priv->pfe; + + if (queue_idx >= EMAC_RXQ_CNT) { + PFE_PMD_ERR("Invalid queue idx = %d, Max queues = %d", + queue_idx, 
EMAC_RXQ_CNT); + return -1; + } + + if (!pfe->hif.setuped) { + rc = pfe_hif_shm_init(pfe->hif.shm, mb_pool); + if (rc) { + PFE_PMD_ERR("Could not allocate buffer descriptors"); + return -1; + } + + pfe->hif.shm->pool = mb_pool; + if (pfe_hif_init_buffers(&pfe->hif)) { + PFE_PMD_ERR("Could not initialize buffer descriptors"); + return -1; + } + hif_init(); + hif_rx_enable(); + hif_tx_enable(); + pfe->hif.setuped = 1; + } + dev->data->rx_queues[queue_idx] = &priv->client.rx_q[queue_idx]; + priv->client.rx_q[queue_idx].queue_id = queue_idx; + + return 0; +} + +static void +pfe_rx_queue_release(void *q __rte_unused) +{ + PMD_INIT_FUNC_TRACE(); +} + +static void +pfe_tx_queue_release(void *q __rte_unused) +{ + PMD_INIT_FUNC_TRACE(); +} + +static int +pfe_tx_queue_setup(struct rte_eth_dev *dev, + uint16_t queue_idx, + __rte_unused uint16_t nb_desc, + __rte_unused unsigned int socket_id, + __rte_unused const struct rte_eth_txconf *tx_conf) +{ + struct pfe_eth_priv_s *priv = dev->data->dev_private; + + if (queue_idx >= emac_txq_cnt) { + PFE_PMD_ERR("Invalid queue idx = %d, Max queues = %d", + queue_idx, emac_txq_cnt); + return -1; + } + dev->data->tx_queues[queue_idx] = &priv->client.tx_q[queue_idx]; + priv->client.tx_q[queue_idx].queue_id = queue_idx; + return 0; +} + +static const uint32_t * +pfe_supported_ptypes_get(struct rte_eth_dev *dev) +{ + static const uint32_t ptypes[] = { + /*todo -= add more types */ + RTE_PTYPE_L2_ETHER, + RTE_PTYPE_L3_IPV4, + RTE_PTYPE_L3_IPV4_EXT, + RTE_PTYPE_L3_IPV6, + RTE_PTYPE_L3_IPV6_EXT, + RTE_PTYPE_L4_TCP, + RTE_PTYPE_L4_UDP, + RTE_PTYPE_L4_SCTP + }; + + if (dev->rx_pkt_burst == pfe_recv_pkts || + dev->rx_pkt_burst == pfe_recv_pkts_on_intr) + return ptypes; + return NULL; +} + +static inline int +pfe_eth_atomic_read_link_status(struct rte_eth_dev *dev, + struct rte_eth_link *link) +{ + struct rte_eth_link *dst = link; + struct rte_eth_link *src = &dev->data->dev_link; + + if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, + *(uint64_t *)src) == 0) + return -1; + + return 0; +} + +static inline int +pfe_eth_atomic_write_link_status(struct rte_eth_dev *dev, + struct rte_eth_link *link) +{ + struct rte_eth_link *dst = &dev->data->dev_link; + struct rte_eth_link *src = link; + + if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, + *(uint64_t *)src) == 0) + return -1; + + return 0; +} + +static int +pfe_eth_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused) +{ + int ret, ioctl_cmd = 0; + struct pfe_eth_priv_s *priv = dev->data->dev_private; + struct rte_eth_link link, old; + unsigned int lstatus = 1; + + if (dev == NULL) { + PFE_PMD_ERR("Invalid device in link_update.\n"); + return 0; + } + + memset(&old, 0, sizeof(old)); + memset(&link, 0, sizeof(struct rte_eth_link)); + + pfe_eth_atomic_read_link_status(dev, &old); + + /* Read from PFE CDEV, status of link, if file was successfully + * opened. 
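pfe_eth_atomic_read_link_status() and pfe_eth_atomic_write_link_status() above move the whole struct rte_eth_link in a single 64-bit atomic operation so a reader can never observe a half-updated link record. A standalone sketch of the same idea using C11 atomics; the ex_link layout is illustrative and is not the exact rte_eth_link definition.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct ex_link {                /* packs into exactly 8 bytes               */
	uint32_t speed;
	uint16_t duplex;
	uint16_t status;
};
_Static_assert(sizeof(struct ex_link) == sizeof(uint64_t),
	       "link record must fit a single 64-bit word");

static _Atomic uint64_t g_link;         /* shared, published by the driver  */

static void ex_link_write(const struct ex_link *src)
{
	uint64_t raw;

	memcpy(&raw, src, sizeof(raw));
	atomic_store(&g_link, raw);     /* one atomic publish, never torn   */
}

static void ex_link_read(struct ex_link *dst)
{
	uint64_t raw = atomic_load(&g_link); /* one atomic snapshot          */

	memcpy(dst, &raw, sizeof(raw));
}

int main(void)
{
	struct ex_link up = { .speed = 1000, .duplex = 1, .status = 1 };
	struct ex_link cur;

	ex_link_write(&up);
	ex_link_read(&cur);
	printf("status=%u speed=%u\n", (unsigned int)cur.status,
	       (unsigned int)cur.speed);
	return 0;
}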
+ */ + if (priv->link_fd != PFE_CDEV_INVALID_FD) { + if (priv->id == 0) + ioctl_cmd = PFE_CDEV_ETH0_STATE_GET; + if (priv->id == 1) + ioctl_cmd = PFE_CDEV_ETH1_STATE_GET; + + ret = ioctl(priv->link_fd, ioctl_cmd, &lstatus); + if (ret != 0) { + PFE_PMD_ERR("Unable to fetch link status (ioctl)\n"); + /* use dummy link value */ + link.link_status = 1; + } + PFE_PMD_DEBUG("Fetched link state (%d) for dev %d.\n", + lstatus, priv->id); + } + + if (old.link_status == lstatus) { + /* no change in status */ + PFE_PMD_DEBUG("No change in link status; Not updating.\n"); + return -1; + } + + link.link_status = lstatus; + link.link_speed = ETH_LINK_SPEED_1G; + link.link_duplex = ETH_LINK_FULL_DUPLEX; + link.link_autoneg = ETH_LINK_AUTONEG; + + pfe_eth_atomic_write_link_status(dev, &link); + + PFE_PMD_INFO("Port (%d) link is %s\n", dev->data->port_id, + link.link_status ? "up" : "down"); + + return 0; +} + +static int +pfe_promiscuous_enable(struct rte_eth_dev *dev) +{ + struct pfe_eth_priv_s *priv = dev->data->dev_private; + + priv->promisc = 1; + dev->data->promiscuous = 1; + gemac_enable_copy_all(priv->EMAC_baseaddr); + + return 0; +} + +static int +pfe_promiscuous_disable(struct rte_eth_dev *dev) +{ + struct pfe_eth_priv_s *priv = dev->data->dev_private; + + priv->promisc = 0; + dev->data->promiscuous = 0; + gemac_disable_copy_all(priv->EMAC_baseaddr); + + return 0; +} + +static int +pfe_allmulticast_enable(struct rte_eth_dev *dev) +{ + struct pfe_eth_priv_s *priv = dev->data->dev_private; + struct pfe_mac_addr hash_addr; /* hash register structure */ + + /* Set the hash to rx all multicast frames */ + hash_addr.bottom = 0xFFFFFFFF; + hash_addr.top = 0xFFFFFFFF; + gemac_set_hash(priv->EMAC_baseaddr, &hash_addr); + dev->data->all_multicast = 1; + + return 0; +} + +static int +pfe_link_down(struct rte_eth_dev *dev) +{ + pfe_eth_stop(dev); + return 0; +} + +static int +pfe_link_up(struct rte_eth_dev *dev) +{ + struct pfe_eth_priv_s *priv = dev->data->dev_private; + + pfe_eth_start(priv); + return 0; +} + +static int +pfe_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) +{ + int ret; + struct pfe_eth_priv_s *priv = dev->data->dev_private; + uint16_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN; + + /*TODO Support VLAN*/ + ret = gemac_set_rx(priv->EMAC_baseaddr, frame_size); + if (!ret) + dev->data->mtu = mtu; + + return ret; +} + +/* pfe_eth_enet_addr_byte_mac + */ +static int +pfe_eth_enet_addr_byte_mac(u8 *enet_byte_addr, + struct pfe_mac_addr *enet_addr) +{ + if (!enet_byte_addr || !enet_addr) { + return -1; + + } else { + enet_addr->bottom = enet_byte_addr[0] | + (enet_byte_addr[1] << 8) | + (enet_byte_addr[2] << 16) | + (enet_byte_addr[3] << 24); + enet_addr->top = enet_byte_addr[4] | + (enet_byte_addr[5] << 8); + return 0; + } +} + +static int +pfe_dev_set_mac_addr(struct rte_eth_dev *dev, + struct rte_ether_addr *addr) +{ + struct pfe_eth_priv_s *priv = dev->data->dev_private; + struct pfe_mac_addr spec_addr; + int ret; + + ret = pfe_eth_enet_addr_byte_mac(addr->addr_bytes, &spec_addr); + if (ret) + return ret; + + gemac_set_laddrN(priv->EMAC_baseaddr, + (struct pfe_mac_addr *)&spec_addr, 1); + rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]); + return 0; +} + +static int +pfe_stats_get(struct rte_eth_dev *dev, + struct rte_eth_stats *stats) +{ + struct pfe_eth_priv_s *priv = dev->data->dev_private; + struct rte_eth_stats *eth_stats = &priv->stats; + + if (stats == NULL) + return -1; + + memset(stats, 0, sizeof(struct rte_eth_stats)); + + stats->ipackets = eth_stats->ipackets; + 
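pfe_eth_enet_addr_byte_mac() above folds the six MAC bytes into a bottom/top register pair, lowest byte first. A standalone check of that packing (the sample address is arbitrary):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	unsigned int bottom, top;

	bottom = mac[0] | (mac[1] << 8) | (mac[2] << 16) |
		 ((unsigned int)mac[3] << 24);
	top = mac[4] | (mac[5] << 8);

	/* 00:11:22:33:44:55 -> bottom=0x33221100 top=0x5544 */
	printf("bottom=0x%08x top=0x%04x\n", bottom, top);
	return 0;
}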
stats->ibytes = eth_stats->ibytes; + stats->opackets = eth_stats->opackets; + stats->obytes = eth_stats->obytes; + + return 0; +} + +static const struct eth_dev_ops ops = { + .dev_start = pfe_eth_open, + .dev_stop = pfe_eth_stop, + .dev_close = pfe_eth_close, + .dev_configure = pfe_eth_configure, + .dev_infos_get = pfe_eth_info, + .rx_queue_setup = pfe_rx_queue_setup, + .rx_queue_release = pfe_rx_queue_release, + .tx_queue_setup = pfe_tx_queue_setup, + .tx_queue_release = pfe_tx_queue_release, + .dev_supported_ptypes_get = pfe_supported_ptypes_get, + .link_update = pfe_eth_link_update, + .promiscuous_enable = pfe_promiscuous_enable, + .promiscuous_disable = pfe_promiscuous_disable, + .allmulticast_enable = pfe_allmulticast_enable, + .dev_set_link_down = pfe_link_down, + .dev_set_link_up = pfe_link_up, + .mtu_set = pfe_mtu_set, + .mac_addr_set = pfe_dev_set_mac_addr, + .stats_get = pfe_stats_get, +}; + +static int +pfe_eth_init(struct rte_vdev_device *vdev, struct pfe *pfe, int id) +{ + struct rte_eth_dev *eth_dev = NULL; + struct pfe_eth_priv_s *priv = NULL; + struct ls1012a_eth_platform_data *einfo; + struct ls1012a_pfe_platform_data *pfe_info; + struct rte_ether_addr addr; + int err; + + eth_dev = rte_eth_vdev_allocate(vdev, sizeof(*priv)); + if (eth_dev == NULL) + return -ENOMEM; + + /* Extract pltform data */ + pfe_info = (struct ls1012a_pfe_platform_data *)&pfe->platform_data; + if (!pfe_info) { + PFE_PMD_ERR("pfe missing additional platform data"); + err = -ENODEV; + goto err0; + } + + einfo = (struct ls1012a_eth_platform_data *)pfe_info->ls1012a_eth_pdata; + + /* einfo never be NULL, but no harm in having this check */ + if (!einfo) { + PFE_PMD_ERR("pfe missing additional gemacs platform data"); + err = -ENODEV; + goto err0; + } + + priv = eth_dev->data->dev_private; + priv->ndev = eth_dev; + priv->id = einfo[id].gem_id; + priv->pfe = pfe; + + pfe->eth.eth_priv[id] = priv; + + /* Set the info in the priv to the current info */ + priv->einfo = &einfo[id]; + priv->EMAC_baseaddr = cbus_emac_base[id]; + priv->PHY_baseaddr = cbus_emac_base[id]; + priv->GPI_baseaddr = cbus_gpi_base[id]; + +#define HIF_GEMAC_TMUQ_BASE 6 + priv->low_tmu_q = HIF_GEMAC_TMUQ_BASE + (id * 2); + priv->high_tmu_q = priv->low_tmu_q + 1; + + rte_spinlock_init(&priv->lock); + + /* Copy the station address into the dev structure, */ + eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", + ETHER_ADDR_LEN * PFE_MAX_MACS, 0); + if (eth_dev->data->mac_addrs == NULL) { + PFE_PMD_ERR("Failed to allocate mem %d to store MAC addresses", + ETHER_ADDR_LEN * PFE_MAX_MACS); + err = -ENOMEM; + goto err0; + } + + memcpy(addr.addr_bytes, priv->einfo->mac_addr, + ETH_ALEN); + + pfe_dev_set_mac_addr(eth_dev, &addr); + rte_ether_addr_copy(&addr, ð_dev->data->mac_addrs[0]); + + eth_dev->data->mtu = 1500; + eth_dev->dev_ops = &ops; + pfe_eth_stop(eth_dev); + pfe_gemac_init(priv); + + eth_dev->data->nb_rx_queues = 1; + eth_dev->data->nb_tx_queues = 1; + + /* For link status, open the PFE CDEV; Error from this function + * is silently ignored; In case of error, the link status will not + * be available. 
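In pfe_eth_init() above, each GEMAC is given a fixed pair of TMU queues starting at HIF_GEMAC_TMUQ_BASE (6), so gem id 0 maps to queues 6/7 and gem id 1 to 8/9. The arithmetic, checked in isolation:

#include <stdio.h>

#define EX_HIF_GEMAC_TMUQ_BASE 6    /* value taken from pfe_eth_init() */

int main(void)
{
	for (int id = 0; id < 2; id++) {
		int low = EX_HIF_GEMAC_TMUQ_BASE + id * 2;
		int high = low + 1;

		/* gem 0 -> 6/7, gem 1 -> 8/9 */
		printf("gem %d: low_tmu_q=%d high_tmu_q=%d\n", id, low, high);
	}
	return 0;
}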
+ */ + pfe_eth_open_cdev(priv); + rte_eth_dev_probing_finish(eth_dev); + + return 0; +err0: + rte_eth_dev_release_port(eth_dev); + return err; +} + +static int +pfe_get_gemac_if_proprties(struct pfe *pfe, + __rte_unused const struct device_node *parent, + unsigned int port, unsigned int if_cnt, + struct ls1012a_pfe_platform_data *pdata) +{ + const struct device_node *gem = NULL; + size_t size; + unsigned int ii = 0, phy_id = 0; + const u32 *addr; + const void *mac_addr; + + for (ii = 0; ii < if_cnt; ii++) { + gem = of_get_next_child(parent, gem); + if (!gem) + goto err; + addr = of_get_property(gem, "reg", &size); + if (addr && (rte_be_to_cpu_32((unsigned int)*addr) == port)) + break; + } + + if (ii >= if_cnt) { + PFE_PMD_ERR("Failed to find interface = %d", if_cnt); + goto err; + } + + pdata->ls1012a_eth_pdata[port].gem_id = port; + + mac_addr = of_get_mac_address(gem); + + if (mac_addr) { + memcpy(pdata->ls1012a_eth_pdata[port].mac_addr, mac_addr, + ETH_ALEN); + } + + addr = of_get_property(gem, "fsl,mdio-mux-val", &size); + if (!addr) { + PFE_PMD_ERR("Invalid mdio-mux-val...."); + } else { + phy_id = rte_be_to_cpu_32((unsigned int)*addr); + pdata->ls1012a_eth_pdata[port].mdio_muxval = phy_id; + } + if (pdata->ls1012a_eth_pdata[port].phy_id < 32) + pfe->mdio_muxval[pdata->ls1012a_eth_pdata[port].phy_id] = + pdata->ls1012a_eth_pdata[port].mdio_muxval; + + return 0; + +err: + return -1; +} + +/* Parse integer from integer argument */ +static int +parse_integer_arg(const char *key __rte_unused, + const char *value, void *extra_args) +{ + int i; + char *end; + errno = 0; + + i = strtol(value, &end, 10); + if (*end != 0 || errno != 0 || i < 0 || i > 1) { + PFE_PMD_ERR("Supported Port IDS are 0 and 1"); + return -EINVAL; + } + + *((uint32_t *)extra_args) = i; + + return 0; +} + +static int +pfe_parse_vdev_init_params(struct pfe_vdev_init_params *params, + struct rte_vdev_device *dev) +{ + struct rte_kvargs *kvlist = NULL; + int ret = 0; + + static const char * const pfe_vdev_valid_params[] = { + PFE_VDEV_GEM_ID_ARG, + NULL + }; + + const char *input_args = rte_vdev_device_args(dev); + + if (!input_args) + return -1; + + kvlist = rte_kvargs_parse(input_args, pfe_vdev_valid_params); + if (kvlist == NULL) + return -1; + + ret = rte_kvargs_process(kvlist, + PFE_VDEV_GEM_ID_ARG, + &parse_integer_arg, + ¶ms->gem_id); + rte_kvargs_free(kvlist); + return ret; +} + +static int +pmd_pfe_probe(struct rte_vdev_device *vdev) +{ + const u32 *prop; + const struct device_node *np; + const char *name; + const uint32_t *addr; + uint64_t cbus_addr, ddr_size, cbus_size; + int rc = -1, fd = -1, gem_id; + unsigned int ii, interface_count = 0; + size_t size = 0; + struct pfe_vdev_init_params init_params = { + .gem_id = -1 + }; + + name = rte_vdev_device_name(vdev); + rc = pfe_parse_vdev_init_params(&init_params, vdev); + if (rc < 0) + return -EINVAL; + + PFE_PMD_LOG(INFO, "Initializing pmd_pfe for %s Given gem-id %d", + name, init_params.gem_id); + + if (g_pfe) { + if (g_pfe->nb_devs >= g_pfe->max_intf) { + PFE_PMD_ERR("PFE %d dev already created Max is %d", + g_pfe->nb_devs, g_pfe->max_intf); + return -EINVAL; + } + goto eth_init; + } + + g_pfe = rte_zmalloc(NULL, sizeof(*g_pfe), RTE_CACHE_LINE_SIZE); + if (g_pfe == NULL) + return -EINVAL; + + /* Load the device-tree driver */ + rc = of_init(); + if (rc) { + PFE_PMD_ERR("of_init failed with ret: %d", rc); + goto err; + } + + np = of_find_compatible_node(NULL, NULL, "fsl,pfe"); + if (!np) { + PFE_PMD_ERR("Invalid device node"); + rc = -EINVAL; + goto err; + } + + 
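parse_integer_arg() above accepts only "0" or "1" for the intf= device argument, rejecting trailing characters, conversion errors and out-of-range values via strtol(). The same validation in standalone form (ex_parse_gem_id is a hypothetical name):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int ex_parse_gem_id(const char *value, int *out)
{
	char *end;
	long v;

	errno = 0;
	v = strtol(value, &end, 10);
	if (*end != '\0' || errno != 0 || v < 0 || v > 1)
		return -EINVAL;     /* supported port ids are 0 and 1 only */

	*out = (int)v;
	return 0;
}

int main(void)
{
	int id = -1;
	int rc = ex_parse_gem_id("1", &id);

	printf("\"1\"  -> rc=%d id=%d\n", rc, id);                /* rc=0, id=1   */
	printf("\"2\"  -> rc=%d\n", ex_parse_gem_id("2", &id));   /* out of range */
	printf("\"0x\" -> rc=%d\n", ex_parse_gem_id("0x", &id));  /* trailing junk */
	return 0;
}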
addr = of_get_address(np, 0, &cbus_size, NULL); + if (!addr) { + PFE_PMD_ERR("of_get_address cannot return qman address\n"); + goto err; + } + cbus_addr = of_translate_address(np, addr); + if (!cbus_addr) { + PFE_PMD_ERR("of_translate_address failed\n"); + goto err; + } + + addr = of_get_address(np, 1, &ddr_size, NULL); + if (!addr) { + PFE_PMD_ERR("of_get_address cannot return qman address\n"); + goto err; + } + + g_pfe->ddr_phys_baseaddr = of_translate_address(np, addr); + if (!g_pfe->ddr_phys_baseaddr) { + PFE_PMD_ERR("of_translate_address failed\n"); + goto err; + } + + g_pfe->ddr_baseaddr = pfe_mem_ptov(g_pfe->ddr_phys_baseaddr); + g_pfe->ddr_size = ddr_size; + g_pfe->cbus_size = cbus_size; + + fd = open("/dev/mem", O_RDWR); + g_pfe->cbus_baseaddr = mmap(NULL, cbus_size, PROT_READ | PROT_WRITE, + MAP_SHARED, fd, cbus_addr); + close(fd); + if (g_pfe->cbus_baseaddr == MAP_FAILED) { + PFE_PMD_ERR("Can not map cbus base"); + rc = -EINVAL; + goto err; + } + + /* Read interface count */ + prop = of_get_property(np, "fsl,pfe-num-interfaces", &size); + if (!prop) { + PFE_PMD_ERR("Failed to read number of interfaces"); + rc = -ENXIO; + goto err_prop; + } + + interface_count = rte_be_to_cpu_32((unsigned int)*prop); + if (interface_count <= 0) { + PFE_PMD_ERR("No ethernet interface count : %d", + interface_count); + rc = -ENXIO; + goto err_prop; + } + PFE_PMD_INFO("num interfaces = %d ", interface_count); + + g_pfe->max_intf = interface_count; + g_pfe->platform_data.ls1012a_mdio_pdata[0].phy_mask = 0xffffffff; + + for (ii = 0; ii < interface_count; ii++) { + pfe_get_gemac_if_proprties(g_pfe, np, ii, interface_count, + &g_pfe->platform_data); + } + + pfe_lib_init(g_pfe->cbus_baseaddr, g_pfe->ddr_baseaddr, + g_pfe->ddr_phys_baseaddr, g_pfe->ddr_size); + + PFE_PMD_INFO("CLASS version: %x", readl(CLASS_VERSION)); + PFE_PMD_INFO("TMU version: %x", readl(TMU_VERSION)); + + PFE_PMD_INFO("BMU1 version: %x", readl(BMU1_BASE_ADDR + BMU_VERSION)); + PFE_PMD_INFO("BMU2 version: %x", readl(BMU2_BASE_ADDR + BMU_VERSION)); + + PFE_PMD_INFO("EGPI1 version: %x", readl(EGPI1_BASE_ADDR + GPI_VERSION)); + PFE_PMD_INFO("EGPI2 version: %x", readl(EGPI2_BASE_ADDR + GPI_VERSION)); + PFE_PMD_INFO("HGPI version: %x", readl(HGPI_BASE_ADDR + GPI_VERSION)); + + PFE_PMD_INFO("HIF version: %x", readl(HIF_VERSION)); + PFE_PMD_INFO("HIF NOPCY version: %x", readl(HIF_NOCPY_VERSION)); + + cbus_emac_base[0] = EMAC1_BASE_ADDR; + cbus_emac_base[1] = EMAC2_BASE_ADDR; + + cbus_gpi_base[0] = EGPI1_BASE_ADDR; + cbus_gpi_base[1] = EGPI2_BASE_ADDR; + + rc = pfe_hif_lib_init(g_pfe); + if (rc < 0) + goto err_hif_lib; + + rc = pfe_hif_init(g_pfe); + if (rc < 0) + goto err_hif; + pfe_soc_version_get(); +eth_init: + if (init_params.gem_id < 0) + gem_id = g_pfe->nb_devs; + else + gem_id = init_params.gem_id; + + PFE_PMD_LOG(INFO, "Init pmd_pfe for %s gem-id %d(given =%d)", + name, gem_id, init_params.gem_id); + + rc = pfe_eth_init(vdev, g_pfe, gem_id); + if (rc < 0) + goto err_eth; + else + g_pfe->nb_devs++; + + return 0; + +err_eth: + pfe_hif_exit(g_pfe); + +err_hif: + pfe_hif_lib_exit(g_pfe); + +err_hif_lib: +err_prop: + munmap(g_pfe->cbus_baseaddr, cbus_size); +err: + rte_free(g_pfe); + return rc; +} + +static int +pmd_pfe_remove(struct rte_vdev_device *vdev) +{ + const char *name; + struct rte_eth_dev *eth_dev = NULL; + + name = rte_vdev_device_name(vdev); + if (name == NULL) + return -EINVAL; + + PFE_PMD_INFO("Closing eventdev sw device %s", name); + + if (!g_pfe) + return 0; + + eth_dev = rte_eth_dev_allocated(name); + if (eth_dev == 
NULL) + return -ENODEV; + + pfe_eth_exit(eth_dev, g_pfe); + munmap(g_pfe->cbus_baseaddr, g_pfe->cbus_size); + + if (g_pfe->nb_devs == 0) { + pfe_hif_exit(g_pfe); + pfe_hif_lib_exit(g_pfe); + rte_free(g_pfe); + g_pfe = NULL; + } + return 0; +} + +static +struct rte_vdev_driver pmd_pfe_drv = { + .probe = pmd_pfe_probe, + .remove = pmd_pfe_remove, +}; + +RTE_PMD_REGISTER_VDEV(PFE_NAME_PMD, pmd_pfe_drv); +RTE_PMD_REGISTER_PARAM_STRING(PFE_NAME_PMD, PFE_VDEV_GEM_ID_ARG "= "); + +RTE_INIT(pfe_pmd_init_log) +{ + pfe_logtype_pmd = rte_log_register("pmd.net.pfe"); + if (pfe_logtype_pmd >= 0) + rte_log_set_level(pfe_logtype_pmd, RTE_LOG_NOTICE); +} diff --git a/src/spdk/dpdk/drivers/net/pfe/pfe_hal.c b/src/spdk/dpdk/drivers/net/pfe/pfe_hal.c new file mode 100644 index 000000000..0d25ec052 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/pfe/pfe_hal.c @@ -0,0 +1,629 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2018-2019 NXP + */ + +#include "pfe_logs.h" +#include "pfe_mod.h" + +#define PFE_MTU_RESET_MASK 0xC000FFFF + +void *cbus_base_addr; +void *ddr_base_addr; +unsigned long ddr_phys_base_addr; +unsigned int ddr_size; +static struct pe_info pe[MAX_PE]; + +/* Initializes the PFE library. + * Must be called before using any of the library functions. + * + * @param[in] cbus_base CBUS virtual base address (as mapped in + * the host CPU address space) + * @param[in] ddr_base PFE DDR range virtual base address (as + * mapped in the host CPU address space) + * @param[in] ddr_phys_base PFE DDR range physical base address (as + * mapped in platform) + * @param[in] size PFE DDR range size (as defined by the host + * software) + */ +void +pfe_lib_init(void *cbus_base, void *ddr_base, unsigned long ddr_phys_base, + unsigned int size) +{ + cbus_base_addr = cbus_base; + ddr_base_addr = ddr_base; + ddr_phys_base_addr = ddr_phys_base; + ddr_size = size; + + pe[CLASS0_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(0); + pe[CLASS0_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(0); + pe[CLASS0_ID].pmem_size = CLASS_IMEM_SIZE; + pe[CLASS0_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA; + pe[CLASS0_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR; + pe[CLASS0_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA; + + pe[CLASS1_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(1); + pe[CLASS1_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(1); + pe[CLASS1_ID].pmem_size = CLASS_IMEM_SIZE; + pe[CLASS1_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA; + pe[CLASS1_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR; + pe[CLASS1_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA; + + pe[CLASS2_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(2); + pe[CLASS2_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(2); + pe[CLASS2_ID].pmem_size = CLASS_IMEM_SIZE; + pe[CLASS2_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA; + pe[CLASS2_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR; + pe[CLASS2_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA; + + pe[CLASS3_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(3); + pe[CLASS3_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(3); + pe[CLASS3_ID].pmem_size = CLASS_IMEM_SIZE; + pe[CLASS3_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA; + pe[CLASS3_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR; + pe[CLASS3_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA; + + pe[CLASS4_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(4); + pe[CLASS4_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(4); + pe[CLASS4_ID].pmem_size = CLASS_IMEM_SIZE; + pe[CLASS4_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA; + pe[CLASS4_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR; + pe[CLASS4_ID].mem_access_rdata = 
CLASS_MEM_ACCESS_RDATA; + + pe[CLASS5_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(5); + pe[CLASS5_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(5); + pe[CLASS5_ID].pmem_size = CLASS_IMEM_SIZE; + pe[CLASS5_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA; + pe[CLASS5_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR; + pe[CLASS5_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA; + + pe[TMU0_ID].dmem_base_addr = TMU_DMEM_BASE_ADDR(0); + pe[TMU0_ID].pmem_base_addr = TMU_IMEM_BASE_ADDR(0); + pe[TMU0_ID].pmem_size = TMU_IMEM_SIZE; + pe[TMU0_ID].mem_access_wdata = TMU_MEM_ACCESS_WDATA; + pe[TMU0_ID].mem_access_addr = TMU_MEM_ACCESS_ADDR; + pe[TMU0_ID].mem_access_rdata = TMU_MEM_ACCESS_RDATA; + + pe[TMU1_ID].dmem_base_addr = TMU_DMEM_BASE_ADDR(1); + pe[TMU1_ID].pmem_base_addr = TMU_IMEM_BASE_ADDR(1); + pe[TMU1_ID].pmem_size = TMU_IMEM_SIZE; + pe[TMU1_ID].mem_access_wdata = TMU_MEM_ACCESS_WDATA; + pe[TMU1_ID].mem_access_addr = TMU_MEM_ACCESS_ADDR; + pe[TMU1_ID].mem_access_rdata = TMU_MEM_ACCESS_RDATA; + + pe[TMU3_ID].dmem_base_addr = TMU_DMEM_BASE_ADDR(3); + pe[TMU3_ID].pmem_base_addr = TMU_IMEM_BASE_ADDR(3); + pe[TMU3_ID].pmem_size = TMU_IMEM_SIZE; + pe[TMU3_ID].mem_access_wdata = TMU_MEM_ACCESS_WDATA; + pe[TMU3_ID].mem_access_addr = TMU_MEM_ACCESS_ADDR; + pe[TMU3_ID].mem_access_rdata = TMU_MEM_ACCESS_RDATA; + +#if !defined(CONFIG_FSL_PFE_UTIL_DISABLED) + pe[UTIL_ID].dmem_base_addr = UTIL_DMEM_BASE_ADDR; + pe[UTIL_ID].mem_access_wdata = UTIL_MEM_ACCESS_WDATA; + pe[UTIL_ID].mem_access_addr = UTIL_MEM_ACCESS_ADDR; + pe[UTIL_ID].mem_access_rdata = UTIL_MEM_ACCESS_RDATA; +#endif +} + +/**************************** MTIP GEMAC ***************************/ + +/* Enable Rx Checksum Engine. With this enabled, Frame with bad IP, + * TCP or UDP checksums are discarded + * + * @param[in] base GEMAC base address. + */ +void +gemac_enable_rx_checksum_offload(__rte_unused void *base) +{ + /*Do not find configuration to do this */ +} + +/* Disable Rx Checksum Engine. + * + * @param[in] base GEMAC base address. + */ +void +gemac_disable_rx_checksum_offload(__rte_unused void *base) +{ + /*Do not find configuration to do this */ +} + +/* GEMAC set speed. + * @param[in] base GEMAC base address + * @param[in] speed GEMAC speed (10, 100 or 1000 Mbps) + */ +void +gemac_set_speed(void *base, enum mac_speed gem_speed) +{ + u32 ecr = readl(base + EMAC_ECNTRL_REG) & ~EMAC_ECNTRL_SPEED; + u32 rcr = readl(base + EMAC_RCNTRL_REG) & ~EMAC_RCNTRL_RMII_10T; + + switch (gem_speed) { + case SPEED_10M: + rcr |= EMAC_RCNTRL_RMII_10T; + break; + + case SPEED_1000M: + ecr |= EMAC_ECNTRL_SPEED; + break; + + case SPEED_100M: + default: + /*It is in 100M mode */ + break; + } + writel(ecr, (base + EMAC_ECNTRL_REG)); + writel(rcr, (base + EMAC_RCNTRL_REG)); +} + +/* GEMAC set duplex. + * @param[in] base GEMAC base address + * @param[in] duplex GEMAC duplex mode (Full, Half) + */ +void +gemac_set_duplex(void *base, int duplex) +{ + if (duplex == DUPLEX_HALF) { + writel(readl(base + EMAC_TCNTRL_REG) & ~EMAC_TCNTRL_FDEN, base + + EMAC_TCNTRL_REG); + writel(readl(base + EMAC_RCNTRL_REG) | EMAC_RCNTRL_DRT, (base + + EMAC_RCNTRL_REG)); + } else { + writel(readl(base + EMAC_TCNTRL_REG) | EMAC_TCNTRL_FDEN, base + + EMAC_TCNTRL_REG); + writel(readl(base + EMAC_RCNTRL_REG) & ~EMAC_RCNTRL_DRT, (base + + EMAC_RCNTRL_REG)); + } +} + +/* GEMAC set mode. 
+ * @param[in] base GEMAC base address + * @param[in] mode GEMAC operation mode (MII, RMII, RGMII, SGMII) + */ +void +gemac_set_mode(void *base, __rte_unused int mode) +{ + u32 val = readl(base + EMAC_RCNTRL_REG); + + /*Remove loopbank*/ + val &= ~EMAC_RCNTRL_LOOP; + + /*Enable flow control and MII mode*/ + val |= (EMAC_RCNTRL_FCE | EMAC_RCNTRL_MII_MODE | EMAC_RCNTRL_CRC_FWD); + + writel(val, base + EMAC_RCNTRL_REG); +} + +/* GEMAC enable function. + * @param[in] base GEMAC base address + */ +void +gemac_enable(void *base) +{ + writel(readl(base + EMAC_ECNTRL_REG) | EMAC_ECNTRL_ETHER_EN, base + + EMAC_ECNTRL_REG); +} + +/* GEMAC disable function. + * @param[in] base GEMAC base address + */ +void +gemac_disable(void *base) +{ + writel(readl(base + EMAC_ECNTRL_REG) & ~EMAC_ECNTRL_ETHER_EN, base + + EMAC_ECNTRL_REG); +} + +/* GEMAC TX disable function. + * @param[in] base GEMAC base address + */ +void +gemac_tx_disable(void *base) +{ + writel(readl(base + EMAC_TCNTRL_REG) | EMAC_TCNTRL_GTS, base + + EMAC_TCNTRL_REG); +} + +void +gemac_tx_enable(void *base) +{ + writel(readl(base + EMAC_TCNTRL_REG) & ~EMAC_TCNTRL_GTS, base + + EMAC_TCNTRL_REG); +} + +/* Sets the hash register of the MAC. + * This register is used for matching unicast and multicast frames. + * + * @param[in] base GEMAC base address. + * @param[in] hash 64-bit hash to be configured. + */ +void +gemac_set_hash(void *base, struct pfe_mac_addr *hash) +{ + writel(hash->bottom, base + EMAC_GALR); + writel(hash->top, base + EMAC_GAUR); +} + +void +gemac_set_laddrN(void *base, struct pfe_mac_addr *address, + unsigned int entry_index) +{ + if (entry_index < 1 || entry_index > EMAC_SPEC_ADDR_MAX) + return; + + entry_index = entry_index - 1; + if (entry_index < 1) { + writel(htonl(address->bottom), base + EMAC_PHY_ADDR_LOW); + writel((htonl(address->top) | 0x8808), base + + EMAC_PHY_ADDR_HIGH); + } else { + writel(htonl(address->bottom), base + ((entry_index - 1) * 8) + + EMAC_SMAC_0_0); + writel((htonl(address->top) | 0x8808), base + ((entry_index - + 1) * 8) + EMAC_SMAC_0_1); + } +} + +void +gemac_clear_laddrN(void *base, unsigned int entry_index) +{ + if (entry_index < 1 || entry_index > EMAC_SPEC_ADDR_MAX) + return; + + entry_index = entry_index - 1; + if (entry_index < 1) { + writel(0, base + EMAC_PHY_ADDR_LOW); + writel(0, base + EMAC_PHY_ADDR_HIGH); + } else { + writel(0, base + ((entry_index - 1) * 8) + EMAC_SMAC_0_0); + writel(0, base + ((entry_index - 1) * 8) + EMAC_SMAC_0_1); + } +} + +/* Set the loopback mode of the MAC. This can be either no loopback for + * normal operation, local loopback through MAC internal loopback module or PHY + * loopback for external loopback through a PHY. This asserts the external + * loop pin. + * + * @param[in] base GEMAC base address. + * @param[in] gem_loop Loopback mode to be enabled. LB_LOCAL - MAC + * Loopback, + * LB_EXT - PHY Loopback. + */ +void +gemac_set_loop(void *base, __rte_unused enum mac_loop gem_loop) +{ + pr_info("%s()\n", __func__); + writel(readl(base + EMAC_RCNTRL_REG) | EMAC_RCNTRL_LOOP, (base + + EMAC_RCNTRL_REG)); +} + +/* GEMAC allow frames + * @param[in] base GEMAC base address + */ +void +gemac_enable_copy_all(void *base) +{ + writel(readl(base + EMAC_RCNTRL_REG) | EMAC_RCNTRL_PROM, (base + + EMAC_RCNTRL_REG)); +} + +/* GEMAC do not allow frames + * @param[in] base GEMAC base address + */ +void +gemac_disable_copy_all(void *base) +{ + writel(readl(base + EMAC_RCNTRL_REG) & ~EMAC_RCNTRL_PROM, (base + + EMAC_RCNTRL_REG)); +} + +/* GEMAC allow broadcast function. 
+ * @param[in] base GEMAC base address + */ +void +gemac_allow_broadcast(void *base) +{ + writel(readl(base + EMAC_RCNTRL_REG) & ~EMAC_RCNTRL_BC_REJ, base + + EMAC_RCNTRL_REG); +} + +/* GEMAC no broadcast function. + * @param[in] base GEMAC base address + */ +void +gemac_no_broadcast(void *base) +{ + writel(readl(base + EMAC_RCNTRL_REG) | EMAC_RCNTRL_BC_REJ, base + + EMAC_RCNTRL_REG); +} + +/* GEMAC enable 1536 rx function. + * @param[in] base GEMAC base address + */ +void +gemac_enable_1536_rx(void *base) +{ + /* Set 1536 as Maximum frame length */ + writel((readl(base + EMAC_RCNTRL_REG) & PFE_MTU_RESET_MASK) + | (1536 << 16), + base + EMAC_RCNTRL_REG); +} + +/* GEMAC set Max rx function. + * @param[in] base GEMAC base address + */ +int +gemac_set_rx(void *base, int mtu) +{ + if (mtu < HIF_RX_PKT_MIN_SIZE || mtu > JUMBO_FRAME_SIZE) { + PFE_PMD_ERR("Invalid or not support MTU size"); + return -1; + } + + if (pfe_svr == SVR_LS1012A_REV1 && + mtu > (MAX_MTU_ON_REV1 + PFE_ETH_OVERHEAD)) { + PFE_PMD_ERR("Max supported MTU on Rev1 is %d", MAX_MTU_ON_REV1); + return -1; + } + + writel((readl(base + EMAC_RCNTRL_REG) & PFE_MTU_RESET_MASK) + | (mtu << 16), + base + EMAC_RCNTRL_REG); + return 0; +} + +/* GEMAC enable jumbo function. + * @param[in] base GEMAC base address + */ +void +gemac_enable_rx_jmb(void *base) +{ + if (pfe_svr == SVR_LS1012A_REV1) { + PFE_PMD_ERR("Jumbo not supported on Rev1"); + return; + } + + writel((readl(base + EMAC_RCNTRL_REG) & PFE_MTU_RESET_MASK) | + (JUMBO_FRAME_SIZE << 16), base + EMAC_RCNTRL_REG); +} + +/* GEMAC enable stacked vlan function. + * @param[in] base GEMAC base address + */ +void +gemac_enable_stacked_vlan(__rte_unused void *base) +{ + /* MTIP doesn't support stacked vlan */ +} + +/* GEMAC enable pause rx function. + * @param[in] base GEMAC base address + */ +void +gemac_enable_pause_rx(void *base) +{ + writel(readl(base + EMAC_RCNTRL_REG) | EMAC_RCNTRL_FCE, + base + EMAC_RCNTRL_REG); +} + +/* GEMAC disable pause rx function. + * @param[in] base GEMAC base address + */ +void +gemac_disable_pause_rx(void *base) +{ + writel(readl(base + EMAC_RCNTRL_REG) & ~EMAC_RCNTRL_FCE, + base + EMAC_RCNTRL_REG); +} + +/* GEMAC enable pause tx function. + * @param[in] base GEMAC base address + */ +void +gemac_enable_pause_tx(void *base) +{ + writel(EMAC_RX_SECTION_EMPTY_V, base + EMAC_RX_SECTION_EMPTY); +} + +/* GEMAC disable pause tx function. + * @param[in] base GEMAC base address + */ +void +gemac_disable_pause_tx(void *base) +{ + writel(0x0, base + EMAC_RX_SECTION_EMPTY); +} + +/* GEMAC wol configuration + * @param[in] base GEMAC base address + * @param[in] wol_conf WoL register configuration + */ +void +gemac_set_wol(void *base, u32 wol_conf) +{ + u32 val = readl(base + EMAC_ECNTRL_REG); + + if (wol_conf) + val |= (EMAC_ECNTRL_MAGIC_ENA | EMAC_ECNTRL_SLEEP); + else + val &= ~(EMAC_ECNTRL_MAGIC_ENA | EMAC_ECNTRL_SLEEP); + writel(val, base + EMAC_ECNTRL_REG); +} + +/* Sets Gemac bus width to 64bit + * @param[in] base GEMAC base address + * @param[in] width gemac bus width to be set possible values are 32/64/128 + */ +void +gemac_set_bus_width(__rte_unused void *base, __rte_unused int width) +{ +} + +/* Sets Gemac configuration. 
+ * @param[in] base GEMAC base address + * @param[in] cfg GEMAC configuration + */ +void +gemac_set_config(void *base, struct gemac_cfg *cfg) +{ + /*GEMAC config taken from VLSI */ + writel(0x00000004, base + EMAC_TFWR_STR_FWD); + writel(0x00000005, base + EMAC_RX_SECTION_FULL); + + if (pfe_svr == SVR_LS1012A_REV1) + writel(0x00000768, base + EMAC_TRUNC_FL); + else + writel(0x00003fff, base + EMAC_TRUNC_FL); + + writel(0x00000030, base + EMAC_TX_SECTION_EMPTY); + writel(0x00000000, base + EMAC_MIB_CTRL_STS_REG); + + gemac_set_mode(base, cfg->mode); + + gemac_set_speed(base, cfg->speed); + + gemac_set_duplex(base, cfg->duplex); +} + +/**************************** GPI ***************************/ + +/* Initializes a GPI block. + * @param[in] base GPI base address + * @param[in] cfg GPI configuration + */ +void +gpi_init(void *base, struct gpi_cfg *cfg) +{ + gpi_reset(base); + + gpi_disable(base); + + gpi_set_config(base, cfg); +} + +/* Resets a GPI block. + * @param[in] base GPI base address + */ +void +gpi_reset(void *base) +{ + writel(CORE_SW_RESET, base + GPI_CTRL); +} + +/* Enables a GPI block. + * @param[in] base GPI base address + */ +void +gpi_enable(void *base) +{ + writel(CORE_ENABLE, base + GPI_CTRL); +} + +/* Disables a GPI block. + * @param[in] base GPI base address + */ +void +gpi_disable(void *base) +{ + writel(CORE_DISABLE, base + GPI_CTRL); +} + +/* Sets the configuration of a GPI block. + * @param[in] base GPI base address + * @param[in] cfg GPI configuration + */ +void +gpi_set_config(void *base, struct gpi_cfg *cfg) +{ + writel(CBUS_VIRT_TO_PFE(BMU1_BASE_ADDR + BMU_ALLOC_CTRL), base + + GPI_LMEM_ALLOC_ADDR); + writel(CBUS_VIRT_TO_PFE(BMU1_BASE_ADDR + BMU_FREE_CTRL), base + + GPI_LMEM_FREE_ADDR); + writel(CBUS_VIRT_TO_PFE(BMU2_BASE_ADDR + BMU_ALLOC_CTRL), base + + GPI_DDR_ALLOC_ADDR); + writel(CBUS_VIRT_TO_PFE(BMU2_BASE_ADDR + BMU_FREE_CTRL), base + + GPI_DDR_FREE_ADDR); + writel(CBUS_VIRT_TO_PFE(CLASS_INQ_PKTPTR), base + GPI_CLASS_ADDR); + writel(DDR_HDR_SIZE, base + GPI_DDR_DATA_OFFSET); + writel(LMEM_HDR_SIZE, base + GPI_LMEM_DATA_OFFSET); + writel(0, base + GPI_LMEM_SEC_BUF_DATA_OFFSET); + writel(0, base + GPI_DDR_SEC_BUF_DATA_OFFSET); + writel((DDR_HDR_SIZE << 16) | LMEM_HDR_SIZE, base + GPI_HDR_SIZE); + writel((DDR_BUF_SIZE << 16) | LMEM_BUF_SIZE, base + GPI_BUF_SIZE); + + writel(((cfg->lmem_rtry_cnt << 16) | (GPI_DDR_BUF_EN << 1) | + GPI_LMEM_BUF_EN), base + GPI_RX_CONFIG); + writel(cfg->tmlf_txthres, base + GPI_TMLF_TX); + writel(cfg->aseq_len, base + GPI_DTX_ASEQ); + writel(1, base + GPI_TOE_CHKSUM_EN); + + if (cfg->mtip_pause_reg) { + writel(cfg->mtip_pause_reg, base + GPI_CSR_MTIP_PAUSE_REG); + writel(EGPI_PAUSE_TIME, base + GPI_TX_PAUSE_TIME); + } +} + +/**************************** HIF ***************************/ +/* Initializes HIF copy block. 
+ * + */ +void +hif_init(void) +{ + /*Initialize HIF registers*/ + writel((HIF_RX_POLL_CTRL_CYCLE << 16) | HIF_TX_POLL_CTRL_CYCLE, + HIF_POLL_CTRL); +} + +/* Enable hif tx DMA and interrupt + * + */ +void +hif_tx_enable(void) +{ + writel(HIF_CTRL_DMA_EN, HIF_TX_CTRL); + writel((readl(HIF_INT_ENABLE) | HIF_INT_EN | HIF_TXPKT_INT_EN), + HIF_INT_ENABLE); +} + +/* Disable hif tx DMA and interrupt + * + */ +void +hif_tx_disable(void) +{ + u32 hif_int; + + writel(0, HIF_TX_CTRL); + + hif_int = readl(HIF_INT_ENABLE); + hif_int &= HIF_TXPKT_INT_EN; + writel(hif_int, HIF_INT_ENABLE); +} + +/* Enable hif rx DMA and interrupt + * + */ +void +hif_rx_enable(void) +{ + hif_rx_dma_start(); + writel((readl(HIF_INT_ENABLE) | HIF_INT_EN | HIF_RXPKT_INT_EN), + HIF_INT_ENABLE); +} + +/* Disable hif rx DMA and interrupt + * + */ +void +hif_rx_disable(void) +{ + u32 hif_int; + + writel(0, HIF_RX_CTRL); + + hif_int = readl(HIF_INT_ENABLE); + hif_int &= HIF_RXPKT_INT_EN; + writel(hif_int, HIF_INT_ENABLE); +} diff --git a/src/spdk/dpdk/drivers/net/pfe/pfe_hif.c b/src/spdk/dpdk/drivers/net/pfe/pfe_hif.c new file mode 100644 index 000000000..be5b2ada1 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/pfe/pfe_hif.c @@ -0,0 +1,868 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2018-2019 NXP + */ + +#include "pfe_logs.h" +#include "pfe_mod.h" +#include +#include +#include + +static int +pfe_hif_alloc_descr(struct pfe_hif *hif) +{ + void *addr; + int err = 0; + + PMD_INIT_FUNC_TRACE(); + + addr = rte_zmalloc(NULL, HIF_RX_DESC_NT * sizeof(struct hif_desc) + + HIF_TX_DESC_NT * sizeof(struct hif_desc), RTE_CACHE_LINE_SIZE); + if (!addr) { + PFE_PMD_ERR("Could not allocate buffer descriptors!"); + err = -ENOMEM; + goto err0; + } + + hif->descr_baseaddr_p = pfe_mem_vtop((uintptr_t)addr); + hif->descr_baseaddr_v = addr; + hif->rx_ring_size = HIF_RX_DESC_NT; + hif->tx_ring_size = HIF_TX_DESC_NT; + + return 0; + +err0: + return err; +} + +static void +pfe_hif_free_descr(struct pfe_hif *hif) +{ + PMD_INIT_FUNC_TRACE(); + + rte_free(hif->descr_baseaddr_v); +} + +/* pfe_hif_release_buffers */ +static void +pfe_hif_release_buffers(struct pfe_hif *hif) +{ + struct hif_desc *desc; + uint32_t i = 0; + struct rte_mbuf *mbuf; + struct rte_pktmbuf_pool_private *mb_priv; + + hif->rx_base = hif->descr_baseaddr_v; + + /*Free Rx buffers */ + desc = hif->rx_base; + mb_priv = rte_mempool_get_priv(hif->shm->pool); + for (i = 0; i < hif->rx_ring_size; i++) { + if (readl(&desc->data)) { + if (i < hif->shm->rx_buf_pool_cnt && + !hif->shm->rx_buf_pool[i]) { + mbuf = hif->rx_buf_vaddr[i] + PFE_PKT_HEADER_SZ + - sizeof(struct rte_mbuf) + - RTE_PKTMBUF_HEADROOM + - mb_priv->mbuf_priv_size; + hif->shm->rx_buf_pool[i] = mbuf; + } + } + writel(0, &desc->data); + writel(0, &desc->status); + writel(0, &desc->ctrl); + desc++; + } +} + +/* + * pfe_hif_init_buffers + * This function initializes the HIF Rx/Tx ring descriptors and + * initialize Rx queue with buffers. 
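pfe_hif_release_buffers() above recovers the rte_mbuf pointer from the raw buffer address held in a HIF Rx descriptor by walking back over the PFE packet header, the headroom, the mbuf private area and the mbuf struct itself. A pointer-arithmetic sketch of that layout; the EX_ sizes are made up and do not match DPDK's real values.

#include <stdint.h>
#include <stdio.h>

#define EX_MBUF_SZ    128   /* pretend sizeof(struct rte_mbuf)   */
#define EX_PRIV_SZ     64   /* pretend mbuf_priv_size            */
#define EX_HEADROOM   128   /* pretend RTE_PKTMBUF_HEADROOM      */
#define EX_PKT_HDR_SZ  16   /* pretend PFE_PKT_HEADER_SZ         */

int main(void)
{
	/* [ mbuf struct | priv | headroom (PFE header at its tail) | data ] */
	uint8_t buffer[EX_MBUF_SZ + EX_PRIV_SZ + EX_HEADROOM + 256];

	uint8_t *mbuf = buffer;
	uint8_t *data = mbuf + EX_MBUF_SZ + EX_PRIV_SZ + EX_HEADROOM;
	uint8_t *rx_vaddr = data - EX_PKT_HDR_SZ;  /* pointer kept in the ring */

	/* Same walk-back as pfe_hif_release_buffers() */
	uint8_t *recovered = rx_vaddr + EX_PKT_HDR_SZ - EX_MBUF_SZ -
			     EX_HEADROOM - EX_PRIV_SZ;

	printf("recovered == mbuf ? %s\n", recovered == mbuf ? "yes" : "no");
	return 0;
}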
+ */ +int +pfe_hif_init_buffers(struct pfe_hif *hif) +{ + struct hif_desc *desc, *first_desc_p; + uint32_t i = 0; + + PMD_INIT_FUNC_TRACE(); + + /* Check enough Rx buffers available in the shared memory */ + if (hif->shm->rx_buf_pool_cnt < hif->rx_ring_size) + return -ENOMEM; + + hif->rx_base = hif->descr_baseaddr_v; + memset(hif->rx_base, 0, hif->rx_ring_size * sizeof(struct hif_desc)); + + /*Initialize Rx descriptors */ + desc = hif->rx_base; + first_desc_p = (struct hif_desc *)hif->descr_baseaddr_p; + + for (i = 0; i < hif->rx_ring_size; i++) { + /* Initialize Rx buffers from the shared memory */ + struct rte_mbuf *mbuf = + (struct rte_mbuf *)hif->shm->rx_buf_pool[i]; + + /* PFE mbuf structure is as follow: + * ----------------------------------------------------------+ + * | mbuf | priv | headroom (annotation + PFE data) | data | + * ----------------------------------------------------------+ + * + * As we are expecting additional information like parse + * results, eth id, queue id from PFE block along with data. + * so we have to provide additional memory for each packet to + * HIF rx rings so that PFE block can write its headers. + * so, we are giving the data pointor to HIF rings whose + * calculation is as below: + * mbuf->data_pointor - Required_header_size + * + * We are utilizing the HEADROOM area to receive the PFE + * block headers. On packet reception, HIF driver will use + * PFE headers information based on which it will decide + * the clients and fill the parse results. + * after that application can use/overwrite the HEADROOM area. + */ + hif->rx_buf_vaddr[i] = + (void *)((size_t)mbuf->buf_addr + mbuf->data_off - + PFE_PKT_HEADER_SZ); + hif->rx_buf_addr[i] = + (void *)(size_t)(rte_pktmbuf_iova(mbuf) - + PFE_PKT_HEADER_SZ); + hif->rx_buf_len[i] = mbuf->buf_len - RTE_PKTMBUF_HEADROOM; + + hif->shm->rx_buf_pool[i] = NULL; + + writel(DDR_PHYS_TO_PFE(hif->rx_buf_addr[i]), + &desc->data); + writel(0, &desc->status); + + /* + * Ensure everything else is written to DDR before + * writing bd->ctrl + */ + rte_wmb(); + + writel((BD_CTRL_PKT_INT_EN | BD_CTRL_LIFM + | BD_CTRL_DIR | BD_CTRL_DESC_EN + | BD_BUF_LEN(hif->rx_buf_len[i])), &desc->ctrl); + + /* Chain descriptors */ + writel((u32)DDR_PHYS_TO_PFE(first_desc_p + i + 1), &desc->next); + desc++; + } + + /* Overwrite last descriptor to chain it to first one*/ + desc--; + writel((u32)DDR_PHYS_TO_PFE(first_desc_p), &desc->next); + + hif->rxtoclean_index = 0; + + /*Initialize Rx buffer descriptor ring base address */ + writel(DDR_PHYS_TO_PFE(hif->descr_baseaddr_p), HIF_RX_BDP_ADDR); + + hif->tx_base = hif->rx_base + hif->rx_ring_size; + first_desc_p = (struct hif_desc *)hif->descr_baseaddr_p + + hif->rx_ring_size; + memset(hif->tx_base, 0, hif->tx_ring_size * sizeof(struct hif_desc)); + + /*Initialize tx descriptors */ + desc = hif->tx_base; + + for (i = 0; i < hif->tx_ring_size; i++) { + /* Chain descriptors */ + writel((u32)DDR_PHYS_TO_PFE(first_desc_p + i + 1), &desc->next); + writel(0, &desc->ctrl); + desc++; + } + + /* Overwrite last descriptor to chain it to first one */ + desc--; + writel((u32)DDR_PHYS_TO_PFE(first_desc_p), &desc->next); + hif->txavail = hif->tx_ring_size; + hif->txtosend = 0; + hif->txtoclean = 0; + hif->txtoflush = 0; + + /*Initialize Tx buffer descriptor ring base address */ + writel((u32)DDR_PHYS_TO_PFE(first_desc_p), HIF_TX_BDP_ADDR); + + return 0; +} + +/* + * pfe_hif_client_register + * + * This function used to register a client driver with the HIF driver. 
+ * + * Return value: + * 0 - on Successful registration + */ +static int +pfe_hif_client_register(struct pfe_hif *hif, u32 client_id, + struct hif_client_shm *client_shm) +{ + struct hif_client *client = &hif->client[client_id]; + u32 i, cnt; + struct rx_queue_desc *rx_qbase; + struct tx_queue_desc *tx_qbase; + struct hif_rx_queue *rx_queue; + struct hif_tx_queue *tx_queue; + int err = 0; + + PMD_INIT_FUNC_TRACE(); + + rte_spinlock_lock(&hif->tx_lock); + + if (test_bit(client_id, &hif->shm->g_client_status[0])) { + PFE_PMD_ERR("client %d already registered", client_id); + err = -1; + goto unlock; + } + + memset(client, 0, sizeof(struct hif_client)); + + /* Initialize client Rx queues baseaddr, size */ + + cnt = CLIENT_CTRL_RX_Q_CNT(client_shm->ctrl); + /* Check if client is requesting for more queues than supported */ + if (cnt > HIF_CLIENT_QUEUES_MAX) + cnt = HIF_CLIENT_QUEUES_MAX; + + client->rx_qn = cnt; + rx_qbase = (struct rx_queue_desc *)client_shm->rx_qbase; + for (i = 0; i < cnt; i++) { + rx_queue = &client->rx_q[i]; + rx_queue->base = rx_qbase + i * client_shm->rx_qsize; + rx_queue->size = client_shm->rx_qsize; + rx_queue->write_idx = 0; + } + + /* Initialize client Tx queues baseaddr, size */ + cnt = CLIENT_CTRL_TX_Q_CNT(client_shm->ctrl); + + /* Check if client is requesting for more queues than supported */ + if (cnt > HIF_CLIENT_QUEUES_MAX) + cnt = HIF_CLIENT_QUEUES_MAX; + + client->tx_qn = cnt; + tx_qbase = (struct tx_queue_desc *)client_shm->tx_qbase; + for (i = 0; i < cnt; i++) { + tx_queue = &client->tx_q[i]; + tx_queue->base = tx_qbase + i * client_shm->tx_qsize; + tx_queue->size = client_shm->tx_qsize; + tx_queue->ack_idx = 0; + } + + set_bit(client_id, &hif->shm->g_client_status[0]); + +unlock: + rte_spinlock_unlock(&hif->tx_lock); + + return err; +} + +/* + * pfe_hif_client_unregister + * + * This function used to unregister a client from the HIF driver. + * + */ +static void +pfe_hif_client_unregister(struct pfe_hif *hif, u32 client_id) +{ + PMD_INIT_FUNC_TRACE(); + + /* + * Mark client as no longer available (which prevents further packet + * receive for this client) + */ + rte_spinlock_lock(&hif->tx_lock); + + if (!test_bit(client_id, &hif->shm->g_client_status[0])) { + PFE_PMD_ERR("client %d not registered", client_id); + + rte_spinlock_unlock(&hif->tx_lock); + return; + } + + clear_bit(client_id, &hif->shm->g_client_status[0]); + + rte_spinlock_unlock(&hif->tx_lock); +} + +/* + * client_put_rxpacket- + */ +static struct rte_mbuf * +client_put_rxpacket(struct hif_rx_queue *queue, + void *pkt, u32 len, + u32 flags, u32 client_ctrl, + struct rte_mempool *pool, + u32 *rem_len) +{ + struct rx_queue_desc *desc = queue->base + queue->write_idx; + struct rte_mbuf *mbuf = NULL; + + + if (readl(&desc->ctrl) & CL_DESC_OWN) { + mbuf = rte_cpu_to_le_64(rte_pktmbuf_alloc(pool)); + if (unlikely(!mbuf)) { + PFE_PMD_WARN("Buffer allocation failure\n"); + return NULL; + } + + desc->data = pkt; + desc->client_ctrl = client_ctrl; + /* + * Ensure everything else is written to DDR before + * writing bd->ctrl + */ + rte_wmb(); + writel(CL_DESC_BUF_LEN(len) | flags, &desc->ctrl); + queue->write_idx = (queue->write_idx + 1) + & (queue->size - 1); + + *rem_len = mbuf->buf_len; + } + + return mbuf; +} + +/* + * pfe_hif_rx_process- + * This function does pfe hif rx queue processing. 
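client_put_rxpacket() above only refills a client Rx slot it currently owns (CL_DESC_OWN set), writes the payload fields first, and hands the slot over by rewriting ctrl last, behind a write barrier, so the consumer never sees a half-filled entry. A single-threaded sketch of that ownership handshake, with plain memory accesses standing in for readl()/writel() and rte_wmb():

    #include <stdint.h>
    #include <stdio.h>

    #define OWN_BIT  (1u << 31)
    #define LEN(x)   ((x) & 0xFFFF)
    #define Q_SIZE   4                   /* power of two, as in the driver */

    struct slot {                        /* hypothetical client Rx slot */
            void    *data;
            uint32_t ctrl;               /* length + flags + ownership */
    };

    /* Producer side: fill a slot only while the producer still owns it. */
    static int put_packet(struct slot *q, unsigned int *wr,
                          void *pkt, uint16_t len)
    {
            struct slot *s = &q[*wr];

            if (!(s->ctrl & OWN_BIT))    /* unconsumed packet: queue full */
                    return -1;

            s->data = pkt;
            /* The real driver issues rte_wmb() here so the payload is
             * visible before ownership is handed over. */
            s->ctrl = LEN(len);          /* clearing OWN_BIT passes the slot */
            *wr = (*wr + 1) & (Q_SIZE - 1);
            return 0;
    }

    int main(void)
    {
            struct slot q[Q_SIZE];
            unsigned int wr = 0, i;
            char payload[] = "pkt";

            for (i = 0; i < Q_SIZE; i++)
                    q[i].ctrl = OWN_BIT;         /* all slots free at start */
            while (put_packet(q, &wr, payload, sizeof(payload)) == 0)
                    ;                            /* fill until full */
            printf("queue filled, write index back at %u\n", wr);
            return 0;
    }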
+ * Dequeue packet from Rx queue and send it to corresponding client queue + */ +int +pfe_hif_rx_process(struct pfe *pfe, int budget) +{ + struct hif_desc *desc; + struct hif_hdr *pkt_hdr; + struct __hif_hdr hif_hdr; + void *free_buf; + int rtc, len, rx_processed = 0; + struct __hif_desc local_desc; + int flags = 0, wait_for_last = 0, retry = 0; + unsigned int buf_size = 0; + struct rte_mbuf *mbuf = NULL; + struct pfe_hif *hif = &pfe->hif; + + rte_spinlock_lock(&hif->lock); + + rtc = hif->rxtoclean_index; + + while (rx_processed < budget) { + desc = hif->rx_base + rtc; + + __memcpy12(&local_desc, desc); + + /* ACK pending Rx interrupt */ + if (local_desc.ctrl & BD_CTRL_DESC_EN) { + if (unlikely(wait_for_last)) + continue; + else + break; + } + + len = BD_BUF_LEN(local_desc.ctrl); + pkt_hdr = (struct hif_hdr *)hif->rx_buf_vaddr[rtc]; + + /* Track last HIF header received */ + if (!hif->started) { + hif->started = 1; + + __memcpy8(&hif_hdr, pkt_hdr); + + hif->qno = hif_hdr.hdr.q_num; + hif->client_id = hif_hdr.hdr.client_id; + hif->client_ctrl = (hif_hdr.hdr.client_ctrl1 << 16) | + hif_hdr.hdr.client_ctrl; + flags = CL_DESC_FIRST; + + } else { + flags = 0; + } + + if (local_desc.ctrl & BD_CTRL_LIFM) { + flags |= CL_DESC_LAST; + wait_for_last = 0; + } else { + wait_for_last = 1; + } + + /* Check for valid client id and still registered */ + if (hif->client_id >= HIF_CLIENTS_MAX || + !(test_bit(hif->client_id, + &hif->shm->g_client_status[0]))) { + PFE_PMD_INFO("packet with invalid client id %d qnum %d", + hif->client_id, hif->qno); + + free_buf = hif->rx_buf_addr[rtc]; + + goto pkt_drop; + } + + /* Check to valid queue number */ + if (hif->client[hif->client_id].rx_qn <= hif->qno) { + PFE_DP_LOG(DEBUG, "packet with invalid queue: %d", + hif->qno); + hif->qno = 0; + } + +retry: + mbuf = + client_put_rxpacket(&hif->client[hif->client_id].rx_q[hif->qno], + (void *)pkt_hdr, len, flags, + hif->client_ctrl, hif->shm->pool, + &buf_size); + + if (unlikely(!mbuf)) { + if (!retry) { + pfe_tx_do_cleanup(pfe); + retry = 1; + goto retry; + } + rx_processed = budget; + + if (flags & CL_DESC_FIRST) + hif->started = 0; + + PFE_DP_LOG(DEBUG, "No buffers"); + break; + } + + retry = 0; + + free_buf = (void *)(size_t)rte_pktmbuf_iova(mbuf); + free_buf = free_buf - PFE_PKT_HEADER_SZ; + + /*Fill free buffer in the descriptor */ + hif->rx_buf_addr[rtc] = free_buf; + hif->rx_buf_vaddr[rtc] = (void *)((size_t)mbuf->buf_addr + + mbuf->data_off - PFE_PKT_HEADER_SZ); + hif->rx_buf_len[rtc] = buf_size - RTE_PKTMBUF_HEADROOM; + +pkt_drop: + writel(DDR_PHYS_TO_PFE(free_buf), &desc->data); + /* + * Ensure everything else is written to DDR before + * writing bd->ctrl + */ + rte_wmb(); + writel((BD_CTRL_PKT_INT_EN | BD_CTRL_LIFM | BD_CTRL_DIR | + BD_CTRL_DESC_EN | BD_BUF_LEN(hif->rx_buf_len[rtc])), + &desc->ctrl); + + rtc = (rtc + 1) & (hif->rx_ring_size - 1); + + if (local_desc.ctrl & BD_CTRL_LIFM) { + if (!(hif->client_ctrl & HIF_CTRL_RX_CONTINUED)) + rx_processed++; + + hif->started = 0; + } + } + + + hif->rxtoclean_index = rtc; + rte_spinlock_unlock(&hif->lock); + + /* we made some progress, re-start rx dma in case it stopped */ + hif_rx_dma_start(); + + return rx_processed; +} + +/* + * client_ack_txpacket- + * This function ack the Tx packet in the give client Tx queue by resetting + * ownership bit in the descriptor. 
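Every ring index in the Rx/Tx paths above advances with (idx + 1) & (size - 1), which is only equivalent to a modulo when the ring size is a power of two, as HIF_RX_DESC_NT and HIF_TX_DESC_NT are. A tiny check of that equivalence:

    #include <stdio.h>

    int main(void)
    {
            const unsigned int size = 64;   /* power of two, e.g. HIF_RX_DESC_NT */
            unsigned int idx = 0, i, ok = 1;

            for (i = 0; i < 3 * size; i++) {
                    if (((idx + 1) & (size - 1)) != ((idx + 1) % size))
                            ok = 0;
                    idx = (idx + 1) & (size - 1);  /* the wrap used above */
            }
            printf("mask wrap matches modulo: %s\n", ok ? "yes" : "no");
            return 0;
    }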
+ */ +static int +client_ack_txpacket(struct pfe_hif *hif, unsigned int client_id, + unsigned int q_no) +{ + struct hif_tx_queue *queue = &hif->client[client_id].tx_q[q_no]; + struct tx_queue_desc *desc = queue->base + queue->ack_idx; + + if (readl(&desc->ctrl) & CL_DESC_OWN) { + writel((readl(&desc->ctrl) & ~CL_DESC_OWN), &desc->ctrl); + queue->ack_idx = (queue->ack_idx + 1) & (queue->size - 1); + + return 0; + + } else { + /*This should not happen */ + PFE_PMD_ERR("%d %d %d %d %d %p %d", + hif->txtosend, hif->txtoclean, hif->txavail, + client_id, q_no, queue, queue->ack_idx); + return 1; + } +} + +static void +__hif_tx_done_process(struct pfe *pfe, int count) +{ + struct hif_desc *desc; + struct hif_desc_sw *desc_sw; + unsigned int ttc, tx_avl; + int pkts_done[HIF_CLIENTS_MAX] = {0, 0}; + struct pfe_hif *hif = &pfe->hif; + + ttc = hif->txtoclean; + tx_avl = hif->txavail; + + while ((tx_avl < hif->tx_ring_size) && count--) { + desc = hif->tx_base + ttc; + + if (readl(&desc->ctrl) & BD_CTRL_DESC_EN) + break; + + desc_sw = &hif->tx_sw_queue[ttc]; + + if (desc_sw->client_id > HIF_CLIENTS_MAX) + PFE_PMD_ERR("Invalid cl id %d", desc_sw->client_id); + + pkts_done[desc_sw->client_id]++; + + client_ack_txpacket(hif, desc_sw->client_id, desc_sw->q_no); + + ttc = (ttc + 1) & (hif->tx_ring_size - 1); + tx_avl++; + } + + if (pkts_done[0]) + hif_lib_indicate_client(pfe->hif_client[0], EVENT_TXDONE_IND, + 0); + if (pkts_done[1]) + hif_lib_indicate_client(pfe->hif_client[1], EVENT_TXDONE_IND, + 0); + hif->txtoclean = ttc; + hif->txavail = tx_avl; +} + +static inline void +hif_tx_done_process(struct pfe *pfe, int count) +{ + struct pfe_hif *hif = &pfe->hif; + rte_spinlock_lock(&hif->tx_lock); + __hif_tx_done_process(pfe, count); + rte_spinlock_unlock(&hif->tx_lock); +} + +void +pfe_tx_do_cleanup(struct pfe *pfe) +{ + hif_tx_done_process(pfe, HIF_TX_DESC_NT); +} + +/* + * __hif_xmit_pkt - + * This function puts one packet in the HIF Tx queue + */ +void +hif_xmit_pkt(struct pfe_hif *hif, unsigned int client_id, unsigned int + q_no, void *data, u32 len, unsigned int flags) +{ + struct hif_desc *desc; + struct hif_desc_sw *desc_sw; + + desc = hif->tx_base + hif->txtosend; + desc_sw = &hif->tx_sw_queue[hif->txtosend]; + + desc_sw->len = len; + desc_sw->client_id = client_id; + desc_sw->q_no = q_no; + desc_sw->flags = flags; + + writel((u32)DDR_PHYS_TO_PFE(data), &desc->data); + + hif->txtosend = (hif->txtosend + 1) & (hif->tx_ring_size - 1); + hif->txavail--; + + if ((!((flags & HIF_DATA_VALID) && (flags & + HIF_LAST_BUFFER)))) + goto skip_tx; + + /* + * Ensure everything else is written to DDR before + * writing bd->ctrl + */ + rte_wmb(); + + do { + desc_sw = &hif->tx_sw_queue[hif->txtoflush]; + desc = hif->tx_base + hif->txtoflush; + + if (desc_sw->flags & HIF_LAST_BUFFER) { + writel((BD_CTRL_LIFM | + BD_CTRL_BRFETCH_DISABLE | BD_CTRL_RTFETCH_DISABLE + | BD_CTRL_PARSE_DISABLE | BD_CTRL_DESC_EN | + BD_BUF_LEN(desc_sw->len)), + &desc->ctrl); + } else { + writel((BD_CTRL_DESC_EN | + BD_BUF_LEN(desc_sw->len)), &desc->ctrl); + } + hif->txtoflush = (hif->txtoflush + 1) & (hif->tx_ring_size - 1); + } + while (hif->txtoflush != hif->txtosend) + ; + +skip_tx: + return; +} + +void +hif_process_client_req(struct pfe_hif *hif, int req, + int data1, __rte_unused int data2) +{ + unsigned int client_id = data1; + + if (client_id >= HIF_CLIENTS_MAX) { + PFE_PMD_ERR("client id %d out of bounds", client_id); + return; + } + + switch (req) { + case REQUEST_CL_REGISTER: + /* Request for register a client */ + 
PFE_PMD_INFO("register client_id %d", client_id); + pfe_hif_client_register(hif, client_id, (struct + hif_client_shm *)&hif->shm->client[client_id]); + break; + + case REQUEST_CL_UNREGISTER: + PFE_PMD_INFO("unregister client_id %d", client_id); + + /* Request for unregister a client */ + pfe_hif_client_unregister(hif, client_id); + + break; + + default: + PFE_PMD_ERR("unsupported request %d", req); + break; + } + + /* + * Process client Tx queues + * Currently we don't have checking for tx pending + */ +} + +#if defined(LS1012A_PFE_RESET_WA) +static void +pfe_hif_disable_rx_desc(struct pfe_hif *hif) +{ + u32 ii; + struct hif_desc *desc = hif->rx_base; + + /*Mark all descriptors as LAST_BD */ + for (ii = 0; ii < hif->rx_ring_size; ii++) { + desc->ctrl |= BD_CTRL_LAST_BD; + desc++; + } +} + +struct class_rx_hdr_t { + u32 next_ptr; /* ptr to the start of the first DDR buffer */ + u16 length; /* total packet length */ + u16 phyno; /* input physical port number */ + u32 status; /* gemac status bits */ + u32 status2; /* reserved for software usage */ +}; + +/* STATUS_BAD_FRAME_ERR is set for all errors (including checksums if enabled) + * except overflow + */ +#define STATUS_BAD_FRAME_ERR BIT(16) +#define STATUS_LENGTH_ERR BIT(17) +#define STATUS_CRC_ERR BIT(18) +#define STATUS_TOO_SHORT_ERR BIT(19) +#define STATUS_TOO_LONG_ERR BIT(20) +#define STATUS_CODE_ERR BIT(21) +#define STATUS_MC_HASH_MATCH BIT(22) +#define STATUS_CUMULATIVE_ARC_HIT BIT(23) +#define STATUS_UNICAST_HASH_MATCH BIT(24) +#define STATUS_IP_CHECKSUM_CORRECT BIT(25) +#define STATUS_TCP_CHECKSUM_CORRECT BIT(26) +#define STATUS_UDP_CHECKSUM_CORRECT BIT(27) +#define STATUS_OVERFLOW_ERR BIT(28) /* GPI error */ +#define MIN_PKT_SIZE 64 +#define DUMMY_PKT_COUNT 128 + +static inline void +copy_to_lmem(u32 *dst, u32 *src, int len) +{ + int i; + + for (i = 0; i < len; i += sizeof(u32)) { + *dst = htonl(*src); + dst++; src++; + } +} +#if defined(RTE_TOOLCHAIN_GCC) +__attribute__ ((optimize(1))) +#endif +static void +send_dummy_pkt_to_hif(void) +{ + void *lmem_ptr, *ddr_ptr, *lmem_virt_addr; + u64 physaddr; + struct class_rx_hdr_t local_hdr; + static u32 dummy_pkt[] = { + 0x33221100, 0x2b785544, 0xd73093cb, 0x01000608, + 0x04060008, 0x2b780200, 0xd73093cb, 0x0a01a8c0, + 0x33221100, 0xa8c05544, 0x00000301, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0xbe86c51f }; + + ddr_ptr = (void *)(size_t)readl(BMU2_BASE_ADDR + BMU_ALLOC_CTRL); + if (!ddr_ptr) + return; + + lmem_ptr = (void *)(size_t)readl(BMU1_BASE_ADDR + BMU_ALLOC_CTRL); + if (!lmem_ptr) + return; + + PFE_PMD_INFO("Sending a dummy pkt to HIF %p %p", ddr_ptr, lmem_ptr); + physaddr = DDR_VIRT_TO_PFE(ddr_ptr); + + lmem_virt_addr = (void *)CBUS_PFE_TO_VIRT((unsigned long)lmem_ptr); + + local_hdr.phyno = htons(0); /* RX_PHY_0 */ + local_hdr.length = htons(MIN_PKT_SIZE); + + local_hdr.next_ptr = htonl((u32)physaddr); + /*Mark checksum is correct */ + local_hdr.status = htonl((STATUS_IP_CHECKSUM_CORRECT | + STATUS_UDP_CHECKSUM_CORRECT | + STATUS_TCP_CHECKSUM_CORRECT | + STATUS_UNICAST_HASH_MATCH | + STATUS_CUMULATIVE_ARC_HIT)); + copy_to_lmem((u32 *)lmem_virt_addr, (u32 *)&local_hdr, + sizeof(local_hdr)); + + copy_to_lmem((u32 *)(lmem_virt_addr + LMEM_HDR_SIZE), (u32 *)dummy_pkt, + 0x40); + + writel((unsigned long)lmem_ptr, CLASS_INQ_PKTPTR); +} + +void +pfe_hif_rx_idle(struct pfe_hif *hif) +{ + int hif_stop_loop = DUMMY_PKT_COUNT; + u32 rx_status; + + pfe_hif_disable_rx_desc(hif); + PFE_PMD_INFO("Bringing hif to idle state..."); + writel(0, HIF_INT_ENABLE); + /*If HIF Rx BDP is busy 
send a dummy packet */ + do { + rx_status = readl(HIF_RX_STATUS); + if (rx_status & BDP_CSR_RX_DMA_ACTV) + send_dummy_pkt_to_hif(); + + sleep(1); + } while (--hif_stop_loop); + + if (readl(HIF_RX_STATUS) & BDP_CSR_RX_DMA_ACTV) + PFE_PMD_ERR("Failed\n"); + else + PFE_PMD_INFO("Done\n"); +} +#endif + +/* + * pfe_hif_init + * This function initializes the baseaddresses and irq, etc. + */ +int +pfe_hif_init(struct pfe *pfe) +{ + struct pfe_hif *hif = &pfe->hif; + int err; + + PMD_INIT_FUNC_TRACE(); + +#if defined(LS1012A_PFE_RESET_WA) + pfe_hif_rx_idle(hif); +#endif + + err = pfe_hif_alloc_descr(hif); + if (err) + goto err0; + + rte_spinlock_init(&hif->tx_lock); + rte_spinlock_init(&hif->lock); + + gpi_enable(HGPI_BASE_ADDR); + if (getenv("PFE_INTR_SUPPORT")) { + struct epoll_event epoll_ev; + int event_fd = -1, epoll_fd, pfe_cdev_fd; + + pfe_cdev_fd = open(PFE_CDEV_PATH, O_RDWR); + if (pfe_cdev_fd < 0) { + PFE_PMD_WARN("Unable to open PFE device file (%s).\n", + PFE_CDEV_PATH); + pfe->cdev_fd = PFE_CDEV_INVALID_FD; + return -1; + } + pfe->cdev_fd = pfe_cdev_fd; + + event_fd = eventfd(0, EFD_NONBLOCK); + /* hif interrupt enable */ + err = ioctl(pfe->cdev_fd, PFE_CDEV_HIF_INTR_EN, &event_fd); + if (err) { + PFE_PMD_ERR("\nioctl failed for intr enable err: %d\n", + errno); + goto err0; + } + epoll_fd = epoll_create(1); + epoll_ev.events = EPOLLIN | EPOLLPRI | EPOLLET; + epoll_ev.data.fd = event_fd; + err = epoll_ctl(epoll_fd, EPOLL_CTL_ADD, event_fd, &epoll_ev); + if (err < 0) { + PFE_PMD_ERR("epoll_ctl failed with err = %d\n", errno); + goto err0; + } + pfe->hif.epoll_fd = epoll_fd; + } + return 0; +err0: + return err; +} + +/* pfe_hif_exit- */ +void +pfe_hif_exit(struct pfe *pfe) +{ + struct pfe_hif *hif = &pfe->hif; + + PMD_INIT_FUNC_TRACE(); + + rte_spinlock_lock(&hif->lock); + hif->shm->g_client_status[0] = 0; + /* Make sure all clients are disabled*/ + hif->shm->g_client_status[1] = 0; + + rte_spinlock_unlock(&hif->lock); + + if (hif->setuped) { +#if defined(LS1012A_PFE_RESET_WA) + pfe_hif_rx_idle(hif); +#endif + /*Disable Rx/Tx */ + hif_rx_disable(); + hif_tx_disable(); + + pfe_hif_release_buffers(hif); + pfe_hif_shm_clean(hif->shm); + + pfe_hif_free_descr(hif); + pfe->hif.setuped = 0; + } + gpi_disable(HGPI_BASE_ADDR); +} diff --git a/src/spdk/dpdk/drivers/net/pfe/pfe_hif.h b/src/spdk/dpdk/drivers/net/pfe/pfe_hif.h new file mode 100644 index 000000000..6aaf904bb --- /dev/null +++ b/src/spdk/dpdk/drivers/net/pfe/pfe_hif.h @@ -0,0 +1,156 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2018-2019 NXP + */ + +#ifndef _PFE_HIF_H_ +#define _PFE_HIF_H_ + +#define HIF_CLIENT_QUEUES_MAX 16 +#define HIF_RX_PKT_MIN_SIZE RTE_CACHE_LINE_SIZE +/* + * HIF_TX_DESC_NT value should be always greter than 4, + * Otherwise HIF_TX_POLL_MARK will become zero. 
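When PFE_INTR_SUPPORT is set, pfe_hif_init() above opens the pfe_us_cdev character device, enables the HIF interrupt through an ioctl, and registers an eventfd with an epoll instance; the fd kept in hif->epoll_fd is presumably waited on elsewhere in the driver. A hedged sketch of what such a wait could look like, assuming an epoll fd prepared exactly as above (the helper is illustrative, not part of the driver):

    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/epoll.h>

    /* Block until the registered eventfd fires, then drain its counter so
     * the next interrupt can be detected.  Returns 1 on an event, 0 on
     * timeout, negative on error. */
    static int wait_for_hif_event(int epoll_fd, int timeout_ms)
    {
            struct epoll_event ev;
            uint64_t cnt;
            int n;

            n = epoll_wait(epoll_fd, &ev, 1, timeout_ms);
            if (n <= 0)
                    return n;            /* 0: timeout, <0: error */

            /* eventfd read clears the pending counter (EFD_NONBLOCK). */
            if (read(ev.data.fd, &cnt, sizeof(cnt)) != sizeof(cnt))
                    return -1;
            return 1;
    }

    int main(void)
    {
            /* Usage sketch only: a real epoll fd would come from the setup
             * in pfe_hif_init(); with an invalid fd this reports an error. */
            printf("wait_for_hif_event returned %d\n",
                   wait_for_hif_event(-1, 0));
            return 0;
    }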
+ */ +#define HIF_RX_DESC_NT 64 +#define HIF_TX_DESC_NT 2048 + +#define HIF_FIRST_BUFFER BIT(0) +#define HIF_LAST_BUFFER BIT(1) +#define HIF_DONT_DMA_MAP BIT(2) +#define HIF_DATA_VALID BIT(3) +#define HIF_TSO BIT(4) + +enum { + PFE_CL_GEM0 = 0, + PFE_CL_GEM1, + HIF_CLIENTS_MAX +}; + +/*structure to store client queue info */ +struct hif_rx_queue { + struct rx_queue_desc *base; + u32 size; + u32 write_idx; +}; + +struct hif_tx_queue { + struct tx_queue_desc *base; + u32 size; + u32 ack_idx; +}; + +/*Structure to store the client info */ +struct hif_client { + unsigned int rx_qn; + struct hif_rx_queue rx_q[HIF_CLIENT_QUEUES_MAX]; + unsigned int tx_qn; + struct hif_tx_queue tx_q[HIF_CLIENT_QUEUES_MAX]; +}; + +/*HIF hardware buffer descriptor */ +struct hif_desc { + u32 ctrl; + u32 status; + u32 data; + u32 next; +}; + +struct __hif_desc { + u32 ctrl; + u32 status; + u32 data; +}; + +struct hif_desc_sw { + dma_addr_t data; + u16 len; + u8 client_id; + u8 q_no; + u16 flags; +}; + +struct hif_hdr { + u8 client_id; + u8 q_num; + u16 client_ctrl; + u16 client_ctrl1; +}; + +struct __hif_hdr { + union { + struct hif_hdr hdr; + u32 word[2]; + }; +}; + +struct hif_ipsec_hdr { + u16 sa_handle[2]; +} __packed; + +struct pfe_parse { + unsigned int packet_type; + uint16_t hash; + uint16_t parse_incomplete; + unsigned long long ol_flags; +}; + +/* HIF_CTRL_TX... defines */ +#define HIF_CTRL_TX_CHECKSUM BIT(2) + +/* HIF_CTRL_RX... defines */ +#define HIF_CTRL_RX_OFFSET_OFST (24) +#define HIF_CTRL_RX_CHECKSUMMED BIT(2) +#define HIF_CTRL_RX_CONTINUED BIT(1) + +struct pfe_hif { + /* To store registered clients in hif layer */ + struct hif_client client[HIF_CLIENTS_MAX]; + struct hif_shm *shm; + + void *descr_baseaddr_v; + unsigned long descr_baseaddr_p; + + struct hif_desc *rx_base; + u32 rx_ring_size; + u32 rxtoclean_index; + void *rx_buf_addr[HIF_RX_DESC_NT]; + void *rx_buf_vaddr[HIF_RX_DESC_NT]; + int rx_buf_len[HIF_RX_DESC_NT]; + unsigned int qno; + unsigned int client_id; + unsigned int client_ctrl; + unsigned int started; + unsigned int setuped; + + struct hif_desc *tx_base; + u32 tx_ring_size; + u32 txtosend; + u32 txtoclean; + u32 txavail; + u32 txtoflush; + struct hif_desc_sw tx_sw_queue[HIF_TX_DESC_NT]; + int32_t epoll_fd; /**< File descriptor created for interrupt polling */ + +/* tx_lock synchronizes hif packet tx as well as pfe_hif structure access */ + rte_spinlock_t tx_lock; +/* lock synchronizes hif rx queue processing */ + rte_spinlock_t lock; + struct rte_device *dev; +}; + +void hif_xmit_pkt(struct pfe_hif *hif, unsigned int client_id, unsigned int + q_no, void *data, u32 len, unsigned int flags); +void hif_process_client_req(struct pfe_hif *hif, int req, int data1, int + data2); +int pfe_hif_init(struct pfe *pfe); +void pfe_hif_exit(struct pfe *pfe); +void pfe_hif_rx_idle(struct pfe_hif *hif); +int pfe_hif_rx_process(struct pfe *pfe, int budget); +int pfe_hif_init_buffers(struct pfe_hif *hif); +void pfe_tx_do_cleanup(struct pfe *pfe); + +#define __memcpy8(dst, src) memcpy(dst, src, 8) +#define __memcpy12(dst, src) memcpy(dst, src, 12) +#define __memcpy(dst, src, len) memcpy(dst, src, len) + +#endif /* _PFE_HIF_H_ */ diff --git a/src/spdk/dpdk/drivers/net/pfe/pfe_hif_lib.c b/src/spdk/dpdk/drivers/net/pfe/pfe_hif_lib.c new file mode 100644 index 000000000..799050dce --- /dev/null +++ b/src/spdk/dpdk/drivers/net/pfe/pfe_hif_lib.c @@ -0,0 +1,576 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2018-2019 NXP + */ + +#include "pfe_logs.h" +#include "pfe_mod.h" + +unsigned int 
emac_txq_cnt; + +/* + * @pfe_hal_lib.c + * Common functions used by HIF client drivers + */ + +/*HIF shared memory Global variable */ +struct hif_shm ghif_shm; + +/* Cleanup the HIF shared memory, release HIF rx_buffer_pool. + * This function should be called after pfe_hif_exit + * + * @param[in] hif_shm Shared memory address location in DDR + */ +void +pfe_hif_shm_clean(struct hif_shm *hif_shm) +{ + unsigned int i; + void *pkt; + + for (i = 0; i < hif_shm->rx_buf_pool_cnt; i++) { + pkt = hif_shm->rx_buf_pool[i]; + if (pkt) + rte_pktmbuf_free((struct rte_mbuf *)pkt); + } +} + +/* Initialize shared memory used between HIF driver and clients, + * allocate rx_buffer_pool required for HIF Rx descriptors. + * This function should be called before initializing HIF driver. + * + * @param[in] hif_shm Shared memory address location in DDR + * @rerurn 0 - on succes, <0 on fail to initialize + */ +int +pfe_hif_shm_init(struct hif_shm *hif_shm, struct rte_mempool *mb_pool) +{ + unsigned int i; + struct rte_mbuf *mbuf; + + memset(hif_shm, 0, sizeof(struct hif_shm)); + hif_shm->rx_buf_pool_cnt = HIF_RX_DESC_NT; + + for (i = 0; i < hif_shm->rx_buf_pool_cnt; i++) { + mbuf = rte_cpu_to_le_64(rte_pktmbuf_alloc(mb_pool)); + if (mbuf) + hif_shm->rx_buf_pool[i] = mbuf; + else + goto err0; + } + + return 0; + +err0: + PFE_PMD_ERR("Low memory"); + pfe_hif_shm_clean(hif_shm); + return -ENOMEM; +} + +/*This function sends indication to HIF driver + * + * @param[in] hif hif context + */ +static void +hif_lib_indicate_hif(struct pfe_hif *hif, int req, int data1, int + data2) +{ + hif_process_client_req(hif, req, data1, data2); +} + +void +hif_lib_indicate_client(struct hif_client_s *client, int event_type, + int qno) +{ + if (!client || event_type >= HIF_EVENT_MAX || + qno >= HIF_CLIENT_QUEUES_MAX) + return; + + if (!test_and_set_bit(qno, &client->queue_mask[event_type])) + client->event_handler(client->priv, event_type, qno); +} + +/*This function releases Rx queue descriptors memory and pre-filled buffers + * + * @param[in] client hif_client context + */ +static void +hif_lib_client_release_rx_buffers(struct hif_client_s *client) +{ + struct rte_mempool *pool; + struct rte_pktmbuf_pool_private *mb_priv; + struct rx_queue_desc *desc; + unsigned int qno, ii; + void *buf; + + pool = client->pfe->hif.shm->pool; + mb_priv = rte_mempool_get_priv(pool); + for (qno = 0; qno < client->rx_qn; qno++) { + desc = client->rx_q[qno].base; + + for (ii = 0; ii < client->rx_q[qno].size; ii++) { + buf = (void *)desc->data; + if (buf) { + /* Data pointor to mbuf pointor calculation: + * "Data - User private data - headroom - mbufsize" + * Actual data pointor given to HIF BDs was + * "mbuf->data_offset - PFE_PKT_HEADER_SZ" + */ + buf = buf + PFE_PKT_HEADER_SZ + - sizeof(struct rte_mbuf) + - RTE_PKTMBUF_HEADROOM + - mb_priv->mbuf_priv_size; + rte_pktmbuf_free((struct rte_mbuf *)buf); + desc->ctrl = 0; + } + desc++; + } + } + rte_free(client->rx_qbase); +} + +/*This function allocates memory for the rxq descriptors and pre-fill rx queues + * with buffers. 
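hif_lib_indicate_client() above coalesces events: the per-event queue_mask bit is set on the first indication and the handler is skipped until the bit is cleared again (hif_lib_event_handler_start() later in this file re-arms it). A standalone sketch of that pattern, with plain C helpers standing in for the driver's atomic test_and_set_bit()/test_and_clear_bit():

    #include <stdio.h>

    /* Non-atomic stand-ins for the driver's bit helpers. */
    static int test_and_set_bit_sw(unsigned int nr, unsigned long *addr)
    {
            unsigned long mask = 1UL << nr;
            int was_set = (*addr & mask) != 0;

            *addr |= mask;
            return was_set;
    }

    static void test_and_clear_bit_sw(unsigned int nr, unsigned long *addr)
    {
            *addr &= ~(1UL << nr);
    }

    int main(void)
    {
            unsigned long queue_mask = 0;   /* one mask per event type */
            unsigned int qno = 2, i, calls = 0;

            /* Three back-to-back indications: only the first one would
             * reach client->event_handler(). */
            for (i = 0; i < 3; i++)
                    if (!test_and_set_bit_sw(qno, &queue_mask))
                            calls++;

            /* The handler re-arms the queue before polling it again. */
            test_and_clear_bit_sw(qno, &queue_mask);
            if (!test_and_set_bit_sw(qno, &queue_mask))
                    calls++;

            printf("handler invoked %u times for 4 indications\n", calls);
            return 0;                       /* prints 2 */
    }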
+ * @param[in] client client context + * @param[in] q_size size of the rxQ, all queues are of same size + */ +static int +hif_lib_client_init_rx_buffers(struct hif_client_s *client, + int q_size) +{ + struct rx_queue_desc *desc; + struct hif_client_rx_queue *queue; + unsigned int ii, qno; + + /*Allocate memory for the client queues */ + client->rx_qbase = rte_malloc(NULL, client->rx_qn * q_size * + sizeof(struct rx_queue_desc), RTE_CACHE_LINE_SIZE); + if (!client->rx_qbase) + goto err; + + for (qno = 0; qno < client->rx_qn; qno++) { + queue = &client->rx_q[qno]; + + queue->base = client->rx_qbase + qno * q_size * sizeof(struct + rx_queue_desc); + queue->size = q_size; + queue->read_idx = 0; + queue->write_idx = 0; + queue->queue_id = 0; + queue->port_id = client->port_id; + queue->priv = client->priv; + PFE_PMD_DEBUG("rx queue: %d, base: %p, size: %d\n", qno, + queue->base, queue->size); + } + + for (qno = 0; qno < client->rx_qn; qno++) { + queue = &client->rx_q[qno]; + desc = queue->base; + + for (ii = 0; ii < queue->size; ii++) { + desc->ctrl = CL_DESC_OWN; + desc++; + } + } + + return 0; + +err: + return 1; +} + + +static void +hif_lib_client_cleanup_tx_queue(struct hif_client_tx_queue *queue) +{ + /* + * Check if there are any pending packets. Client must flush the tx + * queues before unregistering, by calling by calling + * hif_lib_tx_get_next_complete() + * + * Hif no longer calls since we are no longer registered + */ + if (queue->tx_pending) + PFE_PMD_ERR("pending transmit packet"); +} + +static void +hif_lib_client_release_tx_buffers(struct hif_client_s *client) +{ + unsigned int qno; + + for (qno = 0; qno < client->tx_qn; qno++) + hif_lib_client_cleanup_tx_queue(&client->tx_q[qno]); + + rte_free(client->tx_qbase); +} + +static int +hif_lib_client_init_tx_buffers(struct hif_client_s *client, int + q_size) +{ + struct hif_client_tx_queue *queue; + unsigned int qno; + + client->tx_qbase = rte_malloc(NULL, client->tx_qn * q_size * + sizeof(struct tx_queue_desc), RTE_CACHE_LINE_SIZE); + if (!client->tx_qbase) + return 1; + + for (qno = 0; qno < client->tx_qn; qno++) { + queue = &client->tx_q[qno]; + + queue->base = client->tx_qbase + qno * q_size * sizeof(struct + tx_queue_desc); + queue->size = q_size; + queue->read_idx = 0; + queue->write_idx = 0; + queue->tx_pending = 0; + queue->nocpy_flag = 0; + queue->prev_tmu_tx_pkts = 0; + queue->done_tmu_tx_pkts = 0; + queue->priv = client->priv; + queue->queue_id = 0; + queue->port_id = client->port_id; + + PFE_PMD_DEBUG("tx queue: %d, base: %p, size: %d", qno, + queue->base, queue->size); + } + + return 0; +} + +static int +hif_lib_event_dummy(__rte_unused void *priv, + __rte_unused int event_type, __rte_unused int qno) +{ + return 0; +} + +int +hif_lib_client_register(struct hif_client_s *client) +{ + struct hif_shm *hif_shm; + struct hif_client_shm *client_shm; + int err, i; + + PMD_INIT_FUNC_TRACE(); + + /*Allocate memory before spin_lock*/ + if (hif_lib_client_init_rx_buffers(client, client->rx_qsize)) { + err = -ENOMEM; + goto err_rx; + } + + if (hif_lib_client_init_tx_buffers(client, client->tx_qsize)) { + err = -ENOMEM; + goto err_tx; + } + + rte_spinlock_lock(&client->pfe->hif.lock); + if (!(client->pfe) || client->id >= HIF_CLIENTS_MAX || + client->pfe->hif_client[client->id]) { + err = -EINVAL; + goto err; + } + + hif_shm = client->pfe->hif.shm; + + if (!client->event_handler) + client->event_handler = hif_lib_event_dummy; + + /*Initialize client specific shared memory */ + client_shm = (struct hif_client_shm 
*)&hif_shm->client[client->id]; + client_shm->rx_qbase = (unsigned long)client->rx_qbase; + client_shm->rx_qsize = client->rx_qsize; + client_shm->tx_qbase = (unsigned long)client->tx_qbase; + client_shm->tx_qsize = client->tx_qsize; + client_shm->ctrl = (client->tx_qn << CLIENT_CTRL_TX_Q_CNT_OFST) | + (client->rx_qn << CLIENT_CTRL_RX_Q_CNT_OFST); + + for (i = 0; i < HIF_EVENT_MAX; i++) { + client->queue_mask[i] = 0; /* + * By default all events are + * unmasked + */ + } + + /*Indicate to HIF driver*/ + hif_lib_indicate_hif(&client->pfe->hif, REQUEST_CL_REGISTER, + client->id, 0); + + PFE_PMD_DEBUG("client: %p, client_id: %d, tx_qsize: %d, rx_qsize: %d", + client, client->id, client->tx_qsize, client->rx_qsize); + + client->cpu_id = -1; + + client->pfe->hif_client[client->id] = client; + rte_spinlock_unlock(&client->pfe->hif.lock); + + return 0; + +err: + rte_spinlock_unlock(&client->pfe->hif.lock); + hif_lib_client_release_tx_buffers(client); + +err_tx: + hif_lib_client_release_rx_buffers(client); + +err_rx: + return err; +} + +int +hif_lib_client_unregister(struct hif_client_s *client) +{ + struct pfe *pfe = client->pfe; + u32 client_id = client->id; + + PFE_PMD_INFO("client: %p, client_id: %d, txQ_depth: %d, rxQ_depth: %d", + client, client->id, client->tx_qsize, client->rx_qsize); + + rte_spinlock_lock(&pfe->hif.lock); + hif_lib_indicate_hif(&pfe->hif, REQUEST_CL_UNREGISTER, client->id, 0); + + hif_lib_client_release_tx_buffers(client); + hif_lib_client_release_rx_buffers(client); + pfe->hif_client[client_id] = NULL; + rte_spinlock_unlock(&pfe->hif.lock); + + return 0; +} + +int +hif_lib_event_handler_start(struct hif_client_s *client, int event, + int qno) +{ + struct hif_client_rx_queue *queue = &client->rx_q[qno]; + struct rx_queue_desc *desc = queue->base + queue->read_idx; + + if (event >= HIF_EVENT_MAX || qno >= HIF_CLIENT_QUEUES_MAX) { + PFE_PMD_WARN("Unsupported event : %d queue number : %d", + event, qno); + return -1; + } + + test_and_clear_bit(qno, &client->queue_mask[event]); + + switch (event) { + case EVENT_RX_PKT_IND: + if (!(desc->ctrl & CL_DESC_OWN)) + hif_lib_indicate_client(client, + EVENT_RX_PKT_IND, qno); + break; + + case EVENT_HIGH_RX_WM: + case EVENT_TXDONE_IND: + default: + break; + } + + return 0; +} + +#ifdef RTE_LIBRTE_PFE_SW_PARSE +static inline void +pfe_sw_parse_pkt(struct rte_mbuf *mbuf) +{ + struct rte_net_hdr_lens hdr_lens; + + mbuf->packet_type = rte_net_get_ptype(mbuf, &hdr_lens, + RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK + | RTE_PTYPE_L4_MASK); + mbuf->l2_len = hdr_lens.l2_len; + mbuf->l3_len = hdr_lens.l3_len; +} +#endif + +/* + * This function gets one packet from the specified client queue + * It also refill the rx buffer + */ +int +hif_lib_receive_pkt(struct hif_client_rx_queue *queue, + struct rte_mempool *pool, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + struct rx_queue_desc *desc; + struct pfe_eth_priv_s *priv = queue->priv; + struct rte_pktmbuf_pool_private *mb_priv; + struct rte_mbuf *mbuf, *p_mbuf = NULL, *first_mbuf = NULL; + struct rte_eth_stats *stats = &priv->stats; + int i, wait_for_last = 0; +#ifndef RTE_LIBRTE_PFE_SW_PARSE + struct pfe_parse *parse_res; +#endif + + for (i = 0; i < nb_pkts;) { + do { + desc = queue->base + queue->read_idx; + if ((desc->ctrl & CL_DESC_OWN)) { + stats->ipackets += i; + return i; + } + + mb_priv = rte_mempool_get_priv(pool); + + mbuf = desc->data + PFE_PKT_HEADER_SZ + - sizeof(struct rte_mbuf) + - RTE_PKTMBUF_HEADROOM + - mb_priv->mbuf_priv_size; + mbuf->next = NULL; + if (desc->ctrl & 
CL_DESC_FIRST) { + /* TODO size of priv data if present in + * descriptor + */ + u16 size = 0; + mbuf->pkt_len = CL_DESC_BUF_LEN(desc->ctrl) + - PFE_PKT_HEADER_SZ - size; + mbuf->data_len = mbuf->pkt_len; + mbuf->port = queue->port_id; +#ifdef RTE_LIBRTE_PFE_SW_PARSE + pfe_sw_parse_pkt(mbuf); +#else + parse_res = (struct pfe_parse *)(desc->data + + PFE_HIF_SIZE); + mbuf->packet_type = parse_res->packet_type; +#endif + mbuf->nb_segs = 1; + first_mbuf = mbuf; + rx_pkts[i++] = first_mbuf; + } else { + mbuf->data_len = CL_DESC_BUF_LEN(desc->ctrl); + mbuf->data_off = mbuf->data_off - + PFE_PKT_HEADER_SZ; + first_mbuf->pkt_len += mbuf->data_len; + first_mbuf->nb_segs++; + p_mbuf->next = mbuf; + } + stats->ibytes += mbuf->data_len; + p_mbuf = mbuf; + + if (desc->ctrl & CL_DESC_LAST) + wait_for_last = 0; + else + wait_for_last = 1; + /* + * Needed so we don't free a buffer/page + * twice on module_exit + */ + desc->data = NULL; + + /* + * Ensure everything else is written to DDR before + * writing bd->ctrl + */ + rte_wmb(); + + desc->ctrl = CL_DESC_OWN; + queue->read_idx = (queue->read_idx + 1) & + (queue->size - 1); + } while (wait_for_last); + } + stats->ipackets += i; + return i; +} + +static inline void +hif_hdr_write(struct hif_hdr *pkt_hdr, unsigned int + client_id, unsigned int qno, + u32 client_ctrl) +{ + /* Optimize the write since the destinaton may be non-cacheable */ + if (!((unsigned long)pkt_hdr & 0x3)) { + ((u32 *)pkt_hdr)[0] = (client_ctrl << 16) | (qno << 8) | + client_id; + } else { + ((u16 *)pkt_hdr)[0] = (qno << 8) | (client_id & 0xFF); + ((u16 *)pkt_hdr)[1] = (client_ctrl & 0xFFFF); + } +} + +/*This function puts the given packet in the specific client queue */ +void +hif_lib_xmit_pkt(struct hif_client_s *client, unsigned int qno, + void *data, void *data1, unsigned int len, + u32 client_ctrl, unsigned int flags, void *client_data) +{ + struct hif_client_tx_queue *queue = &client->tx_q[qno]; + struct tx_queue_desc *desc = queue->base + queue->write_idx; + + /* First buffer */ + if (flags & HIF_FIRST_BUFFER) { + data1 -= PFE_HIF_SIZE; + data -= PFE_HIF_SIZE; + len += PFE_HIF_SIZE; + + hif_hdr_write(data1, client->id, qno, client_ctrl); + } + + desc->data = client_data; + desc->ctrl = CL_DESC_OWN | CL_DESC_FLAGS(flags); + + hif_xmit_pkt(&client->pfe->hif, client->id, qno, data, len, flags); + + queue->write_idx = (queue->write_idx + 1) & (queue->size - 1); + + queue->tx_pending++; +} + +void * +hif_lib_tx_get_next_complete(struct hif_client_s *client, int qno, + unsigned int *flags, __rte_unused int count) +{ + struct hif_client_tx_queue *queue = &client->tx_q[qno]; + struct tx_queue_desc *desc = queue->base + queue->read_idx; + + PFE_DP_LOG(DEBUG, "qno : %d rd_indx: %d pending:%d", + qno, queue->read_idx, queue->tx_pending); + + if (!queue->tx_pending) + return NULL; + + if (queue->nocpy_flag && !queue->done_tmu_tx_pkts) { + u32 tmu_tx_pkts = 0; + + if (queue->prev_tmu_tx_pkts > tmu_tx_pkts) + queue->done_tmu_tx_pkts = UINT_MAX - + queue->prev_tmu_tx_pkts + tmu_tx_pkts; + else + queue->done_tmu_tx_pkts = tmu_tx_pkts - + queue->prev_tmu_tx_pkts; + + queue->prev_tmu_tx_pkts = tmu_tx_pkts; + + if (!queue->done_tmu_tx_pkts) + return NULL; + } + + if (desc->ctrl & CL_DESC_OWN) + return NULL; + + queue->read_idx = (queue->read_idx + 1) & (queue->size - 1); + queue->tx_pending--; + + *flags = CL_DESC_GET_FLAGS(desc->ctrl); + + if (queue->done_tmu_tx_pkts && (*flags & HIF_LAST_BUFFER)) + queue->done_tmu_tx_pkts--; + + return desc->data; +} + +int +pfe_hif_lib_init(struct pfe *pfe) +{ + 
PMD_INIT_FUNC_TRACE(); + + emac_txq_cnt = EMAC_TXQ_CNT; + pfe->hif.shm = &ghif_shm; + + return 0; +} + +void +pfe_hif_lib_exit(__rte_unused struct pfe *pfe) +{ + PMD_INIT_FUNC_TRACE(); +} diff --git a/src/spdk/dpdk/drivers/net/pfe/pfe_hif_lib.h b/src/spdk/dpdk/drivers/net/pfe/pfe_hif_lib.h new file mode 100644 index 000000000..d7c060694 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/pfe/pfe_hif_lib.h @@ -0,0 +1,181 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2018-2019 NXP + */ + +#ifndef _PFE_HIF_LIB_H_ +#define _PFE_HIF_LIB_H_ + +#include "pfe_hif.h" + +#define HIF_CL_REQ_TIMEOUT 10 +#define GFP_DMA_PFE 0 + +enum { + REQUEST_CL_REGISTER = 0, + REQUEST_CL_UNREGISTER, + HIF_REQUEST_MAX +}; + +enum { + /* Event to indicate that client rx queue is reached water mark level */ + EVENT_HIGH_RX_WM = 0, + /* Event to indicate that, packet received for client */ + EVENT_RX_PKT_IND, + /* Event to indicate that, packet tx done for client */ + EVENT_TXDONE_IND, + HIF_EVENT_MAX +}; + +/*structure to store client queue info */ + +/*structure to store client queue info */ +struct hif_client_rx_queue { + struct rx_queue_desc *base; + u32 size; + u32 read_idx; + u32 write_idx; + u16 queue_id; + u16 port_id; + void *priv; +}; + +struct hif_client_tx_queue { + struct tx_queue_desc *base; + u32 size; + u32 read_idx; + u32 write_idx; + u32 tx_pending; + unsigned long jiffies_last_packet; + u32 nocpy_flag; + u32 prev_tmu_tx_pkts; + u32 done_tmu_tx_pkts; + u16 queue_id; + u16 port_id; + void *priv; +}; + +struct hif_client_s { + int id; + unsigned int tx_qn; + unsigned int rx_qn; + void *rx_qbase; + void *tx_qbase; + int tx_qsize; + int rx_qsize; + int cpu_id; + int port_id; + struct hif_client_tx_queue tx_q[HIF_CLIENT_QUEUES_MAX]; + struct hif_client_rx_queue rx_q[HIF_CLIENT_QUEUES_MAX]; + int (*event_handler)(void *data, int event, int qno); + unsigned long queue_mask[HIF_EVENT_MAX]; + struct pfe *pfe; + void *priv; +}; + +/* + * Client specific shared memory + * It contains number of Rx/Tx queues, base addresses and queue sizes + */ +struct hif_client_shm { + u32 ctrl; /*0-7: number of Rx queues, 8-15: number of tx queues */ + unsigned long rx_qbase; /*Rx queue base address */ + u32 rx_qsize; /*each Rx queue size, all Rx queues are of same size */ + unsigned long tx_qbase; /* Tx queue base address */ + u32 tx_qsize; /*each Tx queue size, all Tx queues are of same size */ +}; + +/*Client shared memory ctrl bit description */ +#define CLIENT_CTRL_RX_Q_CNT_OFST 0 +#define CLIENT_CTRL_TX_Q_CNT_OFST 8 +#define CLIENT_CTRL_RX_Q_CNT(ctrl) (((ctrl) >> CLIENT_CTRL_RX_Q_CNT_OFST) \ + & 0xFF) +#define CLIENT_CTRL_TX_Q_CNT(ctrl) (((ctrl) >> CLIENT_CTRL_TX_Q_CNT_OFST) \ + & 0xFF) + +/* + * Shared memory used to communicate between HIF driver and host/client drivers + * Before starting the hif driver rx_buf_pool ans rx_buf_pool_cnt should be + * initialized with host buffers and buffers count in the pool. + * rx_buf_pool_cnt should be >= HIF_RX_DESC_NT. 
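The ctrl word of struct hif_client_shm above packs the Rx queue count into bits 0-7 and the Tx queue count into bits 8-15; hif_lib_client_register() builds it and pfe_hif_client_register() in pfe_hif.c unpacks it with the CLIENT_CTRL_* macros. A small pack/unpack example using the same layout:

    #include <stdio.h>

    /* Same bit layout as the CLIENT_CTRL_* macros above. */
    #define RX_Q_CNT_OFST 0
    #define TX_Q_CNT_OFST 8
    #define RX_Q_CNT(ctrl) (((ctrl) >> RX_Q_CNT_OFST) & 0xFF)
    #define TX_Q_CNT(ctrl) (((ctrl) >> TX_Q_CNT_OFST) & 0xFF)

    int main(void)
    {
            unsigned int tx_qn = 1, rx_qn = 1;
            /* Packing as done at registration time... */
            unsigned int ctrl = (tx_qn << TX_Q_CNT_OFST) |
                                (rx_qn << RX_Q_CNT_OFST);

            /* ...and unpacking as done by the HIF driver. */
            printf("ctrl=0x%04x rx=%u tx=%u\n",
                   ctrl, RX_Q_CNT(ctrl), TX_Q_CNT(ctrl));
            return 0;
    }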
+ * + */ +struct hif_shm { + u32 rx_buf_pool_cnt; /*Number of rx buffers available*/ + /*Rx buffers required to initialize HIF rx descriptors */ + struct rte_mempool *pool; + void *rx_buf_pool[HIF_RX_DESC_NT]; + unsigned long g_client_status[2]; /*Global client status bit mask */ + /* Client specific shared memory */ + struct hif_client_shm client[HIF_CLIENTS_MAX]; +}; + +#define CL_DESC_OWN BIT(31) +/* This sets owner ship to HIF driver */ +#define CL_DESC_LAST BIT(30) +/* This indicates last packet for multi buffers handling */ +#define CL_DESC_FIRST BIT(29) +/* This indicates first packet for multi buffers handling */ + +#define CL_DESC_BUF_LEN(x) ((x) & 0xFFFF) +#define CL_DESC_FLAGS(x) (((x) & 0xF) << 16) +#define CL_DESC_GET_FLAGS(x) (((x) >> 16) & 0xF) + +struct rx_queue_desc { + void *data; + u32 ctrl; /*0-15bit len, 16-20bit flags, 31bit owner*/ + u32 client_ctrl; +}; + +struct tx_queue_desc { + void *data; + u32 ctrl; /*0-15bit len, 16-20bit flags, 31bit owner*/ +}; + +/* HIF Rx is not working properly for 2-byte aligned buffers and + * ip_header should be 4byte aligned for better iperformance. + * "ip_header = 64 + 6(hif_header) + 14 (MAC Header)" will be 4byte aligned. + * In case HW parse support: + * "ip_header = 64 + 6(hif_header) + 16 (parse) + 14 (MAC Header)" will be + * 4byte aligned. + */ +#define PFE_HIF_SIZE sizeof(struct hif_hdr) + +#ifdef RTE_LIBRTE_PFE_SW_PARSE +#define PFE_PKT_HEADER_SZ PFE_HIF_SIZE +#else +#define PFE_PKT_HEADER_SZ (PFE_HIF_SIZE + sizeof(struct pfe_parse)) +#endif + +#define MAX_L2_HDR_SIZE 14 /* Not correct for VLAN/PPPoE */ +#define MAX_L3_HDR_SIZE 20 /* Not correct for IPv6 */ +#define MAX_L4_HDR_SIZE 60 /* TCP with maximum options */ +#define MAX_HDR_SIZE (MAX_L2_HDR_SIZE + MAX_L3_HDR_SIZE \ + + MAX_L4_HDR_SIZE) +/* Used in page mode to clamp packet size to the maximum supported by the hif + *hw interface (<16KiB) + */ +#define MAX_PFE_PKT_SIZE 16380UL + +extern unsigned int emac_txq_cnt; + +int pfe_hif_lib_init(struct pfe *pfe); +void pfe_hif_lib_exit(struct pfe *pfe); +int hif_lib_client_register(struct hif_client_s *client); +int hif_lib_client_unregister(struct hif_client_s *client); +void hif_lib_xmit_pkt(struct hif_client_s *client, unsigned int qno, + void *data, void *data1, unsigned int len, + u32 client_ctrl, unsigned int flags, void *client_data); +void hif_lib_indicate_client(struct hif_client_s *client, int event, int data); +int hif_lib_event_handler_start(struct hif_client_s *client, int event, int + data); +void *hif_lib_tx_get_next_complete(struct hif_client_s *client, int qno, + unsigned int *flags, int count); +int pfe_hif_shm_init(struct hif_shm *hif_shm, struct rte_mempool *mb_pool); +void pfe_hif_shm_clean(struct hif_shm *hif_shm); + +int hif_lib_receive_pkt(struct hif_client_rx_queue *queue, + struct rte_mempool *pool, + struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); + +#endif /* _PFE_HIF_LIB_H_ */ diff --git a/src/spdk/dpdk/drivers/net/pfe/pfe_logs.h b/src/spdk/dpdk/drivers/net/pfe/pfe_logs.h new file mode 100644 index 000000000..58d5e8e7c --- /dev/null +++ b/src/spdk/dpdk/drivers/net/pfe/pfe_logs.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2018-2019 NXP + */ + +#ifndef _PFE_LOGS_H_ +#define _PFE_LOGS_H_ + +extern int pfe_logtype_pmd; + +/* PMD related logs */ +#define PFE_PMD_LOG(level, fmt, args...) 
\ + rte_log(RTE_LOG_ ## level, pfe_logtype_pmd, "pfe_net: %s()" \ + fmt "\n", __func__, ##args) + +#define PMD_INIT_FUNC_TRACE() PFE_PMD_LOG(DEBUG, " >>") + +#define PFE_PMD_DEBUG(fmt, args...) \ + PFE_PMD_LOG(DEBUG, fmt, ## args) +#define PFE_PMD_ERR(fmt, args...) \ + PFE_PMD_LOG(ERR, fmt, ## args) +#define PFE_PMD_INFO(fmt, args...) \ + PFE_PMD_LOG(INFO, fmt, ## args) + +#define PFE_PMD_WARN(fmt, args...) \ + PFE_PMD_LOG(WARNING, fmt, ## args) + +/* DP Logs, toggled out at compile time if level lower than current level */ +#define PFE_DP_LOG(level, fmt, args...) \ + RTE_LOG_DP(level, PMD, fmt, ## args) + +#endif /* _PFE_LOGS_H_ */ diff --git a/src/spdk/dpdk/drivers/net/pfe/pfe_mod.h b/src/spdk/dpdk/drivers/net/pfe/pfe_mod.h new file mode 100644 index 000000000..88d3d6ffc --- /dev/null +++ b/src/spdk/dpdk/drivers/net/pfe/pfe_mod.h @@ -0,0 +1,64 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2018-2019 NXP + */ + +#ifndef _PFE_MOD_H_ +#define _PFE_MOD_H_ + +struct pfe; + +#include + +#include "pfe.h" +#include "pfe_hif.h" +#include "pfe_hif_lib.h" +#include "pfe_eth.h" + +#define PHYID_MAX_VAL 32 + +/* PFE DPDK driver supports two interfaces. + */ +#define PFE_CDEV_ETH_COUNT 2 + +/* PFE DPDK driver needs a kernel module named "pfe.ko", This module + * is required for PHY initialisation and creates a character device + * "pfe_us_cdev" for IOCTL support. PFE DPDK driver uses this character + * device for link status. + */ +#define PFE_CDEV_PATH "/dev/pfe_us_cdev" +#define PFE_CDEV_INVALID_FD -1 +#define PFE_NAME_PMD net_pfe + +/* used when 'read' call is issued, returning PFE_CDEV_ETH_COUNT number of + * pfe_shared_info as array. + */ +struct pfe_shared_info { + uint32_t phy_id; /* Link phy ID */ + uint8_t state; /* Has either 0 or 1 */ +}; + +struct pfe_eth { + struct pfe_eth_priv_s *eth_priv[PFE_CDEV_ETH_COUNT]; +}; + +struct pfe { + uint64_t ddr_phys_baseaddr; + void *ddr_baseaddr; + uint64_t ddr_size; + void *cbus_baseaddr; + uint64_t cbus_size; + struct ls1012a_pfe_platform_data platform_data; + struct pfe_hif hif; + struct pfe_eth eth; + struct hif_client_s *hif_client[HIF_CLIENTS_MAX]; + int mdio_muxval[PHYID_MAX_VAL]; + uint8_t nb_devs; + uint8_t max_intf; + int cdev_fd; +}; + +/* IOCTL Commands */ +#define PFE_CDEV_ETH0_STATE_GET _IOR('R', 0, int) +#define PFE_CDEV_ETH1_STATE_GET _IOR('R', 1, int) +#define PFE_CDEV_HIF_INTR_EN _IOWR('R', 2, int) +#endif /* _PFE_MOD_H */ diff --git a/src/spdk/dpdk/drivers/net/pfe/rte_pmd_pfe_version.map b/src/spdk/dpdk/drivers/net/pfe/rte_pmd_pfe_version.map new file mode 100644 index 000000000..f9f17e4f6 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/pfe/rte_pmd_pfe_version.map @@ -0,0 +1,3 @@ +DPDK_20.0 { + local: *; +}; diff --git a/src/spdk/dpdk/drivers/net/qede/Makefile b/src/spdk/dpdk/drivers/net/qede/Makefile new file mode 100644 index 000000000..5810b4d49 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/qede/Makefile @@ -0,0 +1,108 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright (c) 2016 - 2018 Cavium Inc. +# All rights reserved. 
+# www.cavium.com + +include $(RTE_SDK)/mk/rte.vars.mk + +# +# library name +# +LIB = librte_pmd_qede.a + +CFLAGS += -O3 +CFLAGS += $(WERROR_FLAGS) +LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring +LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs +LDLIBS += -lrte_bus_pci + +EXPORT_MAP := rte_pmd_qede_version.map + +# +# OS +# +OS_TYPE := $(shell uname -s) + +# +# CFLAGS +# +CFLAGS_BASE_DRIVER = -Wno-unused-parameter +CFLAGS_BASE_DRIVER += -Wno-sign-compare +CFLAGS_BASE_DRIVER += -Wno-missing-prototypes +CFLAGS_BASE_DRIVER += -Wno-cast-qual +CFLAGS_BASE_DRIVER += -Wno-unused-function +CFLAGS_BASE_DRIVER += -Wno-unused-variable +CFLAGS_BASE_DRIVER += -Wno-strict-aliasing +CFLAGS_BASE_DRIVER += -Wno-missing-prototypes + +ifneq ($(CONFIG_RTE_TOOLCHAIN_ICC),y) +CFLAGS_BASE_DRIVER += -Wno-unused-value +CFLAGS_BASE_DRIVER += -Wno-format-nonliteral +ifeq ($(OS_TYPE),Linux) +ifeq ($(shell clang -Wno-shift-negative-value -Werror -E - < /dev/null > /dev/null 2>&1; echo $$?),0) +CFLAGS_BASE_DRIVER += -Wno-shift-negative-value +endif +endif +endif + +ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y) +ifeq ($(shell test $(GCC_VERSION) -ge 44 && echo 1), 1) +CFLAGS_BASE_DRIVER += -Wno-unused-but-set-variable +endif +CFLAGS_BASE_DRIVER += -Wno-missing-declarations +ifeq ($(shell test $(GCC_VERSION) -ge 46 && echo 1), 1) +CFLAGS_BASE_DRIVER += -Wno-maybe-uninitialized +endif +CFLAGS_BASE_DRIVER += -Wno-strict-prototypes +ifeq ($(shell test $(GCC_VERSION) -ge 60 && echo 1), 1) +CFLAGS_BASE_DRIVER += -Wno-shift-negative-value +ifeq ($(shell test $(GCC_VERSION) -ge 70 && echo 1), 1) +CFLAGS_BASE_DRIVER += -Wno-implicit-fallthrough +endif +endif +else ifeq ($(CONFIG_RTE_TOOLCHAIN_CLANG),y) +CFLAGS_BASE_DRIVER += -Wno-format-extra-args +CFLAGS_BASE_DRIVER += -Wno-visibility +CFLAGS_BASE_DRIVER += -Wno-empty-body +CFLAGS_BASE_DRIVER += -Wno-invalid-source-encoding +CFLAGS_BASE_DRIVER += -Wno-sometimes-uninitialized +ifeq ($(shell clang -Wno-pointer-bool-conversion -Werror -E - < /dev/null > /dev/null 2>&1; echo $$?),0) +CFLAGS_BASE_DRIVER += -Wno-pointer-bool-conversion +endif +else #ICC +CFLAGS_qede_ethdev.o += -diag-disable 279 #279: controlling expression is constant +endif + +# +# Add extra flags for base ecore driver files +# to disable warnings in them +# +# +BASE_DRIVER_OBJS=$(sort $(patsubst %.c,%.o,$(notdir $(wildcard $(SRCDIR)/base/*.c)))) +$(foreach obj, $(BASE_DRIVER_OBJS), $(eval CFLAGS_$(obj)+=$(CFLAGS_BASE_DRIVER))) + +VPATH += $(SRCDIR)/base + +# +# all source are stored in SRCS-y +# +SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += ecore_dev.c +SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += ecore_hw.c +SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += ecore_cxt.c +SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += ecore_l2.c +SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += ecore_sp_commands.c +SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += ecore_init_fw_funcs.c +SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += ecore_spq.c +SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += ecore_init_ops.c +SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += ecore_mcp.c +SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += ecore_int.c +SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += ecore_dcbx.c +SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += bcm_osal.c +SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += ecore_sriov.c +SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += ecore_vf.c +SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_ethdev.c +SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_main.c +SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_rxtx.c +SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_filter.c + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/src/spdk/dpdk/drivers/net/qede/base/bcm_osal.c 
b/src/spdk/dpdk/drivers/net/qede/base/bcm_osal.c new file mode 100644 index 000000000..48d016e24 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/qede/base/bcm_osal.c @@ -0,0 +1,311 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. + * All rights reserved. + * www.cavium.com + */ + +#include +#include + +#include "bcm_osal.h" +#include "ecore.h" +#include "ecore_hw.h" +#include "ecore_dev_api.h" +#include "ecore_iov_api.h" +#include "ecore_mcp_api.h" +#include "ecore_l2_api.h" + +/* Array of memzone pointers */ +static const struct rte_memzone *ecore_mz_mapping[RTE_MAX_MEMZONE]; +/* Counter to track current memzone allocated */ +static uint16_t ecore_mz_count; + +unsigned long qede_log2_align(unsigned long n) +{ + unsigned long ret = n ? 1 : 0; + unsigned long _n = n >> 1; + + while (_n) { + _n >>= 1; + ret <<= 1; + } + + if (ret < n) + ret <<= 1; + + return ret; +} + +u32 qede_osal_log2(u32 val) +{ + u32 log = 0; + + while (val >>= 1) + log++; + + return log; +} + +inline void qede_set_bit(u32 nr, unsigned long *addr) +{ + __sync_fetch_and_or(addr, (1UL << nr)); +} + +inline void qede_clr_bit(u32 nr, unsigned long *addr) +{ + __sync_fetch_and_and(addr, ~(1UL << nr)); +} + +inline bool qede_test_bit(u32 nr, unsigned long *addr) +{ + bool res; + + rte_mb(); + res = ((*addr) & (1UL << nr)) != 0; + rte_mb(); + return res; +} + +static inline u32 qede_ffb(unsigned long word) +{ + unsigned long first_bit; + + first_bit = __builtin_ffsl(word); + return first_bit ? (first_bit - 1) : OSAL_BITS_PER_UL; +} + +inline u32 qede_find_first_bit(unsigned long *addr, u32 limit) +{ + u32 i; + u32 nwords = 0; + OSAL_BUILD_BUG_ON(!limit); + nwords = (limit - 1) / OSAL_BITS_PER_UL + 1; + for (i = 0; i < nwords; i++) + if (addr[i] != 0) + break; + + return (i == nwords) ? limit : i * OSAL_BITS_PER_UL + qede_ffb(addr[i]); +} + +static inline u32 qede_ffz(unsigned long word) +{ + unsigned long first_zero; + + first_zero = __builtin_ffsl(~word); + return first_zero ? (first_zero - 1) : OSAL_BITS_PER_UL; +} + +inline u32 qede_find_first_zero_bit(unsigned long *addr, u32 limit) +{ + u32 i; + u32 nwords = 0; + OSAL_BUILD_BUG_ON(!limit); + nwords = (limit - 1) / OSAL_BITS_PER_UL + 1; + for (i = 0; i < nwords && ~(addr[i]) == 0; i++); + return (i == nwords) ? 
limit : i * OSAL_BITS_PER_UL + qede_ffz(addr[i]); +} + +void qede_vf_fill_driver_data(struct ecore_hwfn *hwfn, + __rte_unused struct vf_pf_resc_request *resc_req, + struct ecore_vf_acquire_sw_info *vf_sw_info) +{ + vf_sw_info->os_type = VFPF_ACQUIRE_OS_LINUX_USERSPACE; + vf_sw_info->override_fw_version = 1; +} + +void *osal_dma_alloc_coherent(struct ecore_dev *p_dev, + dma_addr_t *phys, size_t size) +{ + const struct rte_memzone *mz; + char mz_name[RTE_MEMZONE_NAMESIZE]; + uint32_t core_id = rte_lcore_id(); + unsigned int socket_id; + + if (ecore_mz_count >= RTE_MAX_MEMZONE) { + DP_ERR(p_dev, "Memzone allocation count exceeds %u\n", + RTE_MAX_MEMZONE); + *phys = 0; + return OSAL_NULL; + } + + OSAL_MEM_ZERO(mz_name, sizeof(*mz_name)); + snprintf(mz_name, sizeof(mz_name), "%lx", + (unsigned long)rte_get_timer_cycles()); + if (core_id == (unsigned int)LCORE_ID_ANY) + core_id = rte_get_master_lcore(); + socket_id = rte_lcore_to_socket_id(core_id); + mz = rte_memzone_reserve_aligned(mz_name, size, socket_id, + RTE_MEMZONE_IOVA_CONTIG, RTE_CACHE_LINE_SIZE); + if (!mz) { + DP_ERR(p_dev, "Unable to allocate DMA memory " + "of size %zu bytes - %s\n", + size, rte_strerror(rte_errno)); + *phys = 0; + return OSAL_NULL; + } + *phys = mz->iova; + ecore_mz_mapping[ecore_mz_count++] = mz; + DP_VERBOSE(p_dev, ECORE_MSG_SP, + "Allocated dma memory size=%zu phys=0x%lx" + " virt=%p core=%d\n", + mz->len, (unsigned long)mz->iova, mz->addr, core_id); + return mz->addr; +} + +void *osal_dma_alloc_coherent_aligned(struct ecore_dev *p_dev, + dma_addr_t *phys, size_t size, int align) +{ + const struct rte_memzone *mz; + char mz_name[RTE_MEMZONE_NAMESIZE]; + uint32_t core_id = rte_lcore_id(); + unsigned int socket_id; + + if (ecore_mz_count >= RTE_MAX_MEMZONE) { + DP_ERR(p_dev, "Memzone allocation count exceeds %u\n", + RTE_MAX_MEMZONE); + *phys = 0; + return OSAL_NULL; + } + + OSAL_MEM_ZERO(mz_name, sizeof(*mz_name)); + snprintf(mz_name, sizeof(mz_name), "%lx", + (unsigned long)rte_get_timer_cycles()); + if (core_id == (unsigned int)LCORE_ID_ANY) + core_id = rte_get_master_lcore(); + socket_id = rte_lcore_to_socket_id(core_id); + mz = rte_memzone_reserve_aligned(mz_name, size, socket_id, + RTE_MEMZONE_IOVA_CONTIG, align); + if (!mz) { + DP_ERR(p_dev, "Unable to allocate DMA memory " + "of size %zu bytes - %s\n", + size, rte_strerror(rte_errno)); + *phys = 0; + return OSAL_NULL; + } + *phys = mz->iova; + ecore_mz_mapping[ecore_mz_count++] = mz; + DP_VERBOSE(p_dev, ECORE_MSG_SP, + "Allocated aligned dma memory size=%zu phys=0x%lx" + " virt=%p core=%d\n", + mz->len, (unsigned long)mz->iova, mz->addr, core_id); + return mz->addr; +} + +void osal_dma_free_mem(struct ecore_dev *p_dev, dma_addr_t phys) +{ + uint16_t j; + + for (j = 0 ; j < ecore_mz_count; j++) { + if (phys == ecore_mz_mapping[j]->iova) { + DP_VERBOSE(p_dev, ECORE_MSG_SP, + "Free memzone %s\n", ecore_mz_mapping[j]->name); + rte_memzone_free(ecore_mz_mapping[j]); + while (j < ecore_mz_count - 1) { + ecore_mz_mapping[j] = ecore_mz_mapping[j + 1]; + j++; + } + ecore_mz_count--; + return; + } + } + + DP_ERR(p_dev, "Unexpected memory free request\n"); +} + +#ifdef CONFIG_ECORE_ZIPPED_FW +u32 qede_unzip_data(struct ecore_hwfn *p_hwfn, u32 input_len, + u8 *input_buf, u32 max_size, u8 *unzip_buf) +{ + int rc; + + p_hwfn->stream->next_in = input_buf; + p_hwfn->stream->avail_in = input_len; + p_hwfn->stream->next_out = unzip_buf; + p_hwfn->stream->avail_out = max_size; + + rc = inflateInit2(p_hwfn->stream, MAX_WBITS); + + if (rc != Z_OK) { + DP_ERR(p_hwfn, + "zlib 
init failed, rc = %d\n", rc); + return 0; + } + + rc = inflate(p_hwfn->stream, Z_FINISH); + inflateEnd(p_hwfn->stream); + + if (rc != Z_OK && rc != Z_STREAM_END) { + DP_ERR(p_hwfn, + "FW unzip error: %s, rc=%d\n", p_hwfn->stream->msg, + rc); + return 0; + } + + return p_hwfn->stream->total_out / 4; +} +#endif + +void +qede_get_mcp_proto_stats(struct ecore_dev *edev, + enum ecore_mcp_protocol_type type, + union ecore_mcp_protocol_stats *stats) +{ + struct ecore_eth_stats lan_stats; + + if (type == ECORE_MCP_LAN_STATS) { + ecore_get_vport_stats(edev, &lan_stats); + + /* @DPDK */ + stats->lan_stats.ucast_rx_pkts = lan_stats.common.rx_ucast_pkts; + stats->lan_stats.ucast_tx_pkts = lan_stats.common.tx_ucast_pkts; + + stats->lan_stats.fcs_err = -1; + } else { + DP_INFO(edev, "Statistics request type %d not supported\n", + type); + } +} + +void +qede_hw_err_notify(struct ecore_hwfn *p_hwfn, enum ecore_hw_err_type err_type) +{ + char err_str[64]; + + switch (err_type) { + case ECORE_HW_ERR_FAN_FAIL: + strcpy(err_str, "Fan Failure"); + break; + case ECORE_HW_ERR_MFW_RESP_FAIL: + strcpy(err_str, "MFW Response Failure"); + break; + case ECORE_HW_ERR_HW_ATTN: + strcpy(err_str, "HW Attention"); + break; + case ECORE_HW_ERR_DMAE_FAIL: + strcpy(err_str, "DMAE Failure"); + break; + case ECORE_HW_ERR_RAMROD_FAIL: + strcpy(err_str, "Ramrod Failure"); + break; + case ECORE_HW_ERR_FW_ASSERT: + strcpy(err_str, "FW Assertion"); + break; + default: + strcpy(err_str, "Unknown"); + } + + DP_ERR(p_hwfn, "HW error occurred [%s]\n", err_str); + ecore_int_attn_clr_enable(p_hwfn->p_dev, true); +} + +u32 qede_crc32(u32 crc, u8 *ptr, u32 length) +{ + int i; + + while (length--) { + crc ^= *ptr++; + for (i = 0; i < 8; i++) + crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0); + } + return crc; +} diff --git a/src/spdk/dpdk/drivers/net/qede/base/bcm_osal.h b/src/spdk/dpdk/drivers/net/qede/base/bcm_osal.h new file mode 100644 index 000000000..8b2faec5b --- /dev/null +++ b/src/spdk/dpdk/drivers/net/qede/base/bcm_osal.h @@ -0,0 +1,459 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. + * All rights reserved. + * www.cavium.com + */ + +#ifndef __BCM_OSAL_H +#define __BCM_OSAL_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* Forward declaration */ +struct ecore_dev; +struct ecore_hwfn; +struct ecore_ptt; +struct ecore_vf_acquire_sw_info; +struct vf_pf_resc_request; +enum ecore_mcp_protocol_type; +union ecore_mcp_protocol_stats; +enum ecore_hw_err_type; + +void qed_link_update(struct ecore_hwfn *hwfn); + +#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN +#undef __BIG_ENDIAN +#ifndef __LITTLE_ENDIAN +#define __LITTLE_ENDIAN +#endif +#else +#undef __LITTLE_ENDIAN +#ifndef __BIG_ENDIAN +#define __BIG_ENDIAN +#endif +#endif + +#define OSAL_WARN(arg1, arg2, arg3, ...) 
(0) + +#define UNUSED(x) (void)(x) + +/* Memory Types */ +typedef uint8_t u8; +typedef uint16_t u16; +typedef uint32_t u32; +typedef uint64_t u64; + +typedef int16_t s16; +typedef int32_t s32; + +typedef u16 __le16; +typedef u32 __le32; +typedef u32 OSAL_BE32; + +#define osal_uintptr_t uintptr_t + +typedef rte_iova_t dma_addr_t; + +typedef rte_spinlock_t osal_spinlock_t; + +typedef void *osal_dpc_t; + +typedef size_t osal_size_t; + +typedef intptr_t osal_int_ptr_t; + +#define nothing do {} while (0) + +/* Delays */ + +#define DELAY(x) rte_delay_us(x) +#define usec_delay(x) DELAY(x) +#define msec_delay(x) DELAY(1000 * (x)) +#define OSAL_UDELAY(time) usec_delay(time) +#define OSAL_MSLEEP(time) msec_delay(time) + +/* Memory allocations and deallocations */ + +#define OSAL_NULL ((void *)0) +#define OSAL_ALLOC(dev, GFP, size) rte_malloc("qede", size, 0) +#define OSAL_ZALLOC(dev, GFP, size) rte_zmalloc("qede", size, 0) +#define OSAL_CALLOC(dev, GFP, num, size) rte_calloc("qede", num, size, 0) +#define OSAL_VZALLOC(dev, size) rte_zmalloc("qede", size, 0) +#define OSAL_FREE(dev, memory) \ + do { \ + rte_free((void *)memory); \ + memory = OSAL_NULL; \ + } while (0) +#define OSAL_VFREE(dev, memory) OSAL_FREE(dev, memory) +#define OSAL_MEM_ZERO(mem, size) bzero(mem, size) +#define OSAL_MEMCPY(dst, src, size) rte_memcpy(dst, src, size) +#define OSAL_MEMCMP(s1, s2, size) memcmp(s1, s2, size) +#define OSAL_MEMSET(dst, val, length) \ + memset(dst, val, length) + +void *osal_dma_alloc_coherent(struct ecore_dev *, dma_addr_t *, size_t); + +void *osal_dma_alloc_coherent_aligned(struct ecore_dev *, dma_addr_t *, + size_t, int); + +void osal_dma_free_mem(struct ecore_dev *edev, dma_addr_t phys); + +#define OSAL_DMA_ALLOC_COHERENT(dev, phys, size) \ + osal_dma_alloc_coherent(dev, phys, size) + +#define OSAL_DMA_ALLOC_COHERENT_ALIGNED(dev, phys, size, align) \ + osal_dma_alloc_coherent_aligned(dev, phys, size, align) + +#define OSAL_DMA_FREE_COHERENT(dev, virt, phys, size) \ + osal_dma_free_mem(dev, phys) + +/* HW reads/writes */ + +#define DIRECT_REG_RD(_dev, _reg_addr) rte_read32(_reg_addr) + +#define REG_RD(_p_hwfn, _reg_offset) \ + DIRECT_REG_RD(_p_hwfn, \ + ((u8 *)(uintptr_t)(_p_hwfn->regview) + (_reg_offset))) + +#define DIRECT_REG_WR16(_reg_addr, _val) rte_write16((_val), (_reg_addr)) + +#define DIRECT_REG_WR(_dev, _reg_addr, _val) rte_write32((_val), (_reg_addr)) + +#define DIRECT_REG_WR_RELAXED(_dev, _reg_addr, _val) \ + rte_write32_relaxed((_val), (_reg_addr)) + +#define REG_WR(_p_hwfn, _reg_offset, _val) \ + DIRECT_REG_WR(NULL, \ + ((u8 *)((uintptr_t)(_p_hwfn->regview)) + (_reg_offset)), (u32)_val) + +#define REG_WR16(_p_hwfn, _reg_offset, _val) \ + DIRECT_REG_WR16(((u8 *)(uintptr_t)(_p_hwfn->regview) + \ + (_reg_offset)), (u16)_val) + +#define DOORBELL(_p_hwfn, _db_addr, _val) \ + DIRECT_REG_WR_RELAXED((_p_hwfn), \ + ((u8 *)(uintptr_t)(_p_hwfn->doorbells) + \ + (_db_addr)), (u32)_val) + +#define DIRECT_REG_RD64(hwfn, addr) rte_read64(addr) +#define DIRECT_REG_WR64(hwfn, addr, value) rte_write64((value), (addr)) + +/* Mutexes */ + +typedef pthread_mutex_t osal_mutex_t; +#define OSAL_MUTEX_RELEASE(lock) pthread_mutex_unlock(lock) +#define OSAL_MUTEX_INIT(lock) pthread_mutex_init(lock, NULL) +#define OSAL_MUTEX_ACQUIRE(lock) pthread_mutex_lock(lock) +#define OSAL_MUTEX_ALLOC(hwfn, lock) nothing +#define OSAL_MUTEX_DEALLOC(lock) nothing + +/* Spinlocks */ + +#define OSAL_SPIN_LOCK_INIT(lock) rte_spinlock_init(lock) +#define OSAL_SPIN_LOCK(lock) rte_spinlock_lock(lock) +#define OSAL_SPIN_UNLOCK(lock) 
rte_spinlock_unlock(lock) +#define OSAL_SPIN_LOCK_IRQSAVE(lock, flags) \ + do { \ + UNUSED(lock); \ + flags = 0; \ + UNUSED(flags); \ + } while (0) +#define OSAL_SPIN_UNLOCK_IRQSAVE(lock, flags) nothing +#define OSAL_SPIN_LOCK_ALLOC(hwfn, lock) nothing +#define OSAL_SPIN_LOCK_DEALLOC(lock) nothing + +/* DPC */ + +#define OSAL_DPC_ALLOC(hwfn) OSAL_ALLOC(hwfn, GFP, sizeof(osal_dpc_t)) +#define OSAL_DPC_INIT(dpc, hwfn) nothing +#define OSAL_POLL_MODE_DPC(hwfn) nothing +#define OSAL_DPC_SYNC(hwfn) nothing + +/* Lists */ + +#define OSAL_LIST_SPLICE_INIT(new_list, list) nothing +#define OSAL_LIST_SPLICE_TAIL_INIT(new_list, list) nothing + +typedef struct _osal_list_entry_t { + struct _osal_list_entry_t *next, *prev; +} osal_list_entry_t; + +typedef struct osal_list_t { + osal_list_entry_t *head, *tail; + unsigned long cnt; +} osal_list_t; + +#define OSAL_LIST_INIT(list) \ + do { \ + (list)->head = NULL; \ + (list)->tail = NULL; \ + (list)->cnt = 0; \ + } while (0) + +#define OSAL_LIST_PUSH_HEAD(entry, list) \ + do { \ + (entry)->prev = (osal_list_entry_t *)0; \ + (entry)->next = (list)->head; \ + if ((list)->tail == (osal_list_entry_t *)0) { \ + (list)->tail = (entry); \ + } else { \ + (list)->head->prev = (entry); \ + } \ + (list)->head = (entry); \ + (list)->cnt++; \ + } while (0) + +#define OSAL_LIST_PUSH_TAIL(entry, list) \ + do { \ + (entry)->next = (osal_list_entry_t *)0; \ + (entry)->prev = (list)->tail; \ + if ((list)->tail) { \ + (list)->tail->next = (entry); \ + } else { \ + (list)->head = (entry); \ + } \ + (list)->tail = (entry); \ + (list)->cnt++; \ + } while (0) + +#define OSAL_LIST_FIRST_ENTRY(list, type, field) \ + (type *)((list)->head) + +#define OSAL_LIST_REMOVE_ENTRY(entry, list) \ + do { \ + if ((list)->head == (entry)) { \ + if ((list)->head) { \ + (list)->head = (list)->head->next; \ + if ((list)->head) { \ + (list)->head->prev = (osal_list_entry_t *)0;\ + } else { \ + (list)->tail = (osal_list_entry_t *)0; \ + } \ + (list)->cnt--; \ + } \ + } else if ((list)->tail == (entry)) { \ + if ((list)->tail) { \ + (list)->tail = (list)->tail->prev; \ + if ((list)->tail) { \ + (list)->tail->next = (osal_list_entry_t *)0;\ + } else { \ + (list)->head = (osal_list_entry_t *)0; \ + } \ + (list)->cnt--; \ + } \ + } else { \ + (entry)->prev->next = (entry)->next; \ + (entry)->next->prev = (entry)->prev; \ + (list)->cnt--; \ + } \ + } while (0) + +#define OSAL_LIST_IS_EMPTY(list) \ + ((list)->cnt == 0) + +#define OSAL_LIST_NEXT(entry, field, type) \ + (type *)((&((entry)->field))->next) + +/* TODO: Check field, type order */ + +#define OSAL_LIST_FOR_EACH_ENTRY(entry, list, field, type) \ + for (entry = OSAL_LIST_FIRST_ENTRY(list, type, field); \ + entry; \ + entry = OSAL_LIST_NEXT(entry, field, type)) + +#define OSAL_LIST_FOR_EACH_ENTRY_SAFE(entry, tmp_entry, list, field, type) \ + for (entry = OSAL_LIST_FIRST_ENTRY(list, type, field), \ + tmp_entry = (entry) ? OSAL_LIST_NEXT(entry, field, type) : NULL; \ + entry != NULL; \ + entry = (type *)tmp_entry, \ + tmp_entry = (entry) ? 
OSAL_LIST_NEXT(entry, field, type) : NULL) + +/* TODO: OSAL_LIST_INSERT_ENTRY_AFTER */ +#define OSAL_LIST_INSERT_ENTRY_AFTER(new_entry, entry, list) \ + OSAL_LIST_PUSH_HEAD(new_entry, list) + +/* PCI config space */ + +#define OSAL_PCI_READ_CONFIG_BYTE(dev, address, dst) nothing +#define OSAL_PCI_READ_CONFIG_WORD(dev, address, dst) nothing +#define OSAL_PCI_READ_CONFIG_DWORD(dev, address, dst) nothing +#define OSAL_PCI_FIND_EXT_CAPABILITY(dev, pcie_id) 0 +#define OSAL_PCI_FIND_CAPABILITY(dev, pcie_id) 0 +#define OSAL_PCI_WRITE_CONFIG_WORD(dev, address, val) nothing +#define OSAL_BAR_SIZE(dev, bar_id) 0 + +/* Barriers */ + +#define OSAL_MMIOWB(dev) rte_wmb() +#define OSAL_BARRIER(dev) rte_compiler_barrier() +#define OSAL_SMP_RMB(dev) rte_rmb() +#define OSAL_SMP_WMB(dev) rte_wmb() +#define OSAL_RMB(dev) rte_rmb() +#define OSAL_WMB(dev) rte_wmb() +#define OSAL_DMA_SYNC(dev, addr, length, is_post) nothing + +#define OSAL_BIT(nr) (1UL << (nr)) +#define OSAL_BITS_PER_BYTE (8) +#define OSAL_BITS_PER_UL (sizeof(unsigned long) * OSAL_BITS_PER_BYTE) +#define OSAL_BITS_PER_UL_MASK (OSAL_BITS_PER_UL - 1) + +/* Bitops */ +void qede_set_bit(u32, unsigned long *); +#define OSAL_SET_BIT(bit, bitmap) \ + qede_set_bit(bit, bitmap) + +void qede_clr_bit(u32, unsigned long *); +#define OSAL_CLEAR_BIT(bit, bitmap) \ + qede_clr_bit(bit, bitmap) + +bool qede_test_bit(u32, unsigned long *); +#define OSAL_TEST_BIT(bit, bitmap) \ + qede_test_bit(bit, bitmap) + +u32 qede_find_first_bit(unsigned long *, u32); +#define OSAL_FIND_FIRST_BIT(bitmap, length) \ + qede_find_first_bit(bitmap, length) + +u32 qede_find_first_zero_bit(unsigned long *, u32); +#define OSAL_FIND_FIRST_ZERO_BIT(bitmap, length) \ + qede_find_first_zero_bit(bitmap, length) + +#define OSAL_BUILD_BUG_ON(cond) nothing +#define ETH_ALEN RTE_ETHER_ADDR_LEN +#define ETHER_TYPE_VLAN RTE_ETHER_TYPE_VLAN +#define ETHER_TYPE_QINQ RTE_ETHER_TYPE_QINQ + +#define OSAL_BITMAP_WEIGHT(bitmap, count) 0 + +#define OSAL_LINK_UPDATE(hwfn) qed_link_update(hwfn) +#define OSAL_TRANSCEIVER_UPDATE(hwfn) nothing +#define OSAL_DCBX_AEN(hwfn, mib_type) nothing + +/* SR-IOV channel */ + +#define OSAL_VF_FLR_UPDATE(hwfn) nothing +#define OSAL_VF_SEND_MSG2PF(dev, done, msg, reply_addr, msg_size, reply_size) 0 +#define OSAL_VF_CQE_COMPLETION(_dev_p, _cqe, _protocol) (0) +#define OSAL_PF_VF_MSG(hwfn, vfid) 0 +#define OSAL_PF_VF_MALICIOUS(hwfn, vfid) nothing +#define OSAL_IOV_CHK_UCAST(hwfn, vfid, params) 0 +#define OSAL_IOV_POST_START_VPORT(hwfn, vf, vport_id, opaque_fid) nothing +#define OSAL_IOV_VF_ACQUIRE(hwfn, vfid) 0 +#define OSAL_IOV_VF_CLEANUP(hwfn, vfid) nothing +#define OSAL_IOV_VF_VPORT_UPDATE(hwfn, vfid, p_params, p_mask) 0 +#define OSAL_VF_UPDATE_ACQUIRE_RESC_RESP(_dev_p, _resc_resp) 0 +#define OSAL_IOV_GET_OS_TYPE() 0 +#define OSAL_IOV_VF_MSG_TYPE(hwfn, vfid, vf_msg_type) nothing +#define OSAL_IOV_PF_RESP_TYPE(hwfn, vfid, pf_resp_type) nothing +#define OSAL_IOV_VF_VPORT_STOP(hwfn, vf) nothing + +u32 qede_unzip_data(struct ecore_hwfn *p_hwfn, u32 input_len, + u8 *input_buf, u32 max_size, u8 *unzip_buf); +void qede_vf_fill_driver_data(struct ecore_hwfn *, struct vf_pf_resc_request *, + struct ecore_vf_acquire_sw_info *); +void qede_hw_err_notify(struct ecore_hwfn *p_hwfn, + enum ecore_hw_err_type err_type); +#define OSAL_VF_FILL_ACQUIRE_RESC_REQ(_dev_p, _resc_req, _os_info) \ + qede_vf_fill_driver_data(_dev_p, _resc_req, _os_info) + +#define OSAL_UNZIP_DATA(p_hwfn, input_len, buf, max_size, unzip_buf) \ + qede_unzip_data(p_hwfn, input_len, buf, max_size, unzip_buf) + +/* 
TODO: */ +#define OSAL_SCHEDULE_RECOVERY_HANDLER(hwfn) nothing +#define OSAL_HW_ERROR_OCCURRED(hwfn, err_type) \ + qede_hw_err_notify(hwfn, err_type) + +#define OSAL_NVM_IS_ACCESS_ENABLED(hwfn) (1) +#define OSAL_NUM_CPUS() 0 + +/* Utility functions */ + +#define RTE_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d)) +#define DIV_ROUND_UP(size, to_what) RTE_DIV_ROUND_UP(size, to_what) +#define RTE_ROUNDUP(x, y) ((((x) + ((y) - 1)) / (y)) * (y)) +#define ROUNDUP(value, to_what) RTE_ROUNDUP((value), (to_what)) + +unsigned long qede_log2_align(unsigned long n); +#define OSAL_ROUNDUP_POW_OF_TWO(val) \ + qede_log2_align(val) + +u32 qede_osal_log2(u32); +#define OSAL_LOG2(val) \ + qede_osal_log2(val) + +#define PRINT(format, ...) printf +#define PRINT_ERR(format, ...) PRINT + +#define OFFSETOF(str, field) __builtin_offsetof(str, field) +#define OSAL_ASSERT(is_assert) assert(is_assert) +#define OSAL_BEFORE_PF_START(file, engine) nothing +#define OSAL_AFTER_PF_STOP(file, engine) nothing + +/* Endian macros */ +#define OSAL_CPU_TO_BE32(val) rte_cpu_to_be_32(val) +#define OSAL_BE32_TO_CPU(val) rte_be_to_cpu_32(val) +#define OSAL_CPU_TO_LE32(val) rte_cpu_to_le_32(val) +#define OSAL_CPU_TO_LE16(val) rte_cpu_to_le_16(val) +#define OSAL_LE32_TO_CPU(val) rte_le_to_cpu_32(val) +#define OSAL_LE16_TO_CPU(val) rte_le_to_cpu_16(val) +#define OSAL_CPU_TO_BE64(val) rte_cpu_to_be_64(val) + +#define OSAL_ARRAY_SIZE(arr) RTE_DIM(arr) +#define OSAL_SPRINTF(name, pattern, ...) \ + sprintf(name, pattern, ##__VA_ARGS__) +#define OSAL_SNPRINTF(buf, size, format, ...) \ + snprintf(buf, size, format, ##__VA_ARGS__) +#define OSAL_STRLEN(string) strlen(string) +#define OSAL_STRCPY(dst, string) strcpy(dst, string) +#define OSAL_STRNCPY(dst, string, len) strncpy(dst, string, len) +#define OSAL_STRCMP(str1, str2) strcmp(str1, str2) +#define OSAL_STRTOUL(str, base, res) 0 + +#define OSAL_INLINE inline +#define OSAL_REG_ADDR(_p_hwfn, _offset) \ + (void *)((u8 *)(uintptr_t)(_p_hwfn->regview) + (_offset)) +#define OSAL_PAGE_SIZE 4096 +#define OSAL_CACHE_LINE_SIZE RTE_CACHE_LINE_SIZE +#define OSAL_IOMEM volatile +#define OSAL_UNUSED __rte_unused +#define OSAL_UNLIKELY(x) __builtin_expect(!!(x), 0) +#define OSAL_MIN_T(type, __min1, __min2) \ + ((type)(__min1) < (type)(__min2) ? (type)(__min1) : (type)(__min2)) +#define OSAL_MAX_T(type, __max1, __max2) \ + ((type)(__max1) > (type)(__max2) ? 
(type)(__max1) : (type)(__max2)) + +void qede_get_mcp_proto_stats(struct ecore_dev *, enum ecore_mcp_protocol_type, + union ecore_mcp_protocol_stats *); +#define OSAL_GET_PROTOCOL_STATS(dev, type, stats) \ + qede_get_mcp_proto_stats(dev, type, stats) + +#define OSAL_SLOWPATH_IRQ_REQ(p_hwfn) (0) + +u32 qede_crc32(u32 crc, u8 *ptr, u32 length); +#define OSAL_CRC32(crc, buf, length) qede_crc32(crc, buf, length) +#define OSAL_CRC8_POPULATE(table, polynomial) nothing +#define OSAL_CRC8(table, pdata, nbytes, crc) 0 +#define OSAL_MFW_TLV_REQ(p_hwfn) nothing +#define OSAL_MFW_FILL_TLV_DATA(type, buf, data) (0) +#define OSAL_HW_INFO_CHANGE(p_hwfn, change) nothing +#define OSAL_MFW_CMD_PREEMPT(p_hwfn) nothing +#define OSAL_PF_VALIDATE_MODIFY_TUNN_CONFIG(p_hwfn, mask, b_update, tunn) 0 + +#define OSAL_DIV_S64(a, b) ((a) / (b)) +#define OSAL_LLDP_RX_TLVS(p_hwfn, tlv_buf, tlv_size) nothing +#define OSAL_GET_EPOCH(p_hwfn) 0 +#define OSAL_DBG_ALLOC_USER_DATA(p_hwfn, user_data_ptr) (0) +#define OSAL_DB_REC_OCCURRED(p_hwfn) nothing + +#endif /* __BCM_OSAL_H */ diff --git a/src/spdk/dpdk/drivers/net/qede/base/common_hsi.h b/src/spdk/dpdk/drivers/net/qede/base/common_hsi.h new file mode 100644 index 000000000..e230fe5ac --- /dev/null +++ b/src/spdk/dpdk/drivers/net/qede/base/common_hsi.h @@ -0,0 +1,1700 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. + * All rights reserved. + * www.cavium.com + */ + +#ifndef __COMMON_HSI__ +#define __COMMON_HSI__ +/********************************/ +/* PROTOCOL COMMON FW CONSTANTS */ +/********************************/ + +/* Temporarily here should be added to HSI automatically by resource allocation + * tool. + */ +#define T_TEST_AGG_INT_TEMP 6 +#define M_TEST_AGG_INT_TEMP 8 +#define U_TEST_AGG_INT_TEMP 6 +#define X_TEST_AGG_INT_TEMP 14 +#define Y_TEST_AGG_INT_TEMP 4 +#define P_TEST_AGG_INT_TEMP 4 + +#define X_FINAL_CLEANUP_AGG_INT 1 + +#define EVENT_RING_PAGE_SIZE_BYTES 4096 + +#define NUM_OF_GLOBAL_QUEUES 128 +#define COMMON_QUEUE_ENTRY_MAX_BYTE_SIZE 64 + +#define ISCSI_CDU_TASK_SEG_TYPE 0 +#define FCOE_CDU_TASK_SEG_TYPE 0 +#define RDMA_CDU_TASK_SEG_TYPE 1 +#define ETH_CDU_TASK_SEG_TYPE 2 + +#define FW_ASSERT_GENERAL_ATTN_IDX 32 + +#define EAGLE_ENG1_WORKAROUND_NIG_FLOWCTRL_MODE 3 + +/* Queue Zone sizes in bytes */ +#define TSTORM_QZONE_SIZE 8 /*tstorm_queue_zone*/ +/*mstorm_eth_queue_zone. Used only for RX producer of VFs in backward + * compatibility mode. + */ +#define MSTORM_QZONE_SIZE 16 +#define USTORM_QZONE_SIZE 8 /*ustorm_queue_zone*/ +#define XSTORM_QZONE_SIZE 8 /*xstorm_eth_queue_zone*/ +#define YSTORM_QZONE_SIZE 0 +#define PSTORM_QZONE_SIZE 0 + +/*Log of mstorm default VF zone size.*/ +#define MSTORM_VF_ZONE_DEFAULT_SIZE_LOG 7 +/*Maximum number of RX queues that can be allocated to VF by default*/ +#define ETH_MAX_NUM_RX_QUEUES_PER_VF_DEFAULT 16 +/*Maximum number of RX queues that can be allocated to VF with doubled VF zone + * size. Up to 96 VF supported in this mode + */ +#define ETH_MAX_NUM_RX_QUEUES_PER_VF_DOUBLE 48 +/*Maximum number of RX queues that can be allocated to VF with 4 VF zone size. 
+ * Up to 48 VF supported in this mode + */ +#define ETH_MAX_NUM_RX_QUEUES_PER_VF_QUAD 112 + +#define ETH_RGSRC_CTX_SIZE 6 /*Size in QREGS*/ +#define ETH_TGSRC_CTX_SIZE 6 /*Size in QREGS*/ +/********************************/ +/* CORE (LIGHT L2) FW CONSTANTS */ +/********************************/ + +#define CORE_LL2_MAX_RAMROD_PER_CON 8 +#define CORE_LL2_TX_BD_PAGE_SIZE_BYTES 4096 +#define CORE_LL2_RX_BD_PAGE_SIZE_BYTES 4096 +#define CORE_LL2_RX_CQE_PAGE_SIZE_BYTES 4096 +#define CORE_LL2_RX_NUM_NEXT_PAGE_BDS 1 + +#define CORE_LL2_TX_MAX_BDS_PER_PACKET 12 + +#define CORE_SPQE_PAGE_SIZE_BYTES 4096 + +/* Number of LL2 RAM based (RX producers and statistics) queues */ +#define MAX_NUM_LL2_RX_RAM_QUEUES 32 +/* Number of LL2 context based (RX producers and statistics) queues */ +#define MAX_NUM_LL2_RX_CTX_QUEUES 208 +#define MAX_NUM_LL2_RX_QUEUES (MAX_NUM_LL2_RX_RAM_QUEUES + \ + MAX_NUM_LL2_RX_CTX_QUEUES) + +#define MAX_NUM_LL2_TX_STATS_COUNTERS 48 + + +/****************************************************************************/ +/* Include firmware version number only- do not add constants here to avoid */ +/* redundunt compilations */ +/****************************************************************************/ + + +#define FW_MAJOR_VERSION 8 +#define FW_MINOR_VERSION 40 +#define FW_REVISION_VERSION 33 +#define FW_ENGINEERING_VERSION 0 + +/***********************/ +/* COMMON HW CONSTANTS */ +/***********************/ + +/* PCI functions */ +#define MAX_NUM_PORTS_BB (2) +#define MAX_NUM_PORTS_K2 (4) +#define MAX_NUM_PORTS (MAX_NUM_PORTS_K2) + +#define MAX_NUM_PFS_BB (8) +#define MAX_NUM_PFS_K2 (16) +#define MAX_NUM_PFS (MAX_NUM_PFS_K2) +#define MAX_NUM_OF_PFS_IN_CHIP (16) /* On both engines */ + +#define MAX_NUM_VFS_BB (120) +#define MAX_NUM_VFS_K2 (192) +#define COMMON_MAX_NUM_VFS (MAX_NUM_VFS_K2) + +#define MAX_NUM_FUNCTIONS_BB (MAX_NUM_PFS_BB + MAX_NUM_VFS_BB) +#define MAX_NUM_FUNCTIONS_K2 (MAX_NUM_PFS_K2 + MAX_NUM_VFS_K2) + +/* in both BB and K2, the VF number starts from 16. 
so for arrays containing all + * possible PFs and VFs - we need a constant for this size + */ +#define MAX_FUNCTION_NUMBER_BB (MAX_NUM_PFS + MAX_NUM_VFS_BB) +#define MAX_FUNCTION_NUMBER_K2 (MAX_NUM_PFS + MAX_NUM_VFS_K2) +#define COMMON_MAX_FUNCTION_NUMBER (MAX_NUM_PFS + MAX_NUM_VFS_K2) + +#define MAX_NUM_VPORTS_K2 (208) +#define MAX_NUM_VPORTS_BB (160) +#define COMMON_MAX_NUM_VPORTS (MAX_NUM_VPORTS_K2) + +#define MAX_NUM_L2_QUEUES_BB (256) +#define MAX_NUM_L2_QUEUES_K2 (320) + +#define FW_LOWEST_CONSUMEDDMAE_CHANNEL (26) + +/* Traffic classes in network-facing blocks (PBF, BTB, NIG, BRB, PRS and QM) */ +#define NUM_PHYS_TCS_4PORT_K2 4 +#define NUM_OF_PHYS_TCS 8 +#define PURE_LB_TC NUM_OF_PHYS_TCS +#define NUM_TCS_4PORT_K2 (NUM_PHYS_TCS_4PORT_K2 + 1) +#define NUM_OF_TCS (NUM_OF_PHYS_TCS + 1) + +/* CIDs */ +#define NUM_OF_CONNECTION_TYPES (8) +#define NUM_OF_TASK_TYPES (8) +#define NUM_OF_LCIDS (320) + +/* Global PXP windows (GTT) */ +#define NUM_OF_GTT 19 +#define GTT_DWORD_SIZE_BITS 10 +#define GTT_BYTE_SIZE_BITS (GTT_DWORD_SIZE_BITS + 2) +#define GTT_DWORD_SIZE (1 << GTT_DWORD_SIZE_BITS) + +/* Tools Version */ +#define TOOLS_VERSION 10 +/*****************/ +/* CDU CONSTANTS */ +/*****************/ + +#define CDU_SEG_TYPE_OFFSET_REG_TYPE_SHIFT (17) +#define CDU_SEG_TYPE_OFFSET_REG_OFFSET_MASK (0x1ffff) + +#define CDU_VF_FL_SEG_TYPE_OFFSET_REG_TYPE_SHIFT (12) +#define CDU_VF_FL_SEG_TYPE_OFFSET_REG_OFFSET_MASK (0xfff) + +#define CDU_CONTEXT_VALIDATION_CFG_ENABLE_SHIFT (0) +#define CDU_CONTEXT_VALIDATION_CFG_VALIDATION_TYPE_SHIFT (1) +#define CDU_CONTEXT_VALIDATION_CFG_USE_TYPE (2) +#define CDU_CONTEXT_VALIDATION_CFG_USE_REGION (3) +#define CDU_CONTEXT_VALIDATION_CFG_USE_CID (4) +#define CDU_CONTEXT_VALIDATION_CFG_USE_ACTIVE (5) + +/*enabled, type A, use all */ +#define CDU_CONTEXT_VALIDATION_DEFAULT_CFG (0x3D) + +/*****************/ +/* DQ CONSTANTS */ +/*****************/ + +/* DEMS */ +#define DQ_DEMS_LEGACY 0 +#define DQ_DEMS_TOE_MORE_TO_SEND 3 +#define DQ_DEMS_TOE_LOCAL_ADV_WND 4 +#define DQ_DEMS_ROCE_CQ_CONS 7 + +/* XCM agg val selection (HW) */ +#define DQ_XCM_AGG_VAL_SEL_WORD2 0 +#define DQ_XCM_AGG_VAL_SEL_WORD3 1 +#define DQ_XCM_AGG_VAL_SEL_WORD4 2 +#define DQ_XCM_AGG_VAL_SEL_WORD5 3 +#define DQ_XCM_AGG_VAL_SEL_REG3 4 +#define DQ_XCM_AGG_VAL_SEL_REG4 5 +#define DQ_XCM_AGG_VAL_SEL_REG5 6 +#define DQ_XCM_AGG_VAL_SEL_REG6 7 + +/* XCM agg val selection (FW) */ +#define DQ_XCM_ETH_EDPM_NUM_BDS_CMD \ + DQ_XCM_AGG_VAL_SEL_WORD2 +#define DQ_XCM_ETH_TX_BD_CONS_CMD \ + DQ_XCM_AGG_VAL_SEL_WORD3 +#define DQ_XCM_CORE_TX_BD_CONS_CMD \ + DQ_XCM_AGG_VAL_SEL_WORD3 +#define DQ_XCM_ETH_TX_BD_PROD_CMD \ + DQ_XCM_AGG_VAL_SEL_WORD4 +#define DQ_XCM_CORE_TX_BD_PROD_CMD \ + DQ_XCM_AGG_VAL_SEL_WORD4 +#define DQ_XCM_CORE_SPQ_PROD_CMD \ + DQ_XCM_AGG_VAL_SEL_WORD4 +#define DQ_XCM_ETH_GO_TO_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD5 +#define DQ_XCM_FCOE_SQ_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3 +#define DQ_XCM_FCOE_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 +#define DQ_XCM_FCOE_X_FERQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD5 +#define DQ_XCM_ISCSI_SQ_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3 +#define DQ_XCM_ISCSI_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 +#define DQ_XCM_ISCSI_MORE_TO_SEND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG3 +#define DQ_XCM_ISCSI_EXP_STAT_SN_CMD DQ_XCM_AGG_VAL_SEL_REG6 +#define DQ_XCM_ROCE_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 +#define DQ_XCM_TOE_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 +#define DQ_XCM_TOE_MORE_TO_SEND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG3 +#define DQ_XCM_TOE_LOCAL_ADV_WND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG4 +#define 
DQ_XCM_ROCE_ACK_EDPM_DORQ_SEQ_CMD DQ_XCM_AGG_VAL_SEL_WORD5 + +/* UCM agg val selection (HW) */ +#define DQ_UCM_AGG_VAL_SEL_WORD0 0 +#define DQ_UCM_AGG_VAL_SEL_WORD1 1 +#define DQ_UCM_AGG_VAL_SEL_WORD2 2 +#define DQ_UCM_AGG_VAL_SEL_WORD3 3 +#define DQ_UCM_AGG_VAL_SEL_REG0 4 +#define DQ_UCM_AGG_VAL_SEL_REG1 5 +#define DQ_UCM_AGG_VAL_SEL_REG2 6 +#define DQ_UCM_AGG_VAL_SEL_REG3 7 + +/* UCM agg val selection (FW) */ +#define DQ_UCM_ETH_PMD_TX_CONS_CMD DQ_UCM_AGG_VAL_SEL_WORD2 +#define DQ_UCM_ETH_PMD_RX_CONS_CMD DQ_UCM_AGG_VAL_SEL_WORD3 +#define DQ_UCM_ROCE_CQ_CONS_CMD DQ_UCM_AGG_VAL_SEL_REG0 +#define DQ_UCM_ROCE_CQ_PROD_CMD DQ_UCM_AGG_VAL_SEL_REG2 + +/* TCM agg val selection (HW) */ +#define DQ_TCM_AGG_VAL_SEL_WORD0 0 +#define DQ_TCM_AGG_VAL_SEL_WORD1 1 +#define DQ_TCM_AGG_VAL_SEL_WORD2 2 +#define DQ_TCM_AGG_VAL_SEL_WORD3 3 +#define DQ_TCM_AGG_VAL_SEL_REG1 4 +#define DQ_TCM_AGG_VAL_SEL_REG2 5 +#define DQ_TCM_AGG_VAL_SEL_REG6 6 +#define DQ_TCM_AGG_VAL_SEL_REG9 7 + +/* TCM agg val selection (FW) */ +#define DQ_TCM_L2B_BD_PROD_CMD DQ_TCM_AGG_VAL_SEL_WORD1 +#define DQ_TCM_ROCE_RQ_PROD_CMD DQ_TCM_AGG_VAL_SEL_WORD0 + +/* XCM agg counter flag selection (HW) */ +#define DQ_XCM_AGG_FLG_SHIFT_BIT14 0 +#define DQ_XCM_AGG_FLG_SHIFT_BIT15 1 +#define DQ_XCM_AGG_FLG_SHIFT_CF12 2 +#define DQ_XCM_AGG_FLG_SHIFT_CF13 3 +#define DQ_XCM_AGG_FLG_SHIFT_CF18 4 +#define DQ_XCM_AGG_FLG_SHIFT_CF19 5 +#define DQ_XCM_AGG_FLG_SHIFT_CF22 6 +#define DQ_XCM_AGG_FLG_SHIFT_CF23 7 + +/* XCM agg counter flag selection (FW) */ +#define DQ_XCM_ETH_DQ_CF_CMD (1 << \ + DQ_XCM_AGG_FLG_SHIFT_CF18) +#define DQ_XCM_CORE_DQ_CF_CMD (1 << \ + DQ_XCM_AGG_FLG_SHIFT_CF18) +#define DQ_XCM_ETH_TERMINATE_CMD (1 << \ + DQ_XCM_AGG_FLG_SHIFT_CF19) +#define DQ_XCM_CORE_TERMINATE_CMD (1 << \ + DQ_XCM_AGG_FLG_SHIFT_CF19) +#define DQ_XCM_ETH_SLOW_PATH_CMD (1 << \ + DQ_XCM_AGG_FLG_SHIFT_CF22) +#define DQ_XCM_CORE_SLOW_PATH_CMD (1 << \ + DQ_XCM_AGG_FLG_SHIFT_CF22) +#define DQ_XCM_ETH_TPH_EN_CMD (1 << \ + DQ_XCM_AGG_FLG_SHIFT_CF23) +#define DQ_XCM_FCOE_SLOW_PATH_CMD (1 << DQ_XCM_AGG_FLG_SHIFT_CF22) +#define DQ_XCM_ISCSI_DQ_FLUSH_CMD (1 << DQ_XCM_AGG_FLG_SHIFT_CF19) +#define DQ_XCM_ISCSI_SLOW_PATH_CMD (1 << DQ_XCM_AGG_FLG_SHIFT_CF22) +#define DQ_XCM_ISCSI_PROC_ONLY_CLEANUP_CMD (1 << DQ_XCM_AGG_FLG_SHIFT_CF23) +#define DQ_XCM_TOE_DQ_FLUSH_CMD (1 << DQ_XCM_AGG_FLG_SHIFT_CF19) +#define DQ_XCM_TOE_SLOW_PATH_CMD (1 << DQ_XCM_AGG_FLG_SHIFT_CF22) + +/* UCM agg counter flag selection (HW) */ +#define DQ_UCM_AGG_FLG_SHIFT_CF0 0 +#define DQ_UCM_AGG_FLG_SHIFT_CF1 1 +#define DQ_UCM_AGG_FLG_SHIFT_CF3 2 +#define DQ_UCM_AGG_FLG_SHIFT_CF4 3 +#define DQ_UCM_AGG_FLG_SHIFT_CF5 4 +#define DQ_UCM_AGG_FLG_SHIFT_CF6 5 +#define DQ_UCM_AGG_FLG_SHIFT_RULE0EN 6 +#define DQ_UCM_AGG_FLG_SHIFT_RULE1EN 7 + +/* UCM agg counter flag selection (FW) */ +#define DQ_UCM_NVMF_NEW_CQE_CF_CMD (1 << DQ_UCM_AGG_FLG_SHIFT_CF1) +#define DQ_UCM_ETH_PMD_TX_ARM_CMD (1 << DQ_UCM_AGG_FLG_SHIFT_CF4) +#define DQ_UCM_ETH_PMD_RX_ARM_CMD (1 << DQ_UCM_AGG_FLG_SHIFT_CF5) +#define DQ_UCM_ROCE_CQ_ARM_SE_CF_CMD (1 << DQ_UCM_AGG_FLG_SHIFT_CF4) +#define DQ_UCM_ROCE_CQ_ARM_CF_CMD (1 << DQ_UCM_AGG_FLG_SHIFT_CF5) +#define DQ_UCM_TOE_TIMER_STOP_ALL_CMD (1 << DQ_UCM_AGG_FLG_SHIFT_CF3) +#define DQ_UCM_TOE_SLOW_PATH_CF_CMD (1 << DQ_UCM_AGG_FLG_SHIFT_CF4) +#define DQ_UCM_TOE_DQ_CF_CMD (1 << DQ_UCM_AGG_FLG_SHIFT_CF5) + +/* TCM agg counter flag selection (HW) */ +#define DQ_TCM_AGG_FLG_SHIFT_CF0 0 +#define DQ_TCM_AGG_FLG_SHIFT_CF1 1 +#define DQ_TCM_AGG_FLG_SHIFT_CF2 2 +#define DQ_TCM_AGG_FLG_SHIFT_CF3 3 +#define 
DQ_TCM_AGG_FLG_SHIFT_CF4 4 +#define DQ_TCM_AGG_FLG_SHIFT_CF5 5 +#define DQ_TCM_AGG_FLG_SHIFT_CF6 6 +#define DQ_TCM_AGG_FLG_SHIFT_CF7 7 + +/* TCM agg counter flag selection (FW) */ +#define DQ_TCM_FCOE_FLUSH_Q0_CMD (1 << DQ_TCM_AGG_FLG_SHIFT_CF1) +#define DQ_TCM_FCOE_DUMMY_TIMER_CMD (1 << DQ_TCM_AGG_FLG_SHIFT_CF2) +#define DQ_TCM_FCOE_TIMER_STOP_ALL_CMD (1 << DQ_TCM_AGG_FLG_SHIFT_CF3) +#define DQ_TCM_ISCSI_FLUSH_Q0_CMD (1 << DQ_TCM_AGG_FLG_SHIFT_CF1) +#define DQ_TCM_ISCSI_TIMER_STOP_ALL_CMD (1 << DQ_TCM_AGG_FLG_SHIFT_CF3) +#define DQ_TCM_TOE_FLUSH_Q0_CMD (1 << DQ_TCM_AGG_FLG_SHIFT_CF1) +#define DQ_TCM_TOE_TIMER_STOP_ALL_CMD (1 << DQ_TCM_AGG_FLG_SHIFT_CF3) +#define DQ_TCM_IWARP_POST_RQ_CF_CMD (1 << DQ_TCM_AGG_FLG_SHIFT_CF1) + +/* PWM address mapping */ +#define DQ_PWM_OFFSET_DPM_BASE 0x0 +#define DQ_PWM_OFFSET_DPM_END 0x27 +#define DQ_PWM_OFFSET_XCM32_24ICID_BASE 0x28 +#define DQ_PWM_OFFSET_UCM32_24ICID_BASE 0x30 +#define DQ_PWM_OFFSET_TCM32_24ICID_BASE 0x38 +#define DQ_PWM_OFFSET_XCM16_BASE 0x40 +#define DQ_PWM_OFFSET_XCM32_BASE 0x44 +#define DQ_PWM_OFFSET_UCM16_BASE 0x48 +#define DQ_PWM_OFFSET_UCM32_BASE 0x4C +#define DQ_PWM_OFFSET_UCM16_4 0x50 +#define DQ_PWM_OFFSET_TCM16_BASE 0x58 +#define DQ_PWM_OFFSET_TCM32_BASE 0x5C +#define DQ_PWM_OFFSET_XCM_FLAGS 0x68 +#define DQ_PWM_OFFSET_UCM_FLAGS 0x69 +#define DQ_PWM_OFFSET_TCM_FLAGS 0x6B + +#define DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD (DQ_PWM_OFFSET_XCM16_BASE + 2) +#define DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT (DQ_PWM_OFFSET_UCM32_BASE) +#define DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_16BIT (DQ_PWM_OFFSET_UCM16_4) +#define DQ_PWM_OFFSET_UCM_RDMA_INT_TIMEOUT (DQ_PWM_OFFSET_UCM16_BASE + 2) +#define DQ_PWM_OFFSET_UCM_RDMA_ARM_FLAGS (DQ_PWM_OFFSET_UCM_FLAGS) +#define DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD (DQ_PWM_OFFSET_TCM16_BASE + 1) +#define DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD (DQ_PWM_OFFSET_TCM16_BASE + 3) + +#define DQ_PWM_OFFSET_XCM_RDMA_24B_ICID_SQ_PROD \ + (DQ_PWM_OFFSET_XCM32_24ICID_BASE + 2) +#define DQ_PWM_OFFSET_UCM_RDMA_24B_ICID_CQ_CONS_32BIT \ + (DQ_PWM_OFFSET_UCM32_24ICID_BASE + 4) +#define DQ_PWM_OFFSET_TCM_ROCE_24B_ICID_RQ_PROD \ + (DQ_PWM_OFFSET_TCM32_24ICID_BASE + 1) + +#define DQ_REGION_SHIFT (12) + +/* DPM */ +#define DQ_DPM_WQE_BUFF_SIZE (320) + +/* Conn type ranges */ +#define DQ_CONN_TYPE_RANGE_SHIFT (4) + +/*****************/ +/* QM CONSTANTS */ +/*****************/ + +/* number of TX queues in the QM */ +#define MAX_QM_TX_QUEUES_K2 512 +#define MAX_QM_TX_QUEUES_BB 448 +#define MAX_QM_TX_QUEUES MAX_QM_TX_QUEUES_K2 + +/* number of Other queues in the QM */ +#define MAX_QM_OTHER_QUEUES_BB 64 +#define MAX_QM_OTHER_QUEUES_K2 128 +#define MAX_QM_OTHER_QUEUES MAX_QM_OTHER_QUEUES_K2 + +/* number of queues in a PF queue group */ +#define QM_PF_QUEUE_GROUP_SIZE 8 + +/* the size of a single queue element in bytes */ +#define QM_PQ_ELEMENT_SIZE 4 + +/* base number of Tx PQs in the CM PQ representation. 
+ * should be used when storing PQ IDs in CM PQ registers and context + */ +#define CM_TX_PQ_BASE 0x200 + +/* number of global Vport/QCN rate limiters */ +#define MAX_QM_GLOBAL_RLS 256 + +/* number of global rate limiters */ +#define MAX_QM_GLOBAL_RLS 256 +#define COMMON_MAX_QM_GLOBAL_RLS (MAX_QM_GLOBAL_RLS) + +/* QM registers data */ +#define QM_LINE_CRD_REG_WIDTH 16 +#define QM_LINE_CRD_REG_SIGN_BIT (1 << (QM_LINE_CRD_REG_WIDTH - 1)) +#define QM_BYTE_CRD_REG_WIDTH 24 +#define QM_BYTE_CRD_REG_SIGN_BIT (1 << (QM_BYTE_CRD_REG_WIDTH - 1)) +#define QM_WFQ_CRD_REG_WIDTH 32 +#define QM_WFQ_CRD_REG_SIGN_BIT (1U << (QM_WFQ_CRD_REG_WIDTH - 1)) +#define QM_RL_CRD_REG_WIDTH 32 +#define QM_RL_CRD_REG_SIGN_BIT (1U << (QM_RL_CRD_REG_WIDTH - 1)) + +/*****************/ +/* CAU CONSTANTS */ +/*****************/ + +#define CAU_FSM_ETH_RX 0 +#define CAU_FSM_ETH_TX 1 + +/* Number of Protocol Indices per Status Block */ +#define PIS_PER_SB 12 +#define MAX_PIS_PER_SB PIS_PER_SB + +/* fsm is stopped or not valid for this sb */ +#define CAU_HC_STOPPED_STATE 3 +/* fsm is working without interrupt coalescing for this sb*/ +#define CAU_HC_DISABLE_STATE 4 +/* fsm is working with interrupt coalescing for this sb*/ +#define CAU_HC_ENABLE_STATE 0 + + +/*****************/ +/* IGU CONSTANTS */ +/*****************/ + +#define MAX_SB_PER_PATH_K2 (368) +#define MAX_SB_PER_PATH_BB (288) +#define MAX_TOT_SB_PER_PATH MAX_SB_PER_PATH_K2 + +#define MAX_SB_PER_PF_MIMD 129 +#define MAX_SB_PER_PF_SIMD 64 +#define MAX_SB_PER_VF 64 + +/* Memory addresses on the BAR for the IGU Sub Block */ +#define IGU_MEM_BASE 0x0000 + +#define IGU_MEM_MSIX_BASE 0x0000 +#define IGU_MEM_MSIX_UPPER 0x0101 +#define IGU_MEM_MSIX_RESERVED_UPPER 0x01ff + +#define IGU_MEM_PBA_MSIX_BASE 0x0200 +#define IGU_MEM_PBA_MSIX_UPPER 0x0202 +#define IGU_MEM_PBA_MSIX_RESERVED_UPPER 0x03ff + +#define IGU_CMD_INT_ACK_BASE 0x0400 +#define IGU_CMD_INT_ACK_RESERVED_UPPER 0x05ff + +#define IGU_CMD_ATTN_BIT_UPD_UPPER 0x05f0 +#define IGU_CMD_ATTN_BIT_SET_UPPER 0x05f1 +#define IGU_CMD_ATTN_BIT_CLR_UPPER 0x05f2 + +#define IGU_REG_SISR_MDPC_WMASK_UPPER 0x05f3 +#define IGU_REG_SISR_MDPC_WMASK_LSB_UPPER 0x05f4 +#define IGU_REG_SISR_MDPC_WMASK_MSB_UPPER 0x05f5 +#define IGU_REG_SISR_MDPC_WOMASK_UPPER 0x05f6 + +#define IGU_CMD_PROD_UPD_BASE 0x0600 +#define IGU_CMD_PROD_UPD_RESERVED_UPPER 0x07ff + +/*****************/ +/* PXP CONSTANTS */ +/*****************/ + +/* Bars for Blocks */ +#define PXP_BAR_GRC 0 +#define PXP_BAR_TSDM 0 +#define PXP_BAR_USDM 0 +#define PXP_BAR_XSDM 0 +#define PXP_BAR_MSDM 0 +#define PXP_BAR_YSDM 0 +#define PXP_BAR_PSDM 0 +#define PXP_BAR_IGU 0 +#define PXP_BAR_DQ 1 + +/* PTT and GTT */ +#define PXP_PER_PF_ENTRY_SIZE 8 +#define PXP_NUM_GLOBAL_WINDOWS 243 +#define PXP_GLOBAL_ENTRY_SIZE 4 +#define PXP_ADMIN_WINDOW_ALLOWED_LENGTH 4 +#define PXP_PF_WINDOW_ADMIN_START 0 +#define PXP_PF_WINDOW_ADMIN_LENGTH 0x1000 +#define PXP_PF_WINDOW_ADMIN_END (PXP_PF_WINDOW_ADMIN_START + \ + PXP_PF_WINDOW_ADMIN_LENGTH - 1) +#define PXP_PF_WINDOW_ADMIN_PER_PF_START 0 +#define PXP_PF_WINDOW_ADMIN_PER_PF_LENGTH (PXP_NUM_PF_WINDOWS * \ + PXP_PER_PF_ENTRY_SIZE) +#define PXP_PF_WINDOW_ADMIN_PER_PF_END (PXP_PF_WINDOW_ADMIN_PER_PF_START + \ + PXP_PF_WINDOW_ADMIN_PER_PF_LENGTH - 1) +#define PXP_PF_WINDOW_ADMIN_GLOBAL_START 0x200 +#define PXP_PF_WINDOW_ADMIN_GLOBAL_LENGTH (PXP_NUM_GLOBAL_WINDOWS * \ + PXP_GLOBAL_ENTRY_SIZE) +#define PXP_PF_WINDOW_ADMIN_GLOBAL_END \ + (PXP_PF_WINDOW_ADMIN_GLOBAL_START + \ + PXP_PF_WINDOW_ADMIN_GLOBAL_LENGTH - 1) +#define PXP_PF_GLOBAL_PRETEND_ADDR 0x1f0 
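+/* Illustrative note on the window sizing macros above: with the values given
+ * here, PXP_PF_WINDOW_ADMIN_PER_PF_LENGTH expands to 12 * 8 = 96 bytes (so
+ * PXP_PF_WINDOW_ADMIN_PER_PF_END is 95), and
+ * PXP_PF_WINDOW_ADMIN_GLOBAL_LENGTH expands to 243 * 4 = 972 bytes starting
+ * at 0x200 (so PXP_PF_WINDOW_ADMIN_GLOBAL_END is 0x5cb).
+ */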
+#define PXP_PF_ME_OPAQUE_MASK_ADDR 0xf4 +#define PXP_PF_ME_OPAQUE_ADDR 0x1f8 +#define PXP_PF_ME_CONCRETE_ADDR 0x1fc + +#define PXP_NUM_PF_WINDOWS 12 + +#define PXP_EXTERNAL_BAR_PF_WINDOW_START 0x1000 +#define PXP_EXTERNAL_BAR_PF_WINDOW_NUM PXP_NUM_PF_WINDOWS +#define PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE 0x1000 +#define PXP_EXTERNAL_BAR_PF_WINDOW_LENGTH \ + (PXP_EXTERNAL_BAR_PF_WINDOW_NUM * \ + PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE) +#define PXP_EXTERNAL_BAR_PF_WINDOW_END \ + (PXP_EXTERNAL_BAR_PF_WINDOW_START + \ + PXP_EXTERNAL_BAR_PF_WINDOW_LENGTH - 1) + +#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_START \ + (PXP_EXTERNAL_BAR_PF_WINDOW_END + 1) +#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_NUM PXP_NUM_GLOBAL_WINDOWS +#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_SINGLE_SIZE 0x1000 +#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_LENGTH \ + (PXP_EXTERNAL_BAR_GLOBAL_WINDOW_NUM * \ + PXP_EXTERNAL_BAR_GLOBAL_WINDOW_SINGLE_SIZE) +#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_END \ + (PXP_EXTERNAL_BAR_GLOBAL_WINDOW_START + \ + PXP_EXTERNAL_BAR_GLOBAL_WINDOW_LENGTH - 1) + +/* PF BAR */ +#define PXP_BAR0_START_GRC 0x0000 +#define PXP_BAR0_GRC_LENGTH 0x1C00000 +#define PXP_BAR0_END_GRC \ + (PXP_BAR0_START_GRC + PXP_BAR0_GRC_LENGTH - 1) + +#define PXP_BAR0_START_IGU 0x1C00000 +#define PXP_BAR0_IGU_LENGTH 0x10000 +#define PXP_BAR0_END_IGU \ + (PXP_BAR0_START_IGU + PXP_BAR0_IGU_LENGTH - 1) + +#define PXP_BAR0_START_TSDM 0x1C80000 +#define PXP_BAR0_SDM_LENGTH 0x40000 +#define PXP_BAR0_SDM_RESERVED_LENGTH 0x40000 +#define PXP_BAR0_END_TSDM \ + (PXP_BAR0_START_TSDM + PXP_BAR0_SDM_LENGTH - 1) + +#define PXP_BAR0_START_MSDM 0x1D00000 +#define PXP_BAR0_END_MSDM \ + (PXP_BAR0_START_MSDM + PXP_BAR0_SDM_LENGTH - 1) + +#define PXP_BAR0_START_USDM 0x1D80000 +#define PXP_BAR0_END_USDM \ + (PXP_BAR0_START_USDM + PXP_BAR0_SDM_LENGTH - 1) + +#define PXP_BAR0_START_XSDM 0x1E00000 +#define PXP_BAR0_END_XSDM \ + (PXP_BAR0_START_XSDM + PXP_BAR0_SDM_LENGTH - 1) + +#define PXP_BAR0_START_YSDM 0x1E80000 +#define PXP_BAR0_END_YSDM \ + (PXP_BAR0_START_YSDM + PXP_BAR0_SDM_LENGTH - 1) + +#define PXP_BAR0_START_PSDM 0x1F00000 +#define PXP_BAR0_END_PSDM \ + (PXP_BAR0_START_PSDM + PXP_BAR0_SDM_LENGTH - 1) + +#define PXP_BAR0_FIRST_INVALID_ADDRESS \ + (PXP_BAR0_END_PSDM + 1) + +/* VF BAR */ +#define PXP_VF_BAR0 0 + +#define PXP_VF_BAR0_START_IGU 0 +#define PXP_VF_BAR0_IGU_LENGTH 0x3000 +#define PXP_VF_BAR0_END_IGU \ + (PXP_VF_BAR0_START_IGU + PXP_VF_BAR0_IGU_LENGTH - 1) + +#define PXP_VF_BAR0_START_DQ 0x3000 +#define PXP_VF_BAR0_DQ_LENGTH 0x200 +#define PXP_VF_BAR0_DQ_OPAQUE_OFFSET 0 +#define PXP_VF_BAR0_ME_OPAQUE_ADDRESS \ + (PXP_VF_BAR0_START_DQ + PXP_VF_BAR0_DQ_OPAQUE_OFFSET) +#define PXP_VF_BAR0_ME_CONCRETE_ADDRESS \ + (PXP_VF_BAR0_ME_OPAQUE_ADDRESS + 4) +#define PXP_VF_BAR0_END_DQ \ + (PXP_VF_BAR0_START_DQ + PXP_VF_BAR0_DQ_LENGTH - 1) + +#define PXP_VF_BAR0_START_TSDM_ZONE_B 0x3200 +#define PXP_VF_BAR0_SDM_LENGTH_ZONE_B 0x200 +#define PXP_VF_BAR0_END_TSDM_ZONE_B \ + (PXP_VF_BAR0_START_TSDM_ZONE_B + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1) + +#define PXP_VF_BAR0_START_MSDM_ZONE_B 0x3400 +#define PXP_VF_BAR0_END_MSDM_ZONE_B \ + (PXP_VF_BAR0_START_MSDM_ZONE_B + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1) + +#define PXP_VF_BAR0_START_USDM_ZONE_B 0x3600 +#define PXP_VF_BAR0_END_USDM_ZONE_B \ + (PXP_VF_BAR0_START_USDM_ZONE_B + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1) + +#define PXP_VF_BAR0_START_XSDM_ZONE_B 0x3800 +#define PXP_VF_BAR0_END_XSDM_ZONE_B \ + (PXP_VF_BAR0_START_XSDM_ZONE_B + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1) + +#define PXP_VF_BAR0_START_YSDM_ZONE_B 0x3a00 +#define 
PXP_VF_BAR0_END_YSDM_ZONE_B \ + (PXP_VF_BAR0_START_YSDM_ZONE_B + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1) + +#define PXP_VF_BAR0_START_PSDM_ZONE_B 0x3c00 +#define PXP_VF_BAR0_END_PSDM_ZONE_B \ + (PXP_VF_BAR0_START_PSDM_ZONE_B + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1) + +#define PXP_VF_BAR0_START_GRC 0x3E00 +#define PXP_VF_BAR0_GRC_LENGTH 0x200 +#define PXP_VF_BAR0_END_GRC \ + (PXP_VF_BAR0_START_GRC + PXP_VF_BAR0_GRC_LENGTH - 1) + +#define PXP_VF_BAR0_START_SDM_ZONE_A 0x4000 +#define PXP_VF_BAR0_END_SDM_ZONE_A 0x10000 + +#define PXP_VF_BAR0_START_IGU2 0x10000 +#define PXP_VF_BAR0_IGU2_LENGTH 0xD000 +#define PXP_VF_BAR0_END_IGU2 \ + (PXP_VF_BAR0_START_IGU2 + PXP_VF_BAR0_IGU2_LENGTH - 1) + +#define PXP_VF_BAR0_GRC_WINDOW_LENGTH 32 + +#define PXP_ILT_PAGE_SIZE_NUM_BITS_MIN 12 +#define PXP_ILT_BLOCK_FACTOR_MULTIPLIER 1024 + +// ILT Records +#define PXP_NUM_ILT_RECORDS_BB 7600 +#define PXP_NUM_ILT_RECORDS_K2 11000 +#define MAX_NUM_ILT_RECORDS \ + OSAL_MAX_T(PXP_NUM_ILT_RECORDS_BB, PXP_NUM_ILT_RECORDS_K2) + +// Host Interface +#define PXP_QUEUES_ZONE_MAX_NUM 320 + + +/*****************/ +/* PRM CONSTANTS */ +/*****************/ +#define PRM_DMA_PAD_BYTES_NUM 2 +/*****************/ +/* SDMs CONSTANTS */ +/*****************/ + + +#define SDM_OP_GEN_TRIG_NONE 0 +#define SDM_OP_GEN_TRIG_WAKE_THREAD 1 +#define SDM_OP_GEN_TRIG_AGG_INT 2 +#define SDM_OP_GEN_TRIG_LOADER 4 +#define SDM_OP_GEN_TRIG_INDICATE_ERROR 6 +#define SDM_OP_GEN_TRIG_INC_ORDER_CNT 9 + +/***********************************************************/ +/* Completion types */ +/***********************************************************/ + +#define SDM_COMP_TYPE_NONE 0 +#define SDM_COMP_TYPE_WAKE_THREAD 1 +#define SDM_COMP_TYPE_AGG_INT 2 +/* Send direct message to local CM and/or remote CMs. Destinations are defined + * by vector in CompParams. + */ +#define SDM_COMP_TYPE_CM 3 +#define SDM_COMP_TYPE_LOADER 4 +/* Send direct message to PXP (like "internal write" command) to write to remote + * Storm RAM via remote SDM + */ +#define SDM_COMP_TYPE_PXP 5 +/* Indicate error per thread */ +#define SDM_COMP_TYPE_INDICATE_ERROR 6 +#define SDM_COMP_TYPE_RELEASE_THREAD 7 +/* Write to local RAM as a completion */ +#define SDM_COMP_TYPE_RAM 8 +#define SDM_COMP_TYPE_INC_ORDER_CNT 9 /* Applicable only for E4 */ + + +/******************/ +/* PBF CONSTANTS */ +/******************/ + +/* Number of PBF command queue lines. */ +#define PBF_MAX_CMD_LINES 3328 /* Each line is 256b */ + +/* Number of BTB blocks. Each block is 256B. */ +#define BTB_MAX_BLOCKS_BB 1440 /* 2880 blocks of 128B */ +#define BTB_MAX_BLOCKS_K2 1840 /* 3680 blocks of 128B */ +#define BTB_MAX_BLOCKS 1440 + +/*****************/ +/* PRS CONSTANTS */ +/*****************/ + +#define PRS_GFT_CAM_LINES_NO_MATCH 31 + +/* + * Interrupt coalescing TimeSet + */ +struct coalescing_timeset { + u8 value; +/* Interrupt coalescing TimeSet (timeout_ticks = TimeSet shl (TimerRes+1)) */ +#define COALESCING_TIMESET_TIMESET_MASK 0x7F +#define COALESCING_TIMESET_TIMESET_SHIFT 0 +/* Only if this flag is set, timeset will take effect */ +#define COALESCING_TIMESET_VALID_MASK 0x1 +#define COALESCING_TIMESET_VALID_SHIFT 7 +}; + +struct common_queue_zone { + __le16 ring_drv_data_consumer; + __le16 reserved; +}; + +struct nvmf_eqe_data { + __le16 icid /* The connection ID for which the EQE is written. */; + u8 reserved0[6] /* Alignment to line */; +}; + + +/* + * ETH Rx producers data + */ +struct eth_rx_prod_data { + __le16 bd_prod /* BD producer. */; + __le16 cqe_prod /* CQE producer. 
*/; +}; + + +struct tcp_ulp_connect_done_params { + __le16 mss; + u8 snd_wnd_scale; + u8 flags; +#define TCP_ULP_CONNECT_DONE_PARAMS_TS_EN_MASK 0x1 +#define TCP_ULP_CONNECT_DONE_PARAMS_TS_EN_SHIFT 0 +#define TCP_ULP_CONNECT_DONE_PARAMS_RESERVED_MASK 0x7F +#define TCP_ULP_CONNECT_DONE_PARAMS_RESERVED_SHIFT 1 +}; + +struct iscsi_connect_done_results { + __le16 icid /* Context ID of the connection */; + __le16 conn_id /* Driver connection ID */; +/* decided tcp params after connect done */ + struct tcp_ulp_connect_done_params params; +}; + + +struct iscsi_eqe_data { + __le16 icid /* Context ID of the connection */; + __le16 conn_id /* Driver connection ID */; + __le16 reserved; +/* error code - relevant only if the opcode indicates its an error */ + u8 error_code; + u8 error_pdu_opcode_reserved; +/* The processed PDUs opcode on which happened the error - updated for specific + * error codes, by default=0xFF + */ +#define ISCSI_EQE_DATA_ERROR_PDU_OPCODE_MASK 0x3F +#define ISCSI_EQE_DATA_ERROR_PDU_OPCODE_SHIFT 0 +/* Indication for driver is the error_pdu_opcode field has valid value */ +#define ISCSI_EQE_DATA_ERROR_PDU_OPCODE_VALID_MASK 0x1 +#define ISCSI_EQE_DATA_ERROR_PDU_OPCODE_VALID_SHIFT 6 +#define ISCSI_EQE_DATA_RESERVED0_MASK 0x1 +#define ISCSI_EQE_DATA_RESERVED0_SHIFT 7 +}; + + +/* + * Multi function mode + */ +enum mf_mode { + ERROR_MODE /* Unsupported mode */, + MF_OVLAN /* Multi function based on outer VLAN */, + MF_NPAR /* Multi function based on MAC address (NIC partitioning) */, + MAX_MF_MODE +}; + +/* Per-protocol connection types */ +enum protocol_type { + PROTOCOLID_ISCSI /* iSCSI */, + PROTOCOLID_FCOE /* FCoE */, + PROTOCOLID_ROCE /* RoCE */, + PROTOCOLID_CORE /* Core (light L2, slow path core) */, + PROTOCOLID_ETH /* Ethernet */, + PROTOCOLID_IWARP /* iWARP */, + PROTOCOLID_TOE /* TOE */, + PROTOCOLID_PREROCE /* Pre (tapeout) RoCE */, + PROTOCOLID_COMMON /* ProtocolCommon */, + PROTOCOLID_TCP /* TCP */, + PROTOCOLID_RDMA /* RDMA */, + PROTOCOLID_SCSI /* SCSI */, + MAX_PROTOCOL_TYPE +}; + + +struct regpair { + __le32 lo /* low word for reg-pair */; + __le32 hi /* high word for reg-pair */; +}; + +/* + * RoCE Destroy Event Data + */ +struct rdma_eqe_destroy_qp { + __le32 cid /* Dedicated field RoCE destroy QP event */; + u8 reserved[4]; +}; + +/* + * RoCE Suspend Event Data + */ +struct rdma_eqe_suspend_qp { + __le32 cid /* Dedicated field RoCE Suspend QP event */; + u8 reserved[4]; +}; + +/* + * RDMA Event Data Union + */ +union rdma_eqe_data { + struct regpair async_handle /* Host handle for the Async Completions */; + /* RoCE Destroy Event Data */ + struct rdma_eqe_destroy_qp rdma_destroy_qp_data; + /* RoCE Suspend QP Event Data */ + struct rdma_eqe_suspend_qp rdma_suspend_qp_data; +}; + +struct tstorm_queue_zone { + __le32 reserved[2]; +}; + + +/* + * Ustorm Queue Zone + */ +struct ustorm_eth_queue_zone { +/* Rx interrupt coalescing TimeSet */ + struct coalescing_timeset int_coalescing_timeset; + u8 reserved[3]; +}; + + +struct ustorm_queue_zone { + struct ustorm_eth_queue_zone eth; + struct common_queue_zone common; +}; + +/* status block structure */ +struct cau_pi_entry { + __le32 prod; +/* A per protocol indexPROD value. 
*/ +#define CAU_PI_ENTRY_PROD_VAL_MASK 0xFFFF +#define CAU_PI_ENTRY_PROD_VAL_SHIFT 0 +/* This value determines the TimeSet that the PI is associated with */ +#define CAU_PI_ENTRY_PI_TIMESET_MASK 0x7F +#define CAU_PI_ENTRY_PI_TIMESET_SHIFT 16 +/* Select the FSM within the SB */ +#define CAU_PI_ENTRY_FSM_SEL_MASK 0x1 +#define CAU_PI_ENTRY_FSM_SEL_SHIFT 23 +/* Select the FSM within the SB */ +#define CAU_PI_ENTRY_RESERVED_MASK 0xFF +#define CAU_PI_ENTRY_RESERVED_SHIFT 24 +}; + +/* status block structure */ +struct cau_sb_entry { + __le32 data; +/* The SB PROD index which is sent to the IGU. */ +#define CAU_SB_ENTRY_SB_PROD_MASK 0xFFFFFF +#define CAU_SB_ENTRY_SB_PROD_SHIFT 0 +#define CAU_SB_ENTRY_STATE0_MASK 0xF /* RX state */ +#define CAU_SB_ENTRY_STATE0_SHIFT 24 +#define CAU_SB_ENTRY_STATE1_MASK 0xF /* TX state */ +#define CAU_SB_ENTRY_STATE1_SHIFT 28 + __le32 params; +/* Indicates the RX TimeSet that this SB is associated with. */ +#define CAU_SB_ENTRY_SB_TIMESET0_MASK 0x7F +#define CAU_SB_ENTRY_SB_TIMESET0_SHIFT 0 +/* Indicates the TX TimeSet that this SB is associated with. */ +#define CAU_SB_ENTRY_SB_TIMESET1_MASK 0x7F +#define CAU_SB_ENTRY_SB_TIMESET1_SHIFT 7 +/* This value will determine the RX FSM timer resolution in ticks */ +#define CAU_SB_ENTRY_TIMER_RES0_MASK 0x3 +#define CAU_SB_ENTRY_TIMER_RES0_SHIFT 14 +/* This value will determine the TX FSM timer resolution in ticks */ +#define CAU_SB_ENTRY_TIMER_RES1_MASK 0x3 +#define CAU_SB_ENTRY_TIMER_RES1_SHIFT 16 +#define CAU_SB_ENTRY_VF_NUMBER_MASK 0xFF +#define CAU_SB_ENTRY_VF_NUMBER_SHIFT 18 +#define CAU_SB_ENTRY_VF_VALID_MASK 0x1 +#define CAU_SB_ENTRY_VF_VALID_SHIFT 26 +#define CAU_SB_ENTRY_PF_NUMBER_MASK 0xF +#define CAU_SB_ENTRY_PF_NUMBER_SHIFT 27 +/* If set then indicates that the TPH STAG is equal to the SB number. Otherwise + * the STAG will be equal to all ones. + */ +#define CAU_SB_ENTRY_TPH_MASK 0x1 +#define CAU_SB_ENTRY_TPH_SHIFT 31 +}; + + +/* + * Igu cleanup bit values to distinguish between clean or producer consumer + * update. 
+ */ +enum command_type_bit { + IGU_COMMAND_TYPE_NOP = 0, + IGU_COMMAND_TYPE_SET = 1, + MAX_COMMAND_TYPE_BIT +}; + + +/* core doorbell data */ +struct core_db_data { + u8 params; +/* destination of doorbell (use enum db_dest) */ +#define CORE_DB_DATA_DEST_MASK 0x3 +#define CORE_DB_DATA_DEST_SHIFT 0 +/* aggregative command to CM (use enum db_agg_cmd_sel) */ +#define CORE_DB_DATA_AGG_CMD_MASK 0x3 +#define CORE_DB_DATA_AGG_CMD_SHIFT 2 +#define CORE_DB_DATA_BYPASS_EN_MASK 0x1 /* enable QM bypass */ +#define CORE_DB_DATA_BYPASS_EN_SHIFT 4 +#define CORE_DB_DATA_RESERVED_MASK 0x1 +#define CORE_DB_DATA_RESERVED_SHIFT 5 +/* aggregative value selection */ +#define CORE_DB_DATA_AGG_VAL_SEL_MASK 0x3 +#define CORE_DB_DATA_AGG_VAL_SEL_SHIFT 6 +/* bit for every DQ counter flags in CM context that DQ can increment */ + u8 agg_flags; + __le16 spq_prod; +}; + +/* Enum of doorbell aggregative command selection */ +enum db_agg_cmd_sel { + DB_AGG_CMD_NOP /* No operation */, + DB_AGG_CMD_SET /* Set the value */, + DB_AGG_CMD_ADD /* Add the value */, + DB_AGG_CMD_MAX /* Set max of current and new value */, + MAX_DB_AGG_CMD_SEL +}; + +/* Enum of doorbell destination */ +enum db_dest { + DB_DEST_XCM /* TX doorbell to XCM */, + DB_DEST_UCM /* RX doorbell to UCM */, + DB_DEST_TCM /* RX doorbell to TCM */, + DB_NUM_DESTINATIONS, + MAX_DB_DEST +}; + + +/* + * Enum of doorbell DPM types + */ +enum db_dpm_type { + DPM_LEGACY /* Legacy DPM- to Xstorm RAM */, + DPM_RDMA /* RDMA DPM (only RoCE in E4) - to NIG */, +/* L2 DPM inline- to PBF, with packet data on doorbell */ + DPM_L2_INLINE, + DPM_L2_BD /* L2 DPM with BD- to PBF, with TX BD data on doorbell */, + MAX_DB_DPM_TYPE +}; + +/* + * Structure for doorbell data, in L2 DPM mode, for the first doorbell in a DPM + * burst + */ +struct db_l2_dpm_data { + __le16 icid /* internal CID */; + __le16 bd_prod /* bd producer value to update */; + __le32 params; +/* Size in QWORD-s of the DPM burst */ +#define DB_L2_DPM_DATA_SIZE_MASK 0x3F +#define DB_L2_DPM_DATA_SIZE_SHIFT 0 +/* Type of DPM transaction (DPM_L2_INLINE or DPM_L2_BD) (use enum db_dpm_type) + */ +#define DB_L2_DPM_DATA_DPM_TYPE_MASK 0x3 +#define DB_L2_DPM_DATA_DPM_TYPE_SHIFT 6 +#define DB_L2_DPM_DATA_NUM_BDS_MASK 0xFF /* number of BD-s */ +#define DB_L2_DPM_DATA_NUM_BDS_SHIFT 8 +/* size of the packet to be transmitted in bytes */ +#define DB_L2_DPM_DATA_PKT_SIZE_MASK 0x7FF +#define DB_L2_DPM_DATA_PKT_SIZE_SHIFT 16 +#define DB_L2_DPM_DATA_RESERVED0_MASK 0x1 +#define DB_L2_DPM_DATA_RESERVED0_SHIFT 27 +/* In DPM_L2_BD mode: the number of SGE-s */ +#define DB_L2_DPM_DATA_SGE_NUM_MASK 0x7 +#define DB_L2_DPM_DATA_SGE_NUM_SHIFT 28 +/* Flag indicating whether to enable GFS search */ +#define DB_L2_DPM_DATA_RESERVED1_MASK 0x1 +#define DB_L2_DPM_DATA_RESERVED1_SHIFT 31 +}; + +/* + * Structure for SGE in a DPM doorbell of type DPM_L2_BD + */ +struct db_l2_dpm_sge { + struct regpair addr /* Single continuous buffer */; + __le16 nbytes /* Number of bytes in this BD. 
*/; + __le16 bitfields; +/* The TPH STAG index value */ +#define DB_L2_DPM_SGE_TPH_ST_INDEX_MASK 0x1FF +#define DB_L2_DPM_SGE_TPH_ST_INDEX_SHIFT 0 +#define DB_L2_DPM_SGE_RESERVED0_MASK 0x3 +#define DB_L2_DPM_SGE_RESERVED0_SHIFT 9 +/* Indicate if ST hint is requested or not */ +#define DB_L2_DPM_SGE_ST_VALID_MASK 0x1 +#define DB_L2_DPM_SGE_ST_VALID_SHIFT 11 +#define DB_L2_DPM_SGE_RESERVED1_MASK 0xF +#define DB_L2_DPM_SGE_RESERVED1_SHIFT 12 + __le32 reserved2; +}; + +/* Structure for doorbell address, in legacy mode */ +struct db_legacy_addr { + __le32 addr; +#define DB_LEGACY_ADDR_RESERVED0_MASK 0x3 +#define DB_LEGACY_ADDR_RESERVED0_SHIFT 0 +/* doorbell extraction mode specifier- 0 if not used */ +#define DB_LEGACY_ADDR_DEMS_MASK 0x7 +#define DB_LEGACY_ADDR_DEMS_SHIFT 2 +#define DB_LEGACY_ADDR_ICID_MASK 0x7FFFFFF /* internal CID */ +#define DB_LEGACY_ADDR_ICID_SHIFT 5 +}; + +/* + * Structure for doorbell address, in PWM mode + */ +struct db_pwm_addr { + __le32 addr; +#define DB_PWM_ADDR_RESERVED0_MASK 0x7 +#define DB_PWM_ADDR_RESERVED0_SHIFT 0 +/* Offset in PWM address space */ +#define DB_PWM_ADDR_OFFSET_MASK 0x7F +#define DB_PWM_ADDR_OFFSET_SHIFT 3 +#define DB_PWM_ADDR_WID_MASK 0x3 /* Window ID */ +#define DB_PWM_ADDR_WID_SHIFT 10 +#define DB_PWM_ADDR_DPI_MASK 0xFFFF /* Doorbell page ID */ +#define DB_PWM_ADDR_DPI_SHIFT 12 +#define DB_PWM_ADDR_RESERVED1_MASK 0xF +#define DB_PWM_ADDR_RESERVED1_SHIFT 28 +}; + +/* + * Structure for doorbell address, in legacy mode, without DEMS + */ +struct db_legacy_wo_dems_addr { + __le32 addr; +#define DB_LEGACY_WO_DEMS_ADDR_RESERVED0_MASK 0x3 +#define DB_LEGACY_WO_DEMS_ADDR_RESERVED0_SHIFT 0 +#define DB_LEGACY_WO_DEMS_ADDR_ICID_MASK 0x3FFFFFFF /* internal CID */ +#define DB_LEGACY_WO_DEMS_ADDR_ICID_SHIFT 2 +}; + + +/* + * Parameters to RDMA firmware, passed in EDPM doorbell + */ +struct db_rdma_dpm_params { + __le32 params; +/* Size in QWORD-s of the DPM burst */ +#define DB_RDMA_DPM_PARAMS_SIZE_MASK 0x3F +#define DB_RDMA_DPM_PARAMS_SIZE_SHIFT 0 +/* Type of DPM transacation (DPM_RDMA) (use enum db_dpm_type) */ +#define DB_RDMA_DPM_PARAMS_DPM_TYPE_MASK 0x3 +#define DB_RDMA_DPM_PARAMS_DPM_TYPE_SHIFT 6 +/* opcode for RDMA operation */ +#define DB_RDMA_DPM_PARAMS_OPCODE_MASK 0xFF +#define DB_RDMA_DPM_PARAMS_OPCODE_SHIFT 8 +/* the size of the WQE payload in bytes */ +#define DB_RDMA_DPM_PARAMS_WQE_SIZE_MASK 0x7FF +#define DB_RDMA_DPM_PARAMS_WQE_SIZE_SHIFT 16 +#define DB_RDMA_DPM_PARAMS_RESERVED0_MASK 0x1 +#define DB_RDMA_DPM_PARAMS_RESERVED0_SHIFT 27 +/* RoCE ack request (will be set 1) */ +#define DB_RDMA_DPM_PARAMS_ACK_REQUEST_MASK 0x1 +#define DB_RDMA_DPM_PARAMS_ACK_REQUEST_SHIFT 28 +#define DB_RDMA_DPM_PARAMS_S_FLG_MASK 0x1 /* RoCE S flag */ +#define DB_RDMA_DPM_PARAMS_S_FLG_SHIFT 29 +/* RoCE completion flag for FW use */ +#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_MASK 0x1 +#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_SHIFT 30 +/* Connection type is iWARP */ +#define DB_RDMA_DPM_PARAMS_CONN_TYPE_IS_IWARP_MASK 0x1 +#define DB_RDMA_DPM_PARAMS_CONN_TYPE_IS_IWARP_SHIFT 31 +}; + +/* + * Parameters to RDMA firmware, passed in EDPM doorbell + */ +struct db_rdma_24b_icid_dpm_params { + __le32 params; +/* Size in QWORD-s of the DPM burst */ +#define DB_RDMA_24B_ICID_DPM_PARAMS_SIZE_MASK 0x3F +#define DB_RDMA_24B_ICID_DPM_PARAMS_SIZE_SHIFT 0 +/* Type of DPM transacation (DPM_RDMA) (use enum db_dpm_type) */ +#define DB_RDMA_24B_ICID_DPM_PARAMS_DPM_TYPE_MASK 0x3 +#define DB_RDMA_24B_ICID_DPM_PARAMS_DPM_TYPE_SHIFT 6 +/* opcode for RDMA operation */ +#define 
DB_RDMA_24B_ICID_DPM_PARAMS_OPCODE_MASK 0xFF +#define DB_RDMA_24B_ICID_DPM_PARAMS_OPCODE_SHIFT 8 +/* ICID extension */ +#define DB_RDMA_24B_ICID_DPM_PARAMS_ICID_EXT_MASK 0xFF +#define DB_RDMA_24B_ICID_DPM_PARAMS_ICID_EXT_SHIFT 16 +/* Number of invalid bytes in last QWROD of the DPM transaction */ +#define DB_RDMA_24B_ICID_DPM_PARAMS_INV_BYTE_CNT_MASK 0x7 +#define DB_RDMA_24B_ICID_DPM_PARAMS_INV_BYTE_CNT_SHIFT 24 +/* Flag indicating 24b icid mode is enabled */ +#define DB_RDMA_24B_ICID_DPM_PARAMS_EXT_ICID_MODE_EN_MASK 0x1 +#define DB_RDMA_24B_ICID_DPM_PARAMS_EXT_ICID_MODE_EN_SHIFT 27 +/* RoCE completion flag */ +#define DB_RDMA_24B_ICID_DPM_PARAMS_COMPLETION_FLG_MASK 0x1 +#define DB_RDMA_24B_ICID_DPM_PARAMS_COMPLETION_FLG_SHIFT 28 +/* RoCE S flag */ +#define DB_RDMA_24B_ICID_DPM_PARAMS_S_FLG_MASK 0x1 +#define DB_RDMA_24B_ICID_DPM_PARAMS_S_FLG_SHIFT 29 +#define DB_RDMA_24B_ICID_DPM_PARAMS_RESERVED1_MASK 0x1 +#define DB_RDMA_24B_ICID_DPM_PARAMS_RESERVED1_SHIFT 30 +/* Connection type is iWARP */ +#define DB_RDMA_24B_ICID_DPM_PARAMS_CONN_TYPE_IS_IWARP_MASK 0x1 +#define DB_RDMA_24B_ICID_DPM_PARAMS_CONN_TYPE_IS_IWARP_SHIFT 31 +}; + + +/* + * Structure for doorbell data, in RDMA DPM mode, for the first doorbell in a + * DPM burst + */ +struct db_rdma_dpm_data { + __le16 icid /* internal CID */; + __le16 prod_val /* aggregated value to update */; +/* parameters passed to RDMA firmware */ + struct db_rdma_dpm_params params; +}; + +/* Igu interrupt command */ +enum igu_int_cmd { + IGU_INT_ENABLE = 0, + IGU_INT_DISABLE = 1, + IGU_INT_NOP = 2, + IGU_INT_NOP2 = 3, + MAX_IGU_INT_CMD +}; + +/* IGU producer or consumer update command */ +struct igu_prod_cons_update { + __le32 sb_id_and_flags; +#define IGU_PROD_CONS_UPDATE_SB_INDEX_MASK 0xFFFFFF +#define IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT 0 +#define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_MASK 0x1 +#define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT 24 +/* interrupt enable/disable/nop (use enum igu_int_cmd) */ +#define IGU_PROD_CONS_UPDATE_ENABLE_INT_MASK 0x3 +#define IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT 25 +/* (use enum igu_seg_access) */ +#define IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_MASK 0x1 +#define IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT 27 +#define IGU_PROD_CONS_UPDATE_TIMER_MASK_MASK 0x1 +#define IGU_PROD_CONS_UPDATE_TIMER_MASK_SHIFT 28 +#define IGU_PROD_CONS_UPDATE_RESERVED0_MASK 0x3 +#define IGU_PROD_CONS_UPDATE_RESERVED0_SHIFT 29 +/* must always be set cleared (use enum command_type_bit) */ +#define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_MASK 0x1 +#define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_SHIFT 31 + __le32 reserved1; +}; + +/* Igu segments access for default status block only */ +enum igu_seg_access { + IGU_SEG_ACCESS_REG = 0, + IGU_SEG_ACCESS_ATTN = 1, + MAX_IGU_SEG_ACCESS +}; + + +/* + * Enumeration for L3 type field of parsing_and_err_flags_union. L3Type: + * 0 - unknown (not ip) ,1 - Ipv4, 2 - Ipv6 (this field can be filled according + * to the last-ethertype) + */ +enum l3_type { + e_l3_type_unknown, + e_l3_type_ipv4, + e_l3_type_ipv6, + MAX_L3_TYPE +}; + + +/* + * Enumeration for l4Protocol field of parsing_and_err_flags_union. L4-protocol + * 0 - none, 1 - TCP, 2- UDP. if the packet is IPv4 fragment, and its not the + * first fragment, the protocol-type should be set to none. + */ +enum l4_protocol { + e_l4_protocol_none, + e_l4_protocol_tcp, + e_l4_protocol_udp, + MAX_L4_PROTOCOL +}; + + +/* + * Parsing and error flags field. 
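+ *
+ * A minimal decoding sketch (the variable names are assumed for illustration
+ * and are not taken from the driver): each field below pairs an unshifted
+ * value mask with a bit shift, so a consumer would read, e.g.,
+ *     u16 f = OSAL_LE16_TO_CPU(p_parse->flags);
+ *     u8 l3 = (f >> PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) &
+ *             PARSING_AND_ERR_FLAGS_L3TYPE_MASK;
+ *     u8 is_frag = (f >> PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT) &
+ *                  PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK;
+ * where l3 then holds an enum l3_type value.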
+ */ +struct parsing_and_err_flags { + __le16 flags; +/* L3Type: 0 - unknown (not ip) ,1 - Ipv4, 2 - Ipv6 (this field can be filled + * according to the last-ethertype) (use enum l3_type) + */ +#define PARSING_AND_ERR_FLAGS_L3TYPE_MASK 0x3 +#define PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT 0 +/* L4-protocol 0 - none, 1 - TCP, 2- UDP. if the packet is IPv4 fragment, and + * its not the first fragment, the protocol-type should be set to none. + * (use enum l4_protocol) + */ +#define PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK 0x3 +#define PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT 2 +/* Set if the packet is IPv4 fragment. */ +#define PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT 4 +/* Set if VLAN tag exists. Invalid if tunnel type are IP GRE or IP GENEVE. */ +#define PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT 5 +/* Set if L4 checksum was calculated. */ +#define PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT 6 +/* Set for PTP packet. */ +#define PARSING_AND_ERR_FLAGS_TIMESYNCPKT_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_TIMESYNCPKT_SHIFT 7 +/* Set if PTP timestamp recorded. */ +#define PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_SHIFT 8 +/* Set if either version-mismatch or hdr-len-error or ipv4-cksm is set or ipv6 + * ver mismatch + */ +#define PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT 9 +/* Set if L4 checksum validation failed. Valid only if L4 checksum was + * calculated. + */ +#define PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT 10 +/* Set if GRE/VXLAN/GENEVE tunnel detected. */ +#define PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT 11 +/* Set if VLAN tag exists in tunnel header. */ +#define PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_SHIFT 12 +/* Set if either tunnel-ipv4-version-mismatch or tunnel-ipv4-hdr-len-error or + * tunnel-ipv4-cksm is set or tunneling ipv6 ver mismatch + */ +#define PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT 13 +/* Set if GRE or VXLAN/GENEVE UDP checksum was calculated. */ +#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT 14 +/* Set if tunnel L4 checksum validation failed. Valid only if tunnel L4 checksum + * was calculated. + */ +#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT 15 +}; + + +/* + * Parsing error flags bitmap. 
+ */ +struct parsing_err_flags { + __le16 flags; +/* MAC error indication */ +#define PARSING_ERR_FLAGS_MAC_ERROR_MASK 0x1 +#define PARSING_ERR_FLAGS_MAC_ERROR_SHIFT 0 +/* truncation error indication */ +#define PARSING_ERR_FLAGS_TRUNC_ERROR_MASK 0x1 +#define PARSING_ERR_FLAGS_TRUNC_ERROR_SHIFT 1 +/* packet too small indication */ +#define PARSING_ERR_FLAGS_PKT_TOO_SMALL_MASK 0x1 +#define PARSING_ERR_FLAGS_PKT_TOO_SMALL_SHIFT 2 +/* Header Missing Tag */ +#define PARSING_ERR_FLAGS_ANY_HDR_MISSING_TAG_MASK 0x1 +#define PARSING_ERR_FLAGS_ANY_HDR_MISSING_TAG_SHIFT 3 +/* from frame cracker output */ +#define PARSING_ERR_FLAGS_ANY_HDR_IP_VER_MISMTCH_MASK 0x1 +#define PARSING_ERR_FLAGS_ANY_HDR_IP_VER_MISMTCH_SHIFT 4 +/* from frame cracker output */ +#define PARSING_ERR_FLAGS_ANY_HDR_IP_V4_HDR_LEN_TOO_SMALL_MASK 0x1 +#define PARSING_ERR_FLAGS_ANY_HDR_IP_V4_HDR_LEN_TOO_SMALL_SHIFT 5 +/* set this error if: 1. total-len is smaller than hdr-len 2. total-ip-len + * indicates number that is bigger than real packet length 3. tunneling: + * total-ip-length of the outer header points to offset that is smaller than + * the one pointed to by the total-ip-len of the inner hdr. + */ +#define PARSING_ERR_FLAGS_ANY_HDR_IP_BAD_TOTAL_LEN_MASK 0x1 +#define PARSING_ERR_FLAGS_ANY_HDR_IP_BAD_TOTAL_LEN_SHIFT 6 +/* from frame cracker output */ +#define PARSING_ERR_FLAGS_IP_V4_CHKSM_ERROR_MASK 0x1 +#define PARSING_ERR_FLAGS_IP_V4_CHKSM_ERROR_SHIFT 7 +/* from frame cracker output. for either TCP or UDP */ +#define PARSING_ERR_FLAGS_ANY_HDR_L4_IP_LEN_MISMTCH_MASK 0x1 +#define PARSING_ERR_FLAGS_ANY_HDR_L4_IP_LEN_MISMTCH_SHIFT 8 +/* from frame cracker output */ +#define PARSING_ERR_FLAGS_ZERO_UDP_IP_V6_CHKSM_MASK 0x1 +#define PARSING_ERR_FLAGS_ZERO_UDP_IP_V6_CHKSM_SHIFT 9 +/* cksm calculated and value isn't 0xffff or L4-cksm-wasnt-calculated for any + * reason, like: udp/ipv4 checksum is 0 etc. + */ +#define PARSING_ERR_FLAGS_INNER_L4_CHKSM_ERROR_MASK 0x1 +#define PARSING_ERR_FLAGS_INNER_L4_CHKSM_ERROR_SHIFT 10 +/* from frame cracker output */ +#define PARSING_ERR_FLAGS_ANY_HDR_ZERO_TTL_OR_HOP_LIM_MASK 0x1 +#define PARSING_ERR_FLAGS_ANY_HDR_ZERO_TTL_OR_HOP_LIM_SHIFT 11 +/* from frame cracker output */ +#define PARSING_ERR_FLAGS_NON_8021Q_TAG_EXISTS_IN_BOTH_HDRS_MASK 0x1 +#define PARSING_ERR_FLAGS_NON_8021Q_TAG_EXISTS_IN_BOTH_HDRS_SHIFT 12 +/* set if geneve option size was over 32 byte */ +#define PARSING_ERR_FLAGS_GENEVE_OPTION_OVERSIZED_MASK 0x1 +#define PARSING_ERR_FLAGS_GENEVE_OPTION_OVERSIZED_SHIFT 13 +/* from frame cracker output */ +#define PARSING_ERR_FLAGS_TUNNEL_IP_V4_CHKSM_ERROR_MASK 0x1 +#define PARSING_ERR_FLAGS_TUNNEL_IP_V4_CHKSM_ERROR_SHIFT 14 +/* from frame cracker output */ +#define PARSING_ERR_FLAGS_TUNNEL_L4_CHKSM_ERROR_MASK 0x1 +#define PARSING_ERR_FLAGS_TUNNEL_L4_CHKSM_ERROR_SHIFT 15 +}; + + +/* + * Pb context + */ +struct pb_context { + __le32 crc[4]; +}; + +/* Concrete Function ID. 
*/ +struct pxp_concrete_fid { + __le16 fid; +#define PXP_CONCRETE_FID_PFID_MASK 0xF /* Parent PFID */ +#define PXP_CONCRETE_FID_PFID_SHIFT 0 +#define PXP_CONCRETE_FID_PORT_MASK 0x3 /* port number */ +#define PXP_CONCRETE_FID_PORT_SHIFT 4 +#define PXP_CONCRETE_FID_PATH_MASK 0x1 /* path number */ +#define PXP_CONCRETE_FID_PATH_SHIFT 6 +#define PXP_CONCRETE_FID_VFVALID_MASK 0x1 +#define PXP_CONCRETE_FID_VFVALID_SHIFT 7 +#define PXP_CONCRETE_FID_VFID_MASK 0xFF +#define PXP_CONCRETE_FID_VFID_SHIFT 8 +}; + +struct pxp_pretend_concrete_fid { + __le16 fid; +#define PXP_PRETEND_CONCRETE_FID_PFID_MASK 0xF +#define PXP_PRETEND_CONCRETE_FID_PFID_SHIFT 0 +#define PXP_PRETEND_CONCRETE_FID_RESERVED_MASK 0x7 +#define PXP_PRETEND_CONCRETE_FID_RESERVED_SHIFT 4 +#define PXP_PRETEND_CONCRETE_FID_VFVALID_MASK 0x1 +#define PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT 7 +#define PXP_PRETEND_CONCRETE_FID_VFID_MASK 0xFF +#define PXP_PRETEND_CONCRETE_FID_VFID_SHIFT 8 +}; + +union pxp_pretend_fid { + struct pxp_pretend_concrete_fid concrete_fid; + __le16 opaque_fid; +}; + +/* Pxp Pretend Command Register. */ +struct pxp_pretend_cmd { + union pxp_pretend_fid fid; + __le16 control; +#define PXP_PRETEND_CMD_PATH_MASK 0x1 +#define PXP_PRETEND_CMD_PATH_SHIFT 0 +#define PXP_PRETEND_CMD_USE_PORT_MASK 0x1 +#define PXP_PRETEND_CMD_USE_PORT_SHIFT 1 +#define PXP_PRETEND_CMD_PORT_MASK 0x3 +#define PXP_PRETEND_CMD_PORT_SHIFT 2 +#define PXP_PRETEND_CMD_RESERVED0_MASK 0xF +#define PXP_PRETEND_CMD_RESERVED0_SHIFT 4 +#define PXP_PRETEND_CMD_RESERVED1_MASK 0xF +#define PXP_PRETEND_CMD_RESERVED1_SHIFT 8 +#define PXP_PRETEND_CMD_PRETEND_PATH_MASK 0x1 +#define PXP_PRETEND_CMD_PRETEND_PATH_SHIFT 12 +#define PXP_PRETEND_CMD_PRETEND_PORT_MASK 0x1 +#define PXP_PRETEND_CMD_PRETEND_PORT_SHIFT 13 +#define PXP_PRETEND_CMD_PRETEND_FUNCTION_MASK 0x1 +#define PXP_PRETEND_CMD_PRETEND_FUNCTION_SHIFT 14 +#define PXP_PRETEND_CMD_IS_CONCRETE_MASK 0x1 +#define PXP_PRETEND_CMD_IS_CONCRETE_SHIFT 15 +}; + +/* PTT Record in PXP Admin Window. */ +struct pxp_ptt_entry { + __le32 offset; +#define PXP_PTT_ENTRY_OFFSET_MASK 0x7FFFFF +#define PXP_PTT_ENTRY_OFFSET_SHIFT 0 +#define PXP_PTT_ENTRY_RESERVED0_MASK 0x1FF +#define PXP_PTT_ENTRY_RESERVED0_SHIFT 23 + struct pxp_pretend_cmd pretend; +}; + + +/* + * VF Zone A Permission Register. 
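+ *
+ * Illustrative composition (the local variable names are assumed, not taken
+ * from the driver): given the field definitions below, a control word for
+ * VF number vfid with the valid bit set would be built as
+ *     u32 ctrl = (vfid << PXP_VF_ZONE_A_PERMISSION_VFID_SHIFT) |
+ *                (1 << PXP_VF_ZONE_A_PERMISSION_VALID_SHIFT);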
+ */ +struct pxp_vf_zone_a_permission { + __le32 control; +#define PXP_VF_ZONE_A_PERMISSION_VFID_MASK 0xFF +#define PXP_VF_ZONE_A_PERMISSION_VFID_SHIFT 0 +#define PXP_VF_ZONE_A_PERMISSION_VALID_MASK 0x1 +#define PXP_VF_ZONE_A_PERMISSION_VALID_SHIFT 8 +#define PXP_VF_ZONE_A_PERMISSION_RESERVED0_MASK 0x7F +#define PXP_VF_ZONE_A_PERMISSION_RESERVED0_SHIFT 9 +#define PXP_VF_ZONE_A_PERMISSION_RESERVED1_MASK 0xFFFF +#define PXP_VF_ZONE_A_PERMISSION_RESERVED1_SHIFT 16 +}; + + +/* + * Rdif context + */ +struct rdif_task_context { + __le32 initial_ref_tag; + __le16 app_tag_value; + __le16 app_tag_mask; + u8 flags0; +#define RDIF_TASK_CONTEXT_IGNORE_APP_TAG_MASK 0x1 +#define RDIF_TASK_CONTEXT_IGNORE_APP_TAG_SHIFT 0 +#define RDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_MASK 0x1 +#define RDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_SHIFT 1 +/* 0 = IP checksum, 1 = CRC */ +#define RDIF_TASK_CONTEXT_HOST_GUARD_TYPE_MASK 0x1 +#define RDIF_TASK_CONTEXT_HOST_GUARD_TYPE_SHIFT 2 +#define RDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_MASK 0x1 +#define RDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_SHIFT 3 +/* 1/2/3 - Protection Type */ +#define RDIF_TASK_CONTEXT_PROTECTION_TYPE_MASK 0x3 +#define RDIF_TASK_CONTEXT_PROTECTION_TYPE_SHIFT 4 +/* 0=0x0000, 1=0xffff */ +#define RDIF_TASK_CONTEXT_CRC_SEED_MASK 0x1 +#define RDIF_TASK_CONTEXT_CRC_SEED_SHIFT 6 +/* Keep reference tag constant */ +#define RDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_MASK 0x1 +#define RDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_SHIFT 7 + u8 partial_dif_data[7]; + __le16 partial_crc_value; + __le16 partial_checksum_value; + __le32 offset_in_io; + __le16 flags1; +#define RDIF_TASK_CONTEXT_VALIDATE_GUARD_MASK 0x1 +#define RDIF_TASK_CONTEXT_VALIDATE_GUARD_SHIFT 0 +#define RDIF_TASK_CONTEXT_VALIDATE_APP_TAG_MASK 0x1 +#define RDIF_TASK_CONTEXT_VALIDATE_APP_TAG_SHIFT 1 +#define RDIF_TASK_CONTEXT_VALIDATE_REF_TAG_MASK 0x1 +#define RDIF_TASK_CONTEXT_VALIDATE_REF_TAG_SHIFT 2 +#define RDIF_TASK_CONTEXT_FORWARD_GUARD_MASK 0x1 +#define RDIF_TASK_CONTEXT_FORWARD_GUARD_SHIFT 3 +#define RDIF_TASK_CONTEXT_FORWARD_APP_TAG_MASK 0x1 +#define RDIF_TASK_CONTEXT_FORWARD_APP_TAG_SHIFT 4 +#define RDIF_TASK_CONTEXT_FORWARD_REF_TAG_MASK 0x1 +#define RDIF_TASK_CONTEXT_FORWARD_REF_TAG_SHIFT 5 +/* 0=512B, 1=1KB, 2=2KB, 3=4KB, 4=8KB */ +#define RDIF_TASK_CONTEXT_INTERVAL_SIZE_MASK 0x7 +#define RDIF_TASK_CONTEXT_INTERVAL_SIZE_SHIFT 6 +/* 0=None, 1=DIF, 2=DIX */ +#define RDIF_TASK_CONTEXT_HOST_INTERFACE_MASK 0x3 +#define RDIF_TASK_CONTEXT_HOST_INTERFACE_SHIFT 9 +/* DIF tag right at the beginning of DIF interval */ +#define RDIF_TASK_CONTEXT_DIF_BEFORE_DATA_MASK 0x1 +#define RDIF_TASK_CONTEXT_DIF_BEFORE_DATA_SHIFT 11 +#define RDIF_TASK_CONTEXT_RESERVED0_MASK 0x1 +#define RDIF_TASK_CONTEXT_RESERVED0_SHIFT 12 +/* 0=None, 1=DIF */ +#define RDIF_TASK_CONTEXT_NETWORK_INTERFACE_MASK 0x1 +#define RDIF_TASK_CONTEXT_NETWORK_INTERFACE_SHIFT 13 +/* Forward application tag with mask */ +#define RDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_MASK 0x1 +#define RDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_SHIFT 14 +/* Forward reference tag with mask */ +#define RDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_MASK 0x1 +#define RDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_SHIFT 15 + __le16 state; +#define RDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_MASK 0xF +#define RDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_SHIFT 0 +#define RDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_MASK 0xF +#define RDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_SHIFT 4 +#define RDIF_TASK_CONTEXT_ERROR_IN_IO_MASK 0x1 +#define RDIF_TASK_CONTEXT_ERROR_IN_IO_SHIFT 8 +#define 
RDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_MASK 0x1 +#define RDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_SHIFT 9 +/* mask for refernce tag handling */ +#define RDIF_TASK_CONTEXT_REF_TAG_MASK_MASK 0xF +#define RDIF_TASK_CONTEXT_REF_TAG_MASK_SHIFT 10 +#define RDIF_TASK_CONTEXT_RESERVED1_MASK 0x3 +#define RDIF_TASK_CONTEXT_RESERVED1_SHIFT 14 + __le32 reserved2; +}; + +/* + * RSS hash type + */ +enum rss_hash_type { + RSS_HASH_TYPE_DEFAULT = 0, + RSS_HASH_TYPE_IPV4 = 1, + RSS_HASH_TYPE_TCP_IPV4 = 2, + RSS_HASH_TYPE_IPV6 = 3, + RSS_HASH_TYPE_TCP_IPV6 = 4, + RSS_HASH_TYPE_UDP_IPV4 = 5, + RSS_HASH_TYPE_UDP_IPV6 = 6, + MAX_RSS_HASH_TYPE +}; + +/* + * status block structure + */ +struct status_block { + __le16 pi_array[PIS_PER_SB]; + __le32 sb_num; +#define STATUS_BLOCK_SB_NUM_MASK 0x1FF +#define STATUS_BLOCK_SB_NUM_SHIFT 0 +#define STATUS_BLOCK_ZERO_PAD_MASK 0x7F +#define STATUS_BLOCK_ZERO_PAD_SHIFT 9 +#define STATUS_BLOCK_ZERO_PAD2_MASK 0xFFFF +#define STATUS_BLOCK_ZERO_PAD2_SHIFT 16 + __le32 prod_index; +#define STATUS_BLOCK_PROD_INDEX_MASK 0xFFFFFF +#define STATUS_BLOCK_PROD_INDEX_SHIFT 0 +#define STATUS_BLOCK_ZERO_PAD3_MASK 0xFF +#define STATUS_BLOCK_ZERO_PAD3_SHIFT 24 +}; + + +/* + * Tdif context + */ +struct tdif_task_context { + __le32 initial_ref_tag; + __le16 app_tag_value; + __le16 app_tag_mask; + __le16 partial_crc_value_b; + __le16 partial_checksum_value_b; + __le16 stateB; +#define TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_B_MASK 0xF +#define TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_B_SHIFT 0 +#define TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_B_MASK 0xF +#define TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_B_SHIFT 4 +#define TDIF_TASK_CONTEXT_ERROR_IN_IO_B_MASK 0x1 +#define TDIF_TASK_CONTEXT_ERROR_IN_IO_B_SHIFT 8 +#define TDIF_TASK_CONTEXT_CHECKSUM_VERFLOW_MASK 0x1 +#define TDIF_TASK_CONTEXT_CHECKSUM_VERFLOW_SHIFT 9 +#define TDIF_TASK_CONTEXT_RESERVED0_MASK 0x3F +#define TDIF_TASK_CONTEXT_RESERVED0_SHIFT 10 + u8 reserved1; + u8 flags0; +#define TDIF_TASK_CONTEXT_IGNORE_APP_TAG_MASK 0x1 +#define TDIF_TASK_CONTEXT_IGNORE_APP_TAG_SHIFT 0 +#define TDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_MASK 0x1 +#define TDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_SHIFT 1 +/* 0 = IP checksum, 1 = CRC */ +#define TDIF_TASK_CONTEXT_HOST_GUARD_TYPE_MASK 0x1 +#define TDIF_TASK_CONTEXT_HOST_GUARD_TYPE_SHIFT 2 +#define TDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_MASK 0x1 +#define TDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_SHIFT 3 +/* 1/2/3 - Protection Type */ +#define TDIF_TASK_CONTEXT_PROTECTION_TYPE_MASK 0x3 +#define TDIF_TASK_CONTEXT_PROTECTION_TYPE_SHIFT 4 +/* 0=0x0000, 1=0xffff */ +#define TDIF_TASK_CONTEXT_CRC_SEED_MASK 0x1 +#define TDIF_TASK_CONTEXT_CRC_SEED_SHIFT 6 +#define TDIF_TASK_CONTEXT_RESERVED2_MASK 0x1 +#define TDIF_TASK_CONTEXT_RESERVED2_SHIFT 7 + __le32 flags1; +#define TDIF_TASK_CONTEXT_VALIDATE_GUARD_MASK 0x1 +#define TDIF_TASK_CONTEXT_VALIDATE_GUARD_SHIFT 0 +#define TDIF_TASK_CONTEXT_VALIDATE_APP_TAG_MASK 0x1 +#define TDIF_TASK_CONTEXT_VALIDATE_APP_TAG_SHIFT 1 +#define TDIF_TASK_CONTEXT_VALIDATE_REF_TAG_MASK 0x1 +#define TDIF_TASK_CONTEXT_VALIDATE_REF_TAG_SHIFT 2 +#define TDIF_TASK_CONTEXT_FORWARD_GUARD_MASK 0x1 +#define TDIF_TASK_CONTEXT_FORWARD_GUARD_SHIFT 3 +#define TDIF_TASK_CONTEXT_FORWARD_APP_TAG_MASK 0x1 +#define TDIF_TASK_CONTEXT_FORWARD_APP_TAG_SHIFT 4 +#define TDIF_TASK_CONTEXT_FORWARD_REF_TAG_MASK 0x1 +#define TDIF_TASK_CONTEXT_FORWARD_REF_TAG_SHIFT 5 +/* 0=512B, 1=1KB, 2=2KB, 3=4KB, 4=8KB */ +#define TDIF_TASK_CONTEXT_INTERVAL_SIZE_MASK 0x7 +#define TDIF_TASK_CONTEXT_INTERVAL_SIZE_SHIFT 6 +/* 0=None, 1=DIF, 
2=DIX */ +#define TDIF_TASK_CONTEXT_HOST_INTERFACE_MASK 0x3 +#define TDIF_TASK_CONTEXT_HOST_INTERFACE_SHIFT 9 +/* DIF tag right at the beginning of DIF interval */ +#define TDIF_TASK_CONTEXT_DIF_BEFORE_DATA_MASK 0x1 +#define TDIF_TASK_CONTEXT_DIF_BEFORE_DATA_SHIFT 11 +#define TDIF_TASK_CONTEXT_RESERVED3_MASK 0x1 /* reserved */ +#define TDIF_TASK_CONTEXT_RESERVED3_SHIFT 12 +/* 0=None, 1=DIF */ +#define TDIF_TASK_CONTEXT_NETWORK_INTERFACE_MASK 0x1 +#define TDIF_TASK_CONTEXT_NETWORK_INTERFACE_SHIFT 13 +#define TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_A_MASK 0xF +#define TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_A_SHIFT 14 +#define TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_A_MASK 0xF +#define TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_A_SHIFT 18 +#define TDIF_TASK_CONTEXT_ERROR_IN_IO_A_MASK 0x1 +#define TDIF_TASK_CONTEXT_ERROR_IN_IO_A_SHIFT 22 +#define TDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_A_MASK 0x1 +#define TDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_A_SHIFT 23 +/* mask for refernce tag handling */ +#define TDIF_TASK_CONTEXT_REF_TAG_MASK_MASK 0xF +#define TDIF_TASK_CONTEXT_REF_TAG_MASK_SHIFT 24 +/* Forward application tag with mask */ +#define TDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_MASK 0x1 +#define TDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_SHIFT 28 +/* Forward reference tag with mask */ +#define TDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_MASK 0x1 +#define TDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_SHIFT 29 +/* Keep reference tag constant */ +#define TDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_MASK 0x1 +#define TDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_SHIFT 30 +#define TDIF_TASK_CONTEXT_RESERVED4_MASK 0x1 +#define TDIF_TASK_CONTEXT_RESERVED4_SHIFT 31 + __le32 offset_in_io_b; + __le16 partial_crc_value_a; + __le16 partial_checksum_value_a; + __le32 offset_in_io_a; + u8 partial_dif_data_a[8]; + u8 partial_dif_data_b[8]; +}; + + +/* + * Timers context + */ +struct timers_context { + __le32 logical_client_0; +/* Expiration time of logical client 0 */ +#define TIMERS_CONTEXT_EXPIRATIONTIMELC0_MASK 0x7FFFFFF +#define TIMERS_CONTEXT_EXPIRATIONTIMELC0_SHIFT 0 +#define TIMERS_CONTEXT_RESERVED0_MASK 0x1 +#define TIMERS_CONTEXT_RESERVED0_SHIFT 27 +/* Valid bit of logical client 0 */ +#define TIMERS_CONTEXT_VALIDLC0_MASK 0x1 +#define TIMERS_CONTEXT_VALIDLC0_SHIFT 28 +/* Active bit of logical client 0 */ +#define TIMERS_CONTEXT_ACTIVELC0_MASK 0x1 +#define TIMERS_CONTEXT_ACTIVELC0_SHIFT 29 +#define TIMERS_CONTEXT_RESERVED1_MASK 0x3 +#define TIMERS_CONTEXT_RESERVED1_SHIFT 30 + __le32 logical_client_1; +/* Expiration time of logical client 1 */ +#define TIMERS_CONTEXT_EXPIRATIONTIMELC1_MASK 0x7FFFFFF +#define TIMERS_CONTEXT_EXPIRATIONTIMELC1_SHIFT 0 +#define TIMERS_CONTEXT_RESERVED2_MASK 0x1 +#define TIMERS_CONTEXT_RESERVED2_SHIFT 27 +/* Valid bit of logical client 1 */ +#define TIMERS_CONTEXT_VALIDLC1_MASK 0x1 +#define TIMERS_CONTEXT_VALIDLC1_SHIFT 28 +/* Active bit of logical client 1 */ +#define TIMERS_CONTEXT_ACTIVELC1_MASK 0x1 +#define TIMERS_CONTEXT_ACTIVELC1_SHIFT 29 +#define TIMERS_CONTEXT_RESERVED3_MASK 0x3 +#define TIMERS_CONTEXT_RESERVED3_SHIFT 30 + __le32 logical_client_2; +/* Expiration time of logical client 2 */ +#define TIMERS_CONTEXT_EXPIRATIONTIMELC2_MASK 0x7FFFFFF +#define TIMERS_CONTEXT_EXPIRATIONTIMELC2_SHIFT 0 +#define TIMERS_CONTEXT_RESERVED4_MASK 0x1 +#define TIMERS_CONTEXT_RESERVED4_SHIFT 27 +/* Valid bit of logical client 2 */ +#define TIMERS_CONTEXT_VALIDLC2_MASK 0x1 +#define TIMERS_CONTEXT_VALIDLC2_SHIFT 28 +/* Active bit of logical client 2 */ +#define TIMERS_CONTEXT_ACTIVELC2_MASK 0x1 
+#define TIMERS_CONTEXT_ACTIVELC2_SHIFT 29 +#define TIMERS_CONTEXT_RESERVED5_MASK 0x3 +#define TIMERS_CONTEXT_RESERVED5_SHIFT 30 + __le32 host_expiration_fields; +/* Expiration time on host (closest one) */ +#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALUE_MASK 0x7FFFFFF +#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALUE_SHIFT 0 +#define TIMERS_CONTEXT_RESERVED6_MASK 0x1 +#define TIMERS_CONTEXT_RESERVED6_SHIFT 27 +/* Valid bit of host expiration */ +#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALID_MASK 0x1 +#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALID_SHIFT 28 +#define TIMERS_CONTEXT_RESERVED7_MASK 0x7 +#define TIMERS_CONTEXT_RESERVED7_SHIFT 29 +}; + + +/* + * Enum for next_protocol field of tunnel_parsing_flags + */ +enum tunnel_next_protocol { + e_unknown = 0, + e_l2 = 1, + e_ipv4 = 2, + e_ipv6 = 3, + MAX_TUNNEL_NEXT_PROTOCOL +}; + +#endif /* __COMMON_HSI__ */ diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore.h b/src/spdk/dpdk/drivers/net/qede/base/ecore.h new file mode 100644 index 000000000..b2077bc46 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/qede/base/ecore.h @@ -0,0 +1,1073 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. + * All rights reserved. + * www.cavium.com + */ + +#ifndef __ECORE_H +#define __ECORE_H + +/* @DPDK */ +#include +#include +#include + +#define CONFIG_ECORE_BINARY_FW +#undef CONFIG_ECORE_ZIPPED_FW + +#ifdef CONFIG_ECORE_ZIPPED_FW +#include +#endif + +#include "ecore_status.h" +#include "ecore_hsi_common.h" +#include "ecore_hsi_debug_tools.h" +#include "ecore_hsi_init_func.h" +#include "ecore_hsi_init_tool.h" +#include "ecore_proto_if.h" +#include "mcp_public.h" + +#define ECORE_MAJOR_VERSION 8 +#define ECORE_MINOR_VERSION 40 +#define ECORE_REVISION_VERSION 26 +#define ECORE_ENGINEERING_VERSION 0 + +#define ECORE_VERSION \ + ((ECORE_MAJOR_VERSION << 24) | (ECORE_MINOR_VERSION << 16) | \ + (ECORE_REVISION_VERSION << 8) | ECORE_ENGINEERING_VERSION) + +#define STORM_FW_VERSION \ + ((FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) | \ + (FW_REVISION_VERSION << 8) | FW_ENGINEERING_VERSION) + +#define IS_ECORE_PACING(p_hwfn) \ + (!!(p_hwfn->b_en_pacing)) + +#define MAX_HWFNS_PER_DEVICE 2 +#define NAME_SIZE 128 /* @DPDK */ +#define ECORE_WFQ_UNIT 100 +#include "../qede_logs.h" /* @DPDK */ + +#define ISCSI_BDQ_ID(_port_id) (_port_id) +#define FCOE_BDQ_ID(_port_id) (_port_id + 2) +/* Constants */ +#define ECORE_WID_SIZE (1024) +#define ECORE_MIN_WIDS (4) + +/* Configurable */ +#define ECORE_PF_DEMS_SIZE (4) + +/* cau states */ +enum ecore_coalescing_mode { + ECORE_COAL_MODE_DISABLE, + ECORE_COAL_MODE_ENABLE +}; + +enum ecore_nvm_cmd { + ECORE_PUT_FILE_BEGIN = DRV_MSG_CODE_NVM_PUT_FILE_BEGIN, + ECORE_PUT_FILE_DATA = DRV_MSG_CODE_NVM_PUT_FILE_DATA, + ECORE_NVM_READ_NVRAM = DRV_MSG_CODE_NVM_READ_NVRAM, + ECORE_NVM_WRITE_NVRAM = DRV_MSG_CODE_NVM_WRITE_NVRAM, + ECORE_NVM_DEL_FILE = DRV_MSG_CODE_NVM_DEL_FILE, + ECORE_EXT_PHY_FW_UPGRADE = DRV_MSG_CODE_EXT_PHY_FW_UPGRADE, + ECORE_NVM_SET_SECURE_MODE = DRV_MSG_CODE_SET_SECURE_MODE, + ECORE_PHY_RAW_READ = DRV_MSG_CODE_PHY_RAW_READ, + ECORE_PHY_RAW_WRITE = DRV_MSG_CODE_PHY_RAW_WRITE, + ECORE_PHY_CORE_READ = DRV_MSG_CODE_PHY_CORE_READ, + ECORE_PHY_CORE_WRITE = DRV_MSG_CODE_PHY_CORE_WRITE, + ECORE_GET_MCP_NVM_RESP = 0xFFFFFF00 +}; + +#ifndef LINUX_REMOVE +#if !defined(CONFIG_ECORE_L2) +#define CONFIG_ECORE_L2 +#define CONFIG_ECORE_SRIOV +#endif +#endif + +/* helpers */ +#ifndef __EXTRACT__LINUX__ +#define MASK_FIELD(_name, _value) \ + ((_value) &= (_name##_MASK)) + +#define FIELD_VALUE(_name, _value) \ + 
((_value & _name##_MASK) << _name##_SHIFT) + +#define SET_FIELD(value, name, flag) \ +do { \ + (value) &= ~(name##_MASK << name##_SHIFT); \ + (value) |= ((((u64)flag) & (u64)name##_MASK) << (name##_SHIFT));\ +} while (0) + +#define GET_FIELD(value, name) \ + (((value) >> (name##_SHIFT)) & name##_MASK) + +#define GET_MFW_FIELD(name, field) \ + (((name) & (field ## _MASK)) >> (field ## _OFFSET)) + +#define SET_MFW_FIELD(name, field, value) \ +do { \ + (name) &= ~((field ## _MASK)); \ + (name) |= (((value) << (field ## _OFFSET)) & (field ## _MASK)); \ +} while (0) +#endif + +static OSAL_INLINE u32 DB_ADDR(u32 cid, u32 DEMS) +{ + u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) | + (cid * ECORE_PF_DEMS_SIZE); + + return db_addr; +} + +static OSAL_INLINE u32 DB_ADDR_VF(u32 cid, u32 DEMS) +{ + u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) | + FIELD_VALUE(DB_LEGACY_ADDR_ICID, cid); + + return db_addr; +} + +#define ALIGNED_TYPE_SIZE(type_name, p_hwfn) \ + ((sizeof(type_name) + (u32)(1 << (p_hwfn->p_dev->cache_shift)) - 1) & \ + ~((1 << (p_hwfn->p_dev->cache_shift)) - 1)) + +#ifndef LINUX_REMOVE +#ifndef U64_HI +#define U64_HI(val) ((u32)(((u64)(val)) >> 32)) +#endif + +#ifndef U64_LO +#define U64_LO(val) ((u32)(((u64)(val)) & 0xffffffff)) +#endif +#endif + +#ifndef __EXTRACT__LINUX__ +enum DP_LEVEL { + ECORE_LEVEL_VERBOSE = 0x0, + ECORE_LEVEL_INFO = 0x1, + ECORE_LEVEL_NOTICE = 0x2, + ECORE_LEVEL_ERR = 0x3, +}; + +#define ECORE_LOG_LEVEL_SHIFT (30) +#define ECORE_LOG_VERBOSE_MASK (0x3fffffff) +#define ECORE_LOG_INFO_MASK (0x40000000) +#define ECORE_LOG_NOTICE_MASK (0x80000000) + +enum DP_MODULE { +#ifndef LINUX_REMOVE + ECORE_MSG_DRV = 0x0001, + ECORE_MSG_PROBE = 0x0002, + ECORE_MSG_LINK = 0x0004, + ECORE_MSG_TIMER = 0x0008, + ECORE_MSG_IFDOWN = 0x0010, + ECORE_MSG_IFUP = 0x0020, + ECORE_MSG_RX_ERR = 0x0040, + ECORE_MSG_TX_ERR = 0x0080, + ECORE_MSG_TX_QUEUED = 0x0100, + ECORE_MSG_INTR = 0x0200, + ECORE_MSG_TX_DONE = 0x0400, + ECORE_MSG_RX_STATUS = 0x0800, + ECORE_MSG_PKTDATA = 0x1000, + ECORE_MSG_HW = 0x2000, + ECORE_MSG_WOL = 0x4000, +#endif + ECORE_MSG_SPQ = 0x10000, + ECORE_MSG_STATS = 0x20000, + ECORE_MSG_DCB = 0x40000, + ECORE_MSG_IOV = 0x80000, + ECORE_MSG_SP = 0x100000, + ECORE_MSG_STORAGE = 0x200000, + ECORE_MSG_OOO = 0x200000, + ECORE_MSG_CXT = 0x800000, + ECORE_MSG_LL2 = 0x1000000, + ECORE_MSG_ILT = 0x2000000, + ECORE_MSG_RDMA = 0x4000000, + ECORE_MSG_DEBUG = 0x8000000, + /* to be added...up to 0x8000000 */ +}; +#endif + +#define for_each_hwfn(p_dev, i) for (i = 0; i < p_dev->num_hwfns; i++) + +#define D_TRINE(val, cond1, cond2, true1, true2, def) \ + (val == (cond1) ? true1 : \ + (val == (cond2) ? 
true2 : def)) + +/* forward */ +struct ecore_ptt_pool; +struct ecore_spq; +struct ecore_sb_info; +struct ecore_sb_attn_info; +struct ecore_cxt_mngr; +struct ecore_dma_mem; +struct ecore_sb_sp_info; +struct ecore_ll2_info; +struct ecore_l2_info; +struct ecore_igu_info; +struct ecore_mcp_info; +struct ecore_dcbx_info; +struct ecore_llh_info; + +struct ecore_rt_data { + u32 *init_val; + bool *b_valid; +}; + +enum ecore_tunn_mode { + ECORE_MODE_L2GENEVE_TUNN, + ECORE_MODE_IPGENEVE_TUNN, + ECORE_MODE_L2GRE_TUNN, + ECORE_MODE_IPGRE_TUNN, + ECORE_MODE_VXLAN_TUNN, +}; + +enum ecore_tunn_clss { + ECORE_TUNN_CLSS_MAC_VLAN, + ECORE_TUNN_CLSS_MAC_VNI, + ECORE_TUNN_CLSS_INNER_MAC_VLAN, + ECORE_TUNN_CLSS_INNER_MAC_VNI, + ECORE_TUNN_CLSS_MAC_VLAN_DUAL_STAGE, + MAX_ECORE_TUNN_CLSS, +}; + +struct ecore_tunn_update_type { + bool b_update_mode; + bool b_mode_enabled; + enum ecore_tunn_clss tun_cls; +}; + +struct ecore_tunn_update_udp_port { + bool b_update_port; + u16 port; +}; + +struct ecore_tunnel_info { + struct ecore_tunn_update_type vxlan; + struct ecore_tunn_update_type l2_geneve; + struct ecore_tunn_update_type ip_geneve; + struct ecore_tunn_update_type l2_gre; + struct ecore_tunn_update_type ip_gre; + + struct ecore_tunn_update_udp_port vxlan_port; + struct ecore_tunn_update_udp_port geneve_port; + + bool b_update_rx_cls; + bool b_update_tx_cls; +}; + +/* The PCI personality is not quite synonymous to protocol ID: + * 1. All personalities need CORE connections + * 2. The Ethernet personality may support also the RoCE/iWARP protocol + */ +enum ecore_pci_personality { + ECORE_PCI_ETH, + ECORE_PCI_FCOE, + ECORE_PCI_ISCSI, + ECORE_PCI_ETH_ROCE, + ECORE_PCI_ETH_IWARP, + ECORE_PCI_ETH_RDMA, + ECORE_PCI_DEFAULT /* default in shmem */ +}; + +/* All VFs are symmetric, all counters are PF + all VFs */ +struct ecore_qm_iids { + u32 cids; + u32 vf_cids; + u32 tids; +}; + +#define MAX_PF_PER_PORT 8 + +/* HW / FW resources, output of features supported below, most information + * is received from MFW. + */ +enum ecore_resources { + ECORE_L2_QUEUE, + ECORE_VPORT, + ECORE_RSS_ENG, + ECORE_PQ, + ECORE_RL, + ECORE_MAC, + ECORE_VLAN, + ECORE_RDMA_CNQ_RAM, + ECORE_ILT, + ECORE_LL2_QUEUE, + ECORE_CMDQS_CQS, + ECORE_RDMA_STATS_QUEUE, + ECORE_BDQ, + + /* This is needed only internally for matching against the IGU. + * In case of legacy MFW, would be set to `0'. 
+ */ + ECORE_SB, + + ECORE_MAX_RESC, +}; + +/* Features that require resources, given as input to the resource management + * algorithm, the outputs are the resources above + */ +enum ecore_feature { + ECORE_PF_L2_QUE, + ECORE_PF_TC, + ECORE_VF, + ECORE_EXTRA_VF_QUE, + ECORE_VMQ, + ECORE_RDMA_CNQ, + ECORE_ISCSI_CQ, + ECORE_FCOE_CQ, + ECORE_VF_L2_QUE, + ECORE_MAX_FEATURES, +}; + +enum ecore_port_mode { + ECORE_PORT_MODE_DE_2X40G, + ECORE_PORT_MODE_DE_2X50G, + ECORE_PORT_MODE_DE_1X100G, + ECORE_PORT_MODE_DE_4X10G_F, + ECORE_PORT_MODE_DE_4X10G_E, + ECORE_PORT_MODE_DE_4X20G, + ECORE_PORT_MODE_DE_1X40G, + ECORE_PORT_MODE_DE_2X25G, + ECORE_PORT_MODE_DE_1X25G, + ECORE_PORT_MODE_DE_4X25G, + ECORE_PORT_MODE_DE_2X10G, +}; + +enum ecore_dev_cap { + ECORE_DEV_CAP_ETH, + ECORE_DEV_CAP_FCOE, + ECORE_DEV_CAP_ISCSI, + ECORE_DEV_CAP_ROCE, + ECORE_DEV_CAP_IWARP +}; + +#ifndef __EXTRACT__LINUX__ +enum ecore_hw_err_type { + ECORE_HW_ERR_FAN_FAIL, + ECORE_HW_ERR_MFW_RESP_FAIL, + ECORE_HW_ERR_HW_ATTN, + ECORE_HW_ERR_DMAE_FAIL, + ECORE_HW_ERR_RAMROD_FAIL, + ECORE_HW_ERR_FW_ASSERT, +}; +#endif + +enum ecore_db_rec_exec { + DB_REC_DRY_RUN, + DB_REC_REAL_DEAL, + DB_REC_ONCE, +}; + +struct ecore_hw_info { + /* PCI personality */ + enum ecore_pci_personality personality; +#define ECORE_IS_RDMA_PERSONALITY(dev) \ + ((dev)->hw_info.personality == ECORE_PCI_ETH_ROCE || \ + (dev)->hw_info.personality == ECORE_PCI_ETH_IWARP || \ + (dev)->hw_info.personality == ECORE_PCI_ETH_RDMA) +#define ECORE_IS_ROCE_PERSONALITY(dev) \ + ((dev)->hw_info.personality == ECORE_PCI_ETH_ROCE || \ + (dev)->hw_info.personality == ECORE_PCI_ETH_RDMA) +#define ECORE_IS_IWARP_PERSONALITY(dev) \ + ((dev)->hw_info.personality == ECORE_PCI_ETH_IWARP || \ + (dev)->hw_info.personality == ECORE_PCI_ETH_RDMA) +#define ECORE_IS_L2_PERSONALITY(dev) \ + ((dev)->hw_info.personality == ECORE_PCI_ETH || \ + ECORE_IS_RDMA_PERSONALITY(dev)) +#define ECORE_IS_FCOE_PERSONALITY(dev) \ + ((dev)->hw_info.personality == ECORE_PCI_FCOE) +#define ECORE_IS_ISCSI_PERSONALITY(dev) \ + ((dev)->hw_info.personality == ECORE_PCI_ISCSI) + + /* Resource Allocation scheme results */ + u32 resc_start[ECORE_MAX_RESC]; + u32 resc_num[ECORE_MAX_RESC]; + u32 feat_num[ECORE_MAX_FEATURES]; + + #define RESC_START(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_start[resc]) + #define RESC_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_num[resc]) + #define RESC_END(_p_hwfn, resc) (RESC_START(_p_hwfn, resc) + \ + RESC_NUM(_p_hwfn, resc)) + #define FEAT_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.feat_num[resc]) + + /* Amount of traffic classes HW supports */ + u8 num_hw_tc; + +/* Amount of TCs which should be active according to DCBx or upper layer driver + * configuration + */ + + u8 num_active_tc; + + /* The traffic class used by PF for its offloaded protocol */ + u8 offload_tc; + + u32 concrete_fid; + u16 opaque_fid; + u16 ovlan; + u32 part_num[4]; + + unsigned char hw_mac_addr[ETH_ALEN]; + u64 node_wwn; /* For FCoE only */ + u64 port_wwn; /* For FCoE only */ + + u16 num_iscsi_conns; + u16 num_fcoe_conns; + + struct ecore_igu_info *p_igu_info; + /* Sriov */ + u8 max_chains_per_vf; + + u32 port_mode; + u32 hw_mode; + unsigned long device_capabilities; + + /* Default DCBX mode */ + u8 dcbx_mode; + + u16 mtu; +}; + +/* maximum size of read/write commands (HW limit) */ +#define DMAE_MAX_RW_SIZE 0x2000 + +struct ecore_dmae_info { + /* Spinlock for synchronizing access to functions */ + osal_spinlock_t lock; + + bool b_mem_ready; + + u8 channel; + + dma_addr_t completion_word_phys_addr; + + /* The memory
location where the DMAE writes the completion + * value when an operation is finished on this context. + */ + u32 *p_completion_word; + + dma_addr_t intermediate_buffer_phys_addr; + + /* An intermediate buffer for DMAE operations that use virtual + * addresses - data is DMA'd to/from this buffer and then + * memcpy'd to/from the virtual address + */ + u32 *p_intermediate_buffer; + + dma_addr_t dmae_cmd_phys_addr; + struct dmae_cmd *p_dmae_cmd; +}; + +struct ecore_wfq_data { + u32 default_min_speed; /* When wfq feature is not configured */ + u32 min_speed; /* when feature is configured for any 1 vport */ + bool configured; +}; + +#define OFLD_GRP_SIZE 4 + +struct ecore_qm_info { + struct init_qm_pq_params *qm_pq_params; + struct init_qm_vport_params *qm_vport_params; + struct init_qm_port_params *qm_port_params; + u16 start_pq; + u8 start_vport; + u16 pure_lb_pq; + u16 offload_pq; + u16 pure_ack_pq; + u16 ooo_pq; + u16 first_vf_pq; + u16 first_mcos_pq; + u16 first_rl_pq; + u16 num_pqs; + u16 num_vf_pqs; + u8 num_vports; + u8 max_phys_tcs_per_port; + u8 ooo_tc; + bool pf_rl_en; + bool pf_wfq_en; + bool vport_rl_en; + bool vport_wfq_en; + u8 pf_wfq; + u32 pf_rl; + struct ecore_wfq_data *wfq_data; + u8 num_pf_rls; +}; + +struct ecore_db_recovery_info { + osal_list_t list; + osal_spinlock_t lock; + u32 db_recovery_counter; +}; + +struct storm_stats { + u32 address; + u32 len; +}; + +struct ecore_fw_data { +#ifdef CONFIG_ECORE_BINARY_FW + struct fw_ver_info *fw_ver_info; +#endif + const u8 *modes_tree_buf; + union init_op *init_ops; + const u32 *arr_data; + const u32 *fw_overlays; + u32 fw_overlays_len; + u32 init_ops_size; +}; + +enum ecore_mf_mode_bit { + /* Supports PF-classification based on tag */ + ECORE_MF_OVLAN_CLSS, + + /* Supports PF-classification based on MAC */ + ECORE_MF_LLH_MAC_CLSS, + + /* Supports PF-classification based on protocol type */ + ECORE_MF_LLH_PROTO_CLSS, + + /* Requires a default PF to be set */ + ECORE_MF_NEED_DEF_PF, + + /* Allow LL2 to multicast/broadcast */ + ECORE_MF_LL2_NON_UNICAST, + + /* Allow Cross-PF [& child VFs] Tx-switching */ + ECORE_MF_INTER_PF_SWITCH, + + /* TODO - if we ever re-utilize any of this logic, we can rename */ + ECORE_MF_UFP_SPECIFIC, + + ECORE_MF_DISABLE_ARFS, + + /* Use vlan for steering */ + ECORE_MF_8021Q_TAGGING, + + /* Use stag for steering */ + ECORE_MF_8021AD_TAGGING, + + /* Allow FIP discovery fallback */ + ECORE_MF_FIP_SPECIAL, +}; + +enum ecore_ufp_mode { + ECORE_UFP_MODE_ETS, + ECORE_UFP_MODE_VNIC_BW, +}; + +enum ecore_ufp_pri_type { + ECORE_UFP_PRI_OS, + ECORE_UFP_PRI_VNIC +}; + +struct ecore_ufp_info { + enum ecore_ufp_pri_type pri_type; + enum ecore_ufp_mode mode; + u8 tc; +}; + +enum BAR_ID { + BAR_ID_0, /* used for GRC */ + BAR_ID_1 /* Used for doorbells */ +}; + +struct ecore_hwfn { + struct ecore_dev *p_dev; + u8 my_id; /* ID inside the PF */ +#define IS_LEAD_HWFN(edev) (!((edev)->my_id)) + u8 rel_pf_id; /* Relative to engine*/ + u8 abs_pf_id; +#define ECORE_PATH_ID(_p_hwfn) \ + (ECORE_IS_BB((_p_hwfn)->p_dev) ? 
((_p_hwfn)->abs_pf_id & 1) : 0) + u8 port_id; + bool b_active; + + u32 dp_module; + u8 dp_level; + char name[NAME_SIZE]; + void *dp_ctx; + + bool first_on_engine; + bool hw_init_done; + + u8 num_funcs_on_engine; + u8 enabled_func_idx; + u8 num_funcs_on_port; + + /* BAR access */ + void OSAL_IOMEM *regview; + void OSAL_IOMEM *doorbells; + u64 db_phys_addr; + unsigned long db_size; + + /* PTT pool */ + struct ecore_ptt_pool *p_ptt_pool; + + /* HW info */ + struct ecore_hw_info hw_info; + + /* rt_array (for init-tool) */ + struct ecore_rt_data rt_data; + + /* SPQ */ + struct ecore_spq *p_spq; + + /* EQ */ + struct ecore_eq *p_eq; + + /* Consolidate Q*/ + struct ecore_consq *p_consq; + + /* Slow-Path definitions */ + osal_dpc_t sp_dpc; + bool b_sp_dpc_enabled; + + struct ecore_ptt *p_main_ptt; + struct ecore_ptt *p_dpc_ptt; + + struct ecore_sb_sp_info *p_sp_sb; + struct ecore_sb_attn_info *p_sb_attn; + + /* Protocol related */ + bool using_ll2; + struct ecore_ll2_info *p_ll2_info; + struct ecore_ooo_info *p_ooo_info; + struct ecore_iscsi_info *p_iscsi_info; + struct ecore_fcoe_info *p_fcoe_info; + struct ecore_rdma_info *p_rdma_info; + struct ecore_pf_params pf_params; + + bool b_rdma_enabled_in_prs; + u32 rdma_prs_search_reg; + + struct ecore_cxt_mngr *p_cxt_mngr; + + /* Flag indicating whether interrupts are enabled or not*/ + bool b_int_enabled; + bool b_int_requested; + + /* True if the driver requests for the link */ + bool b_drv_link_init; + + struct ecore_vf_iov *vf_iov_info; + struct ecore_pf_iov *pf_iov_info; + struct ecore_mcp_info *mcp_info; + struct ecore_dcbx_info *p_dcbx_info; + struct ecore_ufp_info ufp_info; + + struct ecore_dmae_info dmae_info; + + /* QM init */ + struct ecore_qm_info qm_info; + +#ifdef CONFIG_ECORE_ZIPPED_FW + /* Buffer for unzipping firmware data */ + void *unzip_buf; +#endif + + struct dbg_tools_data dbg_info; + void *dbg_user_info; + + struct z_stream_s *stream; + + /* PWM region specific data */ + u32 dpi_size; + u32 dpi_count; + u32 dpi_start_offset; /* this is used to + * calculate the + * doorbell address + */ + + /* If one of the following is set then EDPM shouldn't be used */ + u8 dcbx_no_edpm; + u8 db_bar_no_edpm; + + /* L2-related */ + struct ecore_l2_info *p_l2_info; + + /* Mechanism for recovering from doorbell drop */ + struct ecore_db_recovery_info db_recovery_info; + + /* Enable/disable pacing, if requested to enable then + * IOV and mcos configuration will be skipped. + * This actually reflects the value requested in + * struct ecore_hw_prepare_params by the ecore client.
+ */ + bool b_en_pacing; + + struct phys_mem_desc *fw_overlay_mem; + + /* @DPDK */ + struct ecore_ptt *p_arfs_ptt; +}; + +enum ecore_mf_mode { + ECORE_MF_DEFAULT, + ECORE_MF_OVLAN, + ECORE_MF_NPAR, + ECORE_MF_UFP, +}; + +/* @DPDK */ +struct ecore_dbg_feature { + u8 *dump_buf; + u32 buf_size; + u32 dumped_dwords; +}; + +enum qed_dbg_features { + DBG_FEATURE_BUS, + DBG_FEATURE_GRC, + DBG_FEATURE_IDLE_CHK, + DBG_FEATURE_MCP_TRACE, + DBG_FEATURE_REG_FIFO, + DBG_FEATURE_PROTECTION_OVERRIDE, + DBG_FEATURE_NUM +}; + +enum ecore_dev_type { + ECORE_DEV_TYPE_BB, + ECORE_DEV_TYPE_AH, +}; + +struct ecore_dev { + u32 dp_module; + u8 dp_level; + char name[NAME_SIZE]; + void *dp_ctx; + + enum ecore_dev_type type; +/* Translate type/revision combo into the proper conditions */ +#define ECORE_IS_BB(dev) ((dev)->type == ECORE_DEV_TYPE_BB) +#define ECORE_IS_BB_A0(dev) (ECORE_IS_BB(dev) && CHIP_REV_IS_A0(dev)) +#ifndef ASIC_ONLY +#define ECORE_IS_BB_B0(dev) ((ECORE_IS_BB(dev) && CHIP_REV_IS_B0(dev)) || \ + (CHIP_REV_IS_TEDIBEAR(dev))) +#else +#define ECORE_IS_BB_B0(dev) (ECORE_IS_BB(dev) && CHIP_REV_IS_B0(dev)) +#endif +#define ECORE_IS_AH(dev) ((dev)->type == ECORE_DEV_TYPE_AH) +#define ECORE_IS_K2(dev) ECORE_IS_AH(dev) + + u16 vendor_id; + u16 device_id; +#define ECORE_DEV_ID_MASK 0xff00 +#define ECORE_DEV_ID_MASK_BB 0x1600 +#define ECORE_DEV_ID_MASK_AH 0x8000 + + u16 chip_num; +#define CHIP_NUM_MASK 0xffff +#define CHIP_NUM_SHIFT 0 + + u8 chip_rev; +#define CHIP_REV_MASK 0xf +#define CHIP_REV_SHIFT 0 +#ifndef ASIC_ONLY +#define CHIP_REV_IS_TEDIBEAR(_p_dev) ((_p_dev)->chip_rev == 0x5) +#define CHIP_REV_IS_EMUL_A0(_p_dev) ((_p_dev)->chip_rev == 0xe) +#define CHIP_REV_IS_EMUL_B0(_p_dev) ((_p_dev)->chip_rev == 0xc) +#define CHIP_REV_IS_EMUL(_p_dev) \ + (CHIP_REV_IS_EMUL_A0(_p_dev) || CHIP_REV_IS_EMUL_B0(_p_dev)) +#define CHIP_REV_IS_FPGA_A0(_p_dev) ((_p_dev)->chip_rev == 0xf) +#define CHIP_REV_IS_FPGA_B0(_p_dev) ((_p_dev)->chip_rev == 0xd) +#define CHIP_REV_IS_FPGA(_p_dev) \ + (CHIP_REV_IS_FPGA_A0(_p_dev) || CHIP_REV_IS_FPGA_B0(_p_dev)) +#define CHIP_REV_IS_SLOW(_p_dev) \ + (CHIP_REV_IS_EMUL(_p_dev) || CHIP_REV_IS_FPGA(_p_dev)) +#define CHIP_REV_IS_A0(_p_dev) \ + (CHIP_REV_IS_EMUL_A0(_p_dev) || CHIP_REV_IS_FPGA_A0(_p_dev) || \ + (!(_p_dev)->chip_rev && !(_p_dev)->chip_metal)) +#define CHIP_REV_IS_B0(_p_dev) \ + (CHIP_REV_IS_EMUL_B0(_p_dev) || CHIP_REV_IS_FPGA_B0(_p_dev) || \ + ((_p_dev)->chip_rev == 1 && !(_p_dev)->chip_metal)) +#define CHIP_REV_IS_ASIC(_p_dev) !CHIP_REV_IS_SLOW(_p_dev) +#else +#define CHIP_REV_IS_A0(_p_dev) \ + (!(_p_dev)->chip_rev && !(_p_dev)->chip_metal) +#define CHIP_REV_IS_B0(_p_dev) \ + ((_p_dev)->chip_rev == 1 && !(_p_dev)->chip_metal) +#endif + + u8 chip_metal; +#define CHIP_METAL_MASK 0xff +#define CHIP_METAL_SHIFT 0 + + u8 chip_bond_id; +#define CHIP_BOND_ID_MASK 0xff +#define CHIP_BOND_ID_SHIFT 0 + + u8 num_engines; + u8 num_ports; + u8 num_ports_in_engine; + u8 num_funcs_in_port; + + u8 path_id; + + unsigned long mf_bits; + enum ecore_mf_mode mf_mode; +#define IS_MF_DEFAULT(_p_hwfn) \ + (((_p_hwfn)->p_dev)->mf_mode == ECORE_MF_DEFAULT) +#define IS_MF_SI(_p_hwfn) \ + (((_p_hwfn)->p_dev)->mf_mode == ECORE_MF_NPAR) +#define IS_MF_SD(_p_hwfn) \ + (((_p_hwfn)->p_dev)->mf_mode == ECORE_MF_OVLAN) + + int pcie_width; + int pcie_speed; + + /* Add MF related configuration */ + u8 mcp_rev; + u8 boot_mode; + + u8 wol; + + u32 int_mode; + enum ecore_coalescing_mode int_coalescing_mode; + u16 rx_coalesce_usecs; + u16 tx_coalesce_usecs; + + /* Start Bar offset of first hwfn */ + void OSAL_IOMEM 
*regview; + void OSAL_IOMEM *doorbells; + u64 db_phys_addr; + unsigned long db_size; + + /* PCI */ + u8 cache_shift; + + /* Init */ + const u32 *iro_arr; +#define IRO ((const struct iro *)p_hwfn->p_dev->iro_arr) + + /* HW functions */ + u8 num_hwfns; + struct ecore_hwfn hwfns[MAX_HWFNS_PER_DEVICE]; +#define ECORE_LEADING_HWFN(dev) (&dev->hwfns[0]) +#define ECORE_IS_CMT(dev) ((dev)->num_hwfns > 1) + + /* Engine affinity */ + u8 l2_affin_hint; + u8 fir_affin; + u8 iwarp_affin; + /* Macro for getting the engine-affinitized hwfn for FCoE/iSCSI/RoCE */ +#define ECORE_FIR_AFFIN_HWFN(dev) (&dev->hwfns[dev->fir_affin]) + /* Macro for getting the engine-affinitized hwfn for iWARP */ +#define ECORE_IWARP_AFFIN_HWFN(dev) (&dev->hwfns[dev->iwarp_affin]) + /* Generic macro for getting the engine-affinitized hwfn */ +#define ECORE_AFFIN_HWFN(dev) \ + (ECORE_IS_IWARP_PERSONALITY(ECORE_LEADING_HWFN(dev)) ? \ + ECORE_IWARP_AFFIN_HWFN(dev) : \ + ECORE_FIR_AFFIN_HWFN(dev)) + /* Macro for getting the index (0/1) of the engine-affinitized hwfn */ +#define ECORE_AFFIN_HWFN_IDX(dev) \ + (IS_LEAD_HWFN(ECORE_AFFIN_HWFN(dev)) ? 0 : 1) + + /* SRIOV */ + struct ecore_hw_sriov_info *p_iov_info; +#define IS_ECORE_SRIOV(p_dev) (!!(p_dev)->p_iov_info) + struct ecore_tunnel_info tunnel; + bool b_is_vf; + bool b_dont_override_vf_msix; + + u32 drv_type; + + u32 rdma_max_sge; + u32 rdma_max_inline; + u32 rdma_max_srq_sge; + + struct ecore_eth_stats *reset_stats; + struct ecore_fw_data *fw_data; + + u32 mcp_nvm_resp; + + /* Recovery */ + bool recov_in_prog; + +/* Indicates whether should prevent attentions from being reasserted */ + + bool attn_clr_en; + + /* Indicates whether allowing the MFW to collect a crash dump */ + bool allow_mdump; + + /* Indicates if the reg_fifo is checked after any register access */ + bool chk_reg_fifo; + +#ifndef ASIC_ONLY + bool b_is_emul_full; + bool b_is_emul_mac; +#endif + /* LLH info */ + u8 ppfid_bitmap; + struct ecore_llh_info *p_llh_info; + + /* Indicates whether this PF serves a storage target */ + bool b_is_target; + +#ifdef CONFIG_ECORE_BINARY_FW /* @DPDK */ + void *firmware; + u64 fw_len; +#endif + + /* @DPDK */ + struct ecore_dbg_feature dbg_features[DBG_FEATURE_NUM]; + u8 engine_for_debug; +}; + +enum ecore_hsi_def_type { + ECORE_HSI_DEF_MAX_NUM_VFS, + ECORE_HSI_DEF_MAX_NUM_L2_QUEUES, + ECORE_HSI_DEF_MAX_NUM_PORTS, + ECORE_HSI_DEF_MAX_SB_PER_PATH, + ECORE_HSI_DEF_MAX_NUM_PFS, + ECORE_HSI_DEF_MAX_NUM_VPORTS, + ECORE_HSI_DEF_NUM_ETH_RSS_ENGINE, + ECORE_HSI_DEF_MAX_QM_TX_QUEUES, + ECORE_HSI_DEF_NUM_PXP_ILT_RECORDS, + ECORE_HSI_DEF_NUM_RDMA_STATISTIC_COUNTERS, + ECORE_HSI_DEF_MAX_QM_GLOBAL_RLS, + ECORE_HSI_DEF_MAX_PBF_CMD_LINES, + ECORE_HSI_DEF_MAX_BTB_BLOCKS, + ECORE_NUM_HSI_DEFS +}; + +u32 ecore_get_hsi_def_val(struct ecore_dev *p_dev, + enum ecore_hsi_def_type type); + +#define NUM_OF_VFS(dev) \ + ecore_get_hsi_def_val(dev, ECORE_HSI_DEF_MAX_NUM_VFS) +#define NUM_OF_L2_QUEUES(dev) \ + ecore_get_hsi_def_val(dev, ECORE_HSI_DEF_MAX_NUM_L2_QUEUES) +#define NUM_OF_PORTS(dev) \ + ecore_get_hsi_def_val(dev, ECORE_HSI_DEF_MAX_NUM_PORTS) +#define NUM_OF_SBS(dev) \ + ecore_get_hsi_def_val(dev, ECORE_HSI_DEF_MAX_SB_PER_PATH) +#define NUM_OF_ENG_PFS(dev) \ + ecore_get_hsi_def_val(dev, ECORE_HSI_DEF_MAX_NUM_PFS) +#define NUM_OF_VPORTS(dev) \ + ecore_get_hsi_def_val(dev, ECORE_HSI_DEF_MAX_NUM_VPORTS) +#define NUM_OF_RSS_ENGINES(dev) \ + ecore_get_hsi_def_val(dev, ECORE_HSI_DEF_NUM_ETH_RSS_ENGINE) +#define NUM_OF_QM_TX_QUEUES(dev) \ + ecore_get_hsi_def_val(dev, ECORE_HSI_DEF_MAX_QM_TX_QUEUES) 
+#define NUM_OF_PXP_ILT_RECORDS(dev) \ + ecore_get_hsi_def_val(dev, ECORE_HSI_DEF_NUM_PXP_ILT_RECORDS) +#define NUM_OF_RDMA_STATISTIC_COUNTERS(dev) \ + ecore_get_hsi_def_val(dev, ECORE_HSI_DEF_NUM_RDMA_STATISTIC_COUNTERS) +#define NUM_OF_QM_GLOBAL_RLS(dev) \ + ecore_get_hsi_def_val(dev, ECORE_HSI_DEF_MAX_QM_GLOBAL_RLS) +#define NUM_OF_PBF_CMD_LINES(dev) \ + ecore_get_hsi_def_val(dev, ECORE_HSI_DEF_MAX_PBF_CMD_LINES) +#define NUM_OF_BTB_BLOCKS(dev) \ + ecore_get_hsi_def_val(dev, ECORE_HSI_DEF_MAX_BTB_BLOCKS) + +#define CRC8_TABLE_SIZE 256 + +/** + * @brief ecore_concrete_to_sw_fid - get the sw function id from + * the concrete value. + * + * @param concrete_fid + * + * @return OSAL_INLINE u8 + */ +static OSAL_INLINE u8 ecore_concrete_to_sw_fid(u32 concrete_fid) +{ + u8 vfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFID); + u8 pfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID); + u8 vf_valid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFVALID); + u8 sw_fid; + + if (vf_valid) + sw_fid = vfid + MAX_NUM_PFS; + else + sw_fid = pfid; + + return sw_fid; +} + +#define PKT_LB_TC 9 + +int ecore_configure_vport_wfq(struct ecore_dev *p_dev, u16 vp_id, u32 rate); +void ecore_configure_vp_wfq_on_link_change(struct ecore_dev *p_dev, + struct ecore_ptt *p_ptt, + u32 min_pf_rate); + +int ecore_configure_pf_max_bandwidth(struct ecore_dev *p_dev, u8 max_bw); +int ecore_configure_pf_min_bandwidth(struct ecore_dev *p_dev, u8 min_bw); +void ecore_clean_wfq_db(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt); +int ecore_device_num_engines(struct ecore_dev *p_dev); +int ecore_device_num_ports(struct ecore_dev *p_dev); +void ecore_set_fw_mac_addr(__le16 *fw_msb, __le16 *fw_mid, __le16 *fw_lsb, + u8 *mac); + +/* Flags for indication of required queues */ +#define PQ_FLAGS_RLS (1 << 0) +#define PQ_FLAGS_MCOS (1 << 1) +#define PQ_FLAGS_LB (1 << 2) +#define PQ_FLAGS_OOO (1 << 3) +#define PQ_FLAGS_ACK (1 << 4) +#define PQ_FLAGS_OFLD (1 << 5) +#define PQ_FLAGS_VFS (1 << 6) +#define PQ_FLAGS_LLT (1 << 7) + +/* physical queue index for cm context initialization */ +u16 ecore_get_cm_pq_idx(struct ecore_hwfn *p_hwfn, u32 pq_flags); +u16 ecore_get_cm_pq_idx_mcos(struct ecore_hwfn *p_hwfn, u8 tc); +u16 ecore_get_cm_pq_idx_vf(struct ecore_hwfn *p_hwfn, u16 vf); +u16 ecore_get_cm_pq_idx_rl(struct ecore_hwfn *p_hwfn, u16 rl); + +/* qm vport for rate limit configuration */ +u16 ecore_get_qm_vport_idx_rl(struct ecore_hwfn *p_hwfn, u16 rl); + +const char *ecore_hw_get_resc_name(enum ecore_resources res_id); + +/* doorbell recovery mechanism */ +void ecore_db_recovery_dp(struct ecore_hwfn *p_hwfn); +void ecore_db_recovery_execute(struct ecore_hwfn *p_hwfn, + enum ecore_db_rec_exec); + +bool ecore_edpm_enabled(struct ecore_hwfn *p_hwfn); + +/* amount of resources used in qm init */ +u8 ecore_init_qm_get_num_tcs(struct ecore_hwfn *p_hwfn); +u16 ecore_init_qm_get_num_vfs(struct ecore_hwfn *p_hwfn); +u16 ecore_init_qm_get_num_pf_rls(struct ecore_hwfn *p_hwfn); +u16 ecore_init_qm_get_num_vports(struct ecore_hwfn *p_hwfn); +u16 ecore_init_qm_get_num_pqs(struct ecore_hwfn *p_hwfn); + +#define MFW_PORT(_p_hwfn) ((_p_hwfn)->abs_pf_id % \ + ecore_device_num_ports((_p_hwfn)->p_dev)) + +/* The PFID<->PPFID calculation is based on the relative index of a PF on its + * port. In BB there is a bug in the LLH in which the PPFID is actually engine + * based, and thus it equals the PFID. + */ +#define ECORE_PFID_BY_PPFID(_p_hwfn, abs_ppfid) \ + (ECORE_IS_BB((_p_hwfn)->p_dev) ?
\ + (abs_ppfid) : \ + (abs_ppfid) * (_p_hwfn)->p_dev->num_ports_in_engine + \ + MFW_PORT(_p_hwfn)) +#define ECORE_PPFID_BY_PFID(_p_hwfn) \ + (ECORE_IS_BB((_p_hwfn)->p_dev) ? \ + (_p_hwfn)->rel_pf_id : \ + (_p_hwfn)->rel_pf_id / (_p_hwfn)->p_dev->num_ports_in_engine) + +enum _ecore_status_t ecore_all_ppfids_wr(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, u32 addr, + u32 val); + +/* Utility functions for dumping the content of the NIG LLH filters */ +enum _ecore_status_t ecore_llh_dump_ppfid(struct ecore_dev *p_dev, u8 ppfid); +enum _ecore_status_t ecore_llh_dump_all(struct ecore_dev *p_dev); + +#define TSTORM_QZONE_START PXP_VF_BAR0_START_SDM_ZONE_A + +#define MSTORM_QZONE_START(dev) \ + (TSTORM_QZONE_START + (TSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev))) + +#endif /* __ECORE_H */ diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_attn_values.h b/src/spdk/dpdk/drivers/net/qede/base/ecore_attn_values.h new file mode 100644 index 000000000..ec773fbdd --- /dev/null +++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_attn_values.h @@ -0,0 +1,13285 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. + * All rights reserved. + * www.cavium.com + */ + +#ifndef __ATTN_VALUES_H__ +#define __ATTN_VALUES_H__ + +#ifndef __PREVENT_INT_ATTN__ + +/* HW Attention register */ +struct attn_hw_reg { + u16 reg_idx; /* Index of this register in its block */ + u16 num_of_bits; /* number of valid attention bits */ + const u16 *bit_attn_idx; /* attention index per valid bit */ + u32 sts_addr; /* Address of the STS register */ + u32 sts_clr_addr; /* Address of the STS_CLR register */ + u32 sts_wr_addr; /* Address of the STS_WR register */ + u32 mask_addr; /* Address of the MASK register */ +}; + +/* HW block attention registers */ +struct attn_hw_regs { + u16 num_of_int_regs; /* Number of interrupt regs */ + u16 num_of_prty_regs; /* Number of parity regs */ + struct attn_hw_reg **int_regs; /* interrupt regs */ + struct attn_hw_reg **prty_regs; /* parity regs */ +}; + +/* HW block attention registers */ +struct attn_hw_block { + const char *name; /* Block name */ + const char **int_desc; /* Array of interrupt attention descriptions */ + const char **prty_desc; /* Array of parity attention descriptions */ + struct attn_hw_regs chip_regs[3]; /* attention regs per chip.*/ +}; + +#ifdef ATTN_DESC +static const char *grc_int_attn_desc[5] = { + "grc_address_error", + "grc_timeout_event", + "grc_global_reserved_address", + "grc_path_isolation_error", + "grc_trace_fifo_valid_data", +}; +#else +#define grc_int_attn_desc OSAL_NULL +#endif + +static const u16 grc_int0_bb_a0_attn_idx[4] = { + 0, 1, 2, 3, +}; + +static struct attn_hw_reg grc_int0_bb_a0 = { + 0, 4, grc_int0_bb_a0_attn_idx, 0x50180, 0x5018c, 0x50188, 0x50184 +}; + +static struct attn_hw_reg *grc_int_bb_a0_regs[1] = { + &grc_int0_bb_a0, +}; + +static const u16 grc_int0_bb_b0_attn_idx[4] = { + 0, 1, 2, 3, +}; + +static struct attn_hw_reg grc_int0_bb_b0 = { + 0, 4, grc_int0_bb_b0_attn_idx, 0x50180, 0x5018c, 0x50188, 0x50184 +}; + +static struct attn_hw_reg *grc_int_bb_b0_regs[1] = { + &grc_int0_bb_b0, +}; + +static const u16 grc_int0_k2_attn_idx[5] = { + 0, 1, 2, 3, 4, +}; + +static struct attn_hw_reg grc_int0_k2 = { + 0, 5, grc_int0_k2_attn_idx, 0x50180, 0x5018c, 0x50188, 0x50184 +}; + +static struct attn_hw_reg *grc_int_k2_regs[1] = { + &grc_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *grc_prty_attn_desc[3] = { + "grc_mem003_i_mem_prty", + "grc_mem002_i_mem_prty", + "grc_mem001_i_mem_prty", +}; +#else +#define 
grc_prty_attn_desc OSAL_NULL +#endif + +static const u16 grc_prty1_bb_a0_attn_idx[2] = { + 1, 2, +}; + +static struct attn_hw_reg grc_prty1_bb_a0 = { + 0, 2, grc_prty1_bb_a0_attn_idx, 0x50200, 0x5020c, 0x50208, 0x50204 +}; + +static struct attn_hw_reg *grc_prty_bb_a0_regs[1] = { + &grc_prty1_bb_a0, +}; + +static const u16 grc_prty1_bb_b0_attn_idx[2] = { + 0, 1, +}; + +static struct attn_hw_reg grc_prty1_bb_b0 = { + 0, 2, grc_prty1_bb_b0_attn_idx, 0x50200, 0x5020c, 0x50208, 0x50204 +}; + +static struct attn_hw_reg *grc_prty_bb_b0_regs[1] = { + &grc_prty1_bb_b0, +}; + +static const u16 grc_prty1_k2_attn_idx[2] = { + 0, 1, +}; + +static struct attn_hw_reg grc_prty1_k2 = { + 0, 2, grc_prty1_k2_attn_idx, 0x50200, 0x5020c, 0x50208, 0x50204 +}; + +static struct attn_hw_reg *grc_prty_k2_regs[1] = { + &grc_prty1_k2, +}; + +#ifdef ATTN_DESC +static const char *miscs_int_attn_desc[14] = { + "miscs_address_error", + "miscs_generic_sw", + "miscs_cnig_interrupt", + "miscs_opte_dorq_fifo_err_eng1", + "miscs_opte_dorq_fifo_err_eng0", + "miscs_opte_dbg_fifo_err_eng1", + "miscs_opte_dbg_fifo_err_eng0", + "miscs_opte_btb_if1_fifo_err_eng1", + "miscs_opte_btb_if1_fifo_err_eng0", + "miscs_opte_btb_if0_fifo_err_eng1", + "miscs_opte_btb_if0_fifo_err_eng0", + "miscs_opte_btb_sop_fifo_err_eng1", + "miscs_opte_btb_sop_fifo_err_eng0", + "miscs_opte_storm_fifo_err_eng0", +}; +#else +#define miscs_int_attn_desc OSAL_NULL +#endif + +static const u16 miscs_int0_bb_a0_attn_idx[2] = { + 0, 1, +}; + +static struct attn_hw_reg miscs_int0_bb_a0 = { + 0, 2, miscs_int0_bb_a0_attn_idx, 0x9180, 0x918c, 0x9188, 0x9184 +}; + +static const u16 miscs_int1_bb_a0_attn_idx[11] = { + 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, +}; + +static struct attn_hw_reg miscs_int1_bb_a0 = { + 1, 11, miscs_int1_bb_a0_attn_idx, 0x9190, 0x919c, 0x9198, 0x9194 +}; + +static struct attn_hw_reg *miscs_int_bb_a0_regs[2] = { + &miscs_int0_bb_a0, &miscs_int1_bb_a0, +}; + +static const u16 miscs_int0_bb_b0_attn_idx[3] = { + 0, 1, 2, +}; + +static struct attn_hw_reg miscs_int0_bb_b0 = { + 0, 3, miscs_int0_bb_b0_attn_idx, 0x9180, 0x918c, 0x9188, 0x9184 +}; + +static const u16 miscs_int1_bb_b0_attn_idx[11] = { + 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, +}; + +static struct attn_hw_reg miscs_int1_bb_b0 = { + 1, 11, miscs_int1_bb_b0_attn_idx, 0x9190, 0x919c, 0x9198, 0x9194 +}; + +static struct attn_hw_reg *miscs_int_bb_b0_regs[2] = { + &miscs_int0_bb_b0, &miscs_int1_bb_b0, +}; + +static const u16 miscs_int0_k2_attn_idx[3] = { + 0, 1, 2, +}; + +static struct attn_hw_reg miscs_int0_k2 = { + 0, 3, miscs_int0_k2_attn_idx, 0x9180, 0x918c, 0x9188, 0x9184 +}; + +static struct attn_hw_reg *miscs_int_k2_regs[1] = { + &miscs_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *miscs_prty_attn_desc[1] = { + "miscs_cnig_parity", +}; +#else +#define miscs_prty_attn_desc OSAL_NULL +#endif + +static const u16 miscs_prty0_bb_b0_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg miscs_prty0_bb_b0 = { + 0, 1, miscs_prty0_bb_b0_attn_idx, 0x91a0, 0x91ac, 0x91a8, 0x91a4 +}; + +static struct attn_hw_reg *miscs_prty_bb_b0_regs[1] = { + &miscs_prty0_bb_b0, +}; + +static const u16 miscs_prty0_k2_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg miscs_prty0_k2 = { + 0, 1, miscs_prty0_k2_attn_idx, 0x91a0, 0x91ac, 0x91a8, 0x91a4 +}; + +static struct attn_hw_reg *miscs_prty_k2_regs[1] = { + &miscs_prty0_k2, +}; + +#ifdef ATTN_DESC +static const char *misc_int_attn_desc[1] = { + "misc_address_error", +}; +#else +#define misc_int_attn_desc OSAL_NULL +#endif + +static const u16 
misc_int0_bb_a0_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg misc_int0_bb_a0 = { + 0, 1, misc_int0_bb_a0_attn_idx, 0x8180, 0x818c, 0x8188, 0x8184 +}; + +static struct attn_hw_reg *misc_int_bb_a0_regs[1] = { + &misc_int0_bb_a0, +}; + +static const u16 misc_int0_bb_b0_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg misc_int0_bb_b0 = { + 0, 1, misc_int0_bb_b0_attn_idx, 0x8180, 0x818c, 0x8188, 0x8184 +}; + +static struct attn_hw_reg *misc_int_bb_b0_regs[1] = { + &misc_int0_bb_b0, +}; + +static const u16 misc_int0_k2_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg misc_int0_k2 = { + 0, 1, misc_int0_k2_attn_idx, 0x8180, 0x818c, 0x8188, 0x8184 +}; + +static struct attn_hw_reg *misc_int_k2_regs[1] = { + &misc_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *pglue_b_int_attn_desc[24] = { + "pglue_b_address_error", + "pglue_b_incorrect_rcv_behavior", + "pglue_b_was_error_attn", + "pglue_b_vf_length_violation_attn", + "pglue_b_vf_grc_space_violation_attn", + "pglue_b_tcpl_error_attn", + "pglue_b_tcpl_in_two_rcbs_attn", + "pglue_b_cssnoop_fifo_overflow", + "pglue_b_tcpl_translation_size_different", + "pglue_b_pcie_rx_l0s_timeout", + "pglue_b_master_zlr_attn", + "pglue_b_admin_window_violation_attn", + "pglue_b_out_of_range_function_in_pretend", + "pglue_b_illegal_address", + "pglue_b_pgl_cpl_err", + "pglue_b_pgl_txw_of", + "pglue_b_pgl_cpl_aft", + "pglue_b_pgl_cpl_of", + "pglue_b_pgl_cpl_ecrc", + "pglue_b_pgl_pcie_attn", + "pglue_b_pgl_read_blocked", + "pglue_b_pgl_write_blocked", + "pglue_b_vf_ilt_err", + "pglue_b_rxobffexception_attn", +}; +#else +#define pglue_b_int_attn_desc OSAL_NULL +#endif + +static const u16 pglue_b_int0_bb_a0_attn_idx[23] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, +}; + +static struct attn_hw_reg pglue_b_int0_bb_a0 = { + 0, 23, pglue_b_int0_bb_a0_attn_idx, 0x2a8180, 0x2a818c, 0x2a8188, + 0x2a8184 +}; + +static struct attn_hw_reg *pglue_b_int_bb_a0_regs[1] = { + &pglue_b_int0_bb_a0, +}; + +static const u16 pglue_b_int0_bb_b0_attn_idx[23] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, +}; + +static struct attn_hw_reg pglue_b_int0_bb_b0 = { + 0, 23, pglue_b_int0_bb_b0_attn_idx, 0x2a8180, 0x2a818c, 0x2a8188, + 0x2a8184 +}; + +static struct attn_hw_reg *pglue_b_int_bb_b0_regs[1] = { + &pglue_b_int0_bb_b0, +}; + +static const u16 pglue_b_int0_k2_attn_idx[24] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, +}; + +static struct attn_hw_reg pglue_b_int0_k2 = { + 0, 24, pglue_b_int0_k2_attn_idx, 0x2a8180, 0x2a818c, 0x2a8188, 0x2a8184 +}; + +static struct attn_hw_reg *pglue_b_int_k2_regs[1] = { + &pglue_b_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *pglue_b_prty_attn_desc[35] = { + "pglue_b_datapath_registers", + "pglue_b_mem027_i_mem_prty", + "pglue_b_mem007_i_mem_prty", + "pglue_b_mem009_i_mem_prty", + "pglue_b_mem010_i_mem_prty", + "pglue_b_mem008_i_mem_prty", + "pglue_b_mem022_i_mem_prty", + "pglue_b_mem023_i_mem_prty", + "pglue_b_mem024_i_mem_prty", + "pglue_b_mem025_i_mem_prty", + "pglue_b_mem004_i_mem_prty", + "pglue_b_mem005_i_mem_prty", + "pglue_b_mem011_i_mem_prty", + "pglue_b_mem016_i_mem_prty", + "pglue_b_mem017_i_mem_prty", + "pglue_b_mem012_i_mem_prty", + "pglue_b_mem013_i_mem_prty", + "pglue_b_mem014_i_mem_prty", + "pglue_b_mem015_i_mem_prty", + "pglue_b_mem018_i_mem_prty", + "pglue_b_mem020_i_mem_prty", + "pglue_b_mem021_i_mem_prty", + "pglue_b_mem019_i_mem_prty", + "pglue_b_mem026_i_mem_prty", + 
"pglue_b_mem006_i_mem_prty", + "pglue_b_mem003_i_mem_prty", + "pglue_b_mem002_i_mem_prty_0", + "pglue_b_mem002_i_mem_prty_1", + "pglue_b_mem002_i_mem_prty_2", + "pglue_b_mem002_i_mem_prty_3", + "pglue_b_mem002_i_mem_prty_4", + "pglue_b_mem002_i_mem_prty_5", + "pglue_b_mem002_i_mem_prty_6", + "pglue_b_mem002_i_mem_prty_7", + "pglue_b_mem001_i_mem_prty", +}; +#else +#define pglue_b_prty_attn_desc OSAL_NULL +#endif + +static const u16 pglue_b_prty1_bb_a0_attn_idx[22] = { + 2, 3, 4, 5, 10, 11, 12, 15, 16, 17, 18, 24, 25, 26, 27, 28, 29, 30, 31, + 32, 33, 34, +}; + +static struct attn_hw_reg pglue_b_prty1_bb_a0 = { + 0, 22, pglue_b_prty1_bb_a0_attn_idx, 0x2a8200, 0x2a820c, 0x2a8208, + 0x2a8204 +}; + +static struct attn_hw_reg *pglue_b_prty_bb_a0_regs[1] = { + &pglue_b_prty1_bb_a0, +}; + +static const u16 pglue_b_prty0_bb_b0_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg pglue_b_prty0_bb_b0 = { + 0, 1, pglue_b_prty0_bb_b0_attn_idx, 0x2a8190, 0x2a819c, 0x2a8198, + 0x2a8194 +}; + +static const u16 pglue_b_prty1_bb_b0_attn_idx[22] = { + 2, 3, 4, 5, 10, 11, 12, 15, 16, 17, 18, 24, 25, 26, 27, 28, 29, 30, 31, + 32, 33, 34, +}; + +static struct attn_hw_reg pglue_b_prty1_bb_b0 = { + 1, 22, pglue_b_prty1_bb_b0_attn_idx, 0x2a8200, 0x2a820c, 0x2a8208, + 0x2a8204 +}; + +static struct attn_hw_reg *pglue_b_prty_bb_b0_regs[2] = { + &pglue_b_prty0_bb_b0, &pglue_b_prty1_bb_b0, +}; + +static const u16 pglue_b_prty0_k2_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg pglue_b_prty0_k2 = { + 0, 1, pglue_b_prty0_k2_attn_idx, 0x2a8190, 0x2a819c, 0x2a8198, 0x2a8194 +}; + +static const u16 pglue_b_prty1_k2_attn_idx[31] = { + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, + 21, + 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, +}; + +static struct attn_hw_reg pglue_b_prty1_k2 = { + 1, 31, pglue_b_prty1_k2_attn_idx, 0x2a8200, 0x2a820c, 0x2a8208, + 0x2a8204 +}; + +static const u16 pglue_b_prty2_k2_attn_idx[3] = { + 32, 33, 34, +}; + +static struct attn_hw_reg pglue_b_prty2_k2 = { + 2, 3, pglue_b_prty2_k2_attn_idx, 0x2a8210, 0x2a821c, 0x2a8218, 0x2a8214 +}; + +static struct attn_hw_reg *pglue_b_prty_k2_regs[3] = { + &pglue_b_prty0_k2, &pglue_b_prty1_k2, &pglue_b_prty2_k2, +}; + +#ifdef ATTN_DESC +static const char *cnig_int_attn_desc[10] = { + "cnig_address_error", + "cnig_tx_illegal_sop_port0", + "cnig_tx_illegal_sop_port1", + "cnig_tx_illegal_sop_port2", + "cnig_tx_illegal_sop_port3", + "cnig_tdm_lane_0_bandwidth_exceed", + "cnig_tdm_lane_1_bandwidth_exceed", + "cnig_pmeg_intr", + "cnig_pmfc_intr", + "cnig_fifo_error", +}; +#else +#define cnig_int_attn_desc OSAL_NULL +#endif + +static const u16 cnig_int0_bb_a0_attn_idx[4] = { + 0, 7, 8, 9, +}; + +static struct attn_hw_reg cnig_int0_bb_a0 = { + 0, 4, cnig_int0_bb_a0_attn_idx, 0x2182e8, 0x2182f4, 0x2182f0, 0x2182ec +}; + +static struct attn_hw_reg *cnig_int_bb_a0_regs[1] = { + &cnig_int0_bb_a0, +}; + +static const u16 cnig_int0_bb_b0_attn_idx[6] = { + 0, 1, 3, 7, 8, 9, +}; + +static struct attn_hw_reg cnig_int0_bb_b0 = { + 0, 6, cnig_int0_bb_b0_attn_idx, 0x2182e8, 0x2182f4, 0x2182f0, 0x2182ec +}; + +static struct attn_hw_reg *cnig_int_bb_b0_regs[1] = { + &cnig_int0_bb_b0, +}; + +static const u16 cnig_int0_k2_attn_idx[7] = { + 0, 1, 2, 3, 4, 5, 6, +}; + +static struct attn_hw_reg cnig_int0_k2 = { + 0, 7, cnig_int0_k2_attn_idx, 0x218218, 0x218224, 0x218220, 0x21821c +}; + +static struct attn_hw_reg *cnig_int_k2_regs[1] = { + &cnig_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *cnig_prty_attn_desc[3] = { + "cnig_unused_0", + 
"cnig_datapath_tx", + "cnig_datapath_rx", +}; +#else +#define cnig_prty_attn_desc OSAL_NULL +#endif + +static const u16 cnig_prty0_bb_b0_attn_idx[2] = { + 1, 2, +}; + +static struct attn_hw_reg cnig_prty0_bb_b0 = { + 0, 2, cnig_prty0_bb_b0_attn_idx, 0x218348, 0x218354, 0x218350, 0x21834c +}; + +static struct attn_hw_reg *cnig_prty_bb_b0_regs[1] = { + &cnig_prty0_bb_b0, +}; + +static const u16 cnig_prty0_k2_attn_idx[1] = { + 1, +}; + +static struct attn_hw_reg cnig_prty0_k2 = { + 0, 1, cnig_prty0_k2_attn_idx, 0x21822c, 0x218238, 0x218234, 0x218230 +}; + +static struct attn_hw_reg *cnig_prty_k2_regs[1] = { + &cnig_prty0_k2, +}; + +#ifdef ATTN_DESC +static const char *cpmu_int_attn_desc[1] = { + "cpmu_address_error", +}; +#else +#define cpmu_int_attn_desc OSAL_NULL +#endif + +static const u16 cpmu_int0_bb_a0_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg cpmu_int0_bb_a0 = { + 0, 1, cpmu_int0_bb_a0_attn_idx, 0x303e0, 0x303ec, 0x303e8, 0x303e4 +}; + +static struct attn_hw_reg *cpmu_int_bb_a0_regs[1] = { + &cpmu_int0_bb_a0, +}; + +static const u16 cpmu_int0_bb_b0_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg cpmu_int0_bb_b0 = { + 0, 1, cpmu_int0_bb_b0_attn_idx, 0x303e0, 0x303ec, 0x303e8, 0x303e4 +}; + +static struct attn_hw_reg *cpmu_int_bb_b0_regs[1] = { + &cpmu_int0_bb_b0, +}; + +static const u16 cpmu_int0_k2_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg cpmu_int0_k2 = { + 0, 1, cpmu_int0_k2_attn_idx, 0x303e0, 0x303ec, 0x303e8, 0x303e4 +}; + +static struct attn_hw_reg *cpmu_int_k2_regs[1] = { + &cpmu_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *ncsi_int_attn_desc[1] = { + "ncsi_address_error", +}; +#else +#define ncsi_int_attn_desc OSAL_NULL +#endif + +static const u16 ncsi_int0_bb_a0_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg ncsi_int0_bb_a0 = { + 0, 1, ncsi_int0_bb_a0_attn_idx, 0x404cc, 0x404d8, 0x404d4, 0x404d0 +}; + +static struct attn_hw_reg *ncsi_int_bb_a0_regs[1] = { + &ncsi_int0_bb_a0, +}; + +static const u16 ncsi_int0_bb_b0_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg ncsi_int0_bb_b0 = { + 0, 1, ncsi_int0_bb_b0_attn_idx, 0x404cc, 0x404d8, 0x404d4, 0x404d0 +}; + +static struct attn_hw_reg *ncsi_int_bb_b0_regs[1] = { + &ncsi_int0_bb_b0, +}; + +static const u16 ncsi_int0_k2_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg ncsi_int0_k2 = { + 0, 1, ncsi_int0_k2_attn_idx, 0x404cc, 0x404d8, 0x404d4, 0x404d0 +}; + +static struct attn_hw_reg *ncsi_int_k2_regs[1] = { + &ncsi_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *ncsi_prty_attn_desc[1] = { + "ncsi_mem002_i_mem_prty", +}; +#else +#define ncsi_prty_attn_desc OSAL_NULL +#endif + +static const u16 ncsi_prty1_bb_a0_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg ncsi_prty1_bb_a0 = { + 0, 1, ncsi_prty1_bb_a0_attn_idx, 0x40000, 0x4000c, 0x40008, 0x40004 +}; + +static struct attn_hw_reg *ncsi_prty_bb_a0_regs[1] = { + &ncsi_prty1_bb_a0, +}; + +static const u16 ncsi_prty1_bb_b0_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg ncsi_prty1_bb_b0 = { + 0, 1, ncsi_prty1_bb_b0_attn_idx, 0x40000, 0x4000c, 0x40008, 0x40004 +}; + +static struct attn_hw_reg *ncsi_prty_bb_b0_regs[1] = { + &ncsi_prty1_bb_b0, +}; + +static const u16 ncsi_prty1_k2_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg ncsi_prty1_k2 = { + 0, 1, ncsi_prty1_k2_attn_idx, 0x40000, 0x4000c, 0x40008, 0x40004 +}; + +static struct attn_hw_reg *ncsi_prty_k2_regs[1] = { + &ncsi_prty1_k2, +}; + +#ifdef ATTN_DESC +static const char *opte_prty_attn_desc[12] = { + "opte_mem009_i_mem_prty", + "opte_mem010_i_mem_prty", + 
"opte_mem005_i_mem_prty", + "opte_mem006_i_mem_prty", + "opte_mem007_i_mem_prty", + "opte_mem008_i_mem_prty", + "opte_mem001_i_mem_prty", + "opte_mem002_i_mem_prty", + "opte_mem003_i_mem_prty", + "opte_mem004_i_mem_prty", + "opte_mem011_i_mem_prty", + "opte_datapath_parity_error", +}; +#else +#define opte_prty_attn_desc OSAL_NULL +#endif + +static const u16 opte_prty1_bb_a0_attn_idx[11] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, +}; + +static struct attn_hw_reg opte_prty1_bb_a0 = { + 0, 11, opte_prty1_bb_a0_attn_idx, 0x53000, 0x5300c, 0x53008, 0x53004 +}; + +static struct attn_hw_reg *opte_prty_bb_a0_regs[1] = { + &opte_prty1_bb_a0, +}; + +static const u16 opte_prty1_bb_b0_attn_idx[11] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, +}; + +static struct attn_hw_reg opte_prty1_bb_b0 = { + 0, 11, opte_prty1_bb_b0_attn_idx, 0x53000, 0x5300c, 0x53008, 0x53004 +}; + +static const u16 opte_prty0_bb_b0_attn_idx[1] = { + 11, +}; + +static struct attn_hw_reg opte_prty0_bb_b0 = { + 1, 1, opte_prty0_bb_b0_attn_idx, 0x53208, 0x53214, 0x53210, 0x5320c +}; + +static struct attn_hw_reg *opte_prty_bb_b0_regs[2] = { + &opte_prty1_bb_b0, &opte_prty0_bb_b0, +}; + +static const u16 opte_prty1_k2_attn_idx[11] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, +}; + +static struct attn_hw_reg opte_prty1_k2 = { + 0, 11, opte_prty1_k2_attn_idx, 0x53000, 0x5300c, 0x53008, 0x53004 +}; + +static const u16 opte_prty0_k2_attn_idx[1] = { + 11, +}; + +static struct attn_hw_reg opte_prty0_k2 = { + 1, 1, opte_prty0_k2_attn_idx, 0x53208, 0x53214, 0x53210, 0x5320c +}; + +static struct attn_hw_reg *opte_prty_k2_regs[2] = { + &opte_prty1_k2, &opte_prty0_k2, +}; + +#ifdef ATTN_DESC +static const char *bmb_int_attn_desc[297] = { + "bmb_address_error", + "bmb_rc_pkt0_rls_error", + "bmb_unused_0", + "bmb_rc_pkt0_protocol_error", + "bmb_rc_pkt1_rls_error", + "bmb_unused_1", + "bmb_rc_pkt1_protocol_error", + "bmb_rc_pkt2_rls_error", + "bmb_unused_2", + "bmb_rc_pkt2_protocol_error", + "bmb_rc_pkt3_rls_error", + "bmb_unused_3", + "bmb_rc_pkt3_protocol_error", + "bmb_rc_sop_req_tc_port_error", + "bmb_unused_4", + "bmb_wc0_protocol_error", + "bmb_wc1_protocol_error", + "bmb_wc2_protocol_error", + "bmb_wc3_protocol_error", + "bmb_unused_5", + "bmb_ll_blk_error", + "bmb_unused_6", + "bmb_mac0_fc_cnt_error", + "bmb_ll_arb_calc_error", + "bmb_wc0_inp_fifo_error", + "bmb_wc0_sop_fifo_error", + "bmb_wc0_len_fifo_error", + "bmb_wc0_queue_fifo_error", + "bmb_wc0_free_point_fifo_error", + "bmb_wc0_next_point_fifo_error", + "bmb_wc0_strt_fifo_error", + "bmb_wc0_second_dscr_fifo_error", + "bmb_wc0_pkt_avail_fifo_error", + "bmb_wc0_cos_cnt_fifo_error", + "bmb_wc0_notify_fifo_error", + "bmb_wc0_ll_req_fifo_error", + "bmb_wc0_ll_pa_cnt_error", + "bmb_wc0_bb_pa_cnt_error", + "bmb_wc1_inp_fifo_error", + "bmb_wc1_sop_fifo_error", + "bmb_wc1_queue_fifo_error", + "bmb_wc1_free_point_fifo_error", + "bmb_wc1_next_point_fifo_error", + "bmb_wc1_strt_fifo_error", + "bmb_wc1_second_dscr_fifo_error", + "bmb_wc1_pkt_avail_fifo_error", + "bmb_wc1_cos_cnt_fifo_error", + "bmb_wc1_notify_fifo_error", + "bmb_wc1_ll_req_fifo_error", + "bmb_wc1_ll_pa_cnt_error", + "bmb_wc1_bb_pa_cnt_error", + "bmb_wc2_inp_fifo_error", + "bmb_wc2_sop_fifo_error", + "bmb_wc2_queue_fifo_error", + "bmb_wc2_free_point_fifo_error", + "bmb_wc2_next_point_fifo_error", + "bmb_wc2_strt_fifo_error", + "bmb_wc2_second_dscr_fifo_error", + "bmb_wc2_pkt_avail_fifo_error", + "bmb_wc2_cos_cnt_fifo_error", + "bmb_wc2_notify_fifo_error", + "bmb_wc2_ll_req_fifo_error", + "bmb_wc2_ll_pa_cnt_error", + "bmb_wc2_bb_pa_cnt_error", 
+ "bmb_wc3_inp_fifo_error", + "bmb_wc3_sop_fifo_error", + "bmb_wc3_queue_fifo_error", + "bmb_wc3_free_point_fifo_error", + "bmb_wc3_next_point_fifo_error", + "bmb_wc3_strt_fifo_error", + "bmb_wc3_second_dscr_fifo_error", + "bmb_wc3_pkt_avail_fifo_error", + "bmb_wc3_cos_cnt_fifo_error", + "bmb_wc3_notify_fifo_error", + "bmb_wc3_ll_req_fifo_error", + "bmb_wc3_ll_pa_cnt_error", + "bmb_wc3_bb_pa_cnt_error", + "bmb_rc_pkt0_side_fifo_error", + "bmb_rc_pkt0_req_fifo_error", + "bmb_rc_pkt0_blk_fifo_error", + "bmb_rc_pkt0_rls_left_fifo_error", + "bmb_rc_pkt0_strt_ptr_fifo_error", + "bmb_rc_pkt0_second_ptr_fifo_error", + "bmb_rc_pkt0_rsp_fifo_error", + "bmb_rc_pkt0_dscr_fifo_error", + "bmb_rc_pkt1_side_fifo_error", + "bmb_rc_pkt1_req_fifo_error", + "bmb_rc_pkt1_blk_fifo_error", + "bmb_rc_pkt1_rls_left_fifo_error", + "bmb_rc_pkt1_strt_ptr_fifo_error", + "bmb_rc_pkt1_second_ptr_fifo_error", + "bmb_rc_pkt1_rsp_fifo_error", + "bmb_rc_pkt1_dscr_fifo_error", + "bmb_rc_pkt2_side_fifo_error", + "bmb_rc_pkt2_req_fifo_error", + "bmb_rc_pkt2_blk_fifo_error", + "bmb_rc_pkt2_rls_left_fifo_error", + "bmb_rc_pkt2_strt_ptr_fifo_error", + "bmb_rc_pkt2_second_ptr_fifo_error", + "bmb_rc_pkt2_rsp_fifo_error", + "bmb_rc_pkt2_dscr_fifo_error", + "bmb_rc_pkt3_side_fifo_error", + "bmb_rc_pkt3_req_fifo_error", + "bmb_rc_pkt3_blk_fifo_error", + "bmb_rc_pkt3_rls_left_fifo_error", + "bmb_rc_pkt3_strt_ptr_fifo_error", + "bmb_rc_pkt3_second_ptr_fifo_error", + "bmb_rc_pkt3_rsp_fifo_error", + "bmb_rc_pkt3_dscr_fifo_error", + "bmb_rc_sop_strt_fifo_error", + "bmb_rc_sop_req_fifo_error", + "bmb_rc_sop_dscr_fifo_error", + "bmb_rc_sop_queue_fifo_error", + "bmb_ll_arb_rls_fifo_error", + "bmb_ll_arb_prefetch_fifo_error", + "bmb_rc_pkt0_rls_fifo_error", + "bmb_rc_pkt1_rls_fifo_error", + "bmb_rc_pkt2_rls_fifo_error", + "bmb_rc_pkt3_rls_fifo_error", + "bmb_rc_pkt4_rls_fifo_error", + "bmb_rc_pkt5_rls_fifo_error", + "bmb_rc_pkt6_rls_fifo_error", + "bmb_rc_pkt7_rls_fifo_error", + "bmb_rc_pkt8_rls_fifo_error", + "bmb_rc_pkt9_rls_fifo_error", + "bmb_rc_pkt4_rls_error", + "bmb_rc_pkt4_protocol_error", + "bmb_rc_pkt4_side_fifo_error", + "bmb_rc_pkt4_req_fifo_error", + "bmb_rc_pkt4_blk_fifo_error", + "bmb_rc_pkt4_rls_left_fifo_error", + "bmb_rc_pkt4_strt_ptr_fifo_error", + "bmb_rc_pkt4_second_ptr_fifo_error", + "bmb_rc_pkt4_rsp_fifo_error", + "bmb_rc_pkt4_dscr_fifo_error", + "bmb_rc_pkt5_rls_error", + "bmb_rc_pkt5_protocol_error", + "bmb_rc_pkt5_side_fifo_error", + "bmb_rc_pkt5_req_fifo_error", + "bmb_rc_pkt5_blk_fifo_error", + "bmb_rc_pkt5_rls_left_fifo_error", + "bmb_rc_pkt5_strt_ptr_fifo_error", + "bmb_rc_pkt5_second_ptr_fifo_error", + "bmb_rc_pkt5_rsp_fifo_error", + "bmb_rc_pkt5_dscr_fifo_error", + "bmb_rc_pkt6_rls_error", + "bmb_rc_pkt6_protocol_error", + "bmb_rc_pkt6_side_fifo_error", + "bmb_rc_pkt6_req_fifo_error", + "bmb_rc_pkt6_blk_fifo_error", + "bmb_rc_pkt6_rls_left_fifo_error", + "bmb_rc_pkt6_strt_ptr_fifo_error", + "bmb_rc_pkt6_second_ptr_fifo_error", + "bmb_rc_pkt6_rsp_fifo_error", + "bmb_rc_pkt6_dscr_fifo_error", + "bmb_rc_pkt7_rls_error", + "bmb_rc_pkt7_protocol_error", + "bmb_rc_pkt7_side_fifo_error", + "bmb_rc_pkt7_req_fifo_error", + "bmb_rc_pkt7_blk_fifo_error", + "bmb_rc_pkt7_rls_left_fifo_error", + "bmb_rc_pkt7_strt_ptr_fifo_error", + "bmb_rc_pkt7_second_ptr_fifo_error", + "bmb_rc_pkt7_rsp_fifo_error", + "bmb_packet_available_sync_fifo_push_error", + "bmb_rc_pkt8_rls_error", + "bmb_rc_pkt8_protocol_error", + "bmb_rc_pkt8_side_fifo_error", + "bmb_rc_pkt8_req_fifo_error", + "bmb_rc_pkt8_blk_fifo_error", + 
"bmb_rc_pkt8_rls_left_fifo_error", + "bmb_rc_pkt8_strt_ptr_fifo_error", + "bmb_rc_pkt8_second_ptr_fifo_error", + "bmb_rc_pkt8_rsp_fifo_error", + "bmb_rc_pkt8_dscr_fifo_error", + "bmb_rc_pkt9_rls_error", + "bmb_rc_pkt9_protocol_error", + "bmb_rc_pkt9_side_fifo_error", + "bmb_rc_pkt9_req_fifo_error", + "bmb_rc_pkt9_blk_fifo_error", + "bmb_rc_pkt9_rls_left_fifo_error", + "bmb_rc_pkt9_strt_ptr_fifo_error", + "bmb_rc_pkt9_second_ptr_fifo_error", + "bmb_rc_pkt9_rsp_fifo_error", + "bmb_rc_pkt9_dscr_fifo_error", + "bmb_wc4_protocol_error", + "bmb_wc5_protocol_error", + "bmb_wc6_protocol_error", + "bmb_wc7_protocol_error", + "bmb_wc8_protocol_error", + "bmb_wc9_protocol_error", + "bmb_wc4_inp_fifo_error", + "bmb_wc4_sop_fifo_error", + "bmb_wc4_queue_fifo_error", + "bmb_wc4_free_point_fifo_error", + "bmb_wc4_next_point_fifo_error", + "bmb_wc4_strt_fifo_error", + "bmb_wc4_second_dscr_fifo_error", + "bmb_wc4_pkt_avail_fifo_error", + "bmb_wc4_cos_cnt_fifo_error", + "bmb_wc4_notify_fifo_error", + "bmb_wc4_ll_req_fifo_error", + "bmb_wc4_ll_pa_cnt_error", + "bmb_wc4_bb_pa_cnt_error", + "bmb_wc5_inp_fifo_error", + "bmb_wc5_sop_fifo_error", + "bmb_wc5_queue_fifo_error", + "bmb_wc5_free_point_fifo_error", + "bmb_wc5_next_point_fifo_error", + "bmb_wc5_strt_fifo_error", + "bmb_wc5_second_dscr_fifo_error", + "bmb_wc5_pkt_avail_fifo_error", + "bmb_wc5_cos_cnt_fifo_error", + "bmb_wc5_notify_fifo_error", + "bmb_wc5_ll_req_fifo_error", + "bmb_wc5_ll_pa_cnt_error", + "bmb_wc5_bb_pa_cnt_error", + "bmb_wc6_inp_fifo_error", + "bmb_wc6_sop_fifo_error", + "bmb_wc6_queue_fifo_error", + "bmb_wc6_free_point_fifo_error", + "bmb_wc6_next_point_fifo_error", + "bmb_wc6_strt_fifo_error", + "bmb_wc6_second_dscr_fifo_error", + "bmb_wc6_pkt_avail_fifo_error", + "bmb_wc6_cos_cnt_fifo_error", + "bmb_wc6_notify_fifo_error", + "bmb_wc6_ll_req_fifo_error", + "bmb_wc6_ll_pa_cnt_error", + "bmb_wc6_bb_pa_cnt_error", + "bmb_wc7_inp_fifo_error", + "bmb_wc7_sop_fifo_error", + "bmb_wc7_queue_fifo_error", + "bmb_wc7_free_point_fifo_error", + "bmb_wc7_next_point_fifo_error", + "bmb_wc7_strt_fifo_error", + "bmb_wc7_second_dscr_fifo_error", + "bmb_wc7_pkt_avail_fifo_error", + "bmb_wc7_cos_cnt_fifo_error", + "bmb_wc7_notify_fifo_error", + "bmb_wc7_ll_req_fifo_error", + "bmb_wc7_ll_pa_cnt_error", + "bmb_wc7_bb_pa_cnt_error", + "bmb_wc8_inp_fifo_error", + "bmb_wc8_sop_fifo_error", + "bmb_wc8_queue_fifo_error", + "bmb_wc8_free_point_fifo_error", + "bmb_wc8_next_point_fifo_error", + "bmb_wc8_strt_fifo_error", + "bmb_wc8_second_dscr_fifo_error", + "bmb_wc8_pkt_avail_fifo_error", + "bmb_wc8_cos_cnt_fifo_error", + "bmb_wc8_notify_fifo_error", + "bmb_wc8_ll_req_fifo_error", + "bmb_wc8_ll_pa_cnt_error", + "bmb_wc8_bb_pa_cnt_error", + "bmb_wc9_inp_fifo_error", + "bmb_wc9_sop_fifo_error", + "bmb_wc9_queue_fifo_error", + "bmb_wc9_free_point_fifo_error", + "bmb_wc9_next_point_fifo_error", + "bmb_wc9_strt_fifo_error", + "bmb_wc9_second_dscr_fifo_error", + "bmb_wc9_pkt_avail_fifo_error", + "bmb_wc9_cos_cnt_fifo_error", + "bmb_wc9_notify_fifo_error", + "bmb_wc9_ll_req_fifo_error", + "bmb_wc9_ll_pa_cnt_error", + "bmb_wc9_bb_pa_cnt_error", + "bmb_rc9_sop_rc_out_sync_fifo_error", + "bmb_rc9_sop_out_sync_fifo_push_error", + "bmb_rc0_sop_pend_fifo_error", + "bmb_rc1_sop_pend_fifo_error", + "bmb_rc2_sop_pend_fifo_error", + "bmb_rc3_sop_pend_fifo_error", + "bmb_rc4_sop_pend_fifo_error", + "bmb_rc5_sop_pend_fifo_error", + "bmb_rc6_sop_pend_fifo_error", + "bmb_rc7_sop_pend_fifo_error", + "bmb_rc0_dscr_pend_fifo_error", + "bmb_rc1_dscr_pend_fifo_error", + 
"bmb_rc2_dscr_pend_fifo_error", + "bmb_rc3_dscr_pend_fifo_error", + "bmb_rc4_dscr_pend_fifo_error", + "bmb_rc5_dscr_pend_fifo_error", + "bmb_rc6_dscr_pend_fifo_error", + "bmb_rc7_dscr_pend_fifo_error", + "bmb_rc8_sop_inp_sync_fifo_push_error", + "bmb_rc9_sop_inp_sync_fifo_push_error", + "bmb_rc8_sop_out_sync_fifo_push_error", + "bmb_rc_gnt_pend_fifo_error", + "bmb_rc8_out_sync_fifo_push_error", + "bmb_rc9_out_sync_fifo_push_error", + "bmb_wc8_sync_fifo_push_error", + "bmb_wc9_sync_fifo_push_error", + "bmb_rc8_sop_rc_out_sync_fifo_error", + "bmb_rc_pkt7_dscr_fifo_error", +}; +#else +#define bmb_int_attn_desc OSAL_NULL +#endif + +static const u16 bmb_int0_bb_a0_attn_idx[16] = { + 0, 1, 3, 4, 6, 7, 9, 10, 12, 13, 15, 16, 17, 18, 20, 22, +}; + +static struct attn_hw_reg bmb_int0_bb_a0 = { + 0, 16, bmb_int0_bb_a0_attn_idx, 0x5400c0, 0x5400cc, 0x5400c8, 0x5400c4 +}; + +static const u16 bmb_int1_bb_a0_attn_idx[28] = { + 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, + 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, +}; + +static struct attn_hw_reg bmb_int1_bb_a0 = { + 1, 28, bmb_int1_bb_a0_attn_idx, 0x5400d8, 0x5400e4, 0x5400e0, 0x5400dc +}; + +static const u16 bmb_int2_bb_a0_attn_idx[26] = { + 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, + 69, 70, 71, 72, 73, 74, 75, 76, +}; + +static struct attn_hw_reg bmb_int2_bb_a0 = { + 2, 26, bmb_int2_bb_a0_attn_idx, 0x5400f0, 0x5400fc, 0x5400f8, 0x5400f4 +}; + +static const u16 bmb_int3_bb_a0_attn_idx[31] = { + 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, + 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, +}; + +static struct attn_hw_reg bmb_int3_bb_a0 = { + 3, 31, bmb_int3_bb_a0_attn_idx, 0x540108, 0x540114, 0x540110, 0x54010c +}; + +static const u16 bmb_int4_bb_a0_attn_idx[27] = { + 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, + 122, + 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, +}; + +static struct attn_hw_reg bmb_int4_bb_a0 = { + 4, 27, bmb_int4_bb_a0_attn_idx, 0x540120, 0x54012c, 0x540128, 0x540124 +}; + +static const u16 bmb_int5_bb_a0_attn_idx[29] = { + 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, + 149, + 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, +}; + +static struct attn_hw_reg bmb_int5_bb_a0 = { + 5, 29, bmb_int5_bb_a0_attn_idx, 0x540138, 0x540144, 0x540140, 0x54013c +}; + +static const u16 bmb_int6_bb_a0_attn_idx[30] = { + 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, + 178, + 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, + 193, +}; + +static struct attn_hw_reg bmb_int6_bb_a0 = { + 6, 30, bmb_int6_bb_a0_attn_idx, 0x540150, 0x54015c, 0x540158, 0x540154 +}; + +static const u16 bmb_int7_bb_a0_attn_idx[32] = { + 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, + 208, + 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, + 223, 224, + 225, +}; + +static struct attn_hw_reg bmb_int7_bb_a0 = { + 7, 32, bmb_int7_bb_a0_attn_idx, 0x540168, 0x540174, 0x540170, 0x54016c +}; + +static const u16 bmb_int8_bb_a0_attn_idx[32] = { + 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, + 240, + 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, + 255, 256, + 257, +}; + +static struct attn_hw_reg bmb_int8_bb_a0 = { + 8, 32, bmb_int8_bb_a0_attn_idx, 0x540184, 0x540190, 0x54018c, 0x540188 +}; + +static const u16 bmb_int9_bb_a0_attn_idx[32] = { + 258, 259, 260, 261, 262, 263, 
264, 265, 266, 267, 268, 269, 270, 271, + 272, + 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, + 287, 288, + 289, +}; + +static struct attn_hw_reg bmb_int9_bb_a0 = { + 9, 32, bmb_int9_bb_a0_attn_idx, 0x54019c, 0x5401a8, 0x5401a4, 0x5401a0 +}; + +static const u16 bmb_int10_bb_a0_attn_idx[3] = { + 290, 291, 292, +}; + +static struct attn_hw_reg bmb_int10_bb_a0 = { + 10, 3, bmb_int10_bb_a0_attn_idx, 0x5401b4, 0x5401c0, 0x5401bc, 0x5401b8 +}; + +static const u16 bmb_int11_bb_a0_attn_idx[4] = { + 293, 294, 295, 296, +}; + +static struct attn_hw_reg bmb_int11_bb_a0 = { + 11, 4, bmb_int11_bb_a0_attn_idx, 0x5401cc, 0x5401d8, 0x5401d4, 0x5401d0 +}; + +static struct attn_hw_reg *bmb_int_bb_a0_regs[12] = { + &bmb_int0_bb_a0, &bmb_int1_bb_a0, &bmb_int2_bb_a0, &bmb_int3_bb_a0, + &bmb_int4_bb_a0, &bmb_int5_bb_a0, &bmb_int6_bb_a0, &bmb_int7_bb_a0, + &bmb_int8_bb_a0, &bmb_int9_bb_a0, + &bmb_int10_bb_a0, &bmb_int11_bb_a0, +}; + +static const u16 bmb_int0_bb_b0_attn_idx[16] = { + 0, 1, 3, 4, 6, 7, 9, 10, 12, 13, 15, 16, 17, 18, 20, 22, +}; + +static struct attn_hw_reg bmb_int0_bb_b0 = { + 0, 16, bmb_int0_bb_b0_attn_idx, 0x5400c0, 0x5400cc, 0x5400c8, 0x5400c4 +}; + +static const u16 bmb_int1_bb_b0_attn_idx[28] = { + 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, + 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, +}; + +static struct attn_hw_reg bmb_int1_bb_b0 = { + 1, 28, bmb_int1_bb_b0_attn_idx, 0x5400d8, 0x5400e4, 0x5400e0, 0x5400dc +}; + +static const u16 bmb_int2_bb_b0_attn_idx[26] = { + 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, + 69, 70, 71, 72, 73, 74, 75, 76, +}; + +static struct attn_hw_reg bmb_int2_bb_b0 = { + 2, 26, bmb_int2_bb_b0_attn_idx, 0x5400f0, 0x5400fc, 0x5400f8, 0x5400f4 +}; + +static const u16 bmb_int3_bb_b0_attn_idx[31] = { + 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, + 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, +}; + +static struct attn_hw_reg bmb_int3_bb_b0 = { + 3, 31, bmb_int3_bb_b0_attn_idx, 0x540108, 0x540114, 0x540110, 0x54010c +}; + +static const u16 bmb_int4_bb_b0_attn_idx[27] = { + 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, + 122, + 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, +}; + +static struct attn_hw_reg bmb_int4_bb_b0 = { + 4, 27, bmb_int4_bb_b0_attn_idx, 0x540120, 0x54012c, 0x540128, 0x540124 +}; + +static const u16 bmb_int5_bb_b0_attn_idx[29] = { + 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, + 149, + 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, +}; + +static struct attn_hw_reg bmb_int5_bb_b0 = { + 5, 29, bmb_int5_bb_b0_attn_idx, 0x540138, 0x540144, 0x540140, 0x54013c +}; + +static const u16 bmb_int6_bb_b0_attn_idx[30] = { + 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, + 178, + 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, + 193, +}; + +static struct attn_hw_reg bmb_int6_bb_b0 = { + 6, 30, bmb_int6_bb_b0_attn_idx, 0x540150, 0x54015c, 0x540158, 0x540154 +}; + +static const u16 bmb_int7_bb_b0_attn_idx[32] = { + 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, + 208, + 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, + 223, 224, + 225, +}; + +static struct attn_hw_reg bmb_int7_bb_b0 = { + 7, 32, bmb_int7_bb_b0_attn_idx, 0x540168, 0x540174, 0x540170, 0x54016c +}; + +static const u16 bmb_int8_bb_b0_attn_idx[32] = { + 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 
239, + 240, + 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, + 255, 256, + 257, +}; + +static struct attn_hw_reg bmb_int8_bb_b0 = { + 8, 32, bmb_int8_bb_b0_attn_idx, 0x540184, 0x540190, 0x54018c, 0x540188 +}; + +static const u16 bmb_int9_bb_b0_attn_idx[32] = { + 258, 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, + 272, + 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, + 287, 288, + 289, +}; + +static struct attn_hw_reg bmb_int9_bb_b0 = { + 9, 32, bmb_int9_bb_b0_attn_idx, 0x54019c, 0x5401a8, 0x5401a4, 0x5401a0 +}; + +static const u16 bmb_int10_bb_b0_attn_idx[3] = { + 290, 291, 292, +}; + +static struct attn_hw_reg bmb_int10_bb_b0 = { + 10, 3, bmb_int10_bb_b0_attn_idx, 0x5401b4, 0x5401c0, 0x5401bc, 0x5401b8 +}; + +static const u16 bmb_int11_bb_b0_attn_idx[4] = { + 293, 294, 295, 296, +}; + +static struct attn_hw_reg bmb_int11_bb_b0 = { + 11, 4, bmb_int11_bb_b0_attn_idx, 0x5401cc, 0x5401d8, 0x5401d4, 0x5401d0 +}; + +static struct attn_hw_reg *bmb_int_bb_b0_regs[12] = { + &bmb_int0_bb_b0, &bmb_int1_bb_b0, &bmb_int2_bb_b0, &bmb_int3_bb_b0, + &bmb_int4_bb_b0, &bmb_int5_bb_b0, &bmb_int6_bb_b0, &bmb_int7_bb_b0, + &bmb_int8_bb_b0, &bmb_int9_bb_b0, + &bmb_int10_bb_b0, &bmb_int11_bb_b0, +}; + +static const u16 bmb_int0_k2_attn_idx[16] = { + 0, 1, 3, 4, 6, 7, 9, 10, 12, 13, 15, 16, 17, 18, 20, 22, +}; + +static struct attn_hw_reg bmb_int0_k2 = { + 0, 16, bmb_int0_k2_attn_idx, 0x5400c0, 0x5400cc, 0x5400c8, 0x5400c4 +}; + +static const u16 bmb_int1_k2_attn_idx[28] = { + 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, + 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, +}; + +static struct attn_hw_reg bmb_int1_k2 = { + 1, 28, bmb_int1_k2_attn_idx, 0x5400d8, 0x5400e4, 0x5400e0, 0x5400dc +}; + +static const u16 bmb_int2_k2_attn_idx[26] = { + 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, + 69, 70, 71, 72, 73, 74, 75, 76, +}; + +static struct attn_hw_reg bmb_int2_k2 = { + 2, 26, bmb_int2_k2_attn_idx, 0x5400f0, 0x5400fc, 0x5400f8, 0x5400f4 +}; + +static const u16 bmb_int3_k2_attn_idx[31] = { + 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, + 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, +}; + +static struct attn_hw_reg bmb_int3_k2 = { + 3, 31, bmb_int3_k2_attn_idx, 0x540108, 0x540114, 0x540110, 0x54010c +}; + +static const u16 bmb_int4_k2_attn_idx[27] = { + 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, + 122, + 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, +}; + +static struct attn_hw_reg bmb_int4_k2 = { + 4, 27, bmb_int4_k2_attn_idx, 0x540120, 0x54012c, 0x540128, 0x540124 +}; + +static const u16 bmb_int5_k2_attn_idx[29] = { + 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, + 149, + 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, +}; + +static struct attn_hw_reg bmb_int5_k2 = { + 5, 29, bmb_int5_k2_attn_idx, 0x540138, 0x540144, 0x540140, 0x54013c +}; + +static const u16 bmb_int6_k2_attn_idx[30] = { + 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, + 178, + 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, + 193, +}; + +static struct attn_hw_reg bmb_int6_k2 = { + 6, 30, bmb_int6_k2_attn_idx, 0x540150, 0x54015c, 0x540158, 0x540154 +}; + +static const u16 bmb_int7_k2_attn_idx[32] = { + 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, + 208, + 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, + 223, 224, + 225, 
+}; + +static struct attn_hw_reg bmb_int7_k2 = { + 7, 32, bmb_int7_k2_attn_idx, 0x540168, 0x540174, 0x540170, 0x54016c +}; + +static const u16 bmb_int8_k2_attn_idx[32] = { + 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, + 240, + 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, + 255, 256, + 257, +}; + +static struct attn_hw_reg bmb_int8_k2 = { + 8, 32, bmb_int8_k2_attn_idx, 0x540184, 0x540190, 0x54018c, 0x540188 +}; + +static const u16 bmb_int9_k2_attn_idx[32] = { + 258, 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, + 272, + 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, + 287, 288, + 289, +}; + +static struct attn_hw_reg bmb_int9_k2 = { + 9, 32, bmb_int9_k2_attn_idx, 0x54019c, 0x5401a8, 0x5401a4, 0x5401a0 +}; + +static const u16 bmb_int10_k2_attn_idx[3] = { + 290, 291, 292, +}; + +static struct attn_hw_reg bmb_int10_k2 = { + 10, 3, bmb_int10_k2_attn_idx, 0x5401b4, 0x5401c0, 0x5401bc, 0x5401b8 +}; + +static const u16 bmb_int11_k2_attn_idx[4] = { + 293, 294, 295, 296, +}; + +static struct attn_hw_reg bmb_int11_k2 = { + 11, 4, bmb_int11_k2_attn_idx, 0x5401cc, 0x5401d8, 0x5401d4, 0x5401d0 +}; + +static struct attn_hw_reg *bmb_int_k2_regs[12] = { + &bmb_int0_k2, &bmb_int1_k2, &bmb_int2_k2, &bmb_int3_k2, &bmb_int4_k2, + &bmb_int5_k2, &bmb_int6_k2, &bmb_int7_k2, &bmb_int8_k2, &bmb_int9_k2, + &bmb_int10_k2, &bmb_int11_k2, +}; + +#ifdef ATTN_DESC +static const char *bmb_prty_attn_desc[61] = { + "bmb_ll_bank0_mem_prty", + "bmb_ll_bank1_mem_prty", + "bmb_ll_bank2_mem_prty", + "bmb_ll_bank3_mem_prty", + "bmb_datapath_registers", + "bmb_mem001_i_ecc_rf_int", + "bmb_mem008_i_ecc_rf_int", + "bmb_mem009_i_ecc_rf_int", + "bmb_mem010_i_ecc_rf_int", + "bmb_mem011_i_ecc_rf_int", + "bmb_mem012_i_ecc_rf_int", + "bmb_mem013_i_ecc_rf_int", + "bmb_mem014_i_ecc_rf_int", + "bmb_mem015_i_ecc_rf_int", + "bmb_mem016_i_ecc_rf_int", + "bmb_mem002_i_ecc_rf_int", + "bmb_mem003_i_ecc_rf_int", + "bmb_mem004_i_ecc_rf_int", + "bmb_mem005_i_ecc_rf_int", + "bmb_mem006_i_ecc_rf_int", + "bmb_mem007_i_ecc_rf_int", + "bmb_mem059_i_mem_prty", + "bmb_mem060_i_mem_prty", + "bmb_mem037_i_mem_prty", + "bmb_mem038_i_mem_prty", + "bmb_mem039_i_mem_prty", + "bmb_mem040_i_mem_prty", + "bmb_mem041_i_mem_prty", + "bmb_mem042_i_mem_prty", + "bmb_mem043_i_mem_prty", + "bmb_mem044_i_mem_prty", + "bmb_mem045_i_mem_prty", + "bmb_mem046_i_mem_prty", + "bmb_mem047_i_mem_prty", + "bmb_mem048_i_mem_prty", + "bmb_mem049_i_mem_prty", + "bmb_mem050_i_mem_prty", + "bmb_mem051_i_mem_prty", + "bmb_mem052_i_mem_prty", + "bmb_mem053_i_mem_prty", + "bmb_mem054_i_mem_prty", + "bmb_mem055_i_mem_prty", + "bmb_mem056_i_mem_prty", + "bmb_mem057_i_mem_prty", + "bmb_mem058_i_mem_prty", + "bmb_mem033_i_mem_prty", + "bmb_mem034_i_mem_prty", + "bmb_mem035_i_mem_prty", + "bmb_mem036_i_mem_prty", + "bmb_mem021_i_mem_prty", + "bmb_mem022_i_mem_prty", + "bmb_mem023_i_mem_prty", + "bmb_mem024_i_mem_prty", + "bmb_mem025_i_mem_prty", + "bmb_mem026_i_mem_prty", + "bmb_mem027_i_mem_prty", + "bmb_mem028_i_mem_prty", + "bmb_mem029_i_mem_prty", + "bmb_mem030_i_mem_prty", + "bmb_mem031_i_mem_prty", + "bmb_mem032_i_mem_prty", +}; +#else +#define bmb_prty_attn_desc OSAL_NULL +#endif + +static const u16 bmb_prty1_bb_a0_attn_idx[31] = { + 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, + 24, + 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, +}; + +static struct attn_hw_reg bmb_prty1_bb_a0 = { + 0, 31, bmb_prty1_bb_a0_attn_idx, 0x540400, 0x54040c, 0x540408, 0x540404 +}; + +static 
const u16 bmb_prty2_bb_a0_attn_idx[25] = { + 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, + 54, 55, 56, 57, 58, 59, 60, +}; + +static struct attn_hw_reg bmb_prty2_bb_a0 = { + 1, 25, bmb_prty2_bb_a0_attn_idx, 0x540410, 0x54041c, 0x540418, 0x540414 +}; + +static struct attn_hw_reg *bmb_prty_bb_a0_regs[2] = { + &bmb_prty1_bb_a0, &bmb_prty2_bb_a0, +}; + +static const u16 bmb_prty0_bb_b0_attn_idx[5] = { + 0, 1, 2, 3, 4, +}; + +static struct attn_hw_reg bmb_prty0_bb_b0 = { + 0, 5, bmb_prty0_bb_b0_attn_idx, 0x5401dc, 0x5401e8, 0x5401e4, 0x5401e0 +}; + +static const u16 bmb_prty1_bb_b0_attn_idx[31] = { + 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, + 24, + 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, +}; + +static struct attn_hw_reg bmb_prty1_bb_b0 = { + 1, 31, bmb_prty1_bb_b0_attn_idx, 0x540400, 0x54040c, 0x540408, 0x540404 +}; + +static const u16 bmb_prty2_bb_b0_attn_idx[15] = { + 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, +}; + +static struct attn_hw_reg bmb_prty2_bb_b0 = { + 2, 15, bmb_prty2_bb_b0_attn_idx, 0x540410, 0x54041c, 0x540418, 0x540414 +}; + +static struct attn_hw_reg *bmb_prty_bb_b0_regs[3] = { + &bmb_prty0_bb_b0, &bmb_prty1_bb_b0, &bmb_prty2_bb_b0, +}; + +static const u16 bmb_prty0_k2_attn_idx[5] = { + 0, 1, 2, 3, 4, +}; + +static struct attn_hw_reg bmb_prty0_k2 = { + 0, 5, bmb_prty0_k2_attn_idx, 0x5401dc, 0x5401e8, 0x5401e4, 0x5401e0 +}; + +static const u16 bmb_prty1_k2_attn_idx[31] = { + 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, + 24, + 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, +}; + +static struct attn_hw_reg bmb_prty1_k2 = { + 1, 31, bmb_prty1_k2_attn_idx, 0x540400, 0x54040c, 0x540408, 0x540404 +}; + +static const u16 bmb_prty2_k2_attn_idx[15] = { + 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, +}; + +static struct attn_hw_reg bmb_prty2_k2 = { + 2, 15, bmb_prty2_k2_attn_idx, 0x540410, 0x54041c, 0x540418, 0x540414 +}; + +static struct attn_hw_reg *bmb_prty_k2_regs[3] = { + &bmb_prty0_k2, &bmb_prty1_k2, &bmb_prty2_k2, +}; + +#ifdef ATTN_DESC +static const char *pcie_int_attn_desc[17] = { + "pcie_address_error", + "pcie_link_down_detect", + "pcie_link_up_detect", + "pcie_cfg_link_eq_req_int", + "pcie_pcie_bandwidth_change_detect", + "pcie_early_hot_reset_detect", + "pcie_hot_reset_detect", + "pcie_l1_entry_detect", + "pcie_l1_exit_detect", + "pcie_ltssm_state_match_detect", + "pcie_fc_timeout_detect", + "pcie_pme_turnoff_message_detect", + "pcie_cfg_send_cor_err", + "pcie_cfg_send_nf_err", + "pcie_cfg_send_f_err", + "pcie_qoverflow_detect", + "pcie_vdm_detect", +}; +#else +#define pcie_int_attn_desc OSAL_NULL +#endif + +static const u16 pcie_int0_k2_attn_idx[17] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, +}; + +static struct attn_hw_reg pcie_int0_k2 = { + 0, 17, pcie_int0_k2_attn_idx, 0x547a0, 0x547ac, 0x547a8, 0x547a4 +}; + +static struct attn_hw_reg *pcie_int_k2_regs[1] = { + &pcie_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *pcie_prty_attn_desc[24] = { + "pcie_mem003_i_ecc_rf_int", + "pcie_mem004_i_ecc_rf_int", + "pcie_mem008_i_mem_prty", + "pcie_mem007_i_mem_prty", + "pcie_mem005_i_mem_prty", + "pcie_mem006_i_mem_prty", + "pcie_mem001_i_mem_prty", + "pcie_mem002_i_mem_prty", + "pcie_mem001_i_ecc_rf_int", + "pcie_mem005_i_ecc_rf_int", + "pcie_mem010_i_ecc_rf_int", + "pcie_mem009_i_ecc_rf_int", + "pcie_mem007_i_ecc_rf_int", + "pcie_mem004_i_mem_prty_0", + "pcie_mem004_i_mem_prty_1", + "pcie_mem004_i_mem_prty_2", + 
"pcie_mem004_i_mem_prty_3", + "pcie_mem011_i_mem_prty_1", + "pcie_mem011_i_mem_prty_2", + "pcie_mem012_i_mem_prty_1", + "pcie_mem012_i_mem_prty_2", + "pcie_app_parity_errs_0", + "pcie_app_parity_errs_1", + "pcie_app_parity_errs_2", +}; +#else +#define pcie_prty_attn_desc OSAL_NULL +#endif + +static const u16 pcie_prty1_bb_a0_attn_idx[17] = { + 0, 2, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, +}; + +static struct attn_hw_reg pcie_prty1_bb_a0 = { + 0, 17, pcie_prty1_bb_a0_attn_idx, 0x54000, 0x5400c, 0x54008, 0x54004 +}; + +static struct attn_hw_reg *pcie_prty_bb_a0_regs[1] = { + &pcie_prty1_bb_a0, +}; + +static const u16 pcie_prty1_bb_b0_attn_idx[17] = { + 0, 2, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, +}; + +static struct attn_hw_reg pcie_prty1_bb_b0 = { + 0, 17, pcie_prty1_bb_b0_attn_idx, 0x54000, 0x5400c, 0x54008, 0x54004 +}; + +static struct attn_hw_reg *pcie_prty_bb_b0_regs[1] = { + &pcie_prty1_bb_b0, +}; + +static const u16 pcie_prty1_k2_attn_idx[8] = { + 0, 1, 2, 3, 4, 5, 6, 7, +}; + +static struct attn_hw_reg pcie_prty1_k2 = { + 0, 8, pcie_prty1_k2_attn_idx, 0x54000, 0x5400c, 0x54008, 0x54004 +}; + +static const u16 pcie_prty0_k2_attn_idx[3] = { + 21, 22, 23, +}; + +static struct attn_hw_reg pcie_prty0_k2 = { + 1, 3, pcie_prty0_k2_attn_idx, 0x547b0, 0x547bc, 0x547b8, 0x547b4 +}; + +static struct attn_hw_reg *pcie_prty_k2_regs[2] = { + &pcie_prty1_k2, &pcie_prty0_k2, +}; + +#ifdef ATTN_DESC +static const char *mcp2_prty_attn_desc[13] = { + "mcp2_rom_parity", + "mcp2_mem001_i_ecc_rf_int", + "mcp2_mem006_i_ecc_0_rf_int", + "mcp2_mem006_i_ecc_1_rf_int", + "mcp2_mem006_i_ecc_2_rf_int", + "mcp2_mem006_i_ecc_3_rf_int", + "mcp2_mem007_i_ecc_rf_int", + "mcp2_mem004_i_mem_prty", + "mcp2_mem003_i_mem_prty", + "mcp2_mem002_i_mem_prty", + "mcp2_mem009_i_mem_prty", + "mcp2_mem008_i_mem_prty", + "mcp2_mem005_i_mem_prty", +}; +#else +#define mcp2_prty_attn_desc OSAL_NULL +#endif + +static const u16 mcp2_prty0_bb_a0_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg mcp2_prty0_bb_a0 = { + 0, 1, mcp2_prty0_bb_a0_attn_idx, 0x52040, 0x5204c, 0x52048, 0x52044 +}; + +static const u16 mcp2_prty1_bb_a0_attn_idx[12] = { + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, +}; + +static struct attn_hw_reg mcp2_prty1_bb_a0 = { + 1, 12, mcp2_prty1_bb_a0_attn_idx, 0x52204, 0x52210, 0x5220c, 0x52208 +}; + +static struct attn_hw_reg *mcp2_prty_bb_a0_regs[2] = { + &mcp2_prty0_bb_a0, &mcp2_prty1_bb_a0, +}; + +static const u16 mcp2_prty0_bb_b0_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg mcp2_prty0_bb_b0 = { + 0, 1, mcp2_prty0_bb_b0_attn_idx, 0x52040, 0x5204c, 0x52048, 0x52044 +}; + +static const u16 mcp2_prty1_bb_b0_attn_idx[12] = { + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, +}; + +static struct attn_hw_reg mcp2_prty1_bb_b0 = { + 1, 12, mcp2_prty1_bb_b0_attn_idx, 0x52204, 0x52210, 0x5220c, 0x52208 +}; + +static struct attn_hw_reg *mcp2_prty_bb_b0_regs[2] = { + &mcp2_prty0_bb_b0, &mcp2_prty1_bb_b0, +}; + +static const u16 mcp2_prty0_k2_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg mcp2_prty0_k2 = { + 0, 1, mcp2_prty0_k2_attn_idx, 0x52040, 0x5204c, 0x52048, 0x52044 +}; + +static const u16 mcp2_prty1_k2_attn_idx[12] = { + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, +}; + +static struct attn_hw_reg mcp2_prty1_k2 = { + 1, 12, mcp2_prty1_k2_attn_idx, 0x52204, 0x52210, 0x5220c, 0x52208 +}; + +static struct attn_hw_reg *mcp2_prty_k2_regs[2] = { + &mcp2_prty0_k2, &mcp2_prty1_k2, +}; + +#ifdef ATTN_DESC +static const char *pswhst_int_attn_desc[18] = { + "pswhst_address_error", + 
"pswhst_hst_src_fifo1_err", + "pswhst_hst_src_fifo2_err", + "pswhst_hst_src_fifo3_err", + "pswhst_hst_src_fifo4_err", + "pswhst_hst_src_fifo5_err", + "pswhst_hst_hdr_sync_fifo_err", + "pswhst_hst_data_sync_fifo_err", + "pswhst_hst_cpl_sync_fifo_err", + "pswhst_hst_vf_disabled_access", + "pswhst_hst_permission_violation", + "pswhst_hst_incorrect_access", + "pswhst_hst_src_fifo6_err", + "pswhst_hst_src_fifo7_err", + "pswhst_hst_src_fifo8_err", + "pswhst_hst_src_fifo9_err", + "pswhst_hst_source_credit_violation", + "pswhst_hst_timeout", +}; +#else +#define pswhst_int_attn_desc OSAL_NULL +#endif + +static const u16 pswhst_int0_bb_a0_attn_idx[18] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, +}; + +static struct attn_hw_reg pswhst_int0_bb_a0 = { + 0, 18, pswhst_int0_bb_a0_attn_idx, 0x2a0180, 0x2a018c, 0x2a0188, + 0x2a0184 +}; + +static struct attn_hw_reg *pswhst_int_bb_a0_regs[1] = { + &pswhst_int0_bb_a0, +}; + +static const u16 pswhst_int0_bb_b0_attn_idx[18] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, +}; + +static struct attn_hw_reg pswhst_int0_bb_b0 = { + 0, 18, pswhst_int0_bb_b0_attn_idx, 0x2a0180, 0x2a018c, 0x2a0188, + 0x2a0184 +}; + +static struct attn_hw_reg *pswhst_int_bb_b0_regs[1] = { + &pswhst_int0_bb_b0, +}; + +static const u16 pswhst_int0_k2_attn_idx[18] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, +}; + +static struct attn_hw_reg pswhst_int0_k2 = { + 0, 18, pswhst_int0_k2_attn_idx, 0x2a0180, 0x2a018c, 0x2a0188, 0x2a0184 +}; + +static struct attn_hw_reg *pswhst_int_k2_regs[1] = { + &pswhst_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *pswhst_prty_attn_desc[18] = { + "pswhst_datapath_registers", + "pswhst_mem006_i_mem_prty", + "pswhst_mem007_i_mem_prty", + "pswhst_mem005_i_mem_prty", + "pswhst_mem002_i_mem_prty", + "pswhst_mem003_i_mem_prty", + "pswhst_mem001_i_mem_prty", + "pswhst_mem008_i_mem_prty", + "pswhst_mem004_i_mem_prty", + "pswhst_mem009_i_mem_prty", + "pswhst_mem010_i_mem_prty", + "pswhst_mem016_i_mem_prty", + "pswhst_mem012_i_mem_prty", + "pswhst_mem013_i_mem_prty", + "pswhst_mem014_i_mem_prty", + "pswhst_mem015_i_mem_prty", + "pswhst_mem011_i_mem_prty", + "pswhst_mem017_i_mem_prty", +}; +#else +#define pswhst_prty_attn_desc OSAL_NULL +#endif + +static const u16 pswhst_prty1_bb_a0_attn_idx[17] = { + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, +}; + +static struct attn_hw_reg pswhst_prty1_bb_a0 = { + 0, 17, pswhst_prty1_bb_a0_attn_idx, 0x2a0200, 0x2a020c, 0x2a0208, + 0x2a0204 +}; + +static struct attn_hw_reg *pswhst_prty_bb_a0_regs[1] = { + &pswhst_prty1_bb_a0, +}; + +static const u16 pswhst_prty0_bb_b0_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg pswhst_prty0_bb_b0 = { + 0, 1, pswhst_prty0_bb_b0_attn_idx, 0x2a0190, 0x2a019c, 0x2a0198, + 0x2a0194 +}; + +static const u16 pswhst_prty1_bb_b0_attn_idx[17] = { + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, +}; + +static struct attn_hw_reg pswhst_prty1_bb_b0 = { + 1, 17, pswhst_prty1_bb_b0_attn_idx, 0x2a0200, 0x2a020c, 0x2a0208, + 0x2a0204 +}; + +static struct attn_hw_reg *pswhst_prty_bb_b0_regs[2] = { + &pswhst_prty0_bb_b0, &pswhst_prty1_bb_b0, +}; + +static const u16 pswhst_prty0_k2_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg pswhst_prty0_k2 = { + 0, 1, pswhst_prty0_k2_attn_idx, 0x2a0190, 0x2a019c, 0x2a0198, 0x2a0194 +}; + +static const u16 pswhst_prty1_k2_attn_idx[17] = { + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, +}; + +static struct attn_hw_reg pswhst_prty1_k2 = { + 1, 17, 
pswhst_prty1_k2_attn_idx, 0x2a0200, 0x2a020c, 0x2a0208, 0x2a0204 +}; + +static struct attn_hw_reg *pswhst_prty_k2_regs[2] = { + &pswhst_prty0_k2, &pswhst_prty1_k2, +}; + +#ifdef ATTN_DESC +static const char *pswhst2_int_attn_desc[5] = { + "pswhst2_address_error", + "pswhst2_hst_header_fifo_err", + "pswhst2_hst_data_fifo_err", + "pswhst2_hst_cpl_fifo_err", + "pswhst2_hst_ireq_fifo_err", +}; +#else +#define pswhst2_int_attn_desc OSAL_NULL +#endif + +static const u16 pswhst2_int0_bb_a0_attn_idx[5] = { + 0, 1, 2, 3, 4, +}; + +static struct attn_hw_reg pswhst2_int0_bb_a0 = { + 0, 5, pswhst2_int0_bb_a0_attn_idx, 0x29e180, 0x29e18c, 0x29e188, + 0x29e184 +}; + +static struct attn_hw_reg *pswhst2_int_bb_a0_regs[1] = { + &pswhst2_int0_bb_a0, +}; + +static const u16 pswhst2_int0_bb_b0_attn_idx[5] = { + 0, 1, 2, 3, 4, +}; + +static struct attn_hw_reg pswhst2_int0_bb_b0 = { + 0, 5, pswhst2_int0_bb_b0_attn_idx, 0x29e180, 0x29e18c, 0x29e188, + 0x29e184 +}; + +static struct attn_hw_reg *pswhst2_int_bb_b0_regs[1] = { + &pswhst2_int0_bb_b0, +}; + +static const u16 pswhst2_int0_k2_attn_idx[5] = { + 0, 1, 2, 3, 4, +}; + +static struct attn_hw_reg pswhst2_int0_k2 = { + 0, 5, pswhst2_int0_k2_attn_idx, 0x29e180, 0x29e18c, 0x29e188, 0x29e184 +}; + +static struct attn_hw_reg *pswhst2_int_k2_regs[1] = { + &pswhst2_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *pswhst2_prty_attn_desc[1] = { + "pswhst2_datapath_registers", +}; +#else +#define pswhst2_prty_attn_desc OSAL_NULL +#endif + +static const u16 pswhst2_prty0_bb_b0_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg pswhst2_prty0_bb_b0 = { + 0, 1, pswhst2_prty0_bb_b0_attn_idx, 0x29e190, 0x29e19c, 0x29e198, + 0x29e194 +}; + +static struct attn_hw_reg *pswhst2_prty_bb_b0_regs[1] = { + &pswhst2_prty0_bb_b0, +}; + +static const u16 pswhst2_prty0_k2_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg pswhst2_prty0_k2 = { + 0, 1, pswhst2_prty0_k2_attn_idx, 0x29e190, 0x29e19c, 0x29e198, 0x29e194 +}; + +static struct attn_hw_reg *pswhst2_prty_k2_regs[1] = { + &pswhst2_prty0_k2, +}; + +#ifdef ATTN_DESC +static const char *pswrd_int_attn_desc[3] = { + "pswrd_address_error", + "pswrd_pop_error", + "pswrd_pop_pbf_error", +}; +#else +#define pswrd_int_attn_desc OSAL_NULL +#endif + +static const u16 pswrd_int0_bb_a0_attn_idx[3] = { + 0, 1, 2, +}; + +static struct attn_hw_reg pswrd_int0_bb_a0 = { + 0, 3, pswrd_int0_bb_a0_attn_idx, 0x29c180, 0x29c18c, 0x29c188, 0x29c184 +}; + +static struct attn_hw_reg *pswrd_int_bb_a0_regs[1] = { + &pswrd_int0_bb_a0, +}; + +static const u16 pswrd_int0_bb_b0_attn_idx[3] = { + 0, 1, 2, +}; + +static struct attn_hw_reg pswrd_int0_bb_b0 = { + 0, 3, pswrd_int0_bb_b0_attn_idx, 0x29c180, 0x29c18c, 0x29c188, 0x29c184 +}; + +static struct attn_hw_reg *pswrd_int_bb_b0_regs[1] = { + &pswrd_int0_bb_b0, +}; + +static const u16 pswrd_int0_k2_attn_idx[3] = { + 0, 1, 2, +}; + +static struct attn_hw_reg pswrd_int0_k2 = { + 0, 3, pswrd_int0_k2_attn_idx, 0x29c180, 0x29c18c, 0x29c188, 0x29c184 +}; + +static struct attn_hw_reg *pswrd_int_k2_regs[1] = { + &pswrd_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *pswrd_prty_attn_desc[1] = { + "pswrd_datapath_registers", +}; +#else +#define pswrd_prty_attn_desc OSAL_NULL +#endif + +static const u16 pswrd_prty0_bb_b0_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg pswrd_prty0_bb_b0 = { + 0, 1, pswrd_prty0_bb_b0_attn_idx, 0x29c190, 0x29c19c, 0x29c198, + 0x29c194 +}; + +static struct attn_hw_reg *pswrd_prty_bb_b0_regs[1] = { + &pswrd_prty0_bb_b0, +}; + +static const u16 pswrd_prty0_k2_attn_idx[1] = { 
+ 0, +}; + +static struct attn_hw_reg pswrd_prty0_k2 = { + 0, 1, pswrd_prty0_k2_attn_idx, 0x29c190, 0x29c19c, 0x29c198, 0x29c194 +}; + +static struct attn_hw_reg *pswrd_prty_k2_regs[1] = { + &pswrd_prty0_k2, +}; + +#ifdef ATTN_DESC +static const char *pswrd2_int_attn_desc[5] = { + "pswrd2_address_error", + "pswrd2_sr_fifo_error", + "pswrd2_blk_fifo_error", + "pswrd2_push_error", + "pswrd2_push_pbf_error", +}; +#else +#define pswrd2_int_attn_desc OSAL_NULL +#endif + +static const u16 pswrd2_int0_bb_a0_attn_idx[5] = { + 0, 1, 2, 3, 4, +}; + +static struct attn_hw_reg pswrd2_int0_bb_a0 = { + 0, 5, pswrd2_int0_bb_a0_attn_idx, 0x29d180, 0x29d18c, 0x29d188, + 0x29d184 +}; + +static struct attn_hw_reg *pswrd2_int_bb_a0_regs[1] = { + &pswrd2_int0_bb_a0, +}; + +static const u16 pswrd2_int0_bb_b0_attn_idx[5] = { + 0, 1, 2, 3, 4, +}; + +static struct attn_hw_reg pswrd2_int0_bb_b0 = { + 0, 5, pswrd2_int0_bb_b0_attn_idx, 0x29d180, 0x29d18c, 0x29d188, + 0x29d184 +}; + +static struct attn_hw_reg *pswrd2_int_bb_b0_regs[1] = { + &pswrd2_int0_bb_b0, +}; + +static const u16 pswrd2_int0_k2_attn_idx[5] = { + 0, 1, 2, 3, 4, +}; + +static struct attn_hw_reg pswrd2_int0_k2 = { + 0, 5, pswrd2_int0_k2_attn_idx, 0x29d180, 0x29d18c, 0x29d188, 0x29d184 +}; + +static struct attn_hw_reg *pswrd2_int_k2_regs[1] = { + &pswrd2_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *pswrd2_prty_attn_desc[36] = { + "pswrd2_datapath_registers", + "pswrd2_mem017_i_ecc_rf_int", + "pswrd2_mem018_i_ecc_rf_int", + "pswrd2_mem019_i_ecc_rf_int", + "pswrd2_mem020_i_ecc_rf_int", + "pswrd2_mem021_i_ecc_rf_int", + "pswrd2_mem022_i_ecc_rf_int", + "pswrd2_mem023_i_ecc_rf_int", + "pswrd2_mem024_i_ecc_rf_int", + "pswrd2_mem025_i_ecc_rf_int", + "pswrd2_mem015_i_ecc_rf_int", + "pswrd2_mem034_i_mem_prty", + "pswrd2_mem032_i_mem_prty", + "pswrd2_mem028_i_mem_prty", + "pswrd2_mem033_i_mem_prty", + "pswrd2_mem030_i_mem_prty", + "pswrd2_mem029_i_mem_prty", + "pswrd2_mem031_i_mem_prty", + "pswrd2_mem027_i_mem_prty", + "pswrd2_mem026_i_mem_prty", + "pswrd2_mem001_i_mem_prty", + "pswrd2_mem007_i_mem_prty", + "pswrd2_mem008_i_mem_prty", + "pswrd2_mem009_i_mem_prty", + "pswrd2_mem010_i_mem_prty", + "pswrd2_mem011_i_mem_prty", + "pswrd2_mem012_i_mem_prty", + "pswrd2_mem013_i_mem_prty", + "pswrd2_mem014_i_mem_prty", + "pswrd2_mem002_i_mem_prty", + "pswrd2_mem003_i_mem_prty", + "pswrd2_mem004_i_mem_prty", + "pswrd2_mem005_i_mem_prty", + "pswrd2_mem006_i_mem_prty", + "pswrd2_mem016_i_mem_prty", + "pswrd2_mem015_i_mem_prty", +}; +#else +#define pswrd2_prty_attn_desc OSAL_NULL +#endif + +static const u16 pswrd2_prty1_bb_a0_attn_idx[31] = { + 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, + 22, + 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, +}; + +static struct attn_hw_reg pswrd2_prty1_bb_a0 = { + 0, 31, pswrd2_prty1_bb_a0_attn_idx, 0x29d200, 0x29d20c, 0x29d208, + 0x29d204 +}; + +static const u16 pswrd2_prty2_bb_a0_attn_idx[3] = { + 33, 34, 35, +}; + +static struct attn_hw_reg pswrd2_prty2_bb_a0 = { + 1, 3, pswrd2_prty2_bb_a0_attn_idx, 0x29d210, 0x29d21c, 0x29d218, + 0x29d214 +}; + +static struct attn_hw_reg *pswrd2_prty_bb_a0_regs[2] = { + &pswrd2_prty1_bb_a0, &pswrd2_prty2_bb_a0, +}; + +static const u16 pswrd2_prty0_bb_b0_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg pswrd2_prty0_bb_b0 = { + 0, 1, pswrd2_prty0_bb_b0_attn_idx, 0x29d190, 0x29d19c, 0x29d198, + 0x29d194 +}; + +static const u16 pswrd2_prty1_bb_b0_attn_idx[31] = { + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, + 21, + 22, 23, 24, 25, 26, 27, 28, 
29, 30, 31, +}; + +static struct attn_hw_reg pswrd2_prty1_bb_b0 = { + 1, 31, pswrd2_prty1_bb_b0_attn_idx, 0x29d200, 0x29d20c, 0x29d208, + 0x29d204 +}; + +static const u16 pswrd2_prty2_bb_b0_attn_idx[3] = { + 32, 33, 34, +}; + +static struct attn_hw_reg pswrd2_prty2_bb_b0 = { + 2, 3, pswrd2_prty2_bb_b0_attn_idx, 0x29d210, 0x29d21c, 0x29d218, + 0x29d214 +}; + +static struct attn_hw_reg *pswrd2_prty_bb_b0_regs[3] = { + &pswrd2_prty0_bb_b0, &pswrd2_prty1_bb_b0, &pswrd2_prty2_bb_b0, +}; + +static const u16 pswrd2_prty0_k2_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg pswrd2_prty0_k2 = { + 0, 1, pswrd2_prty0_k2_attn_idx, 0x29d190, 0x29d19c, 0x29d198, 0x29d194 +}; + +static const u16 pswrd2_prty1_k2_attn_idx[31] = { + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, + 21, + 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, +}; + +static struct attn_hw_reg pswrd2_prty1_k2 = { + 1, 31, pswrd2_prty1_k2_attn_idx, 0x29d200, 0x29d20c, 0x29d208, 0x29d204 +}; + +static const u16 pswrd2_prty2_k2_attn_idx[3] = { + 32, 33, 34, +}; + +static struct attn_hw_reg pswrd2_prty2_k2 = { + 2, 3, pswrd2_prty2_k2_attn_idx, 0x29d210, 0x29d21c, 0x29d218, 0x29d214 +}; + +static struct attn_hw_reg *pswrd2_prty_k2_regs[3] = { + &pswrd2_prty0_k2, &pswrd2_prty1_k2, &pswrd2_prty2_k2, +}; + +#ifdef ATTN_DESC +static const char *pswwr_int_attn_desc[16] = { + "pswwr_address_error", + "pswwr_src_fifo_overflow", + "pswwr_qm_fifo_overflow", + "pswwr_tm_fifo_overflow", + "pswwr_usdm_fifo_overflow", + "pswwr_usdmdp_fifo_overflow", + "pswwr_xsdm_fifo_overflow", + "pswwr_tsdm_fifo_overflow", + "pswwr_cduwr_fifo_overflow", + "pswwr_dbg_fifo_overflow", + "pswwr_dmae_fifo_overflow", + "pswwr_hc_fifo_overflow", + "pswwr_msdm_fifo_overflow", + "pswwr_ysdm_fifo_overflow", + "pswwr_psdm_fifo_overflow", + "pswwr_m2p_fifo_overflow", +}; +#else +#define pswwr_int_attn_desc OSAL_NULL +#endif + +static const u16 pswwr_int0_bb_a0_attn_idx[16] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, +}; + +static struct attn_hw_reg pswwr_int0_bb_a0 = { + 0, 16, pswwr_int0_bb_a0_attn_idx, 0x29a180, 0x29a18c, 0x29a188, + 0x29a184 +}; + +static struct attn_hw_reg *pswwr_int_bb_a0_regs[1] = { + &pswwr_int0_bb_a0, +}; + +static const u16 pswwr_int0_bb_b0_attn_idx[16] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, +}; + +static struct attn_hw_reg pswwr_int0_bb_b0 = { + 0, 16, pswwr_int0_bb_b0_attn_idx, 0x29a180, 0x29a18c, 0x29a188, + 0x29a184 +}; + +static struct attn_hw_reg *pswwr_int_bb_b0_regs[1] = { + &pswwr_int0_bb_b0, +}; + +static const u16 pswwr_int0_k2_attn_idx[16] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, +}; + +static struct attn_hw_reg pswwr_int0_k2 = { + 0, 16, pswwr_int0_k2_attn_idx, 0x29a180, 0x29a18c, 0x29a188, 0x29a184 +}; + +static struct attn_hw_reg *pswwr_int_k2_regs[1] = { + &pswwr_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *pswwr_prty_attn_desc[1] = { + "pswwr_datapath_registers", +}; +#else +#define pswwr_prty_attn_desc OSAL_NULL +#endif + +static const u16 pswwr_prty0_bb_b0_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg pswwr_prty0_bb_b0 = { + 0, 1, pswwr_prty0_bb_b0_attn_idx, 0x29a190, 0x29a19c, 0x29a198, + 0x29a194 +}; + +static struct attn_hw_reg *pswwr_prty_bb_b0_regs[1] = { + &pswwr_prty0_bb_b0, +}; + +static const u16 pswwr_prty0_k2_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg pswwr_prty0_k2 = { + 0, 1, pswwr_prty0_k2_attn_idx, 0x29a190, 0x29a19c, 0x29a198, 0x29a194 +}; + +static struct attn_hw_reg *pswwr_prty_k2_regs[1] = { + &pswwr_prty0_k2, +}; 
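
For readers unfamiliar with these generated tables, here is a minimal, illustrative sketch of how one such parity table might be walked. It assumes (from the seven initializers used throughout this file, not from the driver headers) that each entry carries a register index, a valid-bit count, a per-bit index into the description array, and four register addresses (status, status-clear, status-write, mask); the struct and helper names below are hypothetical and the status word is simulated rather than read from hardware.

/*
 * Illustrative sketch only -- not part of the patch above.
 * Decodes a raw attention status word against a table entry shaped like
 * the attn_hw_reg initializers in this file (layout assumed, see note).
 */
#include <stdio.h>

struct attn_reg_sketch {
	unsigned short reg_idx;             /* position of this register in its block */
	unsigned short num_of_bits;         /* number of valid attention bits */
	const unsigned short *bit_attn_idx; /* per-bit index into the name table */
	unsigned int sts_addr;              /* status register address */
	unsigned int sts_clr_addr;          /* status-clear register address */
	unsigned int sts_wr_addr;           /* status-write register address */
	unsigned int mask_addr;             /* mask register address */
};

/* Hypothetical miniature modeled on mcp2_prty1_*: 12 bits, names 1..12. */
static const char *names[13] = {
	"rom_parity", "mem001_ecc", "mem006_ecc_0", "mem006_ecc_1",
	"mem006_ecc_2", "mem006_ecc_3", "mem007_ecc", "mem004_prty",
	"mem003_prty", "mem002_prty", "mem009_prty", "mem008_prty",
	"mem005_prty",
};
static const unsigned short idx[12] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12 };
static const struct attn_reg_sketch prty1 = {
	1, 12, idx, 0x52204, 0x52210, 0x5220c, 0x52208
};

/* Bit i of the status register maps to names[bit_attn_idx[i]]. */
static void report(const struct attn_reg_sketch *r, unsigned int sts)
{
	unsigned short i;

	for (i = 0; i < r->num_of_bits; i++)
		if (sts & (1u << i))
			printf("reg %u (sts 0x%x): %s asserted\n",
			       r->reg_idx, r->sts_addr,
			       names[r->bit_attn_idx[i]]);
}

int main(void)
{
	report(&prty1, 0x5);	/* pretend bits 0 and 2 were latched */
	return 0;
}
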
+ +#ifdef ATTN_DESC +static const char *pswwr2_int_attn_desc[19] = { + "pswwr2_address_error", + "pswwr2_pglue_eop_error", + "pswwr2_pglue_lsr_error", + "pswwr2_tm_underflow", + "pswwr2_qm_underflow", + "pswwr2_src_underflow", + "pswwr2_usdm_underflow", + "pswwr2_tsdm_underflow", + "pswwr2_xsdm_underflow", + "pswwr2_usdmdp_underflow", + "pswwr2_cdu_underflow", + "pswwr2_dbg_underflow", + "pswwr2_dmae_underflow", + "pswwr2_hc_underflow", + "pswwr2_msdm_underflow", + "pswwr2_ysdm_underflow", + "pswwr2_psdm_underflow", + "pswwr2_m2p_underflow", + "pswwr2_pglue_eop_error_in_line", +}; +#else +#define pswwr2_int_attn_desc OSAL_NULL +#endif + +static const u16 pswwr2_int0_bb_a0_attn_idx[19] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, +}; + +static struct attn_hw_reg pswwr2_int0_bb_a0 = { + 0, 19, pswwr2_int0_bb_a0_attn_idx, 0x29b180, 0x29b18c, 0x29b188, + 0x29b184 +}; + +static struct attn_hw_reg *pswwr2_int_bb_a0_regs[1] = { + &pswwr2_int0_bb_a0, +}; + +static const u16 pswwr2_int0_bb_b0_attn_idx[19] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, +}; + +static struct attn_hw_reg pswwr2_int0_bb_b0 = { + 0, 19, pswwr2_int0_bb_b0_attn_idx, 0x29b180, 0x29b18c, 0x29b188, + 0x29b184 +}; + +static struct attn_hw_reg *pswwr2_int_bb_b0_regs[1] = { + &pswwr2_int0_bb_b0, +}; + +static const u16 pswwr2_int0_k2_attn_idx[19] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, +}; + +static struct attn_hw_reg pswwr2_int0_k2 = { + 0, 19, pswwr2_int0_k2_attn_idx, 0x29b180, 0x29b18c, 0x29b188, 0x29b184 +}; + +static struct attn_hw_reg *pswwr2_int_k2_regs[1] = { + &pswwr2_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *pswwr2_prty_attn_desc[114] = { + "pswwr2_datapath_registers", + "pswwr2_mem008_i_ecc_rf_int", + "pswwr2_mem001_i_mem_prty", + "pswwr2_mem014_i_mem_prty_0", + "pswwr2_mem014_i_mem_prty_1", + "pswwr2_mem014_i_mem_prty_2", + "pswwr2_mem014_i_mem_prty_3", + "pswwr2_mem014_i_mem_prty_4", + "pswwr2_mem014_i_mem_prty_5", + "pswwr2_mem014_i_mem_prty_6", + "pswwr2_mem014_i_mem_prty_7", + "pswwr2_mem014_i_mem_prty_8", + "pswwr2_mem016_i_mem_prty_0", + "pswwr2_mem016_i_mem_prty_1", + "pswwr2_mem016_i_mem_prty_2", + "pswwr2_mem016_i_mem_prty_3", + "pswwr2_mem016_i_mem_prty_4", + "pswwr2_mem016_i_mem_prty_5", + "pswwr2_mem016_i_mem_prty_6", + "pswwr2_mem016_i_mem_prty_7", + "pswwr2_mem016_i_mem_prty_8", + "pswwr2_mem007_i_mem_prty_0", + "pswwr2_mem007_i_mem_prty_1", + "pswwr2_mem007_i_mem_prty_2", + "pswwr2_mem007_i_mem_prty_3", + "pswwr2_mem007_i_mem_prty_4", + "pswwr2_mem007_i_mem_prty_5", + "pswwr2_mem007_i_mem_prty_6", + "pswwr2_mem007_i_mem_prty_7", + "pswwr2_mem007_i_mem_prty_8", + "pswwr2_mem017_i_mem_prty_0", + "pswwr2_mem017_i_mem_prty_1", + "pswwr2_mem017_i_mem_prty_2", + "pswwr2_mem017_i_mem_prty_3", + "pswwr2_mem017_i_mem_prty_4", + "pswwr2_mem017_i_mem_prty_5", + "pswwr2_mem017_i_mem_prty_6", + "pswwr2_mem017_i_mem_prty_7", + "pswwr2_mem017_i_mem_prty_8", + "pswwr2_mem009_i_mem_prty_0", + "pswwr2_mem009_i_mem_prty_1", + "pswwr2_mem009_i_mem_prty_2", + "pswwr2_mem009_i_mem_prty_3", + "pswwr2_mem009_i_mem_prty_4", + "pswwr2_mem009_i_mem_prty_5", + "pswwr2_mem009_i_mem_prty_6", + "pswwr2_mem009_i_mem_prty_7", + "pswwr2_mem009_i_mem_prty_8", + "pswwr2_mem013_i_mem_prty_0", + "pswwr2_mem013_i_mem_prty_1", + "pswwr2_mem013_i_mem_prty_2", + "pswwr2_mem013_i_mem_prty_3", + "pswwr2_mem013_i_mem_prty_4", + "pswwr2_mem013_i_mem_prty_5", + "pswwr2_mem013_i_mem_prty_6", + "pswwr2_mem013_i_mem_prty_7", + "pswwr2_mem013_i_mem_prty_8", + 
"pswwr2_mem006_i_mem_prty_0", + "pswwr2_mem006_i_mem_prty_1", + "pswwr2_mem006_i_mem_prty_2", + "pswwr2_mem006_i_mem_prty_3", + "pswwr2_mem006_i_mem_prty_4", + "pswwr2_mem006_i_mem_prty_5", + "pswwr2_mem006_i_mem_prty_6", + "pswwr2_mem006_i_mem_prty_7", + "pswwr2_mem006_i_mem_prty_8", + "pswwr2_mem010_i_mem_prty_0", + "pswwr2_mem010_i_mem_prty_1", + "pswwr2_mem010_i_mem_prty_2", + "pswwr2_mem010_i_mem_prty_3", + "pswwr2_mem010_i_mem_prty_4", + "pswwr2_mem010_i_mem_prty_5", + "pswwr2_mem010_i_mem_prty_6", + "pswwr2_mem010_i_mem_prty_7", + "pswwr2_mem010_i_mem_prty_8", + "pswwr2_mem012_i_mem_prty", + "pswwr2_mem011_i_mem_prty_0", + "pswwr2_mem011_i_mem_prty_1", + "pswwr2_mem011_i_mem_prty_2", + "pswwr2_mem011_i_mem_prty_3", + "pswwr2_mem011_i_mem_prty_4", + "pswwr2_mem011_i_mem_prty_5", + "pswwr2_mem011_i_mem_prty_6", + "pswwr2_mem011_i_mem_prty_7", + "pswwr2_mem011_i_mem_prty_8", + "pswwr2_mem004_i_mem_prty_0", + "pswwr2_mem004_i_mem_prty_1", + "pswwr2_mem004_i_mem_prty_2", + "pswwr2_mem004_i_mem_prty_3", + "pswwr2_mem004_i_mem_prty_4", + "pswwr2_mem004_i_mem_prty_5", + "pswwr2_mem004_i_mem_prty_6", + "pswwr2_mem004_i_mem_prty_7", + "pswwr2_mem004_i_mem_prty_8", + "pswwr2_mem015_i_mem_prty_0", + "pswwr2_mem015_i_mem_prty_1", + "pswwr2_mem015_i_mem_prty_2", + "pswwr2_mem005_i_mem_prty_0", + "pswwr2_mem005_i_mem_prty_1", + "pswwr2_mem005_i_mem_prty_2", + "pswwr2_mem005_i_mem_prty_3", + "pswwr2_mem005_i_mem_prty_4", + "pswwr2_mem005_i_mem_prty_5", + "pswwr2_mem005_i_mem_prty_6", + "pswwr2_mem005_i_mem_prty_7", + "pswwr2_mem005_i_mem_prty_8", + "pswwr2_mem002_i_mem_prty_0", + "pswwr2_mem002_i_mem_prty_1", + "pswwr2_mem002_i_mem_prty_2", + "pswwr2_mem002_i_mem_prty_3", + "pswwr2_mem002_i_mem_prty_4", + "pswwr2_mem003_i_mem_prty_0", + "pswwr2_mem003_i_mem_prty_1", + "pswwr2_mem003_i_mem_prty_2", +}; +#else +#define pswwr2_prty_attn_desc OSAL_NULL +#endif + +static const u16 pswwr2_prty1_bb_a0_attn_idx[31] = { + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, + 21, + 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, +}; + +static struct attn_hw_reg pswwr2_prty1_bb_a0 = { + 0, 31, pswwr2_prty1_bb_a0_attn_idx, 0x29b200, 0x29b20c, 0x29b208, + 0x29b204 +}; + +static const u16 pswwr2_prty2_bb_a0_attn_idx[31] = { + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, +}; + +static struct attn_hw_reg pswwr2_prty2_bb_a0 = { + 1, 31, pswwr2_prty2_bb_a0_attn_idx, 0x29b210, 0x29b21c, 0x29b218, + 0x29b214 +}; + +static const u16 pswwr2_prty3_bb_a0_attn_idx[31] = { + 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, + 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, +}; + +static struct attn_hw_reg pswwr2_prty3_bb_a0 = { + 2, 31, pswwr2_prty3_bb_a0_attn_idx, 0x29b220, 0x29b22c, 0x29b228, + 0x29b224 +}; + +static const u16 pswwr2_prty4_bb_a0_attn_idx[20] = { + 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, + 109, + 110, 111, 112, 113, +}; + +static struct attn_hw_reg pswwr2_prty4_bb_a0 = { + 3, 20, pswwr2_prty4_bb_a0_attn_idx, 0x29b230, 0x29b23c, 0x29b238, + 0x29b234 +}; + +static struct attn_hw_reg *pswwr2_prty_bb_a0_regs[4] = { + &pswwr2_prty1_bb_a0, &pswwr2_prty2_bb_a0, &pswwr2_prty3_bb_a0, + &pswwr2_prty4_bb_a0, +}; + +static const u16 pswwr2_prty0_bb_b0_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg pswwr2_prty0_bb_b0 = { + 0, 1, pswwr2_prty0_bb_b0_attn_idx, 0x29b190, 0x29b19c, 0x29b198, + 0x29b194 +}; + +static const u16 pswwr2_prty1_bb_b0_attn_idx[31] = { + 1, 2, 3, 
4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, + 21, + 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, +}; + +static struct attn_hw_reg pswwr2_prty1_bb_b0 = { + 1, 31, pswwr2_prty1_bb_b0_attn_idx, 0x29b200, 0x29b20c, 0x29b208, + 0x29b204 +}; + +static const u16 pswwr2_prty2_bb_b0_attn_idx[31] = { + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, +}; + +static struct attn_hw_reg pswwr2_prty2_bb_b0 = { + 2, 31, pswwr2_prty2_bb_b0_attn_idx, 0x29b210, 0x29b21c, 0x29b218, + 0x29b214 +}; + +static const u16 pswwr2_prty3_bb_b0_attn_idx[31] = { + 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, + 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, +}; + +static struct attn_hw_reg pswwr2_prty3_bb_b0 = { + 3, 31, pswwr2_prty3_bb_b0_attn_idx, 0x29b220, 0x29b22c, 0x29b228, + 0x29b224 +}; + +static const u16 pswwr2_prty4_bb_b0_attn_idx[20] = { + 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, + 109, + 110, 111, 112, 113, +}; + +static struct attn_hw_reg pswwr2_prty4_bb_b0 = { + 4, 20, pswwr2_prty4_bb_b0_attn_idx, 0x29b230, 0x29b23c, 0x29b238, + 0x29b234 +}; + +static struct attn_hw_reg *pswwr2_prty_bb_b0_regs[5] = { + &pswwr2_prty0_bb_b0, &pswwr2_prty1_bb_b0, &pswwr2_prty2_bb_b0, + &pswwr2_prty3_bb_b0, &pswwr2_prty4_bb_b0, +}; + +static const u16 pswwr2_prty0_k2_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg pswwr2_prty0_k2 = { + 0, 1, pswwr2_prty0_k2_attn_idx, 0x29b190, 0x29b19c, 0x29b198, 0x29b194 +}; + +static const u16 pswwr2_prty1_k2_attn_idx[31] = { + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, + 21, + 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, +}; + +static struct attn_hw_reg pswwr2_prty1_k2 = { + 1, 31, pswwr2_prty1_k2_attn_idx, 0x29b200, 0x29b20c, 0x29b208, 0x29b204 +}; + +static const u16 pswwr2_prty2_k2_attn_idx[31] = { + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, +}; + +static struct attn_hw_reg pswwr2_prty2_k2 = { + 2, 31, pswwr2_prty2_k2_attn_idx, 0x29b210, 0x29b21c, 0x29b218, 0x29b214 +}; + +static const u16 pswwr2_prty3_k2_attn_idx[31] = { + 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, + 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, +}; + +static struct attn_hw_reg pswwr2_prty3_k2 = { + 3, 31, pswwr2_prty3_k2_attn_idx, 0x29b220, 0x29b22c, 0x29b228, 0x29b224 +}; + +static const u16 pswwr2_prty4_k2_attn_idx[20] = { + 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, + 109, + 110, 111, 112, 113, +}; + +static struct attn_hw_reg pswwr2_prty4_k2 = { + 4, 20, pswwr2_prty4_k2_attn_idx, 0x29b230, 0x29b23c, 0x29b238, 0x29b234 +}; + +static struct attn_hw_reg *pswwr2_prty_k2_regs[5] = { + &pswwr2_prty0_k2, &pswwr2_prty1_k2, &pswwr2_prty2_k2, &pswwr2_prty3_k2, + &pswwr2_prty4_k2, +}; + +#ifdef ATTN_DESC +static const char *pswrq_int_attn_desc[21] = { + "pswrq_address_error", + "pswrq_pbf_fifo_overflow", + "pswrq_src_fifo_overflow", + "pswrq_qm_fifo_overflow", + "pswrq_tm_fifo_overflow", + "pswrq_usdm_fifo_overflow", + "pswrq_m2p_fifo_overflow", + "pswrq_xsdm_fifo_overflow", + "pswrq_tsdm_fifo_overflow", + "pswrq_ptu_fifo_overflow", + "pswrq_cduwr_fifo_overflow", + "pswrq_cdurd_fifo_overflow", + "pswrq_dmae_fifo_overflow", + "pswrq_hc_fifo_overflow", + "pswrq_dbg_fifo_overflow", + "pswrq_msdm_fifo_overflow", + "pswrq_ysdm_fifo_overflow", + "pswrq_psdm_fifo_overflow", + "pswrq_prm_fifo_overflow", + 
"pswrq_muld_fifo_overflow", + "pswrq_xyld_fifo_overflow", +}; +#else +#define pswrq_int_attn_desc OSAL_NULL +#endif + +static const u16 pswrq_int0_bb_a0_attn_idx[21] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, +}; + +static struct attn_hw_reg pswrq_int0_bb_a0 = { + 0, 21, pswrq_int0_bb_a0_attn_idx, 0x280180, 0x28018c, 0x280188, + 0x280184 +}; + +static struct attn_hw_reg *pswrq_int_bb_a0_regs[1] = { + &pswrq_int0_bb_a0, +}; + +static const u16 pswrq_int0_bb_b0_attn_idx[21] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, +}; + +static struct attn_hw_reg pswrq_int0_bb_b0 = { + 0, 21, pswrq_int0_bb_b0_attn_idx, 0x280180, 0x28018c, 0x280188, + 0x280184 +}; + +static struct attn_hw_reg *pswrq_int_bb_b0_regs[1] = { + &pswrq_int0_bb_b0, +}; + +static const u16 pswrq_int0_k2_attn_idx[21] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, +}; + +static struct attn_hw_reg pswrq_int0_k2 = { + 0, 21, pswrq_int0_k2_attn_idx, 0x280180, 0x28018c, 0x280188, 0x280184 +}; + +static struct attn_hw_reg *pswrq_int_k2_regs[1] = { + &pswrq_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *pswrq_prty_attn_desc[1] = { + "pswrq_pxp_busip_parity", +}; +#else +#define pswrq_prty_attn_desc OSAL_NULL +#endif + +static const u16 pswrq_prty0_bb_b0_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg pswrq_prty0_bb_b0 = { + 0, 1, pswrq_prty0_bb_b0_attn_idx, 0x280190, 0x28019c, 0x280198, + 0x280194 +}; + +static struct attn_hw_reg *pswrq_prty_bb_b0_regs[1] = { + &pswrq_prty0_bb_b0, +}; + +static const u16 pswrq_prty0_k2_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg pswrq_prty0_k2 = { + 0, 1, pswrq_prty0_k2_attn_idx, 0x280190, 0x28019c, 0x280198, 0x280194 +}; + +static struct attn_hw_reg *pswrq_prty_k2_regs[1] = { + &pswrq_prty0_k2, +}; + +#ifdef ATTN_DESC +static const char *pswrq2_int_attn_desc[15] = { + "pswrq2_address_error", + "pswrq2_l2p_fifo_overflow", + "pswrq2_wdfifo_overflow", + "pswrq2_phyaddr_fifo_of", + "pswrq2_l2p_violation_1", + "pswrq2_l2p_violation_2", + "pswrq2_free_list_empty", + "pswrq2_elt_addr", + "pswrq2_l2p_vf_err", + "pswrq2_core_wdone_overflow", + "pswrq2_treq_fifo_underflow", + "pswrq2_treq_fifo_overflow", + "pswrq2_icpl_fifo_underflow", + "pswrq2_icpl_fifo_overflow", + "pswrq2_back2back_atc_response", +}; +#else +#define pswrq2_int_attn_desc OSAL_NULL +#endif + +static const u16 pswrq2_int0_bb_a0_attn_idx[15] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, +}; + +static struct attn_hw_reg pswrq2_int0_bb_a0 = { + 0, 15, pswrq2_int0_bb_a0_attn_idx, 0x240180, 0x24018c, 0x240188, + 0x240184 +}; + +static struct attn_hw_reg *pswrq2_int_bb_a0_regs[1] = { + &pswrq2_int0_bb_a0, +}; + +static const u16 pswrq2_int0_bb_b0_attn_idx[15] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, +}; + +static struct attn_hw_reg pswrq2_int0_bb_b0 = { + 0, 15, pswrq2_int0_bb_b0_attn_idx, 0x240180, 0x24018c, 0x240188, + 0x240184 +}; + +static struct attn_hw_reg *pswrq2_int_bb_b0_regs[1] = { + &pswrq2_int0_bb_b0, +}; + +static const u16 pswrq2_int0_k2_attn_idx[15] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, +}; + +static struct attn_hw_reg pswrq2_int0_k2 = { + 0, 15, pswrq2_int0_k2_attn_idx, 0x240180, 0x24018c, 0x240188, 0x240184 +}; + +static struct attn_hw_reg *pswrq2_int_k2_regs[1] = { + &pswrq2_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *pswrq2_prty_attn_desc[11] = { + "pswrq2_mem004_i_ecc_rf_int", + "pswrq2_mem005_i_ecc_rf_int", + "pswrq2_mem001_i_ecc_rf_int", + 
"pswrq2_mem006_i_mem_prty", + "pswrq2_mem008_i_mem_prty", + "pswrq2_mem009_i_mem_prty", + "pswrq2_mem003_i_mem_prty", + "pswrq2_mem002_i_mem_prty", + "pswrq2_mem010_i_mem_prty", + "pswrq2_mem007_i_mem_prty", + "pswrq2_mem005_i_mem_prty", +}; +#else +#define pswrq2_prty_attn_desc OSAL_NULL +#endif + +static const u16 pswrq2_prty1_bb_a0_attn_idx[9] = { + 0, 2, 3, 4, 5, 6, 7, 9, 10, +}; + +static struct attn_hw_reg pswrq2_prty1_bb_a0 = { + 0, 9, pswrq2_prty1_bb_a0_attn_idx, 0x240200, 0x24020c, 0x240208, + 0x240204 +}; + +static struct attn_hw_reg *pswrq2_prty_bb_a0_regs[1] = { + &pswrq2_prty1_bb_a0, +}; + +static const u16 pswrq2_prty1_bb_b0_attn_idx[9] = { + 0, 2, 3, 4, 5, 6, 7, 9, 10, +}; + +static struct attn_hw_reg pswrq2_prty1_bb_b0 = { + 0, 9, pswrq2_prty1_bb_b0_attn_idx, 0x240200, 0x24020c, 0x240208, + 0x240204 +}; + +static struct attn_hw_reg *pswrq2_prty_bb_b0_regs[1] = { + &pswrq2_prty1_bb_b0, +}; + +static const u16 pswrq2_prty1_k2_attn_idx[10] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, +}; + +static struct attn_hw_reg pswrq2_prty1_k2 = { + 0, 10, pswrq2_prty1_k2_attn_idx, 0x240200, 0x24020c, 0x240208, 0x240204 +}; + +static struct attn_hw_reg *pswrq2_prty_k2_regs[1] = { + &pswrq2_prty1_k2, +}; + +#ifdef ATTN_DESC +static const char *pglcs_int_attn_desc[2] = { + "pglcs_address_error", + "pglcs_rasdp_error", +}; +#else +#define pglcs_int_attn_desc OSAL_NULL +#endif + +static const u16 pglcs_int0_bb_a0_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg pglcs_int0_bb_a0 = { + 0, 1, pglcs_int0_bb_a0_attn_idx, 0x1d00, 0x1d0c, 0x1d08, 0x1d04 +}; + +static struct attn_hw_reg *pglcs_int_bb_a0_regs[1] = { + &pglcs_int0_bb_a0, +}; + +static const u16 pglcs_int0_bb_b0_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg pglcs_int0_bb_b0 = { + 0, 1, pglcs_int0_bb_b0_attn_idx, 0x1d00, 0x1d0c, 0x1d08, 0x1d04 +}; + +static struct attn_hw_reg *pglcs_int_bb_b0_regs[1] = { + &pglcs_int0_bb_b0, +}; + +static const u16 pglcs_int0_k2_attn_idx[2] = { + 0, 1, +}; + +static struct attn_hw_reg pglcs_int0_k2 = { + 0, 2, pglcs_int0_k2_attn_idx, 0x1d00, 0x1d0c, 0x1d08, 0x1d04 +}; + +static struct attn_hw_reg *pglcs_int_k2_regs[1] = { + &pglcs_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *dmae_int_attn_desc[2] = { + "dmae_address_error", + "dmae_pci_rd_buf_err", +}; +#else +#define dmae_int_attn_desc OSAL_NULL +#endif + +static const u16 dmae_int0_bb_a0_attn_idx[2] = { + 0, 1, +}; + +static struct attn_hw_reg dmae_int0_bb_a0 = { + 0, 2, dmae_int0_bb_a0_attn_idx, 0xc180, 0xc18c, 0xc188, 0xc184 +}; + +static struct attn_hw_reg *dmae_int_bb_a0_regs[1] = { + &dmae_int0_bb_a0, +}; + +static const u16 dmae_int0_bb_b0_attn_idx[2] = { + 0, 1, +}; + +static struct attn_hw_reg dmae_int0_bb_b0 = { + 0, 2, dmae_int0_bb_b0_attn_idx, 0xc180, 0xc18c, 0xc188, 0xc184 +}; + +static struct attn_hw_reg *dmae_int_bb_b0_regs[1] = { + &dmae_int0_bb_b0, +}; + +static const u16 dmae_int0_k2_attn_idx[2] = { + 0, 1, +}; + +static struct attn_hw_reg dmae_int0_k2 = { + 0, 2, dmae_int0_k2_attn_idx, 0xc180, 0xc18c, 0xc188, 0xc184 +}; + +static struct attn_hw_reg *dmae_int_k2_regs[1] = { + &dmae_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *dmae_prty_attn_desc[3] = { + "dmae_mem002_i_mem_prty", + "dmae_mem001_i_mem_prty", + "dmae_mem003_i_mem_prty", +}; +#else +#define dmae_prty_attn_desc OSAL_NULL +#endif + +static const u16 dmae_prty1_bb_a0_attn_idx[3] = { + 0, 1, 2, +}; + +static struct attn_hw_reg dmae_prty1_bb_a0 = { + 0, 3, dmae_prty1_bb_a0_attn_idx, 0xc200, 0xc20c, 0xc208, 0xc204 +}; + +static struct attn_hw_reg 
*dmae_prty_bb_a0_regs[1] = { + &dmae_prty1_bb_a0, +}; + +static const u16 dmae_prty1_bb_b0_attn_idx[3] = { + 0, 1, 2, +}; + +static struct attn_hw_reg dmae_prty1_bb_b0 = { + 0, 3, dmae_prty1_bb_b0_attn_idx, 0xc200, 0xc20c, 0xc208, 0xc204 +}; + +static struct attn_hw_reg *dmae_prty_bb_b0_regs[1] = { + &dmae_prty1_bb_b0, +}; + +static const u16 dmae_prty1_k2_attn_idx[3] = { + 0, 1, 2, +}; + +static struct attn_hw_reg dmae_prty1_k2 = { + 0, 3, dmae_prty1_k2_attn_idx, 0xc200, 0xc20c, 0xc208, 0xc204 +}; + +static struct attn_hw_reg *dmae_prty_k2_regs[1] = { + &dmae_prty1_k2, +}; + +#ifdef ATTN_DESC +static const char *ptu_int_attn_desc[8] = { + "ptu_address_error", + "ptu_atc_tcpl_to_not_pend", + "ptu_atc_gpa_multiple_hits", + "ptu_atc_rcpl_to_empty_cnt", + "ptu_atc_tcpl_error", + "ptu_atc_inv_halt", + "ptu_atc_reuse_transpend", + "ptu_atc_ireq_less_than_stu", +}; +#else +#define ptu_int_attn_desc OSAL_NULL +#endif + +static const u16 ptu_int0_bb_a0_attn_idx[8] = { + 0, 1, 2, 3, 4, 5, 6, 7, +}; + +static struct attn_hw_reg ptu_int0_bb_a0 = { + 0, 8, ptu_int0_bb_a0_attn_idx, 0x560180, 0x56018c, 0x560188, 0x560184 +}; + +static struct attn_hw_reg *ptu_int_bb_a0_regs[1] = { + &ptu_int0_bb_a0, +}; + +static const u16 ptu_int0_bb_b0_attn_idx[8] = { + 0, 1, 2, 3, 4, 5, 6, 7, +}; + +static struct attn_hw_reg ptu_int0_bb_b0 = { + 0, 8, ptu_int0_bb_b0_attn_idx, 0x560180, 0x56018c, 0x560188, 0x560184 +}; + +static struct attn_hw_reg *ptu_int_bb_b0_regs[1] = { + &ptu_int0_bb_b0, +}; + +static const u16 ptu_int0_k2_attn_idx[8] = { + 0, 1, 2, 3, 4, 5, 6, 7, +}; + +static struct attn_hw_reg ptu_int0_k2 = { + 0, 8, ptu_int0_k2_attn_idx, 0x560180, 0x56018c, 0x560188, 0x560184 +}; + +static struct attn_hw_reg *ptu_int_k2_regs[1] = { + &ptu_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *ptu_prty_attn_desc[18] = { + "ptu_mem017_i_ecc_rf_int", + "ptu_mem018_i_mem_prty", + "ptu_mem006_i_mem_prty", + "ptu_mem001_i_mem_prty", + "ptu_mem002_i_mem_prty", + "ptu_mem003_i_mem_prty", + "ptu_mem004_i_mem_prty", + "ptu_mem005_i_mem_prty", + "ptu_mem009_i_mem_prty", + "ptu_mem010_i_mem_prty", + "ptu_mem016_i_mem_prty", + "ptu_mem007_i_mem_prty", + "ptu_mem015_i_mem_prty", + "ptu_mem013_i_mem_prty", + "ptu_mem012_i_mem_prty", + "ptu_mem014_i_mem_prty", + "ptu_mem011_i_mem_prty", + "ptu_mem008_i_mem_prty", +}; +#else +#define ptu_prty_attn_desc OSAL_NULL +#endif + +static const u16 ptu_prty1_bb_a0_attn_idx[18] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, +}; + +static struct attn_hw_reg ptu_prty1_bb_a0 = { + 0, 18, ptu_prty1_bb_a0_attn_idx, 0x560200, 0x56020c, 0x560208, 0x560204 +}; + +static struct attn_hw_reg *ptu_prty_bb_a0_regs[1] = { + &ptu_prty1_bb_a0, +}; + +static const u16 ptu_prty1_bb_b0_attn_idx[18] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, +}; + +static struct attn_hw_reg ptu_prty1_bb_b0 = { + 0, 18, ptu_prty1_bb_b0_attn_idx, 0x560200, 0x56020c, 0x560208, 0x560204 +}; + +static struct attn_hw_reg *ptu_prty_bb_b0_regs[1] = { + &ptu_prty1_bb_b0, +}; + +static const u16 ptu_prty1_k2_attn_idx[18] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, +}; + +static struct attn_hw_reg ptu_prty1_k2 = { + 0, 18, ptu_prty1_k2_attn_idx, 0x560200, 0x56020c, 0x560208, 0x560204 +}; + +static struct attn_hw_reg *ptu_prty_k2_regs[1] = { + &ptu_prty1_k2, +}; + +#ifdef ATTN_DESC +static const char *tcm_int_attn_desc[41] = { + "tcm_address_error", + "tcm_is_storm_ovfl_err", + "tcm_is_storm_under_err", + "tcm_is_tsdm_ovfl_err", + "tcm_is_tsdm_under_err", + 
"tcm_is_msem_ovfl_err", + "tcm_is_msem_under_err", + "tcm_is_ysem_ovfl_err", + "tcm_is_ysem_under_err", + "tcm_is_dorq_ovfl_err", + "tcm_is_dorq_under_err", + "tcm_is_pbf_ovfl_err", + "tcm_is_pbf_under_err", + "tcm_is_prs_ovfl_err", + "tcm_is_prs_under_err", + "tcm_is_tm_ovfl_err", + "tcm_is_tm_under_err", + "tcm_is_qm_p_ovfl_err", + "tcm_is_qm_p_under_err", + "tcm_is_qm_s_ovfl_err", + "tcm_is_qm_s_under_err", + "tcm_is_grc_ovfl_err0", + "tcm_is_grc_under_err0", + "tcm_is_grc_ovfl_err1", + "tcm_is_grc_under_err1", + "tcm_is_grc_ovfl_err2", + "tcm_is_grc_under_err2", + "tcm_is_grc_ovfl_err3", + "tcm_is_grc_under_err3", + "tcm_in_prcs_tbl_ovfl", + "tcm_agg_con_data_buf_ovfl", + "tcm_agg_con_cmd_buf_ovfl", + "tcm_sm_con_data_buf_ovfl", + "tcm_sm_con_cmd_buf_ovfl", + "tcm_agg_task_data_buf_ovfl", + "tcm_agg_task_cmd_buf_ovfl", + "tcm_sm_task_data_buf_ovfl", + "tcm_sm_task_cmd_buf_ovfl", + "tcm_fi_desc_input_violate", + "tcm_se_desc_input_violate", + "tcm_qmreg_more4", +}; +#else +#define tcm_int_attn_desc OSAL_NULL +#endif + +static const u16 tcm_int0_bb_a0_attn_idx[8] = { + 0, 1, 2, 3, 4, 5, 6, 7, +}; + +static struct attn_hw_reg tcm_int0_bb_a0 = { + 0, 8, tcm_int0_bb_a0_attn_idx, 0x1180180, 0x118018c, 0x1180188, + 0x1180184 +}; + +static const u16 tcm_int1_bb_a0_attn_idx[32] = { + 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, + 26, + 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, +}; + +static struct attn_hw_reg tcm_int1_bb_a0 = { + 1, 32, tcm_int1_bb_a0_attn_idx, 0x1180190, 0x118019c, 0x1180198, + 0x1180194 +}; + +static const u16 tcm_int2_bb_a0_attn_idx[1] = { + 40, +}; + +static struct attn_hw_reg tcm_int2_bb_a0 = { + 2, 1, tcm_int2_bb_a0_attn_idx, 0x11801a0, 0x11801ac, 0x11801a8, + 0x11801a4 +}; + +static struct attn_hw_reg *tcm_int_bb_a0_regs[3] = { + &tcm_int0_bb_a0, &tcm_int1_bb_a0, &tcm_int2_bb_a0, +}; + +static const u16 tcm_int0_bb_b0_attn_idx[8] = { + 0, 1, 2, 3, 4, 5, 6, 7, +}; + +static struct attn_hw_reg tcm_int0_bb_b0 = { + 0, 8, tcm_int0_bb_b0_attn_idx, 0x1180180, 0x118018c, 0x1180188, + 0x1180184 +}; + +static const u16 tcm_int1_bb_b0_attn_idx[32] = { + 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, + 26, + 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, +}; + +static struct attn_hw_reg tcm_int1_bb_b0 = { + 1, 32, tcm_int1_bb_b0_attn_idx, 0x1180190, 0x118019c, 0x1180198, + 0x1180194 +}; + +static const u16 tcm_int2_bb_b0_attn_idx[1] = { + 40, +}; + +static struct attn_hw_reg tcm_int2_bb_b0 = { + 2, 1, tcm_int2_bb_b0_attn_idx, 0x11801a0, 0x11801ac, 0x11801a8, + 0x11801a4 +}; + +static struct attn_hw_reg *tcm_int_bb_b0_regs[3] = { + &tcm_int0_bb_b0, &tcm_int1_bb_b0, &tcm_int2_bb_b0, +}; + +static const u16 tcm_int0_k2_attn_idx[8] = { + 0, 1, 2, 3, 4, 5, 6, 7, +}; + +static struct attn_hw_reg tcm_int0_k2 = { + 0, 8, tcm_int0_k2_attn_idx, 0x1180180, 0x118018c, 0x1180188, 0x1180184 +}; + +static const u16 tcm_int1_k2_attn_idx[32] = { + 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, + 26, + 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, +}; + +static struct attn_hw_reg tcm_int1_k2 = { + 1, 32, tcm_int1_k2_attn_idx, 0x1180190, 0x118019c, 0x1180198, 0x1180194 +}; + +static const u16 tcm_int2_k2_attn_idx[1] = { + 40, +}; + +static struct attn_hw_reg tcm_int2_k2 = { + 2, 1, tcm_int2_k2_attn_idx, 0x11801a0, 0x11801ac, 0x11801a8, 0x11801a4 +}; + +static struct attn_hw_reg *tcm_int_k2_regs[3] = { + &tcm_int0_k2, &tcm_int1_k2, &tcm_int2_k2, +}; + +#ifdef ATTN_DESC +static const char *tcm_prty_attn_desc[51] 
= { + "tcm_mem026_i_ecc_rf_int", + "tcm_mem003_i_ecc_0_rf_int", + "tcm_mem003_i_ecc_1_rf_int", + "tcm_mem022_i_ecc_0_rf_int", + "tcm_mem022_i_ecc_1_rf_int", + "tcm_mem005_i_ecc_0_rf_int", + "tcm_mem005_i_ecc_1_rf_int", + "tcm_mem024_i_ecc_0_rf_int", + "tcm_mem024_i_ecc_1_rf_int", + "tcm_mem018_i_mem_prty", + "tcm_mem019_i_mem_prty", + "tcm_mem015_i_mem_prty", + "tcm_mem016_i_mem_prty", + "tcm_mem017_i_mem_prty", + "tcm_mem010_i_mem_prty", + "tcm_mem020_i_mem_prty", + "tcm_mem011_i_mem_prty", + "tcm_mem012_i_mem_prty", + "tcm_mem013_i_mem_prty", + "tcm_mem014_i_mem_prty", + "tcm_mem029_i_mem_prty", + "tcm_mem028_i_mem_prty", + "tcm_mem027_i_mem_prty", + "tcm_mem004_i_mem_prty", + "tcm_mem023_i_mem_prty", + "tcm_mem006_i_mem_prty", + "tcm_mem025_i_mem_prty", + "tcm_mem021_i_mem_prty", + "tcm_mem007_i_mem_prty_0", + "tcm_mem007_i_mem_prty_1", + "tcm_mem008_i_mem_prty", + "tcm_mem025_i_ecc_rf_int", + "tcm_mem021_i_ecc_0_rf_int", + "tcm_mem021_i_ecc_1_rf_int", + "tcm_mem023_i_ecc_0_rf_int", + "tcm_mem023_i_ecc_1_rf_int", + "tcm_mem026_i_mem_prty", + "tcm_mem022_i_mem_prty", + "tcm_mem024_i_mem_prty", + "tcm_mem009_i_mem_prty", + "tcm_mem024_i_ecc_rf_int", + "tcm_mem001_i_ecc_0_rf_int", + "tcm_mem001_i_ecc_1_rf_int", + "tcm_mem019_i_ecc_0_rf_int", + "tcm_mem019_i_ecc_1_rf_int", + "tcm_mem022_i_ecc_rf_int", + "tcm_mem002_i_mem_prty", + "tcm_mem005_i_mem_prty_0", + "tcm_mem005_i_mem_prty_1", + "tcm_mem001_i_mem_prty", + "tcm_mem007_i_mem_prty", +}; +#else +#define tcm_prty_attn_desc OSAL_NULL +#endif + +static const u16 tcm_prty1_bb_a0_attn_idx[31] = { + 1, 2, 9, 11, 12, 13, 14, 15, 16, 17, 18, 19, 22, 23, 24, 25, 26, 30, 32, + 33, 36, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, +}; + +static struct attn_hw_reg tcm_prty1_bb_a0 = { + 0, 31, tcm_prty1_bb_a0_attn_idx, 0x1180200, 0x118020c, 0x1180208, + 0x1180204 +}; + +static const u16 tcm_prty2_bb_a0_attn_idx[3] = { + 50, 21, 20, +}; + +static struct attn_hw_reg tcm_prty2_bb_a0 = { + 1, 3, tcm_prty2_bb_a0_attn_idx, 0x1180210, 0x118021c, 0x1180218, + 0x1180214 +}; + +static struct attn_hw_reg *tcm_prty_bb_a0_regs[2] = { + &tcm_prty1_bb_a0, &tcm_prty2_bb_a0, +}; + +static const u16 tcm_prty1_bb_b0_attn_idx[31] = { + 1, 2, 5, 6, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 21, 22, 23, 25, + 28, + 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, +}; + +static struct attn_hw_reg tcm_prty1_bb_b0 = { + 0, 31, tcm_prty1_bb_b0_attn_idx, 0x1180200, 0x118020c, 0x1180208, + 0x1180204 +}; + +static const u16 tcm_prty2_bb_b0_attn_idx[2] = { + 49, 46, +}; + +static struct attn_hw_reg tcm_prty2_bb_b0 = { + 1, 2, tcm_prty2_bb_b0_attn_idx, 0x1180210, 0x118021c, 0x1180218, + 0x1180214 +}; + +static struct attn_hw_reg *tcm_prty_bb_b0_regs[2] = { + &tcm_prty1_bb_b0, &tcm_prty2_bb_b0, +}; + +static const u16 tcm_prty1_k2_attn_idx[31] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, +}; + +static struct attn_hw_reg tcm_prty1_k2 = { + 0, 31, tcm_prty1_k2_attn_idx, 0x1180200, 0x118020c, 0x1180208, + 0x1180204 +}; + +static const u16 tcm_prty2_k2_attn_idx[3] = { + 39, 49, 46, +}; + +static struct attn_hw_reg tcm_prty2_k2 = { + 1, 3, tcm_prty2_k2_attn_idx, 0x1180210, 0x118021c, 0x1180218, 0x1180214 +}; + +static struct attn_hw_reg *tcm_prty_k2_regs[2] = { + &tcm_prty1_k2, &tcm_prty2_k2, +}; + +#ifdef ATTN_DESC +static const char *mcm_int_attn_desc[41] = { + "mcm_address_error", + "mcm_is_storm_ovfl_err", + "mcm_is_storm_under_err", + "mcm_is_msdm_ovfl_err", + "mcm_is_msdm_under_err", + "mcm_is_ysdm_ovfl_err", + 
"mcm_is_ysdm_under_err", + "mcm_is_usdm_ovfl_err", + "mcm_is_usdm_under_err", + "mcm_is_tmld_ovfl_err", + "mcm_is_tmld_under_err", + "mcm_is_usem_ovfl_err", + "mcm_is_usem_under_err", + "mcm_is_ysem_ovfl_err", + "mcm_is_ysem_under_err", + "mcm_is_pbf_ovfl_err", + "mcm_is_pbf_under_err", + "mcm_is_qm_p_ovfl_err", + "mcm_is_qm_p_under_err", + "mcm_is_qm_s_ovfl_err", + "mcm_is_qm_s_under_err", + "mcm_is_grc_ovfl_err0", + "mcm_is_grc_under_err0", + "mcm_is_grc_ovfl_err1", + "mcm_is_grc_under_err1", + "mcm_is_grc_ovfl_err2", + "mcm_is_grc_under_err2", + "mcm_is_grc_ovfl_err3", + "mcm_is_grc_under_err3", + "mcm_in_prcs_tbl_ovfl", + "mcm_agg_con_data_buf_ovfl", + "mcm_agg_con_cmd_buf_ovfl", + "mcm_sm_con_data_buf_ovfl", + "mcm_sm_con_cmd_buf_ovfl", + "mcm_agg_task_data_buf_ovfl", + "mcm_agg_task_cmd_buf_ovfl", + "mcm_sm_task_data_buf_ovfl", + "mcm_sm_task_cmd_buf_ovfl", + "mcm_fi_desc_input_violate", + "mcm_se_desc_input_violate", + "mcm_qmreg_more4", +}; +#else +#define mcm_int_attn_desc OSAL_NULL +#endif + +static const u16 mcm_int0_bb_a0_attn_idx[14] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, +}; + +static struct attn_hw_reg mcm_int0_bb_a0 = { + 0, 14, mcm_int0_bb_a0_attn_idx, 0x1200180, 0x120018c, 0x1200188, + 0x1200184 +}; + +static const u16 mcm_int1_bb_a0_attn_idx[26] = { + 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + 32, 33, 34, 35, 36, 37, 38, 39, +}; + +static struct attn_hw_reg mcm_int1_bb_a0 = { + 1, 26, mcm_int1_bb_a0_attn_idx, 0x1200190, 0x120019c, 0x1200198, + 0x1200194 +}; + +static const u16 mcm_int2_bb_a0_attn_idx[1] = { + 40, +}; + +static struct attn_hw_reg mcm_int2_bb_a0 = { + 2, 1, mcm_int2_bb_a0_attn_idx, 0x12001a0, 0x12001ac, 0x12001a8, + 0x12001a4 +}; + +static struct attn_hw_reg *mcm_int_bb_a0_regs[3] = { + &mcm_int0_bb_a0, &mcm_int1_bb_a0, &mcm_int2_bb_a0, +}; + +static const u16 mcm_int0_bb_b0_attn_idx[14] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, +}; + +static struct attn_hw_reg mcm_int0_bb_b0 = { + 0, 14, mcm_int0_bb_b0_attn_idx, 0x1200180, 0x120018c, 0x1200188, + 0x1200184 +}; + +static const u16 mcm_int1_bb_b0_attn_idx[26] = { + 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + 32, 33, 34, 35, 36, 37, 38, 39, +}; + +static struct attn_hw_reg mcm_int1_bb_b0 = { + 1, 26, mcm_int1_bb_b0_attn_idx, 0x1200190, 0x120019c, 0x1200198, + 0x1200194 +}; + +static const u16 mcm_int2_bb_b0_attn_idx[1] = { + 40, +}; + +static struct attn_hw_reg mcm_int2_bb_b0 = { + 2, 1, mcm_int2_bb_b0_attn_idx, 0x12001a0, 0x12001ac, 0x12001a8, + 0x12001a4 +}; + +static struct attn_hw_reg *mcm_int_bb_b0_regs[3] = { + &mcm_int0_bb_b0, &mcm_int1_bb_b0, &mcm_int2_bb_b0, +}; + +static const u16 mcm_int0_k2_attn_idx[14] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, +}; + +static struct attn_hw_reg mcm_int0_k2 = { + 0, 14, mcm_int0_k2_attn_idx, 0x1200180, 0x120018c, 0x1200188, 0x1200184 +}; + +static const u16 mcm_int1_k2_attn_idx[26] = { + 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + 32, 33, 34, 35, 36, 37, 38, 39, +}; + +static struct attn_hw_reg mcm_int1_k2 = { + 1, 26, mcm_int1_k2_attn_idx, 0x1200190, 0x120019c, 0x1200198, 0x1200194 +}; + +static const u16 mcm_int2_k2_attn_idx[1] = { + 40, +}; + +static struct attn_hw_reg mcm_int2_k2 = { + 2, 1, mcm_int2_k2_attn_idx, 0x12001a0, 0x12001ac, 0x12001a8, 0x12001a4 +}; + +static struct attn_hw_reg *mcm_int_k2_regs[3] = { + &mcm_int0_k2, &mcm_int1_k2, &mcm_int2_k2, +}; + +#ifdef ATTN_DESC +static const char *mcm_prty_attn_desc[46] = { + 
"mcm_mem028_i_ecc_rf_int", + "mcm_mem003_i_ecc_rf_int", + "mcm_mem023_i_ecc_0_rf_int", + "mcm_mem023_i_ecc_1_rf_int", + "mcm_mem005_i_ecc_0_rf_int", + "mcm_mem005_i_ecc_1_rf_int", + "mcm_mem025_i_ecc_0_rf_int", + "mcm_mem025_i_ecc_1_rf_int", + "mcm_mem026_i_ecc_rf_int", + "mcm_mem017_i_mem_prty", + "mcm_mem019_i_mem_prty", + "mcm_mem016_i_mem_prty", + "mcm_mem015_i_mem_prty", + "mcm_mem020_i_mem_prty", + "mcm_mem021_i_mem_prty", + "mcm_mem018_i_mem_prty", + "mcm_mem011_i_mem_prty", + "mcm_mem012_i_mem_prty", + "mcm_mem013_i_mem_prty", + "mcm_mem014_i_mem_prty", + "mcm_mem031_i_mem_prty", + "mcm_mem030_i_mem_prty", + "mcm_mem029_i_mem_prty", + "mcm_mem004_i_mem_prty", + "mcm_mem024_i_mem_prty", + "mcm_mem006_i_mem_prty", + "mcm_mem027_i_mem_prty", + "mcm_mem022_i_mem_prty", + "mcm_mem007_i_mem_prty_0", + "mcm_mem007_i_mem_prty_1", + "mcm_mem008_i_mem_prty", + "mcm_mem001_i_ecc_rf_int", + "mcm_mem021_i_ecc_0_rf_int", + "mcm_mem021_i_ecc_1_rf_int", + "mcm_mem003_i_ecc_0_rf_int", + "mcm_mem003_i_ecc_1_rf_int", + "mcm_mem024_i_ecc_rf_int", + "mcm_mem009_i_mem_prty", + "mcm_mem010_i_mem_prty", + "mcm_mem028_i_mem_prty", + "mcm_mem002_i_mem_prty", + "mcm_mem025_i_mem_prty", + "mcm_mem005_i_mem_prty_0", + "mcm_mem005_i_mem_prty_1", + "mcm_mem001_i_mem_prty", + "mcm_mem007_i_mem_prty", +}; +#else +#define mcm_prty_attn_desc OSAL_NULL +#endif + +static const u16 mcm_prty1_bb_a0_attn_idx[31] = { + 2, 3, 8, 9, 10, 11, 12, 13, 15, 16, 17, 18, 19, 22, 23, 25, 26, 27, 31, + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, +}; + +static struct attn_hw_reg mcm_prty1_bb_a0 = { + 0, 31, mcm_prty1_bb_a0_attn_idx, 0x1200200, 0x120020c, 0x1200208, + 0x1200204 +}; + +static const u16 mcm_prty2_bb_a0_attn_idx[4] = { + 45, 30, 21, 20, +}; + +static struct attn_hw_reg mcm_prty2_bb_a0 = { + 1, 4, mcm_prty2_bb_a0_attn_idx, 0x1200210, 0x120021c, 0x1200218, + 0x1200214 +}; + +static struct attn_hw_reg *mcm_prty_bb_a0_regs[2] = { + &mcm_prty1_bb_a0, &mcm_prty2_bb_a0, +}; + +static const u16 mcm_prty1_bb_b0_attn_idx[31] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, +}; + +static struct attn_hw_reg mcm_prty1_bb_b0 = { + 0, 31, mcm_prty1_bb_b0_attn_idx, 0x1200200, 0x120020c, 0x1200208, + 0x1200204 +}; + +static const u16 mcm_prty2_bb_b0_attn_idx[4] = { + 37, 38, 44, 40, +}; + +static struct attn_hw_reg mcm_prty2_bb_b0 = { + 1, 4, mcm_prty2_bb_b0_attn_idx, 0x1200210, 0x120021c, 0x1200218, + 0x1200214 +}; + +static struct attn_hw_reg *mcm_prty_bb_b0_regs[2] = { + &mcm_prty1_bb_b0, &mcm_prty2_bb_b0, +}; + +static const u16 mcm_prty1_k2_attn_idx[31] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, +}; + +static struct attn_hw_reg mcm_prty1_k2 = { + 0, 31, mcm_prty1_k2_attn_idx, 0x1200200, 0x120020c, 0x1200208, + 0x1200204 +}; + +static const u16 mcm_prty2_k2_attn_idx[4] = { + 37, 38, 44, 40, +}; + +static struct attn_hw_reg mcm_prty2_k2 = { + 1, 4, mcm_prty2_k2_attn_idx, 0x1200210, 0x120021c, 0x1200218, 0x1200214 +}; + +static struct attn_hw_reg *mcm_prty_k2_regs[2] = { + &mcm_prty1_k2, &mcm_prty2_k2, +}; + +#ifdef ATTN_DESC +static const char *ucm_int_attn_desc[47] = { + "ucm_address_error", + "ucm_is_storm_ovfl_err", + "ucm_is_storm_under_err", + "ucm_is_xsdm_ovfl_err", + "ucm_is_xsdm_under_err", + "ucm_is_ysdm_ovfl_err", + "ucm_is_ysdm_under_err", + "ucm_is_usdm_ovfl_err", + "ucm_is_usdm_under_err", + "ucm_is_rdif_ovfl_err", + "ucm_is_rdif_under_err", + 
"ucm_is_tdif_ovfl_err", + "ucm_is_tdif_under_err", + "ucm_is_muld_ovfl_err", + "ucm_is_muld_under_err", + "ucm_is_yuld_ovfl_err", + "ucm_is_yuld_under_err", + "ucm_is_dorq_ovfl_err", + "ucm_is_dorq_under_err", + "ucm_is_pbf_ovfl_err", + "ucm_is_pbf_under_err", + "ucm_is_tm_ovfl_err", + "ucm_is_tm_under_err", + "ucm_is_qm_p_ovfl_err", + "ucm_is_qm_p_under_err", + "ucm_is_qm_s_ovfl_err", + "ucm_is_qm_s_under_err", + "ucm_is_grc_ovfl_err0", + "ucm_is_grc_under_err0", + "ucm_is_grc_ovfl_err1", + "ucm_is_grc_under_err1", + "ucm_is_grc_ovfl_err2", + "ucm_is_grc_under_err2", + "ucm_is_grc_ovfl_err3", + "ucm_is_grc_under_err3", + "ucm_in_prcs_tbl_ovfl", + "ucm_agg_con_data_buf_ovfl", + "ucm_agg_con_cmd_buf_ovfl", + "ucm_sm_con_data_buf_ovfl", + "ucm_sm_con_cmd_buf_ovfl", + "ucm_agg_task_data_buf_ovfl", + "ucm_agg_task_cmd_buf_ovfl", + "ucm_sm_task_data_buf_ovfl", + "ucm_sm_task_cmd_buf_ovfl", + "ucm_fi_desc_input_violate", + "ucm_se_desc_input_violate", + "ucm_qmreg_more4", +}; +#else +#define ucm_int_attn_desc OSAL_NULL +#endif + +static const u16 ucm_int0_bb_a0_attn_idx[17] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, +}; + +static struct attn_hw_reg ucm_int0_bb_a0 = { + 0, 17, ucm_int0_bb_a0_attn_idx, 0x1280180, 0x128018c, 0x1280188, + 0x1280184 +}; + +static const u16 ucm_int1_bb_a0_attn_idx[29] = { + 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, + 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, +}; + +static struct attn_hw_reg ucm_int1_bb_a0 = { + 1, 29, ucm_int1_bb_a0_attn_idx, 0x1280190, 0x128019c, 0x1280198, + 0x1280194 +}; + +static const u16 ucm_int2_bb_a0_attn_idx[1] = { + 46, +}; + +static struct attn_hw_reg ucm_int2_bb_a0 = { + 2, 1, ucm_int2_bb_a0_attn_idx, 0x12801a0, 0x12801ac, 0x12801a8, + 0x12801a4 +}; + +static struct attn_hw_reg *ucm_int_bb_a0_regs[3] = { + &ucm_int0_bb_a0, &ucm_int1_bb_a0, &ucm_int2_bb_a0, +}; + +static const u16 ucm_int0_bb_b0_attn_idx[17] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, +}; + +static struct attn_hw_reg ucm_int0_bb_b0 = { + 0, 17, ucm_int0_bb_b0_attn_idx, 0x1280180, 0x128018c, 0x1280188, + 0x1280184 +}; + +static const u16 ucm_int1_bb_b0_attn_idx[29] = { + 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, + 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, +}; + +static struct attn_hw_reg ucm_int1_bb_b0 = { + 1, 29, ucm_int1_bb_b0_attn_idx, 0x1280190, 0x128019c, 0x1280198, + 0x1280194 +}; + +static const u16 ucm_int2_bb_b0_attn_idx[1] = { + 46, +}; + +static struct attn_hw_reg ucm_int2_bb_b0 = { + 2, 1, ucm_int2_bb_b0_attn_idx, 0x12801a0, 0x12801ac, 0x12801a8, + 0x12801a4 +}; + +static struct attn_hw_reg *ucm_int_bb_b0_regs[3] = { + &ucm_int0_bb_b0, &ucm_int1_bb_b0, &ucm_int2_bb_b0, +}; + +static const u16 ucm_int0_k2_attn_idx[17] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, +}; + +static struct attn_hw_reg ucm_int0_k2 = { + 0, 17, ucm_int0_k2_attn_idx, 0x1280180, 0x128018c, 0x1280188, 0x1280184 +}; + +static const u16 ucm_int1_k2_attn_idx[29] = { + 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, + 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, +}; + +static struct attn_hw_reg ucm_int1_k2 = { + 1, 29, ucm_int1_k2_attn_idx, 0x1280190, 0x128019c, 0x1280198, 0x1280194 +}; + +static const u16 ucm_int2_k2_attn_idx[1] = { + 46, +}; + +static struct attn_hw_reg ucm_int2_k2 = { + 2, 1, ucm_int2_k2_attn_idx, 0x12801a0, 0x12801ac, 0x12801a8, 0x12801a4 +}; + +static struct attn_hw_reg *ucm_int_k2_regs[3] = { + &ucm_int0_k2, &ucm_int1_k2, 
&ucm_int2_k2, +}; + +#ifdef ATTN_DESC +static const char *ucm_prty_attn_desc[54] = { + "ucm_mem030_i_ecc_rf_int", + "ucm_mem005_i_ecc_0_rf_int", + "ucm_mem005_i_ecc_1_rf_int", + "ucm_mem024_i_ecc_0_rf_int", + "ucm_mem024_i_ecc_1_rf_int", + "ucm_mem025_i_ecc_rf_int", + "ucm_mem007_i_ecc_0_rf_int", + "ucm_mem007_i_ecc_1_rf_int", + "ucm_mem008_i_ecc_rf_int", + "ucm_mem027_i_ecc_0_rf_int", + "ucm_mem027_i_ecc_1_rf_int", + "ucm_mem028_i_ecc_rf_int", + "ucm_mem020_i_mem_prty", + "ucm_mem021_i_mem_prty", + "ucm_mem019_i_mem_prty", + "ucm_mem013_i_mem_prty", + "ucm_mem018_i_mem_prty", + "ucm_mem022_i_mem_prty", + "ucm_mem014_i_mem_prty", + "ucm_mem015_i_mem_prty", + "ucm_mem016_i_mem_prty", + "ucm_mem017_i_mem_prty", + "ucm_mem033_i_mem_prty", + "ucm_mem032_i_mem_prty", + "ucm_mem031_i_mem_prty", + "ucm_mem006_i_mem_prty", + "ucm_mem026_i_mem_prty", + "ucm_mem009_i_mem_prty", + "ucm_mem029_i_mem_prty", + "ucm_mem023_i_mem_prty", + "ucm_mem010_i_mem_prty_0", + "ucm_mem003_i_ecc_0_rf_int", + "ucm_mem003_i_ecc_1_rf_int", + "ucm_mem022_i_ecc_0_rf_int", + "ucm_mem022_i_ecc_1_rf_int", + "ucm_mem023_i_ecc_rf_int", + "ucm_mem006_i_ecc_rf_int", + "ucm_mem025_i_ecc_0_rf_int", + "ucm_mem025_i_ecc_1_rf_int", + "ucm_mem026_i_ecc_rf_int", + "ucm_mem011_i_mem_prty", + "ucm_mem012_i_mem_prty", + "ucm_mem030_i_mem_prty", + "ucm_mem004_i_mem_prty", + "ucm_mem024_i_mem_prty", + "ucm_mem007_i_mem_prty", + "ucm_mem027_i_mem_prty", + "ucm_mem008_i_mem_prty_0", + "ucm_mem010_i_mem_prty_1", + "ucm_mem003_i_mem_prty", + "ucm_mem001_i_mem_prty", + "ucm_mem002_i_mem_prty", + "ucm_mem008_i_mem_prty_1", + "ucm_mem010_i_mem_prty", +}; +#else +#define ucm_prty_attn_desc OSAL_NULL +#endif + +static const u16 ucm_prty1_bb_a0_attn_idx[31] = { + 1, 2, 11, 12, 13, 14, 15, 16, 18, 19, 20, 21, 24, 28, 31, 32, 33, 34, + 35, + 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, +}; + +static struct attn_hw_reg ucm_prty1_bb_a0 = { + 0, 31, ucm_prty1_bb_a0_attn_idx, 0x1280200, 0x128020c, 0x1280208, + 0x1280204 +}; + +static const u16 ucm_prty2_bb_a0_attn_idx[7] = { + 50, 51, 52, 27, 53, 23, 22, +}; + +static struct attn_hw_reg ucm_prty2_bb_a0 = { + 1, 7, ucm_prty2_bb_a0_attn_idx, 0x1280210, 0x128021c, 0x1280218, + 0x1280214 +}; + +static struct attn_hw_reg *ucm_prty_bb_a0_regs[2] = { + &ucm_prty1_bb_a0, &ucm_prty2_bb_a0, +}; + +static const u16 ucm_prty1_bb_b0_attn_idx[31] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, +}; + +static struct attn_hw_reg ucm_prty1_bb_b0 = { + 0, 31, ucm_prty1_bb_b0_attn_idx, 0x1280200, 0x128020c, 0x1280208, + 0x1280204 +}; + +static const u16 ucm_prty2_bb_b0_attn_idx[7] = { + 48, 40, 41, 49, 43, 50, 51, +}; + +static struct attn_hw_reg ucm_prty2_bb_b0 = { + 1, 7, ucm_prty2_bb_b0_attn_idx, 0x1280210, 0x128021c, 0x1280218, + 0x1280214 +}; + +static struct attn_hw_reg *ucm_prty_bb_b0_regs[2] = { + &ucm_prty1_bb_b0, &ucm_prty2_bb_b0, +}; + +static const u16 ucm_prty1_k2_attn_idx[31] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, +}; + +static struct attn_hw_reg ucm_prty1_k2 = { + 0, 31, ucm_prty1_k2_attn_idx, 0x1280200, 0x128020c, 0x1280208, + 0x1280204 +}; + +static const u16 ucm_prty2_k2_attn_idx[7] = { + 48, 40, 41, 49, 43, 50, 51, +}; + +static struct attn_hw_reg ucm_prty2_k2 = { + 1, 7, ucm_prty2_k2_attn_idx, 0x1280210, 0x128021c, 0x1280218, 0x1280214 +}; + +static struct attn_hw_reg *ucm_prty_k2_regs[2] = { + &ucm_prty1_k2, &ucm_prty2_k2, +}; + +#ifdef 
ATTN_DESC +static const char *xcm_int_attn_desc[49] = { + "xcm_address_error", + "xcm_is_storm_ovfl_err", + "xcm_is_storm_under_err", + "xcm_is_msdm_ovfl_err", + "xcm_is_msdm_under_err", + "xcm_is_xsdm_ovfl_err", + "xcm_is_xsdm_under_err", + "xcm_is_ysdm_ovfl_err", + "xcm_is_ysdm_under_err", + "xcm_is_usdm_ovfl_err", + "xcm_is_usdm_under_err", + "xcm_is_msem_ovfl_err", + "xcm_is_msem_under_err", + "xcm_is_usem_ovfl_err", + "xcm_is_usem_under_err", + "xcm_is_ysem_ovfl_err", + "xcm_is_ysem_under_err", + "xcm_is_dorq_ovfl_err", + "xcm_is_dorq_under_err", + "xcm_is_pbf_ovfl_err", + "xcm_is_pbf_under_err", + "xcm_is_tm_ovfl_err", + "xcm_is_tm_under_err", + "xcm_is_qm_p_ovfl_err", + "xcm_is_qm_p_under_err", + "xcm_is_qm_s_ovfl_err", + "xcm_is_qm_s_under_err", + "xcm_is_grc_ovfl_err0", + "xcm_is_grc_under_err0", + "xcm_is_grc_ovfl_err1", + "xcm_is_grc_under_err1", + "xcm_is_grc_ovfl_err2", + "xcm_is_grc_under_err2", + "xcm_is_grc_ovfl_err3", + "xcm_is_grc_under_err3", + "xcm_in_prcs_tbl_ovfl", + "xcm_agg_con_data_buf_ovfl", + "xcm_agg_con_cmd_buf_ovfl", + "xcm_sm_con_data_buf_ovfl", + "xcm_sm_con_cmd_buf_ovfl", + "xcm_fi_desc_input_violate", + "xcm_qm_act_st_cnt_msg_prcs_under", + "xcm_qm_act_st_cnt_msg_prcs_ovfl", + "xcm_qm_act_st_cnt_ext_ld_under", + "xcm_qm_act_st_cnt_ext_ld_ovfl", + "xcm_qm_act_st_cnt_rbc_under", + "xcm_qm_act_st_cnt_rbc_ovfl", + "xcm_qm_act_st_cnt_drop_under", + "xcm_qm_act_st_cnt_illeg_pqnum", +}; +#else +#define xcm_int_attn_desc OSAL_NULL +#endif + +static const u16 xcm_int0_bb_a0_attn_idx[16] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, +}; + +static struct attn_hw_reg xcm_int0_bb_a0 = { + 0, 16, xcm_int0_bb_a0_attn_idx, 0x1000180, 0x100018c, 0x1000188, + 0x1000184 +}; + +static const u16 xcm_int1_bb_a0_attn_idx[25] = { + 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, + 34, 35, 36, 37, 38, 39, 40, +}; + +static struct attn_hw_reg xcm_int1_bb_a0 = { + 1, 25, xcm_int1_bb_a0_attn_idx, 0x1000190, 0x100019c, 0x1000198, + 0x1000194 +}; + +static const u16 xcm_int2_bb_a0_attn_idx[8] = { + 41, 42, 43, 44, 45, 46, 47, 48, +}; + +static struct attn_hw_reg xcm_int2_bb_a0 = { + 2, 8, xcm_int2_bb_a0_attn_idx, 0x10001a0, 0x10001ac, 0x10001a8, + 0x10001a4 +}; + +static struct attn_hw_reg *xcm_int_bb_a0_regs[3] = { + &xcm_int0_bb_a0, &xcm_int1_bb_a0, &xcm_int2_bb_a0, +}; + +static const u16 xcm_int0_bb_b0_attn_idx[16] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, +}; + +static struct attn_hw_reg xcm_int0_bb_b0 = { + 0, 16, xcm_int0_bb_b0_attn_idx, 0x1000180, 0x100018c, 0x1000188, + 0x1000184 +}; + +static const u16 xcm_int1_bb_b0_attn_idx[25] = { + 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, + 34, 35, 36, 37, 38, 39, 40, +}; + +static struct attn_hw_reg xcm_int1_bb_b0 = { + 1, 25, xcm_int1_bb_b0_attn_idx, 0x1000190, 0x100019c, 0x1000198, + 0x1000194 +}; + +static const u16 xcm_int2_bb_b0_attn_idx[8] = { + 41, 42, 43, 44, 45, 46, 47, 48, +}; + +static struct attn_hw_reg xcm_int2_bb_b0 = { + 2, 8, xcm_int2_bb_b0_attn_idx, 0x10001a0, 0x10001ac, 0x10001a8, + 0x10001a4 +}; + +static struct attn_hw_reg *xcm_int_bb_b0_regs[3] = { + &xcm_int0_bb_b0, &xcm_int1_bb_b0, &xcm_int2_bb_b0, +}; + +static const u16 xcm_int0_k2_attn_idx[16] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, +}; + +static struct attn_hw_reg xcm_int0_k2 = { + 0, 16, xcm_int0_k2_attn_idx, 0x1000180, 0x100018c, 0x1000188, 0x1000184 +}; + +static const u16 xcm_int1_k2_attn_idx[25] = { + 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 
28, 29, 30, 31, 32, 33, + 34, 35, 36, 37, 38, 39, 40, +}; + +static struct attn_hw_reg xcm_int1_k2 = { + 1, 25, xcm_int1_k2_attn_idx, 0x1000190, 0x100019c, 0x1000198, 0x1000194 +}; + +static const u16 xcm_int2_k2_attn_idx[8] = { + 41, 42, 43, 44, 45, 46, 47, 48, +}; + +static struct attn_hw_reg xcm_int2_k2 = { + 2, 8, xcm_int2_k2_attn_idx, 0x10001a0, 0x10001ac, 0x10001a8, 0x10001a4 +}; + +static struct attn_hw_reg *xcm_int_k2_regs[3] = { + &xcm_int0_k2, &xcm_int1_k2, &xcm_int2_k2, +}; + +#ifdef ATTN_DESC +static const char *xcm_prty_attn_desc[59] = { + "xcm_mem036_i_ecc_rf_int", + "xcm_mem003_i_ecc_0_rf_int", + "xcm_mem003_i_ecc_1_rf_int", + "xcm_mem003_i_ecc_2_rf_int", + "xcm_mem003_i_ecc_3_rf_int", + "xcm_mem004_i_ecc_rf_int", + "xcm_mem033_i_ecc_0_rf_int", + "xcm_mem033_i_ecc_1_rf_int", + "xcm_mem034_i_ecc_rf_int", + "xcm_mem026_i_mem_prty", + "xcm_mem025_i_mem_prty", + "xcm_mem022_i_mem_prty", + "xcm_mem029_i_mem_prty", + "xcm_mem023_i_mem_prty", + "xcm_mem028_i_mem_prty", + "xcm_mem030_i_mem_prty", + "xcm_mem017_i_mem_prty", + "xcm_mem024_i_mem_prty", + "xcm_mem027_i_mem_prty", + "xcm_mem018_i_mem_prty", + "xcm_mem019_i_mem_prty", + "xcm_mem020_i_mem_prty", + "xcm_mem021_i_mem_prty", + "xcm_mem039_i_mem_prty", + "xcm_mem038_i_mem_prty", + "xcm_mem037_i_mem_prty", + "xcm_mem005_i_mem_prty", + "xcm_mem035_i_mem_prty", + "xcm_mem031_i_mem_prty", + "xcm_mem006_i_mem_prty", + "xcm_mem015_i_mem_prty", + "xcm_mem035_i_ecc_rf_int", + "xcm_mem032_i_ecc_0_rf_int", + "xcm_mem032_i_ecc_1_rf_int", + "xcm_mem033_i_ecc_rf_int", + "xcm_mem036_i_mem_prty", + "xcm_mem034_i_mem_prty", + "xcm_mem016_i_mem_prty", + "xcm_mem002_i_ecc_0_rf_int", + "xcm_mem002_i_ecc_1_rf_int", + "xcm_mem002_i_ecc_2_rf_int", + "xcm_mem002_i_ecc_3_rf_int", + "xcm_mem003_i_ecc_rf_int", + "xcm_mem031_i_ecc_0_rf_int", + "xcm_mem031_i_ecc_1_rf_int", + "xcm_mem032_i_ecc_rf_int", + "xcm_mem004_i_mem_prty", + "xcm_mem033_i_mem_prty", + "xcm_mem014_i_mem_prty", + "xcm_mem032_i_mem_prty", + "xcm_mem007_i_mem_prty", + "xcm_mem008_i_mem_prty", + "xcm_mem009_i_mem_prty", + "xcm_mem010_i_mem_prty", + "xcm_mem011_i_mem_prty", + "xcm_mem012_i_mem_prty", + "xcm_mem013_i_mem_prty", + "xcm_mem001_i_mem_prty", + "xcm_mem002_i_mem_prty", +}; +#else +#define xcm_prty_attn_desc OSAL_NULL +#endif + +static const u16 xcm_prty1_bb_a0_attn_idx[31] = { + 8, 9, 10, 11, 12, 13, 14, 16, 17, 18, 19, 20, 21, 22, 25, 26, 27, 30, + 35, + 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, +}; + +static struct attn_hw_reg xcm_prty1_bb_a0 = { + 0, 31, xcm_prty1_bb_a0_attn_idx, 0x1000200, 0x100020c, 0x1000208, + 0x1000204 +}; + +static const u16 xcm_prty2_bb_a0_attn_idx[11] = { + 50, 51, 52, 53, 54, 55, 56, 57, 15, 29, 24, +}; + +static struct attn_hw_reg xcm_prty2_bb_a0 = { + 1, 11, xcm_prty2_bb_a0_attn_idx, 0x1000210, 0x100021c, 0x1000218, + 0x1000214 +}; + +static struct attn_hw_reg *xcm_prty_bb_a0_regs[2] = { + &xcm_prty1_bb_a0, &xcm_prty2_bb_a0, +}; + +static const u16 xcm_prty1_bb_b0_attn_idx[31] = { + 1, 2, 3, 4, 5, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, + 24, + 25, 26, 29, 30, 31, 32, 33, 34, 35, 36, 37, +}; + +static struct attn_hw_reg xcm_prty1_bb_b0 = { + 0, 31, xcm_prty1_bb_b0_attn_idx, 0x1000200, 0x100020c, 0x1000208, + 0x1000204 +}; + +static const u16 xcm_prty2_bb_b0_attn_idx[11] = { + 50, 51, 52, 53, 54, 55, 56, 48, 57, 58, 28, +}; + +static struct attn_hw_reg xcm_prty2_bb_b0 = { + 1, 11, xcm_prty2_bb_b0_attn_idx, 0x1000210, 0x100021c, 0x1000218, + 0x1000214 +}; + +static struct attn_hw_reg *xcm_prty_bb_b0_regs[2] = { + 
&xcm_prty1_bb_b0, &xcm_prty2_bb_b0, +}; + +static const u16 xcm_prty1_k2_attn_idx[31] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, +}; + +static struct attn_hw_reg xcm_prty1_k2 = { + 0, 31, xcm_prty1_k2_attn_idx, 0x1000200, 0x100020c, 0x1000208, + 0x1000204 +}; + +static const u16 xcm_prty2_k2_attn_idx[12] = { + 37, 49, 50, 51, 52, 53, 54, 55, 56, 48, 57, 58, +}; + +static struct attn_hw_reg xcm_prty2_k2 = { + 1, 12, xcm_prty2_k2_attn_idx, 0x1000210, 0x100021c, 0x1000218, + 0x1000214 +}; + +static struct attn_hw_reg *xcm_prty_k2_regs[2] = { + &xcm_prty1_k2, &xcm_prty2_k2, +}; + +#ifdef ATTN_DESC +static const char *ycm_int_attn_desc[37] = { + "ycm_address_error", + "ycm_is_storm_ovfl_err", + "ycm_is_storm_under_err", + "ycm_is_msdm_ovfl_err", + "ycm_is_msdm_under_err", + "ycm_is_ysdm_ovfl_err", + "ycm_is_ysdm_under_err", + "ycm_is_xyld_ovfl_err", + "ycm_is_xyld_under_err", + "ycm_is_msem_ovfl_err", + "ycm_is_msem_under_err", + "ycm_is_usem_ovfl_err", + "ycm_is_usem_under_err", + "ycm_is_pbf_ovfl_err", + "ycm_is_pbf_under_err", + "ycm_is_qm_p_ovfl_err", + "ycm_is_qm_p_under_err", + "ycm_is_qm_s_ovfl_err", + "ycm_is_qm_s_under_err", + "ycm_is_grc_ovfl_err0", + "ycm_is_grc_under_err0", + "ycm_is_grc_ovfl_err1", + "ycm_is_grc_under_err1", + "ycm_is_grc_ovfl_err2", + "ycm_is_grc_under_err2", + "ycm_is_grc_ovfl_err3", + "ycm_is_grc_under_err3", + "ycm_in_prcs_tbl_ovfl", + "ycm_sm_con_data_buf_ovfl", + "ycm_sm_con_cmd_buf_ovfl", + "ycm_agg_task_data_buf_ovfl", + "ycm_agg_task_cmd_buf_ovfl", + "ycm_sm_task_data_buf_ovfl", + "ycm_sm_task_cmd_buf_ovfl", + "ycm_fi_desc_input_violate", + "ycm_se_desc_input_violate", + "ycm_qmreg_more4", +}; +#else +#define ycm_int_attn_desc OSAL_NULL +#endif + +static const u16 ycm_int0_bb_a0_attn_idx[13] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, +}; + +static struct attn_hw_reg ycm_int0_bb_a0 = { + 0, 13, ycm_int0_bb_a0_attn_idx, 0x1080180, 0x108018c, 0x1080188, + 0x1080184 +}; + +static const u16 ycm_int1_bb_a0_attn_idx[23] = { + 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, + 31, 32, 33, 34, 35, +}; + +static struct attn_hw_reg ycm_int1_bb_a0 = { + 1, 23, ycm_int1_bb_a0_attn_idx, 0x1080190, 0x108019c, 0x1080198, + 0x1080194 +}; + +static const u16 ycm_int2_bb_a0_attn_idx[1] = { + 36, +}; + +static struct attn_hw_reg ycm_int2_bb_a0 = { + 2, 1, ycm_int2_bb_a0_attn_idx, 0x10801a0, 0x10801ac, 0x10801a8, + 0x10801a4 +}; + +static struct attn_hw_reg *ycm_int_bb_a0_regs[3] = { + &ycm_int0_bb_a0, &ycm_int1_bb_a0, &ycm_int2_bb_a0, +}; + +static const u16 ycm_int0_bb_b0_attn_idx[13] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, +}; + +static struct attn_hw_reg ycm_int0_bb_b0 = { + 0, 13, ycm_int0_bb_b0_attn_idx, 0x1080180, 0x108018c, 0x1080188, + 0x1080184 +}; + +static const u16 ycm_int1_bb_b0_attn_idx[23] = { + 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, + 31, 32, 33, 34, 35, +}; + +static struct attn_hw_reg ycm_int1_bb_b0 = { + 1, 23, ycm_int1_bb_b0_attn_idx, 0x1080190, 0x108019c, 0x1080198, + 0x1080194 +}; + +static const u16 ycm_int2_bb_b0_attn_idx[1] = { + 36, +}; + +static struct attn_hw_reg ycm_int2_bb_b0 = { + 2, 1, ycm_int2_bb_b0_attn_idx, 0x10801a0, 0x10801ac, 0x10801a8, + 0x10801a4 +}; + +static struct attn_hw_reg *ycm_int_bb_b0_regs[3] = { + &ycm_int0_bb_b0, &ycm_int1_bb_b0, &ycm_int2_bb_b0, +}; + +static const u16 ycm_int0_k2_attn_idx[13] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, +}; + +static struct attn_hw_reg 
ycm_int0_k2 = { + 0, 13, ycm_int0_k2_attn_idx, 0x1080180, 0x108018c, 0x1080188, 0x1080184 +}; + +static const u16 ycm_int1_k2_attn_idx[23] = { + 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, + 31, 32, 33, 34, 35, +}; + +static struct attn_hw_reg ycm_int1_k2 = { + 1, 23, ycm_int1_k2_attn_idx, 0x1080190, 0x108019c, 0x1080198, 0x1080194 +}; + +static const u16 ycm_int2_k2_attn_idx[1] = { + 36, +}; + +static struct attn_hw_reg ycm_int2_k2 = { + 2, 1, ycm_int2_k2_attn_idx, 0x10801a0, 0x10801ac, 0x10801a8, 0x10801a4 +}; + +static struct attn_hw_reg *ycm_int_k2_regs[3] = { + &ycm_int0_k2, &ycm_int1_k2, &ycm_int2_k2, +}; + +#ifdef ATTN_DESC +static const char *ycm_prty_attn_desc[44] = { + "ycm_mem027_i_ecc_rf_int", + "ycm_mem003_i_ecc_0_rf_int", + "ycm_mem003_i_ecc_1_rf_int", + "ycm_mem022_i_ecc_0_rf_int", + "ycm_mem022_i_ecc_1_rf_int", + "ycm_mem023_i_ecc_rf_int", + "ycm_mem005_i_ecc_0_rf_int", + "ycm_mem005_i_ecc_1_rf_int", + "ycm_mem025_i_ecc_0_rf_int", + "ycm_mem025_i_ecc_1_rf_int", + "ycm_mem018_i_mem_prty", + "ycm_mem020_i_mem_prty", + "ycm_mem017_i_mem_prty", + "ycm_mem016_i_mem_prty", + "ycm_mem019_i_mem_prty", + "ycm_mem015_i_mem_prty", + "ycm_mem011_i_mem_prty", + "ycm_mem012_i_mem_prty", + "ycm_mem013_i_mem_prty", + "ycm_mem014_i_mem_prty", + "ycm_mem030_i_mem_prty", + "ycm_mem029_i_mem_prty", + "ycm_mem028_i_mem_prty", + "ycm_mem004_i_mem_prty", + "ycm_mem024_i_mem_prty", + "ycm_mem006_i_mem_prty", + "ycm_mem026_i_mem_prty", + "ycm_mem021_i_mem_prty", + "ycm_mem007_i_mem_prty_0", + "ycm_mem007_i_mem_prty_1", + "ycm_mem008_i_mem_prty", + "ycm_mem026_i_ecc_rf_int", + "ycm_mem021_i_ecc_0_rf_int", + "ycm_mem021_i_ecc_1_rf_int", + "ycm_mem022_i_ecc_rf_int", + "ycm_mem024_i_ecc_0_rf_int", + "ycm_mem024_i_ecc_1_rf_int", + "ycm_mem027_i_mem_prty", + "ycm_mem023_i_mem_prty", + "ycm_mem025_i_mem_prty", + "ycm_mem009_i_mem_prty", + "ycm_mem010_i_mem_prty", + "ycm_mem001_i_mem_prty", + "ycm_mem002_i_mem_prty", +}; +#else +#define ycm_prty_attn_desc OSAL_NULL +#endif + +static const u16 ycm_prty1_bb_a0_attn_idx[31] = { + 1, 2, 6, 7, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 21, 22, 23, 25, 28, + 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, +}; + +static struct attn_hw_reg ycm_prty1_bb_a0 = { + 0, 31, ycm_prty1_bb_a0_attn_idx, 0x1080200, 0x108020c, 0x1080208, + 0x1080204 +}; + +static const u16 ycm_prty2_bb_a0_attn_idx[3] = { + 41, 42, 43, +}; + +static struct attn_hw_reg ycm_prty2_bb_a0 = { + 1, 3, ycm_prty2_bb_a0_attn_idx, 0x1080210, 0x108021c, 0x1080218, + 0x1080214 +}; + +static struct attn_hw_reg *ycm_prty_bb_a0_regs[2] = { + &ycm_prty1_bb_a0, &ycm_prty2_bb_a0, +}; + +static const u16 ycm_prty1_bb_b0_attn_idx[31] = { + 1, 2, 6, 7, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 21, 22, 23, 25, 28, + 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, +}; + +static struct attn_hw_reg ycm_prty1_bb_b0 = { + 0, 31, ycm_prty1_bb_b0_attn_idx, 0x1080200, 0x108020c, 0x1080208, + 0x1080204 +}; + +static const u16 ycm_prty2_bb_b0_attn_idx[3] = { + 41, 42, 43, +}; + +static struct attn_hw_reg ycm_prty2_bb_b0 = { + 1, 3, ycm_prty2_bb_b0_attn_idx, 0x1080210, 0x108021c, 0x1080218, + 0x1080214 +}; + +static struct attn_hw_reg *ycm_prty_bb_b0_regs[2] = { + &ycm_prty1_bb_b0, &ycm_prty2_bb_b0, +}; + +static const u16 ycm_prty1_k2_attn_idx[31] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, +}; + +static struct attn_hw_reg ycm_prty1_k2 = { + 0, 31, ycm_prty1_k2_attn_idx, 0x1080200, 0x108020c, 0x1080208, + 0x1080204 +}; + 
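Every hardware block in these tables repeats the same pattern: an optional *_attn_desc[] array of human-readable attention names (compiled only under ATTN_DESC, otherwise defined to OSAL_NULL), one attn_idx[] array per chip revision (bb_a0, bb_b0, k2) that maps a register bit position to an entry in the name table, one attn_hw_reg initializer per status register, and a per-block array of attn_hw_reg pointers. The sketch below shows how such a list could be walked to turn latched attention bits into names. It is illustrative only: the demo struct layout, the interpretation of the four addresses, and the reg_rd() helper are assumptions made for the example, not definitions taken from this driver.

/*
 * Minimal decode sketch. The layout of demo_attn_hw_reg is assumed to mirror
 * the initializers above; reg_rd() is a hypothetical register read.
 */
#include <stdio.h>

typedef unsigned short u16;
typedef unsigned int u32;

struct demo_attn_hw_reg {
	u16 reg_idx;		/* index of this status register within the block */
	u16 num_of_attn;	/* number of valid attention bits */
	const u16 *attn_idx;	/* bit position -> index into *_attn_desc[] */
	u32 sts_addr;		/* status register address */
	u32 sts_clr_addr;	/* assumed: read-to-clear status address */
	u32 sts_wr_addr;	/* assumed: sticky/write status address */
	u32 mask_addr;		/* assumed: mask register address */
};

/* Hypothetical register read; a real driver would use its HW access layer. */
static u32 reg_rd(u32 addr)
{
	(void)addr;
	return 0x3;		/* pretend bits 0 and 1 are latched */
}

/* Walk one block's status registers and print the names of latched bits. */
static void demo_decode_attn(const struct demo_attn_hw_reg * const *regs,
			     int num_regs, const char * const *desc)
{
	int r;
	u16 bit;

	for (r = 0; r < num_regs; r++) {
		u32 sts = reg_rd(regs[r]->sts_addr);

		for (bit = 0; bit < regs[r]->num_of_attn; bit++)
			if (sts & (1u << bit))
				printf("attention: %s\n",
				       desc[regs[r]->attn_idx[bit]]);
	}
}

int main(void)
{
	/* Tiny stand-in shaped like the ycm parity entries above. */
	static const char * const demo_desc[2] = { "demo_bit0", "demo_bit1" };
	static const u16 demo_idx[2] = { 0, 1 };
	static const struct demo_attn_hw_reg demo_reg = {
		0, 2, demo_idx, 0x1080200, 0x108020c, 0x1080208, 0x1080204
	};
	const struct demo_attn_hw_reg *demo_regs[1] = { &demo_reg };

	demo_decode_attn(demo_regs, 1, demo_desc);
	return 0;
}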
+static const u16 ycm_prty2_k2_attn_idx[4] = { + 40, 41, 42, 43, +}; + +static struct attn_hw_reg ycm_prty2_k2 = { + 1, 4, ycm_prty2_k2_attn_idx, 0x1080210, 0x108021c, 0x1080218, 0x1080214 +}; + +static struct attn_hw_reg *ycm_prty_k2_regs[2] = { + &ycm_prty1_k2, &ycm_prty2_k2, +}; + +#ifdef ATTN_DESC +static const char *pcm_int_attn_desc[20] = { + "pcm_address_error", + "pcm_is_storm_ovfl_err", + "pcm_is_storm_under_err", + "pcm_is_psdm_ovfl_err", + "pcm_is_psdm_under_err", + "pcm_is_pbf_ovfl_err", + "pcm_is_pbf_under_err", + "pcm_is_grc_ovfl_err0", + "pcm_is_grc_under_err0", + "pcm_is_grc_ovfl_err1", + "pcm_is_grc_under_err1", + "pcm_is_grc_ovfl_err2", + "pcm_is_grc_under_err2", + "pcm_is_grc_ovfl_err3", + "pcm_is_grc_under_err3", + "pcm_in_prcs_tbl_ovfl", + "pcm_sm_con_data_buf_ovfl", + "pcm_sm_con_cmd_buf_ovfl", + "pcm_fi_desc_input_violate", + "pcm_qmreg_more4", +}; +#else +#define pcm_int_attn_desc OSAL_NULL +#endif + +static const u16 pcm_int0_bb_a0_attn_idx[5] = { + 0, 1, 2, 3, 4, +}; + +static struct attn_hw_reg pcm_int0_bb_a0 = { + 0, 5, pcm_int0_bb_a0_attn_idx, 0x1100180, 0x110018c, 0x1100188, + 0x1100184 +}; + +static const u16 pcm_int1_bb_a0_attn_idx[14] = { + 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, +}; + +static struct attn_hw_reg pcm_int1_bb_a0 = { + 1, 14, pcm_int1_bb_a0_attn_idx, 0x1100190, 0x110019c, 0x1100198, + 0x1100194 +}; + +static const u16 pcm_int2_bb_a0_attn_idx[1] = { + 19, +}; + +static struct attn_hw_reg pcm_int2_bb_a0 = { + 2, 1, pcm_int2_bb_a0_attn_idx, 0x11001a0, 0x11001ac, 0x11001a8, + 0x11001a4 +}; + +static struct attn_hw_reg *pcm_int_bb_a0_regs[3] = { + &pcm_int0_bb_a0, &pcm_int1_bb_a0, &pcm_int2_bb_a0, +}; + +static const u16 pcm_int0_bb_b0_attn_idx[5] = { + 0, 1, 2, 3, 4, +}; + +static struct attn_hw_reg pcm_int0_bb_b0 = { + 0, 5, pcm_int0_bb_b0_attn_idx, 0x1100180, 0x110018c, 0x1100188, + 0x1100184 +}; + +static const u16 pcm_int1_bb_b0_attn_idx[14] = { + 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, +}; + +static struct attn_hw_reg pcm_int1_bb_b0 = { + 1, 14, pcm_int1_bb_b0_attn_idx, 0x1100190, 0x110019c, 0x1100198, + 0x1100194 +}; + +static const u16 pcm_int2_bb_b0_attn_idx[1] = { + 19, +}; + +static struct attn_hw_reg pcm_int2_bb_b0 = { + 2, 1, pcm_int2_bb_b0_attn_idx, 0x11001a0, 0x11001ac, 0x11001a8, + 0x11001a4 +}; + +static struct attn_hw_reg *pcm_int_bb_b0_regs[3] = { + &pcm_int0_bb_b0, &pcm_int1_bb_b0, &pcm_int2_bb_b0, +}; + +static const u16 pcm_int0_k2_attn_idx[5] = { + 0, 1, 2, 3, 4, +}; + +static struct attn_hw_reg pcm_int0_k2 = { + 0, 5, pcm_int0_k2_attn_idx, 0x1100180, 0x110018c, 0x1100188, 0x1100184 +}; + +static const u16 pcm_int1_k2_attn_idx[14] = { + 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, +}; + +static struct attn_hw_reg pcm_int1_k2 = { + 1, 14, pcm_int1_k2_attn_idx, 0x1100190, 0x110019c, 0x1100198, 0x1100194 +}; + +static const u16 pcm_int2_k2_attn_idx[1] = { + 19, +}; + +static struct attn_hw_reg pcm_int2_k2 = { + 2, 1, pcm_int2_k2_attn_idx, 0x11001a0, 0x11001ac, 0x11001a8, 0x11001a4 +}; + +static struct attn_hw_reg *pcm_int_k2_regs[3] = { + &pcm_int0_k2, &pcm_int1_k2, &pcm_int2_k2, +}; + +#ifdef ATTN_DESC +static const char *pcm_prty_attn_desc[18] = { + "pcm_mem012_i_ecc_rf_int", + "pcm_mem010_i_ecc_0_rf_int", + "pcm_mem010_i_ecc_1_rf_int", + "pcm_mem008_i_mem_prty", + "pcm_mem007_i_mem_prty", + "pcm_mem006_i_mem_prty", + "pcm_mem002_i_mem_prty", + "pcm_mem003_i_mem_prty", + "pcm_mem004_i_mem_prty", + "pcm_mem005_i_mem_prty", + "pcm_mem011_i_mem_prty", + "pcm_mem001_i_mem_prty", + 
"pcm_mem011_i_ecc_rf_int", + "pcm_mem009_i_ecc_0_rf_int", + "pcm_mem009_i_ecc_1_rf_int", + "pcm_mem010_i_mem_prty", + "pcm_mem013_i_mem_prty", + "pcm_mem012_i_mem_prty", +}; +#else +#define pcm_prty_attn_desc OSAL_NULL +#endif + +static const u16 pcm_prty1_bb_a0_attn_idx[14] = { + 3, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14, 15, 16, 17, +}; + +static struct attn_hw_reg pcm_prty1_bb_a0 = { + 0, 14, pcm_prty1_bb_a0_attn_idx, 0x1100200, 0x110020c, 0x1100208, + 0x1100204 +}; + +static struct attn_hw_reg *pcm_prty_bb_a0_regs[1] = { + &pcm_prty1_bb_a0, +}; + +static const u16 pcm_prty1_bb_b0_attn_idx[11] = { + 4, 5, 6, 7, 8, 9, 11, 12, 13, 14, 15, +}; + +static struct attn_hw_reg pcm_prty1_bb_b0 = { + 0, 11, pcm_prty1_bb_b0_attn_idx, 0x1100200, 0x110020c, 0x1100208, + 0x1100204 +}; + +static struct attn_hw_reg *pcm_prty_bb_b0_regs[1] = { + &pcm_prty1_bb_b0, +}; + +static const u16 pcm_prty1_k2_attn_idx[12] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, +}; + +static struct attn_hw_reg pcm_prty1_k2 = { + 0, 12, pcm_prty1_k2_attn_idx, 0x1100200, 0x110020c, 0x1100208, + 0x1100204 +}; + +static struct attn_hw_reg *pcm_prty_k2_regs[1] = { + &pcm_prty1_k2, +}; + +#ifdef ATTN_DESC +static const char *qm_int_attn_desc[22] = { + "qm_address_error", + "qm_ovf_err_tx", + "qm_ovf_err_other", + "qm_pf_usg_cnt_err", + "qm_vf_usg_cnt_err", + "qm_voq_crd_inc_err", + "qm_voq_crd_dec_err", + "qm_byte_crd_inc_err", + "qm_byte_crd_dec_err", + "qm_err_incdec_rlglblcrd", + "qm_err_incdec_rlpfcrd", + "qm_err_incdec_wfqpfcrd", + "qm_err_incdec_wfqvpcrd", + "qm_err_incdec_voqlinecrd", + "qm_err_incdec_voqbytecrd", + "qm_fifos_error", + "qm_qm_rl_dc_exp_pf_controller_pop_error", + "qm_qm_rl_dc_exp_pf_controller_push_error", + "qm_qm_rl_dc_rf_req_controller_pop_error", + "qm_qm_rl_dc_rf_req_controller_push_error", + "qm_qm_rl_dc_rf_res_controller_pop_error", + "qm_qm_rl_dc_rf_res_controller_push_error", +}; +#else +#define qm_int_attn_desc OSAL_NULL +#endif + +static const u16 qm_int0_bb_a0_attn_idx[16] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, +}; + +static struct attn_hw_reg qm_int0_bb_a0 = { + 0, 16, qm_int0_bb_a0_attn_idx, 0x2f0180, 0x2f018c, 0x2f0188, 0x2f0184 +}; + +static struct attn_hw_reg *qm_int_bb_a0_regs[1] = { + &qm_int0_bb_a0, +}; + +static const u16 qm_int0_bb_b0_attn_idx[22] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, +}; + +static struct attn_hw_reg qm_int0_bb_b0 = { + 0, 22, qm_int0_bb_b0_attn_idx, 0x2f0180, 0x2f018c, 0x2f0188, 0x2f0184 +}; + +static struct attn_hw_reg *qm_int_bb_b0_regs[1] = { + &qm_int0_bb_b0, +}; + +static const u16 qm_int0_k2_attn_idx[22] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, +}; + +static struct attn_hw_reg qm_int0_k2 = { + 0, 22, qm_int0_k2_attn_idx, 0x2f0180, 0x2f018c, 0x2f0188, 0x2f0184 +}; + +static struct attn_hw_reg *qm_int_k2_regs[1] = { + &qm_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *qm_prty_attn_desc[109] = { + "qm_xcm_wrc_fifo", + "qm_ucm_wrc_fifo", + "qm_tcm_wrc_fifo", + "qm_ccm_wrc_fifo", + "qm_bigramhigh", + "qm_bigramlow", + "qm_base_address", + "qm_wrbuff", + "qm_bigramhigh_ext_a", + "qm_bigramlow_ext_a", + "qm_base_address_ext_a", + "qm_mem006_i_ecc_0_rf_int", + "qm_mem006_i_ecc_1_rf_int", + "qm_mem005_i_ecc_0_rf_int", + "qm_mem005_i_ecc_1_rf_int", + "qm_mem012_i_ecc_rf_int", + "qm_mem037_i_mem_prty", + "qm_mem036_i_mem_prty", + "qm_mem039_i_mem_prty", + "qm_mem038_i_mem_prty", + "qm_mem040_i_mem_prty", + "qm_mem042_i_mem_prty", + "qm_mem041_i_mem_prty", + 
"qm_mem056_i_mem_prty", + "qm_mem055_i_mem_prty", + "qm_mem053_i_mem_prty", + "qm_mem054_i_mem_prty", + "qm_mem057_i_mem_prty", + "qm_mem058_i_mem_prty", + "qm_mem062_i_mem_prty", + "qm_mem061_i_mem_prty", + "qm_mem059_i_mem_prty", + "qm_mem060_i_mem_prty", + "qm_mem063_i_mem_prty", + "qm_mem064_i_mem_prty", + "qm_mem033_i_mem_prty", + "qm_mem032_i_mem_prty", + "qm_mem030_i_mem_prty", + "qm_mem031_i_mem_prty", + "qm_mem034_i_mem_prty", + "qm_mem035_i_mem_prty", + "qm_mem051_i_mem_prty", + "qm_mem042_i_ecc_0_rf_int", + "qm_mem042_i_ecc_1_rf_int", + "qm_mem041_i_ecc_0_rf_int", + "qm_mem041_i_ecc_1_rf_int", + "qm_mem048_i_ecc_rf_int", + "qm_mem009_i_mem_prty", + "qm_mem008_i_mem_prty", + "qm_mem011_i_mem_prty", + "qm_mem010_i_mem_prty", + "qm_mem012_i_mem_prty", + "qm_mem014_i_mem_prty", + "qm_mem013_i_mem_prty", + "qm_mem028_i_mem_prty", + "qm_mem027_i_mem_prty", + "qm_mem025_i_mem_prty", + "qm_mem026_i_mem_prty", + "qm_mem029_i_mem_prty", + "qm_mem005_i_mem_prty", + "qm_mem004_i_mem_prty", + "qm_mem002_i_mem_prty", + "qm_mem003_i_mem_prty", + "qm_mem006_i_mem_prty", + "qm_mem007_i_mem_prty", + "qm_mem023_i_mem_prty", + "qm_mem047_i_mem_prty", + "qm_mem049_i_mem_prty", + "qm_mem048_i_mem_prty", + "qm_mem052_i_mem_prty", + "qm_mem050_i_mem_prty", + "qm_mem045_i_mem_prty", + "qm_mem046_i_mem_prty", + "qm_mem043_i_mem_prty", + "qm_mem044_i_mem_prty", + "qm_mem017_i_mem_prty", + "qm_mem016_i_mem_prty", + "qm_mem021_i_mem_prty", + "qm_mem024_i_mem_prty", + "qm_mem019_i_mem_prty", + "qm_mem018_i_mem_prty", + "qm_mem015_i_mem_prty", + "qm_mem022_i_mem_prty", + "qm_mem020_i_mem_prty", + "qm_mem007_i_mem_prty_0", + "qm_mem007_i_mem_prty_1", + "qm_mem007_i_mem_prty_2", + "qm_mem001_i_mem_prty", + "qm_mem043_i_mem_prty_0", + "qm_mem043_i_mem_prty_1", + "qm_mem043_i_mem_prty_2", + "qm_mem007_i_mem_prty_3", + "qm_mem007_i_mem_prty_4", + "qm_mem007_i_mem_prty_5", + "qm_mem007_i_mem_prty_6", + "qm_mem007_i_mem_prty_7", + "qm_mem007_i_mem_prty_8", + "qm_mem007_i_mem_prty_9", + "qm_mem007_i_mem_prty_10", + "qm_mem007_i_mem_prty_11", + "qm_mem007_i_mem_prty_12", + "qm_mem007_i_mem_prty_13", + "qm_mem007_i_mem_prty_14", + "qm_mem007_i_mem_prty_15", + "qm_mem043_i_mem_prty_3", + "qm_mem043_i_mem_prty_4", + "qm_mem043_i_mem_prty_5", + "qm_mem043_i_mem_prty_6", + "qm_mem043_i_mem_prty_7", +}; +#else +#define qm_prty_attn_desc OSAL_NULL +#endif + +static const u16 qm_prty0_bb_a0_attn_idx[11] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, +}; + +static struct attn_hw_reg qm_prty0_bb_a0 = { + 0, 11, qm_prty0_bb_a0_attn_idx, 0x2f0190, 0x2f019c, 0x2f0198, 0x2f0194 +}; + +static const u16 qm_prty1_bb_a0_attn_idx[31] = { + 17, 35, 36, 37, 38, 39, 40, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, + 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, +}; + +static struct attn_hw_reg qm_prty1_bb_a0 = { + 1, 31, qm_prty1_bb_a0_attn_idx, 0x2f0200, 0x2f020c, 0x2f0208, 0x2f0204 +}; + +static const u16 qm_prty2_bb_a0_attn_idx[31] = { + 66, 67, 69, 70, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 87, 20, 18, 25, + 27, 32, 24, 26, 41, 31, 29, 28, 30, 23, 88, 89, 90, +}; + +static struct attn_hw_reg qm_prty2_bb_a0 = { + 2, 31, qm_prty2_bb_a0_attn_idx, 0x2f0210, 0x2f021c, 0x2f0218, 0x2f0214 +}; + +static const u16 qm_prty3_bb_a0_attn_idx[11] = { + 104, 105, 106, 107, 108, 33, 16, 34, 19, 72, 71, +}; + +static struct attn_hw_reg qm_prty3_bb_a0 = { + 3, 11, qm_prty3_bb_a0_attn_idx, 0x2f0220, 0x2f022c, 0x2f0228, 0x2f0224 +}; + +static struct attn_hw_reg *qm_prty_bb_a0_regs[4] = { + &qm_prty0_bb_a0, &qm_prty1_bb_a0, &qm_prty2_bb_a0, &qm_prty3_bb_a0, 
+}; + +static const u16 qm_prty0_bb_b0_attn_idx[11] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, +}; + +static struct attn_hw_reg qm_prty0_bb_b0 = { + 0, 11, qm_prty0_bb_b0_attn_idx, 0x2f0190, 0x2f019c, 0x2f0198, 0x2f0194 +}; + +static const u16 qm_prty1_bb_b0_attn_idx[31] = { + 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, + 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, +}; + +static struct attn_hw_reg qm_prty1_bb_b0 = { + 1, 31, qm_prty1_bb_b0_attn_idx, 0x2f0200, 0x2f020c, 0x2f0208, 0x2f0204 +}; + +static const u16 qm_prty2_bb_b0_attn_idx[31] = { + 66, 67, 68, 69, 70, 71, 72, 73, 74, 58, 60, 62, 49, 75, 76, 53, 77, 78, + 79, 80, 81, 52, 65, 57, 82, 56, 83, 48, 84, 85, 86, +}; + +static struct attn_hw_reg qm_prty2_bb_b0 = { + 2, 31, qm_prty2_bb_b0_attn_idx, 0x2f0210, 0x2f021c, 0x2f0218, 0x2f0214 +}; + +static const u16 qm_prty3_bb_b0_attn_idx[11] = { + 91, 92, 93, 94, 95, 55, 87, 54, 61, 50, 47, +}; + +static struct attn_hw_reg qm_prty3_bb_b0 = { + 3, 11, qm_prty3_bb_b0_attn_idx, 0x2f0220, 0x2f022c, 0x2f0228, 0x2f0224 +}; + +static struct attn_hw_reg *qm_prty_bb_b0_regs[4] = { + &qm_prty0_bb_b0, &qm_prty1_bb_b0, &qm_prty2_bb_b0, &qm_prty3_bb_b0, +}; + +static const u16 qm_prty0_k2_attn_idx[11] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, +}; + +static struct attn_hw_reg qm_prty0_k2 = { + 0, 11, qm_prty0_k2_attn_idx, 0x2f0190, 0x2f019c, 0x2f0198, 0x2f0194 +}; + +static const u16 qm_prty1_k2_attn_idx[31] = { + 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, + 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, +}; + +static struct attn_hw_reg qm_prty1_k2 = { + 1, 31, qm_prty1_k2_attn_idx, 0x2f0200, 0x2f020c, 0x2f0208, 0x2f0204 +}; + +static const u16 qm_prty2_k2_attn_idx[31] = { + 66, 67, 68, 69, 70, 71, 72, 73, 74, 58, 60, 62, 49, 75, 76, 53, 77, 78, + 79, 80, 81, 52, 65, 57, 82, 56, 83, 48, 84, 85, 86, +}; + +static struct attn_hw_reg qm_prty2_k2 = { + 2, 31, qm_prty2_k2_attn_idx, 0x2f0210, 0x2f021c, 0x2f0218, 0x2f0214 +}; + +static const u16 qm_prty3_k2_attn_idx[19] = { + 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 55, 87, 54, 61, + 50, 47, +}; + +static struct attn_hw_reg qm_prty3_k2 = { + 3, 19, qm_prty3_k2_attn_idx, 0x2f0220, 0x2f022c, 0x2f0228, 0x2f0224 +}; + +static struct attn_hw_reg *qm_prty_k2_regs[4] = { + &qm_prty0_k2, &qm_prty1_k2, &qm_prty2_k2, &qm_prty3_k2, +}; + +#ifdef ATTN_DESC +static const char *tm_int_attn_desc[43] = { + "tm_address_error", + "tm_pxp_read_data_fifo_ov", + "tm_pxp_read_data_fifo_un", + "tm_pxp_read_ctrl_fifo_ov", + "tm_pxp_read_ctrl_fifo_un", + "tm_cfc_load_command_fifo_ov", + "tm_cfc_load_command_fifo_un", + "tm_cfc_load_echo_fifo_ov", + "tm_cfc_load_echo_fifo_un", + "tm_client_out_fifo_ov", + "tm_client_out_fifo_un", + "tm_ac_command_fifo_ov", + "tm_ac_command_fifo_un", + "tm_client_in_pbf_fifo_ov", + "tm_client_in_pbf_fifo_un", + "tm_client_in_ucm_fifo_ov", + "tm_client_in_ucm_fifo_un", + "tm_client_in_tcm_fifo_ov", + "tm_client_in_tcm_fifo_un", + "tm_client_in_xcm_fifo_ov", + "tm_client_in_xcm_fifo_un", + "tm_expiration_cmd_fifo_ov", + "tm_expiration_cmd_fifo_un", + "tm_stop_all_lc_invalid", + "tm_command_lc_invalid_0", + "tm_command_lc_invalid_1", + "tm_init_command_lc_valid", + "tm_stop_all_exp_lc_valid", + "tm_command_cid_invalid_0", + "tm_reserved_command", + "tm_command_cid_invalid_1", + "tm_cload_res_loaderr_conn", + "tm_cload_res_loadcancel_conn", + "tm_cload_res_validerr_conn", + "tm_context_rd_last", + "tm_context_wr_last", + "tm_pxp_rd_data_eop_bvalid", + "tm_pend_conn_scan", + 
"tm_pend_task_scan", + "tm_pxp_rd_data_eop_error", + "tm_cload_res_loaderr_task", + "tm_cload_res_loadcancel_task", + "tm_cload_res_validerr_task", +}; +#else +#define tm_int_attn_desc OSAL_NULL +#endif + +static const u16 tm_int0_bb_a0_attn_idx[32] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, +}; + +static struct attn_hw_reg tm_int0_bb_a0 = { + 0, 32, tm_int0_bb_a0_attn_idx, 0x2c0180, 0x2c018c, 0x2c0188, 0x2c0184 +}; + +static const u16 tm_int1_bb_a0_attn_idx[11] = { + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, +}; + +static struct attn_hw_reg tm_int1_bb_a0 = { + 1, 11, tm_int1_bb_a0_attn_idx, 0x2c0190, 0x2c019c, 0x2c0198, 0x2c0194 +}; + +static struct attn_hw_reg *tm_int_bb_a0_regs[2] = { + &tm_int0_bb_a0, &tm_int1_bb_a0, +}; + +static const u16 tm_int0_bb_b0_attn_idx[32] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, +}; + +static struct attn_hw_reg tm_int0_bb_b0 = { + 0, 32, tm_int0_bb_b0_attn_idx, 0x2c0180, 0x2c018c, 0x2c0188, 0x2c0184 +}; + +static const u16 tm_int1_bb_b0_attn_idx[11] = { + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, +}; + +static struct attn_hw_reg tm_int1_bb_b0 = { + 1, 11, tm_int1_bb_b0_attn_idx, 0x2c0190, 0x2c019c, 0x2c0198, 0x2c0194 +}; + +static struct attn_hw_reg *tm_int_bb_b0_regs[2] = { + &tm_int0_bb_b0, &tm_int1_bb_b0, +}; + +static const u16 tm_int0_k2_attn_idx[32] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, +}; + +static struct attn_hw_reg tm_int0_k2 = { + 0, 32, tm_int0_k2_attn_idx, 0x2c0180, 0x2c018c, 0x2c0188, 0x2c0184 +}; + +static const u16 tm_int1_k2_attn_idx[11] = { + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, +}; + +static struct attn_hw_reg tm_int1_k2 = { + 1, 11, tm_int1_k2_attn_idx, 0x2c0190, 0x2c019c, 0x2c0198, 0x2c0194 +}; + +static struct attn_hw_reg *tm_int_k2_regs[2] = { + &tm_int0_k2, &tm_int1_k2, +}; + +#ifdef ATTN_DESC +static const char *tm_prty_attn_desc[17] = { + "tm_mem012_i_ecc_0_rf_int", + "tm_mem012_i_ecc_1_rf_int", + "tm_mem003_i_ecc_rf_int", + "tm_mem016_i_mem_prty", + "tm_mem007_i_mem_prty", + "tm_mem010_i_mem_prty", + "tm_mem008_i_mem_prty", + "tm_mem009_i_mem_prty", + "tm_mem013_i_mem_prty", + "tm_mem015_i_mem_prty", + "tm_mem014_i_mem_prty", + "tm_mem004_i_mem_prty", + "tm_mem005_i_mem_prty", + "tm_mem006_i_mem_prty", + "tm_mem011_i_mem_prty", + "tm_mem001_i_mem_prty", + "tm_mem002_i_mem_prty", +}; +#else +#define tm_prty_attn_desc OSAL_NULL +#endif + +static const u16 tm_prty1_bb_a0_attn_idx[17] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, +}; + +static struct attn_hw_reg tm_prty1_bb_a0 = { + 0, 17, tm_prty1_bb_a0_attn_idx, 0x2c0200, 0x2c020c, 0x2c0208, 0x2c0204 +}; + +static struct attn_hw_reg *tm_prty_bb_a0_regs[1] = { + &tm_prty1_bb_a0, +}; + +static const u16 tm_prty1_bb_b0_attn_idx[17] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, +}; + +static struct attn_hw_reg tm_prty1_bb_b0 = { + 0, 17, tm_prty1_bb_b0_attn_idx, 0x2c0200, 0x2c020c, 0x2c0208, 0x2c0204 +}; + +static struct attn_hw_reg *tm_prty_bb_b0_regs[1] = { + &tm_prty1_bb_b0, +}; + +static const u16 tm_prty1_k2_attn_idx[17] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, +}; + +static struct attn_hw_reg tm_prty1_k2 = { + 0, 17, tm_prty1_k2_attn_idx, 0x2c0200, 0x2c020c, 0x2c0208, 0x2c0204 +}; + +static struct attn_hw_reg *tm_prty_k2_regs[1] = { + &tm_prty1_k2, +}; + 
+#ifdef ATTN_DESC +static const char *dorq_int_attn_desc[9] = { + "dorq_address_error", + "dorq_db_drop", + "dorq_dorq_fifo_ovfl_err", + "dorq_dorq_fifo_afull", + "dorq_cfc_byp_validation_err", + "dorq_cfc_ld_resp_err", + "dorq_xcm_done_cnt_err", + "dorq_cfc_ld_req_fifo_ovfl_err", + "dorq_cfc_ld_req_fifo_under_err", +}; +#else +#define dorq_int_attn_desc OSAL_NULL +#endif + +static const u16 dorq_int0_bb_a0_attn_idx[9] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, +}; + +static struct attn_hw_reg dorq_int0_bb_a0 = { + 0, 9, dorq_int0_bb_a0_attn_idx, 0x100180, 0x10018c, 0x100188, 0x100184 +}; + +static struct attn_hw_reg *dorq_int_bb_a0_regs[1] = { + &dorq_int0_bb_a0, +}; + +static const u16 dorq_int0_bb_b0_attn_idx[9] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, +}; + +static struct attn_hw_reg dorq_int0_bb_b0 = { + 0, 9, dorq_int0_bb_b0_attn_idx, 0x100180, 0x10018c, 0x100188, 0x100184 +}; + +static struct attn_hw_reg *dorq_int_bb_b0_regs[1] = { + &dorq_int0_bb_b0, +}; + +static const u16 dorq_int0_k2_attn_idx[9] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, +}; + +static struct attn_hw_reg dorq_int0_k2 = { + 0, 9, dorq_int0_k2_attn_idx, 0x100180, 0x10018c, 0x100188, 0x100184 +}; + +static struct attn_hw_reg *dorq_int_k2_regs[1] = { + &dorq_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *dorq_prty_attn_desc[7] = { + "dorq_datapath_registers", + "dorq_mem002_i_ecc_rf_int", + "dorq_mem001_i_mem_prty", + "dorq_mem003_i_mem_prty", + "dorq_mem004_i_mem_prty", + "dorq_mem005_i_mem_prty", + "dorq_mem006_i_mem_prty", +}; +#else +#define dorq_prty_attn_desc OSAL_NULL +#endif + +static const u16 dorq_prty1_bb_a0_attn_idx[6] = { + 1, 2, 3, 4, 5, 6, +}; + +static struct attn_hw_reg dorq_prty1_bb_a0 = { + 0, 6, dorq_prty1_bb_a0_attn_idx, 0x100200, 0x10020c, 0x100208, 0x100204 +}; + +static struct attn_hw_reg *dorq_prty_bb_a0_regs[1] = { + &dorq_prty1_bb_a0, +}; + +static const u16 dorq_prty0_bb_b0_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg dorq_prty0_bb_b0 = { + 0, 1, dorq_prty0_bb_b0_attn_idx, 0x100190, 0x10019c, 0x100198, 0x100194 +}; + +static const u16 dorq_prty1_bb_b0_attn_idx[6] = { + 1, 2, 3, 4, 5, 6, +}; + +static struct attn_hw_reg dorq_prty1_bb_b0 = { + 1, 6, dorq_prty1_bb_b0_attn_idx, 0x100200, 0x10020c, 0x100208, 0x100204 +}; + +static struct attn_hw_reg *dorq_prty_bb_b0_regs[2] = { + &dorq_prty0_bb_b0, &dorq_prty1_bb_b0, +}; + +static const u16 dorq_prty0_k2_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg dorq_prty0_k2 = { + 0, 1, dorq_prty0_k2_attn_idx, 0x100190, 0x10019c, 0x100198, 0x100194 +}; + +static const u16 dorq_prty1_k2_attn_idx[6] = { + 1, 2, 3, 4, 5, 6, +}; + +static struct attn_hw_reg dorq_prty1_k2 = { + 1, 6, dorq_prty1_k2_attn_idx, 0x100200, 0x10020c, 0x100208, 0x100204 +}; + +static struct attn_hw_reg *dorq_prty_k2_regs[2] = { + &dorq_prty0_k2, &dorq_prty1_k2, +}; + +#ifdef ATTN_DESC +static const char *brb_int_attn_desc[237] = { + "brb_address_error", + "brb_rc_pkt0_rls_error", + "brb_rc_pkt0_1st_error", + "brb_rc_pkt0_len_error", + "brb_rc_pkt0_middle_error", + "brb_rc_pkt0_protocol_error", + "brb_rc_pkt1_rls_error", + "brb_rc_pkt1_1st_error", + "brb_rc_pkt1_len_error", + "brb_rc_pkt1_middle_error", + "brb_rc_pkt1_protocol_error", + "brb_rc_pkt2_rls_error", + "brb_rc_pkt2_1st_error", + "brb_rc_pkt2_len_error", + "brb_rc_pkt2_middle_error", + "brb_rc_pkt2_protocol_error", + "brb_rc_pkt3_rls_error", + "brb_rc_pkt3_1st_error", + "brb_rc_pkt3_len_error", + "brb_rc_pkt3_middle_error", + "brb_rc_pkt3_protocol_error", + "brb_rc_sop_req_tc_port_error", + "brb_uncomplient_lossless_error", + 
"brb_wc0_protocol_error", + "brb_wc1_protocol_error", + "brb_wc2_protocol_error", + "brb_wc3_protocol_error", + "brb_ll_arb_prefetch_sop_error", + "brb_ll_blk_error", + "brb_packet_counter_error", + "brb_byte_counter_error", + "brb_mac0_fc_cnt_error", + "brb_mac1_fc_cnt_error", + "brb_ll_arb_calc_error", + "brb_unused_0", + "brb_wc0_inp_fifo_error", + "brb_wc0_sop_fifo_error", + "brb_unused_1", + "brb_wc0_eop_fifo_error", + "brb_wc0_queue_fifo_error", + "brb_wc0_free_point_fifo_error", + "brb_wc0_next_point_fifo_error", + "brb_wc0_strt_fifo_error", + "brb_wc0_second_dscr_fifo_error", + "brb_wc0_pkt_avail_fifo_error", + "brb_wc0_cos_cnt_fifo_error", + "brb_wc0_notify_fifo_error", + "brb_wc0_ll_req_fifo_error", + "brb_wc0_ll_pa_cnt_error", + "brb_wc0_bb_pa_cnt_error", + "brb_wc1_inp_fifo_error", + "brb_wc1_sop_fifo_error", + "brb_wc1_eop_fifo_error", + "brb_wc1_queue_fifo_error", + "brb_wc1_free_point_fifo_error", + "brb_wc1_next_point_fifo_error", + "brb_wc1_strt_fifo_error", + "brb_wc1_second_dscr_fifo_error", + "brb_wc1_pkt_avail_fifo_error", + "brb_wc1_cos_cnt_fifo_error", + "brb_wc1_notify_fifo_error", + "brb_wc1_ll_req_fifo_error", + "brb_wc1_ll_pa_cnt_error", + "brb_wc1_bb_pa_cnt_error", + "brb_wc2_inp_fifo_error", + "brb_wc2_sop_fifo_error", + "brb_wc2_eop_fifo_error", + "brb_wc2_queue_fifo_error", + "brb_wc2_free_point_fifo_error", + "brb_wc2_next_point_fifo_error", + "brb_wc2_strt_fifo_error", + "brb_wc2_second_dscr_fifo_error", + "brb_wc2_pkt_avail_fifo_error", + "brb_wc2_cos_cnt_fifo_error", + "brb_wc2_notify_fifo_error", + "brb_wc2_ll_req_fifo_error", + "brb_wc2_ll_pa_cnt_error", + "brb_wc2_bb_pa_cnt_error", + "brb_wc3_inp_fifo_error", + "brb_wc3_sop_fifo_error", + "brb_wc3_eop_fifo_error", + "brb_wc3_queue_fifo_error", + "brb_wc3_free_point_fifo_error", + "brb_wc3_next_point_fifo_error", + "brb_wc3_strt_fifo_error", + "brb_wc3_second_dscr_fifo_error", + "brb_wc3_pkt_avail_fifo_error", + "brb_wc3_cos_cnt_fifo_error", + "brb_wc3_notify_fifo_error", + "brb_wc3_ll_req_fifo_error", + "brb_wc3_ll_pa_cnt_error", + "brb_wc3_bb_pa_cnt_error", + "brb_rc_pkt0_side_fifo_error", + "brb_rc_pkt0_req_fifo_error", + "brb_rc_pkt0_blk_fifo_error", + "brb_rc_pkt0_rls_left_fifo_error", + "brb_rc_pkt0_strt_ptr_fifo_error", + "brb_rc_pkt0_second_ptr_fifo_error", + "brb_rc_pkt0_rsp_fifo_error", + "brb_rc_pkt0_dscr_fifo_error", + "brb_rc_pkt1_side_fifo_error", + "brb_rc_pkt1_req_fifo_error", + "brb_rc_pkt1_blk_fifo_error", + "brb_rc_pkt1_rls_left_fifo_error", + "brb_rc_pkt1_strt_ptr_fifo_error", + "brb_rc_pkt1_second_ptr_fifo_error", + "brb_rc_pkt1_rsp_fifo_error", + "brb_rc_pkt1_dscr_fifo_error", + "brb_rc_pkt2_side_fifo_error", + "brb_rc_pkt2_req_fifo_error", + "brb_rc_pkt2_blk_fifo_error", + "brb_rc_pkt2_rls_left_fifo_error", + "brb_rc_pkt2_strt_ptr_fifo_error", + "brb_rc_pkt2_second_ptr_fifo_error", + "brb_rc_pkt2_rsp_fifo_error", + "brb_rc_pkt2_dscr_fifo_error", + "brb_rc_pkt3_side_fifo_error", + "brb_rc_pkt3_req_fifo_error", + "brb_rc_pkt3_blk_fifo_error", + "brb_rc_pkt3_rls_left_fifo_error", + "brb_rc_pkt3_strt_ptr_fifo_error", + "brb_rc_pkt3_second_ptr_fifo_error", + "brb_rc_pkt3_rsp_fifo_error", + "brb_rc_pkt3_dscr_fifo_error", + "brb_rc_sop_strt_fifo_error", + "brb_rc_sop_req_fifo_error", + "brb_rc_sop_dscr_fifo_error", + "brb_rc_sop_queue_fifo_error", + "brb_rc0_eop_error", + "brb_rc1_eop_error", + "brb_ll_arb_rls_fifo_error", + "brb_ll_arb_prefetch_fifo_error", + "brb_rc_pkt0_rls_fifo_error", + "brb_rc_pkt1_rls_fifo_error", + "brb_rc_pkt2_rls_fifo_error", + "brb_rc_pkt3_rls_fifo_error", + 
"brb_rc_pkt4_rls_fifo_error", + "brb_rc_pkt4_rls_error", + "brb_rc_pkt4_1st_error", + "brb_rc_pkt4_len_error", + "brb_rc_pkt4_middle_error", + "brb_rc_pkt4_protocol_error", + "brb_rc_pkt4_side_fifo_error", + "brb_rc_pkt4_req_fifo_error", + "brb_rc_pkt4_blk_fifo_error", + "brb_rc_pkt4_rls_left_fifo_error", + "brb_rc_pkt4_strt_ptr_fifo_error", + "brb_rc_pkt4_second_ptr_fifo_error", + "brb_rc_pkt4_rsp_fifo_error", + "brb_rc_pkt4_dscr_fifo_error", + "brb_rc_pkt5_rls_error", + "brb_packet_available_sync_fifo_push_error", + "brb_wc4_protocol_error", + "brb_wc5_protocol_error", + "brb_wc6_protocol_error", + "brb_wc7_protocol_error", + "brb_wc4_inp_fifo_error", + "brb_wc4_sop_fifo_error", + "brb_wc4_queue_fifo_error", + "brb_wc4_free_point_fifo_error", + "brb_wc4_next_point_fifo_error", + "brb_wc4_strt_fifo_error", + "brb_wc4_second_dscr_fifo_error", + "brb_wc4_pkt_avail_fifo_error", + "brb_wc4_cos_cnt_fifo_error", + "brb_wc4_notify_fifo_error", + "brb_wc4_ll_req_fifo_error", + "brb_wc4_ll_pa_cnt_error", + "brb_wc4_bb_pa_cnt_error", + "brb_wc5_inp_fifo_error", + "brb_wc5_sop_fifo_error", + "brb_wc5_queue_fifo_error", + "brb_wc5_free_point_fifo_error", + "brb_wc5_next_point_fifo_error", + "brb_wc5_strt_fifo_error", + "brb_wc5_second_dscr_fifo_error", + "brb_wc5_pkt_avail_fifo_error", + "brb_wc5_cos_cnt_fifo_error", + "brb_wc5_notify_fifo_error", + "brb_wc5_ll_req_fifo_error", + "brb_wc5_ll_pa_cnt_error", + "brb_wc5_bb_pa_cnt_error", + "brb_wc6_inp_fifo_error", + "brb_wc6_sop_fifo_error", + "brb_wc6_queue_fifo_error", + "brb_wc6_free_point_fifo_error", + "brb_wc6_next_point_fifo_error", + "brb_wc6_strt_fifo_error", + "brb_wc6_second_dscr_fifo_error", + "brb_wc6_pkt_avail_fifo_error", + "brb_wc6_cos_cnt_fifo_error", + "brb_wc6_notify_fifo_error", + "brb_wc6_ll_req_fifo_error", + "brb_wc6_ll_pa_cnt_error", + "brb_wc6_bb_pa_cnt_error", + "brb_wc7_inp_fifo_error", + "brb_wc7_sop_fifo_error", + "brb_wc7_queue_fifo_error", + "brb_wc7_free_point_fifo_error", + "brb_wc7_next_point_fifo_error", + "brb_wc7_strt_fifo_error", + "brb_wc7_second_dscr_fifo_error", + "brb_wc7_pkt_avail_fifo_error", + "brb_wc7_cos_cnt_fifo_error", + "brb_wc7_notify_fifo_error", + "brb_wc7_ll_req_fifo_error", + "brb_wc7_ll_pa_cnt_error", + "brb_wc7_bb_pa_cnt_error", + "brb_wc9_queue_fifo_error", + "brb_rc_sop_inp_sync_fifo_push_error", + "brb_rc0_inp_sync_fifo_push_error", + "brb_rc1_inp_sync_fifo_push_error", + "brb_rc2_inp_sync_fifo_push_error", + "brb_rc3_inp_sync_fifo_push_error", + "brb_rc0_out_sync_fifo_push_error", + "brb_rc1_out_sync_fifo_push_error", + "brb_rc2_out_sync_fifo_push_error", + "brb_rc3_out_sync_fifo_push_error", + "brb_rc4_out_sync_fifo_push_error", + "brb_unused_2", + "brb_rc0_eop_inp_sync_fifo_push_error", + "brb_rc1_eop_inp_sync_fifo_push_error", + "brb_rc2_eop_inp_sync_fifo_push_error", + "brb_rc3_eop_inp_sync_fifo_push_error", + "brb_rc0_eop_out_sync_fifo_push_error", + "brb_rc1_eop_out_sync_fifo_push_error", + "brb_rc2_eop_out_sync_fifo_push_error", + "brb_rc3_eop_out_sync_fifo_push_error", + "brb_unused_3", + "brb_rc2_eop_error", + "brb_rc3_eop_error", + "brb_mac2_fc_cnt_error", + "brb_mac3_fc_cnt_error", + "brb_wc4_eop_fifo_error", + "brb_wc5_eop_fifo_error", + "brb_wc6_eop_fifo_error", + "brb_wc7_eop_fifo_error", +}; +#else +#define brb_int_attn_desc OSAL_NULL +#endif + +static const u16 brb_int0_bb_a0_attn_idx[32] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, +}; + +static struct attn_hw_reg brb_int0_bb_a0 = { + 0, 32, 
brb_int0_bb_a0_attn_idx, 0x3400c0, 0x3400cc, 0x3400c8, 0x3400c4 +}; + +static const u16 brb_int1_bb_a0_attn_idx[30] = { + 32, 33, 35, 36, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, + 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, +}; + +static struct attn_hw_reg brb_int1_bb_a0 = { + 1, 30, brb_int1_bb_a0_attn_idx, 0x3400d8, 0x3400e4, 0x3400e0, 0x3400dc +}; + +static const u16 brb_int2_bb_a0_attn_idx[28] = { + 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, + 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, +}; + +static struct attn_hw_reg brb_int2_bb_a0 = { + 2, 28, brb_int2_bb_a0_attn_idx, 0x3400f0, 0x3400fc, 0x3400f8, 0x3400f4 +}; + +static const u16 brb_int3_bb_a0_attn_idx[31] = { + 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, + 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, + 122, +}; + +static struct attn_hw_reg brb_int3_bb_a0 = { + 3, 31, brb_int3_bb_a0_attn_idx, 0x340108, 0x340114, 0x340110, 0x34010c +}; + +static const u16 brb_int4_bb_a0_attn_idx[27] = { + 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, + 137, + 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, +}; + +static struct attn_hw_reg brb_int4_bb_a0 = { + 4, 27, brb_int4_bb_a0_attn_idx, 0x340120, 0x34012c, 0x340128, 0x340124 +}; + +static const u16 brb_int5_bb_a0_attn_idx[1] = { + 150, +}; + +static struct attn_hw_reg brb_int5_bb_a0 = { + 5, 1, brb_int5_bb_a0_attn_idx, 0x340138, 0x340144, 0x340140, 0x34013c +}; + +static const u16 brb_int6_bb_a0_attn_idx[8] = { + 151, 152, 153, 154, 155, 156, 157, 158, +}; + +static struct attn_hw_reg brb_int6_bb_a0 = { + 6, 8, brb_int6_bb_a0_attn_idx, 0x340150, 0x34015c, 0x340158, 0x340154 +}; + +static const u16 brb_int7_bb_a0_attn_idx[32] = { + 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, + 173, + 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, + 188, 189, + 190, +}; + +static struct attn_hw_reg brb_int7_bb_a0 = { + 7, 32, brb_int7_bb_a0_attn_idx, 0x340168, 0x340174, 0x340170, 0x34016c +}; + +static const u16 brb_int8_bb_a0_attn_idx[17] = { + 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, + 205, + 206, 207, +}; + +static struct attn_hw_reg brb_int8_bb_a0 = { + 8, 17, brb_int8_bb_a0_attn_idx, 0x340184, 0x340190, 0x34018c, 0x340188 +}; + +static const u16 brb_int9_bb_a0_attn_idx[1] = { + 208, +}; + +static struct attn_hw_reg brb_int9_bb_a0 = { + 9, 1, brb_int9_bb_a0_attn_idx, 0x34019c, 0x3401a8, 0x3401a4, 0x3401a0 +}; + +static const u16 brb_int10_bb_a0_attn_idx[14] = { + 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 220, 221, 224, 225, +}; + +static struct attn_hw_reg brb_int10_bb_a0 = { + 10, 14, brb_int10_bb_a0_attn_idx, 0x3401b4, 0x3401c0, 0x3401bc, + 0x3401b8 +}; + +static const u16 brb_int11_bb_a0_attn_idx[8] = { + 229, 230, 231, 232, 233, 234, 235, 236, +}; + +static struct attn_hw_reg brb_int11_bb_a0 = { + 11, 8, brb_int11_bb_a0_attn_idx, 0x3401cc, 0x3401d8, 0x3401d4, 0x3401d0 +}; + +static struct attn_hw_reg *brb_int_bb_a0_regs[12] = { + &brb_int0_bb_a0, &brb_int1_bb_a0, &brb_int2_bb_a0, &brb_int3_bb_a0, + &brb_int4_bb_a0, &brb_int5_bb_a0, &brb_int6_bb_a0, &brb_int7_bb_a0, + &brb_int8_bb_a0, &brb_int9_bb_a0, + &brb_int10_bb_a0, &brb_int11_bb_a0, +}; + +static const u16 brb_int0_bb_b0_attn_idx[32] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, +}; + +static struct attn_hw_reg brb_int0_bb_b0 = { + 0, 32, 
brb_int0_bb_b0_attn_idx, 0x3400c0, 0x3400cc, 0x3400c8, 0x3400c4 +}; + +static const u16 brb_int1_bb_b0_attn_idx[30] = { + 32, 33, 35, 36, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, + 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, +}; + +static struct attn_hw_reg brb_int1_bb_b0 = { + 1, 30, brb_int1_bb_b0_attn_idx, 0x3400d8, 0x3400e4, 0x3400e0, 0x3400dc +}; + +static const u16 brb_int2_bb_b0_attn_idx[28] = { + 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, + 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, +}; + +static struct attn_hw_reg brb_int2_bb_b0 = { + 2, 28, brb_int2_bb_b0_attn_idx, 0x3400f0, 0x3400fc, 0x3400f8, 0x3400f4 +}; + +static const u16 brb_int3_bb_b0_attn_idx[31] = { + 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, + 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, + 122, +}; + +static struct attn_hw_reg brb_int3_bb_b0 = { + 3, 31, brb_int3_bb_b0_attn_idx, 0x340108, 0x340114, 0x340110, 0x34010c +}; + +static const u16 brb_int4_bb_b0_attn_idx[27] = { + 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, + 137, + 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, +}; + +static struct attn_hw_reg brb_int4_bb_b0 = { + 4, 27, brb_int4_bb_b0_attn_idx, 0x340120, 0x34012c, 0x340128, 0x340124 +}; + +static const u16 brb_int5_bb_b0_attn_idx[1] = { + 150, +}; + +static struct attn_hw_reg brb_int5_bb_b0 = { + 5, 1, brb_int5_bb_b0_attn_idx, 0x340138, 0x340144, 0x340140, 0x34013c +}; + +static const u16 brb_int6_bb_b0_attn_idx[8] = { + 151, 152, 153, 154, 155, 156, 157, 158, +}; + +static struct attn_hw_reg brb_int6_bb_b0 = { + 6, 8, brb_int6_bb_b0_attn_idx, 0x340150, 0x34015c, 0x340158, 0x340154 +}; + +static const u16 brb_int7_bb_b0_attn_idx[32] = { + 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, + 173, + 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, + 188, 189, + 190, +}; + +static struct attn_hw_reg brb_int7_bb_b0 = { + 7, 32, brb_int7_bb_b0_attn_idx, 0x340168, 0x340174, 0x340170, 0x34016c +}; + +static const u16 brb_int8_bb_b0_attn_idx[17] = { + 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, + 205, + 206, 207, +}; + +static struct attn_hw_reg brb_int8_bb_b0 = { + 8, 17, brb_int8_bb_b0_attn_idx, 0x340184, 0x340190, 0x34018c, 0x340188 +}; + +static const u16 brb_int9_bb_b0_attn_idx[1] = { + 208, +}; + +static struct attn_hw_reg brb_int9_bb_b0 = { + 9, 1, brb_int9_bb_b0_attn_idx, 0x34019c, 0x3401a8, 0x3401a4, 0x3401a0 +}; + +static const u16 brb_int10_bb_b0_attn_idx[14] = { + 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 220, 221, 224, 225, +}; + +static struct attn_hw_reg brb_int10_bb_b0 = { + 10, 14, brb_int10_bb_b0_attn_idx, 0x3401b4, 0x3401c0, 0x3401bc, + 0x3401b8 +}; + +static const u16 brb_int11_bb_b0_attn_idx[8] = { + 229, 230, 231, 232, 233, 234, 235, 236, +}; + +static struct attn_hw_reg brb_int11_bb_b0 = { + 11, 8, brb_int11_bb_b0_attn_idx, 0x3401cc, 0x3401d8, 0x3401d4, 0x3401d0 +}; + +static struct attn_hw_reg *brb_int_bb_b0_regs[12] = { + &brb_int0_bb_b0, &brb_int1_bb_b0, &brb_int2_bb_b0, &brb_int3_bb_b0, + &brb_int4_bb_b0, &brb_int5_bb_b0, &brb_int6_bb_b0, &brb_int7_bb_b0, + &brb_int8_bb_b0, &brb_int9_bb_b0, + &brb_int10_bb_b0, &brb_int11_bb_b0, +}; + +static const u16 brb_int0_k2_attn_idx[32] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, +}; + +static struct attn_hw_reg brb_int0_k2 = { + 0, 32, brb_int0_k2_attn_idx, 
0x3400c0, 0x3400cc, 0x3400c8, 0x3400c4 +}; + +static const u16 brb_int1_k2_attn_idx[30] = { + 32, 33, 35, 36, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, + 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, +}; + +static struct attn_hw_reg brb_int1_k2 = { + 1, 30, brb_int1_k2_attn_idx, 0x3400d8, 0x3400e4, 0x3400e0, 0x3400dc +}; + +static const u16 brb_int2_k2_attn_idx[28] = { + 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, + 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, +}; + +static struct attn_hw_reg brb_int2_k2 = { + 2, 28, brb_int2_k2_attn_idx, 0x3400f0, 0x3400fc, 0x3400f8, 0x3400f4 +}; + +static const u16 brb_int3_k2_attn_idx[31] = { + 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, + 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, + 122, +}; + +static struct attn_hw_reg brb_int3_k2 = { + 3, 31, brb_int3_k2_attn_idx, 0x340108, 0x340114, 0x340110, 0x34010c +}; + +static const u16 brb_int4_k2_attn_idx[27] = { + 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, + 137, + 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, +}; + +static struct attn_hw_reg brb_int4_k2 = { + 4, 27, brb_int4_k2_attn_idx, 0x340120, 0x34012c, 0x340128, 0x340124 +}; + +static const u16 brb_int5_k2_attn_idx[1] = { + 150, +}; + +static struct attn_hw_reg brb_int5_k2 = { + 5, 1, brb_int5_k2_attn_idx, 0x340138, 0x340144, 0x340140, 0x34013c +}; + +static const u16 brb_int6_k2_attn_idx[8] = { + 151, 152, 153, 154, 155, 156, 157, 158, +}; + +static struct attn_hw_reg brb_int6_k2 = { + 6, 8, brb_int6_k2_attn_idx, 0x340150, 0x34015c, 0x340158, 0x340154 +}; + +static const u16 brb_int7_k2_attn_idx[32] = { + 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, + 173, + 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, + 188, 189, + 190, +}; + +static struct attn_hw_reg brb_int7_k2 = { + 7, 32, brb_int7_k2_attn_idx, 0x340168, 0x340174, 0x340170, 0x34016c +}; + +static const u16 brb_int8_k2_attn_idx[17] = { + 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, + 205, + 206, 207, +}; + +static struct attn_hw_reg brb_int8_k2 = { + 8, 17, brb_int8_k2_attn_idx, 0x340184, 0x340190, 0x34018c, 0x340188 +}; + +static const u16 brb_int9_k2_attn_idx[1] = { + 208, +}; + +static struct attn_hw_reg brb_int9_k2 = { + 9, 1, brb_int9_k2_attn_idx, 0x34019c, 0x3401a8, 0x3401a4, 0x3401a0 +}; + +static const u16 brb_int10_k2_attn_idx[18] = { + 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 220, 221, 222, 223, + 224, + 225, 226, 227, +}; + +static struct attn_hw_reg brb_int10_k2 = { + 10, 18, brb_int10_k2_attn_idx, 0x3401b4, 0x3401c0, 0x3401bc, 0x3401b8 +}; + +static const u16 brb_int11_k2_attn_idx[8] = { + 229, 230, 231, 232, 233, 234, 235, 236, +}; + +static struct attn_hw_reg brb_int11_k2 = { + 11, 8, brb_int11_k2_attn_idx, 0x3401cc, 0x3401d8, 0x3401d4, 0x3401d0 +}; + +static struct attn_hw_reg *brb_int_k2_regs[12] = { + &brb_int0_k2, &brb_int1_k2, &brb_int2_k2, &brb_int3_k2, &brb_int4_k2, + &brb_int5_k2, &brb_int6_k2, &brb_int7_k2, &brb_int8_k2, &brb_int9_k2, + &brb_int10_k2, &brb_int11_k2, +}; + +#ifdef ATTN_DESC +static const char *brb_prty_attn_desc[75] = { + "brb_ll_bank0_mem_prty", + "brb_ll_bank1_mem_prty", + "brb_ll_bank2_mem_prty", + "brb_ll_bank3_mem_prty", + "brb_datapath_registers", + "brb_mem001_i_ecc_rf_int", + "brb_mem008_i_ecc_rf_int", + "brb_mem009_i_ecc_rf_int", + "brb_mem010_i_ecc_rf_int", + "brb_mem011_i_ecc_rf_int", + "brb_mem012_i_ecc_rf_int", + 
"brb_mem013_i_ecc_rf_int", + "brb_mem014_i_ecc_rf_int", + "brb_mem015_i_ecc_rf_int", + "brb_mem016_i_ecc_rf_int", + "brb_mem002_i_ecc_rf_int", + "brb_mem003_i_ecc_rf_int", + "brb_mem004_i_ecc_rf_int", + "brb_mem005_i_ecc_rf_int", + "brb_mem006_i_ecc_rf_int", + "brb_mem007_i_ecc_rf_int", + "brb_mem070_i_mem_prty", + "brb_mem069_i_mem_prty", + "brb_mem053_i_mem_prty", + "brb_mem054_i_mem_prty", + "brb_mem055_i_mem_prty", + "brb_mem056_i_mem_prty", + "brb_mem057_i_mem_prty", + "brb_mem058_i_mem_prty", + "brb_mem059_i_mem_prty", + "brb_mem060_i_mem_prty", + "brb_mem061_i_mem_prty", + "brb_mem062_i_mem_prty", + "brb_mem063_i_mem_prty", + "brb_mem064_i_mem_prty", + "brb_mem065_i_mem_prty", + "brb_mem045_i_mem_prty", + "brb_mem046_i_mem_prty", + "brb_mem047_i_mem_prty", + "brb_mem048_i_mem_prty", + "brb_mem049_i_mem_prty", + "brb_mem050_i_mem_prty", + "brb_mem051_i_mem_prty", + "brb_mem052_i_mem_prty", + "brb_mem041_i_mem_prty", + "brb_mem042_i_mem_prty", + "brb_mem043_i_mem_prty", + "brb_mem044_i_mem_prty", + "brb_mem040_i_mem_prty", + "brb_mem035_i_mem_prty", + "brb_mem066_i_mem_prty", + "brb_mem067_i_mem_prty", + "brb_mem068_i_mem_prty", + "brb_mem030_i_mem_prty", + "brb_mem031_i_mem_prty", + "brb_mem032_i_mem_prty", + "brb_mem033_i_mem_prty", + "brb_mem037_i_mem_prty", + "brb_mem038_i_mem_prty", + "brb_mem034_i_mem_prty", + "brb_mem036_i_mem_prty", + "brb_mem017_i_mem_prty", + "brb_mem018_i_mem_prty", + "brb_mem019_i_mem_prty", + "brb_mem020_i_mem_prty", + "brb_mem021_i_mem_prty", + "brb_mem022_i_mem_prty", + "brb_mem023_i_mem_prty", + "brb_mem024_i_mem_prty", + "brb_mem029_i_mem_prty", + "brb_mem026_i_mem_prty", + "brb_mem027_i_mem_prty", + "brb_mem028_i_mem_prty", + "brb_mem025_i_mem_prty", + "brb_mem039_i_mem_prty", +}; +#else +#define brb_prty_attn_desc OSAL_NULL +#endif + +static const u16 brb_prty1_bb_a0_attn_idx[31] = { + 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 23, 24, 36, + 37, + 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 49, +}; + +static struct attn_hw_reg brb_prty1_bb_a0 = { + 0, 31, brb_prty1_bb_a0_attn_idx, 0x340400, 0x34040c, 0x340408, 0x340404 +}; + +static const u16 brb_prty2_bb_a0_attn_idx[19] = { + 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 69, 70, 71, 72, 73, 74, + 48, +}; + +static struct attn_hw_reg brb_prty2_bb_a0 = { + 1, 19, brb_prty2_bb_a0_attn_idx, 0x340410, 0x34041c, 0x340418, 0x340414 +}; + +static struct attn_hw_reg *brb_prty_bb_a0_regs[2] = { + &brb_prty1_bb_a0, &brb_prty2_bb_a0, +}; + +static const u16 brb_prty0_bb_b0_attn_idx[5] = { + 0, 1, 2, 3, 4, +}; + +static struct attn_hw_reg brb_prty0_bb_b0 = { + 0, 5, brb_prty0_bb_b0_attn_idx, 0x3401dc, 0x3401e8, 0x3401e4, 0x3401e0 +}; + +static const u16 brb_prty1_bb_b0_attn_idx[31] = { + 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 23, 24, 36, + 37, + 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, +}; + +static struct attn_hw_reg brb_prty1_bb_b0 = { + 1, 31, brb_prty1_bb_b0_attn_idx, 0x340400, 0x34040c, 0x340408, 0x340404 +}; + +static const u16 brb_prty2_bb_b0_attn_idx[14] = { + 53, 54, 55, 56, 59, 61, 62, 63, 64, 69, 70, 71, 72, 73, +}; + +static struct attn_hw_reg brb_prty2_bb_b0 = { + 2, 14, brb_prty2_bb_b0_attn_idx, 0x340410, 0x34041c, 0x340418, 0x340414 +}; + +static struct attn_hw_reg *brb_prty_bb_b0_regs[3] = { + &brb_prty0_bb_b0, &brb_prty1_bb_b0, &brb_prty2_bb_b0, +}; + +static const u16 brb_prty0_k2_attn_idx[5] = { + 0, 1, 2, 3, 4, +}; + +static struct attn_hw_reg brb_prty0_k2 = { + 0, 5, brb_prty0_k2_attn_idx, 0x3401dc, 0x3401e8, 0x3401e4, 0x3401e0 +}; + +static const u16 
brb_prty1_k2_attn_idx[31] = { + 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, + 24, + 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, +}; + +static struct attn_hw_reg brb_prty1_k2 = { + 1, 31, brb_prty1_k2_attn_idx, 0x340400, 0x34040c, 0x340408, 0x340404 +}; + +static const u16 brb_prty2_k2_attn_idx[30] = { + 50, 51, 52, 36, 37, 38, 39, 40, 41, 42, 43, 47, 53, 54, 55, 56, 57, 58, + 59, 49, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, +}; + +static struct attn_hw_reg brb_prty2_k2 = { + 2, 30, brb_prty2_k2_attn_idx, 0x340410, 0x34041c, 0x340418, 0x340414 +}; + +static struct attn_hw_reg *brb_prty_k2_regs[3] = { + &brb_prty0_k2, &brb_prty1_k2, &brb_prty2_k2, +}; + +#ifdef ATTN_DESC +static const char *src_int_attn_desc[1] = { + "src_address_error", +}; +#else +#define src_int_attn_desc OSAL_NULL +#endif + +static const u16 src_int0_bb_a0_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg src_int0_bb_a0 = { + 0, 1, src_int0_bb_a0_attn_idx, 0x2381d8, 0x2381dc, 0x2381e0, 0x2381e4 +}; + +static struct attn_hw_reg *src_int_bb_a0_regs[1] = { + &src_int0_bb_a0, +}; + +static const u16 src_int0_bb_b0_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg src_int0_bb_b0 = { + 0, 1, src_int0_bb_b0_attn_idx, 0x2381d8, 0x2381dc, 0x2381e0, 0x2381e4 +}; + +static struct attn_hw_reg *src_int_bb_b0_regs[1] = { + &src_int0_bb_b0, +}; + +static const u16 src_int0_k2_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg src_int0_k2 = { + 0, 1, src_int0_k2_attn_idx, 0x2381d8, 0x2381dc, 0x2381e0, 0x2381e4 +}; + +static struct attn_hw_reg *src_int_k2_regs[1] = { + &src_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *prs_int_attn_desc[2] = { + "prs_address_error", + "prs_lcid_validation_err", +}; +#else +#define prs_int_attn_desc OSAL_NULL +#endif + +static const u16 prs_int0_bb_a0_attn_idx[2] = { + 0, 1, +}; + +static struct attn_hw_reg prs_int0_bb_a0 = { + 0, 2, prs_int0_bb_a0_attn_idx, 0x1f0040, 0x1f004c, 0x1f0048, 0x1f0044 +}; + +static struct attn_hw_reg *prs_int_bb_a0_regs[1] = { + &prs_int0_bb_a0, +}; + +static const u16 prs_int0_bb_b0_attn_idx[2] = { + 0, 1, +}; + +static struct attn_hw_reg prs_int0_bb_b0 = { + 0, 2, prs_int0_bb_b0_attn_idx, 0x1f0040, 0x1f004c, 0x1f0048, 0x1f0044 +}; + +static struct attn_hw_reg *prs_int_bb_b0_regs[1] = { + &prs_int0_bb_b0, +}; + +static const u16 prs_int0_k2_attn_idx[2] = { + 0, 1, +}; + +static struct attn_hw_reg prs_int0_k2 = { + 0, 2, prs_int0_k2_attn_idx, 0x1f0040, 0x1f004c, 0x1f0048, 0x1f0044 +}; + +static struct attn_hw_reg *prs_int_k2_regs[1] = { + &prs_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *prs_prty_attn_desc[75] = { + "prs_cam_parity", + "prs_gft_cam_parity", + "prs_mem011_i_ecc_rf_int", + "prs_mem012_i_ecc_rf_int", + "prs_mem016_i_ecc_rf_int", + "prs_mem017_i_ecc_rf_int", + "prs_mem021_i_ecc_rf_int", + "prs_mem022_i_ecc_rf_int", + "prs_mem026_i_ecc_rf_int", + "prs_mem027_i_ecc_rf_int", + "prs_mem064_i_mem_prty", + "prs_mem044_i_mem_prty", + "prs_mem043_i_mem_prty", + "prs_mem037_i_mem_prty", + "prs_mem033_i_mem_prty", + "prs_mem034_i_mem_prty", + "prs_mem035_i_mem_prty", + "prs_mem036_i_mem_prty", + "prs_mem029_i_mem_prty", + "prs_mem030_i_mem_prty", + "prs_mem031_i_mem_prty", + "prs_mem032_i_mem_prty", + "prs_mem007_i_mem_prty", + "prs_mem028_i_mem_prty", + "prs_mem039_i_mem_prty", + "prs_mem040_i_mem_prty", + "prs_mem058_i_mem_prty", + "prs_mem059_i_mem_prty", + "prs_mem041_i_mem_prty", + "prs_mem042_i_mem_prty", + "prs_mem060_i_mem_prty", + "prs_mem061_i_mem_prty", + "prs_mem009_i_mem_prty", + "prs_mem009_i_ecc_rf_int", + 
"prs_mem010_i_ecc_rf_int", + "prs_mem014_i_ecc_rf_int", + "prs_mem015_i_ecc_rf_int", + "prs_mem026_i_mem_prty", + "prs_mem025_i_mem_prty", + "prs_mem021_i_mem_prty", + "prs_mem019_i_mem_prty", + "prs_mem020_i_mem_prty", + "prs_mem017_i_mem_prty", + "prs_mem018_i_mem_prty", + "prs_mem005_i_mem_prty", + "prs_mem016_i_mem_prty", + "prs_mem023_i_mem_prty", + "prs_mem024_i_mem_prty", + "prs_mem008_i_mem_prty", + "prs_mem012_i_mem_prty", + "prs_mem013_i_mem_prty", + "prs_mem006_i_mem_prty", + "prs_mem011_i_mem_prty", + "prs_mem003_i_mem_prty", + "prs_mem004_i_mem_prty", + "prs_mem027_i_mem_prty", + "prs_mem010_i_mem_prty", + "prs_mem014_i_mem_prty", + "prs_mem015_i_mem_prty", + "prs_mem054_i_mem_prty", + "prs_mem055_i_mem_prty", + "prs_mem056_i_mem_prty", + "prs_mem057_i_mem_prty", + "prs_mem046_i_mem_prty", + "prs_mem047_i_mem_prty", + "prs_mem048_i_mem_prty", + "prs_mem049_i_mem_prty", + "prs_mem050_i_mem_prty", + "prs_mem051_i_mem_prty", + "prs_mem052_i_mem_prty", + "prs_mem053_i_mem_prty", + "prs_mem062_i_mem_prty", + "prs_mem045_i_mem_prty", + "prs_mem002_i_mem_prty", + "prs_mem001_i_mem_prty", +}; +#else +#define prs_prty_attn_desc OSAL_NULL +#endif + +static const u16 prs_prty0_bb_a0_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg prs_prty0_bb_a0 = { + 0, 1, prs_prty0_bb_a0_attn_idx, 0x1f0050, 0x1f005c, 0x1f0058, 0x1f0054 +}; + +static const u16 prs_prty1_bb_a0_attn_idx[31] = { + 13, 14, 15, 16, 18, 21, 22, 23, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, + 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, +}; + +static struct attn_hw_reg prs_prty1_bb_a0 = { + 1, 31, prs_prty1_bb_a0_attn_idx, 0x1f0204, 0x1f0210, 0x1f020c, 0x1f0208 +}; + +static const u16 prs_prty2_bb_a0_attn_idx[5] = { + 73, 74, 20, 17, 19, +}; + +static struct attn_hw_reg prs_prty2_bb_a0 = { + 2, 5, prs_prty2_bb_a0_attn_idx, 0x1f0214, 0x1f0220, 0x1f021c, 0x1f0218 +}; + +static struct attn_hw_reg *prs_prty_bb_a0_regs[3] = { + &prs_prty0_bb_a0, &prs_prty1_bb_a0, &prs_prty2_bb_a0, +}; + +static const u16 prs_prty0_bb_b0_attn_idx[2] = { + 0, 1, +}; + +static struct attn_hw_reg prs_prty0_bb_b0 = { + 0, 2, prs_prty0_bb_b0_attn_idx, 0x1f0050, 0x1f005c, 0x1f0058, 0x1f0054 +}; + +static const u16 prs_prty1_bb_b0_attn_idx[31] = { + 13, 14, 15, 16, 18, 19, 21, 22, 23, 33, 34, 35, 36, 37, 38, 39, 40, 41, + 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, +}; + +static struct attn_hw_reg prs_prty1_bb_b0 = { + 1, 31, prs_prty1_bb_b0_attn_idx, 0x1f0204, 0x1f0210, 0x1f020c, 0x1f0208 +}; + +static const u16 prs_prty2_bb_b0_attn_idx[5] = { + 73, 74, 20, 17, 55, +}; + +static struct attn_hw_reg prs_prty2_bb_b0 = { + 2, 5, prs_prty2_bb_b0_attn_idx, 0x1f0214, 0x1f0220, 0x1f021c, 0x1f0218 +}; + +static struct attn_hw_reg *prs_prty_bb_b0_regs[3] = { + &prs_prty0_bb_b0, &prs_prty1_bb_b0, &prs_prty2_bb_b0, +}; + +static const u16 prs_prty0_k2_attn_idx[2] = { + 0, 1, +}; + +static struct attn_hw_reg prs_prty0_k2 = { + 0, 2, prs_prty0_k2_attn_idx, 0x1f0050, 0x1f005c, 0x1f0058, 0x1f0054 +}; + +static const u16 prs_prty1_k2_attn_idx[31] = { + 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, + 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, +}; + +static struct attn_hw_reg prs_prty1_k2 = { + 1, 31, prs_prty1_k2_attn_idx, 0x1f0204, 0x1f0210, 0x1f020c, 0x1f0208 +}; + +static const u16 prs_prty2_k2_attn_idx[31] = { + 56, 57, 58, 40, 41, 47, 38, 48, 50, 43, 46, 59, 60, 61, 62, 53, 54, 44, + 51, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, +}; + +static struct attn_hw_reg prs_prty2_k2 = { + 2, 31, prs_prty2_k2_attn_idx, 
0x1f0214, 0x1f0220, 0x1f021c, 0x1f0218 +}; + +static struct attn_hw_reg *prs_prty_k2_regs[3] = { + &prs_prty0_k2, &prs_prty1_k2, &prs_prty2_k2, +}; + +#ifdef ATTN_DESC +static const char *tsdm_int_attn_desc[28] = { + "tsdm_address_error", + "tsdm_inp_queue_error", + "tsdm_delay_fifo_error", + "tsdm_async_host_error", + "tsdm_prm_fifo_error", + "tsdm_ccfc_load_pend_error", + "tsdm_tcfc_load_pend_error", + "tsdm_dst_int_ram_wait_error", + "tsdm_dst_pas_buf_wait_error", + "tsdm_dst_pxp_immed_error", + "tsdm_dst_pxp_dst_pend_error", + "tsdm_dst_brb_src_pend_error", + "tsdm_dst_brb_src_addr_error", + "tsdm_rsp_brb_pend_error", + "tsdm_rsp_int_ram_pend_error", + "tsdm_rsp_brb_rd_data_error", + "tsdm_rsp_int_ram_rd_data_error", + "tsdm_rsp_pxp_rd_data_error", + "tsdm_cm_delay_error", + "tsdm_sh_delay_error", + "tsdm_cmpl_pend_error", + "tsdm_cprm_pend_error", + "tsdm_timer_addr_error", + "tsdm_timer_pend_error", + "tsdm_dorq_dpm_error", + "tsdm_dst_pxp_done_error", + "tsdm_xcm_rmt_buffer_error", + "tsdm_ycm_rmt_buffer_error", +}; +#else +#define tsdm_int_attn_desc OSAL_NULL +#endif + +static const u16 tsdm_int0_bb_a0_attn_idx[26] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, +}; + +static struct attn_hw_reg tsdm_int0_bb_a0 = { + 0, 26, tsdm_int0_bb_a0_attn_idx, 0xfb0040, 0xfb004c, 0xfb0048, 0xfb0044 +}; + +static struct attn_hw_reg *tsdm_int_bb_a0_regs[1] = { + &tsdm_int0_bb_a0, +}; + +static const u16 tsdm_int0_bb_b0_attn_idx[26] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, +}; + +static struct attn_hw_reg tsdm_int0_bb_b0 = { + 0, 26, tsdm_int0_bb_b0_attn_idx, 0xfb0040, 0xfb004c, 0xfb0048, 0xfb0044 +}; + +static struct attn_hw_reg *tsdm_int_bb_b0_regs[1] = { + &tsdm_int0_bb_b0, +}; + +static const u16 tsdm_int0_k2_attn_idx[28] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, 26, 27, +}; + +static struct attn_hw_reg tsdm_int0_k2 = { + 0, 28, tsdm_int0_k2_attn_idx, 0xfb0040, 0xfb004c, 0xfb0048, 0xfb0044 +}; + +static struct attn_hw_reg *tsdm_int_k2_regs[1] = { + &tsdm_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *tsdm_prty_attn_desc[10] = { + "tsdm_mem009_i_mem_prty", + "tsdm_mem008_i_mem_prty", + "tsdm_mem007_i_mem_prty", + "tsdm_mem006_i_mem_prty", + "tsdm_mem005_i_mem_prty", + "tsdm_mem002_i_mem_prty", + "tsdm_mem010_i_mem_prty", + "tsdm_mem001_i_mem_prty", + "tsdm_mem003_i_mem_prty", + "tsdm_mem004_i_mem_prty", +}; +#else +#define tsdm_prty_attn_desc OSAL_NULL +#endif + +static const u16 tsdm_prty1_bb_a0_attn_idx[10] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, +}; + +static struct attn_hw_reg tsdm_prty1_bb_a0 = { + 0, 10, tsdm_prty1_bb_a0_attn_idx, 0xfb0200, 0xfb020c, 0xfb0208, + 0xfb0204 +}; + +static struct attn_hw_reg *tsdm_prty_bb_a0_regs[1] = { + &tsdm_prty1_bb_a0, +}; + +static const u16 tsdm_prty1_bb_b0_attn_idx[10] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, +}; + +static struct attn_hw_reg tsdm_prty1_bb_b0 = { + 0, 10, tsdm_prty1_bb_b0_attn_idx, 0xfb0200, 0xfb020c, 0xfb0208, + 0xfb0204 +}; + +static struct attn_hw_reg *tsdm_prty_bb_b0_regs[1] = { + &tsdm_prty1_bb_b0, +}; + +static const u16 tsdm_prty1_k2_attn_idx[10] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, +}; + +static struct attn_hw_reg tsdm_prty1_k2 = { + 0, 10, tsdm_prty1_k2_attn_idx, 0xfb0200, 0xfb020c, 0xfb0208, 0xfb0204 +}; + +static struct attn_hw_reg *tsdm_prty_k2_regs[1] = { + &tsdm_prty1_k2, +}; + +#ifdef ATTN_DESC +static const char *msdm_int_attn_desc[28] = { + 
"msdm_address_error", + "msdm_inp_queue_error", + "msdm_delay_fifo_error", + "msdm_async_host_error", + "msdm_prm_fifo_error", + "msdm_ccfc_load_pend_error", + "msdm_tcfc_load_pend_error", + "msdm_dst_int_ram_wait_error", + "msdm_dst_pas_buf_wait_error", + "msdm_dst_pxp_immed_error", + "msdm_dst_pxp_dst_pend_error", + "msdm_dst_brb_src_pend_error", + "msdm_dst_brb_src_addr_error", + "msdm_rsp_brb_pend_error", + "msdm_rsp_int_ram_pend_error", + "msdm_rsp_brb_rd_data_error", + "msdm_rsp_int_ram_rd_data_error", + "msdm_rsp_pxp_rd_data_error", + "msdm_cm_delay_error", + "msdm_sh_delay_error", + "msdm_cmpl_pend_error", + "msdm_cprm_pend_error", + "msdm_timer_addr_error", + "msdm_timer_pend_error", + "msdm_dorq_dpm_error", + "msdm_dst_pxp_done_error", + "msdm_xcm_rmt_buffer_error", + "msdm_ycm_rmt_buffer_error", +}; +#else +#define msdm_int_attn_desc OSAL_NULL +#endif + +static const u16 msdm_int0_bb_a0_attn_idx[26] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, +}; + +static struct attn_hw_reg msdm_int0_bb_a0 = { + 0, 26, msdm_int0_bb_a0_attn_idx, 0xfc0040, 0xfc004c, 0xfc0048, 0xfc0044 +}; + +static struct attn_hw_reg *msdm_int_bb_a0_regs[1] = { + &msdm_int0_bb_a0, +}; + +static const u16 msdm_int0_bb_b0_attn_idx[26] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, +}; + +static struct attn_hw_reg msdm_int0_bb_b0 = { + 0, 26, msdm_int0_bb_b0_attn_idx, 0xfc0040, 0xfc004c, 0xfc0048, 0xfc0044 +}; + +static struct attn_hw_reg *msdm_int_bb_b0_regs[1] = { + &msdm_int0_bb_b0, +}; + +static const u16 msdm_int0_k2_attn_idx[28] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, 26, 27, +}; + +static struct attn_hw_reg msdm_int0_k2 = { + 0, 28, msdm_int0_k2_attn_idx, 0xfc0040, 0xfc004c, 0xfc0048, 0xfc0044 +}; + +static struct attn_hw_reg *msdm_int_k2_regs[1] = { + &msdm_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *msdm_prty_attn_desc[11] = { + "msdm_mem009_i_mem_prty", + "msdm_mem008_i_mem_prty", + "msdm_mem007_i_mem_prty", + "msdm_mem006_i_mem_prty", + "msdm_mem005_i_mem_prty", + "msdm_mem002_i_mem_prty", + "msdm_mem011_i_mem_prty", + "msdm_mem001_i_mem_prty", + "msdm_mem003_i_mem_prty", + "msdm_mem004_i_mem_prty", + "msdm_mem010_i_mem_prty", +}; +#else +#define msdm_prty_attn_desc OSAL_NULL +#endif + +static const u16 msdm_prty1_bb_a0_attn_idx[11] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, +}; + +static struct attn_hw_reg msdm_prty1_bb_a0 = { + 0, 11, msdm_prty1_bb_a0_attn_idx, 0xfc0200, 0xfc020c, 0xfc0208, + 0xfc0204 +}; + +static struct attn_hw_reg *msdm_prty_bb_a0_regs[1] = { + &msdm_prty1_bb_a0, +}; + +static const u16 msdm_prty1_bb_b0_attn_idx[11] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, +}; + +static struct attn_hw_reg msdm_prty1_bb_b0 = { + 0, 11, msdm_prty1_bb_b0_attn_idx, 0xfc0200, 0xfc020c, 0xfc0208, + 0xfc0204 +}; + +static struct attn_hw_reg *msdm_prty_bb_b0_regs[1] = { + &msdm_prty1_bb_b0, +}; + +static const u16 msdm_prty1_k2_attn_idx[11] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, +}; + +static struct attn_hw_reg msdm_prty1_k2 = { + 0, 11, msdm_prty1_k2_attn_idx, 0xfc0200, 0xfc020c, 0xfc0208, 0xfc0204 +}; + +static struct attn_hw_reg *msdm_prty_k2_regs[1] = { + &msdm_prty1_k2, +}; + +#ifdef ATTN_DESC +static const char *usdm_int_attn_desc[28] = { + "usdm_address_error", + "usdm_inp_queue_error", + "usdm_delay_fifo_error", + "usdm_async_host_error", + "usdm_prm_fifo_error", + "usdm_ccfc_load_pend_error", + 
"usdm_tcfc_load_pend_error", + "usdm_dst_int_ram_wait_error", + "usdm_dst_pas_buf_wait_error", + "usdm_dst_pxp_immed_error", + "usdm_dst_pxp_dst_pend_error", + "usdm_dst_brb_src_pend_error", + "usdm_dst_brb_src_addr_error", + "usdm_rsp_brb_pend_error", + "usdm_rsp_int_ram_pend_error", + "usdm_rsp_brb_rd_data_error", + "usdm_rsp_int_ram_rd_data_error", + "usdm_rsp_pxp_rd_data_error", + "usdm_cm_delay_error", + "usdm_sh_delay_error", + "usdm_cmpl_pend_error", + "usdm_cprm_pend_error", + "usdm_timer_addr_error", + "usdm_timer_pend_error", + "usdm_dorq_dpm_error", + "usdm_dst_pxp_done_error", + "usdm_xcm_rmt_buffer_error", + "usdm_ycm_rmt_buffer_error", +}; +#else +#define usdm_int_attn_desc OSAL_NULL +#endif + +static const u16 usdm_int0_bb_a0_attn_idx[26] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, +}; + +static struct attn_hw_reg usdm_int0_bb_a0 = { + 0, 26, usdm_int0_bb_a0_attn_idx, 0xfd0040, 0xfd004c, 0xfd0048, 0xfd0044 +}; + +static struct attn_hw_reg *usdm_int_bb_a0_regs[1] = { + &usdm_int0_bb_a0, +}; + +static const u16 usdm_int0_bb_b0_attn_idx[26] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, +}; + +static struct attn_hw_reg usdm_int0_bb_b0 = { + 0, 26, usdm_int0_bb_b0_attn_idx, 0xfd0040, 0xfd004c, 0xfd0048, 0xfd0044 +}; + +static struct attn_hw_reg *usdm_int_bb_b0_regs[1] = { + &usdm_int0_bb_b0, +}; + +static const u16 usdm_int0_k2_attn_idx[28] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, 26, 27, +}; + +static struct attn_hw_reg usdm_int0_k2 = { + 0, 28, usdm_int0_k2_attn_idx, 0xfd0040, 0xfd004c, 0xfd0048, 0xfd0044 +}; + +static struct attn_hw_reg *usdm_int_k2_regs[1] = { + &usdm_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *usdm_prty_attn_desc[10] = { + "usdm_mem008_i_mem_prty", + "usdm_mem007_i_mem_prty", + "usdm_mem006_i_mem_prty", + "usdm_mem005_i_mem_prty", + "usdm_mem002_i_mem_prty", + "usdm_mem010_i_mem_prty", + "usdm_mem001_i_mem_prty", + "usdm_mem003_i_mem_prty", + "usdm_mem004_i_mem_prty", + "usdm_mem009_i_mem_prty", +}; +#else +#define usdm_prty_attn_desc OSAL_NULL +#endif + +static const u16 usdm_prty1_bb_a0_attn_idx[10] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, +}; + +static struct attn_hw_reg usdm_prty1_bb_a0 = { + 0, 10, usdm_prty1_bb_a0_attn_idx, 0xfd0200, 0xfd020c, 0xfd0208, + 0xfd0204 +}; + +static struct attn_hw_reg *usdm_prty_bb_a0_regs[1] = { + &usdm_prty1_bb_a0, +}; + +static const u16 usdm_prty1_bb_b0_attn_idx[10] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, +}; + +static struct attn_hw_reg usdm_prty1_bb_b0 = { + 0, 10, usdm_prty1_bb_b0_attn_idx, 0xfd0200, 0xfd020c, 0xfd0208, + 0xfd0204 +}; + +static struct attn_hw_reg *usdm_prty_bb_b0_regs[1] = { + &usdm_prty1_bb_b0, +}; + +static const u16 usdm_prty1_k2_attn_idx[10] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, +}; + +static struct attn_hw_reg usdm_prty1_k2 = { + 0, 10, usdm_prty1_k2_attn_idx, 0xfd0200, 0xfd020c, 0xfd0208, 0xfd0204 +}; + +static struct attn_hw_reg *usdm_prty_k2_regs[1] = { + &usdm_prty1_k2, +}; + +#ifdef ATTN_DESC +static const char *xsdm_int_attn_desc[28] = { + "xsdm_address_error", + "xsdm_inp_queue_error", + "xsdm_delay_fifo_error", + "xsdm_async_host_error", + "xsdm_prm_fifo_error", + "xsdm_ccfc_load_pend_error", + "xsdm_tcfc_load_pend_error", + "xsdm_dst_int_ram_wait_error", + "xsdm_dst_pas_buf_wait_error", + "xsdm_dst_pxp_immed_error", + "xsdm_dst_pxp_dst_pend_error", + "xsdm_dst_brb_src_pend_error", + 
"xsdm_dst_brb_src_addr_error", + "xsdm_rsp_brb_pend_error", + "xsdm_rsp_int_ram_pend_error", + "xsdm_rsp_brb_rd_data_error", + "xsdm_rsp_int_ram_rd_data_error", + "xsdm_rsp_pxp_rd_data_error", + "xsdm_cm_delay_error", + "xsdm_sh_delay_error", + "xsdm_cmpl_pend_error", + "xsdm_cprm_pend_error", + "xsdm_timer_addr_error", + "xsdm_timer_pend_error", + "xsdm_dorq_dpm_error", + "xsdm_dst_pxp_done_error", + "xsdm_xcm_rmt_buffer_error", + "xsdm_ycm_rmt_buffer_error", +}; +#else +#define xsdm_int_attn_desc OSAL_NULL +#endif + +static const u16 xsdm_int0_bb_a0_attn_idx[26] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, +}; + +static struct attn_hw_reg xsdm_int0_bb_a0 = { + 0, 26, xsdm_int0_bb_a0_attn_idx, 0xf80040, 0xf8004c, 0xf80048, 0xf80044 +}; + +static struct attn_hw_reg *xsdm_int_bb_a0_regs[1] = { + &xsdm_int0_bb_a0, +}; + +static const u16 xsdm_int0_bb_b0_attn_idx[26] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, +}; + +static struct attn_hw_reg xsdm_int0_bb_b0 = { + 0, 26, xsdm_int0_bb_b0_attn_idx, 0xf80040, 0xf8004c, 0xf80048, 0xf80044 +}; + +static struct attn_hw_reg *xsdm_int_bb_b0_regs[1] = { + &xsdm_int0_bb_b0, +}; + +static const u16 xsdm_int0_k2_attn_idx[28] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, 26, 27, +}; + +static struct attn_hw_reg xsdm_int0_k2 = { + 0, 28, xsdm_int0_k2_attn_idx, 0xf80040, 0xf8004c, 0xf80048, 0xf80044 +}; + +static struct attn_hw_reg *xsdm_int_k2_regs[1] = { + &xsdm_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *xsdm_prty_attn_desc[10] = { + "xsdm_mem009_i_mem_prty", + "xsdm_mem008_i_mem_prty", + "xsdm_mem007_i_mem_prty", + "xsdm_mem006_i_mem_prty", + "xsdm_mem003_i_mem_prty", + "xsdm_mem010_i_mem_prty", + "xsdm_mem002_i_mem_prty", + "xsdm_mem004_i_mem_prty", + "xsdm_mem005_i_mem_prty", + "xsdm_mem001_i_mem_prty", +}; +#else +#define xsdm_prty_attn_desc OSAL_NULL +#endif + +static const u16 xsdm_prty1_bb_a0_attn_idx[10] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, +}; + +static struct attn_hw_reg xsdm_prty1_bb_a0 = { + 0, 10, xsdm_prty1_bb_a0_attn_idx, 0xf80200, 0xf8020c, 0xf80208, + 0xf80204 +}; + +static struct attn_hw_reg *xsdm_prty_bb_a0_regs[1] = { + &xsdm_prty1_bb_a0, +}; + +static const u16 xsdm_prty1_bb_b0_attn_idx[10] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, +}; + +static struct attn_hw_reg xsdm_prty1_bb_b0 = { + 0, 10, xsdm_prty1_bb_b0_attn_idx, 0xf80200, 0xf8020c, 0xf80208, + 0xf80204 +}; + +static struct attn_hw_reg *xsdm_prty_bb_b0_regs[1] = { + &xsdm_prty1_bb_b0, +}; + +static const u16 xsdm_prty1_k2_attn_idx[10] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, +}; + +static struct attn_hw_reg xsdm_prty1_k2 = { + 0, 10, xsdm_prty1_k2_attn_idx, 0xf80200, 0xf8020c, 0xf80208, 0xf80204 +}; + +static struct attn_hw_reg *xsdm_prty_k2_regs[1] = { + &xsdm_prty1_k2, +}; + +#ifdef ATTN_DESC +static const char *ysdm_int_attn_desc[28] = { + "ysdm_address_error", + "ysdm_inp_queue_error", + "ysdm_delay_fifo_error", + "ysdm_async_host_error", + "ysdm_prm_fifo_error", + "ysdm_ccfc_load_pend_error", + "ysdm_tcfc_load_pend_error", + "ysdm_dst_int_ram_wait_error", + "ysdm_dst_pas_buf_wait_error", + "ysdm_dst_pxp_immed_error", + "ysdm_dst_pxp_dst_pend_error", + "ysdm_dst_brb_src_pend_error", + "ysdm_dst_brb_src_addr_error", + "ysdm_rsp_brb_pend_error", + "ysdm_rsp_int_ram_pend_error", + "ysdm_rsp_brb_rd_data_error", + "ysdm_rsp_int_ram_rd_data_error", + "ysdm_rsp_pxp_rd_data_error", + "ysdm_cm_delay_error", + 
"ysdm_sh_delay_error", + "ysdm_cmpl_pend_error", + "ysdm_cprm_pend_error", + "ysdm_timer_addr_error", + "ysdm_timer_pend_error", + "ysdm_dorq_dpm_error", + "ysdm_dst_pxp_done_error", + "ysdm_xcm_rmt_buffer_error", + "ysdm_ycm_rmt_buffer_error", +}; +#else +#define ysdm_int_attn_desc OSAL_NULL +#endif + +static const u16 ysdm_int0_bb_a0_attn_idx[26] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, +}; + +static struct attn_hw_reg ysdm_int0_bb_a0 = { + 0, 26, ysdm_int0_bb_a0_attn_idx, 0xf90040, 0xf9004c, 0xf90048, 0xf90044 +}; + +static struct attn_hw_reg *ysdm_int_bb_a0_regs[1] = { + &ysdm_int0_bb_a0, +}; + +static const u16 ysdm_int0_bb_b0_attn_idx[26] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, +}; + +static struct attn_hw_reg ysdm_int0_bb_b0 = { + 0, 26, ysdm_int0_bb_b0_attn_idx, 0xf90040, 0xf9004c, 0xf90048, 0xf90044 +}; + +static struct attn_hw_reg *ysdm_int_bb_b0_regs[1] = { + &ysdm_int0_bb_b0, +}; + +static const u16 ysdm_int0_k2_attn_idx[28] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, 26, 27, +}; + +static struct attn_hw_reg ysdm_int0_k2 = { + 0, 28, ysdm_int0_k2_attn_idx, 0xf90040, 0xf9004c, 0xf90048, 0xf90044 +}; + +static struct attn_hw_reg *ysdm_int_k2_regs[1] = { + &ysdm_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *ysdm_prty_attn_desc[9] = { + "ysdm_mem008_i_mem_prty", + "ysdm_mem007_i_mem_prty", + "ysdm_mem006_i_mem_prty", + "ysdm_mem005_i_mem_prty", + "ysdm_mem002_i_mem_prty", + "ysdm_mem009_i_mem_prty", + "ysdm_mem001_i_mem_prty", + "ysdm_mem003_i_mem_prty", + "ysdm_mem004_i_mem_prty", +}; +#else +#define ysdm_prty_attn_desc OSAL_NULL +#endif + +static const u16 ysdm_prty1_bb_a0_attn_idx[9] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, +}; + +static struct attn_hw_reg ysdm_prty1_bb_a0 = { + 0, 9, ysdm_prty1_bb_a0_attn_idx, 0xf90200, 0xf9020c, 0xf90208, 0xf90204 +}; + +static struct attn_hw_reg *ysdm_prty_bb_a0_regs[1] = { + &ysdm_prty1_bb_a0, +}; + +static const u16 ysdm_prty1_bb_b0_attn_idx[9] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, +}; + +static struct attn_hw_reg ysdm_prty1_bb_b0 = { + 0, 9, ysdm_prty1_bb_b0_attn_idx, 0xf90200, 0xf9020c, 0xf90208, 0xf90204 +}; + +static struct attn_hw_reg *ysdm_prty_bb_b0_regs[1] = { + &ysdm_prty1_bb_b0, +}; + +static const u16 ysdm_prty1_k2_attn_idx[9] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, +}; + +static struct attn_hw_reg ysdm_prty1_k2 = { + 0, 9, ysdm_prty1_k2_attn_idx, 0xf90200, 0xf9020c, 0xf90208, 0xf90204 +}; + +static struct attn_hw_reg *ysdm_prty_k2_regs[1] = { + &ysdm_prty1_k2, +}; + +#ifdef ATTN_DESC +static const char *psdm_int_attn_desc[28] = { + "psdm_address_error", + "psdm_inp_queue_error", + "psdm_delay_fifo_error", + "psdm_async_host_error", + "psdm_prm_fifo_error", + "psdm_ccfc_load_pend_error", + "psdm_tcfc_load_pend_error", + "psdm_dst_int_ram_wait_error", + "psdm_dst_pas_buf_wait_error", + "psdm_dst_pxp_immed_error", + "psdm_dst_pxp_dst_pend_error", + "psdm_dst_brb_src_pend_error", + "psdm_dst_brb_src_addr_error", + "psdm_rsp_brb_pend_error", + "psdm_rsp_int_ram_pend_error", + "psdm_rsp_brb_rd_data_error", + "psdm_rsp_int_ram_rd_data_error", + "psdm_rsp_pxp_rd_data_error", + "psdm_cm_delay_error", + "psdm_sh_delay_error", + "psdm_cmpl_pend_error", + "psdm_cprm_pend_error", + "psdm_timer_addr_error", + "psdm_timer_pend_error", + "psdm_dorq_dpm_error", + "psdm_dst_pxp_done_error", + "psdm_xcm_rmt_buffer_error", + "psdm_ycm_rmt_buffer_error", +}; +#else +#define 
psdm_int_attn_desc OSAL_NULL +#endif + +static const u16 psdm_int0_bb_a0_attn_idx[26] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, +}; + +static struct attn_hw_reg psdm_int0_bb_a0 = { + 0, 26, psdm_int0_bb_a0_attn_idx, 0xfa0040, 0xfa004c, 0xfa0048, 0xfa0044 +}; + +static struct attn_hw_reg *psdm_int_bb_a0_regs[1] = { + &psdm_int0_bb_a0, +}; + +static const u16 psdm_int0_bb_b0_attn_idx[26] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, +}; + +static struct attn_hw_reg psdm_int0_bb_b0 = { + 0, 26, psdm_int0_bb_b0_attn_idx, 0xfa0040, 0xfa004c, 0xfa0048, 0xfa0044 +}; + +static struct attn_hw_reg *psdm_int_bb_b0_regs[1] = { + &psdm_int0_bb_b0, +}; + +static const u16 psdm_int0_k2_attn_idx[28] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, 26, 27, +}; + +static struct attn_hw_reg psdm_int0_k2 = { + 0, 28, psdm_int0_k2_attn_idx, 0xfa0040, 0xfa004c, 0xfa0048, 0xfa0044 +}; + +static struct attn_hw_reg *psdm_int_k2_regs[1] = { + &psdm_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *psdm_prty_attn_desc[9] = { + "psdm_mem008_i_mem_prty", + "psdm_mem007_i_mem_prty", + "psdm_mem006_i_mem_prty", + "psdm_mem005_i_mem_prty", + "psdm_mem002_i_mem_prty", + "psdm_mem009_i_mem_prty", + "psdm_mem001_i_mem_prty", + "psdm_mem003_i_mem_prty", + "psdm_mem004_i_mem_prty", +}; +#else +#define psdm_prty_attn_desc OSAL_NULL +#endif + +static const u16 psdm_prty1_bb_a0_attn_idx[9] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, +}; + +static struct attn_hw_reg psdm_prty1_bb_a0 = { + 0, 9, psdm_prty1_bb_a0_attn_idx, 0xfa0200, 0xfa020c, 0xfa0208, 0xfa0204 +}; + +static struct attn_hw_reg *psdm_prty_bb_a0_regs[1] = { + &psdm_prty1_bb_a0, +}; + +static const u16 psdm_prty1_bb_b0_attn_idx[9] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, +}; + +static struct attn_hw_reg psdm_prty1_bb_b0 = { + 0, 9, psdm_prty1_bb_b0_attn_idx, 0xfa0200, 0xfa020c, 0xfa0208, 0xfa0204 +}; + +static struct attn_hw_reg *psdm_prty_bb_b0_regs[1] = { + &psdm_prty1_bb_b0, +}; + +static const u16 psdm_prty1_k2_attn_idx[9] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, +}; + +static struct attn_hw_reg psdm_prty1_k2 = { + 0, 9, psdm_prty1_k2_attn_idx, 0xfa0200, 0xfa020c, 0xfa0208, 0xfa0204 +}; + +static struct attn_hw_reg *psdm_prty_k2_regs[1] = { + &psdm_prty1_k2, +}; + +#ifdef ATTN_DESC +static const char *tsem_int_attn_desc[46] = { + "tsem_address_error", + "tsem_fic_last_error", + "tsem_fic_length_error", + "tsem_fic_fifo_error", + "tsem_pas_buf_fifo_error", + "tsem_sync_fin_pop_error", + "tsem_sync_dra_wr_push_error", + "tsem_sync_dra_wr_pop_error", + "tsem_sync_dra_rd_push_error", + "tsem_sync_dra_rd_pop_error", + "tsem_sync_fin_push_error", + "tsem_sem_fast_address_error", + "tsem_cam_lsb_inp_fifo", + "tsem_cam_msb_inp_fifo", + "tsem_cam_out_fifo", + "tsem_fin_fifo", + "tsem_thread_fifo_error", + "tsem_thread_overrun", + "tsem_sync_ext_store_push_error", + "tsem_sync_ext_store_pop_error", + "tsem_sync_ext_load_push_error", + "tsem_sync_ext_load_pop_error", + "tsem_sync_ram_rd_push_error", + "tsem_sync_ram_rd_pop_error", + "tsem_sync_ram_wr_pop_error", + "tsem_sync_ram_wr_push_error", + "tsem_sync_dbg_push_error", + "tsem_sync_dbg_pop_error", + "tsem_dbg_fifo_error", + "tsem_cam_msb2_inp_fifo", + "tsem_vfc_interrupt", + "tsem_vfc_out_fifo_error", + "tsem_storm_stack_uf_attn", + "tsem_storm_stack_of_attn", + "tsem_storm_runtime_error", + "tsem_ext_load_pend_wr_error", + "tsem_thread_rls_orun_error", + 
"tsem_thread_rls_aloc_error", + "tsem_thread_rls_vld_error", + "tsem_ext_thread_oor_error", + "tsem_ord_id_fifo_error", + "tsem_invld_foc_error", + "tsem_ext_ld_len_error", + "tsem_thrd_ord_fifo_error", + "tsem_invld_thrd_ord_error", + "tsem_fast_memory_address_error", +}; +#else +#define tsem_int_attn_desc OSAL_NULL +#endif + +static const u16 tsem_int0_bb_a0_attn_idx[32] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, +}; + +static struct attn_hw_reg tsem_int0_bb_a0 = { + 0, 32, tsem_int0_bb_a0_attn_idx, 0x1700040, 0x170004c, 0x1700048, + 0x1700044 +}; + +static const u16 tsem_int1_bb_a0_attn_idx[13] = { + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, +}; + +static struct attn_hw_reg tsem_int1_bb_a0 = { + 1, 13, tsem_int1_bb_a0_attn_idx, 0x1700050, 0x170005c, 0x1700058, + 0x1700054 +}; + +static const u16 tsem_fast_memory_int0_bb_a0_attn_idx[1] = { + 45, +}; + +static struct attn_hw_reg tsem_fast_memory_int0_bb_a0 = { + 2, 1, tsem_fast_memory_int0_bb_a0_attn_idx, 0x1740040, 0x174004c, + 0x1740048, 0x1740044 +}; + +static struct attn_hw_reg *tsem_int_bb_a0_regs[3] = { + &tsem_int0_bb_a0, &tsem_int1_bb_a0, &tsem_fast_memory_int0_bb_a0, +}; + +static const u16 tsem_int0_bb_b0_attn_idx[32] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, +}; + +static struct attn_hw_reg tsem_int0_bb_b0 = { + 0, 32, tsem_int0_bb_b0_attn_idx, 0x1700040, 0x170004c, 0x1700048, + 0x1700044 +}; + +static const u16 tsem_int1_bb_b0_attn_idx[13] = { + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, +}; + +static struct attn_hw_reg tsem_int1_bb_b0 = { + 1, 13, tsem_int1_bb_b0_attn_idx, 0x1700050, 0x170005c, 0x1700058, + 0x1700054 +}; + +static const u16 tsem_fast_memory_int0_bb_b0_attn_idx[1] = { + 45, +}; + +static struct attn_hw_reg tsem_fast_memory_int0_bb_b0 = { + 2, 1, tsem_fast_memory_int0_bb_b0_attn_idx, 0x1740040, 0x174004c, + 0x1740048, 0x1740044 +}; + +static struct attn_hw_reg *tsem_int_bb_b0_regs[3] = { + &tsem_int0_bb_b0, &tsem_int1_bb_b0, &tsem_fast_memory_int0_bb_b0, +}; + +static const u16 tsem_int0_k2_attn_idx[32] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, +}; + +static struct attn_hw_reg tsem_int0_k2 = { + 0, 32, tsem_int0_k2_attn_idx, 0x1700040, 0x170004c, 0x1700048, + 0x1700044 +}; + +static const u16 tsem_int1_k2_attn_idx[13] = { + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, +}; + +static struct attn_hw_reg tsem_int1_k2 = { + 1, 13, tsem_int1_k2_attn_idx, 0x1700050, 0x170005c, 0x1700058, + 0x1700054 +}; + +static const u16 tsem_fast_memory_int0_k2_attn_idx[1] = { + 45, +}; + +static struct attn_hw_reg tsem_fast_memory_int0_k2 = { + 2, 1, tsem_fast_memory_int0_k2_attn_idx, 0x1740040, 0x174004c, + 0x1740048, + 0x1740044 +}; + +static struct attn_hw_reg *tsem_int_k2_regs[3] = { + &tsem_int0_k2, &tsem_int1_k2, &tsem_fast_memory_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *tsem_prty_attn_desc[23] = { + "tsem_vfc_rbc_parity_error", + "tsem_storm_rf_parity_error", + "tsem_reg_gen_parity_error", + "tsem_mem005_i_ecc_0_rf_int", + "tsem_mem005_i_ecc_1_rf_int", + "tsem_mem004_i_mem_prty", + "tsem_mem002_i_mem_prty", + "tsem_mem003_i_mem_prty", + "tsem_mem001_i_mem_prty", + "tsem_fast_memory_mem024_i_mem_prty", + "tsem_fast_memory_mem023_i_mem_prty", + "tsem_fast_memory_mem022_i_mem_prty", + "tsem_fast_memory_mem021_i_mem_prty", + 
"tsem_fast_memory_mem020_i_mem_prty", + "tsem_fast_memory_mem019_i_mem_prty", + "tsem_fast_memory_mem018_i_mem_prty", + "tsem_fast_memory_vfc_config_mem005_i_ecc_rf_int", + "tsem_fast_memory_vfc_config_mem002_i_ecc_rf_int", + "tsem_fast_memory_vfc_config_mem006_i_mem_prty", + "tsem_fast_memory_vfc_config_mem001_i_mem_prty", + "tsem_fast_memory_vfc_config_mem004_i_mem_prty", + "tsem_fast_memory_vfc_config_mem003_i_mem_prty", + "tsem_fast_memory_vfc_config_mem007_i_mem_prty", +}; +#else +#define tsem_prty_attn_desc OSAL_NULL +#endif + +static const u16 tsem_prty0_bb_a0_attn_idx[3] = { + 0, 1, 2, +}; + +static struct attn_hw_reg tsem_prty0_bb_a0 = { + 0, 3, tsem_prty0_bb_a0_attn_idx, 0x17000c8, 0x17000d4, 0x17000d0, + 0x17000cc +}; + +static const u16 tsem_prty1_bb_a0_attn_idx[6] = { + 3, 4, 5, 6, 7, 8, +}; + +static struct attn_hw_reg tsem_prty1_bb_a0 = { + 1, 6, tsem_prty1_bb_a0_attn_idx, 0x1700200, 0x170020c, 0x1700208, + 0x1700204 +}; + +static const u16 tsem_fast_memory_vfc_config_prty1_bb_a0_attn_idx[6] = { + 16, 17, 19, 20, 21, 22, +}; + +static struct attn_hw_reg tsem_fast_memory_vfc_config_prty1_bb_a0 = { + 2, 6, tsem_fast_memory_vfc_config_prty1_bb_a0_attn_idx, 0x174a200, + 0x174a20c, 0x174a208, 0x174a204 +}; + +static struct attn_hw_reg *tsem_prty_bb_a0_regs[3] = { + &tsem_prty0_bb_a0, &tsem_prty1_bb_a0, + &tsem_fast_memory_vfc_config_prty1_bb_a0, +}; + +static const u16 tsem_prty0_bb_b0_attn_idx[3] = { + 0, 1, 2, +}; + +static struct attn_hw_reg tsem_prty0_bb_b0 = { + 0, 3, tsem_prty0_bb_b0_attn_idx, 0x17000c8, 0x17000d4, 0x17000d0, + 0x17000cc +}; + +static const u16 tsem_prty1_bb_b0_attn_idx[6] = { + 3, 4, 5, 6, 7, 8, +}; + +static struct attn_hw_reg tsem_prty1_bb_b0 = { + 1, 6, tsem_prty1_bb_b0_attn_idx, 0x1700200, 0x170020c, 0x1700208, + 0x1700204 +}; + +static const u16 tsem_fast_memory_vfc_config_prty1_bb_b0_attn_idx[6] = { + 16, 17, 19, 20, 21, 22, +}; + +static struct attn_hw_reg tsem_fast_memory_vfc_config_prty1_bb_b0 = { + 2, 6, tsem_fast_memory_vfc_config_prty1_bb_b0_attn_idx, 0x174a200, + 0x174a20c, 0x174a208, 0x174a204 +}; + +static struct attn_hw_reg *tsem_prty_bb_b0_regs[3] = { + &tsem_prty0_bb_b0, &tsem_prty1_bb_b0, + &tsem_fast_memory_vfc_config_prty1_bb_b0, +}; + +static const u16 tsem_prty0_k2_attn_idx[3] = { + 0, 1, 2, +}; + +static struct attn_hw_reg tsem_prty0_k2 = { + 0, 3, tsem_prty0_k2_attn_idx, 0x17000c8, 0x17000d4, 0x17000d0, + 0x17000cc +}; + +static const u16 tsem_prty1_k2_attn_idx[6] = { + 3, 4, 5, 6, 7, 8, +}; + +static struct attn_hw_reg tsem_prty1_k2 = { + 1, 6, tsem_prty1_k2_attn_idx, 0x1700200, 0x170020c, 0x1700208, + 0x1700204 +}; + +static const u16 tsem_fast_memory_prty1_k2_attn_idx[7] = { + 9, 10, 11, 12, 13, 14, 15, +}; + +static struct attn_hw_reg tsem_fast_memory_prty1_k2 = { + 2, 7, tsem_fast_memory_prty1_k2_attn_idx, 0x1740200, 0x174020c, + 0x1740208, + 0x1740204 +}; + +static const u16 tsem_fast_memory_vfc_config_prty1_k2_attn_idx[6] = { + 16, 17, 18, 19, 20, 21, +}; + +static struct attn_hw_reg tsem_fast_memory_vfc_config_prty1_k2 = { + 3, 6, tsem_fast_memory_vfc_config_prty1_k2_attn_idx, 0x174a200, + 0x174a20c, + 0x174a208, 0x174a204 +}; + +static struct attn_hw_reg *tsem_prty_k2_regs[4] = { + &tsem_prty0_k2, &tsem_prty1_k2, &tsem_fast_memory_prty1_k2, + &tsem_fast_memory_vfc_config_prty1_k2, +}; + +#ifdef ATTN_DESC +static const char *msem_int_attn_desc[46] = { + "msem_address_error", + "msem_fic_last_error", + "msem_fic_length_error", + "msem_fic_fifo_error", + "msem_pas_buf_fifo_error", + "msem_sync_fin_pop_error", + 
"msem_sync_dra_wr_push_error", + "msem_sync_dra_wr_pop_error", + "msem_sync_dra_rd_push_error", + "msem_sync_dra_rd_pop_error", + "msem_sync_fin_push_error", + "msem_sem_fast_address_error", + "msem_cam_lsb_inp_fifo", + "msem_cam_msb_inp_fifo", + "msem_cam_out_fifo", + "msem_fin_fifo", + "msem_thread_fifo_error", + "msem_thread_overrun", + "msem_sync_ext_store_push_error", + "msem_sync_ext_store_pop_error", + "msem_sync_ext_load_push_error", + "msem_sync_ext_load_pop_error", + "msem_sync_ram_rd_push_error", + "msem_sync_ram_rd_pop_error", + "msem_sync_ram_wr_pop_error", + "msem_sync_ram_wr_push_error", + "msem_sync_dbg_push_error", + "msem_sync_dbg_pop_error", + "msem_dbg_fifo_error", + "msem_cam_msb2_inp_fifo", + "msem_vfc_interrupt", + "msem_vfc_out_fifo_error", + "msem_storm_stack_uf_attn", + "msem_storm_stack_of_attn", + "msem_storm_runtime_error", + "msem_ext_load_pend_wr_error", + "msem_thread_rls_orun_error", + "msem_thread_rls_aloc_error", + "msem_thread_rls_vld_error", + "msem_ext_thread_oor_error", + "msem_ord_id_fifo_error", + "msem_invld_foc_error", + "msem_ext_ld_len_error", + "msem_thrd_ord_fifo_error", + "msem_invld_thrd_ord_error", + "msem_fast_memory_address_error", +}; +#else +#define msem_int_attn_desc OSAL_NULL +#endif + +static const u16 msem_int0_bb_a0_attn_idx[32] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, +}; + +static struct attn_hw_reg msem_int0_bb_a0 = { + 0, 32, msem_int0_bb_a0_attn_idx, 0x1800040, 0x180004c, 0x1800048, + 0x1800044 +}; + +static const u16 msem_int1_bb_a0_attn_idx[13] = { + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, +}; + +static struct attn_hw_reg msem_int1_bb_a0 = { + 1, 13, msem_int1_bb_a0_attn_idx, 0x1800050, 0x180005c, 0x1800058, + 0x1800054 +}; + +static const u16 msem_fast_memory_int0_bb_a0_attn_idx[1] = { + 45, +}; + +static struct attn_hw_reg msem_fast_memory_int0_bb_a0 = { + 2, 1, msem_fast_memory_int0_bb_a0_attn_idx, 0x1840040, 0x184004c, + 0x1840048, 0x1840044 +}; + +static struct attn_hw_reg *msem_int_bb_a0_regs[3] = { + &msem_int0_bb_a0, &msem_int1_bb_a0, &msem_fast_memory_int0_bb_a0, +}; + +static const u16 msem_int0_bb_b0_attn_idx[32] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, +}; + +static struct attn_hw_reg msem_int0_bb_b0 = { + 0, 32, msem_int0_bb_b0_attn_idx, 0x1800040, 0x180004c, 0x1800048, + 0x1800044 +}; + +static const u16 msem_int1_bb_b0_attn_idx[13] = { + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, +}; + +static struct attn_hw_reg msem_int1_bb_b0 = { + 1, 13, msem_int1_bb_b0_attn_idx, 0x1800050, 0x180005c, 0x1800058, + 0x1800054 +}; + +static const u16 msem_fast_memory_int0_bb_b0_attn_idx[1] = { + 45, +}; + +static struct attn_hw_reg msem_fast_memory_int0_bb_b0 = { + 2, 1, msem_fast_memory_int0_bb_b0_attn_idx, 0x1840040, 0x184004c, + 0x1840048, 0x1840044 +}; + +static struct attn_hw_reg *msem_int_bb_b0_regs[3] = { + &msem_int0_bb_b0, &msem_int1_bb_b0, &msem_fast_memory_int0_bb_b0, +}; + +static const u16 msem_int0_k2_attn_idx[32] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, +}; + +static struct attn_hw_reg msem_int0_k2 = { + 0, 32, msem_int0_k2_attn_idx, 0x1800040, 0x180004c, 0x1800048, + 0x1800044 +}; + +static const u16 msem_int1_k2_attn_idx[13] = { + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, +}; + +static struct attn_hw_reg msem_int1_k2 = { + 
1, 13, msem_int1_k2_attn_idx, 0x1800050, 0x180005c, 0x1800058, + 0x1800054 +}; + +static const u16 msem_fast_memory_int0_k2_attn_idx[1] = { + 45, +}; + +static struct attn_hw_reg msem_fast_memory_int0_k2 = { + 2, 1, msem_fast_memory_int0_k2_attn_idx, 0x1840040, 0x184004c, + 0x1840048, + 0x1840044 +}; + +static struct attn_hw_reg *msem_int_k2_regs[3] = { + &msem_int0_k2, &msem_int1_k2, &msem_fast_memory_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *msem_prty_attn_desc[23] = { + "msem_vfc_rbc_parity_error", + "msem_storm_rf_parity_error", + "msem_reg_gen_parity_error", + "msem_mem005_i_ecc_0_rf_int", + "msem_mem005_i_ecc_1_rf_int", + "msem_mem004_i_mem_prty", + "msem_mem002_i_mem_prty", + "msem_mem003_i_mem_prty", + "msem_mem001_i_mem_prty", + "msem_fast_memory_mem024_i_mem_prty", + "msem_fast_memory_mem023_i_mem_prty", + "msem_fast_memory_mem022_i_mem_prty", + "msem_fast_memory_mem021_i_mem_prty", + "msem_fast_memory_mem020_i_mem_prty", + "msem_fast_memory_mem019_i_mem_prty", + "msem_fast_memory_mem018_i_mem_prty", + "msem_fast_memory_vfc_config_mem005_i_ecc_rf_int", + "msem_fast_memory_vfc_config_mem002_i_ecc_rf_int", + "msem_fast_memory_vfc_config_mem006_i_mem_prty", + "msem_fast_memory_vfc_config_mem001_i_mem_prty", + "msem_fast_memory_vfc_config_mem004_i_mem_prty", + "msem_fast_memory_vfc_config_mem003_i_mem_prty", + "msem_fast_memory_vfc_config_mem007_i_mem_prty", +}; +#else +#define msem_prty_attn_desc OSAL_NULL +#endif + +static const u16 msem_prty0_bb_a0_attn_idx[3] = { + 0, 1, 2, +}; + +static struct attn_hw_reg msem_prty0_bb_a0 = { + 0, 3, msem_prty0_bb_a0_attn_idx, 0x18000c8, 0x18000d4, 0x18000d0, + 0x18000cc +}; + +static const u16 msem_prty1_bb_a0_attn_idx[6] = { + 3, 4, 5, 6, 7, 8, +}; + +static struct attn_hw_reg msem_prty1_bb_a0 = { + 1, 6, msem_prty1_bb_a0_attn_idx, 0x1800200, 0x180020c, 0x1800208, + 0x1800204 +}; + +static struct attn_hw_reg *msem_prty_bb_a0_regs[2] = { + &msem_prty0_bb_a0, &msem_prty1_bb_a0, +}; + +static const u16 msem_prty0_bb_b0_attn_idx[3] = { + 0, 1, 2, +}; + +static struct attn_hw_reg msem_prty0_bb_b0 = { + 0, 3, msem_prty0_bb_b0_attn_idx, 0x18000c8, 0x18000d4, 0x18000d0, + 0x18000cc +}; + +static const u16 msem_prty1_bb_b0_attn_idx[6] = { + 3, 4, 5, 6, 7, 8, +}; + +static struct attn_hw_reg msem_prty1_bb_b0 = { + 1, 6, msem_prty1_bb_b0_attn_idx, 0x1800200, 0x180020c, 0x1800208, + 0x1800204 +}; + +static struct attn_hw_reg *msem_prty_bb_b0_regs[2] = { + &msem_prty0_bb_b0, &msem_prty1_bb_b0, +}; + +static const u16 msem_prty0_k2_attn_idx[3] = { + 0, 1, 2, +}; + +static struct attn_hw_reg msem_prty0_k2 = { + 0, 3, msem_prty0_k2_attn_idx, 0x18000c8, 0x18000d4, 0x18000d0, + 0x18000cc +}; + +static const u16 msem_prty1_k2_attn_idx[6] = { + 3, 4, 5, 6, 7, 8, +}; + +static struct attn_hw_reg msem_prty1_k2 = { + 1, 6, msem_prty1_k2_attn_idx, 0x1800200, 0x180020c, 0x1800208, + 0x1800204 +}; + +static const u16 msem_fast_memory_prty1_k2_attn_idx[7] = { + 9, 10, 11, 12, 13, 14, 15, +}; + +static struct attn_hw_reg msem_fast_memory_prty1_k2 = { + 2, 7, msem_fast_memory_prty1_k2_attn_idx, 0x1840200, 0x184020c, + 0x1840208, + 0x1840204 +}; + +static struct attn_hw_reg *msem_prty_k2_regs[3] = { + &msem_prty0_k2, &msem_prty1_k2, &msem_fast_memory_prty1_k2, +}; + +#ifdef ATTN_DESC +static const char *usem_int_attn_desc[46] = { + "usem_address_error", + "usem_fic_last_error", + "usem_fic_length_error", + "usem_fic_fifo_error", + "usem_pas_buf_fifo_error", + "usem_sync_fin_pop_error", + "usem_sync_dra_wr_push_error", + "usem_sync_dra_wr_pop_error", + 
"usem_sync_dra_rd_push_error", + "usem_sync_dra_rd_pop_error", + "usem_sync_fin_push_error", + "usem_sem_fast_address_error", + "usem_cam_lsb_inp_fifo", + "usem_cam_msb_inp_fifo", + "usem_cam_out_fifo", + "usem_fin_fifo", + "usem_thread_fifo_error", + "usem_thread_overrun", + "usem_sync_ext_store_push_error", + "usem_sync_ext_store_pop_error", + "usem_sync_ext_load_push_error", + "usem_sync_ext_load_pop_error", + "usem_sync_ram_rd_push_error", + "usem_sync_ram_rd_pop_error", + "usem_sync_ram_wr_pop_error", + "usem_sync_ram_wr_push_error", + "usem_sync_dbg_push_error", + "usem_sync_dbg_pop_error", + "usem_dbg_fifo_error", + "usem_cam_msb2_inp_fifo", + "usem_vfc_interrupt", + "usem_vfc_out_fifo_error", + "usem_storm_stack_uf_attn", + "usem_storm_stack_of_attn", + "usem_storm_runtime_error", + "usem_ext_load_pend_wr_error", + "usem_thread_rls_orun_error", + "usem_thread_rls_aloc_error", + "usem_thread_rls_vld_error", + "usem_ext_thread_oor_error", + "usem_ord_id_fifo_error", + "usem_invld_foc_error", + "usem_ext_ld_len_error", + "usem_thrd_ord_fifo_error", + "usem_invld_thrd_ord_error", + "usem_fast_memory_address_error", +}; +#else +#define usem_int_attn_desc OSAL_NULL +#endif + +static const u16 usem_int0_bb_a0_attn_idx[32] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, +}; + +static struct attn_hw_reg usem_int0_bb_a0 = { + 0, 32, usem_int0_bb_a0_attn_idx, 0x1900040, 0x190004c, 0x1900048, + 0x1900044 +}; + +static const u16 usem_int1_bb_a0_attn_idx[13] = { + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, +}; + +static struct attn_hw_reg usem_int1_bb_a0 = { + 1, 13, usem_int1_bb_a0_attn_idx, 0x1900050, 0x190005c, 0x1900058, + 0x1900054 +}; + +static const u16 usem_fast_memory_int0_bb_a0_attn_idx[1] = { + 45, +}; + +static struct attn_hw_reg usem_fast_memory_int0_bb_a0 = { + 2, 1, usem_fast_memory_int0_bb_a0_attn_idx, 0x1940040, 0x194004c, + 0x1940048, 0x1940044 +}; + +static struct attn_hw_reg *usem_int_bb_a0_regs[3] = { + &usem_int0_bb_a0, &usem_int1_bb_a0, &usem_fast_memory_int0_bb_a0, +}; + +static const u16 usem_int0_bb_b0_attn_idx[32] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, +}; + +static struct attn_hw_reg usem_int0_bb_b0 = { + 0, 32, usem_int0_bb_b0_attn_idx, 0x1900040, 0x190004c, 0x1900048, + 0x1900044 +}; + +static const u16 usem_int1_bb_b0_attn_idx[13] = { + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, +}; + +static struct attn_hw_reg usem_int1_bb_b0 = { + 1, 13, usem_int1_bb_b0_attn_idx, 0x1900050, 0x190005c, 0x1900058, + 0x1900054 +}; + +static const u16 usem_fast_memory_int0_bb_b0_attn_idx[1] = { + 45, +}; + +static struct attn_hw_reg usem_fast_memory_int0_bb_b0 = { + 2, 1, usem_fast_memory_int0_bb_b0_attn_idx, 0x1940040, 0x194004c, + 0x1940048, 0x1940044 +}; + +static struct attn_hw_reg *usem_int_bb_b0_regs[3] = { + &usem_int0_bb_b0, &usem_int1_bb_b0, &usem_fast_memory_int0_bb_b0, +}; + +static const u16 usem_int0_k2_attn_idx[32] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, +}; + +static struct attn_hw_reg usem_int0_k2 = { + 0, 32, usem_int0_k2_attn_idx, 0x1900040, 0x190004c, 0x1900048, + 0x1900044 +}; + +static const u16 usem_int1_k2_attn_idx[13] = { + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, +}; + +static struct attn_hw_reg usem_int1_k2 = { + 1, 13, usem_int1_k2_attn_idx, 0x1900050, 0x190005c, 0x1900058, + 
0x1900054 +}; + +static const u16 usem_fast_memory_int0_k2_attn_idx[1] = { + 45, +}; + +static struct attn_hw_reg usem_fast_memory_int0_k2 = { + 2, 1, usem_fast_memory_int0_k2_attn_idx, 0x1940040, 0x194004c, + 0x1940048, + 0x1940044 +}; + +static struct attn_hw_reg *usem_int_k2_regs[3] = { + &usem_int0_k2, &usem_int1_k2, &usem_fast_memory_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *usem_prty_attn_desc[23] = { + "usem_vfc_rbc_parity_error", + "usem_storm_rf_parity_error", + "usem_reg_gen_parity_error", + "usem_mem005_i_ecc_0_rf_int", + "usem_mem005_i_ecc_1_rf_int", + "usem_mem004_i_mem_prty", + "usem_mem002_i_mem_prty", + "usem_mem003_i_mem_prty", + "usem_mem001_i_mem_prty", + "usem_fast_memory_mem024_i_mem_prty", + "usem_fast_memory_mem023_i_mem_prty", + "usem_fast_memory_mem022_i_mem_prty", + "usem_fast_memory_mem021_i_mem_prty", + "usem_fast_memory_mem020_i_mem_prty", + "usem_fast_memory_mem019_i_mem_prty", + "usem_fast_memory_mem018_i_mem_prty", + "usem_fast_memory_vfc_config_mem005_i_ecc_rf_int", + "usem_fast_memory_vfc_config_mem002_i_ecc_rf_int", + "usem_fast_memory_vfc_config_mem006_i_mem_prty", + "usem_fast_memory_vfc_config_mem001_i_mem_prty", + "usem_fast_memory_vfc_config_mem004_i_mem_prty", + "usem_fast_memory_vfc_config_mem003_i_mem_prty", + "usem_fast_memory_vfc_config_mem007_i_mem_prty", +}; +#else +#define usem_prty_attn_desc OSAL_NULL +#endif + +static const u16 usem_prty0_bb_a0_attn_idx[3] = { + 0, 1, 2, +}; + +static struct attn_hw_reg usem_prty0_bb_a0 = { + 0, 3, usem_prty0_bb_a0_attn_idx, 0x19000c8, 0x19000d4, 0x19000d0, + 0x19000cc +}; + +static const u16 usem_prty1_bb_a0_attn_idx[6] = { + 3, 4, 5, 6, 7, 8, +}; + +static struct attn_hw_reg usem_prty1_bb_a0 = { + 1, 6, usem_prty1_bb_a0_attn_idx, 0x1900200, 0x190020c, 0x1900208, + 0x1900204 +}; + +static struct attn_hw_reg *usem_prty_bb_a0_regs[2] = { + &usem_prty0_bb_a0, &usem_prty1_bb_a0, +}; + +static const u16 usem_prty0_bb_b0_attn_idx[3] = { + 0, 1, 2, +}; + +static struct attn_hw_reg usem_prty0_bb_b0 = { + 0, 3, usem_prty0_bb_b0_attn_idx, 0x19000c8, 0x19000d4, 0x19000d0, + 0x19000cc +}; + +static const u16 usem_prty1_bb_b0_attn_idx[6] = { + 3, 4, 5, 6, 7, 8, +}; + +static struct attn_hw_reg usem_prty1_bb_b0 = { + 1, 6, usem_prty1_bb_b0_attn_idx, 0x1900200, 0x190020c, 0x1900208, + 0x1900204 +}; + +static struct attn_hw_reg *usem_prty_bb_b0_regs[2] = { + &usem_prty0_bb_b0, &usem_prty1_bb_b0, +}; + +static const u16 usem_prty0_k2_attn_idx[3] = { + 0, 1, 2, +}; + +static struct attn_hw_reg usem_prty0_k2 = { + 0, 3, usem_prty0_k2_attn_idx, 0x19000c8, 0x19000d4, 0x19000d0, + 0x19000cc +}; + +static const u16 usem_prty1_k2_attn_idx[6] = { + 3, 4, 5, 6, 7, 8, +}; + +static struct attn_hw_reg usem_prty1_k2 = { + 1, 6, usem_prty1_k2_attn_idx, 0x1900200, 0x190020c, 0x1900208, + 0x1900204 +}; + +static const u16 usem_fast_memory_prty1_k2_attn_idx[7] = { + 9, 10, 11, 12, 13, 14, 15, +}; + +static struct attn_hw_reg usem_fast_memory_prty1_k2 = { + 2, 7, usem_fast_memory_prty1_k2_attn_idx, 0x1940200, 0x194020c, + 0x1940208, + 0x1940204 +}; + +static struct attn_hw_reg *usem_prty_k2_regs[3] = { + &usem_prty0_k2, &usem_prty1_k2, &usem_fast_memory_prty1_k2, +}; + +#ifdef ATTN_DESC +static const char *xsem_int_attn_desc[46] = { + "xsem_address_error", + "xsem_fic_last_error", + "xsem_fic_length_error", + "xsem_fic_fifo_error", + "xsem_pas_buf_fifo_error", + "xsem_sync_fin_pop_error", + "xsem_sync_dra_wr_push_error", + "xsem_sync_dra_wr_pop_error", + "xsem_sync_dra_rd_push_error", + "xsem_sync_dra_rd_pop_error", + 
"xsem_sync_fin_push_error", + "xsem_sem_fast_address_error", + "xsem_cam_lsb_inp_fifo", + "xsem_cam_msb_inp_fifo", + "xsem_cam_out_fifo", + "xsem_fin_fifo", + "xsem_thread_fifo_error", + "xsem_thread_overrun", + "xsem_sync_ext_store_push_error", + "xsem_sync_ext_store_pop_error", + "xsem_sync_ext_load_push_error", + "xsem_sync_ext_load_pop_error", + "xsem_sync_ram_rd_push_error", + "xsem_sync_ram_rd_pop_error", + "xsem_sync_ram_wr_pop_error", + "xsem_sync_ram_wr_push_error", + "xsem_sync_dbg_push_error", + "xsem_sync_dbg_pop_error", + "xsem_dbg_fifo_error", + "xsem_cam_msb2_inp_fifo", + "xsem_vfc_interrupt", + "xsem_vfc_out_fifo_error", + "xsem_storm_stack_uf_attn", + "xsem_storm_stack_of_attn", + "xsem_storm_runtime_error", + "xsem_ext_load_pend_wr_error", + "xsem_thread_rls_orun_error", + "xsem_thread_rls_aloc_error", + "xsem_thread_rls_vld_error", + "xsem_ext_thread_oor_error", + "xsem_ord_id_fifo_error", + "xsem_invld_foc_error", + "xsem_ext_ld_len_error", + "xsem_thrd_ord_fifo_error", + "xsem_invld_thrd_ord_error", + "xsem_fast_memory_address_error", +}; +#else +#define xsem_int_attn_desc OSAL_NULL +#endif + +static const u16 xsem_int0_bb_a0_attn_idx[32] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, +}; + +static struct attn_hw_reg xsem_int0_bb_a0 = { + 0, 32, xsem_int0_bb_a0_attn_idx, 0x1400040, 0x140004c, 0x1400048, + 0x1400044 +}; + +static const u16 xsem_int1_bb_a0_attn_idx[13] = { + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, +}; + +static struct attn_hw_reg xsem_int1_bb_a0 = { + 1, 13, xsem_int1_bb_a0_attn_idx, 0x1400050, 0x140005c, 0x1400058, + 0x1400054 +}; + +static const u16 xsem_fast_memory_int0_bb_a0_attn_idx[1] = { + 45, +}; + +static struct attn_hw_reg xsem_fast_memory_int0_bb_a0 = { + 2, 1, xsem_fast_memory_int0_bb_a0_attn_idx, 0x1440040, 0x144004c, + 0x1440048, 0x1440044 +}; + +static struct attn_hw_reg *xsem_int_bb_a0_regs[3] = { + &xsem_int0_bb_a0, &xsem_int1_bb_a0, &xsem_fast_memory_int0_bb_a0, +}; + +static const u16 xsem_int0_bb_b0_attn_idx[32] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, +}; + +static struct attn_hw_reg xsem_int0_bb_b0 = { + 0, 32, xsem_int0_bb_b0_attn_idx, 0x1400040, 0x140004c, 0x1400048, + 0x1400044 +}; + +static const u16 xsem_int1_bb_b0_attn_idx[13] = { + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, +}; + +static struct attn_hw_reg xsem_int1_bb_b0 = { + 1, 13, xsem_int1_bb_b0_attn_idx, 0x1400050, 0x140005c, 0x1400058, + 0x1400054 +}; + +static const u16 xsem_fast_memory_int0_bb_b0_attn_idx[1] = { + 45, +}; + +static struct attn_hw_reg xsem_fast_memory_int0_bb_b0 = { + 2, 1, xsem_fast_memory_int0_bb_b0_attn_idx, 0x1440040, 0x144004c, + 0x1440048, 0x1440044 +}; + +static struct attn_hw_reg *xsem_int_bb_b0_regs[3] = { + &xsem_int0_bb_b0, &xsem_int1_bb_b0, &xsem_fast_memory_int0_bb_b0, +}; + +static const u16 xsem_int0_k2_attn_idx[32] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, +}; + +static struct attn_hw_reg xsem_int0_k2 = { + 0, 32, xsem_int0_k2_attn_idx, 0x1400040, 0x140004c, 0x1400048, + 0x1400044 +}; + +static const u16 xsem_int1_k2_attn_idx[13] = { + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, +}; + +static struct attn_hw_reg xsem_int1_k2 = { + 1, 13, xsem_int1_k2_attn_idx, 0x1400050, 0x140005c, 0x1400058, + 0x1400054 +}; + +static const u16 
xsem_fast_memory_int0_k2_attn_idx[1] = { + 45, +}; + +static struct attn_hw_reg xsem_fast_memory_int0_k2 = { + 2, 1, xsem_fast_memory_int0_k2_attn_idx, 0x1440040, 0x144004c, + 0x1440048, + 0x1440044 +}; + +static struct attn_hw_reg *xsem_int_k2_regs[3] = { + &xsem_int0_k2, &xsem_int1_k2, &xsem_fast_memory_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *xsem_prty_attn_desc[24] = { + "xsem_vfc_rbc_parity_error", + "xsem_storm_rf_parity_error", + "xsem_reg_gen_parity_error", + "xsem_mem006_i_ecc_0_rf_int", + "xsem_mem006_i_ecc_1_rf_int", + "xsem_mem005_i_mem_prty", + "xsem_mem002_i_mem_prty", + "xsem_mem004_i_mem_prty", + "xsem_mem003_i_mem_prty", + "xsem_mem001_i_mem_prty", + "xsem_fast_memory_mem024_i_mem_prty", + "xsem_fast_memory_mem023_i_mem_prty", + "xsem_fast_memory_mem022_i_mem_prty", + "xsem_fast_memory_mem021_i_mem_prty", + "xsem_fast_memory_mem020_i_mem_prty", + "xsem_fast_memory_mem019_i_mem_prty", + "xsem_fast_memory_mem018_i_mem_prty", + "xsem_fast_memory_vfc_config_mem005_i_ecc_rf_int", + "xsem_fast_memory_vfc_config_mem002_i_ecc_rf_int", + "xsem_fast_memory_vfc_config_mem006_i_mem_prty", + "xsem_fast_memory_vfc_config_mem001_i_mem_prty", + "xsem_fast_memory_vfc_config_mem004_i_mem_prty", + "xsem_fast_memory_vfc_config_mem003_i_mem_prty", + "xsem_fast_memory_vfc_config_mem007_i_mem_prty", +}; +#else +#define xsem_prty_attn_desc OSAL_NULL +#endif + +static const u16 xsem_prty0_bb_a0_attn_idx[3] = { + 0, 1, 2, +}; + +static struct attn_hw_reg xsem_prty0_bb_a0 = { + 0, 3, xsem_prty0_bb_a0_attn_idx, 0x14000c8, 0x14000d4, 0x14000d0, + 0x14000cc +}; + +static const u16 xsem_prty1_bb_a0_attn_idx[7] = { + 3, 4, 5, 6, 7, 8, 9, +}; + +static struct attn_hw_reg xsem_prty1_bb_a0 = { + 1, 7, xsem_prty1_bb_a0_attn_idx, 0x1400200, 0x140020c, 0x1400208, + 0x1400204 +}; + +static struct attn_hw_reg *xsem_prty_bb_a0_regs[2] = { + &xsem_prty0_bb_a0, &xsem_prty1_bb_a0, +}; + +static const u16 xsem_prty0_bb_b0_attn_idx[3] = { + 0, 1, 2, +}; + +static struct attn_hw_reg xsem_prty0_bb_b0 = { + 0, 3, xsem_prty0_bb_b0_attn_idx, 0x14000c8, 0x14000d4, 0x14000d0, + 0x14000cc +}; + +static const u16 xsem_prty1_bb_b0_attn_idx[7] = { + 3, 4, 5, 6, 7, 8, 9, +}; + +static struct attn_hw_reg xsem_prty1_bb_b0 = { + 1, 7, xsem_prty1_bb_b0_attn_idx, 0x1400200, 0x140020c, 0x1400208, + 0x1400204 +}; + +static struct attn_hw_reg *xsem_prty_bb_b0_regs[2] = { + &xsem_prty0_bb_b0, &xsem_prty1_bb_b0, +}; + +static const u16 xsem_prty0_k2_attn_idx[3] = { + 0, 1, 2, +}; + +static struct attn_hw_reg xsem_prty0_k2 = { + 0, 3, xsem_prty0_k2_attn_idx, 0x14000c8, 0x14000d4, 0x14000d0, + 0x14000cc +}; + +static const u16 xsem_prty1_k2_attn_idx[7] = { + 3, 4, 5, 6, 7, 8, 9, +}; + +static struct attn_hw_reg xsem_prty1_k2 = { + 1, 7, xsem_prty1_k2_attn_idx, 0x1400200, 0x140020c, 0x1400208, + 0x1400204 +}; + +static const u16 xsem_fast_memory_prty1_k2_attn_idx[7] = { + 10, 11, 12, 13, 14, 15, 16, +}; + +static struct attn_hw_reg xsem_fast_memory_prty1_k2 = { + 2, 7, xsem_fast_memory_prty1_k2_attn_idx, 0x1440200, 0x144020c, + 0x1440208, + 0x1440204 +}; + +static struct attn_hw_reg *xsem_prty_k2_regs[3] = { + &xsem_prty0_k2, &xsem_prty1_k2, &xsem_fast_memory_prty1_k2, +}; + +#ifdef ATTN_DESC +static const char *ysem_int_attn_desc[46] = { + "ysem_address_error", + "ysem_fic_last_error", + "ysem_fic_length_error", + "ysem_fic_fifo_error", + "ysem_pas_buf_fifo_error", + "ysem_sync_fin_pop_error", + "ysem_sync_dra_wr_push_error", + "ysem_sync_dra_wr_pop_error", + "ysem_sync_dra_rd_push_error", + "ysem_sync_dra_rd_pop_error", + 
"ysem_sync_fin_push_error", + "ysem_sem_fast_address_error", + "ysem_cam_lsb_inp_fifo", + "ysem_cam_msb_inp_fifo", + "ysem_cam_out_fifo", + "ysem_fin_fifo", + "ysem_thread_fifo_error", + "ysem_thread_overrun", + "ysem_sync_ext_store_push_error", + "ysem_sync_ext_store_pop_error", + "ysem_sync_ext_load_push_error", + "ysem_sync_ext_load_pop_error", + "ysem_sync_ram_rd_push_error", + "ysem_sync_ram_rd_pop_error", + "ysem_sync_ram_wr_pop_error", + "ysem_sync_ram_wr_push_error", + "ysem_sync_dbg_push_error", + "ysem_sync_dbg_pop_error", + "ysem_dbg_fifo_error", + "ysem_cam_msb2_inp_fifo", + "ysem_vfc_interrupt", + "ysem_vfc_out_fifo_error", + "ysem_storm_stack_uf_attn", + "ysem_storm_stack_of_attn", + "ysem_storm_runtime_error", + "ysem_ext_load_pend_wr_error", + "ysem_thread_rls_orun_error", + "ysem_thread_rls_aloc_error", + "ysem_thread_rls_vld_error", + "ysem_ext_thread_oor_error", + "ysem_ord_id_fifo_error", + "ysem_invld_foc_error", + "ysem_ext_ld_len_error", + "ysem_thrd_ord_fifo_error", + "ysem_invld_thrd_ord_error", + "ysem_fast_memory_address_error", +}; +#else +#define ysem_int_attn_desc OSAL_NULL +#endif + +static const u16 ysem_int0_bb_a0_attn_idx[32] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, +}; + +static struct attn_hw_reg ysem_int0_bb_a0 = { + 0, 32, ysem_int0_bb_a0_attn_idx, 0x1500040, 0x150004c, 0x1500048, + 0x1500044 +}; + +static const u16 ysem_int1_bb_a0_attn_idx[13] = { + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, +}; + +static struct attn_hw_reg ysem_int1_bb_a0 = { + 1, 13, ysem_int1_bb_a0_attn_idx, 0x1500050, 0x150005c, 0x1500058, + 0x1500054 +}; + +static const u16 ysem_fast_memory_int0_bb_a0_attn_idx[1] = { + 45, +}; + +static struct attn_hw_reg ysem_fast_memory_int0_bb_a0 = { + 2, 1, ysem_fast_memory_int0_bb_a0_attn_idx, 0x1540040, 0x154004c, + 0x1540048, 0x1540044 +}; + +static struct attn_hw_reg *ysem_int_bb_a0_regs[3] = { + &ysem_int0_bb_a0, &ysem_int1_bb_a0, &ysem_fast_memory_int0_bb_a0, +}; + +static const u16 ysem_int0_bb_b0_attn_idx[32] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, +}; + +static struct attn_hw_reg ysem_int0_bb_b0 = { + 0, 32, ysem_int0_bb_b0_attn_idx, 0x1500040, 0x150004c, 0x1500048, + 0x1500044 +}; + +static const u16 ysem_int1_bb_b0_attn_idx[13] = { + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, +}; + +static struct attn_hw_reg ysem_int1_bb_b0 = { + 1, 13, ysem_int1_bb_b0_attn_idx, 0x1500050, 0x150005c, 0x1500058, + 0x1500054 +}; + +static const u16 ysem_fast_memory_int0_bb_b0_attn_idx[1] = { + 45, +}; + +static struct attn_hw_reg ysem_fast_memory_int0_bb_b0 = { + 2, 1, ysem_fast_memory_int0_bb_b0_attn_idx, 0x1540040, 0x154004c, + 0x1540048, 0x1540044 +}; + +static struct attn_hw_reg *ysem_int_bb_b0_regs[3] = { + &ysem_int0_bb_b0, &ysem_int1_bb_b0, &ysem_fast_memory_int0_bb_b0, +}; + +static const u16 ysem_int0_k2_attn_idx[32] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, +}; + +static struct attn_hw_reg ysem_int0_k2 = { + 0, 32, ysem_int0_k2_attn_idx, 0x1500040, 0x150004c, 0x1500048, + 0x1500044 +}; + +static const u16 ysem_int1_k2_attn_idx[13] = { + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, +}; + +static struct attn_hw_reg ysem_int1_k2 = { + 1, 13, ysem_int1_k2_attn_idx, 0x1500050, 0x150005c, 0x1500058, + 0x1500054 +}; + +static const u16 
ysem_fast_memory_int0_k2_attn_idx[1] = { + 45, +}; + +static struct attn_hw_reg ysem_fast_memory_int0_k2 = { + 2, 1, ysem_fast_memory_int0_k2_attn_idx, 0x1540040, 0x154004c, + 0x1540048, + 0x1540044 +}; + +static struct attn_hw_reg *ysem_int_k2_regs[3] = { + &ysem_int0_k2, &ysem_int1_k2, &ysem_fast_memory_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *ysem_prty_attn_desc[24] = { + "ysem_vfc_rbc_parity_error", + "ysem_storm_rf_parity_error", + "ysem_reg_gen_parity_error", + "ysem_mem006_i_ecc_0_rf_int", + "ysem_mem006_i_ecc_1_rf_int", + "ysem_mem005_i_mem_prty", + "ysem_mem002_i_mem_prty", + "ysem_mem004_i_mem_prty", + "ysem_mem003_i_mem_prty", + "ysem_mem001_i_mem_prty", + "ysem_fast_memory_mem024_i_mem_prty", + "ysem_fast_memory_mem023_i_mem_prty", + "ysem_fast_memory_mem022_i_mem_prty", + "ysem_fast_memory_mem021_i_mem_prty", + "ysem_fast_memory_mem020_i_mem_prty", + "ysem_fast_memory_mem019_i_mem_prty", + "ysem_fast_memory_mem018_i_mem_prty", + "ysem_fast_memory_vfc_config_mem005_i_ecc_rf_int", + "ysem_fast_memory_vfc_config_mem002_i_ecc_rf_int", + "ysem_fast_memory_vfc_config_mem006_i_mem_prty", + "ysem_fast_memory_vfc_config_mem001_i_mem_prty", + "ysem_fast_memory_vfc_config_mem004_i_mem_prty", + "ysem_fast_memory_vfc_config_mem003_i_mem_prty", + "ysem_fast_memory_vfc_config_mem007_i_mem_prty", +}; +#else +#define ysem_prty_attn_desc OSAL_NULL +#endif + +static const u16 ysem_prty0_bb_a0_attn_idx[3] = { + 0, 1, 2, +}; + +static struct attn_hw_reg ysem_prty0_bb_a0 = { + 0, 3, ysem_prty0_bb_a0_attn_idx, 0x15000c8, 0x15000d4, 0x15000d0, + 0x15000cc +}; + +static const u16 ysem_prty1_bb_a0_attn_idx[7] = { + 3, 4, 5, 6, 7, 8, 9, +}; + +static struct attn_hw_reg ysem_prty1_bb_a0 = { + 1, 7, ysem_prty1_bb_a0_attn_idx, 0x1500200, 0x150020c, 0x1500208, + 0x1500204 +}; + +static struct attn_hw_reg *ysem_prty_bb_a0_regs[2] = { + &ysem_prty0_bb_a0, &ysem_prty1_bb_a0, +}; + +static const u16 ysem_prty0_bb_b0_attn_idx[3] = { + 0, 1, 2, +}; + +static struct attn_hw_reg ysem_prty0_bb_b0 = { + 0, 3, ysem_prty0_bb_b0_attn_idx, 0x15000c8, 0x15000d4, 0x15000d0, + 0x15000cc +}; + +static const u16 ysem_prty1_bb_b0_attn_idx[7] = { + 3, 4, 5, 6, 7, 8, 9, +}; + +static struct attn_hw_reg ysem_prty1_bb_b0 = { + 1, 7, ysem_prty1_bb_b0_attn_idx, 0x1500200, 0x150020c, 0x1500208, + 0x1500204 +}; + +static struct attn_hw_reg *ysem_prty_bb_b0_regs[2] = { + &ysem_prty0_bb_b0, &ysem_prty1_bb_b0, +}; + +static const u16 ysem_prty0_k2_attn_idx[3] = { + 0, 1, 2, +}; + +static struct attn_hw_reg ysem_prty0_k2 = { + 0, 3, ysem_prty0_k2_attn_idx, 0x15000c8, 0x15000d4, 0x15000d0, + 0x15000cc +}; + +static const u16 ysem_prty1_k2_attn_idx[7] = { + 3, 4, 5, 6, 7, 8, 9, +}; + +static struct attn_hw_reg ysem_prty1_k2 = { + 1, 7, ysem_prty1_k2_attn_idx, 0x1500200, 0x150020c, 0x1500208, + 0x1500204 +}; + +static const u16 ysem_fast_memory_prty1_k2_attn_idx[7] = { + 10, 11, 12, 13, 14, 15, 16, +}; + +static struct attn_hw_reg ysem_fast_memory_prty1_k2 = { + 2, 7, ysem_fast_memory_prty1_k2_attn_idx, 0x1540200, 0x154020c, + 0x1540208, + 0x1540204 +}; + +static struct attn_hw_reg *ysem_prty_k2_regs[3] = { + &ysem_prty0_k2, &ysem_prty1_k2, &ysem_fast_memory_prty1_k2, +}; + +#ifdef ATTN_DESC +static const char *psem_int_attn_desc[46] = { + "psem_address_error", + "psem_fic_last_error", + "psem_fic_length_error", + "psem_fic_fifo_error", + "psem_pas_buf_fifo_error", + "psem_sync_fin_pop_error", + "psem_sync_dra_wr_push_error", + "psem_sync_dra_wr_pop_error", + "psem_sync_dra_rd_push_error", + "psem_sync_dra_rd_pop_error", + 
"psem_sync_fin_push_error", + "psem_sem_fast_address_error", + "psem_cam_lsb_inp_fifo", + "psem_cam_msb_inp_fifo", + "psem_cam_out_fifo", + "psem_fin_fifo", + "psem_thread_fifo_error", + "psem_thread_overrun", + "psem_sync_ext_store_push_error", + "psem_sync_ext_store_pop_error", + "psem_sync_ext_load_push_error", + "psem_sync_ext_load_pop_error", + "psem_sync_ram_rd_push_error", + "psem_sync_ram_rd_pop_error", + "psem_sync_ram_wr_pop_error", + "psem_sync_ram_wr_push_error", + "psem_sync_dbg_push_error", + "psem_sync_dbg_pop_error", + "psem_dbg_fifo_error", + "psem_cam_msb2_inp_fifo", + "psem_vfc_interrupt", + "psem_vfc_out_fifo_error", + "psem_storm_stack_uf_attn", + "psem_storm_stack_of_attn", + "psem_storm_runtime_error", + "psem_ext_load_pend_wr_error", + "psem_thread_rls_orun_error", + "psem_thread_rls_aloc_error", + "psem_thread_rls_vld_error", + "psem_ext_thread_oor_error", + "psem_ord_id_fifo_error", + "psem_invld_foc_error", + "psem_ext_ld_len_error", + "psem_thrd_ord_fifo_error", + "psem_invld_thrd_ord_error", + "psem_fast_memory_address_error", +}; +#else +#define psem_int_attn_desc OSAL_NULL +#endif + +static const u16 psem_int0_bb_a0_attn_idx[32] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, +}; + +static struct attn_hw_reg psem_int0_bb_a0 = { + 0, 32, psem_int0_bb_a0_attn_idx, 0x1600040, 0x160004c, 0x1600048, + 0x1600044 +}; + +static const u16 psem_int1_bb_a0_attn_idx[13] = { + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, +}; + +static struct attn_hw_reg psem_int1_bb_a0 = { + 1, 13, psem_int1_bb_a0_attn_idx, 0x1600050, 0x160005c, 0x1600058, + 0x1600054 +}; + +static const u16 psem_fast_memory_int0_bb_a0_attn_idx[1] = { + 45, +}; + +static struct attn_hw_reg psem_fast_memory_int0_bb_a0 = { + 2, 1, psem_fast_memory_int0_bb_a0_attn_idx, 0x1640040, 0x164004c, + 0x1640048, 0x1640044 +}; + +static struct attn_hw_reg *psem_int_bb_a0_regs[3] = { + &psem_int0_bb_a0, &psem_int1_bb_a0, &psem_fast_memory_int0_bb_a0, +}; + +static const u16 psem_int0_bb_b0_attn_idx[32] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, +}; + +static struct attn_hw_reg psem_int0_bb_b0 = { + 0, 32, psem_int0_bb_b0_attn_idx, 0x1600040, 0x160004c, 0x1600048, + 0x1600044 +}; + +static const u16 psem_int1_bb_b0_attn_idx[13] = { + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, +}; + +static struct attn_hw_reg psem_int1_bb_b0 = { + 1, 13, psem_int1_bb_b0_attn_idx, 0x1600050, 0x160005c, 0x1600058, + 0x1600054 +}; + +static const u16 psem_fast_memory_int0_bb_b0_attn_idx[1] = { + 45, +}; + +static struct attn_hw_reg psem_fast_memory_int0_bb_b0 = { + 2, 1, psem_fast_memory_int0_bb_b0_attn_idx, 0x1640040, 0x164004c, + 0x1640048, 0x1640044 +}; + +static struct attn_hw_reg *psem_int_bb_b0_regs[3] = { + &psem_int0_bb_b0, &psem_int1_bb_b0, &psem_fast_memory_int0_bb_b0, +}; + +static const u16 psem_int0_k2_attn_idx[32] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, +}; + +static struct attn_hw_reg psem_int0_k2 = { + 0, 32, psem_int0_k2_attn_idx, 0x1600040, 0x160004c, 0x1600048, + 0x1600044 +}; + +static const u16 psem_int1_k2_attn_idx[13] = { + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, +}; + +static struct attn_hw_reg psem_int1_k2 = { + 1, 13, psem_int1_k2_attn_idx, 0x1600050, 0x160005c, 0x1600058, + 0x1600054 +}; + +static const u16 
psem_fast_memory_int0_k2_attn_idx[1] = { + 45, +}; + +static struct attn_hw_reg psem_fast_memory_int0_k2 = { + 2, 1, psem_fast_memory_int0_k2_attn_idx, 0x1640040, 0x164004c, + 0x1640048, + 0x1640044 +}; + +static struct attn_hw_reg *psem_int_k2_regs[3] = { + &psem_int0_k2, &psem_int1_k2, &psem_fast_memory_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *psem_prty_attn_desc[23] = { + "psem_vfc_rbc_parity_error", + "psem_storm_rf_parity_error", + "psem_reg_gen_parity_error", + "psem_mem005_i_ecc_0_rf_int", + "psem_mem005_i_ecc_1_rf_int", + "psem_mem004_i_mem_prty", + "psem_mem002_i_mem_prty", + "psem_mem003_i_mem_prty", + "psem_mem001_i_mem_prty", + "psem_fast_memory_mem024_i_mem_prty", + "psem_fast_memory_mem023_i_mem_prty", + "psem_fast_memory_mem022_i_mem_prty", + "psem_fast_memory_mem021_i_mem_prty", + "psem_fast_memory_mem020_i_mem_prty", + "psem_fast_memory_mem019_i_mem_prty", + "psem_fast_memory_mem018_i_mem_prty", + "psem_fast_memory_vfc_config_mem005_i_ecc_rf_int", + "psem_fast_memory_vfc_config_mem002_i_ecc_rf_int", + "psem_fast_memory_vfc_config_mem006_i_mem_prty", + "psem_fast_memory_vfc_config_mem001_i_mem_prty", + "psem_fast_memory_vfc_config_mem004_i_mem_prty", + "psem_fast_memory_vfc_config_mem003_i_mem_prty", + "psem_fast_memory_vfc_config_mem007_i_mem_prty", +}; +#else +#define psem_prty_attn_desc OSAL_NULL +#endif + +static const u16 psem_prty0_bb_a0_attn_idx[3] = { + 0, 1, 2, +}; + +static struct attn_hw_reg psem_prty0_bb_a0 = { + 0, 3, psem_prty0_bb_a0_attn_idx, 0x16000c8, 0x16000d4, 0x16000d0, + 0x16000cc +}; + +static const u16 psem_prty1_bb_a0_attn_idx[6] = { + 3, 4, 5, 6, 7, 8, +}; + +static struct attn_hw_reg psem_prty1_bb_a0 = { + 1, 6, psem_prty1_bb_a0_attn_idx, 0x1600200, 0x160020c, 0x1600208, + 0x1600204 +}; + +static const u16 psem_fast_memory_vfc_config_prty1_bb_a0_attn_idx[6] = { + 16, 17, 19, 20, 21, 22, +}; + +static struct attn_hw_reg psem_fast_memory_vfc_config_prty1_bb_a0 = { + 2, 6, psem_fast_memory_vfc_config_prty1_bb_a0_attn_idx, 0x164a200, + 0x164a20c, 0x164a208, 0x164a204 +}; + +static struct attn_hw_reg *psem_prty_bb_a0_regs[3] = { + &psem_prty0_bb_a0, &psem_prty1_bb_a0, + &psem_fast_memory_vfc_config_prty1_bb_a0, +}; + +static const u16 psem_prty0_bb_b0_attn_idx[3] = { + 0, 1, 2, +}; + +static struct attn_hw_reg psem_prty0_bb_b0 = { + 0, 3, psem_prty0_bb_b0_attn_idx, 0x16000c8, 0x16000d4, 0x16000d0, + 0x16000cc +}; + +static const u16 psem_prty1_bb_b0_attn_idx[6] = { + 3, 4, 5, 6, 7, 8, +}; + +static struct attn_hw_reg psem_prty1_bb_b0 = { + 1, 6, psem_prty1_bb_b0_attn_idx, 0x1600200, 0x160020c, 0x1600208, + 0x1600204 +}; + +static const u16 psem_fast_memory_vfc_config_prty1_bb_b0_attn_idx[6] = { + 16, 17, 19, 20, 21, 22, +}; + +static struct attn_hw_reg psem_fast_memory_vfc_config_prty1_bb_b0 = { + 2, 6, psem_fast_memory_vfc_config_prty1_bb_b0_attn_idx, 0x164a200, + 0x164a20c, 0x164a208, 0x164a204 +}; + +static struct attn_hw_reg *psem_prty_bb_b0_regs[3] = { + &psem_prty0_bb_b0, &psem_prty1_bb_b0, + &psem_fast_memory_vfc_config_prty1_bb_b0, +}; + +static const u16 psem_prty0_k2_attn_idx[3] = { + 0, 1, 2, +}; + +static struct attn_hw_reg psem_prty0_k2 = { + 0, 3, psem_prty0_k2_attn_idx, 0x16000c8, 0x16000d4, 0x16000d0, + 0x16000cc +}; + +static const u16 psem_prty1_k2_attn_idx[6] = { + 3, 4, 5, 6, 7, 8, +}; + +static struct attn_hw_reg psem_prty1_k2 = { + 1, 6, psem_prty1_k2_attn_idx, 0x1600200, 0x160020c, 0x1600208, + 0x1600204 +}; + +static const u16 psem_fast_memory_prty1_k2_attn_idx[7] = { + 9, 10, 11, 12, 13, 14, 15, +}; + +static struct 
attn_hw_reg psem_fast_memory_prty1_k2 = { + 2, 7, psem_fast_memory_prty1_k2_attn_idx, 0x1640200, 0x164020c, + 0x1640208, + 0x1640204 +}; + +static const u16 psem_fast_memory_vfc_config_prty1_k2_attn_idx[6] = { + 16, 17, 18, 19, 20, 21, +}; + +static struct attn_hw_reg psem_fast_memory_vfc_config_prty1_k2 = { + 3, 6, psem_fast_memory_vfc_config_prty1_k2_attn_idx, 0x164a200, + 0x164a20c, + 0x164a208, 0x164a204 +}; + +static struct attn_hw_reg *psem_prty_k2_regs[4] = { + &psem_prty0_k2, &psem_prty1_k2, &psem_fast_memory_prty1_k2, + &psem_fast_memory_vfc_config_prty1_k2, +}; + +#ifdef ATTN_DESC +static const char *rss_int_attn_desc[12] = { + "rss_address_error", + "rss_msg_inp_cnt_error", + "rss_msg_out_cnt_error", + "rss_inp_state_error", + "rss_out_state_error", + "rss_main_state_error", + "rss_calc_state_error", + "rss_inp_fifo_error", + "rss_cmd_fifo_error", + "rss_msg_fifo_error", + "rss_rsp_fifo_error", + "rss_hdr_fifo_error", +}; +#else +#define rss_int_attn_desc OSAL_NULL +#endif + +static const u16 rss_int0_bb_a0_attn_idx[12] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, +}; + +static struct attn_hw_reg rss_int0_bb_a0 = { + 0, 12, rss_int0_bb_a0_attn_idx, 0x238980, 0x23898c, 0x238988, 0x238984 +}; + +static struct attn_hw_reg *rss_int_bb_a0_regs[1] = { + &rss_int0_bb_a0, +}; + +static const u16 rss_int0_bb_b0_attn_idx[12] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, +}; + +static struct attn_hw_reg rss_int0_bb_b0 = { + 0, 12, rss_int0_bb_b0_attn_idx, 0x238980, 0x23898c, 0x238988, 0x238984 +}; + +static struct attn_hw_reg *rss_int_bb_b0_regs[1] = { + &rss_int0_bb_b0, +}; + +static const u16 rss_int0_k2_attn_idx[12] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, +}; + +static struct attn_hw_reg rss_int0_k2 = { + 0, 12, rss_int0_k2_attn_idx, 0x238980, 0x23898c, 0x238988, 0x238984 +}; + +static struct attn_hw_reg *rss_int_k2_regs[1] = { + &rss_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *rss_prty_attn_desc[4] = { + "rss_mem002_i_ecc_rf_int", + "rss_mem001_i_ecc_rf_int", + "rss_mem003_i_mem_prty", + "rss_mem004_i_mem_prty", +}; +#else +#define rss_prty_attn_desc OSAL_NULL +#endif + +static const u16 rss_prty1_bb_a0_attn_idx[4] = { + 0, 1, 2, 3, +}; + +static struct attn_hw_reg rss_prty1_bb_a0 = { + 0, 4, rss_prty1_bb_a0_attn_idx, 0x238a00, 0x238a0c, 0x238a08, 0x238a04 +}; + +static struct attn_hw_reg *rss_prty_bb_a0_regs[1] = { + &rss_prty1_bb_a0, +}; + +static const u16 rss_prty1_bb_b0_attn_idx[4] = { + 0, 1, 2, 3, +}; + +static struct attn_hw_reg rss_prty1_bb_b0 = { + 0, 4, rss_prty1_bb_b0_attn_idx, 0x238a00, 0x238a0c, 0x238a08, 0x238a04 +}; + +static struct attn_hw_reg *rss_prty_bb_b0_regs[1] = { + &rss_prty1_bb_b0, +}; + +static const u16 rss_prty1_k2_attn_idx[4] = { + 0, 1, 2, 3, +}; + +static struct attn_hw_reg rss_prty1_k2 = { + 0, 4, rss_prty1_k2_attn_idx, 0x238a00, 0x238a0c, 0x238a08, 0x238a04 +}; + +static struct attn_hw_reg *rss_prty_k2_regs[1] = { + &rss_prty1_k2, +}; + +#ifdef ATTN_DESC +static const char *tmld_int_attn_desc[6] = { + "tmld_address_error", + "tmld_ld_hdr_err", + "tmld_ld_seg_msg_err", + "tmld_ld_tid_mini_cache_err", + "tmld_ld_cid_mini_cache_err", + "tmld_ld_long_message", +}; +#else +#define tmld_int_attn_desc OSAL_NULL +#endif + +static const u16 tmld_int0_bb_a0_attn_idx[6] = { + 0, 1, 2, 3, 4, 5, +}; + +static struct attn_hw_reg tmld_int0_bb_a0 = { + 0, 6, tmld_int0_bb_a0_attn_idx, 0x4d0180, 0x4d018c, 0x4d0188, 0x4d0184 +}; + +static struct attn_hw_reg *tmld_int_bb_a0_regs[1] = { + &tmld_int0_bb_a0, +}; + +static const u16 tmld_int0_bb_b0_attn_idx[6] = { + 
0, 1, 2, 3, 4, 5, +}; + +static struct attn_hw_reg tmld_int0_bb_b0 = { + 0, 6, tmld_int0_bb_b0_attn_idx, 0x4d0180, 0x4d018c, 0x4d0188, 0x4d0184 +}; + +static struct attn_hw_reg *tmld_int_bb_b0_regs[1] = { + &tmld_int0_bb_b0, +}; + +static const u16 tmld_int0_k2_attn_idx[6] = { + 0, 1, 2, 3, 4, 5, +}; + +static struct attn_hw_reg tmld_int0_k2 = { + 0, 6, tmld_int0_k2_attn_idx, 0x4d0180, 0x4d018c, 0x4d0188, 0x4d0184 +}; + +static struct attn_hw_reg *tmld_int_k2_regs[1] = { + &tmld_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *tmld_prty_attn_desc[8] = { + "tmld_mem006_i_ecc_rf_int", + "tmld_mem002_i_ecc_rf_int", + "tmld_mem003_i_mem_prty", + "tmld_mem004_i_mem_prty", + "tmld_mem007_i_mem_prty", + "tmld_mem008_i_mem_prty", + "tmld_mem005_i_mem_prty", + "tmld_mem001_i_mem_prty", +}; +#else +#define tmld_prty_attn_desc OSAL_NULL +#endif + +static const u16 tmld_prty1_bb_a0_attn_idx[8] = { + 0, 1, 2, 3, 4, 5, 6, 7, +}; + +static struct attn_hw_reg tmld_prty1_bb_a0 = { + 0, 8, tmld_prty1_bb_a0_attn_idx, 0x4d0200, 0x4d020c, 0x4d0208, 0x4d0204 +}; + +static struct attn_hw_reg *tmld_prty_bb_a0_regs[1] = { + &tmld_prty1_bb_a0, +}; + +static const u16 tmld_prty1_bb_b0_attn_idx[8] = { + 0, 1, 2, 3, 4, 5, 6, 7, +}; + +static struct attn_hw_reg tmld_prty1_bb_b0 = { + 0, 8, tmld_prty1_bb_b0_attn_idx, 0x4d0200, 0x4d020c, 0x4d0208, 0x4d0204 +}; + +static struct attn_hw_reg *tmld_prty_bb_b0_regs[1] = { + &tmld_prty1_bb_b0, +}; + +static const u16 tmld_prty1_k2_attn_idx[8] = { + 0, 1, 2, 3, 4, 5, 6, 7, +}; + +static struct attn_hw_reg tmld_prty1_k2 = { + 0, 8, tmld_prty1_k2_attn_idx, 0x4d0200, 0x4d020c, 0x4d0208, 0x4d0204 +}; + +static struct attn_hw_reg *tmld_prty_k2_regs[1] = { + &tmld_prty1_k2, +}; + +#ifdef ATTN_DESC +static const char *muld_int_attn_desc[6] = { + "muld_address_error", + "muld_ld_hdr_err", + "muld_ld_seg_msg_err", + "muld_ld_tid_mini_cache_err", + "muld_ld_cid_mini_cache_err", + "muld_ld_long_message", +}; +#else +#define muld_int_attn_desc OSAL_NULL +#endif + +static const u16 muld_int0_bb_a0_attn_idx[6] = { + 0, 1, 2, 3, 4, 5, +}; + +static struct attn_hw_reg muld_int0_bb_a0 = { + 0, 6, muld_int0_bb_a0_attn_idx, 0x4e0180, 0x4e018c, 0x4e0188, 0x4e0184 +}; + +static struct attn_hw_reg *muld_int_bb_a0_regs[1] = { + &muld_int0_bb_a0, +}; + +static const u16 muld_int0_bb_b0_attn_idx[6] = { + 0, 1, 2, 3, 4, 5, +}; + +static struct attn_hw_reg muld_int0_bb_b0 = { + 0, 6, muld_int0_bb_b0_attn_idx, 0x4e0180, 0x4e018c, 0x4e0188, 0x4e0184 +}; + +static struct attn_hw_reg *muld_int_bb_b0_regs[1] = { + &muld_int0_bb_b0, +}; + +static const u16 muld_int0_k2_attn_idx[6] = { + 0, 1, 2, 3, 4, 5, +}; + +static struct attn_hw_reg muld_int0_k2 = { + 0, 6, muld_int0_k2_attn_idx, 0x4e0180, 0x4e018c, 0x4e0188, 0x4e0184 +}; + +static struct attn_hw_reg *muld_int_k2_regs[1] = { + &muld_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *muld_prty_attn_desc[10] = { + "muld_mem005_i_ecc_rf_int", + "muld_mem001_i_ecc_rf_int", + "muld_mem008_i_ecc_rf_int", + "muld_mem007_i_ecc_rf_int", + "muld_mem002_i_mem_prty", + "muld_mem003_i_mem_prty", + "muld_mem009_i_mem_prty", + "muld_mem010_i_mem_prty", + "muld_mem004_i_mem_prty", + "muld_mem006_i_mem_prty", +}; +#else +#define muld_prty_attn_desc OSAL_NULL +#endif + +static const u16 muld_prty1_bb_a0_attn_idx[10] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, +}; + +static struct attn_hw_reg muld_prty1_bb_a0 = { + 0, 10, muld_prty1_bb_a0_attn_idx, 0x4e0200, 0x4e020c, 0x4e0208, + 0x4e0204 +}; + +static struct attn_hw_reg *muld_prty_bb_a0_regs[1] = { + &muld_prty1_bb_a0, +}; + 
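/*
 * A minimal illustrative sketch, not part of the generated tables above: it
 * assumes attn_hw_reg follows the layout implied by the initializers in this
 * file (register index, valid-bit count, a per-bit table of indices into the
 * matching *_attn_desc string array, then four GRC register addresses); the
 * struct and function names below are invented for the example only.  Given a
 * raw status word read from one of these registers, the set attention bits can
 * be translated back to their description strings roughly like this:
 */
#include <stdio.h>

struct attn_reg_layout {                /* assumed layout, names invented */
	unsigned short reg_idx;         /* register index within its block */
	unsigned short num_of_bits;     /* number of valid attention bits */
	const unsigned short *attn_idx; /* bit -> index into *_attn_desc */
	unsigned int addr[4];           /* the four addresses in each initializer */
};

/* Print the description of every attention bit set in a status value. */
static void print_set_attentions(const struct attn_reg_layout *reg,
				 const char **desc, unsigned int sts)
{
	unsigned short bit;

	if (desc == NULL)               /* descriptions compiled out (OSAL_NULL) */
		return;
	for (bit = 0; bit < reg->num_of_bits && bit < 32; bit++)
		if (sts & (1u << bit))
			printf("attention: %s\n", desc[reg->attn_idx[bit]]);
}

/*
 * Example: for an entry shaped like muld_prty1_bb_a0 above, bit 3 of the
 * status word maps through attn_idx[3] == 3 to muld_prty_attn_desc[3],
 * i.e. "muld_mem007_i_ecc_rf_int".
 */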
+static const u16 muld_prty1_bb_b0_attn_idx[10] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, +}; + +static struct attn_hw_reg muld_prty1_bb_b0 = { + 0, 10, muld_prty1_bb_b0_attn_idx, 0x4e0200, 0x4e020c, 0x4e0208, + 0x4e0204 +}; + +static struct attn_hw_reg *muld_prty_bb_b0_regs[1] = { + &muld_prty1_bb_b0, +}; + +static const u16 muld_prty1_k2_attn_idx[10] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, +}; + +static struct attn_hw_reg muld_prty1_k2 = { + 0, 10, muld_prty1_k2_attn_idx, 0x4e0200, 0x4e020c, 0x4e0208, 0x4e0204 +}; + +static struct attn_hw_reg *muld_prty_k2_regs[1] = { + &muld_prty1_k2, +}; + +#ifdef ATTN_DESC +static const char *yuld_int_attn_desc[6] = { + "yuld_address_error", + "yuld_ld_hdr_err", + "yuld_ld_seg_msg_err", + "yuld_ld_tid_mini_cache_err", + "yuld_ld_cid_mini_cache_err", + "yuld_ld_long_message", +}; +#else +#define yuld_int_attn_desc OSAL_NULL +#endif + +static const u16 yuld_int0_bb_a0_attn_idx[6] = { + 0, 1, 2, 3, 4, 5, +}; + +static struct attn_hw_reg yuld_int0_bb_a0 = { + 0, 6, yuld_int0_bb_a0_attn_idx, 0x4c8180, 0x4c818c, 0x4c8188, 0x4c8184 +}; + +static struct attn_hw_reg *yuld_int_bb_a0_regs[1] = { + &yuld_int0_bb_a0, +}; + +static const u16 yuld_int0_bb_b0_attn_idx[6] = { + 0, 1, 2, 3, 4, 5, +}; + +static struct attn_hw_reg yuld_int0_bb_b0 = { + 0, 6, yuld_int0_bb_b0_attn_idx, 0x4c8180, 0x4c818c, 0x4c8188, 0x4c8184 +}; + +static struct attn_hw_reg *yuld_int_bb_b0_regs[1] = { + &yuld_int0_bb_b0, +}; + +static const u16 yuld_int0_k2_attn_idx[6] = { + 0, 1, 2, 3, 4, 5, +}; + +static struct attn_hw_reg yuld_int0_k2 = { + 0, 6, yuld_int0_k2_attn_idx, 0x4c8180, 0x4c818c, 0x4c8188, 0x4c8184 +}; + +static struct attn_hw_reg *yuld_int_k2_regs[1] = { + &yuld_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *yuld_prty_attn_desc[6] = { + "yuld_mem001_i_mem_prty", + "yuld_mem002_i_mem_prty", + "yuld_mem005_i_mem_prty", + "yuld_mem006_i_mem_prty", + "yuld_mem004_i_mem_prty", + "yuld_mem003_i_mem_prty", +}; +#else +#define yuld_prty_attn_desc OSAL_NULL +#endif + +static const u16 yuld_prty1_bb_a0_attn_idx[6] = { + 0, 1, 2, 3, 4, 5, +}; + +static struct attn_hw_reg yuld_prty1_bb_a0 = { + 0, 6, yuld_prty1_bb_a0_attn_idx, 0x4c8200, 0x4c820c, 0x4c8208, 0x4c8204 +}; + +static struct attn_hw_reg *yuld_prty_bb_a0_regs[1] = { + &yuld_prty1_bb_a0, +}; + +static const u16 yuld_prty1_bb_b0_attn_idx[6] = { + 0, 1, 2, 3, 4, 5, +}; + +static struct attn_hw_reg yuld_prty1_bb_b0 = { + 0, 6, yuld_prty1_bb_b0_attn_idx, 0x4c8200, 0x4c820c, 0x4c8208, 0x4c8204 +}; + +static struct attn_hw_reg *yuld_prty_bb_b0_regs[1] = { + &yuld_prty1_bb_b0, +}; + +static const u16 yuld_prty1_k2_attn_idx[6] = { + 0, 1, 2, 3, 4, 5, +}; + +static struct attn_hw_reg yuld_prty1_k2 = { + 0, 6, yuld_prty1_k2_attn_idx, 0x4c8200, 0x4c820c, 0x4c8208, 0x4c8204 +}; + +static struct attn_hw_reg *yuld_prty_k2_regs[1] = { + &yuld_prty1_k2, +}; + +#ifdef ATTN_DESC +static const char *xyld_int_attn_desc[6] = { + "xyld_address_error", + "xyld_ld_hdr_err", + "xyld_ld_seg_msg_err", + "xyld_ld_tid_mini_cache_err", + "xyld_ld_cid_mini_cache_err", + "xyld_ld_long_message", +}; +#else +#define xyld_int_attn_desc OSAL_NULL +#endif + +static const u16 xyld_int0_bb_a0_attn_idx[6] = { + 0, 1, 2, 3, 4, 5, +}; + +static struct attn_hw_reg xyld_int0_bb_a0 = { + 0, 6, xyld_int0_bb_a0_attn_idx, 0x4c0180, 0x4c018c, 0x4c0188, 0x4c0184 +}; + +static struct attn_hw_reg *xyld_int_bb_a0_regs[1] = { + &xyld_int0_bb_a0, +}; + +static const u16 xyld_int0_bb_b0_attn_idx[6] = { + 0, 1, 2, 3, 4, 5, +}; + +static struct attn_hw_reg xyld_int0_bb_b0 = { + 0, 6, 
xyld_int0_bb_b0_attn_idx, 0x4c0180, 0x4c018c, 0x4c0188, 0x4c0184 +}; + +static struct attn_hw_reg *xyld_int_bb_b0_regs[1] = { + &xyld_int0_bb_b0, +}; + +static const u16 xyld_int0_k2_attn_idx[6] = { + 0, 1, 2, 3, 4, 5, +}; + +static struct attn_hw_reg xyld_int0_k2 = { + 0, 6, xyld_int0_k2_attn_idx, 0x4c0180, 0x4c018c, 0x4c0188, 0x4c0184 +}; + +static struct attn_hw_reg *xyld_int_k2_regs[1] = { + &xyld_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *xyld_prty_attn_desc[9] = { + "xyld_mem004_i_ecc_rf_int", + "xyld_mem006_i_ecc_rf_int", + "xyld_mem001_i_mem_prty", + "xyld_mem002_i_mem_prty", + "xyld_mem008_i_mem_prty", + "xyld_mem009_i_mem_prty", + "xyld_mem003_i_mem_prty", + "xyld_mem005_i_mem_prty", + "xyld_mem007_i_mem_prty", +}; +#else +#define xyld_prty_attn_desc OSAL_NULL +#endif + +static const u16 xyld_prty1_bb_a0_attn_idx[9] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, +}; + +static struct attn_hw_reg xyld_prty1_bb_a0 = { + 0, 9, xyld_prty1_bb_a0_attn_idx, 0x4c0200, 0x4c020c, 0x4c0208, 0x4c0204 +}; + +static struct attn_hw_reg *xyld_prty_bb_a0_regs[1] = { + &xyld_prty1_bb_a0, +}; + +static const u16 xyld_prty1_bb_b0_attn_idx[9] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, +}; + +static struct attn_hw_reg xyld_prty1_bb_b0 = { + 0, 9, xyld_prty1_bb_b0_attn_idx, 0x4c0200, 0x4c020c, 0x4c0208, 0x4c0204 +}; + +static struct attn_hw_reg *xyld_prty_bb_b0_regs[1] = { + &xyld_prty1_bb_b0, +}; + +static const u16 xyld_prty1_k2_attn_idx[9] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, +}; + +static struct attn_hw_reg xyld_prty1_k2 = { + 0, 9, xyld_prty1_k2_attn_idx, 0x4c0200, 0x4c020c, 0x4c0208, 0x4c0204 +}; + +static struct attn_hw_reg *xyld_prty_k2_regs[1] = { + &xyld_prty1_k2, +}; + +#ifdef ATTN_DESC +static const char *prm_int_attn_desc[11] = { + "prm_address_error", + "prm_ififo_error", + "prm_immed_fifo_error", + "prm_ofst_pend_error", + "prm_pad_pend_error", + "prm_pbinp_pend_error", + "prm_tag_pend_error", + "prm_mstorm_eop_err", + "prm_ustorm_eop_err", + "prm_mstorm_que_err", + "prm_ustorm_que_err", +}; +#else +#define prm_int_attn_desc OSAL_NULL +#endif + +static const u16 prm_int0_bb_a0_attn_idx[11] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, +}; + +static struct attn_hw_reg prm_int0_bb_a0 = { + 0, 11, prm_int0_bb_a0_attn_idx, 0x230040, 0x23004c, 0x230048, 0x230044 +}; + +static struct attn_hw_reg *prm_int_bb_a0_regs[1] = { + &prm_int0_bb_a0, +}; + +static const u16 prm_int0_bb_b0_attn_idx[11] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, +}; + +static struct attn_hw_reg prm_int0_bb_b0 = { + 0, 11, prm_int0_bb_b0_attn_idx, 0x230040, 0x23004c, 0x230048, 0x230044 +}; + +static struct attn_hw_reg *prm_int_bb_b0_regs[1] = { + &prm_int0_bb_b0, +}; + +static const u16 prm_int0_k2_attn_idx[11] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, +}; + +static struct attn_hw_reg prm_int0_k2 = { + 0, 11, prm_int0_k2_attn_idx, 0x230040, 0x23004c, 0x230048, 0x230044 +}; + +static struct attn_hw_reg *prm_int_k2_regs[1] = { + &prm_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *prm_prty_attn_desc[30] = { + "prm_datapath_registers", + "prm_mem012_i_ecc_rf_int", + "prm_mem013_i_ecc_rf_int", + "prm_mem014_i_ecc_rf_int", + "prm_mem020_i_ecc_rf_int", + "prm_mem004_i_mem_prty", + "prm_mem024_i_mem_prty", + "prm_mem016_i_mem_prty", + "prm_mem017_i_mem_prty", + "prm_mem008_i_mem_prty", + "prm_mem009_i_mem_prty", + "prm_mem010_i_mem_prty", + "prm_mem015_i_mem_prty", + "prm_mem011_i_mem_prty", + "prm_mem003_i_mem_prty", + "prm_mem002_i_mem_prty", + "prm_mem005_i_mem_prty", + "prm_mem023_i_mem_prty", + "prm_mem006_i_mem_prty", + "prm_mem007_i_mem_prty", + 
"prm_mem001_i_mem_prty", + "prm_mem022_i_mem_prty", + "prm_mem021_i_mem_prty", + "prm_mem019_i_mem_prty", + "prm_mem015_i_ecc_rf_int", + "prm_mem021_i_ecc_rf_int", + "prm_mem025_i_mem_prty", + "prm_mem018_i_mem_prty", + "prm_mem012_i_mem_prty", + "prm_mem020_i_mem_prty", +}; +#else +#define prm_prty_attn_desc OSAL_NULL +#endif + +static const u16 prm_prty1_bb_a0_attn_idx[25] = { + 2, 3, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 23, 24, + 25, 26, 27, 28, 29, +}; + +static struct attn_hw_reg prm_prty1_bb_a0 = { + 0, 25, prm_prty1_bb_a0_attn_idx, 0x230200, 0x23020c, 0x230208, 0x230204 +}; + +static struct attn_hw_reg *prm_prty_bb_a0_regs[1] = { + &prm_prty1_bb_a0, +}; + +static const u16 prm_prty0_bb_b0_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg prm_prty0_bb_b0 = { + 0, 1, prm_prty0_bb_b0_attn_idx, 0x230050, 0x23005c, 0x230058, 0x230054 +}; + +static const u16 prm_prty1_bb_b0_attn_idx[24] = { + 2, 3, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 24, 25, + 26, 27, 28, 29, +}; + +static struct attn_hw_reg prm_prty1_bb_b0 = { + 1, 24, prm_prty1_bb_b0_attn_idx, 0x230200, 0x23020c, 0x230208, 0x230204 +}; + +static struct attn_hw_reg *prm_prty_bb_b0_regs[2] = { + &prm_prty0_bb_b0, &prm_prty1_bb_b0, +}; + +static const u16 prm_prty0_k2_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg prm_prty0_k2 = { + 0, 1, prm_prty0_k2_attn_idx, 0x230050, 0x23005c, 0x230058, 0x230054 +}; + +static const u16 prm_prty1_k2_attn_idx[23] = { + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, + 21, + 22, 23, +}; + +static struct attn_hw_reg prm_prty1_k2 = { + 1, 23, prm_prty1_k2_attn_idx, 0x230200, 0x23020c, 0x230208, 0x230204 +}; + +static struct attn_hw_reg *prm_prty_k2_regs[2] = { + &prm_prty0_k2, &prm_prty1_k2, +}; + +#ifdef ATTN_DESC +static const char *pbf_pb1_int_attn_desc[9] = { + "pbf_pb1_address_error", + "pbf_pb1_eop_error", + "pbf_pb1_ififo_error", + "pbf_pb1_pfifo_error", + "pbf_pb1_db_buf_error", + "pbf_pb1_th_exec_error", + "pbf_pb1_tq_error_wr", + "pbf_pb1_tq_error_rd_th", + "pbf_pb1_tq_error_rd_ih", +}; +#else +#define pbf_pb1_int_attn_desc OSAL_NULL +#endif + +static const u16 pbf_pb1_int0_bb_a0_attn_idx[9] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, +}; + +static struct attn_hw_reg pbf_pb1_int0_bb_a0 = { + 0, 9, pbf_pb1_int0_bb_a0_attn_idx, 0xda0040, 0xda004c, 0xda0048, + 0xda0044 +}; + +static struct attn_hw_reg *pbf_pb1_int_bb_a0_regs[1] = { + &pbf_pb1_int0_bb_a0, +}; + +static const u16 pbf_pb1_int0_bb_b0_attn_idx[9] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, +}; + +static struct attn_hw_reg pbf_pb1_int0_bb_b0 = { + 0, 9, pbf_pb1_int0_bb_b0_attn_idx, 0xda0040, 0xda004c, 0xda0048, + 0xda0044 +}; + +static struct attn_hw_reg *pbf_pb1_int_bb_b0_regs[1] = { + &pbf_pb1_int0_bb_b0, +}; + +static const u16 pbf_pb1_int0_k2_attn_idx[9] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, +}; + +static struct attn_hw_reg pbf_pb1_int0_k2 = { + 0, 9, pbf_pb1_int0_k2_attn_idx, 0xda0040, 0xda004c, 0xda0048, 0xda0044 +}; + +static struct attn_hw_reg *pbf_pb1_int_k2_regs[1] = { + &pbf_pb1_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *pbf_pb1_prty_attn_desc[1] = { + "pbf_pb1_datapath_registers", +}; +#else +#define pbf_pb1_prty_attn_desc OSAL_NULL +#endif + +static const u16 pbf_pb1_prty0_bb_b0_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg pbf_pb1_prty0_bb_b0 = { + 0, 1, pbf_pb1_prty0_bb_b0_attn_idx, 0xda0050, 0xda005c, 0xda0058, + 0xda0054 +}; + +static struct attn_hw_reg *pbf_pb1_prty_bb_b0_regs[1] = { + &pbf_pb1_prty0_bb_b0, +}; + +static const u16 
pbf_pb1_prty0_k2_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg pbf_pb1_prty0_k2 = { + 0, 1, pbf_pb1_prty0_k2_attn_idx, 0xda0050, 0xda005c, 0xda0058, 0xda0054 +}; + +static struct attn_hw_reg *pbf_pb1_prty_k2_regs[1] = { + &pbf_pb1_prty0_k2, +}; + +#ifdef ATTN_DESC +static const char *pbf_pb2_int_attn_desc[9] = { + "pbf_pb2_address_error", + "pbf_pb2_eop_error", + "pbf_pb2_ififo_error", + "pbf_pb2_pfifo_error", + "pbf_pb2_db_buf_error", + "pbf_pb2_th_exec_error", + "pbf_pb2_tq_error_wr", + "pbf_pb2_tq_error_rd_th", + "pbf_pb2_tq_error_rd_ih", +}; +#else +#define pbf_pb2_int_attn_desc OSAL_NULL +#endif + +static const u16 pbf_pb2_int0_bb_a0_attn_idx[9] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, +}; + +static struct attn_hw_reg pbf_pb2_int0_bb_a0 = { + 0, 9, pbf_pb2_int0_bb_a0_attn_idx, 0xda4040, 0xda404c, 0xda4048, + 0xda4044 +}; + +static struct attn_hw_reg *pbf_pb2_int_bb_a0_regs[1] = { + &pbf_pb2_int0_bb_a0, +}; + +static const u16 pbf_pb2_int0_bb_b0_attn_idx[9] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, +}; + +static struct attn_hw_reg pbf_pb2_int0_bb_b0 = { + 0, 9, pbf_pb2_int0_bb_b0_attn_idx, 0xda4040, 0xda404c, 0xda4048, + 0xda4044 +}; + +static struct attn_hw_reg *pbf_pb2_int_bb_b0_regs[1] = { + &pbf_pb2_int0_bb_b0, +}; + +static const u16 pbf_pb2_int0_k2_attn_idx[9] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, +}; + +static struct attn_hw_reg pbf_pb2_int0_k2 = { + 0, 9, pbf_pb2_int0_k2_attn_idx, 0xda4040, 0xda404c, 0xda4048, 0xda4044 +}; + +static struct attn_hw_reg *pbf_pb2_int_k2_regs[1] = { + &pbf_pb2_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *pbf_pb2_prty_attn_desc[1] = { + "pbf_pb2_datapath_registers", +}; +#else +#define pbf_pb2_prty_attn_desc OSAL_NULL +#endif + +static const u16 pbf_pb2_prty0_bb_b0_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg pbf_pb2_prty0_bb_b0 = { + 0, 1, pbf_pb2_prty0_bb_b0_attn_idx, 0xda4050, 0xda405c, 0xda4058, + 0xda4054 +}; + +static struct attn_hw_reg *pbf_pb2_prty_bb_b0_regs[1] = { + &pbf_pb2_prty0_bb_b0, +}; + +static const u16 pbf_pb2_prty0_k2_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg pbf_pb2_prty0_k2 = { + 0, 1, pbf_pb2_prty0_k2_attn_idx, 0xda4050, 0xda405c, 0xda4058, 0xda4054 +}; + +static struct attn_hw_reg *pbf_pb2_prty_k2_regs[1] = { + &pbf_pb2_prty0_k2, +}; + +#ifdef ATTN_DESC +static const char *rpb_int_attn_desc[9] = { + "rpb_address_error", + "rpb_eop_error", + "rpb_ififo_error", + "rpb_pfifo_error", + "rpb_db_buf_error", + "rpb_th_exec_error", + "rpb_tq_error_wr", + "rpb_tq_error_rd_th", + "rpb_tq_error_rd_ih", +}; +#else +#define rpb_int_attn_desc OSAL_NULL +#endif + +static const u16 rpb_int0_bb_a0_attn_idx[9] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, +}; + +static struct attn_hw_reg rpb_int0_bb_a0 = { + 0, 9, rpb_int0_bb_a0_attn_idx, 0x23c040, 0x23c04c, 0x23c048, 0x23c044 +}; + +static struct attn_hw_reg *rpb_int_bb_a0_regs[1] = { + &rpb_int0_bb_a0, +}; + +static const u16 rpb_int0_bb_b0_attn_idx[9] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, +}; + +static struct attn_hw_reg rpb_int0_bb_b0 = { + 0, 9, rpb_int0_bb_b0_attn_idx, 0x23c040, 0x23c04c, 0x23c048, 0x23c044 +}; + +static struct attn_hw_reg *rpb_int_bb_b0_regs[1] = { + &rpb_int0_bb_b0, +}; + +static const u16 rpb_int0_k2_attn_idx[9] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, +}; + +static struct attn_hw_reg rpb_int0_k2 = { + 0, 9, rpb_int0_k2_attn_idx, 0x23c040, 0x23c04c, 0x23c048, 0x23c044 +}; + +static struct attn_hw_reg *rpb_int_k2_regs[1] = { + &rpb_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *rpb_prty_attn_desc[1] = { + "rpb_datapath_registers", +}; +#else +#define rpb_prty_attn_desc 
OSAL_NULL +#endif + +static const u16 rpb_prty0_bb_b0_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg rpb_prty0_bb_b0 = { + 0, 1, rpb_prty0_bb_b0_attn_idx, 0x23c050, 0x23c05c, 0x23c058, 0x23c054 +}; + +static struct attn_hw_reg *rpb_prty_bb_b0_regs[1] = { + &rpb_prty0_bb_b0, +}; + +static const u16 rpb_prty0_k2_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg rpb_prty0_k2 = { + 0, 1, rpb_prty0_k2_attn_idx, 0x23c050, 0x23c05c, 0x23c058, 0x23c054 +}; + +static struct attn_hw_reg *rpb_prty_k2_regs[1] = { + &rpb_prty0_k2, +}; + +#ifdef ATTN_DESC +static const char *btb_int_attn_desc[139] = { + "btb_address_error", + "btb_rc_pkt0_rls_error", + "btb_unused_0", + "btb_rc_pkt0_len_error", + "btb_unused_1", + "btb_rc_pkt0_protocol_error", + "btb_rc_pkt1_rls_error", + "btb_unused_2", + "btb_rc_pkt1_len_error", + "btb_unused_3", + "btb_rc_pkt1_protocol_error", + "btb_rc_pkt2_rls_error", + "btb_unused_4", + "btb_rc_pkt2_len_error", + "btb_unused_5", + "btb_rc_pkt2_protocol_error", + "btb_rc_pkt3_rls_error", + "btb_unused_6", + "btb_rc_pkt3_len_error", + "btb_unused_7", + "btb_rc_pkt3_protocol_error", + "btb_rc_sop_req_tc_port_error", + "btb_unused_8", + "btb_wc0_protocol_error", + "btb_unused_9", + "btb_ll_blk_error", + "btb_ll_arb_calc_error", + "btb_fc_alm_calc_error", + "btb_wc0_inp_fifo_error", + "btb_wc0_sop_fifo_error", + "btb_wc0_len_fifo_error", + "btb_wc0_eop_fifo_error", + "btb_wc0_queue_fifo_error", + "btb_wc0_free_point_fifo_error", + "btb_wc0_next_point_fifo_error", + "btb_wc0_strt_fifo_error", + "btb_wc0_second_dscr_fifo_error", + "btb_wc0_pkt_avail_fifo_error", + "btb_wc0_notify_fifo_error", + "btb_wc0_ll_req_fifo_error", + "btb_wc0_ll_pa_cnt_error", + "btb_wc0_bb_pa_cnt_error", + "btb_wc_dup_upd_data_fifo_error", + "btb_wc_dup_rsp_dscr_fifo_error", + "btb_wc_dup_upd_point_fifo_error", + "btb_wc_dup_pkt_avail_fifo_error", + "btb_wc_dup_pkt_avail_cnt_error", + "btb_rc_pkt0_side_fifo_error", + "btb_rc_pkt0_req_fifo_error", + "btb_rc_pkt0_blk_fifo_error", + "btb_rc_pkt0_rls_left_fifo_error", + "btb_rc_pkt0_strt_ptr_fifo_error", + "btb_rc_pkt0_second_ptr_fifo_error", + "btb_rc_pkt0_rsp_fifo_error", + "btb_rc_pkt0_dscr_fifo_error", + "btb_rc_pkt1_side_fifo_error", + "btb_rc_pkt1_req_fifo_error", + "btb_rc_pkt1_blk_fifo_error", + "btb_rc_pkt1_rls_left_fifo_error", + "btb_rc_pkt1_strt_ptr_fifo_error", + "btb_rc_pkt1_second_ptr_fifo_error", + "btb_rc_pkt1_rsp_fifo_error", + "btb_rc_pkt1_dscr_fifo_error", + "btb_rc_pkt2_side_fifo_error", + "btb_rc_pkt2_req_fifo_error", + "btb_rc_pkt2_blk_fifo_error", + "btb_rc_pkt2_rls_left_fifo_error", + "btb_rc_pkt2_strt_ptr_fifo_error", + "btb_rc_pkt2_second_ptr_fifo_error", + "btb_rc_pkt2_rsp_fifo_error", + "btb_rc_pkt2_dscr_fifo_error", + "btb_rc_pkt3_side_fifo_error", + "btb_rc_pkt3_req_fifo_error", + "btb_rc_pkt3_blk_fifo_error", + "btb_rc_pkt3_rls_left_fifo_error", + "btb_rc_pkt3_strt_ptr_fifo_error", + "btb_rc_pkt3_second_ptr_fifo_error", + "btb_rc_pkt3_rsp_fifo_error", + "btb_rc_pkt3_dscr_fifo_error", + "btb_rc_sop_queue_fifo_error", + "btb_ll_arb_rls_fifo_error", + "btb_ll_arb_prefetch_fifo_error", + "btb_rc_pkt0_rls_fifo_error", + "btb_rc_pkt1_rls_fifo_error", + "btb_rc_pkt2_rls_fifo_error", + "btb_rc_pkt3_rls_fifo_error", + "btb_rc_pkt4_rls_fifo_error", + "btb_rc_pkt5_rls_fifo_error", + "btb_rc_pkt6_rls_fifo_error", + "btb_rc_pkt7_rls_fifo_error", + "btb_rc_pkt4_rls_error", + "btb_rc_pkt4_len_error", + "btb_rc_pkt4_protocol_error", + "btb_rc_pkt4_side_fifo_error", + "btb_rc_pkt4_req_fifo_error", + "btb_rc_pkt4_blk_fifo_error", + 
"btb_rc_pkt4_rls_left_fifo_error", + "btb_rc_pkt4_strt_ptr_fifo_error", + "btb_rc_pkt4_second_ptr_fifo_error", + "btb_rc_pkt4_rsp_fifo_error", + "btb_rc_pkt4_dscr_fifo_error", + "btb_rc_pkt5_rls_error", + "btb_rc_pkt5_len_error", + "btb_rc_pkt5_protocol_error", + "btb_rc_pkt5_side_fifo_error", + "btb_rc_pkt5_req_fifo_error", + "btb_rc_pkt5_blk_fifo_error", + "btb_rc_pkt5_rls_left_fifo_error", + "btb_rc_pkt5_strt_ptr_fifo_error", + "btb_rc_pkt5_second_ptr_fifo_error", + "btb_rc_pkt5_rsp_fifo_error", + "btb_rc_pkt5_dscr_fifo_error", + "btb_rc_pkt6_rls_error", + "btb_rc_pkt6_len_error", + "btb_rc_pkt6_protocol_error", + "btb_rc_pkt6_side_fifo_error", + "btb_rc_pkt6_req_fifo_error", + "btb_rc_pkt6_blk_fifo_error", + "btb_rc_pkt6_rls_left_fifo_error", + "btb_rc_pkt6_strt_ptr_fifo_error", + "btb_rc_pkt6_second_ptr_fifo_error", + "btb_rc_pkt6_rsp_fifo_error", + "btb_rc_pkt6_dscr_fifo_error", + "btb_rc_pkt7_rls_error", + "btb_rc_pkt7_len_error", + "btb_rc_pkt7_protocol_error", + "btb_rc_pkt7_side_fifo_error", + "btb_rc_pkt7_req_fifo_error", + "btb_rc_pkt7_blk_fifo_error", + "btb_rc_pkt7_rls_left_fifo_error", + "btb_rc_pkt7_strt_ptr_fifo_error", + "btb_rc_pkt7_second_ptr_fifo_error", + "btb_rc_pkt7_rsp_fifo_error", + "btb_packet_available_sync_fifo_push_error", + "btb_wc6_notify_fifo_error", + "btb_wc9_queue_fifo_error", + "btb_wc0_sync_fifo_push_error", + "btb_rls_sync_fifo_push_error", + "btb_rc_pkt7_dscr_fifo_error", +}; +#else +#define btb_int_attn_desc OSAL_NULL +#endif + +static const u16 btb_int0_bb_a0_attn_idx[16] = { + 0, 1, 3, 5, 6, 8, 10, 11, 13, 15, 16, 18, 20, 21, 23, 25, +}; + +static struct attn_hw_reg btb_int0_bb_a0 = { + 0, 16, btb_int0_bb_a0_attn_idx, 0xdb00c0, 0xdb00cc, 0xdb00c8, 0xdb00c4 +}; + +static const u16 btb_int1_bb_a0_attn_idx[16] = { + 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, +}; + +static struct attn_hw_reg btb_int1_bb_a0 = { + 1, 16, btb_int1_bb_a0_attn_idx, 0xdb00d8, 0xdb00e4, 0xdb00e0, 0xdb00dc +}; + +static const u16 btb_int2_bb_a0_attn_idx[4] = { + 42, 43, 44, 45, +}; + +static struct attn_hw_reg btb_int2_bb_a0 = { + 2, 4, btb_int2_bb_a0_attn_idx, 0xdb00f0, 0xdb00fc, 0xdb00f8, 0xdb00f4 +}; + +static const u16 btb_int3_bb_a0_attn_idx[32] = { + 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, + 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, +}; + +static struct attn_hw_reg btb_int3_bb_a0 = { + 3, 32, btb_int3_bb_a0_attn_idx, 0xdb0108, 0xdb0114, 0xdb0110, 0xdb010c +}; + +static const u16 btb_int4_bb_a0_attn_idx[23] = { + 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, + 96, 97, 98, 99, 100, +}; + +static struct attn_hw_reg btb_int4_bb_a0 = { + 4, 23, btb_int4_bb_a0_attn_idx, 0xdb0120, 0xdb012c, 0xdb0128, 0xdb0124 +}; + +static const u16 btb_int5_bb_a0_attn_idx[32] = { + 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, + 115, + 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, + 130, 131, + 132, +}; + +static struct attn_hw_reg btb_int5_bb_a0 = { + 5, 32, btb_int5_bb_a0_attn_idx, 0xdb0138, 0xdb0144, 0xdb0140, 0xdb013c +}; + +static const u16 btb_int6_bb_a0_attn_idx[1] = { + 133, +}; + +static struct attn_hw_reg btb_int6_bb_a0 = { + 6, 1, btb_int6_bb_a0_attn_idx, 0xdb0150, 0xdb015c, 0xdb0158, 0xdb0154 +}; + +static const u16 btb_int8_bb_a0_attn_idx[1] = { + 134, +}; + +static struct attn_hw_reg btb_int8_bb_a0 = { + 7, 1, btb_int8_bb_a0_attn_idx, 0xdb0184, 0xdb0190, 0xdb018c, 0xdb0188 +}; + +static const u16 btb_int9_bb_a0_attn_idx[1] = { + 135, +}; + 
+static struct attn_hw_reg btb_int9_bb_a0 = { + 8, 1, btb_int9_bb_a0_attn_idx, 0xdb019c, 0xdb01a8, 0xdb01a4, 0xdb01a0 +}; + +static const u16 btb_int10_bb_a0_attn_idx[1] = { + 136, +}; + +static struct attn_hw_reg btb_int10_bb_a0 = { + 9, 1, btb_int10_bb_a0_attn_idx, 0xdb01b4, 0xdb01c0, 0xdb01bc, 0xdb01b8 +}; + +static const u16 btb_int11_bb_a0_attn_idx[2] = { + 137, 138, +}; + +static struct attn_hw_reg btb_int11_bb_a0 = { + 10, 2, btb_int11_bb_a0_attn_idx, 0xdb01cc, 0xdb01d8, 0xdb01d4, 0xdb01d0 +}; + +static struct attn_hw_reg *btb_int_bb_a0_regs[11] = { + &btb_int0_bb_a0, &btb_int1_bb_a0, &btb_int2_bb_a0, &btb_int3_bb_a0, + &btb_int4_bb_a0, &btb_int5_bb_a0, &btb_int6_bb_a0, &btb_int8_bb_a0, + &btb_int9_bb_a0, &btb_int10_bb_a0, + &btb_int11_bb_a0, +}; + +static const u16 btb_int0_bb_b0_attn_idx[16] = { + 0, 1, 3, 5, 6, 8, 10, 11, 13, 15, 16, 18, 20, 21, 23, 25, +}; + +static struct attn_hw_reg btb_int0_bb_b0 = { + 0, 16, btb_int0_bb_b0_attn_idx, 0xdb00c0, 0xdb00cc, 0xdb00c8, 0xdb00c4 +}; + +static const u16 btb_int1_bb_b0_attn_idx[16] = { + 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, +}; + +static struct attn_hw_reg btb_int1_bb_b0 = { + 1, 16, btb_int1_bb_b0_attn_idx, 0xdb00d8, 0xdb00e4, 0xdb00e0, 0xdb00dc +}; + +static const u16 btb_int2_bb_b0_attn_idx[4] = { + 42, 43, 44, 45, +}; + +static struct attn_hw_reg btb_int2_bb_b0 = { + 2, 4, btb_int2_bb_b0_attn_idx, 0xdb00f0, 0xdb00fc, 0xdb00f8, 0xdb00f4 +}; + +static const u16 btb_int3_bb_b0_attn_idx[32] = { + 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, + 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, +}; + +static struct attn_hw_reg btb_int3_bb_b0 = { + 3, 32, btb_int3_bb_b0_attn_idx, 0xdb0108, 0xdb0114, 0xdb0110, 0xdb010c +}; + +static const u16 btb_int4_bb_b0_attn_idx[23] = { + 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, + 96, 97, 98, 99, 100, +}; + +static struct attn_hw_reg btb_int4_bb_b0 = { + 4, 23, btb_int4_bb_b0_attn_idx, 0xdb0120, 0xdb012c, 0xdb0128, 0xdb0124 +}; + +static const u16 btb_int5_bb_b0_attn_idx[32] = { + 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, + 115, + 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, + 130, 131, + 132, +}; + +static struct attn_hw_reg btb_int5_bb_b0 = { + 5, 32, btb_int5_bb_b0_attn_idx, 0xdb0138, 0xdb0144, 0xdb0140, 0xdb013c +}; + +static const u16 btb_int6_bb_b0_attn_idx[1] = { + 133, +}; + +static struct attn_hw_reg btb_int6_bb_b0 = { + 6, 1, btb_int6_bb_b0_attn_idx, 0xdb0150, 0xdb015c, 0xdb0158, 0xdb0154 +}; + +static const u16 btb_int8_bb_b0_attn_idx[1] = { + 134, +}; + +static struct attn_hw_reg btb_int8_bb_b0 = { + 7, 1, btb_int8_bb_b0_attn_idx, 0xdb0184, 0xdb0190, 0xdb018c, 0xdb0188 +}; + +static const u16 btb_int9_bb_b0_attn_idx[1] = { + 135, +}; + +static struct attn_hw_reg btb_int9_bb_b0 = { + 8, 1, btb_int9_bb_b0_attn_idx, 0xdb019c, 0xdb01a8, 0xdb01a4, 0xdb01a0 +}; + +static const u16 btb_int10_bb_b0_attn_idx[1] = { + 136, +}; + +static struct attn_hw_reg btb_int10_bb_b0 = { + 9, 1, btb_int10_bb_b0_attn_idx, 0xdb01b4, 0xdb01c0, 0xdb01bc, 0xdb01b8 +}; + +static const u16 btb_int11_bb_b0_attn_idx[2] = { + 137, 138, +}; + +static struct attn_hw_reg btb_int11_bb_b0 = { + 10, 2, btb_int11_bb_b0_attn_idx, 0xdb01cc, 0xdb01d8, 0xdb01d4, 0xdb01d0 +}; + +static struct attn_hw_reg *btb_int_bb_b0_regs[11] = { + &btb_int0_bb_b0, &btb_int1_bb_b0, &btb_int2_bb_b0, &btb_int3_bb_b0, + &btb_int4_bb_b0, &btb_int5_bb_b0, &btb_int6_bb_b0, &btb_int8_bb_b0, + &btb_int9_bb_b0, 
&btb_int10_bb_b0, + &btb_int11_bb_b0, +}; + +static const u16 btb_int0_k2_attn_idx[16] = { + 0, 1, 3, 5, 6, 8, 10, 11, 13, 15, 16, 18, 20, 21, 23, 25, +}; + +static struct attn_hw_reg btb_int0_k2 = { + 0, 16, btb_int0_k2_attn_idx, 0xdb00c0, 0xdb00cc, 0xdb00c8, 0xdb00c4 +}; + +static const u16 btb_int1_k2_attn_idx[16] = { + 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, +}; + +static struct attn_hw_reg btb_int1_k2 = { + 1, 16, btb_int1_k2_attn_idx, 0xdb00d8, 0xdb00e4, 0xdb00e0, 0xdb00dc +}; + +static const u16 btb_int2_k2_attn_idx[4] = { + 42, 43, 44, 45, +}; + +static struct attn_hw_reg btb_int2_k2 = { + 2, 4, btb_int2_k2_attn_idx, 0xdb00f0, 0xdb00fc, 0xdb00f8, 0xdb00f4 +}; + +static const u16 btb_int3_k2_attn_idx[32] = { + 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, + 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, +}; + +static struct attn_hw_reg btb_int3_k2 = { + 3, 32, btb_int3_k2_attn_idx, 0xdb0108, 0xdb0114, 0xdb0110, 0xdb010c +}; + +static const u16 btb_int4_k2_attn_idx[23] = { + 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, + 96, 97, 98, 99, 100, +}; + +static struct attn_hw_reg btb_int4_k2 = { + 4, 23, btb_int4_k2_attn_idx, 0xdb0120, 0xdb012c, 0xdb0128, 0xdb0124 +}; + +static const u16 btb_int5_k2_attn_idx[32] = { + 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, + 115, + 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, + 130, 131, + 132, +}; + +static struct attn_hw_reg btb_int5_k2 = { + 5, 32, btb_int5_k2_attn_idx, 0xdb0138, 0xdb0144, 0xdb0140, 0xdb013c +}; + +static const u16 btb_int6_k2_attn_idx[1] = { + 133, +}; + +static struct attn_hw_reg btb_int6_k2 = { + 6, 1, btb_int6_k2_attn_idx, 0xdb0150, 0xdb015c, 0xdb0158, 0xdb0154 +}; + +static const u16 btb_int8_k2_attn_idx[1] = { + 134, +}; + +static struct attn_hw_reg btb_int8_k2 = { + 7, 1, btb_int8_k2_attn_idx, 0xdb0184, 0xdb0190, 0xdb018c, 0xdb0188 +}; + +static const u16 btb_int9_k2_attn_idx[1] = { + 135, +}; + +static struct attn_hw_reg btb_int9_k2 = { + 8, 1, btb_int9_k2_attn_idx, 0xdb019c, 0xdb01a8, 0xdb01a4, 0xdb01a0 +}; + +static const u16 btb_int10_k2_attn_idx[1] = { + 136, +}; + +static struct attn_hw_reg btb_int10_k2 = { + 9, 1, btb_int10_k2_attn_idx, 0xdb01b4, 0xdb01c0, 0xdb01bc, 0xdb01b8 +}; + +static const u16 btb_int11_k2_attn_idx[2] = { + 137, 138, +}; + +static struct attn_hw_reg btb_int11_k2 = { + 10, 2, btb_int11_k2_attn_idx, 0xdb01cc, 0xdb01d8, 0xdb01d4, 0xdb01d0 +}; + +static struct attn_hw_reg *btb_int_k2_regs[11] = { + &btb_int0_k2, &btb_int1_k2, &btb_int2_k2, &btb_int3_k2, &btb_int4_k2, + &btb_int5_k2, &btb_int6_k2, &btb_int8_k2, &btb_int9_k2, &btb_int10_k2, + &btb_int11_k2, +}; + +#ifdef ATTN_DESC +static const char *btb_prty_attn_desc[36] = { + "btb_ll_bank0_mem_prty", + "btb_ll_bank1_mem_prty", + "btb_ll_bank2_mem_prty", + "btb_ll_bank3_mem_prty", + "btb_datapath_registers", + "btb_mem001_i_ecc_rf_int", + "btb_mem008_i_ecc_rf_int", + "btb_mem009_i_ecc_rf_int", + "btb_mem010_i_ecc_rf_int", + "btb_mem011_i_ecc_rf_int", + "btb_mem012_i_ecc_rf_int", + "btb_mem013_i_ecc_rf_int", + "btb_mem014_i_ecc_rf_int", + "btb_mem015_i_ecc_rf_int", + "btb_mem016_i_ecc_rf_int", + "btb_mem002_i_ecc_rf_int", + "btb_mem003_i_ecc_rf_int", + "btb_mem004_i_ecc_rf_int", + "btb_mem005_i_ecc_rf_int", + "btb_mem006_i_ecc_rf_int", + "btb_mem007_i_ecc_rf_int", + "btb_mem033_i_mem_prty", + "btb_mem035_i_mem_prty", + "btb_mem034_i_mem_prty", + "btb_mem032_i_mem_prty", + "btb_mem031_i_mem_prty", + 
"btb_mem021_i_mem_prty", + "btb_mem022_i_mem_prty", + "btb_mem023_i_mem_prty", + "btb_mem024_i_mem_prty", + "btb_mem025_i_mem_prty", + "btb_mem026_i_mem_prty", + "btb_mem027_i_mem_prty", + "btb_mem028_i_mem_prty", + "btb_mem030_i_mem_prty", + "btb_mem029_i_mem_prty", +}; +#else +#define btb_prty_attn_desc OSAL_NULL +#endif + +static const u16 btb_prty1_bb_a0_attn_idx[27] = { + 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 25, 26, 27, + 28, + 29, 30, 31, 32, 33, 34, 35, +}; + +static struct attn_hw_reg btb_prty1_bb_a0 = { + 0, 27, btb_prty1_bb_a0_attn_idx, 0xdb0400, 0xdb040c, 0xdb0408, 0xdb0404 +}; + +static struct attn_hw_reg *btb_prty_bb_a0_regs[1] = { + &btb_prty1_bb_a0, +}; + +static const u16 btb_prty0_bb_b0_attn_idx[5] = { + 0, 1, 2, 3, 4, +}; + +static struct attn_hw_reg btb_prty0_bb_b0 = { + 0, 5, btb_prty0_bb_b0_attn_idx, 0xdb01dc, 0xdb01e8, 0xdb01e4, 0xdb01e0 +}; + +static const u16 btb_prty1_bb_b0_attn_idx[23] = { + 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 25, 30, 31, + 32, + 33, 34, 35, +}; + +static struct attn_hw_reg btb_prty1_bb_b0 = { + 1, 23, btb_prty1_bb_b0_attn_idx, 0xdb0400, 0xdb040c, 0xdb0408, 0xdb0404 +}; + +static struct attn_hw_reg *btb_prty_bb_b0_regs[2] = { + &btb_prty0_bb_b0, &btb_prty1_bb_b0, +}; + +static const u16 btb_prty0_k2_attn_idx[5] = { + 0, 1, 2, 3, 4, +}; + +static struct attn_hw_reg btb_prty0_k2 = { + 0, 5, btb_prty0_k2_attn_idx, 0xdb01dc, 0xdb01e8, 0xdb01e4, 0xdb01e0 +}; + +static const u16 btb_prty1_k2_attn_idx[31] = { + 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, + 24, + 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, +}; + +static struct attn_hw_reg btb_prty1_k2 = { + 1, 31, btb_prty1_k2_attn_idx, 0xdb0400, 0xdb040c, 0xdb0408, 0xdb0404 +}; + +static struct attn_hw_reg *btb_prty_k2_regs[2] = { + &btb_prty0_k2, &btb_prty1_k2, +}; + +#ifdef ATTN_DESC +static const char *pbf_int_attn_desc[1] = { + "pbf_address_error", +}; +#else +#define pbf_int_attn_desc OSAL_NULL +#endif + +static const u16 pbf_int0_bb_a0_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg pbf_int0_bb_a0 = { + 0, 1, pbf_int0_bb_a0_attn_idx, 0xd80180, 0xd8018c, 0xd80188, 0xd80184 +}; + +static struct attn_hw_reg *pbf_int_bb_a0_regs[1] = { + &pbf_int0_bb_a0, +}; + +static const u16 pbf_int0_bb_b0_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg pbf_int0_bb_b0 = { + 0, 1, pbf_int0_bb_b0_attn_idx, 0xd80180, 0xd8018c, 0xd80188, 0xd80184 +}; + +static struct attn_hw_reg *pbf_int_bb_b0_regs[1] = { + &pbf_int0_bb_b0, +}; + +static const u16 pbf_int0_k2_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg pbf_int0_k2 = { + 0, 1, pbf_int0_k2_attn_idx, 0xd80180, 0xd8018c, 0xd80188, 0xd80184 +}; + +static struct attn_hw_reg *pbf_int_k2_regs[1] = { + &pbf_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *pbf_prty_attn_desc[59] = { + "pbf_datapath_registers", + "pbf_mem041_i_ecc_rf_int", + "pbf_mem042_i_ecc_rf_int", + "pbf_mem033_i_ecc_rf_int", + "pbf_mem003_i_ecc_rf_int", + "pbf_mem018_i_ecc_rf_int", + "pbf_mem009_i_ecc_0_rf_int", + "pbf_mem009_i_ecc_1_rf_int", + "pbf_mem012_i_ecc_0_rf_int", + "pbf_mem012_i_ecc_1_rf_int", + "pbf_mem012_i_ecc_2_rf_int", + "pbf_mem012_i_ecc_3_rf_int", + "pbf_mem012_i_ecc_4_rf_int", + "pbf_mem012_i_ecc_5_rf_int", + "pbf_mem012_i_ecc_6_rf_int", + "pbf_mem012_i_ecc_7_rf_int", + "pbf_mem012_i_ecc_8_rf_int", + "pbf_mem012_i_ecc_9_rf_int", + "pbf_mem012_i_ecc_10_rf_int", + "pbf_mem012_i_ecc_11_rf_int", + "pbf_mem012_i_ecc_12_rf_int", + "pbf_mem012_i_ecc_13_rf_int", + "pbf_mem012_i_ecc_14_rf_int", + 
"pbf_mem012_i_ecc_15_rf_int", + "pbf_mem040_i_mem_prty", + "pbf_mem039_i_mem_prty", + "pbf_mem038_i_mem_prty", + "pbf_mem034_i_mem_prty", + "pbf_mem032_i_mem_prty", + "pbf_mem031_i_mem_prty", + "pbf_mem030_i_mem_prty", + "pbf_mem029_i_mem_prty", + "pbf_mem022_i_mem_prty", + "pbf_mem023_i_mem_prty", + "pbf_mem021_i_mem_prty", + "pbf_mem020_i_mem_prty", + "pbf_mem001_i_mem_prty", + "pbf_mem002_i_mem_prty", + "pbf_mem006_i_mem_prty", + "pbf_mem007_i_mem_prty", + "pbf_mem005_i_mem_prty", + "pbf_mem004_i_mem_prty", + "pbf_mem028_i_mem_prty", + "pbf_mem026_i_mem_prty", + "pbf_mem027_i_mem_prty", + "pbf_mem019_i_mem_prty", + "pbf_mem016_i_mem_prty", + "pbf_mem017_i_mem_prty", + "pbf_mem008_i_mem_prty", + "pbf_mem011_i_mem_prty", + "pbf_mem010_i_mem_prty", + "pbf_mem024_i_mem_prty", + "pbf_mem025_i_mem_prty", + "pbf_mem037_i_mem_prty", + "pbf_mem036_i_mem_prty", + "pbf_mem035_i_mem_prty", + "pbf_mem014_i_mem_prty", + "pbf_mem015_i_mem_prty", + "pbf_mem013_i_mem_prty", +}; +#else +#define pbf_prty_attn_desc OSAL_NULL +#endif + +static const u16 pbf_prty1_bb_a0_attn_idx[31] = { + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, + 21, + 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, +}; + +static struct attn_hw_reg pbf_prty1_bb_a0 = { + 0, 31, pbf_prty1_bb_a0_attn_idx, 0xd80200, 0xd8020c, 0xd80208, 0xd80204 +}; + +static const u16 pbf_prty2_bb_a0_attn_idx[27] = { + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, +}; + +static struct attn_hw_reg pbf_prty2_bb_a0 = { + 1, 27, pbf_prty2_bb_a0_attn_idx, 0xd80210, 0xd8021c, 0xd80218, 0xd80214 +}; + +static struct attn_hw_reg *pbf_prty_bb_a0_regs[2] = { + &pbf_prty1_bb_a0, &pbf_prty2_bb_a0, +}; + +static const u16 pbf_prty0_bb_b0_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg pbf_prty0_bb_b0 = { + 0, 1, pbf_prty0_bb_b0_attn_idx, 0xd80190, 0xd8019c, 0xd80198, 0xd80194 +}; + +static const u16 pbf_prty1_bb_b0_attn_idx[31] = { + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, + 21, + 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, +}; + +static struct attn_hw_reg pbf_prty1_bb_b0 = { + 1, 31, pbf_prty1_bb_b0_attn_idx, 0xd80200, 0xd8020c, 0xd80208, 0xd80204 +}; + +static const u16 pbf_prty2_bb_b0_attn_idx[27] = { + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, +}; + +static struct attn_hw_reg pbf_prty2_bb_b0 = { + 2, 27, pbf_prty2_bb_b0_attn_idx, 0xd80210, 0xd8021c, 0xd80218, 0xd80214 +}; + +static struct attn_hw_reg *pbf_prty_bb_b0_regs[3] = { + &pbf_prty0_bb_b0, &pbf_prty1_bb_b0, &pbf_prty2_bb_b0, +}; + +static const u16 pbf_prty0_k2_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg pbf_prty0_k2 = { + 0, 1, pbf_prty0_k2_attn_idx, 0xd80190, 0xd8019c, 0xd80198, 0xd80194 +}; + +static const u16 pbf_prty1_k2_attn_idx[31] = { + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, + 21, + 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, +}; + +static struct attn_hw_reg pbf_prty1_k2 = { + 1, 31, pbf_prty1_k2_attn_idx, 0xd80200, 0xd8020c, 0xd80208, 0xd80204 +}; + +static const u16 pbf_prty2_k2_attn_idx[27] = { + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, +}; + +static struct attn_hw_reg pbf_prty2_k2 = { + 2, 27, pbf_prty2_k2_attn_idx, 0xd80210, 0xd8021c, 0xd80218, 0xd80214 +}; + +static struct attn_hw_reg *pbf_prty_k2_regs[3] = { + &pbf_prty0_k2, &pbf_prty1_k2, &pbf_prty2_k2, +}; + +#ifdef ATTN_DESC +static const char 
*rdif_int_attn_desc[9] = { + "rdif_address_error", + "rdif_fatal_dix_err", + "rdif_fatal_config_err", + "rdif_cmd_fifo_err", + "rdif_order_fifo_err", + "rdif_rdata_fifo_err", + "rdif_dif_stop_err", + "rdif_partial_dif_w_eob", + "rdif_l1_dirty_bit", +}; +#else +#define rdif_int_attn_desc OSAL_NULL +#endif + +static const u16 rdif_int0_bb_a0_attn_idx[8] = { + 0, 1, 2, 3, 4, 5, 6, 7, +}; + +static struct attn_hw_reg rdif_int0_bb_a0 = { + 0, 8, rdif_int0_bb_a0_attn_idx, 0x300180, 0x30018c, 0x300188, 0x300184 +}; + +static struct attn_hw_reg *rdif_int_bb_a0_regs[1] = { + &rdif_int0_bb_a0, +}; + +static const u16 rdif_int0_bb_b0_attn_idx[8] = { + 0, 1, 2, 3, 4, 5, 6, 7, +}; + +static struct attn_hw_reg rdif_int0_bb_b0 = { + 0, 8, rdif_int0_bb_b0_attn_idx, 0x300180, 0x30018c, 0x300188, 0x300184 +}; + +static struct attn_hw_reg *rdif_int_bb_b0_regs[1] = { + &rdif_int0_bb_b0, +}; + +static const u16 rdif_int0_k2_attn_idx[9] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, +}; + +static struct attn_hw_reg rdif_int0_k2 = { + 0, 9, rdif_int0_k2_attn_idx, 0x300180, 0x30018c, 0x300188, 0x300184 +}; + +static struct attn_hw_reg *rdif_int_k2_regs[1] = { + &rdif_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *rdif_prty_attn_desc[2] = { + "rdif_unused_0", + "rdif_datapath_registers", +}; +#else +#define rdif_prty_attn_desc OSAL_NULL +#endif + +static const u16 rdif_prty0_bb_b0_attn_idx[1] = { + 1, +}; + +static struct attn_hw_reg rdif_prty0_bb_b0 = { + 0, 1, rdif_prty0_bb_b0_attn_idx, 0x300190, 0x30019c, 0x300198, 0x300194 +}; + +static struct attn_hw_reg *rdif_prty_bb_b0_regs[1] = { + &rdif_prty0_bb_b0, +}; + +static const u16 rdif_prty0_k2_attn_idx[1] = { + 1, +}; + +static struct attn_hw_reg rdif_prty0_k2 = { + 0, 1, rdif_prty0_k2_attn_idx, 0x300190, 0x30019c, 0x300198, 0x300194 +}; + +static struct attn_hw_reg *rdif_prty_k2_regs[1] = { + &rdif_prty0_k2, +}; + +#ifdef ATTN_DESC +static const char *tdif_int_attn_desc[9] = { + "tdif_address_error", + "tdif_fatal_dix_err", + "tdif_fatal_config_err", + "tdif_cmd_fifo_err", + "tdif_order_fifo_err", + "tdif_rdata_fifo_err", + "tdif_dif_stop_err", + "tdif_partial_dif_w_eob", + "tdif_l1_dirty_bit", +}; +#else +#define tdif_int_attn_desc OSAL_NULL +#endif + +static const u16 tdif_int0_bb_a0_attn_idx[8] = { + 0, 1, 2, 3, 4, 5, 6, 7, +}; + +static struct attn_hw_reg tdif_int0_bb_a0 = { + 0, 8, tdif_int0_bb_a0_attn_idx, 0x310180, 0x31018c, 0x310188, 0x310184 +}; + +static struct attn_hw_reg *tdif_int_bb_a0_regs[1] = { + &tdif_int0_bb_a0, +}; + +static const u16 tdif_int0_bb_b0_attn_idx[8] = { + 0, 1, 2, 3, 4, 5, 6, 7, +}; + +static struct attn_hw_reg tdif_int0_bb_b0 = { + 0, 8, tdif_int0_bb_b0_attn_idx, 0x310180, 0x31018c, 0x310188, 0x310184 +}; + +static struct attn_hw_reg *tdif_int_bb_b0_regs[1] = { + &tdif_int0_bb_b0, +}; + +static const u16 tdif_int0_k2_attn_idx[9] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, +}; + +static struct attn_hw_reg tdif_int0_k2 = { + 0, 9, tdif_int0_k2_attn_idx, 0x310180, 0x31018c, 0x310188, 0x310184 +}; + +static struct attn_hw_reg *tdif_int_k2_regs[1] = { + &tdif_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *tdif_prty_attn_desc[13] = { + "tdif_unused_0", + "tdif_datapath_registers", + "tdif_mem005_i_ecc_rf_int", + "tdif_mem009_i_ecc_rf_int", + "tdif_mem010_i_ecc_rf_int", + "tdif_mem011_i_ecc_rf_int", + "tdif_mem001_i_mem_prty", + "tdif_mem003_i_mem_prty", + "tdif_mem002_i_mem_prty", + "tdif_mem006_i_mem_prty", + "tdif_mem007_i_mem_prty", + "tdif_mem008_i_mem_prty", + "tdif_mem004_i_mem_prty", +}; +#else +#define tdif_prty_attn_desc OSAL_NULL +#endif 
+ +static const u16 tdif_prty1_bb_a0_attn_idx[11] = { + 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, +}; + +static struct attn_hw_reg tdif_prty1_bb_a0 = { + 0, 11, tdif_prty1_bb_a0_attn_idx, 0x310200, 0x31020c, 0x310208, + 0x310204 +}; + +static struct attn_hw_reg *tdif_prty_bb_a0_regs[1] = { + &tdif_prty1_bb_a0, +}; + +static const u16 tdif_prty0_bb_b0_attn_idx[1] = { + 1, +}; + +static struct attn_hw_reg tdif_prty0_bb_b0 = { + 0, 1, tdif_prty0_bb_b0_attn_idx, 0x310190, 0x31019c, 0x310198, 0x310194 +}; + +static const u16 tdif_prty1_bb_b0_attn_idx[11] = { + 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, +}; + +static struct attn_hw_reg tdif_prty1_bb_b0 = { + 1, 11, tdif_prty1_bb_b0_attn_idx, 0x310200, 0x31020c, 0x310208, + 0x310204 +}; + +static struct attn_hw_reg *tdif_prty_bb_b0_regs[2] = { + &tdif_prty0_bb_b0, &tdif_prty1_bb_b0, +}; + +static const u16 tdif_prty0_k2_attn_idx[1] = { + 1, +}; + +static struct attn_hw_reg tdif_prty0_k2 = { + 0, 1, tdif_prty0_k2_attn_idx, 0x310190, 0x31019c, 0x310198, 0x310194 +}; + +static const u16 tdif_prty1_k2_attn_idx[11] = { + 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, +}; + +static struct attn_hw_reg tdif_prty1_k2 = { + 1, 11, tdif_prty1_k2_attn_idx, 0x310200, 0x31020c, 0x310208, 0x310204 +}; + +static struct attn_hw_reg *tdif_prty_k2_regs[2] = { + &tdif_prty0_k2, &tdif_prty1_k2, +}; + +#ifdef ATTN_DESC +static const char *cdu_int_attn_desc[8] = { + "cdu_address_error", + "cdu_ccfc_ld_l1_num_error", + "cdu_tcfc_ld_l1_num_error", + "cdu_ccfc_wb_l1_num_error", + "cdu_tcfc_wb_l1_num_error", + "cdu_ccfc_cvld_error", + "cdu_tcfc_cvld_error", + "cdu_bvalid_error", +}; +#else +#define cdu_int_attn_desc OSAL_NULL +#endif + +static const u16 cdu_int0_bb_a0_attn_idx[8] = { + 0, 1, 2, 3, 4, 5, 6, 7, +}; + +static struct attn_hw_reg cdu_int0_bb_a0 = { + 0, 8, cdu_int0_bb_a0_attn_idx, 0x5801c0, 0x5801c4, 0x5801c8, 0x5801cc +}; + +static struct attn_hw_reg *cdu_int_bb_a0_regs[1] = { + &cdu_int0_bb_a0, +}; + +static const u16 cdu_int0_bb_b0_attn_idx[8] = { + 0, 1, 2, 3, 4, 5, 6, 7, +}; + +static struct attn_hw_reg cdu_int0_bb_b0 = { + 0, 8, cdu_int0_bb_b0_attn_idx, 0x5801c0, 0x5801c4, 0x5801c8, 0x5801cc +}; + +static struct attn_hw_reg *cdu_int_bb_b0_regs[1] = { + &cdu_int0_bb_b0, +}; + +static const u16 cdu_int0_k2_attn_idx[8] = { + 0, 1, 2, 3, 4, 5, 6, 7, +}; + +static struct attn_hw_reg cdu_int0_k2 = { + 0, 8, cdu_int0_k2_attn_idx, 0x5801c0, 0x5801c4, 0x5801c8, 0x5801cc +}; + +static struct attn_hw_reg *cdu_int_k2_regs[1] = { + &cdu_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *cdu_prty_attn_desc[5] = { + "cdu_mem001_i_mem_prty", + "cdu_mem004_i_mem_prty", + "cdu_mem002_i_mem_prty", + "cdu_mem005_i_mem_prty", + "cdu_mem003_i_mem_prty", +}; +#else +#define cdu_prty_attn_desc OSAL_NULL +#endif + +static const u16 cdu_prty1_bb_a0_attn_idx[5] = { + 0, 1, 2, 3, 4, +}; + +static struct attn_hw_reg cdu_prty1_bb_a0 = { + 0, 5, cdu_prty1_bb_a0_attn_idx, 0x580200, 0x58020c, 0x580208, 0x580204 +}; + +static struct attn_hw_reg *cdu_prty_bb_a0_regs[1] = { + &cdu_prty1_bb_a0, +}; + +static const u16 cdu_prty1_bb_b0_attn_idx[5] = { + 0, 1, 2, 3, 4, +}; + +static struct attn_hw_reg cdu_prty1_bb_b0 = { + 0, 5, cdu_prty1_bb_b0_attn_idx, 0x580200, 0x58020c, 0x580208, 0x580204 +}; + +static struct attn_hw_reg *cdu_prty_bb_b0_regs[1] = { + &cdu_prty1_bb_b0, +}; + +static const u16 cdu_prty1_k2_attn_idx[5] = { + 0, 1, 2, 3, 4, +}; + +static struct attn_hw_reg cdu_prty1_k2 = { + 0, 5, cdu_prty1_k2_attn_idx, 0x580200, 0x58020c, 0x580208, 0x580204 +}; + +static struct attn_hw_reg *cdu_prty_k2_regs[1] = { 
+ &cdu_prty1_k2, +}; + +#ifdef ATTN_DESC +static const char *ccfc_int_attn_desc[2] = { + "ccfc_address_error", + "ccfc_exe_error", +}; +#else +#define ccfc_int_attn_desc OSAL_NULL +#endif + +static const u16 ccfc_int0_bb_a0_attn_idx[2] = { + 0, 1, +}; + +static struct attn_hw_reg ccfc_int0_bb_a0 = { + 0, 2, ccfc_int0_bb_a0_attn_idx, 0x2e0180, 0x2e018c, 0x2e0188, 0x2e0184 +}; + +static struct attn_hw_reg *ccfc_int_bb_a0_regs[1] = { + &ccfc_int0_bb_a0, +}; + +static const u16 ccfc_int0_bb_b0_attn_idx[2] = { + 0, 1, +}; + +static struct attn_hw_reg ccfc_int0_bb_b0 = { + 0, 2, ccfc_int0_bb_b0_attn_idx, 0x2e0180, 0x2e018c, 0x2e0188, 0x2e0184 +}; + +static struct attn_hw_reg *ccfc_int_bb_b0_regs[1] = { + &ccfc_int0_bb_b0, +}; + +static const u16 ccfc_int0_k2_attn_idx[2] = { + 0, 1, +}; + +static struct attn_hw_reg ccfc_int0_k2 = { + 0, 2, ccfc_int0_k2_attn_idx, 0x2e0180, 0x2e018c, 0x2e0188, 0x2e0184 +}; + +static struct attn_hw_reg *ccfc_int_k2_regs[1] = { + &ccfc_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *ccfc_prty_attn_desc[10] = { + "ccfc_mem001_i_ecc_rf_int", + "ccfc_mem003_i_mem_prty", + "ccfc_mem007_i_mem_prty", + "ccfc_mem006_i_mem_prty", + "ccfc_ccam_par_err", + "ccfc_scam_par_err", + "ccfc_lc_que_ram_porta_lsb_par_err", + "ccfc_lc_que_ram_porta_msb_par_err", + "ccfc_lc_que_ram_portb_lsb_par_err", + "ccfc_lc_que_ram_portb_msb_par_err", +}; +#else +#define ccfc_prty_attn_desc OSAL_NULL +#endif + +static const u16 ccfc_prty1_bb_a0_attn_idx[4] = { + 0, 1, 2, 3, +}; + +static struct attn_hw_reg ccfc_prty1_bb_a0 = { + 0, 4, ccfc_prty1_bb_a0_attn_idx, 0x2e0200, 0x2e020c, 0x2e0208, 0x2e0204 +}; + +static const u16 ccfc_prty0_bb_a0_attn_idx[2] = { + 4, 5, +}; + +static struct attn_hw_reg ccfc_prty0_bb_a0 = { + 1, 2, ccfc_prty0_bb_a0_attn_idx, 0x2e05e4, 0x2e05f0, 0x2e05ec, 0x2e05e8 +}; + +static struct attn_hw_reg *ccfc_prty_bb_a0_regs[2] = { + &ccfc_prty1_bb_a0, &ccfc_prty0_bb_a0, +}; + +static const u16 ccfc_prty1_bb_b0_attn_idx[2] = { + 0, 1, +}; + +static struct attn_hw_reg ccfc_prty1_bb_b0 = { + 0, 2, ccfc_prty1_bb_b0_attn_idx, 0x2e0200, 0x2e020c, 0x2e0208, 0x2e0204 +}; + +static const u16 ccfc_prty0_bb_b0_attn_idx[6] = { + 4, 5, 6, 7, 8, 9, +}; + +static struct attn_hw_reg ccfc_prty0_bb_b0 = { + 1, 6, ccfc_prty0_bb_b0_attn_idx, 0x2e05e4, 0x2e05f0, 0x2e05ec, 0x2e05e8 +}; + +static struct attn_hw_reg *ccfc_prty_bb_b0_regs[2] = { + &ccfc_prty1_bb_b0, &ccfc_prty0_bb_b0, +}; + +static const u16 ccfc_prty1_k2_attn_idx[2] = { + 0, 1, +}; + +static struct attn_hw_reg ccfc_prty1_k2 = { + 0, 2, ccfc_prty1_k2_attn_idx, 0x2e0200, 0x2e020c, 0x2e0208, 0x2e0204 +}; + +static const u16 ccfc_prty0_k2_attn_idx[6] = { + 4, 5, 6, 7, 8, 9, +}; + +static struct attn_hw_reg ccfc_prty0_k2 = { + 1, 6, ccfc_prty0_k2_attn_idx, 0x2e05e4, 0x2e05f0, 0x2e05ec, 0x2e05e8 +}; + +static struct attn_hw_reg *ccfc_prty_k2_regs[2] = { + &ccfc_prty1_k2, &ccfc_prty0_k2, +}; + +#ifdef ATTN_DESC +static const char *tcfc_int_attn_desc[2] = { + "tcfc_address_error", + "tcfc_exe_error", +}; +#else +#define tcfc_int_attn_desc OSAL_NULL +#endif + +static const u16 tcfc_int0_bb_a0_attn_idx[2] = { + 0, 1, +}; + +static struct attn_hw_reg tcfc_int0_bb_a0 = { + 0, 2, tcfc_int0_bb_a0_attn_idx, 0x2d0180, 0x2d018c, 0x2d0188, 0x2d0184 +}; + +static struct attn_hw_reg *tcfc_int_bb_a0_regs[1] = { + &tcfc_int0_bb_a0, +}; + +static const u16 tcfc_int0_bb_b0_attn_idx[2] = { + 0, 1, +}; + +static struct attn_hw_reg tcfc_int0_bb_b0 = { + 0, 2, tcfc_int0_bb_b0_attn_idx, 0x2d0180, 0x2d018c, 0x2d0188, 0x2d0184 +}; + +static struct attn_hw_reg 
*tcfc_int_bb_b0_regs[1] = { + &tcfc_int0_bb_b0, +}; + +static const u16 tcfc_int0_k2_attn_idx[2] = { + 0, 1, +}; + +static struct attn_hw_reg tcfc_int0_k2 = { + 0, 2, tcfc_int0_k2_attn_idx, 0x2d0180, 0x2d018c, 0x2d0188, 0x2d0184 +}; + +static struct attn_hw_reg *tcfc_int_k2_regs[1] = { + &tcfc_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *tcfc_prty_attn_desc[10] = { + "tcfc_mem002_i_mem_prty", + "tcfc_mem001_i_mem_prty", + "tcfc_mem006_i_mem_prty", + "tcfc_mem005_i_mem_prty", + "tcfc_ccam_par_err", + "tcfc_scam_par_err", + "tcfc_lc_que_ram_porta_lsb_par_err", + "tcfc_lc_que_ram_porta_msb_par_err", + "tcfc_lc_que_ram_portb_lsb_par_err", + "tcfc_lc_que_ram_portb_msb_par_err", +}; +#else +#define tcfc_prty_attn_desc OSAL_NULL +#endif + +static const u16 tcfc_prty1_bb_a0_attn_idx[4] = { + 0, 1, 2, 3, +}; + +static struct attn_hw_reg tcfc_prty1_bb_a0 = { + 0, 4, tcfc_prty1_bb_a0_attn_idx, 0x2d0200, 0x2d020c, 0x2d0208, 0x2d0204 +}; + +static const u16 tcfc_prty0_bb_a0_attn_idx[2] = { + 4, 5, +}; + +static struct attn_hw_reg tcfc_prty0_bb_a0 = { + 1, 2, tcfc_prty0_bb_a0_attn_idx, 0x2d05e4, 0x2d05f0, 0x2d05ec, 0x2d05e8 +}; + +static struct attn_hw_reg *tcfc_prty_bb_a0_regs[2] = { + &tcfc_prty1_bb_a0, &tcfc_prty0_bb_a0, +}; + +static const u16 tcfc_prty1_bb_b0_attn_idx[2] = { + 0, 1, +}; + +static struct attn_hw_reg tcfc_prty1_bb_b0 = { + 0, 2, tcfc_prty1_bb_b0_attn_idx, 0x2d0200, 0x2d020c, 0x2d0208, 0x2d0204 +}; + +static const u16 tcfc_prty0_bb_b0_attn_idx[6] = { + 4, 5, 6, 7, 8, 9, +}; + +static struct attn_hw_reg tcfc_prty0_bb_b0 = { + 1, 6, tcfc_prty0_bb_b0_attn_idx, 0x2d05e4, 0x2d05f0, 0x2d05ec, 0x2d05e8 +}; + +static struct attn_hw_reg *tcfc_prty_bb_b0_regs[2] = { + &tcfc_prty1_bb_b0, &tcfc_prty0_bb_b0, +}; + +static const u16 tcfc_prty1_k2_attn_idx[2] = { + 0, 1, +}; + +static struct attn_hw_reg tcfc_prty1_k2 = { + 0, 2, tcfc_prty1_k2_attn_idx, 0x2d0200, 0x2d020c, 0x2d0208, 0x2d0204 +}; + +static const u16 tcfc_prty0_k2_attn_idx[6] = { + 4, 5, 6, 7, 8, 9, +}; + +static struct attn_hw_reg tcfc_prty0_k2 = { + 1, 6, tcfc_prty0_k2_attn_idx, 0x2d05e4, 0x2d05f0, 0x2d05ec, 0x2d05e8 +}; + +static struct attn_hw_reg *tcfc_prty_k2_regs[2] = { + &tcfc_prty1_k2, &tcfc_prty0_k2, +}; + +#ifdef ATTN_DESC +static const char *igu_int_attn_desc[11] = { + "igu_address_error", + "igu_ctrl_fifo_error_err", + "igu_pxp_req_length_too_big", + "igu_host_tries2access_prod_upd", + "igu_vf_tries2acc_attn_cmd", + "igu_mme_bigger_then_5", + "igu_sb_index_is_not_valid", + "igu_durin_int_read_with_simd_dis", + "igu_cmd_fid_not_match", + "igu_segment_access_invalid", + "igu_attn_prod_acc", +}; +#else +#define igu_int_attn_desc OSAL_NULL +#endif + +static const u16 igu_int0_bb_a0_attn_idx[11] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, +}; + +static struct attn_hw_reg igu_int0_bb_a0 = { + 0, 11, igu_int0_bb_a0_attn_idx, 0x180180, 0x18018c, 0x180188, 0x180184 +}; + +static struct attn_hw_reg *igu_int_bb_a0_regs[1] = { + &igu_int0_bb_a0, +}; + +static const u16 igu_int0_bb_b0_attn_idx[11] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, +}; + +static struct attn_hw_reg igu_int0_bb_b0 = { + 0, 11, igu_int0_bb_b0_attn_idx, 0x180180, 0x18018c, 0x180188, 0x180184 +}; + +static struct attn_hw_reg *igu_int_bb_b0_regs[1] = { + &igu_int0_bb_b0, +}; + +static const u16 igu_int0_k2_attn_idx[11] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, +}; + +static struct attn_hw_reg igu_int0_k2 = { + 0, 11, igu_int0_k2_attn_idx, 0x180180, 0x18018c, 0x180188, 0x180184 +}; + +static struct attn_hw_reg *igu_int_k2_regs[1] = { + &igu_int0_k2, +}; + +#ifdef 
ATTN_DESC +static const char *igu_prty_attn_desc[42] = { + "igu_cam_parity", + "igu_mem009_i_ecc_rf_int", + "igu_mem015_i_mem_prty", + "igu_mem016_i_mem_prty", + "igu_mem017_i_mem_prty", + "igu_mem018_i_mem_prty", + "igu_mem019_i_mem_prty", + "igu_mem001_i_mem_prty", + "igu_mem002_i_mem_prty_0", + "igu_mem002_i_mem_prty_1", + "igu_mem004_i_mem_prty_0", + "igu_mem004_i_mem_prty_1", + "igu_mem004_i_mem_prty_2", + "igu_mem003_i_mem_prty", + "igu_mem005_i_mem_prty", + "igu_mem006_i_mem_prty_0", + "igu_mem006_i_mem_prty_1", + "igu_mem008_i_mem_prty_0", + "igu_mem008_i_mem_prty_1", + "igu_mem008_i_mem_prty_2", + "igu_mem007_i_mem_prty", + "igu_mem010_i_mem_prty_0", + "igu_mem010_i_mem_prty_1", + "igu_mem012_i_mem_prty_0", + "igu_mem012_i_mem_prty_1", + "igu_mem012_i_mem_prty_2", + "igu_mem011_i_mem_prty", + "igu_mem013_i_mem_prty", + "igu_mem014_i_mem_prty", + "igu_mem020_i_mem_prty", + "igu_mem003_i_mem_prty_0", + "igu_mem003_i_mem_prty_1", + "igu_mem003_i_mem_prty_2", + "igu_mem002_i_mem_prty", + "igu_mem007_i_mem_prty_0", + "igu_mem007_i_mem_prty_1", + "igu_mem007_i_mem_prty_2", + "igu_mem006_i_mem_prty", + "igu_mem010_i_mem_prty_2", + "igu_mem010_i_mem_prty_3", + "igu_mem013_i_mem_prty_0", + "igu_mem013_i_mem_prty_1", +}; +#else +#define igu_prty_attn_desc OSAL_NULL +#endif + +static const u16 igu_prty0_bb_a0_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg igu_prty0_bb_a0 = { + 0, 1, igu_prty0_bb_a0_attn_idx, 0x180190, 0x18019c, 0x180198, 0x180194 +}; + +static const u16 igu_prty1_bb_a0_attn_idx[31] = { + 1, 3, 4, 5, 6, 7, 10, 11, 14, 17, 18, 21, 22, 23, 24, 25, 26, 28, 29, + 30, + 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, +}; + +static struct attn_hw_reg igu_prty1_bb_a0 = { + 1, 31, igu_prty1_bb_a0_attn_idx, 0x180200, 0x18020c, 0x180208, 0x180204 +}; + +static const u16 igu_prty2_bb_a0_attn_idx[1] = { + 2, +}; + +static struct attn_hw_reg igu_prty2_bb_a0 = { + 2, 1, igu_prty2_bb_a0_attn_idx, 0x180210, 0x18021c, 0x180218, 0x180214 +}; + +static struct attn_hw_reg *igu_prty_bb_a0_regs[3] = { + &igu_prty0_bb_a0, &igu_prty1_bb_a0, &igu_prty2_bb_a0, +}; + +static const u16 igu_prty0_bb_b0_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg igu_prty0_bb_b0 = { + 0, 1, igu_prty0_bb_b0_attn_idx, 0x180190, 0x18019c, 0x180198, 0x180194 +}; + +static const u16 igu_prty1_bb_b0_attn_idx[31] = { + 1, 3, 4, 5, 6, 7, 10, 11, 14, 17, 18, 21, 22, 23, 24, 25, 26, 28, 29, + 30, + 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, +}; + +static struct attn_hw_reg igu_prty1_bb_b0 = { + 1, 31, igu_prty1_bb_b0_attn_idx, 0x180200, 0x18020c, 0x180208, 0x180204 +}; + +static const u16 igu_prty2_bb_b0_attn_idx[1] = { + 2, +}; + +static struct attn_hw_reg igu_prty2_bb_b0 = { + 2, 1, igu_prty2_bb_b0_attn_idx, 0x180210, 0x18021c, 0x180218, 0x180214 +}; + +static struct attn_hw_reg *igu_prty_bb_b0_regs[3] = { + &igu_prty0_bb_b0, &igu_prty1_bb_b0, &igu_prty2_bb_b0, +}; + +static const u16 igu_prty0_k2_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg igu_prty0_k2 = { + 0, 1, igu_prty0_k2_attn_idx, 0x180190, 0x18019c, 0x180198, 0x180194 +}; + +static const u16 igu_prty1_k2_attn_idx[28] = { + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, + 21, + 22, 23, 24, 25, 26, 27, 28, +}; + +static struct attn_hw_reg igu_prty1_k2 = { + 1, 28, igu_prty1_k2_attn_idx, 0x180200, 0x18020c, 0x180208, 0x180204 +}; + +static struct attn_hw_reg *igu_prty_k2_regs[2] = { + &igu_prty0_k2, &igu_prty1_k2, +}; + +#ifdef ATTN_DESC +static const char *cau_int_attn_desc[11] = { + "cau_address_error", + 
"cau_unauthorized_pxp_rd_cmd", + "cau_unauthorized_pxp_length_cmd", + "cau_pxp_sb_address_error", + "cau_pxp_pi_number_error", + "cau_cleanup_reg_sb_idx_error", + "cau_fsm_invalid_line", + "cau_cqe_fifo_err", + "cau_igu_wdata_fifo_err", + "cau_igu_req_fifo_err", + "cau_igu_cmd_fifo_err", +}; +#else +#define cau_int_attn_desc OSAL_NULL +#endif + +static const u16 cau_int0_bb_a0_attn_idx[11] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, +}; + +static struct attn_hw_reg cau_int0_bb_a0 = { + 0, 11, cau_int0_bb_a0_attn_idx, 0x1c00d4, 0x1c00d8, 0x1c00dc, 0x1c00e0 +}; + +static struct attn_hw_reg *cau_int_bb_a0_regs[1] = { + &cau_int0_bb_a0, +}; + +static const u16 cau_int0_bb_b0_attn_idx[11] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, +}; + +static struct attn_hw_reg cau_int0_bb_b0 = { + 0, 11, cau_int0_bb_b0_attn_idx, 0x1c00d4, 0x1c00d8, 0x1c00dc, 0x1c00e0 +}; + +static struct attn_hw_reg *cau_int_bb_b0_regs[1] = { + &cau_int0_bb_b0, +}; + +static const u16 cau_int0_k2_attn_idx[11] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, +}; + +static struct attn_hw_reg cau_int0_k2 = { + 0, 11, cau_int0_k2_attn_idx, 0x1c00d4, 0x1c00d8, 0x1c00dc, 0x1c00e0 +}; + +static struct attn_hw_reg *cau_int_k2_regs[1] = { + &cau_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *cau_prty_attn_desc[15] = { + "cau_mem006_i_ecc_rf_int", + "cau_mem001_i_ecc_0_rf_int", + "cau_mem001_i_ecc_1_rf_int", + "cau_mem002_i_ecc_rf_int", + "cau_mem004_i_ecc_rf_int", + "cau_mem005_i_mem_prty", + "cau_mem007_i_mem_prty", + "cau_mem008_i_mem_prty", + "cau_mem009_i_mem_prty", + "cau_mem010_i_mem_prty", + "cau_mem011_i_mem_prty", + "cau_mem003_i_mem_prty_0", + "cau_mem003_i_mem_prty_1", + "cau_mem002_i_mem_prty", + "cau_mem004_i_mem_prty", +}; +#else +#define cau_prty_attn_desc OSAL_NULL +#endif + +static const u16 cau_prty1_bb_a0_attn_idx[13] = { + 0, 1, 2, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, +}; + +static struct attn_hw_reg cau_prty1_bb_a0 = { + 0, 13, cau_prty1_bb_a0_attn_idx, 0x1c0200, 0x1c020c, 0x1c0208, 0x1c0204 +}; + +static struct attn_hw_reg *cau_prty_bb_a0_regs[1] = { + &cau_prty1_bb_a0, +}; + +static const u16 cau_prty1_bb_b0_attn_idx[13] = { + 0, 1, 2, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, +}; + +static struct attn_hw_reg cau_prty1_bb_b0 = { + 0, 13, cau_prty1_bb_b0_attn_idx, 0x1c0200, 0x1c020c, 0x1c0208, 0x1c0204 +}; + +static struct attn_hw_reg *cau_prty_bb_b0_regs[1] = { + &cau_prty1_bb_b0, +}; + +static const u16 cau_prty1_k2_attn_idx[13] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, +}; + +static struct attn_hw_reg cau_prty1_k2 = { + 0, 13, cau_prty1_k2_attn_idx, 0x1c0200, 0x1c020c, 0x1c0208, 0x1c0204 +}; + +static struct attn_hw_reg *cau_prty_k2_regs[1] = { + &cau_prty1_k2, +}; + +#ifdef ATTN_DESC +static const char *umac_int_attn_desc[2] = { + "umac_address_error", + "umac_tx_overflow", +}; +#else +#define umac_int_attn_desc OSAL_NULL +#endif + +static const u16 umac_int0_k2_attn_idx[2] = { + 0, 1, +}; + +static struct attn_hw_reg umac_int0_k2 = { + 0, 2, umac_int0_k2_attn_idx, 0x51180, 0x5118c, 0x51188, 0x51184 +}; + +static struct attn_hw_reg *umac_int_k2_regs[1] = { + &umac_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *dbg_int_attn_desc[1] = { + "dbg_address_error", +}; +#else +#define dbg_int_attn_desc OSAL_NULL +#endif + +static const u16 dbg_int0_bb_a0_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg dbg_int0_bb_a0 = { + 0, 1, dbg_int0_bb_a0_attn_idx, 0x10180, 0x1018c, 0x10188, 0x10184 +}; + +static struct attn_hw_reg *dbg_int_bb_a0_regs[1] = { + &dbg_int0_bb_a0, +}; + +static const u16 dbg_int0_bb_b0_attn_idx[1] = 
{ + 0, +}; + +static struct attn_hw_reg dbg_int0_bb_b0 = { + 0, 1, dbg_int0_bb_b0_attn_idx, 0x10180, 0x1018c, 0x10188, 0x10184 +}; + +static struct attn_hw_reg *dbg_int_bb_b0_regs[1] = { + &dbg_int0_bb_b0, +}; + +static const u16 dbg_int0_k2_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg dbg_int0_k2 = { + 0, 1, dbg_int0_k2_attn_idx, 0x10180, 0x1018c, 0x10188, 0x10184 +}; + +static struct attn_hw_reg *dbg_int_k2_regs[1] = { + &dbg_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *dbg_prty_attn_desc[1] = { + "dbg_mem001_i_mem_prty", +}; +#else +#define dbg_prty_attn_desc OSAL_NULL +#endif + +static const u16 dbg_prty1_bb_a0_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg dbg_prty1_bb_a0 = { + 0, 1, dbg_prty1_bb_a0_attn_idx, 0x10200, 0x1020c, 0x10208, 0x10204 +}; + +static struct attn_hw_reg *dbg_prty_bb_a0_regs[1] = { + &dbg_prty1_bb_a0, +}; + +static const u16 dbg_prty1_bb_b0_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg dbg_prty1_bb_b0 = { + 0, 1, dbg_prty1_bb_b0_attn_idx, 0x10200, 0x1020c, 0x10208, 0x10204 +}; + +static struct attn_hw_reg *dbg_prty_bb_b0_regs[1] = { + &dbg_prty1_bb_b0, +}; + +static const u16 dbg_prty1_k2_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg dbg_prty1_k2 = { + 0, 1, dbg_prty1_k2_attn_idx, 0x10200, 0x1020c, 0x10208, 0x10204 +}; + +static struct attn_hw_reg *dbg_prty_k2_regs[1] = { + &dbg_prty1_k2, +}; + +#ifdef ATTN_DESC +static const char *nig_int_attn_desc[196] = { + "nig_address_error", + "nig_debug_fifo_error", + "nig_dorq_fifo_error", + "nig_dbg_syncfifo_error_wr", + "nig_dorq_syncfifo_error_wr", + "nig_storm_syncfifo_error_wr", + "nig_dbgmux_syncfifo_error_wr", + "nig_msdm_syncfifo_error_wr", + "nig_tsdm_syncfifo_error_wr", + "nig_usdm_syncfifo_error_wr", + "nig_xsdm_syncfifo_error_wr", + "nig_ysdm_syncfifo_error_wr", + "nig_tx_sopq0_error", + "nig_tx_sopq1_error", + "nig_tx_sopq2_error", + "nig_tx_sopq3_error", + "nig_tx_sopq4_error", + "nig_tx_sopq5_error", + "nig_tx_sopq6_error", + "nig_tx_sopq7_error", + "nig_tx_sopq8_error", + "nig_tx_sopq9_error", + "nig_tx_sopq10_error", + "nig_tx_sopq11_error", + "nig_tx_sopq12_error", + "nig_tx_sopq13_error", + "nig_tx_sopq14_error", + "nig_tx_sopq15_error", + "nig_lb_sopq0_error", + "nig_lb_sopq1_error", + "nig_lb_sopq2_error", + "nig_lb_sopq3_error", + "nig_lb_sopq4_error", + "nig_lb_sopq5_error", + "nig_lb_sopq6_error", + "nig_lb_sopq7_error", + "nig_lb_sopq8_error", + "nig_lb_sopq9_error", + "nig_lb_sopq10_error", + "nig_lb_sopq11_error", + "nig_lb_sopq12_error", + "nig_lb_sopq13_error", + "nig_lb_sopq14_error", + "nig_lb_sopq15_error", + "nig_p0_purelb_sopq_error", + "nig_p0_rx_macfifo_error", + "nig_p0_tx_macfifo_error", + "nig_p0_tx_bmb_fifo_error", + "nig_p0_lb_bmb_fifo_error", + "nig_p0_tx_btb_fifo_error", + "nig_p0_lb_btb_fifo_error", + "nig_p0_rx_llh_dfifo_error", + "nig_p0_tx_llh_dfifo_error", + "nig_p0_lb_llh_dfifo_error", + "nig_p0_rx_llh_hfifo_error", + "nig_p0_tx_llh_hfifo_error", + "nig_p0_lb_llh_hfifo_error", + "nig_p0_rx_llh_rfifo_error", + "nig_p0_tx_llh_rfifo_error", + "nig_p0_lb_llh_rfifo_error", + "nig_p0_storm_fifo_error", + "nig_p0_storm_dscr_fifo_error", + "nig_p0_tx_gnt_fifo_error", + "nig_p0_lb_gnt_fifo_error", + "nig_p0_tx_pause_too_long_int", + "nig_p0_tc0_pause_too_long_int", + "nig_p0_tc1_pause_too_long_int", + "nig_p0_tc2_pause_too_long_int", + "nig_p0_tc3_pause_too_long_int", + "nig_p0_tc4_pause_too_long_int", + "nig_p0_tc5_pause_too_long_int", + "nig_p0_tc6_pause_too_long_int", + "nig_p0_tc7_pause_too_long_int", + "nig_p0_lb_tc0_pause_too_long_int", 
+ "nig_p0_lb_tc1_pause_too_long_int", + "nig_p0_lb_tc2_pause_too_long_int", + "nig_p0_lb_tc3_pause_too_long_int", + "nig_p0_lb_tc4_pause_too_long_int", + "nig_p0_lb_tc5_pause_too_long_int", + "nig_p0_lb_tc6_pause_too_long_int", + "nig_p0_lb_tc7_pause_too_long_int", + "nig_p0_lb_tc8_pause_too_long_int", + "nig_p1_purelb_sopq_error", + "nig_p1_rx_macfifo_error", + "nig_p1_tx_macfifo_error", + "nig_p1_tx_bmb_fifo_error", + "nig_p1_lb_bmb_fifo_error", + "nig_p1_tx_btb_fifo_error", + "nig_p1_lb_btb_fifo_error", + "nig_p1_rx_llh_dfifo_error", + "nig_p1_tx_llh_dfifo_error", + "nig_p1_lb_llh_dfifo_error", + "nig_p1_rx_llh_hfifo_error", + "nig_p1_tx_llh_hfifo_error", + "nig_p1_lb_llh_hfifo_error", + "nig_p1_rx_llh_rfifo_error", + "nig_p1_tx_llh_rfifo_error", + "nig_p1_lb_llh_rfifo_error", + "nig_p1_storm_fifo_error", + "nig_p1_storm_dscr_fifo_error", + "nig_p1_tx_gnt_fifo_error", + "nig_p1_lb_gnt_fifo_error", + "nig_p1_tx_pause_too_long_int", + "nig_p1_tc0_pause_too_long_int", + "nig_p1_tc1_pause_too_long_int", + "nig_p1_tc2_pause_too_long_int", + "nig_p1_tc3_pause_too_long_int", + "nig_p1_tc4_pause_too_long_int", + "nig_p1_tc5_pause_too_long_int", + "nig_p1_tc6_pause_too_long_int", + "nig_p1_tc7_pause_too_long_int", + "nig_p1_lb_tc0_pause_too_long_int", + "nig_p1_lb_tc1_pause_too_long_int", + "nig_p1_lb_tc2_pause_too_long_int", + "nig_p1_lb_tc3_pause_too_long_int", + "nig_p1_lb_tc4_pause_too_long_int", + "nig_p1_lb_tc5_pause_too_long_int", + "nig_p1_lb_tc6_pause_too_long_int", + "nig_p1_lb_tc7_pause_too_long_int", + "nig_p1_lb_tc8_pause_too_long_int", + "nig_p2_purelb_sopq_error", + "nig_p2_rx_macfifo_error", + "nig_p2_tx_macfifo_error", + "nig_p2_tx_bmb_fifo_error", + "nig_p2_lb_bmb_fifo_error", + "nig_p2_tx_btb_fifo_error", + "nig_p2_lb_btb_fifo_error", + "nig_p2_rx_llh_dfifo_error", + "nig_p2_tx_llh_dfifo_error", + "nig_p2_lb_llh_dfifo_error", + "nig_p2_rx_llh_hfifo_error", + "nig_p2_tx_llh_hfifo_error", + "nig_p2_lb_llh_hfifo_error", + "nig_p2_rx_llh_rfifo_error", + "nig_p2_tx_llh_rfifo_error", + "nig_p2_lb_llh_rfifo_error", + "nig_p2_storm_fifo_error", + "nig_p2_storm_dscr_fifo_error", + "nig_p2_tx_gnt_fifo_error", + "nig_p2_lb_gnt_fifo_error", + "nig_p2_tx_pause_too_long_int", + "nig_p2_tc0_pause_too_long_int", + "nig_p2_tc1_pause_too_long_int", + "nig_p2_tc2_pause_too_long_int", + "nig_p2_tc3_pause_too_long_int", + "nig_p2_tc4_pause_too_long_int", + "nig_p2_tc5_pause_too_long_int", + "nig_p2_tc6_pause_too_long_int", + "nig_p2_tc7_pause_too_long_int", + "nig_p2_lb_tc0_pause_too_long_int", + "nig_p2_lb_tc1_pause_too_long_int", + "nig_p2_lb_tc2_pause_too_long_int", + "nig_p2_lb_tc3_pause_too_long_int", + "nig_p2_lb_tc4_pause_too_long_int", + "nig_p2_lb_tc5_pause_too_long_int", + "nig_p2_lb_tc6_pause_too_long_int", + "nig_p2_lb_tc7_pause_too_long_int", + "nig_p2_lb_tc8_pause_too_long_int", + "nig_p3_purelb_sopq_error", + "nig_p3_rx_macfifo_error", + "nig_p3_tx_macfifo_error", + "nig_p3_tx_bmb_fifo_error", + "nig_p3_lb_bmb_fifo_error", + "nig_p3_tx_btb_fifo_error", + "nig_p3_lb_btb_fifo_error", + "nig_p3_rx_llh_dfifo_error", + "nig_p3_tx_llh_dfifo_error", + "nig_p3_lb_llh_dfifo_error", + "nig_p3_rx_llh_hfifo_error", + "nig_p3_tx_llh_hfifo_error", + "nig_p3_lb_llh_hfifo_error", + "nig_p3_rx_llh_rfifo_error", + "nig_p3_tx_llh_rfifo_error", + "nig_p3_lb_llh_rfifo_error", + "nig_p3_storm_fifo_error", + "nig_p3_storm_dscr_fifo_error", + "nig_p3_tx_gnt_fifo_error", + "nig_p3_lb_gnt_fifo_error", + "nig_p3_tx_pause_too_long_int", + "nig_p3_tc0_pause_too_long_int", + "nig_p3_tc1_pause_too_long_int", + 
"nig_p3_tc2_pause_too_long_int", + "nig_p3_tc3_pause_too_long_int", + "nig_p3_tc4_pause_too_long_int", + "nig_p3_tc5_pause_too_long_int", + "nig_p3_tc6_pause_too_long_int", + "nig_p3_tc7_pause_too_long_int", + "nig_p3_lb_tc0_pause_too_long_int", + "nig_p3_lb_tc1_pause_too_long_int", + "nig_p3_lb_tc2_pause_too_long_int", + "nig_p3_lb_tc3_pause_too_long_int", + "nig_p3_lb_tc4_pause_too_long_int", + "nig_p3_lb_tc5_pause_too_long_int", + "nig_p3_lb_tc6_pause_too_long_int", + "nig_p3_lb_tc7_pause_too_long_int", + "nig_p3_lb_tc8_pause_too_long_int", +}; +#else +#define nig_int_attn_desc OSAL_NULL +#endif + +static const u16 nig_int0_bb_a0_attn_idx[12] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, +}; + +static struct attn_hw_reg nig_int0_bb_a0 = { + 0, 12, nig_int0_bb_a0_attn_idx, 0x500040, 0x50004c, 0x500048, 0x500044 +}; + +static const u16 nig_int1_bb_a0_attn_idx[32] = { + 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, +}; + +static struct attn_hw_reg nig_int1_bb_a0 = { + 1, 32, nig_int1_bb_a0_attn_idx, 0x500050, 0x50005c, 0x500058, 0x500054 +}; + +static const u16 nig_int2_bb_a0_attn_idx[20] = { + 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, + 62, 63, +}; + +static struct attn_hw_reg nig_int2_bb_a0 = { + 2, 20, nig_int2_bb_a0_attn_idx, 0x500060, 0x50006c, 0x500068, 0x500064 +}; + +static const u16 nig_int3_bb_a0_attn_idx[18] = { + 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, +}; + +static struct attn_hw_reg nig_int3_bb_a0 = { + 3, 18, nig_int3_bb_a0_attn_idx, 0x500070, 0x50007c, 0x500078, 0x500074 +}; + +static const u16 nig_int4_bb_a0_attn_idx[20] = { + 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, + 100, 101, +}; + +static struct attn_hw_reg nig_int4_bb_a0 = { + 4, 20, nig_int4_bb_a0_attn_idx, 0x500080, 0x50008c, 0x500088, 0x500084 +}; + +static const u16 nig_int5_bb_a0_attn_idx[18] = { + 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, + 116, + 117, 118, 119, +}; + +static struct attn_hw_reg nig_int5_bb_a0 = { + 5, 18, nig_int5_bb_a0_attn_idx, 0x500090, 0x50009c, 0x500098, 0x500094 +}; + +static struct attn_hw_reg *nig_int_bb_a0_regs[6] = { + &nig_int0_bb_a0, &nig_int1_bb_a0, &nig_int2_bb_a0, &nig_int3_bb_a0, + &nig_int4_bb_a0, &nig_int5_bb_a0, +}; + +static const u16 nig_int0_bb_b0_attn_idx[12] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, +}; + +static struct attn_hw_reg nig_int0_bb_b0 = { + 0, 12, nig_int0_bb_b0_attn_idx, 0x500040, 0x50004c, 0x500048, 0x500044 +}; + +static const u16 nig_int1_bb_b0_attn_idx[32] = { + 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, +}; + +static struct attn_hw_reg nig_int1_bb_b0 = { + 1, 32, nig_int1_bb_b0_attn_idx, 0x500050, 0x50005c, 0x500058, 0x500054 +}; + +static const u16 nig_int2_bb_b0_attn_idx[20] = { + 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, + 62, 63, +}; + +static struct attn_hw_reg nig_int2_bb_b0 = { + 2, 20, nig_int2_bb_b0_attn_idx, 0x500060, 0x50006c, 0x500068, 0x500064 +}; + +static const u16 nig_int3_bb_b0_attn_idx[18] = { + 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, +}; + +static struct attn_hw_reg nig_int3_bb_b0 = { + 3, 18, nig_int3_bb_b0_attn_idx, 0x500070, 0x50007c, 0x500078, 0x500074 +}; + +static const u16 nig_int4_bb_b0_attn_idx[20] = { + 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, + 
100, 101, +}; + +static struct attn_hw_reg nig_int4_bb_b0 = { + 4, 20, nig_int4_bb_b0_attn_idx, 0x500080, 0x50008c, 0x500088, 0x500084 +}; + +static const u16 nig_int5_bb_b0_attn_idx[18] = { + 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, + 116, + 117, 118, 119, +}; + +static struct attn_hw_reg nig_int5_bb_b0 = { + 5, 18, nig_int5_bb_b0_attn_idx, 0x500090, 0x50009c, 0x500098, 0x500094 +}; + +static struct attn_hw_reg *nig_int_bb_b0_regs[6] = { + &nig_int0_bb_b0, &nig_int1_bb_b0, &nig_int2_bb_b0, &nig_int3_bb_b0, + &nig_int4_bb_b0, &nig_int5_bb_b0, +}; + +static const u16 nig_int0_k2_attn_idx[12] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, +}; + +static struct attn_hw_reg nig_int0_k2 = { + 0, 12, nig_int0_k2_attn_idx, 0x500040, 0x50004c, 0x500048, 0x500044 +}; + +static const u16 nig_int1_k2_attn_idx[32] = { + 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, +}; + +static struct attn_hw_reg nig_int1_k2 = { + 1, 32, nig_int1_k2_attn_idx, 0x500050, 0x50005c, 0x500058, 0x500054 +}; + +static const u16 nig_int2_k2_attn_idx[20] = { + 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, + 62, 63, +}; + +static struct attn_hw_reg nig_int2_k2 = { + 2, 20, nig_int2_k2_attn_idx, 0x500060, 0x50006c, 0x500068, 0x500064 +}; + +static const u16 nig_int3_k2_attn_idx[18] = { + 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, +}; + +static struct attn_hw_reg nig_int3_k2 = { + 3, 18, nig_int3_k2_attn_idx, 0x500070, 0x50007c, 0x500078, 0x500074 +}; + +static const u16 nig_int4_k2_attn_idx[20] = { + 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, + 100, 101, +}; + +static struct attn_hw_reg nig_int4_k2 = { + 4, 20, nig_int4_k2_attn_idx, 0x500080, 0x50008c, 0x500088, 0x500084 +}; + +static const u16 nig_int5_k2_attn_idx[18] = { + 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, + 116, + 117, 118, 119, +}; + +static struct attn_hw_reg nig_int5_k2 = { + 5, 18, nig_int5_k2_attn_idx, 0x500090, 0x50009c, 0x500098, 0x500094 +}; + +static const u16 nig_int6_k2_attn_idx[20] = { + 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, + 134, + 135, 136, 137, 138, 139, +}; + +static struct attn_hw_reg nig_int6_k2 = { + 6, 20, nig_int6_k2_attn_idx, 0x5000a0, 0x5000ac, 0x5000a8, 0x5000a4 +}; + +static const u16 nig_int7_k2_attn_idx[18] = { + 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, + 154, + 155, 156, 157, +}; + +static struct attn_hw_reg nig_int7_k2 = { + 7, 18, nig_int7_k2_attn_idx, 0x5000b0, 0x5000bc, 0x5000b8, 0x5000b4 +}; + +static const u16 nig_int8_k2_attn_idx[20] = { + 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, + 172, + 173, 174, 175, 176, 177, +}; + +static struct attn_hw_reg nig_int8_k2 = { + 8, 20, nig_int8_k2_attn_idx, 0x5000c0, 0x5000cc, 0x5000c8, 0x5000c4 +}; + +static const u16 nig_int9_k2_attn_idx[18] = { + 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, + 192, + 193, 194, 195, +}; + +static struct attn_hw_reg nig_int9_k2 = { + 9, 18, nig_int9_k2_attn_idx, 0x5000d0, 0x5000dc, 0x5000d8, 0x5000d4 +}; + +static struct attn_hw_reg *nig_int_k2_regs[10] = { + &nig_int0_k2, &nig_int1_k2, &nig_int2_k2, &nig_int3_k2, &nig_int4_k2, + &nig_int5_k2, &nig_int6_k2, &nig_int7_k2, &nig_int8_k2, &nig_int9_k2, +}; + +#ifdef ATTN_DESC +static const char *nig_prty_attn_desc[113] = { + "nig_datapath_parity_error", + "nig_mem107_i_mem_prty", + 
"nig_mem103_i_mem_prty", + "nig_mem104_i_mem_prty", + "nig_mem105_i_mem_prty", + "nig_mem106_i_mem_prty", + "nig_mem072_i_mem_prty", + "nig_mem071_i_mem_prty", + "nig_mem074_i_mem_prty", + "nig_mem073_i_mem_prty", + "nig_mem076_i_mem_prty", + "nig_mem075_i_mem_prty", + "nig_mem078_i_mem_prty", + "nig_mem077_i_mem_prty", + "nig_mem055_i_mem_prty", + "nig_mem062_i_mem_prty", + "nig_mem063_i_mem_prty", + "nig_mem064_i_mem_prty", + "nig_mem065_i_mem_prty", + "nig_mem066_i_mem_prty", + "nig_mem067_i_mem_prty", + "nig_mem068_i_mem_prty", + "nig_mem069_i_mem_prty", + "nig_mem070_i_mem_prty", + "nig_mem056_i_mem_prty", + "nig_mem057_i_mem_prty", + "nig_mem058_i_mem_prty", + "nig_mem059_i_mem_prty", + "nig_mem060_i_mem_prty", + "nig_mem061_i_mem_prty", + "nig_mem035_i_mem_prty", + "nig_mem046_i_mem_prty", + "nig_mem051_i_mem_prty", + "nig_mem052_i_mem_prty", + "nig_mem090_i_mem_prty", + "nig_mem089_i_mem_prty", + "nig_mem092_i_mem_prty", + "nig_mem091_i_mem_prty", + "nig_mem109_i_mem_prty", + "nig_mem110_i_mem_prty", + "nig_mem001_i_mem_prty", + "nig_mem008_i_mem_prty", + "nig_mem009_i_mem_prty", + "nig_mem010_i_mem_prty", + "nig_mem011_i_mem_prty", + "nig_mem012_i_mem_prty", + "nig_mem013_i_mem_prty", + "nig_mem014_i_mem_prty", + "nig_mem015_i_mem_prty", + "nig_mem016_i_mem_prty", + "nig_mem002_i_mem_prty", + "nig_mem003_i_mem_prty", + "nig_mem004_i_mem_prty", + "nig_mem005_i_mem_prty", + "nig_mem006_i_mem_prty", + "nig_mem007_i_mem_prty", + "nig_mem080_i_mem_prty", + "nig_mem081_i_mem_prty", + "nig_mem082_i_mem_prty", + "nig_mem083_i_mem_prty", + "nig_mem048_i_mem_prty", + "nig_mem049_i_mem_prty", + "nig_mem102_i_mem_prty", + "nig_mem087_i_mem_prty", + "nig_mem086_i_mem_prty", + "nig_mem088_i_mem_prty", + "nig_mem079_i_mem_prty", + "nig_mem047_i_mem_prty", + "nig_mem050_i_mem_prty", + "nig_mem053_i_mem_prty", + "nig_mem054_i_mem_prty", + "nig_mem036_i_mem_prty", + "nig_mem037_i_mem_prty", + "nig_mem038_i_mem_prty", + "nig_mem039_i_mem_prty", + "nig_mem040_i_mem_prty", + "nig_mem041_i_mem_prty", + "nig_mem042_i_mem_prty", + "nig_mem043_i_mem_prty", + "nig_mem044_i_mem_prty", + "nig_mem045_i_mem_prty", + "nig_mem093_i_mem_prty", + "nig_mem094_i_mem_prty", + "nig_mem027_i_mem_prty", + "nig_mem028_i_mem_prty", + "nig_mem029_i_mem_prty", + "nig_mem030_i_mem_prty", + "nig_mem017_i_mem_prty", + "nig_mem018_i_mem_prty", + "nig_mem095_i_mem_prty", + "nig_mem084_i_mem_prty", + "nig_mem085_i_mem_prty", + "nig_mem099_i_mem_prty", + "nig_mem100_i_mem_prty", + "nig_mem096_i_mem_prty", + "nig_mem097_i_mem_prty", + "nig_mem098_i_mem_prty", + "nig_mem031_i_mem_prty", + "nig_mem032_i_mem_prty", + "nig_mem033_i_mem_prty", + "nig_mem034_i_mem_prty", + "nig_mem019_i_mem_prty", + "nig_mem020_i_mem_prty", + "nig_mem021_i_mem_prty", + "nig_mem022_i_mem_prty", + "nig_mem101_i_mem_prty", + "nig_mem023_i_mem_prty", + "nig_mem024_i_mem_prty", + "nig_mem025_i_mem_prty", + "nig_mem026_i_mem_prty", + "nig_mem108_i_mem_prty", + "nig_mem031_ext_i_mem_prty", + "nig_mem034_ext_i_mem_prty", +}; +#else +#define nig_prty_attn_desc OSAL_NULL +#endif + +static const u16 nig_prty1_bb_a0_attn_idx[31] = { + 1, 2, 5, 12, 13, 23, 35, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, + 52, 53, 54, 55, 56, 60, 61, 62, 63, 64, 65, 66, +}; + +static struct attn_hw_reg nig_prty1_bb_a0 = { + 0, 31, nig_prty1_bb_a0_attn_idx, 0x500200, 0x50020c, 0x500208, 0x500204 +}; + +static const u16 nig_prty2_bb_a0_attn_idx[31] = { + 33, 69, 70, 90, 91, 8, 11, 10, 14, 17, 18, 19, 20, 21, 22, 7, 6, 24, 25, + 26, 27, 28, 29, 15, 16, 57, 58, 59, 9, 94, 95, +}; + 
+static struct attn_hw_reg nig_prty2_bb_a0 = { + 1, 31, nig_prty2_bb_a0_attn_idx, 0x500210, 0x50021c, 0x500218, 0x500214 +}; + +static const u16 nig_prty3_bb_a0_attn_idx[31] = { + 96, 97, 98, 103, 104, 92, 93, 105, 106, 107, 108, 109, 80, 31, 67, 83, + 84, + 3, 68, 85, 86, 89, 77, 78, 79, 4, 32, 36, 81, 82, 87, +}; + +static struct attn_hw_reg nig_prty3_bb_a0 = { + 2, 31, nig_prty3_bb_a0_attn_idx, 0x500220, 0x50022c, 0x500228, 0x500224 +}; + +static const u16 nig_prty4_bb_a0_attn_idx[14] = { + 88, 101, 102, 75, 71, 74, 76, 73, 72, 34, 37, 99, 30, 100, +}; + +static struct attn_hw_reg nig_prty4_bb_a0 = { + 3, 14, nig_prty4_bb_a0_attn_idx, 0x500230, 0x50023c, 0x500238, 0x500234 +}; + +static struct attn_hw_reg *nig_prty_bb_a0_regs[4] = { + &nig_prty1_bb_a0, &nig_prty2_bb_a0, &nig_prty3_bb_a0, &nig_prty4_bb_a0, +}; + +static const u16 nig_prty0_bb_b0_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg nig_prty0_bb_b0 = { + 0, 1, nig_prty0_bb_b0_attn_idx, 0x5000a0, 0x5000ac, 0x5000a8, 0x5000a4 +}; + +static const u16 nig_prty1_bb_b0_attn_idx[31] = { + 4, 5, 9, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, + 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, +}; + +static struct attn_hw_reg nig_prty1_bb_b0 = { + 1, 31, nig_prty1_bb_b0_attn_idx, 0x500200, 0x50020c, 0x500208, 0x500204 +}; + +static const u16 nig_prty2_bb_b0_attn_idx[31] = { + 90, 91, 64, 63, 65, 8, 11, 10, 13, 12, 66, 14, 17, 18, 19, 20, 21, 22, + 23, + 7, 6, 24, 25, 26, 27, 28, 29, 15, 16, 92, 93, +}; + +static struct attn_hw_reg nig_prty2_bb_b0 = { + 2, 31, nig_prty2_bb_b0_attn_idx, 0x500210, 0x50021c, 0x500218, 0x500214 +}; + +static const u16 nig_prty3_bb_b0_attn_idx[31] = { + 94, 95, 96, 97, 99, 100, 103, 104, 105, 62, 108, 109, 80, 31, 1, 67, 60, + 69, 83, 84, 2, 3, 110, 61, 68, 70, 85, 86, 111, 112, 89, +}; + +static struct attn_hw_reg nig_prty3_bb_b0 = { + 3, 31, nig_prty3_bb_b0_attn_idx, 0x500220, 0x50022c, 0x500228, 0x500224 +}; + +static const u16 nig_prty4_bb_b0_attn_idx[17] = { + 106, 107, 87, 88, 81, 82, 101, 102, 75, 71, 74, 76, 77, 78, 79, 73, 72, +}; + +static struct attn_hw_reg nig_prty4_bb_b0 = { + 4, 17, nig_prty4_bb_b0_attn_idx, 0x500230, 0x50023c, 0x500238, 0x500234 +}; + +static struct attn_hw_reg *nig_prty_bb_b0_regs[5] = { + &nig_prty0_bb_b0, &nig_prty1_bb_b0, &nig_prty2_bb_b0, &nig_prty3_bb_b0, + &nig_prty4_bb_b0, +}; + +static const u16 nig_prty0_k2_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg nig_prty0_k2 = { + 0, 1, nig_prty0_k2_attn_idx, 0x5000e0, 0x5000ec, 0x5000e8, 0x5000e4 +}; + +static const u16 nig_prty1_k2_attn_idx[31] = { + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, + 21, + 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, +}; + +static struct attn_hw_reg nig_prty1_k2 = { + 1, 31, nig_prty1_k2_attn_idx, 0x500200, 0x50020c, 0x500208, 0x500204 +}; + +static const u16 nig_prty2_k2_attn_idx[31] = { + 67, 60, 61, 68, 32, 33, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, + 37, 36, 81, 82, 83, 84, 85, 86, 48, 49, 87, 88, 89, +}; + +static struct attn_hw_reg nig_prty2_k2 = { + 2, 31, nig_prty2_k2_attn_idx, 0x500210, 0x50021c, 0x500218, 0x500214 +}; + +static const u16 nig_prty3_k2_attn_idx[31] = { + 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 92, 93, 105, 62, 106, + 107, 108, 109, 59, 90, 91, 64, 55, 41, 42, 43, 63, 65, 35, 34, +}; + +static struct attn_hw_reg nig_prty3_k2 = { + 3, 31, nig_prty3_k2_attn_idx, 0x500220, 0x50022c, 0x500228, 0x500224 +}; + +static const u16 nig_prty4_k2_attn_idx[14] = { + 44, 45, 46, 47, 40, 50, 66, 56, 57, 58, 51, 52, 
53, 54, +}; + +static struct attn_hw_reg nig_prty4_k2 = { + 4, 14, nig_prty4_k2_attn_idx, 0x500230, 0x50023c, 0x500238, 0x500234 +}; + +static struct attn_hw_reg *nig_prty_k2_regs[5] = { + &nig_prty0_k2, &nig_prty1_k2, &nig_prty2_k2, &nig_prty3_k2, + &nig_prty4_k2, +}; + +#ifdef ATTN_DESC +static const char *wol_int_attn_desc[1] = { + "wol_address_error", +}; +#else +#define wol_int_attn_desc OSAL_NULL +#endif + +static const u16 wol_int0_k2_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg wol_int0_k2 = { + 0, 1, wol_int0_k2_attn_idx, 0x600040, 0x60004c, 0x600048, 0x600044 +}; + +static struct attn_hw_reg *wol_int_k2_regs[1] = { + &wol_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *wol_prty_attn_desc[24] = { + "wol_mem017_i_mem_prty", + "wol_mem018_i_mem_prty", + "wol_mem019_i_mem_prty", + "wol_mem020_i_mem_prty", + "wol_mem021_i_mem_prty", + "wol_mem022_i_mem_prty", + "wol_mem023_i_mem_prty", + "wol_mem024_i_mem_prty", + "wol_mem001_i_mem_prty", + "wol_mem008_i_mem_prty", + "wol_mem009_i_mem_prty", + "wol_mem010_i_mem_prty", + "wol_mem011_i_mem_prty", + "wol_mem012_i_mem_prty", + "wol_mem013_i_mem_prty", + "wol_mem014_i_mem_prty", + "wol_mem015_i_mem_prty", + "wol_mem016_i_mem_prty", + "wol_mem002_i_mem_prty", + "wol_mem003_i_mem_prty", + "wol_mem004_i_mem_prty", + "wol_mem005_i_mem_prty", + "wol_mem006_i_mem_prty", + "wol_mem007_i_mem_prty", +}; +#else +#define wol_prty_attn_desc OSAL_NULL +#endif + +static const u16 wol_prty1_k2_attn_idx[24] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, +}; + +static struct attn_hw_reg wol_prty1_k2 = { + 0, 24, wol_prty1_k2_attn_idx, 0x600200, 0x60020c, 0x600208, 0x600204 +}; + +static struct attn_hw_reg *wol_prty_k2_regs[1] = { + &wol_prty1_k2, +}; + +#ifdef ATTN_DESC +static const char *bmbn_int_attn_desc[1] = { + "bmbn_address_error", +}; +#else +#define bmbn_int_attn_desc OSAL_NULL +#endif + +static const u16 bmbn_int0_k2_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg bmbn_int0_k2 = { + 0, 1, bmbn_int0_k2_attn_idx, 0x610040, 0x61004c, 0x610048, 0x610044 +}; + +static struct attn_hw_reg *bmbn_int_k2_regs[1] = { + &bmbn_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *ipc_int_attn_desc[14] = { + "ipc_address_error", + "ipc_unused_0", + "ipc_vmain_por_assert", + "ipc_vmain_por_deassert", + "ipc_perst_assert", + "ipc_perst_deassert", + "ipc_otp_ecc_ded_0", + "ipc_otp_ecc_ded_1", + "ipc_otp_ecc_ded_2", + "ipc_otp_ecc_ded_3", + "ipc_otp_ecc_ded_4", + "ipc_otp_ecc_ded_5", + "ipc_otp_ecc_ded_6", + "ipc_otp_ecc_ded_7", +}; +#else +#define ipc_int_attn_desc OSAL_NULL +#endif + +static const u16 ipc_int0_bb_a0_attn_idx[5] = { + 0, 2, 3, 4, 5, +}; + +static struct attn_hw_reg ipc_int0_bb_a0 = { + 0, 5, ipc_int0_bb_a0_attn_idx, 0x2050c, 0x20518, 0x20514, 0x20510 +}; + +static struct attn_hw_reg *ipc_int_bb_a0_regs[1] = { + &ipc_int0_bb_a0, +}; + +static const u16 ipc_int0_bb_b0_attn_idx[13] = { + 0, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, +}; + +static struct attn_hw_reg ipc_int0_bb_b0 = { + 0, 13, ipc_int0_bb_b0_attn_idx, 0x2050c, 0x20518, 0x20514, 0x20510 +}; + +static struct attn_hw_reg *ipc_int_bb_b0_regs[1] = { + &ipc_int0_bb_b0, +}; + +static const u16 ipc_int0_k2_attn_idx[5] = { + 0, 2, 3, 4, 5, +}; + +static struct attn_hw_reg ipc_int0_k2 = { + 0, 5, ipc_int0_k2_attn_idx, 0x202dc, 0x202e8, 0x202e4, 0x202e0 +}; + +static struct attn_hw_reg *ipc_int_k2_regs[1] = { + &ipc_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *ipc_prty_attn_desc[1] = { + "ipc_fake_par_err", +}; +#else 
+#define ipc_prty_attn_desc OSAL_NULL +#endif + +static const u16 ipc_prty0_bb_a0_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg ipc_prty0_bb_a0 = { + 0, 1, ipc_prty0_bb_a0_attn_idx, 0x2051c, 0x20528, 0x20524, 0x20520 +}; + +static struct attn_hw_reg *ipc_prty_bb_a0_regs[1] = { + &ipc_prty0_bb_a0, +}; + +static const u16 ipc_prty0_bb_b0_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg ipc_prty0_bb_b0 = { + 0, 1, ipc_prty0_bb_b0_attn_idx, 0x2051c, 0x20528, 0x20524, 0x20520 +}; + +static struct attn_hw_reg *ipc_prty_bb_b0_regs[1] = { + &ipc_prty0_bb_b0, +}; + +#ifdef ATTN_DESC +static const char *nwm_int_attn_desc[18] = { + "nwm_address_error", + "nwm_tx_overflow_0", + "nwm_tx_underflow_0", + "nwm_tx_overflow_1", + "nwm_tx_underflow_1", + "nwm_tx_overflow_2", + "nwm_tx_underflow_2", + "nwm_tx_overflow_3", + "nwm_tx_underflow_3", + "nwm_unused_0", + "nwm_ln0_at_10M", + "nwm_ln0_at_100M", + "nwm_ln1_at_10M", + "nwm_ln1_at_100M", + "nwm_ln2_at_10M", + "nwm_ln2_at_100M", + "nwm_ln3_at_10M", + "nwm_ln3_at_100M", +}; +#else +#define nwm_int_attn_desc OSAL_NULL +#endif + +static const u16 nwm_int0_k2_attn_idx[17] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 13, 14, 15, 16, 17, +}; + +static struct attn_hw_reg nwm_int0_k2 = { + 0, 17, nwm_int0_k2_attn_idx, 0x800004, 0x800010, 0x80000c, 0x800008 +}; + +static struct attn_hw_reg *nwm_int_k2_regs[1] = { + &nwm_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *nwm_prty_attn_desc[72] = { + "nwm_mem020_i_mem_prty", + "nwm_mem028_i_mem_prty", + "nwm_mem036_i_mem_prty", + "nwm_mem044_i_mem_prty", + "nwm_mem023_i_mem_prty", + "nwm_mem031_i_mem_prty", + "nwm_mem039_i_mem_prty", + "nwm_mem047_i_mem_prty", + "nwm_mem024_i_mem_prty", + "nwm_mem032_i_mem_prty", + "nwm_mem040_i_mem_prty", + "nwm_mem048_i_mem_prty", + "nwm_mem018_i_mem_prty", + "nwm_mem026_i_mem_prty", + "nwm_mem034_i_mem_prty", + "nwm_mem042_i_mem_prty", + "nwm_mem017_i_mem_prty", + "nwm_mem025_i_mem_prty", + "nwm_mem033_i_mem_prty", + "nwm_mem041_i_mem_prty", + "nwm_mem021_i_mem_prty", + "nwm_mem029_i_mem_prty", + "nwm_mem037_i_mem_prty", + "nwm_mem045_i_mem_prty", + "nwm_mem019_i_mem_prty", + "nwm_mem027_i_mem_prty", + "nwm_mem035_i_mem_prty", + "nwm_mem043_i_mem_prty", + "nwm_mem022_i_mem_prty", + "nwm_mem030_i_mem_prty", + "nwm_mem038_i_mem_prty", + "nwm_mem046_i_mem_prty", + "nwm_mem057_i_mem_prty", + "nwm_mem059_i_mem_prty", + "nwm_mem061_i_mem_prty", + "nwm_mem063_i_mem_prty", + "nwm_mem058_i_mem_prty", + "nwm_mem060_i_mem_prty", + "nwm_mem062_i_mem_prty", + "nwm_mem064_i_mem_prty", + "nwm_mem009_i_mem_prty", + "nwm_mem010_i_mem_prty", + "nwm_mem011_i_mem_prty", + "nwm_mem012_i_mem_prty", + "nwm_mem013_i_mem_prty", + "nwm_mem014_i_mem_prty", + "nwm_mem015_i_mem_prty", + "nwm_mem016_i_mem_prty", + "nwm_mem001_i_mem_prty", + "nwm_mem002_i_mem_prty", + "nwm_mem003_i_mem_prty", + "nwm_mem004_i_mem_prty", + "nwm_mem005_i_mem_prty", + "nwm_mem006_i_mem_prty", + "nwm_mem007_i_mem_prty", + "nwm_mem008_i_mem_prty", + "nwm_mem049_i_mem_prty", + "nwm_mem053_i_mem_prty", + "nwm_mem050_i_mem_prty", + "nwm_mem054_i_mem_prty", + "nwm_mem051_i_mem_prty", + "nwm_mem055_i_mem_prty", + "nwm_mem052_i_mem_prty", + "nwm_mem056_i_mem_prty", + "nwm_mem066_i_mem_prty", + "nwm_mem068_i_mem_prty", + "nwm_mem070_i_mem_prty", + "nwm_mem072_i_mem_prty", + "nwm_mem065_i_mem_prty", + "nwm_mem067_i_mem_prty", + "nwm_mem069_i_mem_prty", + "nwm_mem071_i_mem_prty", +}; +#else +#define nwm_prty_attn_desc OSAL_NULL +#endif + +static const u16 nwm_prty1_k2_attn_idx[31] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 
12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, +}; + +static struct attn_hw_reg nwm_prty1_k2 = { + 0, 31, nwm_prty1_k2_attn_idx, 0x800200, 0x80020c, 0x800208, 0x800204 +}; + +static const u16 nwm_prty2_k2_attn_idx[31] = { + 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, + 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, +}; + +static struct attn_hw_reg nwm_prty2_k2 = { + 1, 31, nwm_prty2_k2_attn_idx, 0x800210, 0x80021c, 0x800218, 0x800214 +}; + +static const u16 nwm_prty3_k2_attn_idx[10] = { + 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, +}; + +static struct attn_hw_reg nwm_prty3_k2 = { + 2, 10, nwm_prty3_k2_attn_idx, 0x800220, 0x80022c, 0x800228, 0x800224 +}; + +static struct attn_hw_reg *nwm_prty_k2_regs[3] = { + &nwm_prty1_k2, &nwm_prty2_k2, &nwm_prty3_k2, +}; + +#ifdef ATTN_DESC +static const char *nws_int_attn_desc[38] = { + "nws_address_error", + "nws_ln0_an_resolve_50g_cr2", + "nws_ln0_an_resolve_50g_kr2", + "nws_ln0_an_resolve_40g_cr4", + "nws_ln0_an_resolve_40g_kr4", + "nws_ln0_an_resolve_25g_gr", + "nws_ln0_an_resolve_25g_cr", + "nws_ln0_an_resolve_25g_kr", + "nws_ln0_an_resolve_10g_kr", + "nws_ln0_an_resolve_1g_kx", + "nws_unused_0", + "nws_ln1_an_resolve_50g_cr2", + "nws_ln1_an_resolve_50g_kr2", + "nws_ln1_an_resolve_40g_cr4", + "nws_ln1_an_resolve_40g_kr4", + "nws_ln1_an_resolve_25g_gr", + "nws_ln1_an_resolve_25g_cr", + "nws_ln1_an_resolve_25g_kr", + "nws_ln1_an_resolve_10g_kr", + "nws_ln1_an_resolve_1g_kx", + "nws_ln2_an_resolve_50g_cr2", + "nws_ln2_an_resolve_50g_kr2", + "nws_ln2_an_resolve_40g_cr4", + "nws_ln2_an_resolve_40g_kr4", + "nws_ln2_an_resolve_25g_gr", + "nws_ln2_an_resolve_25g_cr", + "nws_ln2_an_resolve_25g_kr", + "nws_ln2_an_resolve_10g_kr", + "nws_ln2_an_resolve_1g_kx", + "nws_ln3_an_resolve_50g_cr2", + "nws_ln3_an_resolve_50g_kr2", + "nws_ln3_an_resolve_40g_cr4", + "nws_ln3_an_resolve_40g_kr4", + "nws_ln3_an_resolve_25g_gr", + "nws_ln3_an_resolve_25g_cr", + "nws_ln3_an_resolve_25g_kr", + "nws_ln3_an_resolve_10g_kr", + "nws_ln3_an_resolve_1g_kx", +}; +#else +#define nws_int_attn_desc OSAL_NULL +#endif + +static const u16 nws_int0_k2_attn_idx[10] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, +}; + +static struct attn_hw_reg nws_int0_k2 = { + 0, 10, nws_int0_k2_attn_idx, 0x700180, 0x70018c, 0x700188, 0x700184 +}; + +static const u16 nws_int1_k2_attn_idx[9] = { + 11, 12, 13, 14, 15, 16, 17, 18, 19, +}; + +static struct attn_hw_reg nws_int1_k2 = { + 1, 9, nws_int1_k2_attn_idx, 0x700190, 0x70019c, 0x700198, 0x700194 +}; + +static const u16 nws_int2_k2_attn_idx[9] = { + 20, 21, 22, 23, 24, 25, 26, 27, 28, +}; + +static struct attn_hw_reg nws_int2_k2 = { + 2, 9, nws_int2_k2_attn_idx, 0x7001a0, 0x7001ac, 0x7001a8, 0x7001a4 +}; + +static const u16 nws_int3_k2_attn_idx[9] = { + 29, 30, 31, 32, 33, 34, 35, 36, 37, +}; + +static struct attn_hw_reg nws_int3_k2 = { + 3, 9, nws_int3_k2_attn_idx, 0x7001b0, 0x7001bc, 0x7001b8, 0x7001b4 +}; + +static struct attn_hw_reg *nws_int_k2_regs[4] = { + &nws_int0_k2, &nws_int1_k2, &nws_int2_k2, &nws_int3_k2, +}; + +#ifdef ATTN_DESC +static const char *nws_prty_attn_desc[4] = { + "nws_mem003_i_mem_prty", + "nws_mem001_i_mem_prty", + "nws_mem004_i_mem_prty", + "nws_mem002_i_mem_prty", +}; +#else +#define nws_prty_attn_desc OSAL_NULL +#endif + +static const u16 nws_prty1_k2_attn_idx[4] = { + 0, 1, 2, 3, +}; + +static struct attn_hw_reg nws_prty1_k2 = { + 0, 4, nws_prty1_k2_attn_idx, 0x700200, 0x70020c, 0x700208, 0x700204 +}; + +static struct attn_hw_reg *nws_prty_k2_regs[1] = { + &nws_prty1_k2, 
+}; + +#ifdef ATTN_DESC +static const char *ms_int_attn_desc[1] = { + "ms_address_error", +}; +#else +#define ms_int_attn_desc OSAL_NULL +#endif + +static const u16 ms_int0_k2_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg ms_int0_k2 = { + 0, 1, ms_int0_k2_attn_idx, 0x6a0180, 0x6a018c, 0x6a0188, 0x6a0184 +}; + +static struct attn_hw_reg *ms_int_k2_regs[1] = { + &ms_int0_k2, +}; + +static struct attn_hw_block attn_blocks[] = { + {"grc", grc_int_attn_desc, grc_prty_attn_desc, { + {1, 1, + grc_int_bb_a0_regs, + grc_prty_bb_a0_regs}, + {1, 1, + grc_int_bb_b0_regs, + grc_prty_bb_b0_regs}, + {1, 1, grc_int_k2_regs, + grc_prty_k2_regs} } }, + {"miscs", miscs_int_attn_desc, miscs_prty_attn_desc, { + {2, 0, + + miscs_int_bb_a0_regs, + OSAL_NULL}, + {2, 1, + + miscs_int_bb_b0_regs, + + miscs_prty_bb_b0_regs}, + {1, 1, + + miscs_int_k2_regs, + + miscs_prty_k2_regs } } }, + {"misc", misc_int_attn_desc, OSAL_NULL, { + {1, 0, misc_int_bb_a0_regs, + OSAL_NULL}, + {1, 0, misc_int_bb_b0_regs, + OSAL_NULL}, + {1, 0, misc_int_k2_regs, + OSAL_NULL } } }, + {"dbu", OSAL_NULL, OSAL_NULL, { + {0, 0, OSAL_NULL, OSAL_NULL}, + {0, 0, OSAL_NULL, OSAL_NULL}, + {0, 0, OSAL_NULL, OSAL_NULL } } }, + {"pglue_b", pglue_b_int_attn_desc, pglue_b_prty_attn_desc, { + {1, 1, + + pglue_b_int_bb_a0_regs, + + pglue_b_prty_bb_a0_regs}, + {1, 2, + + pglue_b_int_bb_b0_regs, + + pglue_b_prty_bb_b0_regs}, + {1, 3, + + pglue_b_int_k2_regs, + + pglue_b_prty_k2_regs } } }, + {"cnig", cnig_int_attn_desc, cnig_prty_attn_desc, { + {1, 0, + cnig_int_bb_a0_regs, + OSAL_NULL}, + {1, 1, + cnig_int_bb_b0_regs, + + cnig_prty_bb_b0_regs}, + {1, 1, + cnig_int_k2_regs, + + cnig_prty_k2_regs } } }, + {"cpmu", cpmu_int_attn_desc, OSAL_NULL, { + {1, 0, cpmu_int_bb_a0_regs, + OSAL_NULL}, + {1, 0, cpmu_int_bb_b0_regs, + OSAL_NULL}, + {1, 0, cpmu_int_k2_regs, + OSAL_NULL } } }, + {"ncsi", ncsi_int_attn_desc, ncsi_prty_attn_desc, { + {1, 1, + ncsi_int_bb_a0_regs, + + ncsi_prty_bb_a0_regs}, + {1, 1, + ncsi_int_bb_b0_regs, + + ncsi_prty_bb_b0_regs}, + {1, 1, + ncsi_int_k2_regs, + + ncsi_prty_k2_regs } } }, + {"opte", OSAL_NULL, opte_prty_attn_desc, { + {0, 1, OSAL_NULL, + opte_prty_bb_a0_regs}, + {0, 2, OSAL_NULL, + opte_prty_bb_b0_regs}, + {0, 2, OSAL_NULL, + opte_prty_k2_regs } } }, + {"bmb", bmb_int_attn_desc, bmb_prty_attn_desc, { + {12, 2, + bmb_int_bb_a0_regs, + bmb_prty_bb_a0_regs}, + {12, 3, + bmb_int_bb_b0_regs, + bmb_prty_bb_b0_regs}, + {12, 3, bmb_int_k2_regs, + bmb_prty_k2_regs } } }, + {"pcie", pcie_int_attn_desc, pcie_prty_attn_desc, { + {0, 1, OSAL_NULL, + + pcie_prty_bb_a0_regs}, + {0, 1, OSAL_NULL, + + pcie_prty_bb_b0_regs}, + {1, 2, + pcie_int_k2_regs, + + pcie_prty_k2_regs } } }, + {"mcp", OSAL_NULL, OSAL_NULL, { + {0, 0, OSAL_NULL, OSAL_NULL}, + {0, 0, OSAL_NULL, OSAL_NULL}, + {0, 0, OSAL_NULL, OSAL_NULL } } }, + {"mcp2", OSAL_NULL, mcp2_prty_attn_desc, { + {0, 2, OSAL_NULL, + mcp2_prty_bb_a0_regs}, + {0, 2, OSAL_NULL, + mcp2_prty_bb_b0_regs}, + {0, 2, OSAL_NULL, + mcp2_prty_k2_regs } } }, + {"pswhst", pswhst_int_attn_desc, pswhst_prty_attn_desc, { + {1, 1, + + pswhst_int_bb_a0_regs, + + pswhst_prty_bb_a0_regs}, + {1, 2, + + pswhst_int_bb_b0_regs, + + pswhst_prty_bb_b0_regs}, + {1, 2, + + pswhst_int_k2_regs, + + pswhst_prty_k2_regs } } }, + {"pswhst2", pswhst2_int_attn_desc, pswhst2_prty_attn_desc, { + {1, 0, + + pswhst2_int_bb_a0_regs, + OSAL_NULL}, + {1, 1, + + pswhst2_int_bb_b0_regs, + + pswhst2_prty_bb_b0_regs}, + {1, 1, + + pswhst2_int_k2_regs, + + pswhst2_prty_k2_regs } } }, + {"pswrd", pswrd_int_attn_desc, 
pswrd_prty_attn_desc, { + {1, 0, + + pswrd_int_bb_a0_regs, + OSAL_NULL}, + {1, 1, + + pswrd_int_bb_b0_regs, + + pswrd_prty_bb_b0_regs}, + {1, 1, + + pswrd_int_k2_regs, + + pswrd_prty_k2_regs } } }, + {"pswrd2", pswrd2_int_attn_desc, pswrd2_prty_attn_desc, { + {1, 2, + + pswrd2_int_bb_a0_regs, + + pswrd2_prty_bb_a0_regs}, + {1, 3, + + pswrd2_int_bb_b0_regs, + + pswrd2_prty_bb_b0_regs}, + {1, 3, + + pswrd2_int_k2_regs, + + pswrd2_prty_k2_regs } } }, + {"pswwr", pswwr_int_attn_desc, pswwr_prty_attn_desc, { + {1, 0, + + pswwr_int_bb_a0_regs, + OSAL_NULL}, + {1, 1, + + pswwr_int_bb_b0_regs, + + pswwr_prty_bb_b0_regs}, + {1, 1, + + pswwr_int_k2_regs, + + pswwr_prty_k2_regs } } }, + {"pswwr2", pswwr2_int_attn_desc, pswwr2_prty_attn_desc, { + {1, 4, + + pswwr2_int_bb_a0_regs, + + pswwr2_prty_bb_a0_regs}, + {1, 5, + + pswwr2_int_bb_b0_regs, + + pswwr2_prty_bb_b0_regs}, + {1, 5, + + pswwr2_int_k2_regs, + + pswwr2_prty_k2_regs } } }, + {"pswrq", pswrq_int_attn_desc, pswrq_prty_attn_desc, { + {1, 0, + + pswrq_int_bb_a0_regs, + OSAL_NULL}, + {1, 1, + + pswrq_int_bb_b0_regs, + + pswrq_prty_bb_b0_regs}, + {1, 1, + + pswrq_int_k2_regs, + + pswrq_prty_k2_regs } } }, + {"pswrq2", pswrq2_int_attn_desc, pswrq2_prty_attn_desc, { + {1, 1, + + pswrq2_int_bb_a0_regs, + + pswrq2_prty_bb_a0_regs}, + {1, 1, + + pswrq2_int_bb_b0_regs, + + pswrq2_prty_bb_b0_regs}, + {1, 1, + + pswrq2_int_k2_regs, + + pswrq2_prty_k2_regs } } }, + {"pglcs", pglcs_int_attn_desc, OSAL_NULL, { + {1, 0, pglcs_int_bb_a0_regs, + OSAL_NULL}, + {1, 0, pglcs_int_bb_b0_regs, + OSAL_NULL}, + {1, 0, pglcs_int_k2_regs, + OSAL_NULL } } }, + {"dmae", dmae_int_attn_desc, dmae_prty_attn_desc, { + {1, 1, + dmae_int_bb_a0_regs, + + dmae_prty_bb_a0_regs}, + {1, 1, + dmae_int_bb_b0_regs, + + dmae_prty_bb_b0_regs}, + {1, 1, + dmae_int_k2_regs, + + dmae_prty_k2_regs } } }, + {"ptu", ptu_int_attn_desc, ptu_prty_attn_desc, { + {1, 1, + ptu_int_bb_a0_regs, + ptu_prty_bb_a0_regs}, + {1, 1, + ptu_int_bb_b0_regs, + ptu_prty_bb_b0_regs}, + {1, 1, ptu_int_k2_regs, + ptu_prty_k2_regs } } }, + {"tcm", tcm_int_attn_desc, tcm_prty_attn_desc, { + {3, 2, + tcm_int_bb_a0_regs, + tcm_prty_bb_a0_regs}, + {3, 2, + tcm_int_bb_b0_regs, + tcm_prty_bb_b0_regs}, + {3, 2, tcm_int_k2_regs, + tcm_prty_k2_regs } } }, + {"mcm", mcm_int_attn_desc, mcm_prty_attn_desc, { + {3, 2, + mcm_int_bb_a0_regs, + mcm_prty_bb_a0_regs}, + {3, 2, + mcm_int_bb_b0_regs, + mcm_prty_bb_b0_regs}, + {3, 2, mcm_int_k2_regs, + mcm_prty_k2_regs } } }, + {"ucm", ucm_int_attn_desc, ucm_prty_attn_desc, { + {3, 2, + ucm_int_bb_a0_regs, + ucm_prty_bb_a0_regs}, + {3, 2, + ucm_int_bb_b0_regs, + ucm_prty_bb_b0_regs}, + {3, 2, ucm_int_k2_regs, + ucm_prty_k2_regs } } }, + {"xcm", xcm_int_attn_desc, xcm_prty_attn_desc, { + {3, 2, + xcm_int_bb_a0_regs, + xcm_prty_bb_a0_regs}, + {3, 2, + xcm_int_bb_b0_regs, + xcm_prty_bb_b0_regs}, + {3, 2, xcm_int_k2_regs, + xcm_prty_k2_regs } } }, + {"ycm", ycm_int_attn_desc, ycm_prty_attn_desc, { + {3, 2, + ycm_int_bb_a0_regs, + ycm_prty_bb_a0_regs}, + {3, 2, + ycm_int_bb_b0_regs, + ycm_prty_bb_b0_regs}, + {3, 2, ycm_int_k2_regs, + ycm_prty_k2_regs } } }, + {"pcm", pcm_int_attn_desc, pcm_prty_attn_desc, { + {3, 1, + pcm_int_bb_a0_regs, + pcm_prty_bb_a0_regs}, + {3, 1, + pcm_int_bb_b0_regs, + pcm_prty_bb_b0_regs}, + {3, 1, pcm_int_k2_regs, + pcm_prty_k2_regs } } }, + {"qm", qm_int_attn_desc, qm_prty_attn_desc, { + {1, 4, qm_int_bb_a0_regs, + qm_prty_bb_a0_regs}, + {1, 4, qm_int_bb_b0_regs, + qm_prty_bb_b0_regs}, + {1, 4, qm_int_k2_regs, + qm_prty_k2_regs } } }, + {"tm", tm_int_attn_desc, 
tm_prty_attn_desc, { + {2, 1, tm_int_bb_a0_regs, + tm_prty_bb_a0_regs}, + {2, 1, tm_int_bb_b0_regs, + tm_prty_bb_b0_regs}, + {2, 1, tm_int_k2_regs, + tm_prty_k2_regs } } }, + {"dorq", dorq_int_attn_desc, dorq_prty_attn_desc, { + {1, 1, + dorq_int_bb_a0_regs, + + dorq_prty_bb_a0_regs}, + {1, 2, + dorq_int_bb_b0_regs, + + dorq_prty_bb_b0_regs}, + {1, 2, + dorq_int_k2_regs, + + dorq_prty_k2_regs } } }, + {"brb", brb_int_attn_desc, brb_prty_attn_desc, { + {12, 2, + brb_int_bb_a0_regs, + brb_prty_bb_a0_regs}, + {12, 3, + brb_int_bb_b0_regs, + brb_prty_bb_b0_regs}, + {12, 3, brb_int_k2_regs, + brb_prty_k2_regs } } }, + {"src", src_int_attn_desc, OSAL_NULL, { + {1, 0, src_int_bb_a0_regs, + OSAL_NULL}, + {1, 0, src_int_bb_b0_regs, + OSAL_NULL}, + {1, 0, src_int_k2_regs, + OSAL_NULL } } }, + {"prs", prs_int_attn_desc, prs_prty_attn_desc, { + {1, 3, + prs_int_bb_a0_regs, + prs_prty_bb_a0_regs}, + {1, 3, + prs_int_bb_b0_regs, + prs_prty_bb_b0_regs}, + {1, 3, prs_int_k2_regs, + prs_prty_k2_regs } } }, + {"tsdm", tsdm_int_attn_desc, tsdm_prty_attn_desc, { + {1, 1, + tsdm_int_bb_a0_regs, + + tsdm_prty_bb_a0_regs}, + {1, 1, + tsdm_int_bb_b0_regs, + + tsdm_prty_bb_b0_regs}, + {1, 1, + tsdm_int_k2_regs, + + tsdm_prty_k2_regs } } }, + {"msdm", msdm_int_attn_desc, msdm_prty_attn_desc, { + {1, 1, + msdm_int_bb_a0_regs, + + msdm_prty_bb_a0_regs}, + {1, 1, + msdm_int_bb_b0_regs, + + msdm_prty_bb_b0_regs}, + {1, 1, + msdm_int_k2_regs, + + msdm_prty_k2_regs } } }, + {"usdm", usdm_int_attn_desc, usdm_prty_attn_desc, { + {1, 1, + usdm_int_bb_a0_regs, + + usdm_prty_bb_a0_regs}, + {1, 1, + usdm_int_bb_b0_regs, + + usdm_prty_bb_b0_regs}, + {1, 1, + usdm_int_k2_regs, + + usdm_prty_k2_regs } } }, + {"xsdm", xsdm_int_attn_desc, xsdm_prty_attn_desc, { + {1, 1, + xsdm_int_bb_a0_regs, + + xsdm_prty_bb_a0_regs}, + {1, 1, + xsdm_int_bb_b0_regs, + + xsdm_prty_bb_b0_regs}, + {1, 1, + xsdm_int_k2_regs, + + xsdm_prty_k2_regs } } }, + {"ysdm", ysdm_int_attn_desc, ysdm_prty_attn_desc, { + {1, 1, + ysdm_int_bb_a0_regs, + + ysdm_prty_bb_a0_regs}, + {1, 1, + ysdm_int_bb_b0_regs, + + ysdm_prty_bb_b0_regs}, + {1, 1, + ysdm_int_k2_regs, + + ysdm_prty_k2_regs } } }, + {"psdm", psdm_int_attn_desc, psdm_prty_attn_desc, { + {1, 1, + psdm_int_bb_a0_regs, + + psdm_prty_bb_a0_regs}, + {1, 1, + psdm_int_bb_b0_regs, + + psdm_prty_bb_b0_regs}, + {1, 1, + psdm_int_k2_regs, + + psdm_prty_k2_regs } } }, + {"tsem", tsem_int_attn_desc, tsem_prty_attn_desc, { + {3, 3, + tsem_int_bb_a0_regs, + + tsem_prty_bb_a0_regs}, + {3, 3, + tsem_int_bb_b0_regs, + + tsem_prty_bb_b0_regs}, + {3, 4, + tsem_int_k2_regs, + + tsem_prty_k2_regs } } }, + {"msem", msem_int_attn_desc, msem_prty_attn_desc, { + {3, 2, + msem_int_bb_a0_regs, + + msem_prty_bb_a0_regs}, + {3, 2, + msem_int_bb_b0_regs, + + msem_prty_bb_b0_regs}, + {3, 3, + msem_int_k2_regs, + + msem_prty_k2_regs } } }, + {"usem", usem_int_attn_desc, usem_prty_attn_desc, { + {3, 2, + usem_int_bb_a0_regs, + + usem_prty_bb_a0_regs}, + {3, 2, + usem_int_bb_b0_regs, + + usem_prty_bb_b0_regs}, + {3, 3, + usem_int_k2_regs, + + usem_prty_k2_regs } } }, + {"xsem", xsem_int_attn_desc, xsem_prty_attn_desc, { + {3, 2, + xsem_int_bb_a0_regs, + + xsem_prty_bb_a0_regs}, + {3, 2, + xsem_int_bb_b0_regs, + + xsem_prty_bb_b0_regs}, + {3, 3, + xsem_int_k2_regs, + + xsem_prty_k2_regs } } }, + {"ysem", ysem_int_attn_desc, ysem_prty_attn_desc, { + {3, 2, + ysem_int_bb_a0_regs, + + ysem_prty_bb_a0_regs}, + {3, 2, + ysem_int_bb_b0_regs, + + ysem_prty_bb_b0_regs}, + {3, 3, + ysem_int_k2_regs, + + ysem_prty_k2_regs } } }, + {"psem", 
psem_int_attn_desc, psem_prty_attn_desc, { + {3, 3, + psem_int_bb_a0_regs, + + psem_prty_bb_a0_regs}, + {3, 3, + psem_int_bb_b0_regs, + + psem_prty_bb_b0_regs}, + {3, 4, + psem_int_k2_regs, + + psem_prty_k2_regs } } }, + {"rss", rss_int_attn_desc, rss_prty_attn_desc, { + {1, 1, + rss_int_bb_a0_regs, + rss_prty_bb_a0_regs}, + {1, 1, + rss_int_bb_b0_regs, + rss_prty_bb_b0_regs}, + {1, 1, rss_int_k2_regs, + rss_prty_k2_regs } } }, + {"tmld", tmld_int_attn_desc, tmld_prty_attn_desc, { + {1, 1, + tmld_int_bb_a0_regs, + + tmld_prty_bb_a0_regs}, + {1, 1, + tmld_int_bb_b0_regs, + + tmld_prty_bb_b0_regs}, + {1, 1, + tmld_int_k2_regs, + + tmld_prty_k2_regs } } }, + {"muld", muld_int_attn_desc, muld_prty_attn_desc, { + {1, 1, + muld_int_bb_a0_regs, + + muld_prty_bb_a0_regs}, + {1, 1, + muld_int_bb_b0_regs, + + muld_prty_bb_b0_regs}, + {1, 1, + muld_int_k2_regs, + + muld_prty_k2_regs } } }, + {"yuld", yuld_int_attn_desc, yuld_prty_attn_desc, { + {1, 1, + yuld_int_bb_a0_regs, + + yuld_prty_bb_a0_regs}, + {1, 1, + yuld_int_bb_b0_regs, + + yuld_prty_bb_b0_regs}, + {1, 1, + yuld_int_k2_regs, + + yuld_prty_k2_regs } } }, + {"xyld", xyld_int_attn_desc, xyld_prty_attn_desc, { + {1, 1, + xyld_int_bb_a0_regs, + + xyld_prty_bb_a0_regs}, + {1, 1, + xyld_int_bb_b0_regs, + + xyld_prty_bb_b0_regs}, + {1, 1, + xyld_int_k2_regs, + + xyld_prty_k2_regs } } }, + {"prm", prm_int_attn_desc, prm_prty_attn_desc, { + {1, 1, + prm_int_bb_a0_regs, + prm_prty_bb_a0_regs}, + {1, 2, + prm_int_bb_b0_regs, + prm_prty_bb_b0_regs}, + {1, 2, prm_int_k2_regs, + prm_prty_k2_regs } } }, + {"pbf_pb1", pbf_pb1_int_attn_desc, pbf_pb1_prty_attn_desc, { + {1, 0, + + pbf_pb1_int_bb_a0_regs, + OSAL_NULL}, + {1, 1, + + pbf_pb1_int_bb_b0_regs, + + pbf_pb1_prty_bb_b0_regs}, + {1, 1, + + pbf_pb1_int_k2_regs, + + pbf_pb1_prty_k2_regs } } }, + {"pbf_pb2", pbf_pb2_int_attn_desc, pbf_pb2_prty_attn_desc, { + {1, 0, + + pbf_pb2_int_bb_a0_regs, + OSAL_NULL}, + {1, 1, + + pbf_pb2_int_bb_b0_regs, + + pbf_pb2_prty_bb_b0_regs}, + {1, 1, + + pbf_pb2_int_k2_regs, + + pbf_pb2_prty_k2_regs } } }, + {"rpb", rpb_int_attn_desc, rpb_prty_attn_desc, { + {1, 0, + rpb_int_bb_a0_regs, + OSAL_NULL}, + {1, 1, + rpb_int_bb_b0_regs, + rpb_prty_bb_b0_regs}, + {1, 1, rpb_int_k2_regs, + rpb_prty_k2_regs } } }, + {"btb", btb_int_attn_desc, btb_prty_attn_desc, { + {11, 1, + btb_int_bb_a0_regs, + btb_prty_bb_a0_regs}, + {11, 2, + btb_int_bb_b0_regs, + btb_prty_bb_b0_regs}, + {11, 2, btb_int_k2_regs, + btb_prty_k2_regs } } }, + {"pbf", pbf_int_attn_desc, pbf_prty_attn_desc, { + {1, 2, + pbf_int_bb_a0_regs, + pbf_prty_bb_a0_regs}, + {1, 3, + pbf_int_bb_b0_regs, + pbf_prty_bb_b0_regs}, + {1, 3, pbf_int_k2_regs, + pbf_prty_k2_regs } } }, + {"rdif", rdif_int_attn_desc, rdif_prty_attn_desc, { + {1, 0, + rdif_int_bb_a0_regs, + OSAL_NULL}, + {1, 1, + rdif_int_bb_b0_regs, + + rdif_prty_bb_b0_regs}, + {1, 1, + rdif_int_k2_regs, + + rdif_prty_k2_regs } } }, + {"tdif", tdif_int_attn_desc, tdif_prty_attn_desc, { + {1, 1, + tdif_int_bb_a0_regs, + + tdif_prty_bb_a0_regs}, + {1, 2, + tdif_int_bb_b0_regs, + + tdif_prty_bb_b0_regs}, + {1, 2, + tdif_int_k2_regs, + + tdif_prty_k2_regs } } }, + {"cdu", cdu_int_attn_desc, cdu_prty_attn_desc, { + {1, 1, + cdu_int_bb_a0_regs, + cdu_prty_bb_a0_regs}, + {1, 1, + cdu_int_bb_b0_regs, + cdu_prty_bb_b0_regs}, + {1, 1, cdu_int_k2_regs, + cdu_prty_k2_regs } } }, + {"ccfc", ccfc_int_attn_desc, ccfc_prty_attn_desc, { + {1, 2, + ccfc_int_bb_a0_regs, + + ccfc_prty_bb_a0_regs}, + {1, 2, + ccfc_int_bb_b0_regs, + + ccfc_prty_bb_b0_regs}, + {1, 2, + ccfc_int_k2_regs, + 
+ ccfc_prty_k2_regs } } }, + {"tcfc", tcfc_int_attn_desc, tcfc_prty_attn_desc, { + {1, 2, + tcfc_int_bb_a0_regs, + + tcfc_prty_bb_a0_regs}, + {1, 2, + tcfc_int_bb_b0_regs, + + tcfc_prty_bb_b0_regs}, + {1, 2, + tcfc_int_k2_regs, + + tcfc_prty_k2_regs } } }, + {"igu", igu_int_attn_desc, igu_prty_attn_desc, { + {1, 3, + igu_int_bb_a0_regs, + igu_prty_bb_a0_regs}, + {1, 3, + igu_int_bb_b0_regs, + igu_prty_bb_b0_regs}, + {1, 2, igu_int_k2_regs, + igu_prty_k2_regs } } }, + {"cau", cau_int_attn_desc, cau_prty_attn_desc, { + {1, 1, + cau_int_bb_a0_regs, + cau_prty_bb_a0_regs}, + {1, 1, + cau_int_bb_b0_regs, + cau_prty_bb_b0_regs}, + {1, 1, cau_int_k2_regs, + cau_prty_k2_regs } } }, + {"umac", umac_int_attn_desc, OSAL_NULL, { + {0, 0, OSAL_NULL, OSAL_NULL}, + {0, 0, OSAL_NULL, OSAL_NULL}, + {1, 0, umac_int_k2_regs, + OSAL_NULL } } }, + {"xmac", OSAL_NULL, OSAL_NULL, { + {0, 0, OSAL_NULL, OSAL_NULL}, + {0, 0, OSAL_NULL, OSAL_NULL}, + {0, 0, OSAL_NULL, OSAL_NULL } } }, + {"dbg", dbg_int_attn_desc, dbg_prty_attn_desc, { + {1, 1, + dbg_int_bb_a0_regs, + dbg_prty_bb_a0_regs}, + {1, 1, + dbg_int_bb_b0_regs, + dbg_prty_bb_b0_regs}, + {1, 1, dbg_int_k2_regs, + dbg_prty_k2_regs } } }, + {"nig", nig_int_attn_desc, nig_prty_attn_desc, { + {6, 4, + nig_int_bb_a0_regs, + nig_prty_bb_a0_regs}, + {6, 5, + nig_int_bb_b0_regs, + nig_prty_bb_b0_regs}, + {10, 5, nig_int_k2_regs, + nig_prty_k2_regs } } }, + {"wol", wol_int_attn_desc, wol_prty_attn_desc, { + {0, 0, OSAL_NULL, + OSAL_NULL}, + {0, 0, OSAL_NULL, + OSAL_NULL}, + {1, 1, wol_int_k2_regs, + wol_prty_k2_regs } } }, + {"bmbn", bmbn_int_attn_desc, OSAL_NULL, { + {0, 0, OSAL_NULL, OSAL_NULL}, + {0, 0, OSAL_NULL, OSAL_NULL}, + {1, 0, bmbn_int_k2_regs, + OSAL_NULL } } }, + {"ipc", ipc_int_attn_desc, ipc_prty_attn_desc, { + {1, 1, + ipc_int_bb_a0_regs, + ipc_prty_bb_a0_regs}, + {1, 1, + ipc_int_bb_b0_regs, + ipc_prty_bb_b0_regs}, + {1, 0, ipc_int_k2_regs, + OSAL_NULL } } }, + {"nwm", nwm_int_attn_desc, nwm_prty_attn_desc, { + {0, 0, OSAL_NULL, + OSAL_NULL}, + {0, 0, OSAL_NULL, + OSAL_NULL}, + {1, 3, nwm_int_k2_regs, + nwm_prty_k2_regs } } }, + {"nws", nws_int_attn_desc, nws_prty_attn_desc, { + {0, 0, OSAL_NULL, + OSAL_NULL}, + {0, 0, OSAL_NULL, + OSAL_NULL}, + {4, 1, nws_int_k2_regs, + nws_prty_k2_regs } } }, + {"ms", ms_int_attn_desc, OSAL_NULL, { + {0, 0, OSAL_NULL, OSAL_NULL}, + {0, 0, OSAL_NULL, OSAL_NULL}, + {1, 0, ms_int_k2_regs, + OSAL_NULL } } }, + {"phy_pcie", OSAL_NULL, OSAL_NULL, { + {0, 0, OSAL_NULL, OSAL_NULL}, + {0, 0, OSAL_NULL, OSAL_NULL}, + {0, 0, OSAL_NULL, OSAL_NULL } } }, + {"misc_aeu", OSAL_NULL, OSAL_NULL, { + {0, 0, OSAL_NULL, OSAL_NULL}, + {0, 0, OSAL_NULL, OSAL_NULL}, + {0, 0, OSAL_NULL, OSAL_NULL } } }, + {"bar0_map", OSAL_NULL, OSAL_NULL, { + {0, 0, OSAL_NULL, OSAL_NULL}, + {0, 0, OSAL_NULL, OSAL_NULL}, + {0, 0, OSAL_NULL, OSAL_NULL } } }, +}; + +#define NUM_INT_REGS 423 +#define NUM_PRTY_REGS 378 + +#endif /* __PREVENT_INT_ATTN__ */ + +#endif /* __ATTN_VALUES_H__ */ diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_chain.h b/src/spdk/dpdk/drivers/net/qede/base/ecore_chain.h new file mode 100644 index 000000000..c69920be5 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_chain.h @@ -0,0 +1,842 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. + * All rights reserved. 
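[Editor's note on the attention tables that conclude above: every attn_hw_reg initializer follows the same seven-value layout, and attn_blocks groups them per hardware block with one {num int regs, num parity regs, int regs, parity regs} tuple per chip revision (bb_a0, bb_b0, k2). The struct definitions are not part of this hunk, so the sketch below mirrors the layout with placeholder field names; reading the four trailing addresses as status / status-clear / status-write / mask registers is an assumption, not something stated in this patch.]

/* Placeholder mirror of the initializer layout above -- field names are
 * illustrative, not the driver's actual definitions.
 */
struct example_attn_reg {
	u16 reg_idx;		/* register index within the block	*/
	u16 num_of_bits;	/* number of valid attention bits	*/
	const u16 *bit_idx;	/* per-bit index into the desc array	*/
	u32 addr[4];		/* four per-register addresses		*/
};

/* Return the description of the lowest asserted bit in one attention
 * register, given the raw status value already read from the device
 * (register access deliberately omitted from this sketch).
 */
static const char *example_first_asserted(const struct example_attn_reg *reg,
					  const char **desc, u32 sts)
{
	u16 bit;

	for (bit = 0; bit < reg->num_of_bits; bit++)
		if (sts & (1U << bit))
			/* desc is e.g. wol_prty_attn_desc when ATTN_DESC is
			 * defined, OSAL_NULL otherwise.
			 */
			return desc ? desc[reg->bit_idx[bit]] : "unknown";

	return OSAL_NULL;	/* nothing asserted */
}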
+ * www.cavium.com + */ + +#ifndef __ECORE_CHAIN_H__ +#define __ECORE_CHAIN_H__ + +#include /* @DPDK */ + +#include "common_hsi.h" +#include "ecore_utils.h" + +enum ecore_chain_mode { + /* Each Page contains a next pointer at its end */ + ECORE_CHAIN_MODE_NEXT_PTR, + + /* Chain is a single page (next ptr) is unrequired */ + ECORE_CHAIN_MODE_SINGLE, + + /* Page pointers are located in a side list */ + ECORE_CHAIN_MODE_PBL, +}; + +enum ecore_chain_use_mode { + ECORE_CHAIN_USE_TO_PRODUCE, /* Chain starts empty */ + ECORE_CHAIN_USE_TO_CONSUME, /* Chain starts full */ + ECORE_CHAIN_USE_TO_CONSUME_PRODUCE, /* Chain starts empty */ +}; + +enum ecore_chain_cnt_type { + /* The chain's size/prod/cons are kept in 16-bit variables */ + ECORE_CHAIN_CNT_TYPE_U16, + + /* The chain's size/prod/cons are kept in 32-bit variables */ + ECORE_CHAIN_CNT_TYPE_U32, +}; + +struct ecore_chain_next { + struct regpair next_phys; + void *next_virt; +}; + +struct ecore_chain_pbl_u16 { + u16 prod_page_idx; + u16 cons_page_idx; +}; + +struct ecore_chain_pbl_u32 { + u32 prod_page_idx; + u32 cons_page_idx; +}; + +struct ecore_chain_ext_pbl { + dma_addr_t p_pbl_phys; + void *p_pbl_virt; +}; + +struct ecore_chain_u16 { + /* Cyclic index of next element to produce/consme */ + u16 prod_idx; + u16 cons_idx; +}; + +struct ecore_chain_u32 { + /* Cyclic index of next element to produce/consme */ + u32 prod_idx; + u32 cons_idx; +}; + +struct ecore_chain { + /* fastpath portion of the chain - required for commands such + * as produce / consume. + */ + /* Point to next element to produce/consume */ + void *p_prod_elem; + void *p_cons_elem; + + /* Fastpath portions of the PBL [if exists] */ + + struct { + /* Table for keeping the virtual addresses of the chain pages, + * respectively to the physical addresses in the pbl table. + */ + void **pp_virt_addr_tbl; + + union { + struct ecore_chain_pbl_u16 pbl_u16; + struct ecore_chain_pbl_u32 pbl_u32; + } c; + } pbl; + + union { + struct ecore_chain_u16 chain16; + struct ecore_chain_u32 chain32; + } u; + + /* Capacity counts only usable elements */ + u32 capacity; + u32 page_cnt; + + /* A u8 would suffice for mode, but it would save as a lot of headaches + * on castings & defaults. + */ + enum ecore_chain_mode mode; + + /* Elements information for fast calculations */ + u16 elem_per_page; + u16 elem_per_page_mask; + u16 elem_size; + u16 next_page_mask; + u16 usable_per_page; + u8 elem_unusable; + + u8 cnt_type; + + /* Slowpath of the chain - required for initialization and destruction, + * but isn't involved in regular functionality. + */ + + /* Base address of a pre-allocated buffer for pbl */ + struct { + dma_addr_t p_phys_table; + void *p_virt_table; + } pbl_sp; + + /* Address of first page of the chain - the address is required + * for fastpath operation [consume/produce] but only for the SINGLE + * flavour which isn't considered fastpath [== SPQ]. + */ + void *p_virt_addr; + dma_addr_t p_phys_addr; + + /* Total number of elements [for entire chain] */ + u32 size; + + u8 intended_use; + + /* TBD - do we really need this? Couldn't find usage for it */ + bool b_external_pbl; + + void *dp_ctx; +}; + +#define ECORE_CHAIN_PBL_ENTRY_SIZE (8) +#define ECORE_CHAIN_PAGE_SIZE (0x1000) +#define ELEMS_PER_PAGE(elem_size) (ECORE_CHAIN_PAGE_SIZE / (elem_size)) + +#define UNUSABLE_ELEMS_PER_PAGE(elem_size, mode) \ + ((mode == ECORE_CHAIN_MODE_NEXT_PTR) ? 
\ + (u8)(1 + ((sizeof(struct ecore_chain_next) - 1) / \ + (elem_size))) : 0) + +#define USABLE_ELEMS_PER_PAGE(elem_size, mode) \ + ((u32)(ELEMS_PER_PAGE(elem_size) - \ + UNUSABLE_ELEMS_PER_PAGE(elem_size, mode))) + +#define ECORE_CHAIN_PAGE_CNT(elem_cnt, elem_size, mode) \ + DIV_ROUND_UP(elem_cnt, USABLE_ELEMS_PER_PAGE(elem_size, mode)) + +#define is_chain_u16(p) ((p)->cnt_type == ECORE_CHAIN_CNT_TYPE_U16) +#define is_chain_u32(p) ((p)->cnt_type == ECORE_CHAIN_CNT_TYPE_U32) + +/* Accessors */ +static OSAL_INLINE u16 ecore_chain_get_prod_idx(struct ecore_chain *p_chain) +{ + OSAL_ASSERT(is_chain_u16(p_chain)); + return p_chain->u.chain16.prod_idx; +} + +static OSAL_INLINE u32 ecore_chain_get_prod_idx_u32(struct ecore_chain *p_chain) +{ + OSAL_ASSERT(is_chain_u32(p_chain)); + return p_chain->u.chain32.prod_idx; +} + +static OSAL_INLINE u16 ecore_chain_get_cons_idx(struct ecore_chain *p_chain) +{ + OSAL_ASSERT(is_chain_u16(p_chain)); + return p_chain->u.chain16.cons_idx; +} + +static OSAL_INLINE u32 ecore_chain_get_cons_idx_u32(struct ecore_chain *p_chain) +{ + OSAL_ASSERT(is_chain_u32(p_chain)); + return p_chain->u.chain32.cons_idx; +} + +/* FIXME: + * Should create OSALs for the below definitions. + * For Linux, replace them with the existing U16_MAX and U32_MAX, and handle + * kernel versions that lack them. + */ +#define ECORE_U16_MAX ((u16)~0U) +#define ECORE_U32_MAX ((u32)~0U) + +static OSAL_INLINE u16 ecore_chain_get_elem_left(struct ecore_chain *p_chain) +{ + u16 used; + + OSAL_ASSERT(is_chain_u16(p_chain)); + + used = (u16)(((u32)ECORE_U16_MAX + 1 + + (u32)(p_chain->u.chain16.prod_idx)) - + (u32)p_chain->u.chain16.cons_idx); + if (p_chain->mode == ECORE_CHAIN_MODE_NEXT_PTR) + used -= p_chain->u.chain16.prod_idx / p_chain->elem_per_page - + p_chain->u.chain16.cons_idx / p_chain->elem_per_page; + + return (u16)(p_chain->capacity - used); +} + +static OSAL_INLINE u32 +ecore_chain_get_elem_left_u32(struct ecore_chain *p_chain) +{ + u32 used; + + OSAL_ASSERT(is_chain_u32(p_chain)); + + used = (u32)(((u64)ECORE_U32_MAX + 1 + + (u64)(p_chain->u.chain32.prod_idx)) - + (u64)p_chain->u.chain32.cons_idx); + if (p_chain->mode == ECORE_CHAIN_MODE_NEXT_PTR) + used -= p_chain->u.chain32.prod_idx / p_chain->elem_per_page - + p_chain->u.chain32.cons_idx / p_chain->elem_per_page; + + return p_chain->capacity - used; +} + +static OSAL_INLINE u8 ecore_chain_is_full(struct ecore_chain *p_chain) +{ + if (is_chain_u16(p_chain)) + return (ecore_chain_get_elem_left(p_chain) == + p_chain->capacity); + else + return (ecore_chain_get_elem_left_u32(p_chain) == + p_chain->capacity); +} + +static OSAL_INLINE u8 ecore_chain_is_empty(struct ecore_chain *p_chain) +{ + if (is_chain_u16(p_chain)) + return (ecore_chain_get_elem_left(p_chain) == 0); + else + return (ecore_chain_get_elem_left_u32(p_chain) == 0); +} + +static OSAL_INLINE +u16 ecore_chain_get_elem_per_page(struct ecore_chain *p_chain) +{ + return p_chain->elem_per_page; +} + +static OSAL_INLINE +u16 ecore_chain_get_usable_per_page(struct ecore_chain *p_chain) +{ + return p_chain->usable_per_page; +} + +static OSAL_INLINE +u8 ecore_chain_get_unusable_per_page(struct ecore_chain *p_chain) +{ + return p_chain->elem_unusable; +} + +static OSAL_INLINE u32 ecore_chain_get_size(struct ecore_chain *p_chain) +{ + return p_chain->size; +} + +static OSAL_INLINE u32 ecore_chain_get_page_cnt(struct ecore_chain *p_chain) +{ + return p_chain->page_cnt; +} + +static OSAL_INLINE +dma_addr_t ecore_chain_get_pbl_phys(struct ecore_chain *p_chain) +{ + return 
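[Editor's note: a quick worked example of the page-geometry macros above, assuming a 64-bit build where struct ecore_chain_next occupies 16 bytes (a regpair plus a pointer; padding is compiler dependent, so treat the 16 as an assumption).]

/* elem_size = 8 bytes, mode = ECORE_CHAIN_MODE_NEXT_PTR:
 *
 *   ELEMS_PER_PAGE(8)                   = 0x1000 / 8               = 512
 *   UNUSABLE_ELEMS_PER_PAGE(8, mode)    = 1 + (16 - 1) / 8         = 2
 *   USABLE_ELEMS_PER_PAGE(8, mode)      = 512 - 2                  = 510
 *   ECORE_CHAIN_PAGE_CNT(1024, 8, mode) = DIV_ROUND_UP(1024, 510)  = 3
 *
 * i.e. the next-pointer element at the end of each page costs two 8-byte
 * slots, so 1024 usable elements need three pages rather than two.
 */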
p_chain->pbl_sp.p_phys_table; +} + +/** + * @brief ecore_chain_advance_page - + * + * Advance the next element accros pages for a linked chain + * + * @param p_chain + * @param p_next_elem + * @param idx_to_inc + * @param page_to_inc + */ +static OSAL_INLINE void +ecore_chain_advance_page(struct ecore_chain *p_chain, void **p_next_elem, + void *idx_to_inc, void *page_to_inc) +{ + struct ecore_chain_next *p_next = OSAL_NULL; + u32 page_index = 0; + + switch (p_chain->mode) { + case ECORE_CHAIN_MODE_NEXT_PTR: + p_next = (struct ecore_chain_next *)(*p_next_elem); + *p_next_elem = p_next->next_virt; + if (is_chain_u16(p_chain)) + *(u16 *)idx_to_inc += (u16)p_chain->elem_unusable; + else + *(u32 *)idx_to_inc += (u16)p_chain->elem_unusable; + break; + case ECORE_CHAIN_MODE_SINGLE: + *p_next_elem = p_chain->p_virt_addr; + break; + case ECORE_CHAIN_MODE_PBL: + if (is_chain_u16(p_chain)) { + if (++(*(u16 *)page_to_inc) == p_chain->page_cnt) + *(u16 *)page_to_inc = 0; + page_index = *(u16 *)page_to_inc; + } else { + if (++(*(u32 *)page_to_inc) == p_chain->page_cnt) + *(u32 *)page_to_inc = 0; + page_index = *(u32 *)page_to_inc; + } + *p_next_elem = p_chain->pbl.pp_virt_addr_tbl[page_index]; + } +} + +#define is_unusable_idx(p, idx) \ + (((p)->u.chain16.idx & (p)->elem_per_page_mask) == (p)->usable_per_page) + +#define is_unusable_idx_u32(p, idx) \ + (((p)->u.chain32.idx & (p)->elem_per_page_mask) == (p)->usable_per_page) + +#define is_unusable_next_idx(p, idx) \ + ((((p)->u.chain16.idx + 1) & \ + (p)->elem_per_page_mask) == (p)->usable_per_page) + +#define is_unusable_next_idx_u32(p, idx) \ + ((((p)->u.chain32.idx + 1) & \ + (p)->elem_per_page_mask) == (p)->usable_per_page) + +#define test_and_skip(p, idx) \ + do { \ + if (is_chain_u16(p)) { \ + if (is_unusable_idx(p, idx)) \ + (p)->u.chain16.idx += \ + (p)->elem_unusable; \ + } else { \ + if (is_unusable_idx_u32(p, idx)) \ + (p)->u.chain32.idx += \ + (p)->elem_unusable; \ + } \ + } while (0) + +/** + * @brief ecore_chain_return_multi_produced - + * + * A chain in which the driver "Produces" elements should use this API + * to indicate previous produced elements are now consumed. + * + * @param p_chain + * @param num + */ +static OSAL_INLINE +void ecore_chain_return_multi_produced(struct ecore_chain *p_chain, u32 num) +{ + if (is_chain_u16(p_chain)) + p_chain->u.chain16.cons_idx += (u16)num; + else + p_chain->u.chain32.cons_idx += num; + test_and_skip(p_chain, cons_idx); +} + +/** + * @brief ecore_chain_return_produced - + * + * A chain in which the driver "Produces" elements should use this API + * to indicate previous produced elements are now consumed. + * + * @param p_chain + */ +static OSAL_INLINE void ecore_chain_return_produced(struct ecore_chain *p_chain) +{ + if (is_chain_u16(p_chain)) + p_chain->u.chain16.cons_idx++; + else + p_chain->u.chain32.cons_idx++; + test_and_skip(p_chain, cons_idx); +} + +/** + * @brief ecore_chain_produce - + * + * A chain in which the driver "Produces" elements should use this to get + * a pointer to the next element which can be "Produced". It's driver + * responsibility to validate that the chain has room for new element. 
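[Editor's note: a minimal produce-side usage sketch for the API documented here, built on ecore_chain_produce() defined just below. A u16-counted chain is assumed; the element layout, doorbell handling and the return convention are placeholders, not taken from this driver.]

struct example_bd {
	u32 addr_lo;
	u32 addr_hi;
};

static int example_post_bd(struct ecore_chain *p_chain, u64 dma_addr)
{
	struct example_bd *p_bd;

	/* Caller-side room check, as required by the comment above */
	if (ecore_chain_get_elem_left(p_chain) == 0)
		return -1;	/* ring full */

	p_bd = (struct example_bd *)ecore_chain_produce(p_chain);
	p_bd->addr_lo = (u32)dma_addr;
	p_bd->addr_hi = (u32)(dma_addr >> 32);

	/* Once the consumer (e.g. firmware) reports these elements done,
	 * the slots are reclaimed with ecore_chain_return_produced() /
	 * ecore_chain_return_multi_produced().
	 */
	return 0;
}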
+ * + * @param p_chain + * + * @return void*, a pointer to next element + */ +static OSAL_INLINE void *ecore_chain_produce(struct ecore_chain *p_chain) +{ + void *p_ret = OSAL_NULL, *p_prod_idx, *p_prod_page_idx; + + if (is_chain_u16(p_chain)) { + if ((p_chain->u.chain16.prod_idx & + p_chain->elem_per_page_mask) == p_chain->next_page_mask) { + p_prod_idx = &p_chain->u.chain16.prod_idx; + p_prod_page_idx = &p_chain->pbl.c.pbl_u16.prod_page_idx; + ecore_chain_advance_page(p_chain, &p_chain->p_prod_elem, + p_prod_idx, p_prod_page_idx); + } + p_chain->u.chain16.prod_idx++; + } else { + if ((p_chain->u.chain32.prod_idx & + p_chain->elem_per_page_mask) == p_chain->next_page_mask) { + p_prod_idx = &p_chain->u.chain32.prod_idx; + p_prod_page_idx = &p_chain->pbl.c.pbl_u32.prod_page_idx; + ecore_chain_advance_page(p_chain, &p_chain->p_prod_elem, + p_prod_idx, p_prod_page_idx); + } + p_chain->u.chain32.prod_idx++; + } + + p_ret = p_chain->p_prod_elem; + p_chain->p_prod_elem = (void *)(((u8 *)p_chain->p_prod_elem) + + p_chain->elem_size); + + return p_ret; +} + +/** + * @brief ecore_chain_get_capacity - + * + * Get the maximum number of BDs in chain + * + * @param p_chain + * @param num + * + * @return number of unusable BDs + */ +static OSAL_INLINE u32 ecore_chain_get_capacity(struct ecore_chain *p_chain) +{ + return p_chain->capacity; +} + +/** + * @brief ecore_chain_recycle_consumed - + * + * Returns an element which was previously consumed; + * Increments producers so they could be written to FW. + * + * @param p_chain + */ +static OSAL_INLINE +void ecore_chain_recycle_consumed(struct ecore_chain *p_chain) +{ + test_and_skip(p_chain, prod_idx); + if (is_chain_u16(p_chain)) + p_chain->u.chain16.prod_idx++; + else + p_chain->u.chain32.prod_idx++; +} + +/** + * @brief ecore_chain_consume - + * + * A Chain in which the driver utilizes data written by a different source + * (i.e., FW) should use this to access passed buffers. 
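[Editor's note: the matching consume-side sketch. The peer (typically firmware) produces elements; the driver reads them with ecore_chain_consume(), defined just below, and hands each slot back with ecore_chain_recycle_consumed(). The hw_cons parameter stands in for however the device reports its progress (e.g. a DMA'd index) and, like the u32 element type, is a placeholder.]

static int example_poll_completion(struct ecore_chain *p_chain,
				   u16 hw_cons, u32 *p_out)
{
	u32 *p_cqe;

	/* Nothing new if the software consumer has caught up */
	if (ecore_chain_get_cons_idx(p_chain) == hw_cons)
		return 0;

	p_cqe = (u32 *)ecore_chain_consume(p_chain);
	*p_out = *p_cqe;

	/* Make the slot available to the producer side again */
	ecore_chain_recycle_consumed(p_chain);

	return 1;
}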
+ * + * @param p_chain + * + * @return void*, a pointer to the next buffer written + */ +static OSAL_INLINE void *ecore_chain_consume(struct ecore_chain *p_chain) +{ + void *p_ret = OSAL_NULL, *p_cons_idx, *p_cons_page_idx; + + if (is_chain_u16(p_chain)) { + if ((p_chain->u.chain16.cons_idx & + p_chain->elem_per_page_mask) == p_chain->next_page_mask) { + p_cons_idx = &p_chain->u.chain16.cons_idx; + p_cons_page_idx = &p_chain->pbl.c.pbl_u16.cons_page_idx; + ecore_chain_advance_page(p_chain, &p_chain->p_cons_elem, + p_cons_idx, p_cons_page_idx); + } + p_chain->u.chain16.cons_idx++; + } else { + if ((p_chain->u.chain32.cons_idx & + p_chain->elem_per_page_mask) == p_chain->next_page_mask) { + p_cons_idx = &p_chain->u.chain32.cons_idx; + p_cons_page_idx = &p_chain->pbl.c.pbl_u32.cons_page_idx; + ecore_chain_advance_page(p_chain, &p_chain->p_cons_elem, + p_cons_idx, p_cons_page_idx); + } + p_chain->u.chain32.cons_idx++; + } + + p_ret = p_chain->p_cons_elem; + p_chain->p_cons_elem = (void *)(((u8 *)p_chain->p_cons_elem) + + p_chain->elem_size); + + return p_ret; +} + +/** + * @brief ecore_chain_reset - + * + * Resets the chain to its start state + * + * @param p_chain pointer to a previously allocted chain + */ +static OSAL_INLINE void ecore_chain_reset(struct ecore_chain *p_chain) +{ + u32 i; + + if (is_chain_u16(p_chain)) { + p_chain->u.chain16.prod_idx = 0; + p_chain->u.chain16.cons_idx = 0; + } else { + p_chain->u.chain32.prod_idx = 0; + p_chain->u.chain32.cons_idx = 0; + } + p_chain->p_cons_elem = p_chain->p_virt_addr; + p_chain->p_prod_elem = p_chain->p_virt_addr; + + if (p_chain->mode == ECORE_CHAIN_MODE_PBL) { + /* Use "page_cnt-1" as a reset value for the prod/cons page's + * indices, to avoid unnecessary page advancing on the first + * call to ecore_chain_produce/consume. Instead, the indices + * will be advanced to page_cnt and then will be wrapped to 0. 
+ */ + u32 reset_val = p_chain->page_cnt - 1; + + if (is_chain_u16(p_chain)) { + p_chain->pbl.c.pbl_u16.prod_page_idx = (u16)reset_val; + p_chain->pbl.c.pbl_u16.cons_page_idx = (u16)reset_val; + } else { + p_chain->pbl.c.pbl_u32.prod_page_idx = reset_val; + p_chain->pbl.c.pbl_u32.cons_page_idx = reset_val; + } + } + + switch (p_chain->intended_use) { + case ECORE_CHAIN_USE_TO_CONSUME: + /* produce empty elements */ + for (i = 0; i < p_chain->capacity; i++) + ecore_chain_recycle_consumed(p_chain); + break; + + case ECORE_CHAIN_USE_TO_CONSUME_PRODUCE: + case ECORE_CHAIN_USE_TO_PRODUCE: + default: + /* Do nothing */ + break; + } +} + +/** + * @brief ecore_chain_init_params - + * + * Initalizes a basic chain struct + * + * @param p_chain + * @param page_cnt number of pages in the allocated buffer + * @param elem_size size of each element in the chain + * @param intended_use + * @param mode + * @param cnt_type + * @param dp_ctx + */ +static OSAL_INLINE void +ecore_chain_init_params(struct ecore_chain *p_chain, u32 page_cnt, u8 elem_size, + enum ecore_chain_use_mode intended_use, + enum ecore_chain_mode mode, + enum ecore_chain_cnt_type cnt_type, void *dp_ctx) +{ + /* chain fixed parameters */ + p_chain->p_virt_addr = OSAL_NULL; + p_chain->p_phys_addr = 0; + p_chain->elem_size = elem_size; + p_chain->intended_use = (u8)intended_use; + p_chain->mode = mode; + p_chain->cnt_type = (u8)cnt_type; + + p_chain->elem_per_page = ELEMS_PER_PAGE(elem_size); + p_chain->usable_per_page = USABLE_ELEMS_PER_PAGE(elem_size, mode); + p_chain->elem_per_page_mask = p_chain->elem_per_page - 1; + p_chain->elem_unusable = UNUSABLE_ELEMS_PER_PAGE(elem_size, mode); + p_chain->next_page_mask = (p_chain->usable_per_page & + p_chain->elem_per_page_mask); + + p_chain->page_cnt = page_cnt; + p_chain->capacity = p_chain->usable_per_page * page_cnt; + p_chain->size = p_chain->elem_per_page * page_cnt; + p_chain->b_external_pbl = false; + p_chain->pbl_sp.p_phys_table = 0; + p_chain->pbl_sp.p_virt_table = OSAL_NULL; + p_chain->pbl.pp_virt_addr_tbl = OSAL_NULL; + + p_chain->dp_ctx = dp_ctx; +} + +/** + * @brief ecore_chain_init_mem - + * + * Initalizes a basic chain struct with its chain buffers + * + * @param p_chain + * @param p_virt_addr virtual address of allocated buffer's beginning + * @param p_phys_addr physical address of allocated buffer's beginning + * + */ +static OSAL_INLINE void ecore_chain_init_mem(struct ecore_chain *p_chain, + void *p_virt_addr, + dma_addr_t p_phys_addr) +{ + p_chain->p_virt_addr = p_virt_addr; + p_chain->p_phys_addr = p_phys_addr; +} + +/** + * @brief ecore_chain_init_pbl_mem - + * + * Initalizes a basic chain struct with its pbl buffers + * + * @param p_chain + * @param p_virt_pbl pointer to a pre allocated side table which will hold + * virtual page addresses. + * @param p_phys_pbl pointer to a pre-allocated side table which will hold + * physical page addresses. + * @param pp_virt_addr_tbl + * pointer to a pre-allocated side table which will hold + * the virtual addresses of the chain pages. 
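[Editor's note: a hypothetical initialization sequence using the helpers defined above. In the driver proper, ecore_chain_alloc() (not shown in this hunk) is expected to do the allocation and wiring; the element type and counts below are invented purely for illustration.]

struct example_bd { u8 raw[64]; };

static void example_chain_setup(struct ecore_chain *p_chain,
				void *p_virt, dma_addr_t p_phys)
{
	u32 page_cnt = ECORE_CHAIN_PAGE_CNT(64, sizeof(struct example_bd),
					    ECORE_CHAIN_MODE_SINGLE);

	/* 64 elements of 64 bytes fit exactly in one 4 KB page, so a
	 * SINGLE-mode, u16-counted chain is enough here.
	 */
	ecore_chain_init_params(p_chain, page_cnt,
				(u8)sizeof(struct example_bd),
				ECORE_CHAIN_USE_TO_PRODUCE,
				ECORE_CHAIN_MODE_SINGLE,
				ECORE_CHAIN_CNT_TYPE_U16,
				OSAL_NULL /* dp_ctx */);

	/* Attach the caller-allocated, DMA-able page and start clean */
	ecore_chain_init_mem(p_chain, p_virt, p_phys);
	ecore_chain_reset(p_chain);
}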
+ * + */ +static OSAL_INLINE void ecore_chain_init_pbl_mem(struct ecore_chain *p_chain, + void *p_virt_pbl, + dma_addr_t p_phys_pbl, + void **pp_virt_addr_tbl) +{ + p_chain->pbl_sp.p_phys_table = p_phys_pbl; + p_chain->pbl_sp.p_virt_table = p_virt_pbl; + p_chain->pbl.pp_virt_addr_tbl = pp_virt_addr_tbl; +} + +/** + * @brief ecore_chain_init_next_ptr_elem - + * + * Initalizes a next pointer element + * + * @param p_chain + * @param p_virt_curr virtual address of a chain page of which the next + * pointer element is initialized + * @param p_virt_next virtual address of the next chain page + * @param p_phys_next physical address of the next chain page + * + */ +static OSAL_INLINE void +ecore_chain_init_next_ptr_elem(struct ecore_chain *p_chain, void *p_virt_curr, + void *p_virt_next, dma_addr_t p_phys_next) +{ + struct ecore_chain_next *p_next; + u32 size; + + size = p_chain->elem_size * p_chain->usable_per_page; + p_next = (struct ecore_chain_next *)((u8 *)p_virt_curr + size); + + DMA_REGPAIR_LE(p_next->next_phys, p_phys_next); + + p_next->next_virt = p_virt_next; +} + +/** + * @brief ecore_chain_get_last_elem - + * + * Returns a pointer to the last element of the chain + * + * @param p_chain + * + * @return void* + */ +static OSAL_INLINE void *ecore_chain_get_last_elem(struct ecore_chain *p_chain) +{ + struct ecore_chain_next *p_next = OSAL_NULL; + void *p_virt_addr = OSAL_NULL; + u32 size, last_page_idx; + + if (!p_chain->p_virt_addr) + goto out; + + switch (p_chain->mode) { + case ECORE_CHAIN_MODE_NEXT_PTR: + size = p_chain->elem_size * p_chain->usable_per_page; + p_virt_addr = p_chain->p_virt_addr; + p_next = (struct ecore_chain_next *)((u8 *)p_virt_addr + size); + while (p_next->next_virt != p_chain->p_virt_addr) { + p_virt_addr = p_next->next_virt; + p_next = + (struct ecore_chain_next *)((u8 *)p_virt_addr + + size); + } + break; + case ECORE_CHAIN_MODE_SINGLE: + p_virt_addr = p_chain->p_virt_addr; + break; + case ECORE_CHAIN_MODE_PBL: + last_page_idx = p_chain->page_cnt - 1; + p_virt_addr = p_chain->pbl.pp_virt_addr_tbl[last_page_idx]; + break; + } + /* p_virt_addr points at this stage to the last page of the chain */ + size = p_chain->elem_size * (p_chain->usable_per_page - 1); + p_virt_addr = ((u8 *)p_virt_addr + size); +out: + return p_virt_addr; +} + +/** + * @brief ecore_chain_set_prod - sets the prod to the given value + * + * @param prod_idx + * @param p_prod_elem + */ +static OSAL_INLINE void ecore_chain_set_prod(struct ecore_chain *p_chain, + u32 prod_idx, void *p_prod_elem) +{ + if (p_chain->mode == ECORE_CHAIN_MODE_PBL) { + u32 cur_prod, page_mask, page_cnt, page_diff; + + cur_prod = is_chain_u16(p_chain) ? p_chain->u.chain16.prod_idx + : p_chain->u.chain32.prod_idx; + + /* Assume that number of elements in a page is power of 2 */ + page_mask = ~p_chain->elem_per_page_mask; + + /* Use "cur_prod - 1" and "prod_idx - 1" since producer index + * reaches the first element of next page before the page index + * is incremented. See ecore_chain_produce(). + * Index wrap around is not a problem because the difference + * between current and given producer indexes is always + * positive and lower than the chain's capacity. 
+ */ + page_diff = (((cur_prod - 1) & page_mask) - + ((prod_idx - 1) & page_mask)) / + p_chain->elem_per_page; + + page_cnt = ecore_chain_get_page_cnt(p_chain); + if (is_chain_u16(p_chain)) + p_chain->pbl.c.pbl_u16.prod_page_idx = + (p_chain->pbl.c.pbl_u16.prod_page_idx - + page_diff + page_cnt) % page_cnt; + else + p_chain->pbl.c.pbl_u32.prod_page_idx = + (p_chain->pbl.c.pbl_u32.prod_page_idx - + page_diff + page_cnt) % page_cnt; + } + + if (is_chain_u16(p_chain)) + p_chain->u.chain16.prod_idx = (u16)prod_idx; + else + p_chain->u.chain32.prod_idx = prod_idx; + p_chain->p_prod_elem = p_prod_elem; +} + +/** + * @brief ecore_chain_set_cons - sets the cons to the given value + * + * @param cons_idx + * @param p_cons_elem + */ +static OSAL_INLINE void ecore_chain_set_cons(struct ecore_chain *p_chain, + u32 cons_idx, void *p_cons_elem) +{ + if (p_chain->mode == ECORE_CHAIN_MODE_PBL) { + u32 cur_cons, page_mask, page_cnt, page_diff; + + cur_cons = is_chain_u16(p_chain) ? p_chain->u.chain16.cons_idx + : p_chain->u.chain32.cons_idx; + + /* Assume that number of elements in a page is power of 2 */ + page_mask = ~p_chain->elem_per_page_mask; + + /* Use "cur_cons - 1" and "cons_idx - 1" since consumer index + * reaches the first element of next page before the page index + * is incremented. See ecore_chain_consume(). + * Index wrap around is not a problem because the difference + * between current and given consumer indexes is always + * positive and lower than the chain's capacity. + */ + page_diff = (((cur_cons - 1) & page_mask) - + ((cons_idx - 1) & page_mask)) / + p_chain->elem_per_page; + + page_cnt = ecore_chain_get_page_cnt(p_chain); + if (is_chain_u16(p_chain)) + p_chain->pbl.c.pbl_u16.cons_page_idx = + (p_chain->pbl.c.pbl_u16.cons_page_idx - + page_diff + page_cnt) % page_cnt; + else + p_chain->pbl.c.pbl_u32.cons_page_idx = + (p_chain->pbl.c.pbl_u32.cons_page_idx - + page_diff + page_cnt) % page_cnt; + } + + if (is_chain_u16(p_chain)) + p_chain->u.chain16.cons_idx = (u16)cons_idx; + else + p_chain->u.chain32.cons_idx = cons_idx; + + p_chain->p_cons_elem = p_cons_elem; +} + +/** + * @brief ecore_chain_pbl_zero_mem - set chain memory to 0 + * + * @param p_chain + */ +static OSAL_INLINE void ecore_chain_pbl_zero_mem(struct ecore_chain *p_chain) +{ + u32 i, page_cnt; + + if (p_chain->mode != ECORE_CHAIN_MODE_PBL) + return; + + page_cnt = ecore_chain_get_page_cnt(p_chain); + + for (i = 0; i < page_cnt; i++) + OSAL_MEM_ZERO(p_chain->pbl.pp_virt_addr_tbl[i], + ECORE_CHAIN_PAGE_SIZE); +} + +int ecore_chain_print(struct ecore_chain *p_chain, char *buffer, + u32 buffer_size, u32 *element_indx, u32 stop_indx, + bool print_metadata, + int (*func_ptr_print_element)(struct ecore_chain *p_chain, + void *p_element, + char *buffer), + int (*func_ptr_print_metadata)(struct ecore_chain + *p_chain, + char *buffer)); + +#endif /* __ECORE_CHAIN_H__ */ diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_cxt.c b/src/spdk/dpdk/drivers/net/qede/base/ecore_cxt.c new file mode 100644 index 000000000..773b75ecd --- /dev/null +++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_cxt.c @@ -0,0 +1,2308 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. + * All rights reserved. 
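[Editor's note: a worked example of the page_diff rewinding performed by ecore_chain_set_prod() / ecore_chain_set_cons() above, using deliberately small power-of-two numbers; real elem_per_page values depend on the element size.]

/* elem_per_page = 8 (so elem_per_page_mask = 7 and page_mask = ~7),
 * page_cnt = 4, current prod_idx = 21, requested prod_idx = 13:
 *
 *   page_diff = (((21 - 1) & ~7) - ((13 - 1) & ~7)) / 8
 *             = (16 - 8) / 8
 *             = 1
 *
 * so the producer page index is stepped back by one page, and the
 * "+ page_cnt) % page_cnt" term keeps it inside [0, page_cnt) on wrap.
 */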
+ * www.cavium.com + */ + +#include "bcm_osal.h" +#include "reg_addr.h" +#include "common_hsi.h" +#include "ecore_hsi_common.h" +#include "ecore_hsi_eth.h" +#include "ecore_rt_defs.h" +#include "ecore_status.h" +#include "ecore.h" +#include "ecore_init_ops.h" +#include "ecore_init_fw_funcs.h" +#include "ecore_cxt.h" +#include "ecore_hw.h" +#include "ecore_dev_api.h" +#include "ecore_sriov.h" +#include "ecore_mcp.h" + +/* Max number of connection types in HW (DQ/CDU etc.) */ +#define MAX_CONN_TYPES PROTOCOLID_COMMON +#define NUM_TASK_TYPES 2 +#define NUM_TASK_PF_SEGMENTS 4 +#define NUM_TASK_VF_SEGMENTS 1 + +/* Doorbell-Queue constants */ +#define DQ_RANGE_SHIFT 4 +#define DQ_RANGE_ALIGN (1 << DQ_RANGE_SHIFT) + +/* Searcher constants */ +#define SRC_MIN_NUM_ELEMS 256 + +/* GFS constants */ +#define RGFS_MIN_NUM_ELEMS 256 +#define TGFS_MIN_NUM_ELEMS 256 + +/* Timers constants */ +#define TM_SHIFT 7 +#define TM_ALIGN (1 << TM_SHIFT) +#define TM_ELEM_SIZE 4 + +/* ILT constants */ +#define ILT_DEFAULT_HW_P_SIZE 4 + +#define ILT_PAGE_IN_BYTES(hw_p_size) (1U << ((hw_p_size) + 12)) +#define ILT_CFG_REG(cli, reg) PSWRQ2_REG_##cli##_##reg##_RT_OFFSET + +/* ILT entry structure */ +#define ILT_ENTRY_PHY_ADDR_MASK 0x000FFFFFFFFFFFULL +#define ILT_ENTRY_PHY_ADDR_SHIFT 0 +#define ILT_ENTRY_VALID_MASK 0x1ULL +#define ILT_ENTRY_VALID_SHIFT 52 +#define ILT_ENTRY_IN_REGS 2 +#define ILT_REG_SIZE_IN_BYTES 4 + +/* connection context union */ +union conn_context { + struct core_conn_context core_ctx; + struct eth_conn_context eth_ctx; +}; + +/* TYPE-0 task context - iSCSI, FCOE */ +union type0_task_context { +}; + +/* TYPE-1 task context - ROCE */ +union type1_task_context { + struct regpair reserved; /* @DPDK */ +}; + +struct src_ent { + u8 opaque[56]; + u64 next; +}; + +#define CDUT_SEG_ALIGNMET 3 /* in 4k chunks */ +#define CDUT_SEG_ALIGNMET_IN_BYTES (1 << (CDUT_SEG_ALIGNMET + 12)) + +#define CONN_CXT_SIZE(p_hwfn) \ + ALIGNED_TYPE_SIZE(union conn_context, p_hwfn) + +#define SRQ_CXT_SIZE (sizeof(struct regpair) * 8) /* @DPDK */ + +#define TYPE0_TASK_CXT_SIZE(p_hwfn) \ + ALIGNED_TYPE_SIZE(union type0_task_context, p_hwfn) + +/* Alignment is inherent to the type1_task_context structure */ +#define TYPE1_TASK_CXT_SIZE(p_hwfn) sizeof(union type1_task_context) + +/* PF per protocl configuration object */ +#define TASK_SEGMENTS (NUM_TASK_PF_SEGMENTS + NUM_TASK_VF_SEGMENTS) +#define TASK_SEGMENT_VF (NUM_TASK_PF_SEGMENTS) + +struct ecore_tid_seg { + u32 count; + u8 type; + bool has_fl_mem; +}; + +struct ecore_conn_type_cfg { + u32 cid_count; + u32 cids_per_vf; + struct ecore_tid_seg tid_seg[TASK_SEGMENTS]; +}; + +/* ILT Client configuration, + * Per connection type (protocol) resources (cids, tis, vf cids etc.) 
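[Editor's note: the ILT_ENTRY_* definitions above describe a 64-bit ILT entry with a physical-page field starting at bit 0 and a valid bit at bit 52, written to hardware as ILT_ENTRY_IN_REGS (2) registers of ILT_REG_SIZE_IN_BYTES (4) bytes. Below is a hedged sketch of composing such an entry; the ">> 12" page granularity is inferred from the 4 KB base of ILT_PAGE_IN_BYTES() and should be treated as an assumption about the encoding, not a statement of this driver's exact behaviour.]

static u64 example_ilt_entry(u64 phys_page_addr)
{
	u64 entry = 0;

	/* Physical address field, assumed to be in 4 KB units */
	entry |= ((phys_page_addr >> 12) & ILT_ENTRY_PHY_ADDR_MASK) <<
		 ILT_ENTRY_PHY_ADDR_SHIFT;

	/* Mark the line as populated */
	entry |= ILT_ENTRY_VALID_MASK << ILT_ENTRY_VALID_SHIFT;

	return entry;	/* written out as two 32-bit halves */
}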
+ * 1 - for connection context (CDUC) and for each task context we need two + * values, for regular task context and for force load memory + */ +#define ILT_CLI_PF_BLOCKS (1 + NUM_TASK_PF_SEGMENTS * 2) +#define ILT_CLI_VF_BLOCKS (1 + NUM_TASK_VF_SEGMENTS * 2) +#define CDUC_BLK (0) +#define SRQ_BLK (0) +#define CDUT_SEG_BLK(n) (1 + (u8)(n)) +#define CDUT_FL_SEG_BLK(n, X) (1 + (n) + NUM_TASK_##X##_SEGMENTS) + +struct ilt_cfg_pair { + u32 reg; + u32 val; +}; + +struct ecore_ilt_cli_blk { + u32 total_size; /* 0 means not active */ + u32 real_size_in_page; + u32 start_line; + u32 dynamic_line_offset; + u32 dynamic_line_cnt; +}; + +struct ecore_ilt_client_cfg { + bool active; + + /* ILT boundaries */ + struct ilt_cfg_pair first; + struct ilt_cfg_pair last; + struct ilt_cfg_pair p_size; + + /* ILT client blocks for PF */ + struct ecore_ilt_cli_blk pf_blks[ILT_CLI_PF_BLOCKS]; + u32 pf_total_lines; + + /* ILT client blocks for VFs */ + struct ecore_ilt_cli_blk vf_blks[ILT_CLI_VF_BLOCKS]; + u32 vf_total_lines; +}; + +#define MAP_WORD_SIZE sizeof(unsigned long) +#define BITS_PER_MAP_WORD (MAP_WORD_SIZE * 8) + +struct ecore_cid_acquired_map { + u32 start_cid; + u32 max_count; + unsigned long *cid_map; +}; + +struct ecore_src_t2 { + struct phys_mem_desc *dma_mem; + u32 num_pages; + u64 first_free; + u64 last_free; +}; + +struct ecore_cxt_mngr { + /* Per protocl configuration */ + struct ecore_conn_type_cfg conn_cfg[MAX_CONN_TYPES]; + + /* computed ILT structure */ + struct ecore_ilt_client_cfg clients[ILT_CLI_MAX]; + + /* Task type sizes */ + u32 task_type_size[NUM_TASK_TYPES]; + + /* total number of VFs for this hwfn - + * ALL VFs are symmetric in terms of HW resources + */ + u32 vf_count; + + /* Acquired CIDs */ + struct ecore_cid_acquired_map acquired[MAX_CONN_TYPES]; + struct ecore_cid_acquired_map *acquired_vf[MAX_CONN_TYPES]; + + /* ILT shadow table */ + struct phys_mem_desc *ilt_shadow; + u32 pf_start_line; + + /* Mutex for a dynamic ILT allocation */ + osal_mutex_t mutex; + + /* SRC T2 */ + struct ecore_src_t2 src_t2; + + /* The infrastructure originally was very generic and context/task + * oriented - per connection-type we would set how many of those + * are needed, and later when determining how much memory we're + * needing for a given block we'd iterate over all the relevant + * connection-types. + * But since then we've had some additional resources, some of which + * require memory which is indepent of the general context/task + * scheme. We add those here explicitly per-feature. + */ + + /* total number of SRQ's for this hwfn */ + u32 srq_count; + + /* Maximal number of L2 steering filters */ + u32 arfs_count; + + /* TODO - VF arfs filters ? 
*/ +}; + +static OSAL_INLINE bool tm_cid_proto(enum protocol_type type) +{ + return type == PROTOCOLID_TOE; +} + +static bool tm_tid_proto(enum protocol_type type) +{ + return type == PROTOCOLID_FCOE; +} + +/* counts the iids for the CDU/CDUC ILT client configuration */ +struct ecore_cdu_iids { + u32 pf_cids; + u32 per_vf_cids; +}; + +static void ecore_cxt_cdu_iids(struct ecore_cxt_mngr *p_mngr, + struct ecore_cdu_iids *iids) +{ + u32 type; + + for (type = 0; type < MAX_CONN_TYPES; type++) { + iids->pf_cids += p_mngr->conn_cfg[type].cid_count; + iids->per_vf_cids += p_mngr->conn_cfg[type].cids_per_vf; + } +} + +/* counts the iids for the Searcher block configuration */ +struct ecore_src_iids { + u32 pf_cids; + u32 per_vf_cids; +}; + +static void ecore_cxt_src_iids(struct ecore_cxt_mngr *p_mngr, + struct ecore_src_iids *iids) +{ + u32 i; + + for (i = 0; i < MAX_CONN_TYPES; i++) { + iids->pf_cids += p_mngr->conn_cfg[i].cid_count; + iids->per_vf_cids += p_mngr->conn_cfg[i].cids_per_vf; + } + + /* Add L2 filtering filters in addition */ + iids->pf_cids += p_mngr->arfs_count; +} + +/* counts the iids for the Timers block configuration */ +struct ecore_tm_iids { + u32 pf_cids; + u32 pf_tids[NUM_TASK_PF_SEGMENTS]; /* per segment */ + u32 pf_tids_total; + u32 per_vf_cids; + u32 per_vf_tids; +}; + +static void ecore_cxt_tm_iids(struct ecore_hwfn *p_hwfn, + struct ecore_cxt_mngr *p_mngr, + struct ecore_tm_iids *iids) +{ + struct ecore_conn_type_cfg *p_cfg; + bool tm_vf_required = false; + bool tm_required = false; + u32 i, j; + + for (i = 0; i < MAX_CONN_TYPES; i++) { + p_cfg = &p_mngr->conn_cfg[i]; + + if (tm_cid_proto(i) || tm_required) { + if (p_cfg->cid_count) + tm_required = true; + + iids->pf_cids += p_cfg->cid_count; + } + + if (tm_cid_proto(i) || tm_vf_required) { + if (p_cfg->cids_per_vf) + tm_vf_required = true; + + } + + if (tm_tid_proto(i)) { + struct ecore_tid_seg *segs = p_cfg->tid_seg; + + /* for each segment there is at most one + * protocol for which count is not 0. + */ + for (j = 0; j < NUM_TASK_PF_SEGMENTS; j++) + iids->pf_tids[j] += segs[j].count; + + /* The last array elelment is for the VFs. As for PF + * segments there can be only one protocol for + * which this value is not 0. + */ + iids->per_vf_tids += segs[NUM_TASK_PF_SEGMENTS].count; + } + } + + iids->pf_cids = ROUNDUP(iids->pf_cids, TM_ALIGN); + iids->per_vf_cids = ROUNDUP(iids->per_vf_cids, TM_ALIGN); + iids->per_vf_tids = ROUNDUP(iids->per_vf_tids, TM_ALIGN); + + for (iids->pf_tids_total = 0, j = 0; j < NUM_TASK_PF_SEGMENTS; j++) { + iids->pf_tids[j] = ROUNDUP(iids->pf_tids[j], TM_ALIGN); + iids->pf_tids_total += iids->pf_tids[j]; + } +} + +static void ecore_cxt_qm_iids(struct ecore_hwfn *p_hwfn, + struct ecore_qm_iids *iids) +{ + struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; + struct ecore_tid_seg *segs; + u32 vf_cids = 0, type, j; + u32 vf_tids = 0; + + for (type = 0; type < MAX_CONN_TYPES; type++) { + iids->cids += p_mngr->conn_cfg[type].cid_count; + vf_cids += p_mngr->conn_cfg[type].cids_per_vf; + + segs = p_mngr->conn_cfg[type].tid_seg; + /* for each segment there is at most one + * protocol for which count is not 0. + */ + for (j = 0; j < NUM_TASK_PF_SEGMENTS; j++) + iids->tids += segs[j].count; + + /* The last array elelment is for the VFs. As for PF + * segments there can be only one protocol for + * which this value is not 0. 
+ */ + vf_tids += segs[NUM_TASK_PF_SEGMENTS].count; + } + + iids->vf_cids += vf_cids * p_mngr->vf_count; + iids->tids += vf_tids * p_mngr->vf_count; + + DP_VERBOSE(p_hwfn, ECORE_MSG_ILT, + "iids: CIDS %08x vf_cids %08x tids %08x vf_tids %08x\n", + iids->cids, iids->vf_cids, iids->tids, vf_tids); +} + +static struct ecore_tid_seg *ecore_cxt_tid_seg_info(struct ecore_hwfn *p_hwfn, + u32 seg) +{ + struct ecore_cxt_mngr *p_cfg = p_hwfn->p_cxt_mngr; + u32 i; + + /* Find the protocol with tid count > 0 for this segment. + * Note: there can only be one and this is already validated. + */ + for (i = 0; i < MAX_CONN_TYPES; i++) { + if (p_cfg->conn_cfg[i].tid_seg[seg].count) + return &p_cfg->conn_cfg[i].tid_seg[seg]; + } + return OSAL_NULL; +} + +static void ecore_cxt_set_srq_count(struct ecore_hwfn *p_hwfn, u32 num_srqs) +{ + struct ecore_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr; + + p_mgr->srq_count = num_srqs; +} + +u32 ecore_cxt_get_srq_count(struct ecore_hwfn *p_hwfn) +{ + struct ecore_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr; + + return p_mgr->srq_count; +} + +/* set the iids (cid/tid) count per protocol */ +static void ecore_cxt_set_proto_cid_count(struct ecore_hwfn *p_hwfn, + enum protocol_type type, + u32 cid_count, u32 vf_cid_cnt) +{ + struct ecore_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr; + struct ecore_conn_type_cfg *p_conn = &p_mgr->conn_cfg[type]; + + p_conn->cid_count = ROUNDUP(cid_count, DQ_RANGE_ALIGN); + p_conn->cids_per_vf = ROUNDUP(vf_cid_cnt, DQ_RANGE_ALIGN); +} + +u32 ecore_cxt_get_proto_cid_count(struct ecore_hwfn *p_hwfn, + enum protocol_type type, u32 *vf_cid) +{ + if (vf_cid) + *vf_cid = p_hwfn->p_cxt_mngr->conn_cfg[type].cids_per_vf; + + return p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count; +} + +u32 ecore_cxt_get_proto_cid_start(struct ecore_hwfn *p_hwfn, + enum protocol_type type) +{ + return p_hwfn->p_cxt_mngr->acquired[type].start_cid; +} + +u32 ecore_cxt_get_proto_tid_count(struct ecore_hwfn *p_hwfn, + enum protocol_type type) +{ + u32 cnt = 0; + int i; + + for (i = 0; i < TASK_SEGMENTS; i++) + cnt += p_hwfn->p_cxt_mngr->conn_cfg[type].tid_seg[i].count; + + return cnt; +} + +static OSAL_INLINE void +ecore_cxt_set_proto_tid_count(struct ecore_hwfn *p_hwfn, + enum protocol_type proto, + u8 seg, u8 seg_type, u32 count, bool has_fl) +{ + struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; + struct ecore_tid_seg *p_seg = &p_mngr->conn_cfg[proto].tid_seg[seg]; + + p_seg->count = count; + p_seg->has_fl_mem = has_fl; + p_seg->type = seg_type; +} + +/* the *p_line parameter must be either 0 for the first invocation or the + * value returned in the previous invocation. 
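[Editor's note: for concreteness, the doorbell-range rounding performed by ecore_cxt_set_proto_cid_count() below this point uses DQ_RANGE_ALIGN = 1 << DQ_RANGE_SHIFT = 16.]

/*   ROUNDUP(100, 16) = 112      ROUNDUP(128, 16) = 128
 *
 * i.e. every protocol's cid_count / cids_per_vf is padded up to a multiple
 * of 16, presumably so each protocol's cid range lines up with the
 * doorbell-queue range granularity named by the DQ_RANGE_* constants.
 */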
+ */ +static void ecore_ilt_cli_blk_fill(struct ecore_ilt_client_cfg *p_cli, + struct ecore_ilt_cli_blk *p_blk, + u32 start_line, + u32 total_size, u32 elem_size) +{ + u32 ilt_size = ILT_PAGE_IN_BYTES(p_cli->p_size.val); + + /* verify that it's called once for each block */ + if (p_blk->total_size) + return; + + p_blk->total_size = total_size; + p_blk->real_size_in_page = 0; + if (elem_size) + p_blk->real_size_in_page = (ilt_size / elem_size) * elem_size; + p_blk->start_line = start_line; +} + +static void ecore_ilt_cli_adv_line(struct ecore_hwfn *p_hwfn, + struct ecore_ilt_client_cfg *p_cli, + struct ecore_ilt_cli_blk *p_blk, + u32 *p_line, enum ilt_clients client_id) +{ + if (!p_blk->total_size) + return; + + if (!p_cli->active) + p_cli->first.val = *p_line; + + p_cli->active = true; + *p_line += DIV_ROUND_UP(p_blk->total_size, p_blk->real_size_in_page); + p_cli->last.val = *p_line - 1; + + DP_VERBOSE(p_hwfn, ECORE_MSG_ILT, + "ILT[Client %d] - Lines: [%08x - %08x]. Block - Size %08x" + " [Real %08x] Start line %d\n", + client_id, p_cli->first.val, p_cli->last.val, + p_blk->total_size, p_blk->real_size_in_page, + p_blk->start_line); +} + +static void ecore_ilt_get_dynamic_line_range(struct ecore_hwfn *p_hwfn, + enum ilt_clients ilt_client, + u32 *dynamic_line_offset, + u32 *dynamic_line_cnt) +{ + struct ecore_ilt_client_cfg *p_cli; + struct ecore_conn_type_cfg *p_cfg; + u32 cxts_per_p; + + /* TBD MK: ILT code should be simplified once PROTO enum is changed */ + + *dynamic_line_offset = 0; + *dynamic_line_cnt = 0; + + if (ilt_client == ILT_CLI_CDUC) { + p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC]; + p_cfg = &p_hwfn->p_cxt_mngr->conn_cfg[PROTOCOLID_ROCE]; + + cxts_per_p = ILT_PAGE_IN_BYTES(p_cli->p_size.val) / + (u32)CONN_CXT_SIZE(p_hwfn); + + *dynamic_line_cnt = p_cfg->cid_count / cxts_per_p; + } +} + +static struct ecore_ilt_client_cfg * +ecore_cxt_set_cli(struct ecore_ilt_client_cfg *p_cli) +{ + p_cli->active = false; + p_cli->first.val = 0; + p_cli->last.val = 0; + return p_cli; +} + +static struct ecore_ilt_cli_blk * +ecore_cxt_set_blk(struct ecore_ilt_cli_blk *p_blk) +{ + p_blk->total_size = 0; + return p_blk; + } + +static u32 +ecore_cxt_src_elements(struct ecore_cxt_mngr *p_mngr) +{ + struct ecore_src_iids src_iids; + u32 elem_num = 0; + + OSAL_MEM_ZERO(&src_iids, sizeof(src_iids)); + ecore_cxt_src_iids(p_mngr, &src_iids); + + /* Both the PF and VFs searcher connections are stored in the per PF + * database. Thus sum the PF searcher cids and all the VFs searcher + * cids. 
+ */ + elem_num = src_iids.pf_cids + + src_iids.per_vf_cids * p_mngr->vf_count; + if (elem_num == 0) + return elem_num; + + elem_num = OSAL_MAX_T(u32, elem_num, SRC_MIN_NUM_ELEMS); + elem_num = OSAL_ROUNDUP_POW_OF_TWO(elem_num); + + return elem_num; +} + +enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn) +{ + u32 curr_line, total, i, task_size, line, total_size, elem_size; + struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; + struct ecore_ilt_client_cfg *p_cli; + struct ecore_ilt_cli_blk *p_blk; + struct ecore_cdu_iids cdu_iids; + struct ecore_qm_iids qm_iids; + struct ecore_tm_iids tm_iids; + struct ecore_tid_seg *p_seg; + + OSAL_MEM_ZERO(&qm_iids, sizeof(qm_iids)); + OSAL_MEM_ZERO(&cdu_iids, sizeof(cdu_iids)); + OSAL_MEM_ZERO(&tm_iids, sizeof(tm_iids)); + + p_mngr->pf_start_line = RESC_START(p_hwfn, ECORE_ILT); + + DP_VERBOSE(p_hwfn, ECORE_MSG_ILT, + "hwfn [%d] - Set context mngr starting line to be 0x%08x\n", + p_hwfn->my_id, p_hwfn->p_cxt_mngr->pf_start_line); + + /* CDUC */ + p_cli = ecore_cxt_set_cli(&p_mngr->clients[ILT_CLI_CDUC]); + + curr_line = p_mngr->pf_start_line; + + /* CDUC PF */ + p_cli->pf_total_lines = 0; + + /* get the counters for the CDUC,CDUC and QM clients */ + ecore_cxt_cdu_iids(p_mngr, &cdu_iids); + + p_blk = ecore_cxt_set_blk(&p_cli->pf_blks[CDUC_BLK]); + + total = cdu_iids.pf_cids * CONN_CXT_SIZE(p_hwfn); + + ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line, + total, CONN_CXT_SIZE(p_hwfn)); + + ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC); + p_cli->pf_total_lines = curr_line - p_blk->start_line; + + ecore_ilt_get_dynamic_line_range(p_hwfn, ILT_CLI_CDUC, + &p_blk->dynamic_line_offset, + &p_blk->dynamic_line_cnt); + + /* CDUC VF */ + p_blk = ecore_cxt_set_blk(&p_cli->vf_blks[CDUC_BLK]); + total = cdu_iids.per_vf_cids * CONN_CXT_SIZE(p_hwfn); + + ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line, + total, CONN_CXT_SIZE(p_hwfn)); + + ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC); + p_cli->vf_total_lines = curr_line - p_blk->start_line; + + for (i = 1; i < p_mngr->vf_count; i++) + ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, + ILT_CLI_CDUC); + + /* CDUT PF */ + p_cli = ecore_cxt_set_cli(&p_mngr->clients[ILT_CLI_CDUT]); + p_cli->first.val = curr_line; + + /* first the 'working' task memory */ + for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) { + p_seg = ecore_cxt_tid_seg_info(p_hwfn, i); + if (!p_seg || p_seg->count == 0) + continue; + + p_blk = ecore_cxt_set_blk(&p_cli->pf_blks[CDUT_SEG_BLK(i)]); + total = p_seg->count * p_mngr->task_type_size[p_seg->type]; + ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line, total, + p_mngr->task_type_size[p_seg->type]); + + ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, + ILT_CLI_CDUT); + } + + /* next the 'init' task memory (forced load memory) */ + for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) { + p_seg = ecore_cxt_tid_seg_info(p_hwfn, i); + if (!p_seg || p_seg->count == 0) + continue; + + p_blk = + ecore_cxt_set_blk(&p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)]); + + if (!p_seg->has_fl_mem) { + /* The segment is active (total size pf 'working' + * memory is > 0) but has no FL (forced-load, Init) + * memory. Thus: + * + * 1. The total-size in the corrsponding FL block of + * the ILT client is set to 0 - No ILT line are + * provisioned and no ILT memory allocated. + * + * 2. The start-line of said block is set to the + * start line of the matching working memory + * block in the ILT client. 
This is later used to + * configure the CDU segment offset registers and + * results in an FL command for TIDs of this + * segment behaves as regular load commands + * (loading TIDs from the working memory). + */ + line = p_cli->pf_blks[CDUT_SEG_BLK(i)].start_line; + + ecore_ilt_cli_blk_fill(p_cli, p_blk, line, 0, 0); + continue; + } + total = p_seg->count * p_mngr->task_type_size[p_seg->type]; + + ecore_ilt_cli_blk_fill(p_cli, p_blk, + curr_line, total, + p_mngr->task_type_size[p_seg->type]); + + ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, + ILT_CLI_CDUT); + } + p_cli->pf_total_lines = curr_line - p_cli->first.val; + + /* CDUT VF */ + p_seg = ecore_cxt_tid_seg_info(p_hwfn, TASK_SEGMENT_VF); + if (p_seg && p_seg->count) { + /* Stricly speaking we need to iterate over all VF + * task segment types, but a VF has only 1 segment + */ + + /* 'working' memory */ + total = p_seg->count * p_mngr->task_type_size[p_seg->type]; + + p_blk = ecore_cxt_set_blk(&p_cli->vf_blks[CDUT_SEG_BLK(0)]); + ecore_ilt_cli_blk_fill(p_cli, p_blk, + curr_line, total, + p_mngr->task_type_size[p_seg->type]); + + ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, + ILT_CLI_CDUT); + + /* 'init' memory */ + p_blk = + ecore_cxt_set_blk(&p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)]); + if (!p_seg->has_fl_mem) { + /* see comment above */ + line = p_cli->vf_blks[CDUT_SEG_BLK(0)].start_line; + ecore_ilt_cli_blk_fill(p_cli, p_blk, line, 0, 0); + } else { + task_size = p_mngr->task_type_size[p_seg->type]; + ecore_ilt_cli_blk_fill(p_cli, p_blk, + curr_line, total, task_size); + ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, + ILT_CLI_CDUT); + } + p_cli->vf_total_lines = curr_line - (p_cli->first.val + + p_cli->pf_total_lines); + + /* Now for the rest of the VFs */ + for (i = 1; i < p_mngr->vf_count; i++) { + /* don't set p_blk i.e. don't clear total_size */ + p_blk = &p_cli->vf_blks[CDUT_SEG_BLK(0)]; + ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, + ILT_CLI_CDUT); + + /* don't set p_blk i.e. don't clear total_size */ + p_blk = &p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)]; + ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, + ILT_CLI_CDUT); + } + } + + /* QM */ + p_cli = ecore_cxt_set_cli(&p_mngr->clients[ILT_CLI_QM]); + p_blk = ecore_cxt_set_blk(&p_cli->pf_blks[0]); + + /* At this stage, after the first QM configuration, the PF PQs amount + * is the highest possible. Save this value at qm_info->ilt_pf_pqs to + * detect overflows in the future. + * Even though VF PQs amount can be larger than VF count, use vf_count + * because each VF requires only the full amount of CIDs. 
+ */ + ecore_cxt_qm_iids(p_hwfn, &qm_iids); + total = ecore_qm_pf_mem_size(p_hwfn, qm_iids.cids, + qm_iids.vf_cids, qm_iids.tids, + p_hwfn->qm_info.num_pqs + OFLD_GRP_SIZE, + p_hwfn->qm_info.num_vf_pqs); + + DP_VERBOSE(p_hwfn, ECORE_MSG_ILT, + "QM ILT Info, (cids=%d, vf_cids=%d, tids=%d, num_pqs=%d," + " num_vf_pqs=%d, memory_size=%d)\n", + qm_iids.cids, qm_iids.vf_cids, qm_iids.tids, + p_hwfn->qm_info.num_pqs, p_hwfn->qm_info.num_vf_pqs, total); + + ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line, total * 0x1000, + QM_PQ_ELEMENT_SIZE); + + ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_QM); + p_cli->pf_total_lines = curr_line - p_blk->start_line; + + /* TM PF */ + p_cli = ecore_cxt_set_cli(&p_mngr->clients[ILT_CLI_TM]); + ecore_cxt_tm_iids(p_hwfn, p_mngr, &tm_iids); + total = tm_iids.pf_cids + tm_iids.pf_tids_total; + if (total) { + p_blk = ecore_cxt_set_blk(&p_cli->pf_blks[0]); + ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line, + total * TM_ELEM_SIZE, + TM_ELEM_SIZE); + + ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, + ILT_CLI_TM); + p_cli->pf_total_lines = curr_line - p_blk->start_line; + } + + /* TM VF */ + total = tm_iids.per_vf_cids + tm_iids.per_vf_tids; + if (total) { + p_blk = ecore_cxt_set_blk(&p_cli->vf_blks[0]); + ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line, + total * TM_ELEM_SIZE, TM_ELEM_SIZE); + + ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, + ILT_CLI_TM); + + p_cli->vf_total_lines = curr_line - p_blk->start_line; + for (i = 1; i < p_mngr->vf_count; i++) { + ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, + ILT_CLI_TM); + } + } + + /* SRC */ + p_cli = ecore_cxt_set_cli(&p_mngr->clients[ILT_CLI_SRC]); + total = ecore_cxt_src_elements(p_mngr); + + if (total) { + total_size = total * sizeof(struct src_ent); + elem_size = sizeof(struct src_ent); + + p_blk = ecore_cxt_set_blk(&p_cli->pf_blks[0]); + ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line, + total_size, elem_size); + ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, + ILT_CLI_SRC); + p_cli->pf_total_lines = curr_line - p_blk->start_line; + } + + /* TSDM (SRQ CONTEXT) */ + total = ecore_cxt_get_srq_count(p_hwfn); + + if (total) { + p_cli = ecore_cxt_set_cli(&p_mngr->clients[ILT_CLI_TSDM]); + p_blk = ecore_cxt_set_blk(&p_cli->pf_blks[SRQ_BLK]); + ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line, + total * SRQ_CXT_SIZE, SRQ_CXT_SIZE); + + ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, + ILT_CLI_TSDM); + p_cli->pf_total_lines = curr_line - p_blk->start_line; + } + + if (curr_line - p_hwfn->p_cxt_mngr->pf_start_line > + RESC_NUM(p_hwfn, ECORE_ILT)) { + DP_ERR(p_hwfn, "too many ilt lines...#lines=%d\n", + curr_line - p_hwfn->p_cxt_mngr->pf_start_line); + return ECORE_INVAL; + } + + return ECORE_SUCCESS; +} + +static void ecore_cxt_src_t2_free(struct ecore_hwfn *p_hwfn) +{ + struct ecore_src_t2 *p_t2 = &p_hwfn->p_cxt_mngr->src_t2; + u32 i; + + if (!p_t2 || !p_t2->dma_mem) + return; + + for (i = 0; i < p_t2->num_pages; i++) + if (p_t2->dma_mem[i].virt_addr) + OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, + p_t2->dma_mem[i].virt_addr, + p_t2->dma_mem[i].phys_addr, + p_t2->dma_mem[i].size); + + OSAL_FREE(p_hwfn->p_dev, p_t2->dma_mem); + p_t2->dma_mem = OSAL_NULL; +} + +static enum _ecore_status_t +ecore_cxt_t2_alloc_pages(struct ecore_hwfn *p_hwfn, + struct ecore_src_t2 *p_t2, + u32 total_size, u32 page_size) +{ + void **p_virt; + u32 size, i; + + if (!p_t2 || !p_t2->dma_mem) + return ECORE_INVAL; + + for (i = 0; i < p_t2->num_pages; i++) { + size = OSAL_MIN_T(u32, total_size, 
page_size); + p_virt = &p_t2->dma_mem[i].virt_addr; + + *p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, + &p_t2->dma_mem[i].phys_addr, + size); + if (!p_t2->dma_mem[i].virt_addr) + return ECORE_NOMEM; + + OSAL_MEM_ZERO(*p_virt, size); + p_t2->dma_mem[i].size = size; + total_size -= size; + } + + return ECORE_SUCCESS; +} + +static enum _ecore_status_t ecore_cxt_src_t2_alloc(struct ecore_hwfn *p_hwfn) +{ + struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; + u32 conn_num, total_size, ent_per_page, psz, i; + struct phys_mem_desc *p_t2_last_page; + struct ecore_ilt_client_cfg *p_src; + struct ecore_src_iids src_iids; + struct ecore_src_t2 *p_t2; + enum _ecore_status_t rc; + + OSAL_MEM_ZERO(&src_iids, sizeof(src_iids)); + + /* if the SRC ILT client is inactive - there are no connection + * requiring the searcer, leave. + */ + p_src = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_SRC]; + if (!p_src->active) + return ECORE_SUCCESS; + + ecore_cxt_src_iids(p_mngr, &src_iids); + conn_num = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count; + total_size = conn_num * sizeof(struct src_ent); + + /* use the same page size as the SRC ILT client */ + psz = ILT_PAGE_IN_BYTES(p_src->p_size.val); + p_t2 = &p_mngr->src_t2; + p_t2->num_pages = DIV_ROUND_UP(total_size, psz); + + /* allocate t2 */ + p_t2->dma_mem = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, + p_t2->num_pages * + sizeof(struct phys_mem_desc)); + if (!p_t2->dma_mem) { + DP_NOTICE(p_hwfn, false, "Failed to allocate t2 table\n"); + rc = ECORE_NOMEM; + goto t2_fail; + } + + rc = ecore_cxt_t2_alloc_pages(p_hwfn, p_t2, total_size, psz); + if (rc) + goto t2_fail; + + /* Set the t2 pointers */ + + /* entries per page - must be a power of two */ + ent_per_page = psz / sizeof(struct src_ent); + + p_t2->first_free = (u64)p_t2->dma_mem[0].phys_addr; + + p_t2_last_page = &p_t2->dma_mem[(conn_num - 1) / ent_per_page]; + p_t2->last_free = (u64)p_t2_last_page->phys_addr + + ((conn_num - 1) & (ent_per_page - 1)) * + sizeof(struct src_ent); + + for (i = 0; i < p_t2->num_pages; i++) { + u32 ent_num = OSAL_MIN_T(u32, ent_per_page, conn_num); + struct src_ent *entries = p_t2->dma_mem[i].virt_addr; + u64 p_ent_phys = (u64)p_t2->dma_mem[i].phys_addr, val; + u32 j; + + for (j = 0; j < ent_num - 1; j++) { + val = p_ent_phys + (j + 1) * sizeof(struct src_ent); + entries[j].next = OSAL_CPU_TO_BE64(val); + } + + if (i < p_t2->num_pages - 1) + val = (u64)p_t2->dma_mem[i + 1].phys_addr; + else + val = 0; + entries[j].next = OSAL_CPU_TO_BE64(val); + + conn_num -= ent_num; + } + + return ECORE_SUCCESS; + +t2_fail: + ecore_cxt_src_t2_free(p_hwfn); + return rc; +} + +#define for_each_ilt_valid_client(pos, clients) \ + for (pos = 0; pos < ILT_CLI_MAX; pos++) \ + if (!clients[pos].active) { \ + continue; \ + } else \ + + +/* Total number of ILT lines used by this PF */ +static u32 ecore_cxt_ilt_shadow_size(struct ecore_ilt_client_cfg *ilt_clients) +{ + u32 size = 0; + u32 i; + + for_each_ilt_valid_client(i, ilt_clients) + size += (ilt_clients[i].last.val - + ilt_clients[i].first.val + 1); + + return size; +} + +static void ecore_ilt_shadow_free(struct ecore_hwfn *p_hwfn) +{ + struct ecore_ilt_client_cfg *p_cli = p_hwfn->p_cxt_mngr->clients; + struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; + u32 ilt_size, i; + + if (p_mngr->ilt_shadow == OSAL_NULL) + return; + + ilt_size = ecore_cxt_ilt_shadow_size(p_cli); + + for (i = 0; p_mngr->ilt_shadow && i < ilt_size; i++) { + struct phys_mem_desc *p_dma = &p_mngr->ilt_shadow[i]; + + if (p_dma->virt_addr) + 
OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, + p_dma->p_virt, + p_dma->phys_addr, p_dma->size); + p_dma->virt_addr = OSAL_NULL; + } + OSAL_FREE(p_hwfn->p_dev, p_mngr->ilt_shadow); + p_mngr->ilt_shadow = OSAL_NULL; +} + +static enum _ecore_status_t +ecore_ilt_blk_alloc(struct ecore_hwfn *p_hwfn, + struct ecore_ilt_cli_blk *p_blk, + enum ilt_clients ilt_client, u32 start_line_offset) +{ + struct phys_mem_desc *ilt_shadow = p_hwfn->p_cxt_mngr->ilt_shadow; + u32 lines, line, sz_left, lines_to_skip, first_skipped_line; + + /* Special handling for RoCE that supports dynamic allocation */ + if (ilt_client == ILT_CLI_CDUT || ilt_client == ILT_CLI_TSDM) + return ECORE_SUCCESS; + + if (!p_blk->total_size) + return ECORE_SUCCESS; + + sz_left = p_blk->total_size; + lines_to_skip = p_blk->dynamic_line_cnt; + lines = DIV_ROUND_UP(sz_left, p_blk->real_size_in_page) - lines_to_skip; + line = p_blk->start_line + start_line_offset - + p_hwfn->p_cxt_mngr->pf_start_line; + first_skipped_line = line + p_blk->dynamic_line_offset; + + while (lines) { + dma_addr_t p_phys; + void *p_virt; + u32 size; + + if (lines_to_skip && (line == first_skipped_line)) { + line += lines_to_skip; + continue; + } + + size = OSAL_MIN_T(u32, sz_left, p_blk->real_size_in_page); + +/* @DPDK */ +#define ILT_BLOCK_ALIGN_SIZE 0x1000 + p_virt = OSAL_DMA_ALLOC_COHERENT_ALIGNED(p_hwfn->p_dev, + &p_phys, size, + ILT_BLOCK_ALIGN_SIZE); + if (!p_virt) + return ECORE_NOMEM; + OSAL_MEM_ZERO(p_virt, size); + + ilt_shadow[line].phys_addr = p_phys; + ilt_shadow[line].virt_addr = p_virt; + ilt_shadow[line].size = size; + + DP_VERBOSE(p_hwfn, ECORE_MSG_ILT, + "ILT shadow: Line [%d] Physical 0x%lx" + " Virtual %p Size %d\n", + line, (unsigned long)p_phys, p_virt, size); + + sz_left -= size; + line++; + lines--; + } + + return ECORE_SUCCESS; +} + +static enum _ecore_status_t ecore_ilt_shadow_alloc(struct ecore_hwfn *p_hwfn) +{ + struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; + struct ecore_ilt_client_cfg *clients = p_mngr->clients; + struct ecore_ilt_cli_blk *p_blk; + u32 size, i, j, k; + enum _ecore_status_t rc; + + size = ecore_cxt_ilt_shadow_size(clients); + p_mngr->ilt_shadow = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, + size * sizeof(struct phys_mem_desc)); + + if (!p_mngr->ilt_shadow) { + DP_NOTICE(p_hwfn, false, "Failed to allocate ilt shadow table\n"); + rc = ECORE_NOMEM; + goto ilt_shadow_fail; + } + + DP_VERBOSE(p_hwfn, ECORE_MSG_ILT, + "Allocated 0x%x bytes for ilt shadow\n", + (u32)(size * sizeof(struct phys_mem_desc))); + + for_each_ilt_valid_client(i, clients) { + for (j = 0; j < ILT_CLI_PF_BLOCKS; j++) { + p_blk = &clients[i].pf_blks[j]; + rc = ecore_ilt_blk_alloc(p_hwfn, p_blk, i, 0); + if (rc != ECORE_SUCCESS) + goto ilt_shadow_fail; + } + for (k = 0; k < p_mngr->vf_count; k++) { + for (j = 0; j < ILT_CLI_VF_BLOCKS; j++) { + u32 lines = clients[i].vf_total_lines * k; + + p_blk = &clients[i].vf_blks[j]; + rc = ecore_ilt_blk_alloc(p_hwfn, p_blk, + i, lines); + if (rc != ECORE_SUCCESS) + goto ilt_shadow_fail; + } + } + } + + return ECORE_SUCCESS; + +ilt_shadow_fail: + ecore_ilt_shadow_free(p_hwfn); + return rc; +} + +static void ecore_cid_map_free(struct ecore_hwfn *p_hwfn) +{ + u32 type, vf, max_num_vfs = NUM_OF_VFS(p_hwfn->p_dev); + struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; + + for (type = 0; type < MAX_CONN_TYPES; type++) { + OSAL_FREE(p_hwfn->p_dev, p_mngr->acquired[type].cid_map); + p_mngr->acquired[type].cid_map = OSAL_NULL; + p_mngr->acquired[type].max_count = 0; + p_mngr->acquired[type].start_cid = 0; + + for (vf = 0; vf < 
max_num_vfs; vf++) { + OSAL_FREE(p_hwfn->p_dev, + p_mngr->acquired_vf[type][vf].cid_map); + p_mngr->acquired_vf[type][vf].cid_map = OSAL_NULL; + p_mngr->acquired_vf[type][vf].max_count = 0; + p_mngr->acquired_vf[type][vf].start_cid = 0; + } + } +} + +static enum _ecore_status_t +__ecore_cid_map_alloc_single(struct ecore_hwfn *p_hwfn, u32 type, + u32 cid_start, u32 cid_count, + struct ecore_cid_acquired_map *p_map) +{ + u32 size; + + if (!cid_count) + return ECORE_SUCCESS; + + size = MAP_WORD_SIZE * DIV_ROUND_UP(cid_count, BITS_PER_MAP_WORD); + p_map->cid_map = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size); + if (p_map->cid_map == OSAL_NULL) + return ECORE_NOMEM; + + p_map->max_count = cid_count; + p_map->start_cid = cid_start; + + DP_VERBOSE(p_hwfn, ECORE_MSG_CXT, + "Type %08x start: %08x count %08x\n", + type, p_map->start_cid, p_map->max_count); + + return ECORE_SUCCESS; +} + +static enum _ecore_status_t +ecore_cid_map_alloc_single(struct ecore_hwfn *p_hwfn, u32 type, u32 start_cid, + u32 vf_start_cid) +{ + struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; + u32 vf, max_num_vfs = NUM_OF_VFS(p_hwfn->p_dev); + struct ecore_cid_acquired_map *p_map; + struct ecore_conn_type_cfg *p_cfg; + enum _ecore_status_t rc; + + p_cfg = &p_mngr->conn_cfg[type]; + + /* Handle PF maps */ + p_map = &p_mngr->acquired[type]; + rc = __ecore_cid_map_alloc_single(p_hwfn, type, start_cid, + p_cfg->cid_count, p_map); + if (rc != ECORE_SUCCESS) + return rc; + + /* Handle VF maps */ + for (vf = 0; vf < max_num_vfs; vf++) { + p_map = &p_mngr->acquired_vf[type][vf]; + rc = __ecore_cid_map_alloc_single(p_hwfn, type, vf_start_cid, + p_cfg->cids_per_vf, p_map); + if (rc != ECORE_SUCCESS) + return rc; + } + + return ECORE_SUCCESS; +} + +static enum _ecore_status_t ecore_cid_map_alloc(struct ecore_hwfn *p_hwfn) +{ + struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; + u32 start_cid = 0, vf_start_cid = 0; + u32 type; + enum _ecore_status_t rc; + + for (type = 0; type < MAX_CONN_TYPES; type++) { + rc = ecore_cid_map_alloc_single(p_hwfn, type, start_cid, + vf_start_cid); + if (rc != ECORE_SUCCESS) + goto cid_map_fail; + + start_cid += p_mngr->conn_cfg[type].cid_count; + vf_start_cid += p_mngr->conn_cfg[type].cids_per_vf; + } + + return ECORE_SUCCESS; + +cid_map_fail: + ecore_cid_map_free(p_hwfn); + return rc; +} + +enum _ecore_status_t ecore_cxt_mngr_alloc(struct ecore_hwfn *p_hwfn) +{ + struct ecore_cid_acquired_map *acquired_vf; + struct ecore_ilt_client_cfg *clients; + struct ecore_cxt_mngr *p_mngr; + u32 i, max_num_vfs; + + p_mngr = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_mngr)); + if (!p_mngr) { + DP_NOTICE(p_hwfn, false, "Failed to allocate `struct ecore_cxt_mngr'\n"); + return ECORE_NOMEM; + } + + /* Initialize ILT client registers */ + clients = p_mngr->clients; + clients[ILT_CLI_CDUC].first.reg = ILT_CFG_REG(CDUC, FIRST_ILT); + clients[ILT_CLI_CDUC].last.reg = ILT_CFG_REG(CDUC, LAST_ILT); + clients[ILT_CLI_CDUC].p_size.reg = ILT_CFG_REG(CDUC, P_SIZE); + + clients[ILT_CLI_QM].first.reg = ILT_CFG_REG(QM, FIRST_ILT); + clients[ILT_CLI_QM].last.reg = ILT_CFG_REG(QM, LAST_ILT); + clients[ILT_CLI_QM].p_size.reg = ILT_CFG_REG(QM, P_SIZE); + + clients[ILT_CLI_TM].first.reg = ILT_CFG_REG(TM, FIRST_ILT); + clients[ILT_CLI_TM].last.reg = ILT_CFG_REG(TM, LAST_ILT); + clients[ILT_CLI_TM].p_size.reg = ILT_CFG_REG(TM, P_SIZE); + + clients[ILT_CLI_SRC].first.reg = ILT_CFG_REG(SRC, FIRST_ILT); + clients[ILT_CLI_SRC].last.reg = ILT_CFG_REG(SRC, LAST_ILT); + clients[ILT_CLI_SRC].p_size.reg = ILT_CFG_REG(SRC, P_SIZE); + + 
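+ /* The {first, last, p_size} register handles recorded for each ILT
+  * client in this function are only bookkeeping at this stage; the
+  * first/last line values are computed by ecore_cxt_cfg_ilt_compute(),
+  * and all three pairs are later written to the runtime array by
+  * ecore_ilt_bounds_init(), roughly:
+  *
+  *   for_each_ilt_valid_client(i, clients) {
+  *           STORE_RT_REG(p_hwfn, clients[i].first.reg,
+  *                        clients[i].first.val);
+  *           STORE_RT_REG(p_hwfn, clients[i].last.reg,
+  *                        clients[i].last.val);
+  *           STORE_RT_REG(p_hwfn, clients[i].p_size.reg,
+  *                        clients[i].p_size.val);
+  *   }
+  */
+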
clients[ILT_CLI_CDUT].first.reg = ILT_CFG_REG(CDUT, FIRST_ILT); + clients[ILT_CLI_CDUT].last.reg = ILT_CFG_REG(CDUT, LAST_ILT); + clients[ILT_CLI_CDUT].p_size.reg = ILT_CFG_REG(CDUT, P_SIZE); + + clients[ILT_CLI_TSDM].first.reg = ILT_CFG_REG(TSDM, FIRST_ILT); + clients[ILT_CLI_TSDM].last.reg = ILT_CFG_REG(TSDM, LAST_ILT); + clients[ILT_CLI_TSDM].p_size.reg = ILT_CFG_REG(TSDM, P_SIZE); + + /* default ILT page size for all clients is 64K */ + for (i = 0; i < ILT_CLI_MAX; i++) + p_mngr->clients[i].p_size.val = ILT_DEFAULT_HW_P_SIZE; + + /* due to removal of ISCSI/FCoE files union type0_task_context + * task_type_size will be 0. So hardcoded for now. + */ + p_mngr->task_type_size[0] = 512; /* @DPDK */ + p_mngr->task_type_size[1] = 128; /* @DPDK */ + + if (p_hwfn->p_dev->p_iov_info) + p_mngr->vf_count = p_hwfn->p_dev->p_iov_info->total_vfs; + + /* Initialize the dynamic ILT allocation mutex */ +#ifdef CONFIG_ECORE_LOCK_ALLOC + if (OSAL_MUTEX_ALLOC(p_hwfn, &p_mngr->mutex)) { + DP_NOTICE(p_hwfn, false, "Failed to alloc p_mngr->mutex\n"); + return ECORE_NOMEM; + } +#endif + OSAL_MUTEX_INIT(&p_mngr->mutex); + + /* Set the cxt mangr pointer prior to further allocations */ + p_hwfn->p_cxt_mngr = p_mngr; + + max_num_vfs = NUM_OF_VFS(p_hwfn->p_dev); + for (i = 0; i < MAX_CONN_TYPES; i++) { + acquired_vf = OSAL_CALLOC(p_hwfn->p_dev, GFP_KERNEL, + max_num_vfs, sizeof(*acquired_vf)); + if (!acquired_vf) { + DP_NOTICE(p_hwfn, false, + "Failed to allocate an array of `struct ecore_cid_acquired_map'\n"); + return ECORE_NOMEM; + } + + p_mngr->acquired_vf[i] = acquired_vf; + } + + return ECORE_SUCCESS; +} + +enum _ecore_status_t ecore_cxt_tables_alloc(struct ecore_hwfn *p_hwfn) +{ + enum _ecore_status_t rc; + + /* Allocate the ILT shadow table */ + rc = ecore_ilt_shadow_alloc(p_hwfn); + if (rc) { + DP_NOTICE(p_hwfn, false, "Failed to allocate ilt memory\n"); + goto tables_alloc_fail; + } + + /* Allocate the T2 table */ + rc = ecore_cxt_src_t2_alloc(p_hwfn); + if (rc) { + DP_NOTICE(p_hwfn, false, "Failed to allocate T2 memory\n"); + goto tables_alloc_fail; + } + + /* Allocate and initialize the acquired cids bitmaps */ + rc = ecore_cid_map_alloc(p_hwfn); + if (rc) { + DP_NOTICE(p_hwfn, false, "Failed to allocate cid maps\n"); + goto tables_alloc_fail; + } + + return ECORE_SUCCESS; + +tables_alloc_fail: + ecore_cxt_mngr_free(p_hwfn); + return rc; +} + +void ecore_cxt_mngr_free(struct ecore_hwfn *p_hwfn) +{ + u32 i; + + if (!p_hwfn->p_cxt_mngr) + return; + + ecore_cid_map_free(p_hwfn); + ecore_cxt_src_t2_free(p_hwfn); + ecore_ilt_shadow_free(p_hwfn); +#ifdef CONFIG_ECORE_LOCK_ALLOC + OSAL_MUTEX_DEALLOC(&p_hwfn->p_cxt_mngr->mutex); +#endif + for (i = 0; i < MAX_CONN_TYPES; i++) + OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_cxt_mngr->acquired_vf[i]); + OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_cxt_mngr); + + p_hwfn->p_cxt_mngr = OSAL_NULL; +} + +void ecore_cxt_mngr_setup(struct ecore_hwfn *p_hwfn) +{ + struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; + u32 len, max_num_vfs = NUM_OF_VFS(p_hwfn->p_dev); + struct ecore_cid_acquired_map *p_map; + struct ecore_conn_type_cfg *p_cfg; + int type; + + /* Reset acquired cids */ + for (type = 0; type < MAX_CONN_TYPES; type++) { + u32 vf; + + p_cfg = &p_mngr->conn_cfg[type]; + if (p_cfg->cid_count) { + p_map = &p_mngr->acquired[type]; + len = DIV_ROUND_UP(p_map->max_count, + BITS_PER_MAP_WORD) * + MAP_WORD_SIZE; + OSAL_MEM_ZERO(p_map->cid_map, len); + } + + if (!p_cfg->cids_per_vf) + continue; + + for (vf = 0; vf < max_num_vfs; vf++) { + p_map = &p_mngr->acquired_vf[type][vf]; + len = 
DIV_ROUND_UP(p_map->max_count, + BITS_PER_MAP_WORD) * + MAP_WORD_SIZE; + OSAL_MEM_ZERO(p_map->cid_map, len); + } + } +} + +/* HW initialization helper (per Block, per phase) */ + +/* CDU Common */ +#define CDUC_CXT_SIZE_SHIFT \ + CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE_SHIFT + +#define CDUC_CXT_SIZE_MASK \ + (CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE >> CDUC_CXT_SIZE_SHIFT) + +#define CDUC_BLOCK_WASTE_SHIFT \ + CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE_SHIFT + +#define CDUC_BLOCK_WASTE_MASK \ + (CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE >> CDUC_BLOCK_WASTE_SHIFT) + +#define CDUC_NCIB_SHIFT \ + CDU_REG_CID_ADDR_PARAMS_NCIB_SHIFT + +#define CDUC_NCIB_MASK \ + (CDU_REG_CID_ADDR_PARAMS_NCIB >> CDUC_NCIB_SHIFT) + +#define CDUT_TYPE0_CXT_SIZE_SHIFT \ + CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE_SHIFT + +#define CDUT_TYPE0_CXT_SIZE_MASK \ + (CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE >> \ + CDUT_TYPE0_CXT_SIZE_SHIFT) + +#define CDUT_TYPE0_BLOCK_WASTE_SHIFT \ + CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE_SHIFT + +#define CDUT_TYPE0_BLOCK_WASTE_MASK \ + (CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE >> \ + CDUT_TYPE0_BLOCK_WASTE_SHIFT) + +#define CDUT_TYPE0_NCIB_SHIFT \ + CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK_SHIFT + +#define CDUT_TYPE0_NCIB_MASK \ + (CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK >> \ + CDUT_TYPE0_NCIB_SHIFT) + +#define CDUT_TYPE1_CXT_SIZE_SHIFT \ + CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE_SHIFT + +#define CDUT_TYPE1_CXT_SIZE_MASK \ + (CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE >> \ + CDUT_TYPE1_CXT_SIZE_SHIFT) + +#define CDUT_TYPE1_BLOCK_WASTE_SHIFT \ + CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE_SHIFT + +#define CDUT_TYPE1_BLOCK_WASTE_MASK \ + (CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE >> \ + CDUT_TYPE1_BLOCK_WASTE_SHIFT) + +#define CDUT_TYPE1_NCIB_SHIFT \ + CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK_SHIFT + +#define CDUT_TYPE1_NCIB_MASK \ + (CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK >> \ + CDUT_TYPE1_NCIB_SHIFT) + +static void ecore_cdu_init_common(struct ecore_hwfn *p_hwfn) +{ + u32 page_sz, elems_per_page, block_waste, cxt_size, cdu_params = 0; + + /* CDUC - connection configuration */ + page_sz = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC].p_size.val; + cxt_size = CONN_CXT_SIZE(p_hwfn); + elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size; + block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size; + + SET_FIELD(cdu_params, CDUC_CXT_SIZE, cxt_size); + SET_FIELD(cdu_params, CDUC_BLOCK_WASTE, block_waste); + SET_FIELD(cdu_params, CDUC_NCIB, elems_per_page); + STORE_RT_REG(p_hwfn, CDU_REG_CID_ADDR_PARAMS_RT_OFFSET, cdu_params); + + /* CDUT - type-0 tasks configuration */ + page_sz = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT].p_size.val; + cxt_size = p_hwfn->p_cxt_mngr->task_type_size[0]; + elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size; + block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size; + + /* cxt size and block-waste are multipes of 8 */ + cdu_params = 0; + SET_FIELD(cdu_params, CDUT_TYPE0_CXT_SIZE, (cxt_size >> 3)); + SET_FIELD(cdu_params, CDUT_TYPE0_BLOCK_WASTE, (block_waste >> 3)); + SET_FIELD(cdu_params, CDUT_TYPE0_NCIB, elems_per_page); + STORE_RT_REG(p_hwfn, CDU_REG_SEGMENT0_PARAMS_RT_OFFSET, cdu_params); + + /* CDUT - type-1 tasks configuration */ + cxt_size = p_hwfn->p_cxt_mngr->task_type_size[1]; + elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size; + block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size; + + /* cxt size and block-waste are multipes of 8 */ + cdu_params = 0; + SET_FIELD(cdu_params, CDUT_TYPE1_CXT_SIZE, (cxt_size >> 3)); + 
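+ /* Illustration: with the 128-byte type-1 task size hard-coded in
+  * ecore_cxt_mngr_alloc() and the default 64K CDUT page, the CXT_SIZE
+  * field just set is 128 >> 3 = 16, BLOCK_WASTE below is 0 (65536 is a
+  * multiple of 128) and NCIB is 65536 / 128 = 512. Actual values
+  * depend on the configured page size.
+  */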
SET_FIELD(cdu_params, CDUT_TYPE1_BLOCK_WASTE, (block_waste >> 3)); + SET_FIELD(cdu_params, CDUT_TYPE1_NCIB, elems_per_page); + STORE_RT_REG(p_hwfn, CDU_REG_SEGMENT1_PARAMS_RT_OFFSET, cdu_params); +} + +/* CDU PF */ +#define CDU_SEG_REG_TYPE_SHIFT CDU_SEG_TYPE_OFFSET_REG_TYPE_SHIFT +#define CDU_SEG_REG_TYPE_MASK 0x1 +#define CDU_SEG_REG_OFFSET_SHIFT 0 +#define CDU_SEG_REG_OFFSET_MASK CDU_SEG_TYPE_OFFSET_REG_OFFSET_MASK + +static void ecore_cdu_init_pf(struct ecore_hwfn *p_hwfn) +{ + struct ecore_ilt_client_cfg *p_cli; + struct ecore_tid_seg *p_seg; + u32 cdu_seg_params, offset; + int i; + + static const u32 rt_type_offset_arr[] = { + CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET, + CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET, + CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET, + CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET + }; + + static const u32 rt_type_offset_fl_arr[] = { + CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET, + CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET, + CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET, + CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET + }; + + p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT]; + + /* There are initializations only for CDUT during pf Phase */ + for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) { + /* Segment 0 */ + p_seg = ecore_cxt_tid_seg_info(p_hwfn, i); + if (!p_seg) + continue; + + /* Note: start_line is already adjusted for the CDU + * segment register granularity, so we just need to + * divide. Adjustment is implicit as we assume ILT + * Page size is larger than 32K! + */ + offset = (ILT_PAGE_IN_BYTES(p_cli->p_size.val) * + (p_cli->pf_blks[CDUT_SEG_BLK(i)].start_line - + p_cli->first.val)) / CDUT_SEG_ALIGNMET_IN_BYTES; + + cdu_seg_params = 0; + SET_FIELD(cdu_seg_params, CDU_SEG_REG_TYPE, p_seg->type); + SET_FIELD(cdu_seg_params, CDU_SEG_REG_OFFSET, offset); + STORE_RT_REG(p_hwfn, rt_type_offset_arr[i], cdu_seg_params); + + offset = (ILT_PAGE_IN_BYTES(p_cli->p_size.val) * + (p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)].start_line - + p_cli->first.val)) / CDUT_SEG_ALIGNMET_IN_BYTES; + + cdu_seg_params = 0; + SET_FIELD(cdu_seg_params, CDU_SEG_REG_TYPE, p_seg->type); + SET_FIELD(cdu_seg_params, CDU_SEG_REG_OFFSET, offset); + STORE_RT_REG(p_hwfn, rt_type_offset_fl_arr[i], cdu_seg_params); + } +} + +void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, + bool is_pf_loading) +{ + struct ecore_qm_info *qm_info = &p_hwfn->qm_info; + struct ecore_qm_iids iids; + + OSAL_MEM_ZERO(&iids, sizeof(iids)); + ecore_cxt_qm_iids(p_hwfn, &iids); + ecore_qm_pf_rt_init(p_hwfn, p_ptt, p_hwfn->rel_pf_id, + qm_info->max_phys_tcs_per_port, + is_pf_loading, + iids.cids, iids.vf_cids, iids.tids, + qm_info->start_pq, + qm_info->num_pqs - qm_info->num_vf_pqs, + qm_info->num_vf_pqs, + qm_info->start_vport, + qm_info->num_vports, qm_info->pf_wfq, + qm_info->pf_rl, + p_hwfn->qm_info.qm_pq_params, + p_hwfn->qm_info.qm_vport_params); +} + +/* CM PF */ +static void ecore_cm_init_pf(struct ecore_hwfn *p_hwfn) +{ + STORE_RT_REG(p_hwfn, XCM_REG_CON_PHY_Q3_RT_OFFSET, + ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB)); +} + +/* DQ PF */ +static void ecore_dq_init_pf(struct ecore_hwfn *p_hwfn) +{ + struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; + u32 dq_pf_max_cid = 0, dq_vf_max_cid = 0; + + dq_pf_max_cid += (p_mngr->conn_cfg[0].cid_count >> DQ_RANGE_SHIFT); + STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_0_RT_OFFSET, dq_pf_max_cid); + + dq_vf_max_cid += (p_mngr->conn_cfg[0].cids_per_vf >> DQ_RANGE_SHIFT); + STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_0_RT_OFFSET, dq_vf_max_cid); + + dq_pf_max_cid += 
(p_mngr->conn_cfg[1].cid_count >> DQ_RANGE_SHIFT); + STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_1_RT_OFFSET, dq_pf_max_cid); + + dq_vf_max_cid += (p_mngr->conn_cfg[1].cids_per_vf >> DQ_RANGE_SHIFT); + STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_1_RT_OFFSET, dq_vf_max_cid); + + dq_pf_max_cid += (p_mngr->conn_cfg[2].cid_count >> DQ_RANGE_SHIFT); + STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_2_RT_OFFSET, dq_pf_max_cid); + + dq_vf_max_cid += (p_mngr->conn_cfg[2].cids_per_vf >> DQ_RANGE_SHIFT); + STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_2_RT_OFFSET, dq_vf_max_cid); + + dq_pf_max_cid += (p_mngr->conn_cfg[3].cid_count >> DQ_RANGE_SHIFT); + STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_3_RT_OFFSET, dq_pf_max_cid); + + dq_vf_max_cid += (p_mngr->conn_cfg[3].cids_per_vf >> DQ_RANGE_SHIFT); + STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_3_RT_OFFSET, dq_vf_max_cid); + + dq_pf_max_cid += (p_mngr->conn_cfg[4].cid_count >> DQ_RANGE_SHIFT); + STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_4_RT_OFFSET, dq_pf_max_cid); + + dq_vf_max_cid += (p_mngr->conn_cfg[4].cids_per_vf >> DQ_RANGE_SHIFT); + STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_4_RT_OFFSET, dq_vf_max_cid); + + dq_pf_max_cid += (p_mngr->conn_cfg[5].cid_count >> DQ_RANGE_SHIFT); + STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_5_RT_OFFSET, dq_pf_max_cid); + + dq_vf_max_cid += (p_mngr->conn_cfg[5].cids_per_vf >> DQ_RANGE_SHIFT); + STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_5_RT_OFFSET, dq_vf_max_cid); + + /* Connection types 6 & 7 are not in use, yet they must be configured + * as the highest possible connection. Not configuring them means the + * defaults will be used, and with a large number of cids a bug may + * occur, if the defaults will be smaller than dq_pf_max_cid / + * dq_vf_max_cid. + */ + STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_6_RT_OFFSET, dq_pf_max_cid); + STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_6_RT_OFFSET, dq_vf_max_cid); + + STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_7_RT_OFFSET, dq_pf_max_cid); + STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_7_RT_OFFSET, dq_vf_max_cid); +} + +static void ecore_ilt_bounds_init(struct ecore_hwfn *p_hwfn) +{ + struct ecore_ilt_client_cfg *ilt_clients; + int i; + + ilt_clients = p_hwfn->p_cxt_mngr->clients; + for_each_ilt_valid_client(i, ilt_clients) { + STORE_RT_REG(p_hwfn, + ilt_clients[i].first.reg, + ilt_clients[i].first.val); + STORE_RT_REG(p_hwfn, + ilt_clients[i].last.reg, ilt_clients[i].last.val); + STORE_RT_REG(p_hwfn, + ilt_clients[i].p_size.reg, + ilt_clients[i].p_size.val); + } +} + +static void ecore_ilt_vf_bounds_init(struct ecore_hwfn *p_hwfn) +{ + struct ecore_ilt_client_cfg *p_cli; + u32 blk_factor; + + /* For simplicty we set the 'block' to be an ILT page */ + if (p_hwfn->p_dev->p_iov_info) { + struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info; + + STORE_RT_REG(p_hwfn, + PSWRQ2_REG_VF_BASE_RT_OFFSET, + p_iov->first_vf_in_pf); + STORE_RT_REG(p_hwfn, + PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET, + p_iov->first_vf_in_pf + p_iov->total_vfs); + } + + p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC]; + blk_factor = OSAL_LOG2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10); + if (p_cli->active) { + STORE_RT_REG(p_hwfn, + PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET, + blk_factor); + STORE_RT_REG(p_hwfn, + PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET, + p_cli->pf_total_lines); + STORE_RT_REG(p_hwfn, + PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET, + p_cli->vf_total_lines); + } + + p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT]; + blk_factor = OSAL_LOG2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10); + if (p_cli->active) { + 
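+ /* Illustration, assuming OSAL_LOG2() is a plain integer log2: with
+  * the default 64K ILT page configured at allocation time,
+  * blk_factor = OSAL_LOG2(0x10000 >> 10) = 6, i.e. the PSWRQ2
+  * "blocks factor" encodes the client's ILT page size in 1KB units as
+  * a power of two (same encoding as the CDUC factor above).
+  */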
STORE_RT_REG(p_hwfn, + PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET, + blk_factor); + STORE_RT_REG(p_hwfn, + PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET, + p_cli->pf_total_lines); + STORE_RT_REG(p_hwfn, + PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET, + p_cli->vf_total_lines); + } + + p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TM]; + blk_factor = OSAL_LOG2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10); + if (p_cli->active) { + STORE_RT_REG(p_hwfn, + PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET, blk_factor); + STORE_RT_REG(p_hwfn, + PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET, + p_cli->pf_total_lines); + STORE_RT_REG(p_hwfn, + PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET, + p_cli->vf_total_lines); + } +} + +/* ILT (PSWRQ2) PF */ +static void ecore_ilt_init_pf(struct ecore_hwfn *p_hwfn) +{ + struct ecore_ilt_client_cfg *clients; + struct ecore_cxt_mngr *p_mngr; + struct phys_mem_desc *p_shdw; + u32 line, rt_offst, i; + + ecore_ilt_bounds_init(p_hwfn); + ecore_ilt_vf_bounds_init(p_hwfn); + + p_mngr = p_hwfn->p_cxt_mngr; + p_shdw = p_mngr->ilt_shadow; + clients = p_hwfn->p_cxt_mngr->clients; + + for_each_ilt_valid_client(i, clients) { + /* Client's 1st val and RT array are absolute, ILT shadows' + * lines are relative. + */ + line = clients[i].first.val - p_mngr->pf_start_line; + rt_offst = PSWRQ2_REG_ILT_MEMORY_RT_OFFSET + + clients[i].first.val * ILT_ENTRY_IN_REGS; + + for (; line <= clients[i].last.val - p_mngr->pf_start_line; + line++, rt_offst += ILT_ENTRY_IN_REGS) { + u64 ilt_hw_entry = 0; + + /** p_virt could be OSAL_NULL incase of dynamic + * allocation + */ + if (p_shdw[line].virt_addr != OSAL_NULL) { + SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL); + SET_FIELD(ilt_hw_entry, ILT_ENTRY_PHY_ADDR, + (p_shdw[line].phys_addr >> 12)); + + DP_VERBOSE(p_hwfn, ECORE_MSG_ILT, + "Setting RT[0x%08x] from" + " ILT[0x%08x] [Client is %d] to" + " Physical addr: 0x%lx\n", + rt_offst, line, i, + (unsigned long)(p_shdw[line]. 
+ phys_addr >> 12)); + } + + STORE_RT_REG_AGG(p_hwfn, rt_offst, ilt_hw_entry); + } + } +} + +/* SRC (Searcher) PF */ +static void ecore_src_init_pf(struct ecore_hwfn *p_hwfn) +{ + struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; + u32 rounded_conn_num, conn_num, conn_max; + struct ecore_src_iids src_iids; + + OSAL_MEM_ZERO(&src_iids, sizeof(src_iids)); + ecore_cxt_src_iids(p_mngr, &src_iids); + conn_num = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count; + if (!conn_num) + return; + + conn_max = OSAL_MAX_T(u32, conn_num, SRC_MIN_NUM_ELEMS); + rounded_conn_num = OSAL_ROUNDUP_POW_OF_TWO(conn_max); + + STORE_RT_REG(p_hwfn, SRC_REG_COUNTFREE_RT_OFFSET, conn_num); + STORE_RT_REG(p_hwfn, SRC_REG_NUMBER_HASH_BITS_RT_OFFSET, + OSAL_LOG2(rounded_conn_num)); + + STORE_RT_REG_AGG(p_hwfn, SRC_REG_FIRSTFREE_RT_OFFSET, + p_hwfn->p_cxt_mngr->src_t2.first_free); + STORE_RT_REG_AGG(p_hwfn, SRC_REG_LASTFREE_RT_OFFSET, + p_hwfn->p_cxt_mngr->src_t2.last_free); + DP_VERBOSE(p_hwfn, ECORE_MSG_ILT, + "Configured SEARCHER for 0x%08x connections\n", + conn_num); +} + +/* Timers PF */ +#define TM_CFG_NUM_IDS_SHIFT 0 +#define TM_CFG_NUM_IDS_MASK 0xFFFFULL +#define TM_CFG_PRE_SCAN_OFFSET_SHIFT 16 +#define TM_CFG_PRE_SCAN_OFFSET_MASK 0x1FFULL +#define TM_CFG_PARENT_PF_SHIFT 25 +#define TM_CFG_PARENT_PF_MASK 0x7ULL + +#define TM_CFG_CID_PRE_SCAN_ROWS_SHIFT 30 +#define TM_CFG_CID_PRE_SCAN_ROWS_MASK 0x1FFULL + +#define TM_CFG_TID_OFFSET_SHIFT 30 +#define TM_CFG_TID_OFFSET_MASK 0x7FFFFULL +#define TM_CFG_TID_PRE_SCAN_ROWS_SHIFT 49 +#define TM_CFG_TID_PRE_SCAN_ROWS_MASK 0x1FFULL + +static void ecore_tm_init_pf(struct ecore_hwfn *p_hwfn) +{ + struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; + u32 active_seg_mask = 0, tm_offset, rt_reg; + struct ecore_tm_iids tm_iids; + u64 cfg_word; + u8 i; + + OSAL_MEM_ZERO(&tm_iids, sizeof(tm_iids)); + ecore_cxt_tm_iids(p_hwfn, p_mngr, &tm_iids); + + /* @@@TBD No pre-scan for now */ + + cfg_word = 0; + SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.per_vf_cids); + SET_FIELD(cfg_word, TM_CFG_PARENT_PF, p_hwfn->rel_pf_id); + SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0); + SET_FIELD(cfg_word, TM_CFG_CID_PRE_SCAN_ROWS, 0); /* scan all */ + + /* Note: We assume consecutive VFs for a PF */ + for (i = 0; i < p_mngr->vf_count; i++) { + rt_reg = TM_REG_CONFIG_CONN_MEM_RT_OFFSET + + (sizeof(cfg_word) / sizeof(u32)) * + (p_hwfn->p_dev->p_iov_info->first_vf_in_pf + i); + STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word); + } + + cfg_word = 0; + SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.pf_cids); + SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0); + SET_FIELD(cfg_word, TM_CFG_PARENT_PF, 0); /* n/a for PF */ + SET_FIELD(cfg_word, TM_CFG_CID_PRE_SCAN_ROWS, 0); /* scan all */ + + rt_reg = TM_REG_CONFIG_CONN_MEM_RT_OFFSET + + (sizeof(cfg_word) / sizeof(u32)) * + (NUM_OF_VFS(p_hwfn->p_dev) + p_hwfn->rel_pf_id); + STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word); + + /* enable scan */ + STORE_RT_REG(p_hwfn, TM_REG_PF_ENABLE_CONN_RT_OFFSET, + tm_iids.pf_cids ? 
0x1 : 0x0); + + /* @@@TBD how to enable the scan for the VFs */ + + tm_offset = tm_iids.per_vf_cids; + + /* Note: We assume consecutive VFs for a PF */ + for (i = 0; i < p_mngr->vf_count; i++) { + cfg_word = 0; + SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.per_vf_tids); + SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0); + SET_FIELD(cfg_word, TM_CFG_PARENT_PF, p_hwfn->rel_pf_id); + SET_FIELD(cfg_word, TM_CFG_TID_OFFSET, tm_offset); + SET_FIELD(cfg_word, TM_CFG_TID_PRE_SCAN_ROWS, (u64)0); + + rt_reg = TM_REG_CONFIG_TASK_MEM_RT_OFFSET + + (sizeof(cfg_word) / sizeof(u32)) * + (p_hwfn->p_dev->p_iov_info->first_vf_in_pf + i); + + STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word); + } + + tm_offset = tm_iids.pf_cids; + for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) { + cfg_word = 0; + SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.pf_tids[i]); + SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0); + SET_FIELD(cfg_word, TM_CFG_PARENT_PF, 0); + SET_FIELD(cfg_word, TM_CFG_TID_OFFSET, tm_offset); + SET_FIELD(cfg_word, TM_CFG_TID_PRE_SCAN_ROWS, (u64)0); + + rt_reg = TM_REG_CONFIG_TASK_MEM_RT_OFFSET + + (sizeof(cfg_word) / sizeof(u32)) * + (NUM_OF_VFS(p_hwfn->p_dev) + + p_hwfn->rel_pf_id * NUM_TASK_PF_SEGMENTS + i); + + STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word); + active_seg_mask |= (tm_iids.pf_tids[i] ? (1 << i) : 0); + + tm_offset += tm_iids.pf_tids[i]; + } + + STORE_RT_REG(p_hwfn, TM_REG_PF_ENABLE_TASK_RT_OFFSET, active_seg_mask); + + /* @@@TBD how to enable the scan for the VFs */ +} + +static void ecore_prs_init_pf(struct ecore_hwfn *p_hwfn) +{ + struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; + struct ecore_conn_type_cfg *p_fcoe; + struct ecore_tid_seg *p_tid; + + p_fcoe = &p_mngr->conn_cfg[PROTOCOLID_FCOE]; + + /* If FCoE is active set the MAX OX_ID (tid) in the Parser */ + if (!p_fcoe->cid_count) + return; + + p_tid = &p_fcoe->tid_seg[ECORE_CXT_FCOE_TID_SEG]; + STORE_RT_REG_AGG(p_hwfn, + PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET, + p_tid->count); +} + +void ecore_cxt_hw_init_common(struct ecore_hwfn *p_hwfn) +{ + /* CDU configuration */ + ecore_cdu_init_common(p_hwfn); +} + +void ecore_cxt_hw_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) +{ + ecore_qm_init_pf(p_hwfn, p_ptt, true); + ecore_cm_init_pf(p_hwfn); + ecore_dq_init_pf(p_hwfn); + ecore_cdu_init_pf(p_hwfn); + ecore_ilt_init_pf(p_hwfn); + ecore_src_init_pf(p_hwfn); + ecore_tm_init_pf(p_hwfn); + ecore_prs_init_pf(p_hwfn); +} + +enum _ecore_status_t _ecore_cxt_acquire_cid(struct ecore_hwfn *p_hwfn, + enum protocol_type type, + u32 *p_cid, u8 vfid) +{ + u32 rel_cid, max_num_vfs = NUM_OF_VFS(p_hwfn->p_dev); + struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; + struct ecore_cid_acquired_map *p_map; + + if (type >= MAX_CONN_TYPES) { + DP_NOTICE(p_hwfn, true, "Invalid protocol type %d", type); + return ECORE_INVAL; + } + + if (vfid >= max_num_vfs && vfid != ECORE_CXT_PF_CID) { + DP_NOTICE(p_hwfn, true, "VF [%02x] is out of range\n", vfid); + return ECORE_INVAL; + } + + /* Determine the right map to take this CID from */ + if (vfid == ECORE_CXT_PF_CID) + p_map = &p_mngr->acquired[type]; + else + p_map = &p_mngr->acquired_vf[type][vfid]; + + if (p_map->cid_map == OSAL_NULL) { + DP_NOTICE(p_hwfn, true, "Invalid protocol type %d", type); + return ECORE_INVAL; + } + + rel_cid = OSAL_FIND_FIRST_ZERO_BIT(p_map->cid_map, + p_map->max_count); + + if (rel_cid >= p_map->max_count) { + DP_NOTICE(p_hwfn, false, "no CID available for protocol %d\n", + type); + return ECORE_NORESOURCES; + } + + OSAL_SET_BIT(rel_cid, p_map->cid_map); + + *p_cid = rel_cid + 
p_map->start_cid; + + DP_VERBOSE(p_hwfn, ECORE_MSG_CXT, + "Acquired cid 0x%08x [rel. %08x] vfid %02x type %d\n", + *p_cid, rel_cid, vfid, type); + + return ECORE_SUCCESS; +} + +enum _ecore_status_t ecore_cxt_acquire_cid(struct ecore_hwfn *p_hwfn, + enum protocol_type type, + u32 *p_cid) +{ + return _ecore_cxt_acquire_cid(p_hwfn, type, p_cid, ECORE_CXT_PF_CID); +} + +static bool ecore_cxt_test_cid_acquired(struct ecore_hwfn *p_hwfn, + u32 cid, u8 vfid, + enum protocol_type *p_type, + struct ecore_cid_acquired_map **pp_map) +{ + struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; + u32 rel_cid; + + /* Iterate over protocols and find matching cid range */ + for (*p_type = 0; *p_type < MAX_CONN_TYPES; (*p_type)++) { + if (vfid == ECORE_CXT_PF_CID) + *pp_map = &p_mngr->acquired[*p_type]; + else + *pp_map = &p_mngr->acquired_vf[*p_type][vfid]; + + if (!((*pp_map)->cid_map)) + continue; + if (cid >= (*pp_map)->start_cid && + cid < (*pp_map)->start_cid + (*pp_map)->max_count) { + break; + } + } + if (*p_type == MAX_CONN_TYPES) { + DP_NOTICE(p_hwfn, true, "Invalid CID %d vfid %02x", cid, vfid); + goto fail; + } + + rel_cid = cid - (*pp_map)->start_cid; + if (!OSAL_TEST_BIT(rel_cid, (*pp_map)->cid_map)) { + DP_NOTICE(p_hwfn, true, + "CID %d [vifd %02x] not acquired", cid, vfid); + goto fail; + } + + return true; +fail: + *p_type = MAX_CONN_TYPES; + *pp_map = OSAL_NULL; + return false; +} + +void _ecore_cxt_release_cid(struct ecore_hwfn *p_hwfn, u32 cid, u8 vfid) +{ + u32 rel_cid, max_num_vfs = NUM_OF_VFS(p_hwfn->p_dev); + struct ecore_cid_acquired_map *p_map = OSAL_NULL; + enum protocol_type type; + bool b_acquired; + + if (vfid != ECORE_CXT_PF_CID && vfid > max_num_vfs) { + DP_NOTICE(p_hwfn, true, + "Trying to return incorrect CID belonging to VF %02x\n", + vfid); + return; + } + + /* Test acquired and find matching per-protocol map */ + b_acquired = ecore_cxt_test_cid_acquired(p_hwfn, cid, vfid, + &type, &p_map); + + if (!b_acquired) + return; + + rel_cid = cid - p_map->start_cid; + OSAL_CLEAR_BIT(rel_cid, p_map->cid_map); + + DP_VERBOSE(p_hwfn, ECORE_MSG_CXT, + "Released CID 0x%08x [rel. 
%08x] vfid %02x type %d\n", + cid, rel_cid, vfid, type); +} + +void ecore_cxt_release_cid(struct ecore_hwfn *p_hwfn, u32 cid) +{ + _ecore_cxt_release_cid(p_hwfn, cid, ECORE_CXT_PF_CID); +} + +enum _ecore_status_t ecore_cxt_get_cid_info(struct ecore_hwfn *p_hwfn, + struct ecore_cxt_info *p_info) +{ + struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; + struct ecore_cid_acquired_map *p_map = OSAL_NULL; + u32 conn_cxt_size, hw_p_size, cxts_per_p, line; + enum protocol_type type; + bool b_acquired; + + /* Test acquired and find matching per-protocol map */ + b_acquired = ecore_cxt_test_cid_acquired(p_hwfn, p_info->iid, + ECORE_CXT_PF_CID, + &type, &p_map); + + if (!b_acquired) + return ECORE_INVAL; + + /* set the protocl type */ + p_info->type = type; + + /* compute context virtual pointer */ + hw_p_size = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC].p_size.val; + + conn_cxt_size = CONN_CXT_SIZE(p_hwfn); + cxts_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / conn_cxt_size; + line = p_info->iid / cxts_per_p; + + /* Make sure context is allocated (dynamic allocation) */ + if (!p_mngr->ilt_shadow[line].virt_addr) + return ECORE_INVAL; + + p_info->p_cxt = (u8 *)p_mngr->ilt_shadow[line].virt_addr + + p_info->iid % cxts_per_p * conn_cxt_size; + + DP_VERBOSE(p_hwfn, (ECORE_MSG_ILT | ECORE_MSG_CXT), + "Accessing ILT shadow[%d]: CXT pointer is at %p (for iid %d)\n", + (p_info->iid / cxts_per_p), p_info->p_cxt, p_info->iid); + + return ECORE_SUCCESS; +} + +enum _ecore_status_t ecore_cxt_set_pf_params(struct ecore_hwfn *p_hwfn) +{ + /* Set the number of required CORE connections */ + u32 core_cids = 1; /* SPQ */ + + ecore_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_CORE, core_cids, 0); + + switch (p_hwfn->hw_info.personality) { + case ECORE_PCI_ETH: + { + u32 count = 0; + + struct ecore_eth_pf_params *p_params = + &p_hwfn->pf_params.eth_pf_params; + + if (!p_params->num_vf_cons) + p_params->num_vf_cons = ETH_PF_PARAMS_VF_CONS_DEFAULT; + ecore_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH, + p_params->num_cons, + p_params->num_vf_cons); + + count = p_params->num_arfs_filters; + + if (!OSAL_TEST_BIT(ECORE_MF_DISABLE_ARFS, + &p_hwfn->p_dev->mf_bits)) + p_hwfn->p_cxt_mngr->arfs_count = count; + + break; + } + default: + return ECORE_INVAL; + } + + return ECORE_SUCCESS; +} + +/* This function is very RoCE oriented, if another protocol in the future + * will want this feature we'll need to modify the function to be more generic + */ +enum _ecore_status_t +ecore_cxt_dynamic_ilt_alloc(struct ecore_hwfn *p_hwfn, + enum ecore_cxt_elem_type elem_type, + u32 iid) +{ + u32 reg_offset, shadow_line, elem_size, hw_p_size, elems_per_p, line; + struct ecore_ilt_client_cfg *p_cli; + struct ecore_ilt_cli_blk *p_blk; + struct ecore_ptt *p_ptt; + dma_addr_t p_phys; + u64 ilt_hw_entry; + void *p_virt; + enum _ecore_status_t rc = ECORE_SUCCESS; + + switch (elem_type) { + case ECORE_ELEM_CXT: + p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC]; + elem_size = CONN_CXT_SIZE(p_hwfn); + p_blk = &p_cli->pf_blks[CDUC_BLK]; + break; + case ECORE_ELEM_SRQ: + p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM]; + elem_size = SRQ_CXT_SIZE; + p_blk = &p_cli->pf_blks[SRQ_BLK]; + break; + case ECORE_ELEM_TASK: + p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT]; + elem_size = TYPE1_TASK_CXT_SIZE(p_hwfn); + p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(ECORE_CXT_ROCE_TID_SEG)]; + break; + default: + DP_NOTICE(p_hwfn, false, + "ECORE_INVALID elem type = %d", elem_type); + return ECORE_INVAL; + } + + /* Calculate line in ilt */ + hw_p_size = p_cli->p_size.val; + 
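+ /* The ILT line backing element "iid" follows from how many elements
+  * fit in one ILT page:
+  *   line = p_blk->start_line + iid / (ILT_PAGE_IN_BYTES(hw_p_size) /
+  *                                     elem_size);
+  * the shadow index is PF-relative, hence the pf_start_line
+  * subtraction below. Illustration with an assumed 512-byte element
+  * and the default 64K page: 128 elements fit per page, so iid 300
+  * lands on start_line + 2.
+  */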
elems_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / elem_size; + line = p_blk->start_line + (iid / elems_per_p); + shadow_line = line - p_hwfn->p_cxt_mngr->pf_start_line; + + /* If line is already allocated, do nothing, otherwise allocate it and + * write it to the PSWRQ2 registers. + * This section can be run in parallel from different contexts and thus + * a mutex protection is needed. + */ + + OSAL_MUTEX_ACQUIRE(&p_hwfn->p_cxt_mngr->mutex); + + if (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].virt_addr) + goto out0; + + p_ptt = ecore_ptt_acquire(p_hwfn); + if (!p_ptt) { + DP_NOTICE(p_hwfn, false, + "ECORE_TIME_OUT on ptt acquire - dynamic allocation"); + rc = ECORE_TIMEOUT; + goto out0; + } + + p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, + &p_phys, + p_blk->real_size_in_page); + if (!p_virt) { + rc = ECORE_NOMEM; + goto out1; + } + OSAL_MEM_ZERO(p_virt, p_blk->real_size_in_page); + + p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].virt_addr = p_virt; + p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].phys_addr = p_phys; + p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].size = + p_blk->real_size_in_page; + + /* compute absolute offset */ + reg_offset = PSWRQ2_REG_ILT_MEMORY + + (line * ILT_REG_SIZE_IN_BYTES * ILT_ENTRY_IN_REGS); + + ilt_hw_entry = 0; + SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL); + SET_FIELD(ilt_hw_entry, + ILT_ENTRY_PHY_ADDR, + (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].phys_addr >> 12)); + +/* Write via DMAE since the PSWRQ2_REG_ILT_MEMORY line is a wide-bus */ + + ecore_dmae_host2grc(p_hwfn, p_ptt, (u64)(osal_uintptr_t)&ilt_hw_entry, + reg_offset, sizeof(ilt_hw_entry) / sizeof(u32), + OSAL_NULL /* default parameters */); + +out1: + ecore_ptt_release(p_hwfn, p_ptt); +out0: + OSAL_MUTEX_RELEASE(&p_hwfn->p_cxt_mngr->mutex); + + return rc; +} + +/* This function is very RoCE oriented, if another protocol in the future + * will want this feature we'll need to modify the function to be more generic + */ +static enum _ecore_status_t +ecore_cxt_free_ilt_range(struct ecore_hwfn *p_hwfn, + enum ecore_cxt_elem_type elem_type, + u32 start_iid, u32 count) +{ + u32 start_line, end_line, shadow_start_line, shadow_end_line; + u32 reg_offset, elem_size, hw_p_size, elems_per_p; + struct ecore_ilt_client_cfg *p_cli; + struct ecore_ilt_cli_blk *p_blk; + u32 end_iid = start_iid + count; + struct ecore_ptt *p_ptt; + u64 ilt_hw_entry = 0; + u32 i; + + switch (elem_type) { + case ECORE_ELEM_CXT: + p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC]; + elem_size = CONN_CXT_SIZE(p_hwfn); + p_blk = &p_cli->pf_blks[CDUC_BLK]; + break; + case ECORE_ELEM_SRQ: + p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM]; + elem_size = SRQ_CXT_SIZE; + p_blk = &p_cli->pf_blks[SRQ_BLK]; + break; + case ECORE_ELEM_TASK: + p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT]; + elem_size = TYPE1_TASK_CXT_SIZE(p_hwfn); + p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(ECORE_CXT_ROCE_TID_SEG)]; + break; + default: + DP_NOTICE(p_hwfn, false, + "ECORE_INVALID elem type = %d", elem_type); + return ECORE_INVAL; + } + + /* Calculate line in ilt */ + hw_p_size = p_cli->p_size.val; + elems_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / elem_size; + start_line = p_blk->start_line + (start_iid / elems_per_p); + end_line = p_blk->start_line + (end_iid / elems_per_p); + if (((end_iid + 1) / elems_per_p) != (end_iid / elems_per_p)) + end_line--; + + shadow_start_line = start_line - p_hwfn->p_cxt_mngr->pf_start_line; + shadow_end_line = end_line - p_hwfn->p_cxt_mngr->pf_start_line; + + p_ptt = ecore_ptt_acquire(p_hwfn); + if (!p_ptt) { + DP_NOTICE(p_hwfn, 
false, + "ECORE_TIME_OUT on ptt acquire - dynamic allocation"); + return ECORE_TIMEOUT; + } + + for (i = shadow_start_line; i < shadow_end_line; i++) { + if (!p_hwfn->p_cxt_mngr->ilt_shadow[i].virt_addr) + continue; + + OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, + p_hwfn->p_cxt_mngr->ilt_shadow[i].virt_addr, + p_hwfn->p_cxt_mngr->ilt_shadow[i].phys_addr, + p_hwfn->p_cxt_mngr->ilt_shadow[i].size); + + p_hwfn->p_cxt_mngr->ilt_shadow[i].virt_addr = OSAL_NULL; + p_hwfn->p_cxt_mngr->ilt_shadow[i].phys_addr = 0; + p_hwfn->p_cxt_mngr->ilt_shadow[i].size = 0; + + /* compute absolute offset */ + reg_offset = PSWRQ2_REG_ILT_MEMORY + + ((start_line++) * ILT_REG_SIZE_IN_BYTES * + ILT_ENTRY_IN_REGS); + + /* Write via DMAE since the PSWRQ2_REG_ILT_MEMORY line is a + * wide-bus. + */ + ecore_dmae_host2grc(p_hwfn, p_ptt, + (u64)(osal_uintptr_t)&ilt_hw_entry, + reg_offset, + sizeof(ilt_hw_entry) / sizeof(u32), + OSAL_NULL /* default parameters */); + } + + ecore_ptt_release(p_hwfn, p_ptt); + + return ECORE_SUCCESS; +} diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_cxt.h b/src/spdk/dpdk/drivers/net/qede/base/ecore_cxt.h new file mode 100644 index 000000000..55f08027d --- /dev/null +++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_cxt.h @@ -0,0 +1,215 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. + * All rights reserved. + * www.cavium.com + */ + +#ifndef _ECORE_CID_ +#define _ECORE_CID_ + +#include "ecore_hsi_common.h" +#include "ecore_proto_if.h" +#include "ecore_cxt_api.h" + +/* Tasks segments definitions */ +#define ECORE_CXT_ISCSI_TID_SEG PROTOCOLID_ISCSI /* 0 */ +#define ECORE_CXT_FCOE_TID_SEG PROTOCOLID_FCOE /* 1 */ +#define ECORE_CXT_ROCE_TID_SEG PROTOCOLID_ROCE /* 2 */ + +enum ecore_cxt_elem_type { + ECORE_ELEM_CXT, + ECORE_ELEM_SRQ, + ECORE_ELEM_TASK +}; + +enum ilt_clients { + ILT_CLI_CDUC, + ILT_CLI_CDUT, + ILT_CLI_QM, + ILT_CLI_TM, + ILT_CLI_SRC, + ILT_CLI_TSDM, + ILT_CLI_RGFS, + ILT_CLI_TGFS, + ILT_CLI_MAX +}; + +u32 ecore_cxt_get_proto_cid_count(struct ecore_hwfn *p_hwfn, + enum protocol_type type, + u32 *vf_cid); + +u32 ecore_cxt_get_proto_tid_count(struct ecore_hwfn *p_hwfn, + enum protocol_type type); + +u32 ecore_cxt_get_proto_cid_start(struct ecore_hwfn *p_hwfn, + enum protocol_type type); +u32 ecore_cxt_get_srq_count(struct ecore_hwfn *p_hwfn); + +/** + * @brief ecore_cxt_set_pf_params - Set the PF params for cxt init + * + * @param p_hwfn + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_cxt_set_pf_params(struct ecore_hwfn *p_hwfn); + +/** + * @brief ecore_cxt_cfg_ilt_compute - compute ILT init parameters + * + * @param p_hwfn + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn); + +/** + * @brief ecore_cxt_mngr_alloc - Allocate and init the context manager struct + * + * @param p_hwfn + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_cxt_mngr_alloc(struct ecore_hwfn *p_hwfn); + +/** + * @brief ecore_cxt_mngr_free + * + * @param p_hwfn + */ +void ecore_cxt_mngr_free(struct ecore_hwfn *p_hwfn); + +/** + * @brief ecore_cxt_tables_alloc - Allocate ILT shadow, Searcher T2, acquired + * map + * + * @param p_hwfn + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_cxt_tables_alloc(struct ecore_hwfn *p_hwfn); + +/** + * @brief ecore_cxt_mngr_setup - Reset the acquired CIDs + * + * @param p_hwfn + */ +void ecore_cxt_mngr_setup(struct ecore_hwfn *p_hwfn); + +/** + * @brief ecore_cxt_hw_init_common - Initailze ILT and DQ, common phase, 
per + * path. + * + * @param p_hwfn + */ +void ecore_cxt_hw_init_common(struct ecore_hwfn *p_hwfn); + +/** + * @brief ecore_cxt_hw_init_pf - Initailze ILT and DQ, PF phase, per path. + * + * @param p_hwfn + * @param p_ptt + */ +void ecore_cxt_hw_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt); + +/** + * @brief ecore_qm_init_pf - Initailze the QM PF phase, per path + * + * @param p_hwfn + * @param p_ptt + * @param is_pf_loading + */ +void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, + bool is_pf_loading); + + /** + * @brief Reconfigures QM pf on the fly + * + * @param p_hwfn + * @param p_ptt + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_qm_reconf(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt); + +#define ECORE_CXT_PF_CID (0xff) + +/** + * @brief ecore_cxt_release - Release a cid + * + * @param p_hwfn + * @param cid + */ +void ecore_cxt_release_cid(struct ecore_hwfn *p_hwfn, u32 cid); + +/** + * @brief ecore_cxt_release - Release a cid belonging to a vf-queue + * + * @param p_hwfn + * @param cid + * @param vfid - engine relative index. ECORE_CXT_PF_CID if belongs to PF + */ +void _ecore_cxt_release_cid(struct ecore_hwfn *p_hwfn, + u32 cid, u8 vfid); + +/** + * @brief ecore_cxt_acquire - Acquire a new cid of a specific protocol type + * + * @param p_hwfn + * @param type + * @param p_cid + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_cxt_acquire_cid(struct ecore_hwfn *p_hwfn, + enum protocol_type type, + u32 *p_cid); + +/** + * @brief _ecore_cxt_acquire - Acquire a new cid of a specific protocol type + * for a vf-queue + * + * @param p_hwfn + * @param type + * @param p_cid + * @param vfid - engine relative index. ECORE_CXT_PF_CID if belongs to PF + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t _ecore_cxt_acquire_cid(struct ecore_hwfn *p_hwfn, + enum protocol_type type, + u32 *p_cid, u8 vfid); + +/** + * @brief ecore_cxt_get_tid_mem_info - function checks if the + * page containing the iid in the ilt is already + * allocated, if it is not it allocates the page. + * + * @param p_hwfn + * @param elem_type + * @param iid + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t +ecore_cxt_dynamic_ilt_alloc(struct ecore_hwfn *p_hwfn, + enum ecore_cxt_elem_type elem_type, + u32 iid); + +/** + * @brief ecore_cxt_free_proto_ilt - function frees ilt pages + * associated with the protocol passed. + * + * @param p_hwfn + * @param proto + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_cxt_free_proto_ilt(struct ecore_hwfn *p_hwfn, + enum protocol_type proto); + +#define ECORE_CTX_WORKING_MEM 0 +#define ECORE_CTX_FL_MEM 1 + +#endif /* _ECORE_CID_ */ diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_cxt_api.h b/src/spdk/dpdk/drivers/net/qede/base/ecore_cxt_api.h new file mode 100644 index 000000000..6c8b2831c --- /dev/null +++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_cxt_api.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. + * All rights reserved. 
+ * www.cavium.com + */ + +#ifndef __ECORE_CXT_API_H__ +#define __ECORE_CXT_API_H__ + +struct ecore_hwfn; + +struct ecore_cxt_info { + void *p_cxt; + u32 iid; + enum protocol_type type; +}; + +#define MAX_TID_BLOCKS 512 +struct ecore_tid_mem { + u32 tid_size; + u32 num_tids_per_block; + u32 waste; + u8 *blocks[MAX_TID_BLOCKS]; /* 4K */ +}; + +/** +* @brief ecoreo_cid_get_cxt_info - Returns the context info for a specific cid +* +* +* @param p_hwfn +* @param p_info in/out +* +* @return enum _ecore_status_t +*/ +enum _ecore_status_t ecore_cxt_get_cid_info(struct ecore_hwfn *p_hwfn, + struct ecore_cxt_info *p_info); + +#endif diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_dcbx.c b/src/spdk/dpdk/drivers/net/qede/base/ecore_dcbx.c new file mode 100644 index 000000000..ccd4383bb --- /dev/null +++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_dcbx.c @@ -0,0 +1,1607 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. + * All rights reserved. + * www.cavium.com + */ + +#include "bcm_osal.h" +#include "ecore.h" +#include "ecore_sp_commands.h" +#include "ecore_dcbx.h" +#include "ecore_cxt.h" +#include "ecore_gtt_reg_addr.h" +#include "ecore_iro.h" +#include "ecore_iov_api.h" + +#define ECORE_DCBX_MAX_MIB_READ_TRY (100) +#define ECORE_ETH_TYPE_DEFAULT (0) + +#define ECORE_DCBX_INVALID_PRIORITY 0xFF + +/* Get Traffic Class from priority traffic class table, 4 bits represent + * the traffic class corresponding to the priority. + */ +#define ECORE_DCBX_PRIO2TC(prio_tc_tbl, prio) \ + ((u32)(prio_tc_tbl >> ((7 - prio) * 4)) & 0x7) + +static bool ecore_dcbx_app_ethtype(u32 app_info_bitmap) +{ + return !!(GET_MFW_FIELD(app_info_bitmap, DCBX_APP_SF) == + DCBX_APP_SF_ETHTYPE); +} + +static bool ecore_dcbx_ieee_app_ethtype(u32 app_info_bitmap) +{ + u8 mfw_val = GET_MFW_FIELD(app_info_bitmap, DCBX_APP_SF_IEEE); + + /* Old MFW */ + if (mfw_val == DCBX_APP_SF_IEEE_RESERVED) + return ecore_dcbx_app_ethtype(app_info_bitmap); + + return !!(mfw_val == DCBX_APP_SF_IEEE_ETHTYPE); +} + +static bool ecore_dcbx_app_port(u32 app_info_bitmap) +{ + return !!(GET_MFW_FIELD(app_info_bitmap, DCBX_APP_SF) == + DCBX_APP_SF_PORT); +} + +static bool ecore_dcbx_ieee_app_port(u32 app_info_bitmap, u8 type) +{ + u8 mfw_val = GET_MFW_FIELD(app_info_bitmap, DCBX_APP_SF_IEEE); + + /* Old MFW */ + if (mfw_val == DCBX_APP_SF_IEEE_RESERVED) + return ecore_dcbx_app_port(app_info_bitmap); + + return !!(mfw_val == type || mfw_val == DCBX_APP_SF_IEEE_TCP_UDP_PORT); +} + +static bool ecore_dcbx_default_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee) +{ + bool ethtype; + + if (ieee) + ethtype = ecore_dcbx_ieee_app_ethtype(app_info_bitmap); + else + ethtype = ecore_dcbx_app_ethtype(app_info_bitmap); + + return !!(ethtype && (proto_id == ECORE_ETH_TYPE_DEFAULT)); +} + +static bool ecore_dcbx_iwarp_tlv(struct ecore_hwfn *p_hwfn, u32 app_info_bitmap, + u16 proto_id, bool ieee) +{ + bool port; + + if (!p_hwfn->p_dcbx_info->iwarp_port) + return false; + + if (ieee) + port = ecore_dcbx_ieee_app_port(app_info_bitmap, + DCBX_APP_SF_IEEE_TCP_PORT); + else + port = ecore_dcbx_app_port(app_info_bitmap); + + return !!(port && (proto_id == p_hwfn->p_dcbx_info->iwarp_port)); +} + +static void +ecore_dcbx_dp_protocol(struct ecore_hwfn *p_hwfn, + struct ecore_dcbx_results *p_data) +{ + enum dcbx_protocol_type id; + int i; + + DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "DCBX negotiated: %d\n", + p_data->dcbx_enabled); + + for (i = 0; i < OSAL_ARRAY_SIZE(ecore_dcbx_app_update); i++) { + id = ecore_dcbx_app_update[i].id; + + 
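+ /* ecore_dcbx_app_update[] (declared in ecore_dcbx_api.h) maps each
+ * dcbx_protocol_type to a printable name and the PCI personality it
+ * belongs to; this loop only dumps the negotiated result for every
+ * supported protocol.
+ */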
DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, + "%s info: update %d, enable %d, prio %d, tc %d," + " num_active_tc %d dscp_enable = %d dscp_val = %d\n", + ecore_dcbx_app_update[i].name, + p_data->arr[id].update, + p_data->arr[id].enable, p_data->arr[id].priority, + p_data->arr[id].tc, p_hwfn->hw_info.num_active_tc, + p_data->arr[id].dscp_enable, + p_data->arr[id].dscp_val); + } +} + +u8 ecore_dcbx_get_dscp_value(struct ecore_hwfn *p_hwfn, u8 pri) +{ + struct ecore_dcbx_dscp_params *dscp = &p_hwfn->p_dcbx_info->get.dscp; + u8 i; + + if (!dscp->enabled) + return ECORE_DCBX_DSCP_DISABLED; + + for (i = 0; i < ECORE_DCBX_DSCP_SIZE; i++) + if (pri == dscp->dscp_pri_map[i]) + return i; + + return ECORE_DCBX_DSCP_DISABLED; +} + +static void +ecore_dcbx_set_params(struct ecore_dcbx_results *p_data, + struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, + bool enable, u8 prio, u8 tc, + enum dcbx_protocol_type type, + enum ecore_pci_personality personality) +{ + /* PF update ramrod data */ + p_data->arr[type].enable = enable; + p_data->arr[type].priority = prio; + p_data->arr[type].tc = tc; + p_data->arr[type].dscp_val = ecore_dcbx_get_dscp_value(p_hwfn, prio); + if (p_data->arr[type].dscp_val == ECORE_DCBX_DSCP_DISABLED) { + p_data->arr[type].dscp_enable = false; + p_data->arr[type].dscp_val = 0; + } else { + p_data->arr[type].dscp_enable = true; + } + p_data->arr[type].update = UPDATE_DCB_DSCP; + + /* Do not add valn tag 0 when DCB is enabled and port is in UFP mode */ + if (OSAL_TEST_BIT(ECORE_MF_UFP_SPECIFIC, &p_hwfn->p_dev->mf_bits)) + p_data->arr[type].dont_add_vlan0 = true; + + /* QM reconf data */ + if (p_hwfn->hw_info.personality == personality) + p_hwfn->hw_info.offload_tc = tc; + + /* Configure dcbx vlan priority in doorbell block for roce EDPM */ + if (OSAL_TEST_BIT(ECORE_MF_UFP_SPECIFIC, &p_hwfn->p_dev->mf_bits) && + (type == DCBX_PROTOCOL_ROCE)) { + ecore_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 1); + ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_PCP, prio << 1); + } +} + +/* Update app protocol data and hw_info fields with the TLV info */ +static void +ecore_dcbx_update_app_info(struct ecore_dcbx_results *p_data, + struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, + bool enable, u8 prio, u8 tc, + enum dcbx_protocol_type type) +{ + enum ecore_pci_personality personality; + enum dcbx_protocol_type id; + int i; + + for (i = 0; i < OSAL_ARRAY_SIZE(ecore_dcbx_app_update); i++) { + id = ecore_dcbx_app_update[i].id; + + if (type != id) + continue; + + personality = ecore_dcbx_app_update[i].personality; + + ecore_dcbx_set_params(p_data, p_hwfn, p_ptt, enable, + prio, tc, type, personality); + } +} + +static enum _ecore_status_t +ecore_dcbx_get_app_priority(u8 pri_bitmap, u8 *priority) +{ + u32 pri_mask, pri = ECORE_MAX_PFC_PRIORITIES; + u32 index = ECORE_MAX_PFC_PRIORITIES - 1; + enum _ecore_status_t rc = ECORE_SUCCESS; + + /* Bitmap 1 corresponds to priority 0, return priority 0 */ + if (pri_bitmap == 1) { + *priority = 0; + return rc; + } + + /* Choose the highest priority */ + while ((pri == ECORE_MAX_PFC_PRIORITIES) && index) { + pri_mask = 1 << index; + if (pri_bitmap & pri_mask) + pri = index; + index--; + } + + if (pri < ECORE_MAX_PFC_PRIORITIES) + *priority = (u8)pri; + else + rc = ECORE_INVAL; + + return rc; +} + +static bool +ecore_dcbx_get_app_protocol_type(struct ecore_hwfn *p_hwfn, + u32 app_prio_bitmap, u16 id, + enum dcbx_protocol_type *type, bool ieee) +{ + if (ecore_dcbx_default_tlv(app_prio_bitmap, id, ieee)) { + *type = DCBX_PROTOCOL_ETH; + } else { + *type = DCBX_MAX_PROTOCOL_TYPE; + 
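+ /* Only the default Ethernet TLV (an ethtype match with protocol id
+ * ECORE_ETH_TYPE_DEFAULT, i.e. 0) is recognized here; APP TLVs for any
+ * other protocol fall through to this branch and are only logged just
+ * below as requiring no action.
+ */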
DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, + "No action required, App TLV entry = 0x%x\n", + app_prio_bitmap); + return false; + } + + return true; +} + +/* Parse app TLV's to update TC information in hw_info structure for + * reconfiguring QM. Get protocol specific data for PF update ramrod command. + */ +static enum _ecore_status_t +ecore_dcbx_process_tlv(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, + struct ecore_dcbx_results *p_data, + struct dcbx_app_priority_entry *p_tbl, u32 pri_tc_tbl, + int count, u8 dcbx_version) +{ + enum dcbx_protocol_type type; + bool enable, ieee, eth_tlv; + u8 tc, priority_map; + u16 protocol_id; + u8 priority; + enum _ecore_status_t rc = ECORE_SUCCESS; + int i; + + DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, + "Num APP entries = %d pri_tc_tbl = 0x%x dcbx_version = %u\n", + count, pri_tc_tbl, dcbx_version); + + ieee = (dcbx_version == DCBX_CONFIG_VERSION_IEEE); + eth_tlv = false; + /* Parse APP TLV */ + for (i = 0; i < count; i++) { + protocol_id = GET_MFW_FIELD(p_tbl[i].entry, + DCBX_APP_PROTOCOL_ID); + priority_map = GET_MFW_FIELD(p_tbl[i].entry, DCBX_APP_PRI_MAP); + DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "Id = 0x%x pri_map = %u\n", + protocol_id, priority_map); + rc = ecore_dcbx_get_app_priority(priority_map, &priority); + if (rc == ECORE_INVAL) { + DP_ERR(p_hwfn, "Invalid priority\n"); + return ECORE_INVAL; + } + + tc = ECORE_DCBX_PRIO2TC(pri_tc_tbl, priority); + if (ecore_dcbx_get_app_protocol_type(p_hwfn, p_tbl[i].entry, + protocol_id, &type, + ieee)) { + /* ETH always have the enable bit reset, as it gets + * vlan information per packet. For other protocols, + * should be set according to the dcbx_enabled + * indication, but we only got here if there was an + * app tlv for the protocol, so dcbx must be enabled. + */ + if (type == DCBX_PROTOCOL_ETH) { + enable = false; + eth_tlv = true; + } else { + enable = true; + } + + ecore_dcbx_update_app_info(p_data, p_hwfn, p_ptt, + enable, priority, tc, type); + } + } + + /* If Eth TLV is not detected, use UFP TC as default TC */ + if (OSAL_TEST_BIT(ECORE_MF_UFP_SPECIFIC, + &p_hwfn->p_dev->mf_bits) && !eth_tlv) + p_data->arr[DCBX_PROTOCOL_ETH].tc = p_hwfn->ufp_info.tc; + + /* Update ramrod protocol data and hw_info fields + * with default info when corresponding APP TLV's are not detected. + * The enabled field has a different logic for ethernet as only for + * ethernet dcb should disabled by default, as the information arrives + * from the OS (unless an explicit app tlv was present). + */ + tc = p_data->arr[DCBX_PROTOCOL_ETH].tc; + priority = p_data->arr[DCBX_PROTOCOL_ETH].priority; + for (type = 0; type < DCBX_MAX_PROTOCOL_TYPE; type++) { + if (p_data->arr[type].update) + continue; + + /* if no app tlv was present, don't override in FW */ + ecore_dcbx_update_app_info(p_data, p_hwfn, p_ptt, + p_data->arr[DCBX_PROTOCOL_ETH].enable, + priority, tc, type); + } + + return ECORE_SUCCESS; +} + +/* Parse app TLV's to update TC information in hw_info structure for + * reconfiguring QM. Get protocol specific data for PF update ramrod command. 
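+ * For example, an APP TLV with pri_map 0x30 resolves to priority 5
+ * (ecore_dcbx_get_app_priority() picks the highest set bit), and with
+ * pri_tc_tbl 0x01234567 that priority maps to traffic class 5, since
+ * ECORE_DCBX_PRIO2TC(0x01234567, 5) = (0x01234567 >> 8) & 0x7 = 5;
+ * priority 0 occupies the most significant nibble of the table.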
+ */ +static enum _ecore_status_t +ecore_dcbx_process_mib_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) +{ + struct dcbx_app_priority_feature *p_app; + struct dcbx_app_priority_entry *p_tbl; + struct ecore_dcbx_results data; + struct dcbx_ets_feature *p_ets; + struct ecore_hw_info *p_info; + u32 pri_tc_tbl, flags; + u8 dcbx_version; + int num_entries; + enum _ecore_status_t rc = ECORE_SUCCESS; + + flags = p_hwfn->p_dcbx_info->operational.flags; + dcbx_version = GET_MFW_FIELD(flags, DCBX_CONFIG_VERSION); + + p_app = &p_hwfn->p_dcbx_info->operational.features.app; + p_tbl = p_app->app_pri_tbl; + + p_ets = &p_hwfn->p_dcbx_info->operational.features.ets; + pri_tc_tbl = p_ets->pri_tc_tbl[0]; + + p_info = &p_hwfn->hw_info; + num_entries = GET_MFW_FIELD(p_app->flags, DCBX_APP_NUM_ENTRIES); + + OSAL_MEMSET(&data, 0, sizeof(struct ecore_dcbx_results)); + rc = ecore_dcbx_process_tlv(p_hwfn, p_ptt, &data, p_tbl, pri_tc_tbl, + num_entries, dcbx_version); + if (rc != ECORE_SUCCESS) + return rc; + + p_info->num_active_tc = GET_MFW_FIELD(p_ets->flags, DCBX_ETS_MAX_TCS); + p_hwfn->qm_info.ooo_tc = GET_MFW_FIELD(p_ets->flags, DCBX_OOO_TC); + data.pf_id = p_hwfn->rel_pf_id; + data.dcbx_enabled = !!dcbx_version; + + ecore_dcbx_dp_protocol(p_hwfn, &data); + + OSAL_MEMCPY(&p_hwfn->p_dcbx_info->results, &data, + sizeof(struct ecore_dcbx_results)); + + return ECORE_SUCCESS; +} + +static enum _ecore_status_t +ecore_dcbx_copy_mib(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_dcbx_mib_meta_data *p_data, + enum ecore_mib_read_type type) +{ + u32 prefix_seq_num, suffix_seq_num; + int read_count = 0; + enum _ecore_status_t rc = ECORE_SUCCESS; + + /* The data is considered to be valid only if both sequence numbers are + * the same. + */ + do { + if (type == ECORE_DCBX_REMOTE_LLDP_MIB) { + ecore_memcpy_from(p_hwfn, p_ptt, p_data->lldp_remote, + p_data->addr, p_data->size); + prefix_seq_num = p_data->lldp_remote->prefix_seq_num; + suffix_seq_num = p_data->lldp_remote->suffix_seq_num; + } else if (type == ECORE_DCBX_LLDP_TLVS) { + ecore_memcpy_from(p_hwfn, p_ptt, p_data->lldp_tlvs, + p_data->addr, p_data->size); + prefix_seq_num = p_data->lldp_tlvs->prefix_seq_num; + suffix_seq_num = p_data->lldp_tlvs->suffix_seq_num; + + } else { + ecore_memcpy_from(p_hwfn, p_ptt, p_data->mib, + p_data->addr, p_data->size); + prefix_seq_num = p_data->mib->prefix_seq_num; + suffix_seq_num = p_data->mib->suffix_seq_num; + } + read_count++; + + DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, + "mib type = %d, try count = %d prefix seq num =" + " %d suffix seq num = %d\n", + type, read_count, prefix_seq_num, suffix_seq_num); + } while ((prefix_seq_num != suffix_seq_num) && + (read_count < ECORE_DCBX_MAX_MIB_READ_TRY)); + + if (read_count >= ECORE_DCBX_MAX_MIB_READ_TRY) { + DP_ERR(p_hwfn, + "MIB read err, mib type = %d, try count =" + " %d prefix seq num = %d suffix seq num = %d\n", + type, read_count, prefix_seq_num, suffix_seq_num); + rc = ECORE_IO; + } + + return rc; +} + +static void +ecore_dcbx_get_priority_info(struct ecore_hwfn *p_hwfn, + struct ecore_dcbx_app_prio *p_prio, + struct ecore_dcbx_results *p_results) +{ + u8 val; + + if (p_results->arr[DCBX_PROTOCOL_ETH].update && + p_results->arr[DCBX_PROTOCOL_ETH].enable) + p_prio->eth = p_results->arr[DCBX_PROTOCOL_ETH].priority; + + DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, + "Priorities: eth %d\n", + p_prio->eth); +} + +static void +ecore_dcbx_get_app_data(struct ecore_hwfn *p_hwfn, + struct dcbx_app_priority_feature *p_app, + struct dcbx_app_priority_entry *p_tbl, + 
struct ecore_dcbx_params *p_params, bool ieee) +{ + struct ecore_app_entry *entry; + u8 pri_map; + int i; + + p_params->app_willing = GET_MFW_FIELD(p_app->flags, DCBX_APP_WILLING); + p_params->app_valid = GET_MFW_FIELD(p_app->flags, DCBX_APP_ENABLED); + p_params->app_error = GET_MFW_FIELD(p_app->flags, DCBX_APP_ERROR); + p_params->num_app_entries = GET_MFW_FIELD(p_app->flags, + DCBX_APP_NUM_ENTRIES); + for (i = 0; i < p_params->num_app_entries; i++) { + entry = &p_params->app_entry[i]; + if (ieee) { + u8 sf_ieee; + u32 val; + + sf_ieee = GET_MFW_FIELD(p_tbl[i].entry, + DCBX_APP_SF_IEEE); + switch (sf_ieee) { + case DCBX_APP_SF_IEEE_RESERVED: + /* Old MFW */ + val = GET_MFW_FIELD(p_tbl[i].entry, + DCBX_APP_SF); + entry->sf_ieee = val ? + ECORE_DCBX_SF_IEEE_TCP_UDP_PORT : + ECORE_DCBX_SF_IEEE_ETHTYPE; + break; + case DCBX_APP_SF_IEEE_ETHTYPE: + entry->sf_ieee = ECORE_DCBX_SF_IEEE_ETHTYPE; + break; + case DCBX_APP_SF_IEEE_TCP_PORT: + entry->sf_ieee = ECORE_DCBX_SF_IEEE_TCP_PORT; + break; + case DCBX_APP_SF_IEEE_UDP_PORT: + entry->sf_ieee = ECORE_DCBX_SF_IEEE_UDP_PORT; + break; + case DCBX_APP_SF_IEEE_TCP_UDP_PORT: + entry->sf_ieee = + ECORE_DCBX_SF_IEEE_TCP_UDP_PORT; + break; + } + } else { + entry->ethtype = !(GET_MFW_FIELD(p_tbl[i].entry, + DCBX_APP_SF)); + } + + pri_map = GET_MFW_FIELD(p_tbl[i].entry, DCBX_APP_PRI_MAP); + ecore_dcbx_get_app_priority(pri_map, &entry->prio); + entry->proto_id = GET_MFW_FIELD(p_tbl[i].entry, + DCBX_APP_PROTOCOL_ID); + ecore_dcbx_get_app_protocol_type(p_hwfn, p_tbl[i].entry, + entry->proto_id, + &entry->proto_type, ieee); + } + + DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, + "APP params: willing %d, valid %d error = %d\n", + p_params->app_willing, p_params->app_valid, + p_params->app_error); +} + +static void +ecore_dcbx_get_pfc_data(struct ecore_hwfn *p_hwfn, + u32 pfc, struct ecore_dcbx_params *p_params) +{ + u8 pfc_map; + + p_params->pfc.willing = GET_MFW_FIELD(pfc, DCBX_PFC_WILLING); + p_params->pfc.max_tc = GET_MFW_FIELD(pfc, DCBX_PFC_CAPS); + p_params->pfc.enabled = GET_MFW_FIELD(pfc, DCBX_PFC_ENABLED); + pfc_map = GET_MFW_FIELD(pfc, DCBX_PFC_PRI_EN_BITMAP); + p_params->pfc.prio[0] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_0); + p_params->pfc.prio[1] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_1); + p_params->pfc.prio[2] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_2); + p_params->pfc.prio[3] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_3); + p_params->pfc.prio[4] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_4); + p_params->pfc.prio[5] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_5); + p_params->pfc.prio[6] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_6); + p_params->pfc.prio[7] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_7); + + DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, + "PFC params: willing %d, pfc_bitmap %u max_tc = %u enabled = %d\n", + p_params->pfc.willing, pfc_map, p_params->pfc.max_tc, + p_params->pfc.enabled); +} + +static void +ecore_dcbx_get_ets_data(struct ecore_hwfn *p_hwfn, + struct dcbx_ets_feature *p_ets, + struct ecore_dcbx_params *p_params) +{ + u32 bw_map[2], tsa_map[2], pri_map; + int i; + + p_params->ets_willing = GET_MFW_FIELD(p_ets->flags, DCBX_ETS_WILLING); + p_params->ets_enabled = GET_MFW_FIELD(p_ets->flags, DCBX_ETS_ENABLED); + p_params->ets_cbs = GET_MFW_FIELD(p_ets->flags, DCBX_ETS_CBS); + p_params->max_ets_tc = GET_MFW_FIELD(p_ets->flags, DCBX_ETS_MAX_TCS); + DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, + "ETS params: willing %d, enabled = %d ets_cbs %d pri_tc_tbl_0 %x max_ets_tc %d\n", + p_params->ets_willing, p_params->ets_enabled, + p_params->ets_cbs, 
p_ets->pri_tc_tbl[0], + p_params->max_ets_tc); + + /* 8 bit tsa and bw data corresponding to each of the 8 TC's are + * encoded in a type u32 array of size 2. + */ + bw_map[0] = OSAL_BE32_TO_CPU(p_ets->tc_bw_tbl[0]); + bw_map[1] = OSAL_BE32_TO_CPU(p_ets->tc_bw_tbl[1]); + tsa_map[0] = OSAL_BE32_TO_CPU(p_ets->tc_tsa_tbl[0]); + tsa_map[1] = OSAL_BE32_TO_CPU(p_ets->tc_tsa_tbl[1]); + pri_map = p_ets->pri_tc_tbl[0]; + for (i = 0; i < ECORE_MAX_PFC_PRIORITIES; i++) { + p_params->ets_tc_bw_tbl[i] = ((u8 *)bw_map)[i]; + p_params->ets_tc_tsa_tbl[i] = ((u8 *)tsa_map)[i]; + p_params->ets_pri_tc_tbl[i] = ECORE_DCBX_PRIO2TC(pri_map, i); + DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, + "elem %d bw_tbl %x tsa_tbl %x\n", + i, p_params->ets_tc_bw_tbl[i], + p_params->ets_tc_tsa_tbl[i]); + } +} + +static void +ecore_dcbx_get_common_params(struct ecore_hwfn *p_hwfn, + struct dcbx_app_priority_feature *p_app, + struct dcbx_app_priority_entry *p_tbl, + struct dcbx_ets_feature *p_ets, + u32 pfc, struct ecore_dcbx_params *p_params, + bool ieee) +{ + ecore_dcbx_get_app_data(p_hwfn, p_app, p_tbl, p_params, ieee); + ecore_dcbx_get_ets_data(p_hwfn, p_ets, p_params); + ecore_dcbx_get_pfc_data(p_hwfn, pfc, p_params); +} + +static void +ecore_dcbx_get_local_params(struct ecore_hwfn *p_hwfn, + struct ecore_dcbx_get *params) +{ + struct dcbx_features *p_feat; + + p_feat = &p_hwfn->p_dcbx_info->local_admin.features; + ecore_dcbx_get_common_params(p_hwfn, &p_feat->app, + p_feat->app.app_pri_tbl, &p_feat->ets, + p_feat->pfc, ¶ms->local.params, false); + params->local.valid = true; +} + +static void +ecore_dcbx_get_remote_params(struct ecore_hwfn *p_hwfn, + struct ecore_dcbx_get *params) +{ + struct dcbx_features *p_feat; + + p_feat = &p_hwfn->p_dcbx_info->remote.features; + ecore_dcbx_get_common_params(p_hwfn, &p_feat->app, + p_feat->app.app_pri_tbl, &p_feat->ets, + p_feat->pfc, ¶ms->remote.params, + false); + params->remote.valid = true; +} + +static void ecore_dcbx_get_dscp_params(struct ecore_hwfn *p_hwfn, + struct ecore_dcbx_get *params) +{ + struct ecore_dcbx_dscp_params *p_dscp; + struct dcb_dscp_map *p_dscp_map; + int i, j, entry; + u32 pri_map; + + p_dscp = ¶ms->dscp; + p_dscp_map = &p_hwfn->p_dcbx_info->dscp_map; + p_dscp->enabled = GET_MFW_FIELD(p_dscp_map->flags, DCB_DSCP_ENABLE); + + /* MFW encodes 64 dscp entries into 8 element array of u32 entries, + * where each entry holds the 4bit priority map for 8 dscp entries. 
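+ * For example, if dscp_pri_map[1] is 0x00003210 after the byte swap,
+ * DSCP indices 8..11 decode to priorities 0, 1, 2 and 3 respectively:
+ * entry = i * 8 + j and priority = (pri_map >> (j * 4)) & 0xf.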
+ */ + for (i = 0, entry = 0; i < ECORE_DCBX_DSCP_SIZE / 8; i++) { + pri_map = OSAL_BE32_TO_CPU(p_dscp_map->dscp_pri_map[i]); + DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "elem %d pri_map 0x%x\n", + entry, pri_map); + for (j = 0; j < ECORE_DCBX_DSCP_SIZE / 8; j++, entry++) + p_dscp->dscp_pri_map[entry] = (u32)(pri_map >> + (j * 4)) & 0xf; + } +} + +static void +ecore_dcbx_get_operational_params(struct ecore_hwfn *p_hwfn, + struct ecore_dcbx_get *params) +{ + struct ecore_dcbx_operational_params *p_operational; + struct ecore_dcbx_results *p_results; + struct dcbx_features *p_feat; + bool enabled, err; + u32 flags; + bool val; + + flags = p_hwfn->p_dcbx_info->operational.flags; + + /* If DCBx version is non zero, then negotiation + * was successfuly performed + */ + p_operational = ¶ms->operational; + enabled = !!(GET_MFW_FIELD(flags, DCBX_CONFIG_VERSION) != + DCBX_CONFIG_VERSION_DISABLED); + if (!enabled) { + p_operational->enabled = enabled; + p_operational->valid = false; + DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "Dcbx is disabled\n"); + return; + } + + p_feat = &p_hwfn->p_dcbx_info->operational.features; + p_results = &p_hwfn->p_dcbx_info->results; + + val = !!(GET_MFW_FIELD(flags, DCBX_CONFIG_VERSION) == + DCBX_CONFIG_VERSION_IEEE); + p_operational->ieee = val; + + val = !!(GET_MFW_FIELD(flags, DCBX_CONFIG_VERSION) == + DCBX_CONFIG_VERSION_CEE); + p_operational->cee = val; + + val = !!(GET_MFW_FIELD(flags, DCBX_CONFIG_VERSION) == + DCBX_CONFIG_VERSION_STATIC); + p_operational->local = val; + + DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, + "Version support: ieee %d, cee %d, static %d\n", + p_operational->ieee, p_operational->cee, + p_operational->local); + + ecore_dcbx_get_common_params(p_hwfn, &p_feat->app, + p_feat->app.app_pri_tbl, &p_feat->ets, + p_feat->pfc, ¶ms->operational.params, + p_operational->ieee); + ecore_dcbx_get_priority_info(p_hwfn, &p_operational->app_prio, + p_results); + err = GET_MFW_FIELD(p_feat->app.flags, DCBX_APP_ERROR); + p_operational->err = err; + p_operational->enabled = enabled; + p_operational->valid = true; +} + +static void ecore_dcbx_get_local_lldp_params(struct ecore_hwfn *p_hwfn, + struct ecore_dcbx_get *params) +{ + struct lldp_config_params_s *p_local; + + p_local = &p_hwfn->p_dcbx_info->lldp_local[LLDP_NEAREST_BRIDGE]; + + OSAL_MEMCPY(params->lldp_local.local_chassis_id, + p_local->local_chassis_id, + sizeof(params->lldp_local.local_chassis_id)); + OSAL_MEMCPY(params->lldp_local.local_port_id, p_local->local_port_id, + sizeof(params->lldp_local.local_port_id)); +} + +static void ecore_dcbx_get_remote_lldp_params(struct ecore_hwfn *p_hwfn, + struct ecore_dcbx_get *params) +{ + struct lldp_status_params_s *p_remote; + + p_remote = &p_hwfn->p_dcbx_info->lldp_remote[LLDP_NEAREST_BRIDGE]; + + OSAL_MEMCPY(params->lldp_remote.peer_chassis_id, + p_remote->peer_chassis_id, + sizeof(params->lldp_remote.peer_chassis_id)); + OSAL_MEMCPY(params->lldp_remote.peer_port_id, p_remote->peer_port_id, + sizeof(params->lldp_remote.peer_port_id)); +} + +static enum _ecore_status_t +ecore_dcbx_get_params(struct ecore_hwfn *p_hwfn, + struct ecore_dcbx_get *p_params, + enum ecore_mib_read_type type) +{ + switch (type) { + case ECORE_DCBX_REMOTE_MIB: + ecore_dcbx_get_remote_params(p_hwfn, p_params); + break; + case ECORE_DCBX_LOCAL_MIB: + ecore_dcbx_get_local_params(p_hwfn, p_params); + break; + case ECORE_DCBX_OPERATIONAL_MIB: + ecore_dcbx_get_operational_params(p_hwfn, p_params); + break; + case ECORE_DCBX_REMOTE_LLDP_MIB: + ecore_dcbx_get_remote_lldp_params(p_hwfn, p_params); + break; + case 
ECORE_DCBX_LOCAL_LLDP_MIB: + ecore_dcbx_get_local_lldp_params(p_hwfn, p_params); + break; + default: + DP_ERR(p_hwfn, "MIB read err, unknown mib type %d\n", type); + return ECORE_INVAL; + } + + return ECORE_SUCCESS; +} + +static enum _ecore_status_t +ecore_dcbx_read_local_lldp_mib(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + struct ecore_dcbx_mib_meta_data data; + enum _ecore_status_t rc = ECORE_SUCCESS; + + OSAL_MEM_ZERO(&data, sizeof(data)); + data.addr = p_hwfn->mcp_info->port_addr + offsetof(struct public_port, + lldp_config_params); + data.lldp_local = p_hwfn->p_dcbx_info->lldp_local; + data.size = sizeof(struct lldp_config_params_s); + ecore_memcpy_from(p_hwfn, p_ptt, data.lldp_local, data.addr, data.size); + + return rc; +} + +static enum _ecore_status_t +ecore_dcbx_read_remote_lldp_mib(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + enum ecore_mib_read_type type) +{ + struct ecore_dcbx_mib_meta_data data; + enum _ecore_status_t rc = ECORE_SUCCESS; + + OSAL_MEM_ZERO(&data, sizeof(data)); + data.addr = p_hwfn->mcp_info->port_addr + offsetof(struct public_port, + lldp_status_params); + data.lldp_remote = p_hwfn->p_dcbx_info->lldp_remote; + data.size = sizeof(struct lldp_status_params_s); + rc = ecore_dcbx_copy_mib(p_hwfn, p_ptt, &data, type); + + return rc; +} + +static enum _ecore_status_t +ecore_dcbx_read_operational_mib(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + enum ecore_mib_read_type type) +{ + struct ecore_dcbx_mib_meta_data data; + enum _ecore_status_t rc = ECORE_SUCCESS; + + OSAL_MEM_ZERO(&data, sizeof(data)); + data.addr = p_hwfn->mcp_info->port_addr + + offsetof(struct public_port, operational_dcbx_mib); + data.mib = &p_hwfn->p_dcbx_info->operational; + data.size = sizeof(struct dcbx_mib); + rc = ecore_dcbx_copy_mib(p_hwfn, p_ptt, &data, type); + + return rc; +} + +static enum _ecore_status_t +ecore_dcbx_read_remote_mib(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + enum ecore_mib_read_type type) +{ + struct ecore_dcbx_mib_meta_data data; + enum _ecore_status_t rc = ECORE_SUCCESS; + + OSAL_MEM_ZERO(&data, sizeof(data)); + data.addr = p_hwfn->mcp_info->port_addr + + offsetof(struct public_port, remote_dcbx_mib); + data.mib = &p_hwfn->p_dcbx_info->remote; + data.size = sizeof(struct dcbx_mib); + rc = ecore_dcbx_copy_mib(p_hwfn, p_ptt, &data, type); + + return rc; +} + +static enum _ecore_status_t +ecore_dcbx_read_local_mib(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) +{ + struct ecore_dcbx_mib_meta_data data; + enum _ecore_status_t rc = ECORE_SUCCESS; + + OSAL_MEM_ZERO(&data, sizeof(data)); + data.addr = p_hwfn->mcp_info->port_addr + + offsetof(struct public_port, local_admin_dcbx_mib); + data.local_admin = &p_hwfn->p_dcbx_info->local_admin; + data.size = sizeof(struct dcbx_local_params); + ecore_memcpy_from(p_hwfn, p_ptt, data.local_admin, + data.addr, data.size); + + return rc; +} + +static void +ecore_dcbx_read_dscp_mib(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) +{ + struct ecore_dcbx_mib_meta_data data; + + data.addr = p_hwfn->mcp_info->port_addr + + offsetof(struct public_port, dcb_dscp_map); + data.dscp_map = &p_hwfn->p_dcbx_info->dscp_map; + data.size = sizeof(struct dcb_dscp_map); + ecore_memcpy_from(p_hwfn, p_ptt, data.dscp_map, data.addr, data.size); +} + +static enum _ecore_status_t ecore_dcbx_read_mib(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + enum ecore_mib_read_type type) +{ + enum _ecore_status_t rc = ECORE_INVAL; + + switch (type) { + case ECORE_DCBX_OPERATIONAL_MIB: + 
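+ /* The DSCP map is copied from the port's public_port area before the
+ * operational MIB itself; the MIB read below goes through
+ * ecore_dcbx_copy_mib(), which re-reads until the prefix and suffix
+ * sequence numbers match (up to ECORE_DCBX_MAX_MIB_READ_TRY attempts).
+ */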
ecore_dcbx_read_dscp_mib(p_hwfn, p_ptt); + rc = ecore_dcbx_read_operational_mib(p_hwfn, p_ptt, type); + break; + case ECORE_DCBX_REMOTE_MIB: + rc = ecore_dcbx_read_remote_mib(p_hwfn, p_ptt, type); + break; + case ECORE_DCBX_LOCAL_MIB: + rc = ecore_dcbx_read_local_mib(p_hwfn, p_ptt); + break; + case ECORE_DCBX_REMOTE_LLDP_MIB: + rc = ecore_dcbx_read_remote_lldp_mib(p_hwfn, p_ptt, type); + break; + case ECORE_DCBX_LOCAL_LLDP_MIB: + rc = ecore_dcbx_read_local_lldp_mib(p_hwfn, p_ptt); + break; + default: + DP_ERR(p_hwfn, "MIB read err, unknown mib type %d\n", type); + } + + return ECORE_SUCCESS; +} + +/* + * Read updated MIB. + * Reconfigure QM and invoke PF update ramrod command if operational MIB + * change is detected. + */ +enum _ecore_status_t +ecore_dcbx_mib_update_event(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, + enum ecore_mib_read_type type) +{ + enum _ecore_status_t rc = ECORE_SUCCESS; + + rc = ecore_dcbx_read_mib(p_hwfn, p_ptt, type); + if (rc) + return rc; + + if (type == ECORE_DCBX_OPERATIONAL_MIB) { + ecore_dcbx_get_dscp_params(p_hwfn, &p_hwfn->p_dcbx_info->get); + + rc = ecore_dcbx_process_mib_info(p_hwfn, p_ptt); + if (!rc) { + /* reconfigure tcs of QM queues according + * to negotiation results + */ + ecore_qm_reconf(p_hwfn, p_ptt); + + /* update storm FW with negotiation results */ + ecore_sp_pf_update_dcbx(p_hwfn); + } + } + + ecore_dcbx_get_params(p_hwfn, &p_hwfn->p_dcbx_info->get, type); + + /* Update the DSCP to TC mapping enable bit if required */ + if ((type == ECORE_DCBX_OPERATIONAL_MIB) && + p_hwfn->p_dcbx_info->dscp_nig_update) { + u8 val = !!p_hwfn->p_dcbx_info->get.dscp.enabled; + u32 addr = NIG_REG_DSCP_TO_TC_MAP_ENABLE; + + rc = ecore_all_ppfids_wr(p_hwfn, p_ptt, addr, val); + if (rc != ECORE_SUCCESS) { + DP_NOTICE(p_hwfn, false, + "Failed to update the DSCP to TC mapping enable bit\n"); + return rc; + } + + p_hwfn->p_dcbx_info->dscp_nig_update = false; + } + + OSAL_DCBX_AEN(p_hwfn, type); + + return rc; +} + +enum _ecore_status_t ecore_dcbx_info_alloc(struct ecore_hwfn *p_hwfn) +{ + p_hwfn->p_dcbx_info = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, + sizeof(*p_hwfn->p_dcbx_info)); + if (!p_hwfn->p_dcbx_info) { + DP_NOTICE(p_hwfn, false, + "Failed to allocate `struct ecore_dcbx_info'"); + return ECORE_NOMEM; + } + + p_hwfn->p_dcbx_info->iwarp_port = + p_hwfn->pf_params.rdma_pf_params.iwarp_port; + + return ECORE_SUCCESS; +} + +void ecore_dcbx_info_free(struct ecore_hwfn *p_hwfn) +{ + OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_dcbx_info); +} + +static void ecore_dcbx_update_protocol_data(struct protocol_dcb_data *p_data, + struct ecore_dcbx_results *p_src, + enum dcbx_protocol_type type) +{ + p_data->dcb_enable_flag = p_src->arr[type].enable; + p_data->dcb_priority = p_src->arr[type].priority; + p_data->dcb_tc = p_src->arr[type].tc; + p_data->dscp_enable_flag = p_src->arr[type].dscp_enable; + p_data->dscp_val = p_src->arr[type].dscp_val; + p_data->dcb_dont_add_vlan0 = p_src->arr[type].dont_add_vlan0; +} + +/* Set pf update ramrod command params */ +void ecore_dcbx_set_pf_update_params(struct ecore_dcbx_results *p_src, + struct pf_update_ramrod_data *p_dest) +{ + struct protocol_dcb_data *p_dcb_data; + u8 update_flag; + + update_flag = p_src->arr[DCBX_PROTOCOL_ETH].update; + p_dest->update_eth_dcb_data_mode = update_flag; + update_flag = p_src->arr[DCBX_PROTOCOL_IWARP].update; + p_dest->update_iwarp_dcb_data_mode = update_flag; + + p_dcb_data = &p_dest->eth_dcb_data; + ecore_dcbx_update_protocol_data(p_dcb_data, p_src, DCBX_PROTOCOL_ETH); + p_dcb_data = 
&p_dest->iwarp_dcb_data; + ecore_dcbx_update_protocol_data(p_dcb_data, p_src, DCBX_PROTOCOL_IWARP); +} + +enum _ecore_status_t ecore_dcbx_query_params(struct ecore_hwfn *p_hwfn, + struct ecore_dcbx_get *p_get, + enum ecore_mib_read_type type) +{ + struct ecore_ptt *p_ptt; + enum _ecore_status_t rc; + + if (IS_VF(p_hwfn->p_dev)) + return ECORE_INVAL; + + p_ptt = ecore_ptt_acquire(p_hwfn); + if (!p_ptt) + return ECORE_TIMEOUT; + + rc = ecore_dcbx_read_mib(p_hwfn, p_ptt, type); + if (rc != ECORE_SUCCESS) + goto out; + + ecore_dcbx_get_dscp_params(p_hwfn, p_get); + + rc = ecore_dcbx_get_params(p_hwfn, p_get, type); + +out: + ecore_ptt_release(p_hwfn, p_ptt); + return rc; +} + +static void +ecore_dcbx_set_pfc_data(struct ecore_hwfn *p_hwfn, + u32 *pfc, struct ecore_dcbx_params *p_params) +{ + u8 pfc_map = 0; + int i; + + if (p_params->pfc.willing) + *pfc |= DCBX_PFC_WILLING_MASK; + else + *pfc &= ~DCBX_PFC_WILLING_MASK; + + if (p_params->pfc.enabled) + *pfc |= DCBX_PFC_ENABLED_MASK; + else + *pfc &= ~DCBX_PFC_ENABLED_MASK; + + *pfc &= ~DCBX_PFC_CAPS_MASK; + *pfc |= (u32)p_params->pfc.max_tc << DCBX_PFC_CAPS_OFFSET; + + for (i = 0; i < ECORE_MAX_PFC_PRIORITIES; i++) + if (p_params->pfc.prio[i]) + pfc_map |= (1 << i); + *pfc &= ~DCBX_PFC_PRI_EN_BITMAP_MASK; + *pfc |= (pfc_map << DCBX_PFC_PRI_EN_BITMAP_OFFSET); + + DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "pfc = 0x%x\n", *pfc); +} + +static void +ecore_dcbx_set_ets_data(struct ecore_hwfn *p_hwfn, + struct dcbx_ets_feature *p_ets, + struct ecore_dcbx_params *p_params) +{ + u8 *bw_map, *tsa_map; + u32 val; + int i; + + if (p_params->ets_willing) + p_ets->flags |= DCBX_ETS_WILLING_MASK; + else + p_ets->flags &= ~DCBX_ETS_WILLING_MASK; + + if (p_params->ets_cbs) + p_ets->flags |= DCBX_ETS_CBS_MASK; + else + p_ets->flags &= ~DCBX_ETS_CBS_MASK; + + if (p_params->ets_enabled) + p_ets->flags |= DCBX_ETS_ENABLED_MASK; + else + p_ets->flags &= ~DCBX_ETS_ENABLED_MASK; + + p_ets->flags &= ~DCBX_ETS_MAX_TCS_MASK; + p_ets->flags |= (u32)p_params->max_ets_tc << DCBX_ETS_MAX_TCS_OFFSET; + + bw_map = (u8 *)&p_ets->tc_bw_tbl[0]; + tsa_map = (u8 *)&p_ets->tc_tsa_tbl[0]; + p_ets->pri_tc_tbl[0] = 0; + for (i = 0; i < ECORE_MAX_PFC_PRIORITIES; i++) { + bw_map[i] = p_params->ets_tc_bw_tbl[i]; + tsa_map[i] = p_params->ets_tc_tsa_tbl[i]; + /* Copy the priority value to the corresponding 4 bits in the + * traffic class table. 
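+ * For example, ets_pri_tc_tbl[] = {0, 0, 1, 1, 2, 2, 3, 3} packs into
+ * pri_tc_tbl[0] = 0x00112233, the same nibble-per-priority layout that
+ * ECORE_DCBX_PRIO2TC() decodes on the read path.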
+ */ + val = (((u32)p_params->ets_pri_tc_tbl[i]) << ((7 - i) * 4)); + p_ets->pri_tc_tbl[0] |= val; + } + for (i = 0; i < 2; i++) { + p_ets->tc_bw_tbl[i] = OSAL_CPU_TO_BE32(p_ets->tc_bw_tbl[i]); + p_ets->tc_tsa_tbl[i] = OSAL_CPU_TO_BE32(p_ets->tc_tsa_tbl[i]); + } + + DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, + "flags = 0x%x pri_tc = 0x%x tc_bwl[] = {0x%x, 0x%x} tc_tsa = {0x%x, 0x%x}\n", + p_ets->flags, p_ets->pri_tc_tbl[0], p_ets->tc_bw_tbl[0], + p_ets->tc_bw_tbl[1], p_ets->tc_tsa_tbl[0], + p_ets->tc_tsa_tbl[1]); +} + +static void +ecore_dcbx_set_app_data(struct ecore_hwfn *p_hwfn, + struct dcbx_app_priority_feature *p_app, + struct ecore_dcbx_params *p_params, bool ieee) +{ + u32 *entry; + int i; + + if (p_params->app_willing) + p_app->flags |= DCBX_APP_WILLING_MASK; + else + p_app->flags &= ~DCBX_APP_WILLING_MASK; + + if (p_params->app_valid) + p_app->flags |= DCBX_APP_ENABLED_MASK; + else + p_app->flags &= ~DCBX_APP_ENABLED_MASK; + + p_app->flags &= ~DCBX_APP_NUM_ENTRIES_MASK; + p_app->flags |= (u32)p_params->num_app_entries << + DCBX_APP_NUM_ENTRIES_OFFSET; + + for (i = 0; i < p_params->num_app_entries; i++) { + entry = &p_app->app_pri_tbl[i].entry; + *entry = 0; + if (ieee) { + *entry &= ~(DCBX_APP_SF_IEEE_MASK | DCBX_APP_SF_MASK); + switch (p_params->app_entry[i].sf_ieee) { + case ECORE_DCBX_SF_IEEE_ETHTYPE: + *entry |= ((u32)DCBX_APP_SF_IEEE_ETHTYPE << + DCBX_APP_SF_IEEE_OFFSET); + *entry |= ((u32)DCBX_APP_SF_ETHTYPE << + DCBX_APP_SF_OFFSET); + break; + case ECORE_DCBX_SF_IEEE_TCP_PORT: + *entry |= ((u32)DCBX_APP_SF_IEEE_TCP_PORT << + DCBX_APP_SF_IEEE_OFFSET); + *entry |= ((u32)DCBX_APP_SF_PORT << + DCBX_APP_SF_OFFSET); + break; + case ECORE_DCBX_SF_IEEE_UDP_PORT: + *entry |= ((u32)DCBX_APP_SF_IEEE_UDP_PORT << + DCBX_APP_SF_IEEE_OFFSET); + *entry |= ((u32)DCBX_APP_SF_PORT << + DCBX_APP_SF_OFFSET); + break; + case ECORE_DCBX_SF_IEEE_TCP_UDP_PORT: + *entry |= (u32)DCBX_APP_SF_IEEE_TCP_UDP_PORT << + DCBX_APP_SF_IEEE_OFFSET; + *entry |= ((u32)DCBX_APP_SF_PORT << + DCBX_APP_SF_OFFSET); + break; + } + } else { + *entry &= ~DCBX_APP_SF_MASK; + if (p_params->app_entry[i].ethtype) + *entry |= ((u32)DCBX_APP_SF_ETHTYPE << + DCBX_APP_SF_OFFSET); + else + *entry |= ((u32)DCBX_APP_SF_PORT << + DCBX_APP_SF_OFFSET); + } + *entry &= ~DCBX_APP_PROTOCOL_ID_MASK; + *entry |= ((u32)p_params->app_entry[i].proto_id << + DCBX_APP_PROTOCOL_ID_OFFSET); + *entry &= ~DCBX_APP_PRI_MAP_MASK; + *entry |= ((u32)(p_params->app_entry[i].prio) << + DCBX_APP_PRI_MAP_OFFSET); + } + + DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "flags = 0x%x\n", p_app->flags); +} + +static void +ecore_dcbx_set_local_params(struct ecore_hwfn *p_hwfn, + struct dcbx_local_params *local_admin, + struct ecore_dcbx_set *params) +{ + bool ieee = false; + + local_admin->flags = 0; + OSAL_MEMCPY(&local_admin->features, + &p_hwfn->p_dcbx_info->operational.features, + sizeof(local_admin->features)); + + if (params->enabled) { + local_admin->config = params->ver_num; + ieee = !!(params->ver_num & DCBX_CONFIG_VERSION_IEEE); + } else { + local_admin->config = DCBX_CONFIG_VERSION_DISABLED; + } + + DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "Dcbx version = %d\n", + local_admin->config); + + if (params->override_flags & ECORE_DCBX_OVERRIDE_PFC_CFG) + ecore_dcbx_set_pfc_data(p_hwfn, &local_admin->features.pfc, + ¶ms->config.params); + + if (params->override_flags & ECORE_DCBX_OVERRIDE_ETS_CFG) + ecore_dcbx_set_ets_data(p_hwfn, &local_admin->features.ets, + ¶ms->config.params); + + if (params->override_flags & ECORE_DCBX_OVERRIDE_APP_CFG) + ecore_dcbx_set_app_data(p_hwfn, 
&local_admin->features.app, + ¶ms->config.params, ieee); +} + +static enum _ecore_status_t +ecore_dcbx_set_dscp_params(struct ecore_hwfn *p_hwfn, + struct dcb_dscp_map *p_dscp_map, + struct ecore_dcbx_set *p_params) +{ + int entry, i, j; + u32 val; + + OSAL_MEMCPY(p_dscp_map, &p_hwfn->p_dcbx_info->dscp_map, + sizeof(*p_dscp_map)); + + p_dscp_map->flags &= ~DCB_DSCP_ENABLE_MASK; + if (p_params->dscp.enabled) + p_dscp_map->flags |= DCB_DSCP_ENABLE_MASK; + + for (i = 0, entry = 0; i < 8; i++) { + val = 0; + for (j = 0; j < 8; j++, entry++) + val |= (((u32)p_params->dscp.dscp_pri_map[entry]) << + (j * 4)); + + p_dscp_map->dscp_pri_map[i] = OSAL_CPU_TO_BE32(val); + } + + p_hwfn->p_dcbx_info->dscp_nig_update = true; + + DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "flags = 0x%x\n", p_dscp_map->flags); + DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, + "pri_map[] = 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", + p_dscp_map->dscp_pri_map[0], p_dscp_map->dscp_pri_map[1], + p_dscp_map->dscp_pri_map[2], p_dscp_map->dscp_pri_map[3], + p_dscp_map->dscp_pri_map[4], p_dscp_map->dscp_pri_map[5], + p_dscp_map->dscp_pri_map[6], p_dscp_map->dscp_pri_map[7]); + + return ECORE_SUCCESS; +} + +enum _ecore_status_t ecore_dcbx_config_params(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_dcbx_set *params, + bool hw_commit) +{ + struct dcbx_local_params local_admin; + struct ecore_dcbx_mib_meta_data data; + struct dcb_dscp_map dscp_map; + u32 resp = 0, param = 0; + enum _ecore_status_t rc = ECORE_SUCCESS; + + OSAL_MEMCPY(&p_hwfn->p_dcbx_info->set, params, + sizeof(p_hwfn->p_dcbx_info->set)); + if (!hw_commit) + return ECORE_SUCCESS; + + OSAL_MEMSET(&local_admin, 0, sizeof(local_admin)); + ecore_dcbx_set_local_params(p_hwfn, &local_admin, params); + + data.addr = p_hwfn->mcp_info->port_addr + + offsetof(struct public_port, local_admin_dcbx_mib); + data.local_admin = &local_admin; + data.size = sizeof(struct dcbx_local_params); + ecore_memcpy_to(p_hwfn, p_ptt, data.addr, data.local_admin, data.size); + + if (params->override_flags & ECORE_DCBX_OVERRIDE_DSCP_CFG) { + OSAL_MEMSET(&dscp_map, 0, sizeof(dscp_map)); + ecore_dcbx_set_dscp_params(p_hwfn, &dscp_map, params); + + data.addr = p_hwfn->mcp_info->port_addr + + offsetof(struct public_port, dcb_dscp_map); + data.dscp_map = &dscp_map; + data.size = sizeof(struct dcb_dscp_map); + ecore_memcpy_to(p_hwfn, p_ptt, data.addr, data.dscp_map, + data.size); + } + + rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_DCBX, + 1 << DRV_MB_PARAM_LLDP_SEND_OFFSET, &resp, ¶m); + if (rc != ECORE_SUCCESS) + DP_NOTICE(p_hwfn, false, + "Failed to send DCBX update request\n"); + + return rc; +} + +enum _ecore_status_t ecore_dcbx_get_config_params(struct ecore_hwfn *p_hwfn, + struct ecore_dcbx_set *params) +{ + struct ecore_dcbx_get *dcbx_info; + int rc; + + if (p_hwfn->p_dcbx_info->set.config.valid) { + OSAL_MEMCPY(params, &p_hwfn->p_dcbx_info->set, + sizeof(struct ecore_dcbx_set)); + return ECORE_SUCCESS; + } + + dcbx_info = OSAL_ALLOC(p_hwfn->p_dev, GFP_KERNEL, + sizeof(*dcbx_info)); + if (!dcbx_info) + return ECORE_NOMEM; + + OSAL_MEMSET(dcbx_info, 0, sizeof(*dcbx_info)); + rc = ecore_dcbx_query_params(p_hwfn, dcbx_info, + ECORE_DCBX_OPERATIONAL_MIB); + if (rc) { + OSAL_FREE(p_hwfn->p_dev, dcbx_info); + return rc; + } + p_hwfn->p_dcbx_info->set.override_flags = 0; + + p_hwfn->p_dcbx_info->set.ver_num = DCBX_CONFIG_VERSION_DISABLED; + if (dcbx_info->operational.cee) + p_hwfn->p_dcbx_info->set.ver_num |= DCBX_CONFIG_VERSION_CEE; + if (dcbx_info->operational.ieee) + 
p_hwfn->p_dcbx_info->set.ver_num |= DCBX_CONFIG_VERSION_IEEE; + if (dcbx_info->operational.local) + p_hwfn->p_dcbx_info->set.ver_num |= DCBX_CONFIG_VERSION_STATIC; + + p_hwfn->p_dcbx_info->set.enabled = dcbx_info->operational.enabled; + OSAL_MEMCPY(&p_hwfn->p_dcbx_info->set.dscp, + &p_hwfn->p_dcbx_info->get.dscp, + sizeof(struct ecore_dcbx_dscp_params)); + OSAL_MEMCPY(&p_hwfn->p_dcbx_info->set.config.params, + &dcbx_info->operational.params, + sizeof(p_hwfn->p_dcbx_info->set.config.params)); + p_hwfn->p_dcbx_info->set.config.valid = true; + + OSAL_MEMCPY(params, &p_hwfn->p_dcbx_info->set, + sizeof(struct ecore_dcbx_set)); + + OSAL_FREE(p_hwfn->p_dev, dcbx_info); + + return ECORE_SUCCESS; +} + +enum _ecore_status_t ecore_lldp_register_tlv(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + enum ecore_lldp_agent agent, + u8 tlv_type) +{ + u32 mb_param = 0, mcp_resp = 0, mcp_param = 0, val = 0; + enum _ecore_status_t rc = ECORE_SUCCESS; + + switch (agent) { + case ECORE_LLDP_NEAREST_BRIDGE: + val = LLDP_NEAREST_BRIDGE; + break; + case ECORE_LLDP_NEAREST_NON_TPMR_BRIDGE: + val = LLDP_NEAREST_NON_TPMR_BRIDGE; + break; + case ECORE_LLDP_NEAREST_CUSTOMER_BRIDGE: + val = LLDP_NEAREST_CUSTOMER_BRIDGE; + break; + default: + DP_ERR(p_hwfn, "Invalid agent type %d\n", agent); + return ECORE_INVAL; + } + + SET_MFW_FIELD(mb_param, DRV_MB_PARAM_LLDP_AGENT, val); + SET_MFW_FIELD(mb_param, DRV_MB_PARAM_LLDP_TLV_RX_TYPE, tlv_type); + + rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_REGISTER_LLDP_TLVS_RX, + mb_param, &mcp_resp, &mcp_param); + if (rc != ECORE_SUCCESS) + DP_NOTICE(p_hwfn, false, "Failed to register TLV\n"); + + return rc; +} + +enum _ecore_status_t +ecore_lldp_mib_update_event(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) +{ + struct ecore_dcbx_mib_meta_data data; + enum _ecore_status_t rc = ECORE_SUCCESS; + struct lldp_received_tlvs_s tlvs; + int i; + + for (i = 0; i < LLDP_MAX_LLDP_AGENTS; i++) { + OSAL_MEM_ZERO(&data, sizeof(data)); + data.addr = p_hwfn->mcp_info->port_addr + + offsetof(struct public_port, lldp_received_tlvs[i]); + data.lldp_tlvs = &tlvs; + data.size = sizeof(tlvs); + rc = ecore_dcbx_copy_mib(p_hwfn, p_ptt, &data, + ECORE_DCBX_LLDP_TLVS); + if (rc != ECORE_SUCCESS) { + DP_NOTICE(p_hwfn, false, "Failed to read lldp TLVs\n"); + return rc; + } + + if (!tlvs.length) + continue; + + for (i = 0; i < MAX_TLV_BUFFER; i++) + tlvs.tlvs_buffer[i] = + OSAL_CPU_TO_BE32(tlvs.tlvs_buffer[i]); + + OSAL_LLDP_RX_TLVS(p_hwfn, tlvs.tlvs_buffer, tlvs.length); + } + + return rc; +} + +enum _ecore_status_t +ecore_lldp_get_params(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, + struct ecore_lldp_config_params *p_params) +{ + struct lldp_config_params_s lldp_params; + u32 addr, val; + int i; + + switch (p_params->agent) { + case ECORE_LLDP_NEAREST_BRIDGE: + val = LLDP_NEAREST_BRIDGE; + break; + case ECORE_LLDP_NEAREST_NON_TPMR_BRIDGE: + val = LLDP_NEAREST_NON_TPMR_BRIDGE; + break; + case ECORE_LLDP_NEAREST_CUSTOMER_BRIDGE: + val = LLDP_NEAREST_CUSTOMER_BRIDGE; + break; + default: + DP_ERR(p_hwfn, "Invalid agent type %d\n", p_params->agent); + return ECORE_INVAL; + } + + addr = p_hwfn->mcp_info->port_addr + + offsetof(struct public_port, lldp_config_params[val]); + + ecore_memcpy_from(p_hwfn, p_ptt, &lldp_params, addr, + sizeof(lldp_params)); + + p_params->tx_interval = GET_MFW_FIELD(lldp_params.config, + LLDP_CONFIG_TX_INTERVAL); + p_params->tx_hold = GET_MFW_FIELD(lldp_params.config, LLDP_CONFIG_HOLD); + p_params->tx_credit = GET_MFW_FIELD(lldp_params.config, + 
LLDP_CONFIG_MAX_CREDIT); + p_params->rx_enable = GET_MFW_FIELD(lldp_params.config, + LLDP_CONFIG_ENABLE_RX); + p_params->tx_enable = GET_MFW_FIELD(lldp_params.config, + LLDP_CONFIG_ENABLE_TX); + + OSAL_MEMCPY(p_params->chassis_id_tlv, lldp_params.local_chassis_id, + sizeof(p_params->chassis_id_tlv)); + for (i = 0; i < ECORE_LLDP_CHASSIS_ID_STAT_LEN; i++) + p_params->chassis_id_tlv[i] = + OSAL_BE32_TO_CPU(p_params->chassis_id_tlv[i]); + + OSAL_MEMCPY(p_params->port_id_tlv, lldp_params.local_port_id, + sizeof(p_params->port_id_tlv)); + for (i = 0; i < ECORE_LLDP_PORT_ID_STAT_LEN; i++) + p_params->port_id_tlv[i] = + OSAL_BE32_TO_CPU(p_params->port_id_tlv[i]); + + return ECORE_SUCCESS; +} + +enum _ecore_status_t +ecore_lldp_set_params(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, + struct ecore_lldp_config_params *p_params) +{ + u32 mb_param = 0, mcp_resp = 0, mcp_param = 0; + struct lldp_config_params_s lldp_params; + enum _ecore_status_t rc = ECORE_SUCCESS; + u32 addr, val; + int i; + + switch (p_params->agent) { + case ECORE_LLDP_NEAREST_BRIDGE: + val = LLDP_NEAREST_BRIDGE; + break; + case ECORE_LLDP_NEAREST_NON_TPMR_BRIDGE: + val = LLDP_NEAREST_NON_TPMR_BRIDGE; + break; + case ECORE_LLDP_NEAREST_CUSTOMER_BRIDGE: + val = LLDP_NEAREST_CUSTOMER_BRIDGE; + break; + default: + DP_ERR(p_hwfn, "Invalid agent type %d\n", p_params->agent); + return ECORE_INVAL; + } + + SET_MFW_FIELD(mb_param, DRV_MB_PARAM_LLDP_AGENT, val); + addr = p_hwfn->mcp_info->port_addr + + offsetof(struct public_port, lldp_config_params[val]); + + OSAL_MEMSET(&lldp_params, 0, sizeof(lldp_params)); + SET_MFW_FIELD(lldp_params.config, LLDP_CONFIG_TX_INTERVAL, + p_params->tx_interval); + SET_MFW_FIELD(lldp_params.config, LLDP_CONFIG_HOLD, p_params->tx_hold); + SET_MFW_FIELD(lldp_params.config, LLDP_CONFIG_MAX_CREDIT, + p_params->tx_credit); + SET_MFW_FIELD(lldp_params.config, LLDP_CONFIG_ENABLE_RX, + !!p_params->rx_enable); + SET_MFW_FIELD(lldp_params.config, LLDP_CONFIG_ENABLE_TX, + !!p_params->tx_enable); + + for (i = 0; i < ECORE_LLDP_CHASSIS_ID_STAT_LEN; i++) + p_params->chassis_id_tlv[i] = + OSAL_CPU_TO_BE32(p_params->chassis_id_tlv[i]); + OSAL_MEMCPY(lldp_params.local_chassis_id, p_params->chassis_id_tlv, + sizeof(lldp_params.local_chassis_id)); + + for (i = 0; i < ECORE_LLDP_PORT_ID_STAT_LEN; i++) + p_params->port_id_tlv[i] = + OSAL_CPU_TO_BE32(p_params->port_id_tlv[i]); + OSAL_MEMCPY(lldp_params.local_port_id, p_params->port_id_tlv, + sizeof(lldp_params.local_port_id)); + + ecore_memcpy_to(p_hwfn, p_ptt, addr, &lldp_params, sizeof(lldp_params)); + + rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LLDP, + mb_param, &mcp_resp, &mcp_param); + if (rc != ECORE_SUCCESS) + DP_NOTICE(p_hwfn, false, "SET_LLDP failed, error = %d\n", rc); + + return rc; +} + +enum _ecore_status_t +ecore_lldp_set_system_tlvs(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, + struct ecore_lldp_sys_tlvs *p_params) +{ + u32 mb_param = 0, mcp_resp = 0, mcp_param = 0; + enum _ecore_status_t rc = ECORE_SUCCESS; + struct lldp_system_tlvs_buffer_s lld_tlv_buf; + u32 addr, *p_val; + u8 len; + int i; + + p_val = (u32 *)p_params->buf; + for (i = 0; i < ECORE_LLDP_SYS_TLV_SIZE / 4; i++) + p_val[i] = OSAL_CPU_TO_BE32(p_val[i]); + + OSAL_MEMSET(&lld_tlv_buf, 0, sizeof(lld_tlv_buf)); + SET_MFW_FIELD(lld_tlv_buf.flags, LLDP_SYSTEM_TLV_VALID, 1); + SET_MFW_FIELD(lld_tlv_buf.flags, LLDP_SYSTEM_TLV_MANDATORY, + !!p_params->discard_mandatory_tlv); + SET_MFW_FIELD(lld_tlv_buf.flags, LLDP_SYSTEM_TLV_LENGTH, + p_params->buf_size); + len = 
ECORE_LLDP_SYS_TLV_SIZE / 2; + OSAL_MEMCPY(lld_tlv_buf.data, p_params->buf, len); + + addr = p_hwfn->mcp_info->port_addr + + offsetof(struct public_port, system_lldp_tlvs_buf); + ecore_memcpy_to(p_hwfn, p_ptt, addr, &lld_tlv_buf, sizeof(lld_tlv_buf)); + + if (p_params->buf_size > len) { + addr = p_hwfn->mcp_info->port_addr + + offsetof(struct public_port, system_lldp_tlvs_buf2); + ecore_memcpy_to(p_hwfn, p_ptt, addr, &p_params->buf[len], + ECORE_LLDP_SYS_TLV_SIZE / 2); + } + + rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LLDP, + mb_param, &mcp_resp, &mcp_param); + if (rc != ECORE_SUCCESS) + DP_NOTICE(p_hwfn, false, "SET_LLDP failed, error = %d\n", rc); + + return rc; +} + +enum _ecore_status_t +ecore_dcbx_get_dscp_priority(struct ecore_hwfn *p_hwfn, + u8 dscp_index, u8 *p_dscp_pri) +{ + struct ecore_dcbx_get *p_dcbx_info; + enum _ecore_status_t rc; + + if (dscp_index >= ECORE_DCBX_DSCP_SIZE) { + DP_ERR(p_hwfn, "Invalid dscp index %d\n", dscp_index); + return ECORE_INVAL; + } + + p_dcbx_info = OSAL_ALLOC(p_hwfn->p_dev, GFP_KERNEL, + sizeof(*p_dcbx_info)); + if (!p_dcbx_info) + return ECORE_NOMEM; + + OSAL_MEMSET(p_dcbx_info, 0, sizeof(*p_dcbx_info)); + rc = ecore_dcbx_query_params(p_hwfn, p_dcbx_info, + ECORE_DCBX_OPERATIONAL_MIB); + if (rc) { + OSAL_FREE(p_hwfn->p_dev, p_dcbx_info); + return rc; + } + + *p_dscp_pri = p_dcbx_info->dscp.dscp_pri_map[dscp_index]; + OSAL_FREE(p_hwfn->p_dev, p_dcbx_info); + + return ECORE_SUCCESS; +} + +enum _ecore_status_t +ecore_dcbx_set_dscp_priority(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, + u8 dscp_index, u8 pri_val) +{ + struct ecore_dcbx_set dcbx_set; + enum _ecore_status_t rc; + + if (dscp_index >= ECORE_DCBX_DSCP_SIZE || + pri_val >= ECORE_MAX_PFC_PRIORITIES) { + DP_ERR(p_hwfn, "Invalid dscp params: index = %d pri = %d\n", + dscp_index, pri_val); + return ECORE_INVAL; + } + + OSAL_MEMSET(&dcbx_set, 0, sizeof(dcbx_set)); + rc = ecore_dcbx_get_config_params(p_hwfn, &dcbx_set); + if (rc) + return rc; + + dcbx_set.override_flags = ECORE_DCBX_OVERRIDE_DSCP_CFG; + dcbx_set.dscp.dscp_pri_map[dscp_index] = pri_val; + + return ecore_dcbx_config_params(p_hwfn, p_ptt, &dcbx_set, 1); +} diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_dcbx.h b/src/spdk/dpdk/drivers/net/qede/base/ecore_dcbx.h new file mode 100644 index 000000000..519e6ceaa --- /dev/null +++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_dcbx.h @@ -0,0 +1,62 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. + * All rights reserved. 
+ * www.cavium.com + */ + +#ifndef __ECORE_DCBX_H__ +#define __ECORE_DCBX_H__ + +#include "ecore.h" +#include "ecore_mcp.h" +#include "mcp_public.h" +#include "reg_addr.h" +#include "ecore_hw.h" +#include "ecore_hsi_common.h" +#include "ecore_dcbx_api.h" + +#define ECORE_DCBX_DSCP_DISABLED 0XFF + +struct ecore_dcbx_info { + struct lldp_status_params_s lldp_remote[LLDP_MAX_LLDP_AGENTS]; + struct lldp_config_params_s lldp_local[LLDP_MAX_LLDP_AGENTS]; + struct dcbx_local_params local_admin; + struct ecore_dcbx_results results; + struct dcb_dscp_map dscp_map; + bool dscp_nig_update; + struct dcbx_mib operational; + struct dcbx_mib remote; + struct ecore_dcbx_set set; + struct ecore_dcbx_get get; + u8 dcbx_cap; + u16 iwarp_port; +}; + +struct ecore_dcbx_mib_meta_data { + struct lldp_config_params_s *lldp_local; + struct lldp_status_params_s *lldp_remote; + struct lldp_received_tlvs_s *lldp_tlvs; + struct dcbx_local_params *local_admin; + struct dcb_dscp_map *dscp_map; + struct dcbx_mib *mib; + osal_size_t size; + u32 addr; +}; + +/* ECORE local interface routines */ +enum _ecore_status_t +ecore_dcbx_mib_update_event(struct ecore_hwfn *, struct ecore_ptt *, + enum ecore_mib_read_type); + +enum _ecore_status_t ecore_dcbx_info_alloc(struct ecore_hwfn *p_hwfn); +void ecore_dcbx_info_free(struct ecore_hwfn *p_hwfn); +void ecore_dcbx_set_pf_update_params(struct ecore_dcbx_results *p_src, + struct pf_update_ramrod_data *p_dest); + +/* Returns TOS value for a given priority */ +u8 ecore_dcbx_get_dscp_value(struct ecore_hwfn *p_hwfn, u8 pri); + +enum _ecore_status_t +ecore_lldp_mib_update_event(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt); + +#endif /* __ECORE_DCBX_H__ */ diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_dcbx_api.h b/src/spdk/dpdk/drivers/net/qede/base/ecore_dcbx_api.h new file mode 100644 index 000000000..6fad2ecc2 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_dcbx_api.h @@ -0,0 +1,250 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. + * All rights reserved. 
+ * www.cavium.com + */ + +#ifndef __ECORE_DCBX_API_H__ +#define __ECORE_DCBX_API_H__ + +#include "ecore_status.h" + +#define DCBX_CONFIG_MAX_APP_PROTOCOL 4 + +enum ecore_mib_read_type { + ECORE_DCBX_OPERATIONAL_MIB, + ECORE_DCBX_REMOTE_MIB, + ECORE_DCBX_LOCAL_MIB, + ECORE_DCBX_REMOTE_LLDP_MIB, + ECORE_DCBX_LOCAL_LLDP_MIB, + ECORE_DCBX_LLDP_TLVS +}; + +struct ecore_dcbx_app_data { + bool enable; /* DCB enabled */ + u8 update; /* Update indication */ + u8 priority; /* Priority */ + u8 tc; /* Traffic Class */ + bool dscp_enable; /* DSCP enabled */ + u8 dscp_val; /* DSCP value */ + bool dont_add_vlan0; /* Do not insert a vlan tag with id 0 */ +}; + +#ifndef __EXTRACT__LINUX__ +enum dcbx_protocol_type { + DCBX_PROTOCOL_ISCSI, + DCBX_PROTOCOL_FCOE, + DCBX_PROTOCOL_ROCE, + DCBX_PROTOCOL_ROCE_V2, + DCBX_PROTOCOL_ETH, + DCBX_PROTOCOL_IWARP, + DCBX_MAX_PROTOCOL_TYPE +}; + +#define ECORE_LLDP_CHASSIS_ID_STAT_LEN 4 +#define ECORE_LLDP_PORT_ID_STAT_LEN 4 +#define ECORE_DCBX_MAX_APP_PROTOCOL 32 +#define ECORE_MAX_PFC_PRIORITIES 8 +#define ECORE_DCBX_DSCP_SIZE 64 + +struct ecore_dcbx_lldp_remote { + u32 peer_chassis_id[ECORE_LLDP_CHASSIS_ID_STAT_LEN]; + u32 peer_port_id[ECORE_LLDP_PORT_ID_STAT_LEN]; + bool enable_rx; + bool enable_tx; + u32 tx_interval; + u32 max_credit; +}; + +struct ecore_dcbx_lldp_local { + u32 local_chassis_id[ECORE_LLDP_CHASSIS_ID_STAT_LEN]; + u32 local_port_id[ECORE_LLDP_PORT_ID_STAT_LEN]; +}; + +struct ecore_dcbx_app_prio { + u8 roce; + u8 roce_v2; + u8 fcoe; + u8 iscsi; + u8 eth; +}; + +struct ecore_dbcx_pfc_params { + bool willing; + bool enabled; + u8 prio[ECORE_MAX_PFC_PRIORITIES]; + u8 max_tc; +}; + +enum ecore_dcbx_sf_ieee_type { + ECORE_DCBX_SF_IEEE_ETHTYPE, + ECORE_DCBX_SF_IEEE_TCP_PORT, + ECORE_DCBX_SF_IEEE_UDP_PORT, + ECORE_DCBX_SF_IEEE_TCP_UDP_PORT +}; + +struct ecore_app_entry { + bool ethtype; + enum ecore_dcbx_sf_ieee_type sf_ieee; + bool enabled; + u8 prio; + u16 proto_id; + enum dcbx_protocol_type proto_type; +}; + +struct ecore_dcbx_params { + struct ecore_app_entry app_entry[ECORE_DCBX_MAX_APP_PROTOCOL]; + u16 num_app_entries; + bool app_willing; + bool app_valid; + bool app_error; + bool ets_willing; + bool ets_enabled; + bool ets_cbs; + u8 ets_pri_tc_tbl[ECORE_MAX_PFC_PRIORITIES]; + u8 ets_tc_bw_tbl[ECORE_MAX_PFC_PRIORITIES]; + u8 ets_tc_tsa_tbl[ECORE_MAX_PFC_PRIORITIES]; + struct ecore_dbcx_pfc_params pfc; + u8 max_ets_tc; +}; + +struct ecore_dcbx_admin_params { + struct ecore_dcbx_params params; + bool valid; /* Indicate validity of params */ +}; + +struct ecore_dcbx_remote_params { + struct ecore_dcbx_params params; + bool valid; /* Indicate validity of params */ +}; + +struct ecore_dcbx_operational_params { + struct ecore_dcbx_app_prio app_prio; + struct ecore_dcbx_params params; + bool valid; /* Indicate validity of params */ + bool enabled; + bool ieee; + bool cee; + bool local; + u32 err; +}; + +struct ecore_dcbx_dscp_params { + bool enabled; + u8 dscp_pri_map[ECORE_DCBX_DSCP_SIZE]; +}; + +struct ecore_dcbx_get { + struct ecore_dcbx_operational_params operational; + struct ecore_dcbx_lldp_remote lldp_remote; + struct ecore_dcbx_lldp_local lldp_local; + struct ecore_dcbx_remote_params remote; + struct ecore_dcbx_admin_params local; + struct ecore_dcbx_dscp_params dscp; +}; +#endif + +#define ECORE_DCBX_VERSION_DISABLED 0 +#define ECORE_DCBX_VERSION_IEEE 1 +#define ECORE_DCBX_VERSION_CEE 2 +#define ECORE_DCBX_VERSION_DYNAMIC 3 + +struct ecore_dcbx_set { +#define ECORE_DCBX_OVERRIDE_STATE (1 << 0) +#define ECORE_DCBX_OVERRIDE_PFC_CFG (1 << 1) +#define 
ECORE_DCBX_OVERRIDE_ETS_CFG (1 << 2) +#define ECORE_DCBX_OVERRIDE_APP_CFG (1 << 3) +#define ECORE_DCBX_OVERRIDE_DSCP_CFG (1 << 4) + u32 override_flags; + bool enabled; + struct ecore_dcbx_admin_params config; + u32 ver_num; + struct ecore_dcbx_dscp_params dscp; +}; + +struct ecore_dcbx_results { + bool dcbx_enabled; + u8 pf_id; + struct ecore_dcbx_app_data arr[DCBX_MAX_PROTOCOL_TYPE]; +}; + +struct ecore_dcbx_app_metadata { + enum dcbx_protocol_type id; + const char *name; /* @DPDK */ + enum ecore_pci_personality personality; +}; + +enum ecore_lldp_agent { + ECORE_LLDP_NEAREST_BRIDGE = 0, + ECORE_LLDP_NEAREST_NON_TPMR_BRIDGE, + ECORE_LLDP_NEAREST_CUSTOMER_BRIDGE, + ECORE_LLDP_MAX_AGENTS +}; + +struct ecore_lldp_config_params { + enum ecore_lldp_agent agent; + u8 tx_interval; + u8 tx_hold; + u8 tx_credit; + bool rx_enable; + bool tx_enable; + u32 chassis_id_tlv[ECORE_LLDP_CHASSIS_ID_STAT_LEN]; + u32 port_id_tlv[ECORE_LLDP_PORT_ID_STAT_LEN]; +}; + +#define ECORE_LLDP_SYS_TLV_SIZE 256 +struct ecore_lldp_sys_tlvs { + bool discard_mandatory_tlv; + u8 buf[ECORE_LLDP_SYS_TLV_SIZE]; + u16 buf_size; +}; + +enum _ecore_status_t ecore_dcbx_query_params(struct ecore_hwfn *, + struct ecore_dcbx_get *, + enum ecore_mib_read_type); + +enum _ecore_status_t ecore_dcbx_get_config_params(struct ecore_hwfn *, + struct ecore_dcbx_set *); + +enum _ecore_status_t ecore_dcbx_config_params(struct ecore_hwfn *, + struct ecore_ptt *, + struct ecore_dcbx_set *, + bool); + +enum _ecore_status_t ecore_lldp_register_tlv(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + enum ecore_lldp_agent agent, + u8 tlv_type); + +enum _ecore_status_t +ecore_lldp_get_params(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, + struct ecore_lldp_config_params *p_params); + +enum _ecore_status_t +ecore_lldp_set_params(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, + struct ecore_lldp_config_params *p_params); + +enum _ecore_status_t +ecore_lldp_set_system_tlvs(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, + struct ecore_lldp_sys_tlvs *p_params); + +/* Returns priority value for a given dscp index */ +enum _ecore_status_t +ecore_dcbx_get_dscp_priority(struct ecore_hwfn *p_hwfn, + u8 dscp_index, u8 *p_dscp_pri); + +/* Sets priority value for a given dscp index */ +enum _ecore_status_t +ecore_dcbx_set_dscp_priority(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, + u8 dscp_index, u8 pri_val); + +static const struct ecore_dcbx_app_metadata ecore_dcbx_app_update[] = { + {DCBX_PROTOCOL_ISCSI, "ISCSI", ECORE_PCI_ISCSI}, + {DCBX_PROTOCOL_FCOE, "FCOE", ECORE_PCI_FCOE}, + {DCBX_PROTOCOL_ROCE, "ROCE", ECORE_PCI_ETH_ROCE}, + {DCBX_PROTOCOL_ROCE_V2, "ROCE_V2", ECORE_PCI_ETH_ROCE}, + {DCBX_PROTOCOL_ETH, "ETH", ECORE_PCI_ETH}, + {DCBX_PROTOCOL_IWARP, "IWARP", ECORE_PCI_ETH_IWARP} +}; + +#endif /* __ECORE_DCBX_API_H__ */ diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_dev.c b/src/spdk/dpdk/drivers/net/qede/base/ecore_dev.c new file mode 100644 index 000000000..86ecfb269 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_dev.c @@ -0,0 +1,6799 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. + * All rights reserved. 
+ * www.cavium.com + */ + +#include "bcm_osal.h" +#include "reg_addr.h" +#include "ecore_gtt_reg_addr.h" +#include "ecore.h" +#include "ecore_chain.h" +#include "ecore_status.h" +#include "ecore_hw.h" +#include "ecore_rt_defs.h" +#include "ecore_init_ops.h" +#include "ecore_int.h" +#include "ecore_cxt.h" +#include "ecore_spq.h" +#include "ecore_init_fw_funcs.h" +#include "ecore_sp_commands.h" +#include "ecore_dev_api.h" +#include "ecore_sriov.h" +#include "ecore_vf.h" +#include "ecore_mcp.h" +#include "ecore_hw_defs.h" +#include "mcp_public.h" +#include "ecore_iro.h" +#include "nvm_cfg.h" +#include "ecore_dcbx.h" +#include "ecore_l2.h" + +/* TODO - there's a bug in DCBx re-configuration flows in MF, as the QM + * registers involved are not split and thus configuration is a race where + * some of the PFs configuration might be lost. + * Eventually, this needs to move into a MFW-covered HW-lock as arbitration + * mechanism as this doesn't cover some cases [E.g., PDA or scenarios where + * there's more than a single compiled ecore component in system]. + */ +static osal_spinlock_t qm_lock; +static u32 qm_lock_ref_cnt; + +#ifndef ASIC_ONLY +static bool b_ptt_gtt_init; +#endif + +/******************** Doorbell Recovery *******************/ +/* The doorbell recovery mechanism consists of a list of entries which represent + * doorbelling entities (l2 queues, roce sq/rq/cqs, the slowpath spq, etc). Each + * entity needs to register with the mechanism and provide the parameters + * describing it's doorbell, including a location where last used doorbell data + * can be found. The doorbell execute function will traverse the list and + * doorbell all of the registered entries. + */ +struct ecore_db_recovery_entry { + osal_list_entry_t list_entry; + void OSAL_IOMEM *db_addr; + void *db_data; + enum ecore_db_rec_width db_width; + enum ecore_db_rec_space db_space; + u8 hwfn_idx; +}; + +/* display a single doorbell recovery entry */ +void ecore_db_recovery_dp_entry(struct ecore_hwfn *p_hwfn, + struct ecore_db_recovery_entry *db_entry, + const char *action) +{ + DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "(%s: db_entry %p, addr %p, data %p, width %s, %s space, hwfn %d)\n", + action, db_entry, db_entry->db_addr, db_entry->db_data, + db_entry->db_width == DB_REC_WIDTH_32B ? "32b" : "64b", + db_entry->db_space == DB_REC_USER ? "user" : "kernel", + db_entry->hwfn_idx); +} + +/* doorbell address sanity (address within doorbell bar range) */ +bool ecore_db_rec_sanity(struct ecore_dev *p_dev, void OSAL_IOMEM *db_addr, + void *db_data) +{ + /* make sure doorbell address is within the doorbell bar */ + if (db_addr < p_dev->doorbells || (u8 *)db_addr > + (u8 *)p_dev->doorbells + p_dev->db_size) { + OSAL_WARN(true, + "Illegal doorbell address: %p. Legal range for doorbell addresses is [%p..%p]\n", + db_addr, p_dev->doorbells, + (u8 *)p_dev->doorbells + p_dev->db_size); + return false; + } + + /* make sure doorbell data pointer is not null */ + if (!db_data) { + OSAL_WARN(true, "Illegal doorbell data pointer: %p", db_data); + return false; + } + + return true; +} + +/* find hwfn according to the doorbell address */ +struct ecore_hwfn *ecore_db_rec_find_hwfn(struct ecore_dev *p_dev, + void OSAL_IOMEM *db_addr) +{ + struct ecore_hwfn *p_hwfn; + + /* In CMT doorbell bar is split down the middle between engine 0 and + * enigne 1 + */ + if (ECORE_IS_CMT(p_dev)) + p_hwfn = db_addr < p_dev->hwfns[1].doorbells ? 
+ &p_dev->hwfns[0] : &p_dev->hwfns[1]; + else + p_hwfn = ECORE_LEADING_HWFN(p_dev); + + return p_hwfn; +} + +/* add a new entry to the doorbell recovery mechanism */ +enum _ecore_status_t ecore_db_recovery_add(struct ecore_dev *p_dev, + void OSAL_IOMEM *db_addr, + void *db_data, + enum ecore_db_rec_width db_width, + enum ecore_db_rec_space db_space) +{ + struct ecore_db_recovery_entry *db_entry; + struct ecore_hwfn *p_hwfn; + + /* shortcircuit VFs, for now */ + if (IS_VF(p_dev)) { + DP_VERBOSE(p_dev, ECORE_MSG_IOV, "db recovery - skipping VF doorbell\n"); + return ECORE_SUCCESS; + } + + /* sanitize doorbell address */ + if (!ecore_db_rec_sanity(p_dev, db_addr, db_data)) + return ECORE_INVAL; + + /* obtain hwfn from doorbell address */ + p_hwfn = ecore_db_rec_find_hwfn(p_dev, db_addr); + + /* create entry */ + db_entry = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*db_entry)); + if (!db_entry) { + DP_NOTICE(p_dev, false, "Failed to allocate a db recovery entry\n"); + return ECORE_NOMEM; + } + + /* populate entry */ + db_entry->db_addr = db_addr; + db_entry->db_data = db_data; + db_entry->db_width = db_width; + db_entry->db_space = db_space; + db_entry->hwfn_idx = p_hwfn->my_id; + + /* display */ + ecore_db_recovery_dp_entry(p_hwfn, db_entry, "Adding"); + + /* protect the list */ + OSAL_SPIN_LOCK(&p_hwfn->db_recovery_info.lock); + OSAL_LIST_PUSH_TAIL(&db_entry->list_entry, + &p_hwfn->db_recovery_info.list); + OSAL_SPIN_UNLOCK(&p_hwfn->db_recovery_info.lock); + + return ECORE_SUCCESS; +} + +/* remove an entry from the doorbell recovery mechanism */ +enum _ecore_status_t ecore_db_recovery_del(struct ecore_dev *p_dev, + void OSAL_IOMEM *db_addr, + void *db_data) +{ + struct ecore_db_recovery_entry *db_entry = OSAL_NULL; + enum _ecore_status_t rc = ECORE_INVAL; + struct ecore_hwfn *p_hwfn; + + /* shortcircuit VFs, for now */ + if (IS_VF(p_dev)) { + DP_VERBOSE(p_dev, ECORE_MSG_IOV, "db recovery - skipping VF doorbell\n"); + return ECORE_SUCCESS; + } + + /* sanitize doorbell address */ + if (!ecore_db_rec_sanity(p_dev, db_addr, db_data)) + return ECORE_INVAL; + + /* obtain hwfn from doorbell address */ + p_hwfn = ecore_db_rec_find_hwfn(p_dev, db_addr); + + /* protect the list */ + OSAL_SPIN_LOCK(&p_hwfn->db_recovery_info.lock); + OSAL_LIST_FOR_EACH_ENTRY(db_entry, + &p_hwfn->db_recovery_info.list, + list_entry, + struct ecore_db_recovery_entry) { + /* search according to db_data addr since db_addr is not unique + * (roce) + */ + if (db_entry->db_data == db_data) { + ecore_db_recovery_dp_entry(p_hwfn, db_entry, + "Deleting"); + OSAL_LIST_REMOVE_ENTRY(&db_entry->list_entry, + &p_hwfn->db_recovery_info.list); + rc = ECORE_SUCCESS; + break; + } + } + + OSAL_SPIN_UNLOCK(&p_hwfn->db_recovery_info.lock); + + if (rc == ECORE_INVAL) + /*OSAL_WARN(true,*/ + DP_NOTICE(p_hwfn, false, + "Failed to find element in list. Key (db_data addr) was %p. 
db_addr was %p\n", + db_data, db_addr); + else + OSAL_FREE(p_dev, db_entry); + + return rc; +} + +/* initialize the doorbell recovery mechanism */ +enum _ecore_status_t ecore_db_recovery_setup(struct ecore_hwfn *p_hwfn) +{ + DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "Setting up db recovery\n"); + + /* make sure db_size was set in p_dev */ + if (!p_hwfn->p_dev->db_size) { + DP_ERR(p_hwfn->p_dev, "db_size not set\n"); + return ECORE_INVAL; + } + + OSAL_LIST_INIT(&p_hwfn->db_recovery_info.list); +#ifdef CONFIG_ECORE_LOCK_ALLOC + if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_hwfn->db_recovery_info.lock)) + return ECORE_NOMEM; +#endif + OSAL_SPIN_LOCK_INIT(&p_hwfn->db_recovery_info.lock); + p_hwfn->db_recovery_info.db_recovery_counter = 0; + + return ECORE_SUCCESS; +} + +/* destroy the doorbell recovery mechanism */ +void ecore_db_recovery_teardown(struct ecore_hwfn *p_hwfn) +{ + struct ecore_db_recovery_entry *db_entry = OSAL_NULL; + + DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "Tearing down db recovery\n"); + if (!OSAL_LIST_IS_EMPTY(&p_hwfn->db_recovery_info.list)) { + DP_VERBOSE(p_hwfn, false, "Doorbell Recovery teardown found the doorbell recovery list was not empty (Expected in disorderly driver unload (e.g. recovery) otherwise this probably means some flow forgot to db_recovery_del). Prepare to purge doorbell recovery list...\n"); + while (!OSAL_LIST_IS_EMPTY(&p_hwfn->db_recovery_info.list)) { + db_entry = OSAL_LIST_FIRST_ENTRY( + &p_hwfn->db_recovery_info.list, + struct ecore_db_recovery_entry, + list_entry); + ecore_db_recovery_dp_entry(p_hwfn, db_entry, "Purging"); + OSAL_LIST_REMOVE_ENTRY(&db_entry->list_entry, + &p_hwfn->db_recovery_info.list); + OSAL_FREE(p_hwfn->p_dev, db_entry); + } + } +#ifdef CONFIG_ECORE_LOCK_ALLOC + OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->db_recovery_info.lock); +#endif + p_hwfn->db_recovery_info.db_recovery_counter = 0; +} + +/* print the content of the doorbell recovery mechanism */ +void ecore_db_recovery_dp(struct ecore_hwfn *p_hwfn) +{ + struct ecore_db_recovery_entry *db_entry = OSAL_NULL; + + DP_NOTICE(p_hwfn, false, + "Dispalying doorbell recovery database. Counter was %d\n", + p_hwfn->db_recovery_info.db_recovery_counter); + + /* protect the list */ + OSAL_SPIN_LOCK(&p_hwfn->db_recovery_info.lock); + OSAL_LIST_FOR_EACH_ENTRY(db_entry, + &p_hwfn->db_recovery_info.list, + list_entry, + struct ecore_db_recovery_entry) { + ecore_db_recovery_dp_entry(p_hwfn, db_entry, "Printing"); + } + + OSAL_SPIN_UNLOCK(&p_hwfn->db_recovery_info.lock); +} + +/* ring the doorbell of a single doorbell recovery entry */ +void ecore_db_recovery_ring(struct ecore_hwfn *p_hwfn, + struct ecore_db_recovery_entry *db_entry, + enum ecore_db_rec_exec db_exec) +{ + /* Print according to width */ + if (db_entry->db_width == DB_REC_WIDTH_32B) + DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "%s doorbell address %p data %x\n", + db_exec == DB_REC_DRY_RUN ? "would have rung" : "ringing", + db_entry->db_addr, *(u32 *)db_entry->db_data); + else + DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "%s doorbell address %p data %lx\n", + db_exec == DB_REC_DRY_RUN ? "would have rung" : "ringing", + db_entry->db_addr, + *(unsigned long *)(db_entry->db_data)); + + /* Sanity */ + if (!ecore_db_rec_sanity(p_hwfn->p_dev, db_entry->db_addr, + db_entry->db_data)) + return; + + /* Flush the write combined buffer. Since there are multiple doorbelling + * entities using the same address, if we don't flush, a transaction + * could be lost. 
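+ * Note: a second OSAL_WMB() follows the register write below, so a doorbell
+ * from a different entity to the same address cannot be merged with this one
+ * in the write-combining buffer.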
+ */ + OSAL_WMB(p_hwfn->p_dev); + + /* Ring the doorbell */ + if (db_exec == DB_REC_REAL_DEAL || db_exec == DB_REC_ONCE) { + if (db_entry->db_width == DB_REC_WIDTH_32B) + DIRECT_REG_WR(p_hwfn, db_entry->db_addr, + *(u32 *)(db_entry->db_data)); + else + DIRECT_REG_WR64(p_hwfn, db_entry->db_addr, + *(u64 *)(db_entry->db_data)); + } + + /* Flush the write combined buffer. Next doorbell may come from a + * different entity to the same address... + */ + OSAL_WMB(p_hwfn->p_dev); +} + +/* traverse the doorbell recovery entry list and ring all the doorbells */ +void ecore_db_recovery_execute(struct ecore_hwfn *p_hwfn, + enum ecore_db_rec_exec db_exec) +{ + struct ecore_db_recovery_entry *db_entry = OSAL_NULL; + + if (db_exec != DB_REC_ONCE) { + DP_NOTICE(p_hwfn, false, "Executing doorbell recovery. Counter was %d\n", + p_hwfn->db_recovery_info.db_recovery_counter); + + /* track amount of times recovery was executed */ + p_hwfn->db_recovery_info.db_recovery_counter++; + } + + /* protect the list */ + OSAL_SPIN_LOCK(&p_hwfn->db_recovery_info.lock); + OSAL_LIST_FOR_EACH_ENTRY(db_entry, + &p_hwfn->db_recovery_info.list, + list_entry, + struct ecore_db_recovery_entry) { + ecore_db_recovery_ring(p_hwfn, db_entry, db_exec); + if (db_exec == DB_REC_ONCE) + break; + } + + OSAL_SPIN_UNLOCK(&p_hwfn->db_recovery_info.lock); +} +/******************** Doorbell Recovery end ****************/ + +/********************************** NIG LLH ***********************************/ + +enum ecore_llh_filter_type { + ECORE_LLH_FILTER_TYPE_MAC, + ECORE_LLH_FILTER_TYPE_PROTOCOL, +}; + +struct ecore_llh_mac_filter { + u8 addr[ETH_ALEN]; +}; + +struct ecore_llh_protocol_filter { + enum ecore_llh_prot_filter_type_t type; + u16 source_port_or_eth_type; + u16 dest_port; +}; + +union ecore_llh_filter { + struct ecore_llh_mac_filter mac; + struct ecore_llh_protocol_filter protocol; +}; + +struct ecore_llh_filter_info { + bool b_enabled; + u32 ref_cnt; + enum ecore_llh_filter_type type; + union ecore_llh_filter filter; +}; + +struct ecore_llh_info { + /* Number of LLH filters banks */ + u8 num_ppfid; + +#define MAX_NUM_PPFID 8 + u8 ppfid_array[MAX_NUM_PPFID]; + + /* Array of filters arrays: + * "num_ppfid" elements of filters banks, where each is an array of + * "NIG_REG_LLH_FUNC_FILTER_EN_SIZE" filters. 
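+ * The shadow mirrors the NIG/LLH hardware filter banks so that duplicate
+ * lookups, free-slot selection and per-filter reference counting can be
+ * done without reading the registers back.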
+ */ + struct ecore_llh_filter_info **pp_filters; +}; + +static void ecore_llh_free(struct ecore_dev *p_dev) +{ + struct ecore_llh_info *p_llh_info = p_dev->p_llh_info; + u32 i; + + if (p_llh_info != OSAL_NULL) { + if (p_llh_info->pp_filters != OSAL_NULL) { + for (i = 0; i < p_llh_info->num_ppfid; i++) + OSAL_FREE(p_dev, p_llh_info->pp_filters[i]); + } + + OSAL_FREE(p_dev, p_llh_info->pp_filters); + } + + OSAL_FREE(p_dev, p_llh_info); + p_dev->p_llh_info = OSAL_NULL; +} + +static enum _ecore_status_t ecore_llh_alloc(struct ecore_dev *p_dev) +{ + struct ecore_llh_info *p_llh_info; + u32 size; + u8 i; + + p_llh_info = OSAL_ZALLOC(p_dev, GFP_KERNEL, sizeof(*p_llh_info)); + if (!p_llh_info) + return ECORE_NOMEM; + p_dev->p_llh_info = p_llh_info; + + for (i = 0; i < MAX_NUM_PPFID; i++) { + if (!(p_dev->ppfid_bitmap & (0x1 << i))) + continue; + + p_llh_info->ppfid_array[p_llh_info->num_ppfid] = i; + DP_VERBOSE(p_dev, ECORE_MSG_SP, "ppfid_array[%d] = %hhd\n", + p_llh_info->num_ppfid, i); + p_llh_info->num_ppfid++; + } + + size = p_llh_info->num_ppfid * sizeof(*p_llh_info->pp_filters); + p_llh_info->pp_filters = OSAL_ZALLOC(p_dev, GFP_KERNEL, size); + if (!p_llh_info->pp_filters) + return ECORE_NOMEM; + + size = NIG_REG_LLH_FUNC_FILTER_EN_SIZE * + sizeof(**p_llh_info->pp_filters); + for (i = 0; i < p_llh_info->num_ppfid; i++) { + p_llh_info->pp_filters[i] = OSAL_ZALLOC(p_dev, GFP_KERNEL, + size); + if (!p_llh_info->pp_filters[i]) + return ECORE_NOMEM; + } + + return ECORE_SUCCESS; +} + +static enum _ecore_status_t ecore_llh_shadow_sanity(struct ecore_dev *p_dev, + u8 ppfid, u8 filter_idx, + const char *action) +{ + struct ecore_llh_info *p_llh_info = p_dev->p_llh_info; + + if (ppfid >= p_llh_info->num_ppfid) { + DP_NOTICE(p_dev, false, + "LLH shadow [%s]: using ppfid %d while only %d ppfids are available\n", + action, ppfid, p_llh_info->num_ppfid); + return ECORE_INVAL; + } + + if (filter_idx >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) { + DP_NOTICE(p_dev, false, + "LLH shadow [%s]: using filter_idx %d while only %d filters are available\n", + action, filter_idx, NIG_REG_LLH_FUNC_FILTER_EN_SIZE); + return ECORE_INVAL; + } + + return ECORE_SUCCESS; +} + +#define ECORE_LLH_INVALID_FILTER_IDX 0xff + +static enum _ecore_status_t +ecore_llh_shadow_search_filter(struct ecore_dev *p_dev, u8 ppfid, + union ecore_llh_filter *p_filter, + u8 *p_filter_idx) +{ + struct ecore_llh_info *p_llh_info = p_dev->p_llh_info; + struct ecore_llh_filter_info *p_filters; + enum _ecore_status_t rc; + u8 i; + + rc = ecore_llh_shadow_sanity(p_dev, ppfid, 0, "search"); + if (rc != ECORE_SUCCESS) + return rc; + + *p_filter_idx = ECORE_LLH_INVALID_FILTER_IDX; + + p_filters = p_llh_info->pp_filters[ppfid]; + for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) { + if (!OSAL_MEMCMP(p_filter, &p_filters[i].filter, + sizeof(*p_filter))) { + *p_filter_idx = i; + break; + } + } + + return ECORE_SUCCESS; +} + +static enum _ecore_status_t +ecore_llh_shadow_get_free_idx(struct ecore_dev *p_dev, u8 ppfid, + u8 *p_filter_idx) +{ + struct ecore_llh_info *p_llh_info = p_dev->p_llh_info; + struct ecore_llh_filter_info *p_filters; + enum _ecore_status_t rc; + u8 i; + + rc = ecore_llh_shadow_sanity(p_dev, ppfid, 0, "get_free_idx"); + if (rc != ECORE_SUCCESS) + return rc; + + *p_filter_idx = ECORE_LLH_INVALID_FILTER_IDX; + + p_filters = p_llh_info->pp_filters[ppfid]; + for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) { + if (!p_filters[i].b_enabled) { + *p_filter_idx = i; + break; + } + } + + return ECORE_SUCCESS; +} + +static enum 
_ecore_status_t +__ecore_llh_shadow_add_filter(struct ecore_dev *p_dev, u8 ppfid, u8 filter_idx, + enum ecore_llh_filter_type type, + union ecore_llh_filter *p_filter, u32 *p_ref_cnt) +{ + struct ecore_llh_info *p_llh_info = p_dev->p_llh_info; + struct ecore_llh_filter_info *p_filters; + enum _ecore_status_t rc; + + rc = ecore_llh_shadow_sanity(p_dev, ppfid, filter_idx, "add"); + if (rc != ECORE_SUCCESS) + return rc; + + p_filters = p_llh_info->pp_filters[ppfid]; + if (!p_filters[filter_idx].ref_cnt) { + p_filters[filter_idx].b_enabled = true; + p_filters[filter_idx].type = type; + OSAL_MEMCPY(&p_filters[filter_idx].filter, p_filter, + sizeof(p_filters[filter_idx].filter)); + } + + *p_ref_cnt = ++p_filters[filter_idx].ref_cnt; + + return ECORE_SUCCESS; +} + +static enum _ecore_status_t +ecore_llh_shadow_add_filter(struct ecore_dev *p_dev, u8 ppfid, + enum ecore_llh_filter_type type, + union ecore_llh_filter *p_filter, + u8 *p_filter_idx, u32 *p_ref_cnt) +{ + enum _ecore_status_t rc; + + /* Check if the same filter already exist */ + rc = ecore_llh_shadow_search_filter(p_dev, ppfid, p_filter, + p_filter_idx); + if (rc != ECORE_SUCCESS) + return rc; + + /* Find a new entry in case of a new filter */ + if (*p_filter_idx == ECORE_LLH_INVALID_FILTER_IDX) { + rc = ecore_llh_shadow_get_free_idx(p_dev, ppfid, p_filter_idx); + if (rc != ECORE_SUCCESS) + return rc; + } + + /* No free entry was found */ + if (*p_filter_idx == ECORE_LLH_INVALID_FILTER_IDX) { + DP_NOTICE(p_dev, false, + "Failed to find an empty LLH filter to utilize [ppfid %d]\n", + ppfid); + return ECORE_NORESOURCES; + } + + return __ecore_llh_shadow_add_filter(p_dev, ppfid, *p_filter_idx, type, + p_filter, p_ref_cnt); +} + +static enum _ecore_status_t +__ecore_llh_shadow_remove_filter(struct ecore_dev *p_dev, u8 ppfid, + u8 filter_idx, u32 *p_ref_cnt) +{ + struct ecore_llh_info *p_llh_info = p_dev->p_llh_info; + struct ecore_llh_filter_info *p_filters; + enum _ecore_status_t rc; + + rc = ecore_llh_shadow_sanity(p_dev, ppfid, filter_idx, "remove"); + if (rc != ECORE_SUCCESS) + return rc; + + p_filters = p_llh_info->pp_filters[ppfid]; + if (!p_filters[filter_idx].ref_cnt) { + DP_NOTICE(p_dev, false, + "LLH shadow: trying to remove a filter with ref_cnt=0\n"); + return ECORE_INVAL; + } + + *p_ref_cnt = --p_filters[filter_idx].ref_cnt; + if (!p_filters[filter_idx].ref_cnt) + OSAL_MEM_ZERO(&p_filters[filter_idx], + sizeof(p_filters[filter_idx])); + + return ECORE_SUCCESS; +} + +static enum _ecore_status_t +ecore_llh_shadow_remove_filter(struct ecore_dev *p_dev, u8 ppfid, + union ecore_llh_filter *p_filter, + u8 *p_filter_idx, u32 *p_ref_cnt) +{ + enum _ecore_status_t rc; + + rc = ecore_llh_shadow_search_filter(p_dev, ppfid, p_filter, + p_filter_idx); + if (rc != ECORE_SUCCESS) + return rc; + + /* No matching filter was found */ + if (*p_filter_idx == ECORE_LLH_INVALID_FILTER_IDX) { + DP_NOTICE(p_dev, false, + "Failed to find a filter in the LLH shadow\n"); + return ECORE_INVAL; + } + + return __ecore_llh_shadow_remove_filter(p_dev, ppfid, *p_filter_idx, + p_ref_cnt); +} + +static enum _ecore_status_t +ecore_llh_shadow_remove_all_filters(struct ecore_dev *p_dev, u8 ppfid) +{ + struct ecore_llh_info *p_llh_info = p_dev->p_llh_info; + struct ecore_llh_filter_info *p_filters; + enum _ecore_status_t rc; + + rc = ecore_llh_shadow_sanity(p_dev, ppfid, 0, "remove_all"); + if (rc != ECORE_SUCCESS) + return rc; + + p_filters = p_llh_info->pp_filters[ppfid]; + OSAL_MEM_ZERO(p_filters, + NIG_REG_LLH_FUNC_FILTER_EN_SIZE * sizeof(*p_filters)); + + 
return ECORE_SUCCESS; +} + +static enum _ecore_status_t ecore_abs_ppfid(struct ecore_dev *p_dev, + u8 rel_ppfid, u8 *p_abs_ppfid) +{ + struct ecore_llh_info *p_llh_info = p_dev->p_llh_info; + u8 ppfids = p_llh_info->num_ppfid - 1; + + if (rel_ppfid >= p_llh_info->num_ppfid) { + DP_NOTICE(p_dev, false, + "rel_ppfid %d is not valid, available indices are 0..%hhd\n", + rel_ppfid, ppfids); + return ECORE_INVAL; + } + + *p_abs_ppfid = p_llh_info->ppfid_array[rel_ppfid]; + + return ECORE_SUCCESS; +} + +static enum _ecore_status_t +__ecore_llh_set_engine_affin(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) +{ + struct ecore_dev *p_dev = p_hwfn->p_dev; + enum ecore_eng eng; + u8 ppfid; + enum _ecore_status_t rc; + + rc = ecore_mcp_get_engine_config(p_hwfn, p_ptt); + if (rc != ECORE_SUCCESS && rc != ECORE_NOTIMPL) { + DP_NOTICE(p_hwfn, false, + "Failed to get the engine affinity configuration\n"); + return rc; + } + + /* RoCE PF is bound to a single engine */ + if (ECORE_IS_ROCE_PERSONALITY(p_hwfn)) { + eng = p_dev->fir_affin ? ECORE_ENG1 : ECORE_ENG0; + rc = ecore_llh_set_roce_affinity(p_dev, eng); + if (rc != ECORE_SUCCESS) { + DP_NOTICE(p_dev, false, + "Failed to set the RoCE engine affinity\n"); + return rc; + } + + DP_VERBOSE(p_dev, ECORE_MSG_SP, + "LLH: Set the engine affinity of RoCE packets as %d\n", + eng); + } + + /* Storage PF is bound to a single engine while L2 PF uses both */ + if (ECORE_IS_FCOE_PERSONALITY(p_hwfn) || + ECORE_IS_ISCSI_PERSONALITY(p_hwfn)) + eng = p_dev->fir_affin ? ECORE_ENG1 : ECORE_ENG0; + else /* L2_PERSONALITY */ + eng = ECORE_BOTH_ENG; + + for (ppfid = 0; ppfid < p_dev->p_llh_info->num_ppfid; ppfid++) { + rc = ecore_llh_set_ppfid_affinity(p_dev, ppfid, eng); + if (rc != ECORE_SUCCESS) { + DP_NOTICE(p_dev, false, + "Failed to set the engine affinity of ppfid %d\n", + ppfid); + return rc; + } + } + + DP_VERBOSE(p_dev, ECORE_MSG_SP, + "LLH: Set the engine affinity of non-RoCE packets as %d\n", + eng); + + return ECORE_SUCCESS; +} + +static enum _ecore_status_t +ecore_llh_set_engine_affin(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, + bool avoid_eng_affin) +{ + struct ecore_dev *p_dev = p_hwfn->p_dev; + enum _ecore_status_t rc; + + /* Backwards compatible mode: + * - RoCE packets - Use engine 0. + * - Non-RoCE packets - Use connection based classification for L2 PFs, + * and engine 0 otherwise. + */ + if (avoid_eng_affin) { + enum ecore_eng eng; + u8 ppfid; + + if (ECORE_IS_ROCE_PERSONALITY(p_hwfn)) { + eng = ECORE_ENG0; + rc = ecore_llh_set_roce_affinity(p_dev, eng); + if (rc != ECORE_SUCCESS) { + DP_NOTICE(p_dev, false, + "Failed to set the RoCE engine affinity\n"); + return rc; + } + + DP_VERBOSE(p_dev, ECORE_MSG_SP, + "LLH [backwards compatible mode]: Set the engine affinity of RoCE packets as %d\n", + eng); + } + + eng = (ECORE_IS_FCOE_PERSONALITY(p_hwfn) || + ECORE_IS_ISCSI_PERSONALITY(p_hwfn)) ? 
ECORE_ENG0 + : ECORE_BOTH_ENG; + for (ppfid = 0; ppfid < p_dev->p_llh_info->num_ppfid; ppfid++) { + rc = ecore_llh_set_ppfid_affinity(p_dev, ppfid, eng); + if (rc != ECORE_SUCCESS) { + DP_NOTICE(p_dev, false, + "Failed to set the engine affinity of ppfid %d\n", + ppfid); + return rc; + } + } + + DP_VERBOSE(p_dev, ECORE_MSG_SP, + "LLH [backwards compatible mode]: Set the engine affinity of non-RoCE packets as %d\n", + eng); + + return ECORE_SUCCESS; + } + + return __ecore_llh_set_engine_affin(p_hwfn, p_ptt); +} + +static enum _ecore_status_t ecore_llh_hw_init_pf(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + bool avoid_eng_affin) +{ + struct ecore_dev *p_dev = p_hwfn->p_dev; + u8 ppfid, abs_ppfid; + enum _ecore_status_t rc; + + for (ppfid = 0; ppfid < p_dev->p_llh_info->num_ppfid; ppfid++) { + u32 addr; + + rc = ecore_abs_ppfid(p_dev, ppfid, &abs_ppfid); + if (rc != ECORE_SUCCESS) + return rc; + + addr = NIG_REG_LLH_PPFID2PFID_TBL_0 + abs_ppfid * 0x4; + ecore_wr(p_hwfn, p_ptt, addr, p_hwfn->rel_pf_id); + } + + if (OSAL_TEST_BIT(ECORE_MF_LLH_MAC_CLSS, &p_dev->mf_bits) && + !ECORE_IS_FCOE_PERSONALITY(p_hwfn)) { + rc = ecore_llh_add_mac_filter(p_dev, 0, + p_hwfn->hw_info.hw_mac_addr); + if (rc != ECORE_SUCCESS) + DP_NOTICE(p_dev, false, + "Failed to add an LLH filter with the primary MAC\n"); + } + + if (ECORE_IS_CMT(p_dev)) { + rc = ecore_llh_set_engine_affin(p_hwfn, p_ptt, avoid_eng_affin); + if (rc != ECORE_SUCCESS) + return rc; + } + + return ECORE_SUCCESS; +} + +u8 ecore_llh_get_num_ppfid(struct ecore_dev *p_dev) +{ + return p_dev->p_llh_info->num_ppfid; +} + +enum ecore_eng ecore_llh_get_l2_affinity_hint(struct ecore_dev *p_dev) +{ + return p_dev->l2_affin_hint ? ECORE_ENG1 : ECORE_ENG0; +} + +/* TBD - should be removed when these definitions are available in reg_addr.h */ +#define NIG_REG_PPF_TO_ENGINE_SEL_ROCE_MASK 0x3 +#define NIG_REG_PPF_TO_ENGINE_SEL_ROCE_SHIFT 0 +#define NIG_REG_PPF_TO_ENGINE_SEL_NON_ROCE_MASK 0x3 +#define NIG_REG_PPF_TO_ENGINE_SEL_NON_ROCE_SHIFT 2 + +enum _ecore_status_t ecore_llh_set_ppfid_affinity(struct ecore_dev *p_dev, + u8 ppfid, enum ecore_eng eng) +{ + struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); + struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn); + u32 addr, val, eng_sel; + enum _ecore_status_t rc = ECORE_SUCCESS; + u8 abs_ppfid; + + if (p_ptt == OSAL_NULL) + return ECORE_AGAIN; + + if (!ECORE_IS_CMT(p_dev)) + goto out; + + rc = ecore_abs_ppfid(p_dev, ppfid, &abs_ppfid); + if (rc != ECORE_SUCCESS) + goto out; + + switch (eng) { + case ECORE_ENG0: + eng_sel = 0; + break; + case ECORE_ENG1: + eng_sel = 1; + break; + case ECORE_BOTH_ENG: + eng_sel = 2; + break; + default: + DP_NOTICE(p_dev, false, + "Invalid affinity value for ppfid [%d]\n", eng); + rc = ECORE_INVAL; + goto out; + } + + addr = NIG_REG_PPF_TO_ENGINE_SEL + abs_ppfid * 0x4; + val = ecore_rd(p_hwfn, p_ptt, addr); + SET_FIELD(val, NIG_REG_PPF_TO_ENGINE_SEL_NON_ROCE, eng_sel); + ecore_wr(p_hwfn, p_ptt, addr, val); + + /* The iWARP affinity is set as the affinity of ppfid 0 */ + if (!ppfid && ECORE_IS_IWARP_PERSONALITY(p_hwfn)) + p_dev->iwarp_affin = (eng == ECORE_ENG1) ? 
1 : 0; +out: + ecore_ptt_release(p_hwfn, p_ptt); + + return rc; +} + +enum _ecore_status_t ecore_llh_set_roce_affinity(struct ecore_dev *p_dev, + enum ecore_eng eng) +{ + struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); + struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn); + u32 addr, val, eng_sel; + enum _ecore_status_t rc = ECORE_SUCCESS; + u8 ppfid, abs_ppfid; + + if (p_ptt == OSAL_NULL) + return ECORE_AGAIN; + + if (!ECORE_IS_CMT(p_dev)) + goto out; + + switch (eng) { + case ECORE_ENG0: + eng_sel = 0; + break; + case ECORE_ENG1: + eng_sel = 1; + break; + case ECORE_BOTH_ENG: + eng_sel = 2; + ecore_wr(p_hwfn, p_ptt, NIG_REG_LLH_ENG_CLS_ROCE_QP_SEL, + 0xf /* QP bit 15 */); + break; + default: + DP_NOTICE(p_dev, false, + "Invalid affinity value for RoCE [%d]\n", eng); + rc = ECORE_INVAL; + goto out; + } + + for (ppfid = 0; ppfid < p_dev->p_llh_info->num_ppfid; ppfid++) { + rc = ecore_abs_ppfid(p_dev, ppfid, &abs_ppfid); + if (rc != ECORE_SUCCESS) + goto out; + + addr = NIG_REG_PPF_TO_ENGINE_SEL + abs_ppfid * 0x4; + val = ecore_rd(p_hwfn, p_ptt, addr); + SET_FIELD(val, NIG_REG_PPF_TO_ENGINE_SEL_ROCE, eng_sel); + ecore_wr(p_hwfn, p_ptt, addr, val); + } +out: + ecore_ptt_release(p_hwfn, p_ptt); + + return rc; +} + +struct ecore_llh_filter_details { + u64 value; + u32 mode; + u32 protocol_type; + u32 hdr_sel; + u32 enable; +}; + +static enum _ecore_status_t +ecore_llh_access_filter(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, u8 abs_ppfid, u8 filter_idx, + struct ecore_llh_filter_details *p_details, + bool b_write_access) +{ + u8 pfid = ECORE_PFID_BY_PPFID(p_hwfn, abs_ppfid); + struct dmae_params params; + enum _ecore_status_t rc; + u32 addr; + + /* The NIG/LLH registers that are accessed in this function have only 16 + * rows which are exposed to a PF. I.e. only the 16 filters of its + * default ppfid + * Accessing filters of other ppfids requires pretending to other PFs, + * and thus the usage of the ecore_ppfid_rd/wr() functions. 
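+ *
+ * Ordering note: the enable register is cleared first when removing a filter
+ * and written last when adding one, so a partially programmed filter is never
+ * left enabled.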
+ */ + + /* Filter enable - should be done first when removing a filter */ + if (b_write_access && !p_details->enable) { + addr = NIG_REG_LLH_FUNC_FILTER_EN + filter_idx * 0x4; + ecore_ppfid_wr(p_hwfn, p_ptt, abs_ppfid, addr, + p_details->enable); + } + + /* Filter value */ + addr = NIG_REG_LLH_FUNC_FILTER_VALUE + 2 * filter_idx * 0x4; + OSAL_MEMSET(¶ms, 0, sizeof(params)); + + if (b_write_access) { + SET_FIELD(params.flags, DMAE_PARAMS_DST_PF_VALID, 0x1); + params.dst_pf_id = pfid; + rc = ecore_dmae_host2grc(p_hwfn, p_ptt, + (u64)(osal_uintptr_t)&p_details->value, + addr, 2 /* size_in_dwords */, ¶ms); + } else { + SET_FIELD(params.flags, DMAE_PARAMS_SRC_PF_VALID, 0x1); + SET_FIELD(params.flags, DMAE_PARAMS_COMPLETION_DST, 0x1); + params.src_pf_id = pfid; + rc = ecore_dmae_grc2host(p_hwfn, p_ptt, addr, + (u64)(osal_uintptr_t)&p_details->value, + 2 /* size_in_dwords */, ¶ms); + } + + if (rc != ECORE_SUCCESS) + return rc; + + /* Filter mode */ + addr = NIG_REG_LLH_FUNC_FILTER_MODE + filter_idx * 0x4; + if (b_write_access) + ecore_ppfid_wr(p_hwfn, p_ptt, abs_ppfid, addr, p_details->mode); + else + p_details->mode = ecore_ppfid_rd(p_hwfn, p_ptt, abs_ppfid, + addr); + + /* Filter protocol type */ + addr = NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE + filter_idx * 0x4; + if (b_write_access) + ecore_ppfid_wr(p_hwfn, p_ptt, abs_ppfid, addr, + p_details->protocol_type); + else + p_details->protocol_type = ecore_ppfid_rd(p_hwfn, p_ptt, + abs_ppfid, addr); + + /* Filter header select */ + addr = NIG_REG_LLH_FUNC_FILTER_HDR_SEL + filter_idx * 0x4; + if (b_write_access) + ecore_ppfid_wr(p_hwfn, p_ptt, abs_ppfid, addr, + p_details->hdr_sel); + else + p_details->hdr_sel = ecore_ppfid_rd(p_hwfn, p_ptt, abs_ppfid, + addr); + + /* Filter enable - should be done last when adding a filter */ + if (!b_write_access || p_details->enable) { + addr = NIG_REG_LLH_FUNC_FILTER_EN + filter_idx * 0x4; + if (b_write_access) + ecore_ppfid_wr(p_hwfn, p_ptt, abs_ppfid, addr, + p_details->enable); + else + p_details->enable = ecore_ppfid_rd(p_hwfn, p_ptt, + abs_ppfid, addr); + } + + return ECORE_SUCCESS; +} + +static enum _ecore_status_t +ecore_llh_add_filter(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, + u8 abs_ppfid, u8 filter_idx, u8 filter_prot_type, + u32 high, u32 low) +{ + struct ecore_llh_filter_details filter_details; + + filter_details.enable = 1; + filter_details.value = ((u64)high << 32) | low; + filter_details.hdr_sel = + OSAL_TEST_BIT(ECORE_MF_OVLAN_CLSS, &p_hwfn->p_dev->mf_bits) ? + 1 : /* inner/encapsulated header */ + 0; /* outer/tunnel header */ + filter_details.protocol_type = filter_prot_type; + filter_details.mode = filter_prot_type ? 
+ 1 : /* protocol-based classification */ + 0; /* MAC-address based classification */ + + return ecore_llh_access_filter(p_hwfn, p_ptt, abs_ppfid, filter_idx, + &filter_details, + true /* write access */); +} + +static enum _ecore_status_t +ecore_llh_remove_filter(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, u8 abs_ppfid, u8 filter_idx) +{ + struct ecore_llh_filter_details filter_details; + + OSAL_MEMSET(&filter_details, 0, sizeof(filter_details)); + + return ecore_llh_access_filter(p_hwfn, p_ptt, abs_ppfid, filter_idx, + &filter_details, + true /* write access */); +} + +enum _ecore_status_t ecore_llh_add_mac_filter(struct ecore_dev *p_dev, u8 ppfid, + u8 mac_addr[ETH_ALEN]) +{ + struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); + struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn); + union ecore_llh_filter filter; + u8 filter_idx, abs_ppfid; + u32 high, low, ref_cnt; + enum _ecore_status_t rc = ECORE_SUCCESS; + + if (p_ptt == OSAL_NULL) + return ECORE_AGAIN; + + if (!OSAL_TEST_BIT(ECORE_MF_LLH_MAC_CLSS, &p_dev->mf_bits)) + goto out; + + OSAL_MEM_ZERO(&filter, sizeof(filter)); + OSAL_MEMCPY(filter.mac.addr, mac_addr, ETH_ALEN); + rc = ecore_llh_shadow_add_filter(p_dev, ppfid, + ECORE_LLH_FILTER_TYPE_MAC, + &filter, &filter_idx, &ref_cnt); + if (rc != ECORE_SUCCESS) + goto err; + + rc = ecore_abs_ppfid(p_dev, ppfid, &abs_ppfid); + if (rc != ECORE_SUCCESS) + goto err; + + /* Configure the LLH only in case of a new the filter */ + if (ref_cnt == 1) { + high = mac_addr[1] | (mac_addr[0] << 8); + low = mac_addr[5] | (mac_addr[4] << 8) | (mac_addr[3] << 16) | + (mac_addr[2] << 24); + rc = ecore_llh_add_filter(p_hwfn, p_ptt, abs_ppfid, filter_idx, + 0, high, low); + if (rc != ECORE_SUCCESS) + goto err; + } + + DP_VERBOSE(p_dev, ECORE_MSG_SP, + "LLH: Added MAC filter [%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx] to ppfid %hhd [abs %hhd] at idx %hhd [ref_cnt %d]\n", + mac_addr[0], mac_addr[1], mac_addr[2], mac_addr[3], + mac_addr[4], mac_addr[5], ppfid, abs_ppfid, filter_idx, + ref_cnt); + + goto out; + +err: + DP_NOTICE(p_dev, false, + "LLH: Failed to add MAC filter [%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx] to ppfid %hhd\n", + mac_addr[0], mac_addr[1], mac_addr[2], mac_addr[3], + mac_addr[4], mac_addr[5], ppfid); +out: + ecore_ptt_release(p_hwfn, p_ptt); + + return rc; +} + +static enum _ecore_status_t +ecore_llh_protocol_filter_stringify(struct ecore_dev *p_dev, + enum ecore_llh_prot_filter_type_t type, + u16 source_port_or_eth_type, u16 dest_port, + char *str, osal_size_t str_len) +{ + switch (type) { + case ECORE_LLH_FILTER_ETHERTYPE: + OSAL_SNPRINTF(str, str_len, "Ethertype 0x%04x", + source_port_or_eth_type); + break; + case ECORE_LLH_FILTER_TCP_SRC_PORT: + OSAL_SNPRINTF(str, str_len, "TCP src port 0x%04x", + source_port_or_eth_type); + break; + case ECORE_LLH_FILTER_UDP_SRC_PORT: + OSAL_SNPRINTF(str, str_len, "UDP src port 0x%04x", + source_port_or_eth_type); + break; + case ECORE_LLH_FILTER_TCP_DEST_PORT: + OSAL_SNPRINTF(str, str_len, "TCP dst port 0x%04x", dest_port); + break; + case ECORE_LLH_FILTER_UDP_DEST_PORT: + OSAL_SNPRINTF(str, str_len, "UDP dst port 0x%04x", dest_port); + break; + case ECORE_LLH_FILTER_TCP_SRC_AND_DEST_PORT: + OSAL_SNPRINTF(str, str_len, "TCP src/dst ports 0x%04x/0x%04x", + source_port_or_eth_type, dest_port); + break; + case ECORE_LLH_FILTER_UDP_SRC_AND_DEST_PORT: + OSAL_SNPRINTF(str, str_len, "UDP src/dst ports 0x%04x/0x%04x", + source_port_or_eth_type, dest_port); + break; + default: + DP_NOTICE(p_dev, true, + "Non valid LLH protocol filter type 
%d\n", type); + return ECORE_INVAL; + } + + return ECORE_SUCCESS; +} + +static enum _ecore_status_t +ecore_llh_protocol_filter_to_hilo(struct ecore_dev *p_dev, + enum ecore_llh_prot_filter_type_t type, + u16 source_port_or_eth_type, u16 dest_port, + u32 *p_high, u32 *p_low) +{ + *p_high = 0; + *p_low = 0; + + switch (type) { + case ECORE_LLH_FILTER_ETHERTYPE: + *p_high = source_port_or_eth_type; + break; + case ECORE_LLH_FILTER_TCP_SRC_PORT: + case ECORE_LLH_FILTER_UDP_SRC_PORT: + *p_low = source_port_or_eth_type << 16; + break; + case ECORE_LLH_FILTER_TCP_DEST_PORT: + case ECORE_LLH_FILTER_UDP_DEST_PORT: + *p_low = dest_port; + break; + case ECORE_LLH_FILTER_TCP_SRC_AND_DEST_PORT: + case ECORE_LLH_FILTER_UDP_SRC_AND_DEST_PORT: + *p_low = (source_port_or_eth_type << 16) | dest_port; + break; + default: + DP_NOTICE(p_dev, true, + "Non valid LLH protocol filter type %d\n", type); + return ECORE_INVAL; + } + + return ECORE_SUCCESS; +} + +enum _ecore_status_t +ecore_llh_add_protocol_filter(struct ecore_dev *p_dev, u8 ppfid, + enum ecore_llh_prot_filter_type_t type, + u16 source_port_or_eth_type, u16 dest_port) +{ + struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); + struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn); + u8 filter_idx, abs_ppfid, type_bitmap; + char str[32]; + union ecore_llh_filter filter; + u32 high, low, ref_cnt; + enum _ecore_status_t rc = ECORE_SUCCESS; + + if (p_ptt == OSAL_NULL) + return ECORE_AGAIN; + + if (!OSAL_TEST_BIT(ECORE_MF_LLH_PROTO_CLSS, &p_dev->mf_bits)) + goto out; + + rc = ecore_llh_protocol_filter_stringify(p_dev, type, + source_port_or_eth_type, + dest_port, str, sizeof(str)); + if (rc != ECORE_SUCCESS) + goto err; + + OSAL_MEM_ZERO(&filter, sizeof(filter)); + filter.protocol.type = type; + filter.protocol.source_port_or_eth_type = source_port_or_eth_type; + filter.protocol.dest_port = dest_port; + rc = ecore_llh_shadow_add_filter(p_dev, ppfid, + ECORE_LLH_FILTER_TYPE_PROTOCOL, + &filter, &filter_idx, &ref_cnt); + if (rc != ECORE_SUCCESS) + goto err; + + rc = ecore_abs_ppfid(p_dev, ppfid, &abs_ppfid); + if (rc != ECORE_SUCCESS) + goto err; + + /* Configure the LLH only in case of a new the filter */ + if (ref_cnt == 1) { + rc = ecore_llh_protocol_filter_to_hilo(p_dev, type, + source_port_or_eth_type, + dest_port, &high, &low); + if (rc != ECORE_SUCCESS) + goto err; + + type_bitmap = 0x1 << type; + rc = ecore_llh_add_filter(p_hwfn, p_ptt, abs_ppfid, filter_idx, + type_bitmap, high, low); + if (rc != ECORE_SUCCESS) + goto err; + } + + DP_VERBOSE(p_dev, ECORE_MSG_SP, + "LLH: Added protocol filter [%s] to ppfid %hhd [abs %hhd] at idx %hhd [ref_cnt %d]\n", + str, ppfid, abs_ppfid, filter_idx, ref_cnt); + + goto out; + +err: + DP_NOTICE(p_hwfn, false, + "LLH: Failed to add protocol filter [%s] to ppfid %hhd\n", + str, ppfid); +out: + ecore_ptt_release(p_hwfn, p_ptt); + + return rc; +} + +void ecore_llh_remove_mac_filter(struct ecore_dev *p_dev, u8 ppfid, + u8 mac_addr[ETH_ALEN]) +{ + struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); + struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn); + union ecore_llh_filter filter; + u8 filter_idx, abs_ppfid; + enum _ecore_status_t rc = ECORE_SUCCESS; + u32 ref_cnt; + + if (p_ptt == OSAL_NULL) + return; + + if (!OSAL_TEST_BIT(ECORE_MF_LLH_MAC_CLSS, &p_dev->mf_bits)) + goto out; + + OSAL_MEM_ZERO(&filter, sizeof(filter)); + OSAL_MEMCPY(filter.mac.addr, mac_addr, ETH_ALEN); + rc = ecore_llh_shadow_remove_filter(p_dev, ppfid, &filter, &filter_idx, + &ref_cnt); + if (rc != ECORE_SUCCESS) + goto err; + + rc = 
ecore_abs_ppfid(p_dev, ppfid, &abs_ppfid); + if (rc != ECORE_SUCCESS) + goto err; + + /* Remove from the LLH in case the filter is not in use */ + if (!ref_cnt) { + rc = ecore_llh_remove_filter(p_hwfn, p_ptt, abs_ppfid, + filter_idx); + if (rc != ECORE_SUCCESS) + goto err; + } + + DP_VERBOSE(p_dev, ECORE_MSG_SP, + "LLH: Removed MAC filter [%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx] from ppfid %hhd [abs %hhd] at idx %hhd [ref_cnt %d]\n", + mac_addr[0], mac_addr[1], mac_addr[2], mac_addr[3], + mac_addr[4], mac_addr[5], ppfid, abs_ppfid, filter_idx, + ref_cnt); + + goto out; + +err: + DP_NOTICE(p_dev, false, + "LLH: Failed to remove MAC filter [%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx] from ppfid %hhd\n", + mac_addr[0], mac_addr[1], mac_addr[2], mac_addr[3], + mac_addr[4], mac_addr[5], ppfid); +out: + ecore_ptt_release(p_hwfn, p_ptt); +} + +void ecore_llh_remove_protocol_filter(struct ecore_dev *p_dev, u8 ppfid, + enum ecore_llh_prot_filter_type_t type, + u16 source_port_or_eth_type, + u16 dest_port) +{ + struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); + struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn); + u8 filter_idx, abs_ppfid; + char str[32]; + union ecore_llh_filter filter; + enum _ecore_status_t rc = ECORE_SUCCESS; + u32 ref_cnt; + + if (p_ptt == OSAL_NULL) + return; + + if (!OSAL_TEST_BIT(ECORE_MF_LLH_PROTO_CLSS, &p_dev->mf_bits)) + goto out; + + rc = ecore_llh_protocol_filter_stringify(p_dev, type, + source_port_or_eth_type, + dest_port, str, sizeof(str)); + if (rc != ECORE_SUCCESS) + goto err; + + OSAL_MEM_ZERO(&filter, sizeof(filter)); + filter.protocol.type = type; + filter.protocol.source_port_or_eth_type = source_port_or_eth_type; + filter.protocol.dest_port = dest_port; + rc = ecore_llh_shadow_remove_filter(p_dev, ppfid, &filter, &filter_idx, + &ref_cnt); + if (rc != ECORE_SUCCESS) + goto err; + + rc = ecore_abs_ppfid(p_dev, ppfid, &abs_ppfid); + if (rc != ECORE_SUCCESS) + goto err; + + /* Remove from the LLH in case the filter is not in use */ + if (!ref_cnt) { + rc = ecore_llh_remove_filter(p_hwfn, p_ptt, abs_ppfid, + filter_idx); + if (rc != ECORE_SUCCESS) + goto err; + } + + DP_VERBOSE(p_dev, ECORE_MSG_SP, + "LLH: Removed protocol filter [%s] from ppfid %hhd [abs %hhd] at idx %hhd [ref_cnt %d]\n", + str, ppfid, abs_ppfid, filter_idx, ref_cnt); + + goto out; + +err: + DP_NOTICE(p_dev, false, + "LLH: Failed to remove protocol filter [%s] from ppfid %hhd\n", + str, ppfid); +out: + ecore_ptt_release(p_hwfn, p_ptt); +} + +void ecore_llh_clear_ppfid_filters(struct ecore_dev *p_dev, u8 ppfid) +{ + struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); + struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn); + u8 filter_idx, abs_ppfid; + enum _ecore_status_t rc = ECORE_SUCCESS; + + if (p_ptt == OSAL_NULL) + return; + + if (!OSAL_TEST_BIT(ECORE_MF_LLH_PROTO_CLSS, &p_dev->mf_bits) && + !OSAL_TEST_BIT(ECORE_MF_LLH_MAC_CLSS, &p_dev->mf_bits)) + goto out; + + rc = ecore_abs_ppfid(p_dev, ppfid, &abs_ppfid); + if (rc != ECORE_SUCCESS) + goto out; + + rc = ecore_llh_shadow_remove_all_filters(p_dev, ppfid); + if (rc != ECORE_SUCCESS) + goto out; + + for (filter_idx = 0; filter_idx < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; + filter_idx++) { + rc = ecore_llh_remove_filter(p_hwfn, p_ptt, + abs_ppfid, filter_idx); + if (rc != ECORE_SUCCESS) + goto out; + } +out: + ecore_ptt_release(p_hwfn, p_ptt); +} + +void ecore_llh_clear_all_filters(struct ecore_dev *p_dev) +{ + u8 ppfid; + + if (!OSAL_TEST_BIT(ECORE_MF_LLH_PROTO_CLSS, &p_dev->mf_bits) && + !OSAL_TEST_BIT(ECORE_MF_LLH_MAC_CLSS, 
&p_dev->mf_bits)) + return; + + for (ppfid = 0; ppfid < p_dev->p_llh_info->num_ppfid; ppfid++) + ecore_llh_clear_ppfid_filters(p_dev, ppfid); +} + +enum _ecore_status_t ecore_all_ppfids_wr(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, u32 addr, + u32 val) +{ + struct ecore_dev *p_dev = p_hwfn->p_dev; + u8 ppfid, abs_ppfid; + enum _ecore_status_t rc; + + for (ppfid = 0; ppfid < p_dev->p_llh_info->num_ppfid; ppfid++) { + rc = ecore_abs_ppfid(p_dev, ppfid, &abs_ppfid); + if (rc != ECORE_SUCCESS) + return rc; + + ecore_ppfid_wr(p_hwfn, p_ptt, abs_ppfid, addr, val); + } + + return ECORE_SUCCESS; +} + +enum _ecore_status_t +ecore_llh_dump_ppfid(struct ecore_dev *p_dev, u8 ppfid) +{ + struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); + struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn); + struct ecore_llh_filter_details filter_details; + u8 abs_ppfid, filter_idx; + u32 addr; + enum _ecore_status_t rc; + + if (!p_ptt) + return ECORE_AGAIN; + + rc = ecore_abs_ppfid(p_hwfn->p_dev, ppfid, &abs_ppfid); + if (rc != ECORE_SUCCESS) + goto out; + + addr = NIG_REG_PPF_TO_ENGINE_SEL + abs_ppfid * 0x4; + DP_NOTICE(p_hwfn, false, + "[rel_pf_id %hhd, ppfid={rel %hhd, abs %hhd}, engine_sel 0x%x]\n", + p_hwfn->rel_pf_id, ppfid, abs_ppfid, + ecore_rd(p_hwfn, p_ptt, addr)); + + for (filter_idx = 0; filter_idx < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; + filter_idx++) { + OSAL_MEMSET(&filter_details, 0, sizeof(filter_details)); + rc = ecore_llh_access_filter(p_hwfn, p_ptt, abs_ppfid, + filter_idx, &filter_details, + false /* read access */); + if (rc != ECORE_SUCCESS) + goto out; + + DP_NOTICE(p_hwfn, false, + "filter %2hhd: enable %d, value 0x%016lx, mode %d, protocol_type 0x%x, hdr_sel 0x%x\n", + filter_idx, filter_details.enable, + (unsigned long)filter_details.value, + filter_details.mode, + filter_details.protocol_type, filter_details.hdr_sel); + } + + +out: + ecore_ptt_release(p_hwfn, p_ptt); + + return rc; +} + +enum _ecore_status_t ecore_llh_dump_all(struct ecore_dev *p_dev) +{ + u8 ppfid; + enum _ecore_status_t rc; + + for (ppfid = 0; ppfid < p_dev->p_llh_info->num_ppfid; ppfid++) { + rc = ecore_llh_dump_ppfid(p_dev, ppfid); + if (rc != ECORE_SUCCESS) + return rc; + } + + return ECORE_SUCCESS; +} + +/******************************* NIG LLH - End ********************************/ + +/* Configurable */ +#define ECORE_MIN_DPIS (4) /* The minimal num of DPIs required to + * load the driver. The number was + * arbitrarily set. + */ + +/* Derived */ +#define ECORE_MIN_PWM_REGION (ECORE_WID_SIZE * ECORE_MIN_DPIS) + +static u32 ecore_hw_bar_size(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + enum BAR_ID bar_id) +{ + u32 bar_reg = (bar_id == BAR_ID_0 ? + PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE); + u32 val; + + if (IS_VF(p_hwfn->p_dev)) + return ecore_vf_hw_bar_size(p_hwfn, bar_id); + + val = ecore_rd(p_hwfn, p_ptt, bar_reg); + if (val) + return 1 << (val + 15); + + /* The above registers were updated in the past only in CMT mode. Since + * they were found to be useful MFW started updating them from 8.7.7.0. + * In older MFW versions they are set to 0 which means disabled. + */ + if (ECORE_IS_CMT(p_hwfn->p_dev)) { + DP_INFO(p_hwfn, + "BAR size not configured. Assuming BAR size of 256kB for GRC and 512kB for DB\n"); + val = BAR_ID_0 ? 256 * 1024 : 512 * 1024; + } else { + DP_INFO(p_hwfn, + "BAR size not configured. 
Assuming BAR size of 512kB for GRC and 512kB for DB\n"); + val = 512 * 1024; + } + + return val; +} + +void ecore_init_dp(struct ecore_dev *p_dev, + u32 dp_module, u8 dp_level, void *dp_ctx) +{ + u32 i; + + p_dev->dp_level = dp_level; + p_dev->dp_module = dp_module; + p_dev->dp_ctx = dp_ctx; + for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) { + struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; + + p_hwfn->dp_level = dp_level; + p_hwfn->dp_module = dp_module; + p_hwfn->dp_ctx = dp_ctx; + } +} + +enum _ecore_status_t ecore_init_struct(struct ecore_dev *p_dev) +{ + u8 i; + + for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) { + struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; + + p_hwfn->p_dev = p_dev; + p_hwfn->my_id = i; + p_hwfn->b_active = false; + +#ifdef CONFIG_ECORE_LOCK_ALLOC + if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_hwfn->dmae_info.lock)) + goto handle_err; +#endif + OSAL_SPIN_LOCK_INIT(&p_hwfn->dmae_info.lock); + } + + /* hwfn 0 is always active */ + p_dev->hwfns[0].b_active = true; + + /* set the default cache alignment to 128 (may be overridden later) */ + p_dev->cache_shift = 7; + return ECORE_SUCCESS; +#ifdef CONFIG_ECORE_LOCK_ALLOC +handle_err: + while (--i) { + struct ecore_hwfn *p_hwfn = OSAL_NULL; + + p_hwfn = &p_dev->hwfns[i]; + OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->dmae_info.lock); + } + return ECORE_NOMEM; +#endif +} + +static void ecore_qm_info_free(struct ecore_hwfn *p_hwfn) +{ + struct ecore_qm_info *qm_info = &p_hwfn->qm_info; + + OSAL_FREE(p_hwfn->p_dev, qm_info->qm_pq_params); + OSAL_FREE(p_hwfn->p_dev, qm_info->qm_vport_params); + OSAL_FREE(p_hwfn->p_dev, qm_info->qm_port_params); + OSAL_FREE(p_hwfn->p_dev, qm_info->wfq_data); +} + +static void ecore_dbg_user_data_free(struct ecore_hwfn *p_hwfn) +{ + OSAL_FREE(p_hwfn->p_dev, p_hwfn->dbg_user_info); + p_hwfn->dbg_user_info = OSAL_NULL; +} + +void ecore_resc_free(struct ecore_dev *p_dev) +{ + int i; + + if (IS_VF(p_dev)) { + for_each_hwfn(p_dev, i) + ecore_l2_free(&p_dev->hwfns[i]); + return; + } + + OSAL_FREE(p_dev, p_dev->fw_data); + + OSAL_FREE(p_dev, p_dev->reset_stats); + + ecore_llh_free(p_dev); + + for_each_hwfn(p_dev, i) { + struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; + + ecore_cxt_mngr_free(p_hwfn); + ecore_qm_info_free(p_hwfn); + ecore_spq_free(p_hwfn); + ecore_eq_free(p_hwfn); + ecore_consq_free(p_hwfn); + ecore_int_free(p_hwfn); + ecore_iov_free(p_hwfn); + ecore_l2_free(p_hwfn); + ecore_dmae_info_free(p_hwfn); + ecore_dcbx_info_free(p_hwfn); + ecore_dbg_user_data_free(p_hwfn); + ecore_fw_overlay_mem_free(p_hwfn, p_hwfn->fw_overlay_mem); + /* @@@TBD Flush work-queue ? */ + + /* destroy doorbell recovery mechanism */ + ecore_db_recovery_teardown(p_hwfn); + } +} + +/******************** QM initialization *******************/ + +/* bitmaps for indicating active traffic classes. + * Special case for Arrowhead 4 port + */ +/* 0..3 actualy used, 4 serves OOO, 7 serves high priority stuff (e.g. DCQCN) */ +#define ACTIVE_TCS_BMAP 0x9f +/* 0..3 actually used, OOO and high priority stuff all use 3 */ +#define ACTIVE_TCS_BMAP_4PORT_K2 0xf + +/* determines the physical queue flags for a given PF. 
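+ * The returned bitmap drives both the PQ/vport accounting (see
+ * ecore_init_qm_get_num_pqs() and ecore_init_qm_get_num_vports()) and the
+ * order in which PQs are laid out in ecore_init_qm_pq_params().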
*/ +static u32 ecore_get_pq_flags(struct ecore_hwfn *p_hwfn) +{ + u32 flags; + + /* common flags */ + flags = PQ_FLAGS_LB; + + /* feature flags */ + if (IS_ECORE_SRIOV(p_hwfn->p_dev)) + flags |= PQ_FLAGS_VFS; + if (IS_ECORE_PACING(p_hwfn)) + flags |= PQ_FLAGS_RLS; + + /* protocol flags */ + switch (p_hwfn->hw_info.personality) { + case ECORE_PCI_ETH: + if (!IS_ECORE_PACING(p_hwfn)) + flags |= PQ_FLAGS_MCOS; + break; + case ECORE_PCI_FCOE: + flags |= PQ_FLAGS_OFLD; + break; + case ECORE_PCI_ISCSI: + flags |= PQ_FLAGS_ACK | PQ_FLAGS_OOO | PQ_FLAGS_OFLD; + break; + case ECORE_PCI_ETH_ROCE: + flags |= PQ_FLAGS_OFLD | PQ_FLAGS_LLT; + if (!IS_ECORE_PACING(p_hwfn)) + flags |= PQ_FLAGS_MCOS; + break; + case ECORE_PCI_ETH_IWARP: + flags |= PQ_FLAGS_ACK | PQ_FLAGS_OOO | PQ_FLAGS_OFLD; + if (!IS_ECORE_PACING(p_hwfn)) + flags |= PQ_FLAGS_MCOS; + break; + default: + DP_ERR(p_hwfn, "unknown personality %d\n", + p_hwfn->hw_info.personality); + return 0; + } + return flags; +} + +/* Getters for resource amounts necessary for qm initialization */ +u8 ecore_init_qm_get_num_tcs(struct ecore_hwfn *p_hwfn) +{ + return p_hwfn->hw_info.num_hw_tc; +} + +u16 ecore_init_qm_get_num_vfs(struct ecore_hwfn *p_hwfn) +{ + return IS_ECORE_SRIOV(p_hwfn->p_dev) ? + p_hwfn->p_dev->p_iov_info->total_vfs : 0; +} + +#define NUM_DEFAULT_RLS 1 + +u16 ecore_init_qm_get_num_pf_rls(struct ecore_hwfn *p_hwfn) +{ + u16 num_pf_rls, num_vfs = ecore_init_qm_get_num_vfs(p_hwfn); + + /* num RLs can't exceed resource amount of rls or vports or the + * dcqcn qps + */ + num_pf_rls = (u16)OSAL_MIN_T(u32, RESC_NUM(p_hwfn, ECORE_RL), + RESC_NUM(p_hwfn, ECORE_VPORT)); + + /* make sure after we reserve the default and VF rls we'll have + * something left + */ + if (num_pf_rls < num_vfs + NUM_DEFAULT_RLS) { + DP_NOTICE(p_hwfn, false, + "no rate limiters left for PF rate limiting" + " [num_pf_rls %d num_vfs %d]\n", num_pf_rls, num_vfs); + return 0; + } + + /* subtract rls necessary for VFs and one default one for the PF */ + num_pf_rls -= num_vfs + NUM_DEFAULT_RLS; + + return num_pf_rls; +} + +u16 ecore_init_qm_get_num_vports(struct ecore_hwfn *p_hwfn) +{ + u32 pq_flags = ecore_get_pq_flags(p_hwfn); + + /* all pqs share the same vport (hence the 1 below), except for vfs + * and pf_rl pqs + */ + return (!!(PQ_FLAGS_RLS & pq_flags)) * + ecore_init_qm_get_num_pf_rls(p_hwfn) + + (!!(PQ_FLAGS_VFS & pq_flags)) * + ecore_init_qm_get_num_vfs(p_hwfn) + 1; +} + +/* calc amount of PQs according to the requested flags */ +u16 ecore_init_qm_get_num_pqs(struct ecore_hwfn *p_hwfn) +{ + u32 pq_flags = ecore_get_pq_flags(p_hwfn); + + return (!!(PQ_FLAGS_RLS & pq_flags)) * + ecore_init_qm_get_num_pf_rls(p_hwfn) + + (!!(PQ_FLAGS_MCOS & pq_flags)) * + ecore_init_qm_get_num_tcs(p_hwfn) + + (!!(PQ_FLAGS_LB & pq_flags)) + + (!!(PQ_FLAGS_OOO & pq_flags)) + + (!!(PQ_FLAGS_ACK & pq_flags)) + + (!!(PQ_FLAGS_OFLD & pq_flags)) + + (!!(PQ_FLAGS_VFS & pq_flags)) * + ecore_init_qm_get_num_vfs(p_hwfn); +} + +/* initialize the top level QM params */ +static void ecore_init_qm_params(struct ecore_hwfn *p_hwfn) +{ + struct ecore_qm_info *qm_info = &p_hwfn->qm_info; + bool four_port; + + /* pq and vport bases for this PF */ + qm_info->start_pq = (u16)RESC_START(p_hwfn, ECORE_PQ); + qm_info->start_vport = (u8)RESC_START(p_hwfn, ECORE_VPORT); + + /* rate limiting and weighted fair queueing are always enabled */ + qm_info->vport_rl_en = 1; + qm_info->vport_wfq_en = 1; + + /* TC config is different for AH 4 port */ + four_port = p_hwfn->p_dev->num_ports_in_engine == MAX_NUM_PORTS_K2; + + 
/* in AH 4 port we have fewer TCs per port */ + qm_info->max_phys_tcs_per_port = four_port ? NUM_PHYS_TCS_4PORT_K2 : + NUM_OF_PHYS_TCS; + + /* unless MFW indicated otherwise, ooo_tc should be 3 for AH 4 port and + * 4 otherwise + */ + if (!qm_info->ooo_tc) + qm_info->ooo_tc = four_port ? DCBX_TCP_OOO_K2_4PORT_TC : + DCBX_TCP_OOO_TC; +} + +/* initialize qm vport params */ +static void ecore_init_qm_vport_params(struct ecore_hwfn *p_hwfn) +{ + struct ecore_qm_info *qm_info = &p_hwfn->qm_info; + u8 i; + + /* all vports participate in weighted fair queueing */ + for (i = 0; i < ecore_init_qm_get_num_vports(p_hwfn); i++) + qm_info->qm_vport_params[i].wfq = 1; +} + +/* initialize qm port params */ +static void ecore_init_qm_port_params(struct ecore_hwfn *p_hwfn) +{ + /* Initialize qm port parameters */ + u8 i, active_phys_tcs, num_ports = p_hwfn->p_dev->num_ports_in_engine; + struct ecore_dev *p_dev = p_hwfn->p_dev; + + /* indicate how ooo and high pri traffic is dealt with */ + active_phys_tcs = num_ports == MAX_NUM_PORTS_K2 ? + ACTIVE_TCS_BMAP_4PORT_K2 : ACTIVE_TCS_BMAP; + + for (i = 0; i < num_ports; i++) { + struct init_qm_port_params *p_qm_port = + &p_hwfn->qm_info.qm_port_params[i]; + u16 pbf_max_cmd_lines; + + p_qm_port->active = 1; + p_qm_port->active_phys_tcs = active_phys_tcs; + pbf_max_cmd_lines = (u16)NUM_OF_PBF_CMD_LINES(p_dev); + p_qm_port->num_pbf_cmd_lines = pbf_max_cmd_lines / num_ports; + p_qm_port->num_btb_blocks = + NUM_OF_BTB_BLOCKS(p_dev) / num_ports; + } +} + +/* Reset the params which must be reset for qm init. QM init may be called as + * a result of flows other than driver load (e.g. dcbx renegotiation). Other + * params may be affected by the init but would simply recalculate to the same + * values. The allocations made for QM init, ports, vports, pqs and vfqs are not + * affected as these amounts stay the same. + */ +static void ecore_init_qm_reset_params(struct ecore_hwfn *p_hwfn) +{ + struct ecore_qm_info *qm_info = &p_hwfn->qm_info; + + qm_info->num_pqs = 0; + qm_info->num_vports = 0; + qm_info->num_pf_rls = 0; + qm_info->num_vf_pqs = 0; + qm_info->first_vf_pq = 0; + qm_info->first_mcos_pq = 0; + qm_info->first_rl_pq = 0; +} + +static void ecore_init_qm_advance_vport(struct ecore_hwfn *p_hwfn) +{ + struct ecore_qm_info *qm_info = &p_hwfn->qm_info; + + qm_info->num_vports++; + + if (qm_info->num_vports > ecore_init_qm_get_num_vports(p_hwfn)) + DP_ERR(p_hwfn, + "vport overflow! qm_info->num_vports %d," + " qm_init_get_num_vports() %d\n", + qm_info->num_vports, + ecore_init_qm_get_num_vports(p_hwfn)); +} + +/* initialize a single pq and manage qm_info resources accounting. + * The pq_init_flags param determines whether the PQ is rate limited + * (for VF or PF) + * and whether a new vport is allocated to the pq or not (i.e. vport will be + * shared) + */ + +/* flags for pq init */ +#define PQ_INIT_SHARE_VPORT (1 << 0) +#define PQ_INIT_PF_RL (1 << 1) +#define PQ_INIT_VF_RL (1 << 2) + +/* defines for pq init */ +#define PQ_INIT_DEFAULT_WRR_GROUP 1 +#define PQ_INIT_DEFAULT_TC 0 +#define PQ_INIT_OFLD_TC (p_hwfn->hw_info.offload_tc) + +static void ecore_init_qm_pq(struct ecore_hwfn *p_hwfn, + struct ecore_qm_info *qm_info, + u8 tc, u32 pq_init_flags) +{ + u16 pq_idx = qm_info->num_pqs, max_pq = + ecore_init_qm_get_num_pqs(p_hwfn); + + if (pq_idx > max_pq) + DP_ERR(p_hwfn, + "pq overflow! 
pq %d, max pq %d\n", pq_idx, max_pq); + + /* init pq params */ + qm_info->qm_pq_params[pq_idx].port_id = p_hwfn->port_id; + qm_info->qm_pq_params[pq_idx].vport_id = qm_info->start_vport + + qm_info->num_vports; + qm_info->qm_pq_params[pq_idx].tc_id = tc; + qm_info->qm_pq_params[pq_idx].wrr_group = PQ_INIT_DEFAULT_WRR_GROUP; + qm_info->qm_pq_params[pq_idx].rl_valid = + (pq_init_flags & PQ_INIT_PF_RL || + pq_init_flags & PQ_INIT_VF_RL); + + /* The "rl_id" is set as the "vport_id" */ + qm_info->qm_pq_params[pq_idx].rl_id = + qm_info->qm_pq_params[pq_idx].vport_id; + + /* qm params accounting */ + qm_info->num_pqs++; + if (!(pq_init_flags & PQ_INIT_SHARE_VPORT)) + qm_info->num_vports++; + + if (pq_init_flags & PQ_INIT_PF_RL) + qm_info->num_pf_rls++; + + if (qm_info->num_vports > ecore_init_qm_get_num_vports(p_hwfn)) + DP_ERR(p_hwfn, + "vport overflow! qm_info->num_vports %d," + " qm_init_get_num_vports() %d\n", + qm_info->num_vports, + ecore_init_qm_get_num_vports(p_hwfn)); + + if (qm_info->num_pf_rls > ecore_init_qm_get_num_pf_rls(p_hwfn)) + DP_ERR(p_hwfn, "rl overflow! qm_info->num_pf_rls %d," + " qm_init_get_num_pf_rls() %d\n", + qm_info->num_pf_rls, + ecore_init_qm_get_num_pf_rls(p_hwfn)); +} + +/* get pq index according to PQ_FLAGS */ +static u16 *ecore_init_qm_get_idx_from_flags(struct ecore_hwfn *p_hwfn, + u32 pq_flags) +{ + struct ecore_qm_info *qm_info = &p_hwfn->qm_info; + + /* Can't have multiple flags set here */ + if (OSAL_BITMAP_WEIGHT((unsigned long *)&pq_flags, + sizeof(pq_flags)) > 1) + goto err; + + switch (pq_flags) { + case PQ_FLAGS_RLS: + return &qm_info->first_rl_pq; + case PQ_FLAGS_MCOS: + return &qm_info->first_mcos_pq; + case PQ_FLAGS_LB: + return &qm_info->pure_lb_pq; + case PQ_FLAGS_OOO: + return &qm_info->ooo_pq; + case PQ_FLAGS_ACK: + return &qm_info->pure_ack_pq; + case PQ_FLAGS_OFLD: + return &qm_info->offload_pq; + case PQ_FLAGS_VFS: + return &qm_info->first_vf_pq; + default: + goto err; + } + +err: + DP_ERR(p_hwfn, "BAD pq flags %d\n", pq_flags); + return OSAL_NULL; +} + +/* save pq index in qm info */ +static void ecore_init_qm_set_idx(struct ecore_hwfn *p_hwfn, + u32 pq_flags, u16 pq_val) +{ + u16 *base_pq_idx = ecore_init_qm_get_idx_from_flags(p_hwfn, pq_flags); + + *base_pq_idx = p_hwfn->qm_info.start_pq + pq_val; +} + +/* get tx pq index, with the PQ TX base already set (ready for context init) */ +u16 ecore_get_cm_pq_idx(struct ecore_hwfn *p_hwfn, u32 pq_flags) +{ + u16 *base_pq_idx = ecore_init_qm_get_idx_from_flags(p_hwfn, pq_flags); + + return *base_pq_idx + CM_TX_PQ_BASE; +} + +u16 ecore_get_cm_pq_idx_mcos(struct ecore_hwfn *p_hwfn, u8 tc) +{ + u8 max_tc = ecore_init_qm_get_num_tcs(p_hwfn); + + if (tc > max_tc) + DP_ERR(p_hwfn, "tc %d must be smaller than %d\n", tc, max_tc); + + return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_MCOS) + (tc % max_tc); +} + +u16 ecore_get_cm_pq_idx_vf(struct ecore_hwfn *p_hwfn, u16 vf) +{ + u16 max_vf = ecore_init_qm_get_num_vfs(p_hwfn); + + if (vf > max_vf) + DP_ERR(p_hwfn, "vf %d must be smaller than %d\n", vf, max_vf); + + return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_VFS) + (vf % max_vf); +} + +u16 ecore_get_cm_pq_idx_rl(struct ecore_hwfn *p_hwfn, u16 rl) +{ + u16 max_rl = ecore_init_qm_get_num_pf_rls(p_hwfn); + + /* for rate limiters, it is okay to use the modulo behavior - no + * DP_ERR + */ + return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_RLS) + (rl % max_rl); +} + +u16 ecore_get_qm_vport_idx_rl(struct ecore_hwfn *p_hwfn, u16 rl) +{ + u16 start_pq, pq, qm_pq_idx; + + pq = ecore_get_cm_pq_idx_rl(p_hwfn, rl); + start_pq = 
p_hwfn->qm_info.start_pq; + qm_pq_idx = pq - start_pq - CM_TX_PQ_BASE; + + if (qm_pq_idx > p_hwfn->qm_info.num_pqs) { + DP_ERR(p_hwfn, + "qm_pq_idx %d must be smaller than %d\n", + qm_pq_idx, p_hwfn->qm_info.num_pqs); + } + + return p_hwfn->qm_info.qm_pq_params[qm_pq_idx].vport_id; +} + +/* Functions for creating specific types of pqs */ +static void ecore_init_qm_lb_pq(struct ecore_hwfn *p_hwfn) +{ + struct ecore_qm_info *qm_info = &p_hwfn->qm_info; + + if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_LB)) + return; + + ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_LB, qm_info->num_pqs); + ecore_init_qm_pq(p_hwfn, qm_info, PURE_LB_TC, PQ_INIT_SHARE_VPORT); +} + +static void ecore_init_qm_ooo_pq(struct ecore_hwfn *p_hwfn) +{ + struct ecore_qm_info *qm_info = &p_hwfn->qm_info; + + if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_OOO)) + return; + + ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_OOO, qm_info->num_pqs); + ecore_init_qm_pq(p_hwfn, qm_info, qm_info->ooo_tc, PQ_INIT_SHARE_VPORT); +} + +static void ecore_init_qm_pure_ack_pq(struct ecore_hwfn *p_hwfn) +{ + struct ecore_qm_info *qm_info = &p_hwfn->qm_info; + + if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_ACK)) + return; + + ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_ACK, qm_info->num_pqs); + ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT); +} + +static void ecore_init_qm_offload_pq(struct ecore_hwfn *p_hwfn) +{ + struct ecore_qm_info *qm_info = &p_hwfn->qm_info; + + if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_OFLD)) + return; + + ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_OFLD, qm_info->num_pqs); + ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT); +} + +static void ecore_init_qm_mcos_pqs(struct ecore_hwfn *p_hwfn) +{ + struct ecore_qm_info *qm_info = &p_hwfn->qm_info; + u8 tc_idx; + + if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_MCOS)) + return; + + ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_MCOS, qm_info->num_pqs); + for (tc_idx = 0; tc_idx < ecore_init_qm_get_num_tcs(p_hwfn); tc_idx++) + ecore_init_qm_pq(p_hwfn, qm_info, tc_idx, PQ_INIT_SHARE_VPORT); +} + +static void ecore_init_qm_vf_pqs(struct ecore_hwfn *p_hwfn) +{ + struct ecore_qm_info *qm_info = &p_hwfn->qm_info; + u16 vf_idx, num_vfs = ecore_init_qm_get_num_vfs(p_hwfn); + + if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_VFS)) + return; + + ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_VFS, qm_info->num_pqs); + + qm_info->num_vf_pqs = num_vfs; + for (vf_idx = 0; vf_idx < num_vfs; vf_idx++) + ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_DEFAULT_TC, + PQ_INIT_VF_RL); +} + +static void ecore_init_qm_rl_pqs(struct ecore_hwfn *p_hwfn) +{ + u16 pf_rls_idx, num_pf_rls = ecore_init_qm_get_num_pf_rls(p_hwfn); + struct ecore_qm_info *qm_info = &p_hwfn->qm_info; + + if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_RLS)) + return; + + ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_RLS, qm_info->num_pqs); + for (pf_rls_idx = 0; pf_rls_idx < num_pf_rls; pf_rls_idx++) + ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, + PQ_INIT_PF_RL); +} + +static void ecore_init_qm_pq_params(struct ecore_hwfn *p_hwfn) +{ + /* rate limited pqs, must come first (FW assumption) */ + ecore_init_qm_rl_pqs(p_hwfn); + + /* pqs for multi cos */ + ecore_init_qm_mcos_pqs(p_hwfn); + + /* pure loopback pq */ + ecore_init_qm_lb_pq(p_hwfn); + + /* out of order pq */ + ecore_init_qm_ooo_pq(p_hwfn); + + /* pure ack pq */ + ecore_init_qm_pure_ack_pq(p_hwfn); + + /* pq for offloaded protocol */ + ecore_init_qm_offload_pq(p_hwfn); + + /* done sharing vports */ + ecore_init_qm_advance_vport(p_hwfn); + + /* pqs for vfs */ + 
ecore_init_qm_vf_pqs(p_hwfn); +} + +/* compare values of getters against resources amounts */ +static enum _ecore_status_t ecore_init_qm_sanity(struct ecore_hwfn *p_hwfn) +{ + if (ecore_init_qm_get_num_vports(p_hwfn) > + RESC_NUM(p_hwfn, ECORE_VPORT)) { + DP_ERR(p_hwfn, "requested amount of vports exceeds resource\n"); + return ECORE_INVAL; + } + + if (ecore_init_qm_get_num_pqs(p_hwfn) > RESC_NUM(p_hwfn, ECORE_PQ)) { + DP_ERR(p_hwfn, "requested amount of pqs exceeds resource\n"); + return ECORE_INVAL; + } + + return ECORE_SUCCESS; +} + +/* + * Function for verbose printing of the qm initialization results + */ +static void ecore_dp_init_qm_params(struct ecore_hwfn *p_hwfn) +{ + struct ecore_qm_info *qm_info = &p_hwfn->qm_info; + struct init_qm_vport_params *vport; + struct init_qm_port_params *port; + struct init_qm_pq_params *pq; + int i, tc; + + /* top level params */ + DP_VERBOSE(p_hwfn, ECORE_MSG_HW, + "qm init top level params: start_pq %d, start_vport %d," + " pure_lb_pq %d, offload_pq %d, pure_ack_pq %d\n", + qm_info->start_pq, qm_info->start_vport, qm_info->pure_lb_pq, + qm_info->offload_pq, qm_info->pure_ack_pq); + DP_VERBOSE(p_hwfn, ECORE_MSG_HW, + "ooo_pq %d, first_vf_pq %d, num_pqs %d, num_vf_pqs %d," + " num_vports %d, max_phys_tcs_per_port %d\n", + qm_info->ooo_pq, qm_info->first_vf_pq, qm_info->num_pqs, + qm_info->num_vf_pqs, qm_info->num_vports, + qm_info->max_phys_tcs_per_port); + DP_VERBOSE(p_hwfn, ECORE_MSG_HW, + "pf_rl_en %d, pf_wfq_en %d, vport_rl_en %d, vport_wfq_en %d," + " pf_wfq %d, pf_rl %d, num_pf_rls %d, pq_flags %x\n", + qm_info->pf_rl_en, qm_info->pf_wfq_en, qm_info->vport_rl_en, + qm_info->vport_wfq_en, qm_info->pf_wfq, qm_info->pf_rl, + qm_info->num_pf_rls, ecore_get_pq_flags(p_hwfn)); + + /* port table */ + for (i = 0; i < p_hwfn->p_dev->num_ports_in_engine; i++) { + port = &qm_info->qm_port_params[i]; + DP_VERBOSE(p_hwfn, ECORE_MSG_HW, + "port idx %d, active %d, active_phys_tcs %d," + " num_pbf_cmd_lines %d, num_btb_blocks %d," + " reserved %d\n", + i, port->active, port->active_phys_tcs, + port->num_pbf_cmd_lines, port->num_btb_blocks, + port->reserved); + } + + /* vport table */ + for (i = 0; i < qm_info->num_vports; i++) { + vport = &qm_info->qm_vport_params[i]; + DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "vport idx %d, wfq %d, first_tx_pq_id [ ", + qm_info->start_vport + i, vport->wfq); + for (tc = 0; tc < NUM_OF_TCS; tc++) + DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "%d ", + vport->first_tx_pq_id[tc]); + DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "]\n"); + } + + /* pq table */ + for (i = 0; i < qm_info->num_pqs; i++) { + pq = &qm_info->qm_pq_params[i]; + DP_VERBOSE(p_hwfn, ECORE_MSG_SP, + "pq idx %d, port %d, vport_id %d, tc %d, wrr_grp %d, rl_valid %d, rl_id %d\n", + qm_info->start_pq + i, pq->port_id, pq->vport_id, + pq->tc_id, pq->wrr_group, pq->rl_valid, pq->rl_id); + } +} + +static void ecore_init_qm_info(struct ecore_hwfn *p_hwfn) +{ + /* reset params required for init run */ + ecore_init_qm_reset_params(p_hwfn); + + /* init QM top level params */ + ecore_init_qm_params(p_hwfn); + + /* init QM port params */ + ecore_init_qm_port_params(p_hwfn); + + /* init QM vport params */ + ecore_init_qm_vport_params(p_hwfn); + + /* init QM physical queue params */ + ecore_init_qm_pq_params(p_hwfn); + + /* display all that init */ + ecore_dp_init_qm_params(p_hwfn); +} + +/* This function reconfigures the QM pf on the fly. + * For this purpose we: + * 1. reconfigure the QM database + * 2. set new values to runtime array + * 3. 
send an sdm_qm_cmd through the rbc interface to stop the QM + * 4. activate init tool in QM_PF stage + * 5. send an sdm_qm_cmd through rbc interface to release the QM + */ +enum _ecore_status_t ecore_qm_reconf(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + struct ecore_qm_info *qm_info = &p_hwfn->qm_info; + bool b_rc; + enum _ecore_status_t rc = ECORE_SUCCESS; + + /* multiple flows can issue qm reconf. Need to lock */ + OSAL_SPIN_LOCK(&qm_lock); + + /* initialize ecore's qm data structure */ + ecore_init_qm_info(p_hwfn); + + /* stop PF's qm queues */ + b_rc = ecore_send_qm_stop_cmd(p_hwfn, p_ptt, false, true, + qm_info->start_pq, qm_info->num_pqs); + if (!b_rc) { + rc = ECORE_INVAL; + goto unlock; + } + + /* clear the QM_PF runtime phase leftovers from previous init */ + ecore_init_clear_rt_data(p_hwfn); + + /* prepare QM portion of runtime array */ + ecore_qm_init_pf(p_hwfn, p_ptt, false); + + /* activate init tool on runtime array */ + rc = ecore_init_run(p_hwfn, p_ptt, PHASE_QM_PF, p_hwfn->rel_pf_id, + p_hwfn->hw_info.hw_mode); + + /* start PF's qm queues */ + b_rc = ecore_send_qm_stop_cmd(p_hwfn, p_ptt, true, true, + qm_info->start_pq, qm_info->num_pqs); + if (!b_rc) + rc = ECORE_INVAL; + +unlock: + OSAL_SPIN_UNLOCK(&qm_lock); + + return rc; +} + +static enum _ecore_status_t ecore_alloc_qm_data(struct ecore_hwfn *p_hwfn) +{ + struct ecore_qm_info *qm_info = &p_hwfn->qm_info; + enum _ecore_status_t rc; + + rc = ecore_init_qm_sanity(p_hwfn); + if (rc != ECORE_SUCCESS) + goto alloc_err; + + qm_info->qm_pq_params = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, + sizeof(struct init_qm_pq_params) * + ecore_init_qm_get_num_pqs(p_hwfn)); + if (!qm_info->qm_pq_params) + goto alloc_err; + + qm_info->qm_vport_params = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, + sizeof(struct init_qm_vport_params) * + ecore_init_qm_get_num_vports(p_hwfn)); + if (!qm_info->qm_vport_params) + goto alloc_err; + + qm_info->qm_port_params = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, + sizeof(struct init_qm_port_params) * + p_hwfn->p_dev->num_ports_in_engine); + if (!qm_info->qm_port_params) + goto alloc_err; + + qm_info->wfq_data = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, + sizeof(struct ecore_wfq_data) * + ecore_init_qm_get_num_vports(p_hwfn)); + if (!qm_info->wfq_data) + goto alloc_err; + + return ECORE_SUCCESS; + +alloc_err: + DP_NOTICE(p_hwfn, false, "Failed to allocate memory for QM params\n"); + ecore_qm_info_free(p_hwfn); + return ECORE_NOMEM; +} +/******************** End QM initialization ***************/ + +enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev) +{ + enum _ecore_status_t rc = ECORE_SUCCESS; + int i; + + if (IS_VF(p_dev)) { + for_each_hwfn(p_dev, i) { + rc = ecore_l2_alloc(&p_dev->hwfns[i]); + if (rc != ECORE_SUCCESS) + return rc; + } + return rc; + } + + p_dev->fw_data = OSAL_ZALLOC(p_dev, GFP_KERNEL, + sizeof(*p_dev->fw_data)); + if (!p_dev->fw_data) + return ECORE_NOMEM; + + for_each_hwfn(p_dev, i) { + struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; + u32 n_eqes, num_cons; + + /* initialize the doorbell recovery mechanism */ + rc = ecore_db_recovery_setup(p_hwfn); + if (rc) + goto alloc_err; + + /* First allocate the context manager structure */ + rc = ecore_cxt_mngr_alloc(p_hwfn); + if (rc) + goto alloc_err; + + /* Set the HW cid/tid numbers (in the context manager) + * Must be done prior to any further computations. 
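+ * (For example, the ILT computation in ecore_cxt_cfg_ilt_compute() and the
+ * EQ sizing below rely on the CID/TID counts set here.)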
+ */ + rc = ecore_cxt_set_pf_params(p_hwfn); + if (rc) + goto alloc_err; + + rc = ecore_alloc_qm_data(p_hwfn); + if (rc) + goto alloc_err; + + /* init qm info */ + ecore_init_qm_info(p_hwfn); + + /* Compute the ILT client partition */ + rc = ecore_cxt_cfg_ilt_compute(p_hwfn); + if (rc) + goto alloc_err; + + /* CID map / ILT shadow table / T2 + * The talbes sizes are determined by the computations above + */ + rc = ecore_cxt_tables_alloc(p_hwfn); + if (rc) + goto alloc_err; + + /* SPQ, must follow ILT because initializes SPQ context */ + rc = ecore_spq_alloc(p_hwfn); + if (rc) + goto alloc_err; + + /* SP status block allocation */ + p_hwfn->p_dpc_ptt = ecore_get_reserved_ptt(p_hwfn, + RESERVED_PTT_DPC); + + rc = ecore_int_alloc(p_hwfn, p_hwfn->p_main_ptt); + if (rc) + goto alloc_err; + + rc = ecore_iov_alloc(p_hwfn); + if (rc) + goto alloc_err; + + /* EQ */ + n_eqes = ecore_chain_get_capacity(&p_hwfn->p_spq->chain); + if (ECORE_IS_RDMA_PERSONALITY(p_hwfn)) { + /* Calculate the EQ size + * --------------------- + * Each ICID may generate up to one event at a time i.e. + * the event must be handled/cleared before a new one + * can be generated. We calculate the sum of events per + * protocol and create an EQ deep enough to handle the + * worst case: + * - Core - according to SPQ. + * - RoCE - per QP there are a couple of ICIDs, one + * responder and one requester, each can + * generate an EQE => n_eqes_qp = 2 * n_qp. + * Each CQ can generate an EQE. There are 2 CQs + * per QP => n_eqes_cq = 2 * n_qp. + * Hence the RoCE total is 4 * n_qp or + * 2 * num_cons. + * - ENet - There can be up to two events per VF. One + * for VF-PF channel and another for VF FLR + * initial cleanup. The number of VFs is + * bounded by MAX_NUM_VFS_BB, and is much + * smaller than RoCE's so we avoid exact + * calculation. + */ + if (ECORE_IS_ROCE_PERSONALITY(p_hwfn)) { + num_cons = + ecore_cxt_get_proto_cid_count( + p_hwfn, + PROTOCOLID_ROCE, + OSAL_NULL); + num_cons *= 2; + } else { + num_cons = ecore_cxt_get_proto_cid_count( + p_hwfn, + PROTOCOLID_IWARP, + OSAL_NULL); + } + n_eqes += num_cons + 2 * MAX_NUM_VFS_BB; + } else if (p_hwfn->hw_info.personality == ECORE_PCI_ISCSI) { + num_cons = + ecore_cxt_get_proto_cid_count(p_hwfn, + PROTOCOLID_ISCSI, + OSAL_NULL); + n_eqes += 2 * num_cons; + } + + if (n_eqes > 0xFFFF) { + DP_ERR(p_hwfn, "Cannot allocate 0x%x EQ elements." 
+ "The maximum of a u16 chain is 0x%x\n", + n_eqes, 0xFFFF); + goto alloc_no_mem; + } + + rc = ecore_eq_alloc(p_hwfn, (u16)n_eqes); + if (rc) + goto alloc_err; + + rc = ecore_consq_alloc(p_hwfn); + if (rc) + goto alloc_err; + + rc = ecore_l2_alloc(p_hwfn); + if (rc != ECORE_SUCCESS) + goto alloc_err; + + /* DMA info initialization */ + rc = ecore_dmae_info_alloc(p_hwfn); + if (rc) { + DP_NOTICE(p_hwfn, false, "Failed to allocate memory for dmae_info structure\n"); + goto alloc_err; + } + + /* DCBX initialization */ + rc = ecore_dcbx_info_alloc(p_hwfn); + if (rc) { + DP_NOTICE(p_hwfn, false, + "Failed to allocate memory for dcbx structure\n"); + goto alloc_err; + } + + rc = OSAL_DBG_ALLOC_USER_DATA(p_hwfn, &p_hwfn->dbg_user_info); + if (rc) { + DP_NOTICE(p_hwfn, false, + "Failed to allocate dbg user info structure\n"); + goto alloc_err; + } + + rc = OSAL_DBG_ALLOC_USER_DATA(p_hwfn, &p_hwfn->dbg_user_info); + if (rc) { + DP_NOTICE(p_hwfn, false, + "Failed to allocate dbg user info structure\n"); + goto alloc_err; + } + } /* hwfn loop */ + + rc = ecore_llh_alloc(p_dev); + if (rc != ECORE_SUCCESS) { + DP_NOTICE(p_dev, true, + "Failed to allocate memory for the llh_info structure\n"); + goto alloc_err; + } + + p_dev->reset_stats = OSAL_ZALLOC(p_dev, GFP_KERNEL, + sizeof(*p_dev->reset_stats)); + if (!p_dev->reset_stats) { + DP_NOTICE(p_dev, false, "Failed to allocate reset statistics\n"); + goto alloc_no_mem; + } + + return ECORE_SUCCESS; + +alloc_no_mem: + rc = ECORE_NOMEM; +alloc_err: + ecore_resc_free(p_dev); + return rc; +} + +void ecore_resc_setup(struct ecore_dev *p_dev) +{ + int i; + + if (IS_VF(p_dev)) { + for_each_hwfn(p_dev, i) + ecore_l2_setup(&p_dev->hwfns[i]); + return; + } + + for_each_hwfn(p_dev, i) { + struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; + + ecore_cxt_mngr_setup(p_hwfn); + ecore_spq_setup(p_hwfn); + ecore_eq_setup(p_hwfn); + ecore_consq_setup(p_hwfn); + + /* Read shadow of current MFW mailbox */ + ecore_mcp_read_mb(p_hwfn, p_hwfn->p_main_ptt); + OSAL_MEMCPY(p_hwfn->mcp_info->mfw_mb_shadow, + p_hwfn->mcp_info->mfw_mb_cur, + p_hwfn->mcp_info->mfw_mb_length); + + ecore_int_setup(p_hwfn, p_hwfn->p_main_ptt); + + ecore_l2_setup(p_hwfn); + ecore_iov_setup(p_hwfn); + } +} + +#define FINAL_CLEANUP_POLL_CNT (100) +#define FINAL_CLEANUP_POLL_TIME (10) +enum _ecore_status_t ecore_final_cleanup(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u16 id, bool is_vf) +{ + u32 command = 0, addr, count = FINAL_CLEANUP_POLL_CNT; + enum _ecore_status_t rc = ECORE_TIMEOUT; + +#ifndef ASIC_ONLY + if (CHIP_REV_IS_TEDIBEAR(p_hwfn->p_dev) || + CHIP_REV_IS_SLOW(p_hwfn->p_dev)) { + DP_INFO(p_hwfn, "Skipping final cleanup for non-ASIC\n"); + return ECORE_SUCCESS; + } +#endif + + addr = GTT_BAR0_MAP_REG_USDM_RAM + + USTORM_FLR_FINAL_ACK_OFFSET(p_hwfn->rel_pf_id); + + if (is_vf) + id += 0x10; + + command |= X_FINAL_CLEANUP_AGG_INT << + SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT; + command |= 1 << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT; + command |= id << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT; + command |= SDM_COMP_TYPE_AGG_INT << SDM_OP_GEN_COMP_TYPE_SHIFT; + +/* Make sure notification is not set before initiating final cleanup */ + + if (REG_RD(p_hwfn, addr)) { + DP_NOTICE(p_hwfn, false, + "Unexpected; Found final cleanup notification"); + DP_NOTICE(p_hwfn, false, + " before initiating final cleanup\n"); + REG_WR(p_hwfn, addr, 0); + } + + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "Sending final cleanup for PFVF[%d] [Command %08x]\n", + id, command); + + ecore_wr(p_hwfn, p_ptt, 
XSDM_REG_OPERATION_GEN, command); + + /* Poll until completion */ + while (!REG_RD(p_hwfn, addr) && count--) + OSAL_MSLEEP(FINAL_CLEANUP_POLL_TIME); + + if (REG_RD(p_hwfn, addr)) + rc = ECORE_SUCCESS; + else + DP_NOTICE(p_hwfn, true, + "Failed to receive FW final cleanup notification\n"); + + /* Cleanup afterwards */ + REG_WR(p_hwfn, addr, 0); + + return rc; +} + +static enum _ecore_status_t ecore_calc_hw_mode(struct ecore_hwfn *p_hwfn) +{ + int hw_mode = 0; + + if (ECORE_IS_BB(p_hwfn->p_dev)) { + hw_mode |= 1 << MODE_BB; + } else if (ECORE_IS_AH(p_hwfn->p_dev)) { + hw_mode |= 1 << MODE_K2; + } else { + DP_NOTICE(p_hwfn, true, "Unknown chip type %#x\n", + p_hwfn->p_dev->type); + return ECORE_INVAL; + } + + /* Ports per engine is based on the values in CNIG_REG_NW_PORT_MODE */ + switch (p_hwfn->p_dev->num_ports_in_engine) { + case 1: + hw_mode |= 1 << MODE_PORTS_PER_ENG_1; + break; + case 2: + hw_mode |= 1 << MODE_PORTS_PER_ENG_2; + break; + case 4: + hw_mode |= 1 << MODE_PORTS_PER_ENG_4; + break; + default: + DP_NOTICE(p_hwfn, true, + "num_ports_in_engine = %d not supported\n", + p_hwfn->p_dev->num_ports_in_engine); + return ECORE_INVAL; + } + + if (OSAL_TEST_BIT(ECORE_MF_OVLAN_CLSS, &p_hwfn->p_dev->mf_bits)) + hw_mode |= 1 << MODE_MF_SD; + else + hw_mode |= 1 << MODE_MF_SI; + +#ifndef ASIC_ONLY + if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) { + if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) { + hw_mode |= 1 << MODE_FPGA; + } else { + if (p_hwfn->p_dev->b_is_emul_full) + hw_mode |= 1 << MODE_EMUL_FULL; + else + hw_mode |= 1 << MODE_EMUL_REDUCED; + } + } else +#endif + hw_mode |= 1 << MODE_ASIC; + + if (ECORE_IS_CMT(p_hwfn->p_dev)) + hw_mode |= 1 << MODE_100G; + + p_hwfn->hw_info.hw_mode = hw_mode; + + DP_VERBOSE(p_hwfn, (ECORE_MSG_PROBE | ECORE_MSG_IFUP), + "Configuring function for hw_mode: 0x%08x\n", + p_hwfn->hw_info.hw_mode); + + return ECORE_SUCCESS; +} + +#ifndef ASIC_ONLY +/* MFW-replacement initializations for emulation */ +static enum _ecore_status_t ecore_hw_init_chip(struct ecore_dev *p_dev, + struct ecore_ptt *p_ptt) +{ + struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); + u32 pl_hv, wr_mbs; + int i, pos; + u16 ctrl = 0; + + if (!CHIP_REV_IS_EMUL(p_dev)) { + DP_NOTICE(p_dev, false, + "ecore_hw_init_chip() shouldn't be called in a non-emulation environment\n"); + return ECORE_INVAL; + } + + pl_hv = ECORE_IS_BB(p_dev) ? 0x1 : 0x401; + ecore_wr(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV + 4, pl_hv); + + if (ECORE_IS_AH(p_dev)) + ecore_wr(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV_2_K2, 0x3ffffff); + + /* Initialize port mode to 4x10G_E (10G with 4x10 SERDES) */ + if (ECORE_IS_BB(p_dev)) + ecore_wr(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB, 4); + + if (ECORE_IS_AH(p_dev)) { + /* 2 for 4-port, 1 for 2-port, 0 for 1-port */ + ecore_wr(p_hwfn, p_ptt, MISC_REG_PORT_MODE, + p_dev->num_ports_in_engine >> 1); + + ecore_wr(p_hwfn, p_ptt, MISC_REG_BLOCK_256B_EN, + p_dev->num_ports_in_engine == 4 ? 
0 : 3); + } + + /* Signal the PSWRQ block to start initializing internal memories */ + ecore_wr(p_hwfn, p_ptt, PSWRQ2_REG_RBC_DONE, 1); + for (i = 0; i < 100; i++) { + OSAL_UDELAY(50); + if (ecore_rd(p_hwfn, p_ptt, PSWRQ2_REG_CFG_DONE) == 1) + break; + } + if (i == 100) { + DP_NOTICE(p_hwfn, true, + "RBC done failed to complete in PSWRQ2\n"); + return ECORE_TIMEOUT; + } + + /* Indicate PSWRQ to initialize steering tag table with zeros */ + ecore_wr(p_hwfn, p_ptt, PSWRQ2_REG_RESET_STT, 1); + for (i = 0; i < 100; i++) { + OSAL_UDELAY(50); + if (!ecore_rd(p_hwfn, p_ptt, PSWRQ2_REG_RESET_STT)) + break; + } + if (i == 100) { + DP_NOTICE(p_hwfn, true, + "Steering tag table initialization failed to complete in PSWRQ2\n"); + return ECORE_TIMEOUT; + } + + /* Clear a possible PSWRQ2 STT parity which might have been generated by + * a previous MSI-X read. + */ + ecore_wr(p_hwfn, p_ptt, PSWRQ2_REG_PRTY_STS_WR_H_0, 0x8); + + /* Configure PSWRQ2_REG_WR_MBS0 according to the MaxPayloadSize field in + * the PCI configuration space. The value is common for all PFs, so it + * is okay to do it according to the first loading PF. + */ + pos = OSAL_PCI_FIND_CAPABILITY(p_dev, PCI_CAP_ID_EXP); + if (!pos) { + DP_NOTICE(p_dev, true, + "Failed to find the PCI Express Capability structure in the PCI config space\n"); + return ECORE_IO; + } + + OSAL_PCI_READ_CONFIG_WORD(p_dev, pos + PCI_EXP_DEVCTL, &ctrl); + wr_mbs = (ctrl & PCI_EXP_DEVCTL_PAYLOAD) >> 5; + ecore_wr(p_hwfn, p_ptt, PSWRQ2_REG_WR_MBS0, wr_mbs); + + /* Configure the PGLUE_B to discard mode */ + ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_MASTER_DISCARD_NBLOCK, 0x3f); + + return ECORE_SUCCESS; +} +#endif + +/* Init run time data for all PFs and their VFs on an engine. + * TBD - for VFs - Once we have parent PF info for each VF in + * shmem available as CAU requires knowledge of parent PF for each VF. + */ +static void ecore_init_cau_rt_data(struct ecore_dev *p_dev) +{ + u32 offset = CAU_REG_SB_VAR_MEMORY_RT_OFFSET; + u32 igu_sb_id; + int i; + + for_each_hwfn(p_dev, i) { + struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; + struct ecore_igu_info *p_igu_info; + struct ecore_igu_block *p_block; + struct cau_sb_entry sb_entry; + + p_igu_info = p_hwfn->hw_info.p_igu_info; + + for (igu_sb_id = 0; + igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_dev); + igu_sb_id++) { + p_block = &p_igu_info->entry[igu_sb_id]; + + if (!p_block->is_pf) + continue; + + ecore_init_cau_sb_entry(p_hwfn, &sb_entry, + p_block->function_id, 0, 0); + STORE_RT_REG_AGG(p_hwfn, offset + igu_sb_id * 2, + sb_entry); + } + } +} + +static void ecore_init_cache_line_size(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + u32 val, wr_mbs, cache_line_size; + + val = ecore_rd(p_hwfn, p_ptt, PSWRQ2_REG_WR_MBS0); + switch (val) { + case 0: + wr_mbs = 128; + break; + case 1: + wr_mbs = 256; + break; + case 2: + wr_mbs = 512; + break; + default: + DP_INFO(p_hwfn, + "Unexpected value of PSWRQ2_REG_WR_MBS0 [0x%x]. Avoid configuring PGLUE_B_REG_CACHE_LINE_SIZE.\n", + val); + return; + } + + cache_line_size = OSAL_MIN_T(u32, OSAL_CACHE_LINE_SIZE, wr_mbs); + switch (cache_line_size) { + case 32: + val = 0; + break; + case 64: + val = 1; + break; + case 128: + val = 2; + break; + case 256: + val = 3; + break; + default: + DP_INFO(p_hwfn, + "Unexpected value of cache line size [0x%x]. 
Avoid configuring PGLUE_B_REG_CACHE_LINE_SIZE.\n", + cache_line_size); + } + + if (wr_mbs < OSAL_CACHE_LINE_SIZE) + DP_INFO(p_hwfn, + "The cache line size for padding is suboptimal for performance [OS cache line size 0x%x, wr mbs 0x%x]\n", + OSAL_CACHE_LINE_SIZE, wr_mbs); + + STORE_RT_REG(p_hwfn, PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET, val); + if (val > 0) { + STORE_RT_REG(p_hwfn, PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET, val); + STORE_RT_REG(p_hwfn, PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET, val); + } +} + +static enum _ecore_status_t ecore_hw_init_common(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + int hw_mode) +{ + struct ecore_qm_info *qm_info = &p_hwfn->qm_info; + struct ecore_dev *p_dev = p_hwfn->p_dev; + u8 vf_id, max_num_vfs; + u16 num_pfs, pf_id; + u32 concrete_fid; + enum _ecore_status_t rc = ECORE_SUCCESS; + + ecore_init_cau_rt_data(p_dev); + + /* Program GTT windows */ + ecore_gtt_init(p_hwfn); + +#ifndef ASIC_ONLY + if (CHIP_REV_IS_EMUL(p_dev) && IS_LEAD_HWFN(p_hwfn)) { + rc = ecore_hw_init_chip(p_dev, p_ptt); + if (rc != ECORE_SUCCESS) + return rc; + } +#endif + + if (p_hwfn->mcp_info) { + if (p_hwfn->mcp_info->func_info.bandwidth_max) + qm_info->pf_rl_en = 1; + if (p_hwfn->mcp_info->func_info.bandwidth_min) + qm_info->pf_wfq_en = 1; + } + + ecore_qm_common_rt_init(p_hwfn, + p_dev->num_ports_in_engine, + qm_info->max_phys_tcs_per_port, + qm_info->pf_rl_en, qm_info->pf_wfq_en, + qm_info->vport_rl_en, qm_info->vport_wfq_en, + qm_info->qm_port_params, + OSAL_NULL /* global RLs are not configured */); + + ecore_cxt_hw_init_common(p_hwfn); + + ecore_init_cache_line_size(p_hwfn, p_ptt); + + rc = ecore_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ECORE_PATH_ID(p_hwfn), + hw_mode); + if (rc != ECORE_SUCCESS) + return rc; + + /* @@TBD MichalK - should add VALIDATE_VFID to init tool... + * need to decide with which value, maybe runtime + */ + ecore_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0); + ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_USE_CLIENTID_IN_TAG, 1); + + if (ECORE_IS_BB(p_dev)) { + /* Workaround clears ROCE search for all functions to prevent + * involving non initialized function in processing ROCE packet. + */ + num_pfs = (u16)NUM_OF_ENG_PFS(p_dev); + for (pf_id = 0; pf_id < num_pfs; pf_id++) { + ecore_fid_pretend(p_hwfn, p_ptt, pf_id); + ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0); + ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0); + } + /* pretend to original PF */ + ecore_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id); + } + + /* Workaround for avoiding CCFC execution error when getting packets + * with CRC errors, and allowing instead the invoking of the FW error + * handler. + * This is not done inside the init tool since it currently can't + * perform a pretending to VFs. 
+ */ + max_num_vfs = (u8)NUM_OF_VFS(p_dev); + for (vf_id = 0; vf_id < max_num_vfs; vf_id++) { + concrete_fid = ecore_vfid_to_concrete(p_hwfn, vf_id); + ecore_fid_pretend(p_hwfn, p_ptt, (u16)concrete_fid); + ecore_wr(p_hwfn, p_ptt, CCFC_REG_STRONG_ENABLE_VF, 0x1); + ecore_wr(p_hwfn, p_ptt, CCFC_REG_WEAK_ENABLE_VF, 0x0); + ecore_wr(p_hwfn, p_ptt, TCFC_REG_STRONG_ENABLE_VF, 0x1); + ecore_wr(p_hwfn, p_ptt, TCFC_REG_WEAK_ENABLE_VF, 0x0); + } + /* pretend to original PF */ + ecore_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id); + + return rc; +} + +#ifndef ASIC_ONLY +#define MISC_REG_RESET_REG_2_XMAC_BIT (1 << 4) +#define MISC_REG_RESET_REG_2_XMAC_SOFT_BIT (1 << 5) + +#define PMEG_IF_BYTE_COUNT 8 + +static void ecore_wr_nw_port(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 addr, u64 data, u8 reg_type, u8 port) +{ + DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, + "CMD: %08x, ADDR: 0x%08x, DATA: %08x:%08x\n", + ecore_rd(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_CMD_BB) | + (8 << PMEG_IF_BYTE_COUNT), + (reg_type << 25) | (addr << 8) | port, + (u32)((data >> 32) & 0xffffffff), + (u32)(data & 0xffffffff)); + + ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_CMD_BB, + (ecore_rd(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_CMD_BB) & + 0xffff00fe) | (8 << PMEG_IF_BYTE_COUNT)); + ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_ADDR_BB, + (reg_type << 25) | (addr << 8) | port); + ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_WRDATA_BB, data & 0xffffffff); + ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_WRDATA_BB, + (data >> 32) & 0xffffffff); +} + +#define XLPORT_MODE_REG (0x20a) +#define XLPORT_MAC_CONTROL (0x210) +#define XLPORT_FLOW_CONTROL_CONFIG (0x207) +#define XLPORT_ENABLE_REG (0x20b) + +#define XLMAC_CTRL (0x600) +#define XLMAC_MODE (0x601) +#define XLMAC_RX_MAX_SIZE (0x608) +#define XLMAC_TX_CTRL (0x604) +#define XLMAC_PAUSE_CTRL (0x60d) +#define XLMAC_PFC_CTRL (0x60e) + +static void ecore_emul_link_init_bb(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + u8 loopback = 0, port = p_hwfn->port_id * 2; + + /* XLPORT MAC MODE *//* 0 Quad, 4 Single... 
*/ + ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_MODE_REG, (0x4 << 4) | 0x4, 1, + port); + ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_MAC_CONTROL, 0, 1, port); + /* XLMAC: SOFT RESET */ + ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_CTRL, 0x40, 0, port); + /* XLMAC: Port Speed >= 10Gbps */ + ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_MODE, 0x40, 0, port); + /* XLMAC: Max Size */ + ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_RX_MAX_SIZE, 0x3fff, 0, port); + ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_TX_CTRL, + 0x01000000800ULL | (0xa << 12) | ((u64)1 << 38), + 0, port); + ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_PAUSE_CTRL, 0x7c000, 0, port); + ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_PFC_CTRL, + 0x30ffffc000ULL, 0, port); + ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_CTRL, 0x3 | (loopback << 2), 0, + port); /* XLMAC: TX_EN, RX_EN */ + /* XLMAC: TX_EN, RX_EN, SW_LINK_STATUS */ + ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_CTRL, + 0x1003 | (loopback << 2), 0, port); + /* Enabled Parallel PFC interface */ + ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_FLOW_CONTROL_CONFIG, 1, 0, port); + + /* XLPORT port enable */ + ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_ENABLE_REG, 0xf, 1, port); +} + +static void ecore_emul_link_init_ah(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + u32 mac_base, mac_config_val = 0xa853; + u8 port = p_hwfn->port_id; + + ecore_wr(p_hwfn, p_ptt, CNIG_REG_NIG_PORT0_CONF_K2 + (port << 2), + (1 << CNIG_REG_NIG_PORT0_CONF_NIG_PORT_ENABLE_0_K2_SHIFT) | + (port << + CNIG_REG_NIG_PORT0_CONF_NIG_PORT_NWM_PORT_MAP_0_K2_SHIFT) | + (0 << CNIG_REG_NIG_PORT0_CONF_NIG_PORT_RATE_0_K2_SHIFT)); + + mac_base = NWM_REG_MAC0_K2 + (port << 2) * NWM_REG_MAC0_SIZE; + + ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_XIF_MODE_K2, + 1 << ETH_MAC_REG_XIF_MODE_XGMII_K2_SHIFT); + + ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_FRM_LENGTH_K2, + 9018 << ETH_MAC_REG_FRM_LENGTH_FRM_LENGTH_K2_SHIFT); + + ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_TX_IPG_LENGTH_K2, + 0xc << ETH_MAC_REG_TX_IPG_LENGTH_TXIPG_K2_SHIFT); + + ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_RX_FIFO_SECTIONS_K2, + 8 << ETH_MAC_REG_RX_FIFO_SECTIONS_RX_SECTION_FULL_K2_SHIFT); + + ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_TX_FIFO_SECTIONS_K2, + (0xA << + ETH_MAC_REG_TX_FIFO_SECTIONS_TX_SECTION_EMPTY_K2_SHIFT) | + (8 << + ETH_MAC_REG_TX_FIFO_SECTIONS_TX_SECTION_FULL_K2_SHIFT)); + + /* Strip the CRC field from the frame */ + mac_config_val &= ~ETH_MAC_REG_COMMAND_CONFIG_CRC_FWD_K2; + ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_COMMAND_CONFIG_K2, + mac_config_val); +} + +static void ecore_emul_link_init(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + u8 port = ECORE_IS_BB(p_hwfn->p_dev) ? p_hwfn->port_id * 2 + : p_hwfn->port_id; + + DP_INFO(p_hwfn->p_dev, "Emulation: Configuring Link [port %02x]\n", + port); + + if (ECORE_IS_BB(p_hwfn->p_dev)) + ecore_emul_link_init_bb(p_hwfn, p_ptt); + else + ecore_emul_link_init_ah(p_hwfn, p_ptt); + + return; +} + +static void ecore_link_init_bb(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, u8 port) +{ + int port_offset = port ? 
0x800 : 0; + u32 xmac_rxctrl = 0; + + /* Reset of XMAC */ + /* FIXME: move to common start */ + ecore_wr(p_hwfn, p_ptt, MISC_REG_RESET_PL_PDA_VAUX + 2 * sizeof(u32), + MISC_REG_RESET_REG_2_XMAC_BIT); /* Clear */ + OSAL_MSLEEP(1); + ecore_wr(p_hwfn, p_ptt, MISC_REG_RESET_PL_PDA_VAUX + sizeof(u32), + MISC_REG_RESET_REG_2_XMAC_BIT); /* Set */ + + ecore_wr(p_hwfn, p_ptt, MISC_REG_XMAC_CORE_PORT_MODE_BB, 1); + + /* Set the number of ports on the Warp Core to 10G */ + ecore_wr(p_hwfn, p_ptt, MISC_REG_XMAC_PHY_PORT_MODE_BB, 3); + + /* Soft reset of XMAC */ + ecore_wr(p_hwfn, p_ptt, MISC_REG_RESET_PL_PDA_VAUX + 2 * sizeof(u32), + MISC_REG_RESET_REG_2_XMAC_SOFT_BIT); + OSAL_MSLEEP(1); + ecore_wr(p_hwfn, p_ptt, MISC_REG_RESET_PL_PDA_VAUX + sizeof(u32), + MISC_REG_RESET_REG_2_XMAC_SOFT_BIT); + + /* FIXME: move to common end */ + if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) + ecore_wr(p_hwfn, p_ptt, XMAC_REG_MODE_BB + port_offset, 0x20); + + /* Set Max packet size: initialize XMAC block register for port 0 */ + ecore_wr(p_hwfn, p_ptt, XMAC_REG_RX_MAX_SIZE_BB + port_offset, 0x2710); + + /* CRC append for Tx packets: init XMAC block register for port 1 */ + ecore_wr(p_hwfn, p_ptt, XMAC_REG_TX_CTRL_LO_BB + port_offset, 0xC800); + + /* Enable TX and RX: initialize XMAC block register for port 1 */ + ecore_wr(p_hwfn, p_ptt, XMAC_REG_CTRL_BB + port_offset, + XMAC_REG_CTRL_TX_EN_BB | XMAC_REG_CTRL_RX_EN_BB); + xmac_rxctrl = ecore_rd(p_hwfn, p_ptt, + XMAC_REG_RX_CTRL_BB + port_offset); + xmac_rxctrl |= XMAC_REG_RX_CTRL_PROCESS_VARIABLE_PREAMBLE_BB; + ecore_wr(p_hwfn, p_ptt, XMAC_REG_RX_CTRL_BB + port_offset, xmac_rxctrl); +} +#endif + +static u32 ecore_hw_norm_region_conn(struct ecore_hwfn *p_hwfn) +{ + u32 norm_region_conn; + + /* The order of CIDs allocation is according to the order of + * 'enum protocol_type'. Therefore, the number of CIDs for the normal + * region is calculated based on the CORE CIDs, in case of non-ETH + * personality, and otherwise - based on the ETH CIDs. + */ + norm_region_conn = + ecore_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_CORE) + + ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_CORE, + OSAL_NULL) + + ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, + OSAL_NULL); + + return norm_region_conn; +} + +static enum _ecore_status_t +ecore_hw_init_dpi_size(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, u32 pwm_region_size, u32 n_cpus) +{ + u32 dpi_bit_shift, dpi_count, dpi_page_size; + u32 min_dpis; + u32 n_wids; + + /* Calculate DPI size + * ------------------ + * The PWM region contains Doorbell Pages. The first is reserverd for + * the kernel for, e.g, L2. The others are free to be used by non- + * trusted applications, typically from user space. Each page, called a + * doorbell page is sectioned into windows that allow doorbells to be + * issued in parallel by the kernel/application. The size of such a + * window (a.k.a. WID) is 1kB. + * Summary: + * 1kB WID x N WIDS = DPI page size + * DPI page size x N DPIs = PWM region size + * Notes: + * The size of the DPI page size must be in multiples of OSAL_PAGE_SIZE + * in order to ensure that two applications won't share the same page. + * It also must contain at least one WID per CPU to allow parallelism. + * It also must be a power of 2, since it is stored as a bit shift. + * + * The DPI page size is stored in a register as 'dpi_bit_shift' so that + * 0 is 4kB, 1 is 8kB and etc. Hence the minimum size is 4,096 + * containing 4 WIDs. 
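+ * For example, with 16 online CPUs and a 4kB OSAL_PAGE_SIZE (illustrative
+ * values, assuming ECORE_MIN_WIDS <= 16):
+ *   n_wids        = max(ECORE_MIN_WIDS, 16)            = 16
+ *   dpi_page_size = 1kB (WID) * roundup_pow_of_two(16) = 16kB
+ *   dpi_bit_shift = log2(16kB / 4kB)                   = 2
+ *   dpi_count     = pwm_region_size / 16kB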
+ */ + n_wids = OSAL_MAX_T(u32, ECORE_MIN_WIDS, n_cpus); + dpi_page_size = ECORE_WID_SIZE * OSAL_ROUNDUP_POW_OF_TWO(n_wids); + dpi_page_size = (dpi_page_size + OSAL_PAGE_SIZE - 1) & + ~(OSAL_PAGE_SIZE - 1); + dpi_bit_shift = OSAL_LOG2(dpi_page_size / 4096); + dpi_count = pwm_region_size / dpi_page_size; + + min_dpis = p_hwfn->pf_params.rdma_pf_params.min_dpis; + min_dpis = OSAL_MAX_T(u32, ECORE_MIN_DPIS, min_dpis); + + /* Update hwfn */ + p_hwfn->dpi_size = dpi_page_size; + p_hwfn->dpi_count = dpi_count; + + /* Update registers */ + ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_DPI_BIT_SHIFT, dpi_bit_shift); + + if (dpi_count < min_dpis) + return ECORE_NORESOURCES; + + return ECORE_SUCCESS; +} + +enum ECORE_ROCE_EDPM_MODE { + ECORE_ROCE_EDPM_MODE_ENABLE = 0, + ECORE_ROCE_EDPM_MODE_FORCE_ON = 1, + ECORE_ROCE_EDPM_MODE_DISABLE = 2, +}; + +bool ecore_edpm_enabled(struct ecore_hwfn *p_hwfn) +{ + if (p_hwfn->dcbx_no_edpm || p_hwfn->db_bar_no_edpm) + return false; + + return true; +} + +static enum _ecore_status_t +ecore_hw_init_pf_doorbell_bar(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + u32 norm_region_conn, min_addr_reg1; + u32 pwm_regsize, norm_regsize; + u32 db_bar_size, n_cpus; + u32 roce_edpm_mode; + u32 pf_dems_shift; + enum _ecore_status_t rc = ECORE_SUCCESS; + u8 cond; + + db_bar_size = ecore_hw_bar_size(p_hwfn, p_ptt, BAR_ID_1); + if (ECORE_IS_CMT(p_hwfn->p_dev)) + db_bar_size /= 2; + + /* Calculate doorbell regions + * ----------------------------------- + * The doorbell BAR is made of two regions. The first is called normal + * region and the second is called PWM region. In the normal region + * each ICID has its own set of addresses so that writing to that + * specific address identifies the ICID. In the Process Window Mode + * region the ICID is given in the data written to the doorbell. The + * above per PF register denotes the offset in the doorbell BAR in which + * the PWM region begins. + * The normal region has ECORE_PF_DEMS_SIZE bytes per ICID, that is per + * non-PWM connection. The calculation below computes the total non-PWM + * connections. The DORQ_REG_PF_MIN_ADDR_REG1 register is + * in units of 4,096 bytes. + */ + norm_region_conn = ecore_hw_norm_region_conn(p_hwfn); + norm_regsize = ROUNDUP(ECORE_PF_DEMS_SIZE * norm_region_conn, + OSAL_PAGE_SIZE); + min_addr_reg1 = norm_regsize / 4096; + pwm_regsize = db_bar_size - norm_regsize; + + /* Check that the normal and PWM sizes are valid */ + if (db_bar_size < norm_regsize) { + DP_ERR(p_hwfn->p_dev, + "Doorbell BAR size 0x%x is too small (normal region is 0x%0x )\n", + db_bar_size, norm_regsize); + return ECORE_NORESOURCES; + } + if (pwm_regsize < ECORE_MIN_PWM_REGION) { + DP_ERR(p_hwfn->p_dev, + "PWM region size 0x%0x is too small. Should be at least 0x%0x (Doorbell BAR size is 0x%x and normal region size is 0x%0x)\n", + pwm_regsize, ECORE_MIN_PWM_REGION, db_bar_size, + norm_regsize); + return ECORE_NORESOURCES; + } + + /* Calculate number of DPIs */ + roce_edpm_mode = p_hwfn->pf_params.rdma_pf_params.roce_edpm_mode; + if ((roce_edpm_mode == ECORE_ROCE_EDPM_MODE_ENABLE) || + ((roce_edpm_mode == ECORE_ROCE_EDPM_MODE_FORCE_ON))) { + /* Either EDPM is mandatory, or we are attempting to allocate a + * WID per CPU. 
+ */ + n_cpus = OSAL_NUM_CPUS(); + rc = ecore_hw_init_dpi_size(p_hwfn, p_ptt, pwm_regsize, n_cpus); + } + + cond = ((rc != ECORE_SUCCESS) && + (roce_edpm_mode == ECORE_ROCE_EDPM_MODE_ENABLE)) || + (roce_edpm_mode == ECORE_ROCE_EDPM_MODE_DISABLE); + if (cond || p_hwfn->dcbx_no_edpm) { + /* Either EDPM is disabled from user configuration, or it is + * disabled via DCBx, or it is not mandatory and we failed to + * allocated a WID per CPU. + */ + n_cpus = 1; + rc = ecore_hw_init_dpi_size(p_hwfn, p_ptt, pwm_regsize, n_cpus); + + /* If we entered this flow due to DCBX then the DPM register is + * already configured. + */ + } + + DP_INFO(p_hwfn, + "doorbell bar: normal_region_size=%d, pwm_region_size=%d", + norm_regsize, pwm_regsize); + DP_INFO(p_hwfn, + " dpi_size=%d, dpi_count=%d, roce_edpm=%s\n", + p_hwfn->dpi_size, p_hwfn->dpi_count, + (!ecore_edpm_enabled(p_hwfn)) ? + "disabled" : "enabled"); + + /* Check return codes from above calls */ + if (rc != ECORE_SUCCESS) { + DP_ERR(p_hwfn, + "Failed to allocate enough DPIs\n"); + return ECORE_NORESOURCES; + } + + /* Update hwfn */ + p_hwfn->dpi_start_offset = norm_regsize; + + /* Update registers */ + /* DEMS size is configured log2 of DWORDs, hence the division by 4 */ + pf_dems_shift = OSAL_LOG2(ECORE_PF_DEMS_SIZE / 4); + ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_ICID_BIT_SHIFT_NORM, pf_dems_shift); + ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_MIN_ADDR_REG1, min_addr_reg1); + + return ECORE_SUCCESS; +} + +static enum _ecore_status_t ecore_hw_init_port(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + int hw_mode) +{ + struct ecore_dev *p_dev = p_hwfn->p_dev; + enum _ecore_status_t rc = ECORE_SUCCESS; + + /* In CMT the gate should be cleared by the 2nd hwfn */ + if (!ECORE_IS_CMT(p_dev) || !IS_LEAD_HWFN(p_hwfn)) + STORE_RT_REG(p_hwfn, NIG_REG_BRB_GATE_DNTFWD_PORT_RT_OFFSET, 0); + + rc = ecore_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id, + hw_mode); + if (rc != ECORE_SUCCESS) + return rc; + + ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_MASTER_WRITE_PAD_ENABLE, 0); + +#ifndef ASIC_ONLY + if (CHIP_REV_IS_FPGA(p_dev) && ECORE_IS_BB(p_dev)) + ecore_link_init_bb(p_hwfn, p_ptt, p_hwfn->port_id); + + if (CHIP_REV_IS_EMUL(p_dev)) { + if (ECORE_IS_CMT(p_dev)) { + /* Activate OPTE in CMT */ + u32 val; + + val = ecore_rd(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV); + val |= 0x10; + ecore_wr(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV, val); + ecore_wr(p_hwfn, p_ptt, MISC_REG_CLK_100G_MODE, 1); + ecore_wr(p_hwfn, p_ptt, MISCS_REG_CLK_100G_MODE, 1); + ecore_wr(p_hwfn, p_ptt, MISC_REG_OPTE_MODE, 1); + ecore_wr(p_hwfn, p_ptt, + NIG_REG_LLH_ENG_CLS_TCP_4_TUPLE_SEARCH, 1); + ecore_wr(p_hwfn, p_ptt, + NIG_REG_LLH_ENG_CLS_ENG_ID_TBL, 0x55555555); + ecore_wr(p_hwfn, p_ptt, + NIG_REG_LLH_ENG_CLS_ENG_ID_TBL + 0x4, + 0x55555555); + } + + /* Set the TAGMAC default function on the port if needed. + * The ppfid should be set in the vector, except in BB which has + * a bug in the LLH where the ppfid is actually engine based. 
+ */ + if (OSAL_TEST_BIT(ECORE_MF_NEED_DEF_PF, &p_dev->mf_bits)) { + u8 pf_id = p_hwfn->rel_pf_id; + + if (!ECORE_IS_BB(p_dev)) + pf_id /= p_dev->num_ports_in_engine; + ecore_wr(p_hwfn, p_ptt, + NIG_REG_LLH_TAGMAC_DEF_PF_VECTOR, 1 << pf_id); + } + + ecore_emul_link_init(p_hwfn, p_ptt); + } +#endif + + return ECORE_SUCCESS; +} + +static enum _ecore_status_t +ecore_hw_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, + int hw_mode, struct ecore_hw_init_params *p_params) +{ + u8 rel_pf_id = p_hwfn->rel_pf_id; + u32 prs_reg; + enum _ecore_status_t rc = ECORE_SUCCESS; + u16 ctrl; + int pos; + + if (p_hwfn->mcp_info) { + struct ecore_mcp_function_info *p_info; + + p_info = &p_hwfn->mcp_info->func_info; + if (p_info->bandwidth_min) + p_hwfn->qm_info.pf_wfq = p_info->bandwidth_min; + + /* Update rate limit once we'll actually have a link */ + p_hwfn->qm_info.pf_rl = 100000; + } + ecore_cxt_hw_init_pf(p_hwfn, p_ptt); + + ecore_int_igu_init_rt(p_hwfn); + + /* Set VLAN in NIG if needed */ + if (hw_mode & (1 << MODE_MF_SD)) { + DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "Configuring LLH_FUNC_TAG\n"); + STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET, 1); + STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET, + p_hwfn->hw_info.ovlan); + + DP_VERBOSE(p_hwfn, ECORE_MSG_HW, + "Configuring LLH_FUNC_FILTER_HDR_SEL\n"); + STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET, + 1); + } + + /* Enable classification by MAC if needed */ + if (hw_mode & (1 << MODE_MF_SI)) { + DP_VERBOSE(p_hwfn, ECORE_MSG_HW, + "Configuring TAGMAC_CLS_TYPE\n"); + STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET, + 1); + } + + /* Protocl Configuration - @@@TBD - should we set 0 otherwise? */ + STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET, + (p_hwfn->hw_info.personality == ECORE_PCI_ISCSI) ? 1 : 0); + STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET, + (p_hwfn->hw_info.personality == ECORE_PCI_FCOE) ? 1 : 0); + STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_ROCE_RT_OFFSET, 0); + + /* perform debug configuration when chip is out of reset */ + OSAL_BEFORE_PF_START((void *)p_hwfn->p_dev, p_hwfn->my_id); + + /* Sanity check before the PF init sequence that uses DMAE */ + rc = ecore_dmae_sanity(p_hwfn, p_ptt, "pf_phase"); + if (rc) + return rc; + + /* PF Init sequence */ + rc = ecore_init_run(p_hwfn, p_ptt, PHASE_PF, rel_pf_id, hw_mode); + if (rc) + return rc; + + /* QM_PF Init sequence (may be invoked separately e.g. for DCB) */ + rc = ecore_init_run(p_hwfn, p_ptt, PHASE_QM_PF, rel_pf_id, hw_mode); + if (rc) + return rc; + + ecore_fw_overlay_init_ram(p_hwfn, p_ptt, p_hwfn->fw_overlay_mem); + + /* Pure runtime initializations - directly to the HW */ + ecore_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true); + + /* PCI relaxed ordering causes a decrease in the performance on some + * systems. Till a root cause is found, disable this attribute in the + * PCI config space. 
+ */ + /* Not in use @DPDK + * pos = OSAL_PCI_FIND_CAPABILITY(p_hwfn->p_dev, PCI_CAP_ID_EXP); + * if (!pos) { + * DP_NOTICE(p_hwfn, true, + * "Failed to find the PCIe Cap\n"); + * return ECORE_IO; + * } + * OSAL_PCI_READ_CONFIG_WORD(p_hwfn->p_dev, pos + PCI_EXP_DEVCTL, &ctrl); + * ctrl &= ~PCI_EXP_DEVCTL_RELAX_EN; + * OSAL_PCI_WRITE_CONFIG_WORD(p_hwfn->p_dev, pos + PCI_EXP_DEVCTL, ctrl); + */ + + rc = ecore_hw_init_pf_doorbell_bar(p_hwfn, p_ptt); + if (rc != ECORE_SUCCESS) + return rc; + + /* Use the leading hwfn since in CMT only NIG #0 is operational */ + if (IS_LEAD_HWFN(p_hwfn)) { + rc = ecore_llh_hw_init_pf(p_hwfn, p_ptt, + p_params->avoid_eng_affin); + if (rc != ECORE_SUCCESS) + return rc; + } + + if (p_params->b_hw_start) { + /* enable interrupts */ + rc = ecore_int_igu_enable(p_hwfn, p_ptt, p_params->int_mode); + if (rc != ECORE_SUCCESS) + return rc; + + /* send function start command */ + rc = ecore_sp_pf_start(p_hwfn, p_ptt, p_params->p_tunn, + p_params->allow_npar_tx_switch); + if (rc) { + DP_NOTICE(p_hwfn, true, + "Function start ramrod failed\n"); + return rc; + } + prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1); + DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, + "PRS_REG_SEARCH_TAG1: %x\n", prs_reg); + + if (p_hwfn->hw_info.personality == ECORE_PCI_FCOE) { + ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1, + (1 << 2)); + ecore_wr(p_hwfn, p_ptt, + PRS_REG_PKT_LEN_STAT_TAGS_NOT_COUNTED_FIRST, + 0x100); + } + DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, + "PRS_REG_SEARCH registers after start PFn\n"); + prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP); + DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, + "PRS_REG_SEARCH_TCP: %x\n", prs_reg); + prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP); + DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, + "PRS_REG_SEARCH_UDP: %x\n", prs_reg); + prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE); + DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, + "PRS_REG_SEARCH_FCOE: %x\n", prs_reg); + prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE); + DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, + "PRS_REG_SEARCH_ROCE: %x\n", prs_reg); + prs_reg = ecore_rd(p_hwfn, p_ptt, + PRS_REG_SEARCH_TCP_FIRST_FRAG); + DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, + "PRS_REG_SEARCH_TCP_FIRST_FRAG: %x\n", + prs_reg); + prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1); + DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, + "PRS_REG_SEARCH_TAG1: %x\n", prs_reg); + } + return ECORE_SUCCESS; +} + +enum _ecore_status_t ecore_pglueb_set_pfid_enable(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + bool b_enable) +{ + u32 delay_idx = 0, val, set_val = b_enable ? 
1 : 0; + + /* Configure the PF's internal FID_enable for master transactions */ + ecore_wr(p_hwfn, p_ptt, + PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, set_val); + + /* Wait until value is set - try for 1 second every 50us */ + for (delay_idx = 0; delay_idx < 20000; delay_idx++) { + val = ecore_rd(p_hwfn, p_ptt, + PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER); + if (val == set_val) + break; + + OSAL_UDELAY(50); + } + + if (val != set_val) { + DP_NOTICE(p_hwfn, true, + "PFID_ENABLE_MASTER wasn't changed after a second\n"); + return ECORE_UNKNOWN_ERROR; + } + + return ECORE_SUCCESS; +} + +static void ecore_reset_mb_shadow(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_main_ptt) +{ + /* Read shadow of current MFW mailbox */ + ecore_mcp_read_mb(p_hwfn, p_main_ptt); + OSAL_MEMCPY(p_hwfn->mcp_info->mfw_mb_shadow, + p_hwfn->mcp_info->mfw_mb_cur, + p_hwfn->mcp_info->mfw_mb_length); +} + +static void ecore_pglueb_clear_err(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR, + 1 << p_hwfn->abs_pf_id); +} + +static enum _ecore_status_t +ecore_fill_load_req_params(struct ecore_hwfn *p_hwfn, + struct ecore_load_req_params *p_load_req, + struct ecore_drv_load_params *p_drv_load) +{ + /* Make sure that if ecore-client didn't provide inputs, all the + * expected defaults are indeed zero. + */ + OSAL_BUILD_BUG_ON(ECORE_DRV_ROLE_OS != 0); + OSAL_BUILD_BUG_ON(ECORE_LOAD_REQ_LOCK_TO_DEFAULT != 0); + OSAL_BUILD_BUG_ON(ECORE_OVERRIDE_FORCE_LOAD_NONE != 0); + + OSAL_MEM_ZERO(p_load_req, sizeof(*p_load_req)); + + if (p_drv_load == OSAL_NULL) + goto out; + + p_load_req->drv_role = p_drv_load->is_crash_kernel ? + ECORE_DRV_ROLE_KDUMP : + ECORE_DRV_ROLE_OS; + p_load_req->avoid_eng_reset = p_drv_load->avoid_eng_reset; + p_load_req->override_force_load = p_drv_load->override_force_load; + + /* Old MFW versions don't support timeout values other than default and + * none, so these values are replaced according to the fall-back action. + */ + + if (p_drv_load->mfw_timeout_val == ECORE_LOAD_REQ_LOCK_TO_DEFAULT || + p_drv_load->mfw_timeout_val == ECORE_LOAD_REQ_LOCK_TO_NONE || + (p_hwfn->mcp_info->capabilities & + FW_MB_PARAM_FEATURE_SUPPORT_DRV_LOAD_TO)) { + p_load_req->timeout_val = p_drv_load->mfw_timeout_val; + goto out; + } + + switch (p_drv_load->mfw_timeout_fallback) { + case ECORE_TO_FALLBACK_TO_NONE: + p_load_req->timeout_val = ECORE_LOAD_REQ_LOCK_TO_NONE; + break; + case ECORE_TO_FALLBACK_TO_DEFAULT: + p_load_req->timeout_val = ECORE_LOAD_REQ_LOCK_TO_DEFAULT; + break; + case ECORE_TO_FALLBACK_FAIL_LOAD: + DP_NOTICE(p_hwfn, false, + "Received %d as a value for MFW timeout while the MFW supports only default [%d] or none [%d]. Abort.\n", + p_drv_load->mfw_timeout_val, + ECORE_LOAD_REQ_LOCK_TO_DEFAULT, + ECORE_LOAD_REQ_LOCK_TO_NONE); + return ECORE_ABORTED; + } + + DP_INFO(p_hwfn, + "Modified the MFW timeout value from %d to %s [%d] due to lack of MFW support\n", + p_drv_load->mfw_timeout_val, + (p_load_req->timeout_val == ECORE_LOAD_REQ_LOCK_TO_DEFAULT) ? 
+ "default" : "none", + p_load_req->timeout_val); +out: + return ECORE_SUCCESS; +} + +enum _ecore_status_t ecore_vf_start(struct ecore_hwfn *p_hwfn, + struct ecore_hw_init_params *p_params) +{ + if (p_params->p_tunn) { + ecore_vf_set_vf_start_tunn_update_param(p_params->p_tunn); + ecore_vf_pf_tunnel_param_update(p_hwfn, p_params->p_tunn); + } + + p_hwfn->b_int_enabled = 1; + + return ECORE_SUCCESS; +} + +enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev, + struct ecore_hw_init_params *p_params) +{ + struct ecore_load_req_params load_req_params; + u32 load_code, resp, param, drv_mb_param; + bool b_default_mtu = true; + struct ecore_hwfn *p_hwfn; + const u32 *fw_overlays; + u32 fw_overlays_len; + enum _ecore_status_t rc = ECORE_SUCCESS; + u16 ether_type; + int i; + + if ((p_params->int_mode == ECORE_INT_MODE_MSI) && ECORE_IS_CMT(p_dev)) { + DP_NOTICE(p_dev, false, + "MSI mode is not supported for CMT devices\n"); + return ECORE_INVAL; + } + + if (IS_PF(p_dev)) { + rc = ecore_init_fw_data(p_dev, p_params->bin_fw_data); + if (rc != ECORE_SUCCESS) + return rc; + } + + for_each_hwfn(p_dev, i) { + p_hwfn = &p_dev->hwfns[i]; + + /* If management didn't provide a default, set one of our own */ + if (!p_hwfn->hw_info.mtu) { + p_hwfn->hw_info.mtu = 1500; + b_default_mtu = false; + } + + if (IS_VF(p_dev)) { + ecore_vf_start(p_hwfn, p_params); + continue; + } + + rc = ecore_calc_hw_mode(p_hwfn); + if (rc != ECORE_SUCCESS) + return rc; + + if (IS_PF(p_dev) && (OSAL_TEST_BIT(ECORE_MF_8021Q_TAGGING, + &p_dev->mf_bits) || + OSAL_TEST_BIT(ECORE_MF_8021AD_TAGGING, + &p_dev->mf_bits))) { + if (OSAL_TEST_BIT(ECORE_MF_8021Q_TAGGING, + &p_dev->mf_bits)) + ether_type = ETHER_TYPE_VLAN; + else + ether_type = ETHER_TYPE_QINQ; + STORE_RT_REG(p_hwfn, PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET, + ether_type); + STORE_RT_REG(p_hwfn, NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET, + ether_type); + STORE_RT_REG(p_hwfn, PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET, + ether_type); + STORE_RT_REG(p_hwfn, DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET, + ether_type); + } + + ecore_set_spq_block_timeout(p_hwfn, p_params->spq_timeout_ms); + + rc = ecore_fill_load_req_params(p_hwfn, &load_req_params, + p_params->p_drv_load_params); + if (rc != ECORE_SUCCESS) + return rc; + + rc = ecore_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt, + &load_req_params); + if (rc != ECORE_SUCCESS) { + DP_NOTICE(p_hwfn, false, + "Failed sending a LOAD_REQ command\n"); + return rc; + } + + load_code = load_req_params.load_code; + DP_VERBOSE(p_hwfn, ECORE_MSG_SP, + "Load request was sent. Load code: 0x%x\n", + load_code); + + ecore_mcp_set_capabilities(p_hwfn, p_hwfn->p_main_ptt); + + /* CQ75580: + * When coming back from hiberbate state, the registers from + * which shadow is read initially are not initialized. It turns + * out that these registers get initialized during the call to + * ecore_mcp_load_req request. So we need to reread them here + * to get the proper shadow register value. + * Note: This is a workaround for the missing MFW + * initialization. It may be removed once the implementation + * is done. + */ + ecore_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt); + + /* Only relevant for recovery: + * Clear the indication after the LOAD_REQ command is responded + * by the MFW. 
+ */ + p_dev->recov_in_prog = false; + + p_hwfn->first_on_engine = (load_code == + FW_MSG_CODE_DRV_LOAD_ENGINE); + + if (!qm_lock_ref_cnt) { +#ifdef CONFIG_ECORE_LOCK_ALLOC + rc = OSAL_SPIN_LOCK_ALLOC(p_hwfn, &qm_lock); + if (rc) { + DP_ERR(p_hwfn, "qm_lock allocation failed\n"); + goto qm_lock_fail; + } +#endif + OSAL_SPIN_LOCK_INIT(&qm_lock); + } + ++qm_lock_ref_cnt; + + /* Clean up chip from previous driver if such remains exist. + * This is not needed when the PF is the first one on the + * engine, since afterwards we are going to init the FW. + */ + if (load_code != FW_MSG_CODE_DRV_LOAD_ENGINE) { + rc = ecore_final_cleanup(p_hwfn, p_hwfn->p_main_ptt, + p_hwfn->rel_pf_id, false); + if (rc != ECORE_SUCCESS) { + ecore_hw_err_notify(p_hwfn, + ECORE_HW_ERR_RAMROD_FAIL); + goto load_err; + } + } + + /* Log and clear previous pglue_b errors if such exist */ + ecore_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_main_ptt, true); + + /* Enable the PF's internal FID_enable in the PXP */ + rc = ecore_pglueb_set_pfid_enable(p_hwfn, p_hwfn->p_main_ptt, + true); + if (rc != ECORE_SUCCESS) + goto load_err; + + /* Clear the pglue_b was_error indication. + * It must be done after the BME and the internal FID_enable for + * the PF are set, since VDMs may cause the indication to be set + * again. + */ + ecore_pglueb_clear_err(p_hwfn, p_hwfn->p_main_ptt); + + fw_overlays = p_dev->fw_data->fw_overlays; + fw_overlays_len = p_dev->fw_data->fw_overlays_len; + p_hwfn->fw_overlay_mem = + ecore_fw_overlay_mem_alloc(p_hwfn, fw_overlays, + fw_overlays_len); + if (!p_hwfn->fw_overlay_mem) { + DP_NOTICE(p_hwfn, false, + "Failed to allocate fw overlay memory\n"); + goto load_err; + } + + switch (load_code) { + case FW_MSG_CODE_DRV_LOAD_ENGINE: + rc = ecore_hw_init_common(p_hwfn, p_hwfn->p_main_ptt, + p_hwfn->hw_info.hw_mode); + if (rc != ECORE_SUCCESS) + break; + /* Fall into */ + case FW_MSG_CODE_DRV_LOAD_PORT: + rc = ecore_hw_init_port(p_hwfn, p_hwfn->p_main_ptt, + p_hwfn->hw_info.hw_mode); + if (rc != ECORE_SUCCESS) + break; + /* Fall into */ + case FW_MSG_CODE_DRV_LOAD_FUNCTION: + rc = ecore_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt, + p_hwfn->hw_info.hw_mode, + p_params); + break; + default: + DP_NOTICE(p_hwfn, false, + "Unexpected load code [0x%08x]", load_code); + rc = ECORE_NOTIMPL; + break; + } + + if (rc != ECORE_SUCCESS) { + DP_NOTICE(p_hwfn, false, + "init phase failed for loadcode 0x%x (rc %d)\n", + load_code, rc); + goto load_err; + } + + rc = ecore_mcp_load_done(p_hwfn, p_hwfn->p_main_ptt); + if (rc != ECORE_SUCCESS) { + DP_NOTICE(p_hwfn, false, + "Sending load done failed, rc = %d\n", rc); + if (rc == ECORE_NOMEM) { + DP_NOTICE(p_hwfn, false, + "Sending load done was failed due to memory allocation failure\n"); + goto load_err; + } + return rc; + } + + /* send DCBX attention request command */ + DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, + "sending phony dcbx set command to trigger DCBx attention handling\n"); + rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt, + DRV_MSG_CODE_SET_DCBX, + 1 << DRV_MB_PARAM_DCBX_NOTIFY_OFFSET, &resp, + ¶m); + if (rc != ECORE_SUCCESS) { + DP_NOTICE(p_hwfn, false, + "Failed to send DCBX attention request\n"); + return rc; + } + + p_hwfn->hw_init_done = true; + } + + if (IS_PF(p_dev)) { + /* Get pre-negotiated values for stag, bandwidth etc. 
*/
+ p_hwfn = ECORE_LEADING_HWFN(p_dev);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
+ "Sending GET_OEM_UPDATES command to trigger stag/bandwidth attention handling\n");
+ rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
+ DRV_MSG_CODE_GET_OEM_UPDATES,
+ 1 << DRV_MB_PARAM_DUMMY_OEM_UPDATES_OFFSET,
+ &resp, &param);
+ if (rc != ECORE_SUCCESS)
+ DP_NOTICE(p_hwfn, false,
+ "Failed to send GET_OEM_UPDATES attention request\n");
+ }
+
+ if (IS_PF(p_dev)) {
+ p_hwfn = ECORE_LEADING_HWFN(p_dev);
+ drv_mb_param = STORM_FW_VERSION;
+ rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
+ DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER,
+ drv_mb_param, &resp, &param);
+ if (rc != ECORE_SUCCESS)
+ DP_INFO(p_hwfn, "Failed to update firmware version\n");
+
+ if (!b_default_mtu) {
+ rc = ecore_mcp_ov_update_mtu(p_hwfn, p_hwfn->p_main_ptt,
+ p_hwfn->hw_info.mtu);
+ if (rc != ECORE_SUCCESS)
+ DP_INFO(p_hwfn, "Failed to update default mtu\n");
+ }
+
+ rc = ecore_mcp_ov_update_driver_state(p_hwfn,
+ p_hwfn->p_main_ptt,
+ ECORE_OV_DRIVER_STATE_DISABLED);
+ if (rc != ECORE_SUCCESS)
+ DP_INFO(p_hwfn, "Failed to update driver state\n");
+
+ rc = ecore_mcp_ov_update_eswitch(p_hwfn, p_hwfn->p_main_ptt,
+ ECORE_OV_ESWITCH_NONE);
+ if (rc != ECORE_SUCCESS)
+ DP_INFO(p_hwfn, "Failed to update eswitch mode\n");
+ }
+
+ return rc;
+
+load_err:
+ --qm_lock_ref_cnt;
+#ifdef CONFIG_ECORE_LOCK_ALLOC
+ if (!qm_lock_ref_cnt)
+ OSAL_SPIN_LOCK_DEALLOC(&qm_lock);
+qm_lock_fail:
+#endif
+ /* The MFW load lock should be released regardless of success or failure
+ * of initialization.
+ * TODO: replace this with an attempt to send cancel_load. 
+ */ + ecore_mcp_load_done(p_hwfn, p_hwfn->p_main_ptt); + return rc; +} + +#define ECORE_HW_STOP_RETRY_LIMIT (10) +static void ecore_hw_timers_stop(struct ecore_dev *p_dev, + struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + int i; + + /* close timers */ + ecore_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0); + ecore_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0); + for (i = 0; i < ECORE_HW_STOP_RETRY_LIMIT && !p_dev->recov_in_prog; + i++) { + if ((!ecore_rd(p_hwfn, p_ptt, + TM_REG_PF_SCAN_ACTIVE_CONN)) && + (!ecore_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK))) + break; + + /* Dependent on number of connection/tasks, possibly + * 1ms sleep is required between polls + */ + OSAL_MSLEEP(1); + } + + if (i < ECORE_HW_STOP_RETRY_LIMIT) + return; + + DP_NOTICE(p_hwfn, false, + "Timers linear scans are not over [Connection %02x Tasks %02x]\n", + (u8)ecore_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_CONN), + (u8)ecore_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK)); +} + +void ecore_hw_timers_stop_all(struct ecore_dev *p_dev) +{ + int j; + + for_each_hwfn(p_dev, j) { + struct ecore_hwfn *p_hwfn = &p_dev->hwfns[j]; + struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt; + + ecore_hw_timers_stop(p_dev, p_hwfn, p_ptt); + } +} + +static enum _ecore_status_t ecore_verify_reg_val(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 addr, u32 expected_val) +{ + u32 val = ecore_rd(p_hwfn, p_ptt, addr); + + if (val != expected_val) { + DP_NOTICE(p_hwfn, true, + "Value at address 0x%08x is 0x%08x while the expected value is 0x%08x\n", + addr, val, expected_val); + return ECORE_UNKNOWN_ERROR; + } + + return ECORE_SUCCESS; +} + +enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev) +{ + struct ecore_hwfn *p_hwfn; + struct ecore_ptt *p_ptt; + enum _ecore_status_t rc, rc2 = ECORE_SUCCESS; + int j; + + for_each_hwfn(p_dev, j) { + p_hwfn = &p_dev->hwfns[j]; + p_ptt = p_hwfn->p_main_ptt; + + DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN, "Stopping hw/fw\n"); + + if (IS_VF(p_dev)) { + ecore_vf_pf_int_cleanup(p_hwfn); + rc = ecore_vf_pf_reset(p_hwfn); + if (rc != ECORE_SUCCESS) { + DP_NOTICE(p_hwfn, true, + "ecore_vf_pf_reset failed. rc = %d.\n", + rc); + rc2 = ECORE_UNKNOWN_ERROR; + } + continue; + } + + /* mark the hw as uninitialized... */ + p_hwfn->hw_init_done = false; + + /* Send unload command to MCP */ + if (!p_dev->recov_in_prog) { + rc = ecore_mcp_unload_req(p_hwfn, p_ptt); + if (rc != ECORE_SUCCESS) { + DP_NOTICE(p_hwfn, false, + "Failed sending a UNLOAD_REQ command. rc = %d.\n", + rc); + rc2 = ECORE_UNKNOWN_ERROR; + } + } + + OSAL_DPC_SYNC(p_hwfn); + + /* After this point no MFW attentions are expected, e.g. prevent + * race between pf stop and dcbx pf update. + */ + + rc = ecore_sp_pf_stop(p_hwfn); + if (rc != ECORE_SUCCESS) { + DP_NOTICE(p_hwfn, false, + "Failed to close PF against FW [rc = %d]. 
Continue to stop HW to prevent illegal host access by the device.\n", + rc); + rc2 = ECORE_UNKNOWN_ERROR; + } + + OSAL_DPC_SYNC(p_hwfn); + + /* After this point we don't expect the FW to send us async + * events + */ + + /* perform debug action after PF stop was sent */ + OSAL_AFTER_PF_STOP((void *)p_dev, p_hwfn->my_id); + + /* close NIG to BRB gate */ + ecore_wr(p_hwfn, p_ptt, + NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1); + + /* close parser */ + ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0); + ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0); + ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0); + ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0); + ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0); + + /* @@@TBD - clean transmission queues (5.b) */ + /* @@@TBD - clean BTB (5.c) */ + + ecore_hw_timers_stop(p_dev, p_hwfn, p_ptt); + + /* @@@TBD - verify DMAE requests are done (8) */ + + /* Disable Attention Generation */ + ecore_int_igu_disable_int(p_hwfn, p_ptt); + ecore_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0); + ecore_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0); + ecore_int_igu_init_pure_rt(p_hwfn, p_ptt, false, true); + rc = ecore_int_igu_reset_cam_default(p_hwfn, p_ptt); + if (rc != ECORE_SUCCESS) { + DP_NOTICE(p_hwfn, true, + "Failed to return IGU CAM to default\n"); + rc2 = ECORE_UNKNOWN_ERROR; + } + + /* Need to wait 1ms to guarantee SBs are cleared */ + OSAL_MSLEEP(1); + + if (IS_LEAD_HWFN(p_hwfn) && + OSAL_TEST_BIT(ECORE_MF_LLH_MAC_CLSS, &p_dev->mf_bits) && + !ECORE_IS_FCOE_PERSONALITY(p_hwfn)) + ecore_llh_remove_mac_filter(p_dev, 0, + p_hwfn->hw_info.hw_mac_addr); + + if (!p_dev->recov_in_prog) { + ecore_verify_reg_val(p_hwfn, p_ptt, + QM_REG_USG_CNT_PF_TX, 0); + ecore_verify_reg_val(p_hwfn, p_ptt, + QM_REG_USG_CNT_PF_OTHER, 0); + /* @@@TBD - assert on incorrect xCFC values (10.b) */ + } + + /* Disable PF in HW blocks */ + ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_DB_ENABLE, 0); + ecore_wr(p_hwfn, p_ptt, QM_REG_PF_EN, 0); + + --qm_lock_ref_cnt; +#ifdef CONFIG_ECORE_LOCK_ALLOC + if (!qm_lock_ref_cnt) + OSAL_SPIN_LOCK_DEALLOC(&qm_lock); +#endif + + if (!p_dev->recov_in_prog) { + rc = ecore_mcp_unload_done(p_hwfn, p_ptt); + if (rc == ECORE_NOMEM) { + DP_NOTICE(p_hwfn, false, + "Failed sending an UNLOAD_DONE command due to a memory allocation failure. Resending.\n"); + rc = ecore_mcp_unload_done(p_hwfn, p_ptt); + } + if (rc != ECORE_SUCCESS) { + DP_NOTICE(p_hwfn, false, + "Failed sending a UNLOAD_DONE command. rc = %d.\n", + rc); + rc2 = ECORE_UNKNOWN_ERROR; + } + } + } /* hwfn loop */ + + if (IS_PF(p_dev) && !p_dev->recov_in_prog) { + p_hwfn = ECORE_LEADING_HWFN(p_dev); + p_ptt = ECORE_LEADING_HWFN(p_dev)->p_main_ptt; + + /* Clear the PF's internal FID_enable in the PXP. + * In CMT this should only be done for first hw-function, and + * only after all transactions have stopped for all active + * hw-functions. + */ + rc = ecore_pglueb_set_pfid_enable(p_hwfn, p_hwfn->p_main_ptt, + false); + if (rc != ECORE_SUCCESS) { + DP_NOTICE(p_hwfn, true, + "ecore_pglueb_set_pfid_enable() failed. 
rc = %d.\n", + rc); + rc2 = ECORE_UNKNOWN_ERROR; + } + } + + return rc2; +} + +enum _ecore_status_t ecore_hw_stop_fastpath(struct ecore_dev *p_dev) +{ + int j; + + for_each_hwfn(p_dev, j) { + struct ecore_hwfn *p_hwfn = &p_dev->hwfns[j]; + struct ecore_ptt *p_ptt; + + if (IS_VF(p_dev)) { + ecore_vf_pf_int_cleanup(p_hwfn); + continue; + } + p_ptt = ecore_ptt_acquire(p_hwfn); + if (!p_ptt) + return ECORE_AGAIN; + + DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN, + "Shutting down the fastpath\n"); + + ecore_wr(p_hwfn, p_ptt, + NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1); + + ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0); + ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0); + ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0); + ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0); + ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0); + + /* @@@TBD - clean transmission queues (5.b) */ + /* @@@TBD - clean BTB (5.c) */ + + /* @@@TBD - verify DMAE requests are done (8) */ + + ecore_int_igu_init_pure_rt(p_hwfn, p_ptt, false, false); + /* Need to wait 1ms to guarantee SBs are cleared */ + OSAL_MSLEEP(1); + ecore_ptt_release(p_hwfn, p_ptt); + } + + return ECORE_SUCCESS; +} + +enum _ecore_status_t ecore_hw_start_fastpath(struct ecore_hwfn *p_hwfn) +{ + struct ecore_ptt *p_ptt; + + if (IS_VF(p_hwfn->p_dev)) + return ECORE_SUCCESS; + + p_ptt = ecore_ptt_acquire(p_hwfn); + if (!p_ptt) + return ECORE_AGAIN; + + /* If roce info is allocated it means roce is initialized and should + * be enabled in searcher. + */ + if (p_hwfn->p_rdma_info) { + if (p_hwfn->b_rdma_enabled_in_prs) + ecore_wr(p_hwfn, p_ptt, + p_hwfn->rdma_prs_search_reg, 0x1); + ecore_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x1); + } + + /* Re-open incoming traffic */ + ecore_wr(p_hwfn, p_ptt, + NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0); + ecore_ptt_release(p_hwfn, p_ptt); + + return ECORE_SUCCESS; +} + +/* Free hwfn memory and resources acquired in hw_hwfn_prepare */ +static void ecore_hw_hwfn_free(struct ecore_hwfn *p_hwfn) +{ + ecore_ptt_pool_free(p_hwfn); + OSAL_FREE(p_hwfn->p_dev, p_hwfn->hw_info.p_igu_info); +} + +/* Setup bar access */ +static void ecore_hw_hwfn_prepare(struct ecore_hwfn *p_hwfn) +{ + /* clear indirect access */ + if (ECORE_IS_AH(p_hwfn->p_dev)) { + ecore_wr(p_hwfn, p_hwfn->p_main_ptt, + PGLUE_B_REG_PGL_ADDR_E8_F0_K2, 0); + ecore_wr(p_hwfn, p_hwfn->p_main_ptt, + PGLUE_B_REG_PGL_ADDR_EC_F0_K2, 0); + ecore_wr(p_hwfn, p_hwfn->p_main_ptt, + PGLUE_B_REG_PGL_ADDR_F0_F0_K2, 0); + ecore_wr(p_hwfn, p_hwfn->p_main_ptt, + PGLUE_B_REG_PGL_ADDR_F4_F0_K2, 0); + } else { + ecore_wr(p_hwfn, p_hwfn->p_main_ptt, + PGLUE_B_REG_PGL_ADDR_88_F0_BB, 0); + ecore_wr(p_hwfn, p_hwfn->p_main_ptt, + PGLUE_B_REG_PGL_ADDR_8C_F0_BB, 0); + ecore_wr(p_hwfn, p_hwfn->p_main_ptt, + PGLUE_B_REG_PGL_ADDR_90_F0_BB, 0); + ecore_wr(p_hwfn, p_hwfn->p_main_ptt, + PGLUE_B_REG_PGL_ADDR_94_F0_BB, 0); + } + + /* Clean previous pglue_b errors if such exist */ + ecore_pglueb_clear_err(p_hwfn, p_hwfn->p_main_ptt); + + /* enable internal target-read */ + ecore_wr(p_hwfn, p_hwfn->p_main_ptt, + PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1); +} + +static void get_function_id(struct ecore_hwfn *p_hwfn) +{ + /* ME Register */ + p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, + PXP_PF_ME_OPAQUE_ADDR); + + p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, PXP_PF_ME_CONCRETE_ADDR); + + /* Bits 16-19 from the ME registers are the pf_num */ + p_hwfn->abs_pf_id = (p_hwfn->hw_info.concrete_fid >> 16) & 0xf; + p_hwfn->rel_pf_id = GET_FIELD(p_hwfn->hw_info.concrete_fid, + 
PXP_CONCRETE_FID_PFID); + p_hwfn->port_id = GET_FIELD(p_hwfn->hw_info.concrete_fid, + PXP_CONCRETE_FID_PORT); + + DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE, + "Read ME register: Concrete 0x%08x Opaque 0x%04x\n", + p_hwfn->hw_info.concrete_fid, p_hwfn->hw_info.opaque_fid); +} + +static void ecore_hw_set_feat(struct ecore_hwfn *p_hwfn) +{ + u32 *feat_num = p_hwfn->hw_info.feat_num; + struct ecore_sb_cnt_info sb_cnt; + u32 non_l2_sbs = 0; + + OSAL_MEM_ZERO(&sb_cnt, sizeof(sb_cnt)); + ecore_int_get_num_sbs(p_hwfn, &sb_cnt); + + /* L2 Queues require each: 1 status block. 1 L2 queue */ + if (ECORE_IS_L2_PERSONALITY(p_hwfn)) { + /* Start by allocating VF queues, then PF's */ + feat_num[ECORE_VF_L2_QUE] = + OSAL_MIN_T(u32, + RESC_NUM(p_hwfn, ECORE_L2_QUEUE), + sb_cnt.iov_cnt); + feat_num[ECORE_PF_L2_QUE] = + OSAL_MIN_T(u32, + sb_cnt.cnt - non_l2_sbs, + RESC_NUM(p_hwfn, ECORE_L2_QUEUE) - + FEAT_NUM(p_hwfn, ECORE_VF_L2_QUE)); + } + + if (ECORE_IS_FCOE_PERSONALITY(p_hwfn) || + ECORE_IS_ISCSI_PERSONALITY(p_hwfn)) { + u32 *p_storage_feat = ECORE_IS_FCOE_PERSONALITY(p_hwfn) ? + &feat_num[ECORE_FCOE_CQ] : + &feat_num[ECORE_ISCSI_CQ]; + u32 limit = sb_cnt.cnt; + + /* The number of queues should not exceed the number of FP SBs. + * In storage target, the queues are divided into pairs of a CQ + * and a CmdQ, and each pair uses a single SB. The limit in + * this case should allow a max ratio of 2:1 instead of 1:1. + */ + if (p_hwfn->p_dev->b_is_target) + limit *= 2; + *p_storage_feat = OSAL_MIN_T(u32, limit, + RESC_NUM(p_hwfn, ECORE_CMDQS_CQS)); + + /* @DPDK */ + /* The size of "cq_cmdq_sb_num_arr" in the fcoe/iscsi init + * ramrod is limited to "NUM_OF_GLOBAL_QUEUES / 2". + */ + *p_storage_feat = OSAL_MIN_T(u32, *p_storage_feat, + (NUM_OF_GLOBAL_QUEUES / 2)); + } + + DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE, + "#PF_L2_QUEUE=%d VF_L2_QUEUES=%d #ROCE_CNQ=%d #FCOE_CQ=%d #ISCSI_CQ=%d #SB=%d\n", + (int)FEAT_NUM(p_hwfn, ECORE_PF_L2_QUE), + (int)FEAT_NUM(p_hwfn, ECORE_VF_L2_QUE), + (int)FEAT_NUM(p_hwfn, ECORE_RDMA_CNQ), + (int)FEAT_NUM(p_hwfn, ECORE_FCOE_CQ), + (int)FEAT_NUM(p_hwfn, ECORE_ISCSI_CQ), + (int)sb_cnt.cnt); +} + +const char *ecore_hw_get_resc_name(enum ecore_resources res_id) +{ + switch (res_id) { + case ECORE_L2_QUEUE: + return "L2_QUEUE"; + case ECORE_VPORT: + return "VPORT"; + case ECORE_RSS_ENG: + return "RSS_ENG"; + case ECORE_PQ: + return "PQ"; + case ECORE_RL: + return "RL"; + case ECORE_MAC: + return "MAC"; + case ECORE_VLAN: + return "VLAN"; + case ECORE_RDMA_CNQ_RAM: + return "RDMA_CNQ_RAM"; + case ECORE_ILT: + return "ILT"; + case ECORE_LL2_QUEUE: + return "LL2_QUEUE"; + case ECORE_CMDQS_CQS: + return "CMDQS_CQS"; + case ECORE_RDMA_STATS_QUEUE: + return "RDMA_STATS_QUEUE"; + case ECORE_BDQ: + return "BDQ"; + case ECORE_SB: + return "SB"; + default: + return "UNKNOWN_RESOURCE"; + } +} + +static enum _ecore_status_t +__ecore_hw_set_soft_resc_size(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + enum ecore_resources res_id, + u32 resc_max_val, + u32 *p_mcp_resp) +{ + enum _ecore_status_t rc; + + rc = ecore_mcp_set_resc_max_val(p_hwfn, p_ptt, res_id, + resc_max_val, p_mcp_resp); + if (rc != ECORE_SUCCESS) { + DP_NOTICE(p_hwfn, false, + "MFW response failure for a max value setting of resource %d [%s]\n", + res_id, ecore_hw_get_resc_name(res_id)); + return rc; + } + + if (*p_mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK) + DP_INFO(p_hwfn, + "Failed to set the max value of resource %d [%s]. 
mcp_resp = 0x%08x.\n", + res_id, ecore_hw_get_resc_name(res_id), *p_mcp_resp); + + return ECORE_SUCCESS; +} + +#define RDMA_NUM_STATISTIC_COUNTERS_K2 MAX_NUM_VPORTS_K2 +#define RDMA_NUM_STATISTIC_COUNTERS_BB MAX_NUM_VPORTS_BB + +static u32 ecore_hsi_def_val[][MAX_CHIP_IDS] = { + {MAX_NUM_VFS_BB, MAX_NUM_VFS_K2}, + {MAX_NUM_L2_QUEUES_BB, MAX_NUM_L2_QUEUES_K2}, + {MAX_NUM_PORTS_BB, MAX_NUM_PORTS_K2}, + {MAX_SB_PER_PATH_BB, MAX_SB_PER_PATH_K2, }, + {MAX_NUM_PFS_BB, MAX_NUM_PFS_K2}, + {MAX_NUM_VPORTS_BB, MAX_NUM_VPORTS_K2}, + {ETH_RSS_ENGINE_NUM_BB, ETH_RSS_ENGINE_NUM_K2}, + {MAX_QM_TX_QUEUES_BB, MAX_QM_TX_QUEUES_K2}, + {PXP_NUM_ILT_RECORDS_BB, PXP_NUM_ILT_RECORDS_K2}, + {RDMA_NUM_STATISTIC_COUNTERS_BB, RDMA_NUM_STATISTIC_COUNTERS_K2}, + {MAX_QM_GLOBAL_RLS, MAX_QM_GLOBAL_RLS}, + {PBF_MAX_CMD_LINES, PBF_MAX_CMD_LINES}, + {BTB_MAX_BLOCKS_BB, BTB_MAX_BLOCKS_K2}, +}; + +u32 ecore_get_hsi_def_val(struct ecore_dev *p_dev, enum ecore_hsi_def_type type) +{ + enum chip_ids chip_id = ECORE_IS_BB(p_dev) ? CHIP_BB : CHIP_K2; + + if (type >= ECORE_NUM_HSI_DEFS) { + DP_ERR(p_dev, "Unexpected HSI definition type [%d]\n", type); + return 0; + } + + return ecore_hsi_def_val[type][chip_id]; +} + +static enum _ecore_status_t +ecore_hw_set_soft_resc_size(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + u32 resc_max_val, mcp_resp; + u8 res_id; + enum _ecore_status_t rc; + + for (res_id = 0; res_id < ECORE_MAX_RESC; res_id++) { + /* @DPDK */ + switch (res_id) { + case ECORE_LL2_QUEUE: + case ECORE_RDMA_CNQ_RAM: + case ECORE_RDMA_STATS_QUEUE: + case ECORE_BDQ: + resc_max_val = 0; + break; + default: + continue; + } + + rc = __ecore_hw_set_soft_resc_size(p_hwfn, p_ptt, res_id, + resc_max_val, &mcp_resp); + if (rc != ECORE_SUCCESS) + return rc; + + /* There's no point to continue to the next resource if the + * command is not supported by the MFW. + * We do continue if the command is supported but the resource + * is unknown to the MFW. Such a resource will be later + * configured with the default allocation values. 
+ */ + if (mcp_resp == FW_MSG_CODE_UNSUPPORTED) + return ECORE_NOTIMPL; + } + + return ECORE_SUCCESS; +} + +static +enum _ecore_status_t ecore_hw_get_dflt_resc(struct ecore_hwfn *p_hwfn, + enum ecore_resources res_id, + u32 *p_resc_num, u32 *p_resc_start) +{ + u8 num_funcs = p_hwfn->num_funcs_on_engine; + struct ecore_dev *p_dev = p_hwfn->p_dev; + + switch (res_id) { + case ECORE_L2_QUEUE: + *p_resc_num = NUM_OF_L2_QUEUES(p_dev) / num_funcs; + break; + case ECORE_VPORT: + *p_resc_num = NUM_OF_VPORTS(p_dev) / num_funcs; + break; + case ECORE_RSS_ENG: + *p_resc_num = NUM_OF_RSS_ENGINES(p_dev) / num_funcs; + break; + case ECORE_PQ: + *p_resc_num = NUM_OF_QM_TX_QUEUES(p_dev) / num_funcs; + *p_resc_num &= ~0x7; /* The granularity of the PQs is 8 */ + break; + case ECORE_RL: + *p_resc_num = NUM_OF_QM_GLOBAL_RLS(p_dev) / num_funcs; + break; + case ECORE_MAC: + case ECORE_VLAN: + /* Each VFC resource can accommodate both a MAC and a VLAN */ + *p_resc_num = ETH_NUM_MAC_FILTERS / num_funcs; + break; + case ECORE_ILT: + *p_resc_num = NUM_OF_PXP_ILT_RECORDS(p_dev) / num_funcs; + break; + case ECORE_LL2_QUEUE: + *p_resc_num = MAX_NUM_LL2_RX_RAM_QUEUES / num_funcs; + break; + case ECORE_RDMA_CNQ_RAM: + case ECORE_CMDQS_CQS: + /* CNQ/CMDQS are the same resource */ + /* @DPDK */ + *p_resc_num = (NUM_OF_GLOBAL_QUEUES / 2) / num_funcs; + break; + case ECORE_RDMA_STATS_QUEUE: + *p_resc_num = NUM_OF_RDMA_STATISTIC_COUNTERS(p_dev) / num_funcs; + break; + case ECORE_BDQ: + /* @DPDK */ + *p_resc_num = 0; + break; + default: + break; + } + + + switch (res_id) { + case ECORE_BDQ: + if (!*p_resc_num) + *p_resc_start = 0; + break; + case ECORE_SB: + /* Since we want its value to reflect whether MFW supports + * the new scheme, have a default of 0. + */ + *p_resc_num = 0; + break; + default: + *p_resc_start = *p_resc_num * p_hwfn->enabled_func_idx; + break; + } + + return ECORE_SUCCESS; +} + +static enum _ecore_status_t +__ecore_hw_set_resc_info(struct ecore_hwfn *p_hwfn, enum ecore_resources res_id, + bool drv_resc_alloc) +{ + u32 dflt_resc_num = 0, dflt_resc_start = 0; + u32 mcp_resp, *p_resc_num, *p_resc_start; + enum _ecore_status_t rc; + + p_resc_num = &RESC_NUM(p_hwfn, res_id); + p_resc_start = &RESC_START(p_hwfn, res_id); + + rc = ecore_hw_get_dflt_resc(p_hwfn, res_id, &dflt_resc_num, + &dflt_resc_start); + if (rc != ECORE_SUCCESS) { + DP_ERR(p_hwfn, + "Failed to get default amount for resource %d [%s]\n", + res_id, ecore_hw_get_resc_name(res_id)); + return rc; + } + +#ifndef ASIC_ONLY + if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) { + *p_resc_num = dflt_resc_num; + *p_resc_start = dflt_resc_start; + goto out; + } +#endif + + rc = ecore_mcp_get_resc_info(p_hwfn, p_hwfn->p_main_ptt, res_id, + &mcp_resp, p_resc_num, p_resc_start); + if (rc != ECORE_SUCCESS) { + DP_NOTICE(p_hwfn, true, + "MFW response failure for an allocation request for" + " resource %d [%s]\n", + res_id, ecore_hw_get_resc_name(res_id)); + return rc; + } + + /* Default driver values are applied in the following cases: + * - The resource allocation MB command is not supported by the MFW + * - There is an internal error in the MFW while processing the request + * - The resource ID is unknown to the MFW + */ + if (mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK) { + DP_INFO(p_hwfn, + "Failed to receive allocation info for resource %d [%s]." + " mcp_resp = 0x%x. 
Applying default values" + " [%d,%d].\n", + res_id, ecore_hw_get_resc_name(res_id), mcp_resp, + dflt_resc_num, dflt_resc_start); + + *p_resc_num = dflt_resc_num; + *p_resc_start = dflt_resc_start; + goto out; + } + + if ((*p_resc_num != dflt_resc_num || + *p_resc_start != dflt_resc_start) && + res_id != ECORE_SB) { + DP_INFO(p_hwfn, + "MFW allocation for resource %d [%s] differs from default values [%d,%d vs. %d,%d]%s\n", + res_id, ecore_hw_get_resc_name(res_id), *p_resc_num, + *p_resc_start, dflt_resc_num, dflt_resc_start, + drv_resc_alloc ? " - Applying default values" : ""); + if (drv_resc_alloc) { + *p_resc_num = dflt_resc_num; + *p_resc_start = dflt_resc_start; + } + } +out: + return ECORE_SUCCESS; +} + +static enum _ecore_status_t ecore_hw_set_resc_info(struct ecore_hwfn *p_hwfn, + bool drv_resc_alloc) +{ + enum _ecore_status_t rc; + u8 res_id; + + for (res_id = 0; res_id < ECORE_MAX_RESC; res_id++) { + rc = __ecore_hw_set_resc_info(p_hwfn, res_id, drv_resc_alloc); + if (rc != ECORE_SUCCESS) + return rc; + } + + return ECORE_SUCCESS; +} + +#define ECORE_NONUSED_PPFID_MASK_BB_4P_LO_PORTS 0xaa +#define ECORE_NONUSED_PPFID_MASK_BB_4P_HI_PORTS 0x55 +#define ECORE_NONUSED_PPFID_MASK_AH_4P 0xf0 + +static enum _ecore_status_t ecore_hw_get_ppfid_bitmap(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + u8 native_ppfid_idx = ECORE_PPFID_BY_PFID(p_hwfn), new_bitmap; + struct ecore_dev *p_dev = p_hwfn->p_dev; + enum _ecore_status_t rc; + + rc = ecore_mcp_get_ppfid_bitmap(p_hwfn, p_ptt); + if (rc != ECORE_SUCCESS && rc != ECORE_NOTIMPL) + return rc; + else if (rc == ECORE_NOTIMPL) + p_dev->ppfid_bitmap = 0x1 << native_ppfid_idx; + + /* 4-ports mode has limitations that should be enforced: + * - BB: the MFW can access only PPFIDs which their corresponding PFIDs + * belong to this certain port. + * - AH: only 4 PPFIDs per port are available. + */ + if (ecore_device_num_ports(p_dev) == 4) { + u8 mask; + + if (ECORE_IS_BB(p_dev)) + mask = MFW_PORT(p_hwfn) > 1 ? + ECORE_NONUSED_PPFID_MASK_BB_4P_HI_PORTS : + ECORE_NONUSED_PPFID_MASK_BB_4P_LO_PORTS; + else + mask = ECORE_NONUSED_PPFID_MASK_AH_4P; + + if (p_dev->ppfid_bitmap & mask) { + new_bitmap = p_dev->ppfid_bitmap & ~mask; + DP_INFO(p_hwfn, + "Fix the PPFID bitmap for 4-ports mode: 0x%hhx -> 0x%hhx\n", + p_dev->ppfid_bitmap, new_bitmap); + p_dev->ppfid_bitmap = new_bitmap; + } + } + + /* The native PPFID is expected to be part of the allocated bitmap */ + if (!(p_dev->ppfid_bitmap & (0x1 << native_ppfid_idx))) { + new_bitmap = 0x1 << native_ppfid_idx; + DP_INFO(p_hwfn, + "Fix the PPFID bitmap to inculde the native PPFID: %hhd -> 0x%hhx\n", + p_dev->ppfid_bitmap, new_bitmap); + p_dev->ppfid_bitmap = new_bitmap; + } + + return ECORE_SUCCESS; +} + +static enum _ecore_status_t ecore_hw_get_resc(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + bool drv_resc_alloc) +{ + struct ecore_resc_unlock_params resc_unlock_params; + struct ecore_resc_lock_params resc_lock_params; + struct ecore_dev *p_dev = p_hwfn->p_dev; + u32 max_ilt_lines; + u8 res_id; + enum _ecore_status_t rc; +#ifndef ASIC_ONLY + u32 *resc_start = p_hwfn->hw_info.resc_start; + u32 *resc_num = p_hwfn->hw_info.resc_num; + /* For AH, an equal share of the ILT lines between the maximal number of + * PFs is not enough for RoCE. This would be solved by the future + * resource allocation scheme, but isn't currently present for + * FPGA/emulation. For now we keep a number that is sufficient for RoCE + * to work - the BB number of ILT lines divided by its max PFs number. 
+ */ + u32 roce_min_ilt_lines = PXP_NUM_ILT_RECORDS_BB / MAX_NUM_PFS_BB; +#endif + + /* Setting the max values of the soft resources and the following + * resources allocation queries should be atomic. Since several PFs can + * run in parallel - a resource lock is needed. + * If either the resource lock or resource set value commands are not + * supported - skip the max values setting, release the lock if + * needed, and proceed to the queries. Other failures, including a + * failure to acquire the lock, will cause this function to fail. + * Old drivers that don't acquire the lock can run in parallel, and + * their allocation values won't be affected by the updated max values. + */ + ecore_mcp_resc_lock_default_init(&resc_lock_params, &resc_unlock_params, + ECORE_RESC_LOCK_RESC_ALLOC, false); + + rc = ecore_mcp_resc_lock(p_hwfn, p_ptt, &resc_lock_params); + if (rc != ECORE_SUCCESS && rc != ECORE_NOTIMPL) { + return rc; + } else if (rc == ECORE_NOTIMPL) { + DP_INFO(p_hwfn, + "Skip the max values setting of the soft resources since the resource lock is not supported by the MFW\n"); + } else if (rc == ECORE_SUCCESS && !resc_lock_params.b_granted) { + DP_NOTICE(p_hwfn, false, + "Failed to acquire the resource lock for the resource allocation commands\n"); + rc = ECORE_BUSY; + goto unlock_and_exit; + } else { + rc = ecore_hw_set_soft_resc_size(p_hwfn, p_ptt); + if (rc != ECORE_SUCCESS && rc != ECORE_NOTIMPL) { + DP_NOTICE(p_hwfn, false, + "Failed to set the max values of the soft resources\n"); + goto unlock_and_exit; + } else if (rc == ECORE_NOTIMPL) { + DP_INFO(p_hwfn, + "Skip the max values setting of the soft resources since it is not supported by the MFW\n"); + rc = ecore_mcp_resc_unlock(p_hwfn, p_ptt, + &resc_unlock_params); + if (rc != ECORE_SUCCESS) + DP_INFO(p_hwfn, + "Failed to release the resource lock for the resource allocation commands\n"); + } + } + + rc = ecore_hw_set_resc_info(p_hwfn, drv_resc_alloc); + if (rc != ECORE_SUCCESS) + goto unlock_and_exit; + + if (resc_lock_params.b_granted && !resc_unlock_params.b_released) { + rc = ecore_mcp_resc_unlock(p_hwfn, p_ptt, + &resc_unlock_params); + if (rc != ECORE_SUCCESS) + DP_INFO(p_hwfn, + "Failed to release the resource lock for the resource allocation commands\n"); + } + + /* PPFID bitmap */ + if (IS_LEAD_HWFN(p_hwfn)) { + rc = ecore_hw_get_ppfid_bitmap(p_hwfn, p_ptt); + if (rc != ECORE_SUCCESS) + return rc; + } + +#ifndef ASIC_ONLY + if (CHIP_REV_IS_EMUL(p_dev)) { + /* Reduced build contains less PQs */ + if (!(p_dev->b_is_emul_full)) { + resc_num[ECORE_PQ] = 32; + resc_start[ECORE_PQ] = resc_num[ECORE_PQ] * + p_hwfn->enabled_func_idx; + } + + /* For AH emulation, since we have a possible maximal number of + * 16 enabled PFs, in case there are not enough ILT lines - + * allocate only first PF as RoCE and have all the other as + * ETH-only with less ILT lines. + * In case we increase the number of ILT lines for PF0, we need + * also to correct the start value for PF1-15. 
+ */ + if (ECORE_IS_AH(p_dev) && p_dev->b_is_emul_full) { + if (!p_hwfn->rel_pf_id) { + resc_num[ECORE_ILT] = + OSAL_MAX_T(u32, resc_num[ECORE_ILT], + roce_min_ilt_lines); + } else if (resc_num[ECORE_ILT] < roce_min_ilt_lines) { + resc_start[ECORE_ILT] += roce_min_ilt_lines - + resc_num[ECORE_ILT]; + } + } + } +#endif + + /* Sanity for ILT */ + max_ilt_lines = NUM_OF_PXP_ILT_RECORDS(p_dev); + if (RESC_END(p_hwfn, ECORE_ILT) > max_ilt_lines) { + DP_NOTICE(p_hwfn, true, + "Can't assign ILT pages [%08x,...,%08x]\n", + RESC_START(p_hwfn, ECORE_ILT), RESC_END(p_hwfn, + ECORE_ILT) - + 1); + return ECORE_INVAL; + } + + /* This will also learn the number of SBs from MFW */ + if (ecore_int_igu_reset_cam(p_hwfn, p_ptt)) + return ECORE_INVAL; + + ecore_hw_set_feat(p_hwfn); + + DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE, + "The numbers for each resource are:\n"); + for (res_id = 0; res_id < ECORE_MAX_RESC; res_id++) + DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE, "%s = %d start = %d\n", + ecore_hw_get_resc_name(res_id), + RESC_NUM(p_hwfn, res_id), + RESC_START(p_hwfn, res_id)); + + return ECORE_SUCCESS; + +unlock_and_exit: + if (resc_lock_params.b_granted && !resc_unlock_params.b_released) + ecore_mcp_resc_unlock(p_hwfn, p_ptt, + &resc_unlock_params); + return rc; +} + +#ifndef ASIC_ONLY +static enum _ecore_status_t +ecore_emul_hw_get_nvm_info(struct ecore_hwfn *p_hwfn) +{ + if (IS_LEAD_HWFN(p_hwfn)) { + struct ecore_dev *p_dev = p_hwfn->p_dev; + + /* The MF mode on emulation is either default or NPAR 1.0 */ + p_dev->mf_bits = 1 << ECORE_MF_LLH_MAC_CLSS | + 1 << ECORE_MF_LLH_PROTO_CLSS | + 1 << ECORE_MF_LL2_NON_UNICAST; + if (p_hwfn->num_funcs_on_port > 1) + p_dev->mf_bits |= 1 << ECORE_MF_INTER_PF_SWITCH | + 1 << ECORE_MF_DISABLE_ARFS; + else + p_dev->mf_bits |= 1 << ECORE_MF_NEED_DEF_PF; + } + + return ECORE_SUCCESS; +} +#endif + +static enum _ecore_status_t +ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_hw_prepare_params *p_params) +{ + u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg, dcbx_mode; + u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities; + struct ecore_mcp_link_capabilities *p_caps; + struct ecore_mcp_link_params *link; + enum _ecore_status_t rc; + +#ifndef ASIC_ONLY + if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) + return ecore_emul_hw_get_nvm_info(p_hwfn); +#endif + + /* Read global nvm_cfg address */ + nvm_cfg_addr = ecore_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0); + + /* Verify MCP has initialized it */ + if (!nvm_cfg_addr) { + DP_NOTICE(p_hwfn, false, "Shared memory not initialized\n"); + if (p_params->b_relaxed_probe) + p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_NVM; + return ECORE_INVAL; + } + +/* Read nvm_cfg1 (Notice this is just offset, and not offsize (TBD) */ + + nvm_cfg1_offset = ecore_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4); + + addr = MCP_REG_SCRATCH + nvm_cfg1_offset + + OFFSETOF(struct nvm_cfg1, glob) + + OFFSETOF(struct nvm_cfg1_glob, core_cfg); + + core_cfg = ecore_rd(p_hwfn, p_ptt, addr); + + switch ((core_cfg & NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK) >> + NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET) { + case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_2X40G: + p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_2X40G; + break; + case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X50G: + p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_2X50G; + break; + case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_1X100G: + p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_1X100G; + break; + case NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X10G_F: + p_hwfn->hw_info.port_mode = 
ECORE_PORT_MODE_DE_4X10G_F; + break; + case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X10G_E: + p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_4X10G_E; + break; + case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X20G: + p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_4X20G; + break; + case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X40G: + p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_1X40G; + break; + case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G: + p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_2X25G; + break; + case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X10G: + p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_2X10G; + break; + case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G: + p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_1X25G; + break; + case NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X25G: + p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_4X25G; + break; + default: + DP_NOTICE(p_hwfn, true, "Unknown port mode in 0x%08x\n", + core_cfg); + break; + } + + /* Read DCBX configuration */ + port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset + + OFFSETOF(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]); + dcbx_mode = ecore_rd(p_hwfn, p_ptt, + port_cfg_addr + + OFFSETOF(struct nvm_cfg1_port, generic_cont0)); + dcbx_mode = (dcbx_mode & NVM_CFG1_PORT_DCBX_MODE_MASK) + >> NVM_CFG1_PORT_DCBX_MODE_OFFSET; + switch (dcbx_mode) { + case NVM_CFG1_PORT_DCBX_MODE_DYNAMIC: + p_hwfn->hw_info.dcbx_mode = ECORE_DCBX_VERSION_DYNAMIC; + break; + case NVM_CFG1_PORT_DCBX_MODE_CEE: + p_hwfn->hw_info.dcbx_mode = ECORE_DCBX_VERSION_CEE; + break; + case NVM_CFG1_PORT_DCBX_MODE_IEEE: + p_hwfn->hw_info.dcbx_mode = ECORE_DCBX_VERSION_IEEE; + break; + default: + p_hwfn->hw_info.dcbx_mode = ECORE_DCBX_VERSION_DISABLED; + } + + /* Read default link configuration */ + link = &p_hwfn->mcp_info->link_input; + p_caps = &p_hwfn->mcp_info->link_capabilities; + port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset + + OFFSETOF(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]); + link_temp = ecore_rd(p_hwfn, p_ptt, + port_cfg_addr + + OFFSETOF(struct nvm_cfg1_port, speed_cap_mask)); + link_temp &= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK; + link->speed.advertised_speeds = link_temp; + p_caps->speed_capabilities = link->speed.advertised_speeds; + + link_temp = ecore_rd(p_hwfn, p_ptt, + port_cfg_addr + + OFFSETOF(struct nvm_cfg1_port, link_settings)); + switch ((link_temp & NVM_CFG1_PORT_DRV_LINK_SPEED_MASK) >> + NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET) { + case NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG: + link->speed.autoneg = true; + break; + case NVM_CFG1_PORT_DRV_LINK_SPEED_1G: + link->speed.forced_speed = 1000; + break; + case NVM_CFG1_PORT_DRV_LINK_SPEED_10G: + link->speed.forced_speed = 10000; + break; + case NVM_CFG1_PORT_DRV_LINK_SPEED_25G: + link->speed.forced_speed = 25000; + break; + case NVM_CFG1_PORT_DRV_LINK_SPEED_40G: + link->speed.forced_speed = 40000; + break; + case NVM_CFG1_PORT_DRV_LINK_SPEED_50G: + link->speed.forced_speed = 50000; + break; + case NVM_CFG1_PORT_DRV_LINK_SPEED_BB_100G: + link->speed.forced_speed = 100000; + break; + default: + DP_NOTICE(p_hwfn, true, "Unknown Speed in 0x%08x\n", link_temp); + } + + p_caps->default_speed = link->speed.forced_speed; + p_caps->default_speed_autoneg = link->speed.autoneg; + + link_temp &= NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK; + link_temp >>= NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET; + link->pause.autoneg = !!(link_temp & + NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG); + link->pause.forced_rx = !!(link_temp & + NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX); + link->pause.forced_tx = !!(link_temp & + NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX); + link->loopback_mode = 
0; + + if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE) { + link_temp = ecore_rd(p_hwfn, p_ptt, port_cfg_addr + + OFFSETOF(struct nvm_cfg1_port, ext_phy)); + link_temp &= NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_MASK; + link_temp >>= NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_OFFSET; + p_caps->default_eee = ECORE_MCP_EEE_ENABLED; + link->eee.enable = true; + switch (link_temp) { + case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_DISABLED: + p_caps->default_eee = ECORE_MCP_EEE_DISABLED; + link->eee.enable = false; + break; + case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_BALANCED: + p_caps->eee_lpi_timer = EEE_TX_TIMER_USEC_BALANCED_TIME; + break; + case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_AGGRESSIVE: + p_caps->eee_lpi_timer = + EEE_TX_TIMER_USEC_AGGRESSIVE_TIME; + break; + case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_LOW_LATENCY: + p_caps->eee_lpi_timer = EEE_TX_TIMER_USEC_LATENCY_TIME; + break; + } + + link->eee.tx_lpi_timer = p_caps->eee_lpi_timer; + link->eee.tx_lpi_enable = link->eee.enable; + link->eee.adv_caps = ECORE_EEE_1G_ADV | ECORE_EEE_10G_ADV; + } else { + p_caps->default_eee = ECORE_MCP_EEE_UNSUPPORTED; + } + + DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, + "Read default link: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x\n EEE: %02x [%08x usec]", + link->speed.forced_speed, link->speed.advertised_speeds, + link->speed.autoneg, link->pause.autoneg, + p_caps->default_eee, p_caps->eee_lpi_timer); + + /* Read Multi-function information from shmem */ + addr = MCP_REG_SCRATCH + nvm_cfg1_offset + + OFFSETOF(struct nvm_cfg1, glob) + + OFFSETOF(struct nvm_cfg1_glob, generic_cont0); + + generic_cont0 = ecore_rd(p_hwfn, p_ptt, addr); + + mf_mode = (generic_cont0 & NVM_CFG1_GLOB_MF_MODE_MASK) >> + NVM_CFG1_GLOB_MF_MODE_OFFSET; + + switch (mf_mode) { + case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED: + p_hwfn->p_dev->mf_bits = 1 << ECORE_MF_OVLAN_CLSS; + break; + case NVM_CFG1_GLOB_MF_MODE_UFP: + p_hwfn->p_dev->mf_bits = 1 << ECORE_MF_OVLAN_CLSS | + 1 << ECORE_MF_UFP_SPECIFIC | + 1 << ECORE_MF_8021Q_TAGGING; + break; + case NVM_CFG1_GLOB_MF_MODE_BD: + p_hwfn->p_dev->mf_bits = 1 << ECORE_MF_OVLAN_CLSS | + 1 << ECORE_MF_LLH_PROTO_CLSS | + 1 << ECORE_MF_8021AD_TAGGING | + 1 << ECORE_MF_FIP_SPECIAL; + break; + case NVM_CFG1_GLOB_MF_MODE_NPAR1_0: + p_hwfn->p_dev->mf_bits = 1 << ECORE_MF_LLH_MAC_CLSS | + 1 << ECORE_MF_LLH_PROTO_CLSS | + 1 << ECORE_MF_LL2_NON_UNICAST | + 1 << ECORE_MF_INTER_PF_SWITCH | + 1 << ECORE_MF_DISABLE_ARFS; + break; + case NVM_CFG1_GLOB_MF_MODE_DEFAULT: + p_hwfn->p_dev->mf_bits = 1 << ECORE_MF_LLH_MAC_CLSS | + 1 << ECORE_MF_LLH_PROTO_CLSS | + 1 << ECORE_MF_LL2_NON_UNICAST; + if (ECORE_IS_BB(p_hwfn->p_dev)) + p_hwfn->p_dev->mf_bits |= 1 << ECORE_MF_NEED_DEF_PF; + break; + } + DP_INFO(p_hwfn, "Multi function mode is 0x%lx\n", + p_hwfn->p_dev->mf_bits); + + if (ECORE_IS_CMT(p_hwfn->p_dev)) + p_hwfn->p_dev->mf_bits |= (1 << ECORE_MF_DISABLE_ARFS); + + /* It's funny since we have another switch, but it's easier + * to throw this away in linux this way. Long term, it might be + * better to have have getters for needed ECORE_MF_* fields, + * convert client code and eliminate this. 
+ */ + switch (mf_mode) { + case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED: + case NVM_CFG1_GLOB_MF_MODE_BD: + p_hwfn->p_dev->mf_mode = ECORE_MF_OVLAN; + break; + case NVM_CFG1_GLOB_MF_MODE_NPAR1_0: + p_hwfn->p_dev->mf_mode = ECORE_MF_NPAR; + break; + case NVM_CFG1_GLOB_MF_MODE_DEFAULT: + p_hwfn->p_dev->mf_mode = ECORE_MF_DEFAULT; + break; + case NVM_CFG1_GLOB_MF_MODE_UFP: + p_hwfn->p_dev->mf_mode = ECORE_MF_UFP; + break; + } + + /* Read Multi-function information from shmem */ + addr = MCP_REG_SCRATCH + nvm_cfg1_offset + + OFFSETOF(struct nvm_cfg1, glob) + + OFFSETOF(struct nvm_cfg1_glob, device_capabilities); + + device_capabilities = ecore_rd(p_hwfn, p_ptt, addr); + if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET) + OSAL_SET_BIT(ECORE_DEV_CAP_ETH, + &p_hwfn->hw_info.device_capabilities); + if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_FCOE) + OSAL_SET_BIT(ECORE_DEV_CAP_FCOE, + &p_hwfn->hw_info.device_capabilities); + if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ISCSI) + OSAL_SET_BIT(ECORE_DEV_CAP_ISCSI, + &p_hwfn->hw_info.device_capabilities); + if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ROCE) + OSAL_SET_BIT(ECORE_DEV_CAP_ROCE, + &p_hwfn->hw_info.device_capabilities); + if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_IWARP) + OSAL_SET_BIT(ECORE_DEV_CAP_IWARP, + &p_hwfn->hw_info.device_capabilities); + + rc = ecore_mcp_fill_shmem_func_info(p_hwfn, p_ptt); + if (rc != ECORE_SUCCESS && p_params->b_relaxed_probe) { + rc = ECORE_SUCCESS; + p_params->p_relaxed_res = ECORE_HW_PREPARE_BAD_MCP; + } + + return rc; +} + +static void ecore_get_num_funcs(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + u8 num_funcs, enabled_func_idx = p_hwfn->rel_pf_id; + u32 reg_function_hide, tmp, eng_mask, low_pfs_mask; + struct ecore_dev *p_dev = p_hwfn->p_dev; + + num_funcs = ECORE_IS_AH(p_dev) ? MAX_NUM_PFS_K2 : MAX_NUM_PFS_BB; + + /* Bit 0 of MISCS_REG_FUNCTION_HIDE indicates whether the bypass values + * in the other bits are selected. + * Bits 1-15 are for functions 1-15, respectively, and their value is + * '0' only for enabled functions (function 0 always exists and + * enabled). + * In case of CMT in BB, only the "even" functions are enabled, and thus + * the number of functions for both hwfns is learnt from the same bits. 
+ */ + if (ECORE_IS_BB(p_dev) || ECORE_IS_AH(p_dev)) { + reg_function_hide = ecore_rd(p_hwfn, p_ptt, + MISCS_REG_FUNCTION_HIDE_BB_K2); + } else { /* E5 */ + reg_function_hide = 0; + } + + if (reg_function_hide & 0x1) { + if (ECORE_IS_BB(p_dev)) { + if (ECORE_PATH_ID(p_hwfn) && !ECORE_IS_CMT(p_dev)) { + num_funcs = 0; + eng_mask = 0xaaaa; + } else { + num_funcs = 1; + eng_mask = 0x5554; + } + } else { + num_funcs = 1; + eng_mask = 0xfffe; + } + + /* Get the number of the enabled functions on the engine */ + tmp = (reg_function_hide ^ 0xffffffff) & eng_mask; + while (tmp) { + if (tmp & 0x1) + num_funcs++; + tmp >>= 0x1; + } + + /* Get the PF index within the enabled functions */ + low_pfs_mask = (0x1 << p_hwfn->abs_pf_id) - 1; + tmp = reg_function_hide & eng_mask & low_pfs_mask; + while (tmp) { + if (tmp & 0x1) + enabled_func_idx--; + tmp >>= 0x1; + } + } + + p_hwfn->num_funcs_on_engine = num_funcs; + p_hwfn->enabled_func_idx = enabled_func_idx; + +#ifndef ASIC_ONLY + if (CHIP_REV_IS_FPGA(p_dev)) { + DP_NOTICE(p_hwfn, false, + "FPGA: Limit number of PFs to 4 [would affect resource allocation, needed for IOV]\n"); + p_hwfn->num_funcs_on_engine = 4; + } +#endif + + DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE, + "PF [rel_id %d, abs_id %d] occupies index %d within the %d enabled functions on the engine\n", + p_hwfn->rel_pf_id, p_hwfn->abs_pf_id, + p_hwfn->enabled_func_idx, p_hwfn->num_funcs_on_engine); +} + +#ifndef ASIC_ONLY +static void ecore_emul_hw_info_port_num(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + struct ecore_dev *p_dev = p_hwfn->p_dev; + u32 eco_reserved; + + /* MISCS_REG_ECO_RESERVED[15:12]: num of ports in an engine */ + eco_reserved = ecore_rd(p_hwfn, p_ptt, MISCS_REG_ECO_RESERVED); + switch ((eco_reserved & 0xf000) >> 12) { + case 1: + p_dev->num_ports_in_engine = 1; + break; + case 3: + p_dev->num_ports_in_engine = 2; + break; + case 0xf: + p_dev->num_ports_in_engine = 4; + break; + default: + DP_NOTICE(p_hwfn, false, + "Emulation: Unknown port mode [ECO_RESERVED 0x%08x]\n", + eco_reserved); + p_dev->num_ports_in_engine = 1; /* Default to something */ + break; + } + + p_dev->num_ports = p_dev->num_ports_in_engine * + ecore_device_num_engines(p_dev); +} +#endif + +/* Determine the number of ports of the device and per engine */ +static void ecore_hw_info_port_num(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + u32 addr, global_offsize, global_addr, port_mode; + struct ecore_dev *p_dev = p_hwfn->p_dev; + +#ifndef ASIC_ONLY + if (CHIP_REV_IS_TEDIBEAR(p_dev)) { + p_dev->num_ports_in_engine = 1; + p_dev->num_ports = 2; + return; + } + + if (CHIP_REV_IS_EMUL(p_dev)) { + ecore_emul_hw_info_port_num(p_hwfn, p_ptt); + return; + } +#endif + + /* In CMT there is always only one port */ + if (ECORE_IS_CMT(p_dev)) { + p_dev->num_ports_in_engine = 1; + p_dev->num_ports = 1; + return; + } + + /* Determine the number of ports per engine */ + port_mode = ecore_rd(p_hwfn, p_ptt, MISC_REG_PORT_MODE); + switch (port_mode) { + case 0x0: + p_dev->num_ports_in_engine = 1; + break; + case 0x1: + p_dev->num_ports_in_engine = 2; + break; + case 0x2: + p_dev->num_ports_in_engine = 4; + break; + default: + DP_NOTICE(p_hwfn, false, "Unknown port mode 0x%08x\n", + port_mode); + p_dev->num_ports_in_engine = 1; /* Default to something */ + break; + } + + /* Get the total number of ports of the device */ + addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base, + PUBLIC_GLOBAL); + global_offsize = ecore_rd(p_hwfn, p_ptt, addr); + global_addr = SECTION_ADDR(global_offsize, 0); + addr = 
global_addr + OFFSETOF(struct public_global, max_ports); + p_dev->num_ports = (u8)ecore_rd(p_hwfn, p_ptt, addr); +} + +static void ecore_mcp_get_eee_caps(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + struct ecore_mcp_link_capabilities *p_caps; + u32 eee_status; + + p_caps = &p_hwfn->mcp_info->link_capabilities; + if (p_caps->default_eee == ECORE_MCP_EEE_UNSUPPORTED) + return; + + p_caps->eee_speed_caps = 0; + eee_status = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr + + OFFSETOF(struct public_port, eee_status)); + eee_status = (eee_status & EEE_SUPPORTED_SPEED_MASK) >> + EEE_SUPPORTED_SPEED_OFFSET; + if (eee_status & EEE_1G_SUPPORTED) + p_caps->eee_speed_caps |= ECORE_EEE_1G_ADV; + if (eee_status & EEE_10G_ADV) + p_caps->eee_speed_caps |= ECORE_EEE_10G_ADV; +} + +static enum _ecore_status_t +ecore_get_hw_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, + enum ecore_pci_personality personality, + struct ecore_hw_prepare_params *p_params) +{ + bool drv_resc_alloc = p_params->drv_resc_alloc; + enum _ecore_status_t rc; + + if (IS_ECORE_PACING(p_hwfn)) { + DP_VERBOSE(p_hwfn->p_dev, ECORE_MSG_IOV, + "Skipping IOV as packet pacing is requested\n"); + } + + /* Since all information is common, only first hwfns should do this */ + if (IS_LEAD_HWFN(p_hwfn) && !IS_ECORE_PACING(p_hwfn)) { + rc = ecore_iov_hw_info(p_hwfn); + if (rc != ECORE_SUCCESS) { + if (p_params->b_relaxed_probe) + p_params->p_relaxed_res = + ECORE_HW_PREPARE_BAD_IOV; + else + return rc; + } + } + + if (IS_LEAD_HWFN(p_hwfn)) + ecore_hw_info_port_num(p_hwfn, p_ptt); + + ecore_mcp_get_capabilities(p_hwfn, p_ptt); + + rc = ecore_hw_get_nvm_info(p_hwfn, p_ptt, p_params); + if (rc != ECORE_SUCCESS) + return rc; + + rc = ecore_int_igu_read_cam(p_hwfn, p_ptt); + if (rc != ECORE_SUCCESS) { + if (p_params->b_relaxed_probe) + p_params->p_relaxed_res = ECORE_HW_PREPARE_BAD_IGU; + else + return rc; + } + +#ifndef ASIC_ONLY + if (CHIP_REV_IS_ASIC(p_hwfn->p_dev) && ecore_mcp_is_init(p_hwfn)) { +#endif + OSAL_MEMCPY(p_hwfn->hw_info.hw_mac_addr, + p_hwfn->mcp_info->func_info.mac, ETH_ALEN); +#ifndef ASIC_ONLY + } else { + static u8 mcp_hw_mac[6] = { 0, 2, 3, 4, 5, 6 }; + + OSAL_MEMCPY(p_hwfn->hw_info.hw_mac_addr, mcp_hw_mac, ETH_ALEN); + p_hwfn->hw_info.hw_mac_addr[5] = p_hwfn->abs_pf_id; + } +#endif + + if (ecore_mcp_is_init(p_hwfn)) { + if (p_hwfn->mcp_info->func_info.ovlan != ECORE_MCP_VLAN_UNSET) + p_hwfn->hw_info.ovlan = + p_hwfn->mcp_info->func_info.ovlan; + + ecore_mcp_cmd_port_init(p_hwfn, p_ptt); + + ecore_mcp_get_eee_caps(p_hwfn, p_ptt); + + ecore_mcp_read_ufp_config(p_hwfn, p_ptt); + } + + if (personality != ECORE_PCI_DEFAULT) { + p_hwfn->hw_info.personality = personality; + } else if (ecore_mcp_is_init(p_hwfn)) { + enum ecore_pci_personality protocol; + + protocol = p_hwfn->mcp_info->func_info.protocol; + p_hwfn->hw_info.personality = protocol; + } +#ifndef ASIC_ONLY + else if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) { + /* AH emulation: + * Allow only PF0 to be RoCE to overcome a lack of ILT lines. + */ + if (ECORE_IS_AH(p_hwfn->p_dev) && p_hwfn->rel_pf_id) + p_hwfn->hw_info.personality = ECORE_PCI_ETH; + else + p_hwfn->hw_info.personality = ECORE_PCI_ETH_ROCE; + } +#endif + + /* although in BB some constellations may support more than 4 tcs, + * that can result in performance penalty in some cases. 4 + * represents a good tradeoff between performance and flexibility. 
+ */ + if (IS_ECORE_PACING(p_hwfn)) + p_hwfn->hw_info.num_hw_tc = 1; + else + p_hwfn->hw_info.num_hw_tc = NUM_PHYS_TCS_4PORT_K2; + + /* start out with a single active tc. This can be increased either + * by dcbx negotiation or by upper layer driver + */ + p_hwfn->hw_info.num_active_tc = 1; + + ecore_get_num_funcs(p_hwfn, p_ptt); + + if (ecore_mcp_is_init(p_hwfn)) + p_hwfn->hw_info.mtu = p_hwfn->mcp_info->func_info.mtu; + + /* In case of forcing the driver's default resource allocation, calling + * ecore_hw_get_resc() should come after initializing the personality + * and after getting the number of functions, since the calculation of + * the resources/features depends on them. + * This order is not harmful if not forcing. + */ + rc = ecore_hw_get_resc(p_hwfn, p_ptt, drv_resc_alloc); + if (rc != ECORE_SUCCESS && p_params->b_relaxed_probe) { + rc = ECORE_SUCCESS; + p_params->p_relaxed_res = ECORE_HW_PREPARE_BAD_MCP; + } + + return rc; +} + +#define ECORE_MAX_DEVICE_NAME_LEN (8) + +void ecore_get_dev_name(struct ecore_dev *p_dev, u8 *name, u8 max_chars) +{ + u8 n; + + n = OSAL_MIN_T(u8, max_chars, ECORE_MAX_DEVICE_NAME_LEN); + OSAL_SNPRINTF((char *)name, n, "%s %c%d", + ECORE_IS_BB(p_dev) ? "BB" : "AH", + 'A' + p_dev->chip_rev, (int)p_dev->chip_metal); +} + +static enum _ecore_status_t ecore_get_dev_info(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + struct ecore_dev *p_dev = p_hwfn->p_dev; + u16 device_id_mask; + u32 tmp; + + /* Read Vendor Id / Device Id */ + OSAL_PCI_READ_CONFIG_WORD(p_dev, PCICFG_VENDOR_ID_OFFSET, + &p_dev->vendor_id); + OSAL_PCI_READ_CONFIG_WORD(p_dev, PCICFG_DEVICE_ID_OFFSET, + &p_dev->device_id); + + /* Determine type */ + device_id_mask = p_dev->device_id & ECORE_DEV_ID_MASK; + switch (device_id_mask) { + case ECORE_DEV_ID_MASK_BB: + p_dev->type = ECORE_DEV_TYPE_BB; + break; + case ECORE_DEV_ID_MASK_AH: + p_dev->type = ECORE_DEV_TYPE_AH; + break; + default: + DP_NOTICE(p_hwfn, true, "Unknown device id 0x%x\n", + p_dev->device_id); + return ECORE_ABORTED; + } + + tmp = ecore_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_NUM); + p_dev->chip_num = (u16)GET_FIELD(tmp, CHIP_NUM); + tmp = ecore_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_REV); + p_dev->chip_rev = (u8)GET_FIELD(tmp, CHIP_REV); + + /* Learn number of HW-functions */ + tmp = ecore_rd(p_hwfn, p_ptt, MISCS_REG_CMT_ENABLED_FOR_PAIR); + + if (tmp & (1 << p_hwfn->rel_pf_id)) { + DP_NOTICE(p_dev->hwfns, false, "device in CMT mode\n"); + p_dev->num_hwfns = 2; + } else { + p_dev->num_hwfns = 1; + } + +#ifndef ASIC_ONLY + if (CHIP_REV_IS_EMUL(p_dev) && ECORE_IS_BB(p_dev)) { + /* For some reason we have problems with this register + * in BB B0 emulation; Simply assume no CMT + */ + DP_NOTICE(p_dev->hwfns, false, + "device on emul - assume no CMT\n"); + p_dev->num_hwfns = 1; + } +#endif + + tmp = ecore_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_TEST_REG); + p_dev->chip_bond_id = (u8)GET_FIELD(tmp, CHIP_BOND_ID); + tmp = ecore_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_METAL); + p_dev->chip_metal = (u8)GET_FIELD(tmp, CHIP_METAL); + + DP_INFO(p_dev->hwfns, + "Chip details - %s %c%d, Num: %04x Rev: %02x Bond id: %02x Metal: %02x\n", + ECORE_IS_BB(p_dev) ? 
"BB" : "AH", + 'A' + p_dev->chip_rev, (int)p_dev->chip_metal, + p_dev->chip_num, p_dev->chip_rev, p_dev->chip_bond_id, + p_dev->chip_metal); + + if (ECORE_IS_BB_A0(p_dev)) { + DP_NOTICE(p_dev->hwfns, false, + "The chip type/rev (BB A0) is not supported!\n"); + return ECORE_ABORTED; + } +#ifndef ASIC_ONLY + if (CHIP_REV_IS_EMUL(p_dev) && ECORE_IS_AH(p_dev)) + ecore_wr(p_hwfn, p_ptt, MISCS_REG_PLL_MAIN_CTRL_4, 0x1); + + if (CHIP_REV_IS_EMUL(p_dev)) { + tmp = ecore_rd(p_hwfn, p_ptt, MISCS_REG_ECO_RESERVED); + + /* MISCS_REG_ECO_RESERVED[29]: full/reduced emulation build */ + p_dev->b_is_emul_full = !!(tmp & (1 << 29)); + + /* MISCS_REG_ECO_RESERVED[28]: emulation build w/ or w/o MAC */ + p_dev->b_is_emul_mac = !!(tmp & (1 << 28)); + + DP_NOTICE(p_hwfn, false, + "Emulation: Running on a %s build %s MAC\n", + p_dev->b_is_emul_full ? "full" : "reduced", + p_dev->b_is_emul_mac ? "with" : "without"); + } +#endif + + return ECORE_SUCCESS; +} + +#ifndef LINUX_REMOVE +void ecore_prepare_hibernate(struct ecore_dev *p_dev) +{ + int j; + + if (IS_VF(p_dev)) + return; + + for_each_hwfn(p_dev, j) { + struct ecore_hwfn *p_hwfn = &p_dev->hwfns[j]; + + DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN, + "Mark hw/fw uninitialized\n"); + + p_hwfn->hw_init_done = false; + + ecore_ptt_invalidate(p_hwfn); + } +} +#endif + +static enum _ecore_status_t +ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn, void OSAL_IOMEM *p_regview, + void OSAL_IOMEM *p_doorbells, u64 db_phys_addr, + struct ecore_hw_prepare_params *p_params) +{ + struct ecore_mdump_retain_data mdump_retain; + struct ecore_dev *p_dev = p_hwfn->p_dev; + struct ecore_mdump_info mdump_info; + enum _ecore_status_t rc = ECORE_SUCCESS; + + /* Split PCI bars evenly between hwfns */ + p_hwfn->regview = p_regview; + p_hwfn->doorbells = p_doorbells; + p_hwfn->db_phys_addr = db_phys_addr; + + if (IS_VF(p_dev)) + return ecore_vf_hw_prepare(p_hwfn, p_params); + + /* Validate that chip access is feasible */ + if (REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR) == 0xffffffff) { + DP_ERR(p_hwfn, + "Reading the ME register returns all Fs; Preventing further chip access\n"); + if (p_params->b_relaxed_probe) + p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_ME; + return ECORE_INVAL; + } + + get_function_id(p_hwfn); + + /* Allocate PTT pool */ + rc = ecore_ptt_pool_alloc(p_hwfn); + if (rc) { + DP_NOTICE(p_hwfn, false, "Failed to prepare hwfn's hw\n"); + if (p_params->b_relaxed_probe) + p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_MEM; + goto err0; + } + + /* Allocate the main PTT */ + p_hwfn->p_main_ptt = ecore_get_reserved_ptt(p_hwfn, RESERVED_PTT_MAIN); + + /* First hwfn learns basic information, e.g., number of hwfns */ + if (IS_LEAD_HWFN(p_hwfn)) { + rc = ecore_get_dev_info(p_hwfn, p_hwfn->p_main_ptt); + if (rc != ECORE_SUCCESS) { + if (p_params->b_relaxed_probe) + p_params->p_relaxed_res = + ECORE_HW_PREPARE_FAILED_DEV; + goto err1; + } + } + +#ifndef ASIC_ONLY + if (CHIP_REV_IS_SLOW(p_hwfn->p_dev) && !b_ptt_gtt_init) { + struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt; + u32 val; + + /* Initialize PTT/GTT (done by MFW on ASIC) */ + ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_START_INIT_PTT_GTT, 1); + OSAL_MSLEEP(10); + ecore_ptt_invalidate(p_hwfn); + val = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_INIT_DONE_PTT_GTT); + if (val != 1) { + DP_ERR(p_hwfn, + "PTT and GTT init in PGLUE_B didn't complete\n"); + goto err1; + } + + /* Clear a possible PGLUE_B parity from a previous GRC access */ + ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_PRTY_STS_WR_H_0, 0x380); + + b_ptt_gtt_init = true; + } +#endif + + /* 
Store the precompiled init data ptrs */ + if (IS_LEAD_HWFN(p_hwfn)) + ecore_init_iro_array(p_hwfn->p_dev); + + ecore_hw_hwfn_prepare(p_hwfn); + + /* Initialize MCP structure */ + rc = ecore_mcp_cmd_init(p_hwfn, p_hwfn->p_main_ptt); + if (rc) { + DP_NOTICE(p_hwfn, false, "Failed initializing mcp command\n"); + if (p_params->b_relaxed_probe) + p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_MEM; + goto err1; + } + + /* Read the device configuration information from the HW and SHMEM */ + rc = ecore_get_hw_info(p_hwfn, p_hwfn->p_main_ptt, + p_params->personality, p_params); + if (rc) { + DP_NOTICE(p_hwfn, false, "Failed to get HW information\n"); + goto err2; + } + + /* Sending a mailbox to the MFW should be after ecore_get_hw_info() is + * called, since among others it sets the ports number in an engine. + */ + if (p_params->initiate_pf_flr && IS_LEAD_HWFN(p_hwfn) && + !p_dev->recov_in_prog) { + rc = ecore_mcp_initiate_pf_flr(p_hwfn, p_hwfn->p_main_ptt); + if (rc != ECORE_SUCCESS) + DP_NOTICE(p_hwfn, false, "Failed to initiate PF FLR\n"); + + /* Workaround for MFW issue where PF FLR does not cleanup + * IGU block + */ + if (!(p_hwfn->mcp_info->capabilities & + FW_MB_PARAM_FEATURE_SUPPORT_IGU_CLEANUP)) + ecore_pf_flr_igu_cleanup(p_hwfn); + } + + /* Check if mdump logs/data are present and update the epoch value */ + if (IS_LEAD_HWFN(p_hwfn)) { + rc = ecore_mcp_mdump_get_info(p_hwfn, p_hwfn->p_main_ptt, + &mdump_info); + if (rc == ECORE_SUCCESS && mdump_info.num_of_logs) + DP_NOTICE(p_hwfn, false, + "* * * IMPORTANT - HW ERROR register dump captured by device * * *\n"); + + rc = ecore_mcp_mdump_get_retain(p_hwfn, p_hwfn->p_main_ptt, + &mdump_retain); + if (rc == ECORE_SUCCESS && mdump_retain.valid) + DP_NOTICE(p_hwfn, false, + "mdump retained data: epoch 0x%08x, pf 0x%x, status 0x%08x\n", + mdump_retain.epoch, mdump_retain.pf, + mdump_retain.status); + + ecore_mcp_mdump_set_values(p_hwfn, p_hwfn->p_main_ptt, + p_params->epoch); + } + + /* Allocate the init RT array and initialize the init-ops engine */ + rc = ecore_init_alloc(p_hwfn); + if (rc) { + DP_NOTICE(p_hwfn, false, "Failed to allocate the init array\n"); + if (p_params->b_relaxed_probe) + p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_MEM; + goto err2; + } +#ifndef ASIC_ONLY + if (CHIP_REV_IS_FPGA(p_dev)) { + if (ECORE_IS_AH(p_dev)) { + DP_NOTICE(p_hwfn, false, + "FPGA: workaround; Prevent DMAE parities\n"); + ecore_wr(p_hwfn, p_hwfn->p_main_ptt, + PCIE_REG_PRTY_MASK_K2, 7); + } + + DP_NOTICE(p_hwfn, false, + "FPGA: workaround: Set VF bar0 size\n"); + ecore_wr(p_hwfn, p_hwfn->p_main_ptt, + PGLUE_B_REG_VF_BAR0_SIZE_K2, 4); + } +#endif + + return rc; +err2: + if (IS_LEAD_HWFN(p_hwfn)) + ecore_iov_free_hw_info(p_dev); + ecore_mcp_free(p_hwfn); +err1: + ecore_hw_hwfn_free(p_hwfn); +err0: + return rc; +} + +enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev, + struct ecore_hw_prepare_params *p_params) +{ + struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); + enum _ecore_status_t rc; + + p_dev->chk_reg_fifo = p_params->chk_reg_fifo; + p_dev->allow_mdump = p_params->allow_mdump; + p_hwfn->b_en_pacing = p_params->b_en_pacing; + p_dev->b_is_target = p_params->b_is_target; + + if (p_params->b_relaxed_probe) + p_params->p_relaxed_res = ECORE_HW_PREPARE_SUCCESS; + + /* Initialize the first hwfn - will learn number of hwfns */ + rc = ecore_hw_prepare_single(p_hwfn, p_dev->regview, + p_dev->doorbells, p_dev->db_phys_addr, + p_params); + if (rc != ECORE_SUCCESS) + return rc; + + p_params->personality = p_hwfn->hw_info.personality; 
+ + /* Initialize 2nd hwfn if necessary */ + if (ECORE_IS_CMT(p_dev)) { + void OSAL_IOMEM *p_regview, *p_doorbell; + u8 OSAL_IOMEM *addr; + u64 db_phys_addr; + u32 offset; + + /* adjust bar offset for second engine */ + offset = ecore_hw_bar_size(p_hwfn, p_hwfn->p_main_ptt, + BAR_ID_0) / 2; + addr = (u8 OSAL_IOMEM *)p_dev->regview + offset; + p_regview = (void OSAL_IOMEM *)addr; + + offset = ecore_hw_bar_size(p_hwfn, p_hwfn->p_main_ptt, + BAR_ID_1) / 2; + addr = (u8 OSAL_IOMEM *)p_dev->doorbells + offset; + p_doorbell = (void OSAL_IOMEM *)addr; + db_phys_addr = p_dev->db_phys_addr + offset; + + p_dev->hwfns[1].b_en_pacing = p_params->b_en_pacing; + /* prepare second hw function */ + rc = ecore_hw_prepare_single(&p_dev->hwfns[1], p_regview, + p_doorbell, db_phys_addr, + p_params); + + /* in case of error, need to free the previously + * initiliazed hwfn 0. + */ + if (rc != ECORE_SUCCESS) { + if (p_params->b_relaxed_probe) + p_params->p_relaxed_res = + ECORE_HW_PREPARE_FAILED_ENG2; + + if (IS_PF(p_dev)) { + ecore_init_free(p_hwfn); + ecore_mcp_free(p_hwfn); + ecore_hw_hwfn_free(p_hwfn); + } else { + DP_NOTICE(p_dev, false, "What do we need to free when VF hwfn1 init fails\n"); + } + return rc; + } + } + + return rc; +} + +void ecore_hw_remove(struct ecore_dev *p_dev) +{ + struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); + int i; + + if (IS_PF(p_dev)) + ecore_mcp_ov_update_driver_state(p_hwfn, p_hwfn->p_main_ptt, + ECORE_OV_DRIVER_STATE_NOT_LOADED); + + for_each_hwfn(p_dev, i) { + struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; + + if (IS_VF(p_dev)) { + ecore_vf_pf_release(p_hwfn); + continue; + } + + ecore_init_free(p_hwfn); + ecore_hw_hwfn_free(p_hwfn); + ecore_mcp_free(p_hwfn); + +#ifdef CONFIG_ECORE_LOCK_ALLOC + OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->dmae_info.lock); +#endif + } + + ecore_iov_free_hw_info(p_dev); +} + +static void ecore_chain_free_next_ptr(struct ecore_dev *p_dev, + struct ecore_chain *p_chain) +{ + void *p_virt = p_chain->p_virt_addr, *p_virt_next = OSAL_NULL; + dma_addr_t p_phys = p_chain->p_phys_addr, p_phys_next = 0; + struct ecore_chain_next *p_next; + u32 size, i; + + if (!p_virt) + return; + + size = p_chain->elem_size * p_chain->usable_per_page; + + for (i = 0; i < p_chain->page_cnt; i++) { + if (!p_virt) + break; + + p_next = (struct ecore_chain_next *)((u8 *)p_virt + size); + p_virt_next = p_next->next_virt; + p_phys_next = HILO_DMA_REGPAIR(p_next->next_phys); + + OSAL_DMA_FREE_COHERENT(p_dev, p_virt, p_phys, + ECORE_CHAIN_PAGE_SIZE); + + p_virt = p_virt_next; + p_phys = p_phys_next; + } +} + +static void ecore_chain_free_single(struct ecore_dev *p_dev, + struct ecore_chain *p_chain) +{ + if (!p_chain->p_virt_addr) + return; + + OSAL_DMA_FREE_COHERENT(p_dev, p_chain->p_virt_addr, + p_chain->p_phys_addr, ECORE_CHAIN_PAGE_SIZE); +} + +static void ecore_chain_free_pbl(struct ecore_dev *p_dev, + struct ecore_chain *p_chain) +{ + void **pp_virt_addr_tbl = p_chain->pbl.pp_virt_addr_tbl; + u8 *p_pbl_virt = (u8 *)p_chain->pbl_sp.p_virt_table; + u32 page_cnt = p_chain->page_cnt, i, pbl_size; + + if (!pp_virt_addr_tbl) + return; + + if (!p_pbl_virt) + goto out; + + for (i = 0; i < page_cnt; i++) { + if (!pp_virt_addr_tbl[i]) + break; + + OSAL_DMA_FREE_COHERENT(p_dev, pp_virt_addr_tbl[i], + *(dma_addr_t *)p_pbl_virt, + ECORE_CHAIN_PAGE_SIZE); + + p_pbl_virt += ECORE_CHAIN_PBL_ENTRY_SIZE; + } + + pbl_size = page_cnt * ECORE_CHAIN_PBL_ENTRY_SIZE; + + if (!p_chain->b_external_pbl) + OSAL_DMA_FREE_COHERENT(p_dev, p_chain->pbl_sp.p_virt_table, + p_chain->pbl_sp.p_phys_table, 
pbl_size); +out: + OSAL_VFREE(p_dev, p_chain->pbl.pp_virt_addr_tbl); +} + +void ecore_chain_free(struct ecore_dev *p_dev, struct ecore_chain *p_chain) +{ + switch (p_chain->mode) { + case ECORE_CHAIN_MODE_NEXT_PTR: + ecore_chain_free_next_ptr(p_dev, p_chain); + break; + case ECORE_CHAIN_MODE_SINGLE: + ecore_chain_free_single(p_dev, p_chain); + break; + case ECORE_CHAIN_MODE_PBL: + ecore_chain_free_pbl(p_dev, p_chain); + break; + } +} + +static enum _ecore_status_t +ecore_chain_alloc_sanity_check(struct ecore_dev *p_dev, + enum ecore_chain_cnt_type cnt_type, + osal_size_t elem_size, u32 page_cnt) +{ + u64 chain_size = ELEMS_PER_PAGE(elem_size) * page_cnt; + + /* The actual chain size can be larger than the maximal possible value + * after rounding up the requested elements number to pages, and after + * taking into acount the unusuable elements (next-ptr elements). + * The size of a "u16" chain can be (U16_MAX + 1) since the chain + * size/capacity fields are of a u32 type. + */ + if ((cnt_type == ECORE_CHAIN_CNT_TYPE_U16 && + chain_size > ((u32)ECORE_U16_MAX + 1)) || + (cnt_type == ECORE_CHAIN_CNT_TYPE_U32 && + chain_size > ECORE_U32_MAX)) { + DP_NOTICE(p_dev, true, + "The actual chain size (0x%lx) is larger than the maximal possible value\n", + (unsigned long)chain_size); + return ECORE_INVAL; + } + + return ECORE_SUCCESS; +} + +static enum _ecore_status_t +ecore_chain_alloc_next_ptr(struct ecore_dev *p_dev, struct ecore_chain *p_chain) +{ + void *p_virt = OSAL_NULL, *p_virt_prev = OSAL_NULL; + dma_addr_t p_phys = 0; + u32 i; + + for (i = 0; i < p_chain->page_cnt; i++) { + p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys, + ECORE_CHAIN_PAGE_SIZE); + if (!p_virt) { + DP_NOTICE(p_dev, false, + "Failed to allocate chain memory\n"); + return ECORE_NOMEM; + } + + if (i == 0) { + ecore_chain_init_mem(p_chain, p_virt, p_phys); + ecore_chain_reset(p_chain); + } else { + ecore_chain_init_next_ptr_elem(p_chain, p_virt_prev, + p_virt, p_phys); + } + + p_virt_prev = p_virt; + } + /* Last page's next element should point to the beginning of the + * chain. + */ + ecore_chain_init_next_ptr_elem(p_chain, p_virt_prev, + p_chain->p_virt_addr, + p_chain->p_phys_addr); + + return ECORE_SUCCESS; +} + +static enum _ecore_status_t +ecore_chain_alloc_single(struct ecore_dev *p_dev, struct ecore_chain *p_chain) +{ + dma_addr_t p_phys = 0; + void *p_virt = OSAL_NULL; + + p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys, ECORE_CHAIN_PAGE_SIZE); + if (!p_virt) { + DP_NOTICE(p_dev, false, "Failed to allocate chain memory\n"); + return ECORE_NOMEM; + } + + ecore_chain_init_mem(p_chain, p_virt, p_phys); + ecore_chain_reset(p_chain); + + return ECORE_SUCCESS; +} + +static enum _ecore_status_t +ecore_chain_alloc_pbl(struct ecore_dev *p_dev, + struct ecore_chain *p_chain, + struct ecore_chain_ext_pbl *ext_pbl) +{ + u32 page_cnt = p_chain->page_cnt, size, i; + dma_addr_t p_phys = 0, p_pbl_phys = 0; + void **pp_virt_addr_tbl = OSAL_NULL; + u8 *p_pbl_virt = OSAL_NULL; + void *p_virt = OSAL_NULL; + + size = page_cnt * sizeof(*pp_virt_addr_tbl); + pp_virt_addr_tbl = (void **)OSAL_VZALLOC(p_dev, size); + if (!pp_virt_addr_tbl) { + DP_NOTICE(p_dev, false, + "Failed to allocate memory for the chain virtual addresses table\n"); + return ECORE_NOMEM; + } + + /* The allocation of the PBL table is done with its full size, since it + * is expected to be successive. 
+ * ecore_chain_init_pbl_mem() is called even in a case of an allocation + * failure, since pp_virt_addr_tbl was previously allocated, and it + * should be saved to allow its freeing during the error flow. + */ + size = page_cnt * ECORE_CHAIN_PBL_ENTRY_SIZE; + + if (ext_pbl == OSAL_NULL) { + p_pbl_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_pbl_phys, size); + } else { + p_pbl_virt = ext_pbl->p_pbl_virt; + p_pbl_phys = ext_pbl->p_pbl_phys; + p_chain->b_external_pbl = true; + } + + ecore_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys, + pp_virt_addr_tbl); + if (!p_pbl_virt) { + DP_NOTICE(p_dev, false, "Failed to allocate chain pbl memory\n"); + return ECORE_NOMEM; + } + + for (i = 0; i < page_cnt; i++) { + p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys, + ECORE_CHAIN_PAGE_SIZE); + if (!p_virt) { + DP_NOTICE(p_dev, false, + "Failed to allocate chain memory\n"); + return ECORE_NOMEM; + } + + if (i == 0) { + ecore_chain_init_mem(p_chain, p_virt, p_phys); + ecore_chain_reset(p_chain); + } + + /* Fill the PBL table with the physical address of the page */ + *(dma_addr_t *)p_pbl_virt = p_phys; + /* Keep the virtual address of the page */ + p_chain->pbl.pp_virt_addr_tbl[i] = p_virt; + + p_pbl_virt += ECORE_CHAIN_PBL_ENTRY_SIZE; + } + + return ECORE_SUCCESS; +} + +enum _ecore_status_t ecore_chain_alloc(struct ecore_dev *p_dev, + enum ecore_chain_use_mode intended_use, + enum ecore_chain_mode mode, + enum ecore_chain_cnt_type cnt_type, + u32 num_elems, osal_size_t elem_size, + struct ecore_chain *p_chain, + struct ecore_chain_ext_pbl *ext_pbl) +{ + u32 page_cnt; + enum _ecore_status_t rc = ECORE_SUCCESS; + + if (mode == ECORE_CHAIN_MODE_SINGLE) + page_cnt = 1; + else + page_cnt = ECORE_CHAIN_PAGE_CNT(num_elems, elem_size, mode); + + rc = ecore_chain_alloc_sanity_check(p_dev, cnt_type, elem_size, + page_cnt); + if (rc) { + DP_NOTICE(p_dev, false, + "Cannot allocate a chain with the given arguments:\n" + "[use_mode %d, mode %d, cnt_type %d, num_elems %d, elem_size %zu]\n", + intended_use, mode, cnt_type, num_elems, elem_size); + return rc; + } + + ecore_chain_init_params(p_chain, page_cnt, (u8)elem_size, intended_use, + mode, cnt_type, p_dev->dp_ctx); + + switch (mode) { + case ECORE_CHAIN_MODE_NEXT_PTR: + rc = ecore_chain_alloc_next_ptr(p_dev, p_chain); + break; + case ECORE_CHAIN_MODE_SINGLE: + rc = ecore_chain_alloc_single(p_dev, p_chain); + break; + case ECORE_CHAIN_MODE_PBL: + rc = ecore_chain_alloc_pbl(p_dev, p_chain, ext_pbl); + break; + } + if (rc) + goto nomem; + + return ECORE_SUCCESS; + +nomem: + ecore_chain_free(p_dev, p_chain); + return rc; +} + +enum _ecore_status_t ecore_fw_l2_queue(struct ecore_hwfn *p_hwfn, + u16 src_id, u16 *dst_id) +{ + if (src_id >= RESC_NUM(p_hwfn, ECORE_L2_QUEUE)) { + u16 min, max; + + min = (u16)RESC_START(p_hwfn, ECORE_L2_QUEUE); + max = min + RESC_NUM(p_hwfn, ECORE_L2_QUEUE); + DP_NOTICE(p_hwfn, true, + "l2_queue id [%d] is not valid, available indices [%d - %d]\n", + src_id, min, max); + + return ECORE_INVAL; + } + + *dst_id = RESC_START(p_hwfn, ECORE_L2_QUEUE) + src_id; + + return ECORE_SUCCESS; +} + +enum _ecore_status_t ecore_fw_vport(struct ecore_hwfn *p_hwfn, + u8 src_id, u8 *dst_id) +{ + if (src_id >= RESC_NUM(p_hwfn, ECORE_VPORT)) { + u8 min, max; + + min = (u8)RESC_START(p_hwfn, ECORE_VPORT); + max = min + RESC_NUM(p_hwfn, ECORE_VPORT); + DP_NOTICE(p_hwfn, true, + "vport id [%d] is not valid, available indices [%d - %d]\n", + src_id, min, max); + + return ECORE_INVAL; + } + + *dst_id = RESC_START(p_hwfn, ECORE_VPORT) + src_id; + + return 
ECORE_SUCCESS; +} + +enum _ecore_status_t ecore_fw_rss_eng(struct ecore_hwfn *p_hwfn, + u8 src_id, u8 *dst_id) +{ + if (src_id >= RESC_NUM(p_hwfn, ECORE_RSS_ENG)) { + u8 min, max; + + min = (u8)RESC_START(p_hwfn, ECORE_RSS_ENG); + max = min + RESC_NUM(p_hwfn, ECORE_RSS_ENG); + DP_NOTICE(p_hwfn, true, + "rss_eng id [%d] is not valid, available indices [%d - %d]\n", + src_id, min, max); + + return ECORE_INVAL; + } + + *dst_id = RESC_START(p_hwfn, ECORE_RSS_ENG) + src_id; + + return ECORE_SUCCESS; +} + +enum _ecore_status_t +ecore_llh_set_function_as_default(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + if (OSAL_TEST_BIT(ECORE_MF_NEED_DEF_PF, &p_hwfn->p_dev->mf_bits)) { + ecore_wr(p_hwfn, p_ptt, + NIG_REG_LLH_TAGMAC_DEF_PF_VECTOR, + 1 << p_hwfn->abs_pf_id / 2); + ecore_wr(p_hwfn, p_ptt, PRS_REG_MSG_INFO, 0); + return ECORE_SUCCESS; + } + + DP_NOTICE(p_hwfn, false, + "This function can't be set as default\n"); + return ECORE_INVAL; +} + +static enum _ecore_status_t ecore_set_coalesce(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 hw_addr, void *p_eth_qzone, + osal_size_t eth_qzone_size, + u8 timeset) +{ + struct coalescing_timeset *p_coal_timeset; + + if (p_hwfn->p_dev->int_coalescing_mode != ECORE_COAL_MODE_ENABLE) { + DP_NOTICE(p_hwfn, true, + "Coalescing configuration not enabled\n"); + return ECORE_INVAL; + } + + p_coal_timeset = p_eth_qzone; + OSAL_MEMSET(p_eth_qzone, 0, eth_qzone_size); + SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_TIMESET, timeset); + SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_VALID, 1); + ecore_memcpy_to(p_hwfn, p_ptt, hw_addr, p_eth_qzone, eth_qzone_size); + + return ECORE_SUCCESS; +} + +enum _ecore_status_t ecore_set_queue_coalesce(struct ecore_hwfn *p_hwfn, + u16 rx_coal, u16 tx_coal, + void *p_handle) +{ + struct ecore_queue_cid *p_cid = (struct ecore_queue_cid *)p_handle; + enum _ecore_status_t rc = ECORE_SUCCESS; + struct ecore_ptt *p_ptt; + + /* TODO - Configuring a single queue's coalescing but + * claiming all queues are abiding same configuration + * for PF and VF both. 
+ */ + + if (IS_VF(p_hwfn->p_dev)) + return ecore_vf_pf_set_coalesce(p_hwfn, rx_coal, + tx_coal, p_cid); + + p_ptt = ecore_ptt_acquire(p_hwfn); + if (!p_ptt) + return ECORE_AGAIN; + + if (rx_coal) { + rc = ecore_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid); + if (rc) + goto out; + p_hwfn->p_dev->rx_coalesce_usecs = rx_coal; + } + + if (tx_coal) { + rc = ecore_set_txq_coalesce(p_hwfn, p_ptt, tx_coal, p_cid); + if (rc) + goto out; + p_hwfn->p_dev->tx_coalesce_usecs = tx_coal; + } +out: + ecore_ptt_release(p_hwfn, p_ptt); + + return rc; +} + +enum _ecore_status_t ecore_set_rxq_coalesce(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u16 coalesce, + struct ecore_queue_cid *p_cid) +{ + struct ustorm_eth_queue_zone eth_qzone; + u8 timeset, timer_res; + u32 address; + enum _ecore_status_t rc; + + /* Coalesce = (timeset << timer-resolution), timeset is 7bit wide */ + if (coalesce <= 0x7F) { + timer_res = 0; + } else if (coalesce <= 0xFF) { + timer_res = 1; + } else if (coalesce <= 0x1FF) { + timer_res = 2; + } else { + DP_ERR(p_hwfn, "Invalid coalesce value - %d\n", coalesce); + return ECORE_INVAL; + } + timeset = (u8)(coalesce >> timer_res); + + rc = ecore_int_set_timer_res(p_hwfn, p_ptt, timer_res, + p_cid->sb_igu_id, false); + if (rc != ECORE_SUCCESS) + goto out; + + address = BAR0_MAP_REG_USDM_RAM + + USTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id); + + rc = ecore_set_coalesce(p_hwfn, p_ptt, address, &eth_qzone, + sizeof(struct ustorm_eth_queue_zone), timeset); + if (rc != ECORE_SUCCESS) + goto out; + +out: + return rc; +} + +enum _ecore_status_t ecore_set_txq_coalesce(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u16 coalesce, + struct ecore_queue_cid *p_cid) +{ + struct xstorm_eth_queue_zone eth_qzone; + u8 timeset, timer_res; + u32 address; + enum _ecore_status_t rc; + + /* Coalesce = (timeset << timer-resolution), timeset is 7bit wide */ + if (coalesce <= 0x7F) { + timer_res = 0; + } else if (coalesce <= 0xFF) { + timer_res = 1; + } else if (coalesce <= 0x1FF) { + timer_res = 2; + } else { + DP_ERR(p_hwfn, "Invalid coalesce value - %d\n", coalesce); + return ECORE_INVAL; + } + + timeset = (u8)(coalesce >> timer_res); + + rc = ecore_int_set_timer_res(p_hwfn, p_ptt, timer_res, + p_cid->sb_igu_id, true); + if (rc != ECORE_SUCCESS) + goto out; + + address = BAR0_MAP_REG_XSDM_RAM + + XSTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id); + + rc = ecore_set_coalesce(p_hwfn, p_ptt, address, &eth_qzone, + sizeof(struct xstorm_eth_queue_zone), timeset); +out: + return rc; +} + +/* Calculate final WFQ values for all vports and configure it. 
+ * After this configuration each vport must have + * approx min rate = wfq * min_pf_rate / ECORE_WFQ_UNIT + */ +static void ecore_configure_wfq_for_all_vports(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 min_pf_rate) +{ + struct init_qm_vport_params *vport_params; + int i; + + vport_params = p_hwfn->qm_info.qm_vport_params; + + for (i = 0; i < p_hwfn->qm_info.num_vports; i++) { + u32 wfq_speed = p_hwfn->qm_info.wfq_data[i].min_speed; + + vport_params[i].wfq = (wfq_speed * ECORE_WFQ_UNIT) / + min_pf_rate; + ecore_init_vport_wfq(p_hwfn, p_ptt, + vport_params[i].first_tx_pq_id, + vport_params[i].wfq); + } +} + +static void ecore_init_wfq_default_param(struct ecore_hwfn *p_hwfn) +{ + int i; + + for (i = 0; i < p_hwfn->qm_info.num_vports; i++) + p_hwfn->qm_info.qm_vport_params[i].wfq = 1; +} + +static void ecore_disable_wfq_for_all_vports(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + struct init_qm_vport_params *vport_params; + int i; + + vport_params = p_hwfn->qm_info.qm_vport_params; + + for (i = 0; i < p_hwfn->qm_info.num_vports; i++) { + ecore_init_wfq_default_param(p_hwfn); + ecore_init_vport_wfq(p_hwfn, p_ptt, + vport_params[i].first_tx_pq_id, + vport_params[i].wfq); + } +} + +/* This function performs several validations for WFQ + * configuration and required min rate for a given vport + * 1. req_rate must be greater than one percent of min_pf_rate. + * 2. req_rate should not cause other vports [not configured for WFQ explicitly] + * rates to get less than one percent of min_pf_rate. + * 3. total_req_min_rate [all vports min rate sum] shouldn't exceed min_pf_rate. + */ +static enum _ecore_status_t ecore_init_wfq_param(struct ecore_hwfn *p_hwfn, + u16 vport_id, u32 req_rate, + u32 min_pf_rate) +{ + u32 total_req_min_rate = 0, total_left_rate = 0, left_rate_per_vp = 0; + int non_requested_count = 0, req_count = 0, i, num_vports; + + num_vports = p_hwfn->qm_info.num_vports; + +/* Accounting for the vports which are configured for WFQ explicitly */ + + for (i = 0; i < num_vports; i++) { + u32 tmp_speed; + + if ((i != vport_id) && p_hwfn->qm_info.wfq_data[i].configured) { + req_count++; + tmp_speed = p_hwfn->qm_info.wfq_data[i].min_speed; + total_req_min_rate += tmp_speed; + } + } + + /* Include current vport data as well */ + req_count++; + total_req_min_rate += req_rate; + non_requested_count = num_vports - req_count; + + /* validate possible error cases */ + if (req_rate < min_pf_rate / ECORE_WFQ_UNIT) { + DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, + "Vport [%d] - Requested rate[%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n", + vport_id, req_rate, min_pf_rate); + return ECORE_INVAL; + } + + /* TBD - for number of vports greater than 100 */ + if (num_vports > ECORE_WFQ_UNIT) { + DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, + "Number of vports is greater than %d\n", + ECORE_WFQ_UNIT); + return ECORE_INVAL; + } + + if (total_req_min_rate > min_pf_rate) { + DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, + "Total requested min rate for all vports[%d Mbps] is greater than configured PF min rate[%d Mbps]\n", + total_req_min_rate, min_pf_rate); + return ECORE_INVAL; + } + + /* Data left for non requested vports */ + total_left_rate = min_pf_rate - total_req_min_rate; + left_rate_per_vp = total_left_rate / non_requested_count; + + /* validate if non requested get < 1% of min bw */ + if (left_rate_per_vp < min_pf_rate / ECORE_WFQ_UNIT) { + DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, + "Non WFQ configured vports rate [%d Mbps] is less than one percent of configured PF min rate[%d 
Mbps]\n", + left_rate_per_vp, min_pf_rate); + return ECORE_INVAL; + } + + /* now req_rate for given vport passes all scenarios. + * assign final wfq rates to all vports. + */ + p_hwfn->qm_info.wfq_data[vport_id].min_speed = req_rate; + p_hwfn->qm_info.wfq_data[vport_id].configured = true; + + for (i = 0; i < num_vports; i++) { + if (p_hwfn->qm_info.wfq_data[i].configured) + continue; + + p_hwfn->qm_info.wfq_data[i].min_speed = left_rate_per_vp; + } + + return ECORE_SUCCESS; +} + +static int __ecore_configure_vport_wfq(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u16 vp_id, u32 rate) +{ + struct ecore_mcp_link_state *p_link; + int rc = ECORE_SUCCESS; + + p_link = &ECORE_LEADING_HWFN(p_hwfn->p_dev)->mcp_info->link_output; + + if (!p_link->min_pf_rate) { + p_hwfn->qm_info.wfq_data[vp_id].min_speed = rate; + p_hwfn->qm_info.wfq_data[vp_id].configured = true; + return rc; + } + + rc = ecore_init_wfq_param(p_hwfn, vp_id, rate, p_link->min_pf_rate); + + if (rc == ECORE_SUCCESS) + ecore_configure_wfq_for_all_vports(p_hwfn, p_ptt, + p_link->min_pf_rate); + else + DP_NOTICE(p_hwfn, false, + "Validation failed while configuring min rate\n"); + + return rc; +} + +static int __ecore_configure_vp_wfq_on_link_change(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 min_pf_rate) +{ + bool use_wfq = false; + int rc = ECORE_SUCCESS; + u16 i; + + /* Validate all pre configured vports for wfq */ + for (i = 0; i < p_hwfn->qm_info.num_vports; i++) { + u32 rate; + + if (!p_hwfn->qm_info.wfq_data[i].configured) + continue; + + rate = p_hwfn->qm_info.wfq_data[i].min_speed; + use_wfq = true; + + rc = ecore_init_wfq_param(p_hwfn, i, rate, min_pf_rate); + if (rc != ECORE_SUCCESS) { + DP_NOTICE(p_hwfn, false, + "WFQ validation failed while configuring min rate\n"); + break; + } + } + + if (rc == ECORE_SUCCESS && use_wfq) + ecore_configure_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate); + else + ecore_disable_wfq_for_all_vports(p_hwfn, p_ptt); + + return rc; +} + +/* Main API for ecore clients to configure vport min rate. + * vp_id - vport id in PF Range[0 - (total_num_vports_per_pf - 1)] + * rate - Speed in Mbps needs to be assigned to a given vport. 
+ */ +int ecore_configure_vport_wfq(struct ecore_dev *p_dev, u16 vp_id, u32 rate) +{ + int i, rc = ECORE_INVAL; + + /* TBD - for multiple hardware functions - that is 100 gig */ + if (ECORE_IS_CMT(p_dev)) { + DP_NOTICE(p_dev, false, + "WFQ configuration is not supported for this device\n"); + return rc; + } + + for_each_hwfn(p_dev, i) { + struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; + struct ecore_ptt *p_ptt; + + p_ptt = ecore_ptt_acquire(p_hwfn); + if (!p_ptt) + return ECORE_TIMEOUT; + + rc = __ecore_configure_vport_wfq(p_hwfn, p_ptt, vp_id, rate); + + if (rc != ECORE_SUCCESS) { + ecore_ptt_release(p_hwfn, p_ptt); + return rc; + } + + ecore_ptt_release(p_hwfn, p_ptt); + } + + return rc; +} + +/* API to configure WFQ from mcp link change */ +void ecore_configure_vp_wfq_on_link_change(struct ecore_dev *p_dev, + struct ecore_ptt *p_ptt, + u32 min_pf_rate) +{ + int i; + + /* TBD - for multiple hardware functions - that is 100 gig */ + if (ECORE_IS_CMT(p_dev)) { + DP_VERBOSE(p_dev, ECORE_MSG_LINK, + "WFQ configuration is not supported for this device\n"); + return; + } + + for_each_hwfn(p_dev, i) { + struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; + + __ecore_configure_vp_wfq_on_link_change(p_hwfn, p_ptt, + min_pf_rate); + } +} + +int __ecore_configure_pf_max_bandwidth(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_mcp_link_state *p_link, + u8 max_bw) +{ + int rc = ECORE_SUCCESS; + + p_hwfn->mcp_info->func_info.bandwidth_max = max_bw; + + if (!p_link->line_speed && (max_bw != 100)) + return rc; + + p_link->speed = (p_link->line_speed * max_bw) / 100; + p_hwfn->qm_info.pf_rl = p_link->speed; + + /* Since the limiter also affects Tx-switched traffic, we don't want it + * to limit such traffic in case there's no actual limit. + * In that case, set limit to imaginary high boundary. 
+ */ + if (max_bw == 100) + p_hwfn->qm_info.pf_rl = 100000; + + rc = ecore_init_pf_rl(p_hwfn, p_ptt, p_hwfn->rel_pf_id, + p_hwfn->qm_info.pf_rl); + + DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, + "Configured MAX bandwidth to be %08x Mb/sec\n", + p_link->speed); + + return rc; +} + +/* Main API to configure PF max bandwidth where bw range is [1 - 100] */ +int ecore_configure_pf_max_bandwidth(struct ecore_dev *p_dev, u8 max_bw) +{ + int i, rc = ECORE_INVAL; + + if (max_bw < 1 || max_bw > 100) { + DP_NOTICE(p_dev, false, "PF max bw valid range is [1-100]\n"); + return rc; + } + + for_each_hwfn(p_dev, i) { + struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; + struct ecore_hwfn *p_lead = ECORE_LEADING_HWFN(p_dev); + struct ecore_mcp_link_state *p_link; + struct ecore_ptt *p_ptt; + + p_link = &p_lead->mcp_info->link_output; + + p_ptt = ecore_ptt_acquire(p_hwfn); + if (!p_ptt) + return ECORE_TIMEOUT; + + rc = __ecore_configure_pf_max_bandwidth(p_hwfn, p_ptt, + p_link, max_bw); + + ecore_ptt_release(p_hwfn, p_ptt); + + if (rc != ECORE_SUCCESS) + break; + } + + return rc; +} + +int __ecore_configure_pf_min_bandwidth(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_mcp_link_state *p_link, + u8 min_bw) +{ + int rc = ECORE_SUCCESS; + + p_hwfn->mcp_info->func_info.bandwidth_min = min_bw; + p_hwfn->qm_info.pf_wfq = min_bw; + + if (!p_link->line_speed) + return rc; + + p_link->min_pf_rate = (p_link->line_speed * min_bw) / 100; + + rc = ecore_init_pf_wfq(p_hwfn, p_ptt, p_hwfn->rel_pf_id, min_bw); + + DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, + "Configured MIN bandwidth to be %d Mb/sec\n", + p_link->min_pf_rate); + + return rc; +} + +/* Main API to configure PF min bandwidth where bw range is [1-100] */ +int ecore_configure_pf_min_bandwidth(struct ecore_dev *p_dev, u8 min_bw) +{ + int i, rc = ECORE_INVAL; + + if (min_bw < 1 || min_bw > 100) { + DP_NOTICE(p_dev, false, "PF min bw valid range is [1-100]\n"); + return rc; + } + + for_each_hwfn(p_dev, i) { + struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; + struct ecore_hwfn *p_lead = ECORE_LEADING_HWFN(p_dev); + struct ecore_mcp_link_state *p_link; + struct ecore_ptt *p_ptt; + + p_link = &p_lead->mcp_info->link_output; + + p_ptt = ecore_ptt_acquire(p_hwfn); + if (!p_ptt) + return ECORE_TIMEOUT; + + rc = __ecore_configure_pf_min_bandwidth(p_hwfn, p_ptt, + p_link, min_bw); + if (rc != ECORE_SUCCESS) { + ecore_ptt_release(p_hwfn, p_ptt); + return rc; + } + + if (p_link->min_pf_rate) { + u32 min_rate = p_link->min_pf_rate; + + rc = __ecore_configure_vp_wfq_on_link_change(p_hwfn, + p_ptt, + min_rate); + } + + ecore_ptt_release(p_hwfn, p_ptt); + } + + return rc; +} + +void ecore_clean_wfq_db(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) +{ + struct ecore_mcp_link_state *p_link; + + p_link = &p_hwfn->mcp_info->link_output; + + if (p_link->min_pf_rate) + ecore_disable_wfq_for_all_vports(p_hwfn, p_ptt); + + OSAL_MEMSET(p_hwfn->qm_info.wfq_data, 0, + sizeof(*p_hwfn->qm_info.wfq_data) * + p_hwfn->qm_info.num_vports); +} + +int ecore_device_num_engines(struct ecore_dev *p_dev) +{ + return ECORE_IS_BB(p_dev) ? 
2 : 1; +} + +int ecore_device_num_ports(struct ecore_dev *p_dev) +{ + return p_dev->num_ports; +} + +void ecore_set_fw_mac_addr(__le16 *fw_msb, + __le16 *fw_mid, + __le16 *fw_lsb, + u8 *mac) +{ + ((u8 *)fw_msb)[0] = mac[1]; + ((u8 *)fw_msb)[1] = mac[0]; + ((u8 *)fw_mid)[0] = mac[3]; + ((u8 *)fw_mid)[1] = mac[2]; + ((u8 *)fw_lsb)[0] = mac[5]; + ((u8 *)fw_lsb)[1] = mac[4]; +} + +bool ecore_is_mf_fip_special(struct ecore_dev *p_dev) +{ + return !!OSAL_TEST_BIT(ECORE_MF_FIP_SPECIAL, &p_dev->mf_bits); +} diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_dev_api.h b/src/spdk/dpdk/drivers/net/qede/base/ecore_dev_api.h new file mode 100644 index 000000000..5ea8427a0 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_dev_api.h @@ -0,0 +1,701 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. + * All rights reserved. + * www.cavium.com + */ + +#ifndef __ECORE_DEV_API_H__ +#define __ECORE_DEV_API_H__ + +#include "ecore_status.h" +#include "ecore_chain.h" +#include "ecore_int_api.h" + +/** + * @brief ecore_init_dp - initialize the debug level + * + * @param p_dev + * @param dp_module + * @param dp_level + * @param dp_ctx + */ +void ecore_init_dp(struct ecore_dev *p_dev, + u32 dp_module, + u8 dp_level, + void *dp_ctx); + +/** + * @brief ecore_init_struct - initialize the device structure to + * its defaults + * + * @param p_dev + */ +enum _ecore_status_t ecore_init_struct(struct ecore_dev *p_dev); + +/** + * @brief ecore_resc_free - + * + * @param p_dev + */ +void ecore_resc_free(struct ecore_dev *p_dev); + +/** + * @brief ecore_resc_alloc - + * + * @param p_dev + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev); + +/** + * @brief ecore_resc_setup - + * + * @param p_dev + */ +void ecore_resc_setup(struct ecore_dev *p_dev); + +enum ecore_mfw_timeout_fallback { + ECORE_TO_FALLBACK_TO_NONE, + ECORE_TO_FALLBACK_TO_DEFAULT, + ECORE_TO_FALLBACK_FAIL_LOAD, +}; + +enum ecore_override_force_load { + ECORE_OVERRIDE_FORCE_LOAD_NONE, + ECORE_OVERRIDE_FORCE_LOAD_ALWAYS, + ECORE_OVERRIDE_FORCE_LOAD_NEVER, +}; + +struct ecore_drv_load_params { + /* Indicates whether the driver is running over a crash kernel. + * As part of the load request, this will be used for providing the + * driver role to the MFW. + * In case of a crash kernel over PDA - this should be set to false. + */ + bool is_crash_kernel; + + /* The timeout value that the MFW should use when locking the engine for + * the driver load process. + * A value of '0' means the default value, and '255' means no timeout. + */ + u8 mfw_timeout_val; +#define ECORE_LOAD_REQ_LOCK_TO_DEFAULT 0 +#define ECORE_LOAD_REQ_LOCK_TO_NONE 255 + + /* Action to take in case the MFW doesn't support timeout values other + * than default and none. + */ + enum ecore_mfw_timeout_fallback mfw_timeout_fallback; + + /* Avoid engine reset when first PF loads on it */ + bool avoid_eng_reset; + + /* Allow overriding the default force load behavior */ + enum ecore_override_force_load override_force_load; +}; + +struct ecore_hw_init_params { + /* Tunneling parameters */ + struct ecore_tunnel_info *p_tunn; + + bool b_hw_start; + + /* Interrupt mode [msix, inta, etc.] 
to use */ + enum ecore_int_mode int_mode; + + /* NPAR tx switching to be used for vports configured for tx-switching + */ + bool allow_npar_tx_switch; + + /* Binary fw data pointer in binary fw file */ + const u8 *bin_fw_data; + + /* Driver load parameters */ + struct ecore_drv_load_params *p_drv_load_params; + + /* Avoid engine affinity for RoCE/storage in case of CMT mode */ + bool avoid_eng_affin; + + /* SPQ block timeout in msec */ + u32 spq_timeout_ms; +}; + +/** + * @brief ecore_hw_init - + * + * @param p_dev + * @param p_params + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev, + struct ecore_hw_init_params *p_params); + +/** + * @brief ecore_hw_timers_stop_all - + * + * @param p_dev + * + * @return void + */ +void ecore_hw_timers_stop_all(struct ecore_dev *p_dev); + +/** + * @brief ecore_hw_stop - + * + * @param p_dev + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev); + +/** + * @brief ecore_hw_stop_fastpath -should be called incase + * slowpath is still required for the device, + * but fastpath is not. + * + * @param p_dev + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_hw_stop_fastpath(struct ecore_dev *p_dev); + +#ifndef LINUX_REMOVE +/** + * @brief ecore_prepare_hibernate -should be called when + * the system is going into the hibernate state + * + * @param p_dev + * + */ +void ecore_prepare_hibernate(struct ecore_dev *p_dev); + +enum ecore_db_rec_width { + DB_REC_WIDTH_32B, + DB_REC_WIDTH_64B, +}; + +enum ecore_db_rec_space { + DB_REC_KERNEL, + DB_REC_USER, +}; + +/** + * @brief db_recovery_add - add doorbell information to the doorbell + * recovery mechanism. + * + * @param p_dev + * @param db_addr - doorbell address + * @param db_data - address of where db_data is stored + * @param db_width - doorbell is 32b pr 64b + * @param db_space - doorbell recovery addresses are user or kernel space + */ +enum _ecore_status_t ecore_db_recovery_add(struct ecore_dev *p_dev, + void OSAL_IOMEM *db_addr, + void *db_data, + enum ecore_db_rec_width db_width, + enum ecore_db_rec_space db_space); + +/** + * @brief db_recovery_del - remove doorbell information from the doorbell + * recovery mechanism. db_data serves as key (db_addr is not unique). + * + * @param cdev + * @param db_addr - doorbell address + * @param db_data - address where db_data is stored. Serves as key for the + * entry to delete. + */ +enum _ecore_status_t ecore_db_recovery_del(struct ecore_dev *p_dev, + void OSAL_IOMEM *db_addr, + void *db_data); + +static OSAL_INLINE bool ecore_is_mf_ufp(struct ecore_hwfn *p_hwfn) +{ + return !!OSAL_TEST_BIT(ECORE_MF_UFP_SPECIFIC, &p_hwfn->p_dev->mf_bits); +} + +#endif + +/** + * @brief ecore_hw_start_fastpath -restart fastpath traffic, + * only if hw_stop_fastpath was called + + * @param p_hwfn + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_hw_start_fastpath(struct ecore_hwfn *p_hwfn); + +enum ecore_hw_prepare_result { + ECORE_HW_PREPARE_SUCCESS, + + /* FAILED results indicate probe has failed & cleaned up */ + ECORE_HW_PREPARE_FAILED_ENG2, + ECORE_HW_PREPARE_FAILED_ME, + ECORE_HW_PREPARE_FAILED_MEM, + ECORE_HW_PREPARE_FAILED_DEV, + ECORE_HW_PREPARE_FAILED_NVM, + + /* BAD results indicate probe is passed even though some wrongness + * has occurred; Trying to actually use [I.e., hw_init()] might have + * dire reprecautions. 
+ */ + ECORE_HW_PREPARE_BAD_IOV, + ECORE_HW_PREPARE_BAD_MCP, + ECORE_HW_PREPARE_BAD_IGU, +}; + +struct ecore_hw_prepare_params { + /* Personality to initialize */ + int personality; + + /* Force the driver's default resource allocation */ + bool drv_resc_alloc; + + /* Check the reg_fifo after any register access */ + bool chk_reg_fifo; + + /* Request the MFW to initiate PF FLR */ + bool initiate_pf_flr; + + /* The OS Epoch time in seconds */ + u32 epoch; + + /* Allow the MFW to collect a crash dump */ + bool allow_mdump; + + /* Allow prepare to pass even if some initializations are failing. + * If set, the `p_prepare_res' field would be set with the return, + * and might allow probe to pass even if there are certain issues. + */ + bool b_relaxed_probe; + enum ecore_hw_prepare_result p_relaxed_res; + + /* Enable/disable request by ecore client for pacing */ + bool b_en_pacing; + + /* Indicates whether this PF serves a storage target */ + bool b_is_target; + + /* retry count for VF acquire on channel timeout */ + u8 acquire_retry_cnt; +}; + +/** + * @brief ecore_hw_prepare - + * + * @param p_dev + * @param p_params + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev, + struct ecore_hw_prepare_params *p_params); + +/** + * @brief ecore_hw_remove - + * + * @param p_dev + */ +void ecore_hw_remove(struct ecore_dev *p_dev); + +/** + * @brief ecore_ptt_acquire - Allocate a PTT window + * + * Should be called at the entry point to the driver (at the beginning of an + * exported function) + * + * @param p_hwfn + * + * @return struct ecore_ptt + */ +struct ecore_ptt *ecore_ptt_acquire(struct ecore_hwfn *p_hwfn); + +/** + * @brief ecore_ptt_release - Release PTT Window + * + * Should be called at the end of a flow - at the end of the function that + * acquired the PTT. 
+ * + * + * @param p_hwfn + * @param p_ptt + */ +void ecore_ptt_release(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt); + +struct ecore_eth_stats_common { + u64 no_buff_discards; + u64 packet_too_big_discard; + u64 ttl0_discard; + u64 rx_ucast_bytes; + u64 rx_mcast_bytes; + u64 rx_bcast_bytes; + u64 rx_ucast_pkts; + u64 rx_mcast_pkts; + u64 rx_bcast_pkts; + u64 mftag_filter_discards; + u64 mac_filter_discards; + u64 gft_filter_drop; + u64 tx_ucast_bytes; + u64 tx_mcast_bytes; + u64 tx_bcast_bytes; + u64 tx_ucast_pkts; + u64 tx_mcast_pkts; + u64 tx_bcast_pkts; + u64 tx_err_drop_pkts; + u64 tpa_coalesced_pkts; + u64 tpa_coalesced_events; + u64 tpa_aborts_num; + u64 tpa_not_coalesced_pkts; + u64 tpa_coalesced_bytes; + + /* port */ + u64 rx_64_byte_packets; + u64 rx_65_to_127_byte_packets; + u64 rx_128_to_255_byte_packets; + u64 rx_256_to_511_byte_packets; + u64 rx_512_to_1023_byte_packets; + u64 rx_1024_to_1518_byte_packets; + u64 rx_crc_errors; + u64 rx_mac_crtl_frames; + u64 rx_pause_frames; + u64 rx_pfc_frames; + u64 rx_align_errors; + u64 rx_carrier_errors; + u64 rx_oversize_packets; + u64 rx_jabbers; + u64 rx_undersize_packets; + u64 rx_fragments; + u64 tx_64_byte_packets; + u64 tx_65_to_127_byte_packets; + u64 tx_128_to_255_byte_packets; + u64 tx_256_to_511_byte_packets; + u64 tx_512_to_1023_byte_packets; + u64 tx_1024_to_1518_byte_packets; + u64 tx_pause_frames; + u64 tx_pfc_frames; + u64 brb_truncates; + u64 brb_discards; + u64 rx_mac_bytes; + u64 rx_mac_uc_packets; + u64 rx_mac_mc_packets; + u64 rx_mac_bc_packets; + u64 rx_mac_frames_ok; + u64 tx_mac_bytes; + u64 tx_mac_uc_packets; + u64 tx_mac_mc_packets; + u64 tx_mac_bc_packets; + u64 tx_mac_ctrl_frames; + u64 link_change_count; +}; + +struct ecore_eth_stats_bb { + u64 rx_1519_to_1522_byte_packets; + u64 rx_1519_to_2047_byte_packets; + u64 rx_2048_to_4095_byte_packets; + u64 rx_4096_to_9216_byte_packets; + u64 rx_9217_to_16383_byte_packets; + u64 tx_1519_to_2047_byte_packets; + u64 tx_2048_to_4095_byte_packets; + u64 tx_4096_to_9216_byte_packets; + u64 tx_9217_to_16383_byte_packets; + u64 tx_lpi_entry_count; + u64 tx_total_collisions; +}; + +struct ecore_eth_stats_ah { + u64 rx_1519_to_max_byte_packets; + u64 tx_1519_to_max_byte_packets; +}; + +struct ecore_eth_stats { + struct ecore_eth_stats_common common; + union { + struct ecore_eth_stats_bb bb; + struct ecore_eth_stats_ah ah; + }; +}; + +/** + * @brief ecore_chain_alloc - Allocate and initialize a chain + * + * @param p_hwfn + * @param intended_use + * @param mode + * @param num_elems + * @param elem_size + * @param p_chain + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t +ecore_chain_alloc(struct ecore_dev *p_dev, + enum ecore_chain_use_mode intended_use, + enum ecore_chain_mode mode, + enum ecore_chain_cnt_type cnt_type, + u32 num_elems, + osal_size_t elem_size, + struct ecore_chain *p_chain, + struct ecore_chain_ext_pbl *ext_pbl); + +/** + * @brief ecore_chain_free - Free chain DMA memory + * + * @param p_hwfn + * @param p_chain + */ +void ecore_chain_free(struct ecore_dev *p_dev, + struct ecore_chain *p_chain); + +/** + * @@brief ecore_fw_l2_queue - Get absolute L2 queue ID + * + * @param p_hwfn + * @param src_id - relative to p_hwfn + * @param dst_id - absolute per engine + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_fw_l2_queue(struct ecore_hwfn *p_hwfn, + u16 src_id, + u16 *dst_id); + +/** + * @@brief ecore_fw_vport - Get absolute vport ID + * + * @param p_hwfn + * @param src_id - relative to p_hwfn + * @param dst_id - 
absolute per engine + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_fw_vport(struct ecore_hwfn *p_hwfn, + u8 src_id, + u8 *dst_id); + +/** + * @@brief ecore_fw_rss_eng - Get absolute RSS engine ID + * + * @param p_hwfn + * @param src_id - relative to p_hwfn + * @param dst_id - absolute per engine + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_fw_rss_eng(struct ecore_hwfn *p_hwfn, + u8 src_id, + u8 *dst_id); + +/** + * @brief ecore_llh_get_num_ppfid - Return the allocated number of LLH filter + * banks that are allocated to the PF. + * + * @param p_dev + * + * @return u8 - Number of LLH filter banks + */ +u8 ecore_llh_get_num_ppfid(struct ecore_dev *p_dev); + +enum ecore_eng { + ECORE_ENG0, + ECORE_ENG1, + ECORE_BOTH_ENG, +}; + +/** + * @brief ecore_llh_get_l2_affinity_hint - Return the hint for the L2 affinity + * + * @param p_dev + * + * @return enum ecore_eng - L2 affintiy hint + */ +enum ecore_eng ecore_llh_get_l2_affinity_hint(struct ecore_dev *p_dev); + +/** + * @brief ecore_llh_set_ppfid_affinity - Set the engine affinity for the given + * LLH filter bank. + * + * @param p_dev + * @param ppfid - relative within the allocated ppfids ('0' is the default one). + * @param eng + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_llh_set_ppfid_affinity(struct ecore_dev *p_dev, + u8 ppfid, enum ecore_eng eng); + +/** + * @brief ecore_llh_set_roce_affinity - Set the RoCE engine affinity + * + * @param p_dev + * @param eng + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_llh_set_roce_affinity(struct ecore_dev *p_dev, + enum ecore_eng eng); + +/** + * @brief ecore_llh_add_mac_filter - Add a LLH MAC filter into the given filter + * bank. + * + * @param p_dev + * @param ppfid - relative within the allocated ppfids ('0' is the default one). + * @param mac_addr - MAC to add + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_llh_add_mac_filter(struct ecore_dev *p_dev, u8 ppfid, + u8 mac_addr[ETH_ALEN]); + +/** + * @brief ecore_llh_remove_mac_filter - Remove a LLH MAC filter from the given + * filter bank. + * + * @param p_dev + * @param ppfid - relative within the allocated ppfids ('0' is the default one). + * @param mac_addr - MAC to remove + */ +void ecore_llh_remove_mac_filter(struct ecore_dev *p_dev, u8 ppfid, + u8 mac_addr[ETH_ALEN]); + +enum ecore_llh_prot_filter_type_t { + ECORE_LLH_FILTER_ETHERTYPE, + ECORE_LLH_FILTER_TCP_SRC_PORT, + ECORE_LLH_FILTER_TCP_DEST_PORT, + ECORE_LLH_FILTER_TCP_SRC_AND_DEST_PORT, + ECORE_LLH_FILTER_UDP_SRC_PORT, + ECORE_LLH_FILTER_UDP_DEST_PORT, + ECORE_LLH_FILTER_UDP_SRC_AND_DEST_PORT +}; + +/** + * @brief ecore_llh_add_protocol_filter - Add a LLH protocol filter into the + * given filter bank. + * + * @param p_dev + * @param ppfid - relative within the allocated ppfids ('0' is the default one). + * @param type - type of filters and comparing + * @param source_port_or_eth_type - source port or ethertype to add + * @param dest_port - destination port to add + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t +ecore_llh_add_protocol_filter(struct ecore_dev *p_dev, u8 ppfid, + enum ecore_llh_prot_filter_type_t type, + u16 source_port_or_eth_type, u16 dest_port); + +/** + * @brief ecore_llh_remove_protocol_filter - Remove a LLH protocol filter from + * the given filter bank. + * + * @param p_dev + * @param ppfid - relative within the allocated ppfids ('0' is the default one). 
+ * @param type - type of filters and comparing + * @param source_port_or_eth_type - source port or ethertype to add + * @param dest_port - destination port to add + */ +void ecore_llh_remove_protocol_filter(struct ecore_dev *p_dev, u8 ppfid, + enum ecore_llh_prot_filter_type_t type, + u16 source_port_or_eth_type, + u16 dest_port); + +/** + * @brief ecore_llh_clear_ppfid_filters - Remove all LLH filters from the given + * filter bank. + * + * @param p_dev + * @param ppfid - relative within the allocated ppfids ('0' is the default one). + */ +void ecore_llh_clear_ppfid_filters(struct ecore_dev *p_dev, u8 ppfid); + +/** + * @brief ecore_llh_clear_all_filters - Remove all LLH filters + * + * @param p_dev + */ +void ecore_llh_clear_all_filters(struct ecore_dev *p_dev); + +/** + * @brief ecore_llh_set_function_as_default - set function as default per port + * + * @param p_hwfn + * @param p_ptt + */ +enum _ecore_status_t +ecore_llh_set_function_as_default(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt); + +/** + *@brief Cleanup of previous driver remains prior to load + * + * @param p_hwfn + * @param p_ptt + * @param id - For PF, engine-relative. For VF, PF-relative. + * @param is_vf - true iff cleanup is made for a VF. + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_final_cleanup(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u16 id, + bool is_vf); + +/** + * @brief ecore_get_queue_coalesce - Retrieve coalesce value for a given queue. + * + * @param p_hwfn + * @param p_coal - store coalesce value read from the hardware. + * @param p_handle + * + * @return enum _ecore_status_t + **/ +enum _ecore_status_t +ecore_get_queue_coalesce(struct ecore_hwfn *p_hwfn, u16 *coal, + void *handle); + +/** + * @brief ecore_set_queue_coalesce - Configure coalesce parameters for Rx and + * Tx queue. The fact that we can configure coalescing to up to 511, but on + * varying accuracy [the bigger the value the less accurate] up to a mistake + * of 3usec for the highest values. + * While the API allows setting coalescing per-qid, all queues sharing a SB + * should be in same range [i.e., either 0-0x7f, 0x80-0xff or 0x100-0x1ff] + * otherwise configuration would break. + * + * @param p_hwfn + * @param rx_coal - Rx Coalesce value in micro seconds. + * @param tx_coal - TX Coalesce value in micro seconds. + * @param p_handle + * + * @return enum _ecore_status_t + **/ +enum _ecore_status_t +ecore_set_queue_coalesce(struct ecore_hwfn *p_hwfn, u16 rx_coal, + u16 tx_coal, void *p_handle); + +/** + * @brief ecore_pglueb_set_pfid_enable - Enable or disable PCI BUS MASTER + * + * @param p_hwfn + * @param p_ptt + * @param b_enable - true/false + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_pglueb_set_pfid_enable(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + bool b_enable); + +/** + * @brief Whether FIP discovery fallback special mode is enabled or not. + * + * @param cdev + * + * @return true if device is in FIP special mode, false otherwise. + */ +bool ecore_is_mf_fip_special(struct ecore_dev *p_dev); +#endif diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_gtt_reg_addr.h b/src/spdk/dpdk/drivers/net/qede/base/ecore_gtt_reg_addr.h new file mode 100644 index 000000000..f5b11eb28 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_gtt_reg_addr.h @@ -0,0 +1,60 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. + * All rights reserved. 
+ * www.cavium.com + */ + +#ifndef GTT_REG_ADDR_H +#define GTT_REG_ADDR_H + +/* Win 2 */ +//Access:RW DataWidth:0x20 // +#define GTT_BAR0_MAP_REG_IGU_CMD 0x00f000UL + +/* Win 3 */ +//Access:RW DataWidth:0x20 // +#define GTT_BAR0_MAP_REG_TSDM_RAM 0x010000UL + +/* Win 4 */ +//Access:RW DataWidth:0x20 // +#define GTT_BAR0_MAP_REG_MSDM_RAM 0x011000UL + +/* Win 5 */ +//Access:RW DataWidth:0x20 // +#define GTT_BAR0_MAP_REG_MSDM_RAM_1024 0x012000UL + +/* Win 6 */ +//Access:RW DataWidth:0x20 // +#define GTT_BAR0_MAP_REG_MSDM_RAM_2048 0x013000UL + +/* Win 7 */ +//Access:RW DataWidth:0x20 // +#define GTT_BAR0_MAP_REG_USDM_RAM 0x014000UL + +/* Win 8 */ +//Access:RW DataWidth:0x20 // +#define GTT_BAR0_MAP_REG_USDM_RAM_1024 0x015000UL + +/* Win 9 */ +//Access:RW DataWidth:0x20 // +#define GTT_BAR0_MAP_REG_USDM_RAM_2048 0x016000UL + +/* Win 10 */ +//Access:RW DataWidth:0x20 // +#define GTT_BAR0_MAP_REG_XSDM_RAM 0x017000UL + +/* Win 11 */ +//Access:RW DataWidth:0x20 // +#define GTT_BAR0_MAP_REG_XSDM_RAM_1024 0x018000UL + +/* Win 12 */ +//Access:RW DataWidth:0x20 // +#define GTT_BAR0_MAP_REG_YSDM_RAM 0x019000UL + +/* Win 13 */ +//Access:RW DataWidth:0x20 // +#define GTT_BAR0_MAP_REG_PSDM_RAM 0x01a000UL + +/* Win 14 */ + +#endif diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_gtt_values.h b/src/spdk/dpdk/drivers/net/qede/base/ecore_gtt_values.h new file mode 100644 index 000000000..2035bed5c --- /dev/null +++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_gtt_values.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. + * All rights reserved. + * www.cavium.com + */ + +#ifndef __PREVENT_PXP_GLOBAL_WIN__ + +static u32 pxp_global_win[] = { + 0, + 0, + 0x1c02, /* win 2: addr=0x1c02000, size=4096 bytes */ + 0x1c80, /* win 3: addr=0x1c80000, size=4096 bytes */ + 0x1d00, /* win 4: addr=0x1d00000, size=4096 bytes */ + 0x1d01, /* win 5: addr=0x1d01000, size=4096 bytes */ + 0x1d02, /* win 6: addr=0x1d02000, size=4096 bytes */ + 0x1d80, /* win 7: addr=0x1d80000, size=4096 bytes */ + 0x1d81, /* win 8: addr=0x1d81000, size=4096 bytes */ + 0x1d82, /* win 9: addr=0x1d82000, size=4096 bytes */ + 0x1e00, /* win 10: addr=0x1e00000, size=4096 bytes */ + 0x1e01, /* win 11: addr=0x1e01000, size=4096 bytes */ + 0x1e80, /* win 12: addr=0x1e80000, size=4096 bytes */ + 0x1f00, /* win 13: addr=0x1f00000, size=4096 bytes */ + 0x1c08, /* win 14: addr=0x1c08000, size=4096 bytes */ + 0, + 0, + 0, + 0, +}; + +#endif /* __PREVENT_PXP_GLOBAL_WIN__ */ diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_hsi_common.h b/src/spdk/dpdk/drivers/net/qede/base/ecore_hsi_common.h new file mode 100644 index 000000000..23cfcdeff --- /dev/null +++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_hsi_common.h @@ -0,0 +1,2546 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. + * All rights reserved. 
+ * www.cavium.com + */ + +#ifndef __ECORE_HSI_COMMON__ +#define __ECORE_HSI_COMMON__ +/********************************/ +/* Add include to common target */ +/********************************/ +#include "common_hsi.h" + + +/* + * opcodes for the event ring + */ +enum common_event_opcode { + COMMON_EVENT_PF_START, + COMMON_EVENT_PF_STOP, + COMMON_EVENT_VF_START, + COMMON_EVENT_VF_STOP, + COMMON_EVENT_VF_PF_CHANNEL, + COMMON_EVENT_VF_FLR, + COMMON_EVENT_PF_UPDATE, + COMMON_EVENT_MALICIOUS_VF, + COMMON_EVENT_RL_UPDATE, + COMMON_EVENT_EMPTY, + MAX_COMMON_EVENT_OPCODE +}; + + +/* + * Common Ramrod Command IDs + */ +enum common_ramrod_cmd_id { + COMMON_RAMROD_UNUSED, + COMMON_RAMROD_PF_START /* PF Function Start Ramrod */, + COMMON_RAMROD_PF_STOP /* PF Function Stop Ramrod */, + COMMON_RAMROD_VF_START /* VF Function Start */, + COMMON_RAMROD_VF_STOP /* VF Function Stop Ramrod */, + COMMON_RAMROD_PF_UPDATE /* PF update Ramrod */, + COMMON_RAMROD_RL_UPDATE /* QCN/DCQCN RL update Ramrod */, + COMMON_RAMROD_EMPTY /* Empty Ramrod */, + MAX_COMMON_RAMROD_CMD_ID +}; + + +/* + * The core storm context for the Ystorm + */ +struct ystorm_core_conn_st_ctx { + __le32 reserved[4]; +}; + +/* + * The core storm context for the Pstorm + */ +struct pstorm_core_conn_st_ctx { + __le32 reserved[20]; +}; + +/* + * Core Slowpath Connection storm context of Xstorm + */ +struct xstorm_core_conn_st_ctx { + __le32 spq_base_lo /* SPQ Ring Base Address low dword */; + __le32 spq_base_hi /* SPQ Ring Base Address high dword */; +/* Consolidation Ring Base Address */ + struct regpair consolid_base_addr; + __le16 spq_cons /* SPQ Ring Consumer */; + __le16 consolid_cons /* Consolidation Ring Consumer */; + __le32 reserved0[55] /* Pad to 15 cycles */; +}; + +struct xstorm_core_conn_ag_ctx { + u8 reserved0 /* cdu_validation */; + u8 state /* state */; + u8 flags0; +#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 /* exist_in_qm0 */ +#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_MASK 0x1 /* exist_in_qm1 */ +#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_SHIFT 1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_MASK 0x1 /* exist_in_qm2 */ +#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 /* exist_in_qm3 */ +#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_MASK 0x1 /* bit4 */ +#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_SHIFT 4 +/* cf_array_active */ +#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_SHIFT 5 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_MASK 0x1 /* bit6 */ +#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_MASK 0x1 /* bit7 */ +#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_SHIFT 7 + u8 flags1; +#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_MASK 0x1 /* bit8 */ +#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_MASK 0x1 /* bit9 */ +#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_SHIFT 1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_MASK 0x1 /* bit10 */ +#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_BIT11_MASK 0x1 /* bit11 */ +#define XSTORM_CORE_CONN_AG_CTX_BIT11_SHIFT 3 +#define XSTORM_CORE_CONN_AG_CTX_BIT12_MASK 0x1 /* bit12 */ +#define XSTORM_CORE_CONN_AG_CTX_BIT12_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_BIT13_MASK 0x1 /* bit13 */ +#define XSTORM_CORE_CONN_AG_CTX_BIT13_SHIFT 5 +#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 
0x1 /* bit14 */ +#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1 /* bit15 */ +#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7 + u8 flags2; +#define XSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */ +#define XSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */ +#define XSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */ +#define XSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 4 +/* timer_stop_all */ +#define XSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 6 + u8 flags3; +#define XSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */ +#define XSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */ +#define XSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */ +#define XSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */ +#define XSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 6 + u8 flags4; +#define XSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */ +#define XSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */ +#define XSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */ +#define XSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_CF11_MASK 0x3 /* cf11 */ +#define XSTORM_CORE_CONN_AG_CTX_CF11_SHIFT 6 + u8 flags5; +#define XSTORM_CORE_CONN_AG_CTX_CF12_MASK 0x3 /* cf12 */ +#define XSTORM_CORE_CONN_AG_CTX_CF12_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_CF13_MASK 0x3 /* cf13 */ +#define XSTORM_CORE_CONN_AG_CTX_CF13_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_CF14_MASK 0x3 /* cf14 */ +#define XSTORM_CORE_CONN_AG_CTX_CF14_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_CF15_MASK 0x3 /* cf15 */ +#define XSTORM_CORE_CONN_AG_CTX_CF15_SHIFT 6 + u8 flags6; +#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_MASK 0x3 /* cf16 */ +#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_CF17_MASK 0x3 /* cf_array_cf */ +#define XSTORM_CORE_CONN_AG_CTX_CF17_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_MASK 0x3 /* cf18 */ +#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_MASK 0x3 /* cf19 */ +#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_SHIFT 6 + u8 flags7; +#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 /* cf20 */ +#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_MASK 0x3 /* cf21 */ +#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_MASK 0x3 /* cf22 */ +#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */ +#define XSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */ +#define XSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 7 + u8 flags8; +#define XSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */ +#define XSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */ +#define XSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 1 +#define XSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */ +#define XSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */ +#define XSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 3 +#define XSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */ +#define XSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1 /* 
cf7en */ +#define XSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 5 +#define XSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */ +#define XSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */ +#define XSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 7 + u8 flags9; +#define XSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */ +#define XSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_CF11EN_MASK 0x1 /* cf11en */ +#define XSTORM_CORE_CONN_AG_CTX_CF11EN_SHIFT 1 +#define XSTORM_CORE_CONN_AG_CTX_CF12EN_MASK 0x1 /* cf12en */ +#define XSTORM_CORE_CONN_AG_CTX_CF12EN_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_CF13EN_MASK 0x1 /* cf13en */ +#define XSTORM_CORE_CONN_AG_CTX_CF13EN_SHIFT 3 +#define XSTORM_CORE_CONN_AG_CTX_CF14EN_MASK 0x1 /* cf14en */ +#define XSTORM_CORE_CONN_AG_CTX_CF14EN_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_CF15EN_MASK 0x1 /* cf15en */ +#define XSTORM_CORE_CONN_AG_CTX_CF15EN_SHIFT 5 +#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_MASK 0x1 /* cf16en */ +#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_SHIFT 6 +/* cf_array_cf_en */ +#define XSTORM_CORE_CONN_AG_CTX_CF17EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF17EN_SHIFT 7 + u8 flags10; +#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1 /* cf18en */ +#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1 /* cf19en */ +#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1 +#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 /* cf20en */ +#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_MASK 0x1 /* cf21en */ +#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_SHIFT 3 +#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 /* cf22en */ +#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_CF23EN_MASK 0x1 /* cf23en */ +#define XSTORM_CORE_CONN_AG_CTX_CF23EN_SHIFT 5 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_MASK 0x1 /* rule0en */ +#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_MASK 0x1 /* rule1en */ +#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_SHIFT 7 + u8 flags11; +#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_MASK 0x1 /* rule2en */ +#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_MASK 0x1 /* rule3en */ +#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_SHIFT 1 +#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1 /* rule4en */ +#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */ +#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 3 +#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */ +#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */ +#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 5 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 /* rule8en */ +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_MASK 0x1 /* rule9en */ +#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_SHIFT 7 + u8 flags12; +#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_MASK 0x1 /* rule10en */ +#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_MASK 0x1 /* rule11en */ +#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_SHIFT 1 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 /* rule12en */ +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 /* rule13en 
*/ +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 +#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_MASK 0x1 /* rule14en */ +#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_MASK 0x1 /* rule15en */ +#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_SHIFT 5 +#define XSTORM_CORE_CONN_AG_CTX_RULE16EN_MASK 0x1 /* rule16en */ +#define XSTORM_CORE_CONN_AG_CTX_RULE16EN_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_MASK 0x1 /* rule17en */ +#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_SHIFT 7 + u8 flags13; +#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_MASK 0x1 /* rule18en */ +#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_MASK 0x1 /* rule19en */ +#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_SHIFT 1 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 /* rule20en */ +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 /* rule21en */ +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 /* rule22en */ +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 /* rule23en */ +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 /* rule24en */ +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 /* rule25en */ +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 + u8 flags14; +#define XSTORM_CORE_CONN_AG_CTX_BIT16_MASK 0x1 /* bit16 */ +#define XSTORM_CORE_CONN_AG_CTX_BIT16_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_BIT17_MASK 0x1 /* bit17 */ +#define XSTORM_CORE_CONN_AG_CTX_BIT17_SHIFT 1 +#define XSTORM_CORE_CONN_AG_CTX_BIT18_MASK 0x1 /* bit18 */ +#define XSTORM_CORE_CONN_AG_CTX_BIT18_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_BIT19_MASK 0x1 /* bit19 */ +#define XSTORM_CORE_CONN_AG_CTX_BIT19_SHIFT 3 +#define XSTORM_CORE_CONN_AG_CTX_BIT20_MASK 0x1 /* bit20 */ +#define XSTORM_CORE_CONN_AG_CTX_BIT20_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_BIT21_MASK 0x1 /* bit21 */ +#define XSTORM_CORE_CONN_AG_CTX_BIT21_SHIFT 5 +#define XSTORM_CORE_CONN_AG_CTX_CF23_MASK 0x3 /* cf23 */ +#define XSTORM_CORE_CONN_AG_CTX_CF23_SHIFT 6 + u8 byte2 /* byte2 */; + __le16 physical_q0 /* physical_q0 */; + __le16 consolid_prod /* physical_q1 */; + __le16 reserved16 /* physical_q2 */; + __le16 tx_bd_cons /* word3 */; + __le16 tx_bd_or_spq_prod /* word4 */; + __le16 updated_qm_pq_id /* word5 */; + __le16 conn_dpi /* conn_dpi */; + u8 byte3 /* byte3 */; + u8 byte4 /* byte4 */; + u8 byte5 /* byte5 */; + u8 byte6 /* byte6 */; + __le32 reg0 /* reg0 */; + __le32 reg1 /* reg1 */; + __le32 reg2 /* reg2 */; + __le32 reg3 /* reg3 */; + __le32 reg4 /* reg4 */; + __le32 reg5 /* cf_array0 */; + __le32 reg6 /* cf_array1 */; + __le16 word7 /* word7 */; + __le16 word8 /* word8 */; + __le16 word9 /* word9 */; + __le16 word10 /* word10 */; + __le32 reg7 /* reg7 */; + __le32 reg8 /* reg8 */; + __le32 reg9 /* reg9 */; + u8 byte7 /* byte7 */; + u8 byte8 /* byte8 */; + u8 byte9 /* byte9 */; + u8 byte10 /* byte10 */; + u8 byte11 /* byte11 */; + u8 byte12 /* byte12 */; + u8 byte13 /* byte13 */; + u8 byte14 /* byte14 */; + u8 byte15 /* byte15 */; + u8 e5_reserved /* e5_reserved */; + __le16 word11 /* word11 */; + __le32 reg10 /* reg10 */; + __le32 reg11 /* reg11 */; + __le32 reg12 /* reg12 */; + __le32 reg13 /* reg13 */; + __le32 reg14 /* reg14 */; + __le32 reg15 /* reg15 */; + __le32 reg16 /* reg16 */; + __le32 reg17 /* 
reg17 */; + __le32 reg18 /* reg18 */; + __le32 reg19 /* reg19 */; + __le16 word12 /* word12 */; + __le16 word13 /* word13 */; + __le16 word14 /* word14 */; + __le16 word15 /* word15 */; +}; + +struct tstorm_core_conn_ag_ctx { + u8 byte0 /* cdu_validation */; + u8 byte1 /* state */; + u8 flags0; +#define TSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */ +#define TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 +#define TSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */ +#define TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 +#define TSTORM_CORE_CONN_AG_CTX_BIT2_MASK 0x1 /* bit2 */ +#define TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT 2 +#define TSTORM_CORE_CONN_AG_CTX_BIT3_MASK 0x1 /* bit3 */ +#define TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT 3 +#define TSTORM_CORE_CONN_AG_CTX_BIT4_MASK 0x1 /* bit4 */ +#define TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT 4 +#define TSTORM_CORE_CONN_AG_CTX_BIT5_MASK 0x1 /* bit5 */ +#define TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT 5 +#define TSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */ +#define TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 6 + u8 flags1; +#define TSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */ +#define TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 0 +#define TSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */ +#define TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 2 +#define TSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */ +#define TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 4 +#define TSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */ +#define TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 6 + u8 flags2; +#define TSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */ +#define TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 0 +#define TSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */ +#define TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 2 +#define TSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */ +#define TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 4 +#define TSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */ +#define TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 6 + u8 flags3; +#define TSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */ +#define TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 0 +#define TSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */ +#define TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 2 +#define TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */ +#define TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 4 +#define TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */ +#define TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 5 +#define TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */ +#define TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 6 +#define TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */ +#define TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 7 + u8 flags4; +#define TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */ +#define TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 0 +#define TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */ +#define TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 1 +#define TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */ +#define TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 2 +#define TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */ +#define TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 3 +#define TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */ +#define TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 4 +#define TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */ +#define TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 5 +#define TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */ +#define TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 6 +#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */ +#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7 + u8 flags5; +#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */ +#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0 
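+/* Note: each flagsN byte in these aggregative context structures packs several sub-fields, each described by a *_MASK / *_SHIFT pair such as the RULE1EN pair above. Driver code built on this header normally accesses them through generic helpers rather than open-coding the arithmetic; assuming the usual ecore-style GET_FIELD()/SET_FIELD() macros and an illustrative variable ag_ctx of type struct tstorm_core_conn_ag_ctx, reading rule1en looks like: u8 rule1en = GET_FIELD(ag_ctx->flags5, TSTORM_CORE_CONN_AG_CTX_RULE1EN); which expands to (ag_ctx->flags5 >> TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT) & TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK. */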
+#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */ +#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */ +#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */ +#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */ +#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */ +#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */ +#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */ +#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7 + __le32 reg0 /* reg0 */; + __le32 reg1 /* reg1 */; + __le32 reg2 /* reg2 */; + __le32 reg3 /* reg3 */; + __le32 reg4 /* reg4 */; + __le32 reg5 /* reg5 */; + __le32 reg6 /* reg6 */; + __le32 reg7 /* reg7 */; + __le32 reg8 /* reg8 */; + u8 byte2 /* byte2 */; + u8 byte3 /* byte3 */; + __le16 word0 /* word0 */; + u8 byte4 /* byte4 */; + u8 byte5 /* byte5 */; + __le16 word1 /* word1 */; + __le16 word2 /* conn_dpi */; + __le16 word3 /* word3 */; + __le32 reg9 /* reg9 */; + __le32 reg10 /* reg10 */; +}; + +struct ustorm_core_conn_ag_ctx { + u8 reserved /* cdu_validation */; + u8 byte1 /* state */; + u8 flags0; +#define USTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */ +#define USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 +#define USTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */ +#define USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 +#define USTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */ +#define USTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2 +#define USTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */ +#define USTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4 +#define USTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */ +#define USTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define USTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */ +#define USTORM_CORE_CONN_AG_CTX_CF3_SHIFT 0 +#define USTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */ +#define USTORM_CORE_CONN_AG_CTX_CF4_SHIFT 2 +#define USTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */ +#define USTORM_CORE_CONN_AG_CTX_CF5_SHIFT 4 +#define USTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */ +#define USTORM_CORE_CONN_AG_CTX_CF6_SHIFT 6 + u8 flags2; +#define USTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */ +#define USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0 +#define USTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */ +#define USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1 +#define USTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */ +#define USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2 +#define USTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */ +#define USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 3 +#define USTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */ +#define USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 4 +#define USTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */ +#define USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 5 +#define USTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */ +#define USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 6 +#define USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */ +#define USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7 + u8 flags3; +#define USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */ +#define USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */ +#define USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */ 
+#define USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */ +#define USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */ +#define USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */ +#define USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */ +#define USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */ +#define USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7 + u8 byte2 /* byte2 */; + u8 byte3 /* byte3 */; + __le16 word0 /* conn_dpi */; + __le16 word1 /* word1 */; + __le32 rx_producers /* reg0 */; + __le32 reg1 /* reg1 */; + __le32 reg2 /* reg2 */; + __le32 reg3 /* reg3 */; + __le16 word2 /* word2 */; + __le16 word3 /* word3 */; +}; + +/* + * The core storm context for the Mstorm + */ +struct mstorm_core_conn_st_ctx { + __le32 reserved[40]; +}; + +/* + * The core storm context for the Ustorm + */ +struct ustorm_core_conn_st_ctx { + __le32 reserved[20]; +}; + +/* + * The core storm context for the Tstorm + */ +struct tstorm_core_conn_st_ctx { + __le32 reserved[4]; +}; + +/* + * core connection context + */ +struct core_conn_context { +/* ystorm storm context */ + struct ystorm_core_conn_st_ctx ystorm_st_context; + struct regpair ystorm_st_padding[2] /* padding */; +/* pstorm storm context */ + struct pstorm_core_conn_st_ctx pstorm_st_context; + struct regpair pstorm_st_padding[2] /* padding */; +/* xstorm storm context */ + struct xstorm_core_conn_st_ctx xstorm_st_context; +/* xstorm aggregative context */ + struct xstorm_core_conn_ag_ctx xstorm_ag_context; +/* tstorm aggregative context */ + struct tstorm_core_conn_ag_ctx tstorm_ag_context; +/* ustorm aggregative context */ + struct ustorm_core_conn_ag_ctx ustorm_ag_context; +/* mstorm storm context */ + struct mstorm_core_conn_st_ctx mstorm_st_context; +/* ustorm storm context */ + struct ustorm_core_conn_st_ctx ustorm_st_context; + struct regpair ustorm_st_padding[2] /* padding */; +/* tstorm storm context */ + struct tstorm_core_conn_st_ctx tstorm_st_context; + struct regpair tstorm_st_padding[2] /* padding */; +}; + + +/* + * How ll2 should deal with packet upon errors + */ +enum core_error_handle { + LL2_DROP_PACKET /* If error occurs drop packet */, + LL2_DO_NOTHING /* If error occurs do nothing */, + LL2_ASSERT /* If error occurs assert */, + MAX_CORE_ERROR_HANDLE +}; + + +/* + * opcodes for the event ring + */ +enum core_event_opcode { + CORE_EVENT_TX_QUEUE_START, + CORE_EVENT_TX_QUEUE_STOP, + CORE_EVENT_RX_QUEUE_START, + CORE_EVENT_RX_QUEUE_STOP, + CORE_EVENT_RX_QUEUE_FLUSH, + CORE_EVENT_TX_QUEUE_UPDATE, + CORE_EVENT_QUEUE_STATS_QUERY, + MAX_CORE_EVENT_OPCODE +}; + + +/* + * The L4 pseudo checksum mode for Core + */ +enum core_l4_pseudo_checksum_mode { +/* Pseudo Checksum on packet is calculated with the correct packet length. */ + CORE_L4_PSEUDO_CSUM_CORRECT_LENGTH, +/* Pseudo Checksum on packet is calculated with zero length. 
*/ + CORE_L4_PSEUDO_CSUM_ZERO_LENGTH, + MAX_CORE_L4_PSEUDO_CHECKSUM_MODE +}; + + +/* + * Light-L2 RX Producers in Tstorm RAM + */ +struct core_ll2_port_stats { + struct regpair gsi_invalid_hdr; + struct regpair gsi_invalid_pkt_length; + struct regpair gsi_unsupported_pkt_typ; + struct regpair gsi_crcchksm_error; +}; + + +/* + * LL2 TX Per Queue Stats + */ +struct core_ll2_pstorm_per_queue_stat { +/* number of total bytes sent without errors */ + struct regpair sent_ucast_bytes; +/* number of total bytes sent without errors */ + struct regpair sent_mcast_bytes; +/* number of total bytes sent without errors */ + struct regpair sent_bcast_bytes; +/* number of total packets sent without errors */ + struct regpair sent_ucast_pkts; +/* number of total packets sent without errors */ + struct regpair sent_mcast_pkts; +/* number of total packets sent without errors */ + struct regpair sent_bcast_pkts; +/* number of total packets dropped due to errors */ + struct regpair error_drop_pkts; +}; + + +struct core_ll2_tstorm_per_queue_stat { +/* Number of packets discarded because they are bigger than MTU */ + struct regpair packet_too_big_discard; +/* Number of packets discarded due to lack of host buffers */ + struct regpair no_buff_discard; +}; + +struct core_ll2_ustorm_per_queue_stat { + struct regpair rcv_ucast_bytes; + struct regpair rcv_mcast_bytes; + struct regpair rcv_bcast_bytes; + struct regpair rcv_ucast_pkts; + struct regpair rcv_mcast_pkts; + struct regpair rcv_bcast_pkts; +}; + + +/* + * Light-L2 RX Producers + */ +struct core_ll2_rx_prod { + __le16 bd_prod /* BD Producer */; + __le16 cqe_prod /* CQE Producer */; +}; + + + +struct core_ll2_tx_per_queue_stat { +/* PSTORM per queue statistics */ + struct core_ll2_pstorm_per_queue_stat pstorm_stat; +}; + + + +/* + * Structure for doorbell data, in PWM mode, for RX producers update. + */ +struct core_pwm_prod_update_data { + __le16 icid /* internal CID */; + u8 reserved0; + u8 params; +/* aggregative command. Set DB_AGG_CMD_SET for producer update + * (use enum db_agg_cmd_sel) + */ +#define CORE_PWM_PROD_UPDATE_DATA_AGG_CMD_MASK 0x3 +#define CORE_PWM_PROD_UPDATE_DATA_AGG_CMD_SHIFT 0 +#define CORE_PWM_PROD_UPDATE_DATA_RESERVED1_MASK 0x3F /* Set 0. */ +#define CORE_PWM_PROD_UPDATE_DATA_RESERVED1_SHIFT 2 + struct core_ll2_rx_prod prod /* Producers. */; +}; + + +/* + * Ramrod data for rx/tx queue statistics query ramrod + */ +struct core_queue_stats_query_ramrod_data { + u8 rx_stat /* If set, collect RX queue statistics. */; + u8 tx_stat /* If set, collect TX queue statistics. */; + __le16 reserved[3]; +/* Address of RX statistic buffer. core_ll2_rx_per_queue_stat struct will be + * written to this address. + */ + struct regpair rx_stat_addr; +/* Address of TX statistic buffer. core_ll2_tx_per_queue_stat struct will be + * written to this address. 
+ */ + struct regpair tx_stat_addr; +}; + + +/* + * Core Ramrod Command IDs (light L2) + */ +enum core_ramrod_cmd_id { + CORE_RAMROD_UNUSED, + CORE_RAMROD_RX_QUEUE_START /* RX Queue Start Ramrod */, + CORE_RAMROD_TX_QUEUE_START /* TX Queue Start Ramrod */, + CORE_RAMROD_RX_QUEUE_STOP /* RX Queue Stop Ramrod */, + CORE_RAMROD_TX_QUEUE_STOP /* TX Queue Stop Ramrod */, + CORE_RAMROD_RX_QUEUE_FLUSH /* RX Flush queue Ramrod */, + CORE_RAMROD_TX_QUEUE_UPDATE /* TX Queue Update Ramrod */, + CORE_RAMROD_QUEUE_STATS_QUERY /* Queue Statist Query Ramrod */, + MAX_CORE_RAMROD_CMD_ID +}; + + +/* + * Core RX CQE Type for Light L2 + */ +enum core_roce_flavor_type { + CORE_ROCE, + CORE_RROCE, + MAX_CORE_ROCE_FLAVOR_TYPE +}; + + +/* + * Specifies how ll2 should deal with packets errors: packet_too_big and no_buff + */ +struct core_rx_action_on_error { + u8 error_type; +/* ll2 how to handle error packet_too_big (use enum core_error_handle) */ +#define CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG_MASK 0x3 +#define CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG_SHIFT 0 +/* ll2 how to handle error with no_buff (use enum core_error_handle) */ +#define CORE_RX_ACTION_ON_ERROR_NO_BUFF_MASK 0x3 +#define CORE_RX_ACTION_ON_ERROR_NO_BUFF_SHIFT 2 +#define CORE_RX_ACTION_ON_ERROR_RESERVED_MASK 0xF +#define CORE_RX_ACTION_ON_ERROR_RESERVED_SHIFT 4 +}; + + +/* + * Core RX BD for Light L2 + */ +struct core_rx_bd { + struct regpair addr; + __le16 reserved[4]; +}; + + +/* + * Core RX CM offload BD for Light L2 + */ +struct core_rx_bd_with_buff_len { + struct regpair addr; + __le16 buff_length; + __le16 reserved[3]; +}; + +/* + * Core RX CM offload BD for Light L2 + */ +union core_rx_bd_union { + struct core_rx_bd rx_bd /* Core Rx Bd static buffer size */; +/* Core Rx Bd with dynamic buffer length */ + struct core_rx_bd_with_buff_len rx_bd_with_len; +}; + + + +/* + * Opaque Data for Light L2 RX CQE . + */ +struct core_rx_cqe_opaque_data { + __le32 data[2] /* Opaque CQE Data */; +}; + + +/* + * Core RX CQE Type for Light L2 + */ +enum core_rx_cqe_type { + CORE_RX_CQE_ILLIGAL_TYPE /* Bad RX Cqe type */, + CORE_RX_CQE_TYPE_REGULAR /* Regular Core RX CQE */, + CORE_RX_CQE_TYPE_GSI_OFFLOAD /* Fp Gsi offload RX CQE */, + CORE_RX_CQE_TYPE_SLOW_PATH /* Slow path Core RX CQE */, + MAX_CORE_RX_CQE_TYPE +}; + + +/* + * Core RX CQE for Light L2 . + */ +struct core_rx_fast_path_cqe { + u8 type /* CQE type */; +/* Offset (in bytes) of the packet from start of the buffer */ + u8 placement_offset; +/* Parsing and error flags from the parser */ + struct parsing_and_err_flags parse_flags; + __le16 packet_length /* Total packet length (from the parser) */; + __le16 vlan /* 802.1q VLAN tag */; + struct core_rx_cqe_opaque_data opaque_data /* Opaque Data */; +/* bit- map: each bit represents a specific error. errors indications are + * provided by the cracker. see spec for detailed description + */ + struct parsing_err_flags err_flags; + __le16 reserved0; + __le32 reserved1[3]; +}; + +/* + * Core Rx CM offload CQE . 
+ */ +struct core_rx_gsi_offload_cqe { + u8 type /* CQE type */; + u8 data_length_error /* set if gsi data is bigger than buff */; +/* Parsing and error flags from the parser */ + struct parsing_and_err_flags parse_flags; + __le16 data_length /* Total packet length (from the parser) */; + __le16 vlan /* 802.1q VLAN tag */; + __le32 src_mac_addrhi /* hi 4 bytes source mac address */; + __le16 src_mac_addrlo /* lo 2 bytes of source mac address */; +/* These are the lower 16 bit of QP id in RoCE BTH header */ + __le16 qp_id; + __le32 src_qp /* Source QP from DETH header */; + struct core_rx_cqe_opaque_data opaque_data /* Opaque Data */; + __le32 reserved; +}; + +/* + * Core RX CQE for Light L2 . + */ +struct core_rx_slow_path_cqe { + u8 type /* CQE type */; + u8 ramrod_cmd_id; + __le16 echo; + struct core_rx_cqe_opaque_data opaque_data /* Opaque Data */; + __le32 reserved1[5]; +}; + +/* + * Core RX CM offload BD for Light L2 + */ +union core_rx_cqe_union { + struct core_rx_fast_path_cqe rx_cqe_fp /* Fast path CQE */; + struct core_rx_gsi_offload_cqe rx_cqe_gsi /* GSI offload CQE */; + struct core_rx_slow_path_cqe rx_cqe_sp /* Slow path CQE */; +}; + + + + + +/* + * Ramrod data for rx queue start ramrod + */ +struct core_rx_start_ramrod_data { + struct regpair bd_base /* Address of the first BD page */; + struct regpair cqe_pbl_addr /* Base address on host of CQE PBL */; + __le16 mtu /* MTU */; + __le16 sb_id /* Status block ID */; + u8 sb_index /* Status block index */; + u8 complete_cqe_flg /* if set - post completion to the CQE ring */; + u8 complete_event_flg /* if set - post completion to the event ring */; + u8 drop_ttl0_flg /* if set - drop packet with ttl=0 */; + __le16 num_of_pbl_pages /* Number of pages in CQE PBL */; +/* if set - 802.1q tag will be removed and copied to CQE */ + u8 inner_vlan_stripping_en; +/* if set - outer tag wont be stripped, valid only in MF OVLAN mode. */ + u8 outer_vlan_stripping_dis; + u8 queue_id /* Light L2 RX Queue ID */; + u8 main_func_queue /* Set if this is the main PFs LL2 queue */; +/* Duplicate broadcast packets to LL2 main queue in mf_si mode. Valid if + * main_func_queue is set. + */ + u8 mf_si_bcast_accept_all; +/* Duplicate multicast packets to LL2 main queue in mf_si mode. Valid if + * main_func_queue is set. + */ + u8 mf_si_mcast_accept_all; +/* If set, the inner vlan (802.1q tag) priority that is written to cqe will be + * zero out, used for TenantDcb + */ +/* Specifies how ll2 should deal with RX packets errors */ + struct core_rx_action_on_error action_on_error; + u8 gsi_offload_flag /* set for GSI offload mode */; +/* If set, queue is subject for RX VFC classification. */ + u8 vport_id_valid; + u8 vport_id /* Queue VPORT for RX VFC classification. */; + u8 zero_prod_flg /* If set, zero RX producers. 
*/; +/* If set, the inner vlan (802.1q tag) priority that is written to cqe will be + * zero out, used for TenantDcb + */ + u8 wipe_inner_vlan_pri_en; + u8 reserved[2]; +}; + + +/* + * Ramrod data for rx queue stop ramrod + */ +struct core_rx_stop_ramrod_data { + u8 complete_cqe_flg /* post completion to the CQE ring if set */; + u8 complete_event_flg /* post completion to the event ring if set */; + u8 queue_id /* Light L2 RX Queue ID */; + u8 reserved1; + __le16 reserved2[2]; +}; + + +/* + * Flags for Core TX BD + */ +struct core_tx_bd_data { + __le16 as_bitfield; +/* Do not allow additional VLAN manipulations on this packet (DCB) */ +#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_MASK 0x1 +#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_SHIFT 0 +/* Insert VLAN into packet. Cannot be set for LB packets + * (tx_dst == CORE_TX_DEST_LB) + */ +#define CORE_TX_BD_DATA_VLAN_INSERTION_MASK 0x1 +#define CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT 1 +/* This is the first BD of the packet (for debug) */ +#define CORE_TX_BD_DATA_START_BD_MASK 0x1 +#define CORE_TX_BD_DATA_START_BD_SHIFT 2 +/* Calculate the IP checksum for the packet */ +#define CORE_TX_BD_DATA_IP_CSUM_MASK 0x1 +#define CORE_TX_BD_DATA_IP_CSUM_SHIFT 3 +/* Calculate the L4 checksum for the packet */ +#define CORE_TX_BD_DATA_L4_CSUM_MASK 0x1 +#define CORE_TX_BD_DATA_L4_CSUM_SHIFT 4 +/* Packet is IPv6 with extensions */ +#define CORE_TX_BD_DATA_IPV6_EXT_MASK 0x1 +#define CORE_TX_BD_DATA_IPV6_EXT_SHIFT 5 +/* If IPv6+ext, and if l4_csum is 1, than this field indicates L4 protocol: + * 0-TCP, 1-UDP + */ +#define CORE_TX_BD_DATA_L4_PROTOCOL_MASK 0x1 +#define CORE_TX_BD_DATA_L4_PROTOCOL_SHIFT 6 +/* The pseudo checksum mode to place in the L4 checksum field. Required only + * when IPv6+ext and l4_csum is set. (use enum core_l4_pseudo_checksum_mode) + */ +#define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_MASK 0x1 +#define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_SHIFT 7 +/* Number of BDs that make up one packet - width wide enough to present + * CORE_LL2_TX_MAX_BDS_PER_PACKET + */ +#define CORE_TX_BD_DATA_NBDS_MASK 0xF +#define CORE_TX_BD_DATA_NBDS_SHIFT 8 +/* Use roce_flavor enum - Differentiate between Roce flavors is valid when + * connType is ROCE (use enum core_roce_flavor_type) + */ +#define CORE_TX_BD_DATA_ROCE_FLAV_MASK 0x1 +#define CORE_TX_BD_DATA_ROCE_FLAV_SHIFT 12 +/* Calculate ip length */ +#define CORE_TX_BD_DATA_IP_LEN_MASK 0x1 +#define CORE_TX_BD_DATA_IP_LEN_SHIFT 13 +/* disables the STAG insertion, relevant only in MF OVLAN mode. */ +#define CORE_TX_BD_DATA_DISABLE_STAG_INSERTION_MASK 0x1 +#define CORE_TX_BD_DATA_DISABLE_STAG_INSERTION_SHIFT 14 +#define CORE_TX_BD_DATA_RESERVED0_MASK 0x1 +#define CORE_TX_BD_DATA_RESERVED0_SHIFT 15 +}; + +/* + * Core TX BD for Light L2 + */ +struct core_tx_bd { + struct regpair addr /* Buffer Address */; + __le16 nbytes /* Number of Bytes in Buffer */; +/* Network packets: VLAN to insert to packet (if insertion flag set) LoopBack + * packets: echo data to pass to Rx + */ + __le16 nw_vlan_or_lb_echo; + struct core_tx_bd_data bd_data /* BD Flags */; + __le16 bitfield1; +/* L4 Header Offset from start of packet (in Words). 
This is needed if both + * l4_csum and ipv6_ext are set + */ +#define CORE_TX_BD_L4_HDR_OFFSET_W_MASK 0x3FFF +#define CORE_TX_BD_L4_HDR_OFFSET_W_SHIFT 0 +/* Packet destination - Network, Loopback or Drop (use enum core_tx_dest) */ +#define CORE_TX_BD_TX_DST_MASK 0x3 +#define CORE_TX_BD_TX_DST_SHIFT 14 +}; + + + +/* + * Light L2 TX Destination + */ +enum core_tx_dest { + CORE_TX_DEST_NW /* TX Destination to the Network */, + CORE_TX_DEST_LB /* TX Destination to the Loopback */, + CORE_TX_DEST_RESERVED, + CORE_TX_DEST_DROP /* TX Drop */, + MAX_CORE_TX_DEST +}; + + +/* + * Ramrod data for tx queue start ramrod + */ +struct core_tx_start_ramrod_data { + struct regpair pbl_base_addr /* Address of the pbl page */; + __le16 mtu /* Maximum transmission unit */; + __le16 sb_id /* Status block ID */; + u8 sb_index /* Status block protocol index */; + u8 stats_en /* Statistics Enable */; + u8 stats_id /* Statistics Counter ID */; + u8 conn_type /* connection type that loaded ll2 */; + __le16 pbl_size /* Number of BD pages pointed by PBL */; + __le16 qm_pq_id /* QM PQ ID */; + u8 gsi_offload_flag /* set for GSI offload mode */; + u8 ctx_stats_en /* Context statistics enable */; +/* If set, queue is part of VPORT and subject for TX switching. */ + u8 vport_id_valid; +/* vport id of the current connection, used to access non_rdma_in_to_in_pri_map + * which is per vport + */ + u8 vport_id; +}; + + +/* + * Ramrod data for tx queue stop ramrod + */ +struct core_tx_stop_ramrod_data { + __le32 reserved0[2]; +}; + + +/* + * Ramrod data for tx queue update ramrod + */ +struct core_tx_update_ramrod_data { + u8 update_qm_pq_id_flg /* Flag to Update QM PQ ID */; + u8 reserved0; + __le16 qm_pq_id /* Updated QM PQ ID */; + __le32 reserved1[1]; +}; + + +/* + * Enum flag for what type of dcb data to update + */ +enum dcb_dscp_update_mode { +/* use when no change should be done to DCB data */ + DONT_UPDATE_DCB_DSCP, + UPDATE_DCB /* use to update only L2 (vlan) priority */, + UPDATE_DSCP /* use to update only IP DSCP */, + UPDATE_DCB_DSCP /* update vlan pri and DSCP */, + MAX_DCB_DSCP_UPDATE_FLAG +}; + + +struct eth_mstorm_per_pf_stat { + struct regpair gre_discard_pkts /* Dropped GRE RX packets */; + struct regpair vxlan_discard_pkts /* Dropped VXLAN RX packets */; + struct regpair geneve_discard_pkts /* Dropped GENEVE RX packets */; + struct regpair lb_discard_pkts /* Dropped Tx switched packets */; +}; + + +struct eth_mstorm_per_queue_stat { +/* Number of packets discarded because TTL=0 (in IPv4) or hopLimit=0 (IPv6) */ + struct regpair ttl0_discard; +/* Number of packets discarded because they are bigger than MTU */ + struct regpair packet_too_big_discard; +/* Number of packets discarded due to lack of host buffers (BDs/SGEs/CQEs) */ + struct regpair no_buff_discard; +/* Number of packets discarded because of no active Rx connection */ + struct regpair not_active_discard; +/* number of coalesced packets in all TPA aggregations */ + struct regpair tpa_coalesced_pkts; +/* total number of TPA aggregations */ + struct regpair tpa_coalesced_events; +/* number of aggregations, which abnormally ended */ + struct regpair tpa_aborts_num; +/* total TCP payload length in all TPA aggregations */ + struct regpair tpa_coalesced_bytes; +}; + + +/* + * Ethernet TX Per PF + */ +struct eth_pstorm_per_pf_stat { +/* number of total ucast bytes sent on loopback port without errors */ + struct regpair sent_lb_ucast_bytes; +/* number of total mcast bytes sent on loopback port without errors */ + struct regpair sent_lb_mcast_bytes; +/* 
number of total bcast bytes sent on loopback port without errors */ + struct regpair sent_lb_bcast_bytes; +/* number of total ucast packets sent on loopback port without errors */ + struct regpair sent_lb_ucast_pkts; +/* number of total mcast packets sent on loopback port without errors */ + struct regpair sent_lb_mcast_pkts; +/* number of total bcast packets sent on loopback port without errors */ + struct regpair sent_lb_bcast_pkts; + struct regpair sent_gre_bytes /* Sent GRE bytes */; + struct regpair sent_vxlan_bytes /* Sent VXLAN bytes */; + struct regpair sent_geneve_bytes /* Sent GENEVE bytes */; + struct regpair sent_mpls_bytes /* Sent MPLS bytes */; + struct regpair sent_gre_mpls_bytes /* Sent GRE MPLS bytes (E5 Only) */; + struct regpair sent_udp_mpls_bytes /* Sent UDP MPLS bytes (E5 Only) */; + struct regpair sent_gre_pkts /* Sent GRE packets (E5 Only) */; + struct regpair sent_vxlan_pkts /* Sent VXLAN packets */; + struct regpair sent_geneve_pkts /* Sent GENEVE packets */; + struct regpair sent_mpls_pkts /* Sent MPLS packets (E5 Only) */; + struct regpair sent_gre_mpls_pkts /* Sent GRE MPLS packets (E5 Only) */; + struct regpair sent_udp_mpls_pkts /* Sent UDP MPLS packets (E5 Only) */; + struct regpair gre_drop_pkts /* Dropped GRE TX packets */; + struct regpair vxlan_drop_pkts /* Dropped VXLAN TX packets */; + struct regpair geneve_drop_pkts /* Dropped GENEVE TX packets */; + struct regpair mpls_drop_pkts /* Dropped MPLS TX packets (E5 Only) */; +/* Dropped GRE MPLS TX packets (E5 Only) */ + struct regpair gre_mpls_drop_pkts; +/* Dropped UDP MPLS TX packets (E5 Only) */ + struct regpair udp_mpls_drop_pkts; +}; + + +/* + * Ethernet TX Per Queue Stats + */ +struct eth_pstorm_per_queue_stat { +/* number of total bytes sent without errors */ + struct regpair sent_ucast_bytes; +/* number of total bytes sent without errors */ + struct regpair sent_mcast_bytes; +/* number of total bytes sent without errors */ + struct regpair sent_bcast_bytes; +/* number of total packets sent without errors */ + struct regpair sent_ucast_pkts; +/* number of total packets sent without errors */ + struct regpair sent_mcast_pkts; +/* number of total packets sent without errors */ + struct regpair sent_bcast_pkts; +/* number of total packets dropped due to errors */ + struct regpair error_drop_pkts; +}; + + +/* + * ETH RX rate limit data + */ +struct eth_rx_rate_limit { +/* Rate Limit Multiplier - (Storm Clock (MHz) * 8 / Desired Bandwidth (MB/s)) */ + __le16 mult; +/* Constant term to add (or subtract from number of cycles) */ + __le16 cnst; + u8 add_sub_cnst /* Add (1) or subtract (0) constant term */; + u8 reserved0; + __le16 reserved1; +}; + + +/* Update RSS indirection table entry command. One outstanding command supported + * per PF. + */ +struct eth_tstorm_rss_update_data { +/* Valid flag. Driver must set this flag, FW clears valid flag when ready for new + * RSS update command. + */ + u8 valid; +/* Global VPORT ID. If RSS is disabled for VPORT, RSS update command will be + * ignored. + */ + u8 vport_id; + u8 ind_table_index /* RSS indirect table index that will be updated. */; + u8 reserved; + __le16 ind_table_value /* RSS indirect table new value. */; + __le16 reserved1 /* reserved. 
*/; +}; + + +struct eth_ustorm_per_pf_stat { +/* number of total ucast bytes received on loopback port without errors */ + struct regpair rcv_lb_ucast_bytes; +/* number of total mcast bytes received on loopback port without errors */ + struct regpair rcv_lb_mcast_bytes; +/* number of total bcast bytes received on loopback port without errors */ + struct regpair rcv_lb_bcast_bytes; +/* number of total ucast packets received on loopback port without errors */ + struct regpair rcv_lb_ucast_pkts; +/* number of total mcast packets received on loopback port without errors */ + struct regpair rcv_lb_mcast_pkts; +/* number of total bcast packets received on loopback port without errors */ + struct regpair rcv_lb_bcast_pkts; + struct regpair rcv_gre_bytes /* Received GRE bytes */; + struct regpair rcv_vxlan_bytes /* Received VXLAN bytes */; + struct regpair rcv_geneve_bytes /* Received GENEVE bytes */; + struct regpair rcv_gre_pkts /* Received GRE packets */; + struct regpair rcv_vxlan_pkts /* Received VXLAN packets */; + struct regpair rcv_geneve_pkts /* Received GENEVE packets */; +}; + + +struct eth_ustorm_per_queue_stat { + struct regpair rcv_ucast_bytes; + struct regpair rcv_mcast_bytes; + struct regpair rcv_bcast_bytes; + struct regpair rcv_ucast_pkts; + struct regpair rcv_mcast_pkts; + struct regpair rcv_bcast_pkts; +}; + + +/* + * Event Ring VF-PF Channel data + */ +struct vf_pf_channel_eqe_data { + struct regpair msg_addr /* VF-PF message address */; +}; + +/* + * Event Ring malicious VF data + */ +struct malicious_vf_eqe_data { + u8 vf_id /* Malicious VF ID */; + u8 err_id /* Malicious VF error (use enum malicious_vf_error_id) */; + __le16 reserved[3]; +}; + +/* + * Event Ring initial cleanup data + */ +struct initial_cleanup_eqe_data { + u8 vf_id /* VF ID */; + u8 reserved[7]; +}; + +/* + * Event Data Union + */ +union event_ring_data { + u8 bytes[8] /* Byte Array */; + struct vf_pf_channel_eqe_data vf_pf_channel /* VF-PF Channel data */; + struct iscsi_eqe_data iscsi_info /* Dedicated fields to iscsi data */; +/* Dedicated fields to iscsi connect done results */ + struct iscsi_connect_done_results iscsi_conn_done_info; + union rdma_eqe_data rdma_data /* Dedicated field for RDMA data */; + struct nvmf_eqe_data nvmf_data /* Dedicated field for NVMf data */; + struct malicious_vf_eqe_data malicious_vf /* Malicious VF data */; +/* VF Initial Cleanup data */ + struct initial_cleanup_eqe_data vf_init_cleanup; +}; + + +/* + * Event Ring Entry + */ +struct event_ring_entry { + u8 protocol_id /* Event Protocol ID (use enum protocol_type) */; + u8 opcode /* Event Opcode (Per Protocol Type) */; + u8 reserved0 /* Reserved */; + u8 vfId /* vfId for this event, 0xFF if this is a PF event */; + __le16 echo /* Echo value from ramrod data on the host */; +/* FW return code for SP ramrods. Use (according to protocol) eth_return_code, + * or rdma_fw_return_code, or fcoe_completion_status + */ + u8 fw_return_code; + u8 flags; +/* 0: synchronous EQE - a completion of SP message. 
1: asynchronous EQE */ +#define EVENT_RING_ENTRY_ASYNC_MASK 0x1 +#define EVENT_RING_ENTRY_ASYNC_SHIFT 0 +#define EVENT_RING_ENTRY_RESERVED1_MASK 0x7F +#define EVENT_RING_ENTRY_RESERVED1_SHIFT 1 + union event_ring_data data; +}; + +/* + * Event Ring Next Page Address + */ +struct event_ring_next_addr { + struct regpair addr /* Next Page Address */; + __le32 reserved[2] /* Reserved */; +}; + +/* + * Event Ring Element + */ +union event_ring_element { + struct event_ring_entry entry /* Event Ring Entry */; +/* Event Ring Next Page Address */ + struct event_ring_next_addr next_addr; +}; + + + +/* + * Ports mode + */ +enum fw_flow_ctrl_mode { + flow_ctrl_pause, + flow_ctrl_pfc, + MAX_FW_FLOW_CTRL_MODE +}; + + +/* + * GFT profile type. + */ +enum gft_profile_type { +/* tunnel type, inner 4 tuple, IP type and L4 type match. */ + GFT_PROFILE_TYPE_4_TUPLE, +/* tunnel type, inner L4 destination port, IP type and L4 type match. */ + GFT_PROFILE_TYPE_L4_DST_PORT, +/* tunnel type, inner IP destination address and IP type match. */ + GFT_PROFILE_TYPE_IP_DST_ADDR, +/* tunnel type, inner IP source address and IP type match. */ + GFT_PROFILE_TYPE_IP_SRC_ADDR, + GFT_PROFILE_TYPE_TUNNEL_TYPE /* tunnel type and outer IP type match. */, + MAX_GFT_PROFILE_TYPE +}; + + +/* + * Major and Minor hsi Versions + */ +struct hsi_fp_ver_struct { + u8 minor_ver_arr[2] /* Minor Version of hsi loading pf */; + u8 major_ver_arr[2] /* Major Version of driver loading pf */; +}; + + +/* + * Integration Phase + */ +enum integ_phase { + INTEG_PHASE_BB_A0_LATEST = 3 /* BB A0 latest integration phase */, + INTEG_PHASE_BB_B0_NO_MCP = 10 /* BB B0 without MCP */, + INTEG_PHASE_BB_B0_WITH_MCP = 11 /* BB B0 with MCP */, + MAX_INTEG_PHASE +}; + + +/* + * Ports mode + */ +enum iwarp_ll2_tx_queues { +/* LL2 queue for OOO packets sent in-order by the driver */ + IWARP_LL2_IN_ORDER_TX_QUEUE = 1, +/* LL2 queue for unaligned packets sent aligned by the driver */ + IWARP_LL2_ALIGNED_TX_QUEUE, +/* LL2 queue for unaligned packets sent aligned and was right-trimmed by the + * driver + */ + IWARP_LL2_ALIGNED_RIGHT_TRIMMED_TX_QUEUE, + IWARP_LL2_ERROR /* Error indication */, + MAX_IWARP_LL2_TX_QUEUES +}; + + +/* + * Malicious VF error ID + */ +enum malicious_vf_error_id { + MALICIOUS_VF_NO_ERROR /* Zero placeholder value */, +/* Writing to VF/PF channel when it is not ready */ + VF_PF_CHANNEL_NOT_READY, + VF_ZONE_MSG_NOT_VALID /* VF channel message is not valid */, + VF_ZONE_FUNC_NOT_ENABLED /* Parent PF of VF channel is not active */, +/* TX packet is shorter then reported on BDs or from minimal size */ + ETH_PACKET_TOO_SMALL, +/* Tx packet with marked as insert VLAN when its illegal */ + ETH_ILLEGAL_VLAN_MODE, + ETH_MTU_VIOLATION /* TX packet is greater then MTU */, +/* TX packet has illegal inband tags marked */ + ETH_ILLEGAL_INBAND_TAGS, +/* Vlan cant be added to inband tag */ + ETH_VLAN_INSERT_AND_INBAND_VLAN, +/* indicated number of BDs for the packet is illegal */ + ETH_ILLEGAL_NBDS, + ETH_FIRST_BD_WO_SOP /* 1st BD must have start_bd flag set */, +/* There are not enough BDs for transmission of even one packet */ + ETH_INSUFFICIENT_BDS, + ETH_ILLEGAL_LSO_HDR_NBDS /* Header NBDs value is illegal */, + ETH_ILLEGAL_LSO_MSS /* LSO MSS value is more than allowed */, +/* empty BD (which not contains control flags) is illegal */ + ETH_ZERO_SIZE_BD, + ETH_ILLEGAL_LSO_HDR_LEN /* LSO header size is above the limit */, +/* In LSO its expected that on the local BD ring there will be at least MSS + * bytes of data + */ + ETH_INSUFFICIENT_PAYLOAD, + 
ETH_EDPM_OUT_OF_SYNC /* Valid BDs on local ring after EDPM L2 sync */, +/* Tunneled packet with IPv6+Ext without a proper number of BDs */ + ETH_TUNN_IPV6_EXT_NBD_ERR, + ETH_CONTROL_PACKET_VIOLATION /* VF sent control frame such as PFC */, + ETH_ANTI_SPOOFING_ERR /* Anti-Spoofing verification failure */, +/* packet scanned is too large (can be 9700 at most) */ + ETH_PACKET_SIZE_TOO_LARGE, +/* Tx packet with marked as insert VLAN when its illegal */ + CORE_ILLEGAL_VLAN_MODE, +/* indicated number of BDs for the packet is illegal */ + CORE_ILLEGAL_NBDS, + CORE_FIRST_BD_WO_SOP /* 1st BD must have start_bd flag set */, +/* There are not enough BDs for transmission of even one packet */ + CORE_INSUFFICIENT_BDS, +/* TX packet is shorter then reported on BDs or from minimal size */ + CORE_PACKET_TOO_SMALL, + CORE_ILLEGAL_INBAND_TAGS /* TX packet has illegal inband tags marked */, + CORE_VLAN_INSERT_AND_INBAND_VLAN /* Vlan cant be added to inband tag */, + CORE_MTU_VIOLATION /* TX packet is greater then MTU */, + CORE_CONTROL_PACKET_VIOLATION /* VF sent control frame such as PFC */, + CORE_ANTI_SPOOFING_ERR /* Anti-Spoofing verification failure */, + MAX_MALICIOUS_VF_ERROR_ID +}; + + + +/* + * Mstorm non-triggering VF zone + */ +struct mstorm_non_trigger_vf_zone { +/* VF statistic bucket */ + struct eth_mstorm_per_queue_stat eth_queue_stat; +/* VF RX queues producers */ + struct eth_rx_prod_data + eth_rx_queue_producers[ETH_MAX_NUM_RX_QUEUES_PER_VF_QUAD]; +}; + + +/* + * Mstorm VF zone + */ +struct mstorm_vf_zone { +/* non-interrupt-triggering zone */ + struct mstorm_non_trigger_vf_zone non_trigger; +}; + + +/* + * vlan header including TPID and TCI fields + */ +struct vlan_header { + __le16 tpid /* Tag Protocol Identifier */; + __le16 tci /* Tag Control Information */; +}; + +/* + * outer tag configurations + */ +struct outer_tag_config_struct { +/* Enables updating S-tag priority from inner tag or DCB. Should be 1 for Bette + * Davis, UFP with Host Control mode, and UFP with DCB over base interface. + * else - 0. + */ + u8 enable_stag_pri_change; +/* If inner_to_outer_pri_map is initialize then set pri_map_valid */ + u8 pri_map_valid; + u8 reserved[2]; +/* In case mf_mode is MF_OVLAN, this field specifies the outer tag protocol + * identifier and outer tag control information + */ + struct vlan_header outer_tag; +/* Map from inner to outer priority. Set pri_map_valid when init map */ + u8 inner_to_outer_pri_map[8]; +}; + + +/* + * personality per PF + */ +enum personality_type { + BAD_PERSONALITY_TYP, + PERSONALITY_ISCSI /* iSCSI and LL2 */, + PERSONALITY_FCOE /* Fcoe and LL2 */, + PERSONALITY_RDMA_AND_ETH /* Roce or Iwarp, Eth and LL2 */, + PERSONALITY_RDMA /* Roce and LL2 */, + PERSONALITY_CORE /* CORE(LL2) */, + PERSONALITY_ETH /* Ethernet */, + PERSONALITY_TOE /* Toe and LL2 */, + MAX_PERSONALITY_TYPE +}; + + +/* + * tunnel configuration + */ +struct pf_start_tunnel_config { +/* Set VXLAN tunnel UDP destination port to vxlan_udp_port. If not set - + * FW will use a default port + */ + u8 set_vxlan_udp_port_flg; +/* Set GENEVE tunnel UDP destination port to geneve_udp_port. If not set - + * FW will use a default port + */ + u8 set_geneve_udp_port_flg; +/* Set no-innet-L2 VXLAN tunnel UDP destination port to + * no_inner_l2_vxlan_udp_port. If not set - FW will use a default port + */ + u8 set_no_inner_l2_vxlan_udp_port_flg; + u8 tunnel_clss_vxlan /* Rx classification scheme for VXLAN tunnel. */; +/* Rx classification scheme for l2 GENEVE tunnel. 
*/ + u8 tunnel_clss_l2geneve; +/* Rx classification scheme for ip GENEVE tunnel. */ + u8 tunnel_clss_ipgeneve; + u8 tunnel_clss_l2gre /* Rx classification scheme for l2 GRE tunnel. */; + u8 tunnel_clss_ipgre /* Rx classification scheme for ip GRE tunnel. */; +/* VXLAN tunnel UDP destination port. Valid if set_vxlan_udp_port_flg=1 */ + __le16 vxlan_udp_port; +/* GENEVE tunnel UDP destination port. Valid if set_geneve_udp_port_flg=1 */ + __le16 geneve_udp_port; +/* no-innet-L2 VXLAN tunnel UDP destination port. Valid if + * set_no_inner_l2_vxlan_udp_port_flg=1 + */ + __le16 no_inner_l2_vxlan_udp_port; + __le16 reserved[3]; +}; + +/* + * Ramrod data for PF start ramrod + */ +struct pf_start_ramrod_data { + struct regpair event_ring_pbl_addr /* Address of event ring PBL */; +/* PBL address of consolidation queue */ + struct regpair consolid_q_pbl_addr; +/* tunnel configuration. */ + struct pf_start_tunnel_config tunnel_config; + __le16 event_ring_sb_id /* Status block ID */; +/* All VfIds owned by Pf will be from baseVfId till baseVfId+numVfs */ + u8 base_vf_id; + u8 num_vfs /* Amount of vfs owned by PF */; + u8 event_ring_num_pages /* Number of PBL pages in event ring */; + u8 event_ring_sb_index /* Status block index */; + u8 path_id /* HW path ID (engine ID) */; + u8 warning_as_error /* In FW asserts, treat warning as error */; +/* If not set - throw a warning for each ramrod (for debug) */ + u8 dont_log_ramrods; + u8 personality /* define what type of personality is new PF */; +/* Log type mask. Each bit set enables a corresponding event type logging. + * Event types are defined as ASSERT_LOG_TYPE_xxx + */ + __le16 log_type_mask; + u8 mf_mode /* Multi function mode */; + u8 integ_phase /* Integration phase */; +/* If set, inter-pf tx switching is allowed in Switch Independent func mode */ + u8 allow_npar_tx_switching; + u8 reserved0; +/* FP HSI version to be used by FW */ + struct hsi_fp_ver_struct hsi_fp_ver; +/* Outer tag configurations */ + struct outer_tag_config_struct outer_tag_config; +}; + + + +/* + * Per protocol DCB data + */ +struct protocol_dcb_data { + u8 dcb_enable_flag /* Enable DCB */; + u8 dscp_enable_flag /* Enable updating DSCP value */; + u8 dcb_priority /* DCB priority */; + u8 dcb_tc /* DCB TC */; + u8 dscp_val /* DSCP value to write if dscp_enable_flag is set */; +/* When DCB is enabled - if this flag is set, dont add VLAN 0 tag to untagged + * frames + */ + u8 dcb_dont_add_vlan0; +}; + +/* + * Update tunnel configuration + */ +struct pf_update_tunnel_config { +/* Update RX per PF tunnel classification scheme. */ + u8 update_rx_pf_clss; +/* Update per PORT default tunnel RX classification scheme for traffic with + * unknown unicast outer MAC in NPAR mode. + */ + u8 update_rx_def_ucast_clss; +/* Update per PORT default tunnel RX classification scheme for traffic with non + * unicast outer MAC in NPAR mode. + */ + u8 update_rx_def_non_ucast_clss; +/* Update VXLAN tunnel UDP destination port. */ + u8 set_vxlan_udp_port_flg; +/* Update GENEVE tunnel UDP destination port. */ + u8 set_geneve_udp_port_flg; +/* Update no-innet-L2 VXLAN tunnel UDP destination port. */ + u8 set_no_inner_l2_vxlan_udp_port_flg; + u8 tunnel_clss_vxlan /* Classification scheme for VXLAN tunnel. */; +/* Classification scheme for l2 GENEVE tunnel. */ + u8 tunnel_clss_l2geneve; +/* Classification scheme for ip GENEVE tunnel. */ + u8 tunnel_clss_ipgeneve; + u8 tunnel_clss_l2gre /* Classification scheme for l2 GRE tunnel. */; + u8 tunnel_clss_ipgre /* Classification scheme for ip GRE tunnel. 
*/; + u8 reserved; + __le16 vxlan_udp_port /* VXLAN tunnel UDP destination port. */; + __le16 geneve_udp_port /* GENEVE tunnel UDP destination port. */; +/* no-innet-L2 VXLAN tunnel UDP destination port. */ + __le16 no_inner_l2_vxlan_udp_port; + __le16 reserved1[3]; +}; + +/* + * Data for port update ramrod + */ +struct pf_update_ramrod_data { +/* Update Eth DCB data indication (use enum dcb_dscp_update_mode) */ + u8 update_eth_dcb_data_mode; +/* Update FCOE DCB data indication (use enum dcb_dscp_update_mode) */ + u8 update_fcoe_dcb_data_mode; +/* Update iSCSI DCB data indication (use enum dcb_dscp_update_mode) */ + u8 update_iscsi_dcb_data_mode; + u8 update_roce_dcb_data_mode /* Update ROCE DCB data indication */; +/* Update RROCE (RoceV2) DCB data indication */ + u8 update_rroce_dcb_data_mode; + u8 update_iwarp_dcb_data_mode /* Update IWARP DCB data indication */; + u8 update_mf_vlan_flag /* Update MF outer vlan Id */; +/* Update Enable STAG Priority Change indication */ + u8 update_enable_stag_pri_change; + struct protocol_dcb_data eth_dcb_data /* core eth related fields */; + struct protocol_dcb_data fcoe_dcb_data /* core fcoe related fields */; +/* core iscsi related fields */ + struct protocol_dcb_data iscsi_dcb_data; + struct protocol_dcb_data roce_dcb_data /* core roce related fields */; +/* core roce related fields */ + struct protocol_dcb_data rroce_dcb_data; +/* core iwarp related fields */ + struct protocol_dcb_data iwarp_dcb_data; + __le16 mf_vlan /* new outer vlan id value */; +/* enables updating S-tag priority from inner tag or DCB. Should be 1 for Bette + * Davis, UFP with Host Control mode, and UFP with DCB over base interface. + * else - 0 + */ + u8 enable_stag_pri_change; + u8 reserved; +/* tunnel configuration. */ + struct pf_update_tunnel_config tunnel_config; +}; + + + +/* + * Ports mode + */ +enum ports_mode { + ENGX2_PORTX1 /* 2 engines x 1 port */, + ENGX2_PORTX2 /* 2 engines x 2 ports */, + ENGX1_PORTX1 /* 1 engine x 1 port */, + ENGX1_PORTX2 /* 1 engine x 2 ports */, + ENGX1_PORTX4 /* 1 engine x 4 ports */, + MAX_PORTS_MODE +}; + + + +/* + * use to index in hsi_fp_[major|minor]_ver_arr per protocol + */ +enum protocol_version_array_key { + ETH_VER_KEY = 0, + ROCE_VER_KEY, + MAX_PROTOCOL_VERSION_ARRAY_KEY +}; + + + +/* + * RDMA TX Stats + */ +struct rdma_sent_stats { + struct regpair sent_bytes /* number of total RDMA bytes sent */; + struct regpair sent_pkts /* number of total RDMA packets sent */; +}; + +/* + * Pstorm non-triggering VF zone + */ +struct pstorm_non_trigger_vf_zone { +/* VF statistic bucket */ + struct eth_pstorm_per_queue_stat eth_queue_stat; + struct rdma_sent_stats rdma_stats /* RoCE sent statistics */; +}; + + +/* + * Pstorm VF zone + */ +struct pstorm_vf_zone { +/* non-interrupt-triggering zone */ + struct pstorm_non_trigger_vf_zone non_trigger; + struct regpair reserved[7] /* vf_zone size mus be power of 2 */; +}; + + +/* + * Ramrod Header of SPQE + */ +struct ramrod_header { + __le32 cid /* Slowpath Connection CID */; + u8 cmd_id /* Ramrod Cmd (Per Protocol Type) */; + u8 protocol_id /* Ramrod Protocol ID */; + __le16 echo /* Ramrod echo */; +}; + + +/* + * RDMA RX Stats + */ +struct rdma_rcv_stats { + struct regpair rcv_bytes /* number of total RDMA bytes received */; + struct regpair rcv_pkts /* number of total RDMA packets received */; +}; + + + +/* + * Data for update QCN/DCQCN RL ramrod + */ +struct rl_update_ramrod_data { + u8 qcn_update_param_flg /* Update QCN global params: timeout. 
*/; +/* Update DCQCN global params: timeout, g, k. */ + u8 dcqcn_update_param_flg; + u8 rl_init_flg /* Init RL parameters, when RL disabled. */; + u8 rl_start_flg /* Start RL in IDLE state. Set rate to maximum. */; + u8 rl_stop_flg /* Stop RL. */; + u8 rl_id_first /* ID of first or single RL, that will be updated. */; +/* ID of last RL, that will be updated. If clear, single RL will updated. */ + u8 rl_id_last; + u8 rl_dc_qcn_flg /* If set, RL will used for DCQCN. */; +/* If set, alpha will be reset to 1 when the state machine is idle. */ + u8 dcqcn_reset_alpha_on_idle; +/* Byte counter threshold to change rate increase stage. */ + u8 rl_bc_stage_th; +/* Timer threshold to change rate increase stage. */ + u8 rl_timer_stage_th; + u8 reserved1; + __le32 rl_bc_rate /* Byte Counter Limit. */; + __le16 rl_max_rate /* Maximum rate in 1.6 Mbps resolution. */; + __le16 rl_r_ai /* Active increase rate. */; + __le16 rl_r_hai /* Hyper active increase rate. */; + __le16 dcqcn_g /* DCQCN Alpha update gain in 1/64K resolution . */; + __le32 dcqcn_k_us /* DCQCN Alpha update interval. */; + __le32 dcqcn_timeuot_us /* DCQCN timeout. */; + __le32 qcn_timeuot_us /* QCN timeout. */; + __le32 reserved2; +}; + + +/* + * Slowpath Element (SPQE) + */ +struct slow_path_element { + struct ramrod_header hdr /* Ramrod Header */; + struct regpair data_ptr /* Pointer to the Ramrod Data on the Host */; +}; + + +/* + * Tstorm non-triggering VF zone + */ +struct tstorm_non_trigger_vf_zone { + struct rdma_rcv_stats rdma_stats /* RoCE received statistics */; +}; + + +struct tstorm_per_port_stat { +/* packet is dropped because it was truncated in NIG */ + struct regpair trunc_error_discard; +/* packet is dropped because of Ethernet FCS error */ + struct regpair mac_error_discard; +/* packet is dropped because classification was unsuccessful */ + struct regpair mftag_filter_discard; +/* packet was passed to Ethernet and dropped because of no mac filter match */ + struct regpair eth_mac_filter_discard; +/* packet passed to Light L2 and dropped because Light L2 is not configured for + * this PF + */ + struct regpair ll2_mac_filter_discard; +/* packet passed to Light L2 and dropped because Light L2 is not configured for + * this PF + */ + struct regpair ll2_conn_disabled_discard; +/* packet is an ISCSI irregular packet */ + struct regpair iscsi_irregular_pkt; +/* packet is an FCOE irregular packet */ + struct regpair fcoe_irregular_pkt; +/* packet is an ROCE irregular packet */ + struct regpair roce_irregular_pkt; +/* packet is an IWARP irregular packet */ + struct regpair iwarp_irregular_pkt; +/* packet is an ETH irregular packet */ + struct regpair eth_irregular_pkt; +/* packet is an TOE irregular packet */ + struct regpair toe_irregular_pkt; +/* packet is an PREROCE irregular packet */ + struct regpair preroce_irregular_pkt; + struct regpair eth_gre_tunn_filter_discard /* GRE dropped packets */; +/* VXLAN dropped packets */ + struct regpair eth_vxlan_tunn_filter_discard; +/* GENEVE dropped packets */ + struct regpair eth_geneve_tunn_filter_discard; + struct regpair eth_gft_drop_pkt /* GFT dropped packets */; +}; + + +/* + * Tstorm VF zone + */ +struct tstorm_vf_zone { +/* non-interrupt-triggering zone */ + struct tstorm_non_trigger_vf_zone non_trigger; +}; + + +/* + * Tunnel classification scheme + */ +enum tunnel_clss { +/* Use MAC and VLAN from first L2 header for vport classification. 
*/ + TUNNEL_CLSS_MAC_VLAN = 0, +/* Use MAC from first L2 header and VNI from tunnel header for vport + * classification + */ + TUNNEL_CLSS_MAC_VNI, +/* Use MAC and VLAN from last L2 header for vport classification */ + TUNNEL_CLSS_INNER_MAC_VLAN, +/* Use MAC from last L2 header and VNI from tunnel header for vport + * classification + */ + TUNNEL_CLSS_INNER_MAC_VNI, +/* Use MAC and VLAN from last L2 header for vport classification. If no exact + * match, use MAC and VLAN from first L2 header for classification. + */ + TUNNEL_CLSS_MAC_VLAN_DUAL_STAGE, + MAX_TUNNEL_CLSS +}; + + + +/* + * Ustorm non-triggering VF zone + */ +struct ustorm_non_trigger_vf_zone { +/* VF statistic bucket */ + struct eth_ustorm_per_queue_stat eth_queue_stat; + struct regpair vf_pf_msg_addr /* VF-PF message address */; +}; + + +/* + * Ustorm triggering VF zone + */ +struct ustorm_trigger_vf_zone { + u8 vf_pf_msg_valid /* VF-PF message valid flag */; + u8 reserved[7]; +}; + + +/* + * Ustorm VF zone + */ +struct ustorm_vf_zone { +/* non-interrupt-triggering zone */ + struct ustorm_non_trigger_vf_zone non_trigger; + struct ustorm_trigger_vf_zone trigger /* interrupt triggering zone */; +}; + + +/* + * VF-PF channel data + */ +struct vf_pf_channel_data { +/* 0: VF-PF Channel NOT ready. Waiting for ack from PF driver. 1: VF-PF Channel + * is ready for a new transaction. + */ + __le32 ready; +/* 0: VF-PF Channel is invalid because of malicious VF. 1: VF-PF Channel is + * valid. + */ + u8 valid; + u8 reserved0; + __le16 reserved1; +}; + + +/* + * Ramrod data for VF start ramrod + */ +struct vf_start_ramrod_data { + u8 vf_id /* VF ID */; +/* If set, initial cleanup ack will be sent to parent PF SP event queue */ + u8 enable_flr_ack; + __le16 opaque_fid /* VF opaque FID */; + u8 personality /* define what type of personality is new VF */; + u8 reserved[7]; +/* FP HSI version to be used by FW */ + struct hsi_fp_ver_struct hsi_fp_ver; +}; + + +/* + * Ramrod data for VF start ramrod + */ +struct vf_stop_ramrod_data { + u8 vf_id /* VF ID */; + u8 reserved0; + __le16 reserved1; + __le32 reserved2; +}; + + +/* + * VF zone size mode. + */ +enum vf_zone_size_mode { +/* Default VF zone size. Up to 192 VF supported. */ + VF_ZONE_SIZE_MODE_DEFAULT, +/* Doubled VF zone size. Up to 96 VF supported. */ + VF_ZONE_SIZE_MODE_DOUBLE, +/* Quad VF zone size. Up to 48 VF supported. */ + VF_ZONE_SIZE_MODE_QUAD, + MAX_VF_ZONE_SIZE_MODE +}; + + + + +/* + * Xstorm non-triggering VF zone + */ +struct xstorm_non_trigger_vf_zone { + struct regpair non_edpm_ack_pkts /* RoCE received statistics */; +}; + + +/* + * Tstorm VF zone + */ +struct xstorm_vf_zone { +/* non-interrupt-triggering zone */ + struct xstorm_non_trigger_vf_zone non_trigger; +}; + + + +/* + * Attentions status block + */ +struct atten_status_block { + __le32 atten_bits; + __le32 atten_ack; + __le16 reserved0; + __le16 sb_index /* status block running index */; + __le32 reserved1; +}; + + +/* + * DMAE command + */ +struct dmae_cmd { + __le32 opcode; +/* DMA Source. 0 - PCIe, 1 - GRC (use enum dmae_cmd_src_enum) */ +#define DMAE_CMD_SRC_MASK 0x1 +#define DMAE_CMD_SRC_SHIFT 0 +/* DMA destination. 0 - None, 1 - PCIe, 2 - GRC, 3 - None + * (use enum dmae_cmd_dst_enum) + */ +#define DMAE_CMD_DST_MASK 0x3 +#define DMAE_CMD_DST_SHIFT 1 +/* Completion destination. 
0 - PCie, 1 - GRC (use enum dmae_cmd_c_dst_enum) */ +#define DMAE_CMD_C_DST_MASK 0x1 +#define DMAE_CMD_C_DST_SHIFT 3 +/* Reset the CRC result (do not use the previous result as the seed) */ +#define DMAE_CMD_CRC_RESET_MASK 0x1 +#define DMAE_CMD_CRC_RESET_SHIFT 4 +/* Reset the source address in the next go to the same source address of the + * previous go + */ +#define DMAE_CMD_SRC_ADDR_RESET_MASK 0x1 +#define DMAE_CMD_SRC_ADDR_RESET_SHIFT 5 +/* Reset the destination address in the next go to the same destination address + * of the previous go + */ +#define DMAE_CMD_DST_ADDR_RESET_MASK 0x1 +#define DMAE_CMD_DST_ADDR_RESET_SHIFT 6 +/* 0 completion function is the same as src function, 1 - 0 completion + * function is the same as dst function (use enum dmae_cmd_comp_func_enum) + */ +#define DMAE_CMD_COMP_FUNC_MASK 0x1 +#define DMAE_CMD_COMP_FUNC_SHIFT 7 +/* 0 - Do not write a completion word, 1 - Write a completion word + * (use enum dmae_cmd_comp_word_en_enum) + */ +#define DMAE_CMD_COMP_WORD_EN_MASK 0x1 +#define DMAE_CMD_COMP_WORD_EN_SHIFT 8 +/* 0 - Do not write a CRC word, 1 - Write a CRC word + * (use enum dmae_cmd_comp_crc_en_enum) + */ +#define DMAE_CMD_COMP_CRC_EN_MASK 0x1 +#define DMAE_CMD_COMP_CRC_EN_SHIFT 9 +/* The CRC word should be taken from the DMAE address space from address 9+X, + * where X is the value in these bits. + */ +#define DMAE_CMD_COMP_CRC_OFFSET_MASK 0x7 +#define DMAE_CMD_COMP_CRC_OFFSET_SHIFT 10 +#define DMAE_CMD_RESERVED1_MASK 0x1 +#define DMAE_CMD_RESERVED1_SHIFT 13 +#define DMAE_CMD_ENDIANITY_MODE_MASK 0x3 +#define DMAE_CMD_ENDIANITY_MODE_SHIFT 14 +/* The field specifies how the completion word is affected by PCIe read error. 0 + * Send a regular completion, 1 - Send a completion with an error indication, + * 2 do not send a completion (use enum dmae_cmd_error_handling_enum) + */ +#define DMAE_CMD_ERR_HANDLING_MASK 0x3 +#define DMAE_CMD_ERR_HANDLING_SHIFT 16 +/* The port ID to be placed on the RF FID field of the GRC bus. this field is + * used both when GRC is the destination and when it is the source of the DMAE + * transaction. 
+ */ +#define DMAE_CMD_PORT_ID_MASK 0x3 +#define DMAE_CMD_PORT_ID_SHIFT 18 +/* Source PCI function number [3:0] */ +#define DMAE_CMD_SRC_PF_ID_MASK 0xF +#define DMAE_CMD_SRC_PF_ID_SHIFT 20 +/* Destination PCI function number [3:0] */ +#define DMAE_CMD_DST_PF_ID_MASK 0xF +#define DMAE_CMD_DST_PF_ID_SHIFT 24 +#define DMAE_CMD_SRC_VF_ID_VALID_MASK 0x1 /* Source VFID valid */ +#define DMAE_CMD_SRC_VF_ID_VALID_SHIFT 28 +#define DMAE_CMD_DST_VF_ID_VALID_MASK 0x1 /* Destination VFID valid */ +#define DMAE_CMD_DST_VF_ID_VALID_SHIFT 29 +#define DMAE_CMD_RESERVED2_MASK 0x3 +#define DMAE_CMD_RESERVED2_SHIFT 30 +/* PCIe source address low in bytes or GRC source address in DW */ + __le32 src_addr_lo; +/* PCIe source address high in bytes or reserved (if source is GRC) */ + __le32 src_addr_hi; +/* PCIe destination address low in bytes or GRC destination address in DW */ + __le32 dst_addr_lo; +/* PCIe destination address high in bytes or reserved (if destination is GRC) */ + __le32 dst_addr_hi; + __le16 length_dw /* Length in DW */; + __le16 opcode_b; +#define DMAE_CMD_SRC_VF_ID_MASK 0xFF /* Source VF id */ +#define DMAE_CMD_SRC_VF_ID_SHIFT 0 +#define DMAE_CMD_DST_VF_ID_MASK 0xFF /* Destination VF id */ +#define DMAE_CMD_DST_VF_ID_SHIFT 8 +/* PCIe completion address low in bytes or GRC completion address in DW */ + __le32 comp_addr_lo; +/* PCIe completion address high in bytes or reserved (if completion address is + * GRC) + */ + __le32 comp_addr_hi; + __le32 comp_val /* Value to write to completion address */; + __le32 crc32 /* crc16 result */; + __le32 crc_32_c /* crc32_c result */; + __le16 crc16 /* crc16 result */; + __le16 crc16_c /* crc16_c result */; + __le16 crc10 /* crc_t10 result */; + __le16 error_bit_reserved; +#define DMAE_CMD_ERROR_BIT_MASK 0x1 /* Error bit */ +#define DMAE_CMD_ERROR_BIT_SHIFT 0 +#define DMAE_CMD_RESERVED_MASK 0x7FFF +#define DMAE_CMD_RESERVED_SHIFT 1 + __le16 xsum16 /* checksum16 result */; + __le16 xsum8 /* checksum8 result */; +}; + + +enum dmae_cmd_comp_crc_en_enum { + dmae_cmd_comp_crc_disabled /* Do not write a CRC word */, + dmae_cmd_comp_crc_enabled /* Write a CRC word */, + MAX_DMAE_CMD_COMP_CRC_EN_ENUM +}; + + +enum dmae_cmd_comp_func_enum { +/* completion word and/or CRC will be sent to SRC-PCI function/SRC VFID */ + dmae_cmd_comp_func_to_src, +/* completion word and/or CRC will be sent to DST-PCI function/DST VFID */ + dmae_cmd_comp_func_to_dst, + MAX_DMAE_CMD_COMP_FUNC_ENUM +}; + + +enum dmae_cmd_comp_word_en_enum { + dmae_cmd_comp_word_disabled /* Do not write a completion word */, + dmae_cmd_comp_word_enabled /* Write the completion word */, + MAX_DMAE_CMD_COMP_WORD_EN_ENUM +}; + + +enum dmae_cmd_c_dst_enum { + dmae_cmd_c_dst_pcie, + dmae_cmd_c_dst_grc, + MAX_DMAE_CMD_C_DST_ENUM +}; + + +enum dmae_cmd_dst_enum { + dmae_cmd_dst_none_0, + dmae_cmd_dst_pcie, + dmae_cmd_dst_grc, + dmae_cmd_dst_none_3, + MAX_DMAE_CMD_DST_ENUM +}; + + +enum dmae_cmd_error_handling_enum { +/* Send a regular completion (with no error indication) */ + dmae_cmd_error_handling_send_regular_comp, +/* Send a completion with an error indication (i.e. 
set bit 31 of the completion + * word) + */ + dmae_cmd_error_handling_send_comp_with_err, + dmae_cmd_error_handling_dont_send_comp /* Do not send a completion */, + MAX_DMAE_CMD_ERROR_HANDLING_ENUM +}; + + +enum dmae_cmd_src_enum { + dmae_cmd_src_pcie /* The source is the PCIe */, + dmae_cmd_src_grc /* The source is the GRC */, + MAX_DMAE_CMD_SRC_ENUM +}; + + +/* + * DMAE parameters + */ +struct dmae_params { + __le32 flags; +/* If set and the source is a block of length DMAE_MAX_RW_SIZE and the + * destination is larger, the source block will be duplicated as many + * times as required to fill the destination block. This is used mostly + * to write a zeroed buffer to destination address using DMA + */ +#define DMAE_PARAMS_RW_REPL_SRC_MASK 0x1 +#define DMAE_PARAMS_RW_REPL_SRC_SHIFT 0 +/* If set, the source is a VF, and the source VF ID is taken from the + * src_vf_id parameter. + */ +#define DMAE_PARAMS_SRC_VF_VALID_MASK 0x1 +#define DMAE_PARAMS_SRC_VF_VALID_SHIFT 1 +/* If set, the destination is a VF, and the destination VF ID is taken + * from the dst_vf_id parameter. + */ +#define DMAE_PARAMS_DST_VF_VALID_MASK 0x1 +#define DMAE_PARAMS_DST_VF_VALID_SHIFT 2 +/* If set, a completion is sent to the destination function. + * Otherwise its sent to the source function. + */ +#define DMAE_PARAMS_COMPLETION_DST_MASK 0x1 +#define DMAE_PARAMS_COMPLETION_DST_SHIFT 3 +/* If set, the port ID is taken from the port_id parameter. + * Otherwise, the current port ID is used. + */ +#define DMAE_PARAMS_PORT_VALID_MASK 0x1 +#define DMAE_PARAMS_PORT_VALID_SHIFT 4 +/* If set, the source PF ID is taken from the src_pf_id parameter. + * Otherwise, the current PF ID is used. + */ +#define DMAE_PARAMS_SRC_PF_VALID_MASK 0x1 +#define DMAE_PARAMS_SRC_PF_VALID_SHIFT 5 +/* If set, the destination PF ID is taken from the dst_pf_id parameter. + * Otherwise, the current PF ID is used + */ +#define DMAE_PARAMS_DST_PF_VALID_MASK 0x1 +#define DMAE_PARAMS_DST_PF_VALID_SHIFT 6 +#define DMAE_PARAMS_RESERVED_MASK 0x1FFFFFF +#define DMAE_PARAMS_RESERVED_SHIFT 7 + u8 src_vf_id /* Source VF ID, valid only if src_vf_valid is set */; + u8 dst_vf_id /* Destination VF ID, valid only if dst_vf_valid is set */; + u8 port_id /* Port ID, valid only if port_valid is set */; + u8 src_pf_id /* Source PF ID, valid only if src_pf_valid is set */; + u8 dst_pf_id /* Destination PF ID, valid only if dst_pf_valid is set */; + u8 reserved1; + __le16 reserved2; +}; + + +struct fw_asserts_ram_section { +/* The offset of the section in the RAM in RAM lines (64-bit units) */ + __le16 section_ram_line_offset; +/* The size of the section in RAM lines (64-bit units) */ + __le16 section_ram_line_size; +/* The offset of the asserts list within the section in dwords */ + u8 list_dword_offset; +/* The size of an assert list element in dwords */ + u8 list_element_dword_size; + u8 list_num_elements /* The number of elements in the asserts list */; +/* The offset of the next list index field within the section in dwords */ + u8 list_next_index_dword_offset; +}; + + +struct fw_ver_num { + u8 major /* Firmware major version number */; + u8 minor /* Firmware minor version number */; + u8 rev /* Firmware revision version number */; + u8 eng /* Firmware engineering version number (for bootleg versions) */; +}; + +struct fw_ver_info { + __le16 tools_ver /* Tools version number */; + u8 image_id /* FW image ID (e.g. main, l2b, kuku) */; + u8 reserved1; + struct fw_ver_num num /* FW version number */; + __le32 timestamp /* FW Timestamp in unix time (sec. 
since 1970) */; + __le32 reserved2; +}; + +struct fw_info { + struct fw_ver_info ver /* FW version information */; +/* Info regarding the FW asserts section in the Storm RAM */ + struct fw_asserts_ram_section fw_asserts_section; +}; + + +struct fw_info_location { + __le32 grc_addr /* GRC address where the fw_info struct is located. */; +/* Size of the fw_info structure (thats located at the grc_addr). */ + __le32 size; +}; + + + + +/* + * IGU cleanup command + */ +struct igu_cleanup { + __le32 sb_id_and_flags; +#define IGU_CLEANUP_RESERVED0_MASK 0x7FFFFFF +#define IGU_CLEANUP_RESERVED0_SHIFT 0 +/* cleanup clear - 0, set - 1 */ +#define IGU_CLEANUP_CLEANUP_SET_MASK 0x1 +#define IGU_CLEANUP_CLEANUP_SET_SHIFT 27 +#define IGU_CLEANUP_CLEANUP_TYPE_MASK 0x7 +#define IGU_CLEANUP_CLEANUP_TYPE_SHIFT 28 +/* must always be set (use enum command_type_bit) */ +#define IGU_CLEANUP_COMMAND_TYPE_MASK 0x1U +#define IGU_CLEANUP_COMMAND_TYPE_SHIFT 31 + __le32 reserved1; +}; + + +/* + * IGU firmware driver command + */ +union igu_command { + struct igu_prod_cons_update prod_cons_update; + struct igu_cleanup cleanup; +}; + + +/* + * IGU firmware driver command + */ +struct igu_command_reg_ctrl { + __le16 opaque_fid; + __le16 igu_command_reg_ctrl_fields; +#define IGU_COMMAND_REG_CTRL_PXP_BAR_ADDR_MASK 0xFFF +#define IGU_COMMAND_REG_CTRL_PXP_BAR_ADDR_SHIFT 0 +#define IGU_COMMAND_REG_CTRL_RESERVED_MASK 0x7 +#define IGU_COMMAND_REG_CTRL_RESERVED_SHIFT 12 +/* command typ: 0 - read, 1 - write */ +#define IGU_COMMAND_REG_CTRL_COMMAND_TYPE_MASK 0x1 +#define IGU_COMMAND_REG_CTRL_COMMAND_TYPE_SHIFT 15 +}; + + +/* + * IGU mapping line structure + */ +struct igu_mapping_line { + __le32 igu_mapping_line_fields; +#define IGU_MAPPING_LINE_VALID_MASK 0x1 +#define IGU_MAPPING_LINE_VALID_SHIFT 0 +#define IGU_MAPPING_LINE_VECTOR_NUMBER_MASK 0xFF +#define IGU_MAPPING_LINE_VECTOR_NUMBER_SHIFT 1 +/* In BB: VF-0-120, PF-0-7; In K2: VF-0-191, PF-0-15 */ +#define IGU_MAPPING_LINE_FUNCTION_NUMBER_MASK 0xFF +#define IGU_MAPPING_LINE_FUNCTION_NUMBER_SHIFT 9 +#define IGU_MAPPING_LINE_PF_VALID_MASK 0x1 /* PF-1, VF-0 */ +#define IGU_MAPPING_LINE_PF_VALID_SHIFT 17 +#define IGU_MAPPING_LINE_IPS_GROUP_MASK 0x3F +#define IGU_MAPPING_LINE_IPS_GROUP_SHIFT 18 +#define IGU_MAPPING_LINE_RESERVED_MASK 0xFF +#define IGU_MAPPING_LINE_RESERVED_SHIFT 24 +}; + + +/* + * IGU MSIX line structure + */ +struct igu_msix_vector { + struct regpair address; + __le32 data; + __le32 msix_vector_fields; +#define IGU_MSIX_VECTOR_MASK_BIT_MASK 0x1 +#define IGU_MSIX_VECTOR_MASK_BIT_SHIFT 0 +#define IGU_MSIX_VECTOR_RESERVED0_MASK 0x7FFF +#define IGU_MSIX_VECTOR_RESERVED0_SHIFT 1 +#define IGU_MSIX_VECTOR_STEERING_TAG_MASK 0xFF +#define IGU_MSIX_VECTOR_STEERING_TAG_SHIFT 16 +#define IGU_MSIX_VECTOR_RESERVED1_MASK 0xFF +#define IGU_MSIX_VECTOR_RESERVED1_SHIFT 24 +}; + + +struct mstorm_core_conn_ag_ctx { + u8 byte0 /* cdu_validation */; + u8 byte1 /* state */; + u8 flags0; +#define MSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */ +#define MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 +#define MSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */ +#define MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 +#define MSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* cf0 */ +#define MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2 +#define MSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* cf1 */ +#define MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4 +#define MSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* cf2 */ +#define MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */ +#define 
MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0 +#define MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */ +#define MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1 +#define MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */ +#define MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2 +#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */ +#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */ +#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */ +#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */ +#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */ +#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7 + __le16 word0 /* word0 */; + __le16 word1 /* word1 */; + __le32 reg0 /* reg0 */; + __le32 reg1 /* reg1 */; +}; + + +/* + * per encapsulation type enabling flags + */ +struct prs_reg_encapsulation_type_en { + u8 flags; +/* Enable bit for Ethernet-over-GRE (L2 GRE) encapsulation. */ +#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_MASK 0x1 +#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT 0 +/* Enable bit for IP-over-GRE (IP GRE) encapsulation. */ +#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_MASK 0x1 +#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT 1 +/* Enable bit for VXLAN encapsulation. */ +#define PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_MASK 0x1 +#define PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT 2 +/* Enable bit for T-Tag encapsulation. */ +#define PRS_REG_ENCAPSULATION_TYPE_EN_T_TAG_ENABLE_MASK 0x1 +#define PRS_REG_ENCAPSULATION_TYPE_EN_T_TAG_ENABLE_SHIFT 3 +/* Enable bit for Ethernet-over-GENEVE (L2 GENEVE) encapsulation. */ +#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_MASK 0x1 +#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT 4 +/* Enable bit for IP-over-GENEVE (IP GENEVE) encapsulation. 
*/ +#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_MASK 0x1 +#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT 5 +#define PRS_REG_ENCAPSULATION_TYPE_EN_RESERVED_MASK 0x3 +#define PRS_REG_ENCAPSULATION_TYPE_EN_RESERVED_SHIFT 6 +}; + + +enum pxp_tph_st_hint { + TPH_ST_HINT_BIDIR /* Read/Write access by Host and Device */, + TPH_ST_HINT_REQUESTER /* Read/Write access by Device */, +/* Device Write and Host Read, or Host Write and Device Read */ + TPH_ST_HINT_TARGET, +/* Device Write and Host Read, or Host Write and Device Read - with temporal + * reuse + */ + TPH_ST_HINT_TARGET_PRIO, + MAX_PXP_TPH_ST_HINT +}; + + +/* + * QM hardware structure of enable bypass credit mask + */ +struct qm_rf_bypass_mask { + u8 flags; +#define QM_RF_BYPASS_MASK_LINEVOQ_MASK 0x1 +#define QM_RF_BYPASS_MASK_LINEVOQ_SHIFT 0 +#define QM_RF_BYPASS_MASK_RESERVED0_MASK 0x1 +#define QM_RF_BYPASS_MASK_RESERVED0_SHIFT 1 +#define QM_RF_BYPASS_MASK_PFWFQ_MASK 0x1 +#define QM_RF_BYPASS_MASK_PFWFQ_SHIFT 2 +#define QM_RF_BYPASS_MASK_VPWFQ_MASK 0x1 +#define QM_RF_BYPASS_MASK_VPWFQ_SHIFT 3 +#define QM_RF_BYPASS_MASK_PFRL_MASK 0x1 +#define QM_RF_BYPASS_MASK_PFRL_SHIFT 4 +#define QM_RF_BYPASS_MASK_VPQCNRL_MASK 0x1 +#define QM_RF_BYPASS_MASK_VPQCNRL_SHIFT 5 +#define QM_RF_BYPASS_MASK_FWPAUSE_MASK 0x1 +#define QM_RF_BYPASS_MASK_FWPAUSE_SHIFT 6 +#define QM_RF_BYPASS_MASK_RESERVED1_MASK 0x1 +#define QM_RF_BYPASS_MASK_RESERVED1_SHIFT 7 +}; + + +/* + * QM hardware structure of opportunistic credit mask + */ +struct qm_rf_opportunistic_mask { + __le16 flags; +#define QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_MASK 0x1 +#define QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT 0 +#define QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_MASK 0x1 +#define QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT 1 +#define QM_RF_OPPORTUNISTIC_MASK_PFWFQ_MASK 0x1 +#define QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT 2 +#define QM_RF_OPPORTUNISTIC_MASK_VPWFQ_MASK 0x1 +#define QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT 3 +#define QM_RF_OPPORTUNISTIC_MASK_PFRL_MASK 0x1 +#define QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT 4 +#define QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_MASK 0x1 +#define QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT 5 +#define QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_MASK 0x1 +#define QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT 6 +#define QM_RF_OPPORTUNISTIC_MASK_RESERVED0_MASK 0x1 +#define QM_RF_OPPORTUNISTIC_MASK_RESERVED0_SHIFT 7 +#define QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_MASK 0x1 +#define QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT 8 +#define QM_RF_OPPORTUNISTIC_MASK_RESERVED1_MASK 0x7F +#define QM_RF_OPPORTUNISTIC_MASK_RESERVED1_SHIFT 9 +}; + + +/* + * QM hardware structure of QM map memory + */ +struct qm_rf_pq_map { + __le32 reg; +#define QM_RF_PQ_MAP_PQ_VALID_MASK 0x1 /* PQ active */ +#define QM_RF_PQ_MAP_PQ_VALID_SHIFT 0 +#define QM_RF_PQ_MAP_RL_ID_MASK 0xFF /* RL ID */ +#define QM_RF_PQ_MAP_RL_ID_SHIFT 1 +/* the first PQ associated with the VPORT and VOQ of this PQ */ +#define QM_RF_PQ_MAP_VP_PQ_ID_MASK 0x1FF +#define QM_RF_PQ_MAP_VP_PQ_ID_SHIFT 9 +#define QM_RF_PQ_MAP_VOQ_MASK 0x1F /* VOQ */ +#define QM_RF_PQ_MAP_VOQ_SHIFT 18 +#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_MASK 0x3 /* WRR weight */ +#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_SHIFT 23 +#define QM_RF_PQ_MAP_RL_VALID_MASK 0x1 /* RL active */ +#define QM_RF_PQ_MAP_RL_VALID_SHIFT 25 +#define QM_RF_PQ_MAP_RESERVED_MASK 0x3F +#define QM_RF_PQ_MAP_RESERVED_SHIFT 26 +}; + + +/* + * Completion params for aggregated interrupt completion + */ +struct sdm_agg_int_comp_params { + __le16 params; +/* the number of aggregated interrupt, 0-31 */ +#define 
SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_MASK 0x3F +#define SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT 0 +/* 1 - set a bit in aggregated vector, 0 - dont set */ +#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_MASK 0x1 +#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT 6 +/* Number of bit in the aggregated vector, 0-279 (TBD) */ +#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_MASK 0x1FF +#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT 7 +}; + + +/* + * SDM operation gen command (generate aggregative interrupt) + */ +struct sdm_op_gen { + __le32 command; +/* completion parameters 0-15 */ +#define SDM_OP_GEN_COMP_PARAM_MASK 0xFFFF +#define SDM_OP_GEN_COMP_PARAM_SHIFT 0 +#define SDM_OP_GEN_COMP_TYPE_MASK 0xF /* completion type 16-19 */ +#define SDM_OP_GEN_COMP_TYPE_SHIFT 16 +#define SDM_OP_GEN_RESERVED_MASK 0xFFF /* reserved 20-31 */ +#define SDM_OP_GEN_RESERVED_SHIFT 20 +}; + +struct ystorm_core_conn_ag_ctx { + u8 byte0 /* cdu_validation */; + u8 byte1 /* state */; + u8 flags0; +#define YSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */ +#define YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 +#define YSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */ +#define YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 +#define YSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* cf0 */ +#define YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2 +#define YSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* cf1 */ +#define YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4 +#define YSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* cf2 */ +#define YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */ +#define YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0 +#define YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */ +#define YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1 +#define YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */ +#define YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2 +#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */ +#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */ +#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */ +#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */ +#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */ +#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7 + u8 byte2 /* byte2 */; + u8 byte3 /* byte3 */; + __le16 word0 /* word0 */; + __le32 reg0 /* reg0 */; + __le32 reg1 /* reg1 */; + __le16 word1 /* word1 */; + __le16 word2 /* word2 */; + __le16 word3 /* word3 */; + __le16 word4 /* word4 */; + __le32 reg2 /* reg2 */; + __le32 reg3 /* reg3 */; +}; + +#endif /* __ECORE_HSI_COMMON__ */ diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_hsi_debug_tools.h b/src/spdk/dpdk/drivers/net/qede/base/ecore_hsi_debug_tools.h new file mode 100644 index 000000000..eb72e93cf --- /dev/null +++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_hsi_debug_tools.h @@ -0,0 +1,1053 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. + * All rights reserved. 
+ * www.cavium.com + */ + +#ifndef __ECORE_HSI_DEBUG_TOOLS__ +#define __ECORE_HSI_DEBUG_TOOLS__ +/****************************************/ +/* Debug Tools HSI constants and macros */ +/****************************************/ + + +enum block_id { + BLOCK_GRC, + BLOCK_MISCS, + BLOCK_MISC, + BLOCK_DBU, + BLOCK_PGLUE_B, + BLOCK_CNIG, + BLOCK_CPMU, + BLOCK_NCSI, + BLOCK_OPTE, + BLOCK_BMB, + BLOCK_PCIE, + BLOCK_MCP, + BLOCK_MCP2, + BLOCK_PSWHST, + BLOCK_PSWHST2, + BLOCK_PSWRD, + BLOCK_PSWRD2, + BLOCK_PSWWR, + BLOCK_PSWWR2, + BLOCK_PSWRQ, + BLOCK_PSWRQ2, + BLOCK_PGLCS, + BLOCK_DMAE, + BLOCK_PTU, + BLOCK_TCM, + BLOCK_MCM, + BLOCK_UCM, + BLOCK_XCM, + BLOCK_YCM, + BLOCK_PCM, + BLOCK_QM, + BLOCK_TM, + BLOCK_DORQ, + BLOCK_BRB, + BLOCK_SRC, + BLOCK_PRS, + BLOCK_TSDM, + BLOCK_MSDM, + BLOCK_USDM, + BLOCK_XSDM, + BLOCK_YSDM, + BLOCK_PSDM, + BLOCK_TSEM, + BLOCK_MSEM, + BLOCK_USEM, + BLOCK_XSEM, + BLOCK_YSEM, + BLOCK_PSEM, + BLOCK_RSS, + BLOCK_TMLD, + BLOCK_MULD, + BLOCK_YULD, + BLOCK_XYLD, + BLOCK_PRM, + BLOCK_PBF_PB1, + BLOCK_PBF_PB2, + BLOCK_RPB, + BLOCK_BTB, + BLOCK_PBF, + BLOCK_RDIF, + BLOCK_TDIF, + BLOCK_CDU, + BLOCK_CCFC, + BLOCK_TCFC, + BLOCK_IGU, + BLOCK_CAU, + BLOCK_UMAC, + BLOCK_XMAC, + BLOCK_MSTAT, + BLOCK_DBG, + BLOCK_NIG, + BLOCK_WOL, + BLOCK_BMBN, + BLOCK_IPC, + BLOCK_NWM, + BLOCK_NWS, + BLOCK_MS, + BLOCK_PHY_PCIE, + BLOCK_LED, + BLOCK_AVS_WRAP, + BLOCK_PXPREQBUS, + BLOCK_BAR0_MAP, + BLOCK_MCP_FIO, + BLOCK_LAST_INIT, + BLOCK_PRS_FC, + BLOCK_PBF_FC, + BLOCK_NIG_LB_FC, + BLOCK_NIG_LB_FC_PLLH, + BLOCK_NIG_TX_FC_PLLH, + BLOCK_NIG_TX_FC, + BLOCK_NIG_RX_FC_PLLH, + BLOCK_NIG_RX_FC, + MAX_BLOCK_ID +}; + + +/* + * binary debug buffer types + */ +enum bin_dbg_buffer_type { + BIN_BUF_DBG_MODE_TREE /* init modes tree */, + BIN_BUF_DBG_DUMP_REG /* GRC Dump registers */, + BIN_BUF_DBG_DUMP_MEM /* GRC Dump memories */, + BIN_BUF_DBG_IDLE_CHK_REGS /* Idle Check registers */, + BIN_BUF_DBG_IDLE_CHK_IMMS /* Idle Check immediates */, + BIN_BUF_DBG_IDLE_CHK_RULES /* Idle Check rules */, + BIN_BUF_DBG_IDLE_CHK_PARSING_DATA /* Idle Check parsing data */, + BIN_BUF_DBG_ATTN_BLOCKS /* Attention blocks */, + BIN_BUF_DBG_ATTN_REGS /* Attention registers */, + BIN_BUF_DBG_ATTN_INDEXES /* Attention indexes */, + BIN_BUF_DBG_ATTN_NAME_OFFSETS /* Attention name offsets */, + BIN_BUF_DBG_BLOCKS /* Blocks debug data */, + BIN_BUF_DBG_BLOCKS_CHIP_DATA /* Blocks debug chip data */, + BIN_BUF_DBG_BUS_LINES /* Blocks debug bus lines */, + BIN_BUF_DBG_BLOCKS_USER_DATA /* Blocks debug user data */, + BIN_BUF_DBG_BLOCKS_CHIP_USER_DATA /* Blocks debug chip user data */, + BIN_BUF_DBG_BUS_LINE_NAME_OFFSETS /* Debug Bus line name offsets */, + BIN_BUF_DBG_RESET_REGS /* Reset registers */, + BIN_BUF_DBG_PARSING_STRINGS /* Debug Tools parsing strings */, + MAX_BIN_DBG_BUFFER_TYPE +}; + + +/* + * Attention bit mapping + */ +struct dbg_attn_bit_mapping { + u16 data; +/* The index of an attention in the blocks attentions list + * (if is_unused_bit_cnt=0), or a number of consecutive unused attention bits + * (if is_unused_bit_cnt=1) + */ +#define DBG_ATTN_BIT_MAPPING_VAL_MASK 0x7FFF +#define DBG_ATTN_BIT_MAPPING_VAL_SHIFT 0 +/* if set, the val field indicates the number of consecutive unused attention + * bits + */ +#define DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT_MASK 0x1 +#define DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT_SHIFT 15 +}; + + +/* + * Attention block per-type data + */ +struct dbg_attn_block_type_data { +/* Offset of this block attention names in the debug attention name offsets + * array + */ + u16 names_offset; + u16 
reserved1; + u8 num_regs /* Number of attention registers in this block */; + u8 reserved2; +/* Offset of this blocks attention registers in the attention registers array + * (in dbg_attn_reg units) + */ + u16 regs_offset; +}; + +/* + * Block attentions + */ +struct dbg_attn_block { +/* attention block per-type data. Count must match the number of elements in + * dbg_attn_type. + */ + struct dbg_attn_block_type_data per_type_data[2]; +}; + + +/* + * Attention register result + */ +struct dbg_attn_reg_result { + u32 data; +/* STS attention register GRC address (in dwords) */ +#define DBG_ATTN_REG_RESULT_STS_ADDRESS_MASK 0xFFFFFF +#define DBG_ATTN_REG_RESULT_STS_ADDRESS_SHIFT 0 +/* Number of attention indexes in this register */ +#define DBG_ATTN_REG_RESULT_NUM_REG_ATTN_MASK 0xFF +#define DBG_ATTN_REG_RESULT_NUM_REG_ATTN_SHIFT 24 +/* The offset of this registers attentions within the blocks attentions list + * (a value in the range 0..number of block attentions-1) + */ + u16 block_attn_offset; + u16 reserved; + u32 sts_val /* Value read from the STS attention register */; + u32 mask_val /* Value read from the MASK attention register */; +}; + +/* + * Attention block result + */ +struct dbg_attn_block_result { + u8 block_id /* Registers block ID */; + u8 data; +/* Value from dbg_attn_type enum */ +#define DBG_ATTN_BLOCK_RESULT_ATTN_TYPE_MASK 0x3 +#define DBG_ATTN_BLOCK_RESULT_ATTN_TYPE_SHIFT 0 +/* Number of registers in block in which at least one attention bit is set */ +#define DBG_ATTN_BLOCK_RESULT_NUM_REGS_MASK 0x3F +#define DBG_ATTN_BLOCK_RESULT_NUM_REGS_SHIFT 2 +/* Offset of this registers block attention names in the attention name offsets + * array + */ + u16 names_offset; +/* result data for each register in the block in which at least one attention + * bit is set + */ + struct dbg_attn_reg_result reg_results[15]; +}; + + + +/* + * mode header + */ +struct dbg_mode_hdr { + u16 data; +/* indicates if a mode expression should be evaluated (0/1) */ +#define DBG_MODE_HDR_EVAL_MODE_MASK 0x1 +#define DBG_MODE_HDR_EVAL_MODE_SHIFT 0 +/* offset (in bytes) in modes expression buffer. valid only if eval_mode is + * set. + */ +#define DBG_MODE_HDR_MODES_BUF_OFFSET_MASK 0x7FFF +#define DBG_MODE_HDR_MODES_BUF_OFFSET_SHIFT 1 +}; + +/* + * Attention register + */ +struct dbg_attn_reg { +/* The offset of this registers attentions within the blocks attentions list + * (a value in the range 0..number of block attentions-1) + */ + u16 block_attn_offset; + u32 data; +/* STS attention register GRC address (in dwords) */ +#define DBG_ATTN_REG_STS_ADDRESS_MASK 0xFFFFFF +#define DBG_ATTN_REG_STS_ADDRESS_SHIFT 0 +/* Number of attention in this register */ +#define DBG_ATTN_REG_NUM_REG_ATTN_MASK 0xFF +#define DBG_ATTN_REG_NUM_REG_ATTN_SHIFT 24 +/* STS_CLR attention register GRC address (in dwords) */ + u32 sts_clr_address; + u32 mask_address /* MASK attention register GRC address (in dwords) */; +}; + + + +/* + * attention types + */ +enum dbg_attn_type { + ATTN_TYPE_INTERRUPT, + ATTN_TYPE_PARITY, + MAX_DBG_ATTN_TYPE +}; + + +/* + * Block debug data + */ +struct dbg_block { + u8 name[15] /* Block name */; +/* The letter (char) of the associated Storm, or 0 if no associated Storm. */ + u8 associated_storm_letter; +}; + + +/* + * Chip-specific block debug data + */ +struct dbg_block_chip { + u8 flags; +/* Indicates if the block is removed in this chip (0/1). */ +#define DBG_BLOCK_CHIP_IS_REMOVED_MASK 0x1 +#define DBG_BLOCK_CHIP_IS_REMOVED_SHIFT 0 +/* Indicates if this block has a reset register (0/1). 
*/ +#define DBG_BLOCK_CHIP_HAS_RESET_REG_MASK 0x1 +#define DBG_BLOCK_CHIP_HAS_RESET_REG_SHIFT 1 +/* Indicates if this block should be taken out of reset before GRC Dump (0/1). + * Valid only if has_reset_reg is set. + */ +#define DBG_BLOCK_CHIP_UNRESET_BEFORE_DUMP_MASK 0x1 +#define DBG_BLOCK_CHIP_UNRESET_BEFORE_DUMP_SHIFT 2 +/* Indicates if this block has a debug bus (0/1). */ +#define DBG_BLOCK_CHIP_HAS_DBG_BUS_MASK 0x1 +#define DBG_BLOCK_CHIP_HAS_DBG_BUS_SHIFT 3 +/* Indicates if this block has a latency events debug line (0/1). Valid only + * if has_dbg_bus is set. + */ +#define DBG_BLOCK_CHIP_HAS_LATENCY_EVENTS_MASK 0x1 +#define DBG_BLOCK_CHIP_HAS_LATENCY_EVENTS_SHIFT 4 +#define DBG_BLOCK_CHIP_RESERVED0_MASK 0x7 +#define DBG_BLOCK_CHIP_RESERVED0_SHIFT 5 +/* The DBG block client ID of this block/chip. Valid only if has_dbg_bus is + * set. + */ + u8 dbg_client_id; +/* The ID of the reset register of this block/chip in the dbg_reset_reg + * array. + */ + u8 reset_reg_id; +/* The bit offset of this block/chip in the reset register. Valid only if + * has_reset_reg is set. + */ + u8 reset_reg_bit_offset; + struct dbg_mode_hdr dbg_bus_mode /* Mode header */; + u16 reserved1; + u8 reserved2; +/* Number of Debug Bus lines in this block/chip (excluding signature and latency + * events). Valid only if has_dbg_bus is set. + */ + u8 num_of_dbg_bus_lines; +/* Offset of this block/chip Debug Bus lines in the Debug Bus lines array. Valid + * only if has_dbg_bus is set. + */ + u16 dbg_bus_lines_offset; +/* GRC address of the Debug Bus dbg_select register (in dwords). Valid only if + * has_dbg_bus is set. + */ + u32 dbg_select_reg_addr; +/* GRC address of the Debug Bus dbg_dword_enable register (in dwords). Valid + * only if has_dbg_bus is set. + */ + u32 dbg_dword_enable_reg_addr; +/* GRC address of the Debug Bus dbg_shift register (in dwords). Valid only if + * has_dbg_bus is set. + */ + u32 dbg_shift_reg_addr; +/* GRC address of the Debug Bus dbg_force_valid register (in dwords). Valid only + * if has_dbg_bus is set. + */ + u32 dbg_force_valid_reg_addr; +/* GRC address of the Debug Bus dbg_force_frame register (in dwords). Valid only + * if has_dbg_bus is set. + */ + u32 dbg_force_frame_reg_addr; +}; + + +/* + * Chip-specific block user debug data + */ +struct dbg_block_chip_user { +/* Number of debug bus lines in this block (excluding signature and latency + * events). + */ + u8 num_of_dbg_bus_lines; +/* Indicates if this block has a latency events debug line (0/1). */ + u8 has_latency_events; +/* Offset of this blocks lines in the debug bus line name offsets array. */ + u16 names_offset; +}; + + +/* + * Block user debug data + */ +struct dbg_block_user { + u8 name[16] /* Block name */; +}; + + +/* + * Block Debug line data + */ +struct dbg_bus_line { + u8 data; +/* Number of groups in the line (0-3) */ +#define DBG_BUS_LINE_NUM_OF_GROUPS_MASK 0xF +#define DBG_BUS_LINE_NUM_OF_GROUPS_SHIFT 0 +/* Indicates if this is a 128b line (0) or a 256b line (1). */ +#define DBG_BUS_LINE_IS_256B_MASK 0x1 +#define DBG_BUS_LINE_IS_256B_SHIFT 4 +#define DBG_BUS_LINE_RESERVED_MASK 0x7 +#define DBG_BUS_LINE_RESERVED_SHIFT 5 +/* Four 2-bit values, indicating the size of each group minus 1 (i.e. + * value=0 means size=1, value=1 means size=2, etc), starting from lsb. + * The sizes are in dwords (if is_256b=0) or in qwords (if is_256b=1). 
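+ * As an illustrative decode (size_i and i are not fields of this structure),
+ * the size of group i can be recovered as
+ *   size_i = ((group_sizes >> (2 * i)) & 0x3) + 1
+ * counted in dwords when is_256b=0, or in qwords when is_256b=1.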
+ */ + u8 group_sizes; +}; + + +/* + * condition header for registers dump + */ +struct dbg_dump_cond_hdr { + struct dbg_mode_hdr mode /* Mode header */; + u8 block_id /* block ID */; + u8 data_size /* size in dwords of the data following this header */; +}; + + +/* + * memory data for registers dump + */ +struct dbg_dump_mem { + u32 dword0; +/* register address (in dwords) */ +#define DBG_DUMP_MEM_ADDRESS_MASK 0xFFFFFF +#define DBG_DUMP_MEM_ADDRESS_SHIFT 0 +#define DBG_DUMP_MEM_MEM_GROUP_ID_MASK 0xFF /* memory group ID */ +#define DBG_DUMP_MEM_MEM_GROUP_ID_SHIFT 24 + u32 dword1; +/* register size (in dwords) */ +#define DBG_DUMP_MEM_LENGTH_MASK 0xFFFFFF +#define DBG_DUMP_MEM_LENGTH_SHIFT 0 +/* indicates if the register is wide-bus */ +#define DBG_DUMP_MEM_WIDE_BUS_MASK 0x1 +#define DBG_DUMP_MEM_WIDE_BUS_SHIFT 24 +#define DBG_DUMP_MEM_RESERVED_MASK 0x7F +#define DBG_DUMP_MEM_RESERVED_SHIFT 25 +}; + + +/* + * register data for registers dump + */ +struct dbg_dump_reg { + u32 data; +/* register address (in dwords) */ +#define DBG_DUMP_REG_ADDRESS_MASK 0x7FFFFF /* register address (in dwords) */ +#define DBG_DUMP_REG_ADDRESS_SHIFT 0 +/* indicates if the register is wide-bus */ +#define DBG_DUMP_REG_WIDE_BUS_MASK 0x1 +#define DBG_DUMP_REG_WIDE_BUS_SHIFT 23 +#define DBG_DUMP_REG_LENGTH_MASK 0xFF /* register size (in dwords) */ +#define DBG_DUMP_REG_LENGTH_SHIFT 24 +}; + + +/* + * split header for registers dump + */ +struct dbg_dump_split_hdr { + u32 hdr; +/* size in dwords of the data following this header */ +#define DBG_DUMP_SPLIT_HDR_DATA_SIZE_MASK 0xFFFFFF +#define DBG_DUMP_SPLIT_HDR_DATA_SIZE_SHIFT 0 +#define DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID_MASK 0xFF /* split type ID */ +#define DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID_SHIFT 24 +}; + + +/* + * condition header for idle check + */ +struct dbg_idle_chk_cond_hdr { + struct dbg_mode_hdr mode /* Mode header */; + u16 data_size /* size in dwords of the data following this header */; +}; + + +/* + * Idle Check condition register + */ +struct dbg_idle_chk_cond_reg { + u32 data; +/* Register GRC address (in dwords) */ +#define DBG_IDLE_CHK_COND_REG_ADDRESS_MASK 0x7FFFFF +#define DBG_IDLE_CHK_COND_REG_ADDRESS_SHIFT 0 +/* indicates if the register is wide-bus */ +#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_MASK 0x1 +#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_SHIFT 23 +/* value from block_id enum */ +#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_MASK 0xFF +#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_SHIFT 24 + u16 num_entries /* number of registers entries to check */; + u8 entry_size /* size of registers entry (in dwords) */; + u8 start_entry /* index of the first entry to check */; +}; + + +/* + * Idle Check info register + */ +struct dbg_idle_chk_info_reg { + u32 data; +/* Register GRC address (in dwords) */ +#define DBG_IDLE_CHK_INFO_REG_ADDRESS_MASK 0x7FFFFF +#define DBG_IDLE_CHK_INFO_REG_ADDRESS_SHIFT 0 +/* indicates if the register is wide-bus */ +#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_MASK 0x1 +#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_SHIFT 23 +/* value from block_id enum */ +#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_MASK 0xFF +#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_SHIFT 24 + u16 size /* register size in dwords */; + struct dbg_mode_hdr mode /* Mode header */; +}; + + +/* + * Idle Check register + */ +union dbg_idle_chk_reg { + struct dbg_idle_chk_cond_reg cond_reg /* condition register */; + struct dbg_idle_chk_info_reg info_reg /* info register */; +}; + + +/* + * Idle Check result header + */ +struct dbg_idle_chk_result_hdr { + u16 rule_id /* Failing rule index */; + u16 
mem_entry_id /* Failing memory entry index */;
+ u8 num_dumped_cond_regs /* number of dumped condition registers */;
+ u8 num_dumped_info_regs /* number of dumped info registers */;
+ u8 severity /* from dbg_idle_chk_severity_types enum */;
+ u8 reserved;
+};
+
+
+/*
+ * Idle Check result register header
+ */
+struct dbg_idle_chk_result_reg_hdr {
+ u8 data;
+/* indicates if this register is a memory */
+#define DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM_MASK 0x1
+#define DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM_SHIFT 0
+/* register index within the failing rule */
+#define DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID_MASK 0x7F
+#define DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID_SHIFT 1
+ u8 start_entry /* index of the first checked entry */;
+ u16 size /* register size in dwords */;
+};
+
+
+/*
+ * Idle Check rule
+ */
+struct dbg_idle_chk_rule {
+ u16 rule_id /* Idle Check rule ID */;
+ u8 severity /* value from dbg_idle_chk_severity_types enum */;
+ u8 cond_id /* Condition ID */;
+ u8 num_cond_regs /* number of condition registers */;
+ u8 num_info_regs /* number of info registers */;
+ u8 num_imms /* number of immediates in the condition */;
+ u8 reserved1;
+/* offset of this rule's registers in the idle check register array
+ * (in dbg_idle_chk_reg units)
+ */
+ u16 reg_offset;
+/* offset of this rule's immediate values in the immediate values array
+ * (in dwords)
+ */
+ u16 imm_offset;
+};
+
+
+/*
+ * Idle Check rule parsing data
+ */
+struct dbg_idle_chk_rule_parsing_data {
+ u32 data;
+/* indicates if this register has a FW message */
+#define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_MASK 0x1
+#define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_SHIFT 0
+/* Offset of this rule's strings in the debug strings array (in bytes) */
+#define DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET_MASK 0x7FFFFFFF
+#define DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET_SHIFT 1
+};
+
+
+/*
+ * idle check severity types
+ */
+enum dbg_idle_chk_severity_types {
+/* idle check failure should cause an error */
+ IDLE_CHK_SEVERITY_ERROR,
+/* idle check failure should cause an error only if there's no traffic */
+ IDLE_CHK_SEVERITY_ERROR_NO_TRAFFIC,
+/* idle check failure should cause a warning */
+ IDLE_CHK_SEVERITY_WARNING,
+ MAX_DBG_IDLE_CHK_SEVERITY_TYPES
+};
+
+
+
+/*
+ * Reset register
+ */
+struct dbg_reset_reg {
+ u32 data;
+#define DBG_RESET_REG_ADDR_MASK 0xFFFFFF /* GRC address (in dwords) */
+#define DBG_RESET_REG_ADDR_SHIFT 0
+/* indicates if this register is removed (0/1). */
+#define DBG_RESET_REG_IS_REMOVED_MASK 0x1
+#define DBG_RESET_REG_IS_REMOVED_SHIFT 24
+#define DBG_RESET_REG_RESERVED_MASK 0x7F
+#define DBG_RESET_REG_RESERVED_SHIFT 25
+};
+
+
+/*
+ * Debug Bus block data
+ */
+struct dbg_bus_block_data {
+/* 4 bit value, bit i set -> dword/qword i is enabled in block. */
+ u8 enable_mask;
+/* Number of dwords/qwords to cyclically right-shift the block's output (0-3). */
+ u8 right_shift;
+/* 4 bit value, bit i set -> dword/qword i is forced valid in block. */
+ u8 force_valid_mask;
+/* 4 bit value, bit i set -> dword/qword i frame bit is forced in block. */
+ u8 force_frame_mask;
+/* bit i set -> dword i contains this block's data (after shifting). */
+ u8 dword_mask;
+ u8 line_num /* Debug line number to select */;
+ u8 hw_id /* HW ID associated with the block */;
+ u8 flags;
+/* 0/1. If 1, the debug line is 256b, otherwise it's 128b.
*/ +#define DBG_BUS_BLOCK_DATA_IS_256B_LINE_MASK 0x1 +#define DBG_BUS_BLOCK_DATA_IS_256B_LINE_SHIFT 0 +#define DBG_BUS_BLOCK_DATA_RESERVED_MASK 0x7F +#define DBG_BUS_BLOCK_DATA_RESERVED_SHIFT 1 +}; + + +/* + * Debug Bus constraint operation types + */ +enum dbg_bus_constraint_ops { + DBG_BUS_CONSTRAINT_OP_EQ /* equal */, + DBG_BUS_CONSTRAINT_OP_NE /* not equal */, + DBG_BUS_CONSTRAINT_OP_LT /* less than */, + DBG_BUS_CONSTRAINT_OP_LTC /* less than (cyclic) */, + DBG_BUS_CONSTRAINT_OP_LE /* less than or equal */, + DBG_BUS_CONSTRAINT_OP_LEC /* less than or equal (cyclic) */, + DBG_BUS_CONSTRAINT_OP_GT /* greater than */, + DBG_BUS_CONSTRAINT_OP_GTC /* greater than (cyclic) */, + DBG_BUS_CONSTRAINT_OP_GE /* greater than or equal */, + DBG_BUS_CONSTRAINT_OP_GEC /* greater than or equal (cyclic) */, + MAX_DBG_BUS_CONSTRAINT_OPS +}; + + +/* + * Debug Bus trigger state data + */ +struct dbg_bus_trigger_state_data { +/* Message length (in cycles) to be used for message-based trigger constraints. + * If set to 0, message length is based only on frame bit received from HW. + */ + u8 msg_len; +/* A bit for each dword in the debug bus cycle, indicating if this dword appears + * in a trigger constraint (1) or not (0) + */ + u8 constraint_dword_mask; +/* Storm ID to trigger on. Valid only when triggering on Storm data. + * (use enum dbg_storms) + */ + u8 storm_id; + u8 reserved; +}; + +/* + * Debug Bus memory address + */ +struct dbg_bus_mem_addr { + u32 lo; + u32 hi; +}; + +/* + * Debug Bus PCI buffer data + */ +struct dbg_bus_pci_buf_data { + struct dbg_bus_mem_addr phys_addr /* PCI buffer physical address */; + struct dbg_bus_mem_addr virt_addr /* PCI buffer virtual address */; + u32 size /* PCI buffer size in bytes */; +}; + +/* + * Debug Bus Storm EID range filter params + */ +struct dbg_bus_storm_eid_range_params { + u8 min /* Minimal event ID to filter on */; + u8 max /* Maximal event ID to filter on */; +}; + +/* + * Debug Bus Storm EID mask filter params + */ +struct dbg_bus_storm_eid_mask_params { + u8 val /* Event ID value */; + u8 mask /* Event ID mask. 1s in the mask = dont care bits. */; +}; + +/* + * Debug Bus Storm EID filter params + */ +union dbg_bus_storm_eid_params { +/* EID range filter params */ + struct dbg_bus_storm_eid_range_params range; +/* EID mask filter params */ + struct dbg_bus_storm_eid_mask_params mask; +}; + +/* + * Debug Bus Storm data + */ +struct dbg_bus_storm_data { + u8 enabled /* indicates if the Storm is enabled for recording */; + u8 mode /* Storm debug mode, valid only if the Storm is enabled */; + u8 hw_id /* HW ID associated with the Storm */; + u8 eid_filter_en /* Indicates if EID filtering is performed (0/1) */; +/* 1 = EID range filter, 0 = EID mask filter. Valid only if eid_filter_en is + * set, + */ + u8 eid_range_not_mask; + u8 cid_filter_en /* Indicates if CID filtering is performed (0/1) */; +/* EID filter params to filter on. Valid only if eid_filter_en is set. */ + union dbg_bus_storm_eid_params eid_filter_params; + u32 cid /* CID to filter on. Valid only if cid_filter_en is set. 
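+ * As an illustrative check for the EID mask filter above (eid and match are
+ * not fields of this structure), a Storm event ID matches when
+ *   match = (((eid ^ eid_filter_params.mask.val) &
+ *             (u8)~eid_filter_params.mask.mask) == 0)
+ * since 1s in the mask mark don't-care bits.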
*/; +}; + +/* + * Debug Bus data + */ +struct dbg_bus_data { + u32 app_version /* The tools version number of the application */; + u8 state /* The current debug bus state */; + u8 mode_256b_en /* Indicates if the 256 bit mode is enabled */; + u8 num_enabled_blocks /* Number of blocks enabled for recording */; + u8 num_enabled_storms /* Number of Storms enabled for recording */; + u8 target /* Output target */; + u8 one_shot_en /* Indicates if one-shot mode is enabled (0/1) */; + u8 grc_input_en /* Indicates if GRC recording is enabled (0/1) */; +/* Indicates if timestamp recording is enabled (0/1) */ + u8 timestamp_input_en; + u8 filter_en /* Indicates if the recording filter is enabled (0/1) */; +/* If true, the next added constraint belong to the filter. Otherwise, + * it belongs to the last added trigger state. Valid only if either filter or + * triggers are enabled. + */ + u8 adding_filter; +/* Indicates if the recording filter should be applied before the trigger. + * Valid only if both filter and trigger are enabled (0/1) + */ + u8 filter_pre_trigger; +/* Indicates if the recording filter should be applied after the trigger. + * Valid only if both filter and trigger are enabled (0/1) + */ + u8 filter_post_trigger; +/* Indicates if the recording trigger is enabled (0/1) */ + u8 trigger_en; +/* A bit for each dword in the debug bus cycle, indicating if this dword + * appears in a filter constraint (1) or not (0) + */ + u8 filter_constraint_dword_mask; + u8 next_trigger_state /* ID of next trigger state to be added */; +/* ID of next filter/trigger constraint to be added */ + u8 next_constraint_id; +/* trigger states data */ + struct dbg_bus_trigger_state_data trigger_states[3]; +/* Message length (in cycles) to be used for message-based filter constraints. + * If set to 0 message length is based only on frame bit received from HW. + */ + u8 filter_msg_len; +/* Indicates if the other engine sends it NW recording to this engine (0/1) */ + u8 rcv_from_other_engine; +/* A bit for each dword in the debug bus cycle, indicating if this dword is + * recorded (1) or not (0) + */ + u8 blocks_dword_mask; +/* Indicates if there are dwords in the debug bus cycle which are recorded + * by more tan one block (0/1) + */ + u8 blocks_dword_overlap; +/* The HW IDs of the recorded HW blocks, where bits i*3..i*3+2 contain the + * HW ID of dword/qword i + */ + u32 hw_id_mask; +/* Debug Bus PCI buffer data. Valid only when the target is + * DBG_BUS_TARGET_ID_PCI. 
+ */ + struct dbg_bus_pci_buf_data pci_buf; +/* Debug Bus data for each block */ + struct dbg_bus_block_data blocks[132]; +/* Debug Bus data for each block */ + struct dbg_bus_storm_data storms[6]; +}; + + +/* + * Debug bus states + */ +enum dbg_bus_states { + DBG_BUS_STATE_IDLE /* debug bus idle state (not recording) */, +/* debug bus is ready for configuration and recording */ + DBG_BUS_STATE_READY, + DBG_BUS_STATE_RECORDING /* debug bus is currently recording */, + DBG_BUS_STATE_STOPPED /* debug bus recording has stopped */, + MAX_DBG_BUS_STATES +}; + + + + + + +/* + * Debug Bus Storm modes + */ +enum dbg_bus_storm_modes { + DBG_BUS_STORM_MODE_PRINTF /* store data (fast debug) */, + DBG_BUS_STORM_MODE_PRAM_ADDR /* pram address (fast debug) */, + DBG_BUS_STORM_MODE_DRA_RW /* DRA read/write data (fast debug) */, + DBG_BUS_STORM_MODE_DRA_W /* DRA write data (fast debug) */, + DBG_BUS_STORM_MODE_LD_ST_ADDR /* load/store address (fast debug) */, + DBG_BUS_STORM_MODE_DRA_FSM /* DRA state machines (fast debug) */, + DBG_BUS_STORM_MODE_RH /* recording handlers (fast debug) */, +/* recording handlers with store messages (fast debug) */ + DBG_BUS_STORM_MODE_RH_WITH_STORE, + DBG_BUS_STORM_MODE_FOC /* FOC: FIN + DRA Rd (slow debug) */, + DBG_BUS_STORM_MODE_EXT_STORE /* FOC: External Store (slow) */, + MAX_DBG_BUS_STORM_MODES +}; + + +/* + * Debug bus target IDs + */ +enum dbg_bus_targets { +/* records debug bus to DBG block internal buffer */ + DBG_BUS_TARGET_ID_INT_BUF, + DBG_BUS_TARGET_ID_NIG /* records debug bus to the NW */, + DBG_BUS_TARGET_ID_PCI /* records debug bus to a PCI buffer */, + MAX_DBG_BUS_TARGETS +}; + + + +/* + * GRC Dump data + */ +struct dbg_grc_data { +/* Indicates if the GRC parameters were initialized */ + u8 params_initialized; + u8 reserved1; + u16 reserved2; +/* Value of each GRC parameter. Array size must match the enum dbg_grc_params. 
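+ * As an illustrative read (dev_data names a dbg_tools_data instance and is not
+ * declared here), the RAM dump setting is obtained as
+ *   dump_ram = dev_data.grc.param_val[DBG_GRC_PARAM_DUMP_RAM]
+ * i.e. the array is indexed by the dbg_grc_params enum values below.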
+ */ + u32 param_val[48]; +}; + + +/* + * Debug GRC params + */ +enum dbg_grc_params { + DBG_GRC_PARAM_DUMP_TSTORM /* dump Tstorm memories (0/1) */, + DBG_GRC_PARAM_DUMP_MSTORM /* dump Mstorm memories (0/1) */, + DBG_GRC_PARAM_DUMP_USTORM /* dump Ustorm memories (0/1) */, + DBG_GRC_PARAM_DUMP_XSTORM /* dump Xstorm memories (0/1) */, + DBG_GRC_PARAM_DUMP_YSTORM /* dump Ystorm memories (0/1) */, + DBG_GRC_PARAM_DUMP_PSTORM /* dump Pstorm memories (0/1) */, + DBG_GRC_PARAM_DUMP_REGS /* dump non-memory registers (0/1) */, + DBG_GRC_PARAM_DUMP_RAM /* dump Storm internal RAMs (0/1) */, + DBG_GRC_PARAM_DUMP_PBUF /* dump Storm passive buffer (0/1) */, + DBG_GRC_PARAM_DUMP_IOR /* dump Storm IORs (0/1) */, + DBG_GRC_PARAM_DUMP_VFC /* dump VFC memories (0/1) */, + DBG_GRC_PARAM_DUMP_CM_CTX /* dump CM contexts (0/1) */, + DBG_GRC_PARAM_DUMP_PXP /* dump PXP memories (0/1) */, + DBG_GRC_PARAM_DUMP_RSS /* dump RSS memories (0/1) */, + DBG_GRC_PARAM_DUMP_CAU /* dump CAU memories (0/1) */, + DBG_GRC_PARAM_DUMP_QM /* dump QM memories (0/1) */, + DBG_GRC_PARAM_DUMP_MCP /* dump MCP memories (0/1) */, + DBG_GRC_PARAM_DUMP_DORQ /* dump DORQ memories (0/1) */, + DBG_GRC_PARAM_DUMP_CFC /* dump CFC memories (0/1) */, + DBG_GRC_PARAM_DUMP_IGU /* dump IGU memories (0/1) */, + DBG_GRC_PARAM_DUMP_BRB /* dump BRB memories (0/1) */, + DBG_GRC_PARAM_DUMP_BTB /* dump BTB memories (0/1) */, + DBG_GRC_PARAM_DUMP_BMB /* dump BMB memories (0/1) */, + DBG_GRC_PARAM_RESERVD1 /* reserved */, + DBG_GRC_PARAM_DUMP_MULD /* dump MULD memories (0/1) */, + DBG_GRC_PARAM_DUMP_PRS /* dump PRS memories (0/1) */, + DBG_GRC_PARAM_DUMP_DMAE /* dump PRS memories (0/1) */, + DBG_GRC_PARAM_DUMP_TM /* dump TM (timers) memories (0/1) */, + DBG_GRC_PARAM_DUMP_SDM /* dump SDM memories (0/1) */, + DBG_GRC_PARAM_DUMP_DIF /* dump DIF memories (0/1) */, + DBG_GRC_PARAM_DUMP_STATIC /* dump static debug data (0/1) */, + DBG_GRC_PARAM_UNSTALL /* un-stall Storms after dump (0/1) */, + DBG_GRC_PARAM_RESERVED2 /* reserved */, +/* MCP Trace meta data size in bytes */ + DBG_GRC_PARAM_MCP_TRACE_META_SIZE, +/* preset: exclude all memories from dump (1 only) */ + DBG_GRC_PARAM_EXCLUDE_ALL, +/* preset: include memories for crash dump (1 only) */ + DBG_GRC_PARAM_CRASH, +/* perform dump only if MFW is responding (0/1) */ + DBG_GRC_PARAM_PARITY_SAFE, + DBG_GRC_PARAM_DUMP_CM /* dump CM memories (0/1) */, + DBG_GRC_PARAM_DUMP_PHY /* dump PHY memories (0/1) */, + DBG_GRC_PARAM_NO_MCP /* dont perform MCP commands (0/1) */, + DBG_GRC_PARAM_NO_FW_VER /* dont read FW/MFW version (0/1) */, + DBG_GRC_PARAM_RESERVED3 /* reserved */, + DBG_GRC_PARAM_DUMP_MCP_HW_DUMP /* dump MCP HW Dump (0/1) */, + DBG_GRC_PARAM_DUMP_ILT_CDUC /* dump ILT CDUC client (0/1) */, + DBG_GRC_PARAM_DUMP_ILT_CDUT /* dump ILT CDUT client (0/1) */, + DBG_GRC_PARAM_DUMP_CAU_EXT /* dump CAU extended memories (0/1) */, + MAX_DBG_GRC_PARAMS +}; + + +/* + * Debug status codes + */ +enum dbg_status { + DBG_STATUS_OK, + DBG_STATUS_APP_VERSION_NOT_SET, + DBG_STATUS_UNSUPPORTED_APP_VERSION, + DBG_STATUS_DBG_BLOCK_NOT_RESET, + DBG_STATUS_INVALID_ARGS, + DBG_STATUS_OUTPUT_ALREADY_SET, + DBG_STATUS_INVALID_PCI_BUF_SIZE, + DBG_STATUS_PCI_BUF_ALLOC_FAILED, + DBG_STATUS_PCI_BUF_NOT_ALLOCATED, + DBG_STATUS_INVALID_FILTER_TRIGGER_DWORDS, + DBG_STATUS_NO_MATCHING_FRAMING_MODE, + DBG_STATUS_VFC_READ_ERROR, + DBG_STATUS_STORM_ALREADY_ENABLED, + DBG_STATUS_STORM_NOT_ENABLED, + DBG_STATUS_BLOCK_ALREADY_ENABLED, + DBG_STATUS_BLOCK_NOT_ENABLED, + DBG_STATUS_NO_INPUT_ENABLED, + DBG_STATUS_NO_FILTER_TRIGGER_256B, + 
DBG_STATUS_FILTER_ALREADY_ENABLED, + DBG_STATUS_TRIGGER_ALREADY_ENABLED, + DBG_STATUS_TRIGGER_NOT_ENABLED, + DBG_STATUS_CANT_ADD_CONSTRAINT, + DBG_STATUS_TOO_MANY_TRIGGER_STATES, + DBG_STATUS_TOO_MANY_CONSTRAINTS, + DBG_STATUS_RECORDING_NOT_STARTED, + DBG_STATUS_DATA_DIDNT_TRIGGER, + DBG_STATUS_NO_DATA_RECORDED, + DBG_STATUS_DUMP_BUF_TOO_SMALL, + DBG_STATUS_DUMP_NOT_CHUNK_ALIGNED, + DBG_STATUS_UNKNOWN_CHIP, + DBG_STATUS_VIRT_MEM_ALLOC_FAILED, + DBG_STATUS_BLOCK_IN_RESET, + DBG_STATUS_INVALID_TRACE_SIGNATURE, + DBG_STATUS_INVALID_NVRAM_BUNDLE, + DBG_STATUS_NVRAM_GET_IMAGE_FAILED, + DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE, + DBG_STATUS_NVRAM_READ_FAILED, + DBG_STATUS_IDLE_CHK_PARSE_FAILED, + DBG_STATUS_MCP_TRACE_BAD_DATA, + DBG_STATUS_MCP_TRACE_NO_META, + DBG_STATUS_MCP_COULD_NOT_HALT, + DBG_STATUS_MCP_COULD_NOT_RESUME, + DBG_STATUS_RESERVED0, + DBG_STATUS_SEMI_FIFO_NOT_EMPTY, + DBG_STATUS_IGU_FIFO_BAD_DATA, + DBG_STATUS_MCP_COULD_NOT_MASK_PRTY, + DBG_STATUS_FW_ASSERTS_PARSE_FAILED, + DBG_STATUS_REG_FIFO_BAD_DATA, + DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA, + DBG_STATUS_DBG_ARRAY_NOT_SET, + DBG_STATUS_RESERVED1, + DBG_STATUS_NON_MATCHING_LINES, + DBG_STATUS_INSUFFICIENT_HW_IDS, + DBG_STATUS_DBG_BUS_IN_USE, + DBG_STATUS_INVALID_STORM_DBG_MODE, + DBG_STATUS_OTHER_ENGINE_BB_ONLY, + DBG_STATUS_FILTER_SINGLE_HW_ID, + DBG_STATUS_TRIGGER_SINGLE_HW_ID, + DBG_STATUS_MISSING_TRIGGER_STATE_STORM, + MAX_DBG_STATUS +}; + + +/* + * Debug Storms IDs + */ +enum dbg_storms { + DBG_TSTORM_ID, + DBG_MSTORM_ID, + DBG_USTORM_ID, + DBG_XSTORM_ID, + DBG_YSTORM_ID, + DBG_PSTORM_ID, + MAX_DBG_STORMS +}; + + +/* + * Idle Check data + */ +struct idle_chk_data { + u32 buf_size /* Idle check buffer size in dwords */; +/* Indicates if the idle check buffer size was set (0/1) */ + u8 buf_size_set; + u8 reserved1; + u16 reserved2; +}; + +/* + * Pretend parameters + */ +struct pretend_params { + u8 split_type /* Pretend split type (from enum init_split_types) */; + u8 reserved; + u16 split_id /* Preted split ID (within the pretend split type) */; +}; + +/* + * Debug Tools data (per HW function) + */ +struct dbg_tools_data { + struct dbg_grc_data grc /* GRC Dump data */; + struct dbg_bus_data bus /* Debug Bus data */; + struct idle_chk_data idle_chk /* Idle Check data */; + u8 mode_enable[40] /* Indicates if a mode is enabled (0/1) */; +/* Indicates if a block is in reset state (0/1) */ + u8 block_in_reset[132]; + u8 chip_id /* Chip ID (from enum chip_ids) */; + u8 hw_type /* HW Type */; + u8 num_ports /* Number of ports in the chip */; + u8 num_pfs_per_port /* Number of PFs in each port */; + u8 num_vfs /* Number of VFs in the chip */; + u8 initialized /* Indicates if the data was initialized */; + u8 use_dmae /* Indicates if DMAE should be used */; + u8 reserved; + struct pretend_params pretend /* Current pretend parameters */; +/* Numbers of registers that were read since last log */ + u32 num_regs_read; +}; + + + +#endif /* __ECORE_HSI_DEBUG_TOOLS__ */ diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_hsi_eth.h b/src/spdk/dpdk/drivers/net/qede/base/ecore_hsi_eth.h new file mode 100644 index 000000000..bd7bd8658 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_hsi_eth.h @@ -0,0 +1,2315 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. + * All rights reserved. 
+ * www.cavium.com + */ + +#ifndef __ECORE_HSI_ETH__ +#define __ECORE_HSI_ETH__ +/************************************************************************/ +/* Add include to common eth target for both eCore and protocol driver */ +/************************************************************************/ +#include "eth_common.h" + +/* + * The eth storm context for the Tstorm + */ +struct tstorm_eth_conn_st_ctx { + __le32 reserved[4]; +}; + +/* + * The eth storm context for the Pstorm + */ +struct pstorm_eth_conn_st_ctx { + __le32 reserved[8]; +}; + +/* + * The eth storm context for the Xstorm + */ +struct xstorm_eth_conn_st_ctx { + __le32 reserved[60]; +}; + +struct xstorm_eth_conn_ag_ctx { + u8 reserved0 /* cdu_validation */; + u8 state /* state */; + u8 flags0; +/* exist_in_qm0 */ +#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +/* exist_in_qm1 */ +#define XSTORM_ETH_CONN_AG_CTX_RESERVED1_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED1_SHIFT 1 +/* exist_in_qm2 */ +#define XSTORM_ETH_CONN_AG_CTX_RESERVED2_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED2_SHIFT 2 +/* exist_in_qm3 */ +#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED3_MASK 0x1 /* bit4 */ +#define XSTORM_ETH_CONN_AG_CTX_RESERVED3_SHIFT 4 +/* cf_array_active */ +#define XSTORM_ETH_CONN_AG_CTX_RESERVED4_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED4_SHIFT 5 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED5_MASK 0x1 /* bit6 */ +#define XSTORM_ETH_CONN_AG_CTX_RESERVED5_SHIFT 6 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED6_MASK 0x1 /* bit7 */ +#define XSTORM_ETH_CONN_AG_CTX_RESERVED6_SHIFT 7 + u8 flags1; +#define XSTORM_ETH_CONN_AG_CTX_RESERVED7_MASK 0x1 /* bit8 */ +#define XSTORM_ETH_CONN_AG_CTX_RESERVED7_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED8_MASK 0x1 /* bit9 */ +#define XSTORM_ETH_CONN_AG_CTX_RESERVED8_SHIFT 1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED9_MASK 0x1 /* bit10 */ +#define XSTORM_ETH_CONN_AG_CTX_RESERVED9_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_BIT11_MASK 0x1 /* bit11 */ +#define XSTORM_ETH_CONN_AG_CTX_BIT11_SHIFT 3 +#define XSTORM_ETH_CONN_AG_CTX_E5_RESERVED2_MASK 0x1 /* bit12 */ +#define XSTORM_ETH_CONN_AG_CTX_E5_RESERVED2_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_E5_RESERVED3_MASK 0x1 /* bit13 */ +#define XSTORM_ETH_CONN_AG_CTX_E5_RESERVED3_SHIFT 5 +#define XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1 /* bit14 */ +#define XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6 +#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1 /* bit15 */ +#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7 + u8 flags2; +#define XSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */ +#define XSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */ +#define XSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */ +#define XSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 4 +/* timer_stop_all */ +#define XSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 6 + u8 flags3; +#define XSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */ +#define XSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */ +#define XSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */ +#define XSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */ +#define XSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 6 + u8 flags4; +#define 
XSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */ +#define XSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */ +#define XSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */ +#define XSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_CF11_MASK 0x3 /* cf11 */ +#define XSTORM_ETH_CONN_AG_CTX_CF11_SHIFT 6 + u8 flags5; +#define XSTORM_ETH_CONN_AG_CTX_CF12_MASK 0x3 /* cf12 */ +#define XSTORM_ETH_CONN_AG_CTX_CF12_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_CF13_MASK 0x3 /* cf13 */ +#define XSTORM_ETH_CONN_AG_CTX_CF13_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_CF14_MASK 0x3 /* cf14 */ +#define XSTORM_ETH_CONN_AG_CTX_CF14_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_CF15_MASK 0x3 /* cf15 */ +#define XSTORM_ETH_CONN_AG_CTX_CF15_SHIFT 6 + u8 flags6; +#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3 /* cf16 */ +#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0 +/* cf_array_cf */ +#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_MASK 0x3 /* cf18 */ +#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_MASK 0x3 /* cf19 */ +#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_SHIFT 6 + u8 flags7; +#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 /* cf20 */ +#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED10_MASK 0x3 /* cf21 */ +#define XSTORM_ETH_CONN_AG_CTX_RESERVED10_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_MASK 0x3 /* cf22 */ +#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */ +#define XSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 6 +#define XSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */ +#define XSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 7 + u8 flags8; +#define XSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */ +#define XSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */ +#define XSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 1 +#define XSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */ +#define XSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */ +#define XSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 3 +#define XSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */ +#define XSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */ +#define XSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 5 +#define XSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */ +#define XSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 6 +#define XSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */ +#define XSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 7 + u8 flags9; +#define XSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */ +#define XSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_CF11EN_MASK 0x1 /* cf11en */ +#define XSTORM_ETH_CONN_AG_CTX_CF11EN_SHIFT 1 +#define XSTORM_ETH_CONN_AG_CTX_CF12EN_MASK 0x1 /* cf12en */ +#define XSTORM_ETH_CONN_AG_CTX_CF12EN_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_CF13EN_MASK 0x1 /* cf13en */ +#define XSTORM_ETH_CONN_AG_CTX_CF13EN_SHIFT 3 +#define XSTORM_ETH_CONN_AG_CTX_CF14EN_MASK 0x1 /* cf14en */ +#define XSTORM_ETH_CONN_AG_CTX_CF14EN_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_CF15EN_MASK 0x1 /* cf15en */ +#define XSTORM_ETH_CONN_AG_CTX_CF15EN_SHIFT 5 +#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1 /* cf16en */ +#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6 +/* cf_array_cf_en */ +#define 
XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7 + u8 flags10; +#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_MASK 0x1 /* cf18en */ +#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1 /* cf19en */ +#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1 +#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 /* cf20en */ +#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED11_MASK 0x1 /* cf21en */ +#define XSTORM_ETH_CONN_AG_CTX_RESERVED11_SHIFT 3 +#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 /* cf22en */ +#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1 /* cf23en */ +#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED12_MASK 0x1 /* rule0en */ +#define XSTORM_ETH_CONN_AG_CTX_RESERVED12_SHIFT 6 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED13_MASK 0x1 /* rule1en */ +#define XSTORM_ETH_CONN_AG_CTX_RESERVED13_SHIFT 7 + u8 flags11; +#define XSTORM_ETH_CONN_AG_CTX_RESERVED14_MASK 0x1 /* rule2en */ +#define XSTORM_ETH_CONN_AG_CTX_RESERVED14_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED15_MASK 0x1 /* rule3en */ +#define XSTORM_ETH_CONN_AG_CTX_RESERVED15_SHIFT 1 +#define XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1 /* rule4en */ +#define XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */ +#define XSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 3 +#define XSTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */ +#define XSTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */ +#define XSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 5 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 /* rule8en */ +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 +#define XSTORM_ETH_CONN_AG_CTX_RULE9EN_MASK 0x1 /* rule9en */ +#define XSTORM_ETH_CONN_AG_CTX_RULE9EN_SHIFT 7 + u8 flags12; +#define XSTORM_ETH_CONN_AG_CTX_RULE10EN_MASK 0x1 /* rule10en */ +#define XSTORM_ETH_CONN_AG_CTX_RULE10EN_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_RULE11EN_MASK 0x1 /* rule11en */ +#define XSTORM_ETH_CONN_AG_CTX_RULE11EN_SHIFT 1 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 /* rule12en */ +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 /* rule13en */ +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 +#define XSTORM_ETH_CONN_AG_CTX_RULE14EN_MASK 0x1 /* rule14en */ +#define XSTORM_ETH_CONN_AG_CTX_RULE14EN_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_RULE15EN_MASK 0x1 /* rule15en */ +#define XSTORM_ETH_CONN_AG_CTX_RULE15EN_SHIFT 5 +#define XSTORM_ETH_CONN_AG_CTX_RULE16EN_MASK 0x1 /* rule16en */ +#define XSTORM_ETH_CONN_AG_CTX_RULE16EN_SHIFT 6 +#define XSTORM_ETH_CONN_AG_CTX_RULE17EN_MASK 0x1 /* rule17en */ +#define XSTORM_ETH_CONN_AG_CTX_RULE17EN_SHIFT 7 + u8 flags13; +#define XSTORM_ETH_CONN_AG_CTX_RULE18EN_MASK 0x1 /* rule18en */ +#define XSTORM_ETH_CONN_AG_CTX_RULE18EN_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_RULE19EN_MASK 0x1 /* rule19en */ +#define XSTORM_ETH_CONN_AG_CTX_RULE19EN_SHIFT 1 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 /* rule20en */ +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 /* rule21en */ +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 /* rule22en */ +#define 
XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 /* rule23en */ +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 /* rule24en */ +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 /* rule25en */ +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 + u8 flags14; +#define XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1 /* bit16 */ +#define XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1 /* bit17 */ +#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1 +#define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1 /* bit18 */ +#define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1 /* bit19 */ +#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3 +#define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1 /* bit20 */ +#define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1 /* bit21 */ +#define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5 +#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_MASK 0x3 /* cf23 */ +#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_SHIFT 6 + u8 edpm_event_id /* byte2 */; + __le16 physical_q0 /* physical_q0 */; + __le16 e5_reserved1 /* physical_q1 */; + __le16 edpm_num_bds /* physical_q2 */; + __le16 tx_bd_cons /* word3 */; + __le16 tx_bd_prod /* word4 */; + __le16 updated_qm_pq_id /* word5 */; + __le16 conn_dpi /* conn_dpi */; + u8 byte3 /* byte3 */; + u8 byte4 /* byte4 */; + u8 byte5 /* byte5 */; + u8 byte6 /* byte6 */; + __le32 reg0 /* reg0 */; + __le32 reg1 /* reg1 */; + __le32 reg2 /* reg2 */; + __le32 reg3 /* reg3 */; + __le32 reg4 /* reg4 */; + __le32 reg5 /* cf_array0 */; + __le32 reg6 /* cf_array1 */; + __le16 word7 /* word7 */; + __le16 word8 /* word8 */; + __le16 word9 /* word9 */; + __le16 word10 /* word10 */; + __le32 reg7 /* reg7 */; + __le32 reg8 /* reg8 */; + __le32 reg9 /* reg9 */; + u8 byte7 /* byte7 */; + u8 byte8 /* byte8 */; + u8 byte9 /* byte9 */; + u8 byte10 /* byte10 */; + u8 byte11 /* byte11 */; + u8 byte12 /* byte12 */; + u8 byte13 /* byte13 */; + u8 byte14 /* byte14 */; + u8 byte15 /* byte15 */; + u8 e5_reserved /* e5_reserved */; + __le16 word11 /* word11 */; + __le32 reg10 /* reg10 */; + __le32 reg11 /* reg11 */; + __le32 reg12 /* reg12 */; + __le32 reg13 /* reg13 */; + __le32 reg14 /* reg14 */; + __le32 reg15 /* reg15 */; + __le32 reg16 /* reg16 */; + __le32 reg17 /* reg17 */; + __le32 reg18 /* reg18 */; + __le32 reg19 /* reg19 */; + __le16 word12 /* word12 */; + __le16 word13 /* word13 */; + __le16 word14 /* word14 */; + __le16 word15 /* word15 */; +}; + +struct tstorm_eth_conn_ag_ctx { + u8 byte0 /* cdu_validation */; + u8 byte1 /* state */; + u8 flags0; +#define TSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */ +#define TSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0 +#define TSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */ +#define TSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1 +#define TSTORM_ETH_CONN_AG_CTX_BIT2_MASK 0x1 /* bit2 */ +#define TSTORM_ETH_CONN_AG_CTX_BIT2_SHIFT 2 +#define TSTORM_ETH_CONN_AG_CTX_BIT3_MASK 0x1 /* bit3 */ +#define TSTORM_ETH_CONN_AG_CTX_BIT3_SHIFT 3 +#define TSTORM_ETH_CONN_AG_CTX_BIT4_MASK 0x1 /* bit4 */ +#define TSTORM_ETH_CONN_AG_CTX_BIT4_SHIFT 4 +#define TSTORM_ETH_CONN_AG_CTX_BIT5_MASK 0x1 /* bit5 */ +#define TSTORM_ETH_CONN_AG_CTX_BIT5_SHIFT 5 +#define 
TSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */ +#define TSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 6 + u8 flags1; +#define TSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */ +#define TSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 0 +#define TSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */ +#define TSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 2 +#define TSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */ +#define TSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 4 +#define TSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */ +#define TSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 6 + u8 flags2; +#define TSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */ +#define TSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 0 +#define TSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */ +#define TSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 2 +#define TSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */ +#define TSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 4 +#define TSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */ +#define TSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 6 + u8 flags3; +#define TSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */ +#define TSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 0 +#define TSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */ +#define TSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 2 +#define TSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */ +#define TSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 4 +#define TSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */ +#define TSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 5 +#define TSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */ +#define TSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 6 +#define TSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */ +#define TSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 7 + u8 flags4; +#define TSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */ +#define TSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 0 +#define TSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */ +#define TSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 1 +#define TSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */ +#define TSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 2 +#define TSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */ +#define TSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 3 +#define TSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */ +#define TSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 4 +#define TSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */ +#define TSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 5 +#define TSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */ +#define TSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 6 +#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */ +#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7 + u8 flags5; +#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */ +#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */ +#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */ +#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */ +#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */ +#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_MASK 0x1 /* rule6en */ +#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_SHIFT 5 +#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */ +#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */ +#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7 + __le32 reg0 /* reg0 */; + __le32 reg1 /* reg1 */; + __le32 reg2 /* reg2 */; + __le32 reg3 /* reg3 */; + __le32 reg4 /* reg4 */; + __le32 reg5 /* reg5 */; + __le32 reg6 /* reg6 */; + __le32 reg7 /* reg7 */; + __le32 reg8 
/* reg8 */; + u8 byte2 /* byte2 */; + u8 byte3 /* byte3 */; + __le16 rx_bd_cons /* word0 */; + u8 byte4 /* byte4 */; + u8 byte5 /* byte5 */; + __le16 rx_bd_prod /* word1 */; + __le16 word2 /* conn_dpi */; + __le16 word3 /* word3 */; + __le32 reg9 /* reg9 */; + __le32 reg10 /* reg10 */; +}; + +/* + * The eth storm context for the Ystorm + */ +struct ystorm_eth_conn_st_ctx { + __le32 reserved[8]; +}; + +struct ystorm_eth_conn_ag_ctx { + u8 byte0 /* cdu_validation */; + u8 state /* state */; + u8 flags0; +#define YSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */ +#define YSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0 +#define YSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */ +#define YSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1 +#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3 /* cf0 */ +#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 2 +#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_MASK 0x3 /* cf1 */ +#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_SHIFT 4 +#define YSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* cf2 */ +#define YSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1 /* cf0en */ +#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 0 +#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_MASK 0x1 /* cf1en */ +#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_SHIFT 1 +#define YSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */ +#define YSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2 +#define YSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */ +#define YSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define YSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */ +#define YSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define YSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */ +#define YSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define YSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */ +#define YSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define YSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */ +#define YSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7 + u8 tx_q0_int_coallecing_timeset /* byte2 */; + u8 byte3 /* byte3 */; + __le16 word0 /* word0 */; + __le32 terminate_spqe /* reg0 */; + __le32 reg1 /* reg1 */; + __le16 tx_bd_cons_upd /* word1 */; + __le16 word2 /* word2 */; + __le16 word3 /* word3 */; + __le16 word4 /* word4 */; + __le32 reg2 /* reg2 */; + __le32 reg3 /* reg3 */; +}; + +struct ustorm_eth_conn_ag_ctx { + u8 byte0 /* cdu_validation */; + u8 byte1 /* state */; + u8 flags0; +/* exist_in_qm0 */ +#define USTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0 +/* exist_in_qm1 */ +#define USTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1 +#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_MASK 0x3 /* timer0cf */ +#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_SHIFT 2 +#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_MASK 0x3 /* timer1cf */ +#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_SHIFT 4 +#define USTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */ +#define USTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +/* timer_stop_all */ +#define USTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3 +#define USTORM_ETH_CONN_AG_CTX_CF3_SHIFT 0 +#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_MASK 0x3 /* cf4 */ +#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_SHIFT 2 +#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_MASK 0x3 /* cf5 */ +#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_SHIFT 4 +#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3 /* cf6 */ +#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 6 + u8 flags2; +#define 
USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_MASK 0x1 /* cf0en */ +#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_SHIFT 0 +#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_MASK 0x1 /* cf1en */ +#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_SHIFT 1 +#define USTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */ +#define USTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2 +#define USTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */ +#define USTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 3 +#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_MASK 0x1 /* cf4en */ +#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_SHIFT 4 +#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_MASK 0x1 /* cf5en */ +#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_SHIFT 5 +#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1 /* cf6en */ +#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 6 +#define USTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */ +#define USTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7 + u8 flags3; +#define USTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */ +#define USTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define USTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */ +#define USTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define USTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */ +#define USTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define USTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */ +#define USTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define USTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */ +#define USTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define USTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */ +#define USTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define USTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */ +#define USTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define USTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */ +#define USTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7 + u8 byte2 /* byte2 */; + u8 byte3 /* byte3 */; + __le16 word0 /* conn_dpi */; + __le16 tx_bd_cons /* word1 */; + __le32 reg0 /* reg0 */; + __le32 reg1 /* reg1 */; + __le32 reg2 /* reg2 */; + __le32 tx_int_coallecing_timeset /* reg3 */; + __le16 tx_drv_bd_cons /* word2 */; + __le16 rx_drv_cqe_cons /* word3 */; +}; + +/* + * The eth storm context for the Ustorm + */ +struct ustorm_eth_conn_st_ctx { + __le32 reserved[40]; +}; + +/* + * The eth storm context for the Mstorm + */ +struct mstorm_eth_conn_st_ctx { + __le32 reserved[8]; +}; + +/* + * eth connection context + */ +struct eth_conn_context { +/* tstorm storm context */ + struct tstorm_eth_conn_st_ctx tstorm_st_context; + struct regpair tstorm_st_padding[2] /* padding */; +/* pstorm storm context */ + struct pstorm_eth_conn_st_ctx pstorm_st_context; +/* xstorm storm context */ + struct xstorm_eth_conn_st_ctx xstorm_st_context; +/* xstorm aggregative context */ + struct xstorm_eth_conn_ag_ctx xstorm_ag_context; +/* tstorm aggregative context */ + struct tstorm_eth_conn_ag_ctx tstorm_ag_context; +/* ystorm storm context */ + struct ystorm_eth_conn_st_ctx ystorm_st_context; +/* ystorm aggregative context */ + struct ystorm_eth_conn_ag_ctx ystorm_ag_context; +/* ustorm aggregative context */ + struct ustorm_eth_conn_ag_ctx ustorm_ag_context; +/* ustorm storm context */ + struct ustorm_eth_conn_st_ctx ustorm_st_context; +/* mstorm storm context */ + struct mstorm_eth_conn_st_ctx mstorm_st_context; +}; + + +/* + * Ethernet filter types: mac/vlan/pair + */ +enum eth_error_code { + ETH_OK = 0x00 /* command succeeded */, +/* mac add filters command failed due to cam full state */ + ETH_FILTERS_MAC_ADD_FAIL_FULL, +/* 
mac add filters command failed due to mtt2 full state */ + ETH_FILTERS_MAC_ADD_FAIL_FULL_MTT2, +/* mac add filters command failed due to duplicate mac address */ + ETH_FILTERS_MAC_ADD_FAIL_DUP_MTT2, +/* mac add filters command failed due to duplicate mac address */ + ETH_FILTERS_MAC_ADD_FAIL_DUP_STT2, +/* mac delete filters command failed due to not found state */ + ETH_FILTERS_MAC_DEL_FAIL_NOF, +/* mac delete filters command failed due to not found state */ + ETH_FILTERS_MAC_DEL_FAIL_NOF_MTT2, +/* mac delete filters command failed due to not found state */ + ETH_FILTERS_MAC_DEL_FAIL_NOF_STT2, +/* mac add filters command failed due to MAC Address of 00:00:00:00:00:00 */ + ETH_FILTERS_MAC_ADD_FAIL_ZERO_MAC, +/* vlan add filters command failed due to cam full state */ + ETH_FILTERS_VLAN_ADD_FAIL_FULL, +/* vlan add filters command failed due to duplicate VLAN filter */ + ETH_FILTERS_VLAN_ADD_FAIL_DUP, +/* vlan delete filters command failed due to not found state */ + ETH_FILTERS_VLAN_DEL_FAIL_NOF, +/* vlan delete filters command failed due to not found state */ + ETH_FILTERS_VLAN_DEL_FAIL_NOF_TT1, +/* pair add filters command failed due to duplicate request */ + ETH_FILTERS_PAIR_ADD_FAIL_DUP, +/* pair add filters command failed due to full state */ + ETH_FILTERS_PAIR_ADD_FAIL_FULL, +/* pair add filters command failed due to full state */ + ETH_FILTERS_PAIR_ADD_FAIL_FULL_MAC, +/* pair add filters command failed due not found state */ + ETH_FILTERS_PAIR_DEL_FAIL_NOF, +/* pair add filters command failed due not found state */ + ETH_FILTERS_PAIR_DEL_FAIL_NOF_TT1, +/* pair add filters command failed due to MAC Address of 00:00:00:00:00:00 */ + ETH_FILTERS_PAIR_ADD_FAIL_ZERO_MAC, +/* vni add filters command failed due to cam full state */ + ETH_FILTERS_VNI_ADD_FAIL_FULL, +/* vni add filters command failed due to duplicate VNI filter */ + ETH_FILTERS_VNI_ADD_FAIL_DUP, + ETH_FILTERS_GFT_UPDATE_FAIL /* Fail update GFT filter. */, + MAX_ETH_ERROR_CODE +}; + + +/* + * opcodes for the event ring + */ +enum eth_event_opcode { + ETH_EVENT_UNUSED, + ETH_EVENT_VPORT_START, + ETH_EVENT_VPORT_UPDATE, + ETH_EVENT_VPORT_STOP, + ETH_EVENT_TX_QUEUE_START, + ETH_EVENT_TX_QUEUE_STOP, + ETH_EVENT_RX_QUEUE_START, + ETH_EVENT_RX_QUEUE_UPDATE, + ETH_EVENT_RX_QUEUE_STOP, + ETH_EVENT_FILTERS_UPDATE, + ETH_EVENT_RX_ADD_OPENFLOW_FILTER, + ETH_EVENT_RX_DELETE_OPENFLOW_FILTER, + ETH_EVENT_RX_CREATE_OPENFLOW_ACTION, + ETH_EVENT_RX_ADD_UDP_FILTER, + ETH_EVENT_RX_DELETE_UDP_FILTER, + ETH_EVENT_RX_CREATE_GFT_ACTION, + ETH_EVENT_RX_GFT_UPDATE_FILTER, + ETH_EVENT_TX_QUEUE_UPDATE, + MAX_ETH_EVENT_OPCODE +}; + + +/* + * Classify rule types in E2/E3 + */ +enum eth_filter_action { + ETH_FILTER_ACTION_UNUSED, + ETH_FILTER_ACTION_REMOVE, + ETH_FILTER_ACTION_ADD, +/* Remove all filters of given type and vport ID. */ + ETH_FILTER_ACTION_REMOVE_ALL, + MAX_ETH_FILTER_ACTION +}; + + +/* + * Command for adding/removing a classification rule $$KEEP_ENDIANNESS$$ + */ +struct eth_filter_cmd { + u8 type /* Filter Type (MAC/VLAN/Pair/VNI) */; + u8 vport_id /* the vport id */; + u8 action /* filter command action: add/remove/replace */; + u8 reserved0; + __le32 vni; + __le16 mac_lsb; + __le16 mac_mid; + __le16 mac_msb; + __le16 vlan_id; +}; + + +/* + * $$KEEP_ENDIANNESS$$ + */ +struct eth_filter_cmd_header { + u8 rx /* If set, apply these commands to the RX path */; + u8 tx /* If set, apply these commands to the TX path */; + u8 cmd_cnt /* Number of filter commands */; +/* 0 - dont assert in case of filter configuration error. 
Just return an error + * code. 1 - assert in case of filter configuration error. + */ + u8 assert_on_error; + u8 reserved1[4]; +}; + + +/* + * Ethernet filter types: mac/vlan/pair + */ +enum eth_filter_type { + ETH_FILTER_TYPE_UNUSED, + ETH_FILTER_TYPE_MAC /* Add/remove a MAC address */, + ETH_FILTER_TYPE_VLAN /* Add/remove a VLAN */, + ETH_FILTER_TYPE_PAIR /* Add/remove a MAC-VLAN pair */, + ETH_FILTER_TYPE_INNER_MAC /* Add/remove a inner MAC address */, + ETH_FILTER_TYPE_INNER_VLAN /* Add/remove a inner VLAN */, + ETH_FILTER_TYPE_INNER_PAIR /* Add/remove a inner MAC-VLAN pair */, +/* Add/remove a inner MAC-VNI pair */ + ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR, + ETH_FILTER_TYPE_MAC_VNI_PAIR /* Add/remove a MAC-VNI pair */, + ETH_FILTER_TYPE_VNI /* Add/remove a VNI */, + MAX_ETH_FILTER_TYPE +}; + + +/* + * inner to inner vlan priority translation configurations + */ +struct eth_in_to_in_pri_map_cfg { +/* If set, non_rdma_in_to_in_pri_map or rdma_in_to_in_pri_map will be used for + * inner to inner priority mapping depending on protocol type + */ + u8 inner_vlan_pri_remap_en; + u8 reserved[7]; +/* Map for inner to inner vlan priority translation for Non RDMA protocols, used + * for TenantDcb. Set inner_vlan_pri_remap_en, when init the map. + */ + u8 non_rdma_in_to_in_pri_map[8]; +/* Map for inner to inner vlan priority translation for RDMA protocols, used for + * TenantDcb. Set inner_vlan_pri_remap_en, when init the map. + */ + u8 rdma_in_to_in_pri_map[8]; +}; + + +/* + * eth IPv4 Fragment Type + */ +enum eth_ipv4_frag_type { + ETH_IPV4_NOT_FRAG /* IPV4 Packet Not Fragmented */, +/* First Fragment of IPv4 Packet (contains headers) */ + ETH_IPV4_FIRST_FRAG, +/* Non-First Fragment of IPv4 Packet (does not contain headers) */ + ETH_IPV4_NON_FIRST_FRAG, + MAX_ETH_IPV4_FRAG_TYPE +}; + + +/* + * eth IPv4 Fragment Type + */ +enum eth_ip_type { + ETH_IPV4 /* IPv4 */, + ETH_IPV6 /* IPv6 */, + MAX_ETH_IP_TYPE +}; + + +/* + * Ethernet Ramrod Command IDs + */ +enum eth_ramrod_cmd_id { + ETH_RAMROD_UNUSED, + ETH_RAMROD_VPORT_START /* VPort Start Ramrod */, + ETH_RAMROD_VPORT_UPDATE /* VPort Update Ramrod */, + ETH_RAMROD_VPORT_STOP /* VPort Stop Ramrod */, + ETH_RAMROD_RX_QUEUE_START /* RX Queue Start Ramrod */, + ETH_RAMROD_RX_QUEUE_STOP /* RX Queue Stop Ramrod */, + ETH_RAMROD_TX_QUEUE_START /* TX Queue Start Ramrod */, + ETH_RAMROD_TX_QUEUE_STOP /* TX Queue Stop Ramrod */, + ETH_RAMROD_FILTERS_UPDATE /* Add or Remove Mac/Vlan/Pair filters */, + ETH_RAMROD_RX_QUEUE_UPDATE /* RX Queue Update Ramrod */, +/* RX - Create an Openflow Action */ + ETH_RAMROD_RX_CREATE_OPENFLOW_ACTION, +/* RX - Add an Openflow Filter to the Searcher */ + ETH_RAMROD_RX_ADD_OPENFLOW_FILTER, +/* RX - Delete an Openflow Filter to the Searcher */ + ETH_RAMROD_RX_DELETE_OPENFLOW_FILTER, +/* RX - Add a UDP Filter to the Searcher */ + ETH_RAMROD_RX_ADD_UDP_FILTER, +/* RX - Delete a UDP Filter to the Searcher */ + ETH_RAMROD_RX_DELETE_UDP_FILTER, + ETH_RAMROD_RX_CREATE_GFT_ACTION /* RX - Create a Gft Action */, +/* RX - Add/Delete a GFT Filter to the Searcher */ + ETH_RAMROD_GFT_UPDATE_FILTER, + ETH_RAMROD_TX_QUEUE_UPDATE /* TX Queue Update Ramrod */, + MAX_ETH_RAMROD_CMD_ID +}; + + +/* + * return code from eth sp ramrods + */ +struct eth_return_code { + u8 value; +/* error code (use enum eth_error_code) */ +#define ETH_RETURN_CODE_ERR_CODE_MASK 0x3F +#define ETH_RETURN_CODE_ERR_CODE_SHIFT 0 +#define ETH_RETURN_CODE_RESERVED_MASK 0x1 +#define ETH_RETURN_CODE_RESERVED_SHIFT 6 +/* rx path - 0, tx path - 1 */ +#define 
ETH_RETURN_CODE_RX_TX_MASK 0x1 +#define ETH_RETURN_CODE_RX_TX_SHIFT 7 +}; + + +/* + * tx destination enum + */ +enum eth_tx_dst_mode_config_enum { +/* tx destination configuration override is disabled */ + ETH_TX_DST_MODE_CONFIG_DISABLE, +/* tx destination configuration override is enabled, vport and tx dst will be + * taken from from 4th bd + */ + ETH_TX_DST_MODE_CONFIG_FORWARD_DATA_IN_BD, +/* tx destination configuration override is enabled, vport and tx dst will be + * taken from from vport data + */ + ETH_TX_DST_MODE_CONFIG_FORWARD_DATA_IN_VPORT, + MAX_ETH_TX_DST_MODE_CONFIG_ENUM +}; + + +/* + * What to do in case an error occurs + */ +enum eth_tx_err { + ETH_TX_ERR_DROP /* Drop erroneous packet. */, +/* Assert an interrupt for PF, declare as malicious for VF */ + ETH_TX_ERR_ASSERT_MALICIOUS, + MAX_ETH_TX_ERR +}; + + +/* + * Array of the different error type behaviors + */ +struct eth_tx_err_vals { + __le16 values; +/* Wrong VLAN insertion mode (use enum eth_tx_err) */ +#define ETH_TX_ERR_VALS_ILLEGAL_VLAN_MODE_MASK 0x1 +#define ETH_TX_ERR_VALS_ILLEGAL_VLAN_MODE_SHIFT 0 +/* Packet is below minimal size (use enum eth_tx_err) */ +#define ETH_TX_ERR_VALS_PACKET_TOO_SMALL_MASK 0x1 +#define ETH_TX_ERR_VALS_PACKET_TOO_SMALL_SHIFT 1 +/* Vport has sent spoofed packet (use enum eth_tx_err) */ +#define ETH_TX_ERR_VALS_ANTI_SPOOFING_ERR_MASK 0x1 +#define ETH_TX_ERR_VALS_ANTI_SPOOFING_ERR_SHIFT 2 +/* Packet with illegal type of inband tag (use enum eth_tx_err) */ +#define ETH_TX_ERR_VALS_ILLEGAL_INBAND_TAGS_MASK 0x1 +#define ETH_TX_ERR_VALS_ILLEGAL_INBAND_TAGS_SHIFT 3 +/* Packet marked for VLAN insertion when inband tag is present + * (use enum eth_tx_err) + */ +#define ETH_TX_ERR_VALS_VLAN_INSERTION_W_INBAND_TAG_MASK 0x1 +#define ETH_TX_ERR_VALS_VLAN_INSERTION_W_INBAND_TAG_SHIFT 4 +/* Non LSO packet larger than MTU (use enum eth_tx_err) */ +#define ETH_TX_ERR_VALS_MTU_VIOLATION_MASK 0x1 +#define ETH_TX_ERR_VALS_MTU_VIOLATION_SHIFT 5 +/* VF/PF has sent LLDP/PFC or any other type of control packet which is not + * allowed to (use enum eth_tx_err) + */ +#define ETH_TX_ERR_VALS_ILLEGAL_CONTROL_FRAME_MASK 0x1 +#define ETH_TX_ERR_VALS_ILLEGAL_CONTROL_FRAME_SHIFT 6 +#define ETH_TX_ERR_VALS_RESERVED_MASK 0x1FF +#define ETH_TX_ERR_VALS_RESERVED_SHIFT 7 +}; + + +/* + * vport rss configuration data + */ +struct eth_vport_rss_config { + __le16 capabilities; +/* configuration of the IpV4 2-tuple capability */ +#define ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY_MASK 0x1 +#define ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY_SHIFT 0 +/* configuration of the IpV6 2-tuple capability */ +#define ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY_MASK 0x1 +#define ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY_SHIFT 1 +/* configuration of the IpV4 4-tuple capability for TCP */ +#define ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY_MASK 0x1 +#define ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY_SHIFT 2 +/* configuration of the IpV6 4-tuple capability for TCP */ +#define ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY_MASK 0x1 +#define ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY_SHIFT 3 +/* configuration of the IpV4 4-tuple capability for UDP */ +#define ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY_MASK 0x1 +#define ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY_SHIFT 4 +/* configuration of the IpV6 4-tuple capability for UDP */ +#define ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY_MASK 0x1 +#define ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY_SHIFT 5 +/* configuration of the 5-tuple capability */ +#define ETH_VPORT_RSS_CONFIG_EN_5_TUPLE_CAPABILITY_MASK 0x1 +#define 
ETH_VPORT_RSS_CONFIG_EN_5_TUPLE_CAPABILITY_SHIFT 6 +/* if set update the rss keys */ +#define ETH_VPORT_RSS_CONFIG_RESERVED0_MASK 0x1FF +#define ETH_VPORT_RSS_CONFIG_RESERVED0_SHIFT 7 +/* The RSS engine ID. Must be allocated to each vport with RSS enabled. + * Total number of RSS engines is ETH_RSS_ENGINE_NUM_ , according to chip type. + */ + u8 rss_id; + u8 rss_mode /* The RSS mode for this function */; + u8 update_rss_key /* if set update the rss key */; +/* if set update the indirection table values */ + u8 update_rss_ind_table; +/* if set update the capabilities and indirection table size. */ + u8 update_rss_capabilities; + u8 tbl_size /* rss mask (Tbl size) */; + __le32 reserved2[2]; +/* RSS indirection table */ + __le16 indirection_table[ETH_RSS_IND_TABLE_ENTRIES_NUM]; +/* RSS key supplied to us by OS */ + __le32 rss_key[ETH_RSS_KEY_SIZE_REGS]; + __le32 reserved3[2]; +}; + + +/* + * eth vport RSS mode + */ +enum eth_vport_rss_mode { + ETH_VPORT_RSS_MODE_DISABLED /* RSS Disabled */, + ETH_VPORT_RSS_MODE_REGULAR /* Regular (ndis-like) RSS */, + MAX_ETH_VPORT_RSS_MODE +}; + + +/* + * Command for setting classification flags for a vport $$KEEP_ENDIANNESS$$ + */ +struct eth_vport_rx_mode { + __le16 state; +/* drop all unicast packets */ +#define ETH_VPORT_RX_MODE_UCAST_DROP_ALL_MASK 0x1 +#define ETH_VPORT_RX_MODE_UCAST_DROP_ALL_SHIFT 0 +/* accept all unicast packets (subject to vlan) */ +#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_ALL_MASK 0x1 +#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_ALL_SHIFT 1 +/* accept all unmatched unicast packets */ +#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED_MASK 0x1 +#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED_SHIFT 2 +/* drop all multicast packets */ +#define ETH_VPORT_RX_MODE_MCAST_DROP_ALL_MASK 0x1 +#define ETH_VPORT_RX_MODE_MCAST_DROP_ALL_SHIFT 3 +/* accept all multicast packets (subject to vlan) */ +#define ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL_MASK 0x1 +#define ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL_SHIFT 4 +/* accept all broadcast packets (subject to vlan) */ +#define ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_MASK 0x1 +#define ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_SHIFT 5 +/* accept any VNI in tunnel VNI classification. Used for default queue. */ +#define ETH_VPORT_RX_MODE_ACCEPT_ANY_VNI_MASK 0x1 +#define ETH_VPORT_RX_MODE_ACCEPT_ANY_VNI_SHIFT 6 +#define ETH_VPORT_RX_MODE_RESERVED1_MASK 0x1FF +#define ETH_VPORT_RX_MODE_RESERVED1_SHIFT 7 +}; + + +/* + * Command for setting tpa parameters + */ +struct eth_vport_tpa_param { + u8 tpa_ipv4_en_flg /* Enable TPA for IPv4 packets */; + u8 tpa_ipv6_en_flg /* Enable TPA for IPv6 packets */; + u8 tpa_ipv4_tunn_en_flg /* Enable TPA for IPv4 over tunnel */; + u8 tpa_ipv6_tunn_en_flg /* Enable TPA for IPv6 over tunnel */; +/* If set, start each TPA segment on new BD (GRO mode). One BD per segment + * allowed. + */ + u8 tpa_pkt_split_flg; +/* If set, put header of first TPA segment on first BD and data on second BD. 
*/ + u8 tpa_hdr_data_split_flg; +/* If set, GRO data consistent will checked for TPA continue */ + u8 tpa_gro_consistent_flg; +/* maximum number of opened aggregations per v-port */ + u8 tpa_max_aggs_num; + __le16 tpa_max_size /* maximal size for the aggregated TPA packets */; +/* minimum TCP payload size for a packet to start aggregation */ + __le16 tpa_min_size_to_start; +/* minimum TCP payload size for a packet to continue aggregation */ + __le16 tpa_min_size_to_cont; +/* maximal number of buffers that can be used for one aggregation */ + u8 max_buff_num; + u8 reserved; +}; + + +/* + * Command for setting classification flags for a vport $$KEEP_ENDIANNESS$$ + */ +struct eth_vport_tx_mode { + __le16 state; +/* drop all unicast packets */ +#define ETH_VPORT_TX_MODE_UCAST_DROP_ALL_MASK 0x1 +#define ETH_VPORT_TX_MODE_UCAST_DROP_ALL_SHIFT 0 +/* accept all unicast packets (subject to vlan) */ +#define ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL_MASK 0x1 +#define ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL_SHIFT 1 +/* drop all multicast packets */ +#define ETH_VPORT_TX_MODE_MCAST_DROP_ALL_MASK 0x1 +#define ETH_VPORT_TX_MODE_MCAST_DROP_ALL_SHIFT 2 +/* accept all multicast packets (subject to vlan) */ +#define ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL_MASK 0x1 +#define ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL_SHIFT 3 +/* accept all broadcast packets (subject to vlan) */ +#define ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL_MASK 0x1 +#define ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL_SHIFT 4 +#define ETH_VPORT_TX_MODE_RESERVED1_MASK 0x7FF +#define ETH_VPORT_TX_MODE_RESERVED1_SHIFT 5 +}; + + +/* + * GFT filter update action type. + */ +enum gft_filter_update_action { + GFT_ADD_FILTER, + GFT_DELETE_FILTER, + MAX_GFT_FILTER_UPDATE_ACTION +}; + + + + +/* + * Ramrod data for rx add openflow filter + */ +struct rx_add_openflow_filter_data { + __le16 action_icid /* CID of Action to run for this filter */; + u8 priority /* Searcher String - Packet priority */; + u8 reserved0; + __le32 tenant_id /* Searcher String - Tenant ID */; +/* Searcher String - Destination Mac Bytes 0 to 1 */ + __le16 dst_mac_hi; +/* Searcher String - Destination Mac Bytes 2 to 3 */ + __le16 dst_mac_mid; +/* Searcher String - Destination Mac Bytes 4 to 5 */ + __le16 dst_mac_lo; + __le16 src_mac_hi /* Searcher String - Source Mac 0 to 1 */; + __le16 src_mac_mid /* Searcher String - Source Mac 2 to 3 */; + __le16 src_mac_lo /* Searcher String - Source Mac 4 to 5 */; + __le16 vlan_id /* Searcher String - Vlan ID */; + __le16 l2_eth_type /* Searcher String - Last L2 Ethertype */; + u8 ipv4_dscp /* Searcher String - IPv4 6 MSBs of the TOS Field */; + u8 ipv4_frag_type /* Searcher String - IPv4 Fragmentation Type */; + u8 ipv4_over_ip /* Searcher String - IPv4 Over IP Type */; + u8 tenant_id_exists /* Searcher String - Tenant ID Exists */; + __le32 ipv4_dst_addr /* Searcher String - IPv4 Destination Address */; + __le32 ipv4_src_addr /* Searcher String - IPv4 Source Address */; + __le16 l4_dst_port /* Searcher String - TCP/UDP Destination Port */; + __le16 l4_src_port /* Searcher String - TCP/UDP Source Port */; +}; + + +/* + * Ramrod data for rx create gft action + */ +struct rx_create_gft_action_data { + u8 vport_id /* Vport Id of GFT Action */; + u8 reserved[7]; +}; + + +/* + * Ramrod data for rx create openflow action + */ +struct rx_create_openflow_action_data { + u8 vport_id /* ID of RX queue */; + u8 reserved[7]; +}; + + +/* + * Ramrod data for rx queue start ramrod + */ +struct rx_queue_start_ramrod_data { + __le16 rx_queue_id /* ID of RX queue */; + __le16 num_of_pbl_pages /* 
Number of pages in CQE PBL */; + __le16 bd_max_bytes /* maximal bytes that can be places on the bd */; + __le16 sb_id /* Status block ID */; + u8 sb_index /* index of the protocol index */; + u8 vport_id /* ID of virtual port */; + u8 default_rss_queue_flg /* set queue as default rss queue if set */; + u8 complete_cqe_flg /* post completion to the CQE ring if set */; + u8 complete_event_flg /* post completion to the event ring if set */; + u8 stats_counter_id /* Statistics counter ID */; + u8 pin_context /* Pin context in CCFC to improve performance */; + u8 pxp_tph_valid_bd /* PXP command TPH Valid - for BD/SGE fetch */; +/* PXP command TPH Valid - for packet placement */ + u8 pxp_tph_valid_pkt; +/* PXP command Steering tag hint. Use enum pxp_tph_st_hint */ + u8 pxp_st_hint; + __le16 pxp_st_index /* PXP command Steering tag index */; +/* Indicates that current queue belongs to poll-mode driver */ + u8 pmd_mode; +/* Indicates that the current queue is using the TX notification queue + * mechanism - should be set only for PMD queue + */ + u8 notify_en; +/* Initial value for the toggle valid bit - used in PMD mode */ + u8 toggle_val; +/* Index of RX producers in VF zone. Used for VF only. */ + u8 vf_rx_prod_index; +/* Backward compatibility mode. If set, unprotected mStorm queue zone will used + * for VF RX producers instead of VF zone. + */ + u8 vf_rx_prod_use_zone_a; + u8 reserved[5]; + __le16 reserved1 /* FW reserved. */; + struct regpair cqe_pbl_addr /* Base address on host of CQE PBL */; + struct regpair bd_base /* bd address of the first bd page */; + struct regpair reserved2 /* FW reserved. */; +}; + + +/* + * Ramrod data for rx queue stop ramrod + */ +struct rx_queue_stop_ramrod_data { + __le16 rx_queue_id /* ID of RX queue */; + u8 complete_cqe_flg /* post completion to the CQE ring if set */; + u8 complete_event_flg /* post completion to the event ring if set */; + u8 vport_id /* ID of virtual port */; + u8 reserved[3]; +}; + + +/* + * Ramrod data for rx queue update ramrod + */ +struct rx_queue_update_ramrod_data { + __le16 rx_queue_id /* ID of RX queue */; + u8 complete_cqe_flg /* post completion to the CQE ring if set */; + u8 complete_event_flg /* post completion to the event ring if set */; + u8 vport_id /* ID of virtual port */; +/* If set, update default rss queue to this RX queue. */ + u8 set_default_rss_queue; + u8 reserved[3]; + u8 reserved1 /* FW reserved. */; + u8 reserved2 /* FW reserved. */; + u8 reserved3 /* FW reserved. */; + __le16 reserved4 /* FW reserved. */; + __le16 reserved5 /* FW reserved. */; + struct regpair reserved6 /* FW reserved. 
*/; +}; + + +/* + * Ramrod data for rx Add UDP Filter + */ +struct rx_udp_filter_data { + __le16 action_icid /* CID of Action to run for this filter */; + __le16 vlan_id /* Searcher String - Vlan ID */; + u8 ip_type /* Searcher String - IP Type */; + u8 tenant_id_exists /* Searcher String - Tenant ID Exists */; + __le16 reserved1; +/* Searcher String - IP Destination Address, for IPv4 use ip_dst_addr[0] only */ + __le32 ip_dst_addr[4]; +/* Searcher String - IP Source Address, for IPv4 use ip_dst_addr[0] only */ + __le32 ip_src_addr[4]; + __le16 udp_dst_port /* Searcher String - UDP Destination Port */; + __le16 udp_src_port /* Searcher String - UDP Source Port */; + __le32 tenant_id /* Searcher String - Tenant ID */; +}; + + +/* + * add or delete GFT filter - filter is packet header of type of packet wished + * to pass certain FW flow + */ +struct rx_update_gft_filter_data { +/* Pointer to Packet Header That Defines GFT Filter */ + struct regpair pkt_hdr_addr; + __le16 pkt_hdr_length /* Packet Header Length */; +/* Action icid. Valid if action_icid_valid flag set. */ + __le16 action_icid; + __le16 rx_qid /* RX queue ID. Valid if rx_qid_valid set. */; + __le16 flow_id /* RX flow ID. Valid if flow_id_valid set. */; +/* RX vport Id. For drop flow, set to ETH_GFT_TRASHCAN_VPORT. */ + __le16 vport_id; +/* If set, action_icid will used for GFT filter update. */ + u8 action_icid_valid; +/* If set, rx_qid will used for traffic steering, in additional to vport_id. + * flow_id_valid must be cleared. If cleared, queue ID will selected by RSS. + */ + u8 rx_qid_valid; +/* If set, flow_id will reported by CQE, rx_qid_valid must be cleared. If + * cleared, flow_id 0 will reported by CQE. + */ + u8 flow_id_valid; + u8 filter_action /* Use to set type of action on filter */; +/* 0 - dont assert in case of error. Just return an error code. 1 - assert in + * case of error. + */ + u8 assert_on_error; +/* If set, inner VLAN will be removed regardless to VPORT configuration. + * Supported by E4 only. + */ + u8 inner_vlan_removal_en; +}; + + + +/* + * Ramrod data for tx queue start ramrod + */ +struct tx_queue_start_ramrod_data { + __le16 sb_id /* Status block ID */; + u8 sb_index /* Status block protocol index */; + u8 vport_id /* VPort ID */; + u8 reserved0 /* FW reserved. (qcn_rl_en) */; + u8 stats_counter_id /* Statistics counter ID to use */; + __le16 qm_pq_id /* QM PQ ID */; + u8 flags; +/* 0: Enable QM opportunistic flow. 
1: Disable QM opportunistic flow */ +#define TX_QUEUE_START_RAMROD_DATA_DISABLE_OPPORTUNISTIC_MASK 0x1 +#define TX_QUEUE_START_RAMROD_DATA_DISABLE_OPPORTUNISTIC_SHIFT 0 +/* If set, Test Mode - packets will be duplicated by Xstorm handler */ +#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_PKT_DUP_MASK 0x1 +#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_PKT_DUP_SHIFT 1 +/* If set, Test Mode - packets destination will be determined by dest_port_mode + * field from Tx BD + */ +#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_TX_DEST_MASK 0x1 +#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_TX_DEST_SHIFT 2 +/* Indicates that current queue belongs to poll-mode driver */ +#define TX_QUEUE_START_RAMROD_DATA_PMD_MODE_MASK 0x1 +#define TX_QUEUE_START_RAMROD_DATA_PMD_MODE_SHIFT 3 +/* Indicates that the current queue is using the TX notification queue + * mechanism - should be set only for PMD queue + */ +#define TX_QUEUE_START_RAMROD_DATA_NOTIFY_EN_MASK 0x1 +#define TX_QUEUE_START_RAMROD_DATA_NOTIFY_EN_SHIFT 4 +/* Pin context in CCFC to improve performance */ +#define TX_QUEUE_START_RAMROD_DATA_PIN_CONTEXT_MASK 0x1 +#define TX_QUEUE_START_RAMROD_DATA_PIN_CONTEXT_SHIFT 5 +#define TX_QUEUE_START_RAMROD_DATA_RESERVED1_MASK 0x3 +#define TX_QUEUE_START_RAMROD_DATA_RESERVED1_SHIFT 6 + u8 pxp_st_hint /* PXP command Steering tag hint */; + u8 pxp_tph_valid_bd /* PXP command TPH Valid - for BD fetch */; + u8 pxp_tph_valid_pkt /* PXP command TPH Valid - for packet fetch */; + __le16 pxp_st_index /* PXP command Steering tag index */; +/* TX completion min agg size - for PMD queues */ + __le16 comp_agg_size; + __le16 queue_zone_id /* queue zone ID to use */; + __le16 reserved2 /* FW reserved. (test_dup_count) */; + __le16 pbl_size /* Number of BD pages pointed by PBL */; +/* unique Queue ID - currently used only by PMD flow */ + __le16 tx_queue_id; +/* Unique Same-As-Last Resource ID - improves performance for same-as-last + * packets per connection (range 0..ETH_TX_NUM_SAME_AS_LAST_ENTRIES-1 IDs + * available) + */ + __le16 same_as_last_id; + __le16 reserved[3]; + struct regpair pbl_base_addr /* address of the pbl page */; +/* BD consumer address in host - for PMD queues */ + struct regpair bd_cons_address; +}; + + +/* + * Ramrod data for tx queue stop ramrod + */ +struct tx_queue_stop_ramrod_data { + __le16 reserved[4]; +}; + + +/* + * Ramrod data for tx queue update ramrod + */ +struct tx_queue_update_ramrod_data { + __le16 update_qm_pq_id_flg /* Flag to Update QM PQ ID */; + __le16 qm_pq_id /* Updated QM PQ ID */; + __le32 reserved0; + struct regpair reserved1[5]; +}; + + +/* + * Inner to Inner VLAN priority map update mode + */ +enum update_in_to_in_pri_map_mode_enum { +/* Inner to Inner VLAN priority map update Disabled */ + ETH_IN_TO_IN_PRI_MAP_UPDATE_DISABLED, +/* Update Inner to Inner VLAN priority map for non RDMA protocols */ + ETH_IN_TO_IN_PRI_MAP_UPDATE_NON_RDMA_TBL, +/* Update Inner to Inner VLAN priority map for RDMA protocols */ + ETH_IN_TO_IN_PRI_MAP_UPDATE_RDMA_TBL, + MAX_UPDATE_IN_TO_IN_PRI_MAP_MODE_ENUM +}; + + + +/* + * Ramrod data for vport update ramrod + */ +struct vport_filter_update_ramrod_data { +/* Header for Filter Commands (RX/TX, Add/Remove/Replace, etc) */ + struct eth_filter_cmd_header filter_cmd_hdr; +/* Filter Commands */ + struct eth_filter_cmd filter_cmds[ETH_FILTER_RULES_COUNT]; +}; + + +/* + * Ramrod data for vport start ramrod + */ +struct vport_start_ramrod_data { + u8 vport_id; + u8 sw_fid; + __le16 mtu; + u8 drop_ttl0_en /* if set, drop packet with ttl=0 */; + u8 
inner_vlan_removal_en; + struct eth_vport_rx_mode rx_mode /* Rx filter data */; + struct eth_vport_tx_mode tx_mode /* Tx filter data */; +/* TPA configuration parameters */ + struct eth_vport_tpa_param tpa_param; + __le16 default_vlan /* Default Vlan value to be forced by FW */; + u8 tx_switching_en /* Tx switching is enabled for current Vport */; +/* Anti-spoofing verification is set for current Vport */ + u8 anti_spoofing_en; +/* If set, the default Vlan value is forced by the FW */ + u8 default_vlan_en; +/* If set, the vport handles PTP Timesync Packets */ + u8 handle_ptp_pkts; +/* If enable then innerVlan will be striped and not written to cqe */ + u8 silent_vlan_removal_en; +/* If set untagged filter (vlan0) is added to current Vport, otherwise port is + * marked as any-vlan + */ + u8 untagged; +/* Desired behavior per TX error type */ + struct eth_tx_err_vals tx_err_behav; +/* If set, ETH header padding will not inserted. placement_offset will be zero. + */ + u8 zero_placement_offset; +/* If set, control frames will be filtered according to MAC check. */ + u8 ctl_frame_mac_check_en; +/* If set, control frames will be filtered according to ethtype check. */ + u8 ctl_frame_ethtype_check_en; +/* If set, the inner vlan (802.1q tag) priority that is written to cqe will be + * zero out, used for TenantDcb + */ + u8 wipe_inner_vlan_pri_en; +/* inner to inner vlan priority translation configurations */ + struct eth_in_to_in_pri_map_cfg in_to_in_vlan_pri_map_cfg; +}; + + +/* + * Ramrod data for vport stop ramrod + */ +struct vport_stop_ramrod_data { + u8 vport_id; + u8 reserved[7]; +}; + + +/* + * Ramrod data for vport update ramrod + */ +struct vport_update_ramrod_data_cmn { + u8 vport_id; + u8 update_rx_active_flg /* set if rx active flag should be handled */; + u8 rx_active_flg /* rx active flag value */; + u8 update_tx_active_flg /* set if tx active flag should be handled */; + u8 tx_active_flg /* tx active flag value */; + u8 update_rx_mode_flg /* set if rx state data should be handled */; + u8 update_tx_mode_flg /* set if tx state data should be handled */; +/* set if approx. mcast data should be handled */ + u8 update_approx_mcast_flg; + u8 update_rss_flg /* set if rss data should be handled */; +/* set if inner_vlan_removal_en should be handled */ + u8 update_inner_vlan_removal_en_flg; + u8 inner_vlan_removal_en; +/* set if tpa parameters should be handled, TPA must be disable before */ + u8 update_tpa_param_flg; + u8 update_tpa_en_flg /* set if tpa enable changes */; +/* set if tx switching en flag should be handled */ + u8 update_tx_switching_en_flg; + u8 tx_switching_en /* tx switching en value */; +/* set if anti spoofing flag should be handled */ + u8 update_anti_spoofing_en_flg; + u8 anti_spoofing_en /* Anti-spoofing verification en value */; +/* set if handle_ptp_pkts should be handled. */ + u8 update_handle_ptp_pkts; +/* If set, the vport handles PTP Timesync Packets */ + u8 handle_ptp_pkts; +/* If set, the default Vlan enable flag is updated */ + u8 update_default_vlan_en_flg; +/* If set, the default Vlan value is forced by the FW */ + u8 default_vlan_en; +/* If set, the default Vlan value is updated */ + u8 update_default_vlan_flg; + __le16 default_vlan /* Default Vlan value to be forced by FW */; +/* set if accept_any_vlan should be handled */ + u8 update_accept_any_vlan_flg; + u8 accept_any_vlan /* accept_any_vlan updated value */; +/* Set to remove vlan silently, update_inner_vlan_removal_en_flg must be enabled + * as well. 
If Rx is in noSgl mode send rx_queue_update_ramrod_data + */ + u8 silent_vlan_removal_en; +/* If set, MTU will be updated. Vport must be not active. */ + u8 update_mtu_flg; + __le16 mtu /* New MTU value. Used if update_mtu_flg are set */; +/* If set, ctl_frame_mac_check_en and ctl_frame_ethtype_check_en will be + * updated + */ + u8 update_ctl_frame_checks_en_flg; +/* If set, control frames will be filtered according to MAC check. */ + u8 ctl_frame_mac_check_en; +/* If set, control frames will be filtered according to ethtype check. */ + u8 ctl_frame_ethtype_check_en; +/* Indicates to update RDMA or NON-RDMA vlan remapping priority table according + * to update_in_to_in_pri_map_mode_enum, used for TenantDcb (use enum + * update_in_to_in_pri_map_mode_enum) + */ + u8 update_in_to_in_pri_map_mode; +/* Map for inner to inner vlan priority translation, used for TenantDcb. */ + u8 in_to_in_pri_map[8]; + u8 reserved[6]; +}; + +struct vport_update_ramrod_mcast { + __le32 bins[ETH_MULTICAST_MAC_BINS_IN_REGS] /* multicast bins */; +}; + +/* + * Ramrod data for vport update ramrod + */ +struct vport_update_ramrod_data { +/* Common data for all vport update ramrods */ + struct vport_update_ramrod_data_cmn common; + struct eth_vport_rx_mode rx_mode /* vport rx mode bitmap */; + struct eth_vport_tx_mode tx_mode /* vport tx mode bitmap */; + __le32 reserved[3]; +/* TPA configuration parameters */ + struct eth_vport_tpa_param tpa_param; + struct vport_update_ramrod_mcast approx_mcast; + struct eth_vport_rss_config rss_config /* rss config data */; +}; + + + + + + +struct E4XstormEthConnAgCtxDqExtLdPart { + u8 reserved0 /* cdu_validation */; + u8 state /* state */; + u8 flags0; +/* exist_in_qm0 */ +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM0_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM0_SHIFT 0 +/* exist_in_qm1 */ +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED1_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED1_SHIFT 1 +/* exist_in_qm2 */ +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED2_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED2_SHIFT 2 +/* exist_in_qm3 */ +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM3_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM3_SHIFT 3 +/* bit4 */ +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED3_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED3_SHIFT 4 +/* cf_array_active */ +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED4_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED4_SHIFT 5 +/* bit6 */ +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED5_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED5_SHIFT 6 +/* bit7 */ +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED6_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED6_SHIFT 7 + u8 flags1; +/* bit8 */ +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED7_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED7_SHIFT 0 +/* bit9 */ +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED8_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED8_SHIFT 1 +/* bit10 */ +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED9_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED9_SHIFT 2 +/* bit11 */ +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_BIT11_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_BIT11_SHIFT 3 +/* bit12 */ +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_BIT12_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_BIT12_SHIFT 4 +/* bit13 */ +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_BIT13_MASK 0x1 +#define 
E4XSTORMETHCONNAGCTXDQEXTLDPART_BIT13_SHIFT 5 +/* bit14 */ +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TX_RULE_ACTIVE_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TX_RULE_ACTIVE_SHIFT 6 +/* bit15 */ +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_ACTIVE_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_ACTIVE_SHIFT 7 + u8 flags2; +/* timer0cf */ +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF0_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF0_SHIFT 0 +/* timer1cf */ +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF1_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF1_SHIFT 2 +/* timer2cf */ +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF2_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF2_SHIFT 4 +/* timer_stop_all */ +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF3_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF3_SHIFT 6 + u8 flags3; +/* cf4 */ +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF4_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF4_SHIFT 0 +/* cf5 */ +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF5_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF5_SHIFT 2 +/* cf6 */ +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF6_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF6_SHIFT 4 +/* cf7 */ +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF7_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF7_SHIFT 6 + u8 flags4; +/* cf8 */ +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF8_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF8_SHIFT 0 +/* cf9 */ +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF9_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF9_SHIFT 2 +/* cf10 */ +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF10_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF10_SHIFT 4 +/* cf11 */ +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF11_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF11_SHIFT 6 + u8 flags5; +/* cf12 */ +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF12_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF12_SHIFT 0 +/* cf13 */ +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF13_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF13_SHIFT 2 +/* cf14 */ +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF14_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF14_SHIFT 4 +/* cf15 */ +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF15_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF15_SHIFT 6 + u8 flags6; +/* cf16 */ +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_SHIFT 0 +/* cf_array_cf */ +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_SHIFT 2 +/* cf18 */ +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_SHIFT 4 +/* cf19 */ +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_SHIFT 6 + u8 flags7; +/* cf20 */ +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_SHIFT 0 +/* cf21 */ +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED10_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED10_SHIFT 2 +/* cf22 */ +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_SHIFT 4 +/* cf0en */ +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF0EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF0EN_SHIFT 6 +/* cf1en */ +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF1EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF1EN_SHIFT 7 + u8 flags8; +/* 
cf2en */ +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF2EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF2EN_SHIFT 0 +/* cf3en */ +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF3EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF3EN_SHIFT 1 +/* cf4en */ +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF4EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF4EN_SHIFT 2 +/* cf5en */ +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF5EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF5EN_SHIFT 3 +/* cf6en */ +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF6EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF6EN_SHIFT 4 +/* cf7en */ +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF7EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF7EN_SHIFT 5 +/* cf8en */ +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF8EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF8EN_SHIFT 6 +/* cf9en */ +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF9EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF9EN_SHIFT 7 + u8 flags9; +/* cf10en */ +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF10EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF10EN_SHIFT 0 +/* cf11en */ +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF11EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF11EN_SHIFT 1 +/* cf12en */ +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF12EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF12EN_SHIFT 2 +/* cf13en */ +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF13EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF13EN_SHIFT 3 +/* cf14en */ +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF14EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF14EN_SHIFT 4 +/* cf15en */ +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF15EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF15EN_SHIFT 5 +/* cf16en */ +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_EN_SHIFT 6 +/* cf_array_cf_en */ +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_EN_SHIFT 7 + u8 flags10; +/* cf18en */ +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_EN_SHIFT 0 +/* cf19en */ +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_EN_SHIFT 1 +/* cf20en */ +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_EN_SHIFT 2 +/* cf21en */ +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED11_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED11_SHIFT 3 +/* cf22en */ +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_EN_SHIFT 4 +/* cf23en */ +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_EN_RESERVED_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_EN_RESERVED_SHIFT 5 +/* rule0en */ +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED12_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED12_SHIFT 6 +/* rule1en */ +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED13_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED13_SHIFT 7 + u8 flags11; +/* rule2en */ +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED14_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED14_SHIFT 0 +/* rule3en */ +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED15_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED15_SHIFT 1 +/* rule4en */ +#define 
E4XSTORMETHCONNAGCTXDQEXTLDPART_TX_DEC_RULE_EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TX_DEC_RULE_EN_SHIFT 2 +/* rule5en */ +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE5EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE5EN_SHIFT 3 +/* rule6en */ +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE6EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE6EN_SHIFT 4 +/* rule7en */ +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE7EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE7EN_SHIFT 5 +/* rule8en */ +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED1_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED1_SHIFT 6 +/* rule9en */ +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE9EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE9EN_SHIFT 7 + u8 flags12; +/* rule10en */ +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE10EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE10EN_SHIFT 0 +/* rule11en */ +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE11EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE11EN_SHIFT 1 +/* rule12en */ +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED2_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED2_SHIFT 2 +/* rule13en */ +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED3_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED3_SHIFT 3 +/* rule14en */ +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE14EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE14EN_SHIFT 4 +/* rule15en */ +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE15EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE15EN_SHIFT 5 +/* rule16en */ +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE16EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE16EN_SHIFT 6 +/* rule17en */ +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE17EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE17EN_SHIFT 7 + u8 flags13; +/* rule18en */ +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE18EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE18EN_SHIFT 0 +/* rule19en */ +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE19EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE19EN_SHIFT 1 +/* rule20en */ +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED4_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED4_SHIFT 2 +/* rule21en */ +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED5_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED5_SHIFT 3 +/* rule22en */ +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED6_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED6_SHIFT 4 +/* rule23en */ +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED7_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED7_SHIFT 5 +/* rule24en */ +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED8_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED8_SHIFT 6 +/* rule25en */ +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED9_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED9_SHIFT 7 + u8 flags14; +/* bit16 */ +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_USE_EXT_HDR_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_USE_EXT_HDR_SHIFT 0 +/* bit17 */ +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_RAW_L3L4_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_RAW_L3L4_SHIFT 1 +/* bit18 */ +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_INBAND_PROP_HDR_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_INBAND_PROP_HDR_SHIFT 2 +/* bit19 */ +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_EXT_TUNNEL_MASK 0x1 
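/*
 * Illustrative sketch (hypothetical helpers, not part of the upstream header):
 * the *_MASK / *_SHIFT pairs in these aggregative-context structures describe
 * sub-fields packed into the u8 "flagsN" bytes, with the convention
 * value = (flags >> SHIFT) & MASK. The snippet below, guarded by #if 0 so it
 * cannot affect the header, shows how such a pair might be read and written
 * with plain shift/mask arithmetic. The helper names ag_ctx_get_field /
 * ag_ctx_set_field, the example() function, and the example value 2 are
 * assumptions chosen for illustration only; the TPH_ENABLE field of
 * struct xstorm_eth_conn_ag_ctx is used simply because its MASK/SHIFT pair
 * appears above.
 */
#if 0
#include <stdint.h>

/* Extract a sub-field from a flags byte, given its (unshifted) MASK and SHIFT. */
static inline uint8_t ag_ctx_get_field(uint8_t flags, uint8_t mask, uint8_t shift)
{
	return (flags >> shift) & mask;
}

/* Store a sub-field into a flags byte, clearing its previous value first. */
static inline uint8_t ag_ctx_set_field(uint8_t flags, uint8_t mask, uint8_t shift,
				       uint8_t val)
{
	flags &= (uint8_t)~(mask << shift);
	flags |= (uint8_t)((val & mask) << shift);
	return flags;
}

/*
 * Example: write an arbitrary 2-bit code into the TPH_ENABLE field (cf23) of
 * flags14 in struct xstorm_eth_conn_ag_ctx, then read it back.
 */
static void example(struct xstorm_eth_conn_ag_ctx *ctx)
{
	ctx->flags14 = ag_ctx_set_field(ctx->flags14,
					XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_MASK,
					XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_SHIFT,
					2);
	(void)ag_ctx_get_field(ctx->flags14,
			       XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_MASK,
			       XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_SHIFT);
}
#endif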
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_EXT_TUNNEL_SHIFT 3 +/* bit20 */ +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_L2_EDPM_ENABLE_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_L2_EDPM_ENABLE_SHIFT 4 +/* bit21 */ +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_SHIFT 5 +/* cf23 */ +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_SHIFT 6 + u8 edpm_event_id /* byte2 */; + __le16 physical_q0 /* physical_q0 */; + __le16 e5_reserved1 /* physical_q1 */; + __le16 edpm_num_bds /* physical_q2 */; + __le16 tx_bd_cons /* word3 */; + __le16 tx_bd_prod /* word4 */; + __le16 updated_qm_pq_id /* word5 */; + __le16 conn_dpi /* conn_dpi */; + u8 byte3 /* byte3 */; + u8 byte4 /* byte4 */; + u8 byte5 /* byte5 */; + u8 byte6 /* byte6 */; + __le32 reg0 /* reg0 */; + __le32 reg1 /* reg1 */; + __le32 reg2 /* reg2 */; + __le32 reg3 /* reg3 */; + __le32 reg4 /* reg4 */; +}; + + +struct mstorm_eth_conn_ag_ctx { + u8 byte0 /* cdu_validation */; + u8 byte1 /* state */; + u8 flags0; +#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 /* exist_in_qm0 */ +#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define MSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */ +#define MSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1 +#define MSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 /* cf0 */ +#define MSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 2 +#define MSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 /* cf1 */ +#define MSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 4 +#define MSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* cf2 */ +#define MSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define MSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */ +#define MSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 0 +#define MSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */ +#define MSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 1 +#define MSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */ +#define MSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2 +#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */ +#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */ +#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */ +#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */ +#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */ +#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7 + __le16 word0 /* word0 */; + __le16 word1 /* word1 */; + __le32 reg0 /* reg0 */; + __le32 reg1 /* reg1 */; +}; + + + + + +struct xstorm_eth_hw_conn_ag_ctx { + u8 reserved0 /* cdu_validation */; + u8 eth_state /* state */; + u8 flags0; +/* exist_in_qm0 */ +#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +/* exist_in_qm1 */ +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_SHIFT 1 +/* exist_in_qm2 */ +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_SHIFT 2 +/* exist_in_qm3 */ +#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_MASK 0x1 /* bit4 */ +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_SHIFT 4 +/* cf_array_active */ +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_SHIFT 5 +#define 
XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_MASK 0x1 /* bit6 */ +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_SHIFT 6 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_MASK 0x1 /* bit7 */ +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_SHIFT 7 + u8 flags1; +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_MASK 0x1 /* bit8 */ +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_MASK 0x1 /* bit9 */ +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_SHIFT 1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_MASK 0x1 /* bit10 */ +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_MASK 0x1 /* bit11 */ +#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_SHIFT 3 +#define XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED2_MASK 0x1 /* bit12 */ +#define XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED2_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED3_MASK 0x1 /* bit13 */ +#define XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED3_SHIFT 5 +#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1 /* bit14 */ +#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6 +#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1 /* bit15 */ +#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7 + u8 flags2; +/* timer0cf */ +#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_SHIFT 0 +/* timer1cf */ +#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_SHIFT 2 +/* timer2cf */ +#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_SHIFT 4 +/* timer_stop_all */ +#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_SHIFT 6 + u8 flags3; +#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */ +#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */ +#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */ +#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */ +#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_SHIFT 6 + u8 flags4; +#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */ +#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */ +#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */ +#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_MASK 0x3 /* cf11 */ +#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_SHIFT 6 + u8 flags5; +#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_MASK 0x3 /* cf12 */ +#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_MASK 0x3 /* cf13 */ +#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_MASK 0x3 /* cf14 */ +#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_MASK 0x3 /* cf15 */ +#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_SHIFT 6 + u8 flags6; +#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3 /* cf16 */ +#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0 +/* cf_array_cf */ +#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_MASK 0x3 /* cf18 */ +#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_MASK 0x3 /* cf19 */ +#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_SHIFT 6 + u8 flags7; +#define 
XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 /* cf20 */ +#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_MASK 0x3 /* cf21 */ +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_MASK 0x3 /* cf22 */ +#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */ +#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_SHIFT 6 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */ +#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_SHIFT 7 + u8 flags8; +#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */ +#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */ +#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_SHIFT 1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */ +#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */ +#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_SHIFT 3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */ +#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */ +#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_SHIFT 5 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */ +#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_SHIFT 6 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */ +#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_SHIFT 7 + u8 flags9; +#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */ +#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_MASK 0x1 /* cf11en */ +#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_SHIFT 1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_MASK 0x1 /* cf12en */ +#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_MASK 0x1 /* cf13en */ +#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_SHIFT 3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_MASK 0x1 /* cf14en */ +#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_MASK 0x1 /* cf15en */ +#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_SHIFT 5 +#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1 /* cf16en */ +#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6 +/* cf_array_cf_en */ +#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7 + u8 flags10; +#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_MASK 0x1 /* cf18en */ +#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1 /* cf19en */ +#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1 +#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 /* cf20en */ +#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_MASK 0x1 /* cf21en */ +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_SHIFT 3 +#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 /* cf22en */ +#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1 /* cf23en */ +#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_MASK 0x1 /* rule0en */ +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_SHIFT 6 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_MASK 0x1 /* rule1en */ +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_SHIFT 7 + u8 flags11; +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_MASK 0x1 /* 
rule2en */ +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_MASK 0x1 /* rule3en */ +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_SHIFT 1 +#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1 /* rule4en */ +#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */ +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_SHIFT 3 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */ +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */ +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_SHIFT 5 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 /* rule8en */ +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_MASK 0x1 /* rule9en */ +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_SHIFT 7 + u8 flags12; +/* rule10en */ +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_SHIFT 0 +/* rule11en */ +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_SHIFT 1 +/* rule12en */ +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 +/* rule13en */ +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 +/* rule14en */ +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_SHIFT 4 +/* rule15en */ +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_SHIFT 5 +/* rule16en */ +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_SHIFT 6 +/* rule17en */ +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_SHIFT 7 + u8 flags13; +/* rule18en */ +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_SHIFT 0 +/* rule19en */ +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_SHIFT 1 +/* rule20en */ +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 +/* rule21en */ +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 +/* rule22en */ +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 +/* rule23en */ +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 +/* rule24en */ +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 +/* rule25en */ +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 + u8 flags14; +#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1 /* bit16 */ +#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1 /* bit17 */ +#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1 +#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1 /* bit18 */ +#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1 /* bit19 */ +#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3 +#define 
XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1 /* bit20 */ +#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1 /* bit21 */ +#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5 +#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_MASK 0x3 /* cf23 */ +#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_SHIFT 6 + u8 edpm_event_id /* byte2 */; + __le16 physical_q0 /* physical_q0 */; + __le16 e5_reserved1 /* physical_q1 */; + __le16 edpm_num_bds /* physical_q2 */; + __le16 tx_bd_cons /* word3 */; + __le16 tx_bd_prod /* word4 */; + __le16 updated_qm_pq_id /* word5 */; + __le16 conn_dpi /* conn_dpi */; +}; + + + +/* + * GFT CAM line struct + */ +struct gft_cam_line { + __le32 camline; +/* Indication if the line is valid. */ +#define GFT_CAM_LINE_VALID_MASK 0x1 +#define GFT_CAM_LINE_VALID_SHIFT 0 +/* Data bits, the word that compared with the profile key */ +#define GFT_CAM_LINE_DATA_MASK 0x3FFF +#define GFT_CAM_LINE_DATA_SHIFT 1 +/* Mask bits, indicate the bits in the data that are Dont-Care */ +#define GFT_CAM_LINE_MASK_BITS_MASK 0x3FFF +#define GFT_CAM_LINE_MASK_BITS_SHIFT 15 +#define GFT_CAM_LINE_RESERVED1_MASK 0x7 +#define GFT_CAM_LINE_RESERVED1_SHIFT 29 +}; + + +/* + * GFT CAM line struct (for driversim use) + */ +struct gft_cam_line_mapped { + __le32 camline; +/* Indication if the line is valid. */ +#define GFT_CAM_LINE_MAPPED_VALID_MASK 0x1 +#define GFT_CAM_LINE_MAPPED_VALID_SHIFT 0 +/* use enum gft_profile_ip_version (use enum gft_profile_ip_version) */ +#define GFT_CAM_LINE_MAPPED_IP_VERSION_MASK 0x1 +#define GFT_CAM_LINE_MAPPED_IP_VERSION_SHIFT 1 +/* use enum gft_profile_ip_version (use enum gft_profile_ip_version) */ +#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_MASK 0x1 +#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_SHIFT 2 +/* use enum gft_profile_upper_protocol_type + * (use enum gft_profile_upper_protocol_type) + */ +#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK 0xF +#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_SHIFT 3 +/* use enum gft_profile_tunnel_type (use enum gft_profile_tunnel_type) */ +#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_MASK 0xF +#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_SHIFT 7 +#define GFT_CAM_LINE_MAPPED_PF_ID_MASK 0xF +#define GFT_CAM_LINE_MAPPED_PF_ID_SHIFT 11 +/* use enum gft_profile_ip_version (use enum gft_profile_ip_version) */ +#define GFT_CAM_LINE_MAPPED_IP_VERSION_MASK_MASK 0x1 +#define GFT_CAM_LINE_MAPPED_IP_VERSION_MASK_SHIFT 15 +/* use enum gft_profile_ip_version (use enum gft_profile_ip_version) */ +#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_MASK_MASK 0x1 +#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_MASK_SHIFT 16 +/* use enum gft_profile_upper_protocol_type + * (use enum gft_profile_upper_protocol_type) + */ +#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_MASK 0xF +#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_SHIFT 17 +/* use enum gft_profile_tunnel_type (use enum gft_profile_tunnel_type) */ +#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_MASK_MASK 0xF +#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_MASK_SHIFT 21 +#define GFT_CAM_LINE_MAPPED_PF_ID_MASK_MASK 0xF +#define GFT_CAM_LINE_MAPPED_PF_ID_MASK_SHIFT 25 +#define GFT_CAM_LINE_MAPPED_RESERVED1_MASK 0x7 +#define GFT_CAM_LINE_MAPPED_RESERVED1_SHIFT 29 +}; + + +union gft_cam_line_union { + struct gft_cam_line cam_line; + struct gft_cam_line_mapped cam_line_mapped; +}; + + +/* + * Used in gft_profile_key: Indication for ip version + */ +enum gft_profile_ip_version { + GFT_PROFILE_IPV4 = 0, + GFT_PROFILE_IPV6 = 1, + 
MAX_GFT_PROFILE_IP_VERSION +}; + + +/* + * Profile key stucr fot GFT logic in Prs + */ +struct gft_profile_key { + __le16 profile_key; +/* use enum gft_profile_ip_version (use enum gft_profile_ip_version) */ +#define GFT_PROFILE_KEY_IP_VERSION_MASK 0x1 +#define GFT_PROFILE_KEY_IP_VERSION_SHIFT 0 +/* use enum gft_profile_ip_version (use enum gft_profile_ip_version) */ +#define GFT_PROFILE_KEY_TUNNEL_IP_VERSION_MASK 0x1 +#define GFT_PROFILE_KEY_TUNNEL_IP_VERSION_SHIFT 1 +/* use enum gft_profile_upper_protocol_type + * (use enum gft_profile_upper_protocol_type) + */ +#define GFT_PROFILE_KEY_UPPER_PROTOCOL_TYPE_MASK 0xF +#define GFT_PROFILE_KEY_UPPER_PROTOCOL_TYPE_SHIFT 2 +/* use enum gft_profile_tunnel_type (use enum gft_profile_tunnel_type) */ +#define GFT_PROFILE_KEY_TUNNEL_TYPE_MASK 0xF +#define GFT_PROFILE_KEY_TUNNEL_TYPE_SHIFT 6 +#define GFT_PROFILE_KEY_PF_ID_MASK 0xF +#define GFT_PROFILE_KEY_PF_ID_SHIFT 10 +#define GFT_PROFILE_KEY_RESERVED0_MASK 0x3 +#define GFT_PROFILE_KEY_RESERVED0_SHIFT 14 +}; + + +/* + * Used in gft_profile_key: Indication for tunnel type + */ +enum gft_profile_tunnel_type { + GFT_PROFILE_NO_TUNNEL = 0, + GFT_PROFILE_VXLAN_TUNNEL = 1, + GFT_PROFILE_GRE_MAC_OR_NVGRE_TUNNEL = 2, + GFT_PROFILE_GRE_IP_TUNNEL = 3, + GFT_PROFILE_GENEVE_MAC_TUNNEL = 4, + GFT_PROFILE_GENEVE_IP_TUNNEL = 5, + MAX_GFT_PROFILE_TUNNEL_TYPE +}; + + +/* + * Used in gft_profile_key: Indication for protocol type + */ +enum gft_profile_upper_protocol_type { + GFT_PROFILE_ROCE_PROTOCOL = 0, + GFT_PROFILE_RROCE_PROTOCOL = 1, + GFT_PROFILE_FCOE_PROTOCOL = 2, + GFT_PROFILE_ICMP_PROTOCOL = 3, + GFT_PROFILE_ARP_PROTOCOL = 4, + GFT_PROFILE_USER_TCP_SRC_PORT_1_INNER = 5, + GFT_PROFILE_USER_TCP_DST_PORT_1_INNER = 6, + GFT_PROFILE_TCP_PROTOCOL = 7, + GFT_PROFILE_USER_UDP_DST_PORT_1_INNER = 8, + GFT_PROFILE_USER_UDP_DST_PORT_2_OUTER = 9, + GFT_PROFILE_UDP_PROTOCOL = 10, + GFT_PROFILE_USER_IP_1_INNER = 11, + GFT_PROFILE_USER_IP_2_OUTER = 12, + GFT_PROFILE_USER_ETH_1_INNER = 13, + GFT_PROFILE_USER_ETH_2_OUTER = 14, + GFT_PROFILE_RAW = 15, + MAX_GFT_PROFILE_UPPER_PROTOCOL_TYPE +}; + + +/* + * GFT RAM line struct + */ +struct gft_ram_line { + __le32 lo; +#define GFT_RAM_LINE_VLAN_SELECT_MASK 0x3 +#define GFT_RAM_LINE_VLAN_SELECT_SHIFT 0 +#define GFT_RAM_LINE_TUNNEL_ENTROPHY_MASK 0x1 +#define GFT_RAM_LINE_TUNNEL_ENTROPHY_SHIFT 2 +#define GFT_RAM_LINE_TUNNEL_TTL_EQUAL_ONE_MASK 0x1 +#define GFT_RAM_LINE_TUNNEL_TTL_EQUAL_ONE_SHIFT 3 +#define GFT_RAM_LINE_TUNNEL_TTL_MASK 0x1 +#define GFT_RAM_LINE_TUNNEL_TTL_SHIFT 4 +#define GFT_RAM_LINE_TUNNEL_ETHERTYPE_MASK 0x1 +#define GFT_RAM_LINE_TUNNEL_ETHERTYPE_SHIFT 5 +#define GFT_RAM_LINE_TUNNEL_DST_PORT_MASK 0x1 +#define GFT_RAM_LINE_TUNNEL_DST_PORT_SHIFT 6 +#define GFT_RAM_LINE_TUNNEL_SRC_PORT_MASK 0x1 +#define GFT_RAM_LINE_TUNNEL_SRC_PORT_SHIFT 7 +#define GFT_RAM_LINE_TUNNEL_DSCP_MASK 0x1 +#define GFT_RAM_LINE_TUNNEL_DSCP_SHIFT 8 +#define GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL_MASK 0x1 +#define GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL_SHIFT 9 +#define GFT_RAM_LINE_TUNNEL_DST_IP_MASK 0x1 +#define GFT_RAM_LINE_TUNNEL_DST_IP_SHIFT 10 +#define GFT_RAM_LINE_TUNNEL_SRC_IP_MASK 0x1 +#define GFT_RAM_LINE_TUNNEL_SRC_IP_SHIFT 11 +#define GFT_RAM_LINE_TUNNEL_PRIORITY_MASK 0x1 +#define GFT_RAM_LINE_TUNNEL_PRIORITY_SHIFT 12 +#define GFT_RAM_LINE_TUNNEL_PROVIDER_VLAN_MASK 0x1 +#define GFT_RAM_LINE_TUNNEL_PROVIDER_VLAN_SHIFT 13 +#define GFT_RAM_LINE_TUNNEL_VLAN_MASK 0x1 +#define GFT_RAM_LINE_TUNNEL_VLAN_SHIFT 14 +#define GFT_RAM_LINE_TUNNEL_DST_MAC_MASK 0x1 +#define 
GFT_RAM_LINE_TUNNEL_DST_MAC_SHIFT 15 +#define GFT_RAM_LINE_TUNNEL_SRC_MAC_MASK 0x1 +#define GFT_RAM_LINE_TUNNEL_SRC_MAC_SHIFT 16 +#define GFT_RAM_LINE_TTL_EQUAL_ONE_MASK 0x1 +#define GFT_RAM_LINE_TTL_EQUAL_ONE_SHIFT 17 +#define GFT_RAM_LINE_TTL_MASK 0x1 +#define GFT_RAM_LINE_TTL_SHIFT 18 +#define GFT_RAM_LINE_ETHERTYPE_MASK 0x1 +#define GFT_RAM_LINE_ETHERTYPE_SHIFT 19 +#define GFT_RAM_LINE_RESERVED0_MASK 0x1 +#define GFT_RAM_LINE_RESERVED0_SHIFT 20 +#define GFT_RAM_LINE_TCP_FLAG_FIN_MASK 0x1 +#define GFT_RAM_LINE_TCP_FLAG_FIN_SHIFT 21 +#define GFT_RAM_LINE_TCP_FLAG_SYN_MASK 0x1 +#define GFT_RAM_LINE_TCP_FLAG_SYN_SHIFT 22 +#define GFT_RAM_LINE_TCP_FLAG_RST_MASK 0x1 +#define GFT_RAM_LINE_TCP_FLAG_RST_SHIFT 23 +#define GFT_RAM_LINE_TCP_FLAG_PSH_MASK 0x1 +#define GFT_RAM_LINE_TCP_FLAG_PSH_SHIFT 24 +#define GFT_RAM_LINE_TCP_FLAG_ACK_MASK 0x1 +#define GFT_RAM_LINE_TCP_FLAG_ACK_SHIFT 25 +#define GFT_RAM_LINE_TCP_FLAG_URG_MASK 0x1 +#define GFT_RAM_LINE_TCP_FLAG_URG_SHIFT 26 +#define GFT_RAM_LINE_TCP_FLAG_ECE_MASK 0x1 +#define GFT_RAM_LINE_TCP_FLAG_ECE_SHIFT 27 +#define GFT_RAM_LINE_TCP_FLAG_CWR_MASK 0x1 +#define GFT_RAM_LINE_TCP_FLAG_CWR_SHIFT 28 +#define GFT_RAM_LINE_TCP_FLAG_NS_MASK 0x1 +#define GFT_RAM_LINE_TCP_FLAG_NS_SHIFT 29 +#define GFT_RAM_LINE_DST_PORT_MASK 0x1 +#define GFT_RAM_LINE_DST_PORT_SHIFT 30 +#define GFT_RAM_LINE_SRC_PORT_MASK 0x1U +#define GFT_RAM_LINE_SRC_PORT_SHIFT 31 + __le32 hi; +#define GFT_RAM_LINE_DSCP_MASK 0x1 +#define GFT_RAM_LINE_DSCP_SHIFT 0 +#define GFT_RAM_LINE_OVER_IP_PROTOCOL_MASK 0x1 +#define GFT_RAM_LINE_OVER_IP_PROTOCOL_SHIFT 1 +#define GFT_RAM_LINE_DST_IP_MASK 0x1 +#define GFT_RAM_LINE_DST_IP_SHIFT 2 +#define GFT_RAM_LINE_SRC_IP_MASK 0x1 +#define GFT_RAM_LINE_SRC_IP_SHIFT 3 +#define GFT_RAM_LINE_PRIORITY_MASK 0x1 +#define GFT_RAM_LINE_PRIORITY_SHIFT 4 +#define GFT_RAM_LINE_PROVIDER_VLAN_MASK 0x1 +#define GFT_RAM_LINE_PROVIDER_VLAN_SHIFT 5 +#define GFT_RAM_LINE_VLAN_MASK 0x1 +#define GFT_RAM_LINE_VLAN_SHIFT 6 +#define GFT_RAM_LINE_DST_MAC_MASK 0x1 +#define GFT_RAM_LINE_DST_MAC_SHIFT 7 +#define GFT_RAM_LINE_SRC_MAC_MASK 0x1 +#define GFT_RAM_LINE_SRC_MAC_SHIFT 8 +#define GFT_RAM_LINE_TENANT_ID_MASK 0x1 +#define GFT_RAM_LINE_TENANT_ID_SHIFT 9 +#define GFT_RAM_LINE_RESERVED1_MASK 0x3FFFFF +#define GFT_RAM_LINE_RESERVED1_SHIFT 10 +}; + + +/* + * Used in the first 2 bits for gft_ram_line: Indication for vlan mask + */ +enum gft_vlan_select { + INNER_PROVIDER_VLAN = 0, + INNER_VLAN = 1, + OUTER_PROVIDER_VLAN = 2, + OUTER_VLAN = 3, + MAX_GFT_VLAN_SELECT +}; + + +#endif /* __ECORE_HSI_ETH__ */ diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_hsi_init_func.h b/src/spdk/dpdk/drivers/net/qede/base/ecore_hsi_init_func.h new file mode 100644 index 000000000..7efe2eff1 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_hsi_init_func.h @@ -0,0 +1,143 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. + * All rights reserved. 
+ * www.cavium.com + */ + +#ifndef __ECORE_HSI_INIT_FUNC__ +#define __ECORE_HSI_INIT_FUNC__ +/********************************/ +/* HSI Init Functions constants */ +/********************************/ + +/* Number of VLAN priorities */ +#define NUM_OF_VLAN_PRIORITIES 8 + +/* Size of CRC8 lookup table */ +#ifndef LINUX_REMOVE +#define CRC8_TABLE_SIZE 256 +#endif + +/* + * BRB RAM init requirements + */ +struct init_brb_ram_req { + u32 guranteed_per_tc /* guaranteed size per TC, in bytes */; + u32 headroom_per_tc /* headroom size per TC, in bytes */; + u32 min_pkt_size /* min packet size, in bytes */; + u32 max_ports_per_engine /* min packet size, in bytes */; + u8 num_active_tcs[MAX_NUM_PORTS] /* number of active TCs per port */; +}; + + +/* + * ETS per-TC init requirements + */ +struct init_ets_tc_req { +/* if set, this TC participates in the arbitration with a strict priority + * (the priority is equal to the TC ID) + */ + u8 use_sp; +/* if set, this TC participates in the arbitration with a WFQ weight + * (indicated by the weight field) + */ + u8 use_wfq; + u16 weight /* An arbitration weight. Valid only if use_wfq is set. */; +}; + +/* + * ETS init requirements + */ +struct init_ets_req { + u32 mtu /* Max packet size (in bytes) */; +/* ETS initialization requirements per TC. */ + struct init_ets_tc_req tc_req[NUM_OF_TCS]; +}; + + + +/* + * NIG LB RL init requirements + */ +struct init_nig_lb_rl_req { +/* Global MAC+LB RL rate (in Mbps). If set to 0, the RL will be disabled. */ + u16 lb_mac_rate; +/* Global LB RL rate (in Mbps). If set to 0, the RL will be disabled. */ + u16 lb_rate; + u32 mtu /* Max packet size (in bytes) */; +/* RL rate per physical TC (in Mbps). If set to 0, the RL will be disabled. */ + u16 tc_rate[NUM_OF_PHYS_TCS]; +}; + + +/* + * NIG TC mapping for each priority + */ +struct init_nig_pri_tc_map_entry { + u8 tc_id /* the mapped TC ID */; + u8 valid /* indicates if the mapping entry is valid */; +}; + + +/* + * NIG priority to TC map init requirements + */ +struct init_nig_pri_tc_map_req { + struct init_nig_pri_tc_map_entry pri[NUM_OF_VLAN_PRIORITIES]; +}; + + +/* + * QM per global RL init parameters + */ +struct init_qm_global_rl_params { +/* Rate limit in Mb/sec units. If set to zero, the link speed is uwsed + * instead. + */ + u32 rate_limit; +}; + + +/* + * QM per port init parameters + */ +struct init_qm_port_params { + u8 active /* Indicates if this port is active */; +/* Vector of valid bits for active TCs used by this port */ + u8 active_phys_tcs; +/* number of PBF command lines that can be used by this port */ + u16 num_pbf_cmd_lines; +/* number of BTB blocks that can be used by this port */ + u16 num_btb_blocks; + u16 reserved; +}; + + +/* + * QM per-PQ init parameters + */ +struct init_qm_pq_params { + u8 vport_id /* VPORT ID */; + u8 tc_id /* TC ID */; + u8 wrr_group /* WRR group */; +/* Indicates if a rate limiter should be allocated for the PQ (0/1) */ + u8 rl_valid; + u16 rl_id /* RL ID, valid only if rl_valid is true */; + u8 port_id /* Port ID */; + u8 reserved; +}; + + +/* + * QM per VPORT init parameters + */ +struct init_qm_vport_params { +/* WFQ weight. A value of 0 means dont configure. ignored if VPORT WFQ is + * globally disabled. + */ + u16 wfq; +/* the first Tx PQ ID associated with this VPORT for each TC. 
*/ + u16 first_tx_pq_id[NUM_OF_TCS]; +}; + +#endif /* __ECORE_HSI_INIT_FUNC__ */ diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_hsi_init_tool.h b/src/spdk/dpdk/drivers/net/qede/base/ecore_hsi_init_tool.h new file mode 100644 index 000000000..4f878d061 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_hsi_init_tool.h @@ -0,0 +1,443 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. + * All rights reserved. + * www.cavium.com + */ + +#ifndef __ECORE_HSI_INIT_TOOL__ +#define __ECORE_HSI_INIT_TOOL__ +/**************************************/ +/* Init Tool HSI constants and macros */ +/**************************************/ + +/* Width of GRC address in bits (addresses are specified in dwords) */ +#define GRC_ADDR_BITS 23 +#define MAX_GRC_ADDR ((1 << GRC_ADDR_BITS) - 1) + +/* indicates an init that should be applied to any phase ID */ +#define ANY_PHASE_ID 0xffff + +/* Max size in dwords of a zipped array */ +#define MAX_ZIPPED_SIZE 8192 + +enum chip_ids { + CHIP_BB, + CHIP_K2, + MAX_CHIP_IDS +}; + + +/* + * Binary buffer header + */ +struct bin_buffer_hdr { +/* buffer offset in bytes from the beginning of the binary file */ + u32 offset; + u32 length /* buffer length in bytes */; +}; + + +/* + * binary init buffer types + */ +enum bin_init_buffer_type { + BIN_BUF_INIT_FW_VER_INFO /* fw_ver_info struct */, + BIN_BUF_INIT_CMD /* init commands */, + BIN_BUF_INIT_VAL /* init data */, + BIN_BUF_INIT_MODE_TREE /* init modes tree */, + BIN_BUF_INIT_IRO /* internal RAM offsets */, + BIN_BUF_INIT_OVERLAYS /* FW overlays (except overlay 0) */, + MAX_BIN_INIT_BUFFER_TYPE +}; + + +/* + * FW overlay buffer header + */ +struct fw_overlay_buf_hdr { + u32 data; +#define FW_OVERLAY_BUF_HDR_STORM_ID_MASK 0xFF /* Storm ID */ +#define FW_OVERLAY_BUF_HDR_STORM_ID_SHIFT 0 +/* Size of Storm FW overlay buffer in dwords */ +#define FW_OVERLAY_BUF_HDR_BUF_SIZE_MASK 0xFFFFFF +#define FW_OVERLAY_BUF_HDR_BUF_SIZE_SHIFT 8 +}; + + +/* + * init array header: raw + */ +struct init_array_raw_hdr { + u32 data; +/* Init array type, from init_array_types enum */ +#define INIT_ARRAY_RAW_HDR_TYPE_MASK 0xF +#define INIT_ARRAY_RAW_HDR_TYPE_SHIFT 0 +/* init array params */ +#define INIT_ARRAY_RAW_HDR_PARAMS_MASK 0xFFFFFFF +#define INIT_ARRAY_RAW_HDR_PARAMS_SHIFT 4 +}; + +/* + * init array header: standard + */ +struct init_array_standard_hdr { + u32 data; +/* Init array type, from init_array_types enum */ +#define INIT_ARRAY_STANDARD_HDR_TYPE_MASK 0xF +#define INIT_ARRAY_STANDARD_HDR_TYPE_SHIFT 0 +/* Init array size (in dwords) */ +#define INIT_ARRAY_STANDARD_HDR_SIZE_MASK 0xFFFFFFF +#define INIT_ARRAY_STANDARD_HDR_SIZE_SHIFT 4 +}; + +/* + * init array header: zipped + */ +struct init_array_zipped_hdr { + u32 data; +/* Init array type, from init_array_types enum */ +#define INIT_ARRAY_ZIPPED_HDR_TYPE_MASK 0xF +#define INIT_ARRAY_ZIPPED_HDR_TYPE_SHIFT 0 +/* Init array zipped size (in bytes) */ +#define INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE_MASK 0xFFFFFFF +#define INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE_SHIFT 4 +}; + +/* + * init array header: pattern + */ +struct init_array_pattern_hdr { + u32 data; +/* Init array type, from init_array_types enum */ +#define INIT_ARRAY_PATTERN_HDR_TYPE_MASK 0xF +#define INIT_ARRAY_PATTERN_HDR_TYPE_SHIFT 0 +/* pattern size in dword */ +#define INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE_MASK 0xF +#define INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE_SHIFT 4 +/* pattern repetitions */ +#define INIT_ARRAY_PATTERN_HDR_REPETITIONS_MASK 0xFFFFFF +#define 
INIT_ARRAY_PATTERN_HDR_REPETITIONS_SHIFT 8 +}; + +/* + * init array header union + */ +union init_array_hdr { + struct init_array_raw_hdr raw /* raw init array header */; +/* standard init array header */ + struct init_array_standard_hdr standard; + struct init_array_zipped_hdr zipped /* zipped init array header */; + struct init_array_pattern_hdr pattern /* pattern init array header */; +}; + + +enum dbg_bus_clients { + DBG_BUS_CLIENT_RBCN, + DBG_BUS_CLIENT_RBCP, + DBG_BUS_CLIENT_RBCR, + DBG_BUS_CLIENT_RBCT, + DBG_BUS_CLIENT_RBCU, + DBG_BUS_CLIENT_RBCF, + DBG_BUS_CLIENT_RBCX, + DBG_BUS_CLIENT_RBCS, + DBG_BUS_CLIENT_RBCH, + DBG_BUS_CLIENT_RBCZ, + DBG_BUS_CLIENT_OTHER_ENGINE, + DBG_BUS_CLIENT_TIMESTAMP, + DBG_BUS_CLIENT_CPU, + DBG_BUS_CLIENT_RBCY, + DBG_BUS_CLIENT_RBCQ, + DBG_BUS_CLIENT_RBCM, + DBG_BUS_CLIENT_RBCB, + DBG_BUS_CLIENT_RBCW, + DBG_BUS_CLIENT_RBCV, + MAX_DBG_BUS_CLIENTS +}; + + +enum init_modes { + MODE_BB_A0_DEPRECATED, + MODE_BB, + MODE_K2, + MODE_ASIC, + MODE_EMUL_REDUCED, + MODE_EMUL_FULL, + MODE_FPGA, + MODE_CHIPSIM, + MODE_SF, + MODE_MF_SD, + MODE_MF_SI, + MODE_PORTS_PER_ENG_1, + MODE_PORTS_PER_ENG_2, + MODE_PORTS_PER_ENG_4, + MODE_100G, + MODE_SKIP_PRAM_INIT, + MODE_EMUL_MAC, + MAX_INIT_MODES +}; + + +enum init_phases { + PHASE_ENGINE, + PHASE_PORT, + PHASE_PF, + PHASE_VF, + PHASE_QM_PF, + MAX_INIT_PHASES +}; + + +enum init_split_types { + SPLIT_TYPE_NONE, + SPLIT_TYPE_PORT, + SPLIT_TYPE_PF, + SPLIT_TYPE_PORT_PF, + SPLIT_TYPE_VF, + MAX_INIT_SPLIT_TYPES +}; + + +/* + * init array types + */ +enum init_array_types { + INIT_ARR_STANDARD /* standard init array */, + INIT_ARR_ZIPPED /* zipped init array */, + INIT_ARR_PATTERN /* a repeated pattern */, + MAX_INIT_ARRAY_TYPES +}; + + + +/* + * init operation: callback + */ +struct init_callback_op { + u32 op_data; +/* Init operation, from init_op_types enum */ +#define INIT_CALLBACK_OP_OP_MASK 0xF +#define INIT_CALLBACK_OP_OP_SHIFT 0 +#define INIT_CALLBACK_OP_RESERVED_MASK 0xFFFFFFF +#define INIT_CALLBACK_OP_RESERVED_SHIFT 4 + u16 callback_id /* Callback ID */; + u16 block_id /* Blocks ID */; +}; + + +/* + * init operation: delay + */ +struct init_delay_op { + u32 op_data; +/* Init operation, from init_op_types enum */ +#define INIT_DELAY_OP_OP_MASK 0xF +#define INIT_DELAY_OP_OP_SHIFT 0 +#define INIT_DELAY_OP_RESERVED_MASK 0xFFFFFFF +#define INIT_DELAY_OP_RESERVED_SHIFT 4 + __le32 delay /* delay in us */; +}; + + +/* + * init operation: if_mode + */ +struct init_if_mode_op { + u32 op_data; +/* Init operation, from init_op_types enum */ +#define INIT_IF_MODE_OP_OP_MASK 0xF +#define INIT_IF_MODE_OP_OP_SHIFT 0 +#define INIT_IF_MODE_OP_RESERVED1_MASK 0xFFF +#define INIT_IF_MODE_OP_RESERVED1_SHIFT 4 +/* Commands to skip if the modes dont match */ +#define INIT_IF_MODE_OP_CMD_OFFSET_MASK 0xFFFF +#define INIT_IF_MODE_OP_CMD_OFFSET_SHIFT 16 + u16 reserved2; + u16 modes_buf_offset /* offset (in bytes) in modes expression buffer */; +}; + + +/* + * init operation: if_phase + */ +struct init_if_phase_op { + u32 op_data; +/* Init operation, from init_op_types enum */ +#define INIT_IF_PHASE_OP_OP_MASK 0xF +#define INIT_IF_PHASE_OP_OP_SHIFT 0 +/* Indicates if DMAE is enabled in this phase */ +#define INIT_IF_PHASE_OP_DMAE_ENABLE_MASK 0x1 +#define INIT_IF_PHASE_OP_DMAE_ENABLE_SHIFT 4 +#define INIT_IF_PHASE_OP_RESERVED1_MASK 0x7FF +#define INIT_IF_PHASE_OP_RESERVED1_SHIFT 5 +/* Commands to skip if the phases dont match */ +#define INIT_IF_PHASE_OP_CMD_OFFSET_MASK 0xFFFF +#define INIT_IF_PHASE_OP_CMD_OFFSET_SHIFT 16 + u32 phase_data; +#define 
INIT_IF_PHASE_OP_PHASE_MASK 0xFF /* Init phase */ +#define INIT_IF_PHASE_OP_PHASE_SHIFT 0 +#define INIT_IF_PHASE_OP_RESERVED2_MASK 0xFF +#define INIT_IF_PHASE_OP_RESERVED2_SHIFT 8 +#define INIT_IF_PHASE_OP_PHASE_ID_MASK 0xFFFF /* Init phase ID */ +#define INIT_IF_PHASE_OP_PHASE_ID_SHIFT 16 +}; + + +/* + * init mode operators + */ +enum init_mode_ops { + INIT_MODE_OP_NOT /* init mode not operator */, + INIT_MODE_OP_OR /* init mode or operator */, + INIT_MODE_OP_AND /* init mode and operator */, + MAX_INIT_MODE_OPS +}; + + +/* + * init operation: raw + */ +struct init_raw_op { + u32 op_data; +/* Init operation, from init_op_types enum */ +#define INIT_RAW_OP_OP_MASK 0xF +#define INIT_RAW_OP_OP_SHIFT 0 +#define INIT_RAW_OP_PARAM1_MASK 0xFFFFFFF /* init param 1 */ +#define INIT_RAW_OP_PARAM1_SHIFT 4 + u32 param2 /* Init param 2 */; +}; + +/* + * init array params + */ +struct init_op_array_params { + u16 size /* array size in dwords */; + u16 offset /* array start offset in dwords */; +}; + +/* + * Write init operation arguments + */ +union init_write_args { +/* value to write, used when init source is INIT_SRC_INLINE */ + u32 inline_val; +/* number of zeros to write, used when init source is INIT_SRC_ZEROS */ + u32 zeros_count; +/* array offset to write, used when init source is INIT_SRC_ARRAY */ + u32 array_offset; +/* runtime array params to write, used when init source is INIT_SRC_RUNTIME */ + struct init_op_array_params runtime; +}; + +/* + * init operation: write + */ +struct init_write_op { + u32 data; +/* init operation, from init_op_types enum */ +#define INIT_WRITE_OP_OP_MASK 0xF +#define INIT_WRITE_OP_OP_SHIFT 0 +/* init source type, taken from init_source_types enum */ +#define INIT_WRITE_OP_SOURCE_MASK 0x7 +#define INIT_WRITE_OP_SOURCE_SHIFT 4 +#define INIT_WRITE_OP_RESERVED_MASK 0x1 +#define INIT_WRITE_OP_RESERVED_SHIFT 7 +/* indicates if the register is wide-bus */ +#define INIT_WRITE_OP_WIDE_BUS_MASK 0x1 +#define INIT_WRITE_OP_WIDE_BUS_SHIFT 8 +/* internal (absolute) GRC address, in dwords */ +#define INIT_WRITE_OP_ADDRESS_MASK 0x7FFFFF +#define INIT_WRITE_OP_ADDRESS_SHIFT 9 + union init_write_args args /* Write init operation arguments */; +}; + +/* + * init operation: read + */ +struct init_read_op { + u32 op_data; +/* init operation, from init_op_types enum */ +#define INIT_READ_OP_OP_MASK 0xF +#define INIT_READ_OP_OP_SHIFT 0 +/* polling type, from init_poll_types enum */ +#define INIT_READ_OP_POLL_TYPE_MASK 0xF +#define INIT_READ_OP_POLL_TYPE_SHIFT 4 +#define INIT_READ_OP_RESERVED_MASK 0x1 +#define INIT_READ_OP_RESERVED_SHIFT 8 +/* internal (absolute) GRC address, in dwords */ +#define INIT_READ_OP_ADDRESS_MASK 0x7FFFFF +#define INIT_READ_OP_ADDRESS_SHIFT 9 +/* expected polling value, used only when polling is done */ + u32 expected_val; +}; + +/* + * Init operations union + */ +union init_op { + struct init_raw_op raw /* raw init operation */; + struct init_write_op write /* write init operation */; + struct init_read_op read /* read init operation */; + struct init_if_mode_op if_mode /* if_mode init operation */; + struct init_if_phase_op if_phase /* if_phase init operation */; + struct init_callback_op callback /* callback init operation */; + struct init_delay_op delay /* delay init operation */; +}; + + + +/* + * Init command operation types + */ +enum init_op_types { + INIT_OP_READ /* GRC read init command */, + INIT_OP_WRITE /* GRC write init command */, +/* Skip init commands if the init modes expression doesn't match */ + INIT_OP_IF_MODE, +/* Skip init commands if 
the init phase doesn't match */ + INIT_OP_IF_PHASE, + INIT_OP_DELAY /* delay init command */, + INIT_OP_CALLBACK /* callback init command */, + MAX_INIT_OP_TYPES +}; + + +/* + * init polling types + */ +enum init_poll_types { + INIT_POLL_NONE /* No polling */, + INIT_POLL_EQ /* init value is included in the init command */, + INIT_POLL_OR /* init value is all zeros */, + INIT_POLL_AND /* init value is an array of values */, + MAX_INIT_POLL_TYPES +}; + + + + +/* + * init source types + */ +enum init_source_types { + INIT_SRC_INLINE /* init value is included in the init command */, + INIT_SRC_ZEROS /* init value is all zeros */, + INIT_SRC_ARRAY /* init value is an array of values */, + INIT_SRC_RUNTIME /* init value is provided during runtime */, + MAX_INIT_SOURCE_TYPES +}; + + + + +/* + * Internal RAM Offsets macro data + */ +struct iro { + u32 base /* RAM field offset */; + u16 m1 /* multiplier 1 */; + u16 m2 /* multiplier 2 */; + u16 m3 /* multiplier 3 */; + u16 size /* RAM field size */; +}; + +#endif /* __ECORE_HSI_INIT_TOOL__ */ diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_hw.c b/src/spdk/dpdk/drivers/net/qede/base/ecore_hw.c new file mode 100644 index 000000000..1db39d6a3 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_hw.c @@ -0,0 +1,1111 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. + * All rights reserved. + * www.cavium.com + */ + +#include "bcm_osal.h" +#include "ecore_hsi_common.h" +#include "ecore_status.h" +#include "ecore.h" +#include "ecore_hw.h" +#include "reg_addr.h" +#include "ecore_utils.h" +#include "ecore_iov_api.h" +#include "ecore_gtt_values.h" +#include "ecore_dev_api.h" + +#ifndef ASIC_ONLY +#define ECORE_EMUL_FACTOR 2000 +#define ECORE_FPGA_FACTOR 200 +#endif + +#define ECORE_BAR_ACQUIRE_TIMEOUT 1000 + +/* Invalid values */ +#define ECORE_BAR_INVALID_OFFSET (OSAL_CPU_TO_LE32(-1)) + +struct ecore_ptt { + osal_list_entry_t list_entry; + unsigned int idx; + struct pxp_ptt_entry pxp; + u8 hwfn_id; +}; + +struct ecore_ptt_pool { + osal_list_t free_list; + osal_spinlock_t lock; /* ptt synchronized access */ + struct ecore_ptt ptts[PXP_EXTERNAL_BAR_PF_WINDOW_NUM]; +}; + +void __ecore_ptt_pool_free(struct ecore_hwfn *p_hwfn) +{ + OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_ptt_pool); + p_hwfn->p_ptt_pool = OSAL_NULL; +} + +enum _ecore_status_t ecore_ptt_pool_alloc(struct ecore_hwfn *p_hwfn) +{ + struct ecore_ptt_pool *p_pool = OSAL_ALLOC(p_hwfn->p_dev, + GFP_KERNEL, + sizeof(*p_pool)); + int i; + + if (!p_pool) + return ECORE_NOMEM; + + OSAL_LIST_INIT(&p_pool->free_list); + for (i = 0; i < PXP_EXTERNAL_BAR_PF_WINDOW_NUM; i++) { + p_pool->ptts[i].idx = i; + p_pool->ptts[i].pxp.offset = ECORE_BAR_INVALID_OFFSET; + p_pool->ptts[i].pxp.pretend.control = 0; + p_pool->ptts[i].hwfn_id = p_hwfn->my_id; + + /* There are special PTT entries that are taken only by design. + * The rest are added ot the list for general usage. 
+ */ + if (i >= RESERVED_PTT_MAX) + OSAL_LIST_PUSH_HEAD(&p_pool->ptts[i].list_entry, + &p_pool->free_list); + } + + p_hwfn->p_ptt_pool = p_pool; +#ifdef CONFIG_ECORE_LOCK_ALLOC + if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_pool->lock)) { + __ecore_ptt_pool_free(p_hwfn); + return ECORE_NOMEM; + } +#endif + OSAL_SPIN_LOCK_INIT(&p_pool->lock); + return ECORE_SUCCESS; +} + +void ecore_gtt_init(struct ecore_hwfn *p_hwfn) +{ + u32 gtt_base; + u32 i; + + /* Set the global windows */ + gtt_base = PXP_PF_WINDOW_ADMIN_START + PXP_PF_WINDOW_ADMIN_GLOBAL_START; + + for (i = 0; i < OSAL_ARRAY_SIZE(pxp_global_win); i++) + if (pxp_global_win[i]) + REG_WR(p_hwfn, gtt_base + i * PXP_GLOBAL_ENTRY_SIZE, + pxp_global_win[i]); +} + +void ecore_ptt_invalidate(struct ecore_hwfn *p_hwfn) +{ + struct ecore_ptt *p_ptt; + int i; + + for (i = 0; i < PXP_EXTERNAL_BAR_PF_WINDOW_NUM; i++) { + p_ptt = &p_hwfn->p_ptt_pool->ptts[i]; + p_ptt->pxp.offset = ECORE_BAR_INVALID_OFFSET; + } +} + +void ecore_ptt_pool_free(struct ecore_hwfn *p_hwfn) +{ +#ifdef CONFIG_ECORE_LOCK_ALLOC + if (p_hwfn->p_ptt_pool) + OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->p_ptt_pool->lock); +#endif + __ecore_ptt_pool_free(p_hwfn); +} + +struct ecore_ptt *ecore_ptt_acquire(struct ecore_hwfn *p_hwfn) +{ + struct ecore_ptt *p_ptt; + unsigned int i; + + /* Take the free PTT from the list */ + for (i = 0; i < ECORE_BAR_ACQUIRE_TIMEOUT; i++) { + OSAL_SPIN_LOCK(&p_hwfn->p_ptt_pool->lock); + if (!OSAL_LIST_IS_EMPTY(&p_hwfn->p_ptt_pool->free_list)) { + p_ptt = OSAL_LIST_FIRST_ENTRY( + &p_hwfn->p_ptt_pool->free_list, + struct ecore_ptt, list_entry); + OSAL_LIST_REMOVE_ENTRY(&p_ptt->list_entry, + &p_hwfn->p_ptt_pool->free_list); + + OSAL_SPIN_UNLOCK(&p_hwfn->p_ptt_pool->lock); + + DP_VERBOSE(p_hwfn, ECORE_MSG_HW, + "allocated ptt %d\n", p_ptt->idx); + + return p_ptt; + } + + OSAL_SPIN_UNLOCK(&p_hwfn->p_ptt_pool->lock); + OSAL_MSLEEP(1); + } + + DP_NOTICE(p_hwfn, true, + "PTT acquire timeout - failed to allocate PTT\n"); + return OSAL_NULL; +} + +void ecore_ptt_release(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) +{ + /* This PTT should not be set to pretend if it is being released */ + /* TODO - add some pretend sanity checks, to make sure pretend + * isn't set on this ptt + */ + + OSAL_SPIN_LOCK(&p_hwfn->p_ptt_pool->lock); + OSAL_LIST_PUSH_HEAD(&p_ptt->list_entry, &p_hwfn->p_ptt_pool->free_list); + OSAL_SPIN_UNLOCK(&p_hwfn->p_ptt_pool->lock); +} + +static u32 ecore_ptt_get_hw_addr(struct ecore_ptt *p_ptt) +{ + /* The HW is using DWORDS and we need to translate it to Bytes */ + return OSAL_LE32_TO_CPU(p_ptt->pxp.offset) << 2; +} + +static u32 ecore_ptt_config_addr(struct ecore_ptt *p_ptt) +{ + return PXP_PF_WINDOW_ADMIN_PER_PF_START + + p_ptt->idx * sizeof(struct pxp_ptt_entry); +} + +u32 ecore_ptt_get_bar_addr(struct ecore_ptt *p_ptt) +{ + return PXP_EXTERNAL_BAR_PF_WINDOW_START + + p_ptt->idx * PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE; +} + +void ecore_ptt_set_win(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, u32 new_hw_addr) +{ + u32 prev_hw_addr; + + prev_hw_addr = ecore_ptt_get_hw_addr(p_ptt); + + if (new_hw_addr == prev_hw_addr) + return; + + /* Update PTT entery in admin window */ + DP_VERBOSE(p_hwfn, ECORE_MSG_HW, + "Updating PTT entry %d to offset 0x%x\n", + p_ptt->idx, new_hw_addr); + + /* The HW is using DWORDS and the address is in Bytes */ + p_ptt->pxp.offset = OSAL_CPU_TO_LE32(new_hw_addr >> 2); + + REG_WR(p_hwfn, + ecore_ptt_config_addr(p_ptt) + + OFFSETOF(struct pxp_ptt_entry, offset), + OSAL_LE32_TO_CPU(p_ptt->pxp.offset)); +} + +static u32 
ecore_set_ptt(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, u32 hw_addr) +{ + u32 win_hw_addr = ecore_ptt_get_hw_addr(p_ptt); + u32 offset; + + offset = hw_addr - win_hw_addr; + + if (p_ptt->hwfn_id != p_hwfn->my_id) + DP_NOTICE(p_hwfn, true, + "ptt[%d] of hwfn[%02x] is used by hwfn[%02x]!\n", + p_ptt->idx, p_ptt->hwfn_id, p_hwfn->my_id); + + /* Verify the address is within the window */ + if (hw_addr < win_hw_addr || + offset >= PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE) { + ecore_ptt_set_win(p_hwfn, p_ptt, hw_addr); + offset = 0; + } + + return ecore_ptt_get_bar_addr(p_ptt) + offset; +} + +struct ecore_ptt *ecore_get_reserved_ptt(struct ecore_hwfn *p_hwfn, + enum reserved_ptts ptt_idx) +{ + if (ptt_idx >= RESERVED_PTT_MAX) { + DP_NOTICE(p_hwfn, true, + "Requested PTT %d is out of range\n", ptt_idx); + return OSAL_NULL; + } + + return &p_hwfn->p_ptt_pool->ptts[ptt_idx]; +} + +static bool ecore_is_reg_fifo_empty(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + bool is_empty = true; + u32 bar_addr; + + if (!p_hwfn->p_dev->chk_reg_fifo) + goto out; + + /* ecore_rd() cannot be used here since it calls this function */ + bar_addr = ecore_set_ptt(p_hwfn, p_ptt, GRC_REG_TRACE_FIFO_VALID_DATA); + is_empty = REG_RD(p_hwfn, bar_addr) == 0; + +#ifndef ASIC_ONLY + if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) + OSAL_UDELAY(100); +#endif + +out: + return is_empty; +} + +void ecore_wr(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, u32 hw_addr, u32 val) +{ + bool prev_fifo_err; + u32 bar_addr; + + prev_fifo_err = !ecore_is_reg_fifo_empty(p_hwfn, p_ptt); + + bar_addr = ecore_set_ptt(p_hwfn, p_ptt, hw_addr); + REG_WR(p_hwfn, bar_addr, val); + DP_VERBOSE(p_hwfn, ECORE_MSG_HW, + "bar_addr 0x%x, hw_addr 0x%x, val 0x%x\n", + bar_addr, hw_addr, val); + +#ifndef ASIC_ONLY + if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) + OSAL_UDELAY(100); +#endif + + OSAL_WARN(!prev_fifo_err && !ecore_is_reg_fifo_empty(p_hwfn, p_ptt), + "reg_fifo err was caused by a call to ecore_wr(0x%x, 0x%x)\n", + hw_addr, val); +} + +u32 ecore_rd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 hw_addr) +{ + bool prev_fifo_err; + u32 bar_addr, val; + + prev_fifo_err = !ecore_is_reg_fifo_empty(p_hwfn, p_ptt); + + bar_addr = ecore_set_ptt(p_hwfn, p_ptt, hw_addr); + val = REG_RD(p_hwfn, bar_addr); + + DP_VERBOSE(p_hwfn, ECORE_MSG_HW, + "bar_addr 0x%x, hw_addr 0x%x, val 0x%x\n", + bar_addr, hw_addr, val); + +#ifndef ASIC_ONLY + if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) + OSAL_UDELAY(100); +#endif + + OSAL_WARN(!prev_fifo_err && !ecore_is_reg_fifo_empty(p_hwfn, p_ptt), + "reg_fifo error was caused by a call to ecore_rd(0x%x)\n", + hw_addr); + + return val; +} + +static void ecore_memcpy_hw(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + void *addr, + u32 hw_addr, osal_size_t n, bool to_device) +{ + u32 dw_count, *host_addr, hw_offset; + osal_size_t quota, done = 0; + u32 OSAL_IOMEM *reg_addr; + + while (done < n) { + quota = OSAL_MIN_T(osal_size_t, n - done, + PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE); + + if (IS_PF(p_hwfn->p_dev)) { + ecore_ptt_set_win(p_hwfn, p_ptt, hw_addr + done); + hw_offset = ecore_ptt_get_bar_addr(p_ptt); + } else { + hw_offset = hw_addr + done; + } + + dw_count = quota / 4; + host_addr = (u32 *)((u8 *)addr + done); + reg_addr = (u32 OSAL_IOMEM *)OSAL_REG_ADDR(p_hwfn, hw_offset); + + if (to_device) + while (dw_count--) + DIRECT_REG_WR(p_hwfn, reg_addr++, *host_addr++); + else + while (dw_count--) + *host_addr++ = DIRECT_REG_RD(p_hwfn, + reg_addr++); + + done += quota; + } +} + +void ecore_memcpy_from(struct 
ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + void *dest, u32 hw_addr, osal_size_t n) +{ + DP_VERBOSE(p_hwfn, ECORE_MSG_HW, + "hw_addr 0x%x, dest %p hw_addr 0x%x, size %lu\n", + hw_addr, dest, hw_addr, (unsigned long)n); + + ecore_memcpy_hw(p_hwfn, p_ptt, dest, hw_addr, n, false); +} + +void ecore_memcpy_to(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 hw_addr, void *src, osal_size_t n) +{ + DP_VERBOSE(p_hwfn, ECORE_MSG_HW, + "hw_addr 0x%x, hw_addr 0x%x, src %p size %lu\n", + hw_addr, hw_addr, src, (unsigned long)n); + + ecore_memcpy_hw(p_hwfn, p_ptt, src, hw_addr, n, true); +} + +void ecore_fid_pretend(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, u16 fid) +{ + u16 control = 0; + + SET_FIELD(control, PXP_PRETEND_CMD_IS_CONCRETE, 1); + SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_FUNCTION, 1); + +/* Every pretend undos prev pretends, including previous port pretend */ + + SET_FIELD(control, PXP_PRETEND_CMD_PORT, 0); + SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 0); + SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1); + + if (!GET_FIELD(fid, PXP_CONCRETE_FID_VFVALID)) + fid = GET_FIELD(fid, PXP_CONCRETE_FID_PFID); + + p_ptt->pxp.pretend.control = OSAL_CPU_TO_LE16(control); + p_ptt->pxp.pretend.fid.concrete_fid.fid = OSAL_CPU_TO_LE16(fid); + + REG_WR(p_hwfn, + ecore_ptt_config_addr(p_ptt) + + OFFSETOF(struct pxp_ptt_entry, pretend), + *(u32 *)&p_ptt->pxp.pretend); +} + +void ecore_port_pretend(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, u8 port_id) +{ + u16 control = 0; + + SET_FIELD(control, PXP_PRETEND_CMD_PORT, port_id); + SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 1); + SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1); + p_ptt->pxp.pretend.control = OSAL_CPU_TO_LE16(control); + + REG_WR(p_hwfn, + ecore_ptt_config_addr(p_ptt) + + OFFSETOF(struct pxp_ptt_entry, pretend), + *(u32 *)&p_ptt->pxp.pretend); +} + +void ecore_port_unpretend(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) +{ + u16 control = 0; + + SET_FIELD(control, PXP_PRETEND_CMD_PORT, 0); + SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 0); + SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1); + + p_ptt->pxp.pretend.control = OSAL_CPU_TO_LE16(control); + + REG_WR(p_hwfn, + ecore_ptt_config_addr(p_ptt) + + OFFSETOF(struct pxp_ptt_entry, pretend), + *(u32 *)&p_ptt->pxp.pretend); +} + +void ecore_port_fid_pretend(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, + u8 port_id, u16 fid) +{ + u16 control = 0; + + SET_FIELD(control, PXP_PRETEND_CMD_PORT, port_id); + SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 1); + SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1); + + SET_FIELD(control, PXP_PRETEND_CMD_IS_CONCRETE, 1); + SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_FUNCTION, 1); + + if (!GET_FIELD(fid, PXP_CONCRETE_FID_VFVALID)) + fid = GET_FIELD(fid, PXP_CONCRETE_FID_PFID); + + p_ptt->pxp.pretend.control = OSAL_CPU_TO_LE16(control); + p_ptt->pxp.pretend.fid.concrete_fid.fid = OSAL_CPU_TO_LE16(fid); + + REG_WR(p_hwfn, + ecore_ptt_config_addr(p_ptt) + + OFFSETOF(struct pxp_ptt_entry, pretend), + *(u32 *)&p_ptt->pxp.pretend); +} + +u32 ecore_vfid_to_concrete(struct ecore_hwfn *p_hwfn, u8 vfid) +{ + u32 concrete_fid = 0; + + SET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID, p_hwfn->rel_pf_id); + SET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFID, vfid); + SET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFVALID, 1); + + return concrete_fid; +} + +/* Not in use @DPDK + * Ecore HW lock + * ============= + * Although the implementation is ready, today we don't have any flow that + * utliizes said locks - and 
we want to keep it this way. + * If this changes, this needs to be revisted. + */ + +/* DMAE */ + +#define ECORE_DMAE_FLAGS_IS_SET(params, flag) \ + ((params) != OSAL_NULL && \ + GET_FIELD((params)->flags, DMAE_PARAMS_##flag)) + +static void ecore_dmae_opcode(struct ecore_hwfn *p_hwfn, + const u8 is_src_type_grc, + const u8 is_dst_type_grc, + struct dmae_params *p_params) +{ + u8 src_pf_id, dst_pf_id, port_id; + u16 opcode_b = 0; + u32 opcode = 0; + + /* Whether the source is the PCIe or the GRC. + * 0- The source is the PCIe + * 1- The source is the GRC. + */ + opcode |= (is_src_type_grc ? dmae_cmd_src_grc : dmae_cmd_src_pcie) << + DMAE_CMD_SRC_SHIFT; + src_pf_id = ECORE_DMAE_FLAGS_IS_SET(p_params, SRC_PF_VALID) ? + p_params->src_pf_id : p_hwfn->rel_pf_id; + opcode |= (src_pf_id & DMAE_CMD_SRC_PF_ID_MASK) << + DMAE_CMD_SRC_PF_ID_SHIFT; + + /* The destination of the DMA can be: 0-None 1-PCIe 2-GRC 3-None */ + opcode |= (is_dst_type_grc ? dmae_cmd_dst_grc : dmae_cmd_dst_pcie) << + DMAE_CMD_DST_SHIFT; + dst_pf_id = ECORE_DMAE_FLAGS_IS_SET(p_params, DST_PF_VALID) ? + p_params->dst_pf_id : p_hwfn->rel_pf_id; + opcode |= (dst_pf_id & DMAE_CMD_DST_PF_ID_MASK) << + DMAE_CMD_DST_PF_ID_SHIFT; + + /* DMAE_E4_TODO need to check which value to specify here. */ + /* opcode |= (!b_complete_to_host)<< DMAE_CMD_C_DST_SHIFT; */ + + /* Whether to write a completion word to the completion destination: + * 0-Do not write a completion word + * 1-Write the completion word + */ + opcode |= DMAE_CMD_COMP_WORD_EN_MASK << DMAE_CMD_COMP_WORD_EN_SHIFT; + opcode |= DMAE_CMD_SRC_ADDR_RESET_MASK << DMAE_CMD_SRC_ADDR_RESET_SHIFT; + + if (ECORE_DMAE_FLAGS_IS_SET(p_params, COMPLETION_DST)) + opcode |= 1 << DMAE_CMD_COMP_FUNC_SHIFT; + + /* swapping mode 3 - big endian there should be a define ifdefed in + * the HSI somewhere. Since it is currently + */ + opcode |= DMAE_CMD_ENDIANITY << DMAE_CMD_ENDIANITY_MODE_SHIFT; + + port_id = (ECORE_DMAE_FLAGS_IS_SET(p_params, PORT_VALID)) ? 
+ p_params->port_id : p_hwfn->port_id; + opcode |= port_id << DMAE_CMD_PORT_ID_SHIFT; + + /* reset source address in next go */ + opcode |= DMAE_CMD_SRC_ADDR_RESET_MASK << DMAE_CMD_SRC_ADDR_RESET_SHIFT; + + /* reset dest address in next go */ + opcode |= DMAE_CMD_DST_ADDR_RESET_MASK << DMAE_CMD_DST_ADDR_RESET_SHIFT; + + /* SRC/DST VFID: all 1's - pf, otherwise VF id */ + if (ECORE_DMAE_FLAGS_IS_SET(p_params, SRC_VF_VALID)) { + opcode |= (1 << DMAE_CMD_SRC_VF_ID_VALID_SHIFT); + opcode_b |= (p_params->src_vf_id << DMAE_CMD_SRC_VF_ID_SHIFT); + } else { + opcode_b |= (DMAE_CMD_SRC_VF_ID_MASK << + DMAE_CMD_SRC_VF_ID_SHIFT); + } + if (ECORE_DMAE_FLAGS_IS_SET(p_params, DST_VF_VALID)) { + opcode |= 1 << DMAE_CMD_DST_VF_ID_VALID_SHIFT; + opcode_b |= p_params->dst_vf_id << DMAE_CMD_DST_VF_ID_SHIFT; + } else { + opcode_b |= DMAE_CMD_DST_VF_ID_MASK << DMAE_CMD_DST_VF_ID_SHIFT; + } + + p_hwfn->dmae_info.p_dmae_cmd->opcode = OSAL_CPU_TO_LE32(opcode); + p_hwfn->dmae_info.p_dmae_cmd->opcode_b = OSAL_CPU_TO_LE16(opcode_b); +} + +static u32 ecore_dmae_idx_to_go_cmd(u8 idx) +{ + OSAL_BUILD_BUG_ON((DMAE_REG_GO_C31 - DMAE_REG_GO_C0) != 31 * 4); + + /* All the DMAE 'go' registers form an array in internal memory */ + return DMAE_REG_GO_C0 + (idx << 2); +} + +static enum _ecore_status_t ecore_dmae_post_command(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + struct dmae_cmd *p_command = p_hwfn->dmae_info.p_dmae_cmd; + u8 idx_cmd = p_hwfn->dmae_info.channel, i; + enum _ecore_status_t ecore_status = ECORE_SUCCESS; + + /* verify address is not OSAL_NULL */ + if ((((!p_command->dst_addr_lo) && (!p_command->dst_addr_hi)) || + ((!p_command->src_addr_lo) && (!p_command->src_addr_hi)))) { + DP_NOTICE(p_hwfn, true, + "source or destination address 0 idx_cmd=%d\n" + "opcode = [0x%08x,0x%04x] len=0x%x" + " src=0x%x:%x dst=0x%x:%x\n", + idx_cmd, + OSAL_LE32_TO_CPU(p_command->opcode), + OSAL_LE16_TO_CPU(p_command->opcode_b), + OSAL_LE16_TO_CPU(p_command->length_dw), + OSAL_LE32_TO_CPU(p_command->src_addr_hi), + OSAL_LE32_TO_CPU(p_command->src_addr_lo), + OSAL_LE32_TO_CPU(p_command->dst_addr_hi), + OSAL_LE32_TO_CPU(p_command->dst_addr_lo)); + + return ECORE_INVAL; + } + + DP_VERBOSE(p_hwfn, ECORE_MSG_HW, + "Posting DMAE command [idx %d]: opcode = [0x%08x,0x%04x]" + "len=0x%x src=0x%x:%x dst=0x%x:%x\n", + idx_cmd, + OSAL_LE32_TO_CPU(p_command->opcode), + OSAL_LE16_TO_CPU(p_command->opcode_b), + OSAL_LE16_TO_CPU(p_command->length_dw), + OSAL_LE32_TO_CPU(p_command->src_addr_hi), + OSAL_LE32_TO_CPU(p_command->src_addr_lo), + OSAL_LE32_TO_CPU(p_command->dst_addr_hi), + OSAL_LE32_TO_CPU(p_command->dst_addr_lo)); + + /* Copy the command to DMAE - need to do it before every call + * for source/dest address no reset. + * The number of commands have been increased to 16 (previous was 14) + * The first 9 DWs are the command registers, the 10 DW is the + * GO register, and + * the rest are result registers (which are read only by the client). + */ + for (i = 0; i < DMAE_CMD_SIZE; i++) { + u32 data = (i < DMAE_CMD_SIZE_TO_FILL) ? 
+ *(((u32 *)p_command) + i) : 0; + + ecore_wr(p_hwfn, p_ptt, + DMAE_REG_CMD_MEM + + (idx_cmd * DMAE_CMD_SIZE * sizeof(u32)) + + (i * sizeof(u32)), data); + } + + ecore_wr(p_hwfn, p_ptt, + ecore_dmae_idx_to_go_cmd(idx_cmd), DMAE_GO_VALUE); + + return ecore_status; +} + +enum _ecore_status_t ecore_dmae_info_alloc(struct ecore_hwfn *p_hwfn) +{ + dma_addr_t *p_addr = &p_hwfn->dmae_info.completion_word_phys_addr; + struct dmae_cmd **p_cmd = &p_hwfn->dmae_info.p_dmae_cmd; + u32 **p_buff = &p_hwfn->dmae_info.p_intermediate_buffer; + u32 **p_comp = &p_hwfn->dmae_info.p_completion_word; + + *p_comp = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, p_addr, sizeof(u32)); + if (*p_comp == OSAL_NULL) { + DP_NOTICE(p_hwfn, false, + "Failed to allocate `p_completion_word'\n"); + goto err; + } + + p_addr = &p_hwfn->dmae_info.dmae_cmd_phys_addr; + *p_cmd = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, p_addr, + sizeof(struct dmae_cmd)); + if (*p_cmd == OSAL_NULL) { + DP_NOTICE(p_hwfn, false, + "Failed to allocate `struct dmae_cmd'\n"); + goto err; + } + + p_addr = &p_hwfn->dmae_info.intermediate_buffer_phys_addr; + *p_buff = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, p_addr, + sizeof(u32) * DMAE_MAX_RW_SIZE); + if (*p_buff == OSAL_NULL) { + DP_NOTICE(p_hwfn, false, + "Failed to allocate `intermediate_buffer'\n"); + goto err; + } + + p_hwfn->dmae_info.channel = p_hwfn->rel_pf_id; + p_hwfn->dmae_info.b_mem_ready = true; + + return ECORE_SUCCESS; +err: + ecore_dmae_info_free(p_hwfn); + return ECORE_NOMEM; +} + +void ecore_dmae_info_free(struct ecore_hwfn *p_hwfn) +{ + dma_addr_t p_phys; + + OSAL_SPIN_LOCK(&p_hwfn->dmae_info.lock); + p_hwfn->dmae_info.b_mem_ready = false; + OSAL_SPIN_UNLOCK(&p_hwfn->dmae_info.lock); + + if (p_hwfn->dmae_info.p_completion_word != OSAL_NULL) { + p_phys = p_hwfn->dmae_info.completion_word_phys_addr; + OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, + p_hwfn->dmae_info.p_completion_word, + p_phys, sizeof(u32)); + p_hwfn->dmae_info.p_completion_word = OSAL_NULL; + } + + if (p_hwfn->dmae_info.p_dmae_cmd != OSAL_NULL) { + p_phys = p_hwfn->dmae_info.dmae_cmd_phys_addr; + OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, + p_hwfn->dmae_info.p_dmae_cmd, + p_phys, sizeof(struct dmae_cmd)); + p_hwfn->dmae_info.p_dmae_cmd = OSAL_NULL; + } + + if (p_hwfn->dmae_info.p_intermediate_buffer != OSAL_NULL) { + p_phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr; + OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, + p_hwfn->dmae_info.p_intermediate_buffer, + p_phys, sizeof(u32) * DMAE_MAX_RW_SIZE); + p_hwfn->dmae_info.p_intermediate_buffer = OSAL_NULL; + } +} + +static enum _ecore_status_t ecore_dmae_operation_wait(struct ecore_hwfn *p_hwfn) +{ + u32 wait_cnt_limit = 10000, wait_cnt = 0; + enum _ecore_status_t ecore_status = ECORE_SUCCESS; + +#ifndef ASIC_ONLY + u32 factor = (CHIP_REV_IS_EMUL(p_hwfn->p_dev) ? + ECORE_EMUL_FACTOR : + (CHIP_REV_IS_FPGA(p_hwfn->p_dev) ? + ECORE_FPGA_FACTOR : 1)); + + wait_cnt_limit *= factor; +#endif + + /* DMAE_E4_TODO : TODO check if we have to call any other function + * other than BARRIER to sync the completion_word since we are not + * using the volatile keyword for this + */ + OSAL_BARRIER(p_hwfn->p_dev); + while (*p_hwfn->dmae_info.p_completion_word != DMAE_COMPLETION_VAL) { + OSAL_UDELAY(DMAE_MIN_WAIT_TIME); + if (++wait_cnt > wait_cnt_limit) { + DP_NOTICE(p_hwfn->p_dev, false, + "Timed-out waiting for operation to" + " complete. 
Completion word is 0x%08x" + " expected 0x%08x.\n", + *p_hwfn->dmae_info.p_completion_word, + DMAE_COMPLETION_VAL); + ecore_status = ECORE_TIMEOUT; + break; + } + /* to sync the completion_word since we are not + * using the volatile keyword for p_completion_word + */ + OSAL_BARRIER(p_hwfn->p_dev); + } + + if (ecore_status == ECORE_SUCCESS) + *p_hwfn->dmae_info.p_completion_word = 0; + + return ecore_status; +} + +enum ecore_dmae_address_type { + ECORE_DMAE_ADDRESS_HOST_VIRT, + ECORE_DMAE_ADDRESS_HOST_PHYS, + ECORE_DMAE_ADDRESS_GRC +}; + +static enum _ecore_status_t +ecore_dmae_execute_sub_operation(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u64 src_addr, + u64 dst_addr, + u8 src_type, u8 dst_type, u32 length_dw) +{ + dma_addr_t phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr; + struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd; + enum _ecore_status_t ecore_status = ECORE_SUCCESS; + + switch (src_type) { + case ECORE_DMAE_ADDRESS_GRC: + case ECORE_DMAE_ADDRESS_HOST_PHYS: + cmd->src_addr_hi = OSAL_CPU_TO_LE32(DMA_HI(src_addr)); + cmd->src_addr_lo = OSAL_CPU_TO_LE32(DMA_LO(src_addr)); + break; + /* for virt source addresses we use the intermediate buffer. */ + case ECORE_DMAE_ADDRESS_HOST_VIRT: + cmd->src_addr_hi = OSAL_CPU_TO_LE32(DMA_HI(phys)); + cmd->src_addr_lo = OSAL_CPU_TO_LE32(DMA_LO(phys)); + OSAL_MEMCPY(&p_hwfn->dmae_info.p_intermediate_buffer[0], + (void *)(osal_uintptr_t)src_addr, + length_dw * sizeof(u32)); + break; + default: + return ECORE_INVAL; + } + + switch (dst_type) { + case ECORE_DMAE_ADDRESS_GRC: + case ECORE_DMAE_ADDRESS_HOST_PHYS: + cmd->dst_addr_hi = OSAL_CPU_TO_LE32(DMA_HI(dst_addr)); + cmd->dst_addr_lo = OSAL_CPU_TO_LE32(DMA_LO(dst_addr)); + break; + /* for virt destination address we use the intermediate buff. */ + case ECORE_DMAE_ADDRESS_HOST_VIRT: + cmd->dst_addr_hi = OSAL_CPU_TO_LE32(DMA_HI(phys)); + cmd->dst_addr_lo = OSAL_CPU_TO_LE32(DMA_LO(phys)); + break; + default: + return ECORE_INVAL; + } + + cmd->length_dw = OSAL_CPU_TO_LE16((u16)length_dw); + + if (src_type == ECORE_DMAE_ADDRESS_HOST_VIRT || + src_type == ECORE_DMAE_ADDRESS_HOST_PHYS) + OSAL_DMA_SYNC(p_hwfn->p_dev, + (void *)HILO_U64(cmd->src_addr_hi, + cmd->src_addr_lo), + length_dw * sizeof(u32), false); + + ecore_dmae_post_command(p_hwfn, p_ptt); + + ecore_status = ecore_dmae_operation_wait(p_hwfn); + + /* TODO - is it true ? */ + if (src_type == ECORE_DMAE_ADDRESS_HOST_VIRT || + src_type == ECORE_DMAE_ADDRESS_HOST_PHYS) + OSAL_DMA_SYNC(p_hwfn->p_dev, + (void *)HILO_U64(cmd->src_addr_hi, + cmd->src_addr_lo), + length_dw * sizeof(u32), true); + + if (ecore_status != ECORE_SUCCESS) { + DP_NOTICE(p_hwfn, false, + "Wait Failed. 
source_addr 0x%lx, grc_addr 0x%lx, size_in_dwords 0x%x, intermediate buffer 0x%lx.\n", + (unsigned long)src_addr, (unsigned long)dst_addr, + length_dw, + (unsigned long)p_hwfn->dmae_info.intermediate_buffer_phys_addr); + return ecore_status; + } + + if (dst_type == ECORE_DMAE_ADDRESS_HOST_VIRT) + OSAL_MEMCPY((void *)(osal_uintptr_t)(dst_addr), + &p_hwfn->dmae_info.p_intermediate_buffer[0], + length_dw * sizeof(u32)); + + return ECORE_SUCCESS; +} + +static enum _ecore_status_t +ecore_dmae_execute_command(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u64 src_addr, + u64 dst_addr, + u8 src_type, + u8 dst_type, + u32 size_in_dwords, + struct dmae_params *p_params) +{ + dma_addr_t phys = p_hwfn->dmae_info.completion_word_phys_addr; + u16 length_cur = 0, i = 0, cnt_split = 0, length_mod = 0; + struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd; + u64 src_addr_split = 0, dst_addr_split = 0; + u16 length_limit = DMAE_MAX_RW_SIZE; + enum _ecore_status_t ecore_status = ECORE_SUCCESS; + u32 offset = 0; + + if (!p_hwfn->dmae_info.b_mem_ready) { + DP_VERBOSE(p_hwfn, ECORE_MSG_HW, + "No buffers allocated. Avoid DMAE transaction [{src: addr 0x%lx, type %d}, {dst: addr 0x%lx, type %d}, size %d].\n", + (unsigned long)src_addr, src_type, + (unsigned long)dst_addr, dst_type, + size_in_dwords); + return ECORE_NOMEM; + } + + if (p_hwfn->p_dev->recov_in_prog) { + DP_VERBOSE(p_hwfn, ECORE_MSG_HW, + "Recovery is in progress. Avoid DMAE transaction [{src: addr 0x%lx, type %d}, {dst: addr 0x%lx, type %d}, size %d].\n", + (unsigned long)src_addr, src_type, + (unsigned long)dst_addr, dst_type, + size_in_dwords); + /* Return success to let the flow to be completed successfully + * w/o any error handling. + */ + return ECORE_SUCCESS; + } + + if (!cmd) { + DP_NOTICE(p_hwfn, true, + "ecore_dmae_execute_sub_operation failed. Invalid state. source_addr 0x%lx, destination addr 0x%lx, size_in_dwords 0x%x\n", + (unsigned long)src_addr, + (unsigned long)dst_addr, + length_cur); + return ECORE_INVAL; + } + + ecore_dmae_opcode(p_hwfn, + (src_type == ECORE_DMAE_ADDRESS_GRC), + (dst_type == ECORE_DMAE_ADDRESS_GRC), p_params); + + cmd->comp_addr_lo = OSAL_CPU_TO_LE32(DMA_LO(phys)); + cmd->comp_addr_hi = OSAL_CPU_TO_LE32(DMA_HI(phys)); + cmd->comp_val = OSAL_CPU_TO_LE32(DMAE_COMPLETION_VAL); + + /* Check if the grc_addr is valid like < MAX_GRC_OFFSET */ + cnt_split = size_in_dwords / length_limit; + length_mod = size_in_dwords % length_limit; + + src_addr_split = src_addr; + dst_addr_split = dst_addr; + + for (i = 0; i <= cnt_split; i++) { + offset = length_limit * i; + + if (!ECORE_DMAE_FLAGS_IS_SET(p_params, RW_REPL_SRC)) { + if (src_type == ECORE_DMAE_ADDRESS_GRC) + src_addr_split = src_addr + offset; + else + src_addr_split = src_addr + (offset * 4); + } + + if (dst_type == ECORE_DMAE_ADDRESS_GRC) + dst_addr_split = dst_addr + offset; + else + dst_addr_split = dst_addr + (offset * 4); + + length_cur = (cnt_split == i) ? length_mod : length_limit; + + /* might be zero on last iteration */ + if (!length_cur) + continue; + + ecore_status = ecore_dmae_execute_sub_operation(p_hwfn, + p_ptt, + src_addr_split, + dst_addr_split, + src_type, + dst_type, + length_cur); + if (ecore_status != ECORE_SUCCESS) { + DP_NOTICE(p_hwfn, false, + "ecore_dmae_execute_sub_operation Failed" + " with error 0x%x. 
source_addr 0x%lx," + " dest addr 0x%lx, size_in_dwords 0x%x\n", + ecore_status, (unsigned long)src_addr, + (unsigned long)dst_addr, length_cur); + + ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_DMAE_FAIL); + break; + } + } + + return ecore_status; +} + +enum _ecore_status_t ecore_dmae_host2grc(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u64 source_addr, + u32 grc_addr, + u32 size_in_dwords, + struct dmae_params *p_params) +{ + u32 grc_addr_in_dw = grc_addr / sizeof(u32); + enum _ecore_status_t rc; + + OSAL_SPIN_LOCK(&p_hwfn->dmae_info.lock); + + rc = ecore_dmae_execute_command(p_hwfn, p_ptt, source_addr, + grc_addr_in_dw, + ECORE_DMAE_ADDRESS_HOST_VIRT, + ECORE_DMAE_ADDRESS_GRC, + size_in_dwords, p_params); + + OSAL_SPIN_UNLOCK(&p_hwfn->dmae_info.lock); + + return rc; +} + +enum _ecore_status_t ecore_dmae_grc2host(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 grc_addr, + dma_addr_t dest_addr, + u32 size_in_dwords, + struct dmae_params *p_params) +{ + u32 grc_addr_in_dw = grc_addr / sizeof(u32); + enum _ecore_status_t rc; + + OSAL_SPIN_LOCK(&p_hwfn->dmae_info.lock); + + rc = ecore_dmae_execute_command(p_hwfn, p_ptt, grc_addr_in_dw, + dest_addr, ECORE_DMAE_ADDRESS_GRC, + ECORE_DMAE_ADDRESS_HOST_VIRT, + size_in_dwords, p_params); + + OSAL_SPIN_UNLOCK(&p_hwfn->dmae_info.lock); + + return rc; +} + +enum _ecore_status_t +ecore_dmae_host2host(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + dma_addr_t source_addr, + dma_addr_t dest_addr, + u32 size_in_dwords, + struct dmae_params *p_params) +{ + enum _ecore_status_t rc; + + OSAL_SPIN_LOCK(&p_hwfn->dmae_info.lock); + + rc = ecore_dmae_execute_command(p_hwfn, p_ptt, source_addr, + dest_addr, + ECORE_DMAE_ADDRESS_HOST_PHYS, + ECORE_DMAE_ADDRESS_HOST_PHYS, + size_in_dwords, p_params); + + OSAL_SPIN_UNLOCK(&p_hwfn->dmae_info.lock); + + return rc; +} + +void ecore_hw_err_notify(struct ecore_hwfn *p_hwfn, + enum ecore_hw_err_type err_type) +{ + /* Fan failure cannot be masked by handling of another HW error */ + if (p_hwfn->p_dev->recov_in_prog && err_type != ECORE_HW_ERR_FAN_FAIL) { + DP_VERBOSE(p_hwfn, ECORE_MSG_DRV, + "Recovery is in progress." 
+ "Avoid notifying about HW error %d.\n", + err_type); + return; + } + + OSAL_HW_ERROR_OCCURRED(p_hwfn, err_type); +} + +enum _ecore_status_t ecore_dmae_sanity(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + const char *phase) +{ + u32 size = OSAL_PAGE_SIZE / 2, val; + enum _ecore_status_t rc = ECORE_SUCCESS; + dma_addr_t p_phys; + void *p_virt; + u32 *p_tmp; + + p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, &p_phys, 2 * size); + if (!p_virt) { + DP_NOTICE(p_hwfn, false, + "DMAE sanity [%s]: failed to allocate memory\n", + phase); + return ECORE_NOMEM; + } + + /* Fill the bottom half of the allocated memory with a known pattern */ + for (p_tmp = (u32 *)p_virt; + p_tmp < (u32 *)((u8 *)p_virt + size); + p_tmp++) { + /* Save the address itself as the value */ + val = (u32)(osal_uintptr_t)p_tmp; + *p_tmp = val; + } + + /* Zero the top half of the allocated memory */ + OSAL_MEM_ZERO((u8 *)p_virt + size, size); + + DP_VERBOSE(p_hwfn, ECORE_MSG_SP, + "DMAE sanity [%s]: src_addr={phys 0x%lx, virt %p}, dst_addr={phys 0x%lx, virt %p}, size 0x%x\n", + phase, (unsigned long)p_phys, p_virt, + (unsigned long)(p_phys + size), + (u8 *)p_virt + size, size); + + rc = ecore_dmae_host2host(p_hwfn, p_ptt, p_phys, p_phys + size, + size / 4 /* size_in_dwords */, + OSAL_NULL /* default parameters */); + if (rc != ECORE_SUCCESS) { + DP_NOTICE(p_hwfn, false, + "DMAE sanity [%s]: ecore_dmae_host2host() failed. rc = %d.\n", + phase, rc); + goto out; + } + + /* Verify that the top half of the allocated memory has the pattern */ + for (p_tmp = (u32 *)((u8 *)p_virt + size); + p_tmp < (u32 *)((u8 *)p_virt + (2 * size)); + p_tmp++) { + /* The corresponding address in the bottom half */ + val = (u32)(osal_uintptr_t)p_tmp - size; + + if (*p_tmp != val) { + DP_NOTICE(p_hwfn, false, + "DMAE sanity [%s]: addr={phys 0x%lx, virt %p}, read_val 0x%08x, expected_val 0x%08x\n", + phase, + (unsigned long)p_phys + + ((u8 *)p_tmp - (u8 *)p_virt), + p_tmp, *p_tmp, val); + rc = ECORE_UNKNOWN_ERROR; + goto out; + } + } + +out: + OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_virt, p_phys, 2 * size); + return rc; +} + +void ecore_ppfid_wr(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, + u8 abs_ppfid, u32 hw_addr, u32 val) +{ + u8 pfid = ECORE_PFID_BY_PPFID(p_hwfn, abs_ppfid); + + ecore_fid_pretend(p_hwfn, p_ptt, + pfid << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT); + ecore_wr(p_hwfn, p_ptt, hw_addr, val); + ecore_fid_pretend(p_hwfn, p_ptt, + p_hwfn->rel_pf_id << + PXP_PRETEND_CONCRETE_FID_PFID_SHIFT); +} + +u32 ecore_ppfid_rd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, + u8 abs_ppfid, u32 hw_addr) +{ + u8 pfid = ECORE_PFID_BY_PPFID(p_hwfn, abs_ppfid); + u32 val; + + ecore_fid_pretend(p_hwfn, p_ptt, + pfid << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT); + val = ecore_rd(p_hwfn, p_ptt, hw_addr); + ecore_fid_pretend(p_hwfn, p_ptt, + p_hwfn->rel_pf_id << + PXP_PRETEND_CONCRETE_FID_PFID_SHIFT); + + return val; +} diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_hw.h b/src/spdk/dpdk/drivers/net/qede/base/ecore_hw.h new file mode 100644 index 000000000..238bdb9db --- /dev/null +++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_hw.h @@ -0,0 +1,338 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. + * All rights reserved. 
+ * www.cavium.com + */ + +#ifndef __ECORE_HW_H__ +#define __ECORE_HW_H__ + +#include "ecore.h" + +/* Forward declaration */ +struct ecore_ptt; + +enum reserved_ptts { + RESERVED_PTT_EDIAG, + RESERVED_PTT_USER_SPACE, + RESERVED_PTT_MAIN, + RESERVED_PTT_DPC, + RESERVED_PTT_MAX +}; + +/* @@@TMP - in earlier versions of the emulation, the HW lock started from 1 + * instead of 0, this should be fixed in later HW versions. + */ +#ifndef MISC_REG_DRIVER_CONTROL_0 +#define MISC_REG_DRIVER_CONTROL_0 MISC_REG_DRIVER_CONTROL_1 +#endif +#ifndef MISC_REG_DRIVER_CONTROL_0_SIZE +#define MISC_REG_DRIVER_CONTROL_0_SIZE MISC_REG_DRIVER_CONTROL_1_SIZE +#endif + +/* Definitions for DMA constants */ +#define DMAE_GO_VALUE 0x1 + +#ifdef __BIG_ENDIAN +#define DMAE_COMPLETION_VAL 0xAED10000 +#define DMAE_CMD_ENDIANITY 0x3 +#else +#define DMAE_COMPLETION_VAL 0xD1AE +#define DMAE_CMD_ENDIANITY 0x2 +#endif + +#define DMAE_CMD_SIZE 14 +/* size of DMAE command structure to fill.. DMAE_CMD_SIZE-5 */ +#define DMAE_CMD_SIZE_TO_FILL (DMAE_CMD_SIZE - 5) +/* Minimum wait for dmae opertaion to complete 2 milliseconds */ +#define DMAE_MIN_WAIT_TIME 0x2 +#define DMAE_MAX_CLIENTS 32 + +/** +* @brief ecore_gtt_init - Initialize GTT windows +* +* @param p_hwfn +*/ +void ecore_gtt_init(struct ecore_hwfn *p_hwfn); + +/** + * @brief ecore_ptt_invalidate - Forces all ptt entries to be re-configured + * + * @param p_hwfn + */ +void ecore_ptt_invalidate(struct ecore_hwfn *p_hwfn); + +/** + * @brief ecore_ptt_pool_alloc - Allocate and initialize PTT pool + * + * @param p_hwfn + * + * @return _ecore_status_t - success (0), negative - error. + */ +enum _ecore_status_t ecore_ptt_pool_alloc(struct ecore_hwfn *p_hwfn); + +/** + * @brief ecore_ptt_pool_free - + * + * @param p_hwfn + */ +void ecore_ptt_pool_free(struct ecore_hwfn *p_hwfn); + +/** + * @brief ecore_ptt_get_bar_addr - Get PPT's external BAR address + * + * @param p_ptt + * + * @return u32 + */ +u32 ecore_ptt_get_bar_addr(struct ecore_ptt *p_ptt); + +/** + * @brief ecore_ptt_set_win - Set PTT Window's GRC BAR address + * + * @param p_hwfn + * @param p_ptt + * @param new_hw_addr + */ +void ecore_ptt_set_win(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 new_hw_addr); + +/** + * @brief ecore_get_reserved_ptt - Get a specific reserved PTT + * + * @param p_hwfn + * @param ptt_idx + * + * @return struct ecore_ptt * + */ +struct ecore_ptt *ecore_get_reserved_ptt(struct ecore_hwfn *p_hwfn, + enum reserved_ptts ptt_idx); + +/** + * @brief ecore_wr - Write value to BAR using the given ptt + * + * @param p_hwfn + * @param p_ptt + * @param hw_addr + * @param val + */ +void ecore_wr(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 hw_addr, + u32 val); + +/** + * @brief ecore_rd - Read value from BAR using the given ptt + * + * @param p_hwfn + * @param p_ptt + * @param hw_addr + */ +u32 ecore_rd(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 hw_addr); + +/** + * @brief ecore_memcpy_from - copy n bytes from BAR using the given + * ptt + * + * @param p_hwfn + * @param p_ptt + * @param dest + * @param hw_addr + * @param n + */ +void ecore_memcpy_from(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + void *dest, + u32 hw_addr, + osal_size_t n); + +/** + * @brief ecore_memcpy_to - copy n bytes to BAR using the given + * ptt + * + * @param p_hwfn + * @param p_ptt + * @param hw_addr + * @param src + * @param n + */ +void ecore_memcpy_to(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 hw_addr, + void *src, + osal_size_t n); +/** + * @brief 
ecore_fid_pretend - pretend to another function when + * accessing the ptt window. There is no way to unpretend + * a function. The only way to cancel a pretend is to + * pretend back to the original function. + * + * @param p_hwfn + * @param p_ptt + * @param fid - fid field of pxp_pretend structure. Can contain + * either pf / vf, port/path fields are don't care. + */ +void ecore_fid_pretend(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u16 fid); + +/** + * @brief ecore_port_pretend - pretend to another port when + * accessing the ptt window + * + * @param p_hwfn + * @param p_ptt + * @param port_id - the port to pretend to + */ +void ecore_port_pretend(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u8 port_id); + +/** + * @brief ecore_port_unpretend - cancel any previously set port + * pretend + * + * @param p_hwfn + * @param p_ptt + */ +void ecore_port_unpretend(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt); + +/** + * @brief ecore_port_fid_pretend - pretend to another port and another function + * when accessing the ptt window + * + * @param p_hwfn + * @param p_ptt + * @param port_id - the port to pretend to + * @param fid - fid field of pxp_pretend structure. Can contain either pf / vf. + */ +void ecore_port_fid_pretend(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, + u8 port_id, u16 fid); + +/** + * @brief ecore_vfid_to_concrete - build a concrete FID for a + * given VF ID + * + * @param p_hwfn + * @param p_ptt + * @param vfid + */ +u32 ecore_vfid_to_concrete(struct ecore_hwfn *p_hwfn, u8 vfid); + +/** +* @brief ecore_dmae_info_alloc - Init the dmae_info structure +* which is part of p_hwfn. +* @param p_hwfn +*/ +enum _ecore_status_t ecore_dmae_info_alloc(struct ecore_hwfn *p_hwfn); + +/** +* @brief ecore_dmae_info_free - Free the dmae_info structure +* which is part of p_hwfn +* +* @param p_hwfn +*/ +void ecore_dmae_info_free(struct ecore_hwfn *p_hwfn); + +/** + * @brief ecore_dmae_host2grc - copy data from source address to + * dmae registers using the given ptt + * + * @param p_hwfn + * @param p_ptt + * @param source_addr + * @param grc_addr (dmae_data_offset) + * @param size_in_dwords + * @param p_params (default parameters will be used in case of OSAL_NULL) + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t +ecore_dmae_host2grc(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u64 source_addr, + u32 grc_addr, + u32 size_in_dwords, + struct dmae_params *p_params); + +/** + * @brief ecore_dmae_grc2host - Read data from dmae data offset + * to source address using the given ptt + * + * @param p_ptt + * @param grc_addr (dmae_data_offset) + * @param dest_addr + * @param size_in_dwords + * @param p_params (default parameters will be used in case of OSAL_NULL) + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t +ecore_dmae_grc2host(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 grc_addr, + dma_addr_t dest_addr, + u32 size_in_dwords, + struct dmae_params *p_params); + +/** + * @brief ecore_dmae_host2host - copy data from to source address + * to a destination address (for SRIOV) using the given ptt + * + * @param p_hwfn + * @param p_ptt + * @param source_addr + * @param dest_addr + * @param size_in_dwords + * @param p_params (default parameters will be used in case of OSAL_NULL) + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t +ecore_dmae_host2host(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + dma_addr_t source_addr, + dma_addr_t dest_addr, + u32 size_in_dwords, + struct dmae_params 
*p_params);
+
+enum _ecore_status_t ecore_dmae_sanity(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ const char *phase);
+
+enum _ecore_status_t ecore_init_fw_data(struct ecore_dev *p_dev,
+ const u8 *fw_data);
+
+void ecore_hw_err_notify(struct ecore_hwfn *p_hwfn,
+ enum ecore_hw_err_type err_type);
+
+/**
+ * @brief ecore_ppfid_wr - Write value to BAR using the given ptt while
+ * pretending to a PF to which the given PPFID pertains.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param abs_ppfid
+ * @param hw_addr
+ * @param val
+ */
+void ecore_ppfid_wr(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ u8 abs_ppfid, u32 hw_addr, u32 val);
+
+/**
+ * @brief ecore_ppfid_rd - Read value from BAR using the given ptt while
+ * pretending to a PF to which the given PPFID pertains.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param abs_ppfid
+ * @param hw_addr
+ */
+u32 ecore_ppfid_rd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ u8 abs_ppfid, u32 hw_addr);
+
+#endif /* __ECORE_HW_H__ */
diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_hw_defs.h b/src/spdk/dpdk/drivers/net/qede/base/ecore_hw_defs.h
new file mode 100644
index 000000000..92361e79c
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_hw_defs.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ */
+
+#ifndef _ECORE_IGU_DEF_H_
+#define _ECORE_IGU_DEF_H_
+
+/* Fields of IGU PF CONFIGURATION REGISTER */
+/* function enable */
+#define IGU_PF_CONF_FUNC_EN (0x1 << 0)
+/* MSI/MSIX enable */
+#define IGU_PF_CONF_MSI_MSIX_EN (0x1 << 1)
+/* INT enable */
+#define IGU_PF_CONF_INT_LINE_EN (0x1 << 2)
+/* attention enable */
+#define IGU_PF_CONF_ATTN_BIT_EN (0x1 << 3)
+/* single ISR mode enable */
+#define IGU_PF_CONF_SINGLE_ISR_EN (0x1 << 4)
+/* simd all ones mode */
+#define IGU_PF_CONF_SIMD_MODE (0x1 << 5)
+
+/* Fields of IGU VF CONFIGURATION REGISTER */
+/* function enable */
+#define IGU_VF_CONF_FUNC_EN (0x1 << 0)
+/* MSI/MSIX enable */
+#define IGU_VF_CONF_MSI_MSIX_EN (0x1 << 1)
+/* single ISR mode enable */
+#define IGU_VF_CONF_SINGLE_ISR_EN (0x1 << 4)
+/* Parent PF */
+#define IGU_VF_CONF_PARENT_MASK (0xF)
+/* Parent PF */
+#define IGU_VF_CONF_PARENT_SHIFT 5
+
+/* Igu control commands
+ */
+enum igu_ctrl_cmd {
+ IGU_CTRL_CMD_TYPE_RD,
+ IGU_CTRL_CMD_TYPE_WR,
+ MAX_IGU_CTRL_CMD
+};
+
+/* Control register for the IGU command register
+ */
+struct igu_ctrl_reg {
+ u32 ctrl_data;
+#define IGU_CTRL_REG_FID_MASK 0xFFFF /* Opaque_FID */
+#define IGU_CTRL_REG_FID_SHIFT 0
+#define IGU_CTRL_REG_PXP_ADDR_MASK 0xFFF /* Command address */
+#define IGU_CTRL_REG_PXP_ADDR_SHIFT 16
+#define IGU_CTRL_REG_RESERVED_MASK 0x1
+#define IGU_CTRL_REG_RESERVED_SHIFT 28
+#define IGU_CTRL_REG_TYPE_MASK 0x1U /* use enum igu_ctrl_cmd */
+#define IGU_CTRL_REG_TYPE_SHIFT 31
+};
+
+#endif
diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_init_fw_funcs.c b/src/spdk/dpdk/drivers/net/qede/base/ecore_init_fw_funcs.c
new file mode 100644
index 000000000..6a52f32cc
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_init_fw_funcs.c
@@ -0,0 +1,2198 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com + */ + +#include "bcm_osal.h" +#include "ecore_hw.h" +#include "ecore_init_ops.h" +#include "reg_addr.h" +#include "ecore_rt_defs.h" +#include "ecore_hsi_init_func.h" +#include "ecore_hsi_init_tool.h" +#include "ecore_iro.h" +#include "ecore_init_fw_funcs.h" +static u16 con_region_offsets[3][NUM_OF_CONNECTION_TYPES] = { + { 400, 336, 352, 368, 304, 384, 416, 352}, /* region 3 offsets */ + { 528, 496, 416, 512, 448, 512, 544, 480}, /* region 4 offsets */ + { 608, 544, 496, 576, 576, 592, 624, 560} /* region 5 offsets */ +}; +static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES] = { + { 240, 240, 112, 0, 0, 0, 0, 96} /* region 1 offsets */ +}; + +/* General constants */ +#define QM_PQ_MEM_4KB(pq_size) \ + (pq_size ? DIV_ROUND_UP((pq_size + 1) * QM_PQ_ELEMENT_SIZE, 0x1000) : 0) +#define QM_PQ_SIZE_256B(pq_size) \ + (pq_size ? DIV_ROUND_UP(pq_size, 0x100) - 1 : 0) +#define QM_INVALID_PQ_ID 0xffff + +/* Max link speed (in Mbps) */ +#define QM_MAX_LINK_SPEED 100000 + +/* Feature enable */ +#define QM_BYPASS_EN 1 +#define QM_BYTE_CRD_EN 1 + +/* Other PQ constants */ +#define QM_OTHER_PQS_PER_PF 4 + +/* VOQ constants */ +#define MAX_NUM_VOQS (MAX_NUM_PORTS_K2 * NUM_TCS_4PORT_K2) +#define VOQS_BIT_MASK ((1 << MAX_NUM_VOQS) - 1) + +/* WFQ constants: */ + +/* Upper bound in MB, 10 * burst size of 1ms in 50Gbps */ +#define QM_WFQ_UPPER_BOUND 62500000 + +/* Bit of VOQ in WFQ VP PQ map */ +#define QM_WFQ_VP_PQ_VOQ_SHIFT 0 + +/* Bit of PF in WFQ VP PQ map */ +#define QM_WFQ_VP_PQ_PF_SHIFT 5 + +/* 0x9000 = 4*9*1024 */ +#define QM_WFQ_INC_VAL(weight) ((weight) * 0x9000) + +/* Max WFQ increment value is 0.7 * upper bound */ +#define QM_WFQ_MAX_INC_VAL ((QM_WFQ_UPPER_BOUND * 7) / 10) + +/* RL constants: */ + +/* Period in us */ +#define QM_RL_PERIOD 5 + +/* Period in 25MHz cycles */ +#define QM_RL_PERIOD_CLK_25M (25 * QM_RL_PERIOD) + +/* RL increment value - rate is specified in mbps. the factor of 1.01 was + * added after seeing only 99% factor reached in a 25Gbps port with DPDK RFC + * 2544 test. In this scenario the PF RL was reducing the line rate to 99% + * although the credit increment value was the correct one and FW calculated + * correct packet sizes. The reason for the inaccuracy of the RL is unknown at + * this point. + */ +#define QM_RL_INC_VAL(rate) \ + OSAL_MAX_T(u32, (u32)(((rate ? 
rate : 100000) * QM_RL_PERIOD * 101) / \ + (8 * 100)), 1) + +/* PF RL Upper bound is set to 10 * burst size of 1ms in 50Gbps */ +#define QM_PF_RL_UPPER_BOUND 62500000 + +/* Max PF RL increment value is 0.7 * upper bound */ +#define QM_PF_RL_MAX_INC_VAL ((QM_PF_RL_UPPER_BOUND * 7) / 10) + +/* Vport RL Upper bound, link speed is in Mpbs */ +#define QM_VP_RL_UPPER_BOUND(speed) \ + ((u32)OSAL_MAX_T(u32, QM_RL_INC_VAL(speed), 9700 + 1000)) + +/* Max Vport RL increment value is the Vport RL upper bound */ +#define QM_VP_RL_MAX_INC_VAL(speed) QM_VP_RL_UPPER_BOUND(speed) + +/* Vport RL credit threshold in case of QM bypass */ +#define QM_VP_RL_BYPASS_THRESH_SPEED (QM_VP_RL_UPPER_BOUND(10000) - 1) + +/* AFullOprtnstcCrdMask constants */ +#define QM_OPPOR_LINE_VOQ_DEF 1 +#define QM_OPPOR_FW_STOP_DEF 0 +#define QM_OPPOR_PQ_EMPTY_DEF 1 + +/* Command Queue constants: */ + +/* Pure LB CmdQ lines (+spare) */ +#define PBF_CMDQ_PURE_LB_LINES 150 + +#define PBF_CMDQ_LINES_RT_OFFSET(ext_voq) \ + (PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + \ + ext_voq * \ + (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET - \ + PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET)) + +#define PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq) \ + (PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + \ + ext_voq * \ + (PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET - \ + PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET)) + +#define QM_VOQ_LINE_CRD(pbf_cmd_lines) \ +((((pbf_cmd_lines) - 4) * 2) | QM_LINE_CRD_REG_SIGN_BIT) + +/* BTB: blocks constants (block size = 256B) */ + +/* 256B blocks in 9700B packet */ +#define BTB_JUMBO_PKT_BLOCKS 38 + +/* Headroom per-port */ +#define BTB_HEADROOM_BLOCKS BTB_JUMBO_PKT_BLOCKS +#define BTB_PURE_LB_FACTOR 10 + +/* Factored (hence really 0.7) */ +#define BTB_PURE_LB_RATIO 7 + +/* QM stop command constants */ +#define QM_STOP_PQ_MASK_WIDTH 32 +#define QM_STOP_CMD_ADDR 2 +#define QM_STOP_CMD_STRUCT_SIZE 2 +#define QM_STOP_CMD_PAUSE_MASK_OFFSET 0 +#define QM_STOP_CMD_PAUSE_MASK_SHIFT 0 +#define QM_STOP_CMD_PAUSE_MASK_MASK 0xffffffff /* @DPDK */ +#define QM_STOP_CMD_GROUP_ID_OFFSET 1 +#define QM_STOP_CMD_GROUP_ID_SHIFT 16 +#define QM_STOP_CMD_GROUP_ID_MASK 15 +#define QM_STOP_CMD_PQ_TYPE_OFFSET 1 +#define QM_STOP_CMD_PQ_TYPE_SHIFT 24 +#define QM_STOP_CMD_PQ_TYPE_MASK 1 +#define QM_STOP_CMD_MAX_POLL_COUNT 100 +#define QM_STOP_CMD_POLL_PERIOD_US 500 + +/* QM command macros */ +#define QM_CMD_STRUCT_SIZE(cmd) cmd##_STRUCT_SIZE +#define QM_CMD_SET_FIELD(var, cmd, field, value) \ + SET_FIELD(var[cmd##_##field##_OFFSET], cmd##_##field, value) + +#define QM_INIT_TX_PQ_MAP(p_hwfn, map, pq_id, vp_pq_id, \ + rl_valid, rl_id, voq, wrr) \ + do { \ + OSAL_MEMSET(&(map), 0, sizeof(map)); \ + SET_FIELD(map.reg, QM_RF_PQ_MAP_PQ_VALID, 1); \ + SET_FIELD(map.reg, QM_RF_PQ_MAP_RL_VALID, rl_valid ? 1 : 0); \ + SET_FIELD(map.reg, QM_RF_PQ_MAP_RL_ID, rl_id); \ + SET_FIELD(map.reg, QM_RF_PQ_MAP_VP_PQ_ID, vp_pq_id); \ + SET_FIELD(map.reg, QM_RF_PQ_MAP_VOQ, voq); \ + SET_FIELD(map.reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP, wrr); \ + STORE_RT_REG(p_hwfn, QM_REG_TXPQMAP_RT_OFFSET + (pq_id), \ + *((u32 *)&(map))); \ + } while (0) + +#define WRITE_PQ_INFO_TO_RAM 1 + +#define PQ_INFO_ELEMENT(vp_pq_id, pf, tc, port, rl_valid, rl_id) \ + (((vp_pq_id) << 0) | ((pf) << 12) | ((tc) << 16) | ((port) << 20) | \ + ((rl_valid ? 
1 : 0) << 22) | (((rl_id) & 255) << 24) | \ + (((rl_id) >> 8) << 9)) + +#define PQ_INFO_RAM_GRC_ADDRESS(pq_id) (XSEM_REG_FAST_MEMORY + \ + SEM_FAST_REG_INT_RAM + XSTORM_PQ_INFO_OFFSET(pq_id)) + +/******************** INTERNAL IMPLEMENTATION *********************/ + +/* Prepare PF RL enable/disable runtime init values */ +static void ecore_enable_pf_rl(struct ecore_hwfn *p_hwfn, bool pf_rl_en) +{ + STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0); + if (pf_rl_en) { + /* Enable RLs for all VOQs */ + STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_RT_OFFSET, + VOQS_BIT_MASK); + + /* Write RL period */ + STORE_RT_REG(p_hwfn, QM_REG_RLPFPERIOD_RT_OFFSET, + QM_RL_PERIOD_CLK_25M); + STORE_RT_REG(p_hwfn, QM_REG_RLPFPERIODTIMER_RT_OFFSET, + QM_RL_PERIOD_CLK_25M); + + /* Set credit threshold for QM bypass flow */ + if (QM_BYPASS_EN) + STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET, + QM_PF_RL_UPPER_BOUND); + } +} + +/* Prepare PF WFQ enable/disable runtime init values */ +static void ecore_enable_pf_wfq(struct ecore_hwfn *p_hwfn, bool pf_wfq_en) +{ + STORE_RT_REG(p_hwfn, QM_REG_WFQPFENABLE_RT_OFFSET, pf_wfq_en ? 1 : 0); + + /* Set credit threshold for QM bypass flow */ + if (pf_wfq_en && QM_BYPASS_EN) + STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET, + QM_WFQ_UPPER_BOUND); +} + +/* Prepare global RL enable/disable runtime init values */ +static void ecore_enable_global_rl(struct ecore_hwfn *p_hwfn, + bool global_rl_en) +{ + STORE_RT_REG(p_hwfn, QM_REG_RLGLBLENABLE_RT_OFFSET, + global_rl_en ? 1 : 0); + if (global_rl_en) { + /* Write RL period (use timer 0 only) */ + STORE_RT_REG(p_hwfn, QM_REG_RLGLBLPERIOD_0_RT_OFFSET, + QM_RL_PERIOD_CLK_25M); + STORE_RT_REG(p_hwfn, QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET, + QM_RL_PERIOD_CLK_25M); + + /* Set credit threshold for QM bypass flow */ + if (QM_BYPASS_EN) + STORE_RT_REG(p_hwfn, + QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET, + QM_VP_RL_BYPASS_THRESH_SPEED); + } +} + +/* Prepare VPORT WFQ enable/disable runtime init values */ +static void ecore_enable_vport_wfq(struct ecore_hwfn *p_hwfn, bool vport_wfq_en) +{ + STORE_RT_REG(p_hwfn, QM_REG_WFQVPENABLE_RT_OFFSET, + vport_wfq_en ? 1 : 0); + + /* Set credit threshold for QM bypass flow */ + if (vport_wfq_en && QM_BYPASS_EN) + STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET, + QM_WFQ_UPPER_BOUND); +} + +/* Prepare runtime init values to allocate PBF command queue lines for + * the specified VOQ + */ +static void ecore_cmdq_lines_voq_rt_init(struct ecore_hwfn *p_hwfn, + u8 voq, + u16 cmdq_lines) +{ + u32 qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines); + + OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq), + (u32)cmdq_lines); + STORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + voq, qm_line_crd); + STORE_RT_REG(p_hwfn, QM_REG_VOQINITCRDLINE_RT_OFFSET + voq, + qm_line_crd); +} + +/* Prepare runtime init values to allocate PBF command queue lines. */ +static void ecore_cmdq_lines_rt_init(struct ecore_hwfn *p_hwfn, + u8 max_ports_per_engine, + u8 max_phys_tcs_per_port, + struct init_qm_port_params + port_params[MAX_NUM_PORTS]) +{ + u8 tc, voq, port_id, num_tcs_in_port; + + /* Clear PBF lines of all VOQs */ + for (voq = 0; voq < MAX_NUM_VOQS; voq++) + STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq), 0); + + for (port_id = 0; port_id < max_ports_per_engine; port_id++) { + u16 phys_lines, phys_lines_per_tc; + + if (!port_params[port_id].active) + continue; + + /* Find number of command queue lines to divide between the + * active physical TCs. 
+ */
+ phys_lines = port_params[port_id].num_pbf_cmd_lines;
+ phys_lines -= PBF_CMDQ_PURE_LB_LINES;
+
+ /* Find #lines per active physical TC */
+ num_tcs_in_port = 0;
+ for (tc = 0; tc < max_phys_tcs_per_port; tc++)
+ if (((port_params[port_id].active_phys_tcs >> tc) &
+ 0x1) == 1)
+ num_tcs_in_port++;
+ phys_lines_per_tc = phys_lines / num_tcs_in_port;
+
+ /* Init registers per active TC */
+ for (tc = 0; tc < max_phys_tcs_per_port; tc++) {
+ voq = VOQ(port_id, tc, max_phys_tcs_per_port);
+ if (((port_params[port_id].active_phys_tcs >>
+ tc) & 0x1) == 1)
+ ecore_cmdq_lines_voq_rt_init(p_hwfn, voq,
+ phys_lines_per_tc);
+ }
+
+ /* Init registers for pure LB TC */
+ voq = VOQ(port_id, PURE_LB_TC, max_phys_tcs_per_port);
+ ecore_cmdq_lines_voq_rt_init(p_hwfn, voq,
+ PBF_CMDQ_PURE_LB_LINES);
+ }
+}
+
+/*
+ * Prepare runtime init values to allocate guaranteed BTB blocks for the
+ * specified port. The guaranteed BTB space is divided between the TCs as
+ * follows (shared space is currently not used):
+ * 1. Parameters:
+ * B - BTB blocks for this port
+ * C - Number of physical TCs for this port
+ * 2. Calculation:
+ * a. 38 blocks (9700B jumbo frame) are allocated for global per port
+ * headroom
+ * b. B = B - 38 (remainder after global headroom allocation)
+ * c. MAX(38, B/(C+0.7)) blocks are allocated for the pure LB VOQ.
+ * d. B = B - MAX(38, B/(C+0.7)) (remainder after pure LB allocation).
+ * e. B/C blocks are allocated for each physical TC.
+ * Assumptions:
+ * - MTU is up to 9700 bytes (38 blocks)
+ * - All TCs are considered symmetrical (same rate and packet size)
+ * - No optimization for lossy TC (all are considered lossless). Shared space is
+ * not enabled and allocated for each TC.
+ */
+static void ecore_btb_blocks_rt_init(struct ecore_hwfn *p_hwfn,
+ u8 max_ports_per_engine,
+ u8 max_phys_tcs_per_port,
+ struct init_qm_port_params
+ port_params[MAX_NUM_PORTS])
+{
+ u32 usable_blocks, pure_lb_blocks, phys_blocks;
+ u8 tc, voq, port_id, num_tcs_in_port;
+
+ for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
+ if (!port_params[port_id].active)
+ continue;
+
+ /* Subtract headroom blocks */
+ usable_blocks = port_params[port_id].num_btb_blocks -
+ BTB_HEADROOM_BLOCKS;
+
+ /* Find blocks per physical TC. Use factor to avoid floating
+ * arithmetic.
+ */
+ num_tcs_in_port = 0;
+ for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++)
+ if (((port_params[port_id].active_phys_tcs >> tc) &
+ 0x1) == 1)
+ num_tcs_in_port++;
+
+ pure_lb_blocks = (usable_blocks * BTB_PURE_LB_FACTOR) /
+ (num_tcs_in_port * BTB_PURE_LB_FACTOR +
+ BTB_PURE_LB_RATIO);
+ pure_lb_blocks = OSAL_MAX_T(u32, BTB_JUMBO_PKT_BLOCKS,
+ pure_lb_blocks /
+ BTB_PURE_LB_FACTOR);
+ phys_blocks = (usable_blocks - pure_lb_blocks) /
+ num_tcs_in_port;
+
+ /* Init physical TCs */
+ for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
+ if (((port_params[port_id].active_phys_tcs >> tc) &
+ 0x1) == 1) {
+ voq = VOQ(port_id, tc, max_phys_tcs_per_port);
+ STORE_RT_REG(p_hwfn,
+ PBF_BTB_GUARANTEED_RT_OFFSET(voq),
+ phys_blocks);
+ }
+ }
+
+ /* Init pure LB TC */
+ voq = VOQ(port_id, PURE_LB_TC, max_phys_tcs_per_port);
+ STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(voq),
+ pure_lb_blocks);
+ }
+}
+
+/* Prepare runtime init values for the specified RL.
+ * If global_rl_params is OSAL_NULL, max link speed (100Gbps) is used instead.
+ * Return -1 on error.
+ */ +static int ecore_global_rl_rt_init(struct ecore_hwfn *p_hwfn, + struct init_qm_global_rl_params + global_rl_params[COMMON_MAX_QM_GLOBAL_RLS]) +{ + u32 upper_bound = QM_VP_RL_UPPER_BOUND(QM_MAX_LINK_SPEED) | + (u32)QM_RL_CRD_REG_SIGN_BIT; + u32 inc_val; + u16 rl_id; + + /* Go over all global RLs */ + for (rl_id = 0; rl_id < MAX_QM_GLOBAL_RLS; rl_id++) { + u32 rate_limit = global_rl_params ? + global_rl_params[rl_id].rate_limit : 0; + + inc_val = QM_RL_INC_VAL(rate_limit ? + rate_limit : QM_MAX_LINK_SPEED); + if (inc_val > QM_VP_RL_MAX_INC_VAL(QM_MAX_LINK_SPEED)) { + DP_NOTICE(p_hwfn, true, "Invalid rate limit configuration.\n"); + return -1; + } + + STORE_RT_REG(p_hwfn, QM_REG_RLGLBLCRD_RT_OFFSET + rl_id, + (u32)QM_RL_CRD_REG_SIGN_BIT); + STORE_RT_REG(p_hwfn, QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + rl_id, + upper_bound); + STORE_RT_REG(p_hwfn, QM_REG_RLGLBLINCVAL_RT_OFFSET + rl_id, + inc_val); + } + + return 0; +} + +/* Prepare Tx PQ mapping runtime init values for the specified PF */ +static int ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u8 pf_id, + u8 max_phys_tcs_per_port, + bool is_pf_loading, + u32 num_pf_cids, + u32 num_vf_cids, + u16 start_pq, + u16 num_pf_pqs, + u16 num_vf_pqs, + u16 start_vport, + u32 base_mem_addr_4kb, + struct init_qm_pq_params *pq_params, + struct init_qm_vport_params *vport_params) +{ + /* A bit per Tx PQ indicating if the PQ is associated with a VF */ + u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 }; + u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE; + u16 num_pqs, first_pq_group, last_pq_group, i, j, pq_id, pq_group; + u32 pq_mem_4kb, vport_pq_mem_4kb, mem_addr_4kb; + #if (WRITE_PQ_INFO_TO_RAM != 0) + u32 pq_info = 0; + #endif + + num_pqs = num_pf_pqs + num_vf_pqs; + + first_pq_group = start_pq / QM_PF_QUEUE_GROUP_SIZE; + last_pq_group = (start_pq + num_pqs - 1) / QM_PF_QUEUE_GROUP_SIZE; + + pq_mem_4kb = QM_PQ_MEM_4KB(num_pf_cids); + vport_pq_mem_4kb = QM_PQ_MEM_4KB(num_vf_cids); + mem_addr_4kb = base_mem_addr_4kb; + + /* Set mapping from PQ group to PF */ + for (pq_group = first_pq_group; pq_group <= last_pq_group; pq_group++) + STORE_RT_REG(p_hwfn, QM_REG_PQTX2PF_0_RT_OFFSET + pq_group, + (u32)(pf_id)); + + /* Set PQ sizes */ + STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_0_RT_OFFSET, + QM_PQ_SIZE_256B(num_pf_cids)); + STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_1_RT_OFFSET, + QM_PQ_SIZE_256B(num_vf_cids)); + + /* Go over all Tx PQs */ + for (i = 0, pq_id = start_pq; i < num_pqs; i++, pq_id++) { + u16 first_tx_pq_id, vport_id_in_pf; + struct qm_rf_pq_map tx_pq_map; + bool is_vf_pq; + u8 voq; + + voq = VOQ(pq_params[i].port_id, pq_params[i].tc_id, + max_phys_tcs_per_port); + is_vf_pq = (i >= num_pf_pqs); + + /* Update first Tx PQ of VPORT/TC */ + vport_id_in_pf = pq_params[i].vport_id - start_vport; + first_tx_pq_id = + vport_params[vport_id_in_pf].first_tx_pq_id[pq_params[i].tc_id]; + if (first_tx_pq_id == QM_INVALID_PQ_ID) { + u32 map_val = (voq << QM_WFQ_VP_PQ_VOQ_SHIFT) | + (pf_id << QM_WFQ_VP_PQ_PF_SHIFT); + + /* Create new VP PQ */ + vport_params[vport_id_in_pf]. 
+ first_tx_pq_id[pq_params[i].tc_id] = pq_id; + first_tx_pq_id = pq_id; + + /* Map VP PQ to VOQ and PF */ + STORE_RT_REG(p_hwfn, QM_REG_WFQVPMAP_RT_OFFSET + + first_tx_pq_id, map_val); + } + + /* Prepare PQ map entry */ + QM_INIT_TX_PQ_MAP(p_hwfn, tx_pq_map, pq_id, first_tx_pq_id, + pq_params[i].rl_valid, pq_params[i].rl_id, + voq, pq_params[i].wrr_group); + + /* Set PQ base address */ + STORE_RT_REG(p_hwfn, QM_REG_BASEADDRTXPQ_RT_OFFSET + pq_id, + mem_addr_4kb); + + /* Clear PQ pointer table entry (64 bit) */ + if (is_pf_loading) + for (j = 0; j < 2; j++) + STORE_RT_REG(p_hwfn, QM_REG_PTRTBLTX_RT_OFFSET + + (pq_id * 2) + j, 0); + + /* Write PQ info to RAM */ +#if (WRITE_PQ_INFO_TO_RAM != 0) + pq_info = PQ_INFO_ELEMENT(first_tx_pq_id, pf_id, + pq_params[i].tc_id, + pq_params[i].port_id, + pq_params[i].rl_valid, + pq_params[i].rl_id); + ecore_wr(p_hwfn, p_ptt, PQ_INFO_RAM_GRC_ADDRESS(pq_id), + pq_info); +#endif + + /* If VF PQ, add indication to PQ VF mask */ + if (is_vf_pq) { + tx_pq_vf_mask[pq_id / QM_PF_QUEUE_GROUP_SIZE] |= + (1 << (pq_id % QM_PF_QUEUE_GROUP_SIZE)); + mem_addr_4kb += vport_pq_mem_4kb; + } else { + mem_addr_4kb += pq_mem_4kb; + } + } + + /* Store Tx PQ VF mask to size select register */ + for (i = 0; i < num_tx_pq_vf_masks; i++) + if (tx_pq_vf_mask[i]) + STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET + + i, tx_pq_vf_mask[i]); + + return 0; +} + +/* Prepare Other PQ mapping runtime init values for the specified PF */ +static void ecore_other_pq_map_rt_init(struct ecore_hwfn *p_hwfn, + u8 pf_id, + bool is_pf_loading, + u32 num_pf_cids, + u32 num_tids, + u32 base_mem_addr_4kb) +{ + u32 pq_size, pq_mem_4kb, mem_addr_4kb; + u16 i, j, pq_id, pq_group; + + /* A single other PQ group is used in each PF, where PQ group i is used + * in PF i. + */ + pq_group = pf_id; + pq_size = num_pf_cids + num_tids; + pq_mem_4kb = QM_PQ_MEM_4KB(pq_size); + mem_addr_4kb = base_mem_addr_4kb; + + /* Map PQ group to PF */ + STORE_RT_REG(p_hwfn, QM_REG_PQOTHER2PF_0_RT_OFFSET + pq_group, + (u32)(pf_id)); + + /* Set PQ sizes */ + STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_2_RT_OFFSET, + QM_PQ_SIZE_256B(pq_size)); + + for (i = 0, pq_id = pf_id * QM_PF_QUEUE_GROUP_SIZE; + i < QM_OTHER_PQS_PER_PF; i++, pq_id++) { + /* Set PQ base address */ + STORE_RT_REG(p_hwfn, QM_REG_BASEADDROTHERPQ_RT_OFFSET + pq_id, + mem_addr_4kb); + + /* Clear PQ pointer table entry */ + if (is_pf_loading) + for (j = 0; j < 2; j++) + STORE_RT_REG(p_hwfn, + QM_REG_PTRTBLOTHER_RT_OFFSET + + (pq_id * 2) + j, 0); + + mem_addr_4kb += pq_mem_4kb; + } +} + +/* Prepare PF WFQ runtime init values for the specified PF. + * Return -1 on error. + */ +static int ecore_pf_wfq_rt_init(struct ecore_hwfn *p_hwfn, + u8 pf_id, + u16 pf_wfq, + u8 max_phys_tcs_per_port, + u16 num_tx_pqs, + struct init_qm_pq_params *pq_params) +{ + u32 inc_val, crd_reg_offset; + u8 voq; + u16 i; + + inc_val = QM_WFQ_INC_VAL(pf_wfq); + if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) { + DP_NOTICE(p_hwfn, true, + "Invalid PF WFQ weight configuration\n"); + return -1; + } + + for (i = 0; i < num_tx_pqs; i++) { + voq = VOQ(pq_params[i].port_id, pq_params[i].tc_id, + max_phys_tcs_per_port); + crd_reg_offset = (pf_id < MAX_NUM_PFS_BB ? 
+ QM_REG_WFQPFCRD_RT_OFFSET : + QM_REG_WFQPFCRD_MSB_RT_OFFSET) + + voq * MAX_NUM_PFS_BB + + (pf_id % MAX_NUM_PFS_BB); + OVERWRITE_RT_REG(p_hwfn, crd_reg_offset, + (u32)QM_WFQ_CRD_REG_SIGN_BIT); + } + + STORE_RT_REG(p_hwfn, QM_REG_WFQPFUPPERBOUND_RT_OFFSET + + pf_id, QM_WFQ_UPPER_BOUND | (u32)QM_WFQ_CRD_REG_SIGN_BIT); + STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + pf_id, inc_val); + + return 0; +} + +/* Prepare PF RL runtime init values for the specified PF. + * Return -1 on error. + */ +static int ecore_pf_rl_rt_init(struct ecore_hwfn *p_hwfn, u8 pf_id, u32 pf_rl) +{ + u32 inc_val; + + inc_val = QM_RL_INC_VAL(pf_rl); + if (inc_val > QM_PF_RL_MAX_INC_VAL) { + DP_NOTICE(p_hwfn, true, + "Invalid PF rate limit configuration\n"); + return -1; + } + + STORE_RT_REG(p_hwfn, QM_REG_RLPFCRD_RT_OFFSET + pf_id, + (u32)QM_RL_CRD_REG_SIGN_BIT); + STORE_RT_REG(p_hwfn, QM_REG_RLPFUPPERBOUND_RT_OFFSET + pf_id, + QM_PF_RL_UPPER_BOUND | (u32)QM_RL_CRD_REG_SIGN_BIT); + STORE_RT_REG(p_hwfn, QM_REG_RLPFINCVAL_RT_OFFSET + pf_id, inc_val); + + return 0; +} + +/* Prepare VPORT WFQ runtime init values for the specified VPORTs. + * Return -1 on error. + */ +static int ecore_vp_wfq_rt_init(struct ecore_hwfn *p_hwfn, + u16 num_vports, + struct init_qm_vport_params *vport_params) +{ + u16 vp_pq_id, vport_id; + u32 inc_val; + u8 tc; + + /* Go over all PF VPORTs */ + for (vport_id = 0; vport_id < num_vports; vport_id++) { + if (!vport_params[vport_id].wfq) + continue; + + inc_val = QM_WFQ_INC_VAL(vport_params[vport_id].wfq); + if (inc_val > QM_WFQ_MAX_INC_VAL) { + DP_NOTICE(p_hwfn, true, + "Invalid VPORT WFQ weight configuration\n"); + return -1; + } + + /* Each VPORT can have several VPORT PQ IDs for various TCs */ + for (tc = 0; tc < NUM_OF_TCS; tc++) { + vp_pq_id = vport_params[vport_id].first_tx_pq_id[tc]; + if (vp_pq_id == QM_INVALID_PQ_ID) + continue; + + STORE_RT_REG(p_hwfn, QM_REG_WFQVPCRD_RT_OFFSET + + vp_pq_id, (u32)QM_WFQ_CRD_REG_SIGN_BIT); + STORE_RT_REG(p_hwfn, QM_REG_WFQVPWEIGHT_RT_OFFSET + + vp_pq_id, inc_val); + } + } + + return 0; +} + +static bool ecore_poll_on_qm_cmd_ready(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + u32 reg_val, i; + + for (i = 0, reg_val = 0; i < QM_STOP_CMD_MAX_POLL_COUNT && !reg_val; + i++) { + OSAL_UDELAY(QM_STOP_CMD_POLL_PERIOD_US); + reg_val = ecore_rd(p_hwfn, p_ptt, QM_REG_SDMCMDREADY); + } + + /* Check if timeout while waiting for SDM command ready */ + if (i == QM_STOP_CMD_MAX_POLL_COUNT) { + DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, + "Timeout waiting for QM SDM cmd ready signal\n"); + return false; + } + + return true; +} + +static bool ecore_send_qm_cmd(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 cmd_addr, + u32 cmd_data_lsb, + u32 cmd_data_msb) +{ + if (!ecore_poll_on_qm_cmd_ready(p_hwfn, p_ptt)) + return false; + + ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDADDR, cmd_addr); + ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATALSB, cmd_data_lsb); + ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATAMSB, cmd_data_msb); + ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 1); + ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 0); + + return ecore_poll_on_qm_cmd_ready(p_hwfn, p_ptt); +} + +/******************** INTERFACE IMPLEMENTATION *********************/ + +u32 ecore_qm_pf_mem_size(struct ecore_hwfn *p_hwfn, + u32 num_pf_cids, + u32 num_vf_cids, + u32 num_tids, + u16 num_pf_pqs, + u16 num_vf_pqs) +{ + return QM_PQ_MEM_4KB(num_pf_cids) * num_pf_pqs + + QM_PQ_MEM_4KB(num_vf_cids) * num_vf_pqs + + QM_PQ_MEM_4KB(num_pf_cids + num_tids) * QM_OTHER_PQS_PER_PF; +} + +int 
ecore_qm_common_rt_init(struct ecore_hwfn *p_hwfn, + u8 max_ports_per_engine, + u8 max_phys_tcs_per_port, + bool pf_rl_en, + bool pf_wfq_en, + bool global_rl_en, + bool vport_wfq_en, + struct init_qm_port_params + port_params[MAX_NUM_PORTS], + struct init_qm_global_rl_params + global_rl_params[COMMON_MAX_QM_GLOBAL_RLS]) +{ + u32 mask = 0; + + /* Init AFullOprtnstcCrdMask */ + SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_LINEVOQ, + QM_OPPOR_LINE_VOQ_DEF); + SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ, QM_BYTE_CRD_EN); + SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_PFWFQ, pf_wfq_en); + SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_VPWFQ, vport_wfq_en); + SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_PFRL, pf_rl_en); + SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_VPQCNRL, global_rl_en); + SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_FWPAUSE, QM_OPPOR_FW_STOP_DEF); + SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY, + QM_OPPOR_PQ_EMPTY_DEF); + STORE_RT_REG(p_hwfn, QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET, mask); + + /* Enable/disable PF RL */ + ecore_enable_pf_rl(p_hwfn, pf_rl_en); + + /* Enable/disable PF WFQ */ + ecore_enable_pf_wfq(p_hwfn, pf_wfq_en); + + /* Enable/disable global RL */ + ecore_enable_global_rl(p_hwfn, global_rl_en); + + /* Enable/disable VPORT WFQ */ + ecore_enable_vport_wfq(p_hwfn, vport_wfq_en); + + /* Init PBF CMDQ line credit */ + ecore_cmdq_lines_rt_init(p_hwfn, max_ports_per_engine, + max_phys_tcs_per_port, port_params); + + /* Init BTB blocks in PBF */ + ecore_btb_blocks_rt_init(p_hwfn, max_ports_per_engine, + max_phys_tcs_per_port, port_params); + + ecore_global_rl_rt_init(p_hwfn, global_rl_params); + + return 0; +} + +int ecore_qm_pf_rt_init(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u8 pf_id, + u8 max_phys_tcs_per_port, + bool is_pf_loading, + u32 num_pf_cids, + u32 num_vf_cids, + u32 num_tids, + u16 start_pq, + u16 num_pf_pqs, + u16 num_vf_pqs, + u16 start_vport, + u16 num_vports, + u16 pf_wfq, + u32 pf_rl, + struct init_qm_pq_params *pq_params, + struct init_qm_vport_params *vport_params) +{ + u32 other_mem_size_4kb; + u16 vport_id; + u8 tc; + + other_mem_size_4kb = QM_PQ_MEM_4KB(num_pf_cids + num_tids) * + QM_OTHER_PQS_PER_PF; + + /* Clear first Tx PQ ID array for each VPORT */ + for (vport_id = 0; vport_id < num_vports; vport_id++) + for (tc = 0; tc < NUM_OF_TCS; tc++) + vport_params[vport_id].first_tx_pq_id[tc] = + QM_INVALID_PQ_ID; + + /* Map Other PQs (if any) */ +#if QM_OTHER_PQS_PER_PF > 0 + ecore_other_pq_map_rt_init(p_hwfn, pf_id, is_pf_loading, num_pf_cids, + num_tids, 0); +#endif + + /* Map Tx PQs */ + if (ecore_tx_pq_map_rt_init(p_hwfn, p_ptt, pf_id, max_phys_tcs_per_port, + is_pf_loading, num_pf_cids, num_vf_cids, + start_pq, num_pf_pqs, num_vf_pqs, + start_vport, other_mem_size_4kb, pq_params, + vport_params)) + return -1; + + /* Init PF WFQ */ + if (pf_wfq) + if (ecore_pf_wfq_rt_init(p_hwfn, pf_id, pf_wfq, + max_phys_tcs_per_port, + num_pf_pqs + num_vf_pqs, pq_params)) + return -1; + + /* Init PF RL */ + if (ecore_pf_rl_rt_init(p_hwfn, pf_id, pf_rl)) + return -1; + + /* Init VPORT WFQ */ + if (ecore_vp_wfq_rt_init(p_hwfn, num_vports, vport_params)) + return -1; + + return 0; +} + +int ecore_init_pf_wfq(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, u8 pf_id, u16 pf_wfq) +{ + u32 inc_val; + + inc_val = QM_WFQ_INC_VAL(pf_wfq); + if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) { + DP_NOTICE(p_hwfn, true, + "Invalid PF WFQ weight configuration\n"); + return -1; + } + + ecore_wr(p_hwfn, p_ptt, QM_REG_WFQPFWEIGHT + pf_id * 4, inc_val); + + return 0; +} + 
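[Editorial aside, not part of the patch: the weight/rate validation in ecore_init_pf_wfq() above and ecore_init_pf_rl() below reduces to the QM increment macros defined near the top of this file. The following minimal, self-contained C sketch re-evaluates those macros on the host so the accepted ranges can be inspected; OSAL_MAX_T() is replaced by a local helper, and the weight/rate values in main() are arbitrary examples, not values taken from the driver.]

/* Illustrative sketch only -- not part of the patch. Re-implements the QM
 * increment macros from ecore_init_fw_funcs.c in plain C so the bounds used
 * by ecore_init_pf_wfq()/ecore_init_pf_rl() can be evaluated standalone.
 */
#include <stdint.h>
#include <stdio.h>

#define QM_RL_PERIOD            5               /* period in us */
#define QM_WFQ_UPPER_BOUND      62500000
#define QM_WFQ_INC_VAL(weight)  ((weight) * 0x9000)
#define QM_WFQ_MAX_INC_VAL      ((QM_WFQ_UPPER_BOUND * 7) / 10)
#define QM_PF_RL_UPPER_BOUND    62500000
#define QM_PF_RL_MAX_INC_VAL    ((QM_PF_RL_UPPER_BOUND * 7) / 10)

static uint32_t max_u32(uint32_t a, uint32_t b)
{
	return a > b ? a : b;
}

/* Credit increment for a PF rate limiter; rate is in Mbps, 0 selects the
 * 100 Gbps default. The 101/100 factor mirrors the comment above
 * QM_RL_INC_VAL about the ~1% shortfall seen in RFC 2544 testing.
 */
static uint32_t qm_rl_inc_val(uint32_t rate_mbps)
{
	uint64_t rate = rate_mbps ? rate_mbps : 100000;

	return max_u32((uint32_t)((rate * QM_RL_PERIOD * 101) / (8 * 100)), 1);
}

int main(void)
{
	uint32_t pf_wfq = 100;          /* example WFQ weight */
	uint32_t pf_rl = 25000;         /* example rate limit: 25 Gbps */
	uint32_t wfq_inc = QM_WFQ_INC_VAL(pf_wfq);
	uint32_t rl_inc = qm_rl_inc_val(pf_rl);

	printf("WFQ inc_val 0x%x, accepted: %s\n", (unsigned)wfq_inc,
	       (wfq_inc && wfq_inc <= QM_WFQ_MAX_INC_VAL) ? "yes" : "no");
	printf("RL  inc_val 0x%x, accepted: %s\n", (unsigned)rl_inc,
	       (rl_inc <= QM_PF_RL_MAX_INC_VAL) ? "yes" : "no");

	return 0;
}

[Run standalone, this shows for instance that a PF WFQ weight of 100 maps to inc_val 0x384000 and passes the bound check, while weights above roughly 1186 exceed QM_WFQ_MAX_INC_VAL and would trigger the "Invalid PF WFQ weight configuration" notice.]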
+int ecore_init_pf_rl(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, u8 pf_id, u32 pf_rl) +{ + u32 inc_val; + + inc_val = QM_RL_INC_VAL(pf_rl); + if (inc_val > QM_PF_RL_MAX_INC_VAL) { + DP_NOTICE(p_hwfn, true, + "Invalid PF rate limit configuration\n"); + return -1; + } + + ecore_wr(p_hwfn, p_ptt, QM_REG_RLPFCRD + pf_id * 4, + (u32)QM_RL_CRD_REG_SIGN_BIT); + ecore_wr(p_hwfn, p_ptt, QM_REG_RLPFINCVAL + pf_id * 4, inc_val); + + return 0; +} + +int ecore_init_vport_wfq(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u16 first_tx_pq_id[NUM_OF_TCS], + u16 wfq) +{ + u16 vp_pq_id; + u32 inc_val; + u8 tc; + + inc_val = QM_WFQ_INC_VAL(wfq); + if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) { + DP_NOTICE(p_hwfn, true, + "Invalid VPORT WFQ weight configuration\n"); + return -1; + } + + /* A VPORT can have several VPORT PQ IDs for various TCs */ + for (tc = 0; tc < NUM_OF_TCS; tc++) { + vp_pq_id = first_tx_pq_id[tc]; + if (vp_pq_id != QM_INVALID_PQ_ID) { + ecore_wr(p_hwfn, p_ptt, + QM_REG_WFQVPWEIGHT + vp_pq_id * 4, inc_val); + } + } + + return 0; + } + +int ecore_init_global_rl(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u16 rl_id, + u32 rate_limit) +{ + u32 inc_val; + + inc_val = QM_RL_INC_VAL(rate_limit); + if (inc_val > QM_VP_RL_MAX_INC_VAL(rate_limit)) { + DP_NOTICE(p_hwfn, true, "Invalid rate limit configuration.\n"); + return -1; + } + + ecore_wr(p_hwfn, p_ptt, QM_REG_RLGLBLCRD + rl_id * 4, + (u32)QM_RL_CRD_REG_SIGN_BIT); + ecore_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + rl_id * 4, inc_val); + + return 0; +} + +int ecore_init_vport_rl(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, u8 vport_id, + u32 vport_rl, + u32 link_speed) +{ + u32 inc_val, max_qm_global_rls = MAX_QM_GLOBAL_RLS; + + if (vport_id >= max_qm_global_rls) { + DP_NOTICE(p_hwfn, true, + "Invalid VPORT ID for rate limiter configuration\n"); + return -1; + } + + inc_val = QM_RL_INC_VAL(vport_rl ? vport_rl : link_speed); + if (inc_val > QM_VP_RL_MAX_INC_VAL(link_speed)) { + DP_NOTICE(p_hwfn, true, + "Invalid VPORT rate-limit configuration\n"); + return -1; + } + + ecore_wr(p_hwfn, p_ptt, QM_REG_RLGLBLCRD + vport_id * 4, + (u32)QM_RL_CRD_REG_SIGN_BIT); + ecore_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + vport_id * 4, inc_val); + + return 0; +} + +bool ecore_send_qm_stop_cmd(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + bool is_release_cmd, + bool is_tx_pq, u16 start_pq, u16 num_pqs) +{ + u32 cmd_arr[QM_CMD_STRUCT_SIZE(QM_STOP_CMD)] = { 0 }; + u32 pq_mask = 0, last_pq, pq_id; + + last_pq = start_pq + num_pqs - 1; + + /* Set command's PQ type */ + QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PQ_TYPE, is_tx_pq ? 
0 : 1); + + /* Go over requested PQs */ + for (pq_id = start_pq; pq_id <= last_pq; pq_id++) { + /* Set PQ bit in mask (stop command only) */ + if (!is_release_cmd) + pq_mask |= (1 << (pq_id % QM_STOP_PQ_MASK_WIDTH)); + + /* If last PQ or end of PQ mask, write command */ + if ((pq_id == last_pq) || + (pq_id % QM_STOP_PQ_MASK_WIDTH == + (QM_STOP_PQ_MASK_WIDTH - 1))) { + QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PAUSE_MASK, + pq_mask); + QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, GROUP_ID, + pq_id / QM_STOP_PQ_MASK_WIDTH); + if (!ecore_send_qm_cmd + (p_hwfn, p_ptt, QM_STOP_CMD_ADDR, cmd_arr[0], + cmd_arr[1])) + return false; + pq_mask = 0; + } + } + + return true; +} + +#ifndef UNUSED_HSI_FUNC + +/* NIG: ETS configuration constants */ +#define NIG_TX_ETS_CLIENT_OFFSET 4 +#define NIG_LB_ETS_CLIENT_OFFSET 1 +#define NIG_ETS_MIN_WFQ_BYTES 1600 + +/* NIG: ETS constants */ +#define NIG_ETS_UP_BOUND(weight, mtu) \ + (2 * ((weight) > (mtu) ? (weight) : (mtu))) + +/* NIG: RL constants */ + +/* Byte base type value */ +#define NIG_RL_BASE_TYPE 1 + +/* Period in us */ +#define NIG_RL_PERIOD 1 + +/* Period in 25MHz cycles */ +#define NIG_RL_PERIOD_CLK_25M (25 * NIG_RL_PERIOD) + +/* Rate in mbps */ +#define NIG_RL_INC_VAL(rate) (((rate) * NIG_RL_PERIOD) / 8) + +#define NIG_RL_MAX_VAL(inc_val, mtu) \ + (2 * ((inc_val) > (mtu) ? (inc_val) : (mtu))) + +/* NIG: packet prioritry configuration constants */ +#define NIG_PRIORITY_MAP_TC_BITS 4 + + +void ecore_init_nig_ets(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct init_ets_req *req, bool is_lb) +{ + u32 min_weight, tc_weight_base_addr, tc_weight_addr_diff; + u32 tc_bound_base_addr, tc_bound_addr_diff; + u8 sp_tc_map = 0, wfq_tc_map = 0; + u8 tc, num_tc, tc_client_offset; + + num_tc = is_lb ? NUM_OF_TCS : NUM_OF_PHYS_TCS; + tc_client_offset = is_lb ? NIG_LB_ETS_CLIENT_OFFSET : + NIG_TX_ETS_CLIENT_OFFSET; + min_weight = 0xffffffff; + tc_weight_base_addr = is_lb ? NIG_REG_LB_ARB_CREDIT_WEIGHT_0 : + NIG_REG_TX_ARB_CREDIT_WEIGHT_0; + tc_weight_addr_diff = is_lb ? NIG_REG_LB_ARB_CREDIT_WEIGHT_1 - + NIG_REG_LB_ARB_CREDIT_WEIGHT_0 : + NIG_REG_TX_ARB_CREDIT_WEIGHT_1 - + NIG_REG_TX_ARB_CREDIT_WEIGHT_0; + tc_bound_base_addr = is_lb ? NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_0 : + NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_0; + tc_bound_addr_diff = is_lb ? NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_1 - + NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_0 : + NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_1 - + NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_0; + + for (tc = 0; tc < num_tc; tc++) { + struct init_ets_tc_req *tc_req = &req->tc_req[tc]; + + /* Update SP map */ + if (tc_req->use_sp) + sp_tc_map |= (1 << tc); + + if (!tc_req->use_wfq) + continue; + + /* Update WFQ map */ + wfq_tc_map |= (1 << tc); + + /* Find minimal weight */ + if (tc_req->weight < min_weight) + min_weight = tc_req->weight; + } + + /* Write SP map */ + ecore_wr(p_hwfn, p_ptt, + is_lb ? NIG_REG_LB_ARB_CLIENT_IS_STRICT : + NIG_REG_TX_ARB_CLIENT_IS_STRICT, + (sp_tc_map << tc_client_offset)); + + /* Write WFQ map */ + ecore_wr(p_hwfn, p_ptt, + is_lb ? 
NIG_REG_LB_ARB_CLIENT_IS_SUBJECT2WFQ : + NIG_REG_TX_ARB_CLIENT_IS_SUBJECT2WFQ, + (wfq_tc_map << tc_client_offset)); + /* write WFQ weights */ + for (tc = 0; tc < num_tc; tc++, tc_client_offset++) { + struct init_ets_tc_req *tc_req = &req->tc_req[tc]; + u32 byte_weight; + + if (!tc_req->use_wfq) + continue; + + /* Translate weight to bytes */ + byte_weight = (NIG_ETS_MIN_WFQ_BYTES * tc_req->weight) / + min_weight; + + /* Write WFQ weight */ + ecore_wr(p_hwfn, p_ptt, tc_weight_base_addr + + tc_weight_addr_diff * tc_client_offset, byte_weight); + + /* Write WFQ upper bound */ + ecore_wr(p_hwfn, p_ptt, tc_bound_base_addr + + tc_bound_addr_diff * tc_client_offset, + NIG_ETS_UP_BOUND(byte_weight, req->mtu)); + } +} + +void ecore_init_nig_lb_rl(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct init_nig_lb_rl_req *req) +{ + u32 ctrl, inc_val, reg_offset; + u8 tc; + + /* Disable global MAC+LB RL */ + ctrl = + NIG_RL_BASE_TYPE << + NIG_REG_TX_LB_GLBRATELIMIT_CTRL_TX_LB_GLBRATELIMIT_BASE_TYPE_SHIFT; + ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_CTRL, ctrl); + + /* Configure and enable global MAC+LB RL */ + if (req->lb_mac_rate) { + /* Configure */ + ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_INC_PERIOD, + NIG_RL_PERIOD_CLK_25M); + inc_val = NIG_RL_INC_VAL(req->lb_mac_rate); + ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_INC_VALUE, + inc_val); + ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_MAX_VALUE, + NIG_RL_MAX_VAL(inc_val, req->mtu)); + + /* Enable */ + ctrl |= + 1 << + NIG_REG_TX_LB_GLBRATELIMIT_CTRL_TX_LB_GLBRATELIMIT_EN_SHIFT; + ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_CTRL, ctrl); + } + + /* Disable global LB-only RL */ + ctrl = + NIG_RL_BASE_TYPE << + NIG_REG_LB_BRBRATELIMIT_CTRL_LB_BRBRATELIMIT_BASE_TYPE_SHIFT; + ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_CTRL, ctrl); + + /* Configure and enable global LB-only RL */ + if (req->lb_rate) { + /* Configure */ + ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_INC_PERIOD, + NIG_RL_PERIOD_CLK_25M); + inc_val = NIG_RL_INC_VAL(req->lb_rate); + ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_INC_VALUE, + inc_val); + ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_MAX_VALUE, + NIG_RL_MAX_VAL(inc_val, req->mtu)); + + /* Enable */ + ctrl |= + 1 << NIG_REG_LB_BRBRATELIMIT_CTRL_LB_BRBRATELIMIT_EN_SHIFT; + ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_CTRL, ctrl); + } + + /* Per-TC RLs */ + for (tc = 0, reg_offset = 0; tc < NUM_OF_PHYS_TCS; + tc++, reg_offset += 4) { + /* Disable TC RL */ + ctrl = + NIG_RL_BASE_TYPE << + NIG_REG_LB_TCRATELIMIT_CTRL_0_LB_TCRATELIMIT_BASE_TYPE_0_SHIFT; + ecore_wr(p_hwfn, p_ptt, + NIG_REG_LB_TCRATELIMIT_CTRL_0 + reg_offset, ctrl); + + /* Configure and enable TC RL */ + if (!req->tc_rate[tc]) + continue; + + /* Configure */ + ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_INC_PERIOD_0 + + reg_offset, NIG_RL_PERIOD_CLK_25M); + inc_val = NIG_RL_INC_VAL(req->tc_rate[tc]); + ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_INC_VALUE_0 + + reg_offset, inc_val); + ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_MAX_VALUE_0 + + reg_offset, NIG_RL_MAX_VAL(inc_val, req->mtu)); + + /* Enable */ + ctrl |= 1 << + NIG_REG_LB_TCRATELIMIT_CTRL_0_LB_TCRATELIMIT_EN_0_SHIFT; + ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_CTRL_0 + + reg_offset, ctrl); + } +} + +void ecore_init_nig_pri_tc_map(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct init_nig_pri_tc_map_req *req) +{ + u8 tc_pri_mask[NUM_OF_PHYS_TCS] = { 0 }; + u32 pri_tc_mask = 0; + u8 pri, tc; + + for (pri = 0; pri < 
NUM_OF_VLAN_PRIORITIES; pri++) { + if (!req->pri[pri].valid) + continue; + + pri_tc_mask |= (req->pri[pri].tc_id << + (pri * NIG_PRIORITY_MAP_TC_BITS)); + tc_pri_mask[req->pri[pri].tc_id] |= (1 << pri); + } + + /* Write priority -> TC mask */ + ecore_wr(p_hwfn, p_ptt, NIG_REG_PKT_PRIORITY_TO_TC, pri_tc_mask); + + /* Write TC -> priority mask */ + for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) { + ecore_wr(p_hwfn, p_ptt, NIG_REG_PRIORITY_FOR_TC_0 + tc * 4, + tc_pri_mask[tc]); + ecore_wr(p_hwfn, p_ptt, NIG_REG_RX_TC0_PRIORITY_MASK + tc * 4, + tc_pri_mask[tc]); + } +} + +#endif /* UNUSED_HSI_FUNC */ + +#ifndef UNUSED_HSI_FUNC + +/* PRS: ETS configuration constants */ +#define PRS_ETS_MIN_WFQ_BYTES 1600 +#define PRS_ETS_UP_BOUND(weight, mtu) \ + (2 * ((weight) > (mtu) ? (weight) : (mtu))) + + +void ecore_init_prs_ets(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, struct init_ets_req *req) +{ + u32 tc_weight_addr_diff, tc_bound_addr_diff, min_weight = 0xffffffff; + u8 tc, sp_tc_map = 0, wfq_tc_map = 0; + + tc_weight_addr_diff = PRS_REG_ETS_ARB_CREDIT_WEIGHT_1 - + PRS_REG_ETS_ARB_CREDIT_WEIGHT_0; + tc_bound_addr_diff = PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_1 - + PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_0; + + for (tc = 0; tc < NUM_OF_TCS; tc++) { + struct init_ets_tc_req *tc_req = &req->tc_req[tc]; + + /* Update SP map */ + if (tc_req->use_sp) + sp_tc_map |= (1 << tc); + + if (!tc_req->use_wfq) + continue; + + /* Update WFQ map */ + wfq_tc_map |= (1 << tc); + + /* Find minimal weight */ + if (tc_req->weight < min_weight) + min_weight = tc_req->weight; + } + + /* write SP map */ + ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CLIENT_IS_STRICT, sp_tc_map); + + /* write WFQ map */ + ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ, + wfq_tc_map); + + /* write WFQ weights */ + for (tc = 0; tc < NUM_OF_TCS; tc++) { + struct init_ets_tc_req *tc_req = &req->tc_req[tc]; + u32 byte_weight; + + if (!tc_req->use_wfq) + continue; + + /* Translate weight to bytes */ + byte_weight = (PRS_ETS_MIN_WFQ_BYTES * tc_req->weight) / + min_weight; + + /* Write WFQ weight */ + ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CREDIT_WEIGHT_0 + tc * + tc_weight_addr_diff, byte_weight); + + /* Write WFQ upper bound */ + ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_0 + + tc * tc_bound_addr_diff, PRS_ETS_UP_BOUND(byte_weight, + req->mtu)); + } +} + +#endif /* UNUSED_HSI_FUNC */ +#ifndef UNUSED_HSI_FUNC + +/* BRB: RAM configuration constants */ +#define BRB_TOTAL_RAM_BLOCKS_BB 4800 +#define BRB_TOTAL_RAM_BLOCKS_K2 5632 +#define BRB_BLOCK_SIZE 128 +#define BRB_MIN_BLOCKS_PER_TC 9 +#define BRB_HYST_BYTES 10240 +#define BRB_HYST_BLOCKS (BRB_HYST_BYTES / BRB_BLOCK_SIZE) + +/* Temporary big RAM allocation - should be updated */ +void ecore_init_brb_ram(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, struct init_brb_ram_req *req) +{ + u32 tc_headroom_blocks, min_pkt_size_blocks, total_blocks; + u32 active_port_blocks, reg_offset = 0; + u8 port, active_ports = 0; + + tc_headroom_blocks = (u32)DIV_ROUND_UP(req->headroom_per_tc, + BRB_BLOCK_SIZE); + min_pkt_size_blocks = (u32)DIV_ROUND_UP(req->min_pkt_size, + BRB_BLOCK_SIZE); + total_blocks = ECORE_IS_K2(p_hwfn->p_dev) ? 
BRB_TOTAL_RAM_BLOCKS_K2 : + BRB_TOTAL_RAM_BLOCKS_BB; + + /* Find number of active ports */ + for (port = 0; port < MAX_NUM_PORTS; port++) + if (req->num_active_tcs[port]) + active_ports++; + + active_port_blocks = (u32)(total_blocks / active_ports); + + for (port = 0; port < req->max_ports_per_engine; port++) { + u32 port_blocks, port_shared_blocks, port_guaranteed_blocks; + u32 full_xoff_th, full_xon_th, pause_xoff_th, pause_xon_th; + u32 tc_guaranteed_blocks; + u8 tc; + + /* Calculate per-port sizes */ + tc_guaranteed_blocks = (u32)DIV_ROUND_UP(req->guranteed_per_tc, + BRB_BLOCK_SIZE); + port_blocks = req->num_active_tcs[port] ? active_port_blocks : + 0; + port_guaranteed_blocks = req->num_active_tcs[port] * + tc_guaranteed_blocks; + port_shared_blocks = port_blocks - port_guaranteed_blocks; + full_xoff_th = req->num_active_tcs[port] * + BRB_MIN_BLOCKS_PER_TC; + full_xon_th = full_xoff_th + min_pkt_size_blocks; + pause_xoff_th = tc_headroom_blocks; + pause_xon_th = pause_xoff_th + min_pkt_size_blocks; + + /* Init total size per port */ + ecore_wr(p_hwfn, p_ptt, BRB_REG_TOTAL_MAC_SIZE + port * 4, + port_blocks); + + /* Init shared size per port */ + ecore_wr(p_hwfn, p_ptt, BRB_REG_SHARED_HR_AREA + port * 4, + port_shared_blocks); + + for (tc = 0; tc < NUM_OF_TCS; tc++, reg_offset += 4) { + /* Clear init values for non-active TCs */ + if (tc == req->num_active_tcs[port]) { + tc_guaranteed_blocks = 0; + full_xoff_th = 0; + full_xon_th = 0; + pause_xoff_th = 0; + pause_xon_th = 0; + } + + /* Init guaranteed size per TC */ + ecore_wr(p_hwfn, p_ptt, + BRB_REG_TC_GUARANTIED_0 + reg_offset, + tc_guaranteed_blocks); + ecore_wr(p_hwfn, p_ptt, + BRB_REG_MAIN_TC_GUARANTIED_HYST_0 + reg_offset, + BRB_HYST_BLOCKS); + + /* Init pause/full thresholds per physical TC - for + * loopback traffic. + */ + ecore_wr(p_hwfn, p_ptt, + BRB_REG_LB_TC_FULL_XOFF_THRESHOLD_0 + + reg_offset, full_xoff_th); + ecore_wr(p_hwfn, p_ptt, + BRB_REG_LB_TC_FULL_XON_THRESHOLD_0 + + reg_offset, full_xon_th); + ecore_wr(p_hwfn, p_ptt, + BRB_REG_LB_TC_PAUSE_XOFF_THRESHOLD_0 + + reg_offset, pause_xoff_th); + ecore_wr(p_hwfn, p_ptt, + BRB_REG_LB_TC_PAUSE_XON_THRESHOLD_0 + + reg_offset, pause_xon_th); + + /* Init pause/full thresholds per physical TC - for + * main traffic. + */ + ecore_wr(p_hwfn, p_ptt, + BRB_REG_MAIN_TC_FULL_XOFF_THRESHOLD_0 + + reg_offset, full_xoff_th); + ecore_wr(p_hwfn, p_ptt, + BRB_REG_MAIN_TC_FULL_XON_THRESHOLD_0 + + reg_offset, full_xon_th); + ecore_wr(p_hwfn, p_ptt, + BRB_REG_MAIN_TC_PAUSE_XOFF_THRESHOLD_0 + + reg_offset, pause_xoff_th); + ecore_wr(p_hwfn, p_ptt, + BRB_REG_MAIN_TC_PAUSE_XON_THRESHOLD_0 + + reg_offset, pause_xon_th); + } + } +} + +#endif /* UNUSED_HSI_FUNC */ +#ifndef UNUSED_HSI_FUNC + +#define ARR_REG_WR(dev, ptt, addr, arr, arr_size) \ + do { \ + u32 i; \ + for (i = 0; i < (arr_size); i++) \ + ecore_wr(dev, ptt, ((addr) + (4 * i)), \ + ((u32 *)&(arr))[i]); \ + } while (0) + +#ifndef DWORDS_TO_BYTES +#define DWORDS_TO_BYTES(dwords) ((dwords) * REG_SIZE) +#endif + + +/** + * @brief ecore_dmae_to_grc - is an internal function - writes from host to + * wide-bus registers (split registers are not supported yet) + * + * @param p_hwfn - HW device data + * @param p_ptt - ptt window used for writing the registers. + * @param pData - pointer to source data. + * @param addr - Destination register address. 
+ * @param len_in_dwords - data length in DWORDS (u32)
+ */
+static int ecore_dmae_to_grc(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 *pData,
+ u32 addr,
+ u32 len_in_dwords)
+{
+ struct dmae_params params;
+ bool read_using_dmae = false;
+
+ if (!pData)
+ return -1;
+
+ /* Set DMAE params */
+ OSAL_MEMSET(&params, 0, sizeof(params));
+
+ SET_FIELD(params.flags, DMAE_PARAMS_COMPLETION_DST, 1);
+
+ /* Execute DMAE command */
+ read_using_dmae = !ecore_dmae_host2grc(p_hwfn, p_ptt,
+ (u64)(osal_uintptr_t)(pData),
+ addr, len_in_dwords, &params);
+ if (!read_using_dmae)
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG,
+ "Failed writing to chip using DMAE, using GRC instead\n");
+
+ /* If not written using DMAE, write using GRC */
+ if (!read_using_dmae)
+ /* write to registers using GRC */
+ ARR_REG_WR(p_hwfn, p_ptt, addr, pData, len_in_dwords);
+
+ return len_in_dwords;
+}
+
+/* In MF, should be called once per port to set EtherType of OuterTag */
+void ecore_set_port_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn, u32 ethType)
+{
+ /* Update DORQ register */
+ STORE_RT_REG(p_hwfn, DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET, ethType);
+}
+
+#endif /* UNUSED_HSI_FUNC */
+
+#define SET_TUNNEL_TYPE_ENABLE_BIT(var, offset, enable) \
+(var = ((var) & ~(1 << (offset))) | ((enable) ? (1 << (offset)) : 0))
+#define PRS_ETH_TUNN_OUTPUT_FORMAT -188897008
+#define PRS_ETH_OUTPUT_FORMAT -46832
+
+void ecore_set_vxlan_dest_port(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u16 dest_port)
+{
+ /* Update PRS register */
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_VXLAN_PORT, dest_port);
+
+ /* Update NIG register */
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_VXLAN_CTRL, dest_port);
+
+ /* Update PBF register */
+ ecore_wr(p_hwfn, p_ptt, PBF_REG_VXLAN_PORT, dest_port);
+}
+
+void ecore_set_vxlan_enable(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, bool vxlan_enable)
+{
+ u32 reg_val;
+
+ /* Update PRS register */
+ reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
+ SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
+ PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT,
+ vxlan_enable);
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
+ if (reg_val) { /* TODO: handle E5 init */
+ reg_val = ecore_rd(p_hwfn, p_ptt,
+ PRS_REG_OUTPUT_FORMAT_4_0_BB_K2);
+
+ /* Update output only if tunnel blocks not included. */
+ if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
+ (u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
+ }
+
+ /* Update NIG register */
+ reg_val = ecore_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
+ SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
+ NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT,
+ vxlan_enable);
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);
+
+ /* Update DORQ register */
+ ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN,
+ vxlan_enable ? 1 : 0);
+}
+
+void ecore_set_gre_enable(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ bool eth_gre_enable, bool ip_gre_enable)
+{
+ u32 reg_val;
+
+ /* Update PRS register */
+ reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
+ SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
+ PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT,
+ eth_gre_enable);
+ SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
+ PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT,
+ ip_gre_enable);
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
+ if (reg_val) { /* TODO: handle E5 init */
+ reg_val = ecore_rd(p_hwfn, p_ptt,
+ PRS_REG_OUTPUT_FORMAT_4_0_BB_K2);
+
+ /* Update output only if tunnel blocks not included.
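+ * That is, the format register is only rewritten while it still holds
+ * the default non-tunnel value (PRS_ETH_OUTPUT_FORMAT).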
*/ + if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT) + ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2, + (u32)PRS_ETH_TUNN_OUTPUT_FORMAT); + } + + /* Update NIG register */ + reg_val = ecore_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE); + SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, + NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT, + eth_gre_enable); + SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, + NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT, + ip_gre_enable); + ecore_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val); + + /* Update DORQ registers */ + ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN, + eth_gre_enable ? 1 : 0); + ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN, + ip_gre_enable ? 1 : 0); +} + +void ecore_set_geneve_dest_port(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, u16 dest_port) +{ + /* Update PRS register */ + ecore_wr(p_hwfn, p_ptt, PRS_REG_NGE_PORT, dest_port); + + /* Update NIG register */ + ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_PORT, dest_port); + + /* Update PBF register */ + ecore_wr(p_hwfn, p_ptt, PBF_REG_NGE_PORT, dest_port); +} + +void ecore_set_geneve_enable(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + bool eth_geneve_enable, bool ip_geneve_enable) +{ + u32 reg_val; + + /* Update PRS register */ + reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN); + SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, + PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT, + eth_geneve_enable); + SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, + PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT, + ip_geneve_enable); + ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val); + if (reg_val) { /* TODO: handle E5 init */ + reg_val = ecore_rd(p_hwfn, p_ptt, + PRS_REG_OUTPUT_FORMAT_4_0_BB_K2); + + /* Update output only if tunnel blocks not included. */ + if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT) + ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2, + (u32)PRS_ETH_TUNN_OUTPUT_FORMAT); + } + + /* Update NIG register */ + ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_ETH_ENABLE, + eth_geneve_enable ? 1 : 0); + ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_IP_ENABLE, + ip_geneve_enable ? 1 : 0); + + /* EDPM with geneve tunnel not supported in BB */ + if (ECORE_IS_BB_B0(p_hwfn->p_dev)) + return; + + /* Update DORQ registers */ + ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN_K2, + eth_geneve_enable ? 1 : 0); + ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN_K2, + ip_geneve_enable ? 
1 : 0);
+}
+
+#define PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET 3
+#define PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT -925189872
+
+void ecore_set_vxlan_no_l2_enable(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ bool enable)
+{
+ u32 reg_val, cfg_mask;
+
+ /* read PRS config register */
+ reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_MSG_INFO);
+
+ /* set VXLAN_NO_L2_ENABLE mask */
+ cfg_mask = (1 << PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET);
+
+ if (enable) {
+ /* set VXLAN_NO_L2_ENABLE flag */
+ reg_val |= cfg_mask;
+
+ /* update PRS FIC Format register */
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
+ (u32)PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT);
+ } else {
+ /* clear VXLAN_NO_L2_ENABLE flag */
+ reg_val &= ~cfg_mask;
+ }
+
+ /* write PRS config register */
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_MSG_INFO, reg_val);
+}
+
+#ifndef UNUSED_HSI_FUNC
+
+#define T_ETH_PACKET_ACTION_GFT_EVENTID 23
+#define PARSER_ETH_CONN_GFT_ACTION_CM_HDR 272
+#define T_ETH_PACKET_MATCH_RFS_EVENTID 25
+#define PARSER_ETH_CONN_CM_HDR 0
+#define CAM_LINE_SIZE sizeof(u32)
+#define RAM_LINE_SIZE sizeof(u64)
+#define REG_SIZE sizeof(u32)
+
+void ecore_gft_disable(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 pf_id)
+{
+ struct regpair ram_line;
+ OSAL_MEMSET(&ram_line, 0, sizeof(ram_line));
+
+ /* disable gft search for PF */
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 0);
+
+ /* Clean ram & cam for next gft session */
+
+ /* Zero camline */
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id, 0);
+
+ /* Zero ramline */
+ ecore_dmae_to_grc(p_hwfn, p_ptt, (u32 *)&ram_line,
+ PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id,
+ sizeof(ram_line) / REG_SIZE);
+
+}
+
+
+void ecore_set_gft_event_id_cm_hdr(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 rfs_cm_hdr_event_id;
+
+ /* Set RFS event ID to be awakened in Tstorm by PRS */
+ rfs_cm_hdr_event_id = ecore_rd(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT);
+ rfs_cm_hdr_event_id |= T_ETH_PACKET_ACTION_GFT_EVENTID <<
+ PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
+ rfs_cm_hdr_event_id |= PARSER_ETH_CONN_GFT_ACTION_CM_HDR <<
+ PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, rfs_cm_hdr_event_id);
+}
+
+void ecore_gft_config(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 pf_id,
+ bool tcp,
+ bool udp,
+ bool ipv4,
+ bool ipv6,
+ enum gft_profile_type profile_type)
+{
+ u32 reg_val, cam_line, search_non_ip_as_gft;
+ struct regpair ram_line = { 0 };
+
+ if (!ipv6 && !ipv4)
+ DP_NOTICE(p_hwfn, true, "gft_config: must accept at least one of - ipv4 or ipv6\n");
+ if (!tcp && !udp)
+ DP_NOTICE(p_hwfn, true, "gft_config: must accept at least one of - udp or tcp\n");
+ if (profile_type >= MAX_GFT_PROFILE_TYPE)
+ DP_NOTICE(p_hwfn, true, "gft_config: unsupported gft_profile_type\n");
+
+ /* Set RFS event ID to be awakened in Tstorm by PRS */
+ reg_val = T_ETH_PACKET_MATCH_RFS_EVENTID <<
+ PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
+ reg_val |= PARSER_ETH_CONN_CM_HDR << PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, reg_val);
+
+ /* Do not load context only cid in PRS on match. */
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_LOAD_L2_FILTER, 0);
+
+ /* Do not use tenant ID exist bit for gft search */
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TENANT_ID, 0);
+
+ /* Set Cam */
+ cam_line = 0;
+ SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_VALID, 1);
+
+ /* Filters are per PF!!
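+ * The CAM line is bound to this PF through the PF_ID field and its mask,
+ * so the configured GFT profile only applies to traffic of this PF.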
*/ + SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_PF_ID_MASK, + GFT_CAM_LINE_MAPPED_PF_ID_MASK_MASK); + SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_PF_ID, pf_id); + + if (!(tcp && udp)) { + SET_FIELD(cam_line, + GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK, + GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_MASK); + if (tcp) + SET_FIELD(cam_line, + GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE, + GFT_PROFILE_TCP_PROTOCOL); + else + SET_FIELD(cam_line, + GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE, + GFT_PROFILE_UDP_PROTOCOL); + } + + if (!(ipv4 && ipv6)) { + SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_IP_VERSION_MASK, 1); + if (ipv4) + SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_IP_VERSION, + GFT_PROFILE_IPV4); + else + SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_IP_VERSION, + GFT_PROFILE_IPV6); + } + + /* Write characteristics to cam */ + ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id, + cam_line); + cam_line = ecore_rd(p_hwfn, p_ptt, + PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id); + + /* Write line to RAM - compare to filter 4 tuple */ + + /* Search no IP as GFT */ + search_non_ip_as_gft = 0; + + /* Tunnel type */ + SET_FIELD(ram_line.lo, GFT_RAM_LINE_TUNNEL_DST_PORT, 1); + SET_FIELD(ram_line.lo, GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL, 1); + + if (profile_type == GFT_PROFILE_TYPE_4_TUPLE) { + SET_FIELD(ram_line.hi, GFT_RAM_LINE_DST_IP, 1); + SET_FIELD(ram_line.hi, GFT_RAM_LINE_SRC_IP, 1); + SET_FIELD(ram_line.hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1); + SET_FIELD(ram_line.lo, GFT_RAM_LINE_ETHERTYPE, 1); + SET_FIELD(ram_line.lo, GFT_RAM_LINE_SRC_PORT, 1); + SET_FIELD(ram_line.lo, GFT_RAM_LINE_DST_PORT, 1); + } else if (profile_type == GFT_PROFILE_TYPE_L4_DST_PORT) { + SET_FIELD(ram_line.hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1); + SET_FIELD(ram_line.lo, GFT_RAM_LINE_ETHERTYPE, 1); + SET_FIELD(ram_line.lo, GFT_RAM_LINE_DST_PORT, 1); + } else if (profile_type == GFT_PROFILE_TYPE_IP_DST_ADDR) { + SET_FIELD(ram_line.hi, GFT_RAM_LINE_DST_IP, 1); + SET_FIELD(ram_line.lo, GFT_RAM_LINE_ETHERTYPE, 1); + } else if (profile_type == GFT_PROFILE_TYPE_IP_SRC_ADDR) { + SET_FIELD(ram_line.hi, GFT_RAM_LINE_SRC_IP, 1); + SET_FIELD(ram_line.lo, GFT_RAM_LINE_ETHERTYPE, 1); + } else if (profile_type == GFT_PROFILE_TYPE_TUNNEL_TYPE) { + SET_FIELD(ram_line.lo, GFT_RAM_LINE_TUNNEL_ETHERTYPE, 1); + + /* Allow tunneled traffic without inner IP */ + search_non_ip_as_gft = 1; + } + + ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_NON_IP_AS_GFT, + search_non_ip_as_gft); + ecore_dmae_to_grc(p_hwfn, p_ptt, (u32 *)&ram_line, + PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id, + sizeof(ram_line) / REG_SIZE); + + /* Set default profile so that no filter match will happen */ + ram_line.lo = 0xffffffff; + ram_line.hi = 0x3ff; + ecore_dmae_to_grc(p_hwfn, p_ptt, (u32 *)&ram_line, + PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * + PRS_GFT_CAM_LINES_NO_MATCH, + sizeof(ram_line) / REG_SIZE); + + /* Enable gft search */ + ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 1); +} + + +#endif /* UNUSED_HSI_FUNC */ + +/* Configure VF zone size mode */ +void ecore_config_vf_zone_size_mode(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, u16 mode, + bool runtime_init) +{ + u32 msdm_vf_size_log = MSTORM_VF_ZONE_DEFAULT_SIZE_LOG; + u32 msdm_vf_offset_mask; + + if (mode == VF_ZONE_SIZE_MODE_DOUBLE) + msdm_vf_size_log += 1; + else if (mode == VF_ZONE_SIZE_MODE_QUAD) + msdm_vf_size_log += 2; + + msdm_vf_offset_mask = (1 << msdm_vf_size_log) - 1; + + if (runtime_init) { + STORE_RT_REG(p_hwfn, + PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET, + msdm_vf_size_log); + STORE_RT_REG(p_hwfn, + 
PGLUE_REG_B_MSDM_OFFSET_MASK_B_RT_OFFSET, + msdm_vf_offset_mask); + } else { + ecore_wr(p_hwfn, p_ptt, + PGLUE_B_REG_MSDM_VF_SHIFT_B, msdm_vf_size_log); + ecore_wr(p_hwfn, p_ptt, + PGLUE_B_REG_MSDM_OFFSET_MASK_B, msdm_vf_offset_mask); + } +} + +/* Get mstorm statistics for offset by VF zone size mode */ +u32 ecore_get_mstorm_queue_stat_offset(struct ecore_hwfn *p_hwfn, + u16 stat_cnt_id, + u16 vf_zone_size_mode) +{ + u32 offset = MSTORM_QUEUE_STAT_OFFSET(stat_cnt_id); + + if ((vf_zone_size_mode != VF_ZONE_SIZE_MODE_DEFAULT) && + (stat_cnt_id > MAX_NUM_PFS)) { + if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_DOUBLE) + offset += (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) * + (stat_cnt_id - MAX_NUM_PFS); + else if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_QUAD) + offset += 3 * (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) * + (stat_cnt_id - MAX_NUM_PFS); + } + + return offset; +} + +/* Get mstorm VF producer offset by VF zone size mode */ +u32 ecore_get_mstorm_eth_vf_prods_offset(struct ecore_hwfn *p_hwfn, + u8 vf_id, + u8 vf_queue_id, + u16 vf_zone_size_mode) +{ + u32 offset = MSTORM_ETH_VF_PRODS_OFFSET(vf_id, vf_queue_id); + + if (vf_zone_size_mode != VF_ZONE_SIZE_MODE_DEFAULT) { + if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_DOUBLE) + offset += (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) * + vf_id; + else if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_QUAD) + offset += 3 * (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) * + vf_id; + } + + return offset; +} + +#ifndef LINUX_REMOVE +#define CRC8_INIT_VALUE 0xFF +#endif +static u8 cdu_crc8_table[CRC8_TABLE_SIZE]; + +/* Calculate and return CDU validation byte per connection type / region / + * cid + */ +static u8 ecore_calc_cdu_validation_byte(struct ecore_hwfn *p_hwfn, + u8 conn_type, u8 region, u32 cid) +{ + static u8 crc8_table_valid; /*automatically initialized to 0*/ + u8 crc, validation_byte = 0; + u32 validation_string = 0; + u32 data_to_crc; + + if (crc8_table_valid == 0) { + OSAL_CRC8_POPULATE(cdu_crc8_table, 0x07); + crc8_table_valid = 1; + } + + /* + * The CRC is calculated on the String-to-compress: + * [31:8] = {CID[31:20],CID[11:0]} + * [7:4] = Region + * [3:0] = Type + */ +#if ((CDU_CONTEXT_VALIDATION_DEFAULT_CFG >> \ + CDU_CONTEXT_VALIDATION_CFG_USE_CID) & 1) + validation_string |= (cid & 0xFFF00000) | ((cid & 0xFFF) << 8); +#endif + +#if ((CDU_CONTEXT_VALIDATION_DEFAULT_CFG >> \ + CDU_CONTEXT_VALIDATION_CFG_USE_REGION) & 1) + validation_string |= ((region & 0xF) << 4); +#endif + +#if ((CDU_CONTEXT_VALIDATION_DEFAULT_CFG >> \ + CDU_CONTEXT_VALIDATION_CFG_USE_TYPE) & 1) + validation_string |= (conn_type & 0xF); +#endif + /* Convert to big-endian and calculate CRC8*/ + data_to_crc = OSAL_BE32_TO_CPU(validation_string); + + crc = OSAL_CRC8(cdu_crc8_table, (u8 *)&data_to_crc, sizeof(data_to_crc), + CRC8_INIT_VALUE); + + /* The validation byte [7:0] is composed: + * for type A validation + * [7] = active configuration bit + * [6:0] = crc[6:0] + * + * for type B validation + * [7] = active configuration bit + * [6:3] = connection_type[3:0] + * [2:0] = crc[2:0] + */ + validation_byte |= ((CDU_CONTEXT_VALIDATION_DEFAULT_CFG >> + CDU_CONTEXT_VALIDATION_CFG_USE_ACTIVE) & 1) << 7; + +#if ((CDU_CONTEXT_VALIDATION_DEFAULT_CFG >> \ + CDU_CONTEXT_VALIDATION_CFG_VALIDATION_TYPE_SHIFT) & 1) + validation_byte |= ((conn_type & 0xF) << 3) | (crc & 0x7); +#else + validation_byte |= crc & 0x7F; +#endif + return validation_byte; +} + +/* Calcualte and set validation bytes for session context */ +void ecore_calc_session_ctx_validation(struct ecore_hwfn *p_hwfn, + void *p_ctx_mem, u16 
ctx_size, + u8 ctx_type, u32 cid) +{ + u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx; + + p_ctx = (u8 *)p_ctx_mem; + + x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]]; + t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]]; + u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]]; + + OSAL_MEMSET(p_ctx, 0, ctx_size); + + *x_val_ptr = ecore_calc_cdu_validation_byte(p_hwfn, ctx_type, 3, cid); + *t_val_ptr = ecore_calc_cdu_validation_byte(p_hwfn, ctx_type, 4, cid); + *u_val_ptr = ecore_calc_cdu_validation_byte(p_hwfn, ctx_type, 5, cid); +} + +/* Calcualte and set validation bytes for task context */ +void ecore_calc_task_ctx_validation(struct ecore_hwfn *p_hwfn, void *p_ctx_mem, + u16 ctx_size, u8 ctx_type, u32 tid) +{ + u8 *p_ctx, *region1_val_ptr; + + p_ctx = (u8 *)p_ctx_mem; + region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]]; + + OSAL_MEMSET(p_ctx, 0, ctx_size); + + *region1_val_ptr = ecore_calc_cdu_validation_byte(p_hwfn, ctx_type, 1, + tid); +} + +/* Memset session context to 0 while preserving validation bytes */ +void ecore_memset_session_ctx(struct ecore_hwfn *p_hwfn, void *p_ctx_mem, + u32 ctx_size, u8 ctx_type) +{ + u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx; + u8 x_val, t_val, u_val; + + p_ctx = (u8 *)p_ctx_mem; + + x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]]; + t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]]; + u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]]; + + x_val = *x_val_ptr; + t_val = *t_val_ptr; + u_val = *u_val_ptr; + + OSAL_MEMSET(p_ctx, 0, ctx_size); + + *x_val_ptr = x_val; + *t_val_ptr = t_val; + *u_val_ptr = u_val; +} + +/* Memset task context to 0 while preserving validation bytes */ +void ecore_memset_task_ctx(struct ecore_hwfn *p_hwfn, void *p_ctx_mem, + u32 ctx_size, u8 ctx_type) +{ + u8 *p_ctx, *region1_val_ptr; + u8 region1_val; + + p_ctx = (u8 *)p_ctx_mem; + region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]]; + + region1_val = *region1_val_ptr; + + OSAL_MEMSET(p_ctx, 0, ctx_size); + + *region1_val_ptr = region1_val; +} + +/* Enable and configure context validation */ +void ecore_enable_context_validation(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + u32 ctx_validation; + + /* Enable validation for connection region 3: CCFC_CTX_VALID0[31:24] */ + ctx_validation = CDU_CONTEXT_VALIDATION_DEFAULT_CFG << 24; + ecore_wr(p_hwfn, p_ptt, CDU_REG_CCFC_CTX_VALID0, ctx_validation); + + /* Enable validation for connection region 5: CCFC_CTX_VALID1[15:8] */ + ctx_validation = CDU_CONTEXT_VALIDATION_DEFAULT_CFG << 8; + ecore_wr(p_hwfn, p_ptt, CDU_REG_CCFC_CTX_VALID1, ctx_validation); + + /* Enable validation for connection region 1: TCFC_CTX_VALID0[15:8] */ + ctx_validation = CDU_CONTEXT_VALIDATION_DEFAULT_CFG << 8; + ecore_wr(p_hwfn, p_ptt, CDU_REG_TCFC_CTX_VALID0, ctx_validation); +} + +#define PHYS_ADDR_DWORDS DIV_ROUND_UP(sizeof(dma_addr_t), 4) +#define OVERLAY_HDR_SIZE_DWORDS (sizeof(struct fw_overlay_buf_hdr) / 4) + +static u32 ecore_get_overlay_addr_ram_addr(struct ecore_hwfn *p_hwfn, + u8 storm_id) +{ + switch (storm_id) { + case 0: return TSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + + TSTORM_OVERLAY_BUF_ADDR_OFFSET; + case 1: return MSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + + MSTORM_OVERLAY_BUF_ADDR_OFFSET; + case 2: return USEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + + USTORM_OVERLAY_BUF_ADDR_OFFSET; + case 3: return XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + + XSTORM_OVERLAY_BUF_ADDR_OFFSET; + case 4: return YSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + + YSTORM_OVERLAY_BUF_ADDR_OFFSET; + case 5: return 
PSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + + PSTORM_OVERLAY_BUF_ADDR_OFFSET; + + default: return 0; + } +} + +struct phys_mem_desc *ecore_fw_overlay_mem_alloc(struct ecore_hwfn *p_hwfn, + const u32 *const fw_overlay_in_buf, + u32 buf_size_in_bytes) +{ + u32 buf_size = buf_size_in_bytes / sizeof(u32), buf_offset = 0; + struct phys_mem_desc *allocated_mem; + + if (!buf_size) + return OSAL_NULL; + + allocated_mem = (struct phys_mem_desc *)OSAL_ZALLOC(p_hwfn->p_dev, + GFP_KERNEL, + NUM_STORMS * + sizeof(struct phys_mem_desc)); + if (!allocated_mem) + return OSAL_NULL; + + OSAL_MEMSET(allocated_mem, 0, NUM_STORMS * + sizeof(struct phys_mem_desc)); + + /* For each Storm, set physical address in RAM */ + while (buf_offset < buf_size) { + struct phys_mem_desc *storm_mem_desc; + struct fw_overlay_buf_hdr *hdr; + u32 storm_buf_size; + u8 storm_id; + + hdr = + (struct fw_overlay_buf_hdr *)&fw_overlay_in_buf[buf_offset]; + storm_buf_size = GET_FIELD(hdr->data, + FW_OVERLAY_BUF_HDR_BUF_SIZE); + storm_id = GET_FIELD(hdr->data, FW_OVERLAY_BUF_HDR_STORM_ID); + storm_mem_desc = allocated_mem + storm_id; + storm_mem_desc->size = storm_buf_size * sizeof(u32); + + /* Allocate physical memory for Storm's overlays buffer */ + storm_mem_desc->virt_addr = + OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, + &storm_mem_desc->phys_addr, + storm_mem_desc->size); + if (!storm_mem_desc->virt_addr) + break; + + /* Skip overlays buffer header */ + buf_offset += OVERLAY_HDR_SIZE_DWORDS; + + /* Copy Storm's overlays buffer to allocated memory */ + OSAL_MEMCPY(storm_mem_desc->virt_addr, + &fw_overlay_in_buf[buf_offset], + storm_mem_desc->size); + + /* Advance to next Storm */ + buf_offset += storm_buf_size; + } + + /* If memory allocation has failed, free all allocated memory */ + if (buf_offset < buf_size) { + ecore_fw_overlay_mem_free(p_hwfn, allocated_mem); + return OSAL_NULL; + } + + return allocated_mem; +} + +void ecore_fw_overlay_init_ram(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct phys_mem_desc *fw_overlay_mem) +{ + u8 storm_id; + + for (storm_id = 0; storm_id < NUM_STORMS; storm_id++) { + struct phys_mem_desc *storm_mem_desc = + (struct phys_mem_desc *)fw_overlay_mem + storm_id; + u32 ram_addr, i; + + /* Skip Storms with no FW overlays */ + if (!storm_mem_desc->virt_addr) + continue; + + /* Calculate overlay RAM GRC address of current PF */ + ram_addr = ecore_get_overlay_addr_ram_addr(p_hwfn, storm_id) + + sizeof(dma_addr_t) * p_hwfn->rel_pf_id; + + /* Write Storm's overlay physical address to RAM */ + for (i = 0; i < PHYS_ADDR_DWORDS; i++, ram_addr += sizeof(u32)) + ecore_wr(p_hwfn, p_ptt, ram_addr, + ((u32 *)&storm_mem_desc->phys_addr)[i]); + } +} + +void ecore_fw_overlay_mem_free(struct ecore_hwfn *p_hwfn, + struct phys_mem_desc *fw_overlay_mem) +{ + u8 storm_id; + + if (!fw_overlay_mem) + return; + + for (storm_id = 0; storm_id < NUM_STORMS; storm_id++) { + struct phys_mem_desc *storm_mem_desc = + (struct phys_mem_desc *)fw_overlay_mem + storm_id; + + /* Free Storm's physical memory */ + if (storm_mem_desc->virt_addr) + OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, + storm_mem_desc->virt_addr, + storm_mem_desc->phys_addr, + storm_mem_desc->size); + } + + /* Free allocated virtual memory */ + OSAL_FREE(p_hwfn->p_dev, fw_overlay_mem); +} diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_init_fw_funcs.h b/src/spdk/dpdk/drivers/net/qede/base/ecore_init_fw_funcs.h new file mode 100644 index 000000000..912451662 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_init_fw_funcs.h @@ -0,0 +1,592 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. + * All rights reserved. + * www.cavium.com + */ + +#ifndef _INIT_FW_FUNCS_H +#define _INIT_FW_FUNCS_H +#include "ecore_hsi_common.h" +#include "ecore_hsi_eth.h" + +/* Physical memory descriptor */ +struct phys_mem_desc { + dma_addr_t phys_addr; + void *virt_addr; + u32 size; /* In bytes */ +}; + +/* Returns the VOQ based on port and TC */ +#define VOQ(port, tc, max_phys_tcs_per_port) \ + ((tc) == PURE_LB_TC ? NUM_OF_PHYS_TCS * MAX_NUM_PORTS_BB + (port) : \ + (port) * (max_phys_tcs_per_port) + (tc)) + +struct init_qm_pq_params; + +/** + * @brief ecore_qm_pf_mem_size - Prepare QM ILT sizes + * + * Returns the required host memory size in 4KB units. + * Must be called before all QM init HSI functions. + * + * @param p_hwfn - HW device data + * @param num_pf_cids - number of connections used by this PF + * @param num_vf_cids - number of connections used by VFs of this PF + * @param num_tids - number of tasks used by this PF + * @param num_pf_pqs - number of PQs used by this PF + * @param num_vf_pqs - number of PQs used by VFs of this PF + * + * @return The required host memory size in 4KB units. + */ +u32 ecore_qm_pf_mem_size(struct ecore_hwfn *p_hwfn, + u32 num_pf_cids, + u32 num_vf_cids, + u32 num_tids, + u16 num_pf_pqs, + u16 num_vf_pqs); + +/** + * @brief ecore_qm_common_rt_init - Prepare QM runtime init values for engine + * phase + * + * @param p_hwfn + * @param max_ports_per_engine - max number of ports per engine in HW + * @param max_phys_tcs_per_port - max number of physical TCs per port in HW + * @param pf_rl_en - enable per-PF rate limiters + * @param pf_wfq_en - enable per-PF WFQ + * @param global_rl_en - enable global rate limiters + * @param vport_wfq_en - enable per-VPORT WFQ + * @param port_params - array with parameters for each port. + * @param global_rl_params - array with parameters for each global RL. + * If OSAL_NULL, global RLs are not configured. + * + * @return 0 on success, -1 on error. + */ +int ecore_qm_common_rt_init(struct ecore_hwfn *p_hwfn, + u8 max_ports_per_engine, + u8 max_phys_tcs_per_port, + bool pf_rl_en, + bool pf_wfq_en, + bool global_rl_en, + bool vport_wfq_en, + struct init_qm_port_params port_params[MAX_NUM_PORTS], + struct init_qm_global_rl_params + global_rl_params[COMMON_MAX_QM_GLOBAL_RLS]); + +/** + * @brief ecore_qm_pf_rt_init Prepare QM runtime init values for the PF phase + * + * @param p_hwfn + * @param p_ptt - ptt window used for writing the registers + * @param pf_id - PF ID + * @param max_phys_tcs_per_port - max number of physical TCs per port in HW + * @param is_pf_loading - indicates if the PF is currently loading, + * i.e. it has no allocated QM resources. + * @param num_pf_cids - number of connections used by this PF + * @param num_vf_cids - number of connections used by VFs of this PF + * @param num_tids - number of tasks used by this PF + * @param start_pq - first Tx PQ ID associated with this PF + * @param num_pf_pqs - number of Tx PQs associated with this PF + * (non-VF) + * @param num_vf_pqs - number of Tx PQs associated with a VF + * @param start_vport - first VPORT ID associated with this PF + * @param num_vports - number of VPORTs associated with this PF + * @param pf_wfq - WFQ weight. if PF WFQ is globally disabled, the weight must + * be 0. otherwise, the weight must be non-zero. + * @param pf_rl - rate limit in Mb/sec units. a value of 0 means don't + * configure. ignored if PF RL is globally disabled. 
+ * @param pq_params - array of size (num_pf_pqs+num_vf_pqs) with parameters for + * each Tx PQ associated with the specified PF. + * @param vport_params - array of size num_vports with parameters for each + * associated VPORT. + * + * @return 0 on success, -1 on error. + */ +int ecore_qm_pf_rt_init(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u8 pf_id, + u8 max_phys_tcs_per_port, + bool is_pf_loading, + u32 num_pf_cids, + u32 num_vf_cids, + u32 num_tids, + u16 start_pq, + u16 num_pf_pqs, + u16 num_vf_pqs, + u16 start_vport, + u16 num_vports, + u16 pf_wfq, + u32 pf_rl, + struct init_qm_pq_params *pq_params, + struct init_qm_vport_params *vport_params); + +/** + * @brief ecore_init_pf_wfq Initializes the WFQ weight of the specified PF + * + * @param p_hwfn + * @param p_ptt - ptt window used for writing the registers + * @param pf_id - PF ID + * @param pf_wfq - WFQ weight. Must be non-zero. + * + * @return 0 on success, -1 on error. + */ +int ecore_init_pf_wfq(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u8 pf_id, + u16 pf_wfq); + +/** + * @brief ecore_init_pf_rl - Initializes the rate limit of the specified PF + * + * @param p_hwfn + * @param p_ptt - ptt window used for writing the registers + * @param pf_id - PF ID + * @param pf_rl - rate limit in Mb/sec units + * + * @return 0 on success, -1 on error. + */ +int ecore_init_pf_rl(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u8 pf_id, + u32 pf_rl); + +/** + * @brief ecore_init_vport_wfq Initializes the WFQ weight of specified VPORT + * + * @param p_hwfn + * @param p_ptt - ptt window used for writing the registers + * @param first_tx_pq_id- An array containing the first Tx PQ ID associated + * with the VPORT for each TC. This array is filled by + * ecore_qm_pf_rt_init + * @param wfq - WFQ weight. Must be non-zero. + * + * @return 0 on success, -1 on error. + */ +int ecore_init_vport_wfq(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u16 first_tx_pq_id[NUM_OF_TCS], + u16 wfq); + +/** + * @brief ecore_init_global_rl - Initializes the rate limit of the specified + * rate limiter. + * + * @param p_hwfn - HW device data + * @param p_ptt - ptt window used for writing the registers + * @param rl_id - RL ID + * @param rate_limit - rate limit in Mb/sec units + * + * @return 0 on success, -1 on error. + */ +int ecore_init_global_rl(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u16 rl_id, + u32 rate_limit); + +/** + * @brief ecore_init_vport_rl - Initializes the rate limit of the specified + * VPORT. + * + * @param p_hwfn - HW device data + * @param p_ptt - ptt window used for writing the registers + * @param vport_id - VPORT ID + * @param vport_rl - rate limit in Mb/sec units + * @param link_speed - link speed in Mbps. + * + * @return 0 on success, -1 on error. + */ +int ecore_init_vport_rl(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u8 vport_id, + u32 vport_rl, + u32 link_speed); + +/** + * @brief ecore_send_qm_stop_cmd Sends a stop command to the QM + * + * @param p_hwfn + * @param p_ptt - ptt window used for writing the registers + * @param is_release_cmd - true for release, false for stop. + * @param is_tx_pq - true for Tx PQs, false for Other PQs. + * @param start_pq - first PQ ID to stop + * @param num_pqs - Number of PQs to stop, starting from start_pq. + * + * @return bool, true if successful, false if timeout occurred while waiting + * for QM command done. 
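+ *
+ * Note: a stop command sets the requested PQ bits in the QM pause mask,
+ * while a release command writes a zero mask to resume those PQs.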
+ */ +bool ecore_send_qm_stop_cmd(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + bool is_release_cmd, + bool is_tx_pq, + u16 start_pq, + u16 num_pqs); +#ifndef UNUSED_HSI_FUNC + +/** + * @brief ecore_init_nig_ets - initializes the NIG ETS arbiter + * + * Based on weight/priority requirements per-TC. + * + * @param p_ptt - ptt window used for writing the registers. + * @param req - the NIG ETS initialization requirements. + * @param is_lb - if set, the loopback port arbiter is initialized, otherwise + * the physical port arbiter is initialized. The pure-LB TC + * requirements are ignored when is_lb is cleared. + */ +void ecore_init_nig_ets(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct init_ets_req *req, + bool is_lb); + +/** + * @brief ecore_init_nig_lb_rl - initializes the NIG LB RLs + * + * Based on global and per-TC rate requirements + * + * @param p_ptt - ptt window used for writing the registers. + * @param req - the NIG LB RLs initialization requirements. + */ +void ecore_init_nig_lb_rl(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct init_nig_lb_rl_req *req); +#endif /* UNUSED_HSI_FUNC */ + +/** + * @brief ecore_init_nig_pri_tc_map - initializes the NIG priority to TC map. + * + * Assumes valid arguments. + * + * @param p_ptt - ptt window used for writing the registers. + * @param req - required mapping from prioirties to TCs. + */ +void ecore_init_nig_pri_tc_map(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct init_nig_pri_tc_map_req *req); + +#ifndef UNUSED_HSI_FUNC +/** + * @brief ecore_init_prs_ets - initializes the PRS Rx ETS arbiter + * + * Based on weight/priority requirements per-TC. + * + * @param p_ptt - ptt window used for writing the registers. + * @param req - the PRS ETS initialization requirements. + */ +void ecore_init_prs_ets(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct init_ets_req *req); +#endif /* UNUSED_HSI_FUNC */ + +#ifndef UNUSED_HSI_FUNC +/** + * @brief ecore_init_brb_ram - initializes BRB RAM sizes per TC + * + * Based on weight/priority requirements per-TC. + * + * @param p_ptt - ptt window used for writing the registers. + * @param req - the BRB RAM initialization requirements. + */ +void ecore_init_brb_ram(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct init_brb_ram_req *req); +#endif /* UNUSED_HSI_FUNC */ + +/** + * @brief ecore_set_vxlan_no_l2_enable - enable or disable VXLAN no L2 parsing + * + * @param p_ptt - ptt window used for writing the registers. + * @param enable - VXLAN no L2 enable flag. + */ +void ecore_set_vxlan_no_l2_enable(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + bool enable); + +#ifndef UNUSED_HSI_FUNC +/** + * @brief ecore_set_port_mf_ovlan_eth_type - initializes DORQ ethType Regs to + * input ethType should Be called + * once per port. + * + * @param p_hwfn - HW device data + * @param ethType - etherType to configure + */ +void ecore_set_port_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn, + u32 ethType); +#endif /* UNUSED_HSI_FUNC */ + +/** + * @brief ecore_set_vxlan_dest_port - initializes vxlan tunnel destination udp + * port. + * + * @param p_hwfn - HW device data + * @param p_ptt - ptt window used for writing the registers. + * @param dest_port - vxlan destination udp port. 
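+ *
+ * The destination port is programmed in the PRS, NIG and PBF blocks.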
+ */ +void ecore_set_vxlan_dest_port(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u16 dest_port); + +/** + * @brief ecore_set_vxlan_enable - enable or disable VXLAN tunnel in HW + * + * @param p_hwfn - HW device data + * @param p_ptt - ptt window used for writing the registers. + * @param vxlan_enable - vxlan enable flag. + */ +void ecore_set_vxlan_enable(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + bool vxlan_enable); + +/** + * @brief ecore_set_gre_enable - enable or disable GRE tunnel in HW + * + * @param p_hwfn - HW device data + * @param p_ptt - ptt window used for writing the registers. + * @param eth_gre_enable - eth GRE enable enable flag. + * @param ip_gre_enable - IP GRE enable enable flag. + */ +void ecore_set_gre_enable(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + bool eth_gre_enable, + bool ip_gre_enable); + +/** + * @brief ecore_set_geneve_dest_port - initializes geneve tunnel destination + * udp port + * + * @param p_hwfn - HW device data + * @param p_ptt - ptt window used for writing the registers. + * @param dest_port - geneve destination udp port. + */ +void ecore_set_geneve_dest_port(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u16 dest_port); + +/** + * @brief ecore_set_geneve_enable - enable or disable GRE tunnel in HW + * + * @param p_hwfn - HW device data + * @param p_ptt - ptt window used for writing the registers. + * @param eth_geneve_enable - eth GENEVE enable enable flag. + * @param ip_geneve_enable - IP GENEVE enable enable flag. + */ +void ecore_set_geneve_enable(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + bool eth_geneve_enable, + bool ip_geneve_enable); +#ifndef UNUSED_HSI_FUNC + +/** +* @brief ecore_set_gft_event_id_cm_hdr - configure GFT event id and cm header +* +* @param p_ptt - ptt window used for writing the registers. +*/ +void ecore_set_gft_event_id_cm_hdr(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt); + +/** + * @brief ecore_gft_disable - Disable GFT + * + * @param p_hwfn - HW device data + * @param p_ptt - ptt window used for writing the registers. + * @param pf_id - pf on which to disable GFT. + */ +void ecore_gft_disable(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u16 pf_id); + +/** + * @brief ecore_gft_config - Enable and configure HW for GFT +* + * @param p_hwfn - HW device data +* @param p_ptt - ptt window used for writing the registers. + * @param pf_id - pf on which to enable GFT. +* @param tcp - set profile tcp packets. +* @param udp - set profile udp packet. +* @param ipv4 - set profile ipv4 packet. +* @param ipv6 - set profile ipv6 packet. + * @param profile_type - define packet same fields. Use enum gft_profile_type. +*/ +void ecore_gft_config(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u16 pf_id, + bool tcp, + bool udp, + bool ipv4, + bool ipv6, + enum gft_profile_type profile_type); +#endif /* UNUSED_HSI_FUNC */ + +/** +* @brief ecore_config_vf_zone_size_mode - Configure VF zone size mode. Must be +* used before first ETH queue started. +* + * @param p_hwfn - HW device data +* @param p_ptt - ptt window used for writing the registers. Don't care + * if runtime_init used. +* @param mode - VF zone size mode. Use enum vf_zone_size_mode. + * @param runtime_init - Set 1 to init runtime registers in engine phase. + * Set 0 if VF zone size mode configured after engine + * phase. 
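+ *
+ * The DOUBLE and QUAD modes increase the default MSTORM VF zone size log
+ * by 1 and 2 respectively.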
+*/ +void ecore_config_vf_zone_size_mode(struct ecore_hwfn *p_hwfn, struct ecore_ptt + *p_ptt, u16 mode, bool runtime_init); + +/** + * @brief ecore_get_mstorm_queue_stat_offset - Get mstorm statistics offset by + * VF zone size mode. +* + * @param p_hwfn - HW device data +* @param stat_cnt_id - statistic counter id +* @param vf_zone_size_mode - VF zone size mode. Use enum vf_zone_size_mode. +*/ +u32 ecore_get_mstorm_queue_stat_offset(struct ecore_hwfn *p_hwfn, + u16 stat_cnt_id, u16 vf_zone_size_mode); + +/** + * @brief ecore_get_mstorm_eth_vf_prods_offset - VF producer offset by VF zone + * size mode. +* + * @param p_hwfn - HW device data +* @param vf_id - vf id. +* @param vf_queue_id - per VF rx queue id. +* @param vf_zone_size_mode - vf zone size mode. Use enum vf_zone_size_mode. +*/ +u32 ecore_get_mstorm_eth_vf_prods_offset(struct ecore_hwfn *p_hwfn, u8 vf_id, u8 + vf_queue_id, u16 vf_zone_size_mode); +/** + * @brief ecore_enable_context_validation - Enable and configure context + * validation. + * + * @param p_hwfn - HW device data + * @param p_ptt - ptt window used for writing the registers. + */ +void ecore_enable_context_validation(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt); +/** + * @brief ecore_calc_session_ctx_validation - Calcualte validation byte for + * session context. + * + * @param p_hwfn - HW device data + * @param p_ctx_mem - pointer to context memory. + * @param ctx_size - context size. + * @param ctx_type - context type. + * @param cid - context cid. + */ +void ecore_calc_session_ctx_validation(struct ecore_hwfn *p_hwfn, + void *p_ctx_mem, + u16 ctx_size, + u8 ctx_type, + u32 cid); + +/** + * @brief ecore_calc_task_ctx_validation - Calcualte validation byte for task + * context. + * + * @param p_hwfn - HW device data + * @param p_ctx_mem - pointer to context memory. + * @param ctx_size - context size. + * @param ctx_type - context type. + * @param tid - context tid. + */ +void ecore_calc_task_ctx_validation(struct ecore_hwfn *p_hwfn, + void *p_ctx_mem, + u16 ctx_size, + u8 ctx_type, + u32 tid); + +/** + * @brief ecore_memset_session_ctx - Memset session context to 0 while + * preserving validation bytes. + * + * @param p_hwfn - HW device data + * @param p_ctx_mem - pointer to context memory. + * @param ctx_size - size to initialzie. + * @param ctx_type - context type. + */ +void ecore_memset_session_ctx(struct ecore_hwfn *p_hwfn, + void *p_ctx_mem, + u32 ctx_size, + u8 ctx_type); + +/** + * @brief ecore_memset_task_ctx - Memset task context to 0 while preserving + * validation bytes. + * + * @param p_hwfn - HW device data + * @param p_ctx_mem - pointer to context memory. + * @param ctx_size - size to initialzie. + * @param ctx_type - context type. + */ +void ecore_memset_task_ctx(struct ecore_hwfn *p_hwfn, + void *p_ctx_mem, + u32 ctx_size, + u8 ctx_type); + + +/******************************************************************************* + * File name : rdma_init.h + * Author : Michael Shteinbok + ******************************************************************************* + ******************************************************************************* + * Description: + * RDMA HSI functions header + * + ******************************************************************************* + * Notes: This is the input to the auto generated file drv_init_fw_funcs.h + * + ******************************************************************************* + */ +#define NUM_STORMS 6 + + + +/** + * @brief ecore_set_rdma_error_level - Sets the RDMA assert level. 
+ * If the severity of the error will be + * above the level, the FW will assert. + * @param p_hwfn - HW device data + * @param p_ptt - ptt window used for writing the registers + * @param assert_level - An array of assert levels for each storm. + */ +void ecore_set_rdma_error_level(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u8 assert_level[NUM_STORMS]); + +/** + * @brief ecore_fw_overlay_mem_alloc - Allocates and fills the FW overlay memory + * + * @param p_hwfn - HW device data + * @param fw_overlay_in_buf - the input FW overlay buffer. + * @param buf_size - the size of the input FW overlay buffer in bytes. + * must be aligned to dwords. + * @param fw_overlay_out_mem - OUT: a pointer to the allocated overlays memory. + * + * @return a pointer to the allocated overlays memory, or OSAL_NULL in case of + * failures. + */ +struct phys_mem_desc *ecore_fw_overlay_mem_alloc(struct ecore_hwfn *p_hwfn, + const u32 *const fw_overlay_in_buf, + u32 buf_size_in_bytes); + +/** + * @brief ecore_fw_overlay_init_ram - Initializes the FW overlay RAM. + * + * @param p_hwfn - HW device data. + * @param p_ptt - ptt window used for writing the registers. + * @param fw_overlay_mem - the allocated FW overlay memory. + */ +void ecore_fw_overlay_init_ram(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct phys_mem_desc *fw_overlay_mem); + +/** + * @brief ecore_fw_overlay_mem_free - Frees the FW overlay memory. + * + * @param p_hwfn - HW device data. + * @param fw_overlay_mem - the allocated FW overlay memory to free. + */ +void ecore_fw_overlay_mem_free(struct ecore_hwfn *p_hwfn, + struct phys_mem_desc *fw_overlay_mem); + +#endif diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_init_ops.c b/src/spdk/dpdk/drivers/net/qede/base/ecore_init_ops.c new file mode 100644 index 000000000..ea964ea2f --- /dev/null +++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_init_ops.c @@ -0,0 +1,585 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. + * All rights reserved. 
+ * www.cavium.com + */ + +/* include the precompiled configuration values - only once */ +#include "bcm_osal.h" +#include "ecore_hsi_common.h" +#include "ecore.h" +#include "ecore_hw.h" +#include "ecore_status.h" +#include "ecore_rt_defs.h" +#include "ecore_init_fw_funcs.h" + +#include "ecore_iro_values.h" +#include "ecore_sriov.h" +#include "reg_addr.h" +#include "ecore_init_ops.h" + +#define ECORE_INIT_MAX_POLL_COUNT 100 +#define ECORE_INIT_POLL_PERIOD_US 500 + +void ecore_init_iro_array(struct ecore_dev *p_dev) +{ + p_dev->iro_arr = iro_arr + E4_IRO_ARR_OFFSET; +} + +/* Runtime configuration helpers */ +void ecore_init_clear_rt_data(struct ecore_hwfn *p_hwfn) +{ + int i; + + for (i = 0; i < RUNTIME_ARRAY_SIZE; i++) + p_hwfn->rt_data.b_valid[i] = false; +} + +void ecore_init_store_rt_reg(struct ecore_hwfn *p_hwfn, u32 rt_offset, u32 val) +{ + if (rt_offset >= RUNTIME_ARRAY_SIZE) { + DP_ERR(p_hwfn, + "Avoid storing %u in rt_data at index %u since RUNTIME_ARRAY_SIZE is %u!\n", + val, rt_offset, RUNTIME_ARRAY_SIZE); + return; + } + + p_hwfn->rt_data.init_val[rt_offset] = val; + p_hwfn->rt_data.b_valid[rt_offset] = true; +} + +void ecore_init_store_rt_agg(struct ecore_hwfn *p_hwfn, + u32 rt_offset, u32 *p_val, osal_size_t size) +{ + osal_size_t i; + + if ((rt_offset + size - 1) >= RUNTIME_ARRAY_SIZE) { + DP_ERR(p_hwfn, + "Avoid storing values in rt_data at indices %u-%u since RUNTIME_ARRAY_SIZE is %u!\n", + rt_offset, (u32)(rt_offset + size - 1), + RUNTIME_ARRAY_SIZE); + return; + } + + for (i = 0; i < size / sizeof(u32); i++) { + p_hwfn->rt_data.init_val[rt_offset + i] = p_val[i]; + p_hwfn->rt_data.b_valid[rt_offset + i] = true; + } +} + +static enum _ecore_status_t ecore_init_rt(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 addr, + u16 rt_offset, + u16 size, bool b_must_dmae) +{ + u32 *p_init_val = &p_hwfn->rt_data.init_val[rt_offset]; + bool *p_valid = &p_hwfn->rt_data.b_valid[rt_offset]; + u16 i, segment; + enum _ecore_status_t rc = ECORE_SUCCESS; + + /* Since not all RT entries are initialized, go over the RT and + * for each segment of initialized values use DMA. + */ + for (i = 0; i < size; i++) { + if (!p_valid[i]) + continue; + + /* In case there isn't any wide-bus configuration here, + * simply write the data instead of using dmae. 
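+ * Wide-bus entries (b_must_dmae set) are always written through DMAE.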
+ */
+ if (!b_must_dmae) {
+ ecore_wr(p_hwfn, p_ptt, addr + (i << 2), p_init_val[i]);
+ continue;
+ }
+
+ /* Start of a new segment */
+ for (segment = 1; i + segment < size; segment++)
+ if (!p_valid[i + segment])
+ break;
+
+ rc = ecore_dmae_host2grc(p_hwfn, p_ptt,
+ (osal_uintptr_t)(p_init_val + i),
+ addr + (i << 2), segment,
+ OSAL_NULL /* default parameters */);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ /* Jump over the entire segment, including invalid entry */
+ i += segment;
+ }
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_init_alloc(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_rt_data *rt_data = &p_hwfn->rt_data;
+
+ if (IS_VF(p_hwfn->p_dev))
+ return ECORE_SUCCESS;
+
+ rt_data->b_valid = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
+ sizeof(bool) * RUNTIME_ARRAY_SIZE);
+ if (!rt_data->b_valid)
+ return ECORE_NOMEM;
+
+ rt_data->init_val = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
+ sizeof(u32) * RUNTIME_ARRAY_SIZE);
+ if (!rt_data->init_val) {
+ OSAL_FREE(p_hwfn->p_dev, rt_data->b_valid);
+ return ECORE_NOMEM;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+void ecore_init_free(struct ecore_hwfn *p_hwfn)
+{
+ OSAL_FREE(p_hwfn->p_dev, p_hwfn->rt_data.init_val);
+ OSAL_FREE(p_hwfn->p_dev, p_hwfn->rt_data.b_valid);
+}
+
+static enum _ecore_status_t ecore_init_array_dmae(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 addr,
+ u32 dmae_data_offset,
+ u32 size, const u32 *p_buf,
+ bool b_must_dmae,
+ bool b_can_dmae)
+{
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ /* Perform DMAE only for lengthy enough sections or for wide-bus */
+#ifndef ASIC_ONLY
+ if ((CHIP_REV_IS_SLOW(p_hwfn->p_dev) && (size < 16)) ||
+ !b_can_dmae || (!b_must_dmae && (size < 16))) {
+#else
+ if (!b_can_dmae || (!b_must_dmae && (size < 16))) {
+#endif
+ const u32 *data = p_buf + dmae_data_offset;
+ u32 i;
+
+ for (i = 0; i < size; i++)
+ ecore_wr(p_hwfn, p_ptt, addr + (i << 2), data[i]);
+ } else {
+ rc = ecore_dmae_host2grc(p_hwfn, p_ptt,
+ (osal_uintptr_t)(p_buf +
+ dmae_data_offset),
+ addr, size,
+ OSAL_NULL /* default parameters */);
+ }
+
+ return rc;
+}
+
+static enum _ecore_status_t ecore_init_fill_dmae(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 addr, u32 fill_count)
+{
+ static u32 zero_buffer[DMAE_MAX_RW_SIZE];
+ struct dmae_params params;
+
+ OSAL_MEMSET(zero_buffer, 0, sizeof(u32) * DMAE_MAX_RW_SIZE);
+
+ OSAL_MEMSET(&params, 0, sizeof(params));
+ SET_FIELD(params.flags, DMAE_PARAMS_RW_REPL_SRC, 0x1);
+ return ecore_dmae_host2grc(p_hwfn, p_ptt,
+ (osal_uintptr_t)&zero_buffer[0],
+ addr, fill_count, &params);
+}
+
+static void ecore_init_fill(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 addr, u32 fill, u32 fill_count)
+{
+ u32 i;
+
+ for (i = 0; i < fill_count; i++, addr += sizeof(u32))
+ ecore_wr(p_hwfn, p_ptt, addr, fill);
+}
+
+static enum _ecore_status_t ecore_init_cmd_array(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct init_write_op *cmd,
+ bool b_must_dmae,
+ bool b_can_dmae)
+{
+ u32 dmae_array_offset = OSAL_LE32_TO_CPU(cmd->args.array_offset);
+ u32 data = OSAL_LE32_TO_CPU(cmd->data);
+ u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
+#ifdef CONFIG_ECORE_ZIPPED_FW
+ u32 offset, output_len, input_len, max_size;
+#endif
+ struct ecore_dev *p_dev = p_hwfn->p_dev;
+ union init_array_hdr *hdr;
+ const u32 *array_data;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ u32 size;
+
+ array_data = p_dev->fw_data->arr_data;
+
+ hdr = (union init_array_hdr *)
+ (uintptr_t)(array_data + dmae_array_offset);
+ data = OSAL_LE32_TO_CPU(hdr->raw.data);
+ switch
(GET_FIELD(data, INIT_ARRAY_RAW_HDR_TYPE)) { + case INIT_ARR_ZIPPED: +#ifdef CONFIG_ECORE_ZIPPED_FW + offset = dmae_array_offset + 1; + input_len = GET_FIELD(data, INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE); + max_size = MAX_ZIPPED_SIZE * 4; + OSAL_MEMSET(p_hwfn->unzip_buf, 0, max_size); + + output_len = OSAL_UNZIP_DATA(p_hwfn, input_len, + (u8 *)(uintptr_t)&array_data[offset], + max_size, + (u8 *)p_hwfn->unzip_buf); + if (output_len) { + rc = ecore_init_array_dmae(p_hwfn, p_ptt, addr, 0, + output_len, + p_hwfn->unzip_buf, + b_must_dmae, b_can_dmae); + } else { + DP_NOTICE(p_hwfn, true, "Failed to unzip dmae data\n"); + rc = ECORE_INVAL; + } +#else + DP_NOTICE(p_hwfn, true, + "Using zipped firmware without config enabled\n"); + rc = ECORE_INVAL; +#endif + break; + case INIT_ARR_PATTERN: + { + u32 repeats = GET_FIELD(data, + INIT_ARRAY_PATTERN_HDR_REPETITIONS); + u32 i; + + size = GET_FIELD(data, + INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE); + + for (i = 0; i < repeats; i++, addr += size << 2) { + rc = ecore_init_array_dmae(p_hwfn, p_ptt, addr, + dmae_array_offset + + 1, size, array_data, + b_must_dmae, + b_can_dmae); + if (rc) + break; + } + break; + } + case INIT_ARR_STANDARD: + size = GET_FIELD(data, INIT_ARRAY_STANDARD_HDR_SIZE); + rc = ecore_init_array_dmae(p_hwfn, p_ptt, addr, + dmae_array_offset + 1, + size, array_data, + b_must_dmae, b_can_dmae); + break; + } + + return rc; +} + +/* init_ops write command */ +static enum _ecore_status_t ecore_init_cmd_wr(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct init_write_op *p_cmd, + bool b_can_dmae) +{ + u32 data = OSAL_LE32_TO_CPU(p_cmd->data); + bool b_must_dmae = GET_FIELD(data, INIT_WRITE_OP_WIDE_BUS); + u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2; + enum _ecore_status_t rc = ECORE_SUCCESS; + + /* Sanitize */ + if (b_must_dmae && !b_can_dmae) { + DP_NOTICE(p_hwfn, true, + "Need to write to %08x for Wide-bus but DMAE isn't" + " allowed\n", + addr); + return ECORE_INVAL; + } + + switch (GET_FIELD(data, INIT_WRITE_OP_SOURCE)) { + case INIT_SRC_INLINE: + data = OSAL_LE32_TO_CPU(p_cmd->args.inline_val); + ecore_wr(p_hwfn, p_ptt, addr, data); + break; + case INIT_SRC_ZEROS: + data = OSAL_LE32_TO_CPU(p_cmd->args.zeros_count); + if (b_must_dmae || (b_can_dmae && (data >= 64))) + rc = ecore_init_fill_dmae(p_hwfn, p_ptt, addr, data); + else + ecore_init_fill(p_hwfn, p_ptt, addr, 0, data); + break; + case INIT_SRC_ARRAY: + rc = ecore_init_cmd_array(p_hwfn, p_ptt, p_cmd, + b_must_dmae, b_can_dmae); + break; + case INIT_SRC_RUNTIME: + rc = ecore_init_rt(p_hwfn, p_ptt, addr, + OSAL_LE16_TO_CPU(p_cmd->args.runtime.offset), + OSAL_LE16_TO_CPU(p_cmd->args.runtime.size), + b_must_dmae); + break; + } + + return rc; +} + +static OSAL_INLINE bool comp_eq(u32 val, u32 expected_val) +{ + return (val == expected_val); +} + +static OSAL_INLINE bool comp_and(u32 val, u32 expected_val) +{ + return (val & expected_val) == expected_val; +} + +static OSAL_INLINE bool comp_or(u32 val, u32 expected_val) +{ + return (val | expected_val) > 0; +} + +/* init_ops read/poll commands */ +static void ecore_init_cmd_rd(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, struct init_read_op *cmd) +{ + bool (*comp_check)(u32 val, u32 expected_val); + u32 delay = ECORE_INIT_POLL_PERIOD_US, val; + u32 data, addr, poll; + int i; + + data = OSAL_LE32_TO_CPU(cmd->op_data); + addr = GET_FIELD(data, INIT_READ_OP_ADDRESS) << 2; + poll = GET_FIELD(data, INIT_READ_OP_POLL_TYPE); + +#ifndef ASIC_ONLY + if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) + delay *= 100; +#endif + + val = 
ecore_rd(p_hwfn, p_ptt, addr); + + if (poll == INIT_POLL_NONE) + return; + + switch (poll) { + case INIT_POLL_EQ: + comp_check = comp_eq; + break; + case INIT_POLL_OR: + comp_check = comp_or; + break; + case INIT_POLL_AND: + comp_check = comp_and; + break; + default: + DP_ERR(p_hwfn, "Invalid poll comparison type %08x\n", + cmd->op_data); + return; + } + + data = OSAL_LE32_TO_CPU(cmd->expected_val); + for (i = 0; + i < ECORE_INIT_MAX_POLL_COUNT && !comp_check(val, data); i++) { + OSAL_UDELAY(delay); + val = ecore_rd(p_hwfn, p_ptt, addr); + } + + if (i == ECORE_INIT_MAX_POLL_COUNT) + DP_ERR(p_hwfn, "Timeout when polling reg: 0x%08x [ Waiting-for: %08x Got: %08x (comparison %08x)]\n", + addr, OSAL_LE32_TO_CPU(cmd->expected_val), val, + OSAL_LE32_TO_CPU(cmd->op_data)); +} + +/* init_ops callbacks entry point */ +static enum _ecore_status_t ecore_init_cmd_cb(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct init_callback_op *p_cmd) +{ + enum _ecore_status_t rc; + + switch (p_cmd->callback_id) { + case DMAE_READY_CB: + rc = ecore_dmae_sanity(p_hwfn, p_ptt, "engine_phase"); + break; + default: + DP_NOTICE(p_hwfn, false, "Unexpected init op callback ID %d\n", + p_cmd->callback_id); + return ECORE_INVAL; + } + + return rc; +} + +static u8 ecore_init_cmd_mode_match(struct ecore_hwfn *p_hwfn, + u16 *p_offset, int modes) +{ + struct ecore_dev *p_dev = p_hwfn->p_dev; + u8 arg1, arg2, tree_val; + const u8 *modes_tree; + + modes_tree = p_dev->fw_data->modes_tree_buf; + tree_val = modes_tree[(*p_offset)++]; + switch (tree_val) { + case INIT_MODE_OP_NOT: + return ecore_init_cmd_mode_match(p_hwfn, p_offset, modes) ^ 1; + case INIT_MODE_OP_OR: + arg1 = ecore_init_cmd_mode_match(p_hwfn, p_offset, modes); + arg2 = ecore_init_cmd_mode_match(p_hwfn, p_offset, modes); + return arg1 | arg2; + case INIT_MODE_OP_AND: + arg1 = ecore_init_cmd_mode_match(p_hwfn, p_offset, modes); + arg2 = ecore_init_cmd_mode_match(p_hwfn, p_offset, modes); + return arg1 & arg2; + default: + tree_val -= MAX_INIT_MODE_OPS; + return (modes & (1 << tree_val)) ? 
1 : 0; + } +} + +static u32 ecore_init_cmd_mode(struct ecore_hwfn *p_hwfn, + struct init_if_mode_op *p_cmd, int modes) +{ + u16 offset = OSAL_LE16_TO_CPU(p_cmd->modes_buf_offset); + + if (ecore_init_cmd_mode_match(p_hwfn, &offset, modes)) + return 0; + else + return GET_FIELD(OSAL_LE32_TO_CPU(p_cmd->op_data), + INIT_IF_MODE_OP_CMD_OFFSET); +} + +static u32 ecore_init_cmd_phase(struct init_if_phase_op *p_cmd, + u32 phase, u32 phase_id) +{ + u32 data = OSAL_LE32_TO_CPU(p_cmd->phase_data); + u32 op_data = OSAL_LE32_TO_CPU(p_cmd->op_data); + + if (!(GET_FIELD(data, INIT_IF_PHASE_OP_PHASE) == phase && + (GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == ANY_PHASE_ID || + GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == phase_id))) + return GET_FIELD(op_data, INIT_IF_PHASE_OP_CMD_OFFSET); + else + return 0; +} + +enum _ecore_status_t ecore_init_run(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + int phase, int phase_id, int modes) +{ + struct ecore_dev *p_dev = p_hwfn->p_dev; + bool b_dmae = (phase != PHASE_ENGINE); + u32 cmd_num, num_init_ops; + union init_op *init; + enum _ecore_status_t rc = ECORE_SUCCESS; + + num_init_ops = p_dev->fw_data->init_ops_size; + init = p_dev->fw_data->init_ops; + +#ifdef CONFIG_ECORE_ZIPPED_FW + p_hwfn->unzip_buf = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC, + MAX_ZIPPED_SIZE * 4); + if (!p_hwfn->unzip_buf) { + DP_NOTICE(p_hwfn, true, "Failed to allocate unzip buffer\n"); + return ECORE_NOMEM; + } +#endif + + for (cmd_num = 0; cmd_num < num_init_ops; cmd_num++) { + union init_op *cmd = &init[cmd_num]; + u32 data = OSAL_LE32_TO_CPU(cmd->raw.op_data); + + switch (GET_FIELD(data, INIT_CALLBACK_OP_OP)) { + case INIT_OP_WRITE: + rc = ecore_init_cmd_wr(p_hwfn, p_ptt, &cmd->write, + b_dmae); + break; + + case INIT_OP_READ: + ecore_init_cmd_rd(p_hwfn, p_ptt, &cmd->read); + break; + + case INIT_OP_IF_MODE: + cmd_num += ecore_init_cmd_mode(p_hwfn, &cmd->if_mode, + modes); + break; + case INIT_OP_IF_PHASE: + cmd_num += ecore_init_cmd_phase(&cmd->if_phase, phase, + phase_id); + break; + case INIT_OP_DELAY: + /* ecore_init_run is always invoked from + * sleep-able context + */ + OSAL_UDELAY(cmd->delay.delay); + break; + + case INIT_OP_CALLBACK: + rc = ecore_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback); + if (phase == PHASE_ENGINE && + cmd->callback.callback_id == DMAE_READY_CB) + b_dmae = true; + break; + } + + if (rc) + break; + } +#ifdef CONFIG_ECORE_ZIPPED_FW + OSAL_FREE(p_hwfn->p_dev, p_hwfn->unzip_buf); +#endif + return rc; +} + +enum _ecore_status_t ecore_init_fw_data(struct ecore_dev *p_dev, +#ifdef CONFIG_ECORE_BINARY_FW + const u8 *fw_data) +#else + const u8 OSAL_UNUSED * fw_data) +#endif +{ + struct ecore_fw_data *fw = p_dev->fw_data; + +#ifdef CONFIG_ECORE_BINARY_FW + struct bin_buffer_hdr *buf_hdr; + u32 offset, len; + + if (!fw_data) { + DP_NOTICE(p_dev, true, "Invalid fw data\n"); + return ECORE_INVAL; + } + + buf_hdr = (struct bin_buffer_hdr *)(uintptr_t)fw_data; + + offset = buf_hdr[BIN_BUF_INIT_FW_VER_INFO].offset; + fw->fw_ver_info = (struct fw_ver_info *)((uintptr_t)(fw_data + offset)); + + offset = buf_hdr[BIN_BUF_INIT_CMD].offset; + fw->init_ops = (union init_op *)((uintptr_t)(fw_data + offset)); + + offset = buf_hdr[BIN_BUF_INIT_VAL].offset; + fw->arr_data = (u32 *)((uintptr_t)(fw_data + offset)); + + offset = buf_hdr[BIN_BUF_INIT_MODE_TREE].offset; + fw->modes_tree_buf = (u8 *)((uintptr_t)(fw_data + offset)); + len = buf_hdr[BIN_BUF_INIT_CMD].length; + fw->init_ops_size = len / sizeof(struct init_raw_op); + offset = buf_hdr[BIN_BUF_INIT_OVERLAYS].offset; + 
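/* Like the sections above, the overlays blob is located via its + * bin_buffer_hdr entry: 'offset' is relative to the start of fw_data + * and 'length' gives the section size. + */ + 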
fw->fw_overlays = (u32 *)(fw_data + offset); + len = buf_hdr[BIN_BUF_INIT_OVERLAYS].length; + fw->fw_overlays_len = len; +#else + fw->init_ops = (union init_op *)init_ops; + fw->arr_data = (u32 *)init_val; + fw->modes_tree_buf = (u8 *)modes_tree_buf; + fw->init_ops_size = init_ops_size; + fw->fw_overlays = fw_overlays; + fw->fw_overlays_len = sizeof(fw_overlays); +#endif + + return ECORE_SUCCESS; +} diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_init_ops.h b/src/spdk/dpdk/drivers/net/qede/base/ecore_init_ops.h new file mode 100644 index 000000000..0cbf293b3 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_init_ops.h @@ -0,0 +1,100 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. + * All rights reserved. + * www.cavium.com + */ + +#ifndef __ECORE_INIT_OPS__ +#define __ECORE_INIT_OPS__ + +#include "ecore.h" + +/** + * @brief ecore_init_iro_array - init iro_arr. + * + * + * @param p_dev + */ +void ecore_init_iro_array(struct ecore_dev *p_dev); + +/** + * @brief ecore_init_run - Run the init-sequence. + * + * + * @param p_hwfn + * @param p_ptt + * @param phase + * @param phase_id + * @param modes + * @return _ecore_status_t + */ +enum _ecore_status_t ecore_init_run(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + int phase, + int phase_id, + int modes); + +/** + * @brief ecore_init_alloc - Allocate the RT array and store the 'values' pointers. + * + * + * @param p_hwfn + * + * @return _ecore_status_t + */ +enum _ecore_status_t ecore_init_alloc(struct ecore_hwfn *p_hwfn); + +/** + * @brief ecore_init_free - Free the RT array. + * + * + * @param p_hwfn + */ +void ecore_init_free(struct ecore_hwfn *p_hwfn); + + +/** + * @brief ecore_init_clear_rt_data - Clears the runtime init array. + * + * + * @param p_hwfn + */ +void ecore_init_clear_rt_data(struct ecore_hwfn *p_hwfn); + +/** + * @brief ecore_init_store_rt_reg - Store a configuration value in the RT array. + * + * + * @param p_hwfn + * @param rt_offset + * @param val + */ +void ecore_init_store_rt_reg(struct ecore_hwfn *p_hwfn, + u32 rt_offset, + u32 val); + +#define STORE_RT_REG(hwfn, offset, val) \ + ecore_init_store_rt_reg(hwfn, offset, val) + +#define OVERWRITE_RT_REG(hwfn, offset, val) \ + ecore_init_store_rt_reg(hwfn, offset, val) + +/** + * @brief ecore_init_store_rt_agg - Store an aggregate of configuration values in the RT array. + * + * + * @param p_hwfn + * @param rt_offset + * @param val + * @param size + */ + +void ecore_init_store_rt_agg(struct ecore_hwfn *p_hwfn, + u32 rt_offset, + u32 *val, + osal_size_t size); + +#define STORE_RT_REG_AGG(hwfn, offset, val) \ + ecore_init_store_rt_agg(hwfn, offset, (u32 *)&(val), sizeof(val)) + +#endif /* __ECORE_INIT_OPS__ */ diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_int.c b/src/spdk/dpdk/drivers/net/qede/base/ecore_int.c new file mode 100644 index 000000000..4207b1853 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_int.c @@ -0,0 +1,2773 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. + * All rights reserved. 
+ * www.cavium.com + */ + +#include + +#include "bcm_osal.h" +#include "ecore.h" +#include "ecore_spq.h" +#include "ecore_gtt_reg_addr.h" +#include "ecore_init_ops.h" +#include "ecore_rt_defs.h" +#include "ecore_int.h" +#include "reg_addr.h" +#include "ecore_hw.h" +#include "ecore_sriov.h" +#include "ecore_vf.h" +#include "ecore_hw_defs.h" +#include "ecore_hsi_common.h" +#include "ecore_mcp.h" + +struct ecore_pi_info { + ecore_int_comp_cb_t comp_cb; + void *cookie; /* Will be sent to the compl cb function */ +}; + +struct ecore_sb_sp_info { + struct ecore_sb_info sb_info; + + /* Per protocol index data */ + struct ecore_pi_info pi_info_arr[MAX_PIS_PER_SB]; + osal_size_t pi_info_arr_size; +}; + +enum ecore_attention_type { + ECORE_ATTN_TYPE_ATTN, + ECORE_ATTN_TYPE_PARITY, +}; + +#define SB_ATTN_ALIGNED_SIZE(p_hwfn) \ + ALIGNED_TYPE_SIZE(struct atten_status_block, p_hwfn) + +struct aeu_invert_reg_bit { + char bit_name[30]; + +#define ATTENTION_PARITY (1 << 0) + +#define ATTENTION_LENGTH_MASK (0x00000ff0) +#define ATTENTION_LENGTH_SHIFT (4) +#define ATTENTION_LENGTH(flags) (((flags) & ATTENTION_LENGTH_MASK) >> \ + ATTENTION_LENGTH_SHIFT) +#define ATTENTION_SINGLE (1 << ATTENTION_LENGTH_SHIFT) +#define ATTENTION_PAR (ATTENTION_SINGLE | ATTENTION_PARITY) +#define ATTENTION_PAR_INT ((2 << ATTENTION_LENGTH_SHIFT) | \ + ATTENTION_PARITY) + +/* Multiple bits start with this offset */ +#define ATTENTION_OFFSET_MASK (0x000ff000) +#define ATTENTION_OFFSET_SHIFT (12) + +#define ATTENTION_BB_MASK (0xf) +#define ATTENTION_BB_SHIFT (20) +#define ATTENTION_BB(value) ((value) << ATTENTION_BB_SHIFT) +#define ATTENTION_BB_DIFFERENT (1 << 24) + +#define ATTENTION_CLEAR_ENABLE (1 << 28) + unsigned int flags; + + /* Callback to call if attention will be triggered */ + enum _ecore_status_t (*cb)(struct ecore_hwfn *p_hwfn); + + enum block_id block_index; +}; + +struct aeu_invert_reg { + struct aeu_invert_reg_bit bits[32]; +}; + +#define MAX_ATTN_GRPS (8) +#define NUM_ATTN_REGS (9) + +static enum _ecore_status_t ecore_mcp_attn_cb(struct ecore_hwfn *p_hwfn) +{ + u32 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_STATE); + + DP_INFO(p_hwfn->p_dev, "MCP_REG_CPU_STATE: %08x - Masking...\n", tmp); + ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_EVENT_MASK, 0xffffffff); + + return ECORE_SUCCESS; +} + +#define ECORE_PSWHST_ATTENTION_DISABLED_PF_MASK (0x3c000) +#define ECORE_PSWHST_ATTENTION_DISABLED_PF_SHIFT (14) +#define ECORE_PSWHST_ATTENTION_DISABLED_VF_MASK (0x03fc0) +#define ECORE_PSWHST_ATTENTION_DISABLED_VF_SHIFT (6) +#define ECORE_PSWHST_ATTENTION_DISABLED_VALID_MASK (0x00020) +#define ECORE_PSWHST_ATTENTION_DISABLED_VALID_SHIFT (5) +#define ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_MASK (0x0001e) +#define ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_SHIFT (1) +#define ECORE_PSWHST_ATTENTION_DISABLED_WRITE_MASK (0x1) +#define ECORE_PSWHST_ATTNETION_DISABLED_WRITE_SHIFT (0) +#define ECORE_PSWHST_ATTENTION_VF_DISABLED (0x1) +#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS (0x1) +#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_MASK (0x1) +#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_SHIFT (0) +#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_MASK (0x1e) +#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_SHIFT (1) +#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_MASK (0x20) +#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_SHIFT (5) +#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_MASK (0x3fc0) +#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_SHIFT (6) +#define 
ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_MASK (0x3c000) +#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_SHIFT (14) +#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_MASK (0x3fc0000) +#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_SHIFT (18) +static enum _ecore_status_t ecore_pswhst_attn_cb(struct ecore_hwfn *p_hwfn) +{ + u32 tmp = + ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, + PSWHST_REG_VF_DISABLED_ERROR_VALID); + + /* Disabled VF access */ + if (tmp & ECORE_PSWHST_ATTENTION_VF_DISABLED) { + u32 addr, data; + + addr = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, + PSWHST_REG_VF_DISABLED_ERROR_ADDRESS); + data = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, + PSWHST_REG_VF_DISABLED_ERROR_DATA); + DP_INFO(p_hwfn->p_dev, + "PF[0x%02x] VF [0x%02x] [Valid 0x%02x] Client [0x%02x]" + " Write [0x%02x] Addr [0x%08x]\n", + (u8)((data & ECORE_PSWHST_ATTENTION_DISABLED_PF_MASK) + >> ECORE_PSWHST_ATTENTION_DISABLED_PF_SHIFT), + (u8)((data & ECORE_PSWHST_ATTENTION_DISABLED_VF_MASK) + >> ECORE_PSWHST_ATTENTION_DISABLED_VF_SHIFT), + (u8)((data & + ECORE_PSWHST_ATTENTION_DISABLED_VALID_MASK) >> + ECORE_PSWHST_ATTENTION_DISABLED_VALID_SHIFT), + (u8)((data & + ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_MASK) >> + ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_SHIFT), + (u8)((data & + ECORE_PSWHST_ATTENTION_DISABLED_WRITE_MASK) >> + ECORE_PSWHST_ATTNETION_DISABLED_WRITE_SHIFT), + addr); + } + + tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, + PSWHST_REG_INCORRECT_ACCESS_VALID); + if (tmp & ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS) { + u32 addr, data, length; + + addr = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, + PSWHST_REG_INCORRECT_ACCESS_ADDRESS); + data = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, + PSWHST_REG_INCORRECT_ACCESS_DATA); + length = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, + PSWHST_REG_INCORRECT_ACCESS_LENGTH); + + DP_INFO(p_hwfn->p_dev, + "Incorrect access to %08x of length %08x - PF [%02x]" + " VF [%04x] [valid %02x] client [%02x] write [%02x]" + " Byte-Enable [%04x] [%08x]\n", + addr, length, + (u8)((data & + ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_MASK) >> + ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_SHIFT), + (u8)((data & + ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_MASK) >> + ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_SHIFT), + (u8)((data & + ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_MASK) >> + ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_SHIFT), + (u8)((data & + ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_MASK) >> + ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_SHIFT), + (u8)((data & + ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_MASK) >> + ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_SHIFT), + (u8)((data & + ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_MASK) >> + ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_SHIFT), + data); + } + + /* TODO - We know 'some' of these are legal due to virtualization, + * but is it true for all of them? 
+ */ + return ECORE_SUCCESS; +} + +/* Register GRC_REG_TIMEOUT_ATTN_ACCESS_VALID */ +#define ECORE_GRC_ATTENTION_VALID_BIT_MASK (0x1) +#define ECORE_GRC_ATTENTION_VALID_BIT_SHIFT (0) + +#define ECORE_GRC_ATTENTION_ADDRESS_MASK (0x7fffff << 0) +#define ECORE_GRC_ATTENTION_RDWR_BIT (1 << 23) +#define ECORE_GRC_ATTENTION_MASTER_MASK (0xf << 24) +#define ECORE_GRC_ATTENTION_MASTER_SHIFT (24) +#define ECORE_GRC_ATTENTION_PF_MASK (0xf) +#define ECORE_GRC_ATTENTION_VF_MASK (0xff << 4) +#define ECORE_GRC_ATTENTION_VF_SHIFT (4) +#define ECORE_GRC_ATTENTION_PRIV_MASK (0x3 << 14) +#define ECORE_GRC_ATTENTION_PRIV_SHIFT (14) +#define ECORE_GRC_ATTENTION_PRIV_VF (0) +static const char *grc_timeout_attn_master_to_str(u8 master) +{ + switch (master) { + case 1: + return "PXP"; + case 2: + return "MCP"; + case 3: + return "MSDM"; + case 4: + return "PSDM"; + case 5: + return "YSDM"; + case 6: + return "USDM"; + case 7: + return "TSDM"; + case 8: + return "XSDM"; + case 9: + return "DBU"; + case 10: + return "DMAE"; + default: + return "Unknown"; + } +} + +static enum _ecore_status_t ecore_grc_attn_cb(struct ecore_hwfn *p_hwfn) +{ + enum _ecore_status_t rc = ECORE_SUCCESS; + u32 tmp, tmp2; + + /* We've already cleared the timeout interrupt register, so we learn + * of interrupts via the validity register. If it is not a timeout do + * nothing. It is too late at this stage to differentiate spurious + * interrupt from fatal grc attention. + */ + tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, + GRC_REG_TIMEOUT_ATTN_ACCESS_VALID); + if (!(GET_FIELD(tmp, ECORE_GRC_ATTENTION_VALID_BIT))) + goto out; + + /* Read the GRC timeout information */ + tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, + GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_0); + tmp2 = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, + GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_1); + + DP_NOTICE(p_hwfn->p_dev, false, + "GRC timeout [%08x:%08x] - %s Address [%08x] [Master %s] [PF: %02x %s %02x]\n", + tmp2, tmp, + (tmp & ECORE_GRC_ATTENTION_RDWR_BIT) ? "Write to" + : "Read from", + (tmp & ECORE_GRC_ATTENTION_ADDRESS_MASK) << 2, + grc_timeout_attn_master_to_str( + (tmp & ECORE_GRC_ATTENTION_MASTER_MASK) >> + ECORE_GRC_ATTENTION_MASTER_SHIFT), + (tmp2 & ECORE_GRC_ATTENTION_PF_MASK), + (((tmp2 & ECORE_GRC_ATTENTION_PRIV_MASK) >> + ECORE_GRC_ATTENTION_PRIV_SHIFT) == + ECORE_GRC_ATTENTION_PRIV_VF) ? 
"VF" : "(Irrelevant:)", + (tmp2 & ECORE_GRC_ATTENTION_VF_MASK) >> + ECORE_GRC_ATTENTION_VF_SHIFT); + + /* Clean the validity bit */ + ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, + GRC_REG_TIMEOUT_ATTN_ACCESS_VALID, 0); +out: + return rc; +} + +#define ECORE_PGLUE_ATTENTION_VALID (1 << 29) +#define ECORE_PGLUE_ATTENTION_RD_VALID (1 << 26) +#define ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK (0xf << 20) +#define ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT (20) +#define ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID (1 << 19) +#define ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK (0xff << 24) +#define ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT (24) +#define ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR (1 << 21) +#define ECORE_PGLUE_ATTENTION_DETAILS2_BME (1 << 22) +#define ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN (1 << 23) +#define ECORE_PGLUE_ATTENTION_ICPL_VALID (1 << 23) +#define ECORE_PGLUE_ATTENTION_ZLR_VALID (1 << 25) +#define ECORE_PGLUE_ATTENTION_ILT_VALID (1 << 23) + +enum _ecore_status_t ecore_pglueb_rbc_attn_handler(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + bool is_hw_init) +{ + u32 tmp; + char str[512] = {0}; + + tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS2); + if (tmp & ECORE_PGLUE_ATTENTION_VALID) { + u32 addr_lo, addr_hi, details; + + addr_lo = ecore_rd(p_hwfn, p_ptt, + PGLUE_B_REG_TX_ERR_WR_ADD_31_0); + addr_hi = ecore_rd(p_hwfn, p_ptt, + PGLUE_B_REG_TX_ERR_WR_ADD_63_32); + details = ecore_rd(p_hwfn, p_ptt, + PGLUE_B_REG_TX_ERR_WR_DETAILS); + OSAL_SNPRINTF(str, 512, + "Illegal write by chip to [%08x:%08x] blocked. Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x] Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n", + addr_hi, addr_lo, details, + (u8)((details & + ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK) >> + ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT), + (u8)((details & + ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK) >> + ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT), + (u8)((details & + ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0), + tmp, + (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? + 1 : 0), + (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_BME) ? + 1 : 0), + (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN) ? + 1 : 0)); + if (is_hw_init) + DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "%s", str); + else + DP_NOTICE(p_hwfn, false, "%s", str); + } + + tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_RD_DETAILS2); + if (tmp & ECORE_PGLUE_ATTENTION_RD_VALID) { + u32 addr_lo, addr_hi, details; + + addr_lo = ecore_rd(p_hwfn, p_ptt, + PGLUE_B_REG_TX_ERR_RD_ADD_31_0); + addr_hi = ecore_rd(p_hwfn, p_ptt, + PGLUE_B_REG_TX_ERR_RD_ADD_63_32); + details = ecore_rd(p_hwfn, p_ptt, + PGLUE_B_REG_TX_ERR_RD_DETAILS); + + DP_NOTICE(p_hwfn, false, + "Illegal read by chip from [%08x:%08x] blocked. Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x] Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n", + addr_hi, addr_lo, details, + (u8)((details & + ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK) >> + ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT), + (u8)((details & + ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK) >> + ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT), + (u8)((details & + ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0), + tmp, + (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? + 1 : 0), + (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_BME) ? + 1 : 0), + (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN) ? 
+ 1 : 0)); + } + + tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL); + if (tmp & ECORE_PGLUE_ATTENTION_ICPL_VALID) + DP_NOTICE(p_hwfn, false, "ICPL error - %08x\n", tmp); + + tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS); + if (tmp & ECORE_PGLUE_ATTENTION_ZLR_VALID) { + u32 addr_hi, addr_lo; + + addr_lo = ecore_rd(p_hwfn, p_ptt, + PGLUE_B_REG_MASTER_ZLR_ERR_ADD_31_0); + addr_hi = ecore_rd(p_hwfn, p_ptt, + PGLUE_B_REG_MASTER_ZLR_ERR_ADD_63_32); + + DP_NOTICE(p_hwfn, false, + "ZLR error - %08x [Address %08x:%08x]\n", + tmp, addr_hi, addr_lo); + } + + tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_ILT_ERR_DETAILS2); + if (tmp & ECORE_PGLUE_ATTENTION_ILT_VALID) { + u32 addr_hi, addr_lo, details; + + addr_lo = ecore_rd(p_hwfn, p_ptt, + PGLUE_B_REG_VF_ILT_ERR_ADD_31_0); + addr_hi = ecore_rd(p_hwfn, p_ptt, + PGLUE_B_REG_VF_ILT_ERR_ADD_63_32); + details = ecore_rd(p_hwfn, p_ptt, + PGLUE_B_REG_VF_ILT_ERR_DETAILS); + + DP_NOTICE(p_hwfn, false, + "ILT error - Details %08x Details2 %08x [Address %08x:%08x]\n", + details, tmp, addr_hi, addr_lo); + } + + /* Clear the indications */ + ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_LATCHED_ERRORS_CLR, (1 << 2)); + + return ECORE_SUCCESS; +} + +static enum _ecore_status_t ecore_pglueb_rbc_attn_cb(struct ecore_hwfn *p_hwfn) +{ + return ecore_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_dpc_ptt, false); +} + +static enum _ecore_status_t ecore_fw_assertion(struct ecore_hwfn *p_hwfn) +{ + DP_NOTICE(p_hwfn, false, "FW assertion!\n"); + + ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_FW_ASSERT); + + return ECORE_INVAL; +} + +static enum _ecore_status_t +ecore_general_attention_35(struct ecore_hwfn *p_hwfn) +{ + DP_INFO(p_hwfn, "General attention 35!\n"); + + return ECORE_SUCCESS; +} + +#define ECORE_DORQ_ATTENTION_REASON_MASK (0xfffff) +#define ECORE_DORQ_ATTENTION_OPAQUE_MASK (0xffff) +#define ECORE_DORQ_ATTENTION_OPAQUE_SHIFT (0x0) +#define ECORE_DORQ_ATTENTION_SIZE_MASK (0x7f) +#define ECORE_DORQ_ATTENTION_SIZE_SHIFT (16) + +#define ECORE_DB_REC_COUNT 1000 +#define ECORE_DB_REC_INTERVAL 100 + +static enum _ecore_status_t ecore_db_rec_flush_queue(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + u32 count = ECORE_DB_REC_COUNT; + u32 usage = 1; + + /* Wait for usage to reach zero or the count to run out. This is necessary since + * EDPM doorbell transactions can take multiple 64b cycles, and as such + * can "split" over the PCI. A doorbell drop can thus happen with + * half an EDPM in the queue and the other half dropped. Another EDPM + * doorbell to the same address (from the doorbell recovery mechanism or + * from the doorbelling entity) could have its first half dropped and its + * second half interpreted as a continuation of the first. To prevent such + * malformed doorbells from reaching the device, flush the queue before + * releasing the overflow sticky indication. + */ + while (count-- && usage) { + usage = ecore_rd(p_hwfn, p_ptt, DORQ_REG_PF_USAGE_CNT); + OSAL_UDELAY(ECORE_DB_REC_INTERVAL); + } + + /* should have been depleted by now */ + if (usage) { + DP_NOTICE(p_hwfn->p_dev, false, + "DB recovery: doorbell usage failed to zero after %d usec. 
usage was %x\n", + ECORE_DB_REC_INTERVAL * ECORE_DB_REC_COUNT, usage); + return ECORE_TIMEOUT; + } + + return ECORE_SUCCESS; +} + +enum _ecore_status_t ecore_db_rec_handler(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + u32 overflow; + enum _ecore_status_t rc; + + overflow = ecore_rd(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY); + DP_NOTICE(p_hwfn, false, "PF Overflow sticky 0x%x\n", overflow); + if (!overflow) { + ecore_db_recovery_execute(p_hwfn, DB_REC_ONCE); + return ECORE_SUCCESS; + } + + if (ecore_edpm_enabled(p_hwfn)) { + rc = ecore_db_rec_flush_queue(p_hwfn, p_ptt); + if (rc != ECORE_SUCCESS) + return rc; + } + + /* flush any pending (e)dpm as they may never arrive */ + ecore_wr(p_hwfn, p_ptt, DORQ_REG_DPM_FORCE_ABORT, 0x1); + + /* release overflow sticky indication (stop silently dropping + * everything) + */ + ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY, 0x0); + + /* repeat all last doorbells (doorbell drop recovery) */ + ecore_db_recovery_execute(p_hwfn, DB_REC_REAL_DEAL); + + return ECORE_SUCCESS; +} + +static enum _ecore_status_t ecore_dorq_attn_cb(struct ecore_hwfn *p_hwfn) +{ + u32 int_sts, first_drop_reason, details, address, all_drops_reason; + struct ecore_ptt *p_ptt = p_hwfn->p_dpc_ptt; + enum _ecore_status_t rc; + + int_sts = ecore_rd(p_hwfn, p_ptt, DORQ_REG_INT_STS); + DP_NOTICE(p_hwfn->p_dev, false, "DORQ attention. int_sts was %x\n", + int_sts); + + /* int_sts may be zero if all PFs were interrupted for doorbell + * overflow but another one has already handled it. We can abort here; + * if this PF also requires overflow recovery, we will be interrupted again. + */ + if (!int_sts) + return ECORE_SUCCESS; + + /* Check whether a doorbell drop or an overflow happened */ + if (int_sts & (DORQ_REG_INT_STS_DB_DROP | + DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR)) { + /* obtain data about db drop/overflow */ + first_drop_reason = ecore_rd(p_hwfn, p_ptt, + DORQ_REG_DB_DROP_REASON) & + ECORE_DORQ_ATTENTION_REASON_MASK; + details = ecore_rd(p_hwfn, p_ptt, + DORQ_REG_DB_DROP_DETAILS); + address = ecore_rd(p_hwfn, p_ptt, + DORQ_REG_DB_DROP_DETAILS_ADDRESS); + all_drops_reason = ecore_rd(p_hwfn, p_ptt, + DORQ_REG_DB_DROP_DETAILS_REASON); + + /* log info */ + DP_NOTICE(p_hwfn->p_dev, false, + "Doorbell drop occurred\n" + "Address\t\t0x%08x\t(second BAR address)\n" + "FID\t\t0x%04x\t\t(Opaque FID)\n" + "Size\t\t0x%04x\t\t(in bytes)\n" + "1st drop reason\t0x%08x\t(details on first drop since last handling)\n" + "Sticky reasons\t0x%08x\t(all drop reasons since last handling)\n", + address, + GET_FIELD(details, ECORE_DORQ_ATTENTION_OPAQUE), + GET_FIELD(details, ECORE_DORQ_ATTENTION_SIZE) * 4, + first_drop_reason, all_drops_reason); + + rc = ecore_db_rec_handler(p_hwfn, p_ptt); + OSAL_DB_REC_OCCURRED(p_hwfn); + if (rc != ECORE_SUCCESS) + return rc; + + /* clear the doorbell drop details and prepare for next drop */ + ecore_wr(p_hwfn, p_ptt, DORQ_REG_DB_DROP_DETAILS_REL, 0); + + /* Mark the interrupt as handled (note: even if the drop was due to a + * reason other than overflow, we mark it as handled) + */ + ecore_wr(p_hwfn, p_ptt, DORQ_REG_INT_STS_WR, + DORQ_REG_INT_STS_DB_DROP | + DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR); + + /* If there are no indications other than drop indications, + * report success + */ + if ((int_sts & ~(DORQ_REG_INT_STS_DB_DROP | + DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR | + DORQ_REG_INT_STS_DORQ_FIFO_AFULL)) == 0) + return ECORE_SUCCESS; + } + + /* Some other indication was present - non-recoverable */ + DP_INFO(p_hwfn, "DORQ fatal attention\n"); + + return ECORE_INVAL; +} + +static enum _ecore_status_t 
ecore_tm_attn_cb(struct ecore_hwfn *p_hwfn) +{ +#ifndef ASIC_ONLY + if (CHIP_REV_IS_EMUL_B0(p_hwfn->p_dev)) { + u32 val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, + TM_REG_INT_STS_1); + + if (val & ~(TM_REG_INT_STS_1_PEND_TASK_SCAN | + TM_REG_INT_STS_1_PEND_CONN_SCAN)) + return ECORE_INVAL; + + if (val & (TM_REG_INT_STS_1_PEND_TASK_SCAN | + TM_REG_INT_STS_1_PEND_CONN_SCAN)) + DP_INFO(p_hwfn, + "TM attention on emulation - most likely" + " results of clock-ratios\n"); + val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, TM_REG_INT_MASK_1); + val |= TM_REG_INT_MASK_1_PEND_CONN_SCAN | + TM_REG_INT_MASK_1_PEND_TASK_SCAN; + ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, TM_REG_INT_MASK_1, val); + + return ECORE_SUCCESS; + } +#endif + + return ECORE_INVAL; +} + +/* Instead of major changes to the data-structure, we have a some 'special' + * identifiers for sources that changed meaning between adapters. + */ +enum aeu_invert_reg_special_type { + AEU_INVERT_REG_SPECIAL_CNIG_0, + AEU_INVERT_REG_SPECIAL_CNIG_1, + AEU_INVERT_REG_SPECIAL_CNIG_2, + AEU_INVERT_REG_SPECIAL_CNIG_3, + AEU_INVERT_REG_SPECIAL_MCP_UMP_TX, + AEU_INVERT_REG_SPECIAL_MCP_SCPAD, + AEU_INVERT_REG_SPECIAL_MAX, +}; + +static struct aeu_invert_reg_bit +aeu_descs_special[AEU_INVERT_REG_SPECIAL_MAX] = { + {"CNIG port 0", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG}, + {"CNIG port 1", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG}, + {"CNIG port 2", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG}, + {"CNIG port 3", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG}, + {"MCP Latched ump_tx", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID}, + {"MCP Latched scratchpad", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID}, +}; + +/* Notice aeu_invert_reg must be defined in the same order of bits as HW; */ +static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] = { + { + { /* After Invert 1 */ + {"GPIO0 function%d", (32 << ATTENTION_LENGTH_SHIFT), OSAL_NULL, + MAX_BLOCK_ID}, + } + }, + + { + { /* After Invert 2 */ + {"PGLUE config_space", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, + {"PGLUE misc_flr", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, + {"PGLUE B RBC", ATTENTION_PAR_INT, ecore_pglueb_rbc_attn_cb, + BLOCK_PGLUE_B}, + {"PGLUE misc_mctp", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, + {"Flash event", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, + {"SMB event", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, + {"Main Power", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, + {"SW timers #%d", + (8 << ATTENTION_LENGTH_SHIFT) | (1 << ATTENTION_OFFSET_SHIFT), + OSAL_NULL, MAX_BLOCK_ID}, + {"PCIE glue/PXP VPD %d", (16 << ATTENTION_LENGTH_SHIFT), OSAL_NULL, + BLOCK_PGLCS}, + } + }, + + { + { /* After Invert 3 */ + {"General Attention %d", (32 << ATTENTION_LENGTH_SHIFT), OSAL_NULL, + MAX_BLOCK_ID}, + } + }, + + { + { /* After Invert 4 */ + {"General Attention 32", ATTENTION_SINGLE | ATTENTION_CLEAR_ENABLE, + ecore_fw_assertion, MAX_BLOCK_ID}, + {"General Attention %d", + (2 << ATTENTION_LENGTH_SHIFT) | (33 << ATTENTION_OFFSET_SHIFT), + OSAL_NULL, MAX_BLOCK_ID}, + {"General Attention 35", ATTENTION_SINGLE | ATTENTION_CLEAR_ENABLE, + ecore_general_attention_35, MAX_BLOCK_ID}, + {"NWS Parity", ATTENTION_PAR | ATTENTION_BB_DIFFERENT | + ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_0), + OSAL_NULL, BLOCK_NWS}, + {"NWS Interrupt", ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT | + ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_1), + OSAL_NULL, BLOCK_NWS}, + {"NWM Parity", ATTENTION_PAR | ATTENTION_BB_DIFFERENT | + ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_2), + OSAL_NULL, BLOCK_NWM}, + {"NWM Interrupt", ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT 
| + ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_3), + OSAL_NULL, BLOCK_NWM}, + {"MCP CPU", ATTENTION_SINGLE, ecore_mcp_attn_cb, MAX_BLOCK_ID}, + {"MCP Watchdog timer", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, + {"MCP M2P", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, + {"AVS stop status ready", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, + {"MSTAT", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID}, + {"MSTAT per-path", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID}, + {"OPTE", ATTENTION_PAR, OSAL_NULL, BLOCK_OPTE}, + {"MCP", ATTENTION_PAR, OSAL_NULL, BLOCK_MCP}, + {"MS", ATTENTION_SINGLE, OSAL_NULL, BLOCK_MS}, + {"UMAC", ATTENTION_SINGLE, OSAL_NULL, BLOCK_UMAC}, + {"LED", ATTENTION_SINGLE, OSAL_NULL, BLOCK_LED}, + {"BMBN", ATTENTION_SINGLE, OSAL_NULL, BLOCK_BMBN}, + {"NIG", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_NIG}, + {"BMB/OPTE/MCP", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BMB}, + {"BMB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BMB}, + {"BTB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BTB}, + {"BRB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BRB}, + {"PRS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PRS}, + } + }, + + { + { /* After Invert 5 */ + {"SRC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_SRC}, + {"PB Client1", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PBF_PB1}, + {"PB Client2", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PBF_PB2}, + {"RPB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_RPB}, + {"PBF", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PBF}, + {"QM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_QM}, + {"TM", ATTENTION_PAR_INT, ecore_tm_attn_cb, BLOCK_TM}, + {"MCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MCM}, + {"MSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MSDM}, + {"MSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MSEM}, + {"PCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PCM}, + {"PSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSDM}, + {"PSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSEM}, + {"TCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TCM}, + {"TSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TSDM}, + {"TSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TSEM}, + } + }, + + { + { /* After Invert 6 */ + {"UCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_UCM}, + {"USDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_USDM}, + {"USEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_USEM}, + {"XCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XCM}, + {"XSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XSDM}, + {"XSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XSEM}, + {"YCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YCM}, + {"YSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YSDM}, + {"YSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YSEM}, + {"XYLD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XYLD}, + {"TMLD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TMLD}, + {"MYLD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MULD}, + {"YULD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YULD}, + {"DORQ", ATTENTION_PAR_INT, ecore_dorq_attn_cb, BLOCK_DORQ}, + {"DBG", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_DBG}, + {"IPC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_IPC}, + } + }, + + { + { /* After Invert 7 */ + {"CCFC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CCFC}, + {"CDU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CDU}, + {"DMAE", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_DMAE}, + {"IGU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_IGU}, + {"ATC", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID}, + {"CAU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CAU}, + {"PTU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PTU}, + {"PRM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PRM}, + {"TCFC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TCFC}, + {"RDIF", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_RDIF}, + {"TDIF", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TDIF}, + {"RSS", ATTENTION_PAR_INT, 
OSAL_NULL, BLOCK_RSS}, + {"MISC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MISC}, + {"MISCS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MISCS}, + {"PCIE", ATTENTION_PAR, OSAL_NULL, BLOCK_PCIE}, + {"Vaux PCI core", ATTENTION_SINGLE, OSAL_NULL, BLOCK_PGLCS}, + {"PSWRQ", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRQ}, + } + }, + + { + { /* After Invert 8 */ + {"PSWRQ (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRQ2}, + {"PSWWR", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWWR}, + {"PSWWR (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWWR2}, + {"PSWRD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRD}, + {"PSWRD (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRD2}, + {"PSWHST", ATTENTION_PAR_INT, ecore_pswhst_attn_cb, BLOCK_PSWHST}, + {"PSWHST (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWHST2}, + {"GRC", ATTENTION_PAR_INT, ecore_grc_attn_cb, BLOCK_GRC}, + {"CPMU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CPMU}, + {"NCSI", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_NCSI}, + {"MSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID}, + {"PSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID}, + {"TSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID}, + {"USEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID}, + {"XSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID}, + {"YSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID}, + {"pxp_misc_mps", ATTENTION_PAR, OSAL_NULL, BLOCK_PGLCS}, + {"PCIE glue/PXP Exp. ROM", ATTENTION_SINGLE, OSAL_NULL, BLOCK_PGLCS}, + {"PERST_B assertion", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, + {"PERST_B deassertion", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, + {"Reserved %d", (2 << ATTENTION_LENGTH_SHIFT), OSAL_NULL, + MAX_BLOCK_ID}, + } + }, + + { + { /* After Invert 9 */ + {"MCP Latched memory", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID}, + {"MCP Latched scratchpad cache", ATTENTION_SINGLE, OSAL_NULL, + MAX_BLOCK_ID}, + {"AVS", ATTENTION_PAR | ATTENTION_BB_DIFFERENT | + ATTENTION_BB(AEU_INVERT_REG_SPECIAL_MCP_UMP_TX), OSAL_NULL, + BLOCK_AVS_WRAP}, + {"AVS", ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT | + ATTENTION_BB(AEU_INVERT_REG_SPECIAL_MCP_SCPAD), OSAL_NULL, + BLOCK_AVS_WRAP}, + {"PCIe core", ATTENTION_SINGLE, OSAL_NULL, BLOCK_PGLCS}, + {"PCIe link up", ATTENTION_SINGLE, OSAL_NULL, BLOCK_PGLCS}, + {"PCIe hot reset", ATTENTION_SINGLE, OSAL_NULL, BLOCK_PGLCS}, + {"Reserved %d", (9 << ATTENTION_LENGTH_SHIFT), OSAL_NULL, + MAX_BLOCK_ID}, + } + }, + +}; + +static struct aeu_invert_reg_bit * +ecore_int_aeu_translate(struct ecore_hwfn *p_hwfn, + struct aeu_invert_reg_bit *p_bit) +{ + if (!ECORE_IS_BB(p_hwfn->p_dev)) + return p_bit; + + if (!(p_bit->flags & ATTENTION_BB_DIFFERENT)) + return p_bit; + + return &aeu_descs_special[(p_bit->flags & ATTENTION_BB_MASK) >> + ATTENTION_BB_SHIFT]; +} + +static bool ecore_int_is_parity_flag(struct ecore_hwfn *p_hwfn, + struct aeu_invert_reg_bit *p_bit) +{ + return !!(ecore_int_aeu_translate(p_hwfn, p_bit)->flags & + ATTENTION_PARITY); +} + +#define ATTN_STATE_BITS (0xfff) +#define ATTN_BITS_MASKABLE (0x3ff) +struct ecore_sb_attn_info { + /* Virtual & Physical address of the SB */ + struct atten_status_block *sb_attn; + dma_addr_t sb_phys; + + /* Last seen running index */ + u16 index; + + /* A mask of the AEU bits resulting in a parity error */ + u32 parity_mask[NUM_ATTN_REGS]; + + /* A pointer to the attention description structure */ + struct aeu_invert_reg *p_aeu_desc; + + /* Previously asserted attentions, which are still unasserted */ + u16 known_attn; + + /* Cleanup address for the link's general hw attention */ + u32 mfw_attn_addr; +}; + +static u16 
ecore_attn_update_idx(struct ecore_hwfn *p_hwfn, + struct ecore_sb_attn_info *p_sb_desc) +{ + u16 rc = 0, index; + + OSAL_MMIOWB(p_hwfn->p_dev); + + index = OSAL_LE16_TO_CPU(p_sb_desc->sb_attn->sb_index); + if (p_sb_desc->index != index) { + p_sb_desc->index = index; + rc = ECORE_SB_ATT_IDX; + } + + OSAL_MMIOWB(p_hwfn->p_dev); + + return rc; +} + +/** + * @brief ecore_int_assertion - handles asserted attention bits + * + * @param p_hwfn + * @param asserted_bits newly asserted bits + * @return enum _ecore_status_t + */ +static enum _ecore_status_t ecore_int_assertion(struct ecore_hwfn *p_hwfn, + u16 asserted_bits) +{ + struct ecore_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn; + u32 igu_mask; + + /* Mask the source of the attention in the IGU */ + igu_mask = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, + IGU_REG_ATTENTION_ENABLE); + DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "IGU mask: 0x%08x --> 0x%08x\n", + igu_mask, igu_mask & ~(asserted_bits & ATTN_BITS_MASKABLE)); + igu_mask &= ~(asserted_bits & ATTN_BITS_MASKABLE); + ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, igu_mask); + + DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, + "inner known ATTN state: 0x%04x --> 0x%04x\n", + sb_attn_sw->known_attn, + sb_attn_sw->known_attn | asserted_bits); + sb_attn_sw->known_attn |= asserted_bits; + + /* Handle MCP events */ + if (asserted_bits & 0x100) { + ecore_mcp_handle_events(p_hwfn, p_hwfn->p_dpc_ptt); + /* Clean the MCP attention */ + ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, + sb_attn_sw->mfw_attn_addr, 0); + } + + /* FIXME - this will change once we'll have GOOD gtt definitions */ + DIRECT_REG_WR(p_hwfn, + (u8 OSAL_IOMEM *) p_hwfn->regview + + GTT_BAR0_MAP_REG_IGU_CMD + + ((IGU_CMD_ATTN_BIT_SET_UPPER - + IGU_CMD_INT_ACK_BASE) << 3), (u32)asserted_bits); + + DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "set cmd IGU: 0x%04x\n", + asserted_bits); + + return ECORE_SUCCESS; +} + +static void ecore_int_attn_print(struct ecore_hwfn *p_hwfn, + enum block_id id, enum dbg_attn_type type, + bool b_clear) +{ + /* @DPDK */ + DP_NOTICE(p_hwfn->p_dev, false, "[block_id %d type %d]\n", id, type); +} + +/** + * @brief ecore_int_deassertion_aeu_bit - handles the effects of a single + * cause of the attention + * + * @param p_hwfn + * @param p_aeu - descriptor of an AEU bit which caused the attention + * @param aeu_en_reg - register offset of the AEU enable reg. which configured + * this bit to this group. 
+ * @param bit_index - index of this bit in the aeu_en_reg + * + * @return enum _ecore_status_t + */ +static enum _ecore_status_t +ecore_int_deassertion_aeu_bit(struct ecore_hwfn *p_hwfn, + struct aeu_invert_reg_bit *p_aeu, + u32 aeu_en_reg, + const char *p_bit_name, + u32 bitmask) +{ + enum _ecore_status_t rc = ECORE_INVAL; + bool b_fatal = false; + + DP_INFO(p_hwfn, "Deasserted attention `%s'[%08x]\n", + p_bit_name, bitmask); + + /* Call callback before clearing the interrupt status */ + if (p_aeu->cb) { + DP_INFO(p_hwfn, "`%s (attention)': Calling Callback function\n", + p_bit_name); + rc = p_aeu->cb(p_hwfn); + } + + if (rc != ECORE_SUCCESS) + b_fatal = true; + + /* Print HW block interrupt registers */ + if (p_aeu->block_index != MAX_BLOCK_ID) { + ecore_int_attn_print(p_hwfn, p_aeu->block_index, + ATTN_TYPE_INTERRUPT, !b_fatal); +} + + /* @DPDK */ + /* Reach assertion if attention is fatal */ + if (b_fatal || (strcmp(p_bit_name, "PGLUE B RBC") == 0)) { +#ifndef ASIC_ONLY + DP_NOTICE(p_hwfn, !CHIP_REV_IS_EMUL(p_hwfn->p_dev), + "`%s': Fatal attention\n", p_bit_name); +#else + DP_NOTICE(p_hwfn, true, "`%s': Fatal attention\n", + p_bit_name); +#endif + + ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_HW_ATTN); + } + + /* Prevent this Attention from being asserted in the future */ + if (p_aeu->flags & ATTENTION_CLEAR_ENABLE || +#ifndef ASIC_ONLY + CHIP_REV_IS_EMUL(p_hwfn->p_dev) || +#endif + p_hwfn->p_dev->attn_clr_en) { + u32 val; + u32 mask = ~bitmask; + val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg); + ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, (val & mask)); + DP_ERR(p_hwfn, "`%s' - Disabled future attentions\n", + p_bit_name); + } + + return rc; +} + +/** + * @brief ecore_int_deassertion_parity - handle a single parity AEU source + * + * @param p_hwfn + * @param p_aeu - descriptor of an AEU bit which caused the parity + * @param aeu_en_reg - address of the AEU enable register + * @param bit_index + */ +static void ecore_int_deassertion_parity(struct ecore_hwfn *p_hwfn, + struct aeu_invert_reg_bit *p_aeu, + u32 aeu_en_reg, u8 bit_index) +{ + u32 block_id = p_aeu->block_index, mask, val; + + DP_NOTICE(p_hwfn->p_dev, false, + "%s parity attention is set [address 0x%08x, bit %d]\n", + p_aeu->bit_name, aeu_en_reg, bit_index); + + if (block_id != MAX_BLOCK_ID) { + ecore_int_attn_print(p_hwfn, block_id, ATTN_TYPE_PARITY, false); + + /* In A0, there's a single parity bit for several blocks */ + if (block_id == BLOCK_BTB) { + ecore_int_attn_print(p_hwfn, BLOCK_OPTE, + ATTN_TYPE_PARITY, false); + ecore_int_attn_print(p_hwfn, BLOCK_MCP, + ATTN_TYPE_PARITY, false); + } + } + + /* Prevent this parity error from being re-asserted */ + mask = ~(0x1 << bit_index); + val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg); + ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, val & mask); + DP_INFO(p_hwfn, "`%s' - Disabled future parity errors\n", + p_aeu->bit_name); +} + +#define MISC_REG_AEU_AFTER_INVERT_IGU(n) \ + (MISC_REG_AEU_AFTER_INVERT_1_IGU + (n) * 0x4) + +#define MISC_REG_AEU_ENABLE_IGU_OUT(n, group) \ + (MISC_REG_AEU_ENABLE1_IGU_OUT_0 + (n) * 0x4 + \ + (group) * 0x4 * NUM_ATTN_REGS) + +/** + * @brief - handles deassertion of previously asserted attentions. 
+ * + * @param p_hwfn + * @param deasserted_bits - newly deasserted bits + * @return enum _ecore_status_t + * + */ +static enum _ecore_status_t ecore_int_deassertion(struct ecore_hwfn *p_hwfn, + u16 deasserted_bits) +{ + struct ecore_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn; + u32 aeu_inv_arr[NUM_ATTN_REGS], aeu_mask, aeu_en, en; + u8 i, j, k, bit_idx; + enum _ecore_status_t rc = ECORE_SUCCESS; + + /* Read the attention registers in the AEU */ + for (i = 0; i < NUM_ATTN_REGS; i++) { + aeu_inv_arr[i] = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, + MISC_REG_AEU_AFTER_INVERT_IGU(i)); + DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, + "Deasserted bits [%d]: %08x\n", i, aeu_inv_arr[i]); + } + + /* Handle parity attentions first */ + for (i = 0; i < NUM_ATTN_REGS; i++) { + struct aeu_invert_reg *p_aeu = &sb_attn_sw->p_aeu_desc[i]; + u32 parities; + + aeu_en = MISC_REG_AEU_ENABLE_IGU_OUT(i, 0); + en = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en); + parities = sb_attn_sw->parity_mask[i] & aeu_inv_arr[i] & en; + + /* Skip register in which no parity bit is currently set */ + if (!parities) + continue; + + for (j = 0, bit_idx = 0; bit_idx < 32; j++) { + struct aeu_invert_reg_bit *p_bit = &p_aeu->bits[j]; + + if (ecore_int_is_parity_flag(p_hwfn, p_bit) && + !!(parities & (1 << bit_idx))) + ecore_int_deassertion_parity(p_hwfn, p_bit, + aeu_en, bit_idx); + + bit_idx += ATTENTION_LENGTH(p_bit->flags); + } + } + + /* Find non-parity cause for attention and act */ + for (k = 0; k < MAX_ATTN_GRPS; k++) { + struct aeu_invert_reg_bit *p_aeu; + + /* Handle only groups whose attention is currently deasserted */ + if (!(deasserted_bits & (1 << k))) + continue; + + for (i = 0; i < NUM_ATTN_REGS; i++) { + u32 bits; + + aeu_en = MISC_REG_AEU_ENABLE_IGU_OUT(i, k); + en = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en); + bits = aeu_inv_arr[i] & en; + + /* Skip if no bit from this group is currently set */ + if (!bits) + continue; + + /* Find all set bits from current register which belong + * to current group, making them responsible for the + * previous assertion. + */ + for (j = 0, bit_idx = 0; bit_idx < 32; j++) { + unsigned long int bitmask; + u8 bit, bit_len; + + /* Need to account bits with changed meaning */ + p_aeu = &sb_attn_sw->p_aeu_desc[i].bits[j]; + + bit = bit_idx; + bit_len = ATTENTION_LENGTH(p_aeu->flags); + if (ecore_int_is_parity_flag(p_hwfn, p_aeu)) { + /* Skip Parity */ + bit++; + bit_len--; + } + + /* Find the bits relating to HW-block, then + * shift so they'll become LSB. + */ + bitmask = bits & (((1 << bit_len) - 1) << bit); + bitmask >>= bit; + + if (bitmask) { + u32 flags = p_aeu->flags; + char bit_name[30]; + u8 num; + + num = (u8)OSAL_FIND_FIRST_BIT(&bitmask, + bit_len); + + /* Some bits represent more than a + * a single interrupt. Correctly print + * their name. + */ + if (ATTENTION_LENGTH(flags) > 2 || + ((flags & ATTENTION_PAR_INT) && + ATTENTION_LENGTH(flags) > 1)) + OSAL_SNPRINTF(bit_name, 30, + p_aeu->bit_name, + num); + else + strlcpy(bit_name, + p_aeu->bit_name, + sizeof(bit_name)); + + /* We now need to pass bitmask in its + * correct position. 
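+ * (The mask was shifted right by 'bit' above; shifting it back lets + * the handler see the set bits at their original AEU register positions.)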
+ */ + bitmask <<= bit; + + /* Handle source of the attention */ + ecore_int_deassertion_aeu_bit(p_hwfn, + p_aeu, + aeu_en, + bit_name, + bitmask); + } + + bit_idx += ATTENTION_LENGTH(p_aeu->flags); + } + } + } + + /* Clear IGU indication for the deasserted bits */ + /* FIXME - this will change once we'll have GOOD gtt definitions */ + DIRECT_REG_WR(p_hwfn, + (u8 OSAL_IOMEM *) p_hwfn->regview + + GTT_BAR0_MAP_REG_IGU_CMD + + ((IGU_CMD_ATTN_BIT_CLR_UPPER - + IGU_CMD_INT_ACK_BASE) << 3), ~((u32)deasserted_bits)); + + /* Unmask deasserted attentions in IGU */ + aeu_mask = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, + IGU_REG_ATTENTION_ENABLE); + aeu_mask |= (deasserted_bits & ATTN_BITS_MASKABLE); + ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, aeu_mask); + + /* Clear deassertion from inner state */ + sb_attn_sw->known_attn &= ~deasserted_bits; + + return rc; +} + +static enum _ecore_status_t ecore_int_attentions(struct ecore_hwfn *p_hwfn) +{ + struct ecore_sb_attn_info *p_sb_attn_sw = p_hwfn->p_sb_attn; + struct atten_status_block *p_sb_attn = p_sb_attn_sw->sb_attn; + u16 index = 0, asserted_bits, deasserted_bits; + u32 attn_bits = 0, attn_acks = 0; + enum _ecore_status_t rc = ECORE_SUCCESS; + + /* Read current attention bits/acks - safeguard against attentions + * by guaranting work on a synchronized timeframe + */ + do { + index = OSAL_LE16_TO_CPU(p_sb_attn->sb_index); + attn_bits = OSAL_LE32_TO_CPU(p_sb_attn->atten_bits); + attn_acks = OSAL_LE32_TO_CPU(p_sb_attn->atten_ack); + } while (index != OSAL_LE16_TO_CPU(p_sb_attn->sb_index)); + p_sb_attn->sb_index = index; + + /* Attention / Deassertion are meaningful (and in correct state) + * only when they differ and consistent with known state - deassertion + * when previous attention & current ack, and assertion when current + * attention with no previous attention + */ + asserted_bits = (attn_bits & ~attn_acks & ATTN_STATE_BITS) & + ~p_sb_attn_sw->known_attn; + deasserted_bits = (~attn_bits & attn_acks & ATTN_STATE_BITS) & + p_sb_attn_sw->known_attn; + + if ((asserted_bits & ~0x100) || (deasserted_bits & ~0x100)) + DP_INFO(p_hwfn, + "Attention: Index: 0x%04x, Bits: 0x%08x, Acks: 0x%08x, asserted: 0x%04x, De-asserted 0x%04x [Prev. known: 0x%04x]\n", + index, attn_bits, attn_acks, asserted_bits, + deasserted_bits, p_sb_attn_sw->known_attn); + else if (asserted_bits == 0x100) + DP_INFO(p_hwfn, "MFW indication via attention\n"); + else + DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, + "MFW indication [deassertion]\n"); + + if (asserted_bits) { + rc = ecore_int_assertion(p_hwfn, asserted_bits); + if (rc) + return rc; + } + + if (deasserted_bits) + rc = ecore_int_deassertion(p_hwfn, deasserted_bits); + + return rc; +} + +static void ecore_sb_ack_attn(struct ecore_hwfn *p_hwfn, + void OSAL_IOMEM *igu_addr, u32 ack_cons) +{ + struct igu_prod_cons_update igu_ack; + + OSAL_MEMSET(&igu_ack, 0, sizeof(struct igu_prod_cons_update)); + igu_ack.sb_id_and_flags = + ((ack_cons << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) | + (1 << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) | + (IGU_INT_NOP << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) | + (IGU_SEG_ACCESS_ATTN << + IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT)); + + DIRECT_REG_WR(p_hwfn, igu_addr, igu_ack.sb_id_and_flags); + + /* Both segments (interrupts & acks) are written to same place address; + * Need to guarantee all commands will be received (in-order) by HW. 
+ */ + OSAL_MMIOWB(p_hwfn->p_dev); + OSAL_BARRIER(p_hwfn->p_dev); +} + +void ecore_int_sp_dpc(osal_int_ptr_t hwfn_cookie) +{ + struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)hwfn_cookie; + struct ecore_pi_info *pi_info = OSAL_NULL; + struct ecore_sb_attn_info *sb_attn; + struct ecore_sb_info *sb_info; + u16 rc = 0; + + if (!p_hwfn) + return; + + if (!p_hwfn->p_sp_sb) { + DP_ERR(p_hwfn->p_dev, "DPC called - no p_sp_sb\n"); + return; + } + + sb_info = &p_hwfn->p_sp_sb->sb_info; + if (!sb_info) { + DP_ERR(p_hwfn->p_dev, + "Status block is NULL - cannot ack interrupts\n"); + return; + } + + if (!p_hwfn->p_sb_attn) { + DP_ERR(p_hwfn->p_dev, "DPC called - no p_sb_attn"); + return; + } + sb_attn = p_hwfn->p_sb_attn; + + DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "DPC Called! (hwfn %p %d)\n", + p_hwfn, p_hwfn->my_id); + + /* Disable ack for def status block. Required both for msix + + * inta in non-mask mode, in inta does no harm. + */ + ecore_sb_ack(sb_info, IGU_INT_DISABLE, 0); + + /* Gather Interrupts/Attentions information */ + if (!sb_info->sb_virt) { + DP_ERR(p_hwfn->p_dev, + "Interrupt Status block is NULL -" + " cannot check for new interrupts!\n"); + } else { + u32 tmp_index = sb_info->sb_ack; + rc = ecore_sb_update_sb_idx(sb_info); + DP_VERBOSE(p_hwfn->p_dev, ECORE_MSG_INTR, + "Interrupt indices: 0x%08x --> 0x%08x\n", + tmp_index, sb_info->sb_ack); + } + + if (!sb_attn || !sb_attn->sb_attn) { + DP_ERR(p_hwfn->p_dev, + "Attentions Status block is NULL -" + " cannot check for new attentions!\n"); + } else { + u16 tmp_index = sb_attn->index; + + rc |= ecore_attn_update_idx(p_hwfn, sb_attn); + DP_VERBOSE(p_hwfn->p_dev, ECORE_MSG_INTR, + "Attention indices: 0x%08x --> 0x%08x\n", + tmp_index, sb_attn->index); + } + + /* Check if we expect interrupts at this time. if not just ack them */ + if (!(rc & ECORE_SB_EVENT_MASK)) { + ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1); + return; + } + +/* Check the validity of the DPC ptt. If not ack interrupts and fail */ + + if (!p_hwfn->p_dpc_ptt) { + DP_NOTICE(p_hwfn->p_dev, true, "Failed to allocate PTT\n"); + ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1); + return; + } + + if (rc & ECORE_SB_ATT_IDX) + ecore_int_attentions(p_hwfn); + + if (rc & ECORE_SB_IDX) { + osal_size_t pi; + + /* Since we only looked at the SB index, it's possible more + * than a single protocol-index on the SB incremented. + * Iterate over all configured protocol indices and check + * whether something happened for each. + */ + for (pi = 0; pi < p_hwfn->p_sp_sb->pi_info_arr_size; pi++) { + pi_info = &p_hwfn->p_sp_sb->pi_info_arr[pi]; + if (pi_info->comp_cb != OSAL_NULL) + pi_info->comp_cb(p_hwfn, pi_info->cookie); + } + } + + if (sb_attn && (rc & ECORE_SB_ATT_IDX)) { + /* This should be done before the interrupts are enabled, + * since otherwise a new attention will be generated. 
+ */ + ecore_sb_ack_attn(p_hwfn, sb_info->igu_addr, sb_attn->index); + } + + ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1); +} + +static void ecore_int_sb_attn_free(struct ecore_hwfn *p_hwfn) +{ + struct ecore_sb_attn_info *p_sb = p_hwfn->p_sb_attn; + + if (!p_sb) + return; + + if (p_sb->sb_attn) { + OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_sb->sb_attn, + p_sb->sb_phys, + SB_ATTN_ALIGNED_SIZE(p_hwfn)); + } + OSAL_FREE(p_hwfn->p_dev, p_sb); +} + +static void ecore_int_sb_attn_setup(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + struct ecore_sb_attn_info *sb_info = p_hwfn->p_sb_attn; + + OSAL_MEMSET(sb_info->sb_attn, 0, sizeof(*sb_info->sb_attn)); + + sb_info->index = 0; + sb_info->known_attn = 0; + + /* Configure Attention Status Block in IGU */ + ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_L, + DMA_LO(p_hwfn->p_sb_attn->sb_phys)); + ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_H, + DMA_HI(p_hwfn->p_sb_attn->sb_phys)); +} + +static void ecore_int_sb_attn_init(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + void *sb_virt_addr, dma_addr_t sb_phy_addr) +{ + struct ecore_sb_attn_info *sb_info = p_hwfn->p_sb_attn; + int i, j, k; + + sb_info->sb_attn = sb_virt_addr; + sb_info->sb_phys = sb_phy_addr; + + /* Set the pointer to the AEU descriptors */ + sb_info->p_aeu_desc = aeu_descs; + + /* Calculate Parity Masks */ + OSAL_MEMSET(sb_info->parity_mask, 0, sizeof(u32) * NUM_ATTN_REGS); + for (i = 0; i < NUM_ATTN_REGS; i++) { + /* j is array index, k is bit index */ + for (j = 0, k = 0; k < 32; j++) { + struct aeu_invert_reg_bit *p_aeu; + + p_aeu = &aeu_descs[i].bits[j]; + if (ecore_int_is_parity_flag(p_hwfn, p_aeu)) + sb_info->parity_mask[i] |= 1 << k; + + k += ATTENTION_LENGTH(p_aeu->flags); + } + DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, + "Attn Mask [Reg %d]: 0x%08x\n", + i, sb_info->parity_mask[i]); + } + + /* Set the address of cleanup for the mcp attention */ + sb_info->mfw_attn_addr = (p_hwfn->rel_pf_id << 3) + + MISC_REG_AEU_GENERAL_ATTN_0; + + ecore_int_sb_attn_setup(p_hwfn, p_ptt); +} + +static enum _ecore_status_t ecore_int_sb_attn_alloc(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + struct ecore_dev *p_dev = p_hwfn->p_dev; + struct ecore_sb_attn_info *p_sb; + dma_addr_t p_phys = 0; + void *p_virt; + + /* SB struct */ + p_sb = OSAL_ALLOC(p_dev, GFP_KERNEL, sizeof(*p_sb)); + if (!p_sb) { + DP_NOTICE(p_dev, false, "Failed to allocate `struct ecore_sb_attn_info'\n"); + return ECORE_NOMEM; + } + + /* SB ring */ + p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys, + SB_ATTN_ALIGNED_SIZE(p_hwfn)); + if (!p_virt) { + DP_NOTICE(p_dev, false, "Failed to allocate status block (attentions)\n"); + OSAL_FREE(p_dev, p_sb); + return ECORE_NOMEM; + } + + /* Attention setup */ + p_hwfn->p_sb_attn = p_sb; + ecore_int_sb_attn_init(p_hwfn, p_ptt, p_virt, p_phys); + + return ECORE_SUCCESS; +} + +/* coalescing timeout = timeset << (timer_res + 1) */ +#define ECORE_CAU_DEF_RX_USECS 24 +#define ECORE_CAU_DEF_TX_USECS 48 + +void ecore_init_cau_sb_entry(struct ecore_hwfn *p_hwfn, + struct cau_sb_entry *p_sb_entry, + u8 pf_id, u16 vf_number, u8 vf_valid) +{ + struct ecore_dev *p_dev = p_hwfn->p_dev; + u32 cau_state; + u8 timer_res; + + OSAL_MEMSET(p_sb_entry, 0, sizeof(*p_sb_entry)); + + SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_PF_NUMBER, pf_id); + SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_NUMBER, vf_number); + SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_VALID, vf_valid); + SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET0, 0x7F); + SET_FIELD(p_sb_entry->params, 
CAU_SB_ENTRY_SB_TIMESET1, 0x7F); + + cau_state = CAU_HC_DISABLE_STATE; + + if (p_dev->int_coalescing_mode == ECORE_COAL_MODE_ENABLE) { + cau_state = CAU_HC_ENABLE_STATE; + if (!p_dev->rx_coalesce_usecs) + p_dev->rx_coalesce_usecs = ECORE_CAU_DEF_RX_USECS; + if (!p_dev->tx_coalesce_usecs) + p_dev->tx_coalesce_usecs = ECORE_CAU_DEF_TX_USECS; + } + + /* Coalesce = (timeset << timer-res), timeset is 7bit wide */ + if (p_dev->rx_coalesce_usecs <= 0x7F) + timer_res = 0; + else if (p_dev->rx_coalesce_usecs <= 0xFF) + timer_res = 1; + else + timer_res = 2; + SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES0, timer_res); + + if (p_dev->tx_coalesce_usecs <= 0x7F) + timer_res = 0; + else if (p_dev->tx_coalesce_usecs <= 0xFF) + timer_res = 1; + else + timer_res = 2; + SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES1, timer_res); + + SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE0, cau_state); + SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE1, cau_state); +} + +static void _ecore_int_cau_conf_pi(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u16 igu_sb_id, u32 pi_index, + enum ecore_coalescing_fsm coalescing_fsm, + u8 timeset) +{ + struct cau_pi_entry pi_entry; + u32 sb_offset, pi_offset; + + if (IS_VF(p_hwfn->p_dev)) + return;/* @@@TBD MichalK- VF CAU... */ + + sb_offset = igu_sb_id * PIS_PER_SB; + OSAL_MEMSET(&pi_entry, 0, sizeof(struct cau_pi_entry)); + + SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset); + if (coalescing_fsm == ECORE_COAL_RX_STATE_MACHINE) + SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 0); + else + SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 1); + + pi_offset = sb_offset + pi_index; + if (p_hwfn->hw_init_done) { + ecore_wr(p_hwfn, p_ptt, + CAU_REG_PI_MEMORY + pi_offset * sizeof(u32), + *((u32 *)&(pi_entry))); + } else { + STORE_RT_REG(p_hwfn, + CAU_REG_PI_MEMORY_RT_OFFSET + pi_offset, + *((u32 *)&(pi_entry))); + } +} + +void ecore_int_cau_conf_pi(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_sb_info *p_sb, u32 pi_index, + enum ecore_coalescing_fsm coalescing_fsm, + u8 timeset) +{ + _ecore_int_cau_conf_pi(p_hwfn, p_ptt, p_sb->igu_sb_id, + pi_index, coalescing_fsm, timeset); +} + +void ecore_int_cau_conf_sb(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + dma_addr_t sb_phys, u16 igu_sb_id, + u16 vf_number, u8 vf_valid) +{ + struct cau_sb_entry sb_entry; + + ecore_init_cau_sb_entry(p_hwfn, &sb_entry, p_hwfn->rel_pf_id, + vf_number, vf_valid); + + if (p_hwfn->hw_init_done) { + /* Wide-bus, initialize via DMAE */ + u64 phys_addr = (u64)sb_phys; + + ecore_dmae_host2grc(p_hwfn, p_ptt, + (u64)(osal_uintptr_t)&phys_addr, + CAU_REG_SB_ADDR_MEMORY + + igu_sb_id * sizeof(u64), 2, + OSAL_NULL /* default parameters */); + ecore_dmae_host2grc(p_hwfn, p_ptt, + (u64)(osal_uintptr_t)&sb_entry, + CAU_REG_SB_VAR_MEMORY + + igu_sb_id * sizeof(u64), 2, + OSAL_NULL /* default parameters */); + } else { + /* Initialize Status Block Address */ + STORE_RT_REG_AGG(p_hwfn, + CAU_REG_SB_ADDR_MEMORY_RT_OFFSET + + igu_sb_id * 2, sb_phys); + + STORE_RT_REG_AGG(p_hwfn, + CAU_REG_SB_VAR_MEMORY_RT_OFFSET + + igu_sb_id * 2, sb_entry); + } + + /* Configure pi coalescing if set */ + if (p_hwfn->p_dev->int_coalescing_mode == ECORE_COAL_MODE_ENABLE) { + /* eth will open queues for all tcs, so configure all of them + * properly, rather than just the active ones + */ + u8 num_tc = p_hwfn->hw_info.num_hw_tc; + + u8 timeset, timer_res; + u8 i; + + /* timeset = (coalesce >> timer-res), timeset is 7bit wide */ + if (p_hwfn->p_dev->rx_coalesce_usecs <= 0x7F) + 
timer_res = 0; + else if (p_hwfn->p_dev->rx_coalesce_usecs <= 0xFF) + timer_res = 1; + else + timer_res = 2; + timeset = (u8)(p_hwfn->p_dev->rx_coalesce_usecs >> timer_res); + _ecore_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI, + ECORE_COAL_RX_STATE_MACHINE, + timeset); + + if (p_hwfn->p_dev->tx_coalesce_usecs <= 0x7F) + timer_res = 0; + else if (p_hwfn->p_dev->tx_coalesce_usecs <= 0xFF) + timer_res = 1; + else + timer_res = 2; + timeset = (u8)(p_hwfn->p_dev->tx_coalesce_usecs >> timer_res); + for (i = 0; i < num_tc; i++) { + _ecore_int_cau_conf_pi(p_hwfn, p_ptt, + igu_sb_id, TX_PI(i), + ECORE_COAL_TX_STATE_MACHINE, + timeset); + } + } +} + +void ecore_int_sb_setup(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, struct ecore_sb_info *sb_info) +{ + /* zero status block and ack counter */ + sb_info->sb_ack = 0; + OSAL_MEMSET(sb_info->sb_virt, 0, sb_info->sb_size); + + if (IS_PF(p_hwfn->p_dev)) + ecore_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys, + sb_info->igu_sb_id, 0, 0); +} + +struct ecore_igu_block * +ecore_get_igu_free_sb(struct ecore_hwfn *p_hwfn, bool b_is_pf) +{ + struct ecore_igu_block *p_block; + u16 igu_id; + + for (igu_id = 0; igu_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev); + igu_id++) { + p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_id]; + + if (!(p_block->status & ECORE_IGU_STATUS_VALID) || + !(p_block->status & ECORE_IGU_STATUS_FREE)) + continue; + + if (!!(p_block->status & ECORE_IGU_STATUS_PF) == + b_is_pf) + return p_block; + } + + return OSAL_NULL; +} + +static u16 ecore_get_pf_igu_sb_id(struct ecore_hwfn *p_hwfn, + u16 vector_id) +{ + struct ecore_igu_block *p_block; + u16 igu_id; + + for (igu_id = 0; igu_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev); + igu_id++) { + p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_id]; + + if (!(p_block->status & ECORE_IGU_STATUS_VALID) || + !p_block->is_pf || + p_block->vector_number != vector_id) + continue; + + return igu_id; + } + + return ECORE_SB_INVALID_IDX; +} + +u16 ecore_get_igu_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id) +{ + u16 igu_sb_id; + + /* Assuming continuous set of IGU SBs dedicated for given PF */ + if (sb_id == ECORE_SP_SB_ID) + igu_sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id; + else if (IS_PF(p_hwfn->p_dev)) + igu_sb_id = ecore_get_pf_igu_sb_id(p_hwfn, sb_id + 1); + else + igu_sb_id = ecore_vf_get_igu_sb_id(p_hwfn, sb_id); + + if (igu_sb_id == ECORE_SB_INVALID_IDX) + DP_NOTICE(p_hwfn, true, + "Slowpath SB vector %04x doesn't exist\n", + sb_id); + else if (sb_id == ECORE_SP_SB_ID) + DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, + "Slowpath SB index in IGU is 0x%04x\n", igu_sb_id); + else + DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, + "SB [%04x] <--> IGU SB [%04x]\n", sb_id, igu_sb_id); + + return igu_sb_id; +} + +enum _ecore_status_t ecore_int_sb_init(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_sb_info *sb_info, + void *sb_virt_addr, + dma_addr_t sb_phy_addr, u16 sb_id) +{ + sb_info->sb_virt = sb_virt_addr; + struct status_block *sb_virt; + + sb_virt = (struct status_block *)sb_info->sb_virt; + + sb_info->sb_size = sizeof(*sb_virt); + sb_info->sb_pi_array = sb_virt->pi_array; + sb_info->sb_prod_index = &sb_virt->prod_index; + + sb_info->sb_phys = sb_phy_addr; + + sb_info->igu_sb_id = ecore_get_igu_sb_id(p_hwfn, sb_id); + + if (sb_info->igu_sb_id == ECORE_SB_INVALID_IDX) + return ECORE_INVAL; + + /* Let the igu info reference the client's SB info */ + if (sb_id != ECORE_SP_SB_ID) { + if (IS_PF(p_hwfn->p_dev)) { + struct ecore_igu_info *p_info; + struct ecore_igu_block *p_block; + + 
p_info = p_hwfn->hw_info.p_igu_info; + p_block = &p_info->entry[sb_info->igu_sb_id]; + + p_block->sb_info = sb_info; + p_block->status &= ~ECORE_IGU_STATUS_FREE; + p_info->usage.free_cnt--; + } else { + ecore_vf_set_sb_info(p_hwfn, sb_id, sb_info); + } + } +#ifdef ECORE_CONFIG_DIRECT_HWFN + sb_info->p_hwfn = p_hwfn; +#endif + sb_info->p_dev = p_hwfn->p_dev; + + /* The igu address will hold the absolute address that needs to be + * written to for a specific status block + */ + if (IS_PF(p_hwfn->p_dev)) + sb_info->igu_addr = (u8 OSAL_IOMEM *)p_hwfn->regview + + GTT_BAR0_MAP_REG_IGU_CMD + + (sb_info->igu_sb_id << 3); + + else + sb_info->igu_addr = (u8 OSAL_IOMEM *)p_hwfn->regview + + PXP_VF_BAR0_START_IGU + + ((IGU_CMD_INT_ACK_BASE + + sb_info->igu_sb_id) << 3); + + sb_info->flags |= ECORE_SB_INFO_INIT; + + ecore_int_sb_setup(p_hwfn, p_ptt, sb_info); + + return ECORE_SUCCESS; +} + +enum _ecore_status_t ecore_int_sb_release(struct ecore_hwfn *p_hwfn, + struct ecore_sb_info *sb_info, + u16 sb_id) +{ + struct ecore_igu_info *p_info; + struct ecore_igu_block *p_block; + + if (sb_info == OSAL_NULL) + return ECORE_SUCCESS; + + /* zero status block and ack counter */ + sb_info->sb_ack = 0; + OSAL_MEMSET(sb_info->sb_virt, 0, sb_info->sb_size); + + if (IS_VF(p_hwfn->p_dev)) { + ecore_vf_set_sb_info(p_hwfn, sb_id, OSAL_NULL); + return ECORE_SUCCESS; + } + + p_info = p_hwfn->hw_info.p_igu_info; + p_block = &p_info->entry[sb_info->igu_sb_id]; + + /* Vector 0 is reserved to Default SB */ + if (p_block->vector_number == 0) { + DP_ERR(p_hwfn, "Do Not free sp sb using this function"); + return ECORE_INVAL; + } + + /* Lose reference to client's SB info, and fix counters */ + p_block->sb_info = OSAL_NULL; + p_block->status |= ECORE_IGU_STATUS_FREE; + p_info->usage.free_cnt++; + + return ECORE_SUCCESS; +} + +static void ecore_int_sp_sb_free(struct ecore_hwfn *p_hwfn) +{ + struct ecore_sb_sp_info *p_sb = p_hwfn->p_sp_sb; + + if (!p_sb) + return; + + if (p_sb->sb_info.sb_virt) { + OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, + p_sb->sb_info.sb_virt, + p_sb->sb_info.sb_phys, + SB_ALIGNED_SIZE(p_hwfn)); + } + + OSAL_FREE(p_hwfn->p_dev, p_sb); +} + +static enum _ecore_status_t ecore_int_sp_sb_alloc(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + struct ecore_sb_sp_info *p_sb; + dma_addr_t p_phys = 0; + void *p_virt; + + /* SB struct */ + p_sb = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_sb)); + if (!p_sb) { + DP_NOTICE(p_hwfn, false, + "Failed to allocate `struct ecore_sb_info'\n"); + return ECORE_NOMEM; + } + + /* SB ring */ + p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, + &p_phys, SB_ALIGNED_SIZE(p_hwfn)); + if (!p_virt) { + DP_NOTICE(p_hwfn, false, "Failed to allocate status block\n"); + OSAL_FREE(p_hwfn->p_dev, p_sb); + return ECORE_NOMEM; + } + + /* Status Block setup */ + p_hwfn->p_sp_sb = p_sb; + ecore_int_sb_init(p_hwfn, p_ptt, &p_sb->sb_info, + p_virt, p_phys, ECORE_SP_SB_ID); + + p_sb->pi_info_arr_size = PIS_PER_SB; + + return ECORE_SUCCESS; +} + +enum _ecore_status_t ecore_int_register_cb(struct ecore_hwfn *p_hwfn, + ecore_int_comp_cb_t comp_cb, + void *cookie, + u8 *sb_idx, __le16 **p_fw_cons) +{ + struct ecore_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb; + enum _ecore_status_t rc = ECORE_NOMEM; + u8 pi; + + /* Look for a free index */ + for (pi = 0; pi < p_sp_sb->pi_info_arr_size; pi++) { + if (p_sp_sb->pi_info_arr[pi].comp_cb != OSAL_NULL) + continue; + + p_sp_sb->pi_info_arr[pi].comp_cb = comp_cb; + p_sp_sb->pi_info_arr[pi].cookie = cookie; + *sb_idx = pi; + *p_fw_cons = 
&p_sp_sb->sb_info.sb_pi_array[pi]; + rc = ECORE_SUCCESS; + break; + } + + return rc; +} + +enum _ecore_status_t ecore_int_unregister_cb(struct ecore_hwfn *p_hwfn, u8 pi) +{ + struct ecore_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb; + + if (p_sp_sb->pi_info_arr[pi].comp_cb == OSAL_NULL) + return ECORE_NOMEM; + + p_sp_sb->pi_info_arr[pi].comp_cb = OSAL_NULL; + p_sp_sb->pi_info_arr[pi].cookie = OSAL_NULL; + return ECORE_SUCCESS; +} + +u16 ecore_int_get_sp_sb_id(struct ecore_hwfn *p_hwfn) +{ + return p_hwfn->p_sp_sb->sb_info.igu_sb_id; +} + +void ecore_int_igu_enable_int(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + enum ecore_int_mode int_mode) +{ + u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN | IGU_PF_CONF_ATTN_BIT_EN; + +#ifndef ASIC_ONLY + if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) { + DP_INFO(p_hwfn, "FPGA - don't enable ATTN generation in IGU\n"); + igu_pf_conf &= ~IGU_PF_CONF_ATTN_BIT_EN; + } +#endif + + p_hwfn->p_dev->int_mode = int_mode; + switch (p_hwfn->p_dev->int_mode) { + case ECORE_INT_MODE_INTA: + igu_pf_conf |= IGU_PF_CONF_INT_LINE_EN; + igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN; + break; + + case ECORE_INT_MODE_MSI: + igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN; + igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN; + break; + + case ECORE_INT_MODE_MSIX: + igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN; + break; + case ECORE_INT_MODE_POLL: + break; + } + + ecore_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, igu_pf_conf); +} + +static void ecore_int_igu_enable_attn(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ +#ifndef ASIC_ONLY + if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) { + DP_INFO(p_hwfn, + "FPGA - Don't enable Attentions in IGU and MISC\n"); + return; + } +#endif + + /* Configure AEU signal change to produce attentions */ + ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0); + ecore_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0xfff); + ecore_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff); + ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0xfff); + + /* Flush the writes to IGU */ + OSAL_MMIOWB(p_hwfn->p_dev); + + /* Unmask AEU signals toward IGU */ + ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff); +} + +enum _ecore_status_t +ecore_int_igu_enable(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, + enum ecore_int_mode int_mode) +{ + enum _ecore_status_t rc = ECORE_SUCCESS; + + ecore_int_igu_enable_attn(p_hwfn, p_ptt); + + if ((int_mode != ECORE_INT_MODE_INTA) || IS_LEAD_HWFN(p_hwfn)) { + rc = OSAL_SLOWPATH_IRQ_REQ(p_hwfn); + if (rc != ECORE_SUCCESS) { + DP_NOTICE(p_hwfn, true, + "Slowpath IRQ request failed\n"); + return ECORE_NORESOURCES; + } + p_hwfn->b_int_requested = true; + } + + /* Enable interrupt Generation */ + ecore_int_igu_enable_int(p_hwfn, p_ptt, int_mode); + + p_hwfn->b_int_enabled = 1; + + return rc; +} + +void ecore_int_igu_disable_int(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + p_hwfn->b_int_enabled = 0; + + if (IS_VF(p_hwfn->p_dev)) + return; + + ecore_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, 0); +} + +#define IGU_CLEANUP_SLEEP_LENGTH (1000) +static void ecore_int_igu_cleanup_sb(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 igu_sb_id, + bool cleanup_set, + u16 opaque_fid) +{ + u32 data = 0, cmd_ctrl = 0, sb_bit, sb_bit_addr, pxp_addr; + u32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH, val; + u8 type = 0; + + OSAL_BUILD_BUG_ON((IGU_REG_CLEANUP_STATUS_4 - + IGU_REG_CLEANUP_STATUS_0) != 0x200); + + /* USE Control Command Register to perform cleanup. There is an + * option to do this using IGU bar, but then it can't be used for VFs. 
+ */ + + /* Set the data field */ + SET_FIELD(data, IGU_CLEANUP_CLEANUP_SET, cleanup_set ? 1 : 0); + SET_FIELD(data, IGU_CLEANUP_CLEANUP_TYPE, type); + SET_FIELD(data, IGU_CLEANUP_COMMAND_TYPE, IGU_COMMAND_TYPE_SET); + + /* Set the control register */ + pxp_addr = IGU_CMD_INT_ACK_BASE + igu_sb_id; + SET_FIELD(cmd_ctrl, IGU_CTRL_REG_PXP_ADDR, pxp_addr); + SET_FIELD(cmd_ctrl, IGU_CTRL_REG_FID, opaque_fid); + SET_FIELD(cmd_ctrl, IGU_CTRL_REG_TYPE, IGU_CTRL_CMD_TYPE_WR); + + ecore_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_32LSB_DATA, data); + + OSAL_BARRIER(p_hwfn->p_dev); + + ecore_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_CTRL, cmd_ctrl); + + /* Flush the write to IGU */ + OSAL_MMIOWB(p_hwfn->p_dev); + + /* calculate where to read the status bit from */ + sb_bit = 1 << (igu_sb_id % 32); + sb_bit_addr = igu_sb_id / 32 * sizeof(u32); + + sb_bit_addr += IGU_REG_CLEANUP_STATUS_0 + (0x80 * type); + + /* Now wait for the command to complete */ + while (--sleep_cnt) { + val = ecore_rd(p_hwfn, p_ptt, sb_bit_addr); + if ((val & sb_bit) == (cleanup_set ? sb_bit : 0)) + break; + OSAL_MSLEEP(5); + } + + if (!sleep_cnt) + DP_NOTICE(p_hwfn, true, + "Timeout waiting for clear status 0x%08x [for sb %d]\n", + val, igu_sb_id); +} + +void ecore_int_igu_init_pure_rt_single(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u16 igu_sb_id, u16 opaque, bool b_set) +{ + struct ecore_igu_block *p_block; + int pi, i; + + p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id]; + DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, + "Cleaning SB [%04x]: func_id= %d is_pf = %d vector_num = 0x%0x\n", + igu_sb_id, p_block->function_id, p_block->is_pf, + p_block->vector_number); + + /* Set */ + if (b_set) + ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 1, opaque); + + /* Clear */ + ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 0, opaque); + + /* Wait for the IGU SB to cleanup */ + for (i = 0; i < IGU_CLEANUP_SLEEP_LENGTH; i++) { + u32 val; + + val = ecore_rd(p_hwfn, p_ptt, + IGU_REG_WRITE_DONE_PENDING + + ((igu_sb_id / 32) * 4)); + if (val & (1 << (igu_sb_id % 32))) + OSAL_UDELAY(10); + else + break; + } + if (i == IGU_CLEANUP_SLEEP_LENGTH) + DP_NOTICE(p_hwfn, true, + "Failed SB[0x%08x] still appearing in WRITE_DONE_PENDING\n", + igu_sb_id); + + /* Clear the CAU for the SB */ + for (pi = 0; pi < PIS_PER_SB; pi++) + ecore_wr(p_hwfn, p_ptt, + CAU_REG_PI_MEMORY + + (igu_sb_id * PIS_PER_SB + pi) * 4, + 0); +} + +void ecore_int_igu_init_pure_rt(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + bool b_set, bool b_slowpath) +{ + struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info; + struct ecore_igu_block *p_block; + u16 igu_sb_id = 0; + u32 val = 0; + + /* @@@TBD MichalK temporary... should be moved to init-tool... 
*/ + val = ecore_rd(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION); + val |= IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN; + val &= ~IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN; + ecore_wr(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION, val); + /* end temporary */ + + for (igu_sb_id = 0; + igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev); + igu_sb_id++) { + p_block = &p_info->entry[igu_sb_id]; + + if (!(p_block->status & ECORE_IGU_STATUS_VALID) || + !p_block->is_pf || + (p_block->status & ECORE_IGU_STATUS_DSB)) + continue; + + ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt, igu_sb_id, + p_hwfn->hw_info.opaque_fid, + b_set); + } + + if (b_slowpath) + ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt, + p_info->igu_dsb_id, + p_hwfn->hw_info.opaque_fid, + b_set); +} + +int ecore_int_igu_reset_cam(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info; + struct ecore_igu_block *p_block; + int pf_sbs, vf_sbs; + u16 igu_sb_id; + u32 val, rval; + + if (!RESC_NUM(p_hwfn, ECORE_SB)) { + /* We're using an old MFW - have to prevent any switching + * of SBs between PF and VFs as later driver wouldn't be + * able to tell which belongs to which. + */ + p_info->b_allow_pf_vf_change = false; + } else { + /* Use the numbers the MFW have provided - + * don't forget MFW accounts for the default SB as well. + */ + p_info->b_allow_pf_vf_change = true; + + if (p_info->usage.cnt != RESC_NUM(p_hwfn, ECORE_SB) - 1) { + DP_INFO(p_hwfn, + "MFW notifies of 0x%04x PF SBs; IGU indicates of only 0x%04x\n", + RESC_NUM(p_hwfn, ECORE_SB) - 1, + p_info->usage.cnt); + p_info->usage.cnt = RESC_NUM(p_hwfn, ECORE_SB) - 1; + } + + /* TODO - how do we learn about VF SBs from MFW? */ + if (IS_PF_SRIOV(p_hwfn)) { + u16 vfs = p_hwfn->p_dev->p_iov_info->total_vfs; + + if (vfs != p_info->usage.iov_cnt) + DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, + "0x%04x VF SBs in IGU CAM != PCI configuration 0x%04x\n", + p_info->usage.iov_cnt, vfs); + + /* At this point we know how many SBs we have totally + * in IGU + number of PF SBs. So we can validate that + * we'd have sufficient for VF. + */ + if (vfs > p_info->usage.free_cnt + + p_info->usage.free_cnt_iov - + p_info->usage.cnt) { + DP_NOTICE(p_hwfn, true, + "Not enough SBs for VFs - 0x%04x SBs, from which %04x PFs and %04x are required\n", + p_info->usage.free_cnt + + p_info->usage.free_cnt_iov, + p_info->usage.cnt, vfs); + return ECORE_INVAL; + } + } + } + + /* Cap the number of VFs SBs by the number of VFs */ + if (IS_PF_SRIOV(p_hwfn)) + p_info->usage.iov_cnt = p_hwfn->p_dev->p_iov_info->total_vfs; + + /* Mark all SBs as free, now in the right PF/VFs division */ + p_info->usage.free_cnt = p_info->usage.cnt; + p_info->usage.free_cnt_iov = p_info->usage.iov_cnt; + p_info->usage.orig = p_info->usage.cnt; + p_info->usage.iov_orig = p_info->usage.iov_cnt; + + /* We now proceed to re-configure the IGU cam to reflect the initial + * configuration. We can start with the Default SB. 
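+ * Layout applied by the loop below: the default SB entry is configured
+ * first, then the PF SBs, then the VF SBs; any remaining valid entries
+ * are left disabled.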
+ */ + pf_sbs = p_info->usage.cnt; + vf_sbs = p_info->usage.iov_cnt; + + for (igu_sb_id = p_info->igu_dsb_id; + igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev); + igu_sb_id++) { + p_block = &p_info->entry[igu_sb_id]; + val = 0; + + if (!(p_block->status & ECORE_IGU_STATUS_VALID)) + continue; + + if (p_block->status & ECORE_IGU_STATUS_DSB) { + p_block->function_id = p_hwfn->rel_pf_id; + p_block->is_pf = 1; + p_block->vector_number = 0; + p_block->status = ECORE_IGU_STATUS_VALID | + ECORE_IGU_STATUS_PF | + ECORE_IGU_STATUS_DSB; + } else if (pf_sbs) { + pf_sbs--; + p_block->function_id = p_hwfn->rel_pf_id; + p_block->is_pf = 1; + p_block->vector_number = p_info->usage.cnt - pf_sbs; + p_block->status = ECORE_IGU_STATUS_VALID | + ECORE_IGU_STATUS_PF | + ECORE_IGU_STATUS_FREE; + } else if (vf_sbs) { + p_block->function_id = + p_hwfn->p_dev->p_iov_info->first_vf_in_pf + + p_info->usage.iov_cnt - vf_sbs; + p_block->is_pf = 0; + p_block->vector_number = 0; + p_block->status = ECORE_IGU_STATUS_VALID | + ECORE_IGU_STATUS_FREE; + vf_sbs--; + } else { + p_block->function_id = 0; + p_block->is_pf = 0; + p_block->vector_number = 0; + } + + SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, + p_block->function_id); + SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, p_block->is_pf); + SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, + p_block->vector_number); + + /* VF entries would be enabled when VF is initializaed */ + SET_FIELD(val, IGU_MAPPING_LINE_VALID, p_block->is_pf); + + rval = ecore_rd(p_hwfn, p_ptt, + IGU_REG_MAPPING_MEMORY + + sizeof(u32) * igu_sb_id); + + if (rval != val) { + ecore_wr(p_hwfn, p_ptt, + IGU_REG_MAPPING_MEMORY + + sizeof(u32) * igu_sb_id, + val); + + DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, + "IGU reset: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x [%08x -> %08x]\n", + igu_sb_id, p_block->function_id, + p_block->is_pf, p_block->vector_number, + rval, val); + } + } + + return 0; +} + +int ecore_int_igu_reset_cam_default(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + struct ecore_sb_cnt_info *p_cnt = &p_hwfn->hw_info.p_igu_info->usage; + + /* Return all the usage indications to default prior to the reset; + * The reset expects the !orig to reflect the initial status of the + * SBs, and would re-calculate the originals based on those. + */ + p_cnt->cnt = p_cnt->orig; + p_cnt->free_cnt = p_cnt->orig; + p_cnt->iov_cnt = p_cnt->iov_orig; + p_cnt->free_cnt_iov = p_cnt->iov_orig; + p_cnt->orig = 0; + p_cnt->iov_orig = 0; + + /* TODO - we probably need to re-configure the CAU as well... 
*/ + return ecore_int_igu_reset_cam(p_hwfn, p_ptt); +} + +static void ecore_int_igu_read_cam_block(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u16 igu_sb_id) +{ + u32 val = ecore_rd(p_hwfn, p_ptt, + IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id); + struct ecore_igu_block *p_block; + + p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id]; + + /* Fill the block information */ + p_block->function_id = GET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER); + p_block->is_pf = GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID); + p_block->vector_number = GET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER); + + p_block->igu_sb_id = igu_sb_id; +} + +enum _ecore_status_t ecore_int_igu_read_cam(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + struct ecore_igu_info *p_igu_info; + struct ecore_igu_block *p_block; + u32 min_vf = 0, max_vf = 0; + u16 igu_sb_id; + + p_hwfn->hw_info.p_igu_info = OSAL_ZALLOC(p_hwfn->p_dev, + GFP_KERNEL, + sizeof(*p_igu_info)); + if (!p_hwfn->hw_info.p_igu_info) + return ECORE_NOMEM; + p_igu_info = p_hwfn->hw_info.p_igu_info; + + /* Distinguish between existent and onn-existent default SB */ + p_igu_info->igu_dsb_id = ECORE_SB_INVALID_IDX; + + /* Find the range of VF ids whose SB belong to this PF */ + if (p_hwfn->p_dev->p_iov_info) { + struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info; + + min_vf = p_iov->first_vf_in_pf; + max_vf = p_iov->first_vf_in_pf + p_iov->total_vfs; + } + + for (igu_sb_id = 0; + igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev); + igu_sb_id++) { + /* Read current entry; Notice it might not belong to this PF */ + ecore_int_igu_read_cam_block(p_hwfn, p_ptt, igu_sb_id); + p_block = &p_igu_info->entry[igu_sb_id]; + + if ((p_block->is_pf) && + (p_block->function_id == p_hwfn->rel_pf_id)) { + p_block->status = ECORE_IGU_STATUS_PF | + ECORE_IGU_STATUS_VALID | + ECORE_IGU_STATUS_FREE; + + if (p_igu_info->igu_dsb_id != ECORE_SB_INVALID_IDX) + p_igu_info->usage.cnt++; + } else if (!(p_block->is_pf) && + (p_block->function_id >= min_vf) && + (p_block->function_id < max_vf)) { + /* Available for VFs of this PF */ + p_block->status = ECORE_IGU_STATUS_VALID | + ECORE_IGU_STATUS_FREE; + + if (p_igu_info->igu_dsb_id != ECORE_SB_INVALID_IDX) + p_igu_info->usage.iov_cnt++; + } + + /* Mark the First entry belonging to the PF or its VFs + * as the default SB [we'll reset IGU prior to first usage]. + */ + if ((p_block->status & ECORE_IGU_STATUS_VALID) && + (p_igu_info->igu_dsb_id == ECORE_SB_INVALID_IDX)) { + p_igu_info->igu_dsb_id = igu_sb_id; + p_block->status |= ECORE_IGU_STATUS_DSB; + } + + /* While this isn't suitable for all clients, limit number + * of prints by having each PF print only its entries with the + * exception of PF0 which would print everything. 
+ */ + if ((p_block->status & ECORE_IGU_STATUS_VALID) || + (p_hwfn->abs_pf_id == 0)) + DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, + "IGU_BLOCK: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x\n", + igu_sb_id, p_block->function_id, + p_block->is_pf, p_block->vector_number); + } + + if (p_igu_info->igu_dsb_id == ECORE_SB_INVALID_IDX) { + DP_NOTICE(p_hwfn, true, + "IGU CAM returned invalid values igu_dsb_id=0x%x\n", + p_igu_info->igu_dsb_id); + return ECORE_INVAL; + } + + /* All non default SB are considered free at this point */ + p_igu_info->usage.free_cnt = p_igu_info->usage.cnt; + p_igu_info->usage.free_cnt_iov = p_igu_info->usage.iov_cnt; + + DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, + "igu_dsb_id=0x%x, num Free SBs - PF: %04x VF: %04x [might change after resource allocation]\n", + p_igu_info->igu_dsb_id, p_igu_info->usage.cnt, + p_igu_info->usage.iov_cnt); + + return ECORE_SUCCESS; +} + +enum _ecore_status_t +ecore_int_igu_relocate_sb(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, + u16 sb_id, bool b_to_vf) +{ + struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info; + struct ecore_igu_block *p_block = OSAL_NULL; + u16 igu_sb_id = 0, vf_num = 0; + u32 val = 0; + + if (IS_VF(p_hwfn->p_dev) || !IS_PF_SRIOV(p_hwfn)) + return ECORE_INVAL; + + if (sb_id == ECORE_SP_SB_ID) + return ECORE_INVAL; + + if (!p_info->b_allow_pf_vf_change) { + DP_INFO(p_hwfn, "Can't relocate SBs as MFW is too old.\n"); + return ECORE_INVAL; + } + + /* If we're moving a SB from PF to VF, the client had to specify + * which vector it wants to move. + */ + if (b_to_vf) { + igu_sb_id = ecore_get_pf_igu_sb_id(p_hwfn, sb_id + 1); + if (igu_sb_id == ECORE_SB_INVALID_IDX) + return ECORE_INVAL; + } + + /* If we're moving a SB from VF to PF, need to validate there isn't + * already a line configured for that vector. + */ + if (!b_to_vf) { + if (ecore_get_pf_igu_sb_id(p_hwfn, sb_id + 1) != + ECORE_SB_INVALID_IDX) + return ECORE_INVAL; + } + + /* We need to validate that the SB can actually be relocated. + * This would also handle the previous case where we've explicitly + * stated which IGU SB needs to move. + */ + for (; igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev); + igu_sb_id++) { + p_block = &p_info->entry[igu_sb_id]; + + if (!(p_block->status & ECORE_IGU_STATUS_VALID) || + !(p_block->status & ECORE_IGU_STATUS_FREE) || + (!!(p_block->status & ECORE_IGU_STATUS_PF) != b_to_vf)) { + if (b_to_vf) + return ECORE_INVAL; + else + continue; + } + + break; + } + + if (igu_sb_id == ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev)) { + DP_VERBOSE(p_hwfn, (ECORE_MSG_INTR | ECORE_MSG_IOV), + "Failed to find a free SB to move\n"); + return ECORE_INVAL; + } + + /* At this point, p_block points to the SB we want to relocate */ + if (b_to_vf) { + p_block->status &= ~ECORE_IGU_STATUS_PF; + + /* It doesn't matter which VF number we choose, since we're + * going to disable the line; But let's keep it in range. + */ + vf_num = (u16)p_hwfn->p_dev->p_iov_info->first_vf_in_pf; + + p_block->function_id = (u8)vf_num; + p_block->is_pf = 0; + p_block->vector_number = 0; + + p_info->usage.cnt--; + p_info->usage.free_cnt--; + p_info->usage.iov_cnt++; + p_info->usage.free_cnt_iov++; + + /* TODO - if SBs aren't really the limiting factor, + * then it might not be accurate [in the since that + * we might not need decrement the feature]. 
+ */ + p_hwfn->hw_info.feat_num[ECORE_PF_L2_QUE]--; + p_hwfn->hw_info.feat_num[ECORE_VF_L2_QUE]++; + } else { + p_block->status |= ECORE_IGU_STATUS_PF; + p_block->function_id = p_hwfn->rel_pf_id; + p_block->is_pf = 1; + p_block->vector_number = sb_id + 1; + + p_info->usage.cnt++; + p_info->usage.free_cnt++; + p_info->usage.iov_cnt--; + p_info->usage.free_cnt_iov--; + + p_hwfn->hw_info.feat_num[ECORE_PF_L2_QUE]++; + p_hwfn->hw_info.feat_num[ECORE_VF_L2_QUE]--; + } + + /* Update the IGU and CAU with the new configuration */ + SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, + p_block->function_id); + SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, p_block->is_pf); + SET_FIELD(val, IGU_MAPPING_LINE_VALID, p_block->is_pf); + SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, + p_block->vector_number); + + ecore_wr(p_hwfn, p_ptt, + IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id, + val); + + ecore_int_cau_conf_sb(p_hwfn, p_ptt, 0, + igu_sb_id, vf_num, + p_block->is_pf ? 0 : 1); + + DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, + "Relocation: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x\n", + igu_sb_id, p_block->function_id, + p_block->is_pf, p_block->vector_number); + + return ECORE_SUCCESS; +} + +/** + * @brief Initialize igu runtime registers + * + * @param p_hwfn + */ +void ecore_int_igu_init_rt(struct ecore_hwfn *p_hwfn) +{ + u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN; + + STORE_RT_REG(p_hwfn, IGU_REG_PF_CONFIGURATION_RT_OFFSET, igu_pf_conf); +} + +#define LSB_IGU_CMD_ADDR (IGU_REG_SISR_MDPC_WMASK_LSB_UPPER - \ + IGU_CMD_INT_ACK_BASE) +#define MSB_IGU_CMD_ADDR (IGU_REG_SISR_MDPC_WMASK_MSB_UPPER - \ + IGU_CMD_INT_ACK_BASE) +u64 ecore_int_igu_read_sisr_reg(struct ecore_hwfn *p_hwfn) +{ + u32 intr_status_hi = 0, intr_status_lo = 0; + u64 intr_status = 0; + + intr_status_lo = REG_RD(p_hwfn, + GTT_BAR0_MAP_REG_IGU_CMD + + LSB_IGU_CMD_ADDR * 8); + intr_status_hi = REG_RD(p_hwfn, + GTT_BAR0_MAP_REG_IGU_CMD + + MSB_IGU_CMD_ADDR * 8); + intr_status = ((u64)intr_status_hi << 32) + (u64)intr_status_lo; + + return intr_status; +} + +static void ecore_int_sp_dpc_setup(struct ecore_hwfn *p_hwfn) +{ + OSAL_DPC_INIT(p_hwfn->sp_dpc, p_hwfn); + p_hwfn->b_sp_dpc_enabled = true; +} + +static enum _ecore_status_t ecore_int_sp_dpc_alloc(struct ecore_hwfn *p_hwfn) +{ + p_hwfn->sp_dpc = OSAL_DPC_ALLOC(p_hwfn); + if (!p_hwfn->sp_dpc) + return ECORE_NOMEM; + + return ECORE_SUCCESS; +} + +static void ecore_int_sp_dpc_free(struct ecore_hwfn *p_hwfn) +{ + OSAL_FREE(p_hwfn->p_dev, p_hwfn->sp_dpc); +} + +enum _ecore_status_t ecore_int_alloc(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + enum _ecore_status_t rc = ECORE_SUCCESS; + + rc = ecore_int_sp_dpc_alloc(p_hwfn); + if (rc != ECORE_SUCCESS) { + DP_ERR(p_hwfn->p_dev, "Failed to allocate sp dpc mem\n"); + return rc; + } + + rc = ecore_int_sp_sb_alloc(p_hwfn, p_ptt); + if (rc != ECORE_SUCCESS) { + DP_ERR(p_hwfn->p_dev, "Failed to allocate sp sb mem\n"); + return rc; + } + + rc = ecore_int_sb_attn_alloc(p_hwfn, p_ptt); + if (rc != ECORE_SUCCESS) + DP_ERR(p_hwfn->p_dev, "Failed to allocate sb attn mem\n"); + + return rc; +} + +void ecore_int_free(struct ecore_hwfn *p_hwfn) +{ + ecore_int_sp_sb_free(p_hwfn); + ecore_int_sb_attn_free(p_hwfn); + ecore_int_sp_dpc_free(p_hwfn); +} + +void ecore_int_setup(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) +{ + if (!p_hwfn || !p_hwfn->p_sp_sb || !p_hwfn->p_sb_attn) + return; + + ecore_int_sb_setup(p_hwfn, p_ptt, &p_hwfn->p_sp_sb->sb_info); + ecore_int_sb_attn_setup(p_hwfn, p_ptt); + ecore_int_sp_dpc_setup(p_hwfn); +} + +void 
ecore_int_get_num_sbs(struct ecore_hwfn *p_hwfn, + struct ecore_sb_cnt_info *p_sb_cnt_info) +{ + struct ecore_igu_info *p_igu_info = p_hwfn->hw_info.p_igu_info; + + if (!p_igu_info || !p_sb_cnt_info) + return; + + OSAL_MEMCPY(p_sb_cnt_info, &p_igu_info->usage, + sizeof(*p_sb_cnt_info)); +} + +void ecore_int_disable_post_isr_release(struct ecore_dev *p_dev) +{ + int i; + + for_each_hwfn(p_dev, i) + p_dev->hwfns[i].b_int_requested = false; +} + +void ecore_int_attn_clr_enable(struct ecore_dev *p_dev, bool clr_enable) +{ + p_dev->attn_clr_en = clr_enable; +} + +enum _ecore_status_t ecore_int_set_timer_res(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u8 timer_res, u16 sb_id, bool tx) +{ + struct cau_sb_entry sb_entry; + enum _ecore_status_t rc; + + if (!p_hwfn->hw_init_done) { + DP_ERR(p_hwfn, "hardware not initialized yet\n"); + return ECORE_INVAL; + } + + rc = ecore_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY + + sb_id * sizeof(u64), + (u64)(osal_uintptr_t)&sb_entry, 2, + OSAL_NULL /* default parameters */); + if (rc != ECORE_SUCCESS) { + DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc); + return rc; + } + + if (tx) + SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES1, timer_res); + else + SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES0, timer_res); + + rc = ecore_dmae_host2grc(p_hwfn, p_ptt, + (u64)(osal_uintptr_t)&sb_entry, + CAU_REG_SB_VAR_MEMORY + sb_id * sizeof(u64), 2, + OSAL_NULL /* default parameters */); + if (rc != ECORE_SUCCESS) { + DP_ERR(p_hwfn, "dmae_host2grc failed %d\n", rc); + return rc; + } + + return rc; +} + +enum _ecore_status_t ecore_int_get_sb_dbg(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_sb_info *p_sb, + struct ecore_sb_info_dbg *p_info) +{ + u16 sbid = p_sb->igu_sb_id; + u32 i; + + if (IS_VF(p_hwfn->p_dev)) + return ECORE_INVAL; + + if (sbid >= NUM_OF_SBS(p_hwfn->p_dev)) + return ECORE_INVAL; + + p_info->igu_prod = ecore_rd(p_hwfn, p_ptt, + IGU_REG_PRODUCER_MEMORY + sbid * 4); + p_info->igu_cons = ecore_rd(p_hwfn, p_ptt, + IGU_REG_CONSUMER_MEM + sbid * 4); + + for (i = 0; i < PIS_PER_SB; i++) + p_info->pi[i] = (u16)ecore_rd(p_hwfn, p_ptt, + CAU_REG_PI_MEMORY + + sbid * 4 * PIS_PER_SB + + i * 4); + + return ECORE_SUCCESS; +} + +void ecore_pf_flr_igu_cleanup(struct ecore_hwfn *p_hwfn) +{ + struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt; + struct ecore_ptt *p_dpc_ptt = ecore_get_reserved_ptt(p_hwfn, + RESERVED_PTT_DPC); + int i; + + /* Do not reorder the following cleanup sequence */ + /* Ack all attentions */ + ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ACK_BITS, 0xfff); + + /* Clear driver attention */ + ecore_wr(p_hwfn, p_dpc_ptt, + ((p_hwfn->rel_pf_id << 3) + MISC_REG_AEU_GENERAL_ATTN_0), 0); + + /* Clear per-PF IGU registers to restore them as if the IGU + * was reset for this PF + */ + ecore_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0); + ecore_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0); + ecore_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, 0); + + /* Execute IGU clean up*/ + ecore_wr(p_hwfn, p_ptt, IGU_REG_PF_FUNCTIONAL_CLEANUP, 1); + + /* Clear Stats */ + ecore_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_OF_INTA_ASSERTED, 0); + + for (i = 0; i < IGU_REG_PBA_STS_PF_SIZE; i++) + ecore_wr(p_hwfn, p_ptt, IGU_REG_PBA_STS_PF + i * 4, 0); +} diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_int.h b/src/spdk/dpdk/drivers/net/qede/base/ecore_int.h new file mode 100644 index 000000000..5042cd1d1 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_int.h @@ -0,0 +1,261 @@ +/* SPDX-License-Identifier: 
BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. + * All rights reserved. + * www.cavium.com + */ + +#ifndef __ECORE_INT_H__ +#define __ECORE_INT_H__ + +#include "ecore.h" +#include "ecore_int_api.h" + +#define ECORE_CAU_DEF_RX_TIMER_RES 0 +#define ECORE_CAU_DEF_TX_TIMER_RES 0 + +#define ECORE_SB_ATT_IDX 0x0001 +#define ECORE_SB_EVENT_MASK 0x0003 + +#define SB_ALIGNED_SIZE(p_hwfn) \ + ALIGNED_TYPE_SIZE(struct status_block, p_hwfn) + +#define ECORE_SB_INVALID_IDX 0xffff + +struct ecore_igu_block { + u8 status; +#define ECORE_IGU_STATUS_FREE 0x01 +#define ECORE_IGU_STATUS_VALID 0x02 +#define ECORE_IGU_STATUS_PF 0x04 +#define ECORE_IGU_STATUS_DSB 0x08 + + u8 vector_number; + u8 function_id; + u8 is_pf; + + /* Index inside IGU [meant for back reference] */ + u16 igu_sb_id; + + struct ecore_sb_info *sb_info; +}; + +struct ecore_igu_info { + struct ecore_igu_block entry[MAX_TOT_SB_PER_PATH]; + u16 igu_dsb_id; + + /* The numbers can shift when using APIs to switch SBs between PF and + * VF. + */ + struct ecore_sb_cnt_info usage; + + /* Determine whether we can shift SBs between VFs and PFs */ + bool b_allow_pf_vf_change; +}; + +/** + * @brief - Make sure the IGU CAM reflects the resources provided by MFW + * + * @param p_hwfn + * @param p_ptt + */ +int ecore_int_igu_reset_cam(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt); + +/** + * @brief - Make sure IGU CAM reflects the default resources once again, + * starting with a 'dirty' SW database. + * @param p_hwfn + * @param p_ptt + */ +int ecore_int_igu_reset_cam_default(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt); + +/** + * @brief Translate the weakly-defined client sb-id into an IGU sb-id + * + * @param p_hwfn + * @param sb_id - user provided sb_id + * + * @return an index inside IGU CAM where the SB resides + */ +u16 ecore_get_igu_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id); + +/** + * @brief return a pointer to an unused valid SB + * + * @param p_hwfn + * @param b_is_pf - true iff we want a SB belonging to a PF + * + * @return point to an igu_block, OSAL_NULL if none is available + */ +struct ecore_igu_block * +ecore_get_igu_free_sb(struct ecore_hwfn *p_hwfn, bool b_is_pf); +/* TODO Names of function may change... */ +void ecore_int_igu_init_pure_rt(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + bool b_set, bool b_slowpath); + +void ecore_int_igu_init_rt(struct ecore_hwfn *p_hwfn); + +/** + * @brief ecore_int_igu_read_cam - Reads the IGU CAM. + * This function needs to be called during hardware + * prepare. It reads the info from igu cam to know which + * status block is the default / base status block etc. + * + * @param p_hwfn + * @param p_ptt + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_int_igu_read_cam(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt); + +typedef enum _ecore_status_t (*ecore_int_comp_cb_t) (struct ecore_hwfn *p_hwfn, + void *cookie); +/** + * @brief ecore_int_register_cb - Register callback func for + * slowhwfn statusblock. + * + * Every protocol that uses the slowhwfn status block + * should register a callback function that will be called + * once there is an update of the sp status block. + * + * @param p_hwfn + * @param comp_cb - function to be called when there is an + * interrupt on the sp sb + * + * @param cookie - passed to the callback function + * @param sb_idx - OUT parameter which gives the chosen index + * for this protocol. + * @param p_fw_cons - pointer to the actual address of the + * consumer for this protocol. 
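+ *
+ * Minimal usage sketch (the callback and cookie names below are
+ * placeholders, not part of this API):
+ *
+ *   u8 sb_idx;
+ *   __le16 *p_fw_cons;
+ *
+ *   rc = ecore_int_register_cb(p_hwfn, my_proto_cb, p_my_cookie,
+ *                              &sb_idx, &p_fw_cons);
+ *
+ * The chosen index can later be released with
+ * ecore_int_unregister_cb(p_hwfn, sb_idx).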
+ * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_int_register_cb(struct ecore_hwfn *p_hwfn, + ecore_int_comp_cb_t comp_cb, + void *cookie, + u8 *sb_idx, __le16 **p_fw_cons); +/** + * @brief ecore_int_unregister_cb - Unregisters callback + * function from sp sb. + * Partner of ecore_int_register_cb -> should be called + * when no longer required. + * + * @param p_hwfn + * @param pi + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_int_unregister_cb(struct ecore_hwfn *p_hwfn, u8 pi); + +/** + * @brief ecore_int_get_sp_sb_id - Get the slowhwfn sb id. + * + * @param p_hwfn + * + * @return u16 + */ +u16 ecore_int_get_sp_sb_id(struct ecore_hwfn *p_hwfn); + +/** + * @brief Status block cleanup. Should be called for each status + * block that will be used -> both PF / VF + * + * @param p_hwfn + * @param p_ptt + * @param sb_id - igu status block id + * @param opaque - opaque fid of the sb owner. + * @param cleanup_set - set(1) / clear(0) + */ +void ecore_int_igu_init_pure_rt_single(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u16 sb_id, + u16 opaque, + bool b_set); + +/** + * @brief ecore_int_cau_conf - configure cau for a given status + * block + * + * @param p_hwfn + * @param ptt + * @param sb_phys + * @param igu_sb_id + * @param vf_number + * @param vf_valid + */ +void ecore_int_cau_conf_sb(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + dma_addr_t sb_phys, + u16 igu_sb_id, u16 vf_number, u8 vf_valid); + +/** +* @brief ecore_int_alloc +* +* @param p_hwfn + * @param p_ptt +* +* @return enum _ecore_status_t +*/ +enum _ecore_status_t ecore_int_alloc(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt); + +/** +* @brief ecore_int_free +* +* @param p_hwfn +*/ +void ecore_int_free(struct ecore_hwfn *p_hwfn); + +/** +* @brief ecore_int_setup +* +* @param p_hwfn +* @param p_ptt +*/ +void ecore_int_setup(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt); + +/** + * @brief - Enable Interrupt & Attention for hw function + * + * @param p_hwfn + * @param p_ptt + * @param int_mode + * +* @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_int_igu_enable(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + enum ecore_int_mode int_mode); + +/** + * @brief - Initialize CAU status block entry + * + * @param p_hwfn + * @param p_sb_entry + * @param pf_id + * @param vf_number + * @param vf_valid + */ +void ecore_init_cau_sb_entry(struct ecore_hwfn *p_hwfn, + struct cau_sb_entry *p_sb_entry, u8 pf_id, + u16 vf_number, u8 vf_valid); + +enum _ecore_status_t ecore_int_set_timer_res(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u8 timer_res, u16 sb_id, bool tx); +#ifndef ASIC_ONLY +#define ECORE_MAPPING_MEMORY_SIZE(dev) \ + ((CHIP_REV_IS_SLOW(dev) && (!(dev)->b_is_emul_full)) ? \ + 136 : NUM_OF_SBS(dev)) +#else +#define ECORE_MAPPING_MEMORY_SIZE(dev) NUM_OF_SBS(dev) +#endif + +enum _ecore_status_t ecore_pglueb_rbc_attn_handler(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + bool is_hw_init); +void ecore_pf_flr_igu_cleanup(struct ecore_hwfn *p_hwfn); + +#endif /* __ECORE_INT_H__ */ diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_int_api.h b/src/spdk/dpdk/drivers/net/qede/base/ecore_int_api.h new file mode 100644 index 000000000..d7b6b86cc --- /dev/null +++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_int_api.h @@ -0,0 +1,363 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. + * All rights reserved. 
+ * www.cavium.com + */ + +#ifndef __ECORE_INT_API_H__ +#define __ECORE_INT_API_H__ + +#ifndef __EXTRACT__LINUX__ +#define ECORE_SB_IDX 0x0002 + +#define RX_PI 0 +#define TX_PI(tc) (RX_PI + 1 + tc) + +#ifndef ECORE_INT_MODE +#define ECORE_INT_MODE +enum ecore_int_mode { + ECORE_INT_MODE_INTA, + ECORE_INT_MODE_MSIX, + ECORE_INT_MODE_MSI, + ECORE_INT_MODE_POLL, +}; +#endif + +struct ecore_sb_info { + void *sb_virt; /* ptr to "struct status_block_e{4,5}" */ + u32 sb_size; /* size of "struct status_block_e{4,5}" */ + __le16 *sb_pi_array; /* ptr to "sb_virt->pi_array" */ + __le32 *sb_prod_index; /* ptr to "sb_virt->prod_index" */ +#define STATUS_BLOCK_PROD_INDEX_MASK 0xFFFFFF + + dma_addr_t sb_phys; + u32 sb_ack; /* Last given ack */ + u16 igu_sb_id; + void OSAL_IOMEM *igu_addr; + u8 flags; +#define ECORE_SB_INFO_INIT 0x1 +#define ECORE_SB_INFO_SETUP 0x2 + +#ifdef ECORE_CONFIG_DIRECT_HWFN + struct ecore_hwfn *p_hwfn; +#endif + struct ecore_dev *p_dev; +}; + +struct ecore_sb_info_dbg { + u32 igu_prod; + u32 igu_cons; + u16 pi[PIS_PER_SB]; +}; + +struct ecore_sb_cnt_info { + /* Original, current, and free SBs for PF */ + int orig; + int cnt; + int free_cnt; + + /* Original, current and free SBS for child VFs */ + int iov_orig; + int iov_cnt; + int free_cnt_iov; +}; + +static OSAL_INLINE u16 ecore_sb_update_sb_idx(struct ecore_sb_info *sb_info) +{ + u32 prod = 0; + u16 rc = 0; + + /* barrier(); status block is written to by the chip */ + /* FIXME: need some sort of barrier. */ + prod = OSAL_LE32_TO_CPU(*sb_info->sb_prod_index) & + STATUS_BLOCK_PROD_INDEX_MASK; + if (sb_info->sb_ack != prod) { + sb_info->sb_ack = prod; + rc |= ECORE_SB_IDX; + } + + OSAL_MMIOWB(sb_info->p_dev); + return rc; +} + +/** + * + * @brief This function creates an update command for interrupts that is + * written to the IGU. + * + * @param sb_info - This is the structure allocated and + * initialized per status block. Assumption is + * that it was initialized using ecore_sb_init + * @param int_cmd - Enable/Disable/Nop + * @param upd_flg - whether igu consumer should be + * updated. + * + * @return OSAL_INLINE void + */ +static OSAL_INLINE void ecore_sb_ack(struct ecore_sb_info *sb_info, + enum igu_int_cmd int_cmd, u8 upd_flg) +{ + struct igu_prod_cons_update igu_ack; + + OSAL_MEMSET(&igu_ack, 0, sizeof(struct igu_prod_cons_update)); + igu_ack.sb_id_and_flags = + ((sb_info->sb_ack << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) | + (upd_flg << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) | + (int_cmd << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) | + (IGU_SEG_ACCESS_REG << IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT)); + +#ifdef ECORE_CONFIG_DIRECT_HWFN + DIRECT_REG_WR(sb_info->p_hwfn, sb_info->igu_addr, + igu_ack.sb_id_and_flags); +#else + DIRECT_REG_WR(OSAL_NULL, sb_info->igu_addr, igu_ack.sb_id_and_flags); +#endif + /* Both segments (interrupts & acks) are written to same place address; + * Need to guarantee all commands will be received (in-order) by HW. 
+ */ + OSAL_MMIOWB(sb_info->p_dev); + OSAL_BARRIER(sb_info->p_dev); +} + +#ifdef ECORE_CONFIG_DIRECT_HWFN +static OSAL_INLINE void __internal_ram_wr(struct ecore_hwfn *p_hwfn, + void OSAL_IOMEM *addr, + int size, u32 *data) +#else +static OSAL_INLINE void __internal_ram_wr(__rte_unused void *p_hwfn, + void OSAL_IOMEM *addr, + int size, u32 *data) +#endif +{ + unsigned int i; + + for (i = 0; i < size / sizeof(*data); i++) + DIRECT_REG_WR(p_hwfn, &((u32 OSAL_IOMEM *)addr)[i], data[i]); +} + +#ifdef ECORE_CONFIG_DIRECT_HWFN +static OSAL_INLINE void __internal_ram_wr_relaxed(struct ecore_hwfn *p_hwfn, + void OSAL_IOMEM * addr, + int size, u32 *data) +#else +static OSAL_INLINE void __internal_ram_wr_relaxed(__rte_unused void *p_hwfn, + void OSAL_IOMEM * addr, + int size, u32 *data) +#endif +{ + unsigned int i; + + for (i = 0; i < size / sizeof(*data); i++) + DIRECT_REG_WR_RELAXED(p_hwfn, &((u32 OSAL_IOMEM *)addr)[i], + data[i]); +} + +#ifdef ECORE_CONFIG_DIRECT_HWFN +static OSAL_INLINE void internal_ram_wr(struct ecore_hwfn *p_hwfn, + void OSAL_IOMEM * addr, + int size, u32 *data) +{ + __internal_ram_wr_relaxed(p_hwfn, addr, size, data); +} +#else +static OSAL_INLINE void internal_ram_wr(void OSAL_IOMEM *addr, + int size, u32 *data) +{ + __internal_ram_wr_relaxed(OSAL_NULL, addr, size, data); +} +#endif + +#endif + +struct ecore_hwfn; +struct ecore_ptt; + +enum ecore_coalescing_fsm { + ECORE_COAL_RX_STATE_MACHINE, + ECORE_COAL_TX_STATE_MACHINE +}; + +/** + * @brief ecore_int_cau_conf_pi - configure cau for a given + * status block + * + * @param p_hwfn + * @param p_ptt + * @param p_sb + * @param pi_index + * @param state + * @param timeset + */ +void ecore_int_cau_conf_pi(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_sb_info *p_sb, + u32 pi_index, + enum ecore_coalescing_fsm coalescing_fsm, + u8 timeset); + +/** + * + * @brief ecore_int_igu_enable_int - enable device interrupts + * + * @param p_hwfn + * @param p_ptt + * @param int_mode - interrupt mode to use + */ +void ecore_int_igu_enable_int(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + enum ecore_int_mode int_mode); + +/** + * + * @brief ecore_int_igu_disable_int - disable device interrupts + * + * @param p_hwfn + * @param p_ptt + */ +void ecore_int_igu_disable_int(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt); + +/** + * + * @brief ecore_int_igu_read_sisr_reg - Reads the single isr multiple dpc + * register from igu. + * + * @param p_hwfn + * + * @return u64 + */ +u64 ecore_int_igu_read_sisr_reg(struct ecore_hwfn *p_hwfn); + +#define ECORE_SP_SB_ID 0xffff + +/** + * @brief ecore_int_sb_init - Initializes the sb_info structure. + * + * once the structure is initialized it can be passed to sb related functions. + * + * @param p_hwfn + * @param p_ptt + * @param sb_info points to an uninitialized (but + * allocated) sb_info structure + * @param sb_virt_addr + * @param sb_phy_addr + * @param sb_id the sb_id to be used (zero based in driver) + * should use ECORE_SP_SB_ID for SP Status block + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_int_sb_init(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_sb_info *sb_info, + void *sb_virt_addr, + dma_addr_t sb_phy_addr, u16 sb_id); +/** + * @brief ecore_int_sb_setup - Setup the sb. 
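+ *
+ * Also invoked internally at the end of ecore_int_sb_init() and from
+ * ecore_int_setup() for the slowpath status block.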
+ * + * @param p_hwfn + * @param p_ptt + * @param sb_info initialized sb_info structure + */ +void ecore_int_sb_setup(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, struct ecore_sb_info *sb_info); + +/** + * @brief ecore_int_sb_release - releases the sb_info structure. + * + * once the structure is released, it's memory can be freed + * + * @param p_hwfn + * @param sb_info points to an allocated sb_info structure + * @param sb_id the sb_id to be used (zero based in driver) + * should never be equal to ECORE_SP_SB_ID + * (SP Status block) + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_int_sb_release(struct ecore_hwfn *p_hwfn, + struct ecore_sb_info *sb_info, + u16 sb_id); + +/** + * @brief ecore_int_sp_dpc - To be called when an interrupt is received on the + * default status block. + * + * @param p_hwfn - pointer to hwfn + * + */ +void ecore_int_sp_dpc(osal_int_ptr_t hwfn_cookie); + +/** + * @brief ecore_int_get_num_sbs - get the number of status + * blocks configured for this funciton in the igu. + * + * @param p_hwfn + * @param p_sb_cnt_info + * + * @return + */ +void ecore_int_get_num_sbs(struct ecore_hwfn *p_hwfn, + struct ecore_sb_cnt_info *p_sb_cnt_info); + +/** + * @brief ecore_int_disable_post_isr_release - performs the cleanup post ISR + * release. The API need to be called after releasing all slowpath IRQs + * of the device. + * + * @param p_dev + * + */ +void ecore_int_disable_post_isr_release(struct ecore_dev *p_dev); + +/** + * @brief ecore_int_attn_clr_enable - sets whether the general behavior is + * preventing attentions from being reasserted, or following the + * attributes of the specific attention. + * + * @param p_dev + * @param clr_enable + * + */ +void ecore_int_attn_clr_enable(struct ecore_dev *p_dev, bool clr_enable); + +/** + * @brief Read debug information regarding a given SB. + * + * @param p_hwfn + * @param p_ptt + * @param p_sb - point to Status block for which we want to get info. + * @param p_info - pointer to struct to fill with information regarding SB. + * + * @return ECORE_SUCCESS if pointer is filled; failure otherwise. + */ +enum _ecore_status_t ecore_int_get_sb_dbg(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_sb_info *p_sb, + struct ecore_sb_info_dbg *p_info); + +/** + * @brief - Move a free Status block between PF and child VF + * + * @param p_hwfn + * @param p_ptt + * @param sb_id - The PF fastpath vector to be moved [re-assigned if claiming + * from VF, given-up if moving to VF] + * @param b_to_vf - PF->VF == true, VF->PF == false + * + * @return ECORE_SUCCESS if SB successfully moved. + */ +enum _ecore_status_t +ecore_int_igu_relocate_sb(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, + u16 sb_id, bool b_to_vf); + +/** + * @brief - Doorbell Recovery handler. + * Run DB_REAL_DEAL doorbell recovery in case of PF overflow + * (and flush DORQ if needed), otherwise run DB_REC_ONCE. + * + * @param p_hwfn + * @param p_ptt + */ +enum _ecore_status_t ecore_db_rec_handler(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt); +#endif diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_iov_api.h b/src/spdk/dpdk/drivers/net/qede/base/ecore_iov_api.h new file mode 100644 index 000000000..545001812 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_iov_api.h @@ -0,0 +1,771 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. + * All rights reserved. 
+ * www.cavium.com + */ + +#ifndef __ECORE_SRIOV_API_H__ +#define __ECORE_SRIOV_API_H__ + +#include "common_hsi.h" +#include "ecore_status.h" + +#define ECORE_ETH_VF_NUM_MAC_FILTERS 1 +#define ECORE_ETH_VF_NUM_VLAN_FILTERS 2 +#define ECORE_VF_ARRAY_LENGTH (3) + +#define IS_VF(p_dev) ((p_dev)->b_is_vf) +#define IS_PF(p_dev) (!((p_dev)->b_is_vf)) +#ifdef CONFIG_ECORE_SRIOV +#define IS_PF_SRIOV(p_hwfn) (!!((p_hwfn)->p_dev->p_iov_info)) +#else +#define IS_PF_SRIOV(p_hwfn) (0) +#endif +#define IS_PF_SRIOV_ALLOC(p_hwfn) (!!((p_hwfn)->pf_iov_info)) +#define IS_PF_PDA(p_hwfn) 0 /* @@TBD Michalk */ + +/* @@@ TBD MichalK - what should this number be*/ +#define ECORE_MAX_VF_CHAINS_PER_PF 16 + +/* vport update extended feature tlvs flags */ +enum ecore_iov_vport_update_flag { + ECORE_IOV_VP_UPDATE_ACTIVATE = 0, + ECORE_IOV_VP_UPDATE_VLAN_STRIP = 1, + ECORE_IOV_VP_UPDATE_TX_SWITCH = 2, + ECORE_IOV_VP_UPDATE_MCAST = 3, + ECORE_IOV_VP_UPDATE_ACCEPT_PARAM = 4, + ECORE_IOV_VP_UPDATE_RSS = 5, + ECORE_IOV_VP_UPDATE_ACCEPT_ANY_VLAN = 6, + ECORE_IOV_VP_UPDATE_SGE_TPA = 7, + ECORE_IOV_VP_UPDATE_MAX = 8, +}; + +/* PF to VF STATUS is part of vfpf-channel API + * and must be forward compatible +*/ +enum ecore_iov_pf_to_vf_status { + PFVF_STATUS_WAITING = 0, + PFVF_STATUS_SUCCESS, + PFVF_STATUS_FAILURE, + PFVF_STATUS_NOT_SUPPORTED, + PFVF_STATUS_NO_RESOURCE, + PFVF_STATUS_FORCED, + PFVF_STATUS_MALICIOUS, + PFVF_STATUS_ACQUIRED, +}; + +struct ecore_mcp_link_params; +struct ecore_mcp_link_state; +struct ecore_mcp_link_capabilities; + +/* These defines are used by the hw-channel; should never change order */ +#define VFPF_ACQUIRE_OS_LINUX (0) +#define VFPF_ACQUIRE_OS_WINDOWS (1) +#define VFPF_ACQUIRE_OS_ESX (2) +#define VFPF_ACQUIRE_OS_SOLARIS (3) +#define VFPF_ACQUIRE_OS_LINUX_USERSPACE (4) + +struct ecore_vf_acquire_sw_info { + u32 driver_version; + u8 os_type; + + /* We have several close releases that all use ~same FW with different + * versions [making it incompatible as the versioning scheme is still + * tied directly to FW version], allow to override the checking. Only + * those versions would actually support this feature [so it would not + * break forward compatibility with newer HV drivers that are no longer + * suited]. + */ + bool override_fw_version; +}; + +struct ecore_public_vf_info { + /* These copies will later be reflected in the bulletin board, + * but this copy should be newer. + */ + u8 forced_mac[ETH_ALEN]; + u16 forced_vlan; + + /* Trusted VFs can configure promiscuous mode and + * set MAC address inspite PF has set forced MAC. + * Also store shadow promisc configuration if needed. + */ + bool is_trusted_configured; + bool is_trusted_request; +}; + +struct ecore_iov_vf_init_params { + u16 rel_vf_id; + + /* Number of requested Queues; Currently, don't support different + * number of Rx/Tx queues. + */ + /* TODO - remove this limitation */ + u16 num_queues; + + /* Allow the client to choose which qzones to use for Rx/Tx, + * and which queue_base to use for Tx queues on a per-queue basis. + * Notice values should be relative to the PF resources. + */ + u16 req_rx_queue[ECORE_MAX_VF_CHAINS_PER_PF]; + u16 req_tx_queue[ECORE_MAX_VF_CHAINS_PER_PF]; + + u8 vport_id; + + /* Should be set in case RSS is going to be used for VF */ + u8 rss_eng_id; +}; + +#ifdef CONFIG_ECORE_SW_CHANNEL +/* This is SW channel related only... 
*/ +enum mbx_state { + VF_PF_UNKNOWN_STATE = 0, + VF_PF_WAIT_FOR_START_REQUEST = 1, + VF_PF_WAIT_FOR_NEXT_CHUNK_OF_REQUEST = 2, + VF_PF_REQUEST_IN_PROCESSING = 3, + VF_PF_RESPONSE_READY = 4, +}; + +struct ecore_iov_sw_mbx { + enum mbx_state mbx_state; + + u32 request_size; + u32 request_offset; + + u32 response_size; + u32 response_offset; +}; + +/** + * @brief Get the vf sw mailbox params + * + * @param p_hwfn + * @param rel_vf_id + * + * @return struct ecore_iov_sw_mbx* + */ +struct ecore_iov_sw_mbx* +ecore_iov_get_vf_sw_mbx(struct ecore_hwfn *p_hwfn, + u16 rel_vf_id); +#endif + +/* This struct is part of ecore_dev and contains data relevant to all hwfns; + * Initialized only if SR-IOV cpabability is exposed in PCIe config space. + */ +struct ecore_hw_sriov_info { + /* standard SRIOV capability fields, mostly for debugging */ + int pos; /* capability position */ + int nres; /* number of resources */ + u32 cap; /* SR-IOV Capabilities */ + u16 ctrl; /* SR-IOV Control */ + u16 total_vfs; /* total VFs associated with the PF */ + u16 num_vfs; /* number of vfs that have been started */ + u16 initial_vfs; /* initial VFs associated with the PF */ + u16 nr_virtfn; /* number of VFs available */ + u16 offset; /* first VF Routing ID offset */ + u16 stride; /* following VF stride */ + u16 vf_device_id; /* VF device id */ + u32 pgsz; /* page size for BAR alignment */ + u8 link; /* Function Dependency Link */ + + u32 first_vf_in_pf; +}; + +#ifdef CONFIG_ECORE_SRIOV +#ifndef LINUX_REMOVE +/** + * @brief mark/clear all VFs before/after an incoming PCIe sriov + * disable. + * + * @param p_dev + * @param to_disable + */ +void ecore_iov_set_vfs_to_disable(struct ecore_dev *p_dev, + u8 to_disable); + +/** + * @brief mark/clear chosen VF before/after an incoming PCIe + * sriov disable. + * + * @param p_dev + * @param rel_vf_id + * @param to_disable + */ +void ecore_iov_set_vf_to_disable(struct ecore_dev *p_dev, + u16 rel_vf_id, + u8 to_disable); + +/** + * @brief ecore_iov_init_hw_for_vf - initialize the HW for + * enabling access of a VF. Also includes preparing the + * IGU for VF access. This needs to be called AFTER hw is + * initialized and BEFORE VF is loaded inside the VM. + * + * @param p_hwfn + * @param p_ptt + * @param p_params + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_iov_vf_init_params + *p_params); + +/** + * @brief ecore_iov_process_mbx_req - process a request received + * from the VF + * + * @param p_hwfn + * @param p_ptt + * @param vfid + */ +void ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + int vfid); + +/** + * @brief ecore_iov_release_hw_for_vf - called once upper layer + * knows VF is done with - can release any resources + * allocated for VF at this point. this must be done once + * we know VF is no longer loaded in VM. 
+ * + * @param p_hwfn + * @param p_ptt + * @param rel_vf_id + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_iov_release_hw_for_vf(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u16 rel_vf_id); + +/** + * @brief ecore_iov_set_vf_ctx - set a context for a given VF + * + * @param p_hwfn + * @param vf_id + * @param ctx + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_iov_set_vf_ctx(struct ecore_hwfn *p_hwfn, + u16 vf_id, + void *ctx); + +/** + * @brief FLR cleanup for all VFs + * + * @param p_hwfn + * @param p_ptt + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_iov_vf_flr_cleanup(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt); + +/** + * @brief FLR cleanup for single VF + * + * @param p_hwfn + * @param p_ptt + * @param rel_vf_id + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t +ecore_iov_single_vf_flr_cleanup(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u16 rel_vf_id); + +/** + * @brief Update the bulletin with link information. Notice this does NOT + * send a bulletin update, only updates the PF's bulletin. + * + * @param p_hwfn + * @param p_vf + * @param params - the link params to use for the VF link configuration + * @param link - the link output to use for the VF link configuration + * @param p_caps - the link default capabilities. + */ +void ecore_iov_set_link(struct ecore_hwfn *p_hwfn, + u16 vfid, + struct ecore_mcp_link_params *params, + struct ecore_mcp_link_state *link, + struct ecore_mcp_link_capabilities *p_caps); + +/** + * @brief Returns link information as perceived by VF. + * + * @param p_hwfn + * @param p_vf + * @param p_params - the link params visible to vf. + * @param p_link - the link state visible to vf. + * @param p_caps - the link default capabilities visible to vf. + */ +void ecore_iov_get_link(struct ecore_hwfn *p_hwfn, + u16 vfid, + struct ecore_mcp_link_params *params, + struct ecore_mcp_link_state *link, + struct ecore_mcp_link_capabilities *p_caps); + +/** + * @brief return if the VF is pending FLR + * + * @param p_hwfn + * @param rel_vf_id + * + * @return bool + */ +bool ecore_iov_is_vf_pending_flr(struct ecore_hwfn *p_hwfn, + u16 rel_vf_id); + +/** + * @brief Check if given VF ID @vfid is valid + * w.r.t. @b_enabled_only value + * if b_enabled_only = true - only enabled VF id is valid + * else any VF id less than max_vfs is valid + * + * @param p_hwfn + * @param rel_vf_id - Relative VF ID + * @param b_enabled_only - consider only enabled VF + * @param b_non_malicious - true iff we want to validate vf isn't malicious. + * + * @return bool - true for valid VF ID + */ +bool ecore_iov_is_valid_vfid(struct ecore_hwfn *p_hwfn, + int rel_vf_id, + bool b_enabled_only, bool b_non_malicious); + +/** + * @brief Get VF's public info structure + * + * @param p_hwfn + * @param vfid - Relative VF ID + * @param b_enabled_only - false if want to access even if vf is disabled + * + * @return struct ecore_public_vf_info * + */ +struct ecore_public_vf_info* +ecore_iov_get_public_vf_info(struct ecore_hwfn *p_hwfn, + u16 vfid, bool b_enabled_only); + +/** + * @brief fills a bitmask of all VFs which have pending unhandled + * messages. 
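+ *
+ * Usage sketch (assumes the caller supplies a bitmask array of
+ * ECORE_VF_ARRAY_LENGTH u64 words, one bit per relative VF id):
+ *
+ *   u64 events[ECORE_VF_ARRAY_LENGTH];
+ *   ecore_iov_pf_get_pending_events(p_hwfn, events);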
+ *
+ * @param p_hwfn
+ */
+void ecore_iov_pf_get_pending_events(struct ecore_hwfn *p_hwfn,
+ u64 *events);
+
+/**
+ * @brief Copy VF's message to PF's buffer
+ *
+ * @param p_hwfn
+ * @param ptt
+ * @param vfid
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_iov_copy_vf_msg(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *ptt,
+ int vfid);
+/**
+ * @brief Set forced MAC address in PF's copy of bulletin board
+ * and configures FW/HW to support the configuration.
+ *
+ * @param p_hwfn
+ * @param mac
+ * @param vfid
+ */
+void ecore_iov_bulletin_set_forced_mac(struct ecore_hwfn *p_hwfn,
+ u8 *mac, int vfid);
+
+/**
+ * @brief Set MAC address in PF's copy of bulletin board without
+ * configuring FW/HW.
+ *
+ * @param p_hwfn
+ * @param mac
+ * @param vfid
+ */
+enum _ecore_status_t ecore_iov_bulletin_set_mac(struct ecore_hwfn *p_hwfn,
+ u8 *mac, int vfid);
+
+/**
+ * @brief Set default behaviour of VF in case no vlans are configured for it -
+ * whether to accept only untagged traffic or all.
+ * Must be called prior to the VF vport-start.
+ *
+ * @param p_hwfn
+ * @param b_untagged_only
+ * @param vfid
+ *
+ * @return ECORE_SUCCESS if configuration would stick.
+ */
+enum _ecore_status_t
+ecore_iov_bulletin_set_forced_untagged_default(struct ecore_hwfn *p_hwfn,
+ bool b_untagged_only,
+ int vfid);
+
+/**
+ * @brief Get VF's opaque fid.
+ *
+ * @param p_hwfn
+ * @param vfid
+ * @param opaque_fid
+ */
+void ecore_iov_get_vfs_opaque_fid(struct ecore_hwfn *p_hwfn, int vfid,
+ u16 *opaque_fid);
+
+/**
+ * @brief Set forced VLAN [pvid] in PF's copy of bulletin board
+ * and configures FW/HW to support the configuration.
+ * Setting of pvid 0 would clear the feature.
+ * @param p_hwfn
+ * @param pvid
+ * @param vfid
+ */
+void ecore_iov_bulletin_set_forced_vlan(struct ecore_hwfn *p_hwfn,
+ u16 pvid, int vfid);
+
+/**
+ * @brief Check if VF has VPORT instance. This can be used
+ * to check if VPORT is active.
+ *
+ * @param p_hwfn
+ */
+bool ecore_iov_vf_has_vport_instance(struct ecore_hwfn *p_hwfn, int vfid);
+
+/**
+ * @brief PF posts the bulletin to the VF
+ *
+ * @param p_hwfn
+ * @param vfid
+ * @param p_ptt
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_iov_post_vf_bulletin(struct ecore_hwfn *p_hwfn,
+ int vfid,
+ struct ecore_ptt *p_ptt);
+
+/**
+ * @brief Check if given VF (@vfid) is marked as stopped
+ *
+ * @param p_hwfn
+ * @param vfid
+ *
+ * @return bool : true if stopped
+ */
+bool ecore_iov_is_vf_stopped(struct ecore_hwfn *p_hwfn, int vfid);
+
+/**
+ * @brief Configure VF anti spoofing
+ *
+ * @param p_hwfn
+ * @param vfid
+ * @param val - spoofchk value - true/false
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_iov_spoofchk_set(struct ecore_hwfn *p_hwfn,
+ int vfid, bool val);
+
+/**
+ * @brief Get VF's configured spoof value.
+ *
+ * @param p_hwfn
+ * @param vfid
+ *
+ * @return bool - spoofchk value - true/false
+ */
+bool ecore_iov_spoofchk_get(struct ecore_hwfn *p_hwfn, int vfid);
+
+/**
+ * @brief Check for SRIOV sanity by PF.
+ *
+ * @param p_hwfn
+ * @param vfid
+ *
+ * @return bool - true if sanity checks pass, else false
+ */
+bool ecore_iov_pf_sanity_check(struct ecore_hwfn *p_hwfn, int vfid);
+
+/**
+ * @brief Get the num of VF chains.
+ *
+ * @param p_hwfn
+ *
+ * @return u8
+ */
+u8 ecore_iov_vf_chains_per_pf(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief Get vf request mailbox params
+ *
+ * @param p_hwfn
+ * @param rel_vf_id
+ * @param pp_req_virt_addr
+ * @param p_req_virt_size
+ */
+void ecore_iov_get_vf_req_virt_mbx_params(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id,
+ void **pp_req_virt_addr,
+ u16 *p_req_virt_size);
+
+/**
+ * @brief Get vf reply mailbox params
+ *
+ * @param p_hwfn
+ * @param rel_vf_id
+ * @param pp_reply_virt_addr
+ * @param p_reply_virt_size
+ */
+void ecore_iov_get_vf_reply_virt_mbx_params(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id,
+ void **pp_reply_virt_addr,
+ u16 *p_reply_virt_size);
+
+/**
+ * @brief Validate if the given length is a valid vfpf message
+ * length
+ *
+ * @param length
+ *
+ * @return bool
+ */
+bool ecore_iov_is_valid_vfpf_msg_length(u32 length);
+
+/**
+ * @brief Return the max pfvf message length
+ *
+ * @return u32
+ */
+u32 ecore_iov_pfvf_msg_length(void);
+
+/**
+ * @brief Returns MAC address if one is configured
+ *
+ * @param p_hwfn
+ * @param rel_vf_id
+ *
+ * @return OSAL_NULL if mac isn't set; otherwise, returns MAC.
+ */
+u8 *ecore_iov_bulletin_get_mac(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id);
+
+/**
+ * @brief Returns forced MAC address if one is configured
+ *
+ * @param p_hwfn
+ * @param rel_vf_id
+ *
+ * @return OSAL_NULL if mac isn't forced; otherwise, returns MAC.
+ */
+u8 *ecore_iov_bulletin_get_forced_mac(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id);
+
+/**
+ * @brief Returns pvid if one is configured
+ *
+ * @param p_hwfn
+ * @param rel_vf_id
+ *
+ * @return 0 if no pvid is configured, otherwise the pvid.
+ */
+u16 ecore_iov_bulletin_get_forced_vlan(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id);
+/**
+ * @brief Configure VF's tx rate
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param vfid
+ * @param val - tx rate value in Mb/sec.
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_iov_configure_tx_rate(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ int vfid, int val);
+
+/**
+ * @brief - Retrieves the statistics associated with a VF
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param vfid
+ * @param p_stats - this will be filled with the VF statistics
+ *
+ * @return ECORE_SUCCESS iff statistics were retrieved. Error otherwise.
+ */
+enum _ecore_status_t ecore_iov_get_vf_stats(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ int vfid,
+ struct ecore_eth_stats *p_stats);
+
+/**
+ * @brief - Retrieves num of rxqs chains
+ *
+ * @param p_hwfn
+ * @param rel_vf_id
+ *
+ * @return num of rxqs chains.
+ */
+u8 ecore_iov_get_vf_num_rxqs(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id);
+
+/**
+ * @brief - Retrieves num of active rxqs chains
+ *
+ * @param p_hwfn
+ * @param rel_vf_id
+ *
+ * @return
+ */
+u8 ecore_iov_get_vf_num_active_rxqs(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id);
+
+/**
+ * @brief - Retrieves ctx pointer
+ *
+ * @param p_hwfn
+ * @param rel_vf_id
+ *
+ * @return
+ */
+void *ecore_iov_get_vf_ctx(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id);
+
+/**
+ * @brief - Retrieves VF's num sbs
+ *
+ * @param p_hwfn
+ * @param rel_vf_id
+ *
+ * @return
+ */
+u8 ecore_iov_get_vf_num_sbs(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id);
+
+/**
+ * @brief - Return true if VF is waiting for acquire
+ *
+ * @param p_hwfn
+ * @param rel_vf_id
+ *
+ * @return
+ */
+bool ecore_iov_is_vf_wait_for_acquire(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id);
+
+/**
+ * @brief - Return true if VF is acquired but not initialized
+ *
+ * @param p_hwfn
+ * @param rel_vf_id
+ *
+ * @return
+ */
+bool ecore_iov_is_vf_acquired_not_initialized(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id);
+
+/**
+ * @brief - Return true if VF is acquired and initialized
+ *
+ * @param p_hwfn
+ * @param rel_vf_id
+ *
+ * @return
+ */
+bool ecore_iov_is_vf_initialized(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id);
+
+/**
+ * @brief - Return true if VF has started in FW
+ *
+ * @param p_hwfn
+ * @param rel_vf_id
+ *
+ * @return
+ */
+bool ecore_iov_is_vf_started(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id);
+
+/**
+ * @brief - Get VF's vport min rate configured.
+ * @param p_hwfn
+ * @param vfid
+ *
+ * @return - rate in Mbps
+ */
+int ecore_iov_get_vf_min_rate(struct ecore_hwfn *p_hwfn, int vfid);
+
+/**
+ * @brief - Configure min rate for VF's vport.
+ * @param p_dev
+ * @param vfid
+ * @param rate - rate in Mbps
+ *
+ * @return
+ */
+enum _ecore_status_t ecore_iov_configure_min_tx_rate(struct ecore_dev *p_dev,
+ int vfid, u32 rate);
+#endif
+
+/**
+ * @brief ecore_iov_pf_configure_vf_queue_coalesce - PF configures the
+ * coalesce parameters of a VF's Rx and Tx queues.
+ * While the API allows setting coalescing per-qid, all queues sharing a SB
+ * should be in the same range [i.e., either 0-0x7f, 0x80-0xff or 0x100-0x1ff]
+ * otherwise configuration would break.
+ *
+ * @param p_hwfn
+ * @param rx_coal - Rx Coalesce value in micro seconds.
+ * @param tx_coal - TX Coalesce value in micro seconds.
+ * @param vf_id
+ * @param qid
+ *
+ * @return enum _ecore_status_t
+ **/
+enum _ecore_status_t
+ecore_iov_pf_configure_vf_queue_coalesce(struct ecore_hwfn *p_hwfn,
+ u16 rx_coal, u16 tx_coal,
+ u16 vf_id, u16 qid);
+
+/**
+ * @brief - Given a VF index, return index of next [including that] active VF.
+ *
+ * @param p_hwfn
+ * @param rel_vf_id
+ *
+ * @return MAX_NUM_VFS_K2 in case no further active VFs, otherwise index.
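+ *
+ * A hedged usage sketch (not part of the original comment): callers normally
+ * iterate over active VFs with the ecore_for_each_vf() helper defined later
+ * in this header rather than calling this routine directly, e.g.
+ *
+ *   u16 i;
+ *
+ *   ecore_for_each_vf(p_hwfn, i)
+ *       (void)ecore_iov_post_vf_bulletin(p_hwfn, (int)i, p_ptt);
+ *
+ * where p_hwfn/p_ptt are assumed to be already-acquired handles.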
+ */ +u16 ecore_iov_get_next_active_vf(struct ecore_hwfn *p_hwfn, u16 rel_vf_id); + +void ecore_iov_bulletin_set_udp_ports(struct ecore_hwfn *p_hwfn, int vfid, + u16 vxlan_port, u16 geneve_port); + +#ifdef CONFIG_ECORE_SW_CHANNEL +/** + * @brief Set whether PF should communicate with VF using SW/HW channel + * Needs to be called for an enabled VF before acquire is over + * [latest good point for doing that is OSAL_IOV_VF_ACQUIRE()] + * + * @param p_hwfn + * @param vfid - relative vf index + * @param b_is_hw - true iff PF is to use HW channel for communication + */ +void ecore_iov_set_vf_hw_channel(struct ecore_hwfn *p_hwfn, int vfid, + bool b_is_hw); +#endif +#endif /* CONFIG_ECORE_SRIOV */ + +#define ecore_for_each_vf(_p_hwfn, _i) \ + for (_i = ecore_iov_get_next_active_vf(_p_hwfn, 0); \ + _i < MAX_NUM_VFS_K2; \ + _i = ecore_iov_get_next_active_vf(_p_hwfn, _i + 1)) + +#endif diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_iro.h b/src/spdk/dpdk/drivers/net/qede/base/ecore_iro.h new file mode 100644 index 000000000..b146faff9 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_iro.h @@ -0,0 +1,273 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. + * All rights reserved. + * www.cavium.com + */ + +#ifndef __IRO_H__ +#define __IRO_H__ + +/* Ystorm flow control mode. Use enum fw_flow_ctrl_mode */ +#define YSTORM_FLOW_CONTROL_MODE_OFFSET (IRO[0].base) +#define YSTORM_FLOW_CONTROL_MODE_SIZE (IRO[0].size) +/* Tstorm port statistics */ +#define TSTORM_PORT_STAT_OFFSET(port_id) (IRO[1].base + ((port_id) * IRO[1].m1)) +#define TSTORM_PORT_STAT_SIZE (IRO[1].size) +/* Tstorm ll2 port statistics */ +#define TSTORM_LL2_PORT_STAT_OFFSET(port_id) (IRO[2].base + \ + ((port_id) * IRO[2].m1)) +#define TSTORM_LL2_PORT_STAT_SIZE (IRO[2].size) +/* Ustorm VF-PF Channel ready flag */ +#define USTORM_VF_PF_CHANNEL_READY_OFFSET(vf_id) (IRO[3].base + \ + ((vf_id) * IRO[3].m1)) +#define USTORM_VF_PF_CHANNEL_READY_SIZE (IRO[3].size) +/* Ustorm Final flr cleanup ack */ +#define USTORM_FLR_FINAL_ACK_OFFSET(pf_id) (IRO[4].base + ((pf_id) * IRO[4].m1)) +#define USTORM_FLR_FINAL_ACK_SIZE (IRO[4].size) +/* Ustorm Event ring consumer */ +#define USTORM_EQE_CONS_OFFSET(pf_id) (IRO[5].base + ((pf_id) * IRO[5].m1)) +#define USTORM_EQE_CONS_SIZE (IRO[5].size) +/* Ustorm eth queue zone */ +#define USTORM_ETH_QUEUE_ZONE_OFFSET(queue_zone_id) (IRO[6].base + \ + ((queue_zone_id) * IRO[6].m1)) +#define USTORM_ETH_QUEUE_ZONE_SIZE (IRO[6].size) +/* Ustorm Common Queue ring consumer */ +#define USTORM_COMMON_QUEUE_CONS_OFFSET(queue_zone_id) (IRO[7].base + \ + ((queue_zone_id) * IRO[7].m1)) +#define USTORM_COMMON_QUEUE_CONS_SIZE (IRO[7].size) +/* Xstorm common PQ info */ +#define XSTORM_PQ_INFO_OFFSET(pq_id) (IRO[8].base + ((pq_id) * IRO[8].m1)) +#define XSTORM_PQ_INFO_SIZE (IRO[8].size) +/* Xstorm Integration Test Data */ +#define XSTORM_INTEG_TEST_DATA_OFFSET (IRO[9].base) +#define XSTORM_INTEG_TEST_DATA_SIZE (IRO[9].size) +/* Ystorm Integration Test Data */ +#define YSTORM_INTEG_TEST_DATA_OFFSET (IRO[10].base) +#define YSTORM_INTEG_TEST_DATA_SIZE (IRO[10].size) +/* Pstorm Integration Test Data */ +#define PSTORM_INTEG_TEST_DATA_OFFSET (IRO[11].base) +#define PSTORM_INTEG_TEST_DATA_SIZE (IRO[11].size) +/* Tstorm Integration Test Data */ +#define TSTORM_INTEG_TEST_DATA_OFFSET (IRO[12].base) +#define TSTORM_INTEG_TEST_DATA_SIZE (IRO[12].size) +/* Mstorm Integration Test Data */ +#define MSTORM_INTEG_TEST_DATA_OFFSET (IRO[13].base) +#define MSTORM_INTEG_TEST_DATA_SIZE (IRO[13].size) 
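+/* Usage sketch (hedged, not part of the generated header): each IRO entry
+ * provides a base offset plus per-index multipliers, so the *_OFFSET()
+ * macros here are combined with a storm RAM window to address firmware
+ * memory. For example, the driver's Rx path points its producer at
+ *
+ *   p_hwfn->regview + GTT_BAR0_MAP_REG_MSDM_RAM +
+ *   MSTORM_ETH_PF_PRODS_OFFSET(p_cid->abs.queue_id);
+ *
+ * (see ecore_eth_pf_rx_queue_start() in ecore_l2.c); the other offsets are
+ * meant to be used the same way against their respective storm RAM windows.
+ */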
+/* Ustorm Integration Test Data */
+#define USTORM_INTEG_TEST_DATA_OFFSET (IRO[14].base)
+#define USTORM_INTEG_TEST_DATA_SIZE (IRO[14].size)
+/* Xstorm overlay buffer host address */
+#define XSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[15].base)
+#define XSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[15].size)
+/* Ystorm overlay buffer host address */
+#define YSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[16].base)
+#define YSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[16].size)
+/* Pstorm overlay buffer host address */
+#define PSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[17].base)
+#define PSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[17].size)
+/* Tstorm overlay buffer host address */
+#define TSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[18].base)
+#define TSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[18].size)
+/* Mstorm overlay buffer host address */
+#define MSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[19].base)
+#define MSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[19].size)
+/* Ustorm overlay buffer host address */
+#define USTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[20].base)
+#define USTORM_OVERLAY_BUF_ADDR_SIZE (IRO[20].size)
+/* Tstorm producers */
+#define TSTORM_LL2_RX_PRODS_OFFSET(core_rx_queue_id) (IRO[21].base + \
+ ((core_rx_queue_id) * IRO[21].m1))
+#define TSTORM_LL2_RX_PRODS_SIZE (IRO[21].size)
+/* Tstorm LL2 queue statistics */
+#define CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \
+ (IRO[22].base + ((core_rx_queue_id) * IRO[22].m1))
+#define CORE_LL2_TSTORM_PER_QUEUE_STAT_SIZE (IRO[22].size)
+/* Ustorm LL2 queue statistics */
+#define CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \
+ (IRO[23].base + ((core_rx_queue_id) * IRO[23].m1))
+#define CORE_LL2_USTORM_PER_QUEUE_STAT_SIZE (IRO[23].size)
+/* Pstorm LL2 queue statistics */
+#define CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(core_tx_stats_id) \
+ (IRO[24].base + ((core_tx_stats_id) * IRO[24].m1))
+#define CORE_LL2_PSTORM_PER_QUEUE_STAT_SIZE (IRO[24].size)
+/* Mstorm queue statistics */
+#define MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) (IRO[25].base + \
+ ((stat_counter_id) * IRO[25].m1))
+#define MSTORM_QUEUE_STAT_SIZE (IRO[25].size)
+/* TPA aggregation timeout in us resolution (on ASIC) */
+#define MSTORM_TPA_TIMEOUT_US_OFFSET (IRO[26].base)
+#define MSTORM_TPA_TIMEOUT_US_SIZE (IRO[26].size)
+/* Mstorm ETH VF queues producers offset in RAM. Used in default VF zone size
+ * mode.
+ */ +#define MSTORM_ETH_VF_PRODS_OFFSET(vf_id, vf_queue_id) (IRO[27].base + \ + ((vf_id) * IRO[27].m1) + ((vf_queue_id) * IRO[27].m2)) +#define MSTORM_ETH_VF_PRODS_SIZE (IRO[27].size) +/* Mstorm ETH PF queues producers */ +#define MSTORM_ETH_PF_PRODS_OFFSET(queue_id) (IRO[28].base + \ + ((queue_id) * IRO[28].m1)) +#define MSTORM_ETH_PF_PRODS_SIZE (IRO[28].size) +/* Mstorm pf statistics */ +#define MSTORM_ETH_PF_STAT_OFFSET(pf_id) (IRO[29].base + ((pf_id) * IRO[29].m1)) +#define MSTORM_ETH_PF_STAT_SIZE (IRO[29].size) +/* Ustorm queue statistics */ +#define USTORM_QUEUE_STAT_OFFSET(stat_counter_id) (IRO[30].base + \ + ((stat_counter_id) * IRO[30].m1)) +#define USTORM_QUEUE_STAT_SIZE (IRO[30].size) +/* Ustorm pf statistics */ +#define USTORM_ETH_PF_STAT_OFFSET(pf_id) (IRO[31].base + ((pf_id) * IRO[31].m1)) +#define USTORM_ETH_PF_STAT_SIZE (IRO[31].size) +/* Pstorm queue statistics */ +#define PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) (IRO[32].base + \ + ((stat_counter_id) * IRO[32].m1)) +#define PSTORM_QUEUE_STAT_SIZE (IRO[32].size) +/* Pstorm pf statistics */ +#define PSTORM_ETH_PF_STAT_OFFSET(pf_id) (IRO[33].base + ((pf_id) * IRO[33].m1)) +#define PSTORM_ETH_PF_STAT_SIZE (IRO[33].size) +/* Control frame's EthType configuration for TX control frame security */ +#define PSTORM_CTL_FRAME_ETHTYPE_OFFSET(ethType_id) (IRO[34].base + \ + ((ethType_id) * IRO[34].m1)) +#define PSTORM_CTL_FRAME_ETHTYPE_SIZE (IRO[34].size) +/* Tstorm last parser message */ +#define TSTORM_ETH_PRS_INPUT_OFFSET (IRO[35].base) +#define TSTORM_ETH_PRS_INPUT_SIZE (IRO[35].size) +/* Tstorm Eth limit Rx rate */ +#define ETH_RX_RATE_LIMIT_OFFSET(pf_id) (IRO[36].base + ((pf_id) * IRO[36].m1)) +#define ETH_RX_RATE_LIMIT_SIZE (IRO[36].size) +/* RSS indirection table entry update command per PF offset in TSTORM PF BAR0. + * Use eth_tstorm_rss_update_data for update. 
+ */ +#define TSTORM_ETH_RSS_UPDATE_OFFSET(pf_id) (IRO[37].base + \ + ((pf_id) * IRO[37].m1)) +#define TSTORM_ETH_RSS_UPDATE_SIZE (IRO[37].size) +/* Xstorm queue zone */ +#define XSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) (IRO[38].base + \ + ((queue_id) * IRO[38].m1)) +#define XSTORM_ETH_QUEUE_ZONE_SIZE (IRO[38].size) +/* Ystorm cqe producer */ +#define YSTORM_TOE_CQ_PROD_OFFSET(rss_id) (IRO[39].base + \ + ((rss_id) * IRO[39].m1)) +#define YSTORM_TOE_CQ_PROD_SIZE (IRO[39].size) +/* Ustorm cqe producer */ +#define USTORM_TOE_CQ_PROD_OFFSET(rss_id) (IRO[40].base + \ + ((rss_id) * IRO[40].m1)) +#define USTORM_TOE_CQ_PROD_SIZE (IRO[40].size) +/* Ustorm grq producer */ +#define USTORM_TOE_GRQ_PROD_OFFSET(pf_id) (IRO[41].base + \ + ((pf_id) * IRO[41].m1)) +#define USTORM_TOE_GRQ_PROD_SIZE (IRO[41].size) +/* Tstorm cmdq-cons of given command queue-id */ +#define TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id) (IRO[42].base + \ + ((cmdq_queue_id) * IRO[42].m1)) +#define TSTORM_SCSI_CMDQ_CONS_SIZE (IRO[42].size) +/* Tstorm (reflects M-Storm) bdq-external-producer of given function ID, + * BDqueue-id + */ +#define TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(storage_func_id, bdq_id) \ + (IRO[43].base + ((storage_func_id) * IRO[43].m1) + \ + ((bdq_id) * IRO[43].m2)) +#define TSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[43].size) +/* Mstorm bdq-external-producer of given BDQ resource ID, BDqueue-id */ +#define MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(storage_func_id, bdq_id) \ + (IRO[44].base + ((storage_func_id) * IRO[44].m1) + \ + ((bdq_id) * IRO[44].m2)) +#define MSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[44].size) +/* Tstorm iSCSI RX stats */ +#define TSTORM_ISCSI_RX_STATS_OFFSET(storage_func_id) (IRO[45].base + \ + ((storage_func_id) * IRO[45].m1)) +#define TSTORM_ISCSI_RX_STATS_SIZE (IRO[45].size) +/* Mstorm iSCSI RX stats */ +#define MSTORM_ISCSI_RX_STATS_OFFSET(storage_func_id) (IRO[46].base + \ + ((storage_func_id) * IRO[46].m1)) +#define MSTORM_ISCSI_RX_STATS_SIZE (IRO[46].size) +/* Ustorm iSCSI RX stats */ +#define USTORM_ISCSI_RX_STATS_OFFSET(storage_func_id) (IRO[47].base + \ + ((storage_func_id) * IRO[47].m1)) +#define USTORM_ISCSI_RX_STATS_SIZE (IRO[47].size) +/* Xstorm iSCSI TX stats */ +#define XSTORM_ISCSI_TX_STATS_OFFSET(storage_func_id) (IRO[48].base + \ + ((storage_func_id) * IRO[48].m1)) +#define XSTORM_ISCSI_TX_STATS_SIZE (IRO[48].size) +/* Ystorm iSCSI TX stats */ +#define YSTORM_ISCSI_TX_STATS_OFFSET(storage_func_id) (IRO[49].base + \ + ((storage_func_id) * IRO[49].m1)) +#define YSTORM_ISCSI_TX_STATS_SIZE (IRO[49].size) +/* Pstorm iSCSI TX stats */ +#define PSTORM_ISCSI_TX_STATS_OFFSET(storage_func_id) (IRO[50].base + \ + ((storage_func_id) * IRO[50].m1)) +#define PSTORM_ISCSI_TX_STATS_SIZE (IRO[50].size) +/* Tstorm FCoE RX stats */ +#define TSTORM_FCOE_RX_STATS_OFFSET(pf_id) (IRO[51].base + \ + ((pf_id) * IRO[51].m1)) +#define TSTORM_FCOE_RX_STATS_SIZE (IRO[51].size) +/* Pstorm FCoE TX stats */ +#define PSTORM_FCOE_TX_STATS_OFFSET(pf_id) (IRO[52].base + \ + ((pf_id) * IRO[52].m1)) +#define PSTORM_FCOE_TX_STATS_SIZE (IRO[52].size) +/* Pstorm RDMA queue statistics */ +#define PSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) (IRO[53].base + \ + ((rdma_stat_counter_id) * IRO[53].m1)) +#define PSTORM_RDMA_QUEUE_STAT_SIZE (IRO[53].size) +/* Tstorm RDMA queue statistics */ +#define TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) (IRO[54].base + \ + ((rdma_stat_counter_id) * IRO[54].m1)) +#define TSTORM_RDMA_QUEUE_STAT_SIZE (IRO[54].size) +/* Xstorm error level for assert */ +#define XSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) 
(IRO[55].base + \ + ((pf_id) * IRO[55].m1)) +#define XSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[55].size) +/* Ystorm error level for assert */ +#define YSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) (IRO[56].base + \ + ((pf_id) * IRO[56].m1)) +#define YSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[56].size) +/* Pstorm error level for assert */ +#define PSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) (IRO[57].base + \ + ((pf_id) * IRO[57].m1)) +#define PSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[57].size) +/* Tstorm error level for assert */ +#define TSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) (IRO[58].base + \ + ((pf_id) * IRO[58].m1)) +#define TSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[58].size) +/* Mstorm error level for assert */ +#define MSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) (IRO[59].base + \ + ((pf_id) * IRO[59].m1)) +#define MSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[59].size) +/* Ustorm error level for assert */ +#define USTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) (IRO[60].base + \ + ((pf_id) * IRO[60].m1)) +#define USTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[60].size) +/* Xstorm iWARP rxmit stats */ +#define XSTORM_IWARP_RXMIT_STATS_OFFSET(pf_id) (IRO[61].base + \ + ((pf_id) * IRO[61].m1)) +#define XSTORM_IWARP_RXMIT_STATS_SIZE (IRO[61].size) +/* Tstorm RoCE Event Statistics */ +#define TSTORM_ROCE_EVENTS_STAT_OFFSET(roce_pf_id) (IRO[62].base + \ + ((roce_pf_id) * IRO[62].m1)) +#define TSTORM_ROCE_EVENTS_STAT_SIZE (IRO[62].size) +/* DCQCN Received Statistics */ +#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_OFFSET(roce_pf_id) (IRO[63].base + \ + ((roce_pf_id) * IRO[63].m1)) +#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_SIZE (IRO[63].size) +/* RoCE Error Statistics */ +#define YSTORM_ROCE_ERROR_STATS_OFFSET(roce_pf_id) (IRO[64].base + \ + ((roce_pf_id) * IRO[64].m1)) +#define YSTORM_ROCE_ERROR_STATS_SIZE (IRO[64].size) +/* DCQCN Sent Statistics */ +#define PSTORM_ROCE_DCQCN_SENT_STATS_OFFSET(roce_pf_id) (IRO[65].base + \ + ((roce_pf_id) * IRO[65].m1)) +#define PSTORM_ROCE_DCQCN_SENT_STATS_SIZE (IRO[65].size) +/* RoCE CQEs Statistics */ +#define USTORM_ROCE_CQE_STATS_OFFSET(roce_pf_id) (IRO[66].base + \ + ((roce_pf_id) * IRO[66].m1)) +#define USTORM_ROCE_CQE_STATS_SIZE (IRO[66].size) +/* Tstorm NVMf per port per producer consumer data */ +#define TSTORM_NVMF_PORT_TASKPOOL_PRODUCER_CONSUMER_OFFSET(port_num_id, \ + taskpool_index) (IRO[67].base + ((port_num_id) * IRO[67].m1) + \ + ((taskpool_index) * IRO[67].m2)) +#define TSTORM_NVMF_PORT_TASKPOOL_PRODUCER_CONSUMER_SIZE (IRO[67].size) +/* Ustorm NVMf per port counters */ +#define USTORM_NVMF_PORT_COUNTERS_OFFSET(port_num_id) (IRO[68].base + \ + ((port_num_id) * IRO[68].m1)) +#define USTORM_NVMF_PORT_COUNTERS_SIZE (IRO[68].size) + +#endif diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_iro_values.h b/src/spdk/dpdk/drivers/net/qede/base/ecore_iro_values.h new file mode 100644 index 000000000..dd7349778 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_iro_values.h @@ -0,0 +1,227 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. + * All rights reserved. 
+ * www.cavium.com + */ + +#ifndef __IRO_VALUES_H__ +#define __IRO_VALUES_H__ + +/* Per-chip offsets in iro_arr in dwords */ +#define E4_IRO_ARR_OFFSET 0 + +/* IRO Array */ +static const u32 iro_arr[] = { + /* E4 */ + /* YSTORM_FLOW_CONTROL_MODE_OFFSET */ + /* offset=0x0, size=0x8 */ + 0x00000000, 0x00000000, 0x00080000, + /* TSTORM_PORT_STAT_OFFSET(port_id), */ + /* offset=0x3288, mult1=0x88, size=0x88 */ + 0x00003288, 0x00000088, 0x00880000, + /* TSTORM_LL2_PORT_STAT_OFFSET(port_id), */ + /* offset=0x58f0, mult1=0x20, size=0x20 */ + 0x000058f0, 0x00000020, 0x00200000, + /* USTORM_VF_PF_CHANNEL_READY_OFFSET(vf_id), */ + /* offset=0xb00, mult1=0x8, size=0x4 */ + 0x00000b00, 0x00000008, 0x00040000, + /* USTORM_FLR_FINAL_ACK_OFFSET(pf_id), */ + /* offset=0xa80, mult1=0x8, size=0x4 */ + 0x00000a80, 0x00000008, 0x00040000, + /* USTORM_EQE_CONS_OFFSET(pf_id), */ + /* offset=0x0, mult1=0x8, size=0x2 */ + 0x00000000, 0x00000008, 0x00020000, + /* USTORM_ETH_QUEUE_ZONE_OFFSET(queue_zone_id), */ + /* offset=0x80, mult1=0x8, size=0x4 */ + 0x00000080, 0x00000008, 0x00040000, + /* USTORM_COMMON_QUEUE_CONS_OFFSET(queue_zone_id), */ + /* offset=0x84, mult1=0x8, size=0x2 */ + 0x00000084, 0x00000008, 0x00020000, + /* XSTORM_PQ_INFO_OFFSET(pq_id), */ + /* offset=0x5718, mult1=0x4, size=0x4 */ + 0x00005718, 0x00000004, 0x00040000, + /* XSTORM_INTEG_TEST_DATA_OFFSET, */ + /* offset=0x4dd0, size=0x78 */ + 0x00004dd0, 0x00000000, 0x00780000, + /* YSTORM_INTEG_TEST_DATA_OFFSET */ + /* offset=0x3e40, size=0x78 */ + 0x00003e40, 0x00000000, 0x00780000, + /* PSTORM_INTEG_TEST_DATA_OFFSET, */ + /* offset=0x4480, size=0x78 */ + 0x00004480, 0x00000000, 0x00780000, + /* TSTORM_INTEG_TEST_DATA_OFFSET, */ + /* offset=0x3210, size=0x78 */ + 0x00003210, 0x00000000, 0x00780000, + /* MSTORM_INTEG_TEST_DATA_OFFSET */ + /* offset=0x3b50, size=0x78 */ + 0x00003b50, 0x00000000, 0x00780000, + /* USTORM_INTEG_TEST_DATA_OFFSET */ + /* offset=0x7f58, size=0x78 */ + 0x00007f58, 0x00000000, 0x00780000, + /* XSTORM_OVERLAY_BUF_ADDR_OFFSET, */ + /* offset=0x5f58, size=0x8 */ + 0x00005f58, 0x00000000, 0x00080000, + /* YSTORM_OVERLAY_BUF_ADDR_OFFSET */ + /* offset=0x7100, size=0x8 */ + 0x00007100, 0x00000000, 0x00080000, + /* PSTORM_OVERLAY_BUF_ADDR_OFFSET, */ + /* offset=0xaea0, size=0x8 */ + 0x0000aea0, 0x00000000, 0x00080000, + /* TSTORM_OVERLAY_BUF_ADDR_OFFSET, */ + /* offset=0x4398, size=0x8 */ + 0x00004398, 0x00000000, 0x00080000, + /* MSTORM_OVERLAY_BUF_ADDR_OFFSET */ + /* offset=0xa5a0, size=0x8 */ + 0x0000a5a0, 0x00000000, 0x00080000, + /* USTORM_OVERLAY_BUF_ADDR_OFFSET */ + /* offset=0xbde8, size=0x8 */ + 0x0000bde8, 0x00000000, 0x00080000, + /* TSTORM_LL2_RX_PRODS_OFFSET(core_rx_queue_id), */ + /* offset=0x20, mult1=0x4, size=0x4 */ + 0x00000020, 0x00000004, 0x00040000, + /* CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id), */ + /* offset=0x56d0, mult1=0x10, size=0x10 */ + 0x000056d0, 0x00000010, 0x00100000, + /* CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id), */ + /* offset=0xc210, mult1=0x30, size=0x30 */ + 0x0000c210, 0x00000030, 0x00300000, + /* CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(core_tx_stats_id), */ + /* offset=0xb088, mult1=0x38, size=0x38 */ + 0x0000b088, 0x00000038, 0x00380000, + /* MSTORM_QUEUE_STAT_OFFSET(stat_counter_id), */ + /* offset=0x3d20, mult1=0x80, size=0x40 */ + 0x00003d20, 0x00000080, 0x00400000, + /* MSTORM_TPA_TIMEOUT_US_OFFSET */ + /* offset=0xbf60, size=0x4 */ + 0x0000bf60, 0x00000000, 0x00040000, + /* MSTORM_ETH_VF_PRODS_OFFSET(vf_id,vf_queue_id), */ + /* offset=0x4560, mult1=0x80, 
mult2=0x4, size=0x4 */ + 0x00004560, 0x00040080, 0x00040000, + /* MSTORM_ETH_PF_PRODS_OFFSET(queue_id), */ + /* offset=0x1f8, mult1=0x4, size=0x4 */ + 0x000001f8, 0x00000004, 0x00040000, + /* MSTORM_ETH_PF_STAT_OFFSET(pf_id), */ + /* offset=0x3d60, mult1=0x80, size=0x20 */ + 0x00003d60, 0x00000080, 0x00200000, + /* USTORM_QUEUE_STAT_OFFSET(stat_counter_id), */ + /* offset=0x8960, mult1=0x40, size=0x30 */ + 0x00008960, 0x00000040, 0x00300000, + /* USTORM_ETH_PF_STAT_OFFSET(pf_id), */ + /* offset=0xe840, mult1=0x60, size=0x60 */ + 0x0000e840, 0x00000060, 0x00600000, + /* PSTORM_QUEUE_STAT_OFFSET(stat_counter_id), */ + /* offset=0x4618, mult1=0x80, size=0x38 */ + 0x00004618, 0x00000080, 0x00380000, + /* PSTORM_ETH_PF_STAT_OFFSET(pf_id), */ + /* offset=0x10738, mult1=0xc0, size=0xc0 */ + 0x00010738, 0x000000c0, 0x00c00000, + /* PSTORM_CTL_FRAME_ETHTYPE_OFFSET(ethType_id), */ + /* offset=0x1f8, mult1=0x2, size=0x2 */ + 0x000001f8, 0x00000002, 0x00020000, + /* TSTORM_ETH_PRS_INPUT_OFFSET, */ + /* offset=0xa2a8, size=0x108 */ + 0x0000a2a8, 0x00000000, 0x01080000, + /* ETH_RX_RATE_LIMIT_OFFSET(pf_id), */ + /* offset=0xa3b0, mult1=0x8, size=0x8 */ + 0x0000a3b0, 0x00000008, 0x00080000, + /* TSTORM_ETH_RSS_UPDATE_OFFSET(pf_id), */ + /* offset=0x1c0, mult1=0x8, size=0x8 */ + 0x000001c0, 0x00000008, 0x00080000, + /* XSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id), */ + /* offset=0x1f8, mult1=0x8, size=0x8 */ + 0x000001f8, 0x00000008, 0x00080000, + /* YSTORM_TOE_CQ_PROD_OFFSET(rss_id), */ + /* offset=0xac0, mult1=0x8, size=0x8 */ + 0x00000ac0, 0x00000008, 0x00080000, + /* USTORM_TOE_CQ_PROD_OFFSET(rss_id), */ + /* offset=0x2578, mult1=0x8, size=0x8 */ + 0x00002578, 0x00000008, 0x00080000, + /* USTORM_TOE_GRQ_PROD_OFFSET(pf_id), */ + /* offset=0x24f8, mult1=0x8, size=0x8 */ + 0x000024f8, 0x00000008, 0x00080000, + /* TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id), */ + /* offset=0x280, mult1=0x8, size=0x8 */ + 0x00000280, 0x00000008, 0x00080000, + /* TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(storage_func_id,bdq_id), */ + /* offset=0x680, mult1=0x18, mult2=0x8, size=0x8 */ + 0x00000680, 0x00080018, 0x00080000, + /* MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(storage_func_id,bdq_id), */ + /* offset=0xb78, mult1=0x18, mult2=0x8, size=0x2 */ + 0x00000b78, 0x00080018, 0x00020000, + /* TSTORM_ISCSI_RX_STATS_OFFSET(storage_func_id), */ + /* offset=0xc648, mult1=0x50, size=0x3c */ + 0x0000c648, 0x00000050, 0x003c0000, + /* MSTORM_ISCSI_RX_STATS_OFFSET(storage_func_id), */ + /* offset=0x12038, mult1=0x18, size=0x10 */ + 0x00012038, 0x00000018, 0x00100000, + /* USTORM_ISCSI_RX_STATS_OFFSET(storage_func_id), */ + /* offset=0x11b00, mult1=0x40, size=0x18 */ + 0x00011b00, 0x00000040, 0x00180000, + /* XSTORM_ISCSI_TX_STATS_OFFSET(storage_func_id), */ + /* offset=0x95d0, mult1=0x50, size=0x20 */ + 0x000095d0, 0x00000050, 0x00200000, + /* YSTORM_ISCSI_TX_STATS_OFFSET(storage_func_id), */ + /* offset=0x8b10, mult1=0x40, size=0x28 */ + 0x00008b10, 0x00000040, 0x00280000, + /* PSTORM_ISCSI_TX_STATS_OFFSET(storage_func_id), */ + /* offset=0x11640, mult1=0x18, size=0x10 */ + 0x00011640, 0x00000018, 0x00100000, + /* TSTORM_FCOE_RX_STATS_OFFSET(pf_id), */ + /* offset=0xc830, mult1=0x48, size=0x38 */ + 0x0000c830, 0x00000048, 0x00380000, + /* PSTORM_FCOE_TX_STATS_OFFSET(pf_id), */ + /* offset=0x11710, mult1=0x20, size=0x20 */ + 0x00011710, 0x00000020, 0x00200000, + /* PSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id), */ + /* offset=0x4650, mult1=0x80, size=0x10 */ + 0x00004650, 0x00000080, 0x00100000, + /* TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id), 
*/ + /* offset=0x3618, mult1=0x10, size=0x10 */ + 0x00003618, 0x00000010, 0x00100000, + /* XSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id), */ + /* offset=0xa968, mult1=0x8, size=0x1 */ + 0x0000a968, 0x00000008, 0x00010000, + /* YSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id), */ + /* offset=0x97a0, mult1=0x8, size=0x1 */ + 0x000097a0, 0x00000008, 0x00010000, + /* PSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id), */ + /* offset=0x11990, mult1=0x8, size=0x1 */ + 0x00011990, 0x00000008, 0x00010000, + /* TSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id), */ + /* offset=0xf020, mult1=0x8, size=0x1 */ + 0x0000f020, 0x00000008, 0x00010000, + /* MSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id), */ + /* offset=0x12628, mult1=0x8, size=0x1 */ + 0x00012628, 0x00000008, 0x00010000, + /* USTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id), */ + /* offset=0x11da8, mult1=0x8, size=0x1 */ + 0x00011da8, 0x00000008, 0x00010000, + /* XSTORM_IWARP_RXMIT_STATS_OFFSET(pf_id), */ + /* offset=0xaa78, mult1=0x30, size=0x10 */ + 0x0000aa78, 0x00000030, 0x00100000, + /* TSTORM_ROCE_EVENTS_STAT_OFFSET(roce_pf_id), */ + /* offset=0xd770, mult1=0x28, size=0x28 */ + 0x0000d770, 0x00000028, 0x00280000, + /* YSTORM_ROCE_DCQCN_RECEIVED_STATS_OFFSET(roce_pf_id), */ + /* offset=0x9a58, mult1=0x18, size=0x18 */ + 0x00009a58, 0x00000018, 0x00180000, + /* YSTORM_ROCE_ERROR_STATS_OFFSET(roce_pf_id), */ + /* offset=0x9bd8, mult1=0x8, size=0x8 */ + 0x00009bd8, 0x00000008, 0x00080000, + /* PSTORM_ROCE_DCQCN_SENT_STATS_OFFSET(roce_pf_id), */ + /* offset=0x13a18, mult1=0x8, size=0x8 */ + 0x00013a18, 0x00000008, 0x00080000, + /* USTORM_ROCE_CQE_STATS_OFFSET(roce_pf_id), */ + /* offset=0x126e8, mult1=0x18, size=0x18 */ + 0x000126e8, 0x00000018, 0x00180000, + /* TSTORM_NVMF_PORT_TASKPOOL_PRODUCER_CONSUMER_OFFSET(port_num_id,taskpool_index), */ + /* offset=0xe610, mult1=0x288, mult2=0x50, size=0x10 */ + 0x0000e610, 0x00500288, 0x00100000, + /* USTORM_NVMF_PORT_COUNTERS_OFFSET(port_num_id), */ + /* offset=0x12970, mult1=0x138, size=0x28 */ + 0x00012970, 0x00000138, 0x00280000, +}; +/* Data size: 828 bytes */ + + +#endif /* __IRO_VALUES_H__ */ diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_l2.c b/src/spdk/dpdk/drivers/net/qede/base/ecore_l2.c new file mode 100644 index 000000000..b20d83762 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_l2.c @@ -0,0 +1,2388 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. + * All rights reserved. 
+ * www.cavium.com + */ + +#include "bcm_osal.h" + +#include "ecore.h" +#include "ecore_status.h" +#include "ecore_hsi_eth.h" +#include "ecore_chain.h" +#include "ecore_spq.h" +#include "ecore_init_fw_funcs.h" +#include "ecore_cxt.h" +#include "ecore_l2.h" +#include "ecore_sp_commands.h" +#include "ecore_gtt_reg_addr.h" +#include "ecore_iro.h" +#include "reg_addr.h" +#include "ecore_int.h" +#include "ecore_hw.h" +#include "ecore_vf.h" +#include "ecore_sriov.h" +#include "ecore_mcp.h" + +#define ECORE_MAX_SGES_NUM 16 +#define CRC32_POLY 0x1edc6f41 + +struct ecore_l2_info { + u32 queues; + unsigned long **pp_qid_usage; + + /* The lock is meant to synchronize access to the qid usage */ + osal_mutex_t lock; +}; + +enum _ecore_status_t ecore_l2_alloc(struct ecore_hwfn *p_hwfn) +{ + struct ecore_l2_info *p_l2_info; + unsigned long **pp_qids; + u32 i; + + if (!ECORE_IS_L2_PERSONALITY(p_hwfn)) + return ECORE_SUCCESS; + + p_l2_info = OSAL_VZALLOC(p_hwfn->p_dev, sizeof(*p_l2_info)); + if (!p_l2_info) + return ECORE_NOMEM; + p_hwfn->p_l2_info = p_l2_info; + + if (IS_PF(p_hwfn->p_dev)) { + p_l2_info->queues = RESC_NUM(p_hwfn, ECORE_L2_QUEUE); + } else { + u8 rx = 0, tx = 0; + + ecore_vf_get_num_rxqs(p_hwfn, &rx); + ecore_vf_get_num_txqs(p_hwfn, &tx); + + p_l2_info->queues = (u32)OSAL_MAX_T(u8, rx, tx); + } + + pp_qids = OSAL_VZALLOC(p_hwfn->p_dev, + sizeof(unsigned long *) * + p_l2_info->queues); + if (pp_qids == OSAL_NULL) + return ECORE_NOMEM; + p_l2_info->pp_qid_usage = pp_qids; + + for (i = 0; i < p_l2_info->queues; i++) { + pp_qids[i] = OSAL_VZALLOC(p_hwfn->p_dev, + MAX_QUEUES_PER_QZONE / 8); + if (pp_qids[i] == OSAL_NULL) + return ECORE_NOMEM; + } + +#ifdef CONFIG_ECORE_LOCK_ALLOC + if (OSAL_MUTEX_ALLOC(p_hwfn, &p_l2_info->lock)) + return ECORE_NOMEM; +#endif + + return ECORE_SUCCESS; +} + +void ecore_l2_setup(struct ecore_hwfn *p_hwfn) +{ + if (!ECORE_IS_L2_PERSONALITY(p_hwfn)) + return; + + OSAL_MUTEX_INIT(&p_hwfn->p_l2_info->lock); +} + +void ecore_l2_free(struct ecore_hwfn *p_hwfn) +{ + u32 i; + + if (!ECORE_IS_L2_PERSONALITY(p_hwfn)) + return; + + if (p_hwfn->p_l2_info == OSAL_NULL) + return; + + if (p_hwfn->p_l2_info->pp_qid_usage == OSAL_NULL) + goto out_l2_info; + + /* Free until hit first uninitialized entry */ + for (i = 0; i < p_hwfn->p_l2_info->queues; i++) { + if (p_hwfn->p_l2_info->pp_qid_usage[i] == OSAL_NULL) + break; + OSAL_VFREE(p_hwfn->p_dev, + p_hwfn->p_l2_info->pp_qid_usage[i]); + p_hwfn->p_l2_info->pp_qid_usage[i] = OSAL_NULL; + } + +#ifdef CONFIG_ECORE_LOCK_ALLOC + /* Lock is last to initialize, if everything else was */ + if (i == p_hwfn->p_l2_info->queues) + OSAL_MUTEX_DEALLOC(&p_hwfn->p_l2_info->lock); +#endif + + OSAL_VFREE(p_hwfn->p_dev, p_hwfn->p_l2_info->pp_qid_usage); + p_hwfn->p_l2_info->pp_qid_usage = OSAL_NULL; + +out_l2_info: + OSAL_VFREE(p_hwfn->p_dev, p_hwfn->p_l2_info); + p_hwfn->p_l2_info = OSAL_NULL; +} + +/* TODO - we'll need locking around these... 
*/ +static bool ecore_eth_queue_qid_usage_add(struct ecore_hwfn *p_hwfn, + struct ecore_queue_cid *p_cid) +{ + struct ecore_l2_info *p_l2_info = p_hwfn->p_l2_info; + u16 queue_id = p_cid->rel.queue_id; + bool b_rc = true; + u8 first; + + OSAL_MUTEX_ACQUIRE(&p_l2_info->lock); + + if (queue_id > p_l2_info->queues) { + DP_NOTICE(p_hwfn, true, + "Requested to increase usage for qzone %04x out of %08x\n", + queue_id, p_l2_info->queues); + b_rc = false; + goto out; + } + + first = (u8)OSAL_FIND_FIRST_ZERO_BIT(p_l2_info->pp_qid_usage[queue_id], + MAX_QUEUES_PER_QZONE); + if (first >= MAX_QUEUES_PER_QZONE) { + b_rc = false; + goto out; + } + + OSAL_SET_BIT(first, p_l2_info->pp_qid_usage[queue_id]); + p_cid->qid_usage_idx = first; + +out: + OSAL_MUTEX_RELEASE(&p_l2_info->lock); + return b_rc; +} + +static void ecore_eth_queue_qid_usage_del(struct ecore_hwfn *p_hwfn, + struct ecore_queue_cid *p_cid) +{ + OSAL_MUTEX_ACQUIRE(&p_hwfn->p_l2_info->lock); + + OSAL_CLEAR_BIT(p_cid->qid_usage_idx, + p_hwfn->p_l2_info->pp_qid_usage[p_cid->rel.queue_id]); + + OSAL_MUTEX_RELEASE(&p_hwfn->p_l2_info->lock); +} + +void ecore_eth_queue_cid_release(struct ecore_hwfn *p_hwfn, + struct ecore_queue_cid *p_cid) +{ + bool b_legacy_vf = !!(p_cid->vf_legacy & + ECORE_QCID_LEGACY_VF_CID); + + /* VFs' CIDs are 0-based in PF-view, and uninitialized on VF. + * For legacy vf-queues, the CID doesn't go through here. + */ + if (IS_PF(p_hwfn->p_dev) && !b_legacy_vf) + _ecore_cxt_release_cid(p_hwfn, p_cid->cid, p_cid->vfid); + + /* VFs maintain the index inside queue-zone on their own */ + if (p_cid->vfid == ECORE_QUEUE_CID_PF) + ecore_eth_queue_qid_usage_del(p_hwfn, p_cid); + + OSAL_VFREE(p_hwfn->p_dev, p_cid); +} + +/* The internal is only meant to be directly called by PFs initializeing CIDs + * for their VFs. + */ +static struct ecore_queue_cid * +_ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn, + u16 opaque_fid, u32 cid, + struct ecore_queue_start_common_params *p_params, + bool b_is_rx, + struct ecore_queue_cid_vf_params *p_vf_params) +{ + struct ecore_queue_cid *p_cid; + enum _ecore_status_t rc; + + p_cid = OSAL_VZALLOC(p_hwfn->p_dev, sizeof(*p_cid)); + if (p_cid == OSAL_NULL) + return OSAL_NULL; + + p_cid->opaque_fid = opaque_fid; + p_cid->cid = cid; + p_cid->p_owner = p_hwfn; + + /* Fill in parameters */ + p_cid->rel.vport_id = p_params->vport_id; + p_cid->rel.queue_id = p_params->queue_id; + p_cid->rel.stats_id = p_params->stats_id; + p_cid->sb_igu_id = p_params->p_sb->igu_sb_id; + p_cid->b_is_rx = b_is_rx; + p_cid->sb_idx = p_params->sb_idx; + + /* Fill-in bits related to VFs' queues if information was provided */ + if (p_vf_params != OSAL_NULL) { + p_cid->vfid = p_vf_params->vfid; + p_cid->vf_qid = p_vf_params->vf_qid; + p_cid->vf_legacy = p_vf_params->vf_legacy; + } else { + p_cid->vfid = ECORE_QUEUE_CID_PF; + } + + /* Don't try calculating the absolute indices for VFs */ + if (IS_VF(p_hwfn->p_dev)) { + p_cid->abs = p_cid->rel; + + goto out; + } + + /* Calculate the engine-absolute indices of the resources. + * This would guarantee they're valid later on. + * In some cases [SBs] we already have the right values. + */ + rc = ecore_fw_vport(p_hwfn, p_cid->rel.vport_id, &p_cid->abs.vport_id); + if (rc != ECORE_SUCCESS) + goto fail; + + rc = ecore_fw_l2_queue(p_hwfn, p_cid->rel.queue_id, + &p_cid->abs.queue_id); + if (rc != ECORE_SUCCESS) + goto fail; + + /* In case of a PF configuring its VF's queues, the stats-id is already + * absolute [since there's a single index that's suitable per-VF]. 
+ */ + if (p_cid->vfid == ECORE_QUEUE_CID_PF) { + rc = ecore_fw_vport(p_hwfn, p_cid->rel.stats_id, + &p_cid->abs.stats_id); + if (rc != ECORE_SUCCESS) + goto fail; + } else { + p_cid->abs.stats_id = p_cid->rel.stats_id; + } + +out: + /* VF-images have provided the qid_usage_idx on their own. + * Otherwise, we need to allocate a unique one. + */ + if (!p_vf_params) { + if (!ecore_eth_queue_qid_usage_add(p_hwfn, p_cid)) + goto fail; + } else { + p_cid->qid_usage_idx = p_vf_params->qid_usage_idx; + } + + DP_VERBOSE(p_hwfn, ECORE_MSG_SP, + "opaque_fid: %04x CID %08x vport %02x [%02x] qzone %04x.%02x [%04x] stats %02x [%02x] SB %04x PI %02x\n", + p_cid->opaque_fid, p_cid->cid, + p_cid->rel.vport_id, p_cid->abs.vport_id, + p_cid->rel.queue_id, p_cid->qid_usage_idx, + p_cid->abs.queue_id, + p_cid->rel.stats_id, p_cid->abs.stats_id, + p_cid->sb_igu_id, p_cid->sb_idx); + + return p_cid; + +fail: + OSAL_VFREE(p_hwfn->p_dev, p_cid); + return OSAL_NULL; +} + +struct ecore_queue_cid * +ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn, u16 opaque_fid, + struct ecore_queue_start_common_params *p_params, + bool b_is_rx, + struct ecore_queue_cid_vf_params *p_vf_params) +{ + struct ecore_queue_cid *p_cid; + u8 vfid = ECORE_CXT_PF_CID; + bool b_legacy_vf = false; + u32 cid = 0; + + /* In case of legacy VFs, The CID can be derived from the additional + * VF parameters - the VF assumes queue X uses CID X, so we can simply + * use the vf_qid for this purpose as well. + */ + if (p_vf_params) { + vfid = p_vf_params->vfid; + + if (p_vf_params->vf_legacy & + ECORE_QCID_LEGACY_VF_CID) { + b_legacy_vf = true; + cid = p_vf_params->vf_qid; + } + } + + /* Get a unique firmware CID for this queue, in case it's a PF. + * VF's don't need a CID as the queue configuration will be done + * by PF. 
+ */ + if (IS_PF(p_hwfn->p_dev) && !b_legacy_vf) { + if (_ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, + &cid, vfid) != ECORE_SUCCESS) { + DP_NOTICE(p_hwfn, true, "Failed to acquire cid\n"); + return OSAL_NULL; + } + } + + p_cid = _ecore_eth_queue_to_cid(p_hwfn, opaque_fid, cid, + p_params, b_is_rx, p_vf_params); + if ((p_cid == OSAL_NULL) && IS_PF(p_hwfn->p_dev) && !b_legacy_vf) + _ecore_cxt_release_cid(p_hwfn, cid, vfid); + + return p_cid; +} + +static struct ecore_queue_cid * +ecore_eth_queue_to_cid_pf(struct ecore_hwfn *p_hwfn, u16 opaque_fid, + bool b_is_rx, + struct ecore_queue_start_common_params *p_params) +{ + return ecore_eth_queue_to_cid(p_hwfn, opaque_fid, p_params, b_is_rx, + OSAL_NULL); +} + +enum _ecore_status_t +ecore_sp_eth_vport_start(struct ecore_hwfn *p_hwfn, + struct ecore_sp_vport_start_params *p_params) +{ + struct vport_start_ramrod_data *p_ramrod = OSAL_NULL; + struct ecore_spq_entry *p_ent = OSAL_NULL; + struct ecore_sp_init_data init_data; + struct eth_vport_tpa_param *p_tpa; + u16 rx_mode = 0, tx_err = 0; + u8 abs_vport_id = 0; + enum _ecore_status_t rc = ECORE_NOTIMPL; + + rc = ecore_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id); + if (rc != ECORE_SUCCESS) + return rc; + + /* Get SPQ entry */ + OSAL_MEMSET(&init_data, 0, sizeof(init_data)); + init_data.cid = ecore_spq_get_cid(p_hwfn); + init_data.opaque_fid = p_params->opaque_fid; + init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK; + + rc = ecore_sp_init_request(p_hwfn, &p_ent, + ETH_RAMROD_VPORT_START, + PROTOCOLID_ETH, &init_data); + if (rc != ECORE_SUCCESS) + return rc; + + p_ramrod = &p_ent->ramrod.vport_start; + p_ramrod->vport_id = abs_vport_id; + + p_ramrod->mtu = OSAL_CPU_TO_LE16(p_params->mtu); + p_ramrod->handle_ptp_pkts = p_params->handle_ptp_pkts; + p_ramrod->inner_vlan_removal_en = p_params->remove_inner_vlan; + p_ramrod->drop_ttl0_en = p_params->drop_ttl0; + p_ramrod->untagged = p_params->only_untagged; + p_ramrod->zero_placement_offset = p_params->zero_placement_offset; + + SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, 1); + SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, 1); + + p_ramrod->rx_mode.state = OSAL_CPU_TO_LE16(rx_mode); + + /* Handle requests for strict behavior on transmission errors */ + SET_FIELD(tx_err, ETH_TX_ERR_VALS_ILLEGAL_VLAN_MODE, + p_params->b_err_illegal_vlan_mode ? + ETH_TX_ERR_ASSERT_MALICIOUS : 0); + SET_FIELD(tx_err, ETH_TX_ERR_VALS_PACKET_TOO_SMALL, + p_params->b_err_small_pkt ? + ETH_TX_ERR_ASSERT_MALICIOUS : 0); + SET_FIELD(tx_err, ETH_TX_ERR_VALS_ANTI_SPOOFING_ERR, + p_params->b_err_anti_spoof ? + ETH_TX_ERR_ASSERT_MALICIOUS : 0); + SET_FIELD(tx_err, ETH_TX_ERR_VALS_ILLEGAL_INBAND_TAGS, + p_params->b_err_illegal_inband_mode ? + ETH_TX_ERR_ASSERT_MALICIOUS : 0); + SET_FIELD(tx_err, ETH_TX_ERR_VALS_VLAN_INSERTION_W_INBAND_TAG, + p_params->b_err_vlan_insert_with_inband ? + ETH_TX_ERR_ASSERT_MALICIOUS : 0); + SET_FIELD(tx_err, ETH_TX_ERR_VALS_MTU_VIOLATION, + p_params->b_err_big_pkt ? + ETH_TX_ERR_ASSERT_MALICIOUS : 0); + SET_FIELD(tx_err, ETH_TX_ERR_VALS_ILLEGAL_CONTROL_FRAME, + p_params->b_err_ctrl_frame ? 
+ ETH_TX_ERR_ASSERT_MALICIOUS : 0); + p_ramrod->tx_err_behav.values = OSAL_CPU_TO_LE16(tx_err); + + /* TPA related fields */ + p_tpa = &p_ramrod->tpa_param; + OSAL_MEMSET(p_tpa, 0, sizeof(struct eth_vport_tpa_param)); + p_tpa->max_buff_num = p_params->max_buffers_per_cqe; + + switch (p_params->tpa_mode) { + case ECORE_TPA_MODE_GRO: + p_tpa->tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM; + p_tpa->tpa_max_size = (u16)-1; + p_tpa->tpa_min_size_to_cont = p_params->mtu / 2; + p_tpa->tpa_min_size_to_start = p_params->mtu / 2; + p_tpa->tpa_ipv4_en_flg = 1; + p_tpa->tpa_ipv6_en_flg = 1; + p_tpa->tpa_ipv4_tunn_en_flg = 1; + p_tpa->tpa_ipv6_tunn_en_flg = 1; + p_tpa->tpa_pkt_split_flg = 1; + p_tpa->tpa_gro_consistent_flg = 1; + break; + default: + break; + } + + p_ramrod->tx_switching_en = p_params->tx_switching; +#ifndef ASIC_ONLY + if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) + p_ramrod->tx_switching_en = 0; +#endif + + p_ramrod->ctl_frame_mac_check_en = !!p_params->check_mac; + p_ramrod->ctl_frame_ethtype_check_en = !!p_params->check_ethtype; + + /* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */ + p_ramrod->sw_fid = ecore_concrete_to_sw_fid(p_params->concrete_fid); + + return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); +} + +enum _ecore_status_t +ecore_sp_vport_start(struct ecore_hwfn *p_hwfn, + struct ecore_sp_vport_start_params *p_params) +{ + if (IS_VF(p_hwfn->p_dev)) + return ecore_vf_pf_vport_start(p_hwfn, p_params->vport_id, + p_params->mtu, + p_params->remove_inner_vlan, + p_params->tpa_mode, + p_params->max_buffers_per_cqe, + p_params->only_untagged); + + return ecore_sp_eth_vport_start(p_hwfn, p_params); +} + +static enum _ecore_status_t +ecore_sp_vport_update_rss(struct ecore_hwfn *p_hwfn, + struct vport_update_ramrod_data *p_ramrod, + struct ecore_rss_params *p_rss) +{ + struct eth_vport_rss_config *p_config; + u16 capabilities = 0; + int i, table_size; + enum _ecore_status_t rc = ECORE_SUCCESS; + + if (!p_rss) { + p_ramrod->common.update_rss_flg = 0; + return rc; + } + p_config = &p_ramrod->rss_config; + + OSAL_BUILD_BUG_ON(ECORE_RSS_IND_TABLE_SIZE != + ETH_RSS_IND_TABLE_ENTRIES_NUM); + + rc = ecore_fw_rss_eng(p_hwfn, p_rss->rss_eng_id, &p_config->rss_id); + if (rc != ECORE_SUCCESS) + return rc; + + p_ramrod->common.update_rss_flg = p_rss->update_rss_config; + p_config->update_rss_capabilities = p_rss->update_rss_capabilities; + p_config->update_rss_ind_table = p_rss->update_rss_ind_table; + p_config->update_rss_key = p_rss->update_rss_key; + + p_config->rss_mode = p_rss->rss_enable ? 
+ ETH_VPORT_RSS_MODE_REGULAR : ETH_VPORT_RSS_MODE_DISABLED; + + p_config->capabilities = 0; + + SET_FIELD(capabilities, + ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY, + !!(p_rss->rss_caps & ECORE_RSS_IPV4)); + SET_FIELD(capabilities, + ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY, + !!(p_rss->rss_caps & ECORE_RSS_IPV6)); + SET_FIELD(capabilities, + ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY, + !!(p_rss->rss_caps & ECORE_RSS_IPV4_TCP)); + SET_FIELD(capabilities, + ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY, + !!(p_rss->rss_caps & ECORE_RSS_IPV6_TCP)); + SET_FIELD(capabilities, + ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY, + !!(p_rss->rss_caps & ECORE_RSS_IPV4_UDP)); + SET_FIELD(capabilities, + ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY, + !!(p_rss->rss_caps & ECORE_RSS_IPV6_UDP)); + p_config->tbl_size = p_rss->rss_table_size_log; + p_config->capabilities = OSAL_CPU_TO_LE16(capabilities); + + DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP, + "update rss flag %d, rss_mode = %d, update_caps = %d, capabilities = %d, update_ind = %d, update_rss_key = %d\n", + p_ramrod->common.update_rss_flg, + p_config->rss_mode, + p_config->update_rss_capabilities, + p_config->capabilities, + p_config->update_rss_ind_table, p_config->update_rss_key); + + table_size = OSAL_MIN_T(int, ECORE_RSS_IND_TABLE_SIZE, + 1 << p_config->tbl_size); + for (i = 0; i < table_size; i++) { + struct ecore_queue_cid *p_queue = p_rss->rss_ind_table[i]; + + if (!p_queue) + return ECORE_INVAL; + + p_config->indirection_table[i] = + OSAL_CPU_TO_LE16(p_queue->abs.queue_id); + } + + DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP, + "Configured RSS indirection table [%d entries]:\n", + table_size); + for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i += 0x10) { + DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP, + "%04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x\n", + OSAL_LE16_TO_CPU(p_config->indirection_table[i]), + OSAL_LE16_TO_CPU(p_config->indirection_table[i + 1]), + OSAL_LE16_TO_CPU(p_config->indirection_table[i + 2]), + OSAL_LE16_TO_CPU(p_config->indirection_table[i + 3]), + OSAL_LE16_TO_CPU(p_config->indirection_table[i + 4]), + OSAL_LE16_TO_CPU(p_config->indirection_table[i + 5]), + OSAL_LE16_TO_CPU(p_config->indirection_table[i + 6]), + OSAL_LE16_TO_CPU(p_config->indirection_table[i + 7]), + OSAL_LE16_TO_CPU(p_config->indirection_table[i + 8]), + OSAL_LE16_TO_CPU(p_config->indirection_table[i + 9]), + OSAL_LE16_TO_CPU(p_config->indirection_table[i + 10]), + OSAL_LE16_TO_CPU(p_config->indirection_table[i + 11]), + OSAL_LE16_TO_CPU(p_config->indirection_table[i + 12]), + OSAL_LE16_TO_CPU(p_config->indirection_table[i + 13]), + OSAL_LE16_TO_CPU(p_config->indirection_table[i + 14]), + OSAL_LE16_TO_CPU(p_config->indirection_table[i + 15])); + } + + for (i = 0; i < 10; i++) + p_config->rss_key[i] = OSAL_CPU_TO_LE32(p_rss->rss_key[i]); + + return rc; +} + +static void +ecore_sp_update_accept_mode(struct ecore_hwfn *p_hwfn, + struct vport_update_ramrod_data *p_ramrod, + struct ecore_filter_accept_flags accept_flags) +{ + p_ramrod->common.update_rx_mode_flg = + accept_flags.update_rx_mode_config; + p_ramrod->common.update_tx_mode_flg = + accept_flags.update_tx_mode_config; + +#ifndef ASIC_ONLY + /* On B0 emulation we cannot enable Tx, since this would cause writes + * to PVFC HW block which isn't implemented in emulation. 
+ */ + if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) { + DP_VERBOSE(p_hwfn, ECORE_MSG_SP, + "Non-Asic - prevent Tx mode in vport update\n"); + p_ramrod->common.update_tx_mode_flg = 0; + } +#endif + + /* Set Rx mode accept flags */ + if (p_ramrod->common.update_rx_mode_flg) { + u8 accept_filter = accept_flags.rx_accept_filter; + u16 state = 0; + + SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, + !(!!(accept_filter & ECORE_ACCEPT_UCAST_MATCHED) || + !!(accept_filter & ECORE_ACCEPT_UCAST_UNMATCHED))); + + SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED, + !!(accept_filter & ECORE_ACCEPT_UCAST_UNMATCHED)); + + SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, + !(!!(accept_filter & ECORE_ACCEPT_MCAST_MATCHED) || + !!(accept_filter & ECORE_ACCEPT_MCAST_UNMATCHED))); + + SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL, + (!!(accept_filter & ECORE_ACCEPT_MCAST_MATCHED) && + !!(accept_filter & ECORE_ACCEPT_MCAST_UNMATCHED))); + + SET_FIELD(state, ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL, + !!(accept_filter & ECORE_ACCEPT_BCAST)); + + SET_FIELD(state, ETH_VPORT_RX_MODE_ACCEPT_ANY_VNI, + !!(accept_filter & ECORE_ACCEPT_ANY_VNI)); + + p_ramrod->rx_mode.state = OSAL_CPU_TO_LE16(state); + DP_VERBOSE(p_hwfn, ECORE_MSG_SP, + "vport[%02x] p_ramrod->rx_mode.state = 0x%x\n", + p_ramrod->common.vport_id, state); + } + + /* Set Tx mode accept flags */ + if (p_ramrod->common.update_tx_mode_flg) { + u8 accept_filter = accept_flags.tx_accept_filter; + u16 state = 0; + + SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_DROP_ALL, + !!(accept_filter & ECORE_ACCEPT_NONE)); + + SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_DROP_ALL, + !!(accept_filter & ECORE_ACCEPT_NONE)); + + SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL, + (!!(accept_filter & ECORE_ACCEPT_MCAST_MATCHED) && + !!(accept_filter & ECORE_ACCEPT_MCAST_UNMATCHED))); + + SET_FIELD(state, ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL, + !!(accept_filter & ECORE_ACCEPT_BCAST)); + + p_ramrod->tx_mode.state = OSAL_CPU_TO_LE16(state); + DP_VERBOSE(p_hwfn, ECORE_MSG_SP, + "vport[%02x] p_ramrod->tx_mode.state = 0x%x\n", + p_ramrod->common.vport_id, state); + } +} + +static void +ecore_sp_vport_update_sge_tpa(struct vport_update_ramrod_data *p_ramrod, + struct ecore_sge_tpa_params *p_params) +{ + struct eth_vport_tpa_param *p_tpa; + u16 val; + + if (!p_params) { + p_ramrod->common.update_tpa_param_flg = 0; + p_ramrod->common.update_tpa_en_flg = 0; + p_ramrod->common.update_tpa_param_flg = 0; + return; + } + + p_ramrod->common.update_tpa_en_flg = p_params->update_tpa_en_flg; + p_tpa = &p_ramrod->tpa_param; + p_tpa->tpa_ipv4_en_flg = p_params->tpa_ipv4_en_flg; + p_tpa->tpa_ipv6_en_flg = p_params->tpa_ipv6_en_flg; + p_tpa->tpa_ipv4_tunn_en_flg = p_params->tpa_ipv4_tunn_en_flg; + p_tpa->tpa_ipv6_tunn_en_flg = p_params->tpa_ipv6_tunn_en_flg; + + p_ramrod->common.update_tpa_param_flg = p_params->update_tpa_param_flg; + p_tpa->max_buff_num = p_params->max_buffers_per_cqe; + p_tpa->tpa_pkt_split_flg = p_params->tpa_pkt_split_flg; + p_tpa->tpa_hdr_data_split_flg = p_params->tpa_hdr_data_split_flg; + p_tpa->tpa_gro_consistent_flg = p_params->tpa_gro_consistent_flg; + p_tpa->tpa_max_aggs_num = p_params->tpa_max_aggs_num; + val = p_params->tpa_max_size; + p_tpa->tpa_max_size = OSAL_CPU_TO_LE16(val); + val = p_params->tpa_min_size_to_start; + p_tpa->tpa_min_size_to_start = OSAL_CPU_TO_LE16(val); + val = p_params->tpa_min_size_to_cont; + p_tpa->tpa_min_size_to_cont = OSAL_CPU_TO_LE16(val); +} + +static void +ecore_sp_update_mcast_bin(struct vport_update_ramrod_data *p_ramrod, + struct 
ecore_sp_vport_update_params *p_params) +{ + int i; + + OSAL_MEMSET(&p_ramrod->approx_mcast.bins, 0, + sizeof(p_ramrod->approx_mcast.bins)); + + if (!p_params->update_approx_mcast_flg) + return; + + p_ramrod->common.update_approx_mcast_flg = 1; + for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) { + u32 *p_bins = p_params->bins; + + p_ramrod->approx_mcast.bins[i] = OSAL_CPU_TO_LE32(p_bins[i]); + } +} + +enum _ecore_status_t +ecore_sp_vport_update(struct ecore_hwfn *p_hwfn, + struct ecore_sp_vport_update_params *p_params, + enum spq_mode comp_mode, + struct ecore_spq_comp_cb *p_comp_data) +{ + struct ecore_rss_params *p_rss_params = p_params->rss_params; + struct vport_update_ramrod_data_cmn *p_cmn; + struct ecore_sp_init_data init_data; + struct vport_update_ramrod_data *p_ramrod = OSAL_NULL; + struct ecore_spq_entry *p_ent = OSAL_NULL; + u8 abs_vport_id = 0, val; + enum _ecore_status_t rc = ECORE_NOTIMPL; + + if (IS_VF(p_hwfn->p_dev)) { + rc = ecore_vf_pf_vport_update(p_hwfn, p_params); + return rc; + } + + rc = ecore_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id); + if (rc != ECORE_SUCCESS) + return rc; + + /* Get SPQ entry */ + OSAL_MEMSET(&init_data, 0, sizeof(init_data)); + init_data.cid = ecore_spq_get_cid(p_hwfn); + init_data.opaque_fid = p_params->opaque_fid; + init_data.comp_mode = comp_mode; + init_data.p_comp_data = p_comp_data; + + rc = ecore_sp_init_request(p_hwfn, &p_ent, + ETH_RAMROD_VPORT_UPDATE, + PROTOCOLID_ETH, &init_data); + if (rc != ECORE_SUCCESS) + return rc; + + /* Copy input params to ramrod according to FW struct */ + p_ramrod = &p_ent->ramrod.vport_update; + p_cmn = &p_ramrod->common; + + p_cmn->vport_id = abs_vport_id; + + p_cmn->rx_active_flg = p_params->vport_active_rx_flg; + p_cmn->update_rx_active_flg = p_params->update_vport_active_rx_flg; + p_cmn->tx_active_flg = p_params->vport_active_tx_flg; + p_cmn->update_tx_active_flg = p_params->update_vport_active_tx_flg; + + p_cmn->accept_any_vlan = p_params->accept_any_vlan; + val = p_params->update_accept_any_vlan_flg; + p_cmn->update_accept_any_vlan_flg = val; + + p_cmn->inner_vlan_removal_en = p_params->inner_vlan_removal_flg; + val = p_params->update_inner_vlan_removal_flg; + p_cmn->update_inner_vlan_removal_en_flg = val; + + p_cmn->default_vlan_en = p_params->default_vlan_enable_flg; + val = p_params->update_default_vlan_enable_flg; + p_cmn->update_default_vlan_en_flg = val; + + p_cmn->default_vlan = OSAL_CPU_TO_LE16(p_params->default_vlan); + p_cmn->update_default_vlan_flg = p_params->update_default_vlan_flg; + + p_cmn->silent_vlan_removal_en = p_params->silent_vlan_removal_flg; + + p_ramrod->common.tx_switching_en = p_params->tx_switching_flg; + +#ifndef ASIC_ONLY + if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) + if (p_ramrod->common.tx_switching_en || + p_ramrod->common.update_tx_switching_en_flg) { + DP_NOTICE(p_hwfn, false, + "FPGA - why are we seeing tx-switching? 
Overriding it\n"); + p_ramrod->common.tx_switching_en = 0; + p_ramrod->common.update_tx_switching_en_flg = 1; + } +#endif + p_cmn->update_tx_switching_en_flg = p_params->update_tx_switching_flg; + + p_cmn->anti_spoofing_en = p_params->anti_spoofing_en; + val = p_params->update_anti_spoofing_en_flg; + p_ramrod->common.update_anti_spoofing_en_flg = val; + + rc = ecore_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params); + if (rc != ECORE_SUCCESS) { + /* Return spq entry which is taken in ecore_sp_init_request()*/ + ecore_spq_return_entry(p_hwfn, p_ent); + return rc; + } + + if (p_params->update_ctl_frame_check) { + p_cmn->ctl_frame_mac_check_en = p_params->mac_chk_en; + p_cmn->ctl_frame_ethtype_check_en = p_params->ethtype_chk_en; + } + + /* Update mcast bins for VFs, PF doesn't use this functionality */ + ecore_sp_update_mcast_bin(p_ramrod, p_params); + + ecore_sp_update_accept_mode(p_hwfn, p_ramrod, p_params->accept_flags); + ecore_sp_vport_update_sge_tpa(p_ramrod, p_params->sge_tpa_params); + if (p_params->mtu) { + p_ramrod->common.update_mtu_flg = 1; + p_ramrod->common.mtu = OSAL_CPU_TO_LE16(p_params->mtu); + } + + return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); +} + +enum _ecore_status_t ecore_sp_vport_stop(struct ecore_hwfn *p_hwfn, + u16 opaque_fid, u8 vport_id) +{ + struct vport_stop_ramrod_data *p_ramrod; + struct ecore_sp_init_data init_data; + struct ecore_spq_entry *p_ent; + u8 abs_vport_id = 0; + enum _ecore_status_t rc; + + if (IS_VF(p_hwfn->p_dev)) + return ecore_vf_pf_vport_stop(p_hwfn); + + rc = ecore_fw_vport(p_hwfn, vport_id, &abs_vport_id); + if (rc != ECORE_SUCCESS) + return rc; + + /* Get SPQ entry */ + OSAL_MEMSET(&init_data, 0, sizeof(init_data)); + init_data.cid = ecore_spq_get_cid(p_hwfn); + init_data.opaque_fid = opaque_fid; + init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK; + + rc = ecore_sp_init_request(p_hwfn, &p_ent, + ETH_RAMROD_VPORT_STOP, + PROTOCOLID_ETH, &init_data); + if (rc != ECORE_SUCCESS) + return rc; + + p_ramrod = &p_ent->ramrod.vport_stop; + p_ramrod->vport_id = abs_vport_id; + + return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); +} + +static enum _ecore_status_t +ecore_vf_pf_accept_flags(struct ecore_hwfn *p_hwfn, + struct ecore_filter_accept_flags *p_accept_flags) +{ + struct ecore_sp_vport_update_params s_params; + + OSAL_MEMSET(&s_params, 0, sizeof(s_params)); + OSAL_MEMCPY(&s_params.accept_flags, p_accept_flags, + sizeof(struct ecore_filter_accept_flags)); + + return ecore_vf_pf_vport_update(p_hwfn, &s_params); +} + +enum _ecore_status_t +ecore_filter_accept_cmd(struct ecore_dev *p_dev, + u8 vport, + struct ecore_filter_accept_flags accept_flags, + u8 update_accept_any_vlan, + u8 accept_any_vlan, + enum spq_mode comp_mode, + struct ecore_spq_comp_cb *p_comp_data) +{ + struct ecore_sp_vport_update_params vport_update_params; + int i, rc; + + /* Prepare and send the vport rx_mode change */ + OSAL_MEMSET(&vport_update_params, 0, sizeof(vport_update_params)); + vport_update_params.vport_id = vport; + vport_update_params.accept_flags = accept_flags; + vport_update_params.update_accept_any_vlan_flg = update_accept_any_vlan; + vport_update_params.accept_any_vlan = accept_any_vlan; + + for_each_hwfn(p_dev, i) { + struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; + + vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid; + + if (IS_VF(p_dev)) { + rc = ecore_vf_pf_accept_flags(p_hwfn, &accept_flags); + if (rc != ECORE_SUCCESS) + return rc; + continue; + } + + rc = ecore_sp_vport_update(p_hwfn, &vport_update_params, + comp_mode, p_comp_data); + if (rc != 
ECORE_SUCCESS) { + DP_ERR(p_dev, "Update rx_mode failed %d\n", rc); + return rc; + } + + DP_VERBOSE(p_hwfn, ECORE_MSG_SP, + "Accept filter configured, flags = [Rx]%x [Tx]%x\n", + accept_flags.rx_accept_filter, + accept_flags.tx_accept_filter); + + if (update_accept_any_vlan) + DP_VERBOSE(p_hwfn, ECORE_MSG_SP, + "accept_any_vlan=%d configured\n", + accept_any_vlan); + } + + return 0; +} + +enum _ecore_status_t +ecore_eth_rxq_start_ramrod(struct ecore_hwfn *p_hwfn, + struct ecore_queue_cid *p_cid, + u16 bd_max_bytes, + dma_addr_t bd_chain_phys_addr, + dma_addr_t cqe_pbl_addr, + u16 cqe_pbl_size) +{ + struct rx_queue_start_ramrod_data *p_ramrod = OSAL_NULL; + struct ecore_spq_entry *p_ent = OSAL_NULL; + struct ecore_sp_init_data init_data; + enum _ecore_status_t rc = ECORE_NOTIMPL; + + DP_VERBOSE(p_hwfn, ECORE_MSG_SP, + "opaque_fid=0x%x, cid=0x%x, rx_qzone=0x%x, vport_id=0x%x, sb_id=0x%x\n", + p_cid->opaque_fid, p_cid->cid, p_cid->abs.queue_id, + p_cid->abs.vport_id, p_cid->sb_igu_id); + + /* Get SPQ entry */ + OSAL_MEMSET(&init_data, 0, sizeof(init_data)); + init_data.cid = p_cid->cid; + init_data.opaque_fid = p_cid->opaque_fid; + init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK; + + rc = ecore_sp_init_request(p_hwfn, &p_ent, + ETH_RAMROD_RX_QUEUE_START, + PROTOCOLID_ETH, &init_data); + if (rc != ECORE_SUCCESS) + return rc; + + p_ramrod = &p_ent->ramrod.rx_queue_start; + + p_ramrod->sb_id = OSAL_CPU_TO_LE16(p_cid->sb_igu_id); + p_ramrod->sb_index = p_cid->sb_idx; + p_ramrod->vport_id = p_cid->abs.vport_id; + p_ramrod->stats_counter_id = p_cid->abs.stats_id; + p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id); + p_ramrod->complete_cqe_flg = 0; + p_ramrod->complete_event_flg = 1; + + p_ramrod->bd_max_bytes = OSAL_CPU_TO_LE16(bd_max_bytes); + DMA_REGPAIR_LE(p_ramrod->bd_base, bd_chain_phys_addr); + + p_ramrod->num_of_pbl_pages = OSAL_CPU_TO_LE16(cqe_pbl_size); + DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr); + + if (p_cid->vfid != ECORE_QUEUE_CID_PF) { + bool b_legacy_vf = !!(p_cid->vf_legacy & + ECORE_QCID_LEGACY_VF_RX_PROD); + + p_ramrod->vf_rx_prod_index = p_cid->vf_qid; + DP_VERBOSE(p_hwfn, ECORE_MSG_SP, + "Queue%s is meant for VF rxq[%02x]\n", + b_legacy_vf ? 
" [legacy]" : "", + p_cid->vf_qid); + p_ramrod->vf_rx_prod_use_zone_a = b_legacy_vf; + } + + return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); +} + +static enum _ecore_status_t +ecore_eth_pf_rx_queue_start(struct ecore_hwfn *p_hwfn, + struct ecore_queue_cid *p_cid, + u16 bd_max_bytes, + dma_addr_t bd_chain_phys_addr, + dma_addr_t cqe_pbl_addr, + u16 cqe_pbl_size, + void OSAL_IOMEM * *pp_prod) +{ + u32 init_prod_val = 0; + + *pp_prod = (u8 OSAL_IOMEM *) + p_hwfn->regview + + GTT_BAR0_MAP_REG_MSDM_RAM + + MSTORM_ETH_PF_PRODS_OFFSET(p_cid->abs.queue_id); + + /* Init the rcq, rx bd and rx sge (if valid) producers to 0 */ + __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32), + (u32 *)(&init_prod_val)); + + return ecore_eth_rxq_start_ramrod(p_hwfn, p_cid, + bd_max_bytes, + bd_chain_phys_addr, + cqe_pbl_addr, cqe_pbl_size); +} + +enum _ecore_status_t +ecore_eth_rx_queue_start(struct ecore_hwfn *p_hwfn, + u16 opaque_fid, + struct ecore_queue_start_common_params *p_params, + u16 bd_max_bytes, + dma_addr_t bd_chain_phys_addr, + dma_addr_t cqe_pbl_addr, + u16 cqe_pbl_size, + struct ecore_rxq_start_ret_params *p_ret_params) +{ + struct ecore_queue_cid *p_cid; + enum _ecore_status_t rc; + + /* Allocate a CID for the queue */ + p_cid = ecore_eth_queue_to_cid_pf(p_hwfn, opaque_fid, true, p_params); + if (p_cid == OSAL_NULL) + return ECORE_NOMEM; + + if (IS_PF(p_hwfn->p_dev)) + rc = ecore_eth_pf_rx_queue_start(p_hwfn, p_cid, + bd_max_bytes, + bd_chain_phys_addr, + cqe_pbl_addr, cqe_pbl_size, + &p_ret_params->p_prod); + else + rc = ecore_vf_pf_rxq_start(p_hwfn, p_cid, + bd_max_bytes, + bd_chain_phys_addr, + cqe_pbl_addr, + cqe_pbl_size, + &p_ret_params->p_prod); + + /* Provide the caller with a reference to as handler */ + if (rc != ECORE_SUCCESS) + ecore_eth_queue_cid_release(p_hwfn, p_cid); + else + p_ret_params->p_handle = (void *)p_cid; + + return rc; +} + +enum _ecore_status_t +ecore_sp_eth_rx_queues_update(struct ecore_hwfn *p_hwfn, + void **pp_rxq_handles, + u8 num_rxqs, + u8 complete_cqe_flg, + u8 complete_event_flg, + enum spq_mode comp_mode, + struct ecore_spq_comp_cb *p_comp_data) +{ + struct rx_queue_update_ramrod_data *p_ramrod = OSAL_NULL; + struct ecore_spq_entry *p_ent = OSAL_NULL; + struct ecore_sp_init_data init_data; + struct ecore_queue_cid *p_cid; + enum _ecore_status_t rc = ECORE_NOTIMPL; + u8 i; + + if (IS_VF(p_hwfn->p_dev)) + return ecore_vf_pf_rxqs_update(p_hwfn, + (struct ecore_queue_cid **) + pp_rxq_handles, + num_rxqs, + complete_cqe_flg, + complete_event_flg); + + OSAL_MEMSET(&init_data, 0, sizeof(init_data)); + init_data.comp_mode = comp_mode; + init_data.p_comp_data = p_comp_data; + + for (i = 0; i < num_rxqs; i++) { + p_cid = ((struct ecore_queue_cid **)pp_rxq_handles)[i]; + + /* Get SPQ entry */ + init_data.cid = p_cid->cid; + init_data.opaque_fid = p_cid->opaque_fid; + + rc = ecore_sp_init_request(p_hwfn, &p_ent, + ETH_RAMROD_RX_QUEUE_UPDATE, + PROTOCOLID_ETH, &init_data); + if (rc != ECORE_SUCCESS) + return rc; + + p_ramrod = &p_ent->ramrod.rx_queue_update; + p_ramrod->vport_id = p_cid->abs.vport_id; + + p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id); + p_ramrod->complete_cqe_flg = complete_cqe_flg; + p_ramrod->complete_event_flg = complete_event_flg; + + rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); + if (rc != ECORE_SUCCESS) + return rc; + } + + return rc; +} + +static enum _ecore_status_t +ecore_eth_pf_rx_queue_stop(struct ecore_hwfn *p_hwfn, + struct ecore_queue_cid *p_cid, + bool b_eq_completion_only, + bool b_cqe_completion) +{ + struct 
rx_queue_stop_ramrod_data *p_ramrod = OSAL_NULL; + struct ecore_spq_entry *p_ent = OSAL_NULL; + struct ecore_sp_init_data init_data; + enum _ecore_status_t rc; + + OSAL_MEMSET(&init_data, 0, sizeof(init_data)); + init_data.cid = p_cid->cid; + init_data.opaque_fid = p_cid->opaque_fid; + init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK; + + rc = ecore_sp_init_request(p_hwfn, &p_ent, + ETH_RAMROD_RX_QUEUE_STOP, + PROTOCOLID_ETH, &init_data); + if (rc != ECORE_SUCCESS) + return rc; + + p_ramrod = &p_ent->ramrod.rx_queue_stop; + p_ramrod->vport_id = p_cid->abs.vport_id; + p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id); + + /* Cleaning the queue requires the completion to arrive there. + * In addition, VFs require the answer to come as eqe to PF. + */ + p_ramrod->complete_cqe_flg = ((p_cid->vfid == ECORE_QUEUE_CID_PF) && + !b_eq_completion_only) || + b_cqe_completion; + p_ramrod->complete_event_flg = (p_cid->vfid != ECORE_QUEUE_CID_PF) || + b_eq_completion_only; + + return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); +} + +enum _ecore_status_t ecore_eth_rx_queue_stop(struct ecore_hwfn *p_hwfn, + void *p_rxq, + bool eq_completion_only, + bool cqe_completion) +{ + struct ecore_queue_cid *p_cid = (struct ecore_queue_cid *)p_rxq; + enum _ecore_status_t rc = ECORE_NOTIMPL; + + if (IS_PF(p_hwfn->p_dev)) + rc = ecore_eth_pf_rx_queue_stop(p_hwfn, p_cid, + eq_completion_only, + cqe_completion); + else + rc = ecore_vf_pf_rxq_stop(p_hwfn, p_cid, cqe_completion); + + if (rc == ECORE_SUCCESS) + ecore_eth_queue_cid_release(p_hwfn, p_cid); + return rc; +} + +enum _ecore_status_t +ecore_eth_txq_start_ramrod(struct ecore_hwfn *p_hwfn, + struct ecore_queue_cid *p_cid, + dma_addr_t pbl_addr, u16 pbl_size, + u16 pq_id) +{ + struct tx_queue_start_ramrod_data *p_ramrod = OSAL_NULL; + struct ecore_spq_entry *p_ent = OSAL_NULL; + struct ecore_sp_init_data init_data; + enum _ecore_status_t rc = ECORE_NOTIMPL; + + /* Get SPQ entry */ + OSAL_MEMSET(&init_data, 0, sizeof(init_data)); + init_data.cid = p_cid->cid; + init_data.opaque_fid = p_cid->opaque_fid; + init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK; + + rc = ecore_sp_init_request(p_hwfn, &p_ent, + ETH_RAMROD_TX_QUEUE_START, + PROTOCOLID_ETH, &init_data); + if (rc != ECORE_SUCCESS) + return rc; + + p_ramrod = &p_ent->ramrod.tx_queue_start; + p_ramrod->vport_id = p_cid->abs.vport_id; + + p_ramrod->sb_id = OSAL_CPU_TO_LE16(p_cid->sb_igu_id); + p_ramrod->sb_index = p_cid->sb_idx; + p_ramrod->stats_counter_id = p_cid->abs.stats_id; + + p_ramrod->queue_zone_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id); + p_ramrod->same_as_last_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id); + + p_ramrod->pbl_size = OSAL_CPU_TO_LE16(pbl_size); + DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, pbl_addr); + + p_ramrod->qm_pq_id = OSAL_CPU_TO_LE16(pq_id); + + return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); +} + +static enum _ecore_status_t +ecore_eth_pf_tx_queue_start(struct ecore_hwfn *p_hwfn, + struct ecore_queue_cid *p_cid, + u8 tc, + dma_addr_t pbl_addr, u16 pbl_size, + void OSAL_IOMEM * *pp_doorbell) +{ + enum _ecore_status_t rc; + u16 pq_id; + + /* TODO - set tc in the pq_params for multi-cos. + * If pacing is enabled then select queue according to + * rate limiter availability otherwise select queue based + * on multi cos. 
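+ * (In other words: with pacing enabled the PQ index is looked up by the
+ * queue's relative id in the rate-limiter PQ range via
+ * ecore_get_cm_pq_idx_rl(), otherwise it is selected per traffic class
+ * via ecore_get_cm_pq_idx_mcos(), as done directly below.)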
+ */ + if (IS_ECORE_PACING(p_hwfn)) + pq_id = ecore_get_cm_pq_idx_rl(p_hwfn, p_cid->rel.queue_id); + else + pq_id = ecore_get_cm_pq_idx_mcos(p_hwfn, tc); + + rc = ecore_eth_txq_start_ramrod(p_hwfn, p_cid, pbl_addr, + pbl_size, pq_id); + if (rc != ECORE_SUCCESS) + return rc; + + /* Provide the caller with the necessary return values */ + *pp_doorbell = (u8 OSAL_IOMEM *) + p_hwfn->doorbells + + DB_ADDR(p_cid->cid, DQ_DEMS_LEGACY); + + return ECORE_SUCCESS; +} + +enum _ecore_status_t +ecore_eth_tx_queue_start(struct ecore_hwfn *p_hwfn, u16 opaque_fid, + struct ecore_queue_start_common_params *p_params, + u8 tc, + dma_addr_t pbl_addr, u16 pbl_size, + struct ecore_txq_start_ret_params *p_ret_params) +{ + struct ecore_queue_cid *p_cid; + enum _ecore_status_t rc; + + p_cid = ecore_eth_queue_to_cid_pf(p_hwfn, opaque_fid, false, p_params); + if (p_cid == OSAL_NULL) + return ECORE_INVAL; + + if (IS_PF(p_hwfn->p_dev)) + rc = ecore_eth_pf_tx_queue_start(p_hwfn, p_cid, tc, + pbl_addr, pbl_size, + &p_ret_params->p_doorbell); + else + rc = ecore_vf_pf_txq_start(p_hwfn, p_cid, + pbl_addr, pbl_size, + &p_ret_params->p_doorbell); + + if (rc != ECORE_SUCCESS) + ecore_eth_queue_cid_release(p_hwfn, p_cid); + else + p_ret_params->p_handle = (void *)p_cid; + + return rc; +} + +static enum _ecore_status_t +ecore_eth_pf_tx_queue_stop(struct ecore_hwfn *p_hwfn, + struct ecore_queue_cid *p_cid) +{ + struct ecore_spq_entry *p_ent = OSAL_NULL; + struct ecore_sp_init_data init_data; + enum _ecore_status_t rc; + + OSAL_MEMSET(&init_data, 0, sizeof(init_data)); + init_data.cid = p_cid->cid; + init_data.opaque_fid = p_cid->opaque_fid; + init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK; + + rc = ecore_sp_init_request(p_hwfn, &p_ent, + ETH_RAMROD_TX_QUEUE_STOP, + PROTOCOLID_ETH, &init_data); + if (rc != ECORE_SUCCESS) + return rc; + + return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); +} + +enum _ecore_status_t ecore_eth_tx_queue_stop(struct ecore_hwfn *p_hwfn, + void *p_handle) +{ + struct ecore_queue_cid *p_cid = (struct ecore_queue_cid *)p_handle; + enum _ecore_status_t rc; + + if (IS_PF(p_hwfn->p_dev)) + rc = ecore_eth_pf_tx_queue_stop(p_hwfn, p_cid); + else + rc = ecore_vf_pf_txq_stop(p_hwfn, p_cid); + + if (rc == ECORE_SUCCESS) + ecore_eth_queue_cid_release(p_hwfn, p_cid); + return rc; +} + +static enum eth_filter_action +ecore_filter_action(enum ecore_filter_opcode opcode) +{ + enum eth_filter_action action = MAX_ETH_FILTER_ACTION; + + switch (opcode) { + case ECORE_FILTER_ADD: + action = ETH_FILTER_ACTION_ADD; + break; + case ECORE_FILTER_REMOVE: + action = ETH_FILTER_ACTION_REMOVE; + break; + case ECORE_FILTER_FLUSH: + action = ETH_FILTER_ACTION_REMOVE_ALL; + break; + default: + action = MAX_ETH_FILTER_ACTION; + } + + return action; +} + +static enum _ecore_status_t +ecore_filter_ucast_common(struct ecore_hwfn *p_hwfn, + u16 opaque_fid, + struct ecore_filter_ucast *p_filter_cmd, + struct vport_filter_update_ramrod_data **pp_ramrod, + struct ecore_spq_entry **pp_ent, + enum spq_mode comp_mode, + struct ecore_spq_comp_cb *p_comp_data) +{ + u8 vport_to_add_to = 0, vport_to_remove_from = 0; + struct vport_filter_update_ramrod_data *p_ramrod; + struct eth_filter_cmd *p_first_filter; + struct eth_filter_cmd *p_second_filter; + struct ecore_sp_init_data init_data; + enum eth_filter_action action; + enum _ecore_status_t rc; + + rc = ecore_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from, + &vport_to_remove_from); + if (rc != ECORE_SUCCESS) + return rc; + + rc = ecore_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to, + 
&vport_to_add_to); + if (rc != ECORE_SUCCESS) + return rc; + + /* Get SPQ entry */ + OSAL_MEMSET(&init_data, 0, sizeof(init_data)); + init_data.cid = ecore_spq_get_cid(p_hwfn); + init_data.opaque_fid = opaque_fid; + init_data.comp_mode = comp_mode; + init_data.p_comp_data = p_comp_data; + + rc = ecore_sp_init_request(p_hwfn, pp_ent, + ETH_RAMROD_FILTERS_UPDATE, + PROTOCOLID_ETH, &init_data); + if (rc != ECORE_SUCCESS) + return rc; + + *pp_ramrod = &(*pp_ent)->ramrod.vport_filter_update; + p_ramrod = *pp_ramrod; + p_ramrod->filter_cmd_hdr.rx = p_filter_cmd->is_rx_filter ? 1 : 0; + p_ramrod->filter_cmd_hdr.tx = p_filter_cmd->is_tx_filter ? 1 : 0; + +#ifndef ASIC_ONLY + if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) { + DP_VERBOSE(p_hwfn, ECORE_MSG_SP, + "Non-Asic - prevent Tx filters\n"); + p_ramrod->filter_cmd_hdr.tx = 0; + } +#endif + + switch (p_filter_cmd->opcode) { + case ECORE_FILTER_REPLACE: + case ECORE_FILTER_MOVE: + p_ramrod->filter_cmd_hdr.cmd_cnt = 2; + break; + default: + p_ramrod->filter_cmd_hdr.cmd_cnt = 1; + break; + } + + p_first_filter = &p_ramrod->filter_cmds[0]; + p_second_filter = &p_ramrod->filter_cmds[1]; + + switch (p_filter_cmd->type) { + case ECORE_FILTER_MAC: + p_first_filter->type = ETH_FILTER_TYPE_MAC; + break; + case ECORE_FILTER_VLAN: + p_first_filter->type = ETH_FILTER_TYPE_VLAN; + break; + case ECORE_FILTER_MAC_VLAN: + p_first_filter->type = ETH_FILTER_TYPE_PAIR; + break; + case ECORE_FILTER_INNER_MAC: + p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC; + break; + case ECORE_FILTER_INNER_VLAN: + p_first_filter->type = ETH_FILTER_TYPE_INNER_VLAN; + break; + case ECORE_FILTER_INNER_PAIR: + p_first_filter->type = ETH_FILTER_TYPE_INNER_PAIR; + break; + case ECORE_FILTER_INNER_MAC_VNI_PAIR: + p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR; + break; + case ECORE_FILTER_MAC_VNI_PAIR: + p_first_filter->type = ETH_FILTER_TYPE_MAC_VNI_PAIR; + break; + case ECORE_FILTER_VNI: + p_first_filter->type = ETH_FILTER_TYPE_VNI; + break; + case ECORE_FILTER_UNUSED: /* @DPDK */ + p_first_filter->type = MAX_ETH_FILTER_TYPE; + break; + } + + if ((p_first_filter->type == ETH_FILTER_TYPE_MAC) || + (p_first_filter->type == ETH_FILTER_TYPE_PAIR) || + (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC) || + (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR) || + (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) || + (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR)) + ecore_set_fw_mac_addr(&p_first_filter->mac_msb, + &p_first_filter->mac_mid, + &p_first_filter->mac_lsb, + (u8 *)p_filter_cmd->mac); + + if ((p_first_filter->type == ETH_FILTER_TYPE_VLAN) || + (p_first_filter->type == ETH_FILTER_TYPE_PAIR) || + (p_first_filter->type == ETH_FILTER_TYPE_INNER_VLAN) || + (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR)) + p_first_filter->vlan_id = OSAL_CPU_TO_LE16(p_filter_cmd->vlan); + + if ((p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) || + (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR) || + (p_first_filter->type == ETH_FILTER_TYPE_VNI)) + p_first_filter->vni = OSAL_CPU_TO_LE32(p_filter_cmd->vni); + + if (p_filter_cmd->opcode == ECORE_FILTER_MOVE) { + p_second_filter->type = p_first_filter->type; + p_second_filter->mac_msb = p_first_filter->mac_msb; + p_second_filter->mac_mid = p_first_filter->mac_mid; + p_second_filter->mac_lsb = p_first_filter->mac_lsb; + p_second_filter->vlan_id = p_first_filter->vlan_id; + p_second_filter->vni = p_first_filter->vni; + + p_first_filter->action = ETH_FILTER_ACTION_REMOVE; + + p_first_filter->vport_id = 
vport_to_remove_from; + + p_second_filter->action = ETH_FILTER_ACTION_ADD; + p_second_filter->vport_id = vport_to_add_to; + } else if (p_filter_cmd->opcode == ECORE_FILTER_REPLACE) { + p_first_filter->vport_id = vport_to_add_to; + OSAL_MEMCPY(p_second_filter, p_first_filter, + sizeof(*p_second_filter)); + p_first_filter->action = ETH_FILTER_ACTION_REMOVE_ALL; + p_second_filter->action = ETH_FILTER_ACTION_ADD; + } else { + action = ecore_filter_action(p_filter_cmd->opcode); + + if (action == MAX_ETH_FILTER_ACTION) { + DP_NOTICE(p_hwfn, true, + "%d is not supported yet\n", + p_filter_cmd->opcode); + return ECORE_NOTIMPL; + } + + p_first_filter->action = action; + p_first_filter->vport_id = + (p_filter_cmd->opcode == ECORE_FILTER_REMOVE) ? + vport_to_remove_from : vport_to_add_to; + } + + return ECORE_SUCCESS; +} + +enum _ecore_status_t +ecore_sp_eth_filter_ucast(struct ecore_hwfn *p_hwfn, + u16 opaque_fid, + struct ecore_filter_ucast *p_filter_cmd, + enum spq_mode comp_mode, + struct ecore_spq_comp_cb *p_comp_data) +{ + struct vport_filter_update_ramrod_data *p_ramrod = OSAL_NULL; + struct ecore_spq_entry *p_ent = OSAL_NULL; + struct eth_filter_cmd_header *p_header; + enum _ecore_status_t rc; + + rc = ecore_filter_ucast_common(p_hwfn, opaque_fid, p_filter_cmd, + &p_ramrod, &p_ent, + comp_mode, p_comp_data); + if (rc != ECORE_SUCCESS) { + DP_ERR(p_hwfn, "Uni. filter command failed %d\n", rc); + return rc; + } + p_header = &p_ramrod->filter_cmd_hdr; + p_header->assert_on_error = p_filter_cmd->assert_on_error; + + rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); + if (rc != ECORE_SUCCESS) { + DP_ERR(p_hwfn, "Unicast filter ADD command failed %d\n", rc); + return rc; + } + + DP_VERBOSE(p_hwfn, ECORE_MSG_SP, + "Unicast filter configured, opcode = %s, type = %s, cmd_cnt = %d, is_rx_filter = %d, is_tx_filter = %d\n", + (p_filter_cmd->opcode == ECORE_FILTER_ADD) ? "ADD" : + ((p_filter_cmd->opcode == ECORE_FILTER_REMOVE) ? + "REMOVE" : + ((p_filter_cmd->opcode == ECORE_FILTER_MOVE) ? + "MOVE" : "REPLACE")), + (p_filter_cmd->type == ECORE_FILTER_MAC) ? "MAC" : + ((p_filter_cmd->type == ECORE_FILTER_VLAN) ? 
+ "VLAN" : "MAC & VLAN"), + p_ramrod->filter_cmd_hdr.cmd_cnt, + p_filter_cmd->is_rx_filter, p_filter_cmd->is_tx_filter); + DP_VERBOSE(p_hwfn, ECORE_MSG_SP, + "vport_to_add_to = %d, vport_to_remove_from = %d, mac = %2x:%2x:%2x:%2x:%2x:%2x, vlan = %d\n", + p_filter_cmd->vport_to_add_to, + p_filter_cmd->vport_to_remove_from, + p_filter_cmd->mac[0], p_filter_cmd->mac[1], + p_filter_cmd->mac[2], p_filter_cmd->mac[3], + p_filter_cmd->mac[4], p_filter_cmd->mac[5], + p_filter_cmd->vlan); + + return ECORE_SUCCESS; +} + +/******************************************************************************* + * Description: + * Calculates crc 32 on a buffer + * Note: crc32_length MUST be aligned to 8 + * Return: + ******************************************************************************/ +static u32 ecore_calc_crc32c(u8 *crc32_packet, u32 crc32_length, u32 crc32_seed) +{ + u32 byte = 0, bit = 0, crc32_result = crc32_seed; + u8 msb = 0, current_byte = 0; + + if ((crc32_packet == OSAL_NULL) || + (crc32_length == 0) || ((crc32_length % 8) != 0)) { + return crc32_result; + } + + for (byte = 0; byte < crc32_length; byte++) { + current_byte = crc32_packet[byte]; + for (bit = 0; bit < 8; bit++) { + msb = (u8)(crc32_result >> 31); + crc32_result = crc32_result << 1; + if (msb != (0x1 & (current_byte >> bit))) { + crc32_result = crc32_result ^ CRC32_POLY; + crc32_result |= 1; + } + } + } + + return crc32_result; +} + +static u32 ecore_crc32c_le(u32 seed, u8 *mac) +{ + u32 packet_buf[2] = { 0 }; + + OSAL_MEMCPY((u8 *)(&packet_buf[0]), &mac[0], 6); + return ecore_calc_crc32c((u8 *)packet_buf, 8, seed); +} + +u8 ecore_mcast_bin_from_mac(u8 *mac) +{ + u32 crc = ecore_crc32c_le(ETH_MULTICAST_BIN_FROM_MAC_SEED, mac); + + return crc & 0xff; +} + +static enum _ecore_status_t +ecore_sp_eth_filter_mcast(struct ecore_hwfn *p_hwfn, + struct ecore_filter_mcast *p_filter_cmd, + enum spq_mode comp_mode, + struct ecore_spq_comp_cb *p_comp_data) +{ + struct vport_update_ramrod_data *p_ramrod = OSAL_NULL; + u32 bins[ETH_MULTICAST_MAC_BINS_IN_REGS]; + struct ecore_spq_entry *p_ent = OSAL_NULL; + struct ecore_sp_init_data init_data; + u8 abs_vport_id = 0; + enum _ecore_status_t rc; + int i; + + if (p_filter_cmd->opcode == ECORE_FILTER_ADD) + rc = ecore_fw_vport(p_hwfn, + p_filter_cmd->vport_to_add_to, + &abs_vport_id); + else + rc = ecore_fw_vport(p_hwfn, + p_filter_cmd->vport_to_remove_from, + &abs_vport_id); + if (rc != ECORE_SUCCESS) + return rc; + + /* Get SPQ entry */ + OSAL_MEMSET(&init_data, 0, sizeof(init_data)); + init_data.cid = ecore_spq_get_cid(p_hwfn); + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = comp_mode; + init_data.p_comp_data = p_comp_data; + + rc = ecore_sp_init_request(p_hwfn, &p_ent, + ETH_RAMROD_VPORT_UPDATE, + PROTOCOLID_ETH, &init_data); + if (rc != ECORE_SUCCESS) { + DP_ERR(p_hwfn, "Multi-cast command failed %d\n", rc); + return rc; + } + + p_ramrod = &p_ent->ramrod.vport_update; + p_ramrod->common.update_approx_mcast_flg = 1; + + /* explicitly clear out the entire vector */ + OSAL_MEMSET(&p_ramrod->approx_mcast.bins, + 0, sizeof(p_ramrod->approx_mcast.bins)); + OSAL_MEMSET(bins, 0, sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS); + /* filter ADD op is explicit set op and it removes + * any existing filters for the vport. 
+ */ + if (p_filter_cmd->opcode == ECORE_FILTER_ADD) { + for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) { + u32 bit; + + bit = ecore_mcast_bin_from_mac(p_filter_cmd->mac[i]); + bins[bit / 32] |= 1 << (bit % 32); + } + + /* Convert to correct endianity */ + for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) { + struct vport_update_ramrod_mcast *p_ramrod_bins; + + p_ramrod_bins = &p_ramrod->approx_mcast; + p_ramrod_bins->bins[i] = OSAL_CPU_TO_LE32(bins[i]); + } + } + + p_ramrod->common.vport_id = abs_vport_id; + + rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); + if (rc != ECORE_SUCCESS) + DP_ERR(p_hwfn, "Multicast filter command failed %d\n", rc); + + return rc; +} + +enum _ecore_status_t +ecore_filter_mcast_cmd(struct ecore_dev *p_dev, + struct ecore_filter_mcast *p_filter_cmd, + enum spq_mode comp_mode, + struct ecore_spq_comp_cb *p_comp_data) +{ + enum _ecore_status_t rc = ECORE_SUCCESS; + int i; + + /* only ADD and REMOVE operations are supported for multi-cast */ + if ((p_filter_cmd->opcode != ECORE_FILTER_ADD && + (p_filter_cmd->opcode != ECORE_FILTER_REMOVE)) || + (p_filter_cmd->num_mc_addrs > ECORE_MAX_MC_ADDRS)) { + return ECORE_INVAL; + } + + for_each_hwfn(p_dev, i) { + struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; + + if (IS_VF(p_dev)) { + ecore_vf_pf_filter_mcast(p_hwfn, p_filter_cmd); + continue; + } + + rc = ecore_sp_eth_filter_mcast(p_hwfn, + p_filter_cmd, + comp_mode, p_comp_data); + if (rc != ECORE_SUCCESS) + break; + } + + return rc; +} + +enum _ecore_status_t +ecore_filter_ucast_cmd(struct ecore_dev *p_dev, + struct ecore_filter_ucast *p_filter_cmd, + enum spq_mode comp_mode, + struct ecore_spq_comp_cb *p_comp_data) +{ + enum _ecore_status_t rc = ECORE_SUCCESS; + int i; + + for_each_hwfn(p_dev, i) { + struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; + u16 opaque_fid; + + if (IS_VF(p_dev)) { + rc = ecore_vf_pf_filter_ucast(p_hwfn, p_filter_cmd); + continue; + } + + opaque_fid = p_hwfn->hw_info.opaque_fid; + rc = ecore_sp_eth_filter_ucast(p_hwfn, + opaque_fid, + p_filter_cmd, + comp_mode, p_comp_data); + if (rc != ECORE_SUCCESS) + break; + } + + return rc; +} + +/* Statistics related code */ +static void __ecore_get_vport_pstats_addrlen(struct ecore_hwfn *p_hwfn, + u32 *p_addr, u32 *p_len, + u16 statistics_bin) +{ + if (IS_PF(p_hwfn->p_dev)) { + *p_addr = BAR0_MAP_REG_PSDM_RAM + + PSTORM_QUEUE_STAT_OFFSET(statistics_bin); + *p_len = sizeof(struct eth_pstorm_per_queue_stat); + } else { + struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp; + + *p_addr = p_resp->pfdev_info.stats_info.pstats.address; + *p_len = p_resp->pfdev_info.stats_info.pstats.len; + } +} + +static void __ecore_get_vport_pstats(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_eth_stats *p_stats, + u16 statistics_bin) +{ + struct eth_pstorm_per_queue_stat pstats; + u32 pstats_addr = 0, pstats_len = 0; + + __ecore_get_vport_pstats_addrlen(p_hwfn, &pstats_addr, &pstats_len, + statistics_bin); + + OSAL_MEMSET(&pstats, 0, sizeof(pstats)); + ecore_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, pstats_len); + + p_stats->common.tx_ucast_bytes += + HILO_64_REGPAIR(pstats.sent_ucast_bytes); + p_stats->common.tx_mcast_bytes += + HILO_64_REGPAIR(pstats.sent_mcast_bytes); + p_stats->common.tx_bcast_bytes += + HILO_64_REGPAIR(pstats.sent_bcast_bytes); + p_stats->common.tx_ucast_pkts += + HILO_64_REGPAIR(pstats.sent_ucast_pkts); + p_stats->common.tx_mcast_pkts += + HILO_64_REGPAIR(pstats.sent_mcast_pkts); + p_stats->common.tx_bcast_pkts += 
+ HILO_64_REGPAIR(pstats.sent_bcast_pkts); + p_stats->common.tx_err_drop_pkts += + HILO_64_REGPAIR(pstats.error_drop_pkts); +} + +static void __ecore_get_vport_tstats(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_eth_stats *p_stats) +{ + struct tstorm_per_port_stat tstats; + u32 tstats_addr, tstats_len; + + if (IS_PF(p_hwfn->p_dev)) { + tstats_addr = BAR0_MAP_REG_TSDM_RAM + + TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn)); + tstats_len = sizeof(struct tstorm_per_port_stat); + } else { + struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp; + + tstats_addr = p_resp->pfdev_info.stats_info.tstats.address; + tstats_len = p_resp->pfdev_info.stats_info.tstats.len; + } + + OSAL_MEMSET(&tstats, 0, sizeof(tstats)); + ecore_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, tstats_len); + + p_stats->common.mftag_filter_discards += + HILO_64_REGPAIR(tstats.mftag_filter_discard); + p_stats->common.mac_filter_discards += + HILO_64_REGPAIR(tstats.eth_mac_filter_discard); + p_stats->common.gft_filter_drop += + HILO_64_REGPAIR(tstats.eth_gft_drop_pkt); +} + +static void __ecore_get_vport_ustats_addrlen(struct ecore_hwfn *p_hwfn, + u32 *p_addr, u32 *p_len, + u16 statistics_bin) +{ + if (IS_PF(p_hwfn->p_dev)) { + *p_addr = BAR0_MAP_REG_USDM_RAM + + USTORM_QUEUE_STAT_OFFSET(statistics_bin); + *p_len = sizeof(struct eth_ustorm_per_queue_stat); + } else { + struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp; + + *p_addr = p_resp->pfdev_info.stats_info.ustats.address; + *p_len = p_resp->pfdev_info.stats_info.ustats.len; + } +} + +static void __ecore_get_vport_ustats(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_eth_stats *p_stats, + u16 statistics_bin) +{ + struct eth_ustorm_per_queue_stat ustats; + u32 ustats_addr = 0, ustats_len = 0; + + __ecore_get_vport_ustats_addrlen(p_hwfn, &ustats_addr, &ustats_len, + statistics_bin); + + OSAL_MEMSET(&ustats, 0, sizeof(ustats)); + ecore_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, ustats_len); + + p_stats->common.rx_ucast_bytes += + HILO_64_REGPAIR(ustats.rcv_ucast_bytes); + p_stats->common.rx_mcast_bytes += + HILO_64_REGPAIR(ustats.rcv_mcast_bytes); + p_stats->common.rx_bcast_bytes += + HILO_64_REGPAIR(ustats.rcv_bcast_bytes); + p_stats->common.rx_ucast_pkts += + HILO_64_REGPAIR(ustats.rcv_ucast_pkts); + p_stats->common.rx_mcast_pkts += + HILO_64_REGPAIR(ustats.rcv_mcast_pkts); + p_stats->common.rx_bcast_pkts += + HILO_64_REGPAIR(ustats.rcv_bcast_pkts); +} + +static void __ecore_get_vport_mstats_addrlen(struct ecore_hwfn *p_hwfn, + u32 *p_addr, u32 *p_len, + u16 statistics_bin) +{ + if (IS_PF(p_hwfn->p_dev)) { + *p_addr = BAR0_MAP_REG_MSDM_RAM + + MSTORM_QUEUE_STAT_OFFSET(statistics_bin); + *p_len = sizeof(struct eth_mstorm_per_queue_stat); + } else { + struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp; + + *p_addr = p_resp->pfdev_info.stats_info.mstats.address; + *p_len = p_resp->pfdev_info.stats_info.mstats.len; + } +} + +static void __ecore_get_vport_mstats(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_eth_stats *p_stats, + u16 statistics_bin) +{ + struct eth_mstorm_per_queue_stat mstats; + u32 mstats_addr = 0, mstats_len = 0; + + __ecore_get_vport_mstats_addrlen(p_hwfn, &mstats_addr, &mstats_len, + statistics_bin); + + OSAL_MEMSET(&mstats, 0, sizeof(mstats)); + ecore_memcpy_from(p_hwfn, p_ptt, &mstats, mstats_addr, 
mstats_len); + + p_stats->common.no_buff_discards += + HILO_64_REGPAIR(mstats.no_buff_discard); + p_stats->common.packet_too_big_discard += + HILO_64_REGPAIR(mstats.packet_too_big_discard); + p_stats->common.ttl0_discard += + HILO_64_REGPAIR(mstats.ttl0_discard); + p_stats->common.tpa_coalesced_pkts += + HILO_64_REGPAIR(mstats.tpa_coalesced_pkts); + p_stats->common.tpa_coalesced_events += + HILO_64_REGPAIR(mstats.tpa_coalesced_events); + p_stats->common.tpa_aborts_num += + HILO_64_REGPAIR(mstats.tpa_aborts_num); + p_stats->common.tpa_coalesced_bytes += + HILO_64_REGPAIR(mstats.tpa_coalesced_bytes); +} + +static void __ecore_get_vport_port_stats(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_eth_stats *p_stats) +{ + struct ecore_eth_stats_common *p_common = &p_stats->common; + struct port_stats port_stats; + int j; + + OSAL_MEMSET(&port_stats, 0, sizeof(port_stats)); + + ecore_memcpy_from(p_hwfn, p_ptt, &port_stats, + p_hwfn->mcp_info->port_addr + + OFFSETOF(struct public_port, stats), + sizeof(port_stats)); + + p_common->rx_64_byte_packets += port_stats.eth.r64; + p_common->rx_65_to_127_byte_packets += port_stats.eth.r127; + p_common->rx_128_to_255_byte_packets += port_stats.eth.r255; + p_common->rx_256_to_511_byte_packets += port_stats.eth.r511; + p_common->rx_512_to_1023_byte_packets += port_stats.eth.r1023; + p_common->rx_1024_to_1518_byte_packets += port_stats.eth.r1518; + p_common->rx_crc_errors += port_stats.eth.rfcs; + p_common->rx_mac_crtl_frames += port_stats.eth.rxcf; + p_common->rx_pause_frames += port_stats.eth.rxpf; + p_common->rx_pfc_frames += port_stats.eth.rxpp; + p_common->rx_align_errors += port_stats.eth.raln; + p_common->rx_carrier_errors += port_stats.eth.rfcr; + p_common->rx_oversize_packets += port_stats.eth.rovr; + p_common->rx_jabbers += port_stats.eth.rjbr; + p_common->rx_undersize_packets += port_stats.eth.rund; + p_common->rx_fragments += port_stats.eth.rfrg; + p_common->tx_64_byte_packets += port_stats.eth.t64; + p_common->tx_65_to_127_byte_packets += port_stats.eth.t127; + p_common->tx_128_to_255_byte_packets += port_stats.eth.t255; + p_common->tx_256_to_511_byte_packets += port_stats.eth.t511; + p_common->tx_512_to_1023_byte_packets += port_stats.eth.t1023; + p_common->tx_1024_to_1518_byte_packets += port_stats.eth.t1518; + p_common->tx_pause_frames += port_stats.eth.txpf; + p_common->tx_pfc_frames += port_stats.eth.txpp; + p_common->rx_mac_bytes += port_stats.eth.rbyte; + p_common->rx_mac_uc_packets += port_stats.eth.rxuca; + p_common->rx_mac_mc_packets += port_stats.eth.rxmca; + p_common->rx_mac_bc_packets += port_stats.eth.rxbca; + p_common->rx_mac_frames_ok += port_stats.eth.rxpok; + p_common->tx_mac_bytes += port_stats.eth.tbyte; + p_common->tx_mac_uc_packets += port_stats.eth.txuca; + p_common->tx_mac_mc_packets += port_stats.eth.txmca; + p_common->tx_mac_bc_packets += port_stats.eth.txbca; + p_common->tx_mac_ctrl_frames += port_stats.eth.txcf; + for (j = 0; j < 8; j++) { + p_common->brb_truncates += port_stats.brb.brb_truncate[j]; + p_common->brb_discards += port_stats.brb.brb_discard[j]; + } + + if (ECORE_IS_BB(p_hwfn->p_dev)) { + struct ecore_eth_stats_bb *p_bb = &p_stats->bb; + + p_bb->rx_1519_to_1522_byte_packets += + port_stats.eth.u0.bb0.r1522; + p_bb->rx_1519_to_2047_byte_packets += + port_stats.eth.u0.bb0.r2047; + p_bb->rx_2048_to_4095_byte_packets += + port_stats.eth.u0.bb0.r4095; + p_bb->rx_4096_to_9216_byte_packets += + port_stats.eth.u0.bb0.r9216; + p_bb->rx_9217_to_16383_byte_packets += + port_stats.eth.u0.bb0.r16383; 
+ p_bb->tx_1519_to_2047_byte_packets += + port_stats.eth.u1.bb1.t2047; + p_bb->tx_2048_to_4095_byte_packets += + port_stats.eth.u1.bb1.t4095; + p_bb->tx_4096_to_9216_byte_packets += + port_stats.eth.u1.bb1.t9216; + p_bb->tx_9217_to_16383_byte_packets += + port_stats.eth.u1.bb1.t16383; + p_bb->tx_lpi_entry_count += port_stats.eth.u2.bb2.tlpiec; + p_bb->tx_total_collisions += port_stats.eth.u2.bb2.tncl; + } else { + struct ecore_eth_stats_ah *p_ah = &p_stats->ah; + + p_ah->rx_1519_to_max_byte_packets += + port_stats.eth.u0.ah0.r1519_to_max; + p_ah->tx_1519_to_max_byte_packets = + port_stats.eth.u1.ah1.t1519_to_max; + } + + p_common->link_change_count = ecore_rd(p_hwfn, p_ptt, + p_hwfn->mcp_info->port_addr + + OFFSETOF(struct public_port, + link_change_count)); +} + +void __ecore_get_vport_stats(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_eth_stats *stats, + u16 statistics_bin, bool b_get_port_stats) +{ + __ecore_get_vport_mstats(p_hwfn, p_ptt, stats, statistics_bin); + __ecore_get_vport_ustats(p_hwfn, p_ptt, stats, statistics_bin); + __ecore_get_vport_tstats(p_hwfn, p_ptt, stats); + __ecore_get_vport_pstats(p_hwfn, p_ptt, stats, statistics_bin); + +#ifndef ASIC_ONLY + /* Avoid getting PORT stats for emulation. */ + if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) + return; +#endif + + if (b_get_port_stats && p_hwfn->mcp_info) + __ecore_get_vport_port_stats(p_hwfn, p_ptt, stats); +} + +static void _ecore_get_vport_stats(struct ecore_dev *p_dev, + struct ecore_eth_stats *stats) +{ + u8 fw_vport = 0; + int i; + + OSAL_MEMSET(stats, 0, sizeof(*stats)); + + for_each_hwfn(p_dev, i) { + struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; + struct ecore_ptt *p_ptt = IS_PF(p_dev) ? + ecore_ptt_acquire(p_hwfn) : OSAL_NULL; + bool b_get_port_stats; + + if (IS_PF(p_dev)) { + /* The main vport index is relative first */ + if (ecore_fw_vport(p_hwfn, 0, &fw_vport)) { + DP_ERR(p_hwfn, "No vport available!\n"); + goto out; + } + } + + if (IS_PF(p_dev) && !p_ptt) { + DP_ERR(p_hwfn, "Failed to acquire ptt\n"); + continue; + } + + b_get_port_stats = IS_PF(p_dev) && IS_LEAD_HWFN(p_hwfn); + __ecore_get_vport_stats(p_hwfn, p_ptt, stats, fw_vport, + b_get_port_stats); + +out: + if (IS_PF(p_dev) && p_ptt) + ecore_ptt_release(p_hwfn, p_ptt); + } +} + +void ecore_get_vport_stats(struct ecore_dev *p_dev, + struct ecore_eth_stats *stats) +{ + u32 i; + + if (!p_dev) { + OSAL_MEMSET(stats, 0, sizeof(*stats)); + return; + } + + _ecore_get_vport_stats(p_dev, stats); + + if (!p_dev->reset_stats) + return; + + /* Reduce the statistics baseline */ + for (i = 0; i < sizeof(struct ecore_eth_stats) / sizeof(u64); i++) + ((u64 *)stats)[i] -= ((u64 *)p_dev->reset_stats)[i]; +} + +/* zeroes V-PORT specific portion of stats (Port stats remains untouched) */ +void ecore_reset_vport_stats(struct ecore_dev *p_dev) +{ + int i; + + for_each_hwfn(p_dev, i) { + struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; + struct eth_mstorm_per_queue_stat mstats; + struct eth_ustorm_per_queue_stat ustats; + struct eth_pstorm_per_queue_stat pstats; + struct ecore_ptt *p_ptt = IS_PF(p_dev) ? 
+ ecore_ptt_acquire(p_hwfn) : OSAL_NULL; + u32 addr = 0, len = 0; + + if (IS_PF(p_dev) && !p_ptt) { + DP_ERR(p_hwfn, "Failed to acquire ptt\n"); + continue; + } + + OSAL_MEMSET(&mstats, 0, sizeof(mstats)); + __ecore_get_vport_mstats_addrlen(p_hwfn, &addr, &len, 0); + ecore_memcpy_to(p_hwfn, p_ptt, addr, &mstats, len); + + OSAL_MEMSET(&ustats, 0, sizeof(ustats)); + __ecore_get_vport_ustats_addrlen(p_hwfn, &addr, &len, 0); + ecore_memcpy_to(p_hwfn, p_ptt, addr, &ustats, len); + + OSAL_MEMSET(&pstats, 0, sizeof(pstats)); + __ecore_get_vport_pstats_addrlen(p_hwfn, &addr, &len, 0); + ecore_memcpy_to(p_hwfn, p_ptt, addr, &pstats, len); + + if (IS_PF(p_dev)) + ecore_ptt_release(p_hwfn, p_ptt); + } + + /* PORT statistics are not necessarily reset, so we need to + * read and create a baseline for future statistics. + * Link change stat is maintained by MFW, return its value as is. + */ + if (!p_dev->reset_stats) + DP_INFO(p_dev, "Reset stats not allocated\n"); + else { + _ecore_get_vport_stats(p_dev, p_dev->reset_stats); + p_dev->reset_stats->common.link_change_count = 0; + } +} + +static enum gft_profile_type +ecore_arfs_mode_to_hsi(enum ecore_filter_config_mode mode) +{ + if (mode == ECORE_FILTER_CONFIG_MODE_5_TUPLE) + return GFT_PROFILE_TYPE_4_TUPLE; + + if (mode == ECORE_FILTER_CONFIG_MODE_IP_DEST) + return GFT_PROFILE_TYPE_IP_DST_ADDR; + + if (mode == ECORE_FILTER_CONFIG_MODE_TUNN_TYPE) + return GFT_PROFILE_TYPE_TUNNEL_TYPE; + + if (mode == ECORE_FILTER_CONFIG_MODE_IP_SRC) + return GFT_PROFILE_TYPE_IP_SRC_ADDR; + + return GFT_PROFILE_TYPE_L4_DST_PORT; +} + +void ecore_arfs_mode_configure(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_arfs_config_params *p_cfg_params) +{ + if (OSAL_TEST_BIT(ECORE_MF_DISABLE_ARFS, &p_hwfn->p_dev->mf_bits)) + return; + + if (p_cfg_params->mode != ECORE_FILTER_CONFIG_MODE_DISABLE) { + ecore_gft_config(p_hwfn, p_ptt, p_hwfn->rel_pf_id, + p_cfg_params->tcp, + p_cfg_params->udp, + p_cfg_params->ipv4, + p_cfg_params->ipv6, + ecore_arfs_mode_to_hsi(p_cfg_params->mode)); + DP_VERBOSE(p_hwfn, ECORE_MSG_SP, + "tcp = %s, udp = %s, ipv4 = %s, ipv6 =%s\n", + p_cfg_params->tcp ? "Enable" : "Disable", + p_cfg_params->udp ? "Enable" : "Disable", + p_cfg_params->ipv4 ? "Enable" : "Disable", + p_cfg_params->ipv6 ? 
"Enable" : "Disable"); + } else { + ecore_gft_disable(p_hwfn, p_ptt, p_hwfn->rel_pf_id); + } + DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Configured ARFS mode : %d\n", + (int)p_cfg_params->mode); +} + +enum _ecore_status_t +ecore_configure_rfs_ntuple_filter(struct ecore_hwfn *p_hwfn, + struct ecore_spq_comp_cb *p_cb, + struct ecore_ntuple_filter_params *p_params) +{ + struct rx_update_gft_filter_data *p_ramrod = OSAL_NULL; + struct ecore_spq_entry *p_ent = OSAL_NULL; + struct ecore_sp_init_data init_data; + u16 abs_rx_q_id = 0; + u8 abs_vport_id = 0; + enum _ecore_status_t rc = ECORE_NOTIMPL; + + /* Get SPQ entry */ + OSAL_MEMSET(&init_data, 0, sizeof(init_data)); + init_data.cid = ecore_spq_get_cid(p_hwfn); + + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + + if (p_cb) { + init_data.comp_mode = ECORE_SPQ_MODE_CB; + init_data.p_comp_data = p_cb; + } else { + init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK; + } + + rc = ecore_sp_init_request(p_hwfn, &p_ent, + ETH_RAMROD_GFT_UPDATE_FILTER, + PROTOCOLID_ETH, &init_data); + if (rc != ECORE_SUCCESS) + return rc; + + p_ramrod = &p_ent->ramrod.rx_update_gft; + + DMA_REGPAIR_LE(p_ramrod->pkt_hdr_addr, p_params->addr); + p_ramrod->pkt_hdr_length = OSAL_CPU_TO_LE16(p_params->length); + + if (p_params->b_is_drop) { + p_ramrod->vport_id = OSAL_CPU_TO_LE16(ETH_GFT_TRASHCAN_VPORT); + } else { + rc = ecore_fw_vport(p_hwfn, p_params->vport_id, + &abs_vport_id); + if (rc) + return rc; + + if (p_params->qid != ECORE_RFS_NTUPLE_QID_RSS) { + rc = ecore_fw_l2_queue(p_hwfn, p_params->qid, + &abs_rx_q_id); + if (rc) + return rc; + + p_ramrod->rx_qid_valid = 1; + p_ramrod->rx_qid = OSAL_CPU_TO_LE16(abs_rx_q_id); + } + + p_ramrod->vport_id = OSAL_CPU_TO_LE16((u16)abs_vport_id); + } + + p_ramrod->flow_id_valid = 0; + p_ramrod->flow_id = 0; + + p_ramrod->filter_action = p_params->b_is_add ? GFT_ADD_FILTER + : GFT_DELETE_FILTER; + + DP_VERBOSE(p_hwfn, ECORE_MSG_SP, + "V[%0x], Q[%04x] - %s filter from 0x%lx [length %04xb]\n", + abs_vport_id, abs_rx_q_id, + p_params->b_is_add ? 
"Adding" : "Removing", + (unsigned long)p_params->addr, p_params->length); + + return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); +} + +enum _ecore_status_t ecore_get_rxq_coalesce(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_queue_cid *p_cid, + u16 *p_rx_coal) +{ + u32 coalesce, address, is_valid; + struct cau_sb_entry sb_entry; + u8 timer_res; + enum _ecore_status_t rc; + + rc = ecore_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY + + p_cid->sb_igu_id * sizeof(u64), + (u64)(osal_uintptr_t)&sb_entry, 2, + OSAL_NULL /* default parameters */); + if (rc != ECORE_SUCCESS) { + DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc); + return rc; + } + + timer_res = GET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES0); + + address = BAR0_MAP_REG_USDM_RAM + + USTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id); + coalesce = ecore_rd(p_hwfn, p_ptt, address); + + is_valid = GET_FIELD(coalesce, COALESCING_TIMESET_VALID); + if (!is_valid) + return ECORE_INVAL; + + coalesce = GET_FIELD(coalesce, COALESCING_TIMESET_TIMESET); + *p_rx_coal = (u16)(coalesce << timer_res); + + return ECORE_SUCCESS; +} + +enum _ecore_status_t ecore_get_txq_coalesce(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_queue_cid *p_cid, + u16 *p_tx_coal) +{ + u32 coalesce, address, is_valid; + struct cau_sb_entry sb_entry; + u8 timer_res; + enum _ecore_status_t rc; + + rc = ecore_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY + + p_cid->sb_igu_id * sizeof(u64), + (u64)(osal_uintptr_t)&sb_entry, 2, + OSAL_NULL /* default parameters */); + if (rc != ECORE_SUCCESS) { + DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc); + return rc; + } + + timer_res = GET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES1); + + address = BAR0_MAP_REG_XSDM_RAM + + XSTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id); + coalesce = ecore_rd(p_hwfn, p_ptt, address); + + is_valid = GET_FIELD(coalesce, COALESCING_TIMESET_VALID); + if (!is_valid) + return ECORE_INVAL; + + coalesce = GET_FIELD(coalesce, COALESCING_TIMESET_TIMESET); + *p_tx_coal = (u16)(coalesce << timer_res); + + return ECORE_SUCCESS; +} + +enum _ecore_status_t +ecore_get_queue_coalesce(struct ecore_hwfn *p_hwfn, u16 *p_coal, + void *handle) +{ + struct ecore_queue_cid *p_cid = (struct ecore_queue_cid *)handle; + enum _ecore_status_t rc = ECORE_SUCCESS; + struct ecore_ptt *p_ptt; + + if (IS_VF(p_hwfn->p_dev)) { + rc = ecore_vf_pf_get_coalesce(p_hwfn, p_coal, p_cid); + if (rc != ECORE_SUCCESS) + DP_NOTICE(p_hwfn, false, + "Unable to read queue calescing\n"); + + return rc; + } + + p_ptt = ecore_ptt_acquire(p_hwfn); + if (!p_ptt) + return ECORE_AGAIN; + + if (p_cid->b_is_rx) { + rc = ecore_get_rxq_coalesce(p_hwfn, p_ptt, p_cid, p_coal); + if (rc != ECORE_SUCCESS) + goto out; + } else { + rc = ecore_get_txq_coalesce(p_hwfn, p_ptt, p_cid, p_coal); + if (rc != ECORE_SUCCESS) + goto out; + } + +out: + ecore_ptt_release(p_hwfn, p_ptt); + + return rc; +} + +enum _ecore_status_t +ecore_eth_tx_queue_maxrate(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_queue_cid *p_cid, u32 rate) +{ + u16 rl_id; + u8 vport; + + vport = (u8)ecore_get_qm_vport_idx_rl(p_hwfn, p_cid->rel.queue_id); + + DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, + "About to rate limit qm vport %d for queue %d with rate %d\n", + vport, p_cid->rel.queue_id, rate); + + rl_id = vport; /* The "rl_id" is set as the "vport_id" */ + return ecore_init_global_rl(p_hwfn, p_ptt, rl_id, rate); +} + +#define RSS_TSTORM_UPDATE_STATUS_MAX_POLL_COUNT 100 +#define RSS_TSTORM_UPDATE_STATUS_POLL_PERIOD_US 1 + 
+enum _ecore_status_t +ecore_update_eth_rss_ind_table_entry(struct ecore_hwfn *p_hwfn, + u8 vport_id, + u8 ind_table_index, + u16 ind_table_value) +{ + struct eth_tstorm_rss_update_data update_data = { 0 }; + void OSAL_IOMEM *addr = OSAL_NULL; + enum _ecore_status_t rc; + u8 abs_vport_id; + u32 cnt = 0; + + OSAL_BUILD_BUG_ON(sizeof(update_data) != sizeof(u64)); + + rc = ecore_fw_vport(p_hwfn, vport_id, &abs_vport_id); + if (rc != ECORE_SUCCESS) + return rc; + + addr = (u8 *)p_hwfn->regview + GTT_BAR0_MAP_REG_TSDM_RAM + + TSTORM_ETH_RSS_UPDATE_OFFSET(p_hwfn->rel_pf_id); + + *(u64 *)(&update_data) = DIRECT_REG_RD64(p_hwfn, addr); + + for (cnt = 0; update_data.valid && + cnt < RSS_TSTORM_UPDATE_STATUS_MAX_POLL_COUNT; cnt++) { + OSAL_UDELAY(RSS_TSTORM_UPDATE_STATUS_POLL_PERIOD_US); + *(u64 *)(&update_data) = DIRECT_REG_RD64(p_hwfn, addr); + } + + if (update_data.valid) { + DP_NOTICE(p_hwfn, true, + "rss update valid status is not clear! valid=0x%x vport id=%d ind_Table_idx=%d ind_table_value=%d.\n", + update_data.valid, vport_id, ind_table_index, + ind_table_value); + + return ECORE_AGAIN; + } + + update_data.valid = 1; + update_data.ind_table_index = ind_table_index; + update_data.ind_table_value = ind_table_value; + update_data.vport_id = abs_vport_id; + + DIRECT_REG_WR64(p_hwfn, addr, *(u64 *)(&update_data)); + + return ECORE_SUCCESS; +} diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_l2.h b/src/spdk/dpdk/drivers/net/qede/base/ecore_l2.h new file mode 100644 index 000000000..8fa403029 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_l2.h @@ -0,0 +1,165 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. + * All rights reserved. + * www.cavium.com + */ + +#ifndef __ECORE_L2_H__ +#define __ECORE_L2_H__ + + +#include "ecore.h" +#include "ecore_hw.h" +#include "ecore_spq.h" +#include "ecore_l2_api.h" + +#define MAX_QUEUES_PER_QZONE (sizeof(unsigned long) * 8) +#define ECORE_QUEUE_CID_PF (0xff) + +/* Almost identical to the ecore_queue_start_common_params, + * but here we maintain the SB index in IGU CAM. + */ +struct ecore_queue_cid_params { + u8 vport_id; + u16 queue_id; + u8 stats_id; +}; + + /* Additional parameters required for initialization of the queue_cid + * and are relevant only for a PF initializing one for its VFs. + */ +struct ecore_queue_cid_vf_params { + /* Should match the VF's relative index */ + u8 vfid; + + /* 0-based queue index. Should reflect the relative qzone the + * VF thinks is associated with it [in its range]. + */ + u8 vf_qid; + + /* Indicates a VF is legacy, making it differ in several things: + * - Producers would be placed in a different place. + * - Makes assumptions regarding the CIDs. + */ + u8 vf_legacy; + + /* For VFs, this index arrives via TLV to diffrentiate between + * different queues opened on the same qzone, and is passed + * [where the PF would have allocated it internally for its own]. + */ + u8 qid_usage_idx; +}; + +struct ecore_queue_cid { + /* For stats-id, the `rel' is actually absolute as well */ + struct ecore_queue_cid_params rel; + struct ecore_queue_cid_params abs; + + /* These have no 'relative' meaning */ + u16 sb_igu_id; + u8 sb_idx; + + u32 cid; + u16 opaque_fid; + + bool b_is_rx; + + /* VFs queues are mapped differently, so we need to know the + * relative queue associated with them [0-based]. + * Notice this is relevant on the *PF* queue-cid of its VF's queues, + * and not on the VF itself. 
+ */ + u8 vfid; + u8 vf_qid; + + /* We need an additional index to diffrentiate between queues opened + * for same queue-zone, as VFs would have to communicate the info + * to the PF [otherwise PF has no way to diffrentiate]. + */ + u8 qid_usage_idx; + + /* Legacy VFs might have Rx producer located elsewhere */ + u8 vf_legacy; +#define ECORE_QCID_LEGACY_VF_RX_PROD (1 << 0) +#define ECORE_QCID_LEGACY_VF_CID (1 << 1) + + struct ecore_hwfn *p_owner; +}; + +enum _ecore_status_t ecore_l2_alloc(struct ecore_hwfn *p_hwfn); +void ecore_l2_setup(struct ecore_hwfn *p_hwfn); +void ecore_l2_free(struct ecore_hwfn *p_hwfn); + +void ecore_eth_queue_cid_release(struct ecore_hwfn *p_hwfn, + struct ecore_queue_cid *p_cid); + +struct ecore_queue_cid * +ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn, u16 opaque_fid, + struct ecore_queue_start_common_params *p_params, + bool b_is_rx, + struct ecore_queue_cid_vf_params *p_vf_params); + +enum _ecore_status_t +ecore_sp_eth_vport_start(struct ecore_hwfn *p_hwfn, + struct ecore_sp_vport_start_params *p_params); + +/** + * @brief - Starts an Rx queue, when queue_cid is already prepared + * + * @param p_hwfn + * @param p_cid + * @param bd_max_bytes + * @param bd_chain_phys_addr + * @param cqe_pbl_addr + * @param cqe_pbl_size + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t +ecore_eth_rxq_start_ramrod(struct ecore_hwfn *p_hwfn, + struct ecore_queue_cid *p_cid, + u16 bd_max_bytes, + dma_addr_t bd_chain_phys_addr, + dma_addr_t cqe_pbl_addr, + u16 cqe_pbl_size); + +/** + * @brief - Starts a Tx queue, where queue_cid is already prepared + * + * @param p_hwfn + * @param p_cid + * @param pbl_addr + * @param pbl_size + * @param p_pq_params - parameters for choosing the PQ for this Tx queue + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t +ecore_eth_txq_start_ramrod(struct ecore_hwfn *p_hwfn, + struct ecore_queue_cid *p_cid, + dma_addr_t pbl_addr, u16 pbl_size, + u16 pq_id); + +u8 ecore_mcast_bin_from_mac(u8 *mac); + +enum _ecore_status_t ecore_set_rxq_coalesce(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u16 coalesce, + struct ecore_queue_cid *p_cid); + +enum _ecore_status_t ecore_set_txq_coalesce(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u16 coalesce, + struct ecore_queue_cid *p_cid); + +enum _ecore_status_t ecore_get_rxq_coalesce(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_queue_cid *p_cid, + u16 *p_hw_coal); + +enum _ecore_status_t ecore_get_txq_coalesce(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_queue_cid *p_cid, + u16 *p_hw_coal); + +#endif diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_l2_api.h b/src/spdk/dpdk/drivers/net/qede/base/ecore_l2_api.h new file mode 100644 index 000000000..bebf412ed --- /dev/null +++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_l2_api.h @@ -0,0 +1,517 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. + * All rights reserved. 
+ * www.cavium.com + */ + +#ifndef __ECORE_L2_API_H__ +#define __ECORE_L2_API_H__ + +#include "ecore_status.h" +#include "ecore_sp_api.h" +#include "ecore_int_api.h" + +#ifndef __EXTRACT__LINUX__ +enum ecore_rss_caps { + ECORE_RSS_IPV4 = 0x1, + ECORE_RSS_IPV6 = 0x2, + ECORE_RSS_IPV4_TCP = 0x4, + ECORE_RSS_IPV6_TCP = 0x8, + ECORE_RSS_IPV4_UDP = 0x10, + ECORE_RSS_IPV6_UDP = 0x20, +}; + +/* Should be the same as ETH_RSS_IND_TABLE_ENTRIES_NUM */ +#define ECORE_RSS_IND_TABLE_SIZE 128 +#define ECORE_RSS_KEY_SIZE 10 /* size in 32b chunks */ +#endif + +struct ecore_queue_start_common_params { + /* Should always be relative to entity sending this. */ + u8 vport_id; + u16 queue_id; + + /* Relative, but relevant only for PFs */ + u8 stats_id; + + struct ecore_sb_info *p_sb; + u8 sb_idx; +}; + +struct ecore_rxq_start_ret_params { + void OSAL_IOMEM *p_prod; + void *p_handle; +}; + +struct ecore_txq_start_ret_params { + void OSAL_IOMEM *p_doorbell; + void *p_handle; +}; + +struct ecore_rss_params { + u8 update_rss_config; + u8 rss_enable; + u8 rss_eng_id; + u8 update_rss_capabilities; + u8 update_rss_ind_table; + u8 update_rss_key; + u8 rss_caps; + u8 rss_table_size_log; /* The table size is 2 ^ rss_table_size_log */ + + /* Indirection table consist of rx queue handles */ + void *rss_ind_table[ECORE_RSS_IND_TABLE_SIZE]; + u32 rss_key[ECORE_RSS_KEY_SIZE]; +}; + +struct ecore_sge_tpa_params { + u8 max_buffers_per_cqe; + + u8 update_tpa_en_flg; + u8 tpa_ipv4_en_flg; + u8 tpa_ipv6_en_flg; + u8 tpa_ipv4_tunn_en_flg; + u8 tpa_ipv6_tunn_en_flg; + + u8 update_tpa_param_flg; + u8 tpa_pkt_split_flg; + u8 tpa_hdr_data_split_flg; + u8 tpa_gro_consistent_flg; + u8 tpa_max_aggs_num; + u16 tpa_max_size; + u16 tpa_min_size_to_start; + u16 tpa_min_size_to_cont; +}; + +enum ecore_filter_opcode { + ECORE_FILTER_ADD, + ECORE_FILTER_REMOVE, + ECORE_FILTER_MOVE, + ECORE_FILTER_REPLACE, /* Delete all MACs and add new one instead */ + ECORE_FILTER_FLUSH, /* Removes all filters */ +}; + +enum ecore_filter_ucast_type { + ECORE_FILTER_MAC, + ECORE_FILTER_VLAN, + ECORE_FILTER_MAC_VLAN, + ECORE_FILTER_INNER_MAC, + ECORE_FILTER_INNER_VLAN, + ECORE_FILTER_INNER_PAIR, + ECORE_FILTER_INNER_MAC_VNI_PAIR, + ECORE_FILTER_MAC_VNI_PAIR, + ECORE_FILTER_VNI, + ECORE_FILTER_UNUSED, /* @DPDK */ +}; + +struct ecore_filter_ucast { + enum ecore_filter_opcode opcode; + enum ecore_filter_ucast_type type; + u8 is_rx_filter; + u8 is_tx_filter; + u8 vport_to_add_to; + u8 vport_to_remove_from; + unsigned char mac[ETH_ALEN]; + u8 assert_on_error; + u16 vlan; + u32 vni; +}; + +struct ecore_filter_mcast { + /* MOVE is not supported for multicast */ + enum ecore_filter_opcode opcode; + u8 vport_to_add_to; + u8 vport_to_remove_from; + u8 num_mc_addrs; +#define ECORE_MAX_MC_ADDRS 64 + unsigned char mac[ECORE_MAX_MC_ADDRS][ETH_ALEN]; +}; + +struct ecore_filter_accept_flags { + u8 update_rx_mode_config; + u8 update_tx_mode_config; + u8 rx_accept_filter; + u8 tx_accept_filter; +#define ECORE_ACCEPT_NONE 0x01 +#define ECORE_ACCEPT_UCAST_MATCHED 0x02 +#define ECORE_ACCEPT_UCAST_UNMATCHED 0x04 +#define ECORE_ACCEPT_MCAST_MATCHED 0x08 +#define ECORE_ACCEPT_MCAST_UNMATCHED 0x10 +#define ECORE_ACCEPT_BCAST 0x20 +#define ECORE_ACCEPT_ANY_VNI 0x40 +}; + +enum ecore_filter_config_mode { + ECORE_FILTER_CONFIG_MODE_DISABLE, + ECORE_FILTER_CONFIG_MODE_5_TUPLE, + ECORE_FILTER_CONFIG_MODE_L4_PORT, + ECORE_FILTER_CONFIG_MODE_IP_DEST, + ECORE_FILTER_CONFIG_MODE_TUNN_TYPE, + ECORE_FILTER_CONFIG_MODE_IP_SRC, +}; + +struct ecore_arfs_config_params { + bool tcp; + bool udp; + bool 
ipv4; + bool ipv6; + enum ecore_filter_config_mode mode; +}; + +/* Add / remove / move / remove-all unicast MAC-VLAN filters. + * FW will assert in the following cases, so driver should take care...: + * 1. Adding a filter to a full table. + * 2. Adding a filter which already exists on that vport. + * 3. Removing a filter which doesn't exist. + */ + +enum _ecore_status_t +ecore_filter_ucast_cmd(struct ecore_dev *p_dev, + struct ecore_filter_ucast *p_filter_cmd, + enum spq_mode comp_mode, + struct ecore_spq_comp_cb *p_comp_data); + +/* Add / remove / move multicast MAC filters. */ +enum _ecore_status_t +ecore_filter_mcast_cmd(struct ecore_dev *p_dev, + struct ecore_filter_mcast *p_filter_cmd, + enum spq_mode comp_mode, + struct ecore_spq_comp_cb *p_comp_data); + +/* Set "accept" filters */ +enum _ecore_status_t +ecore_filter_accept_cmd( + struct ecore_dev *p_dev, + u8 vport, + struct ecore_filter_accept_flags accept_flags, + u8 update_accept_any_vlan, + u8 accept_any_vlan, + enum spq_mode comp_mode, + struct ecore_spq_comp_cb *p_comp_data); + +/** + * @brief ecore_eth_rx_queue_start - RX Queue Start Ramrod + * + * This ramrod initializes an RX Queue for a VPort. An Assert is generated if + * the VPort ID is not currently initialized. + * + * @param p_hwfn + * @param opaque_fid + * @p_params Inputs; Relative for PF [SB being an exception] + * @param bd_max_bytes Maximum bytes that can be placed on a BD + * @param bd_chain_phys_addr Physical address of BDs for receive. + * @param cqe_pbl_addr Physical address of the CQE PBL Table. + * @param cqe_pbl_size Size of the CQE PBL Table + * @param p_ret_params Pointed struct to be filled with outputs. + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t +ecore_eth_rx_queue_start(struct ecore_hwfn *p_hwfn, + u16 opaque_fid, + struct ecore_queue_start_common_params *p_params, + u16 bd_max_bytes, + dma_addr_t bd_chain_phys_addr, + dma_addr_t cqe_pbl_addr, + u16 cqe_pbl_size, + struct ecore_rxq_start_ret_params *p_ret_params); + +/** + * @brief ecore_eth_rx_queue_stop - This ramrod closes an Rx queue + * + * @param p_hwfn + * @param p_rxq Handler of queue to close + * @param eq_completion_only If True completion will be on + * EQe, if False completion will be + * on EQe if p_hwfn opaque + * different from the RXQ opaque + * otherwise on CQe. + * @param cqe_completion If True completion will be + * receive on CQe. + * @return enum _ecore_status_t + */ +enum _ecore_status_t +ecore_eth_rx_queue_stop(struct ecore_hwfn *p_hwfn, + void *p_rxq, + bool eq_completion_only, + bool cqe_completion); + +/** + * @brief - TX Queue Start Ramrod + * + * This ramrod initializes a TX Queue for a VPort. An Assert is generated if + * the VPort is not currently initialized. + * + * @param p_hwfn + * @param opaque_fid + * @p_params + * @param tc traffic class to use with this L2 txq + * @param pbl_addr address of the pbl array + * @param pbl_size number of entries in pbl + * @param p_ret_params Pointer to fill the return parameters in. 
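+ * @note For a PF, p_ret_params->p_doorbell is returned pointing into the
+ * doorbell BAR at the queue CID's DEMS address (DQ_DEMS_LEGACY); see
+ * ecore_eth_pf_tx_queue_start() in ecore_l2.c.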
+ * + * @return enum _ecore_status_t + */ +enum _ecore_status_t +ecore_eth_tx_queue_start(struct ecore_hwfn *p_hwfn, + u16 opaque_fid, + struct ecore_queue_start_common_params *p_params, + u8 tc, + dma_addr_t pbl_addr, + u16 pbl_size, + struct ecore_txq_start_ret_params *p_ret_params); + +/** + * @brief ecore_eth_tx_queue_stop - closes a Tx queue + * + * @param p_hwfn + * @param p_txq - handle to Tx queue needed to be closed + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_eth_tx_queue_stop(struct ecore_hwfn *p_hwfn, + void *p_txq); + +enum ecore_tpa_mode { + ECORE_TPA_MODE_NONE, + ECORE_TPA_MODE_RSC, + ECORE_TPA_MODE_GRO, + ECORE_TPA_MODE_MAX +}; + +struct ecore_sp_vport_start_params { + enum ecore_tpa_mode tpa_mode; + bool remove_inner_vlan; /* Inner VLAN removal is enabled */ + bool tx_switching; /* Vport supports tx-switching */ + bool handle_ptp_pkts; /* Handle PTP packets */ + bool only_untagged; /* Untagged pkt control */ + bool drop_ttl0; /* Drop packets with TTL = 0 */ + u8 max_buffers_per_cqe; + u32 concrete_fid; + u16 opaque_fid; + u8 vport_id; /* VPORT ID */ + u16 mtu; /* VPORT MTU */ + bool zero_placement_offset; + bool check_mac; + bool check_ethtype; + + /* Strict behavior on transmission errors */ + bool b_err_illegal_vlan_mode; + bool b_err_illegal_inband_mode; + bool b_err_vlan_insert_with_inband; + bool b_err_small_pkt; + bool b_err_big_pkt; + bool b_err_anti_spoof; + bool b_err_ctrl_frame; + bool b_en_rgfs; + bool b_en_tgfs; +}; + +/** + * @brief ecore_sp_vport_start - + * + * This ramrod initializes a VPort. An Assert if generated if the Function ID + * of the VPort is not enabled. + * + * @param p_hwfn + * @param p_params VPORT start params + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t +ecore_sp_vport_start(struct ecore_hwfn *p_hwfn, + struct ecore_sp_vport_start_params *p_params); + +struct ecore_sp_vport_update_params { + u16 opaque_fid; + u8 vport_id; + u8 update_vport_active_rx_flg; + u8 vport_active_rx_flg; + u8 update_vport_active_tx_flg; + u8 vport_active_tx_flg; + u8 update_inner_vlan_removal_flg; + u8 inner_vlan_removal_flg; + u8 silent_vlan_removal_flg; + u8 update_default_vlan_enable_flg; + u8 default_vlan_enable_flg; + u8 update_default_vlan_flg; + u16 default_vlan; + u8 update_tx_switching_flg; + u8 tx_switching_flg; + u8 update_approx_mcast_flg; + u8 update_anti_spoofing_en_flg; + u8 anti_spoofing_en; + u8 update_accept_any_vlan_flg; + u8 accept_any_vlan; + u32 bins[8]; + struct ecore_rss_params *rss_params; + struct ecore_filter_accept_flags accept_flags; + struct ecore_sge_tpa_params *sge_tpa_params; + /* MTU change - notice this requires the vport to be disabled. + * If non-zero, value would be used. + */ + u16 mtu; + u8 update_ctl_frame_check; + u8 mac_chk_en; + u8 ethtype_chk_en; +}; + +/** + * @brief ecore_sp_vport_update - + * + * This ramrod updates the parameters of the VPort. Every field can be updated + * independently, according to flags. + * + * This ramrod is also used to set the VPort state to active after creation. + * An Assert is generated if the VPort does not contain an RX queue. + * + * @param p_hwfn + * @param p_params + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t +ecore_sp_vport_update(struct ecore_hwfn *p_hwfn, + struct ecore_sp_vport_update_params *p_params, + enum spq_mode comp_mode, + struct ecore_spq_comp_cb *p_comp_data); +/** + * @brief ecore_sp_vport_stop - + * + * This ramrod closes a VPort after all its RX and TX queues are terminated. 
+ * An Assert is generated if any queues are left open. + * + * @param p_hwfn + * @param opaque_fid + * @param vport_id VPort ID + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_sp_vport_stop(struct ecore_hwfn *p_hwfn, + u16 opaque_fid, + u8 vport_id); + +enum _ecore_status_t +ecore_sp_eth_filter_ucast(struct ecore_hwfn *p_hwfn, + u16 opaque_fid, + struct ecore_filter_ucast *p_filter_cmd, + enum spq_mode comp_mode, + struct ecore_spq_comp_cb *p_comp_data); + +/** + * @brief ecore_sp_rx_eth_queues_update - + * + * This ramrod updates an RX queue. It is used for setting the active state + * of the queue and updating the TPA and SGE parameters. + * + * @note Final phase API. + * + * @param p_hwfn + * @param pp_rxq_handlers An array of queue handlers to be updated. + * @param num_rxqs number of queues to update. + * @param complete_cqe_flg Post completion to the CQE Ring if set + * @param complete_event_flg Post completion to the Event Ring if set + * @param comp_mode + * @param p_comp_data + * + * @return enum _ecore_status_t + */ + +enum _ecore_status_t +ecore_sp_eth_rx_queues_update(struct ecore_hwfn *p_hwfn, + void **pp_rxq_handlers, + u8 num_rxqs, + u8 complete_cqe_flg, + u8 complete_event_flg, + enum spq_mode comp_mode, + struct ecore_spq_comp_cb *p_comp_data); + +void __ecore_get_vport_stats(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_eth_stats *stats, + u16 statistics_bin, bool b_get_port_stats); + +void ecore_get_vport_stats(struct ecore_dev *p_dev, + struct ecore_eth_stats *stats); + +void ecore_reset_vport_stats(struct ecore_dev *p_dev); + +/** + * @brief ecore_arfs_mode_configure - + * + * Enable or disable RFS mode. At least one of tcp or udp, and at least one + * of ipv4 or ipv6, must be true to enable RFS mode. + * + * @param p_hwfn + * @param p_ptt + * @param p_cfg_params arfs mode configuration parameters. + * + */ +void ecore_arfs_mode_configure(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_arfs_config_params *p_cfg_params); + +struct ecore_ntuple_filter_params { + /* Physically mapped address containing header of buffer to be used + * as filter. + */ + dma_addr_t addr; + + /* Length of header in bytes */ + u16 length; + + /* Relative queue-id to receive classified packet */ + #define ECORE_RFS_NTUPLE_QID_RSS ((u16)-1) + u16 qid; + + /* Identifier can either be according to vport-id or vfid */ + bool b_is_vf; + u8 vport_id; + u8 vf_id; + + /* true if this filter is to be added, else to be removed */ + bool b_is_add; + + /* If packet needs to be dropped */ + bool b_is_drop; +}; + +/** + * @brief - ecore_configure_rfs_ntuple_filter + * + * This ramrod should be used to add or remove an aRFS HW filter. + * + * @param p_hwfn + * @param p_cb Used for ECORE_SPQ_MODE_CB, where the client initializes + * it with a cookie and a callback function address; if not + * using this mode, the client must pass NULL. + * @param p_params + */ +enum _ecore_status_t +ecore_configure_rfs_ntuple_filter(struct ecore_hwfn *p_hwfn, + struct ecore_spq_comp_cb *p_cb, + struct ecore_ntuple_filter_params *p_params); + +/** + * @brief - ecore_update_eth_rss_ind_table_entry + * + * This function is used to update an RSS indirection table entry in FW RAM + * instead of using the SP vport update ramrod with RSS params. + * + * Notice: + * This function supports only one outstanding command per engine. Ecore + * clients which use this function should call ecore_mcp_ind_table_lock() prior + * to it and ecore_mcp_ind_table_unlock() after it. 
+ * + * @params p_hwfn + * @params vport_id + * @params ind_table_index + * @params ind_table_value + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t +ecore_update_eth_rss_ind_table_entry(struct ecore_hwfn *p_hwfn, + u8 vport_id, + u8 ind_table_index, + u16 ind_table_value); +#endif diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_mcp.c b/src/spdk/dpdk/drivers/net/qede/base/ecore_mcp.c new file mode 100644 index 000000000..4d20da138 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_mcp.c @@ -0,0 +1,4339 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. + * All rights reserved. + * www.cavium.com + */ + +#include "bcm_osal.h" +#include "ecore.h" +#include "ecore_status.h" +#include "nvm_cfg.h" +#include "ecore_mcp.h" +#include "mcp_public.h" +#include "reg_addr.h" +#include "ecore_hw.h" +#include "ecore_init_fw_funcs.h" +#include "ecore_sriov.h" +#include "ecore_vf.h" +#include "ecore_iov_api.h" +#include "ecore_gtt_reg_addr.h" +#include "ecore_iro.h" +#include "ecore_dcbx.h" +#include "ecore_sp_commands.h" +#include "ecore_cxt.h" + +#define GRCBASE_MCP 0xe00000 + +#define ECORE_MCP_RESP_ITER_US 10 +#define ECORE_DRV_MB_MAX_RETRIES (500 * 1000) /* Account for 5 sec */ +#define ECORE_MCP_RESET_RETRIES (50 * 1000) /* Account for 500 msec */ + +#ifndef ASIC_ONLY +/* Non-ASIC: + * The waiting interval is multiplied by 100 to reduce the impact of the + * built-in delay of 100usec in each ecore_rd(). + * In addition, a factor of 4 comparing to ASIC is applied. + */ +#define ECORE_EMUL_MCP_RESP_ITER_US (ECORE_MCP_RESP_ITER_US * 100) +#define ECORE_EMUL_DRV_MB_MAX_RETRIES ((ECORE_DRV_MB_MAX_RETRIES / 100) * 4) +#define ECORE_EMUL_MCP_RESET_RETRIES ((ECORE_MCP_RESET_RETRIES / 100) * 4) +#endif + +#define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val) \ + ecore_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \ + _val) + +#define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \ + ecore_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset)) + +#define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val) \ + DRV_INNER_WR(p_hwfn, _p_ptt, drv_mb_addr, \ + OFFSETOF(struct public_drv_mb, _field), _val) + +#define DRV_MB_RD(_p_hwfn, _p_ptt, _field) \ + DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \ + OFFSETOF(struct public_drv_mb, _field)) + +#define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \ + DRV_ID_PDA_COMP_VER_OFFSET) + +#define MCP_BYTES_PER_MBIT_OFFSET 17 + +#ifndef ASIC_ONLY +static int loaded; +static int loaded_port[MAX_NUM_PORTS] = { 0 }; +#endif + +bool ecore_mcp_is_init(struct ecore_hwfn *p_hwfn) +{ + if (!p_hwfn->mcp_info || !p_hwfn->mcp_info->public_base) + return false; + return true; +} + +void ecore_mcp_cmd_port_init(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) +{ + u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base, + PUBLIC_PORT); + u32 mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt, addr); + + p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize, + MFW_PORT(p_hwfn)); + DP_VERBOSE(p_hwfn, ECORE_MSG_SP, + "port_addr = 0x%x, port_id 0x%02x\n", + p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn)); +} + +void ecore_mcp_read_mb(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) +{ + u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length); + OSAL_BE32 tmp; + u32 i; + +#ifndef ASIC_ONLY + if (CHIP_REV_IS_TEDIBEAR(p_hwfn->p_dev)) + return; +#endif + + if (!p_hwfn->mcp_info->public_base) + return; + + for (i = 0; i < length; i++) { + tmp = ecore_rd(p_hwfn, p_ptt, + 
p_hwfn->mcp_info->mfw_mb_addr + + (i << 2) + sizeof(u32)); + + ((u32 *)p_hwfn->mcp_info->mfw_mb_cur)[i] = + OSAL_BE32_TO_CPU(tmp); + } +} + +struct ecore_mcp_cmd_elem { + osal_list_entry_t list; + struct ecore_mcp_mb_params *p_mb_params; + u16 expected_seq_num; + bool b_is_completed; +}; + +/* Must be called while cmd_lock is acquired */ +static struct ecore_mcp_cmd_elem * +ecore_mcp_cmd_add_elem(struct ecore_hwfn *p_hwfn, + struct ecore_mcp_mb_params *p_mb_params, + u16 expected_seq_num) +{ + struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL; + + p_cmd_elem = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC, + sizeof(*p_cmd_elem)); + if (!p_cmd_elem) { + DP_NOTICE(p_hwfn, false, + "Failed to allocate `struct ecore_mcp_cmd_elem'\n"); + goto out; + } + + p_cmd_elem->p_mb_params = p_mb_params; + p_cmd_elem->expected_seq_num = expected_seq_num; + OSAL_LIST_PUSH_HEAD(&p_cmd_elem->list, &p_hwfn->mcp_info->cmd_list); +out: + return p_cmd_elem; +} + +/* Must be called while cmd_lock is acquired */ +static void ecore_mcp_cmd_del_elem(struct ecore_hwfn *p_hwfn, + struct ecore_mcp_cmd_elem *p_cmd_elem) +{ + OSAL_LIST_REMOVE_ENTRY(&p_cmd_elem->list, &p_hwfn->mcp_info->cmd_list); + OSAL_FREE(p_hwfn->p_dev, p_cmd_elem); +} + +/* Must be called while cmd_lock is acquired */ +static struct ecore_mcp_cmd_elem * +ecore_mcp_cmd_get_elem(struct ecore_hwfn *p_hwfn, u16 seq_num) +{ + struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL; + + OSAL_LIST_FOR_EACH_ENTRY(p_cmd_elem, &p_hwfn->mcp_info->cmd_list, list, + struct ecore_mcp_cmd_elem) { + if (p_cmd_elem->expected_seq_num == seq_num) + return p_cmd_elem; + } + + return OSAL_NULL; +} + +enum _ecore_status_t ecore_mcp_free(struct ecore_hwfn *p_hwfn) +{ + if (p_hwfn->mcp_info) { + struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL, *p_tmp; + + OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_cur); + OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_shadow); + + OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock); + OSAL_LIST_FOR_EACH_ENTRY_SAFE(p_cmd_elem, p_tmp, + &p_hwfn->mcp_info->cmd_list, list, + struct ecore_mcp_cmd_elem) { + ecore_mcp_cmd_del_elem(p_hwfn, p_cmd_elem); + } + OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock); + +#ifdef CONFIG_ECORE_LOCK_ALLOC + OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->cmd_lock); + OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->link_lock); +#endif + } + + OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info); + + return ECORE_SUCCESS; +} + +/* Maximum of 1 sec to wait for the SHMEM ready indication */ +#define ECORE_MCP_SHMEM_RDY_MAX_RETRIES 20 +#define ECORE_MCP_SHMEM_RDY_ITER_MS 50 + +static enum _ecore_status_t ecore_load_mcp_offsets(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + struct ecore_mcp_info *p_info = p_hwfn->mcp_info; + u32 drv_mb_offsize, mfw_mb_offsize, val; + u8 cnt = ECORE_MCP_SHMEM_RDY_MAX_RETRIES; + u8 msec = ECORE_MCP_SHMEM_RDY_ITER_MS; + u32 mcp_pf_id = MCP_PF_ID(p_hwfn); + + val = ecore_rd(p_hwfn, p_ptt, MCP_REG_CACHE_PAGING_ENABLE); + p_info->public_base = ecore_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR); + if (!p_info->public_base) { + DP_NOTICE(p_hwfn, false, + "The address of the MCP scratch-pad is not configured\n"); +#ifndef ASIC_ONLY + /* Zeroed "public_base" implies no MFW */ + if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) + DP_INFO(p_hwfn, "Emulation: Assume no MFW\n"); +#endif + return ECORE_INVAL; + } + + p_info->public_base |= GRCBASE_MCP; + + /* Get the MFW MB address and number of supported messages */ + mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt, + SECTION_OFFSIZE_ADDR(p_info->public_base, + PUBLIC_MFW_MB)); + 
p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id); + p_info->mfw_mb_length = (u16)ecore_rd(p_hwfn, p_ptt, + p_info->mfw_mb_addr); + + /* @@@TBD: + * The driver can notify that there was an MCP reset, and read the SHMEM + * values before the MFW has completed initializing them. + * As a temporary solution, the "sup_msgs" field is used as a data ready + * indication. + * This should be replaced with an actual indication when it is provided + * by the MFW. + */ + while (!p_info->mfw_mb_length && cnt--) { + OSAL_MSLEEP(msec); + p_info->mfw_mb_length = (u16)ecore_rd(p_hwfn, p_ptt, + p_info->mfw_mb_addr); + } + + if (!cnt) { + DP_NOTICE(p_hwfn, false, + "Failed to get the SHMEM ready notification after %d msec\n", + ECORE_MCP_SHMEM_RDY_MAX_RETRIES * msec); + return ECORE_TIMEOUT; + } + + /* Calculate the driver and MFW mailbox address */ + drv_mb_offsize = ecore_rd(p_hwfn, p_ptt, + SECTION_OFFSIZE_ADDR(p_info->public_base, + PUBLIC_DRV_MB)); + p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id); + DP_VERBOSE(p_hwfn, ECORE_MSG_SP, + "drv_mb_offsiz = 0x%x, drv_mb_addr = 0x%x" + " mcp_pf_id = 0x%x\n", + drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id); + + /* Get the current driver mailbox sequence before sending + * the first command + */ + p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) & + DRV_MSG_SEQ_NUMBER_MASK; + + /* Get current FW pulse sequence */ + p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) & + DRV_PULSE_SEQ_MASK; + + p_info->mcp_hist = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0); + + return ECORE_SUCCESS; +} + +enum _ecore_status_t ecore_mcp_cmd_init(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + struct ecore_mcp_info *p_info; + u32 size; + + /* Allocate mcp_info structure */ + p_hwfn->mcp_info = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, + sizeof(*p_hwfn->mcp_info)); + if (!p_hwfn->mcp_info) { + DP_NOTICE(p_hwfn, false, "Failed to allocate mcp_info\n"); + return ECORE_NOMEM; + } + p_info = p_hwfn->mcp_info; + + /* Initialize the MFW spinlocks */ +#ifdef CONFIG_ECORE_LOCK_ALLOC + if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->cmd_lock)) { + OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info); + return ECORE_NOMEM; + } + if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->link_lock)) { + OSAL_SPIN_LOCK_DEALLOC(&p_info->cmd_lock); + OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info); + return ECORE_NOMEM; + } +#endif + OSAL_SPIN_LOCK_INIT(&p_info->cmd_lock); + OSAL_SPIN_LOCK_INIT(&p_info->link_lock); + + OSAL_LIST_INIT(&p_info->cmd_list); + + if (ecore_load_mcp_offsets(p_hwfn, p_ptt) != ECORE_SUCCESS) { + DP_NOTICE(p_hwfn, false, "MCP is not initialized\n"); + /* Do not free mcp_info here, since "public_base" indicates that + * the MCP is not initialized + */ + return ECORE_SUCCESS; + } + + size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32); + p_info->mfw_mb_cur = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size); + p_info->mfw_mb_shadow = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size); + if (!p_info->mfw_mb_shadow || !p_info->mfw_mb_addr) + goto err; + + return ECORE_SUCCESS; + +err: + DP_NOTICE(p_hwfn, false, "Failed to allocate mcp memory\n"); + ecore_mcp_free(p_hwfn); + return ECORE_NOMEM; +} + +static void ecore_mcp_reread_offsets(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + u32 generic_por_0 = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0); + + /* Use MCP history register to check if MCP reset occurred between init + * time and now. 
+ */ + if (p_hwfn->mcp_info->mcp_hist != generic_por_0) { + DP_VERBOSE(p_hwfn, ECORE_MSG_SP, + "Rereading MCP offsets [mcp_hist 0x%08x, generic_por_0 0x%08x]\n", + p_hwfn->mcp_info->mcp_hist, generic_por_0); + + ecore_load_mcp_offsets(p_hwfn, p_ptt); + ecore_mcp_cmd_port_init(p_hwfn, p_ptt); + } +} + +enum _ecore_status_t ecore_mcp_reset(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + u32 prev_generic_por_0, seq, delay = ECORE_MCP_RESP_ITER_US, cnt = 0; + u32 retries = ECORE_MCP_RESET_RETRIES; + enum _ecore_status_t rc = ECORE_SUCCESS; + +#ifndef ASIC_ONLY + if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) { + delay = ECORE_EMUL_MCP_RESP_ITER_US; + retries = ECORE_EMUL_MCP_RESET_RETRIES; + } +#endif + if (p_hwfn->mcp_info->b_block_cmd) { + DP_NOTICE(p_hwfn, false, + "The MFW is not responsive. Avoid sending MCP_RESET mailbox command.\n"); + return ECORE_ABORTED; + } + + /* Ensure that only a single thread is accessing the mailbox */ + OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock); + + prev_generic_por_0 = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0); + + /* Set drv command along with the updated sequence */ + ecore_mcp_reread_offsets(p_hwfn, p_ptt); + seq = ++p_hwfn->mcp_info->drv_mb_seq; + DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (DRV_MSG_CODE_MCP_RESET | seq)); + + /* Give the MFW up to 500 second (50*1000*10usec) to resume */ + do { + OSAL_UDELAY(delay); + + if (ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0) != + prev_generic_por_0) + break; + } while (cnt++ < retries); + + if (ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0) != + prev_generic_por_0) { + DP_VERBOSE(p_hwfn, ECORE_MSG_SP, + "MCP was reset after %d usec\n", cnt * delay); + } else { + DP_ERR(p_hwfn, "Failed to reset MCP\n"); + rc = ECORE_AGAIN; + } + + OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock); + + return rc; +} + +#ifndef ASIC_ONLY +static void ecore_emul_mcp_load_req(struct ecore_hwfn *p_hwfn, + struct ecore_mcp_mb_params *p_mb_params) +{ + if (GET_MFW_FIELD(p_mb_params->param, DRV_ID_MCP_HSI_VER) != + 1 /* ECORE_LOAD_REQ_HSI_VER_1 */) { + p_mb_params->mcp_resp = FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1; + return; + } + + if (!loaded) + p_mb_params->mcp_resp = FW_MSG_CODE_DRV_LOAD_ENGINE; + else if (!loaded_port[p_hwfn->port_id]) + p_mb_params->mcp_resp = FW_MSG_CODE_DRV_LOAD_PORT; + else + p_mb_params->mcp_resp = FW_MSG_CODE_DRV_LOAD_FUNCTION; + + /* On CMT, always tell that it's engine */ + if (ECORE_IS_CMT(p_hwfn->p_dev)) + p_mb_params->mcp_resp = FW_MSG_CODE_DRV_LOAD_ENGINE; + + loaded++; + loaded_port[p_hwfn->port_id]++; + + DP_VERBOSE(p_hwfn, ECORE_MSG_SP, + "Load phase: 0x%08x load cnt: 0x%x port id=%d port_load=%d\n", + p_mb_params->mcp_resp, loaded, p_hwfn->port_id, + loaded_port[p_hwfn->port_id]); +} + +static void ecore_emul_mcp_unload_req(struct ecore_hwfn *p_hwfn) +{ + loaded--; + loaded_port[p_hwfn->port_id]--; + DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Unload cnt: 0x%x\n", loaded); +} + +static enum _ecore_status_t +ecore_emul_mcp_cmd(struct ecore_hwfn *p_hwfn, + struct ecore_mcp_mb_params *p_mb_params) +{ + if (!CHIP_REV_IS_EMUL(p_hwfn->p_dev)) + return ECORE_INVAL; + + switch (p_mb_params->cmd) { + case DRV_MSG_CODE_LOAD_REQ: + ecore_emul_mcp_load_req(p_hwfn, p_mb_params); + break; + case DRV_MSG_CODE_UNLOAD_REQ: + ecore_emul_mcp_unload_req(p_hwfn); + break; + case DRV_MSG_CODE_GET_MFW_FEATURE_SUPPORT: + case DRV_MSG_CODE_RESOURCE_CMD: + case DRV_MSG_CODE_MDUMP_CMD: + case DRV_MSG_CODE_GET_ENGINE_CONFIG: + case DRV_MSG_CODE_GET_PPFID_BITMAP: + return ECORE_NOTIMPL; + default: + break; + } + + return 
ECORE_SUCCESS; +} +#endif + +/* Must be called while cmd_lock is acquired */ +static bool ecore_mcp_has_pending_cmd(struct ecore_hwfn *p_hwfn) +{ + struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL; + + /* There is at most one pending command at a certain time, and if it + * exists - it is placed at the HEAD of the list. + */ + if (!OSAL_LIST_IS_EMPTY(&p_hwfn->mcp_info->cmd_list)) { + p_cmd_elem = OSAL_LIST_FIRST_ENTRY(&p_hwfn->mcp_info->cmd_list, + struct ecore_mcp_cmd_elem, + list); + return !p_cmd_elem->b_is_completed; + } + + return false; +} + +/* Must be called while cmd_lock is acquired */ +static enum _ecore_status_t +ecore_mcp_update_pending_cmd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) +{ + struct ecore_mcp_mb_params *p_mb_params; + struct ecore_mcp_cmd_elem *p_cmd_elem; + u32 mcp_resp; + u16 seq_num; + + mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header); + seq_num = (u16)(mcp_resp & FW_MSG_SEQ_NUMBER_MASK); + + /* Return if no new non-handled response has been received */ + if (seq_num != p_hwfn->mcp_info->drv_mb_seq) + return ECORE_AGAIN; + + p_cmd_elem = ecore_mcp_cmd_get_elem(p_hwfn, seq_num); + if (!p_cmd_elem) { + DP_ERR(p_hwfn, + "Failed to find a pending mailbox cmd that expects sequence number %d\n", + seq_num); + return ECORE_UNKNOWN_ERROR; + } + + p_mb_params = p_cmd_elem->p_mb_params; + + /* Get the MFW response along with the sequence number */ + p_mb_params->mcp_resp = mcp_resp; + + /* Get the MFW param */ + p_mb_params->mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param); + + /* Get the union data */ + if (p_mb_params->p_data_dst != OSAL_NULL && + p_mb_params->data_dst_size) { + u32 union_data_addr = p_hwfn->mcp_info->drv_mb_addr + + OFFSETOF(struct public_drv_mb, + union_data); + ecore_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst, + union_data_addr, p_mb_params->data_dst_size); + } + + p_cmd_elem->b_is_completed = true; + + return ECORE_SUCCESS; +} + +/* Must be called while cmd_lock is acquired */ +static void __ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_mcp_mb_params *p_mb_params, + u16 seq_num) +{ + union drv_union_data union_data; + u32 union_data_addr; + + /* Set the union data */ + union_data_addr = p_hwfn->mcp_info->drv_mb_addr + + OFFSETOF(struct public_drv_mb, union_data); + OSAL_MEM_ZERO(&union_data, sizeof(union_data)); + if (p_mb_params->p_data_src != OSAL_NULL && p_mb_params->data_src_size) + OSAL_MEMCPY(&union_data, p_mb_params->p_data_src, + p_mb_params->data_src_size); + ecore_memcpy_to(p_hwfn, p_ptt, union_data_addr, &union_data, + sizeof(union_data)); + + /* Set the drv param */ + DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, p_mb_params->param); + + /* Set the drv command along with the sequence number */ + DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (p_mb_params->cmd | seq_num)); + + DP_VERBOSE(p_hwfn, ECORE_MSG_HW, + "MFW mailbox: command 0x%08x param 0x%08x\n", + (p_mb_params->cmd | seq_num), p_mb_params->param); +} + +static void ecore_mcp_cmd_set_blocking(struct ecore_hwfn *p_hwfn, + bool block_cmd) +{ + p_hwfn->mcp_info->b_block_cmd = block_cmd; + + DP_INFO(p_hwfn, "%s sending of mailbox commands to the MFW\n", + block_cmd ? 
"Block" : "Unblock"); +} + +void ecore_mcp_print_cpu_info(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + u32 cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2; + u32 delay = ECORE_MCP_RESP_ITER_US; + +#ifndef ASIC_ONLY + if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) + delay = ECORE_EMUL_MCP_RESP_ITER_US; +#endif + cpu_mode = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE); + cpu_state = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE); + cpu_pc_0 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER); + OSAL_UDELAY(delay); + cpu_pc_1 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER); + OSAL_UDELAY(delay); + cpu_pc_2 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER); + + DP_NOTICE(p_hwfn, false, + "MCP CPU info: mode 0x%08x, state 0x%08x, pc {0x%08x, 0x%08x, 0x%08x}\n", + cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2); +} + +static enum _ecore_status_t +_ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, + struct ecore_mcp_mb_params *p_mb_params, + u32 max_retries, u32 delay) +{ + struct ecore_mcp_cmd_elem *p_cmd_elem; + u32 cnt = 0; + u16 seq_num; + enum _ecore_status_t rc = ECORE_SUCCESS; + + /* Wait until the mailbox is non-occupied */ + do { + /* Exit the loop if there is no pending command, or if the + * pending command is completed during this iteration. + * The spinlock stays locked until the command is sent. + */ + + OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock); + + if (!ecore_mcp_has_pending_cmd(p_hwfn)) + break; + + rc = ecore_mcp_update_pending_cmd(p_hwfn, p_ptt); + if (rc == ECORE_SUCCESS) + break; + else if (rc != ECORE_AGAIN) + goto err; + + OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock); + OSAL_UDELAY(delay); + OSAL_MFW_CMD_PREEMPT(p_hwfn); + } while (++cnt < max_retries); + + if (cnt >= max_retries) { + DP_NOTICE(p_hwfn, false, + "The MFW mailbox is occupied by an uncompleted command. Failed to send command 0x%08x [param 0x%08x].\n", + p_mb_params->cmd, p_mb_params->param); + return ECORE_AGAIN; + } + + /* Send the mailbox command */ + ecore_mcp_reread_offsets(p_hwfn, p_ptt); + seq_num = ++p_hwfn->mcp_info->drv_mb_seq; + p_cmd_elem = ecore_mcp_cmd_add_elem(p_hwfn, p_mb_params, seq_num); + if (!p_cmd_elem) { + rc = ECORE_NOMEM; + goto err; + } + + __ecore_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, seq_num); + OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock); + + /* Wait for the MFW response */ + do { + /* Exit the loop if the command is already completed, or if the + * command is completed during this iteration. + * The spinlock stays locked until the list element is removed. 
+ */ + + OSAL_UDELAY(delay); + OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock); + + if (p_cmd_elem->b_is_completed) + break; + + rc = ecore_mcp_update_pending_cmd(p_hwfn, p_ptt); + if (rc == ECORE_SUCCESS) + break; + else if (rc != ECORE_AGAIN) + goto err; + + OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock); + OSAL_MFW_CMD_PREEMPT(p_hwfn); + } while (++cnt < max_retries); + + if (cnt >= max_retries) { + DP_NOTICE(p_hwfn, false, + "The MFW failed to respond to command 0x%08x [param 0x%08x].\n", + p_mb_params->cmd, p_mb_params->param); + ecore_mcp_print_cpu_info(p_hwfn, p_ptt); + + OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock); + ecore_mcp_cmd_del_elem(p_hwfn, p_cmd_elem); + OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock); + + ecore_mcp_cmd_set_blocking(p_hwfn, true); + ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_MFW_RESP_FAIL); + return ECORE_AGAIN; + } + + ecore_mcp_cmd_del_elem(p_hwfn, p_cmd_elem); + OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock); + + DP_VERBOSE(p_hwfn, ECORE_MSG_HW, + "MFW mailbox: response 0x%08x param 0x%08x [after %d.%03d ms]\n", + p_mb_params->mcp_resp, p_mb_params->mcp_param, + (cnt * delay) / 1000, (cnt * delay) % 1000); + + /* Clear the sequence number from the MFW response */ + p_mb_params->mcp_resp &= FW_MSG_CODE_MASK; + + return ECORE_SUCCESS; + +err: + OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock); + return rc; +} + +static enum _ecore_status_t +ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_mcp_mb_params *p_mb_params) +{ + osal_size_t union_data_size = sizeof(union drv_union_data); + u32 max_retries = ECORE_DRV_MB_MAX_RETRIES; + u32 usecs = ECORE_MCP_RESP_ITER_US; + +#ifndef ASIC_ONLY + if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && !ecore_mcp_is_init(p_hwfn)) + return ecore_emul_mcp_cmd(p_hwfn, p_mb_params); + + if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) { + max_retries = ECORE_EMUL_DRV_MB_MAX_RETRIES; + usecs = ECORE_EMUL_MCP_RESP_ITER_US; + } +#endif + if (ECORE_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP)) { + max_retries = DIV_ROUND_UP(max_retries, 1000); + usecs *= 1000; + } + + /* MCP not initialized */ + if (!ecore_mcp_is_init(p_hwfn)) { + DP_NOTICE(p_hwfn, true, "MFW is not initialized!\n"); + return ECORE_BUSY; + } + + if (p_mb_params->data_src_size > union_data_size || + p_mb_params->data_dst_size > union_data_size) { + DP_ERR(p_hwfn, + "The provided size is larger than the union data size [src_size %u, dst_size %u, union_data_size %zu]\n", + p_mb_params->data_src_size, p_mb_params->data_dst_size, + union_data_size); + return ECORE_INVAL; + } + + if (p_hwfn->mcp_info->b_block_cmd) { + DP_NOTICE(p_hwfn, false, + "The MFW is not responsive. 
Avoid sending mailbox command 0x%08x [param 0x%08x].\n", + p_mb_params->cmd, p_mb_params->param); + return ECORE_ABORTED; + } + + return _ecore_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, max_retries, + usecs); +} + +enum _ecore_status_t ecore_mcp_cmd(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, u32 cmd, u32 param, + u32 *o_mcp_resp, u32 *o_mcp_param) +{ + struct ecore_mcp_mb_params mb_params; + enum _ecore_status_t rc; + + OSAL_MEM_ZERO(&mb_params, sizeof(mb_params)); + mb_params.cmd = cmd; + mb_params.param = param; + rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); + if (rc != ECORE_SUCCESS) + return rc; + + *o_mcp_resp = mb_params.mcp_resp; + *o_mcp_param = mb_params.mcp_param; + + return ECORE_SUCCESS; +} + +enum _ecore_status_t ecore_mcp_nvm_wr_cmd(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 cmd, + u32 param, + u32 *o_mcp_resp, + u32 *o_mcp_param, + u32 i_txn_size, u32 *i_buf) +{ + struct ecore_mcp_mb_params mb_params; + enum _ecore_status_t rc; + + OSAL_MEM_ZERO(&mb_params, sizeof(mb_params)); + mb_params.cmd = cmd; + mb_params.param = param; + mb_params.p_data_src = i_buf; + mb_params.data_src_size = (u8)i_txn_size; + rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); + if (rc != ECORE_SUCCESS) + return rc; + + *o_mcp_resp = mb_params.mcp_resp; + *o_mcp_param = mb_params.mcp_param; + + return ECORE_SUCCESS; +} + +enum _ecore_status_t ecore_mcp_nvm_rd_cmd(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 cmd, + u32 param, + u32 *o_mcp_resp, + u32 *o_mcp_param, + u32 *o_txn_size, u32 *o_buf) +{ + struct ecore_mcp_mb_params mb_params; + u8 raw_data[MCP_DRV_NVM_BUF_LEN]; + enum _ecore_status_t rc; + + OSAL_MEM_ZERO(&mb_params, sizeof(mb_params)); + mb_params.cmd = cmd; + mb_params.param = param; + mb_params.p_data_dst = raw_data; + + /* Use the maximal value since the actual one is part of the response */ + mb_params.data_dst_size = MCP_DRV_NVM_BUF_LEN; + + rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); + if (rc != ECORE_SUCCESS) + return rc; + + *o_mcp_resp = mb_params.mcp_resp; + *o_mcp_param = mb_params.mcp_param; + + *o_txn_size = *o_mcp_param; + /* @DPDK */ + OSAL_MEMCPY(o_buf, raw_data, RTE_MIN(*o_txn_size, MCP_DRV_NVM_BUF_LEN)); + + return ECORE_SUCCESS; +} + +static bool +ecore_mcp_can_force_load(u8 drv_role, u8 exist_drv_role, + enum ecore_override_force_load override_force_load) +{ + bool can_force_load = false; + + switch (override_force_load) { + case ECORE_OVERRIDE_FORCE_LOAD_ALWAYS: + can_force_load = true; + break; + case ECORE_OVERRIDE_FORCE_LOAD_NEVER: + can_force_load = false; + break; + default: + can_force_load = (drv_role == DRV_ROLE_OS && + exist_drv_role == DRV_ROLE_PREBOOT) || + (drv_role == DRV_ROLE_KDUMP && + exist_drv_role == DRV_ROLE_OS); + break; + } + + return can_force_load; +} + +static enum _ecore_status_t ecore_mcp_cancel_load_req(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + u32 resp = 0, param = 0; + enum _ecore_status_t rc; + + rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CANCEL_LOAD_REQ, 0, + &resp, ¶m); + if (rc != ECORE_SUCCESS) + DP_NOTICE(p_hwfn, false, + "Failed to send cancel load request, rc = %d\n", rc); + + return rc; +} + +#define CONFIG_ECORE_L2_BITMAP_IDX (0x1 << 0) +#define CONFIG_ECORE_SRIOV_BITMAP_IDX (0x1 << 1) +#define CONFIG_ECORE_ROCE_BITMAP_IDX (0x1 << 2) +#define CONFIG_ECORE_IWARP_BITMAP_IDX (0x1 << 3) +#define CONFIG_ECORE_FCOE_BITMAP_IDX (0x1 << 4) +#define CONFIG_ECORE_ISCSI_BITMAP_IDX (0x1 << 5) +#define CONFIG_ECORE_LL2_BITMAP_IDX (0x1 << 6) + 
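Editorial note - a minimal usage sketch, not part of the upstream patch: the simple-command wrapper ecore_mcp_cmd() defined above is typically called in the pattern below. The wrapping function name (qede_example_simple_mb_cmd) and the comparison against FW_MSG_CODE_OK are illustrative assumptions only; the exact FW_MSG_CODE_* response a caller expects depends on the DRV_MSG_CODE_* it sends, as real callers in this file (e.g. ecore_mcp_cancel_load_req() above, ecore_mcp_load_done() below) show.

static enum _ecore_status_t
qede_example_simple_mb_cmd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			   u32 cmd, u32 param)
{
	u32 mcp_resp = 0, mcp_param = 0;
	enum _ecore_status_t rc;

	/* Commands without union data go through ecore_mcp_cmd(); commands
	 * that carry a payload use ecore_mcp_nvm_wr_cmd()/ecore_mcp_nvm_rd_cmd()
	 * or fill a struct ecore_mcp_mb_params for ecore_mcp_cmd_and_union().
	 */
	rc = ecore_mcp_cmd(p_hwfn, p_ptt, cmd, param, &mcp_resp, &mcp_param);
	if (rc != ECORE_SUCCESS)
		return rc; /* mailbox itself failed (blocked MFW, timeout, ...) */

	/* The MFW's verdict is returned in mcp_resp (an FW_MSG_CODE_* value,
	 * already masked with FW_MSG_CODE_MASK); mcp_param carries any
	 * command-specific output. FW_MSG_CODE_OK is used here only as an
	 * example of the expected code.
	 */
	if (mcp_resp != FW_MSG_CODE_OK)
		return ECORE_UNKNOWN_ERROR;

	return ECORE_SUCCESS;
}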
+static u32 ecore_get_config_bitmap(void) +{ + u32 config_bitmap = 0x0; + +#ifdef CONFIG_ECORE_L2 + config_bitmap |= CONFIG_ECORE_L2_BITMAP_IDX; +#endif +#ifdef CONFIG_ECORE_SRIOV + config_bitmap |= CONFIG_ECORE_SRIOV_BITMAP_IDX; +#endif +#ifdef CONFIG_ECORE_ROCE + config_bitmap |= CONFIG_ECORE_ROCE_BITMAP_IDX; +#endif +#ifdef CONFIG_ECORE_IWARP + config_bitmap |= CONFIG_ECORE_IWARP_BITMAP_IDX; +#endif +#ifdef CONFIG_ECORE_FCOE + config_bitmap |= CONFIG_ECORE_FCOE_BITMAP_IDX; +#endif +#ifdef CONFIG_ECORE_ISCSI + config_bitmap |= CONFIG_ECORE_ISCSI_BITMAP_IDX; +#endif +#ifdef CONFIG_ECORE_LL2 + config_bitmap |= CONFIG_ECORE_LL2_BITMAP_IDX; +#endif + + return config_bitmap; +} + +struct ecore_load_req_in_params { + u8 hsi_ver; +#define ECORE_LOAD_REQ_HSI_VER_DEFAULT 0 +#define ECORE_LOAD_REQ_HSI_VER_1 1 + u32 drv_ver_0; + u32 drv_ver_1; + u32 fw_ver; + u8 drv_role; + u8 timeout_val; + u8 force_cmd; + bool avoid_eng_reset; +}; + +struct ecore_load_req_out_params { + u32 load_code; + u32 exist_drv_ver_0; + u32 exist_drv_ver_1; + u32 exist_fw_ver; + u8 exist_drv_role; + u8 mfw_hsi_ver; + bool drv_exists; +}; + +static enum _ecore_status_t +__ecore_mcp_load_req(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, + struct ecore_load_req_in_params *p_in_params, + struct ecore_load_req_out_params *p_out_params) +{ + struct ecore_mcp_mb_params mb_params; + struct load_req_stc load_req; + struct load_rsp_stc load_rsp; + u32 hsi_ver; + enum _ecore_status_t rc; + + OSAL_MEM_ZERO(&load_req, sizeof(load_req)); + load_req.drv_ver_0 = p_in_params->drv_ver_0; + load_req.drv_ver_1 = p_in_params->drv_ver_1; + load_req.fw_ver = p_in_params->fw_ver; + SET_MFW_FIELD(load_req.misc0, LOAD_REQ_ROLE, p_in_params->drv_role); + SET_MFW_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO, + p_in_params->timeout_val); + SET_MFW_FIELD(load_req.misc0, LOAD_REQ_FORCE, p_in_params->force_cmd); + SET_MFW_FIELD(load_req.misc0, LOAD_REQ_FLAGS0, + p_in_params->avoid_eng_reset); + + hsi_ver = (p_in_params->hsi_ver == ECORE_LOAD_REQ_HSI_VER_DEFAULT) ? 
+ DRV_ID_MCP_HSI_VER_CURRENT : + (p_in_params->hsi_ver << DRV_ID_MCP_HSI_VER_OFFSET); + + OSAL_MEM_ZERO(&mb_params, sizeof(mb_params)); + mb_params.cmd = DRV_MSG_CODE_LOAD_REQ; + mb_params.param = PDA_COMP | hsi_ver | p_hwfn->p_dev->drv_type; + mb_params.p_data_src = &load_req; + mb_params.data_src_size = sizeof(load_req); + mb_params.p_data_dst = &load_rsp; + mb_params.data_dst_size = sizeof(load_rsp); + + DP_VERBOSE(p_hwfn, ECORE_MSG_SP, + "Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n", + mb_params.param, + GET_MFW_FIELD(mb_params.param, DRV_ID_DRV_INIT_HW), + GET_MFW_FIELD(mb_params.param, DRV_ID_DRV_TYPE), + GET_MFW_FIELD(mb_params.param, DRV_ID_MCP_HSI_VER), + GET_MFW_FIELD(mb_params.param, DRV_ID_PDA_COMP_VER)); + + if (p_in_params->hsi_ver != ECORE_LOAD_REQ_HSI_VER_1) + DP_VERBOSE(p_hwfn, ECORE_MSG_SP, + "Load Request: drv_ver 0x%08x_0x%08x, fw_ver 0x%08x, misc0 0x%08x [role %d, timeout %d, force %d, flags0 0x%x]\n", + load_req.drv_ver_0, load_req.drv_ver_1, + load_req.fw_ver, load_req.misc0, + GET_MFW_FIELD(load_req.misc0, LOAD_REQ_ROLE), + GET_MFW_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO), + GET_MFW_FIELD(load_req.misc0, LOAD_REQ_FORCE), + GET_MFW_FIELD(load_req.misc0, LOAD_REQ_FLAGS0)); + + rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); + if (rc != ECORE_SUCCESS) { + DP_NOTICE(p_hwfn, false, + "Failed to send load request, rc = %d\n", rc); + return rc; + } + + DP_VERBOSE(p_hwfn, ECORE_MSG_SP, + "Load Response: resp 0x%08x\n", mb_params.mcp_resp); + p_out_params->load_code = mb_params.mcp_resp; + + if (p_in_params->hsi_ver != ECORE_LOAD_REQ_HSI_VER_1 && + p_out_params->load_code != FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) { + DP_VERBOSE(p_hwfn, ECORE_MSG_SP, + "Load Response: exist_drv_ver 0x%08x_0x%08x, exist_fw_ver 0x%08x, misc0 0x%08x [exist_role %d, mfw_hsi %d, flags0 0x%x]\n", + load_rsp.drv_ver_0, load_rsp.drv_ver_1, + load_rsp.fw_ver, load_rsp.misc0, + GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_ROLE), + GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_HSI), + GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0)); + + p_out_params->exist_drv_ver_0 = load_rsp.drv_ver_0; + p_out_params->exist_drv_ver_1 = load_rsp.drv_ver_1; + p_out_params->exist_fw_ver = load_rsp.fw_ver; + p_out_params->exist_drv_role = + GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_ROLE); + p_out_params->mfw_hsi_ver = + GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_HSI); + p_out_params->drv_exists = + GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0) & + LOAD_RSP_FLAGS0_DRV_EXISTS; + } + + return ECORE_SUCCESS; +} + +static void ecore_get_mfw_drv_role(enum ecore_drv_role drv_role, + u8 *p_mfw_drv_role) +{ + switch (drv_role) { + case ECORE_DRV_ROLE_OS: + *p_mfw_drv_role = DRV_ROLE_OS; + break; + case ECORE_DRV_ROLE_KDUMP: + *p_mfw_drv_role = DRV_ROLE_KDUMP; + break; + } +} + +enum ecore_load_req_force { + ECORE_LOAD_REQ_FORCE_NONE, + ECORE_LOAD_REQ_FORCE_PF, + ECORE_LOAD_REQ_FORCE_ALL, +}; + +static void ecore_get_mfw_force_cmd(enum ecore_load_req_force force_cmd, + u8 *p_mfw_force_cmd) +{ + switch (force_cmd) { + case ECORE_LOAD_REQ_FORCE_NONE: + *p_mfw_force_cmd = LOAD_REQ_FORCE_NONE; + break; + case ECORE_LOAD_REQ_FORCE_PF: + *p_mfw_force_cmd = LOAD_REQ_FORCE_PF; + break; + case ECORE_LOAD_REQ_FORCE_ALL: + *p_mfw_force_cmd = LOAD_REQ_FORCE_ALL; + break; + } +} + +enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_load_req_params *p_params) +{ + struct ecore_load_req_out_params out_params; + struct ecore_load_req_in_params in_params; + u8 
mfw_drv_role = 0, mfw_force_cmd; + enum _ecore_status_t rc; + + OSAL_MEM_ZERO(&in_params, sizeof(in_params)); + in_params.hsi_ver = ECORE_LOAD_REQ_HSI_VER_DEFAULT; + in_params.drv_ver_0 = ECORE_VERSION; + in_params.drv_ver_1 = ecore_get_config_bitmap(); + in_params.fw_ver = STORM_FW_VERSION; + ecore_get_mfw_drv_role(p_params->drv_role, &mfw_drv_role); + in_params.drv_role = mfw_drv_role; + in_params.timeout_val = p_params->timeout_val; + ecore_get_mfw_force_cmd(ECORE_LOAD_REQ_FORCE_NONE, &mfw_force_cmd); + in_params.force_cmd = mfw_force_cmd; + in_params.avoid_eng_reset = p_params->avoid_eng_reset; + + OSAL_MEM_ZERO(&out_params, sizeof(out_params)); + rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params); + if (rc != ECORE_SUCCESS) + return rc; + + /* First handle cases where another load request should/might be sent: + * - MFW expects the old interface [HSI version = 1] + * - MFW responds that a force load request is required + */ + if (out_params.load_code == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) { + DP_INFO(p_hwfn, + "MFW refused a load request due to HSI > 1. Resending with HSI = 1.\n"); + + in_params.hsi_ver = ECORE_LOAD_REQ_HSI_VER_1; + OSAL_MEM_ZERO(&out_params, sizeof(out_params)); + rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params, + &out_params); + if (rc != ECORE_SUCCESS) + return rc; + } else if (out_params.load_code == + FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE) { + if (ecore_mcp_can_force_load(in_params.drv_role, + out_params.exist_drv_role, + p_params->override_force_load)) { + DP_INFO(p_hwfn, + "A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, 0x%08x_%08x}, existing={%d, 0x%08x, 0x%08x_%08x}]\n", + in_params.drv_role, in_params.fw_ver, + in_params.drv_ver_0, in_params.drv_ver_1, + out_params.exist_drv_role, + out_params.exist_fw_ver, + out_params.exist_drv_ver_0, + out_params.exist_drv_ver_1); + + ecore_get_mfw_force_cmd(ECORE_LOAD_REQ_FORCE_ALL, + &mfw_force_cmd); + + in_params.force_cmd = mfw_force_cmd; + OSAL_MEM_ZERO(&out_params, sizeof(out_params)); + rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params, + &out_params); + if (rc != ECORE_SUCCESS) + return rc; + } else { + DP_NOTICE(p_hwfn, false, + "A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}] - Avoid\n", + in_params.drv_role, in_params.fw_ver, + in_params.drv_ver_0, in_params.drv_ver_1, + out_params.exist_drv_role, + out_params.exist_fw_ver, + out_params.exist_drv_ver_0, + out_params.exist_drv_ver_1); + + ecore_mcp_cancel_load_req(p_hwfn, p_ptt); + return ECORE_BUSY; + } + } + + /* Now handle the other types of responses. + * The "REFUSED_HSI_1" and "REFUSED_REQUIRES_FORCE" responses are not + * expected here after the additional revised load requests were sent. + */ + switch (out_params.load_code) { + case FW_MSG_CODE_DRV_LOAD_ENGINE: + case FW_MSG_CODE_DRV_LOAD_PORT: + case FW_MSG_CODE_DRV_LOAD_FUNCTION: + if (out_params.mfw_hsi_ver != ECORE_LOAD_REQ_HSI_VER_1 && + out_params.drv_exists) { + /* The role and fw/driver version match, but the PF is + * already loaded and has not been unloaded gracefully. + * This is unexpected since a quasi-FLR request was + * previously sent as part of ecore_hw_prepare(). + */ + DP_NOTICE(p_hwfn, false, + "PF is already loaded - shouldn't have got here since a quasi-FLR request was previously sent!\n"); + return ECORE_INVAL; + } + break; + default: + DP_NOTICE(p_hwfn, false, + "Unexpected refusal to load request [resp 0x%08x]. 
Aborting.\n", + out_params.load_code); + return ECORE_BUSY; + } + + p_params->load_code = out_params.load_code; + + return ECORE_SUCCESS; +} + +enum _ecore_status_t ecore_mcp_load_done(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + u32 resp = 0, param = 0; + enum _ecore_status_t rc; + + rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_LOAD_DONE, 0, &resp, + ¶m); + if (rc != ECORE_SUCCESS) { + DP_NOTICE(p_hwfn, false, + "Failed to send a LOAD_DONE command, rc = %d\n", rc); + return rc; + } + + /* Check if there is a DID mismatch between nvm-cfg/efuse */ + if (param & FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR) + DP_NOTICE(p_hwfn, false, + "warning: device configuration is not supported on this board type. The device may not function as expected.\n"); + + return ECORE_SUCCESS; +} + +enum _ecore_status_t ecore_mcp_unload_req(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + u32 wol_param, mcp_resp, mcp_param; + + /* @DPDK */ + wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP; + + return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_UNLOAD_REQ, wol_param, + &mcp_resp, &mcp_param); +} + +enum _ecore_status_t ecore_mcp_unload_done(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + struct ecore_mcp_mb_params mb_params; + struct mcp_mac wol_mac; + + OSAL_MEM_ZERO(&mb_params, sizeof(mb_params)); + mb_params.cmd = DRV_MSG_CODE_UNLOAD_DONE; + + return ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); +} + +static void ecore_mcp_handle_vf_flr(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base, + PUBLIC_PATH); + u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr); + u32 path_addr = SECTION_ADDR(mfw_path_offsize, + ECORE_PATH_ID(p_hwfn)); + u32 disabled_vfs[EXT_VF_BITMAP_SIZE_IN_DWORDS]; + int i; + + OSAL_MEM_ZERO(disabled_vfs, EXT_VF_BITMAP_SIZE_IN_BYTES); + + DP_VERBOSE(p_hwfn, ECORE_MSG_SP, + "Reading Disabled VF information from [offset %08x]," + " path_addr %08x\n", + mfw_path_offsize, path_addr); + + for (i = 0; i < VF_BITMAP_SIZE_IN_DWORDS; i++) { + disabled_vfs[i] = ecore_rd(p_hwfn, p_ptt, + path_addr + + OFFSETOF(struct public_path, + mcp_vf_disabled) + + sizeof(u32) * i); + DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV), + "FLR-ed VFs [%08x,...,%08x] - %08x\n", + i * 32, (i + 1) * 32 - 1, disabled_vfs[i]); + } + + if (ecore_iov_mark_vf_flr(p_hwfn, disabled_vfs)) + OSAL_VF_FLR_UPDATE(p_hwfn); +} + +enum _ecore_status_t ecore_mcp_ack_vf_flr(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 *vfs_to_ack) +{ + struct ecore_mcp_mb_params mb_params; + enum _ecore_status_t rc; + u16 i; + + for (i = 0; i < VF_BITMAP_SIZE_IN_DWORDS; i++) + DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV), + "Acking VFs [%08x,...,%08x] - %08x\n", + i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]); + + OSAL_MEM_ZERO(&mb_params, sizeof(mb_params)); + mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE; + mb_params.p_data_src = vfs_to_ack; + mb_params.data_src_size = (u8)VF_BITMAP_SIZE_IN_BYTES; + rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, + &mb_params); + if (rc != ECORE_SUCCESS) { + DP_NOTICE(p_hwfn, false, + "Failed to pass ACK for VF flr to MFW\n"); + return ECORE_TIMEOUT; + } + + return rc; +} + +static void ecore_mcp_handle_transceiver_change(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + u32 transceiver_state; + + transceiver_state = ecore_rd(p_hwfn, p_ptt, + p_hwfn->mcp_info->port_addr + + OFFSETOF(struct public_port, + transceiver_data)); + + DP_VERBOSE(p_hwfn, (ECORE_MSG_HW | ECORE_MSG_SP), + "Received transceiver state 
update [0x%08x] from mfw" + " [Addr 0x%x]\n", + transceiver_state, (u32)(p_hwfn->mcp_info->port_addr + + OFFSETOF(struct public_port, + transceiver_data))); + + transceiver_state = GET_MFW_FIELD(transceiver_state, + ETH_TRANSCEIVER_STATE); + + if (transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT) + DP_NOTICE(p_hwfn, false, "Transceiver is present.\n"); + else + DP_NOTICE(p_hwfn, false, "Transceiver is unplugged.\n"); + + OSAL_TRANSCEIVER_UPDATE(p_hwfn); +} + +static void ecore_mcp_read_eee_config(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_mcp_link_state *p_link) +{ + u32 eee_status, val; + + p_link->eee_adv_caps = 0; + p_link->eee_lp_adv_caps = 0; + eee_status = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr + + OFFSETOF(struct public_port, eee_status)); + p_link->eee_active = !!(eee_status & EEE_ACTIVE_BIT); + val = (eee_status & EEE_LD_ADV_STATUS_MASK) >> EEE_LD_ADV_STATUS_OFFSET; + if (val & EEE_1G_ADV) + p_link->eee_adv_caps |= ECORE_EEE_1G_ADV; + if (val & EEE_10G_ADV) + p_link->eee_adv_caps |= ECORE_EEE_10G_ADV; + val = (eee_status & EEE_LP_ADV_STATUS_MASK) >> EEE_LP_ADV_STATUS_OFFSET; + if (val & EEE_1G_ADV) + p_link->eee_lp_adv_caps |= ECORE_EEE_1G_ADV; + if (val & EEE_10G_ADV) + p_link->eee_lp_adv_caps |= ECORE_EEE_10G_ADV; +} + +static u32 ecore_mcp_get_shmem_func(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct public_func *p_data, + int pfid) +{ + u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base, + PUBLIC_FUNC); + u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr); + u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid); + u32 i, size; + + OSAL_MEM_ZERO(p_data, sizeof(*p_data)); + + size = OSAL_MIN_T(u32, sizeof(*p_data), + SECTION_SIZE(mfw_path_offsize)); + for (i = 0; i < size / sizeof(u32); i++) + ((u32 *)p_data)[i] = ecore_rd(p_hwfn, p_ptt, + func_addr + (i << 2)); + + return size; +} + +static void ecore_mcp_handle_link_change(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + bool b_reset) +{ + struct ecore_mcp_link_state *p_link; + u8 max_bw, min_bw; + u32 status = 0; + + /* Prevent SW/attentions from doing this at the same time */ + OSAL_SPIN_LOCK(&p_hwfn->mcp_info->link_lock); + + p_link = &p_hwfn->mcp_info->link_output; + OSAL_MEMSET(p_link, 0, sizeof(*p_link)); + if (!b_reset) { + status = ecore_rd(p_hwfn, p_ptt, + p_hwfn->mcp_info->port_addr + + OFFSETOF(struct public_port, link_status)); + DP_VERBOSE(p_hwfn, (ECORE_MSG_LINK | ECORE_MSG_SP), + "Received link update [0x%08x] from mfw" + " [Addr 0x%x]\n", + status, (u32)(p_hwfn->mcp_info->port_addr + + OFFSETOF(struct public_port, + link_status))); + } else { + DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, + "Resetting link indications\n"); + goto out; + } + + if (p_hwfn->b_drv_link_init) { + /* Link indication with modern MFW arrives as per-PF + * indication. 
+ */ + if (p_hwfn->mcp_info->capabilities & + FW_MB_PARAM_FEATURE_SUPPORT_VLINK) { + struct public_func shmem_info; + + ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, + MCP_PF_ID(p_hwfn)); + p_link->link_up = !!(shmem_info.status & + FUNC_STATUS_VIRTUAL_LINK_UP); + } else { + p_link->link_up = !!(status & LINK_STATUS_LINK_UP); + } + } else { + p_link->link_up = false; + } + + p_link->full_duplex = true; + switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) { + case LINK_STATUS_SPEED_AND_DUPLEX_100G: + p_link->speed = 100000; + break; + case LINK_STATUS_SPEED_AND_DUPLEX_50G: + p_link->speed = 50000; + break; + case LINK_STATUS_SPEED_AND_DUPLEX_40G: + p_link->speed = 40000; + break; + case LINK_STATUS_SPEED_AND_DUPLEX_25G: + p_link->speed = 25000; + break; + case LINK_STATUS_SPEED_AND_DUPLEX_20G: + p_link->speed = 20000; + break; + case LINK_STATUS_SPEED_AND_DUPLEX_10G: + p_link->speed = 10000; + break; + case LINK_STATUS_SPEED_AND_DUPLEX_1000THD: + p_link->full_duplex = false; + /* Fall-through */ + case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD: + p_link->speed = 1000; + break; + default: + p_link->speed = 0; + } + + /* We never store total line speed as p_link->speed is + * again changes according to bandwidth allocation. + */ + if (p_link->link_up && p_link->speed) + p_link->line_speed = p_link->speed; + else + p_link->line_speed = 0; + + max_bw = p_hwfn->mcp_info->func_info.bandwidth_max; + min_bw = p_hwfn->mcp_info->func_info.bandwidth_min; + + /* Max bandwidth configuration */ + __ecore_configure_pf_max_bandwidth(p_hwfn, p_ptt, + p_link, max_bw); + + /* Min bandwidth configuration */ + __ecore_configure_pf_min_bandwidth(p_hwfn, p_ptt, + p_link, min_bw); + ecore_configure_vp_wfq_on_link_change(p_hwfn->p_dev, p_ptt, + p_link->min_pf_rate); + + p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED); + p_link->an_complete = !!(status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE); + p_link->parallel_detection = !!(status & + LINK_STATUS_PARALLEL_DETECTION_USED); + p_link->pfc_enabled = !!(status & LINK_STATUS_PFC_ENABLED); + + p_link->partner_adv_speed |= + (status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) ? + ECORE_LINK_PARTNER_SPEED_1G_FD : 0; + p_link->partner_adv_speed |= + (status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) ? + ECORE_LINK_PARTNER_SPEED_1G_HD : 0; + p_link->partner_adv_speed |= + (status & LINK_STATUS_LINK_PARTNER_10G_CAPABLE) ? + ECORE_LINK_PARTNER_SPEED_10G : 0; + p_link->partner_adv_speed |= + (status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ? + ECORE_LINK_PARTNER_SPEED_20G : 0; + p_link->partner_adv_speed |= + (status & LINK_STATUS_LINK_PARTNER_25G_CAPABLE) ? + ECORE_LINK_PARTNER_SPEED_25G : 0; + p_link->partner_adv_speed |= + (status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ? + ECORE_LINK_PARTNER_SPEED_40G : 0; + p_link->partner_adv_speed |= + (status & LINK_STATUS_LINK_PARTNER_50G_CAPABLE) ? + ECORE_LINK_PARTNER_SPEED_50G : 0; + p_link->partner_adv_speed |= + (status & LINK_STATUS_LINK_PARTNER_100G_CAPABLE) ? 
+ ECORE_LINK_PARTNER_SPEED_100G : 0; + + p_link->partner_tx_flow_ctrl_en = + !!(status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED); + p_link->partner_rx_flow_ctrl_en = + !!(status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED); + + switch (status & LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK) { + case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE: + p_link->partner_adv_pause = ECORE_LINK_PARTNER_SYMMETRIC_PAUSE; + break; + case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE: + p_link->partner_adv_pause = ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE; + break; + case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE: + p_link->partner_adv_pause = ECORE_LINK_PARTNER_BOTH_PAUSE; + break; + default: + p_link->partner_adv_pause = 0; + } + + p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT); + + if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE) + ecore_mcp_read_eee_config(p_hwfn, p_ptt, p_link); + + OSAL_LINK_UPDATE(p_hwfn); +out: + OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->link_lock); +} + +enum _ecore_status_t ecore_mcp_set_link(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, bool b_up) +{ + struct ecore_mcp_link_params *params = &p_hwfn->mcp_info->link_input; + struct ecore_mcp_mb_params mb_params; + struct eth_phy_cfg phy_cfg; + enum _ecore_status_t rc = ECORE_SUCCESS; + u32 cmd; + +#ifndef ASIC_ONLY + if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) { + if (b_up) + OSAL_LINK_UPDATE(p_hwfn); + return ECORE_SUCCESS; + } +#endif + + /* Set the shmem configuration according to params */ + OSAL_MEM_ZERO(&phy_cfg, sizeof(phy_cfg)); + cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET; + if (!params->speed.autoneg) + phy_cfg.speed = params->speed.forced_speed; + phy_cfg.pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0; + phy_cfg.pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0; + phy_cfg.pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0; + phy_cfg.adv_speed = params->speed.advertised_speeds; + phy_cfg.loopback_mode = params->loopback_mode; + + /* There are MFWs that share this capability regardless of whether + * this is feasible or not. And given that at the very least adv_caps + * would be set internally by ecore, we want to make sure LFA would + * still work. + */ + if ((p_hwfn->mcp_info->capabilities & + FW_MB_PARAM_FEATURE_SUPPORT_EEE) && + params->eee.enable) { + phy_cfg.eee_cfg |= EEE_CFG_EEE_ENABLED; + if (params->eee.tx_lpi_enable) + phy_cfg.eee_cfg |= EEE_CFG_TX_LPI; + if (params->eee.adv_caps & ECORE_EEE_1G_ADV) + phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_1G; + if (params->eee.adv_caps & ECORE_EEE_10G_ADV) + phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_10G; + phy_cfg.eee_cfg |= (params->eee.tx_lpi_timer << + EEE_TX_TIMER_USEC_OFFSET) & + EEE_TX_TIMER_USEC_MASK; + } + + p_hwfn->b_drv_link_init = b_up; + + if (b_up) + DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, + "Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x\n", + phy_cfg.speed, phy_cfg.pause, phy_cfg.adv_speed, + phy_cfg.loopback_mode); + else + DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, "Resetting link\n"); + + OSAL_MEM_ZERO(&mb_params, sizeof(mb_params)); + mb_params.cmd = cmd; + mb_params.p_data_src = &phy_cfg; + mb_params.data_src_size = sizeof(phy_cfg); + rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); + + /* if mcp fails to respond we must abort */ + if (rc != ECORE_SUCCESS) { + DP_ERR(p_hwfn, "MCP response failure, aborting\n"); + return rc; + } + + /* Mimic link-change attention, done for several reasons: + * - On reset, there's no guarantee MFW would trigger + * an attention. 
+ * - On initialization, older MFWs might not indicate link change + * during LFA, so we'll never get an UP indication. + */ + ecore_mcp_handle_link_change(p_hwfn, p_ptt, !b_up); + + return ECORE_SUCCESS; +} + +u32 ecore_get_process_kill_counter(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + u32 path_offsize_addr, path_offsize, path_addr, proc_kill_cnt; + + /* TODO - Add support for VFs */ + if (IS_VF(p_hwfn->p_dev)) + return ECORE_INVAL; + + path_offsize_addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base, + PUBLIC_PATH); + path_offsize = ecore_rd(p_hwfn, p_ptt, path_offsize_addr); + path_addr = SECTION_ADDR(path_offsize, ECORE_PATH_ID(p_hwfn)); + + proc_kill_cnt = ecore_rd(p_hwfn, p_ptt, + path_addr + + OFFSETOF(struct public_path, process_kill)) & + PROCESS_KILL_COUNTER_MASK; + + return proc_kill_cnt; +} + +static void ecore_mcp_handle_process_kill(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + struct ecore_dev *p_dev = p_hwfn->p_dev; + u32 proc_kill_cnt; + + /* Prevent possible attentions/interrupts during the recovery handling + * and till its load phase, during which they will be re-enabled. + */ + ecore_int_igu_disable_int(p_hwfn, p_ptt); + + DP_NOTICE(p_hwfn, false, "Received a process kill indication\n"); + + /* The following operations should be done once, and thus in CMT mode + * are carried out by only the first HW function. + */ + if (p_hwfn != ECORE_LEADING_HWFN(p_dev)) + return; + + if (p_dev->recov_in_prog) { + DP_NOTICE(p_hwfn, false, + "Ignoring the indication since a recovery" + " process is already in progress\n"); + return; + } + + p_dev->recov_in_prog = true; + + proc_kill_cnt = ecore_get_process_kill_counter(p_hwfn, p_ptt); + DP_NOTICE(p_hwfn, false, "Process kill counter: %d\n", proc_kill_cnt); + + OSAL_SCHEDULE_RECOVERY_HANDLER(p_hwfn); +} + +static void ecore_mcp_send_protocol_stats(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + enum MFW_DRV_MSG_TYPE type) +{ + enum ecore_mcp_protocol_type stats_type; + union ecore_mcp_protocol_stats stats; + struct ecore_mcp_mb_params mb_params; + u32 hsi_param; + enum _ecore_status_t rc; + + switch (type) { + case MFW_DRV_MSG_GET_LAN_STATS: + stats_type = ECORE_MCP_LAN_STATS; + hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN; + break; + default: + DP_VERBOSE(p_hwfn, ECORE_MSG_SP, + "Invalid protocol type %d\n", type); + return; + } + + OSAL_GET_PROTOCOL_STATS(p_hwfn->p_dev, stats_type, &stats); + + OSAL_MEM_ZERO(&mb_params, sizeof(mb_params)); + mb_params.cmd = DRV_MSG_CODE_GET_STATS; + mb_params.param = hsi_param; + mb_params.p_data_src = &stats; + mb_params.data_src_size = sizeof(stats); + rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); + if (rc != ECORE_SUCCESS) + DP_ERR(p_hwfn, "Failed to send protocol stats, rc = %d\n", rc); +} + +static void ecore_read_pf_bandwidth(struct ecore_hwfn *p_hwfn, + struct public_func *p_shmem_info) +{ + struct ecore_mcp_function_info *p_info; + + p_info = &p_hwfn->mcp_info->func_info; + + /* TODO - bandwidth min/max should have valid values of 1-100, + * as well as some indication that the feature is disabled. + * Until MFW/qlediag enforce those limitations, Assume THERE IS ALWAYS + * limit and correct value to min `1' and max `100' if limit isn't in + * range. + */ + p_info->bandwidth_min = (p_shmem_info->config & + FUNC_MF_CFG_MIN_BW_MASK) >> + FUNC_MF_CFG_MIN_BW_OFFSET; + if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) { + DP_INFO(p_hwfn, + "bandwidth minimum out of bounds [%02x]. 
Set to 1\n", + p_info->bandwidth_min); + p_info->bandwidth_min = 1; + } + + p_info->bandwidth_max = (p_shmem_info->config & + FUNC_MF_CFG_MAX_BW_MASK) >> + FUNC_MF_CFG_MAX_BW_OFFSET; + if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) { + DP_INFO(p_hwfn, + "bandwidth maximum out of bounds [%02x]. Set to 100\n", + p_info->bandwidth_max); + p_info->bandwidth_max = 100; + } +} + +static void +ecore_mcp_update_bw(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) +{ + struct ecore_mcp_function_info *p_info; + struct public_func shmem_info; + u32 resp = 0, param = 0; + + ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn)); + + ecore_read_pf_bandwidth(p_hwfn, &shmem_info); + + p_info = &p_hwfn->mcp_info->func_info; + + ecore_configure_pf_min_bandwidth(p_hwfn->p_dev, p_info->bandwidth_min); + + ecore_configure_pf_max_bandwidth(p_hwfn->p_dev, p_info->bandwidth_max); + + /* Acknowledge the MFW */ + ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp, + ¶m); +} + +static void ecore_mcp_update_stag(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + struct public_func shmem_info; + u32 resp = 0, param = 0; + + ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, + MCP_PF_ID(p_hwfn)); + + p_hwfn->mcp_info->func_info.ovlan = (u16)shmem_info.ovlan_stag & + FUNC_MF_CFG_OV_STAG_MASK; + p_hwfn->hw_info.ovlan = p_hwfn->mcp_info->func_info.ovlan; + if (OSAL_TEST_BIT(ECORE_MF_OVLAN_CLSS, &p_hwfn->p_dev->mf_bits)) { + if (p_hwfn->hw_info.ovlan != ECORE_MCP_VLAN_UNSET) { + ecore_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_VALUE, + p_hwfn->hw_info.ovlan); + ecore_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_EN, 1); + + /* Configure DB to add external vlan to EDPM packets */ + ecore_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 1); + ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_EXT_VID, + p_hwfn->hw_info.ovlan); + } else { + ecore_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_EN, 0); + ecore_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_VALUE, 0); + + /* Configure DB to add external vlan to EDPM packets */ + ecore_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 0); + ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_EXT_VID, 0); + } + + ecore_sp_pf_update_stag(p_hwfn); + } + + DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "ovlan = %d hw_mode = 0x%x\n", + p_hwfn->mcp_info->func_info.ovlan, p_hwfn->hw_info.hw_mode); + OSAL_HW_INFO_CHANGE(p_hwfn, ECORE_HW_INFO_CHANGE_OVLAN); + + /* Acknowledge the MFW */ + ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_S_TAG_UPDATE_ACK, 0, + &resp, ¶m); +} + +static void ecore_mcp_handle_fan_failure(struct ecore_hwfn *p_hwfn) +{ + /* A single notification should be sent to upper driver in CMT mode */ + if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev)) + return; + + DP_NOTICE(p_hwfn, false, + "Fan failure was detected on the network interface card" + " and it's going to be shut down.\n"); + + ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_FAN_FAIL); +} + +struct ecore_mdump_cmd_params { + u32 cmd; + void *p_data_src; + u8 data_src_size; + void *p_data_dst; + u8 data_dst_size; + u32 mcp_resp; +}; + +static enum _ecore_status_t +ecore_mcp_mdump_cmd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, + struct ecore_mdump_cmd_params *p_mdump_cmd_params) +{ + struct ecore_mcp_mb_params mb_params; + enum _ecore_status_t rc; + + OSAL_MEM_ZERO(&mb_params, sizeof(mb_params)); + mb_params.cmd = DRV_MSG_CODE_MDUMP_CMD; + mb_params.param = p_mdump_cmd_params->cmd; + mb_params.p_data_src = p_mdump_cmd_params->p_data_src; + mb_params.data_src_size = p_mdump_cmd_params->data_src_size; + mb_params.p_data_dst = 
p_mdump_cmd_params->p_data_dst; + mb_params.data_dst_size = p_mdump_cmd_params->data_dst_size; + rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); + if (rc != ECORE_SUCCESS) + return rc; + + p_mdump_cmd_params->mcp_resp = mb_params.mcp_resp; + + if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_MDUMP_INVALID_CMD) { + DP_INFO(p_hwfn, + "The mdump sub command is unsupported by the MFW [mdump_cmd 0x%x]\n", + p_mdump_cmd_params->cmd); + rc = ECORE_NOTIMPL; + } else if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_UNSUPPORTED) { + DP_INFO(p_hwfn, + "The mdump command is not supported by the MFW\n"); + rc = ECORE_NOTIMPL; + } + + return rc; +} + +static enum _ecore_status_t ecore_mcp_mdump_ack(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + struct ecore_mdump_cmd_params mdump_cmd_params; + + OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params)); + mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_ACK; + + return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params); +} + +enum _ecore_status_t ecore_mcp_mdump_set_values(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 epoch) +{ + struct ecore_mdump_cmd_params mdump_cmd_params; + + OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params)); + mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_SET_VALUES; + mdump_cmd_params.p_data_src = &epoch; + mdump_cmd_params.data_src_size = sizeof(epoch); + + return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params); +} + +enum _ecore_status_t ecore_mcp_mdump_trigger(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + struct ecore_mdump_cmd_params mdump_cmd_params; + + OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params)); + mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_TRIGGER; + + return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params); +} + +static enum _ecore_status_t +ecore_mcp_mdump_get_config(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, + struct mdump_config_stc *p_mdump_config) +{ + struct ecore_mdump_cmd_params mdump_cmd_params; + enum _ecore_status_t rc; + + OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params)); + mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_GET_CONFIG; + mdump_cmd_params.p_data_dst = p_mdump_config; + mdump_cmd_params.data_dst_size = sizeof(*p_mdump_config); + + rc = ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params); + if (rc != ECORE_SUCCESS) + return rc; + + if (mdump_cmd_params.mcp_resp != FW_MSG_CODE_OK) { + DP_INFO(p_hwfn, + "Failed to get the mdump configuration and logs info [mcp_resp 0x%x]\n", + mdump_cmd_params.mcp_resp); + rc = ECORE_UNKNOWN_ERROR; + } + + return rc; +} + +enum _ecore_status_t +ecore_mcp_mdump_get_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, + struct ecore_mdump_info *p_mdump_info) +{ + u32 addr, global_offsize, global_addr; + struct mdump_config_stc mdump_config; + enum _ecore_status_t rc; + +#ifndef ASIC_ONLY + if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && !ecore_mcp_is_init(p_hwfn)) { + DP_INFO(p_hwfn, "Emulation: Can't get mdump info\n"); + return ECORE_NOTIMPL; + } +#endif + + OSAL_MEMSET(p_mdump_info, 0, sizeof(*p_mdump_info)); + + addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base, + PUBLIC_GLOBAL); + global_offsize = ecore_rd(p_hwfn, p_ptt, addr); + global_addr = SECTION_ADDR(global_offsize, 0); + p_mdump_info->reason = ecore_rd(p_hwfn, p_ptt, + global_addr + + OFFSETOF(struct public_global, + mdump_reason)); + + if (p_mdump_info->reason) { + rc = ecore_mcp_mdump_get_config(p_hwfn, p_ptt, &mdump_config); + if (rc != ECORE_SUCCESS) + return rc; + + p_mdump_info->version = mdump_config.version; + 
p_mdump_info->config = mdump_config.config; + p_mdump_info->epoch = mdump_config.epoc; + p_mdump_info->num_of_logs = mdump_config.num_of_logs; + p_mdump_info->valid_logs = mdump_config.valid_logs; + + DP_VERBOSE(p_hwfn, ECORE_MSG_SP, + "MFW mdump info: reason %d, version 0x%x, config 0x%x, epoch 0x%x, num_of_logs 0x%x, valid_logs 0x%x\n", + p_mdump_info->reason, p_mdump_info->version, + p_mdump_info->config, p_mdump_info->epoch, + p_mdump_info->num_of_logs, p_mdump_info->valid_logs); + } else { + DP_VERBOSE(p_hwfn, ECORE_MSG_SP, + "MFW mdump info: reason %d\n", p_mdump_info->reason); + } + + return ECORE_SUCCESS; +} + +enum _ecore_status_t ecore_mcp_mdump_clear_logs(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + struct ecore_mdump_cmd_params mdump_cmd_params; + + OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params)); + mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_CLEAR_LOGS; + + return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params); +} + +enum _ecore_status_t +ecore_mcp_mdump_get_retain(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, + struct ecore_mdump_retain_data *p_mdump_retain) +{ + struct ecore_mdump_cmd_params mdump_cmd_params; + struct mdump_retain_data_stc mfw_mdump_retain; + enum _ecore_status_t rc; + + OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params)); + mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_GET_RETAIN; + mdump_cmd_params.p_data_dst = &mfw_mdump_retain; + mdump_cmd_params.data_dst_size = sizeof(mfw_mdump_retain); + + rc = ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params); + if (rc != ECORE_SUCCESS) + return rc; + + if (mdump_cmd_params.mcp_resp != FW_MSG_CODE_OK) { + DP_INFO(p_hwfn, + "Failed to get the mdump retained data [mcp_resp 0x%x]\n", + mdump_cmd_params.mcp_resp); + return ECORE_UNKNOWN_ERROR; + } + + p_mdump_retain->valid = mfw_mdump_retain.valid; + p_mdump_retain->epoch = mfw_mdump_retain.epoch; + p_mdump_retain->pf = mfw_mdump_retain.pf; + p_mdump_retain->status = mfw_mdump_retain.status; + + return ECORE_SUCCESS; +} + +enum _ecore_status_t ecore_mcp_mdump_clr_retain(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + struct ecore_mdump_cmd_params mdump_cmd_params; + + OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params)); + mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_CLR_RETAIN; + + return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params); +} + +static void ecore_mcp_handle_critical_error(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + struct ecore_mdump_retain_data mdump_retain; + enum _ecore_status_t rc; + + /* In CMT mode - no need for more than a single acknowledgment to the + * MFW, and no more than a single notification to the upper driver. 
+ */ + if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev)) + return; + + rc = ecore_mcp_mdump_get_retain(p_hwfn, p_ptt, &mdump_retain); + if (rc == ECORE_SUCCESS && mdump_retain.valid) { + DP_NOTICE(p_hwfn, false, + "The MFW notified that a critical error occurred in the device [epoch 0x%08x, pf 0x%x, status 0x%08x]\n", + mdump_retain.epoch, mdump_retain.pf, + mdump_retain.status); + } else { + DP_NOTICE(p_hwfn, false, + "The MFW notified that a critical error occurred in the device\n"); + } + + if (p_hwfn->p_dev->allow_mdump) { + DP_NOTICE(p_hwfn, false, + "Not acknowledging the notification to allow the MFW crash dump\n"); + return; + } + + DP_NOTICE(p_hwfn, false, + "Acknowledging the notification to not allow the MFW crash dump [driver debug data collection is preferable]\n"); + ecore_mcp_mdump_ack(p_hwfn, p_ptt); + ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_HW_ATTN); +} + +void +ecore_mcp_read_ufp_config(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) +{ + struct public_func shmem_info; + u32 port_cfg, val; + + if (!OSAL_TEST_BIT(ECORE_MF_UFP_SPECIFIC, &p_hwfn->p_dev->mf_bits)) + return; + + OSAL_MEMSET(&p_hwfn->ufp_info, 0, sizeof(p_hwfn->ufp_info)); + port_cfg = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr + + OFFSETOF(struct public_port, oem_cfg_port)); + val = GET_MFW_FIELD(port_cfg, OEM_CFG_CHANNEL_TYPE); + if (val != OEM_CFG_CHANNEL_TYPE_STAGGED) + DP_NOTICE(p_hwfn, false, "Incorrect UFP Channel type %d\n", + val); + + val = GET_MFW_FIELD(port_cfg, OEM_CFG_SCHED_TYPE); + if (val == OEM_CFG_SCHED_TYPE_ETS) + p_hwfn->ufp_info.mode = ECORE_UFP_MODE_ETS; + else if (val == OEM_CFG_SCHED_TYPE_VNIC_BW) + p_hwfn->ufp_info.mode = ECORE_UFP_MODE_VNIC_BW; + else + DP_NOTICE(p_hwfn, false, "Unknown UFP scheduling mode %d\n", + val); + + ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, + MCP_PF_ID(p_hwfn)); + val = GET_MFW_FIELD(shmem_info.oem_cfg_func, OEM_CFG_FUNC_TC); + p_hwfn->ufp_info.tc = (u8)val; + val = GET_MFW_FIELD(shmem_info.oem_cfg_func, + OEM_CFG_FUNC_HOST_PRI_CTRL); + if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_VNIC) + p_hwfn->ufp_info.pri_type = ECORE_UFP_PRI_VNIC; + else if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_OS) + p_hwfn->ufp_info.pri_type = ECORE_UFP_PRI_OS; + else + DP_NOTICE(p_hwfn, false, "Unknown Host priority control %d\n", + val); + + DP_VERBOSE(p_hwfn, ECORE_MSG_SP, + "UFP shmem config: mode = %d tc = %d pri_type = %d\n", + p_hwfn->ufp_info.mode, p_hwfn->ufp_info.tc, + p_hwfn->ufp_info.pri_type); +} + +static enum _ecore_status_t +ecore_mcp_handle_ufp_event(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) +{ + ecore_mcp_read_ufp_config(p_hwfn, p_ptt); + + if (p_hwfn->ufp_info.mode == ECORE_UFP_MODE_VNIC_BW) { + p_hwfn->qm_info.ooo_tc = p_hwfn->ufp_info.tc; + p_hwfn->hw_info.offload_tc = p_hwfn->ufp_info.tc; + + ecore_qm_reconf(p_hwfn, p_ptt); + } else { + /* Merge UFP TC with the dcbx TC data */ + ecore_dcbx_mib_update_event(p_hwfn, p_ptt, + ECORE_DCBX_OPERATIONAL_MIB); + } + + /* update storm FW with negotiation results */ + ecore_sp_pf_update_ufp(p_hwfn); + + /* update stag pcp value */ + ecore_sp_pf_update_stag(p_hwfn); + + return ECORE_SUCCESS; +} + +enum _ecore_status_t ecore_mcp_handle_events(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + struct ecore_mcp_info *info = p_hwfn->mcp_info; + enum _ecore_status_t rc = ECORE_SUCCESS; + bool found = false; + u16 i; + + DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Received message from MFW\n"); + + /* Read Messages from MFW */ + ecore_mcp_read_mb(p_hwfn, p_ptt); + + /* Compare current messages to old ones */ + 
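/* Each index in the mailbox identifies one MFW_DRV_MSG_* event type. + * A handler runs only for indices whose current value differs from + * the shadow copy taken on the previous pass; afterwards all messages + * are acknowledged back to the MFW in big-endian format and the + * shadow copy is refreshed. + */ +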
for (i = 0; i < info->mfw_mb_length; i++) { + if (info->mfw_mb_cur[i] == info->mfw_mb_shadow[i]) + continue; + + found = true; + + DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, + "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n", + i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]); + + switch (i) { + case MFW_DRV_MSG_LINK_CHANGE: + ecore_mcp_handle_link_change(p_hwfn, p_ptt, false); + break; + case MFW_DRV_MSG_VF_DISABLED: + ecore_mcp_handle_vf_flr(p_hwfn, p_ptt); + break; + case MFW_DRV_MSG_LLDP_DATA_UPDATED: + ecore_dcbx_mib_update_event(p_hwfn, p_ptt, + ECORE_DCBX_REMOTE_LLDP_MIB); + break; + case MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED: + ecore_dcbx_mib_update_event(p_hwfn, p_ptt, + ECORE_DCBX_REMOTE_MIB); + break; + case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED: + ecore_dcbx_mib_update_event(p_hwfn, p_ptt, + ECORE_DCBX_OPERATIONAL_MIB); + /* clear the user-config cache */ + OSAL_MEMSET(&p_hwfn->p_dcbx_info->set, 0, + sizeof(struct ecore_dcbx_set)); + break; + case MFW_DRV_MSG_LLDP_RECEIVED_TLVS_UPDATED: + ecore_lldp_mib_update_event(p_hwfn, p_ptt); + break; + case MFW_DRV_MSG_OEM_CFG_UPDATE: + ecore_mcp_handle_ufp_event(p_hwfn, p_ptt); + break; + case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE: + ecore_mcp_handle_transceiver_change(p_hwfn, p_ptt); + break; + case MFW_DRV_MSG_ERROR_RECOVERY: + ecore_mcp_handle_process_kill(p_hwfn, p_ptt); + break; + case MFW_DRV_MSG_GET_LAN_STATS: + case MFW_DRV_MSG_GET_FCOE_STATS: + case MFW_DRV_MSG_GET_ISCSI_STATS: + case MFW_DRV_MSG_GET_RDMA_STATS: + ecore_mcp_send_protocol_stats(p_hwfn, p_ptt, i); + break; + case MFW_DRV_MSG_BW_UPDATE: + ecore_mcp_update_bw(p_hwfn, p_ptt); + break; + case MFW_DRV_MSG_S_TAG_UPDATE: + ecore_mcp_update_stag(p_hwfn, p_ptt); + break; + case MFW_DRV_MSG_FAILURE_DETECTED: + ecore_mcp_handle_fan_failure(p_hwfn); + break; + case MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED: + ecore_mcp_handle_critical_error(p_hwfn, p_ptt); + break; + default: + DP_INFO(p_hwfn, "Unimplemented MFW message %d\n", i); + rc = ECORE_INVAL; + } + } + + /* ACK everything */ + for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) { + OSAL_BE32 val = OSAL_CPU_TO_BE32(((u32 *)info->mfw_mb_cur)[i]); + + /* MFW expect answer in BE, so we force write in that format */ + ecore_wr(p_hwfn, p_ptt, + info->mfw_mb_addr + sizeof(u32) + + MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) * + sizeof(u32) + i * sizeof(u32), val); + } + + if (!found) { + DP_NOTICE(p_hwfn, false, + "Received an MFW message indication but no" + " new message!\n"); + rc = ECORE_INVAL; + } + + /* Copy the new mfw messages into the shadow */ + OSAL_MEMCPY(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length); + + return rc; +} + +enum _ecore_status_t ecore_mcp_get_mfw_ver(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 *p_mfw_ver, + u32 *p_running_bundle_id) +{ + u32 global_offsize; + +#ifndef ASIC_ONLY + if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && !ecore_mcp_is_init(p_hwfn)) { + DP_INFO(p_hwfn, "Emulation: Can't get MFW version\n"); + return ECORE_NOTIMPL; + } +#endif + + if (IS_VF(p_hwfn->p_dev)) { + if (p_hwfn->vf_iov_info) { + struct pfvf_acquire_resp_tlv *p_resp; + + p_resp = &p_hwfn->vf_iov_info->acquire_resp; + *p_mfw_ver = p_resp->pfdev_info.mfw_ver; + return ECORE_SUCCESS; + } else { + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "VF requested MFW version prior to ACQUIRE\n"); + return ECORE_INVAL; + } + } + + global_offsize = ecore_rd(p_hwfn, p_ptt, + SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info-> + public_base, + PUBLIC_GLOBAL)); + *p_mfw_ver = + ecore_rd(p_hwfn, p_ptt, + SECTION_ADDR(global_offsize, + 0) + 
OFFSETOF(struct public_global, mfw_ver)); + + if (p_running_bundle_id != OSAL_NULL) { + *p_running_bundle_id = ecore_rd(p_hwfn, p_ptt, + SECTION_ADDR(global_offsize, + 0) + + OFFSETOF(struct public_global, + running_bundle_id)); + } + + return ECORE_SUCCESS; +} + +int ecore_mcp_get_mbi_ver(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, u32 *p_mbi_ver) +{ + u32 nvm_cfg_addr, nvm_cfg1_offset, mbi_ver_addr; + +#ifndef ASIC_ONLY + if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && !ecore_mcp_is_init(p_hwfn)) { + DP_INFO(p_hwfn, "Emulation: Can't get MBI version\n"); + return -EOPNOTSUPP; + } +#endif + + if (IS_VF(p_hwfn->p_dev)) + return -EINVAL; + + /* Read the address of the nvm_cfg */ + nvm_cfg_addr = ecore_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0); + if (!nvm_cfg_addr) { + DP_NOTICE(p_hwfn, false, "Shared memory not initialized\n"); + return -EINVAL; + } + + /* Read the offset of nvm_cfg1 */ + nvm_cfg1_offset = ecore_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4); + + mbi_ver_addr = MCP_REG_SCRATCH + nvm_cfg1_offset + + offsetof(struct nvm_cfg1, glob) + offsetof(struct nvm_cfg1_glob, + mbi_version); + *p_mbi_ver = + ecore_rd(p_hwfn, p_ptt, + mbi_ver_addr) & (NVM_CFG1_GLOB_MBI_VERSION_0_MASK | + NVM_CFG1_GLOB_MBI_VERSION_1_MASK | + NVM_CFG1_GLOB_MBI_VERSION_2_MASK); + + return 0; +} + +enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 *p_media_type) +{ + *p_media_type = MEDIA_UNSPECIFIED; + + /* TODO - Add support for VFs */ + if (IS_VF(p_hwfn->p_dev)) + return ECORE_INVAL; + + if (!ecore_mcp_is_init(p_hwfn)) { +#ifndef ASIC_ONLY + if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) { + DP_INFO(p_hwfn, "Emulation: Can't get media type\n"); + return ECORE_NOTIMPL; + } +#endif + DP_NOTICE(p_hwfn, false, "MFW is not initialized!\n"); + return ECORE_BUSY; + } + + if (!p_ptt) + return ECORE_INVAL; + + *p_media_type = ecore_rd(p_hwfn, p_ptt, + p_hwfn->mcp_info->port_addr + + OFFSETOF(struct public_port, media_type)); + + return ECORE_SUCCESS; +} + +enum _ecore_status_t ecore_mcp_get_transceiver_data(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 *p_transceiver_state, + u32 *p_transceiver_type) +{ + u32 transceiver_info; + enum _ecore_status_t rc = ECORE_SUCCESS; + + /* TODO - Add support for VFs */ + if (IS_VF(p_hwfn->p_dev)) + return ECORE_INVAL; + + if (!ecore_mcp_is_init(p_hwfn)) { + DP_NOTICE(p_hwfn, false, "MFW is not initialized!\n"); + return ECORE_BUSY; + } + + *p_transceiver_type = ETH_TRANSCEIVER_TYPE_NONE; + *p_transceiver_state = ETH_TRANSCEIVER_STATE_UPDATING; + + transceiver_info = ecore_rd(p_hwfn, p_ptt, + p_hwfn->mcp_info->port_addr + + offsetof(struct public_port, + transceiver_data)); + + *p_transceiver_state = GET_MFW_FIELD(transceiver_info, + ETH_TRANSCEIVER_STATE); + + if (*p_transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT) { + *p_transceiver_type = GET_MFW_FIELD(transceiver_info, + ETH_TRANSCEIVER_TYPE); + } else { + *p_transceiver_type = ETH_TRANSCEIVER_TYPE_UNKNOWN; + } + + return rc; +} + +static int is_transceiver_ready(u32 transceiver_state, u32 transceiver_type) +{ + if ((transceiver_state & ETH_TRANSCEIVER_STATE_PRESENT) && + ((transceiver_state & ETH_TRANSCEIVER_STATE_UPDATING) == 0x0) && + (transceiver_type != ETH_TRANSCEIVER_TYPE_NONE)) + return 1; + + return 0; +} + +enum _ecore_status_t ecore_mcp_trans_speed_mask(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 *p_speed_mask) +{ + u32 transceiver_type = ETH_TRANSCEIVER_TYPE_NONE, transceiver_state; + + ecore_mcp_get_transceiver_data(p_hwfn, p_ptt, 
&transceiver_state, + &transceiver_type); + + + if (is_transceiver_ready(transceiver_state, transceiver_type) == 0) + return ECORE_INVAL; + + switch (transceiver_type) { + case ETH_TRANSCEIVER_TYPE_1G_LX: + case ETH_TRANSCEIVER_TYPE_1G_SX: + case ETH_TRANSCEIVER_TYPE_1G_PCC: + case ETH_TRANSCEIVER_TYPE_1G_ACC: + case ETH_TRANSCEIVER_TYPE_1000BASET: + *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G; + break; + + case ETH_TRANSCEIVER_TYPE_10G_SR: + case ETH_TRANSCEIVER_TYPE_10G_LR: + case ETH_TRANSCEIVER_TYPE_10G_LRM: + case ETH_TRANSCEIVER_TYPE_10G_ER: + case ETH_TRANSCEIVER_TYPE_10G_PCC: + case ETH_TRANSCEIVER_TYPE_10G_ACC: + case ETH_TRANSCEIVER_TYPE_4x10G: + *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G; + break; + + case ETH_TRANSCEIVER_TYPE_40G_LR4: + case ETH_TRANSCEIVER_TYPE_40G_SR4: + case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR: + case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR: + *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G; + break; + + case ETH_TRANSCEIVER_TYPE_100G_AOC: + case ETH_TRANSCEIVER_TYPE_100G_SR4: + case ETH_TRANSCEIVER_TYPE_100G_LR4: + case ETH_TRANSCEIVER_TYPE_100G_ER4: + case ETH_TRANSCEIVER_TYPE_100G_ACC: + *p_speed_mask = + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G; + break; + + case ETH_TRANSCEIVER_TYPE_25G_SR: + case ETH_TRANSCEIVER_TYPE_25G_LR: + case ETH_TRANSCEIVER_TYPE_25G_AOC: + case ETH_TRANSCEIVER_TYPE_25G_ACC_S: + case ETH_TRANSCEIVER_TYPE_25G_ACC_M: + case ETH_TRANSCEIVER_TYPE_25G_ACC_L: + *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G; + break; + + case ETH_TRANSCEIVER_TYPE_25G_CA_N: + case ETH_TRANSCEIVER_TYPE_25G_CA_S: + case ETH_TRANSCEIVER_TYPE_25G_CA_L: + case ETH_TRANSCEIVER_TYPE_4x25G_CR: + *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G; + break; + + case ETH_TRANSCEIVER_TYPE_40G_CR4: + case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR: + *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G; + break; + + case ETH_TRANSCEIVER_TYPE_100G_CR4: + case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR: + *p_speed_mask = + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G; + break; + + case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR: + case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR: + case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_AOC: + *p_speed_mask = + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G; + break; + + case ETH_TRANSCEIVER_TYPE_XLPPI: + *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G; + break; + + case ETH_TRANSCEIVER_TYPE_10G_BASET: + *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G; + break; + + default: + DP_INFO(p_hwfn, "Unknown transcevier type 0x%x\n", + transceiver_type); + *p_speed_mask = 0xff; + break; + } + + return ECORE_SUCCESS; +} + +enum _ecore_status_t 
ecore_mcp_get_board_config(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 *p_board_config) +{ + u32 nvm_cfg_addr, nvm_cfg1_offset, port_cfg_addr; + enum _ecore_status_t rc = ECORE_SUCCESS; + + /* TODO - Add support for VFs */ + if (IS_VF(p_hwfn->p_dev)) + return ECORE_INVAL; + + if (!ecore_mcp_is_init(p_hwfn)) { + DP_NOTICE(p_hwfn, false, "MFW is not initialized!\n"); + return ECORE_BUSY; + } + if (!p_ptt) { + *p_board_config = NVM_CFG1_PORT_PORT_TYPE_UNDEFINED; + rc = ECORE_INVAL; + } else { + nvm_cfg_addr = ecore_rd(p_hwfn, p_ptt, + MISC_REG_GEN_PURP_CR0); + nvm_cfg1_offset = ecore_rd(p_hwfn, p_ptt, + nvm_cfg_addr + 4); + port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset + + offsetof(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]); + *p_board_config = ecore_rd(p_hwfn, p_ptt, + port_cfg_addr + + offsetof(struct nvm_cfg1_port, + board_cfg)); + } + + return rc; +} + +/* @DPDK */ +/* Old MFW has a global configuration for all PFs regarding RDMA support */ +static void +ecore_mcp_get_shmem_proto_legacy(struct ecore_hwfn *p_hwfn, + enum ecore_pci_personality *p_proto) +{ + *p_proto = ECORE_PCI_ETH; + + DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP, + "According to Legacy capabilities, L2 personality is %08x\n", + (u32)*p_proto); +} + +/* @DPDK */ +static enum _ecore_status_t +ecore_mcp_get_shmem_proto_mfw(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + enum ecore_pci_personality *p_proto) +{ + u32 resp = 0, param = 0; + enum _ecore_status_t rc; + + DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP, + "According to capabilities, L2 personality is %08x [resp %08x param %08x]\n", + (u32)*p_proto, resp, param); + return ECORE_SUCCESS; +} + +static enum _ecore_status_t +ecore_mcp_get_shmem_proto(struct ecore_hwfn *p_hwfn, + struct public_func *p_info, + struct ecore_ptt *p_ptt, + enum ecore_pci_personality *p_proto) +{ + enum _ecore_status_t rc = ECORE_SUCCESS; + + switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) { + case FUNC_MF_CFG_PROTOCOL_ETHERNET: + if (ecore_mcp_get_shmem_proto_mfw(p_hwfn, p_ptt, p_proto) != + ECORE_SUCCESS) + ecore_mcp_get_shmem_proto_legacy(p_hwfn, p_proto); + break; + default: + rc = ECORE_INVAL; + } + + return rc; +} + +enum _ecore_status_t ecore_mcp_fill_shmem_func_info(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + struct ecore_mcp_function_info *info; + struct public_func shmem_info; + + ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn)); + info = &p_hwfn->mcp_info->func_info; + + info->pause_on_host = (shmem_info.config & + FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0; + + if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt, + &info->protocol)) { + DP_ERR(p_hwfn, "Unknown personality %08x\n", + (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK)); + return ECORE_INVAL; + } + + ecore_read_pf_bandwidth(p_hwfn, &shmem_info); + + if (shmem_info.mac_upper || shmem_info.mac_lower) { + info->mac[0] = (u8)(shmem_info.mac_upper >> 8); + info->mac[1] = (u8)(shmem_info.mac_upper); + info->mac[2] = (u8)(shmem_info.mac_lower >> 24); + info->mac[3] = (u8)(shmem_info.mac_lower >> 16); + info->mac[4] = (u8)(shmem_info.mac_lower >> 8); + info->mac[5] = (u8)(shmem_info.mac_lower); + } else { + /* TODO - are there protocols for which there's no MAC? */ + DP_NOTICE(p_hwfn, false, "MAC is 0 in shmem\n"); + } + + /* TODO - are these calculations true for BE machine? 
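+ * (Illustrative example: with fcoe_wwn_port_name_upper == 0x20000025 + * and fcoe_wwn_port_name_lower == 0xb500a01b, the wwn_port assembled + * below is 0xb500a01b20000025, i.e. the _upper word supplies the low + * 32 bits.)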
*/ + info->wwn_port = (u64)shmem_info.fcoe_wwn_port_name_upper | + (((u64)shmem_info.fcoe_wwn_port_name_lower) << 32); + info->wwn_node = (u64)shmem_info.fcoe_wwn_node_name_upper | + (((u64)shmem_info.fcoe_wwn_node_name_lower) << 32); + + info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK); + + info->mtu = (u16)shmem_info.mtu_size; + + if (info->mtu == 0) + info->mtu = 1500; + + info->mtu = (u16)shmem_info.mtu_size; + + DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IFUP), + "Read configuration from shmem: pause_on_host %02x" + " protocol %02x BW [%02x - %02x]" + " MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %lx" + " node %lx ovlan %04x\n", + info->pause_on_host, info->protocol, + info->bandwidth_min, info->bandwidth_max, + info->mac[0], info->mac[1], info->mac[2], + info->mac[3], info->mac[4], info->mac[5], + (unsigned long)info->wwn_port, + (unsigned long)info->wwn_node, info->ovlan); + + return ECORE_SUCCESS; +} + +struct ecore_mcp_link_params +*ecore_mcp_get_link_params(struct ecore_hwfn *p_hwfn) +{ + if (!p_hwfn || !p_hwfn->mcp_info) + return OSAL_NULL; + return &p_hwfn->mcp_info->link_input; +} + +struct ecore_mcp_link_state +*ecore_mcp_get_link_state(struct ecore_hwfn *p_hwfn) +{ + if (!p_hwfn || !p_hwfn->mcp_info) + return OSAL_NULL; + +#ifndef ASIC_ONLY + if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) { + DP_INFO(p_hwfn, "Non-ASIC - always notify that link is up\n"); + p_hwfn->mcp_info->link_output.link_up = true; + } +#endif + + return &p_hwfn->mcp_info->link_output; +} + +struct ecore_mcp_link_capabilities +*ecore_mcp_get_link_capabilities(struct ecore_hwfn *p_hwfn) +{ + if (!p_hwfn || !p_hwfn->mcp_info) + return OSAL_NULL; + return &p_hwfn->mcp_info->link_capabilities; +} + +enum _ecore_status_t ecore_mcp_drain(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + u32 resp = 0, param = 0; + enum _ecore_status_t rc; + + rc = ecore_mcp_cmd(p_hwfn, p_ptt, + DRV_MSG_CODE_NIG_DRAIN, 1000, &resp, ¶m); + + /* Wait for the drain to complete before returning */ + OSAL_MSLEEP(1020); + + return rc; +} + +const struct ecore_mcp_function_info +*ecore_mcp_get_function_info(struct ecore_hwfn *p_hwfn) +{ + if (!p_hwfn || !p_hwfn->mcp_info) + return OSAL_NULL; + return &p_hwfn->mcp_info->func_info; +} + +int ecore_mcp_get_personality_cnt(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, u32 personalities) +{ + enum ecore_pci_personality protocol = ECORE_PCI_DEFAULT; + struct public_func shmem_info; + int i, count = 0, num_pfs; + + num_pfs = NUM_OF_ENG_PFS(p_hwfn->p_dev); + + for (i = 0; i < num_pfs; i++) { + ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, + MCP_PF_ID_BY_REL(p_hwfn, i)); + if (shmem_info.config & FUNC_MF_CFG_FUNC_HIDE) + continue; + + if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt, + &protocol) != + ECORE_SUCCESS) + continue; + + if ((1 << ((u32)protocol)) & personalities) + count++; + } + + return count; +} + +enum _ecore_status_t ecore_mcp_get_flash_size(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 *p_flash_size) +{ + u32 flash_size; + +#ifndef ASIC_ONLY + if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && !ecore_mcp_is_init(p_hwfn)) { + DP_INFO(p_hwfn, "Emulation: Can't get flash size\n"); + return ECORE_NOTIMPL; + } +#endif + + if (IS_VF(p_hwfn->p_dev)) + return ECORE_INVAL; + + flash_size = ecore_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4); + flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >> + MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT; + flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_OFFSET)); + + *p_flash_size = flash_size; + + return 
ECORE_SUCCESS; +} + +enum _ecore_status_t ecore_start_recovery_process(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + struct ecore_dev *p_dev = p_hwfn->p_dev; + + if (p_dev->recov_in_prog) { + DP_NOTICE(p_hwfn, false, + "Avoid triggering a recovery since such a process" + " is already in progress\n"); + return ECORE_AGAIN; + } + + DP_NOTICE(p_hwfn, false, "Triggering a recovery process\n"); + ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_GENERAL_ATTN_35, 0x1); + + return ECORE_SUCCESS; +} + +static enum _ecore_status_t +ecore_mcp_config_vf_msix_bb(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u8 vf_id, u8 num) +{ + u32 resp = 0, param = 0, rc_param = 0; + enum _ecore_status_t rc; + +/* Only Leader can configure MSIX, and need to take CMT into account */ + + if (!IS_LEAD_HWFN(p_hwfn)) + return ECORE_SUCCESS; + num *= p_hwfn->p_dev->num_hwfns; + + param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_OFFSET) & + DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK; + param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_OFFSET) & + DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK; + + rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param, + &resp, &rc_param); + + if (resp != FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE) { + DP_NOTICE(p_hwfn, true, "VF[%d]: MFW failed to set MSI-X\n", + vf_id); + rc = ECORE_INVAL; + } else { + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "Requested 0x%02x MSI-x interrupts from VF 0x%02x\n", + num, vf_id); + } + + return rc; +} + +static enum _ecore_status_t +ecore_mcp_config_vf_msix_ah(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u8 num) +{ + u32 resp = 0, param = num, rc_param = 0; + enum _ecore_status_t rc; + + rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_PF_VFS_MSIX, + param, &resp, &rc_param); + + if (resp != FW_MSG_CODE_DRV_CFG_PF_VFS_MSIX_DONE) { + DP_NOTICE(p_hwfn, true, "MFW failed to set MSI-X for VFs\n"); + rc = ECORE_INVAL; + } else { + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "Requested 0x%02x MSI-x interrupts for VFs\n", + num); + } + + return rc; +} + +enum _ecore_status_t ecore_mcp_config_vf_msix(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u8 vf_id, u8 num) +{ +#ifndef ASIC_ONLY + if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && !ecore_mcp_is_init(p_hwfn)) { + DP_INFO(p_hwfn, + "Emulation: Avoid sending the %s mailbox command\n", + ECORE_IS_BB(p_hwfn->p_dev) ? 
"CFG_VF_MSIX" : + "CFG_PF_VFS_MSIX"); + return ECORE_SUCCESS; + } +#endif + + if (ECORE_IS_BB(p_hwfn->p_dev)) + return ecore_mcp_config_vf_msix_bb(p_hwfn, p_ptt, vf_id, num); + else + return ecore_mcp_config_vf_msix_ah(p_hwfn, p_ptt, num); +} + +enum _ecore_status_t +ecore_mcp_send_drv_version(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, + struct ecore_mcp_drv_version *p_ver) +{ + struct ecore_mcp_mb_params mb_params; + struct drv_version_stc drv_version; + u32 num_words, i; + void *p_name; + OSAL_BE32 val; + enum _ecore_status_t rc; + +#ifndef ASIC_ONLY + if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) + return ECORE_SUCCESS; +#endif + + OSAL_MEM_ZERO(&drv_version, sizeof(drv_version)); + drv_version.version = p_ver->version; + num_words = (MCP_DRV_VER_STR_SIZE - 4) / 4; + for (i = 0; i < num_words; i++) { + /* The driver name is expected to be in a big-endian format */ + p_name = &p_ver->name[i * sizeof(u32)]; + val = OSAL_CPU_TO_BE32(*(u32 *)p_name); + *(u32 *)&drv_version.name[i * sizeof(u32)] = val; + } + + OSAL_MEM_ZERO(&mb_params, sizeof(mb_params)); + mb_params.cmd = DRV_MSG_CODE_SET_VERSION; + mb_params.p_data_src = &drv_version; + mb_params.data_src_size = sizeof(drv_version); + rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); + if (rc != ECORE_SUCCESS) + DP_ERR(p_hwfn, "MCP response failure, aborting\n"); + + return rc; +} + +/* A maximal 100 msec waiting time for the MCP to halt */ +#define ECORE_MCP_HALT_SLEEP_MS 10 +#define ECORE_MCP_HALT_MAX_RETRIES 10 + +enum _ecore_status_t ecore_mcp_halt(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + u32 resp = 0, param = 0, cpu_state, cnt = 0; + enum _ecore_status_t rc; + + rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp, + ¶m); + if (rc != ECORE_SUCCESS) { + DP_ERR(p_hwfn, "MCP response failure, aborting\n"); + return rc; + } + + do { + OSAL_MSLEEP(ECORE_MCP_HALT_SLEEP_MS); + cpu_state = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE); + if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED) + break; + } while (++cnt < ECORE_MCP_HALT_MAX_RETRIES); + + if (cnt == ECORE_MCP_HALT_MAX_RETRIES) { + DP_NOTICE(p_hwfn, false, + "Failed to halt the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n", + ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE), cpu_state); + return ECORE_BUSY; + } + + ecore_mcp_cmd_set_blocking(p_hwfn, true); + + return ECORE_SUCCESS; +} + +#define ECORE_MCP_RESUME_SLEEP_MS 10 + +enum _ecore_status_t ecore_mcp_resume(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + u32 cpu_mode, cpu_state; + + ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff); + + cpu_mode = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE); + cpu_mode &= ~MCP_REG_CPU_MODE_SOFT_HALT; + ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, cpu_mode); + + OSAL_MSLEEP(ECORE_MCP_RESUME_SLEEP_MS); + cpu_state = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE); + + if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED) { + DP_NOTICE(p_hwfn, false, + "Failed to resume the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n", + cpu_mode, cpu_state); + return ECORE_BUSY; + } + + ecore_mcp_cmd_set_blocking(p_hwfn, false); + + return ECORE_SUCCESS; +} + +enum _ecore_status_t +ecore_mcp_ov_update_current_config(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + enum ecore_ov_client client) +{ + u32 resp = 0, param = 0; + u32 drv_mb_param; + enum _ecore_status_t rc; + + switch (client) { + case ECORE_OV_CLIENT_DRV: + drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OS; + break; + case ECORE_OV_CLIENT_USER: + drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OTHER; + break; + case 
ECORE_OV_CLIENT_VENDOR_SPEC: + drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC; + break; + default: + DP_NOTICE(p_hwfn, true, "Invalid client type %d\n", client); + return ECORE_INVAL; + } + + rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_CURR_CFG, + drv_mb_param, &resp, ¶m); + if (rc != ECORE_SUCCESS) + DP_ERR(p_hwfn, "MCP response failure, aborting\n"); + + return rc; +} + +enum _ecore_status_t +ecore_mcp_ov_update_driver_state(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + enum ecore_ov_driver_state drv_state) +{ + u32 resp = 0, param = 0; + u32 drv_mb_param; + enum _ecore_status_t rc; + + switch (drv_state) { + case ECORE_OV_DRIVER_STATE_NOT_LOADED: + drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED; + break; + case ECORE_OV_DRIVER_STATE_DISABLED: + drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED; + break; + case ECORE_OV_DRIVER_STATE_ACTIVE: + drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE; + break; + default: + DP_NOTICE(p_hwfn, true, "Invalid driver state %d\n", drv_state); + return ECORE_INVAL; + } + + rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE, + drv_mb_param, &resp, ¶m); + if (rc != ECORE_SUCCESS) + DP_ERR(p_hwfn, "Failed to send driver state\n"); + + return rc; +} + +enum _ecore_status_t +ecore_mcp_ov_get_fc_npiv(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, + struct ecore_fc_npiv_tbl *p_table) +{ + return 0; +} + +enum _ecore_status_t +ecore_mcp_ov_update_mtu(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, + u16 mtu) +{ + u32 resp = 0, param = 0, drv_mb_param = 0; + enum _ecore_status_t rc; + + SET_MFW_FIELD(drv_mb_param, DRV_MB_PARAM_OV_MTU_SIZE, (u32)mtu); + rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_MTU, + drv_mb_param, &resp, ¶m); + if (rc != ECORE_SUCCESS) + DP_ERR(p_hwfn, "Failed to send mtu value, rc = %d\n", rc); + + return rc; +} + +enum _ecore_status_t +ecore_mcp_ov_update_mac(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, + u8 *mac) +{ + struct ecore_mcp_mb_params mb_params; + union drv_union_data union_data; + enum _ecore_status_t rc; + + OSAL_MEM_ZERO(&mb_params, sizeof(mb_params)); + mb_params.cmd = DRV_MSG_CODE_SET_VMAC; + SET_MFW_FIELD(mb_params.param, DRV_MSG_CODE_VMAC_TYPE, + DRV_MSG_CODE_VMAC_TYPE_MAC); + mb_params.param |= MCP_PF_ID(p_hwfn); + OSAL_MEMCPY(&union_data.raw_data, mac, ETH_ALEN); + mb_params.p_data_src = &union_data; + rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); + if (rc != ECORE_SUCCESS) + DP_ERR(p_hwfn, "Failed to send mac address, rc = %d\n", rc); + + return rc; +} + +enum _ecore_status_t +ecore_mcp_ov_update_eswitch(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, + enum ecore_ov_eswitch eswitch) +{ + enum _ecore_status_t rc; + u32 resp = 0, param = 0; + u32 drv_mb_param; + + switch (eswitch) { + case ECORE_OV_ESWITCH_NONE: + drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_NONE; + break; + case ECORE_OV_ESWITCH_VEB: + drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEB; + break; + case ECORE_OV_ESWITCH_VEPA: + drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEPA; + break; + default: + DP_ERR(p_hwfn, "Invalid eswitch mode %d\n", eswitch); + return ECORE_INVAL; + } + + rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE, + drv_mb_param, &resp, ¶m); + if (rc != ECORE_SUCCESS) + DP_ERR(p_hwfn, "Failed to send eswitch mode, rc = %d\n", rc); + + return rc; +} + +enum _ecore_status_t ecore_mcp_set_led(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + enum ecore_led_mode mode) +{ + u32 resp = 0, param = 0, 
drv_mb_param; + enum _ecore_status_t rc; + + switch (mode) { + case ECORE_LED_MODE_ON: + drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_ON; + break; + case ECORE_LED_MODE_OFF: + drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OFF; + break; + case ECORE_LED_MODE_RESTORE: + drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OPER; + break; + default: + DP_NOTICE(p_hwfn, true, "Invalid LED mode %d\n", mode); + return ECORE_INVAL; + } + + rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LED_MODE, + drv_mb_param, &resp, ¶m); + if (rc != ECORE_SUCCESS) + DP_ERR(p_hwfn, "MCP response failure, aborting\n"); + + return rc; +} + +enum _ecore_status_t ecore_mcp_mask_parities(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 mask_parities) +{ + u32 resp = 0, param = 0; + enum _ecore_status_t rc; + + rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MASK_PARITIES, + mask_parities, &resp, ¶m); + + if (rc != ECORE_SUCCESS) { + DP_ERR(p_hwfn, + "MCP response failure for mask parities, aborting\n"); + } else if (resp != FW_MSG_CODE_OK) { + DP_ERR(p_hwfn, + "MCP did not ack mask parity request. Old MFW?\n"); + rc = ECORE_INVAL; + } + + return rc; +} + +enum _ecore_status_t ecore_mcp_nvm_read(struct ecore_dev *p_dev, u32 addr, + u8 *p_buf, u32 len) +{ + struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); + u32 bytes_left, offset, bytes_to_copy, buf_size; + u32 nvm_offset, resp, param; + struct ecore_ptt *p_ptt; + enum _ecore_status_t rc = ECORE_SUCCESS; + + p_ptt = ecore_ptt_acquire(p_hwfn); + if (!p_ptt) + return ECORE_BUSY; + + bytes_left = len; + offset = 0; + while (bytes_left > 0) { + bytes_to_copy = OSAL_MIN_T(u32, bytes_left, + MCP_DRV_NVM_BUF_LEN); + nvm_offset = (addr + offset) | (bytes_to_copy << + DRV_MB_PARAM_NVM_LEN_OFFSET); + rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, + DRV_MSG_CODE_NVM_READ_NVRAM, + nvm_offset, &resp, ¶m, &buf_size, + (u32 *)(p_buf + offset)); + if (rc != ECORE_SUCCESS) { + DP_NOTICE(p_dev, false, + "ecore_mcp_nvm_rd_cmd() failed, rc = %d\n", + rc); + resp = FW_MSG_CODE_ERROR; + break; + } + + if (resp != FW_MSG_CODE_NVM_OK) { + DP_NOTICE(p_dev, false, + "nvm read failed, resp = 0x%08x\n", resp); + rc = ECORE_UNKNOWN_ERROR; + break; + } + + /* This can be a lengthy process, and it's possible scheduler + * isn't preemptible. Sleep a bit to prevent CPU hogging. + */ + if (bytes_left % 0x1000 < + (bytes_left - buf_size) % 0x1000) + OSAL_MSLEEP(1); + + offset += buf_size; + bytes_left -= buf_size; + } + + p_dev->mcp_nvm_resp = resp; + ecore_ptt_release(p_hwfn, p_ptt); + + return rc; +} + +enum _ecore_status_t ecore_mcp_phy_read(struct ecore_dev *p_dev, u32 cmd, + u32 addr, u8 *p_buf, u32 *p_len) +{ + struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); + struct ecore_ptt *p_ptt; + u32 resp = 0, param; + enum _ecore_status_t rc; + + p_ptt = ecore_ptt_acquire(p_hwfn); + if (!p_ptt) + return ECORE_BUSY; + + rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, + (cmd == ECORE_PHY_CORE_READ) ? 
+ DRV_MSG_CODE_PHY_CORE_READ : + DRV_MSG_CODE_PHY_RAW_READ, + addr, &resp, ¶m, p_len, (u32 *)p_buf); + if (rc != ECORE_SUCCESS) + DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc); + + p_dev->mcp_nvm_resp = resp; + ecore_ptt_release(p_hwfn, p_ptt); + + return rc; +} + +enum _ecore_status_t ecore_mcp_nvm_resp(struct ecore_dev *p_dev, u8 *p_buf) +{ + struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); + struct ecore_ptt *p_ptt; + + p_ptt = ecore_ptt_acquire(p_hwfn); + if (!p_ptt) + return ECORE_BUSY; + + OSAL_MEMCPY(p_buf, &p_dev->mcp_nvm_resp, sizeof(p_dev->mcp_nvm_resp)); + ecore_ptt_release(p_hwfn, p_ptt); + + return ECORE_SUCCESS; +} + +enum _ecore_status_t ecore_mcp_nvm_del_file(struct ecore_dev *p_dev, u32 addr) +{ + struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); + struct ecore_ptt *p_ptt; + u32 resp = 0, param; + enum _ecore_status_t rc; + + p_ptt = ecore_ptt_acquire(p_hwfn); + if (!p_ptt) + return ECORE_BUSY; + rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_DEL_FILE, addr, + &resp, ¶m); + p_dev->mcp_nvm_resp = resp; + ecore_ptt_release(p_hwfn, p_ptt); + + return rc; +} + +enum _ecore_status_t ecore_mcp_nvm_put_file_begin(struct ecore_dev *p_dev, + u32 addr) +{ + struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); + struct ecore_ptt *p_ptt; + u32 resp = 0, param; + enum _ecore_status_t rc; + + p_ptt = ecore_ptt_acquire(p_hwfn); + if (!p_ptt) + return ECORE_BUSY; + rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_PUT_FILE_BEGIN, addr, + &resp, ¶m); + p_dev->mcp_nvm_resp = resp; + ecore_ptt_release(p_hwfn, p_ptt); + + return rc; +} + +/* rc receives ECORE_INVAL as default parameter because + * it might not enter the while loop if the len is 0 + */ +enum _ecore_status_t ecore_mcp_nvm_write(struct ecore_dev *p_dev, u32 cmd, + u32 addr, u8 *p_buf, u32 len) +{ + u32 buf_idx, buf_size, nvm_cmd, nvm_offset; + u32 resp = FW_MSG_CODE_ERROR, param; + struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); + enum _ecore_status_t rc = ECORE_INVAL; + struct ecore_ptt *p_ptt; + + p_ptt = ecore_ptt_acquire(p_hwfn); + if (!p_ptt) + return ECORE_BUSY; + + switch (cmd) { + case ECORE_PUT_FILE_DATA: + nvm_cmd = DRV_MSG_CODE_NVM_PUT_FILE_DATA; + break; + case ECORE_NVM_WRITE_NVRAM: + nvm_cmd = DRV_MSG_CODE_NVM_WRITE_NVRAM; + break; + case ECORE_EXT_PHY_FW_UPGRADE: + nvm_cmd = DRV_MSG_CODE_EXT_PHY_FW_UPGRADE; + break; + default: + DP_NOTICE(p_hwfn, true, "Invalid nvm write command 0x%x\n", + cmd); + rc = ECORE_INVAL; + goto out; + } + + buf_idx = 0; + while (buf_idx < len) { + buf_size = OSAL_MIN_T(u32, (len - buf_idx), + MCP_DRV_NVM_BUF_LEN); + nvm_offset = ((buf_size << DRV_MB_PARAM_NVM_LEN_OFFSET) | + addr) + + buf_idx; + rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt, nvm_cmd, nvm_offset, + &resp, ¶m, buf_size, + (u32 *)&p_buf[buf_idx]); + if (rc != ECORE_SUCCESS) { + DP_NOTICE(p_dev, false, + "ecore_mcp_nvm_write() failed, rc = %d\n", + rc); + resp = FW_MSG_CODE_ERROR; + break; + } + + if (resp != FW_MSG_CODE_OK && + resp != FW_MSG_CODE_NVM_OK && + resp != FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK) { + DP_NOTICE(p_dev, false, + "nvm write failed, resp = 0x%08x\n", resp); + rc = ECORE_UNKNOWN_ERROR; + break; + } + + /* This can be a lengthy process, and it's possible scheduler + * isn't preemptible. Sleep a bit to prevent CPU hogging. 
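+ * The condition below is true whenever the write crosses a 0x1000 + * (4 KB) boundary, so the short sleep runs roughly once per 4 KB + * written.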
+ */ + if (buf_idx % 0x1000 > + (buf_idx + buf_size) % 0x1000) + OSAL_MSLEEP(1); + + buf_idx += buf_size; + } + + p_dev->mcp_nvm_resp = resp; +out: + ecore_ptt_release(p_hwfn, p_ptt); + + return rc; +} + +enum _ecore_status_t ecore_mcp_phy_write(struct ecore_dev *p_dev, u32 cmd, + u32 addr, u8 *p_buf, u32 len) +{ + struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); + u32 resp = 0, param, nvm_cmd; + struct ecore_ptt *p_ptt; + enum _ecore_status_t rc; + + p_ptt = ecore_ptt_acquire(p_hwfn); + if (!p_ptt) + return ECORE_BUSY; + + nvm_cmd = (cmd == ECORE_PHY_CORE_WRITE) ? DRV_MSG_CODE_PHY_CORE_WRITE : + DRV_MSG_CODE_PHY_RAW_WRITE; + rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt, nvm_cmd, addr, + &resp, ¶m, len, (u32 *)p_buf); + if (rc != ECORE_SUCCESS) + DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc); + p_dev->mcp_nvm_resp = resp; + ecore_ptt_release(p_hwfn, p_ptt); + + return rc; +} + +enum _ecore_status_t ecore_mcp_nvm_set_secure_mode(struct ecore_dev *p_dev, + u32 addr) +{ + struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); + struct ecore_ptt *p_ptt; + u32 resp = 0, param; + enum _ecore_status_t rc; + + p_ptt = ecore_ptt_acquire(p_hwfn); + if (!p_ptt) + return ECORE_BUSY; + + rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_SECURE_MODE, addr, + &resp, ¶m); + p_dev->mcp_nvm_resp = resp; + ecore_ptt_release(p_hwfn, p_ptt); + + return rc; +} + +enum _ecore_status_t ecore_mcp_phy_sfp_read(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 port, u32 addr, u32 offset, + u32 len, u8 *p_buf) +{ + u32 bytes_left, bytes_to_copy, buf_size, nvm_offset; + u32 resp, param; + enum _ecore_status_t rc; + + nvm_offset = (port << DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET) | + (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET); + addr = offset; + offset = 0; + bytes_left = len; + while (bytes_left > 0) { + bytes_to_copy = OSAL_MIN_T(u32, bytes_left, + MAX_I2C_TRANSACTION_SIZE); + nvm_offset &= (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK | + DRV_MB_PARAM_TRANSCEIVER_PORT_MASK); + nvm_offset |= ((addr + offset) << + DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET); + nvm_offset |= (bytes_to_copy << + DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET); + rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, + DRV_MSG_CODE_TRANSCEIVER_READ, + nvm_offset, &resp, ¶m, &buf_size, + (u32 *)(p_buf + offset)); + if (rc != ECORE_SUCCESS) { + DP_NOTICE(p_hwfn, false, + "Failed to send a transceiver read command to the MFW. 
rc = %d.\n", + rc); + return rc; + } + + if (resp == FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) + return ECORE_NODEV; + else if (resp != FW_MSG_CODE_TRANSCEIVER_DIAG_OK) + return ECORE_UNKNOWN_ERROR; + + offset += buf_size; + bytes_left -= buf_size; + } + + return ECORE_SUCCESS; +} + +enum _ecore_status_t ecore_mcp_phy_sfp_write(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 port, u32 addr, u32 offset, + u32 len, u8 *p_buf) +{ + u32 buf_idx, buf_size, nvm_offset, resp, param; + enum _ecore_status_t rc; + + nvm_offset = (port << DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET) | + (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET); + buf_idx = 0; + while (buf_idx < len) { + buf_size = OSAL_MIN_T(u32, (len - buf_idx), + MAX_I2C_TRANSACTION_SIZE); + nvm_offset &= (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK | + DRV_MB_PARAM_TRANSCEIVER_PORT_MASK); + nvm_offset |= ((offset + buf_idx) << + DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET); + nvm_offset |= (buf_size << + DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET); + rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt, + DRV_MSG_CODE_TRANSCEIVER_WRITE, + nvm_offset, &resp, ¶m, buf_size, + (u32 *)&p_buf[buf_idx]); + if (rc != ECORE_SUCCESS) { + DP_NOTICE(p_hwfn, false, + "Failed to send a transceiver write command to the MFW. rc = %d.\n", + rc); + return rc; + } + + if (resp == FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) + return ECORE_NODEV; + else if (resp != FW_MSG_CODE_TRANSCEIVER_DIAG_OK) + return ECORE_UNKNOWN_ERROR; + + buf_idx += buf_size; + } + + return ECORE_SUCCESS; +} + +enum _ecore_status_t ecore_mcp_gpio_read(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u16 gpio, u32 *gpio_val) +{ + enum _ecore_status_t rc = ECORE_SUCCESS; + u32 drv_mb_param = 0, rsp = 0; + + drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_OFFSET); + + rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_READ, + drv_mb_param, &rsp, gpio_val); + + if (rc != ECORE_SUCCESS) + return rc; + + if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK) + return ECORE_UNKNOWN_ERROR; + + return ECORE_SUCCESS; +} + +enum _ecore_status_t ecore_mcp_gpio_write(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u16 gpio, u16 gpio_val) +{ + enum _ecore_status_t rc = ECORE_SUCCESS; + u32 drv_mb_param = 0, param, rsp = 0; + + drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_OFFSET) | + (gpio_val << DRV_MB_PARAM_GPIO_VALUE_OFFSET); + + rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_WRITE, + drv_mb_param, &rsp, ¶m); + + if (rc != ECORE_SUCCESS) + return rc; + + if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK) + return ECORE_UNKNOWN_ERROR; + + return ECORE_SUCCESS; +} + +enum _ecore_status_t ecore_mcp_gpio_info(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u16 gpio, u32 *gpio_direction, + u32 *gpio_ctrl) +{ + u32 drv_mb_param = 0, rsp, val = 0; + enum _ecore_status_t rc = ECORE_SUCCESS; + + drv_mb_param = gpio << DRV_MB_PARAM_GPIO_NUMBER_OFFSET; + + rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_INFO, + drv_mb_param, &rsp, &val); + if (rc != ECORE_SUCCESS) + return rc; + + *gpio_direction = (val & DRV_MB_PARAM_GPIO_DIRECTION_MASK) >> + DRV_MB_PARAM_GPIO_DIRECTION_OFFSET; + *gpio_ctrl = (val & DRV_MB_PARAM_GPIO_CTRL_MASK) >> + DRV_MB_PARAM_GPIO_CTRL_OFFSET; + + if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK) + return ECORE_UNKNOWN_ERROR; + + return ECORE_SUCCESS; +} + +enum _ecore_status_t ecore_mcp_bist_register_test(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + u32 drv_mb_param = 0, rsp, param; + enum _ecore_status_t rc = ECORE_SUCCESS; + + drv_mb_param = 
(DRV_MB_PARAM_BIST_REGISTER_TEST << + DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET); + + rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST, + drv_mb_param, &rsp, ¶m); + + if (rc != ECORE_SUCCESS) + return rc; + + if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) || + (param != DRV_MB_PARAM_BIST_RC_PASSED)) + rc = ECORE_UNKNOWN_ERROR; + + return rc; +} + +enum _ecore_status_t ecore_mcp_bist_clock_test(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + u32 drv_mb_param, rsp, param; + enum _ecore_status_t rc = ECORE_SUCCESS; + + drv_mb_param = (DRV_MB_PARAM_BIST_CLOCK_TEST << + DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET); + + rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST, + drv_mb_param, &rsp, ¶m); + + if (rc != ECORE_SUCCESS) + return rc; + + if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) || + (param != DRV_MB_PARAM_BIST_RC_PASSED)) + rc = ECORE_UNKNOWN_ERROR; + + return rc; +} + +enum _ecore_status_t ecore_mcp_bist_nvm_test_get_num_images( + struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 *num_images) +{ + u32 drv_mb_param = 0, rsp = 0; + enum _ecore_status_t rc = ECORE_SUCCESS; + + drv_mb_param = (DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES << + DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET); + + rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST, + drv_mb_param, &rsp, num_images); + + if (rc != ECORE_SUCCESS) + return rc; + + if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK)) + rc = ECORE_UNKNOWN_ERROR; + + return rc; +} + +enum _ecore_status_t ecore_mcp_bist_nvm_test_get_image_att( + struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, + struct bist_nvm_image_att *p_image_att, u32 image_index) +{ + u32 buf_size, nvm_offset, resp, param; + enum _ecore_status_t rc; + + nvm_offset = (DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX << + DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET); + nvm_offset |= (image_index << + DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_OFFSET); + rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST, + nvm_offset, &resp, ¶m, &buf_size, + (u32 *)p_image_att); + if (rc != ECORE_SUCCESS) + return rc; + + if (((resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) || + (p_image_att->return_code != 1)) + rc = ECORE_UNKNOWN_ERROR; + + return rc; +} + +enum _ecore_status_t +ecore_mcp_get_temperature_info(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_temperature_info *p_temp_info) +{ + struct ecore_temperature_sensor *p_temp_sensor; + struct temperature_status_stc mfw_temp_info; + struct ecore_mcp_mb_params mb_params; + u32 val; + enum _ecore_status_t rc; + u8 i; + + OSAL_MEM_ZERO(&mb_params, sizeof(mb_params)); + mb_params.cmd = DRV_MSG_CODE_GET_TEMPERATURE; + mb_params.p_data_dst = &mfw_temp_info; + mb_params.data_dst_size = sizeof(mfw_temp_info); + rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); + if (rc != ECORE_SUCCESS) + return rc; + + OSAL_BUILD_BUG_ON(ECORE_MAX_NUM_OF_SENSORS != MAX_NUM_OF_SENSORS); + p_temp_info->num_sensors = OSAL_MIN_T(u32, mfw_temp_info.num_of_sensors, + ECORE_MAX_NUM_OF_SENSORS); + for (i = 0; i < p_temp_info->num_sensors; i++) { + val = mfw_temp_info.sensor[i]; + p_temp_sensor = &p_temp_info->sensors[i]; + p_temp_sensor->sensor_location = (val & SENSOR_LOCATION_MASK) >> + SENSOR_LOCATION_OFFSET; + p_temp_sensor->threshold_high = (val & THRESHOLD_HIGH_MASK) >> + THRESHOLD_HIGH_OFFSET; + p_temp_sensor->critical = (val & CRITICAL_TEMPERATURE_MASK) >> + CRITICAL_TEMPERATURE_OFFSET; + p_temp_sensor->current_temp = (val & CURRENT_TEMP_MASK) >> + CURRENT_TEMP_OFFSET; + } + + return ECORE_SUCCESS; +} + +enum _ecore_status_t 
ecore_mcp_get_mba_versions( + struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_mba_vers *p_mba_vers) +{ + u32 buf_size, resp, param; + enum _ecore_status_t rc; + + rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MBA_VERSION, + 0, &resp, ¶m, &buf_size, + &p_mba_vers->mba_vers[0]); + + if (rc != ECORE_SUCCESS) + return rc; + + if ((resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK) + rc = ECORE_UNKNOWN_ERROR; + + if (buf_size != MCP_DRV_NVM_BUF_LEN) + rc = ECORE_UNKNOWN_ERROR; + + return rc; +} + +enum _ecore_status_t ecore_mcp_mem_ecc_events(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u64 *num_events) +{ + u32 rsp; + + return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MEM_ECC_EVENTS, + 0, &rsp, (u32 *)num_events); +} + +static enum resource_id_enum +ecore_mcp_get_mfw_res_id(enum ecore_resources res_id) +{ + enum resource_id_enum mfw_res_id = RESOURCE_NUM_INVALID; + + switch (res_id) { + case ECORE_SB: + mfw_res_id = RESOURCE_NUM_SB_E; + break; + case ECORE_L2_QUEUE: + mfw_res_id = RESOURCE_NUM_L2_QUEUE_E; + break; + case ECORE_VPORT: + mfw_res_id = RESOURCE_NUM_VPORT_E; + break; + case ECORE_RSS_ENG: + mfw_res_id = RESOURCE_NUM_RSS_ENGINES_E; + break; + case ECORE_PQ: + mfw_res_id = RESOURCE_NUM_PQ_E; + break; + case ECORE_RL: + mfw_res_id = RESOURCE_NUM_RL_E; + break; + case ECORE_MAC: + case ECORE_VLAN: + /* Each VFC resource can accommodate both a MAC and a VLAN */ + mfw_res_id = RESOURCE_VFC_FILTER_E; + break; + case ECORE_ILT: + mfw_res_id = RESOURCE_ILT_E; + break; + case ECORE_LL2_QUEUE: + mfw_res_id = RESOURCE_LL2_QUEUE_E; + break; + case ECORE_RDMA_CNQ_RAM: + case ECORE_CMDQS_CQS: + /* CNQ/CMDQS are the same resource */ + mfw_res_id = RESOURCE_CQS_E; + break; + case ECORE_RDMA_STATS_QUEUE: + mfw_res_id = RESOURCE_RDMA_STATS_QUEUE_E; + break; + case ECORE_BDQ: + mfw_res_id = RESOURCE_BDQ_E; + break; + default: + break; + } + + return mfw_res_id; +} + +#define ECORE_RESC_ALLOC_VERSION_MAJOR 2 +#define ECORE_RESC_ALLOC_VERSION_MINOR 0 +#define ECORE_RESC_ALLOC_VERSION \ + ((ECORE_RESC_ALLOC_VERSION_MAJOR << \ + DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_OFFSET) | \ + (ECORE_RESC_ALLOC_VERSION_MINOR << \ + DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_OFFSET)) + +struct ecore_resc_alloc_in_params { + u32 cmd; + enum ecore_resources res_id; + u32 resc_max_val; +}; + +struct ecore_resc_alloc_out_params { + u32 mcp_resp; + u32 mcp_param; + u32 resc_num; + u32 resc_start; + u32 vf_resc_num; + u32 vf_resc_start; + u32 flags; +}; + +#define ECORE_RECOVERY_PROLOG_SLEEP_MS 100 + +enum _ecore_status_t ecore_recovery_prolog(struct ecore_dev *p_dev) +{ + struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); + struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt; + enum _ecore_status_t rc; + + /* Allow ongoing PCIe transactions to complete */ + OSAL_MSLEEP(ECORE_RECOVERY_PROLOG_SLEEP_MS); + + /* Clear the PF's internal FID_enable in the PXP */ + rc = ecore_pglueb_set_pfid_enable(p_hwfn, p_ptt, false); + if (rc != ECORE_SUCCESS) + DP_NOTICE(p_hwfn, false, + "ecore_pglueb_set_pfid_enable() failed. 
rc = %d.\n", + rc); + + return rc; +} + +static enum _ecore_status_t +ecore_mcp_resc_allocation_msg(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_resc_alloc_in_params *p_in_params, + struct ecore_resc_alloc_out_params *p_out_params) +{ + struct ecore_mcp_mb_params mb_params; + struct resource_info mfw_resc_info; + enum _ecore_status_t rc; + + OSAL_MEM_ZERO(&mfw_resc_info, sizeof(mfw_resc_info)); + + mfw_resc_info.res_id = ecore_mcp_get_mfw_res_id(p_in_params->res_id); + if (mfw_resc_info.res_id == RESOURCE_NUM_INVALID) { + DP_ERR(p_hwfn, + "Failed to match resource %d [%s] with the MFW resources\n", + p_in_params->res_id, + ecore_hw_get_resc_name(p_in_params->res_id)); + return ECORE_INVAL; + } + + switch (p_in_params->cmd) { + case DRV_MSG_SET_RESOURCE_VALUE_MSG: + mfw_resc_info.size = p_in_params->resc_max_val; + /* Fallthrough */ + case DRV_MSG_GET_RESOURCE_ALLOC_MSG: + break; + default: + DP_ERR(p_hwfn, "Unexpected resource alloc command [0x%08x]\n", + p_in_params->cmd); + return ECORE_INVAL; + } + + OSAL_MEM_ZERO(&mb_params, sizeof(mb_params)); + mb_params.cmd = p_in_params->cmd; + mb_params.param = ECORE_RESC_ALLOC_VERSION; + mb_params.p_data_src = &mfw_resc_info; + mb_params.data_src_size = sizeof(mfw_resc_info); + mb_params.p_data_dst = mb_params.p_data_src; + mb_params.data_dst_size = mb_params.data_src_size; + + DP_VERBOSE(p_hwfn, ECORE_MSG_SP, + "Resource message request: cmd 0x%08x, res_id %d [%s], hsi_version %d.%d, val 0x%x\n", + p_in_params->cmd, p_in_params->res_id, + ecore_hw_get_resc_name(p_in_params->res_id), + GET_MFW_FIELD(mb_params.param, + DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR), + GET_MFW_FIELD(mb_params.param, + DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR), + p_in_params->resc_max_val); + + rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); + if (rc != ECORE_SUCCESS) + return rc; + + p_out_params->mcp_resp = mb_params.mcp_resp; + p_out_params->mcp_param = mb_params.mcp_param; + p_out_params->resc_num = mfw_resc_info.size; + p_out_params->resc_start = mfw_resc_info.offset; + p_out_params->vf_resc_num = mfw_resc_info.vf_size; + p_out_params->vf_resc_start = mfw_resc_info.vf_offset; + p_out_params->flags = mfw_resc_info.flags; + + DP_VERBOSE(p_hwfn, ECORE_MSG_SP, + "Resource message response: mfw_hsi_version %d.%d, num 0x%x, start 0x%x, vf_num 0x%x, vf_start 0x%x, flags 0x%08x\n", + GET_MFW_FIELD(p_out_params->mcp_param, + FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR), + GET_MFW_FIELD(p_out_params->mcp_param, + FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR), + p_out_params->resc_num, p_out_params->resc_start, + p_out_params->vf_resc_num, p_out_params->vf_resc_start, + p_out_params->flags); + + return ECORE_SUCCESS; +} + +enum _ecore_status_t +ecore_mcp_set_resc_max_val(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, + enum ecore_resources res_id, u32 resc_max_val, + u32 *p_mcp_resp) +{ + struct ecore_resc_alloc_out_params out_params; + struct ecore_resc_alloc_in_params in_params; + enum _ecore_status_t rc; + + OSAL_MEM_ZERO(&in_params, sizeof(in_params)); + in_params.cmd = DRV_MSG_SET_RESOURCE_VALUE_MSG; + in_params.res_id = res_id; + in_params.resc_max_val = resc_max_val; + OSAL_MEM_ZERO(&out_params, sizeof(out_params)); + rc = ecore_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params, + &out_params); + if (rc != ECORE_SUCCESS) + return rc; + + *p_mcp_resp = out_params.mcp_resp; + + return ECORE_SUCCESS; +} + +enum _ecore_status_t +ecore_mcp_get_resc_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, + enum ecore_resources res_id, u32 
*p_mcp_resp, + u32 *p_resc_num, u32 *p_resc_start) +{ + struct ecore_resc_alloc_out_params out_params; + struct ecore_resc_alloc_in_params in_params; + enum _ecore_status_t rc; + + OSAL_MEM_ZERO(&in_params, sizeof(in_params)); + in_params.cmd = DRV_MSG_GET_RESOURCE_ALLOC_MSG; + in_params.res_id = res_id; + OSAL_MEM_ZERO(&out_params, sizeof(out_params)); + rc = ecore_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params, + &out_params); + if (rc != ECORE_SUCCESS) + return rc; + + *p_mcp_resp = out_params.mcp_resp; + + if (*p_mcp_resp == FW_MSG_CODE_RESOURCE_ALLOC_OK) { + *p_resc_num = out_params.resc_num; + *p_resc_start = out_params.resc_start; + } + + return ECORE_SUCCESS; +} + +enum _ecore_status_t ecore_mcp_initiate_pf_flr(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + u32 mcp_resp, mcp_param; + + return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_INITIATE_PF_FLR, 0, + &mcp_resp, &mcp_param); +} + +static enum _ecore_status_t ecore_mcp_resource_cmd(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 param, u32 *p_mcp_resp, + u32 *p_mcp_param) +{ + enum _ecore_status_t rc; + + rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_RESOURCE_CMD, param, + p_mcp_resp, p_mcp_param); + if (rc != ECORE_SUCCESS) + return rc; + + if (*p_mcp_resp == FW_MSG_CODE_UNSUPPORTED) { + DP_INFO(p_hwfn, + "The resource command is unsupported by the MFW\n"); + return ECORE_NOTIMPL; + } + + if (*p_mcp_param == RESOURCE_OPCODE_UNKNOWN_CMD) { + u8 opcode = GET_MFW_FIELD(param, RESOURCE_CMD_REQ_OPCODE); + + DP_NOTICE(p_hwfn, false, + "The resource command is unknown to the MFW [param 0x%08x, opcode %d]\n", + param, opcode); + return ECORE_INVAL; + } + + return rc; +} + +enum _ecore_status_t +__ecore_mcp_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, + struct ecore_resc_lock_params *p_params) +{ + u32 param = 0, mcp_resp = 0, mcp_param = 0; + u8 opcode; + enum _ecore_status_t rc; + + switch (p_params->timeout) { + case ECORE_MCP_RESC_LOCK_TO_DEFAULT: + opcode = RESOURCE_OPCODE_REQ; + p_params->timeout = 0; + break; + case ECORE_MCP_RESC_LOCK_TO_NONE: + opcode = RESOURCE_OPCODE_REQ_WO_AGING; + p_params->timeout = 0; + break; + default: + opcode = RESOURCE_OPCODE_REQ_W_AGING; + break; + } + + SET_MFW_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource); + SET_MFW_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode); + SET_MFW_FIELD(param, RESOURCE_CMD_REQ_AGE, p_params->timeout); + + DP_VERBOSE(p_hwfn, ECORE_MSG_SP, + "Resource lock request: param 0x%08x [age %d, opcode %d, resource %d]\n", + param, p_params->timeout, opcode, p_params->resource); + + /* Attempt to acquire the resource */ + rc = ecore_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp, + &mcp_param); + if (rc != ECORE_SUCCESS) + return rc; + + /* Analyze the response */ + p_params->owner = GET_MFW_FIELD(mcp_param, RESOURCE_CMD_RSP_OWNER); + opcode = GET_MFW_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE); + + DP_VERBOSE(p_hwfn, ECORE_MSG_SP, + "Resource lock response: mcp_param 0x%08x [opcode %d, owner %d]\n", + mcp_param, opcode, p_params->owner); + + switch (opcode) { + case RESOURCE_OPCODE_GNT: + p_params->b_granted = true; + break; + case RESOURCE_OPCODE_BUSY: + p_params->b_granted = false; + break; + default: + DP_NOTICE(p_hwfn, false, + "Unexpected opcode in resource lock response [mcp_param 0x%08x, opcode %d]\n", + mcp_param, opcode); + return ECORE_INVAL; + } + + return ECORE_SUCCESS; +} + +enum _ecore_status_t +ecore_mcp_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, + struct ecore_resc_lock_params *p_params) +{ + 
u32 retry_cnt = 0; + enum _ecore_status_t rc; + + do { + /* No need for an interval before the first iteration */ + if (retry_cnt) { + if (p_params->sleep_b4_retry) { + u16 retry_interval_in_ms = + DIV_ROUND_UP(p_params->retry_interval, + 1000); + + OSAL_MSLEEP(retry_interval_in_ms); + } else { + OSAL_UDELAY(p_params->retry_interval); + } + } + + rc = __ecore_mcp_resc_lock(p_hwfn, p_ptt, p_params); + if (rc != ECORE_SUCCESS) + return rc; + + if (p_params->b_granted) + break; + } while (retry_cnt++ < p_params->retry_num); + + return ECORE_SUCCESS; +} + +void ecore_mcp_resc_lock_default_init(struct ecore_resc_lock_params *p_lock, + struct ecore_resc_unlock_params *p_unlock, + enum ecore_resc_lock resource, + bool b_is_permanent) +{ + if (p_lock != OSAL_NULL) { + OSAL_MEM_ZERO(p_lock, sizeof(*p_lock)); + + /* Permanent resources don't require aging, and there's no + * point in trying to acquire them more than once since it's + * unexpected another entity would release them. + */ + if (b_is_permanent) { + p_lock->timeout = ECORE_MCP_RESC_LOCK_TO_NONE; + } else { + p_lock->retry_num = ECORE_MCP_RESC_LOCK_RETRY_CNT_DFLT; + p_lock->retry_interval = + ECORE_MCP_RESC_LOCK_RETRY_VAL_DFLT; + p_lock->sleep_b4_retry = true; + } + + p_lock->resource = resource; + } + + if (p_unlock != OSAL_NULL) { + OSAL_MEM_ZERO(p_unlock, sizeof(*p_unlock)); + p_unlock->resource = resource; + } +} + +enum _ecore_status_t +ecore_mcp_resc_unlock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, + struct ecore_resc_unlock_params *p_params) +{ + u32 param = 0, mcp_resp, mcp_param; + u8 opcode; + enum _ecore_status_t rc; + + opcode = p_params->b_force ? RESOURCE_OPCODE_FORCE_RELEASE + : RESOURCE_OPCODE_RELEASE; + SET_MFW_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource); + SET_MFW_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode); + + DP_VERBOSE(p_hwfn, ECORE_MSG_SP, + "Resource unlock request: param 0x%08x [opcode %d, resource %d]\n", + param, opcode, p_params->resource); + + /* Attempt to release the resource */ + rc = ecore_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp, + &mcp_param); + if (rc != ECORE_SUCCESS) + return rc; + + /* Analyze the response */ + opcode = GET_MFW_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE); + + DP_VERBOSE(p_hwfn, ECORE_MSG_SP, + "Resource unlock response: mcp_param 0x%08x [opcode %d]\n", + mcp_param, opcode); + + switch (opcode) { + case RESOURCE_OPCODE_RELEASED_PREVIOUS: + DP_INFO(p_hwfn, + "Resource unlock request for an already released resource [%d]\n", + p_params->resource); + /* Fallthrough */ + case RESOURCE_OPCODE_RELEASED: + p_params->b_released = true; + break; + case RESOURCE_OPCODE_WRONG_OWNER: + p_params->b_released = false; + break; + default: + DP_NOTICE(p_hwfn, false, + "Unexpected opcode in resource unlock response [mcp_param 0x%08x, opcode %d]\n", + mcp_param, opcode); + return ECORE_INVAL; + } + + return ECORE_SUCCESS; +} + +bool ecore_mcp_is_smart_an_supported(struct ecore_hwfn *p_hwfn) +{ + return !!(p_hwfn->mcp_info->capabilities & + FW_MB_PARAM_FEATURE_SUPPORT_SMARTLINQ); +} + +enum _ecore_status_t ecore_mcp_get_capabilities(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + u32 mcp_resp; + enum _ecore_status_t rc; + + rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MFW_FEATURE_SUPPORT, + 0, &mcp_resp, &p_hwfn->mcp_info->capabilities); + if (rc == ECORE_SUCCESS) + DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_PROBE), + "MFW supported features: %08x\n", + p_hwfn->mcp_info->capabilities); + + return rc; +} + +enum _ecore_status_t 
ecore_mcp_set_capabilities(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + u32 mcp_resp, mcp_param, features; + + features = DRV_MB_PARAM_FEATURE_SUPPORT_PORT_SMARTLINQ | + DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE | + DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK; + + return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_FEATURE_SUPPORT, + features, &mcp_resp, &mcp_param); +} + +enum _ecore_status_t +ecore_mcp_drv_attribute(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, + struct ecore_mcp_drv_attr *p_drv_attr) +{ + struct attribute_cmd_write_stc attr_cmd_write; + enum _attribute_commands_e mfw_attr_cmd; + struct ecore_mcp_mb_params mb_params; + enum _ecore_status_t rc; + + switch (p_drv_attr->attr_cmd) { + case ECORE_MCP_DRV_ATTR_CMD_READ: + mfw_attr_cmd = ATTRIBUTE_CMD_READ; + break; + case ECORE_MCP_DRV_ATTR_CMD_WRITE: + mfw_attr_cmd = ATTRIBUTE_CMD_WRITE; + break; + case ECORE_MCP_DRV_ATTR_CMD_READ_CLEAR: + mfw_attr_cmd = ATTRIBUTE_CMD_READ_CLEAR; + break; + case ECORE_MCP_DRV_ATTR_CMD_CLEAR: + mfw_attr_cmd = ATTRIBUTE_CMD_CLEAR; + break; + default: + DP_NOTICE(p_hwfn, false, "Unknown attribute command %d\n", + p_drv_attr->attr_cmd); + return ECORE_INVAL; + } + + OSAL_MEM_ZERO(&mb_params, sizeof(mb_params)); + mb_params.cmd = DRV_MSG_CODE_ATTRIBUTE; + SET_MFW_FIELD(mb_params.param, DRV_MB_PARAM_ATTRIBUTE_KEY, + p_drv_attr->attr_num); + SET_MFW_FIELD(mb_params.param, DRV_MB_PARAM_ATTRIBUTE_CMD, + mfw_attr_cmd); + if (p_drv_attr->attr_cmd == ECORE_MCP_DRV_ATTR_CMD_WRITE) { + OSAL_MEM_ZERO(&attr_cmd_write, sizeof(attr_cmd_write)); + attr_cmd_write.val = p_drv_attr->val; + attr_cmd_write.mask = p_drv_attr->mask; + attr_cmd_write.offset = p_drv_attr->offset; + + mb_params.p_data_src = &attr_cmd_write; + mb_params.data_src_size = sizeof(attr_cmd_write); + } + + rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); + if (rc != ECORE_SUCCESS) + return rc; + + if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) { + DP_INFO(p_hwfn, + "The attribute command is not supported by the MFW\n"); + return ECORE_NOTIMPL; + } else if (mb_params.mcp_resp != FW_MSG_CODE_OK) { + DP_INFO(p_hwfn, + "Failed to send an attribute command [mcp_resp 0x%x, attr_cmd %d, attr_num %d]\n", + mb_params.mcp_resp, p_drv_attr->attr_cmd, + p_drv_attr->attr_num); + return ECORE_INVAL; + } + + DP_VERBOSE(p_hwfn, ECORE_MSG_SP, + "Attribute Command: cmd %d [mfw_cmd %d], num %d, in={val 0x%08x, mask 0x%08x, offset 0x%08x}, out={val 0x%08x}\n", + p_drv_attr->attr_cmd, mfw_attr_cmd, p_drv_attr->attr_num, + p_drv_attr->val, p_drv_attr->mask, p_drv_attr->offset, + mb_params.mcp_param); + + if (p_drv_attr->attr_cmd == ECORE_MCP_DRV_ATTR_CMD_READ || + p_drv_attr->attr_cmd == ECORE_MCP_DRV_ATTR_CMD_READ_CLEAR) + p_drv_attr->val = mb_params.mcp_param; + + return ECORE_SUCCESS; +} + +enum _ecore_status_t ecore_mcp_get_engine_config(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + struct ecore_dev *p_dev = p_hwfn->p_dev; + struct ecore_mcp_mb_params mb_params; + u8 fir_valid, l2_valid; + enum _ecore_status_t rc; + + OSAL_MEM_ZERO(&mb_params, sizeof(mb_params)); + mb_params.cmd = DRV_MSG_CODE_GET_ENGINE_CONFIG; + rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); + if (rc != ECORE_SUCCESS) + return rc; + + if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) { + DP_INFO(p_hwfn, + "The get_engine_config command is unsupported by the MFW\n"); + return ECORE_NOTIMPL; + } + + fir_valid = GET_MFW_FIELD(mb_params.mcp_param, + FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALID); + if (fir_valid) + p_dev->fir_affin = + 
GET_MFW_FIELD(mb_params.mcp_param, + FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALUE); + + l2_valid = GET_MFW_FIELD(mb_params.mcp_param, + FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALID); + if (l2_valid) + p_dev->l2_affin_hint = + GET_MFW_FIELD(mb_params.mcp_param, + FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE); + + DP_INFO(p_hwfn, + "Engine affinity config: FIR={valid %hhd, value %hhd}, L2_hint={valid %hhd, value %hhd}\n", + fir_valid, p_dev->fir_affin, l2_valid, p_dev->l2_affin_hint); + + return ECORE_SUCCESS; +} + +enum _ecore_status_t ecore_mcp_get_ppfid_bitmap(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + struct ecore_dev *p_dev = p_hwfn->p_dev; + struct ecore_mcp_mb_params mb_params; + enum _ecore_status_t rc; + + OSAL_MEM_ZERO(&mb_params, sizeof(mb_params)); + mb_params.cmd = DRV_MSG_CODE_GET_PPFID_BITMAP; + rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); + if (rc != ECORE_SUCCESS) + return rc; + + if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) { + DP_INFO(p_hwfn, + "The get_ppfid_bitmap command is unsupported by the MFW\n"); + return ECORE_NOTIMPL; + } + + p_dev->ppfid_bitmap = GET_MFW_FIELD(mb_params.mcp_param, + FW_MB_PARAM_PPFID_BITMAP); + + DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "PPFID bitmap 0x%hhx\n", + p_dev->ppfid_bitmap); + + return ECORE_SUCCESS; +} + +void ecore_mcp_wol_wr(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, + u32 offset, u32 val) +{ + enum _ecore_status_t rc = ECORE_SUCCESS; + u32 dword = val; + struct ecore_mcp_mb_params mb_params; + + OSAL_MEMSET(&mb_params, 0, sizeof(struct ecore_mcp_mb_params)); + mb_params.cmd = DRV_MSG_CODE_WRITE_WOL_REG; + mb_params.param = offset; + mb_params.p_data_src = &dword; + mb_params.data_src_size = sizeof(dword); + + rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); + if (rc != ECORE_SUCCESS) { + DP_NOTICE(p_hwfn, false, + "Failed to wol write request, rc = %d\n", rc); + } + + if (mb_params.mcp_resp != FW_MSG_CODE_WOL_READ_WRITE_OK) { + DP_NOTICE(p_hwfn, false, + "Failed to write value 0x%x to offset 0x%x [mcp_resp 0x%x]\n", + val, offset, mb_params.mcp_resp); + rc = ECORE_UNKNOWN_ERROR; + } +} diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_mcp.h b/src/spdk/dpdk/drivers/net/qede/base/ecore_mcp.h new file mode 100644 index 000000000..185cc2339 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_mcp.h @@ -0,0 +1,589 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. + * All rights reserved. + * www.cavium.com + */ + +#ifndef __ECORE_MCP_H__ +#define __ECORE_MCP_H__ + +#include "bcm_osal.h" +#include "mcp_public.h" +#include "ecore.h" +#include "ecore_mcp_api.h" +#include "ecore_dev_api.h" + +/* Using hwfn number (and not pf_num) is required since in CMT mode, + * same pf_num may be used by two different hwfn + * TODO - this shouldn't really be in .h file, but until all fields + * required during hw-init will be placed in their correct place in shmem + * we need it in ecore_dev.c [for readin the nvram reflection in shmem]. + */ +#define MCP_PF_ID_BY_REL(p_hwfn, rel_pfid) (ECORE_IS_BB((p_hwfn)->p_dev) ? \ + ((rel_pfid) | \ + ((p_hwfn)->abs_pf_id & 1) << 3) : \ + rel_pfid) +#define MCP_PF_ID(p_hwfn) MCP_PF_ID_BY_REL(p_hwfn, (p_hwfn)->rel_pf_id) + +struct ecore_mcp_info { + /* List for mailbox commands which were sent and wait for a response */ + osal_list_t cmd_list; + + /* Spinlock used for protecting the access to the mailbox commands list + * and the sending of the commands. 
+ */ + osal_spinlock_t cmd_lock; + + /* Flag to indicate whether sending a MFW mailbox command is blocked */ + bool b_block_cmd; + + /* Spinlock used for syncing SW link-changes and link-changes + * originating from attention context. + */ + osal_spinlock_t link_lock; + + /* Address of the MCP public area */ + u32 public_base; + /* Address of the driver mailbox */ + u32 drv_mb_addr; + /* Address of the MFW mailbox */ + u32 mfw_mb_addr; + /* Address of the port configuration (link) */ + u32 port_addr; + + /* Current driver mailbox sequence */ + u16 drv_mb_seq; + /* Current driver pulse sequence */ + u16 drv_pulse_seq; + + struct ecore_mcp_link_params link_input; + struct ecore_mcp_link_state link_output; + struct ecore_mcp_link_capabilities link_capabilities; + + struct ecore_mcp_function_info func_info; + + u8 *mfw_mb_cur; + u8 *mfw_mb_shadow; + u16 mfw_mb_length; + u32 mcp_hist; + + /* Capabilties negotiated with the MFW */ + u32 capabilities; +}; + +struct ecore_mcp_mb_params { + u32 cmd; + u32 param; + void *p_data_src; + void *p_data_dst; + u32 mcp_resp; + u32 mcp_param; + u8 data_src_size; + u8 data_dst_size; + u32 flags; +#define ECORE_MB_FLAG_CAN_SLEEP (0x1 << 0) +#define ECORE_MB_FLAG_AVOID_BLOCK (0x1 << 1) +#define ECORE_MB_FLAGS_IS_SET(params, flag) \ + ((params) != OSAL_NULL && ((params)->flags & ECORE_MB_FLAG_##flag)) +}; + +struct ecore_drv_tlv_hdr { + u8 tlv_type; /* According to the enum below */ + u8 tlv_length; /* In dwords - not including this header */ + u8 tlv_reserved; +#define ECORE_DRV_TLV_FLAGS_CHANGED 0x01 + u8 tlv_flags; +}; + +/** + * @brief Initialize the interface with the MCP + * + * @param p_hwfn - HW func + * @param p_ptt - PTT required for register access + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_mcp_cmd_init(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt); + +/** + * @brief Initialize the port interface with the MCP + * + * @param p_hwfn + * @param p_ptt + * Can only be called after `num_ports_in_engine' is set + */ +void ecore_mcp_cmd_port_init(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt); +/** + * @brief Releases resources allocated during the init process. + * + * @param p_hwfn - HW func + * @param p_ptt - PTT required for register access + * + * @return enum _ecore_status_t + */ + +enum _ecore_status_t ecore_mcp_free(struct ecore_hwfn *p_hwfn); + +/** + * @brief This function is called from the DPC context. After + * pointing PTT to the mfw mb, check for events sent by the MCP + * to the driver and ack them. In case a critical event + * detected, it will be handled here, otherwise the work will be + * queued to a sleepable work-queue. + * + * @param p_hwfn - HW function + * @param p_ptt - PTT required for register access + * @return enum _ecore_status_t - ECORE_SUCCESS - operation + * was successul. + */ +enum _ecore_status_t ecore_mcp_handle_events(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt); + +/** + * @brief When MFW doesn't get driver pulse for couple of seconds, at some + * threshold before timeout expires, it will generate interrupt + * through a dedicated status block (DPSB - Driver Pulse Status + * Block), which the driver should respond immediately, by + * providing keepalive indication after setting the PTT to the + * driver-MFW mailbox. This function is called directly from the + * DPC upon receiving the DPSB attention. + * + * @param p_hwfn - hw function + * @param p_ptt - PTT required for register access + * @return enum _ecore_status_t - ECORE_SUCCESS - operation + * was successful. 
+ */ +enum _ecore_status_t ecore_issue_pulse(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt); + +enum ecore_drv_role { + ECORE_DRV_ROLE_OS, + ECORE_DRV_ROLE_KDUMP, +}; + +struct ecore_load_req_params { + /* Input params */ + enum ecore_drv_role drv_role; + u8 timeout_val; /* 1..254, '0' - default value, '255' - no timeout */ + bool avoid_eng_reset; + enum ecore_override_force_load override_force_load; + + /* Output params */ + u32 load_code; +}; + +/** + * @brief Sends a LOAD_REQ to the MFW, and in case the operation succeeds, + * returns whether this PF is the first on the engine/port or function. + * + * @param p_hwfn + * @param p_ptt + * @param p_params + * + * @return enum _ecore_status_t - ECORE_SUCCESS - Operation was successful. + */ +enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_load_req_params *p_params); + +/** + * @brief Sends a LOAD_DONE message to the MFW + * + * @param p_hwfn + * @param p_ptt + * + * @return enum _ecore_status_t - ECORE_SUCCESS - Operation was successful. + */ +enum _ecore_status_t ecore_mcp_load_done(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt); + +/** + * @brief Sends a UNLOAD_REQ message to the MFW + * + * @param p_hwfn + * @param p_ptt + * + * @return enum _ecore_status_t - ECORE_SUCCESS - Operation was successful. + */ +enum _ecore_status_t ecore_mcp_unload_req(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt); + +/** + * @brief Sends a UNLOAD_DONE message to the MFW + * + * @param p_hwfn + * @param p_ptt + * + * @return enum _ecore_status_t - ECORE_SUCCESS - Operation was successful. + */ +enum _ecore_status_t ecore_mcp_unload_done(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt); + +/** + * @brief Read the MFW mailbox into Current buffer. + * + * @param p_hwfn + * @param p_ptt + */ +void ecore_mcp_read_mb(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt); + +/** + * @brief Ack to mfw that driver finished FLR process for VFs + * + * @param p_hwfn + * @param p_ptt + * @param vfs_to_ack - bit mask of all engine VFs for which the PF acks. + * + * @param return enum _ecore_status_t - ECORE_SUCCESS upon success. + */ +enum _ecore_status_t ecore_mcp_ack_vf_flr(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 *vfs_to_ack); + +/** + * @brief - calls during init to read shmem of all function-related info. + * + * @param p_hwfn + * + * @param return ECORE_SUCCESS upon success. + */ +enum _ecore_status_t ecore_mcp_fill_shmem_func_info(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt); + +/** + * @brief - Reset the MCP using mailbox command. + * + * @param p_hwfn + * @param p_ptt + * + * @param return ECORE_SUCCESS upon success. + */ +enum _ecore_status_t ecore_mcp_reset(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt); + +/** + * @brief indicates whether the MFW objects [under mcp_info] are accessible + * + * @param p_hwfn + * + * @return true iff MFW is running and mcp_info is initialized + */ +bool ecore_mcp_is_init(struct ecore_hwfn *p_hwfn); + +/** + * @brief request MFW to configure MSI-X for a VF + * + * @param p_hwfn + * @param p_ptt + * @param vf_id - absolute inside engine + * @param num_sbs - number of entries to request + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_mcp_config_vf_msix(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u8 vf_id, u8 num); + +/** + * @brief - Halt the MCP. + * + * @param p_hwfn + * @param p_ptt + * + * @param return ECORE_SUCCESS upon success. 
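+ *
+ * A halted MCP is expected to be brought back up later with
+ * ecore_mcp_resume(), declared below.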
+ */ +enum _ecore_status_t ecore_mcp_halt(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt); + +/** + * @brief - Wake up the MCP. + * + * @param p_hwfn + * @param p_ptt + * + * @param return ECORE_SUCCESS upon success. + */ +enum _ecore_status_t ecore_mcp_resume(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt); +int __ecore_configure_pf_max_bandwidth(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_mcp_link_state *p_link, + u8 max_bw); +int __ecore_configure_pf_min_bandwidth(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_mcp_link_state *p_link, + u8 min_bw); +enum _ecore_status_t ecore_mcp_mask_parities(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 mask_parities); +/** + * @brief - Sends crash mdump related info to the MFW. + * + * @param p_hwfn + * @param p_ptt + * + * @param return ECORE_SUCCESS upon success. + */ +enum _ecore_status_t ecore_mcp_mdump_set_values(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 epoch); + +/** + * @brief - Triggers a MFW crash dump procedure. + * + * @param p_hwfn + * @param p_ptt + * @param epoch + * + * @param return ECORE_SUCCESS upon success. + */ +enum _ecore_status_t ecore_mcp_mdump_trigger(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt); + +struct ecore_mdump_retain_data { + u32 valid; + u32 epoch; + u32 pf; + u32 status; +}; + +/** + * @brief - Gets the mdump retained data from the MFW. + * + * @param p_hwfn + * @param p_ptt + * @param p_mdump_retain + * + * @param return ECORE_SUCCESS upon success. + */ +enum _ecore_status_t +ecore_mcp_mdump_get_retain(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, + struct ecore_mdump_retain_data *p_mdump_retain); + +/** + * @brief - Sets the MFW's max value for the given resource + * + * @param p_hwfn + * @param p_ptt + * @param res_id + * @param resc_max_val + * @param p_mcp_resp + * + * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful. + */ +enum _ecore_status_t +ecore_mcp_set_resc_max_val(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, + enum ecore_resources res_id, u32 resc_max_val, + u32 *p_mcp_resp); + +/** + * @brief - Gets the MFW allocation info for the given resource + * + * @param p_hwfn + * @param p_ptt + * @param res_id + * @param p_mcp_resp + * @param p_resc_num + * @param p_resc_start + * + * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful. + */ +enum _ecore_status_t +ecore_mcp_get_resc_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, + enum ecore_resources res_id, u32 *p_mcp_resp, + u32 *p_resc_num, u32 *p_resc_start); + +/** + * @brief - Initiates PF FLR + * + * @param p_hwfn + * @param p_ptt + * + * @param return ECORE_SUCCESS upon success. + */ +enum _ecore_status_t ecore_mcp_initiate_pf_flr(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt); + +#define ECORE_MCP_RESC_LOCK_MIN_VAL RESOURCE_DUMP /* 0 */ +#define ECORE_MCP_RESC_LOCK_MAX_VAL 31 + +enum ecore_resc_lock { + ECORE_RESC_LOCK_DBG_DUMP = ECORE_MCP_RESC_LOCK_MIN_VAL, + /* Locks that the MFW is aware of should be added here downwards */ + + /* Ecore only locks should be added here upwards */ + ECORE_RESC_LOCK_RESC_ALLOC = ECORE_MCP_RESC_LOCK_MAX_VAL, + + /* A dummy value to be used for auxiliary functions in need of + * returning an 'error' value. 
+ */ + ECORE_RESC_LOCK_RESC_INVALID, +}; + +struct ecore_resc_lock_params { + /* Resource number [valid values are 0..31] */ + u8 resource; + + /* Lock timeout value in seconds [default, none or 1..254] */ + u8 timeout; +#define ECORE_MCP_RESC_LOCK_TO_DEFAULT 0 +#define ECORE_MCP_RESC_LOCK_TO_NONE 255 + + /* Number of times to retry locking */ + u8 retry_num; +#define ECORE_MCP_RESC_LOCK_RETRY_CNT_DFLT 10 + + /* The interval in usec between retries */ + u16 retry_interval; +#define ECORE_MCP_RESC_LOCK_RETRY_VAL_DFLT 10000 + + /* Use sleep or delay between retries */ + bool sleep_b4_retry; + + /* Will be set as true if the resource is free and granted */ + bool b_granted; + + /* Will be filled with the resource owner. + * [0..15 = PF0-15, 16 = MFW, 17 = diag over serial] + */ + u8 owner; +}; + +/** + * @brief Acquires MFW generic resource lock + * + * @param p_hwfn + * @param p_ptt + * @param p_params + * + * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful. + */ +enum _ecore_status_t +ecore_mcp_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, + struct ecore_resc_lock_params *p_params); + +struct ecore_resc_unlock_params { + /* Resource number [valid values are 0..31] */ + u8 resource; + + /* Allow to release a resource even if belongs to another PF */ + bool b_force; + + /* Will be set as true if the resource is released */ + bool b_released; +}; + +/** + * @brief Releases MFW generic resource lock + * + * @param p_hwfn + * @param p_ptt + * @param p_params + * + * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful. + */ +enum _ecore_status_t +ecore_mcp_resc_unlock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, + struct ecore_resc_unlock_params *p_params); + +/** + * @brief - default initialization for lock/unlock resource structs + * + * @param p_lock - lock params struct to be initialized; Can be OSAL_NULL + * @param p_unlock - unlock params struct to be initialized; Can be OSAL_NULL + * @param resource - the requested resource + * @paral b_is_permanent - disable retries & aging when set + */ +void ecore_mcp_resc_lock_default_init(struct ecore_resc_lock_params *p_lock, + struct ecore_resc_unlock_params *p_unlock, + enum ecore_resc_lock resource, + bool b_is_permanent); + +/** + * @brief Learn of supported MFW features; To be done during early init + * + * @param p_hwfn + * @param p_ptt + */ +enum _ecore_status_t ecore_mcp_get_capabilities(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt); + +/** + * @brief Inform MFW of set of features supported by driver. Should be done + * inside the contet of the LOAD_REQ. + * + * @param p_hwfn + * @param p_ptt + */ +enum _ecore_status_t ecore_mcp_set_capabilities(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt); + +enum ecore_mcp_drv_attr_cmd { + ECORE_MCP_DRV_ATTR_CMD_READ, + ECORE_MCP_DRV_ATTR_CMD_WRITE, + ECORE_MCP_DRV_ATTR_CMD_READ_CLEAR, + ECORE_MCP_DRV_ATTR_CMD_CLEAR, +}; + +struct ecore_mcp_drv_attr { + enum ecore_mcp_drv_attr_cmd attr_cmd; + u32 attr_num; + + /* R/RC - will be set with the read value + * W - should hold the required value to be written + * C - DC + */ + u32 val; + + /* W - mask/offset to be applied on the given value + * R/RC/C - DC + */ + u32 mask; + u32 offset; +}; + +/** + * @brief Handle the drivers' attributes that are kept by the MFW. 
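+ *
+ * Illustrative read flow (a sketch only; attr_num 0 is an arbitrary example
+ * value, and p_hwfn/p_ptt are assumed to be valid, already-acquired handles):
+ *
+ *	struct ecore_mcp_drv_attr drv_attr;
+ *
+ *	OSAL_MEM_ZERO(&drv_attr, sizeof(drv_attr));
+ *	drv_attr.attr_cmd = ECORE_MCP_DRV_ATTR_CMD_READ;
+ *	drv_attr.attr_num = 0;
+ *	if (ecore_mcp_drv_attribute(p_hwfn, p_ptt, &drv_attr) == ECORE_SUCCESS)
+ *		drv_attr.val now holds the value returned by the MFW
+ *
+ * For ECORE_MCP_DRV_ATTR_CMD_WRITE, the caller fills val, mask and offset
+ * instead, as described in struct ecore_mcp_drv_attr above.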
+ * + * @param p_hwfn + * @param p_ptt + * @param p_drv_attr + */ +enum _ecore_status_t +ecore_mcp_drv_attribute(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, + struct ecore_mcp_drv_attr *p_drv_attr); + +/** + * @brief Read ufp config from the shared memory. + * + * @param p_hwfn + * @param p_ptt + */ +void +ecore_mcp_read_ufp_config(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt); + +void ecore_mcp_wol_wr(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, + u32 offset, u32 val); + +/** + * @brief Get the engine affinity configuration. + * + * @param p_hwfn + * @param p_ptt + */ +enum _ecore_status_t ecore_mcp_get_engine_config(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt); + +/** + * @brief Get the PPFID bitmap. + * + * @param p_hwfn + * @param p_ptt + */ +enum _ecore_status_t ecore_mcp_get_ppfid_bitmap(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt); + +#endif /* __ECORE_MCP_H__ */ diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_mcp_api.h b/src/spdk/dpdk/drivers/net/qede/base/ecore_mcp_api.h new file mode 100644 index 000000000..dc889ab8e --- /dev/null +++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_mcp_api.h @@ -0,0 +1,1265 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. + * All rights reserved. + * www.cavium.com + */ + +#ifndef __ECORE_MCP_API_H__ +#define __ECORE_MCP_API_H__ + +#include "ecore_status.h" + +struct ecore_mcp_link_speed_params { + bool autoneg; + u32 advertised_speeds; /* bitmask of DRV_SPEED_CAPABILITY */ + u32 forced_speed; /* In Mb/s */ +}; + +struct ecore_mcp_link_pause_params { + bool autoneg; + bool forced_rx; + bool forced_tx; +}; + +enum ecore_mcp_eee_mode { + ECORE_MCP_EEE_DISABLED, + ECORE_MCP_EEE_ENABLED, + ECORE_MCP_EEE_UNSUPPORTED +}; + +struct ecore_link_eee_params { + u32 tx_lpi_timer; +#define ECORE_EEE_1G_ADV (1 << 0) +#define ECORE_EEE_10G_ADV (1 << 1) + /* Capabilities are represented using ECORE_EEE_*_ADV values */ + u8 adv_caps; + u8 lp_adv_caps; + bool enable; + bool tx_lpi_enable; +}; + +struct ecore_mcp_link_params { + struct ecore_mcp_link_speed_params speed; + struct ecore_mcp_link_pause_params pause; + u32 loopback_mode; /* in PMM_LOOPBACK values */ + struct ecore_link_eee_params eee; +}; + +struct ecore_mcp_link_capabilities { + u32 speed_capabilities; + bool default_speed_autoneg; /* In Mb/s */ + u32 default_speed; /* In Mb/s */ + enum ecore_mcp_eee_mode default_eee; + u32 eee_lpi_timer; + u8 eee_speed_caps; +}; + +struct ecore_mcp_link_state { + bool link_up; + + u32 min_pf_rate; /* In Mb/s */ + + /* Actual link speed in Mb/s */ + u32 line_speed; + + /* PF max speed in MB/s, deduced from line_speed + * according to PF max bandwidth configuration. 
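+	 * For example, assuming the deduction is a simple proportion (an
+	 * illustration only, not a statement of the exact MFW algorithm),
+	 * a line_speed of 25000 Mb/s with a PF bandwidth_max of 40 would be
+	 * reported here as 10000 Mb/s.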
+ */ + u32 speed; + bool full_duplex; + + bool an; + bool an_complete; + bool parallel_detection; + bool pfc_enabled; + +#define ECORE_LINK_PARTNER_SPEED_1G_HD (1 << 0) +#define ECORE_LINK_PARTNER_SPEED_1G_FD (1 << 1) +#define ECORE_LINK_PARTNER_SPEED_10G (1 << 2) +#define ECORE_LINK_PARTNER_SPEED_20G (1 << 3) +#define ECORE_LINK_PARTNER_SPEED_25G (1 << 4) +#define ECORE_LINK_PARTNER_SPEED_40G (1 << 5) +#define ECORE_LINK_PARTNER_SPEED_50G (1 << 6) +#define ECORE_LINK_PARTNER_SPEED_100G (1 << 7) + u32 partner_adv_speed; + + bool partner_tx_flow_ctrl_en; + bool partner_rx_flow_ctrl_en; + +#define ECORE_LINK_PARTNER_SYMMETRIC_PAUSE (1) +#define ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE (2) +#define ECORE_LINK_PARTNER_BOTH_PAUSE (3) + u8 partner_adv_pause; + + bool sfp_tx_fault; + + bool eee_active; + u8 eee_adv_caps; + u8 eee_lp_adv_caps; +}; + +struct ecore_mcp_function_info { + u8 pause_on_host; + + enum ecore_pci_personality protocol; + + u8 bandwidth_min; + u8 bandwidth_max; + + u8 mac[ETH_ALEN]; + + u64 wwn_port; + u64 wwn_node; + +#define ECORE_MCP_VLAN_UNSET (0xffff) + u16 ovlan; + + u16 mtu; +}; + +#ifndef __EXTRACT__LINUX__ +enum ecore_nvm_images { + ECORE_NVM_IMAGE_ISCSI_CFG, + ECORE_NVM_IMAGE_FCOE_CFG, +}; +#endif + +struct ecore_mcp_drv_version { + u32 version; + u8 name[MCP_DRV_VER_STR_SIZE - 4]; +}; + +struct ecore_mcp_lan_stats { + u64 ucast_rx_pkts; + u64 ucast_tx_pkts; + u32 fcs_err; +}; + +#ifndef ECORE_PROTO_STATS +#define ECORE_PROTO_STATS +struct ecore_mcp_fcoe_stats { + u64 rx_pkts; + u64 tx_pkts; + u32 fcs_err; + u32 login_failure; +}; + +struct ecore_mcp_iscsi_stats { + u64 rx_pdus; + u64 tx_pdus; + u64 rx_bytes; + u64 tx_bytes; +}; + +struct ecore_mcp_rdma_stats { + u64 rx_pkts; + u64 tx_pkts; + u64 rx_bytes; + u64 tx_byts; +}; + +enum ecore_mcp_protocol_type { + ECORE_MCP_LAN_STATS, + ECORE_MCP_FCOE_STATS, + ECORE_MCP_ISCSI_STATS, + ECORE_MCP_RDMA_STATS +}; + +union ecore_mcp_protocol_stats { + struct ecore_mcp_lan_stats lan_stats; + struct ecore_mcp_fcoe_stats fcoe_stats; + struct ecore_mcp_iscsi_stats iscsi_stats; + struct ecore_mcp_rdma_stats rdma_stats; +}; +#endif + +enum ecore_ov_client { + ECORE_OV_CLIENT_DRV, + ECORE_OV_CLIENT_USER, + ECORE_OV_CLIENT_VENDOR_SPEC +}; + +enum ecore_ov_driver_state { + ECORE_OV_DRIVER_STATE_NOT_LOADED, + ECORE_OV_DRIVER_STATE_DISABLED, + ECORE_OV_DRIVER_STATE_ACTIVE +}; + +enum ecore_ov_eswitch { + ECORE_OV_ESWITCH_NONE, + ECORE_OV_ESWITCH_VEB, + ECORE_OV_ESWITCH_VEPA +}; + +#define ECORE_MAX_NPIV_ENTRIES 128 +#define ECORE_WWN_SIZE 8 +struct ecore_fc_npiv_tbl { + u32 count; + u8 wwpn[ECORE_MAX_NPIV_ENTRIES][ECORE_WWN_SIZE]; + u8 wwnn[ECORE_MAX_NPIV_ENTRIES][ECORE_WWN_SIZE]; +}; + +#ifndef __EXTRACT__LINUX__ +enum ecore_led_mode { + ECORE_LED_MODE_OFF, + ECORE_LED_MODE_ON, + ECORE_LED_MODE_RESTORE +}; +#endif + +struct ecore_temperature_sensor { + u8 sensor_location; + u8 threshold_high; + u8 critical; + u8 current_temp; +}; + +#define ECORE_MAX_NUM_OF_SENSORS 7 +struct ecore_temperature_info { + u32 num_sensors; + struct ecore_temperature_sensor sensors[ECORE_MAX_NUM_OF_SENSORS]; +}; + +enum ecore_mba_img_idx { + ECORE_MBA_LEGACY_IDX, + ECORE_MBA_PCI3CLP_IDX, + ECORE_MBA_PCI3_IDX, + ECORE_MBA_FCODE_IDX, + ECORE_EFI_X86_IDX, + ECORE_EFI_IPF_IDX, + ECORE_EFI_EBC_IDX, + ECORE_EFI_X64_IDX, + ECORE_MAX_NUM_OF_ROMIMG +}; + +struct ecore_mba_vers { + u32 mba_vers[ECORE_MAX_NUM_OF_ROMIMG]; +}; + +enum ecore_mfw_tlv_type { + ECORE_MFW_TLV_GENERIC = 0x1, /* Core driver TLVs */ + ECORE_MFW_TLV_ETH = 0x2, /* L2 driver TLVs */ + 
ECORE_MFW_TLV_FCOE = 0x4, /* FCoE protocol TLVs */ + ECORE_MFW_TLV_ISCSI = 0x8, /* SCSI protocol TLVs */ + ECORE_MFW_TLV_MAX = 0x16, +}; + +struct ecore_mfw_tlv_generic { + u16 feat_flags; + bool feat_flags_set; + u64 local_mac; + bool local_mac_set; + u64 additional_mac1; + bool additional_mac1_set; + u64 additional_mac2; + bool additional_mac2_set; + u8 drv_state; + bool drv_state_set; + u8 pxe_progress; + bool pxe_progress_set; + u64 rx_frames; + bool rx_frames_set; + u64 rx_bytes; + bool rx_bytes_set; + u64 tx_frames; + bool tx_frames_set; + u64 tx_bytes; + bool tx_bytes_set; +}; + +struct ecore_mfw_tlv_eth { + u16 lso_maxoff_size; + bool lso_maxoff_size_set; + u16 lso_minseg_size; + bool lso_minseg_size_set; + u8 prom_mode; + bool prom_mode_set; + u16 tx_descr_size; + bool tx_descr_size_set; + u16 rx_descr_size; + bool rx_descr_size_set; + u16 netq_count; + bool netq_count_set; + u32 tcp4_offloads; + bool tcp4_offloads_set; + u32 tcp6_offloads; + bool tcp6_offloads_set; + u16 tx_descr_qdepth; + bool tx_descr_qdepth_set; + u16 rx_descr_qdepth; + bool rx_descr_qdepth_set; + u8 iov_offload; + bool iov_offload_set; + u8 txqs_empty; + bool txqs_empty_set; + u8 rxqs_empty; + bool rxqs_empty_set; + u8 num_txqs_full; + bool num_txqs_full_set; + u8 num_rxqs_full; + bool num_rxqs_full_set; +}; + +struct ecore_mfw_tlv_fcoe { + u8 scsi_timeout; + bool scsi_timeout_set; + u32 rt_tov; + bool rt_tov_set; + u32 ra_tov; + bool ra_tov_set; + u32 ed_tov; + bool ed_tov_set; + u32 cr_tov; + bool cr_tov_set; + u8 boot_type; + bool boot_type_set; + u8 npiv_state; + bool npiv_state_set; + u32 num_npiv_ids; + bool num_npiv_ids_set; + u8 switch_name[8]; + bool switch_name_set; + u16 switch_portnum; + bool switch_portnum_set; + u8 switch_portid[3]; + bool switch_portid_set; + u8 vendor_name[8]; + bool vendor_name_set; + u8 switch_model[8]; + bool switch_model_set; + u8 switch_fw_version[8]; + bool switch_fw_version_set; + u8 qos_pri; + bool qos_pri_set; + u8 port_alias[3]; + bool port_alias_set; + u8 port_state; + bool port_state_set; + u16 fip_tx_descr_size; + bool fip_tx_descr_size_set; + u16 fip_rx_descr_size; + bool fip_rx_descr_size_set; + u16 link_failures; + bool link_failures_set; + u8 fcoe_boot_progress; + bool fcoe_boot_progress_set; + u64 rx_bcast; + bool rx_bcast_set; + u64 tx_bcast; + bool tx_bcast_set; + u16 fcoe_txq_depth; + bool fcoe_txq_depth_set; + u16 fcoe_rxq_depth; + bool fcoe_rxq_depth_set; + u64 fcoe_rx_frames; + bool fcoe_rx_frames_set; + u64 fcoe_rx_bytes; + bool fcoe_rx_bytes_set; + u64 fcoe_tx_frames; + bool fcoe_tx_frames_set; + u64 fcoe_tx_bytes; + bool fcoe_tx_bytes_set; + u16 crc_count; + bool crc_count_set; + u32 crc_err_src_fcid[5]; + bool crc_err_src_fcid_set[5]; + u8 crc_err_tstamp[5][14]; + bool crc_err_tstamp_set[5]; + u16 losync_err; + bool losync_err_set; + u16 losig_err; + bool losig_err_set; + u16 primtive_err; + bool primtive_err_set; + u16 disparity_err; + bool disparity_err_set; + u16 code_violation_err; + bool code_violation_err_set; + u32 flogi_param[4]; + bool flogi_param_set[4]; + u8 flogi_tstamp[14]; + bool flogi_tstamp_set; + u32 flogi_acc_param[4]; + bool flogi_acc_param_set[4]; + u8 flogi_acc_tstamp[14]; + bool flogi_acc_tstamp_set; + u32 flogi_rjt; + bool flogi_rjt_set; + u8 flogi_rjt_tstamp[14]; + bool flogi_rjt_tstamp_set; + u32 fdiscs; + bool fdiscs_set; + u8 fdisc_acc; + bool fdisc_acc_set; + u8 fdisc_rjt; + bool fdisc_rjt_set; + u8 plogi; + bool plogi_set; + u8 plogi_acc; + bool plogi_acc_set; + u8 plogi_rjt; + bool plogi_rjt_set; + u32 
plogi_dst_fcid[5]; + bool plogi_dst_fcid_set[5]; + u8 plogi_tstamp[5][14]; + bool plogi_tstamp_set[5]; + u32 plogi_acc_src_fcid[5]; + bool plogi_acc_src_fcid_set[5]; + u8 plogi_acc_tstamp[5][14]; + bool plogi_acc_tstamp_set[5]; + u8 tx_plogos; + bool tx_plogos_set; + u8 plogo_acc; + bool plogo_acc_set; + u8 plogo_rjt; + bool plogo_rjt_set; + u32 plogo_src_fcid[5]; + bool plogo_src_fcid_set[5]; + u8 plogo_tstamp[5][14]; + bool plogo_tstamp_set[5]; + u8 rx_logos; + bool rx_logos_set; + u8 tx_accs; + bool tx_accs_set; + u8 tx_prlis; + bool tx_prlis_set; + u8 rx_accs; + bool rx_accs_set; + u8 tx_abts; + bool tx_abts_set; + u8 rx_abts_acc; + bool rx_abts_acc_set; + u8 rx_abts_rjt; + bool rx_abts_rjt_set; + u32 abts_dst_fcid[5]; + bool abts_dst_fcid_set[5]; + u8 abts_tstamp[5][14]; + bool abts_tstamp_set[5]; + u8 rx_rscn; + bool rx_rscn_set; + u32 rx_rscn_nport[4]; + bool rx_rscn_nport_set[4]; + u8 tx_lun_rst; + bool tx_lun_rst_set; + u8 abort_task_sets; + bool abort_task_sets_set; + u8 tx_tprlos; + bool tx_tprlos_set; + u8 tx_nos; + bool tx_nos_set; + u8 rx_nos; + bool rx_nos_set; + u8 ols; + bool ols_set; + u8 lr; + bool lr_set; + u8 lrr; + bool lrr_set; + u8 tx_lip; + bool tx_lip_set; + u8 rx_lip; + bool rx_lip_set; + u8 eofa; + bool eofa_set; + u8 eofni; + bool eofni_set; + u8 scsi_chks; + bool scsi_chks_set; + u8 scsi_cond_met; + bool scsi_cond_met_set; + u8 scsi_busy; + bool scsi_busy_set; + u8 scsi_inter; + bool scsi_inter_set; + u8 scsi_inter_cond_met; + bool scsi_inter_cond_met_set; + u8 scsi_rsv_conflicts; + bool scsi_rsv_conflicts_set; + u8 scsi_tsk_full; + bool scsi_tsk_full_set; + u8 scsi_aca_active; + bool scsi_aca_active_set; + u8 scsi_tsk_abort; + bool scsi_tsk_abort_set; + u32 scsi_rx_chk[5]; + bool scsi_rx_chk_set[5]; + u8 scsi_chk_tstamp[5][14]; + bool scsi_chk_tstamp_set[5]; +}; + +struct ecore_mfw_tlv_iscsi { + u8 target_llmnr; + bool target_llmnr_set; + u8 header_digest; + bool header_digest_set; + u8 data_digest; + bool data_digest_set; + u8 auth_method; + bool auth_method_set; + u16 boot_taget_portal; + bool boot_taget_portal_set; + u16 frame_size; + bool frame_size_set; + u16 tx_desc_size; + bool tx_desc_size_set; + u16 rx_desc_size; + bool rx_desc_size_set; + u8 boot_progress; + bool boot_progress_set; + u16 tx_desc_qdepth; + bool tx_desc_qdepth_set; + u16 rx_desc_qdepth; + bool rx_desc_qdepth_set; + u64 rx_frames; + bool rx_frames_set; + u64 rx_bytes; + bool rx_bytes_set; + u64 tx_frames; + bool tx_frames_set; + u64 tx_bytes; + bool tx_bytes_set; +}; + +union ecore_mfw_tlv_data { + struct ecore_mfw_tlv_generic generic; + struct ecore_mfw_tlv_eth eth; + struct ecore_mfw_tlv_fcoe fcoe; + struct ecore_mfw_tlv_iscsi iscsi; +}; + +enum ecore_hw_info_change { + ECORE_HW_INFO_CHANGE_OVLAN, +}; + +/** + * @brief - returns the link params of the hw function + * + * @param p_hwfn + * + * @returns pointer to link params + */ +struct ecore_mcp_link_params *ecore_mcp_get_link_params(struct ecore_hwfn *); + +/** + * @brief - return the link state of the hw function + * + * @param p_hwfn + * + * @returns pointer to link state + */ +struct ecore_mcp_link_state *ecore_mcp_get_link_state(struct ecore_hwfn *); + +/** + * @brief - return the link capabilities of the hw function + * + * @param p_hwfn + * + * @returns pointer to link capabilities + */ +struct ecore_mcp_link_capabilities +*ecore_mcp_get_link_capabilities(struct ecore_hwfn *p_hwfn); + +/** + * @brief Request the MFW to set the link according to 'link_input'. 
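+ *
+ * One possible flow (a sketch; it assumes valid p_hwfn/p_ptt handles, that
+ * p_link points at this hw function's 'link_input' - e.g. as returned by
+ * ecore_mcp_get_link_params() above - and it omits error handling):
+ *
+ *	struct ecore_mcp_link_params *p_link;
+ *
+ *	p_link = ecore_mcp_get_link_params(p_hwfn);
+ *	p_link->speed.autoneg = true;
+ *	ecore_mcp_set_link(p_hwfn, p_ptt, true);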
+ * + * @param p_hwfn + * @param p_ptt + * @param b_up - raise link if `true'. Reset link if `false'. + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_mcp_set_link(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + bool b_up); + +/** + * @brief Get the management firmware version value + * + * @param p_hwfn + * @param p_ptt + * @param p_mfw_ver - mfw version value + * @param p_running_bundle_id - image id in nvram; Optional. + * + * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful. + */ +enum _ecore_status_t ecore_mcp_get_mfw_ver(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 *p_mfw_ver, + u32 *p_running_bundle_id); + +/** + * @brief Get the MBI version value + * + * @param p_hwfn + * @param p_ptt + * @param p_mbi_ver - A pointer to a variable to be filled with the MBI version. + * + * @return int - 0 - operation was successful. + */ +int ecore_mcp_get_mbi_ver(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, u32 *p_mbi_ver); + +/** + * @brief Get media type value of the port. + * + * @param p_dev - ecore dev pointer + * @param p_ptt + * @param mfw_ver - media type value + * + * @return enum _ecore_status_t - + * ECORE_SUCCESS - Operation was successful. + * ECORE_BUSY - Operation failed + */ +enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 *media_type); + +/** + * @brief Get transceiver data of the port. + * + * @param p_dev - ecore dev pointer + * @param p_ptt + * @param p_transceiver_state - transceiver state. + * @param p_transceiver_type - media type value + * + * @return enum _ecore_status_t - + * ECORE_SUCCESS - Operation was successful. + * ECORE_BUSY - Operation failed + */ +enum _ecore_status_t ecore_mcp_get_transceiver_data(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 *p_transceiver_state, + u32 *p_tranceiver_type); + +/** + * @brief Get transceiver supported speed mask. + * + * @param p_dev - ecore dev pointer + * @param p_ptt + * @param p_speed_mask - Bit mask of all supported speeds. + * + * @return enum _ecore_status_t - + * ECORE_SUCCESS - Operation was successful. + * ECORE_BUSY - Operation failed + */ + +enum _ecore_status_t ecore_mcp_trans_speed_mask(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 *p_speed_mask); + +/** + * @brief Get board configuration. + * + * @param p_dev - ecore dev pointer + * @param p_ptt + * @param p_board_config - Board config. + * + * @return enum _ecore_status_t - + * ECORE_SUCCESS - Operation was successful. + * ECORE_BUSY - Operation failed + */ +enum _ecore_status_t ecore_mcp_get_board_config(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 *p_board_config); + +/** + * @brief - Sends a command to the MCP mailbox. + * + * @param p_hwfn - hw function + * @param p_ptt - PTT required for register access + * @param cmd - command to be sent to the MCP + * @param param - Optional param + * @param o_mcp_resp - The MCP response code (exclude sequence) + * @param o_mcp_param - Optional parameter provided by the MCP response + * + * @return enum _ecore_status_t - + * ECORE_SUCCESS - operation was successful + * ECORE_BUSY - operation failed + */ +enum _ecore_status_t ecore_mcp_cmd(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, u32 cmd, u32 param, + u32 *o_mcp_resp, u32 *o_mcp_param); + +/** + * @brief - drains the nig, allowing completion to pass in case of pauses. 
+ * (Should be called only from sleepable context) + * + * @param p_hwfn + * @param p_ptt + */ +enum _ecore_status_t ecore_mcp_drain(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt); + +#ifndef LINUX_REMOVE +/** + * @brief - return the mcp function info of the hw function + * + * @param p_hwfn + * + * @returns pointer to mcp function info + */ +const struct ecore_mcp_function_info +*ecore_mcp_get_function_info(struct ecore_hwfn *p_hwfn); +#endif + +#ifndef LINUX_REMOVE +/** + * @brief - count number of function with a matching personality on engine. + * + * @param p_hwfn + * @param p_ptt + * @param personalities - a bitmask of ecore_pci_personality values + * + * @returns the count of all devices on engine whose personality match one of + * the bitsmasks. + */ +int ecore_mcp_get_personality_cnt(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 personalities); +#endif + +/** + * @brief Get the flash size value + * + * @param p_hwfn + * @param p_ptt + * @param p_flash_size - flash size in bytes to be filled. + * + * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful. + */ +enum _ecore_status_t ecore_mcp_get_flash_size(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 *p_flash_size); + +/** + * @brief Send driver version to MFW + * + * @param p_hwfn + * @param p_ptt + * @param version - Version value + * @param name - Protocol driver name + * + * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful. + */ +enum _ecore_status_t +ecore_mcp_send_drv_version(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, + struct ecore_mcp_drv_version *p_ver); + +/** + * @brief Read the MFW process kill counter + * + * @param p_hwfn + * @param p_ptt + * + * @return u32 + */ +u32 ecore_get_process_kill_counter(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt); + +/** + * @brief Trigger a recovery process + * + * @param p_hwfn + * @param p_ptt + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_start_recovery_process(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt); + +/** + * @brief A recovery handler must call this function as its first step. + * It is assumed that the handler is not run from an interrupt context. + * + * @param p_dev + * @param p_ptt + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_recovery_prolog(struct ecore_dev *p_dev); + +/** + * @brief Notify MFW about the change in base device properties + * + * @param p_hwfn + * @param p_ptt + * @param client - ecore client type + * + * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful. + */ +enum _ecore_status_t +ecore_mcp_ov_update_current_config(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + enum ecore_ov_client client); + +/** + * @brief Notify MFW about the driver state + * + * @param p_hwfn + * @param p_ptt + * @param drv_state - Driver state + * + * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful. + */ +enum _ecore_status_t +ecore_mcp_ov_update_driver_state(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + enum ecore_ov_driver_state drv_state); + +/** + * @brief Read NPIV settings form the MFW + * + * @param p_hwfn + * @param p_ptt + * @param p_table - Array to hold the FC NPIV data. Client need allocate the + * required buffer. The field 'count' specifies number of NPIV + * entries. A value of 0 means the table was not populated. + * + * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful. 
+ */ +enum _ecore_status_t +ecore_mcp_ov_get_fc_npiv(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, + struct ecore_fc_npiv_tbl *p_table); + +/** + * @brief Send MTU size to MFW + * + * @param p_hwfn + * @param p_ptt + * @param mtu - MTU size + * + * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful. + */ +enum _ecore_status_t ecore_mcp_ov_update_mtu(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, u16 mtu); + +/** + * @brief Send MAC address to MFW + * + * @param p_hwfn + * @param p_ptt + * @param mac - MAC address + * + * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful. + */ +enum _ecore_status_t +ecore_mcp_ov_update_mac(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, + u8 *mac); + +/** + * @brief Send eswitch mode to MFW + * + * @param p_hwfn + * @param p_ptt + * @param eswitch - eswitch mode + * + * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful. + */ +enum _ecore_status_t +ecore_mcp_ov_update_eswitch(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, + enum ecore_ov_eswitch eswitch); + +/** + * @brief Set LED status + * + * @param p_hwfn + * @param p_ptt + * @param mode - LED mode + * + * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful. + */ +enum _ecore_status_t ecore_mcp_set_led(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + enum ecore_led_mode mode); + +/** + * @brief Set secure mode + * + * @param p_dev + * @param addr - nvm offset + * + * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful. + */ +enum _ecore_status_t ecore_mcp_nvm_set_secure_mode(struct ecore_dev *p_dev, + u32 addr); + +/** + * @brief Write to phy + * + * @param p_dev + * @param addr - nvm offset + * @param cmd - nvm command + * @param p_buf - nvm write buffer + * @param len - buffer len + * + * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful. + */ +enum _ecore_status_t ecore_mcp_phy_write(struct ecore_dev *p_dev, u32 cmd, + u32 addr, u8 *p_buf, u32 len); + +/** + * @brief Write to nvm + * + * @param p_dev + * @param addr - nvm offset + * @param cmd - nvm command + * @param p_buf - nvm write buffer + * @param len - buffer len + * + * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful. + */ +enum _ecore_status_t ecore_mcp_nvm_write(struct ecore_dev *p_dev, u32 cmd, + u32 addr, u8 *p_buf, u32 len); + +/** + * @brief Put file begin + * + * @param p_dev + * @param addr - nvm offset + * + * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful. + */ +enum _ecore_status_t ecore_mcp_nvm_put_file_begin(struct ecore_dev *p_dev, + u32 addr); + +/** + * @brief Delete file + * + * @param p_dev + * @param addr - nvm offset + * + * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful. + */ +enum _ecore_status_t ecore_mcp_nvm_del_file(struct ecore_dev *p_dev, + u32 addr); + +/** + * @brief Check latest response + * + * @param p_dev + * @param p_buf - nvm write buffer + * + * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful. + */ +enum _ecore_status_t ecore_mcp_nvm_resp(struct ecore_dev *p_dev, u8 *p_buf); + +/** + * @brief Read from phy + * + * @param p_dev + * @param addr - nvm offset + * @param cmd - nvm command + * @param p_buf - nvm read buffer + * @param len - buffer len + * + * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful. 
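+ *
+ * Note: unlike ecore_mcp_nvm_read() below, the length here is passed by
+ * pointer; p_len is presumably also used to return the number of bytes
+ * actually read.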
+ */ +enum _ecore_status_t ecore_mcp_phy_read(struct ecore_dev *p_dev, u32 cmd, + u32 addr, u8 *p_buf, u32 *p_len); + +/** + * @brief Read from nvm + * + * @param p_dev + * @param addr - nvm offset + * @param p_buf - nvm read buffer + * @param len - buffer len + * + * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful. + */ +enum _ecore_status_t ecore_mcp_nvm_read(struct ecore_dev *p_dev, u32 addr, + u8 *p_buf, u32 len); + +/** + * @brief - Sends an NVM write command request to the MFW with + * payload. + * + * @param p_hwfn + * @param p_ptt + * @param cmd - Command: Either DRV_MSG_CODE_NVM_WRITE_NVRAM or + * DRV_MSG_CODE_NVM_PUT_FILE_DATA + * @param param - [0:23] - Offset [24:31] - Size + * @param o_mcp_resp - MCP response + * @param o_mcp_param - MCP response param + * @param i_txn_size - Buffer size + * @param i_buf - Pointer to the buffer + * + * @param return ECORE_SUCCESS upon success. + */ +enum _ecore_status_t ecore_mcp_nvm_wr_cmd(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 cmd, + u32 param, + u32 *o_mcp_resp, + u32 *o_mcp_param, + u32 i_txn_size, + u32 *i_buf); + +/** + * @brief - Sends an NVM read command request to the MFW to get + * a buffer. + * + * @param p_hwfn + * @param p_ptt + * @param cmd - Command: DRV_MSG_CODE_NVM_GET_FILE_DATA or + * DRV_MSG_CODE_NVM_READ_NVRAM commands + * @param param - [0:23] - Offset [24:31] - Size + * @param o_mcp_resp - MCP response + * @param o_mcp_param - MCP response param + * @param o_txn_size - Buffer size output + * @param o_buf - Pointer to the buffer returned by the MFW. + * + * @param return ECORE_SUCCESS upon success. + */ +enum _ecore_status_t ecore_mcp_nvm_rd_cmd(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 cmd, + u32 param, + u32 *o_mcp_resp, + u32 *o_mcp_param, + u32 *o_txn_size, + u32 *o_buf); + +/** + * @brief Read from sfp + * + * @param p_hwfn - hw function + * @param p_ptt - PTT required for register access + * @param port - transceiver port + * @param addr - I2C address + * @param offset - offset in sfp + * @param len - buffer length + * @param p_buf - buffer to read into + * + * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful. + */ +enum _ecore_status_t ecore_mcp_phy_sfp_read(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 port, u32 addr, u32 offset, + u32 len, u8 *p_buf); + +/** + * @brief Write to sfp + * + * @param p_hwfn - hw function + * @param p_ptt - PTT required for register access + * @param port - transceiver port + * @param addr - I2C address + * @param offset - offset in sfp + * @param len - buffer length + * @param p_buf - buffer to write from + * + * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful. + */ +enum _ecore_status_t ecore_mcp_phy_sfp_write(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 port, u32 addr, u32 offset, + u32 len, u8 *p_buf); + +/** + * @brief Gpio read + * + * @param p_hwfn - hw function + * @param p_ptt - PTT required for register access + * @param gpio - gpio number + * @param gpio_val - value read from gpio + * + * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful. 
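+ *
+ * Illustrative call (gpio number 4 is an arbitrary, board-specific example;
+ * p_hwfn/p_ptt are assumed to be valid handles):
+ *
+ *	u32 val;
+ *
+ *	if (ecore_mcp_gpio_read(p_hwfn, p_ptt, 4, &val) == ECORE_SUCCESS)
+ *		val holds the current state of the gpio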
+ */ +enum _ecore_status_t ecore_mcp_gpio_read(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u16 gpio, u32 *gpio_val); + +/** + * @brief Gpio write + * + * @param p_hwfn - hw function + * @param p_ptt - PTT required for register access + * @param gpio - gpio number + * @param gpio_val - value to write to gpio + * + * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful. + */ +enum _ecore_status_t ecore_mcp_gpio_write(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u16 gpio, u16 gpio_val); + +/** + * @brief Gpio get information + * + * @param p_hwfn - hw function + * @param p_ptt - PTT required for register access + * @param gpio - gpio number + * @param gpio_direction - gpio is output (0) or input (1) + * @param gpio_ctrl - gpio control is uninitialized (0), + * path 0 (1), path 1 (2) or shared(3) + * + * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful. + */ +enum _ecore_status_t ecore_mcp_gpio_info(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u16 gpio, u32 *gpio_direction, + u32 *gpio_ctrl); + +/** + * @brief Bist register test + * + * @param p_hwfn - hw function + * @param p_ptt - PTT required for register access + * + * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful. + */ +enum _ecore_status_t ecore_mcp_bist_register_test(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt); + +/** + * @brief Bist clock test + * + * @param p_hwfn - hw function + * @param p_ptt - PTT required for register access + * + * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful. + */ +enum _ecore_status_t ecore_mcp_bist_clock_test(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt); + +/** + * @brief Bist nvm test - get number of images + * + * @param p_hwfn - hw function + * @param p_ptt - PTT required for register access + * @param num_images - number of images if operation was + * successful. 0 if not. + * + * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful. + */ +enum _ecore_status_t ecore_mcp_bist_nvm_test_get_num_images( + struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 *num_images); + +/** + * @brief Bist nvm test - get image attributes by index + * + * @param p_hwfn - hw function + * @param p_ptt - PTT required for register access + * @param p_image_att - Attributes of image + * @param image_index - Index of image to get information for + * + * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful. + */ +enum _ecore_status_t ecore_mcp_bist_nvm_test_get_image_att( + struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct bist_nvm_image_att *p_image_att, + u32 image_index); + +/** + * @brief ecore_mcp_get_temperature_info - get the status of the temperature + * sensors + * + * @param p_hwfn - hw function + * @param p_ptt - PTT required for register access + * @param p_temp_status - A pointer to an ecore_temperature_info structure to + * be filled with the temperature data + * + * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful. + */ +enum _ecore_status_t +ecore_mcp_get_temperature_info(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_temperature_info *p_temp_info); + +/** + * @brief Get MBA versions - get MBA sub images versions + * + * @param p_hwfn - hw function + * @param p_ptt - PTT required for register access + * @param p_mba_vers - MBA versions array to fill + * + * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful. 
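+ *
+ * Usage sketch (p_hwfn/p_ptt are assumed to be valid handles; the returned
+ * array is presumably indexed by enum ecore_mba_img_idx):
+ *
+ *	struct ecore_mba_vers mba_vers;
+ *
+ *	if (ecore_mcp_get_mba_versions(p_hwfn, p_ptt, &mba_vers) ==
+ *	    ECORE_SUCCESS)
+ *		use mba_vers.mba_vers[ECORE_MBA_LEGACY_IDX], etc.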
+ */ +enum _ecore_status_t ecore_mcp_get_mba_versions( + struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_mba_vers *p_mba_vers); + +/** + * @brief Count memory ecc events + * + * @param p_hwfn - hw function + * @param p_ptt - PTT required for register access + * @param num_events - number of memory ecc events + * + * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful. + */ +enum _ecore_status_t ecore_mcp_mem_ecc_events(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u64 *num_events); + +struct ecore_mdump_info { + u32 reason; + u32 version; + u32 config; + u32 epoch; + u32 num_of_logs; + u32 valid_logs; +}; + +/** + * @brief - Gets the MFW crash dump configuration and logs info. + * + * @param p_hwfn + * @param p_ptt + * @param p_mdump_info + * + * @param return ECORE_SUCCESS upon success. + */ +enum _ecore_status_t +ecore_mcp_mdump_get_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, + struct ecore_mdump_info *p_mdump_info); + +/** + * @brief - Clears the MFW crash dump logs. + * + * @param p_hwfn + * @param p_ptt + * + * @param return ECORE_SUCCESS upon success. + */ +enum _ecore_status_t ecore_mcp_mdump_clear_logs(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt); + +/** + * @brief - Clear the mdump retained data. + * + * @param p_hwfn + * @param p_ptt + * + * @param return ECORE_SUCCESS upon success. + */ +enum _ecore_status_t ecore_mcp_mdump_clr_retain(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt); + +/** + * @brief - Processes the TLV request from MFW i.e., get the required TLV info + * from the ecore client and send it to the MFW. + * + * @param p_hwfn + * @param p_ptt + * + * @param return ECORE_SUCCESS upon success. + */ +enum _ecore_status_t ecore_mfw_process_tlv_req(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt); + + +/** + * @brief - Return whether management firmware support smart AN + * + * @param p_hwfn + * + * @return bool - true iff feature is supported. + */ +bool ecore_mcp_is_smart_an_supported(struct ecore_hwfn *p_hwfn); +#endif diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_mng_tlv.c b/src/spdk/dpdk/drivers/net/qede/base/ecore_mng_tlv.c new file mode 100644 index 000000000..f7666472d --- /dev/null +++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_mng_tlv.c @@ -0,0 +1,1540 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. + * All rights reserved. 
+ * www.cavium.com + */ + +#include "bcm_osal.h" +#include "ecore.h" +#include "ecore_status.h" +#include "ecore_mcp.h" +#include "ecore_hw.h" +#include "reg_addr.h" + +#define TLV_TYPE(p) (p[0]) +#define TLV_LENGTH(p) (p[1]) +#define TLV_FLAGS(p) (p[3]) + +static enum _ecore_status_t +ecore_mfw_get_tlv_group(u8 tlv_type, u8 *tlv_group) +{ + switch (tlv_type) { + case DRV_TLV_FEATURE_FLAGS: + case DRV_TLV_LOCAL_ADMIN_ADDR: + case DRV_TLV_ADDITIONAL_MAC_ADDR_1: + case DRV_TLV_ADDITIONAL_MAC_ADDR_2: + case DRV_TLV_OS_DRIVER_STATES: + case DRV_TLV_PXE_BOOT_PROGRESS: + case DRV_TLV_RX_FRAMES_RECEIVED: + case DRV_TLV_RX_BYTES_RECEIVED: + case DRV_TLV_TX_FRAMES_SENT: + case DRV_TLV_TX_BYTES_SENT: + *tlv_group |= ECORE_MFW_TLV_GENERIC; + break; + case DRV_TLV_LSO_MAX_OFFLOAD_SIZE: + case DRV_TLV_LSO_MIN_SEGMENT_COUNT: + case DRV_TLV_PROMISCUOUS_MODE: + case DRV_TLV_TX_DESCRIPTORS_QUEUE_SIZE: + case DRV_TLV_RX_DESCRIPTORS_QUEUE_SIZE: + case DRV_TLV_NUM_OF_NET_QUEUE_VMQ_CFG: + case DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV4: + case DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV6: + case DRV_TLV_TX_DESCRIPTOR_QUEUE_AVG_DEPTH: + case DRV_TLV_RX_DESCRIPTORS_QUEUE_AVG_DEPTH: + case DRV_TLV_IOV_OFFLOAD: + case DRV_TLV_TX_QUEUES_EMPTY: + case DRV_TLV_RX_QUEUES_EMPTY: + case DRV_TLV_TX_QUEUES_FULL: + case DRV_TLV_RX_QUEUES_FULL: + *tlv_group |= ECORE_MFW_TLV_ETH; + break; + case DRV_TLV_SCSI_TO: + case DRV_TLV_R_T_TOV: + case DRV_TLV_R_A_TOV: + case DRV_TLV_E_D_TOV: + case DRV_TLV_CR_TOV: + case DRV_TLV_BOOT_TYPE: + case DRV_TLV_NPIV_STATE: + case DRV_TLV_NUM_OF_NPIV_IDS: + case DRV_TLV_SWITCH_NAME: + case DRV_TLV_SWITCH_PORT_NUM: + case DRV_TLV_SWITCH_PORT_ID: + case DRV_TLV_VENDOR_NAME: + case DRV_TLV_SWITCH_MODEL: + case DRV_TLV_SWITCH_FW_VER: + case DRV_TLV_QOS_PRIORITY_PER_802_1P: + case DRV_TLV_PORT_ALIAS: + case DRV_TLV_PORT_STATE: + case DRV_TLV_FIP_TX_DESCRIPTORS_QUEUE_SIZE: + case DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_SIZE: + case DRV_TLV_LINK_FAILURE_COUNT: + case DRV_TLV_FCOE_BOOT_PROGRESS: + case DRV_TLV_RX_BROADCAST_PACKETS: + case DRV_TLV_TX_BROADCAST_PACKETS: + case DRV_TLV_FCOE_TX_DESCRIPTOR_QUEUE_AVG_DEPTH: + case DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_AVG_DEPTH: + case DRV_TLV_FCOE_RX_FRAMES_RECEIVED: + case DRV_TLV_FCOE_RX_BYTES_RECEIVED: + case DRV_TLV_FCOE_TX_FRAMES_SENT: + case DRV_TLV_FCOE_TX_BYTES_SENT: + case DRV_TLV_CRC_ERROR_COUNT: + case DRV_TLV_CRC_ERROR_1_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_CRC_ERROR_1_TIMESTAMP: + case DRV_TLV_CRC_ERROR_2_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_CRC_ERROR_2_TIMESTAMP: + case DRV_TLV_CRC_ERROR_3_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_CRC_ERROR_3_TIMESTAMP: + case DRV_TLV_CRC_ERROR_4_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_CRC_ERROR_4_TIMESTAMP: + case DRV_TLV_CRC_ERROR_5_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_CRC_ERROR_5_TIMESTAMP: + case DRV_TLV_LOSS_OF_SYNC_ERROR_COUNT: + case DRV_TLV_LOSS_OF_SIGNAL_ERRORS: + case DRV_TLV_PRIMITIVE_SEQUENCE_PROTOCOL_ERROR_COUNT: + case DRV_TLV_DISPARITY_ERROR_COUNT: + case DRV_TLV_CODE_VIOLATION_ERROR_COUNT: + case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_1: + case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_2: + case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_3: + case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_4: + case DRV_TLV_LAST_FLOGI_TIMESTAMP: + case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_1: + case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_2: + case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_3: + case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_4: + case 
DRV_TLV_LAST_FLOGI_ACC_TIMESTAMP: + case DRV_TLV_LAST_FLOGI_RJT: + case DRV_TLV_LAST_FLOGI_RJT_TIMESTAMP: + case DRV_TLV_FDISCS_SENT_COUNT: + case DRV_TLV_FDISC_ACCS_RECEIVED: + case DRV_TLV_FDISC_RJTS_RECEIVED: + case DRV_TLV_PLOGI_SENT_COUNT: + case DRV_TLV_PLOGI_ACCS_RECEIVED: + case DRV_TLV_PLOGI_RJTS_RECEIVED: + case DRV_TLV_PLOGI_1_SENT_DESTINATION_FC_ID: + case DRV_TLV_PLOGI_1_TIMESTAMP: + case DRV_TLV_PLOGI_2_SENT_DESTINATION_FC_ID: + case DRV_TLV_PLOGI_2_TIMESTAMP: + case DRV_TLV_PLOGI_3_SENT_DESTINATION_FC_ID: + case DRV_TLV_PLOGI_3_TIMESTAMP: + case DRV_TLV_PLOGI_4_SENT_DESTINATION_FC_ID: + case DRV_TLV_PLOGI_4_TIMESTAMP: + case DRV_TLV_PLOGI_5_SENT_DESTINATION_FC_ID: + case DRV_TLV_PLOGI_5_TIMESTAMP: + case DRV_TLV_PLOGI_1_ACC_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_PLOGI_1_ACC_TIMESTAMP: + case DRV_TLV_PLOGI_2_ACC_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_PLOGI_2_ACC_TIMESTAMP: + case DRV_TLV_PLOGI_3_ACC_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_PLOGI_3_ACC_TIMESTAMP: + case DRV_TLV_PLOGI_4_ACC_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_PLOGI_4_ACC_TIMESTAMP: + case DRV_TLV_PLOGI_5_ACC_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_PLOGI_5_ACC_TIMESTAMP: + case DRV_TLV_LOGOS_ISSUED: + case DRV_TLV_LOGO_ACCS_RECEIVED: + case DRV_TLV_LOGO_RJTS_RECEIVED: + case DRV_TLV_LOGO_1_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_LOGO_1_TIMESTAMP: + case DRV_TLV_LOGO_2_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_LOGO_2_TIMESTAMP: + case DRV_TLV_LOGO_3_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_LOGO_3_TIMESTAMP: + case DRV_TLV_LOGO_4_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_LOGO_4_TIMESTAMP: + case DRV_TLV_LOGO_5_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_LOGO_5_TIMESTAMP: + case DRV_TLV_LOGOS_RECEIVED: + case DRV_TLV_ACCS_ISSUED: + case DRV_TLV_PRLIS_ISSUED: + case DRV_TLV_ACCS_RECEIVED: + case DRV_TLV_ABTS_SENT_COUNT: + case DRV_TLV_ABTS_ACCS_RECEIVED: + case DRV_TLV_ABTS_RJTS_RECEIVED: + case DRV_TLV_ABTS_1_SENT_DESTINATION_FC_ID: + case DRV_TLV_ABTS_1_TIMESTAMP: + case DRV_TLV_ABTS_2_SENT_DESTINATION_FC_ID: + case DRV_TLV_ABTS_2_TIMESTAMP: + case DRV_TLV_ABTS_3_SENT_DESTINATION_FC_ID: + case DRV_TLV_ABTS_3_TIMESTAMP: + case DRV_TLV_ABTS_4_SENT_DESTINATION_FC_ID: + case DRV_TLV_ABTS_4_TIMESTAMP: + case DRV_TLV_ABTS_5_SENT_DESTINATION_FC_ID: + case DRV_TLV_ABTS_5_TIMESTAMP: + case DRV_TLV_RSCNS_RECEIVED: + case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_1: + case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_2: + case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_3: + case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_4: + case DRV_TLV_LUN_RESETS_ISSUED: + case DRV_TLV_ABORT_TASK_SETS_ISSUED: + case DRV_TLV_TPRLOS_SENT: + case DRV_TLV_NOS_SENT_COUNT: + case DRV_TLV_NOS_RECEIVED_COUNT: + case DRV_TLV_OLS_COUNT: + case DRV_TLV_LR_COUNT: + case DRV_TLV_LRR_COUNT: + case DRV_TLV_LIP_SENT_COUNT: + case DRV_TLV_LIP_RECEIVED_COUNT: + case DRV_TLV_EOFA_COUNT: + case DRV_TLV_EOFNI_COUNT: + case DRV_TLV_SCSI_STATUS_CHECK_CONDITION_COUNT: + case DRV_TLV_SCSI_STATUS_CONDITION_MET_COUNT: + case DRV_TLV_SCSI_STATUS_BUSY_COUNT: + case DRV_TLV_SCSI_STATUS_INTERMEDIATE_COUNT: + case DRV_TLV_SCSI_STATUS_INTERMEDIATE_CONDITION_MET_COUNT: + case DRV_TLV_SCSI_STATUS_RESERVATION_CONFLICT_COUNT: + case DRV_TLV_SCSI_STATUS_TASK_SET_FULL_COUNT: + case DRV_TLV_SCSI_STATUS_ACA_ACTIVE_COUNT: + case DRV_TLV_SCSI_STATUS_TASK_ABORTED_COUNT: + case DRV_TLV_SCSI_CHECK_CONDITION_1_RECEIVED_SK_ASC_ASCQ: + case DRV_TLV_SCSI_CHECK_1_TIMESTAMP: + case DRV_TLV_SCSI_CHECK_CONDITION_2_RECEIVED_SK_ASC_ASCQ: + case DRV_TLV_SCSI_CHECK_2_TIMESTAMP: + case DRV_TLV_SCSI_CHECK_CONDITION_3_RECEIVED_SK_ASC_ASCQ: + case 
DRV_TLV_SCSI_CHECK_3_TIMESTAMP: + case DRV_TLV_SCSI_CHECK_CONDITION_4_RECEIVED_SK_ASC_ASCQ: + case DRV_TLV_SCSI_CHECK_4_TIMESTAMP: + case DRV_TLV_SCSI_CHECK_CONDITION_5_RECEIVED_SK_ASC_ASCQ: + case DRV_TLV_SCSI_CHECK_5_TIMESTAMP: + *tlv_group = ECORE_MFW_TLV_FCOE; + break; + case DRV_TLV_TARGET_LLMNR_ENABLED: + case DRV_TLV_HEADER_DIGEST_FLAG_ENABLED: + case DRV_TLV_DATA_DIGEST_FLAG_ENABLED: + case DRV_TLV_AUTHENTICATION_METHOD: + case DRV_TLV_ISCSI_BOOT_TARGET_PORTAL: + case DRV_TLV_MAX_FRAME_SIZE: + case DRV_TLV_PDU_TX_DESCRIPTORS_QUEUE_SIZE: + case DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_SIZE: + case DRV_TLV_ISCSI_BOOT_PROGRESS: + case DRV_TLV_PDU_TX_DESCRIPTOR_QUEUE_AVG_DEPTH: + case DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_AVG_DEPTH: + case DRV_TLV_ISCSI_PDU_RX_FRAMES_RECEIVED: + case DRV_TLV_ISCSI_PDU_RX_BYTES_RECEIVED: + case DRV_TLV_ISCSI_PDU_TX_FRAMES_SENT: + case DRV_TLV_ISCSI_PDU_TX_BYTES_SENT: + *tlv_group |= ECORE_MFW_TLV_ISCSI; + break; + default: + return ECORE_INVAL; + } + + return ECORE_SUCCESS; +} + +static int +ecore_mfw_get_gen_tlv_value(struct ecore_drv_tlv_hdr *p_tlv, + struct ecore_mfw_tlv_generic *p_drv_buf, + u8 **p_tlv_buf) +{ + switch (p_tlv->tlv_type) { + case DRV_TLV_FEATURE_FLAGS: + if (p_drv_buf->feat_flags_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->feat_flags; + return sizeof(p_drv_buf->feat_flags); + } + break; + case DRV_TLV_LOCAL_ADMIN_ADDR: + if (p_drv_buf->local_mac_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->local_mac; + return sizeof(p_drv_buf->local_mac); + } + break; + case DRV_TLV_ADDITIONAL_MAC_ADDR_1: + if (p_drv_buf->additional_mac1_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->additional_mac1; + return sizeof(p_drv_buf->additional_mac1); + } + break; + case DRV_TLV_ADDITIONAL_MAC_ADDR_2: + if (p_drv_buf->additional_mac2_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->additional_mac2; + return sizeof(p_drv_buf->additional_mac2); + } + break; + case DRV_TLV_OS_DRIVER_STATES: + if (p_drv_buf->drv_state_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->drv_state; + return sizeof(p_drv_buf->drv_state); + } + break; + case DRV_TLV_PXE_BOOT_PROGRESS: + if (p_drv_buf->pxe_progress_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->pxe_progress; + return sizeof(p_drv_buf->pxe_progress); + } + break; + case DRV_TLV_RX_FRAMES_RECEIVED: + if (p_drv_buf->rx_frames_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->rx_frames; + return sizeof(p_drv_buf->rx_frames); + } + break; + case DRV_TLV_RX_BYTES_RECEIVED: + if (p_drv_buf->rx_bytes_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->rx_bytes; + return sizeof(p_drv_buf->rx_bytes); + } + break; + case DRV_TLV_TX_FRAMES_SENT: + if (p_drv_buf->tx_frames_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->tx_frames; + return sizeof(p_drv_buf->tx_frames); + } + break; + case DRV_TLV_TX_BYTES_SENT: + if (p_drv_buf->tx_bytes_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->tx_bytes; + return sizeof(p_drv_buf->tx_bytes); + } + break; + default: + break; + } + + return -1; +} + +static int +ecore_mfw_get_eth_tlv_value(struct ecore_drv_tlv_hdr *p_tlv, + struct ecore_mfw_tlv_eth *p_drv_buf, + u8 **p_tlv_buf) +{ + switch (p_tlv->tlv_type) { + case DRV_TLV_LSO_MAX_OFFLOAD_SIZE: + if (p_drv_buf->lso_maxoff_size_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->lso_maxoff_size; + return sizeof(p_drv_buf->lso_maxoff_size); + } + break; + case DRV_TLV_LSO_MIN_SEGMENT_COUNT: + if (p_drv_buf->lso_minseg_size_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->lso_minseg_size; + return sizeof(p_drv_buf->lso_minseg_size); + } + break; + case DRV_TLV_PROMISCUOUS_MODE: + if (p_drv_buf->prom_mode_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->prom_mode; + 
return sizeof(p_drv_buf->prom_mode); + } + break; + case DRV_TLV_TX_DESCRIPTORS_QUEUE_SIZE: + if (p_drv_buf->tx_descr_size_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->tx_descr_size; + return sizeof(p_drv_buf->tx_descr_size); + } + break; + case DRV_TLV_RX_DESCRIPTORS_QUEUE_SIZE: + if (p_drv_buf->rx_descr_size_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->rx_descr_size; + return sizeof(p_drv_buf->rx_descr_size); + } + break; + case DRV_TLV_NUM_OF_NET_QUEUE_VMQ_CFG: + if (p_drv_buf->netq_count_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->netq_count; + return sizeof(p_drv_buf->netq_count); + } + break; + case DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV4: + if (p_drv_buf->tcp4_offloads_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->tcp4_offloads; + return sizeof(p_drv_buf->tcp4_offloads); + } + break; + case DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV6: + if (p_drv_buf->tcp6_offloads_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->tcp6_offloads; + return sizeof(p_drv_buf->tcp6_offloads); + } + break; + case DRV_TLV_TX_DESCRIPTOR_QUEUE_AVG_DEPTH: + if (p_drv_buf->tx_descr_qdepth_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->tx_descr_qdepth; + return sizeof(p_drv_buf->tx_descr_qdepth); + } + break; + case DRV_TLV_RX_DESCRIPTORS_QUEUE_AVG_DEPTH: + if (p_drv_buf->rx_descr_qdepth_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->rx_descr_qdepth; + return sizeof(p_drv_buf->rx_descr_qdepth); + } + break; + case DRV_TLV_IOV_OFFLOAD: + if (p_drv_buf->iov_offload_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->iov_offload; + return sizeof(p_drv_buf->iov_offload); + } + break; + case DRV_TLV_TX_QUEUES_EMPTY: + if (p_drv_buf->txqs_empty_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->txqs_empty; + return sizeof(p_drv_buf->txqs_empty); + } + break; + case DRV_TLV_RX_QUEUES_EMPTY: + if (p_drv_buf->rxqs_empty_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->rxqs_empty; + return sizeof(p_drv_buf->rxqs_empty); + } + break; + case DRV_TLV_TX_QUEUES_FULL: + if (p_drv_buf->num_txqs_full_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->num_txqs_full; + return sizeof(p_drv_buf->num_txqs_full); + } + break; + case DRV_TLV_RX_QUEUES_FULL: + if (p_drv_buf->num_rxqs_full_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->num_rxqs_full; + return sizeof(p_drv_buf->num_rxqs_full); + } + break; + default: + break; + } + + return -1; +} + +static int +ecore_mfw_get_fcoe_tlv_value(struct ecore_drv_tlv_hdr *p_tlv, + struct ecore_mfw_tlv_fcoe *p_drv_buf, + u8 **p_tlv_buf) +{ + switch (p_tlv->tlv_type) { + case DRV_TLV_SCSI_TO: + if (p_drv_buf->scsi_timeout_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->scsi_timeout; + return sizeof(p_drv_buf->scsi_timeout); + } + break; + case DRV_TLV_R_T_TOV: + if (p_drv_buf->rt_tov_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->rt_tov; + return sizeof(p_drv_buf->rt_tov); + } + break; + case DRV_TLV_R_A_TOV: + if (p_drv_buf->ra_tov_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->ra_tov; + return sizeof(p_drv_buf->ra_tov); + } + break; + case DRV_TLV_E_D_TOV: + if (p_drv_buf->ed_tov_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->ed_tov; + return sizeof(p_drv_buf->ed_tov); + } + break; + case DRV_TLV_CR_TOV: + if (p_drv_buf->cr_tov_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->cr_tov; + return sizeof(p_drv_buf->cr_tov); + } + break; + case DRV_TLV_BOOT_TYPE: + if (p_drv_buf->boot_type_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->boot_type; + return sizeof(p_drv_buf->boot_type); + } + break; + case DRV_TLV_NPIV_STATE: + if (p_drv_buf->npiv_state_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->npiv_state; + return sizeof(p_drv_buf->npiv_state); + } + break; + case DRV_TLV_NUM_OF_NPIV_IDS: + if (p_drv_buf->num_npiv_ids_set) { + 
*p_tlv_buf = (u8 *)&p_drv_buf->num_npiv_ids; + return sizeof(p_drv_buf->num_npiv_ids); + } + break; + case DRV_TLV_SWITCH_NAME: + if (p_drv_buf->switch_name_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->switch_name; + return sizeof(p_drv_buf->switch_name); + } + break; + case DRV_TLV_SWITCH_PORT_NUM: + if (p_drv_buf->switch_portnum_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->switch_portnum; + return sizeof(p_drv_buf->switch_portnum); + } + break; + case DRV_TLV_SWITCH_PORT_ID: + if (p_drv_buf->switch_portid_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->switch_portid; + return sizeof(p_drv_buf->switch_portid); + } + break; + case DRV_TLV_VENDOR_NAME: + if (p_drv_buf->vendor_name_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->vendor_name; + return sizeof(p_drv_buf->vendor_name); + } + break; + case DRV_TLV_SWITCH_MODEL: + if (p_drv_buf->switch_model_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->switch_model; + return sizeof(p_drv_buf->switch_model); + } + break; + case DRV_TLV_SWITCH_FW_VER: + if (p_drv_buf->switch_fw_version_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->switch_fw_version; + return sizeof(p_drv_buf->switch_fw_version); + } + break; + case DRV_TLV_QOS_PRIORITY_PER_802_1P: + if (p_drv_buf->qos_pri_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->qos_pri; + return sizeof(p_drv_buf->qos_pri); + } + break; + case DRV_TLV_PORT_ALIAS: + if (p_drv_buf->port_alias_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->port_alias; + return sizeof(p_drv_buf->port_alias); + } + break; + case DRV_TLV_PORT_STATE: + if (p_drv_buf->port_state_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->port_state; + return sizeof(p_drv_buf->port_state); + } + break; + case DRV_TLV_FIP_TX_DESCRIPTORS_QUEUE_SIZE: + if (p_drv_buf->fip_tx_descr_size_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->fip_tx_descr_size; + return sizeof(p_drv_buf->fip_tx_descr_size); + } + break; + case DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_SIZE: + if (p_drv_buf->fip_rx_descr_size_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->fip_rx_descr_size; + return sizeof(p_drv_buf->fip_rx_descr_size); + } + break; + case DRV_TLV_LINK_FAILURE_COUNT: + if (p_drv_buf->link_failures_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->link_failures; + return sizeof(p_drv_buf->link_failures); + } + break; + case DRV_TLV_FCOE_BOOT_PROGRESS: + if (p_drv_buf->fcoe_boot_progress_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->fcoe_boot_progress; + return sizeof(p_drv_buf->fcoe_boot_progress); + } + break; + case DRV_TLV_RX_BROADCAST_PACKETS: + if (p_drv_buf->rx_bcast_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->rx_bcast; + return sizeof(p_drv_buf->rx_bcast); + } + break; + case DRV_TLV_TX_BROADCAST_PACKETS: + if (p_drv_buf->tx_bcast_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->tx_bcast; + return sizeof(p_drv_buf->tx_bcast); + } + break; + case DRV_TLV_FCOE_TX_DESCRIPTOR_QUEUE_AVG_DEPTH: + if (p_drv_buf->fcoe_txq_depth_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->fcoe_txq_depth; + return sizeof(p_drv_buf->fcoe_txq_depth); + } + break; + case DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_AVG_DEPTH: + if (p_drv_buf->fcoe_rxq_depth_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->fcoe_rxq_depth; + return sizeof(p_drv_buf->fcoe_rxq_depth); + } + break; + case DRV_TLV_FCOE_RX_FRAMES_RECEIVED: + if (p_drv_buf->fcoe_rx_frames_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->fcoe_rx_frames; + return sizeof(p_drv_buf->fcoe_rx_frames); + } + break; + case DRV_TLV_FCOE_RX_BYTES_RECEIVED: + if (p_drv_buf->fcoe_rx_bytes_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->fcoe_rx_bytes; + return sizeof(p_drv_buf->fcoe_rx_bytes); + } + break; + case DRV_TLV_FCOE_TX_FRAMES_SENT: + if (p_drv_buf->fcoe_tx_frames_set) { + 
*p_tlv_buf = (u8 *)&p_drv_buf->fcoe_tx_frames; + return sizeof(p_drv_buf->fcoe_tx_frames); + } + break; + case DRV_TLV_FCOE_TX_BYTES_SENT: + if (p_drv_buf->fcoe_tx_bytes_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->fcoe_tx_bytes; + return sizeof(p_drv_buf->fcoe_tx_bytes); + } + break; + case DRV_TLV_CRC_ERROR_COUNT: + if (p_drv_buf->crc_count_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->crc_count; + return sizeof(p_drv_buf->crc_count); + } + break; + case DRV_TLV_CRC_ERROR_1_RECEIVED_SOURCE_FC_ID: + if (p_drv_buf->crc_err_src_fcid_set[0]) { + *p_tlv_buf = (u8 *)&p_drv_buf->crc_err_src_fcid[0]; + return sizeof(p_drv_buf->crc_err_src_fcid[0]); + } + break; + case DRV_TLV_CRC_ERROR_2_RECEIVED_SOURCE_FC_ID: + if (p_drv_buf->crc_err_src_fcid_set[1]) { + *p_tlv_buf = (u8 *)&p_drv_buf->crc_err_src_fcid[1]; + return sizeof(p_drv_buf->crc_err_src_fcid[1]); + } + break; + case DRV_TLV_CRC_ERROR_3_RECEIVED_SOURCE_FC_ID: + if (p_drv_buf->crc_err_src_fcid_set[2]) { + *p_tlv_buf = (u8 *)&p_drv_buf->crc_err_src_fcid[2]; + return sizeof(p_drv_buf->crc_err_src_fcid[2]); + } + break; + case DRV_TLV_CRC_ERROR_4_RECEIVED_SOURCE_FC_ID: + if (p_drv_buf->crc_err_src_fcid_set[3]) { + *p_tlv_buf = (u8 *)&p_drv_buf->crc_err_src_fcid[3]; + return sizeof(p_drv_buf->crc_err_src_fcid[3]); + } + break; + case DRV_TLV_CRC_ERROR_5_RECEIVED_SOURCE_FC_ID: + if (p_drv_buf->crc_err_src_fcid_set[4]) { + *p_tlv_buf = (u8 *)&p_drv_buf->crc_err_src_fcid[4]; + return sizeof(p_drv_buf->crc_err_src_fcid[4]); + } + break; + case DRV_TLV_CRC_ERROR_1_TIMESTAMP: + if (p_drv_buf->crc_err_tstamp_set[0]) { + *p_tlv_buf = (u8 *)&p_drv_buf->crc_err_tstamp[0]; + return sizeof(p_drv_buf->crc_err_tstamp[0]); + } + break; + case DRV_TLV_CRC_ERROR_2_TIMESTAMP: + if (p_drv_buf->crc_err_tstamp_set[1]) { + *p_tlv_buf = (u8 *)&p_drv_buf->crc_err_tstamp[1]; + return sizeof(p_drv_buf->crc_err_tstamp[1]); + } + break; + case DRV_TLV_CRC_ERROR_3_TIMESTAMP: + if (p_drv_buf->crc_err_tstamp_set[2]) { + *p_tlv_buf = (u8 *)&p_drv_buf->crc_err_tstamp[2]; + return sizeof(p_drv_buf->crc_err_tstamp[2]); + } + break; + case DRV_TLV_CRC_ERROR_4_TIMESTAMP: + if (p_drv_buf->crc_err_tstamp_set[3]) { + *p_tlv_buf = (u8 *)&p_drv_buf->crc_err_tstamp[3]; + return sizeof(p_drv_buf->crc_err_tstamp[3]); + } + break; + case DRV_TLV_CRC_ERROR_5_TIMESTAMP: + if (p_drv_buf->crc_err_tstamp_set[4]) { + *p_tlv_buf = (u8 *)&p_drv_buf->crc_err_tstamp[4]; + return sizeof(p_drv_buf->crc_err_tstamp[4]); + } + break; + case DRV_TLV_LOSS_OF_SYNC_ERROR_COUNT: + if (p_drv_buf->losync_err_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->losync_err; + return sizeof(p_drv_buf->losync_err); + } + break; + case DRV_TLV_LOSS_OF_SIGNAL_ERRORS: + if (p_drv_buf->losig_err_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->losig_err; + return sizeof(p_drv_buf->losig_err); + } + break; + case DRV_TLV_PRIMITIVE_SEQUENCE_PROTOCOL_ERROR_COUNT: + if (p_drv_buf->primtive_err_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->primtive_err; + return sizeof(p_drv_buf->primtive_err); + } + break; + case DRV_TLV_DISPARITY_ERROR_COUNT: + if (p_drv_buf->disparity_err_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->disparity_err; + return sizeof(p_drv_buf->disparity_err); + } + break; + case DRV_TLV_CODE_VIOLATION_ERROR_COUNT: + if (p_drv_buf->code_violation_err_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->code_violation_err; + return sizeof(p_drv_buf->code_violation_err); + } + break; + case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_1: + if (p_drv_buf->flogi_param_set[0]) { + *p_tlv_buf = (u8 *)&p_drv_buf->flogi_param[0]; + return 
sizeof(p_drv_buf->flogi_param[0]); + } + break; + case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_2: + if (p_drv_buf->flogi_param_set[1]) { + *p_tlv_buf = (u8 *)&p_drv_buf->flogi_param[1]; + return sizeof(p_drv_buf->flogi_param[1]); + } + break; + case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_3: + if (p_drv_buf->flogi_param_set[2]) { + *p_tlv_buf = (u8 *)&p_drv_buf->flogi_param[2]; + return sizeof(p_drv_buf->flogi_param[2]); + } + break; + case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_4: + if (p_drv_buf->flogi_param_set[3]) { + *p_tlv_buf = (u8 *)&p_drv_buf->flogi_param[3]; + return sizeof(p_drv_buf->flogi_param[3]); + } + break; + case DRV_TLV_LAST_FLOGI_TIMESTAMP: + if (p_drv_buf->flogi_tstamp_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->flogi_tstamp; + return sizeof(p_drv_buf->flogi_tstamp); + } + break; + case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_1: + if (p_drv_buf->flogi_acc_param_set[0]) { + *p_tlv_buf = (u8 *)&p_drv_buf->flogi_acc_param[0]; + return sizeof(p_drv_buf->flogi_acc_param[0]); + } + break; + case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_2: + if (p_drv_buf->flogi_acc_param_set[1]) { + *p_tlv_buf = (u8 *)&p_drv_buf->flogi_acc_param[1]; + return sizeof(p_drv_buf->flogi_acc_param[1]); + } + break; + case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_3: + if (p_drv_buf->flogi_acc_param_set[2]) { + *p_tlv_buf = (u8 *)&p_drv_buf->flogi_acc_param[2]; + return sizeof(p_drv_buf->flogi_acc_param[2]); + } + break; + case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_4: + if (p_drv_buf->flogi_acc_param_set[3]) { + *p_tlv_buf = (u8 *)&p_drv_buf->flogi_acc_param[3]; + return sizeof(p_drv_buf->flogi_acc_param[3]); + } + break; + case DRV_TLV_LAST_FLOGI_ACC_TIMESTAMP: + if (p_drv_buf->flogi_acc_tstamp_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->flogi_acc_tstamp; + return sizeof(p_drv_buf->flogi_acc_tstamp); + } + break; + case DRV_TLV_LAST_FLOGI_RJT: + if (p_drv_buf->flogi_rjt_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->flogi_rjt; + return sizeof(p_drv_buf->flogi_rjt); + } + break; + case DRV_TLV_LAST_FLOGI_RJT_TIMESTAMP: + if (p_drv_buf->flogi_rjt_tstamp_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->flogi_rjt_tstamp; + return sizeof(p_drv_buf->flogi_rjt_tstamp); + } + break; + case DRV_TLV_FDISCS_SENT_COUNT: + if (p_drv_buf->fdiscs_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->fdiscs; + return sizeof(p_drv_buf->fdiscs); + } + break; + case DRV_TLV_FDISC_ACCS_RECEIVED: + if (p_drv_buf->fdisc_acc_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->fdisc_acc; + return sizeof(p_drv_buf->fdisc_acc); + } + break; + case DRV_TLV_FDISC_RJTS_RECEIVED: + if (p_drv_buf->fdisc_rjt_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->fdisc_rjt; + return sizeof(p_drv_buf->fdisc_rjt); + } + break; + case DRV_TLV_PLOGI_SENT_COUNT: + if (p_drv_buf->plogi_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->plogi; + return sizeof(p_drv_buf->plogi); + } + break; + case DRV_TLV_PLOGI_ACCS_RECEIVED: + if (p_drv_buf->plogi_acc_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->plogi_acc; + return sizeof(p_drv_buf->plogi_acc); + } + break; + case DRV_TLV_PLOGI_RJTS_RECEIVED: + if (p_drv_buf->plogi_rjt_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->plogi_rjt; + return sizeof(p_drv_buf->plogi_rjt); + } + break; + case DRV_TLV_PLOGI_1_SENT_DESTINATION_FC_ID: + if (p_drv_buf->plogi_dst_fcid_set[0]) { + *p_tlv_buf = (u8 *)&p_drv_buf->plogi_dst_fcid[0]; + return sizeof(p_drv_buf->plogi_dst_fcid[0]); + } + break; + case DRV_TLV_PLOGI_2_SENT_DESTINATION_FC_ID: + if (p_drv_buf->plogi_dst_fcid_set[1]) { + *p_tlv_buf = (u8 *)&p_drv_buf->plogi_dst_fcid[1]; + return 
sizeof(p_drv_buf->plogi_dst_fcid[1]); + } + break; + case DRV_TLV_PLOGI_3_SENT_DESTINATION_FC_ID: + if (p_drv_buf->plogi_dst_fcid_set[2]) { + *p_tlv_buf = (u8 *)&p_drv_buf->plogi_dst_fcid[2]; + return sizeof(p_drv_buf->plogi_dst_fcid[2]); + } + break; + case DRV_TLV_PLOGI_4_SENT_DESTINATION_FC_ID: + if (p_drv_buf->plogi_dst_fcid_set[3]) { + *p_tlv_buf = (u8 *)&p_drv_buf->plogi_dst_fcid[3]; + return sizeof(p_drv_buf->plogi_dst_fcid[3]); + } + break; + case DRV_TLV_PLOGI_5_SENT_DESTINATION_FC_ID: + if (p_drv_buf->plogi_dst_fcid_set[4]) { + *p_tlv_buf = (u8 *)&p_drv_buf->plogi_dst_fcid[4]; + return sizeof(p_drv_buf->plogi_dst_fcid[4]); + } + break; + case DRV_TLV_PLOGI_1_TIMESTAMP: + if (p_drv_buf->plogi_tstamp_set[0]) { + *p_tlv_buf = (u8 *)&p_drv_buf->plogi_tstamp[0]; + return sizeof(p_drv_buf->plogi_tstamp[0]); + } + break; + case DRV_TLV_PLOGI_2_TIMESTAMP: + if (p_drv_buf->plogi_tstamp_set[1]) { + *p_tlv_buf = (u8 *)&p_drv_buf->plogi_tstamp[1]; + return sizeof(p_drv_buf->plogi_tstamp[1]); + } + break; + case DRV_TLV_PLOGI_3_TIMESTAMP: + if (p_drv_buf->plogi_tstamp_set[2]) { + *p_tlv_buf = (u8 *)&p_drv_buf->plogi_tstamp[2]; + return sizeof(p_drv_buf->plogi_tstamp[2]); + } + break; + case DRV_TLV_PLOGI_4_TIMESTAMP: + if (p_drv_buf->plogi_tstamp_set[3]) { + *p_tlv_buf = (u8 *)&p_drv_buf->plogi_tstamp[3]; + return sizeof(p_drv_buf->plogi_tstamp[3]); + } + break; + case DRV_TLV_PLOGI_5_TIMESTAMP: + if (p_drv_buf->plogi_tstamp_set[4]) { + *p_tlv_buf = (u8 *)&p_drv_buf->plogi_tstamp[4]; + return sizeof(p_drv_buf->plogi_tstamp[4]); + } + break; + case DRV_TLV_PLOGI_1_ACC_RECEIVED_SOURCE_FC_ID: + if (p_drv_buf->plogi_acc_src_fcid_set[0]) { + *p_tlv_buf = (u8 *)&p_drv_buf->plogi_acc_src_fcid[0]; + return sizeof(p_drv_buf->plogi_acc_src_fcid[0]); + } + break; + case DRV_TLV_PLOGI_2_ACC_RECEIVED_SOURCE_FC_ID: + if (p_drv_buf->plogi_acc_src_fcid_set[1]) { + *p_tlv_buf = (u8 *)&p_drv_buf->plogi_acc_src_fcid[1]; + return sizeof(p_drv_buf->plogi_acc_src_fcid[1]); + } + break; + case DRV_TLV_PLOGI_3_ACC_RECEIVED_SOURCE_FC_ID: + if (p_drv_buf->plogi_acc_src_fcid_set[2]) { + *p_tlv_buf = (u8 *)&p_drv_buf->plogi_acc_src_fcid[2]; + return sizeof(p_drv_buf->plogi_acc_src_fcid[2]); + } + break; + case DRV_TLV_PLOGI_4_ACC_RECEIVED_SOURCE_FC_ID: + if (p_drv_buf->plogi_acc_src_fcid_set[3]) { + *p_tlv_buf = (u8 *)&p_drv_buf->plogi_acc_src_fcid[3]; + return sizeof(p_drv_buf->plogi_acc_src_fcid[3]); + } + break; + case DRV_TLV_PLOGI_5_ACC_RECEIVED_SOURCE_FC_ID: + if (p_drv_buf->plogi_acc_src_fcid_set[4]) { + *p_tlv_buf = (u8 *)&p_drv_buf->plogi_acc_src_fcid[4]; + return sizeof(p_drv_buf->plogi_acc_src_fcid[4]); + } + break; + case DRV_TLV_PLOGI_1_ACC_TIMESTAMP: + if (p_drv_buf->plogi_acc_tstamp_set[0]) { + *p_tlv_buf = (u8 *)&p_drv_buf->plogi_acc_tstamp[0]; + return sizeof(p_drv_buf->plogi_acc_tstamp[0]); + } + break; + case DRV_TLV_PLOGI_2_ACC_TIMESTAMP: + if (p_drv_buf->plogi_acc_tstamp_set[1]) { + *p_tlv_buf = (u8 *)&p_drv_buf->plogi_acc_tstamp[1]; + return sizeof(p_drv_buf->plogi_acc_tstamp[1]); + } + break; + case DRV_TLV_PLOGI_3_ACC_TIMESTAMP: + if (p_drv_buf->plogi_acc_tstamp_set[2]) { + *p_tlv_buf = (u8 *)&p_drv_buf->plogi_acc_tstamp[2]; + return sizeof(p_drv_buf->plogi_acc_tstamp[2]); + } + break; + case DRV_TLV_PLOGI_4_ACC_TIMESTAMP: + if (p_drv_buf->plogi_acc_tstamp_set[3]) { + *p_tlv_buf = (u8 *)&p_drv_buf->plogi_acc_tstamp[3]; + return sizeof(p_drv_buf->plogi_acc_tstamp[3]); + } + break; + case DRV_TLV_PLOGI_5_ACC_TIMESTAMP: + if (p_drv_buf->plogi_acc_tstamp_set[4]) { + *p_tlv_buf = (u8 
*)&p_drv_buf->plogi_acc_tstamp[4]; + return sizeof(p_drv_buf->plogi_acc_tstamp[4]); + } + break; + case DRV_TLV_LOGOS_ISSUED: + if (p_drv_buf->tx_plogos_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->tx_plogos; + return sizeof(p_drv_buf->tx_plogos); + } + break; + case DRV_TLV_LOGO_ACCS_RECEIVED: + if (p_drv_buf->plogo_acc_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->plogo_acc; + return sizeof(p_drv_buf->plogo_acc); + } + break; + case DRV_TLV_LOGO_RJTS_RECEIVED: + if (p_drv_buf->plogo_rjt_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->plogo_rjt; + return sizeof(p_drv_buf->plogo_rjt); + } + break; + case DRV_TLV_LOGO_1_RECEIVED_SOURCE_FC_ID: + if (p_drv_buf->plogo_src_fcid_set[0]) { + *p_tlv_buf = (u8 *)&p_drv_buf->plogo_src_fcid[0]; + return sizeof(p_drv_buf->plogo_src_fcid[0]); + } + break; + case DRV_TLV_LOGO_2_RECEIVED_SOURCE_FC_ID: + if (p_drv_buf->plogo_src_fcid_set[1]) { + *p_tlv_buf = (u8 *)&p_drv_buf->plogo_src_fcid[1]; + return sizeof(p_drv_buf->plogo_src_fcid[1]); + } + break; + case DRV_TLV_LOGO_3_RECEIVED_SOURCE_FC_ID: + if (p_drv_buf->plogo_src_fcid_set[2]) { + *p_tlv_buf = (u8 *)&p_drv_buf->plogo_src_fcid[2]; + return sizeof(p_drv_buf->plogo_src_fcid[2]); + } + break; + case DRV_TLV_LOGO_4_RECEIVED_SOURCE_FC_ID: + if (p_drv_buf->plogo_src_fcid_set[3]) { + *p_tlv_buf = (u8 *)&p_drv_buf->plogo_src_fcid[3]; + return sizeof(p_drv_buf->plogo_src_fcid[3]); + } + break; + case DRV_TLV_LOGO_5_RECEIVED_SOURCE_FC_ID: + if (p_drv_buf->plogo_src_fcid_set[4]) { + *p_tlv_buf = (u8 *)&p_drv_buf->plogo_src_fcid[4]; + return sizeof(p_drv_buf->plogo_src_fcid[4]); + } + break; + case DRV_TLV_LOGO_1_TIMESTAMP: + if (p_drv_buf->plogo_tstamp_set[0]) { + *p_tlv_buf = (u8 *)&p_drv_buf->plogo_tstamp[0]; + return sizeof(p_drv_buf->plogo_tstamp[0]); + } + break; + case DRV_TLV_LOGO_2_TIMESTAMP: + if (p_drv_buf->plogo_tstamp_set[1]) { + *p_tlv_buf = (u8 *)&p_drv_buf->plogo_tstamp[1]; + return sizeof(p_drv_buf->plogo_tstamp[1]); + } + break; + case DRV_TLV_LOGO_3_TIMESTAMP: + if (p_drv_buf->plogo_tstamp_set[2]) { + *p_tlv_buf = (u8 *)&p_drv_buf->plogo_tstamp[2]; + return sizeof(p_drv_buf->plogo_tstamp[2]); + } + break; + case DRV_TLV_LOGO_4_TIMESTAMP: + if (p_drv_buf->plogo_tstamp_set[3]) { + *p_tlv_buf = (u8 *)&p_drv_buf->plogo_tstamp[3]; + return sizeof(p_drv_buf->plogo_tstamp[3]); + } + break; + case DRV_TLV_LOGO_5_TIMESTAMP: + if (p_drv_buf->plogo_tstamp_set[4]) { + *p_tlv_buf = (u8 *)&p_drv_buf->plogo_tstamp[4]; + return sizeof(p_drv_buf->plogo_tstamp[4]); + } + break; + case DRV_TLV_LOGOS_RECEIVED: + if (p_drv_buf->rx_logos_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->rx_logos; + return sizeof(p_drv_buf->rx_logos); + } + break; + case DRV_TLV_ACCS_ISSUED: + if (p_drv_buf->tx_accs_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->tx_accs; + return sizeof(p_drv_buf->tx_accs); + } + break; + case DRV_TLV_PRLIS_ISSUED: + if (p_drv_buf->tx_prlis_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->tx_prlis; + return sizeof(p_drv_buf->tx_prlis); + } + break; + case DRV_TLV_ACCS_RECEIVED: + if (p_drv_buf->rx_accs_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->rx_accs; + return sizeof(p_drv_buf->rx_accs); + } + break; + case DRV_TLV_ABTS_SENT_COUNT: + if (p_drv_buf->tx_abts_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->tx_abts; + return sizeof(p_drv_buf->tx_abts); + } + break; + case DRV_TLV_ABTS_ACCS_RECEIVED: + if (p_drv_buf->rx_abts_acc_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->rx_abts_acc; + return sizeof(p_drv_buf->rx_abts_acc); + } + break; + case DRV_TLV_ABTS_RJTS_RECEIVED: + if (p_drv_buf->rx_abts_rjt_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->rx_abts_rjt; + 
return sizeof(p_drv_buf->rx_abts_rjt); + } + break; + case DRV_TLV_ABTS_1_SENT_DESTINATION_FC_ID: + if (p_drv_buf->abts_dst_fcid_set[0]) { + *p_tlv_buf = (u8 *)&p_drv_buf->abts_dst_fcid[0]; + return sizeof(p_drv_buf->abts_dst_fcid[0]); + } + break; + case DRV_TLV_ABTS_2_SENT_DESTINATION_FC_ID: + if (p_drv_buf->abts_dst_fcid_set[1]) { + *p_tlv_buf = (u8 *)&p_drv_buf->abts_dst_fcid[1]; + return sizeof(p_drv_buf->abts_dst_fcid[1]); + } + break; + case DRV_TLV_ABTS_3_SENT_DESTINATION_FC_ID: + if (p_drv_buf->abts_dst_fcid_set[2]) { + *p_tlv_buf = (u8 *)&p_drv_buf->abts_dst_fcid[2]; + return sizeof(p_drv_buf->abts_dst_fcid[2]); + } + break; + case DRV_TLV_ABTS_4_SENT_DESTINATION_FC_ID: + if (p_drv_buf->abts_dst_fcid_set[3]) { + *p_tlv_buf = (u8 *)&p_drv_buf->abts_dst_fcid[3]; + return sizeof(p_drv_buf->abts_dst_fcid[3]); + } + break; + case DRV_TLV_ABTS_5_SENT_DESTINATION_FC_ID: + if (p_drv_buf->abts_dst_fcid_set[4]) { + *p_tlv_buf = (u8 *)&p_drv_buf->abts_dst_fcid[4]; + return sizeof(p_drv_buf->abts_dst_fcid[4]); + } + break; + case DRV_TLV_ABTS_1_TIMESTAMP: + if (p_drv_buf->abts_tstamp_set[0]) { + *p_tlv_buf = (u8 *)&p_drv_buf->abts_tstamp[0]; + return sizeof(p_drv_buf->abts_tstamp[0]); + } + break; + case DRV_TLV_ABTS_2_TIMESTAMP: + if (p_drv_buf->abts_tstamp_set[1]) { + *p_tlv_buf = (u8 *)&p_drv_buf->abts_tstamp[1]; + return sizeof(p_drv_buf->abts_tstamp[1]); + } + break; + case DRV_TLV_ABTS_3_TIMESTAMP: + if (p_drv_buf->abts_tstamp_set[2]) { + *p_tlv_buf = (u8 *)&p_drv_buf->abts_tstamp[2]; + return sizeof(p_drv_buf->abts_tstamp[2]); + } + break; + case DRV_TLV_ABTS_4_TIMESTAMP: + if (p_drv_buf->abts_tstamp_set[3]) { + *p_tlv_buf = (u8 *)&p_drv_buf->abts_tstamp[3]; + return sizeof(p_drv_buf->abts_tstamp[3]); + } + break; + case DRV_TLV_ABTS_5_TIMESTAMP: + if (p_drv_buf->abts_tstamp_set[4]) { + *p_tlv_buf = (u8 *)&p_drv_buf->abts_tstamp[4]; + return sizeof(p_drv_buf->abts_tstamp[4]); + } + break; + case DRV_TLV_RSCNS_RECEIVED: + if (p_drv_buf->rx_rscn_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->rx_rscn; + return sizeof(p_drv_buf->rx_rscn); + } + break; + case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_1: + if (p_drv_buf->rx_rscn_nport_set[0]) { + *p_tlv_buf = (u8 *)&p_drv_buf->rx_rscn_nport[0]; + return sizeof(p_drv_buf->rx_rscn_nport[0]); + } + break; + case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_2: + if (p_drv_buf->rx_rscn_nport_set[1]) { + *p_tlv_buf = (u8 *)&p_drv_buf->rx_rscn_nport[1]; + return sizeof(p_drv_buf->rx_rscn_nport[1]); + } + break; + case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_3: + if (p_drv_buf->rx_rscn_nport_set[2]) { + *p_tlv_buf = (u8 *)&p_drv_buf->rx_rscn_nport[2]; + return sizeof(p_drv_buf->rx_rscn_nport[2]); + } + break; + case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_4: + if (p_drv_buf->rx_rscn_nport_set[3]) { + *p_tlv_buf = (u8 *)&p_drv_buf->rx_rscn_nport[3]; + return sizeof(p_drv_buf->rx_rscn_nport[3]); + } + break; + case DRV_TLV_LUN_RESETS_ISSUED: + if (p_drv_buf->tx_lun_rst_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->tx_lun_rst; + return sizeof(p_drv_buf->tx_lun_rst); + } + break; + case DRV_TLV_ABORT_TASK_SETS_ISSUED: + if (p_drv_buf->abort_task_sets_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->abort_task_sets; + return sizeof(p_drv_buf->abort_task_sets); + } + break; + case DRV_TLV_TPRLOS_SENT: + if (p_drv_buf->tx_tprlos_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->tx_tprlos; + return sizeof(p_drv_buf->tx_tprlos); + } + break; + case DRV_TLV_NOS_SENT_COUNT: + if (p_drv_buf->tx_nos_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->tx_nos; + return sizeof(p_drv_buf->tx_nos); + } + break; + case 
DRV_TLV_NOS_RECEIVED_COUNT: + if (p_drv_buf->rx_nos_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->rx_nos; + return sizeof(p_drv_buf->rx_nos); + } + break; + case DRV_TLV_OLS_COUNT: + if (p_drv_buf->ols_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->ols; + return sizeof(p_drv_buf->ols); + } + break; + case DRV_TLV_LR_COUNT: + if (p_drv_buf->lr_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->lr; + return sizeof(p_drv_buf->lr); + } + break; + case DRV_TLV_LRR_COUNT: + if (p_drv_buf->lrr_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->lrr; + return sizeof(p_drv_buf->lrr); + } + break; + case DRV_TLV_LIP_SENT_COUNT: + if (p_drv_buf->tx_lip_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->tx_lip; + return sizeof(p_drv_buf->tx_lip); + } + break; + case DRV_TLV_LIP_RECEIVED_COUNT: + if (p_drv_buf->rx_lip_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->rx_lip; + return sizeof(p_drv_buf->rx_lip); + } + break; + case DRV_TLV_EOFA_COUNT: + if (p_drv_buf->eofa_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->eofa; + return sizeof(p_drv_buf->eofa); + } + break; + case DRV_TLV_EOFNI_COUNT: + if (p_drv_buf->eofni_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->eofni; + return sizeof(p_drv_buf->eofni); + } + break; + case DRV_TLV_SCSI_STATUS_CHECK_CONDITION_COUNT: + if (p_drv_buf->scsi_chks_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->scsi_chks; + return sizeof(p_drv_buf->scsi_chks); + } + break; + case DRV_TLV_SCSI_STATUS_CONDITION_MET_COUNT: + if (p_drv_buf->scsi_cond_met_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->scsi_cond_met; + return sizeof(p_drv_buf->scsi_cond_met); + } + break; + case DRV_TLV_SCSI_STATUS_BUSY_COUNT: + if (p_drv_buf->scsi_busy_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->scsi_busy; + return sizeof(p_drv_buf->scsi_busy); + } + break; + case DRV_TLV_SCSI_STATUS_INTERMEDIATE_COUNT: + if (p_drv_buf->scsi_inter_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->scsi_inter; + return sizeof(p_drv_buf->scsi_inter); + } + break; + case DRV_TLV_SCSI_STATUS_INTERMEDIATE_CONDITION_MET_COUNT: + if (p_drv_buf->scsi_inter_cond_met_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->scsi_inter_cond_met; + return sizeof(p_drv_buf->scsi_inter_cond_met); + } + break; + case DRV_TLV_SCSI_STATUS_RESERVATION_CONFLICT_COUNT: + if (p_drv_buf->scsi_rsv_conflicts_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->scsi_rsv_conflicts; + return sizeof(p_drv_buf->scsi_rsv_conflicts); + } + break; + case DRV_TLV_SCSI_STATUS_TASK_SET_FULL_COUNT: + if (p_drv_buf->scsi_tsk_full_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->scsi_tsk_full; + return sizeof(p_drv_buf->scsi_tsk_full); + } + break; + case DRV_TLV_SCSI_STATUS_ACA_ACTIVE_COUNT: + if (p_drv_buf->scsi_aca_active_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->scsi_aca_active; + return sizeof(p_drv_buf->scsi_aca_active); + } + break; + case DRV_TLV_SCSI_STATUS_TASK_ABORTED_COUNT: + if (p_drv_buf->scsi_tsk_abort_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->scsi_tsk_abort; + return sizeof(p_drv_buf->scsi_tsk_abort); + } + break; + case DRV_TLV_SCSI_CHECK_CONDITION_1_RECEIVED_SK_ASC_ASCQ: + if (p_drv_buf->scsi_rx_chk_set[0]) { + *p_tlv_buf = (u8 *)&p_drv_buf->scsi_rx_chk[0]; + return sizeof(p_drv_buf->scsi_rx_chk[0]); + } + break; + case DRV_TLV_SCSI_CHECK_CONDITION_2_RECEIVED_SK_ASC_ASCQ: + if (p_drv_buf->scsi_rx_chk_set[1]) { + *p_tlv_buf = (u8 *)&p_drv_buf->scsi_rx_chk[1]; + return sizeof(p_drv_buf->scsi_rx_chk[1]); + } + break; + case DRV_TLV_SCSI_CHECK_CONDITION_3_RECEIVED_SK_ASC_ASCQ: + if (p_drv_buf->scsi_rx_chk_set[2]) { + *p_tlv_buf = (u8 *)&p_drv_buf->scsi_rx_chk[2]; + return sizeof(p_drv_buf->scsi_rx_chk[2]); + } + break; + case 
DRV_TLV_SCSI_CHECK_CONDITION_4_RECEIVED_SK_ASC_ASCQ: + if (p_drv_buf->scsi_rx_chk_set[3]) { + *p_tlv_buf = (u8 *)&p_drv_buf->scsi_rx_chk[3]; + return sizeof(p_drv_buf->scsi_rx_chk[4]); + } + break; + case DRV_TLV_SCSI_CHECK_CONDITION_5_RECEIVED_SK_ASC_ASCQ: + if (p_drv_buf->scsi_rx_chk_set[4]) { + *p_tlv_buf = (u8 *)&p_drv_buf->scsi_rx_chk[4]; + return sizeof(p_drv_buf->scsi_rx_chk[4]); + } + break; + case DRV_TLV_SCSI_CHECK_1_TIMESTAMP: + if (p_drv_buf->scsi_chk_tstamp_set[0]) { + *p_tlv_buf = (u8 *)&p_drv_buf->scsi_chk_tstamp[0]; + return sizeof(p_drv_buf->scsi_chk_tstamp[0]); + } + break; + case DRV_TLV_SCSI_CHECK_2_TIMESTAMP: + if (p_drv_buf->scsi_chk_tstamp_set[1]) { + *p_tlv_buf = (u8 *)&p_drv_buf->scsi_chk_tstamp[1]; + return sizeof(p_drv_buf->scsi_chk_tstamp[1]); + } + break; + case DRV_TLV_SCSI_CHECK_3_TIMESTAMP: + if (p_drv_buf->scsi_chk_tstamp_set[2]) { + *p_tlv_buf = (u8 *)&p_drv_buf->scsi_chk_tstamp[2]; + return sizeof(p_drv_buf->scsi_chk_tstamp[2]); + } + break; + case DRV_TLV_SCSI_CHECK_4_TIMESTAMP: + if (p_drv_buf->scsi_chk_tstamp_set[3]) { + *p_tlv_buf = (u8 *)&p_drv_buf->scsi_chk_tstamp[3]; + return sizeof(p_drv_buf->scsi_chk_tstamp[3]); + } + break; + case DRV_TLV_SCSI_CHECK_5_TIMESTAMP: + if (p_drv_buf->scsi_chk_tstamp_set[4]) { + *p_tlv_buf = (u8 *)&p_drv_buf->scsi_chk_tstamp[4]; + return sizeof(p_drv_buf->scsi_chk_tstamp[4]); + } + break; + default: + break; + } + + return -1; +} + +static int +ecore_mfw_get_iscsi_tlv_value(struct ecore_drv_tlv_hdr *p_tlv, + struct ecore_mfw_tlv_iscsi *p_drv_buf, + u8 **p_tlv_buf) +{ + switch (p_tlv->tlv_type) { + case DRV_TLV_TARGET_LLMNR_ENABLED: + if (p_drv_buf->target_llmnr_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->target_llmnr; + return sizeof(p_drv_buf->target_llmnr); + } + break; + case DRV_TLV_HEADER_DIGEST_FLAG_ENABLED: + if (p_drv_buf->header_digest_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->header_digest; + return sizeof(p_drv_buf->header_digest); + } + break; + case DRV_TLV_DATA_DIGEST_FLAG_ENABLED: + if (p_drv_buf->data_digest_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->data_digest; + return sizeof(p_drv_buf->data_digest); + } + break; + case DRV_TLV_AUTHENTICATION_METHOD: + if (p_drv_buf->auth_method_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->auth_method; + return sizeof(p_drv_buf->auth_method); + } + break; + case DRV_TLV_ISCSI_BOOT_TARGET_PORTAL: + if (p_drv_buf->boot_taget_portal_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->boot_taget_portal; + return sizeof(p_drv_buf->boot_taget_portal); + } + break; + case DRV_TLV_MAX_FRAME_SIZE: + if (p_drv_buf->frame_size_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->frame_size; + return sizeof(p_drv_buf->frame_size); + } + break; + case DRV_TLV_PDU_TX_DESCRIPTORS_QUEUE_SIZE: + if (p_drv_buf->tx_desc_size_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->tx_desc_size; + return sizeof(p_drv_buf->tx_desc_size); + } + break; + case DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_SIZE: + if (p_drv_buf->rx_desc_size_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->rx_desc_size; + return sizeof(p_drv_buf->rx_desc_size); + } + break; + case DRV_TLV_ISCSI_BOOT_PROGRESS: + if (p_drv_buf->boot_progress_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->boot_progress; + return sizeof(p_drv_buf->boot_progress); + } + break; + case DRV_TLV_PDU_TX_DESCRIPTOR_QUEUE_AVG_DEPTH: + if (p_drv_buf->tx_desc_qdepth_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->tx_desc_qdepth; + return sizeof(p_drv_buf->tx_desc_qdepth); + } + break; + case DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_AVG_DEPTH: + if (p_drv_buf->rx_desc_qdepth_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->rx_desc_qdepth; + 
return sizeof(p_drv_buf->rx_desc_qdepth); + } + break; + case DRV_TLV_ISCSI_PDU_RX_FRAMES_RECEIVED: + if (p_drv_buf->rx_frames_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->rx_frames; + return sizeof(p_drv_buf->rx_frames); + } + break; + case DRV_TLV_ISCSI_PDU_RX_BYTES_RECEIVED: + if (p_drv_buf->rx_bytes_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->rx_bytes; + return sizeof(p_drv_buf->rx_bytes); + } + break; + case DRV_TLV_ISCSI_PDU_TX_FRAMES_SENT: + if (p_drv_buf->tx_frames_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->tx_frames; + return sizeof(p_drv_buf->tx_frames); + } + break; + case DRV_TLV_ISCSI_PDU_TX_BYTES_SENT: + if (p_drv_buf->tx_bytes_set) { + *p_tlv_buf = (u8 *)&p_drv_buf->tx_bytes; + return sizeof(p_drv_buf->tx_bytes); + } + break; + default: + break; + } + + return -1; +} + +static enum _ecore_status_t ecore_mfw_update_tlvs(struct ecore_hwfn *p_hwfn, + u8 tlv_group, u8 *p_mfw_buf, + u32 size) +{ + union ecore_mfw_tlv_data *p_tlv_data; + struct ecore_drv_tlv_hdr tlv; + u8 *p_tlv_ptr = OSAL_NULL, *p_temp; + u32 offset; + int len; + + p_tlv_data = OSAL_VZALLOC(p_hwfn->p_dev, sizeof(*p_tlv_data)); + if (!p_tlv_data) + return ECORE_NOMEM; + + if (OSAL_MFW_FILL_TLV_DATA(p_hwfn, tlv_group, p_tlv_data)) { + OSAL_VFREE(p_hwfn->p_dev, p_tlv_data); + return ECORE_INVAL; + } + + offset = 0; + OSAL_MEMSET(&tlv, 0, sizeof(tlv)); + while (offset < size) { + p_temp = &p_mfw_buf[offset]; + tlv.tlv_type = TLV_TYPE(p_temp); + tlv.tlv_length = TLV_LENGTH(p_temp); + tlv.tlv_flags = TLV_FLAGS(p_temp); + DP_INFO(p_hwfn, "Type %d length = %d flags = 0x%x\n", + tlv.tlv_type, tlv.tlv_length, tlv.tlv_flags); + + offset += sizeof(tlv); + if (tlv_group == ECORE_MFW_TLV_GENERIC) + len = ecore_mfw_get_gen_tlv_value(&tlv, + &p_tlv_data->generic, &p_tlv_ptr); + else if (tlv_group == ECORE_MFW_TLV_ETH) + len = ecore_mfw_get_eth_tlv_value(&tlv, + &p_tlv_data->eth, &p_tlv_ptr); + else if (tlv_group == ECORE_MFW_TLV_FCOE) + len = ecore_mfw_get_fcoe_tlv_value(&tlv, + &p_tlv_data->fcoe, &p_tlv_ptr); + else + len = ecore_mfw_get_iscsi_tlv_value(&tlv, + &p_tlv_data->iscsi, &p_tlv_ptr); + + if (len > 0) { + OSAL_WARN(len > 4 * tlv.tlv_length, + "Incorrect MFW TLV length"); + len = OSAL_MIN_T(int, len, 4 * tlv.tlv_length); + tlv.tlv_flags |= ECORE_DRV_TLV_FLAGS_CHANGED; + /* TODO: Endianness handling? 
*/ + OSAL_MEMCPY(p_mfw_buf, &tlv, sizeof(tlv)); + OSAL_MEMCPY(p_mfw_buf + offset, p_tlv_ptr, len); + } + + offset += sizeof(u32) * tlv.tlv_length; + } + + OSAL_VFREE(p_hwfn->p_dev, p_tlv_data); + + return ECORE_SUCCESS; +} + +enum _ecore_status_t +ecore_mfw_process_tlv_req(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) +{ + u32 addr, size, offset, resp, param, val; + u8 tlv_group = 0, id, *p_mfw_buf = OSAL_NULL, *p_temp; + u32 global_offsize, global_addr; + enum _ecore_status_t rc; + struct ecore_drv_tlv_hdr tlv; + + addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base, + PUBLIC_GLOBAL); + global_offsize = ecore_rd(p_hwfn, p_ptt, addr); + global_addr = SECTION_ADDR(global_offsize, 0); + addr = global_addr + OFFSETOF(struct public_global, data_ptr); + size = ecore_rd(p_hwfn, p_ptt, global_addr + + OFFSETOF(struct public_global, data_size)); + + if (!size) { + DP_NOTICE(p_hwfn, false, "Invalid TLV req size = %d\n", size); + goto drv_done; + } + + p_mfw_buf = (void *)OSAL_VZALLOC(p_hwfn->p_dev, size); + if (!p_mfw_buf) { + DP_NOTICE(p_hwfn, false, + "Failed allocate memory for p_mfw_buf\n"); + goto drv_done; + } + + /* Read the TLV request to local buffer */ + for (offset = 0; offset < size; offset += sizeof(u32)) { + val = ecore_rd(p_hwfn, p_ptt, addr + offset); + OSAL_MEMCPY(&p_mfw_buf[offset], &val, sizeof(u32)); + } + + /* Parse the headers to enumerate the requested TLV groups */ + for (offset = 0; offset < size; + offset += sizeof(tlv) + sizeof(u32) * tlv.tlv_length) { + p_temp = &p_mfw_buf[offset]; + tlv.tlv_type = TLV_TYPE(p_temp); + tlv.tlv_length = TLV_LENGTH(p_temp); + if (ecore_mfw_get_tlv_group(tlv.tlv_type, &tlv_group)) + goto drv_done; + } + + /* Update the TLV values in the local buffer */ + for (id = ECORE_MFW_TLV_GENERIC; id < ECORE_MFW_TLV_MAX; id <<= 1) { + if (tlv_group & id) { + if (ecore_mfw_update_tlvs(p_hwfn, id, p_mfw_buf, size)) + goto drv_done; + } + } + + /* Write the TLV data to shared memory */ + for (offset = 0; offset < size; offset += sizeof(u32)) { + val = (u32)p_mfw_buf[offset]; + ecore_wr(p_hwfn, p_ptt, addr + offset, val); + offset += sizeof(u32); + } + +drv_done: + rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_TLV_DONE, 0, &resp, + ¶m); + + OSAL_VFREE(p_hwfn->p_dev, p_mfw_buf); + + return rc; +} diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_proto_if.h b/src/spdk/dpdk/drivers/net/qede/base/ecore_proto_if.h new file mode 100644 index 000000000..64509f7cc --- /dev/null +++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_proto_if.h @@ -0,0 +1,110 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. + * All rights reserved. + * www.cavium.com + */ + +#ifndef __ECORE_PROTO_IF_H__ +#define __ECORE_PROTO_IF_H__ + +/* + * PF parameters (according to personality/protocol) + */ + +#define ECORE_ROCE_PROTOCOL_INDEX (3) + +struct ecore_eth_pf_params { + /* The following parameters are used during HW-init + * and these parameters need to be passed as arguments + * to update_pf_params routine invoked before slowpath start + */ + u16 num_cons; + + /* per-VF number of CIDs */ + u8 num_vf_cons; +#define ETH_PF_PARAMS_VF_CONS_DEFAULT (32) + + /* To enable arfs, previous to HW-init a positive number needs to be + * set [as filters require allocated searcher ILT memory]. + * This will set the maximal number of configured steering-filters. + */ + u32 num_arfs_filters; + + /* To allow VF to change its MAC despite of PF set forced MAC. 
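+	 * (i.e., when set, the VF may override a MAC address that the PF has forced.)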
*/ + bool allow_vf_mac_change; +}; + +/* Most of the parameters below are described in the FW iSCSI / TCP HSI */ +struct ecore_iscsi_pf_params { + u64 glbl_q_params_addr; + u64 bdq_pbl_base_addr[2]; + u16 cq_num_entries; + u16 cmdq_num_entries; + u32 two_msl_timer; + u16 tx_sws_timer; + /* The following parameters are used during HW-init + * and these parameters need to be passed as arguments + * to update_pf_params routine invoked before slowpath start + */ + u16 num_cons; + u16 num_tasks; + + /* The following parameters are used during protocol-init */ + u16 half_way_close_timeout; + u16 bdq_xoff_threshold[2]; + u16 bdq_xon_threshold[2]; + u16 cmdq_xoff_threshold; + u16 cmdq_xon_threshold; + u16 rq_buffer_size; + + u8 num_sq_pages_in_ring; + u8 num_r2tq_pages_in_ring; + u8 num_uhq_pages_in_ring; + u8 num_queues; + u8 log_page_size; + u8 log_page_size_conn; + u8 rqe_log_size; + u8 max_fin_rt; + u8 gl_rq_pi; + u8 gl_cmd_pi; + u8 debug_mode; + u8 ll2_ooo_queue_id; + u8 ooo_enable; + + u8 is_target; + u8 bdq_pbl_num_entries[2]; + u8 disable_stats_collection; +}; + +enum ecore_rdma_protocol { + ECORE_RDMA_PROTOCOL_DEFAULT, + ECORE_RDMA_PROTOCOL_ROCE, + ECORE_RDMA_PROTOCOL_IWARP, +}; + +struct ecore_rdma_pf_params { + /* Supplied to ECORE during resource allocation (may affect the ILT and + * the doorbell BAR). + */ + u32 min_dpis; /* number of requested DPIs */ + u32 num_mrs; /* number of requested memory regions*/ + u32 num_qps; /* number of requested Queue Pairs */ + u32 num_srqs; /* number of requested SRQ */ + u8 roce_edpm_mode; /* see QED_ROCE_EDPM_MODE_ENABLE */ + u8 gl_pi; /* protocol index */ + + /* Will allocate rate limiters to be used with QPs */ + u8 enable_dcqcn; + + /* TCP port number used for the iwarp traffic */ + u16 iwarp_port; + enum ecore_rdma_protocol rdma_protocol; +}; + +struct ecore_pf_params { + struct ecore_eth_pf_params eth_pf_params; + struct ecore_iscsi_pf_params iscsi_pf_params; + struct ecore_rdma_pf_params rdma_pf_params; +}; + +#endif diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_rt_defs.h b/src/spdk/dpdk/drivers/net/qede/base/ecore_rt_defs.h new file mode 100644 index 000000000..08b1f4700 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_rt_defs.h @@ -0,0 +1,453 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. + * All rights reserved. 
+ * www.cavium.com + */ + +#ifndef __RT_DEFS_H__ +#define __RT_DEFS_H__ + +/* Runtime array offsets */ +#define DORQ_REG_PF_MAX_ICID_0_RT_OFFSET 0 +#define DORQ_REG_PF_MAX_ICID_1_RT_OFFSET 1 +#define DORQ_REG_PF_MAX_ICID_2_RT_OFFSET 2 +#define DORQ_REG_PF_MAX_ICID_3_RT_OFFSET 3 +#define DORQ_REG_PF_MAX_ICID_4_RT_OFFSET 4 +#define DORQ_REG_PF_MAX_ICID_5_RT_OFFSET 5 +#define DORQ_REG_PF_MAX_ICID_6_RT_OFFSET 6 +#define DORQ_REG_PF_MAX_ICID_7_RT_OFFSET 7 +#define DORQ_REG_VF_MAX_ICID_0_RT_OFFSET 8 +#define DORQ_REG_VF_MAX_ICID_1_RT_OFFSET 9 +#define DORQ_REG_VF_MAX_ICID_2_RT_OFFSET 10 +#define DORQ_REG_VF_MAX_ICID_3_RT_OFFSET 11 +#define DORQ_REG_VF_MAX_ICID_4_RT_OFFSET 12 +#define DORQ_REG_VF_MAX_ICID_5_RT_OFFSET 13 +#define DORQ_REG_VF_MAX_ICID_6_RT_OFFSET 14 +#define DORQ_REG_VF_MAX_ICID_7_RT_OFFSET 15 +#define DORQ_REG_VF_ICID_BIT_SHIFT_NORM_RT_OFFSET 16 +#define DORQ_REG_PF_WAKE_ALL_RT_OFFSET 17 +#define DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET 18 +#define IGU_REG_PF_CONFIGURATION_RT_OFFSET 19 +#define IGU_REG_VF_CONFIGURATION_RT_OFFSET 20 +#define IGU_REG_ATTN_MSG_ADDR_L_RT_OFFSET 21 +#define IGU_REG_ATTN_MSG_ADDR_H_RT_OFFSET 22 +#define IGU_REG_LEADING_EDGE_LATCH_RT_OFFSET 23 +#define IGU_REG_TRAILING_EDGE_LATCH_RT_OFFSET 24 +#define CAU_REG_CQE_AGG_UNIT_SIZE_RT_OFFSET 25 +#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 26 +#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 736 +#define CAU_REG_SB_ADDR_MEMORY_RT_OFFSET 762 +#define CAU_REG_SB_ADDR_MEMORY_RT_SIZE 736 +#define CAU_REG_PI_MEMORY_RT_OFFSET 1498 +#define CAU_REG_PI_MEMORY_RT_SIZE 4416 +#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET 5914 +#define PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET 5915 +#define PRS_REG_TASK_ID_MAX_INITIATOR_VF_RT_OFFSET 5916 +#define PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET 5917 +#define PRS_REG_TASK_ID_MAX_TARGET_VF_RT_OFFSET 5918 +#define PRS_REG_SEARCH_TCP_RT_OFFSET 5919 +#define PRS_REG_SEARCH_FCOE_RT_OFFSET 5920 +#define PRS_REG_SEARCH_ROCE_RT_OFFSET 5921 +#define PRS_REG_ROCE_DEST_QP_MAX_VF_RT_OFFSET 5922 +#define PRS_REG_ROCE_DEST_QP_MAX_PF_RT_OFFSET 5923 +#define PRS_REG_SEARCH_OPENFLOW_RT_OFFSET 5924 +#define PRS_REG_SEARCH_NON_IP_AS_OPENFLOW_RT_OFFSET 5925 +#define PRS_REG_OPENFLOW_SUPPORT_ONLY_KNOWN_OVER_IP_RT_OFFSET 5926 +#define PRS_REG_OPENFLOW_SEARCH_KEY_MASK_RT_OFFSET 5927 +#define PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET 5928 +#define PRS_REG_LIGHT_L2_ETHERTYPE_EN_RT_OFFSET 5929 +#define SRC_REG_FIRSTFREE_RT_OFFSET 5930 +#define SRC_REG_FIRSTFREE_RT_SIZE 2 +#define SRC_REG_LASTFREE_RT_OFFSET 5932 +#define SRC_REG_LASTFREE_RT_SIZE 2 +#define SRC_REG_COUNTFREE_RT_OFFSET 5934 +#define SRC_REG_NUMBER_HASH_BITS_RT_OFFSET 5935 +#define PSWRQ2_REG_CDUT_P_SIZE_RT_OFFSET 5936 +#define PSWRQ2_REG_CDUC_P_SIZE_RT_OFFSET 5937 +#define PSWRQ2_REG_TM_P_SIZE_RT_OFFSET 5938 +#define PSWRQ2_REG_QM_P_SIZE_RT_OFFSET 5939 +#define PSWRQ2_REG_SRC_P_SIZE_RT_OFFSET 5940 +#define PSWRQ2_REG_TSDM_P_SIZE_RT_OFFSET 5941 +#define PSWRQ2_REG_TM_FIRST_ILT_RT_OFFSET 5942 +#define PSWRQ2_REG_TM_LAST_ILT_RT_OFFSET 5943 +#define PSWRQ2_REG_QM_FIRST_ILT_RT_OFFSET 5944 +#define PSWRQ2_REG_QM_LAST_ILT_RT_OFFSET 5945 +#define PSWRQ2_REG_SRC_FIRST_ILT_RT_OFFSET 5946 +#define PSWRQ2_REG_SRC_LAST_ILT_RT_OFFSET 5947 +#define PSWRQ2_REG_CDUC_FIRST_ILT_RT_OFFSET 5948 +#define PSWRQ2_REG_CDUC_LAST_ILT_RT_OFFSET 5949 +#define PSWRQ2_REG_CDUT_FIRST_ILT_RT_OFFSET 5950 +#define PSWRQ2_REG_CDUT_LAST_ILT_RT_OFFSET 5951 +#define PSWRQ2_REG_TSDM_FIRST_ILT_RT_OFFSET 5952 +#define PSWRQ2_REG_TSDM_LAST_ILT_RT_OFFSET 5953 +#define 
PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET 5954 +#define PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET 5955 +#define PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET 5956 +#define PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET 5957 +#define PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET 5958 +#define PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET 5959 +#define PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET 5960 +#define PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET 5961 +#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET 5962 +#define PSWRQ2_REG_VF_BASE_RT_OFFSET 5963 +#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET 5964 +#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET 5965 +#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET 5966 +#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET 5967 +#define PSWRQ2_REG_ILT_MEMORY_RT_SIZE 22000 +#define PGLUE_REG_B_VF_BASE_RT_OFFSET 27967 +#define PGLUE_REG_B_MSDM_OFFSET_MASK_B_RT_OFFSET 27968 +#define PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET 27969 +#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET 27970 +#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET 27971 +#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET 27972 +#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET 27973 +#define TM_REG_VF_ENABLE_CONN_RT_OFFSET 27974 +#define TM_REG_PF_ENABLE_CONN_RT_OFFSET 27975 +#define TM_REG_PF_ENABLE_TASK_RT_OFFSET 27976 +#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET 27977 +#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET 27978 +#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 27979 +#define TM_REG_CONFIG_CONN_MEM_RT_SIZE 416 +#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 28395 +#define TM_REG_CONFIG_TASK_MEM_RT_SIZE 512 +#define QM_REG_MAXPQSIZE_0_RT_OFFSET 28907 +#define QM_REG_MAXPQSIZE_1_RT_OFFSET 28908 +#define QM_REG_MAXPQSIZE_2_RT_OFFSET 28909 +#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 28910 +#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 28911 +#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 28912 +#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 28913 +#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 28914 +#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 28915 +#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 28916 +#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 28917 +#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 28918 +#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 28919 +#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 28920 +#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 28921 +#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 28922 +#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 28923 +#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 28924 +#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 28925 +#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 28926 +#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 28927 +#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 28928 +#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 28929 +#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 28930 +#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 28931 +#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 28932 +#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 28933 +#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 28934 +#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 28935 +#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 28936 +#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 28937 +#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 28938 +#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 28939 +#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 28940 +#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 28941 +#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 28942 +#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 28943 +#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 28944 +#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 28945 +#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 28946 +#define 
QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 28947 +#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 28948 +#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 28949 +#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 28950 +#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 28951 +#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 28952 +#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 28953 +#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 28954 +#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 28955 +#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 28956 +#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 28957 +#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 28958 +#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 28959 +#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 28960 +#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 28961 +#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 28962 +#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 28963 +#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 28964 +#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 28965 +#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 28966 +#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 28967 +#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 28968 +#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 28969 +#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 28970 +#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 28971 +#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 28972 +#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 28973 +#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 28974 +#define QM_REG_BASEADDROTHERPQ_RT_SIZE 128 +#define QM_REG_PTRTBLOTHER_RT_OFFSET 29102 +#define QM_REG_PTRTBLOTHER_RT_SIZE 256 +#define QM_REG_VOQCRDLINE_RT_OFFSET 29358 +#define QM_REG_VOQCRDLINE_RT_SIZE 20 +#define QM_REG_VOQINITCRDLINE_RT_OFFSET 29378 +#define QM_REG_VOQINITCRDLINE_RT_SIZE 20 +#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 29398 +#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 29399 +#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 29400 +#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 29401 +#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 29402 +#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 29403 +#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 29404 +#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 29405 +#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 29406 +#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 29407 +#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 29408 +#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 29409 +#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 29410 +#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 29411 +#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 29412 +#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 29413 +#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 29414 +#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 29415 +#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 29416 +#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 29417 +#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 29418 +#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 29419 +#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 29420 +#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 29421 +#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 29422 +#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 29423 +#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 29424 +#define QM_REG_PQTX2PF_0_RT_OFFSET 29425 +#define QM_REG_PQTX2PF_1_RT_OFFSET 29426 +#define QM_REG_PQTX2PF_2_RT_OFFSET 29427 +#define QM_REG_PQTX2PF_3_RT_OFFSET 29428 +#define QM_REG_PQTX2PF_4_RT_OFFSET 29429 +#define QM_REG_PQTX2PF_5_RT_OFFSET 29430 +#define QM_REG_PQTX2PF_6_RT_OFFSET 29431 +#define QM_REG_PQTX2PF_7_RT_OFFSET 29432 +#define QM_REG_PQTX2PF_8_RT_OFFSET 29433 +#define QM_REG_PQTX2PF_9_RT_OFFSET 29434 +#define QM_REG_PQTX2PF_10_RT_OFFSET 29435 +#define QM_REG_PQTX2PF_11_RT_OFFSET 29436 +#define 
QM_REG_PQTX2PF_12_RT_OFFSET 29437 +#define QM_REG_PQTX2PF_13_RT_OFFSET 29438 +#define QM_REG_PQTX2PF_14_RT_OFFSET 29439 +#define QM_REG_PQTX2PF_15_RT_OFFSET 29440 +#define QM_REG_PQTX2PF_16_RT_OFFSET 29441 +#define QM_REG_PQTX2PF_17_RT_OFFSET 29442 +#define QM_REG_PQTX2PF_18_RT_OFFSET 29443 +#define QM_REG_PQTX2PF_19_RT_OFFSET 29444 +#define QM_REG_PQTX2PF_20_RT_OFFSET 29445 +#define QM_REG_PQTX2PF_21_RT_OFFSET 29446 +#define QM_REG_PQTX2PF_22_RT_OFFSET 29447 +#define QM_REG_PQTX2PF_23_RT_OFFSET 29448 +#define QM_REG_PQTX2PF_24_RT_OFFSET 29449 +#define QM_REG_PQTX2PF_25_RT_OFFSET 29450 +#define QM_REG_PQTX2PF_26_RT_OFFSET 29451 +#define QM_REG_PQTX2PF_27_RT_OFFSET 29452 +#define QM_REG_PQTX2PF_28_RT_OFFSET 29453 +#define QM_REG_PQTX2PF_29_RT_OFFSET 29454 +#define QM_REG_PQTX2PF_30_RT_OFFSET 29455 +#define QM_REG_PQTX2PF_31_RT_OFFSET 29456 +#define QM_REG_PQTX2PF_32_RT_OFFSET 29457 +#define QM_REG_PQTX2PF_33_RT_OFFSET 29458 +#define QM_REG_PQTX2PF_34_RT_OFFSET 29459 +#define QM_REG_PQTX2PF_35_RT_OFFSET 29460 +#define QM_REG_PQTX2PF_36_RT_OFFSET 29461 +#define QM_REG_PQTX2PF_37_RT_OFFSET 29462 +#define QM_REG_PQTX2PF_38_RT_OFFSET 29463 +#define QM_REG_PQTX2PF_39_RT_OFFSET 29464 +#define QM_REG_PQTX2PF_40_RT_OFFSET 29465 +#define QM_REG_PQTX2PF_41_RT_OFFSET 29466 +#define QM_REG_PQTX2PF_42_RT_OFFSET 29467 +#define QM_REG_PQTX2PF_43_RT_OFFSET 29468 +#define QM_REG_PQTX2PF_44_RT_OFFSET 29469 +#define QM_REG_PQTX2PF_45_RT_OFFSET 29470 +#define QM_REG_PQTX2PF_46_RT_OFFSET 29471 +#define QM_REG_PQTX2PF_47_RT_OFFSET 29472 +#define QM_REG_PQTX2PF_48_RT_OFFSET 29473 +#define QM_REG_PQTX2PF_49_RT_OFFSET 29474 +#define QM_REG_PQTX2PF_50_RT_OFFSET 29475 +#define QM_REG_PQTX2PF_51_RT_OFFSET 29476 +#define QM_REG_PQTX2PF_52_RT_OFFSET 29477 +#define QM_REG_PQTX2PF_53_RT_OFFSET 29478 +#define QM_REG_PQTX2PF_54_RT_OFFSET 29479 +#define QM_REG_PQTX2PF_55_RT_OFFSET 29480 +#define QM_REG_PQTX2PF_56_RT_OFFSET 29481 +#define QM_REG_PQTX2PF_57_RT_OFFSET 29482 +#define QM_REG_PQTX2PF_58_RT_OFFSET 29483 +#define QM_REG_PQTX2PF_59_RT_OFFSET 29484 +#define QM_REG_PQTX2PF_60_RT_OFFSET 29485 +#define QM_REG_PQTX2PF_61_RT_OFFSET 29486 +#define QM_REG_PQTX2PF_62_RT_OFFSET 29487 +#define QM_REG_PQTX2PF_63_RT_OFFSET 29488 +#define QM_REG_PQOTHER2PF_0_RT_OFFSET 29489 +#define QM_REG_PQOTHER2PF_1_RT_OFFSET 29490 +#define QM_REG_PQOTHER2PF_2_RT_OFFSET 29491 +#define QM_REG_PQOTHER2PF_3_RT_OFFSET 29492 +#define QM_REG_PQOTHER2PF_4_RT_OFFSET 29493 +#define QM_REG_PQOTHER2PF_5_RT_OFFSET 29494 +#define QM_REG_PQOTHER2PF_6_RT_OFFSET 29495 +#define QM_REG_PQOTHER2PF_7_RT_OFFSET 29496 +#define QM_REG_PQOTHER2PF_8_RT_OFFSET 29497 +#define QM_REG_PQOTHER2PF_9_RT_OFFSET 29498 +#define QM_REG_PQOTHER2PF_10_RT_OFFSET 29499 +#define QM_REG_PQOTHER2PF_11_RT_OFFSET 29500 +#define QM_REG_PQOTHER2PF_12_RT_OFFSET 29501 +#define QM_REG_PQOTHER2PF_13_RT_OFFSET 29502 +#define QM_REG_PQOTHER2PF_14_RT_OFFSET 29503 +#define QM_REG_PQOTHER2PF_15_RT_OFFSET 29504 +#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 29505 +#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 29506 +#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 29507 +#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 29508 +#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 29509 +#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 29510 +#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 29511 +#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 29512 +#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 29513 +#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 29514 +#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 29515 +#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 29516 
+#define QM_REG_RLGLBLINCVAL_RT_OFFSET 29517 +#define QM_REG_RLGLBLINCVAL_RT_SIZE 256 +#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 29773 +#define QM_REG_RLGLBLUPPERBOUND_RT_SIZE 256 +#define QM_REG_RLGLBLCRD_RT_OFFSET 30029 +#define QM_REG_RLGLBLCRD_RT_SIZE 256 +#define QM_REG_RLGLBLENABLE_RT_OFFSET 30285 +#define QM_REG_RLPFPERIOD_RT_OFFSET 30286 +#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 30287 +#define QM_REG_RLPFINCVAL_RT_OFFSET 30288 +#define QM_REG_RLPFINCVAL_RT_SIZE 16 +#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 30304 +#define QM_REG_RLPFUPPERBOUND_RT_SIZE 16 +#define QM_REG_RLPFCRD_RT_OFFSET 30320 +#define QM_REG_RLPFCRD_RT_SIZE 16 +#define QM_REG_RLPFENABLE_RT_OFFSET 30336 +#define QM_REG_RLPFVOQENABLE_RT_OFFSET 30337 +#define QM_REG_WFQPFWEIGHT_RT_OFFSET 30338 +#define QM_REG_WFQPFWEIGHT_RT_SIZE 16 +#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 30354 +#define QM_REG_WFQPFUPPERBOUND_RT_SIZE 16 +#define QM_REG_WFQPFCRD_RT_OFFSET 30370 +#define QM_REG_WFQPFCRD_RT_SIZE 160 +#define QM_REG_WFQPFENABLE_RT_OFFSET 30530 +#define QM_REG_WFQVPENABLE_RT_OFFSET 30531 +#define QM_REG_BASEADDRTXPQ_RT_OFFSET 30532 +#define QM_REG_BASEADDRTXPQ_RT_SIZE 512 +#define QM_REG_TXPQMAP_RT_OFFSET 31044 +#define QM_REG_TXPQMAP_RT_SIZE 512 +#define QM_REG_WFQVPWEIGHT_RT_OFFSET 31556 +#define QM_REG_WFQVPWEIGHT_RT_SIZE 512 +#define QM_REG_WFQVPCRD_RT_OFFSET 32068 +#define QM_REG_WFQVPCRD_RT_SIZE 512 +#define QM_REG_WFQVPMAP_RT_OFFSET 32580 +#define QM_REG_WFQVPMAP_RT_SIZE 512 +#define QM_REG_PTRTBLTX_RT_OFFSET 33092 +#define QM_REG_PTRTBLTX_RT_SIZE 1024 +#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 34116 +#define QM_REG_WFQPFCRD_MSB_RT_SIZE 160 +#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 34276 +#define NIG_REG_BRB_GATE_DNTFWD_PORT_RT_OFFSET 34277 +#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 34278 +#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 34279 +#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 34280 +#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 34281 +#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 34282 +#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 34283 +#define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE 4 +#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 34287 +#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE 4 +#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 34291 +#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE 32 +#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 34323 +#define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE 16 +#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 34339 +#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE 16 +#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 34355 +#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 16 +#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 34371 +#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16 +#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 34387 +#define NIG_REG_PPF_TO_ENGINE_SEL_RT_OFFSET 34388 +#define NIG_REG_PPF_TO_ENGINE_SEL_RT_SIZE 8 +#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 34396 +#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 34397 +#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 34398 +#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 34399 +#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 34400 +#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 34401 +#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 34402 +#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 34403 +#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 34404 +#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 34405 +#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 34406 +#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 34407 +#define 
CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 34408 +#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 34409 +#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 34410 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 34411 +#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 34412 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 34413 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 34414 +#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 34415 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 34416 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 34417 +#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 34418 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 34419 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 34420 +#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 34421 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 34422 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 34423 +#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 34424 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 34425 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 34426 +#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 34427 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 34428 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 34429 +#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 34430 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 34431 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 34432 +#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 34433 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 34434 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 34435 +#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 34436 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 34437 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 34438 +#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 34439 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 34440 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 34441 +#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 34442 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 34443 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 34444 +#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 34445 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 34446 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 34447 +#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 34448 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 34449 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 34450 +#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 34451 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 34452 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 34453 +#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 34454 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 34455 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 34456 +#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 34457 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 34458 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 34459 +#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 34460 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 34461 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 34462 +#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 34463 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 34464 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 34465 +#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 34466 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 34467 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 34468 +#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 34469 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 
34470 +#define XCM_REG_CON_PHY_Q3_RT_OFFSET 34471 + +#define RUNTIME_ARRAY_SIZE 34472 + +/* Init Callbacks */ +#define DMAE_READY_CB 0 + +#endif /* __RT_DEFS_H__ */ diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_sp_api.h b/src/spdk/dpdk/drivers/net/qede/base/ecore_sp_api.h new file mode 100644 index 000000000..4633dbebe --- /dev/null +++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_sp_api.h @@ -0,0 +1,64 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. + * All rights reserved. + * www.cavium.com + */ + +#ifndef __ECORE_SP_API_H__ +#define __ECORE_SP_API_H__ + +#include "ecore_status.h" + +enum spq_mode { + ECORE_SPQ_MODE_BLOCK, /* Client will poll a designated mem. address */ + ECORE_SPQ_MODE_CB, /* Client supplies a callback */ + ECORE_SPQ_MODE_EBLOCK, /* ECORE should block until completion */ +}; + +struct ecore_hwfn; +union event_ring_data; +struct eth_slow_path_rx_cqe; + +struct ecore_spq_comp_cb { + void (*function)(struct ecore_hwfn *, + void *, + union event_ring_data *, + u8 fw_return_code); + void *cookie; +}; + + +/** + * @brief ecore_eth_cqe_completion - handles the completion of a + * ramrod on the cqe ring + * + * @param p_hwfn + * @param cqe + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_eth_cqe_completion(struct ecore_hwfn *p_hwfn, + struct eth_slow_path_rx_cqe *cqe); +/** + * @brief ecore_sp_pf_update_tunn_cfg - PF Function Tunnel configuration + * update Ramrod + * + * This ramrod is sent to update a tunneling configuration + * for a physical function (PF). + * + * @param p_hwfn + * @param p_ptt + * @param p_tunn - pf update tunneling parameters + * @param comp_mode - completion mode + * @param p_comp_data - callback function + * + * @return enum _ecore_status_t + */ + +enum _ecore_status_t +ecore_sp_pf_update_tunn_cfg(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_tunnel_info *p_tunn, + enum spq_mode comp_mode, + struct ecore_spq_comp_cb *p_comp_data); +#endif diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_sp_commands.c b/src/spdk/dpdk/drivers/net/qede/base/ecore_sp_commands.c new file mode 100644 index 000000000..9860a62b5 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_sp_commands.c @@ -0,0 +1,671 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. + * All rights reserved. 
+ * www.cavium.com + */ + +#include "bcm_osal.h" + +#include "ecore.h" +#include "ecore_status.h" +#include "ecore_chain.h" +#include "ecore_spq.h" +#include "ecore_init_fw_funcs.h" +#include "ecore_cxt.h" +#include "ecore_sp_commands.h" +#include "ecore_gtt_reg_addr.h" +#include "ecore_iro.h" +#include "reg_addr.h" +#include "ecore_int.h" +#include "ecore_hw.h" +#include "ecore_dcbx.h" +#include "ecore_sriov.h" +#include "ecore_vf.h" + +enum _ecore_status_t ecore_sp_init_request(struct ecore_hwfn *p_hwfn, + struct ecore_spq_entry **pp_ent, + u8 cmd, + u8 protocol, + struct ecore_sp_init_data *p_data) +{ + u32 opaque_cid = p_data->opaque_fid << 16 | p_data->cid; + struct ecore_spq_entry *p_ent = OSAL_NULL; + enum _ecore_status_t rc; + + if (!pp_ent) + return ECORE_INVAL; + + /* Get an SPQ entry */ + rc = ecore_spq_get_entry(p_hwfn, pp_ent); + if (rc != ECORE_SUCCESS) + return rc; + + /* Fill the SPQ entry */ + p_ent = *pp_ent; + p_ent->elem.hdr.cid = OSAL_CPU_TO_LE32(opaque_cid); + p_ent->elem.hdr.cmd_id = cmd; + p_ent->elem.hdr.protocol_id = protocol; + p_ent->priority = ECORE_SPQ_PRIORITY_NORMAL; + p_ent->comp_mode = p_data->comp_mode; + p_ent->comp_done.done = 0; + + switch (p_ent->comp_mode) { + case ECORE_SPQ_MODE_EBLOCK: + p_ent->comp_cb.cookie = &p_ent->comp_done; + break; + + case ECORE_SPQ_MODE_BLOCK: + if (!p_data->p_comp_data) + return ECORE_INVAL; + + p_ent->comp_cb.cookie = p_data->p_comp_data->cookie; + break; + + case ECORE_SPQ_MODE_CB: + if (!p_data->p_comp_data) + p_ent->comp_cb.function = OSAL_NULL; + else + p_ent->comp_cb = *p_data->p_comp_data; + break; + + default: + DP_NOTICE(p_hwfn, true, "Unknown SPQE completion mode %d\n", + p_ent->comp_mode); + return ECORE_INVAL; + } + + DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, + "Initialized: CID %08x cmd %02x protocol %02x data_addr %lu comp_mode [%s]\n", + opaque_cid, cmd, protocol, + (unsigned long)&p_ent->ramrod, + D_TRINE(p_ent->comp_mode, ECORE_SPQ_MODE_EBLOCK, + ECORE_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK", + "MODE_CB")); + + OSAL_MEMSET(&p_ent->ramrod, 0, sizeof(p_ent->ramrod)); + + return ECORE_SUCCESS; +} + +static enum tunnel_clss ecore_tunn_clss_to_fw_clss(u8 type) +{ + switch (type) { + case ECORE_TUNN_CLSS_MAC_VLAN: + return TUNNEL_CLSS_MAC_VLAN; + case ECORE_TUNN_CLSS_MAC_VNI: + return TUNNEL_CLSS_MAC_VNI; + case ECORE_TUNN_CLSS_INNER_MAC_VLAN: + return TUNNEL_CLSS_INNER_MAC_VLAN; + case ECORE_TUNN_CLSS_INNER_MAC_VNI: + return TUNNEL_CLSS_INNER_MAC_VNI; + case ECORE_TUNN_CLSS_MAC_VLAN_DUAL_STAGE: + return TUNNEL_CLSS_MAC_VLAN_DUAL_STAGE; + default: + return TUNNEL_CLSS_MAC_VLAN; + } +} + +static void +ecore_set_pf_update_tunn_mode(struct ecore_tunnel_info *p_tun, + struct ecore_tunnel_info *p_src, + bool b_pf_start) +{ + if (p_src->vxlan.b_update_mode || b_pf_start) + p_tun->vxlan.b_mode_enabled = p_src->vxlan.b_mode_enabled; + + if (p_src->l2_gre.b_update_mode || b_pf_start) + p_tun->l2_gre.b_mode_enabled = p_src->l2_gre.b_mode_enabled; + + if (p_src->ip_gre.b_update_mode || b_pf_start) + p_tun->ip_gre.b_mode_enabled = p_src->ip_gre.b_mode_enabled; + + if (p_src->l2_geneve.b_update_mode || b_pf_start) + p_tun->l2_geneve.b_mode_enabled = + p_src->l2_geneve.b_mode_enabled; + + if (p_src->ip_geneve.b_update_mode || b_pf_start) + p_tun->ip_geneve.b_mode_enabled = + p_src->ip_geneve.b_mode_enabled; +} + +static void ecore_set_tunn_cls_info(struct ecore_tunnel_info *p_tun, + struct ecore_tunnel_info *p_src) +{ + enum tunnel_clss type; + + p_tun->b_update_rx_cls = p_src->b_update_rx_cls; + p_tun->b_update_tx_cls = 
p_src->b_update_tx_cls; + + /* @DPDK - typecast tunnul class */ + type = ecore_tunn_clss_to_fw_clss(p_src->vxlan.tun_cls); + p_tun->vxlan.tun_cls = (enum ecore_tunn_clss)type; + type = ecore_tunn_clss_to_fw_clss(p_src->l2_gre.tun_cls); + p_tun->l2_gre.tun_cls = (enum ecore_tunn_clss)type; + type = ecore_tunn_clss_to_fw_clss(p_src->ip_gre.tun_cls); + p_tun->ip_gre.tun_cls = (enum ecore_tunn_clss)type; + type = ecore_tunn_clss_to_fw_clss(p_src->l2_geneve.tun_cls); + p_tun->l2_geneve.tun_cls = (enum ecore_tunn_clss)type; + type = ecore_tunn_clss_to_fw_clss(p_src->ip_geneve.tun_cls); + p_tun->ip_geneve.tun_cls = (enum ecore_tunn_clss)type; +} + +static void ecore_set_tunn_ports(struct ecore_tunnel_info *p_tun, + struct ecore_tunnel_info *p_src) +{ + p_tun->geneve_port.b_update_port = p_src->geneve_port.b_update_port; + p_tun->vxlan_port.b_update_port = p_src->vxlan_port.b_update_port; + + if (p_src->geneve_port.b_update_port) + p_tun->geneve_port.port = p_src->geneve_port.port; + + if (p_src->vxlan_port.b_update_port) + p_tun->vxlan_port.port = p_src->vxlan_port.port; +} + +static void +__ecore_set_ramrod_tunnel_param(u8 *p_tunn_cls, + struct ecore_tunn_update_type *tun_type) +{ + *p_tunn_cls = tun_type->tun_cls; +} + +static void +ecore_set_ramrod_tunnel_param(u8 *p_tunn_cls, + struct ecore_tunn_update_type *tun_type, + u8 *p_update_port, __le16 *p_port, + struct ecore_tunn_update_udp_port *p_udp_port) +{ + __ecore_set_ramrod_tunnel_param(p_tunn_cls, tun_type); + if (p_udp_port->b_update_port) { + *p_update_port = 1; + *p_port = OSAL_CPU_TO_LE16(p_udp_port->port); + } +} + +static void +ecore_tunn_set_pf_update_params(struct ecore_hwfn *p_hwfn, + struct ecore_tunnel_info *p_src, + struct pf_update_tunnel_config *p_tunn_cfg) +{ + struct ecore_tunnel_info *p_tun = &p_hwfn->p_dev->tunnel; + + ecore_set_pf_update_tunn_mode(p_tun, p_src, false); + ecore_set_tunn_cls_info(p_tun, p_src); + ecore_set_tunn_ports(p_tun, p_src); + + ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_vxlan, + &p_tun->vxlan, + &p_tunn_cfg->set_vxlan_udp_port_flg, + &p_tunn_cfg->vxlan_udp_port, + &p_tun->vxlan_port); + + ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2geneve, + &p_tun->l2_geneve, + &p_tunn_cfg->set_geneve_udp_port_flg, + &p_tunn_cfg->geneve_udp_port, + &p_tun->geneve_port); + + __ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgeneve, + &p_tun->ip_geneve); + + __ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2gre, + &p_tun->l2_gre); + + __ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgre, + &p_tun->ip_gre); + + p_tunn_cfg->update_rx_pf_clss = p_tun->b_update_rx_cls; +} + +static void ecore_set_hw_tunn_mode(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_tunnel_info *p_tun) +{ + ecore_set_gre_enable(p_hwfn, p_ptt, p_tun->l2_gre.b_mode_enabled, + p_tun->ip_gre.b_mode_enabled); + ecore_set_vxlan_enable(p_hwfn, p_ptt, p_tun->vxlan.b_mode_enabled); + + ecore_set_geneve_enable(p_hwfn, p_ptt, p_tun->l2_geneve.b_mode_enabled, + p_tun->ip_geneve.b_mode_enabled); +} + +static void ecore_set_hw_tunn_mode_port(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_tunnel_info *p_tunn) +{ + if (ECORE_IS_BB_A0(p_hwfn->p_dev)) { + DP_NOTICE(p_hwfn, true, + "A0 chip: tunnel hw config is not supported\n"); + return; + } + + if (p_tunn->vxlan_port.b_update_port) + ecore_set_vxlan_dest_port(p_hwfn, p_ptt, + p_tunn->vxlan_port.port); + + if (p_tunn->geneve_port.b_update_port) + ecore_set_geneve_dest_port(p_hwfn, p_ptt, + p_tunn->geneve_port.port); + 
+ ecore_set_hw_tunn_mode(p_hwfn, p_ptt, p_tunn); +} + +static void +ecore_tunn_set_pf_start_params(struct ecore_hwfn *p_hwfn, + struct ecore_tunnel_info *p_src, + struct pf_start_tunnel_config *p_tunn_cfg) +{ + struct ecore_tunnel_info *p_tun = &p_hwfn->p_dev->tunnel; + + if (ECORE_IS_BB_A0(p_hwfn->p_dev)) { + DP_NOTICE(p_hwfn, true, + "A0 chip: tunnel pf start config is not supported\n"); + return; + } + + if (!p_src) + return; + + ecore_set_pf_update_tunn_mode(p_tun, p_src, true); + ecore_set_tunn_cls_info(p_tun, p_src); + ecore_set_tunn_ports(p_tun, p_src); + + ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_vxlan, + &p_tun->vxlan, + &p_tunn_cfg->set_vxlan_udp_port_flg, + &p_tunn_cfg->vxlan_udp_port, + &p_tun->vxlan_port); + + ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2geneve, + &p_tun->l2_geneve, + &p_tunn_cfg->set_geneve_udp_port_flg, + &p_tunn_cfg->geneve_udp_port, + &p_tun->geneve_port); + + __ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgeneve, + &p_tun->ip_geneve); + + __ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2gre, + &p_tun->l2_gre); + + __ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgre, + &p_tun->ip_gre); +} + +#define ETH_P_8021Q 0x8100 +#define ETH_P_8021AD 0x88A8 /* 802.1ad Service VLAN */ + +enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_tunnel_info *p_tunn, + bool allow_npar_tx_switch) +{ + struct pf_start_ramrod_data *p_ramrod = OSAL_NULL; + u16 sb = ecore_int_get_sp_sb_id(p_hwfn); + u8 sb_index = p_hwfn->p_eq->eq_sb_index; + struct ecore_spq_entry *p_ent = OSAL_NULL; + struct ecore_sp_init_data init_data; + enum _ecore_status_t rc = ECORE_NOTIMPL; + u8 page_cnt; + u8 i; + + /* update initial eq producer */ + ecore_eq_prod_update(p_hwfn, + ecore_chain_get_prod_idx(&p_hwfn->p_eq->chain)); + + /* Initialize the SPQ entry for the ramrod */ + OSAL_MEMSET(&init_data, 0, sizeof(init_data)); + init_data.cid = ecore_spq_get_cid(p_hwfn); + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK; + + rc = ecore_sp_init_request(p_hwfn, &p_ent, + COMMON_RAMROD_PF_START, + PROTOCOLID_COMMON, &init_data); + if (rc != ECORE_SUCCESS) + return rc; + + /* Fill the ramrod data */ + p_ramrod = &p_ent->ramrod.pf_start; + p_ramrod->event_ring_sb_id = OSAL_CPU_TO_LE16(sb); + p_ramrod->event_ring_sb_index = sb_index; + p_ramrod->path_id = ECORE_PATH_ID(p_hwfn); + + /* For easier debugging */ + p_ramrod->dont_log_ramrods = 0; + p_ramrod->log_type_mask = OSAL_CPU_TO_LE16(0x8f); + + if (OSAL_TEST_BIT(ECORE_MF_OVLAN_CLSS, &p_hwfn->p_dev->mf_bits)) + p_ramrod->mf_mode = MF_OVLAN; + else + p_ramrod->mf_mode = MF_NPAR; + + p_ramrod->outer_tag_config.outer_tag.tci = + OSAL_CPU_TO_LE16(p_hwfn->hw_info.ovlan); + if (OSAL_TEST_BIT(ECORE_MF_8021Q_TAGGING, &p_hwfn->p_dev->mf_bits)) { + p_ramrod->outer_tag_config.outer_tag.tpid = ETH_P_8021Q; + } else if (OSAL_TEST_BIT(ECORE_MF_8021AD_TAGGING, + &p_hwfn->p_dev->mf_bits)) { + p_ramrod->outer_tag_config.outer_tag.tpid = ETH_P_8021AD; + p_ramrod->outer_tag_config.enable_stag_pri_change = 1; + } + + p_ramrod->outer_tag_config.pri_map_valid = 1; + for (i = 0; i < ECORE_MAX_PFC_PRIORITIES; i++) + p_ramrod->outer_tag_config.inner_to_outer_pri_map[i] = i; + + /* enable_stag_pri_change should be set if port is in BD mode or, + * UFP with Host Control mode. 
+ */ + if (OSAL_TEST_BIT(ECORE_MF_UFP_SPECIFIC, &p_hwfn->p_dev->mf_bits)) { + if (p_hwfn->ufp_info.pri_type == ECORE_UFP_PRI_OS) + p_ramrod->outer_tag_config.enable_stag_pri_change = 1; + else + p_ramrod->outer_tag_config.enable_stag_pri_change = 0; + + p_ramrod->outer_tag_config.outer_tag.tci |= + OSAL_CPU_TO_LE16(((u16)p_hwfn->ufp_info.tc << 13)); + } + + /* Place EQ address in RAMROD */ + DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr, + p_hwfn->p_eq->chain.pbl_sp.p_phys_table); + page_cnt = (u8)ecore_chain_get_page_cnt(&p_hwfn->p_eq->chain); + p_ramrod->event_ring_num_pages = page_cnt; + DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr, + p_hwfn->p_consq->chain.pbl_sp.p_phys_table); + + ecore_tunn_set_pf_start_params(p_hwfn, p_tunn, + &p_ramrod->tunnel_config); + + if (OSAL_TEST_BIT(ECORE_MF_INTER_PF_SWITCH, + &p_hwfn->p_dev->mf_bits)) + p_ramrod->allow_npar_tx_switching = allow_npar_tx_switch; + + switch (p_hwfn->hw_info.personality) { + case ECORE_PCI_ETH: + p_ramrod->personality = PERSONALITY_ETH; + break; + default: + DP_NOTICE(p_hwfn, true, "Unknown personality %d\n", + p_hwfn->hw_info.personality); + p_ramrod->personality = PERSONALITY_ETH; + } + + if (p_hwfn->p_dev->p_iov_info) { + struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info; + + p_ramrod->base_vf_id = (u8)p_iov->first_vf_in_pf; + p_ramrod->num_vfs = (u8)p_iov->total_vfs; + } + /* @@@TBD - update also the "ROCE_VER_KEY" entries when the FW RoCE HSI + * version is available. + */ + p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR; + p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MINOR; + + DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, + "Setting event_ring_sb [id %04x index %02x], outer_tag.tpid [%d], outer_tag.tci [%d]\n", + sb, sb_index, p_ramrod->outer_tag_config.outer_tag.tpid, + p_ramrod->outer_tag_config.outer_tag.tci); + + rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); + + if (p_tunn) + ecore_set_hw_tunn_mode_port(p_hwfn, p_ptt, + &p_hwfn->p_dev->tunnel); + + return rc; +} + +enum _ecore_status_t ecore_sp_pf_update_dcbx(struct ecore_hwfn *p_hwfn) +{ + struct ecore_spq_entry *p_ent = OSAL_NULL; + struct ecore_sp_init_data init_data; + enum _ecore_status_t rc = ECORE_NOTIMPL; + + /* Get SPQ entry */ + OSAL_MEMSET(&init_data, 0, sizeof(init_data)); + init_data.cid = ecore_spq_get_cid(p_hwfn); + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = ECORE_SPQ_MODE_CB; + + rc = ecore_sp_init_request(p_hwfn, &p_ent, + COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON, + &init_data); + if (rc != ECORE_SUCCESS) + return rc; + + ecore_dcbx_set_pf_update_params(&p_hwfn->p_dcbx_info->results, + &p_ent->ramrod.pf_update); + + return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); +} + +enum _ecore_status_t ecore_sp_pf_update_ufp(struct ecore_hwfn *p_hwfn) +{ + struct ecore_spq_entry *p_ent = OSAL_NULL; + struct ecore_sp_init_data init_data; + enum _ecore_status_t rc = ECORE_NOTIMPL; + + /* Get SPQ entry */ + OSAL_MEMSET(&init_data, 0, sizeof(init_data)); + init_data.cid = ecore_spq_get_cid(p_hwfn); + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = ECORE_SPQ_MODE_CB; + + rc = ecore_sp_init_request(p_hwfn, &p_ent, + COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON, + &init_data); + if (rc != ECORE_SUCCESS) + return rc; + + p_ent->ramrod.pf_update.update_enable_stag_pri_change = true; + if (p_hwfn->ufp_info.pri_type == ECORE_UFP_PRI_OS) + p_ent->ramrod.pf_update.enable_stag_pri_change = 1; + else + p_ent->ramrod.pf_update.enable_stag_pri_change = 0; + + return 
ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); +} + + +/* QM rate limiter resolution is 1.6Mbps */ +#define QM_RL_RESOLUTION(mb_val) ((mb_val) * 10 / 16) + +/* FW uses 1/64k to express gd */ +#define FW_GD_RESOLUTION(gd) (64 * 1024 / (gd)) + +u16 ecore_sp_rl_mb_to_qm(u32 mb_val) +{ + return (u16)OSAL_MIN_T(u32, (u16)(~0U), QM_RL_RESOLUTION(mb_val)); +} + +u16 ecore_sp_rl_gd_denom(u32 gd) +{ + return gd ? (u16)OSAL_MIN_T(u32, (u16)(~0U), FW_GD_RESOLUTION(gd)) : 0; +} + +enum _ecore_status_t ecore_sp_rl_update(struct ecore_hwfn *p_hwfn, + struct ecore_rl_update_params *params) +{ + struct ecore_spq_entry *p_ent = OSAL_NULL; + enum _ecore_status_t rc = ECORE_NOTIMPL; + struct rl_update_ramrod_data *rl_update; + struct ecore_sp_init_data init_data; + + /* Get SPQ entry */ + OSAL_MEMSET(&init_data, 0, sizeof(init_data)); + init_data.cid = ecore_spq_get_cid(p_hwfn); + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK; + + rc = ecore_sp_init_request(p_hwfn, &p_ent, + COMMON_RAMROD_RL_UPDATE, PROTOCOLID_COMMON, + &init_data); + if (rc != ECORE_SUCCESS) + return rc; + + rl_update = &p_ent->ramrod.rl_update; + + rl_update->qcn_update_param_flg = params->qcn_update_param_flg; + rl_update->dcqcn_update_param_flg = params->dcqcn_update_param_flg; + rl_update->rl_init_flg = params->rl_init_flg; + rl_update->rl_start_flg = params->rl_start_flg; + rl_update->rl_stop_flg = params->rl_stop_flg; + rl_update->rl_id_first = params->rl_id_first; + rl_update->rl_id_last = params->rl_id_last; + rl_update->rl_dc_qcn_flg = params->rl_dc_qcn_flg; + rl_update->dcqcn_reset_alpha_on_idle = + params->dcqcn_reset_alpha_on_idle; + rl_update->rl_bc_stage_th = params->rl_bc_stage_th; + rl_update->rl_timer_stage_th = params->rl_timer_stage_th; + rl_update->rl_bc_rate = OSAL_CPU_TO_LE32(params->rl_bc_rate); + rl_update->rl_max_rate = + OSAL_CPU_TO_LE16(ecore_sp_rl_mb_to_qm(params->rl_max_rate)); + rl_update->rl_r_ai = + OSAL_CPU_TO_LE16(ecore_sp_rl_mb_to_qm(params->rl_r_ai)); + rl_update->rl_r_hai = + OSAL_CPU_TO_LE16(ecore_sp_rl_mb_to_qm(params->rl_r_hai)); + rl_update->dcqcn_g = + OSAL_CPU_TO_LE16(ecore_sp_rl_gd_denom(params->dcqcn_gd)); + rl_update->dcqcn_k_us = OSAL_CPU_TO_LE32(params->dcqcn_k_us); + rl_update->dcqcn_timeuot_us = + OSAL_CPU_TO_LE32(params->dcqcn_timeuot_us); + rl_update->qcn_timeuot_us = OSAL_CPU_TO_LE32(params->qcn_timeuot_us); + + DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "rl_params: qcn_update_param_flg %x, dcqcn_update_param_flg %x, rl_init_flg %x, rl_start_flg %x, rl_stop_flg %x, rl_id_first %x, rl_id_last %x, rl_dc_qcn_flg %x,dcqcn_reset_alpha_on_idle %x, rl_bc_stage_th %x, rl_timer_stage_th %x, rl_bc_rate %x, rl_max_rate %x, rl_r_ai %x, rl_r_hai %x, dcqcn_g %x, dcqcn_k_us %x, dcqcn_timeuot_us %x, qcn_timeuot_us %x\n", + rl_update->qcn_update_param_flg, + rl_update->dcqcn_update_param_flg, + rl_update->rl_init_flg, rl_update->rl_start_flg, + rl_update->rl_stop_flg, rl_update->rl_id_first, + rl_update->rl_id_last, rl_update->rl_dc_qcn_flg, + rl_update->dcqcn_reset_alpha_on_idle, + rl_update->rl_bc_stage_th, rl_update->rl_timer_stage_th, + rl_update->rl_bc_rate, rl_update->rl_max_rate, + rl_update->rl_r_ai, rl_update->rl_r_hai, + rl_update->dcqcn_g, rl_update->dcqcn_k_us, + rl_update->dcqcn_timeuot_us, rl_update->qcn_timeuot_us); + + return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); +} + +/* Set pf update ramrod command params */ +enum _ecore_status_t +ecore_sp_pf_update_tunn_cfg(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_tunnel_info *p_tunn, 
+ enum spq_mode comp_mode, + struct ecore_spq_comp_cb *p_comp_data) +{ + struct ecore_spq_entry *p_ent = OSAL_NULL; + struct ecore_sp_init_data init_data; + enum _ecore_status_t rc = ECORE_NOTIMPL; + + if (IS_VF(p_hwfn->p_dev)) + return ecore_vf_pf_tunnel_param_update(p_hwfn, p_tunn); + + if (ECORE_IS_BB_A0(p_hwfn->p_dev)) { + DP_NOTICE(p_hwfn, true, + "A0 chip: tunnel pf update config is not supported\n"); + return rc; + } + + if (!p_tunn) + return ECORE_INVAL; + + /* Get SPQ entry */ + OSAL_MEMSET(&init_data, 0, sizeof(init_data)); + init_data.cid = ecore_spq_get_cid(p_hwfn); + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = comp_mode; + init_data.p_comp_data = p_comp_data; + + rc = ecore_sp_init_request(p_hwfn, &p_ent, + COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON, + &init_data); + if (rc != ECORE_SUCCESS) + return rc; + + ecore_tunn_set_pf_update_params(p_hwfn, p_tunn, + &p_ent->ramrod.pf_update.tunnel_config); + + rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); + if (rc != ECORE_SUCCESS) + return rc; + + ecore_set_hw_tunn_mode_port(p_hwfn, p_ptt, &p_hwfn->p_dev->tunnel); + + return rc; +} + +enum _ecore_status_t ecore_sp_pf_stop(struct ecore_hwfn *p_hwfn) +{ + struct ecore_spq_entry *p_ent = OSAL_NULL; + struct ecore_sp_init_data init_data; + enum _ecore_status_t rc = ECORE_NOTIMPL; + + /* Get SPQ entry */ + OSAL_MEMSET(&init_data, 0, sizeof(init_data)); + init_data.cid = ecore_spq_get_cid(p_hwfn); + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK; + + rc = ecore_sp_init_request(p_hwfn, &p_ent, + COMMON_RAMROD_PF_STOP, PROTOCOLID_COMMON, + &init_data); + if (rc != ECORE_SUCCESS) + return rc; + + return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); +} + +enum _ecore_status_t ecore_sp_heartbeat_ramrod(struct ecore_hwfn *p_hwfn) +{ + struct ecore_spq_entry *p_ent = OSAL_NULL; + struct ecore_sp_init_data init_data; + enum _ecore_status_t rc; + + /* Get SPQ entry */ + OSAL_MEMSET(&init_data, 0, sizeof(init_data)); + init_data.cid = ecore_spq_get_cid(p_hwfn); + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK; + + rc = ecore_sp_init_request(p_hwfn, &p_ent, + COMMON_RAMROD_EMPTY, PROTOCOLID_COMMON, + &init_data); + if (rc != ECORE_SUCCESS) + return rc; + + if (OSAL_TEST_BIT(ECORE_MF_UFP_SPECIFIC, &p_hwfn->p_dev->mf_bits)) + p_ent->ramrod.pf_update.mf_vlan |= + OSAL_CPU_TO_LE16(((u16)p_hwfn->ufp_info.tc << 13)); + + return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); +} + +enum _ecore_status_t ecore_sp_pf_update_stag(struct ecore_hwfn *p_hwfn) +{ + struct ecore_spq_entry *p_ent = OSAL_NULL; + struct ecore_sp_init_data init_data; + enum _ecore_status_t rc = ECORE_NOTIMPL; + + /* Get SPQ entry */ + OSAL_MEMSET(&init_data, 0, sizeof(init_data)); + init_data.cid = ecore_spq_get_cid(p_hwfn); + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = ECORE_SPQ_MODE_CB; + + rc = ecore_sp_init_request(p_hwfn, &p_ent, + COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON, + &init_data); + if (rc != ECORE_SUCCESS) + return rc; + + p_ent->ramrod.pf_update.update_mf_vlan_flag = true; + p_ent->ramrod.pf_update.mf_vlan = + OSAL_CPU_TO_LE16(p_hwfn->hw_info.ovlan); + + return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); +} diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_sp_commands.h b/src/spdk/dpdk/drivers/net/qede/base/ecore_sp_commands.h new file mode 100644 index 000000000..524fe57a1 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_sp_commands.h @@ -0,0 +1,166 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. + * All rights reserved. + * www.cavium.com + */ + +#ifndef __ECORE_SP_COMMANDS_H__ +#define __ECORE_SP_COMMANDS_H__ + +#include "ecore.h" +#include "ecore_spq.h" +#include "ecore_sp_api.h" + +#define ECORE_SP_EQ_COMPLETION 0x01 +#define ECORE_SP_CQE_COMPLETION 0x02 + +struct ecore_sp_init_data { + /* The CID and FID aren't necessarily derived from hwfn, + * e.g., in IOV scenarios. CID might differ between SPQ and + * other elements. + */ + u32 cid; + u16 opaque_fid; + + /* Information regarding operation upon sending & completion */ + enum spq_mode comp_mode; + struct ecore_spq_comp_cb *p_comp_data; + +}; + +/** + * @brief Acquire and initialize an SPQ entry for a given ramrod. + * + * @param p_hwfn + * @param pp_ent - will be filled with a pointer to an entry upon success + * @param cmd - dependent upon protocol + * @param protocol + * @param p_data - various configuration required for ramrod + * + * @return ECORE_SUCCESS upon success, otherwise failure. + */ +enum _ecore_status_t ecore_sp_init_request(struct ecore_hwfn *p_hwfn, + struct ecore_spq_entry **pp_ent, + u8 cmd, + u8 protocol, + struct ecore_sp_init_data *p_data); + +/** + * @brief ecore_sp_pf_start - PF Function Start Ramrod + * + * This ramrod is sent to initialize a physical function (PF). It will + * configure the function related parameters and write its completion to the + * event ring specified in the parameters. + * + * Ramrods complete on the common event ring for the PF. This ring is + * allocated by the driver on host memory and its parameters are written + * to the internal RAM of the UStorm by the Function Start Ramrod. + * + * @param p_hwfn + * @param p_ptt + * @param p_tunn - pf start tunneling configuration + * @param allow_npar_tx_switch - npar tx switching to be used + * for vports configured for tx-switching. + * + * @return enum _ecore_status_t + */ + +enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_tunnel_info *p_tunn, + bool allow_npar_tx_switch); + +/** + * @brief ecore_sp_pf_update - PF Function Update Ramrod + * + * This ramrod updates function-related parameters. Every parameter can be + * updated independently, according to configuration flags. + * + * @note Final phase API. + * + * @param p_hwfn + * + * @return enum _ecore_status_t + */ + +enum _ecore_status_t ecore_sp_pf_update_dcbx(struct ecore_hwfn *p_hwfn); + +/** + * @brief ecore_sp_pf_stop - PF Function Stop Ramrod + * + * This ramrod is sent to close a Physical Function (PF). It is the last ramrod + * sent and the last completion written to the PF's Event Ring. This ramrod also + * deletes the context for the Slowhwfn connection on this PF. + * + * @note Not required for first packet.
+ * + * @param p_hwfn + * + * @return enum _ecore_status_t + */ + +enum _ecore_status_t ecore_sp_pf_stop(struct ecore_hwfn *p_hwfn); + +/** + * @brief ecore_sp_heartbeat_ramrod - Send empty Ramrod + * + * @param p_hwfn + * + * @return enum _ecore_status_t + */ + +enum _ecore_status_t ecore_sp_heartbeat_ramrod(struct ecore_hwfn *p_hwfn); + +struct ecore_rl_update_params { + u8 qcn_update_param_flg; + u8 dcqcn_update_param_flg; + u8 rl_init_flg; + u8 rl_start_flg; + u8 rl_stop_flg; + u8 rl_id_first; + u8 rl_id_last; + u8 dcqcn_reset_alpha_on_idle; + u8 rl_bc_stage_th; + u8 rl_timer_stage_th; + u8 rl_dc_qcn_flg; /* If set, RL will be used for DCQCN */ + u32 rl_bc_rate; /* Byte Counter Limit */ + u32 rl_max_rate; /* Maximum rate in Mbps resolution */ + u32 rl_r_ai; /* Active increase rate */ + u32 rl_r_hai; /* Hyper active increase rate */ + u32 dcqcn_gd; /* DCQCN Alpha update gain */ + u32 dcqcn_k_us; /* DCQCN Alpha update interval */ + u32 dcqcn_timeuot_us; + u32 qcn_timeuot_us; +}; + +/** + * @brief ecore_sp_rl_update - Update rate limiters + * + * @param p_hwfn + * @param params + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_sp_rl_update(struct ecore_hwfn *p_hwfn, + struct ecore_rl_update_params *params); + +/** + * @brief ecore_sp_pf_update_stag - PF STAG value update Ramrod + * + * @param p_hwfn + * + * @return enum _ecore_status_t + */ + +enum _ecore_status_t ecore_sp_pf_update_stag(struct ecore_hwfn *p_hwfn); + +/** + * @brief ecore_sp_pf_update_ufp - PF ufp update Ramrod + * + * @param p_hwfn + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_sp_pf_update_ufp(struct ecore_hwfn *p_hwfn); + +#endif /*__ECORE_SP_COMMANDS_H__*/ diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_spq.c b/src/spdk/dpdk/drivers/net/qede/base/ecore_spq.c new file mode 100644 index 000000000..6c386821f --- /dev/null +++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_spq.c @@ -0,0 +1,1088 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. + * All rights reserved. + * www.cavium.com + */ + +#include "bcm_osal.h" +#include "reg_addr.h" +#include "ecore_gtt_reg_addr.h" +#include "ecore_hsi_common.h" +#include "ecore.h" +#include "ecore_sp_api.h" +#include "ecore_spq.h" +#include "ecore_iro.h" +#include "ecore_init_fw_funcs.h" +#include "ecore_cxt.h" +#include "ecore_int.h" +#include "ecore_dev_api.h" +#include "ecore_mcp.h" +#include "ecore_hw.h" +#include "ecore_sriov.h" + +/*************************************************************************** + * Structures & Definitions + ***************************************************************************/ + +#define SPQ_HIGH_PRI_RESERVE_DEFAULT (1) + +#define SPQ_BLOCK_DELAY_MAX_ITER (10) +#define SPQ_BLOCK_DELAY_US (10) +#define SPQ_BLOCK_SLEEP_MAX_ITER (200) +#define SPQ_BLOCK_SLEEP_MS (5) + +/*************************************************************************** + * Blocking Imp.
(BLOCK/EBLOCK mode) + ***************************************************************************/ +static void ecore_spq_blocking_cb(struct ecore_hwfn *p_hwfn, void *cookie, + union event_ring_data OSAL_UNUSED * data, + u8 fw_return_code) +{ + struct ecore_spq_comp_done *comp_done; + + comp_done = (struct ecore_spq_comp_done *)cookie; + + comp_done->done = 0x1; + comp_done->fw_return_code = fw_return_code; + + /* make update visible to waiting thread */ + OSAL_SMP_WMB(p_hwfn->p_dev); +} + +static enum _ecore_status_t __ecore_spq_block(struct ecore_hwfn *p_hwfn, + struct ecore_spq_entry *p_ent, + u8 *p_fw_ret, + bool sleep_between_iter) +{ + struct ecore_spq_comp_done *comp_done; + u32 iter_cnt; + + comp_done = (struct ecore_spq_comp_done *)p_ent->comp_cb.cookie; + iter_cnt = sleep_between_iter ? p_hwfn->p_spq->block_sleep_max_iter + : SPQ_BLOCK_DELAY_MAX_ITER; +#ifndef ASIC_ONLY + if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && sleep_between_iter) + iter_cnt *= 5; +#endif + + while (iter_cnt--) { + OSAL_POLL_MODE_DPC(p_hwfn); + OSAL_SMP_RMB(p_hwfn->p_dev); + if (comp_done->done == 1) { + if (p_fw_ret) + *p_fw_ret = comp_done->fw_return_code; + return ECORE_SUCCESS; + } + + if (sleep_between_iter) + OSAL_MSLEEP(SPQ_BLOCK_SLEEP_MS); + else + OSAL_UDELAY(SPQ_BLOCK_DELAY_US); + } + + return ECORE_TIMEOUT; +} + +static enum _ecore_status_t ecore_spq_block(struct ecore_hwfn *p_hwfn, + struct ecore_spq_entry *p_ent, + u8 *p_fw_ret, bool skip_quick_poll) +{ + struct ecore_spq_comp_done *comp_done; + struct ecore_ptt *p_ptt; + enum _ecore_status_t rc; + + /* A relatively short polling period w/o sleeping, to allow the FW to + * complete the ramrod and thus possibly to avoid the following sleeps. + */ + if (!skip_quick_poll) { + rc = __ecore_spq_block(p_hwfn, p_ent, p_fw_ret, false); + if (rc == ECORE_SUCCESS) + return ECORE_SUCCESS; + } + + /* Move to polling with a sleeping period between iterations */ + rc = __ecore_spq_block(p_hwfn, p_ent, p_fw_ret, true); + if (rc == ECORE_SUCCESS) + return ECORE_SUCCESS; + + p_ptt = ecore_ptt_acquire(p_hwfn); + if (!p_ptt) + return ECORE_AGAIN; + + DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n"); + rc = ecore_mcp_drain(p_hwfn, p_ptt); + ecore_ptt_release(p_hwfn, p_ptt); + if (rc != ECORE_SUCCESS) { + DP_NOTICE(p_hwfn, true, "MCP drain failed\n"); + goto err; + } + + /* Retry after drain */ + rc = __ecore_spq_block(p_hwfn, p_ent, p_fw_ret, true); + if (rc == ECORE_SUCCESS) + return ECORE_SUCCESS; + + comp_done = (struct ecore_spq_comp_done *)p_ent->comp_cb.cookie; + if (comp_done->done == 1) { + if (p_fw_ret) + *p_fw_ret = comp_done->fw_return_code; + return ECORE_SUCCESS; + } +err: + DP_NOTICE(p_hwfn, true, + "Ramrod is stuck [CID %08x cmd %02x proto %02x echo %04x]\n", + OSAL_LE32_TO_CPU(p_ent->elem.hdr.cid), + p_ent->elem.hdr.cmd_id, p_ent->elem.hdr.protocol_id, + OSAL_LE16_TO_CPU(p_ent->elem.hdr.echo)); + + ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_RAMROD_FAIL); + + return ECORE_BUSY; +} + +void ecore_set_spq_block_timeout(struct ecore_hwfn *p_hwfn, + u32 spq_timeout_ms) +{ + p_hwfn->p_spq->block_sleep_max_iter = spq_timeout_ms ? 
+ spq_timeout_ms / SPQ_BLOCK_SLEEP_MS : + SPQ_BLOCK_SLEEP_MAX_ITER; +} + +/*************************************************************************** + * SPQ entries inner API + ***************************************************************************/ +static enum _ecore_status_t +ecore_spq_fill_entry(struct ecore_hwfn *p_hwfn, struct ecore_spq_entry *p_ent) +{ + p_ent->flags = 0; + + switch (p_ent->comp_mode) { + case ECORE_SPQ_MODE_EBLOCK: + case ECORE_SPQ_MODE_BLOCK: + p_ent->comp_cb.function = ecore_spq_blocking_cb; + break; + case ECORE_SPQ_MODE_CB: + break; + default: + DP_NOTICE(p_hwfn, true, "Unknown SPQE completion mode %d\n", + p_ent->comp_mode); + return ECORE_INVAL; + } + + DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, + "Ramrod header: [CID 0x%08x CMD 0x%02x protocol 0x%02x]" + " Data pointer: [%08x:%08x] Completion Mode: %s\n", + p_ent->elem.hdr.cid, p_ent->elem.hdr.cmd_id, + p_ent->elem.hdr.protocol_id, + p_ent->elem.data_ptr.hi, p_ent->elem.data_ptr.lo, + D_TRINE(p_ent->comp_mode, ECORE_SPQ_MODE_EBLOCK, + ECORE_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK", + "MODE_CB")); + + return ECORE_SUCCESS; +} + +/*************************************************************************** + * HSI access + ***************************************************************************/ + +#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7 +#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_SHIFT 6 + +static void ecore_spq_hw_initialize(struct ecore_hwfn *p_hwfn, + struct ecore_spq *p_spq) +{ + __le32 *p_spq_base_lo, *p_spq_base_hi; + struct regpair *p_consolid_base_addr; + u8 *p_flags1, *p_flags9, *p_flags10; + struct core_conn_context *p_cxt; + struct ecore_cxt_info cxt_info; + u32 core_conn_context_size; + __le16 *p_physical_q0; + u16 physical_q; + enum _ecore_status_t rc; + + cxt_info.iid = p_spq->cid; + + rc = ecore_cxt_get_cid_info(p_hwfn, &cxt_info); + + if (rc != ECORE_SUCCESS) { + DP_NOTICE(p_hwfn, true, "Cannot find context info for cid=%d\n", + p_spq->cid); + return; + } + + p_cxt = cxt_info.p_cxt; + core_conn_context_size = sizeof(*p_cxt); + p_flags1 = &p_cxt->xstorm_ag_context.flags1; + p_flags9 = &p_cxt->xstorm_ag_context.flags9; + p_flags10 = &p_cxt->xstorm_ag_context.flags10; + p_physical_q0 = &p_cxt->xstorm_ag_context.physical_q0; + p_spq_base_lo = &p_cxt->xstorm_st_context.spq_base_lo; + p_spq_base_hi = &p_cxt->xstorm_st_context.spq_base_hi; + p_consolid_base_addr = &p_cxt->xstorm_st_context.consolid_base_addr; + + /* @@@TBD we zero the context until we have ilt_reset implemented. 
*/ + OSAL_MEM_ZERO(p_cxt, core_conn_context_size); + + SET_FIELD(*p_flags10, XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1); + SET_FIELD(*p_flags1, XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1); + SET_FIELD(*p_flags9, XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1); + + /* CDU validation - FIXME currently disabled */ + + /* QM physical queue */ + physical_q = ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB); + *p_physical_q0 = OSAL_CPU_TO_LE16(physical_q); + + *p_spq_base_lo = DMA_LO_LE(p_spq->chain.p_phys_addr); + *p_spq_base_hi = DMA_HI_LE(p_spq->chain.p_phys_addr); + + DMA_REGPAIR_LE(*p_consolid_base_addr, + p_hwfn->p_consq->chain.p_phys_addr); +} + +static enum _ecore_status_t ecore_spq_hw_post(struct ecore_hwfn *p_hwfn, + struct ecore_spq *p_spq, + struct ecore_spq_entry *p_ent) +{ + struct ecore_chain *p_chain = &p_hwfn->p_spq->chain; + struct core_db_data *p_db_data = &p_spq->db_data; + u16 echo = ecore_chain_get_prod_idx(p_chain); + struct slow_path_element *elem; + + p_ent->elem.hdr.echo = OSAL_CPU_TO_LE16(echo); + elem = ecore_chain_produce(p_chain); + if (!elem) { + DP_NOTICE(p_hwfn, true, "Failed to produce from SPQ chain\n"); + return ECORE_INVAL; + } + + *elem = p_ent->elem; /* Struct assignment */ + + p_db_data->spq_prod = + OSAL_CPU_TO_LE16(ecore_chain_get_prod_idx(p_chain)); + + /* Make sure the SPQE is updated before the doorbell */ + OSAL_WMB(p_hwfn->p_dev); + + DOORBELL(p_hwfn, p_spq->db_addr_offset, *(u32 *)p_db_data); + + /* Make sure doorbell is rang */ + OSAL_WMB(p_hwfn->p_dev); + + DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, + "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x" + " agg_params: %02x, prod: %04x\n", + p_spq->db_addr_offset, p_spq->cid, p_db_data->params, + p_db_data->agg_flags, ecore_chain_get_prod_idx(p_chain)); + + return ECORE_SUCCESS; +} + +/*************************************************************************** + * Asynchronous events + ***************************************************************************/ + +static enum _ecore_status_t +ecore_async_event_completion(struct ecore_hwfn *p_hwfn, + struct event_ring_entry *p_eqe) +{ + ecore_spq_async_comp_cb cb; + enum _ecore_status_t rc; + + if (p_eqe->protocol_id >= MAX_PROTOCOL_TYPE) { + DP_ERR(p_hwfn, "Wrong protocol: %d\n", p_eqe->protocol_id); + return ECORE_INVAL; + } + + cb = p_hwfn->p_spq->async_comp_cb[p_eqe->protocol_id]; + if (!cb) { + DP_NOTICE(p_hwfn, + true, "Unknown Async completion for protocol: %d\n", + p_eqe->protocol_id); + return ECORE_INVAL; + } + + rc = cb(p_hwfn, p_eqe->opcode, p_eqe->echo, + &p_eqe->data, p_eqe->fw_return_code); + if (rc != ECORE_SUCCESS) + DP_NOTICE(p_hwfn, true, + "Async completion callback failed, rc = %d [opcode %x, echo %x, fw_return_code %x]", + rc, p_eqe->opcode, p_eqe->echo, + p_eqe->fw_return_code); + + return rc; +} + +enum _ecore_status_t +ecore_spq_register_async_cb(struct ecore_hwfn *p_hwfn, + enum protocol_type protocol_id, + ecore_spq_async_comp_cb cb) +{ + if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE)) + return ECORE_INVAL; + + p_hwfn->p_spq->async_comp_cb[protocol_id] = cb; + return ECORE_SUCCESS; +} + +void +ecore_spq_unregister_async_cb(struct ecore_hwfn *p_hwfn, + enum protocol_type protocol_id) +{ + if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE)) + return; + + p_hwfn->p_spq->async_comp_cb[protocol_id] = OSAL_NULL; +} + +/*************************************************************************** + * EQ API + ***************************************************************************/ +void ecore_eq_prod_update(struct ecore_hwfn *p_hwfn, u16 
prod) +{ + u32 addr = GTT_BAR0_MAP_REG_USDM_RAM + + USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id); + + REG_WR16(p_hwfn, addr, prod); + + /* keep prod updates ordered */ + OSAL_MMIOWB(p_hwfn->p_dev); +} + +enum _ecore_status_t ecore_eq_completion(struct ecore_hwfn *p_hwfn, + void *cookie) +{ + struct ecore_eq *p_eq = cookie; + struct ecore_chain *p_chain = &p_eq->chain; + u16 fw_cons_idx = 0; + enum _ecore_status_t rc = ECORE_SUCCESS; + + if (!p_hwfn->p_spq) { + DP_ERR(p_hwfn, "Unexpected NULL p_spq\n"); + return ECORE_INVAL; + } + + /* take a snapshot of the FW consumer */ + fw_cons_idx = OSAL_LE16_TO_CPU(*p_eq->p_fw_cons); + + DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx); + + /* Need to guarantee the fw_cons index we use points to a usuable + * element (to comply with our chain), so our macros would comply + */ + if ((fw_cons_idx & ecore_chain_get_usable_per_page(p_chain)) == + ecore_chain_get_usable_per_page(p_chain)) { + fw_cons_idx += ecore_chain_get_unusable_per_page(p_chain); + } + + /* Complete current segment of eq entries */ + while (fw_cons_idx != ecore_chain_get_cons_idx(p_chain)) { + struct event_ring_entry *p_eqe = ecore_chain_consume(p_chain); + if (!p_eqe) { + DP_ERR(p_hwfn, + "Unexpected NULL chain consumer entry\n"); + break; + } + + DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, + "op %x prot %x res0 %x echo %x fwret %x flags %x\n", + p_eqe->opcode, /* Event Opcode */ + p_eqe->protocol_id, /* Event Protocol ID */ + p_eqe->reserved0, /* Reserved */ + /* Echo value from ramrod data on the host */ + OSAL_LE16_TO_CPU(p_eqe->echo), + p_eqe->fw_return_code, /* FW return code for SP + * ramrods + */ + p_eqe->flags); + + if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) + ecore_async_event_completion(p_hwfn, p_eqe); + else + ecore_spq_completion(p_hwfn, + p_eqe->echo, + p_eqe->fw_return_code, + &p_eqe->data); + + ecore_chain_recycle_consumed(p_chain); + } + + ecore_eq_prod_update(p_hwfn, ecore_chain_get_prod_idx(p_chain)); + + return rc; +} + +enum _ecore_status_t ecore_eq_alloc(struct ecore_hwfn *p_hwfn, u16 num_elem) +{ + struct ecore_eq *p_eq; + + /* Allocate EQ struct */ + p_eq = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_eq)); + if (!p_eq) { + DP_NOTICE(p_hwfn, false, + "Failed to allocate `struct ecore_eq'\n"); + return ECORE_NOMEM; + } + + /* Allocate and initialize EQ chain*/ + if (ecore_chain_alloc(p_hwfn->p_dev, + ECORE_CHAIN_USE_TO_PRODUCE, + ECORE_CHAIN_MODE_PBL, + ECORE_CHAIN_CNT_TYPE_U16, + num_elem, + sizeof(union event_ring_element), + &p_eq->chain, OSAL_NULL) != ECORE_SUCCESS) { + DP_NOTICE(p_hwfn, false, "Failed to allocate eq chain\n"); + goto eq_allocate_fail; + } + + /* register EQ completion on the SP SB */ + ecore_int_register_cb(p_hwfn, ecore_eq_completion, + p_eq, &p_eq->eq_sb_index, &p_eq->p_fw_cons); + + p_hwfn->p_eq = p_eq; + return ECORE_SUCCESS; + +eq_allocate_fail: + OSAL_FREE(p_hwfn->p_dev, p_eq); + return ECORE_NOMEM; +} + +void ecore_eq_setup(struct ecore_hwfn *p_hwfn) +{ + ecore_chain_reset(&p_hwfn->p_eq->chain); +} + +void ecore_eq_free(struct ecore_hwfn *p_hwfn) +{ + if (!p_hwfn->p_eq) + return; + + ecore_chain_free(p_hwfn->p_dev, &p_hwfn->p_eq->chain); + + OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_eq); + p_hwfn->p_eq = OSAL_NULL; +} + +/*************************************************************************** +* CQE API - manipulate EQ functionality +***************************************************************************/ +static enum _ecore_status_t ecore_cqe_completion(struct ecore_hwfn *p_hwfn, + struct eth_slow_path_rx_cqe + 
*cqe, + enum protocol_type protocol) +{ + if (IS_VF(p_hwfn->p_dev)) + return OSAL_VF_CQE_COMPLETION(p_hwfn, cqe, protocol); + + /* @@@tmp - it's possible we'll eventually want to handle some + * actual commands that can arrive here, but for now this is only + * used to complete the ramrod using the echo value on the cqe + */ + return ecore_spq_completion(p_hwfn, cqe->echo, 0, OSAL_NULL); +} + +enum _ecore_status_t ecore_eth_cqe_completion(struct ecore_hwfn *p_hwfn, + struct eth_slow_path_rx_cqe *cqe) +{ + enum _ecore_status_t rc; + + rc = ecore_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH); + if (rc) { + DP_NOTICE(p_hwfn, true, + "Failed to handle RXQ CQE [cmd 0x%02x]\n", + cqe->ramrod_cmd_id); + } + + return rc; +} + +/*************************************************************************** + * Slow hwfn Queue (spq) + ***************************************************************************/ +void ecore_spq_setup(struct ecore_hwfn *p_hwfn) +{ + struct ecore_spq *p_spq = p_hwfn->p_spq; + struct ecore_spq_entry *p_virt = OSAL_NULL; + struct core_db_data *p_db_data; + void OSAL_IOMEM *db_addr; + dma_addr_t p_phys = 0; + u32 i, capacity; + enum _ecore_status_t rc; + + OSAL_LIST_INIT(&p_spq->pending); + OSAL_LIST_INIT(&p_spq->completion_pending); + OSAL_LIST_INIT(&p_spq->free_pool); + OSAL_LIST_INIT(&p_spq->unlimited_pending); + OSAL_SPIN_LOCK_INIT(&p_spq->lock); + + /* SPQ empty pool */ + p_phys = p_spq->p_phys + OFFSETOF(struct ecore_spq_entry, ramrod); + p_virt = p_spq->p_virt; + + capacity = ecore_chain_get_capacity(&p_spq->chain); + for (i = 0; i < capacity; i++) { + DMA_REGPAIR_LE(p_virt->elem.data_ptr, p_phys); + + OSAL_LIST_PUSH_TAIL(&p_virt->list, &p_spq->free_pool); + + p_virt++; + p_phys += sizeof(struct ecore_spq_entry); + } + + /* Statistics */ + p_spq->normal_count = 0; + p_spq->comp_count = 0; + p_spq->comp_sent_count = 0; + p_spq->unlimited_pending_count = 0; + + OSAL_MEM_ZERO(p_spq->p_comp_bitmap, + SPQ_COMP_BMAP_SIZE * sizeof(unsigned long)); + p_spq->comp_bitmap_idx = 0; + + /* SPQ cid, cannot fail */ + ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid); + ecore_spq_hw_initialize(p_hwfn, p_spq); + + /* reset the chain itself */ + ecore_chain_reset(&p_spq->chain); + + /* Initialize the address/data of the SPQ doorbell */ + p_spq->db_addr_offset = DB_ADDR(p_spq->cid, DQ_DEMS_LEGACY); + p_db_data = &p_spq->db_data; + OSAL_MEM_ZERO(p_db_data, sizeof(*p_db_data)); + SET_FIELD(p_db_data->params, CORE_DB_DATA_DEST, DB_DEST_XCM); + SET_FIELD(p_db_data->params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_MAX); + SET_FIELD(p_db_data->params, CORE_DB_DATA_AGG_VAL_SEL, + DQ_XCM_CORE_SPQ_PROD_CMD); + p_db_data->agg_flags = DQ_XCM_CORE_DQ_CF_CMD; + + /* Register the SPQ doorbell with the doorbell recovery mechanism */ + db_addr = (void *)((u8 *)p_hwfn->doorbells + p_spq->db_addr_offset); + rc = ecore_db_recovery_add(p_hwfn->p_dev, db_addr, &p_spq->db_data, + DB_REC_WIDTH_32B, DB_REC_KERNEL); + if (rc != ECORE_SUCCESS) + DP_INFO(p_hwfn, + "Failed to register the SPQ doorbell with the doorbell recovery mechanism\n"); +} + +enum _ecore_status_t ecore_spq_alloc(struct ecore_hwfn *p_hwfn) +{ + struct ecore_spq_entry *p_virt = OSAL_NULL; + struct ecore_spq *p_spq = OSAL_NULL; + dma_addr_t p_phys = 0; + u32 capacity; + + /* SPQ struct */ + p_spq = + OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(struct ecore_spq)); + if (!p_spq) { + DP_NOTICE(p_hwfn, false, "Failed to allocate `struct ecore_spq'\n"); + return ECORE_NOMEM; + } + + /* SPQ ring */ + if (ecore_chain_alloc(p_hwfn->p_dev, + 
ECORE_CHAIN_USE_TO_PRODUCE, + ECORE_CHAIN_MODE_SINGLE, + ECORE_CHAIN_CNT_TYPE_U16, + 0, /* N/A when the mode is SINGLE */ + sizeof(struct slow_path_element), + &p_spq->chain, OSAL_NULL)) { + DP_NOTICE(p_hwfn, false, "Failed to allocate spq chain\n"); + goto spq_allocate_fail; + } + + /* allocate and fill the SPQ elements (incl. ramrod data list) */ + capacity = ecore_chain_get_capacity(&p_spq->chain); + p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, &p_phys, + capacity * + sizeof(struct ecore_spq_entry)); + if (!p_virt) + goto spq_allocate_fail; + + p_spq->p_virt = p_virt; + p_spq->p_phys = p_phys; + +#ifdef CONFIG_ECORE_LOCK_ALLOC + if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_spq->lock)) + goto spq_allocate_fail; +#endif + + p_hwfn->p_spq = p_spq; + return ECORE_SUCCESS; + +spq_allocate_fail: + ecore_chain_free(p_hwfn->p_dev, &p_spq->chain); + OSAL_FREE(p_hwfn->p_dev, p_spq); + return ECORE_NOMEM; +} + +void ecore_spq_free(struct ecore_hwfn *p_hwfn) +{ + struct ecore_spq *p_spq = p_hwfn->p_spq; + void OSAL_IOMEM *db_addr; + u32 capacity; + + if (!p_spq) + return; + + /* Delete the SPQ doorbell from the doorbell recovery mechanism */ + db_addr = (void *)((u8 *)p_hwfn->doorbells + p_spq->db_addr_offset); + ecore_db_recovery_del(p_hwfn->p_dev, db_addr, &p_spq->db_data); + + if (p_spq->p_virt) { + capacity = ecore_chain_get_capacity(&p_spq->chain); + OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, + p_spq->p_virt, + p_spq->p_phys, + capacity * + sizeof(struct ecore_spq_entry)); + } + + ecore_chain_free(p_hwfn->p_dev, &p_spq->chain); +#ifdef CONFIG_ECORE_LOCK_ALLOC + OSAL_SPIN_LOCK_DEALLOC(&p_spq->lock); +#endif + + OSAL_FREE(p_hwfn->p_dev, p_spq); +} + +enum _ecore_status_t +ecore_spq_get_entry(struct ecore_hwfn *p_hwfn, struct ecore_spq_entry **pp_ent) +{ + struct ecore_spq *p_spq = p_hwfn->p_spq; + struct ecore_spq_entry *p_ent = OSAL_NULL; + enum _ecore_status_t rc = ECORE_SUCCESS; + + OSAL_SPIN_LOCK(&p_spq->lock); + + if (OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) { + p_ent = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC, sizeof(*p_ent)); + if (!p_ent) { + DP_NOTICE(p_hwfn, false, "Failed to allocate an SPQ entry for a pending ramrod\n"); + rc = ECORE_NOMEM; + goto out_unlock; + } + p_ent->queue = &p_spq->unlimited_pending; + } else { + p_ent = OSAL_LIST_FIRST_ENTRY(&p_spq->free_pool, + struct ecore_spq_entry, list); + OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->free_pool); + p_ent->queue = &p_spq->pending; + } + + *pp_ent = p_ent; + +out_unlock: + OSAL_SPIN_UNLOCK(&p_spq->lock); + return rc; +} + +/* Locked variant; Should be called while the SPQ lock is taken */ +static void __ecore_spq_return_entry(struct ecore_hwfn *p_hwfn, + struct ecore_spq_entry *p_ent) +{ + OSAL_LIST_PUSH_TAIL(&p_ent->list, &p_hwfn->p_spq->free_pool); +} + +void ecore_spq_return_entry(struct ecore_hwfn *p_hwfn, + struct ecore_spq_entry *p_ent) +{ + OSAL_SPIN_LOCK(&p_hwfn->p_spq->lock); + __ecore_spq_return_entry(p_hwfn, p_ent); + OSAL_SPIN_UNLOCK(&p_hwfn->p_spq->lock); +} + +/** + * @brief ecore_spq_add_entry - adds a new entry to the pending + * list. Should be used while lock is being held. + * + * Addes an entry to the pending list is there is room (en empty + * element is available in the free_pool), or else places the + * entry in the unlimited_pending pool. 
+ * + * @param p_hwfn + * @param p_ent + * @param priority + * + * @return enum _ecore_status_t + */ +static enum _ecore_status_t +ecore_spq_add_entry(struct ecore_hwfn *p_hwfn, + struct ecore_spq_entry *p_ent, enum spq_priority priority) +{ + struct ecore_spq *p_spq = p_hwfn->p_spq; + + if (p_ent->queue == &p_spq->unlimited_pending) { + if (OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) { + OSAL_LIST_PUSH_TAIL(&p_ent->list, + &p_spq->unlimited_pending); + p_spq->unlimited_pending_count++; + + return ECORE_SUCCESS; + + } else { + struct ecore_spq_entry *p_en2; + + p_en2 = OSAL_LIST_FIRST_ENTRY(&p_spq->free_pool, + struct ecore_spq_entry, + list); + OSAL_LIST_REMOVE_ENTRY(&p_en2->list, &p_spq->free_pool); + + /* Copy the ring element physical pointer to the new + * entry, since we are about to override the entire ring + * entry and don't want to lose the pointer. + */ + p_ent->elem.data_ptr = p_en2->elem.data_ptr; + + *p_en2 = *p_ent; + + /* EBLOCK responsible to free the allocated p_ent */ + if (p_ent->comp_mode != ECORE_SPQ_MODE_EBLOCK) + OSAL_FREE(p_hwfn->p_dev, p_ent); + + p_ent = p_en2; + } + } + + /* entry is to be placed in 'pending' queue */ + switch (priority) { + case ECORE_SPQ_PRIORITY_NORMAL: + OSAL_LIST_PUSH_TAIL(&p_ent->list, &p_spq->pending); + p_spq->normal_count++; + break; + case ECORE_SPQ_PRIORITY_HIGH: + OSAL_LIST_PUSH_HEAD(&p_ent->list, &p_spq->pending); + p_spq->high_count++; + break; + default: + return ECORE_INVAL; + } + + return ECORE_SUCCESS; +} + +/*************************************************************************** + * Accessor + ***************************************************************************/ + +u32 ecore_spq_get_cid(struct ecore_hwfn *p_hwfn) +{ + if (!p_hwfn->p_spq) + return 0xffffffff; /* illegal */ + return p_hwfn->p_spq->cid; +} + +/*************************************************************************** + * Posting new Ramrods + ***************************************************************************/ + +static enum _ecore_status_t ecore_spq_post_list(struct ecore_hwfn *p_hwfn, + osal_list_t *head, + u32 keep_reserve) +{ + struct ecore_spq *p_spq = p_hwfn->p_spq; + enum _ecore_status_t rc; + + /* TODO - implementation might be wasteful; will always keep room + * for an additional high priority ramrod (even if one is already + * pending FW) + */ + while (ecore_chain_get_elem_left(&p_spq->chain) > keep_reserve && + !OSAL_LIST_IS_EMPTY(head)) { + struct ecore_spq_entry *p_ent = + OSAL_LIST_FIRST_ENTRY(head, struct ecore_spq_entry, list); + if (p_ent != OSAL_NULL) { +#if defined(_NTDDK_) +#pragma warning(suppress : 6011 28182) +#endif + OSAL_LIST_REMOVE_ENTRY(&p_ent->list, head); + OSAL_LIST_PUSH_TAIL(&p_ent->list, + &p_spq->completion_pending); + p_spq->comp_sent_count++; + + rc = ecore_spq_hw_post(p_hwfn, p_spq, p_ent); + if (rc) { + OSAL_LIST_REMOVE_ENTRY(&p_ent->list, + &p_spq->completion_pending); + __ecore_spq_return_entry(p_hwfn, p_ent); + return rc; + } + } + } + + return ECORE_SUCCESS; +} + +static enum _ecore_status_t ecore_spq_pend_post(struct ecore_hwfn *p_hwfn) +{ + struct ecore_spq *p_spq = p_hwfn->p_spq; + struct ecore_spq_entry *p_ent = OSAL_NULL; + + while (!OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) { + if (OSAL_LIST_IS_EMPTY(&p_spq->unlimited_pending)) + break; + + p_ent = OSAL_LIST_FIRST_ENTRY(&p_spq->unlimited_pending, + struct ecore_spq_entry, list); + if (!p_ent) + return ECORE_INVAL; + +#if defined(_NTDDK_) +#pragma warning(suppress : 6011) +#endif + OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->unlimited_pending); + + 
ecore_spq_add_entry(p_hwfn, p_ent, p_ent->priority); + } + + return ecore_spq_post_list(p_hwfn, + &p_spq->pending, SPQ_HIGH_PRI_RESERVE_DEFAULT); +} + +enum _ecore_status_t ecore_spq_post(struct ecore_hwfn *p_hwfn, + struct ecore_spq_entry *p_ent, + u8 *fw_return_code) +{ + enum _ecore_status_t rc = ECORE_SUCCESS; + struct ecore_spq *p_spq = p_hwfn ? p_hwfn->p_spq : OSAL_NULL; + bool b_ret_ent = true; + + if (!p_hwfn) + return ECORE_INVAL; + + if (!p_ent) { + DP_NOTICE(p_hwfn, true, "Got a NULL pointer\n"); + return ECORE_INVAL; + } + + if (p_hwfn->p_dev->recov_in_prog) { + DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, + "Recovery is in progress -> skip spq post" + " [cmd %02x protocol %02x]\n", + p_ent->elem.hdr.cmd_id, p_ent->elem.hdr.protocol_id); + /* Return success to let the flows to be completed successfully + * w/o any error handling. + */ + return ECORE_SUCCESS; + } + + OSAL_SPIN_LOCK(&p_spq->lock); + + /* Complete the entry */ + rc = ecore_spq_fill_entry(p_hwfn, p_ent); + + /* Check return value after LOCK is taken for cleaner error flow */ + if (rc) + goto spq_post_fail; + + /* Add the request to the pending queue */ + rc = ecore_spq_add_entry(p_hwfn, p_ent, p_ent->priority); + if (rc) + goto spq_post_fail; + + rc = ecore_spq_pend_post(p_hwfn); + if (rc) { + /* Since it's possible that pending failed for a different + * entry [although unlikely], the failed entry was already + * dealt with; No need to return it here. + */ + b_ret_ent = false; + goto spq_post_fail; + } + + OSAL_SPIN_UNLOCK(&p_spq->lock); + + if (p_ent->comp_mode == ECORE_SPQ_MODE_EBLOCK) { + /* For entries in ECORE BLOCK mode, the completion code cannot + * perform the necessary cleanup - if it did, we couldn't + * access p_ent here to see whether it's successful or not. + * Thus, after gaining the answer perform the cleanup here. + */ + rc = ecore_spq_block(p_hwfn, p_ent, fw_return_code, + p_ent->queue == &p_spq->unlimited_pending); + + if (p_ent->queue == &p_spq->unlimited_pending) { + /* This is an allocated p_ent which does not need to + * return to pool. + */ + OSAL_FREE(p_hwfn->p_dev, p_ent); + + /* TBD: handle error flow and remove p_ent from + * completion pending + */ + return rc; + } + + if (rc) + goto spq_post_fail2; + + /* return to pool */ + ecore_spq_return_entry(p_hwfn, p_ent); + } + return rc; + +spq_post_fail2: + OSAL_SPIN_LOCK(&p_spq->lock); + OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->completion_pending); + ecore_chain_return_produced(&p_spq->chain); + +spq_post_fail: + /* return to the free pool */ + if (b_ret_ent) + __ecore_spq_return_entry(p_hwfn, p_ent); + OSAL_SPIN_UNLOCK(&p_spq->lock); + + return rc; +} + +enum _ecore_status_t ecore_spq_completion(struct ecore_hwfn *p_hwfn, + __le16 echo, + u8 fw_return_code, + union event_ring_data *p_data) +{ + struct ecore_spq *p_spq; + struct ecore_spq_entry *p_ent = OSAL_NULL; + struct ecore_spq_entry *tmp; + struct ecore_spq_entry *found = OSAL_NULL; + enum _ecore_status_t rc; + + p_spq = p_hwfn->p_spq; + if (!p_spq) { + DP_ERR(p_hwfn, "Unexpected NULL p_spq\n"); + return ECORE_INVAL; + } + + OSAL_SPIN_LOCK(&p_spq->lock); + OSAL_LIST_FOR_EACH_ENTRY_SAFE(p_ent, + tmp, + &p_spq->completion_pending, + list, struct ecore_spq_entry) { + if (p_ent->elem.hdr.echo == echo) { + OSAL_LIST_REMOVE_ENTRY(&p_ent->list, + &p_spq->completion_pending); + + /* Avoid overriding of SPQ entries when getting + * out-of-order completions, by marking the completions + * in a bitmap and increasing the chain consumer only + * for the first successive completed entries. 
+ */ + SPQ_COMP_BMAP_SET_BIT(p_spq, echo); + while (SPQ_COMP_BMAP_TEST_BIT(p_spq, + p_spq->comp_bitmap_idx)) { + SPQ_COMP_BMAP_CLEAR_BIT(p_spq, + p_spq->comp_bitmap_idx); + p_spq->comp_bitmap_idx++; + ecore_chain_return_produced(&p_spq->chain); + } + + p_spq->comp_count++; + found = p_ent; + break; + } + + /* This is debug and should be relatively uncommon - depends + * on scenarios which have mutliple per-PF sent ramrods. + */ + DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, + "Got completion for echo %04x - doesn't match" + " echo %04x in completion pending list\n", + OSAL_LE16_TO_CPU(echo), + OSAL_LE16_TO_CPU(p_ent->elem.hdr.echo)); + } + + /* Release lock before callback, as callback may post + * an additional ramrod. + */ + OSAL_SPIN_UNLOCK(&p_spq->lock); + + if (!found) { + DP_NOTICE(p_hwfn, true, + "Failed to find an entry this" + " EQE [echo %04x] completes\n", + OSAL_LE16_TO_CPU(echo)); + return ECORE_EXISTS; + } + + DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, + "Complete EQE [echo %04x]: func %p cookie %p)\n", + OSAL_LE16_TO_CPU(echo), + p_ent->comp_cb.function, p_ent->comp_cb.cookie); + if (found->comp_cb.function) + found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data, + fw_return_code); + else + DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, + "Got a completion without a callback function\n"); + + if ((found->comp_mode != ECORE_SPQ_MODE_EBLOCK) || + (found->queue == &p_spq->unlimited_pending)) + /* EBLOCK is responsible for returning its own entry into the + * free list, unless it originally added the entry into the + * unlimited pending list. + */ + ecore_spq_return_entry(p_hwfn, found); + + /* Attempt to post pending requests */ + OSAL_SPIN_LOCK(&p_spq->lock); + rc = ecore_spq_pend_post(p_hwfn); + OSAL_SPIN_UNLOCK(&p_spq->lock); + + return rc; +} + +enum _ecore_status_t ecore_consq_alloc(struct ecore_hwfn *p_hwfn) +{ + struct ecore_consq *p_consq; + + /* Allocate ConsQ struct */ + p_consq = + OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_consq)); + if (!p_consq) { + DP_NOTICE(p_hwfn, false, + "Failed to allocate `struct ecore_consq'\n"); + return ECORE_NOMEM; + } + + /* Allocate and initialize EQ chain */ + if (ecore_chain_alloc(p_hwfn->p_dev, + ECORE_CHAIN_USE_TO_PRODUCE, + ECORE_CHAIN_MODE_PBL, + ECORE_CHAIN_CNT_TYPE_U16, + ECORE_CHAIN_PAGE_SIZE / 0x80, + 0x80, + &p_consq->chain, OSAL_NULL) != ECORE_SUCCESS) { + DP_NOTICE(p_hwfn, false, "Failed to allocate consq chain"); + goto consq_allocate_fail; + } + + p_hwfn->p_consq = p_consq; + return ECORE_SUCCESS; + +consq_allocate_fail: + OSAL_FREE(p_hwfn->p_dev, p_consq); + return ECORE_NOMEM; +} + +void ecore_consq_setup(struct ecore_hwfn *p_hwfn) +{ + ecore_chain_reset(&p_hwfn->p_consq->chain); +} + +void ecore_consq_free(struct ecore_hwfn *p_hwfn) +{ + if (!p_hwfn->p_consq) + return; + + ecore_chain_free(p_hwfn->p_dev, &p_hwfn->p_consq->chain); + OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_consq); +} diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_spq.h b/src/spdk/dpdk/drivers/net/qede/base/ecore_spq.h new file mode 100644 index 000000000..6142c399a --- /dev/null +++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_spq.h @@ -0,0 +1,313 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. + * All rights reserved. 
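The completion path above tolerates out-of-order EQE completions: each completed echo sets a bit in the completion bitmap, and ring slots are returned to the chain only while a contiguous run of set bits starting at comp_bitmap_idx is found. A self-contained sketch of the same bookkeeping using plain C bit operations; the ring size, structure and function names are assumptions for illustration only:

#include <stdint.h>

#define RING_SIZE 64	/* assumed ring size so one unsigned long covers it */

struct ooo_tracker {
	unsigned long bmap;	/* one bit per ring slot */
	uint16_t next_idx;	/* analogue of comp_bitmap_idx */
};

/* Mark 'echo' complete; return how many ring slots become reclaimable,
 * i.e. how often the driver above would call ecore_chain_return_produced().
 */
static unsigned int complete_echo(struct ooo_tracker *t, uint16_t echo)
{
	unsigned int freed = 0;

	t->bmap |= 1UL << (echo % RING_SIZE);

	/* Advance only over the first contiguous run of completed slots. */
	while (t->bmap & (1UL << (t->next_idx % RING_SIZE))) {
		t->bmap &= ~(1UL << (t->next_idx % RING_SIZE));
		t->next_idx++;
		freed++;
	}

	return freed;
}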
+ * www.cavium.com + */ + +#ifndef __ECORE_SPQ_H__ +#define __ECORE_SPQ_H__ + +#include "ecore_hsi_common.h" +#include "ecore_status.h" +#include "ecore_hsi_eth.h" +#include "ecore_chain.h" +#include "ecore_sp_api.h" + +union ramrod_data { + struct pf_start_ramrod_data pf_start; + struct pf_update_ramrod_data pf_update; + struct rl_update_ramrod_data rl_update; + struct rx_queue_start_ramrod_data rx_queue_start; + struct rx_queue_update_ramrod_data rx_queue_update; + struct rx_queue_stop_ramrod_data rx_queue_stop; + struct tx_queue_start_ramrod_data tx_queue_start; + struct tx_queue_stop_ramrod_data tx_queue_stop; + struct vport_start_ramrod_data vport_start; + struct vport_stop_ramrod_data vport_stop; + struct rx_update_gft_filter_data rx_update_gft; + struct vport_update_ramrod_data vport_update; + struct core_rx_start_ramrod_data core_rx_queue_start; + struct core_rx_stop_ramrod_data core_rx_queue_stop; + struct core_tx_start_ramrod_data core_tx_queue_start; + struct core_tx_stop_ramrod_data core_tx_queue_stop; + struct vport_filter_update_ramrod_data vport_filter_update; + + struct vf_start_ramrod_data vf_start; + struct vf_stop_ramrod_data vf_stop; +}; + +#define EQ_MAX_CREDIT 0xffffffff + +enum spq_priority { + ECORE_SPQ_PRIORITY_NORMAL, + ECORE_SPQ_PRIORITY_HIGH, +}; + +union ecore_spq_req_comp { + struct ecore_spq_comp_cb cb; + u64 *done_addr; +}; + +/* SPQ_MODE_EBLOCK */ +struct ecore_spq_comp_done { + u64 done; + u8 fw_return_code; +}; + +struct ecore_spq_entry { + osal_list_entry_t list; + + u8 flags; + + /* HSI slow path element */ + struct slow_path_element elem; + + union ramrod_data ramrod; + + enum spq_priority priority; + + /* pending queue for this entry */ + osal_list_t *queue; + + enum spq_mode comp_mode; + struct ecore_spq_comp_cb comp_cb; + struct ecore_spq_comp_done comp_done; /* SPQ_MODE_EBLOCK */ +}; + +struct ecore_eq { + struct ecore_chain chain; + u8 eq_sb_index; /* index within the SB */ + __le16 *p_fw_cons; /* ptr to index value */ +}; + +struct ecore_consq { + struct ecore_chain chain; +}; + +typedef enum _ecore_status_t +(*ecore_spq_async_comp_cb)(struct ecore_hwfn *p_hwfn, + u8 opcode, + u16 echo, + union event_ring_data *data, + u8 fw_return_code); + +enum _ecore_status_t +ecore_spq_register_async_cb(struct ecore_hwfn *p_hwfn, + enum protocol_type protocol_id, + ecore_spq_async_comp_cb cb); + +void +ecore_spq_unregister_async_cb(struct ecore_hwfn *p_hwfn, + enum protocol_type protocol_id); + +struct ecore_spq { + osal_spinlock_t lock; + + osal_list_t unlimited_pending; + osal_list_t pending; + osal_list_t completion_pending; + osal_list_t free_pool; + + struct ecore_chain chain; + + /* allocated dma-able memory for spq entries (+ramrod data) */ + dma_addr_t p_phys; + struct ecore_spq_entry *p_virt; + + /* SPQ max sleep iterations used in __ecore_spq_block() */ + u32 block_sleep_max_iter; + + /* Bitmap for handling out-of-order completions */ +#define SPQ_RING_SIZE \ + (CORE_SPQE_PAGE_SIZE_BYTES / sizeof(struct slow_path_element)) +/* BITS_PER_LONG */ +#define SPQ_COMP_BMAP_SIZE (SPQ_RING_SIZE / (sizeof(unsigned long) * 8)) + unsigned long p_comp_bitmap[SPQ_COMP_BMAP_SIZE]; + u8 comp_bitmap_idx; +#define SPQ_COMP_BMAP_SET_BIT(p_spq, idx) \ + (OSAL_SET_BIT(((idx) % SPQ_RING_SIZE), (p_spq)->p_comp_bitmap)) + +#define SPQ_COMP_BMAP_CLEAR_BIT(p_spq, idx) \ + (OSAL_CLEAR_BIT(((idx) % SPQ_RING_SIZE), (p_spq)->p_comp_bitmap)) + +#define SPQ_COMP_BMAP_TEST_BIT(p_spq, idx) \ + (OSAL_TEST_BIT(((idx) % SPQ_RING_SIZE), (p_spq)->p_comp_bitmap)) + + /* Statistics */ + 
u32 unlimited_pending_count; + u32 normal_count; + u32 high_count; + u32 comp_sent_count; + u32 comp_count; + + u32 cid; + + u32 db_addr_offset; + struct core_db_data db_data; + ecore_spq_async_comp_cb async_comp_cb[MAX_PROTOCOL_TYPE]; +}; + +struct ecore_port; +struct ecore_hwfn; + +/** + * @brief ecore_set_spq_block_timeout - calculates the maximum sleep + * iterations used in __ecore_spq_block(); + * + * @param p_hwfn + * @param spq_timeout_ms + */ +void ecore_set_spq_block_timeout(struct ecore_hwfn *p_hwfn, + u32 spq_timeout_ms); + +/** + * @brief ecore_spq_post - Posts a Slow hwfn request to FW, or lacking that + * Pends it to the future list. + * + * @param p_hwfn + * @param p_req + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_spq_post(struct ecore_hwfn *p_hwfn, + struct ecore_spq_entry *p_ent, + u8 *fw_return_code); + +/** + * @brief ecore_spq_allocate - Alloocates & initializes the SPQ and EQ. + * + * @param p_hwfn + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_spq_alloc(struct ecore_hwfn *p_hwfn); + +/** + * @brief ecore_spq_setup - Reset the SPQ to its start state. + * + * @param p_hwfn + */ +void ecore_spq_setup(struct ecore_hwfn *p_hwfn); + +/** + * @brief ecore_spq_deallocate - Deallocates the given SPQ struct. + * + * @param p_hwfn + */ +void ecore_spq_free(struct ecore_hwfn *p_hwfn); + +/** + * @brief ecore_spq_get_entry - Obtain an entrry from the spq + * free pool list. + * + * + * + * @param p_hwfn + * @param pp_ent + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t +ecore_spq_get_entry(struct ecore_hwfn *p_hwfn, + struct ecore_spq_entry **pp_ent); + +/** + * @brief ecore_spq_return_entry - Return an entry to spq free + * pool list + * + * @param p_hwfn + * @param p_ent + */ +void ecore_spq_return_entry(struct ecore_hwfn *p_hwfn, + struct ecore_spq_entry *p_ent); +/** + * @brief ecore_eq_allocate - Allocates & initializes an EQ struct + * + * @param p_hwfn + * @param num_elem number of elements in the eq + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_eq_alloc(struct ecore_hwfn *p_hwfn, u16 num_elem); + +/** + * @brief ecore_eq_setup - Reset the EQ to its start state. + * + * @param p_hwfn + */ +void ecore_eq_setup(struct ecore_hwfn *p_hwfn); + +/** + * @brief ecore_eq_free - deallocates the given EQ struct. 
+ * + * @param p_hwfn + */ +void ecore_eq_free(struct ecore_hwfn *p_hwfn); + +/** + * @brief ecore_eq_prod_update - update the FW with default EQ producer + * + * @param p_hwfn + * @param prod + */ +void ecore_eq_prod_update(struct ecore_hwfn *p_hwfn, + u16 prod); + +/** + * @brief ecore_eq_completion - Completes currently pending EQ elements + * + * @param p_hwfn + * @param cookie + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_eq_completion(struct ecore_hwfn *p_hwfn, + void *cookie); + +/** + * @brief ecore_spq_completion - Completes a single event + * + * @param p_hwfn + * @param echo - echo value from cookie (used for determining completion) + * @param p_data - data from cookie (used in callback function if applicable) + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_spq_completion(struct ecore_hwfn *p_hwfn, + __le16 echo, + u8 fw_return_code, + union event_ring_data *p_data); + +/** + * @brief ecore_spq_get_cid - Given p_hwfn, return cid for the hwfn's SPQ + * + * @param p_hwfn + * + * @return u32 - SPQ CID + */ +u32 ecore_spq_get_cid(struct ecore_hwfn *p_hwfn); + +/** + * @brief ecore_consq_alloc - Allocates & initializes an ConsQ struct + * + * @param p_hwfn + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_consq_alloc(struct ecore_hwfn *p_hwfn); + +/** + * @brief ecore_consq_setup - Reset the ConsQ to its start state. + * + * @param p_hwfn + */ +void ecore_consq_setup(struct ecore_hwfn *p_hwfn); + +/** + * @brief ecore_consq_free - deallocates the given ConsQ struct. + * + * @param p_hwfn + */ +void ecore_consq_free(struct ecore_hwfn *p_hwfn); + +#endif /* __ECORE_SPQ_H__ */ diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_sriov.c b/src/spdk/dpdk/drivers/net/qede/base/ecore_sriov.c new file mode 100644 index 000000000..e60257e19 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_sriov.c @@ -0,0 +1,5072 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. + * All rights reserved. 
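Taking the API above together, a ramrod's life cycle is: take an entry from the SPQ free pool, fill the slow-path element and ramrod payload, choose a priority and completion mode, then post it; with EBLOCK mode the post blocks until the matching EQ completion arrives. A schematic, hedged sketch of that flow (the payload setup is elided on purpose; real callers go through ecore_sp_init_request() and per-ramrod helpers, and this is not a drop-in caller):

static enum _ecore_status_t post_example_ramrod(struct ecore_hwfn *p_hwfn)
{
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	enum _ecore_status_t rc;
	u8 fw_ret = 0;

	rc = ecore_spq_get_entry(p_hwfn, &p_ent);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Fill p_ent->elem and p_ent->ramrod here; in the driver this is
	 * done by ecore_sp_init_request() and the specific ramrod code.
	 */
	p_ent->priority = ECORE_SPQ_PRIORITY_NORMAL;
	p_ent->comp_mode = ECORE_SPQ_MODE_EBLOCK; /* block until FW answers */

	/* With EBLOCK, returns only after completion (or timeout);
	 * fw_ret then carries the FW return code.
	 */
	return ecore_spq_post(p_hwfn, p_ent, &fw_ret);
}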
+ * www.cavium.com + */ + +#include "bcm_osal.h" +#include "ecore.h" +#include "reg_addr.h" +#include "ecore_sriov.h" +#include "ecore_status.h" +#include "ecore_hw.h" +#include "ecore_hw_defs.h" +#include "ecore_int.h" +#include "ecore_hsi_eth.h" +#include "ecore_l2.h" +#include "ecore_vfpf_if.h" +#include "ecore_rt_defs.h" +#include "ecore_init_ops.h" +#include "ecore_gtt_reg_addr.h" +#include "ecore_iro.h" +#include "ecore_mcp.h" +#include "ecore_cxt.h" +#include "ecore_vf.h" +#include "ecore_init_fw_funcs.h" +#include "ecore_sp_commands.h" + +static enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn, + u8 opcode, + __le16 echo, + union event_ring_data *data, + u8 fw_return_code); + +const char *qede_ecore_channel_tlvs_string[] = { + "CHANNEL_TLV_NONE", /* ends tlv sequence */ + "CHANNEL_TLV_ACQUIRE", + "CHANNEL_TLV_VPORT_START", + "CHANNEL_TLV_VPORT_UPDATE", + "CHANNEL_TLV_VPORT_TEARDOWN", + "CHANNEL_TLV_START_RXQ", + "CHANNEL_TLV_START_TXQ", + "CHANNEL_TLV_STOP_RXQ", + "CHANNEL_TLV_STOP_TXQ", + "CHANNEL_TLV_UPDATE_RXQ", + "CHANNEL_TLV_INT_CLEANUP", + "CHANNEL_TLV_CLOSE", + "CHANNEL_TLV_RELEASE", + "CHANNEL_TLV_LIST_END", + "CHANNEL_TLV_UCAST_FILTER", + "CHANNEL_TLV_VPORT_UPDATE_ACTIVATE", + "CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH", + "CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP", + "CHANNEL_TLV_VPORT_UPDATE_MCAST", + "CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM", + "CHANNEL_TLV_VPORT_UPDATE_RSS", + "CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN", + "CHANNEL_TLV_VPORT_UPDATE_SGE_TPA", + "CHANNEL_TLV_UPDATE_TUNN_PARAM", + "CHANNEL_TLV_COALESCE_UPDATE", + "CHANNEL_TLV_QID", + "CHANNEL_TLV_COALESCE_READ", + "CHANNEL_TLV_BULLETIN_UPDATE_MAC", + "CHANNEL_TLV_UPDATE_MTU", + "CHANNEL_TLV_RDMA_ACQUIRE", + "CHANNEL_TLV_RDMA_START", + "CHANNEL_TLV_RDMA_STOP", + "CHANNEL_TLV_RDMA_ADD_USER", + "CHANNEL_TLV_RDMA_REMOVE_USER", + "CHANNEL_TLV_RDMA_QUERY_COUNTERS", + "CHANNEL_TLV_RDMA_ALLOC_TID", + "CHANNEL_TLV_RDMA_REGISTER_TID", + "CHANNEL_TLV_RDMA_DEREGISTER_TID", + "CHANNEL_TLV_RDMA_FREE_TID", + "CHANNEL_TLV_RDMA_CREATE_CQ", + "CHANNEL_TLV_RDMA_RESIZE_CQ", + "CHANNEL_TLV_RDMA_DESTROY_CQ", + "CHANNEL_TLV_RDMA_CREATE_QP", + "CHANNEL_TLV_RDMA_MODIFY_QP", + "CHANNEL_TLV_RDMA_QUERY_QP", + "CHANNEL_TLV_RDMA_DESTROY_QP", + "CHANNEL_TLV_RDMA_CREATE_SRQ", + "CHANNEL_TLV_RDMA_MODIFY_SRQ", + "CHANNEL_TLV_RDMA_DESTROY_SRQ", + "CHANNEL_TLV_RDMA_QUERY_PORT", + "CHANNEL_TLV_RDMA_QUERY_DEVICE", + "CHANNEL_TLV_RDMA_IWARP_CONNECT", + "CHANNEL_TLV_RDMA_IWARP_ACCEPT", + "CHANNEL_TLV_RDMA_IWARP_CREATE_LISTEN", + "CHANNEL_TLV_RDMA_IWARP_DESTROY_LISTEN", + "CHANNEL_TLV_RDMA_IWARP_PAUSE_LISTEN", + "CHANNEL_TLV_RDMA_IWARP_REJECT", + "CHANNEL_TLV_RDMA_IWARP_SEND_RTR", + "CHANNEL_TLV_ESTABLISH_LL2_CONN", + "CHANNEL_TLV_TERMINATE_LL2_CONN", + "CHANNEL_TLV_ASYNC_EVENT", + "CHANNEL_TLV_SOFT_FLR", + "CHANNEL_TLV_MAX" +}; + +static u8 ecore_vf_calculate_legacy(struct ecore_vf_info *p_vf) +{ + u8 legacy = 0; + + if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor == + ETH_HSI_VER_NO_PKT_LEN_TUNN) + legacy |= ECORE_QCID_LEGACY_VF_RX_PROD; + + if (!(p_vf->acquire.vfdev_info.capabilities & + VFPF_ACQUIRE_CAP_QUEUE_QIDS)) + legacy |= ECORE_QCID_LEGACY_VF_CID; + + return legacy; +} + +/* IOV ramrods */ +static enum _ecore_status_t ecore_sp_vf_start(struct ecore_hwfn *p_hwfn, + struct ecore_vf_info *p_vf) +{ + struct vf_start_ramrod_data *p_ramrod = OSAL_NULL; + struct ecore_spq_entry *p_ent = OSAL_NULL; + struct ecore_sp_init_data init_data; + enum _ecore_status_t rc = ECORE_NOTIMPL; + u8 fp_minor; + + /* Get SPQ entry */ + 
OSAL_MEMSET(&init_data, 0, sizeof(init_data)); + init_data.cid = ecore_spq_get_cid(p_hwfn); + init_data.opaque_fid = p_vf->opaque_fid; + init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK; + + rc = ecore_sp_init_request(p_hwfn, &p_ent, + COMMON_RAMROD_VF_START, + PROTOCOLID_COMMON, &init_data); + if (rc != ECORE_SUCCESS) + return rc; + + p_ramrod = &p_ent->ramrod.vf_start; + + p_ramrod->vf_id = GET_FIELD(p_vf->concrete_fid, PXP_CONCRETE_FID_VFID); + p_ramrod->opaque_fid = OSAL_CPU_TO_LE16(p_vf->opaque_fid); + + switch (p_hwfn->hw_info.personality) { + case ECORE_PCI_ETH: + p_ramrod->personality = PERSONALITY_ETH; + break; + case ECORE_PCI_ETH_ROCE: + case ECORE_PCI_ETH_IWARP: + p_ramrod->personality = PERSONALITY_RDMA_AND_ETH; + break; + default: + DP_NOTICE(p_hwfn, true, "Unknown VF personality %d\n", + p_hwfn->hw_info.personality); + return ECORE_INVAL; + } + + fp_minor = p_vf->acquire.vfdev_info.eth_fp_hsi_minor; + if (fp_minor > ETH_HSI_VER_MINOR && + fp_minor != ETH_HSI_VER_NO_PKT_LEN_TUNN) { + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "VF [%d] - Requested fp hsi %02x.%02x which is" + " slightly newer than PF's %02x.%02x; Configuring" + " PFs version\n", + p_vf->abs_vf_id, + ETH_HSI_VER_MAJOR, fp_minor, + ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR); + fp_minor = ETH_HSI_VER_MINOR; + } + + p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR; + p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = fp_minor; + + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "VF[%d] - Starting using HSI %02x.%02x\n", + p_vf->abs_vf_id, ETH_HSI_VER_MAJOR, fp_minor); + + return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); +} + +static enum _ecore_status_t ecore_sp_vf_stop(struct ecore_hwfn *p_hwfn, + u32 concrete_vfid, + u16 opaque_vfid) +{ + struct vf_stop_ramrod_data *p_ramrod = OSAL_NULL; + struct ecore_spq_entry *p_ent = OSAL_NULL; + struct ecore_sp_init_data init_data; + enum _ecore_status_t rc = ECORE_NOTIMPL; + + /* Get SPQ entry */ + OSAL_MEMSET(&init_data, 0, sizeof(init_data)); + init_data.cid = ecore_spq_get_cid(p_hwfn); + init_data.opaque_fid = opaque_vfid; + init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK; + + rc = ecore_sp_init_request(p_hwfn, &p_ent, + COMMON_RAMROD_VF_STOP, + PROTOCOLID_COMMON, &init_data); + if (rc != ECORE_SUCCESS) + return rc; + + p_ramrod = &p_ent->ramrod.vf_stop; + + p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID); + + return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); +} + +bool ecore_iov_is_valid_vfid(struct ecore_hwfn *p_hwfn, int rel_vf_id, + bool b_enabled_only, bool b_non_malicious) +{ + if (!p_hwfn->pf_iov_info) { + DP_NOTICE(p_hwfn->p_dev, true, "No iov info\n"); + return false; + } + + if ((rel_vf_id >= p_hwfn->p_dev->p_iov_info->total_vfs) || + (rel_vf_id < 0)) + return false; + + if ((!p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_init) && + b_enabled_only) + return false; + + if ((p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_malicious) && + b_non_malicious) + return false; + + return true; +} + +struct ecore_vf_info *ecore_iov_get_vf_info(struct ecore_hwfn *p_hwfn, + u16 relative_vf_id, + bool b_enabled_only) +{ + struct ecore_vf_info *vf = OSAL_NULL; + + if (!p_hwfn->pf_iov_info) { + DP_NOTICE(p_hwfn->p_dev, true, "No iov info\n"); + return OSAL_NULL; + } + + if (ecore_iov_is_valid_vfid(p_hwfn, relative_vf_id, + b_enabled_only, false)) + vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id]; + else + DP_ERR(p_hwfn, "ecore_iov_get_vf_info: VF[%d] is not enabled\n", + relative_vf_id); + + return vf; +} + +static struct ecore_queue_cid * 
+ecore_iov_get_vf_rx_queue_cid(struct ecore_vf_queue *p_queue) +{ + u32 i; + + for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) { + if (p_queue->cids[i].p_cid && + !p_queue->cids[i].b_is_tx) + return p_queue->cids[i].p_cid; + } + + return OSAL_NULL; +} + +enum ecore_iov_validate_q_mode { + ECORE_IOV_VALIDATE_Q_NA, + ECORE_IOV_VALIDATE_Q_ENABLE, + ECORE_IOV_VALIDATE_Q_DISABLE, +}; + +static bool ecore_iov_validate_queue_mode(struct ecore_vf_info *p_vf, + u16 qid, + enum ecore_iov_validate_q_mode mode, + bool b_is_tx) +{ + u32 i; + + if (mode == ECORE_IOV_VALIDATE_Q_NA) + return true; + + for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) { + struct ecore_vf_queue_cid *p_qcid; + + p_qcid = &p_vf->vf_queues[qid].cids[i]; + + if (p_qcid->p_cid == OSAL_NULL) + continue; + + if (p_qcid->b_is_tx != b_is_tx) + continue; + + /* Found. It's enabled. */ + return (mode == ECORE_IOV_VALIDATE_Q_ENABLE); + } + + /* In case we haven't found any valid cid, then its disabled */ + return (mode == ECORE_IOV_VALIDATE_Q_DISABLE); +} + +static bool ecore_iov_validate_rxq(struct ecore_hwfn *p_hwfn, + struct ecore_vf_info *p_vf, + u16 rx_qid, + enum ecore_iov_validate_q_mode mode) +{ + if (rx_qid >= p_vf->num_rxqs) { + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "VF[0x%02x] - can't touch Rx queue[%04x];" + " Only 0x%04x are allocated\n", + p_vf->abs_vf_id, rx_qid, p_vf->num_rxqs); + return false; + } + + return ecore_iov_validate_queue_mode(p_vf, rx_qid, mode, false); +} + +static bool ecore_iov_validate_txq(struct ecore_hwfn *p_hwfn, + struct ecore_vf_info *p_vf, + u16 tx_qid, + enum ecore_iov_validate_q_mode mode) +{ + if (tx_qid >= p_vf->num_txqs) { + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "VF[0x%02x] - can't touch Tx queue[%04x];" + " Only 0x%04x are allocated\n", + p_vf->abs_vf_id, tx_qid, p_vf->num_txqs); + return false; + } + + return ecore_iov_validate_queue_mode(p_vf, tx_qid, mode, true); +} + +static bool ecore_iov_validate_sb(struct ecore_hwfn *p_hwfn, + struct ecore_vf_info *p_vf, + u16 sb_idx) +{ + int i; + + for (i = 0; i < p_vf->num_sbs; i++) + if (p_vf->igu_sbs[i] == sb_idx) + return true; + + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "VF[0%02x] - tried using sb_idx %04x which doesn't exist as" + " one of its 0x%02x SBs\n", + p_vf->abs_vf_id, sb_idx, p_vf->num_sbs); + + return false; +} + +/* Is there at least 1 queue open? 
*/ +static bool ecore_iov_validate_active_rxq(struct ecore_vf_info *p_vf) +{ + u8 i; + + for (i = 0; i < p_vf->num_rxqs; i++) + if (ecore_iov_validate_queue_mode(p_vf, i, + ECORE_IOV_VALIDATE_Q_ENABLE, + false)) + return true; + + return false; +} + +static bool ecore_iov_validate_active_txq(struct ecore_vf_info *p_vf) +{ + u8 i; + + for (i = 0; i < p_vf->num_txqs; i++) + if (ecore_iov_validate_queue_mode(p_vf, i, + ECORE_IOV_VALIDATE_Q_ENABLE, + true)) + return true; + + return false; +} + +enum _ecore_status_t ecore_iov_post_vf_bulletin(struct ecore_hwfn *p_hwfn, + int vfid, + struct ecore_ptt *p_ptt) +{ + struct ecore_bulletin_content *p_bulletin; + int crc_size = sizeof(p_bulletin->crc); + struct dmae_params params; + struct ecore_vf_info *p_vf; + + p_vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true); + if (!p_vf) + return ECORE_INVAL; + + /* TODO - check VF is in a state where it can accept message */ + if (!p_vf->vf_bulletin) + return ECORE_INVAL; + + p_bulletin = p_vf->bulletin.p_virt; + + /* Increment bulletin board version and compute crc */ + p_bulletin->version++; + p_bulletin->crc = OSAL_CRC32(0, (u8 *)p_bulletin + crc_size, + p_vf->bulletin.size - crc_size); + + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "Posting Bulletin 0x%08x to VF[%d] (CRC 0x%08x)\n", + p_bulletin->version, p_vf->relative_vf_id, p_bulletin->crc); + + /* propagate bulletin board via dmae to vm memory */ + OSAL_MEMSET(¶ms, 0, sizeof(params)); + SET_FIELD(params.flags, DMAE_PARAMS_DST_VF_VALID, 0x1); + params.dst_vf_id = p_vf->abs_vf_id; + return ecore_dmae_host2host(p_hwfn, p_ptt, p_vf->bulletin.phys, + p_vf->vf_bulletin, p_vf->bulletin.size / 4, + ¶ms); +} + +static enum _ecore_status_t ecore_iov_pci_cfg_info(struct ecore_dev *p_dev) +{ + struct ecore_hw_sriov_info *iov = p_dev->p_iov_info; + int pos = iov->pos; + + DP_VERBOSE(p_dev, ECORE_MSG_IOV, "sriov ext pos %d\n", pos); + OSAL_PCI_READ_CONFIG_WORD(p_dev, pos + PCI_SRIOV_CTRL, &iov->ctrl); + + OSAL_PCI_READ_CONFIG_WORD(p_dev, + pos + PCI_SRIOV_TOTAL_VF, &iov->total_vfs); + OSAL_PCI_READ_CONFIG_WORD(p_dev, + pos + PCI_SRIOV_INITIAL_VF, + &iov->initial_vfs); + + OSAL_PCI_READ_CONFIG_WORD(p_dev, pos + PCI_SRIOV_NUM_VF, &iov->num_vfs); + if (iov->num_vfs) { + /* @@@TODO - in future we might want to add an OSAL here to + * allow each OS to decide on its own how to act. + */ + DP_VERBOSE(p_dev, ECORE_MSG_IOV, + "Number of VFs are already set to non-zero value." + " Ignoring PCI configuration value\n"); + iov->num_vfs = 0; + } + + OSAL_PCI_READ_CONFIG_WORD(p_dev, + pos + PCI_SRIOV_VF_OFFSET, &iov->offset); + + OSAL_PCI_READ_CONFIG_WORD(p_dev, + pos + PCI_SRIOV_VF_STRIDE, &iov->stride); + + OSAL_PCI_READ_CONFIG_WORD(p_dev, + pos + PCI_SRIOV_VF_DID, &iov->vf_device_id); + + OSAL_PCI_READ_CONFIG_DWORD(p_dev, + pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz); + + OSAL_PCI_READ_CONFIG_DWORD(p_dev, pos + PCI_SRIOV_CAP, &iov->cap); + + OSAL_PCI_READ_CONFIG_BYTE(p_dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link); + + DP_VERBOSE(p_dev, ECORE_MSG_IOV, "IOV info: nres %d, cap 0x%x," + "ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d," + " stride %d, page size 0x%x\n", + iov->nres, iov->cap, iov->ctrl, + iov->total_vfs, iov->initial_vfs, iov->nr_virtfn, + iov->offset, iov->stride, iov->pgsz); + + /* Some sanity checks */ + if (iov->num_vfs > NUM_OF_VFS(p_dev) || + iov->total_vfs > NUM_OF_VFS(p_dev)) { + /* This can happen only due to a bug. 
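ecore_iov_post_vf_bulletin() above bumps the bulletin version and computes the CRC over the bulletin body while skipping the leading crc field itself, so the VF can validate the snapshot it copies from PF memory. A minimal standalone illustration of that "skip the checksum field" layout; the struct, example_crc32() and field names are placeholders for OSAL_CRC32() and the real bulletin content:

#include <stddef.h>
#include <stdint.h>

struct bulletin_hdr {
	uint32_t crc;		/* covers everything after this field */
	uint32_t version;	/* bumped on every post */
	/* ... remaining bulletin content ... */
};

/* placeholder prototype standing in for OSAL_CRC32() */
uint32_t example_crc32(uint32_t seed, const uint8_t *buf, size_t len);

static void seal_bulletin(struct bulletin_hdr *b, size_t total_size)
{
	const size_t crc_size = sizeof(b->crc);

	b->version++;
	b->crc = example_crc32(0, (const uint8_t *)b + crc_size,
			       total_size - crc_size);
}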
In this case we set + * num_vfs to zero to avoid memory corruption in the code that + * assumes max number of vfs + */ + DP_NOTICE(p_dev, false, + "IOV: Unexpected number of vfs set: %d" + " setting num_vf to zero\n", + iov->num_vfs); + + iov->num_vfs = 0; + iov->total_vfs = 0; + } + + return ECORE_SUCCESS; +} + +static void ecore_iov_setup_vfdb(struct ecore_hwfn *p_hwfn) +{ + struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info; + struct ecore_pf_iov *p_iov_info = p_hwfn->pf_iov_info; + struct ecore_bulletin_content *p_bulletin_virt; + dma_addr_t req_p, rply_p, bulletin_p; + union pfvf_tlvs *p_reply_virt_addr; + union vfpf_tlvs *p_req_virt_addr; + u8 idx = 0; + + OSAL_MEMSET(p_iov_info->vfs_array, 0, sizeof(p_iov_info->vfs_array)); + + p_req_virt_addr = p_iov_info->mbx_msg_virt_addr; + req_p = p_iov_info->mbx_msg_phys_addr; + p_reply_virt_addr = p_iov_info->mbx_reply_virt_addr; + rply_p = p_iov_info->mbx_reply_phys_addr; + p_bulletin_virt = p_iov_info->p_bulletins; + bulletin_p = p_iov_info->bulletins_phys; + if (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) { + DP_ERR(p_hwfn, + "ecore_iov_setup_vfdb called without alloc mem first\n"); + return; + } + + for (idx = 0; idx < p_iov->total_vfs; idx++) { + struct ecore_vf_info *vf = &p_iov_info->vfs_array[idx]; + u32 concrete; + + vf->vf_mbx.req_virt = p_req_virt_addr + idx; + vf->vf_mbx.req_phys = req_p + idx * sizeof(union vfpf_tlvs); + vf->vf_mbx.reply_virt = p_reply_virt_addr + idx; + vf->vf_mbx.reply_phys = rply_p + idx * sizeof(union pfvf_tlvs); + +#ifdef CONFIG_ECORE_SW_CHANNEL + vf->vf_mbx.sw_mbx.request_size = sizeof(union vfpf_tlvs); + vf->vf_mbx.sw_mbx.mbx_state = VF_PF_WAIT_FOR_START_REQUEST; +#endif + vf->state = VF_STOPPED; + vf->b_init = false; + + vf->bulletin.phys = idx * + sizeof(struct ecore_bulletin_content) + bulletin_p; + vf->bulletin.p_virt = p_bulletin_virt + idx; + vf->bulletin.size = sizeof(struct ecore_bulletin_content); + + vf->relative_vf_id = idx; + vf->abs_vf_id = idx + p_iov->first_vf_in_pf; + concrete = ecore_vfid_to_concrete(p_hwfn, vf->abs_vf_id); + vf->concrete_fid = concrete; + /* TODO - need to devise a better way of getting opaque */ + vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) | + (vf->abs_vf_id << 8); + + vf->num_mac_filters = ECORE_ETH_VF_NUM_MAC_FILTERS; + vf->num_vlan_filters = ECORE_ETH_VF_NUM_VLAN_FILTERS; + } +} + +static enum _ecore_status_t ecore_iov_allocate_vfdb(struct ecore_hwfn *p_hwfn) +{ + struct ecore_pf_iov *p_iov_info = p_hwfn->pf_iov_info; + void **p_v_addr; + u16 num_vfs = 0; + + num_vfs = p_hwfn->p_dev->p_iov_info->total_vfs; + + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "ecore_iov_allocate_vfdb for %d VFs\n", num_vfs); + + /* Allocate PF Mailbox buffer (per-VF) */ + p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs; + p_v_addr = &p_iov_info->mbx_msg_virt_addr; + *p_v_addr = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, + &p_iov_info->mbx_msg_phys_addr, + p_iov_info->mbx_msg_size); + if (!*p_v_addr) + return ECORE_NOMEM; + + /* Allocate PF Mailbox Reply buffer (per-VF) */ + p_iov_info->mbx_reply_size = sizeof(union pfvf_tlvs) * num_vfs; + p_v_addr = &p_iov_info->mbx_reply_virt_addr; + *p_v_addr = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, + &p_iov_info->mbx_reply_phys_addr, + p_iov_info->mbx_reply_size); + if (!*p_v_addr) + return ECORE_NOMEM; + + p_iov_info->bulletins_size = sizeof(struct ecore_bulletin_content) * + num_vfs; + p_v_addr = &p_iov_info->p_bulletins; + *p_v_addr = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, + &p_iov_info->bulletins_phys, + 
p_iov_info->bulletins_size); + if (!*p_v_addr) + return ECORE_NOMEM; + + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "PF's Requests mailbox [%p virt 0x%lx phys], " + "Response mailbox [%p virt 0x%lx phys] Bulletinsi" + " [%p virt 0x%lx phys]\n", + p_iov_info->mbx_msg_virt_addr, + (unsigned long)p_iov_info->mbx_msg_phys_addr, + p_iov_info->mbx_reply_virt_addr, + (unsigned long)p_iov_info->mbx_reply_phys_addr, + p_iov_info->p_bulletins, + (unsigned long)p_iov_info->bulletins_phys); + + return ECORE_SUCCESS; +} + +static void ecore_iov_free_vfdb(struct ecore_hwfn *p_hwfn) +{ + struct ecore_pf_iov *p_iov_info = p_hwfn->pf_iov_info; + + if (p_hwfn->pf_iov_info->mbx_msg_virt_addr) + OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, + p_iov_info->mbx_msg_virt_addr, + p_iov_info->mbx_msg_phys_addr, + p_iov_info->mbx_msg_size); + + if (p_hwfn->pf_iov_info->mbx_reply_virt_addr) + OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, + p_iov_info->mbx_reply_virt_addr, + p_iov_info->mbx_reply_phys_addr, + p_iov_info->mbx_reply_size); + + if (p_iov_info->p_bulletins) + OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, + p_iov_info->p_bulletins, + p_iov_info->bulletins_phys, + p_iov_info->bulletins_size); +} + +enum _ecore_status_t ecore_iov_alloc(struct ecore_hwfn *p_hwfn) +{ + struct ecore_pf_iov *p_sriov; + + if (!IS_PF_SRIOV(p_hwfn)) { + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "No SR-IOV - no need for IOV db\n"); + return ECORE_SUCCESS; + } + + p_sriov = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_sriov)); + if (!p_sriov) { + DP_NOTICE(p_hwfn, false, "Failed to allocate `struct ecore_sriov'\n"); + return ECORE_NOMEM; + } + + p_hwfn->pf_iov_info = p_sriov; + + ecore_spq_register_async_cb(p_hwfn, PROTOCOLID_COMMON, + ecore_sriov_eqe_event); + + return ecore_iov_allocate_vfdb(p_hwfn); +} + +void ecore_iov_setup(struct ecore_hwfn *p_hwfn) +{ + if (!IS_PF_SRIOV(p_hwfn) || !IS_PF_SRIOV_ALLOC(p_hwfn)) + return; + + ecore_iov_setup_vfdb(p_hwfn); +} + +void ecore_iov_free(struct ecore_hwfn *p_hwfn) +{ + ecore_spq_unregister_async_cb(p_hwfn, PROTOCOLID_COMMON); + + if (IS_PF_SRIOV_ALLOC(p_hwfn)) { + ecore_iov_free_vfdb(p_hwfn); + OSAL_FREE(p_hwfn->p_dev, p_hwfn->pf_iov_info); + } +} + +void ecore_iov_free_hw_info(struct ecore_dev *p_dev) +{ + OSAL_FREE(p_dev, p_dev->p_iov_info); +} + +enum _ecore_status_t ecore_iov_hw_info(struct ecore_hwfn *p_hwfn) +{ + struct ecore_dev *p_dev = p_hwfn->p_dev; + int pos; + enum _ecore_status_t rc; + + if (IS_VF(p_hwfn->p_dev)) + return ECORE_SUCCESS; + + /* Learn the PCI configuration */ + pos = OSAL_PCI_FIND_EXT_CAPABILITY(p_hwfn->p_dev, + PCI_EXT_CAP_ID_SRIOV); + if (!pos) { + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "No PCIe IOV support\n"); + return ECORE_SUCCESS; + } + + /* Allocate a new struct for IOV information */ + /* TODO - can change to VALLOC when its available */ + p_dev->p_iov_info = OSAL_ZALLOC(p_dev, GFP_KERNEL, + sizeof(*p_dev->p_iov_info)); + if (!p_dev->p_iov_info) { + DP_NOTICE(p_hwfn, false, + "Can't support IOV due to lack of memory\n"); + return ECORE_NOMEM; + } + p_dev->p_iov_info->pos = pos; + + rc = ecore_iov_pci_cfg_info(p_dev); + if (rc) + return rc; + + /* We want PF IOV to be synonemous with the existence of p_iov_info; + * In case the capability is published but there are no VFs, simply + * de-allocate the struct. 
+ */ + if (!p_dev->p_iov_info->total_vfs) { + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "IOV capabilities, but no VFs are published\n"); + OSAL_FREE(p_dev, p_dev->p_iov_info); + return ECORE_SUCCESS; + } + + /* First VF index based on offset is tricky: + * - If ARI is supported [likely], offset - (16 - pf_id) would + * provide the number for eng0. 2nd engine Vfs would begin + * after the first engine's VFs. + * - If !ARI, VFs would start on next device. + * so offset - (256 - pf_id) would provide the number. + * Utilize the fact that (256 - pf_id) is achieved only be later + * to diffrentiate between the two. + */ + + if (p_hwfn->p_dev->p_iov_info->offset < (256 - p_hwfn->abs_pf_id)) { + u32 first = p_hwfn->p_dev->p_iov_info->offset + + p_hwfn->abs_pf_id - 16; + + p_dev->p_iov_info->first_vf_in_pf = first; + + if (ECORE_PATH_ID(p_hwfn)) + p_dev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB; + } else { + u32 first = p_hwfn->p_dev->p_iov_info->offset + + p_hwfn->abs_pf_id - 256; + + p_dev->p_iov_info->first_vf_in_pf = first; + } + + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "First VF in hwfn 0x%08x\n", + p_dev->p_iov_info->first_vf_in_pf); + + return ECORE_SUCCESS; +} + +static bool _ecore_iov_pf_sanity_check(struct ecore_hwfn *p_hwfn, int vfid, + bool b_fail_malicious) +{ + /* Check PF supports sriov */ + if (IS_VF(p_hwfn->p_dev) || !IS_ECORE_SRIOV(p_hwfn->p_dev) || + !IS_PF_SRIOV_ALLOC(p_hwfn)) + return false; + + /* Check VF validity */ + if (!ecore_iov_is_valid_vfid(p_hwfn, vfid, true, b_fail_malicious)) + return false; + + return true; +} + +bool ecore_iov_pf_sanity_check(struct ecore_hwfn *p_hwfn, int vfid) +{ + return _ecore_iov_pf_sanity_check(p_hwfn, vfid, true); +} + +void ecore_iov_set_vf_to_disable(struct ecore_dev *p_dev, + u16 rel_vf_id, u8 to_disable) +{ + struct ecore_vf_info *vf; + int i; + + for_each_hwfn(p_dev, i) { + struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; + + vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, false); + if (!vf) + continue; + + vf->to_disable = to_disable; + } +} + +void ecore_iov_set_vfs_to_disable(struct ecore_dev *p_dev, + u8 to_disable) +{ + u16 i; + + if (!IS_ECORE_SRIOV(p_dev)) + return; + + for (i = 0; i < p_dev->p_iov_info->total_vfs; i++) + ecore_iov_set_vf_to_disable(p_dev, i, to_disable); +} + +#ifndef LINUX_REMOVE +/* @@@TBD Consider taking outside of ecore... 
*/ +enum _ecore_status_t ecore_iov_set_vf_ctx(struct ecore_hwfn *p_hwfn, + u16 vf_id, + void *ctx) +{ + enum _ecore_status_t rc = ECORE_SUCCESS; + struct ecore_vf_info *vf = ecore_iov_get_vf_info(p_hwfn, vf_id, true); + + if (vf != OSAL_NULL) { + vf->ctx = ctx; +#ifdef CONFIG_ECORE_SW_CHANNEL + vf->vf_mbx.sw_mbx.mbx_state = VF_PF_WAIT_FOR_START_REQUEST; +#endif + } else { + rc = ECORE_UNKNOWN_ERROR; + } + return rc; +} +#endif + +static void ecore_iov_vf_pglue_clear_err(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u8 abs_vfid) +{ + ecore_wr(p_hwfn, p_ptt, + PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR + (abs_vfid >> 5) * 4, + 1 << (abs_vfid & 0x1f)); +} + +static void ecore_iov_vf_igu_reset(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_vf_info *vf) +{ + int i; + + /* Set VF masks and configuration - pretend */ + ecore_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid); + + ecore_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_VF_MSG_SENT, 0); + + /* unpretend */ + ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid); + + /* iterate over all queues, clear sb consumer */ + for (i = 0; i < vf->num_sbs; i++) + ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt, + vf->igu_sbs[i], + vf->opaque_fid, true); +} + +static void ecore_iov_vf_igu_set_int(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_vf_info *vf, bool enable) +{ + u32 igu_vf_conf; + + ecore_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid); + + igu_vf_conf = ecore_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION); + + if (enable) + igu_vf_conf |= IGU_VF_CONF_MSI_MSIX_EN; + else + igu_vf_conf &= ~IGU_VF_CONF_MSI_MSIX_EN; + + ecore_wr(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION, igu_vf_conf); + + /* unpretend */ + ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid); +} + +static enum _ecore_status_t +ecore_iov_enable_vf_access_msix(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u8 abs_vf_id, + u8 num_sbs) +{ + u8 current_max = 0; + int i; + + /* If client overrides this, don't do anything */ + if (p_hwfn->p_dev->b_dont_override_vf_msix) + return ECORE_SUCCESS; + + /* For AH onward, configuration is per-PF. Find maximum of all + * the currently enabled child VFs, and set the number to be that. + */ + if (!ECORE_IS_BB(p_hwfn->p_dev)) { + ecore_for_each_vf(p_hwfn, i) { + struct ecore_vf_info *p_vf; + + p_vf = ecore_iov_get_vf_info(p_hwfn, (u16)i, true); + if (!p_vf) + continue; + + current_max = OSAL_MAX_T(u8, current_max, + p_vf->num_sbs); + } + } + + if (num_sbs > current_max) + return ecore_mcp_config_vf_msix(p_hwfn, p_ptt, + abs_vf_id, num_sbs); + + return ECORE_SUCCESS; +} + +static enum _ecore_status_t +ecore_iov_enable_vf_access(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, struct ecore_vf_info *vf) +{ + u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN; + enum _ecore_status_t rc = ECORE_SUCCESS; + + /* It's possible VF was previously considered malicious - + * clear the indication even if we're only going to disable VF. 
+ */ + vf->b_malicious = false; + + if (vf->to_disable) + return ECORE_SUCCESS; + + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "Enable internal access for vf %x [abs %x]\n", vf->abs_vf_id, + ECORE_VF_ABS_ID(p_hwfn, vf)); + + ecore_iov_vf_pglue_clear_err(p_hwfn, p_ptt, + ECORE_VF_ABS_ID(p_hwfn, vf)); + + ecore_iov_vf_igu_reset(p_hwfn, p_ptt, vf); + + rc = ecore_iov_enable_vf_access_msix(p_hwfn, p_ptt, + vf->abs_vf_id, vf->num_sbs); + if (rc != ECORE_SUCCESS) + return rc; + + ecore_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid); + + SET_FIELD(igu_vf_conf, IGU_VF_CONF_PARENT, p_hwfn->rel_pf_id); + STORE_RT_REG(p_hwfn, IGU_REG_VF_CONFIGURATION_RT_OFFSET, igu_vf_conf); + + ecore_init_run(p_hwfn, p_ptt, PHASE_VF, vf->abs_vf_id, + p_hwfn->hw_info.hw_mode); + + /* unpretend */ + ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid); + + vf->state = VF_FREE; + + return rc; +} + +/** + * + * @brief ecore_iov_config_perm_table - configure the permission + * zone table. + * The queue zone permission table size is 320x9. There + * are 320 VF queues for single engine device (256 for dual + * engine device), and each entry has the following format: + * {Valid, VF[7:0]} + * @param p_hwfn + * @param p_ptt + * @param vf + * @param enable + */ +static void ecore_iov_config_perm_table(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_vf_info *vf, u8 enable) +{ + u32 reg_addr, val; + u16 qzone_id = 0; + int qid; + + for (qid = 0; qid < vf->num_rxqs; qid++) { + ecore_fw_l2_queue(p_hwfn, vf->vf_queues[qid].fw_rx_qid, + &qzone_id); + + reg_addr = PSWHST_REG_ZONE_PERMISSION_TABLE + qzone_id * 4; + val = enable ? (vf->abs_vf_id | (1 << 8)) : 0; + ecore_wr(p_hwfn, p_ptt, reg_addr, val); + } +} + +static void ecore_iov_enable_vf_traffic(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_vf_info *vf) +{ + /* Reset vf in IGU - interrupts are still disabled */ + ecore_iov_vf_igu_reset(p_hwfn, p_ptt, vf); + + ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 1); + + /* Permission Table */ + ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, true); +} + +static u8 ecore_iov_alloc_vf_igu_sbs(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_vf_info *vf, + u16 num_rx_queues) +{ + struct ecore_igu_block *p_block; + struct cau_sb_entry sb_entry; + int qid = 0; + u32 val = 0; + + if (num_rx_queues > p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov) + num_rx_queues = + (u16)p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov; + p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov -= num_rx_queues; + + SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id); + SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1); + SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, 0); + + for (qid = 0; qid < num_rx_queues; qid++) { + p_block = ecore_get_igu_free_sb(p_hwfn, false); + if (!p_block) + continue; + + vf->igu_sbs[qid] = p_block->igu_sb_id; + p_block->status &= ~ECORE_IGU_STATUS_FREE; + SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid); + + ecore_wr(p_hwfn, p_ptt, + IGU_REG_MAPPING_MEMORY + + sizeof(u32) * p_block->igu_sb_id, val); + + /* Configure igu sb in CAU which were marked valid */ + ecore_init_cau_sb_entry(p_hwfn, &sb_entry, + p_hwfn->rel_pf_id, + vf->abs_vf_id, 1); + + ecore_dmae_host2grc(p_hwfn, p_ptt, + (u64)(osal_uintptr_t)&sb_entry, + CAU_REG_SB_VAR_MEMORY + + p_block->igu_sb_id * sizeof(u64), 2, + OSAL_NULL /* default parameters */); + } + + vf->num_sbs = (u8)num_rx_queues; + + return vf->num_sbs; +} + +/** + * + * @brief The function invalidates all the VF entries, + * technically this 
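The permission-table comment above describes each queue-zone entry as {Valid, VF[7:0]}: bit 8 is the valid flag and the low byte carries the absolute VF id, which is exactly what the `vf->abs_vf_id | (1 << 8)` expression in ecore_iov_config_perm_table() builds. A one-line helper making the encoding explicit; the helper name is the editor's, for illustration only:

#include <stdint.h>

/* {Valid, VF[7:0]} -> 9-bit queue-zone permission entry */
static uint32_t perm_table_entry(uint8_t abs_vf_id, int valid)
{
	return valid ? ((uint32_t)abs_vf_id | (1u << 8)) : 0u;
}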
isn't required, but added for + * cleaness and ease of debugging incase a VF attempts to + * produce an interrupt after it has been taken down. + * + * @param p_hwfn + * @param p_ptt + * @param vf + */ +static void ecore_iov_free_vf_igu_sbs(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_vf_info *vf) +{ + struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info; + int idx, igu_id; + u32 addr, val; + + /* Invalidate igu CAM lines and mark them as free */ + for (idx = 0; idx < vf->num_sbs; idx++) { + igu_id = vf->igu_sbs[idx]; + addr = IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id; + + val = ecore_rd(p_hwfn, p_ptt, addr); + SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0); + ecore_wr(p_hwfn, p_ptt, addr, val); + + p_info->entry[igu_id].status |= ECORE_IGU_STATUS_FREE; + p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov++; + } + + vf->num_sbs = 0; +} + +void ecore_iov_set_link(struct ecore_hwfn *p_hwfn, + u16 vfid, + struct ecore_mcp_link_params *params, + struct ecore_mcp_link_state *link, + struct ecore_mcp_link_capabilities *p_caps) +{ + struct ecore_vf_info *p_vf = ecore_iov_get_vf_info(p_hwfn, vfid, false); + struct ecore_bulletin_content *p_bulletin; + + if (!p_vf) + return; + + p_bulletin = p_vf->bulletin.p_virt; + p_bulletin->req_autoneg = params->speed.autoneg; + p_bulletin->req_adv_speed = params->speed.advertised_speeds; + p_bulletin->req_forced_speed = params->speed.forced_speed; + p_bulletin->req_autoneg_pause = params->pause.autoneg; + p_bulletin->req_forced_rx = params->pause.forced_rx; + p_bulletin->req_forced_tx = params->pause.forced_tx; + p_bulletin->req_loopback = params->loopback_mode; + + p_bulletin->link_up = link->link_up; + p_bulletin->speed = link->speed; + p_bulletin->full_duplex = link->full_duplex; + p_bulletin->autoneg = link->an; + p_bulletin->autoneg_complete = link->an_complete; + p_bulletin->parallel_detection = link->parallel_detection; + p_bulletin->pfc_enabled = link->pfc_enabled; + p_bulletin->partner_adv_speed = link->partner_adv_speed; + p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en; + p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en; + p_bulletin->partner_adv_pause = link->partner_adv_pause; + p_bulletin->sfp_tx_fault = link->sfp_tx_fault; + + p_bulletin->capability_speed = p_caps->speed_capabilities; +} + +#ifndef ASIC_ONLY +static void ecore_emul_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + /* Increase the maximum number of DORQ FIFO entries used by child VFs */ + ecore_wr(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT_LIM, 0x3ec); +} +#endif + +enum _ecore_status_t +ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_iov_vf_init_params *p_params) +{ + struct ecore_mcp_link_capabilities link_caps; + struct ecore_mcp_link_params link_params; + struct ecore_mcp_link_state link_state; + u8 num_of_vf_available_chains = 0; + struct ecore_vf_info *vf = OSAL_NULL; + u16 qid, num_irqs; + enum _ecore_status_t rc = ECORE_SUCCESS; + u32 cids; + u8 i; + + vf = ecore_iov_get_vf_info(p_hwfn, p_params->rel_vf_id, false); + if (!vf) { + DP_ERR(p_hwfn, "ecore_iov_init_hw_for_vf : vf is OSAL_NULL\n"); + return ECORE_UNKNOWN_ERROR; + } + + if (vf->b_init) { + DP_NOTICE(p_hwfn, true, "VF[%d] is already active.\n", + p_params->rel_vf_id); + return ECORE_INVAL; + } + + /* Perform sanity checking on the requested vport/rss */ + if (p_params->vport_id >= RESC_NUM(p_hwfn, ECORE_VPORT)) { + DP_NOTICE(p_hwfn, true, "VF[%d] - can't use VPORT %02x\n", + 
p_params->rel_vf_id, p_params->vport_id); + return ECORE_INVAL; + } + + if ((p_params->num_queues > 1) && + (p_params->rss_eng_id >= RESC_NUM(p_hwfn, ECORE_RSS_ENG))) { + DP_NOTICE(p_hwfn, true, "VF[%d] - can't use RSS_ENG %02x\n", + p_params->rel_vf_id, p_params->rss_eng_id); + return ECORE_INVAL; + } + + /* TODO - remove this once we get confidence of change */ + if (!p_params->vport_id) { + DP_NOTICE(p_hwfn, false, + "VF[%d] - Unlikely that VF uses vport0. Forgotten?\n", + p_params->rel_vf_id); + } + if ((!p_params->rss_eng_id) && (p_params->num_queues > 1)) { + DP_NOTICE(p_hwfn, false, + "VF[%d] - Unlikely that VF uses RSS_eng0. Forgotten?\n", + p_params->rel_vf_id); + } + vf->vport_id = p_params->vport_id; + vf->rss_eng_id = p_params->rss_eng_id; + + /* Since it's possible to relocate SBs, it's a bit difficult to check + * things here. Simply check whether the index falls in the range + * belonging to the PF. + */ + for (i = 0; i < p_params->num_queues; i++) { + qid = p_params->req_rx_queue[i]; + if (qid > (u16)RESC_NUM(p_hwfn, ECORE_L2_QUEUE)) { + DP_NOTICE(p_hwfn, true, + "Can't enable Rx qid [%04x] for VF[%d]: qids [0,,...,0x%04x] available\n", + qid, p_params->rel_vf_id, + (u16)RESC_NUM(p_hwfn, ECORE_L2_QUEUE)); + return ECORE_INVAL; + } + + qid = p_params->req_tx_queue[i]; + if (qid > (u16)RESC_NUM(p_hwfn, ECORE_L2_QUEUE)) { + DP_NOTICE(p_hwfn, true, + "Can't enable Tx qid [%04x] for VF[%d]: qids [0,,...,0x%04x] available\n", + qid, p_params->rel_vf_id, + (u16)RESC_NUM(p_hwfn, ECORE_L2_QUEUE)); + return ECORE_INVAL; + } + } + + /* Limit number of queues according to number of CIDs */ + ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, &cids); + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "VF[%d] - requesting to initialize for 0x%04x queues" + " [0x%04x CIDs available]\n", + vf->relative_vf_id, p_params->num_queues, (u16)cids); + num_irqs = OSAL_MIN_T(u16, p_params->num_queues, ((u16)cids)); + + num_of_vf_available_chains = ecore_iov_alloc_vf_igu_sbs(p_hwfn, + p_ptt, + vf, + num_irqs); + if (num_of_vf_available_chains == 0) { + DP_ERR(p_hwfn, "no available igu sbs\n"); + return ECORE_NOMEM; + } + + /* Choose queue number and index ranges */ + vf->num_rxqs = num_of_vf_available_chains; + vf->num_txqs = num_of_vf_available_chains; + + for (i = 0; i < vf->num_rxqs; i++) { + struct ecore_vf_queue *p_queue = &vf->vf_queues[i]; + + p_queue->fw_rx_qid = p_params->req_rx_queue[i]; + p_queue->fw_tx_qid = p_params->req_tx_queue[i]; + + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "VF[%d] - Q[%d] SB %04x, qid [Rx %04x Tx %04x]\n", + vf->relative_vf_id, i, vf->igu_sbs[i], + p_queue->fw_rx_qid, p_queue->fw_tx_qid); + } + + /* Update the link configuration in bulletin. 
+ */ + OSAL_MEMCPY(&link_params, ecore_mcp_get_link_params(p_hwfn), + sizeof(link_params)); + OSAL_MEMCPY(&link_state, ecore_mcp_get_link_state(p_hwfn), + sizeof(link_state)); + OSAL_MEMCPY(&link_caps, ecore_mcp_get_link_capabilities(p_hwfn), + sizeof(link_caps)); + ecore_iov_set_link(p_hwfn, p_params->rel_vf_id, + &link_params, &link_state, &link_caps); + + rc = ecore_iov_enable_vf_access(p_hwfn, p_ptt, vf); + if (rc != ECORE_SUCCESS) + return rc; + + vf->b_init = true; +#ifndef REMOVE_DBG + p_hwfn->pf_iov_info->active_vfs[vf->relative_vf_id / 64] |= + (1ULL << (vf->relative_vf_id % 64)); +#endif + + if (IS_LEAD_HWFN(p_hwfn)) + p_hwfn->p_dev->p_iov_info->num_vfs++; + +#ifndef ASIC_ONLY + if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) + ecore_emul_iov_init_hw_for_vf(p_hwfn, p_ptt); +#endif + + return ECORE_SUCCESS; + } + +#ifndef ASIC_ONLY +static void ecore_emul_iov_release_hw_for_vf(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + if (!ecore_mcp_is_init(p_hwfn)) { + u32 sriov_dis = ecore_rd(p_hwfn, p_ptt, + PGLUE_B_REG_SR_IOV_DISABLED_REQUEST); + + ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_SR_IOV_DISABLED_REQUEST_CLR, + sriov_dis); +} +} +#endif + +enum _ecore_status_t ecore_iov_release_hw_for_vf(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u16 rel_vf_id) +{ + struct ecore_mcp_link_capabilities caps; + struct ecore_mcp_link_params params; + struct ecore_mcp_link_state link; + struct ecore_vf_info *vf = OSAL_NULL; + + vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true); + if (!vf) { + DP_ERR(p_hwfn, "ecore_iov_release_hw_for_vf : vf is NULL\n"); + return ECORE_UNKNOWN_ERROR; + } + + if (vf->bulletin.p_virt) + OSAL_MEMSET(vf->bulletin.p_virt, 0, + sizeof(*vf->bulletin.p_virt)); + + OSAL_MEMSET(&vf->p_vf_info, 0, sizeof(vf->p_vf_info)); + + /* Get the link configuration back in bulletin so + * that when VFs are re-enabled they get the actual + * link configuration. + */ + OSAL_MEMCPY(¶ms, ecore_mcp_get_link_params(p_hwfn), sizeof(params)); + OSAL_MEMCPY(&link, ecore_mcp_get_link_state(p_hwfn), sizeof(link)); + OSAL_MEMCPY(&caps, ecore_mcp_get_link_capabilities(p_hwfn), + sizeof(caps)); + ecore_iov_set_link(p_hwfn, rel_vf_id, ¶ms, &link, &caps); + + /* Forget the VF's acquisition message */ + OSAL_MEMSET(&vf->acquire, 0, sizeof(vf->acquire)); + + /* disablng interrupts and resetting permission table was done during + * vf-close, however, we could get here without going through vf_close + */ + /* Disable Interrupts for VF */ + ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0); + + /* Reset Permission table */ + ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, 0); + + vf->num_rxqs = 0; + vf->num_txqs = 0; + ecore_iov_free_vf_igu_sbs(p_hwfn, p_ptt, vf); + + if (vf->b_init) { + vf->b_init = false; + p_hwfn->pf_iov_info->active_vfs[vf->relative_vf_id / 64] &= + ~(1ULL << (vf->relative_vf_id / 64)); + + if (IS_LEAD_HWFN(p_hwfn)) + p_hwfn->p_dev->p_iov_info->num_vfs--; + } + +#ifndef ASIC_ONLY + if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) + ecore_emul_iov_release_hw_for_vf(p_hwfn, p_ptt); +#endif + + return ECORE_SUCCESS; +} + +static bool ecore_iov_tlv_supported(u16 tlvtype) +{ + return tlvtype > CHANNEL_TLV_NONE && tlvtype < CHANNEL_TLV_MAX; +} + +static void ecore_iov_lock_vf_pf_channel(struct ecore_hwfn *p_hwfn, + struct ecore_vf_info *vf, u16 tlv) +{ + /* lock the channel */ + /* mutex_lock(&vf->op_mutex); @@@TBD MichalK - add lock... 
*/ + + /* record the locking op */ + /* vf->op_current = tlv; @@@TBD MichalK */ + + /* log the lock */ + if (ecore_iov_tlv_supported(tlv)) + DP_VERBOSE(p_hwfn, + ECORE_MSG_IOV, + "VF[%d]: vf pf channel locked by %s\n", + vf->abs_vf_id, + qede_ecore_channel_tlvs_string[tlv]); + else + DP_VERBOSE(p_hwfn, + ECORE_MSG_IOV, + "VF[%d]: vf pf channel locked by %04x\n", + vf->abs_vf_id, tlv); +} + +static void ecore_iov_unlock_vf_pf_channel(struct ecore_hwfn *p_hwfn, + struct ecore_vf_info *vf, + u16 expected_tlv) +{ + /* log the unlock */ + if (ecore_iov_tlv_supported(expected_tlv)) + DP_VERBOSE(p_hwfn, + ECORE_MSG_IOV, + "VF[%d]: vf pf channel unlocked by %s\n", + vf->abs_vf_id, + qede_ecore_channel_tlvs_string[expected_tlv]); + else + DP_VERBOSE(p_hwfn, + ECORE_MSG_IOV, + "VF[%d]: vf pf channel unlocked by %04x\n", + vf->abs_vf_id, expected_tlv); + + /* record the locking op */ + /* vf->op_current = CHANNEL_TLV_NONE; */ +} + +/* place a given tlv on the tlv buffer, continuing current tlv list */ +void *ecore_add_tlv(u8 **offset, u16 type, u16 length) +{ + struct channel_tlv *tl = (struct channel_tlv *)*offset; + + tl->type = type; + tl->length = length; + + /* Offset should keep pointing to next TLV (the end of the last) */ + *offset += length; + + /* Return a pointer to the start of the added tlv */ + return *offset - length; +} + +/* list the types and lengths of the tlvs on the buffer */ +void ecore_dp_tlv_list(struct ecore_hwfn *p_hwfn, void *tlvs_list) +{ + u16 i = 1, total_length = 0; + struct channel_tlv *tlv; + + do { + /* cast current tlv list entry to channel tlv header */ + tlv = (struct channel_tlv *)((u8 *)tlvs_list + total_length); + + /* output tlv */ + if (ecore_iov_tlv_supported(tlv->type)) + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "TLV number %d: type %s, length %d\n", + i, qede_ecore_channel_tlvs_string[tlv->type], + tlv->length); + else + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "TLV number %d: type %d, length %d\n", + i, tlv->type, tlv->length); + + if (tlv->type == CHANNEL_TLV_LIST_END) + return; + + /* Validate entry - protect against malicious VFs */ + if (!tlv->length) { + DP_NOTICE(p_hwfn, false, "TLV of length 0 found\n"); + return; + } + total_length += tlv->length; + if (total_length >= sizeof(struct tlv_buffer_size)) { + DP_NOTICE(p_hwfn, false, "TLV ==> Buffer overflow\n"); + return; + } + + i++; + } while (1); +} + +static void ecore_iov_send_response(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_vf_info *p_vf, +#ifdef CONFIG_ECORE_SW_CHANNEL + u16 length, +#else + u16 OSAL_UNUSED length, +#endif + u8 status) +{ + struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx; + struct dmae_params params; + u8 eng_vf_id; + + mbx->reply_virt->default_resp.hdr.status = status; + + ecore_dp_tlv_list(p_hwfn, mbx->reply_virt); + +#ifdef CONFIG_ECORE_SW_CHANNEL + mbx->sw_mbx.response_size = + length + sizeof(struct channel_list_end_tlv); + + if (!p_vf->b_hw_channel) + return; +#endif + + eng_vf_id = p_vf->abs_vf_id; + + OSAL_MEMSET(¶ms, 0, sizeof(params)); + SET_FIELD(params.flags, DMAE_PARAMS_DST_VF_VALID, 0x1); + params.dst_vf_id = eng_vf_id; + + ecore_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys + sizeof(u64), + mbx->req_virt->first_tlv.reply_address + + sizeof(u64), + (sizeof(union pfvf_tlvs) - sizeof(u64)) / 4, + ¶ms); + + /* Once PF copies the rc to the VF, the latter can continue and + * and send an additional message. So we have to make sure the + * channel would be re-set to ready prior to that. 
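ecore_add_tlv() and ecore_dp_tlv_list() above implement a flat type/length chain: each entry begins with a {type, length} header, the writer bumps a running offset, and the reader walks entries until a LIST_END terminator while rejecting zero-length entries and anything that would run past the buffer. A compact standalone model of the same technique (type values and buffer size are invented):

#include <stdint.h>
#include <stdio.h>

#define TLV_LIST_END 0xFFFF	/* stand-in for CHANNEL_TLV_LIST_END */
#define BUF_SIZE     256

struct tlv_hdr { uint16_t type; uint16_t length; };

/* Append a TLV and advance the write offset, as ecore_add_tlv() does. */
static void *add_tlv(uint8_t **offset, uint16_t type, uint16_t length)
{
	struct tlv_hdr *tl = (struct tlv_hdr *)*offset;

	tl->type = type;
	tl->length = length;
	*offset += length;
	return tl;
}

/* Walk the chain until LIST_END, guarding against malformed lengths. */
static void dump_tlvs(const uint8_t *buf)
{
	uint16_t total = 0;

	for (;;) {
		const struct tlv_hdr *tl = (const struct tlv_hdr *)(buf + total);

		if (tl->type == TLV_LIST_END)
			return;
		if (!tl->length || total + tl->length > BUF_SIZE) {
			printf("malformed TLV chain\n");
			return;
		}
		printf("type %u, length %u\n", tl->type, tl->length);
		total += tl->length;
	}
}

int main(void)
{
	uint8_t buf[BUF_SIZE] = { 0 };
	uint8_t *offset = buf;

	add_tlv(&offset, 7, 16);			/* some request TLV */
	add_tlv(&offset, TLV_LIST_END, sizeof(struct tlv_hdr));
	dump_tlvs(buf);
	return 0;
}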
+ */ + REG_WR(p_hwfn, + GTT_BAR0_MAP_REG_USDM_RAM + + USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1); + + ecore_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys, + mbx->req_virt->first_tlv.reply_address, + sizeof(u64) / 4, ¶ms); + + OSAL_IOV_PF_RESP_TYPE(p_hwfn, p_vf->relative_vf_id, status); +} + +static u16 ecore_iov_vport_to_tlv(enum ecore_iov_vport_update_flag flag) +{ + switch (flag) { + case ECORE_IOV_VP_UPDATE_ACTIVATE: + return CHANNEL_TLV_VPORT_UPDATE_ACTIVATE; + case ECORE_IOV_VP_UPDATE_VLAN_STRIP: + return CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP; + case ECORE_IOV_VP_UPDATE_TX_SWITCH: + return CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH; + case ECORE_IOV_VP_UPDATE_MCAST: + return CHANNEL_TLV_VPORT_UPDATE_MCAST; + case ECORE_IOV_VP_UPDATE_ACCEPT_PARAM: + return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM; + case ECORE_IOV_VP_UPDATE_RSS: + return CHANNEL_TLV_VPORT_UPDATE_RSS; + case ECORE_IOV_VP_UPDATE_ACCEPT_ANY_VLAN: + return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN; + case ECORE_IOV_VP_UPDATE_SGE_TPA: + return CHANNEL_TLV_VPORT_UPDATE_SGE_TPA; + default: + return 0; + } +} + +static u16 ecore_iov_prep_vp_update_resp_tlvs(struct ecore_hwfn *p_hwfn, + struct ecore_vf_info *p_vf, + struct ecore_iov_vf_mbx *p_mbx, + u8 status, u16 tlvs_mask, + u16 tlvs_accepted) +{ + struct pfvf_def_resp_tlv *resp; + u16 size, total_len, i; + + OSAL_MEMSET(p_mbx->reply_virt, 0, sizeof(union pfvf_tlvs)); + p_mbx->offset = (u8 *)p_mbx->reply_virt; + size = sizeof(struct pfvf_def_resp_tlv); + total_len = size; + + ecore_add_tlv(&p_mbx->offset, CHANNEL_TLV_VPORT_UPDATE, size); + + /* Prepare response for all extended tlvs if they are found by PF */ + for (i = 0; i < ECORE_IOV_VP_UPDATE_MAX; i++) { + if (!(tlvs_mask & (1 << i))) + continue; + + resp = ecore_add_tlv(&p_mbx->offset, ecore_iov_vport_to_tlv(i), + size); + + if (tlvs_accepted & (1 << i)) + resp->hdr.status = status; + else + resp->hdr.status = PFVF_STATUS_NOT_SUPPORTED; + + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "VF[%d] - vport_update resp: TLV %d, status %02x\n", + p_vf->relative_vf_id, + ecore_iov_vport_to_tlv(i), + resp->hdr.status); + + total_len += size; + } + + ecore_add_tlv(&p_mbx->offset, CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + return total_len; +} + +static void ecore_iov_prepare_resp(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_vf_info *vf_info, + u16 type, u16 length, u8 status) +{ + struct ecore_iov_vf_mbx *mbx = &vf_info->vf_mbx; + + mbx->offset = (u8 *)mbx->reply_virt; + + ecore_add_tlv(&mbx->offset, type, length); + ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + ecore_iov_send_response(p_hwfn, p_ptt, vf_info, length, status); +} + +struct ecore_public_vf_info +*ecore_iov_get_public_vf_info(struct ecore_hwfn *p_hwfn, + u16 relative_vf_id, + bool b_enabled_only) +{ + struct ecore_vf_info *vf = OSAL_NULL; + + vf = ecore_iov_get_vf_info(p_hwfn, relative_vf_id, b_enabled_only); + if (!vf) + return OSAL_NULL; + + return &vf->p_vf_info; +} + +static void ecore_iov_vf_cleanup(struct ecore_hwfn *p_hwfn, + struct ecore_vf_info *p_vf) +{ + u32 i, j; + p_vf->vf_bulletin = 0; + p_vf->vport_instance = 0; + p_vf->configured_features = 0; + + /* If VF previously requested less resources, go back to default */ + p_vf->num_rxqs = p_vf->num_sbs; + p_vf->num_txqs = p_vf->num_sbs; + + p_vf->num_active_rxqs = 0; + + for (i = 0; i < ECORE_MAX_VF_CHAINS_PER_PF; i++) { + struct ecore_vf_queue *p_queue = &p_vf->vf_queues[i]; + + for (j = 0; j < MAX_QUEUES_PER_QZONE; j++) { + if 
(!p_queue->cids[j].p_cid) + continue; + + ecore_eth_queue_cid_release(p_hwfn, + p_queue->cids[j].p_cid); + p_queue->cids[j].p_cid = OSAL_NULL; + } + } + + OSAL_MEMSET(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config)); + OSAL_MEMSET(&p_vf->acquire, 0, sizeof(p_vf->acquire)); + OSAL_IOV_VF_CLEANUP(p_hwfn, p_vf->relative_vf_id); +} + +/* Returns either 0, or log(size) */ +static u32 ecore_iov_vf_db_bar_size(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + u32 val = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_BAR1_SIZE); + + if (val) + return val + 11; + return 0; +} + +static void +ecore_iov_vf_mbx_acquire_resc_cids(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_vf_info *p_vf, + struct vf_pf_resc_request *p_req, + struct pf_vf_resc *p_resp) +{ + u8 num_vf_cons = p_hwfn->pf_params.eth_pf_params.num_vf_cons; + u8 db_size = DB_ADDR_VF(1, DQ_DEMS_LEGACY) - + DB_ADDR_VF(0, DQ_DEMS_LEGACY); + u32 bar_size; + + p_resp->num_cids = OSAL_MIN_T(u8, p_req->num_cids, num_vf_cons); + + /* If VF didn't bother asking for QIDs than don't bother limiting + * number of CIDs. The VF doesn't care about the number, and this + * has the likely result of causing an additional acquisition. + */ + if (!(p_vf->acquire.vfdev_info.capabilities & + VFPF_ACQUIRE_CAP_QUEUE_QIDS)) + return; + + /* If doorbell bar was mapped by VF, limit the VF CIDs to an amount + * that would make sure doorbells for all CIDs fall within the bar. + * If it doesn't, make sure regview window is sufficient. + */ + if (p_vf->acquire.vfdev_info.capabilities & + VFPF_ACQUIRE_CAP_PHYSICAL_BAR) { + bar_size = ecore_iov_vf_db_bar_size(p_hwfn, p_ptt); + if (bar_size) + bar_size = 1 << bar_size; + + if (ECORE_IS_CMT(p_hwfn->p_dev)) + bar_size /= 2; + } else { + bar_size = PXP_VF_BAR0_DQ_LENGTH; + } + + if (bar_size / db_size < 256) + p_resp->num_cids = OSAL_MIN_T(u8, p_resp->num_cids, + (u8)(bar_size / db_size)); +} + +static u8 ecore_iov_vf_mbx_acquire_resc(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_vf_info *p_vf, + struct vf_pf_resc_request *p_req, + struct pf_vf_resc *p_resp) +{ + u8 i; + + /* Queue related information */ + p_resp->num_rxqs = p_vf->num_rxqs; + p_resp->num_txqs = p_vf->num_txqs; + p_resp->num_sbs = p_vf->num_sbs; + + for (i = 0; i < p_resp->num_sbs; i++) { + p_resp->hw_sbs[i].hw_sb_id = p_vf->igu_sbs[i]; + /* TODO - what's this sb_qid field? Is it deprecated? + * or is there an ecore_client that looks at this? + */ + p_resp->hw_sbs[i].sb_qid = 0; + } + + /* These fields are filled for backward compatibility. + * Unused by modern vfs. + */ + for (i = 0; i < p_resp->num_rxqs; i++) { + ecore_fw_l2_queue(p_hwfn, p_vf->vf_queues[i].fw_rx_qid, + (u16 *)&p_resp->hw_qid[i]); + p_resp->cid[i] = i; + } + + /* Filter related information */ + p_resp->num_mac_filters = OSAL_MIN_T(u8, p_vf->num_mac_filters, + p_req->num_mac_filters); + p_resp->num_vlan_filters = OSAL_MIN_T(u8, p_vf->num_vlan_filters, + p_req->num_vlan_filters); + + ecore_iov_vf_mbx_acquire_resc_cids(p_hwfn, p_ptt, p_vf, p_req, p_resp); + + /* This isn't really needed/enforced, but some legacy VFs might depend + * on the correct filling of this field. 
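The CID clamp in ecore_iov_vf_mbx_acquire_resc_cids() is plain arithmetic: the BAR1 size register reports a log2 value (the +11 turns it into bytes), a 2-engine (CMT) device halves the usable window, and the CID count is capped so every doorbell still fits, with 256 doorbells as the threshold below which the clamp kicks in. A simplified, hedged sketch of that calculation with made-up numbers:

#include <stdint.h>
#include <stdio.h>

/* bar1_size_reg models the raw PGLUE_B_REG_VF_BAR1_SIZE value, db_size the
 * stride between two VF doorbells, and fallback_len the regview window used
 * when no physical doorbell BAR is mapped.
 */
static uint8_t cap_vf_cids(uint8_t requested, uint32_t bar1_size_reg,
			   uint32_t db_size, int is_cmt, uint32_t fallback_len)
{
	uint32_t bar_size;

	bar_size = bar1_size_reg ? (1U << (bar1_size_reg + 11)) : fallback_len;
	if (is_cmt)
		bar_size /= 2;

	/* Only clamp when the BAR cannot cover 256 doorbells */
	if (bar_size / db_size < 256 && requested > bar_size / db_size)
		requested = (uint8_t)(bar_size / db_size);

	return requested;
}

int main(void)
{
	printf("capped cids: %u\n",
	       (unsigned int)cap_vf_cids(64, 4, 1024, 1, 0x10000));
	return 0;
}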
+ */ + p_resp->num_mc_filters = ECORE_MAX_MC_ADDRS; + + /* Validate sufficient resources for VF */ + if (p_resp->num_rxqs < p_req->num_rxqs || + p_resp->num_txqs < p_req->num_txqs || + p_resp->num_sbs < p_req->num_sbs || + p_resp->num_mac_filters < p_req->num_mac_filters || + p_resp->num_vlan_filters < p_req->num_vlan_filters || + p_resp->num_mc_filters < p_req->num_mc_filters || + p_resp->num_cids < p_req->num_cids) { + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "VF[%d] - Insufficient resources: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x] cids [%02x/%02x]\n", + p_vf->abs_vf_id, + p_req->num_rxqs, p_resp->num_rxqs, + p_req->num_rxqs, p_resp->num_txqs, + p_req->num_sbs, p_resp->num_sbs, + p_req->num_mac_filters, p_resp->num_mac_filters, + p_req->num_vlan_filters, p_resp->num_vlan_filters, + p_req->num_mc_filters, p_resp->num_mc_filters, + p_req->num_cids, p_resp->num_cids); + + /* Some legacy OSes are incapable of correctly handling this + * failure. + */ + if ((p_vf->acquire.vfdev_info.eth_fp_hsi_minor == + ETH_HSI_VER_NO_PKT_LEN_TUNN) && + (p_vf->acquire.vfdev_info.os_type == + VFPF_ACQUIRE_OS_WINDOWS)) + return PFVF_STATUS_SUCCESS; + + return PFVF_STATUS_NO_RESOURCE; + } + + return PFVF_STATUS_SUCCESS; +} + +static void ecore_iov_vf_mbx_acquire_stats(struct pfvf_stats_info *p_stats) +{ + p_stats->mstats.address = PXP_VF_BAR0_START_MSDM_ZONE_B + + OFFSETOF(struct mstorm_vf_zone, + non_trigger.eth_queue_stat); + p_stats->mstats.len = sizeof(struct eth_mstorm_per_queue_stat); + p_stats->ustats.address = PXP_VF_BAR0_START_USDM_ZONE_B + + OFFSETOF(struct ustorm_vf_zone, + non_trigger.eth_queue_stat); + p_stats->ustats.len = sizeof(struct eth_ustorm_per_queue_stat); + p_stats->pstats.address = PXP_VF_BAR0_START_PSDM_ZONE_B + + OFFSETOF(struct pstorm_vf_zone, + non_trigger.eth_queue_stat); + p_stats->pstats.len = sizeof(struct eth_pstorm_per_queue_stat); + p_stats->tstats.address = 0; + p_stats->tstats.len = 0; +} + +static void ecore_iov_vf_mbx_acquire(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_vf_info *vf) +{ + struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx; + struct pfvf_acquire_resp_tlv *resp = &mbx->reply_virt->acquire_resp; + struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info; + struct vfpf_acquire_tlv *req = &mbx->req_virt->acquire; + u8 vfpf_status = PFVF_STATUS_NOT_SUPPORTED; + struct pf_vf_resc *resc = &resp->resc; + enum _ecore_status_t rc; + + OSAL_MEMSET(resp, 0, sizeof(*resp)); + + /* Write the PF version so that VF would know which version + * is supported - might be later overridden. This guarantees that + * VF could recognize legacy PF based on lack of versions in reply. + */ + pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR; + pfdev_info->minor_fp_hsi = ETH_HSI_VER_MINOR; + + /* TODO - not doing anything is bad since we'll assert, but this isn't + * necessarily the right behavior - perhaps we should have allowed some + * versatility here. + */ + if (vf->state != VF_FREE && + vf->state != VF_STOPPED) { + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "VF[%d] sent ACQUIRE but is already in state %d - fail request\n", + vf->abs_vf_id, vf->state); + goto out; + } + + /* Validate FW compatibility */ + if (req->vfdev_info.eth_fp_hsi_major != ETH_HSI_VER_MAJOR) { + if (req->vfdev_info.capabilities & + VFPF_ACQUIRE_CAP_PRE_FP_HSI) { + struct vf_pf_vfdev_info *p_vfdev = &req->vfdev_info; + + /* This legacy support would need to be removed once + * the major has changed. 
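ecore_iov_vf_mbx_acquire_resc() follows a simple negotiation rule: offer the minimum of what the VF requested and what the PF reserved for it, then answer PFVF_STATUS_NO_RESOURCE if the offer falls short anywhere (with a carve-out for certain legacy Windows VFs). A generic standalone sketch of that pattern; the struct below is illustrative, not the driver's layout:

#include <stdint.h>
#include <stdio.h>

struct resc { uint8_t rxqs, txqs, sbs, mac_filters, vlan_filters; };

static uint8_t min_u8(uint8_t a, uint8_t b) { return a < b ? a : b; }

/* Returns 0 when the offer fully covers the request, -1 otherwise. */
static int negotiate(const struct resc *req, const struct resc *avail,
		     struct resc *offer)
{
	offer->rxqs = min_u8(req->rxqs, avail->rxqs);
	offer->txqs = min_u8(req->txqs, avail->txqs);
	offer->sbs = min_u8(req->sbs, avail->sbs);
	offer->mac_filters = min_u8(req->mac_filters, avail->mac_filters);
	offer->vlan_filters = min_u8(req->vlan_filters, avail->vlan_filters);

	return (offer->rxqs < req->rxqs || offer->txqs < req->txqs ||
		offer->sbs < req->sbs ||
		offer->mac_filters < req->mac_filters ||
		offer->vlan_filters < req->vlan_filters) ? -1 : 0;
}

int main(void)
{
	struct resc req = { 16, 16, 16, 4, 8 }, avail = { 16, 16, 16, 1, 8 }, offer;

	printf("status: %s\n",
	       negotiate(&req, &avail, &offer) ? "NO_RESOURCE" : "SUCCESS");
	return 0;
}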
+ */ + OSAL_BUILD_BUG_ON(ETH_HSI_VER_MAJOR != 3); + + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "VF[%d] is pre-fastpath HSI\n", + vf->abs_vf_id); + p_vfdev->eth_fp_hsi_major = ETH_HSI_VER_MAJOR; + p_vfdev->eth_fp_hsi_minor = ETH_HSI_VER_NO_PKT_LEN_TUNN; + } else { + DP_INFO(p_hwfn, + "VF[%d] needs fastpath HSI %02x.%02x, which is" + " incompatible with loaded FW's faspath" + " HSI %02x.%02x\n", + vf->abs_vf_id, + req->vfdev_info.eth_fp_hsi_major, + req->vfdev_info.eth_fp_hsi_minor, + ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR); + + goto out; + } + } + + /* On 100g PFs, prevent old VFs from loading */ + if (ECORE_IS_CMT(p_hwfn->p_dev) && + !(req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_100G)) { + DP_INFO(p_hwfn, + "VF[%d] is running an old driver that doesn't support" + " 100g\n", + vf->abs_vf_id); + goto out; + } + +#ifndef __EXTRACT__LINUX__ + if (OSAL_IOV_VF_ACQUIRE(p_hwfn, vf->relative_vf_id) != ECORE_SUCCESS) { + vfpf_status = PFVF_STATUS_NOT_SUPPORTED; + goto out; + } +#endif + + /* Store the acquire message */ + OSAL_MEMCPY(&vf->acquire, req, sizeof(vf->acquire)); + + vf->opaque_fid = req->vfdev_info.opaque_fid; + + vf->vf_bulletin = req->bulletin_addr; + vf->bulletin.size = (vf->bulletin.size < req->bulletin_size) ? + vf->bulletin.size : req->bulletin_size; + + /* fill in pfdev info */ + pfdev_info->chip_num = p_hwfn->p_dev->chip_num; + pfdev_info->db_size = 0; /* @@@ TBD MichalK Vf Doorbells */ + pfdev_info->indices_per_sb = PIS_PER_SB; + + pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED | + PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE; + if (ECORE_IS_CMT(p_hwfn->p_dev)) + pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G; + + /* Share our ability to use multiple queue-ids only with VFs + * that request it. + */ + if (req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_QUEUE_QIDS) + pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_QUEUE_QIDS; + + /* Share the sizes of the bars with VF */ + resp->pfdev_info.bar_size = (u8)ecore_iov_vf_db_bar_size(p_hwfn, + p_ptt); + + ecore_iov_vf_mbx_acquire_stats(&pfdev_info->stats_info); + + OSAL_MEMCPY(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr, + ETH_ALEN); + + pfdev_info->fw_major = FW_MAJOR_VERSION; + pfdev_info->fw_minor = FW_MINOR_VERSION; + pfdev_info->fw_rev = FW_REVISION_VERSION; + pfdev_info->fw_eng = FW_ENGINEERING_VERSION; + + /* Incorrect when legacy, but doesn't matter as legacy isn't reading + * this field. + */ + pfdev_info->minor_fp_hsi = OSAL_MIN_T(u8, ETH_HSI_VER_MINOR, + req->vfdev_info.eth_fp_hsi_minor); + pfdev_info->os_type = OSAL_IOV_GET_OS_TYPE(); + ecore_mcp_get_mfw_ver(p_hwfn, p_ptt, &pfdev_info->mfw_ver, + OSAL_NULL); + + pfdev_info->dev_type = p_hwfn->p_dev->type; + pfdev_info->chip_rev = p_hwfn->p_dev->chip_rev; + + /* Fill resources available to VF; Make sure there are enough to + * satisfy the VF's request. + */ + vfpf_status = ecore_iov_vf_mbx_acquire_resc(p_hwfn, p_ptt, vf, + &req->resc_request, resc); + if (vfpf_status != PFVF_STATUS_SUCCESS) + goto out; + + /* Start the VF in FW */ + rc = ecore_sp_vf_start(p_hwfn, vf); + if (rc != ECORE_SUCCESS) { + DP_NOTICE(p_hwfn, true, "Failed to start VF[%02x]\n", + vf->abs_vf_id); + vfpf_status = PFVF_STATUS_FAILURE; + goto out; + } + + /* Fill agreed size of bulletin board in response, and post + * an initial image to the bulletin board. 
+ */ + resp->bulletin_size = vf->bulletin.size; + ecore_iov_post_vf_bulletin(p_hwfn, vf->relative_vf_id, p_ptt); + + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x," + " db_size=%d, idx_per_sb=%d, pf_cap=0x%lx\n" + "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d," + " n_vlans-%d\n", + vf->abs_vf_id, resp->pfdev_info.chip_num, + resp->pfdev_info.db_size, resp->pfdev_info.indices_per_sb, + (unsigned long)resp->pfdev_info.capabilities, resc->num_rxqs, + resc->num_txqs, resc->num_sbs, resc->num_mac_filters, + resc->num_vlan_filters); + + vf->state = VF_ACQUIRED; + +out: + /* Prepare Response */ + ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_ACQUIRE, + sizeof(struct pfvf_acquire_resp_tlv), + vfpf_status); +} + +static enum _ecore_status_t +__ecore_iov_spoofchk_set(struct ecore_hwfn *p_hwfn, + struct ecore_vf_info *p_vf, bool val) +{ + struct ecore_sp_vport_update_params params; + enum _ecore_status_t rc; + + if (val == p_vf->spoof_chk) { + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "Spoofchk value[%d] is already configured\n", val); + return ECORE_SUCCESS; + } + + OSAL_MEMSET(¶ms, 0, sizeof(struct ecore_sp_vport_update_params)); + params.opaque_fid = p_vf->opaque_fid; + params.vport_id = p_vf->vport_id; + params.update_anti_spoofing_en_flg = 1; + params.anti_spoofing_en = val; + + rc = ecore_sp_vport_update(p_hwfn, ¶ms, ECORE_SPQ_MODE_EBLOCK, + OSAL_NULL); + if (rc == ECORE_SUCCESS) { + p_vf->spoof_chk = val; + p_vf->req_spoofchk_val = p_vf->spoof_chk; + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "Spoofchk val[%d] configured\n", val); + } else { + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "Spoofchk configuration[val:%d] failed for VF[%d]\n", + val, p_vf->relative_vf_id); + } + + return rc; +} + +static enum _ecore_status_t +ecore_iov_reconfigure_unicast_vlan(struct ecore_hwfn *p_hwfn, + struct ecore_vf_info *p_vf) +{ + struct ecore_filter_ucast filter; + enum _ecore_status_t rc = ECORE_SUCCESS; + int i; + + OSAL_MEMSET(&filter, 0, sizeof(filter)); + filter.is_rx_filter = 1; + filter.is_tx_filter = 1; + filter.vport_to_add_to = p_vf->vport_id; + filter.opcode = ECORE_FILTER_ADD; + + /* Reconfigure vlans */ + for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++) { + if (!p_vf->shadow_config.vlans[i].used) + continue; + + filter.type = ECORE_FILTER_VLAN; + filter.vlan = p_vf->shadow_config.vlans[i].vid; + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "Reconfiguring VLAN [0x%04x] for VF [%04x]\n", + filter.vlan, p_vf->relative_vf_id); + rc = ecore_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid, + &filter, ECORE_SPQ_MODE_CB, + OSAL_NULL); + if (rc) { + DP_NOTICE(p_hwfn, true, + "Failed to configure VLAN [%04x]" + " to VF [%04x]\n", + filter.vlan, p_vf->relative_vf_id); + break; + } + } + + return rc; +} + +static enum _ecore_status_t +ecore_iov_reconfigure_unicast_shadow(struct ecore_hwfn *p_hwfn, + struct ecore_vf_info *p_vf, u64 events) +{ + enum _ecore_status_t rc = ECORE_SUCCESS; + + /*TODO - what about MACs? 
*/ + + if ((events & (1 << VLAN_ADDR_FORCED)) && + !(p_vf->configured_features & (1 << VLAN_ADDR_FORCED))) + rc = ecore_iov_reconfigure_unicast_vlan(p_hwfn, p_vf); + + return rc; +} + +static enum _ecore_status_t +ecore_iov_configure_vport_forced(struct ecore_hwfn *p_hwfn, + struct ecore_vf_info *p_vf, + u64 events) +{ + enum _ecore_status_t rc = ECORE_SUCCESS; + struct ecore_filter_ucast filter; + + if (!p_vf->vport_instance) + return ECORE_INVAL; + + if ((events & (1 << MAC_ADDR_FORCED)) || + p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change || + p_vf->p_vf_info.is_trusted_configured) { + /* Since there's no way [currently] of removing the MAC, + * we can always assume this means we need to force it. + */ + OSAL_MEMSET(&filter, 0, sizeof(filter)); + filter.type = ECORE_FILTER_MAC; + filter.opcode = ECORE_FILTER_REPLACE; + filter.is_rx_filter = 1; + filter.is_tx_filter = 1; + filter.vport_to_add_to = p_vf->vport_id; + OSAL_MEMCPY(filter.mac, p_vf->bulletin.p_virt->mac, ETH_ALEN); + + rc = ecore_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid, + &filter, + ECORE_SPQ_MODE_CB, OSAL_NULL); + if (rc) { + DP_NOTICE(p_hwfn, true, + "PF failed to configure MAC for VF\n"); + return rc; + } + + if (p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change || + p_vf->p_vf_info.is_trusted_configured) + p_vf->configured_features |= + 1 << VFPF_BULLETIN_MAC_ADDR; + else + p_vf->configured_features |= 1 << MAC_ADDR_FORCED; + } + + if (events & (1 << VLAN_ADDR_FORCED)) { + struct ecore_sp_vport_update_params vport_update; + u8 removal; + int i; + + OSAL_MEMSET(&filter, 0, sizeof(filter)); + filter.type = ECORE_FILTER_VLAN; + filter.is_rx_filter = 1; + filter.is_tx_filter = 1; + filter.vport_to_add_to = p_vf->vport_id; + filter.vlan = p_vf->bulletin.p_virt->pvid; + filter.opcode = filter.vlan ? ECORE_FILTER_REPLACE : + ECORE_FILTER_FLUSH; + + /* Send the ramrod */ + rc = ecore_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid, + &filter, + ECORE_SPQ_MODE_CB, OSAL_NULL); + if (rc) { + DP_NOTICE(p_hwfn, true, + "PF failed to configure VLAN for VF\n"); + return rc; + } + + /* Update the default-vlan & silent vlan stripping */ + OSAL_MEMSET(&vport_update, 0, sizeof(vport_update)); + vport_update.opaque_fid = p_vf->opaque_fid; + vport_update.vport_id = p_vf->vport_id; + vport_update.update_default_vlan_enable_flg = 1; + vport_update.default_vlan_enable_flg = filter.vlan ? 1 : 0; + vport_update.update_default_vlan_flg = 1; + vport_update.default_vlan = filter.vlan; + + vport_update.update_inner_vlan_removal_flg = 1; + removal = filter.vlan ? + 1 : p_vf->shadow_config.inner_vlan_removal; + vport_update.inner_vlan_removal_flg = removal; + vport_update.silent_vlan_removal_flg = filter.vlan ? 1 : 0; + rc = ecore_sp_vport_update(p_hwfn, &vport_update, + ECORE_SPQ_MODE_EBLOCK, OSAL_NULL); + if (rc) { + DP_NOTICE(p_hwfn, true, + "PF failed to configure VF vport for vlan\n"); + return rc; + } + + /* Update all the Rx queues */ + for (i = 0; i < ECORE_MAX_VF_CHAINS_PER_PF; i++) { + struct ecore_vf_queue *p_queue = &p_vf->vf_queues[i]; + struct ecore_queue_cid *p_cid = OSAL_NULL; + + /* There can be at most 1 Rx queue on qzone. 
Find it */ + p_cid = ecore_iov_get_vf_rx_queue_cid(p_queue); + if (p_cid == OSAL_NULL) + continue; + + rc = ecore_sp_eth_rx_queues_update(p_hwfn, + (void **)&p_cid, + 1, 0, 1, + ECORE_SPQ_MODE_EBLOCK, + OSAL_NULL); + if (rc) { + DP_NOTICE(p_hwfn, true, + "Failed to send Rx update" + " fo queue[0x%04x]\n", + p_cid->rel.queue_id); + return rc; + } + } + + if (filter.vlan) + p_vf->configured_features |= 1 << VLAN_ADDR_FORCED; + else + p_vf->configured_features &= ~(1 << VLAN_ADDR_FORCED); + } + + /* If forced features are terminated, we need to configure the shadow + * configuration back again. + */ + if (events) + ecore_iov_reconfigure_unicast_shadow(p_hwfn, p_vf, events); + + return rc; +} + +static void ecore_iov_vf_mbx_start_vport(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_vf_info *vf) +{ + struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx; + struct ecore_sp_vport_start_params params; + struct vfpf_vport_start_tlv *start; + u8 status = PFVF_STATUS_SUCCESS; + struct ecore_vf_info *vf_info; + u64 *p_bitmap; + int sb_id; + enum _ecore_status_t rc; + + vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vf->relative_vf_id, true); + if (!vf_info) { + DP_NOTICE(p_hwfn->p_dev, true, + "Failed to get VF info, invalid vfid [%d]\n", + vf->relative_vf_id); + return; + } + + vf->state = VF_ENABLED; + start = &mbx->req_virt->start_vport; + + ecore_iov_enable_vf_traffic(p_hwfn, p_ptt, vf); + + /* Initialize Status block in CAU */ + for (sb_id = 0; sb_id < vf->num_sbs; sb_id++) { + if (!start->sb_addr[sb_id]) { + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "VF[%d] did not fill the address of SB %d\n", + vf->relative_vf_id, sb_id); + break; + } + + ecore_int_cau_conf_sb(p_hwfn, p_ptt, + start->sb_addr[sb_id], + vf->igu_sbs[sb_id], + vf->abs_vf_id, 1); + } + + vf->mtu = start->mtu; + vf->shadow_config.inner_vlan_removal = start->inner_vlan_removal; + + /* Take into consideration configuration forced by hypervisor; + * If none is configured, use the supplied VF values [for old + * vfs that would still be fine, since they passed '0' as padding]. 
+ */ + p_bitmap = &vf_info->bulletin.p_virt->valid_bitmap; + if (!(*p_bitmap & (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED))) { + u8 vf_req = start->only_untagged; + + vf_info->bulletin.p_virt->default_only_untagged = vf_req; + *p_bitmap |= 1 << VFPF_BULLETIN_UNTAGGED_DEFAULT; + } + + OSAL_MEMSET(¶ms, 0, sizeof(struct ecore_sp_vport_start_params)); + params.tpa_mode = start->tpa_mode; + params.remove_inner_vlan = start->inner_vlan_removal; + params.tx_switching = true; + +#ifndef ASIC_ONLY + if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) { + DP_NOTICE(p_hwfn, false, + "FPGA: Don't config VF for Tx-switching [no pVFC]\n"); + params.tx_switching = false; + } +#endif + + params.only_untagged = vf_info->bulletin.p_virt->default_only_untagged; + params.drop_ttl0 = false; + params.concrete_fid = vf->concrete_fid; + params.opaque_fid = vf->opaque_fid; + params.vport_id = vf->vport_id; + params.max_buffers_per_cqe = start->max_buffers_per_cqe; + params.mtu = vf->mtu; + + /* Non trusted VFs should enable control frame filtering */ + params.check_mac = !vf->p_vf_info.is_trusted_configured; + + rc = ecore_sp_eth_vport_start(p_hwfn, ¶ms); + if (rc != ECORE_SUCCESS) { + DP_ERR(p_hwfn, + "ecore_iov_vf_mbx_start_vport returned error %d\n", rc); + status = PFVF_STATUS_FAILURE; + } else { + vf->vport_instance++; + + /* Force configuration if needed on the newly opened vport */ + ecore_iov_configure_vport_forced(p_hwfn, vf, *p_bitmap); + OSAL_IOV_POST_START_VPORT(p_hwfn, vf->relative_vf_id, + vf->vport_id, vf->opaque_fid); + __ecore_iov_spoofchk_set(p_hwfn, vf, vf->req_spoofchk_val); + } + + ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_START, + sizeof(struct pfvf_def_resp_tlv), status); +} + +static void ecore_iov_vf_mbx_stop_vport(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_vf_info *vf) +{ + u8 status = PFVF_STATUS_SUCCESS; + enum _ecore_status_t rc; + + OSAL_IOV_VF_VPORT_STOP(p_hwfn, vf); + vf->vport_instance--; + vf->spoof_chk = false; + + if ((ecore_iov_validate_active_rxq(vf)) || + (ecore_iov_validate_active_txq(vf))) { + vf->b_malicious = true; + DP_NOTICE(p_hwfn, false, + "VF [%02x] - considered malicious;" + " Unable to stop RX/TX queuess\n", + vf->abs_vf_id); + status = PFVF_STATUS_MALICIOUS; + goto out; + } + + rc = ecore_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id); + if (rc != ECORE_SUCCESS) { + DP_ERR(p_hwfn, + "ecore_iov_vf_mbx_stop_vport returned error %d\n", rc); + status = PFVF_STATUS_FAILURE; + } + + /* Forget the configuration on the vport */ + vf->configured_features = 0; + OSAL_MEMSET(&vf->shadow_config, 0, sizeof(vf->shadow_config)); + +out: + ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_TEARDOWN, + sizeof(struct pfvf_def_resp_tlv), status); +} + +static void ecore_iov_vf_mbx_start_rxq_resp(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_vf_info *vf, + u8 status, bool b_legacy) +{ + struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx; + struct pfvf_start_queue_resp_tlv *p_tlv; + struct vfpf_start_rxq_tlv *req; + u16 length; + + mbx->offset = (u8 *)mbx->reply_virt; + + /* Taking a bigger struct instead of adding a TLV to list was a + * mistake, but one which we're now stuck with, as some older + * clients assume the size of the previous response. 
+ */ + if (!b_legacy) + length = sizeof(*p_tlv); + else + length = sizeof(struct pfvf_def_resp_tlv); + + p_tlv = ecore_add_tlv(&mbx->offset, CHANNEL_TLV_START_RXQ, length); + ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + /* Update the TLV with the response. + * The VF Rx producers are located in the vf zone. + */ + if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) { + req = &mbx->req_virt->start_rxq; + + p_tlv->offset = + PXP_VF_BAR0_START_MSDM_ZONE_B + + OFFSETOF(struct mstorm_vf_zone, + non_trigger.eth_rx_queue_producers) + + sizeof(struct eth_rx_prod_data) * req->rx_qid; + } + + ecore_iov_send_response(p_hwfn, p_ptt, vf, length, status); +} + +static u8 ecore_iov_vf_mbx_qid(struct ecore_hwfn *p_hwfn, + struct ecore_vf_info *p_vf, bool b_is_tx) +{ + struct ecore_iov_vf_mbx *p_mbx = &p_vf->vf_mbx; + struct vfpf_qid_tlv *p_qid_tlv; + + /* Search for the qid if the VF published if its going to provide it */ + if (!(p_vf->acquire.vfdev_info.capabilities & + VFPF_ACQUIRE_CAP_QUEUE_QIDS)) { + if (b_is_tx) + return ECORE_IOV_LEGACY_QID_TX; + else + return ECORE_IOV_LEGACY_QID_RX; + } + + p_qid_tlv = (struct vfpf_qid_tlv *) + ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, + CHANNEL_TLV_QID); + if (p_qid_tlv == OSAL_NULL) { + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "VF[%2x]: Failed to provide qid\n", + p_vf->relative_vf_id); + + return ECORE_IOV_QID_INVALID; + } + + if (p_qid_tlv->qid >= MAX_QUEUES_PER_QZONE) { + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "VF[%02x]: Provided qid out-of-bounds %02x\n", + p_vf->relative_vf_id, p_qid_tlv->qid); + return ECORE_IOV_QID_INVALID; + } + + return p_qid_tlv->qid; +} + +static void ecore_iov_vf_mbx_start_rxq(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_vf_info *vf) +{ + struct ecore_queue_start_common_params params; + struct ecore_queue_cid_vf_params vf_params; + struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx; + u8 status = PFVF_STATUS_NO_RESOURCE; + u8 qid_usage_idx, vf_legacy = 0; + struct ecore_vf_queue *p_queue; + struct vfpf_start_rxq_tlv *req; + struct ecore_queue_cid *p_cid; + struct ecore_sb_info sb_dummy; + enum _ecore_status_t rc; + + req = &mbx->req_virt->start_rxq; + + if (!ecore_iov_validate_rxq(p_hwfn, vf, req->rx_qid, + ECORE_IOV_VALIDATE_Q_DISABLE) || + !ecore_iov_validate_sb(p_hwfn, vf, req->hw_sb)) + goto out; + + qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, false); + if (qid_usage_idx == ECORE_IOV_QID_INVALID) + goto out; + + p_queue = &vf->vf_queues[req->rx_qid]; + if (p_queue->cids[qid_usage_idx].p_cid) + goto out; + + vf_legacy = ecore_vf_calculate_legacy(vf); + + /* Acquire a new queue-cid */ + OSAL_MEMSET(¶ms, 0, sizeof(params)); + params.queue_id = (u8)p_queue->fw_rx_qid; + params.vport_id = vf->vport_id; + params.stats_id = vf->abs_vf_id + 0x10; + + /* Since IGU index is passed via sb_info, construct a dummy one */ + OSAL_MEM_ZERO(&sb_dummy, sizeof(sb_dummy)); + sb_dummy.igu_sb_id = req->hw_sb; + params.p_sb = &sb_dummy; + params.sb_idx = req->sb_index; + + OSAL_MEM_ZERO(&vf_params, sizeof(vf_params)); + vf_params.vfid = vf->relative_vf_id; + vf_params.vf_qid = (u8)req->rx_qid; + vf_params.vf_legacy = vf_legacy; + vf_params.qid_usage_idx = qid_usage_idx; + + p_cid = ecore_eth_queue_to_cid(p_hwfn, vf->opaque_fid, + ¶ms, true, &vf_params); + if (p_cid == OSAL_NULL) + goto out; + + /* The VF Rx producers are located in the vf zone. + * Legacy VFs have their producers in the queue zone, but they + * calculate the location by their own and clean them prior to this. 
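ecore_iov_vf_mbx_qid() above encodes one compatibility rule: VFs that never advertised VFPF_ACQUIRE_CAP_QUEUE_QIDS get the fixed legacy Rx/Tx slots, while newer VFs must attach a CHANNEL_TLV_QID whose value is bounds-checked against MAX_QUEUES_PER_QZONE. Roughly, as a standalone helper with invented constant values:

#include <stdint.h>
#include <stdio.h>

#define CAP_QUEUE_QIDS   0x1
#define LEGACY_QID_RX    0
#define LEGACY_QID_TX    1
#define QID_INVALID      0xff
#define QUEUES_PER_QZONE 4

/* has_qid_tlv/qid model the optional qid TLV the VF may append. */
static uint8_t resolve_qid(uint32_t vf_caps, int is_tx, int has_qid_tlv, uint8_t qid)
{
	if (!(vf_caps & CAP_QUEUE_QIDS))
		return is_tx ? LEGACY_QID_TX : LEGACY_QID_RX;
	if (!has_qid_tlv || qid >= QUEUES_PER_QZONE)
		return QID_INVALID;
	return qid;
}

int main(void)
{
	printf("legacy rx slot: %u\n", (unsigned int)resolve_qid(0, 0, 0, 0));
	printf("modern tx qid : %u\n", (unsigned int)resolve_qid(CAP_QUEUE_QIDS, 1, 1, 2));
	return 0;
}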
+ */ + if (!(vf_legacy & ECORE_QCID_LEGACY_VF_RX_PROD)) + REG_WR(p_hwfn, + GTT_BAR0_MAP_REG_MSDM_RAM + + MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id, + req->rx_qid), + 0); + + rc = ecore_eth_rxq_start_ramrod(p_hwfn, p_cid, + req->bd_max_bytes, + req->rxq_addr, + req->cqe_pbl_addr, + req->cqe_pbl_size); + if (rc != ECORE_SUCCESS) { + status = PFVF_STATUS_FAILURE; + ecore_eth_queue_cid_release(p_hwfn, p_cid); + } else { + p_queue->cids[qid_usage_idx].p_cid = p_cid; + p_queue->cids[qid_usage_idx].b_is_tx = false; + status = PFVF_STATUS_SUCCESS; + vf->num_active_rxqs++; + } + +out: + ecore_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status, + !!(vf_legacy & + ECORE_QCID_LEGACY_VF_RX_PROD)); +} + +static void +ecore_iov_pf_update_tun_response(struct pfvf_update_tunn_param_tlv *p_resp, + struct ecore_tunnel_info *p_tun, + u16 tunn_feature_mask) +{ + p_resp->tunn_feature_mask = tunn_feature_mask; + p_resp->vxlan_mode = p_tun->vxlan.b_mode_enabled; + p_resp->l2geneve_mode = p_tun->l2_geneve.b_mode_enabled; + p_resp->ipgeneve_mode = p_tun->ip_geneve.b_mode_enabled; + p_resp->l2gre_mode = p_tun->l2_gre.b_mode_enabled; + p_resp->ipgre_mode = p_tun->l2_gre.b_mode_enabled; + p_resp->vxlan_clss = p_tun->vxlan.tun_cls; + p_resp->l2gre_clss = p_tun->l2_gre.tun_cls; + p_resp->ipgre_clss = p_tun->ip_gre.tun_cls; + p_resp->l2geneve_clss = p_tun->l2_geneve.tun_cls; + p_resp->ipgeneve_clss = p_tun->ip_geneve.tun_cls; + p_resp->geneve_udp_port = p_tun->geneve_port.port; + p_resp->vxlan_udp_port = p_tun->vxlan_port.port; +} + +static void +__ecore_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req, + struct ecore_tunn_update_type *p_tun, + enum ecore_tunn_mode mask, u8 tun_cls) +{ + if (p_req->tun_mode_update_mask & (1 << mask)) { + p_tun->b_update_mode = true; + + if (p_req->tunn_mode & (1 << mask)) + p_tun->b_mode_enabled = true; + } + + p_tun->tun_cls = tun_cls; +} + +static void +ecore_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req, + struct ecore_tunn_update_type *p_tun, + struct ecore_tunn_update_udp_port *p_port, + enum ecore_tunn_mode mask, + u8 tun_cls, u8 update_port, u16 port) +{ + if (update_port) { + p_port->b_update_port = true; + p_port->port = port; + } + + __ecore_iov_pf_update_tun_param(p_req, p_tun, mask, tun_cls); +} + +static bool +ecore_iov_pf_validate_tunn_param(struct vfpf_update_tunn_param_tlv *p_req) +{ + bool b_update_requested = false; + + if (p_req->tun_mode_update_mask || p_req->update_tun_cls || + p_req->update_geneve_port || p_req->update_vxlan_port) + b_update_requested = true; + + return b_update_requested; +} + +static void ecore_iov_vf_mbx_update_tunn_param(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_vf_info *p_vf) +{ + struct ecore_tunnel_info *p_tun = &p_hwfn->p_dev->tunnel; + struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx; + struct pfvf_update_tunn_param_tlv *p_resp; + struct vfpf_update_tunn_param_tlv *p_req; + enum _ecore_status_t rc = ECORE_SUCCESS; + u8 status = PFVF_STATUS_SUCCESS; + bool b_update_required = false; + struct ecore_tunnel_info tunn; + u16 tunn_feature_mask = 0; + int i; + + mbx->offset = (u8 *)mbx->reply_virt; + + OSAL_MEM_ZERO(&tunn, sizeof(tunn)); + p_req = &mbx->req_virt->tunn_param_update; + + if (!ecore_iov_pf_validate_tunn_param(p_req)) { + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "No tunnel update requested by VF\n"); + status = PFVF_STATUS_FAILURE; + goto send_resp; + } + + tunn.b_update_rx_cls = p_req->update_tun_cls; + tunn.b_update_tx_cls = p_req->update_tun_cls; + + 
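Each tunnel type in the update request is decoded with the same two masks, as in __ecore_iov_pf_update_tun_param(): a bit in tun_mode_update_mask means "the VF wants this mode touched" and the matching bit in tunn_mode means "enable it". A minimal standalone rendering of that decode (bit numbers are placeholders):

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

struct tun_type { bool b_update_mode; bool b_mode_enabled; uint8_t tun_cls; };

static void decode_tun_mode(struct tun_type *t, uint16_t update_mask,
			    uint16_t mode_bits, unsigned int bit, uint8_t cls)
{
	if (update_mask & (1U << bit)) {
		t->b_update_mode = true;
		if (mode_bits & (1U << bit))
			t->b_mode_enabled = true;
	}
	t->tun_cls = cls;
}

int main(void)
{
	struct tun_type vxlan = { 0 };

	/* bit 0 plays the role of the VXLAN mode bit here */
	decode_tun_mode(&vxlan, 0x1, 0x1, 0, 2);
	printf("update=%d enabled=%d cls=%u\n",
	       vxlan.b_update_mode, vxlan.b_mode_enabled, (unsigned int)vxlan.tun_cls);
	return 0;
}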
ecore_iov_pf_update_tun_param(p_req, &tunn.vxlan, &tunn.vxlan_port, + ECORE_MODE_VXLAN_TUNN, p_req->vxlan_clss, + p_req->update_vxlan_port, + p_req->vxlan_port); + ecore_iov_pf_update_tun_param(p_req, &tunn.l2_geneve, &tunn.geneve_port, + ECORE_MODE_L2GENEVE_TUNN, + p_req->l2geneve_clss, + p_req->update_geneve_port, + p_req->geneve_port); + __ecore_iov_pf_update_tun_param(p_req, &tunn.ip_geneve, + ECORE_MODE_IPGENEVE_TUNN, + p_req->ipgeneve_clss); + __ecore_iov_pf_update_tun_param(p_req, &tunn.l2_gre, + ECORE_MODE_L2GRE_TUNN, + p_req->l2gre_clss); + __ecore_iov_pf_update_tun_param(p_req, &tunn.ip_gre, + ECORE_MODE_IPGRE_TUNN, + p_req->ipgre_clss); + + /* If PF modifies VF's req then it should + * still return an error in case of partial configuration + * or modified configuration as opposed to requested one. + */ + rc = OSAL_PF_VALIDATE_MODIFY_TUNN_CONFIG(p_hwfn, &tunn_feature_mask, + &b_update_required, &tunn); + + if (rc != ECORE_SUCCESS) + status = PFVF_STATUS_FAILURE; + + /* If ECORE client is willing to update anything ? */ + if (b_update_required) { + u16 geneve_port; + + rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, p_ptt, &tunn, + ECORE_SPQ_MODE_EBLOCK, + OSAL_NULL); + if (rc != ECORE_SUCCESS) + status = PFVF_STATUS_FAILURE; + + geneve_port = p_tun->geneve_port.port; + ecore_for_each_vf(p_hwfn, i) { + ecore_iov_bulletin_set_udp_ports(p_hwfn, i, + p_tun->vxlan_port.port, + geneve_port); + } + } + +send_resp: + p_resp = ecore_add_tlv(&mbx->offset, + CHANNEL_TLV_UPDATE_TUNN_PARAM, sizeof(*p_resp)); + + ecore_iov_pf_update_tun_response(p_resp, p_tun, tunn_feature_mask); + ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + ecore_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status); +} + +static void ecore_iov_vf_mbx_start_txq_resp(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_vf_info *p_vf, + u32 cid, + u8 status) +{ + struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx; + struct pfvf_start_queue_resp_tlv *p_tlv; + bool b_legacy = false; + u16 length; + + mbx->offset = (u8 *)mbx->reply_virt; + + /* Taking a bigger struct instead of adding a TLV to list was a + * mistake, but one which we're now stuck with, as some older + * clients assume the size of the previous response. 
+ */ + if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor == + ETH_HSI_VER_NO_PKT_LEN_TUNN) + b_legacy = true; + + if (!b_legacy) + length = sizeof(*p_tlv); + else + length = sizeof(struct pfvf_def_resp_tlv); + + p_tlv = ecore_add_tlv(&mbx->offset, CHANNEL_TLV_START_TXQ, length); + ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + /* Update the TLV with the response */ + if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) + p_tlv->offset = DB_ADDR_VF(cid, DQ_DEMS_LEGACY); + + ecore_iov_send_response(p_hwfn, p_ptt, p_vf, length, status); +} + +static void ecore_iov_vf_mbx_start_txq(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_vf_info *vf) +{ + struct ecore_queue_start_common_params params; + struct ecore_queue_cid_vf_params vf_params; + struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx; + u8 status = PFVF_STATUS_NO_RESOURCE; + struct ecore_vf_queue *p_queue; + struct vfpf_start_txq_tlv *req; + struct ecore_queue_cid *p_cid; + struct ecore_sb_info sb_dummy; + u8 qid_usage_idx, vf_legacy; + u32 cid = 0; + enum _ecore_status_t rc; + u16 pq; + + OSAL_MEMSET(¶ms, 0, sizeof(params)); + req = &mbx->req_virt->start_txq; + + if (!ecore_iov_validate_txq(p_hwfn, vf, req->tx_qid, + ECORE_IOV_VALIDATE_Q_NA) || + !ecore_iov_validate_sb(p_hwfn, vf, req->hw_sb)) + goto out; + + qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, true); + if (qid_usage_idx == ECORE_IOV_QID_INVALID) + goto out; + + p_queue = &vf->vf_queues[req->tx_qid]; + if (p_queue->cids[qid_usage_idx].p_cid) + goto out; + + vf_legacy = ecore_vf_calculate_legacy(vf); + + /* Acquire a new queue-cid */ + params.queue_id = p_queue->fw_tx_qid; + params.vport_id = vf->vport_id; + params.stats_id = vf->abs_vf_id + 0x10; + + /* Since IGU index is passed via sb_info, construct a dummy one */ + OSAL_MEM_ZERO(&sb_dummy, sizeof(sb_dummy)); + sb_dummy.igu_sb_id = req->hw_sb; + params.p_sb = &sb_dummy; + params.sb_idx = req->sb_index; + + OSAL_MEM_ZERO(&vf_params, sizeof(vf_params)); + vf_params.vfid = vf->relative_vf_id; + vf_params.vf_qid = (u8)req->tx_qid; + vf_params.vf_legacy = vf_legacy; + vf_params.qid_usage_idx = qid_usage_idx; + + p_cid = ecore_eth_queue_to_cid(p_hwfn, vf->opaque_fid, + ¶ms, false, &vf_params); + if (p_cid == OSAL_NULL) + goto out; + + pq = ecore_get_cm_pq_idx_vf(p_hwfn, + vf->relative_vf_id); + rc = ecore_eth_txq_start_ramrod(p_hwfn, p_cid, + req->pbl_addr, req->pbl_size, pq); + if (rc != ECORE_SUCCESS) { + status = PFVF_STATUS_FAILURE; + ecore_eth_queue_cid_release(p_hwfn, p_cid); + } else { + status = PFVF_STATUS_SUCCESS; + p_queue->cids[qid_usage_idx].p_cid = p_cid; + p_queue->cids[qid_usage_idx].b_is_tx = true; + cid = p_cid->cid; + } + +out: + ecore_iov_vf_mbx_start_txq_resp(p_hwfn, p_ptt, vf, + cid, status); +} + +static enum _ecore_status_t ecore_iov_vf_stop_rxqs(struct ecore_hwfn *p_hwfn, + struct ecore_vf_info *vf, + u16 rxq_id, + u8 qid_usage_idx, + bool cqe_completion) +{ + struct ecore_vf_queue *p_queue; + enum _ecore_status_t rc = ECORE_SUCCESS; + + if (!ecore_iov_validate_rxq(p_hwfn, vf, rxq_id, + ECORE_IOV_VALIDATE_Q_NA)) { + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "VF[%d] Tried Closing Rx 0x%04x.%02x which is inactive\n", + vf->relative_vf_id, rxq_id, qid_usage_idx); + return ECORE_INVAL; + } + + p_queue = &vf->vf_queues[rxq_id]; + + /* We've validated the index and the existence of the active RXQ - + * now we need to make sure that it's using the correct qid. 
+ */ + if (!p_queue->cids[qid_usage_idx].p_cid || + p_queue->cids[qid_usage_idx].b_is_tx) { + struct ecore_queue_cid *p_cid; + + p_cid = ecore_iov_get_vf_rx_queue_cid(p_queue); + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "VF[%d] - Tried Closing Rx 0x%04x.%02x, but Rx is at %04x.%02x\n", + vf->relative_vf_id, rxq_id, qid_usage_idx, + rxq_id, p_cid->qid_usage_idx); + return ECORE_INVAL; + } + + /* Now that we know we have a valid Rx-queue - close it */ + rc = ecore_eth_rx_queue_stop(p_hwfn, + p_queue->cids[qid_usage_idx].p_cid, + false, cqe_completion); + if (rc != ECORE_SUCCESS) + return rc; + + p_queue->cids[qid_usage_idx].p_cid = OSAL_NULL; + vf->num_active_rxqs--; + + return ECORE_SUCCESS; +} + +static enum _ecore_status_t ecore_iov_vf_stop_txqs(struct ecore_hwfn *p_hwfn, + struct ecore_vf_info *vf, + u16 txq_id, + u8 qid_usage_idx) +{ + struct ecore_vf_queue *p_queue; + enum _ecore_status_t rc = ECORE_SUCCESS; + + if (!ecore_iov_validate_txq(p_hwfn, vf, txq_id, + ECORE_IOV_VALIDATE_Q_NA)) + return ECORE_INVAL; + + p_queue = &vf->vf_queues[txq_id]; + if (!p_queue->cids[qid_usage_idx].p_cid || + !p_queue->cids[qid_usage_idx].b_is_tx) + return ECORE_INVAL; + + rc = ecore_eth_tx_queue_stop(p_hwfn, + p_queue->cids[qid_usage_idx].p_cid); + if (rc != ECORE_SUCCESS) + return rc; + + p_queue->cids[qid_usage_idx].p_cid = OSAL_NULL; + return ECORE_SUCCESS; +} + +static void ecore_iov_vf_mbx_stop_rxqs(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_vf_info *vf) +{ + u16 length = sizeof(struct pfvf_def_resp_tlv); + struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx; + u8 status = PFVF_STATUS_FAILURE; + struct vfpf_stop_rxqs_tlv *req; + u8 qid_usage_idx; + enum _ecore_status_t rc; + + /* Starting with CHANNEL_TLV_QID, it's assumed the 'num_rxqs' + * would be one. Since no older ecore passed multiple queues + * using this API, sanitize on the value. + */ + req = &mbx->req_virt->stop_rxqs; + if (req->num_rxqs != 1) { + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "Odd; VF[%d] tried stopping multiple Rx queues\n", + vf->relative_vf_id); + status = PFVF_STATUS_NOT_SUPPORTED; + goto out; + } + + /* Find which qid-index is associated with the queue */ + qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, false); + if (qid_usage_idx == ECORE_IOV_QID_INVALID) + goto out; + + rc = ecore_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid, + qid_usage_idx, req->cqe_completion); + if (rc == ECORE_SUCCESS) + status = PFVF_STATUS_SUCCESS; +out: + ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_RXQS, + length, status); +} + +static void ecore_iov_vf_mbx_stop_txqs(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_vf_info *vf) +{ + u16 length = sizeof(struct pfvf_def_resp_tlv); + struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx; + u8 status = PFVF_STATUS_FAILURE; + struct vfpf_stop_txqs_tlv *req; + u8 qid_usage_idx; + enum _ecore_status_t rc; + + /* Starting with CHANNEL_TLV_QID, it's assumed the 'num_txqs' + * would be one. Since no older ecore passed multiple queues + * using this API, sanitize on the value. 
+ */ + req = &mbx->req_virt->stop_txqs; + if (req->num_txqs != 1) { + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "Odd; VF[%d] tried stopping multiple Tx queues\n", + vf->relative_vf_id); + status = PFVF_STATUS_NOT_SUPPORTED; + goto out; + } + + /* Find which qid-index is associated with the queue */ + qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, true); + if (qid_usage_idx == ECORE_IOV_QID_INVALID) + goto out; + + rc = ecore_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid, + qid_usage_idx); + if (rc == ECORE_SUCCESS) + status = PFVF_STATUS_SUCCESS; + +out: + ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_TXQS, + length, status); +} + +static void ecore_iov_vf_mbx_update_rxqs(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_vf_info *vf) +{ + struct ecore_queue_cid *handlers[ECORE_MAX_VF_CHAINS_PER_PF]; + u16 length = sizeof(struct pfvf_def_resp_tlv); + struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx; + struct vfpf_update_rxq_tlv *req; + u8 status = PFVF_STATUS_FAILURE; + u8 complete_event_flg; + u8 complete_cqe_flg; + u8 qid_usage_idx; + enum _ecore_status_t rc; + u16 i; + + req = &mbx->req_virt->update_rxq; + complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG); + complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG); + + qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, false); + if (qid_usage_idx == ECORE_IOV_QID_INVALID) + goto out; + + /* Starting with the addition of CHANNEL_TLV_QID, this API started + * expecting a single queue at a time. Validate this. + */ + if ((vf->acquire.vfdev_info.capabilities & + VFPF_ACQUIRE_CAP_QUEUE_QIDS) && + req->num_rxqs != 1) { + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "VF[%d] supports QIDs but sends multiple queues\n", + vf->relative_vf_id); + goto out; + } + + /* Validate inputs - for the legacy case this is still true since + * qid_usage_idx for each Rx queue would be LEGACY_QID_RX. 
+ */ + for (i = req->rx_qid; i < req->rx_qid + req->num_rxqs; i++) { + if (!ecore_iov_validate_rxq(p_hwfn, vf, i, + ECORE_IOV_VALIDATE_Q_NA) || + !vf->vf_queues[i].cids[qid_usage_idx].p_cid || + vf->vf_queues[i].cids[qid_usage_idx].b_is_tx) { + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "VF[%d]: Incorrect Rxqs [%04x, %02x]\n", + vf->relative_vf_id, req->rx_qid, + req->num_rxqs); + goto out; + } + } + + for (i = 0; i < req->num_rxqs; i++) { + u16 qid = req->rx_qid + i; + + handlers[i] = vf->vf_queues[qid].cids[qid_usage_idx].p_cid; + } + + rc = ecore_sp_eth_rx_queues_update(p_hwfn, (void **)&handlers, + req->num_rxqs, + complete_cqe_flg, + complete_event_flg, + ECORE_SPQ_MODE_EBLOCK, + OSAL_NULL); + if (rc != ECORE_SUCCESS) + goto out; + + status = PFVF_STATUS_SUCCESS; +out: + ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UPDATE_RXQ, + length, status); +} + +static enum _ecore_status_t +ecore_iov_vf_pf_update_mtu(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_vf_info *p_vf) +{ + struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx; + struct ecore_sp_vport_update_params params; + enum _ecore_status_t rc = ECORE_SUCCESS; + struct vfpf_update_mtu_tlv *p_req; + u8 status = PFVF_STATUS_SUCCESS; + + /* Valiate PF can send such a request */ + if (!p_vf->vport_instance) { + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "No VPORT instance available for VF[%d], failing MTU update\n", + p_vf->abs_vf_id); + status = PFVF_STATUS_FAILURE; + goto send_status; + } + + p_req = &mbx->req_virt->update_mtu; + + OSAL_MEMSET(¶ms, 0, sizeof(params)); + params.opaque_fid = p_vf->opaque_fid; + params.vport_id = p_vf->vport_id; + params.mtu = p_req->mtu; + rc = ecore_sp_vport_update(p_hwfn, ¶ms, ECORE_SPQ_MODE_EBLOCK, + OSAL_NULL); + + if (rc) + status = PFVF_STATUS_FAILURE; +send_status: + ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf, + CHANNEL_TLV_UPDATE_MTU, + sizeof(struct pfvf_def_resp_tlv), + status); + return rc; +} + +void *ecore_iov_search_list_tlvs(struct ecore_hwfn *p_hwfn, + void *p_tlvs_list, u16 req_type) +{ + struct channel_tlv *p_tlv = (struct channel_tlv *)p_tlvs_list; + int len = 0; + + do { + if (!p_tlv->length) { + DP_NOTICE(p_hwfn, true, "Zero length TLV found\n"); + return OSAL_NULL; + } + + if (p_tlv->type == req_type) { + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "Extended tlv type %s, length %d found\n", + qede_ecore_channel_tlvs_string[p_tlv->type], + p_tlv->length); + return p_tlv; + } + + len += p_tlv->length; + p_tlv = (struct channel_tlv *)((u8 *)p_tlv + p_tlv->length); + + if ((len + p_tlv->length) > TLV_BUFFER_SIZE) { + DP_NOTICE(p_hwfn, true, + "TLVs has overrun the buffer size\n"); + return OSAL_NULL; + } + } while (p_tlv->type != CHANNEL_TLV_LIST_END); + + return OSAL_NULL; +} + +static void +ecore_iov_vp_update_act_param(struct ecore_hwfn *p_hwfn, + struct ecore_sp_vport_update_params *p_data, + struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask) +{ + struct vfpf_vport_update_activate_tlv *p_act_tlv; + u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE; + + p_act_tlv = (struct vfpf_vport_update_activate_tlv *) + ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv); + if (!p_act_tlv) + return; + + p_data->update_vport_active_rx_flg = p_act_tlv->update_rx; + p_data->vport_active_rx_flg = p_act_tlv->active_rx; + p_data->update_vport_active_tx_flg = p_act_tlv->update_tx; + p_data->vport_active_tx_flg = p_act_tlv->active_tx; + *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACTIVATE; +} + +static void +ecore_iov_vp_update_vlan_param(struct ecore_hwfn *p_hwfn, + struct ecore_sp_vport_update_params 
*p_data, + struct ecore_vf_info *p_vf, + struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask) +{ + struct vfpf_vport_update_vlan_strip_tlv *p_vlan_tlv; + u16 tlv = CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP; + + p_vlan_tlv = (struct vfpf_vport_update_vlan_strip_tlv *) + ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv); + if (!p_vlan_tlv) + return; + + p_vf->shadow_config.inner_vlan_removal = p_vlan_tlv->remove_vlan; + + /* Ignore the VF request if we're forcing a vlan */ + if (!(p_vf->configured_features & (1 << VLAN_ADDR_FORCED))) { + p_data->update_inner_vlan_removal_flg = 1; + p_data->inner_vlan_removal_flg = p_vlan_tlv->remove_vlan; + } + + *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_VLAN_STRIP; +} + +static void +ecore_iov_vp_update_tx_switch(struct ecore_hwfn *p_hwfn, + struct ecore_sp_vport_update_params *p_data, + struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask) +{ + struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv; + u16 tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH; + + p_tx_switch_tlv = (struct vfpf_vport_update_tx_switch_tlv *) + ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv); + if (!p_tx_switch_tlv) + return; + +#ifndef ASIC_ONLY + if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) { + DP_NOTICE(p_hwfn, false, + "FPGA: Ignore tx-switching configuration originating" + " from VFs\n"); + return; + } +#endif + + p_data->update_tx_switching_flg = 1; + p_data->tx_switching_flg = p_tx_switch_tlv->tx_switching; + *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_TX_SWITCH; +} + +static void +ecore_iov_vp_update_mcast_bin_param(struct ecore_hwfn *p_hwfn, + struct ecore_sp_vport_update_params *p_data, + struct ecore_iov_vf_mbx *p_mbx, + u16 *tlvs_mask) +{ + struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv; + u16 tlv = CHANNEL_TLV_VPORT_UPDATE_MCAST; + + p_mcast_tlv = (struct vfpf_vport_update_mcast_bin_tlv *) + ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv); + if (!p_mcast_tlv) + return; + + p_data->update_approx_mcast_flg = 1; + OSAL_MEMCPY(p_data->bins, p_mcast_tlv->bins, + sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS); + *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_MCAST; +} + +static void +ecore_iov_vp_update_accept_flag(struct ecore_hwfn *p_hwfn, + struct ecore_sp_vport_update_params *p_data, + struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask) +{ + struct ecore_filter_accept_flags *p_flags = &p_data->accept_flags; + struct vfpf_vport_update_accept_param_tlv *p_accept_tlv; + u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM; + + p_accept_tlv = (struct vfpf_vport_update_accept_param_tlv *) + ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv); + if (!p_accept_tlv) + return; + + p_flags->update_rx_mode_config = p_accept_tlv->update_rx_mode; + p_flags->rx_accept_filter = p_accept_tlv->rx_accept_filter; + p_flags->update_tx_mode_config = p_accept_tlv->update_tx_mode; + p_flags->tx_accept_filter = p_accept_tlv->tx_accept_filter; + *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACCEPT_PARAM; +} + +static void +ecore_iov_vp_update_accept_any_vlan(struct ecore_hwfn *p_hwfn, + struct ecore_sp_vport_update_params *p_data, + struct ecore_iov_vf_mbx *p_mbx, + u16 *tlvs_mask) +{ + struct vfpf_vport_update_accept_any_vlan_tlv *p_accept_any_vlan; + u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN; + + p_accept_any_vlan = (struct vfpf_vport_update_accept_any_vlan_tlv *) + ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv); + if (!p_accept_any_vlan) + return; + + p_data->accept_any_vlan = p_accept_any_vlan->accept_any_vlan; + p_data->update_accept_any_vlan_flg = + p_accept_any_vlan->update_accept_any_vlan_flg; + 
*tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACCEPT_ANY_VLAN; +} + +static void +ecore_iov_vp_update_rss_param(struct ecore_hwfn *p_hwfn, + struct ecore_vf_info *vf, + struct ecore_sp_vport_update_params *p_data, + struct ecore_rss_params *p_rss, + struct ecore_iov_vf_mbx *p_mbx, + u16 *tlvs_mask, u16 *tlvs_accepted) +{ + struct vfpf_vport_update_rss_tlv *p_rss_tlv; + u16 tlv = CHANNEL_TLV_VPORT_UPDATE_RSS; + bool b_reject = false; + u16 table_size; + u16 i, q_idx; + + p_rss_tlv = (struct vfpf_vport_update_rss_tlv *) + ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv); + if (!p_rss_tlv) { + p_data->rss_params = OSAL_NULL; + return; + } + + OSAL_MEMSET(p_rss, 0, sizeof(struct ecore_rss_params)); + + p_rss->update_rss_config = + !!(p_rss_tlv->update_rss_flags & + VFPF_UPDATE_RSS_CONFIG_FLAG); + p_rss->update_rss_capabilities = + !!(p_rss_tlv->update_rss_flags & + VFPF_UPDATE_RSS_CAPS_FLAG); + p_rss->update_rss_ind_table = + !!(p_rss_tlv->update_rss_flags & + VFPF_UPDATE_RSS_IND_TABLE_FLAG); + p_rss->update_rss_key = + !!(p_rss_tlv->update_rss_flags & + VFPF_UPDATE_RSS_KEY_FLAG); + + p_rss->rss_enable = p_rss_tlv->rss_enable; + p_rss->rss_eng_id = vf->rss_eng_id; + p_rss->rss_caps = p_rss_tlv->rss_caps; + p_rss->rss_table_size_log = p_rss_tlv->rss_table_size_log; + OSAL_MEMCPY(p_rss->rss_key, p_rss_tlv->rss_key, + sizeof(p_rss->rss_key)); + + table_size = OSAL_MIN_T(u16, OSAL_ARRAY_SIZE(p_rss->rss_ind_table), + (1 << p_rss_tlv->rss_table_size_log)); + + for (i = 0; i < table_size; i++) { + struct ecore_queue_cid *p_cid; + + q_idx = p_rss_tlv->rss_ind_table[i]; + if (!ecore_iov_validate_rxq(p_hwfn, vf, q_idx, + ECORE_IOV_VALIDATE_Q_ENABLE)) { + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "VF[%d]: Omitting RSS due to wrong queue %04x\n", + vf->relative_vf_id, q_idx); + b_reject = true; + goto out; + } + + p_cid = ecore_iov_get_vf_rx_queue_cid(&vf->vf_queues[q_idx]); + p_rss->rss_ind_table[i] = p_cid; + } + + p_data->rss_params = p_rss; +out: + *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_RSS; + if (!b_reject) + *tlvs_accepted |= 1 << ECORE_IOV_VP_UPDATE_RSS; +} + +static void +ecore_iov_vp_update_sge_tpa_param(struct ecore_hwfn *p_hwfn, + struct ecore_sp_vport_update_params *p_data, + struct ecore_sge_tpa_params *p_sge_tpa, + struct ecore_iov_vf_mbx *p_mbx, + u16 *tlvs_mask) +{ + struct vfpf_vport_update_sge_tpa_tlv *p_sge_tpa_tlv; + u16 tlv = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA; + + p_sge_tpa_tlv = (struct vfpf_vport_update_sge_tpa_tlv *) + ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv); + + if (!p_sge_tpa_tlv) { + p_data->sge_tpa_params = OSAL_NULL; + return; + } + + OSAL_MEMSET(p_sge_tpa, 0, sizeof(struct ecore_sge_tpa_params)); + + p_sge_tpa->update_tpa_en_flg = + !!(p_sge_tpa_tlv->update_sge_tpa_flags & VFPF_UPDATE_TPA_EN_FLAG); + p_sge_tpa->update_tpa_param_flg = + !!(p_sge_tpa_tlv->update_sge_tpa_flags & + VFPF_UPDATE_TPA_PARAM_FLAG); + + p_sge_tpa->tpa_ipv4_en_flg = + !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV4_EN_FLAG); + p_sge_tpa->tpa_ipv6_en_flg = + !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV6_EN_FLAG); + p_sge_tpa->tpa_pkt_split_flg = + !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_PKT_SPLIT_FLAG); + p_sge_tpa->tpa_hdr_data_split_flg = + !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_HDR_DATA_SPLIT_FLAG); + p_sge_tpa->tpa_gro_consistent_flg = + !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_GRO_CONSIST_FLAG); + + p_sge_tpa->tpa_max_aggs_num = p_sge_tpa_tlv->tpa_max_aggs_num; + p_sge_tpa->tpa_max_size = p_sge_tpa_tlv->tpa_max_size; + p_sge_tpa->tpa_min_size_to_start = 
p_sge_tpa_tlv->tpa_min_size_to_start; + p_sge_tpa->tpa_min_size_to_cont = p_sge_tpa_tlv->tpa_min_size_to_cont; + p_sge_tpa->max_buffers_per_cqe = p_sge_tpa_tlv->max_buffers_per_cqe; + + p_data->sge_tpa_params = p_sge_tpa; + + *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_SGE_TPA; +} + +static void ecore_iov_vf_mbx_vport_update(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_vf_info *vf) +{ + struct ecore_rss_params *p_rss_params = OSAL_NULL; + struct ecore_sp_vport_update_params params; + struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx; + struct ecore_sge_tpa_params sge_tpa_params; + u16 tlvs_mask = 0, tlvs_accepted = 0; + u8 status = PFVF_STATUS_SUCCESS; + u16 length; + enum _ecore_status_t rc; + + /* Valiate PF can send such a request */ + if (!vf->vport_instance) { + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "No VPORT instance available for VF[%d]," + " failing vport update\n", + vf->abs_vf_id); + status = PFVF_STATUS_FAILURE; + goto out; + } + + p_rss_params = OSAL_VZALLOC(p_hwfn->p_dev, sizeof(*p_rss_params)); + if (p_rss_params == OSAL_NULL) { + status = PFVF_STATUS_FAILURE; + goto out; + } + + OSAL_MEMSET(¶ms, 0, sizeof(params)); + params.opaque_fid = vf->opaque_fid; + params.vport_id = vf->vport_id; + params.rss_params = OSAL_NULL; + + /* Search for extended tlvs list and update values + * from VF in struct ecore_sp_vport_update_params. + */ + ecore_iov_vp_update_act_param(p_hwfn, ¶ms, mbx, &tlvs_mask); + ecore_iov_vp_update_vlan_param(p_hwfn, ¶ms, vf, mbx, &tlvs_mask); + ecore_iov_vp_update_tx_switch(p_hwfn, ¶ms, mbx, &tlvs_mask); + ecore_iov_vp_update_mcast_bin_param(p_hwfn, ¶ms, mbx, &tlvs_mask); + ecore_iov_vp_update_accept_flag(p_hwfn, ¶ms, mbx, &tlvs_mask); + ecore_iov_vp_update_accept_any_vlan(p_hwfn, ¶ms, mbx, &tlvs_mask); + ecore_iov_vp_update_sge_tpa_param(p_hwfn, ¶ms, + &sge_tpa_params, mbx, &tlvs_mask); + + tlvs_accepted = tlvs_mask; + + /* Some of the extended TLVs need to be validated first; In that case, + * they can update the mask without updating the accepted [so that + * PF could communicate to VF it has rejected request]. + */ + ecore_iov_vp_update_rss_param(p_hwfn, vf, ¶ms, p_rss_params, + mbx, &tlvs_mask, &tlvs_accepted); + + /* Just log a message if there is no single extended tlv in buffer. + * When all features of vport update ramrod would be requested by VF + * as extended TLVs in buffer then an error can be returned in response + * if there is no extended TLV present in buffer. 
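The vport-update path keeps two bitmaps: tlvs_mask collects every extended TLV the PF recognized, tlvs_accepted the subset it is willing to apply (RSS, for instance, is recognized but rejected when the indirection table names a bad queue), and the response reports per-feature status from the difference. A small standalone model of that bookkeeping with illustrative feature bits:

#include <stdint.h>
#include <stdio.h>

enum { FEAT_ACTIVATE, FEAT_VLAN_STRIP, FEAT_RSS, FEAT_MAX };

static const char *status_for(uint16_t mask, uint16_t accepted, int feat)
{
	if (!(mask & (1U << feat)))
		return "not present";
	return (accepted & (1U << feat)) ? "SUCCESS" : "NOT_SUPPORTED";
}

int main(void)
{
	uint16_t mask = 0, accepted = 0;
	int feat;

	/* Recognized and applied */
	mask |= 1U << FEAT_ACTIVATE;
	accepted |= 1U << FEAT_ACTIVATE;
	/* Recognized but rejected (e.g. RSS table pointed at a bad queue) */
	mask |= 1U << FEAT_RSS;

	for (feat = 0; feat < FEAT_MAX; feat++)
		printf("feature %d: %s\n", feat, status_for(mask, accepted, feat));
	return 0;
}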
+ */
+ if (OSAL_IOV_VF_VPORT_UPDATE(p_hwfn, vf->relative_vf_id,
+ &params, &tlvs_accepted) !=
+ ECORE_SUCCESS) {
+ tlvs_accepted = 0;
+ status = PFVF_STATUS_NOT_SUPPORTED;
+ goto out;
+ }
+
+ if (!tlvs_accepted) {
+ if (tlvs_mask)
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Upper-layer prevents said VF"
+ " configuration\n");
+ else
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "No feature tlvs found for vport update\n");
+ status = PFVF_STATUS_NOT_SUPPORTED;
+ goto out;
+ }
+
+ rc = ecore_sp_vport_update(p_hwfn, &params, ECORE_SPQ_MODE_EBLOCK,
+ OSAL_NULL);
+
+ if (rc)
+ status = PFVF_STATUS_FAILURE;
+
+out:
+ OSAL_VFREE(p_hwfn->p_dev, p_rss_params);
+ length = ecore_iov_prep_vp_update_resp_tlvs(p_hwfn, vf, mbx, status,
+ tlvs_mask, tlvs_accepted);
+ ecore_iov_send_response(p_hwfn, p_ptt, vf, length, status);
+}
+
+static enum _ecore_status_t
+ecore_iov_vf_update_vlan_shadow(struct ecore_hwfn *p_hwfn,
+ struct ecore_vf_info *p_vf,
+ struct ecore_filter_ucast *p_params)
+{
+ int i;
+
+ /* First remove entries and then add new ones */
+ if (p_params->opcode == ECORE_FILTER_REMOVE) {
+ for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
+ if (p_vf->shadow_config.vlans[i].used &&
+ p_vf->shadow_config.vlans[i].vid ==
+ p_params->vlan) {
+ p_vf->shadow_config.vlans[i].used = false;
+ break;
+ }
+ if (i == ECORE_ETH_VF_NUM_VLAN_FILTERS + 1) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF [%d] - Tries to remove a non-existing"
+ " vlan\n",
+ p_vf->relative_vf_id);
+ return ECORE_INVAL;
+ }
+ } else if (p_params->opcode == ECORE_FILTER_REPLACE ||
+ p_params->opcode == ECORE_FILTER_FLUSH) {
+ for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
+ p_vf->shadow_config.vlans[i].used = false;
+ }
+
+ /* In forced mode, we're willing to remove entries - but we don't add
+ * new ones.
+ */
+ if (p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED))
+ return ECORE_SUCCESS;
+
+ if (p_params->opcode == ECORE_FILTER_ADD ||
+ p_params->opcode == ECORE_FILTER_REPLACE) {
+ for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
+ if (p_vf->shadow_config.vlans[i].used)
+ continue;
+
+ p_vf->shadow_config.vlans[i].used = true;
+ p_vf->shadow_config.vlans[i].vid = p_params->vlan;
+ break;
+ }
+
+ if (i == ECORE_ETH_VF_NUM_VLAN_FILTERS + 1) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF [%d] - Tries to configure more than %d"
+ " vlan filters\n",
+ p_vf->relative_vf_id,
+ ECORE_ETH_VF_NUM_VLAN_FILTERS + 1);
+ return ECORE_INVAL;
+ }
+ }
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t
+ecore_iov_vf_update_mac_shadow(struct ecore_hwfn *p_hwfn,
+ struct ecore_vf_info *p_vf,
+ struct ecore_filter_ucast *p_params)
+{
+ char empty_mac[ETH_ALEN];
+ int i;
+
+ OSAL_MEM_ZERO(empty_mac, ETH_ALEN);
+
+ /* If we're in forced-mode, we don't allow any change */
+ /* TODO - this would change if we were ever to implement logic for
+ * removing a forced MAC altogether [in which case, like for vlans,
+ * we should be able to re-trace previous configuration.
+ */
+ if (p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED))
+ return ECORE_SUCCESS;
+
+ /* Since we don't have the implementation of the logic for removing
+ * a forced MAC and restoring shadow MAC, let's not worry about
+ * processing shadow copies of MAC as long as VF trust mode is ON,
+ * to keep things simple.
+ */
+ if (p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change ||
+ p_vf->p_vf_info.is_trusted_configured)
+ return ECORE_SUCCESS;
+
+ /* First remove entries and then add new ones */
+ if (p_params->opcode == ECORE_FILTER_REMOVE) {
+ for (i = 0; i < ECORE_ETH_VF_NUM_MAC_FILTERS; i++) {
+ if (!OSAL_MEMCMP(p_vf->shadow_config.macs[i],
+ p_params->mac, ETH_ALEN)) {
+ OSAL_MEM_ZERO(p_vf->shadow_config.macs[i],
+ ETH_ALEN);
+ break;
+ }
+ }
+
+ if (i == ECORE_ETH_VF_NUM_MAC_FILTERS) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "MAC isn't configured\n");
+ return ECORE_INVAL;
+ }
+ } else if (p_params->opcode == ECORE_FILTER_REPLACE ||
+ p_params->opcode == ECORE_FILTER_FLUSH) {
+ for (i = 0; i < ECORE_ETH_VF_NUM_MAC_FILTERS; i++)
+ OSAL_MEM_ZERO(p_vf->shadow_config.macs[i], ETH_ALEN);
+ }
+
+ /* List the new MAC address */
+ if (p_params->opcode != ECORE_FILTER_ADD &&
+ p_params->opcode != ECORE_FILTER_REPLACE)
+ return ECORE_SUCCESS;
+
+ for (i = 0; i < ECORE_ETH_VF_NUM_MAC_FILTERS; i++) {
+ if (!OSAL_MEMCMP(p_vf->shadow_config.macs[i],
+ empty_mac, ETH_ALEN)) {
+ OSAL_MEMCPY(p_vf->shadow_config.macs[i],
+ p_params->mac, ETH_ALEN);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Added MAC at %d entry in shadow\n", i);
+ break;
+ }
+ }
+
+ if (i == ECORE_ETH_VF_NUM_MAC_FILTERS) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "No available place for MAC\n");
+ return ECORE_INVAL;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t
+ecore_iov_vf_update_unicast_shadow(struct ecore_hwfn *p_hwfn,
+ struct ecore_vf_info *p_vf,
+ struct ecore_filter_ucast *p_params)
+{
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ if (p_params->type == ECORE_FILTER_MAC) {
+ rc = ecore_iov_vf_update_mac_shadow(p_hwfn, p_vf, p_params);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+ }
+
+ if (p_params->type == ECORE_FILTER_VLAN)
+ rc = ecore_iov_vf_update_vlan_shadow(p_hwfn, p_vf, p_params);
+
+ return rc;
+}
+
+static void ecore_iov_vf_mbx_ucast_filter(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *vf)
+{
+ struct ecore_bulletin_content *p_bulletin = vf->bulletin.p_virt;
+ struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
+ struct vfpf_ucast_filter_tlv *req;
+ u8 status = PFVF_STATUS_SUCCESS;
+ struct ecore_filter_ucast params;
+ enum _ecore_status_t rc;
+
+ /* Prepare the unicast filter params */
+ OSAL_MEMSET(&params, 0, sizeof(struct ecore_filter_ucast));
+ req = &mbx->req_virt->ucast_filter;
+ params.opcode = (enum ecore_filter_opcode)req->opcode;
+ params.type = (enum ecore_filter_ucast_type)req->type;
+
+ /* @@@TBD - We might need logic on HV side in determining this */
+ params.is_rx_filter = 1;
+ params.is_tx_filter = 1;
+ params.vport_to_remove_from = vf->vport_id;
+ params.vport_to_add_to = vf->vport_id;
+ OSAL_MEMCPY(params.mac, req->mac, ETH_ALEN);
+ params.vlan = req->vlan;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d]: opcode 0x%02x type 0x%02x [%s %s] [vport 0x%02x]"
+ " MAC %02x:%02x:%02x:%02x:%02x:%02x, vlan 0x%04x\n",
+ vf->abs_vf_id, params.opcode, params.type,
+ params.is_rx_filter ? "RX" : "",
+ params.is_tx_filter ? "TX" : "",
+ params.vport_to_add_to,
+ params.mac[0], params.mac[1], params.mac[2],
+ params.mac[3], params.mac[4], params.mac[5], params.vlan);
+
+ if (!vf->vport_instance) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "No VPORT instance available for VF[%d],"
+ " failing ucast MAC configuration\n",
+ vf->abs_vf_id);
+ status = PFVF_STATUS_FAILURE;
+ goto out;
+ }
+
+ /* Update shadow copy of the VF configuration.
In case shadow indicates
+ * the action should be blocked return success to VF to imitate the
+ * firmware behaviour in such case.
+ */
+ if (ecore_iov_vf_update_unicast_shadow(p_hwfn, vf, &params) !=
+ ECORE_SUCCESS)
+ goto out;
+
+ /* Determine if the unicast filtering is acceptable by PF */
+ if ((p_bulletin->valid_bitmap & (1 << VLAN_ADDR_FORCED)) &&
+ (params.type == ECORE_FILTER_VLAN ||
+ params.type == ECORE_FILTER_MAC_VLAN)) {
+ /* Once VLAN is forced or PVID is set, do not allow
+ * to add/replace any further VLANs.
+ */
+ if (params.opcode == ECORE_FILTER_ADD ||
+ params.opcode == ECORE_FILTER_REPLACE)
+ status = PFVF_STATUS_FORCED;
+ goto out;
+ }
+
+ if ((p_bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) &&
+ (params.type == ECORE_FILTER_MAC ||
+ params.type == ECORE_FILTER_MAC_VLAN)) {
+ if (OSAL_MEMCMP(p_bulletin->mac, params.mac, ETH_ALEN) ||
+ (params.opcode != ECORE_FILTER_ADD &&
+ params.opcode != ECORE_FILTER_REPLACE))
+ status = PFVF_STATUS_FORCED;
+ goto out;
+ }
+
+ rc = OSAL_IOV_CHK_UCAST(p_hwfn, vf->relative_vf_id, &params);
+ if (rc == ECORE_EXISTS) {
+ goto out;
+ } else if (rc == ECORE_INVAL) {
+ status = PFVF_STATUS_FAILURE;
+ goto out;
+ }
+
+ rc = ecore_sp_eth_filter_ucast(p_hwfn, vf->opaque_fid, &params,
+ ECORE_SPQ_MODE_CB, OSAL_NULL);
+ if (rc)
+ status = PFVF_STATUS_FAILURE;
+
+out:
+ ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UCAST_FILTER,
+ sizeof(struct pfvf_def_resp_tlv), status);
+}
+
+static void ecore_iov_vf_mbx_int_cleanup(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *vf)
+{
+ int i;
+
+ /* Reset the SBs */
+ for (i = 0; i < vf->num_sbs; i++)
+ ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
+ vf->igu_sbs[i],
+ vf->opaque_fid, false);
+
+ ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_INT_CLEANUP,
+ sizeof(struct pfvf_def_resp_tlv),
+ PFVF_STATUS_SUCCESS);
+}
+
+static void ecore_iov_vf_mbx_close(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *vf)
+{
+ u16 length = sizeof(struct pfvf_def_resp_tlv);
+ u8 status = PFVF_STATUS_SUCCESS;
+
+ /* Disable Interrupts for VF */
+ ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);
+
+ /* Reset Permission table */
+ ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);
+
+ ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_CLOSE,
+ length, status);
+}
+
+static void ecore_iov_vf_mbx_release(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *p_vf)
+{
+ u16 length = sizeof(struct pfvf_def_resp_tlv);
+ u8 status = PFVF_STATUS_SUCCESS;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ ecore_iov_vf_cleanup(p_hwfn, p_vf);
+
+ if (p_vf->state != VF_STOPPED && p_vf->state != VF_FREE) {
+ /* Stopping the VF */
+ rc = ecore_sp_vf_stop(p_hwfn, p_vf->concrete_fid,
+ p_vf->opaque_fid);
+
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(p_hwfn, "ecore_sp_vf_stop returned error %d\n",
+ rc);
+ status = PFVF_STATUS_FAILURE;
+ }
+
+ p_vf->state = VF_STOPPED;
+ }
+
+ ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf, CHANNEL_TLV_RELEASE,
+ length, status);
+}
+
+static void ecore_iov_vf_pf_get_coalesce(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *p_vf)
+{
+ struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
+ struct pfvf_read_coal_resp_tlv *p_resp;
+ struct vfpf_read_coal_req_tlv *req;
+ u8 status = PFVF_STATUS_FAILURE;
+ struct ecore_vf_queue *p_queue;
+ struct ecore_queue_cid *p_cid;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ u16 coal = 0, qid, i;
+ bool b_is_rx;
+
+ mbx->offset = (u8 *)mbx->reply_virt;
+ req =
&mbx->req_virt->read_coal_req; + + qid = req->qid; + b_is_rx = req->is_rx ? true : false; + + if (b_is_rx) { + if (!ecore_iov_validate_rxq(p_hwfn, p_vf, qid, + ECORE_IOV_VALIDATE_Q_ENABLE)) { + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "VF[%d]: Invalid Rx queue_id = %d\n", + p_vf->abs_vf_id, qid); + goto send_resp; + } + + p_cid = ecore_iov_get_vf_rx_queue_cid(&p_vf->vf_queues[qid]); + rc = ecore_get_rxq_coalesce(p_hwfn, p_ptt, p_cid, &coal); + if (rc != ECORE_SUCCESS) + goto send_resp; + } else { + if (!ecore_iov_validate_txq(p_hwfn, p_vf, qid, + ECORE_IOV_VALIDATE_Q_ENABLE)) { + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "VF[%d]: Invalid Tx queue_id = %d\n", + p_vf->abs_vf_id, qid); + goto send_resp; + } + for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) { + p_queue = &p_vf->vf_queues[qid]; + if ((p_queue->cids[i].p_cid == OSAL_NULL) || + (!p_queue->cids[i].b_is_tx)) + continue; + + p_cid = p_queue->cids[i].p_cid; + + rc = ecore_get_txq_coalesce(p_hwfn, p_ptt, + p_cid, &coal); + if (rc != ECORE_SUCCESS) + goto send_resp; + break; + } + } + + status = PFVF_STATUS_SUCCESS; + +send_resp: + p_resp = ecore_add_tlv(&mbx->offset, CHANNEL_TLV_COALESCE_READ, + sizeof(*p_resp)); + p_resp->coal = coal; + + ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + ecore_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status); +} + +static void ecore_iov_vf_pf_set_coalesce(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_vf_info *vf) +{ + struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx; + enum _ecore_status_t rc = ECORE_SUCCESS; + struct vfpf_update_coalesce *req; + u8 status = PFVF_STATUS_FAILURE; + struct ecore_queue_cid *p_cid; + u16 rx_coal, tx_coal; + u16 qid; + u32 i; + + req = &mbx->req_virt->update_coalesce; + + rx_coal = req->rx_coal; + tx_coal = req->tx_coal; + qid = req->qid; + + if (!ecore_iov_validate_rxq(p_hwfn, vf, qid, + ECORE_IOV_VALIDATE_Q_ENABLE) && + rx_coal) { + DP_ERR(p_hwfn, "VF[%d]: Invalid Rx queue_id = %d\n", + vf->abs_vf_id, qid); + goto out; + } + + if (!ecore_iov_validate_txq(p_hwfn, vf, qid, + ECORE_IOV_VALIDATE_Q_ENABLE) && + tx_coal) { + DP_ERR(p_hwfn, "VF[%d]: Invalid Tx queue_id = %d\n", + vf->abs_vf_id, qid); + goto out; + } + + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "VF[%d]: Setting coalesce for VF rx_coal = %d, tx_coal = %d at queue = %d\n", + vf->abs_vf_id, rx_coal, tx_coal, qid); + + if (rx_coal) { + p_cid = ecore_iov_get_vf_rx_queue_cid(&vf->vf_queues[qid]); + + rc = ecore_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid); + if (rc != ECORE_SUCCESS) { + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "VF[%d]: Unable to set rx queue = %d coalesce\n", + vf->abs_vf_id, vf->vf_queues[qid].fw_rx_qid); + goto out; + } + vf->rx_coal = rx_coal; + } + + /* TODO - in future, it might be possible to pass this in a per-cid + * granularity. For now, do this for all Tx queues. 
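+ * That is, every Tx cid found under the queue-zone of 'qid' below is
+ * configured with the same tx_coal value.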
+ */ + if (tx_coal) { + struct ecore_vf_queue *p_queue = &vf->vf_queues[qid]; + + for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) { + if (p_queue->cids[i].p_cid == OSAL_NULL) + continue; + + if (!p_queue->cids[i].b_is_tx) + continue; + + rc = ecore_set_txq_coalesce(p_hwfn, p_ptt, tx_coal, + p_queue->cids[i].p_cid); + if (rc != ECORE_SUCCESS) { + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "VF[%d]: Unable to set tx queue coalesce\n", + vf->abs_vf_id); + goto out; + } + } + vf->tx_coal = tx_coal; + } + + status = PFVF_STATUS_SUCCESS; +out: + ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_COALESCE_UPDATE, + sizeof(struct pfvf_def_resp_tlv), status); +} + +enum _ecore_status_t +ecore_iov_pf_configure_vf_queue_coalesce(struct ecore_hwfn *p_hwfn, + u16 rx_coal, u16 tx_coal, + u16 vf_id, u16 qid) +{ + struct ecore_queue_cid *p_cid; + struct ecore_vf_info *vf; + struct ecore_ptt *p_ptt; + int rc = 0; + u32 i; + + if (!ecore_iov_is_valid_vfid(p_hwfn, vf_id, true, true)) { + DP_NOTICE(p_hwfn, true, + "VF[%d] - Can not set coalescing: VF is not active\n", + vf_id); + return ECORE_INVAL; + } + + vf = &p_hwfn->pf_iov_info->vfs_array[vf_id]; + p_ptt = ecore_ptt_acquire(p_hwfn); + if (!p_ptt) + return ECORE_AGAIN; + + if (!ecore_iov_validate_rxq(p_hwfn, vf, qid, + ECORE_IOV_VALIDATE_Q_ENABLE) && + rx_coal) { + DP_ERR(p_hwfn, "VF[%d]: Invalid Rx queue_id = %d\n", + vf->abs_vf_id, qid); + goto out; + } + + if (!ecore_iov_validate_txq(p_hwfn, vf, qid, + ECORE_IOV_VALIDATE_Q_ENABLE) && + tx_coal) { + DP_ERR(p_hwfn, "VF[%d]: Invalid Tx queue_id = %d\n", + vf->abs_vf_id, qid); + goto out; + } + + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "VF[%d]: Setting coalesce for VF rx_coal = %d, tx_coal = %d at queue = %d\n", + vf->abs_vf_id, rx_coal, tx_coal, qid); + + if (rx_coal) { + p_cid = ecore_iov_get_vf_rx_queue_cid(&vf->vf_queues[qid]); + + rc = ecore_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid); + if (rc != ECORE_SUCCESS) { + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "VF[%d]: Unable to set rx queue = %d coalesce\n", + vf->abs_vf_id, vf->vf_queues[qid].fw_rx_qid); + goto out; + } + vf->rx_coal = rx_coal; + } + + /* TODO - in future, it might be possible to pass this in a per-cid + * granularity. For now, do this for all Tx queues. 
+ */ + if (tx_coal) { + struct ecore_vf_queue *p_queue = &vf->vf_queues[qid]; + + for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) { + if (p_queue->cids[i].p_cid == OSAL_NULL) + continue; + + if (!p_queue->cids[i].b_is_tx) + continue; + + rc = ecore_set_txq_coalesce(p_hwfn, p_ptt, tx_coal, + p_queue->cids[i].p_cid); + if (rc != ECORE_SUCCESS) { + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "VF[%d]: Unable to set tx queue coalesce\n", + vf->abs_vf_id); + goto out; + } + } + vf->tx_coal = tx_coal; + } + +out: + ecore_ptt_release(p_hwfn, p_ptt); + + return rc; +} + +static enum _ecore_status_t +ecore_iov_vf_flr_poll_dorq(struct ecore_hwfn *p_hwfn, + struct ecore_vf_info *p_vf, struct ecore_ptt *p_ptt) +{ + int cnt; + u32 val; + + ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_vf->concrete_fid); + + for (cnt = 0; cnt < 50; cnt++) { + val = ecore_rd(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT); + if (!val) + break; + OSAL_MSLEEP(20); + } + ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid); + + if (cnt == 50) { + DP_ERR(p_hwfn, + "VF[%d] - dorq failed to cleanup [usage 0x%08x]\n", + p_vf->abs_vf_id, val); + return ECORE_TIMEOUT; + } + + return ECORE_SUCCESS; +} + +#define MAX_NUM_EXT_VOQS (MAX_NUM_PORTS * NUM_OF_TCS) + +static enum _ecore_status_t +ecore_iov_vf_flr_poll_pbf(struct ecore_hwfn *p_hwfn, + struct ecore_vf_info *p_vf, struct ecore_ptt *p_ptt) +{ + u32 prod, cons[MAX_NUM_EXT_VOQS], distance[MAX_NUM_EXT_VOQS], tmp; + u8 max_phys_tcs_per_port = p_hwfn->qm_info.max_phys_tcs_per_port; + u8 max_ports_per_engine = p_hwfn->p_dev->num_ports_in_engine; + u32 prod_voq0_addr = PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0; + u32 cons_voq0_addr = PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0; + u8 port_id, tc, tc_id = 0, voq = 0; + int cnt; + + /* Read initial consumers & producers */ + for (port_id = 0; port_id < max_ports_per_engine; port_id++) { + /* "max_phys_tcs_per_port" active TCs + 1 pure LB TC */ + for (tc = 0; tc < max_phys_tcs_per_port + 1; tc++) { + tc_id = (tc < max_phys_tcs_per_port) ? + tc : + PURE_LB_TC; + voq = VOQ(port_id, tc_id, max_phys_tcs_per_port); + cons[voq] = ecore_rd(p_hwfn, p_ptt, + cons_voq0_addr + voq * 0x40); + prod = ecore_rd(p_hwfn, p_ptt, + prod_voq0_addr + voq * 0x40); + distance[voq] = prod - cons[voq]; + } + } + + /* Wait for consumers to pass the producers */ + port_id = 0; + tc = 0; + for (cnt = 0; cnt < 50; cnt++) { + for (; port_id < max_ports_per_engine; port_id++) { + /* "max_phys_tcs_per_port" active TCs + 1 pure LB TC */ + for (; tc < max_phys_tcs_per_port + 1; tc++) { + tc_id = (tc < max_phys_tcs_per_port) ? 
+ tc : + PURE_LB_TC; + voq = VOQ(port_id, tc_id, + max_phys_tcs_per_port); + tmp = ecore_rd(p_hwfn, p_ptt, + cons_voq0_addr + voq * 0x40); + if (distance[voq] > tmp - cons[voq]) + break; + } + + if (tc == max_phys_tcs_per_port + 1) + tc = 0; + else + break; + } + + if (port_id == max_ports_per_engine) + break; + + OSAL_MSLEEP(20); + } + + if (cnt == 50) { + DP_ERR(p_hwfn, + "VF[%d] - pbf polling failed on VOQ %d [port_id %d, tc_id %d]\n", + p_vf->abs_vf_id, voq, port_id, tc_id); + return ECORE_TIMEOUT; + } + + return ECORE_SUCCESS; +} + +static enum _ecore_status_t ecore_iov_vf_flr_poll(struct ecore_hwfn *p_hwfn, + struct ecore_vf_info *p_vf, + struct ecore_ptt *p_ptt) +{ + enum _ecore_status_t rc; + + /* TODO - add SRC and TM polling once we add storage IOV */ + + rc = ecore_iov_vf_flr_poll_dorq(p_hwfn, p_vf, p_ptt); + if (rc) + return rc; + + rc = ecore_iov_vf_flr_poll_pbf(p_hwfn, p_vf, p_ptt); + if (rc) + return rc; + + return ECORE_SUCCESS; +} + +static enum _ecore_status_t +ecore_iov_execute_vf_flr_cleanup(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u16 rel_vf_id, u32 *ack_vfs) +{ + struct ecore_vf_info *p_vf; + enum _ecore_status_t rc = ECORE_SUCCESS; + + p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, false); + if (!p_vf) + return ECORE_SUCCESS; + + if (p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] & + (1ULL << (rel_vf_id % 64))) { + u16 vfid = p_vf->abs_vf_id; + + /* TODO - should we lock channel? */ + + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "VF[%d] - Handling FLR\n", vfid); + + ecore_iov_vf_cleanup(p_hwfn, p_vf); + + /* If VF isn't active, no need for anything but SW */ + if (!p_vf->b_init) + goto cleanup; + + /* TODO - what to do in case of failure? */ + rc = ecore_iov_vf_flr_poll(p_hwfn, p_vf, p_ptt); + if (rc != ECORE_SUCCESS) + goto cleanup; + + rc = ecore_final_cleanup(p_hwfn, p_ptt, vfid, true); + if (rc) { + /* TODO - what's now? What a mess.... */ + DP_ERR(p_hwfn, "Failed handle FLR of VF[%d]\n", vfid); + return rc; + } + + /* Workaround to make VF-PF channel ready, as FW + * doesn't do that as a part of FLR. + */ + REG_WR(p_hwfn, + GTT_BAR0_MAP_REG_USDM_RAM + + USTORM_VF_PF_CHANNEL_READY_OFFSET(vfid), 1); + + /* VF_STOPPED has to be set only after final cleanup + * but prior to re-enabling the VF. + */ + p_vf->state = VF_STOPPED; + + rc = ecore_iov_enable_vf_access(p_hwfn, p_ptt, p_vf); + if (rc) { + /* TODO - again, a mess... */ + DP_ERR(p_hwfn, "Failed to re-enable VF[%d] acces\n", + vfid); + return rc; + } +cleanup: + /* Mark VF for ack and clean pending state */ + if (p_vf->state == VF_RESET) + p_vf->state = VF_STOPPED; + ack_vfs[vfid / 32] |= (1 << (vfid % 32)); + p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &= + ~(1ULL << (rel_vf_id % 64)); + p_vf->vf_mbx.b_pending_msg = false; + } + + return rc; +} + +enum _ecore_status_t ecore_iov_vf_flr_cleanup(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + u32 ack_vfs[EXT_VF_BITMAP_SIZE_IN_DWORDS]; + enum _ecore_status_t rc = ECORE_SUCCESS; + u16 i; + + OSAL_MEM_ZERO(ack_vfs, EXT_VF_BITMAP_SIZE_IN_BYTES); + + /* Since BRB <-> PRS interface can't be tested as part of the flr + * polling due to HW limitations, simply sleep a bit. And since + * there's no need to wait per-vf, do it before looping. 
+ */ + OSAL_MSLEEP(100); + + for (i = 0; i < p_hwfn->p_dev->p_iov_info->total_vfs; i++) + ecore_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, i, ack_vfs); + + rc = ecore_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs); + return rc; +} + +enum _ecore_status_t +ecore_iov_single_vf_flr_cleanup(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, u16 rel_vf_id) +{ + u32 ack_vfs[EXT_VF_BITMAP_SIZE_IN_DWORDS]; + enum _ecore_status_t rc = ECORE_SUCCESS; + + OSAL_MEM_ZERO(ack_vfs, EXT_VF_BITMAP_SIZE_IN_BYTES); + + /* Wait instead of polling the BRB <-> PRS interface */ + OSAL_MSLEEP(100); + + ecore_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, rel_vf_id, ack_vfs); + + rc = ecore_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs); + return rc; +} + +bool ecore_iov_mark_vf_flr(struct ecore_hwfn *p_hwfn, u32 *p_disabled_vfs) +{ + bool found = false; + u16 i; + + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "Marking FLR-ed VFs\n"); + + for (i = 0; i < VF_BITMAP_SIZE_IN_DWORDS; i++) + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "[%08x,...,%08x]: %08x\n", + i * 32, (i + 1) * 32 - 1, p_disabled_vfs[i]); + + if (!p_hwfn->p_dev->p_iov_info) { + DP_NOTICE(p_hwfn, true, "VF flr but no IOV\n"); + return false; + } + + /* Mark VFs */ + for (i = 0; i < p_hwfn->p_dev->p_iov_info->total_vfs; i++) { + struct ecore_vf_info *p_vf; + u8 vfid; + + p_vf = ecore_iov_get_vf_info(p_hwfn, i, false); + if (!p_vf) + continue; + + vfid = p_vf->abs_vf_id; + if ((1 << (vfid % 32)) & p_disabled_vfs[vfid / 32]) { + u64 *p_flr = p_hwfn->pf_iov_info->pending_flr; + u16 rel_vf_id = p_vf->relative_vf_id; + + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "VF[%d] [rel %d] got FLR-ed\n", + vfid, rel_vf_id); + + p_vf->state = VF_RESET; + + /* No need to lock here, since pending_flr should + * only change here and before ACKing MFw. Since + * MFW will not trigger an additional attention for + * VF flr until ACKs, we're safe. + */ + p_flr[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64); + found = true; + } + } + + return found; +} + +void ecore_iov_get_link(struct ecore_hwfn *p_hwfn, + u16 vfid, + struct ecore_mcp_link_params *p_params, + struct ecore_mcp_link_state *p_link, + struct ecore_mcp_link_capabilities *p_caps) +{ + struct ecore_vf_info *p_vf = ecore_iov_get_vf_info(p_hwfn, vfid, false); + struct ecore_bulletin_content *p_bulletin; + + if (!p_vf) + return; + + p_bulletin = p_vf->bulletin.p_virt; + + if (p_params) + __ecore_vf_get_link_params(p_params, p_bulletin); + if (p_link) + __ecore_vf_get_link_state(p_link, p_bulletin); + if (p_caps) + __ecore_vf_get_link_caps(p_caps, p_bulletin); +} + +void ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, int vfid) +{ + struct ecore_iov_vf_mbx *mbx; + struct ecore_vf_info *p_vf; + + p_vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true); + if (!p_vf) + return; + + mbx = &p_vf->vf_mbx; + + /* ecore_iov_process_mbx_request */ +#ifndef CONFIG_ECORE_SW_CHANNEL + if (!mbx->b_pending_msg) { + DP_NOTICE(p_hwfn, true, + "VF[%02x]: Trying to process mailbox message when none is pending\n", + p_vf->abs_vf_id); + return; + } + mbx->b_pending_msg = false; +#endif + + mbx->first_tlv = mbx->req_virt->first_tlv; + + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "VF[%02x]: Processing mailbox message [type %04x]\n", + p_vf->abs_vf_id, mbx->first_tlv.tl.type); + + OSAL_IOV_VF_MSG_TYPE(p_hwfn, + p_vf->relative_vf_id, + mbx->first_tlv.tl.type); + + /* Lock the per vf op mutex and note the locker's identity. + * The unlock will take place in mbx response. 
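+ * (i.e. ecore_iov_unlock_vf_pf_channel() is called once the response
+ * for this message has been sent back to the VF).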
+ */ + ecore_iov_lock_vf_pf_channel(p_hwfn, + p_vf, mbx->first_tlv.tl.type); + + /* check if tlv type is known */ + if (ecore_iov_tlv_supported(mbx->first_tlv.tl.type) && + !p_vf->b_malicious) { + /* switch on the opcode */ + switch (mbx->first_tlv.tl.type) { + case CHANNEL_TLV_ACQUIRE: + ecore_iov_vf_mbx_acquire(p_hwfn, p_ptt, p_vf); + break; + case CHANNEL_TLV_VPORT_START: + ecore_iov_vf_mbx_start_vport(p_hwfn, p_ptt, p_vf); + break; + case CHANNEL_TLV_VPORT_TEARDOWN: + ecore_iov_vf_mbx_stop_vport(p_hwfn, p_ptt, p_vf); + break; + case CHANNEL_TLV_START_RXQ: + ecore_iov_vf_mbx_start_rxq(p_hwfn, p_ptt, p_vf); + break; + case CHANNEL_TLV_START_TXQ: + ecore_iov_vf_mbx_start_txq(p_hwfn, p_ptt, p_vf); + break; + case CHANNEL_TLV_STOP_RXQS: + ecore_iov_vf_mbx_stop_rxqs(p_hwfn, p_ptt, p_vf); + break; + case CHANNEL_TLV_STOP_TXQS: + ecore_iov_vf_mbx_stop_txqs(p_hwfn, p_ptt, p_vf); + break; + case CHANNEL_TLV_UPDATE_RXQ: + ecore_iov_vf_mbx_update_rxqs(p_hwfn, p_ptt, p_vf); + break; + case CHANNEL_TLV_VPORT_UPDATE: + ecore_iov_vf_mbx_vport_update(p_hwfn, p_ptt, p_vf); + break; + case CHANNEL_TLV_UCAST_FILTER: + ecore_iov_vf_mbx_ucast_filter(p_hwfn, p_ptt, p_vf); + break; + case CHANNEL_TLV_CLOSE: + ecore_iov_vf_mbx_close(p_hwfn, p_ptt, p_vf); + break; + case CHANNEL_TLV_INT_CLEANUP: + ecore_iov_vf_mbx_int_cleanup(p_hwfn, p_ptt, p_vf); + break; + case CHANNEL_TLV_RELEASE: + ecore_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf); + break; + case CHANNEL_TLV_UPDATE_TUNN_PARAM: + ecore_iov_vf_mbx_update_tunn_param(p_hwfn, p_ptt, p_vf); + break; + case CHANNEL_TLV_COALESCE_UPDATE: + ecore_iov_vf_pf_set_coalesce(p_hwfn, p_ptt, p_vf); + break; + case CHANNEL_TLV_COALESCE_READ: + ecore_iov_vf_pf_get_coalesce(p_hwfn, p_ptt, p_vf); + break; + case CHANNEL_TLV_UPDATE_MTU: + ecore_iov_vf_pf_update_mtu(p_hwfn, p_ptt, p_vf); + break; + } + } else if (ecore_iov_tlv_supported(mbx->first_tlv.tl.type)) { + /* If we've received a message from a VF we consider malicious + * we ignore the messasge unless it's one for RELEASE, in which + * case we'll let it have the benefit of doubt, allowing the + * next loaded driver to start again. + */ + if (mbx->first_tlv.tl.type == CHANNEL_TLV_RELEASE) { + /* TODO - initiate FLR, remove malicious indication */ + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "VF [%02x] - considered malicious, but wanted to RELEASE. TODO\n", + p_vf->abs_vf_id); + } else { + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "VF [%02x] - considered malicious; Ignoring TLV [%04x]\n", + p_vf->abs_vf_id, mbx->first_tlv.tl.type); + } + + ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf, + mbx->first_tlv.tl.type, + sizeof(struct pfvf_def_resp_tlv), + PFVF_STATUS_MALICIOUS); + } else { + /* unknown TLV - this may belong to a VF driver from the future + * - a version written after this PF driver was written, which + * supports features unknown as of yet. Too bad since we don't + * support them. Or this may be because someone wrote a crappy + * VF driver and is sending garbage over the channel. + */ + DP_NOTICE(p_hwfn, false, + "VF[%02x]: unknown TLV. type %04x length %04x" + " padding %08x reply address %lu\n", + p_vf->abs_vf_id, + mbx->first_tlv.tl.type, + mbx->first_tlv.tl.length, + mbx->first_tlv.padding, + (unsigned long)mbx->first_tlv.reply_address); + + /* Try replying in case reply address matches the acquisition's + * posted address. 
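+ * Otherwise there is no address the PF can trust for a reply, so the
+ * unknown TLV is only logged and then dropped.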
+ */ + if (p_vf->acquire.first_tlv.reply_address && + (mbx->first_tlv.reply_address == + p_vf->acquire.first_tlv.reply_address)) + ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf, + mbx->first_tlv.tl.type, + sizeof(struct pfvf_def_resp_tlv), + PFVF_STATUS_NOT_SUPPORTED); + else + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "VF[%02x]: Can't respond to TLV -" + " no valid reply address\n", + p_vf->abs_vf_id); + } + + ecore_iov_unlock_vf_pf_channel(p_hwfn, p_vf, + mbx->first_tlv.tl.type); + +#ifdef CONFIG_ECORE_SW_CHANNEL + mbx->sw_mbx.mbx_state = VF_PF_RESPONSE_READY; + mbx->sw_mbx.response_offset = 0; +#endif +} + +void ecore_iov_pf_get_pending_events(struct ecore_hwfn *p_hwfn, + u64 *events) +{ + int i; + + OSAL_MEM_ZERO(events, sizeof(u64) * ECORE_VF_ARRAY_LENGTH); + + ecore_for_each_vf(p_hwfn, i) { + struct ecore_vf_info *p_vf; + + p_vf = &p_hwfn->pf_iov_info->vfs_array[i]; + if (p_vf->vf_mbx.b_pending_msg) + events[i / 64] |= 1ULL << (i % 64); + } +} + +static struct ecore_vf_info * +ecore_sriov_get_vf_from_absid(struct ecore_hwfn *p_hwfn, u16 abs_vfid) +{ + u8 min = (u8)p_hwfn->p_dev->p_iov_info->first_vf_in_pf; + + if (!_ecore_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min, false)) { + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "Got indication for VF [abs 0x%08x] that cannot be" + " handled by PF\n", + abs_vfid); + return OSAL_NULL; + } + + return &p_hwfn->pf_iov_info->vfs_array[(u8)abs_vfid - min]; +} + +static enum _ecore_status_t ecore_sriov_vfpf_msg(struct ecore_hwfn *p_hwfn, + u16 abs_vfid, + struct regpair *vf_msg) +{ + struct ecore_vf_info *p_vf = ecore_sriov_get_vf_from_absid(p_hwfn, + abs_vfid); + + if (!p_vf) + return ECORE_SUCCESS; + + /* List the physical address of the request so that handler + * could later on copy the message from it. + */ + p_vf->vf_mbx.pending_req = (((u64)vf_msg->hi) << 32) | vf_msg->lo; + + p_vf->vf_mbx.b_pending_msg = true; + + return OSAL_PF_VF_MSG(p_hwfn, p_vf->relative_vf_id); +} + +static void ecore_sriov_vfpf_malicious(struct ecore_hwfn *p_hwfn, + struct malicious_vf_eqe_data *p_data) +{ + struct ecore_vf_info *p_vf; + + p_vf = ecore_sriov_get_vf_from_absid(p_hwfn, p_data->vf_id); + + if (!p_vf) + return; + + if (!p_vf->b_malicious) { + DP_NOTICE(p_hwfn, false, + "VF [%d] - Malicious behavior [%02x]\n", + p_vf->abs_vf_id, p_data->err_id); + + p_vf->b_malicious = true; + } else { + DP_INFO(p_hwfn, + "VF [%d] - Malicious behavior [%02x]\n", + p_vf->abs_vf_id, p_data->err_id); + } + + OSAL_PF_VF_MALICIOUS(p_hwfn, p_vf->relative_vf_id); +} + +static enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn, + u8 opcode, + __le16 echo, + union event_ring_data *data, + u8 OSAL_UNUSED fw_return_code) +{ + switch (opcode) { + case COMMON_EVENT_VF_PF_CHANNEL: + return ecore_sriov_vfpf_msg(p_hwfn, OSAL_LE16_TO_CPU(echo), + &data->vf_pf_channel.msg_addr); + case COMMON_EVENT_VF_FLR: + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "VF-FLR is still not supported\n"); + return ECORE_SUCCESS; + case COMMON_EVENT_MALICIOUS_VF: + ecore_sriov_vfpf_malicious(p_hwfn, &data->malicious_vf); + return ECORE_SUCCESS; + default: + DP_INFO(p_hwfn->p_dev, "Unknown sriov eqe event 0x%02x\n", + opcode); + return ECORE_INVAL; + } +} + +bool ecore_iov_is_vf_pending_flr(struct ecore_hwfn *p_hwfn, u16 rel_vf_id) +{ + return !!(p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] & + (1ULL << (rel_vf_id % 64))); +} + +u16 ecore_iov_get_next_active_vf(struct ecore_hwfn *p_hwfn, u16 rel_vf_id) +{ + struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info; + u16 i; + + if (!p_iov) + goto out; + + 
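/* Iterate from rel_vf_id over this PF's VF range; MAX_NUM_VFS_K2 is
+ * returned when no further active VF is found.
+ */
+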
for (i = rel_vf_id; i < p_iov->total_vfs; i++)
+ if (ecore_iov_is_valid_vfid(p_hwfn, rel_vf_id, true, false))
+ return i;
+
+out:
+ return MAX_NUM_VFS_K2;
+}
+
+enum _ecore_status_t ecore_iov_copy_vf_msg(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *ptt, int vfid)
+{
+ struct dmae_params params;
+ struct ecore_vf_info *vf_info;
+
+ vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+ if (!vf_info)
+ return ECORE_INVAL;
+
+ OSAL_MEMSET(&params, 0, sizeof(params));
+ SET_FIELD(params.flags, DMAE_PARAMS_SRC_VF_VALID, 0x1);
+ SET_FIELD(params.flags, DMAE_PARAMS_COMPLETION_DST, 0x1);
+ params.src_vf_id = vf_info->abs_vf_id;
+
+ if (ecore_dmae_host2host(p_hwfn, ptt,
+ vf_info->vf_mbx.pending_req,
+ vf_info->vf_mbx.req_phys,
+ sizeof(union vfpf_tlvs) / 4, &params)) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Failed to copy message from VF 0x%02x\n", vfid);
+
+ return ECORE_IO;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+void ecore_iov_bulletin_set_forced_mac(struct ecore_hwfn *p_hwfn,
+ u8 *mac, int vfid)
+{
+ struct ecore_vf_info *vf_info;
+ u64 feature;
+
+ vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+ if (!vf_info) {
+ DP_NOTICE(p_hwfn->p_dev, true,
+ "Can not set forced MAC, invalid vfid [%d]\n", vfid);
+ return;
+ }
+ if (vf_info->b_malicious) {
+ DP_NOTICE(p_hwfn->p_dev, false,
+ "Can't set forced MAC to malicious VF [%d]\n",
+ vfid);
+ return;
+ }
+
+ if (p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change ||
+ vf_info->p_vf_info.is_trusted_configured) {
+ feature = 1 << VFPF_BULLETIN_MAC_ADDR;
+ /* Trust mode will disable Forced MAC */
+ vf_info->bulletin.p_virt->valid_bitmap &=
+ ~(1 << MAC_ADDR_FORCED);
+ } else {
+ feature = 1 << MAC_ADDR_FORCED;
+ /* Forced MAC will disable MAC_ADDR */
+ vf_info->bulletin.p_virt->valid_bitmap &=
+ ~(1 << VFPF_BULLETIN_MAC_ADDR);
+ }
+
+ OSAL_MEMCPY(vf_info->bulletin.p_virt->mac,
+ mac, ETH_ALEN);
+
+ vf_info->bulletin.p_virt->valid_bitmap |= feature;
+
+ ecore_iov_configure_vport_forced(p_hwfn, vf_info, feature);
+}
+
+enum _ecore_status_t ecore_iov_bulletin_set_mac(struct ecore_hwfn *p_hwfn,
+ u8 *mac, int vfid)
+{
+ struct ecore_vf_info *vf_info;
+ u64 feature;
+
+ vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+ if (!vf_info) {
+ DP_NOTICE(p_hwfn->p_dev, true,
+ "Can not set MAC, invalid vfid [%d]\n", vfid);
+ return ECORE_INVAL;
+ }
+ if (vf_info->b_malicious) {
+ DP_NOTICE(p_hwfn->p_dev, false,
+ "Can't set MAC to malicious VF [%d]\n",
+ vfid);
+ return ECORE_INVAL;
+ }
+
+ if (vf_info->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED)) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Can not set MAC, Forced MAC is configured\n");
+ return ECORE_INVAL;
+ }
+
+ feature = 1 << VFPF_BULLETIN_MAC_ADDR;
+ OSAL_MEMCPY(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);
+
+ vf_info->bulletin.p_virt->valid_bitmap |= feature;
+
+ if (p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change ||
+ vf_info->p_vf_info.is_trusted_configured)
+ ecore_iov_configure_vport_forced(p_hwfn, vf_info, feature);
+
+ return ECORE_SUCCESS;
+}
+
+#ifndef LINUX_REMOVE
+enum _ecore_status_t
+ecore_iov_bulletin_set_forced_untagged_default(struct ecore_hwfn *p_hwfn,
+ bool b_untagged_only, int vfid)
+{
+ struct ecore_vf_info *vf_info;
+ u64 feature;
+
+ vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+ if (!vf_info) {
+ DP_NOTICE(p_hwfn->p_dev, true,
+ "Can not set untagged default, invalid vfid [%d]\n",
+ vfid);
+ return ECORE_INVAL;
+ }
+ if (vf_info->b_malicious) {
+ DP_NOTICE(p_hwfn->p_dev, false,
+ "Can't set untagged default to malicious VF [%d]\n",
+ vfid);
+
return ECORE_INVAL; + } + + /* Since this is configurable only during vport-start, don't take it + * if we're past that point. + */ + if (vf_info->state == VF_ENABLED) { + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "Can't support untagged change for vfid[%d] -" + " VF is already active\n", + vfid); + return ECORE_INVAL; + } + + /* Set configuration; This will later be taken into account during the + * VF initialization. + */ + feature = (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT) | + (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED); + vf_info->bulletin.p_virt->valid_bitmap |= feature; + + vf_info->bulletin.p_virt->default_only_untagged = b_untagged_only ? 1 + : 0; + + return ECORE_SUCCESS; +} + +void ecore_iov_get_vfs_opaque_fid(struct ecore_hwfn *p_hwfn, int vfid, + u16 *opaque_fid) +{ + struct ecore_vf_info *vf_info; + + vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true); + if (!vf_info) + return; + + *opaque_fid = vf_info->opaque_fid; +} +#endif + +void ecore_iov_bulletin_set_forced_vlan(struct ecore_hwfn *p_hwfn, + u16 pvid, int vfid) +{ + struct ecore_vf_info *vf_info; + u64 feature; + + vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true); + if (!vf_info) { + DP_NOTICE(p_hwfn->p_dev, true, + "Can not set forced MAC, invalid vfid [%d]\n", + vfid); + return; + } + if (vf_info->b_malicious) { + DP_NOTICE(p_hwfn->p_dev, false, + "Can't set forced vlan to malicious VF [%d]\n", + vfid); + return; + } + + feature = 1 << VLAN_ADDR_FORCED; + vf_info->bulletin.p_virt->pvid = pvid; + if (pvid) + vf_info->bulletin.p_virt->valid_bitmap |= feature; + else + vf_info->bulletin.p_virt->valid_bitmap &= ~feature; + + ecore_iov_configure_vport_forced(p_hwfn, vf_info, feature); +} + +void ecore_iov_bulletin_set_udp_ports(struct ecore_hwfn *p_hwfn, + int vfid, u16 vxlan_port, u16 geneve_port) +{ + struct ecore_vf_info *vf_info; + + vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true); + if (!vf_info) { + DP_NOTICE(p_hwfn->p_dev, true, + "Can not set udp ports, invalid vfid [%d]\n", vfid); + return; + } + + if (vf_info->b_malicious) { + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "Can not set udp ports to malicious VF [%d]\n", + vfid); + return; + } + + vf_info->bulletin.p_virt->vxlan_udp_port = vxlan_port; + vf_info->bulletin.p_virt->geneve_udp_port = geneve_port; +} + +bool ecore_iov_vf_has_vport_instance(struct ecore_hwfn *p_hwfn, int vfid) +{ + struct ecore_vf_info *p_vf_info; + + p_vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true); + if (!p_vf_info) + return false; + + return !!p_vf_info->vport_instance; +} + +bool ecore_iov_is_vf_stopped(struct ecore_hwfn *p_hwfn, int vfid) +{ + struct ecore_vf_info *p_vf_info; + + p_vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true); + if (!p_vf_info) + return true; + + return p_vf_info->state == VF_STOPPED; +} + +bool ecore_iov_spoofchk_get(struct ecore_hwfn *p_hwfn, int vfid) +{ + struct ecore_vf_info *vf_info; + + vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true); + if (!vf_info) + return false; + + return vf_info->spoof_chk; +} + +enum _ecore_status_t ecore_iov_spoofchk_set(struct ecore_hwfn *p_hwfn, + int vfid, bool val) +{ + struct ecore_vf_info *vf; + enum _ecore_status_t rc = ECORE_INVAL; + + if (!ecore_iov_pf_sanity_check(p_hwfn, vfid)) { + DP_NOTICE(p_hwfn, true, + "SR-IOV sanity check failed, can't set spoofchk\n"); + goto out; + } + + vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true); + if (!vf) + goto out; + + if (!ecore_iov_vf_has_vport_instance(p_hwfn, vfid)) { + /* After VF VPORT start PF will configure spoof check */ + 
vf->req_spoofchk_val = val; + rc = ECORE_SUCCESS; + goto out; + } + + rc = __ecore_iov_spoofchk_set(p_hwfn, vf, val); + +out: + return rc; +} + +u8 ecore_iov_vf_chains_per_pf(struct ecore_hwfn *p_hwfn) +{ + u8 max_chains_per_vf = p_hwfn->hw_info.max_chains_per_vf; + + max_chains_per_vf = (max_chains_per_vf) ? max_chains_per_vf + : ECORE_MAX_VF_CHAINS_PER_PF; + + return max_chains_per_vf; +} + +void ecore_iov_get_vf_req_virt_mbx_params(struct ecore_hwfn *p_hwfn, + u16 rel_vf_id, + void **pp_req_virt_addr, + u16 *p_req_virt_size) +{ + struct ecore_vf_info *vf_info = + ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true); + + if (!vf_info) + return; + + if (pp_req_virt_addr) + *pp_req_virt_addr = vf_info->vf_mbx.req_virt; + + if (p_req_virt_size) + *p_req_virt_size = sizeof(*vf_info->vf_mbx.req_virt); +} + +void ecore_iov_get_vf_reply_virt_mbx_params(struct ecore_hwfn *p_hwfn, + u16 rel_vf_id, + void **pp_reply_virt_addr, + u16 *p_reply_virt_size) +{ + struct ecore_vf_info *vf_info = + ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true); + + if (!vf_info) + return; + + if (pp_reply_virt_addr) + *pp_reply_virt_addr = vf_info->vf_mbx.reply_virt; + + if (p_reply_virt_size) + *p_reply_virt_size = sizeof(*vf_info->vf_mbx.reply_virt); +} + +#ifdef CONFIG_ECORE_SW_CHANNEL +struct ecore_iov_sw_mbx *ecore_iov_get_vf_sw_mbx(struct ecore_hwfn *p_hwfn, + u16 rel_vf_id) +{ + struct ecore_vf_info *vf_info = + ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true); + + if (!vf_info) + return OSAL_NULL; + + return &vf_info->vf_mbx.sw_mbx; +} +#endif + +bool ecore_iov_is_valid_vfpf_msg_length(u32 length) +{ + return (length >= sizeof(struct vfpf_first_tlv) && + (length <= sizeof(union vfpf_tlvs))); +} + +u32 ecore_iov_pfvf_msg_length(void) +{ + return sizeof(union pfvf_tlvs); +} + +u8 *ecore_iov_bulletin_get_mac(struct ecore_hwfn *p_hwfn, + u16 rel_vf_id) +{ + struct ecore_vf_info *p_vf; + + p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true); + if (!p_vf || !p_vf->bulletin.p_virt) + return OSAL_NULL; + + if (!(p_vf->bulletin.p_virt->valid_bitmap & + (1 << VFPF_BULLETIN_MAC_ADDR))) + return OSAL_NULL; + + return p_vf->bulletin.p_virt->mac; +} + +u8 *ecore_iov_bulletin_get_forced_mac(struct ecore_hwfn *p_hwfn, u16 rel_vf_id) +{ + struct ecore_vf_info *p_vf; + + p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true); + if (!p_vf || !p_vf->bulletin.p_virt) + return OSAL_NULL; + + if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED))) + return OSAL_NULL; + + return p_vf->bulletin.p_virt->mac; +} + +u16 ecore_iov_bulletin_get_forced_vlan(struct ecore_hwfn *p_hwfn, + u16 rel_vf_id) +{ + struct ecore_vf_info *p_vf; + + p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true); + if (!p_vf || !p_vf->bulletin.p_virt) + return 0; + + if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED))) + return 0; + + return p_vf->bulletin.p_virt->pvid; +} + +enum _ecore_status_t ecore_iov_configure_tx_rate(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + int vfid, int val) +{ + struct ecore_vf_info *vf; + u8 abs_vp_id = 0; + u16 rl_id; + enum _ecore_status_t rc; + + vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true); + + if (!vf) + return ECORE_INVAL; + + rc = ecore_fw_vport(p_hwfn, vf->vport_id, &abs_vp_id); + if (rc != ECORE_SUCCESS) + return rc; + + rl_id = abs_vp_id; /* The "rl_id" is set as the "vport_id" */ + return ecore_init_global_rl(p_hwfn, p_ptt, rl_id, (u32)val); +} + +enum _ecore_status_t ecore_iov_configure_min_tx_rate(struct ecore_dev *p_dev, + int vfid, u32 rate) +{ + struct ecore_vf_info *vf; + int i; 
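+ /* The min Tx rate is applied through the vport WFQ configuration,
+ * which is a per-device setting, so the vfid is sanity-checked on
+ * every hwfn before the leading hwfn's VF vport is configured.
+ */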
+ + for_each_hwfn(p_dev, i) { + struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; + + if (!ecore_iov_pf_sanity_check(p_hwfn, vfid)) { + DP_NOTICE(p_hwfn, true, + "SR-IOV sanity check failed, can't set min rate\n"); + return ECORE_INVAL; + } + } + + vf = ecore_iov_get_vf_info(ECORE_LEADING_HWFN(p_dev), (u16)vfid, true); + if (!vf) { + DP_NOTICE(p_dev, true, + "Getting vf info failed, can't set min rate\n"); + return ECORE_INVAL; + } + + return ecore_configure_vport_wfq(p_dev, vf->vport_id, rate); +} + +enum _ecore_status_t ecore_iov_get_vf_stats(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + int vfid, + struct ecore_eth_stats *p_stats) +{ + struct ecore_vf_info *vf; + + vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true); + if (!vf) + return ECORE_INVAL; + + if (vf->state != VF_ENABLED) + return ECORE_INVAL; + + __ecore_get_vport_stats(p_hwfn, p_ptt, p_stats, + vf->abs_vf_id + 0x10, false); + + return ECORE_SUCCESS; +} + +u8 ecore_iov_get_vf_num_rxqs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id) +{ + struct ecore_vf_info *p_vf; + + p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true); + if (!p_vf) + return 0; + + return p_vf->num_rxqs; +} + +u8 ecore_iov_get_vf_num_active_rxqs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id) +{ + struct ecore_vf_info *p_vf; + + p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true); + if (!p_vf) + return 0; + + return p_vf->num_active_rxqs; +} + +void *ecore_iov_get_vf_ctx(struct ecore_hwfn *p_hwfn, u16 rel_vf_id) +{ + struct ecore_vf_info *p_vf; + + p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true); + if (!p_vf) + return OSAL_NULL; + + return p_vf->ctx; +} + +u8 ecore_iov_get_vf_num_sbs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id) +{ + struct ecore_vf_info *p_vf; + + p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true); + if (!p_vf) + return 0; + + return p_vf->num_sbs; +} + +bool ecore_iov_is_vf_wait_for_acquire(struct ecore_hwfn *p_hwfn, u16 rel_vf_id) +{ + struct ecore_vf_info *p_vf; + + p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true); + if (!p_vf) + return false; + + return (p_vf->state == VF_FREE); +} + +bool ecore_iov_is_vf_acquired_not_initialized(struct ecore_hwfn *p_hwfn, + u16 rel_vf_id) +{ + struct ecore_vf_info *p_vf; + + p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true); + if (!p_vf) + return false; + + return (p_vf->state == VF_ACQUIRED); +} + +bool ecore_iov_is_vf_initialized(struct ecore_hwfn *p_hwfn, u16 rel_vf_id) +{ + struct ecore_vf_info *p_vf; + + p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true); + if (!p_vf) + return false; + + return (p_vf->state == VF_ENABLED); +} + +bool ecore_iov_is_vf_started(struct ecore_hwfn *p_hwfn, + u16 rel_vf_id) +{ + struct ecore_vf_info *p_vf; + + p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true); + if (!p_vf) + return false; + + return (p_vf->state != VF_FREE && p_vf->state != VF_STOPPED); +} + +int +ecore_iov_get_vf_min_rate(struct ecore_hwfn *p_hwfn, int vfid) +{ + struct ecore_wfq_data *vf_vp_wfq; + struct ecore_vf_info *vf_info; + + vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true); + if (!vf_info) + return 0; + + vf_vp_wfq = &p_hwfn->qm_info.wfq_data[vf_info->vport_id]; + + if (vf_vp_wfq->configured) + return vf_vp_wfq->min_speed; + else + return 0; +} + +#ifdef CONFIG_ECORE_SW_CHANNEL +void ecore_iov_set_vf_hw_channel(struct ecore_hwfn *p_hwfn, int vfid, + bool b_is_hw) +{ + struct ecore_vf_info *vf_info; + + vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true); + if (!vf_info) + return; + + vf_info->b_hw_channel = b_is_hw; +} +#endif diff --git 
a/src/spdk/dpdk/drivers/net/qede/base/ecore_sriov.h b/src/spdk/dpdk/drivers/net/qede/base/ecore_sriov.h new file mode 100644 index 000000000..e748e67d7 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_sriov.h @@ -0,0 +1,297 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. + * All rights reserved. + * www.cavium.com + */ + +#ifndef __ECORE_SRIOV_H__ +#define __ECORE_SRIOV_H__ + +#include "ecore_status.h" +#include "ecore_vfpf_if.h" +#include "ecore_iov_api.h" +#include "ecore_hsi_common.h" +#include "ecore_l2.h" + +#define ECORE_ETH_MAX_VF_NUM_VLAN_FILTERS \ + (MAX_NUM_VFS_K2 * ECORE_ETH_VF_NUM_VLAN_FILTERS) + +/* Represents a full message. Both the request filled by VF + * and the response filled by the PF. The VF needs one copy + * of this message, it fills the request part and sends it to + * the PF. The PF will copy the response to the response part for + * the VF to later read it. The PF needs to hold a message like this + * per VF, the request that is copied to the PF is placed in the + * request size, and the response is filled by the PF before sending + * it to the VF. + */ +struct ecore_vf_mbx_msg { + union vfpf_tlvs req; + union pfvf_tlvs resp; +}; + +/* This mailbox is maintained per VF in its PF + * contains all information required for sending / receiving + * a message + */ +struct ecore_iov_vf_mbx { + union vfpf_tlvs *req_virt; + dma_addr_t req_phys; + union pfvf_tlvs *reply_virt; + dma_addr_t reply_phys; + + /* Address in VF where a pending message is located */ + dma_addr_t pending_req; + + /* Message from VF awaits handling */ + bool b_pending_msg; + + u8 *offset; + +#ifdef CONFIG_ECORE_SW_CHANNEL + struct ecore_iov_sw_mbx sw_mbx; +#endif + + /* VF GPA address */ + u32 vf_addr_lo; + u32 vf_addr_hi; + + struct vfpf_first_tlv first_tlv; /* saved VF request header */ + + u8 flags; +#define VF_MSG_INPROCESS 0x1 /* failsafe - the FW should prevent + * more then one pending msg + */ +}; + +#define ECORE_IOV_LEGACY_QID_RX (0) +#define ECORE_IOV_LEGACY_QID_TX (1) +#define ECORE_IOV_QID_INVALID (0xFE) + +struct ecore_vf_queue_cid { + bool b_is_tx; + struct ecore_queue_cid *p_cid; +}; + +/* Describes a qzone associated with the VF */ +struct ecore_vf_queue { + /* Input from upper-layer, mapping relateive queue to queue-zone */ + u16 fw_rx_qid; + u16 fw_tx_qid; + + struct ecore_vf_queue_cid cids[MAX_QUEUES_PER_QZONE]; +}; + +enum vf_state { + VF_FREE = 0, /* VF ready to be acquired holds no resc */ + VF_ACQUIRED = 1, /* VF, acquired, but not initalized */ + VF_ENABLED = 2, /* VF, Enabled */ + VF_RESET = 3, /* VF, FLR'd, pending cleanup */ + VF_STOPPED = 4 /* VF, Stopped */ +}; + +struct ecore_vf_vlan_shadow { + bool used; + u16 vid; +}; + +struct ecore_vf_shadow_config { + /* Shadow copy of all guest vlans */ + struct ecore_vf_vlan_shadow vlans[ECORE_ETH_VF_NUM_VLAN_FILTERS + 1]; + + /* Shadow copy of all configured MACs; Empty if forcing MACs */ + u8 macs[ECORE_ETH_VF_NUM_MAC_FILTERS][ETH_ALEN]; + u8 inner_vlan_removal; +}; + +/* PFs maintain an array of this structure, per VF */ +struct ecore_vf_info { + struct ecore_iov_vf_mbx vf_mbx; + enum vf_state state; + bool b_init; + bool b_malicious; + u8 to_disable; + + struct ecore_bulletin bulletin; + dma_addr_t vf_bulletin; + +#ifdef CONFIG_ECORE_SW_CHANNEL + /* Determine whether PF communicate with VF using HW/SW channel */ + bool b_hw_channel; +#endif + + /* PF saves a copy of the last VF acquire message */ + struct vfpf_acquire_tlv acquire; + + u32 concrete_fid; + u16 opaque_fid; + 
u16 mtu; + + u8 vport_id; + u8 rss_eng_id; + u8 relative_vf_id; + u8 abs_vf_id; +#define ECORE_VF_ABS_ID(p_hwfn, p_vf) (ECORE_PATH_ID(p_hwfn) ? \ + (p_vf)->abs_vf_id + MAX_NUM_VFS_BB : \ + (p_vf)->abs_vf_id) + + u8 vport_instance; /* Number of active vports */ + u8 num_rxqs; + u8 num_txqs; + + u16 rx_coal; + u16 tx_coal; + + u8 num_sbs; + + u8 num_mac_filters; + u8 num_vlan_filters; + + struct ecore_vf_queue vf_queues[ECORE_MAX_VF_CHAINS_PER_PF]; + u16 igu_sbs[ECORE_MAX_VF_CHAINS_PER_PF]; + + /* TODO - Only windows is using it - should be removed */ + u8 was_malicious; + u8 num_active_rxqs; + void *ctx; + struct ecore_public_vf_info p_vf_info; + bool spoof_chk; /* Current configured on HW */ + bool req_spoofchk_val; /* Requested value */ + + /* Stores the configuration requested by VF */ + struct ecore_vf_shadow_config shadow_config; + + /* A bitfield using bulletin's valid-map bits, used to indicate + * which of the bulletin board features have been configured. + */ + u64 configured_features; +#define ECORE_IOV_CONFIGURED_FEATURES_MASK ((1 << MAC_ADDR_FORCED) | \ + (1 << VLAN_ADDR_FORCED)) +}; + +/* This structure is part of ecore_hwfn and used only for PFs that have sriov + * capability enabled. + */ +struct ecore_pf_iov { + struct ecore_vf_info vfs_array[MAX_NUM_VFS_K2]; + u64 pending_flr[ECORE_VF_ARRAY_LENGTH]; + +#ifndef REMOVE_DBG + /* This doesn't serve anything functionally, but it makes windows + * debugging of IOV related issues easier. + */ + u64 active_vfs[ECORE_VF_ARRAY_LENGTH]; +#endif + + /* Allocate message address continuosuly and split to each VF */ + void *mbx_msg_virt_addr; + dma_addr_t mbx_msg_phys_addr; + u32 mbx_msg_size; + void *mbx_reply_virt_addr; + dma_addr_t mbx_reply_phys_addr; + u32 mbx_reply_size; + void *p_bulletins; + dma_addr_t bulletins_phys; + u32 bulletins_size; +}; + +#ifdef CONFIG_ECORE_SRIOV +/** + * @brief Read sriov related information and allocated resources + * reads from configuraiton space, shmem, etc. + * + * @param p_hwfn + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_iov_hw_info(struct ecore_hwfn *p_hwfn); + +/** + * @brief ecore_add_tlv - place a given tlv on the tlv buffer at next offset + * + * @param offset + * @param type + * @param length + * + * @return pointer to the newly placed tlv + */ +void *ecore_add_tlv(u8 **offset, u16 type, u16 length); + +/** + * @brief list the types and lengths of the tlvs on the buffer + * + * @param p_hwfn + * @param tlvs_list + */ +void ecore_dp_tlv_list(struct ecore_hwfn *p_hwfn, + void *tlvs_list); + +/** + * @brief ecore_iov_alloc - allocate sriov related resources + * + * @param p_hwfn + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_iov_alloc(struct ecore_hwfn *p_hwfn); + +/** + * @brief ecore_iov_setup - setup sriov related resources + * + * @param p_hwfn + */ +void ecore_iov_setup(struct ecore_hwfn *p_hwfn); + +/** + * @brief ecore_iov_free - free sriov related resources + * + * @param p_hwfn + */ +void ecore_iov_free(struct ecore_hwfn *p_hwfn); + +/** + * @brief free sriov related memory that was allocated during hw_prepare + * + * @param p_dev + */ +void ecore_iov_free_hw_info(struct ecore_dev *p_dev); + +/** + * @brief Mark structs of vfs that have been FLR-ed. + * + * @param p_hwfn + * @param disabled_vfs - bitmask of all VFs on path that were FLRed + * + * @return true iff one of the PF's vfs got FLRed. false otherwise. 
+ */ +bool ecore_iov_mark_vf_flr(struct ecore_hwfn *p_hwfn, + u32 *disabled_vfs); + +/** + * @brief Search extended TLVs in request/reply buffer. + * + * @param p_hwfn + * @param p_tlvs_list - Pointer to tlvs list + * @param req_type - Type of TLV + * + * @return pointer to tlv type if found, otherwise returns NULL. + */ +void *ecore_iov_search_list_tlvs(struct ecore_hwfn *p_hwfn, + void *p_tlvs_list, u16 req_type); + +/** + * @brief ecore_iov_get_vf_info - return the database of a + * specific VF + * + * @param p_hwfn + * @param relative_vf_id - relative id of the VF for which info + * is requested + * @param b_enabled_only - false iff want to access even if vf is disabled + * + * @return struct ecore_vf_info* + */ +struct ecore_vf_info *ecore_iov_get_vf_info(struct ecore_hwfn *p_hwfn, + u16 relative_vf_id, + bool b_enabled_only); +#endif +#endif /* __ECORE_SRIOV_H__ */ diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_status.h b/src/spdk/dpdk/drivers/net/qede/base/ecore_status.h new file mode 100644 index 000000000..b893f1d41 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_status.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. + * All rights reserved. + * www.cavium.com + */ + +#ifndef __ECORE_STATUS_H__ +#define __ECORE_STATUS_H__ + +enum _ecore_status_t { + ECORE_CONN_RESET = -13, + ECORE_UNKNOWN_ERROR = -12, + ECORE_NORESOURCES = -11, + ECORE_NODEV = -10, + ECORE_ABORTED = -9, + ECORE_AGAIN = -8, + ECORE_NOTIMPL = -7, + ECORE_EXISTS = -6, + ECORE_IO = -5, + ECORE_TIMEOUT = -4, + ECORE_INVAL = -3, + ECORE_BUSY = -2, + ECORE_NOMEM = -1, + ECORE_SUCCESS = 0, + /* PENDING is not an error and should be positive */ + ECORE_PENDING = 1, +}; + +#endif /* __ECORE_STATUS_H__ */ diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_utils.h b/src/spdk/dpdk/drivers/net/qede/base/ecore_utils.h new file mode 100644 index 000000000..249136b08 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_utils.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. + * All rights reserved. + * www.cavium.com + */ + +#ifndef __ECORE_UTILS_H__ +#define __ECORE_UTILS_H__ + +/* dma_addr_t manip */ +/* Suppress "right shift count >= width of type" warning when that quantity is + * 32-bits rquires the >> 16) >> 16) + */ +#define PTR_LO(x) ((u32)(((osal_uintptr_t)(x)) & 0xffffffff)) +#define PTR_HI(x) ((u32)((((osal_uintptr_t)(x)) >> 16) >> 16)) + +#define DMA_LO(x) ((u32)(((dma_addr_t)(x)) & 0xffffffff)) +#define DMA_HI(x) ((u32)(((dma_addr_t)(x)) >> 32)) + +#define DMA_LO_LE(x) OSAL_CPU_TO_LE32(DMA_LO(x)) +#define DMA_HI_LE(x) OSAL_CPU_TO_LE32(DMA_HI(x)) + +/* It's assumed that whoever includes this has previously included an hsi + * file defining the regpair. + */ +#define DMA_REGPAIR_LE(x, val) (x).hi = DMA_HI_LE((val)); \ + (x).lo = DMA_LO_LE((val)) + +#define HILO_GEN(hi, lo, type) ((((type)(hi)) << 32) + (lo)) +#define HILO_DMA(hi, lo) HILO_GEN(hi, lo, dma_addr_t) +#define HILO_64(hi, lo) HILO_GEN(hi, lo, u64) +#define HILO_DMA_REGPAIR(regpair) (HILO_DMA(regpair.hi, regpair.lo)) +#define HILO_64_REGPAIR(regpair) (HILO_64(regpair.hi, regpair.lo)) + +#endif diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_vf.c b/src/spdk/dpdk/drivers/net/qede/base/ecore_vf.c new file mode 100644 index 000000000..db03bc494 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_vf.c @@ -0,0 +1,1980 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. 
+ * All rights reserved. + * www.cavium.com + */ + +#include "bcm_osal.h" +#include "ecore.h" +#include "ecore_hsi_eth.h" +#include "ecore_sriov.h" +#include "ecore_l2_api.h" +#include "ecore_vf.h" +#include "ecore_vfpf_if.h" +#include "ecore_status.h" +#include "reg_addr.h" +#include "ecore_int.h" +#include "ecore_l2.h" +#include "ecore_mcp_api.h" +#include "ecore_vf_api.h" + +static void *ecore_vf_pf_prep(struct ecore_hwfn *p_hwfn, u16 type, u16 length) +{ + struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; + void *p_tlv; + + /* This lock is released when we receive PF's response + * in ecore_send_msg2pf(). + * So, ecore_vf_pf_prep() and ecore_send_msg2pf() + * must come in sequence. + */ + OSAL_MUTEX_ACQUIRE(&p_iov->mutex); + + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "preparing to send %s tlv over vf pf channel\n", + qede_ecore_channel_tlvs_string[type]); + + /* Reset Request offset */ + p_iov->offset = (u8 *)(p_iov->vf2pf_request); + + /* Clear mailbox - both request and reply */ + OSAL_MEMSET(p_iov->vf2pf_request, 0, sizeof(union vfpf_tlvs)); + OSAL_MEMSET(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs)); + + /* Init type and length */ + p_tlv = ecore_add_tlv(&p_iov->offset, type, length); + + /* Init first tlv header */ + ((struct vfpf_first_tlv *)p_tlv)->reply_address = + (u64)p_iov->pf2vf_reply_phys; + + return p_tlv; +} + +static void ecore_vf_pf_req_end(struct ecore_hwfn *p_hwfn, + enum _ecore_status_t req_status) +{ + union pfvf_tlvs *resp = p_hwfn->vf_iov_info->pf2vf_reply; + + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "VF request status = 0x%x, PF reply status = 0x%x\n", + req_status, resp->default_resp.hdr.status); + + OSAL_MUTEX_RELEASE(&p_hwfn->vf_iov_info->mutex); +} + +#ifdef CONFIG_ECORE_SW_CHANNEL +/* The SW channel implementation of Windows needs to know the 'exact' + * response size of any given message. That means that for future + * messages we'd be unable to send TLVs to PF if he'll be unable to + * answer them if the |response| != |default response|. + * We'd need to handshake in acquire capabilities for any such. + */ +#endif +static enum _ecore_status_t +ecore_send_msg2pf(struct ecore_hwfn *p_hwfn, + u8 *done, u32 resp_size) +{ + union vfpf_tlvs *p_req = p_hwfn->vf_iov_info->vf2pf_request; + struct ustorm_trigger_vf_zone trigger; + struct ustorm_vf_zone *zone_data; + enum _ecore_status_t rc = ECORE_SUCCESS; + int time = 100; + + zone_data = (struct ustorm_vf_zone *)PXP_VF_BAR0_START_USDM_ZONE_B; + + /* output tlvs list */ + ecore_dp_tlv_list(p_hwfn, p_req); + + /* need to add the END TLV to the message size */ + resp_size += sizeof(struct channel_list_end_tlv); + + /* Send TLVs over HW channel */ + OSAL_MEMSET(&trigger, 0, sizeof(struct ustorm_trigger_vf_zone)); + trigger.vf_pf_msg_valid = 1; + + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "VF -> PF [%02x] message: [%08x, %08x] --> %p," + " %08x --> %p\n", + GET_FIELD(p_hwfn->hw_info.concrete_fid, + PXP_CONCRETE_FID_PFID), + U64_HI(p_hwfn->vf_iov_info->vf2pf_request_phys), + U64_LO(p_hwfn->vf_iov_info->vf2pf_request_phys), + &zone_data->non_trigger.vf_pf_msg_addr, + *((u32 *)&trigger), &zone_data->trigger); + + REG_WR(p_hwfn, + (osal_uintptr_t)&zone_data->non_trigger.vf_pf_msg_addr.lo, + U64_LO(p_hwfn->vf_iov_info->vf2pf_request_phys)); + + REG_WR(p_hwfn, + (osal_uintptr_t)&zone_data->non_trigger.vf_pf_msg_addr.hi, + U64_HI(p_hwfn->vf_iov_info->vf2pf_request_phys)); + + /* The message data must be written first, to prevent trigger before + * data is written. 
+ */ + OSAL_WMB(p_hwfn->p_dev); + + REG_WR(p_hwfn, (osal_uintptr_t)&zone_data->trigger, + *((u32 *)&trigger)); + + /* When PF would be done with the response, it would write back to the + * `done' address. Poll until then. + */ + while ((!*done) && time) { + OSAL_MSLEEP(25); + time--; + } + + if (!*done) { + DP_NOTICE(p_hwfn, true, + "VF <-- PF Timeout [Type %d]\n", + p_req->first_tlv.tl.type); + rc = ECORE_TIMEOUT; + } else { + if ((*done != PFVF_STATUS_SUCCESS) && + (*done != PFVF_STATUS_NO_RESOURCE)) + DP_NOTICE(p_hwfn, false, + "PF response: %d [Type %d]\n", + *done, p_req->first_tlv.tl.type); + else + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "PF response: %d [Type %d]\n", + *done, p_req->first_tlv.tl.type); + } + + return rc; +} + +static void ecore_vf_pf_add_qid(struct ecore_hwfn *p_hwfn, + struct ecore_queue_cid *p_cid) +{ + struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct vfpf_qid_tlv *p_qid_tlv; + + /* Only add QIDs for the queue if it was negotiated with PF */ + if (!(p_iov->acquire_resp.pfdev_info.capabilities & + PFVF_ACQUIRE_CAP_QUEUE_QIDS)) + return; + + p_qid_tlv = ecore_add_tlv(&p_iov->offset, + CHANNEL_TLV_QID, sizeof(*p_qid_tlv)); + p_qid_tlv->qid = p_cid->qid_usage_idx; +} + +enum _ecore_status_t _ecore_vf_pf_release(struct ecore_hwfn *p_hwfn, + bool b_final) +{ + struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct pfvf_def_resp_tlv *resp; + struct vfpf_first_tlv *req; + u32 size; + enum _ecore_status_t rc; + + /* clear mailbox and prep first tlv */ + req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_RELEASE, sizeof(*req)); + + /* add list termination tlv */ + ecore_add_tlv(&p_iov->offset, + CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + resp = &p_iov->pf2vf_reply->default_resp; + rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); + + if (rc == ECORE_SUCCESS && resp->hdr.status != PFVF_STATUS_SUCCESS) + rc = ECORE_AGAIN; + + ecore_vf_pf_req_end(p_hwfn, rc); + if (!b_final) + return rc; + + p_hwfn->b_int_enabled = 0; + + if (p_iov->vf2pf_request) + OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, + p_iov->vf2pf_request, + p_iov->vf2pf_request_phys, + sizeof(union vfpf_tlvs)); + if (p_iov->pf2vf_reply) + OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, + p_iov->pf2vf_reply, + p_iov->pf2vf_reply_phys, + sizeof(union pfvf_tlvs)); + + if (p_iov->bulletin.p_virt) { + size = sizeof(struct ecore_bulletin_content); + OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, + p_iov->bulletin.p_virt, + p_iov->bulletin.phys, + size); + } + +#ifdef CONFIG_ECORE_LOCK_ALLOC + OSAL_MUTEX_DEALLOC(&p_iov->mutex); +#endif + + OSAL_FREE(p_hwfn->p_dev, p_hwfn->vf_iov_info); + p_hwfn->vf_iov_info = OSAL_NULL; + + return rc; +} + +enum _ecore_status_t ecore_vf_pf_release(struct ecore_hwfn *p_hwfn) +{ + return _ecore_vf_pf_release(p_hwfn, true); +} + +static void ecore_vf_pf_acquire_reduce_resc(struct ecore_hwfn *p_hwfn, + struct vf_pf_resc_request *p_req, + struct pf_vf_resc *p_resp) +{ + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "PF unwilling to fullill resource request: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x] cids [%02x/%02x]. 
Try PF recommended amount\n",
+ p_req->num_rxqs, p_resp->num_rxqs,
+ p_req->num_txqs, p_resp->num_txqs,
+ p_req->num_sbs, p_resp->num_sbs,
+ p_req->num_mac_filters, p_resp->num_mac_filters,
+ p_req->num_vlan_filters, p_resp->num_vlan_filters,
+ p_req->num_mc_filters, p_resp->num_mc_filters,
+ p_req->num_cids, p_resp->num_cids);
+
+ /* humble our request */
+ p_req->num_txqs = p_resp->num_txqs;
+ p_req->num_rxqs = p_resp->num_rxqs;
+ p_req->num_sbs = p_resp->num_sbs;
+ p_req->num_mac_filters = p_resp->num_mac_filters;
+ p_req->num_vlan_filters = p_resp->num_vlan_filters;
+ p_req->num_mc_filters = p_resp->num_mc_filters;
+ p_req->num_cids = p_resp->num_cids;
+}
+
+static enum _ecore_status_t
+ecore_vf_pf_soft_flr_acquire(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_def_resp_tlv *resp;
+ struct vfpf_soft_flr_tlv *req;
+ enum _ecore_status_t rc;
+
+ req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_SOFT_FLR, sizeof(*req));
+
+ /* add list termination tlv */
+ ecore_add_tlv(&p_iov->offset,
+ CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ resp = &p_iov->pf2vf_reply->default_resp;
+ rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "rc=0x%x\n", rc);
+
+ /* release the mutex, as ecore_vf_pf_acquire() takes it again */
+ ecore_vf_pf_req_end(p_hwfn, ECORE_AGAIN);
+
+ /* As of today, there is no mechanism in place for the VF to know the
+ * FLR status, so wait sufficiently long (worst case time) for the FLR
+ * to complete, as the mailbox request to the MFW by the PF for
+ * initiating the VF FLR and the PF processing the VF FLR could take time.
+ */
+ OSAL_MSLEEP(3000);
+
+ return ecore_vf_pf_acquire(p_hwfn);
+}
+
+enum _ecore_status_t ecore_vf_pf_acquire(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_acquire_resp_tlv *resp = &p_iov->pf2vf_reply->acquire_resp;
+ struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
+ struct ecore_vf_acquire_sw_info vf_sw_info;
+ struct ecore_dev *p_dev = p_hwfn->p_dev;
+ u8 retry_cnt = p_iov->acquire_retry_cnt;
+ struct vf_pf_resc_request *p_resc;
+ bool resources_acquired = false;
+ struct vfpf_acquire_tlv *req;
+ int attempts = 0;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ /* clear mailbox and prep first tlv */
+ req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_ACQUIRE, sizeof(*req));
+ p_resc = &req->resc_request;
+
+ /* @@@ TBD: PF may not be ready bnx2x_get_vf_id... 
*/ + req->vfdev_info.opaque_fid = p_hwfn->hw_info.opaque_fid; + + p_resc->num_rxqs = ECORE_MAX_VF_CHAINS_PER_PF; + p_resc->num_txqs = ECORE_MAX_VF_CHAINS_PER_PF; + p_resc->num_sbs = ECORE_MAX_VF_CHAINS_PER_PF; + p_resc->num_mac_filters = ECORE_ETH_VF_NUM_MAC_FILTERS; + p_resc->num_vlan_filters = ECORE_ETH_VF_NUM_VLAN_FILTERS; + p_resc->num_cids = ECORE_ETH_VF_DEFAULT_NUM_CIDS; + + OSAL_MEMSET(&vf_sw_info, 0, sizeof(vf_sw_info)); + OSAL_VF_FILL_ACQUIRE_RESC_REQ(p_hwfn, &req->resc_request, &vf_sw_info); + + req->vfdev_info.os_type = vf_sw_info.os_type; + req->vfdev_info.driver_version = vf_sw_info.driver_version; + req->vfdev_info.fw_major = FW_MAJOR_VERSION; + req->vfdev_info.fw_minor = FW_MINOR_VERSION; + req->vfdev_info.fw_revision = FW_REVISION_VERSION; + req->vfdev_info.fw_engineering = FW_ENGINEERING_VERSION; + req->vfdev_info.eth_fp_hsi_major = ETH_HSI_VER_MAJOR; + req->vfdev_info.eth_fp_hsi_minor = ETH_HSI_VER_MINOR; + + /* Fill capability field with any non-deprecated config we support */ + req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_100G; + + /* If we've mapped the doorbell bar, try using queue qids */ + if (p_iov->b_doorbell_bar) + req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_PHYSICAL_BAR | + VFPF_ACQUIRE_CAP_QUEUE_QIDS; + + /* pf 2 vf bulletin board address */ + req->bulletin_addr = p_iov->bulletin.phys; + req->bulletin_size = p_iov->bulletin.size; + + /* add list termination tlv */ + ecore_add_tlv(&p_iov->offset, + CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + while (!resources_acquired) { + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "attempting to acquire resources\n"); + + /* Clear response buffer, as this might be a re-send */ + OSAL_MEMSET(p_iov->pf2vf_reply, 0, + sizeof(union pfvf_tlvs)); + + /* send acquire request */ + rc = ecore_send_msg2pf(p_hwfn, + &resp->hdr.status, sizeof(*resp)); + + if (retry_cnt && rc == ECORE_TIMEOUT) { + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "VF retrying to acquire due to VPC timeout\n"); + retry_cnt--; + continue; + } + + if (rc != ECORE_SUCCESS) + goto exit; + + /* copy acquire response from buffer to p_hwfn */ + OSAL_MEMCPY(&p_iov->acquire_resp, + resp, sizeof(p_iov->acquire_resp)); + + attempts++; + + if (resp->hdr.status == PFVF_STATUS_SUCCESS) { + /* PF agrees to allocate our resources */ + if (!(resp->pfdev_info.capabilities & + PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE)) { + /* It's possible legacy PF mistakenly accepted; + * but we don't care - simply mark it as + * legacy and continue. + */ + req->vfdev_info.capabilities |= + VFPF_ACQUIRE_CAP_PRE_FP_HSI; + } + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "resources acquired\n"); + resources_acquired = true; + } /* PF refuses to allocate our resources */ + else if (resp->hdr.status == PFVF_STATUS_NO_RESOURCE && + attempts < ECORE_VF_ACQUIRE_THRESH) { + ecore_vf_pf_acquire_reduce_resc(p_hwfn, p_resc, + &resp->resc); + + } else if (resp->hdr.status == PFVF_STATUS_NOT_SUPPORTED) { + if (pfdev_info->major_fp_hsi && + (pfdev_info->major_fp_hsi != ETH_HSI_VER_MAJOR)) { + DP_NOTICE(p_hwfn, false, + "PF uses an incompatible fastpath HSI" + " %02x.%02x [VF requires %02x.%02x]." + " Please change to a VF driver using" + " %02x.xx.\n", + pfdev_info->major_fp_hsi, + pfdev_info->minor_fp_hsi, + ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR, + pfdev_info->major_fp_hsi); + rc = ECORE_INVAL; + goto exit; + } + + if (!pfdev_info->major_fp_hsi) { + if (req->vfdev_info.capabilities & + VFPF_ACQUIRE_CAP_PRE_FP_HSI) { + DP_NOTICE(p_hwfn, false, + "PF uses very old drivers." 
+ " Please change to a VF" + " driver using no later than" + " 8.8.x.x.\n"); + rc = ECORE_INVAL; + goto exit; + } else { + DP_INFO(p_hwfn, + "PF is old - try re-acquire to" + " see if it supports FW-version" + " override\n"); + req->vfdev_info.capabilities |= + VFPF_ACQUIRE_CAP_PRE_FP_HSI; + continue; + } + } + + /* If PF/VF are using same Major, PF must have had + * it's reasons. Simply fail. + */ + DP_NOTICE(p_hwfn, false, + "PF rejected acquisition by VF\n"); + rc = ECORE_INVAL; + goto exit; + } else if (resp->hdr.status == PFVF_STATUS_ACQUIRED) { + ecore_vf_pf_req_end(p_hwfn, ECORE_AGAIN); + return ecore_vf_pf_soft_flr_acquire(p_hwfn); + } else { + DP_ERR(p_hwfn, + "PF returned err %d to VF acquisition request\n", + resp->hdr.status); + rc = ECORE_AGAIN; + goto exit; + } + } + + /* Mark the PF as legacy, if needed */ + if (req->vfdev_info.capabilities & + VFPF_ACQUIRE_CAP_PRE_FP_HSI) + p_iov->b_pre_fp_hsi = true; + + /* In case PF doesn't support multi-queue Tx, update the number of + * CIDs to reflect the number of queues [older PFs didn't fill that + * field]. + */ + if (!(resp->pfdev_info.capabilities & + PFVF_ACQUIRE_CAP_QUEUE_QIDS)) + resp->resc.num_cids = resp->resc.num_rxqs + + resp->resc.num_txqs; + + rc = OSAL_VF_UPDATE_ACQUIRE_RESC_RESP(p_hwfn, &resp->resc); + if (rc) { + DP_NOTICE(p_hwfn, true, + "VF_UPDATE_ACQUIRE_RESC_RESP Failed:" + " status = 0x%x.\n", + rc); + rc = ECORE_AGAIN; + goto exit; + } + + /* Update bulletin board size with response from PF */ + p_iov->bulletin.size = resp->bulletin_size; + + /* get HW info */ + p_dev->type = resp->pfdev_info.dev_type; + p_dev->chip_rev = (u8)resp->pfdev_info.chip_rev; + + DP_INFO(p_hwfn, "Chip details - %s%d\n", + ECORE_IS_BB(p_dev) ? "BB" : "AH", + CHIP_REV_IS_A0(p_hwfn->p_dev) ? 0 : 1); + + p_dev->chip_num = pfdev_info->chip_num & 0xffff; + + /* Learn of the possibility of CMT */ + if (IS_LEAD_HWFN(p_hwfn)) { + if (resp->pfdev_info.capabilities & PFVF_ACQUIRE_CAP_100G) { + DP_INFO(p_hwfn, "100g VF\n"); + p_dev->num_hwfns = 2; + } + } + + /* @DPDK */ + if (((p_iov->b_pre_fp_hsi == true) & + ETH_HSI_VER_MINOR) && + (resp->pfdev_info.minor_fp_hsi < ETH_HSI_VER_MINOR)) + DP_INFO(p_hwfn, + "PF is using older fastpath HSI;" + " %02x.%02x is configured\n", + ETH_HSI_VER_MAJOR, + resp->pfdev_info.minor_fp_hsi); + +exit: + ecore_vf_pf_req_end(p_hwfn, rc); + + return rc; +} + +u32 ecore_vf_hw_bar_size(struct ecore_hwfn *p_hwfn, + enum BAR_ID bar_id) +{ + u32 bar_size; + + /* Regview size is fixed */ + if (bar_id == BAR_ID_0) + return 1 << 17; + + /* Doorbell is received from PF */ + bar_size = p_hwfn->vf_iov_info->acquire_resp.pfdev_info.bar_size; + if (bar_size) + return 1 << bar_size; + return 0; +} + +enum _ecore_status_t +ecore_vf_hw_prepare(struct ecore_hwfn *p_hwfn, + struct ecore_hw_prepare_params *p_params) +{ + struct ecore_hwfn *p_lead = ECORE_LEADING_HWFN(p_hwfn->p_dev); + struct ecore_vf_iov *p_iov; + u32 reg; + enum _ecore_status_t rc; + + /* Set number of hwfns - might be overridden once leading hwfn learns + * actual configuration from PF. 
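+ * (A CMT/100G PF reports PFVF_ACQUIRE_CAP_100G in the ACQUIRE response,
+ * in which case num_hwfns is raised to 2.)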
+ */ + if (IS_LEAD_HWFN(p_hwfn)) + p_hwfn->p_dev->num_hwfns = 1; + + reg = PXP_VF_BAR0_ME_OPAQUE_ADDRESS; + p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, reg); + + reg = PXP_VF_BAR0_ME_CONCRETE_ADDRESS; + p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, reg); + + /* Allocate vf sriov info */ + p_iov = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_iov)); + if (!p_iov) { + DP_NOTICE(p_hwfn, true, + "Failed to allocate `struct ecore_sriov'\n"); + return ECORE_NOMEM; + } + + /* Doorbells are tricky; Upper-layer has alreday set the hwfn doorbell + * value, but there are several incompatibily scenarios where that + * would be incorrect and we'd need to override it. + */ + if (p_hwfn->doorbells == OSAL_NULL) { + p_hwfn->doorbells = (u8 OSAL_IOMEM *)p_hwfn->regview + + PXP_VF_BAR0_START_DQ; + } else if (p_hwfn == p_lead) { + /* For leading hw-function, value is always correct, but need + * to handle scenario where legacy PF would not support 100g + * mapped bars later. + */ + p_iov->b_doorbell_bar = true; + } else { + /* here, value would be correct ONLY if the leading hwfn + * received indication that mapped-bars are supported. + */ + if (p_lead->vf_iov_info->b_doorbell_bar) + p_iov->b_doorbell_bar = true; + else + p_hwfn->doorbells = (u8 OSAL_IOMEM *) + p_hwfn->regview + + PXP_VF_BAR0_START_DQ; + } + + /* Allocate vf2pf msg */ + p_iov->vf2pf_request = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, + &p_iov-> + vf2pf_request_phys, + sizeof(union + vfpf_tlvs)); + if (!p_iov->vf2pf_request) { + DP_NOTICE(p_hwfn, true, + "Failed to allocate `vf2pf_request' DMA memory\n"); + goto free_p_iov; + } + + p_iov->pf2vf_reply = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, + &p_iov-> + pf2vf_reply_phys, + sizeof(union pfvf_tlvs)); + if (!p_iov->pf2vf_reply) { + DP_NOTICE(p_hwfn, true, + "Failed to allocate `pf2vf_reply' DMA memory\n"); + goto free_vf2pf_request; + } + + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "VF's Request mailbox [%p virt 0x%lx phys], " + "Response mailbox [%p virt 0x%lx phys]\n", + p_iov->vf2pf_request, + (unsigned long)p_iov->vf2pf_request_phys, + p_iov->pf2vf_reply, + (unsigned long)p_iov->pf2vf_reply_phys); + + /* Allocate Bulletin board */ + p_iov->bulletin.size = sizeof(struct ecore_bulletin_content); + p_iov->bulletin.p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, + &p_iov->bulletin. + phys, + p_iov->bulletin. + size); + if (!p_iov->bulletin.p_virt) { + DP_NOTICE(p_hwfn, false, "Failed to alloc bulletin memory\n"); + goto free_pf2vf_reply; + } + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "VF's bulletin Board [%p virt 0x%lx phys 0x%08x bytes]\n", + p_iov->bulletin.p_virt, (unsigned long)p_iov->bulletin.phys, + p_iov->bulletin.size); + +#ifdef CONFIG_ECORE_LOCK_ALLOC + if (OSAL_MUTEX_ALLOC(p_hwfn, &p_iov->mutex)) { + DP_NOTICE(p_hwfn, false, "Failed to allocate p_iov->mutex\n"); + goto free_bulletin_mem; + } +#endif + OSAL_MUTEX_INIT(&p_iov->mutex); + + p_iov->acquire_retry_cnt = p_params->acquire_retry_cnt; + p_hwfn->vf_iov_info = p_iov; + + p_hwfn->hw_info.personality = ECORE_PCI_ETH; + + rc = ecore_vf_pf_acquire(p_hwfn); + + /* If VF is 100g using a mapped bar and PF is too old to support that, + * acquisition would succeed - but the VF would have no way knowing + * the size of the doorbell bar configured in HW and thus will not + * know how to split it for 2nd hw-function. + * In this case we re-try without the indication of the mapped + * doorbell. 
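+ * I.e. release the channel, clear b_doorbell_bar, fall back to the
+ * regview-based doorbell address and issue a second ACQUIRE.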
+ */ + if (rc == ECORE_SUCCESS && + p_iov->b_doorbell_bar && + !ecore_vf_hw_bar_size(p_hwfn, BAR_ID_1) && + ECORE_IS_CMT(p_hwfn->p_dev)) { + rc = _ecore_vf_pf_release(p_hwfn, false); + if (rc != ECORE_SUCCESS) + return rc; + + p_iov->b_doorbell_bar = false; + p_hwfn->doorbells = (u8 OSAL_IOMEM *)p_hwfn->regview + + PXP_VF_BAR0_START_DQ; + rc = ecore_vf_pf_acquire(p_hwfn); + } + + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "Regview [%p], Doorbell [%p], Device-doorbell [%p]\n", + p_hwfn->regview, p_hwfn->doorbells, + p_hwfn->p_dev->doorbells); + + return rc; + +#ifdef CONFIG_ECORE_LOCK_ALLOC +free_bulletin_mem: + OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_iov->bulletin.p_virt, + p_iov->bulletin.phys, + p_iov->bulletin.size); +#endif +free_pf2vf_reply: + OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_iov->pf2vf_reply, + p_iov->pf2vf_reply_phys, + sizeof(union pfvf_tlvs)); +free_vf2pf_request: + OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_iov->vf2pf_request, + p_iov->vf2pf_request_phys, + sizeof(union vfpf_tlvs)); +free_p_iov: + OSAL_FREE(p_hwfn->p_dev, p_iov); + + return ECORE_NOMEM; +} + +/* @DPDK - changed enum ecore_tunn_clss to enum ecore_tunn_mode */ +static void +__ecore_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req, + struct ecore_tunn_update_type *p_src, + enum ecore_tunn_mode mask, u8 *p_cls) +{ + if (p_src->b_update_mode) { + p_req->tun_mode_update_mask |= (1 << mask); + + if (p_src->b_mode_enabled) + p_req->tunn_mode |= (1 << mask); + } + + *p_cls = p_src->tun_cls; +} + +/* @DPDK - changed enum ecore_tunn_clss to enum ecore_tunn_mode */ +static void +ecore_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req, + struct ecore_tunn_update_type *p_src, + enum ecore_tunn_mode mask, u8 *p_cls, + struct ecore_tunn_update_udp_port *p_port, + u8 *p_update_port, u16 *p_udp_port) +{ + if (p_port->b_update_port) { + *p_update_port = 1; + *p_udp_port = p_port->port; + } + + __ecore_vf_prep_tunn_req_tlv(p_req, p_src, mask, p_cls); +} + +void ecore_vf_set_vf_start_tunn_update_param(struct ecore_tunnel_info *p_tun) +{ + if (p_tun->vxlan.b_mode_enabled) + p_tun->vxlan.b_update_mode = true; + if (p_tun->l2_geneve.b_mode_enabled) + p_tun->l2_geneve.b_update_mode = true; + if (p_tun->ip_geneve.b_mode_enabled) + p_tun->ip_geneve.b_update_mode = true; + if (p_tun->l2_gre.b_mode_enabled) + p_tun->l2_gre.b_update_mode = true; + if (p_tun->ip_gre.b_mode_enabled) + p_tun->ip_gre.b_update_mode = true; + + p_tun->b_update_rx_cls = true; + p_tun->b_update_tx_cls = true; +} + +static void +__ecore_vf_update_tunn_param(struct ecore_tunn_update_type *p_tun, + u16 feature_mask, u8 tunn_mode, u8 tunn_cls, + enum ecore_tunn_mode val) +{ + if (feature_mask & (1 << val)) { + p_tun->b_mode_enabled = tunn_mode; + p_tun->tun_cls = tunn_cls; + } else { + p_tun->b_mode_enabled = false; + } +} + +static void +ecore_vf_update_tunn_param(struct ecore_hwfn *p_hwfn, + struct ecore_tunnel_info *p_tun, + struct pfvf_update_tunn_param_tlv *p_resp) +{ + /* Update mode and classes provided by PF */ + u16 feat_mask = p_resp->tunn_feature_mask; + + __ecore_vf_update_tunn_param(&p_tun->vxlan, feat_mask, + p_resp->vxlan_mode, p_resp->vxlan_clss, + ECORE_MODE_VXLAN_TUNN); + __ecore_vf_update_tunn_param(&p_tun->l2_geneve, feat_mask, + p_resp->l2geneve_mode, + p_resp->l2geneve_clss, + ECORE_MODE_L2GENEVE_TUNN); + __ecore_vf_update_tunn_param(&p_tun->ip_geneve, feat_mask, + p_resp->ipgeneve_mode, + p_resp->ipgeneve_clss, + ECORE_MODE_IPGENEVE_TUNN); + __ecore_vf_update_tunn_param(&p_tun->l2_gre, feat_mask, + p_resp->l2gre_mode, 
p_resp->l2gre_clss, + ECORE_MODE_L2GRE_TUNN); + __ecore_vf_update_tunn_param(&p_tun->ip_gre, feat_mask, + p_resp->ipgre_mode, p_resp->ipgre_clss, + ECORE_MODE_IPGRE_TUNN); + p_tun->geneve_port.port = p_resp->geneve_udp_port; + p_tun->vxlan_port.port = p_resp->vxlan_udp_port; + + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "tunn mode: vxlan=0x%x, l2geneve=0x%x, ipgeneve=0x%x, l2gre=0x%x, ipgre=0x%x", + p_tun->vxlan.b_mode_enabled, p_tun->l2_geneve.b_mode_enabled, + p_tun->ip_geneve.b_mode_enabled, + p_tun->l2_gre.b_mode_enabled, + p_tun->ip_gre.b_mode_enabled); +} + +enum _ecore_status_t +ecore_vf_pf_tunnel_param_update(struct ecore_hwfn *p_hwfn, + struct ecore_tunnel_info *p_src) +{ + struct ecore_tunnel_info *p_tun = &p_hwfn->p_dev->tunnel; + struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct pfvf_update_tunn_param_tlv *p_resp; + struct vfpf_update_tunn_param_tlv *p_req; + enum _ecore_status_t rc; + + p_req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_UPDATE_TUNN_PARAM, + sizeof(*p_req)); + + if (p_src->b_update_rx_cls && p_src->b_update_tx_cls) + p_req->update_tun_cls = 1; + + ecore_vf_prep_tunn_req_tlv(p_req, &p_src->vxlan, ECORE_MODE_VXLAN_TUNN, + &p_req->vxlan_clss, &p_src->vxlan_port, + &p_req->update_vxlan_port, + &p_req->vxlan_port); + ecore_vf_prep_tunn_req_tlv(p_req, &p_src->l2_geneve, + ECORE_MODE_L2GENEVE_TUNN, + &p_req->l2geneve_clss, &p_src->geneve_port, + &p_req->update_geneve_port, + &p_req->geneve_port); + __ecore_vf_prep_tunn_req_tlv(p_req, &p_src->ip_geneve, + ECORE_MODE_IPGENEVE_TUNN, + &p_req->ipgeneve_clss); + __ecore_vf_prep_tunn_req_tlv(p_req, &p_src->l2_gre, + ECORE_MODE_L2GRE_TUNN, &p_req->l2gre_clss); + __ecore_vf_prep_tunn_req_tlv(p_req, &p_src->ip_gre, + ECORE_MODE_IPGRE_TUNN, &p_req->ipgre_clss); + + /* add list termination tlv */ + ecore_add_tlv(&p_iov->offset, + CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + p_resp = &p_iov->pf2vf_reply->tunn_param_resp; + rc = ecore_send_msg2pf(p_hwfn, &p_resp->hdr.status, sizeof(*p_resp)); + + if (rc) + goto exit; + + if (p_resp->hdr.status != PFVF_STATUS_SUCCESS) { + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "Failed to update tunnel parameters\n"); + rc = ECORE_INVAL; + } + + ecore_vf_update_tunn_param(p_hwfn, p_tun, p_resp); +exit: + ecore_vf_pf_req_end(p_hwfn, rc); + return rc; +} + +enum _ecore_status_t +ecore_vf_pf_rxq_start(struct ecore_hwfn *p_hwfn, + struct ecore_queue_cid *p_cid, + u16 bd_max_bytes, + dma_addr_t bd_chain_phys_addr, + dma_addr_t cqe_pbl_addr, + u16 cqe_pbl_size, + void OSAL_IOMEM **pp_prod) +{ + struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct pfvf_start_queue_resp_tlv *resp; + struct vfpf_start_rxq_tlv *req; + u16 rx_qid = p_cid->rel.queue_id; + enum _ecore_status_t rc; + + /* clear mailbox and prep first tlv */ + req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_RXQ, sizeof(*req)); + + req->rx_qid = rx_qid; + req->cqe_pbl_addr = cqe_pbl_addr; + req->cqe_pbl_size = cqe_pbl_size; + req->rxq_addr = bd_chain_phys_addr; + req->hw_sb = p_cid->sb_igu_id; + req->sb_index = p_cid->sb_idx; + req->bd_max_bytes = bd_max_bytes; + req->stat_id = -1; /* Keep initialized, for future compatibility */ + + /* If PF is legacy, we'll need to calculate producers ourselves + * as well as clean them. 
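+ * (The producer sits in the MSTORM queue zone of the acquired hw-queue.)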
+ */ + if (p_iov->b_pre_fp_hsi) { + u8 hw_qid = p_iov->acquire_resp.resc.hw_qid[rx_qid]; + u32 init_prod_val = 0; + + *pp_prod = (u8 OSAL_IOMEM *)p_hwfn->regview + + MSTORM_QZONE_START(p_hwfn->p_dev) + + (hw_qid) * MSTORM_QZONE_SIZE; + + /* Init the rcq, rx bd and rx sge (if valid) producers to 0 */ + __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32), + (u32 *)(&init_prod_val)); + } + + ecore_vf_pf_add_qid(p_hwfn, p_cid); + + /* add list termination tlv */ + ecore_add_tlv(&p_iov->offset, + CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + resp = &p_iov->pf2vf_reply->queue_start; + rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); + if (rc) + goto exit; + + if (resp->hdr.status != PFVF_STATUS_SUCCESS) { + rc = ECORE_INVAL; + goto exit; + } + + /* Learn the address of the producer from the response */ + if (!p_iov->b_pre_fp_hsi) { + u32 init_prod_val = 0; + + *pp_prod = (u8 OSAL_IOMEM *)p_hwfn->regview + resp->offset; + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "Rxq[0x%02x]: producer at %p [offset 0x%08x]\n", + rx_qid, *pp_prod, resp->offset); + + /* Init the rcq, rx bd and rx sge (if valid) producers to 0. + * It was actually the PF's responsibility, but since some + * old PFs might fail to do so, we do this as well. + */ + OSAL_BUILD_BUG_ON(ETH_HSI_VER_MAJOR != 3); + __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32), + (u32 *)&init_prod_val); + } + +exit: + ecore_vf_pf_req_end(p_hwfn, rc); + + return rc; +} + +enum _ecore_status_t ecore_vf_pf_rxq_stop(struct ecore_hwfn *p_hwfn, + struct ecore_queue_cid *p_cid, + bool cqe_completion) +{ + struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct vfpf_stop_rxqs_tlv *req; + struct pfvf_def_resp_tlv *resp; + enum _ecore_status_t rc; + + /* clear mailbox and prep first tlv */ + req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_RXQS, sizeof(*req)); + + req->rx_qid = p_cid->rel.queue_id; + req->num_rxqs = 1; + req->cqe_completion = cqe_completion; + + ecore_vf_pf_add_qid(p_hwfn, p_cid); + + /* add list termination tlv */ + ecore_add_tlv(&p_iov->offset, + CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + resp = &p_iov->pf2vf_reply->default_resp; + rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); + if (rc) + goto exit; + + if (resp->hdr.status != PFVF_STATUS_SUCCESS) { + rc = ECORE_INVAL; + goto exit; + } + +exit: + ecore_vf_pf_req_end(p_hwfn, rc); + + return rc; +} + +enum _ecore_status_t +ecore_vf_pf_txq_start(struct ecore_hwfn *p_hwfn, + struct ecore_queue_cid *p_cid, + dma_addr_t pbl_addr, u16 pbl_size, + void OSAL_IOMEM **pp_doorbell) +{ + struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct pfvf_start_queue_resp_tlv *resp; + struct vfpf_start_txq_tlv *req; + u16 qid = p_cid->rel.queue_id; + enum _ecore_status_t rc; + + /* clear mailbox and prep first tlv */ + req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_TXQ, sizeof(*req)); + + req->tx_qid = qid; + + /* Tx */ + req->pbl_addr = pbl_addr; + req->pbl_size = pbl_size; + req->hw_sb = p_cid->sb_igu_id; + req->sb_index = p_cid->sb_idx; + + ecore_vf_pf_add_qid(p_hwfn, p_cid); + + /* add list termination tlv */ + ecore_add_tlv(&p_iov->offset, + CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + resp = &p_iov->pf2vf_reply->queue_start; + rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); + if (rc) + goto exit; + + if (resp->hdr.status != PFVF_STATUS_SUCCESS) { + rc = ECORE_INVAL; + goto exit; + } + + /* Modern PFs provide the actual offsets, while legacy + * provided only the queue id. 
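+ * For legacy PFs the doorbell address is derived from the CID instead.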
+ */ + if (!p_iov->b_pre_fp_hsi) { + *pp_doorbell = (u8 OSAL_IOMEM *)p_hwfn->doorbells + + resp->offset; + } else { + u8 cid = p_iov->acquire_resp.resc.cid[qid]; + + *pp_doorbell = (u8 OSAL_IOMEM *)p_hwfn->doorbells + + DB_ADDR_VF(cid, DQ_DEMS_LEGACY); + } + + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "Txq[0x%02x]: doorbell at %p [offset 0x%08x]\n", + qid, *pp_doorbell, resp->offset); +exit: + ecore_vf_pf_req_end(p_hwfn, rc); + + return rc; +} + +enum _ecore_status_t ecore_vf_pf_txq_stop(struct ecore_hwfn *p_hwfn, + struct ecore_queue_cid *p_cid) +{ + struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct vfpf_stop_txqs_tlv *req; + struct pfvf_def_resp_tlv *resp; + enum _ecore_status_t rc; + + /* clear mailbox and prep first tlv */ + req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_TXQS, sizeof(*req)); + + req->tx_qid = p_cid->rel.queue_id; + req->num_txqs = 1; + + ecore_vf_pf_add_qid(p_hwfn, p_cid); + + /* add list termination tlv */ + ecore_add_tlv(&p_iov->offset, + CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + resp = &p_iov->pf2vf_reply->default_resp; + rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); + if (rc) + goto exit; + + if (resp->hdr.status != PFVF_STATUS_SUCCESS) { + rc = ECORE_INVAL; + goto exit; + } + +exit: + ecore_vf_pf_req_end(p_hwfn, rc); + + return rc; +} + +enum _ecore_status_t ecore_vf_pf_rxqs_update(struct ecore_hwfn *p_hwfn, + struct ecore_queue_cid **pp_cid, + u8 num_rxqs, + u8 comp_cqe_flg, + u8 comp_event_flg) +{ + struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp; + struct vfpf_update_rxq_tlv *req; + enum _ecore_status_t rc; + + /* Starting with CHANNEL_TLV_QID and the need for additional queue + * information, this API stopped supporting multiple rxqs. + * TODO - remove this and change the API to accept a single queue-cid + * in a follow-up patch. 
+ */ + if (num_rxqs != 1) { + DP_NOTICE(p_hwfn, true, + "VFs can no longer update more than a single queue\n"); + return ECORE_INVAL; + } + + /* clear mailbox and prep first tlv */ + req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_UPDATE_RXQ, sizeof(*req)); + + req->rx_qid = (*pp_cid)->rel.queue_id; + req->num_rxqs = 1; + + if (comp_cqe_flg) + req->flags |= VFPF_RXQ_UPD_COMPLETE_CQE_FLAG; + if (comp_event_flg) + req->flags |= VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG; + + ecore_vf_pf_add_qid(p_hwfn, *pp_cid); + + /* add list termination tlv */ + ecore_add_tlv(&p_iov->offset, + CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); + if (rc) + goto exit; + + if (resp->hdr.status != PFVF_STATUS_SUCCESS) { + rc = ECORE_INVAL; + goto exit; + } + +exit: + ecore_vf_pf_req_end(p_hwfn, rc); + return rc; +} + +enum _ecore_status_t +ecore_vf_pf_vport_start(struct ecore_hwfn *p_hwfn, u8 vport_id, + u16 mtu, u8 inner_vlan_removal, + enum ecore_tpa_mode tpa_mode, u8 max_buffers_per_cqe, + u8 only_untagged) +{ + struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct vfpf_vport_start_tlv *req; + struct pfvf_def_resp_tlv *resp; + enum _ecore_status_t rc; + int i; + + /* clear mailbox and prep first tlv */ + req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_START, sizeof(*req)); + + req->mtu = mtu; + req->vport_id = vport_id; + req->inner_vlan_removal = inner_vlan_removal; + req->tpa_mode = tpa_mode; + req->max_buffers_per_cqe = max_buffers_per_cqe; + req->only_untagged = only_untagged; + + /* status blocks */ + for (i = 0; i < p_hwfn->vf_iov_info->acquire_resp.resc.num_sbs; i++) { + struct ecore_sb_info *p_sb = p_hwfn->vf_iov_info->sbs_info[i]; + + if (p_sb) + req->sb_addr[i] = p_sb->sb_phys; + } + + /* add list termination tlv */ + ecore_add_tlv(&p_iov->offset, + CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + resp = &p_iov->pf2vf_reply->default_resp; + rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); + if (rc) + goto exit; + + if (resp->hdr.status != PFVF_STATUS_SUCCESS) { + rc = ECORE_INVAL; + goto exit; + } + +exit: + ecore_vf_pf_req_end(p_hwfn, rc); + + return rc; +} + +enum _ecore_status_t ecore_vf_pf_vport_stop(struct ecore_hwfn *p_hwfn) +{ + struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp; + enum _ecore_status_t rc; + + /* clear mailbox and prep first tlv */ + ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_TEARDOWN, + sizeof(struct vfpf_first_tlv)); + + /* add list termination tlv */ + ecore_add_tlv(&p_iov->offset, + CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); + if (rc) + goto exit; + + if (resp->hdr.status != PFVF_STATUS_SUCCESS) { + rc = ECORE_INVAL; + goto exit; + } + +exit: + ecore_vf_pf_req_end(p_hwfn, rc); + + return rc; +} + +static bool +ecore_vf_handle_vp_update_is_needed(struct ecore_hwfn *p_hwfn, + struct ecore_sp_vport_update_params *p_data, + u16 tlv) +{ + switch (tlv) { + case CHANNEL_TLV_VPORT_UPDATE_ACTIVATE: + return !!(p_data->update_vport_active_rx_flg || + p_data->update_vport_active_tx_flg); + case CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH: +#ifndef ASIC_ONLY + /* FPGA doesn't have PVFC and so can't support tx-switching */ + return !!(p_data->update_tx_switching_flg && + !CHIP_REV_IS_FPGA(p_hwfn->p_dev)); +#else + return !!p_data->update_tx_switching_flg; +#endif + case CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP: + return 
!!p_data->update_inner_vlan_removal_flg; + case CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN: + return !!p_data->update_accept_any_vlan_flg; + case CHANNEL_TLV_VPORT_UPDATE_MCAST: + return !!p_data->update_approx_mcast_flg; + case CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM: + return !!(p_data->accept_flags.update_rx_mode_config || + p_data->accept_flags.update_tx_mode_config); + case CHANNEL_TLV_VPORT_UPDATE_RSS: + return !!p_data->rss_params; + case CHANNEL_TLV_VPORT_UPDATE_SGE_TPA: + return !!p_data->sge_tpa_params; + default: + DP_INFO(p_hwfn, "Unexpected vport-update TLV[%d] %s\n", + tlv, qede_ecore_channel_tlvs_string[tlv]); + return false; + } +} + +static void +ecore_vf_handle_vp_update_tlvs_resp(struct ecore_hwfn *p_hwfn, + struct ecore_sp_vport_update_params *p_data) +{ + struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct pfvf_def_resp_tlv *p_resp; + u16 tlv; + + for (tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE; + tlv < CHANNEL_TLV_VPORT_UPDATE_MAX; + tlv++) { + if (!ecore_vf_handle_vp_update_is_needed(p_hwfn, p_data, tlv)) + continue; + + p_resp = (struct pfvf_def_resp_tlv *) + ecore_iov_search_list_tlvs(p_hwfn, p_iov->pf2vf_reply, tlv); + if (p_resp && p_resp->hdr.status) + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "TLV[%d] type %s Configuration %s\n", + tlv, qede_ecore_channel_tlvs_string[tlv], + (p_resp && p_resp->hdr.status) ? "succeeded" + : "failed"); + } +} + +enum _ecore_status_t +ecore_vf_pf_vport_update(struct ecore_hwfn *p_hwfn, + struct ecore_sp_vport_update_params *p_params) +{ + struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct vfpf_vport_update_tlv *req; + struct pfvf_def_resp_tlv *resp; + u8 update_rx, update_tx; + u32 resp_size = 0; + u16 size, tlv; + enum _ecore_status_t rc; + + resp = &p_iov->pf2vf_reply->default_resp; + resp_size = sizeof(*resp); + + update_rx = p_params->update_vport_active_rx_flg; + update_tx = p_params->update_vport_active_tx_flg; + + /* clear mailbox and prep header tlv */ + ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_UPDATE, sizeof(*req)); + + /* Prepare extended tlvs */ + if (update_rx || update_tx) { + struct vfpf_vport_update_activate_tlv *p_act_tlv; + + size = sizeof(struct vfpf_vport_update_activate_tlv); + p_act_tlv = ecore_add_tlv(&p_iov->offset, + CHANNEL_TLV_VPORT_UPDATE_ACTIVATE, + size); + resp_size += sizeof(struct pfvf_def_resp_tlv); + + if (update_rx) { + p_act_tlv->update_rx = update_rx; + p_act_tlv->active_rx = p_params->vport_active_rx_flg; + } + + if (update_tx) { + p_act_tlv->update_tx = update_tx; + p_act_tlv->active_tx = p_params->vport_active_tx_flg; + } + } + + if (p_params->update_inner_vlan_removal_flg) { + struct vfpf_vport_update_vlan_strip_tlv *p_vlan_tlv; + + size = sizeof(struct vfpf_vport_update_vlan_strip_tlv); + p_vlan_tlv = ecore_add_tlv(&p_iov->offset, + CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP, + size); + resp_size += sizeof(struct pfvf_def_resp_tlv); + + p_vlan_tlv->remove_vlan = p_params->inner_vlan_removal_flg; + } + + if (p_params->update_tx_switching_flg) { + struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv; + + size = sizeof(struct vfpf_vport_update_tx_switch_tlv); + tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH; + p_tx_switch_tlv = ecore_add_tlv(&p_iov->offset, + tlv, size); + resp_size += sizeof(struct pfvf_def_resp_tlv); + + p_tx_switch_tlv->tx_switching = p_params->tx_switching_flg; + } + + if (p_params->update_approx_mcast_flg) { + struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv; + + size = sizeof(struct vfpf_vport_update_mcast_bin_tlv); + p_mcast_tlv = ecore_add_tlv(&p_iov->offset, + 
CHANNEL_TLV_VPORT_UPDATE_MCAST, + size); + resp_size += sizeof(struct pfvf_def_resp_tlv); + + OSAL_MEMCPY(p_mcast_tlv->bins, p_params->bins, + sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS); + } + + update_rx = p_params->accept_flags.update_rx_mode_config; + update_tx = p_params->accept_flags.update_tx_mode_config; + + if (update_rx || update_tx) { + struct vfpf_vport_update_accept_param_tlv *p_accept_tlv; + + tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM; + size = sizeof(struct vfpf_vport_update_accept_param_tlv); + p_accept_tlv = ecore_add_tlv(&p_iov->offset, tlv, size); + resp_size += sizeof(struct pfvf_def_resp_tlv); + + if (update_rx) { + p_accept_tlv->update_rx_mode = update_rx; + p_accept_tlv->rx_accept_filter = + p_params->accept_flags.rx_accept_filter; + } + + if (update_tx) { + p_accept_tlv->update_tx_mode = update_tx; + p_accept_tlv->tx_accept_filter = + p_params->accept_flags.tx_accept_filter; + } + } + + if (p_params->rss_params) { + struct ecore_rss_params *rss_params = p_params->rss_params; + struct vfpf_vport_update_rss_tlv *p_rss_tlv; + int i, table_size; + + size = sizeof(struct vfpf_vport_update_rss_tlv); + p_rss_tlv = ecore_add_tlv(&p_iov->offset, + CHANNEL_TLV_VPORT_UPDATE_RSS, size); + resp_size += sizeof(struct pfvf_def_resp_tlv); + + if (rss_params->update_rss_config) + p_rss_tlv->update_rss_flags |= + VFPF_UPDATE_RSS_CONFIG_FLAG; + if (rss_params->update_rss_capabilities) + p_rss_tlv->update_rss_flags |= + VFPF_UPDATE_RSS_CAPS_FLAG; + if (rss_params->update_rss_ind_table) + p_rss_tlv->update_rss_flags |= + VFPF_UPDATE_RSS_IND_TABLE_FLAG; + if (rss_params->update_rss_key) + p_rss_tlv->update_rss_flags |= VFPF_UPDATE_RSS_KEY_FLAG; + + p_rss_tlv->rss_enable = rss_params->rss_enable; + p_rss_tlv->rss_caps = rss_params->rss_caps; + p_rss_tlv->rss_table_size_log = rss_params->rss_table_size_log; + + table_size = OSAL_MIN_T(int, T_ETH_INDIRECTION_TABLE_SIZE, + 1 << p_rss_tlv->rss_table_size_log); + for (i = 0; i < table_size; i++) { + struct ecore_queue_cid *p_queue; + + p_queue = rss_params->rss_ind_table[i]; + p_rss_tlv->rss_ind_table[i] = p_queue->rel.queue_id; + } + + OSAL_MEMCPY(p_rss_tlv->rss_key, rss_params->rss_key, + sizeof(rss_params->rss_key)); + } + + if (p_params->update_accept_any_vlan_flg) { + struct vfpf_vport_update_accept_any_vlan_tlv *p_any_vlan_tlv; + + size = sizeof(struct vfpf_vport_update_accept_any_vlan_tlv); + tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN; + p_any_vlan_tlv = ecore_add_tlv(&p_iov->offset, tlv, size); + + resp_size += sizeof(struct pfvf_def_resp_tlv); + p_any_vlan_tlv->accept_any_vlan = p_params->accept_any_vlan; + p_any_vlan_tlv->update_accept_any_vlan_flg = + p_params->update_accept_any_vlan_flg; + } + + if (p_params->sge_tpa_params) { + struct ecore_sge_tpa_params *sge_tpa_params; + struct vfpf_vport_update_sge_tpa_tlv *p_sge_tpa_tlv; + + sge_tpa_params = p_params->sge_tpa_params; + size = sizeof(struct vfpf_vport_update_sge_tpa_tlv); + p_sge_tpa_tlv = ecore_add_tlv(&p_iov->offset, + CHANNEL_TLV_VPORT_UPDATE_SGE_TPA, + size); + resp_size += sizeof(struct pfvf_def_resp_tlv); + + if (sge_tpa_params->update_tpa_en_flg) + p_sge_tpa_tlv->update_sge_tpa_flags |= + VFPF_UPDATE_TPA_EN_FLAG; + if (sge_tpa_params->update_tpa_param_flg) + p_sge_tpa_tlv->update_sge_tpa_flags |= + VFPF_UPDATE_TPA_PARAM_FLAG; + + if (sge_tpa_params->tpa_ipv4_en_flg) + p_sge_tpa_tlv->sge_tpa_flags |= VFPF_TPA_IPV4_EN_FLAG; + if (sge_tpa_params->tpa_ipv6_en_flg) + p_sge_tpa_tlv->sge_tpa_flags |= VFPF_TPA_IPV6_EN_FLAG; + if (sge_tpa_params->tpa_pkt_split_flg) + 
p_sge_tpa_tlv->sge_tpa_flags |= VFPF_TPA_PKT_SPLIT_FLAG; + if (sge_tpa_params->tpa_hdr_data_split_flg) + p_sge_tpa_tlv->sge_tpa_flags |= + VFPF_TPA_HDR_DATA_SPLIT_FLAG; + if (sge_tpa_params->tpa_gro_consistent_flg) + p_sge_tpa_tlv->sge_tpa_flags |= + VFPF_TPA_GRO_CONSIST_FLAG; + if (sge_tpa_params->tpa_ipv4_tunn_en_flg) + p_sge_tpa_tlv->sge_tpa_flags |= + VFPF_TPA_TUNN_IPV4_EN_FLAG; + if (sge_tpa_params->tpa_ipv6_tunn_en_flg) + p_sge_tpa_tlv->sge_tpa_flags |= + VFPF_TPA_TUNN_IPV6_EN_FLAG; + + p_sge_tpa_tlv->tpa_max_aggs_num = + sge_tpa_params->tpa_max_aggs_num; + p_sge_tpa_tlv->tpa_max_size = sge_tpa_params->tpa_max_size; + p_sge_tpa_tlv->tpa_min_size_to_start = + sge_tpa_params->tpa_min_size_to_start; + p_sge_tpa_tlv->tpa_min_size_to_cont = + sge_tpa_params->tpa_min_size_to_cont; + + p_sge_tpa_tlv->max_buffers_per_cqe = + sge_tpa_params->max_buffers_per_cqe; + } + + /* add list termination tlv */ + ecore_add_tlv(&p_iov->offset, + CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, resp_size); + if (rc) + goto exit; + + if (resp->hdr.status != PFVF_STATUS_SUCCESS) { + rc = ECORE_INVAL; + goto exit; + } + + ecore_vf_handle_vp_update_tlvs_resp(p_hwfn, p_params); + +exit: + ecore_vf_pf_req_end(p_hwfn, rc); + + return rc; +} + +enum _ecore_status_t ecore_vf_pf_reset(struct ecore_hwfn *p_hwfn) +{ + struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct pfvf_def_resp_tlv *resp; + struct vfpf_first_tlv *req; + enum _ecore_status_t rc; + + /* clear mailbox and prep first tlv */ + req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_CLOSE, sizeof(*req)); + + /* add list termination tlv */ + ecore_add_tlv(&p_iov->offset, + CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + resp = &p_iov->pf2vf_reply->default_resp; + rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); + if (rc) + goto exit; + + if (resp->hdr.status != PFVF_STATUS_SUCCESS) { + rc = ECORE_AGAIN; + goto exit; + } + + p_hwfn->b_int_enabled = 0; + +exit: + ecore_vf_pf_req_end(p_hwfn, rc); + + return rc; +} + +void ecore_vf_pf_filter_mcast(struct ecore_hwfn *p_hwfn, + struct ecore_filter_mcast *p_filter_cmd) +{ + struct ecore_sp_vport_update_params sp_params; + int i; + + OSAL_MEMSET(&sp_params, 0, sizeof(sp_params)); + sp_params.update_approx_mcast_flg = 1; + + if (p_filter_cmd->opcode == ECORE_FILTER_ADD) { + for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) { + u32 bit; + + bit = ecore_mcast_bin_from_mac(p_filter_cmd->mac[i]); + sp_params.bins[bit / 32] |= 1 << (bit % 32); + } + } + + ecore_vf_pf_vport_update(p_hwfn, &sp_params); +} + +enum _ecore_status_t ecore_vf_pf_filter_ucast(struct ecore_hwfn *p_hwfn, + struct ecore_filter_ucast + *p_ucast) +{ + struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct vfpf_ucast_filter_tlv *req; + struct pfvf_def_resp_tlv *resp; + enum _ecore_status_t rc; + + /* Sanitize */ + if (p_ucast->opcode == ECORE_FILTER_MOVE) { + DP_NOTICE(p_hwfn, true, + "VFs don't support Moving of filters\n"); + return ECORE_INVAL; + } + + /* clear mailbox and prep first tlv */ + req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_UCAST_FILTER, sizeof(*req)); + req->opcode = (u8)p_ucast->opcode; + req->type = (u8)p_ucast->type; + OSAL_MEMCPY(req->mac, p_ucast->mac, ETH_ALEN); + req->vlan = p_ucast->vlan; + + /* add list termination tlv */ + ecore_add_tlv(&p_iov->offset, + CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + resp = &p_iov->pf2vf_reply->default_resp; + rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, 
sizeof(*resp)); + if (rc) + goto exit; + + if (resp->hdr.status != PFVF_STATUS_SUCCESS) { + rc = ECORE_AGAIN; + goto exit; + } + +exit: + ecore_vf_pf_req_end(p_hwfn, rc); + + return rc; +} + +enum _ecore_status_t ecore_vf_pf_int_cleanup(struct ecore_hwfn *p_hwfn) +{ + struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp; + enum _ecore_status_t rc; + + /* clear mailbox and prep first tlv */ + ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_INT_CLEANUP, + sizeof(struct vfpf_first_tlv)); + + /* add list termination tlv */ + ecore_add_tlv(&p_iov->offset, + CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); + if (rc) + goto exit; + + if (resp->hdr.status != PFVF_STATUS_SUCCESS) { + rc = ECORE_INVAL; + goto exit; + } + +exit: + ecore_vf_pf_req_end(p_hwfn, rc); + + return rc; +} + +enum _ecore_status_t ecore_vf_pf_get_coalesce(struct ecore_hwfn *p_hwfn, + u16 *p_coal, + struct ecore_queue_cid *p_cid) +{ + struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct pfvf_read_coal_resp_tlv *resp; + struct vfpf_read_coal_req_tlv *req; + enum _ecore_status_t rc; + + /* clear mailbox and prep header tlv */ + req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_COALESCE_READ, + sizeof(*req)); + req->qid = p_cid->rel.queue_id; + req->is_rx = p_cid->b_is_rx ? 1 : 0; + + ecore_add_tlv(&p_iov->offset, CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + resp = &p_iov->pf2vf_reply->read_coal_resp; + + rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); + if (rc != ECORE_SUCCESS) + goto exit; + + if (resp->hdr.status != PFVF_STATUS_SUCCESS) + goto exit; + + *p_coal = resp->coal; +exit: + ecore_vf_pf_req_end(p_hwfn, rc); + + return rc; +} + +enum _ecore_status_t +ecore_vf_pf_set_coalesce(struct ecore_hwfn *p_hwfn, u16 rx_coal, u16 tx_coal, + struct ecore_queue_cid *p_cid) +{ + struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct vfpf_update_coalesce *req; + struct pfvf_def_resp_tlv *resp; + enum _ecore_status_t rc; + + /* clear mailbox and prep header tlv */ + req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_COALESCE_UPDATE, + sizeof(*req)); + + req->rx_coal = rx_coal; + req->tx_coal = tx_coal; + req->qid = p_cid->rel.queue_id; + + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "Setting coalesce rx_coal = %d, tx_coal = %d at queue = %d\n", + rx_coal, tx_coal, req->qid); + + /* add list termination tlv */ + ecore_add_tlv(&p_iov->offset, CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + resp = &p_iov->pf2vf_reply->default_resp; + rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); + + if (rc != ECORE_SUCCESS) + goto exit; + + if (resp->hdr.status != PFVF_STATUS_SUCCESS) + goto exit; + + p_hwfn->p_dev->rx_coalesce_usecs = rx_coal; + p_hwfn->p_dev->tx_coalesce_usecs = tx_coal; + +exit: + ecore_vf_pf_req_end(p_hwfn, rc); + return rc; +} + +enum _ecore_status_t +ecore_vf_pf_update_mtu(struct ecore_hwfn *p_hwfn, u16 mtu) +{ + struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct vfpf_update_mtu_tlv *p_req; + struct pfvf_def_resp_tlv *p_resp; + enum _ecore_status_t rc; + + if (!mtu) + return ECORE_INVAL; + + /* clear mailbox and prep header tlv */ + p_req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_UPDATE_MTU, + sizeof(*p_req)); + p_req->mtu = mtu; + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "Requesting MTU update to %d\n", mtu); + + /* add list termination tlv */ + ecore_add_tlv(&p_iov->offset, + CHANNEL_TLV_LIST_END, + sizeof(struct 
channel_list_end_tlv)); + + p_resp = &p_iov->pf2vf_reply->default_resp; + rc = ecore_send_msg2pf(p_hwfn, &p_resp->hdr.status, sizeof(*p_resp)); + if (p_resp->hdr.status == PFVF_STATUS_NOT_SUPPORTED) + rc = ECORE_INVAL; + + ecore_vf_pf_req_end(p_hwfn, rc); + + return rc; +} + +u16 ecore_vf_get_igu_sb_id(struct ecore_hwfn *p_hwfn, + u16 sb_id) +{ + struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; + + if (!p_iov) { + DP_NOTICE(p_hwfn, true, "vf_sriov_info isn't initialized\n"); + return 0; + } + + return p_iov->acquire_resp.resc.hw_sbs[sb_id].hw_sb_id; +} + +void ecore_vf_set_sb_info(struct ecore_hwfn *p_hwfn, + u16 sb_id, struct ecore_sb_info *p_sb) +{ + struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; + + if (!p_iov) { + DP_NOTICE(p_hwfn, true, "vf_sriov_info isn't initialized\n"); + return; + } + + if (sb_id >= PFVF_MAX_SBS_PER_VF) { + DP_NOTICE(p_hwfn, true, "Can't configure SB %04x\n", sb_id); + return; + } + + p_iov->sbs_info[sb_id] = p_sb; +} + +enum _ecore_status_t ecore_vf_read_bulletin(struct ecore_hwfn *p_hwfn, + u8 *p_change) +{ + struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct ecore_bulletin_content shadow; + u32 crc, crc_size; + + crc_size = sizeof(p_iov->bulletin.p_virt->crc); + *p_change = 0; + + /* Need to guarantee PF is not in the middle of writing it */ + OSAL_MEMCPY(&shadow, p_iov->bulletin.p_virt, p_iov->bulletin.size); + + /* If version did not update, no need to do anything */ + if (shadow.version == p_iov->bulletin_shadow.version) + return ECORE_SUCCESS; + + /* Verify the bulletin we see is valid */ + crc = OSAL_CRC32(0, (u8 *)&shadow + crc_size, + p_iov->bulletin.size - crc_size); + if (crc != shadow.crc) + return ECORE_AGAIN; + + /* Set the shadow bulletin and process it */ + OSAL_MEMCPY(&p_iov->bulletin_shadow, &shadow, p_iov->bulletin.size); + + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "Read a bulletin update %08x\n", shadow.version); + + *p_change = 1; + + return ECORE_SUCCESS; +} + +void __ecore_vf_get_link_params(struct ecore_mcp_link_params *p_params, + struct ecore_bulletin_content *p_bulletin) +{ + OSAL_MEMSET(p_params, 0, sizeof(*p_params)); + + p_params->speed.autoneg = p_bulletin->req_autoneg; + p_params->speed.advertised_speeds = p_bulletin->req_adv_speed; + p_params->speed.forced_speed = p_bulletin->req_forced_speed; + p_params->pause.autoneg = p_bulletin->req_autoneg_pause; + p_params->pause.forced_rx = p_bulletin->req_forced_rx; + p_params->pause.forced_tx = p_bulletin->req_forced_tx; + p_params->loopback_mode = p_bulletin->req_loopback; +} + +void ecore_vf_get_link_params(struct ecore_hwfn *p_hwfn, + struct ecore_mcp_link_params *params) +{ + __ecore_vf_get_link_params(params, + &p_hwfn->vf_iov_info->bulletin_shadow); +} + +void __ecore_vf_get_link_state(struct ecore_mcp_link_state *p_link, + struct ecore_bulletin_content *p_bulletin) +{ + OSAL_MEMSET(p_link, 0, sizeof(*p_link)); + + p_link->link_up = p_bulletin->link_up; + p_link->speed = p_bulletin->speed; + p_link->full_duplex = p_bulletin->full_duplex; + p_link->an = p_bulletin->autoneg; + p_link->an_complete = p_bulletin->autoneg_complete; + p_link->parallel_detection = p_bulletin->parallel_detection; + p_link->pfc_enabled = p_bulletin->pfc_enabled; + p_link->partner_adv_speed = p_bulletin->partner_adv_speed; + p_link->partner_tx_flow_ctrl_en = p_bulletin->partner_tx_flow_ctrl_en; + p_link->partner_rx_flow_ctrl_en = p_bulletin->partner_rx_flow_ctrl_en; + p_link->partner_adv_pause = p_bulletin->partner_adv_pause; + p_link->sfp_tx_fault = p_bulletin->sfp_tx_fault; +} + +void 
ecore_vf_get_link_state(struct ecore_hwfn *p_hwfn, + struct ecore_mcp_link_state *link) +{ + __ecore_vf_get_link_state(link, + &p_hwfn->vf_iov_info->bulletin_shadow); +} + +void __ecore_vf_get_link_caps(struct ecore_mcp_link_capabilities *p_link_caps, + struct ecore_bulletin_content *p_bulletin) +{ + OSAL_MEMSET(p_link_caps, 0, sizeof(*p_link_caps)); + p_link_caps->speed_capabilities = p_bulletin->capability_speed; +} + +void ecore_vf_get_link_caps(struct ecore_hwfn *p_hwfn, + struct ecore_mcp_link_capabilities *p_link_caps) +{ + __ecore_vf_get_link_caps(p_link_caps, + &p_hwfn->vf_iov_info->bulletin_shadow); +} + +void ecore_vf_get_num_rxqs(struct ecore_hwfn *p_hwfn, u8 *num_rxqs) +{ + *num_rxqs = p_hwfn->vf_iov_info->acquire_resp.resc.num_rxqs; +} + +void ecore_vf_get_num_txqs(struct ecore_hwfn *p_hwfn, + u8 *num_txqs) +{ + *num_txqs = p_hwfn->vf_iov_info->acquire_resp.resc.num_txqs; +} + +void ecore_vf_get_port_mac(struct ecore_hwfn *p_hwfn, u8 *port_mac) +{ + OSAL_MEMCPY(port_mac, + p_hwfn->vf_iov_info->acquire_resp.pfdev_info.port_mac, + ETH_ALEN); +} + +void ecore_vf_get_num_vlan_filters(struct ecore_hwfn *p_hwfn, + u8 *num_vlan_filters) +{ + struct ecore_vf_iov *p_vf; + + p_vf = p_hwfn->vf_iov_info; + *num_vlan_filters = p_vf->acquire_resp.resc.num_vlan_filters; +} + +void ecore_vf_get_num_sbs(struct ecore_hwfn *p_hwfn, + u32 *num_sbs) +{ + struct ecore_vf_iov *p_vf; + + p_vf = p_hwfn->vf_iov_info; + *num_sbs = (u32)p_vf->acquire_resp.resc.num_sbs; +} + +void ecore_vf_get_num_mac_filters(struct ecore_hwfn *p_hwfn, + u32 *num_mac_filters) +{ + struct ecore_vf_iov *p_vf = p_hwfn->vf_iov_info; + + *num_mac_filters = p_vf->acquire_resp.resc.num_mac_filters; +} + +bool ecore_vf_check_mac(struct ecore_hwfn *p_hwfn, u8 *mac) +{ + struct ecore_bulletin_content *bulletin; + + bulletin = &p_hwfn->vf_iov_info->bulletin_shadow; + if (!(bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED))) + return true; + + /* Forbid VF from changing a MAC enforced by PF */ + if (OSAL_MEMCMP(bulletin->mac, mac, ETH_ALEN)) + return false; + + return false; +} + +bool ecore_vf_bulletin_get_forced_mac(struct ecore_hwfn *hwfn, u8 *dst_mac, + u8 *p_is_forced) +{ + struct ecore_bulletin_content *bulletin; + + bulletin = &hwfn->vf_iov_info->bulletin_shadow; + + if (bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) { + if (p_is_forced) + *p_is_forced = 1; + } else if (bulletin->valid_bitmap & (1 << VFPF_BULLETIN_MAC_ADDR)) { + if (p_is_forced) + *p_is_forced = 0; + } else { + return false; + } + + OSAL_MEMCPY(dst_mac, bulletin->mac, ETH_ALEN); + + return true; +} + +void ecore_vf_bulletin_get_udp_ports(struct ecore_hwfn *p_hwfn, + u16 *p_vxlan_port, + u16 *p_geneve_port) +{ + struct ecore_bulletin_content *p_bulletin; + + p_bulletin = &p_hwfn->vf_iov_info->bulletin_shadow; + + *p_vxlan_port = p_bulletin->vxlan_udp_port; + *p_geneve_port = p_bulletin->geneve_udp_port; +} + +bool ecore_vf_bulletin_get_forced_vlan(struct ecore_hwfn *hwfn, u16 *dst_pvid) +{ + struct ecore_bulletin_content *bulletin; + + bulletin = &hwfn->vf_iov_info->bulletin_shadow; + + if (!(bulletin->valid_bitmap & (1 << VLAN_ADDR_FORCED))) + return false; + + if (dst_pvid) + *dst_pvid = bulletin->pvid; + + return true; +} + +bool ecore_vf_get_pre_fp_hsi(struct ecore_hwfn *p_hwfn) +{ + return p_hwfn->vf_iov_info->b_pre_fp_hsi; +} + +void ecore_vf_get_fw_version(struct ecore_hwfn *p_hwfn, + u16 *fw_major, u16 *fw_minor, u16 *fw_rev, + u16 *fw_eng) +{ + struct pf_vf_pfdev_info *info; + + info = &p_hwfn->vf_iov_info->acquire_resp.pfdev_info; + + *fw_major = 
info->fw_major; + *fw_minor = info->fw_minor; + *fw_rev = info->fw_rev; + *fw_eng = info->fw_eng; +} + +#ifdef CONFIG_ECORE_SW_CHANNEL +void ecore_vf_set_hw_channel(struct ecore_hwfn *p_hwfn, bool b_is_hw) +{ + p_hwfn->vf_iov_info->b_hw_channel = b_is_hw; +} +#endif diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_vf.h b/src/spdk/dpdk/drivers/net/qede/base/ecore_vf.h new file mode 100644 index 000000000..f027eba3e --- /dev/null +++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_vf.h @@ -0,0 +1,339 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. + * All rights reserved. + * www.cavium.com + */ + +#ifndef __ECORE_VF_H__ +#define __ECORE_VF_H__ + +#include "ecore_status.h" +#include "ecore_vf_api.h" +#include "ecore_l2_api.h" +#include "ecore_vfpf_if.h" +#include "ecore_dev_api.h" + +/* Default number of CIDs [total of both Rx and Tx] to be requested + * by default. + */ +#define ECORE_ETH_VF_DEFAULT_NUM_CIDS (32) + +/* This data is held in the ecore_hwfn structure for VFs only. */ +struct ecore_vf_iov { + union vfpf_tlvs *vf2pf_request; + dma_addr_t vf2pf_request_phys; + union pfvf_tlvs *pf2vf_reply; + dma_addr_t pf2vf_reply_phys; + + /* Should be taken whenever the mailbox buffers are accessed */ + osal_mutex_t mutex; + u8 *offset; + + /* Bulletin Board */ + struct ecore_bulletin bulletin; + struct ecore_bulletin_content bulletin_shadow; + + /* we set aside a copy of the acquire response */ + struct pfvf_acquire_resp_tlv acquire_resp; + + /* In case PF originates prior to the fp-hsi version comparison, + * this has to be propagated as it affects the fastpath. + */ + bool b_pre_fp_hsi; + + /* Current day VFs are passing the SBs physical address on vport + * start, and as they lack an IGU mapping they need to store the + * addresses of previously registered SBs. + * Even if we were to change configuration flow, due to backward + * compatibility [with older PFs] we'd still need to store these. + */ + struct ecore_sb_info *sbs_info[PFVF_MAX_SBS_PER_VF]; + +#ifdef CONFIG_ECORE_SW_CHANNEL + /* Would be set if the VF is to try communicating with it PF + * using a hw channel. + */ + bool b_hw_channel; +#endif + + /* Determines whether VF utilizes doorbells via limited register + * bar or via the doorbell bar. + */ + bool b_doorbell_bar; + + /* retry count for VF acquire on channel timeout */ + u8 acquire_retry_cnt; +}; + +/** + * @brief VF - Get coalesce per VF's relative queue. + * + * @param p_hwfn + * @param p_coal - coalesce value in micro second for VF queues. + * @param p_cid - queue cid + * + **/ +enum _ecore_status_t ecore_vf_pf_get_coalesce(struct ecore_hwfn *p_hwfn, + u16 *p_coal, + struct ecore_queue_cid *p_cid); + +enum _ecore_status_t ecore_vf_pf_acquire(struct ecore_hwfn *p_hwfn); +/** + * @brief VF - Set Rx/Tx coalesce per VF's relative queue. + * Coalesce value '0' will omit the configuration. 
+ *
+ * @param p_hwfn
+ * @param rx_coal - coalesce value in microseconds for rx queue
+ * @param tx_coal - coalesce value in microseconds for tx queue
+ * @param p_cid - queue cid
+ *
+ **/
+enum _ecore_status_t ecore_vf_pf_set_coalesce(struct ecore_hwfn *p_hwfn,
+ u16 rx_coal, u16 tx_coal,
+ struct ecore_queue_cid *p_cid);
+
+#ifdef CONFIG_ECORE_SRIOV
+/**
+ * @brief hw preparation for VF
+ * sends ACQUIRE message
+ *
+ * @param p_hwfn
+ * @param p_params
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t
+ecore_vf_hw_prepare(struct ecore_hwfn *p_hwfn,
+ struct ecore_hw_prepare_params *p_params);
+
+/**
+ * @brief VF - start the RX Queue by sending a message to the PF
+ *
+ * @param p_hwfn
+ * @param p_cid - Only relative fields are relevant
+ * @param bd_max_bytes - maximum number of bytes per bd
+ * @param bd_chain_phys_addr - physical address of bd chain
+ * @param cqe_pbl_addr - physical address of pbl
+ * @param cqe_pbl_size - pbl size
+ * @param pp_prod - pointer to the producer to be
+ * used in fastpath
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_vf_pf_rxq_start(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid,
+ u16 bd_max_bytes,
+ dma_addr_t bd_chain_phys_addr,
+ dma_addr_t cqe_pbl_addr,
+ u16 cqe_pbl_size,
+ void OSAL_IOMEM **pp_prod);
+
+/**
+ * @brief VF - start the TX queue by sending a message to the
+ * PF.
+ *
+ * @param p_hwfn
+ * @param p_cid
+ * @param pbl_addr - physical address of the tx pbl
+ * @param pbl_size - pbl size
+ * @param pp_doorbell - pointer to the address to which the
+ * doorbell should be written
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t
+ecore_vf_pf_txq_start(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid,
+ dma_addr_t pbl_addr, u16 pbl_size,
+ void OSAL_IOMEM **pp_doorbell);
+
+/**
+ * @brief VF - stop the RX queue by sending a message to the PF
+ *
+ * @param p_hwfn
+ * @param p_cid
+ * @param cqe_completion
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_vf_pf_rxq_stop(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid,
+ bool cqe_completion);
+
+/**
+ * @brief VF - stop the TX queue by sending a message to the PF
+ *
+ * @param p_hwfn
+ * @param p_cid
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_vf_pf_txq_stop(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid);
+
+/* TODO - fix all the !SRIOV prototypes */
+
+#ifndef LINUX_REMOVE
+/**
+ * @brief VF - update the RX queue by sending a message to the
+ * PF
+ *
+ * @param p_hwfn
+ * @param pp_cid - list of queue-cids which we want to update
+ * @param num_rxqs
+ * @param comp_cqe_flg
+ * @param comp_event_flg
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_vf_pf_rxqs_update(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid **pp_cid,
+ u8 num_rxqs,
+ u8 comp_cqe_flg,
+ u8 comp_event_flg);
+#endif
+
+/**
+ * @brief VF - send a vport update command
+ *
+ * @param p_hwfn
+ * @param p_params
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t
+ecore_vf_pf_vport_update(struct ecore_hwfn *p_hwfn,
+ struct ecore_sp_vport_update_params *p_params);
+
+/**
+ * @brief VF - send a close message to PF
+ *
+ * @param p_hwfn
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_vf_pf_reset(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief VF - free the VF's memories
+ *
+ * @param p_hwfn
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_vf_pf_release(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_vf_get_igu_sb_id - Get the IGU SB ID for 
a given + * sb_id. For VFs igu sbs don't have to be contiguous + * + * @param p_hwfn + * @param sb_id + * + * @return INLINE u16 + */ +u16 ecore_vf_get_igu_sb_id(struct ecore_hwfn *p_hwfn, + u16 sb_id); + +/** + * @brief Stores [or removes] a configured sb_info. + * + * @param p_hwfn + * @param sb_id - zero-based SB index [for fastpath] + * @param sb_info - may be OSAL_NULL [during removal]. + */ +void ecore_vf_set_sb_info(struct ecore_hwfn *p_hwfn, + u16 sb_id, struct ecore_sb_info *p_sb); + +/** + * @brief ecore_vf_pf_vport_start - perform vport start for VF. + * + * @param p_hwfn + * @param vport_id + * @param mtu + * @param inner_vlan_removal + * @param tpa_mode + * @param max_buffers_per_cqe, + * @param only_untagged - default behavior regarding vlan acceptance + * + * @return enum _ecore_status + */ +enum _ecore_status_t ecore_vf_pf_vport_start( + struct ecore_hwfn *p_hwfn, + u8 vport_id, + u16 mtu, + u8 inner_vlan_removal, + enum ecore_tpa_mode tpa_mode, + u8 max_buffers_per_cqe, + u8 only_untagged); + +/** + * @brief ecore_vf_pf_vport_stop - stop the VF's vport + * + * @param p_hwfn + * + * @return enum _ecore_status + */ +enum _ecore_status_t ecore_vf_pf_vport_stop(struct ecore_hwfn *p_hwfn); + +enum _ecore_status_t ecore_vf_pf_filter_ucast( + struct ecore_hwfn *p_hwfn, + struct ecore_filter_ucast *p_param); + +void ecore_vf_pf_filter_mcast(struct ecore_hwfn *p_hwfn, + struct ecore_filter_mcast *p_filter_cmd); + +/** + * @brief ecore_vf_pf_int_cleanup - clean the SB of the VF + * + * @param p_hwfn + * + * @return enum _ecore_status + */ +enum _ecore_status_t ecore_vf_pf_int_cleanup(struct ecore_hwfn *p_hwfn); + +/** + * @brief - return the link params in a given bulletin board + * + * @param p_params - pointer to a struct to fill with link params + * @param p_bulletin + */ +void __ecore_vf_get_link_params(struct ecore_mcp_link_params *p_params, + struct ecore_bulletin_content *p_bulletin); + +/** + * @brief - return the link state in a given bulletin board + * + * @param p_link - pointer to a struct to fill with link state + * @param p_bulletin + */ +void __ecore_vf_get_link_state(struct ecore_mcp_link_state *p_link, + struct ecore_bulletin_content *p_bulletin); + +/** + * @brief - return the link capabilities in a given bulletin board + * + * @param p_link - pointer to a struct to fill with link capabilities + * @param p_bulletin + */ +void __ecore_vf_get_link_caps(struct ecore_mcp_link_capabilities *p_link_caps, + struct ecore_bulletin_content *p_bulletin); + +enum _ecore_status_t +ecore_vf_pf_tunnel_param_update(struct ecore_hwfn *p_hwfn, + struct ecore_tunnel_info *p_tunn); + +void ecore_vf_set_vf_start_tunn_update_param(struct ecore_tunnel_info *p_tun); + +u32 ecore_vf_hw_bar_size(struct ecore_hwfn *p_hwfn, + enum BAR_ID bar_id); + +/** + * @brief - ecore_vf_pf_update_mtu Update MTU for VF. + * + * @param p_hwfn + * @param - mtu + */ +enum _ecore_status_t +ecore_vf_pf_update_mtu(struct ecore_hwfn *p_hwfn, u16 mtu); +#endif +#endif /* __ECORE_VF_H__ */ diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_vf_api.h b/src/spdk/dpdk/drivers/net/qede/base/ecore_vf_api.h new file mode 100644 index 000000000..43951a9a3 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_vf_api.h @@ -0,0 +1,181 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. + * All rights reserved. 
+ * www.cavium.com + */ + +#ifndef __ECORE_VF_API_H__ +#define __ECORE_VF_API_H__ + +#include "ecore_sp_api.h" +#include "ecore_mcp_api.h" + +#ifdef CONFIG_ECORE_SRIOV + +#define ECORE_VF_ACQUIRE_THRESH 3 + +/** + * @brief Read the VF bulletin and act on it if needed + * + * @param p_hwfn + * @param p_change - ecore fills 1 iff bulletin board has changed, 0 otherwise. + * + * @return enum _ecore_status + */ +enum _ecore_status_t ecore_vf_read_bulletin(struct ecore_hwfn *p_hwfn, + u8 *p_change); + +/** + * @brief Get link parameters for VF from ecore + * + * @param p_hwfn + * @param params - the link params structure to be filled for the VF + */ +void ecore_vf_get_link_params(struct ecore_hwfn *p_hwfn, + struct ecore_mcp_link_params *params); + +/** + * @brief Get link state for VF from ecore + * + * @param p_hwfn + * @param link - the link state structure to be filled for the VF + */ +void ecore_vf_get_link_state(struct ecore_hwfn *p_hwfn, + struct ecore_mcp_link_state *link); + +/** + * @brief Get link capabilities for VF from ecore + * + * @param p_hwfn + * @param p_link_caps - the link capabilities structure to be filled for the VF + */ +void ecore_vf_get_link_caps(struct ecore_hwfn *p_hwfn, + struct ecore_mcp_link_capabilities *p_link_caps); + +/** + * @brief Get number of Rx queues allocated for VF by ecore + * + * @param p_hwfn + * @param num_rxqs - allocated RX queues + */ +void ecore_vf_get_num_rxqs(struct ecore_hwfn *p_hwfn, + u8 *num_rxqs); + +/** + * @brief Get number of Rx queues allocated for VF by ecore + * + * @param p_hwfn + * @param num_txqs - allocated RX queues + */ +void ecore_vf_get_num_txqs(struct ecore_hwfn *p_hwfn, + u8 *num_txqs); + +/** + * @brief Get port mac address for VF + * + * @param p_hwfn + * @param port_mac - destination location for port mac + */ +void ecore_vf_get_port_mac(struct ecore_hwfn *p_hwfn, + u8 *port_mac); + +/** + * @brief Get number of VLAN filters allocated for VF by ecore + * + * @param p_hwfn + * @param num_rxqs - allocated VLAN filters + */ +void ecore_vf_get_num_vlan_filters(struct ecore_hwfn *p_hwfn, + u8 *num_vlan_filters); + +void ecore_vf_get_num_sbs(struct ecore_hwfn *p_hwfn, + u32 *num_sbs); + +/** + * @brief Get number of MAC filters allocated for VF by ecore + * + * @param p_hwfn + * @param num_rxqs - allocated MAC filters + */ +void ecore_vf_get_num_mac_filters(struct ecore_hwfn *p_hwfn, + u32 *num_mac_filters); + +/** + * @brief Check if VF can set a MAC address + * + * @param p_hwfn + * @param mac + * + * @return bool + */ +bool ecore_vf_check_mac(struct ecore_hwfn *p_hwfn, u8 *mac); + +#ifndef LINUX_REMOVE +/** + * @brief Copy forced MAC address from bulletin board + * + * @param hwfn + * @param dst_mac + * @param p_is_forced - out param which indicate in case mac + * exist if it forced or not. + * + * @return bool - return true if mac exist and false if + * not. + */ +bool ecore_vf_bulletin_get_forced_mac(struct ecore_hwfn *hwfn, u8 *dst_mac, + u8 *p_is_forced); + +/** + * @brief Check if force vlan is set and copy the forced vlan + * from bulletin board + * + * @param hwfn + * @param dst_pvid + * @return bool + */ +bool ecore_vf_bulletin_get_forced_vlan(struct ecore_hwfn *hwfn, u16 *dst_pvid); + +/** + * @brief Check if VF is based on PF whose driver is pre-fp-hsi version; + * This affects the fastpath implementation of the driver. + * + * @param p_hwfn + * + * @return bool - true iff PF is pre-fp-hsi version. 
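
To make the split between ACQUIRE-time negotiation and these accessors concrete, here is a minimal sketch (not part of the imported header) of how a consumer might snapshot the per-VF L2 resources once ecore_vf_hw_prepare() has completed. It assumes the ecore/OSAL headers supply the u8/u32 typedefs, and vf_l2_resources is a hypothetical local bookkeeping struct introduced only for illustration.

#include "ecore_vf_api.h"

struct vf_l2_resources {		/* hypothetical, illustration only */
	u8  num_rxqs;
	u8  num_txqs;
	u8  num_vlan_filters;
	u32 num_mac_filters;
	u8  port_mac[6];
};

static void vf_snapshot_resources(struct ecore_hwfn *p_hwfn,
				  struct vf_l2_resources *res)
{
	/* Each getter copies back what was granted during ACQUIRE. */
	ecore_vf_get_num_rxqs(p_hwfn, &res->num_rxqs);
	ecore_vf_get_num_txqs(p_hwfn, &res->num_txqs);
	ecore_vf_get_num_vlan_filters(p_hwfn, &res->num_vlan_filters);
	ecore_vf_get_num_mac_filters(p_hwfn, &res->num_mac_filters);
	ecore_vf_get_port_mac(p_hwfn, res->port_mac);
}
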
+ */ +bool ecore_vf_get_pre_fp_hsi(struct ecore_hwfn *p_hwfn); + +#endif + +/** + * @brief Set firmware version information in dev_info from VFs acquire + * response tlv + * + * @param p_hwfn + * @param fw_major + * @param fw_minor + * @param fw_rev + * @param fw_eng + */ +void ecore_vf_get_fw_version(struct ecore_hwfn *p_hwfn, + u16 *fw_major, + u16 *fw_minor, + u16 *fw_rev, + u16 *fw_eng); +void ecore_vf_bulletin_get_udp_ports(struct ecore_hwfn *p_hwfn, + u16 *p_vxlan_port, u16 *p_geneve_port); + +#ifdef CONFIG_ECORE_SW_CHANNEL +/** + * @brief set the VF to use a SW/HW channel when communicating with PF. + * NOTICE: today the likely first place to call this from VF + * would be OSAL_VF_FILL_ACQUIRE_RESC_REQ(); Might want to consider + * something a bit more appropriate. + * + * @param p_hwfn + * @param b_is_hw - true iff VF is to use a HW-channel + */ +void ecore_vf_set_hw_channel(struct ecore_hwfn *p_hwfn, bool b_is_hw); +#endif +#endif +#endif diff --git a/src/spdk/dpdk/drivers/net/qede/base/ecore_vfpf_if.h b/src/spdk/dpdk/drivers/net/qede/base/ecore_vfpf_if.h new file mode 100644 index 000000000..f92dc428a --- /dev/null +++ b/src/spdk/dpdk/drivers/net/qede/base/ecore_vfpf_if.h @@ -0,0 +1,744 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. + * All rights reserved. + * www.cavium.com + */ + +#ifndef __ECORE_VF_PF_IF_H__ +#define __ECORE_VF_PF_IF_H__ + +/* @@@ TBD MichalK this should be HSI? */ +#define T_ETH_INDIRECTION_TABLE_SIZE 128 +#define T_ETH_RSS_KEY_SIZE 10 /* @@@ TBD this should be HSI? */ + +/*********************************************** + * + * Common definitions for all HVs + * + **/ +struct vf_pf_resc_request { + u8 num_rxqs; + u8 num_txqs; + u8 num_sbs; + u8 num_mac_filters; + u8 num_vlan_filters; + u8 num_mc_filters; /* No limit so superfluous */ + u8 num_cids; + u8 padding; +}; + +struct hw_sb_info { + u16 hw_sb_id; /* aka absolute igu id, used to ack the sb */ + u8 sb_qid; /* used to update DHC for sb */ + u8 padding[5]; +}; + +/*********************************************** + * + * HW VF-PF channel definitions + * + * A.K.A VF-PF mailbox + * + **/ +#define TLV_BUFFER_SIZE 1024 + +/* vf pf channel tlvs */ +/* general tlv header (used for both vf->pf request and pf->vf response) */ +struct channel_tlv { + u16 type; + u16 length; +}; + +/* header of first vf->pf tlv carries the offset used to calculate response + * buffer address + */ +struct vfpf_first_tlv { + struct channel_tlv tl; + u32 padding; + u64 reply_address; +}; + +/* header of pf->vf tlvs, carries the status of handling the request */ +struct pfvf_tlv { + struct channel_tlv tl; + u8 status; + u8 padding[3]; +}; + +/* response tlv used for most tlvs */ +struct pfvf_def_resp_tlv { + struct pfvf_tlv hdr; +}; + +/* used to terminate and pad a tlv list */ +struct channel_list_end_tlv { + struct channel_tlv tl; + u8 padding[4]; +}; + +/* Acquire */ +struct vfpf_acquire_tlv { + struct vfpf_first_tlv first_tlv; + + struct vf_pf_vfdev_info { +#ifndef LINUX_REMOVE + /* First bit was used on 8.7.x and 8.8.x versions, which had different + * FWs used but with the same faspath HSI. As this was prior to the + * fastpath versioning, wanted to have ability to override fw matching + * and allow them to interact. 
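
The mailbox is a flat TLV_BUFFER_SIZE byte buffer in which request TLVs are chained back to back and closed with a list-end TLV. The sketch below shows that layout only; it is not the driver's actual helper, and it assumes the u8/u16 typedefs from the OSAL headers are in scope.

#include <string.h>
#include "ecore_vfpf_if.h"

/* Place one TLV header at 'offset' in the request buffer and return
 * the offset at which the next TLV would start.
 */
static u16 tlv_append(u8 *buf, u16 offset, u16 type, u16 length)
{
	struct channel_tlv *tlv = (struct channel_tlv *)(buf + offset);

	tlv->type = type;
	tlv->length = length;
	return offset + length;
}

static void build_acquire_request(u8 *mbox /* TLV_BUFFER_SIZE bytes */)
{
	u16 off = 0;

	memset(mbox, 0, TLV_BUFFER_SIZE);
	/* The first TLV embeds vfpf_first_tlv, i.e. the reply address. */
	off = tlv_append(mbox, off, CHANNEL_TLV_ACQUIRE,
			 sizeof(struct vfpf_acquire_tlv));
	/* Every request is terminated by a list-end TLV. */
	tlv_append(mbox, off, CHANNEL_TLV_LIST_END,
		   sizeof(struct channel_list_end_tlv));
}
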
+ */ +#endif +/* VF pre-FP hsi version */ +#define VFPF_ACQUIRE_CAP_PRE_FP_HSI (1 << 0) +#define VFPF_ACQUIRE_CAP_100G (1 << 1) /* VF can support 100g */ + + /* A requirement for supporting multi-Tx queues on a single queue-zone, + * VF would pass qids as additional information whenever passing queue + * references. + * TODO - due to the CID limitations in Bar0, VFs currently don't pass + * this, and use the legacy CID scheme. + */ +#define VFPF_ACQUIRE_CAP_QUEUE_QIDS (1 << 2) + + /* The VF is using the physical bar. While this is mostly internal + * to the VF, might affect the number of CIDs supported assuming + * QUEUE_QIDS is set. + */ +#define VFPF_ACQUIRE_CAP_PHYSICAL_BAR (1 << 3) + u64 capabilities; + u8 fw_major; + u8 fw_minor; + u8 fw_revision; + u8 fw_engineering; + u32 driver_version; + u16 opaque_fid; /* ME register value */ + u8 os_type; /* VFPF_ACQUIRE_OS_* value */ + u8 eth_fp_hsi_major; + u8 eth_fp_hsi_minor; + u8 padding[3]; + } vfdev_info; + + struct vf_pf_resc_request resc_request; + + u64 bulletin_addr; + u32 bulletin_size; + u32 padding; +}; + +/* receive side scaling tlv */ +struct vfpf_vport_update_rss_tlv { + struct channel_tlv tl; + + u8 update_rss_flags; + #define VFPF_UPDATE_RSS_CONFIG_FLAG (1 << 0) + #define VFPF_UPDATE_RSS_CAPS_FLAG (1 << 1) + #define VFPF_UPDATE_RSS_IND_TABLE_FLAG (1 << 2) + #define VFPF_UPDATE_RSS_KEY_FLAG (1 << 3) + + u8 rss_enable; + u8 rss_caps; + u8 rss_table_size_log; /* The table size is 2 ^ rss_table_size_log */ + u16 rss_ind_table[T_ETH_INDIRECTION_TABLE_SIZE]; + u32 rss_key[T_ETH_RSS_KEY_SIZE]; +}; + +struct pfvf_storm_stats { + u32 address; + u32 len; +}; + +struct pfvf_stats_info { + struct pfvf_storm_stats mstats; + struct pfvf_storm_stats pstats; + struct pfvf_storm_stats tstats; + struct pfvf_storm_stats ustats; +}; + +/* acquire response tlv - carries the allocated resources */ +struct pfvf_acquire_resp_tlv { + struct pfvf_tlv hdr; + + struct pf_vf_pfdev_info { + u32 chip_num; + u32 mfw_ver; + + u16 fw_major; + u16 fw_minor; + u16 fw_rev; + u16 fw_eng; + + u64 capabilities; +#define PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED (1 << 0) +#define PFVF_ACQUIRE_CAP_100G (1 << 1) /* If set, 100g PF */ +/* There are old PF versions where the PF might mistakenly override the sanity + * mechanism [version-based] and allow a VF that can't be supported to pass + * the acquisition phase. + * To overcome this, PFs now indicate that they're past that point and the new + * VFs would fail probe on the older PFs that fail to do so. + */ +#ifndef LINUX_REMOVE +/* Said bug was in quest/serpens; Can't be certain no official release included + * the bug since the fix arrived very late in the programs. + */ +#endif +#define PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE (1 << 2) + + /* PF expects queues to be received with additional qids */ +#define PFVF_ACQUIRE_CAP_QUEUE_QIDS (1 << 3) + + u16 db_size; + u8 indices_per_sb; + u8 os_type; + + /* These should match the PF's ecore_dev values */ + u16 chip_rev; + u8 dev_type; + + /* Doorbell bar size configured in HW: log(size) or 0 */ + u8 bar_size; + + struct pfvf_stats_info stats_info; + + u8 port_mac[ETH_ALEN]; + + /* It's possible PF had to configure an older fastpath HSI + * [in case VF is newer than PF]. This is communicated back + * to the VF. It can also be used in case of error due to + * non-matching versions to shed light in VF about failure. 
+ */ + u8 major_fp_hsi; + u8 minor_fp_hsi; + } pfdev_info; + + struct pf_vf_resc { + /* in case of status NO_RESOURCE in message hdr, pf will fill + * this struct with suggested amount of resources for next + * acquire request + */ + #define PFVF_MAX_QUEUES_PER_VF 16 + #define PFVF_MAX_SBS_PER_VF 16 + struct hw_sb_info hw_sbs[PFVF_MAX_SBS_PER_VF]; + u8 hw_qid[PFVF_MAX_QUEUES_PER_VF]; + u8 cid[PFVF_MAX_QUEUES_PER_VF]; + + u8 num_rxqs; + u8 num_txqs; + u8 num_sbs; + u8 num_mac_filters; + u8 num_vlan_filters; + u8 num_mc_filters; + u8 num_cids; + u8 padding; + } resc; + + u32 bulletin_size; + u32 padding; +}; + +struct pfvf_start_queue_resp_tlv { + struct pfvf_tlv hdr; + u32 offset; /* offset to consumer/producer of queue */ + u8 padding[4]; +}; + +/* Extended queue information - additional index for reference inside qzone. + * If commmunicated between VF/PF, each TLV relating to queues should be + * extended by one such [or have a future base TLV that already contains info]. + */ +struct vfpf_qid_tlv { + struct channel_tlv tl; + u8 qid; + u8 padding[3]; +}; + +/* Soft FLR req */ +struct vfpf_soft_flr_tlv { + struct vfpf_first_tlv first_tlv; + u32 reserved1; + u32 reserved2; +}; + +/* Setup Queue */ +struct vfpf_start_rxq_tlv { + struct vfpf_first_tlv first_tlv; + + /* physical addresses */ + u64 rxq_addr; + u64 deprecated_sge_addr; + u64 cqe_pbl_addr; + + u16 cqe_pbl_size; + u16 hw_sb; + u16 rx_qid; + u16 hc_rate; /* desired interrupts per sec. */ + + u16 bd_max_bytes; + u16 stat_id; + u8 sb_index; + u8 padding[3]; + +}; + +struct vfpf_start_txq_tlv { + struct vfpf_first_tlv first_tlv; + + /* physical addresses */ + u64 pbl_addr; + u16 pbl_size; + u16 stat_id; + u16 tx_qid; + u16 hw_sb; + + u32 flags; /* VFPF_QUEUE_FLG_X flags */ + u16 hc_rate; /* desired interrupts per sec. */ + u8 sb_index; + u8 padding[3]; +}; + +/* Stop RX Queue */ +struct vfpf_stop_rxqs_tlv { + struct vfpf_first_tlv first_tlv; + + u16 rx_qid; + + /* While the API supports multiple Rx-queues on a single TLV + * message, in practice older VFs always used it as one [ecore]. + * And there are PFs [starting with the CHANNEL_TLV_QID] which + * would start assuming this is always a '1'. So in practice this + * field should be considered deprecated and *Always* set to '1'. + */ + u8 num_rxqs; + + u8 cqe_completion; + u8 padding[4]; +}; + +/* Stop TX Queues */ +struct vfpf_stop_txqs_tlv { + struct vfpf_first_tlv first_tlv; + + u16 tx_qid; + + /* While the API supports multiple Tx-queues on a single TLV + * message, in practice older VFs always used it as one [ecore]. + * And there are PFs [starting with the CHANNEL_TLV_QID] which + * would start assuming this is always a '1'. So in practice this + * field should be considered deprecated and *Always* set to '1'. 
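
As a concrete reading of the START_RXQ request above, the sketch below shows roughly how the parameters of ecore_vf_pf_rxq_start() from ecore_vf.h map onto the TLV fields. It is illustrative only: the typedefs are assumed to come from the OSAL headers, the TLV header is assumed to have been placed by the mailbox code, and, per the vfpf_qid_tlv note, a PF that negotiated QUEUE_QIDS would additionally expect a qid extension TLV.

#include "ecore_vfpf_if.h"

static void fill_start_rxq(struct vfpf_start_rxq_tlv *req,
			   u16 rx_qid, u16 hw_sb, u8 sb_index,
			   u16 bd_max_bytes,
			   u64 bd_chain_phys_addr,
			   u64 cqe_pbl_addr, u16 cqe_pbl_size)
{
	req->rx_qid = rx_qid;
	req->hw_sb = hw_sb;
	req->sb_index = sb_index;
	req->bd_max_bytes = bd_max_bytes;
	req->rxq_addr = bd_chain_phys_addr;	/* BD chain base */
	req->cqe_pbl_addr = cqe_pbl_addr;
	req->cqe_pbl_size = cqe_pbl_size;
	/* hc_rate/stat_id are left at zero in this sketch; the PF
	 * returns the producer offset via pfvf_start_queue_resp_tlv.
	 */
}
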
+ */ + u8 num_txqs; + u8 padding[5]; +}; + +struct vfpf_update_rxq_tlv { + struct vfpf_first_tlv first_tlv; + + u64 deprecated_sge_addr[PFVF_MAX_QUEUES_PER_VF]; + + u16 rx_qid; + u8 num_rxqs; + u8 flags; + #define VFPF_RXQ_UPD_INIT_SGE_DEPRECATE_FLAG (1 << 0) + #define VFPF_RXQ_UPD_COMPLETE_CQE_FLAG (1 << 1) + #define VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG (1 << 2) + + u8 padding[4]; +}; + +/* Set Queue Filters */ +struct vfpf_q_mac_vlan_filter { + u32 flags; + #define VFPF_Q_FILTER_DEST_MAC_VALID 0x01 + #define VFPF_Q_FILTER_VLAN_TAG_VALID 0x02 + #define VFPF_Q_FILTER_SET_MAC 0x100 /* set/clear */ + + u8 mac[ETH_ALEN]; + u16 vlan_tag; + + u8 padding[4]; +}; + +/* Start a vport */ +struct vfpf_vport_start_tlv { + struct vfpf_first_tlv first_tlv; + + u64 sb_addr[PFVF_MAX_SBS_PER_VF]; + + u32 tpa_mode; + u16 dep1; + u16 mtu; + + u8 vport_id; + u8 inner_vlan_removal; + + u8 only_untagged; + u8 max_buffers_per_cqe; + + u8 padding[4]; +}; + +/* Extended tlvs - need to add rss, mcast, accept mode tlvs */ +struct vfpf_vport_update_activate_tlv { + struct channel_tlv tl; + u8 update_rx; + u8 update_tx; + u8 active_rx; + u8 active_tx; +}; + +struct vfpf_vport_update_tx_switch_tlv { + struct channel_tlv tl; + u8 tx_switching; + u8 padding[3]; +}; + +struct vfpf_vport_update_vlan_strip_tlv { + struct channel_tlv tl; + u8 remove_vlan; + u8 padding[3]; +}; + +struct vfpf_vport_update_mcast_bin_tlv { + struct channel_tlv tl; + u8 padding[4]; + + /* This was a mistake; There are only 256 approx bins, + * and in HSI they're divided into 32-bit values. + * As old VFs used to set-bit to the values on its side, + * the upper half of the array is never expected to contain any data. + */ + u64 bins[4]; + u64 obsolete_bins[4]; +}; + +struct vfpf_vport_update_accept_param_tlv { + struct channel_tlv tl; + u8 update_rx_mode; + u8 update_tx_mode; + u8 rx_accept_filter; + u8 tx_accept_filter; +}; + +struct vfpf_vport_update_accept_any_vlan_tlv { + struct channel_tlv tl; + u8 update_accept_any_vlan_flg; + u8 accept_any_vlan; + + u8 padding[2]; +}; + +struct vfpf_vport_update_sge_tpa_tlv { + struct channel_tlv tl; + + u16 sge_tpa_flags; + #define VFPF_TPA_IPV4_EN_FLAG (1 << 0) + #define VFPF_TPA_IPV6_EN_FLAG (1 << 1) + #define VFPF_TPA_PKT_SPLIT_FLAG (1 << 2) + #define VFPF_TPA_HDR_DATA_SPLIT_FLAG (1 << 3) + #define VFPF_TPA_GRO_CONSIST_FLAG (1 << 4) + #define VFPF_TPA_TUNN_IPV4_EN_FLAG (1 << 5) + #define VFPF_TPA_TUNN_IPV6_EN_FLAG (1 << 6) + + u8 update_sge_tpa_flags; + #define VFPF_UPDATE_SGE_DEPRECATED_FLAG (1 << 0) + #define VFPF_UPDATE_TPA_EN_FLAG (1 << 1) + #define VFPF_UPDATE_TPA_PARAM_FLAG (1 << 2) + + u8 max_buffers_per_cqe; + + u16 deprecated_sge_buff_size; + u16 tpa_max_size; + u16 tpa_min_size_to_start; + u16 tpa_min_size_to_cont; + + u8 tpa_max_aggs_num; + u8 padding[7]; + +}; + +/* Primary tlv as a header for various extended tlvs for + * various functionalities in vport update ramrod. 
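
Because only bins[] (256 bits, i.e. ETH_MULTICAST_MAC_BINS) ever carries data and obsolete_bins[] stays zero for compatibility, setting a bin reduces to the usual bit operation sketched below. The bin index itself comes from a CRC over the multicast MAC (seeded by ETH_MULTICAST_BIN_FROM_MAC_SEED in eth_common.h) and is taken as given here.

#include "ecore_vfpf_if.h"

/* 'bin' is the approximate-match bin of a multicast MAC, in the range
 * 0..255, i.e. one bit inside the four u64 words of bins[].
 */
static void mcast_bin_set(struct vfpf_vport_update_mcast_bin_tlv *p_mcast,
			  unsigned int bin)
{
	p_mcast->bins[bin / 64] |= 1ULL << (bin % 64);
}
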
+ */ +struct vfpf_vport_update_tlv { + struct vfpf_first_tlv first_tlv; +}; + +struct vfpf_ucast_filter_tlv { + struct vfpf_first_tlv first_tlv; + + u8 opcode; + u8 type; + + u8 mac[ETH_ALEN]; + + u16 vlan; + u16 padding[3]; +}; + +/* tunnel update param tlv */ +struct vfpf_update_tunn_param_tlv { + struct vfpf_first_tlv first_tlv; + + u8 tun_mode_update_mask; + u8 tunn_mode; + u8 update_tun_cls; + u8 vxlan_clss; + u8 l2gre_clss; + u8 ipgre_clss; + u8 l2geneve_clss; + u8 ipgeneve_clss; + u8 update_geneve_port; + u8 update_vxlan_port; + u16 geneve_port; + u16 vxlan_port; + u8 padding[2]; +}; + +struct pfvf_update_tunn_param_tlv { + struct pfvf_tlv hdr; + + u16 tunn_feature_mask; + u8 vxlan_mode; + u8 l2geneve_mode; + u8 ipgeneve_mode; + u8 l2gre_mode; + u8 ipgre_mode; + u8 vxlan_clss; + u8 l2gre_clss; + u8 ipgre_clss; + u8 l2geneve_clss; + u8 ipgeneve_clss; + u16 vxlan_udp_port; + u16 geneve_udp_port; +}; + +struct tlv_buffer_size { + u8 tlv_buffer[TLV_BUFFER_SIZE]; +}; + +struct vfpf_update_coalesce { + struct vfpf_first_tlv first_tlv; + u16 rx_coal; + u16 tx_coal; + u16 qid; + u8 padding[2]; +}; + +struct vfpf_read_coal_req_tlv { + struct vfpf_first_tlv first_tlv; + u16 qid; + u8 is_rx; + u8 padding[5]; +}; + +struct pfvf_read_coal_resp_tlv { + struct pfvf_tlv hdr; + u16 coal; + u8 padding[6]; +}; + +struct vfpf_bulletin_update_mac_tlv { + struct vfpf_first_tlv first_tlv; + u8 mac[ETH_ALEN]; + u8 padding[2]; +}; + +struct vfpf_update_mtu_tlv { + struct vfpf_first_tlv first_tlv; + u16 mtu; + u8 padding[6]; +}; + +union vfpf_tlvs { + struct vfpf_first_tlv first_tlv; + struct vfpf_acquire_tlv acquire; + struct vfpf_start_rxq_tlv start_rxq; + struct vfpf_start_txq_tlv start_txq; + struct vfpf_stop_rxqs_tlv stop_rxqs; + struct vfpf_stop_txqs_tlv stop_txqs; + struct vfpf_update_rxq_tlv update_rxq; + struct vfpf_vport_start_tlv start_vport; + struct vfpf_vport_update_tlv vport_update; + struct vfpf_ucast_filter_tlv ucast_filter; + struct vfpf_update_tunn_param_tlv tunn_param_update; + struct vfpf_update_coalesce update_coalesce; + struct vfpf_read_coal_req_tlv read_coal_req; + struct vfpf_bulletin_update_mac_tlv bulletin_update_mac; + struct vfpf_update_mtu_tlv update_mtu; + struct vfpf_soft_flr_tlv soft_flr; + struct tlv_buffer_size tlv_buf_size; +}; + +union pfvf_tlvs { + struct pfvf_def_resp_tlv default_resp; + struct pfvf_acquire_resp_tlv acquire_resp; + struct tlv_buffer_size tlv_buf_size; + struct pfvf_start_queue_resp_tlv queue_start; + struct pfvf_update_tunn_param_tlv tunn_param_resp; + struct pfvf_read_coal_resp_tlv read_coal_resp; +}; + +/* This is a structure which is allocated in the VF, which the PF may update + * when it deems it necessary to do so. The bulletin board is sampled + * periodically by the VF. A copy per VF is maintained in the PF (to prevent + * loss of data upon multiple updates (or the need for read modify write)). + */ +enum ecore_bulletin_bit { + /* Alert the VF that a forced MAC was set by the PF */ + MAC_ADDR_FORCED = 0, + + /* The VF should not access the vfpf channel */ + VFPF_CHANNEL_INVALID = 1, + + /* Alert the VF that a forced VLAN was set by the PF */ + VLAN_ADDR_FORCED = 2, + + /* Indicate that `default_only_untagged' contains actual data */ + VFPF_BULLETIN_UNTAGGED_DEFAULT = 3, + VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED = 4, + + /* Alert the VF that suggested mac was sent by the PF. 
+ * MAC_ADDR will be disabled in case MAC_ADDR_FORCED is set + */ + VFPF_BULLETIN_MAC_ADDR = 5 +}; + +struct ecore_bulletin_content { + /* crc of structure to ensure is not in mid-update */ + u32 crc; + + u32 version; + + /* bitmap indicating which fields hold valid values */ + u64 valid_bitmap; + + /* used for MAC_ADDR or MAC_ADDR_FORCED */ + u8 mac[ETH_ALEN]; + + /* If valid, 1 => only untagged Rx if no vlan is configured */ + u8 default_only_untagged; + u8 padding; + + /* The following is a 'copy' of ecore_mcp_link_state, + * ecore_mcp_link_params and ecore_mcp_link_capabilities. Since it's + * possible the structs will increase further along the road we cannot + * have it here; Instead we need to have all of its fields. + */ + u8 req_autoneg; + u8 req_autoneg_pause; + u8 req_forced_rx; + u8 req_forced_tx; + u8 padding2[4]; + + u32 req_adv_speed; + u32 req_forced_speed; + u32 req_loopback; + u32 padding3; + + u8 link_up; + u8 full_duplex; + u8 autoneg; + u8 autoneg_complete; + u8 parallel_detection; + u8 pfc_enabled; + u8 partner_tx_flow_ctrl_en; + u8 partner_rx_flow_ctrl_en; + + u8 partner_adv_pause; + u8 sfp_tx_fault; + u16 vxlan_udp_port; + u16 geneve_udp_port; + u8 padding4[2]; + + u32 speed; + u32 partner_adv_speed; + + u32 capability_speed; + + /* Forced vlan */ + u16 pvid; + u16 padding5; +}; + +struct ecore_bulletin { + dma_addr_t phys; + struct ecore_bulletin_content *p_virt; + u32 size; +}; + +enum { +/*!!!!! Make sure to update STRINGS structure accordingly !!!!!*/ + + CHANNEL_TLV_NONE, /* ends tlv sequence */ + CHANNEL_TLV_ACQUIRE, + CHANNEL_TLV_VPORT_START, + CHANNEL_TLV_VPORT_UPDATE, + CHANNEL_TLV_VPORT_TEARDOWN, + CHANNEL_TLV_START_RXQ, + CHANNEL_TLV_START_TXQ, + CHANNEL_TLV_STOP_RXQS, + CHANNEL_TLV_STOP_TXQS, + CHANNEL_TLV_UPDATE_RXQ, + CHANNEL_TLV_INT_CLEANUP, + CHANNEL_TLV_CLOSE, + CHANNEL_TLV_RELEASE, + CHANNEL_TLV_LIST_END, + CHANNEL_TLV_UCAST_FILTER, + CHANNEL_TLV_VPORT_UPDATE_ACTIVATE, + CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH, + CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP, + CHANNEL_TLV_VPORT_UPDATE_MCAST, + CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM, + CHANNEL_TLV_VPORT_UPDATE_RSS, + CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN, + CHANNEL_TLV_VPORT_UPDATE_SGE_TPA, + CHANNEL_TLV_UPDATE_TUNN_PARAM, + CHANNEL_TLV_COALESCE_UPDATE, + CHANNEL_TLV_QID, + CHANNEL_TLV_COALESCE_READ, + CHANNEL_TLV_BULLETIN_UPDATE_MAC, + CHANNEL_TLV_UPDATE_MTU, + CHANNEL_TLV_RDMA_ACQUIRE, + CHANNEL_TLV_RDMA_START, + CHANNEL_TLV_RDMA_STOP, + CHANNEL_TLV_RDMA_ADD_USER, + CHANNEL_TLV_RDMA_REMOVE_USER, + CHANNEL_TLV_RDMA_QUERY_COUNTERS, + CHANNEL_TLV_RDMA_ALLOC_TID, + CHANNEL_TLV_RDMA_REGISTER_TID, + CHANNEL_TLV_RDMA_DEREGISTER_TID, + CHANNEL_TLV_RDMA_FREE_TID, + CHANNEL_TLV_RDMA_CREATE_CQ, + CHANNEL_TLV_RDMA_RESIZE_CQ, + CHANNEL_TLV_RDMA_DESTROY_CQ, + CHANNEL_TLV_RDMA_CREATE_QP, + CHANNEL_TLV_RDMA_MODIFY_QP, + CHANNEL_TLV_RDMA_QUERY_QP, + CHANNEL_TLV_RDMA_DESTROY_QP, + CHANNEL_TLV_RDMA_QUERY_PORT, + CHANNEL_TLV_RDMA_QUERY_DEVICE, + CHANNEL_TLV_RDMA_IWARP_CONNECT, + CHANNEL_TLV_RDMA_IWARP_ACCEPT, + CHANNEL_TLV_RDMA_IWARP_CREATE_LISTEN, + CHANNEL_TLV_RDMA_IWARP_DESTROY_LISTEN, + CHANNEL_TLV_RDMA_IWARP_PAUSE_LISTEN, + CHANNEL_TLV_RDMA_IWARP_REJECT, + CHANNEL_TLV_RDMA_IWARP_SEND_RTR, + CHANNEL_TLV_ESTABLISH_LL2_CONN, + CHANNEL_TLV_TERMINATE_LL2_CONN, + CHANNEL_TLV_ASYNC_EVENT, + CHANNEL_TLV_RDMA_CREATE_SRQ, + CHANNEL_TLV_RDMA_MODIFY_SRQ, + CHANNEL_TLV_RDMA_DESTROY_SRQ, + CHANNEL_TLV_SOFT_FLR, + CHANNEL_TLV_MAX, + + /* Required for iterating over vport-update tlvs. 
+ * Will break in case non-sequential vport-update tlvs. + */ + CHANNEL_TLV_VPORT_UPDATE_MAX = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA + 1, + +/*!!!!! Make sure to update STRINGS structure accordingly !!!!!*/ +}; +extern const char *qede_ecore_channel_tlvs_string[]; + +#endif /* __ECORE_VF_PF_IF_H__ */ diff --git a/src/spdk/dpdk/drivers/net/qede/base/eth_common.h b/src/spdk/dpdk/drivers/net/qede/base/eth_common.h new file mode 100644 index 000000000..4611d86d9 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/qede/base/eth_common.h @@ -0,0 +1,746 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. + * All rights reserved. + * www.cavium.com + */ + +#ifndef __ETH_COMMON__ +#define __ETH_COMMON__ +/********************/ +/* ETH FW CONSTANTS */ +/********************/ + +/* FP HSI version. FP HSI is compatible if (fwVer.major == drvVer.major && + * fwVer.minor >= drvVer.minor) + */ +/* ETH FP HSI Major version */ +#define ETH_HSI_VER_MAJOR 3 +/* ETH FP HSI Minor version */ +#define ETH_HSI_VER_MINOR 11 /* ETH FP HSI Minor version */ + +/* Alias for 8.7.x.x/8.8.x.x ETH FP HSI MINOR version. In this version driver + * is not required to set pkt_len field in eth_tx_1st_bd struct, and tunneling + * offload is not supported. + */ +#define ETH_HSI_VER_NO_PKT_LEN_TUNN 5 + +/* Maximum number of pinned L2 connections (CIDs)*/ +#define ETH_PINNED_CONN_MAX_NUM 32 + +#define ETH_CACHE_LINE_SIZE 64 +#define ETH_RX_CQE_GAP 32 +#define ETH_MAX_RAMROD_PER_CON 8 +#define ETH_TX_BD_PAGE_SIZE_BYTES 4096 +#define ETH_RX_BD_PAGE_SIZE_BYTES 4096 +#define ETH_RX_CQE_PAGE_SIZE_BYTES 4096 +#define ETH_RX_NUM_NEXT_PAGE_BDS 2 + +/* Limitation for Tunneled LSO Packets on the offset (in bytes) of the inner IP + * header (relevant to LSO for tunneled packet): + */ +/* Offset is limited to 253 bytes (inclusive). */ +#define ETH_MAX_TUNN_LSO_INNER_IPV4_OFFSET 253 +/* Offset is limited to 251 bytes (inclusive). 
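
The compatibility rule quoted above translates directly into code; a trivial check, assuming the driver was built against ETH_HSI_VER_MAJOR/MINOR:

#include <stdbool.h>
#include "eth_common.h"

/* FP HSI is compatible when the firmware major matches the driver's
 * and the firmware minor is the same or newer.
 */
static bool eth_fp_hsi_compatible(u8 fw_major, u8 fw_minor)
{
	return fw_major == ETH_HSI_VER_MAJOR &&
	       fw_minor >= ETH_HSI_VER_MINOR;
}

ETH_HSI_VER_NO_PKT_LEN_TUNN is the minor that identifies the legacy 8.7.x/8.8.x fastpath discussed around VFPF_ACQUIRE_CAP_PRE_FP_HSI earlier in this patch.
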
*/ +#define ETH_MAX_TUNN_LSO_INNER_IPV6_OFFSET 251 + +#define ETH_TX_MIN_BDS_PER_NON_LSO_PKT 1 +#define ETH_TX_MAX_BDS_PER_NON_LSO_PACKET 18 +#define ETH_TX_MAX_BDS_PER_LSO_PACKET 255 +#define ETH_TX_MAX_LSO_HDR_NBD 4 +#define ETH_TX_MIN_BDS_PER_LSO_PKT 3 +#define ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT 3 +#define ETH_TX_MIN_BDS_PER_IPV6_WITH_EXT_PKT 2 +#define ETH_TX_MIN_BDS_PER_PKT_W_LOOPBACK_MODE 2 +#define ETH_TX_MIN_BDS_PER_PKT_W_VPORT_FORWARDING 4 +/* (QM_REG_TASKBYTECRDCOST_0, QM_VOQ_BYTE_CRD_TASK_COST) - + * (VLAN-TAG + CRC + IPG + PREAMBLE) + */ +#define ETH_TX_MAX_NON_LSO_PKT_LEN (9700 - (4 + 4 + 12 + 8)) +#define ETH_TX_MAX_LSO_HDR_BYTES 510 +/* Number of BDs to consider for LSO sliding window restriction is + * (ETH_TX_LSO_WINDOW_BDS_NUM - hdr_nbd) + */ +#define ETH_TX_LSO_WINDOW_BDS_NUM (18 - 1) +/* Minimum data length (in bytes) in LSO sliding window */ +#define ETH_TX_LSO_WINDOW_MIN_LEN 9700 +/* Maximum LSO packet TCP payload length (in bytes) */ +#define ETH_TX_MAX_LSO_PAYLOAD_LEN 0xFE000 +/* Number of same-as-last resources in tx switching */ +#define ETH_TX_NUM_SAME_AS_LAST_ENTRIES 320 +/* Value for a connection for which same as last feature is disabled */ +#define ETH_TX_INACTIVE_SAME_AS_LAST 0xFFFF + +/* Maximum number of statistics counters */ +#define ETH_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS +/* Maximum number of statistics counters when doubled VF zone used */ +#define ETH_NUM_STATISTIC_COUNTERS_DOUBLE_VF_ZONE \ + (ETH_NUM_STATISTIC_COUNTERS - MAX_NUM_VFS / 2) +/* Maximum number of statistics counters when quad VF zone used */ +#define ETH_NUM_STATISTIC_COUNTERS_QUAD_VF_ZONE \ + (ETH_NUM_STATISTIC_COUNTERS - 3 * MAX_NUM_VFS / 4) + +/* Maximum number of buffers, used for RX packet placement */ +#define ETH_RX_MAX_BUFF_PER_PKT 5 +/* Minimum number of free BDs in RX ring, that guarantee receiving of at least + * one RX packet. + */ +#define ETH_RX_BD_THRESHOLD 16 + +/* num of MAC/VLAN filters */ +#define ETH_NUM_MAC_FILTERS 512 +#define ETH_NUM_VLAN_FILTERS 512 + +/* approx. multicast constants */ +/* CRC seed for multicast bin calculation */ +#define ETH_MULTICAST_BIN_FROM_MAC_SEED 0 +#define ETH_MULTICAST_MAC_BINS 256 +#define ETH_MULTICAST_MAC_BINS_IN_REGS (ETH_MULTICAST_MAC_BINS / 32) + +/* ethernet vport update constants */ +#define ETH_FILTER_RULES_COUNT 10 +/* number of RSS indirection table entries, per Vport) */ +#define ETH_RSS_IND_TABLE_ENTRIES_NUM 128 +/* Length of RSS key (in regs) */ +#define ETH_RSS_KEY_SIZE_REGS 10 +/* number of available RSS engines in AH */ +#define ETH_RSS_ENGINE_NUM_K2 207 +/* number of available RSS engines in BB */ +#define ETH_RSS_ENGINE_NUM_BB 127 + +/* TPA constants */ +/* Maximum number of open TPA aggregations */ +#define ETH_TPA_MAX_AGGS_NUM 64 +/* TPA-start CQE additional BD list length. Used for backward compatible */ +#define ETH_TPA_CQE_START_BW_LEN_LIST_SIZE 2 +/* Maximum number of buffers, reported by TPA-continue CQE */ +#define ETH_TPA_CQE_CONT_LEN_LIST_SIZE 6 +/* Maximum number of buffers, reported by TPA-end CQE */ +#define ETH_TPA_CQE_END_LEN_LIST_SIZE 4 + +/* Control frame check constants */ +/* Number of etherType values configured by driver for control frame check */ +#define ETH_CTL_FRAME_ETH_TYPE_NUM 4 + +/* GFS constants */ +#define ETH_GFT_TRASHCAN_VPORT 0x1FF /* GFT drop flow vport number */ + + + +/* + * Destination port mode + */ +enum dst_port_mode { + DST_PORT_PHY /* Send to physical port. */, + DST_PORT_LOOPBACK /* Send to loopback port. 
*/, + DST_PORT_PHY_LOOPBACK /* Send to physical and loopback port. */, + DST_PORT_DROP /* Drop the packet in PBF. */, + MAX_DST_PORT_MODE +}; + + +/* + * Ethernet address type + */ +enum eth_addr_type { + BROADCAST_ADDRESS, + MULTICAST_ADDRESS, + UNICAST_ADDRESS, + UNKNOWN_ADDRESS, + MAX_ETH_ADDR_TYPE +}; + + +struct eth_tx_1st_bd_flags { + u8 bitfields; +/* Set to 1 in the first BD. (for debug) */ +#define ETH_TX_1ST_BD_FLAGS_START_BD_MASK 0x1 +#define ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT 0 +/* Do not allow additional VLAN manipulations on this packet. */ +#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_MASK 0x1 +#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_SHIFT 1 +/* Recalculate IP checksum. For tunneled packet - relevant to inner header. */ +#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_MASK 0x1 +#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT 2 +/* Recalculate TCP/UDP checksum. + * For tunneled packet - relevant to inner header. + */ +#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_MASK 0x1 +#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT 3 +/* If set, insert VLAN tag from vlan field to the packet. + * For tunneled packet - relevant to outer header. + */ +#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_MASK 0x1 +#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT 4 +/* If set, this is an LSO packet. Note: For Tunneled LSO packets, the offset of + * the inner IPV4 (and IPV6) header is limited to 253 (and 251 respectively) + * bytes, inclusive. + */ +#define ETH_TX_1ST_BD_FLAGS_LSO_MASK 0x1 +#define ETH_TX_1ST_BD_FLAGS_LSO_SHIFT 5 +/* Recalculate Tunnel IP Checksum (if Tunnel IP Header is IPv4) */ +#define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_MASK 0x1 +#define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT 6 +/* Recalculate Tunnel UDP/GRE Checksum (Depending on Tunnel Type) */ +#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK 0x1 +/* Recalculate Tunnel UDP/GRE Checksum (Depending on Tunnel Type). In case of + * GRE tunnel, this flag means GRE CSO, and in this case GRE checksum field + * Must be present. + */ +#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK 0x1 +#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT 7 +}; + +/* + * The parsing information data for the first tx bd of a given packet. + */ +struct eth_tx_data_1st_bd { +/* VLAN tag to insert to packet (if enabled by vlan_insertion flag). */ + __le16 vlan; +/* Number of BDs in packet. Should be at least 1 in non-LSO packet and at least + * 3 in LSO (or Tunnel with IPv6+ext) packet. + */ + u8 nbds; + struct eth_tx_1st_bd_flags bd_flags; + __le16 bitfields; +/* Indicates a tunneled packet. Must be set for encapsulated packet. */ +#define ETH_TX_DATA_1ST_BD_TUNN_FLAG_MASK 0x1 +#define ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT 0 +#define ETH_TX_DATA_1ST_BD_RESERVED0_MASK 0x1 +#define ETH_TX_DATA_1ST_BD_RESERVED0_SHIFT 1 +/* Total packet length - must be filled for non-LSO packets. */ +#define ETH_TX_DATA_1ST_BD_PKT_LEN_MASK 0x3FFF +#define ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT 2 +}; + +/* + * The parsing information data for the second tx bd of a given packet. + */ +struct eth_tx_data_2nd_bd { +/* For tunnel with IPv6+ext - Tunnel header IP datagram length (in BYTEs) */ + __le16 tunn_ip_size; + __le16 bitfields1; +/* For Tunnel header with IPv6 ext. - Inner L2 Header Size (in 2-byte WORDs) */ +#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK 0xF +#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_SHIFT 0 +/* For Tunnel header with IPv6 ext. 
- Inner L2 Header MAC DA Type + * (use enum eth_addr_type) + */ +#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_MASK 0x3 +#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_SHIFT 4 +/* Destination port mode. (use enum dest_port_mode) */ +#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_MASK 0x3 +#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_SHIFT 6 +/* Should be 0 in all the BDs, except the first one. (for debug) */ +#define ETH_TX_DATA_2ND_BD_START_BD_MASK 0x1 +#define ETH_TX_DATA_2ND_BD_START_BD_SHIFT 8 +/* For Tunnel header with IPv6 ext. - Tunnel Type (use enum eth_tx_tunn_type) */ +#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_MASK 0x3 +#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_SHIFT 9 +/* For LSO / Tunnel header with IPv6+ext - Set if inner header is IPv6 */ +#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_MASK 0x1 +#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_SHIFT 11 +/* In tunneling mode - Set to 1 when the Inner header is IPv6 with extension. + * Otherwise set to 1 if the header is IPv6 with extension. + */ +#define ETH_TX_DATA_2ND_BD_IPV6_EXT_MASK 0x1 +#define ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT 12 +/* Set to 1 if Tunnel (outer = encapsulating) header has IPv6 ext. (Note: 3rd BD + * is required, hence EDPM does not support Tunnel [outer] header with Ipv6Ext) + */ +#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_MASK 0x1 +#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_SHIFT 13 +/* Set if (inner) L4 protocol is UDP. (Required when IPv6+ext (or tunnel with + * inner or outer Ipv6+ext) and l4_csum is set) + */ +#define ETH_TX_DATA_2ND_BD_L4_UDP_MASK 0x1 +#define ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT 14 +/* The pseudo header checksum type in the L4 checksum field. Required when + * IPv6+ext and l4_csum is set. (use enum eth_l4_pseudo_checksum_mode) + */ +#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_MASK 0x1 +#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT 15 + __le16 bitfields2; +/* For inner/outer header IPv6+ext - (inner) L4 header offset (in 2-byte WORDs). + * For regular packet - offset from the beginning of the packet. For tunneled + * packet - offset from the beginning of the inner header + */ +#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK 0x1FFF +#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT 0 +#define ETH_TX_DATA_2ND_BD_RESERVED0_MASK 0x7 +#define ETH_TX_DATA_2ND_BD_RESERVED0_SHIFT 13 +}; + +/* + * Firmware data for L2-EDPM packet. + */ +struct eth_edpm_fw_data { +/* Parsing information data from the 1st BD. */ + struct eth_tx_data_1st_bd data_1st_bd; +/* Parsing information data from the 2nd BD. */ + struct eth_tx_data_2nd_bd data_2nd_bd; + __le32 reserved; +}; + + +/* + * FW debug. + */ +struct eth_fast_path_cqe_fw_debug { + __le16 reserved2 /* FW reserved. */; +}; + + +/* + * tunneling parsing flags + */ +struct eth_tunnel_parsing_flags { + u8 flags; +/* 0 - no tunneling, 1 - GENEVE, 2 - GRE, 3 - VXLAN + * (use enum eth_rx_tunn_type) + */ +#define ETH_TUNNEL_PARSING_FLAGS_TYPE_MASK 0x3 +#define ETH_TUNNEL_PARSING_FLAGS_TYPE_SHIFT 0 +/* If it s not an encapsulated packet then put 0x0. If it s an encapsulated + * packet but the tenant-id doesn t exist then put 0x0. Else put 0x1 + * + */ +#define ETH_TUNNEL_PARSING_FLAGS_TENNANT_ID_EXIST_MASK 0x1 +#define ETH_TUNNEL_PARSING_FLAGS_TENNANT_ID_EXIST_SHIFT 2 +/* Type of the next header above the tunneling: 0 - unknown, 1 - L2, 2 - Ipv4, + * 3 - IPv6 (use enum tunnel_next_protocol) + */ +#define ETH_TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_MASK 0x3 +#define ETH_TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_SHIFT 3 +/* The result of comparing the DA-ip of the tunnel header. 
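
All of the Tx BD 'bitfields' words are driven through the *_MASK/*_SHIFT pairs above. The helper below is a hypothetical stand-in for the PMD's own field macro and shows the intended packing for a plain non-LSO first BD; little-endian conversion and the addr/nbytes fields of the enclosing BD are deliberately left out.

#include "eth_common.h"

#define SET_FIELD_SKETCH(word, name, val) \
	((word) |= (((val) & name##_MASK) << name##_SHIFT))

static void fill_1st_bd_data(struct eth_tx_data_1st_bd *d,
			     u16 pkt_len, u8 nbds)
{
	d->nbds = nbds;				/* >= 1 for non-LSO */
	d->bd_flags.bitfields = 0;
	SET_FIELD_SKETCH(d->bd_flags.bitfields,
			 ETH_TX_1ST_BD_FLAGS_START_BD, 1);
	d->bitfields = 0;
	/* Total packet length must be filled for non-LSO packets. */
	SET_FIELD_SKETCH(d->bitfields, ETH_TX_DATA_1ST_BD_PKT_LEN, pkt_len);
	d->vlan = 0;				/* no VLAN insertion */
}
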
*/ +#define ETH_TUNNEL_PARSING_FLAGS_FIRSTHDRIPMATCH_MASK 0x1 +#define ETH_TUNNEL_PARSING_FLAGS_FIRSTHDRIPMATCH_SHIFT 5 +#define ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_MASK 0x1 +#define ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_SHIFT 6 +#define ETH_TUNNEL_PARSING_FLAGS_IPV4_OPTIONS_MASK 0x1 +#define ETH_TUNNEL_PARSING_FLAGS_IPV4_OPTIONS_SHIFT 7 +}; + +/* + * PMD flow control bits + */ +struct eth_pmd_flow_flags { + u8 flags; +#define ETH_PMD_FLOW_FLAGS_VALID_MASK 0x1 /* CQE valid bit */ +#define ETH_PMD_FLOW_FLAGS_VALID_SHIFT 0 +#define ETH_PMD_FLOW_FLAGS_TOGGLE_MASK 0x1 /* CQE ring toggle bit */ +#define ETH_PMD_FLOW_FLAGS_TOGGLE_SHIFT 1 +#define ETH_PMD_FLOW_FLAGS_RESERVED_MASK 0x3F +#define ETH_PMD_FLOW_FLAGS_RESERVED_SHIFT 2 +}; + +/* + * Regular ETH Rx FP CQE. + */ +struct eth_fast_path_rx_reg_cqe { + u8 type /* CQE type */; + u8 bitfields; +/* Type of calculated RSS hash (use enum rss_hash_type) */ +#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK 0x7 +#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT 0 +/* Traffic Class */ +#define ETH_FAST_PATH_RX_REG_CQE_TC_MASK 0xF +#define ETH_FAST_PATH_RX_REG_CQE_TC_SHIFT 3 +#define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_MASK 0x1 +#define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_SHIFT 7 + __le16 pkt_len /* Total packet length (from the parser) */; +/* Parsing and error flags from the parser */ + struct parsing_and_err_flags pars_flags; + __le16 vlan_tag /* 802.1q VLAN tag */; + __le32 rss_hash /* RSS hash result */; + __le16 len_on_first_bd /* Number of bytes placed on first BD */; + u8 placement_offset /* Offset of placement from BD start */; +/* Tunnel Parsing Flags */ + struct eth_tunnel_parsing_flags tunnel_pars_flags; + u8 bd_num /* Number of BDs, used for packet */; + u8 reserved; + __le16 reserved2; +/* aRFS flow ID or Resource ID - Indicates a Vport ID from which packet was + * sent, used when sending from VF to VF Representor. + */ + __le32 flow_id_or_resource_id; + u8 reserved1[7]; + struct eth_pmd_flow_flags pmd_flags /* CQE valid and toggle bits */; +}; + + +/* + * TPA-continue ETH Rx FP CQE. + */ +struct eth_fast_path_rx_tpa_cont_cqe { + u8 type /* CQE type */; + u8 tpa_agg_index /* TPA aggregation index */; +/* List of the segment sizes */ + __le16 len_list[ETH_TPA_CQE_CONT_LEN_LIST_SIZE]; + u8 reserved; + u8 reserved1 /* FW reserved. */; + __le16 reserved2[ETH_TPA_CQE_CONT_LEN_LIST_SIZE] /* FW reserved. */; + u8 reserved3[3]; + struct eth_pmd_flow_flags pmd_flags /* CQE valid and toggle bits */; +}; + + +/* + * TPA-end ETH Rx FP CQE . + */ +struct eth_fast_path_rx_tpa_end_cqe { + u8 type /* CQE type */; + u8 tpa_agg_index /* TPA aggregation index */; + __le16 total_packet_len /* Total aggregated packet length */; + u8 num_of_bds /* Total number of BDs comprising the packet */; +/* Aggregation end reason. Use enum eth_tpa_end_reason */ + u8 end_reason; + __le16 num_of_coalesced_segs /* Number of coalesced TCP segments */; + __le32 ts_delta /* TCP timestamp delta */; +/* List of the segment sizes */ + __le16 len_list[ETH_TPA_CQE_END_LEN_LIST_SIZE]; + __le16 reserved3[ETH_TPA_CQE_END_LEN_LIST_SIZE] /* FW reserved. */; + __le16 reserved1; + u8 reserved2 /* FW reserved. */; + struct eth_pmd_flow_flags pmd_flags /* CQE valid and toggle bits */; +}; + + +/* + * TPA-start ETH Rx FP CQE. 
+ */ +struct eth_fast_path_rx_tpa_start_cqe { + u8 type /* CQE type */; + u8 bitfields; +/* Type of calculated RSS hash (use enum rss_hash_type) */ +#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_MASK 0x7 +#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_SHIFT 0 +/* Traffic Class */ +#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_MASK 0xF +#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_SHIFT 3 +#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_MASK 0x1 +#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_SHIFT 7 + __le16 seg_len /* Segment length (packetLen from the parser) */; +/* Parsing and error flags from the parser */ + struct parsing_and_err_flags pars_flags; + __le16 vlan_tag /* 802.1q VLAN tag */; + __le32 rss_hash /* RSS hash result */; + __le16 len_on_first_bd /* Number of bytes placed on first BD */; + u8 placement_offset /* Offset of placement from BD start */; +/* Tunnel Parsing Flags */ + struct eth_tunnel_parsing_flags tunnel_pars_flags; + u8 tpa_agg_index /* TPA aggregation index */; + u8 header_len /* Packet L2+L3+L4 header length */; +/* Additional BDs length list. Used for backward compatible. */ + __le16 bw_ext_bd_len_list[ETH_TPA_CQE_START_BW_LEN_LIST_SIZE]; + __le16 reserved2; +/* aRFS or GFS flow ID or Resource ID - Indicates a Vport ID from which packet + * was sent, used when sending from VF to VF Representor + */ + __le32 flow_id_or_resource_id; + u8 reserved[3]; + struct eth_pmd_flow_flags pmd_flags /* CQE valid and toggle bits */; +}; + + +/* + * The L4 pseudo checksum mode for Ethernet + */ +enum eth_l4_pseudo_checksum_mode { +/* Pseudo Header checksum on packet is calculated with the correct packet length + * field. + */ + ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH, +/* Pseudo Header checksum on packet is calculated with zero length field. */ + ETH_L4_PSEUDO_CSUM_ZERO_LENGTH, + MAX_ETH_L4_PSEUDO_CHECKSUM_MODE +}; + + + +struct eth_rx_bd { + struct regpair addr /* single continues buffer */; +}; + + +/* + * regular ETH Rx SP CQE + */ +struct eth_slow_path_rx_cqe { + u8 type /* CQE type */; + u8 ramrod_cmd_id; + u8 error_flag; + u8 reserved[25]; + __le16 echo; + u8 reserved1; + struct eth_pmd_flow_flags pmd_flags /* CQE valid and toggle bits */; +}; + +/* + * union for all ETH Rx CQE types + */ +union eth_rx_cqe { +/* Regular FP CQE */ + struct eth_fast_path_rx_reg_cqe fast_path_regular; +/* TPA-start CQE */ + struct eth_fast_path_rx_tpa_start_cqe fast_path_tpa_start; +/* TPA-continue CQE */ + struct eth_fast_path_rx_tpa_cont_cqe fast_path_tpa_cont; +/* TPA-end CQE */ + struct eth_fast_path_rx_tpa_end_cqe fast_path_tpa_end; + struct eth_slow_path_rx_cqe slow_path /* SP CQE */; +}; + + +/* + * ETH Rx CQE type + */ +enum eth_rx_cqe_type { + ETH_RX_CQE_TYPE_UNUSED, + ETH_RX_CQE_TYPE_REGULAR /* Regular FP ETH Rx CQE */, + ETH_RX_CQE_TYPE_SLOW_PATH /* Slow path ETH Rx CQE */, + ETH_RX_CQE_TYPE_TPA_START /* TPA start ETH Rx CQE */, + ETH_RX_CQE_TYPE_TPA_CONT /* TPA Continue ETH Rx CQE */, + ETH_RX_CQE_TYPE_TPA_END /* TPA end ETH Rx CQE */, + MAX_ETH_RX_CQE_TYPE +}; + + +/* + * Wrapper for PD RX CQE - used in order to cover full cache line when writing + * CQE + */ +struct eth_rx_pmd_cqe { + union eth_rx_cqe cqe /* CQE data itself */; + u8 reserved[ETH_RX_CQE_GAP]; +}; + + +/* + * Eth RX Tunnel Type + */ +enum eth_rx_tunn_type { + ETH_RX_NO_TUNN /* No Tunnel. */, + ETH_RX_TUNN_GENEVE /* GENEVE Tunnel. */, + ETH_RX_TUNN_GRE /* GRE Tunnel. */, + ETH_RX_TUNN_VXLAN /* VXLAN Tunnel. */, + MAX_ETH_RX_TUNN_TYPE +}; + + + +/* + * Aggregation end reason. 
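
One plausible way for a polling Rx routine to consume the valid/toggle bits of eth_pmd_flow_flags is sketched below. The toggle comparison assumes the caller flips its expected value on every ring wrap; that convention is an assumption of this sketch, not something the header spells out.

#include <stdbool.h>
#include "eth_common.h"

static bool rx_cqe_is_new(const struct eth_fast_path_rx_reg_cqe *cqe,
			  u8 exp_toggle)
{
	u8 flags = cqe->pmd_flags.flags;

	if (!((flags >> ETH_PMD_FLOW_FLAGS_VALID_SHIFT) &
	      ETH_PMD_FLOW_FLAGS_VALID_MASK))
		return false;

	return ((flags >> ETH_PMD_FLOW_FLAGS_TOGGLE_SHIFT) &
		ETH_PMD_FLOW_FLAGS_TOGGLE_MASK) == exp_toggle;
}

A full handler would then switch on cqe->type (enum eth_rx_cqe_type) to pick the regular, TPA or slow-path member of union eth_rx_cqe.
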
+ */ +enum eth_tpa_end_reason { + ETH_AGG_END_UNUSED, + ETH_AGG_END_SP_UPDATE /* SP configuration update */, +/* Maximum aggregation length or maximum buffer number used. */ + ETH_AGG_END_MAX_LEN, +/* TCP PSH flag or TCP payload length below continue threshold. */ + ETH_AGG_END_LAST_SEG, + ETH_AGG_END_TIMEOUT /* Timeout expiration. */, +/* Packet header not consistency: different IPv4 TOS, TTL or flags, IPv6 TC, + * Hop limit or Flow label, TCP header length or TS options. In GRO different + * TS value, SMAC, DMAC, ackNum, windowSize or VLAN + */ + ETH_AGG_END_NOT_CONSISTENT, +/* Out of order or retransmission packet: sequence, ack or timestamp not + * consistent with previous segment. + */ + ETH_AGG_END_OUT_OF_ORDER, +/* Next segment cant be aggregated due to LLC/SNAP, IP error, IP fragment, IPv4 + * options, IPv6 extension, IP ECN = CE, TCP errors, TCP options, zero TCP + * payload length , TCP flags or not supported tunnel header options. + */ + ETH_AGG_END_NON_TPA_SEG, + MAX_ETH_TPA_END_REASON +}; + + + +/* + * The first tx bd of a given packet + */ +struct eth_tx_1st_bd { + struct regpair addr /* Single continuous buffer */; + __le16 nbytes /* Number of bytes in this BD. */; + struct eth_tx_data_1st_bd data /* Parsing information data. */; +}; + + + +/* + * The second tx bd of a given packet + */ +struct eth_tx_2nd_bd { + struct regpair addr /* Single continuous buffer */; + __le16 nbytes /* Number of bytes in this BD. */; + struct eth_tx_data_2nd_bd data /* Parsing information data. */; +}; + + +/* + * The parsing information data for the third tx bd of a given packet. + */ +struct eth_tx_data_3rd_bd { + __le16 lso_mss /* For LSO packet - the MSS in bytes. */; + __le16 bitfields; +/* For LSO with inner/outer IPv6+ext - TCP header length (in 4-byte WORDs) */ +#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK 0xF +#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT 0 +/* LSO - number of BDs which contain headers. value should be in range + * (1..ETH_TX_MAX_LSO_HDR_NBD). + */ +#define ETH_TX_DATA_3RD_BD_HDR_NBD_MASK 0xF +#define ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT 4 +/* Should be 0 in all the BDs, except the first one. (for debug) */ +#define ETH_TX_DATA_3RD_BD_START_BD_MASK 0x1 +#define ETH_TX_DATA_3RD_BD_START_BD_SHIFT 8 +#define ETH_TX_DATA_3RD_BD_RESERVED0_MASK 0x7F +#define ETH_TX_DATA_3RD_BD_RESERVED0_SHIFT 9 +/* For tunnel with IPv6+ext - Pointer to tunnel L4 Header (in 2-byte WORDs) */ + u8 tunn_l4_hdr_start_offset_w; +/* For tunnel with IPv6+ext - Total size of Tunnel Header (in 2-byte WORDs) */ + u8 tunn_hdr_size_w; +}; + +/* + * The third tx bd of a given packet + */ +struct eth_tx_3rd_bd { + struct regpair addr /* Single continuous buffer */; + __le16 nbytes /* Number of bytes in this BD. */; + struct eth_tx_data_3rd_bd data /* Parsing information data. */; +}; + + +/* + * The parsing information data for the forth tx bd of a given packet. 
+ */ +struct eth_tx_data_4th_bd { +/* Destination Vport ID to forward the packet, applicable only when + * tx_dst_port_mode_config == ETH_TX_DST_MODE_CONFIG_FORWARD_DATA_IN_BD and + * dst_port_mode == DST_PORT_LOOPBACK, used to route the packet from VF + * Representor to VF + */ + u8 dst_vport_id; + u8 reserved4; + __le16 bitfields; +/* if set, dst_vport_id has a valid value and will be used in FW */ +#define ETH_TX_DATA_4TH_BD_DST_VPORT_ID_VALID_MASK 0x1 +#define ETH_TX_DATA_4TH_BD_DST_VPORT_ID_VALID_SHIFT 0 +#define ETH_TX_DATA_4TH_BD_RESERVED1_MASK 0x7F +#define ETH_TX_DATA_4TH_BD_RESERVED1_SHIFT 1 +/* Should be 0 in all the BDs, except the first one. (for debug) */ +#define ETH_TX_DATA_4TH_BD_START_BD_MASK 0x1 +#define ETH_TX_DATA_4TH_BD_START_BD_SHIFT 8 +#define ETH_TX_DATA_4TH_BD_RESERVED2_MASK 0x7F +#define ETH_TX_DATA_4TH_BD_RESERVED2_SHIFT 9 + __le16 reserved3; +}; + +/* + * The forth tx bd of a given packet + */ +struct eth_tx_4th_bd { + struct regpair addr /* Single continuous buffer */; + __le16 nbytes /* Number of bytes in this BD. */; + struct eth_tx_data_4th_bd data /* Parsing information data. */; +}; + + +/* + * Complementary information for the regular tx bd of a given packet. + */ +struct eth_tx_data_bd { + __le16 reserved0; + __le16 bitfields; +#define ETH_TX_DATA_BD_RESERVED1_MASK 0xFF +#define ETH_TX_DATA_BD_RESERVED1_SHIFT 0 +/* Should be 0 in all the BDs, except the first one. (for debug) */ +#define ETH_TX_DATA_BD_START_BD_MASK 0x1 +#define ETH_TX_DATA_BD_START_BD_SHIFT 8 +#define ETH_TX_DATA_BD_RESERVED2_MASK 0x7F +#define ETH_TX_DATA_BD_RESERVED2_SHIFT 9 + __le16 reserved3; +}; + +/* + * The common regular TX BD ring element + */ +struct eth_tx_bd { + struct regpair addr /* Single continuous buffer */; + __le16 nbytes /* Number of bytes in this BD. */; + struct eth_tx_data_bd data /* Complementary information. */; +}; + + +union eth_tx_bd_types { + struct eth_tx_1st_bd first_bd /* The first tx bd of a given packet */; +/* The second tx bd of a given packet */ + struct eth_tx_2nd_bd second_bd; + struct eth_tx_3rd_bd third_bd /* The third tx bd of a given packet */; + struct eth_tx_4th_bd fourth_bd /* The fourth tx bd of a given packet */; + struct eth_tx_bd reg_bd /* The common regular bd */; +}; + + + + + + +/* + * Eth Tx Tunnel Type + */ +enum eth_tx_tunn_type { + ETH_TX_TUNN_GENEVE /* GENEVE Tunnel. */, + ETH_TX_TUNN_TTAG /* T-Tag Tunnel. */, + ETH_TX_TUNN_GRE /* GRE Tunnel. */, + ETH_TX_TUNN_VXLAN /* VXLAN Tunnel. 
*/, + MAX_ETH_TX_TUNN_TYPE +}; + + +/* + * Mstorm Queue Zone + */ +struct mstorm_eth_queue_zone { + struct eth_rx_prod_data rx_producers /* ETH Rx producers data */; + __le32 reserved[3]; +}; + + +/* + * Ystorm Queue Zone + */ +struct xstorm_eth_queue_zone { +/* Tx interrupt coalescing TimeSet */ + struct coalescing_timeset int_coalescing_timeset; + u8 reserved[7]; +}; + + +/* + * ETH doorbell data + */ +struct eth_db_data { + u8 params; +/* destination of doorbell (use enum db_dest) */ +#define ETH_DB_DATA_DEST_MASK 0x3 +#define ETH_DB_DATA_DEST_SHIFT 0 +/* aggregative command to CM (use enum db_agg_cmd_sel) */ +#define ETH_DB_DATA_AGG_CMD_MASK 0x3 +#define ETH_DB_DATA_AGG_CMD_SHIFT 2 +#define ETH_DB_DATA_BYPASS_EN_MASK 0x1 /* enable QM bypass */ +#define ETH_DB_DATA_BYPASS_EN_SHIFT 4 +#define ETH_DB_DATA_RESERVED_MASK 0x1 +#define ETH_DB_DATA_RESERVED_SHIFT 5 +/* aggregative value selection */ +#define ETH_DB_DATA_AGG_VAL_SEL_MASK 0x3 +#define ETH_DB_DATA_AGG_VAL_SEL_SHIFT 6 +/* bit for every DQ counter flags in CM context that DQ can increment */ + u8 agg_flags; + __le16 bd_prod; +}; + +#endif /* __ETH_COMMON__ */ diff --git a/src/spdk/dpdk/drivers/net/qede/base/mcp_public.h b/src/spdk/dpdk/drivers/net/qede/base/mcp_public.h new file mode 100644 index 000000000..6667c2d7a --- /dev/null +++ b/src/spdk/dpdk/drivers/net/qede/base/mcp_public.h @@ -0,0 +1,2023 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. + * All rights reserved. + * www.cavium.com + */ + +/**************************************************************************** + * + * Name: mcp_public.h + * + * Description: MCP public data + * + * Created: 13/01/2013 yanivr + * + ****************************************************************************/ + +#ifndef MCP_PUBLIC_H +#define MCP_PUBLIC_H + +#define VF_MAX_STATIC 192 /* In case of AH */ +#define VF_BITMAP_SIZE_IN_DWORDS (VF_MAX_STATIC / 32) +#define VF_BITMAP_SIZE_IN_BYTES (VF_BITMAP_SIZE_IN_DWORDS * sizeof(u32)) + +/* Extended array size to support for 240 VFs 8 dwords */ +#define EXT_VF_MAX_STATIC 240 +#define EXT_VF_BITMAP_SIZE_IN_DWORDS (((EXT_VF_MAX_STATIC - 1) / 32) + 1) +#define EXT_VF_BITMAP_SIZE_IN_BYTES (EXT_VF_BITMAP_SIZE_IN_DWORDS * \ + sizeof(u32)) +#define ADDED_VF_BITMAP_SIZE 2 + +#define MCP_GLOB_PATH_MAX 2 +#define MCP_PORT_MAX 2 /* Global */ +#define MCP_GLOB_PORT_MAX 4 /* Global */ +#define MCP_GLOB_FUNC_MAX 16 /* Global */ + +typedef u32 offsize_t; /* In DWORDS !!! */ +/* Offset from the beginning of the MCP scratchpad */ +#define OFFSIZE_OFFSET_OFFSET 0 +#define OFFSIZE_OFFSET_MASK 0x0000ffff +/* Size of specific element (not the whole array if any) */ +#define OFFSIZE_SIZE_OFFSET 16 +#define OFFSIZE_SIZE_MASK 0xffff0000 + +/* SECTION_OFFSET is calculating the offset in bytes out of offsize */ +#define SECTION_OFFSET(_offsize) \ + ((((_offsize & OFFSIZE_OFFSET_MASK) >> OFFSIZE_OFFSET_OFFSET) << 2)) + +/* SECTION_SIZE is calculating the size in bytes out of offsize */ +#define SECTION_SIZE(_offsize) \ + (((_offsize & OFFSIZE_SIZE_MASK) >> OFFSIZE_SIZE_OFFSET) << 2) + +/* SECTION_ADDR returns the GRC addr of a section, given offsize and index + * within section + */ +#define SECTION_ADDR(_offsize, idx) \ + (MCP_REG_SCRATCH + \ + SECTION_OFFSET(_offsize) + (SECTION_SIZE(_offsize) * idx)) + +/* SECTION_OFFSIZE_ADDR returns the GRC addr to the offsize address. 
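
Both halves of an offsize count dwords, which is why the macros shift the extracted value left by 2. A small decode helper plus a worked value follows, assuming the u32 typedef from the surrounding headers:

#include "mcp_public.h"

/* For offsize 0x00200100 (0x20 dwords starting at dword offset 0x100)
 * this yields offset_bytes = 0x400 and size_bytes = 0x80;
 * SECTION_ADDR() then adds idx whole sections on top of
 * MCP_REG_SCRATCH.
 */
static void decode_offsize(offsize_t offsize,
			   u32 *offset_bytes, u32 *size_bytes)
{
	*offset_bytes = SECTION_OFFSET(offsize);
	*size_bytes = SECTION_SIZE(offsize);
}
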
Use + * offsetof, since the OFFSETUP collide with the firmware definition + */ +#define SECTION_OFFSIZE_ADDR(_pub_base, _section) \ + (_pub_base + offsetof(struct mcp_public_data, sections[_section])) +/* PHY configuration */ +struct eth_phy_cfg { +/* 0 = autoneg, 1000/10000/20000/25000/40000/50000/100000 */ + u32 speed; +#define ETH_SPEED_AUTONEG 0 +#define ETH_SPEED_SMARTLINQ 0x8 /* deprecated - use link_modes field instead */ + + u32 pause; /* bitmask */ +#define ETH_PAUSE_NONE 0x0 +#define ETH_PAUSE_AUTONEG 0x1 +#define ETH_PAUSE_RX 0x2 +#define ETH_PAUSE_TX 0x4 + + u32 adv_speed; /* Default should be the speed_cap_mask */ + u32 loopback_mode; +#define ETH_LOOPBACK_NONE (0) +/* Serdes loopback. In AH, it refers to Near End */ +#define ETH_LOOPBACK_INT_PHY (1) +#define ETH_LOOPBACK_EXT_PHY (2) /* External PHY Loopback */ +/* External Loopback (Require loopback plug) */ +#define ETH_LOOPBACK_EXT (3) +#define ETH_LOOPBACK_MAC (4) /* MAC Loopback - not supported */ +#define ETH_LOOPBACK_CNIG_AH_ONLY_0123 (5) /* Port to itself */ +#define ETH_LOOPBACK_CNIG_AH_ONLY_2301 (6) /* Port to Port */ +#define ETH_LOOPBACK_PCS_AH_ONLY (7) /* PCS loopback (TX to RX) */ +/* Loop RX packet from PCS to TX */ +#define ETH_LOOPBACK_REVERSE_MAC_AH_ONLY (8) +/* Remote Serdes Loopback (RX to TX) */ +#define ETH_LOOPBACK_INT_PHY_FEA_AH_ONLY (9) + + u32 eee_cfg; +/* EEE is enabled (configuration). Refer to eee_status->active for negotiated + * status + */ +#define EEE_CFG_EEE_ENABLED (1 << 0) +#define EEE_CFG_TX_LPI (1 << 1) +#define EEE_CFG_ADV_SPEED_1G (1 << 2) +#define EEE_CFG_ADV_SPEED_10G (1 << 3) +#define EEE_TX_TIMER_USEC_MASK (0xfffffff0) +#define EEE_TX_TIMER_USEC_OFFSET 4 +#define EEE_TX_TIMER_USEC_BALANCED_TIME (0xa00) +#define EEE_TX_TIMER_USEC_AGGRESSIVE_TIME (0x100) +#define EEE_TX_TIMER_USEC_LATENCY_TIME (0x6000) + + u32 link_modes; /* Additional link modes */ +#define LINK_MODE_SMARTLINQ_ENABLE 0x1 /* XXX Deprecate */ +}; + +struct port_mf_cfg { + u32 dynamic_cfg; /* device control channel */ +#define PORT_MF_CFG_OV_TAG_MASK 0x0000ffff +#define PORT_MF_CFG_OV_TAG_OFFSET 0 +#define PORT_MF_CFG_OV_TAG_DEFAULT PORT_MF_CFG_OV_TAG_MASK + + u32 reserved[1]; +}; + +/* DO NOT add new fields in the middle + * MUST be synced with struct pmm_stats_map + */ +struct eth_stats { + u64 r64; /* 0x00 (Offset 0x00 ) RX 64-byte frame counter*/ + u64 r127; /* 0x01 (Offset 0x08 ) RX 65 to 127 byte frame counter*/ + u64 r255; /* 0x02 (Offset 0x10 ) RX 128 to 255 byte frame counter*/ + u64 r511; /* 0x03 (Offset 0x18 ) RX 256 to 511 byte frame counter*/ + u64 r1023; /* 0x04 (Offset 0x20 ) RX 512 to 1023 byte frame counter*/ +/* 0x05 (Offset 0x28 ) RX 1024 to 1518 byte frame counter */ + u64 r1518; + union { + struct { /* bb */ +/* 0x06 (Offset 0x30 ) RX 1519 to 1522 byte VLAN-tagged frame counter */ + u64 r1522; +/* 0x07 (Offset 0x38 ) RX 1519 to 2047 byte frame counter*/ + u64 r2047; +/* 0x08 (Offset 0x40 ) RX 2048 to 4095 byte frame counter*/ + u64 r4095; +/* 0x09 (Offset 0x48 ) RX 4096 to 9216 byte frame counter*/ + u64 r9216; +/* 0x0A (Offset 0x50 ) RX 9217 to 16383 byte frame counter */ + u64 r16383; + } bb0; + struct { /* ah */ + u64 unused1; +/* 0x07 (Offset 0x38 ) RX 1519 to max byte frame counter*/ + u64 r1519_to_max; + u64 unused2; + u64 unused3; + u64 unused4; + } ah0; + } u0; + u64 rfcs; /* 0x0F (Offset 0x58 ) RX FCS error frame counter*/ + u64 rxcf; /* 0x10 (Offset 0x60 ) RX control frame counter*/ + u64 rxpf; /* 0x11 (Offset 0x68 ) RX pause frame counter*/ + u64 rxpp; /* 0x12 (Offset 0x70 ) RX PFC frame 
counter*/ + u64 raln; /* 0x16 (Offset 0x78 ) RX alignment error counter*/ + u64 rfcr; /* 0x19 (Offset 0x80 ) RX false carrier counter */ + u64 rovr; /* 0x1A (Offset 0x88 ) RX oversized frame counter*/ + u64 rjbr; /* 0x1B (Offset 0x90 ) RX jabber frame counter */ + u64 rund; /* 0x34 (Offset 0x98 ) RX undersized frame counter */ + u64 rfrg; /* 0x35 (Offset 0xa0 ) RX fragment counter */ + u64 t64; /* 0x40 (Offset 0xa8 ) TX 64-byte frame counter */ + u64 t127; /* 0x41 (Offset 0xb0 ) TX 65 to 127 byte frame counter */ + u64 t255; /* 0x42 (Offset 0xb8 ) TX 128 to 255 byte frame counter*/ + u64 t511; /* 0x43 (Offset 0xc0 ) TX 256 to 511 byte frame counter*/ + u64 t1023; /* 0x44 (Offset 0xc8 ) TX 512 to 1023 byte frame counter*/ +/* 0x45 (Offset 0xd0 ) TX 1024 to 1518 byte frame counter */ + u64 t1518; + union { + struct { /* bb */ +/* 0x47 (Offset 0xd8 ) TX 1519 to 2047 byte frame counter */ + u64 t2047; +/* 0x48 (Offset 0xe0 ) TX 2048 to 4095 byte frame counter */ + u64 t4095; +/* 0x49 (Offset 0xe8 ) TX 4096 to 9216 byte frame counter */ + u64 t9216; +/* 0x4A (Offset 0xf0 ) TX 9217 to 16383 byte frame counter */ + u64 t16383; + } bb1; + struct { /* ah */ +/* 0x47 (Offset 0xd8 ) TX 1519 to max byte frame counter */ + u64 t1519_to_max; + u64 unused6; + u64 unused7; + u64 unused8; + } ah1; + } u1; + u64 txpf; /* 0x50 (Offset 0xf8 ) TX pause frame counter */ + u64 txpp; /* 0x51 (Offset 0x100) TX PFC frame counter */ +/* 0x6C (Offset 0x108) Transmit Logical Type LLFC message counter */ + union { + struct { /* bb */ +/* 0x6C (Offset 0x108) Transmit Logical Type LLFC message counter */ + u64 tlpiec; +/* 0x6E (Offset 0x110) Transmit Total Collision Counter */ + u64 tncl; + } bb2; + struct { /* ah */ + u64 unused9; + u64 unused10; + } ah2; + } u2; + u64 rbyte; /* 0x3d (Offset 0x118) RX byte counter */ + u64 rxuca; /* 0x0c (Offset 0x120) RX UC frame counter */ + u64 rxmca; /* 0x0d (Offset 0x128) RX MC frame counter */ + u64 rxbca; /* 0x0e (Offset 0x130) RX BC frame counter */ +/* 0x22 (Offset 0x138) RX good frame (good CRC, not oversized, no ERROR) */ + u64 rxpok; + u64 tbyte; /* 0x6f (Offset 0x140) TX byte counter */ + u64 txuca; /* 0x4d (Offset 0x148) TX UC frame counter */ + u64 txmca; /* 0x4e (Offset 0x150) TX MC frame counter */ + u64 txbca; /* 0x4f (Offset 0x158) TX BC frame counter */ + u64 txcf; /* 0x54 (Offset 0x160) TX control frame counter */ +/* HSI - Cannot add more stats to this struct. 
If needed, then need to open new + * struct + */ + +}; + +struct brb_stats { + u64 brb_truncate[8]; + u64 brb_discard[8]; +}; + +struct port_stats { + struct brb_stats brb; + struct eth_stats eth; +}; + +/*----+------------------------------------------------------------------------ + * C | Number and | Ports in| Ports in|2 PHY-s |# of ports|# of engines + * h | rate of | team #1 | team #2 |are used|per path | (paths) + * i | physical | | | | | enabled + * p | ports | | | | | + *====+============+=========+=========+========+==========+=================== + * BB | 1x100G | This is special mode, where there are actually 2 HW func + * BB | 2x10/20Gbps| 0,1 | NA | No | 1 | 1 + * BB | 2x40 Gbps | 0,1 | NA | Yes | 1 | 1 + * BB | 2x50Gbps | 0,1 | NA | No | 1 | 1 + * BB | 4x10Gbps | 0,2 | 1,3 | No | 1/2 | 1,2 (2 is optional) + * BB | 4x10Gbps | 0,1 | 2,3 | No | 1/2 | 1,2 (2 is optional) + * BB | 4x10Gbps | 0,3 | 1,2 | No | 1/2 | 1,2 (2 is optional) + * BB | 4x10Gbps | 0,1,2,3 | NA | No | 1 | 1 + * AH | 2x10/20Gbps| 0,1 | NA | NA | 1 | NA + * AH | 4x10Gbps | 0,1 | 2,3 | NA | 2 | NA + * AH | 4x10Gbps | 0,2 | 1,3 | NA | 2 | NA + * AH | 4x10Gbps | 0,3 | 1,2 | NA | 2 | NA + * AH | 4x10Gbps | 0,1,2,3 | NA | NA | 1 | NA + *====+============+=========+=========+========+==========+=================== + */ + +#define CMT_TEAM0 0 +#define CMT_TEAM1 1 +#define CMT_TEAM_MAX 2 + +struct couple_mode_teaming { + u8 port_cmt[MCP_GLOB_PORT_MAX]; +#define PORT_CMT_IN_TEAM (1 << 0) + +#define PORT_CMT_PORT_ROLE (1 << 1) +#define PORT_CMT_PORT_INACTIVE (0 << 1) +#define PORT_CMT_PORT_ACTIVE (1 << 1) + +#define PORT_CMT_TEAM_MASK (1 << 2) +#define PORT_CMT_TEAM0 (0 << 2) +#define PORT_CMT_TEAM1 (1 << 2) +}; + +/************************************** + * LLDP and DCBX HSI structures + **************************************/ +#define LLDP_CHASSIS_ID_STAT_LEN 4 +#define LLDP_PORT_ID_STAT_LEN 4 +#define DCBX_MAX_APP_PROTOCOL 32 +#define MAX_SYSTEM_LLDP_TLV_DATA 32 /* In dwords. 128 in bytes*/ +#define MAX_TLV_BUFFER 128 /* In dwords. 512 in bytes*/ +typedef enum _lldp_agent_e { + LLDP_NEAREST_BRIDGE = 0, + LLDP_NEAREST_NON_TPMR_BRIDGE, + LLDP_NEAREST_CUSTOMER_BRIDGE, + LLDP_MAX_LLDP_AGENTS +} lldp_agent_e; + +struct lldp_config_params_s { + u32 config; +#define LLDP_CONFIG_TX_INTERVAL_MASK 0x000000ff +#define LLDP_CONFIG_TX_INTERVAL_OFFSET 0 +#define LLDP_CONFIG_HOLD_MASK 0x00000f00 +#define LLDP_CONFIG_HOLD_OFFSET 8 +#define LLDP_CONFIG_MAX_CREDIT_MASK 0x0000f000 +#define LLDP_CONFIG_MAX_CREDIT_OFFSET 12 +#define LLDP_CONFIG_ENABLE_RX_MASK 0x40000000 +#define LLDP_CONFIG_ENABLE_RX_OFFSET 30 +#define LLDP_CONFIG_ENABLE_TX_MASK 0x80000000 +#define LLDP_CONFIG_ENABLE_TX_OFFSET 31 + /* Holds local Chassis ID TLV header, subtype and 9B of payload. + * If firtst byte is 0, then we will use default chassis ID + */ + u32 local_chassis_id[LLDP_CHASSIS_ID_STAT_LEN]; + /* Holds local Port ID TLV header, subtype and 9B of payload. + * If firtst byte is 0, then we will use default port ID + */ + u32 local_port_id[LLDP_PORT_ID_STAT_LEN]; +}; + +struct lldp_status_params_s { + u32 prefix_seq_num; + u32 status; /* TBD */ + /* Holds remote Chassis ID TLV header, subtype and 9B of payload. */ + u32 peer_chassis_id[LLDP_CHASSIS_ID_STAT_LEN]; + /* Holds remote Port ID TLV header, subtype and 9B of payload. 
*/ + u32 peer_port_id[LLDP_PORT_ID_STAT_LEN]; + u32 suffix_seq_num; +}; + +struct dcbx_ets_feature { + u32 flags; +#define DCBX_ETS_ENABLED_MASK 0x00000001 +#define DCBX_ETS_ENABLED_OFFSET 0 +#define DCBX_ETS_WILLING_MASK 0x00000002 +#define DCBX_ETS_WILLING_OFFSET 1 +#define DCBX_ETS_ERROR_MASK 0x00000004 +#define DCBX_ETS_ERROR_OFFSET 2 +#define DCBX_ETS_CBS_MASK 0x00000008 +#define DCBX_ETS_CBS_OFFSET 3 +#define DCBX_ETS_MAX_TCS_MASK 0x000000f0 +#define DCBX_ETS_MAX_TCS_OFFSET 4 +#define DCBX_OOO_TC_MASK 0x00000f00 +#define DCBX_OOO_TC_OFFSET 8 +/* Entries in tc table are orginized that the left most is pri 0, right most is + * prio 7 + */ + + u32 pri_tc_tbl[1]; +/* Fixed TCP OOO TC usage is deprecated and used only for driver backward + * compatibility + */ +#define DCBX_TCP_OOO_TC (4) +#define DCBX_TCP_OOO_K2_4PORT_TC (3) + +#define NIG_ETS_ISCSI_OOO_CLIENT_OFFSET (DCBX_TCP_OOO_TC + 1) +#define DCBX_CEE_STRICT_PRIORITY 0xf +/* Entries in tc table are orginized that the left most is pri 0, right most is + * prio 7 + */ + + u32 tc_bw_tbl[2]; +/* Entries in tc table are orginized that the left most is pri 0, right most is + * prio 7 + */ + + u32 tc_tsa_tbl[2]; +#define DCBX_ETS_TSA_STRICT 0 +#define DCBX_ETS_TSA_CBS 1 +#define DCBX_ETS_TSA_ETS 2 +}; + +struct dcbx_app_priority_entry { + u32 entry; +#define DCBX_APP_PRI_MAP_MASK 0x000000ff +#define DCBX_APP_PRI_MAP_OFFSET 0 +#define DCBX_APP_PRI_0 0x01 +#define DCBX_APP_PRI_1 0x02 +#define DCBX_APP_PRI_2 0x04 +#define DCBX_APP_PRI_3 0x08 +#define DCBX_APP_PRI_4 0x10 +#define DCBX_APP_PRI_5 0x20 +#define DCBX_APP_PRI_6 0x40 +#define DCBX_APP_PRI_7 0x80 +#define DCBX_APP_SF_MASK 0x00000300 +#define DCBX_APP_SF_OFFSET 8 +#define DCBX_APP_SF_ETHTYPE 0 +#define DCBX_APP_SF_PORT 1 +#define DCBX_APP_SF_IEEE_MASK 0x0000f000 +#define DCBX_APP_SF_IEEE_OFFSET 12 +#define DCBX_APP_SF_IEEE_RESERVED 0 +#define DCBX_APP_SF_IEEE_ETHTYPE 1 +#define DCBX_APP_SF_IEEE_TCP_PORT 2 +#define DCBX_APP_SF_IEEE_UDP_PORT 3 +#define DCBX_APP_SF_IEEE_TCP_UDP_PORT 4 + +#define DCBX_APP_PROTOCOL_ID_MASK 0xffff0000 +#define DCBX_APP_PROTOCOL_ID_OFFSET 16 +}; + + +/* FW structure in BE */ +struct dcbx_app_priority_feature { + u32 flags; +#define DCBX_APP_ENABLED_MASK 0x00000001 +#define DCBX_APP_ENABLED_OFFSET 0 +#define DCBX_APP_WILLING_MASK 0x00000002 +#define DCBX_APP_WILLING_OFFSET 1 +#define DCBX_APP_ERROR_MASK 0x00000004 +#define DCBX_APP_ERROR_OFFSET 2 + /* Not in use + #define DCBX_APP_DEFAULT_PRI_MASK 0x00000f00 + #define DCBX_APP_DEFAULT_PRI_OFFSET 8 + */ +#define DCBX_APP_MAX_TCS_MASK 0x0000f000 +#define DCBX_APP_MAX_TCS_OFFSET 12 +#define DCBX_APP_NUM_ENTRIES_MASK 0x00ff0000 +#define DCBX_APP_NUM_ENTRIES_OFFSET 16 + struct dcbx_app_priority_entry app_pri_tbl[DCBX_MAX_APP_PROTOCOL]; +}; + +/* FW structure in BE */ +struct dcbx_features { + /* PG feature */ + struct dcbx_ets_feature ets; + /* PFC feature */ + u32 pfc; +#define DCBX_PFC_PRI_EN_BITMAP_MASK 0x000000ff +#define DCBX_PFC_PRI_EN_BITMAP_OFFSET 0 +#define DCBX_PFC_PRI_EN_BITMAP_PRI_0 0x01 +#define DCBX_PFC_PRI_EN_BITMAP_PRI_1 0x02 +#define DCBX_PFC_PRI_EN_BITMAP_PRI_2 0x04 +#define DCBX_PFC_PRI_EN_BITMAP_PRI_3 0x08 +#define DCBX_PFC_PRI_EN_BITMAP_PRI_4 0x10 +#define DCBX_PFC_PRI_EN_BITMAP_PRI_5 0x20 +#define DCBX_PFC_PRI_EN_BITMAP_PRI_6 0x40 +#define DCBX_PFC_PRI_EN_BITMAP_PRI_7 0x80 + +#define DCBX_PFC_FLAGS_MASK 0x0000ff00 +#define DCBX_PFC_FLAGS_OFFSET 8 +#define DCBX_PFC_CAPS_MASK 0x00000f00 +#define DCBX_PFC_CAPS_OFFSET 8 +#define DCBX_PFC_MBC_MASK 0x00004000 +#define DCBX_PFC_MBC_OFFSET 14 
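+/* PFC feature state flags (willing/enabled/error), mirroring the ETS and APP feature flags above. */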
+#define DCBX_PFC_WILLING_MASK 0x00008000 +#define DCBX_PFC_WILLING_OFFSET 15 +#define DCBX_PFC_ENABLED_MASK 0x00010000 +#define DCBX_PFC_ENABLED_OFFSET 16 +#define DCBX_PFC_ERROR_MASK 0x00020000 +#define DCBX_PFC_ERROR_OFFSET 17 + + /* APP feature */ + struct dcbx_app_priority_feature app; +}; + +struct dcbx_local_params { + u32 config; +#define DCBX_CONFIG_VERSION_MASK 0x00000007 +#define DCBX_CONFIG_VERSION_OFFSET 0 +#define DCBX_CONFIG_VERSION_DISABLED 0 +#define DCBX_CONFIG_VERSION_IEEE 1 +#define DCBX_CONFIG_VERSION_CEE 2 +#define DCBX_CONFIG_VERSION_DYNAMIC \ + (DCBX_CONFIG_VERSION_IEEE | DCBX_CONFIG_VERSION_CEE) +#define DCBX_CONFIG_VERSION_STATIC 4 + + u32 flags; + struct dcbx_features features; +}; + +struct dcbx_mib { + u32 prefix_seq_num; + u32 flags; + /* + #define DCBX_CONFIG_VERSION_MASK 0x00000007 + #define DCBX_CONFIG_VERSION_OFFSET 0 + #define DCBX_CONFIG_VERSION_DISABLED 0 + #define DCBX_CONFIG_VERSION_IEEE 1 + #define DCBX_CONFIG_VERSION_CEE 2 + #define DCBX_CONFIG_VERSION_STATIC 4 + */ + struct dcbx_features features; + u32 suffix_seq_num; +}; + +struct lldp_system_tlvs_buffer_s { + u32 flags; +#define LLDP_SYSTEM_TLV_VALID_MASK 0x1 +#define LLDP_SYSTEM_TLV_VALID_OFFSET 0 +/* This bit defines if system TLVs are instead of mandatory TLVS or in + * addition to them. Set 1 for replacing mandatory TLVs + */ +#define LLDP_SYSTEM_TLV_MANDATORY_MASK 0x2 +#define LLDP_SYSTEM_TLV_MANDATORY_OFFSET 1 +#define LLDP_SYSTEM_TLV_LENGTH_MASK 0xffff0000 +#define LLDP_SYSTEM_TLV_LENGTH_OFFSET 16 + u32 data[MAX_SYSTEM_LLDP_TLV_DATA]; +}; + +/* Since this struct is written by MFW and read by driver need to add + * sequence guards (as in case of DCBX MIB) + */ +struct lldp_received_tlvs_s { + u32 prefix_seq_num; + u32 length; + u32 tlvs_buffer[MAX_TLV_BUFFER]; + u32 suffix_seq_num; +}; + +struct dcb_dscp_map { + u32 flags; +#define DCB_DSCP_ENABLE_MASK 0x1 +#define DCB_DSCP_ENABLE_OFFSET 0 +#define DCB_DSCP_ENABLE 1 + u32 dscp_pri_map[8]; +}; + +/************************************** + * Attributes commands + **************************************/ + +enum _attribute_commands_e { + ATTRIBUTE_CMD_READ = 0, + ATTRIBUTE_CMD_WRITE, + ATTRIBUTE_CMD_READ_CLEAR, + ATTRIBUTE_CMD_CLEAR, + ATTRIBUTE_NUM_OF_COMMANDS +}; + +/**************************************/ +/* */ +/* P U B L I C G L O B A L */ +/* */ +/**************************************/ +struct public_global { + u32 max_path; /* 32bit is wasty, but this will be used often */ +/* (Global) 32bit is wasty, but this will be used often */ + u32 max_ports; +#define MODE_1P 1 /* TBD - NEED TO THINK OF A BETTER NAME */ +#define MODE_2P 2 +#define MODE_3P 3 +#define MODE_4P 4 + u32 debug_mb_offset; + u32 phymod_dbg_mb_offset; + struct couple_mode_teaming cmt; +/* Temperature in Celcius (-255C / +255C), measured every second. 
*/ + s32 internal_temperature; + u32 mfw_ver; + u32 running_bundle_id; + s32 external_temperature; + u32 mdump_reason; +#define MDUMP_REASON_INTERNAL_ERROR (1 << 0) +#define MDUMP_REASON_EXTERNAL_TRIGGER (1 << 1) +#define MDUMP_REASON_DUMP_AGED (1 << 2) + u32 ext_phy_upgrade_fw; +#define EXT_PHY_FW_UPGRADE_STATUS_MASK (0x0000ffff) +#define EXT_PHY_FW_UPGRADE_STATUS_OFFSET (0) +#define EXT_PHY_FW_UPGRADE_STATUS_IN_PROGRESS (1) +#define EXT_PHY_FW_UPGRADE_STATUS_FAILED (2) +#define EXT_PHY_FW_UPGRADE_STATUS_SUCCESS (3) +#define EXT_PHY_FW_UPGRADE_TYPE_MASK (0xffff0000) +#define EXT_PHY_FW_UPGRADE_TYPE_OFFSET (16) +}; + +/**************************************/ +/* */ +/* P U B L I C P A T H */ +/* */ +/**************************************/ + +/**************************************************************************** + * Shared Memory 2 Region * + ****************************************************************************/ +/* The fw_flr_ack is actually built in the following way: */ +/* 8 bit: PF ack */ +/* 128 bit: VF ack */ +/* 8 bit: ios_dis_ack */ +/* In order to maintain endianity in the mailbox hsi, we want to keep using */ +/* u32. The fw must have the VF right after the PF since this is how it */ +/* access arrays(it expects always the VF to reside after the PF, and that */ +/* makes the calculation much easier for it. ) */ +/* In order to answer both limitations, and keep the struct small, the code */ +/* will abuse the structure defined here to achieve the actual partition */ +/* above */ +/****************************************************************************/ +struct fw_flr_mb { + u32 aggint; + u32 opgen_addr; + u32 accum_ack; /* 0..15:PF, 16..207:VF, 256..271:IOV_DIS */ +#define ACCUM_ACK_PF_BASE 0 +#define ACCUM_ACK_PF_SHIFT 0 + +#define ACCUM_ACK_VF_BASE 8 +#define ACCUM_ACK_VF_SHIFT 3 + +#define ACCUM_ACK_IOV_DIS_BASE 256 +#define ACCUM_ACK_IOV_DIS_SHIFT 8 + +}; + +struct public_path { + struct fw_flr_mb flr_mb; + /* + * mcp_vf_disabled is set by the MCP to indicate the driver about VFs + * which were disabled/flred + */ + u32 mcp_vf_disabled[VF_MAX_STATIC / 32]; /* 0x003c */ + +/* Reset on mcp reset, and incremented for eveny process kill event. 
*/ + u32 process_kill; +#define PROCESS_KILL_COUNTER_MASK 0x0000ffff +#define PROCESS_KILL_COUNTER_OFFSET 0 +#define PROCESS_KILL_GLOB_AEU_BIT_MASK 0xffff0000 +#define PROCESS_KILL_GLOB_AEU_BIT_OFFSET 16 +#define GLOBAL_AEU_BIT(aeu_reg_id, aeu_bit) (aeu_reg_id * 32 + aeu_bit) + /*Added to support E5 240 VFs*/ + u32 mcp_vf_disabled2[ADDED_VF_BITMAP_SIZE]; +}; + +/**************************************/ +/* */ +/* P U B L I C P O R T */ +/* */ +/**************************************/ +#define FC_NPIV_WWPN_SIZE 8 +#define FC_NPIV_WWNN_SIZE 8 +struct dci_npiv_settings { + u8 npiv_wwpn[FC_NPIV_WWPN_SIZE]; + u8 npiv_wwnn[FC_NPIV_WWNN_SIZE]; +}; + +struct dci_fc_npiv_cfg { + /* hdr used internally by the MFW */ + u32 hdr; + u32 num_of_npiv; +}; + +#define MAX_NUMBER_NPIV 64 +struct dci_fc_npiv_tbl { + struct dci_fc_npiv_cfg fc_npiv_cfg; + struct dci_npiv_settings settings[MAX_NUMBER_NPIV]; +}; + +/**************************************************************************** + * Driver <-> FW Mailbox * + ****************************************************************************/ + +struct public_port { + u32 validity_map; /* 0x0 (4*2 = 0x8) */ + + /* validity bits */ +#define MCP_VALIDITY_PCI_CFG 0x00100000 +#define MCP_VALIDITY_MB 0x00200000 +#define MCP_VALIDITY_DEV_INFO 0x00400000 +#define MCP_VALIDITY_RESERVED 0x00000007 + + /* One licensing bit should be set */ +/* yaniv - tbd ? license */ +#define MCP_VALIDITY_LIC_KEY_IN_EFFECT_MASK 0x00000038 +#define MCP_VALIDITY_LIC_MANUF_KEY_IN_EFFECT 0x00000008 +#define MCP_VALIDITY_LIC_UPGRADE_KEY_IN_EFFECT 0x00000010 +#define MCP_VALIDITY_LIC_NO_KEY_IN_EFFECT 0x00000020 + + /* Active MFW */ +#define MCP_VALIDITY_ACTIVE_MFW_UNKNOWN 0x00000000 +#define MCP_VALIDITY_ACTIVE_MFW_MASK 0x000001c0 +#define MCP_VALIDITY_ACTIVE_MFW_NCSI 0x00000040 +#define MCP_VALIDITY_ACTIVE_MFW_NONE 0x000001c0 + + u32 link_status; +#define LINK_STATUS_LINK_UP 0x00000001 +#define LINK_STATUS_SPEED_AND_DUPLEX_MASK 0x0000001e +#define LINK_STATUS_SPEED_AND_DUPLEX_1000THD (1 << 1) +#define LINK_STATUS_SPEED_AND_DUPLEX_1000TFD (2 << 1) +#define LINK_STATUS_SPEED_AND_DUPLEX_10G (3 << 1) +#define LINK_STATUS_SPEED_AND_DUPLEX_20G (4 << 1) +#define LINK_STATUS_SPEED_AND_DUPLEX_40G (5 << 1) +#define LINK_STATUS_SPEED_AND_DUPLEX_50G (6 << 1) +#define LINK_STATUS_SPEED_AND_DUPLEX_100G (7 << 1) +#define LINK_STATUS_SPEED_AND_DUPLEX_25G (8 << 1) +#define LINK_STATUS_AUTO_NEGOTIATE_ENABLED 0x00000020 +#define LINK_STATUS_AUTO_NEGOTIATE_COMPLETE 0x00000040 +#define LINK_STATUS_PARALLEL_DETECTION_USED 0x00000080 +#define LINK_STATUS_PFC_ENABLED 0x00000100 +#define LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE 0x00000200 +#define LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE 0x00000400 +#define LINK_STATUS_LINK_PARTNER_10G_CAPABLE 0x00000800 +#define LINK_STATUS_LINK_PARTNER_20G_CAPABLE 0x00001000 +#define LINK_STATUS_LINK_PARTNER_40G_CAPABLE 0x00002000 +#define LINK_STATUS_LINK_PARTNER_50G_CAPABLE 0x00004000 +#define LINK_STATUS_LINK_PARTNER_100G_CAPABLE 0x00008000 +#define LINK_STATUS_LINK_PARTNER_25G_CAPABLE 0x00010000 +#define LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK 0x000C0000 +#define LINK_STATUS_LINK_PARTNER_NOT_PAUSE_CAPABLE (0 << 18) +#define LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE (1 << 18) +#define LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE (2 << 18) +#define LINK_STATUS_LINK_PARTNER_BOTH_PAUSE (3 << 18) +#define LINK_STATUS_SFP_TX_FAULT 0x00100000 +#define LINK_STATUS_TX_FLOW_CONTROL_ENABLED 0x00200000 +#define LINK_STATUS_RX_FLOW_CONTROL_ENABLED 0x00400000 +#define 
LINK_STATUS_RX_SIGNAL_PRESENT 0x00800000 +#define LINK_STATUS_MAC_LOCAL_FAULT 0x01000000 +#define LINK_STATUS_MAC_REMOTE_FAULT 0x02000000 +#define LINK_STATUS_UNSUPPORTED_SPD_REQ 0x04000000 +#define LINK_STATUS_FEC_MODE_MASK 0x38000000 +#define LINK_STATUS_FEC_MODE_NONE (0 << 27) +#define LINK_STATUS_FEC_MODE_FIRECODE_CL74 (1 << 27) +#define LINK_STATUS_FEC_MODE_RS_CL91 (2 << 27) +#define LINK_STATUS_EXT_PHY_LINK_UP 0x40000000 + + u32 link_status1; + u32 ext_phy_fw_version; +/* Points to struct eth_phy_cfg (For READ-ONLY) */ + u32 drv_phy_cfg_addr; + + u32 port_stx; + + u32 stat_nig_timer; + + struct port_mf_cfg port_mf_config; + struct port_stats stats; + + u32 media_type; +#define MEDIA_UNSPECIFIED 0x0 +#define MEDIA_SFPP_10G_FIBER 0x1 /* Use MEDIA_MODULE_FIBER instead */ +#define MEDIA_XFP_FIBER 0x2 /* Use MEDIA_MODULE_FIBER instead */ +#define MEDIA_DA_TWINAX 0x3 +#define MEDIA_BASE_T 0x4 +#define MEDIA_SFP_1G_FIBER 0x5 /* Use MEDIA_MODULE_FIBER instead */ +#define MEDIA_MODULE_FIBER 0x6 +#define MEDIA_KR 0xf0 +#define MEDIA_NOT_PRESENT 0xff + + u32 lfa_status; +#define LFA_LINK_FLAP_REASON_OFFSET 0 +#define LFA_LINK_FLAP_REASON_MASK 0x000000ff +#define LFA_NO_REASON (0 << 0) +#define LFA_LINK_DOWN (1 << 0) +#define LFA_FORCE_INIT (1 << 1) +#define LFA_LOOPBACK_MISMATCH (1 << 2) +#define LFA_SPEED_MISMATCH (1 << 3) +#define LFA_FLOW_CTRL_MISMATCH (1 << 4) +#define LFA_ADV_SPEED_MISMATCH (1 << 5) +#define LFA_EEE_MISMATCH (1 << 6) +#define LFA_LINK_MODES_MISMATCH (1 << 7) +#define LINK_FLAP_AVOIDANCE_COUNT_OFFSET 8 +#define LINK_FLAP_AVOIDANCE_COUNT_MASK 0x0000ff00 +#define LINK_FLAP_COUNT_OFFSET 16 +#define LINK_FLAP_COUNT_MASK 0x00ff0000 + + u32 link_change_count; + + /* LLDP params */ +/* offset: 536 bytes? */ + struct lldp_config_params_s lldp_config_params[LLDP_MAX_LLDP_AGENTS]; + struct lldp_status_params_s lldp_status_params[LLDP_MAX_LLDP_AGENTS]; + struct lldp_system_tlvs_buffer_s system_lldp_tlvs_buf; + + /* DCBX related MIB */ + struct dcbx_local_params local_admin_dcbx_mib; + struct dcbx_mib remote_dcbx_mib; + struct dcbx_mib operational_dcbx_mib; + +/* FC_NPIV table offset & size in NVRAM value of 0 means not present */ + + u32 fc_npiv_nvram_tbl_addr; + u32 fc_npiv_nvram_tbl_size; + u32 transceiver_data; +#define ETH_TRANSCEIVER_STATE_MASK 0x000000FF +#define ETH_TRANSCEIVER_STATE_OFFSET 0x00000000 +#define ETH_TRANSCEIVER_STATE_UNPLUGGED 0x00000000 +#define ETH_TRANSCEIVER_STATE_PRESENT 0x00000001 +#define ETH_TRANSCEIVER_STATE_VALID 0x00000003 +#define ETH_TRANSCEIVER_STATE_UPDATING 0x00000008 +#define ETH_TRANSCEIVER_TYPE_MASK 0x0000FF00 +#define ETH_TRANSCEIVER_TYPE_OFFSET 0x00000008 +#define ETH_TRANSCEIVER_TYPE_NONE 0x00000000 +#define ETH_TRANSCEIVER_TYPE_UNKNOWN 0x000000FF +/* 1G Passive copper cable */ +#define ETH_TRANSCEIVER_TYPE_1G_PCC 0x01 +/* 1G Active copper cable */ +#define ETH_TRANSCEIVER_TYPE_1G_ACC 0x02 +#define ETH_TRANSCEIVER_TYPE_1G_LX 0x03 +#define ETH_TRANSCEIVER_TYPE_1G_SX 0x04 +#define ETH_TRANSCEIVER_TYPE_10G_SR 0x05 +#define ETH_TRANSCEIVER_TYPE_10G_LR 0x06 +#define ETH_TRANSCEIVER_TYPE_10G_LRM 0x07 +#define ETH_TRANSCEIVER_TYPE_10G_ER 0x08 +/* 10G Passive copper cable */ +#define ETH_TRANSCEIVER_TYPE_10G_PCC 0x09 +/* 10G Active copper cable */ +#define ETH_TRANSCEIVER_TYPE_10G_ACC 0x0a +#define ETH_TRANSCEIVER_TYPE_XLPPI 0x0b +#define ETH_TRANSCEIVER_TYPE_40G_LR4 0x0c +#define ETH_TRANSCEIVER_TYPE_40G_SR4 0x0d +#define ETH_TRANSCEIVER_TYPE_40G_CR4 0x0e +/* Active optical cable */ +#define ETH_TRANSCEIVER_TYPE_100G_AOC 0x0f +#define 
ETH_TRANSCEIVER_TYPE_100G_SR4 0x10 +#define ETH_TRANSCEIVER_TYPE_100G_LR4 0x11 +#define ETH_TRANSCEIVER_TYPE_100G_ER4 0x12 +/* Active copper cable */ +#define ETH_TRANSCEIVER_TYPE_100G_ACC 0x13 +#define ETH_TRANSCEIVER_TYPE_100G_CR4 0x14 +#define ETH_TRANSCEIVER_TYPE_4x10G_SR 0x15 +/* 25G Passive copper cable - short */ +#define ETH_TRANSCEIVER_TYPE_25G_CA_N 0x16 +/* 25G Active copper cable - short */ +#define ETH_TRANSCEIVER_TYPE_25G_ACC_S 0x17 +/* 25G Passive copper cable - medium */ +#define ETH_TRANSCEIVER_TYPE_25G_CA_S 0x18 +/* 25G Active copper cable - medium */ +#define ETH_TRANSCEIVER_TYPE_25G_ACC_M 0x19 +/* 25G Passive copper cable - long */ +#define ETH_TRANSCEIVER_TYPE_25G_CA_L 0x1a +/* 25G Active copper cable - long */ +#define ETH_TRANSCEIVER_TYPE_25G_ACC_L 0x1b +#define ETH_TRANSCEIVER_TYPE_25G_SR 0x1c +#define ETH_TRANSCEIVER_TYPE_25G_LR 0x1d +#define ETH_TRANSCEIVER_TYPE_25G_AOC 0x1e + +#define ETH_TRANSCEIVER_TYPE_4x10G 0x1f +#define ETH_TRANSCEIVER_TYPE_4x25G_CR 0x20 +#define ETH_TRANSCEIVER_TYPE_1000BASET 0x21 +#define ETH_TRANSCEIVER_TYPE_10G_BASET 0x22 +#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR 0x30 +#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR 0x31 +#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR 0x32 +#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR 0x33 +#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR 0x34 +#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR 0x35 +#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_AOC 0x36 + u32 wol_info; + u32 wol_pkt_len; + u32 wol_pkt_details; + struct dcb_dscp_map dcb_dscp_map; + + u32 eee_status; +/* Set when EEE negotiation is complete. */ +#define EEE_ACTIVE_BIT (1 << 0) + +/* Shows the Local Device EEE capabilities */ +#define EEE_LD_ADV_STATUS_MASK 0x000000f0 +#define EEE_LD_ADV_STATUS_OFFSET 4 + #define EEE_1G_ADV (1 << 1) + #define EEE_10G_ADV (1 << 2) +/* Same values as in EEE_LD_ADV, but for Link Parter */ +#define EEE_LP_ADV_STATUS_MASK 0x00000f00 +#define EEE_LP_ADV_STATUS_OFFSET 8 + +/* Supported speeds for EEE */ +#define EEE_SUPPORTED_SPEED_MASK 0x0000f000 +#define EEE_SUPPORTED_SPEED_OFFSET 12 + #define EEE_1G_SUPPORTED (1 << 1) + #define EEE_10G_SUPPORTED (1 << 2) + + u32 eee_remote; /* Used for EEE in LLDP */ +#define EEE_REMOTE_TW_TX_MASK 0x0000ffff +#define EEE_REMOTE_TW_TX_OFFSET 0 +#define EEE_REMOTE_TW_RX_MASK 0xffff0000 +#define EEE_REMOTE_TW_RX_OFFSET 16 + + u32 module_info; +#define ETH_TRANSCEIVER_MONITORING_TYPE_MASK 0x000000FF +#define ETH_TRANSCEIVER_MONITORING_TYPE_OFFSET 0 +#define ETH_TRANSCEIVER_ADDR_CHNG_REQUIRED (1 << 2) +#define ETH_TRANSCEIVER_RCV_PWR_MEASURE_TYPE (1 << 3) +#define ETH_TRANSCEIVER_EXTERNALLY_CALIBRATED (1 << 4) +#define ETH_TRANSCEIVER_INTERNALLY_CALIBRATED (1 << 5) +#define ETH_TRANSCEIVER_HAS_DIAGNOSTIC (1 << 6) +#define ETH_TRANSCEIVER_IDENT_MASK 0x0000ff00 +#define ETH_TRANSCEIVER_IDENT_OFFSET 8 + + u32 oem_cfg_port; +#define OEM_CFG_CHANNEL_TYPE_MASK 0x00000003 +#define OEM_CFG_CHANNEL_TYPE_OFFSET 0 +#define OEM_CFG_CHANNEL_TYPE_VLAN_PARTITION 0x1 +#define OEM_CFG_CHANNEL_TYPE_STAGGED 0x2 + +#define OEM_CFG_SCHED_TYPE_MASK 0x0000000C +#define OEM_CFG_SCHED_TYPE_OFFSET 2 +#define OEM_CFG_SCHED_TYPE_ETS 0x1 +#define OEM_CFG_SCHED_TYPE_VNIC_BW 0x2 + + struct lldp_received_tlvs_s lldp_received_tlvs[LLDP_MAX_LLDP_AGENTS]; + u32 system_lldp_tlvs_buf2[MAX_SYSTEM_LLDP_TLV_DATA]; +}; + +/**************************************/ +/* */ +/* P U B L I C F U N C */ +/* */ +/**************************************/ + +struct public_func { + u32 
iscsi_boot_signature; + u32 iscsi_boot_block_offset; + + /* MTU size per funciton is needed for the OV feature */ + u32 mtu_size; +/* 9 entires for the C2S PCP map for each inner VLAN PCP + 1 default */ + + /* For PCP values 0-3 use the map lower */ + /* 0xFF000000 - PCP 0, 0x00FF0000 - PCP 1, + * 0x0000FF00 - PCP 2, 0x000000FF PCP 3 + */ + u32 c2s_pcp_map_lower; + /* For PCP values 4-7 use the map upper */ + /* 0xFF000000 - PCP 4, 0x00FF0000 - PCP 5, + * 0x0000FF00 - PCP 6, 0x000000FF PCP 7 + */ + u32 c2s_pcp_map_upper; + + /* For PCP default value get the MSB byte of the map default */ + u32 c2s_pcp_map_default; + + u32 reserved[4]; + + /* replace old mf_cfg */ + u32 config; + /* E/R/I/D */ + /* function 0 of each port cannot be hidden */ +#define FUNC_MF_CFG_FUNC_HIDE 0x00000001 +#define FUNC_MF_CFG_PAUSE_ON_HOST_RING 0x00000002 +#define FUNC_MF_CFG_PAUSE_ON_HOST_RING_OFFSET 0x00000001 + + +#define FUNC_MF_CFG_PROTOCOL_MASK 0x000000f0 +#define FUNC_MF_CFG_PROTOCOL_OFFSET 4 +#define FUNC_MF_CFG_PROTOCOL_ETHERNET 0x00000000 +#define FUNC_MF_CFG_PROTOCOL_ISCSI 0x00000010 +#define FUNC_MF_CFG_PROTOCOL_FCOE 0x00000020 +#define FUNC_MF_CFG_PROTOCOL_ROCE 0x00000030 +#define FUNC_MF_CFG_PROTOCOL_MAX 0x00000030 + + /* MINBW, MAXBW */ + /* value range - 0..100, increments in 1 % */ +#define FUNC_MF_CFG_MIN_BW_MASK 0x0000ff00 +#define FUNC_MF_CFG_MIN_BW_OFFSET 8 +#define FUNC_MF_CFG_MIN_BW_DEFAULT 0x00000000 +#define FUNC_MF_CFG_MAX_BW_MASK 0x00ff0000 +#define FUNC_MF_CFG_MAX_BW_OFFSET 16 +#define FUNC_MF_CFG_MAX_BW_DEFAULT 0x00640000 + + u32 status; +#define FUNC_STATUS_VIRTUAL_LINK_UP 0x00000001 +#define FUNC_STATUS_LOGICAL_LINK_UP 0x00000002 +#define FUNC_STATUS_FORCED_LINK 0x00000004 + + u32 mac_upper; /* MAC */ +#define FUNC_MF_CFG_UPPERMAC_MASK 0x0000ffff +#define FUNC_MF_CFG_UPPERMAC_OFFSET 0 +#define FUNC_MF_CFG_UPPERMAC_DEFAULT FUNC_MF_CFG_UPPERMAC_MASK + u32 mac_lower; +#define FUNC_MF_CFG_LOWERMAC_DEFAULT 0xffffffff + + u32 fcoe_wwn_port_name_upper; + u32 fcoe_wwn_port_name_lower; + + u32 fcoe_wwn_node_name_upper; + u32 fcoe_wwn_node_name_lower; + + u32 ovlan_stag; /* tags */ +#define FUNC_MF_CFG_OV_STAG_MASK 0x0000ffff +#define FUNC_MF_CFG_OV_STAG_OFFSET 0 +#define FUNC_MF_CFG_OV_STAG_DEFAULT FUNC_MF_CFG_OV_STAG_MASK + + u32 pf_allocation; /* vf per pf */ + + u32 preserve_data; /* Will be used bt CCM */ + + u32 driver_last_activity_ts; + + /* + * drv_ack_vf_disabled is set by the PF driver to ack handled disabled + * VFs + */ + u32 drv_ack_vf_disabled[VF_MAX_STATIC / 32]; /* 0x0044 */ + + u32 drv_id; +#define DRV_ID_PDA_COMP_VER_MASK 0x0000ffff +#define DRV_ID_PDA_COMP_VER_OFFSET 0 + +#define LOAD_REQ_HSI_VERSION 2 +#define DRV_ID_MCP_HSI_VER_MASK 0x00ff0000 +#define DRV_ID_MCP_HSI_VER_OFFSET 16 +#define DRV_ID_MCP_HSI_VER_CURRENT (LOAD_REQ_HSI_VERSION << \ + DRV_ID_MCP_HSI_VER_OFFSET) + +#define DRV_ID_DRV_TYPE_MASK 0x7f000000 +#define DRV_ID_DRV_TYPE_OFFSET 24 +#define DRV_ID_DRV_TYPE_UNKNOWN (0 << DRV_ID_DRV_TYPE_OFFSET) +#define DRV_ID_DRV_TYPE_LINUX (1 << DRV_ID_DRV_TYPE_OFFSET) +#define DRV_ID_DRV_TYPE_WINDOWS (2 << DRV_ID_DRV_TYPE_OFFSET) +#define DRV_ID_DRV_TYPE_DIAG (3 << DRV_ID_DRV_TYPE_OFFSET) +#define DRV_ID_DRV_TYPE_PREBOOT (4 << DRV_ID_DRV_TYPE_OFFSET) +#define DRV_ID_DRV_TYPE_SOLARIS (5 << DRV_ID_DRV_TYPE_OFFSET) +#define DRV_ID_DRV_TYPE_VMWARE (6 << DRV_ID_DRV_TYPE_OFFSET) +#define DRV_ID_DRV_TYPE_FREEBSD (7 << DRV_ID_DRV_TYPE_OFFSET) +#define DRV_ID_DRV_TYPE_AIX (8 << DRV_ID_DRV_TYPE_OFFSET) + +#define DRV_ID_DRV_INIT_HW_MASK 0x80000000 +#define 
DRV_ID_DRV_INIT_HW_OFFSET 31 +#define DRV_ID_DRV_INIT_HW_FLAG (1 << DRV_ID_DRV_INIT_HW_OFFSET) + + u32 oem_cfg_func; +#define OEM_CFG_FUNC_TC_MASK 0x0000000F +#define OEM_CFG_FUNC_TC_OFFSET 0 +#define OEM_CFG_FUNC_TC_0 0x0 +#define OEM_CFG_FUNC_TC_1 0x1 +#define OEM_CFG_FUNC_TC_2 0x2 +#define OEM_CFG_FUNC_TC_3 0x3 +#define OEM_CFG_FUNC_TC_4 0x4 +#define OEM_CFG_FUNC_TC_5 0x5 +#define OEM_CFG_FUNC_TC_6 0x6 +#define OEM_CFG_FUNC_TC_7 0x7 + +#define OEM_CFG_FUNC_HOST_PRI_CTRL_MASK 0x00000030 +#define OEM_CFG_FUNC_HOST_PRI_CTRL_OFFSET 4 +#define OEM_CFG_FUNC_HOST_PRI_CTRL_VNIC 0x1 +#define OEM_CFG_FUNC_HOST_PRI_CTRL_OS 0x2 +}; + +/**************************************/ +/* */ +/* P U B L I C M B */ +/* */ +/**************************************/ +/* This is the only section that the driver can write to, and each */ +/* Basically each driver request to set feature parameters, + * will be done using a different command, which will be linked + * to a specific data structure from the union below. + * For huge strucuture, the common blank structure should be used. + */ + +struct mcp_mac { + u32 mac_upper; /* Upper 16 bits are always zeroes */ + u32 mac_lower; +}; + +struct mcp_val64 { + u32 lo; + u32 hi; +}; + +struct mcp_file_att { + u32 nvm_start_addr; + u32 len; +}; + +struct bist_nvm_image_att { + u32 return_code; + u32 image_type; /* Image type */ + u32 nvm_start_addr; /* NVM address of the image */ + u32 len; /* Include CRC */ +}; + +#define MCP_DRV_VER_STR_SIZE 16 +#define MCP_DRV_VER_STR_SIZE_DWORD (MCP_DRV_VER_STR_SIZE / sizeof(u32)) +#define MCP_DRV_NVM_BUF_LEN 32 +struct drv_version_stc { + u32 version; + u8 name[MCP_DRV_VER_STR_SIZE - 4]; +}; + +/* statistics for ncsi */ +struct lan_stats_stc { + u64 ucast_rx_pkts; + u64 ucast_tx_pkts; + u32 fcs_err; + u32 rserved; +}; + +struct fcoe_stats_stc { + u64 rx_pkts; + u64 tx_pkts; + u32 fcs_err; + u32 login_failure; +}; + +struct iscsi_stats_stc { + u64 rx_pdus; + u64 tx_pdus; + u64 rx_bytes; + u64 tx_bytes; +}; + +struct rdma_stats_stc { + u64 rx_pkts; + u64 tx_pkts; + u64 rx_bytes; + u64 tx_bytes; +}; + +struct ocbb_data_stc { + u32 ocbb_host_addr; + u32 ocsd_host_addr; + u32 ocsd_req_update_interval; +}; + +#define MAX_NUM_OF_SENSORS 7 +#define MFW_SENSOR_LOCATION_INTERNAL 1 +#define MFW_SENSOR_LOCATION_EXTERNAL 2 +#define MFW_SENSOR_LOCATION_SFP 3 + +#define SENSOR_LOCATION_OFFSET 0 +#define SENSOR_LOCATION_MASK 0x000000ff +#define THRESHOLD_HIGH_OFFSET 8 +#define THRESHOLD_HIGH_MASK 0x0000ff00 +#define CRITICAL_TEMPERATURE_OFFSET 16 +#define CRITICAL_TEMPERATURE_MASK 0x00ff0000 +#define CURRENT_TEMP_OFFSET 24 +#define CURRENT_TEMP_MASK 0xff000000 +struct temperature_status_stc { + u32 num_of_sensors; + u32 sensor[MAX_NUM_OF_SENSORS]; +}; + +/* crash dump configuration header */ +struct mdump_config_stc { + u32 version; + u32 config; + u32 epoc; + u32 num_of_logs; + u32 valid_logs; +}; + +enum resource_id_enum { + RESOURCE_NUM_SB_E = 0, + RESOURCE_NUM_L2_QUEUE_E = 1, + RESOURCE_NUM_VPORT_E = 2, + RESOURCE_NUM_VMQ_E = 3, +/* Not a real resource!! it's a factor used to calculate others */ + RESOURCE_FACTOR_NUM_RSS_PF_E = 4, +/* Not a real resource!! 
it's a factor used to calculate others */ + RESOURCE_FACTOR_RSS_PER_VF_E = 5, + RESOURCE_NUM_RL_E = 6, + RESOURCE_NUM_PQ_E = 7, + RESOURCE_NUM_VF_E = 8, + RESOURCE_VFC_FILTER_E = 9, + RESOURCE_ILT_E = 10, + RESOURCE_CQS_E = 11, + RESOURCE_GFT_PROFILES_E = 12, + RESOURCE_NUM_TC_E = 13, + RESOURCE_NUM_RSS_ENGINES_E = 14, + RESOURCE_LL2_QUEUE_E = 15, + RESOURCE_RDMA_STATS_QUEUE_E = 16, + RESOURCE_BDQ_E = 17, + RESOURCE_MAX_NUM, + RESOURCE_NUM_INVALID = 0xFFFFFFFF +}; + +/* Resource ID is to be filled by the driver in the MB request + * Size, offset & flags to be filled by the MFW in the MB response + */ +struct resource_info { + enum resource_id_enum res_id; + u32 size; /* number of allocated resources */ + u32 offset; /* Offset of the 1st resource */ + u32 vf_size; + u32 vf_offset; + u32 flags; +#define RESOURCE_ELEMENT_STRICT (1 << 0) +}; + +#define DRV_ROLE_NONE 0 +#define DRV_ROLE_PREBOOT 1 +#define DRV_ROLE_OS 2 +#define DRV_ROLE_KDUMP 3 + +struct load_req_stc { + u32 drv_ver_0; + u32 drv_ver_1; + u32 fw_ver; + u32 misc0; +#define LOAD_REQ_ROLE_MASK 0x000000FF +#define LOAD_REQ_ROLE_OFFSET 0 +#define LOAD_REQ_LOCK_TO_MASK 0x0000FF00 +#define LOAD_REQ_LOCK_TO_OFFSET 8 +#define LOAD_REQ_LOCK_TO_DEFAULT 0 +#define LOAD_REQ_LOCK_TO_NONE 255 +#define LOAD_REQ_FORCE_MASK 0x000F0000 +#define LOAD_REQ_FORCE_OFFSET 16 +#define LOAD_REQ_FORCE_NONE 0 +#define LOAD_REQ_FORCE_PF 1 +#define LOAD_REQ_FORCE_ALL 2 +#define LOAD_REQ_FLAGS0_MASK 0x00F00000 +#define LOAD_REQ_FLAGS0_OFFSET 20 +#define LOAD_REQ_FLAGS0_AVOID_RESET (0x1 << 0) +}; + +struct load_rsp_stc { + u32 drv_ver_0; + u32 drv_ver_1; + u32 fw_ver; + u32 misc0; +#define LOAD_RSP_ROLE_MASK 0x000000FF +#define LOAD_RSP_ROLE_OFFSET 0 +#define LOAD_RSP_HSI_MASK 0x0000FF00 +#define LOAD_RSP_HSI_OFFSET 8 +#define LOAD_RSP_FLAGS0_MASK 0x000F0000 +#define LOAD_RSP_FLAGS0_OFFSET 16 +#define LOAD_RSP_FLAGS0_DRV_EXISTS (0x1 << 0) +}; + +struct mdump_retain_data_stc { + u32 valid; + u32 epoch; + u32 pf; + u32 status; +}; + +struct attribute_cmd_write_stc { + u32 val; + u32 mask; + u32 offset; +}; + +union drv_union_data { + struct mcp_mac wol_mac; /* UNLOAD_DONE */ + +/* This configuration should be set by the driver for the LINK_SET command. */ + + struct eth_phy_cfg drv_phy_cfg; + + struct mcp_val64 val64; /* For PHY / AVS commands */ + + u8 raw_data[MCP_DRV_NVM_BUF_LEN]; + + struct mcp_file_att file_att; + + u32 ack_vf_disabled[VF_MAX_STATIC / 32]; + + struct drv_version_stc drv_version; + + struct lan_stats_stc lan_stats; + struct fcoe_stats_stc fcoe_stats; + struct iscsi_stats_stc iscsi_stats; + struct rdma_stats_stc rdma_stats; + struct ocbb_data_stc ocbb_info; + struct temperature_status_stc temp_info; + struct resource_info resource; + struct bist_nvm_image_att nvm_image_att; + struct mdump_config_stc mdump_config; + u32 dword; + + struct load_req_stc load_req; + struct load_rsp_stc load_rsp; + struct mdump_retain_data_stc mdump_retain; + struct attribute_cmd_write_stc attribute_cmd_write; + /* ... 
*/ +}; + +struct public_drv_mb { + u32 drv_mb_header; +#define DRV_MSG_CODE_MASK 0xffff0000 +#define DRV_MSG_CODE_LOAD_REQ 0x10000000 +#define DRV_MSG_CODE_LOAD_DONE 0x11000000 +#define DRV_MSG_CODE_INIT_HW 0x12000000 +#define DRV_MSG_CODE_CANCEL_LOAD_REQ 0x13000000 +#define DRV_MSG_CODE_UNLOAD_REQ 0x20000000 +#define DRV_MSG_CODE_UNLOAD_DONE 0x21000000 +#define DRV_MSG_CODE_INIT_PHY 0x22000000 + /* Params - FORCE - Reinitialize the link regardless of LFA */ + /* - DONT_CARE - Don't flap the link if up */ +#define DRV_MSG_CODE_LINK_RESET 0x23000000 + +#define DRV_MSG_CODE_SET_LLDP 0x24000000 +#define DRV_MSG_CODE_REGISTER_LLDP_TLVS_RX 0x24100000 +#define DRV_MSG_CODE_SET_DCBX 0x25000000 + /* OneView feature driver HSI*/ +#define DRV_MSG_CODE_OV_UPDATE_CURR_CFG 0x26000000 +#define DRV_MSG_CODE_OV_UPDATE_BUS_NUM 0x27000000 +#define DRV_MSG_CODE_OV_UPDATE_BOOT_PROGRESS 0x28000000 +#define DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER 0x29000000 +#define DRV_MSG_CODE_NIG_DRAIN 0x30000000 +#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE 0x31000000 +#define DRV_MSG_CODE_BW_UPDATE_ACK 0x32000000 +#define DRV_MSG_CODE_OV_UPDATE_MTU 0x33000000 +/* DRV_MB Param: driver version supp, FW_MB param: MFW version supp, + * data: struct resource_info + */ +#define DRV_MSG_GET_RESOURCE_ALLOC_MSG 0x34000000 +#define DRV_MSG_SET_RESOURCE_VALUE_MSG 0x35000000 +#define DRV_MSG_CODE_OV_UPDATE_WOL 0x38000000 +#define DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE 0x39000000 +#define DRV_MSG_CODE_S_TAG_UPDATE_ACK 0x3b000000 +#define DRV_MSG_CODE_OEM_UPDATE_FCOE_CVID 0x3c000000 +#define DRV_MSG_CODE_OEM_UPDATE_FCOE_FABRIC_NAME 0x3d000000 +#define DRV_MSG_CODE_OEM_UPDATE_BOOT_CFG 0x3e000000 +#define DRV_MSG_CODE_OEM_RESET_TO_DEFAULT 0x3f000000 +#define DRV_MSG_CODE_OV_GET_CURR_CFG 0x40000000 +#define DRV_MSG_CODE_GET_OEM_UPDATES 0x41000000 +/* params [31:8] - reserved, [7:0] - bitmap */ +#define DRV_MSG_CODE_GET_PPFID_BITMAP 0x43000000 + +/* Param: [0:15] Option ID, [16] - All, [17] - Init, [18] - Commit, + * [19] - Free + */ +#define DRV_MSG_CODE_GET_NVM_CFG_OPTION 0x003e0000 +/* Param: [0:15] Option ID, [17] - Init, [18] , [19] - Free */ +#define DRV_MSG_CODE_SET_NVM_CFG_OPTION 0x003f0000 +/*deprecated don't use*/ +#define DRV_MSG_CODE_INITIATE_FLR_DEPRECATED 0x02000000 +#define DRV_MSG_CODE_INITIATE_PF_FLR 0x02010000 +#define DRV_MSG_CODE_INITIATE_VF_FLR 0x02020000 +#define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000 +#define DRV_MSG_CODE_CFG_VF_MSIX 0xc0010000 +#define DRV_MSG_CODE_CFG_PF_VFS_MSIX 0xc0020000 +/* Param is either DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MFW/IMAGE */ +#define DRV_MSG_CODE_NVM_PUT_FILE_BEGIN 0x00010000 +/* Param should be set to the transaction size (up to 64 bytes) */ +#define DRV_MSG_CODE_NVM_PUT_FILE_DATA 0x00020000 +/* MFW will place the file offset and len in file_att struct */ +#define DRV_MSG_CODE_NVM_GET_FILE_ATT 0x00030000 +/* Read 32bytes of nvram data. Param is [0:23] ??? Offset [24:31] - + * ??? Len in Bytes + */ +#define DRV_MSG_CODE_NVM_READ_NVRAM 0x00050000 +/* Writes up to 32Bytes to nvram. Param is [0:23] ??? Offset [24:31] + * ??? Len in Bytes. In case this address is in the range of secured file in + * secured mode, the operation will fail + */ +#define DRV_MSG_CODE_NVM_WRITE_NVRAM 0x00060000 +/* Delete a file from nvram. Param is image_type. */ +#define DRV_MSG_CODE_NVM_DEL_FILE 0x00080000 +/* Reset MCP when no NVM operation is going on, and no drivers are loaded. + * In case operation succeed, MCP will not ack back. 
+ */ +#define DRV_MSG_CODE_MCP_RESET 0x00090000 +/* Temporary command to set secure mode, where the param is 0 (None secure) / + * 1 (Secure) / 2 (Full-Secure) + */ +#define DRV_MSG_CODE_SET_SECURE_MODE 0x000a0000 +/* Param: [0:15] - Address, [16:18] - lane# (0/1/2/3 - for single lane, + * 4/5 - for dual lanes, 6 - for all lanes, [28] - PMD reg, [29] - select port, + * [30:31] - port + */ +#define DRV_MSG_CODE_PHY_RAW_READ 0x000b0000 +/* Param: [0:15] - Address, [16:18] - lane# (0/1/2/3 - for single lane, + * 4/5 - for dual lanes, 6 - for all lanes, [28] - PMD reg, [29] - select port, + * [30:31] - port + */ +#define DRV_MSG_CODE_PHY_RAW_WRITE 0x000c0000 +/* Param: [0:15] - Address, [30:31] - port */ +#define DRV_MSG_CODE_PHY_CORE_READ 0x000d0000 +/* Param: [0:15] - Address, [30:31] - port */ +#define DRV_MSG_CODE_PHY_CORE_WRITE 0x000e0000 +/* Param: [0:3] - version, [4:15] - name (null terminated) */ +#define DRV_MSG_CODE_SET_VERSION 0x000f0000 +#define DRV_MSG_CODE_MCP_RESET_FORCE 0x000f04ce +/* Halts the MCP. To resume MCP, user will need to use + * MCP_REG_CPU_STATE/MCP_REG_CPU_MODE registers. + */ +#define DRV_MSG_CODE_MCP_HALT 0x00100000 +/* Set virtual mac address, params [31:6] - reserved, [5:4] - type, + * [3:0] - func, drv_data[7:0] - MAC/WWNN/WWPN + */ +#define DRV_MSG_CODE_SET_VMAC 0x00110000 +/* Set virtual mac address, params [31:6] - reserved, [5:4] - type, + * [3:0] - func, drv_data[7:0] - MAC/WWNN/WWPN + */ +#define DRV_MSG_CODE_GET_VMAC 0x00120000 +#define DRV_MSG_CODE_VMAC_TYPE_OFFSET 4 +#define DRV_MSG_CODE_VMAC_TYPE_MASK 0x30 +#define DRV_MSG_CODE_VMAC_TYPE_MAC 1 +#define DRV_MSG_CODE_VMAC_TYPE_WWNN 2 +#define DRV_MSG_CODE_VMAC_TYPE_WWPN 3 +/* Get statistics from pf, params [31:4] - reserved, [3:0] - stats type */ +#define DRV_MSG_CODE_GET_STATS 0x00130000 +#define DRV_MSG_CODE_STATS_TYPE_LAN 1 +#define DRV_MSG_CODE_STATS_TYPE_FCOE 2 +#define DRV_MSG_CODE_STATS_TYPE_ISCSI 3 +#define DRV_MSG_CODE_STATS_TYPE_RDMA 4 +/* Host shall provide buffer and size for MFW */ +#define DRV_MSG_CODE_PMD_DIAG_DUMP 0x00140000 +/* Host shall provide buffer and size for MFW */ +#define DRV_MSG_CODE_PMD_DIAG_EYE 0x00150000 +/* Param: [0:1] - Port, [2:7] - read size, [8:15] - I2C address, + * [16:31] - offset + */ +#define DRV_MSG_CODE_TRANSCEIVER_READ 0x00160000 +/* Param: [0:1] - Port, [2:7] - write size, [8:15] - I2C address, + * [16:31] - offset + */ +#define DRV_MSG_CODE_TRANSCEIVER_WRITE 0x00170000 +/* indicate OCBB related information */ +#define DRV_MSG_CODE_OCBB_DATA 0x00180000 +/* Set function BW, params[15:8] - min, params[7:0] - max */ +#define DRV_MSG_CODE_SET_BW 0x00190000 +#define BW_MAX_MASK 0x000000ff +#define BW_MAX_OFFSET 0 +#define BW_MIN_MASK 0x0000ff00 +#define BW_MIN_OFFSET 8 + +/* When param is set to 1, all parities will be masked(disabled). When params + * are set to 0, parities will be unmasked again. + */ +#define DRV_MSG_CODE_MASK_PARITIES 0x001a0000 +/* param[0] - Simulate fan failure, param[1] - simulate over temp. 
*/ +#define DRV_MSG_CODE_INDUCE_FAILURE 0x001b0000 +#define DRV_MSG_FAN_FAILURE_TYPE (1 << 0) +#define DRV_MSG_TEMPERATURE_FAILURE_TYPE (1 << 1) +/* Param: [0:15] - gpio number */ +#define DRV_MSG_CODE_GPIO_READ 0x001c0000 +/* Param: [0:15] - gpio number, [16:31] - gpio value */ +#define DRV_MSG_CODE_GPIO_WRITE 0x001d0000 +/* Param: [0:7] - test enum, [8:15] - image index, [16:31] - reserved */ +#define DRV_MSG_CODE_BIST_TEST 0x001e0000 +#define DRV_MSG_CODE_GET_TEMPERATURE 0x001f0000 + +/* Set LED mode params :0 operational, 1 LED turn ON, 2 LED turn OFF */ +#define DRV_MSG_CODE_SET_LED_MODE 0x00200000 +/* drv_data[7:0] - EPOC in seconds, drv_data[15:8] - + * driver version (MAJ MIN BUILD SUB) + */ +#define DRV_MSG_CODE_TIMESTAMP 0x00210000 +/* This is an empty mailbox just return OK*/ +#define DRV_MSG_CODE_EMPTY_MB 0x00220000 + +/* Param[0:4] - resource number (0-31), Param[5:7] - opcode, + * param[15:8] - age + */ +#define DRV_MSG_CODE_RESOURCE_CMD 0x00230000 + +#define RESOURCE_CMD_REQ_RESC_MASK 0x0000001F +#define RESOURCE_CMD_REQ_RESC_OFFSET 0 +#define RESOURCE_CMD_REQ_OPCODE_MASK 0x000000E0 +#define RESOURCE_CMD_REQ_OPCODE_OFFSET 5 +/* request resource ownership with default aging */ +#define RESOURCE_OPCODE_REQ 1 +/* request resource ownership without aging */ +#define RESOURCE_OPCODE_REQ_WO_AGING 2 +/* request resource ownership with specific aging timer (in seconds) */ +#define RESOURCE_OPCODE_REQ_W_AGING 3 +#define RESOURCE_OPCODE_RELEASE 4 /* release resource */ +/* force resource release */ +#define RESOURCE_OPCODE_FORCE_RELEASE 5 +#define RESOURCE_CMD_REQ_AGE_MASK 0x0000FF00 +#define RESOURCE_CMD_REQ_AGE_OFFSET 8 + +#define RESOURCE_CMD_RSP_OWNER_MASK 0x000000FF +#define RESOURCE_CMD_RSP_OWNER_OFFSET 0 +#define RESOURCE_CMD_RSP_OPCODE_MASK 0x00000700 +#define RESOURCE_CMD_RSP_OPCODE_OFFSET 8 +/* resource is free and granted to requester */ +#define RESOURCE_OPCODE_GNT 1 +/* resource is busy, param[7:0] indicates owner as follow 0-15 = PF0-15, + * 16 = MFW, 17 = diag over serial + */ +#define RESOURCE_OPCODE_BUSY 2 +/* indicate release request was acknowledged */ +#define RESOURCE_OPCODE_RELEASED 3 +/* indicate release request was previously received by other owner */ +#define RESOURCE_OPCODE_RELEASED_PREVIOUS 4 +/* indicate wrong owner during release */ +#define RESOURCE_OPCODE_WRONG_OWNER 5 +#define RESOURCE_OPCODE_UNKNOWN_CMD 255 + +/* dedicate resource 0 for dump */ +#define RESOURCE_DUMP 0 + +#define DRV_MSG_CODE_GET_MBA_VERSION 0x00240000 /* Get MBA version */ +/* Send crash dump commands with param[3:0] - opcode */ +#define DRV_MSG_CODE_MDUMP_CMD 0x00250000 +#define MDUMP_DRV_PARAM_OPCODE_MASK 0x0000000f +/* acknowledge reception of error indication */ +#define DRV_MSG_CODE_MDUMP_ACK 0x01 +/* set epoc and personality as follow: drv_data[3:0] - epoch, + * drv_data[7:4] - personality + */ +#define DRV_MSG_CODE_MDUMP_SET_VALUES 0x02 +/* trigger crash dump procedure */ +#define DRV_MSG_CODE_MDUMP_TRIGGER 0x03 +/* Request valid logs and config words */ +#define DRV_MSG_CODE_MDUMP_GET_CONFIG 0x04 +/* Set triggers mask. 
drv_mb_param should indicate (bitwise) which + * trigger enabled + */ +#define DRV_MSG_CODE_MDUMP_SET_ENABLE 0x05 +/* Clear all logs */ +#define DRV_MSG_CODE_MDUMP_CLEAR_LOGS 0x06 +#define DRV_MSG_CODE_MDUMP_GET_RETAIN 0x07 /* Get retained data */ +#define DRV_MSG_CODE_MDUMP_CLR_RETAIN 0x08 /* Clear retain data */ +#define DRV_MSG_CODE_MEM_ECC_EVENTS 0x00260000 /* Param: None */ +/* Param: [0:15] - gpio number */ +#define DRV_MSG_CODE_GPIO_INFO 0x00270000 +/* Value will be placed in union */ +#define DRV_MSG_CODE_EXT_PHY_READ 0x00280000 +/* Value should be placed in union */ +#define DRV_MSG_CODE_EXT_PHY_WRITE 0x00290000 +#define DRV_MB_PARAM_ADDR_OFFSET 0 +#define DRV_MB_PARAM_ADDR_MASK 0x0000FFFF +#define DRV_MB_PARAM_DEVAD_OFFSET 16 +#define DRV_MB_PARAM_DEVAD_MASK 0x001F0000 +#define DRV_MB_PARAM_PORT_OFFSET 21 +#define DRV_MB_PARAM_PORT_MASK 0x00600000 +#define DRV_MSG_CODE_EXT_PHY_FW_UPGRADE 0x002a0000 + +#define DRV_MSG_CODE_GET_TLV_DONE 0x002f0000 /* Param: None */ +/* Param: Set DRV_MB_PARAM_FEATURE_SUPPORT_* */ +#define DRV_MSG_CODE_FEATURE_SUPPORT 0x00300000 +/* return FW_MB_PARAM_FEATURE_SUPPORT_* */ +#define DRV_MSG_CODE_GET_MFW_FEATURE_SUPPORT 0x00310000 +#define DRV_MSG_CODE_READ_WOL_REG 0X00320000 +#define DRV_MSG_CODE_WRITE_WOL_REG 0X00330000 +#define DRV_MSG_CODE_GET_WOL_BUFFER 0X00340000 +/* Param: [0:23] Attribute key, [24:31] Attribute sub command */ +#define DRV_MSG_CODE_ATTRIBUTE 0x00350000 + +/* Param: Password len. Union: Plain Password */ +#define DRV_MSG_CODE_ENCRYPT_PASSWORD 0x00360000 +#define DRV_MSG_CODE_GET_ENGINE_CONFIG 0x00370000 /* Param: None */ + +#define DRV_MSG_SEQ_NUMBER_MASK 0x0000ffff + + u32 drv_mb_param; + /* UNLOAD_REQ params */ +#define DRV_MB_PARAM_UNLOAD_WOL_UNKNOWN 0x00000000 +#define DRV_MB_PARAM_UNLOAD_WOL_MCP 0x00000001 +#define DRV_MB_PARAM_UNLOAD_WOL_DISABLED 0x00000002 +#define DRV_MB_PARAM_UNLOAD_WOL_ENABLED 0x00000003 + + /* UNLOAD_DONE_params */ +#define DRV_MB_PARAM_UNLOAD_NON_D3_POWER 0x00000001 + + /* INIT_PHY params */ +#define DRV_MB_PARAM_INIT_PHY_FORCE 0x00000001 +#define DRV_MB_PARAM_INIT_PHY_DONT_CARE 0x00000002 + + /* LLDP / DCBX params*/ + /* To be used with SET_LLDP command */ +#define DRV_MB_PARAM_LLDP_SEND_MASK 0x00000001 +#define DRV_MB_PARAM_LLDP_SEND_OFFSET 0 + /* To be used with SET_LLDP and REGISTER_LLDP_TLVS_RX commands */ +#define DRV_MB_PARAM_LLDP_AGENT_MASK 0x00000006 +#define DRV_MB_PARAM_LLDP_AGENT_OFFSET 1 + /* To be used with REGISTER_LLDP_TLVS_RX command */ +#define DRV_MB_PARAM_LLDP_TLV_RX_VALID_MASK 0x00000001 +#define DRV_MB_PARAM_LLDP_TLV_RX_VALID_OFFSET 0 +#define DRV_MB_PARAM_LLDP_TLV_RX_TYPE_MASK 0x000007f0 +#define DRV_MB_PARAM_LLDP_TLV_RX_TYPE_OFFSET 4 + /* To be used with SET_DCBX command */ +#define DRV_MB_PARAM_DCBX_NOTIFY_MASK 0x00000008 +#define DRV_MB_PARAM_DCBX_NOTIFY_OFFSET 3 + +#define DRV_MB_PARAM_NIG_DRAIN_PERIOD_MS_MASK 0x000000FF +#define DRV_MB_PARAM_NIG_DRAIN_PERIOD_MS_OFFSET 0 + +#define DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MFW 0x1 +#define DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_IMAGE 0x2 + +#define DRV_MB_PARAM_NVM_OFFSET_OFFSET 0 +#define DRV_MB_PARAM_NVM_OFFSET_MASK 0x00FFFFFF +#define DRV_MB_PARAM_NVM_LEN_OFFSET 24 +#define DRV_MB_PARAM_NVM_LEN_MASK 0xFF000000 + +#define DRV_MB_PARAM_PHY_ADDR_OFFSET 0 +#define DRV_MB_PARAM_PHY_ADDR_MASK 0x1FF0FFFF +#define DRV_MB_PARAM_PHY_LANE_OFFSET 16 +#define DRV_MB_PARAM_PHY_LANE_MASK 0x000F0000 +#define DRV_MB_PARAM_PHY_SELECT_PORT_OFFSET 29 +#define DRV_MB_PARAM_PHY_SELECT_PORT_MASK 0x20000000 +#define DRV_MB_PARAM_PHY_PORT_OFFSET 30 +#define 
DRV_MB_PARAM_PHY_PORT_MASK 0xc0000000 + +#define DRV_MB_PARAM_PHYMOD_LANE_OFFSET 0 +#define DRV_MB_PARAM_PHYMOD_LANE_MASK 0x000000FF +#define DRV_MB_PARAM_PHYMOD_SIZE_OFFSET 8 +#define DRV_MB_PARAM_PHYMOD_SIZE_MASK 0x000FFF00 + /* configure vf MSIX params BB */ +#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_OFFSET 0 +#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK 0x000000FF +#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_OFFSET 8 +#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK 0x0000FF00 + /* configure vf MSIX for PF params AH*/ +#define DRV_MB_PARAM_CFG_PF_VFS_MSIX_SB_NUM_OFFSET 0 +#define DRV_MB_PARAM_CFG_PF_VFS_MSIX_SB_NUM_MASK 0x000000FF + + /* OneView configuration parametres */ +#define DRV_MB_PARAM_OV_CURR_CFG_OFFSET 0 +#define DRV_MB_PARAM_OV_CURR_CFG_MASK 0x0000000F +#define DRV_MB_PARAM_OV_CURR_CFG_NONE 0 +#define DRV_MB_PARAM_OV_CURR_CFG_OS 1 +#define DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC 2 +#define DRV_MB_PARAM_OV_CURR_CFG_OTHER 3 +#define DRV_MB_PARAM_OV_CURR_CFG_VC_CLP 4 +#define DRV_MB_PARAM_OV_CURR_CFG_CNU 5 +#define DRV_MB_PARAM_OV_CURR_CFG_DCI 6 +#define DRV_MB_PARAM_OV_CURR_CFG_HII 7 + +#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_OFFSET 0 +#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_MASK 0x000000FF +#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_NONE (1 << 0) +#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_ISCSI_IP_ACQUIRED (1 << 1) +#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_FCOE_FABRIC_LOGIN_SUCCESS (1 << 1) +#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_TRARGET_FOUND (1 << 2) +#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_ISCSI_CHAP_SUCCESS (1 << 3) +#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_FCOE_LUN_FOUND (1 << 3) +#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_LOGGED_INTO_TGT (1 << 4) +#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_IMG_DOWNLOADED (1 << 5) +#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_OS_HANDOFF (1 << 6) +#define DRV_MB_PARAM_OV_UPDATE_BOOT_COMPLETED 0 + +#define DRV_MB_PARAM_OV_PCI_BUS_NUM_OFFSET 0 +#define DRV_MB_PARAM_OV_PCI_BUS_NUM_MASK 0x000000FF + +#define DRV_MB_PARAM_OV_STORM_FW_VER_OFFSET 0 +#define DRV_MB_PARAM_OV_STORM_FW_VER_MASK 0xFFFFFFFF +#define DRV_MB_PARAM_OV_STORM_FW_VER_MAJOR_MASK 0xFF000000 +#define DRV_MB_PARAM_OV_STORM_FW_VER_MINOR_MASK 0x00FF0000 +#define DRV_MB_PARAM_OV_STORM_FW_VER_BUILD_MASK 0x0000FF00 +#define DRV_MB_PARAM_OV_STORM_FW_VER_DROP_MASK 0x000000FF + +#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_OFFSET 0 +#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_MASK 0xF +#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_UNKNOWN 0x1 +/* Not Installed*/ +#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED 0x2 +#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_LOADING 0x3 +/* installed but disabled by user/admin/OS */ +#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED 0x4 +/* installed and active */ +#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE 0x5 + +#define DRV_MB_PARAM_OV_MTU_SIZE_OFFSET 0 +#define DRV_MB_PARAM_OV_MTU_SIZE_MASK 0xFFFFFFFF + +#define DRV_MB_PARAM_ESWITCH_MODE_MASK (DRV_MB_PARAM_ESWITCH_MODE_NONE | \ + DRV_MB_PARAM_ESWITCH_MODE_VEB | \ + DRV_MB_PARAM_ESWITCH_MODE_VEPA) +#define DRV_MB_PARAM_ESWITCH_MODE_NONE 0x0 +#define DRV_MB_PARAM_ESWITCH_MODE_VEB 0x1 +#define DRV_MB_PARAM_ESWITCH_MODE_VEPA 0x2 + +#define DRV_MB_PARAM_DUMMY_OEM_UPDATES_MASK 0x1 +#define DRV_MB_PARAM_DUMMY_OEM_UPDATES_OFFSET 0 + +#define DRV_MB_PARAM_SET_LED_MODE_OPER 0x0 +#define DRV_MB_PARAM_SET_LED_MODE_ON 0x1 +#define DRV_MB_PARAM_SET_LED_MODE_OFF 0x2 +#define DRV_MB_PARAM_SET_LED1_MODE_ON 0x3 +#define DRV_MB_PARAM_SET_LED2_MODE_ON 0x4 +#define DRV_MB_PARAM_SET_ACT_LED_MODE_ON 0x6 + +#define 
DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET 0 +#define DRV_MB_PARAM_TRANSCEIVER_PORT_MASK 0x00000003 +#define DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET 2 +#define DRV_MB_PARAM_TRANSCEIVER_SIZE_MASK 0x000000FC +#define DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET 8 +#define DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK 0x0000FF00 +#define DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET 16 +#define DRV_MB_PARAM_TRANSCEIVER_OFFSET_MASK 0xFFFF0000 + +#define DRV_MB_PARAM_GPIO_NUMBER_OFFSET 0 +#define DRV_MB_PARAM_GPIO_NUMBER_MASK 0x0000FFFF +#define DRV_MB_PARAM_GPIO_VALUE_OFFSET 16 +#define DRV_MB_PARAM_GPIO_VALUE_MASK 0xFFFF0000 +#define DRV_MB_PARAM_GPIO_DIRECTION_OFFSET 16 +#define DRV_MB_PARAM_GPIO_DIRECTION_MASK 0x00FF0000 +#define DRV_MB_PARAM_GPIO_CTRL_OFFSET 24 +#define DRV_MB_PARAM_GPIO_CTRL_MASK 0xFF000000 + + /* Resource Allocation params - Driver version support*/ +#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_MASK 0xFFFF0000 +#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_OFFSET 16 +#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_MASK 0x0000FFFF +#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_OFFSET 0 + +#define DRV_MB_PARAM_BIST_UNKNOWN_TEST 0 +#define DRV_MB_PARAM_BIST_REGISTER_TEST 1 +#define DRV_MB_PARAM_BIST_CLOCK_TEST 2 +#define DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES 3 +#define DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX 4 + +#define DRV_MB_PARAM_BIST_RC_UNKNOWN 0 +#define DRV_MB_PARAM_BIST_RC_PASSED 1 +#define DRV_MB_PARAM_BIST_RC_FAILED 2 +#define DRV_MB_PARAM_BIST_RC_INVALID_PARAMETER 3 + +#define DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET 0 +#define DRV_MB_PARAM_BIST_TEST_INDEX_MASK 0x000000FF +#define DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_OFFSET 8 +#define DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_MASK 0x0000FF00 + +#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_MASK 0x0000FFFF +#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_OFFSET 0 +/* driver supports SmartLinQ parameter */ +#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_SMARTLINQ 0x00000001 +/* driver supports EEE parameter */ +#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE 0x00000002 +#define DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_MASK 0xFFFF0000 +#define DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_OFFSET 16 +/* driver supports virtual link parameter */ +#define DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK 0x00010000 + /* Driver attributes params */ +#define DRV_MB_PARAM_ATTRIBUTE_KEY_OFFSET 0 +#define DRV_MB_PARAM_ATTRIBUTE_KEY_MASK 0x00FFFFFF +#define DRV_MB_PARAM_ATTRIBUTE_CMD_OFFSET 24 +#define DRV_MB_PARAM_ATTRIBUTE_CMD_MASK 0xFF000000 + +#define DRV_MB_PARAM_NVM_CFG_OPTION_ID_OFFSET 0 +/* Option# */ +#define DRV_MB_PARAM_NVM_CFG_OPTION_ID_MASK 0x0000FFFF +#define DRV_MB_PARAM_NVM_CFG_OPTION_ALL_OFFSET 16 +/* (Only for Set) Applies option<92>s value to all entities (port/func) + * depending on the option type + */ +#define DRV_MB_PARAM_NVM_CFG_OPTION_ALL_MASK 0x00010000 +#define DRV_MB_PARAM_NVM_CFG_OPTION_INIT_OFFSET 17 +/* When set, and state is IDLE, MFW will allocate resources and load + * configuration from NVM + */ +#define DRV_MB_PARAM_NVM_CFG_OPTION_INIT_MASK 0x00020000 +#define DRV_MB_PARAM_NVM_CFG_OPTION_COMMIT_OFFSET 18 +/* (Only for Set) - When set submit changed nvm_cfg1 to flash */ +#define DRV_MB_PARAM_NVM_CFG_OPTION_COMMIT_MASK 0x00040000 +#define DRV_MB_PARAM_NVM_CFG_OPTION_FREE_OFFSET 19 +/* Free - When set, free allocated resources, and return to IDLE state. 
*/ +#define DRV_MB_PARAM_NVM_CFG_OPTION_FREE_MASK 0x00080000 +#define SINGLE_NVM_WR_OP(optionId) \ + ((((optionId) & DRV_MB_PARAM_NVM_CFG_OPTION_ID_MASK) << \ + DRV_MB_PARAM_NVM_CFG_OPTION_ID_OFFSET) | \ + (DRV_MB_PARAM_NVM_CFG_OPTION_INIT_MASK | \ + DRV_MB_PARAM_NVM_CFG_OPTION_COMMIT_MASK | \ + DRV_MB_PARAM_NVM_CFG_OPTION_FREE_MASK)) + u32 fw_mb_header; +#define FW_MSG_CODE_UNSUPPORTED 0x00000000 +#define FW_MSG_CODE_DRV_LOAD_ENGINE 0x10100000 +#define FW_MSG_CODE_DRV_LOAD_PORT 0x10110000 +#define FW_MSG_CODE_DRV_LOAD_FUNCTION 0x10120000 +#define FW_MSG_CODE_DRV_LOAD_REFUSED_PDA 0x10200000 +#define FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1 0x10210000 +#define FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG 0x10220000 +#define FW_MSG_CODE_DRV_LOAD_REFUSED_HSI 0x10230000 +#define FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE 0x10300000 +#define FW_MSG_CODE_DRV_LOAD_REFUSED_REJECT 0x10310000 +#define FW_MSG_CODE_DRV_LOAD_DONE 0x11100000 +#define FW_MSG_CODE_DRV_UNLOAD_ENGINE 0x20110000 +#define FW_MSG_CODE_DRV_UNLOAD_PORT 0x20120000 +#define FW_MSG_CODE_DRV_UNLOAD_FUNCTION 0x20130000 +#define FW_MSG_CODE_DRV_UNLOAD_DONE 0x21100000 +#define FW_MSG_CODE_INIT_PHY_DONE 0x21200000 +#define FW_MSG_CODE_INIT_PHY_ERR_INVALID_ARGS 0x21300000 +#define FW_MSG_CODE_LINK_RESET_DONE 0x23000000 +#define FW_MSG_CODE_SET_LLDP_DONE 0x24000000 +#define FW_MSG_CODE_SET_LLDP_UNSUPPORTED_AGENT 0x24010000 +#define FW_MSG_CODE_REGISTER_LLDP_TLVS_RX_DONE 0x24100000 +#define FW_MSG_CODE_SET_DCBX_DONE 0x25000000 +#define FW_MSG_CODE_UPDATE_CURR_CFG_DONE 0x26000000 +#define FW_MSG_CODE_UPDATE_BUS_NUM_DONE 0x27000000 +#define FW_MSG_CODE_UPDATE_BOOT_PROGRESS_DONE 0x28000000 +#define FW_MSG_CODE_UPDATE_STORM_FW_VER_DONE 0x29000000 +#define FW_MSG_CODE_UPDATE_DRIVER_STATE_DONE 0x31000000 +#define FW_MSG_CODE_DRV_MSG_CODE_BW_UPDATE_DONE 0x32000000 +#define FW_MSG_CODE_DRV_MSG_CODE_MTU_SIZE_DONE 0x33000000 +#define FW_MSG_CODE_RESOURCE_ALLOC_OK 0x34000000 +#define FW_MSG_CODE_RESOURCE_ALLOC_UNKNOWN 0x35000000 +#define FW_MSG_CODE_RESOURCE_ALLOC_DEPRECATED 0x36000000 +#define FW_MSG_CODE_RESOURCE_ALLOC_GEN_ERR 0x37000000 +#define FW_MSG_CODE_GET_OEM_UPDATES_DONE 0x41000000 + +#define FW_MSG_CODE_NIG_DRAIN_DONE 0x30000000 +#define FW_MSG_CODE_VF_DISABLED_DONE 0xb0000000 +#define FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE 0xb0010000 +#define FW_MSG_CODE_INITIATE_VF_FLR_OK 0xb0030000 +#define FW_MSG_CODE_ERR_RESOURCE_TEMPORARY_UNAVAILABLE 0x008b0000 +#define FW_MSG_CODE_ERR_RESOURCE_ALREADY_ALLOCATED 0x008c0000 +#define FW_MSG_CODE_ERR_RESOURCE_NOT_ALLOCATED 0x008d0000 +#define FW_MSG_CODE_ERR_NON_USER_OPTION 0x008e0000 +#define FW_MSG_CODE_ERR_UNKNOWN_OPTION 0x008f0000 +#define FW_MSG_CODE_WAIT 0x00900000 +#define FW_MSG_CODE_FLR_ACK 0x02000000 +#define FW_MSG_CODE_FLR_NACK 0x02100000 +#define FW_MSG_CODE_SET_DRIVER_DONE 0x02200000 +#define FW_MSG_CODE_SET_VMAC_SUCCESS 0x02300000 +#define FW_MSG_CODE_SET_VMAC_FAIL 0x02400000 + +#define FW_MSG_CODE_NVM_OK 0x00010000 +#define FW_MSG_CODE_NVM_INVALID_MODE 0x00020000 +#define FW_MSG_CODE_NVM_PREV_CMD_WAS_NOT_FINISHED 0x00030000 +#define FW_MSG_CODE_NVM_FAILED_TO_ALLOCATE_PAGE 0x00040000 +#define FW_MSG_CODE_NVM_INVALID_DIR_FOUND 0x00050000 +#define FW_MSG_CODE_NVM_PAGE_NOT_FOUND 0x00060000 +#define FW_MSG_CODE_NVM_FAILED_PARSING_BNDLE_HEADER 0x00070000 +#define FW_MSG_CODE_NVM_FAILED_PARSING_IMAGE_HEADER 0x00080000 +#define FW_MSG_CODE_NVM_PARSING_OUT_OF_SYNC 0x00090000 +#define FW_MSG_CODE_NVM_FAILED_UPDATING_DIR 0x000a0000 +#define FW_MSG_CODE_NVM_FAILED_TO_FREE_PAGE 0x000b0000 +#define 
FW_MSG_CODE_NVM_FILE_NOT_FOUND 0x000c0000 +#define FW_MSG_CODE_NVM_OPERATION_FAILED 0x000d0000 +#define FW_MSG_CODE_NVM_FAILED_UNALIGNED 0x000e0000 +#define FW_MSG_CODE_NVM_BAD_OFFSET 0x000f0000 +#define FW_MSG_CODE_NVM_BAD_SIGNATURE 0x00100000 +#define FW_MSG_CODE_NVM_FILE_READ_ONLY 0x00200000 +#define FW_MSG_CODE_NVM_UNKNOWN_FILE 0x00300000 +#define FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK 0x00400000 +/* MFW reject "mcp reset" command if one of the drivers is up */ +#define FW_MSG_CODE_MCP_RESET_REJECT 0x00600000 +#define FW_MSG_CODE_NVM_FAILED_CALC_HASH 0x00310000 +#define FW_MSG_CODE_NVM_PUBLIC_KEY_MISSING 0x00320000 +#define FW_MSG_CODE_NVM_INVALID_PUBLIC_KEY 0x00330000 + +#define FW_MSG_CODE_PHY_OK 0x00110000 +#define FW_MSG_CODE_PHY_ERROR 0x00120000 +#define FW_MSG_CODE_SET_SECURE_MODE_ERROR 0x00130000 +#define FW_MSG_CODE_SET_SECURE_MODE_OK 0x00140000 +#define FW_MSG_MODE_PHY_PRIVILEGE_ERROR 0x00150000 +#define FW_MSG_CODE_OK 0x00160000 +#define FW_MSG_CODE_ERROR 0x00170000 +#define FW_MSG_CODE_LED_MODE_INVALID 0x00170000 +#define FW_MSG_CODE_PHY_DIAG_OK 0x00160000 +#define FW_MSG_CODE_PHY_DIAG_ERROR 0x00170000 +#define FW_MSG_CODE_INIT_HW_FAILED_TO_ALLOCATE_PAGE 0x00040000 +#define FW_MSG_CODE_INIT_HW_FAILED_BAD_STATE 0x00170000 +#define FW_MSG_CODE_INIT_HW_FAILED_TO_SET_WINDOW 0x000d0000 +#define FW_MSG_CODE_INIT_HW_FAILED_NO_IMAGE 0x000c0000 +#define FW_MSG_CODE_INIT_HW_FAILED_VERSION_MISMATCH 0x00100000 +#define FW_MSG_CODE_TRANSCEIVER_DIAG_OK 0x00160000 +#define FW_MSG_CODE_TRANSCEIVER_DIAG_ERROR 0x00170000 +#define FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT 0x00020000 +#define FW_MSG_CODE_TRANSCEIVER_BAD_BUFFER_SIZE 0x000f0000 +#define FW_MSG_CODE_GPIO_OK 0x00160000 +#define FW_MSG_CODE_GPIO_DIRECTION_ERR 0x00170000 +#define FW_MSG_CODE_GPIO_CTRL_ERR 0x00020000 +#define FW_MSG_CODE_GPIO_INVALID 0x000f0000 +#define FW_MSG_CODE_GPIO_INVALID_VALUE 0x00050000 +#define FW_MSG_CODE_BIST_TEST_INVALID 0x000f0000 +#define FW_MSG_CODE_EXTPHY_INVALID_IMAGE_HEADER 0x00700000 +#define FW_MSG_CODE_EXTPHY_INVALID_PHY_TYPE 0x00710000 +#define FW_MSG_CODE_EXTPHY_OPERATION_FAILED 0x00720000 +#define FW_MSG_CODE_EXTPHY_NO_PHY_DETECTED 0x00730000 +#define FW_MSG_CODE_RECOVERY_MODE 0x00740000 + + /* mdump related response codes */ +#define FW_MSG_CODE_MDUMP_NO_IMAGE_FOUND 0x00010000 +#define FW_MSG_CODE_MDUMP_ALLOC_FAILED 0x00020000 +#define FW_MSG_CODE_MDUMP_INVALID_CMD 0x00030000 +#define FW_MSG_CODE_MDUMP_IN_PROGRESS 0x00040000 +#define FW_MSG_CODE_MDUMP_WRITE_FAILED 0x00050000 + + +#define FW_MSG_CODE_DRV_CFG_PF_VFS_MSIX_DONE 0x00870000 +#define FW_MSG_CODE_DRV_CFG_PF_VFS_MSIX_BAD_ASIC 0x00880000 + +#define FW_MSG_CODE_WOL_READ_WRITE_OK 0x00820000 +#define FW_MSG_CODE_WOL_READ_WRITE_INVALID_VAL 0x00830000 +#define FW_MSG_CODE_WOL_READ_WRITE_INVALID_ADDR 0x00840000 +#define FW_MSG_CODE_WOL_READ_BUFFER_OK 0x00850000 +#define FW_MSG_CODE_WOL_READ_BUFFER_INVALID_VAL 0x00860000 + +#define FW_MSG_CODE_ATTRIBUTE_INVALID_KEY 0x00020000 +#define FW_MSG_CODE_ATTRIBUTE_INVALID_CMD 0x00030000 + +#define FW_MSG_SEQ_NUMBER_MASK 0x0000ffff +#define FW_MSG_SEQ_NUMBER_OFFSET 0 +#define FW_MSG_CODE_MASK 0xffff0000 +#define FW_MSG_CODE_OFFSET 16 + u32 fw_mb_param; +/* Resource Allocation params - MFW version support */ +#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_MASK 0xFFFF0000 +#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_OFFSET 16 +#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_MASK 0x0000FFFF +#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_OFFSET 0 + +/* get MFW feature support response */ +/* MFW supports 
SmartLinQ */ +#define FW_MB_PARAM_FEATURE_SUPPORT_SMARTLINQ 0x00000001 +/* MFW supports EEE */ +#define FW_MB_PARAM_FEATURE_SUPPORT_EEE 0x00000002 +/* MFW supports DRV_LOAD Timeout */ +#define FW_MB_PARAM_FEATURE_SUPPORT_DRV_LOAD_TO 0x00000004 +/* MFW supports complete IGU cleanup upon FLR */ +#define FW_MB_PARAM_FEATURE_SUPPORT_IGU_CLEANUP 0x00000080 +/* MFW supports virtual link */ +#define FW_MB_PARAM_FEATURE_SUPPORT_VLINK 0x00010000 + +#define FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR (1 << 0) + +#define FW_MB_PARAM_OEM_UPDATE_MASK 0xFF +#define FW_MB_PARAM_OEM_UPDATE_OFFSET 0 +#define FW_MB_PARAM_OEM_UPDATE_BW 0x01 +#define FW_MB_PARAM_OEM_UPDATE_S_TAG 0x02 +#define FW_MB_PARAM_OEM_UPDATE_CFG 0x04 + +#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALID_MASK 0x00000001 +#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALID_OFFSET 0 +#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALUE_MASK 0x00000002 +#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALUE_OFFSET 1 +#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALID_MASK 0x00000004 +#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALID_OFFSET 2 +#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE_MASK 0x00000008 +#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE_OFFSET 3 + +#define FW_MB_PARAM_PPFID_BITMAP_MASK 0xFF +#define FW_MB_PARAM_PPFID_BITMAP_OFFSET 0 + + u32 drv_pulse_mb; +#define DRV_PULSE_SEQ_MASK 0x00007fff +#define DRV_PULSE_SYSTEM_TIME_MASK 0xffff0000 + /* + * The system time is in the format of + * (year-2001)*12*32 + month*32 + day. + */ +#define DRV_PULSE_ALWAYS_ALIVE 0x00008000 + /* + * Indicate to the firmware not to go into the + * OS-absent state when it is not getting the driver pulse. + * This is used for debugging as well as for PXE (MBA). + */ + + u32 mcp_pulse_mb; +#define MCP_PULSE_SEQ_MASK 0x00007fff +#define MCP_PULSE_ALWAYS_ALIVE 0x00008000 + /* Indicates to the driver not to assert due to lack + * of MCP response + */ +#define MCP_EVENT_MASK 0xffff0000 +#define MCP_EVENT_OTHER_DRIVER_RESET_REQ 0x00010000 + +/* The union data is used by the driver to pass parameters to the scratchpad. */ + + union drv_union_data union_data; + +}; + +/* MFW - DRV MB */ +/********************************************************************** + * Description + * Incremental Aggregative + * 8-bit MFW counter per message + * 8-bit ack-counter per message + * Capabilities + * Provides up to 256 aggregative messages per type + * Provides 4 message types per dword + * Message type pointers to byte offset + * Backward Compatibility by using sizeof for the counters. + * No lock required for 32-bit messages + * Limitations: + * In case of messages greater than 32 bits, a dedicated mechanism (e.g. a lock) + * is required to prevent data corruption. + **********************************************************************/
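The counter scheme described above packs one 8-bit counter per message ID, four counters per dword: the MFW bumps its byte in msg[], and the driver mirrors that byte into ack[] once the event has been handled. The sketch below is only a minimal illustration of that comparison-and-ack loop; it relies on the MFW_DRV_MSG_* helpers and struct public_mfw_mb defined just below, the u8/u32 typedefs stand in for the driver's OSAL types, qede_handle_mfw_event() is a hypothetical per-message handler that is not part of this header, and the big-endian byte swizzling handled by DRV_ACK_MSG is deliberately ignored.

#include <stdint.h>

typedef uint8_t u8;     /* placeholder typedefs; a real driver takes these from its OSAL layer */
typedef uint32_t u32;

/* Hypothetical per-message handler supplied by the consuming driver. */
static void qede_handle_mfw_event(int msg_id)
{
    (void)msg_id;
}

/* Scan the aggregative counters: a message is pending while the MFW byte in
 * msg[] differs from the driver byte in ack[]; copying the byte acknowledges it.
 */
static void mfw_mb_scan(struct public_mfw_mb *mb)
{
    int msg_id;

    for (msg_id = 0; msg_id < MFW_DRV_MSG_MAX; msg_id++) {
        u32 dword = MFW_DRV_MSG_DWORD(msg_id);
        u32 mask = (u32)MFW_DRV_MSG_MASK(msg_id);
        u32 msg = mb->msg[dword];

        if (((msg ^ mb->ack[dword]) & mask) == 0)
            continue;   /* counters match - nothing new */

        qede_handle_mfw_event(msg_id);

        /* Acknowledge by mirroring the MFW counter byte into ack[]. */
        mb->ack[dword] = (mb->ack[dword] & ~mask) | (msg & mask);
    }
}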
+enum MFW_DRV_MSG_TYPE { + MFW_DRV_MSG_LINK_CHANGE, + MFW_DRV_MSG_FLR_FW_ACK_FAILED, + MFW_DRV_MSG_VF_DISABLED, + MFW_DRV_MSG_LLDP_DATA_UPDATED, + MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED, + MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED, + MFW_DRV_MSG_ERROR_RECOVERY, + MFW_DRV_MSG_BW_UPDATE, + MFW_DRV_MSG_S_TAG_UPDATE, + MFW_DRV_MSG_GET_LAN_STATS, + MFW_DRV_MSG_GET_FCOE_STATS, + MFW_DRV_MSG_GET_ISCSI_STATS, + MFW_DRV_MSG_GET_RDMA_STATS, + MFW_DRV_MSG_FAILURE_DETECTED, + MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE, + MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED, + MFW_DRV_MSG_EEE_NEGOTIATION_COMPLETE, + MFW_DRV_MSG_GET_TLV_REQ, + MFW_DRV_MSG_OEM_CFG_UPDATE, + MFW_DRV_MSG_LLDP_RECEIVED_TLVS_UPDATED, + MFW_DRV_MSG_MAX +}; + +#define MFW_DRV_MSG_MAX_DWORDS(msgs) (((msgs - 1) >> 2) + 1) +#define MFW_DRV_MSG_DWORD(msg_id) (msg_id >> 2) +#define MFW_DRV_MSG_OFFSET(msg_id) ((msg_id & 0x3) << 3) +#define MFW_DRV_MSG_MASK(msg_id) (0xff << MFW_DRV_MSG_OFFSET(msg_id)) + +#ifdef BIG_ENDIAN /* Like MFW */ +#define DRV_ACK_MSG(msg_p, msg_id) \ +((u8)((u8 *)msg_p)[msg_id]++) +#else +#define DRV_ACK_MSG(msg_p, msg_id) \ +((u8)((u8 *)msg_p)[((msg_id & ~3) | ((~msg_id) & 3))]++) +#endif + +#define MFW_DRV_UPDATE(shmem_func, msg_id) \ +((u8)((u8 *)(MFW_MB_P(shmem_func)->msg))[msg_id]++) + +struct public_mfw_mb { + u32 sup_msgs; /* Assigned with MFW_DRV_MSG_MAX */ +/* Incremented by the MFW */ + u32 msg[MFW_DRV_MSG_MAX_DWORDS(MFW_DRV_MSG_MAX)]; +/* Incremented by the driver */ + u32 ack[MFW_DRV_MSG_MAX_DWORDS(MFW_DRV_MSG_MAX)]; +}; + +/**************************************/ +/* */ +/* P U B L I C D A T A */ +/* */ +/**************************************/ +enum public_sections { + PUBLIC_DRV_MB, /* Points to the first drv_mb of path0 */ + PUBLIC_MFW_MB, /* Points to the first mfw_mb of path0 */ + PUBLIC_GLOBAL, + PUBLIC_PATH, + PUBLIC_PORT, + PUBLIC_FUNC, + PUBLIC_MAX_SECTIONS +}; + +struct drv_ver_info_stc { + u32 ver; + u8 name[32]; +}; + +/* Runtime data needs about 1/2K. We use 2K to be on the safe side. + * Please make sure data does not exceed this size.
+ */ +#define NUM_RUNTIME_DWORDS 16 +struct drv_init_hw_stc { + u32 init_hw_bitmask[NUM_RUNTIME_DWORDS]; + u32 init_hw_data[NUM_RUNTIME_DWORDS * 32]; +}; + +struct mcp_public_data { + /* The sections fields is an array */ + u32 num_sections; + offsize_t sections[PUBLIC_MAX_SECTIONS]; + struct public_drv_mb drv_mb[MCP_GLOB_FUNC_MAX]; + struct public_mfw_mb mfw_mb[MCP_GLOB_FUNC_MAX]; + struct public_global global; + struct public_path path[MCP_GLOB_PATH_MAX]; + struct public_port port[MCP_GLOB_PORT_MAX]; + struct public_func func[MCP_GLOB_FUNC_MAX]; +}; + +#define I2C_TRANSCEIVER_ADDR 0xa0 +#define MAX_I2C_TRANSACTION_SIZE 16 +#define MAX_I2C_TRANSCEIVER_PAGE_SIZE 256 + +#endif /* MCP_PUBLIC_H */ diff --git a/src/spdk/dpdk/drivers/net/qede/base/meson.build b/src/spdk/dpdk/drivers/net/qede/base/meson.build new file mode 100644 index 000000000..59b41c895 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/qede/base/meson.build @@ -0,0 +1,57 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2018 Luca Boccassi + +sources = [ + 'bcm_osal.c', + 'ecore_cxt.c', + 'ecore_dcbx.c', + 'ecore_dev.c', + 'ecore_hw.c', + 'ecore_init_fw_funcs.c', + 'ecore_init_ops.c', + 'ecore_int.c', + 'ecore_l2.c', + 'ecore_mcp.c', + 'ecore_sp_commands.c', + 'ecore_spq.c', + 'ecore_sriov.c', + 'ecore_vf.c', +] + + +error_cflags = [ + '-Wno-unused-parameter', + '-Wno-sign-compare', + '-Wno-missing-prototypes', + '-Wno-cast-qual', + '-Wno-unused-function', + '-Wno-unused-variable', + '-Wno-strict-aliasing', + '-Wno-missing-prototypes', + '-Wno-unused-value', + '-Wno-format-nonliteral', + '-Wno-shift-negative-value', + '-Wno-unused-but-set-variable', + '-Wno-missing-declarations', + '-Wno-maybe-uninitialized', + '-Wno-strict-prototypes', + '-Wno-shift-negative-value', + '-Wno-implicit-fallthrough', + '-Wno-format-extra-args', + '-Wno-visibility', + '-Wno-empty-body', + '-Wno-invalid-source-encoding', + '-Wno-sometimes-uninitialized', + '-Wno-pointer-bool-conversion', +] +c_args = cflags +foreach flag: error_cflags + if cc.has_argument(flag) + c_args += flag + endif +endforeach + +base_lib = static_library('qede_base', sources, + dependencies: static_rte_net, + c_args: c_args) +base_objs = base_lib.extract_all_objects() diff --git a/src/spdk/dpdk/drivers/net/qede/base/nvm_cfg.h b/src/spdk/dpdk/drivers/net/qede/base/nvm_cfg.h new file mode 100644 index 000000000..daa5437dd --- /dev/null +++ b/src/spdk/dpdk/drivers/net/qede/base/nvm_cfg.h @@ -0,0 +1,2872 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. + * All rights reserved. + * www.cavium.com + */ + +/**************************************************************************** + * + * Name: nvm_cfg.h + * + * Description: NVM config file - Generated file from nvm cfg excel. + * DO NOT MODIFY !!! 
+ * + * Created: 1/6/2019 + * + ****************************************************************************/ + +#ifndef NVM_CFG_H +#define NVM_CFG_H + + +#define NVM_CFG_version 0x84500 + +#define NVM_CFG_new_option_seq 45 + +#define NVM_CFG_removed_option_seq 4 + +#define NVM_CFG_updated_value_seq 13 + +struct nvm_cfg_mac_address { + u32 mac_addr_hi; + #define NVM_CFG_MAC_ADDRESS_HI_MASK 0x0000FFFF + #define NVM_CFG_MAC_ADDRESS_HI_OFFSET 0 + u32 mac_addr_lo; +}; + +/****************************************** + * nvm_cfg1 structs + ******************************************/ +struct nvm_cfg1_glob { + u32 generic_cont0; /* 0x0 */ + #define NVM_CFG1_GLOB_BOARD_SWAP_MASK 0x0000000F + #define NVM_CFG1_GLOB_BOARD_SWAP_OFFSET 0 + #define NVM_CFG1_GLOB_BOARD_SWAP_NONE 0x0 + #define NVM_CFG1_GLOB_BOARD_SWAP_PATH 0x1 + #define NVM_CFG1_GLOB_BOARD_SWAP_PORT 0x2 + #define NVM_CFG1_GLOB_BOARD_SWAP_BOTH 0x3 + #define NVM_CFG1_GLOB_MF_MODE_MASK 0x00000FF0 + #define NVM_CFG1_GLOB_MF_MODE_OFFSET 4 + #define NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED 0x0 + #define NVM_CFG1_GLOB_MF_MODE_DEFAULT 0x1 + #define NVM_CFG1_GLOB_MF_MODE_SPIO4 0x2 + #define NVM_CFG1_GLOB_MF_MODE_NPAR1_0 0x3 + #define NVM_CFG1_GLOB_MF_MODE_NPAR1_5 0x4 + #define NVM_CFG1_GLOB_MF_MODE_NPAR2_0 0x5 + #define NVM_CFG1_GLOB_MF_MODE_BD 0x6 + #define NVM_CFG1_GLOB_MF_MODE_UFP 0x7 + #define NVM_CFG1_GLOB_MF_MODE_DCI_NPAR 0x8 + #define NVM_CFG1_GLOB_FAN_FAILURE_ENFORCEMENT_MASK 0x00001000 + #define NVM_CFG1_GLOB_FAN_FAILURE_ENFORCEMENT_OFFSET 12 + #define NVM_CFG1_GLOB_FAN_FAILURE_ENFORCEMENT_DISABLED 0x0 + #define NVM_CFG1_GLOB_FAN_FAILURE_ENFORCEMENT_ENABLED 0x1 + #define NVM_CFG1_GLOB_AVS_MARGIN_LOW_MASK 0x001FE000 + #define NVM_CFG1_GLOB_AVS_MARGIN_LOW_OFFSET 13 + #define NVM_CFG1_GLOB_AVS_MARGIN_HIGH_MASK 0x1FE00000 + #define NVM_CFG1_GLOB_AVS_MARGIN_HIGH_OFFSET 21 + #define NVM_CFG1_GLOB_ENABLE_SRIOV_MASK 0x20000000 + #define NVM_CFG1_GLOB_ENABLE_SRIOV_OFFSET 29 + #define NVM_CFG1_GLOB_ENABLE_SRIOV_DISABLED 0x0 + #define NVM_CFG1_GLOB_ENABLE_SRIOV_ENABLED 0x1 + #define NVM_CFG1_GLOB_ENABLE_ATC_MASK 0x40000000 + #define NVM_CFG1_GLOB_ENABLE_ATC_OFFSET 30 + #define NVM_CFG1_GLOB_ENABLE_ATC_DISABLED 0x0 + #define NVM_CFG1_GLOB_ENABLE_ATC_ENABLED 0x1 + #define NVM_CFG1_GLOB_RESERVED__M_WAS_CLOCK_SLOWDOWN_MASK \ + 0x80000000 + #define NVM_CFG1_GLOB_RESERVED__M_WAS_CLOCK_SLOWDOWN_OFFSET 31 + #define NVM_CFG1_GLOB_RESERVED__M_WAS_CLOCK_SLOWDOWN_DISABLED \ + 0x0 + #define NVM_CFG1_GLOB_RESERVED__M_WAS_CLOCK_SLOWDOWN_ENABLED 0x1 + u32 engineering_change[3]; /* 0x4 */ + u32 manufacturing_id; /* 0x10 */ + u32 serial_number[4]; /* 0x14 */ + u32 pcie_cfg; /* 0x24 */ + #define NVM_CFG1_GLOB_PCI_GEN_MASK 0x00000003 + #define NVM_CFG1_GLOB_PCI_GEN_OFFSET 0 + #define NVM_CFG1_GLOB_PCI_GEN_PCI_GEN1 0x0 + #define NVM_CFG1_GLOB_PCI_GEN_PCI_GEN2 0x1 + #define NVM_CFG1_GLOB_PCI_GEN_PCI_GEN3 0x2 + #define NVM_CFG1_GLOB_BEACON_WOL_ENABLED_MASK 0x00000004 + #define NVM_CFG1_GLOB_BEACON_WOL_ENABLED_OFFSET 2 + #define NVM_CFG1_GLOB_BEACON_WOL_ENABLED_DISABLED 0x0 + #define NVM_CFG1_GLOB_BEACON_WOL_ENABLED_ENABLED 0x1 + #define NVM_CFG1_GLOB_ASPM_SUPPORT_MASK 0x00000018 + #define NVM_CFG1_GLOB_ASPM_SUPPORT_OFFSET 3 + #define NVM_CFG1_GLOB_ASPM_SUPPORT_L0S_L1_ENABLED 0x0 + #define NVM_CFG1_GLOB_ASPM_SUPPORT_L0S_DISABLED 0x1 + #define NVM_CFG1_GLOB_ASPM_SUPPORT_L1_DISABLED 0x2 + #define NVM_CFG1_GLOB_ASPM_SUPPORT_L0S_L1_DISABLED 0x3 + #define NVM_CFG1_GLOB_RESERVED_MPREVENT_PCIE_L1_MENTRY_MASK \ + 0x00000020 + #define NVM_CFG1_GLOB_RESERVED_MPREVENT_PCIE_L1_MENTRY_OFFSET 5 + 
#define NVM_CFG1_GLOB_PCIE_G2_TX_AMPLITUDE_MASK 0x000003C0 + #define NVM_CFG1_GLOB_PCIE_G2_TX_AMPLITUDE_OFFSET 6 + #define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_MASK 0x00001C00 + #define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_OFFSET 10 + #define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_HW 0x0 + #define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_0DB 0x1 + #define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_3_5DB 0x2 + #define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_6_0DB 0x3 + #define NVM_CFG1_GLOB_WWN_NODE_PREFIX0_MASK 0x001FE000 + #define NVM_CFG1_GLOB_WWN_NODE_PREFIX0_OFFSET 13 + #define NVM_CFG1_GLOB_WWN_NODE_PREFIX1_MASK 0x1FE00000 + #define NVM_CFG1_GLOB_WWN_NODE_PREFIX1_OFFSET 21 + #define NVM_CFG1_GLOB_NCSI_PACKAGE_ID_MASK 0x60000000 + #define NVM_CFG1_GLOB_NCSI_PACKAGE_ID_OFFSET 29 + /* Set the duration, in sec, fan failure signal should be sampled */ + #define NVM_CFG1_GLOB_RESERVED_FAN_FAILURE_DURATION_MASK \ + 0x80000000 + #define NVM_CFG1_GLOB_RESERVED_FAN_FAILURE_DURATION_OFFSET 31 + u32 mgmt_traffic; /* 0x28 */ + #define NVM_CFG1_GLOB_RESERVED60_MASK 0x00000001 + #define NVM_CFG1_GLOB_RESERVED60_OFFSET 0 + #define NVM_CFG1_GLOB_WWN_PORT_PREFIX0_MASK 0x000001FE + #define NVM_CFG1_GLOB_WWN_PORT_PREFIX0_OFFSET 1 + #define NVM_CFG1_GLOB_WWN_PORT_PREFIX1_MASK 0x0001FE00 + #define NVM_CFG1_GLOB_WWN_PORT_PREFIX1_OFFSET 9 + #define NVM_CFG1_GLOB_SMBUS_ADDRESS_MASK 0x01FE0000 + #define NVM_CFG1_GLOB_SMBUS_ADDRESS_OFFSET 17 + #define NVM_CFG1_GLOB_SIDEBAND_MODE_MASK 0x06000000 + #define NVM_CFG1_GLOB_SIDEBAND_MODE_OFFSET 25 + #define NVM_CFG1_GLOB_SIDEBAND_MODE_DISABLED 0x0 + #define NVM_CFG1_GLOB_SIDEBAND_MODE_RMII 0x1 + #define NVM_CFG1_GLOB_SIDEBAND_MODE_SGMII 0x2 + #define NVM_CFG1_GLOB_AUX_MODE_MASK 0x78000000 + #define NVM_CFG1_GLOB_AUX_MODE_OFFSET 27 + #define NVM_CFG1_GLOB_AUX_MODE_DEFAULT 0x0 + #define NVM_CFG1_GLOB_AUX_MODE_SMBUS_ONLY 0x1 + /* Indicates whether external thermal sonsor is available */ + #define NVM_CFG1_GLOB_EXTERNAL_THERMAL_SENSOR_MASK 0x80000000 + #define NVM_CFG1_GLOB_EXTERNAL_THERMAL_SENSOR_OFFSET 31 + #define NVM_CFG1_GLOB_EXTERNAL_THERMAL_SENSOR_DISABLED 0x0 + #define NVM_CFG1_GLOB_EXTERNAL_THERMAL_SENSOR_ENABLED 0x1 + u32 core_cfg; /* 0x2C */ + #define NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK 0x000000FF + #define NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET 0 + #define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_2X40G 0x0 + #define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X50G 0x1 + #define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_1X100G 0x2 + #define NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X10G_F 0x3 + #define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X10G_E 0x4 + #define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X20G 0x5 + #define NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X40G 0xB + #define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G 0xC + #define NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G 0xD + #define NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X25G 0xE + #define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X10G 0xF + #define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G_LIO2 0x10 + #define NVM_CFG1_GLOB_MPS10_ENFORCE_TX_FIR_CFG_MASK 0x00000100 + #define NVM_CFG1_GLOB_MPS10_ENFORCE_TX_FIR_CFG_OFFSET 8 + #define NVM_CFG1_GLOB_MPS10_ENFORCE_TX_FIR_CFG_DISABLED 0x0 + #define NVM_CFG1_GLOB_MPS10_ENFORCE_TX_FIR_CFG_ENABLED 0x1 + #define NVM_CFG1_GLOB_MPS25_ENFORCE_TX_FIR_CFG_MASK 0x00000200 + #define NVM_CFG1_GLOB_MPS25_ENFORCE_TX_FIR_CFG_OFFSET 9 + #define NVM_CFG1_GLOB_MPS25_ENFORCE_TX_FIR_CFG_DISABLED 0x0 + #define NVM_CFG1_GLOB_MPS25_ENFORCE_TX_FIR_CFG_ENABLED 0x1 + #define NVM_CFG1_GLOB_MPS10_CORE_ADDR_MASK 0x0003FC00 + #define NVM_CFG1_GLOB_MPS10_CORE_ADDR_OFFSET 10 + #define 
NVM_CFG1_GLOB_MPS25_CORE_ADDR_MASK 0x03FC0000 + #define NVM_CFG1_GLOB_MPS25_CORE_ADDR_OFFSET 18 + #define NVM_CFG1_GLOB_AVS_MODE_MASK 0x1C000000 + #define NVM_CFG1_GLOB_AVS_MODE_OFFSET 26 + #define NVM_CFG1_GLOB_AVS_MODE_CLOSE_LOOP 0x0 + #define NVM_CFG1_GLOB_AVS_MODE_OPEN_LOOP_CFG 0x1 + #define NVM_CFG1_GLOB_AVS_MODE_OPEN_LOOP_OTP 0x2 + #define NVM_CFG1_GLOB_AVS_MODE_DISABLED 0x3 + #define NVM_CFG1_GLOB_OVERRIDE_SECURE_MODE_MASK 0x60000000 + #define NVM_CFG1_GLOB_OVERRIDE_SECURE_MODE_OFFSET 29 + #define NVM_CFG1_GLOB_OVERRIDE_SECURE_MODE_DISABLED 0x0 + #define NVM_CFG1_GLOB_OVERRIDE_SECURE_MODE_ENABLED 0x1 + u32 e_lane_cfg1; /* 0x30 */ + #define NVM_CFG1_GLOB_RX_LANE0_SWAP_MASK 0x0000000F + #define NVM_CFG1_GLOB_RX_LANE0_SWAP_OFFSET 0 + #define NVM_CFG1_GLOB_RX_LANE1_SWAP_MASK 0x000000F0 + #define NVM_CFG1_GLOB_RX_LANE1_SWAP_OFFSET 4 + #define NVM_CFG1_GLOB_RX_LANE2_SWAP_MASK 0x00000F00 + #define NVM_CFG1_GLOB_RX_LANE2_SWAP_OFFSET 8 + #define NVM_CFG1_GLOB_RX_LANE3_SWAP_MASK 0x0000F000 + #define NVM_CFG1_GLOB_RX_LANE3_SWAP_OFFSET 12 + #define NVM_CFG1_GLOB_TX_LANE0_SWAP_MASK 0x000F0000 + #define NVM_CFG1_GLOB_TX_LANE0_SWAP_OFFSET 16 + #define NVM_CFG1_GLOB_TX_LANE1_SWAP_MASK 0x00F00000 + #define NVM_CFG1_GLOB_TX_LANE1_SWAP_OFFSET 20 + #define NVM_CFG1_GLOB_TX_LANE2_SWAP_MASK 0x0F000000 + #define NVM_CFG1_GLOB_TX_LANE2_SWAP_OFFSET 24 + #define NVM_CFG1_GLOB_TX_LANE3_SWAP_MASK 0xF0000000 + #define NVM_CFG1_GLOB_TX_LANE3_SWAP_OFFSET 28 + u32 e_lane_cfg2; /* 0x34 */ + #define NVM_CFG1_GLOB_RX_LANE0_POL_FLIP_MASK 0x00000001 + #define NVM_CFG1_GLOB_RX_LANE0_POL_FLIP_OFFSET 0 + #define NVM_CFG1_GLOB_RX_LANE1_POL_FLIP_MASK 0x00000002 + #define NVM_CFG1_GLOB_RX_LANE1_POL_FLIP_OFFSET 1 + #define NVM_CFG1_GLOB_RX_LANE2_POL_FLIP_MASK 0x00000004 + #define NVM_CFG1_GLOB_RX_LANE2_POL_FLIP_OFFSET 2 + #define NVM_CFG1_GLOB_RX_LANE3_POL_FLIP_MASK 0x00000008 + #define NVM_CFG1_GLOB_RX_LANE3_POL_FLIP_OFFSET 3 + #define NVM_CFG1_GLOB_TX_LANE0_POL_FLIP_MASK 0x00000010 + #define NVM_CFG1_GLOB_TX_LANE0_POL_FLIP_OFFSET 4 + #define NVM_CFG1_GLOB_TX_LANE1_POL_FLIP_MASK 0x00000020 + #define NVM_CFG1_GLOB_TX_LANE1_POL_FLIP_OFFSET 5 + #define NVM_CFG1_GLOB_TX_LANE2_POL_FLIP_MASK 0x00000040 + #define NVM_CFG1_GLOB_TX_LANE2_POL_FLIP_OFFSET 6 + #define NVM_CFG1_GLOB_TX_LANE3_POL_FLIP_MASK 0x00000080 + #define NVM_CFG1_GLOB_TX_LANE3_POL_FLIP_OFFSET 7 + #define NVM_CFG1_GLOB_SMBUS_MODE_MASK 0x00000F00 + #define NVM_CFG1_GLOB_SMBUS_MODE_OFFSET 8 + #define NVM_CFG1_GLOB_SMBUS_MODE_DISABLED 0x0 + #define NVM_CFG1_GLOB_SMBUS_MODE_100KHZ 0x1 + #define NVM_CFG1_GLOB_SMBUS_MODE_400KHZ 0x2 + #define NVM_CFG1_GLOB_NCSI_MASK 0x0000F000 + #define NVM_CFG1_GLOB_NCSI_OFFSET 12 + #define NVM_CFG1_GLOB_NCSI_DISABLED 0x0 + #define NVM_CFG1_GLOB_NCSI_ENABLED 0x1 + /* Maximum advertised pcie link width */ + #define NVM_CFG1_GLOB_MAX_LINK_WIDTH_MASK 0x000F0000 + #define NVM_CFG1_GLOB_MAX_LINK_WIDTH_OFFSET 16 + #define NVM_CFG1_GLOB_MAX_LINK_WIDTH_BB_16_LANES 0x0 + #define NVM_CFG1_GLOB_MAX_LINK_WIDTH_1_LANE 0x1 + #define NVM_CFG1_GLOB_MAX_LINK_WIDTH_2_LANES 0x2 + #define NVM_CFG1_GLOB_MAX_LINK_WIDTH_4_LANES 0x3 + #define NVM_CFG1_GLOB_MAX_LINK_WIDTH_8_LANES 0x4 + /* ASPM L1 mode */ + #define NVM_CFG1_GLOB_ASPM_L1_MODE_MASK 0x00300000 + #define NVM_CFG1_GLOB_ASPM_L1_MODE_OFFSET 20 + #define NVM_CFG1_GLOB_ASPM_L1_MODE_FORCED 0x0 + #define NVM_CFG1_GLOB_ASPM_L1_MODE_DYNAMIC_LOW_LATENCY 0x1 + #define NVM_CFG1_GLOB_ON_CHIP_SENSOR_MODE_MASK 0x01C00000 + #define NVM_CFG1_GLOB_ON_CHIP_SENSOR_MODE_OFFSET 22 + #define 
NVM_CFG1_GLOB_ON_CHIP_SENSOR_MODE_DISABLED 0x0 + #define NVM_CFG1_GLOB_ON_CHIP_SENSOR_MODE_INT_EXT_I2C 0x1 + #define NVM_CFG1_GLOB_ON_CHIP_SENSOR_MODE_INT_ONLY 0x2 + #define NVM_CFG1_GLOB_ON_CHIP_SENSOR_MODE_INT_EXT_SMBUS 0x3 + #define NVM_CFG1_GLOB_TEMPERATURE_MONITORING_MODE_MASK \ + 0x06000000 + #define NVM_CFG1_GLOB_TEMPERATURE_MONITORING_MODE_OFFSET 25 + #define NVM_CFG1_GLOB_TEMPERATURE_MONITORING_MODE_DISABLE 0x0 + #define NVM_CFG1_GLOB_TEMPERATURE_MONITORING_MODE_INTERNAL 0x1 + #define NVM_CFG1_GLOB_TEMPERATURE_MONITORING_MODE_EXTERNAL 0x2 + #define NVM_CFG1_GLOB_TEMPERATURE_MONITORING_MODE_BOTH 0x3 + /* Set the PLDM sensor modes */ + #define NVM_CFG1_GLOB_PLDM_SENSOR_MODE_MASK 0x38000000 + #define NVM_CFG1_GLOB_PLDM_SENSOR_MODE_OFFSET 27 + #define NVM_CFG1_GLOB_PLDM_SENSOR_MODE_INTERNAL 0x0 + #define NVM_CFG1_GLOB_PLDM_SENSOR_MODE_EXTERNAL 0x1 + #define NVM_CFG1_GLOB_PLDM_SENSOR_MODE_BOTH 0x2 + /* ROL enable */ + #define NVM_CFG1_GLOB_RESET_ON_LAN_MASK 0x80000000 + #define NVM_CFG1_GLOB_RESET_ON_LAN_OFFSET 31 + #define NVM_CFG1_GLOB_RESET_ON_LAN_DISABLED 0x0 + #define NVM_CFG1_GLOB_RESET_ON_LAN_ENABLED 0x1 + u32 f_lane_cfg1; /* 0x38 */ + #define NVM_CFG1_GLOB_RX_LANE0_SWAP_MASK 0x0000000F + #define NVM_CFG1_GLOB_RX_LANE0_SWAP_OFFSET 0 + #define NVM_CFG1_GLOB_RX_LANE1_SWAP_MASK 0x000000F0 + #define NVM_CFG1_GLOB_RX_LANE1_SWAP_OFFSET 4 + #define NVM_CFG1_GLOB_RX_LANE2_SWAP_MASK 0x00000F00 + #define NVM_CFG1_GLOB_RX_LANE2_SWAP_OFFSET 8 + #define NVM_CFG1_GLOB_RX_LANE3_SWAP_MASK 0x0000F000 + #define NVM_CFG1_GLOB_RX_LANE3_SWAP_OFFSET 12 + #define NVM_CFG1_GLOB_TX_LANE0_SWAP_MASK 0x000F0000 + #define NVM_CFG1_GLOB_TX_LANE0_SWAP_OFFSET 16 + #define NVM_CFG1_GLOB_TX_LANE1_SWAP_MASK 0x00F00000 + #define NVM_CFG1_GLOB_TX_LANE1_SWAP_OFFSET 20 + #define NVM_CFG1_GLOB_TX_LANE2_SWAP_MASK 0x0F000000 + #define NVM_CFG1_GLOB_TX_LANE2_SWAP_OFFSET 24 + #define NVM_CFG1_GLOB_TX_LANE3_SWAP_MASK 0xF0000000 + #define NVM_CFG1_GLOB_TX_LANE3_SWAP_OFFSET 28 + u32 f_lane_cfg2; /* 0x3C */ + #define NVM_CFG1_GLOB_RX_LANE0_POL_FLIP_MASK 0x00000001 + #define NVM_CFG1_GLOB_RX_LANE0_POL_FLIP_OFFSET 0 + #define NVM_CFG1_GLOB_RX_LANE1_POL_FLIP_MASK 0x00000002 + #define NVM_CFG1_GLOB_RX_LANE1_POL_FLIP_OFFSET 1 + #define NVM_CFG1_GLOB_RX_LANE2_POL_FLIP_MASK 0x00000004 + #define NVM_CFG1_GLOB_RX_LANE2_POL_FLIP_OFFSET 2 + #define NVM_CFG1_GLOB_RX_LANE3_POL_FLIP_MASK 0x00000008 + #define NVM_CFG1_GLOB_RX_LANE3_POL_FLIP_OFFSET 3 + #define NVM_CFG1_GLOB_TX_LANE0_POL_FLIP_MASK 0x00000010 + #define NVM_CFG1_GLOB_TX_LANE0_POL_FLIP_OFFSET 4 + #define NVM_CFG1_GLOB_TX_LANE1_POL_FLIP_MASK 0x00000020 + #define NVM_CFG1_GLOB_TX_LANE1_POL_FLIP_OFFSET 5 + #define NVM_CFG1_GLOB_TX_LANE2_POL_FLIP_MASK 0x00000040 + #define NVM_CFG1_GLOB_TX_LANE2_POL_FLIP_OFFSET 6 + #define NVM_CFG1_GLOB_TX_LANE3_POL_FLIP_MASK 0x00000080 + #define NVM_CFG1_GLOB_TX_LANE3_POL_FLIP_OFFSET 7 + /* Control the period between two successive checks */ + #define NVM_CFG1_GLOB_TEMPERATURE_PERIOD_BETWEEN_CHECKS_MASK \ + 0x0000FF00 + #define NVM_CFG1_GLOB_TEMPERATURE_PERIOD_BETWEEN_CHECKS_OFFSET 8 + /* Set shutdown temperature */ + #define NVM_CFG1_GLOB_SHUTDOWN_THRESHOLD_TEMPERATURE_MASK \ + 0x00FF0000 + #define NVM_CFG1_GLOB_SHUTDOWN_THRESHOLD_TEMPERATURE_OFFSET 16 + /* Set max. 
count for over operational temperature */ + #define NVM_CFG1_GLOB_MAX_COUNT_OPER_THRESHOLD_MASK 0xFF000000 + #define NVM_CFG1_GLOB_MAX_COUNT_OPER_THRESHOLD_OFFSET 24 + u32 mps10_preemphasis; /* 0x40 */ + #define NVM_CFG1_GLOB_LANE0_PREEMP_MASK 0x000000FF + #define NVM_CFG1_GLOB_LANE0_PREEMP_OFFSET 0 + #define NVM_CFG1_GLOB_LANE1_PREEMP_MASK 0x0000FF00 + #define NVM_CFG1_GLOB_LANE1_PREEMP_OFFSET 8 + #define NVM_CFG1_GLOB_LANE2_PREEMP_MASK 0x00FF0000 + #define NVM_CFG1_GLOB_LANE2_PREEMP_OFFSET 16 + #define NVM_CFG1_GLOB_LANE3_PREEMP_MASK 0xFF000000 + #define NVM_CFG1_GLOB_LANE3_PREEMP_OFFSET 24 + u32 mps10_driver_current; /* 0x44 */ + #define NVM_CFG1_GLOB_LANE0_AMP_MASK 0x000000FF + #define NVM_CFG1_GLOB_LANE0_AMP_OFFSET 0 + #define NVM_CFG1_GLOB_LANE1_AMP_MASK 0x0000FF00 + #define NVM_CFG1_GLOB_LANE1_AMP_OFFSET 8 + #define NVM_CFG1_GLOB_LANE2_AMP_MASK 0x00FF0000 + #define NVM_CFG1_GLOB_LANE2_AMP_OFFSET 16 + #define NVM_CFG1_GLOB_LANE3_AMP_MASK 0xFF000000 + #define NVM_CFG1_GLOB_LANE3_AMP_OFFSET 24 + u32 mps25_preemphasis; /* 0x48 */ + #define NVM_CFG1_GLOB_LANE0_PREEMP_MASK 0x000000FF + #define NVM_CFG1_GLOB_LANE0_PREEMP_OFFSET 0 + #define NVM_CFG1_GLOB_LANE1_PREEMP_MASK 0x0000FF00 + #define NVM_CFG1_GLOB_LANE1_PREEMP_OFFSET 8 + #define NVM_CFG1_GLOB_LANE2_PREEMP_MASK 0x00FF0000 + #define NVM_CFG1_GLOB_LANE2_PREEMP_OFFSET 16 + #define NVM_CFG1_GLOB_LANE3_PREEMP_MASK 0xFF000000 + #define NVM_CFG1_GLOB_LANE3_PREEMP_OFFSET 24 + u32 mps25_driver_current; /* 0x4C */ + #define NVM_CFG1_GLOB_LANE0_AMP_MASK 0x000000FF + #define NVM_CFG1_GLOB_LANE0_AMP_OFFSET 0 + #define NVM_CFG1_GLOB_LANE1_AMP_MASK 0x0000FF00 + #define NVM_CFG1_GLOB_LANE1_AMP_OFFSET 8 + #define NVM_CFG1_GLOB_LANE2_AMP_MASK 0x00FF0000 + #define NVM_CFG1_GLOB_LANE2_AMP_OFFSET 16 + #define NVM_CFG1_GLOB_LANE3_AMP_MASK 0xFF000000 + #define NVM_CFG1_GLOB_LANE3_AMP_OFFSET 24 + u32 pci_id; /* 0x50 */ + #define NVM_CFG1_GLOB_VENDOR_ID_MASK 0x0000FFFF + #define NVM_CFG1_GLOB_VENDOR_ID_OFFSET 0 + /* Set caution temperature */ + #define NVM_CFG1_GLOB_DEAD_TEMP_TH_TEMPERATURE_MASK 0x00FF0000 + #define NVM_CFG1_GLOB_DEAD_TEMP_TH_TEMPERATURE_OFFSET 16 + /* Set external thermal sensor I2C address */ + #define NVM_CFG1_GLOB_EXTERNAL_THERMAL_SENSOR_ADDRESS_MASK \ + 0xFF000000 + #define NVM_CFG1_GLOB_EXTERNAL_THERMAL_SENSOR_ADDRESS_OFFSET 24 + u32 pci_subsys_id; /* 0x54 */ + #define NVM_CFG1_GLOB_SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFF + #define NVM_CFG1_GLOB_SUBSYSTEM_VENDOR_ID_OFFSET 0 + #define NVM_CFG1_GLOB_SUBSYSTEM_DEVICE_ID_MASK 0xFFFF0000 + #define NVM_CFG1_GLOB_SUBSYSTEM_DEVICE_ID_OFFSET 16 + u32 bar; /* 0x58 */ + #define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_MASK 0x0000000F + #define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_OFFSET 0 + #define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_DISABLED 0x0 + #define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_2K 0x1 + #define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_4K 0x2 + #define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_8K 0x3 + #define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_16K 0x4 + #define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_32K 0x5 + #define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_64K 0x6 + #define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_128K 0x7 + #define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_256K 0x8 + #define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_512K 0x9 + #define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_1M 0xA + #define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_2M 0xB + #define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_4M 0xC + #define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_8M 0xD + #define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_16M 0xE + #define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_32M 0xF + /* BB VF BAR2 size 
*/ + #define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_MASK 0x000000F0 + #define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_OFFSET 4 + #define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_DISABLED 0x0 + #define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_4K 0x1 + #define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_8K 0x2 + #define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_16K 0x3 + #define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_32K 0x4 + #define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_64K 0x5 + #define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_128K 0x6 + #define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_256K 0x7 + #define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_512K 0x8 + #define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_1M 0x9 + #define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_2M 0xA + #define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_4M 0xB + #define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_8M 0xC + #define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_16M 0xD + #define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_32M 0xE + #define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_64M 0xF + /* BB BAR2 size (global) */ + #define NVM_CFG1_GLOB_BAR2_SIZE_MASK 0x00000F00 + #define NVM_CFG1_GLOB_BAR2_SIZE_OFFSET 8 + #define NVM_CFG1_GLOB_BAR2_SIZE_DISABLED 0x0 + #define NVM_CFG1_GLOB_BAR2_SIZE_64K 0x1 + #define NVM_CFG1_GLOB_BAR2_SIZE_128K 0x2 + #define NVM_CFG1_GLOB_BAR2_SIZE_256K 0x3 + #define NVM_CFG1_GLOB_BAR2_SIZE_512K 0x4 + #define NVM_CFG1_GLOB_BAR2_SIZE_1M 0x5 + #define NVM_CFG1_GLOB_BAR2_SIZE_2M 0x6 + #define NVM_CFG1_GLOB_BAR2_SIZE_4M 0x7 + #define NVM_CFG1_GLOB_BAR2_SIZE_8M 0x8 + #define NVM_CFG1_GLOB_BAR2_SIZE_16M 0x9 + #define NVM_CFG1_GLOB_BAR2_SIZE_32M 0xA + #define NVM_CFG1_GLOB_BAR2_SIZE_64M 0xB + #define NVM_CFG1_GLOB_BAR2_SIZE_128M 0xC + #define NVM_CFG1_GLOB_BAR2_SIZE_256M 0xD + #define NVM_CFG1_GLOB_BAR2_SIZE_512M 0xE + #define NVM_CFG1_GLOB_BAR2_SIZE_1G 0xF + /* Set the duration, in secs, fan failure signal should be sampled */ + #define NVM_CFG1_GLOB_FAN_FAILURE_DURATION_MASK 0x0000F000 + #define NVM_CFG1_GLOB_FAN_FAILURE_DURATION_OFFSET 12 + /* This field defines the board total budget for bar2 when disabled + * the regular bar size is used. 
+ */ + #define NVM_CFG1_GLOB_BAR2_TOTAL_BUDGET_MASK 0x00FF0000 + #define NVM_CFG1_GLOB_BAR2_TOTAL_BUDGET_OFFSET 16 + #define NVM_CFG1_GLOB_BAR2_TOTAL_BUDGET_DISABLED 0x0 + #define NVM_CFG1_GLOB_BAR2_TOTAL_BUDGET_64K 0x1 + #define NVM_CFG1_GLOB_BAR2_TOTAL_BUDGET_128K 0x2 + #define NVM_CFG1_GLOB_BAR2_TOTAL_BUDGET_256K 0x3 + #define NVM_CFG1_GLOB_BAR2_TOTAL_BUDGET_512K 0x4 + #define NVM_CFG1_GLOB_BAR2_TOTAL_BUDGET_1M 0x5 + #define NVM_CFG1_GLOB_BAR2_TOTAL_BUDGET_2M 0x6 + #define NVM_CFG1_GLOB_BAR2_TOTAL_BUDGET_4M 0x7 + #define NVM_CFG1_GLOB_BAR2_TOTAL_BUDGET_8M 0x8 + #define NVM_CFG1_GLOB_BAR2_TOTAL_BUDGET_16M 0x9 + #define NVM_CFG1_GLOB_BAR2_TOTAL_BUDGET_32M 0xA + #define NVM_CFG1_GLOB_BAR2_TOTAL_BUDGET_64M 0xB + #define NVM_CFG1_GLOB_BAR2_TOTAL_BUDGET_128M 0xC + #define NVM_CFG1_GLOB_BAR2_TOTAL_BUDGET_256M 0xD + #define NVM_CFG1_GLOB_BAR2_TOTAL_BUDGET_512M 0xE + #define NVM_CFG1_GLOB_BAR2_TOTAL_BUDGET_1G 0xF + /* Enable/Disable Crash dump triggers */ + #define NVM_CFG1_GLOB_CRASH_DUMP_TRIGGER_ENABLE_MASK 0xFF000000 + #define NVM_CFG1_GLOB_CRASH_DUMP_TRIGGER_ENABLE_OFFSET 24 + u32 mps10_txfir_main; /* 0x5C */ + #define NVM_CFG1_GLOB_LANE0_TXFIR_MAIN_MASK 0x000000FF + #define NVM_CFG1_GLOB_LANE0_TXFIR_MAIN_OFFSET 0 + #define NVM_CFG1_GLOB_LANE1_TXFIR_MAIN_MASK 0x0000FF00 + #define NVM_CFG1_GLOB_LANE1_TXFIR_MAIN_OFFSET 8 + #define NVM_CFG1_GLOB_LANE2_TXFIR_MAIN_MASK 0x00FF0000 + #define NVM_CFG1_GLOB_LANE2_TXFIR_MAIN_OFFSET 16 + #define NVM_CFG1_GLOB_LANE3_TXFIR_MAIN_MASK 0xFF000000 + #define NVM_CFG1_GLOB_LANE3_TXFIR_MAIN_OFFSET 24 + u32 mps10_txfir_post; /* 0x60 */ + #define NVM_CFG1_GLOB_LANE0_TXFIR_POST_MASK 0x000000FF + #define NVM_CFG1_GLOB_LANE0_TXFIR_POST_OFFSET 0 + #define NVM_CFG1_GLOB_LANE1_TXFIR_POST_MASK 0x0000FF00 + #define NVM_CFG1_GLOB_LANE1_TXFIR_POST_OFFSET 8 + #define NVM_CFG1_GLOB_LANE2_TXFIR_POST_MASK 0x00FF0000 + #define NVM_CFG1_GLOB_LANE2_TXFIR_POST_OFFSET 16 + #define NVM_CFG1_GLOB_LANE3_TXFIR_POST_MASK 0xFF000000 + #define NVM_CFG1_GLOB_LANE3_TXFIR_POST_OFFSET 24 + u32 mps25_txfir_main; /* 0x64 */ + #define NVM_CFG1_GLOB_LANE0_TXFIR_MAIN_MASK 0x000000FF + #define NVM_CFG1_GLOB_LANE0_TXFIR_MAIN_OFFSET 0 + #define NVM_CFG1_GLOB_LANE1_TXFIR_MAIN_MASK 0x0000FF00 + #define NVM_CFG1_GLOB_LANE1_TXFIR_MAIN_OFFSET 8 + #define NVM_CFG1_GLOB_LANE2_TXFIR_MAIN_MASK 0x00FF0000 + #define NVM_CFG1_GLOB_LANE2_TXFIR_MAIN_OFFSET 16 + #define NVM_CFG1_GLOB_LANE3_TXFIR_MAIN_MASK 0xFF000000 + #define NVM_CFG1_GLOB_LANE3_TXFIR_MAIN_OFFSET 24 + u32 mps25_txfir_post; /* 0x68 */ + #define NVM_CFG1_GLOB_LANE0_TXFIR_POST_MASK 0x000000FF + #define NVM_CFG1_GLOB_LANE0_TXFIR_POST_OFFSET 0 + #define NVM_CFG1_GLOB_LANE1_TXFIR_POST_MASK 0x0000FF00 + #define NVM_CFG1_GLOB_LANE1_TXFIR_POST_OFFSET 8 + #define NVM_CFG1_GLOB_LANE2_TXFIR_POST_MASK 0x00FF0000 + #define NVM_CFG1_GLOB_LANE2_TXFIR_POST_OFFSET 16 + #define NVM_CFG1_GLOB_LANE3_TXFIR_POST_MASK 0xFF000000 + #define NVM_CFG1_GLOB_LANE3_TXFIR_POST_OFFSET 24 + u32 manufacture_ver; /* 0x6C */ + #define NVM_CFG1_GLOB_MANUF0_VER_MASK 0x0000003F + #define NVM_CFG1_GLOB_MANUF0_VER_OFFSET 0 + #define NVM_CFG1_GLOB_MANUF1_VER_MASK 0x00000FC0 + #define NVM_CFG1_GLOB_MANUF1_VER_OFFSET 6 + #define NVM_CFG1_GLOB_MANUF2_VER_MASK 0x0003F000 + #define NVM_CFG1_GLOB_MANUF2_VER_OFFSET 12 + #define NVM_CFG1_GLOB_MANUF3_VER_MASK 0x00FC0000 + #define NVM_CFG1_GLOB_MANUF3_VER_OFFSET 18 + #define NVM_CFG1_GLOB_MANUF4_VER_MASK 0x3F000000 + #define NVM_CFG1_GLOB_MANUF4_VER_OFFSET 24 + /* Select package id method */ + #define NVM_CFG1_GLOB_NCSI_PACKAGE_ID_IO_MASK 0x40000000 
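Every field in these nvm_cfg1 words follows the same convention: a <NAME>_MASK define selects the bits and a matching <NAME>_OFFSET gives the shift, with named values for enumerated fields. The sketch below shows the read idiom against the manufacture_ver word and the NVM_CFG1_GLOB_MANUF0_VER field defined just above; NVM_CFG_GET_FIELD() and the u32 typedef are illustrative placeholders, not part of this header, and the driver's own code would normally supply its own accessor helpers.

#include <stdint.h>

typedef uint32_t u32;   /* placeholder; the real driver uses its OSAL type definitions */

/* Illustrative helper: extract a bit-field described by the <NAME>_MASK /
 * <NAME>_OFFSET define pairs used throughout nvm_cfg.h.
 */
#define NVM_CFG_GET_FIELD(val, name) \
    (((val) & name##_MASK) >> name##_OFFSET)

/* Example: pull the first manufacturer-version digit out of manufacture_ver
 * (NVM_CFG1_GLOB_MANUF0_VER_MASK is 0x0000003F at offset 0).
 */
static u32 nvm_cfg_manuf0_ver(const struct nvm_cfg1_glob *glob)
{
    return NVM_CFG_GET_FIELD(glob->manufacture_ver, NVM_CFG1_GLOB_MANUF0_VER);
}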
+ #define NVM_CFG1_GLOB_NCSI_PACKAGE_ID_IO_OFFSET 30 + #define NVM_CFG1_GLOB_NCSI_PACKAGE_ID_IO_NVRAM 0x0 + #define NVM_CFG1_GLOB_NCSI_PACKAGE_ID_IO_IO_PINS 0x1 + #define NVM_CFG1_GLOB_RECOVERY_MODE_MASK 0x80000000 + #define NVM_CFG1_GLOB_RECOVERY_MODE_OFFSET 31 + #define NVM_CFG1_GLOB_RECOVERY_MODE_DISABLED 0x0 + #define NVM_CFG1_GLOB_RECOVERY_MODE_ENABLED 0x1 + u32 manufacture_time; /* 0x70 */ + #define NVM_CFG1_GLOB_MANUF0_TIME_MASK 0x0000003F + #define NVM_CFG1_GLOB_MANUF0_TIME_OFFSET 0 + #define NVM_CFG1_GLOB_MANUF1_TIME_MASK 0x00000FC0 + #define NVM_CFG1_GLOB_MANUF1_TIME_OFFSET 6 + #define NVM_CFG1_GLOB_MANUF2_TIME_MASK 0x0003F000 + #define NVM_CFG1_GLOB_MANUF2_TIME_OFFSET 12 + /* Max MSIX for Ethernet in default mode */ + #define NVM_CFG1_GLOB_MAX_MSIX_MASK 0x03FC0000 + #define NVM_CFG1_GLOB_MAX_MSIX_OFFSET 18 + /* PF Mapping */ + #define NVM_CFG1_GLOB_PF_MAPPING_MASK 0x0C000000 + #define NVM_CFG1_GLOB_PF_MAPPING_OFFSET 26 + #define NVM_CFG1_GLOB_PF_MAPPING_CONTINUOUS 0x0 + #define NVM_CFG1_GLOB_PF_MAPPING_FIXED 0x1 + #define NVM_CFG1_GLOB_VOLTAGE_REGULATOR_TYPE_MASK 0x30000000 + #define NVM_CFG1_GLOB_VOLTAGE_REGULATOR_TYPE_OFFSET 28 + #define NVM_CFG1_GLOB_VOLTAGE_REGULATOR_TYPE_DISABLED 0x0 + #define NVM_CFG1_GLOB_VOLTAGE_REGULATOR_TYPE_TI 0x1 + /* Enable/Disable PCIE Relaxed Ordering */ + #define NVM_CFG1_GLOB_PCIE_RELAXED_ORDERING_MASK 0x40000000 + #define NVM_CFG1_GLOB_PCIE_RELAXED_ORDERING_OFFSET 30 + #define NVM_CFG1_GLOB_PCIE_RELAXED_ORDERING_DISABLED 0x0 + #define NVM_CFG1_GLOB_PCIE_RELAXED_ORDERING_ENABLED 0x1 + /* Reset the chip using iPOR to release PCIe due to short PERST + * issues + */ + #define NVM_CFG1_GLOB_SHORT_PERST_PROTECTION_MASK 0x80000000 + #define NVM_CFG1_GLOB_SHORT_PERST_PROTECTION_OFFSET 31 + #define NVM_CFG1_GLOB_SHORT_PERST_PROTECTION_DISABLED 0x0 + #define NVM_CFG1_GLOB_SHORT_PERST_PROTECTION_ENABLED 0x1 + u32 led_global_settings; /* 0x74 */ + #define NVM_CFG1_GLOB_LED_SWAP_0_MASK 0x0000000F + #define NVM_CFG1_GLOB_LED_SWAP_0_OFFSET 0 + #define NVM_CFG1_GLOB_LED_SWAP_1_MASK 0x000000F0 + #define NVM_CFG1_GLOB_LED_SWAP_1_OFFSET 4 + #define NVM_CFG1_GLOB_LED_SWAP_2_MASK 0x00000F00 + #define NVM_CFG1_GLOB_LED_SWAP_2_OFFSET 8 + #define NVM_CFG1_GLOB_LED_SWAP_3_MASK 0x0000F000 + #define NVM_CFG1_GLOB_LED_SWAP_3_OFFSET 12 + /* Max. 
continues operating temperature */ + #define NVM_CFG1_GLOB_MAX_CONT_OPERATING_TEMP_MASK 0x00FF0000 + #define NVM_CFG1_GLOB_MAX_CONT_OPERATING_TEMP_OFFSET 16 + /* GPIO which triggers run-time port swap according to the map + * specified in option 205 + */ + #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_MASK 0xFF000000 + #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_OFFSET 24 + #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_NA 0x0 + #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO0 0x1 + #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO1 0x2 + #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO2 0x3 + #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO3 0x4 + #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO4 0x5 + #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO5 0x6 + #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO6 0x7 + #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO7 0x8 + #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO8 0x9 + #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO9 0xA + #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO10 0xB + #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO11 0xC + #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO12 0xD + #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO13 0xE + #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO14 0xF + #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO15 0x10 + #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO16 0x11 + #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO17 0x12 + #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO18 0x13 + #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO19 0x14 + #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO20 0x15 + #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO21 0x16 + #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO22 0x17 + #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO23 0x18 + #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO24 0x19 + #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO25 0x1A + #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO26 0x1B + #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO27 0x1C + #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO28 0x1D + #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO29 0x1E + #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO30 0x1F + #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO31 0x20 + u32 generic_cont1; /* 0x78 */ + #define NVM_CFG1_GLOB_AVS_DAC_CODE_MASK 0x000003FF + #define NVM_CFG1_GLOB_AVS_DAC_CODE_OFFSET 0 + #define NVM_CFG1_GLOB_LANE0_SWAP_MASK 0x00000C00 + #define NVM_CFG1_GLOB_LANE0_SWAP_OFFSET 10 + #define NVM_CFG1_GLOB_LANE1_SWAP_MASK 0x00003000 + #define NVM_CFG1_GLOB_LANE1_SWAP_OFFSET 12 + #define NVM_CFG1_GLOB_LANE2_SWAP_MASK 0x0000C000 + #define NVM_CFG1_GLOB_LANE2_SWAP_OFFSET 14 + #define NVM_CFG1_GLOB_LANE3_SWAP_MASK 0x00030000 + #define NVM_CFG1_GLOB_LANE3_SWAP_OFFSET 16 + /* Enable option 195 - Overriding the PCIe Preset value */ + #define NVM_CFG1_GLOB_OVERRIDE_PCIE_PRESET_EQUAL_MASK 0x00040000 + #define NVM_CFG1_GLOB_OVERRIDE_PCIE_PRESET_EQUAL_OFFSET 18 + #define NVM_CFG1_GLOB_OVERRIDE_PCIE_PRESET_EQUAL_DISABLED 0x0 + #define NVM_CFG1_GLOB_OVERRIDE_PCIE_PRESET_EQUAL_ENABLED 0x1 + /* PCIe Preset value - applies only if option 194 is enabled */ + #define NVM_CFG1_GLOB_PCIE_PRESET_VALUE_MASK 0x00780000 + #define NVM_CFG1_GLOB_PCIE_PRESET_VALUE_OFFSET 19 + /* Port mapping to be used when the run-time GPIO for port-swap is + * defined and set. 
+ */ + #define NVM_CFG1_GLOB_RUNTIME_PORT0_SWAP_MAP_MASK 0x01800000 + #define NVM_CFG1_GLOB_RUNTIME_PORT0_SWAP_MAP_OFFSET 23 + #define NVM_CFG1_GLOB_RUNTIME_PORT1_SWAP_MAP_MASK 0x06000000 + #define NVM_CFG1_GLOB_RUNTIME_PORT1_SWAP_MAP_OFFSET 25 + #define NVM_CFG1_GLOB_RUNTIME_PORT2_SWAP_MAP_MASK 0x18000000 + #define NVM_CFG1_GLOB_RUNTIME_PORT2_SWAP_MAP_OFFSET 27 + #define NVM_CFG1_GLOB_RUNTIME_PORT3_SWAP_MAP_MASK 0x60000000 + #define NVM_CFG1_GLOB_RUNTIME_PORT3_SWAP_MAP_OFFSET 29 + /* Option to Disable embedded LLDP, 0 - Off, 1 - On */ + #define NVM_CFG1_GLOB_LLDP_DISABLE_MASK 0x80000000 + #define NVM_CFG1_GLOB_LLDP_DISABLE_OFFSET 31 + #define NVM_CFG1_GLOB_LLDP_DISABLE_OFF 0x0 + #define NVM_CFG1_GLOB_LLDP_DISABLE_ON 0x1 + u32 mbi_version; /* 0x7C */ + #define NVM_CFG1_GLOB_MBI_VERSION_0_MASK 0x000000FF + #define NVM_CFG1_GLOB_MBI_VERSION_0_OFFSET 0 + #define NVM_CFG1_GLOB_MBI_VERSION_1_MASK 0x0000FF00 + #define NVM_CFG1_GLOB_MBI_VERSION_1_OFFSET 8 + #define NVM_CFG1_GLOB_MBI_VERSION_2_MASK 0x00FF0000 + #define NVM_CFG1_GLOB_MBI_VERSION_2_OFFSET 16 + /* If set to other than NA, 0 - Normal operation, 1 - Thermal event + * occurred + */ + #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_MASK 0xFF000000 + #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_OFFSET 24 + #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_NA 0x0 + #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO0 0x1 + #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO1 0x2 + #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO2 0x3 + #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO3 0x4 + #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO4 0x5 + #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO5 0x6 + #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO6 0x7 + #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO7 0x8 + #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO8 0x9 + #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO9 0xA + #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO10 0xB + #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO11 0xC + #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO12 0xD + #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO13 0xE + #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO14 0xF + #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO15 0x10 + #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO16 0x11 + #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO17 0x12 + #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO18 0x13 + #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO19 0x14 + #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO20 0x15 + #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO21 0x16 + #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO22 0x17 + #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO23 0x18 + #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO24 0x19 + #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO25 0x1A + #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO26 0x1B + #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO27 0x1C + #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO28 0x1D + #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO29 0x1E + #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO30 0x1F + #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO31 0x20 + u32 mbi_date; /* 0x80 */ + u32 misc_sig; /* 0x84 */ + /* Define the GPIO mapping to switch i2c mux */ + #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO_0_MASK 0x000000FF + #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO_0_OFFSET 0 + #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO_1_MASK 0x0000FF00 + #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO_1_OFFSET 8 + #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__NA 0x0 + #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO0 0x1 + #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO1 0x2 + #define 
NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO2 0x3 + #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO3 0x4 + #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO4 0x5 + #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO5 0x6 + #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO6 0x7 + #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO7 0x8 + #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO8 0x9 + #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO9 0xA + #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO10 0xB + #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO11 0xC + #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO12 0xD + #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO13 0xE + #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO14 0xF + #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO15 0x10 + #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO16 0x11 + #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO17 0x12 + #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO18 0x13 + #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO19 0x14 + #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO20 0x15 + #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO21 0x16 + #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO22 0x17 + #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO23 0x18 + #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO24 0x19 + #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO25 0x1A + #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO26 0x1B + #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO27 0x1C + #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO28 0x1D + #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO29 0x1E + #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO30 0x1F + #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO31 0x20 + /* Interrupt signal used for SMBus/I2C management interface + * 0 = Interrupt event occurred + * 1 = Normal + */ + #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_MASK 0x00FF0000 + #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_OFFSET 16 + #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_NA 0x0 + #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO0 0x1 + #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO1 0x2 + #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO2 0x3 + #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO3 0x4 + #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO4 0x5 + #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO5 0x6 + #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO6 0x7 + #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO7 0x8 + #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO8 0x9 + #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO9 0xA + #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO10 0xB + #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO11 0xC + #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO12 0xD + #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO13 0xE + #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO14 0xF + #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO15 0x10 + #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO16 0x11 + #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO17 0x12 + #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO18 0x13 + #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO19 0x14 + #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO20 0x15 + #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO21 0x16 + #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO22 0x17 + #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO23 0x18 + #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO24 0x19 + #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO25 0x1A + #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO26 0x1B + #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO27 0x1C + #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO28 0x1D + #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO29 0x1E + #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO30 0x1F + 
#define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO31 0x20 + /* Set aLOM FAN on GPIO */ + #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_MASK 0xFF000000 + #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_OFFSET 24 + #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_NA 0x0 + #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO0 0x1 + #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO1 0x2 + #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO2 0x3 + #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO3 0x4 + #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO4 0x5 + #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO5 0x6 + #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO6 0x7 + #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO7 0x8 + #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO8 0x9 + #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO9 0xA + #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO10 0xB + #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO11 0xC + #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO12 0xD + #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO13 0xE + #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO14 0xF + #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO15 0x10 + #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO16 0x11 + #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO17 0x12 + #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO18 0x13 + #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO19 0x14 + #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO20 0x15 + #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO21 0x16 + #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO22 0x17 + #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO23 0x18 + #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO24 0x19 + #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO25 0x1A + #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO26 0x1B + #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO27 0x1C + #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO28 0x1D + #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO29 0x1E + #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO30 0x1F + #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO31 0x20 + u32 device_capabilities; /* 0x88 */ + #define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET 0x1 + #define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_FCOE 0x2 + #define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ISCSI 0x4 + #define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ROCE 0x8 + #define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_IWARP 0x10 + u32 power_dissipated; /* 0x8C */ + #define NVM_CFG1_GLOB_POWER_DIS_D0_MASK 0x000000FF + #define NVM_CFG1_GLOB_POWER_DIS_D0_OFFSET 0 + #define NVM_CFG1_GLOB_POWER_DIS_D1_MASK 0x0000FF00 + #define NVM_CFG1_GLOB_POWER_DIS_D1_OFFSET 8 + #define NVM_CFG1_GLOB_POWER_DIS_D2_MASK 0x00FF0000 + #define NVM_CFG1_GLOB_POWER_DIS_D2_OFFSET 16 + #define NVM_CFG1_GLOB_POWER_DIS_D3_MASK 0xFF000000 + #define NVM_CFG1_GLOB_POWER_DIS_D3_OFFSET 24 + u32 power_consumed; /* 0x90 */ + #define NVM_CFG1_GLOB_POWER_CONS_D0_MASK 0x000000FF + #define NVM_CFG1_GLOB_POWER_CONS_D0_OFFSET 0 + #define NVM_CFG1_GLOB_POWER_CONS_D1_MASK 0x0000FF00 + #define NVM_CFG1_GLOB_POWER_CONS_D1_OFFSET 8 + #define NVM_CFG1_GLOB_POWER_CONS_D2_MASK 0x00FF0000 + #define NVM_CFG1_GLOB_POWER_CONS_D2_OFFSET 16 + #define NVM_CFG1_GLOB_POWER_CONS_D3_MASK 0xFF000000 + #define NVM_CFG1_GLOB_POWER_CONS_D3_OFFSET 24 + u32 efi_version; /* 0x94 */ + u32 multi_network_modes_capability; /* 0x98 */ + #define NVM_CFG1_GLOB_MULTI_NETWORK_MODES_CAPABILITY_4X10G 0x1 + #define NVM_CFG1_GLOB_MULTI_NETWORK_MODES_CAPABILITY_1X25G 0x2 + #define NVM_CFG1_GLOB_MULTI_NETWORK_MODES_CAPABILITY_2X25G 0x4 + #define 
NVM_CFG1_GLOB_MULTI_NETWORK_MODES_CAPABILITY_4X25G 0x8 + #define NVM_CFG1_GLOB_MULTI_NETWORK_MODES_CAPABILITY_1X40G 0x10 + #define NVM_CFG1_GLOB_MULTI_NETWORK_MODES_CAPABILITY_2X40G 0x20 + #define NVM_CFG1_GLOB_MULTI_NETWORK_MODES_CAPABILITY_2X50G 0x40 + #define NVM_CFG1_GLOB_MULTI_NETWORK_MODES_CAPABILITY_BB_1X100G \ + 0x80 + #define NVM_CFG1_GLOB_MULTI_NETWORK_MODES_CAPABILITY_2X10G 0x100 + /* @DPDK */ + u32 reserved1[12]; /* 0x9C */ + u32 oem1_number[8]; /* 0xCC */ + u32 oem2_number[8]; /* 0xEC */ + u32 mps25_active_txfir_pre; /* 0x10C */ + #define NVM_CFG1_GLOB_LANE0_ACT_TXFIR_PRE_MASK 0x000000FF + #define NVM_CFG1_GLOB_LANE0_ACT_TXFIR_PRE_OFFSET 0 + #define NVM_CFG1_GLOB_LANE1_ACT_TXFIR_PRE_MASK 0x0000FF00 + #define NVM_CFG1_GLOB_LANE1_ACT_TXFIR_PRE_OFFSET 8 + #define NVM_CFG1_GLOB_LANE2_ACT_TXFIR_PRE_MASK 0x00FF0000 + #define NVM_CFG1_GLOB_LANE2_ACT_TXFIR_PRE_OFFSET 16 + #define NVM_CFG1_GLOB_LANE3_ACT_TXFIR_PRE_MASK 0xFF000000 + #define NVM_CFG1_GLOB_LANE3_ACT_TXFIR_PRE_OFFSET 24 + u32 mps25_active_txfir_main; /* 0x110 */ + #define NVM_CFG1_GLOB_LANE0_ACT_TXFIR_MAIN_MASK 0x000000FF + #define NVM_CFG1_GLOB_LANE0_ACT_TXFIR_MAIN_OFFSET 0 + #define NVM_CFG1_GLOB_LANE1_ACT_TXFIR_MAIN_MASK 0x0000FF00 + #define NVM_CFG1_GLOB_LANE1_ACT_TXFIR_MAIN_OFFSET 8 + #define NVM_CFG1_GLOB_LANE2_ACT_TXFIR_MAIN_MASK 0x00FF0000 + #define NVM_CFG1_GLOB_LANE2_ACT_TXFIR_MAIN_OFFSET 16 + #define NVM_CFG1_GLOB_LANE3_ACT_TXFIR_MAIN_MASK 0xFF000000 + #define NVM_CFG1_GLOB_LANE3_ACT_TXFIR_MAIN_OFFSET 24 + u32 mps25_active_txfir_post; /* 0x114 */ + #define NVM_CFG1_GLOB_LANE0_ACT_TXFIR_POST_MASK 0x000000FF + #define NVM_CFG1_GLOB_LANE0_ACT_TXFIR_POST_OFFSET 0 + #define NVM_CFG1_GLOB_LANE1_ACT_TXFIR_POST_MASK 0x0000FF00 + #define NVM_CFG1_GLOB_LANE1_ACT_TXFIR_POST_OFFSET 8 + #define NVM_CFG1_GLOB_LANE2_ACT_TXFIR_POST_MASK 0x00FF0000 + #define NVM_CFG1_GLOB_LANE2_ACT_TXFIR_POST_OFFSET 16 + #define NVM_CFG1_GLOB_LANE3_ACT_TXFIR_POST_MASK 0xFF000000 + #define NVM_CFG1_GLOB_LANE3_ACT_TXFIR_POST_OFFSET 24 + u32 features; /* 0x118 */ + /* Set the Aux Fan on temperature */ + #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_VALUE_MASK 0x000000FF + #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_VALUE_OFFSET 0 + /* Set NC-SI package ID */ + #define NVM_CFG1_GLOB_SLOT_ID_GPIO_MASK 0x0000FF00 + #define NVM_CFG1_GLOB_SLOT_ID_GPIO_OFFSET 8 + #define NVM_CFG1_GLOB_SLOT_ID_GPIO_NA 0x0 + #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO0 0x1 + #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO1 0x2 + #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO2 0x3 + #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO3 0x4 + #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO4 0x5 + #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO5 0x6 + #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO6 0x7 + #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO7 0x8 + #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO8 0x9 + #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO9 0xA + #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO10 0xB + #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO11 0xC + #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO12 0xD + #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO13 0xE + #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO14 0xF + #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO15 0x10 + #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO16 0x11 + #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO17 0x12 + #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO18 0x13 + #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO19 0x14 + #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO20 0x15 + #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO21 0x16 + #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO22 0x17 + #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO23 0x18 + #define 
NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO24 0x19 + #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO25 0x1A + #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO26 0x1B + #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO27 0x1C + #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO28 0x1D + #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO29 0x1E + #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO30 0x1F + #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO31 0x20 + /* PMBUS Clock GPIO */ + #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_MASK 0x00FF0000 + #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_OFFSET 16 + #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_NA 0x0 + #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO0 0x1 + #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO1 0x2 + #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO2 0x3 + #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO3 0x4 + #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO4 0x5 + #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO5 0x6 + #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO6 0x7 + #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO7 0x8 + #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO8 0x9 + #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO9 0xA + #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO10 0xB + #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO11 0xC + #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO12 0xD + #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO13 0xE + #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO14 0xF + #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO15 0x10 + #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO16 0x11 + #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO17 0x12 + #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO18 0x13 + #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO19 0x14 + #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO20 0x15 + #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO21 0x16 + #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO22 0x17 + #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO23 0x18 + #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO24 0x19 + #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO25 0x1A + #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO26 0x1B + #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO27 0x1C + #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO28 0x1D + #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO29 0x1E + #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO30 0x1F + #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO31 0x20 + /* PMBUS Data GPIO */ + #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_MASK 0xFF000000 + #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_OFFSET 24 + #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_NA 0x0 + #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO0 0x1 + #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO1 0x2 + #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO2 0x3 + #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO3 0x4 + #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO4 0x5 + #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO5 0x6 + #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO6 0x7 + #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO7 0x8 + #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO8 0x9 + #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO9 0xA + #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO10 0xB + #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO11 0xC + #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO12 0xD + #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO13 0xE + #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO14 0xF + #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO15 0x10 + #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO16 0x11 + #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO17 0x12 + #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO18 0x13 + #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO19 0x14 + #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO20 0x15 + #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO21 0x16 + #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO22 0x17 + #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO23 0x18 + #define 
NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO24 0x19 + #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO25 0x1A + #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO26 0x1B + #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO27 0x1C + #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO28 0x1D + #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO29 0x1E + #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO30 0x1F + #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO31 0x20 + u32 tx_rx_eq_25g_hlpc; /* 0x11C */ + #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_25G_HLPC_MASK 0x000000FF + #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_25G_HLPC_OFFSET 0 + #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_25G_HLPC_MASK 0x0000FF00 + #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_25G_HLPC_OFFSET 8 + #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_25G_HLPC_MASK 0x00FF0000 + #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_25G_HLPC_OFFSET 16 + #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_25G_HLPC_MASK 0xFF000000 + #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_25G_HLPC_OFFSET 24 + u32 tx_rx_eq_25g_llpc; /* 0x120 */ + #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_25G_LLPC_MASK 0x000000FF + #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_25G_LLPC_OFFSET 0 + #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_25G_LLPC_MASK 0x0000FF00 + #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_25G_LLPC_OFFSET 8 + #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_25G_LLPC_MASK 0x00FF0000 + #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_25G_LLPC_OFFSET 16 + #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_25G_LLPC_MASK 0xFF000000 + #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_25G_LLPC_OFFSET 24 + u32 tx_rx_eq_25g_ac; /* 0x124 */ + #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_25G_AC_MASK 0x000000FF + #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_25G_AC_OFFSET 0 + #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_25G_AC_MASK 0x0000FF00 + #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_25G_AC_OFFSET 8 + #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_25G_AC_MASK 0x00FF0000 + #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_25G_AC_OFFSET 16 + #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_25G_AC_MASK 0xFF000000 + #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_25G_AC_OFFSET 24 + u32 tx_rx_eq_10g_pc; /* 0x128 */ + #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_10G_PC_MASK 0x000000FF + #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_10G_PC_OFFSET 0 + #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_10G_PC_MASK 0x0000FF00 + #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_10G_PC_OFFSET 8 + #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_10G_PC_MASK 0x00FF0000 + #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_10G_PC_OFFSET 16 + #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_10G_PC_MASK 0xFF000000 + #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_10G_PC_OFFSET 24 + u32 tx_rx_eq_10g_ac; /* 0x12C */ + #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_10G_AC_MASK 0x000000FF + #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_10G_AC_OFFSET 0 + #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_10G_AC_MASK 0x0000FF00 + #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_10G_AC_OFFSET 8 + #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_10G_AC_MASK 0x00FF0000 + #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_10G_AC_OFFSET 16 + #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_10G_AC_MASK 0xFF000000 + #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_10G_AC_OFFSET 24 + u32 tx_rx_eq_1g; /* 0x130 */ + #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_1G_MASK 0x000000FF + #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_1G_OFFSET 0 + #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_1G_MASK 0x0000FF00 + #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_1G_OFFSET 8 + #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_1G_MASK 0x00FF0000 + #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_1G_OFFSET 16 + #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_1G_MASK 0xFF000000 + #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_1G_OFFSET 24 + u32 tx_rx_eq_25g_bt; /* 0x134 */ + #define 
NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_25G_BT_MASK 0x000000FF + #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_25G_BT_OFFSET 0 + #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_25G_BT_MASK 0x0000FF00 + #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_25G_BT_OFFSET 8 + #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_25G_BT_MASK 0x00FF0000 + #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_25G_BT_OFFSET 16 + #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_25G_BT_MASK 0xFF000000 + #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_25G_BT_OFFSET 24 + u32 tx_rx_eq_10g_bt; /* 0x138 */ + #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_10G_BT_MASK 0x000000FF + #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_10G_BT_OFFSET 0 + #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_10G_BT_MASK 0x0000FF00 + #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_10G_BT_OFFSET 8 + #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_10G_BT_MASK 0x00FF0000 + #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_10G_BT_OFFSET 16 + #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_10G_BT_MASK 0xFF000000 + #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_10G_BT_OFFSET 24 + u32 generic_cont4; /* 0x13C */ + #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_MASK 0x000000FF + #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_OFFSET 0 + #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_NA 0x0 + #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO0 0x1 + #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO1 0x2 + #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO2 0x3 + #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO3 0x4 + #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO4 0x5 + #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO5 0x6 + #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO6 0x7 + #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO7 0x8 + #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO8 0x9 + #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO9 0xA + #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO10 0xB + #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO11 0xC + #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO12 0xD + #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO13 0xE + #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO14 0xF + #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO15 0x10 + #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO16 0x11 + #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO17 0x12 + #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO18 0x13 + #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO19 0x14 + #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO20 0x15 + #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO21 0x16 + #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO22 0x17 + #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO23 0x18 + #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO24 0x19 + #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO25 0x1A + #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO26 0x1B + #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO27 0x1C + #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO28 0x1D + #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO29 0x1E + #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO30 0x1F + #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO31 0x20 + /* Select the number of allowed port link in aux power */ + #define NVM_CFG1_GLOB_NCSI_AUX_LINK_MASK 0x00000300 + #define NVM_CFG1_GLOB_NCSI_AUX_LINK_OFFSET 8 + #define NVM_CFG1_GLOB_NCSI_AUX_LINK_DEFAULT 0x0 + #define NVM_CFG1_GLOB_NCSI_AUX_LINK_1_PORT 0x1 + #define NVM_CFG1_GLOB_NCSI_AUX_LINK_2_PORTS 0x2 + #define NVM_CFG1_GLOB_NCSI_AUX_LINK_3_PORTS 0x3 + /* Set Trace Filter Log Level */ + #define NVM_CFG1_GLOB_TRACE_LEVEL_MASK 0x00000C00 + #define NVM_CFG1_GLOB_TRACE_LEVEL_OFFSET 10 + #define NVM_CFG1_GLOB_TRACE_LEVEL_ALL 0x0 + #define NVM_CFG1_GLOB_TRACE_LEVEL_DEBUG 0x1 + #define NVM_CFG1_GLOB_TRACE_LEVEL_TRACE 0x2 + #define 
NVM_CFG1_GLOB_TRACE_LEVEL_ERROR 0x3 + /* For OCP2.0, MFW listens on SMBUS slave address 0x3e, and return + * temperature reading + */ + #define NVM_CFG1_GLOB_EMULATED_TMP421_MASK 0x00001000 + #define NVM_CFG1_GLOB_EMULATED_TMP421_OFFSET 12 + #define NVM_CFG1_GLOB_EMULATED_TMP421_DISABLED 0x0 + #define NVM_CFG1_GLOB_EMULATED_TMP421_ENABLED 0x1 + /* GPIO which triggers when ASIC temperature reaches nvm option 286 + * value + */ + #define NVM_CFG1_GLOB_WARNING_TEMPERATURE_GPIO_MASK 0x001FE000 + #define NVM_CFG1_GLOB_WARNING_TEMPERATURE_GPIO_OFFSET 13 + #define NVM_CFG1_GLOB_WARNING_TEMPERATURE_GPIO_NA 0x0 + #define NVM_CFG1_GLOB_WARNING_TEMPERATURE_GPIO_GPIO0 0x1 + #define NVM_CFG1_GLOB_WARNING_TEMPERATURE_GPIO_GPIO1 0x2 + #define NVM_CFG1_GLOB_WARNING_TEMPERATURE_GPIO_GPIO2 0x3 + #define NVM_CFG1_GLOB_WARNING_TEMPERATURE_GPIO_GPIO3 0x4 + #define NVM_CFG1_GLOB_WARNING_TEMPERATURE_GPIO_GPIO4 0x5 + #define NVM_CFG1_GLOB_WARNING_TEMPERATURE_GPIO_GPIO5 0x6 + #define NVM_CFG1_GLOB_WARNING_TEMPERATURE_GPIO_GPIO6 0x7 + #define NVM_CFG1_GLOB_WARNING_TEMPERATURE_GPIO_GPIO7 0x8 + #define NVM_CFG1_GLOB_WARNING_TEMPERATURE_GPIO_GPIO8 0x9 + #define NVM_CFG1_GLOB_WARNING_TEMPERATURE_GPIO_GPIO9 0xA + #define NVM_CFG1_GLOB_WARNING_TEMPERATURE_GPIO_GPIO10 0xB + #define NVM_CFG1_GLOB_WARNING_TEMPERATURE_GPIO_GPIO11 0xC + #define NVM_CFG1_GLOB_WARNING_TEMPERATURE_GPIO_GPIO12 0xD + #define NVM_CFG1_GLOB_WARNING_TEMPERATURE_GPIO_GPIO13 0xE + #define NVM_CFG1_GLOB_WARNING_TEMPERATURE_GPIO_GPIO14 0xF + #define NVM_CFG1_GLOB_WARNING_TEMPERATURE_GPIO_GPIO15 0x10 + #define NVM_CFG1_GLOB_WARNING_TEMPERATURE_GPIO_GPIO16 0x11 + #define NVM_CFG1_GLOB_WARNING_TEMPERATURE_GPIO_GPIO17 0x12 + #define NVM_CFG1_GLOB_WARNING_TEMPERATURE_GPIO_GPIO18 0x13 + #define NVM_CFG1_GLOB_WARNING_TEMPERATURE_GPIO_GPIO19 0x14 + #define NVM_CFG1_GLOB_WARNING_TEMPERATURE_GPIO_GPIO20 0x15 + #define NVM_CFG1_GLOB_WARNING_TEMPERATURE_GPIO_GPIO21 0x16 + #define NVM_CFG1_GLOB_WARNING_TEMPERATURE_GPIO_GPIO22 0x17 + #define NVM_CFG1_GLOB_WARNING_TEMPERATURE_GPIO_GPIO23 0x18 + #define NVM_CFG1_GLOB_WARNING_TEMPERATURE_GPIO_GPIO24 0x19 + #define NVM_CFG1_GLOB_WARNING_TEMPERATURE_GPIO_GPIO25 0x1A + #define NVM_CFG1_GLOB_WARNING_TEMPERATURE_GPIO_GPIO26 0x1B + #define NVM_CFG1_GLOB_WARNING_TEMPERATURE_GPIO_GPIO27 0x1C + #define NVM_CFG1_GLOB_WARNING_TEMPERATURE_GPIO_GPIO28 0x1D + #define NVM_CFG1_GLOB_WARNING_TEMPERATURE_GPIO_GPIO29 0x1E + #define NVM_CFG1_GLOB_WARNING_TEMPERATURE_GPIO_GPIO30 0x1F + #define NVM_CFG1_GLOB_WARNING_TEMPERATURE_GPIO_GPIO31 0x20 + /* Warning temperature threshold used with nvm option 286 */ + #define NVM_CFG1_GLOB_WARNING_TEMPERATURE_THRESHOLD_MASK 0x1FE00000 + #define NVM_CFG1_GLOB_WARNING_TEMPERATURE_THRESHOLD_OFFSET 21 + /* Disable PLDM protocol */ + #define NVM_CFG1_GLOB_DISABLE_PLDM_MASK 0x20000000 + #define NVM_CFG1_GLOB_DISABLE_PLDM_OFFSET 29 + #define NVM_CFG1_GLOB_DISABLE_PLDM_DISABLED 0x0 + #define NVM_CFG1_GLOB_DISABLE_PLDM_ENABLED 0x1 + /* Disable OCBB protocol */ + #define NVM_CFG1_GLOB_DISABLE_MCTP_OEM_MASK 0x40000000 + #define NVM_CFG1_GLOB_DISABLE_MCTP_OEM_OFFSET 30 + #define NVM_CFG1_GLOB_DISABLE_MCTP_OEM_DISABLED 0x0 + #define NVM_CFG1_GLOB_DISABLE_MCTP_OEM_ENABLED 0x1 + u32 preboot_debug_mode_std; /* 0x140 */ + u32 preboot_debug_mode_ext; /* 0x144 */ + u32 ext_phy_cfg1; /* 0x148 */ + /* Ext PHY MDI pair swap value */ + #define NVM_CFG1_GLOB_RESERVED_244_MASK 0x0000FFFF + #define NVM_CFG1_GLOB_RESERVED_244_OFFSET 0 + /* Define for PGOOD signal Mapping for EXT PHY */ + #define NVM_CFG1_GLOB_EXT_PHY_PGOOD_MASK 
0x00FF0000 + #define NVM_CFG1_GLOB_EXT_PHY_PGOOD_OFFSET 16 + #define NVM_CFG1_GLOB_EXT_PHY_PGOOD_NA 0x0 + #define NVM_CFG1_GLOB_EXT_PHY_PGOOD_GPIO0 0x1 + #define NVM_CFG1_GLOB_EXT_PHY_PGOOD_GPIO1 0x2 + #define NVM_CFG1_GLOB_EXT_PHY_PGOOD_GPIO2 0x3 + #define NVM_CFG1_GLOB_EXT_PHY_PGOOD_GPIO3 0x4 + #define NVM_CFG1_GLOB_EXT_PHY_PGOOD_GPIO4 0x5 + #define NVM_CFG1_GLOB_EXT_PHY_PGOOD_GPIO5 0x6 + #define NVM_CFG1_GLOB_EXT_PHY_PGOOD_GPIO6 0x7 + #define NVM_CFG1_GLOB_EXT_PHY_PGOOD_GPIO7 0x8 + #define NVM_CFG1_GLOB_EXT_PHY_PGOOD_GPIO8 0x9 + #define NVM_CFG1_GLOB_EXT_PHY_PGOOD_GPIO9 0xA + #define NVM_CFG1_GLOB_EXT_PHY_PGOOD_GPIO10 0xB + #define NVM_CFG1_GLOB_EXT_PHY_PGOOD_GPIO11 0xC + #define NVM_CFG1_GLOB_EXT_PHY_PGOOD_GPIO12 0xD + #define NVM_CFG1_GLOB_EXT_PHY_PGOOD_GPIO13 0xE + #define NVM_CFG1_GLOB_EXT_PHY_PGOOD_GPIO14 0xF + #define NVM_CFG1_GLOB_EXT_PHY_PGOOD_GPIO15 0x10 + #define NVM_CFG1_GLOB_EXT_PHY_PGOOD_GPIO16 0x11 + #define NVM_CFG1_GLOB_EXT_PHY_PGOOD_GPIO17 0x12 + #define NVM_CFG1_GLOB_EXT_PHY_PGOOD_GPIO18 0x13 + #define NVM_CFG1_GLOB_EXT_PHY_PGOOD_GPIO19 0x14 + #define NVM_CFG1_GLOB_EXT_PHY_PGOOD_GPIO20 0x15 + #define NVM_CFG1_GLOB_EXT_PHY_PGOOD_GPIO21 0x16 + #define NVM_CFG1_GLOB_EXT_PHY_PGOOD_GPIO22 0x17 + #define NVM_CFG1_GLOB_EXT_PHY_PGOOD_GPIO23 0x18 + #define NVM_CFG1_GLOB_EXT_PHY_PGOOD_GPIO24 0x19 + #define NVM_CFG1_GLOB_EXT_PHY_PGOOD_GPIO25 0x1A + #define NVM_CFG1_GLOB_EXT_PHY_PGOOD_GPIO26 0x1B + #define NVM_CFG1_GLOB_EXT_PHY_PGOOD_GPIO27 0x1C + #define NVM_CFG1_GLOB_EXT_PHY_PGOOD_GPIO28 0x1D + #define NVM_CFG1_GLOB_EXT_PHY_PGOOD_GPIO29 0x1E + #define NVM_CFG1_GLOB_EXT_PHY_PGOOD_GPIO30 0x1F + #define NVM_CFG1_GLOB_EXT_PHY_PGOOD_GPIO31 0x20 + /* GPIO which trigger when PERST asserted */ + #define NVM_CFG1_GLOB_PERST_INDICATION_GPIO_MASK 0xFF000000 + #define NVM_CFG1_GLOB_PERST_INDICATION_GPIO_OFFSET 24 + #define NVM_CFG1_GLOB_PERST_INDICATION_GPIO_NA 0x0 + #define NVM_CFG1_GLOB_PERST_INDICATION_GPIO_GPIO0 0x1 + #define NVM_CFG1_GLOB_PERST_INDICATION_GPIO_GPIO1 0x2 + #define NVM_CFG1_GLOB_PERST_INDICATION_GPIO_GPIO2 0x3 + #define NVM_CFG1_GLOB_PERST_INDICATION_GPIO_GPIO3 0x4 + #define NVM_CFG1_GLOB_PERST_INDICATION_GPIO_GPIO4 0x5 + #define NVM_CFG1_GLOB_PERST_INDICATION_GPIO_GPIO5 0x6 + #define NVM_CFG1_GLOB_PERST_INDICATION_GPIO_GPIO6 0x7 + #define NVM_CFG1_GLOB_PERST_INDICATION_GPIO_GPIO7 0x8 + #define NVM_CFG1_GLOB_PERST_INDICATION_GPIO_GPIO8 0x9 + #define NVM_CFG1_GLOB_PERST_INDICATION_GPIO_GPIO9 0xA + #define NVM_CFG1_GLOB_PERST_INDICATION_GPIO_GPIO10 0xB + #define NVM_CFG1_GLOB_PERST_INDICATION_GPIO_GPIO11 0xC + #define NVM_CFG1_GLOB_PERST_INDICATION_GPIO_GPIO12 0xD + #define NVM_CFG1_GLOB_PERST_INDICATION_GPIO_GPIO13 0xE + #define NVM_CFG1_GLOB_PERST_INDICATION_GPIO_GPIO14 0xF + #define NVM_CFG1_GLOB_PERST_INDICATION_GPIO_GPIO15 0x10 + #define NVM_CFG1_GLOB_PERST_INDICATION_GPIO_GPIO16 0x11 + #define NVM_CFG1_GLOB_PERST_INDICATION_GPIO_GPIO17 0x12 + #define NVM_CFG1_GLOB_PERST_INDICATION_GPIO_GPIO18 0x13 + #define NVM_CFG1_GLOB_PERST_INDICATION_GPIO_GPIO19 0x14 + #define NVM_CFG1_GLOB_PERST_INDICATION_GPIO_GPIO20 0x15 + #define NVM_CFG1_GLOB_PERST_INDICATION_GPIO_GPIO21 0x16 + #define NVM_CFG1_GLOB_PERST_INDICATION_GPIO_GPIO22 0x17 + #define NVM_CFG1_GLOB_PERST_INDICATION_GPIO_GPIO23 0x18 + #define NVM_CFG1_GLOB_PERST_INDICATION_GPIO_GPIO24 0x19 + #define NVM_CFG1_GLOB_PERST_INDICATION_GPIO_GPIO25 0x1A + #define NVM_CFG1_GLOB_PERST_INDICATION_GPIO_GPIO26 0x1B + #define NVM_CFG1_GLOB_PERST_INDICATION_GPIO_GPIO27 0x1C + #define NVM_CFG1_GLOB_PERST_INDICATION_GPIO_GPIO28 0x1D 
+ #define NVM_CFG1_GLOB_PERST_INDICATION_GPIO_GPIO29 0x1E + #define NVM_CFG1_GLOB_PERST_INDICATION_GPIO_GPIO30 0x1F + #define NVM_CFG1_GLOB_PERST_INDICATION_GPIO_GPIO31 0x20 + u32 clocks; /* 0x14C */ + /* Sets core clock frequency */ + #define NVM_CFG1_GLOB_MAIN_CLOCK_FREQUENCY_MASK 0x000000FF + #define NVM_CFG1_GLOB_MAIN_CLOCK_FREQUENCY_OFFSET 0 + #define NVM_CFG1_GLOB_MAIN_CLOCK_FREQUENCY_MAIN_CLK_DEFAULT 0x0 + #define NVM_CFG1_GLOB_MAIN_CLOCK_FREQUENCY_MAIN_CLK_375 0x1 + #define NVM_CFG1_GLOB_MAIN_CLOCK_FREQUENCY_MAIN_CLK_350 0x2 + #define NVM_CFG1_GLOB_MAIN_CLOCK_FREQUENCY_MAIN_CLK_325 0x3 + #define NVM_CFG1_GLOB_MAIN_CLOCK_FREQUENCY_MAIN_CLK_300 0x4 + #define NVM_CFG1_GLOB_MAIN_CLOCK_FREQUENCY_MAIN_CLK_280 0x5 + /* Sets MAC clock frequency */ + #define NVM_CFG1_GLOB_MAC_CLOCK_FREQUENCY_MASK 0x0000FF00 + #define NVM_CFG1_GLOB_MAC_CLOCK_FREQUENCY_OFFSET 8 + #define NVM_CFG1_GLOB_MAC_CLOCK_FREQUENCY_MAC_CLK_DEFAULT 0x0 + #define NVM_CFG1_GLOB_MAC_CLOCK_FREQUENCY_MAC_CLK_782 0x1 + #define NVM_CFG1_GLOB_MAC_CLOCK_FREQUENCY_MAC_CLK_516 0x2 + /* Sets storm clock frequency */ + #define NVM_CFG1_GLOB_STORM_CLOCK_FREQUENCY_MASK 0x00FF0000 + #define NVM_CFG1_GLOB_STORM_CLOCK_FREQUENCY_OFFSET 16 + #define NVM_CFG1_GLOB_STORM_CLOCK_FREQUENCY_STORM_CLK_DEFAULT 0x0 + #define NVM_CFG1_GLOB_STORM_CLOCK_FREQUENCY_STORM_CLK_1200 0x1 + #define NVM_CFG1_GLOB_STORM_CLOCK_FREQUENCY_STORM_CLK_1000 0x2 + #define NVM_CFG1_GLOB_STORM_CLOCK_FREQUENCY_STORM_CLK_900 0x3 + #define NVM_CFG1_GLOB_STORM_CLOCK_FREQUENCY_STORM_CLK_1100 0x4 + /* Non zero value will override PCIe AGC threshold to improve + * receiver + */ + #define NVM_CFG1_GLOB_OVERRIDE_AGC_THRESHOLD_MASK 0xFF000000 + #define NVM_CFG1_GLOB_OVERRIDE_AGC_THRESHOLD_OFFSET 24 + u32 pre2_generic_cont_1; /* 0x150 */ + #define NVM_CFG1_GLOB_50G_HLPC_PRE2_MASK 0x000000FF + #define NVM_CFG1_GLOB_50G_HLPC_PRE2_OFFSET 0 + #define NVM_CFG1_GLOB_50G_MLPC_PRE2_MASK 0x0000FF00 + #define NVM_CFG1_GLOB_50G_MLPC_PRE2_OFFSET 8 + #define NVM_CFG1_GLOB_50G_LLPC_PRE2_MASK 0x00FF0000 + #define NVM_CFG1_GLOB_50G_LLPC_PRE2_OFFSET 16 + #define NVM_CFG1_GLOB_25G_HLPC_PRE2_MASK 0xFF000000 + #define NVM_CFG1_GLOB_25G_HLPC_PRE2_OFFSET 24 + u32 pre2_generic_cont_2; /* 0x154 */ + #define NVM_CFG1_GLOB_25G_LLPC_PRE2_MASK 0x000000FF + #define NVM_CFG1_GLOB_25G_LLPC_PRE2_OFFSET 0 + #define NVM_CFG1_GLOB_25G_AC_PRE2_MASK 0x0000FF00 + #define NVM_CFG1_GLOB_25G_AC_PRE2_OFFSET 8 + #define NVM_CFG1_GLOB_10G_PC_PRE2_MASK 0x00FF0000 + #define NVM_CFG1_GLOB_10G_PC_PRE2_OFFSET 16 + #define NVM_CFG1_GLOB_PRE2_10G_AC_MASK 0xFF000000 + #define NVM_CFG1_GLOB_PRE2_10G_AC_OFFSET 24 + u32 pre2_generic_cont_3; /* 0x158 */ + #define NVM_CFG1_GLOB_1G_PRE2_MASK 0x000000FF + #define NVM_CFG1_GLOB_1G_PRE2_OFFSET 0 + #define NVM_CFG1_GLOB_5G_BT_PRE2_MASK 0x0000FF00 + #define NVM_CFG1_GLOB_5G_BT_PRE2_OFFSET 8 + #define NVM_CFG1_GLOB_10G_BT_PRE2_MASK 0x00FF0000 + #define NVM_CFG1_GLOB_10G_BT_PRE2_OFFSET 16 + /* When temperature goes below (warning temperature - delta) warning + * gpio is unset + */ + #define NVM_CFG1_GLOB_WARNING_TEMPERATURE_DELTA_MASK 0xFF000000 + #define NVM_CFG1_GLOB_WARNING_TEMPERATURE_DELTA_OFFSET 24 + u32 tx_rx_eq_50g_hlpc; /* 0x15C */ + #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_50G_HLPC_MASK 0x000000FF + #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_50G_HLPC_OFFSET 0 + #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_50G_HLPC_MASK 0x0000FF00 + #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_50G_HLPC_OFFSET 8 + #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_50G_HLPC_MASK 0x00FF0000 + #define 
NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_50G_HLPC_OFFSET 16 + #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_50G_HLPC_MASK 0xFF000000 + #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_50G_HLPC_OFFSET 24 + u32 tx_rx_eq_50g_mlpc; /* 0x160 */ + #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_50G_MLPC_MASK 0x000000FF + #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_50G_MLPC_OFFSET 0 + #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_50G_MLPC_MASK 0x0000FF00 + #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_50G_MLPC_OFFSET 8 + #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_50G_MLPC_MASK 0x00FF0000 + #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_50G_MLPC_OFFSET 16 + #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_50G_MLPC_MASK 0xFF000000 + #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_50G_MLPC_OFFSET 24 + u32 tx_rx_eq_50g_llpc; /* 0x164 */ + #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_50G_LLPC_MASK 0x000000FF + #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_50G_LLPC_OFFSET 0 + #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_50G_LLPC_MASK 0x0000FF00 + #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_50G_LLPC_OFFSET 8 + #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_50G_LLPC_MASK 0x00FF0000 + #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_50G_LLPC_OFFSET 16 + #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_50G_LLPC_MASK 0xFF000000 + #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_50G_LLPC_OFFSET 24 + u32 tx_rx_eq_50g_ac; /* 0x168 */ + #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_50G_AC_MASK 0x000000FF + #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_50G_AC_OFFSET 0 + #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_50G_AC_MASK 0x0000FF00 + #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_50G_AC_OFFSET 8 + #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_50G_AC_MASK 0x00FF0000 + #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_50G_AC_OFFSET 16 + #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_50G_AC_MASK 0xFF000000 + #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_50G_AC_OFFSET 24 + /* Set Trace Filter Modules Log Bit Mask */ + u32 trace_modules; /* 0x16C */ + #define NVM_CFG1_GLOB_TRACE_MODULES_ERROR 0x1 + #define NVM_CFG1_GLOB_TRACE_MODULES_DBG 0x2 + #define NVM_CFG1_GLOB_TRACE_MODULES_DRV_HSI 0x4 + #define NVM_CFG1_GLOB_TRACE_MODULES_INTERRUPT 0x8 + #define NVM_CFG1_GLOB_TRACE_MODULES_VPD 0x10 + #define NVM_CFG1_GLOB_TRACE_MODULES_FLR 0x20 + #define NVM_CFG1_GLOB_TRACE_MODULES_INIT 0x40 + #define NVM_CFG1_GLOB_TRACE_MODULES_NVM 0x80 + #define NVM_CFG1_GLOB_TRACE_MODULES_PIM 0x100 + #define NVM_CFG1_GLOB_TRACE_MODULES_NET 0x200 + #define NVM_CFG1_GLOB_TRACE_MODULES_POWER 0x400 + #define NVM_CFG1_GLOB_TRACE_MODULES_UTILS 0x800 + #define NVM_CFG1_GLOB_TRACE_MODULES_RESOURCES 0x1000 + #define NVM_CFG1_GLOB_TRACE_MODULES_SCHEDULER 0x2000 + #define NVM_CFG1_GLOB_TRACE_MODULES_PHYMOD 0x4000 + #define NVM_CFG1_GLOB_TRACE_MODULES_EVENTS 0x8000 + #define NVM_CFG1_GLOB_TRACE_MODULES_PMM 0x10000 + #define NVM_CFG1_GLOB_TRACE_MODULES_DBG_DRV 0x20000 + #define NVM_CFG1_GLOB_TRACE_MODULES_ETH 0x40000 + #define NVM_CFG1_GLOB_TRACE_MODULES_SECURITY 0x80000 + #define NVM_CFG1_GLOB_TRACE_MODULES_PCIE 0x100000 + #define NVM_CFG1_GLOB_TRACE_MODULES_TRACE 0x200000 + #define NVM_CFG1_GLOB_TRACE_MODULES_MANAGEMENT 0x400000 + #define NVM_CFG1_GLOB_TRACE_MODULES_SIM 0x800000 + u32 pcie_class_code_fcoe; /* 0x170 */ + /* Set PCIe FCoE Class Code */ + #define NVM_CFG1_GLOB_PCIE_CLASS_CODE_FCOE_MASK 0x00FFFFFF + #define NVM_CFG1_GLOB_PCIE_CLASS_CODE_FCOE_OFFSET 0 + /* When temperature goes below (ALOM FAN ON AUX value - delta) ALOM + * FAN ON AUX gpio is unset + */ + #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_DELTA_MASK 0xFF000000 + #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_DELTA_OFFSET 24 + u32 pcie_class_code_iscsi; /* 0x174 */ + /* Set PCIe iSCSI Class Code */ + #define 
NVM_CFG1_GLOB_PCIE_CLASS_CODE_ISCSI_MASK 0x00FFFFFF + #define NVM_CFG1_GLOB_PCIE_CLASS_CODE_ISCSI_OFFSET 0 + /* When temperature goes below (Dead Temp TH - delta)Thermal Event + * gpio is unset + */ + #define NVM_CFG1_GLOB_DEAD_TEMP_TH_DELTA_MASK 0xFF000000 + #define NVM_CFG1_GLOB_DEAD_TEMP_TH_DELTA_OFFSET 24 + u32 no_provisioned_mac; /* 0x178 */ + /* Set number of provisioned MAC addresses */ + #define NVM_CFG1_GLOB_NUMBER_OF_PROVISIONED_MAC_MASK 0x0000FFFF + #define NVM_CFG1_GLOB_NUMBER_OF_PROVISIONED_MAC_OFFSET 0 + /* Set number of provisioned VF MAC addresses */ + #define NVM_CFG1_GLOB_NUMBER_OF_PROVISIONED_VF_MAC_MASK 0x00FF0000 + #define NVM_CFG1_GLOB_NUMBER_OF_PROVISIONED_VF_MAC_OFFSET 16 + /* Enable/Disable BMC MAC */ + #define NVM_CFG1_GLOB_PROVISIONED_BMC_MAC_MASK 0x01000000 + #define NVM_CFG1_GLOB_PROVISIONED_BMC_MAC_OFFSET 24 + #define NVM_CFG1_GLOB_PROVISIONED_BMC_MAC_DISABLED 0x0 + #define NVM_CFG1_GLOB_PROVISIONED_BMC_MAC_ENABLED 0x1 + u32 reserved[43]; /* 0x17C */ +}; + +struct nvm_cfg1_path { + u32 reserved[1]; /* 0x0 */ +}; + +struct nvm_cfg1_port { + u32 reserved__m_relocated_to_option_123; /* 0x0 */ + u32 reserved__m_relocated_to_option_124; /* 0x4 */ + u32 generic_cont0; /* 0x8 */ + #define NVM_CFG1_PORT_LED_MODE_MASK 0x000000FF + #define NVM_CFG1_PORT_LED_MODE_OFFSET 0 + #define NVM_CFG1_PORT_LED_MODE_MAC1 0x0 + #define NVM_CFG1_PORT_LED_MODE_PHY1 0x1 + #define NVM_CFG1_PORT_LED_MODE_PHY2 0x2 + #define NVM_CFG1_PORT_LED_MODE_PHY3 0x3 + #define NVM_CFG1_PORT_LED_MODE_MAC2 0x4 + #define NVM_CFG1_PORT_LED_MODE_PHY4 0x5 + #define NVM_CFG1_PORT_LED_MODE_PHY5 0x6 + #define NVM_CFG1_PORT_LED_MODE_PHY6 0x7 + #define NVM_CFG1_PORT_LED_MODE_MAC3 0x8 + #define NVM_CFG1_PORT_LED_MODE_PHY7 0x9 + #define NVM_CFG1_PORT_LED_MODE_PHY8 0xA + #define NVM_CFG1_PORT_LED_MODE_PHY9 0xB + #define NVM_CFG1_PORT_LED_MODE_MAC4 0xC + #define NVM_CFG1_PORT_LED_MODE_PHY10 0xD + #define NVM_CFG1_PORT_LED_MODE_PHY11 0xE + #define NVM_CFG1_PORT_LED_MODE_PHY12 0xF + #define NVM_CFG1_PORT_LED_MODE_BREAKOUT 0x10 + #define NVM_CFG1_PORT_LED_MODE_OCP_3_0 0x11 + #define NVM_CFG1_PORT_LED_MODE_OCP_3_0_MAC2 0x12 + #define NVM_CFG1_PORT_LED_MODE_SW_DEF1 0x13 + #define NVM_CFG1_PORT_LED_MODE_SW_DEF1_MAC2 0x14 + #define NVM_CFG1_PORT_ROCE_PRIORITY_MASK 0x0000FF00 + #define NVM_CFG1_PORT_ROCE_PRIORITY_OFFSET 8 + #define NVM_CFG1_PORT_DCBX_MODE_MASK 0x000F0000 + #define NVM_CFG1_PORT_DCBX_MODE_OFFSET 16 + #define NVM_CFG1_PORT_DCBX_MODE_DISABLED 0x0 + #define NVM_CFG1_PORT_DCBX_MODE_IEEE 0x1 + #define NVM_CFG1_PORT_DCBX_MODE_CEE 0x2 + #define NVM_CFG1_PORT_DCBX_MODE_DYNAMIC 0x3 + #define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_MASK 0x00F00000 + #define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_OFFSET 20 + #define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_ETHERNET 0x1 + #define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_FCOE 0x2 + #define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_ISCSI 0x4 + /* GPIO for HW reset the PHY. 
In case it is the same for all ports, + * need to set same value for all ports + */ + #define NVM_CFG1_PORT_EXT_PHY_RESET_MASK 0xFF000000 + #define NVM_CFG1_PORT_EXT_PHY_RESET_OFFSET 24 + #define NVM_CFG1_PORT_EXT_PHY_RESET_NA 0x0 + #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO0 0x1 + #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO1 0x2 + #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO2 0x3 + #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO3 0x4 + #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO4 0x5 + #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO5 0x6 + #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO6 0x7 + #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO7 0x8 + #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO8 0x9 + #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO9 0xA + #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO10 0xB + #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO11 0xC + #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO12 0xD + #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO13 0xE + #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO14 0xF + #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO15 0x10 + #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO16 0x11 + #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO17 0x12 + #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO18 0x13 + #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO19 0x14 + #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO20 0x15 + #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO21 0x16 + #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO22 0x17 + #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO23 0x18 + #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO24 0x19 + #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO25 0x1A + #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO26 0x1B + #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO27 0x1C + #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO28 0x1D + #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO29 0x1E + #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO30 0x1F + #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO31 0x20 + u32 pcie_cfg; /* 0xC */ + #define NVM_CFG1_PORT_RESERVED15_MASK 0x00000007 + #define NVM_CFG1_PORT_RESERVED15_OFFSET 0 + u32 features; /* 0x10 */ + #define NVM_CFG1_PORT_ENABLE_WOL_ON_ACPI_PATTERN_MASK 0x00000001 + #define NVM_CFG1_PORT_ENABLE_WOL_ON_ACPI_PATTERN_OFFSET 0 + #define NVM_CFG1_PORT_ENABLE_WOL_ON_ACPI_PATTERN_DISABLED 0x0 + #define NVM_CFG1_PORT_ENABLE_WOL_ON_ACPI_PATTERN_ENABLED 0x1 + #define NVM_CFG1_PORT_MAGIC_PACKET_WOL_MASK 0x00000002 + #define NVM_CFG1_PORT_MAGIC_PACKET_WOL_OFFSET 1 + #define NVM_CFG1_PORT_MAGIC_PACKET_WOL_DISABLED 0x0 + #define NVM_CFG1_PORT_MAGIC_PACKET_WOL_ENABLED 0x1 + u32 speed_cap_mask; /* 0x14 */ + #define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK 0x0000FFFF + #define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_OFFSET 0 + #define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G 0x1 + #define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G 0x2 + #define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G 0x4 + #define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G 0x8 + #define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G 0x10 + #define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G 0x20 + #define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G 0x40 + #define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_MASK 0xFFFF0000 + #define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_OFFSET 16 + #define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_1G 0x1 + #define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_10G 0x2 + #define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_20G 0x4 + #define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_25G 0x8 + #define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_40G 0x10 + #define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_50G 0x20 + #define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_BB_100G 0x40 + u32 link_settings; /* 
0x18 */ + #define NVM_CFG1_PORT_DRV_LINK_SPEED_MASK 0x0000000F + #define NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET 0 + #define NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG 0x0 + #define NVM_CFG1_PORT_DRV_LINK_SPEED_1G 0x1 + #define NVM_CFG1_PORT_DRV_LINK_SPEED_10G 0x2 + #define NVM_CFG1_PORT_DRV_LINK_SPEED_20G 0x3 + #define NVM_CFG1_PORT_DRV_LINK_SPEED_25G 0x4 + #define NVM_CFG1_PORT_DRV_LINK_SPEED_40G 0x5 + #define NVM_CFG1_PORT_DRV_LINK_SPEED_50G 0x6 + #define NVM_CFG1_PORT_DRV_LINK_SPEED_BB_100G 0x7 + #define NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK 0x00000070 + #define NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET 4 + #define NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG 0x1 + #define NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX 0x2 + #define NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX 0x4 + #define NVM_CFG1_PORT_MFW_LINK_SPEED_MASK 0x00000780 + #define NVM_CFG1_PORT_MFW_LINK_SPEED_OFFSET 7 + #define NVM_CFG1_PORT_MFW_LINK_SPEED_AUTONEG 0x0 + #define NVM_CFG1_PORT_MFW_LINK_SPEED_1G 0x1 + #define NVM_CFG1_PORT_MFW_LINK_SPEED_10G 0x2 + #define NVM_CFG1_PORT_MFW_LINK_SPEED_20G 0x3 + #define NVM_CFG1_PORT_MFW_LINK_SPEED_25G 0x4 + #define NVM_CFG1_PORT_MFW_LINK_SPEED_40G 0x5 + #define NVM_CFG1_PORT_MFW_LINK_SPEED_50G 0x6 + #define NVM_CFG1_PORT_MFW_LINK_SPEED_BB_100G 0x7 + #define NVM_CFG1_PORT_MFW_FLOW_CONTROL_MASK 0x00003800 + #define NVM_CFG1_PORT_MFW_FLOW_CONTROL_OFFSET 11 + #define NVM_CFG1_PORT_MFW_FLOW_CONTROL_AUTONEG 0x1 + #define NVM_CFG1_PORT_MFW_FLOW_CONTROL_RX 0x2 + #define NVM_CFG1_PORT_MFW_FLOW_CONTROL_TX 0x4 + #define NVM_CFG1_PORT_OPTIC_MODULE_VENDOR_ENFORCEMENT_MASK \ + 0x00004000 + #define NVM_CFG1_PORT_OPTIC_MODULE_VENDOR_ENFORCEMENT_OFFSET 14 + #define NVM_CFG1_PORT_OPTIC_MODULE_VENDOR_ENFORCEMENT_DISABLED \ + 0x0 + #define NVM_CFG1_PORT_OPTIC_MODULE_VENDOR_ENFORCEMENT_ENABLED \ + 0x1 + #define NVM_CFG1_PORT_AN_25G_50G_OUI_MASK 0x00018000 + #define NVM_CFG1_PORT_AN_25G_50G_OUI_OFFSET 15 + #define NVM_CFG1_PORT_AN_25G_50G_OUI_CONSORTIUM 0x0 + #define NVM_CFG1_PORT_AN_25G_50G_OUI_BAM 0x1 + #define NVM_CFG1_PORT_FEC_FORCE_MODE_MASK 0x000E0000 + #define NVM_CFG1_PORT_FEC_FORCE_MODE_OFFSET 17 + #define NVM_CFG1_PORT_FEC_FORCE_MODE_NONE 0x0 + #define NVM_CFG1_PORT_FEC_FORCE_MODE_FIRECODE 0x1 + #define NVM_CFG1_PORT_FEC_FORCE_MODE_RS 0x2 + #define NVM_CFG1_PORT_FEC_FORCE_MODE_AUTO 0x7 + #define NVM_CFG1_PORT_FEC_AN_MODE_MASK 0x00700000 + #define NVM_CFG1_PORT_FEC_AN_MODE_OFFSET 20 + #define NVM_CFG1_PORT_FEC_AN_MODE_NONE 0x0 + #define NVM_CFG1_PORT_FEC_AN_MODE_10G_FIRECODE 0x1 + #define NVM_CFG1_PORT_FEC_AN_MODE_25G_FIRECODE 0x2 + #define NVM_CFG1_PORT_FEC_AN_MODE_10G_AND_25G_FIRECODE 0x3 + #define NVM_CFG1_PORT_FEC_AN_MODE_25G_RS 0x4 + #define NVM_CFG1_PORT_FEC_AN_MODE_25G_FIRECODE_AND_RS 0x5 + #define NVM_CFG1_PORT_FEC_AN_MODE_ALL 0x6 + #define NVM_CFG1_PORT_SMARTLINQ_MODE_MASK 0x00800000 + #define NVM_CFG1_PORT_SMARTLINQ_MODE_OFFSET 23 + #define NVM_CFG1_PORT_SMARTLINQ_MODE_DISABLED 0x0 + #define NVM_CFG1_PORT_SMARTLINQ_MODE_ENABLED 0x1 + #define NVM_CFG1_PORT_RESERVED_WAS_MFW_SMARTLINQ_MASK 0x01000000 + #define NVM_CFG1_PORT_RESERVED_WAS_MFW_SMARTLINQ_OFFSET 24 + #define NVM_CFG1_PORT_RESERVED_WAS_MFW_SMARTLINQ_DISABLED 0x0 + #define NVM_CFG1_PORT_RESERVED_WAS_MFW_SMARTLINQ_ENABLED 0x1 + /* Enable/Disable RX PAM-4 precoding */ + #define NVM_CFG1_PORT_RX_PRECODE_MASK 0x02000000 + #define NVM_CFG1_PORT_RX_PRECODE_OFFSET 25 + #define NVM_CFG1_PORT_RX_PRECODE_DISABLED 0x0 + #define NVM_CFG1_PORT_RX_PRECODE_ENABLED 0x1 + /* Enable/Disable TX PAM-4 precoding */ + #define NVM_CFG1_PORT_TX_PRECODE_MASK 0x04000000 + #define 
NVM_CFG1_PORT_TX_PRECODE_OFFSET 26 + #define NVM_CFG1_PORT_TX_PRECODE_DISABLED 0x0 + #define NVM_CFG1_PORT_TX_PRECODE_ENABLED 0x1 + u32 phy_cfg; /* 0x1C */ + #define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_MASK 0x0000FFFF + #define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_OFFSET 0 + #define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_HIGIG 0x1 + #define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_SCRAMBLER 0x2 + #define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_FIBER 0x4 + #define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_DISABLE_CL72_AN 0x8 + #define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_DISABLE_FEC_AN 0x10 + #define NVM_CFG1_PORT_SERDES_NET_INTERFACE_MASK 0x00FF0000 + #define NVM_CFG1_PORT_SERDES_NET_INTERFACE_OFFSET 16 + #define NVM_CFG1_PORT_SERDES_NET_INTERFACE_BYPASS 0x0 + #define NVM_CFG1_PORT_SERDES_NET_INTERFACE_KR 0x2 + #define NVM_CFG1_PORT_SERDES_NET_INTERFACE_KR2 0x3 + #define NVM_CFG1_PORT_SERDES_NET_INTERFACE_KR4 0x4 + #define NVM_CFG1_PORT_SERDES_NET_INTERFACE_XFI 0x8 + #define NVM_CFG1_PORT_SERDES_NET_INTERFACE_SFI 0x9 + #define NVM_CFG1_PORT_SERDES_NET_INTERFACE_1000X 0xB + #define NVM_CFG1_PORT_SERDES_NET_INTERFACE_SGMII 0xC + #define NVM_CFG1_PORT_SERDES_NET_INTERFACE_XLAUI 0x11 + #define NVM_CFG1_PORT_SERDES_NET_INTERFACE_XLPPI 0x12 + #define NVM_CFG1_PORT_SERDES_NET_INTERFACE_CAUI 0x21 + #define NVM_CFG1_PORT_SERDES_NET_INTERFACE_CPPI 0x22 + #define NVM_CFG1_PORT_SERDES_NET_INTERFACE_25GAUI 0x31 + #define NVM_CFG1_PORT_AN_MODE_MASK 0xFF000000 + #define NVM_CFG1_PORT_AN_MODE_OFFSET 24 + #define NVM_CFG1_PORT_AN_MODE_NONE 0x0 + #define NVM_CFG1_PORT_AN_MODE_CL73 0x1 + #define NVM_CFG1_PORT_AN_MODE_CL37 0x2 + #define NVM_CFG1_PORT_AN_MODE_CL73_BAM 0x3 + #define NVM_CFG1_PORT_AN_MODE_BB_CL37_BAM 0x4 + #define NVM_CFG1_PORT_AN_MODE_BB_HPAM 0x5 + #define NVM_CFG1_PORT_AN_MODE_BB_SGMII 0x6 + u32 mgmt_traffic; /* 0x20 */ + #define NVM_CFG1_PORT_RESERVED61_MASK 0x0000000F + #define NVM_CFG1_PORT_RESERVED61_OFFSET 0 + u32 ext_phy; /* 0x24 */ + #define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_MASK 0x000000FF + #define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_OFFSET 0 + #define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_NONE 0x0 + #define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_BCM8485X 0x1 + #define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_BCM5422X 0x2 + #define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_88X33X0 0x3 + #define NVM_CFG1_PORT_EXTERNAL_PHY_ADDRESS_MASK 0x0000FF00 + #define NVM_CFG1_PORT_EXTERNAL_PHY_ADDRESS_OFFSET 8 + /* EEE power saving mode */ + #define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_MASK 0x00FF0000 + #define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_OFFSET 16 + #define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_DISABLED 0x0 + #define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_BALANCED 0x1 + #define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_AGGRESSIVE 0x2 + #define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_LOW_LATENCY 0x3 + u32 mba_cfg1; /* 0x28 */ + #define NVM_CFG1_PORT_PREBOOT_OPROM_MASK 0x00000001 + #define NVM_CFG1_PORT_PREBOOT_OPROM_OFFSET 0 + #define NVM_CFG1_PORT_PREBOOT_OPROM_DISABLED 0x0 + #define NVM_CFG1_PORT_PREBOOT_OPROM_ENABLED 0x1 + #define NVM_CFG1_PORT_RESERVED__M_MBA_BOOT_TYPE_MASK 0x00000006 + #define NVM_CFG1_PORT_RESERVED__M_MBA_BOOT_TYPE_OFFSET 1 + #define NVM_CFG1_PORT_MBA_DELAY_TIME_MASK 0x00000078 + #define NVM_CFG1_PORT_MBA_DELAY_TIME_OFFSET 3 + #define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_MASK 0x00000080 + #define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_OFFSET 7 + #define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_CTRL_S 0x0 + #define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_CTRL_B 0x1 + #define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_MASK 0x00000100 + #define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_OFFSET 8 + 
#define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_DISABLED 0x0 + #define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_ENABLED 0x1 + #define NVM_CFG1_PORT_RESERVED5_MASK 0x0001FE00 + #define NVM_CFG1_PORT_RESERVED5_OFFSET 9 + #define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_MASK 0x001E0000 + #define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_OFFSET 17 + #define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_AUTONEG 0x0 + #define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_1G 0x1 + #define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_10G 0x2 + #define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_20G 0x3 + #define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_25G 0x4 + #define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_40G 0x5 + #define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_50G 0x6 + #define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_BB_100G 0x7 + #define NVM_CFG1_PORT_RESERVED__M_MBA_BOOT_RETRY_COUNT_MASK \ + 0x00E00000 + #define NVM_CFG1_PORT_RESERVED__M_MBA_BOOT_RETRY_COUNT_OFFSET 21 + #define NVM_CFG1_PORT_RESERVED_WAS_PREBOOT_SMARTLINQ_MASK \ + 0x01000000 + #define NVM_CFG1_PORT_RESERVED_WAS_PREBOOT_SMARTLINQ_OFFSET 24 + #define NVM_CFG1_PORT_RESERVED_WAS_PREBOOT_SMARTLINQ_DISABLED \ + 0x0 + #define NVM_CFG1_PORT_RESERVED_WAS_PREBOOT_SMARTLINQ_ENABLED 0x1 + u32 mba_cfg2; /* 0x2C */ + #define NVM_CFG1_PORT_RESERVED65_MASK 0x0000FFFF + #define NVM_CFG1_PORT_RESERVED65_OFFSET 0 + #define NVM_CFG1_PORT_RESERVED66_MASK 0x00010000 + #define NVM_CFG1_PORT_RESERVED66_OFFSET 16 + #define NVM_CFG1_PORT_PREBOOT_LINK_UP_DELAY_MASK 0x01FE0000 + #define NVM_CFG1_PORT_PREBOOT_LINK_UP_DELAY_OFFSET 17 + u32 vf_cfg; /* 0x30 */ + #define NVM_CFG1_PORT_RESERVED8_MASK 0x0000FFFF + #define NVM_CFG1_PORT_RESERVED8_OFFSET 0 + #define NVM_CFG1_PORT_RESERVED6_MASK 0x000F0000 + #define NVM_CFG1_PORT_RESERVED6_OFFSET 16 + struct nvm_cfg_mac_address lldp_mac_address; /* 0x34 */ + u32 led_port_settings; /* 0x3C */ + #define NVM_CFG1_PORT_LANE_LED_SPD_0_SEL_MASK 0x000000FF + #define NVM_CFG1_PORT_LANE_LED_SPD_0_SEL_OFFSET 0 + #define NVM_CFG1_PORT_LANE_LED_SPD_1_SEL_MASK 0x0000FF00 + #define NVM_CFG1_PORT_LANE_LED_SPD_1_SEL_OFFSET 8 + #define NVM_CFG1_PORT_LANE_LED_SPD_2_SEL_MASK 0x00FF0000 + #define NVM_CFG1_PORT_LANE_LED_SPD_2_SEL_OFFSET 16 + #define NVM_CFG1_PORT_LANE_LED_SPD__SEL_1G 0x1 + #define NVM_CFG1_PORT_LANE_LED_SPD__SEL_10G 0x2 + #define NVM_CFG1_PORT_LANE_LED_SPD__SEL_AH_25G 0x4 + #define NVM_CFG1_PORT_LANE_LED_SPD__SEL_BB_25G 0x8 + #define NVM_CFG1_PORT_LANE_LED_SPD__SEL_AH_40G 0x8 + #define NVM_CFG1_PORT_LANE_LED_SPD__SEL_BB_40G 0x10 + #define NVM_CFG1_PORT_LANE_LED_SPD__SEL_AH_50G 0x10 + #define NVM_CFG1_PORT_LANE_LED_SPD__SEL_BB_50G 0x20 + #define NVM_CFG1_PORT_LANE_LED_SPD__SEL_BB_100G 0x40 + /* UID LED Blink Mode Settings */ + #define NVM_CFG1_PORT_UID_LED_MODE_MASK_MASK 0x0F000000 + #define NVM_CFG1_PORT_UID_LED_MODE_MASK_OFFSET 24 + #define NVM_CFG1_PORT_UID_LED_MODE_MASK_ACTIVITY_LED 0x1 + #define NVM_CFG1_PORT_UID_LED_MODE_MASK_LINK_LED0 0x2 + #define NVM_CFG1_PORT_UID_LED_MODE_MASK_LINK_LED1 0x4 + #define NVM_CFG1_PORT_UID_LED_MODE_MASK_LINK_LED2 0x8 + u32 transceiver_00; /* 0x40 */ + /* Define for mapping of transceiver signal module absent */ + #define NVM_CFG1_PORT_TRANS_MODULE_ABS_MASK 0x000000FF + #define NVM_CFG1_PORT_TRANS_MODULE_ABS_OFFSET 0 + #define NVM_CFG1_PORT_TRANS_MODULE_ABS_NA 0x0 + #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO0 0x1 + #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO1 0x2 + #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO2 0x3 + #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO3 0x4 + #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO4 0x5 + #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO5 0x6 + #define 
NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO6 0x7 + #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO7 0x8 + #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO8 0x9 + #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO9 0xA + #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO10 0xB + #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO11 0xC + #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO12 0xD + #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO13 0xE + #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO14 0xF + #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO15 0x10 + #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO16 0x11 + #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO17 0x12 + #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO18 0x13 + #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO19 0x14 + #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO20 0x15 + #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO21 0x16 + #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO22 0x17 + #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO23 0x18 + #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO24 0x19 + #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO25 0x1A + #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO26 0x1B + #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO27 0x1C + #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO28 0x1D + #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO29 0x1E + #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO30 0x1F + #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO31 0x20 + /* Define the GPIO mux settings to switch i2c mux to this port */ + #define NVM_CFG1_PORT_I2C_MUX_SEL_VALUE_0_MASK 0x00000F00 + #define NVM_CFG1_PORT_I2C_MUX_SEL_VALUE_0_OFFSET 8 + #define NVM_CFG1_PORT_I2C_MUX_SEL_VALUE_1_MASK 0x0000F000 + #define NVM_CFG1_PORT_I2C_MUX_SEL_VALUE_1_OFFSET 12 + /* Option to override SmartAN FEC requirements */ + #define NVM_CFG1_PORT_SMARTAN_FEC_OVERRIDE_MASK 0x00010000 + #define NVM_CFG1_PORT_SMARTAN_FEC_OVERRIDE_OFFSET 16 + #define NVM_CFG1_PORT_SMARTAN_FEC_OVERRIDE_DISABLED 0x0 + #define NVM_CFG1_PORT_SMARTAN_FEC_OVERRIDE_ENABLED 0x1 + u32 device_ids; /* 0x44 */ + #define NVM_CFG1_PORT_ETH_DID_SUFFIX_MASK 0x000000FF + #define NVM_CFG1_PORT_ETH_DID_SUFFIX_OFFSET 0 + #define NVM_CFG1_PORT_FCOE_DID_SUFFIX_MASK 0x0000FF00 + #define NVM_CFG1_PORT_FCOE_DID_SUFFIX_OFFSET 8 + #define NVM_CFG1_PORT_ISCSI_DID_SUFFIX_MASK 0x00FF0000 + #define NVM_CFG1_PORT_ISCSI_DID_SUFFIX_OFFSET 16 + #define NVM_CFG1_PORT_RESERVED_DID_SUFFIX_MASK 0xFF000000 + #define NVM_CFG1_PORT_RESERVED_DID_SUFFIX_OFFSET 24 + u32 board_cfg; /* 0x48 */ + /* This field defines the board technology + * (backpane,transceiver,external PHY) + */ + #define NVM_CFG1_PORT_PORT_TYPE_MASK 0x000000FF + #define NVM_CFG1_PORT_PORT_TYPE_OFFSET 0 + #define NVM_CFG1_PORT_PORT_TYPE_UNDEFINED 0x0 + #define NVM_CFG1_PORT_PORT_TYPE_MODULE 0x1 + #define NVM_CFG1_PORT_PORT_TYPE_BACKPLANE 0x2 + #define NVM_CFG1_PORT_PORT_TYPE_EXT_PHY 0x3 + #define NVM_CFG1_PORT_PORT_TYPE_MODULE_SLAVE 0x4 + /* This field defines the GPIO mapped to tx_disable signal in SFP */ + #define NVM_CFG1_PORT_TX_DISABLE_MASK 0x0000FF00 + #define NVM_CFG1_PORT_TX_DISABLE_OFFSET 8 + #define NVM_CFG1_PORT_TX_DISABLE_NA 0x0 + #define NVM_CFG1_PORT_TX_DISABLE_GPIO0 0x1 + #define NVM_CFG1_PORT_TX_DISABLE_GPIO1 0x2 + #define NVM_CFG1_PORT_TX_DISABLE_GPIO2 0x3 + #define NVM_CFG1_PORT_TX_DISABLE_GPIO3 0x4 + #define NVM_CFG1_PORT_TX_DISABLE_GPIO4 0x5 + #define NVM_CFG1_PORT_TX_DISABLE_GPIO5 0x6 + #define NVM_CFG1_PORT_TX_DISABLE_GPIO6 0x7 + #define NVM_CFG1_PORT_TX_DISABLE_GPIO7 0x8 + #define NVM_CFG1_PORT_TX_DISABLE_GPIO8 0x9 + #define NVM_CFG1_PORT_TX_DISABLE_GPIO9 0xA + #define NVM_CFG1_PORT_TX_DISABLE_GPIO10 0xB + #define 
NVM_CFG1_PORT_TX_DISABLE_GPIO11 0xC + #define NVM_CFG1_PORT_TX_DISABLE_GPIO12 0xD + #define NVM_CFG1_PORT_TX_DISABLE_GPIO13 0xE + #define NVM_CFG1_PORT_TX_DISABLE_GPIO14 0xF + #define NVM_CFG1_PORT_TX_DISABLE_GPIO15 0x10 + #define NVM_CFG1_PORT_TX_DISABLE_GPIO16 0x11 + #define NVM_CFG1_PORT_TX_DISABLE_GPIO17 0x12 + #define NVM_CFG1_PORT_TX_DISABLE_GPIO18 0x13 + #define NVM_CFG1_PORT_TX_DISABLE_GPIO19 0x14 + #define NVM_CFG1_PORT_TX_DISABLE_GPIO20 0x15 + #define NVM_CFG1_PORT_TX_DISABLE_GPIO21 0x16 + #define NVM_CFG1_PORT_TX_DISABLE_GPIO22 0x17 + #define NVM_CFG1_PORT_TX_DISABLE_GPIO23 0x18 + #define NVM_CFG1_PORT_TX_DISABLE_GPIO24 0x19 + #define NVM_CFG1_PORT_TX_DISABLE_GPIO25 0x1A + #define NVM_CFG1_PORT_TX_DISABLE_GPIO26 0x1B + #define NVM_CFG1_PORT_TX_DISABLE_GPIO27 0x1C + #define NVM_CFG1_PORT_TX_DISABLE_GPIO28 0x1D + #define NVM_CFG1_PORT_TX_DISABLE_GPIO29 0x1E + #define NVM_CFG1_PORT_TX_DISABLE_GPIO30 0x1F + #define NVM_CFG1_PORT_TX_DISABLE_GPIO31 0x20 + u32 mnm_10g_cap; /* 0x4C */ + #define NVM_CFG1_PORT_MNM_10G_DRV_SPEED_CAPABILITY_MASK_MASK \ + 0x0000FFFF + #define NVM_CFG1_PORT_MNM_10G_DRV_SPEED_CAPABILITY_MASK_OFFSET 0 + #define NVM_CFG1_PORT_MNM_10G_DRV_SPEED_CAPABILITY_MASK_1G 0x1 + #define NVM_CFG1_PORT_MNM_10G_DRV_SPEED_CAPABILITY_MASK_10G 0x2 + #define NVM_CFG1_PORT_MNM_10G_DRV_SPEED_CAPABILITY_MASK_20G 0x4 + #define NVM_CFG1_PORT_MNM_10G_DRV_SPEED_CAPABILITY_MASK_25G 0x8 + #define NVM_CFG1_PORT_MNM_10G_DRV_SPEED_CAPABILITY_MASK_40G 0x10 + #define NVM_CFG1_PORT_MNM_10G_DRV_SPEED_CAPABILITY_MASK_50G 0x20 + #define \ + NVM_CFG1_PORT_MNM_10G_DRV_SPEED_CAPABILITY_MASK_BB_100G 0x40 + #define NVM_CFG1_PORT_MNM_10G_MFW_SPEED_CAPABILITY_MASK_MASK \ + 0xFFFF0000 + #define NVM_CFG1_PORT_MNM_10G_MFW_SPEED_CAPABILITY_MASK_OFFSET \ + 16 + #define NVM_CFG1_PORT_MNM_10G_MFW_SPEED_CAPABILITY_MASK_1G 0x1 + #define NVM_CFG1_PORT_MNM_10G_MFW_SPEED_CAPABILITY_MASK_10G 0x2 + #define NVM_CFG1_PORT_MNM_10G_MFW_SPEED_CAPABILITY_MASK_20G 0x4 + #define NVM_CFG1_PORT_MNM_10G_MFW_SPEED_CAPABILITY_MASK_25G 0x8 + #define NVM_CFG1_PORT_MNM_10G_MFW_SPEED_CAPABILITY_MASK_40G 0x10 + #define NVM_CFG1_PORT_MNM_10G_MFW_SPEED_CAPABILITY_MASK_50G 0x20 + #define \ + NVM_CFG1_PORT_MNM_10G_MFW_SPEED_CAPABILITY_MASK_BB_100G 0x40 + u32 mnm_10g_ctrl; /* 0x50 */ + #define NVM_CFG1_PORT_MNM_10G_DRV_LINK_SPEED_MASK 0x0000000F + #define NVM_CFG1_PORT_MNM_10G_DRV_LINK_SPEED_OFFSET 0 + #define NVM_CFG1_PORT_MNM_10G_DRV_LINK_SPEED_AUTONEG 0x0 + #define NVM_CFG1_PORT_MNM_10G_DRV_LINK_SPEED_1G 0x1 + #define NVM_CFG1_PORT_MNM_10G_DRV_LINK_SPEED_10G 0x2 + #define NVM_CFG1_PORT_MNM_10G_DRV_LINK_SPEED_20G 0x3 + #define NVM_CFG1_PORT_MNM_10G_DRV_LINK_SPEED_25G 0x4 + #define NVM_CFG1_PORT_MNM_10G_DRV_LINK_SPEED_40G 0x5 + #define NVM_CFG1_PORT_MNM_10G_DRV_LINK_SPEED_50G 0x6 + #define NVM_CFG1_PORT_MNM_10G_DRV_LINK_SPEED_BB_100G 0x7 + #define NVM_CFG1_PORT_MNM_10G_MFW_LINK_SPEED_MASK 0x000000F0 + #define NVM_CFG1_PORT_MNM_10G_MFW_LINK_SPEED_OFFSET 4 + #define NVM_CFG1_PORT_MNM_10G_MFW_LINK_SPEED_AUTONEG 0x0 + #define NVM_CFG1_PORT_MNM_10G_MFW_LINK_SPEED_1G 0x1 + #define NVM_CFG1_PORT_MNM_10G_MFW_LINK_SPEED_10G 0x2 + #define NVM_CFG1_PORT_MNM_10G_MFW_LINK_SPEED_20G 0x3 + #define NVM_CFG1_PORT_MNM_10G_MFW_LINK_SPEED_25G 0x4 + #define NVM_CFG1_PORT_MNM_10G_MFW_LINK_SPEED_40G 0x5 + #define NVM_CFG1_PORT_MNM_10G_MFW_LINK_SPEED_50G 0x6 + #define NVM_CFG1_PORT_MNM_10G_MFW_LINK_SPEED_BB_100G 0x7 + /* This field defines the board technology + * (backpane,transceiver,external PHY) + */ + #define NVM_CFG1_PORT_MNM_10G_PORT_TYPE_MASK 0x0000FF00 + 
#define NVM_CFG1_PORT_MNM_10G_PORT_TYPE_OFFSET 8 + #define NVM_CFG1_PORT_MNM_10G_PORT_TYPE_UNDEFINED 0x0 + #define NVM_CFG1_PORT_MNM_10G_PORT_TYPE_MODULE 0x1 + #define NVM_CFG1_PORT_MNM_10G_PORT_TYPE_BACKPLANE 0x2 + #define NVM_CFG1_PORT_MNM_10G_PORT_TYPE_EXT_PHY 0x3 + #define NVM_CFG1_PORT_MNM_10G_PORT_TYPE_MODULE_SLAVE 0x4 + #define NVM_CFG1_PORT_MNM_10G_SERDES_NET_INTERFACE_MASK \ + 0x00FF0000 + #define NVM_CFG1_PORT_MNM_10G_SERDES_NET_INTERFACE_OFFSET 16 + #define NVM_CFG1_PORT_MNM_10G_SERDES_NET_INTERFACE_BYPASS 0x0 + #define NVM_CFG1_PORT_MNM_10G_SERDES_NET_INTERFACE_KR 0x2 + #define NVM_CFG1_PORT_MNM_10G_SERDES_NET_INTERFACE_KR2 0x3 + #define NVM_CFG1_PORT_MNM_10G_SERDES_NET_INTERFACE_KR4 0x4 + #define NVM_CFG1_PORT_MNM_10G_SERDES_NET_INTERFACE_XFI 0x8 + #define NVM_CFG1_PORT_MNM_10G_SERDES_NET_INTERFACE_SFI 0x9 + #define NVM_CFG1_PORT_MNM_10G_SERDES_NET_INTERFACE_1000X 0xB + #define NVM_CFG1_PORT_MNM_10G_SERDES_NET_INTERFACE_SGMII 0xC + #define NVM_CFG1_PORT_MNM_10G_SERDES_NET_INTERFACE_XLAUI 0x11 + #define NVM_CFG1_PORT_MNM_10G_SERDES_NET_INTERFACE_XLPPI 0x12 + #define NVM_CFG1_PORT_MNM_10G_SERDES_NET_INTERFACE_CAUI 0x21 + #define NVM_CFG1_PORT_MNM_10G_SERDES_NET_INTERFACE_CPPI 0x22 + #define NVM_CFG1_PORT_MNM_10G_SERDES_NET_INTERFACE_25GAUI 0x31 + #define NVM_CFG1_PORT_MNM_10G_ETH_DID_SUFFIX_MASK 0xFF000000 + #define NVM_CFG1_PORT_MNM_10G_ETH_DID_SUFFIX_OFFSET 24 + u32 mnm_10g_misc; /* 0x54 */ + #define NVM_CFG1_PORT_MNM_10G_FEC_FORCE_MODE_MASK 0x00000007 + #define NVM_CFG1_PORT_MNM_10G_FEC_FORCE_MODE_OFFSET 0 + #define NVM_CFG1_PORT_MNM_10G_FEC_FORCE_MODE_NONE 0x0 + #define NVM_CFG1_PORT_MNM_10G_FEC_FORCE_MODE_FIRECODE 0x1 + #define NVM_CFG1_PORT_MNM_10G_FEC_FORCE_MODE_RS 0x2 + #define NVM_CFG1_PORT_MNM_10G_FEC_FORCE_MODE_AUTO 0x7 + u32 mnm_25g_cap; /* 0x58 */ + #define NVM_CFG1_PORT_MNM_25G_DRV_SPEED_CAPABILITY_MASK_MASK \ + 0x0000FFFF + #define NVM_CFG1_PORT_MNM_25G_DRV_SPEED_CAPABILITY_MASK_OFFSET 0 + #define NVM_CFG1_PORT_MNM_25G_DRV_SPEED_CAPABILITY_MASK_1G 0x1 + #define NVM_CFG1_PORT_MNM_25G_DRV_SPEED_CAPABILITY_MASK_10G 0x2 + #define NVM_CFG1_PORT_MNM_25G_DRV_SPEED_CAPABILITY_MASK_20G 0x4 + #define NVM_CFG1_PORT_MNM_25G_DRV_SPEED_CAPABILITY_MASK_25G 0x8 + #define NVM_CFG1_PORT_MNM_25G_DRV_SPEED_CAPABILITY_MASK_40G 0x10 + #define NVM_CFG1_PORT_MNM_25G_DRV_SPEED_CAPABILITY_MASK_50G 0x20 + #define \ + NVM_CFG1_PORT_MNM_25G_DRV_SPEED_CAPABILITY_MASK_BB_100G 0x40 + #define NVM_CFG1_PORT_MNM_25G_MFW_SPEED_CAPABILITY_MASK_MASK \ + 0xFFFF0000 + #define NVM_CFG1_PORT_MNM_25G_MFW_SPEED_CAPABILITY_MASK_OFFSET \ + 16 + #define NVM_CFG1_PORT_MNM_25G_MFW_SPEED_CAPABILITY_MASK_1G 0x1 + #define NVM_CFG1_PORT_MNM_25G_MFW_SPEED_CAPABILITY_MASK_10G 0x2 + #define NVM_CFG1_PORT_MNM_25G_MFW_SPEED_CAPABILITY_MASK_20G 0x4 + #define NVM_CFG1_PORT_MNM_25G_MFW_SPEED_CAPABILITY_MASK_25G 0x8 + #define NVM_CFG1_PORT_MNM_25G_MFW_SPEED_CAPABILITY_MASK_40G 0x10 + #define NVM_CFG1_PORT_MNM_25G_MFW_SPEED_CAPABILITY_MASK_50G 0x20 + #define \ + NVM_CFG1_PORT_MNM_25G_MFW_SPEED_CAPABILITY_MASK_BB_100G 0x40 + u32 mnm_25g_ctrl; /* 0x5C */ + #define NVM_CFG1_PORT_MNM_25G_DRV_LINK_SPEED_MASK 0x0000000F + #define NVM_CFG1_PORT_MNM_25G_DRV_LINK_SPEED_OFFSET 0 + #define NVM_CFG1_PORT_MNM_25G_DRV_LINK_SPEED_AUTONEG 0x0 + #define NVM_CFG1_PORT_MNM_25G_DRV_LINK_SPEED_1G 0x1 + #define NVM_CFG1_PORT_MNM_25G_DRV_LINK_SPEED_10G 0x2 + #define NVM_CFG1_PORT_MNM_25G_DRV_LINK_SPEED_20G 0x3 + #define NVM_CFG1_PORT_MNM_25G_DRV_LINK_SPEED_25G 0x4 + #define NVM_CFG1_PORT_MNM_25G_DRV_LINK_SPEED_40G 0x5 + #define 
NVM_CFG1_PORT_MNM_25G_DRV_LINK_SPEED_50G 0x6 + #define NVM_CFG1_PORT_MNM_25G_DRV_LINK_SPEED_BB_100G 0x7 + #define NVM_CFG1_PORT_MNM_25G_MFW_LINK_SPEED_MASK 0x000000F0 + #define NVM_CFG1_PORT_MNM_25G_MFW_LINK_SPEED_OFFSET 4 + #define NVM_CFG1_PORT_MNM_25G_MFW_LINK_SPEED_AUTONEG 0x0 + #define NVM_CFG1_PORT_MNM_25G_MFW_LINK_SPEED_1G 0x1 + #define NVM_CFG1_PORT_MNM_25G_MFW_LINK_SPEED_10G 0x2 + #define NVM_CFG1_PORT_MNM_25G_MFW_LINK_SPEED_20G 0x3 + #define NVM_CFG1_PORT_MNM_25G_MFW_LINK_SPEED_25G 0x4 + #define NVM_CFG1_PORT_MNM_25G_MFW_LINK_SPEED_40G 0x5 + #define NVM_CFG1_PORT_MNM_25G_MFW_LINK_SPEED_50G 0x6 + #define NVM_CFG1_PORT_MNM_25G_MFW_LINK_SPEED_BB_100G 0x7 + /* This field defines the board technology + * (backpane,transceiver,external PHY) + */ + #define NVM_CFG1_PORT_MNM_25G_PORT_TYPE_MASK 0x0000FF00 + #define NVM_CFG1_PORT_MNM_25G_PORT_TYPE_OFFSET 8 + #define NVM_CFG1_PORT_MNM_25G_PORT_TYPE_UNDEFINED 0x0 + #define NVM_CFG1_PORT_MNM_25G_PORT_TYPE_MODULE 0x1 + #define NVM_CFG1_PORT_MNM_25G_PORT_TYPE_BACKPLANE 0x2 + #define NVM_CFG1_PORT_MNM_25G_PORT_TYPE_EXT_PHY 0x3 + #define NVM_CFG1_PORT_MNM_25G_PORT_TYPE_MODULE_SLAVE 0x4 + #define NVM_CFG1_PORT_MNM_25G_SERDES_NET_INTERFACE_MASK \ + 0x00FF0000 + #define NVM_CFG1_PORT_MNM_25G_SERDES_NET_INTERFACE_OFFSET 16 + #define NVM_CFG1_PORT_MNM_25G_SERDES_NET_INTERFACE_BYPASS 0x0 + #define NVM_CFG1_PORT_MNM_25G_SERDES_NET_INTERFACE_KR 0x2 + #define NVM_CFG1_PORT_MNM_25G_SERDES_NET_INTERFACE_KR2 0x3 + #define NVM_CFG1_PORT_MNM_25G_SERDES_NET_INTERFACE_KR4 0x4 + #define NVM_CFG1_PORT_MNM_25G_SERDES_NET_INTERFACE_XFI 0x8 + #define NVM_CFG1_PORT_MNM_25G_SERDES_NET_INTERFACE_SFI 0x9 + #define NVM_CFG1_PORT_MNM_25G_SERDES_NET_INTERFACE_1000X 0xB + #define NVM_CFG1_PORT_MNM_25G_SERDES_NET_INTERFACE_SGMII 0xC + #define NVM_CFG1_PORT_MNM_25G_SERDES_NET_INTERFACE_XLAUI 0x11 + #define NVM_CFG1_PORT_MNM_25G_SERDES_NET_INTERFACE_XLPPI 0x12 + #define NVM_CFG1_PORT_MNM_25G_SERDES_NET_INTERFACE_CAUI 0x21 + #define NVM_CFG1_PORT_MNM_25G_SERDES_NET_INTERFACE_CPPI 0x22 + #define NVM_CFG1_PORT_MNM_25G_SERDES_NET_INTERFACE_25GAUI 0x31 + #define NVM_CFG1_PORT_MNM_25G_ETH_DID_SUFFIX_MASK 0xFF000000 + #define NVM_CFG1_PORT_MNM_25G_ETH_DID_SUFFIX_OFFSET 24 + u32 mnm_25g_misc; /* 0x60 */ + #define NVM_CFG1_PORT_MNM_25G_FEC_FORCE_MODE_MASK 0x00000007 + #define NVM_CFG1_PORT_MNM_25G_FEC_FORCE_MODE_OFFSET 0 + #define NVM_CFG1_PORT_MNM_25G_FEC_FORCE_MODE_NONE 0x0 + #define NVM_CFG1_PORT_MNM_25G_FEC_FORCE_MODE_FIRECODE 0x1 + #define NVM_CFG1_PORT_MNM_25G_FEC_FORCE_MODE_RS 0x2 + #define NVM_CFG1_PORT_MNM_25G_FEC_FORCE_MODE_AUTO 0x7 + u32 mnm_40g_cap; /* 0x64 */ + #define NVM_CFG1_PORT_MNM_40G_DRV_SPEED_CAPABILITY_MASK_MASK \ + 0x0000FFFF + #define NVM_CFG1_PORT_MNM_40G_DRV_SPEED_CAPABILITY_MASK_OFFSET 0 + #define NVM_CFG1_PORT_MNM_40G_DRV_SPEED_CAPABILITY_MASK_1G 0x1 + #define NVM_CFG1_PORT_MNM_40G_DRV_SPEED_CAPABILITY_MASK_10G 0x2 + #define NVM_CFG1_PORT_MNM_40G_DRV_SPEED_CAPABILITY_MASK_20G 0x4 + #define NVM_CFG1_PORT_MNM_40G_DRV_SPEED_CAPABILITY_MASK_25G 0x8 + #define NVM_CFG1_PORT_MNM_40G_DRV_SPEED_CAPABILITY_MASK_40G 0x10 + #define NVM_CFG1_PORT_MNM_40G_DRV_SPEED_CAPABILITY_MASK_50G 0x20 + #define \ + NVM_CFG1_PORT_MNM_40G_DRV_SPEED_CAPABILITY_MASK_BB_100G 0x40 + #define NVM_CFG1_PORT_MNM_40G_MFW_SPEED_CAPABILITY_MASK_MASK \ + 0xFFFF0000 + #define NVM_CFG1_PORT_MNM_40G_MFW_SPEED_CAPABILITY_MASK_OFFSET \ + 16 + #define NVM_CFG1_PORT_MNM_40G_MFW_SPEED_CAPABILITY_MASK_1G 0x1 + #define NVM_CFG1_PORT_MNM_40G_MFW_SPEED_CAPABILITY_MASK_10G 0x2 + #define 
NVM_CFG1_PORT_MNM_40G_MFW_SPEED_CAPABILITY_MASK_20G 0x4 + #define NVM_CFG1_PORT_MNM_40G_MFW_SPEED_CAPABILITY_MASK_25G 0x8 + #define NVM_CFG1_PORT_MNM_40G_MFW_SPEED_CAPABILITY_MASK_40G 0x10 + #define NVM_CFG1_PORT_MNM_40G_MFW_SPEED_CAPABILITY_MASK_50G 0x20 + #define \ + NVM_CFG1_PORT_MNM_40G_MFW_SPEED_CAPABILITY_MASK_BB_100G 0x40 + u32 mnm_40g_ctrl; /* 0x68 */ + #define NVM_CFG1_PORT_MNM_40G_DRV_LINK_SPEED_MASK 0x0000000F + #define NVM_CFG1_PORT_MNM_40G_DRV_LINK_SPEED_OFFSET 0 + #define NVM_CFG1_PORT_MNM_40G_DRV_LINK_SPEED_AUTONEG 0x0 + #define NVM_CFG1_PORT_MNM_40G_DRV_LINK_SPEED_1G 0x1 + #define NVM_CFG1_PORT_MNM_40G_DRV_LINK_SPEED_10G 0x2 + #define NVM_CFG1_PORT_MNM_40G_DRV_LINK_SPEED_20G 0x3 + #define NVM_CFG1_PORT_MNM_40G_DRV_LINK_SPEED_25G 0x4 + #define NVM_CFG1_PORT_MNM_40G_DRV_LINK_SPEED_40G 0x5 + #define NVM_CFG1_PORT_MNM_40G_DRV_LINK_SPEED_50G 0x6 + #define NVM_CFG1_PORT_MNM_40G_DRV_LINK_SPEED_BB_100G 0x7 + #define NVM_CFG1_PORT_MNM_40G_MFW_LINK_SPEED_MASK 0x000000F0 + #define NVM_CFG1_PORT_MNM_40G_MFW_LINK_SPEED_OFFSET 4 + #define NVM_CFG1_PORT_MNM_40G_MFW_LINK_SPEED_AUTONEG 0x0 + #define NVM_CFG1_PORT_MNM_40G_MFW_LINK_SPEED_1G 0x1 + #define NVM_CFG1_PORT_MNM_40G_MFW_LINK_SPEED_10G 0x2 + #define NVM_CFG1_PORT_MNM_40G_MFW_LINK_SPEED_20G 0x3 + #define NVM_CFG1_PORT_MNM_40G_MFW_LINK_SPEED_25G 0x4 + #define NVM_CFG1_PORT_MNM_40G_MFW_LINK_SPEED_40G 0x5 + #define NVM_CFG1_PORT_MNM_40G_MFW_LINK_SPEED_50G 0x6 + #define NVM_CFG1_PORT_MNM_40G_MFW_LINK_SPEED_BB_100G 0x7 + /* This field defines the board technology + * (backpane,transceiver,external PHY) + */ + #define NVM_CFG1_PORT_MNM_40G_PORT_TYPE_MASK 0x0000FF00 + #define NVM_CFG1_PORT_MNM_40G_PORT_TYPE_OFFSET 8 + #define NVM_CFG1_PORT_MNM_40G_PORT_TYPE_UNDEFINED 0x0 + #define NVM_CFG1_PORT_MNM_40G_PORT_TYPE_MODULE 0x1 + #define NVM_CFG1_PORT_MNM_40G_PORT_TYPE_BACKPLANE 0x2 + #define NVM_CFG1_PORT_MNM_40G_PORT_TYPE_EXT_PHY 0x3 + #define NVM_CFG1_PORT_MNM_40G_PORT_TYPE_MODULE_SLAVE 0x4 + #define NVM_CFG1_PORT_MNM_40G_SERDES_NET_INTERFACE_MASK \ + 0x00FF0000 + #define NVM_CFG1_PORT_MNM_40G_SERDES_NET_INTERFACE_OFFSET 16 + #define NVM_CFG1_PORT_MNM_40G_SERDES_NET_INTERFACE_BYPASS 0x0 + #define NVM_CFG1_PORT_MNM_40G_SERDES_NET_INTERFACE_KR 0x2 + #define NVM_CFG1_PORT_MNM_40G_SERDES_NET_INTERFACE_KR2 0x3 + #define NVM_CFG1_PORT_MNM_40G_SERDES_NET_INTERFACE_KR4 0x4 + #define NVM_CFG1_PORT_MNM_40G_SERDES_NET_INTERFACE_XFI 0x8 + #define NVM_CFG1_PORT_MNM_40G_SERDES_NET_INTERFACE_SFI 0x9 + #define NVM_CFG1_PORT_MNM_40G_SERDES_NET_INTERFACE_1000X 0xB + #define NVM_CFG1_PORT_MNM_40G_SERDES_NET_INTERFACE_SGMII 0xC + #define NVM_CFG1_PORT_MNM_40G_SERDES_NET_INTERFACE_XLAUI 0x11 + #define NVM_CFG1_PORT_MNM_40G_SERDES_NET_INTERFACE_XLPPI 0x12 + #define NVM_CFG1_PORT_MNM_40G_SERDES_NET_INTERFACE_CAUI 0x21 + #define NVM_CFG1_PORT_MNM_40G_SERDES_NET_INTERFACE_CPPI 0x22 + #define NVM_CFG1_PORT_MNM_40G_SERDES_NET_INTERFACE_25GAUI 0x31 + #define NVM_CFG1_PORT_MNM_40G_ETH_DID_SUFFIX_MASK 0xFF000000 + #define NVM_CFG1_PORT_MNM_40G_ETH_DID_SUFFIX_OFFSET 24 + u32 mnm_40g_misc; /* 0x6C */ + #define NVM_CFG1_PORT_MNM_40G_FEC_FORCE_MODE_MASK 0x00000007 + #define NVM_CFG1_PORT_MNM_40G_FEC_FORCE_MODE_OFFSET 0 + #define NVM_CFG1_PORT_MNM_40G_FEC_FORCE_MODE_NONE 0x0 + #define NVM_CFG1_PORT_MNM_40G_FEC_FORCE_MODE_FIRECODE 0x1 + #define NVM_CFG1_PORT_MNM_40G_FEC_FORCE_MODE_RS 0x2 + #define NVM_CFG1_PORT_MNM_40G_FEC_FORCE_MODE_AUTO 0x7 + u32 mnm_50g_cap; /* 0x70 */ + #define NVM_CFG1_PORT_MNM_50G_DRV_SPEED_CAPABILITY_MASK_MASK \ + 0x0000FFFF + #define 
NVM_CFG1_PORT_MNM_50G_DRV_SPEED_CAPABILITY_MASK_OFFSET 0 + #define NVM_CFG1_PORT_MNM_50G_DRV_SPEED_CAPABILITY_MASK_1G 0x1 + #define NVM_CFG1_PORT_MNM_50G_DRV_SPEED_CAPABILITY_MASK_10G 0x2 + #define NVM_CFG1_PORT_MNM_50G_DRV_SPEED_CAPABILITY_MASK_20G 0x4 + #define NVM_CFG1_PORT_MNM_50G_DRV_SPEED_CAPABILITY_MASK_25G 0x8 + #define NVM_CFG1_PORT_MNM_50G_DRV_SPEED_CAPABILITY_MASK_40G 0x10 + #define NVM_CFG1_PORT_MNM_50G_DRV_SPEED_CAPABILITY_MASK_50G 0x20 + #define \ + NVM_CFG1_PORT_MNM_50G_DRV_SPEED_CAPABILITY_MASK_BB_100G \ + 0x40 + #define NVM_CFG1_PORT_MNM_50G_MFW_SPEED_CAPABILITY_MASK_MASK \ + 0xFFFF0000 + #define NVM_CFG1_PORT_MNM_50G_MFW_SPEED_CAPABILITY_MASK_OFFSET \ + 16 + #define NVM_CFG1_PORT_MNM_50G_MFW_SPEED_CAPABILITY_MASK_1G 0x1 + #define NVM_CFG1_PORT_MNM_50G_MFW_SPEED_CAPABILITY_MASK_10G 0x2 + #define NVM_CFG1_PORT_MNM_50G_MFW_SPEED_CAPABILITY_MASK_20G 0x4 + #define NVM_CFG1_PORT_MNM_50G_MFW_SPEED_CAPABILITY_MASK_25G 0x8 + #define NVM_CFG1_PORT_MNM_50G_MFW_SPEED_CAPABILITY_MASK_40G 0x10 + #define NVM_CFG1_PORT_MNM_50G_MFW_SPEED_CAPABILITY_MASK_50G 0x20 + #define \ + NVM_CFG1_PORT_MNM_50G_MFW_SPEED_CAPABILITY_MASK_BB_100G \ + 0x40 + u32 mnm_50g_ctrl; /* 0x74 */ + #define NVM_CFG1_PORT_MNM_50G_DRV_LINK_SPEED_MASK 0x0000000F + #define NVM_CFG1_PORT_MNM_50G_DRV_LINK_SPEED_OFFSET 0 + #define NVM_CFG1_PORT_MNM_50G_DRV_LINK_SPEED_AUTONEG 0x0 + #define NVM_CFG1_PORT_MNM_50G_DRV_LINK_SPEED_1G 0x1 + #define NVM_CFG1_PORT_MNM_50G_DRV_LINK_SPEED_10G 0x2 + #define NVM_CFG1_PORT_MNM_50G_DRV_LINK_SPEED_20G 0x3 + #define NVM_CFG1_PORT_MNM_50G_DRV_LINK_SPEED_25G 0x4 + #define NVM_CFG1_PORT_MNM_50G_DRV_LINK_SPEED_40G 0x5 + #define NVM_CFG1_PORT_MNM_50G_DRV_LINK_SPEED_50G 0x6 + #define NVM_CFG1_PORT_MNM_50G_DRV_LINK_SPEED_BB_100G 0x7 + #define NVM_CFG1_PORT_MNM_50G_MFW_LINK_SPEED_MASK 0x000000F0 + #define NVM_CFG1_PORT_MNM_50G_MFW_LINK_SPEED_OFFSET 4 + #define NVM_CFG1_PORT_MNM_50G_MFW_LINK_SPEED_AUTONEG 0x0 + #define NVM_CFG1_PORT_MNM_50G_MFW_LINK_SPEED_1G 0x1 + #define NVM_CFG1_PORT_MNM_50G_MFW_LINK_SPEED_10G 0x2 + #define NVM_CFG1_PORT_MNM_50G_MFW_LINK_SPEED_20G 0x3 + #define NVM_CFG1_PORT_MNM_50G_MFW_LINK_SPEED_25G 0x4 + #define NVM_CFG1_PORT_MNM_50G_MFW_LINK_SPEED_40G 0x5 + #define NVM_CFG1_PORT_MNM_50G_MFW_LINK_SPEED_50G 0x6 + #define NVM_CFG1_PORT_MNM_50G_MFW_LINK_SPEED_BB_100G 0x7 + /* This field defines the board technology + * (backpane,transceiver,external PHY) + */ + #define NVM_CFG1_PORT_MNM_50G_PORT_TYPE_MASK 0x0000FF00 + #define NVM_CFG1_PORT_MNM_50G_PORT_TYPE_OFFSET 8 + #define NVM_CFG1_PORT_MNM_50G_PORT_TYPE_UNDEFINED 0x0 + #define NVM_CFG1_PORT_MNM_50G_PORT_TYPE_MODULE 0x1 + #define NVM_CFG1_PORT_MNM_50G_PORT_TYPE_BACKPLANE 0x2 + #define NVM_CFG1_PORT_MNM_50G_PORT_TYPE_EXT_PHY 0x3 + #define NVM_CFG1_PORT_MNM_50G_PORT_TYPE_MODULE_SLAVE 0x4 + #define NVM_CFG1_PORT_MNM_50G_SERDES_NET_INTERFACE_MASK \ + 0x00FF0000 + #define NVM_CFG1_PORT_MNM_50G_SERDES_NET_INTERFACE_OFFSET 16 + #define NVM_CFG1_PORT_MNM_50G_SERDES_NET_INTERFACE_BYPASS 0x0 + #define NVM_CFG1_PORT_MNM_50G_SERDES_NET_INTERFACE_KR 0x2 + #define NVM_CFG1_PORT_MNM_50G_SERDES_NET_INTERFACE_KR2 0x3 + #define NVM_CFG1_PORT_MNM_50G_SERDES_NET_INTERFACE_KR4 0x4 + #define NVM_CFG1_PORT_MNM_50G_SERDES_NET_INTERFACE_XFI 0x8 + #define NVM_CFG1_PORT_MNM_50G_SERDES_NET_INTERFACE_SFI 0x9 + #define NVM_CFG1_PORT_MNM_50G_SERDES_NET_INTERFACE_1000X 0xB + #define NVM_CFG1_PORT_MNM_50G_SERDES_NET_INTERFACE_SGMII 0xC + #define NVM_CFG1_PORT_MNM_50G_SERDES_NET_INTERFACE_XLAUI 0x11 + #define NVM_CFG1_PORT_MNM_50G_SERDES_NET_INTERFACE_XLPPI 
0x12 + #define NVM_CFG1_PORT_MNM_50G_SERDES_NET_INTERFACE_CAUI 0x21 + #define NVM_CFG1_PORT_MNM_50G_SERDES_NET_INTERFACE_CPPI 0x22 + #define NVM_CFG1_PORT_MNM_50G_SERDES_NET_INTERFACE_25GAUI 0x31 + #define NVM_CFG1_PORT_MNM_50G_ETH_DID_SUFFIX_MASK 0xFF000000 + #define NVM_CFG1_PORT_MNM_50G_ETH_DID_SUFFIX_OFFSET 24 + u32 mnm_50g_misc; /* 0x78 */ + #define NVM_CFG1_PORT_MNM_50G_FEC_FORCE_MODE_MASK 0x00000007 + #define NVM_CFG1_PORT_MNM_50G_FEC_FORCE_MODE_OFFSET 0 + #define NVM_CFG1_PORT_MNM_50G_FEC_FORCE_MODE_NONE 0x0 + #define NVM_CFG1_PORT_MNM_50G_FEC_FORCE_MODE_FIRECODE 0x1 + #define NVM_CFG1_PORT_MNM_50G_FEC_FORCE_MODE_RS 0x2 + #define NVM_CFG1_PORT_MNM_50G_FEC_FORCE_MODE_AUTO 0x7 + u32 mnm_100g_cap; /* 0x7C */ + #define NVM_CFG1_PORT_MNM_100G_DRV_SPEED_CAP_MASK_MASK \ + 0x0000FFFF + #define NVM_CFG1_PORT_MNM_100G_DRV_SPEED_CAP_MASK_OFFSET 0 + #define NVM_CFG1_PORT_MNM_100G_DRV_SPEED_CAP_MASK_1G 0x1 + #define NVM_CFG1_PORT_MNM_100G_DRV_SPEED_CAP_MASK_10G 0x2 + #define NVM_CFG1_PORT_MNM_100G_DRV_SPEED_CAP_MASK_20G 0x4 + #define NVM_CFG1_PORT_MNM_100G_DRV_SPEED_CAP_MASK_25G 0x8 + #define NVM_CFG1_PORT_MNM_100G_DRV_SPEED_CAP_MASK_40G 0x10 + #define NVM_CFG1_PORT_MNM_100G_DRV_SPEED_CAP_MASK_50G 0x20 + #define NVM_CFG1_PORT_MNM_100G_DRV_SPEED_CAP_MASK_BB_100G 0x40 + #define NVM_CFG1_PORT_MNM_100G_MFW_SPEED_CAP_MASK_MASK \ + 0xFFFF0000 + #define NVM_CFG1_PORT_MNM_100G_MFW_SPEED_CAP_MASK_OFFSET 16 + #define NVM_CFG1_PORT_MNM_100G_MFW_SPEED_CAP_MASK_1G 0x1 + #define NVM_CFG1_PORT_MNM_100G_MFW_SPEED_CAP_MASK_10G 0x2 + #define NVM_CFG1_PORT_MNM_100G_MFW_SPEED_CAP_MASK_20G 0x4 + #define NVM_CFG1_PORT_MNM_100G_MFW_SPEED_CAP_MASK_25G 0x8 + #define NVM_CFG1_PORT_MNM_100G_MFW_SPEED_CAP_MASK_40G 0x10 + #define NVM_CFG1_PORT_MNM_100G_MFW_SPEED_CAP_MASK_50G 0x20 + #define NVM_CFG1_PORT_MNM_100G_MFW_SPEED_CAP_MASK_BB_100G 0x40 + u32 mnm_100g_ctrl; /* 0x80 */ + #define NVM_CFG1_PORT_MNM_100G_DRV_LINK_SPEED_MASK 0x0000000F + #define NVM_CFG1_PORT_MNM_100G_DRV_LINK_SPEED_OFFSET 0 + #define NVM_CFG1_PORT_MNM_100G_DRV_LINK_SPEED_AUTONEG 0x0 + #define NVM_CFG1_PORT_MNM_100G_DRV_LINK_SPEED_1G 0x1 + #define NVM_CFG1_PORT_MNM_100G_DRV_LINK_SPEED_10G 0x2 + #define NVM_CFG1_PORT_MNM_100G_DRV_LINK_SPEED_20G 0x3 + #define NVM_CFG1_PORT_MNM_100G_DRV_LINK_SPEED_25G 0x4 + #define NVM_CFG1_PORT_MNM_100G_DRV_LINK_SPEED_40G 0x5 + #define NVM_CFG1_PORT_MNM_100G_DRV_LINK_SPEED_50G 0x6 + #define NVM_CFG1_PORT_MNM_100G_DRV_LINK_SPEED_BB_100G 0x7 + #define NVM_CFG1_PORT_MNM_100G_MFW_LINK_SPEED_MASK 0x000000F0 + #define NVM_CFG1_PORT_MNM_100G_MFW_LINK_SPEED_OFFSET 4 + #define NVM_CFG1_PORT_MNM_100G_MFW_LINK_SPEED_AUTONEG 0x0 + #define NVM_CFG1_PORT_MNM_100G_MFW_LINK_SPEED_1G 0x1 + #define NVM_CFG1_PORT_MNM_100G_MFW_LINK_SPEED_10G 0x2 + #define NVM_CFG1_PORT_MNM_100G_MFW_LINK_SPEED_20G 0x3 + #define NVM_CFG1_PORT_MNM_100G_MFW_LINK_SPEED_25G 0x4 + #define NVM_CFG1_PORT_MNM_100G_MFW_LINK_SPEED_40G 0x5 + #define NVM_CFG1_PORT_MNM_100G_MFW_LINK_SPEED_50G 0x6 + #define NVM_CFG1_PORT_MNM_100G_MFW_LINK_SPEED_BB_100G 0x7 + /* This field defines the board technology + * (backpane,transceiver,external PHY) + */ + #define NVM_CFG1_PORT_MNM_100G_PORT_TYPE_MASK 0x0000FF00 + #define NVM_CFG1_PORT_MNM_100G_PORT_TYPE_OFFSET 8 + #define NVM_CFG1_PORT_MNM_100G_PORT_TYPE_UNDEFINED 0x0 + #define NVM_CFG1_PORT_MNM_100G_PORT_TYPE_MODULE 0x1 + #define NVM_CFG1_PORT_MNM_100G_PORT_TYPE_BACKPLANE 0x2 + #define NVM_CFG1_PORT_MNM_100G_PORT_TYPE_EXT_PHY 0x3 + #define NVM_CFG1_PORT_MNM_100G_PORT_TYPE_MODULE_SLAVE 0x4 + #define 
NVM_CFG1_PORT_MNM_100G_SERDES_NET_INTERFACE_MASK \ + 0x00FF0000 + #define NVM_CFG1_PORT_MNM_100G_SERDES_NET_INTERFACE_OFFSET 16 + #define NVM_CFG1_PORT_MNM_100G_SERDES_NET_INTERFACE_BYPASS 0x0 + #define NVM_CFG1_PORT_MNM_100G_SERDES_NET_INTERFACE_KR 0x2 + #define NVM_CFG1_PORT_MNM_100G_SERDES_NET_INTERFACE_KR2 0x3 + #define NVM_CFG1_PORT_MNM_100G_SERDES_NET_INTERFACE_KR4 0x4 + #define NVM_CFG1_PORT_MNM_100G_SERDES_NET_INTERFACE_XFI 0x8 + #define NVM_CFG1_PORT_MNM_100G_SERDES_NET_INTERFACE_SFI 0x9 + #define NVM_CFG1_PORT_MNM_100G_SERDES_NET_INTERFACE_1000X 0xB + #define NVM_CFG1_PORT_MNM_100G_SERDES_NET_INTERFACE_SGMII 0xC + #define NVM_CFG1_PORT_MNM_100G_SERDES_NET_INTERFACE_XLAUI 0x11 + #define NVM_CFG1_PORT_MNM_100G_SERDES_NET_INTERFACE_XLPPI 0x12 + #define NVM_CFG1_PORT_MNM_100G_SERDES_NET_INTERFACE_CAUI 0x21 + #define NVM_CFG1_PORT_MNM_100G_SERDES_NET_INTERFACE_CPPI 0x22 + #define NVM_CFG1_PORT_MNM_100G_SERDES_NET_INTERFACE_25GAUI 0x31 + #define NVM_CFG1_PORT_MNM_100G_ETH_DID_SUFFIX_MASK 0xFF000000 + #define NVM_CFG1_PORT_MNM_100G_ETH_DID_SUFFIX_OFFSET 24 + u32 mnm_100g_misc; /* 0x84 */ + #define NVM_CFG1_PORT_MNM_100G_FEC_FORCE_MODE_MASK 0x00000007 + #define NVM_CFG1_PORT_MNM_100G_FEC_FORCE_MODE_OFFSET 0 + #define NVM_CFG1_PORT_MNM_100G_FEC_FORCE_MODE_NONE 0x0 + #define NVM_CFG1_PORT_MNM_100G_FEC_FORCE_MODE_FIRECODE 0x1 + #define NVM_CFG1_PORT_MNM_100G_FEC_FORCE_MODE_RS 0x2 + #define NVM_CFG1_PORT_MNM_100G_FEC_FORCE_MODE_AUTO 0x7 + u32 temperature; /* 0x88 */ + #define NVM_CFG1_PORT_PHY_MODULE_DEAD_TEMP_TH_MASK 0x000000FF + #define NVM_CFG1_PORT_PHY_MODULE_DEAD_TEMP_TH_OFFSET 0 + #define NVM_CFG1_PORT_PHY_MODULE_ALOM_FAN_ON_TEMP_TH_MASK \ + 0x0000FF00 + #define NVM_CFG1_PORT_PHY_MODULE_ALOM_FAN_ON_TEMP_TH_OFFSET 8 + /* Warning temperature threshold used with nvm option 235 */ + #define NVM_CFG1_PORT_PHY_MODULE_WARNING_TEMP_TH_MASK 0x00FF0000 + #define NVM_CFG1_PORT_PHY_MODULE_WARNING_TEMP_TH_OFFSET 16 + u32 ext_phy_cfg1; /* 0x8C */ + /* Ext PHY MDI pair swap value */ + #define NVM_CFG1_PORT_EXT_PHY_MDI_PAIR_SWAP_MASK 0x0000FFFF + #define NVM_CFG1_PORT_EXT_PHY_MDI_PAIR_SWAP_OFFSET 0 + u32 extended_speed; /* 0x90 */ + /* Sets speed in conjunction with legacy speed field */ + #define NVM_CFG1_PORT_EXTENDED_SPEED_MASK 0x0000FFFF + #define NVM_CFG1_PORT_EXTENDED_SPEED_OFFSET 0 + #define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_NONE 0x1 + #define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_1G 0x2 + #define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_10G 0x4 + #define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_25G 0x8 + #define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_40G 0x10 + #define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_50G_R 0x20 + #define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_50G_R2 0x40 + #define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_100G_R2 0x80 + #define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_100G_R4 0x100 + #define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_100G_P4 0x200 + /* Sets speed capabilities in conjunction with legacy capabilities + * field + */ + #define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_MASK 0xFFFF0000 + #define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_OFFSET 16 + #define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_NONE 0x1 + #define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_1G 0x2 + #define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_10G 0x4 + #define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_25G 0x8 + #define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_40G 0x10 + #define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_50G_R 0x20 + #define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_50G_R2 0x40 + #define 
NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_100G_R2 0x80 + #define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_100G_R4 0x100 + #define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_100G_P4 0x200 + /* Set speed specific FEC setting in conjunction with legacy FEC + * mode + */ + u32 extended_fec_mode; /* 0x94 */ + #define NVM_CFG1_PORT_EXTENDED_FEC_MODE_EXTND_FEC_NONE 0x1 + #define NVM_CFG1_PORT_EXTENDED_FEC_MODE_EXTND_FEC_10G_NONE 0x2 + #define NVM_CFG1_PORT_EXTENDED_FEC_MODE_EXTND_FEC_10G_BASE_R 0x4 + #define NVM_CFG1_PORT_EXTENDED_FEC_MODE_EXTND_FEC_25G_NONE 0x8 + #define NVM_CFG1_PORT_EXTENDED_FEC_MODE_EXTND_FEC_25G_BASE_R 0x10 + #define NVM_CFG1_PORT_EXTENDED_FEC_MODE_EXTND_FEC_25G_RS528 0x20 + #define NVM_CFG1_PORT_EXTENDED_FEC_MODE_EXTND_FEC_40G_NONE 0x40 + #define NVM_CFG1_PORT_EXTENDED_FEC_MODE_EXTND_FEC_40G_BASE_R 0x80 + #define NVM_CFG1_PORT_EXTENDED_FEC_MODE_EXTND_FEC_50G_NONE 0x100 + #define NVM_CFG1_PORT_EXTENDED_FEC_MODE_EXTND_FEC_50G_BASE_R 0x200 + #define NVM_CFG1_PORT_EXTENDED_FEC_MODE_EXTND_FEC_50G_RS528 0x400 + #define NVM_CFG1_PORT_EXTENDED_FEC_MODE_EXTND_FEC_50G_RS544 0x800 + #define NVM_CFG1_PORT_EXTENDED_FEC_MODE_EXTND_FEC_100G_NONE 0x1000 + #define NVM_CFG1_PORT_EXTENDED_FEC_MODE_EXTND_FEC_100G_BASE_R 0x2000 + #define NVM_CFG1_PORT_EXTENDED_FEC_MODE_EXTND_FEC_100G_RS528 0x4000 + #define NVM_CFG1_PORT_EXTENDED_FEC_MODE_EXTND_FEC_100G_RS544 0x8000 + u32 port_generic_cont_01; /* 0x98 */ + /* Define for GPIO mapping of SFP Rate Select 0 */ + #define NVM_CFG1_PORT_MODULE_RS0_MASK 0x000000FF + #define NVM_CFG1_PORT_MODULE_RS0_OFFSET 0 + #define NVM_CFG1_PORT_MODULE_RS0_NA 0x0 + #define NVM_CFG1_PORT_MODULE_RS0_GPIO0 0x1 + #define NVM_CFG1_PORT_MODULE_RS0_GPIO1 0x2 + #define NVM_CFG1_PORT_MODULE_RS0_GPIO2 0x3 + #define NVM_CFG1_PORT_MODULE_RS0_GPIO3 0x4 + #define NVM_CFG1_PORT_MODULE_RS0_GPIO4 0x5 + #define NVM_CFG1_PORT_MODULE_RS0_GPIO5 0x6 + #define NVM_CFG1_PORT_MODULE_RS0_GPIO6 0x7 + #define NVM_CFG1_PORT_MODULE_RS0_GPIO7 0x8 + #define NVM_CFG1_PORT_MODULE_RS0_GPIO8 0x9 + #define NVM_CFG1_PORT_MODULE_RS0_GPIO9 0xA + #define NVM_CFG1_PORT_MODULE_RS0_GPIO10 0xB + #define NVM_CFG1_PORT_MODULE_RS0_GPIO11 0xC + #define NVM_CFG1_PORT_MODULE_RS0_GPIO12 0xD + #define NVM_CFG1_PORT_MODULE_RS0_GPIO13 0xE + #define NVM_CFG1_PORT_MODULE_RS0_GPIO14 0xF + #define NVM_CFG1_PORT_MODULE_RS0_GPIO15 0x10 + #define NVM_CFG1_PORT_MODULE_RS0_GPIO16 0x11 + #define NVM_CFG1_PORT_MODULE_RS0_GPIO17 0x12 + #define NVM_CFG1_PORT_MODULE_RS0_GPIO18 0x13 + #define NVM_CFG1_PORT_MODULE_RS0_GPIO19 0x14 + #define NVM_CFG1_PORT_MODULE_RS0_GPIO20 0x15 + #define NVM_CFG1_PORT_MODULE_RS0_GPIO21 0x16 + #define NVM_CFG1_PORT_MODULE_RS0_GPIO22 0x17 + #define NVM_CFG1_PORT_MODULE_RS0_GPIO23 0x18 + #define NVM_CFG1_PORT_MODULE_RS0_GPIO24 0x19 + #define NVM_CFG1_PORT_MODULE_RS0_GPIO25 0x1A + #define NVM_CFG1_PORT_MODULE_RS0_GPIO26 0x1B + #define NVM_CFG1_PORT_MODULE_RS0_GPIO27 0x1C + #define NVM_CFG1_PORT_MODULE_RS0_GPIO28 0x1D + #define NVM_CFG1_PORT_MODULE_RS0_GPIO29 0x1E + #define NVM_CFG1_PORT_MODULE_RS0_GPIO30 0x1F + #define NVM_CFG1_PORT_MODULE_RS0_GPIO31 0x20 + /* Define for GPIO mapping of SFP Rate Select 1 */ + #define NVM_CFG1_PORT_MODULE_RS1_MASK 0x0000FF00 + #define NVM_CFG1_PORT_MODULE_RS1_OFFSET 8 + #define NVM_CFG1_PORT_MODULE_RS1_NA 0x0 + #define NVM_CFG1_PORT_MODULE_RS1_GPIO0 0x1 + #define NVM_CFG1_PORT_MODULE_RS1_GPIO1 0x2 + #define NVM_CFG1_PORT_MODULE_RS1_GPIO2 0x3 + #define NVM_CFG1_PORT_MODULE_RS1_GPIO3 0x4 + #define NVM_CFG1_PORT_MODULE_RS1_GPIO4 0x5 + #define NVM_CFG1_PORT_MODULE_RS1_GPIO5 0x6 
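Every option in this structure is published as an in-place bit mask plus a bit offset within one of the 32-bit words above. A minimal sketch of how such a pair is typically consumed, assuming a hypothetical GET_NVM_FIELD helper (not part of this header) and using the SFP Rate Select 0 mapping defined above:

    #include <stdint.h>

    /* Hypothetical helper: extract a field whose in-place mask and bit
     * offset follow the <NAME>_MASK / <NAME>_OFFSET convention used above.
     */
    #define GET_NVM_FIELD(val, name) \
            (((val) & name##_MASK) >> name##_OFFSET)

    /* Decode the GPIO mapped to SFP Rate Select 0 from the
     * port_generic_cont_01 word: 0x0 means not assigned, GPIO0 is encoded
     * as 0x1 and GPIO31 as 0x20, so the GPIO number is the encoding - 1.
     */
    static int nvm_rs0_gpio(uint32_t port_generic_cont_01)
    {
            uint32_t rs0 = GET_NVM_FIELD(port_generic_cont_01,
                                         NVM_CFG1_PORT_MODULE_RS0);

            return rs0 == NVM_CFG1_PORT_MODULE_RS0_NA ? -1 : (int)rs0 - 1;
    }

The same mask-and-shift pattern applies to every MASK/OFFSET pair defined for nvm_cfg1_port.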
+ #define NVM_CFG1_PORT_MODULE_RS1_GPIO6 0x7 + #define NVM_CFG1_PORT_MODULE_RS1_GPIO7 0x8 + #define NVM_CFG1_PORT_MODULE_RS1_GPIO8 0x9 + #define NVM_CFG1_PORT_MODULE_RS1_GPIO9 0xA + #define NVM_CFG1_PORT_MODULE_RS1_GPIO10 0xB + #define NVM_CFG1_PORT_MODULE_RS1_GPIO11 0xC + #define NVM_CFG1_PORT_MODULE_RS1_GPIO12 0xD + #define NVM_CFG1_PORT_MODULE_RS1_GPIO13 0xE + #define NVM_CFG1_PORT_MODULE_RS1_GPIO14 0xF + #define NVM_CFG1_PORT_MODULE_RS1_GPIO15 0x10 + #define NVM_CFG1_PORT_MODULE_RS1_GPIO16 0x11 + #define NVM_CFG1_PORT_MODULE_RS1_GPIO17 0x12 + #define NVM_CFG1_PORT_MODULE_RS1_GPIO18 0x13 + #define NVM_CFG1_PORT_MODULE_RS1_GPIO19 0x14 + #define NVM_CFG1_PORT_MODULE_RS1_GPIO20 0x15 + #define NVM_CFG1_PORT_MODULE_RS1_GPIO21 0x16 + #define NVM_CFG1_PORT_MODULE_RS1_GPIO22 0x17 + #define NVM_CFG1_PORT_MODULE_RS1_GPIO23 0x18 + #define NVM_CFG1_PORT_MODULE_RS1_GPIO24 0x19 + #define NVM_CFG1_PORT_MODULE_RS1_GPIO25 0x1A + #define NVM_CFG1_PORT_MODULE_RS1_GPIO26 0x1B + #define NVM_CFG1_PORT_MODULE_RS1_GPIO27 0x1C + #define NVM_CFG1_PORT_MODULE_RS1_GPIO28 0x1D + #define NVM_CFG1_PORT_MODULE_RS1_GPIO29 0x1E + #define NVM_CFG1_PORT_MODULE_RS1_GPIO30 0x1F + #define NVM_CFG1_PORT_MODULE_RS1_GPIO31 0x20 + /* Define for GPIO mapping of SFP Module TX Fault */ + #define NVM_CFG1_PORT_MODULE_TX_FAULT_MASK 0x00FF0000 + #define NVM_CFG1_PORT_MODULE_TX_FAULT_OFFSET 16 + #define NVM_CFG1_PORT_MODULE_TX_FAULT_NA 0x0 + #define NVM_CFG1_PORT_MODULE_TX_FAULT_GPIO0 0x1 + #define NVM_CFG1_PORT_MODULE_TX_FAULT_GPIO1 0x2 + #define NVM_CFG1_PORT_MODULE_TX_FAULT_GPIO2 0x3 + #define NVM_CFG1_PORT_MODULE_TX_FAULT_GPIO3 0x4 + #define NVM_CFG1_PORT_MODULE_TX_FAULT_GPIO4 0x5 + #define NVM_CFG1_PORT_MODULE_TX_FAULT_GPIO5 0x6 + #define NVM_CFG1_PORT_MODULE_TX_FAULT_GPIO6 0x7 + #define NVM_CFG1_PORT_MODULE_TX_FAULT_GPIO7 0x8 + #define NVM_CFG1_PORT_MODULE_TX_FAULT_GPIO8 0x9 + #define NVM_CFG1_PORT_MODULE_TX_FAULT_GPIO9 0xA + #define NVM_CFG1_PORT_MODULE_TX_FAULT_GPIO10 0xB + #define NVM_CFG1_PORT_MODULE_TX_FAULT_GPIO11 0xC + #define NVM_CFG1_PORT_MODULE_TX_FAULT_GPIO12 0xD + #define NVM_CFG1_PORT_MODULE_TX_FAULT_GPIO13 0xE + #define NVM_CFG1_PORT_MODULE_TX_FAULT_GPIO14 0xF + #define NVM_CFG1_PORT_MODULE_TX_FAULT_GPIO15 0x10 + #define NVM_CFG1_PORT_MODULE_TX_FAULT_GPIO16 0x11 + #define NVM_CFG1_PORT_MODULE_TX_FAULT_GPIO17 0x12 + #define NVM_CFG1_PORT_MODULE_TX_FAULT_GPIO18 0x13 + #define NVM_CFG1_PORT_MODULE_TX_FAULT_GPIO19 0x14 + #define NVM_CFG1_PORT_MODULE_TX_FAULT_GPIO20 0x15 + #define NVM_CFG1_PORT_MODULE_TX_FAULT_GPIO21 0x16 + #define NVM_CFG1_PORT_MODULE_TX_FAULT_GPIO22 0x17 + #define NVM_CFG1_PORT_MODULE_TX_FAULT_GPIO23 0x18 + #define NVM_CFG1_PORT_MODULE_TX_FAULT_GPIO24 0x19 + #define NVM_CFG1_PORT_MODULE_TX_FAULT_GPIO25 0x1A + #define NVM_CFG1_PORT_MODULE_TX_FAULT_GPIO26 0x1B + #define NVM_CFG1_PORT_MODULE_TX_FAULT_GPIO27 0x1C + #define NVM_CFG1_PORT_MODULE_TX_FAULT_GPIO28 0x1D + #define NVM_CFG1_PORT_MODULE_TX_FAULT_GPIO29 0x1E + #define NVM_CFG1_PORT_MODULE_TX_FAULT_GPIO30 0x1F + #define NVM_CFG1_PORT_MODULE_TX_FAULT_GPIO31 0x20 + /* Define for GPIO mapping of QSFP Reset signal */ + #define NVM_CFG1_PORT_QSFP_MODULE_RESET_MASK 0xFF000000 + #define NVM_CFG1_PORT_QSFP_MODULE_RESET_OFFSET 24 + #define NVM_CFG1_PORT_QSFP_MODULE_RESET_NA 0x0 + #define NVM_CFG1_PORT_QSFP_MODULE_RESET_GPIO0 0x1 + #define NVM_CFG1_PORT_QSFP_MODULE_RESET_GPIO1 0x2 + #define NVM_CFG1_PORT_QSFP_MODULE_RESET_GPIO2 0x3 + #define NVM_CFG1_PORT_QSFP_MODULE_RESET_GPIO3 0x4 + #define NVM_CFG1_PORT_QSFP_MODULE_RESET_GPIO4 0x5 + #define 
NVM_CFG1_PORT_QSFP_MODULE_RESET_GPIO5 0x6 + #define NVM_CFG1_PORT_QSFP_MODULE_RESET_GPIO6 0x7 + #define NVM_CFG1_PORT_QSFP_MODULE_RESET_GPIO7 0x8 + #define NVM_CFG1_PORT_QSFP_MODULE_RESET_GPIO8 0x9 + #define NVM_CFG1_PORT_QSFP_MODULE_RESET_GPIO9 0xA + #define NVM_CFG1_PORT_QSFP_MODULE_RESET_GPIO10 0xB + #define NVM_CFG1_PORT_QSFP_MODULE_RESET_GPIO11 0xC + #define NVM_CFG1_PORT_QSFP_MODULE_RESET_GPIO12 0xD + #define NVM_CFG1_PORT_QSFP_MODULE_RESET_GPIO13 0xE + #define NVM_CFG1_PORT_QSFP_MODULE_RESET_GPIO14 0xF + #define NVM_CFG1_PORT_QSFP_MODULE_RESET_GPIO15 0x10 + #define NVM_CFG1_PORT_QSFP_MODULE_RESET_GPIO16 0x11 + #define NVM_CFG1_PORT_QSFP_MODULE_RESET_GPIO17 0x12 + #define NVM_CFG1_PORT_QSFP_MODULE_RESET_GPIO18 0x13 + #define NVM_CFG1_PORT_QSFP_MODULE_RESET_GPIO19 0x14 + #define NVM_CFG1_PORT_QSFP_MODULE_RESET_GPIO20 0x15 + #define NVM_CFG1_PORT_QSFP_MODULE_RESET_GPIO21 0x16 + #define NVM_CFG1_PORT_QSFP_MODULE_RESET_GPIO22 0x17 + #define NVM_CFG1_PORT_QSFP_MODULE_RESET_GPIO23 0x18 + #define NVM_CFG1_PORT_QSFP_MODULE_RESET_GPIO24 0x19 + #define NVM_CFG1_PORT_QSFP_MODULE_RESET_GPIO25 0x1A + #define NVM_CFG1_PORT_QSFP_MODULE_RESET_GPIO26 0x1B + #define NVM_CFG1_PORT_QSFP_MODULE_RESET_GPIO27 0x1C + #define NVM_CFG1_PORT_QSFP_MODULE_RESET_GPIO28 0x1D + #define NVM_CFG1_PORT_QSFP_MODULE_RESET_GPIO29 0x1E + #define NVM_CFG1_PORT_QSFP_MODULE_RESET_GPIO30 0x1F + #define NVM_CFG1_PORT_QSFP_MODULE_RESET_GPIO31 0x20 + u32 port_generic_cont_02; /* 0x9C */ + /* Define for GPIO mapping of QSFP Transceiver LP mode */ + #define NVM_CFG1_PORT_QSFP_MODULE_LP_MODE_MASK 0x000000FF + #define NVM_CFG1_PORT_QSFP_MODULE_LP_MODE_OFFSET 0 + #define NVM_CFG1_PORT_QSFP_MODULE_LP_MODE_NA 0x0 + #define NVM_CFG1_PORT_QSFP_MODULE_LP_MODE_GPIO0 0x1 + #define NVM_CFG1_PORT_QSFP_MODULE_LP_MODE_GPIO1 0x2 + #define NVM_CFG1_PORT_QSFP_MODULE_LP_MODE_GPIO2 0x3 + #define NVM_CFG1_PORT_QSFP_MODULE_LP_MODE_GPIO3 0x4 + #define NVM_CFG1_PORT_QSFP_MODULE_LP_MODE_GPIO4 0x5 + #define NVM_CFG1_PORT_QSFP_MODULE_LP_MODE_GPIO5 0x6 + #define NVM_CFG1_PORT_QSFP_MODULE_LP_MODE_GPIO6 0x7 + #define NVM_CFG1_PORT_QSFP_MODULE_LP_MODE_GPIO7 0x8 + #define NVM_CFG1_PORT_QSFP_MODULE_LP_MODE_GPIO8 0x9 + #define NVM_CFG1_PORT_QSFP_MODULE_LP_MODE_GPIO9 0xA + #define NVM_CFG1_PORT_QSFP_MODULE_LP_MODE_GPIO10 0xB + #define NVM_CFG1_PORT_QSFP_MODULE_LP_MODE_GPIO11 0xC + #define NVM_CFG1_PORT_QSFP_MODULE_LP_MODE_GPIO12 0xD + #define NVM_CFG1_PORT_QSFP_MODULE_LP_MODE_GPIO13 0xE + #define NVM_CFG1_PORT_QSFP_MODULE_LP_MODE_GPIO14 0xF + #define NVM_CFG1_PORT_QSFP_MODULE_LP_MODE_GPIO15 0x10 + #define NVM_CFG1_PORT_QSFP_MODULE_LP_MODE_GPIO16 0x11 + #define NVM_CFG1_PORT_QSFP_MODULE_LP_MODE_GPIO17 0x12 + #define NVM_CFG1_PORT_QSFP_MODULE_LP_MODE_GPIO18 0x13 + #define NVM_CFG1_PORT_QSFP_MODULE_LP_MODE_GPIO19 0x14 + #define NVM_CFG1_PORT_QSFP_MODULE_LP_MODE_GPIO20 0x15 + #define NVM_CFG1_PORT_QSFP_MODULE_LP_MODE_GPIO21 0x16 + #define NVM_CFG1_PORT_QSFP_MODULE_LP_MODE_GPIO22 0x17 + #define NVM_CFG1_PORT_QSFP_MODULE_LP_MODE_GPIO23 0x18 + #define NVM_CFG1_PORT_QSFP_MODULE_LP_MODE_GPIO24 0x19 + #define NVM_CFG1_PORT_QSFP_MODULE_LP_MODE_GPIO25 0x1A + #define NVM_CFG1_PORT_QSFP_MODULE_LP_MODE_GPIO26 0x1B + #define NVM_CFG1_PORT_QSFP_MODULE_LP_MODE_GPIO27 0x1C + #define NVM_CFG1_PORT_QSFP_MODULE_LP_MODE_GPIO28 0x1D + #define NVM_CFG1_PORT_QSFP_MODULE_LP_MODE_GPIO29 0x1E + #define NVM_CFG1_PORT_QSFP_MODULE_LP_MODE_GPIO30 0x1F + #define NVM_CFG1_PORT_QSFP_MODULE_LP_MODE_GPIO31 0x20 + /* Define for GPIO mapping of Transceiver Power Enable */ + #define 
NVM_CFG1_PORT_MODULE_POWER_MASK 0x0000FF00 + #define NVM_CFG1_PORT_MODULE_POWER_OFFSET 8 + #define NVM_CFG1_PORT_MODULE_POWER_NA 0x0 + #define NVM_CFG1_PORT_MODULE_POWER_GPIO0 0x1 + #define NVM_CFG1_PORT_MODULE_POWER_GPIO1 0x2 + #define NVM_CFG1_PORT_MODULE_POWER_GPIO2 0x3 + #define NVM_CFG1_PORT_MODULE_POWER_GPIO3 0x4 + #define NVM_CFG1_PORT_MODULE_POWER_GPIO4 0x5 + #define NVM_CFG1_PORT_MODULE_POWER_GPIO5 0x6 + #define NVM_CFG1_PORT_MODULE_POWER_GPIO6 0x7 + #define NVM_CFG1_PORT_MODULE_POWER_GPIO7 0x8 + #define NVM_CFG1_PORT_MODULE_POWER_GPIO8 0x9 + #define NVM_CFG1_PORT_MODULE_POWER_GPIO9 0xA + #define NVM_CFG1_PORT_MODULE_POWER_GPIO10 0xB + #define NVM_CFG1_PORT_MODULE_POWER_GPIO11 0xC + #define NVM_CFG1_PORT_MODULE_POWER_GPIO12 0xD + #define NVM_CFG1_PORT_MODULE_POWER_GPIO13 0xE + #define NVM_CFG1_PORT_MODULE_POWER_GPIO14 0xF + #define NVM_CFG1_PORT_MODULE_POWER_GPIO15 0x10 + #define NVM_CFG1_PORT_MODULE_POWER_GPIO16 0x11 + #define NVM_CFG1_PORT_MODULE_POWER_GPIO17 0x12 + #define NVM_CFG1_PORT_MODULE_POWER_GPIO18 0x13 + #define NVM_CFG1_PORT_MODULE_POWER_GPIO19 0x14 + #define NVM_CFG1_PORT_MODULE_POWER_GPIO20 0x15 + #define NVM_CFG1_PORT_MODULE_POWER_GPIO21 0x16 + #define NVM_CFG1_PORT_MODULE_POWER_GPIO22 0x17 + #define NVM_CFG1_PORT_MODULE_POWER_GPIO23 0x18 + #define NVM_CFG1_PORT_MODULE_POWER_GPIO24 0x19 + #define NVM_CFG1_PORT_MODULE_POWER_GPIO25 0x1A + #define NVM_CFG1_PORT_MODULE_POWER_GPIO26 0x1B + #define NVM_CFG1_PORT_MODULE_POWER_GPIO27 0x1C + #define NVM_CFG1_PORT_MODULE_POWER_GPIO28 0x1D + #define NVM_CFG1_PORT_MODULE_POWER_GPIO29 0x1E + #define NVM_CFG1_PORT_MODULE_POWER_GPIO30 0x1F + #define NVM_CFG1_PORT_MODULE_POWER_GPIO31 0x20 + /* Define for LASI Mapping of Interrupt from module or PHY */ + #define NVM_CFG1_PORT_LASI_INTR_IN_MASK 0x000F0000 + #define NVM_CFG1_PORT_LASI_INTR_IN_OFFSET 16 + #define NVM_CFG1_PORT_LASI_INTR_IN_NA 0x0 + #define NVM_CFG1_PORT_LASI_INTR_IN_LASI0 0x1 + #define NVM_CFG1_PORT_LASI_INTR_IN_LASI1 0x2 + #define NVM_CFG1_PORT_LASI_INTR_IN_LASI2 0x3 + #define NVM_CFG1_PORT_LASI_INTR_IN_LASI3 0x4 + u32 reserved[110]; /* 0xA0 */ +}; + +struct nvm_cfg1_func { + struct nvm_cfg_mac_address mac_address; /* 0x0 */ + u32 rsrv1; /* 0x8 */ + #define NVM_CFG1_FUNC_RESERVED1_MASK 0x0000FFFF + #define NVM_CFG1_FUNC_RESERVED1_OFFSET 0 + #define NVM_CFG1_FUNC_RESERVED2_MASK 0xFFFF0000 + #define NVM_CFG1_FUNC_RESERVED2_OFFSET 16 + u32 rsrv2; /* 0xC */ + #define NVM_CFG1_FUNC_RESERVED3_MASK 0x0000FFFF + #define NVM_CFG1_FUNC_RESERVED3_OFFSET 0 + #define NVM_CFG1_FUNC_RESERVED4_MASK 0xFFFF0000 + #define NVM_CFG1_FUNC_RESERVED4_OFFSET 16 + u32 device_id; /* 0x10 */ + #define NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_MASK 0x0000FFFF + #define NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_OFFSET 0 + #define NVM_CFG1_FUNC_RESERVED77_MASK 0xFFFF0000 + #define NVM_CFG1_FUNC_RESERVED77_OFFSET 16 + u32 cmn_cfg; /* 0x14 */ + #define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_MASK 0x00000007 + #define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_OFFSET 0 + #define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_PXE 0x0 + #define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_ISCSI_BOOT 0x3 + #define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_FCOE_BOOT 0x4 + #define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_NONE 0x7 + #define NVM_CFG1_FUNC_VF_PCI_DEVICE_ID_MASK 0x0007FFF8 + #define NVM_CFG1_FUNC_VF_PCI_DEVICE_ID_OFFSET 3 + #define NVM_CFG1_FUNC_PERSONALITY_MASK 0x00780000 + #define NVM_CFG1_FUNC_PERSONALITY_OFFSET 19 + #define NVM_CFG1_FUNC_PERSONALITY_ETHERNET 0x0 + #define NVM_CFG1_FUNC_PERSONALITY_ISCSI 0x1 + #define 
NVM_CFG1_FUNC_PERSONALITY_FCOE 0x2 + #define NVM_CFG1_FUNC_BANDWIDTH_WEIGHT_MASK 0x7F800000 + #define NVM_CFG1_FUNC_BANDWIDTH_WEIGHT_OFFSET 23 + #define NVM_CFG1_FUNC_PAUSE_ON_HOST_RING_MASK 0x80000000 + #define NVM_CFG1_FUNC_PAUSE_ON_HOST_RING_OFFSET 31 + #define NVM_CFG1_FUNC_PAUSE_ON_HOST_RING_DISABLED 0x0 + #define NVM_CFG1_FUNC_PAUSE_ON_HOST_RING_ENABLED 0x1 + u32 pci_cfg; /* 0x18 */ + #define NVM_CFG1_FUNC_NUMBER_OF_VFS_PER_PF_MASK 0x0000007F + #define NVM_CFG1_FUNC_NUMBER_OF_VFS_PER_PF_OFFSET 0 + /* AH VF BAR2 size */ + #define NVM_CFG1_FUNC_VF_PCI_BAR2_SIZE_MASK 0x00003F80 + #define NVM_CFG1_FUNC_VF_PCI_BAR2_SIZE_OFFSET 7 + #define NVM_CFG1_FUNC_VF_PCI_BAR2_SIZE_DISABLED 0x0 + #define NVM_CFG1_FUNC_VF_PCI_BAR2_SIZE_4K 0x1 + #define NVM_CFG1_FUNC_VF_PCI_BAR2_SIZE_8K 0x2 + #define NVM_CFG1_FUNC_VF_PCI_BAR2_SIZE_16K 0x3 + #define NVM_CFG1_FUNC_VF_PCI_BAR2_SIZE_32K 0x4 + #define NVM_CFG1_FUNC_VF_PCI_BAR2_SIZE_64K 0x5 + #define NVM_CFG1_FUNC_VF_PCI_BAR2_SIZE_128K 0x6 + #define NVM_CFG1_FUNC_VF_PCI_BAR2_SIZE_256K 0x7 + #define NVM_CFG1_FUNC_VF_PCI_BAR2_SIZE_512K 0x8 + #define NVM_CFG1_FUNC_VF_PCI_BAR2_SIZE_1M 0x9 + #define NVM_CFG1_FUNC_VF_PCI_BAR2_SIZE_2M 0xA + #define NVM_CFG1_FUNC_VF_PCI_BAR2_SIZE_4M 0xB + #define NVM_CFG1_FUNC_VF_PCI_BAR2_SIZE_8M 0xC + #define NVM_CFG1_FUNC_VF_PCI_BAR2_SIZE_16M 0xD + #define NVM_CFG1_FUNC_VF_PCI_BAR2_SIZE_32M 0xE + #define NVM_CFG1_FUNC_VF_PCI_BAR2_SIZE_64M 0xF + #define NVM_CFG1_FUNC_BAR1_SIZE_MASK 0x0003C000 + #define NVM_CFG1_FUNC_BAR1_SIZE_OFFSET 14 + #define NVM_CFG1_FUNC_BAR1_SIZE_DISABLED 0x0 + #define NVM_CFG1_FUNC_BAR1_SIZE_64K 0x1 + #define NVM_CFG1_FUNC_BAR1_SIZE_128K 0x2 + #define NVM_CFG1_FUNC_BAR1_SIZE_256K 0x3 + #define NVM_CFG1_FUNC_BAR1_SIZE_512K 0x4 + #define NVM_CFG1_FUNC_BAR1_SIZE_1M 0x5 + #define NVM_CFG1_FUNC_BAR1_SIZE_2M 0x6 + #define NVM_CFG1_FUNC_BAR1_SIZE_4M 0x7 + #define NVM_CFG1_FUNC_BAR1_SIZE_8M 0x8 + #define NVM_CFG1_FUNC_BAR1_SIZE_16M 0x9 + #define NVM_CFG1_FUNC_BAR1_SIZE_32M 0xA + #define NVM_CFG1_FUNC_BAR1_SIZE_64M 0xB + #define NVM_CFG1_FUNC_BAR1_SIZE_128M 0xC + #define NVM_CFG1_FUNC_BAR1_SIZE_256M 0xD + #define NVM_CFG1_FUNC_BAR1_SIZE_512M 0xE + #define NVM_CFG1_FUNC_BAR1_SIZE_1G 0xF + #define NVM_CFG1_FUNC_MAX_BANDWIDTH_MASK 0x03FC0000 + #define NVM_CFG1_FUNC_MAX_BANDWIDTH_OFFSET 18 + /* Hide function in npar mode */ + #define NVM_CFG1_FUNC_FUNCTION_HIDE_MASK 0x04000000 + #define NVM_CFG1_FUNC_FUNCTION_HIDE_OFFSET 26 + #define NVM_CFG1_FUNC_FUNCTION_HIDE_DISABLED 0x0 + #define NVM_CFG1_FUNC_FUNCTION_HIDE_ENABLED 0x1 + /* AH BAR2 size (per function) */ + #define NVM_CFG1_FUNC_BAR2_SIZE_MASK 0x78000000 + #define NVM_CFG1_FUNC_BAR2_SIZE_OFFSET 27 + #define NVM_CFG1_FUNC_BAR2_SIZE_DISABLED 0x0 + #define NVM_CFG1_FUNC_BAR2_SIZE_1M 0x5 + #define NVM_CFG1_FUNC_BAR2_SIZE_2M 0x6 + #define NVM_CFG1_FUNC_BAR2_SIZE_4M 0x7 + #define NVM_CFG1_FUNC_BAR2_SIZE_8M 0x8 + #define NVM_CFG1_FUNC_BAR2_SIZE_16M 0x9 + #define NVM_CFG1_FUNC_BAR2_SIZE_32M 0xA + #define NVM_CFG1_FUNC_BAR2_SIZE_64M 0xB + #define NVM_CFG1_FUNC_BAR2_SIZE_128M 0xC + #define NVM_CFG1_FUNC_BAR2_SIZE_256M 0xD + #define NVM_CFG1_FUNC_BAR2_SIZE_512M 0xE + #define NVM_CFG1_FUNC_BAR2_SIZE_1G 0xF + struct nvm_cfg_mac_address fcoe_node_wwn_mac_addr; /* 0x1C */ + struct nvm_cfg_mac_address fcoe_port_wwn_mac_addr; /* 0x24 */ + u32 preboot_generic_cfg; /* 0x2C */ + #define NVM_CFG1_FUNC_PREBOOT_VLAN_VALUE_MASK 0x0000FFFF + #define NVM_CFG1_FUNC_PREBOOT_VLAN_VALUE_OFFSET 0 + #define NVM_CFG1_FUNC_PREBOOT_VLAN_MASK 0x00010000 + #define NVM_CFG1_FUNC_PREBOOT_VLAN_OFFSET 16 + 
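The BAR sizing fields just above use a power-of-two encoding: NVM_CFG1_FUNC_BAR1_SIZE steps from 64K at 0x1 up to 1G at 0xF, and the AH VF BAR2 size steps from 4K at 0x1 up to 64M at 0xF (the per-function BAR2 values listed follow the same progression). A sketch of turning those encodings into byte counts; the helper names are illustrative, not from this header:

    #include <stdint.h>

    /* BAR1 size encoding: 0x0 = disabled, otherwise 64K << (v - 1),
     * i.e. 0x1 -> 64K ... 0xF -> 1G, matching the values listed above. */
    static uint64_t nvm_bar1_size_bytes(uint32_t v)
    {
            return v ? UINT64_C(0x10000) << (v - 1) : 0;
    }

    /* AH VF BAR2 size encoding: 0x0 = disabled, otherwise 4K << (v - 1),
     * i.e. 0x1 -> 4K ... 0xF -> 64M. */
    static uint64_t nvm_vf_bar2_size_bytes(uint32_t v)
    {
            return v ? UINT64_C(0x1000) << (v - 1) : 0;
    }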
#define NVM_CFG1_FUNC_NPAR_ENABLED_PROTOCOL_MASK 0x001E0000 + #define NVM_CFG1_FUNC_NPAR_ENABLED_PROTOCOL_OFFSET 17 + #define NVM_CFG1_FUNC_NPAR_ENABLED_PROTOCOL_ETHERNET 0x1 + #define NVM_CFG1_FUNC_NPAR_ENABLED_PROTOCOL_FCOE 0x2 + #define NVM_CFG1_FUNC_NPAR_ENABLED_PROTOCOL_ISCSI 0x4 + #define NVM_CFG1_FUNC_NPAR_ENABLED_PROTOCOL_RDMA 0x8 + u32 reserved[8]; /* 0x30 */ +}; + +struct nvm_cfg1 { + struct nvm_cfg1_glob glob; /* 0x0 */ + struct nvm_cfg1_path path[MCP_GLOB_PATH_MAX]; /* 0x228 */ + struct nvm_cfg1_port port[MCP_GLOB_PORT_MAX]; /* 0x230 */ + struct nvm_cfg1_func func[MCP_GLOB_FUNC_MAX]; /* 0xB90 */ +}; + +/****************************************** + * nvm_cfg structs + ******************************************/ + +struct board_info { + u16 vendor_id; + u16 eth_did_suffix; + u16 sub_vendor_id; + u16 sub_device_id; + char *board_name; + char *friendly_name; +}; + +enum nvm_cfg_sections { + NVM_CFG_SECTION_NVM_CFG1, + NVM_CFG_SECTION_MAX +}; + +struct nvm_cfg { + u32 num_sections; + u32 sections_offset[NVM_CFG_SECTION_MAX]; + struct nvm_cfg1 cfg1; +}; + +/****************************************** + * nvm_cfg options + ******************************************/ + +#define NVM_CFG_ID_MAC_ADDRESS 1 +#define NVM_CFG_ID_BOARD_SWAP 8 +#define NVM_CFG_ID_MF_MODE 9 +#define NVM_CFG_ID_LED_MODE 10 +#define NVM_CFG_ID_FAN_FAILURE_ENFORCEMENT 11 +#define NVM_CFG_ID_ENGINEERING_CHANGE 12 +#define NVM_CFG_ID_MANUFACTURING_ID 13 +#define NVM_CFG_ID_SERIAL_NUMBER 14 +#define NVM_CFG_ID_PCI_GEN 15 +#define NVM_CFG_ID_BEACON_WOL_ENABLED 16 +#define NVM_CFG_ID_ASPM_SUPPORT 17 +#define NVM_CFG_ID_ROCE_PRIORITY 20 +#define NVM_CFG_ID_ENABLE_WOL_ON_ACPI_PATTERN 22 +#define NVM_CFG_ID_MAGIC_PACKET_WOL 23 +#define NVM_CFG_ID_AVS_MARGIN_LOW_BB 24 +#define NVM_CFG_ID_AVS_MARGIN_HIGH_BB 25 +#define NVM_CFG_ID_DCBX_MODE 26 +#define NVM_CFG_ID_DRV_SPEED_CAPABILITY_MASK 27 +#define NVM_CFG_ID_MFW_SPEED_CAPABILITY_MASK 28 +#define NVM_CFG_ID_DRV_LINK_SPEED 29 +#define NVM_CFG_ID_DRV_FLOW_CONTROL 30 +#define NVM_CFG_ID_MFW_LINK_SPEED 31 +#define NVM_CFG_ID_MFW_FLOW_CONTROL 32 +#define NVM_CFG_ID_OPTIC_MODULE_VENDOR_ENFORCEMENT 33 +#define NVM_CFG_ID_OPTIONAL_LINK_MODES_BB 34 +#define NVM_CFG_ID_MF_VENDOR_DEVICE_ID 37 +#define NVM_CFG_ID_NETWORK_PORT_MODE 38 +#define NVM_CFG_ID_MPS10_RX_LANE_SWAP_BB 39 +#define NVM_CFG_ID_MPS10_TX_LANE_SWAP_BB 40 +#define NVM_CFG_ID_MPS10_RX_LANE_POLARITY_BB 41 +#define NVM_CFG_ID_MPS10_TX_LANE_POLARITY_BB 42 +#define NVM_CFG_ID_MPS25_RX_LANE_SWAP_BB 43 +#define NVM_CFG_ID_MPS25_TX_LANE_SWAP_BB 44 +#define NVM_CFG_ID_MPS25_RX_LANE_POLARITY 45 +#define NVM_CFG_ID_MPS25_TX_LANE_POLARITY 46 +#define NVM_CFG_ID_MPS10_PREEMPHASIS_BB 47 +#define NVM_CFG_ID_MPS10_DRIVER_CURRENT_BB 48 +#define NVM_CFG_ID_MPS10_ENFORCE_TX_FIR_CFG_BB 49 +#define NVM_CFG_ID_MPS25_PREEMPHASIS 50 +#define NVM_CFG_ID_MPS25_DRIVER_CURRENT 51 +#define NVM_CFG_ID_MPS25_ENFORCE_TX_FIR_CFG 52 +#define NVM_CFG_ID_MPS10_CORE_ADDR_BB 53 +#define NVM_CFG_ID_MPS25_CORE_ADDR_BB 54 +#define NVM_CFG_ID_EXTERNAL_PHY_TYPE 55 +#define NVM_CFG_ID_EXTERNAL_PHY_ADDRESS 56 +#define NVM_CFG_ID_SERDES_NET_INTERFACE_BB 57 +#define NVM_CFG_ID_AN_MODE_BB 58 +#define NVM_CFG_ID_PREBOOT_OPROM 59 +#define NVM_CFG_ID_MBA_DELAY_TIME 61 +#define NVM_CFG_ID_MBA_SETUP_HOT_KEY 62 +#define NVM_CFG_ID_MBA_HIDE_SETUP_PROMPT 63 +#define NVM_CFG_ID_PREBOOT_LINK_SPEED 67 +#define NVM_CFG_ID_PREBOOT_BOOT_PROTOCOL 69 +#define NVM_CFG_ID_ENABLE_SRIOV 70 +#define NVM_CFG_ID_ENABLE_ATC 71 +#define NVM_CFG_ID_NUMBER_OF_VFS_PER_PF 74 +#define 
NVM_CFG_ID_VF_PCI_BAR2_SIZE_K2_E5 75 +#define NVM_CFG_ID_VENDOR_ID 76 +#define NVM_CFG_ID_SUBSYSTEM_VENDOR_ID 78 +#define NVM_CFG_ID_SUBSYSTEM_DEVICE_ID 79 +#define NVM_CFG_ID_VF_PCI_BAR2_SIZE_BB 81 +#define NVM_CFG_ID_BAR1_SIZE 82 +#define NVM_CFG_ID_BAR2_SIZE_BB 83 +#define NVM_CFG_ID_VF_PCI_DEVICE_ID 84 +#define NVM_CFG_ID_MPS10_TXFIR_MAIN_BB 85 +#define NVM_CFG_ID_MPS10_TXFIR_POST_BB 86 +#define NVM_CFG_ID_MPS25_TXFIR_MAIN 87 +#define NVM_CFG_ID_MPS25_TXFIR_POST 88 +#define NVM_CFG_ID_MANUFACTURE_KIT_VERSION 89 +#define NVM_CFG_ID_MANUFACTURE_TIMESTAMP 90 +#define NVM_CFG_ID_PERSONALITY 92 +#define NVM_CFG_ID_FCOE_NODE_WWN_MAC_ADDR 93 +#define NVM_CFG_ID_FCOE_PORT_WWN_MAC_ADDR 94 +#define NVM_CFG_ID_BANDWIDTH_WEIGHT 95 +#define NVM_CFG_ID_MAX_BANDWIDTH 96 +#define NVM_CFG_ID_PAUSE_ON_HOST_RING 97 +#define NVM_CFG_ID_PCIE_PREEMPHASIS 98 +#define NVM_CFG_ID_LLDP_MAC_ADDRESS 99 +#define NVM_CFG_ID_FCOE_WWN_NODE_PREFIX 100 +#define NVM_CFG_ID_FCOE_WWN_PORT_PREFIX 101 +#define NVM_CFG_ID_LED_SPEED_SELECT 102 +#define NVM_CFG_ID_LED_PORT_SWAP 103 +#define NVM_CFG_ID_AVS_MODE_BB 104 +#define NVM_CFG_ID_OVERRIDE_SECURE_MODE 105 +#define NVM_CFG_ID_AVS_DAC_CODE_BB 106 +#define NVM_CFG_ID_MBI_VERSION 107 +#define NVM_CFG_ID_MBI_DATE 108 +#define NVM_CFG_ID_SMBUS_ADDRESS 109 +#define NVM_CFG_ID_NCSI_PACKAGE_ID 110 +#define NVM_CFG_ID_SIDEBAND_MODE 111 +#define NVM_CFG_ID_SMBUS_MODE 112 +#define NVM_CFG_ID_NCSI 113 +#define NVM_CFG_ID_TRANSCEIVER_MODULE_ABSENT 114 +#define NVM_CFG_ID_I2C_MUX_SELECT_GPIO_BB 115 +#define NVM_CFG_ID_I2C_MUX_SELECT_VALUE_BB 116 +#define NVM_CFG_ID_DEVICE_CAPABILITIES 117 +#define NVM_CFG_ID_ETH_DID_SUFFIX 118 +#define NVM_CFG_ID_FCOE_DID_SUFFIX 119 +#define NVM_CFG_ID_ISCSI_DID_SUFFIX 120 +#define NVM_CFG_ID_DEFAULT_ENABLED_PROTOCOLS 122 +#define NVM_CFG_ID_POWER_DISSIPATED_BB 123 +#define NVM_CFG_ID_POWER_CONSUMED_BB 124 +#define NVM_CFG_ID_AUX_MODE 125 +#define NVM_CFG_ID_PORT_TYPE 126 +#define NVM_CFG_ID_TX_DISABLE 127 +#define NVM_CFG_ID_MAX_LINK_WIDTH 128 +#define NVM_CFG_ID_ASPM_L1_MODE 130 +#define NVM_CFG_ID_ON_CHIP_SENSOR_MODE 131 +#define NVM_CFG_ID_PREBOOT_VLAN_VALUE 132 +#define NVM_CFG_ID_PREBOOT_VLAN 133 +#define NVM_CFG_ID_TEMPERATURE_PERIOD_BETWEEN_CHECKS 134 +#define NVM_CFG_ID_SHUTDOWN_THRESHOLD_TEMPERATURE 135 +#define NVM_CFG_ID_MAX_COUNT_OPER_THRESHOLD 136 +#define NVM_CFG_ID_DEAD_TEMP_TH_TEMPERATURE 137 +#define NVM_CFG_ID_TEMPERATURE_MONITORING_MODE 139 +#define NVM_CFG_ID_AN_25G_50G_OUI 140 +#define NVM_CFG_ID_PLDM_SENSOR_MODE 141 +#define NVM_CFG_ID_EXTERNAL_THERMAL_SENSOR 142 +#define NVM_CFG_ID_EXTERNAL_THERMAL_SENSOR_ADDRESS 143 +#define NVM_CFG_ID_FAN_FAILURE_DURATION 144 +#define NVM_CFG_ID_FEC_FORCE_MODE 145 +#define NVM_CFG_ID_MULTI_NETWORK_MODES_CAPABILITY 146 +#define NVM_CFG_ID_MNM_10G_DRV_SPEED_CAPABILITY_MASK 147 +#define NVM_CFG_ID_MNM_10G_MFW_SPEED_CAPABILITY_MASK 148 +#define NVM_CFG_ID_MNM_10G_DRV_LINK_SPEED 149 +#define NVM_CFG_ID_MNM_10G_MFW_LINK_SPEED 150 +#define NVM_CFG_ID_MNM_10G_PORT_TYPE 151 +#define NVM_CFG_ID_MNM_10G_SERDES_NET_INTERFACE 152 +#define NVM_CFG_ID_MNM_10G_FEC_FORCE_MODE 153 +#define NVM_CFG_ID_MNM_10G_ETH_DID_SUFFIX 154 +#define NVM_CFG_ID_MNM_25G_DRV_SPEED_CAPABILITY_MASK 155 +#define NVM_CFG_ID_MNM_25G_MFW_SPEED_CAPABILITY_MASK 156 +#define NVM_CFG_ID_MNM_25G_DRV_LINK_SPEED 157 +#define NVM_CFG_ID_MNM_25G_MFW_LINK_SPEED 158 +#define NVM_CFG_ID_MNM_25G_PORT_TYPE 159 +#define NVM_CFG_ID_MNM_25G_SERDES_NET_INTERFACE 160 +#define NVM_CFG_ID_MNM_25G_ETH_DID_SUFFIX 161 +#define 
NVM_CFG_ID_MNM_25G_FEC_FORCE_MODE 162 +#define NVM_CFG_ID_MNM_40G_DRV_SPEED_CAPABILITY_MASK 163 +#define NVM_CFG_ID_MNM_40G_MFW_SPEED_CAPABILITY_MASK 164 +#define NVM_CFG_ID_MNM_40G_DRV_LINK_SPEED 165 +#define NVM_CFG_ID_MNM_40G_MFW_LINK_SPEED 166 +#define NVM_CFG_ID_MNM_40G_PORT_TYPE 167 +#define NVM_CFG_ID_MNM_40G_SERDES_NET_INTERFACE 168 +#define NVM_CFG_ID_MNM_40G_ETH_DID_SUFFIX 169 +#define NVM_CFG_ID_MNM_40G_FEC_FORCE_MODE 170 +#define NVM_CFG_ID_MNM_50G_DRV_SPEED_CAPABILITY_MASK 171 +#define NVM_CFG_ID_MNM_50G_MFW_SPEED_CAPABILITY_MASK 172 +#define NVM_CFG_ID_MNM_50G_DRV_LINK_SPEED 173 +#define NVM_CFG_ID_MNM_50G_MFW_LINK_SPEED 174 +#define NVM_CFG_ID_MNM_50G_PORT_TYPE 175 +#define NVM_CFG_ID_MNM_50G_SERDES_NET_INTERFACE 176 +#define NVM_CFG_ID_MNM_50G_ETH_DID_SUFFIX 177 +#define NVM_CFG_ID_MNM_50G_FEC_FORCE_MODE 178 +#define NVM_CFG_ID_MNM_100G_DRV_SPEED_CAP_MASK_BB 179 +#define NVM_CFG_ID_MNM_100G_MFW_SPEED_CAP_MASK_BB 180 +#define NVM_CFG_ID_MNM_100G_DRV_LINK_SPEED_BB 181 +#define NVM_CFG_ID_MNM_100G_MFW_LINK_SPEED_BB 182 +#define NVM_CFG_ID_MNM_100G_PORT_TYPE_BB 183 +#define NVM_CFG_ID_MNM_100G_SERDES_NET_INTERFACE_BB 184 +#define NVM_CFG_ID_MNM_100G_ETH_DID_SUFFIX_BB 185 +#define NVM_CFG_ID_MNM_100G_FEC_FORCE_MODE_BB 186 +#define NVM_CFG_ID_FUNCTION_HIDE 187 +#define NVM_CFG_ID_BAR2_TOTAL_BUDGET_BB 188 +#define NVM_CFG_ID_CRASH_DUMP_TRIGGER_ENABLE 189 +#define NVM_CFG_ID_MPS25_LANE_SWAP_K2_E5 190 +#define NVM_CFG_ID_BAR2_SIZE_K2_E5 191 +#define NVM_CFG_ID_EXT_PHY_RESET 192 +#define NVM_CFG_ID_EEE_POWER_SAVING_MODE 193 +#define NVM_CFG_ID_OVERRIDE_PCIE_PRESET_EQUAL_BB 194 +#define NVM_CFG_ID_PCIE_PRESET_VALUE_BB 195 +#define NVM_CFG_ID_MAX_MSIX 196 +#define NVM_CFG_ID_NVM_CFG_VERSION 197 +#define NVM_CFG_ID_NVM_CFG_NEW_OPTION_SEQ 198 +#define NVM_CFG_ID_NVM_CFG_REMOVED_OPTION_SEQ 199 +#define NVM_CFG_ID_NVM_CFG_UPDATED_VALUE_SEQ 200 +#define NVM_CFG_ID_EXTENDED_SERIAL_NUMBER 201 +#define NVM_CFG_ID_RDMA_ENABLEMENT 202 +#define NVM_CFG_ID_MAX_CONT_OPERATING_TEMP 203 +#define NVM_CFG_ID_RUNTIME_PORT_SWAP_GPIO 204 +#define NVM_CFG_ID_RUNTIME_PORT_SWAP_MAP 205 +#define NVM_CFG_ID_THERMAL_EVENT_GPIO 206 +#define NVM_CFG_ID_I2C_INTERRUPT_GPIO 207 +#define NVM_CFG_ID_DCI_SUPPORT 208 +#define NVM_CFG_ID_PCIE_VDM_ENABLED 209 +#define NVM_CFG_ID_OEM1_NUMBER 210 +#define NVM_CFG_ID_OEM2_NUMBER 211 +#define NVM_CFG_ID_FEC_AN_MODE_K2_E5 212 +#define NVM_CFG_ID_NPAR_ENABLED_PROTOCOL 213 +#define NVM_CFG_ID_MPS25_ACTIVE_TXFIR_PRE 214 +#define NVM_CFG_ID_MPS25_ACTIVE_TXFIR_MAIN 215 +#define NVM_CFG_ID_MPS25_ACTIVE_TXFIR_POST 216 +#define NVM_CFG_ID_ALOM_FAN_ON_AUX_GPIO 217 +#define NVM_CFG_ID_ALOM_FAN_ON_AUX_VALUE 218 +#define NVM_CFG_ID_SLOT_ID_GPIO 219 +#define NVM_CFG_ID_PMBUS_SCL_GPIO 220 +#define NVM_CFG_ID_PMBUS_SDA_GPIO 221 +#define NVM_CFG_ID_RESET_ON_LAN 222 +#define NVM_CFG_ID_NCSI_PACKAGE_ID_IO 223 +#define NVM_CFG_ID_TX_RX_EQ_25G_HLPC 224 +#define NVM_CFG_ID_TX_RX_EQ_25G_LLPC 225 +#define NVM_CFG_ID_TX_RX_EQ_25G_AC 226 +#define NVM_CFG_ID_TX_RX_EQ_10G_PC 227 +#define NVM_CFG_ID_TX_RX_EQ_10G_AC 228 +#define NVM_CFG_ID_TX_RX_EQ_1G 229 +#define NVM_CFG_ID_TX_RX_EQ_25G_BT 230 +#define NVM_CFG_ID_TX_RX_EQ_10G_BT 231 +#define NVM_CFG_ID_PF_MAPPING 232 +#define NVM_CFG_ID_RECOVERY_MODE 234 +#define NVM_CFG_ID_PHY_MODULE_DEAD_TEMP_TH 235 +#define NVM_CFG_ID_PHY_MODULE_ALOM_FAN_ON_TEMP_TH 236 +#define NVM_CFG_ID_PREBOOT_DEBUG_MODE_STD 237 +#define NVM_CFG_ID_PREBOOT_DEBUG_MODE_EXT 238 +#define NVM_CFG_ID_SMARTLINQ_MODE 239 +#define NVM_CFG_ID_PREBOOT_LINK_UP_DELAY 242 +#define 
NVM_CFG_ID_VOLTAGE_REGULATOR_TYPE 243 +#define NVM_CFG_ID_MAIN_CLOCK_FREQUENCY 245 +#define NVM_CFG_ID_MAC_CLOCK_FREQUENCY 246 +#define NVM_CFG_ID_STORM_CLOCK_FREQUENCY 247 +#define NVM_CFG_ID_PCIE_RELAXED_ORDERING 248 +#define NVM_CFG_ID_EXT_PHY_MDI_PAIR_SWAP 249 +#define NVM_CFG_ID_UID_LED_MODE_MASK 250 +#define NVM_CFG_ID_NCSI_AUX_LINK 251 +#define NVM_CFG_ID_SMARTAN_FEC_OVERRIDE 272 +#define NVM_CFG_ID_LLDP_DISABLE 273 +#define NVM_CFG_ID_SHORT_PERST_PROTECTION_K2_E5 274 +#define NVM_CFG_ID_TRANSCEIVER_RATE_SELECT_0 275 +#define NVM_CFG_ID_TRANSCEIVER_RATE_SELECT_1 276 +#define NVM_CFG_ID_TRANSCEIVER_MODULE_TX_FAULT 277 +#define NVM_CFG_ID_TRANSCEIVER_QSFP_MODULE_RESET 278 +#define NVM_CFG_ID_TRANSCEIVER_QSFP_LP_MODE 279 +#define NVM_CFG_ID_TRANSCEIVER_POWER_ENABLE 280 +#define NVM_CFG_ID_LASI_INTERRUPT_INPUT 281 +#define NVM_CFG_ID_EXT_PHY_PGOOD_INPUT 282 +#define NVM_CFG_ID_TRACE_LEVEL 283 +#define NVM_CFG_ID_TRACE_MODULES 284 +#define NVM_CFG_ID_EMULATED_TMP421 285 +#define NVM_CFG_ID_WARNING_TEMPERATURE_GPIO 286 +#define NVM_CFG_ID_WARNING_TEMPERATURE_THRESHOLD 287 +#define NVM_CFG_ID_PERST_INDICATION_GPIO 288 +#define NVM_CFG_ID_PCIE_CLASS_CODE_FCOE_K2_E5 289 +#define NVM_CFG_ID_PCIE_CLASS_CODE_ISCSI_K2_E5 290 +#define NVM_CFG_ID_NUMBER_OF_PROVISIONED_MAC 291 +#define NVM_CFG_ID_NUMBER_OF_PROVISIONED_VF_MAC 292 +#define NVM_CFG_ID_PROVISIONED_BMC_MAC 293 +#define NVM_CFG_ID_OVERRIDE_AGC_THRESHOLD_K2 294 +#define NVM_CFG_ID_WARNING_TEMPERATURE_DELTA 295 +#define NVM_CFG_ID_ALOM_FAN_ON_AUX_DELTA 296 +#define NVM_CFG_ID_DEAD_TEMP_TH_DELTA 297 +#define NVM_CFG_ID_PHY_MODULE_WARNING_TEMP_TH 298 +#define NVM_CFG_ID_DISABLE_PLDM 299 +#define NVM_CFG_ID_DISABLE_MCTP_OEM 300 +#endif /* NVM_CFG_H */ diff --git a/src/spdk/dpdk/drivers/net/qede/base/reg_addr.h b/src/spdk/dpdk/drivers/net/qede/base/reg_addr.h new file mode 100644 index 000000000..91d889dc8 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/qede/base/reg_addr.h @@ -0,0 +1,1247 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. + * All rights reserved. 
+ * www.cavium.com + */ + +#define CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE_SHIFT \ + 0 + +#define CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE ( \ + 0xfffUL << 0) + +#define CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE_SHIFT \ + 12 + +#define CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE ( \ + 0xfffUL << 12) + +#define CDU_REG_CID_ADDR_PARAMS_NCIB_SHIFT \ + 24 + +#define CDU_REG_CID_ADDR_PARAMS_NCIB ( \ + 0xffUL << 24) /* @DPDK */ + +#define XSDM_REG_OPERATION_GEN \ + 0xf80408UL +#define NIG_REG_RX_BRB_OUT_EN \ + 0x500e18UL +#define NIG_REG_STORM_OUT_EN \ + 0x500e08UL +#define PSWRQ2_REG_L2P_VALIDATE_VFID \ + 0x240c50UL +#define PGLUE_B_REG_USE_CLIENTID_IN_TAG \ + 0x2aae04UL +#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER \ + 0x2aa16cUL +#define BAR0_MAP_REG_MSDM_RAM \ + 0x1d00000UL +#define BAR0_MAP_REG_USDM_RAM \ + 0x1d80000UL +#define BAR0_MAP_REG_PSDM_RAM \ + 0x1f00000UL +#define BAR0_MAP_REG_TSDM_RAM \ + 0x1c80000UL +#define NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF \ + 0x5011f4UL +#define PRS_REG_SEARCH_TCP \ + 0x1f0400UL +#define PRS_REG_SEARCH_UDP \ + 0x1f0404UL +#define PRS_REG_SEARCH_OPENFLOW \ + 0x1f0434UL +#define TM_REG_PF_ENABLE_CONN \ + 0x2c043cUL +#define TM_REG_PF_ENABLE_TASK \ + 0x2c0444UL +#define TM_REG_PF_SCAN_ACTIVE_CONN \ + 0x2c04fcUL +#define TM_REG_PF_SCAN_ACTIVE_TASK \ + 0x2c0500UL +#define IGU_REG_LEADING_EDGE_LATCH \ + 0x18082cUL +#define IGU_REG_TRAILING_EDGE_LATCH \ + 0x180830UL +#define QM_REG_USG_CNT_PF_TX \ + 0x2f2eacUL +#define QM_REG_USG_CNT_PF_OTHER \ + 0x2f2eb0UL +#define DORQ_REG_PF_DB_ENABLE \ + 0x100508UL +#define QM_REG_PF_EN \ + 0x2f2ea4UL +#define TCFC_REG_STRONG_ENABLE_PF \ + 0x2d0708UL +#define CCFC_REG_STRONG_ENABLE_PF \ + 0x2e0708UL +#define PGLUE_B_REG_PGL_ADDR_88_F0 \ + 0x2aa404UL +#define PGLUE_B_REG_PGL_ADDR_8C_F0 \ + 0x2aa408UL +#define PGLUE_B_REG_PGL_ADDR_90_F0 \ + 0x2aa40cUL +#define PGLUE_B_REG_PGL_ADDR_94_F0 \ + 0x2aa410UL +#define PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR \ + 0x2aa138UL +#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ \ + 0x2aa174UL +#define MISC_REG_GEN_PURP_CR0 \ + 0x008c80UL +#define MCP_REG_SCRATCH \ + 0xe20000UL +#define CNIG_REG_NW_PORT_MODE_BB_B0 \ + 0x218200UL +#define MISCS_REG_CHIP_NUM \ + 0x00976cUL +#define MISCS_REG_CHIP_REV \ + 0x009770UL +#define MISCS_REG_CMT_ENABLED_FOR_PAIR \ + 0x00971cUL +#define MISCS_REG_CHIP_TEST_REG \ + 0x009778UL +#define MISCS_REG_CHIP_METAL \ + 0x009774UL +#define BRB_REG_HEADER_SIZE \ + 0x340804UL +#define BTB_REG_HEADER_SIZE \ + 0xdb0804UL +#define CAU_REG_LONG_TIMEOUT_THRESHOLD \ + 0x1c0708UL +#define CCFC_REG_ACTIVITY_COUNTER \ + 0x2e8800UL +#define CDU_REG_CID_ADDR_PARAMS \ + 0x580900UL +#define DBG_REG_CLIENT_ENABLE \ + 0x010004UL +#define DMAE_REG_INIT \ + 0x00c000UL +#define DORQ_REG_IFEN \ + 0x100040UL +#define GRC_REG_TIMEOUT_EN \ + 0x050404UL +#define IGU_REG_BLOCK_CONFIGURATION \ + 0x180040UL +#define MCM_REG_INIT \ + 0x1200000UL +#define MCP2_REG_DBG_DWORD_ENABLE \ + 0x052404UL +#define MISC_REG_PORT_MODE \ + 0x008c00UL +#define MISC_REG_BLOCK_256B_EN \ + 0x008c14UL +#define MISCS_REG_RESET_PL_HV \ + 0x009060UL +#define MISCS_REG_CLK_100G_MODE \ + 0x009070UL +#define MISCS_REG_RESET_PL_HV_2_K2 \ + 0x009150UL +#define MSDM_REG_ENABLE_IN1 \ + 0xfc0004UL +#define MSEM_REG_ENABLE_IN \ + 0x1800004UL +#define NIG_REG_CM_HDR \ + 0x500840UL +#define NCSI_REG_CONFIG \ + 0x040200UL +#define PSWRQ2_REG_RBC_DONE \ + 0x240000UL +#define PSWRQ2_REG_CFG_DONE \ + 0x240004UL +#define PBF_REG_INIT \ + 0xd80000UL +#define PTU_REG_ATC_INIT_ARRAY \ + 0x560000UL +#define PCM_REG_INIT \ + 0x1100000UL +#define 
PGLUE_B_REG_ADMIN_PER_PF_REGION \ + 0x2a9000UL +#define PRM_REG_DISABLE_PRM \ + 0x230000UL +#define PRS_REG_SOFT_RST \ + 0x1f0000UL +#define PSDM_REG_ENABLE_IN1 \ + 0xfa0004UL +#define PSEM_REG_ENABLE_IN \ + 0x1600004UL +#define PSWRQ_REG_DBG_SELECT \ + 0x280020UL +#define PSWRQ2_REG_CDUT_P_SIZE \ + 0x24000cUL +#define PSWHST_REG_DISCARD_INTERNAL_WRITES \ + 0x2a0040UL +#define PSWHST2_REG_DBGSYN_ALMOST_FULL_THR \ + 0x29e050UL +#define PSWRD_REG_DBG_SELECT \ + 0x29c040UL +#define PSWRD2_REG_CONF11 \ + 0x29d064UL +#define PSWWR_REG_USDM_FULL_TH \ + 0x29a040UL +#define PSWWR2_REG_CDU_FULL_TH2 \ + 0x29b040UL +#define QM_REG_MAXPQSIZE_0 \ + 0x2f0434UL +#define RSS_REG_RSS_INIT_EN \ + 0x238804UL +#define RDIF_REG_STOP_ON_ERROR \ + 0x300040UL +#define SRC_REG_SOFT_RST \ + 0x23874cUL +#define TCFC_REG_ACTIVITY_COUNTER \ + 0x2d8800UL +#define TCM_REG_INIT \ + 0x1180000UL +#define TM_REG_PXP_READ_DATA_FIFO_INIT \ + 0x2c0014UL +#define TSDM_REG_ENABLE_IN1 \ + 0xfb0004UL +#define TSEM_REG_ENABLE_IN \ + 0x1700004UL +#define TDIF_REG_STOP_ON_ERROR \ + 0x310040UL +#define UCM_REG_INIT \ + 0x1280000UL +#define UMAC_REG_IPG_HD_BKP_CNTL_BB_B0 \ + 0x051004UL +#define USDM_REG_ENABLE_IN1 \ + 0xfd0004UL +#define USEM_REG_ENABLE_IN \ + 0x1900004UL +#define XCM_REG_INIT \ + 0x1000000UL +#define XSDM_REG_ENABLE_IN1 \ + 0xf80004UL +#define XSEM_REG_ENABLE_IN \ + 0x1400004UL +#define YCM_REG_INIT \ + 0x1080000UL +#define YSDM_REG_ENABLE_IN1 \ + 0xf90004UL +#define YSEM_REG_ENABLE_IN \ + 0x1500004UL +#define XYLD_REG_SCBD_STRICT_PRIO \ + 0x4c0000UL +#define TMLD_REG_SCBD_STRICT_PRIO \ + 0x4d0000UL +#define MULD_REG_SCBD_STRICT_PRIO \ + 0x4e0000UL +#define YULD_REG_SCBD_STRICT_PRIO \ + 0x4c8000UL +#define MISC_REG_SHARED_MEM_ADDR \ + 0x008c20UL +#define DMAE_REG_GO_C0 \ + 0x00c048UL +#define DMAE_REG_GO_C1 \ + 0x00c04cUL +#define DMAE_REG_GO_C2 \ + 0x00c050UL +#define DMAE_REG_GO_C3 \ + 0x00c054UL +#define DMAE_REG_GO_C4 \ + 0x00c058UL +#define DMAE_REG_GO_C5 \ + 0x00c05cUL +#define DMAE_REG_GO_C6 \ + 0x00c060UL +#define DMAE_REG_GO_C7 \ + 0x00c064UL +#define DMAE_REG_GO_C8 \ + 0x00c068UL +#define DMAE_REG_GO_C9 \ + 0x00c06cUL +#define DMAE_REG_GO_C10 \ + 0x00c070UL +#define DMAE_REG_GO_C11 \ + 0x00c074UL +#define DMAE_REG_GO_C12 \ + 0x00c078UL +#define DMAE_REG_GO_C13 \ + 0x00c07cUL +#define DMAE_REG_GO_C14 \ + 0x00c080UL +#define DMAE_REG_GO_C15 \ + 0x00c084UL +#define DMAE_REG_GO_C16 \ + 0x00c088UL +#define DMAE_REG_GO_C17 \ + 0x00c08cUL +#define DMAE_REG_GO_C18 \ + 0x00c090UL +#define DMAE_REG_GO_C19 \ + 0x00c094UL +#define DMAE_REG_GO_C20 \ + 0x00c098UL +#define DMAE_REG_GO_C21 \ + 0x00c09cUL +#define DMAE_REG_GO_C22 \ + 0x00c0a0UL +#define DMAE_REG_GO_C23 \ + 0x00c0a4UL +#define DMAE_REG_GO_C24 \ + 0x00c0a8UL +#define DMAE_REG_GO_C25 \ + 0x00c0acUL +#define DMAE_REG_GO_C26 \ + 0x00c0b0UL +#define DMAE_REG_GO_C27 \ + 0x00c0b4UL +#define DMAE_REG_GO_C28 \ + 0x00c0b8UL +#define DMAE_REG_GO_C29 \ + 0x00c0bcUL +#define DMAE_REG_GO_C30 \ + 0x00c0c0UL +#define DMAE_REG_GO_C31 \ + 0x00c0c4UL +#define DMAE_REG_CMD_MEM \ + 0x00c800UL +#define QM_REG_MAXPQSIZETXSEL_0 \ + 0x2f0440UL +#define QM_REG_SDMCMDREADY \ + 0x2f1e10UL +#define QM_REG_SDMCMDADDR \ + 0x2f1e04UL +#define QM_REG_SDMCMDDATALSB \ + 0x2f1e08UL +#define QM_REG_SDMCMDDATAMSB \ + 0x2f1e0cUL +#define QM_REG_SDMCMDGO \ + 0x2f1e14UL +#define QM_REG_RLPFCRD \ + 0x2f4d80UL +#define QM_REG_RLPFINCVAL \ + 0x2f4c80UL +#define QM_REG_RLGLBLCRD \ + 0x2f4400UL +#define QM_REG_RLGLBLINCVAL \ + 0x2f3400UL +#define IGU_REG_ATTENTION_ENABLE \ + 0x18083cUL +#define 
IGU_REG_ATTN_MSG_ADDR_L \ + 0x180820UL +#define IGU_REG_ATTN_MSG_ADDR_H \ + 0x180824UL +#define IGU_REG_LEADING_EDGE_LATCH \ + 0x18082cUL +#define IGU_REG_TRAILING_EDGE_LATCH \ + 0x180830UL +#define IGU_REG_ATTENTION_ACK_BITS \ + 0x180838UL +#define IGU_REG_PBA_STS_PF \ + 0x180d20UL +#define IGU_REG_PF_FUNCTIONAL_CLEANUP \ + 0x181210UL +#define IGU_REG_STATISTIC_NUM_OF_INTA_ASSERTED \ + 0x18042cUL +#define IGU_REG_PBA_STS_PF_SIZE 5 +#define IGU_REG_PBA_STS_PF \ + 0x180d20UL +#define MISC_REG_AEU_GENERAL_ATTN_0 \ + 0x008400UL +#define CAU_REG_SB_ADDR_MEMORY \ + 0x1c8000UL +#define CAU_REG_SB_VAR_MEMORY \ + 0x1c6000UL +#define CAU_REG_PI_MEMORY \ + 0x1d0000UL +#define IGU_REG_PF_CONFIGURATION \ + 0x180800UL +#define MISC_REG_AEU_ENABLE1_IGU_OUT_0 \ + 0x00849cUL +#define MISC_REG_AEU_MASK_ATTN_IGU \ + 0x008494UL +#define IGU_REG_CLEANUP_STATUS_0 \ + 0x180980UL +#define IGU_REG_CLEANUP_STATUS_1 \ + 0x180a00UL +#define IGU_REG_CLEANUP_STATUS_2 \ + 0x180a80UL +#define IGU_REG_CLEANUP_STATUS_3 \ + 0x180b00UL +#define IGU_REG_CLEANUP_STATUS_4 \ + 0x180b80UL +#define IGU_REG_COMMAND_REG_32LSB_DATA \ + 0x180840UL +#define IGU_REG_COMMAND_REG_CTRL \ + 0x180848UL +#define IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN ( \ + 0x1UL << 1) +#define IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN ( \ + 0x1UL << 0) +#define IGU_REG_MAPPING_MEMORY \ + 0x184000UL +#define MISCS_REG_GENERIC_POR_0 \ + 0x0096d4UL +#define MCP_REG_NVM_CFG4 \ + 0xe0642cUL +#define MCP_REG_NVM_CFG4_FLASH_SIZE ( \ + 0x7UL << 0) +#define MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT \ + 0 +#define CCFC_REG_STRONG_ENABLE_VF 0x2e070cUL +#define CNIG_REG_PMEG_IF_CMD_BB_B0 0x21821cUL +#define CNIG_REG_PMEG_IF_ADDR_BB_B0 0x218224UL +#define CNIG_REG_PMEG_IF_WRDATA_BB_B0 0x218228UL +#define NWM_REG_MAC0 0x800400UL +#define NWM_REG_MAC0_SIZE 256 +#define CNIG_REG_NIG_PORT0_CONF_K2 0x218200UL +#define CNIG_REG_NIG_PORT0_CONF_NIG_PORT_ENABLE_0_SHIFT 0 +#define CNIG_REG_NIG_PORT0_CONF_NIG_PORT_NWM_PORT_MAP_0_SHIFT 1 +#define CNIG_REG_NIG_PORT0_CONF_NIG_PORT_RATE_0_SHIFT 3 +#define ETH_MAC_REG_XIF_MODE 0x000080UL +#define ETH_MAC_REG_XIF_MODE_XGMII_SHIFT 0 +#define ETH_MAC_REG_FRM_LENGTH 0x000014UL +#define ETH_MAC_REG_FRM_LENGTH_FRM_LENGTH_SHIFT 0 +#define ETH_MAC_REG_TX_IPG_LENGTH 0x000044UL +#define ETH_MAC_REG_TX_IPG_LENGTH_TXIPG_SHIFT 0 +#define ETH_MAC_REG_RX_FIFO_SECTIONS 0x00001cUL +#define ETH_MAC_REG_RX_FIFO_SECTIONS_RX_SECTION_FULL_SHIFT 0 +#define ETH_MAC_REG_TX_FIFO_SECTIONS 0x000020UL +#define ETH_MAC_REG_TX_FIFO_SECTIONS_TX_SECTION_EMPTY_SHIFT 16 +#define ETH_MAC_REG_TX_FIFO_SECTIONS_TX_SECTION_FULL_SHIFT 0 +#define ETH_MAC_REG_COMMAND_CONFIG 0x000008UL +#define MISC_REG_RESET_PL_PDA_VAUX 0x008090UL +#define MISC_REG_XMAC_CORE_PORT_MODE 0x008c08UL +#define MISC_REG_XMAC_PHY_PORT_MODE 0x008c04UL +#define XMAC_REG_MODE 0x210008UL +#define XMAC_REG_RX_MAX_SIZE 0x210040UL +#define XMAC_REG_TX_CTRL_LO 0x210020UL +#define XMAC_REG_CTRL 0x210000UL +#define XMAC_REG_RX_CTRL 0x210030UL +#define XMAC_REG_RX_CTRL_PROCESS_VARIABLE_PREAMBLE (0x1UL << 12) +#define MISC_REG_CLK_100G_MODE 0x008c10UL +#define MISC_REG_OPTE_MODE 0x008c0cUL +#define NIG_REG_LLH_ENG_CLS_TCP_4_TUPLE_SEARCH 0x501b84UL +#define NIG_REG_LLH_ENG_CLS_ENG_ID_TBL 0x501b90UL +#define PRS_REG_SEARCH_TAG1 0x1f0444UL +#define PRS_REG_SEARCH_TCP_FIRST_FRAG 0x1f0410UL +#define MISCS_REG_PLL_MAIN_CTRL_4 0x00974cUL +#define MISCS_REG_ECO_RESERVED 0x0097b4UL +#define PGLUE_B_REG_PF_BAR0_SIZE 0x2aae60UL +#define PGLUE_B_REG_PF_BAR1_SIZE 0x2aae64UL +#define NIG_REG_LLH_FUNC_FILTER_EN_SIZE 16 +#define 
NIG_REG_LLH_FUNC_FILTER_EN 0x501a80UL +#define NIG_REG_LLH_FUNC_FILTER_VALUE 0x501a00UL +#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE 0x501b00UL +#define NIG_REG_LLH_FUNC_FILTER_EN_SIZE 16 +#define NIG_REG_LLH_FUNC_FILTER_VALUE 0x501a00UL +#define NIG_REG_LLH_FUNC_FILTER_EN 0x501a80UL +#define NIG_REG_LLH_FUNC_FILTER_EN_SIZE 16 +#define NIG_REG_LLH_FUNC_FILTER_EN 0x501a80UL +#define NIG_REG_LLH_FUNC_FILTER_VALUE 0x501a00UL +#define NIG_REG_LLH_FUNC_FILTER_MODE 0x501ac0UL +#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE 0x501b00UL +#define NIG_REG_LLH_FUNC_FILTER_EN_SIZE 16 +#define NIG_REG_LLH_FUNC_FILTER_VALUE 0x501a00UL +#define NIG_REG_LLH_FUNC_FILTER_EN 0x501a80UL +#define NIG_REG_LLH_FUNC_FILTER_EN_SIZE 16 +#define NIG_REG_LLH_FUNC_FILTER_EN 0x501a80UL +#define NIG_REG_LLH_FUNC_FILTER_EN_SIZE 16 +#define NIG_REG_LLH_FUNC_FILTER_VALUE 0x501a00UL +#define XMAC_REG_CTRL_TX_EN (0x1UL << 0) +#define XMAC_REG_CTRL_RX_EN (0x1UL << 1) +#define CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE (0xffUL << 24) /* @DPDK */ +#define CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE (0xffUL << 16) +#define CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE_SHIFT 16 +#define CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE (0xffUL << 16) +#define CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE (0xffUL << 24) /* @DPDK */ +#define CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK (0xfffUL << 0) +#define CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK_SHIFT 0 +#define CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK (0xfffUL << 0) +#define CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK_SHIFT 0 +#define PSWRQ2_REG_ILT_MEMORY 0x260000UL +#define QM_REG_WFQPFWEIGHT 0x2f4e80UL +#define QM_REG_WFQVPWEIGHT 0x2fa000UL +#define NIG_REG_LB_ARB_CREDIT_WEIGHT_0 0x50160cUL +#define NIG_REG_TX_ARB_CREDIT_WEIGHT_0 0x501f88UL +#define NIG_REG_LB_ARB_CREDIT_WEIGHT_1 0x501610UL +#define NIG_REG_TX_ARB_CREDIT_WEIGHT_1 0x501f8cUL +#define NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_0 0x5015e4UL +#define NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_0 0x501f58UL +#define NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_1 0x5015e8UL +#define NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_1 0x501f5cUL +#define NIG_REG_LB_ARB_CLIENT_IS_STRICT 0x5015c0UL +#define NIG_REG_TX_ARB_CLIENT_IS_STRICT 0x501f34UL +#define NIG_REG_LB_ARB_CLIENT_IS_SUBJECT2WFQ 0x5015c4UL +#define NIG_REG_TX_ARB_CLIENT_IS_SUBJECT2WFQ 0x501f38UL +#define NIG_REG_TX_LB_GLBRATELIMIT_CTRL_TX_LB_GLBRATELIMIT_BASE_TYPE_SHIFT 1 +#define NIG_REG_TX_LB_GLBRATELIMIT_CTRL 0x501f1cUL +#define NIG_REG_TX_LB_GLBRATELIMIT_INC_PERIOD 0x501f20UL +#define NIG_REG_TX_LB_GLBRATELIMIT_INC_VALUE 0x501f24UL +#define NIG_REG_TX_LB_GLBRATELIMIT_MAX_VALUE 0x501f28UL +#define NIG_REG_TX_LB_GLBRATELIMIT_CTRL_TX_LB_GLBRATELIMIT_EN_SHIFT 0 +#define NIG_REG_LB_BRBRATELIMIT_CTRL_LB_BRBRATELIMIT_BASE_TYPE_SHIFT 1 +#define NIG_REG_LB_BRBRATELIMIT_CTRL 0x50150cUL +#define NIG_REG_LB_BRBRATELIMIT_INC_PERIOD 0x501510UL +#define NIG_REG_LB_BRBRATELIMIT_INC_VALUE 0x501514UL +#define NIG_REG_LB_BRBRATELIMIT_MAX_VALUE 0x501518UL +#define NIG_REG_LB_BRBRATELIMIT_CTRL_LB_BRBRATELIMIT_EN_SHIFT 0 +#define NIG_REG_LB_TCRATELIMIT_CTRL_0_LB_TCRATELIMIT_BASE_TYPE_0_SHIFT 1 +#define NIG_REG_LB_TCRATELIMIT_CTRL_0 0x501520UL +#define NIG_REG_LB_TCRATELIMIT_INC_PERIOD_0 0x501540UL +#define NIG_REG_LB_TCRATELIMIT_INC_VALUE_0 0x501560UL +#define NIG_REG_LB_TCRATELIMIT_MAX_VALUE_0 0x501580UL +#define NIG_REG_LB_TCRATELIMIT_CTRL_0_LB_TCRATELIMIT_EN_0_SHIFT 0 +#define NIG_REG_PRIORITY_FOR_TC_0 0x501bccUL +#define NIG_REG_RX_TC0_PRIORITY_MASK 0x501becUL +#define PRS_REG_ETS_ARB_CREDIT_WEIGHT_1 0x1f0540UL +#define 
PRS_REG_ETS_ARB_CREDIT_WEIGHT_0 0x1f0534UL +#define PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_1 0x1f053cUL +#define PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_0 0x1f0530UL +#define PRS_REG_ETS_ARB_CLIENT_IS_STRICT 0x1f0514UL +#define PRS_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ 0x1f0518UL +#define BRB_REG_TOTAL_MAC_SIZE 0x3408c0UL +#define BRB_REG_SHARED_HR_AREA 0x340880UL +#define BRB_REG_TC_GUARANTIED_0 0x340900UL +#define BRB_REG_MAIN_TC_GUARANTIED_HYST_0 0x340978UL +#define BRB_REG_LB_TC_FULL_XOFF_THRESHOLD_0 0x340c60UL +#define BRB_REG_LB_TC_FULL_XON_THRESHOLD_0 0x340d38UL +#define BRB_REG_LB_TC_PAUSE_XOFF_THRESHOLD_0 0x340ab0UL +#define BRB_REG_LB_TC_PAUSE_XON_THRESHOLD_0 0x340b88UL +#define BRB_REG_MAIN_TC_FULL_XOFF_THRESHOLD_0 0x340c00UL +#define BRB_REG_MAIN_TC_FULL_XON_THRESHOLD_0 0x340cd8UL +#define BRB_REG_MAIN_TC_PAUSE_XOFF_THRESHOLD_0 0x340a50UL +#define BRB_REG_MAIN_TC_PAUSE_XON_THRESHOLD_0 0x340b28UL +#define PRS_REG_VXLAN_PORT 0x1f0738UL +#define NIG_REG_VXLAN_PORT 0x50105cUL +#define PBF_REG_VXLAN_PORT 0xd80518UL +#define PRS_REG_ENCAPSULATION_TYPE_EN 0x1f0730UL +#define PRS_REG_OUTPUT_FORMAT_4_0 0x1f099cUL +#define NIG_REG_ENC_TYPE_ENABLE 0x501058UL +#define NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT 2 +#define DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN 0x100914UL +#define PRS_REG_ENCAPSULATION_TYPE_EN 0x1f0730UL +#define PRS_REG_OUTPUT_FORMAT_4_0 0x1f099cUL +#define NIG_REG_ENC_TYPE_ENABLE 0x501058UL +#define NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT 0 +#define NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT 1 +#define DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN 0x10090cUL +#define DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN 0x100910UL +#define PRS_REG_NGE_PORT 0x1f086cUL +#define NIG_REG_NGE_PORT 0x508b38UL +#define PBF_REG_NGE_PORT 0xd8051cUL +#define PRS_REG_ENCAPSULATION_TYPE_EN 0x1f0730UL +#define PRS_REG_OUTPUT_FORMAT_4_0 0x1f099cUL +#define NIG_REG_NGE_ETH_ENABLE 0x508b2cUL +#define NIG_REG_NGE_IP_ENABLE 0x508b28UL +#define NIG_REG_NGE_COMP_VER 0x508b30UL +#define PBF_REG_NGE_COMP_VER 0xd80524UL +#define PRS_REG_NGE_COMP_VER 0x1f0878UL +#define DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN 0x100930UL +#define DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN 0x10092cUL +#define NIG_REG_PKT_PRIORITY_TO_TC 0x501ba4UL +#define PGLUE_B_REG_START_INIT_PTT_GTT 0x2a8008UL +#define PGLUE_B_REG_INIT_DONE_PTT_GTT 0x2a800cUL +#define MISC_REG_AEU_GENERAL_ATTN_35 0x00848cUL +#define MCP_REG_CPU_STATE 0xe05004UL +#define MCP_REG_CPU_MODE 0xe05000UL +#define MCP_REG_CPU_MODE_SOFT_HALT (0x1UL << 10) +#define MCP_REG_CPU_EVENT_MASK 0xe05008UL +#define PSWHST_REG_VF_DISABLED_ERROR_VALID 0x2a0060UL +#define PSWHST_REG_VF_DISABLED_ERROR_ADDRESS 0x2a0064UL +#define PSWHST_REG_VF_DISABLED_ERROR_DATA 0x2a005cUL +#define PSWHST_REG_INCORRECT_ACCESS_VALID 0x2a0070UL +#define PSWHST_REG_INCORRECT_ACCESS_ADDRESS 0x2a0074UL +#define PSWHST_REG_INCORRECT_ACCESS_DATA 0x2a0068UL +#define PSWHST_REG_INCORRECT_ACCESS_LENGTH 0x2a006cUL +#define GRC_REG_TIMEOUT_ATTN_ACCESS_VALID 0x050054UL +#define GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_0 0x05004cUL +#define GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_1 0x050050UL +#define PGLUE_B_REG_TX_ERR_WR_DETAILS2 0x2aa150UL +#define PGLUE_B_REG_TX_ERR_WR_ADD_31_0 0x2aa144UL +#define PGLUE_B_REG_TX_ERR_WR_ADD_63_32 0x2aa148UL +#define PGLUE_B_REG_TX_ERR_WR_DETAILS 0x2aa14cUL +#define PGLUE_B_REG_TX_ERR_RD_DETAILS2 0x2aa160UL +#define PGLUE_B_REG_TX_ERR_RD_ADD_31_0 0x2aa154UL +#define PGLUE_B_REG_TX_ERR_RD_ADD_63_32 0x2aa158UL +#define PGLUE_B_REG_TX_ERR_RD_DETAILS 0x2aa15cUL +#define PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL 0x2aa164UL +#define 
PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS 0x2aa54cUL +#define PGLUE_B_REG_MASTER_ZLR_ERR_ADD_31_0 0x2aa544UL +#define PGLUE_B_REG_MASTER_ZLR_ERR_ADD_63_32 0x2aa548UL +#define PGLUE_B_REG_VF_ILT_ERR_DETAILS2 0x2aae80UL +#define PGLUE_B_REG_VF_ILT_ERR_ADD_31_0 0x2aae74UL +#define PGLUE_B_REG_VF_ILT_ERR_ADD_63_32 0x2aae78UL +#define PGLUE_B_REG_VF_ILT_ERR_DETAILS 0x2aae7cUL +#define PGLUE_B_REG_LATCHED_ERRORS_CLR 0x2aa3bcUL +#define NIG_REG_INT_MASK_3_P0_LB_TC1_PAUSE_TOO_LONG_INT (0x1UL << 10) +#define DORQ_REG_DB_DROP_REASON 0x100a2cUL +#define DORQ_REG_DB_DROP_DETAILS 0x100a24UL +#define TM_REG_INT_STS_1 0x2c0190UL +#define TM_REG_INT_STS_1_PEND_TASK_SCAN (0x1UL << 6) +#define TM_REG_INT_STS_1_PEND_CONN_SCAN (0x1UL << 5) +#define TM_REG_INT_MASK_1 0x2c0194UL +#define TM_REG_INT_MASK_1_PEND_CONN_SCAN (0x1UL << 5) +#define TM_REG_INT_MASK_1_PEND_TASK_SCAN (0x1UL << 6) +#define MISC_REG_AEU_AFTER_INVERT_1_IGU 0x0087b4UL +#define MISC_REG_AEU_ENABLE4_IGU_OUT_0 0x0084a8UL +#define MISC_REG_AEU_ENABLE3_IGU_OUT_0 0x0084a4UL +#define YSEM_REG_FAST_MEMORY 0x1540000UL +#define NIG_REG_FLOWCTRL_MODE 0x501ba0UL +#define TSEM_REG_FAST_MEMORY 0x1740000UL +#define TSEM_REG_DBG_FRAME_MODE 0x1701408UL +#define TSEM_REG_SLOW_DBG_ACTIVE 0x1701400UL +#define TSEM_REG_SLOW_DBG_MODE 0x1701404UL +#define TSEM_REG_DBG_MODE1_CFG 0x1701420UL +#define TSEM_REG_SYNC_DBG_EMPTY 0x1701160UL +#define TSEM_REG_SLOW_DBG_EMPTY 0x1701140UL +#define TCM_REG_CTX_RBC_ACCS 0x11814c0UL +#define TCM_REG_AGG_CON_CTX 0x11814c4UL +#define TCM_REG_SM_CON_CTX 0x11814ccUL +#define TCM_REG_AGG_TASK_CTX 0x11814c8UL +#define TCM_REG_SM_TASK_CTX 0x11814d0UL +#define MSEM_REG_FAST_MEMORY 0x1840000UL +#define MSEM_REG_DBG_FRAME_MODE 0x1801408UL +#define MSEM_REG_SLOW_DBG_ACTIVE 0x1801400UL +#define MSEM_REG_SLOW_DBG_MODE 0x1801404UL +#define MSEM_REG_DBG_MODE1_CFG 0x1801420UL +#define MSEM_REG_SYNC_DBG_EMPTY 0x1801160UL +#define MSEM_REG_SLOW_DBG_EMPTY 0x1801140UL +#define MCM_REG_CTX_RBC_ACCS 0x1201800UL +#define MCM_REG_AGG_CON_CTX 0x1201804UL +#define MCM_REG_SM_CON_CTX 0x120180cUL +#define MCM_REG_AGG_TASK_CTX 0x1201808UL +#define MCM_REG_SM_TASK_CTX 0x1201810UL +#define USEM_REG_FAST_MEMORY 0x1940000UL +#define USEM_REG_DBG_FRAME_MODE 0x1901408UL +#define USEM_REG_SLOW_DBG_ACTIVE 0x1901400UL +#define USEM_REG_SLOW_DBG_MODE 0x1901404UL +#define USEM_REG_DBG_MODE1_CFG 0x1901420UL +#define USEM_REG_SYNC_DBG_EMPTY 0x1901160UL +#define USEM_REG_SLOW_DBG_EMPTY 0x1901140UL +#define UCM_REG_CTX_RBC_ACCS 0x1281700UL +#define UCM_REG_AGG_CON_CTX 0x1281704UL +#define UCM_REG_SM_CON_CTX 0x128170cUL +#define UCM_REG_AGG_TASK_CTX 0x1281708UL +#define UCM_REG_SM_TASK_CTX 0x1281710UL +#define XSEM_REG_FAST_MEMORY 0x1440000UL +#define XSEM_REG_DBG_FRAME_MODE 0x1401408UL +#define XSEM_REG_SLOW_DBG_ACTIVE 0x1401400UL +#define XSEM_REG_SLOW_DBG_MODE 0x1401404UL +#define XSEM_REG_DBG_MODE1_CFG 0x1401420UL +#define XSEM_REG_SYNC_DBG_EMPTY 0x1401160UL +#define XSEM_REG_SLOW_DBG_EMPTY 0x1401140UL +#define XCM_REG_CTX_RBC_ACCS 0x1001800UL +#define XCM_REG_AGG_CON_CTX 0x1001804UL +#define XCM_REG_SM_CON_CTX 0x1001808UL +#define YSEM_REG_DBG_FRAME_MODE 0x1501408UL +#define YSEM_REG_SLOW_DBG_ACTIVE 0x1501400UL +#define YSEM_REG_SLOW_DBG_MODE 0x1501404UL +#define YSEM_REG_DBG_MODE1_CFG 0x1501420UL +#define YSEM_REG_SYNC_DBG_EMPTY 0x1501160UL +#define YCM_REG_CTX_RBC_ACCS 0x1081800UL +#define YCM_REG_AGG_CON_CTX 0x1081804UL +#define YCM_REG_SM_CON_CTX 0x108180cUL +#define YCM_REG_AGG_TASK_CTX 0x1081808UL +#define YCM_REG_SM_TASK_CTX 0x1081810UL +#define PSEM_REG_FAST_MEMORY 
0x1640000UL +#define PSEM_REG_DBG_FRAME_MODE 0x1601408UL +#define PSEM_REG_SLOW_DBG_ACTIVE 0x1601400UL +#define PSEM_REG_SLOW_DBG_MODE 0x1601404UL +#define PSEM_REG_DBG_MODE1_CFG 0x1601420UL +#define PSEM_REG_SYNC_DBG_EMPTY 0x1601160UL +#define PSEM_REG_SLOW_DBG_EMPTY 0x1601140UL +#define PCM_REG_CTX_RBC_ACCS 0x1101440UL +#define PCM_REG_SM_CON_CTX 0x1101444UL +#define GRC_REG_DBG_SELECT 0x0500a4UL +#define GRC_REG_DBG_DWORD_ENABLE 0x0500a8UL +#define GRC_REG_DBG_SHIFT 0x0500acUL +#define GRC_REG_DBG_FORCE_VALID 0x0500b0UL +#define GRC_REG_DBG_FORCE_FRAME 0x0500b4UL +#define PGLUE_B_REG_DBG_SELECT 0x2a8400UL +#define PGLUE_B_REG_DBG_DWORD_ENABLE 0x2a8404UL +#define PGLUE_B_REG_DBG_SHIFT 0x2a8408UL +#define PGLUE_B_REG_DBG_FORCE_VALID 0x2a840cUL +#define PGLUE_B_REG_DBG_FORCE_FRAME 0x2a8410UL +#define CNIG_REG_DBG_SELECT_K2 0x218254UL +#define CNIG_REG_DBG_DWORD_ENABLE_K2 0x218258UL +#define CNIG_REG_DBG_SHIFT_K2 0x21825cUL +#define CNIG_REG_DBG_FORCE_VALID_K2 0x218260UL +#define CNIG_REG_DBG_FORCE_FRAME_K2 0x218264UL +#define NCSI_REG_DBG_SELECT 0x040474UL +#define NCSI_REG_DBG_DWORD_ENABLE 0x040478UL +#define NCSI_REG_DBG_SHIFT 0x04047cUL +#define NCSI_REG_DBG_FORCE_VALID 0x040480UL +#define NCSI_REG_DBG_FORCE_FRAME 0x040484UL +#define BMB_REG_DBG_SELECT 0x540a7cUL +#define BMB_REG_DBG_DWORD_ENABLE 0x540a80UL +#define BMB_REG_DBG_SHIFT 0x540a84UL +#define BMB_REG_DBG_FORCE_VALID 0x540a88UL +#define BMB_REG_DBG_FORCE_FRAME 0x540a8cUL +#define PCIE_REG_DBG_SELECT 0x0547e8UL +#define PHY_PCIE_REG_DBG_SELECT 0x629fe8UL +#define PCIE_REG_DBG_DWORD_ENABLE 0x0547ecUL +#define PHY_PCIE_REG_DBG_DWORD_ENABLE 0x629fecUL +#define PCIE_REG_DBG_SHIFT 0x0547f0UL +#define PHY_PCIE_REG_DBG_SHIFT 0x629ff0UL +#define PCIE_REG_DBG_FORCE_VALID 0x0547f4UL +#define PHY_PCIE_REG_DBG_FORCE_VALID 0x629ff4UL +#define PCIE_REG_DBG_FORCE_FRAME 0x0547f8UL +#define PHY_PCIE_REG_DBG_FORCE_FRAME 0x629ff8UL +#define MCP2_REG_DBG_SELECT 0x052400UL +#define MCP2_REG_DBG_SHIFT 0x052408UL +#define MCP2_REG_DBG_FORCE_VALID 0x052440UL +#define MCP2_REG_DBG_FORCE_FRAME 0x052444UL +#define PSWHST_REG_DBG_SELECT 0x2a0100UL +#define PSWHST_REG_DBG_DWORD_ENABLE 0x2a0104UL +#define PSWHST_REG_DBG_SHIFT 0x2a0108UL +#define PSWHST_REG_DBG_FORCE_VALID 0x2a010cUL +#define PSWHST_REG_DBG_FORCE_FRAME 0x2a0110UL +#define PSWHST2_REG_DBG_SELECT 0x29e058UL +#define PSWHST2_REG_DBG_DWORD_ENABLE 0x29e05cUL +#define PSWHST2_REG_DBG_SHIFT 0x29e060UL +#define PSWHST2_REG_DBG_FORCE_VALID 0x29e064UL +#define PSWHST2_REG_DBG_FORCE_FRAME 0x29e068UL +#define PSWRD_REG_DBG_DWORD_ENABLE 0x29c044UL +#define PSWRD_REG_DBG_SHIFT 0x29c048UL +#define PSWRD_REG_DBG_FORCE_VALID 0x29c04cUL +#define PSWRD_REG_DBG_FORCE_FRAME 0x29c050UL +#define PSWRD2_REG_DBG_SELECT 0x29d400UL +#define PSWRD2_REG_DBG_DWORD_ENABLE 0x29d404UL +#define PSWRD2_REG_DBG_SHIFT 0x29d408UL +#define PSWRD2_REG_DBG_FORCE_VALID 0x29d40cUL +#define PSWRD2_REG_DBG_FORCE_FRAME 0x29d410UL +#define PSWWR_REG_DBG_SELECT 0x29a084UL +#define PSWWR_REG_DBG_DWORD_ENABLE 0x29a088UL +#define PSWWR_REG_DBG_SHIFT 0x29a08cUL +#define PSWWR_REG_DBG_FORCE_VALID 0x29a090UL +#define PSWWR_REG_DBG_FORCE_FRAME 0x29a094UL +#define PSWRQ_REG_DBG_DWORD_ENABLE 0x280024UL +#define PSWRQ_REG_DBG_SHIFT 0x280028UL +#define PSWRQ_REG_DBG_FORCE_VALID 0x28002cUL +#define PSWRQ_REG_DBG_FORCE_FRAME 0x280030UL +#define PSWRQ2_REG_DBG_SELECT 0x240100UL +#define PSWRQ2_REG_DBG_DWORD_ENABLE 0x240104UL +#define PSWRQ2_REG_DBG_SHIFT 0x240108UL +#define PSWRQ2_REG_DBG_FORCE_VALID 0x24010cUL +#define PSWRQ2_REG_DBG_FORCE_FRAME 
0x240110UL +#define PGLCS_REG_DBG_SELECT 0x001d14UL +#define PGLCS_REG_DBG_DWORD_ENABLE 0x001d18UL +#define PGLCS_REG_DBG_SHIFT 0x001d1cUL +#define PGLCS_REG_DBG_FORCE_VALID 0x001d20UL +#define PGLCS_REG_DBG_FORCE_FRAME 0x001d24UL +#define PTU_REG_DBG_SELECT 0x560100UL +#define PTU_REG_DBG_DWORD_ENABLE 0x560104UL +#define PTU_REG_DBG_SHIFT 0x560108UL +#define PTU_REG_DBG_FORCE_VALID 0x56010cUL +#define PTU_REG_DBG_FORCE_FRAME 0x560110UL +#define DMAE_REG_DBG_SELECT 0x00c510UL +#define DMAE_REG_DBG_DWORD_ENABLE 0x00c514UL +#define DMAE_REG_DBG_SHIFT 0x00c518UL +#define DMAE_REG_DBG_FORCE_VALID 0x00c51cUL +#define DMAE_REG_DBG_FORCE_FRAME 0x00c520UL +#define TCM_REG_DBG_SELECT 0x1180040UL +#define TCM_REG_DBG_DWORD_ENABLE 0x1180044UL +#define TCM_REG_DBG_SHIFT 0x1180048UL +#define TCM_REG_DBG_FORCE_VALID 0x118004cUL +#define TCM_REG_DBG_FORCE_FRAME 0x1180050UL +#define MCM_REG_DBG_SELECT 0x1200040UL +#define MCM_REG_DBG_DWORD_ENABLE 0x1200044UL +#define MCM_REG_DBG_SHIFT 0x1200048UL +#define MCM_REG_DBG_FORCE_VALID 0x120004cUL +#define MCM_REG_DBG_FORCE_FRAME 0x1200050UL +#define UCM_REG_DBG_SELECT 0x1280050UL +#define UCM_REG_DBG_DWORD_ENABLE 0x1280054UL +#define UCM_REG_DBG_SHIFT 0x1280058UL +#define UCM_REG_DBG_FORCE_VALID 0x128005cUL +#define UCM_REG_DBG_FORCE_FRAME 0x1280060UL +#define XCM_REG_DBG_SELECT 0x1000040UL +#define XCM_REG_DBG_DWORD_ENABLE 0x1000044UL +#define XCM_REG_DBG_SHIFT 0x1000048UL +#define XCM_REG_DBG_FORCE_VALID 0x100004cUL +#define XCM_REG_DBG_FORCE_FRAME 0x1000050UL +#define YCM_REG_DBG_SELECT 0x1080040UL +#define YCM_REG_DBG_DWORD_ENABLE 0x1080044UL +#define YCM_REG_DBG_SHIFT 0x1080048UL +#define YCM_REG_DBG_FORCE_VALID 0x108004cUL +#define YCM_REG_DBG_FORCE_FRAME 0x1080050UL +#define PCM_REG_DBG_SELECT 0x1100040UL +#define PCM_REG_DBG_DWORD_ENABLE 0x1100044UL +#define PCM_REG_DBG_SHIFT 0x1100048UL +#define PCM_REG_DBG_FORCE_VALID 0x110004cUL +#define PCM_REG_DBG_FORCE_FRAME 0x1100050UL +#define QM_REG_DBG_SELECT 0x2f2e74UL +#define QM_REG_DBG_DWORD_ENABLE 0x2f2e78UL +#define QM_REG_DBG_SHIFT 0x2f2e7cUL +#define QM_REG_DBG_FORCE_VALID 0x2f2e80UL +#define QM_REG_DBG_FORCE_FRAME 0x2f2e84UL +#define TM_REG_DBG_SELECT 0x2c07a8UL +#define TM_REG_DBG_DWORD_ENABLE 0x2c07acUL +#define TM_REG_DBG_SHIFT 0x2c07b0UL +#define TM_REG_DBG_FORCE_VALID 0x2c07b4UL +#define TM_REG_DBG_FORCE_FRAME 0x2c07b8UL +#define DORQ_REG_DBG_SELECT 0x100ad0UL +#define DORQ_REG_DBG_DWORD_ENABLE 0x100ad4UL +#define DORQ_REG_DBG_SHIFT 0x100ad8UL +#define DORQ_REG_DBG_FORCE_VALID 0x100adcUL +#define DORQ_REG_DBG_FORCE_FRAME 0x100ae0UL +#define BRB_REG_DBG_SELECT 0x340ed0UL +#define BRB_REG_DBG_DWORD_ENABLE 0x340ed4UL +#define BRB_REG_DBG_SHIFT 0x340ed8UL +#define BRB_REG_DBG_FORCE_VALID 0x340edcUL +#define BRB_REG_DBG_FORCE_FRAME 0x340ee0UL +#define SRC_REG_DBG_SELECT 0x238700UL +#define SRC_REG_DBG_DWORD_ENABLE 0x238704UL +#define SRC_REG_DBG_SHIFT 0x238708UL +#define SRC_REG_DBG_FORCE_VALID 0x23870cUL +#define SRC_REG_DBG_FORCE_FRAME 0x238710UL +#define PRS_REG_DBG_SELECT 0x1f0b6cUL +#define PRS_REG_DBG_DWORD_ENABLE 0x1f0b70UL +#define PRS_REG_DBG_SHIFT 0x1f0b74UL +#define PRS_REG_DBG_FORCE_VALID 0x1f0ba0UL +#define PRS_REG_DBG_FORCE_FRAME 0x1f0ba4UL +#define TSDM_REG_DBG_SELECT 0xfb0e28UL +#define TSDM_REG_DBG_DWORD_ENABLE 0xfb0e2cUL +#define TSDM_REG_DBG_SHIFT 0xfb0e30UL +#define TSDM_REG_DBG_FORCE_VALID 0xfb0e34UL +#define TSDM_REG_DBG_FORCE_FRAME 0xfb0e38UL +#define MSDM_REG_DBG_SELECT 0xfc0e28UL +#define MSDM_REG_DBG_DWORD_ENABLE 0xfc0e2cUL +#define MSDM_REG_DBG_SHIFT 0xfc0e30UL +#define 
MSDM_REG_DBG_FORCE_VALID 0xfc0e34UL +#define MSDM_REG_DBG_FORCE_FRAME 0xfc0e38UL +#define USDM_REG_DBG_SELECT 0xfd0e28UL +#define USDM_REG_DBG_DWORD_ENABLE 0xfd0e2cUL +#define USDM_REG_DBG_SHIFT 0xfd0e30UL +#define USDM_REG_DBG_FORCE_VALID 0xfd0e34UL +#define USDM_REG_DBG_FORCE_FRAME 0xfd0e38UL +#define XSDM_REG_DBG_SELECT 0xf80e28UL +#define XSDM_REG_DBG_DWORD_ENABLE 0xf80e2cUL +#define XSDM_REG_DBG_SHIFT 0xf80e30UL +#define XSDM_REG_DBG_FORCE_VALID 0xf80e34UL +#define XSDM_REG_DBG_FORCE_FRAME 0xf80e38UL +#define YSDM_REG_DBG_SELECT 0xf90e28UL +#define YSDM_REG_DBG_DWORD_ENABLE 0xf90e2cUL +#define YSDM_REG_DBG_SHIFT 0xf90e30UL +#define YSDM_REG_DBG_FORCE_VALID 0xf90e34UL +#define YSDM_REG_DBG_FORCE_FRAME 0xf90e38UL +#define PSDM_REG_DBG_SELECT 0xfa0e28UL +#define PSDM_REG_DBG_DWORD_ENABLE 0xfa0e2cUL +#define PSDM_REG_DBG_SHIFT 0xfa0e30UL +#define PSDM_REG_DBG_FORCE_VALID 0xfa0e34UL +#define PSDM_REG_DBG_FORCE_FRAME 0xfa0e38UL +#define TSEM_REG_DBG_SELECT 0x1701528UL +#define TSEM_REG_DBG_DWORD_ENABLE 0x170152cUL +#define TSEM_REG_DBG_SHIFT 0x1701530UL +#define TSEM_REG_DBG_FORCE_VALID 0x1701534UL +#define TSEM_REG_DBG_FORCE_FRAME 0x1701538UL +#define MSEM_REG_DBG_SELECT 0x1801528UL +#define MSEM_REG_DBG_DWORD_ENABLE 0x180152cUL +#define MSEM_REG_DBG_SHIFT 0x1801530UL +#define MSEM_REG_DBG_FORCE_VALID 0x1801534UL +#define MSEM_REG_DBG_FORCE_FRAME 0x1801538UL +#define USEM_REG_DBG_SELECT 0x1901528UL +#define USEM_REG_DBG_DWORD_ENABLE 0x190152cUL +#define USEM_REG_DBG_SHIFT 0x1901530UL +#define USEM_REG_DBG_FORCE_VALID 0x1901534UL +#define USEM_REG_DBG_FORCE_FRAME 0x1901538UL +#define XSEM_REG_DBG_SELECT 0x1401528UL +#define XSEM_REG_DBG_DWORD_ENABLE 0x140152cUL +#define XSEM_REG_DBG_SHIFT 0x1401530UL +#define XSEM_REG_DBG_FORCE_VALID 0x1401534UL +#define XSEM_REG_DBG_FORCE_FRAME 0x1401538UL +#define YSEM_REG_DBG_SELECT 0x1501528UL +#define YSEM_REG_DBG_DWORD_ENABLE 0x150152cUL +#define YSEM_REG_DBG_SHIFT 0x1501530UL +#define YSEM_REG_DBG_FORCE_VALID 0x1501534UL +#define YSEM_REG_DBG_FORCE_FRAME 0x1501538UL +#define PSEM_REG_DBG_SELECT 0x1601528UL +#define PSEM_REG_DBG_DWORD_ENABLE 0x160152cUL +#define PSEM_REG_DBG_SHIFT 0x1601530UL +#define PSEM_REG_DBG_FORCE_VALID 0x1601534UL +#define PSEM_REG_DBG_FORCE_FRAME 0x1601538UL +#define RSS_REG_DBG_SELECT 0x238c4cUL +#define RSS_REG_DBG_DWORD_ENABLE 0x238c50UL +#define RSS_REG_DBG_SHIFT 0x238c54UL +#define RSS_REG_DBG_FORCE_VALID 0x238c58UL +#define RSS_REG_DBG_FORCE_FRAME 0x238c5cUL +#define TMLD_REG_DBG_SELECT 0x4d1600UL +#define TMLD_REG_DBG_DWORD_ENABLE 0x4d1604UL +#define TMLD_REG_DBG_SHIFT 0x4d1608UL +#define TMLD_REG_DBG_FORCE_VALID 0x4d160cUL +#define TMLD_REG_DBG_FORCE_FRAME 0x4d1610UL +#define MULD_REG_DBG_SELECT 0x4e1600UL +#define MULD_REG_DBG_DWORD_ENABLE 0x4e1604UL +#define MULD_REG_DBG_SHIFT 0x4e1608UL +#define MULD_REG_DBG_FORCE_VALID 0x4e160cUL +#define MULD_REG_DBG_FORCE_FRAME 0x4e1610UL +#define YULD_REG_DBG_SELECT 0x4c9600UL +#define YULD_REG_DBG_DWORD_ENABLE 0x4c9604UL +#define YULD_REG_DBG_SHIFT 0x4c9608UL +#define YULD_REG_DBG_FORCE_VALID 0x4c960cUL +#define YULD_REG_DBG_FORCE_FRAME 0x4c9610UL +#define XYLD_REG_DBG_SELECT 0x4c1600UL +#define XYLD_REG_DBG_DWORD_ENABLE 0x4c1604UL +#define XYLD_REG_DBG_SHIFT 0x4c1608UL +#define XYLD_REG_DBG_FORCE_VALID 0x4c160cUL +#define XYLD_REG_DBG_FORCE_FRAME 0x4c1610UL +#define PRM_REG_DBG_SELECT 0x2306a8UL +#define PRM_REG_DBG_DWORD_ENABLE 0x2306acUL +#define PRM_REG_DBG_SHIFT 0x2306b0UL +#define PRM_REG_DBG_FORCE_VALID 0x2306b4UL +#define PRM_REG_DBG_FORCE_FRAME 0x2306b8UL +#define 
PBF_PB1_REG_DBG_SELECT 0xda0728UL +#define PBF_PB1_REG_DBG_DWORD_ENABLE 0xda072cUL +#define PBF_PB1_REG_DBG_SHIFT 0xda0730UL +#define PBF_PB1_REG_DBG_FORCE_VALID 0xda0734UL +#define PBF_PB1_REG_DBG_FORCE_FRAME 0xda0738UL +#define PBF_PB2_REG_DBG_SELECT 0xda4728UL +#define PBF_PB2_REG_DBG_DWORD_ENABLE 0xda472cUL +#define PBF_PB2_REG_DBG_SHIFT 0xda4730UL +#define PBF_PB2_REG_DBG_FORCE_VALID 0xda4734UL +#define PBF_PB2_REG_DBG_FORCE_FRAME 0xda4738UL +#define RPB_REG_DBG_SELECT 0x23c728UL +#define RPB_REG_DBG_DWORD_ENABLE 0x23c72cUL +#define RPB_REG_DBG_SHIFT 0x23c730UL +#define RPB_REG_DBG_FORCE_VALID 0x23c734UL +#define RPB_REG_DBG_FORCE_FRAME 0x23c738UL +#define BTB_REG_DBG_SELECT 0xdb08c8UL +#define BTB_REG_DBG_DWORD_ENABLE 0xdb08ccUL +#define BTB_REG_DBG_SHIFT 0xdb08d0UL +#define BTB_REG_DBG_FORCE_VALID 0xdb08d4UL +#define BTB_REG_DBG_FORCE_FRAME 0xdb08d8UL +#define PBF_REG_DBG_SELECT 0xd80060UL +#define PBF_REG_DBG_DWORD_ENABLE 0xd80064UL +#define PBF_REG_DBG_SHIFT 0xd80068UL +#define PBF_REG_DBG_FORCE_VALID 0xd8006cUL +#define PBF_REG_DBG_FORCE_FRAME 0xd80070UL +#define RDIF_REG_DBG_SELECT 0x300500UL +#define RDIF_REG_DBG_DWORD_ENABLE 0x300504UL +#define RDIF_REG_DBG_SHIFT 0x300508UL +#define RDIF_REG_DBG_FORCE_VALID 0x30050cUL +#define RDIF_REG_DBG_FORCE_FRAME 0x300510UL +#define TDIF_REG_DBG_SELECT 0x310500UL +#define TDIF_REG_DBG_DWORD_ENABLE 0x310504UL +#define TDIF_REG_DBG_SHIFT 0x310508UL +#define TDIF_REG_DBG_FORCE_VALID 0x31050cUL +#define TDIF_REG_DBG_FORCE_FRAME 0x310510UL +#define CDU_REG_DBG_SELECT 0x580704UL +#define CDU_REG_DBG_DWORD_ENABLE 0x580708UL +#define CDU_REG_DBG_SHIFT 0x58070cUL +#define CDU_REG_DBG_FORCE_VALID 0x580710UL +#define CDU_REG_DBG_FORCE_FRAME 0x580714UL +#define CCFC_REG_DBG_SELECT 0x2e0500UL +#define CCFC_REG_DBG_DWORD_ENABLE 0x2e0504UL +#define CCFC_REG_DBG_SHIFT 0x2e0508UL +#define CCFC_REG_DBG_FORCE_VALID 0x2e050cUL +#define CCFC_REG_DBG_FORCE_FRAME 0x2e0510UL +#define TCFC_REG_DBG_SELECT 0x2d0500UL +#define TCFC_REG_DBG_DWORD_ENABLE 0x2d0504UL +#define TCFC_REG_DBG_SHIFT 0x2d0508UL +#define TCFC_REG_DBG_FORCE_VALID 0x2d050cUL +#define TCFC_REG_DBG_FORCE_FRAME 0x2d0510UL +#define IGU_REG_DBG_SELECT 0x181578UL +#define IGU_REG_DBG_DWORD_ENABLE 0x18157cUL +#define IGU_REG_DBG_SHIFT 0x181580UL +#define IGU_REG_DBG_FORCE_VALID 0x181584UL +#define IGU_REG_DBG_FORCE_FRAME 0x181588UL +#define CAU_REG_DBG_SELECT 0x1c0ea8UL +#define CAU_REG_DBG_DWORD_ENABLE 0x1c0eacUL +#define CAU_REG_DBG_SHIFT 0x1c0eb0UL +#define CAU_REG_DBG_FORCE_VALID 0x1c0eb4UL +#define CAU_REG_DBG_FORCE_FRAME 0x1c0eb8UL +#define UMAC_REG_DBG_SELECT 0x051094UL +#define UMAC_REG_DBG_DWORD_ENABLE 0x051098UL +#define UMAC_REG_DBG_SHIFT 0x05109cUL +#define UMAC_REG_DBG_FORCE_VALID 0x0510a0UL +#define UMAC_REG_DBG_FORCE_FRAME 0x0510a4UL +#define NIG_REG_DBG_SELECT 0x502140UL +#define NIG_REG_DBG_DWORD_ENABLE 0x502144UL +#define NIG_REG_DBG_SHIFT 0x502148UL +#define NIG_REG_DBG_FORCE_VALID 0x50214cUL +#define NIG_REG_DBG_FORCE_FRAME 0x502150UL +#define WOL_REG_DBG_SELECT 0x600140UL +#define WOL_REG_DBG_DWORD_ENABLE 0x600144UL +#define WOL_REG_DBG_SHIFT 0x600148UL +#define WOL_REG_DBG_FORCE_VALID 0x60014cUL +#define WOL_REG_DBG_FORCE_FRAME 0x600150UL +#define BMBN_REG_DBG_SELECT 0x610140UL +#define BMBN_REG_DBG_DWORD_ENABLE 0x610144UL +#define BMBN_REG_DBG_SHIFT 0x610148UL +#define BMBN_REG_DBG_FORCE_VALID 0x61014cUL +#define BMBN_REG_DBG_FORCE_FRAME 0x610150UL +#define NWM_REG_DBG_SELECT 0x8000ecUL +#define NWM_REG_DBG_DWORD_ENABLE 0x8000f0UL +#define NWM_REG_DBG_SHIFT 0x8000f4UL +#define 
NWM_REG_DBG_FORCE_VALID 0x8000f8UL +#define NWM_REG_DBG_FORCE_FRAME 0x8000fcUL +#define BRB_REG_BIG_RAM_ADDRESS 0x340800UL +#define BRB_REG_BIG_RAM_DATA 0x341500UL +#define BTB_REG_BIG_RAM_ADDRESS 0xdb0800UL +#define BTB_REG_BIG_RAM_DATA 0xdb0c00UL +#define BMB_REG_BIG_RAM_ADDRESS 0x540800UL +#define BMB_REG_BIG_RAM_DATA 0x540f00UL +#define MISCS_REG_RESET_PL_UA 0x009050UL +#define MISC_REG_RESET_PL_UA 0x008050UL +#define MISC_REG_RESET_PL_HV 0x008060UL +#define MISC_REG_RESET_PL_PDA_VMAIN_1 0x008070UL +#define MISC_REG_RESET_PL_PDA_VMAIN_2 0x008080UL +#define SEM_FAST_REG_INT_RAM 0x020000UL +#define DBG_REG_DBG_BLOCK_ON 0x010454UL +#define DBG_REG_FRAMING_MODE 0x010058UL +#define SEM_FAST_REG_DEBUG_MODE 0x000744UL +#define SEM_FAST_REG_DEBUG_ACTIVE 0x000740UL +#define SEM_FAST_REG_DBG_MODE6_SRC_DISABLE 0x000750UL +#define SEM_FAST_REG_FILTER_CID 0x000754UL +#define SEM_FAST_REG_EVENT_ID_RANGE_STRT 0x000760UL +#define SEM_FAST_REG_EVENT_ID_RANGE_END 0x000764UL +#define SEM_FAST_REG_FILTER_EVENT_ID 0x000758UL +#define SEM_FAST_REG_EVENT_ID_MASK 0x00075cUL +#define SEM_FAST_REG_RECORD_FILTER_ENABLE 0x000768UL +#define SEM_FAST_REG_DBG_MODE6_SRC_DISABLE 0x000750UL +#define SEM_FAST_REG_DEBUG_ACTIVE 0x000740UL +#define SEM_FAST_REG_RECORD_FILTER_ENABLE 0x000768UL +#define DBG_REG_TIMESTAMP_VALID_EN 0x010b58UL +#define DBG_REG_FILTER_ENABLE 0x0109d0UL +#define DBG_REG_TRIGGER_ENABLE 0x01054cUL +#define DBG_REG_FILTER_CNSTR_OPRTN_0 0x010a28UL +#define DBG_REG_TRIGGER_STATE_SET_CNSTR_OPRTN_0 0x01071cUL +#define DBG_REG_FILTER_CNSTR_DATA_0 0x0109d8UL +#define DBG_REG_TRIGGER_STATE_SET_CNSTR_DATA_0 0x01059cUL +#define DBG_REG_FILTER_CNSTR_DATA_MASK_0 0x0109f8UL +#define DBG_REG_TRIGGER_STATE_SET_CNSTR_DATA_MASK_0 0x01065cUL +#define DBG_REG_FILTER_CNSTR_FRAME_0 0x0109e8UL +#define DBG_REG_TRIGGER_STATE_SET_CNSTR_FRAME_0 0x0105fcUL +#define DBG_REG_FILTER_CNSTR_FRAME_MASK_0 0x010a08UL +#define DBG_REG_TRIGGER_STATE_SET_CNSTR_FRAME_MASK_0 0x0106bcUL +#define DBG_REG_FILTER_CNSTR_OFFSET_0 0x010a18UL +#define DBG_REG_TRIGGER_STATE_SET_CNSTR_OFFSET_0 0x0107dcUL +#define DBG_REG_FILTER_CNSTR_RANGE_0 0x010a38UL +#define DBG_REG_TRIGGER_STATE_SET_CNSTR_RANGE_0 0x01077cUL +#define DBG_REG_FILTER_CNSTR_CYCLIC_0 0x010a68UL +#define DBG_REG_TRIGGER_STATE_SET_CNSTR_CYCLIC_0 0x0108fcUL +#define DBG_REG_FILTER_CNSTR_MUST_0 0x010a48UL +#define DBG_REG_TRIGGER_STATE_SET_CNSTR_MUST_0 0x01083cUL +#define DBG_REG_INTR_BUFFER 0x014000UL +#define DBG_REG_INTR_BUFFER_WR_PTR 0x010404UL +#define DBG_REG_WRAP_ON_INT_BUFFER 0x010418UL +#define DBG_REG_INTR_BUFFER_RD_PTR 0x010400UL +#define DBG_REG_EXT_BUFFER_WR_PTR 0x010410UL +#define DBG_REG_WRAP_ON_EXT_BUFFER 0x01041cUL +#define SEM_FAST_REG_STALL_0 0x000488UL +#define SEM_FAST_REG_STALLED 0x000494UL +#define SEM_FAST_REG_STORM_REG_FILE 0x008000UL +#define SEM_FAST_REG_VFC_DATA_WR 0x000b40UL +#define SEM_FAST_REG_VFC_ADDR 0x000b44UL +#define SEM_FAST_REG_VFC_DATA_RD 0x000b48UL +#define SEM_FAST_REG_VFC_DATA_WR 0x000b40UL +#define SEM_FAST_REG_VFC_ADDR 0x000b44UL +#define SEM_FAST_REG_VFC_DATA_RD 0x000b48UL +#define RSS_REG_RSS_RAM_ADDR 0x238c30UL +#define RSS_REG_RSS_RAM_DATA 0x238c20UL +#define MISCS_REG_BLOCK_256B_EN 0x009074UL +#define MCP_REG_CPU_REG_FILE 0xe05200UL +#define MCP_REG_CPU_REG_FILE_SIZE 32 +#define DBG_REG_CALENDAR_OUT_DATA 0x010480UL +#define DBG_REG_FULL_MODE 0x010060UL +#define DBG_REG_PCI_EXT_BUFFER_STRT_ADDR_LSB 0x010430UL +#define DBG_REG_PCI_EXT_BUFFER_STRT_ADDR_MSB 0x010434UL +#define DBG_REG_TARGET_PACKET_SIZE 0x010b3cUL +#define 
DBG_REG_PCI_EXT_BUFFER_SIZE 0x010438UL +#define DBG_REG_PCI_FUNC_NUM 0x010a98UL +#define DBG_REG_PCI_LOGIC_ADDR 0x010460UL +#define DBG_REG_PCI_REQ_CREDIT 0x010440UL +#define DBG_REG_DEBUG_TARGET 0x01005cUL +#define DBG_REG_OUTPUT_ENABLE 0x01000cUL +#define DBG_REG_OUTPUT_ENABLE 0x01000cUL +#define DBG_REG_DEBUG_TARGET 0x01005cUL +#define DBG_REG_OTHER_ENGINE_MODE 0x010010UL +#define NIG_REG_DEBUG_PORT 0x5020d0UL +#define DBG_REG_ETHERNET_HDR_WIDTH 0x010b38UL +#define DBG_REG_ETHERNET_HDR_7 0x010b34UL +#define DBG_REG_ETHERNET_HDR_6 0x010b30UL +#define DBG_REG_ETHERNET_HDR_5 0x010b2cUL +#define DBG_REG_ETHERNET_HDR_4 0x010b28UL +#define DBG_REG_TARGET_PACKET_SIZE 0x010b3cUL +#define DBG_REG_NIG_DATA_LIMIT_SIZE 0x01043cUL +#define DBG_REG_TIMESTAMP_VALID_EN 0x010b58UL +#define DBG_REG_TIMESTAMP_FRAME_EN 0x010b54UL +#define DBG_REG_TIMESTAMP_TICK 0x010b50UL +#define DBG_REG_FILTER_ID_NUM 0x0109d4UL +#define DBG_REG_FILTER_MSG_LENGTH_ENABLE 0x010a78UL +#define DBG_REG_FILTER_MSG_LENGTH 0x010a7cUL +#define DBG_REG_RCRD_ON_WINDOW_PRE_NUM_CHUNKS 0x010a90UL +#define DBG_REG_RCRD_ON_WINDOW_POST_NUM_CYCLES 0x010a94UL +#define DBG_REG_RCRD_ON_WINDOW_PRE_TRGR_EVNT_MODE 0x010a88UL +#define DBG_REG_RCRD_ON_WINDOW_POST_TRGR_EVNT_MODE 0x010a8cUL +#define DBG_REG_TRIGGER_ENABLE 0x01054cUL +#define DBG_REG_TRIGGER_STATE_ID_0 0x010554UL +#define DBG_REG_TRIGGER_STATE_MSG_LENGTH_ENABLE_0 0x01095cUL +#define DBG_REG_TRIGGER_STATE_MSG_LENGTH_0 0x010968UL +#define DBG_REG_TRIGGER_STATE_SET_COUNT_0 0x010584UL +#define DBG_REG_TRIGGER_STATE_SET_NXT_STATE_0 0x01056cUL +#define DBG_REG_NO_GRANT_ON_FULL 0x010458UL +#define DBG_REG_STORM_ID_NUM 0x010b14UL +#define DBG_REG_CALENDAR_SLOT0 0x010014UL +#define DBG_REG_HW_ID_NUM 0x010b10UL +#define DBG_REG_FILTER_ENABLE 0x0109d0UL +#define DBG_REG_TIMESTAMP 0x010b4cUL +#define DBG_REG_CPU_TIMEOUT 0x010450UL +#define DBG_REG_TRIGGER_STATUS_CUR_STATE 0x010b60UL +#define GRC_REG_TRACE_FIFO_VALID_DATA 0x050064UL +#define GRC_REG_TRACE_FIFO 0x050068UL +#define IGU_REG_ERROR_HANDLING_DATA_VALID 0x181530UL +#define IGU_REG_ERROR_HANDLING_MEMORY 0x181520UL +#define GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW 0x05040cUL +#define GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW 0x05040cUL +#define GRC_REG_PROTECTION_OVERRIDE_WINDOW 0x050500UL +#define TSEM_REG_VF_ERROR 0x1700408UL +#define USEM_REG_VF_ERROR 0x1900408UL +#define MSEM_REG_VF_ERROR 0x1800408UL +#define XSEM_REG_VF_ERROR 0x1400408UL +#define YSEM_REG_VF_ERROR 0x1500408UL +#define PSEM_REG_VF_ERROR 0x1600408UL +#define PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR 0x2aa118UL +#define IGU_REG_STATISTIC_NUM_VF_MSG_SENT 0x180408UL +#define IGU_REG_VF_CONFIGURATION 0x180804UL +#define PSWHST_REG_ZONE_PERMISSION_TABLE 0x2a0800UL +#define DORQ_REG_VF_USAGE_CNT 0x1009c4UL +#define PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 0xd806ccUL +#define PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 0xd806c8UL +#define PRS_REG_MSG_CT_MAIN_0 0x1f0a24UL +#define PRS_REG_MSG_CT_LB_0 0x1f0a28UL +#define BRB_REG_PER_TC_COUNTERS 0x341a00UL + +/* added */ +#define DORQ_REG_PF_DPI_BIT_SHIFT 0x100450UL +#define DORQ_REG_PF_ICID_BIT_SHIFT_NORM 0x100448UL +#define DORQ_REG_PF_MIN_ADDR_REG1 0x100400UL +#define MISCS_REG_FUNCTION_HIDE 0x0096f0UL +#define PCIE_REG_PRTY_MASK 0x0547b4UL +#define PGLUE_B_REG_VF_BAR0_SIZE_K2 0x2aaeb4UL +#define BAR0_MAP_REG_YSDM_RAM 0x1e80000UL +#define SEM_FAST_REG_INT_RAM_SIZE 20480 +#define MCP_REG_SCRATCH_SIZE 57344 + +#define CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE_SHIFT 24 +#define CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE_SHIFT 24 +#define 
CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE_SHIFT 16 +#define DORQ_REG_DB_DROP_DETAILS_ADDRESS 0x100a1cUL + +/* 8.10.9.0 FW */ +#define NIG_REG_VXLAN_CTRL 0x50105cUL +#define PRS_REG_SEARCH_ROCE 0x1f040cUL +#define PRS_REG_CM_HDR_GFT 0x1f11c8UL +#define PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT 0 +#define PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT 8 +#define CCFC_REG_WEAK_ENABLE_VF 0x2e0704UL +#define TCFC_REG_STRONG_ENABLE_VF 0x2d070cUL +#define TCFC_REG_WEAK_ENABLE_VF 0x2d0704UL +#define PRS_REG_SEARCH_GFT 0x1f11bcUL +#define PRS_REG_LOAD_L2_FILTER 0x1f0198UL +#define PRS_REG_GFT_CAM 0x1f1100UL +#define PRS_REG_GFT_PROFILE_MASK_RAM 0x1f1000UL +#define PGLUE_B_REG_MSDM_VF_SHIFT_B 0x2aa1c4UL +#define PGLUE_B_REG_MSDM_OFFSET_MASK_B 0x2aa1c0UL +#define PRS_REG_PKT_LEN_STAT_TAGS_NOT_COUNTED_FIRST 0x1f0a0cUL +#define PRS_REG_SEARCH_FCOE 0x1f0408UL +#define PGLUE_B_REG_PGL_ADDR_E8_F0_K2 0x2aaf98UL +#define NIG_REG_DSCP_TO_TC_MAP_ENABLE 0x5088f8UL +#define PGLUE_B_REG_PGL_ADDR_EC_F0_K2 0x2aaf9cUL +#define PGLUE_B_REG_PGL_ADDR_F0_F0_K2 0x2aafa0UL +#define PRS_REG_ROCE_DEST_QP_MAX_PF 0x1f0430UL +#define PGLUE_B_REG_PGL_ADDR_F4_F0_K2 0x2aafa4UL +#define IGU_REG_WRITE_DONE_PENDING 0x180900UL +#define NIG_REG_LLH_TAGMAC_DEF_PF_VECTOR 0x50196cUL +#define PRS_REG_MSG_INFO 0x1f0a1cUL +#define BAR0_MAP_REG_XSDM_RAM 0x1e00000UL + +/* 8.18.7.0 FW */ +#define BRB_REG_INT_MASK_10 0x3401b8UL + +#define IGU_REG_PRODUCER_MEMORY 0x182000UL +#define IGU_REG_CONSUMER_MEM 0x183000UL + +#define CDU_REG_CCFC_CTX_VALID0 0x580400UL +#define CDU_REG_CCFC_CTX_VALID1 0x580404UL +#define CDU_REG_TCFC_CTX_VALID0 0x580408UL + +#define DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN_K2 0x100930UL +#define DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN_K2 0x10092cUL +#define CNIG_REG_NW_PORT_MODE_BB 0x218200UL +#define CNIG_REG_PMEG_IF_CMD_BB 0x21821cUL +#define CNIG_REG_PMEG_IF_ADDR_BB 0x218224UL +#define CNIG_REG_PMEG_IF_WRDATA_BB 0x218228UL +#define NWM_REG_MAC0_K2 0x800400UL + #define CNIG_REG_NIG_PORT0_CONF_NIG_PORT_ENABLE_0_K2_SHIFT 0 + #define CNIG_REG_NIG_PORT0_CONF_NIG_PORT_NWM_PORT_MAP_0_K2_SHIFT 1 + #define CNIG_REG_NIG_PORT0_CONF_NIG_PORT_RATE_0_K2_SHIFT 3 +#define ETH_MAC_REG_XIF_MODE_K2 0x000080UL + #define ETH_MAC_REG_XIF_MODE_XGMII_K2_SHIFT 0 +#define ETH_MAC_REG_FRM_LENGTH_K2 0x000014UL + #define ETH_MAC_REG_FRM_LENGTH_FRM_LENGTH_K2_SHIFT 0 +#define ETH_MAC_REG_TX_IPG_LENGTH_K2 0x000044UL + #define ETH_MAC_REG_TX_IPG_LENGTH_TXIPG_K2_SHIFT 0 +#define ETH_MAC_REG_RX_FIFO_SECTIONS_K2 0x00001cUL + #define ETH_MAC_REG_RX_FIFO_SECTIONS_RX_SECTION_FULL_K2_SHIFT 0 +#define ETH_MAC_REG_TX_FIFO_SECTIONS_K2 0x000020UL + #define ETH_MAC_REG_TX_FIFO_SECTIONS_TX_SECTION_EMPTY_K2_SHIFT 16 + #define ETH_MAC_REG_TX_FIFO_SECTIONS_TX_SECTION_FULL_K2_SHIFT 0 + #define ETH_MAC_REG_COMMAND_CONFIG_CRC_FWD_K2 (0x1 << 6) + #define ETH_MAC_REG_COMMAND_CONFIG_CRC_FWD_K2_SHIFT 6 +#define ETH_MAC_REG_COMMAND_CONFIG_K2 0x000008UL +#define MISC_REG_XMAC_CORE_PORT_MODE_BB 0x008c08UL +#define MISC_REG_XMAC_PHY_PORT_MODE_BB 0x008c04UL +#define XMAC_REG_MODE_BB 0x210008UL +#define XMAC_REG_RX_MAX_SIZE_BB 0x210040UL +#define XMAC_REG_TX_CTRL_LO_BB 0x210020UL +#define XMAC_REG_CTRL_BB 0x210000UL +#define XMAC_REG_CTRL_TX_EN_BB (0x1UL << 0) +#define XMAC_REG_CTRL_RX_EN_BB (0x1UL << 1) +#define XMAC_REG_RX_CTRL_BB 0x210030UL +#define XMAC_REG_RX_CTRL_PROCESS_VARIABLE_PREAMBLE_BB (0x1UL << 12) + +#define PGLUE_B_REG_PGL_ADDR_88_F0_BB 0x2aa404UL +#define PGLUE_B_REG_PGL_ADDR_8C_F0_BB 0x2aa408UL +#define PGLUE_B_REG_PGL_ADDR_90_F0_BB 0x2aa40cUL +#define PGLUE_B_REG_PGL_ADDR_94_F0_BB 0x2aa410UL 
+#define MISCS_REG_FUNCTION_HIDE_BB_K2 0x0096f0UL +#define PCIE_REG_PRTY_MASK_K2 0x0547b4UL + +#define PRS_REG_OUTPUT_FORMAT_4_0_BB_K2 0x1f099cUL + +#define NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 0x501a00UL +#define NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 0x501a80UL +#define NIG_REG_LLH_FUNC_FILTER_MODE_BB_K2 0x501ac0UL +#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_BB_K2 0x501b00UL + +#define PSWRQ2_REG_WR_MBS0 0x240400UL +#define PGLUE_B_REG_MASTER_WRITE_PAD_ENABLE 0x2aae30UL +#define DORQ_REG_PF_USAGE_CNT 0x1009c0UL +#define DORQ_REG_DPM_FORCE_ABORT 0x1009d8UL +#define DORQ_REG_PF_OVFL_STICKY 0x1009d0UL +#define DORQ_REG_INT_STS 0x100180UL + #define DORQ_REG_INT_STS_DB_DROP (0x1UL << 1) + #define DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR (0x1UL << 2) + #define DORQ_REG_INT_STS_DORQ_FIFO_AFULL (0x1UL << 3) +#define DORQ_REG_DB_DROP_DETAILS_REL 0x100a28UL +#define DORQ_REG_INT_STS_WR 0x100188UL +#define DORQ_REG_DB_DROP_DETAILS_REASON 0x100a20UL +#define MCP_REG_CPU_PROGRAM_COUNTER 0xe0501cUL + #define MCP_REG_CPU_STATE_SOFT_HALTED (0x1UL << 10) +#define PRS_REG_SEARCH_TENANT_ID 0x1f044cUL +#define PGLUE_B_REG_VF_BAR1_SIZE 0x2aae68UL + +#define RSS_REG_RSS_RAM_MASK 0x238c10UL + +#define NIG_REG_LLH_FUNC_TAG_EN 0x5019b0UL +#define NIG_REG_LLH_FUNC_TAG_VALUE 0x5019d0UL +#define DORQ_REG_TAG1_OVRD_MODE 0x1008b4UL +#define DORQ_REG_PF_PCP 0x1008c4UL +#define DORQ_REG_PF_EXT_VID 0x1008c8UL +#define PRS_REG_SEARCH_NON_IP_AS_GFT 0x1f11c0UL +#define NIG_REG_LLH_PPFID2PFID_TBL_0 0x501970UL +#define NIG_REG_PPF_TO_ENGINE_SEL 0x508900UL +#define NIG_REG_LLH_ENG_CLS_ROCE_QP_SEL 0x501b98UL +#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL 0x501b40UL + +#define MCP_REG_CACHE_PAGING_ENABLE 0xe06304UL +#define PSWRQ2_REG_RESET_STT 0x240008UL +#define PSWRQ2_REG_PRTY_STS_WR_H_0 0x240208UL +#define PCI_EXP_DEVCTL_PAYLOAD 0x00e0 +#define PGLUE_B_REG_MASTER_DISCARD_NBLOCK 0x2aa58cUL +#define PGLUE_B_REG_PRTY_STS_WR_H_0 0x2a8208UL +#define DORQ_REG_VF_USAGE_CNT_LIM 0x1009ccUL +#define PGLUE_B_REG_SR_IOV_DISABLED_REQUEST 0x2aa06cUL +#define PGLUE_B_REG_SR_IOV_DISABLED_REQUEST_CLR 0x2aa070UL diff --git a/src/spdk/dpdk/drivers/net/qede/meson.build b/src/spdk/dpdk/drivers/net/qede/meson.build new file mode 100644 index 000000000..12388a680 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/qede/meson.build @@ -0,0 +1,12 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2018 Luca Boccassi + +subdir('base') +objs = [base_objs] + +sources = files( + 'qede_ethdev.c', + 'qede_filter.c', + 'qede_main.c', + 'qede_rxtx.c', +) diff --git a/src/spdk/dpdk/drivers/net/qede/qede_ethdev.c b/src/spdk/dpdk/drivers/net/qede/qede_ethdev.c new file mode 100644 index 000000000..c4f8f1258 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/qede/qede_ethdev.c @@ -0,0 +1,2882 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. + * All rights reserved. 
+ * www.cavium.com + */ + +#include "qede_ethdev.h" +#include +#include +#include +#include + +/* Globals */ +int qede_logtype_init; +int qede_logtype_driver; + +static const struct qed_eth_ops *qed_ops; +static int qede_eth_dev_uninit(struct rte_eth_dev *eth_dev); +static int qede_eth_dev_init(struct rte_eth_dev *eth_dev); + +#define QEDE_SP_TIMER_PERIOD 10000 /* 100ms */ + +struct rte_qede_xstats_name_off { + char name[RTE_ETH_XSTATS_NAME_SIZE]; + uint64_t offset; +}; + +static const struct rte_qede_xstats_name_off qede_xstats_strings[] = { + {"rx_unicast_bytes", + offsetof(struct ecore_eth_stats_common, rx_ucast_bytes)}, + {"rx_multicast_bytes", + offsetof(struct ecore_eth_stats_common, rx_mcast_bytes)}, + {"rx_broadcast_bytes", + offsetof(struct ecore_eth_stats_common, rx_bcast_bytes)}, + {"rx_unicast_packets", + offsetof(struct ecore_eth_stats_common, rx_ucast_pkts)}, + {"rx_multicast_packets", + offsetof(struct ecore_eth_stats_common, rx_mcast_pkts)}, + {"rx_broadcast_packets", + offsetof(struct ecore_eth_stats_common, rx_bcast_pkts)}, + + {"tx_unicast_bytes", + offsetof(struct ecore_eth_stats_common, tx_ucast_bytes)}, + {"tx_multicast_bytes", + offsetof(struct ecore_eth_stats_common, tx_mcast_bytes)}, + {"tx_broadcast_bytes", + offsetof(struct ecore_eth_stats_common, tx_bcast_bytes)}, + {"tx_unicast_packets", + offsetof(struct ecore_eth_stats_common, tx_ucast_pkts)}, + {"tx_multicast_packets", + offsetof(struct ecore_eth_stats_common, tx_mcast_pkts)}, + {"tx_broadcast_packets", + offsetof(struct ecore_eth_stats_common, tx_bcast_pkts)}, + + {"rx_64_byte_packets", + offsetof(struct ecore_eth_stats_common, rx_64_byte_packets)}, + {"rx_65_to_127_byte_packets", + offsetof(struct ecore_eth_stats_common, + rx_65_to_127_byte_packets)}, + {"rx_128_to_255_byte_packets", + offsetof(struct ecore_eth_stats_common, + rx_128_to_255_byte_packets)}, + {"rx_256_to_511_byte_packets", + offsetof(struct ecore_eth_stats_common, + rx_256_to_511_byte_packets)}, + {"rx_512_to_1023_byte_packets", + offsetof(struct ecore_eth_stats_common, + rx_512_to_1023_byte_packets)}, + {"rx_1024_to_1518_byte_packets", + offsetof(struct ecore_eth_stats_common, + rx_1024_to_1518_byte_packets)}, + {"tx_64_byte_packets", + offsetof(struct ecore_eth_stats_common, tx_64_byte_packets)}, + {"tx_65_to_127_byte_packets", + offsetof(struct ecore_eth_stats_common, + tx_65_to_127_byte_packets)}, + {"tx_128_to_255_byte_packets", + offsetof(struct ecore_eth_stats_common, + tx_128_to_255_byte_packets)}, + {"tx_256_to_511_byte_packets", + offsetof(struct ecore_eth_stats_common, + tx_256_to_511_byte_packets)}, + {"tx_512_to_1023_byte_packets", + offsetof(struct ecore_eth_stats_common, + tx_512_to_1023_byte_packets)}, + {"tx_1024_to_1518_byte_packets", + offsetof(struct ecore_eth_stats_common, + tx_1024_to_1518_byte_packets)}, + + {"rx_mac_crtl_frames", + offsetof(struct ecore_eth_stats_common, rx_mac_crtl_frames)}, + {"tx_mac_control_frames", + offsetof(struct ecore_eth_stats_common, tx_mac_ctrl_frames)}, + {"rx_pause_frames", + offsetof(struct ecore_eth_stats_common, rx_pause_frames)}, + {"tx_pause_frames", + offsetof(struct ecore_eth_stats_common, tx_pause_frames)}, + {"rx_priority_flow_control_frames", + offsetof(struct ecore_eth_stats_common, rx_pfc_frames)}, + {"tx_priority_flow_control_frames", + offsetof(struct ecore_eth_stats_common, tx_pfc_frames)}, + + {"rx_crc_errors", + offsetof(struct ecore_eth_stats_common, rx_crc_errors)}, + {"rx_align_errors", + offsetof(struct ecore_eth_stats_common, rx_align_errors)}, + 
{"rx_carrier_errors", + offsetof(struct ecore_eth_stats_common, rx_carrier_errors)}, + {"rx_oversize_packet_errors", + offsetof(struct ecore_eth_stats_common, rx_oversize_packets)}, + {"rx_jabber_errors", + offsetof(struct ecore_eth_stats_common, rx_jabbers)}, + {"rx_undersize_packet_errors", + offsetof(struct ecore_eth_stats_common, rx_undersize_packets)}, + {"rx_fragments", offsetof(struct ecore_eth_stats_common, rx_fragments)}, + {"rx_host_buffer_not_available", + offsetof(struct ecore_eth_stats_common, no_buff_discards)}, + /* Number of packets discarded because they are bigger than MTU */ + {"rx_packet_too_big_discards", + offsetof(struct ecore_eth_stats_common, + packet_too_big_discard)}, + {"rx_ttl_zero_discards", + offsetof(struct ecore_eth_stats_common, ttl0_discard)}, + {"rx_multi_function_tag_filter_discards", + offsetof(struct ecore_eth_stats_common, mftag_filter_discards)}, + {"rx_mac_filter_discards", + offsetof(struct ecore_eth_stats_common, mac_filter_discards)}, + {"rx_gft_filter_drop", + offsetof(struct ecore_eth_stats_common, gft_filter_drop)}, + {"rx_hw_buffer_truncates", + offsetof(struct ecore_eth_stats_common, brb_truncates)}, + {"rx_hw_buffer_discards", + offsetof(struct ecore_eth_stats_common, brb_discards)}, + {"tx_error_drop_packets", + offsetof(struct ecore_eth_stats_common, tx_err_drop_pkts)}, + + {"rx_mac_bytes", offsetof(struct ecore_eth_stats_common, rx_mac_bytes)}, + {"rx_mac_unicast_packets", + offsetof(struct ecore_eth_stats_common, rx_mac_uc_packets)}, + {"rx_mac_multicast_packets", + offsetof(struct ecore_eth_stats_common, rx_mac_mc_packets)}, + {"rx_mac_broadcast_packets", + offsetof(struct ecore_eth_stats_common, rx_mac_bc_packets)}, + {"rx_mac_frames_ok", + offsetof(struct ecore_eth_stats_common, rx_mac_frames_ok)}, + {"tx_mac_bytes", offsetof(struct ecore_eth_stats_common, tx_mac_bytes)}, + {"tx_mac_unicast_packets", + offsetof(struct ecore_eth_stats_common, tx_mac_uc_packets)}, + {"tx_mac_multicast_packets", + offsetof(struct ecore_eth_stats_common, tx_mac_mc_packets)}, + {"tx_mac_broadcast_packets", + offsetof(struct ecore_eth_stats_common, tx_mac_bc_packets)}, + + {"lro_coalesced_packets", + offsetof(struct ecore_eth_stats_common, tpa_coalesced_pkts)}, + {"lro_coalesced_events", + offsetof(struct ecore_eth_stats_common, tpa_coalesced_events)}, + {"lro_aborts_num", + offsetof(struct ecore_eth_stats_common, tpa_aborts_num)}, + {"lro_not_coalesced_packets", + offsetof(struct ecore_eth_stats_common, + tpa_not_coalesced_pkts)}, + {"lro_coalesced_bytes", + offsetof(struct ecore_eth_stats_common, + tpa_coalesced_bytes)}, +}; + +static const struct rte_qede_xstats_name_off qede_bb_xstats_strings[] = { + {"rx_1519_to_1522_byte_packets", + offsetof(struct ecore_eth_stats, bb) + + offsetof(struct ecore_eth_stats_bb, + rx_1519_to_1522_byte_packets)}, + {"rx_1519_to_2047_byte_packets", + offsetof(struct ecore_eth_stats, bb) + + offsetof(struct ecore_eth_stats_bb, + rx_1519_to_2047_byte_packets)}, + {"rx_2048_to_4095_byte_packets", + offsetof(struct ecore_eth_stats, bb) + + offsetof(struct ecore_eth_stats_bb, + rx_2048_to_4095_byte_packets)}, + {"rx_4096_to_9216_byte_packets", + offsetof(struct ecore_eth_stats, bb) + + offsetof(struct ecore_eth_stats_bb, + rx_4096_to_9216_byte_packets)}, + {"rx_9217_to_16383_byte_packets", + offsetof(struct ecore_eth_stats, bb) + + offsetof(struct ecore_eth_stats_bb, + rx_9217_to_16383_byte_packets)}, + + {"tx_1519_to_2047_byte_packets", + offsetof(struct ecore_eth_stats, bb) + + offsetof(struct ecore_eth_stats_bb, + 
tx_1519_to_2047_byte_packets)}, + {"tx_2048_to_4095_byte_packets", + offsetof(struct ecore_eth_stats, bb) + + offsetof(struct ecore_eth_stats_bb, + tx_2048_to_4095_byte_packets)}, + {"tx_4096_to_9216_byte_packets", + offsetof(struct ecore_eth_stats, bb) + + offsetof(struct ecore_eth_stats_bb, + tx_4096_to_9216_byte_packets)}, + {"tx_9217_to_16383_byte_packets", + offsetof(struct ecore_eth_stats, bb) + + offsetof(struct ecore_eth_stats_bb, + tx_9217_to_16383_byte_packets)}, + + {"tx_lpi_entry_count", + offsetof(struct ecore_eth_stats, bb) + + offsetof(struct ecore_eth_stats_bb, tx_lpi_entry_count)}, + {"tx_total_collisions", + offsetof(struct ecore_eth_stats, bb) + + offsetof(struct ecore_eth_stats_bb, tx_total_collisions)}, +}; + +static const struct rte_qede_xstats_name_off qede_ah_xstats_strings[] = { + {"rx_1519_to_max_byte_packets", + offsetof(struct ecore_eth_stats, ah) + + offsetof(struct ecore_eth_stats_ah, + rx_1519_to_max_byte_packets)}, + {"tx_1519_to_max_byte_packets", + offsetof(struct ecore_eth_stats, ah) + + offsetof(struct ecore_eth_stats_ah, + tx_1519_to_max_byte_packets)}, +}; + +static const struct rte_qede_xstats_name_off qede_rxq_xstats_strings[] = { + {"rx_q_segments", + offsetof(struct qede_rx_queue, rx_segs)}, + {"rx_q_hw_errors", + offsetof(struct qede_rx_queue, rx_hw_errors)}, + {"rx_q_allocation_errors", + offsetof(struct qede_rx_queue, rx_alloc_errors)} +}; + +/* Get FW version string based on fw_size */ +static int +qede_fw_version_get(struct rte_eth_dev *dev, char *fw_ver, size_t fw_size) +{ + struct qede_dev *qdev = dev->data->dev_private; + struct ecore_dev *edev = &qdev->edev; + struct qed_dev_info *info = &qdev->dev_info.common; + static char ver_str[QEDE_PMD_DRV_VER_STR_SIZE]; + size_t size; + + if (fw_ver == NULL) + return 0; + + if (IS_PF(edev)) + snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%s", + QEDE_PMD_FW_VERSION); + else + snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%d.%d.%d.%d", + info->fw_major, info->fw_minor, + info->fw_rev, info->fw_eng); + size = strlen(ver_str); + if (size + 1 <= fw_size) /* Add 1 byte for "\0" */ + strlcpy(fw_ver, ver_str, fw_size); + else + return (size + 1); + + snprintf(ver_str + size, (QEDE_PMD_DRV_VER_STR_SIZE - size), + " MFW: %d.%d.%d.%d", + GET_MFW_FIELD(info->mfw_rev, QED_MFW_VERSION_3), + GET_MFW_FIELD(info->mfw_rev, QED_MFW_VERSION_2), + GET_MFW_FIELD(info->mfw_rev, QED_MFW_VERSION_1), + GET_MFW_FIELD(info->mfw_rev, QED_MFW_VERSION_0)); + size = strlen(ver_str); + if (size + 1 <= fw_size) + strlcpy(fw_ver, ver_str, fw_size); + + if (fw_size <= 32) + goto out; + + snprintf(ver_str + size, (QEDE_PMD_DRV_VER_STR_SIZE - size), + " MBI: %d.%d.%d", + GET_MFW_FIELD(info->mbi_version, QED_MBI_VERSION_2), + GET_MFW_FIELD(info->mbi_version, QED_MBI_VERSION_1), + GET_MFW_FIELD(info->mbi_version, QED_MBI_VERSION_0)); + size = strlen(ver_str); + if (size + 1 <= fw_size) + strlcpy(fw_ver, ver_str, fw_size); + +out: + return 0; +} + +static void qede_interrupt_action(struct ecore_hwfn *p_hwfn) +{ + ecore_int_sp_dpc((osal_int_ptr_t)(p_hwfn)); +} + +static void +qede_interrupt_handler_intx(void *param) +{ + struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param; + struct qede_dev *qdev = eth_dev->data->dev_private; + struct ecore_dev *edev = &qdev->edev; + u64 status; + + /* Check if our device actually raised an interrupt */ + status = ecore_int_igu_read_sisr_reg(ECORE_LEADING_HWFN(edev)); + if (status & 0x1) { + qede_interrupt_action(ECORE_LEADING_HWFN(edev)); + + if (rte_intr_ack(eth_dev->intr_handle)) + DP_ERR(edev, 
"rte_intr_ack failed\n"); + } +} + +static void +qede_interrupt_handler(void *param) +{ + struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param; + struct qede_dev *qdev = eth_dev->data->dev_private; + struct ecore_dev *edev = &qdev->edev; + + qede_interrupt_action(ECORE_LEADING_HWFN(edev)); + if (rte_intr_ack(eth_dev->intr_handle)) + DP_ERR(edev, "rte_intr_ack failed\n"); +} + +static void +qede_assign_rxtx_handlers(struct rte_eth_dev *dev, bool is_dummy) +{ + uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads; + struct qede_dev *qdev = dev->data->dev_private; + struct ecore_dev *edev = &qdev->edev; + bool use_tx_offload = false; + + if (is_dummy) { + dev->rx_pkt_burst = qede_rxtx_pkts_dummy; + dev->tx_pkt_burst = qede_rxtx_pkts_dummy; + return; + } + + if (ECORE_IS_CMT(edev)) { + dev->rx_pkt_burst = qede_recv_pkts_cmt; + dev->tx_pkt_burst = qede_xmit_pkts_cmt; + return; + } + + if (dev->data->lro || dev->data->scattered_rx) { + DP_INFO(edev, "Assigning qede_recv_pkts\n"); + dev->rx_pkt_burst = qede_recv_pkts; + } else { + DP_INFO(edev, "Assigning qede_recv_pkts_regular\n"); + dev->rx_pkt_burst = qede_recv_pkts_regular; + } + + use_tx_offload = !!(tx_offloads & + (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | /* tunnel */ + DEV_TX_OFFLOAD_TCP_TSO | /* tso */ + DEV_TX_OFFLOAD_VLAN_INSERT)); /* vlan insert */ + + if (use_tx_offload) { + DP_INFO(edev, "Assigning qede_xmit_pkts\n"); + dev->tx_pkt_burst = qede_xmit_pkts; + } else { + DP_INFO(edev, "Assigning qede_xmit_pkts_regular\n"); + dev->tx_pkt_burst = qede_xmit_pkts_regular; + } +} + +static void +qede_alloc_etherdev(struct qede_dev *qdev, struct qed_dev_eth_info *info) +{ + rte_memcpy(&qdev->dev_info, info, sizeof(*info)); + qdev->ops = qed_ops; +} + +static void qede_print_adapter_info(struct rte_eth_dev *dev) +{ + struct qede_dev *qdev = dev->data->dev_private; + struct ecore_dev *edev = &qdev->edev; + static char ver_str[QEDE_PMD_DRV_VER_STR_SIZE]; + + DP_INFO(edev, "**************************************************\n"); + DP_INFO(edev, " %-20s: %s\n", "DPDK version", rte_version()); + DP_INFO(edev, " %-20s: %s %c%d\n", "Chip details", + ECORE_IS_BB(edev) ? 
"BB" : "AH", + 'A' + edev->chip_rev, + (int)edev->chip_metal); + snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%s", + QEDE_PMD_DRV_VERSION); + DP_INFO(edev, " %-20s: %s\n", "Driver version", ver_str); + snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%s", + QEDE_PMD_BASE_VERSION); + DP_INFO(edev, " %-20s: %s\n", "Base version", ver_str); + qede_fw_version_get(dev, ver_str, sizeof(ver_str)); + DP_INFO(edev, " %-20s: %s\n", "Firmware version", ver_str); + DP_INFO(edev, " %-20s: %s\n", "Firmware file", qede_fw_file); + DP_INFO(edev, "**************************************************\n"); +} + +static void qede_reset_queue_stats(struct qede_dev *qdev, bool xstats) +{ + struct rte_eth_dev *dev = (struct rte_eth_dev *)qdev->ethdev; + struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); + unsigned int i = 0, j = 0, qid; + unsigned int rxq_stat_cntrs, txq_stat_cntrs; + struct qede_tx_queue *txq; + + DP_VERBOSE(edev, ECORE_MSG_DEBUG, "Clearing queue stats\n"); + + rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(dev), + RTE_ETHDEV_QUEUE_STAT_CNTRS); + txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(dev), + RTE_ETHDEV_QUEUE_STAT_CNTRS); + + for (qid = 0; qid < qdev->num_rx_queues; qid++) { + OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) + + offsetof(struct qede_rx_queue, rcv_pkts), 0, + sizeof(uint64_t)); + OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) + + offsetof(struct qede_rx_queue, rx_hw_errors), 0, + sizeof(uint64_t)); + OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) + + offsetof(struct qede_rx_queue, rx_alloc_errors), 0, + sizeof(uint64_t)); + + if (xstats) + for (j = 0; j < RTE_DIM(qede_rxq_xstats_strings); j++) + OSAL_MEMSET((((char *) + (qdev->fp_array[qid].rxq)) + + qede_rxq_xstats_strings[j].offset), + 0, + sizeof(uint64_t)); + + i++; + if (i == rxq_stat_cntrs) + break; + } + + i = 0; + + for (qid = 0; qid < qdev->num_tx_queues; qid++) { + txq = qdev->fp_array[qid].txq; + + OSAL_MEMSET((uint64_t *)(uintptr_t) + (((uint64_t)(uintptr_t)(txq)) + + offsetof(struct qede_tx_queue, xmit_pkts)), 0, + sizeof(uint64_t)); + + i++; + if (i == txq_stat_cntrs) + break; + } +} + +static int +qede_stop_vport(struct ecore_dev *edev) +{ + struct ecore_hwfn *p_hwfn; + uint8_t vport_id; + int rc; + int i; + + vport_id = 0; + for_each_hwfn(edev, i) { + p_hwfn = &edev->hwfns[i]; + rc = ecore_sp_vport_stop(p_hwfn, p_hwfn->hw_info.opaque_fid, + vport_id); + if (rc != ECORE_SUCCESS) { + DP_ERR(edev, "Stop V-PORT failed rc = %d\n", rc); + return rc; + } + } + + DP_INFO(edev, "vport stopped\n"); + + return 0; +} + +static int +qede_start_vport(struct qede_dev *qdev, uint16_t mtu) +{ + struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); + struct ecore_sp_vport_start_params params; + struct ecore_hwfn *p_hwfn; + int rc; + int i; + + if (qdev->vport_started) + qede_stop_vport(edev); + + memset(¶ms, 0, sizeof(params)); + params.vport_id = 0; + params.mtu = mtu; + /* @DPDK - Disable FW placement */ + params.zero_placement_offset = 1; + for_each_hwfn(edev, i) { + p_hwfn = &edev->hwfns[i]; + params.concrete_fid = p_hwfn->hw_info.concrete_fid; + params.opaque_fid = p_hwfn->hw_info.opaque_fid; + rc = ecore_sp_vport_start(p_hwfn, ¶ms); + if (rc != ECORE_SUCCESS) { + DP_ERR(edev, "Start V-PORT failed %d\n", rc); + return rc; + } + } + ecore_reset_vport_stats(edev); + qdev->vport_started = true; + DP_INFO(edev, "VPORT started with MTU = %u\n", mtu); + + return 0; +} + +#define QEDE_NPAR_TX_SWITCHING "npar_tx_switching" +#define QEDE_VF_TX_SWITCHING "vf_tx_switching" + +/* Activate or deactivate vport via vport-update */ +int 
qede_activate_vport(struct rte_eth_dev *eth_dev, bool flg) +{ + struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); + struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); + struct ecore_sp_vport_update_params params; + struct ecore_hwfn *p_hwfn; + uint8_t i; + int rc = -1; + + memset(&params, 0, sizeof(struct ecore_sp_vport_update_params)); + params.vport_id = 0; + params.update_vport_active_rx_flg = 1; + params.update_vport_active_tx_flg = 1; + params.vport_active_rx_flg = flg; + params.vport_active_tx_flg = flg; + if ((qdev->enable_tx_switching == false) && (flg == true)) { + params.update_tx_switching_flg = 1; + params.tx_switching_flg = !flg; + } + for_each_hwfn(edev, i) { + p_hwfn = &edev->hwfns[i]; + params.opaque_fid = p_hwfn->hw_info.opaque_fid; + rc = ecore_sp_vport_update(p_hwfn, &params, + ECORE_SPQ_MODE_EBLOCK, NULL); + if (rc != ECORE_SUCCESS) { + DP_ERR(edev, "Failed to update vport\n"); + break; + } + } + DP_INFO(edev, "vport is %s\n", flg ? "activated" : "deactivated"); + + return rc; +} + +static void +qede_update_sge_tpa_params(struct ecore_sge_tpa_params *sge_tpa_params, + uint16_t mtu, bool enable) +{ + /* Enable LRO in split mode */ + sge_tpa_params->tpa_ipv4_en_flg = enable; + sge_tpa_params->tpa_ipv6_en_flg = enable; + sge_tpa_params->tpa_ipv4_tunn_en_flg = enable; + sge_tpa_params->tpa_ipv6_tunn_en_flg = enable; + /* set if tpa enable changes */ + sge_tpa_params->update_tpa_en_flg = 1; + /* set if tpa parameters should be handled */ + sge_tpa_params->update_tpa_param_flg = enable; + + sge_tpa_params->max_buffers_per_cqe = 20; + /* Enable TPA in split mode. In this mode each TPA segment + * starts on the new BD, so there is one BD per segment. + */ + sge_tpa_params->tpa_pkt_split_flg = 1; + sge_tpa_params->tpa_hdr_data_split_flg = 0; + sge_tpa_params->tpa_gro_consistent_flg = 0; + sge_tpa_params->tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM; + sge_tpa_params->tpa_max_size = 0x7FFF; + sge_tpa_params->tpa_min_size_to_start = mtu / 2; + sge_tpa_params->tpa_min_size_to_cont = mtu / 2; +} + +/* Enable/disable LRO via vport-update */ +int qede_enable_tpa(struct rte_eth_dev *eth_dev, bool flg) +{ + struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); + struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); + struct ecore_sp_vport_update_params params; + struct ecore_sge_tpa_params tpa_params; + struct ecore_hwfn *p_hwfn; + int rc; + int i; + + memset(&params, 0, sizeof(struct ecore_sp_vport_update_params)); + memset(&tpa_params, 0, sizeof(struct ecore_sge_tpa_params)); + qede_update_sge_tpa_params(&tpa_params, qdev->mtu, flg); + params.vport_id = 0; + params.sge_tpa_params = &tpa_params; + for_each_hwfn(edev, i) { + p_hwfn = &edev->hwfns[i]; + params.opaque_fid = p_hwfn->hw_info.opaque_fid; + rc = ecore_sp_vport_update(p_hwfn, &params, + ECORE_SPQ_MODE_EBLOCK, NULL); + if (rc != ECORE_SUCCESS) { + DP_ERR(edev, "Failed to update LRO\n"); + return -1; + } + } + qdev->enable_lro = flg; + eth_dev->data->lro = flg; + + DP_INFO(edev, "LRO is %s\n", flg ? 
"enabled" : "disabled"); + + return 0; +} + +static int +qed_configure_filter_rx_mode(struct rte_eth_dev *eth_dev, + enum qed_filter_rx_mode_type type) +{ + struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); + struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); + struct ecore_filter_accept_flags flags; + + memset(&flags, 0, sizeof(flags)); + + flags.update_rx_mode_config = 1; + flags.update_tx_mode_config = 1; + flags.rx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED | + ECORE_ACCEPT_MCAST_MATCHED | + ECORE_ACCEPT_BCAST; + + flags.tx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED | + ECORE_ACCEPT_MCAST_MATCHED | + ECORE_ACCEPT_BCAST; + + if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) { + flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED; + if (IS_VF(edev)) { + flags.tx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED; + DP_INFO(edev, "Enabling Tx unmatched flag for VF\n"); + } + } else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) { + flags.rx_accept_filter |= ECORE_ACCEPT_MCAST_UNMATCHED; + } else if (type == (QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC | + QED_FILTER_RX_MODE_TYPE_PROMISC)) { + flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED | + ECORE_ACCEPT_MCAST_UNMATCHED; + } + + return ecore_filter_accept_cmd(edev, 0, flags, false, false, + ECORE_SPQ_MODE_CB, NULL); +} + +int +qede_ucast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast, + bool add) +{ + struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); + struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); + struct qede_ucast_entry *tmp = NULL; + struct qede_ucast_entry *u; + struct rte_ether_addr *mac_addr; + + mac_addr = (struct rte_ether_addr *)ucast->mac; + if (add) { + SLIST_FOREACH(tmp, &qdev->uc_list_head, list) { + if ((memcmp(mac_addr, &tmp->mac, + RTE_ETHER_ADDR_LEN) == 0) && + ucast->vni == tmp->vni && + ucast->vlan == tmp->vlan) { + DP_INFO(edev, "Unicast MAC is already added" + " with vlan = %u, vni = %u\n", + ucast->vlan, ucast->vni); + return 0; + } + } + u = rte_malloc(NULL, sizeof(struct qede_ucast_entry), + RTE_CACHE_LINE_SIZE); + if (!u) { + DP_ERR(edev, "Did not allocate memory for ucast\n"); + return -ENOMEM; + } + rte_ether_addr_copy(mac_addr, &u->mac); + u->vlan = ucast->vlan; + u->vni = ucast->vni; + SLIST_INSERT_HEAD(&qdev->uc_list_head, u, list); + qdev->num_uc_addr++; + } else { + SLIST_FOREACH(tmp, &qdev->uc_list_head, list) { + if ((memcmp(mac_addr, &tmp->mac, + RTE_ETHER_ADDR_LEN) == 0) && + ucast->vlan == tmp->vlan && + ucast->vni == tmp->vni) + break; + } + if (tmp == NULL) { + DP_INFO(edev, "Unicast MAC is not found\n"); + return -EINVAL; + } + SLIST_REMOVE(&qdev->uc_list_head, tmp, qede_ucast_entry, list); + qdev->num_uc_addr--; + } + + return 0; +} + +static int +qede_add_mcast_filters(struct rte_eth_dev *eth_dev, + struct rte_ether_addr *mc_addrs, + uint32_t mc_addrs_num) +{ + struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); + struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); + struct ecore_filter_mcast mcast; + struct qede_mcast_entry *m = NULL; + uint8_t i; + int rc; + + for (i = 0; i < mc_addrs_num; i++) { + m = rte_malloc(NULL, sizeof(struct qede_mcast_entry), + RTE_CACHE_LINE_SIZE); + if (!m) { + DP_ERR(edev, "Did not allocate memory for mcast\n"); + return -ENOMEM; + } + rte_ether_addr_copy(&mc_addrs[i], &m->mac); + SLIST_INSERT_HEAD(&qdev->mc_list_head, m, list); + } + memset(&mcast, 0, sizeof(mcast)); + mcast.num_mc_addrs = mc_addrs_num; + mcast.opcode = ECORE_FILTER_ADD; + for (i = 0; i < mc_addrs_num; i++) + rte_ether_addr_copy(&mc_addrs[i], (struct rte_ether_addr *) + &mcast.mac[i]); + 
rc = ecore_filter_mcast_cmd(edev, &mcast, ECORE_SPQ_MODE_CB, NULL); + if (rc != ECORE_SUCCESS) { + DP_ERR(edev, "Failed to add multicast filter (rc = %d\n)", rc); + return -1; + } + + return 0; +} + +static int qede_del_mcast_filters(struct rte_eth_dev *eth_dev) +{ + struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); + struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); + struct qede_mcast_entry *tmp = NULL; + struct ecore_filter_mcast mcast; + int j; + int rc; + + memset(&mcast, 0, sizeof(mcast)); + mcast.num_mc_addrs = qdev->num_mc_addr; + mcast.opcode = ECORE_FILTER_REMOVE; + j = 0; + SLIST_FOREACH(tmp, &qdev->mc_list_head, list) { + rte_ether_addr_copy(&tmp->mac, + (struct rte_ether_addr *)&mcast.mac[j]); + j++; + } + rc = ecore_filter_mcast_cmd(edev, &mcast, ECORE_SPQ_MODE_CB, NULL); + if (rc != ECORE_SUCCESS) { + DP_ERR(edev, "Failed to delete multicast filter\n"); + return -1; + } + /* Init the list */ + while (!SLIST_EMPTY(&qdev->mc_list_head)) { + tmp = SLIST_FIRST(&qdev->mc_list_head); + SLIST_REMOVE_HEAD(&qdev->mc_list_head, list); + } + SLIST_INIT(&qdev->mc_list_head); + + return 0; +} + +enum _ecore_status_t +qede_mac_int_ops(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast, + bool add) +{ + struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); + struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); + enum _ecore_status_t rc = ECORE_INVAL; + + if (add && (qdev->num_uc_addr >= qdev->dev_info.num_mac_filters)) { + DP_ERR(edev, "Ucast filter table limit exceeded," + " Please enable promisc mode\n"); + return ECORE_INVAL; + } + + rc = qede_ucast_filter(eth_dev, ucast, add); + if (rc == 0) + rc = ecore_filter_ucast_cmd(edev, ucast, + ECORE_SPQ_MODE_CB, NULL); + /* Indicate error only for add filter operation. + * Delete filter operations are not severe. 
+ */ + if ((rc != ECORE_SUCCESS) && add) + DP_ERR(edev, "MAC filter failed, rc = %d, op = %d\n", + rc, add); + + return rc; +} + +static int +qede_mac_addr_add(struct rte_eth_dev *eth_dev, struct rte_ether_addr *mac_addr, + __rte_unused uint32_t index, __rte_unused uint32_t pool) +{ + struct ecore_filter_ucast ucast; + int re; + + if (!rte_is_valid_assigned_ether_addr(mac_addr)) + return -EINVAL; + + qede_set_ucast_cmn_params(&ucast); + ucast.opcode = ECORE_FILTER_ADD; + ucast.type = ECORE_FILTER_MAC; + rte_ether_addr_copy(mac_addr, (struct rte_ether_addr *)&ucast.mac); + re = (int)qede_mac_int_ops(eth_dev, &ucast, 1); + return re; +} + +static void +qede_mac_addr_remove(struct rte_eth_dev *eth_dev, uint32_t index) +{ + struct qede_dev *qdev = eth_dev->data->dev_private; + struct ecore_dev *edev = &qdev->edev; + struct ecore_filter_ucast ucast; + + PMD_INIT_FUNC_TRACE(edev); + + if (index >= qdev->dev_info.num_mac_filters) { + DP_ERR(edev, "Index %u is above MAC filter limit %u\n", + index, qdev->dev_info.num_mac_filters); + return; + } + + if (!rte_is_valid_assigned_ether_addr(&eth_dev->data->mac_addrs[index])) + return; + + qede_set_ucast_cmn_params(&ucast); + ucast.opcode = ECORE_FILTER_REMOVE; + ucast.type = ECORE_FILTER_MAC; + + /* Use the index maintained by rte */ + rte_ether_addr_copy(&eth_dev->data->mac_addrs[index], + (struct rte_ether_addr *)&ucast.mac); + + qede_mac_int_ops(eth_dev, &ucast, false); +} + +static int +qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct rte_ether_addr *mac_addr) +{ + struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); + struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); + + if (IS_VF(edev) && !ecore_vf_check_mac(ECORE_LEADING_HWFN(edev), + mac_addr->addr_bytes)) { + DP_ERR(edev, "Setting MAC address is not allowed\n"); + return -EPERM; + } + + qede_mac_addr_remove(eth_dev, 0); + + return qede_mac_addr_add(eth_dev, mac_addr, 0, 0); +} + +void qede_config_accept_any_vlan(struct qede_dev *qdev, bool flg) +{ + struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); + struct ecore_sp_vport_update_params params; + struct ecore_hwfn *p_hwfn; + uint8_t i; + int rc; + + memset(&params, 0, sizeof(struct ecore_sp_vport_update_params)); + params.vport_id = 0; + params.update_accept_any_vlan_flg = 1; + params.accept_any_vlan = flg; + for_each_hwfn(edev, i) { + p_hwfn = &edev->hwfns[i]; + params.opaque_fid = p_hwfn->hw_info.opaque_fid; + rc = ecore_sp_vport_update(p_hwfn, &params, + ECORE_SPQ_MODE_EBLOCK, NULL); + if (rc != ECORE_SUCCESS) { + DP_ERR(edev, "Failed to configure accept-any-vlan\n"); + return; + } + } + + DP_INFO(edev, "%s accept-any-vlan\n", flg ? "enabled" : "disabled"); +} + +static int qede_vlan_stripping(struct rte_eth_dev *eth_dev, bool flg) +{ + struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); + struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); + struct ecore_sp_vport_update_params params; + struct ecore_hwfn *p_hwfn; + uint8_t i; + int rc; + + memset(&params, 0, sizeof(struct ecore_sp_vport_update_params)); + params.vport_id = 0; + params.update_inner_vlan_removal_flg = 1; + params.inner_vlan_removal_flg = flg; + for_each_hwfn(edev, i) { + p_hwfn = &edev->hwfns[i]; + params.opaque_fid = p_hwfn->hw_info.opaque_fid; + rc = ecore_sp_vport_update(p_hwfn, &params, + ECORE_SPQ_MODE_EBLOCK, NULL); + if (rc != ECORE_SUCCESS) { + DP_ERR(edev, "Failed to update vport\n"); + return -1; + } + } + + qdev->vlan_strip_flg = flg; + + DP_INFO(edev, "VLAN stripping %s\n", flg ? 
"enabled" : "disabled"); + return 0; +} + +static int qede_vlan_filter_set(struct rte_eth_dev *eth_dev, + uint16_t vlan_id, int on) +{ + struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); + struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); + struct qed_dev_eth_info *dev_info = &qdev->dev_info; + struct qede_vlan_entry *tmp = NULL; + struct qede_vlan_entry *vlan; + struct ecore_filter_ucast ucast; + int rc; + + if (on) { + if (qdev->configured_vlans == dev_info->num_vlan_filters) { + DP_ERR(edev, "Reached max VLAN filter limit" + " enabling accept_any_vlan\n"); + qede_config_accept_any_vlan(qdev, true); + return 0; + } + + SLIST_FOREACH(tmp, &qdev->vlan_list_head, list) { + if (tmp->vid == vlan_id) { + DP_INFO(edev, "VLAN %u already configured\n", + vlan_id); + return 0; + } + } + + vlan = rte_malloc(NULL, sizeof(struct qede_vlan_entry), + RTE_CACHE_LINE_SIZE); + + if (!vlan) { + DP_ERR(edev, "Did not allocate memory for VLAN\n"); + return -ENOMEM; + } + + qede_set_ucast_cmn_params(&ucast); + ucast.opcode = ECORE_FILTER_ADD; + ucast.type = ECORE_FILTER_VLAN; + ucast.vlan = vlan_id; + rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB, + NULL); + if (rc != 0) { + DP_ERR(edev, "Failed to add VLAN %u rc %d\n", vlan_id, + rc); + rte_free(vlan); + } else { + vlan->vid = vlan_id; + SLIST_INSERT_HEAD(&qdev->vlan_list_head, vlan, list); + qdev->configured_vlans++; + DP_INFO(edev, "VLAN %u added, configured_vlans %u\n", + vlan_id, qdev->configured_vlans); + } + } else { + SLIST_FOREACH(tmp, &qdev->vlan_list_head, list) { + if (tmp->vid == vlan_id) + break; + } + + if (!tmp) { + if (qdev->configured_vlans == 0) { + DP_INFO(edev, + "No VLAN filters configured yet\n"); + return 0; + } + + DP_ERR(edev, "VLAN %u not configured\n", vlan_id); + return -EINVAL; + } + + SLIST_REMOVE(&qdev->vlan_list_head, tmp, qede_vlan_entry, list); + + qede_set_ucast_cmn_params(&ucast); + ucast.opcode = ECORE_FILTER_REMOVE; + ucast.type = ECORE_FILTER_VLAN; + ucast.vlan = vlan_id; + rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB, + NULL); + if (rc != 0) { + DP_ERR(edev, "Failed to delete VLAN %u rc %d\n", + vlan_id, rc); + } else { + qdev->configured_vlans--; + DP_INFO(edev, "VLAN %u removed configured_vlans %u\n", + vlan_id, qdev->configured_vlans); + } + } + + return rc; +} + +static int qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask) +{ + struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); + struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); + uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads; + + if (mask & ETH_VLAN_STRIP_MASK) { + if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP) + (void)qede_vlan_stripping(eth_dev, 1); + else + (void)qede_vlan_stripping(eth_dev, 0); + } + + if (mask & ETH_VLAN_FILTER_MASK) { + /* VLAN filtering kicks in when a VLAN is added */ + if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER) { + qede_vlan_filter_set(eth_dev, 0, 1); + } else { + if (qdev->configured_vlans > 1) { /* Excluding VLAN0 */ + DP_ERR(edev, + " Please remove existing VLAN filters" + " before disabling VLAN filtering\n"); + /* Signal app that VLAN filtering is still + * enabled + */ + eth_dev->data->dev_conf.rxmode.offloads |= + DEV_RX_OFFLOAD_VLAN_FILTER; + } else { + qede_vlan_filter_set(eth_dev, 0, 0); + } + } + } + + if (mask & ETH_VLAN_EXTEND_MASK) + DP_ERR(edev, "Extend VLAN not supported\n"); + + qdev->vlan_offload_mask = mask; + + DP_INFO(edev, "VLAN offload mask %d\n", mask); + + return 0; +} + +static void qede_prandom_bytes(uint32_t *buff) +{ + uint8_t i; + + srand((unsigned 
int)time(NULL)); + for (i = 0; i < ECORE_RSS_KEY_SIZE; i++) + buff[i] = rand(); +} + +int qede_config_rss(struct rte_eth_dev *eth_dev) +{ + struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); + struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); + uint32_t def_rss_key[ECORE_RSS_KEY_SIZE]; + struct rte_eth_rss_reta_entry64 reta_conf[2]; + struct rte_eth_rss_conf rss_conf; + uint32_t i, id, pos, q; + + rss_conf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf; + if (!rss_conf.rss_key) { + DP_INFO(edev, "Applying driver default key\n"); + rss_conf.rss_key_len = ECORE_RSS_KEY_SIZE * sizeof(uint32_t); + qede_prandom_bytes(&def_rss_key[0]); + rss_conf.rss_key = (uint8_t *)&def_rss_key[0]; + } + + /* Configure RSS hash */ + if (qede_rss_hash_update(eth_dev, &rss_conf)) + return -EINVAL; + + /* Configure default RETA */ + memset(reta_conf, 0, sizeof(reta_conf)); + for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) + reta_conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX; + + for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) { + id = i / RTE_RETA_GROUP_SIZE; + pos = i % RTE_RETA_GROUP_SIZE; + q = i % QEDE_RSS_COUNT(eth_dev); + reta_conf[id].reta[pos] = q; + } + if (qede_rss_reta_update(eth_dev, &reta_conf[0], + ECORE_RSS_IND_TABLE_SIZE)) + return -EINVAL; + + return 0; +} + +static void qede_fastpath_start(struct ecore_dev *edev) +{ + struct ecore_hwfn *p_hwfn; + int i; + + for_each_hwfn(edev, i) { + p_hwfn = &edev->hwfns[i]; + ecore_hw_start_fastpath(p_hwfn); + } +} + +static int qede_dev_start(struct rte_eth_dev *eth_dev) +{ + struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); + struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); + struct rte_eth_rxmode *rxmode = ð_dev->data->dev_conf.rxmode; + + PMD_INIT_FUNC_TRACE(edev); + + /* Update MTU only if it has changed */ + if (qdev->new_mtu && qdev->new_mtu != qdev->mtu) { + if (qede_update_mtu(eth_dev, qdev->new_mtu)) + goto err; + qdev->mtu = qdev->new_mtu; + qdev->new_mtu = 0; + } + + /* Configure TPA parameters */ + if (rxmode->offloads & DEV_RX_OFFLOAD_TCP_LRO) { + if (qede_enable_tpa(eth_dev, true)) + return -EINVAL; + /* Enable scatter mode for LRO */ + if (!eth_dev->data->scattered_rx) + rxmode->offloads |= DEV_RX_OFFLOAD_SCATTER; + } + + /* Start queues */ + if (qede_start_queues(eth_dev)) + goto err; + + if (IS_PF(edev)) + qede_reset_queue_stats(qdev, true); + + /* Newer SR-IOV PF driver expects RX/TX queues to be started before + * enabling RSS. Hence RSS configuration is deferred up to this point. + * Also, we would like to retain similar behavior in PF case, so we + * don't do PF/VF specific check here. + */ + if (eth_dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) + if (qede_config_rss(eth_dev)) + goto err; + + /* Enable vport*/ + if (qede_activate_vport(eth_dev, true)) + goto err; + + /* Bring-up the link */ + qede_dev_set_link_state(eth_dev, true); + + /* Update link status */ + qede_link_update(eth_dev, 0); + + /* Start/resume traffic */ + qede_fastpath_start(edev); + + /* Assign I/O handlers */ + qede_assign_rxtx_handlers(eth_dev, false); + + DP_INFO(edev, "Device started\n"); + + return 0; +err: + DP_ERR(edev, "Device start fails\n"); + return -1; /* common error code is < 0 */ +} + +static void qede_dev_stop(struct rte_eth_dev *eth_dev) +{ + struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); + struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); + + PMD_INIT_FUNC_TRACE(edev); + + /* Bring the link down */ + qede_dev_set_link_state(eth_dev, false); + + /* Update link status */ + qede_link_update(eth_dev, 0); + + /* Replace I/O functions with dummy ones. 
It cannot + * be set to NULL because rte_eth_rx_burst() doesn't check for NULL. + */ + qede_assign_rxtx_handlers(eth_dev, true); + + /* Disable vport */ + if (qede_activate_vport(eth_dev, false)) + return; + + if (qdev->enable_lro) + qede_enable_tpa(eth_dev, false); + + /* Stop queues */ + qede_stop_queues(eth_dev); + + /* Disable traffic */ + ecore_hw_stop_fastpath(edev); /* TBD - loop */ + + DP_INFO(edev, "Device is stopped\n"); +} + +static const char * const valid_args[] = { + QEDE_NPAR_TX_SWITCHING, + QEDE_VF_TX_SWITCHING, + NULL, +}; + +static int qede_args_check(const char *key, const char *val, void *opaque) +{ + unsigned long tmp; + int ret = 0; + struct rte_eth_dev *eth_dev = opaque; + struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); + struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); + + errno = 0; + tmp = strtoul(val, NULL, 0); + if (errno) { + DP_INFO(edev, "%s: \"%s\" is not a valid integer", key, val); + return errno; + } + + if ((strcmp(QEDE_NPAR_TX_SWITCHING, key) == 0) || + ((strcmp(QEDE_VF_TX_SWITCHING, key) == 0) && IS_VF(edev))) { + qdev->enable_tx_switching = !!tmp; + DP_INFO(edev, "Disabling %s tx-switching\n", + strcmp(QEDE_NPAR_TX_SWITCHING, key) ? + "VF" : "NPAR"); + } + + return ret; +} + +static int qede_args(struct rte_eth_dev *eth_dev) +{ + struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(eth_dev->device); + struct rte_kvargs *kvlist; + struct rte_devargs *devargs; + int ret; + int i; + + devargs = pci_dev->device.devargs; + if (!devargs) + return 0; /* return success */ + + kvlist = rte_kvargs_parse(devargs->args, valid_args); + if (kvlist == NULL) + return -EINVAL; + + /* Process parameters. */ + for (i = 0; (valid_args[i] != NULL); ++i) { + if (rte_kvargs_count(kvlist, valid_args[i])) { + ret = rte_kvargs_process(kvlist, valid_args[i], + qede_args_check, eth_dev); + if (ret != ECORE_SUCCESS) { + rte_kvargs_free(kvlist); + return ret; + } + } + } + rte_kvargs_free(kvlist); + + return 0; +} + +static int qede_dev_configure(struct rte_eth_dev *eth_dev) +{ + struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); + struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); + struct rte_eth_rxmode *rxmode = ð_dev->data->dev_conf.rxmode; + uint8_t num_rxqs; + uint8_t num_txqs; + int ret; + + PMD_INIT_FUNC_TRACE(edev); + + if (rxmode->mq_mode & ETH_MQ_RX_RSS_FLAG) + rxmode->offloads |= DEV_RX_OFFLOAD_RSS_HASH; + + /* We need to have min 1 RX queue.There is no min check in + * rte_eth_dev_configure(), so we are checking it here. 
+ */ + if (eth_dev->data->nb_rx_queues == 0) { + DP_ERR(edev, "Minimum one RX queue is required\n"); + return -EINVAL; + } + + /* Enable Tx switching by default */ + qdev->enable_tx_switching = 1; + + /* Parse devargs and fix up rxmode */ + if (qede_args(eth_dev)) + DP_NOTICE(edev, false, + "Invalid devargs supplied, requested change will not take effect\n"); + + if (!(rxmode->mq_mode == ETH_MQ_RX_NONE || + rxmode->mq_mode == ETH_MQ_RX_RSS)) { + DP_ERR(edev, "Unsupported multi-queue mode\n"); + return -ENOTSUP; + } + /* Flow director mode check */ + if (qede_check_fdir_support(eth_dev)) + return -ENOTSUP; + + /* Allocate/reallocate fastpath resources only for new queue config */ + num_txqs = eth_dev->data->nb_tx_queues * edev->num_hwfns; + num_rxqs = eth_dev->data->nb_rx_queues * edev->num_hwfns; + if (qdev->num_tx_queues != num_txqs || + qdev->num_rx_queues != num_rxqs) { + qede_dealloc_fp_resc(eth_dev); + qdev->num_tx_queues = num_txqs; + qdev->num_rx_queues = num_rxqs; + if (qede_alloc_fp_resc(qdev)) + return -ENOMEM; + } + + /* If jumbo enabled adjust MTU */ + if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) + eth_dev->data->mtu = + eth_dev->data->dev_conf.rxmode.max_rx_pkt_len - + RTE_ETHER_HDR_LEN - QEDE_ETH_OVERHEAD; + + if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER) + eth_dev->data->scattered_rx = 1; + + if (qede_start_vport(qdev, eth_dev->data->mtu)) + return -1; + + qdev->mtu = eth_dev->data->mtu; + + /* Enable VLAN offloads by default */ + ret = qede_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK | + ETH_VLAN_FILTER_MASK); + if (ret) + return ret; + + DP_INFO(edev, "Device configured with RSS=%d TSS=%d\n", + QEDE_RSS_COUNT(eth_dev), QEDE_TSS_COUNT(eth_dev)); + + if (ECORE_IS_CMT(edev)) + DP_INFO(edev, "Actual HW queues for CMT mode - RX = %d TX = %d\n", + qdev->num_rx_queues, qdev->num_tx_queues); + + + return 0; +} + +/* Info about HW descriptor ring limitations */ +static const struct rte_eth_desc_lim qede_rx_desc_lim = { + .nb_max = 0x8000, /* 32K */ + .nb_min = 128, + .nb_align = 128 /* lowest common multiple */ +}; + +static const struct rte_eth_desc_lim qede_tx_desc_lim = { + .nb_max = 0x8000, /* 32K */ + .nb_min = 256, + .nb_align = 256, + .nb_seg_max = ETH_TX_MAX_BDS_PER_LSO_PACKET, + .nb_mtu_seg_max = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET +}; + +static int +qede_dev_info_get(struct rte_eth_dev *eth_dev, + struct rte_eth_dev_info *dev_info) +{ + struct qede_dev *qdev = eth_dev->data->dev_private; + struct ecore_dev *edev = &qdev->edev; + struct qed_link_output link; + uint32_t speed_cap = 0; + + PMD_INIT_FUNC_TRACE(edev); + + dev_info->min_rx_bufsize = (uint32_t)QEDE_MIN_RX_BUFF_SIZE; + dev_info->max_rx_pktlen = (uint32_t)ETH_TX_MAX_NON_LSO_PKT_LEN; + dev_info->rx_desc_lim = qede_rx_desc_lim; + dev_info->tx_desc_lim = qede_tx_desc_lim; + + if (IS_PF(edev)) + dev_info->max_rx_queues = (uint16_t)RTE_MIN( + QEDE_MAX_RSS_CNT(qdev), QEDE_PF_NUM_CONNS / 2); + else + dev_info->max_rx_queues = (uint16_t)RTE_MIN( + QEDE_MAX_RSS_CNT(qdev), ECORE_MAX_VF_CHAINS_PER_PF); + /* Since CMT mode internally doubles the number of queues */ + if (ECORE_IS_CMT(edev)) + dev_info->max_rx_queues = dev_info->max_rx_queues / 2; + + dev_info->max_tx_queues = dev_info->max_rx_queues; + + dev_info->max_mac_addrs = qdev->dev_info.num_mac_filters; + dev_info->max_vfs = 0; + dev_info->reta_size = ECORE_RSS_IND_TABLE_SIZE; + dev_info->hash_key_size = ECORE_RSS_KEY_SIZE * sizeof(uint32_t); + dev_info->flow_type_rss_offloads = (uint64_t)QEDE_RSS_OFFLOAD_ALL; + dev_info->rx_offload_capa = 
(DEV_RX_OFFLOAD_IPV4_CKSUM | + DEV_RX_OFFLOAD_UDP_CKSUM | + DEV_RX_OFFLOAD_TCP_CKSUM | + DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | + DEV_RX_OFFLOAD_TCP_LRO | + DEV_RX_OFFLOAD_KEEP_CRC | + DEV_RX_OFFLOAD_SCATTER | + DEV_RX_OFFLOAD_JUMBO_FRAME | + DEV_RX_OFFLOAD_VLAN_FILTER | + DEV_RX_OFFLOAD_VLAN_STRIP | + DEV_RX_OFFLOAD_RSS_HASH); + dev_info->rx_queue_offload_capa = 0; + + /* TX offloads are on a per-packet basis, so it is applicable + * to both at port and queue levels. + */ + dev_info->tx_offload_capa = (DEV_TX_OFFLOAD_VLAN_INSERT | + DEV_TX_OFFLOAD_IPV4_CKSUM | + DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_TCP_CKSUM | + DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | + DEV_TX_OFFLOAD_MULTI_SEGS | + DEV_TX_OFFLOAD_TCP_TSO | + DEV_TX_OFFLOAD_VXLAN_TNL_TSO | + DEV_TX_OFFLOAD_GENEVE_TNL_TSO); + dev_info->tx_queue_offload_capa = dev_info->tx_offload_capa; + + dev_info->default_txconf = (struct rte_eth_txconf) { + .offloads = DEV_TX_OFFLOAD_MULTI_SEGS, + }; + + dev_info->default_rxconf = (struct rte_eth_rxconf) { + /* Packets are always dropped if no descriptors are available */ + .rx_drop_en = 1, + .offloads = 0, + }; + + memset(&link, 0, sizeof(struct qed_link_output)); + qdev->ops->common->get_link(edev, &link); + if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) + speed_cap |= ETH_LINK_SPEED_1G; + if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) + speed_cap |= ETH_LINK_SPEED_10G; + if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) + speed_cap |= ETH_LINK_SPEED_25G; + if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) + speed_cap |= ETH_LINK_SPEED_40G; + if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) + speed_cap |= ETH_LINK_SPEED_50G; + if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) + speed_cap |= ETH_LINK_SPEED_100G; + dev_info->speed_capa = speed_cap; + + return 0; +} + +/* return 0 means link status changed, -1 means not changed */ +int +qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete) +{ + struct qede_dev *qdev = eth_dev->data->dev_private; + struct ecore_dev *edev = &qdev->edev; + struct qed_link_output q_link; + struct rte_eth_link link; + uint16_t link_duplex; + + memset(&q_link, 0, sizeof(q_link)); + memset(&link, 0, sizeof(link)); + + qdev->ops->common->get_link(edev, &q_link); + + /* Link Speed */ + link.link_speed = q_link.speed; + + /* Link Mode */ + switch (q_link.duplex) { + case QEDE_DUPLEX_HALF: + link_duplex = ETH_LINK_HALF_DUPLEX; + break; + case QEDE_DUPLEX_FULL: + link_duplex = ETH_LINK_FULL_DUPLEX; + break; + case QEDE_DUPLEX_UNKNOWN: + default: + link_duplex = -1; + } + link.link_duplex = link_duplex; + + /* Link Status */ + link.link_status = q_link.link_up ? ETH_LINK_UP : ETH_LINK_DOWN; + + /* AN */ + link.link_autoneg = (q_link.supported_caps & QEDE_SUPPORTED_AUTONEG) ? 
+ ETH_LINK_AUTONEG : ETH_LINK_FIXED; + + DP_INFO(edev, "Link - Speed %u Mode %u AN %u Status %u\n", + link.link_speed, link.link_duplex, + link.link_autoneg, link.link_status); + + return rte_eth_linkstatus_set(eth_dev, &link); +} + +static int qede_promiscuous_enable(struct rte_eth_dev *eth_dev) +{ + struct qede_dev *qdev = eth_dev->data->dev_private; + struct ecore_dev *edev = &qdev->edev; + enum qed_filter_rx_mode_type type = QED_FILTER_RX_MODE_TYPE_PROMISC; + enum _ecore_status_t ecore_status; + + PMD_INIT_FUNC_TRACE(edev); + + if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1) + type |= QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC; + + ecore_status = qed_configure_filter_rx_mode(eth_dev, type); + + return ecore_status >= ECORE_SUCCESS ? 0 : -EAGAIN; +} + +static int qede_promiscuous_disable(struct rte_eth_dev *eth_dev) +{ + struct qede_dev *qdev = eth_dev->data->dev_private; + struct ecore_dev *edev = &qdev->edev; + enum _ecore_status_t ecore_status; + + PMD_INIT_FUNC_TRACE(edev); + + if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1) + ecore_status = qed_configure_filter_rx_mode(eth_dev, + QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC); + else + ecore_status = qed_configure_filter_rx_mode(eth_dev, + QED_FILTER_RX_MODE_TYPE_REGULAR); + + return ecore_status >= ECORE_SUCCESS ? 0 : -EAGAIN; +} + +static void qede_poll_sp_sb_cb(void *param) +{ + struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param; + struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); + struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); + int rc; + + qede_interrupt_action(ECORE_LEADING_HWFN(edev)); + qede_interrupt_action(&edev->hwfns[1]); + + rc = rte_eal_alarm_set(QEDE_SP_TIMER_PERIOD, + qede_poll_sp_sb_cb, + (void *)eth_dev); + if (rc != 0) { + DP_ERR(edev, "Unable to start periodic" + " timer rc %d\n", rc); + } +} + +static void qede_dev_close(struct rte_eth_dev *eth_dev) +{ + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); + struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); + struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); + + PMD_INIT_FUNC_TRACE(edev); + + /* dev_stop() shall cleanup fp resources in hw but without releasing + * dma memories and sw structures so that dev_start() can be called + * by the app without reconfiguration. 
However, in dev_close() we + * can release all the resources and device can be brought up newly + */ + if (eth_dev->data->dev_started) + qede_dev_stop(eth_dev); + + if (qdev->vport_started) + qede_stop_vport(edev); + qdev->vport_started = false; + qede_fdir_dealloc_resc(eth_dev); + qede_dealloc_fp_resc(eth_dev); + + eth_dev->data->nb_rx_queues = 0; + eth_dev->data->nb_tx_queues = 0; + + qdev->ops->common->slowpath_stop(edev); + qdev->ops->common->remove(edev); + rte_intr_disable(&pci_dev->intr_handle); + + switch (pci_dev->intr_handle.type) { + case RTE_INTR_HANDLE_UIO_INTX: + case RTE_INTR_HANDLE_VFIO_LEGACY: + rte_intr_callback_unregister(&pci_dev->intr_handle, + qede_interrupt_handler_intx, + (void *)eth_dev); + break; + default: + rte_intr_callback_unregister(&pci_dev->intr_handle, + qede_interrupt_handler, + (void *)eth_dev); + } + + if (ECORE_IS_CMT(edev)) + rte_eal_alarm_cancel(qede_poll_sp_sb_cb, (void *)eth_dev); +} + +static int +qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats) +{ + struct qede_dev *qdev = eth_dev->data->dev_private; + struct ecore_dev *edev = &qdev->edev; + struct ecore_eth_stats stats; + unsigned int i = 0, j = 0, qid, idx, hw_fn; + unsigned int rxq_stat_cntrs, txq_stat_cntrs; + struct qede_tx_queue *txq; + + ecore_get_vport_stats(edev, &stats); + + /* RX Stats */ + eth_stats->ipackets = stats.common.rx_ucast_pkts + + stats.common.rx_mcast_pkts + stats.common.rx_bcast_pkts; + + eth_stats->ibytes = stats.common.rx_ucast_bytes + + stats.common.rx_mcast_bytes + stats.common.rx_bcast_bytes; + + eth_stats->ierrors = stats.common.rx_crc_errors + + stats.common.rx_align_errors + + stats.common.rx_carrier_errors + + stats.common.rx_oversize_packets + + stats.common.rx_jabbers + stats.common.rx_undersize_packets; + + eth_stats->rx_nombuf = stats.common.no_buff_discards; + + eth_stats->imissed = stats.common.mftag_filter_discards + + stats.common.mac_filter_discards + + stats.common.no_buff_discards + + stats.common.brb_truncates + stats.common.brb_discards; + + /* TX stats */ + eth_stats->opackets = stats.common.tx_ucast_pkts + + stats.common.tx_mcast_pkts + stats.common.tx_bcast_pkts; + + eth_stats->obytes = stats.common.tx_ucast_bytes + + stats.common.tx_mcast_bytes + stats.common.tx_bcast_bytes; + + eth_stats->oerrors = stats.common.tx_err_drop_pkts; + + /* Queue stats */ + rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(eth_dev), + RTE_ETHDEV_QUEUE_STAT_CNTRS); + txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(eth_dev), + RTE_ETHDEV_QUEUE_STAT_CNTRS); + if (rxq_stat_cntrs != (unsigned int)QEDE_RSS_COUNT(eth_dev) || + txq_stat_cntrs != (unsigned int)QEDE_TSS_COUNT(eth_dev)) + DP_VERBOSE(edev, ECORE_MSG_DEBUG, + "Not all the queue stats will be displayed. 
Set" + " RTE_ETHDEV_QUEUE_STAT_CNTRS config param" + " appropriately and retry.\n"); + + for (qid = 0; qid < eth_dev->data->nb_rx_queues; qid++) { + eth_stats->q_ipackets[i] = 0; + eth_stats->q_errors[i] = 0; + + for_each_hwfn(edev, hw_fn) { + idx = qid * edev->num_hwfns + hw_fn; + + eth_stats->q_ipackets[i] += + *(uint64_t *) + (((char *)(qdev->fp_array[idx].rxq)) + + offsetof(struct qede_rx_queue, + rcv_pkts)); + eth_stats->q_errors[i] += + *(uint64_t *) + (((char *)(qdev->fp_array[idx].rxq)) + + offsetof(struct qede_rx_queue, + rx_hw_errors)) + + *(uint64_t *) + (((char *)(qdev->fp_array[idx].rxq)) + + offsetof(struct qede_rx_queue, + rx_alloc_errors)); + } + + i++; + if (i == rxq_stat_cntrs) + break; + } + + for (qid = 0; qid < eth_dev->data->nb_tx_queues; qid++) { + eth_stats->q_opackets[j] = 0; + + for_each_hwfn(edev, hw_fn) { + idx = qid * edev->num_hwfns + hw_fn; + + txq = qdev->fp_array[idx].txq; + eth_stats->q_opackets[j] += + *((uint64_t *)(uintptr_t) + (((uint64_t)(uintptr_t)(txq)) + + offsetof(struct qede_tx_queue, + xmit_pkts))); + } + + j++; + if (j == txq_stat_cntrs) + break; + } + + return 0; +} + +static unsigned +qede_get_xstats_count(struct qede_dev *qdev) { + struct rte_eth_dev *dev = (struct rte_eth_dev *)qdev->ethdev; + + if (ECORE_IS_BB(&qdev->edev)) + return RTE_DIM(qede_xstats_strings) + + RTE_DIM(qede_bb_xstats_strings) + + (RTE_DIM(qede_rxq_xstats_strings) * + QEDE_RSS_COUNT(dev) * qdev->edev.num_hwfns); + else + return RTE_DIM(qede_xstats_strings) + + RTE_DIM(qede_ah_xstats_strings) + + (RTE_DIM(qede_rxq_xstats_strings) * + QEDE_RSS_COUNT(dev)); +} + +static int +qede_get_xstats_names(struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, + __rte_unused unsigned int limit) +{ + struct qede_dev *qdev = dev->data->dev_private; + struct ecore_dev *edev = &qdev->edev; + const unsigned int stat_cnt = qede_get_xstats_count(qdev); + unsigned int i, qid, hw_fn, stat_idx = 0; + + if (xstats_names == NULL) + return stat_cnt; + + for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) { + strlcpy(xstats_names[stat_idx].name, + qede_xstats_strings[i].name, + sizeof(xstats_names[stat_idx].name)); + stat_idx++; + } + + if (ECORE_IS_BB(edev)) { + for (i = 0; i < RTE_DIM(qede_bb_xstats_strings); i++) { + strlcpy(xstats_names[stat_idx].name, + qede_bb_xstats_strings[i].name, + sizeof(xstats_names[stat_idx].name)); + stat_idx++; + } + } else { + for (i = 0; i < RTE_DIM(qede_ah_xstats_strings); i++) { + strlcpy(xstats_names[stat_idx].name, + qede_ah_xstats_strings[i].name, + sizeof(xstats_names[stat_idx].name)); + stat_idx++; + } + } + + for (qid = 0; qid < QEDE_RSS_COUNT(dev); qid++) { + for_each_hwfn(edev, hw_fn) { + for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) { + snprintf(xstats_names[stat_idx].name, + RTE_ETH_XSTATS_NAME_SIZE, + "%.4s%d.%d%s", + qede_rxq_xstats_strings[i].name, + hw_fn, qid, + qede_rxq_xstats_strings[i].name + 4); + stat_idx++; + } + } + } + + return stat_cnt; +} + +static int +qede_get_xstats(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, + unsigned int n) +{ + struct qede_dev *qdev = dev->data->dev_private; + struct ecore_dev *edev = &qdev->edev; + struct ecore_eth_stats stats; + const unsigned int num = qede_get_xstats_count(qdev); + unsigned int i, qid, hw_fn, fpidx, stat_idx = 0; + + if (n < num) + return num; + + ecore_get_vport_stats(edev, &stats); + + for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) { + xstats[stat_idx].value = *(uint64_t *)(((char *)&stats) + + qede_xstats_strings[i].offset); + xstats[stat_idx].id = 
stat_idx; + stat_idx++; + } + + if (ECORE_IS_BB(edev)) { + for (i = 0; i < RTE_DIM(qede_bb_xstats_strings); i++) { + xstats[stat_idx].value = + *(uint64_t *)(((char *)&stats) + + qede_bb_xstats_strings[i].offset); + xstats[stat_idx].id = stat_idx; + stat_idx++; + } + } else { + for (i = 0; i < RTE_DIM(qede_ah_xstats_strings); i++) { + xstats[stat_idx].value = + *(uint64_t *)(((char *)&stats) + + qede_ah_xstats_strings[i].offset); + xstats[stat_idx].id = stat_idx; + stat_idx++; + } + } + + for (qid = 0; qid < dev->data->nb_rx_queues; qid++) { + for_each_hwfn(edev, hw_fn) { + for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) { + fpidx = qid * edev->num_hwfns + hw_fn; + xstats[stat_idx].value = *(uint64_t *) + (((char *)(qdev->fp_array[fpidx].rxq)) + + qede_rxq_xstats_strings[i].offset); + xstats[stat_idx].id = stat_idx; + stat_idx++; + } + + } + } + + return stat_idx; +} + +static int +qede_reset_xstats(struct rte_eth_dev *dev) +{ + struct qede_dev *qdev = dev->data->dev_private; + struct ecore_dev *edev = &qdev->edev; + + ecore_reset_vport_stats(edev); + qede_reset_queue_stats(qdev, true); + + return 0; +} + +int qede_dev_set_link_state(struct rte_eth_dev *eth_dev, bool link_up) +{ + struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); + struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); + struct qed_link_params link_params; + int rc; + + DP_INFO(edev, "setting link state %d\n", link_up); + memset(&link_params, 0, sizeof(link_params)); + link_params.link_up = link_up; + rc = qdev->ops->common->set_link(edev, &link_params); + if (rc != ECORE_SUCCESS) + DP_ERR(edev, "Unable to set link state %d\n", link_up); + + return rc; +} + +static int qede_dev_set_link_up(struct rte_eth_dev *eth_dev) +{ + return qede_dev_set_link_state(eth_dev, true); +} + +static int qede_dev_set_link_down(struct rte_eth_dev *eth_dev) +{ + return qede_dev_set_link_state(eth_dev, false); +} + +static int qede_reset_stats(struct rte_eth_dev *eth_dev) +{ + struct qede_dev *qdev = eth_dev->data->dev_private; + struct ecore_dev *edev = &qdev->edev; + + ecore_reset_vport_stats(edev); + qede_reset_queue_stats(qdev, false); + + return 0; +} + +static int qede_allmulticast_enable(struct rte_eth_dev *eth_dev) +{ + enum qed_filter_rx_mode_type type = + QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC; + enum _ecore_status_t ecore_status; + + if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1) + type |= QED_FILTER_RX_MODE_TYPE_PROMISC; + + ecore_status = qed_configure_filter_rx_mode(eth_dev, type); + + return ecore_status >= ECORE_SUCCESS ? 0 : -EAGAIN; +} + +static int qede_allmulticast_disable(struct rte_eth_dev *eth_dev) +{ + enum _ecore_status_t ecore_status; + + if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1) + ecore_status = qed_configure_filter_rx_mode(eth_dev, + QED_FILTER_RX_MODE_TYPE_PROMISC); + else + ecore_status = qed_configure_filter_rx_mode(eth_dev, + QED_FILTER_RX_MODE_TYPE_REGULAR); + + return ecore_status >= ECORE_SUCCESS ? 
0 : -EAGAIN; +} + +static int +qede_set_mc_addr_list(struct rte_eth_dev *eth_dev, + struct rte_ether_addr *mc_addrs, + uint32_t mc_addrs_num) +{ + struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); + struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); + uint8_t i; + + if (mc_addrs_num > ECORE_MAX_MC_ADDRS) { + DP_ERR(edev, "Reached max multicast filters limit," + "Please enable multicast promisc mode\n"); + return -ENOSPC; + } + + for (i = 0; i < mc_addrs_num; i++) { + if (!rte_is_multicast_ether_addr(&mc_addrs[i])) { + DP_ERR(edev, "Not a valid multicast MAC\n"); + return -EINVAL; + } + } + + /* Flush all existing entries */ + if (qede_del_mcast_filters(eth_dev)) + return -1; + + /* Set new mcast list */ + return qede_add_mcast_filters(eth_dev, mc_addrs, mc_addrs_num); +} + +/* Update MTU via vport-update without doing port restart. + * The vport must be deactivated before calling this API. + */ +int qede_update_mtu(struct rte_eth_dev *eth_dev, uint16_t mtu) +{ + struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); + struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); + struct ecore_hwfn *p_hwfn; + int rc; + int i; + + if (IS_PF(edev)) { + struct ecore_sp_vport_update_params params; + + memset(¶ms, 0, sizeof(struct ecore_sp_vport_update_params)); + params.vport_id = 0; + params.mtu = mtu; + params.vport_id = 0; + for_each_hwfn(edev, i) { + p_hwfn = &edev->hwfns[i]; + params.opaque_fid = p_hwfn->hw_info.opaque_fid; + rc = ecore_sp_vport_update(p_hwfn, ¶ms, + ECORE_SPQ_MODE_EBLOCK, NULL); + if (rc != ECORE_SUCCESS) + goto err; + } + } else { + for_each_hwfn(edev, i) { + p_hwfn = &edev->hwfns[i]; + rc = ecore_vf_pf_update_mtu(p_hwfn, mtu); + if (rc == ECORE_INVAL) { + DP_INFO(edev, "VF MTU Update TLV not supported\n"); + /* Recreate vport */ + rc = qede_start_vport(qdev, mtu); + if (rc != ECORE_SUCCESS) + goto err; + + /* Restore config lost due to vport stop */ + if (eth_dev->data->promiscuous) + qede_promiscuous_enable(eth_dev); + else + qede_promiscuous_disable(eth_dev); + + if (eth_dev->data->all_multicast) + qede_allmulticast_enable(eth_dev); + else + qede_allmulticast_disable(eth_dev); + + qede_vlan_offload_set(eth_dev, + qdev->vlan_offload_mask); + } else if (rc != ECORE_SUCCESS) { + goto err; + } + } + } + DP_INFO(edev, "%s MTU updated to %u\n", IS_PF(edev) ? 
"PF" : "VF", mtu); + + return 0; + +err: + DP_ERR(edev, "Failed to update MTU\n"); + return -1; +} + +static int qede_flow_ctrl_set(struct rte_eth_dev *eth_dev, + struct rte_eth_fc_conf *fc_conf) +{ + struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); + struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); + struct qed_link_output current_link; + struct qed_link_params params; + + memset(¤t_link, 0, sizeof(current_link)); + qdev->ops->common->get_link(edev, ¤t_link); + + memset(¶ms, 0, sizeof(params)); + params.override_flags |= QED_LINK_OVERRIDE_PAUSE_CONFIG; + if (fc_conf->autoneg) { + if (!(current_link.supported_caps & QEDE_SUPPORTED_AUTONEG)) { + DP_ERR(edev, "Autoneg not supported\n"); + return -EINVAL; + } + params.pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE; + } + + /* Pause is assumed to be supported (SUPPORTED_Pause) */ + if (fc_conf->mode == RTE_FC_FULL) + params.pause_config |= (QED_LINK_PAUSE_TX_ENABLE | + QED_LINK_PAUSE_RX_ENABLE); + if (fc_conf->mode == RTE_FC_TX_PAUSE) + params.pause_config |= QED_LINK_PAUSE_TX_ENABLE; + if (fc_conf->mode == RTE_FC_RX_PAUSE) + params.pause_config |= QED_LINK_PAUSE_RX_ENABLE; + + params.link_up = true; + (void)qdev->ops->common->set_link(edev, ¶ms); + + return 0; +} + +static int qede_flow_ctrl_get(struct rte_eth_dev *eth_dev, + struct rte_eth_fc_conf *fc_conf) +{ + struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); + struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); + struct qed_link_output current_link; + + memset(¤t_link, 0, sizeof(current_link)); + qdev->ops->common->get_link(edev, ¤t_link); + + if (current_link.pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE) + fc_conf->autoneg = true; + + if (current_link.pause_config & (QED_LINK_PAUSE_RX_ENABLE | + QED_LINK_PAUSE_TX_ENABLE)) + fc_conf->mode = RTE_FC_FULL; + else if (current_link.pause_config & QED_LINK_PAUSE_RX_ENABLE) + fc_conf->mode = RTE_FC_RX_PAUSE; + else if (current_link.pause_config & QED_LINK_PAUSE_TX_ENABLE) + fc_conf->mode = RTE_FC_TX_PAUSE; + else + fc_conf->mode = RTE_FC_NONE; + + return 0; +} + +static const uint32_t * +qede_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev) +{ + static const uint32_t ptypes[] = { + RTE_PTYPE_L2_ETHER, + RTE_PTYPE_L2_ETHER_VLAN, + RTE_PTYPE_L3_IPV4, + RTE_PTYPE_L3_IPV6, + RTE_PTYPE_L4_TCP, + RTE_PTYPE_L4_UDP, + RTE_PTYPE_TUNNEL_VXLAN, + RTE_PTYPE_L4_FRAG, + RTE_PTYPE_TUNNEL_GENEVE, + RTE_PTYPE_TUNNEL_GRE, + /* Inner */ + RTE_PTYPE_INNER_L2_ETHER, + RTE_PTYPE_INNER_L2_ETHER_VLAN, + RTE_PTYPE_INNER_L3_IPV4, + RTE_PTYPE_INNER_L3_IPV6, + RTE_PTYPE_INNER_L4_TCP, + RTE_PTYPE_INNER_L4_UDP, + RTE_PTYPE_INNER_L4_FRAG, + RTE_PTYPE_UNKNOWN + }; + + if (eth_dev->rx_pkt_burst == qede_recv_pkts || + eth_dev->rx_pkt_burst == qede_recv_pkts_regular || + eth_dev->rx_pkt_burst == qede_recv_pkts_cmt) + return ptypes; + + return NULL; +} + +static void qede_init_rss_caps(uint8_t *rss_caps, uint64_t hf) +{ + *rss_caps = 0; + *rss_caps |= (hf & ETH_RSS_IPV4) ? ECORE_RSS_IPV4 : 0; + *rss_caps |= (hf & ETH_RSS_IPV6) ? ECORE_RSS_IPV6 : 0; + *rss_caps |= (hf & ETH_RSS_IPV6_EX) ? ECORE_RSS_IPV6 : 0; + *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? ECORE_RSS_IPV4_TCP : 0; + *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? ECORE_RSS_IPV6_TCP : 0; + *rss_caps |= (hf & ETH_RSS_IPV6_TCP_EX) ? ECORE_RSS_IPV6_TCP : 0; + *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_UDP) ? ECORE_RSS_IPV4_UDP : 0; + *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_UDP) ? 
ECORE_RSS_IPV6_UDP : 0; +} + +int qede_rss_hash_update(struct rte_eth_dev *eth_dev, + struct rte_eth_rss_conf *rss_conf) +{ + struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); + struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); + struct ecore_sp_vport_update_params vport_update_params; + struct ecore_rss_params rss_params; + struct ecore_hwfn *p_hwfn; + uint32_t *key = (uint32_t *)rss_conf->rss_key; + uint64_t hf = rss_conf->rss_hf; + uint8_t len = rss_conf->rss_key_len; + uint8_t idx, i, j, fpidx; + int rc; + + memset(&vport_update_params, 0, sizeof(vport_update_params)); + memset(&rss_params, 0, sizeof(rss_params)); + + DP_INFO(edev, "RSS hf = 0x%lx len = %u key = %p\n", + (unsigned long)hf, len, key); + + if (hf != 0) { + /* Enabling RSS */ + DP_INFO(edev, "Enabling rss\n"); + + /* RSS caps */ + qede_init_rss_caps(&rss_params.rss_caps, hf); + rss_params.update_rss_capabilities = 1; + + /* RSS hash key */ + if (key) { + if (len > (ECORE_RSS_KEY_SIZE * sizeof(uint32_t))) { + DP_ERR(edev, "RSS key length exceeds limit\n"); + return -EINVAL; + } + DP_INFO(edev, "Applying user supplied hash key\n"); + rss_params.update_rss_key = 1; + memcpy(&rss_params.rss_key, key, len); + } + rss_params.rss_enable = 1; + } + + rss_params.update_rss_config = 1; + /* tbl_size has to be set with capabilities */ + rss_params.rss_table_size_log = 7; + vport_update_params.vport_id = 0; + + for_each_hwfn(edev, i) { + /* pass the L2 handles instead of qids */ + for (j = 0 ; j < ECORE_RSS_IND_TABLE_SIZE ; j++) { + idx = j % QEDE_RSS_COUNT(eth_dev); + fpidx = idx * edev->num_hwfns + i; + rss_params.rss_ind_table[j] = + qdev->fp_array[fpidx].rxq->handle; + } + + vport_update_params.rss_params = &rss_params; + + p_hwfn = &edev->hwfns[i]; + vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid; + rc = ecore_sp_vport_update(p_hwfn, &vport_update_params, + ECORE_SPQ_MODE_EBLOCK, NULL); + if (rc) { + DP_ERR(edev, "vport-update for RSS failed\n"); + return rc; + } + } + qdev->rss_enable = rss_params.rss_enable; + + /* Update local structure for hash query */ + qdev->rss_conf.rss_hf = hf; + qdev->rss_conf.rss_key_len = len; + if (qdev->rss_enable) { + if (qdev->rss_conf.rss_key == NULL) { + qdev->rss_conf.rss_key = (uint8_t *)malloc(len); + if (qdev->rss_conf.rss_key == NULL) { + DP_ERR(edev, "No memory to store RSS key\n"); + return -ENOMEM; + } + } + if (key && len) { + DP_INFO(edev, "Storing RSS key\n"); + memcpy(qdev->rss_conf.rss_key, key, len); + } + } else if (!qdev->rss_enable && len == 0) { + if (qdev->rss_conf.rss_key) { + free(qdev->rss_conf.rss_key); + qdev->rss_conf.rss_key = NULL; + DP_INFO(edev, "Free RSS key\n"); + } + } + + return 0; +} + +static int qede_rss_hash_conf_get(struct rte_eth_dev *eth_dev, + struct rte_eth_rss_conf *rss_conf) +{ + struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); + + rss_conf->rss_hf = qdev->rss_conf.rss_hf; + rss_conf->rss_key_len = qdev->rss_conf.rss_key_len; + + if (rss_conf->rss_key && qdev->rss_conf.rss_key) + memcpy(rss_conf->rss_key, qdev->rss_conf.rss_key, + rss_conf->rss_key_len); + return 0; +} + +int qede_rss_reta_update(struct rte_eth_dev *eth_dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size) +{ + struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); + struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); + struct ecore_sp_vport_update_params vport_update_params; + struct ecore_rss_params *params; + uint16_t i, j, idx, fid, shift; + struct ecore_hwfn *p_hwfn; + uint8_t entry; + int rc = 0; + + if (reta_size > ETH_RSS_RETA_SIZE_128) { + DP_ERR(edev, 
"reta_size %d is not supported by hardware\n", + reta_size); + return -EINVAL; + } + + memset(&vport_update_params, 0, sizeof(vport_update_params)); + params = rte_zmalloc("qede_rss", sizeof(*params), RTE_CACHE_LINE_SIZE); + if (params == NULL) { + DP_ERR(edev, "failed to allocate memory\n"); + return -ENOMEM; + } + + params->update_rss_ind_table = 1; + params->rss_table_size_log = 7; + params->update_rss_config = 1; + + vport_update_params.vport_id = 0; + /* Use the current value of rss_enable */ + params->rss_enable = qdev->rss_enable; + vport_update_params.rss_params = params; + + for_each_hwfn(edev, i) { + for (j = 0; j < reta_size; j++) { + idx = j / RTE_RETA_GROUP_SIZE; + shift = j % RTE_RETA_GROUP_SIZE; + if (reta_conf[idx].mask & (1ULL << shift)) { + entry = reta_conf[idx].reta[shift]; + fid = entry * edev->num_hwfns + i; + /* Pass rxq handles to ecore */ + params->rss_ind_table[j] = + qdev->fp_array[fid].rxq->handle; + /* Update the local copy for RETA query cmd */ + qdev->rss_ind_table[j] = entry; + } + } + + p_hwfn = &edev->hwfns[i]; + vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid; + rc = ecore_sp_vport_update(p_hwfn, &vport_update_params, + ECORE_SPQ_MODE_EBLOCK, NULL); + if (rc) { + DP_ERR(edev, "vport-update for RSS failed\n"); + goto out; + } + } + +out: + rte_free(params); + return rc; +} + +static int qede_rss_reta_query(struct rte_eth_dev *eth_dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size) +{ + struct qede_dev *qdev = eth_dev->data->dev_private; + struct ecore_dev *edev = &qdev->edev; + uint16_t i, idx, shift; + uint8_t entry; + + if (reta_size > ETH_RSS_RETA_SIZE_128) { + DP_ERR(edev, "reta_size %d is not supported\n", + reta_size); + return -EINVAL; + } + + for (i = 0; i < reta_size; i++) { + idx = i / RTE_RETA_GROUP_SIZE; + shift = i % RTE_RETA_GROUP_SIZE; + if (reta_conf[idx].mask & (1ULL << shift)) { + entry = qdev->rss_ind_table[i]; + reta_conf[idx].reta[shift] = entry; + } + } + + return 0; +} + + + +static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) +{ + struct qede_dev *qdev = QEDE_INIT_QDEV(dev); + struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); + struct rte_eth_dev_info dev_info = {0}; + struct qede_fastpath *fp; + uint32_t max_rx_pkt_len; + uint32_t frame_size; + uint16_t bufsz; + bool restart = false; + int i, rc; + + PMD_INIT_FUNC_TRACE(edev); + rc = qede_dev_info_get(dev, &dev_info); + if (rc != 0) { + DP_ERR(edev, "Error during getting ethernet device info\n"); + return rc; + } + max_rx_pkt_len = mtu + QEDE_MAX_ETHER_HDR_LEN; + frame_size = max_rx_pkt_len; + if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen) { + DP_ERR(edev, "MTU %u out of range, %u is maximum allowable\n", + mtu, dev_info.max_rx_pktlen - RTE_ETHER_HDR_LEN - + QEDE_ETH_OVERHEAD); + return -EINVAL; + } + if (!dev->data->scattered_rx && + frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) { + DP_INFO(edev, "MTU greater than minimum RX buffer size of %u\n", + dev->data->min_rx_buf_size); + return -EINVAL; + } + if (dev->data->dev_started) { + dev->data->dev_started = 0; + qede_dev_stop(dev); + restart = true; + } + rte_delay_ms(1000); + qdev->new_mtu = mtu; + + /* Fix up RX buf size for all queues of the port */ + for (i = 0; i < qdev->num_rx_queues; i++) { + fp = &qdev->fp_array[i]; + if (fp->rxq != NULL) { + bufsz = (uint16_t)rte_pktmbuf_data_room_size( + fp->rxq->mb_pool) - RTE_PKTMBUF_HEADROOM; + /* cache align the mbuf size to simplfy rx_buf_size + * calculation + */ + bufsz = 
QEDE_FLOOR_TO_CACHE_LINE_SIZE(bufsz); + rc = qede_calc_rx_buf_size(dev, bufsz, frame_size); + if (rc < 0) + return rc; + + fp->rxq->rx_buf_size = rc; + } + } + if (max_rx_pkt_len > RTE_ETHER_MAX_LEN) + dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME; + else + dev->data->dev_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME; + + if (!dev->data->dev_started && restart) { + qede_dev_start(dev); + dev->data->dev_started = 1; + } + + /* update max frame size */ + dev->data->dev_conf.rxmode.max_rx_pkt_len = max_rx_pkt_len; + + return 0; +} + +static int +qede_dev_reset(struct rte_eth_dev *dev) +{ + int ret; + + ret = qede_eth_dev_uninit(dev); + if (ret) + return ret; + + return qede_eth_dev_init(dev); +} + +static const struct eth_dev_ops qede_eth_dev_ops = { + .dev_configure = qede_dev_configure, + .dev_infos_get = qede_dev_info_get, + .rx_queue_setup = qede_rx_queue_setup, + .rx_queue_release = qede_rx_queue_release, + .rx_descriptor_status = qede_rx_descriptor_status, + .tx_queue_setup = qede_tx_queue_setup, + .tx_queue_release = qede_tx_queue_release, + .dev_start = qede_dev_start, + .dev_reset = qede_dev_reset, + .dev_set_link_up = qede_dev_set_link_up, + .dev_set_link_down = qede_dev_set_link_down, + .link_update = qede_link_update, + .promiscuous_enable = qede_promiscuous_enable, + .promiscuous_disable = qede_promiscuous_disable, + .allmulticast_enable = qede_allmulticast_enable, + .allmulticast_disable = qede_allmulticast_disable, + .set_mc_addr_list = qede_set_mc_addr_list, + .dev_stop = qede_dev_stop, + .dev_close = qede_dev_close, + .stats_get = qede_get_stats, + .stats_reset = qede_reset_stats, + .xstats_get = qede_get_xstats, + .xstats_reset = qede_reset_xstats, + .xstats_get_names = qede_get_xstats_names, + .mac_addr_add = qede_mac_addr_add, + .mac_addr_remove = qede_mac_addr_remove, + .mac_addr_set = qede_mac_addr_set, + .vlan_offload_set = qede_vlan_offload_set, + .vlan_filter_set = qede_vlan_filter_set, + .flow_ctrl_set = qede_flow_ctrl_set, + .flow_ctrl_get = qede_flow_ctrl_get, + .dev_supported_ptypes_get = qede_dev_supported_ptypes_get, + .rss_hash_update = qede_rss_hash_update, + .rss_hash_conf_get = qede_rss_hash_conf_get, + .reta_update = qede_rss_reta_update, + .reta_query = qede_rss_reta_query, + .mtu_set = qede_set_mtu, + .filter_ctrl = qede_dev_filter_ctrl, + .udp_tunnel_port_add = qede_udp_dst_port_add, + .udp_tunnel_port_del = qede_udp_dst_port_del, + .fw_version_get = qede_fw_version_get, +}; + +static const struct eth_dev_ops qede_eth_vf_dev_ops = { + .dev_configure = qede_dev_configure, + .dev_infos_get = qede_dev_info_get, + .rx_queue_setup = qede_rx_queue_setup, + .rx_queue_release = qede_rx_queue_release, + .rx_descriptor_status = qede_rx_descriptor_status, + .tx_queue_setup = qede_tx_queue_setup, + .tx_queue_release = qede_tx_queue_release, + .dev_start = qede_dev_start, + .dev_reset = qede_dev_reset, + .dev_set_link_up = qede_dev_set_link_up, + .dev_set_link_down = qede_dev_set_link_down, + .link_update = qede_link_update, + .promiscuous_enable = qede_promiscuous_enable, + .promiscuous_disable = qede_promiscuous_disable, + .allmulticast_enable = qede_allmulticast_enable, + .allmulticast_disable = qede_allmulticast_disable, + .set_mc_addr_list = qede_set_mc_addr_list, + .dev_stop = qede_dev_stop, + .dev_close = qede_dev_close, + .stats_get = qede_get_stats, + .stats_reset = qede_reset_stats, + .xstats_get = qede_get_xstats, + .xstats_reset = qede_reset_xstats, + .xstats_get_names = qede_get_xstats_names, + .vlan_offload_set = 
qede_vlan_offload_set, + .vlan_filter_set = qede_vlan_filter_set, + .dev_supported_ptypes_get = qede_dev_supported_ptypes_get, + .rss_hash_update = qede_rss_hash_update, + .rss_hash_conf_get = qede_rss_hash_conf_get, + .reta_update = qede_rss_reta_update, + .reta_query = qede_rss_reta_query, + .mtu_set = qede_set_mtu, + .udp_tunnel_port_add = qede_udp_dst_port_add, + .udp_tunnel_port_del = qede_udp_dst_port_del, + .mac_addr_add = qede_mac_addr_add, + .mac_addr_remove = qede_mac_addr_remove, + .mac_addr_set = qede_mac_addr_set, + .fw_version_get = qede_fw_version_get, +}; + +static void qede_update_pf_params(struct ecore_dev *edev) +{ + struct ecore_pf_params pf_params; + + memset(&pf_params, 0, sizeof(struct ecore_pf_params)); + pf_params.eth_pf_params.num_cons = QEDE_PF_NUM_CONNS; + pf_params.eth_pf_params.num_arfs_filters = QEDE_RFS_MAX_FLTR; + qed_ops->common->update_pf_params(edev, &pf_params); +} + +static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf) +{ + struct rte_pci_device *pci_dev; + struct rte_pci_addr pci_addr; + struct qede_dev *adapter; + struct ecore_dev *edev; + struct qed_dev_eth_info dev_info; + struct qed_slowpath_params params; + static bool do_once = true; + uint8_t bulletin_change; + uint8_t vf_mac[RTE_ETHER_ADDR_LEN]; + uint8_t is_mac_forced; + bool is_mac_exist; + /* Fix up ecore debug level */ + uint32_t dp_module = ~0 & ~ECORE_MSG_HW; + uint8_t dp_level = ECORE_LEVEL_VERBOSE; + uint32_t int_mode; + int rc; + + /* Extract key data structures */ + adapter = eth_dev->data->dev_private; + adapter->ethdev = eth_dev; + edev = &adapter->edev; + pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); + pci_addr = pci_dev->addr; + + PMD_INIT_FUNC_TRACE(edev); + + snprintf(edev->name, NAME_SIZE, PCI_SHORT_PRI_FMT ":dpdk-port-%u", + pci_addr.bus, pci_addr.devid, pci_addr.function, + eth_dev->data->port_id); + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) { + DP_ERR(edev, "Skipping device init from secondary process\n"); + return 0; + } + + rte_eth_copy_pci_info(eth_dev, pci_dev); + + /* @DPDK */ + edev->vendor_id = pci_dev->id.vendor_id; + edev->device_id = pci_dev->id.device_id; + + qed_ops = qed_get_eth_ops(); + if (!qed_ops) { + DP_ERR(edev, "Failed to get qed_eth_ops_pass\n"); + rc = -EINVAL; + goto err; + } + + DP_INFO(edev, "Starting qede probe\n"); + rc = qed_ops->common->probe(edev, pci_dev, dp_module, + dp_level, is_vf); + if (rc != 0) { + DP_ERR(edev, "qede probe failed rc %d\n", rc); + rc = -ENODEV; + goto err; + } + qede_update_pf_params(edev); + + switch (pci_dev->intr_handle.type) { + case RTE_INTR_HANDLE_UIO_INTX: + case RTE_INTR_HANDLE_VFIO_LEGACY: + int_mode = ECORE_INT_MODE_INTA; + rte_intr_callback_register(&pci_dev->intr_handle, + qede_interrupt_handler_intx, + (void *)eth_dev); + break; + default: + int_mode = ECORE_INT_MODE_MSIX; + rte_intr_callback_register(&pci_dev->intr_handle, + qede_interrupt_handler, + (void *)eth_dev); + } + + if (rte_intr_enable(&pci_dev->intr_handle)) { + DP_ERR(edev, "rte_intr_enable() failed\n"); + rc = -ENODEV; + goto err; + } + + /* Start the Slowpath-process */ + memset(¶ms, 0, sizeof(struct qed_slowpath_params)); + + params.int_mode = int_mode; + params.drv_major = QEDE_PMD_VERSION_MAJOR; + params.drv_minor = QEDE_PMD_VERSION_MINOR; + params.drv_rev = QEDE_PMD_VERSION_REVISION; + params.drv_eng = QEDE_PMD_VERSION_PATCH; + strncpy((char *)params.name, QEDE_PMD_VER_PREFIX, + QEDE_PMD_DRV_VER_STR_SIZE); + + qede_assign_rxtx_handlers(eth_dev, true); + eth_dev->tx_pkt_prepare = qede_xmit_prep_pkts; + + /* For CMT mode 
device do periodic polling for slowpath events. + * This is required since uio device uses only one MSI-x + * interrupt vector but we need one for each engine. + */ + if (ECORE_IS_CMT(edev) && IS_PF(edev)) { + rc = rte_eal_alarm_set(QEDE_SP_TIMER_PERIOD, + qede_poll_sp_sb_cb, + (void *)eth_dev); + if (rc != 0) { + DP_ERR(edev, "Unable to start periodic" + " timer rc %d\n", rc); + rc = -EINVAL; + goto err; + } + } + + rc = qed_ops->common->slowpath_start(edev, ¶ms); + if (rc) { + DP_ERR(edev, "Cannot start slowpath rc = %d\n", rc); + rte_eal_alarm_cancel(qede_poll_sp_sb_cb, + (void *)eth_dev); + rc = -ENODEV; + goto err; + } + + rc = qed_ops->fill_dev_info(edev, &dev_info); + if (rc) { + DP_ERR(edev, "Cannot get device_info rc %d\n", rc); + qed_ops->common->slowpath_stop(edev); + qed_ops->common->remove(edev); + rte_eal_alarm_cancel(qede_poll_sp_sb_cb, + (void *)eth_dev); + rc = -ENODEV; + goto err; + } + + qede_alloc_etherdev(adapter, &dev_info); + + if (do_once) { + qede_print_adapter_info(eth_dev); + do_once = false; + } + + adapter->ops->common->set_name(edev, edev->name); + + if (!is_vf) + adapter->dev_info.num_mac_filters = + (uint32_t)RESC_NUM(ECORE_LEADING_HWFN(edev), + ECORE_MAC); + else + ecore_vf_get_num_mac_filters(ECORE_LEADING_HWFN(edev), + (uint32_t *)&adapter->dev_info.num_mac_filters); + + /* Allocate memory for storing MAC addr */ + eth_dev->data->mac_addrs = rte_zmalloc(edev->name, + (RTE_ETHER_ADDR_LEN * + adapter->dev_info.num_mac_filters), + RTE_CACHE_LINE_SIZE); + + if (eth_dev->data->mac_addrs == NULL) { + DP_ERR(edev, "Failed to allocate MAC address\n"); + qed_ops->common->slowpath_stop(edev); + qed_ops->common->remove(edev); + rte_eal_alarm_cancel(qede_poll_sp_sb_cb, + (void *)eth_dev); + return -ENOMEM; + } + + if (!is_vf) { + rte_ether_addr_copy((struct rte_ether_addr *)edev->hwfns[0]. + hw_info.hw_mac_addr, + ð_dev->data->mac_addrs[0]); + rte_ether_addr_copy(ð_dev->data->mac_addrs[0], + &adapter->primary_mac); + } else { + ecore_vf_read_bulletin(ECORE_LEADING_HWFN(edev), + &bulletin_change); + if (bulletin_change) { + is_mac_exist = + ecore_vf_bulletin_get_forced_mac( + ECORE_LEADING_HWFN(edev), + vf_mac, + &is_mac_forced); + if (is_mac_exist) { + DP_INFO(edev, "VF macaddr received from PF\n"); + rte_ether_addr_copy( + (struct rte_ether_addr *)&vf_mac, + ð_dev->data->mac_addrs[0]); + rte_ether_addr_copy( + ð_dev->data->mac_addrs[0], + &adapter->primary_mac); + } else { + DP_ERR(edev, "No VF macaddr assigned\n"); + } + } + } + + eth_dev->dev_ops = (is_vf) ? 
&qede_eth_vf_dev_ops : &qede_eth_dev_ops; + + adapter->num_tx_queues = 0; + adapter->num_rx_queues = 0; + SLIST_INIT(&adapter->arfs_info.arfs_list_head); + SLIST_INIT(&adapter->vlan_list_head); + SLIST_INIT(&adapter->uc_list_head); + SLIST_INIT(&adapter->mc_list_head); + adapter->mtu = RTE_ETHER_MTU; + adapter->vport_started = false; + + /* VF tunnel offloads is enabled by default in PF driver */ + adapter->vxlan.num_filters = 0; + adapter->geneve.num_filters = 0; + adapter->ipgre.num_filters = 0; + if (is_vf) { + adapter->vxlan.enable = true; + adapter->vxlan.filter_type = ETH_TUNNEL_FILTER_IMAC | + ETH_TUNNEL_FILTER_IVLAN; + adapter->vxlan.udp_port = QEDE_VXLAN_DEF_PORT; + adapter->geneve.enable = true; + adapter->geneve.filter_type = ETH_TUNNEL_FILTER_IMAC | + ETH_TUNNEL_FILTER_IVLAN; + adapter->geneve.udp_port = QEDE_GENEVE_DEF_PORT; + adapter->ipgre.enable = true; + adapter->ipgre.filter_type = ETH_TUNNEL_FILTER_IMAC | + ETH_TUNNEL_FILTER_IVLAN; + } else { + adapter->vxlan.enable = false; + adapter->geneve.enable = false; + adapter->ipgre.enable = false; + } + + DP_INFO(edev, "MAC address : %02x:%02x:%02x:%02x:%02x:%02x\n", + adapter->primary_mac.addr_bytes[0], + adapter->primary_mac.addr_bytes[1], + adapter->primary_mac.addr_bytes[2], + adapter->primary_mac.addr_bytes[3], + adapter->primary_mac.addr_bytes[4], + adapter->primary_mac.addr_bytes[5]); + + DP_INFO(edev, "Device initialized\n"); + + return 0; + +err: + if (do_once) { + qede_print_adapter_info(eth_dev); + do_once = false; + } + return rc; +} + +static int qedevf_eth_dev_init(struct rte_eth_dev *eth_dev) +{ + return qede_common_dev_init(eth_dev, 1); +} + +static int qede_eth_dev_init(struct rte_eth_dev *eth_dev) +{ + return qede_common_dev_init(eth_dev, 0); +} + +static int qede_dev_common_uninit(struct rte_eth_dev *eth_dev) +{ + struct qede_dev *qdev = eth_dev->data->dev_private; + struct ecore_dev *edev = &qdev->edev; + + PMD_INIT_FUNC_TRACE(edev); + + /* only uninitialize in the primary process */ + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; + + /* safe to close dev here */ + qede_dev_close(eth_dev); + + eth_dev->dev_ops = NULL; + eth_dev->rx_pkt_burst = NULL; + eth_dev->tx_pkt_burst = NULL; + + return 0; +} + +static int qede_eth_dev_uninit(struct rte_eth_dev *eth_dev) +{ + return qede_dev_common_uninit(eth_dev); +} + +static int qedevf_eth_dev_uninit(struct rte_eth_dev *eth_dev) +{ + return qede_dev_common_uninit(eth_dev); +} + +static const struct rte_pci_id pci_id_qedevf_map[] = { +#define QEDEVF_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev) + { + QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_VF) + }, + { + QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_IOV) + }, + { + QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_IOV) + }, + {.vendor_id = 0,} +}; + +static const struct rte_pci_id pci_id_qede_map[] = { +#define QEDE_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev) + { + QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_57980E) + }, + { + QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_57980S) + }, + { + QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_40) + }, + { + QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_25) + }, + { + QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_100) + }, + { + QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_50) + }, + { + QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_50G) + }, + { + QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_10G) + }, + { + QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_40G) + }, + { + QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_25G) 
+ }, + {.vendor_id = 0,} +}; + +static int qedevf_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, + struct rte_pci_device *pci_dev) +{ + return rte_eth_dev_pci_generic_probe(pci_dev, + sizeof(struct qede_dev), qedevf_eth_dev_init); +} + +static int qedevf_eth_dev_pci_remove(struct rte_pci_device *pci_dev) +{ + return rte_eth_dev_pci_generic_remove(pci_dev, qedevf_eth_dev_uninit); +} + +static struct rte_pci_driver rte_qedevf_pmd = { + .id_table = pci_id_qedevf_map, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, + .probe = qedevf_eth_dev_pci_probe, + .remove = qedevf_eth_dev_pci_remove, +}; + +static int qede_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, + struct rte_pci_device *pci_dev) +{ + return rte_eth_dev_pci_generic_probe(pci_dev, + sizeof(struct qede_dev), qede_eth_dev_init); +} + +static int qede_eth_dev_pci_remove(struct rte_pci_device *pci_dev) +{ + return rte_eth_dev_pci_generic_remove(pci_dev, qede_eth_dev_uninit); +} + +static struct rte_pci_driver rte_qede_pmd = { + .id_table = pci_id_qede_map, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, + .probe = qede_eth_dev_pci_probe, + .remove = qede_eth_dev_pci_remove, +}; + +RTE_PMD_REGISTER_PCI(net_qede, rte_qede_pmd); +RTE_PMD_REGISTER_PCI_TABLE(net_qede, pci_id_qede_map); +RTE_PMD_REGISTER_KMOD_DEP(net_qede, "* igb_uio | uio_pci_generic | vfio-pci"); +RTE_PMD_REGISTER_PCI(net_qede_vf, rte_qedevf_pmd); +RTE_PMD_REGISTER_PCI_TABLE(net_qede_vf, pci_id_qedevf_map); +RTE_PMD_REGISTER_KMOD_DEP(net_qede_vf, "* igb_uio | vfio-pci"); + +RTE_INIT(qede_init_log) +{ + qede_logtype_init = rte_log_register("pmd.net.qede.init"); + if (qede_logtype_init >= 0) + rte_log_set_level(qede_logtype_init, RTE_LOG_NOTICE); + qede_logtype_driver = rte_log_register("pmd.net.qede.driver"); + if (qede_logtype_driver >= 0) + rte_log_set_level(qede_logtype_driver, RTE_LOG_NOTICE); +} diff --git a/src/spdk/dpdk/drivers/net/qede/qede_ethdev.h b/src/spdk/dpdk/drivers/net/qede/qede_ethdev.h new file mode 100644 index 000000000..b988a73f2 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/qede/qede_ethdev.h @@ -0,0 +1,316 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. + * All rights reserved. + * www.cavium.com + */ + + +#ifndef _QEDE_ETHDEV_H_ +#define _QEDE_ETHDEV_H_ + +#include + +#include +#include +#include +#include +#include + +/* ecore includes */ +#include "base/bcm_osal.h" +#include "base/ecore.h" +#include "base/ecore_dev_api.h" +#include "base/ecore_l2_api.h" +#include "base/ecore_vf_api.h" +#include "base/ecore_hsi_common.h" +#include "base/ecore_int_api.h" +#include "base/ecore_chain.h" +#include "base/ecore_status.h" +#include "base/ecore_hsi_eth.h" +#include "base/ecore_iov_api.h" +#include "base/ecore_cxt.h" +#include "base/nvm_cfg.h" +#include "base/ecore_sp_commands.h" +#include "base/ecore_l2.h" +#include "base/ecore_vf.h" + +#include "qede_logs.h" +#include "qede_if.h" +#include "qede_rxtx.h" + +#define qede_stringify1(x...) #x +#define qede_stringify(x...) qede_stringify1(x) + +/* Driver versions */ +#define QEDE_PMD_DRV_VER_STR_SIZE NAME_SIZE /* 128 */ +#define QEDE_PMD_VER_PREFIX "QEDE PMD" +#define QEDE_PMD_VERSION_MAJOR 2 +#define QEDE_PMD_VERSION_MINOR 11 +#define QEDE_PMD_VERSION_REVISION 3 +#define QEDE_PMD_VERSION_PATCH 1 + +#define QEDE_PMD_DRV_VERSION qede_stringify(QEDE_PMD_VERSION_MAJOR) "." \ + qede_stringify(QEDE_PMD_VERSION_MINOR) "." \ + qede_stringify(QEDE_PMD_VERSION_REVISION) "." 
\ + qede_stringify(QEDE_PMD_VERSION_PATCH) + +#define QEDE_PMD_BASE_VERSION qede_stringify(ECORE_MAJOR_VERSION) "." \ + qede_stringify(ECORE_MINOR_VERSION) "." \ + qede_stringify(ECORE_REVISION_VERSION) "." \ + qede_stringify(ECORE_ENGINEERING_VERSION) + +#define QEDE_PMD_FW_VERSION qede_stringify(FW_MAJOR_VERSION) "." \ + qede_stringify(FW_MINOR_VERSION) "." \ + qede_stringify(FW_REVISION_VERSION) "." \ + qede_stringify(FW_ENGINEERING_VERSION) + +#define QEDE_RSS_INDIR_INITED (1 << 0) +#define QEDE_RSS_KEY_INITED (1 << 1) +#define QEDE_RSS_CAPS_INITED (1 << 2) + +#define QEDE_MAX_RSS_CNT(edev) ((edev)->dev_info.num_queues) +#define QEDE_MAX_TSS_CNT(edev) ((edev)->dev_info.num_queues * \ + (edev)->dev_info.num_tc) + +#define QEDE_QUEUE_CNT(qdev) ((qdev)->num_queues) +#define QEDE_RSS_COUNT(dev) ((dev)->data->nb_rx_queues) +#define QEDE_TSS_COUNT(dev) ((dev)->data->nb_tx_queues) + +#define QEDE_DUPLEX_FULL 1 +#define QEDE_DUPLEX_HALF 2 +#define QEDE_DUPLEX_UNKNOWN 0xff + +#define QEDE_SUPPORTED_AUTONEG (1 << 6) +#define QEDE_SUPPORTED_PAUSE (1 << 13) + +#define QEDE_INIT_QDEV(eth_dev) (eth_dev->data->dev_private) + +#define QEDE_INIT_EDEV(adapter) (&((struct qede_dev *)adapter)->edev) + +#define QEDE_INIT(eth_dev) { \ + struct qede_dev *qdev = eth_dev->data->dev_private; \ + struct ecore_dev *edev = &qdev->edev; \ +} + +/************* QLogic 10G/25G/40G/50G/100G vendor/devices ids *************/ +#define PCI_VENDOR_ID_QLOGIC 0x1077 + +#define CHIP_NUM_57980E 0x1634 +#define CHIP_NUM_57980S 0x1629 +#define CHIP_NUM_VF 0x1630 +#define CHIP_NUM_57980S_40 0x1634 +#define CHIP_NUM_57980S_25 0x1656 +#define CHIP_NUM_57980S_IOV 0x1664 +#define CHIP_NUM_57980S_100 0x1644 +#define CHIP_NUM_57980S_50 0x1654 +#define CHIP_NUM_AH_50G 0x8070 +#define CHIP_NUM_AH_10G 0x8071 +#define CHIP_NUM_AH_40G 0x8072 +#define CHIP_NUM_AH_25G 0x8073 +#define CHIP_NUM_AH_IOV 0x8090 + +#define PCI_DEVICE_ID_QLOGIC_NX2_57980E CHIP_NUM_57980E +#define PCI_DEVICE_ID_QLOGIC_NX2_57980S CHIP_NUM_57980S +#define PCI_DEVICE_ID_QLOGIC_NX2_VF CHIP_NUM_VF +#define PCI_DEVICE_ID_QLOGIC_57980S_40 CHIP_NUM_57980S_40 +#define PCI_DEVICE_ID_QLOGIC_57980S_25 CHIP_NUM_57980S_25 +#define PCI_DEVICE_ID_QLOGIC_57980S_IOV CHIP_NUM_57980S_IOV +#define PCI_DEVICE_ID_QLOGIC_57980S_100 CHIP_NUM_57980S_100 +#define PCI_DEVICE_ID_QLOGIC_57980S_50 CHIP_NUM_57980S_50 +#define PCI_DEVICE_ID_QLOGIC_AH_50G CHIP_NUM_AH_50G +#define PCI_DEVICE_ID_QLOGIC_AH_10G CHIP_NUM_AH_10G +#define PCI_DEVICE_ID_QLOGIC_AH_40G CHIP_NUM_AH_40G +#define PCI_DEVICE_ID_QLOGIC_AH_25G CHIP_NUM_AH_25G +#define PCI_DEVICE_ID_QLOGIC_AH_IOV CHIP_NUM_AH_IOV + + + +extern char qede_fw_file[]; + +/* Number of PF connections - 32 RX + 32 TX */ +#define QEDE_PF_NUM_CONNS (64) + +/* Maximum number of flowdir filters */ +#define QEDE_RFS_MAX_FLTR (256) + +#define QEDE_MAX_MCAST_FILTERS (64) + +enum qed_filter_rx_mode_type { + QED_FILTER_RX_MODE_TYPE_REGULAR, + QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC, + QED_FILTER_RX_MODE_TYPE_PROMISC, +}; + +struct qede_vlan_entry { + SLIST_ENTRY(qede_vlan_entry) list; + uint16_t vid; +}; + +struct qede_mcast_entry { + struct rte_ether_addr mac; + SLIST_ENTRY(qede_mcast_entry) list; +}; + +struct qede_ucast_entry { + struct rte_ether_addr mac; + uint16_t vlan; + uint16_t vni; + SLIST_ENTRY(qede_ucast_entry) list; +}; + +#ifndef IPV6_ADDR_LEN +#define IPV6_ADDR_LEN (16) +#endif + +struct qede_arfs_tuple { + union { + uint32_t src_ipv4; + uint8_t src_ipv6[IPV6_ADDR_LEN]; + }; + + union { + uint32_t dst_ipv4; + uint8_t dst_ipv6[IPV6_ADDR_LEN]; + }; + + 
uint16_t src_port; + uint16_t dst_port; + uint16_t eth_proto; + uint8_t ip_proto; + + /* Describe filtering mode needed for this kind of filter */ + enum ecore_filter_config_mode mode; +}; + +struct qede_arfs_entry { + uint32_t soft_id; /* unused for now */ + uint16_t pkt_len; /* actual packet length to match */ + uint16_t rx_queue; /* queue to be steered to */ + bool is_drop; /* drop action */ + const struct rte_memzone *mz; /* mz used to hold L2 frame */ + struct qede_arfs_tuple tuple; + SLIST_ENTRY(qede_arfs_entry) list; +}; + +/* Opaque handle for rte flow managed by PMD */ +struct rte_flow { + struct qede_arfs_entry entry; +}; + +struct qede_arfs_info { + struct ecore_arfs_config_params arfs; + uint16_t filter_count; + SLIST_HEAD(arfs_list_head, qede_arfs_entry)arfs_list_head; +}; + +/* IANA assigned default UDP ports for encapsulation protocols */ +#define QEDE_VXLAN_DEF_PORT (4789) +#define QEDE_GENEVE_DEF_PORT (6081) + +struct qede_tunn_params { + bool enable; + uint16_t num_filters; + uint16_t filter_type; + uint16_t udp_port; +}; + +/* + * Structure to store private data for each port. + */ +struct qede_dev { + struct ecore_dev edev; + const struct qed_eth_ops *ops; + struct qed_dev_eth_info dev_info; + struct ecore_sb_info *sb_array; + struct qede_fastpath *fp_array; + struct qede_fastpath_cmt *fp_array_cmt; + uint16_t mtu; + uint16_t new_mtu; + bool enable_tx_switching; + bool rss_enable; + struct rte_eth_rss_conf rss_conf; + uint16_t rss_ind_table[ECORE_RSS_IND_TABLE_SIZE]; + uint64_t rss_hf; + uint8_t rss_key_len; + bool enable_lro; + uint8_t num_rx_queues; + uint8_t num_tx_queues; + SLIST_HEAD(vlan_list_head, qede_vlan_entry)vlan_list_head; + uint16_t configured_vlans; + bool accept_any_vlan; + struct rte_ether_addr primary_mac; + SLIST_HEAD(mc_list_head, qede_mcast_entry) mc_list_head; + uint16_t num_mc_addr; + SLIST_HEAD(uc_list_head, qede_ucast_entry) uc_list_head; + uint16_t num_uc_addr; + bool handle_hw_err; + struct qede_tunn_params vxlan; + struct qede_tunn_params geneve; + struct qede_tunn_params ipgre; + struct qede_arfs_info arfs_info; + bool vlan_strip_flg; + char drv_ver[QEDE_PMD_DRV_VER_STR_SIZE]; + bool vport_started; + int vlan_offload_mask; + void *ethdev; +}; + +static inline void qede_set_ucast_cmn_params(struct ecore_filter_ucast *ucast) +{ + memset(ucast, 0, sizeof(struct ecore_filter_ucast)); + ucast->is_rx_filter = true; + ucast->is_tx_filter = true; + /* ucast->assert_on_error = true; - For debug */ +} + + +/* Non-static functions */ +int qede_config_rss(struct rte_eth_dev *eth_dev); + +int qede_rss_hash_update(struct rte_eth_dev *eth_dev, + struct rte_eth_rss_conf *rss_conf); + +int qede_rss_reta_update(struct rte_eth_dev *eth_dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size); + +int qed_fill_eth_dev_info(struct ecore_dev *edev, + struct qed_dev_eth_info *info); +int qede_dev_set_link_state(struct rte_eth_dev *eth_dev, bool link_up); + +int qede_link_update(struct rte_eth_dev *eth_dev, + __rte_unused int wait_to_complete); + +int qede_dev_filter_ctrl(struct rte_eth_dev *dev, enum rte_filter_type type, + enum rte_filter_op op, void *arg); + +int qede_ntuple_filter_conf(struct rte_eth_dev *eth_dev, + enum rte_filter_op filter_op, void *arg); + +int qede_check_fdir_support(struct rte_eth_dev *eth_dev); + +uint16_t qede_fdir_construct_pkt(struct rte_eth_dev *eth_dev, + struct rte_eth_fdir_filter *fdir, + void *buff, + struct ecore_arfs_config_params *params); + +void qede_fdir_dealloc_resc(struct rte_eth_dev *eth_dev); + +int 
qede_activate_vport(struct rte_eth_dev *eth_dev, bool flg); + +int qede_update_mtu(struct rte_eth_dev *eth_dev, uint16_t mtu); + +int qede_enable_tpa(struct rte_eth_dev *eth_dev, bool flg); +int qede_udp_dst_port_del(struct rte_eth_dev *eth_dev, + struct rte_eth_udp_tunnel *tunnel_udp); +int qede_udp_dst_port_add(struct rte_eth_dev *eth_dev, + struct rte_eth_udp_tunnel *tunnel_udp); + +enum _ecore_status_t +qede_mac_int_ops(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast, + bool add); +void qede_config_accept_any_vlan(struct qede_dev *qdev, bool flg); +int qede_ucast_filter(struct rte_eth_dev *eth_dev, + struct ecore_filter_ucast *ucast, + bool add); +#endif /* _QEDE_ETHDEV_H_ */ diff --git a/src/spdk/dpdk/drivers/net/qede/qede_filter.c b/src/spdk/dpdk/drivers/net/qede/qede_filter.c new file mode 100644 index 000000000..86a2e0dc9 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/qede/qede_filter.c @@ -0,0 +1,1578 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2017 Cavium Inc. + * All rights reserved. + * www.cavium.com + */ + +#include +#include +#include +#include +#include + +#include "qede_ethdev.h" + +/* VXLAN tunnel classification mapping */ +const struct _qede_udp_tunn_types { + uint16_t rte_filter_type; + enum ecore_filter_ucast_type qede_type; + enum ecore_tunn_clss qede_tunn_clss; + const char *string; +} qede_tunn_types[] = { + { + ETH_TUNNEL_FILTER_OMAC, + ECORE_FILTER_MAC, + ECORE_TUNN_CLSS_MAC_VLAN, + "outer-mac" + }, + { + ETH_TUNNEL_FILTER_TENID, + ECORE_FILTER_VNI, + ECORE_TUNN_CLSS_MAC_VNI, + "vni" + }, + { + ETH_TUNNEL_FILTER_IMAC, + ECORE_FILTER_INNER_MAC, + ECORE_TUNN_CLSS_INNER_MAC_VLAN, + "inner-mac" + }, + { + ETH_TUNNEL_FILTER_IVLAN, + ECORE_FILTER_INNER_VLAN, + ECORE_TUNN_CLSS_INNER_MAC_VLAN, + "inner-vlan" + }, + { + ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_TENID, + ECORE_FILTER_MAC_VNI_PAIR, + ECORE_TUNN_CLSS_MAC_VNI, + "outer-mac and vni" + }, + { + ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IMAC, + ECORE_FILTER_UNUSED, + MAX_ECORE_TUNN_CLSS, + "outer-mac and inner-mac" + }, + { + ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IVLAN, + ECORE_FILTER_UNUSED, + MAX_ECORE_TUNN_CLSS, + "outer-mac and inner-vlan" + }, + { + ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IMAC, + ECORE_FILTER_INNER_MAC_VNI_PAIR, + ECORE_TUNN_CLSS_INNER_MAC_VNI, + "vni and inner-mac", + }, + { + ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IVLAN, + ECORE_FILTER_UNUSED, + MAX_ECORE_TUNN_CLSS, + "vni and inner-vlan", + }, + { + ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN, + ECORE_FILTER_INNER_PAIR, + ECORE_TUNN_CLSS_INNER_MAC_VLAN, + "inner-mac and inner-vlan", + }, + { + ETH_TUNNEL_FILTER_OIP, + ECORE_FILTER_UNUSED, + MAX_ECORE_TUNN_CLSS, + "outer-IP" + }, + { + ETH_TUNNEL_FILTER_IIP, + ECORE_FILTER_UNUSED, + MAX_ECORE_TUNN_CLSS, + "inner-IP" + }, + { + RTE_TUNNEL_FILTER_IMAC_IVLAN, + ECORE_FILTER_UNUSED, + MAX_ECORE_TUNN_CLSS, + "IMAC_IVLAN" + }, + { + RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID, + ECORE_FILTER_UNUSED, + MAX_ECORE_TUNN_CLSS, + "IMAC_IVLAN_TENID" + }, + { + RTE_TUNNEL_FILTER_IMAC_TENID, + ECORE_FILTER_UNUSED, + MAX_ECORE_TUNN_CLSS, + "IMAC_TENID" + }, + { + RTE_TUNNEL_FILTER_OMAC_TENID_IMAC, + ECORE_FILTER_UNUSED, + MAX_ECORE_TUNN_CLSS, + "OMAC_TENID_IMAC" + }, +}; + +#define IP_VERSION (0x40) +#define IP_HDRLEN (0x5) +#define QEDE_FDIR_IP_DEFAULT_VERSION_IHL (IP_VERSION | IP_HDRLEN) +#define QEDE_FDIR_TCP_DEFAULT_DATAOFF (0x50) +#define QEDE_FDIR_IPV4_DEF_TTL (64) +#define QEDE_FDIR_IPV6_DEFAULT_VTC_FLOW (0x60000000) +/* Sum of length of header types of 
L2, L3, L4. + * L2 : ether_hdr + vlan_hdr + vxlan_hdr + * L3 : ipv6_hdr + * L4 : tcp_hdr + */ +#define QEDE_MAX_FDIR_PKT_LEN (86) + +static inline bool qede_valid_flow(uint16_t flow_type) +{ + return ((flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_TCP) || + (flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP) || + (flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_TCP) || + (flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP)); +} + +static uint16_t +qede_arfs_construct_pkt(struct rte_eth_dev *eth_dev, + struct qede_arfs_entry *arfs, + void *buff, + struct ecore_arfs_config_params *params); + +/* Note: Flowdir support is only partial. + * For ex: drop_queue, FDIR masks, flex_conf are not supported. + * Parameters like pballoc/status fields are irrelevant here. + */ +int qede_check_fdir_support(struct rte_eth_dev *eth_dev) +{ + struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); + struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); + struct rte_fdir_conf *fdir = ð_dev->data->dev_conf.fdir_conf; + + /* check FDIR modes */ + switch (fdir->mode) { + case RTE_FDIR_MODE_NONE: + qdev->arfs_info.arfs.mode = ECORE_FILTER_CONFIG_MODE_DISABLE; + DP_INFO(edev, "flowdir is disabled\n"); + break; + case RTE_FDIR_MODE_PERFECT: + if (ECORE_IS_CMT(edev)) { + DP_ERR(edev, "flowdir is not supported in 100G mode\n"); + qdev->arfs_info.arfs.mode = + ECORE_FILTER_CONFIG_MODE_DISABLE; + return -ENOTSUP; + } + qdev->arfs_info.arfs.mode = + ECORE_FILTER_CONFIG_MODE_5_TUPLE; + DP_INFO(edev, "flowdir is enabled (5 Tuple mode)\n"); + break; + case RTE_FDIR_MODE_PERFECT_TUNNEL: + case RTE_FDIR_MODE_SIGNATURE: + case RTE_FDIR_MODE_PERFECT_MAC_VLAN: + DP_ERR(edev, "Unsupported flowdir mode %d\n", fdir->mode); + return -ENOTSUP; + } + + return 0; +} + +void qede_fdir_dealloc_resc(struct rte_eth_dev *eth_dev) +{ + struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); + struct qede_arfs_entry *tmp = NULL; + + SLIST_FOREACH(tmp, &qdev->arfs_info.arfs_list_head, list) { + if (tmp) { + if (tmp->mz) + rte_memzone_free(tmp->mz); + SLIST_REMOVE(&qdev->arfs_info.arfs_list_head, tmp, + qede_arfs_entry, list); + rte_free(tmp); + } + } +} + +static int +qede_fdir_to_arfs_filter(struct rte_eth_dev *eth_dev, + struct rte_eth_fdir_filter *fdir, + struct qede_arfs_entry *arfs) +{ + struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); + struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); + struct rte_eth_fdir_input *input; + + static const uint8_t next_proto[] = { + [RTE_ETH_FLOW_NONFRAG_IPV4_TCP] = IPPROTO_TCP, + [RTE_ETH_FLOW_NONFRAG_IPV4_UDP] = IPPROTO_UDP, + [RTE_ETH_FLOW_NONFRAG_IPV6_TCP] = IPPROTO_TCP, + [RTE_ETH_FLOW_NONFRAG_IPV6_UDP] = IPPROTO_UDP, + }; + + input = &fdir->input; + + DP_INFO(edev, "flow_type %d\n", input->flow_type); + + switch (input->flow_type) { + case RTE_ETH_FLOW_NONFRAG_IPV4_TCP: + case RTE_ETH_FLOW_NONFRAG_IPV4_UDP: + /* fill the common ip header */ + arfs->tuple.eth_proto = RTE_ETHER_TYPE_IPV4; + arfs->tuple.dst_ipv4 = input->flow.ip4_flow.dst_ip; + arfs->tuple.src_ipv4 = input->flow.ip4_flow.src_ip; + arfs->tuple.ip_proto = next_proto[input->flow_type]; + + /* UDP */ + if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP) { + arfs->tuple.dst_port = input->flow.udp4_flow.dst_port; + arfs->tuple.src_port = input->flow.udp4_flow.src_port; + } else { /* TCP */ + arfs->tuple.dst_port = input->flow.tcp4_flow.dst_port; + arfs->tuple.src_port = input->flow.tcp4_flow.src_port; + } + break; + case RTE_ETH_FLOW_NONFRAG_IPV6_TCP: + case RTE_ETH_FLOW_NONFRAG_IPV6_UDP: + arfs->tuple.eth_proto = RTE_ETHER_TYPE_IPV6; + arfs->tuple.ip_proto = next_proto[input->flow_type]; + 
rte_memcpy(arfs->tuple.dst_ipv6, + &input->flow.ipv6_flow.dst_ip, + IPV6_ADDR_LEN); + rte_memcpy(arfs->tuple.src_ipv6, + &input->flow.ipv6_flow.src_ip, + IPV6_ADDR_LEN); + + /* UDP */ + if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP) { + arfs->tuple.dst_port = input->flow.udp6_flow.dst_port; + arfs->tuple.src_port = input->flow.udp6_flow.src_port; + } else { /* TCP */ + arfs->tuple.dst_port = input->flow.tcp6_flow.dst_port; + arfs->tuple.src_port = input->flow.tcp6_flow.src_port; + } + break; + default: + DP_ERR(edev, "Unsupported flow_type %u\n", + input->flow_type); + return -ENOTSUP; + } + + arfs->rx_queue = fdir->action.rx_queue; + return 0; +} + +static int +qede_config_arfs_filter(struct rte_eth_dev *eth_dev, + struct qede_arfs_entry *arfs, + bool add) +{ + struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); + struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); + struct ecore_ntuple_filter_params params; + char mz_name[RTE_MEMZONE_NAMESIZE] = {0}; + struct qede_arfs_entry *tmp = NULL; + const struct rte_memzone *mz; + struct ecore_hwfn *p_hwfn; + enum _ecore_status_t rc; + uint16_t pkt_len; + void *pkt; + + if (add) { + if (qdev->arfs_info.filter_count == QEDE_RFS_MAX_FLTR - 1) { + DP_ERR(edev, "Reached max flowdir filter limit\n"); + return -EINVAL; + } + } + + /* soft_id could have been used as memzone string, but soft_id is + * not currently used so it has no significance. + */ + snprintf(mz_name, sizeof(mz_name), "%lx", + (unsigned long)rte_get_timer_cycles()); + mz = rte_memzone_reserve_aligned(mz_name, QEDE_MAX_FDIR_PKT_LEN, + SOCKET_ID_ANY, 0, RTE_CACHE_LINE_SIZE); + if (!mz) { + DP_ERR(edev, "Failed to allocate memzone for fdir, err = %s\n", + rte_strerror(rte_errno)); + return -rte_errno; + } + + pkt = mz->addr; + memset(pkt, 0, QEDE_MAX_FDIR_PKT_LEN); + pkt_len = qede_arfs_construct_pkt(eth_dev, arfs, pkt, + &qdev->arfs_info.arfs); + if (pkt_len == 0) { + rc = -EINVAL; + goto err1; + } + + DP_INFO(edev, "pkt_len = %u memzone = %s\n", pkt_len, mz_name); + if (add) { + SLIST_FOREACH(tmp, &qdev->arfs_info.arfs_list_head, list) { + if (memcmp(tmp->mz->addr, pkt, pkt_len) == 0) { + DP_INFO(edev, "flowdir filter exist\n"); + rc = -EEXIST; + goto err1; + } + } + } else { + SLIST_FOREACH(tmp, &qdev->arfs_info.arfs_list_head, list) { + if (memcmp(tmp->mz->addr, pkt, pkt_len) == 0) + break; + } + if (!tmp) { + DP_ERR(edev, "flowdir filter does not exist\n"); + rc = -EEXIST; + goto err1; + } + } + p_hwfn = ECORE_LEADING_HWFN(edev); + if (add) { + if (qdev->arfs_info.arfs.mode == + ECORE_FILTER_CONFIG_MODE_DISABLE) { + /* Force update */ + eth_dev->data->dev_conf.fdir_conf.mode = + RTE_FDIR_MODE_PERFECT; + qdev->arfs_info.arfs.mode = + ECORE_FILTER_CONFIG_MODE_5_TUPLE; + DP_INFO(edev, "Force enable flowdir in perfect mode\n"); + } + /* Enable ARFS searcher with updated flow_types */ + ecore_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt, + &qdev->arfs_info.arfs); + } + + memset(¶ms, 0, sizeof(params)); + params.addr = (dma_addr_t)mz->iova; + params.length = pkt_len; + params.qid = arfs->rx_queue; + params.vport_id = 0; + params.b_is_add = add; + params.b_is_drop = arfs->is_drop; + + /* configure filter with ECORE_SPQ_MODE_EBLOCK */ + rc = ecore_configure_rfs_ntuple_filter(p_hwfn, NULL, + ¶ms); + if (rc == ECORE_SUCCESS) { + if (add) { + arfs->pkt_len = pkt_len; + arfs->mz = mz; + SLIST_INSERT_HEAD(&qdev->arfs_info.arfs_list_head, + arfs, list); + qdev->arfs_info.filter_count++; + DP_INFO(edev, "flowdir filter added, count = %d\n", + qdev->arfs_info.filter_count); + } else { + 
rte_memzone_free(tmp->mz); + SLIST_REMOVE(&qdev->arfs_info.arfs_list_head, tmp, + qede_arfs_entry, list); + rte_free(tmp); /* the node deleted */ + rte_memzone_free(mz); /* temp node allocated */ + qdev->arfs_info.filter_count--; + DP_INFO(edev, "Fdir filter deleted, count = %d\n", + qdev->arfs_info.filter_count); + } + } else { + DP_ERR(edev, "flowdir filter failed, rc=%d filter_count=%d\n", + rc, qdev->arfs_info.filter_count); + } + + /* Disable ARFS searcher if there are no more filters */ + if (qdev->arfs_info.filter_count == 0) { + memset(&qdev->arfs_info.arfs, 0, + sizeof(struct ecore_arfs_config_params)); + DP_INFO(edev, "Disabling flowdir\n"); + qdev->arfs_info.arfs.mode = ECORE_FILTER_CONFIG_MODE_DISABLE; + ecore_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt, + &qdev->arfs_info.arfs); + } + return 0; + +err1: + rte_memzone_free(mz); + return rc; +} + +static int +qede_config_cmn_fdir_filter(struct rte_eth_dev *eth_dev, + struct rte_eth_fdir_filter *fdir_filter, + bool add) +{ + struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); + struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); + struct qede_arfs_entry *arfs = NULL; + int rc = 0; + + arfs = rte_malloc(NULL, sizeof(struct qede_arfs_entry), + RTE_CACHE_LINE_SIZE); + if (!arfs) { + DP_ERR(edev, "Did not allocate memory for arfs\n"); + return -ENOMEM; + } + + rc = qede_fdir_to_arfs_filter(eth_dev, fdir_filter, arfs); + if (rc < 0) + return rc; + + rc = qede_config_arfs_filter(eth_dev, arfs, add); + if (rc < 0) + rte_free(arfs); + + return rc; +} + +static int +qede_fdir_filter_add(struct rte_eth_dev *eth_dev, + struct rte_eth_fdir_filter *fdir, + bool add) +{ + struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); + struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); + + if (!qede_valid_flow(fdir->input.flow_type)) { + DP_ERR(edev, "invalid flow_type input\n"); + return -EINVAL; + } + + if (fdir->action.rx_queue >= QEDE_RSS_COUNT(eth_dev)) { + DP_ERR(edev, "invalid queue number %u\n", + fdir->action.rx_queue); + return -EINVAL; + } + + if (fdir->input.flow_ext.is_vf) { + DP_ERR(edev, "flowdir is not supported over VF\n"); + return -EINVAL; + } + + return qede_config_cmn_fdir_filter(eth_dev, fdir, add); +} + +/* Fills the L3/L4 headers and returns the actual length of flowdir packet */ +static uint16_t +qede_arfs_construct_pkt(struct rte_eth_dev *eth_dev, + struct qede_arfs_entry *arfs, + void *buff, + struct ecore_arfs_config_params *params) + +{ + struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); + struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); + uint16_t *ether_type; + uint8_t *raw_pkt; + struct rte_ipv4_hdr *ip; + struct rte_ipv6_hdr *ip6; + struct rte_udp_hdr *udp; + struct rte_tcp_hdr *tcp; + uint16_t len; + + raw_pkt = (uint8_t *)buff; + + len = 2 * sizeof(struct rte_ether_addr); + raw_pkt += 2 * sizeof(struct rte_ether_addr); + ether_type = (uint16_t *)raw_pkt; + raw_pkt += sizeof(uint16_t); + len += sizeof(uint16_t); + + *ether_type = rte_cpu_to_be_16(arfs->tuple.eth_proto); + switch (arfs->tuple.eth_proto) { + case RTE_ETHER_TYPE_IPV4: + ip = (struct rte_ipv4_hdr *)raw_pkt; + ip->version_ihl = QEDE_FDIR_IP_DEFAULT_VERSION_IHL; + ip->total_length = sizeof(struct rte_ipv4_hdr); + ip->next_proto_id = arfs->tuple.ip_proto; + ip->time_to_live = QEDE_FDIR_IPV4_DEF_TTL; + ip->dst_addr = arfs->tuple.dst_ipv4; + ip->src_addr = arfs->tuple.src_ipv4; + len += sizeof(struct rte_ipv4_hdr); + params->ipv4 = true; + + raw_pkt = (uint8_t *)buff; + /* UDP */ + if (arfs->tuple.ip_proto == IPPROTO_UDP) { + udp = (struct rte_udp_hdr *)(raw_pkt + len); + 
udp->dst_port = arfs->tuple.dst_port; + udp->src_port = arfs->tuple.src_port; + udp->dgram_len = sizeof(struct rte_udp_hdr); + len += sizeof(struct rte_udp_hdr); + /* adjust ip total_length */ + ip->total_length += sizeof(struct rte_udp_hdr); + params->udp = true; + } else { /* TCP */ + tcp = (struct rte_tcp_hdr *)(raw_pkt + len); + tcp->src_port = arfs->tuple.src_port; + tcp->dst_port = arfs->tuple.dst_port; + tcp->data_off = QEDE_FDIR_TCP_DEFAULT_DATAOFF; + len += sizeof(struct rte_tcp_hdr); + /* adjust ip total_length */ + ip->total_length += sizeof(struct rte_tcp_hdr); + params->tcp = true; + } + break; + case RTE_ETHER_TYPE_IPV6: + ip6 = (struct rte_ipv6_hdr *)raw_pkt; + ip6->proto = arfs->tuple.ip_proto; + ip6->vtc_flow = + rte_cpu_to_be_32(QEDE_FDIR_IPV6_DEFAULT_VTC_FLOW); + + rte_memcpy(&ip6->src_addr, arfs->tuple.src_ipv6, + IPV6_ADDR_LEN); + rte_memcpy(&ip6->dst_addr, arfs->tuple.dst_ipv6, + IPV6_ADDR_LEN); + len += sizeof(struct rte_ipv6_hdr); + params->ipv6 = true; + + raw_pkt = (uint8_t *)buff; + /* UDP */ + if (arfs->tuple.ip_proto == IPPROTO_UDP) { + udp = (struct rte_udp_hdr *)(raw_pkt + len); + udp->src_port = arfs->tuple.src_port; + udp->dst_port = arfs->tuple.dst_port; + len += sizeof(struct rte_udp_hdr); + params->udp = true; + } else { /* TCP */ + tcp = (struct rte_tcp_hdr *)(raw_pkt + len); + tcp->src_port = arfs->tuple.src_port; + tcp->dst_port = arfs->tuple.dst_port; + tcp->data_off = QEDE_FDIR_TCP_DEFAULT_DATAOFF; + len += sizeof(struct rte_tcp_hdr); + params->tcp = true; + } + break; + default: + DP_ERR(edev, "Unsupported eth_proto %u\n", + arfs->tuple.eth_proto); + return 0; + } + + return len; +} + +static int +qede_fdir_filter_conf(struct rte_eth_dev *eth_dev, + enum rte_filter_op filter_op, + void *arg) +{ + struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); + struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); + struct rte_eth_fdir_filter *fdir; + int ret; + + fdir = (struct rte_eth_fdir_filter *)arg; + switch (filter_op) { + case RTE_ETH_FILTER_NOP: + /* Typically used to query flowdir support */ + if (ECORE_IS_CMT(edev)) { + DP_ERR(edev, "flowdir is not supported in 100G mode\n"); + return -ENOTSUP; + } + return 0; /* means supported */ + case RTE_ETH_FILTER_ADD: + ret = qede_fdir_filter_add(eth_dev, fdir, 1); + break; + case RTE_ETH_FILTER_DELETE: + ret = qede_fdir_filter_add(eth_dev, fdir, 0); + break; + case RTE_ETH_FILTER_FLUSH: + case RTE_ETH_FILTER_UPDATE: + case RTE_ETH_FILTER_INFO: + return -ENOTSUP; + break; + default: + DP_ERR(edev, "unknown operation %u", filter_op); + ret = -EINVAL; + } + + return ret; +} + +int qede_ntuple_filter_conf(struct rte_eth_dev *eth_dev, + enum rte_filter_op filter_op, + void *arg) +{ + struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); + struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); + struct rte_eth_ntuple_filter *ntuple; + struct rte_eth_fdir_filter fdir_entry; + struct rte_eth_tcpv4_flow *tcpv4_flow; + struct rte_eth_udpv4_flow *udpv4_flow; + bool add = false; + + switch (filter_op) { + case RTE_ETH_FILTER_NOP: + /* Typically used to query fdir support */ + if (ECORE_IS_CMT(edev)) { + DP_ERR(edev, "flowdir is not supported in 100G mode\n"); + return -ENOTSUP; + } + return 0; /* means supported */ + case RTE_ETH_FILTER_ADD: + add = true; + break; + case RTE_ETH_FILTER_DELETE: + break; + case RTE_ETH_FILTER_INFO: + case RTE_ETH_FILTER_GET: + case RTE_ETH_FILTER_UPDATE: + case RTE_ETH_FILTER_FLUSH: + case RTE_ETH_FILTER_SET: + case RTE_ETH_FILTER_STATS: + case RTE_ETH_FILTER_OP_MAX: + DP_ERR(edev, "Unsupported filter_op %d\n", 
filter_op); + return -ENOTSUP; + } + ntuple = (struct rte_eth_ntuple_filter *)arg; + /* Internally convert ntuple to fdir entry */ + memset(&fdir_entry, 0, sizeof(fdir_entry)); + if (ntuple->proto == IPPROTO_TCP) { + fdir_entry.input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_TCP; + tcpv4_flow = &fdir_entry.input.flow.tcp4_flow; + tcpv4_flow->ip.src_ip = ntuple->src_ip; + tcpv4_flow->ip.dst_ip = ntuple->dst_ip; + tcpv4_flow->ip.proto = IPPROTO_TCP; + tcpv4_flow->src_port = ntuple->src_port; + tcpv4_flow->dst_port = ntuple->dst_port; + } else { + fdir_entry.input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP; + udpv4_flow = &fdir_entry.input.flow.udp4_flow; + udpv4_flow->ip.src_ip = ntuple->src_ip; + udpv4_flow->ip.dst_ip = ntuple->dst_ip; + udpv4_flow->ip.proto = IPPROTO_TCP; + udpv4_flow->src_port = ntuple->src_port; + udpv4_flow->dst_port = ntuple->dst_port; + } + + fdir_entry.action.rx_queue = ntuple->queue; + + return qede_config_cmn_fdir_filter(eth_dev, &fdir_entry, add); +} + +static int +qede_tunnel_update(struct qede_dev *qdev, + struct ecore_tunnel_info *tunn_info) +{ + struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); + enum _ecore_status_t rc = ECORE_INVAL; + struct ecore_hwfn *p_hwfn; + struct ecore_ptt *p_ptt; + int i; + + for_each_hwfn(edev, i) { + p_hwfn = &edev->hwfns[i]; + if (IS_PF(edev)) { + p_ptt = ecore_ptt_acquire(p_hwfn); + if (!p_ptt) { + DP_ERR(p_hwfn, "Can't acquire PTT\n"); + return -EAGAIN; + } + } else { + p_ptt = NULL; + } + + rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, p_ptt, + tunn_info, ECORE_SPQ_MODE_CB, NULL); + if (IS_PF(edev)) + ecore_ptt_release(p_hwfn, p_ptt); + + if (rc != ECORE_SUCCESS) + break; + } + + return rc; +} + +static int +qede_vxlan_enable(struct rte_eth_dev *eth_dev, uint8_t clss, + bool enable) +{ + struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); + struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); + enum _ecore_status_t rc = ECORE_INVAL; + struct ecore_tunnel_info tunn; + + if (qdev->vxlan.enable == enable) + return ECORE_SUCCESS; + + memset(&tunn, 0, sizeof(struct ecore_tunnel_info)); + tunn.vxlan.b_update_mode = true; + tunn.vxlan.b_mode_enabled = enable; + tunn.b_update_rx_cls = true; + tunn.b_update_tx_cls = true; + tunn.vxlan.tun_cls = clss; + + tunn.vxlan_port.b_update_port = true; + tunn.vxlan_port.port = enable ? QEDE_VXLAN_DEF_PORT : 0; + + rc = qede_tunnel_update(qdev, &tunn); + if (rc == ECORE_SUCCESS) { + qdev->vxlan.enable = enable; + qdev->vxlan.udp_port = (enable) ? QEDE_VXLAN_DEF_PORT : 0; + DP_INFO(edev, "vxlan is %s, UDP port = %d\n", + enable ? "enabled" : "disabled", qdev->vxlan.udp_port); + } else { + DP_ERR(edev, "Failed to update tunn_clss %u\n", + tunn.vxlan.tun_cls); + } + + return rc; +} + +static int +qede_geneve_enable(struct rte_eth_dev *eth_dev, uint8_t clss, + bool enable) +{ + struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); + struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); + enum _ecore_status_t rc = ECORE_INVAL; + struct ecore_tunnel_info tunn; + + memset(&tunn, 0, sizeof(struct ecore_tunnel_info)); + tunn.l2_geneve.b_update_mode = true; + tunn.l2_geneve.b_mode_enabled = enable; + tunn.ip_geneve.b_update_mode = true; + tunn.ip_geneve.b_mode_enabled = enable; + tunn.l2_geneve.tun_cls = clss; + tunn.ip_geneve.tun_cls = clss; + tunn.b_update_rx_cls = true; + tunn.b_update_tx_cls = true; + + tunn.geneve_port.b_update_port = true; + tunn.geneve_port.port = enable ? 
QEDE_GENEVE_DEF_PORT : 0; + + rc = qede_tunnel_update(qdev, &tunn); + if (rc == ECORE_SUCCESS) { + qdev->geneve.enable = enable; + qdev->geneve.udp_port = (enable) ? QEDE_GENEVE_DEF_PORT : 0; + DP_INFO(edev, "GENEVE is %s, UDP port = %d\n", + enable ? "enabled" : "disabled", qdev->geneve.udp_port); + } else { + DP_ERR(edev, "Failed to update tunn_clss %u\n", + clss); + } + + return rc; +} + +static int +qede_ipgre_enable(struct rte_eth_dev *eth_dev, uint8_t clss, + bool enable) +{ + struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); + struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); + enum _ecore_status_t rc = ECORE_INVAL; + struct ecore_tunnel_info tunn; + + memset(&tunn, 0, sizeof(struct ecore_tunnel_info)); + tunn.ip_gre.b_update_mode = true; + tunn.ip_gre.b_mode_enabled = enable; + tunn.ip_gre.tun_cls = clss; + tunn.ip_gre.tun_cls = clss; + tunn.b_update_rx_cls = true; + tunn.b_update_tx_cls = true; + + rc = qede_tunnel_update(qdev, &tunn); + if (rc == ECORE_SUCCESS) { + qdev->ipgre.enable = enable; + DP_INFO(edev, "IPGRE is %s\n", + enable ? "enabled" : "disabled"); + } else { + DP_ERR(edev, "Failed to update tunn_clss %u\n", + clss); + } + + return rc; +} + +int +qede_udp_dst_port_del(struct rte_eth_dev *eth_dev, + struct rte_eth_udp_tunnel *tunnel_udp) +{ + struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); + struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); + struct ecore_tunnel_info tunn; /* @DPDK */ + uint16_t udp_port; + int rc; + + PMD_INIT_FUNC_TRACE(edev); + + memset(&tunn, 0, sizeof(tunn)); + + switch (tunnel_udp->prot_type) { + case RTE_TUNNEL_TYPE_VXLAN: + if (qdev->vxlan.udp_port != tunnel_udp->udp_port) { + DP_ERR(edev, "UDP port %u doesn't exist\n", + tunnel_udp->udp_port); + return ECORE_INVAL; + } + udp_port = 0; + + tunn.vxlan_port.b_update_port = true; + tunn.vxlan_port.port = udp_port; + + rc = qede_tunnel_update(qdev, &tunn); + if (rc != ECORE_SUCCESS) { + DP_ERR(edev, "Unable to config UDP port %u\n", + tunn.vxlan_port.port); + return rc; + } + + qdev->vxlan.udp_port = udp_port; + /* If the request is to delete UDP port and if the number of + * VXLAN filters have reached 0 then VxLAN offload can be be + * disabled. + */ + if (qdev->vxlan.enable && qdev->vxlan.num_filters == 0) + return qede_vxlan_enable(eth_dev, + ECORE_TUNN_CLSS_MAC_VLAN, false); + + break; + case RTE_TUNNEL_TYPE_GENEVE: + if (qdev->geneve.udp_port != tunnel_udp->udp_port) { + DP_ERR(edev, "UDP port %u doesn't exist\n", + tunnel_udp->udp_port); + return ECORE_INVAL; + } + + udp_port = 0; + + tunn.geneve_port.b_update_port = true; + tunn.geneve_port.port = udp_port; + + rc = qede_tunnel_update(qdev, &tunn); + if (rc != ECORE_SUCCESS) { + DP_ERR(edev, "Unable to config UDP port %u\n", + tunn.vxlan_port.port); + return rc; + } + + qdev->vxlan.udp_port = udp_port; + /* If the request is to delete UDP port and if the number of + * GENEVE filters have reached 0 then GENEVE offload can be be + * disabled. 
+ */ + if (qdev->geneve.enable && qdev->geneve.num_filters == 0) + return qede_geneve_enable(eth_dev, + ECORE_TUNN_CLSS_MAC_VLAN, false); + + break; + + default: + return ECORE_INVAL; + } + + return 0; +} + +int +qede_udp_dst_port_add(struct rte_eth_dev *eth_dev, + struct rte_eth_udp_tunnel *tunnel_udp) +{ + struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); + struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); + struct ecore_tunnel_info tunn; /* @DPDK */ + uint16_t udp_port; + int rc; + + PMD_INIT_FUNC_TRACE(edev); + + memset(&tunn, 0, sizeof(tunn)); + + switch (tunnel_udp->prot_type) { + case RTE_TUNNEL_TYPE_VXLAN: + if (qdev->vxlan.udp_port == tunnel_udp->udp_port) { + DP_INFO(edev, + "UDP port %u for VXLAN was already configured\n", + tunnel_udp->udp_port); + return ECORE_SUCCESS; + } + + /* Enable VxLAN tunnel with default MAC/VLAN classification if + * it was not enabled while adding VXLAN filter before UDP port + * update. + */ + if (!qdev->vxlan.enable) { + rc = qede_vxlan_enable(eth_dev, + ECORE_TUNN_CLSS_MAC_VLAN, true); + if (rc != ECORE_SUCCESS) { + DP_ERR(edev, "Failed to enable VXLAN " + "prior to updating UDP port\n"); + return rc; + } + } + udp_port = tunnel_udp->udp_port; + + tunn.vxlan_port.b_update_port = true; + tunn.vxlan_port.port = udp_port; + + rc = qede_tunnel_update(qdev, &tunn); + if (rc != ECORE_SUCCESS) { + DP_ERR(edev, "Unable to config UDP port %u for VXLAN\n", + udp_port); + return rc; + } + + DP_INFO(edev, "Updated UDP port %u for VXLAN\n", udp_port); + + qdev->vxlan.udp_port = udp_port; + break; + case RTE_TUNNEL_TYPE_GENEVE: + if (qdev->geneve.udp_port == tunnel_udp->udp_port) { + DP_INFO(edev, + "UDP port %u for GENEVE was already configured\n", + tunnel_udp->udp_port); + return ECORE_SUCCESS; + } + + /* Enable GENEVE tunnel with default MAC/VLAN classification if + * it was not enabled while adding GENEVE filter before UDP port + * update. 
+ */ + if (!qdev->geneve.enable) { + rc = qede_geneve_enable(eth_dev, + ECORE_TUNN_CLSS_MAC_VLAN, true); + if (rc != ECORE_SUCCESS) { + DP_ERR(edev, "Failed to enable GENEVE " + "prior to updating UDP port\n"); + return rc; + } + } + udp_port = tunnel_udp->udp_port; + + tunn.geneve_port.b_update_port = true; + tunn.geneve_port.port = udp_port; + + rc = qede_tunnel_update(qdev, &tunn); + if (rc != ECORE_SUCCESS) { + DP_ERR(edev, "Unable to config UDP port %u for GENEVE\n", + udp_port); + return rc; + } + + DP_INFO(edev, "Updated UDP port %u for GENEVE\n", udp_port); + + qdev->geneve.udp_port = udp_port; + break; + default: + return ECORE_INVAL; + } + + return 0; +} + +static void qede_get_ecore_tunn_params(uint32_t filter, uint32_t *type, + uint32_t *clss, char *str) +{ + uint16_t j; + *clss = MAX_ECORE_TUNN_CLSS; + + for (j = 0; j < RTE_DIM(qede_tunn_types); j++) { + if (filter == qede_tunn_types[j].rte_filter_type) { + *type = qede_tunn_types[j].qede_type; + *clss = qede_tunn_types[j].qede_tunn_clss; + strcpy(str, qede_tunn_types[j].string); + return; + } + } +} + +static int +qede_set_ucast_tunn_cmn_param(struct ecore_filter_ucast *ucast, + const struct rte_eth_tunnel_filter_conf *conf, + uint32_t type) +{ + /* Init commmon ucast params first */ + qede_set_ucast_cmn_params(ucast); + + /* Copy out the required fields based on classification type */ + ucast->type = type; + + switch (type) { + case ECORE_FILTER_VNI: + ucast->vni = conf->tenant_id; + break; + case ECORE_FILTER_INNER_VLAN: + ucast->vlan = conf->inner_vlan; + break; + case ECORE_FILTER_MAC: + memcpy(ucast->mac, conf->outer_mac.addr_bytes, + RTE_ETHER_ADDR_LEN); + break; + case ECORE_FILTER_INNER_MAC: + memcpy(ucast->mac, conf->inner_mac.addr_bytes, + RTE_ETHER_ADDR_LEN); + break; + case ECORE_FILTER_MAC_VNI_PAIR: + memcpy(ucast->mac, conf->outer_mac.addr_bytes, + RTE_ETHER_ADDR_LEN); + ucast->vni = conf->tenant_id; + break; + case ECORE_FILTER_INNER_MAC_VNI_PAIR: + memcpy(ucast->mac, conf->inner_mac.addr_bytes, + RTE_ETHER_ADDR_LEN); + ucast->vni = conf->tenant_id; + break; + case ECORE_FILTER_INNER_PAIR: + memcpy(ucast->mac, conf->inner_mac.addr_bytes, + RTE_ETHER_ADDR_LEN); + ucast->vlan = conf->inner_vlan; + break; + default: + return -EINVAL; + } + + return ECORE_SUCCESS; +} + +static int +_qede_tunn_filter_config(struct rte_eth_dev *eth_dev, + const struct rte_eth_tunnel_filter_conf *conf, + __rte_unused enum rte_filter_op filter_op, + enum ecore_tunn_clss *clss, + bool add) +{ + struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); + struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); + struct ecore_filter_ucast ucast = {0}; + enum ecore_filter_ucast_type type; + uint16_t filter_type = 0; + char str[80]; + int rc; + + filter_type = conf->filter_type; + /* Determine if the given filter classification is supported */ + qede_get_ecore_tunn_params(filter_type, &type, clss, str); + if (*clss == MAX_ECORE_TUNN_CLSS) { + DP_ERR(edev, "Unsupported filter type\n"); + return -EINVAL; + } + /* Init tunnel ucast params */ + rc = qede_set_ucast_tunn_cmn_param(&ucast, conf, type); + if (rc != ECORE_SUCCESS) { + DP_ERR(edev, "Unsupported Tunnel filter type 0x%x\n", + conf->filter_type); + return rc; + } + DP_INFO(edev, "Rule: \"%s\", op %d, type 0x%x\n", + str, filter_op, ucast.type); + + ucast.opcode = add ? 
ECORE_FILTER_ADD : ECORE_FILTER_REMOVE; + + /* Skip MAC/VLAN if filter is based on VNI */ + if (!(filter_type & ETH_TUNNEL_FILTER_TENID)) { + rc = qede_mac_int_ops(eth_dev, &ucast, add); + if (rc == 0 && add) { + /* Enable accept anyvlan */ + qede_config_accept_any_vlan(qdev, true); + } + } else { + rc = qede_ucast_filter(eth_dev, &ucast, add); + if (rc == 0) + rc = ecore_filter_ucast_cmd(edev, &ucast, + ECORE_SPQ_MODE_CB, NULL); + } + + return rc; +} + +static int +qede_tunn_enable(struct rte_eth_dev *eth_dev, uint8_t clss, + enum rte_eth_tunnel_type tunn_type, bool enable) +{ + int rc = -EINVAL; + + switch (tunn_type) { + case RTE_TUNNEL_TYPE_VXLAN: + rc = qede_vxlan_enable(eth_dev, clss, enable); + break; + case RTE_TUNNEL_TYPE_GENEVE: + rc = qede_geneve_enable(eth_dev, clss, enable); + break; + case RTE_TUNNEL_TYPE_IP_IN_GRE: + rc = qede_ipgre_enable(eth_dev, clss, enable); + break; + default: + rc = -EINVAL; + break; + } + + return rc; +} + +static int +qede_tunn_filter_config(struct rte_eth_dev *eth_dev, + enum rte_filter_op filter_op, + const struct rte_eth_tunnel_filter_conf *conf) +{ + struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); + struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); + enum ecore_tunn_clss clss = MAX_ECORE_TUNN_CLSS; + bool add; + int rc; + + PMD_INIT_FUNC_TRACE(edev); + + switch (filter_op) { + case RTE_ETH_FILTER_ADD: + add = true; + break; + case RTE_ETH_FILTER_DELETE: + add = false; + break; + default: + DP_ERR(edev, "Unsupported operation %d\n", filter_op); + return -EINVAL; + } + + if (IS_VF(edev)) + return qede_tunn_enable(eth_dev, + ECORE_TUNN_CLSS_MAC_VLAN, + conf->tunnel_type, add); + + rc = _qede_tunn_filter_config(eth_dev, conf, filter_op, &clss, add); + if (rc != ECORE_SUCCESS) + return rc; + + if (add) { + if (conf->tunnel_type == RTE_TUNNEL_TYPE_VXLAN) { + qdev->vxlan.num_filters++; + qdev->vxlan.filter_type = conf->filter_type; + } else { /* GENEVE */ + qdev->geneve.num_filters++; + qdev->geneve.filter_type = conf->filter_type; + } + + if (!qdev->vxlan.enable || !qdev->geneve.enable || + !qdev->ipgre.enable) + return qede_tunn_enable(eth_dev, clss, + conf->tunnel_type, + true); + } else { + if (conf->tunnel_type == RTE_TUNNEL_TYPE_VXLAN) + qdev->vxlan.num_filters--; + else /*GENEVE*/ + qdev->geneve.num_filters--; + + /* Disable VXLAN if VXLAN filters become 0 */ + if (qdev->vxlan.num_filters == 0 || + qdev->geneve.num_filters == 0) + return qede_tunn_enable(eth_dev, clss, + conf->tunnel_type, + false); + } + + return 0; +} + +static int +qede_flow_validate_attr(__rte_unused struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + struct rte_flow_error *error) +{ + if (attr == NULL) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR, NULL, + "NULL attribute"); + return -rte_errno; + } + + if (attr->group != 0) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_GROUP, attr, + "Groups are not supported"); + return -rte_errno; + } + + if (attr->priority != 0) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, attr, + "Priorities are not supported"); + return -rte_errno; + } + + if (attr->egress != 0) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attr, + "Egress is not supported"); + return -rte_errno; + } + + if (attr->transfer != 0) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, attr, + "Transfer is not supported"); + return -rte_errno; + } + + if (attr->ingress == 0) { + rte_flow_error_set(error, ENOTSUP, + 
RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr, + "Only ingress is supported"); + return -rte_errno; + } + + return 0; +} + +static int +qede_flow_parse_pattern(__rte_unused struct rte_eth_dev *dev, + const struct rte_flow_item pattern[], + struct rte_flow_error *error, + struct rte_flow *flow) +{ + bool l3 = false, l4 = false; + + if (pattern == NULL) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL, + "NULL pattern"); + return -rte_errno; + } + + for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) { + if (!pattern->spec) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + pattern, + "Item spec not defined"); + return -rte_errno; + } + + if (pattern->last) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + pattern, + "Item last not supported"); + return -rte_errno; + } + + if (pattern->mask) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + pattern, + "Item mask not supported"); + return -rte_errno; + } + + /* Below validation is only for 4 tuple flow + * (GFT_PROFILE_TYPE_4_TUPLE) + * - src and dst L3 address (IPv4 or IPv6) + * - src and dst L4 port (TCP or UDP) + */ + + switch (pattern->type) { + case RTE_FLOW_ITEM_TYPE_IPV4: + l3 = true; + + if (flow) { + const struct rte_flow_item_ipv4 *spec; + + spec = pattern->spec; + flow->entry.tuple.src_ipv4 = spec->hdr.src_addr; + flow->entry.tuple.dst_ipv4 = spec->hdr.dst_addr; + flow->entry.tuple.eth_proto = + RTE_ETHER_TYPE_IPV4; + } + break; + + case RTE_FLOW_ITEM_TYPE_IPV6: + l3 = true; + + if (flow) { + const struct rte_flow_item_ipv6 *spec; + + spec = pattern->spec; + rte_memcpy(flow->entry.tuple.src_ipv6, + spec->hdr.src_addr, + IPV6_ADDR_LEN); + rte_memcpy(flow->entry.tuple.dst_ipv6, + spec->hdr.dst_addr, + IPV6_ADDR_LEN); + flow->entry.tuple.eth_proto = + RTE_ETHER_TYPE_IPV6; + } + break; + + case RTE_FLOW_ITEM_TYPE_UDP: + l4 = true; + + if (flow) { + const struct rte_flow_item_udp *spec; + + spec = pattern->spec; + flow->entry.tuple.src_port = + spec->hdr.src_port; + flow->entry.tuple.dst_port = + spec->hdr.dst_port; + flow->entry.tuple.ip_proto = IPPROTO_UDP; + } + break; + + case RTE_FLOW_ITEM_TYPE_TCP: + l4 = true; + + if (flow) { + const struct rte_flow_item_tcp *spec; + + spec = pattern->spec; + flow->entry.tuple.src_port = + spec->hdr.src_port; + flow->entry.tuple.dst_port = + spec->hdr.dst_port; + flow->entry.tuple.ip_proto = IPPROTO_TCP; + } + + break; + default: + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + pattern, + "Only 4 tuple (IPV4, IPV6, UDP and TCP) item types supported"); + return -rte_errno; + } + } + + if (!(l3 && l4)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + pattern, + "Item types need to have both L3 and L4 protocols"); + return -rte_errno; + } + + return 0; +} + +static int +qede_flow_parse_actions(struct rte_eth_dev *dev, + const struct rte_flow_action actions[], + struct rte_flow_error *error, + struct rte_flow *flow) +{ + const struct rte_flow_action_queue *queue; + + if (actions == NULL) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL, + "NULL actions"); + return -rte_errno; + } + + for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { + switch (actions->type) { + case RTE_FLOW_ACTION_TYPE_QUEUE: + queue = actions->conf; + + if (queue->index >= QEDE_RSS_COUNT(dev)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + actions, + "Bad QUEUE action"); + return -rte_errno; + } + + if (flow) + flow->entry.rx_queue = queue->index; + + 
break; + case RTE_FLOW_ACTION_TYPE_DROP: + if (flow) + flow->entry.is_drop = true; + break; + default: + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + actions, + "Action is not supported - only ACTION_TYPE_QUEUE and ACTION_TYPE_DROP supported"); + return -rte_errno; + } + } + + return 0; +} + +static int +qede_flow_parse(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item patterns[], + const struct rte_flow_action actions[], + struct rte_flow_error *error, + struct rte_flow *flow) + +{ + int rc = 0; + + rc = qede_flow_validate_attr(dev, attr, error); + if (rc) + return rc; + + /* parse and validate item pattern and actions. + * Given item list and actions will be translate to qede PMD + * specific arfs structure. + */ + rc = qede_flow_parse_pattern(dev, patterns, error, flow); + if (rc) + return rc; + + rc = qede_flow_parse_actions(dev, actions, error, flow); + + return rc; +} + +static int +qede_flow_validate(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item patterns[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + return qede_flow_parse(dev, attr, patterns, actions, error, NULL); +} + +static struct rte_flow * +qede_flow_create(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + struct rte_flow *flow = NULL; + int rc; + + flow = rte_zmalloc("qede_rte_flow", sizeof(*flow), 0); + if (flow == NULL) { + rte_flow_error_set(error, ENOMEM, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "Failed to allocate memory"); + return NULL; + } + + rc = qede_flow_parse(dev, attr, pattern, actions, error, flow); + if (rc < 0) { + rte_free(flow); + return NULL; + } + + rc = qede_config_arfs_filter(dev, &flow->entry, true); + if (rc < 0) { + rte_flow_error_set(error, rc, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Failed to configure flow filter"); + rte_free(flow); + return NULL; + } + + return flow; +} + +static int +qede_flow_destroy(struct rte_eth_dev *eth_dev, + struct rte_flow *flow, + struct rte_flow_error *error) +{ + int rc = 0; + + rc = qede_config_arfs_filter(eth_dev, &flow->entry, false); + if (rc < 0) { + rte_flow_error_set(error, rc, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Failed to delete flow filter"); + rte_free(flow); + } + + return rc; +} + +static int +qede_flow_flush(struct rte_eth_dev *eth_dev, + struct rte_flow_error *error) +{ + struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); + struct qede_arfs_entry *tmp = NULL; + int rc = 0; + + while (!SLIST_EMPTY(&qdev->arfs_info.arfs_list_head)) { + tmp = SLIST_FIRST(&qdev->arfs_info.arfs_list_head); + + rc = qede_config_arfs_filter(eth_dev, tmp, false); + if (rc < 0) + rte_flow_error_set(error, rc, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Failed to flush flow filter"); + } + + return rc; +} + +const struct rte_flow_ops qede_flow_ops = { + .validate = qede_flow_validate, + .create = qede_flow_create, + .destroy = qede_flow_destroy, + .flush = qede_flow_flush, +}; + +int qede_dev_filter_ctrl(struct rte_eth_dev *eth_dev, + enum rte_filter_type filter_type, + enum rte_filter_op filter_op, + void *arg) +{ + struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); + struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); + struct rte_eth_tunnel_filter_conf *filter_conf = + (struct rte_eth_tunnel_filter_conf *)arg; + + switch (filter_type) { + case RTE_ETH_FILTER_TUNNEL: + switch (filter_conf->tunnel_type) { + case 
RTE_TUNNEL_TYPE_VXLAN: + case RTE_TUNNEL_TYPE_GENEVE: + case RTE_TUNNEL_TYPE_IP_IN_GRE: + DP_INFO(edev, + "Packet steering to the specified Rx queue" + " is not supported with UDP tunneling"); + return(qede_tunn_filter_config(eth_dev, filter_op, + filter_conf)); + case RTE_TUNNEL_TYPE_TEREDO: + case RTE_TUNNEL_TYPE_NVGRE: + case RTE_L2_TUNNEL_TYPE_E_TAG: + DP_ERR(edev, "Unsupported tunnel type %d\n", + filter_conf->tunnel_type); + return -EINVAL; + case RTE_TUNNEL_TYPE_NONE: + default: + return 0; + } + break; + case RTE_ETH_FILTER_FDIR: + return qede_fdir_filter_conf(eth_dev, filter_op, arg); + case RTE_ETH_FILTER_NTUPLE: + return qede_ntuple_filter_conf(eth_dev, filter_op, arg); + case RTE_ETH_FILTER_GENERIC: + if (ECORE_IS_CMT(edev)) { + DP_ERR(edev, "flowdir is not supported in 100G mode\n"); + return -ENOTSUP; + } + + if (filter_op != RTE_ETH_FILTER_GET) + return -EINVAL; + + *(const void **)arg = &qede_flow_ops; + return 0; + case RTE_ETH_FILTER_MACVLAN: + case RTE_ETH_FILTER_ETHERTYPE: + case RTE_ETH_FILTER_FLEXIBLE: + case RTE_ETH_FILTER_SYN: + case RTE_ETH_FILTER_HASH: + case RTE_ETH_FILTER_L2_TUNNEL: + case RTE_ETH_FILTER_MAX: + default: + DP_ERR(edev, "Unsupported filter type %d\n", + filter_type); + return -EINVAL; + } + + return 0; +} diff --git a/src/spdk/dpdk/drivers/net/qede/qede_if.h b/src/spdk/dpdk/drivers/net/qede/qede_if.h new file mode 100644 index 000000000..858cd51d5 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/qede/qede_if.h @@ -0,0 +1,198 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. + * All rights reserved. + * www.cavium.com + */ + +#ifndef _QEDE_IF_H +#define _QEDE_IF_H + +#include "qede_ethdev.h" + +/* forward */ +struct ecore_dev; +struct qed_sb_info; +struct qed_pf_params; +enum ecore_int_mode; + +struct qed_dev_info { + uint8_t num_hwfns; + uint8_t hw_mac[RTE_ETHER_ADDR_LEN]; + bool is_mf_default; + + /* FW version */ + uint16_t fw_major; + uint16_t fw_minor; + uint16_t fw_rev; + uint16_t fw_eng; + + /* MFW version */ + uint32_t mfw_rev; +#define QED_MFW_VERSION_0_MASK 0x000000FF +#define QED_MFW_VERSION_0_OFFSET 0 +#define QED_MFW_VERSION_1_MASK 0x0000FF00 +#define QED_MFW_VERSION_1_OFFSET 8 +#define QED_MFW_VERSION_2_MASK 0x00FF0000 +#define QED_MFW_VERSION_2_OFFSET 16 +#define QED_MFW_VERSION_3_MASK 0xFF000000 +#define QED_MFW_VERSION_3_OFFSET 24 + + uint32_t flash_size; + bool b_arfs_capable; + bool b_inter_pf_switch; + bool tx_switching; + u16 mtu; + + bool smart_an; + + /* MBI version */ + uint32_t mbi_version; +#define QED_MBI_VERSION_0_MASK 0x000000FF +#define QED_MBI_VERSION_0_OFFSET 0 +#define QED_MBI_VERSION_1_MASK 0x0000FF00 +#define QED_MBI_VERSION_1_OFFSET 8 +#define QED_MBI_VERSION_2_MASK 0x00FF0000 +#define QED_MBI_VERSION_2_OFFSET 16 + + /* Out param for qede */ + bool vxlan_enable; + bool gre_enable; + bool geneve_enable; + + enum ecore_dev_type dev_type; +}; + +struct qed_dev_eth_info { + struct qed_dev_info common; + + uint8_t num_queues; + uint8_t num_tc; + + struct rte_ether_addr port_mac; + uint16_t num_vlan_filters; + uint32_t num_mac_filters; + + /* Legacy VF - this affects the datapath */ + bool is_legacy; +}; + +#define INIT_STRUCT_FIELD(field, value) .field = value + +struct qed_eth_ops { + const struct qed_common_ops *common; + int (*fill_dev_info)(struct ecore_dev *edev, + struct qed_dev_eth_info *info); +}; + +struct qed_link_params { + bool link_up; + +#define QED_LINK_OVERRIDE_SPEED_AUTONEG (1 << 0) +#define QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS (1 << 1) +#define 
QED_LINK_OVERRIDE_SPEED_FORCED_SPEED (1 << 2) +#define QED_LINK_OVERRIDE_PAUSE_CONFIG (1 << 3) +#define QED_LINK_OVERRIDE_EEE_CONFIG (1 << 5) + uint32_t override_flags; + bool autoneg; + uint32_t adv_speeds; + uint32_t forced_speed; +#define QED_LINK_PAUSE_AUTONEG_ENABLE (1 << 0) +#define QED_LINK_PAUSE_RX_ENABLE (1 << 1) +#define QED_LINK_PAUSE_TX_ENABLE (1 << 2) + uint32_t pause_config; + struct ecore_link_eee_params eee; +}; + +struct qed_link_output { + bool link_up; + uint32_t supported_caps; /* In SUPPORTED defs */ + uint32_t advertised_caps; /* In ADVERTISED defs */ + uint32_t lp_caps; /* In ADVERTISED defs */ + uint32_t speed; /* In Mb/s */ + uint32_t adv_speed; /* Speed mask */ + uint8_t duplex; /* In DUPLEX defs */ + uint16_t port; /* In PORT defs */ + bool autoneg; + uint32_t pause_config; + + /* EEE - capability & param */ + bool eee_supported; + bool eee_active; + u8 sup_caps; + struct ecore_link_eee_params eee; +}; + +struct qed_slowpath_params { + uint32_t int_mode; + uint8_t drv_major; + uint8_t drv_minor; + uint8_t drv_rev; + uint8_t drv_eng; + uint8_t name[NAME_SIZE]; +}; + +struct qed_common_cb_ops { + void (*link_update)(void *dev, struct qed_link_output *link); +}; + +struct qed_common_ops { + int (*probe)(struct ecore_dev *edev, + struct rte_pci_device *pci_dev, + uint32_t dp_module, uint8_t dp_level, bool is_vf); + void (*set_name)(struct ecore_dev *edev, char name[]); + enum _ecore_status_t + (*chain_alloc)(struct ecore_dev *edev, + enum ecore_chain_use_mode + intended_use, + enum ecore_chain_mode mode, + enum ecore_chain_cnt_type cnt_type, + uint32_t num_elems, + osal_size_t elem_size, + struct ecore_chain *p_chain, + struct ecore_chain_ext_pbl *ext_pbl); + + void (*chain_free)(struct ecore_dev *edev, + struct ecore_chain *p_chain); + + void (*get_link)(struct ecore_dev *edev, + struct qed_link_output *if_link); + int (*set_link)(struct ecore_dev *edev, + struct qed_link_params *params); + + int (*drain)(struct ecore_dev *edev); + + void (*remove)(struct ecore_dev *edev); + + int (*slowpath_stop)(struct ecore_dev *edev); + + void (*update_pf_params)(struct ecore_dev *edev, + struct ecore_pf_params *params); + + int (*slowpath_start)(struct ecore_dev *edev, + struct qed_slowpath_params *params); + + int (*set_fp_int)(struct ecore_dev *edev, uint16_t cnt); + + uint32_t (*sb_init)(struct ecore_dev *edev, + struct ecore_sb_info *sb_info, + void *sb_virt_addr, + dma_addr_t sb_phy_addr, + uint16_t sb_id); + + int (*get_sb_info)(struct ecore_dev *edev, + struct ecore_sb_info *sb, u16 qid, + struct ecore_sb_info_dbg *sb_dbg); + + bool (*can_link_change)(struct ecore_dev *edev); + + void (*update_msglvl)(struct ecore_dev *edev, + uint32_t dp_module, uint8_t dp_level); + + int (*send_drv_state)(struct ecore_dev *edev, bool active); +}; + +/* Externs */ + +const struct qed_eth_ops *qed_get_eth_ops(void); + +#endif /* _QEDE_IF_H */ diff --git a/src/spdk/dpdk/drivers/net/qede/qede_logs.h b/src/spdk/dpdk/drivers/net/qede/qede_logs.h new file mode 100644 index 000000000..3187d97bb --- /dev/null +++ b/src/spdk/dpdk/drivers/net/qede/qede_logs.h @@ -0,0 +1,76 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. + * All rights reserved. + * www.cavium.com + */ + +#ifndef _QEDE_LOGS_H_ +#define _QEDE_LOGS_H_ + +extern int qede_logtype_driver; + +#define DP_ERR(p_dev, fmt, ...) \ + rte_log(RTE_LOG_ERR, qede_logtype_driver, \ + "[%s:%d(%s)]" fmt, \ + __func__, __LINE__, \ + (p_dev)->name ? 
(p_dev)->name : "", \ + ##__VA_ARGS__) + +#define DP_NOTICE(p_dev, is_assert, fmt, ...) \ +do { \ + if (is_assert) \ + rte_log(RTE_LOG_ERR, qede_logtype_driver,\ + "[QEDE PMD: (%s)]%s:" fmt, \ + (p_dev)->name ? (p_dev)->name : "", \ + __func__, \ + ##__VA_ARGS__); \ + else \ + rte_log(RTE_LOG_NOTICE, qede_logtype_driver,\ + "[QEDE PMD: (%s)]%s:" fmt, \ + (p_dev)->name ? (p_dev)->name : "", \ + __func__, \ + ##__VA_ARGS__); \ +} while (0) + +#define DP_INFO(p_dev, fmt, ...) \ + rte_log(RTE_LOG_INFO, qede_logtype_driver, \ + "[%s:%d(%s)]" fmt, \ + __func__, __LINE__, \ + (p_dev)->name ? (p_dev)->name : "", \ + ##__VA_ARGS__) + +#define DP_VERBOSE(p_dev, module, fmt, ...) \ + do { \ + if ((p_dev)->dp_module & module) \ + rte_log(RTE_LOG_DEBUG, qede_logtype_driver, \ + "[%s:%d(%s)]" fmt, \ + __func__, __LINE__, \ + (p_dev)->name ? (p_dev)->name : "", \ + ##__VA_ARGS__); \ + } while (0) + +extern int qede_logtype_init; +#define PMD_INIT_LOG(level, edev, fmt, args...) \ + rte_log(RTE_LOG_ ## level, qede_logtype_init, \ + "[qede_pmd: %s] %s() " fmt "\n", \ + (edev)->name, __func__, ##args) + +#define PMD_INIT_FUNC_TRACE(edev) PMD_INIT_LOG(DEBUG, edev, " >>") + +#ifdef RTE_LIBRTE_QEDE_DEBUG_TX +#define PMD_TX_LOG(level, q, fmt, args...) \ + RTE_LOG(level, PMD, "%s(): port=%u queue=%u " fmt "\n", \ + __func__, q->port_id, q->queue_id, ## args) +#else +#define PMD_TX_LOG(level, fmt, args...) do { } while (0) +#endif + +#ifdef RTE_LIBRTE_QEDE_DEBUG_RX +#define PMD_RX_LOG(level, q, fmt, args...) \ + RTE_LOG(level, PMD, "%s(): port=%u queue=%u " fmt "\n", \ + __func__, q->port_id, q->queue_id, ## args) +#else +#define PMD_RX_LOG(level, q, fmt, args...) do { } while (0) +#endif + +#endif /* _QEDE_LOGS_H_ */ diff --git a/src/spdk/dpdk/drivers/net/qede/qede_main.c b/src/spdk/dpdk/drivers/net/qede/qede_main.c new file mode 100644 index 000000000..70357ebb6 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/qede/qede_main.c @@ -0,0 +1,793 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. + * All rights reserved. + * www.cavium.com + */ + +#include +#include +#include +#include + +#include "qede_ethdev.h" + +/* Alarm timeout. 
*/ +#define QEDE_ALARM_TIMEOUT_US 100000 + +/* Global variable to hold absolute path of fw file */ +char qede_fw_file[PATH_MAX]; + +static const char * const QEDE_DEFAULT_FIRMWARE = + "/lib/firmware/qed/qed_init_values-8.40.33.0.bin"; + +static void +qed_update_pf_params(struct ecore_dev *edev, struct ecore_pf_params *params) +{ + int i; + + for (i = 0; i < edev->num_hwfns; i++) { + struct ecore_hwfn *p_hwfn = &edev->hwfns[i]; + p_hwfn->pf_params = *params; + } +} + +static void qed_init_pci(struct ecore_dev *edev, struct rte_pci_device *pci_dev) +{ + edev->regview = pci_dev->mem_resource[0].addr; + edev->doorbells = pci_dev->mem_resource[2].addr; + edev->db_size = pci_dev->mem_resource[2].len; +} + +static int +qed_probe(struct ecore_dev *edev, struct rte_pci_device *pci_dev, + uint32_t dp_module, uint8_t dp_level, bool is_vf) +{ + struct ecore_hw_prepare_params hw_prepare_params; + int rc; + + ecore_init_struct(edev); + edev->drv_type = DRV_ID_DRV_TYPE_LINUX; + /* Protocol type is always fixed to PROTOCOL_ETH */ + + if (is_vf) + edev->b_is_vf = true; + + ecore_init_dp(edev, dp_module, dp_level, NULL); + qed_init_pci(edev, pci_dev); + + memset(&hw_prepare_params, 0, sizeof(hw_prepare_params)); + + if (is_vf) + hw_prepare_params.acquire_retry_cnt = ECORE_VF_ACQUIRE_THRESH; + + hw_prepare_params.personality = ECORE_PCI_ETH; + hw_prepare_params.drv_resc_alloc = false; + hw_prepare_params.chk_reg_fifo = false; + hw_prepare_params.initiate_pf_flr = true; + hw_prepare_params.allow_mdump = false; + hw_prepare_params.b_en_pacing = false; + hw_prepare_params.epoch = (u32)time(NULL); + rc = ecore_hw_prepare(edev, &hw_prepare_params); + if (rc) { + DP_ERR(edev, "hw prepare failed\n"); + return rc; + } + + return rc; +} + +static int qed_nic_setup(struct ecore_dev *edev) +{ + int rc; + + rc = ecore_resc_alloc(edev); + if (rc) + return rc; + + DP_INFO(edev, "Allocated qed resources\n"); + ecore_resc_setup(edev); + + return rc; +} + +#ifdef CONFIG_ECORE_ZIPPED_FW +static int qed_alloc_stream_mem(struct ecore_dev *edev) +{ + int i; + + for_each_hwfn(edev, i) { + struct ecore_hwfn *p_hwfn = &edev->hwfns[i]; + + p_hwfn->stream = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, + sizeof(*p_hwfn->stream)); + if (!p_hwfn->stream) + return -ENOMEM; + } + + return 0; +} + +static void qed_free_stream_mem(struct ecore_dev *edev) +{ + int i; + + for_each_hwfn(edev, i) { + struct ecore_hwfn *p_hwfn = &edev->hwfns[i]; + + if (!p_hwfn->stream) + return; + + OSAL_FREE(p_hwfn->p_dev, p_hwfn->stream); + } +} +#endif + +#ifdef CONFIG_ECORE_BINARY_FW +static int qed_load_firmware_data(struct ecore_dev *edev) +{ + int fd; + struct stat st; + const char *fw = RTE_LIBRTE_QEDE_FW; + + if (strcmp(fw, "") == 0) + strcpy(qede_fw_file, QEDE_DEFAULT_FIRMWARE); + else + strcpy(qede_fw_file, fw); + + fd = open(qede_fw_file, O_RDONLY); + if (fd < 0) { + DP_ERR(edev, "Can't open firmware file\n"); + return -ENOENT; + } + + if (fstat(fd, &st) < 0) { + DP_ERR(edev, "Can't stat firmware file\n"); + close(fd); + return -1; + } + + edev->firmware = rte_zmalloc("qede_fw", st.st_size, + RTE_CACHE_LINE_SIZE); + if (!edev->firmware) { + DP_ERR(edev, "Can't allocate memory for firmware\n"); + close(fd); + return -ENOMEM; + } + + if (read(fd, edev->firmware, st.st_size) != st.st_size) { + DP_ERR(edev, "Can't read firmware data\n"); + close(fd); + return -1; + } + + edev->fw_len = st.st_size; + if (edev->fw_len < 104) { + DP_ERR(edev, "Invalid fw size: %" PRIu64 "\n", + edev->fw_len); + close(fd); + return -EINVAL; + } + + close(fd); + return 0; +} 
+#endif + +static void qed_handle_bulletin_change(struct ecore_hwfn *hwfn) +{ + uint8_t mac[ETH_ALEN], is_mac_exist, is_mac_forced; + + is_mac_exist = ecore_vf_bulletin_get_forced_mac(hwfn, mac, + &is_mac_forced); + if (is_mac_exist && is_mac_forced) + rte_memcpy(hwfn->hw_info.hw_mac_addr, mac, ETH_ALEN); + + /* Always update link configuration according to bulletin */ + qed_link_update(hwfn); +} + +static void qede_vf_task(void *arg) +{ + struct ecore_hwfn *p_hwfn = arg; + uint8_t change = 0; + + /* Read the bulletin board, and re-schedule the task */ + ecore_vf_read_bulletin(p_hwfn, &change); + if (change) + qed_handle_bulletin_change(p_hwfn); + + rte_eal_alarm_set(QEDE_ALARM_TIMEOUT_US, qede_vf_task, p_hwfn); +} + +static void qed_start_iov_task(struct ecore_dev *edev) +{ + struct ecore_hwfn *p_hwfn; + int i; + + for_each_hwfn(edev, i) { + p_hwfn = &edev->hwfns[i]; + if (!IS_PF(edev)) + rte_eal_alarm_set(QEDE_ALARM_TIMEOUT_US, qede_vf_task, + p_hwfn); + } +} + +static void qed_stop_iov_task(struct ecore_dev *edev) +{ + struct ecore_hwfn *p_hwfn; + int i; + + for_each_hwfn(edev, i) { + p_hwfn = &edev->hwfns[i]; + if (!IS_PF(edev)) + rte_eal_alarm_cancel(qede_vf_task, p_hwfn); + } +} +static int qed_slowpath_start(struct ecore_dev *edev, + struct qed_slowpath_params *params) +{ + struct ecore_drv_load_params drv_load_params; + struct ecore_hw_init_params hw_init_params; + struct ecore_mcp_drv_version drv_version; + const uint8_t *data = NULL; + struct ecore_hwfn *hwfn; + struct ecore_ptt *p_ptt; + int rc; + + if (IS_PF(edev)) { +#ifdef CONFIG_ECORE_BINARY_FW + rc = qed_load_firmware_data(edev); + if (rc) { + DP_ERR(edev, "Failed to find fw file %s\n", + qede_fw_file); + goto err; + } +#endif + hwfn = ECORE_LEADING_HWFN(edev); + if (edev->num_hwfns == 1) { /* skip aRFS for 100G device */ + p_ptt = ecore_ptt_acquire(hwfn); + if (p_ptt) { + ECORE_LEADING_HWFN(edev)->p_arfs_ptt = p_ptt; + } else { + DP_ERR(edev, "Failed to acquire PTT for flowdir\n"); + rc = -ENOMEM; + goto err; + } + } + } + + rc = qed_nic_setup(edev); + if (rc) + goto err; + + /* set int_coalescing_mode */ + edev->int_coalescing_mode = ECORE_COAL_MODE_ENABLE; + +#ifdef CONFIG_ECORE_ZIPPED_FW + if (IS_PF(edev)) { + /* Allocate stream for unzipping */ + rc = qed_alloc_stream_mem(edev); + if (rc) { + DP_ERR(edev, "Failed to allocate stream memory\n"); + goto err1; + } + } +#endif + + qed_start_iov_task(edev); + +#ifdef CONFIG_ECORE_BINARY_FW + if (IS_PF(edev)) + data = (const uint8_t *)edev->firmware + sizeof(u32); +#endif + + /* Start the slowpath */ + memset(&hw_init_params, 0, sizeof(hw_init_params)); + hw_init_params.b_hw_start = true; + hw_init_params.int_mode = params->int_mode; + hw_init_params.allow_npar_tx_switch = true; + hw_init_params.bin_fw_data = data; + + memset(&drv_load_params, 0, sizeof(drv_load_params)); + drv_load_params.mfw_timeout_val = ECORE_LOAD_REQ_LOCK_TO_DEFAULT; + drv_load_params.avoid_eng_reset = false; + drv_load_params.override_force_load = ECORE_OVERRIDE_FORCE_LOAD_ALWAYS; + hw_init_params.avoid_eng_affin = false; + hw_init_params.p_drv_load_params = &drv_load_params; + + rc = ecore_hw_init(edev, &hw_init_params); + if (rc) { + DP_ERR(edev, "ecore_hw_init failed\n"); + goto err2; + } + + DP_INFO(edev, "HW inited and function started\n"); + + if (IS_PF(edev)) { + hwfn = ECORE_LEADING_HWFN(edev); + drv_version.version = (params->drv_major << 24) | + (params->drv_minor << 16) | + (params->drv_rev << 8) | (params->drv_eng); + strlcpy((char *)drv_version.name, (const char *)params->name, + 
sizeof(drv_version.name)); + rc = ecore_mcp_send_drv_version(hwfn, hwfn->p_main_ptt, + &drv_version); + if (rc) { + DP_ERR(edev, "Failed sending drv version command\n"); + goto err3; + } + } + + ecore_reset_vport_stats(edev); + + return 0; + +err3: + ecore_hw_stop(edev); +err2: + qed_stop_iov_task(edev); +#ifdef CONFIG_ECORE_ZIPPED_FW + qed_free_stream_mem(edev); +err1: +#endif + ecore_resc_free(edev); +err: +#ifdef CONFIG_ECORE_BINARY_FW + if (IS_PF(edev)) { + if (edev->firmware) + rte_free(edev->firmware); + edev->firmware = NULL; + } +#endif + qed_stop_iov_task(edev); + + return rc; +} + +static int +qed_fill_dev_info(struct ecore_dev *edev, struct qed_dev_info *dev_info) +{ + struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(edev); + struct ecore_ptt *ptt = NULL; + struct ecore_tunnel_info *tun = &edev->tunnel; + + memset(dev_info, 0, sizeof(struct qed_dev_info)); + + if (tun->vxlan.tun_cls == ECORE_TUNN_CLSS_MAC_VLAN && + tun->vxlan.b_mode_enabled) + dev_info->vxlan_enable = true; + + if (tun->l2_gre.b_mode_enabled && tun->ip_gre.b_mode_enabled && + tun->l2_gre.tun_cls == ECORE_TUNN_CLSS_MAC_VLAN && + tun->ip_gre.tun_cls == ECORE_TUNN_CLSS_MAC_VLAN) + dev_info->gre_enable = true; + + if (tun->l2_geneve.b_mode_enabled && tun->ip_geneve.b_mode_enabled && + tun->l2_geneve.tun_cls == ECORE_TUNN_CLSS_MAC_VLAN && + tun->ip_geneve.tun_cls == ECORE_TUNN_CLSS_MAC_VLAN) + dev_info->geneve_enable = true; + + dev_info->num_hwfns = edev->num_hwfns; + dev_info->is_mf_default = IS_MF_DEFAULT(&edev->hwfns[0]); + dev_info->mtu = ECORE_LEADING_HWFN(edev)->hw_info.mtu; + dev_info->dev_type = edev->type; + + rte_memcpy(&dev_info->hw_mac, &edev->hwfns[0].hw_info.hw_mac_addr, + RTE_ETHER_ADDR_LEN); + + dev_info->fw_major = FW_MAJOR_VERSION; + dev_info->fw_minor = FW_MINOR_VERSION; + dev_info->fw_rev = FW_REVISION_VERSION; + dev_info->fw_eng = FW_ENGINEERING_VERSION; + + if (IS_PF(edev)) { + dev_info->b_inter_pf_switch = + OSAL_TEST_BIT(ECORE_MF_INTER_PF_SWITCH, &edev->mf_bits); + if (!OSAL_TEST_BIT(ECORE_MF_DISABLE_ARFS, &edev->mf_bits)) + dev_info->b_arfs_capable = true; + dev_info->tx_switching = false; + + dev_info->smart_an = ecore_mcp_is_smart_an_supported(p_hwfn); + + ptt = ecore_ptt_acquire(ECORE_LEADING_HWFN(edev)); + if (ptt) { + ecore_mcp_get_mfw_ver(ECORE_LEADING_HWFN(edev), ptt, + &dev_info->mfw_rev, NULL); + + ecore_mcp_get_mbi_ver(ECORE_LEADING_HWFN(edev), ptt, + &dev_info->mbi_version); + + ecore_mcp_get_flash_size(ECORE_LEADING_HWFN(edev), ptt, + &dev_info->flash_size); + + /* Workaround to allow PHY-read commands for + * B0 bringup. 
+ */ + if (ECORE_IS_BB_B0(edev)) + dev_info->flash_size = 0xffffffff; + + ecore_ptt_release(ECORE_LEADING_HWFN(edev), ptt); + } + } else { + ecore_mcp_get_mfw_ver(ECORE_LEADING_HWFN(edev), ptt, + &dev_info->mfw_rev, NULL); + } + + return 0; +} + +int +qed_fill_eth_dev_info(struct ecore_dev *edev, struct qed_dev_eth_info *info) +{ + uint8_t queues = 0; + int i; + + memset(info, 0, sizeof(*info)); + + info->num_tc = 1 /* @@@TBD aelior MULTI_COS */; + + if (IS_PF(edev)) { + int max_vf_vlan_filters = 0; + + info->num_queues = 0; + for_each_hwfn(edev, i) + info->num_queues += + FEAT_NUM(&edev->hwfns[i], ECORE_PF_L2_QUE); + + if (IS_ECORE_SRIOV(edev)) + max_vf_vlan_filters = edev->p_iov_info->total_vfs * + ECORE_ETH_VF_NUM_VLAN_FILTERS; + info->num_vlan_filters = RESC_NUM(&edev->hwfns[0], ECORE_VLAN) - + max_vf_vlan_filters; + + rte_memcpy(&info->port_mac, &edev->hwfns[0].hw_info.hw_mac_addr, + RTE_ETHER_ADDR_LEN); + } else { + ecore_vf_get_num_rxqs(ECORE_LEADING_HWFN(edev), + &info->num_queues); + if (ECORE_IS_CMT(edev)) { + ecore_vf_get_num_rxqs(&edev->hwfns[1], &queues); + info->num_queues += queues; + } + + ecore_vf_get_num_vlan_filters(&edev->hwfns[0], + (u8 *)&info->num_vlan_filters); + + ecore_vf_get_port_mac(&edev->hwfns[0], + (uint8_t *)&info->port_mac); + + info->is_legacy = ecore_vf_get_pre_fp_hsi(&edev->hwfns[0]); + } + + qed_fill_dev_info(edev, &info->common); + + if (IS_VF(edev)) + memset(&info->common.hw_mac, 0, RTE_ETHER_ADDR_LEN); + + return 0; +} + +static void qed_set_name(struct ecore_dev *edev, char name[NAME_SIZE]) +{ + int i; + + rte_memcpy(edev->name, name, NAME_SIZE); + for_each_hwfn(edev, i) { + snprintf(edev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i); + } +} + +static uint32_t +qed_sb_init(struct ecore_dev *edev, struct ecore_sb_info *sb_info, + void *sb_virt_addr, dma_addr_t sb_phy_addr, uint16_t sb_id) +{ + struct ecore_hwfn *p_hwfn; + int hwfn_index; + uint16_t rel_sb_id; + uint8_t n_hwfns = edev->num_hwfns; + uint32_t rc; + + hwfn_index = sb_id % n_hwfns; + p_hwfn = &edev->hwfns[hwfn_index]; + rel_sb_id = sb_id / n_hwfns; + + DP_INFO(edev, "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n", + hwfn_index, rel_sb_id, sb_id); + + rc = ecore_int_sb_init(p_hwfn, p_hwfn->p_main_ptt, sb_info, + sb_virt_addr, sb_phy_addr, rel_sb_id); + + return rc; +} + +static void qed_fill_link(struct ecore_hwfn *hwfn, + __rte_unused struct ecore_ptt *ptt, + struct qed_link_output *if_link) +{ + struct ecore_mcp_link_params params; + struct ecore_mcp_link_state link; + struct ecore_mcp_link_capabilities link_caps; + uint8_t change = 0; + + memset(if_link, 0, sizeof(*if_link)); + + /* Prepare source inputs */ + if (IS_PF(hwfn->p_dev)) { + rte_memcpy(¶ms, ecore_mcp_get_link_params(hwfn), + sizeof(params)); + rte_memcpy(&link, ecore_mcp_get_link_state(hwfn), sizeof(link)); + rte_memcpy(&link_caps, ecore_mcp_get_link_capabilities(hwfn), + sizeof(link_caps)); + } else { + ecore_vf_read_bulletin(hwfn, &change); + ecore_vf_get_link_params(hwfn, ¶ms); + ecore_vf_get_link_state(hwfn, &link); + ecore_vf_get_link_caps(hwfn, &link_caps); + } + + /* Set the link parameters to pass to protocol driver */ + if (link.link_up) + if_link->link_up = true; + + if (link.link_up) + if_link->speed = link.speed; + + if_link->duplex = QEDE_DUPLEX_FULL; + + /* Fill up the native advertised speed cap mask */ + if_link->adv_speed = params.speed.advertised_speeds; + + if (params.speed.autoneg) + if_link->supported_caps |= QEDE_SUPPORTED_AUTONEG; + + if (params.pause.autoneg || params.pause.forced_rx || + 
params.pause.forced_tx) + if_link->supported_caps |= QEDE_SUPPORTED_PAUSE; + + if (params.pause.autoneg) + if_link->pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE; + + if (params.pause.forced_rx) + if_link->pause_config |= QED_LINK_PAUSE_RX_ENABLE; + + if (params.pause.forced_tx) + if_link->pause_config |= QED_LINK_PAUSE_TX_ENABLE; + + if (link_caps.default_eee == ECORE_MCP_EEE_UNSUPPORTED) { + if_link->eee_supported = false; + } else { + if_link->eee_supported = true; + if_link->eee_active = link.eee_active; + if_link->sup_caps = link_caps.eee_speed_caps; + /* MFW clears adv_caps on eee disable; use configured value */ + if_link->eee.adv_caps = link.eee_adv_caps ? link.eee_adv_caps : + params.eee.adv_caps; + if_link->eee.lp_adv_caps = link.eee_lp_adv_caps; + if_link->eee.enable = params.eee.enable; + if_link->eee.tx_lpi_enable = params.eee.tx_lpi_enable; + if_link->eee.tx_lpi_timer = params.eee.tx_lpi_timer; + } +} + +static void +qed_get_current_link(struct ecore_dev *edev, struct qed_link_output *if_link) +{ + struct ecore_hwfn *hwfn; + struct ecore_ptt *ptt; + + hwfn = &edev->hwfns[0]; + if (IS_PF(edev)) { + ptt = ecore_ptt_acquire(hwfn); + if (!ptt) + DP_NOTICE(hwfn, true, "Failed to fill link; No PTT\n"); + + qed_fill_link(hwfn, ptt, if_link); + + if (ptt) + ecore_ptt_release(hwfn, ptt); + } else { + qed_fill_link(hwfn, NULL, if_link); + } +} + +static int qed_set_link(struct ecore_dev *edev, struct qed_link_params *params) +{ + struct ecore_hwfn *hwfn; + struct ecore_ptt *ptt; + struct ecore_mcp_link_params *link_params; + int rc; + + if (IS_VF(edev)) + return 0; + + /* The link should be set only once per PF */ + hwfn = &edev->hwfns[0]; + + ptt = ecore_ptt_acquire(hwfn); + if (!ptt) + return -EBUSY; + + link_params = ecore_mcp_get_link_params(hwfn); + if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG) + link_params->speed.autoneg = params->autoneg; + + if (params->override_flags & QED_LINK_OVERRIDE_PAUSE_CONFIG) { + if (params->pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE) + link_params->pause.autoneg = true; + else + link_params->pause.autoneg = false; + if (params->pause_config & QED_LINK_PAUSE_RX_ENABLE) + link_params->pause.forced_rx = true; + else + link_params->pause.forced_rx = false; + if (params->pause_config & QED_LINK_PAUSE_TX_ENABLE) + link_params->pause.forced_tx = true; + else + link_params->pause.forced_tx = false; + } + + if (params->override_flags & QED_LINK_OVERRIDE_EEE_CONFIG) + memcpy(&link_params->eee, ¶ms->eee, + sizeof(link_params->eee)); + + rc = ecore_mcp_set_link(hwfn, ptt, params->link_up); + + ecore_ptt_release(hwfn, ptt); + + return rc; +} + +void qed_link_update(struct ecore_hwfn *hwfn) +{ + struct ecore_dev *edev = hwfn->p_dev; + struct qede_dev *qdev = (struct qede_dev *)edev; + struct rte_eth_dev *dev = (struct rte_eth_dev *)qdev->ethdev; + + if (!qede_link_update(dev, 0)) + _rte_eth_dev_callback_process(dev, + RTE_ETH_EVENT_INTR_LSC, NULL); +} + +static int qed_drain(struct ecore_dev *edev) +{ + struct ecore_hwfn *hwfn; + struct ecore_ptt *ptt; + int i, rc; + + if (IS_VF(edev)) + return 0; + + for_each_hwfn(edev, i) { + hwfn = &edev->hwfns[i]; + ptt = ecore_ptt_acquire(hwfn); + if (!ptt) { + DP_ERR(hwfn, "Failed to drain NIG; No PTT\n"); + return -EBUSY; + } + rc = ecore_mcp_drain(hwfn, ptt); + if (rc) + return rc; + ecore_ptt_release(hwfn, ptt); + } + + return 0; +} + +static int qed_nic_stop(struct ecore_dev *edev) +{ + int i, rc; + + rc = ecore_hw_stop(edev); + for (i = 0; i < edev->num_hwfns; i++) { + struct ecore_hwfn *p_hwfn = 
&edev->hwfns[i]; + + if (p_hwfn->b_sp_dpc_enabled) + p_hwfn->b_sp_dpc_enabled = false; + } + return rc; +} + +static int qed_slowpath_stop(struct ecore_dev *edev) +{ +#ifdef CONFIG_QED_SRIOV + int i; +#endif + + if (!edev) + return -ENODEV; + + if (IS_PF(edev)) { +#ifdef CONFIG_ECORE_ZIPPED_FW + qed_free_stream_mem(edev); +#endif + +#ifdef CONFIG_QED_SRIOV + if (IS_QED_ETH_IF(edev)) + qed_sriov_disable(edev, true); +#endif + } + + qed_nic_stop(edev); + + ecore_resc_free(edev); + qed_stop_iov_task(edev); + + return 0; +} + +static void qed_remove(struct ecore_dev *edev) +{ + if (!edev) + return; + + ecore_hw_remove(edev); +} + +static int qed_send_drv_state(struct ecore_dev *edev, bool active) +{ + struct ecore_hwfn *hwfn = ECORE_LEADING_HWFN(edev); + struct ecore_ptt *ptt; + int status = 0; + + ptt = ecore_ptt_acquire(hwfn); + if (!ptt) + return -EAGAIN; + + status = ecore_mcp_ov_update_driver_state(hwfn, ptt, active ? + ECORE_OV_DRIVER_STATE_ACTIVE : + ECORE_OV_DRIVER_STATE_DISABLED); + + ecore_ptt_release(hwfn, ptt); + + return status; +} + +static int qed_get_sb_info(struct ecore_dev *edev, struct ecore_sb_info *sb, + u16 qid, struct ecore_sb_info_dbg *sb_dbg) +{ + struct ecore_hwfn *hwfn = &edev->hwfns[qid % edev->num_hwfns]; + struct ecore_ptt *ptt; + int rc; + + if (IS_VF(edev)) + return -EINVAL; + + ptt = ecore_ptt_acquire(hwfn); + if (!ptt) { + DP_ERR(hwfn, "Can't acquire PTT\n"); + return -EAGAIN; + } + + memset(sb_dbg, 0, sizeof(*sb_dbg)); + rc = ecore_int_get_sb_dbg(hwfn, ptt, sb, sb_dbg); + + ecore_ptt_release(hwfn, ptt); + return rc; +} + +const struct qed_common_ops qed_common_ops_pass = { + INIT_STRUCT_FIELD(probe, &qed_probe), + INIT_STRUCT_FIELD(update_pf_params, &qed_update_pf_params), + INIT_STRUCT_FIELD(slowpath_start, &qed_slowpath_start), + INIT_STRUCT_FIELD(set_name, &qed_set_name), + INIT_STRUCT_FIELD(chain_alloc, &ecore_chain_alloc), + INIT_STRUCT_FIELD(chain_free, &ecore_chain_free), + INIT_STRUCT_FIELD(sb_init, &qed_sb_init), + INIT_STRUCT_FIELD(get_sb_info, &qed_get_sb_info), + INIT_STRUCT_FIELD(get_link, &qed_get_current_link), + INIT_STRUCT_FIELD(set_link, &qed_set_link), + INIT_STRUCT_FIELD(drain, &qed_drain), + INIT_STRUCT_FIELD(slowpath_stop, &qed_slowpath_stop), + INIT_STRUCT_FIELD(remove, &qed_remove), + INIT_STRUCT_FIELD(send_drv_state, &qed_send_drv_state), +}; + +const struct qed_eth_ops qed_eth_ops_pass = { + INIT_STRUCT_FIELD(common, &qed_common_ops_pass), + INIT_STRUCT_FIELD(fill_dev_info, &qed_fill_eth_dev_info), +}; + +const struct qed_eth_ops *qed_get_eth_ops(void) +{ + return &qed_eth_ops_pass; +} diff --git a/src/spdk/dpdk/drivers/net/qede/qede_rxtx.c b/src/spdk/dpdk/drivers/net/qede/qede_rxtx.c new file mode 100644 index 000000000..9878ba50e --- /dev/null +++ b/src/spdk/dpdk/drivers/net/qede/qede_rxtx.c @@ -0,0 +1,2811 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. + * All rights reserved. 
+ * www.cavium.com + */ + +#include +#include "qede_rxtx.h" + +static inline int qede_alloc_rx_buffer(struct qede_rx_queue *rxq) +{ + struct rte_mbuf *new_mb = NULL; + struct eth_rx_bd *rx_bd; + dma_addr_t mapping; + uint16_t idx = rxq->sw_rx_prod & NUM_RX_BDS(rxq); + + new_mb = rte_mbuf_raw_alloc(rxq->mb_pool); + if (unlikely(!new_mb)) { + PMD_RX_LOG(ERR, rxq, + "Failed to allocate rx buffer " + "sw_rx_prod %u sw_rx_cons %u mp entries %u free %u", + idx, rxq->sw_rx_cons & NUM_RX_BDS(rxq), + rte_mempool_avail_count(rxq->mb_pool), + rte_mempool_in_use_count(rxq->mb_pool)); + return -ENOMEM; + } + rxq->sw_rx_ring[idx].mbuf = new_mb; + rxq->sw_rx_ring[idx].page_offset = 0; + mapping = rte_mbuf_data_iova_default(new_mb); + /* Advance PROD and get BD pointer */ + rx_bd = (struct eth_rx_bd *)ecore_chain_produce(&rxq->rx_bd_ring); + rx_bd->addr.hi = rte_cpu_to_le_32(U64_HI(mapping)); + rx_bd->addr.lo = rte_cpu_to_le_32(U64_LO(mapping)); + rxq->sw_rx_prod++; + return 0; +} + +#define QEDE_MAX_BULK_ALLOC_COUNT 512 + +static inline int qede_alloc_rx_bulk_mbufs(struct qede_rx_queue *rxq, int count) +{ + void *obj_p[QEDE_MAX_BULK_ALLOC_COUNT] __rte_cache_aligned; + struct rte_mbuf *mbuf = NULL; + struct eth_rx_bd *rx_bd; + dma_addr_t mapping; + int i, ret = 0; + uint16_t idx; + + if (count > QEDE_MAX_BULK_ALLOC_COUNT) + count = QEDE_MAX_BULK_ALLOC_COUNT; + + ret = rte_mempool_get_bulk(rxq->mb_pool, obj_p, count); + if (unlikely(ret)) { + PMD_RX_LOG(ERR, rxq, + "Failed to allocate %d rx buffers " + "sw_rx_prod %u sw_rx_cons %u mp entries %u free %u", + count, + rxq->sw_rx_prod & NUM_RX_BDS(rxq), + rxq->sw_rx_cons & NUM_RX_BDS(rxq), + rte_mempool_avail_count(rxq->mb_pool), + rte_mempool_in_use_count(rxq->mb_pool)); + return -ENOMEM; + } + + for (i = 0; i < count; i++) { + mbuf = obj_p[i]; + if (likely(i < count - 1)) + rte_prefetch0(obj_p[i + 1]); + + idx = rxq->sw_rx_prod & NUM_RX_BDS(rxq); + rxq->sw_rx_ring[idx].mbuf = mbuf; + rxq->sw_rx_ring[idx].page_offset = 0; + mapping = rte_mbuf_data_iova_default(mbuf); + rx_bd = (struct eth_rx_bd *) + ecore_chain_produce(&rxq->rx_bd_ring); + rx_bd->addr.hi = rte_cpu_to_le_32(U64_HI(mapping)); + rx_bd->addr.lo = rte_cpu_to_le_32(U64_LO(mapping)); + rxq->sw_rx_prod++; + } + + return 0; +} + +/* Criterias for calculating Rx buffer size - + * 1) rx_buf_size should not exceed the size of mbuf + * 2) In scattered_rx mode - minimum rx_buf_size should be + * (MTU + Maximum L2 Header Size + 2) / ETH_RX_MAX_BUFF_PER_PKT + * 3) In regular mode - minimum rx_buf_size should be + * (MTU + Maximum L2 Header Size + 2) + * In above cases +2 corrosponds to 2 bytes padding in front of L2 + * header. + * 4) rx_buf_size should be cacheline-size aligned. So considering + * criteria 1, we need to adjust the size to floor instead of ceil, + * so that we don't exceed mbuf size while ceiling rx_buf_size. + */ +int +qede_calc_rx_buf_size(struct rte_eth_dev *dev, uint16_t mbufsz, + uint16_t max_frame_size) +{ + struct qede_dev *qdev = QEDE_INIT_QDEV(dev); + struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); + int rx_buf_size; + + if (dev->data->scattered_rx) { + /* per HW limitation, only ETH_RX_MAX_BUFF_PER_PKT number of + * bufferes can be used for single packet. So need to make sure + * mbuf size is sufficient enough for this. 
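+ *
+ * Worked example with assumed figures (illustrative only; the real
+ * values come from the driver headers): take ETH_RX_MAX_BUFF_PER_PKT
+ * as 5 and QEDE_ETH_OVERHEAD as 24. For mbufsz = 2048 and
+ * max_frame_size = 9000, the check below passes (2048 * 5 = 10240 >=
+ * 9024) and rx_buf_size becomes RTE_MAX(2048, 9024 / 5) =
+ * RTE_MAX(2048, 1804) = 2048, which is then floored to the
+ * cache-line size before being returned, so the per-BD buffer never
+ * exceeds the mbuf data room.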
+ */ + if ((mbufsz * ETH_RX_MAX_BUFF_PER_PKT) < + (max_frame_size + QEDE_ETH_OVERHEAD)) { + DP_ERR(edev, "mbuf %d size is not enough to hold max fragments (%d) for max rx packet length (%d)\n", + mbufsz, ETH_RX_MAX_BUFF_PER_PKT, max_frame_size); + return -EINVAL; + } + + rx_buf_size = RTE_MAX(mbufsz, + (max_frame_size + QEDE_ETH_OVERHEAD) / + ETH_RX_MAX_BUFF_PER_PKT); + } else { + rx_buf_size = max_frame_size + QEDE_ETH_OVERHEAD; + } + + /* Align to cache-line size if needed */ + return QEDE_FLOOR_TO_CACHE_LINE_SIZE(rx_buf_size); +} + +static struct qede_rx_queue * +qede_alloc_rx_queue_mem(struct rte_eth_dev *dev, + uint16_t queue_idx, + uint16_t nb_desc, + unsigned int socket_id, + struct rte_mempool *mp, + uint16_t bufsz) +{ + struct qede_dev *qdev = QEDE_INIT_QDEV(dev); + struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); + struct qede_rx_queue *rxq; + size_t size; + int rc; + + /* First allocate the rx queue data structure */ + rxq = rte_zmalloc_socket("qede_rx_queue", sizeof(struct qede_rx_queue), + RTE_CACHE_LINE_SIZE, socket_id); + + if (!rxq) { + DP_ERR(edev, "Unable to allocate memory for rxq on socket %u", + socket_id); + return NULL; + } + + rxq->qdev = qdev; + rxq->mb_pool = mp; + rxq->nb_rx_desc = nb_desc; + rxq->queue_id = queue_idx; + rxq->port_id = dev->data->port_id; + + + rxq->rx_buf_size = bufsz; + + DP_INFO(edev, "mtu %u mbufsz %u bd_max_bytes %u scatter_mode %d\n", + qdev->mtu, bufsz, rxq->rx_buf_size, dev->data->scattered_rx); + + /* Allocate the parallel driver ring for Rx buffers */ + size = sizeof(*rxq->sw_rx_ring) * rxq->nb_rx_desc; + rxq->sw_rx_ring = rte_zmalloc_socket("sw_rx_ring", size, + RTE_CACHE_LINE_SIZE, socket_id); + if (!rxq->sw_rx_ring) { + DP_ERR(edev, "Memory allocation fails for sw_rx_ring on" + " socket %u\n", socket_id); + rte_free(rxq); + return NULL; + } + + /* Allocate FW Rx ring */ + rc = qdev->ops->common->chain_alloc(edev, + ECORE_CHAIN_USE_TO_CONSUME_PRODUCE, + ECORE_CHAIN_MODE_NEXT_PTR, + ECORE_CHAIN_CNT_TYPE_U16, + rxq->nb_rx_desc, + sizeof(struct eth_rx_bd), + &rxq->rx_bd_ring, + NULL); + + if (rc != ECORE_SUCCESS) { + DP_ERR(edev, "Memory allocation fails for RX BD ring" + " on socket %u\n", socket_id); + rte_free(rxq->sw_rx_ring); + rte_free(rxq); + return NULL; + } + + /* Allocate FW completion ring */ + rc = qdev->ops->common->chain_alloc(edev, + ECORE_CHAIN_USE_TO_CONSUME, + ECORE_CHAIN_MODE_PBL, + ECORE_CHAIN_CNT_TYPE_U16, + rxq->nb_rx_desc, + sizeof(union eth_rx_cqe), + &rxq->rx_comp_ring, + NULL); + + if (rc != ECORE_SUCCESS) { + DP_ERR(edev, "Memory allocation fails for RX CQE ring" + " on socket %u\n", socket_id); + qdev->ops->common->chain_free(edev, &rxq->rx_bd_ring); + rte_free(rxq->sw_rx_ring); + rte_free(rxq); + return NULL; + } + + return rxq; +} + +int +qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qid, + uint16_t nb_desc, unsigned int socket_id, + __rte_unused const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *mp) +{ + struct qede_dev *qdev = QEDE_INIT_QDEV(dev); + struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); + struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode; + struct qede_rx_queue *rxq; + uint16_t max_rx_pkt_len; + uint16_t bufsz; + int rc; + + PMD_INIT_FUNC_TRACE(edev); + + /* Note: Ring size/align is controlled by struct rte_eth_desc_lim */ + if (!rte_is_power_of_2(nb_desc)) { + DP_ERR(edev, "Ring size %u is not power of 2\n", + nb_desc); + return -EINVAL; + } + + /* Free memory prior to re-allocation if needed... 
*/ + if (dev->data->rx_queues[qid] != NULL) { + qede_rx_queue_release(dev->data->rx_queues[qid]); + dev->data->rx_queues[qid] = NULL; + } + + max_rx_pkt_len = (uint16_t)rxmode->max_rx_pkt_len; + + /* Fix up RX buffer size */ + bufsz = (uint16_t)rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM; + /* cache align the mbuf size to simplfy rx_buf_size calculation */ + bufsz = QEDE_FLOOR_TO_CACHE_LINE_SIZE(bufsz); + if ((rxmode->offloads & DEV_RX_OFFLOAD_SCATTER) || + (max_rx_pkt_len + QEDE_ETH_OVERHEAD) > bufsz) { + if (!dev->data->scattered_rx) { + DP_INFO(edev, "Forcing scatter-gather mode\n"); + dev->data->scattered_rx = 1; + } + } + + rc = qede_calc_rx_buf_size(dev, bufsz, max_rx_pkt_len); + if (rc < 0) + return rc; + + bufsz = rc; + + if (ECORE_IS_CMT(edev)) { + rxq = qede_alloc_rx_queue_mem(dev, qid * 2, nb_desc, + socket_id, mp, bufsz); + if (!rxq) + return -ENOMEM; + + qdev->fp_array[qid * 2].rxq = rxq; + rxq = qede_alloc_rx_queue_mem(dev, qid * 2 + 1, nb_desc, + socket_id, mp, bufsz); + if (!rxq) + return -ENOMEM; + + qdev->fp_array[qid * 2 + 1].rxq = rxq; + /* provide per engine fp struct as rx queue */ + dev->data->rx_queues[qid] = &qdev->fp_array_cmt[qid]; + } else { + rxq = qede_alloc_rx_queue_mem(dev, qid, nb_desc, + socket_id, mp, bufsz); + if (!rxq) + return -ENOMEM; + + dev->data->rx_queues[qid] = rxq; + qdev->fp_array[qid].rxq = rxq; + } + + DP_INFO(edev, "rxq %d num_desc %u rx_buf_size=%u socket %u\n", + qid, nb_desc, rxq->rx_buf_size, socket_id); + + return 0; +} + +static void +qede_rx_queue_reset(__rte_unused struct qede_dev *qdev, + struct qede_rx_queue *rxq) +{ + DP_INFO(&qdev->edev, "Reset RX queue %u\n", rxq->queue_id); + ecore_chain_reset(&rxq->rx_bd_ring); + ecore_chain_reset(&rxq->rx_comp_ring); + rxq->sw_rx_prod = 0; + rxq->sw_rx_cons = 0; + *rxq->hw_cons_ptr = 0; +} + +static void qede_rx_queue_release_mbufs(struct qede_rx_queue *rxq) +{ + uint16_t i; + + if (rxq->sw_rx_ring) { + for (i = 0; i < rxq->nb_rx_desc; i++) { + if (rxq->sw_rx_ring[i].mbuf) { + rte_pktmbuf_free(rxq->sw_rx_ring[i].mbuf); + rxq->sw_rx_ring[i].mbuf = NULL; + } + } + } +} + +static void _qede_rx_queue_release(struct qede_dev *qdev, + struct ecore_dev *edev, + struct qede_rx_queue *rxq) +{ + qede_rx_queue_release_mbufs(rxq); + qdev->ops->common->chain_free(edev, &rxq->rx_bd_ring); + qdev->ops->common->chain_free(edev, &rxq->rx_comp_ring); + rte_free(rxq->sw_rx_ring); + rte_free(rxq); +} + +void qede_rx_queue_release(void *rx_queue) +{ + struct qede_rx_queue *rxq = rx_queue; + struct qede_fastpath_cmt *fp_cmt; + struct qede_dev *qdev; + struct ecore_dev *edev; + + if (rxq) { + qdev = rxq->qdev; + edev = QEDE_INIT_EDEV(qdev); + PMD_INIT_FUNC_TRACE(edev); + if (ECORE_IS_CMT(edev)) { + fp_cmt = rx_queue; + _qede_rx_queue_release(qdev, edev, fp_cmt->fp0->rxq); + _qede_rx_queue_release(qdev, edev, fp_cmt->fp1->rxq); + } else { + _qede_rx_queue_release(qdev, edev, rxq); + } + } +} + +/* Stops a given RX queue in the HW */ +static int qede_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id) +{ + struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); + struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); + struct ecore_hwfn *p_hwfn; + struct qede_rx_queue *rxq; + int hwfn_index; + int rc; + + if (rx_queue_id < qdev->num_rx_queues) { + rxq = qdev->fp_array[rx_queue_id].rxq; + hwfn_index = rx_queue_id % edev->num_hwfns; + p_hwfn = &edev->hwfns[hwfn_index]; + rc = ecore_eth_rx_queue_stop(p_hwfn, rxq->handle, + true, false); + if (rc != ECORE_SUCCESS) { + DP_ERR(edev, "RX queue %u stop fails\n", 
rx_queue_id); + return -1; + } + qede_rx_queue_release_mbufs(rxq); + qede_rx_queue_reset(qdev, rxq); + eth_dev->data->rx_queue_state[rx_queue_id] = + RTE_ETH_QUEUE_STATE_STOPPED; + DP_INFO(edev, "RX queue %u stopped\n", rx_queue_id); + } else { + DP_ERR(edev, "RX queue %u is not in range\n", rx_queue_id); + rc = -EINVAL; + } + + return rc; +} + +static struct qede_tx_queue * +qede_alloc_tx_queue_mem(struct rte_eth_dev *dev, + uint16_t queue_idx, + uint16_t nb_desc, + unsigned int socket_id, + const struct rte_eth_txconf *tx_conf) +{ + struct qede_dev *qdev = dev->data->dev_private; + struct ecore_dev *edev = &qdev->edev; + struct qede_tx_queue *txq; + int rc; + + txq = rte_zmalloc_socket("qede_tx_queue", sizeof(struct qede_tx_queue), + RTE_CACHE_LINE_SIZE, socket_id); + + if (txq == NULL) { + DP_ERR(edev, + "Unable to allocate memory for txq on socket %u", + socket_id); + return NULL; + } + + txq->nb_tx_desc = nb_desc; + txq->qdev = qdev; + txq->port_id = dev->data->port_id; + + rc = qdev->ops->common->chain_alloc(edev, + ECORE_CHAIN_USE_TO_CONSUME_PRODUCE, + ECORE_CHAIN_MODE_PBL, + ECORE_CHAIN_CNT_TYPE_U16, + txq->nb_tx_desc, + sizeof(union eth_tx_bd_types), + &txq->tx_pbl, + NULL); + if (rc != ECORE_SUCCESS) { + DP_ERR(edev, + "Unable to allocate memory for txbd ring on socket %u", + socket_id); + qede_tx_queue_release(txq); + return NULL; + } + + /* Allocate software ring */ + txq->sw_tx_ring = rte_zmalloc_socket("txq->sw_tx_ring", + (sizeof(struct qede_tx_entry) * + txq->nb_tx_desc), + RTE_CACHE_LINE_SIZE, socket_id); + + if (!txq->sw_tx_ring) { + DP_ERR(edev, + "Unable to allocate memory for txbd ring on socket %u", + socket_id); + qdev->ops->common->chain_free(edev, &txq->tx_pbl); + qede_tx_queue_release(txq); + return NULL; + } + + txq->queue_id = queue_idx; + + txq->nb_tx_avail = txq->nb_tx_desc; + + txq->tx_free_thresh = + tx_conf->tx_free_thresh ? tx_conf->tx_free_thresh : + (txq->nb_tx_desc - QEDE_DEFAULT_TX_FREE_THRESH); + + DP_INFO(edev, + "txq %u num_desc %u tx_free_thresh %u socket %u\n", + queue_idx, nb_desc, txq->tx_free_thresh, socket_id); + return txq; +} + +int +qede_tx_queue_setup(struct rte_eth_dev *dev, + uint16_t queue_idx, + uint16_t nb_desc, + unsigned int socket_id, + const struct rte_eth_txconf *tx_conf) +{ + struct qede_dev *qdev = dev->data->dev_private; + struct ecore_dev *edev = &qdev->edev; + struct qede_tx_queue *txq; + + PMD_INIT_FUNC_TRACE(edev); + + if (!rte_is_power_of_2(nb_desc)) { + DP_ERR(edev, "Ring size %u is not power of 2\n", + nb_desc); + return -EINVAL; + } + + /* Free memory prior to re-allocation if needed... 
*/ + if (dev->data->tx_queues[queue_idx] != NULL) { + qede_tx_queue_release(dev->data->tx_queues[queue_idx]); + dev->data->tx_queues[queue_idx] = NULL; + } + + if (ECORE_IS_CMT(edev)) { + txq = qede_alloc_tx_queue_mem(dev, queue_idx * 2, nb_desc, + socket_id, tx_conf); + if (!txq) + return -ENOMEM; + + qdev->fp_array[queue_idx * 2].txq = txq; + txq = qede_alloc_tx_queue_mem(dev, (queue_idx * 2) + 1, nb_desc, + socket_id, tx_conf); + if (!txq) + return -ENOMEM; + + qdev->fp_array[(queue_idx * 2) + 1].txq = txq; + dev->data->tx_queues[queue_idx] = + &qdev->fp_array_cmt[queue_idx]; + } else { + txq = qede_alloc_tx_queue_mem(dev, queue_idx, nb_desc, + socket_id, tx_conf); + if (!txq) + return -ENOMEM; + + dev->data->tx_queues[queue_idx] = txq; + qdev->fp_array[queue_idx].txq = txq; + } + + return 0; +} + +static void +qede_tx_queue_reset(__rte_unused struct qede_dev *qdev, + struct qede_tx_queue *txq) +{ + DP_INFO(&qdev->edev, "Reset TX queue %u\n", txq->queue_id); + ecore_chain_reset(&txq->tx_pbl); + txq->sw_tx_cons = 0; + txq->sw_tx_prod = 0; + *txq->hw_cons_ptr = 0; +} + +static void qede_tx_queue_release_mbufs(struct qede_tx_queue *txq) +{ + uint16_t i; + + if (txq->sw_tx_ring) { + for (i = 0; i < txq->nb_tx_desc; i++) { + if (txq->sw_tx_ring[i].mbuf) { + rte_pktmbuf_free(txq->sw_tx_ring[i].mbuf); + txq->sw_tx_ring[i].mbuf = NULL; + } + } + } +} + +static void _qede_tx_queue_release(struct qede_dev *qdev, + struct ecore_dev *edev, + struct qede_tx_queue *txq) +{ + qede_tx_queue_release_mbufs(txq); + qdev->ops->common->chain_free(edev, &txq->tx_pbl); + rte_free(txq->sw_tx_ring); + rte_free(txq); +} + +void qede_tx_queue_release(void *tx_queue) +{ + struct qede_tx_queue *txq = tx_queue; + struct qede_fastpath_cmt *fp_cmt; + struct qede_dev *qdev; + struct ecore_dev *edev; + + if (txq) { + qdev = txq->qdev; + edev = QEDE_INIT_EDEV(qdev); + PMD_INIT_FUNC_TRACE(edev); + + if (ECORE_IS_CMT(edev)) { + fp_cmt = tx_queue; + _qede_tx_queue_release(qdev, edev, fp_cmt->fp0->txq); + _qede_tx_queue_release(qdev, edev, fp_cmt->fp1->txq); + } else { + _qede_tx_queue_release(qdev, edev, txq); + } + } +} + +/* This function allocates fast-path status block memory */ +static int +qede_alloc_mem_sb(struct qede_dev *qdev, struct ecore_sb_info *sb_info, + uint16_t sb_id) +{ + struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); + struct status_block *sb_virt; + dma_addr_t sb_phys; + int rc; + + sb_virt = OSAL_DMA_ALLOC_COHERENT(edev, &sb_phys, + sizeof(struct status_block)); + if (!sb_virt) { + DP_ERR(edev, "Status block allocation failed\n"); + return -ENOMEM; + } + rc = qdev->ops->common->sb_init(edev, sb_info, sb_virt, + sb_phys, sb_id); + if (rc) { + DP_ERR(edev, "Status block initialization failed\n"); + OSAL_DMA_FREE_COHERENT(edev, sb_virt, sb_phys, + sizeof(struct status_block)); + return rc; + } + + return 0; +} + +int qede_alloc_fp_resc(struct qede_dev *qdev) +{ + struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); + struct qede_fastpath *fp; + uint32_t num_sbs; + uint16_t sb_idx; + int i; + + PMD_INIT_FUNC_TRACE(edev); + + if (IS_VF(edev)) + ecore_vf_get_num_sbs(ECORE_LEADING_HWFN(edev), &num_sbs); + else + num_sbs = ecore_cxt_get_proto_cid_count + (ECORE_LEADING_HWFN(edev), PROTOCOLID_ETH, NULL); + + if (num_sbs == 0) { + DP_ERR(edev, "No status blocks available\n"); + return -EINVAL; + } + + qdev->fp_array = rte_calloc("fp", QEDE_RXTX_MAX(qdev), + sizeof(*qdev->fp_array), RTE_CACHE_LINE_SIZE); + + if (!qdev->fp_array) { + DP_ERR(edev, "fp array allocation failed\n"); + return -ENOMEM; + } + + memset((void 
*)qdev->fp_array, 0, QEDE_RXTX_MAX(qdev) * + sizeof(*qdev->fp_array)); + + if (ECORE_IS_CMT(edev)) { + qdev->fp_array_cmt = rte_calloc("fp_cmt", + QEDE_RXTX_MAX(qdev) / 2, + sizeof(*qdev->fp_array_cmt), + RTE_CACHE_LINE_SIZE); + + if (!qdev->fp_array_cmt) { + DP_ERR(edev, "fp array for CMT allocation failed\n"); + return -ENOMEM; + } + + memset((void *)qdev->fp_array_cmt, 0, + (QEDE_RXTX_MAX(qdev) / 2) * sizeof(*qdev->fp_array_cmt)); + + /* Establish the mapping of fp_array with fp_array_cmt */ + for (i = 0; i < QEDE_RXTX_MAX(qdev) / 2; i++) { + qdev->fp_array_cmt[i].qdev = qdev; + qdev->fp_array_cmt[i].fp0 = &qdev->fp_array[i * 2]; + qdev->fp_array_cmt[i].fp1 = &qdev->fp_array[i * 2 + 1]; + } + } + + for (sb_idx = 0; sb_idx < QEDE_RXTX_MAX(qdev); sb_idx++) { + fp = &qdev->fp_array[sb_idx]; + if (!fp) + continue; + fp->sb_info = rte_calloc("sb", 1, sizeof(struct ecore_sb_info), + RTE_CACHE_LINE_SIZE); + if (!fp->sb_info) { + DP_ERR(edev, "FP sb_info allocation fails\n"); + return -1; + } + if (qede_alloc_mem_sb(qdev, fp->sb_info, sb_idx)) { + DP_ERR(edev, "FP status block allocation fails\n"); + return -1; + } + DP_INFO(edev, "sb_info idx 0x%x initialized\n", + fp->sb_info->igu_sb_id); + } + + return 0; +} + +void qede_dealloc_fp_resc(struct rte_eth_dev *eth_dev) +{ + struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); + struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); + struct qede_fastpath *fp; + uint16_t sb_idx; + uint8_t i; + + PMD_INIT_FUNC_TRACE(edev); + + for (sb_idx = 0; sb_idx < QEDE_RXTX_MAX(qdev); sb_idx++) { + fp = &qdev->fp_array[sb_idx]; + if (!fp) + continue; + DP_INFO(edev, "Free sb_info index 0x%x\n", + fp->sb_info->igu_sb_id); + if (fp->sb_info) { + OSAL_DMA_FREE_COHERENT(edev, fp->sb_info->sb_virt, + fp->sb_info->sb_phys, + sizeof(struct status_block)); + rte_free(fp->sb_info); + fp->sb_info = NULL; + } + } + + /* Free packet buffers and ring memories */ + for (i = 0; i < eth_dev->data->nb_rx_queues; i++) { + if (eth_dev->data->rx_queues[i]) { + qede_rx_queue_release(eth_dev->data->rx_queues[i]); + eth_dev->data->rx_queues[i] = NULL; + } + } + + for (i = 0; i < eth_dev->data->nb_tx_queues; i++) { + if (eth_dev->data->tx_queues[i]) { + qede_tx_queue_release(eth_dev->data->tx_queues[i]); + eth_dev->data->tx_queues[i] = NULL; + } + } + + if (qdev->fp_array) + rte_free(qdev->fp_array); + qdev->fp_array = NULL; + + if (qdev->fp_array_cmt) + rte_free(qdev->fp_array_cmt); + qdev->fp_array_cmt = NULL; +} + +static inline void +qede_update_rx_prod(__rte_unused struct qede_dev *edev, + struct qede_rx_queue *rxq) +{ + uint16_t bd_prod = ecore_chain_get_prod_idx(&rxq->rx_bd_ring); + uint16_t cqe_prod = ecore_chain_get_prod_idx(&rxq->rx_comp_ring); + struct eth_rx_prod_data rx_prods = { 0 }; + + /* Update producers */ + rx_prods.bd_prod = rte_cpu_to_le_16(bd_prod); + rx_prods.cqe_prod = rte_cpu_to_le_16(cqe_prod); + + /* Make sure that the BD and SGE data is updated before updating the + * producers since FW might read the BD/SGE right after the producer + * is updated. + */ + rte_wmb(); + + internal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods), + (uint32_t *)&rx_prods); + + /* mmiowb is needed to synchronize doorbell writes from more than one + * processor. It guarantees that the write arrives to the device before + * the napi lock is released and another qede_poll is called (possibly + * on another CPU). Without this barrier, the next doorbell can bypass + * this doorbell. This is applicable to IA64/Altix systems. 
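+ *
+ * Taken together with the first barrier above, the intended order is:
+ * (1) fill the BD/CQE ring entries, (2) rte_wmb() so the descriptors
+ * are visible before the producers, (3) internal_ram_wr() of the
+ * producer values, (4) rte_wmb() so a later doorbell cannot pass this
+ * producer update.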
+ */ + rte_wmb(); + + PMD_RX_LOG(DEBUG, rxq, "bd_prod %u cqe_prod %u", bd_prod, cqe_prod); +} + +/* Starts a given RX queue in HW */ +static int +qede_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id) +{ + struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); + struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); + struct ecore_queue_start_common_params params; + struct ecore_rxq_start_ret_params ret_params; + struct qede_rx_queue *rxq; + struct qede_fastpath *fp; + struct ecore_hwfn *p_hwfn; + dma_addr_t p_phys_table; + uint16_t page_cnt; + uint16_t j; + int hwfn_index; + int rc; + + if (rx_queue_id < qdev->num_rx_queues) { + fp = &qdev->fp_array[rx_queue_id]; + rxq = fp->rxq; + /* Allocate buffers for the Rx ring */ + for (j = 0; j < rxq->nb_rx_desc; j++) { + rc = qede_alloc_rx_buffer(rxq); + if (rc) { + DP_ERR(edev, "RX buffer allocation failed" + " for rxq = %u\n", rx_queue_id); + return -ENOMEM; + } + } + /* disable interrupts */ + ecore_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0); + /* Prepare ramrod */ + memset(¶ms, 0, sizeof(params)); + params.queue_id = rx_queue_id / edev->num_hwfns; + params.vport_id = 0; + params.stats_id = params.vport_id; + params.p_sb = fp->sb_info; + DP_INFO(edev, "rxq %u igu_sb_id 0x%x\n", + fp->rxq->queue_id, fp->sb_info->igu_sb_id); + params.sb_idx = RX_PI; + hwfn_index = rx_queue_id % edev->num_hwfns; + p_hwfn = &edev->hwfns[hwfn_index]; + p_phys_table = ecore_chain_get_pbl_phys(&fp->rxq->rx_comp_ring); + page_cnt = ecore_chain_get_page_cnt(&fp->rxq->rx_comp_ring); + memset(&ret_params, 0, sizeof(ret_params)); + rc = ecore_eth_rx_queue_start(p_hwfn, + p_hwfn->hw_info.opaque_fid, + ¶ms, fp->rxq->rx_buf_size, + fp->rxq->rx_bd_ring.p_phys_addr, + p_phys_table, page_cnt, + &ret_params); + if (rc) { + DP_ERR(edev, "RX queue %u could not be started, rc = %d\n", + rx_queue_id, rc); + return -1; + } + /* Update with the returned parameters */ + fp->rxq->hw_rxq_prod_addr = ret_params.p_prod; + fp->rxq->handle = ret_params.p_handle; + + fp->rxq->hw_cons_ptr = &fp->sb_info->sb_pi_array[RX_PI]; + qede_update_rx_prod(qdev, fp->rxq); + eth_dev->data->rx_queue_state[rx_queue_id] = + RTE_ETH_QUEUE_STATE_STARTED; + DP_INFO(edev, "RX queue %u started\n", rx_queue_id); + } else { + DP_ERR(edev, "RX queue %u is not in range\n", rx_queue_id); + rc = -EINVAL; + } + + return rc; +} + +static int +qede_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id) +{ + struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); + struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); + struct ecore_queue_start_common_params params; + struct ecore_txq_start_ret_params ret_params; + struct ecore_hwfn *p_hwfn; + dma_addr_t p_phys_table; + struct qede_tx_queue *txq; + struct qede_fastpath *fp; + uint16_t page_cnt; + int hwfn_index; + int rc; + + if (tx_queue_id < qdev->num_tx_queues) { + fp = &qdev->fp_array[tx_queue_id]; + txq = fp->txq; + memset(¶ms, 0, sizeof(params)); + params.queue_id = tx_queue_id / edev->num_hwfns; + params.vport_id = 0; + params.stats_id = params.vport_id; + params.p_sb = fp->sb_info; + DP_INFO(edev, "txq %u igu_sb_id 0x%x\n", + fp->txq->queue_id, fp->sb_info->igu_sb_id); + params.sb_idx = TX_PI(0); /* tc = 0 */ + p_phys_table = ecore_chain_get_pbl_phys(&txq->tx_pbl); + page_cnt = ecore_chain_get_page_cnt(&txq->tx_pbl); + hwfn_index = tx_queue_id % edev->num_hwfns; + p_hwfn = &edev->hwfns[hwfn_index]; + if (qdev->dev_info.is_legacy) + fp->txq->is_legacy = true; + rc = ecore_eth_tx_queue_start(p_hwfn, + p_hwfn->hw_info.opaque_fid, + ¶ms, 0 /* tc */, + p_phys_table, 
page_cnt, + &ret_params); + if (rc != ECORE_SUCCESS) { + DP_ERR(edev, "TX queue %u couldn't be started, rc=%d\n", + tx_queue_id, rc); + return -1; + } + txq->doorbell_addr = ret_params.p_doorbell; + txq->handle = ret_params.p_handle; + + txq->hw_cons_ptr = &fp->sb_info->sb_pi_array[TX_PI(0)]; + SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_DEST, + DB_DEST_XCM); + SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD, + DB_AGG_CMD_SET); + SET_FIELD(txq->tx_db.data.params, + ETH_DB_DATA_AGG_VAL_SEL, + DQ_XCM_ETH_TX_BD_PROD_CMD); + txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD; + eth_dev->data->tx_queue_state[tx_queue_id] = + RTE_ETH_QUEUE_STATE_STARTED; + DP_INFO(edev, "TX queue %u started\n", tx_queue_id); + } else { + DP_ERR(edev, "TX queue %u is not in range\n", tx_queue_id); + rc = -EINVAL; + } + + return rc; +} + +static inline void +qede_free_tx_pkt(struct qede_tx_queue *txq) +{ + struct rte_mbuf *mbuf; + uint16_t nb_segs; + uint16_t idx; + + idx = TX_CONS(txq); + mbuf = txq->sw_tx_ring[idx].mbuf; + if (mbuf) { + nb_segs = mbuf->nb_segs; + PMD_TX_LOG(DEBUG, txq, "nb_segs to free %u\n", nb_segs); + while (nb_segs) { + /* It's like consuming rxbuf in recv() */ + ecore_chain_consume(&txq->tx_pbl); + txq->nb_tx_avail++; + nb_segs--; + } + rte_pktmbuf_free(mbuf); + txq->sw_tx_ring[idx].mbuf = NULL; + txq->sw_tx_cons++; + PMD_TX_LOG(DEBUG, txq, "Freed tx packet\n"); + } else { + ecore_chain_consume(&txq->tx_pbl); + txq->nb_tx_avail++; + } +} + +static inline void +qede_process_tx_compl(__rte_unused struct ecore_dev *edev, + struct qede_tx_queue *txq) +{ + uint16_t hw_bd_cons; +#ifdef RTE_LIBRTE_QEDE_DEBUG_TX + uint16_t sw_tx_cons; +#endif + + rte_compiler_barrier(); + hw_bd_cons = rte_le_to_cpu_16(*txq->hw_cons_ptr); +#ifdef RTE_LIBRTE_QEDE_DEBUG_TX + sw_tx_cons = ecore_chain_get_cons_idx(&txq->tx_pbl); + PMD_TX_LOG(DEBUG, txq, "Tx Completions = %u\n", + abs(hw_bd_cons - sw_tx_cons)); +#endif + while (hw_bd_cons != ecore_chain_get_cons_idx(&txq->tx_pbl)) + qede_free_tx_pkt(txq); +} + +static int qede_drain_txq(struct qede_dev *qdev, + struct qede_tx_queue *txq, bool allow_drain) +{ + struct ecore_dev *edev = &qdev->edev; + int rc, cnt = 1000; + + while (txq->sw_tx_cons != txq->sw_tx_prod) { + qede_process_tx_compl(edev, txq); + if (!cnt) { + if (allow_drain) { + DP_ERR(edev, "Tx queue[%u] is stuck," + "requesting MCP to drain\n", + txq->queue_id); + rc = qdev->ops->common->drain(edev); + if (rc) + return rc; + return qede_drain_txq(qdev, txq, false); + } + DP_ERR(edev, "Timeout waiting for tx queue[%d]:" + "PROD=%d, CONS=%d\n", + txq->queue_id, txq->sw_tx_prod, + txq->sw_tx_cons); + return -1; + } + cnt--; + DELAY(1000); + rte_compiler_barrier(); + } + + /* FW finished processing, wait for HW to transmit all tx packets */ + DELAY(2000); + + return 0; +} + +/* Stops a given TX queue in the HW */ +static int qede_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id) +{ + struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); + struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); + struct ecore_hwfn *p_hwfn; + struct qede_tx_queue *txq; + int hwfn_index; + int rc; + + if (tx_queue_id < qdev->num_tx_queues) { + txq = qdev->fp_array[tx_queue_id].txq; + /* Drain txq */ + if (qede_drain_txq(qdev, txq, true)) + return -1; /* For the lack of retcodes */ + /* Stop txq */ + hwfn_index = tx_queue_id % edev->num_hwfns; + p_hwfn = &edev->hwfns[hwfn_index]; + rc = ecore_eth_tx_queue_stop(p_hwfn, txq->handle); + if (rc != ECORE_SUCCESS) { + DP_ERR(edev, "TX queue %u stop fails\n", tx_queue_id); + return 
-1; + } + qede_tx_queue_release_mbufs(txq); + qede_tx_queue_reset(qdev, txq); + eth_dev->data->tx_queue_state[tx_queue_id] = + RTE_ETH_QUEUE_STATE_STOPPED; + DP_INFO(edev, "TX queue %u stopped\n", tx_queue_id); + } else { + DP_ERR(edev, "TX queue %u is not in range\n", tx_queue_id); + rc = -EINVAL; + } + + return rc; +} + +int qede_start_queues(struct rte_eth_dev *eth_dev) +{ + struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); + uint8_t id; + int rc = -1; + + for (id = 0; id < qdev->num_rx_queues; id++) { + rc = qede_rx_queue_start(eth_dev, id); + if (rc != ECORE_SUCCESS) + return -1; + } + + for (id = 0; id < qdev->num_tx_queues; id++) { + rc = qede_tx_queue_start(eth_dev, id); + if (rc != ECORE_SUCCESS) + return -1; + } + + return rc; +} + +void qede_stop_queues(struct rte_eth_dev *eth_dev) +{ + struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); + uint8_t id; + + /* Stopping RX/TX queues */ + for (id = 0; id < qdev->num_tx_queues; id++) + qede_tx_queue_stop(eth_dev, id); + + for (id = 0; id < qdev->num_rx_queues; id++) + qede_rx_queue_stop(eth_dev, id); +} + +static inline bool qede_tunn_exist(uint16_t flag) +{ + return !!((PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK << + PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT) & flag); +} + +static inline uint8_t qede_check_tunn_csum_l3(uint16_t flag) +{ + return !!((PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK << + PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT) & flag); +} + +/* + * qede_check_tunn_csum_l4: + * Returns: + * 1 : If L4 csum is enabled AND if the validation has failed. + * 0 : Otherwise + */ +static inline uint8_t qede_check_tunn_csum_l4(uint16_t flag) +{ + if ((PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK << + PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT) & flag) + return !!((PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK << + PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT) & flag); + + return 0; +} + +static inline uint8_t qede_check_notunn_csum_l4(uint16_t flag) +{ + if ((PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK << + PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT) & flag) + return !!((PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK << + PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT) & flag); + + return 0; +} + +/* Returns outer L2, L3 and L4 packet_type for tunneled packets */ +static inline uint32_t qede_rx_cqe_to_pkt_type_outer(struct rte_mbuf *m) +{ + uint32_t packet_type = RTE_PTYPE_UNKNOWN; + struct rte_ether_hdr *eth_hdr; + struct rte_ipv4_hdr *ipv4_hdr; + struct rte_ipv6_hdr *ipv6_hdr; + struct rte_vlan_hdr *vlan_hdr; + uint16_t ethertype; + bool vlan_tagged = 0; + uint16_t len; + + eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *); + len = sizeof(struct rte_ether_hdr); + ethertype = rte_cpu_to_be_16(eth_hdr->ether_type); + + /* Note: Valid only if VLAN stripping is disabled */ + if (ethertype == RTE_ETHER_TYPE_VLAN) { + vlan_tagged = 1; + vlan_hdr = (struct rte_vlan_hdr *)(eth_hdr + 1); + len += sizeof(struct rte_vlan_hdr); + ethertype = rte_cpu_to_be_16(vlan_hdr->eth_proto); + } + + if (ethertype == RTE_ETHER_TYPE_IPV4) { + packet_type |= RTE_PTYPE_L3_IPV4; + ipv4_hdr = rte_pktmbuf_mtod_offset(m, + struct rte_ipv4_hdr *, len); + if (ipv4_hdr->next_proto_id == IPPROTO_TCP) + packet_type |= RTE_PTYPE_L4_TCP; + else if (ipv4_hdr->next_proto_id == IPPROTO_UDP) + packet_type |= RTE_PTYPE_L4_UDP; + } else if (ethertype == RTE_ETHER_TYPE_IPV6) { + packet_type |= RTE_PTYPE_L3_IPV6; + ipv6_hdr = rte_pktmbuf_mtod_offset(m, + struct rte_ipv6_hdr *, len); + if (ipv6_hdr->proto == IPPROTO_TCP) + packet_type |= RTE_PTYPE_L4_TCP; + 
else if (ipv6_hdr->proto == IPPROTO_UDP) + packet_type |= RTE_PTYPE_L4_UDP; + } + + if (vlan_tagged) + packet_type |= RTE_PTYPE_L2_ETHER_VLAN; + else + packet_type |= RTE_PTYPE_L2_ETHER; + + return packet_type; +} + +static inline uint32_t qede_rx_cqe_to_pkt_type_inner(uint16_t flags) +{ + uint16_t val; + + /* Lookup table */ + static const uint32_t + ptype_lkup_tbl[QEDE_PKT_TYPE_MAX] __rte_cache_aligned = { + [QEDE_PKT_TYPE_IPV4] = RTE_PTYPE_INNER_L3_IPV4 | + RTE_PTYPE_INNER_L2_ETHER, + [QEDE_PKT_TYPE_IPV6] = RTE_PTYPE_INNER_L3_IPV6 | + RTE_PTYPE_INNER_L2_ETHER, + [QEDE_PKT_TYPE_IPV4_TCP] = RTE_PTYPE_INNER_L3_IPV4 | + RTE_PTYPE_INNER_L4_TCP | + RTE_PTYPE_INNER_L2_ETHER, + [QEDE_PKT_TYPE_IPV6_TCP] = RTE_PTYPE_INNER_L3_IPV6 | + RTE_PTYPE_INNER_L4_TCP | + RTE_PTYPE_INNER_L2_ETHER, + [QEDE_PKT_TYPE_IPV4_UDP] = RTE_PTYPE_INNER_L3_IPV4 | + RTE_PTYPE_INNER_L4_UDP | + RTE_PTYPE_INNER_L2_ETHER, + [QEDE_PKT_TYPE_IPV6_UDP] = RTE_PTYPE_INNER_L3_IPV6 | + RTE_PTYPE_INNER_L4_UDP | + RTE_PTYPE_INNER_L2_ETHER, + /* Frags with no VLAN */ + [QEDE_PKT_TYPE_IPV4_FRAG] = RTE_PTYPE_INNER_L3_IPV4 | + RTE_PTYPE_INNER_L4_FRAG | + RTE_PTYPE_INNER_L2_ETHER, + [QEDE_PKT_TYPE_IPV6_FRAG] = RTE_PTYPE_INNER_L3_IPV6 | + RTE_PTYPE_INNER_L4_FRAG | + RTE_PTYPE_INNER_L2_ETHER, + /* VLANs */ + [QEDE_PKT_TYPE_IPV4_VLAN] = RTE_PTYPE_INNER_L3_IPV4 | + RTE_PTYPE_INNER_L2_ETHER_VLAN, + [QEDE_PKT_TYPE_IPV6_VLAN] = RTE_PTYPE_INNER_L3_IPV6 | + RTE_PTYPE_INNER_L2_ETHER_VLAN, + [QEDE_PKT_TYPE_IPV4_TCP_VLAN] = RTE_PTYPE_INNER_L3_IPV4 | + RTE_PTYPE_INNER_L4_TCP | + RTE_PTYPE_INNER_L2_ETHER_VLAN, + [QEDE_PKT_TYPE_IPV6_TCP_VLAN] = RTE_PTYPE_INNER_L3_IPV6 | + RTE_PTYPE_INNER_L4_TCP | + RTE_PTYPE_INNER_L2_ETHER_VLAN, + [QEDE_PKT_TYPE_IPV4_UDP_VLAN] = RTE_PTYPE_INNER_L3_IPV4 | + RTE_PTYPE_INNER_L4_UDP | + RTE_PTYPE_INNER_L2_ETHER_VLAN, + [QEDE_PKT_TYPE_IPV6_UDP_VLAN] = RTE_PTYPE_INNER_L3_IPV6 | + RTE_PTYPE_INNER_L4_UDP | + RTE_PTYPE_INNER_L2_ETHER_VLAN, + /* Frags with VLAN */ + [QEDE_PKT_TYPE_IPV4_VLAN_FRAG] = RTE_PTYPE_INNER_L3_IPV4 | + RTE_PTYPE_INNER_L4_FRAG | + RTE_PTYPE_INNER_L2_ETHER_VLAN, + [QEDE_PKT_TYPE_IPV6_VLAN_FRAG] = RTE_PTYPE_INNER_L3_IPV6 | + RTE_PTYPE_INNER_L4_FRAG | + RTE_PTYPE_INNER_L2_ETHER_VLAN, + }; + + /* Bits (0..3) provides L3/L4 protocol type */ + /* Bits (4,5) provides frag and VLAN info */ + val = ((PARSING_AND_ERR_FLAGS_L3TYPE_MASK << + PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) | + (PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK << + PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT) | + (PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK << + PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT) | + (PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK << + PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT)) & flags; + + if (val < QEDE_PKT_TYPE_MAX) + return ptype_lkup_tbl[val]; + + return RTE_PTYPE_UNKNOWN; +} + +static inline uint32_t qede_rx_cqe_to_pkt_type(uint16_t flags) +{ + uint16_t val; + + /* Lookup table */ + static const uint32_t + ptype_lkup_tbl[QEDE_PKT_TYPE_MAX] __rte_cache_aligned = { + [QEDE_PKT_TYPE_IPV4] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L2_ETHER, + [QEDE_PKT_TYPE_IPV6] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L2_ETHER, + [QEDE_PKT_TYPE_IPV4_TCP] = RTE_PTYPE_L3_IPV4 | + RTE_PTYPE_L4_TCP | + RTE_PTYPE_L2_ETHER, + [QEDE_PKT_TYPE_IPV6_TCP] = RTE_PTYPE_L3_IPV6 | + RTE_PTYPE_L4_TCP | + RTE_PTYPE_L2_ETHER, + [QEDE_PKT_TYPE_IPV4_UDP] = RTE_PTYPE_L3_IPV4 | + RTE_PTYPE_L4_UDP | + RTE_PTYPE_L2_ETHER, + [QEDE_PKT_TYPE_IPV6_UDP] = RTE_PTYPE_L3_IPV6 | + RTE_PTYPE_L4_UDP | + RTE_PTYPE_L2_ETHER, + /* Frags with no VLAN */ + [QEDE_PKT_TYPE_IPV4_FRAG] = RTE_PTYPE_L3_IPV4 | + RTE_PTYPE_L4_FRAG | + 
RTE_PTYPE_L2_ETHER, + [QEDE_PKT_TYPE_IPV6_FRAG] = RTE_PTYPE_L3_IPV6 | + RTE_PTYPE_L4_FRAG | + RTE_PTYPE_L2_ETHER, + /* VLANs */ + [QEDE_PKT_TYPE_IPV4_VLAN] = RTE_PTYPE_L3_IPV4 | + RTE_PTYPE_L2_ETHER_VLAN, + [QEDE_PKT_TYPE_IPV6_VLAN] = RTE_PTYPE_L3_IPV6 | + RTE_PTYPE_L2_ETHER_VLAN, + [QEDE_PKT_TYPE_IPV4_TCP_VLAN] = RTE_PTYPE_L3_IPV4 | + RTE_PTYPE_L4_TCP | + RTE_PTYPE_L2_ETHER_VLAN, + [QEDE_PKT_TYPE_IPV6_TCP_VLAN] = RTE_PTYPE_L3_IPV6 | + RTE_PTYPE_L4_TCP | + RTE_PTYPE_L2_ETHER_VLAN, + [QEDE_PKT_TYPE_IPV4_UDP_VLAN] = RTE_PTYPE_L3_IPV4 | + RTE_PTYPE_L4_UDP | + RTE_PTYPE_L2_ETHER_VLAN, + [QEDE_PKT_TYPE_IPV6_UDP_VLAN] = RTE_PTYPE_L3_IPV6 | + RTE_PTYPE_L4_UDP | + RTE_PTYPE_L2_ETHER_VLAN, + /* Frags with VLAN */ + [QEDE_PKT_TYPE_IPV4_VLAN_FRAG] = RTE_PTYPE_L3_IPV4 | + RTE_PTYPE_L4_FRAG | + RTE_PTYPE_L2_ETHER_VLAN, + [QEDE_PKT_TYPE_IPV6_VLAN_FRAG] = RTE_PTYPE_L3_IPV6 | + RTE_PTYPE_L4_FRAG | + RTE_PTYPE_L2_ETHER_VLAN, + }; + + /* Bits (0..3) provides L3/L4 protocol type */ + /* Bits (4,5) provides frag and VLAN info */ + val = ((PARSING_AND_ERR_FLAGS_L3TYPE_MASK << + PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) | + (PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK << + PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT) | + (PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK << + PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT) | + (PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK << + PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT)) & flags; + + if (val < QEDE_PKT_TYPE_MAX) + return ptype_lkup_tbl[val]; + + return RTE_PTYPE_UNKNOWN; +} + +static inline uint8_t +qede_check_notunn_csum_l3(struct rte_mbuf *m, uint16_t flag) +{ + struct rte_ipv4_hdr *ip; + uint16_t pkt_csum; + uint16_t calc_csum; + uint16_t val; + + val = ((PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK << + PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT) & flag); + + if (unlikely(val)) { + m->packet_type = qede_rx_cqe_to_pkt_type(flag); + if (RTE_ETH_IS_IPV4_HDR(m->packet_type)) { + ip = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *, + sizeof(struct rte_ether_hdr)); + pkt_csum = ip->hdr_checksum; + ip->hdr_checksum = 0; + calc_csum = rte_ipv4_cksum(ip); + ip->hdr_checksum = pkt_csum; + return (calc_csum != pkt_csum); + } else if (RTE_ETH_IS_IPV6_HDR(m->packet_type)) { + return 1; + } + } + return 0; +} + +static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq) +{ + ecore_chain_consume(&rxq->rx_bd_ring); + rxq->sw_rx_cons++; +} + +static inline void +qede_reuse_page(__rte_unused struct qede_dev *qdev, + struct qede_rx_queue *rxq, struct qede_rx_entry *curr_cons) +{ + struct eth_rx_bd *rx_bd_prod = ecore_chain_produce(&rxq->rx_bd_ring); + uint16_t idx = rxq->sw_rx_prod & NUM_RX_BDS(rxq); + struct qede_rx_entry *curr_prod; + dma_addr_t new_mapping; + + curr_prod = &rxq->sw_rx_ring[idx]; + *curr_prod = *curr_cons; + + new_mapping = rte_mbuf_data_iova_default(curr_prod->mbuf) + + curr_prod->page_offset; + + rx_bd_prod->addr.hi = rte_cpu_to_le_32(U64_HI(new_mapping)); + rx_bd_prod->addr.lo = rte_cpu_to_le_32(U64_LO(new_mapping)); + + rxq->sw_rx_prod++; +} + +static inline void +qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, + struct qede_dev *qdev, uint8_t count) +{ + struct qede_rx_entry *curr_cons; + + for (; count > 0; count--) { + curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS(rxq)]; + qede_reuse_page(qdev, rxq, curr_cons); + qede_rx_bd_ring_consume(rxq); + } +} + +static inline void +qede_rx_process_tpa_cmn_cont_end_cqe(__rte_unused struct qede_dev *qdev, + struct qede_rx_queue *rxq, + uint8_t agg_index, uint16_t len) +{ + struct qede_agg_info *tpa_info; + struct rte_mbuf *curr_frag; /* 
Pointer to currently filled TPA seg */ + uint16_t cons_idx; + + /* Under certain conditions it is possible that FW may not consume + * additional or new BD. So decision to consume the BD must be made + * based on len_list[0]. + */ + if (rte_le_to_cpu_16(len)) { + tpa_info = &rxq->tpa_info[agg_index]; + cons_idx = rxq->sw_rx_cons & NUM_RX_BDS(rxq); + curr_frag = rxq->sw_rx_ring[cons_idx].mbuf; + assert(curr_frag); + curr_frag->nb_segs = 1; + curr_frag->pkt_len = rte_le_to_cpu_16(len); + curr_frag->data_len = curr_frag->pkt_len; + tpa_info->tpa_tail->next = curr_frag; + tpa_info->tpa_tail = curr_frag; + qede_rx_bd_ring_consume(rxq); + if (unlikely(qede_alloc_rx_buffer(rxq) != 0)) { + PMD_RX_LOG(ERR, rxq, "mbuf allocation fails\n"); + rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++; + rxq->rx_alloc_errors++; + } + } +} + +static inline void +qede_rx_process_tpa_cont_cqe(struct qede_dev *qdev, + struct qede_rx_queue *rxq, + struct eth_fast_path_rx_tpa_cont_cqe *cqe) +{ + PMD_RX_LOG(INFO, rxq, "TPA cont[%d] - len [%d]\n", + cqe->tpa_agg_index, rte_le_to_cpu_16(cqe->len_list[0])); + /* only len_list[0] will have value */ + qede_rx_process_tpa_cmn_cont_end_cqe(qdev, rxq, cqe->tpa_agg_index, + cqe->len_list[0]); +} + +static inline void +qede_rx_process_tpa_end_cqe(struct qede_dev *qdev, + struct qede_rx_queue *rxq, + struct eth_fast_path_rx_tpa_end_cqe *cqe) +{ + struct rte_mbuf *rx_mb; /* Pointer to head of the chained agg */ + + qede_rx_process_tpa_cmn_cont_end_cqe(qdev, rxq, cqe->tpa_agg_index, + cqe->len_list[0]); + /* Update total length and frags based on end TPA */ + rx_mb = rxq->tpa_info[cqe->tpa_agg_index].tpa_head; + /* TODO: Add Sanity Checks */ + rx_mb->nb_segs = cqe->num_of_bds; + rx_mb->pkt_len = cqe->total_packet_len; + + PMD_RX_LOG(INFO, rxq, "TPA End[%d] reason %d cqe_len %d nb_segs %d" + " pkt_len %d\n", cqe->tpa_agg_index, cqe->end_reason, + rte_le_to_cpu_16(cqe->len_list[0]), rx_mb->nb_segs, + rx_mb->pkt_len); +} + +static inline uint32_t qede_rx_cqe_to_tunn_pkt_type(uint16_t flags) +{ + uint32_t val; + + /* Lookup table */ + static const uint32_t + ptype_tunn_lkup_tbl[QEDE_PKT_TYPE_TUNN_MAX_TYPE] __rte_cache_aligned = { + [QEDE_PKT_TYPE_UNKNOWN] = RTE_PTYPE_UNKNOWN, + [QEDE_PKT_TYPE_TUNN_GENEVE] = RTE_PTYPE_TUNNEL_GENEVE, + [QEDE_PKT_TYPE_TUNN_GRE] = RTE_PTYPE_TUNNEL_GRE, + [QEDE_PKT_TYPE_TUNN_VXLAN] = RTE_PTYPE_TUNNEL_VXLAN, + [QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_GENEVE] = + RTE_PTYPE_TUNNEL_GENEVE, + [QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_GRE] = + RTE_PTYPE_TUNNEL_GRE, + [QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_VXLAN] = + RTE_PTYPE_TUNNEL_VXLAN, + [QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_GENEVE] = + RTE_PTYPE_TUNNEL_GENEVE, + [QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_GRE] = + RTE_PTYPE_TUNNEL_GRE, + [QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_VXLAN] = + RTE_PTYPE_TUNNEL_VXLAN, + [QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_GENEVE] = + RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L3_IPV4, + [QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_GRE] = + RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L3_IPV4, + [QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_VXLAN] = + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L3_IPV4, + [QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_GENEVE] = + RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L3_IPV4, + [QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_GRE] = + RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L3_IPV4, + [QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_VXLAN] = + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L3_IPV4, + [QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_GENEVE] = + RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L3_IPV6, + [QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_GRE] = + 
RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L3_IPV6, + [QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_VXLAN] = + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L3_IPV6, + [QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_GENEVE] = + RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L3_IPV6, + [QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_GRE] = + RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L3_IPV6, + [QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_VXLAN] = + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L3_IPV6, + }; + + /* Cover bits[4-0] to include tunn_type and next protocol */ + val = ((ETH_TUNNEL_PARSING_FLAGS_TYPE_MASK << + ETH_TUNNEL_PARSING_FLAGS_TYPE_SHIFT) | + (ETH_TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_MASK << + ETH_TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_SHIFT)) & flags; + + if (val < QEDE_PKT_TYPE_TUNN_MAX_TYPE) + return ptype_tunn_lkup_tbl[val]; + else + return RTE_PTYPE_UNKNOWN; +} + +static inline int +qede_process_sg_pkts(void *p_rxq, struct rte_mbuf *rx_mb, + uint8_t num_segs, uint16_t pkt_len) +{ + struct qede_rx_queue *rxq = p_rxq; + struct qede_dev *qdev = rxq->qdev; + register struct rte_mbuf *seg1 = NULL; + register struct rte_mbuf *seg2 = NULL; + uint16_t sw_rx_index; + uint16_t cur_size; + + seg1 = rx_mb; + while (num_segs) { + cur_size = pkt_len > rxq->rx_buf_size ? rxq->rx_buf_size : + pkt_len; + if (unlikely(!cur_size)) { + PMD_RX_LOG(ERR, rxq, "Length is 0 while %u BDs" + " left for mapping jumbo\n", num_segs); + qede_recycle_rx_bd_ring(rxq, qdev, num_segs); + return -EINVAL; + } + sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS(rxq); + seg2 = rxq->sw_rx_ring[sw_rx_index].mbuf; + qede_rx_bd_ring_consume(rxq); + pkt_len -= cur_size; + seg2->data_len = cur_size; + seg1->next = seg2; + seg1 = seg1->next; + num_segs--; + rxq->rx_segs++; + } + + return 0; +} + +#ifdef RTE_LIBRTE_QEDE_DEBUG_RX +static inline void +print_rx_bd_info(struct rte_mbuf *m, struct qede_rx_queue *rxq, + uint8_t bitfield) +{ + PMD_RX_LOG(INFO, rxq, + "len 0x%04x bf 0x%04x hash_val 0x%x" + " ol_flags 0x%04lx l2=%s l3=%s l4=%s tunn=%s" + " inner_l2=%s inner_l3=%s inner_l4=%s\n", + m->data_len, bitfield, m->hash.rss, + (unsigned long)m->ol_flags, + rte_get_ptype_l2_name(m->packet_type), + rte_get_ptype_l3_name(m->packet_type), + rte_get_ptype_l4_name(m->packet_type), + rte_get_ptype_tunnel_name(m->packet_type), + rte_get_ptype_inner_l2_name(m->packet_type), + rte_get_ptype_inner_l3_name(m->packet_type), + rte_get_ptype_inner_l4_name(m->packet_type)); +} +#endif + +uint16_t +qede_recv_pkts_regular(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) +{ + struct eth_fast_path_rx_reg_cqe *fp_cqe = NULL; + register struct rte_mbuf *rx_mb = NULL; + struct qede_rx_queue *rxq = p_rxq; + struct qede_dev *qdev = rxq->qdev; + struct ecore_dev *edev = &qdev->edev; + union eth_rx_cqe *cqe; + uint64_t ol_flags; + enum eth_rx_cqe_type cqe_type; + int rss_enable = qdev->rss_enable; + int rx_alloc_count = 0; + uint32_t packet_type; + uint32_t rss_hash; + uint16_t vlan_tci, port_id; + uint16_t hw_comp_cons, sw_comp_cons, sw_rx_index, num_rx_bds; + uint16_t rx_pkt = 0; + uint16_t pkt_len = 0; + uint16_t len; /* Length of first BD */ + uint16_t preload_idx; + uint16_t parse_flag; +#ifdef RTE_LIBRTE_QEDE_DEBUG_RX + uint8_t bitfield_val; +#endif + uint8_t offset, flags, bd_num; + + + /* Allocate buffers that we used in previous loop */ + if (rxq->rx_alloc_count) { + if (unlikely(qede_alloc_rx_bulk_mbufs(rxq, + rxq->rx_alloc_count))) { + struct rte_eth_dev *dev; + + PMD_RX_LOG(ERR, rxq, + "New buffer allocation failed," + "dropping incoming packetn"); + dev = &rte_eth_devices[rxq->port_id]; + dev->data->rx_mbuf_alloc_failed 
+= + rxq->rx_alloc_count; + rxq->rx_alloc_errors += rxq->rx_alloc_count; + return 0; + } + qede_update_rx_prod(qdev, rxq); + rxq->rx_alloc_count = 0; + } + + hw_comp_cons = rte_le_to_cpu_16(*rxq->hw_cons_ptr); + sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring); + + rte_rmb(); + + if (hw_comp_cons == sw_comp_cons) + return 0; + + num_rx_bds = NUM_RX_BDS(rxq); + port_id = rxq->port_id; + + while (sw_comp_cons != hw_comp_cons) { + ol_flags = 0; + packet_type = RTE_PTYPE_UNKNOWN; + vlan_tci = 0; + rss_hash = 0; + + /* Get the CQE from the completion ring */ + cqe = + (union eth_rx_cqe *)ecore_chain_consume(&rxq->rx_comp_ring); + cqe_type = cqe->fast_path_regular.type; + PMD_RX_LOG(INFO, rxq, "Rx CQE type %d\n", cqe_type); + + if (likely(cqe_type == ETH_RX_CQE_TYPE_REGULAR)) { + fp_cqe = &cqe->fast_path_regular; + } else { + if (cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH) { + PMD_RX_LOG(INFO, rxq, "Got unexpected slowpath CQE\n"); + ecore_eth_cqe_completion + (&edev->hwfns[rxq->queue_id % + edev->num_hwfns], + (struct eth_slow_path_rx_cqe *)cqe); + } + goto next_cqe; + } + + /* Get the data from the SW ring */ + sw_rx_index = rxq->sw_rx_cons & num_rx_bds; + rx_mb = rxq->sw_rx_ring[sw_rx_index].mbuf; + assert(rx_mb != NULL); + + parse_flag = rte_le_to_cpu_16(fp_cqe->pars_flags.flags); + offset = fp_cqe->placement_offset; + len = rte_le_to_cpu_16(fp_cqe->len_on_first_bd); + pkt_len = rte_le_to_cpu_16(fp_cqe->pkt_len); + vlan_tci = rte_le_to_cpu_16(fp_cqe->vlan_tag); + rss_hash = rte_le_to_cpu_32(fp_cqe->rss_hash); + bd_num = fp_cqe->bd_num; +#ifdef RTE_LIBRTE_QEDE_DEBUG_RX + bitfield_val = fp_cqe->bitfields; +#endif + + if (unlikely(qede_tunn_exist(parse_flag))) { + PMD_RX_LOG(INFO, rxq, "Rx tunneled packet\n"); + if (unlikely(qede_check_tunn_csum_l4(parse_flag))) { + PMD_RX_LOG(ERR, rxq, + "L4 csum failed, flags = 0x%x\n", + parse_flag); + rxq->rx_hw_errors++; + ol_flags |= PKT_RX_L4_CKSUM_BAD; + } else { + ol_flags |= PKT_RX_L4_CKSUM_GOOD; + } + + if (unlikely(qede_check_tunn_csum_l3(parse_flag))) { + PMD_RX_LOG(ERR, rxq, + "Outer L3 csum failed, flags = 0x%x\n", + parse_flag); + rxq->rx_hw_errors++; + ol_flags |= PKT_RX_EIP_CKSUM_BAD; + } else { + ol_flags |= PKT_RX_IP_CKSUM_GOOD; + } + + flags = fp_cqe->tunnel_pars_flags.flags; + + /* Tunnel_type */ + packet_type = + qede_rx_cqe_to_tunn_pkt_type(flags); + + /* Inner header */ + packet_type |= + qede_rx_cqe_to_pkt_type_inner(parse_flag); + + /* Outer L3/L4 types is not available in CQE */ + packet_type |= qede_rx_cqe_to_pkt_type_outer(rx_mb); + + /* Outer L3/L4 types is not available in CQE. + * Need to add offset to parse correctly, + */ + rx_mb->data_off = offset + RTE_PKTMBUF_HEADROOM; + packet_type |= qede_rx_cqe_to_pkt_type_outer(rx_mb); + } else { + packet_type |= qede_rx_cqe_to_pkt_type(parse_flag); + } + + /* Common handling for non-tunnel packets and for inner + * headers in the case of tunnel. 
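+ * The CQE parse flags describe the innermost headers, so the same L3/L4 checksum checks apply here in both cases.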
+ */ + if (unlikely(qede_check_notunn_csum_l4(parse_flag))) { + PMD_RX_LOG(ERR, rxq, + "L4 csum failed, flags = 0x%x\n", + parse_flag); + rxq->rx_hw_errors++; + ol_flags |= PKT_RX_L4_CKSUM_BAD; + } else { + ol_flags |= PKT_RX_L4_CKSUM_GOOD; + } + if (unlikely(qede_check_notunn_csum_l3(rx_mb, parse_flag))) { + PMD_RX_LOG(ERR, rxq, "IP csum failed, flags = 0x%x\n", + parse_flag); + rxq->rx_hw_errors++; + ol_flags |= PKT_RX_IP_CKSUM_BAD; + } else { + ol_flags |= PKT_RX_IP_CKSUM_GOOD; + } + + if (unlikely(CQE_HAS_VLAN(parse_flag) || + CQE_HAS_OUTER_VLAN(parse_flag))) { + /* Note: FW doesn't indicate Q-in-Q packet */ + ol_flags |= PKT_RX_VLAN; + if (qdev->vlan_strip_flg) { + ol_flags |= PKT_RX_VLAN_STRIPPED; + rx_mb->vlan_tci = vlan_tci; + } + } + + if (rss_enable) { + ol_flags |= PKT_RX_RSS_HASH; + rx_mb->hash.rss = rss_hash; + } + + rx_alloc_count++; + qede_rx_bd_ring_consume(rxq); + + /* Prefetch next mbuf while processing current one. */ + preload_idx = rxq->sw_rx_cons & num_rx_bds; + rte_prefetch0(rxq->sw_rx_ring[preload_idx].mbuf); + + /* Update rest of the MBUF fields */ + rx_mb->data_off = offset + RTE_PKTMBUF_HEADROOM; + rx_mb->port = port_id; + rx_mb->ol_flags = ol_flags; + rx_mb->data_len = len; + rx_mb->packet_type = packet_type; +#ifdef RTE_LIBRTE_QEDE_DEBUG_RX + print_rx_bd_info(rx_mb, rxq, bitfield_val); +#endif + rx_mb->nb_segs = bd_num; + rx_mb->pkt_len = pkt_len; + + rx_pkts[rx_pkt] = rx_mb; + rx_pkt++; + +next_cqe: + ecore_chain_recycle_consumed(&rxq->rx_comp_ring); + sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring); + if (rx_pkt == nb_pkts) { + PMD_RX_LOG(DEBUG, rxq, + "Budget reached nb_pkts=%u received=%u", + rx_pkt, nb_pkts); + break; + } + } + + /* Request number of bufferes to be allocated in next loop */ + rxq->rx_alloc_count = rx_alloc_count; + + rxq->rcv_pkts += rx_pkt; + rxq->rx_segs += rx_pkt; + PMD_RX_LOG(DEBUG, rxq, "rx_pkts=%u core=%d", rx_pkt, rte_lcore_id()); + + return rx_pkt; +} + +uint16_t +qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) +{ + struct qede_rx_queue *rxq = p_rxq; + struct qede_dev *qdev = rxq->qdev; + struct ecore_dev *edev = &qdev->edev; + uint16_t hw_comp_cons, sw_comp_cons, sw_rx_index; + uint16_t rx_pkt = 0; + union eth_rx_cqe *cqe; + struct eth_fast_path_rx_reg_cqe *fp_cqe = NULL; + register struct rte_mbuf *rx_mb = NULL; + register struct rte_mbuf *seg1 = NULL; + enum eth_rx_cqe_type cqe_type; + uint16_t pkt_len = 0; /* Sum of all BD segments */ + uint16_t len; /* Length of first BD */ + uint8_t num_segs = 1; + uint16_t preload_idx; + uint16_t parse_flag; +#ifdef RTE_LIBRTE_QEDE_DEBUG_RX + uint8_t bitfield_val; +#endif + uint8_t tunn_parse_flag; + struct eth_fast_path_rx_tpa_start_cqe *cqe_start_tpa; + uint64_t ol_flags; + uint32_t packet_type; + uint16_t vlan_tci; + bool tpa_start_flg; + uint8_t offset, tpa_agg_idx, flags; + struct qede_agg_info *tpa_info = NULL; + uint32_t rss_hash; + int rx_alloc_count = 0; + + + /* Allocate buffers that we used in previous loop */ + if (rxq->rx_alloc_count) { + if (unlikely(qede_alloc_rx_bulk_mbufs(rxq, + rxq->rx_alloc_count))) { + struct rte_eth_dev *dev; + + PMD_RX_LOG(ERR, rxq, + "New buffer allocation failed," + "dropping incoming packetn"); + dev = &rte_eth_devices[rxq->port_id]; + dev->data->rx_mbuf_alloc_failed += + rxq->rx_alloc_count; + rxq->rx_alloc_errors += rxq->rx_alloc_count; + return 0; + } + qede_update_rx_prod(qdev, rxq); + rxq->rx_alloc_count = 0; + } + + hw_comp_cons = rte_le_to_cpu_16(*rxq->hw_cons_ptr); + sw_comp_cons = 
ecore_chain_get_cons_idx(&rxq->rx_comp_ring); + + rte_rmb(); + + if (hw_comp_cons == sw_comp_cons) + return 0; + + while (sw_comp_cons != hw_comp_cons) { + ol_flags = 0; + packet_type = RTE_PTYPE_UNKNOWN; + vlan_tci = 0; + tpa_start_flg = false; + rss_hash = 0; + + /* Get the CQE from the completion ring */ + cqe = + (union eth_rx_cqe *)ecore_chain_consume(&rxq->rx_comp_ring); + cqe_type = cqe->fast_path_regular.type; + PMD_RX_LOG(INFO, rxq, "Rx CQE type %d\n", cqe_type); + + switch (cqe_type) { + case ETH_RX_CQE_TYPE_REGULAR: + fp_cqe = &cqe->fast_path_regular; + break; + case ETH_RX_CQE_TYPE_TPA_START: + cqe_start_tpa = &cqe->fast_path_tpa_start; + tpa_info = &rxq->tpa_info[cqe_start_tpa->tpa_agg_index]; + tpa_start_flg = true; + /* Mark it as LRO packet */ + ol_flags |= PKT_RX_LRO; + /* In split mode, seg_len is same as len_on_first_bd + * and bw_ext_bd_len_list will be empty since there are + * no additional buffers + */ + PMD_RX_LOG(INFO, rxq, + "TPA start[%d] - len_on_first_bd %d header %d" + " [bd_list[0] %d], [seg_len %d]\n", + cqe_start_tpa->tpa_agg_index, + rte_le_to_cpu_16(cqe_start_tpa->len_on_first_bd), + cqe_start_tpa->header_len, + rte_le_to_cpu_16(cqe_start_tpa->bw_ext_bd_len_list[0]), + rte_le_to_cpu_16(cqe_start_tpa->seg_len)); + + break; + case ETH_RX_CQE_TYPE_TPA_CONT: + qede_rx_process_tpa_cont_cqe(qdev, rxq, + &cqe->fast_path_tpa_cont); + goto next_cqe; + case ETH_RX_CQE_TYPE_TPA_END: + qede_rx_process_tpa_end_cqe(qdev, rxq, + &cqe->fast_path_tpa_end); + tpa_agg_idx = cqe->fast_path_tpa_end.tpa_agg_index; + tpa_info = &rxq->tpa_info[tpa_agg_idx]; + rx_mb = rxq->tpa_info[tpa_agg_idx].tpa_head; + goto tpa_end; + case ETH_RX_CQE_TYPE_SLOW_PATH: + PMD_RX_LOG(INFO, rxq, "Got unexpected slowpath CQE\n"); + ecore_eth_cqe_completion( + &edev->hwfns[rxq->queue_id % edev->num_hwfns], + (struct eth_slow_path_rx_cqe *)cqe); + /* fall-thru */ + default: + goto next_cqe; + } + + /* Get the data from the SW ring */ + sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS(rxq); + rx_mb = rxq->sw_rx_ring[sw_rx_index].mbuf; + assert(rx_mb != NULL); + + /* Handle regular CQE or TPA start CQE */ + if (!tpa_start_flg) { + parse_flag = rte_le_to_cpu_16(fp_cqe->pars_flags.flags); + offset = fp_cqe->placement_offset; + len = rte_le_to_cpu_16(fp_cqe->len_on_first_bd); + pkt_len = rte_le_to_cpu_16(fp_cqe->pkt_len); + vlan_tci = rte_le_to_cpu_16(fp_cqe->vlan_tag); + rss_hash = rte_le_to_cpu_32(fp_cqe->rss_hash); +#ifdef RTE_LIBRTE_QEDE_DEBUG_RX + bitfield_val = fp_cqe->bitfields; +#endif + } else { + parse_flag = + rte_le_to_cpu_16(cqe_start_tpa->pars_flags.flags); + offset = cqe_start_tpa->placement_offset; + /* seg_len = len_on_first_bd */ + len = rte_le_to_cpu_16(cqe_start_tpa->len_on_first_bd); + vlan_tci = rte_le_to_cpu_16(cqe_start_tpa->vlan_tag); +#ifdef RTE_LIBRTE_QEDE_DEBUG_RX + bitfield_val = cqe_start_tpa->bitfields; +#endif + rss_hash = rte_le_to_cpu_32(cqe_start_tpa->rss_hash); + } + if (qede_tunn_exist(parse_flag)) { + PMD_RX_LOG(INFO, rxq, "Rx tunneled packet\n"); + if (unlikely(qede_check_tunn_csum_l4(parse_flag))) { + PMD_RX_LOG(ERR, rxq, + "L4 csum failed, flags = 0x%x\n", + parse_flag); + rxq->rx_hw_errors++; + ol_flags |= PKT_RX_L4_CKSUM_BAD; + } else { + ol_flags |= PKT_RX_L4_CKSUM_GOOD; + } + + if (unlikely(qede_check_tunn_csum_l3(parse_flag))) { + PMD_RX_LOG(ERR, rxq, + "Outer L3 csum failed, flags = 0x%x\n", + parse_flag); + rxq->rx_hw_errors++; + ol_flags |= PKT_RX_EIP_CKSUM_BAD; + } else { + ol_flags |= PKT_RX_IP_CKSUM_GOOD; + } + + if (tpa_start_flg) + flags = 
cqe_start_tpa->tunnel_pars_flags.flags; + else + flags = fp_cqe->tunnel_pars_flags.flags; + tunn_parse_flag = flags; + + /* Tunnel_type */ + packet_type = + qede_rx_cqe_to_tunn_pkt_type(tunn_parse_flag); + + /* Inner header */ + packet_type |= + qede_rx_cqe_to_pkt_type_inner(parse_flag); + + /* Outer L3/L4 types is not available in CQE */ + packet_type |= qede_rx_cqe_to_pkt_type_outer(rx_mb); + + /* Outer L3/L4 types is not available in CQE. + * Need to add offset to parse correctly, + */ + rx_mb->data_off = offset + RTE_PKTMBUF_HEADROOM; + packet_type |= qede_rx_cqe_to_pkt_type_outer(rx_mb); + } else { + packet_type |= qede_rx_cqe_to_pkt_type(parse_flag); + } + + /* Common handling for non-tunnel packets and for inner + * headers in the case of tunnel. + */ + if (unlikely(qede_check_notunn_csum_l4(parse_flag))) { + PMD_RX_LOG(ERR, rxq, + "L4 csum failed, flags = 0x%x\n", + parse_flag); + rxq->rx_hw_errors++; + ol_flags |= PKT_RX_L4_CKSUM_BAD; + } else { + ol_flags |= PKT_RX_L4_CKSUM_GOOD; + } + if (unlikely(qede_check_notunn_csum_l3(rx_mb, parse_flag))) { + PMD_RX_LOG(ERR, rxq, "IP csum failed, flags = 0x%x\n", + parse_flag); + rxq->rx_hw_errors++; + ol_flags |= PKT_RX_IP_CKSUM_BAD; + } else { + ol_flags |= PKT_RX_IP_CKSUM_GOOD; + } + + if (CQE_HAS_VLAN(parse_flag) || + CQE_HAS_OUTER_VLAN(parse_flag)) { + /* Note: FW doesn't indicate Q-in-Q packet */ + ol_flags |= PKT_RX_VLAN; + if (qdev->vlan_strip_flg) { + ol_flags |= PKT_RX_VLAN_STRIPPED; + rx_mb->vlan_tci = vlan_tci; + } + } + + /* RSS Hash */ + if (qdev->rss_enable) { + ol_flags |= PKT_RX_RSS_HASH; + rx_mb->hash.rss = rss_hash; + } + + rx_alloc_count++; + qede_rx_bd_ring_consume(rxq); + + if (!tpa_start_flg && fp_cqe->bd_num > 1) { + PMD_RX_LOG(DEBUG, rxq, "Jumbo-over-BD packet: %02x BDs" + " len on first: %04x Total Len: %04x", + fp_cqe->bd_num, len, pkt_len); + num_segs = fp_cqe->bd_num - 1; + seg1 = rx_mb; + if (qede_process_sg_pkts(p_rxq, seg1, num_segs, + pkt_len - len)) + goto next_cqe; + + rx_alloc_count += num_segs; + rxq->rx_segs += num_segs; + } + rxq->rx_segs++; /* for the first segment */ + + /* Prefetch next mbuf while processing current one. 
*/ + preload_idx = rxq->sw_rx_cons & NUM_RX_BDS(rxq); + rte_prefetch0(rxq->sw_rx_ring[preload_idx].mbuf); + + /* Update rest of the MBUF fields */ + rx_mb->data_off = offset + RTE_PKTMBUF_HEADROOM; + rx_mb->port = rxq->port_id; + rx_mb->ol_flags = ol_flags; + rx_mb->data_len = len; + rx_mb->packet_type = packet_type; +#ifdef RTE_LIBRTE_QEDE_DEBUG_RX + print_rx_bd_info(rx_mb, rxq, bitfield_val); +#endif + if (!tpa_start_flg) { + rx_mb->nb_segs = fp_cqe->bd_num; + rx_mb->pkt_len = pkt_len; + } else { + /* store ref to the updated mbuf */ + tpa_info->tpa_head = rx_mb; + tpa_info->tpa_tail = tpa_info->tpa_head; + } + rte_prefetch1(rte_pktmbuf_mtod(rx_mb, void *)); +tpa_end: + if (!tpa_start_flg) { + rx_pkts[rx_pkt] = rx_mb; + rx_pkt++; + } +next_cqe: + ecore_chain_recycle_consumed(&rxq->rx_comp_ring); + sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring); + if (rx_pkt == nb_pkts) { + PMD_RX_LOG(DEBUG, rxq, + "Budget reached nb_pkts=%u received=%u", + rx_pkt, nb_pkts); + break; + } + } + + /* Request number of bufferes to be allocated in next loop */ + rxq->rx_alloc_count = rx_alloc_count; + + rxq->rcv_pkts += rx_pkt; + + PMD_RX_LOG(DEBUG, rxq, "rx_pkts=%u core=%d", rx_pkt, rte_lcore_id()); + + return rx_pkt; +} + +uint16_t +qede_recv_pkts_cmt(void *p_fp_cmt, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) +{ + struct qede_fastpath_cmt *fp_cmt = p_fp_cmt; + uint16_t eng0_pkts, eng1_pkts; + + eng0_pkts = nb_pkts / 2; + + eng0_pkts = qede_recv_pkts(fp_cmt->fp0->rxq, rx_pkts, eng0_pkts); + + eng1_pkts = nb_pkts - eng0_pkts; + + eng1_pkts = qede_recv_pkts(fp_cmt->fp1->rxq, rx_pkts + eng0_pkts, + eng1_pkts); + + return eng0_pkts + eng1_pkts; +} + +/* Populate scatter gather buffer descriptor fields */ +static inline uint16_t +qede_encode_sg_bd(struct qede_tx_queue *p_txq, struct rte_mbuf *m_seg, + struct eth_tx_2nd_bd **bd2, struct eth_tx_3rd_bd **bd3, + uint16_t start_seg) +{ + struct qede_tx_queue *txq = p_txq; + struct eth_tx_bd *tx_bd = NULL; + dma_addr_t mapping; + uint16_t nb_segs = 0; + + /* Check for scattered buffers */ + while (m_seg) { + if (start_seg == 0) { + if (!*bd2) { + *bd2 = (struct eth_tx_2nd_bd *) + ecore_chain_produce(&txq->tx_pbl); + memset(*bd2, 0, sizeof(struct eth_tx_2nd_bd)); + nb_segs++; + } + mapping = rte_mbuf_data_iova(m_seg); + QEDE_BD_SET_ADDR_LEN(*bd2, mapping, m_seg->data_len); + PMD_TX_LOG(DEBUG, txq, "BD2 len %04x", m_seg->data_len); + } else if (start_seg == 1) { + if (!*bd3) { + *bd3 = (struct eth_tx_3rd_bd *) + ecore_chain_produce(&txq->tx_pbl); + memset(*bd3, 0, sizeof(struct eth_tx_3rd_bd)); + nb_segs++; + } + mapping = rte_mbuf_data_iova(m_seg); + QEDE_BD_SET_ADDR_LEN(*bd3, mapping, m_seg->data_len); + PMD_TX_LOG(DEBUG, txq, "BD3 len %04x", m_seg->data_len); + } else { + tx_bd = (struct eth_tx_bd *) + ecore_chain_produce(&txq->tx_pbl); + memset(tx_bd, 0, sizeof(*tx_bd)); + nb_segs++; + mapping = rte_mbuf_data_iova(m_seg); + QEDE_BD_SET_ADDR_LEN(tx_bd, mapping, m_seg->data_len); + PMD_TX_LOG(DEBUG, txq, "BD len %04x", m_seg->data_len); + } + start_seg++; + m_seg = m_seg->next; + } + + /* Return total scattered buffers */ + return nb_segs; +} + +#ifdef RTE_LIBRTE_QEDE_DEBUG_TX +static inline void +print_tx_bd_info(struct qede_tx_queue *txq, + struct eth_tx_1st_bd *bd1, + struct eth_tx_2nd_bd *bd2, + struct eth_tx_3rd_bd *bd3, + uint64_t tx_ol_flags) +{ + char ol_buf[256] = { 0 }; /* for verbose prints */ + + if (bd1) + PMD_TX_LOG(INFO, txq, + "BD1: nbytes=0x%04x nbds=0x%04x bd_flags=0x%04x bf=0x%04x", + rte_cpu_to_le_16(bd1->nbytes), bd1->data.nbds, + 
bd1->data.bd_flags.bitfields, + rte_cpu_to_le_16(bd1->data.bitfields)); + if (bd2) + PMD_TX_LOG(INFO, txq, + "BD2: nbytes=0x%04x bf1=0x%04x bf2=0x%04x tunn_ip=0x%04x\n", + rte_cpu_to_le_16(bd2->nbytes), bd2->data.bitfields1, + bd2->data.bitfields2, bd2->data.tunn_ip_size); + if (bd3) + PMD_TX_LOG(INFO, txq, + "BD3: nbytes=0x%04x bf=0x%04x MSS=0x%04x " + "tunn_l4_hdr_start_offset_w=0x%04x tunn_hdr_size=0x%04x\n", + rte_cpu_to_le_16(bd3->nbytes), + rte_cpu_to_le_16(bd3->data.bitfields), + rte_cpu_to_le_16(bd3->data.lso_mss), + bd3->data.tunn_l4_hdr_start_offset_w, + bd3->data.tunn_hdr_size_w); + + rte_get_tx_ol_flag_list(tx_ol_flags, ol_buf, sizeof(ol_buf)); + PMD_TX_LOG(INFO, txq, "TX offloads = %s\n", ol_buf); +} +#endif + +/* TX prepare to check packets meets TX conditions */ +uint16_t +#ifdef RTE_LIBRTE_QEDE_DEBUG_TX +qede_xmit_prep_pkts(void *p_txq, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + struct qede_tx_queue *txq = p_txq; +#else +qede_xmit_prep_pkts(__rte_unused void *p_txq, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ +#endif + uint64_t ol_flags; + struct rte_mbuf *m; + uint16_t i; +#ifdef RTE_LIBRTE_ETHDEV_DEBUG + int ret; +#endif + + for (i = 0; i < nb_pkts; i++) { + m = tx_pkts[i]; + ol_flags = m->ol_flags; + if (ol_flags & PKT_TX_TCP_SEG) { + if (m->nb_segs >= ETH_TX_MAX_BDS_PER_LSO_PACKET) { + rte_errno = EINVAL; + break; + } + /* TBD: confirm its ~9700B for both ? */ + if (m->tso_segsz > ETH_TX_MAX_NON_LSO_PKT_LEN) { + rte_errno = EINVAL; + break; + } + } else { + if (m->nb_segs >= ETH_TX_MAX_BDS_PER_NON_LSO_PACKET) { + rte_errno = EINVAL; + break; + } + } + if (ol_flags & QEDE_TX_OFFLOAD_NOTSUP_MASK) { + /* We support only limited tunnel protocols */ + if (ol_flags & PKT_TX_TUNNEL_MASK) { + uint64_t temp; + + temp = ol_flags & PKT_TX_TUNNEL_MASK; + if (temp == PKT_TX_TUNNEL_VXLAN || + temp == PKT_TX_TUNNEL_GENEVE || + temp == PKT_TX_TUNNEL_MPLSINUDP || + temp == PKT_TX_TUNNEL_GRE) + continue; + } + + rte_errno = ENOTSUP; + break; + } + +#ifdef RTE_LIBRTE_ETHDEV_DEBUG + ret = rte_validate_tx_offload(m); + if (ret != 0) { + rte_errno = -ret; + break; + } +#endif + } + +#ifdef RTE_LIBRTE_QEDE_DEBUG_TX + if (unlikely(i != nb_pkts)) + PMD_TX_LOG(ERR, txq, "TX prepare failed for %u\n", + nb_pkts - i); +#endif + return i; +} + +#define MPLSINUDP_HDR_SIZE (12) + +#ifdef RTE_LIBRTE_QEDE_DEBUG_TX +static inline void +qede_mpls_tunn_tx_sanity_check(struct rte_mbuf *mbuf, + struct qede_tx_queue *txq) +{ + if (((mbuf->outer_l2_len + mbuf->outer_l3_len) / 2) > 0xff) + PMD_TX_LOG(ERR, txq, "tunn_l4_hdr_start_offset overflow\n"); + if (((mbuf->outer_l2_len + mbuf->outer_l3_len + + MPLSINUDP_HDR_SIZE) / 2) > 0xff) + PMD_TX_LOG(ERR, txq, "tunn_hdr_size overflow\n"); + if (((mbuf->l2_len - MPLSINUDP_HDR_SIZE) / 2) > + ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK) + PMD_TX_LOG(ERR, txq, "inner_l2_hdr_size overflow\n"); + if (((mbuf->l2_len - MPLSINUDP_HDR_SIZE + mbuf->l3_len) / 2) > + ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK) + PMD_TX_LOG(ERR, txq, "inner_l2_hdr_size overflow\n"); +} +#endif + +uint16_t +qede_xmit_pkts_regular(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) +{ + struct qede_tx_queue *txq = p_txq; + struct qede_dev *qdev = txq->qdev; + struct ecore_dev *edev = &qdev->edev; + struct eth_tx_1st_bd *bd1; + struct eth_tx_2nd_bd *bd2; + struct eth_tx_3rd_bd *bd3; + struct rte_mbuf *m_seg = NULL; + struct rte_mbuf *mbuf; + struct qede_tx_entry *sw_tx_ring; + uint16_t nb_tx_pkts; + uint16_t bd_prod; + uint16_t idx; + uint16_t nb_frags = 0; + uint16_t 
nb_pkt_sent = 0; + uint8_t nbds; + uint64_t tx_ol_flags; + /* BD1 */ + uint16_t bd1_bf; + uint8_t bd1_bd_flags_bf; + + if (unlikely(txq->nb_tx_avail < txq->tx_free_thresh)) { + PMD_TX_LOG(DEBUG, txq, "send=%u avail=%u free_thresh=%u", + nb_pkts, txq->nb_tx_avail, txq->tx_free_thresh); + qede_process_tx_compl(edev, txq); + } + + nb_tx_pkts = nb_pkts; + bd_prod = rte_cpu_to_le_16(ecore_chain_get_prod_idx(&txq->tx_pbl)); + sw_tx_ring = txq->sw_tx_ring; + + while (nb_tx_pkts--) { + /* Init flags/values */ + nbds = 0; + bd1 = NULL; + bd2 = NULL; + bd3 = NULL; + bd1_bf = 0; + bd1_bd_flags_bf = 0; + nb_frags = 0; + + mbuf = *tx_pkts++; + assert(mbuf); + + + /* Check minimum TX BDS availability against available BDs */ + if (unlikely(txq->nb_tx_avail < mbuf->nb_segs)) + break; + + tx_ol_flags = mbuf->ol_flags; + bd1_bd_flags_bf |= 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT; + + if (unlikely(txq->nb_tx_avail < + ETH_TX_MIN_BDS_PER_NON_LSO_PKT)) + break; + bd1_bf |= + (mbuf->pkt_len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) + << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT; + + /* Offload the IP checksum in the hardware */ + if (tx_ol_flags & PKT_TX_IP_CKSUM) + bd1_bd_flags_bf |= + 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT; + + /* L4 checksum offload (tcp or udp) */ + if ((tx_ol_flags & (PKT_TX_IPV4 | PKT_TX_IPV6)) && + (tx_ol_flags & (PKT_TX_UDP_CKSUM | PKT_TX_TCP_CKSUM))) + bd1_bd_flags_bf |= + 1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT; + + /* Fill the entry in the SW ring and the BDs in the FW ring */ + idx = TX_PROD(txq); + sw_tx_ring[idx].mbuf = mbuf; + + /* BD1 */ + bd1 = (struct eth_tx_1st_bd *)ecore_chain_produce(&txq->tx_pbl); + memset(bd1, 0, sizeof(struct eth_tx_1st_bd)); + nbds++; + + /* Map MBUF linear data for DMA and set in the BD1 */ + QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_iova(mbuf), + mbuf->data_len); + bd1->data.bitfields = rte_cpu_to_le_16(bd1_bf); + bd1->data.bd_flags.bitfields = bd1_bd_flags_bf; + + /* Handle fragmented MBUF */ + if (unlikely(mbuf->nb_segs > 1)) { + m_seg = mbuf->next; + + /* Encode scatter gather buffer descriptors */ + nb_frags = qede_encode_sg_bd(txq, m_seg, &bd2, &bd3, + nbds - 1); + } + + bd1->data.nbds = nbds + nb_frags; + + txq->nb_tx_avail -= bd1->data.nbds; + txq->sw_tx_prod++; + bd_prod = + rte_cpu_to_le_16(ecore_chain_get_prod_idx(&txq->tx_pbl)); +#ifdef RTE_LIBRTE_QEDE_DEBUG_TX + print_tx_bd_info(txq, bd1, bd2, bd3, tx_ol_flags); +#endif + nb_pkt_sent++; + txq->xmit_pkts++; + } + + /* Write value of prod idx into bd_prod */ + txq->tx_db.data.bd_prod = bd_prod; + rte_wmb(); + rte_compiler_barrier(); + DIRECT_REG_WR_RELAXED(edev, txq->doorbell_addr, txq->tx_db.raw); + rte_wmb(); + + /* Check again for Tx completions */ + qede_process_tx_compl(edev, txq); + + PMD_TX_LOG(DEBUG, txq, "to_send=%u sent=%u bd_prod=%u core=%d", + nb_pkts, nb_pkt_sent, TX_PROD(txq), rte_lcore_id()); + + return nb_pkt_sent; +} + +uint16_t +qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) +{ + struct qede_tx_queue *txq = p_txq; + struct qede_dev *qdev = txq->qdev; + struct ecore_dev *edev = &qdev->edev; + struct rte_mbuf *mbuf; + struct rte_mbuf *m_seg = NULL; + uint16_t nb_tx_pkts; + uint16_t bd_prod; + uint16_t idx; + uint16_t nb_frags; + uint16_t nb_pkt_sent = 0; + uint8_t nbds; + bool lso_flg; + bool mplsoudp_flg; + __rte_unused bool tunn_flg; + bool tunn_ipv6_ext_flg; + struct eth_tx_1st_bd *bd1; + struct eth_tx_2nd_bd *bd2; + struct eth_tx_3rd_bd *bd3; + uint64_t tx_ol_flags; + uint16_t hdr_size; + /* BD1 */ + uint16_t bd1_bf; + uint8_t bd1_bd_flags_bf; + uint16_t vlan; + 
/* BD2 */ + uint16_t bd2_bf1; + uint16_t bd2_bf2; + /* BD3 */ + uint16_t mss; + uint16_t bd3_bf; + + uint8_t tunn_l4_hdr_start_offset; + uint8_t tunn_hdr_size; + uint8_t inner_l2_hdr_size; + uint16_t inner_l4_hdr_offset; + + if (unlikely(txq->nb_tx_avail < txq->tx_free_thresh)) { + PMD_TX_LOG(DEBUG, txq, "send=%u avail=%u free_thresh=%u", + nb_pkts, txq->nb_tx_avail, txq->tx_free_thresh); + qede_process_tx_compl(edev, txq); + } + + nb_tx_pkts = nb_pkts; + bd_prod = rte_cpu_to_le_16(ecore_chain_get_prod_idx(&txq->tx_pbl)); + while (nb_tx_pkts--) { + /* Init flags/values */ + tunn_flg = false; + lso_flg = false; + nbds = 0; + vlan = 0; + bd1 = NULL; + bd2 = NULL; + bd3 = NULL; + hdr_size = 0; + bd1_bf = 0; + bd1_bd_flags_bf = 0; + bd2_bf1 = 0; + bd2_bf2 = 0; + mss = 0; + bd3_bf = 0; + mplsoudp_flg = false; + tunn_ipv6_ext_flg = false; + tunn_hdr_size = 0; + tunn_l4_hdr_start_offset = 0; + + mbuf = *tx_pkts++; + assert(mbuf); + + /* Check minimum TX BDS availability against available BDs */ + if (unlikely(txq->nb_tx_avail < mbuf->nb_segs)) + break; + + tx_ol_flags = mbuf->ol_flags; + bd1_bd_flags_bf |= 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT; + + /* TX prepare would have already checked supported tunnel Tx + * offloads. Don't rely on pkt_type marked by Rx, instead use + * tx_ol_flags to decide. + */ + tunn_flg = !!(tx_ol_flags & PKT_TX_TUNNEL_MASK); + + if (tunn_flg) { + /* Check against max which is Tunnel IPv6 + ext */ + if (unlikely(txq->nb_tx_avail < + ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT)) + break; + + /* First indicate its a tunnel pkt */ + bd1_bf |= ETH_TX_DATA_1ST_BD_TUNN_FLAG_MASK << + ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT; + /* Legacy FW had flipped behavior in regard to this bit + * i.e. it needed to set to prevent FW from touching + * encapsulated packets when it didn't need to. + */ + if (unlikely(txq->is_legacy)) { + bd1_bf ^= 1 << + ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT; + } + + /* Outer IP checksum offload */ + if (tx_ol_flags & (PKT_TX_OUTER_IP_CKSUM | + PKT_TX_OUTER_IPV4)) { + bd1_bd_flags_bf |= + ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_MASK << + ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT; + } + + /** + * Currently, only inner checksum offload in MPLS-in-UDP + * tunnel with one MPLS label is supported. Both outer + * and inner layers lengths need to be provided in + * mbuf. 
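+ * All header sizes and offsets written to the BDs below are expressed in two-byte words, hence the divisions by 2.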
+ */ + if ((tx_ol_flags & PKT_TX_TUNNEL_MASK) == + PKT_TX_TUNNEL_MPLSINUDP) { + mplsoudp_flg = true; +#ifdef RTE_LIBRTE_QEDE_DEBUG_TX + qede_mpls_tunn_tx_sanity_check(mbuf, txq); +#endif + /* Outer L4 offset in two byte words */ + tunn_l4_hdr_start_offset = + (mbuf->outer_l2_len + mbuf->outer_l3_len) / 2; + /* Tunnel header size in two byte words */ + tunn_hdr_size = (mbuf->outer_l2_len + + mbuf->outer_l3_len + + MPLSINUDP_HDR_SIZE) / 2; + /* Inner L2 header size in two byte words */ + inner_l2_hdr_size = (mbuf->l2_len - + MPLSINUDP_HDR_SIZE) / 2; + /* Inner L4 header offset from the beggining + * of inner packet in two byte words + */ + inner_l4_hdr_offset = (mbuf->l2_len - + MPLSINUDP_HDR_SIZE + mbuf->l3_len) / 2; + + /* Inner L2 size and address type */ + bd2_bf1 |= (inner_l2_hdr_size & + ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK) << + ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_SHIFT; + bd2_bf1 |= (UNICAST_ADDRESS & + ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_MASK) << + ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_SHIFT; + /* Treated as IPv6+Ext */ + bd2_bf1 |= + 1 << ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_SHIFT; + + /* Mark inner IPv6 if present */ + if (tx_ol_flags & PKT_TX_IPV6) + bd2_bf1 |= + 1 << ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_SHIFT; + + /* Inner L4 offsets */ + if ((tx_ol_flags & (PKT_TX_IPV4 | PKT_TX_IPV6)) && + (tx_ol_flags & (PKT_TX_UDP_CKSUM | + PKT_TX_TCP_CKSUM))) { + /* Determines if BD3 is needed */ + tunn_ipv6_ext_flg = true; + if ((tx_ol_flags & PKT_TX_L4_MASK) == + PKT_TX_UDP_CKSUM) { + bd2_bf1 |= + 1 << ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT; + } + + /* TODO other pseudo checksum modes are + * not supported + */ + bd2_bf1 |= + ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH << + ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT; + bd2_bf2 |= (inner_l4_hdr_offset & + ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK) << + ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT; + } + } /* End MPLSoUDP */ + } /* End Tunnel handling */ + + if (tx_ol_flags & PKT_TX_TCP_SEG) { + lso_flg = true; + if (unlikely(txq->nb_tx_avail < + ETH_TX_MIN_BDS_PER_LSO_PKT)) + break; + /* For LSO, packet header and payload must reside on + * buffers pointed by different BDs. Using BD1 for HDR + * and BD2 onwards for data. + */ + hdr_size = mbuf->l2_len + mbuf->l3_len + mbuf->l4_len; + if (tunn_flg) + hdr_size += mbuf->outer_l2_len + + mbuf->outer_l3_len; + + bd1_bd_flags_bf |= 1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT; + bd1_bd_flags_bf |= + 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT; + /* PKT_TX_TCP_SEG implies PKT_TX_TCP_CKSUM */ + bd1_bd_flags_bf |= + 1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT; + mss = rte_cpu_to_le_16(mbuf->tso_segsz); + /* Using one header BD */ + bd3_bf |= rte_cpu_to_le_16(1 << + ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT); + } else { + if (unlikely(txq->nb_tx_avail < + ETH_TX_MIN_BDS_PER_NON_LSO_PKT)) + break; + bd1_bf |= + (mbuf->pkt_len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) + << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT; + } + + /* Descriptor based VLAN insertion */ + if (tx_ol_flags & PKT_TX_VLAN_PKT) { + vlan = rte_cpu_to_le_16(mbuf->vlan_tci); + bd1_bd_flags_bf |= + 1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT; + } + + /* Offload the IP checksum in the hardware */ + if (tx_ol_flags & PKT_TX_IP_CKSUM) { + bd1_bd_flags_bf |= + 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT; + /* There's no DPDK flag to request outer-L4 csum + * offload. But in the case of tunnel if inner L3 or L4 + * csum offload is requested then we need to force + * recalculation of L4 tunnel header csum also. 
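+ * GRE is excluded below because it carries no outer L4 (UDP) header whose checksum could be recalculated.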
+ */ + if (tunn_flg && ((tx_ol_flags & PKT_TX_TUNNEL_MASK) != + PKT_TX_TUNNEL_GRE)) { + bd1_bd_flags_bf |= + ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK << + ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT; + } + } + + /* L4 checksum offload (tcp or udp) */ + if ((tx_ol_flags & (PKT_TX_IPV4 | PKT_TX_IPV6)) && + (tx_ol_flags & (PKT_TX_UDP_CKSUM | PKT_TX_TCP_CKSUM))) { + bd1_bd_flags_bf |= + 1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT; + /* There's no DPDK flag to request outer-L4 csum + * offload. But in the case of tunnel if inner L3 or L4 + * csum offload is requested then we need to force + * recalculation of L4 tunnel header csum also. + */ + if (tunn_flg) { + bd1_bd_flags_bf |= + ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK << + ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT; + } + } + + /* Fill the entry in the SW ring and the BDs in the FW ring */ + idx = TX_PROD(txq); + txq->sw_tx_ring[idx].mbuf = mbuf; + + /* BD1 */ + bd1 = (struct eth_tx_1st_bd *)ecore_chain_produce(&txq->tx_pbl); + memset(bd1, 0, sizeof(struct eth_tx_1st_bd)); + nbds++; + + /* Map MBUF linear data for DMA and set in the BD1 */ + QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_iova(mbuf), + mbuf->data_len); + bd1->data.bitfields = rte_cpu_to_le_16(bd1_bf); + bd1->data.bd_flags.bitfields = bd1_bd_flags_bf; + bd1->data.vlan = vlan; + + if (lso_flg || mplsoudp_flg) { + bd2 = (struct eth_tx_2nd_bd *)ecore_chain_produce + (&txq->tx_pbl); + memset(bd2, 0, sizeof(struct eth_tx_2nd_bd)); + nbds++; + + /* BD1 */ + QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_iova(mbuf), + hdr_size); + /* BD2 */ + QEDE_BD_SET_ADDR_LEN(bd2, (hdr_size + + rte_mbuf_data_iova(mbuf)), + mbuf->data_len - hdr_size); + bd2->data.bitfields1 = rte_cpu_to_le_16(bd2_bf1); + if (mplsoudp_flg) { + bd2->data.bitfields2 = + rte_cpu_to_le_16(bd2_bf2); + /* Outer L3 size */ + bd2->data.tunn_ip_size = + rte_cpu_to_le_16(mbuf->outer_l3_len); + } + /* BD3 */ + if (lso_flg || (mplsoudp_flg && tunn_ipv6_ext_flg)) { + bd3 = (struct eth_tx_3rd_bd *) + ecore_chain_produce(&txq->tx_pbl); + memset(bd3, 0, sizeof(struct eth_tx_3rd_bd)); + nbds++; + bd3->data.bitfields = rte_cpu_to_le_16(bd3_bf); + if (lso_flg) + bd3->data.lso_mss = mss; + if (mplsoudp_flg) { + bd3->data.tunn_l4_hdr_start_offset_w = + tunn_l4_hdr_start_offset; + bd3->data.tunn_hdr_size_w = + tunn_hdr_size; + } + } + } + + /* Handle fragmented MBUF */ + m_seg = mbuf->next; + + /* Encode scatter gather buffer descriptors if required */ + nb_frags = qede_encode_sg_bd(txq, m_seg, &bd2, &bd3, nbds - 1); + bd1->data.nbds = nbds + nb_frags; + + txq->nb_tx_avail -= bd1->data.nbds; + txq->sw_tx_prod++; + bd_prod = + rte_cpu_to_le_16(ecore_chain_get_prod_idx(&txq->tx_pbl)); +#ifdef RTE_LIBRTE_QEDE_DEBUG_TX + print_tx_bd_info(txq, bd1, bd2, bd3, tx_ol_flags); +#endif + nb_pkt_sent++; + txq->xmit_pkts++; + } + + /* Write value of prod idx into bd_prod */ + txq->tx_db.data.bd_prod = bd_prod; + rte_wmb(); + rte_compiler_barrier(); + DIRECT_REG_WR_RELAXED(edev, txq->doorbell_addr, txq->tx_db.raw); + rte_wmb(); + + /* Check again for Tx completions */ + qede_process_tx_compl(edev, txq); + + PMD_TX_LOG(DEBUG, txq, "to_send=%u sent=%u bd_prod=%u core=%d", + nb_pkts, nb_pkt_sent, TX_PROD(txq), rte_lcore_id()); + + return nb_pkt_sent; +} + +uint16_t +qede_xmit_pkts_cmt(void *p_fp_cmt, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) +{ + struct qede_fastpath_cmt *fp_cmt = p_fp_cmt; + uint16_t eng0_pkts, eng1_pkts; + + eng0_pkts = nb_pkts / 2; + + eng0_pkts = qede_xmit_pkts(fp_cmt->fp0->txq, tx_pkts, eng0_pkts); + + eng1_pkts = nb_pkts - eng0_pkts; + + eng1_pkts = 
qede_xmit_pkts(fp_cmt->fp1->txq, tx_pkts + eng0_pkts, + eng1_pkts); + + return eng0_pkts + eng1_pkts; +} + +uint16_t +qede_rxtx_pkts_dummy(__rte_unused void *p_rxq, + __rte_unused struct rte_mbuf **pkts, + __rte_unused uint16_t nb_pkts) +{ + return 0; +} + + +/* this function does a fake walk through over completion queue + * to calculate number of BDs used by HW. + * At the end, it restores the state of completion queue. + */ +static uint16_t +qede_parse_fp_cqe(struct qede_rx_queue *rxq) +{ + uint16_t hw_comp_cons, sw_comp_cons, bd_count = 0; + union eth_rx_cqe *cqe, *orig_cqe = NULL; + + hw_comp_cons = rte_le_to_cpu_16(*rxq->hw_cons_ptr); + sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring); + + if (hw_comp_cons == sw_comp_cons) + return 0; + + /* Get the CQE from the completion ring */ + cqe = (union eth_rx_cqe *)ecore_chain_consume(&rxq->rx_comp_ring); + orig_cqe = cqe; + + while (sw_comp_cons != hw_comp_cons) { + switch (cqe->fast_path_regular.type) { + case ETH_RX_CQE_TYPE_REGULAR: + bd_count += cqe->fast_path_regular.bd_num; + break; + case ETH_RX_CQE_TYPE_TPA_END: + bd_count += cqe->fast_path_tpa_end.num_of_bds; + break; + default: + break; + } + + cqe = + (union eth_rx_cqe *)ecore_chain_consume(&rxq->rx_comp_ring); + sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring); + } + + /* revert comp_ring to original state */ + ecore_chain_set_cons(&rxq->rx_comp_ring, sw_comp_cons, orig_cqe); + + return bd_count; +} + +int +qede_rx_descriptor_status(void *p_rxq, uint16_t offset) +{ + uint16_t hw_bd_cons, sw_bd_cons, sw_bd_prod; + uint16_t produced, consumed; + struct qede_rx_queue *rxq = p_rxq; + + if (offset > rxq->nb_rx_desc) + return -EINVAL; + + sw_bd_cons = ecore_chain_get_cons_idx(&rxq->rx_bd_ring); + sw_bd_prod = ecore_chain_get_prod_idx(&rxq->rx_bd_ring); + + /* find BDs used by HW from completion queue elements */ + hw_bd_cons = sw_bd_cons + qede_parse_fp_cqe(rxq); + + if (hw_bd_cons < sw_bd_cons) + /* wraparound case */ + consumed = (0xffff - sw_bd_cons) + hw_bd_cons; + else + consumed = hw_bd_cons - sw_bd_cons; + + if (offset <= consumed) + return RTE_ETH_RX_DESC_DONE; + + if (sw_bd_prod < sw_bd_cons) + /* wraparound case */ + produced = (0xffff - sw_bd_cons) + sw_bd_prod; + else + produced = sw_bd_prod - sw_bd_cons; + + if (offset <= produced) + return RTE_ETH_RX_DESC_AVAIL; + + return RTE_ETH_RX_DESC_UNAVAIL; +} diff --git a/src/spdk/dpdk/drivers/net/qede/qede_rxtx.h b/src/spdk/dpdk/drivers/net/qede/qede_rxtx.h new file mode 100644 index 000000000..d7ff870b2 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/qede/qede_rxtx.h @@ -0,0 +1,308 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. + * All rights reserved. 
+ * www.cavium.com + */ + + +#ifndef _QEDE_RXTX_H_ +#define _QEDE_RXTX_H_ + +#include "qede_ethdev.h" + +/* Ring Descriptors */ +#define RX_RING_SIZE_POW 16 /* 64K */ +#define RX_RING_SIZE (1ULL << RX_RING_SIZE_POW) +#define NUM_RX_BDS_MAX (RX_RING_SIZE - 1) +#define NUM_RX_BDS_MIN 128 +#define NUM_RX_BDS_DEF NUM_RX_BDS_MAX +#define NUM_RX_BDS(q) (q->nb_rx_desc - 1) + +#define TX_RING_SIZE_POW 16 /* 64K */ +#define TX_RING_SIZE (1ULL << TX_RING_SIZE_POW) +#define NUM_TX_BDS_MAX (TX_RING_SIZE - 1) +#define NUM_TX_BDS_MIN 128 +#define NUM_TX_BDS_DEF NUM_TX_BDS_MAX +#define NUM_TX_BDS(q) (q->nb_tx_desc - 1) + +#define TX_CONS(txq) (txq->sw_tx_cons & NUM_TX_BDS(txq)) +#define TX_PROD(txq) (txq->sw_tx_prod & NUM_TX_BDS(txq)) + +#define QEDE_DEFAULT_TX_FREE_THRESH 32 + +#define QEDE_CSUM_ERROR (1 << 0) +#define QEDE_CSUM_UNNECESSARY (1 << 1) +#define QEDE_TUNN_CSUM_UNNECESSARY (1 << 2) + +#define QEDE_BD_SET_ADDR_LEN(bd, maddr, len) \ + do { \ + (bd)->addr.hi = rte_cpu_to_le_32(U64_HI(maddr)); \ + (bd)->addr.lo = rte_cpu_to_le_32(U64_LO(maddr)); \ + (bd)->nbytes = rte_cpu_to_le_16(len); \ + } while (0) + +#define CQE_HAS_VLAN(flags) \ + ((flags) & (PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK \ + << PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT)) + +#define CQE_HAS_OUTER_VLAN(flags) \ + ((flags) & (PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_MASK \ + << PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_SHIFT)) + +#define QEDE_MIN_RX_BUFF_SIZE (1024) +#define QEDE_VLAN_TAG_SIZE (4) +#define QEDE_LLC_SNAP_HDR_LEN (8) + +/* Max supported alignment is 256 (8 shift) + * minimal alignment shift 6 is optimal for 57xxx HW performance + */ +#define QEDE_L1_CACHE_SHIFT 6 +#define QEDE_RX_ALIGN_SHIFT (RTE_MAX(6, RTE_MIN(8, QEDE_L1_CACHE_SHIFT))) +#define QEDE_FW_RX_ALIGN_END (1UL << QEDE_RX_ALIGN_SHIFT) +#define QEDE_CEIL_TO_CACHE_LINE_SIZE(n) (((n) + (QEDE_FW_RX_ALIGN_END - 1)) & \ + ~(QEDE_FW_RX_ALIGN_END - 1)) +#define QEDE_FLOOR_TO_CACHE_LINE_SIZE(n) RTE_ALIGN_FLOOR(n, \ + QEDE_FW_RX_ALIGN_END) + +/* Note: QEDE_LLC_SNAP_HDR_LEN is optional, + * +2 is for padding in front of L2 header + */ +#define QEDE_ETH_OVERHEAD (((2 * QEDE_VLAN_TAG_SIZE)) \ + + (QEDE_LLC_SNAP_HDR_LEN) + 2) + +#define QEDE_MAX_ETHER_HDR_LEN (RTE_ETHER_HDR_LEN + QEDE_ETH_OVERHEAD) + +#define QEDE_RSS_OFFLOAD_ALL (ETH_RSS_IPV4 |\ + ETH_RSS_NONFRAG_IPV4_TCP |\ + ETH_RSS_NONFRAG_IPV4_UDP |\ + ETH_RSS_IPV6 |\ + ETH_RSS_NONFRAG_IPV6_TCP |\ + ETH_RSS_NONFRAG_IPV6_UDP |\ + ETH_RSS_VXLAN |\ + ETH_RSS_GENEVE) + +#define QEDE_RXTX_MAX(qdev) \ + (RTE_MAX(qdev->num_rx_queues, qdev->num_tx_queues)) + +/* Macros for non-tunnel packet types lkup table */ +#define QEDE_PKT_TYPE_UNKNOWN 0x0 +#define QEDE_PKT_TYPE_MAX 0x3f + +#define QEDE_PKT_TYPE_IPV4 0x1 +#define QEDE_PKT_TYPE_IPV6 0x2 +#define QEDE_PKT_TYPE_IPV4_TCP 0x5 +#define QEDE_PKT_TYPE_IPV6_TCP 0x6 +#define QEDE_PKT_TYPE_IPV4_UDP 0x9 +#define QEDE_PKT_TYPE_IPV6_UDP 0xa + +/* For frag pkts, corresponding IP bits is set */ +#define QEDE_PKT_TYPE_IPV4_FRAG 0x11 +#define QEDE_PKT_TYPE_IPV6_FRAG 0x12 + +#define QEDE_PKT_TYPE_IPV4_VLAN 0x21 +#define QEDE_PKT_TYPE_IPV6_VLAN 0x22 +#define QEDE_PKT_TYPE_IPV4_TCP_VLAN 0x25 +#define QEDE_PKT_TYPE_IPV6_TCP_VLAN 0x26 +#define QEDE_PKT_TYPE_IPV4_UDP_VLAN 0x29 +#define QEDE_PKT_TYPE_IPV6_UDP_VLAN 0x2a + +#define QEDE_PKT_TYPE_IPV4_VLAN_FRAG 0x31 +#define QEDE_PKT_TYPE_IPV6_VLAN_FRAG 0x32 + +/* Macros for tunneled packets with next protocol lkup table */ +#define QEDE_PKT_TYPE_TUNN_GENEVE 0x1 +#define QEDE_PKT_TYPE_TUNN_GRE 0x2 +#define QEDE_PKT_TYPE_TUNN_VXLAN 0x3 + +/* Bit 
2 is don't care bit */ +#define QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_GENEVE 0x9 +#define QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_GRE 0xa +#define QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_VXLAN 0xb + +#define QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_GENEVE 0xd +#define QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_GRE 0xe +#define QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_VXLAN 0xf + + +#define QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_GENEVE 0x11 +#define QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_GRE 0x12 +#define QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_VXLAN 0x13 + +#define QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_GENEVE 0x15 +#define QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_GRE 0x16 +#define QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_VXLAN 0x17 + + +#define QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_GENEVE 0x19 +#define QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_GRE 0x1a +#define QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_VXLAN 0x1b + +#define QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_GENEVE 0x1d +#define QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_GRE 0x1e +#define QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_VXLAN 0x1f + +#define QEDE_PKT_TYPE_TUNN_MAX_TYPE 0x20 /* 2^5 */ + +#define QEDE_TX_CSUM_OFFLOAD_MASK (PKT_TX_IP_CKSUM | \ + PKT_TX_TCP_CKSUM | \ + PKT_TX_UDP_CKSUM | \ + PKT_TX_OUTER_IP_CKSUM | \ + PKT_TX_TCP_SEG | \ + PKT_TX_IPV4 | \ + PKT_TX_IPV6) + +#define QEDE_TX_OFFLOAD_MASK (QEDE_TX_CSUM_OFFLOAD_MASK | \ + PKT_TX_VLAN_PKT | \ + PKT_TX_TUNNEL_MASK) + +#define QEDE_TX_OFFLOAD_NOTSUP_MASK \ + (PKT_TX_OFFLOAD_MASK ^ QEDE_TX_OFFLOAD_MASK) + +/* + * RX BD descriptor ring + */ +struct qede_rx_entry { + struct rte_mbuf *mbuf; + uint32_t page_offset; + /* allows expansion .. */ +}; + +/* TPA related structures */ +struct qede_agg_info { + struct rte_mbuf *tpa_head; /* Pointer to first TPA segment */ + struct rte_mbuf *tpa_tail; /* Pointer to last TPA segment */ +}; + +/* + * Structure associated with each RX queue. + */ +struct qede_rx_queue { + /* Always keep qdev as first member */ + struct qede_dev *qdev; + struct rte_mempool *mb_pool; + struct ecore_chain rx_bd_ring; + struct ecore_chain rx_comp_ring; + uint16_t *hw_cons_ptr; + void OSAL_IOMEM *hw_rxq_prod_addr; + struct qede_rx_entry *sw_rx_ring; + struct ecore_sb_info *sb_info; + uint16_t sw_rx_cons; + uint16_t sw_rx_prod; + uint16_t nb_rx_desc; + uint16_t queue_id; + uint16_t port_id; + uint16_t rx_buf_size; + uint16_t rx_alloc_count; + uint16_t unused; + uint64_t rcv_pkts; + uint64_t rx_segs; + uint64_t rx_hw_errors; + uint64_t rx_alloc_errors; + struct qede_agg_info tpa_info[ETH_TPA_MAX_AGGS_NUM]; + void *handle; +}; + +/* + * TX BD descriptor ring + */ +struct qede_tx_entry { + struct rte_mbuf *mbuf; + uint8_t flags; +}; + +union db_prod { + struct eth_db_data data; + uint32_t raw; +}; + +struct qede_tx_queue { + /* Always keep qdev as first member */ + struct qede_dev *qdev; + struct ecore_chain tx_pbl; + struct qede_tx_entry *sw_tx_ring; + uint16_t nb_tx_desc; + uint16_t nb_tx_avail; + uint16_t tx_free_thresh; + uint16_t queue_id; + uint16_t *hw_cons_ptr; + uint16_t sw_tx_cons; + uint16_t sw_tx_prod; + void OSAL_IOMEM *doorbell_addr; + volatile union db_prod tx_db; + uint16_t port_id; + uint64_t xmit_pkts; + bool is_legacy; + void *handle; +}; + +struct qede_fastpath { + struct ecore_sb_info *sb_info; + struct qede_rx_queue *rxq; + struct qede_tx_queue *txq; +}; + +/* This structure holds the inforation of fast path queues + * belonging to individual engines in CMT mode. 
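+ * The _cmt receive and transmit handlers split each burst between the two engines' queues, fp0 and fp1.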
+ */ +struct qede_fastpath_cmt { + /* Always keep this a first element */ + struct qede_dev *qdev; + /* fastpath info of engine 0 */ + struct qede_fastpath *fp0; + /* fastpath info of engine 1 */ + struct qede_fastpath *fp1; +}; + +/* + * RX/TX function prototypes + */ +int qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, + uint16_t nb_desc, unsigned int socket_id, + const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *mp); + +int qede_tx_queue_setup(struct rte_eth_dev *dev, + uint16_t queue_idx, + uint16_t nb_desc, + unsigned int socket_id, + const struct rte_eth_txconf *tx_conf); + +void qede_rx_queue_release(void *rx_queue); + +void qede_tx_queue_release(void *tx_queue); + +uint16_t qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); +uint16_t qede_xmit_pkts_cmt(void *p_txq, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); +uint16_t qede_xmit_pkts_regular(void *p_txq, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); + +uint16_t qede_xmit_prep_pkts(void *p_txq, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); + +uint16_t qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); +uint16_t qede_recv_pkts_cmt(void *p_rxq, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); +uint16_t +qede_recv_pkts_regular(void *p_rxq, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); +uint16_t qede_rxtx_pkts_dummy(void *p_rxq, + struct rte_mbuf **pkts, + uint16_t nb_pkts); + +int qede_start_queues(struct rte_eth_dev *eth_dev); + +void qede_stop_queues(struct rte_eth_dev *eth_dev); +int qede_calc_rx_buf_size(struct rte_eth_dev *dev, uint16_t mbufsz, + uint16_t max_frame_size); +int +qede_rx_descriptor_status(void *rxq, uint16_t offset); + +/* Fastpath resource alloc/dealloc helpers */ +int qede_alloc_fp_resc(struct qede_dev *qdev); + +void qede_dealloc_fp_resc(struct rte_eth_dev *eth_dev); + +#endif /* _QEDE_RXTX_H_ */ diff --git a/src/spdk/dpdk/drivers/net/qede/rte_pmd_qede_version.map b/src/spdk/dpdk/drivers/net/qede/rte_pmd_qede_version.map new file mode 100644 index 000000000..f9f17e4f6 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/qede/rte_pmd_qede_version.map @@ -0,0 +1,3 @@ +DPDK_20.0 { + local: *; +}; diff --git a/src/spdk/dpdk/drivers/net/ring/Makefile b/src/spdk/dpdk/drivers/net/ring/Makefile new file mode 100644 index 000000000..d6a3dec35 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ring/Makefile @@ -0,0 +1,29 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2010-2014 Intel Corporation + +include $(RTE_SDK)/mk/rte.vars.mk + +# +# library name +# +LIB = librte_pmd_ring.a + +CFLAGS += -O3 +CFLAGS += $(WERROR_FLAGS) +LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring +LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs +LDLIBS += -lrte_bus_vdev + +EXPORT_MAP := rte_pmd_ring_version.map + +# +# all source are stored in SRCS-y +# +SRCS-$(CONFIG_RTE_LIBRTE_PMD_RING) += rte_eth_ring.c + +# +# Export include files +# +SYMLINK-y-include += rte_eth_ring.h + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/src/spdk/dpdk/drivers/net/ring/meson.build b/src/spdk/dpdk/drivers/net/ring/meson.build new file mode 100644 index 000000000..e877a4b4b --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ring/meson.build @@ -0,0 +1,5 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2017 Intel Corporation + +sources = files('rte_eth_ring.c') +install_headers('rte_eth_ring.h') diff --git a/src/spdk/dpdk/drivers/net/ring/rte_eth_ring.c b/src/spdk/dpdk/drivers/net/ring/rte_eth_ring.c new file mode 100644 index 000000000..f0fafa0c0 --- /dev/null +++ 
b/src/spdk/dpdk/drivers/net/ring/rte_eth_ring.c @@ -0,0 +1,711 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2015 Intel Corporation + */ + +#include "rte_eth_ring.h" +#include +#include +#include +#include +#include +#include +#include +#include + +#define ETH_RING_NUMA_NODE_ACTION_ARG "nodeaction" +#define ETH_RING_ACTION_CREATE "CREATE" +#define ETH_RING_ACTION_ATTACH "ATTACH" +#define ETH_RING_INTERNAL_ARG "internal" + +static const char *valid_arguments[] = { + ETH_RING_NUMA_NODE_ACTION_ARG, + ETH_RING_INTERNAL_ARG, + NULL +}; + +struct ring_internal_args { + struct rte_ring * const *rx_queues; + const unsigned int nb_rx_queues; + struct rte_ring * const *tx_queues; + const unsigned int nb_tx_queues; + const unsigned int numa_node; + void *addr; /* self addr for sanity check */ +}; + +enum dev_action { + DEV_CREATE, + DEV_ATTACH +}; + +struct ring_queue { + struct rte_ring *rng; + rte_atomic64_t rx_pkts; + rte_atomic64_t tx_pkts; +}; + +struct pmd_internals { + unsigned int max_rx_queues; + unsigned int max_tx_queues; + + struct ring_queue rx_ring_queues[RTE_PMD_RING_MAX_RX_RINGS]; + struct ring_queue tx_ring_queues[RTE_PMD_RING_MAX_TX_RINGS]; + + struct rte_ether_addr address; + enum dev_action action; +}; + +static struct rte_eth_link pmd_link = { + .link_speed = ETH_SPEED_NUM_10G, + .link_duplex = ETH_LINK_FULL_DUPLEX, + .link_status = ETH_LINK_DOWN, + .link_autoneg = ETH_LINK_FIXED, +}; + +static int eth_ring_logtype; + +#define PMD_LOG(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, eth_ring_logtype, \ + "%s(): " fmt "\n", __func__, ##args) + +static uint16_t +eth_ring_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs) +{ + void **ptrs = (void *)&bufs[0]; + struct ring_queue *r = q; + const uint16_t nb_rx = (uint16_t)rte_ring_dequeue_burst(r->rng, + ptrs, nb_bufs, NULL); + if (r->rng->flags & RING_F_SC_DEQ) + r->rx_pkts.cnt += nb_rx; + else + rte_atomic64_add(&(r->rx_pkts), nb_rx); + return nb_rx; +} + +static uint16_t +eth_ring_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs) +{ + void **ptrs = (void *)&bufs[0]; + struct ring_queue *r = q; + const uint16_t nb_tx = (uint16_t)rte_ring_enqueue_burst(r->rng, + ptrs, nb_bufs, NULL); + if (r->rng->flags & RING_F_SP_ENQ) + r->tx_pkts.cnt += nb_tx; + else + rte_atomic64_add(&(r->tx_pkts), nb_tx); + return nb_tx; +} + +static int +eth_dev_configure(struct rte_eth_dev *dev __rte_unused) { return 0; } + +static int +eth_dev_start(struct rte_eth_dev *dev) +{ + dev->data->dev_link.link_status = ETH_LINK_UP; + return 0; +} + +static void +eth_dev_stop(struct rte_eth_dev *dev) +{ + dev->data->dev_link.link_status = ETH_LINK_DOWN; +} + +static int +eth_dev_set_link_down(struct rte_eth_dev *dev) +{ + dev->data->dev_link.link_status = ETH_LINK_DOWN; + return 0; +} + +static int +eth_dev_set_link_up(struct rte_eth_dev *dev) +{ + dev->data->dev_link.link_status = ETH_LINK_UP; + return 0; +} + +static int +eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id, + uint16_t nb_rx_desc __rte_unused, + unsigned int socket_id __rte_unused, + const struct rte_eth_rxconf *rx_conf __rte_unused, + struct rte_mempool *mb_pool __rte_unused) +{ + struct pmd_internals *internals = dev->data->dev_private; + + dev->data->rx_queues[rx_queue_id] = &internals->rx_ring_queues[rx_queue_id]; + return 0; +} + +static int +eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id, + uint16_t nb_tx_desc __rte_unused, + unsigned int socket_id __rte_unused, + const struct rte_eth_txconf *tx_conf __rte_unused) +{ + struct 
pmd_internals *internals = dev->data->dev_private; + + dev->data->tx_queues[tx_queue_id] = &internals->tx_ring_queues[tx_queue_id]; + return 0; +} + + +static int +eth_dev_info(struct rte_eth_dev *dev, + struct rte_eth_dev_info *dev_info) +{ + struct pmd_internals *internals = dev->data->dev_private; + + dev_info->max_mac_addrs = 1; + dev_info->max_rx_pktlen = (uint32_t)-1; + dev_info->max_rx_queues = (uint16_t)internals->max_rx_queues; + dev_info->max_tx_queues = (uint16_t)internals->max_tx_queues; + dev_info->min_rx_bufsize = 0; + + return 0; +} + +static int +eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) +{ + unsigned int i; + unsigned long rx_total = 0, tx_total = 0; + const struct pmd_internals *internal = dev->data->dev_private; + + for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS && + i < dev->data->nb_rx_queues; i++) { + stats->q_ipackets[i] = internal->rx_ring_queues[i].rx_pkts.cnt; + rx_total += stats->q_ipackets[i]; + } + + for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS && + i < dev->data->nb_tx_queues; i++) { + stats->q_opackets[i] = internal->tx_ring_queues[i].tx_pkts.cnt; + tx_total += stats->q_opackets[i]; + } + + stats->ipackets = rx_total; + stats->opackets = tx_total; + + return 0; +} + +static int +eth_stats_reset(struct rte_eth_dev *dev) +{ + unsigned int i; + struct pmd_internals *internal = dev->data->dev_private; + + for (i = 0; i < dev->data->nb_rx_queues; i++) + internal->rx_ring_queues[i].rx_pkts.cnt = 0; + for (i = 0; i < dev->data->nb_tx_queues; i++) + internal->tx_ring_queues[i].tx_pkts.cnt = 0; + + return 0; +} + +static void +eth_mac_addr_remove(struct rte_eth_dev *dev __rte_unused, + uint32_t index __rte_unused) +{ +} + +static int +eth_mac_addr_add(struct rte_eth_dev *dev __rte_unused, + struct rte_ether_addr *mac_addr __rte_unused, + uint32_t index __rte_unused, + uint32_t vmdq __rte_unused) +{ + return 0; +} + +static void +eth_queue_release(void *q __rte_unused) { ; } +static int +eth_link_update(struct rte_eth_dev *dev __rte_unused, + int wait_to_complete __rte_unused) { return 0; } + +static const struct eth_dev_ops ops = { + .dev_start = eth_dev_start, + .dev_stop = eth_dev_stop, + .dev_set_link_up = eth_dev_set_link_up, + .dev_set_link_down = eth_dev_set_link_down, + .dev_configure = eth_dev_configure, + .dev_infos_get = eth_dev_info, + .rx_queue_setup = eth_rx_queue_setup, + .tx_queue_setup = eth_tx_queue_setup, + .rx_queue_release = eth_queue_release, + .tx_queue_release = eth_queue_release, + .link_update = eth_link_update, + .stats_get = eth_stats_get, + .stats_reset = eth_stats_reset, + .mac_addr_remove = eth_mac_addr_remove, + .mac_addr_add = eth_mac_addr_add, +}; + +static int +do_eth_dev_ring_create(const char *name, + struct rte_vdev_device *vdev, + struct rte_ring * const rx_queues[], + const unsigned int nb_rx_queues, + struct rte_ring *const tx_queues[], + const unsigned int nb_tx_queues, + const unsigned int numa_node, enum dev_action action, + struct rte_eth_dev **eth_dev_p) +{ + struct rte_eth_dev_data *data = NULL; + struct pmd_internals *internals = NULL; + struct rte_eth_dev *eth_dev = NULL; + void **rx_queues_local = NULL; + void **tx_queues_local = NULL; + unsigned int i; + + PMD_LOG(INFO, "Creating rings-backed ethdev on numa socket %u", + numa_node); + + rx_queues_local = rte_calloc_socket(name, nb_rx_queues, + sizeof(void *), 0, numa_node); + if (rx_queues_local == NULL) { + rte_errno = ENOMEM; + goto error; + } + + tx_queues_local = rte_calloc_socket(name, nb_tx_queues, + sizeof(void *), 0, numa_node); + if 
(tx_queues_local == NULL) { + rte_errno = ENOMEM; + goto error; + } + + internals = rte_zmalloc_socket(name, sizeof(*internals), 0, numa_node); + if (internals == NULL) { + rte_errno = ENOMEM; + goto error; + } + + /* reserve an ethdev entry */ + eth_dev = rte_eth_dev_allocate(name); + if (eth_dev == NULL) { + rte_errno = ENOSPC; + goto error; + } + + /* now put it all together + * - store EAL device in eth_dev, + * - store queue data in internals, + * - store numa_node info in eth_dev_data + * - point eth_dev_data to internals + * - and point eth_dev structure to new eth_dev_data structure + */ + + eth_dev->device = &vdev->device; + + data = eth_dev->data; + data->rx_queues = rx_queues_local; + data->tx_queues = tx_queues_local; + + internals->action = action; + internals->max_rx_queues = nb_rx_queues; + internals->max_tx_queues = nb_tx_queues; + for (i = 0; i < nb_rx_queues; i++) { + internals->rx_ring_queues[i].rng = rx_queues[i]; + data->rx_queues[i] = &internals->rx_ring_queues[i]; + } + for (i = 0; i < nb_tx_queues; i++) { + internals->tx_ring_queues[i].rng = tx_queues[i]; + data->tx_queues[i] = &internals->tx_ring_queues[i]; + } + + data->dev_private = internals; + data->nb_rx_queues = (uint16_t)nb_rx_queues; + data->nb_tx_queues = (uint16_t)nb_tx_queues; + data->dev_link = pmd_link; + data->mac_addrs = &internals->address; + data->promiscuous = 1; + data->all_multicast = 1; + + eth_dev->dev_ops = &ops; + data->kdrv = RTE_KDRV_NONE; + data->numa_node = numa_node; + + /* finally assign rx and tx ops */ + eth_dev->rx_pkt_burst = eth_ring_rx; + eth_dev->tx_pkt_burst = eth_ring_tx; + + rte_eth_dev_probing_finish(eth_dev); + *eth_dev_p = eth_dev; + + return data->port_id; + +error: + rte_free(rx_queues_local); + rte_free(tx_queues_local); + rte_free(internals); + + return -1; +} + +int +rte_eth_from_rings(const char *name, struct rte_ring *const rx_queues[], + const unsigned int nb_rx_queues, + struct rte_ring *const tx_queues[], + const unsigned int nb_tx_queues, + const unsigned int numa_node) +{ + struct ring_internal_args args = { + .rx_queues = rx_queues, + .nb_rx_queues = nb_rx_queues, + .tx_queues = tx_queues, + .nb_tx_queues = nb_tx_queues, + .numa_node = numa_node, + .addr = &args, + }; + char args_str[32]; + char ring_name[RTE_RING_NAMESIZE]; + uint16_t port_id = RTE_MAX_ETHPORTS; + int ret; + + /* do some parameter checking */ + if (rx_queues == NULL && nb_rx_queues > 0) { + rte_errno = EINVAL; + return -1; + } + if (tx_queues == NULL && nb_tx_queues > 0) { + rte_errno = EINVAL; + return -1; + } + if (nb_rx_queues > RTE_PMD_RING_MAX_RX_RINGS) { + rte_errno = EINVAL; + return -1; + } + + snprintf(args_str, sizeof(args_str), "%s=%p", + ETH_RING_INTERNAL_ARG, &args); + + ret = snprintf(ring_name, sizeof(ring_name), "net_ring_%s", name); + if (ret >= (int)sizeof(ring_name)) { + rte_errno = ENAMETOOLONG; + return -1; + } + + ret = rte_vdev_init(ring_name, args_str); + if (ret) { + rte_errno = EINVAL; + return -1; + } + + ret = rte_eth_dev_get_port_by_name(ring_name, &port_id); + if (ret) { + rte_errno = ENODEV; + return -1; + } + + return port_id; +} + +int +rte_eth_from_ring(struct rte_ring *r) +{ + return rte_eth_from_rings(r->name, &r, 1, &r, 1, + r->memzone ? r->memzone->socket_id : SOCKET_ID_ANY); +} + +static int +eth_dev_ring_create(const char *name, + struct rte_vdev_device *vdev, + const unsigned int numa_node, + enum dev_action action, struct rte_eth_dev **eth_dev) +{ + /* rx and tx are so-called from point of view of first port. 
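+ * (the same rxtx[] array is wired up as both the Rx and Tx queues of the port created here).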
+ * They are inverted from the point of view of second port + */ + struct rte_ring *rxtx[RTE_PMD_RING_MAX_RX_RINGS]; + unsigned int i; + char rng_name[RTE_RING_NAMESIZE]; + unsigned int num_rings = RTE_MIN(RTE_PMD_RING_MAX_RX_RINGS, + RTE_PMD_RING_MAX_TX_RINGS); + + for (i = 0; i < num_rings; i++) { + int cc; + + cc = snprintf(rng_name, sizeof(rng_name), + "ETH_RXTX%u_%s", i, name); + if (cc >= (int)sizeof(rng_name)) { + rte_errno = ENAMETOOLONG; + return -1; + } + + rxtx[i] = (action == DEV_CREATE) ? + rte_ring_create(rng_name, 1024, numa_node, + RING_F_SP_ENQ|RING_F_SC_DEQ) : + rte_ring_lookup(rng_name); + if (rxtx[i] == NULL) + return -1; + } + + if (do_eth_dev_ring_create(name, vdev, rxtx, num_rings, rxtx, num_rings, + numa_node, action, eth_dev) < 0) + return -1; + + return 0; +} + +struct node_action_pair { + char name[PATH_MAX]; + unsigned int node; + enum dev_action action; +}; + +struct node_action_list { + unsigned int total; + unsigned int count; + struct node_action_pair *list; +}; + +static int parse_kvlist(const char *key __rte_unused, + const char *value, void *data) +{ + struct node_action_list *info = data; + int ret; + char *name; + char *action; + char *node; + char *end; + + name = strdup(value); + + ret = -EINVAL; + + if (!name) { + PMD_LOG(WARNING, "command line parameter is empty for ring pmd!"); + goto out; + } + + node = strchr(name, ':'); + if (!node) { + PMD_LOG(WARNING, "could not parse node value from %s", + name); + goto out; + } + + *node = '\0'; + node++; + + action = strchr(node, ':'); + if (!action) { + PMD_LOG(WARNING, "could not parse action value from %s", + node); + goto out; + } + + *action = '\0'; + action++; + + /* + * Need to do some sanity checking here + */ + + if (strcmp(action, ETH_RING_ACTION_ATTACH) == 0) + info->list[info->count].action = DEV_ATTACH; + else if (strcmp(action, ETH_RING_ACTION_CREATE) == 0) + info->list[info->count].action = DEV_CREATE; + else + goto out; + + errno = 0; + info->list[info->count].node = strtol(node, &end, 10); + + if ((errno != 0) || (*end != '\0')) { + PMD_LOG(WARNING, + "node value %s is unparseable as a number", node); + goto out; + } + + strlcpy(info->list[info->count].name, name, + sizeof(info->list[info->count].name)); + + info->count++; + + ret = 0; +out: + free(name); + return ret; +} + +static int +parse_internal_args(const char *key __rte_unused, const char *value, + void *data) +{ + struct ring_internal_args **internal_args = data; + void *args; + + sscanf(value, "%p", &args); + + *internal_args = args; + + if ((*internal_args)->addr != args) + return -1; + + return 0; +} + +static int +rte_pmd_ring_probe(struct rte_vdev_device *dev) +{ + const char *name, *params; + struct rte_kvargs *kvlist = NULL; + int ret = 0; + struct node_action_list *info = NULL; + struct rte_eth_dev *eth_dev = NULL; + struct ring_internal_args *internal_args; + + name = rte_vdev_device_name(dev); + params = rte_vdev_device_args(dev); + + PMD_LOG(INFO, "Initializing pmd_ring for %s", name); + + if (params == NULL || params[0] == '\0') { + ret = eth_dev_ring_create(name, dev, rte_socket_id(), DEV_CREATE, + ð_dev); + if (ret == -1) { + PMD_LOG(INFO, + "Attach to pmd_ring for %s", name); + ret = eth_dev_ring_create(name, dev, rte_socket_id(), + DEV_ATTACH, ð_dev); + } + } else { + kvlist = rte_kvargs_parse(params, valid_arguments); + + if (!kvlist) { + PMD_LOG(INFO, + "Ignoring unsupported parameters when creatingrings-backed ethernet device"); + ret = eth_dev_ring_create(name, dev, rte_socket_id(), + DEV_CREATE, ð_dev); + if 
(ret == -1) { + PMD_LOG(INFO, + "Attach to pmd_ring for %s", + name); + ret = eth_dev_ring_create(name, dev, rte_socket_id(), + DEV_ATTACH, ð_dev); + } + + return ret; + } + + if (rte_kvargs_count(kvlist, ETH_RING_INTERNAL_ARG) == 1) { + ret = rte_kvargs_process(kvlist, ETH_RING_INTERNAL_ARG, + parse_internal_args, + &internal_args); + if (ret < 0) + goto out_free; + + ret = do_eth_dev_ring_create(name, dev, + internal_args->rx_queues, + internal_args->nb_rx_queues, + internal_args->tx_queues, + internal_args->nb_tx_queues, + internal_args->numa_node, + DEV_ATTACH, + ð_dev); + if (ret >= 0) + ret = 0; + } else { + ret = rte_kvargs_count(kvlist, ETH_RING_NUMA_NODE_ACTION_ARG); + info = rte_zmalloc("struct node_action_list", + sizeof(struct node_action_list) + + (sizeof(struct node_action_pair) * ret), + 0); + if (!info) + goto out_free; + + info->total = ret; + info->list = (struct node_action_pair *)(info + 1); + + ret = rte_kvargs_process(kvlist, ETH_RING_NUMA_NODE_ACTION_ARG, + parse_kvlist, info); + + if (ret < 0) + goto out_free; + + for (info->count = 0; info->count < info->total; info->count++) { + ret = eth_dev_ring_create(info->list[info->count].name, + dev, + info->list[info->count].node, + info->list[info->count].action, + ð_dev); + if ((ret == -1) && + (info->list[info->count].action == DEV_CREATE)) { + PMD_LOG(INFO, + "Attach to pmd_ring for %s", + name); + ret = eth_dev_ring_create(name, dev, + info->list[info->count].node, + DEV_ATTACH, + ð_dev); + } + } + } + } + +out_free: + rte_kvargs_free(kvlist); + rte_free(info); + return ret; +} + +static int +rte_pmd_ring_remove(struct rte_vdev_device *dev) +{ + const char *name = rte_vdev_device_name(dev); + struct rte_eth_dev *eth_dev = NULL; + struct pmd_internals *internals = NULL; + struct ring_queue *r = NULL; + uint16_t i; + + PMD_LOG(INFO, "Un-Initializing pmd_ring for %s", name); + + if (name == NULL) + return -EINVAL; + + /* find an ethdev entry */ + eth_dev = rte_eth_dev_allocated(name); + if (eth_dev == NULL) + return -ENODEV; + + eth_dev_stop(eth_dev); + + internals = eth_dev->data->dev_private; + if (internals->action == DEV_CREATE) { + /* + * it is only necessary to delete the rings in rx_queues because + * they are the same used in tx_queues + */ + for (i = 0; i < eth_dev->data->nb_rx_queues; i++) { + r = eth_dev->data->rx_queues[i]; + rte_ring_free(r->rng); + } + } + + /* mac_addrs must not be freed alone because part of dev_private */ + eth_dev->data->mac_addrs = NULL; + rte_eth_dev_release_port(eth_dev); + return 0; +} + +static struct rte_vdev_driver pmd_ring_drv = { + .probe = rte_pmd_ring_probe, + .remove = rte_pmd_ring_remove, +}; + +RTE_PMD_REGISTER_VDEV(net_ring, pmd_ring_drv); +RTE_PMD_REGISTER_ALIAS(net_ring, eth_ring); +RTE_PMD_REGISTER_PARAM_STRING(net_ring, + ETH_RING_NUMA_NODE_ACTION_ARG "=name:node:action(ATTACH|CREATE)"); + +RTE_INIT(eth_ring_init_log) +{ + eth_ring_logtype = rte_log_register("pmd.net.ring"); + if (eth_ring_logtype >= 0) + rte_log_set_level(eth_ring_logtype, RTE_LOG_NOTICE); +} diff --git a/src/spdk/dpdk/drivers/net/ring/rte_eth_ring.h b/src/spdk/dpdk/drivers/net/ring/rte_eth_ring.h new file mode 100644 index 000000000..59e074d0a --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ring/rte_eth_ring.h @@ -0,0 +1,57 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2014 Intel Corporation + */ + +#ifndef _RTE_ETH_RING_H_ +#define _RTE_ETH_RING_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include + +/** + * Create a new ethdev port from a set of rings + * + * @param name + 
* name to be given to the new ethdev port + * @param rx_queues + * pointer to array of rte_rings to be used as RX queues + * @param nb_rx_queues + * number of elements in the rx_queues array + * @param tx_queues + * pointer to array of rte_rings to be used as TX queues + * @param nb_tx_queues + * number of elements in the tx_queues array + * @param numa_node + * the numa node on which the memory for this port is to be allocated + * @return + * the port number of the newly created the ethdev or -1 on error. + */ +int rte_eth_from_rings(const char *name, + struct rte_ring * const rx_queues[], + const unsigned nb_rx_queues, + struct rte_ring *const tx_queues[], + const unsigned nb_tx_queues, + const unsigned numa_node); + +/** + * Create a new ethdev port from a ring + * + * This function is a shortcut call for rte_eth_from_rings for the + * case where one wants to take a single rte_ring and use it as though + * it were an ethdev + * + * @param ring + * the ring to be used as an ethdev + * @return + * the port number of the newly created ethdev, or -1 on error + */ +int rte_eth_from_ring(struct rte_ring *r); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/spdk/dpdk/drivers/net/ring/rte_pmd_ring_version.map b/src/spdk/dpdk/drivers/net/ring/rte_pmd_ring_version.map new file mode 100644 index 000000000..ebb6be273 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/ring/rte_pmd_ring_version.map @@ -0,0 +1,8 @@ +DPDK_20.0 { + global: + + rte_eth_from_ring; + rte_eth_from_rings; + + local: *; +}; diff --git a/src/spdk/dpdk/drivers/net/sfc/Makefile b/src/spdk/dpdk/drivers/net/sfc/Makefile new file mode 100644 index 000000000..20bf34381 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/Makefile @@ -0,0 +1,133 @@ +# SPDX-License-Identifier: BSD-3-Clause +# +# Copyright(c) 2019-2020 Xilinx, Inc. +# Copyright(c) 2016-2019 Solarflare Communications Inc. +# +# This software was jointly developed between OKTET Labs (under contract +# for Solarflare) and Solarflare Communications, Inc. 
+ +include $(RTE_SDK)/mk/rte.vars.mk + +# +# library name +# +LIB = librte_pmd_sfc_efx.a + +CFLAGS += -I$(SRCDIR)/base/ +CFLAGS += -I$(SRCDIR) +CFLAGS += -O3 +CFLAGS += $(WERROR_FLAGS) +# Strict-aliasing rules are violated by rte_eth_link to uint64_t casts +CFLAGS += -Wno-strict-aliasing + +# Enable extra warnings +CFLAGS += -Wextra + +# More warnings not enabled by above aggregators +CFLAGS += -Wdisabled-optimization + +# Extra CFLAGS for base driver files +CFLAGS_BASE_DRIVER += -Wno-sign-compare +CFLAGS_BASE_DRIVER += -Wno-unused-parameter +CFLAGS_BASE_DRIVER += -Wno-unused-variable + +# Compiler and version dependent flags +ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y) +CFLAGS += -Waggregate-return +CFLAGS += -Wnested-externs +CFLAGS_BASE_DRIVER += -Wno-empty-body +CFLAGS_BASE_DRIVER += -Wno-unused-but-set-variable +else ifeq ($(CONFIG_RTE_TOOLCHAIN_CLANG),y) +CFLAGS += -Waggregate-return +CFLAGS += -Wbad-function-cast +CFLAGS_BASE_DRIVER += -Wno-empty-body +else ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y) +CFLAGS_BASE_DRIVER += -Wno-unused-but-set-variable +# Suppress ICC false positive warning on 'bulk' may be used before its +# value is set +CFLAGS_sfc_ef10_tx.o += -diag-disable 3656 +endif +LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring +LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs +LDLIBS += -lrte_bus_pci -lrte_pci + +# +# List of base driver object files for which +# special CFLAGS above should be applied +# +BASE_DRIVER_OBJS=$(sort $(patsubst %.c,%.o,$(notdir $(wildcard $(SRCDIR)/base/*.c)))) +$(foreach obj, $(BASE_DRIVER_OBJS), \ + $(eval CFLAGS_$(obj)+=$(CFLAGS_BASE_DRIVER))) + +EXPORT_MAP := rte_pmd_sfc_version.map + +# +# all source are stored in SRCS-y +# +SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_ethdev.c +SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_kvargs.c +SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc.c +SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_mcdi.c +SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_intr.c +SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_ev.c +SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_port.c +SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_rx.c +SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_tx.c +SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_tso.c +SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_filter.c +SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_flow.c +SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_dp.c +SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_ef10_rx.c +SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_ef10_essb_rx.c +SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_ef10_tx.c + +VPATH += $(SRCDIR)/base + +SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_bootcfg.c +SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_crc32.c +SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_ev.c +SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_evb.c +SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_filter.c +SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_hash.c +SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_intr.c +SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_lic.c +SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_mac.c +SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_mcdi.c +SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_mon.c +SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_nic.c +SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_nvram.c +SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_phy.c +SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_port.c +SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_proxy.c +SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_rx.c +SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_sram.c 
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_tunnel.c +SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_tx.c +SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_vpd.c +SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += mcdi_mon.c +SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += siena_mac.c +SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += siena_mcdi.c +SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += siena_nic.c +SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += siena_nvram.c +SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += siena_phy.c +SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += siena_sram.c +SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += siena_vpd.c +SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += ef10_ev.c +SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += ef10_evb.c +SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += ef10_filter.c +SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += ef10_intr.c +SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += ef10_image.c +SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += ef10_mac.c +SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += ef10_mcdi.c +SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += ef10_nic.c +SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += ef10_nvram.c +SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += ef10_phy.c +SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += ef10_proxy.c +SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += ef10_rx.c +SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += ef10_tx.c +SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += ef10_vpd.c +SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += hunt_nic.c +SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += medford_nic.c +SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += medford2_nic.c + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/src/spdk/dpdk/drivers/net/sfc/base/README b/src/spdk/dpdk/drivers/net/sfc/base/README new file mode 100644 index 000000000..645aa62e8 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/base/README @@ -0,0 +1,16 @@ + SPDX-License-Identifier: BSD-3-Clause + + Copyright(c) 2019-2020 Xilinx, Inc. + Copyright(c) 2006-2019 Solarflare Communications Inc. + +Solarflare libefx driver library +================================ + +This directory contains source code of Solarflare Communications libefx +driver library of version v4.10.0.1012. + +Updating +======== + +The source code in this directory should not be modified. +Please contact the driver maintainers to request changes. diff --git a/src/spdk/dpdk/drivers/net/sfc/base/ef10_ev.c b/src/spdk/dpdk/drivers/net/sfc/base/ef10_ev.c new file mode 100644 index 000000000..f0a135ed5 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/base/ef10_ev.c @@ -0,0 +1,1476 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2012-2019 Solarflare Communications Inc. + */ + +#include "efx.h" +#include "efx_impl.h" +#if EFSYS_OPT_MON_STATS +#include "mcdi_mon.h" +#endif + +#if EFX_OPTS_EF10() + +/* + * Non-interrupting event queue requires interrrupting event queue to + * refer to for wake-up events even if wake ups are never used. + * It could be even non-allocated event queue. 
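+ * Here that role is played by event queue 0 (see
+ * EFX_EF10_ALWAYS_INTERRUPTING_EVQ_INDEX below): ef10_ev_qcreate() forces
+ * queue 0 to be interrupting and points the wake-up IRQ reference of every
+ * other non-interrupting queue at it.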
+ */ +#define EFX_EF10_ALWAYS_INTERRUPTING_EVQ_INDEX (0) + +static __checkReturn boolean_t +ef10_ev_rx( + __in efx_evq_t *eep, + __in efx_qword_t *eqp, + __in const efx_ev_callbacks_t *eecp, + __in_opt void *arg); + +static __checkReturn boolean_t +ef10_ev_tx( + __in efx_evq_t *eep, + __in efx_qword_t *eqp, + __in const efx_ev_callbacks_t *eecp, + __in_opt void *arg); + +static __checkReturn boolean_t +ef10_ev_driver( + __in efx_evq_t *eep, + __in efx_qword_t *eqp, + __in const efx_ev_callbacks_t *eecp, + __in_opt void *arg); + +static __checkReturn boolean_t +ef10_ev_drv_gen( + __in efx_evq_t *eep, + __in efx_qword_t *eqp, + __in const efx_ev_callbacks_t *eecp, + __in_opt void *arg); + +static __checkReturn boolean_t +ef10_ev_mcdi( + __in efx_evq_t *eep, + __in efx_qword_t *eqp, + __in const efx_ev_callbacks_t *eecp, + __in_opt void *arg); + + +static __checkReturn efx_rc_t +efx_mcdi_set_evq_tmr( + __in efx_nic_t *enp, + __in uint32_t instance, + __in uint32_t mode, + __in uint32_t timer_ns) +{ + efx_mcdi_req_t req; + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_SET_EVQ_TMR_IN_LEN, + MC_CMD_SET_EVQ_TMR_OUT_LEN); + efx_rc_t rc; + + req.emr_cmd = MC_CMD_SET_EVQ_TMR; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_SET_EVQ_TMR_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_SET_EVQ_TMR_OUT_LEN; + + MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_INSTANCE, instance); + MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS, timer_ns); + MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS, timer_ns); + MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_MODE, mode); + + efx_mcdi_execute(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail1; + } + + if (req.emr_out_length_used < MC_CMD_SET_EVQ_TMR_OUT_LEN) { + rc = EMSGSIZE; + goto fail2; + } + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +static __checkReturn efx_rc_t +efx_mcdi_init_evq( + __in efx_nic_t *enp, + __in unsigned int instance, + __in efsys_mem_t *esmp, + __in size_t nevs, + __in uint32_t irq, + __in uint32_t us, + __in uint32_t flags, + __in boolean_t low_latency) +{ + efx_mcdi_req_t req; + EFX_MCDI_DECLARE_BUF(payload, + MC_CMD_INIT_EVQ_IN_LEN(EF10_EVQ_MAXNBUFS), + MC_CMD_INIT_EVQ_OUT_LEN); + efx_qword_t *dma_addr; + uint64_t addr; + int npages; + int i; + boolean_t interrupting; + int ev_cut_through; + efx_rc_t rc; + + npages = efx_evq_nbufs(enp, nevs); + if (npages > EF10_EVQ_MAXNBUFS) { + rc = EINVAL; + goto fail1; + } + + req.emr_cmd = MC_CMD_INIT_EVQ; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_INIT_EVQ_IN_LEN(npages); + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_INIT_EVQ_OUT_LEN; + + MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_SIZE, nevs); + MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_INSTANCE, instance); + MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_IRQ_NUM, irq); + + interrupting = ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) == + EFX_EVQ_FLAGS_NOTIFY_INTERRUPT); + + /* + * On Huntington RX and TX event batching can only be requested together + * (even if the datapath firmware doesn't actually support RX + * batching). If event cut through is enabled no RX batching will occur. + * + * So always enable RX and TX event batching, and enable event cut + * through if we want low latency operation. + */ + switch (flags & EFX_EVQ_FLAGS_TYPE_MASK) { + case EFX_EVQ_FLAGS_TYPE_AUTO: + ev_cut_through = low_latency ? 
1 : 0; + break; + case EFX_EVQ_FLAGS_TYPE_THROUGHPUT: + ev_cut_through = 0; + break; + case EFX_EVQ_FLAGS_TYPE_LOW_LATENCY: + ev_cut_through = 1; + break; + default: + rc = EINVAL; + goto fail2; + } + MCDI_IN_POPULATE_DWORD_6(req, INIT_EVQ_IN_FLAGS, + INIT_EVQ_IN_FLAG_INTERRUPTING, interrupting, + INIT_EVQ_IN_FLAG_RPTR_DOS, 0, + INIT_EVQ_IN_FLAG_INT_ARMD, 0, + INIT_EVQ_IN_FLAG_CUT_THRU, ev_cut_through, + INIT_EVQ_IN_FLAG_RX_MERGE, 1, + INIT_EVQ_IN_FLAG_TX_MERGE, 1); + + /* If the value is zero then disable the timer */ + if (us == 0) { + MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_MODE, + MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS); + MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_LOAD, 0); + MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_RELOAD, 0); + } else { + unsigned int ticks; + + if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0) + goto fail3; + + MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_MODE, + MC_CMD_INIT_EVQ_IN_TMR_INT_HLDOFF); + MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_LOAD, ticks); + MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_RELOAD, ticks); + } + + MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_COUNT_MODE, + MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS); + MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_COUNT_THRSHLD, 0); + + dma_addr = MCDI_IN2(req, efx_qword_t, INIT_EVQ_IN_DMA_ADDR); + addr = EFSYS_MEM_ADDR(esmp); + + for (i = 0; i < npages; i++) { + EFX_POPULATE_QWORD_2(*dma_addr, + EFX_DWORD_1, (uint32_t)(addr >> 32), + EFX_DWORD_0, (uint32_t)(addr & 0xffffffff)); + + dma_addr++; + addr += EFX_BUF_SIZE; + } + + efx_mcdi_execute(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail4; + } + + if (req.emr_out_length_used < MC_CMD_INIT_EVQ_OUT_LEN) { + rc = EMSGSIZE; + goto fail5; + } + + /* NOTE: ignore the returned IRQ param as firmware does not set it. */ + + return (0); + +fail5: + EFSYS_PROBE(fail5); +fail4: + EFSYS_PROBE(fail4); +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + +static __checkReturn efx_rc_t +efx_mcdi_init_evq_v2( + __in efx_nic_t *enp, + __in unsigned int instance, + __in efsys_mem_t *esmp, + __in size_t nevs, + __in uint32_t irq, + __in uint32_t us, + __in uint32_t flags) +{ + efx_mcdi_req_t req; + EFX_MCDI_DECLARE_BUF(payload, + MC_CMD_INIT_EVQ_V2_IN_LEN(EF10_EVQ_MAXNBUFS), + MC_CMD_INIT_EVQ_V2_OUT_LEN); + boolean_t interrupting; + unsigned int evq_type; + efx_qword_t *dma_addr; + uint64_t addr; + int npages; + int i; + efx_rc_t rc; + + npages = efx_evq_nbufs(enp, nevs); + if (npages > EF10_EVQ_MAXNBUFS) { + rc = EINVAL; + goto fail1; + } + + req.emr_cmd = MC_CMD_INIT_EVQ; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_INIT_EVQ_V2_IN_LEN(npages); + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_INIT_EVQ_V2_OUT_LEN; + + MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_SIZE, nevs); + MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_INSTANCE, instance); + MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_IRQ_NUM, irq); + + interrupting = ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) == + EFX_EVQ_FLAGS_NOTIFY_INTERRUPT); + + switch (flags & EFX_EVQ_FLAGS_TYPE_MASK) { + case EFX_EVQ_FLAGS_TYPE_AUTO: + evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO; + break; + case EFX_EVQ_FLAGS_TYPE_THROUGHPUT: + evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_THROUGHPUT; + break; + case EFX_EVQ_FLAGS_TYPE_LOW_LATENCY: + evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_LOW_LATENCY; + break; + default: + rc = EINVAL; + goto fail2; + } + MCDI_IN_POPULATE_DWORD_4(req, INIT_EVQ_V2_IN_FLAGS, + INIT_EVQ_V2_IN_FLAG_INTERRUPTING, interrupting, + INIT_EVQ_V2_IN_FLAG_RPTR_DOS, 0, + 
INIT_EVQ_V2_IN_FLAG_INT_ARMD, 0, + INIT_EVQ_V2_IN_FLAG_TYPE, evq_type); + + /* If the value is zero then disable the timer */ + if (us == 0) { + MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_MODE, + MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_DIS); + MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_LOAD, 0); + MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_RELOAD, 0); + } else { + unsigned int ticks; + + if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0) + goto fail3; + + MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_MODE, + MC_CMD_INIT_EVQ_V2_IN_TMR_INT_HLDOFF); + MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_LOAD, ticks); + MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_RELOAD, ticks); + } + + MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_COUNT_MODE, + MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_DIS); + MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_COUNT_THRSHLD, 0); + + dma_addr = MCDI_IN2(req, efx_qword_t, INIT_EVQ_V2_IN_DMA_ADDR); + addr = EFSYS_MEM_ADDR(esmp); + + for (i = 0; i < npages; i++) { + EFX_POPULATE_QWORD_2(*dma_addr, + EFX_DWORD_1, (uint32_t)(addr >> 32), + EFX_DWORD_0, (uint32_t)(addr & 0xffffffff)); + + dma_addr++; + addr += EFX_BUF_SIZE; + } + + efx_mcdi_execute(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail4; + } + + if (req.emr_out_length_used < MC_CMD_INIT_EVQ_V2_OUT_LEN) { + rc = EMSGSIZE; + goto fail5; + } + + /* NOTE: ignore the returned IRQ param as firmware does not set it. */ + + EFSYS_PROBE1(mcdi_evq_flags, uint32_t, + MCDI_OUT_DWORD(req, INIT_EVQ_V2_OUT_FLAGS)); + + return (0); + +fail5: + EFSYS_PROBE(fail5); +fail4: + EFSYS_PROBE(fail4); +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +static __checkReturn efx_rc_t +efx_mcdi_fini_evq( + __in efx_nic_t *enp, + __in uint32_t instance) +{ + efx_mcdi_req_t req; + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_FINI_EVQ_IN_LEN, + MC_CMD_FINI_EVQ_OUT_LEN); + efx_rc_t rc; + + req.emr_cmd = MC_CMD_FINI_EVQ; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_FINI_EVQ_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_FINI_EVQ_OUT_LEN; + + MCDI_IN_SET_DWORD(req, FINI_EVQ_IN_INSTANCE, instance); + + efx_mcdi_execute_quiet(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail1; + } + + return (0); + +fail1: + /* + * EALREADY is not an error, but indicates that the MC has rebooted and + * that the EVQ has already been destroyed. + */ + if (rc != EALREADY) + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + + + __checkReturn efx_rc_t +ef10_ev_init( + __in efx_nic_t *enp) +{ + _NOTE(ARGUNUSED(enp)) + return (0); +} + + void +ef10_ev_fini( + __in efx_nic_t *enp) +{ + _NOTE(ARGUNUSED(enp)) +} + + __checkReturn efx_rc_t +ef10_ev_qcreate( + __in efx_nic_t *enp, + __in unsigned int index, + __in efsys_mem_t *esmp, + __in size_t ndescs, + __in uint32_t id, + __in uint32_t us, + __in uint32_t flags, + __in efx_evq_t *eep) +{ + efx_nic_cfg_t *encp = &(enp->en_nic_cfg); + uint32_t irq; + efx_rc_t rc; + + _NOTE(ARGUNUSED(id)) /* buftbl id managed by MC */ + + if (index >= encp->enc_evq_limit) { + rc = EINVAL; + goto fail1; + } + + if (us > encp->enc_evq_timer_max_us) { + rc = EINVAL; + goto fail2; + } + + /* + * NO_CONT_EV mode is only requested from the firmware when creating + * receive queues, but here it needs to be specified at event queue + * creation, as the event handler needs to know which format is in use. + * + * If EFX_EVQ_FLAGS_NO_CONT_EV is specified, all receive queues for this + * event queue will be created in NO_CONT_EV mode. 
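+ * (ef10_ev_rx() below relies on this flag: in NO_CONT_EV mode it treats
+ * RX_DSC_PTR_LBITS as a packet count, always takes the length from the
+ * packet prefix and honours the ABORT bit.)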
+ * + * See SF-109306-TC 5.11 "Events for RXQs in NO_CONT_EV mode". + */ + if (flags & EFX_EVQ_FLAGS_NO_CONT_EV) { + if (enp->en_nic_cfg.enc_no_cont_ev_mode_supported == B_FALSE) { + rc = EINVAL; + goto fail3; + } + } + + /* Set up the handler table */ + eep->ee_rx = ef10_ev_rx; + eep->ee_tx = ef10_ev_tx; + eep->ee_driver = ef10_ev_driver; + eep->ee_drv_gen = ef10_ev_drv_gen; + eep->ee_mcdi = ef10_ev_mcdi; + + /* Set up the event queue */ + /* INIT_EVQ expects function-relative vector number */ + if ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) == + EFX_EVQ_FLAGS_NOTIFY_INTERRUPT) { + irq = index; + } else if (index == EFX_EF10_ALWAYS_INTERRUPTING_EVQ_INDEX) { + irq = index; + flags = (flags & ~EFX_EVQ_FLAGS_NOTIFY_MASK) | + EFX_EVQ_FLAGS_NOTIFY_INTERRUPT; + } else { + irq = EFX_EF10_ALWAYS_INTERRUPTING_EVQ_INDEX; + } + + /* + * Interrupts may be raised for events immediately after the queue is + * created. See bug58606. + */ + + if (encp->enc_init_evq_v2_supported) { + /* + * On Medford the low latency license is required to enable RX + * and event cut through and to disable RX batching. If event + * queue type in flags is auto, we let the firmware decide the + * settings to use. If the adapter has a low latency license, + * it will choose the best settings for low latency, otherwise + * it will choose the best settings for throughput. + */ + rc = efx_mcdi_init_evq_v2(enp, index, esmp, ndescs, irq, us, + flags); + if (rc != 0) + goto fail4; + } else { + /* + * On Huntington we need to specify the settings to use. + * If event queue type in flags is auto, we favour throughput + * if the adapter is running virtualization supporting firmware + * (i.e. the full featured firmware variant) + * and latency otherwise. The Ethernet Virtual Bridging + * capability is used to make this decision. (Note though that + * the low latency firmware variant is also best for + * throughput and corresponding type should be specified + * to choose it.) + */ + boolean_t low_latency = encp->enc_datapath_cap_evb ? 
0 : 1; + rc = efx_mcdi_init_evq(enp, index, esmp, ndescs, irq, us, flags, + low_latency); + if (rc != 0) + goto fail5; + } + + return (0); + +fail5: + EFSYS_PROBE(fail5); +fail4: + EFSYS_PROBE(fail4); +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + void +ef10_ev_qdestroy( + __in efx_evq_t *eep) +{ + efx_nic_t *enp = eep->ee_enp; + + EFSYS_ASSERT(EFX_FAMILY_IS_EF10(enp)); + + (void) efx_mcdi_fini_evq(enp, eep->ee_index); +} + + __checkReturn efx_rc_t +ef10_ev_qprime( + __in efx_evq_t *eep, + __in unsigned int count) +{ + efx_nic_t *enp = eep->ee_enp; + uint32_t rptr; + efx_dword_t dword; + + rptr = count & eep->ee_mask; + + if (enp->en_nic_cfg.enc_bug35388_workaround) { + EFX_STATIC_ASSERT(EF10_EVQ_MINNEVS > + (1 << ERF_DD_EVQ_IND_RPTR_WIDTH)); + EFX_STATIC_ASSERT(EF10_EVQ_MAXNEVS < + (1 << 2 * ERF_DD_EVQ_IND_RPTR_WIDTH)); + + EFX_POPULATE_DWORD_2(dword, + ERF_DD_EVQ_IND_RPTR_FLAGS, + EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH, + ERF_DD_EVQ_IND_RPTR, + (rptr >> ERF_DD_EVQ_IND_RPTR_WIDTH)); + EFX_BAR_VI_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index, + &dword, B_FALSE); + + EFX_POPULATE_DWORD_2(dword, + ERF_DD_EVQ_IND_RPTR_FLAGS, + EFE_DD_EVQ_IND_RPTR_FLAGS_LOW, + ERF_DD_EVQ_IND_RPTR, + rptr & ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1)); + EFX_BAR_VI_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index, + &dword, B_FALSE); + } else { + EFX_POPULATE_DWORD_1(dword, ERF_DZ_EVQ_RPTR, rptr); + EFX_BAR_VI_WRITED(enp, ER_DZ_EVQ_RPTR_REG, eep->ee_index, + &dword, B_FALSE); + } + + return (0); +} + +static __checkReturn efx_rc_t +efx_mcdi_driver_event( + __in efx_nic_t *enp, + __in uint32_t evq, + __in efx_qword_t data) +{ + efx_mcdi_req_t req; + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_DRIVER_EVENT_IN_LEN, + MC_CMD_DRIVER_EVENT_OUT_LEN); + efx_rc_t rc; + + req.emr_cmd = MC_CMD_DRIVER_EVENT; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_DRIVER_EVENT_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_DRIVER_EVENT_OUT_LEN; + + MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_EVQ, evq); + + MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_DATA_LO, + EFX_QWORD_FIELD(data, EFX_DWORD_0)); + MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_DATA_HI, + EFX_QWORD_FIELD(data, EFX_DWORD_1)); + + efx_mcdi_execute(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail1; + } + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + void +ef10_ev_qpost( + __in efx_evq_t *eep, + __in uint16_t data) +{ + efx_nic_t *enp = eep->ee_enp; + efx_qword_t event; + + EFX_POPULATE_QWORD_3(event, + ESF_DZ_DRV_CODE, ESE_DZ_EV_CODE_DRV_GEN_EV, + ESF_DZ_DRV_SUB_CODE, 0, + ESF_DZ_DRV_SUB_DATA_DW0, (uint32_t)data); + + (void) efx_mcdi_driver_event(enp, eep->ee_index, event); +} + + __checkReturn efx_rc_t +ef10_ev_qmoderate( + __in efx_evq_t *eep, + __in unsigned int us) +{ + efx_nic_t *enp = eep->ee_enp; + efx_nic_cfg_t *encp = &(enp->en_nic_cfg); + efx_dword_t dword; + uint32_t mode; + efx_rc_t rc; + + /* Check that hardware and MCDI use the same timer MODE values */ + EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_DIS == + MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_DIS); + EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_IMMED_START == + MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_IMMED_START); + EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_TRIG_START == + MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_TRIG_START); + EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_INT_HLDOFF == + MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_INT_HLDOFF); + + if (us > encp->enc_evq_timer_max_us) { + rc = EINVAL; + goto fail1; + } + + /* If the 
value is zero then disable the timer */ + if (us == 0) { + mode = FFE_CZ_TIMER_MODE_DIS; + } else { + mode = FFE_CZ_TIMER_MODE_INT_HLDOFF; + } + + if (encp->enc_bug61265_workaround) { + uint32_t ns = us * 1000; + + rc = efx_mcdi_set_evq_tmr(enp, eep->ee_index, mode, ns); + if (rc != 0) + goto fail2; + } else { + unsigned int ticks; + + if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0) + goto fail3; + + if (encp->enc_bug35388_workaround) { + EFX_POPULATE_DWORD_3(dword, + ERF_DD_EVQ_IND_TIMER_FLAGS, + EFE_DD_EVQ_IND_TIMER_FLAGS, + ERF_DD_EVQ_IND_TIMER_MODE, mode, + ERF_DD_EVQ_IND_TIMER_VAL, ticks); + EFX_BAR_VI_WRITED(enp, ER_DD_EVQ_INDIRECT, + eep->ee_index, &dword, 0); + } else { + /* + * NOTE: The TMR_REL field introduced in Medford2 is + * ignored on earlier EF10 controllers. See bug66418 + * comment 9 for details. + */ + EFX_POPULATE_DWORD_3(dword, + ERF_DZ_TC_TIMER_MODE, mode, + ERF_DZ_TC_TIMER_VAL, ticks, + ERF_FZ_TC_TMR_REL_VAL, ticks); + EFX_BAR_VI_WRITED(enp, ER_DZ_EVQ_TMR_REG, + eep->ee_index, &dword, 0); + } + } + + return (0); + +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + +#if EFSYS_OPT_QSTATS + void +ef10_ev_qstats_update( + __in efx_evq_t *eep, + __inout_ecount(EV_NQSTATS) efsys_stat_t *stat) +{ + unsigned int id; + + for (id = 0; id < EV_NQSTATS; id++) { + efsys_stat_t *essp = &stat[id]; + + EFSYS_STAT_INCR(essp, eep->ee_stat[id]); + eep->ee_stat[id] = 0; + } +} +#endif /* EFSYS_OPT_QSTATS */ + +#if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER + +static __checkReturn boolean_t +ef10_ev_rx_packed_stream( + __in efx_evq_t *eep, + __in efx_qword_t *eqp, + __in const efx_ev_callbacks_t *eecp, + __in_opt void *arg) +{ + uint32_t label; + uint32_t pkt_count_lbits; + uint16_t flags; + boolean_t should_abort; + efx_evq_rxq_state_t *eersp; + unsigned int pkt_count; + unsigned int current_id; + boolean_t new_buffer; + + pkt_count_lbits = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DSC_PTR_LBITS); + label = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_QLABEL); + new_buffer = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_EV_ROTATE); + + flags = 0; + + eersp = &eep->ee_rxq_state[label]; + + /* + * RX_DSC_PTR_LBITS has least significant bits of the global + * (not per-buffer) packet counter. It is guaranteed that + * maximum number of completed packets fits in lbits-mask. + * So, modulo lbits-mask arithmetic should be used to calculate + * packet counter increment. + */ + pkt_count = (pkt_count_lbits - eersp->eers_rx_stream_npackets) & + EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS); + eersp->eers_rx_stream_npackets += pkt_count; + + if (new_buffer) { + flags |= EFX_PKT_PACKED_STREAM_NEW_BUFFER; +#if EFSYS_OPT_RX_PACKED_STREAM + /* + * If both packed stream and equal stride super-buffer + * modes are compiled in, in theory credits should be + * be maintained for packed stream only, but right now + * these modes are not distinguished in the event queue + * Rx queue state and it is OK to increment the counter + * regardless (it might be event cheaper than branching + * since neighbour structure member are updated as well). 
+ */ + eersp->eers_rx_packed_stream_credits++; +#endif + eersp->eers_rx_read_ptr++; + } + current_id = eersp->eers_rx_read_ptr & eersp->eers_rx_mask; + + /* Check for errors that invalidate checksum and L3/L4 fields */ + if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TRUNC_ERR) != 0) { + /* RX frame truncated */ + EFX_EV_QSTAT_INCR(eep, EV_RX_FRM_TRUNC); + flags |= EFX_DISCARD; + goto deliver; + } + if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECRC_ERR) != 0) { + /* Bad Ethernet frame CRC */ + EFX_EV_QSTAT_INCR(eep, EV_RX_ETH_CRC_ERR); + flags |= EFX_DISCARD; + goto deliver; + } + + if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_PARSE_INCOMPLETE)) { + EFX_EV_QSTAT_INCR(eep, EV_RX_PARSE_INCOMPLETE); + flags |= EFX_PKT_PACKED_STREAM_PARSE_INCOMPLETE; + goto deliver; + } + + if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_IPCKSUM_ERR)) + EFX_EV_QSTAT_INCR(eep, EV_RX_IPV4_HDR_CHKSUM_ERR); + + if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TCPUDP_CKSUM_ERR)) + EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_UDP_CHKSUM_ERR); + +deliver: + /* If we're not discarding the packet then it is ok */ + if (~flags & EFX_DISCARD) + EFX_EV_QSTAT_INCR(eep, EV_RX_OK); + + EFSYS_ASSERT(eecp->eec_rx_ps != NULL); + should_abort = eecp->eec_rx_ps(arg, label, current_id, pkt_count, + flags); + + return (should_abort); +} + +#endif /* EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER */ + +static __checkReturn boolean_t +ef10_ev_rx( + __in efx_evq_t *eep, + __in efx_qword_t *eqp, + __in const efx_ev_callbacks_t *eecp, + __in_opt void *arg) +{ + efx_nic_t *enp = eep->ee_enp; + uint32_t size; + uint32_t label; + uint32_t mac_class; + uint32_t eth_tag_class; + uint32_t l3_class; + uint32_t l4_class; + uint32_t next_read_lbits; + uint16_t flags; + boolean_t cont; + boolean_t should_abort; + efx_evq_rxq_state_t *eersp; + unsigned int desc_count; + unsigned int last_used_id; + + EFX_EV_QSTAT_INCR(eep, EV_RX); + + /* Discard events after RXQ/TXQ errors, or hardware not available */ + if (enp->en_reset_flags & + (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR | EFX_RESET_HW_UNAVAIL)) + return (B_FALSE); + + /* Basic packet information */ + label = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_QLABEL); + eersp = &eep->ee_rxq_state[label]; + +#if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER + /* + * Packed stream events are very different, + * so handle them separately + */ + if (eersp->eers_rx_packed_stream) + return (ef10_ev_rx_packed_stream(eep, eqp, eecp, arg)); +#endif + + size = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_BYTES); + cont = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_CONT); + next_read_lbits = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DSC_PTR_LBITS); + eth_tag_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ETH_TAG_CLASS); + mac_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_MAC_CLASS); + l3_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_L3_CLASS); + + /* + * RX_L4_CLASS is 3 bits wide on Huntington and Medford, but is only + * 2 bits wide on Medford2. Check it is safe to use the Medford2 field + * and values for all EF10 controllers. + */ + EFX_STATIC_ASSERT(ESF_FZ_RX_L4_CLASS_LBN == ESF_DE_RX_L4_CLASS_LBN); + EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_TCP == ESE_DE_L4_CLASS_TCP); + EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UDP == ESE_DE_L4_CLASS_UDP); + EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UNKNOWN == ESE_DE_L4_CLASS_UNKNOWN); + + l4_class = EFX_QWORD_FIELD(*eqp, ESF_FZ_RX_L4_CLASS); + + if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DROP_EVENT) != 0) { + /* Drop this event */ + return (B_FALSE); + } + flags = 0; + + if (cont != 0) { + /* + * This may be part of a scattered frame, or it may be a + * truncated frame if scatter is disabled on this RXQ. 
+ * Overlength frames can be received if e.g. a VF is configured + * for 1500 MTU but connected to a port set to 9000 MTU + * (see bug56567). + * FIXME: There is not yet any driver that supports scatter on + * Huntington. Scatter support is required for OSX. + */ + flags |= EFX_PKT_CONT; + } + + if (mac_class == ESE_DZ_MAC_CLASS_UCAST) + flags |= EFX_PKT_UNICAST; + + /* + * Increment the count of descriptors read. + * + * In NO_CONT_EV mode, RX_DSC_PTR_LBITS is actually a packet count, but + * when scatter is disabled, there is only one descriptor per packet and + * so it can be treated the same. + * + * TODO: Support scatter in NO_CONT_EV mode. + */ + desc_count = (next_read_lbits - eersp->eers_rx_read_ptr) & + EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS); + eersp->eers_rx_read_ptr += desc_count; + + /* Calculate the index of the last descriptor consumed */ + last_used_id = (eersp->eers_rx_read_ptr - 1) & eersp->eers_rx_mask; + + if (eep->ee_flags & EFX_EVQ_FLAGS_NO_CONT_EV) { + if (desc_count > 1) + EFX_EV_QSTAT_INCR(eep, EV_RX_BATCH); + + /* Always read the length from the prefix in NO_CONT_EV mode. */ + flags |= EFX_PKT_PREFIX_LEN; + + /* + * Check for an aborted scatter, signalled by the ABORT bit in + * NO_CONT_EV mode. The ABORT bit was not used before NO_CONT_EV + * mode was added as it was broken in Huntington silicon. + */ + if (EFX_QWORD_FIELD(*eqp, ESF_EZ_RX_ABORT) != 0) { + flags |= EFX_DISCARD; + goto deliver; + } + } else if (desc_count > 1) { + /* + * FIXME: add error checking to make sure this a batched event. + * This could also be an aborted scatter, see Bug36629. + */ + EFX_EV_QSTAT_INCR(eep, EV_RX_BATCH); + flags |= EFX_PKT_PREFIX_LEN; + } + + /* Check for errors that invalidate checksum and L3/L4 fields */ + if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TRUNC_ERR) != 0) { + /* RX frame truncated */ + EFX_EV_QSTAT_INCR(eep, EV_RX_FRM_TRUNC); + flags |= EFX_DISCARD; + goto deliver; + } + if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECRC_ERR) != 0) { + /* Bad Ethernet frame CRC */ + EFX_EV_QSTAT_INCR(eep, EV_RX_ETH_CRC_ERR); + flags |= EFX_DISCARD; + goto deliver; + } + if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_PARSE_INCOMPLETE)) { + /* + * Hardware parse failed, due to malformed headers + * or headers that are too long for the parser. + * Headers and checksums must be validated by the host. + */ + EFX_EV_QSTAT_INCR(eep, EV_RX_PARSE_INCOMPLETE); + goto deliver; + } + + if ((eth_tag_class == ESE_DZ_ETH_TAG_CLASS_VLAN1) || + (eth_tag_class == ESE_DZ_ETH_TAG_CLASS_VLAN2)) { + flags |= EFX_PKT_VLAN_TAGGED; + } + + switch (l3_class) { + case ESE_DZ_L3_CLASS_IP4: + case ESE_DZ_L3_CLASS_IP4_FRAG: + flags |= EFX_PKT_IPV4; + if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_IPCKSUM_ERR)) { + EFX_EV_QSTAT_INCR(eep, EV_RX_IPV4_HDR_CHKSUM_ERR); + } else { + flags |= EFX_CKSUM_IPV4; + } + + /* + * RX_L4_CLASS is 3 bits wide on Huntington and Medford, but is + * only 2 bits wide on Medford2. Check it is safe to use the + * Medford2 field and values for all EF10 controllers. 
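+ * The EFX_STATIC_ASSERTs below enforce that equivalence at compile time,
+ * so the narrower Medford2 (FZ) definitions can be used unconditionally.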
+ */ + EFX_STATIC_ASSERT(ESF_FZ_RX_L4_CLASS_LBN == + ESF_DE_RX_L4_CLASS_LBN); + EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_TCP == ESE_DE_L4_CLASS_TCP); + EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UDP == ESE_DE_L4_CLASS_UDP); + EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UNKNOWN == + ESE_DE_L4_CLASS_UNKNOWN); + + if (l4_class == ESE_FZ_L4_CLASS_TCP) { + EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV4); + flags |= EFX_PKT_TCP; + } else if (l4_class == ESE_FZ_L4_CLASS_UDP) { + EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV4); + flags |= EFX_PKT_UDP; + } else { + EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV4); + } + break; + + case ESE_DZ_L3_CLASS_IP6: + case ESE_DZ_L3_CLASS_IP6_FRAG: + flags |= EFX_PKT_IPV6; + + /* + * RX_L4_CLASS is 3 bits wide on Huntington and Medford, but is + * only 2 bits wide on Medford2. Check it is safe to use the + * Medford2 field and values for all EF10 controllers. + */ + EFX_STATIC_ASSERT(ESF_FZ_RX_L4_CLASS_LBN == + ESF_DE_RX_L4_CLASS_LBN); + EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_TCP == ESE_DE_L4_CLASS_TCP); + EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UDP == ESE_DE_L4_CLASS_UDP); + EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UNKNOWN == + ESE_DE_L4_CLASS_UNKNOWN); + + if (l4_class == ESE_FZ_L4_CLASS_TCP) { + EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV6); + flags |= EFX_PKT_TCP; + } else if (l4_class == ESE_FZ_L4_CLASS_UDP) { + EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV6); + flags |= EFX_PKT_UDP; + } else { + EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV6); + } + break; + + default: + EFX_EV_QSTAT_INCR(eep, EV_RX_NON_IP); + break; + } + + if (flags & (EFX_PKT_TCP | EFX_PKT_UDP)) { + if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TCPUDP_CKSUM_ERR)) { + EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_UDP_CHKSUM_ERR); + } else { + flags |= EFX_CKSUM_TCPUDP; + } + } + +deliver: + /* If we're not discarding the packet then it is ok */ + if (~flags & EFX_DISCARD) + EFX_EV_QSTAT_INCR(eep, EV_RX_OK); + + EFSYS_ASSERT(eecp->eec_rx != NULL); + should_abort = eecp->eec_rx(arg, label, last_used_id, size, flags); + + return (should_abort); +} + +static __checkReturn boolean_t +ef10_ev_tx( + __in efx_evq_t *eep, + __in efx_qword_t *eqp, + __in const efx_ev_callbacks_t *eecp, + __in_opt void *arg) +{ + efx_nic_t *enp = eep->ee_enp; + uint32_t id; + uint32_t label; + boolean_t should_abort; + + EFX_EV_QSTAT_INCR(eep, EV_TX); + + /* Discard events after RXQ/TXQ errors, or hardware not available */ + if (enp->en_reset_flags & + (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR | EFX_RESET_HW_UNAVAIL)) + return (B_FALSE); + + if (EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_DROP_EVENT) != 0) { + /* Drop this event */ + return (B_FALSE); + } + + /* Per-packet TX completion (was per-descriptor for Falcon/Siena) */ + id = EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_DESCR_INDX); + label = EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_QLABEL); + + EFSYS_PROBE2(tx_complete, uint32_t, label, uint32_t, id); + + EFSYS_ASSERT(eecp->eec_tx != NULL); + should_abort = eecp->eec_tx(arg, label, id); + + return (should_abort); +} + +static __checkReturn boolean_t +ef10_ev_driver( + __in efx_evq_t *eep, + __in efx_qword_t *eqp, + __in const efx_ev_callbacks_t *eecp, + __in_opt void *arg) +{ + unsigned int code; + boolean_t should_abort; + + EFX_EV_QSTAT_INCR(eep, EV_DRIVER); + should_abort = B_FALSE; + + code = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_SUB_CODE); + switch (code) { + case ESE_DZ_DRV_TIMER_EV: { + uint32_t id; + + id = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_TMR_ID); + + EFSYS_ASSERT(eecp->eec_timer != NULL); + should_abort = eecp->eec_timer(arg, id); + break; + } + + case ESE_DZ_DRV_WAKE_UP_EV: { + uint32_t id; + + id = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_EVQ_ID); + + 
EFSYS_ASSERT(eecp->eec_wake_up != NULL); + should_abort = eecp->eec_wake_up(arg, id); + break; + } + + case ESE_DZ_DRV_START_UP_EV: + EFSYS_ASSERT(eecp->eec_initialized != NULL); + should_abort = eecp->eec_initialized(arg); + break; + + default: + EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index, + uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1), + uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0)); + break; + } + + return (should_abort); +} + +static __checkReturn boolean_t +ef10_ev_drv_gen( + __in efx_evq_t *eep, + __in efx_qword_t *eqp, + __in const efx_ev_callbacks_t *eecp, + __in_opt void *arg) +{ + uint32_t data; + boolean_t should_abort; + + EFX_EV_QSTAT_INCR(eep, EV_DRV_GEN); + should_abort = B_FALSE; + + data = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_SUB_DATA_DW0); + if (data >= ((uint32_t)1 << 16)) { + EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index, + uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1), + uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0)); + + return (B_TRUE); + } + + EFSYS_ASSERT(eecp->eec_software != NULL); + should_abort = eecp->eec_software(arg, (uint16_t)data); + + return (should_abort); +} + +static __checkReturn boolean_t +ef10_ev_mcdi( + __in efx_evq_t *eep, + __in efx_qword_t *eqp, + __in const efx_ev_callbacks_t *eecp, + __in_opt void *arg) +{ + efx_nic_t *enp = eep->ee_enp; + unsigned int code; + boolean_t should_abort = B_FALSE; + + EFX_EV_QSTAT_INCR(eep, EV_MCDI_RESPONSE); + + code = EFX_QWORD_FIELD(*eqp, MCDI_EVENT_CODE); + switch (code) { + case MCDI_EVENT_CODE_BADSSERT: + efx_mcdi_ev_death(enp, EINTR); + break; + + case MCDI_EVENT_CODE_CMDDONE: + efx_mcdi_ev_cpl(enp, + MCDI_EV_FIELD(eqp, CMDDONE_SEQ), + MCDI_EV_FIELD(eqp, CMDDONE_DATALEN), + MCDI_EV_FIELD(eqp, CMDDONE_ERRNO)); + break; + +#if EFSYS_OPT_MCDI_PROXY_AUTH + case MCDI_EVENT_CODE_PROXY_RESPONSE: + /* + * This event notifies a function that an authorization request + * has been processed. If the request was authorized then the + * function can now re-send the original MCDI request. + * See SF-113652-SW "SR-IOV Proxied Network Access Control". + */ + efx_mcdi_ev_proxy_response(enp, + MCDI_EV_FIELD(eqp, PROXY_RESPONSE_HANDLE), + MCDI_EV_FIELD(eqp, PROXY_RESPONSE_RC)); + break; +#endif /* EFSYS_OPT_MCDI_PROXY_AUTH */ + +#if EFSYS_OPT_MCDI_PROXY_AUTH_SERVER + case MCDI_EVENT_CODE_PROXY_REQUEST: + efx_mcdi_ev_proxy_request(enp, + MCDI_EV_FIELD(eqp, PROXY_REQUEST_BUFF_INDEX)); + break; +#endif /* EFSYS_OPT_MCDI_PROXY_AUTH_SERVER */ + + case MCDI_EVENT_CODE_LINKCHANGE: { + efx_link_mode_t link_mode; + + ef10_phy_link_ev(enp, eqp, &link_mode); + should_abort = eecp->eec_link_change(arg, link_mode); + break; + } + + case MCDI_EVENT_CODE_SENSOREVT: { +#if EFSYS_OPT_MON_STATS + efx_mon_stat_t id; + efx_mon_stat_value_t value; + efx_rc_t rc; + + /* Decode monitor stat for MCDI sensor (if supported) */ + if ((rc = mcdi_mon_ev(enp, eqp, &id, &value)) == 0) { + /* Report monitor stat change */ + should_abort = eecp->eec_monitor(arg, id, value); + } else if (rc == ENOTSUP) { + should_abort = eecp->eec_exception(arg, + EFX_EXCEPTION_UNKNOWN_SENSOREVT, + MCDI_EV_FIELD(eqp, DATA)); + } else { + EFSYS_ASSERT(rc == ENODEV); /* Wrong port */ + } +#endif + break; + } + + case MCDI_EVENT_CODE_SCHEDERR: + /* Informational only */ + break; + + case MCDI_EVENT_CODE_REBOOT: + /* Falcon/Siena only (should not been seen with Huntington). */ + efx_mcdi_ev_death(enp, EIO); + break; + + case MCDI_EVENT_CODE_MC_REBOOT: + /* MC_REBOOT event is used for Huntington (EF10) and later. 
*/ + efx_mcdi_ev_death(enp, EIO); + break; + + case MCDI_EVENT_CODE_MAC_STATS_DMA: +#if EFSYS_OPT_MAC_STATS + if (eecp->eec_mac_stats != NULL) { + eecp->eec_mac_stats(arg, + MCDI_EV_FIELD(eqp, MAC_STATS_DMA_GENERATION)); + } +#endif + break; + + case MCDI_EVENT_CODE_FWALERT: { + uint32_t reason = MCDI_EV_FIELD(eqp, FWALERT_REASON); + + if (reason == MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS) + should_abort = eecp->eec_exception(arg, + EFX_EXCEPTION_FWALERT_SRAM, + MCDI_EV_FIELD(eqp, FWALERT_DATA)); + else + should_abort = eecp->eec_exception(arg, + EFX_EXCEPTION_UNKNOWN_FWALERT, + MCDI_EV_FIELD(eqp, DATA)); + break; + } + + case MCDI_EVENT_CODE_TX_ERR: { + /* + * After a TXQ error is detected, firmware sends a TX_ERR event. + * This may be followed by TX completions (which we discard), + * and then finally by a TX_FLUSH event. Firmware destroys the + * TXQ automatically after sending the TX_FLUSH event. + */ + enp->en_reset_flags |= EFX_RESET_TXQ_ERR; + + EFSYS_PROBE2(tx_descq_err, + uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1), + uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0)); + + /* Inform the driver that a reset is required. */ + eecp->eec_exception(arg, EFX_EXCEPTION_TX_ERROR, + MCDI_EV_FIELD(eqp, TX_ERR_DATA)); + break; + } + + case MCDI_EVENT_CODE_TX_FLUSH: { + uint32_t txq_index = MCDI_EV_FIELD(eqp, TX_FLUSH_TXQ); + + /* + * EF10 firmware sends two TX_FLUSH events: one to the txq's + * event queue, and one to evq 0 (with TX_FLUSH_TO_DRIVER set). + * We want to wait for all completions, so ignore the events + * with TX_FLUSH_TO_DRIVER. + */ + if (MCDI_EV_FIELD(eqp, TX_FLUSH_TO_DRIVER) != 0) { + should_abort = B_FALSE; + break; + } + + EFX_EV_QSTAT_INCR(eep, EV_DRIVER_TX_DESCQ_FLS_DONE); + + EFSYS_PROBE1(tx_descq_fls_done, uint32_t, txq_index); + + EFSYS_ASSERT(eecp->eec_txq_flush_done != NULL); + should_abort = eecp->eec_txq_flush_done(arg, txq_index); + break; + } + + case MCDI_EVENT_CODE_RX_ERR: { + /* + * After an RXQ error is detected, firmware sends an RX_ERR + * event. This may be followed by RX events (which we discard), + * and then finally by an RX_FLUSH event. Firmware destroys the + * RXQ automatically after sending the RX_FLUSH event. + */ + enp->en_reset_flags |= EFX_RESET_RXQ_ERR; + + EFSYS_PROBE2(rx_descq_err, + uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1), + uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0)); + + /* Inform the driver that a reset is required. */ + eecp->eec_exception(arg, EFX_EXCEPTION_RX_ERROR, + MCDI_EV_FIELD(eqp, RX_ERR_DATA)); + break; + } + + case MCDI_EVENT_CODE_RX_FLUSH: { + uint32_t rxq_index = MCDI_EV_FIELD(eqp, RX_FLUSH_RXQ); + + /* + * EF10 firmware sends two RX_FLUSH events: one to the rxq's + * event queue, and one to evq 0 (with RX_FLUSH_TO_DRIVER set). + * We want to wait for all completions, so ignore the events + * with RX_FLUSH_TO_DRIVER. 
+ */ + if (MCDI_EV_FIELD(eqp, RX_FLUSH_TO_DRIVER) != 0) { + should_abort = B_FALSE; + break; + } + + EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DESCQ_FLS_DONE); + + EFSYS_PROBE1(rx_descq_fls_done, uint32_t, rxq_index); + + EFSYS_ASSERT(eecp->eec_rxq_flush_done != NULL); + should_abort = eecp->eec_rxq_flush_done(arg, rxq_index); + break; + } + + default: + EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index, + uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1), + uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0)); + break; + } + + return (should_abort); +} + + void +ef10_ev_rxlabel_init( + __in efx_evq_t *eep, + __in efx_rxq_t *erp, + __in unsigned int label, + __in efx_rxq_type_t type) +{ + efx_evq_rxq_state_t *eersp; +#if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER + boolean_t packed_stream = (type == EFX_RXQ_TYPE_PACKED_STREAM); + boolean_t es_super_buffer = (type == EFX_RXQ_TYPE_ES_SUPER_BUFFER); +#endif + + _NOTE(ARGUNUSED(type)) + EFSYS_ASSERT3U(label, <, EFX_ARRAY_SIZE(eep->ee_rxq_state)); + eersp = &eep->ee_rxq_state[label]; + + EFSYS_ASSERT3U(eersp->eers_rx_mask, ==, 0); + +#if EFSYS_OPT_RX_PACKED_STREAM + /* + * For packed stream modes, the very first event will + * have a new buffer flag set, so it will be incremented, + * yielding the correct pointer. That results in a simpler + * code than trying to detect start-of-the-world condition + * in the event handler. + */ + eersp->eers_rx_read_ptr = packed_stream ? ~0 : 0; +#else + eersp->eers_rx_read_ptr = 0; +#endif + eersp->eers_rx_mask = erp->er_mask; +#if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER + eersp->eers_rx_stream_npackets = 0; + eersp->eers_rx_packed_stream = packed_stream || es_super_buffer; +#endif +#if EFSYS_OPT_RX_PACKED_STREAM + if (packed_stream) { + eersp->eers_rx_packed_stream_credits = (eep->ee_mask + 1) / + EFX_DIV_ROUND_UP(EFX_RX_PACKED_STREAM_MEM_PER_CREDIT, + EFX_RX_PACKED_STREAM_MIN_PACKET_SPACE); + EFSYS_ASSERT3U(eersp->eers_rx_packed_stream_credits, !=, 0); + /* + * A single credit is allocated to the queue when it is started. + * It is immediately spent by the first packet which has NEW + * BUFFER flag set, though, but still we shall take into + * account, as to not wrap around the maximum number of credits + * accidentally + */ + eersp->eers_rx_packed_stream_credits--; + EFSYS_ASSERT3U(eersp->eers_rx_packed_stream_credits, <=, + EFX_RX_PACKED_STREAM_MAX_CREDITS); + } +#endif +} + + void +ef10_ev_rxlabel_fini( + __in efx_evq_t *eep, + __in unsigned int label) +{ + efx_evq_rxq_state_t *eersp; + + EFSYS_ASSERT3U(label, <, EFX_ARRAY_SIZE(eep->ee_rxq_state)); + eersp = &eep->ee_rxq_state[label]; + + EFSYS_ASSERT3U(eersp->eers_rx_mask, !=, 0); + + eersp->eers_rx_read_ptr = 0; + eersp->eers_rx_mask = 0; +#if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER + eersp->eers_rx_stream_npackets = 0; + eersp->eers_rx_packed_stream = B_FALSE; +#endif +#if EFSYS_OPT_RX_PACKED_STREAM + eersp->eers_rx_packed_stream_credits = 0; +#endif +} + +#endif /* EFX_OPTS_EF10() */ diff --git a/src/spdk/dpdk/drivers/net/sfc/base/ef10_evb.c b/src/spdk/dpdk/drivers/net/sfc/base/ef10_evb.c new file mode 100644 index 000000000..aeeaa5189 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/base/ef10_evb.c @@ -0,0 +1,549 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2018-2019 Solarflare Communications Inc. 
+ */ + +#include "efx.h" +#include "efx_impl.h" + +#if EFSYS_OPT_EVB + +#if EFX_OPTS_EF10() + + __checkReturn efx_rc_t +ef10_evb_init( + __in efx_nic_t *enp) +{ + EFSYS_ASSERT(EFX_FAMILY_IS_EF10(enp)); + + return (0); +} + + void +ef10_evb_fini( + __in efx_nic_t *enp) +{ + EFSYS_ASSERT(EFX_FAMILY_IS_EF10(enp)); +} + +static __checkReturn efx_rc_t +efx_mcdi_vswitch_alloc( + __in efx_nic_t *enp, + __in efx_vport_id_t vport_id, + __in efx_vswitch_type_t vswitch_type) +{ + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_VSWITCH_ALLOC_IN_LEN, + MC_CMD_VSWITCH_ALLOC_OUT_LEN); + efx_mcdi_req_t req; + efx_rc_t rc; + uint8_t ntags; + + /* Ensure EFX and MCDI use same values for vswitch types */ + EFX_STATIC_ASSERT(EFX_VSWITCH_TYPE_VLAN == + MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VLAN); + EFX_STATIC_ASSERT(EFX_VSWITCH_TYPE_VEB == + MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEB); + EFX_STATIC_ASSERT(EFX_VSWITCH_TYPE_MUX == + MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_MUX); + + /* First try with maximum number of VLAN tags FW supports */ + ntags = 2; +retry: + req.emr_cmd = MC_CMD_VSWITCH_ALLOC; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_VSWITCH_ALLOC_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_VSWITCH_ALLOC_OUT_LEN; + + MCDI_IN_SET_DWORD(req, VSWITCH_ALLOC_IN_UPSTREAM_PORT_ID, vport_id); + MCDI_IN_SET_DWORD(req, VSWITCH_ALLOC_IN_TYPE, vswitch_type); + MCDI_IN_SET_DWORD(req, VSWITCH_ALLOC_IN_NUM_VLAN_TAGS, ntags); + MCDI_IN_POPULATE_DWORD_1(req, VSWITCH_ALLOC_IN_FLAGS, + VSWITCH_ALLOC_IN_FLAG_AUTO_PORT, 0); + + efx_mcdi_execute(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + /* + * efx_rc_t error codes in libefx are translated from MCDI + * error codes in efx_mcdi_request_errcode. As this conversion + * is not a 1:1, here we check the specific MCDI error code. 
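+ * MC_CMD_ERR_VLAN_LIMIT means the requested number of VLAN tags is not
+ * supported, so the allocation is retried below with one tag fewer until
+ * none are left.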
+ */ + if (req.emr_err_code == MC_CMD_ERR_VLAN_LIMIT) { + /* Too many VLAN tags, retry with fewer */ + EFSYS_PROBE(vlan_limit); + ntags--; + if (ntags > 0) { + /* + * Zero the buffer before reusing it + * for another request + */ + memset(payload, 0, sizeof (payload)); + goto retry; + } + goto fail1; + } + } + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + return (rc); +} + +static __checkReturn efx_rc_t +efx_mcdi_vswitch_free( + __in efx_nic_t *enp) +{ + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_VSWITCH_FREE_IN_LEN, + MC_CMD_VSWITCH_FREE_OUT_LEN); + efx_mcdi_req_t req; + efx_rc_t rc; + + req.emr_cmd = MC_CMD_VSWITCH_FREE; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_VSWITCH_FREE_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_VSWITCH_FREE_OUT_LEN; + + MCDI_IN_SET_DWORD(req, VSWITCH_FREE_IN_UPSTREAM_PORT_ID, + EVB_PORT_ID_ASSIGNED); + efx_mcdi_execute(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail1; + } + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + return (rc); +} + +static __checkReturn efx_rc_t +efx_mcdi_vport_alloc( + __in efx_nic_t *enp, + __in efx_vport_type_t vport_type, + __in uint16_t vid, + __in boolean_t vlan_restrict, + __out efx_vport_id_t *vport_idp) +{ + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_VPORT_ALLOC_IN_LEN, + MC_CMD_VPORT_ALLOC_OUT_LEN); + efx_mcdi_req_t req; + efx_rc_t rc; + + /* Ensure EFX and MCDI use same values for vport types */ + EFX_STATIC_ASSERT(EFX_VPORT_TYPE_NORMAL == + MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_NORMAL); + EFX_STATIC_ASSERT(EFX_VPORT_TYPE_EXPANSION == + MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_EXPANSION); + EFX_STATIC_ASSERT(EFX_VPORT_TYPE_TEST == + MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_TEST); + + req.emr_cmd = MC_CMD_VPORT_ALLOC; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_VPORT_ALLOC_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_VPORT_ALLOC_OUT_LEN; + + MCDI_IN_SET_DWORD(req, VPORT_ALLOC_IN_UPSTREAM_PORT_ID, + EVB_PORT_ID_ASSIGNED); + MCDI_IN_SET_DWORD(req, VPORT_ALLOC_IN_TYPE, vport_type); + MCDI_IN_SET_DWORD(req, VPORT_ALLOC_IN_NUM_VLAN_TAGS, + (vid != EFX_FILTER_VID_UNSPEC)); + + MCDI_IN_POPULATE_DWORD_2(req, VPORT_ALLOC_IN_FLAGS, + VPORT_ALLOC_IN_FLAG_AUTO_PORT, 0, + VPORT_ALLOC_IN_FLAG_VLAN_RESTRICT, vlan_restrict); + + if (vid != EFX_FILTER_VID_UNSPEC) + MCDI_IN_POPULATE_DWORD_1(req, VPORT_ALLOC_IN_VLAN_TAGS, + VPORT_ALLOC_IN_VLAN_TAG_0, vid); + + efx_mcdi_execute(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail1; + } + + if (req.emr_out_length_used < MC_CMD_VPORT_ALLOC_OUT_LEN) { + rc = EMSGSIZE; + goto fail2; + } + + *vport_idp = *MCDI_OUT2(req, uint32_t, VPORT_ALLOC_OUT_VPORT_ID); + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + return (rc); +} + +static __checkReturn efx_rc_t +efx_mcdi_vport_free( + __in efx_nic_t *enp, + __in efx_vport_id_t vport_id) +{ + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_VPORT_FREE_IN_LEN, + MC_CMD_VPORT_FREE_OUT_LEN); + efx_mcdi_req_t req; + efx_rc_t rc; + + req.emr_cmd = MC_CMD_VPORT_FREE; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_VPORT_FREE_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_VPORT_FREE_OUT_LEN; + + MCDI_IN_SET_DWORD(req, VPORT_FREE_IN_VPORT_ID, vport_id); + efx_mcdi_execute(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail1; + } + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + return (rc); +} + +static __checkReturn efx_rc_t +efx_mcdi_vport_mac_addr_add( + __in efx_nic_t 
*enp, + __in efx_vport_id_t vport_id, + __in_bcount(EFX_MAC_ADDR_LEN) uint8_t *addrp) +{ + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN, + MC_CMD_VPORT_ADD_MAC_ADDRESS_OUT_LEN); + efx_mcdi_req_t req; + efx_rc_t rc; + + req.emr_cmd = MC_CMD_VPORT_ADD_MAC_ADDRESS; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_VPORT_ADD_MAC_ADDRESS_OUT_LEN; + + MCDI_IN_SET_DWORD(req, VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID, vport_id); + EFX_MAC_ADDR_COPY(MCDI_IN2(req, uint8_t, + VPORT_ADD_MAC_ADDRESS_IN_MACADDR), addrp); + + efx_mcdi_execute(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail1; + } + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + return (rc); +} + +static __checkReturn efx_rc_t +efx_mcdi_vport_mac_addr_del( + __in efx_nic_t *enp, + __in efx_vport_id_t vport_id, + __in_bcount(EFX_MAC_ADDR_LEN) uint8_t *addrp) +{ + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN, + MC_CMD_VPORT_DEL_MAC_ADDRESS_OUT_LEN); + efx_mcdi_req_t req; + efx_rc_t rc; + + req.emr_cmd = MC_CMD_VPORT_DEL_MAC_ADDRESS; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_VPORT_DEL_MAC_ADDRESS_OUT_LEN; + + MCDI_IN_SET_DWORD(req, VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID, vport_id); + EFX_MAC_ADDR_COPY(MCDI_IN2(req, uint8_t, + VPORT_DEL_MAC_ADDRESS_IN_MACADDR), addrp); + + efx_mcdi_execute(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail1; + } + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + return (rc); +} + +static __checkReturn efx_rc_t +efx_mcdi_port_assign( + __in efx_nic_t *enp, + __in efx_vport_id_t vport_id, + __in uint32_t vf_index) +{ + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_EVB_PORT_ASSIGN_IN_LEN, + MC_CMD_EVB_PORT_ASSIGN_OUT_LEN); + efx_mcdi_req_t req; + efx_nic_cfg_t *encp = &(enp->en_nic_cfg); + efx_rc_t rc; + + req.emr_cmd = MC_CMD_EVB_PORT_ASSIGN; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_EVB_PORT_ASSIGN_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_EVB_PORT_ASSIGN_OUT_LEN; + + MCDI_IN_SET_DWORD(req, EVB_PORT_ASSIGN_IN_PORT_ID, vport_id); + MCDI_IN_POPULATE_DWORD_2(req, EVB_PORT_ASSIGN_IN_FUNCTION, + EVB_PORT_ASSIGN_IN_PF, encp->enc_pf, + EVB_PORT_ASSIGN_IN_VF, vf_index); + + efx_mcdi_execute(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail1; + } + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + return (rc); +} + +static __checkReturn efx_rc_t +efx_mcdi_vport_reconfigure( + __in efx_nic_t *enp, + __in efx_vport_id_t vport_id, + __in_opt uint16_t *vidp, + __in_bcount_opt(EFX_MAC_ADDR_LEN) uint8_t *addrp, + __out_opt boolean_t *fn_resetp) +{ + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_VPORT_RECONFIGURE_IN_LEN, + MC_CMD_VPORT_RECONFIGURE_OUT_LEN); + efx_mcdi_req_t req; + efx_rc_t rc; + uint32_t reset_flag = 0; + + req.emr_cmd = MC_CMD_VPORT_RECONFIGURE; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_VPORT_RECONFIGURE_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_VPORT_RECONFIGURE_OUT_LEN; + + MCDI_IN_SET_DWORD(req, VPORT_RECONFIGURE_IN_VPORT_ID, vport_id); + + if (vidp != NULL) { + MCDI_IN_POPULATE_DWORD_1(req, VPORT_RECONFIGURE_IN_FLAGS, + VPORT_RECONFIGURE_IN_REPLACE_VLAN_TAGS, 1); + if (*vidp != EFX_FILTER_VID_UNSPEC) { + MCDI_IN_SET_DWORD(req, + VPORT_RECONFIGURE_IN_NUM_VLAN_TAGS, 1); + MCDI_IN_POPULATE_DWORD_1(req, + 
VPORT_RECONFIGURE_IN_VLAN_TAGS, + VPORT_RECONFIGURE_IN_VLAN_TAG_0, *vidp); + } + } + + if ((addrp != NULL) && (efx_is_zero_eth_addr(addrp) == B_FALSE)) { + MCDI_IN_POPULATE_DWORD_1(req, VPORT_RECONFIGURE_IN_FLAGS, + VPORT_RECONFIGURE_IN_REPLACE_MACADDRS, 1); + MCDI_IN_SET_DWORD(req, VPORT_RECONFIGURE_IN_NUM_MACADDRS, 1); + EFX_MAC_ADDR_COPY(MCDI_IN2(req, uint8_t, + VPORT_RECONFIGURE_IN_MACADDRS), addrp); + } + + efx_mcdi_execute(enp, &req); + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail1; + } + + if (req.emr_out_length_used < MC_CMD_VPORT_RECONFIGURE_OUT_LEN) { + rc = EMSGSIZE; + goto fail2; + } + + reset_flag = MCDI_OUT_DWORD_FIELD(req, VPORT_RECONFIGURE_OUT_FLAGS, + VPORT_RECONFIGURE_OUT_RESET_DONE); + + if (fn_resetp != NULL) + *fn_resetp = (reset_flag != 0); + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + return (rc); +} + + __checkReturn efx_rc_t +ef10_evb_vswitch_alloc( + __in efx_nic_t *enp, + __out efx_vswitch_id_t *vswitch_idp) +{ + efx_rc_t rc; + if (vswitch_idp == NULL) { + rc = EINVAL; + goto fail1; + } + + if ((rc = efx_mcdi_vswitch_alloc(enp, EVB_PORT_ID_ASSIGNED, + EFX_VSWITCH_TYPE_VEB)) != 0) { + goto fail2; + } + + *vswitch_idp = EFX_DEFAULT_VSWITCH_ID; + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + return (rc); +} + + __checkReturn efx_rc_t +ef10_evb_vswitch_free( + __in efx_nic_t *enp, + __in efx_vswitch_id_t vswitch_id) +{ + _NOTE(ARGUNUSED(vswitch_id)) + + return (efx_mcdi_vswitch_free(enp)); +} + + __checkReturn efx_rc_t +ef10_evb_vport_alloc( + __in efx_nic_t *enp, + __in efx_vswitch_id_t vswitch_id, + __in efx_vport_type_t vport_type, + __in uint16_t vid, + __in boolean_t vlan_restrict, + __out efx_vport_id_t *vport_idp) +{ + _NOTE(ARGUNUSED(vswitch_id)) + + return (efx_mcdi_vport_alloc(enp, + vport_type, vid, + vlan_restrict, vport_idp)); +} + + __checkReturn efx_rc_t +ef10_evb_vport_free( + __in efx_nic_t *enp, + __in efx_vswitch_id_t vswitch_id, + __in efx_vport_id_t vport_id) +{ + _NOTE(ARGUNUSED(vswitch_id)) + + return (efx_mcdi_vport_free(enp, vport_id)); +} + + __checkReturn efx_rc_t +ef10_evb_vport_mac_addr_add( + __in efx_nic_t *enp, + __in efx_vswitch_id_t vswitch_id, + __in efx_vport_id_t vport_id, + __in_bcount(EFX_MAC_ADDR_LEN) uint8_t *addrp) +{ + _NOTE(ARGUNUSED(vswitch_id)) + EFSYS_ASSERT(addrp != NULL); + + return (efx_mcdi_vport_mac_addr_add(enp, vport_id, addrp)); +} + + __checkReturn efx_rc_t +ef10_evb_vport_mac_addr_del( + __in efx_nic_t *enp, + __in efx_vswitch_id_t vswitch_id, + __in efx_vport_id_t vport_id, + __in_bcount(EFX_MAC_ADDR_LEN) uint8_t *addrp) +{ + _NOTE(ARGUNUSED(vswitch_id)) + EFSYS_ASSERT(addrp != NULL); + + return (efx_mcdi_vport_mac_addr_del(enp, vport_id, addrp)); +} + + __checkReturn efx_rc_t +ef10_evb_vadaptor_alloc( + __in efx_nic_t *enp, + __in efx_vswitch_id_t vswitch_id, + __in efx_vport_id_t vport_id) +{ + _NOTE(ARGUNUSED(vswitch_id)) + + return (efx_mcdi_vadaptor_alloc(enp, vport_id)); +} + + __checkReturn efx_rc_t +ef10_evb_vadaptor_free( + __in efx_nic_t *enp, + __in efx_vswitch_id_t vswitch_id, + __in efx_vport_id_t vport_id) +{ + _NOTE(ARGUNUSED(vswitch_id)) + + return (efx_mcdi_vadaptor_free(enp, vport_id)); +} + + __checkReturn efx_rc_t +ef10_evb_vport_assign( + __in efx_nic_t *enp, + __in efx_vswitch_id_t vswitch_id, + __in efx_vport_id_t vport_id, + __in uint32_t vf_index) +{ + _NOTE(ARGUNUSED(vswitch_id)) + + return (efx_mcdi_port_assign(enp, vport_id, vf_index)); +} + + __checkReturn efx_rc_t 
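+/*
+ * Like the other ef10_evb_* methods this is a thin wrapper: the vswitch_id
+ * argument is unused since only the default vswitch is supported, and the
+ * request is passed straight through to efx_mcdi_vport_reconfigure().
+ */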
+ef10_evb_vport_reconfigure( + __in efx_nic_t *enp, + __in efx_vswitch_id_t vswitch_id, + __in efx_vport_id_t vport_id, + __in_opt uint16_t *vidp, + __in_bcount_opt(EFX_MAC_ADDR_LEN) uint8_t *addrp, + __out_opt boolean_t *fn_resetp) +{ + _NOTE(ARGUNUSED(vswitch_id)) + + return (efx_mcdi_vport_reconfigure(enp, vport_id, vidp, + addrp, fn_resetp)); +} + + __checkReturn efx_rc_t +ef10_evb_vport_stats( + __in efx_nic_t *enp, + __in efx_vswitch_id_t vswitch_id, + __in efx_vport_id_t vport_id, + __in efsys_mem_t *esmp) +{ + _NOTE(ARGUNUSED(vswitch_id)) + + return (efx_mcdi_mac_stats(enp, vport_id, esmp, + EFX_STATS_UPLOAD, 0)); +} + +#endif /* EFX_OPTS_EF10() */ +#endif /* EFSYS_OPT_EVB */ diff --git a/src/spdk/dpdk/drivers/net/sfc/base/ef10_filter.c b/src/spdk/dpdk/drivers/net/sfc/base/ef10_filter.c new file mode 100644 index 000000000..12c84a564 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/base/ef10_filter.c @@ -0,0 +1,2141 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2007-2019 Solarflare Communications Inc. + */ + +#include "efx.h" +#include "efx_impl.h" + +#if EFX_OPTS_EF10() + +#if EFSYS_OPT_FILTER + +#define EFE_SPEC(eftp, index) ((eftp)->eft_entry[(index)].efe_spec) + +static efx_filter_spec_t * +ef10_filter_entry_spec( + __in const ef10_filter_table_t *eftp, + __in unsigned int index) +{ + return ((efx_filter_spec_t *)(EFE_SPEC(eftp, index) & + ~(uintptr_t)EFX_EF10_FILTER_FLAGS)); +} + +static boolean_t +ef10_filter_entry_is_busy( + __in const ef10_filter_table_t *eftp, + __in unsigned int index) +{ + if (EFE_SPEC(eftp, index) & EFX_EF10_FILTER_FLAG_BUSY) + return (B_TRUE); + else + return (B_FALSE); +} + +static boolean_t +ef10_filter_entry_is_auto_old( + __in const ef10_filter_table_t *eftp, + __in unsigned int index) +{ + if (EFE_SPEC(eftp, index) & EFX_EF10_FILTER_FLAG_AUTO_OLD) + return (B_TRUE); + else + return (B_FALSE); +} + +static void +ef10_filter_set_entry( + __inout ef10_filter_table_t *eftp, + __in unsigned int index, + __in_opt const efx_filter_spec_t *efsp) +{ + EFE_SPEC(eftp, index) = (uintptr_t)efsp; +} + +static void +ef10_filter_set_entry_busy( + __inout ef10_filter_table_t *eftp, + __in unsigned int index) +{ + EFE_SPEC(eftp, index) |= (uintptr_t)EFX_EF10_FILTER_FLAG_BUSY; +} + +static void +ef10_filter_set_entry_not_busy( + __inout ef10_filter_table_t *eftp, + __in unsigned int index) +{ + EFE_SPEC(eftp, index) &= ~(uintptr_t)EFX_EF10_FILTER_FLAG_BUSY; +} + +static void +ef10_filter_set_entry_auto_old( + __inout ef10_filter_table_t *eftp, + __in unsigned int index) +{ + EFSYS_ASSERT(ef10_filter_entry_spec(eftp, index) != NULL); + EFE_SPEC(eftp, index) |= (uintptr_t)EFX_EF10_FILTER_FLAG_AUTO_OLD; +} + +static void +ef10_filter_set_entry_not_auto_old( + __inout ef10_filter_table_t *eftp, + __in unsigned int index) +{ + EFE_SPEC(eftp, index) &= ~(uintptr_t)EFX_EF10_FILTER_FLAG_AUTO_OLD; + EFSYS_ASSERT(ef10_filter_entry_spec(eftp, index) != NULL); +} + + __checkReturn efx_rc_t +ef10_filter_init( + __in efx_nic_t *enp) +{ + efx_rc_t rc; + ef10_filter_table_t *eftp; + + EFSYS_ASSERT(EFX_FAMILY_IS_EF10(enp)); + +#define MATCH_MASK(match) (EFX_MASK32(match) << EFX_LOW_BIT(match)) + EFX_STATIC_ASSERT(EFX_FILTER_MATCH_REM_HOST == + MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_IP)); + EFX_STATIC_ASSERT(EFX_FILTER_MATCH_LOC_HOST == + MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_IP)); + EFX_STATIC_ASSERT(EFX_FILTER_MATCH_REM_MAC == + MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_MAC)); + 
EFX_STATIC_ASSERT(EFX_FILTER_MATCH_REM_PORT == + MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_PORT)); + EFX_STATIC_ASSERT(EFX_FILTER_MATCH_LOC_MAC == + MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_MAC)); + EFX_STATIC_ASSERT(EFX_FILTER_MATCH_LOC_PORT == + MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_PORT)); + EFX_STATIC_ASSERT(EFX_FILTER_MATCH_ETHER_TYPE == + MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE)); + EFX_STATIC_ASSERT(EFX_FILTER_MATCH_INNER_VID == + MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_INNER_VLAN)); + EFX_STATIC_ASSERT(EFX_FILTER_MATCH_OUTER_VID == + MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_OUTER_VLAN)); + EFX_STATIC_ASSERT(EFX_FILTER_MATCH_IP_PROTO == + MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO)); + EFX_STATIC_ASSERT(EFX_FILTER_MATCH_VNI_OR_VSID == + MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_VNI_OR_VSID)); + EFX_STATIC_ASSERT(EFX_FILTER_MATCH_IFRM_LOC_MAC == + MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_MAC)); + EFX_STATIC_ASSERT(EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST == + MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST)); + EFX_STATIC_ASSERT(EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST == + MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST)); + EFX_STATIC_ASSERT(EFX_FILTER_MATCH_UNKNOWN_MCAST_DST == + MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST)); + EFX_STATIC_ASSERT((uint32_t)EFX_FILTER_MATCH_UNKNOWN_UCAST_DST == + MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST)); +#undef MATCH_MASK + + EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (ef10_filter_table_t), eftp); + + if (!eftp) { + rc = ENOMEM; + goto fail1; + } + + enp->en_filter.ef_ef10_filter_table = eftp; + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + void +ef10_filter_fini( + __in efx_nic_t *enp) +{ + EFSYS_ASSERT(EFX_FAMILY_IS_EF10(enp)); + + if (enp->en_filter.ef_ef10_filter_table != NULL) { + EFSYS_KMEM_FREE(enp->en_esip, sizeof (ef10_filter_table_t), + enp->en_filter.ef_ef10_filter_table); + } +} + +static __checkReturn efx_rc_t +efx_mcdi_filter_op_add( + __in efx_nic_t *enp, + __in efx_filter_spec_t *spec, + __in unsigned int filter_op, + __inout ef10_filter_handle_t *handle) +{ + efx_mcdi_req_t req; + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_FILTER_OP_V3_IN_LEN, + MC_CMD_FILTER_OP_EXT_OUT_LEN); + efx_filter_match_flags_t match_flags; + efx_rc_t rc; + + req.emr_cmd = MC_CMD_FILTER_OP; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_FILTER_OP_V3_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_FILTER_OP_EXT_OUT_LEN; + + /* + * Remove match flag for encapsulated filters that does not correspond + * to the MCDI match flags + */ + match_flags = spec->efs_match_flags & ~EFX_FILTER_MATCH_ENCAP_TYPE; + + switch (filter_op) { + case MC_CMD_FILTER_OP_IN_OP_REPLACE: + MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_HANDLE_LO, + handle->efh_lo); + MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_HANDLE_HI, + handle->efh_hi); + /* Fall through */ + case MC_CMD_FILTER_OP_IN_OP_INSERT: + case MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE: + MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_OP, filter_op); + break; + default: + EFSYS_ASSERT(0); + rc = EINVAL; + goto fail1; + } + + MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_PORT_ID, enp->en_vport_id); + MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_MATCH_FIELDS, + match_flags); + if (spec->efs_dmaq_id == EFX_FILTER_SPEC_RX_DMAQ_ID_DROP) { + MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_RX_DEST, + MC_CMD_FILTER_OP_EXT_IN_RX_DEST_DROP); + } else { + MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_RX_DEST, + 
MC_CMD_FILTER_OP_EXT_IN_RX_DEST_HOST); + MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_RX_QUEUE, + spec->efs_dmaq_id); + } + +#if EFSYS_OPT_RX_SCALE + if (spec->efs_flags & EFX_FILTER_FLAG_RX_RSS) { + uint32_t rss_context; + + if (spec->efs_rss_context == EFX_RSS_CONTEXT_DEFAULT) + rss_context = enp->en_rss_context; + else + rss_context = spec->efs_rss_context; + MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_RX_CONTEXT, + rss_context); + } +#endif + + MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_RX_MODE, + spec->efs_flags & EFX_FILTER_FLAG_RX_RSS ? + MC_CMD_FILTER_OP_EXT_IN_RX_MODE_RSS : + MC_CMD_FILTER_OP_EXT_IN_RX_MODE_SIMPLE); + MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_TX_DEST, + MC_CMD_FILTER_OP_EXT_IN_TX_DEST_DEFAULT); + + if (filter_op != MC_CMD_FILTER_OP_IN_OP_REPLACE) { + /* + * NOTE: Unlike most MCDI requests, the filter fields + * are presented in network (big endian) byte order. + */ + memcpy(MCDI_IN2(req, uint8_t, FILTER_OP_EXT_IN_SRC_MAC), + spec->efs_rem_mac, EFX_MAC_ADDR_LEN); + memcpy(MCDI_IN2(req, uint8_t, FILTER_OP_EXT_IN_DST_MAC), + spec->efs_loc_mac, EFX_MAC_ADDR_LEN); + + MCDI_IN_SET_WORD(req, FILTER_OP_EXT_IN_SRC_PORT, + __CPU_TO_BE_16(spec->efs_rem_port)); + MCDI_IN_SET_WORD(req, FILTER_OP_EXT_IN_DST_PORT, + __CPU_TO_BE_16(spec->efs_loc_port)); + + MCDI_IN_SET_WORD(req, FILTER_OP_EXT_IN_ETHER_TYPE, + __CPU_TO_BE_16(spec->efs_ether_type)); + + MCDI_IN_SET_WORD(req, FILTER_OP_EXT_IN_INNER_VLAN, + __CPU_TO_BE_16(spec->efs_inner_vid)); + MCDI_IN_SET_WORD(req, FILTER_OP_EXT_IN_OUTER_VLAN, + __CPU_TO_BE_16(spec->efs_outer_vid)); + + /* IP protocol (in low byte, high byte is zero) */ + MCDI_IN_SET_BYTE(req, FILTER_OP_EXT_IN_IP_PROTO, + spec->efs_ip_proto); + + EFX_STATIC_ASSERT(sizeof (spec->efs_rem_host) == + MC_CMD_FILTER_OP_EXT_IN_SRC_IP_LEN); + EFX_STATIC_ASSERT(sizeof (spec->efs_loc_host) == + MC_CMD_FILTER_OP_EXT_IN_DST_IP_LEN); + + memcpy(MCDI_IN2(req, uint8_t, FILTER_OP_EXT_IN_SRC_IP), + &spec->efs_rem_host.eo_byte[0], + MC_CMD_FILTER_OP_EXT_IN_SRC_IP_LEN); + memcpy(MCDI_IN2(req, uint8_t, FILTER_OP_EXT_IN_DST_IP), + &spec->efs_loc_host.eo_byte[0], + MC_CMD_FILTER_OP_EXT_IN_DST_IP_LEN); + + /* + * On Medford, filters for encapsulated packets match based on + * the ether type and IP protocol in the outer frame. In + * addition we need to fill in the VNI or VSID type field. + */ + switch (spec->efs_encap_type) { + case EFX_TUNNEL_PROTOCOL_NONE: + break; + case EFX_TUNNEL_PROTOCOL_VXLAN: + case EFX_TUNNEL_PROTOCOL_GENEVE: + MCDI_IN_POPULATE_DWORD_1(req, + FILTER_OP_EXT_IN_VNI_OR_VSID, + FILTER_OP_EXT_IN_VNI_TYPE, + spec->efs_encap_type == EFX_TUNNEL_PROTOCOL_VXLAN ? + MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_VXLAN : + MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_GENEVE); + break; + case EFX_TUNNEL_PROTOCOL_NVGRE: + MCDI_IN_POPULATE_DWORD_1(req, + FILTER_OP_EXT_IN_VNI_OR_VSID, + FILTER_OP_EXT_IN_VSID_TYPE, + MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_NVGRE); + break; + default: + EFSYS_ASSERT(0); + rc = EINVAL; + goto fail2; + } + + memcpy(MCDI_IN2(req, uint8_t, FILTER_OP_EXT_IN_VNI_OR_VSID), + spec->efs_vni_or_vsid, EFX_VNI_OR_VSID_LEN); + + memcpy(MCDI_IN2(req, uint8_t, FILTER_OP_EXT_IN_IFRM_DST_MAC), + spec->efs_ifrm_loc_mac, EFX_MAC_ADDR_LEN); + } + + /* + * Set the "MARK" or "FLAG" action for all packets matching this filter + * if necessary (only useful with equal stride packed stream Rx mode + * which provide the information in pseudo-header). + * These actions require MC_CMD_FILTER_OP_V3_IN msgrequest. 
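+	 * The MARK action carries the value from efs_mark with each matching
+	 * packet, FLAG carries no value; requesting both actions at once is
+	 * rejected with EINVAL below.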
+ */ + if ((spec->efs_flags & EFX_FILTER_FLAG_ACTION_MARK) && + (spec->efs_flags & EFX_FILTER_FLAG_ACTION_FLAG)) { + rc = EINVAL; + goto fail3; + } + if (spec->efs_flags & EFX_FILTER_FLAG_ACTION_MARK) { + MCDI_IN_SET_DWORD(req, FILTER_OP_V3_IN_MATCH_ACTION, + MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_MARK); + MCDI_IN_SET_DWORD(req, FILTER_OP_V3_IN_MATCH_MARK_VALUE, + spec->efs_mark); + } else if (spec->efs_flags & EFX_FILTER_FLAG_ACTION_FLAG) { + MCDI_IN_SET_DWORD(req, FILTER_OP_V3_IN_MATCH_ACTION, + MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_FLAG); + } + + efx_mcdi_execute(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail4; + } + + if (req.emr_out_length_used < MC_CMD_FILTER_OP_EXT_OUT_LEN) { + rc = EMSGSIZE; + goto fail5; + } + + handle->efh_lo = MCDI_OUT_DWORD(req, FILTER_OP_EXT_OUT_HANDLE_LO); + handle->efh_hi = MCDI_OUT_DWORD(req, FILTER_OP_EXT_OUT_HANDLE_HI); + + return (0); + +fail5: + EFSYS_PROBE(fail5); +fail4: + EFSYS_PROBE(fail4); +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); + +} + +static __checkReturn efx_rc_t +efx_mcdi_filter_op_delete( + __in efx_nic_t *enp, + __in unsigned int filter_op, + __inout ef10_filter_handle_t *handle) +{ + efx_mcdi_req_t req; + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_FILTER_OP_EXT_IN_LEN, + MC_CMD_FILTER_OP_EXT_OUT_LEN); + efx_rc_t rc; + + req.emr_cmd = MC_CMD_FILTER_OP; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_FILTER_OP_EXT_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_FILTER_OP_EXT_OUT_LEN; + + switch (filter_op) { + case MC_CMD_FILTER_OP_IN_OP_REMOVE: + MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_OP, + MC_CMD_FILTER_OP_IN_OP_REMOVE); + break; + case MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE: + MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_OP, + MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE); + break; + default: + EFSYS_ASSERT(0); + rc = EINVAL; + goto fail1; + } + + MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_HANDLE_LO, handle->efh_lo); + MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_HANDLE_HI, handle->efh_hi); + + efx_mcdi_execute_quiet(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail2; + } + + if (req.emr_out_length_used < MC_CMD_FILTER_OP_EXT_OUT_LEN) { + rc = EMSGSIZE; + goto fail3; + } + + return (0); + +fail3: + EFSYS_PROBE(fail3); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +static __checkReturn boolean_t +ef10_filter_equal( + __in const efx_filter_spec_t *left, + __in const efx_filter_spec_t *right) +{ + /* FIXME: Consider rx vs tx filters (look at efs_flags) */ + if (left->efs_match_flags != right->efs_match_flags) + return (B_FALSE); + if (!EFX_OWORD_IS_EQUAL(left->efs_rem_host, right->efs_rem_host)) + return (B_FALSE); + if (!EFX_OWORD_IS_EQUAL(left->efs_loc_host, right->efs_loc_host)) + return (B_FALSE); + if (memcmp(left->efs_rem_mac, right->efs_rem_mac, EFX_MAC_ADDR_LEN)) + return (B_FALSE); + if (memcmp(left->efs_loc_mac, right->efs_loc_mac, EFX_MAC_ADDR_LEN)) + return (B_FALSE); + if (left->efs_rem_port != right->efs_rem_port) + return (B_FALSE); + if (left->efs_loc_port != right->efs_loc_port) + return (B_FALSE); + if (left->efs_inner_vid != right->efs_inner_vid) + return (B_FALSE); + if (left->efs_outer_vid != right->efs_outer_vid) + return (B_FALSE); + if (left->efs_ether_type != right->efs_ether_type) + return (B_FALSE); + if (left->efs_ip_proto != right->efs_ip_proto) + return (B_FALSE); + if (left->efs_encap_type != right->efs_encap_type) + return (B_FALSE); + if 
(memcmp(left->efs_vni_or_vsid, right->efs_vni_or_vsid, + EFX_VNI_OR_VSID_LEN)) + return (B_FALSE); + if (memcmp(left->efs_ifrm_loc_mac, right->efs_ifrm_loc_mac, + EFX_MAC_ADDR_LEN)) + return (B_FALSE); + + return (B_TRUE); + +} + +static __checkReturn boolean_t +ef10_filter_same_dest( + __in const efx_filter_spec_t *left, + __in const efx_filter_spec_t *right) +{ + if ((left->efs_flags & EFX_FILTER_FLAG_RX_RSS) && + (right->efs_flags & EFX_FILTER_FLAG_RX_RSS)) { + if (left->efs_rss_context == right->efs_rss_context) + return (B_TRUE); + } else if ((~(left->efs_flags) & EFX_FILTER_FLAG_RX_RSS) && + (~(right->efs_flags) & EFX_FILTER_FLAG_RX_RSS)) { + if (left->efs_dmaq_id == right->efs_dmaq_id) + return (B_TRUE); + } + return (B_FALSE); +} + +static __checkReturn uint32_t +ef10_filter_hash( + __in efx_filter_spec_t *spec) +{ + EFX_STATIC_ASSERT((sizeof (efx_filter_spec_t) % sizeof (uint32_t)) + == 0); + EFX_STATIC_ASSERT((EFX_FIELD_OFFSET(efx_filter_spec_t, efs_outer_vid) % + sizeof (uint32_t)) == 0); + + /* + * As the area of the efx_filter_spec_t we need to hash is DWORD + * aligned and an exact number of DWORDs in size we can use the + * optimised efx_hash_dwords() rather than efx_hash_bytes() + */ + return (efx_hash_dwords((const uint32_t *)&spec->efs_outer_vid, + (sizeof (efx_filter_spec_t) - + EFX_FIELD_OFFSET(efx_filter_spec_t, efs_outer_vid)) / + sizeof (uint32_t), 0)); +} + +/* + * Decide whether a filter should be exclusive or else should allow + * delivery to additional recipients. Currently we decide that + * filters for specific local unicast MAC and IP addresses are + * exclusive. + */ +static __checkReturn boolean_t +ef10_filter_is_exclusive( + __in efx_filter_spec_t *spec) +{ + if ((spec->efs_match_flags & EFX_FILTER_MATCH_LOC_MAC) && + !EFX_MAC_ADDR_IS_MULTICAST(spec->efs_loc_mac)) + return (B_TRUE); + + if ((spec->efs_match_flags & + (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) == + (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) { + if ((spec->efs_ether_type == EFX_ETHER_TYPE_IPV4) && + ((spec->efs_loc_host.eo_u8[0] & 0xf) != 0xe)) + return (B_TRUE); + if ((spec->efs_ether_type == EFX_ETHER_TYPE_IPV6) && + (spec->efs_loc_host.eo_u8[0] != 0xff)) + return (B_TRUE); + } + + return (B_FALSE); +} + + __checkReturn efx_rc_t +ef10_filter_restore( + __in efx_nic_t *enp) +{ + int tbl_id; + efx_filter_spec_t *spec; + ef10_filter_table_t *eftp = enp->en_filter.ef_ef10_filter_table; + boolean_t restoring; + efsys_lock_state_t state; + efx_rc_t rc; + + EFSYS_ASSERT(EFX_FAMILY_IS_EF10(enp)); + + for (tbl_id = 0; tbl_id < EFX_EF10_FILTER_TBL_ROWS; tbl_id++) { + + EFSYS_LOCK(enp->en_eslp, state); + + spec = ef10_filter_entry_spec(eftp, tbl_id); + if (spec == NULL) { + restoring = B_FALSE; + } else if (ef10_filter_entry_is_busy(eftp, tbl_id)) { + /* Ignore busy entries. 
*/ + restoring = B_FALSE; + } else { + ef10_filter_set_entry_busy(eftp, tbl_id); + restoring = B_TRUE; + } + + EFSYS_UNLOCK(enp->en_eslp, state); + + if (restoring == B_FALSE) + continue; + + if (ef10_filter_is_exclusive(spec)) { + rc = efx_mcdi_filter_op_add(enp, spec, + MC_CMD_FILTER_OP_IN_OP_INSERT, + &eftp->eft_entry[tbl_id].efe_handle); + } else { + rc = efx_mcdi_filter_op_add(enp, spec, + MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE, + &eftp->eft_entry[tbl_id].efe_handle); + } + + if (rc != 0) + goto fail1; + + EFSYS_LOCK(enp->en_eslp, state); + + ef10_filter_set_entry_not_busy(eftp, tbl_id); + + EFSYS_UNLOCK(enp->en_eslp, state); + } + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +enum ef10_filter_add_action_e { + /* Insert a new filter */ + EF10_FILTER_ADD_NEW, + /* + * Replace old filter with a new, overriding the old one + * if it has lower priority. + */ + EF10_FILTER_ADD_REPLACE, + /* Store new, lower priority filter as overridden by old filter */ + EF10_FILTER_ADD_STORE, + /* Special case for AUTO filters, remove AUTO_OLD flag */ + EF10_FILTER_ADD_REFRESH, +}; + +static __checkReturn efx_rc_t +ef10_filter_add_lookup_equal_spec( + __in efx_filter_spec_t *spec, + __in efx_filter_spec_t *probe_spec, + __in efx_filter_replacement_policy_t policy, + __out boolean_t *found) +{ + efx_rc_t rc; + + /* Refreshing AUTO filter */ + if (spec->efs_priority == EFX_FILTER_PRI_AUTO && + probe_spec->efs_priority == EFX_FILTER_PRI_AUTO) { + *found = B_TRUE; + return (0); + } + + /* + * With exclusive filters, higher priority ones + * override lower priority ones, and lower priority + * ones are stored in case the higher priority one + * is removed. + */ + if (ef10_filter_is_exclusive(spec)) { + switch (policy) { + case EFX_FILTER_REPLACEMENT_HIGHER_OR_EQUAL_PRIORITY: + if (spec->efs_priority == probe_spec->efs_priority) { + *found = B_TRUE; + break; + } + /* Fall-through */ + case EFX_FILTER_REPLACEMENT_HIGHER_PRIORITY: + if (spec->efs_priority > probe_spec->efs_priority) { + *found = B_TRUE; + break; + } + /* Fall-through */ + case EFX_FILTER_REPLACEMENT_NEVER: + /* + * Lower priority filter needs to be + * stored. It does *not* replace the + * old one. That is why EEXIST is not + * returned in that case. + */ + if (spec->efs_priority < probe_spec->efs_priority) { + *found = B_TRUE; + break; + } else { + rc = EEXIST; + goto fail1; + } + default: + EFSYS_ASSERT(0); + rc = EEXIST; + goto fail2; + } + } else { + *found = B_FALSE; + } + + return (0); + +fail2: + EFSYS_PROBE(fail2); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + +static void +ef10_filter_add_select_action( + __in efx_filter_spec_t *saved_spec, + __in efx_filter_spec_t *spec, + __out enum ef10_filter_add_action_e *action, + __out efx_filter_spec_t **overridden_spec) +{ + efx_filter_spec_t *overridden = NULL; + + if (saved_spec == NULL) { + *action = EF10_FILTER_ADD_NEW; + } else if (ef10_filter_is_exclusive(spec) == B_FALSE) { + /* + * Non-exclusive filters are always stored in separate entries + * in the table. The only case involving a saved spec is + * refreshing an AUTO filter. + */ + EFSYS_ASSERT(saved_spec->efs_overridden_spec == NULL); + EFSYS_ASSERT(spec->efs_priority == EFX_FILTER_PRI_AUTO); + EFSYS_ASSERT(saved_spec->efs_priority == EFX_FILTER_PRI_AUTO); + *action = EF10_FILTER_ADD_REFRESH; + } else { + /* Exclusive filters stored in the same entry */ + if (spec->efs_priority > saved_spec->efs_priority) { + /* + * Insert a high priority filter over a lower priority + * one. 
Only two priority levels are implemented, so + * there must not already be an overridden filter. + */ + EFX_STATIC_ASSERT(EFX_FILTER_NPRI == 2); + EFSYS_ASSERT(saved_spec->efs_overridden_spec == NULL); + overridden = saved_spec; + *action = EF10_FILTER_ADD_REPLACE; + } else if (spec->efs_priority == saved_spec->efs_priority) { + /* Replace in-place or refresh an existing filter */ + if (spec->efs_priority == EFX_FILTER_PRI_AUTO) + *action = EF10_FILTER_ADD_REFRESH; + else + *action = EF10_FILTER_ADD_REPLACE; + } else { + /* + * Insert a lower priority filter, storing it in case + * the higher priority filter is removed. + * + * Currently there are only two priority levels, so this + * must be an AUTO filter. + */ + EFX_STATIC_ASSERT(EFX_FILTER_NPRI == 2); + EFSYS_ASSERT(spec->efs_priority == EFX_FILTER_PRI_AUTO); + if (saved_spec->efs_overridden_spec != NULL) { + *action = EF10_FILTER_ADD_REFRESH; + } else { + overridden = spec; + *action = EF10_FILTER_ADD_STORE; + } + } + } + + *overridden_spec = overridden; +} + +static __checkReturn efx_rc_t +ef10_filter_add_execute_action( + __in efx_nic_t *enp, + __in efx_filter_spec_t *saved_spec, + __in efx_filter_spec_t *spec, + __in efx_filter_spec_t *overridden_spec, + __in enum ef10_filter_add_action_e action, + __in int ins_index) +{ + ef10_filter_table_t *eftp = enp->en_filter.ef_ef10_filter_table; + efsys_lock_state_t state; + efx_rc_t rc; + + EFSYS_LOCK(enp->en_eslp, state); + + if (action == EF10_FILTER_ADD_REFRESH) { + ef10_filter_set_entry_not_auto_old(eftp, ins_index); + goto out_unlock; + } else if (action == EF10_FILTER_ADD_STORE) { + EFSYS_ASSERT(overridden_spec != NULL); + saved_spec->efs_overridden_spec = overridden_spec; + goto out_unlock; + } + + EFSYS_UNLOCK(enp->en_eslp, state); + + switch (action) { + case EF10_FILTER_ADD_REPLACE: + /* + * On replacing the filter handle may change after a + * successful replace operation. + */ + rc = efx_mcdi_filter_op_add(enp, spec, + MC_CMD_FILTER_OP_IN_OP_REPLACE, + &eftp->eft_entry[ins_index].efe_handle); + break; + case EF10_FILTER_ADD_NEW: + if (ef10_filter_is_exclusive(spec)) { + rc = efx_mcdi_filter_op_add(enp, spec, + MC_CMD_FILTER_OP_IN_OP_INSERT, + &eftp->eft_entry[ins_index].efe_handle); + } else { + rc = efx_mcdi_filter_op_add(enp, spec, + MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE, + &eftp->eft_entry[ins_index].efe_handle); + } + break; + default: + rc = EINVAL; + EFSYS_ASSERT(0); + break; + } + if (rc != 0) + goto fail1; + + EFSYS_LOCK(enp->en_eslp, state); + + if (action == EF10_FILTER_ADD_REPLACE) { + /* Update the fields that may differ */ + saved_spec->efs_priority = spec->efs_priority; + saved_spec->efs_flags = spec->efs_flags; + saved_spec->efs_rss_context = spec->efs_rss_context; + saved_spec->efs_dmaq_id = spec->efs_dmaq_id; + + if (overridden_spec != NULL) + saved_spec->efs_overridden_spec = overridden_spec; + } + +out_unlock: + EFSYS_UNLOCK(enp->en_eslp, state); + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +/* + * An arbitrary search limit for the software hash table. As per the linux net + * driver. 
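+ * Both insertion and delete lookups probe at most this many slots,
+ * (hash + 1) through (hash + limit) modulo EFX_EF10_FILTER_TBL_ROWS,
+ * before giving up (EBUSY on insert, ENOENT on delete).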
+ */ +#define EF10_FILTER_SEARCH_LIMIT 200 + +static __checkReturn efx_rc_t +ef10_filter_add_internal( + __in efx_nic_t *enp, + __inout efx_filter_spec_t *spec, + __in efx_filter_replacement_policy_t policy, + __out_opt uint32_t *filter_id) +{ + efx_rc_t rc; + ef10_filter_table_t *eftp = enp->en_filter.ef_ef10_filter_table; + enum ef10_filter_add_action_e action; + efx_filter_spec_t *overridden_spec = NULL; + efx_filter_spec_t *saved_spec; + uint32_t hash; + unsigned int depth; + int ins_index; + efsys_lock_state_t state; + boolean_t locked = B_FALSE; + + EFSYS_ASSERT(EFX_FAMILY_IS_EF10(enp)); + + EFSYS_ASSERT(spec->efs_overridden_spec == NULL); + + hash = ef10_filter_hash(spec); + + /* + * FIXME: Add support for inserting filters of different priorities + * and removing lower priority multicast filters (bug 42378) + */ + + /* + * Find any existing filters with the same match tuple or + * else a free slot to insert at. If any of them are busy, + * we have to wait and retry. + */ +retry: + EFSYS_LOCK(enp->en_eslp, state); + locked = B_TRUE; + + ins_index = -1; + + for (depth = 1; depth <= EF10_FILTER_SEARCH_LIMIT; depth++) { + unsigned int probe_index; + efx_filter_spec_t *probe_spec; + + probe_index = (hash + depth) & (EFX_EF10_FILTER_TBL_ROWS - 1); + probe_spec = ef10_filter_entry_spec(eftp, probe_index); + + if (probe_spec == NULL) { + if (ins_index < 0) + ins_index = probe_index; + } else if (ef10_filter_equal(spec, probe_spec)) { + boolean_t found; + + if (ef10_filter_entry_is_busy(eftp, probe_index)) { + EFSYS_UNLOCK(enp->en_eslp, state); + locked = B_FALSE; + goto retry; + } + + rc = ef10_filter_add_lookup_equal_spec(spec, + probe_spec, policy, &found); + if (rc != 0) + goto fail1; + + if (found != B_FALSE) { + ins_index = probe_index; + break; + } + } + } + + /* + * Once we reach the maximum search depth, use the first suitable slot + * or return EBUSY if there was none. + */ + if (ins_index < 0) { + rc = EBUSY; + goto fail2; + } + + /* + * Mark software table entry busy. We might yet fail to insert, + * but any attempt to insert a conflicting filter while we're + * waiting for the firmware must find the busy entry. + */ + ef10_filter_set_entry_busy(eftp, ins_index); + + saved_spec = ef10_filter_entry_spec(eftp, ins_index); + ef10_filter_add_select_action(saved_spec, spec, &action, + &overridden_spec); + + /* + * Allocate a new filter if found entry is empty or + * a filter should be overridden. 
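+	 * The allocation is the software copy of the spec kept in the table:
+	 * either a copy of the new spec (for an empty entry) or a copy of the
+	 * spec that ends up overridden, so it can be re-applied when the
+	 * higher priority filter is removed.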
+ */ + if (overridden_spec != NULL || saved_spec == NULL) { + efx_filter_spec_t *new_spec; + + EFSYS_UNLOCK(enp->en_eslp, state); + locked = B_FALSE; + + EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (*new_spec), new_spec); + if (new_spec == NULL) { + rc = ENOMEM; + overridden_spec = NULL; + goto fail3; + } + + EFSYS_LOCK(enp->en_eslp, state); + locked = B_TRUE; + + if (saved_spec == NULL) { + *new_spec = *spec; + ef10_filter_set_entry(eftp, ins_index, new_spec); + } else { + *new_spec = *overridden_spec; + overridden_spec = new_spec; + } + } + + EFSYS_UNLOCK(enp->en_eslp, state); + locked = B_FALSE; + + rc = ef10_filter_add_execute_action(enp, saved_spec, spec, + overridden_spec, action, ins_index); + if (rc != 0) + goto fail4; + + if (filter_id) + *filter_id = ins_index; + + EFSYS_LOCK(enp->en_eslp, state); + ef10_filter_set_entry_not_busy(eftp, ins_index); + EFSYS_UNLOCK(enp->en_eslp, state); + + return (0); + +fail4: + EFSYS_PROBE(fail4); + + EFSYS_ASSERT(locked == B_FALSE); + EFSYS_LOCK(enp->en_eslp, state); + + if (action == EF10_FILTER_ADD_NEW) { + EFSYS_KMEM_FREE(enp->en_esip, sizeof (*spec), + ef10_filter_entry_spec(eftp, ins_index)); + ef10_filter_set_entry(eftp, ins_index, NULL); + } + + EFSYS_UNLOCK(enp->en_eslp, state); + + if (overridden_spec != NULL) + EFSYS_KMEM_FREE(enp->en_esip, sizeof (*spec), overridden_spec); + +fail3: + EFSYS_PROBE(fail3); + + EFSYS_ASSERT(locked == B_FALSE); + EFSYS_LOCK(enp->en_eslp, state); + + ef10_filter_set_entry_not_busy(eftp, ins_index); + + EFSYS_UNLOCK(enp->en_eslp, state); + +fail2: + EFSYS_PROBE(fail2); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + if (locked) + EFSYS_UNLOCK(enp->en_eslp, state); + + return (rc); +} + + __checkReturn efx_rc_t +ef10_filter_add( + __in efx_nic_t *enp, + __inout efx_filter_spec_t *spec, + __in enum efx_filter_replacement_policy_e policy) +{ + efx_rc_t rc; + + rc = ef10_filter_add_internal(enp, spec, policy, NULL); + if (rc != 0) + goto fail1; + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +/* + * Delete a filter by index from the filter table with priority + * that is not higher than specified. + */ +static __checkReturn efx_rc_t +ef10_filter_delete_internal( + __in efx_nic_t *enp, + __in uint32_t filter_id, + __in efx_filter_priority_t priority) +{ + efx_rc_t rc; + ef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table; + efx_filter_spec_t *spec; + efsys_lock_state_t state; + uint32_t filter_idx = filter_id % EFX_EF10_FILTER_TBL_ROWS; + + /* + * Find the software table entry and mark it busy. Don't + * remove it yet; any attempt to update while we're waiting + * for the firmware must find the busy entry. + * + * FIXME: What if the busy flag is never cleared? + */ + EFSYS_LOCK(enp->en_eslp, state); + while (ef10_filter_entry_is_busy(table, filter_idx)) { + EFSYS_UNLOCK(enp->en_eslp, state); + EFSYS_SPIN(1); + EFSYS_LOCK(enp->en_eslp, state); + } + if ((spec = ef10_filter_entry_spec(table, filter_idx)) != NULL) { + if (spec->efs_priority <= priority) + ef10_filter_set_entry_busy(table, filter_idx); + } + EFSYS_UNLOCK(enp->en_eslp, state); + + if (spec == NULL) { + rc = ENOENT; + goto fail1; + } + + if (spec->efs_priority > priority) { + /* + * Applied filter stays, but overridden filter is removed since + * next user request to delete the applied filter should not + * restore outdated filter. 
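+		 * (The overridden spec exists only so that it can be
+		 * re-applied when the applied higher priority filter is
+		 * deleted; dropping it here completes the AUTO-priority
+		 * delete without disturbing the applied MANUAL filter.)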
+ */ + if (spec->efs_overridden_spec != NULL) { + EFSYS_ASSERT(spec->efs_overridden_spec->efs_overridden_spec == + NULL); + EFSYS_KMEM_FREE(enp->en_esip, sizeof (*spec), + spec->efs_overridden_spec); + spec->efs_overridden_spec = NULL; + } + } else { + /* + * Try to remove the hardware filter or replace it with the + * saved automatic filter. This may fail if the MC has + * rebooted (which frees all hardware filter resources). + */ + if (spec->efs_overridden_spec != NULL) { + rc = efx_mcdi_filter_op_add(enp, + spec->efs_overridden_spec, + MC_CMD_FILTER_OP_IN_OP_REPLACE, + &table->eft_entry[filter_idx].efe_handle); + } else if (ef10_filter_is_exclusive(spec)) { + rc = efx_mcdi_filter_op_delete(enp, + MC_CMD_FILTER_OP_IN_OP_REMOVE, + &table->eft_entry[filter_idx].efe_handle); + } else { + rc = efx_mcdi_filter_op_delete(enp, + MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE, + &table->eft_entry[filter_idx].efe_handle); + } + + /* Free the software table entry */ + EFSYS_LOCK(enp->en_eslp, state); + ef10_filter_set_entry_not_busy(table, filter_idx); + ef10_filter_set_entry(table, filter_idx, + spec->efs_overridden_spec); + EFSYS_UNLOCK(enp->en_eslp, state); + + EFSYS_KMEM_FREE(enp->en_esip, sizeof (*spec), spec); + + /* Check result of hardware filter removal */ + if (rc != 0) + goto fail2; + } + + return (0); + +fail2: + EFSYS_PROBE(fail2); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +static void +ef10_filter_delete_auto( + __in efx_nic_t *enp, + __in uint32_t filter_id) +{ + ef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table; + uint32_t filter_idx = filter_id % EFX_EF10_FILTER_TBL_ROWS; + + /* + * AUTO_OLD flag is cleared since the auto filter that is to be removed + * may not be the filter at the specified index itself, but the filter + * that is overridden by it. 
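+	 * ef10_filter_delete_internal() then removes the spec at this index
+	 * only if its priority is no higher than AUTO; a MANUAL filter at
+	 * this index just has its saved AUTO spec dropped.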
+ */ + ef10_filter_set_entry_not_auto_old(table, filter_idx); + + (void) ef10_filter_delete_internal(enp, filter_idx, + EFX_FILTER_PRI_AUTO); +} + + __checkReturn efx_rc_t +ef10_filter_delete( + __in efx_nic_t *enp, + __inout efx_filter_spec_t *spec) +{ + efx_rc_t rc; + ef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table; + efx_filter_spec_t *saved_spec; + unsigned int hash; + unsigned int depth; + unsigned int i; + efsys_lock_state_t state; + boolean_t locked = B_FALSE; + + EFSYS_ASSERT(EFX_FAMILY_IS_EF10(enp)); + + hash = ef10_filter_hash(spec); + + EFSYS_LOCK(enp->en_eslp, state); + locked = B_TRUE; + + depth = 1; + for (;;) { + i = (hash + depth) & (EFX_EF10_FILTER_TBL_ROWS - 1); + saved_spec = ef10_filter_entry_spec(table, i); + if (saved_spec && ef10_filter_equal(spec, saved_spec) && + ef10_filter_same_dest(spec, saved_spec) && + saved_spec->efs_priority == EFX_FILTER_PRI_MANUAL) { + break; + } + if (depth == EF10_FILTER_SEARCH_LIMIT) { + rc = ENOENT; + goto fail1; + } + depth++; + } + + EFSYS_UNLOCK(enp->en_eslp, state); + locked = B_FALSE; + + rc = ef10_filter_delete_internal(enp, i, EFX_FILTER_PRI_MANUAL); + if (rc != 0) + goto fail2; + + return (0); + +fail2: + EFSYS_PROBE(fail2); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + if (locked) + EFSYS_UNLOCK(enp->en_eslp, state); + + return (rc); +} + +static __checkReturn efx_rc_t +efx_mcdi_get_parser_disp_info( + __in efx_nic_t *enp, + __out_ecount(buffer_length) uint32_t *buffer, + __in size_t buffer_length, + __in boolean_t encap, + __out size_t *list_lengthp) +{ + efx_mcdi_req_t req; + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_PARSER_DISP_INFO_IN_LEN, + MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX); + size_t matches_count; + size_t list_size; + efx_rc_t rc; + + req.emr_cmd = MC_CMD_GET_PARSER_DISP_INFO; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_GET_PARSER_DISP_INFO_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX; + + MCDI_IN_SET_DWORD(req, GET_PARSER_DISP_INFO_OUT_OP, encap ? + MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_ENCAP_RX_MATCHES : + MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES); + + efx_mcdi_execute(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail1; + } + + matches_count = MCDI_OUT_DWORD(req, + GET_PARSER_DISP_INFO_OUT_NUM_SUPPORTED_MATCHES); + + if (req.emr_out_length_used < + MC_CMD_GET_PARSER_DISP_INFO_OUT_LEN(matches_count)) { + rc = EMSGSIZE; + goto fail2; + } + + *list_lengthp = matches_count; + + if (buffer_length < matches_count) { + rc = ENOSPC; + goto fail3; + } + + /* + * Check that the elements in the list in the MCDI response are the size + * we expect, so we can just copy them directly. Any conversion of the + * flags is handled by the caller. 
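+	 * (Each element is a 32-bit mask of MCDI match fields describing one
+	 * supported filter match combination.)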
+ */ + EFX_STATIC_ASSERT(sizeof (uint32_t) == + MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_LEN); + + list_size = matches_count * + MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_LEN; + memcpy(buffer, + MCDI_OUT2(req, uint32_t, + GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES), + list_size); + + return (0); + +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +ef10_filter_supported_filters( + __in efx_nic_t *enp, + __out_ecount(buffer_length) uint32_t *buffer, + __in size_t buffer_length, + __out size_t *list_lengthp) +{ + efx_nic_cfg_t *encp = &(enp->en_nic_cfg); + size_t mcdi_list_length; + size_t mcdi_encap_list_length; + size_t list_length; + uint32_t i; + uint32_t next_buf_idx; + size_t next_buf_length; + efx_rc_t rc; + boolean_t no_space = B_FALSE; + efx_filter_match_flags_t all_filter_flags = + (EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_LOC_HOST | + EFX_FILTER_MATCH_REM_MAC | EFX_FILTER_MATCH_REM_PORT | + EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_PORT | + EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_INNER_VID | + EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_IP_PROTO | + EFX_FILTER_MATCH_VNI_OR_VSID | + EFX_FILTER_MATCH_IFRM_LOC_MAC | + EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST | + EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST | + EFX_FILTER_MATCH_ENCAP_TYPE | + EFX_FILTER_MATCH_UNKNOWN_MCAST_DST | + EFX_FILTER_MATCH_UNKNOWN_UCAST_DST); + + /* + * Two calls to MC_CMD_GET_PARSER_DISP_INFO are needed: one to get the + * list of supported filters for ordinary packets, and then another to + * get the list of supported filters for encapsulated packets. To + * distinguish the second list from the first, the + * EFX_FILTER_MATCH_ENCAP_TYPE flag is added to each filter for + * encapsulated packets. + */ + rc = efx_mcdi_get_parser_disp_info(enp, buffer, buffer_length, B_FALSE, + &mcdi_list_length); + if (rc != 0) { + if (rc == ENOSPC) + no_space = B_TRUE; + else + goto fail1; + } + + if (no_space) { + next_buf_idx = 0; + next_buf_length = 0; + } else { + EFSYS_ASSERT(mcdi_list_length <= buffer_length); + next_buf_idx = mcdi_list_length; + next_buf_length = buffer_length - mcdi_list_length; + } + + if (encp->enc_tunnel_encapsulations_supported != 0) { + rc = efx_mcdi_get_parser_disp_info(enp, &buffer[next_buf_idx], + next_buf_length, B_TRUE, &mcdi_encap_list_length); + if (rc != 0) { + if (rc == ENOSPC) + no_space = B_TRUE; + else + goto fail2; + } else { + for (i = next_buf_idx; + i < next_buf_idx + mcdi_encap_list_length; i++) + buffer[i] |= EFX_FILTER_MATCH_ENCAP_TYPE; + } + } else { + mcdi_encap_list_length = 0; + } + + if (no_space) { + *list_lengthp = mcdi_list_length + mcdi_encap_list_length; + rc = ENOSPC; + goto fail3; + } + + /* + * The static assertions in ef10_filter_init() ensure that the values of + * the EFX_FILTER_MATCH flags match those used by MCDI, so they don't + * need to be converted. + * + * In case support is added to MCDI for additional flags, remove any + * matches from the list which include flags we don't support. The order + * of the matches is preserved as they are ordered from highest to + * lowest priority. 
+ */ + EFSYS_ASSERT(mcdi_list_length + mcdi_encap_list_length <= + buffer_length); + list_length = 0; + for (i = 0; i < mcdi_list_length + mcdi_encap_list_length; i++) { + if ((buffer[i] & ~all_filter_flags) == 0) { + buffer[list_length] = buffer[i]; + list_length++; + } + } + + *list_lengthp = list_length; + + return (0); + +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +static __checkReturn efx_rc_t +ef10_filter_insert_unicast( + __in efx_nic_t *enp, + __in_ecount(6) uint8_t const *addr, + __in efx_filter_flags_t filter_flags) +{ + ef10_filter_table_t *eftp = enp->en_filter.ef_ef10_filter_table; + efx_filter_spec_t spec; + efx_rc_t rc; + + /* Insert the filter for the local station address */ + efx_filter_spec_init_rx(&spec, EFX_FILTER_PRI_AUTO, + filter_flags, + eftp->eft_default_rxq); + rc = efx_filter_spec_set_eth_local(&spec, EFX_FILTER_SPEC_VID_UNSPEC, + addr); + if (rc != 0) + goto fail1; + + rc = ef10_filter_add_internal(enp, &spec, EFX_FILTER_REPLACEMENT_NEVER, + &eftp->eft_unicst_filter_indexes[eftp->eft_unicst_filter_count]); + if (rc != 0) + goto fail2; + + eftp->eft_unicst_filter_count++; + EFSYS_ASSERT(eftp->eft_unicst_filter_count <= + EFX_EF10_FILTER_UNICAST_FILTERS_MAX); + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + return (rc); +} + +static __checkReturn efx_rc_t +ef10_filter_insert_all_unicast( + __in efx_nic_t *enp, + __in efx_filter_flags_t filter_flags) +{ + ef10_filter_table_t *eftp = enp->en_filter.ef_ef10_filter_table; + efx_filter_spec_t spec; + efx_rc_t rc; + + /* Insert the unknown unicast filter */ + efx_filter_spec_init_rx(&spec, EFX_FILTER_PRI_AUTO, + filter_flags, + eftp->eft_default_rxq); + rc = efx_filter_spec_set_uc_def(&spec); + if (rc != 0) + goto fail1; + rc = ef10_filter_add_internal(enp, &spec, EFX_FILTER_REPLACEMENT_NEVER, + &eftp->eft_unicst_filter_indexes[eftp->eft_unicst_filter_count]); + if (rc != 0) + goto fail2; + + eftp->eft_unicst_filter_count++; + EFSYS_ASSERT(eftp->eft_unicst_filter_count <= + EFX_EF10_FILTER_UNICAST_FILTERS_MAX); + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + return (rc); +} + +static __checkReturn efx_rc_t +ef10_filter_insert_multicast_list( + __in efx_nic_t *enp, + __in boolean_t mulcst, + __in boolean_t brdcst, + __in_ecount(6*count) uint8_t const *addrs, + __in uint32_t count, + __in efx_filter_flags_t filter_flags, + __in boolean_t rollback) +{ + ef10_filter_table_t *eftp = enp->en_filter.ef_ef10_filter_table; + efx_filter_spec_t spec; + uint8_t addr[6]; + uint32_t i; + uint32_t filter_index; + uint32_t filter_count; + efx_rc_t rc; + + if (mulcst == B_FALSE) + count = 0; + + if (count + (brdcst ? 1 : 0) > + EFX_ARRAY_SIZE(eftp->eft_mulcst_filter_indexes)) { + /* Too many MAC addresses */ + rc = EINVAL; + goto fail1; + } + + /* Insert/renew multicast address list filters */ + filter_count = 0; + for (i = 0; i < count; i++) { + efx_filter_spec_init_rx(&spec, + EFX_FILTER_PRI_AUTO, + filter_flags, + eftp->eft_default_rxq); + + rc = efx_filter_spec_set_eth_local(&spec, + EFX_FILTER_SPEC_VID_UNSPEC, + &addrs[i * EFX_MAC_ADDR_LEN]); + if (rc != 0) { + if (rollback == B_TRUE) { + /* Only stop upon failure if told to rollback */ + goto rollback; + } else { + /* + * Don't try to add a filter with a corrupt + * specification. 
+ */ + continue; + } + } + + rc = ef10_filter_add_internal(enp, &spec, + EFX_FILTER_REPLACEMENT_NEVER, &filter_index); + + if (rc == 0) { + eftp->eft_mulcst_filter_indexes[filter_count] = + filter_index; + filter_count++; + } else if (rollback == B_TRUE) { + /* Only stop upon failure if told to rollback */ + goto rollback; + } + + } + + if (brdcst == B_TRUE) { + /* Insert/renew broadcast address filter */ + efx_filter_spec_init_rx(&spec, EFX_FILTER_PRI_AUTO, + filter_flags, + eftp->eft_default_rxq); + + EFX_MAC_BROADCAST_ADDR_SET(addr); + rc = efx_filter_spec_set_eth_local(&spec, + EFX_FILTER_SPEC_VID_UNSPEC, addr); + if ((rc != 0) && (rollback == B_TRUE)) { + /* Only stop upon failure if told to rollback */ + goto rollback; + } + + rc = ef10_filter_add_internal(enp, &spec, + EFX_FILTER_REPLACEMENT_NEVER, &filter_index); + + if (rc == 0) { + eftp->eft_mulcst_filter_indexes[filter_count] = + filter_index; + filter_count++; + } else if (rollback == B_TRUE) { + /* Only stop upon failure if told to rollback */ + goto rollback; + } + } + + eftp->eft_mulcst_filter_count = filter_count; + eftp->eft_using_all_mulcst = B_FALSE; + + return (0); + +rollback: + /* Remove any filters we have inserted */ + i = filter_count; + while (i--) { + ef10_filter_delete_auto(enp, + eftp->eft_mulcst_filter_indexes[i]); + } + eftp->eft_mulcst_filter_count = 0; + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +static __checkReturn efx_rc_t +ef10_filter_insert_all_multicast( + __in efx_nic_t *enp, + __in efx_filter_flags_t filter_flags) +{ + ef10_filter_table_t *eftp = enp->en_filter.ef_ef10_filter_table; + efx_filter_spec_t spec; + efx_rc_t rc; + + /* Insert the unknown multicast filter */ + efx_filter_spec_init_rx(&spec, EFX_FILTER_PRI_AUTO, + filter_flags, + eftp->eft_default_rxq); + rc = efx_filter_spec_set_mc_def(&spec); + if (rc != 0) + goto fail1; + + rc = ef10_filter_add_internal(enp, &spec, EFX_FILTER_REPLACEMENT_NEVER, + &eftp->eft_mulcst_filter_indexes[0]); + if (rc != 0) + goto fail2; + + eftp->eft_mulcst_filter_count = 1; + eftp->eft_using_all_mulcst = B_TRUE; + + /* + * FIXME: If brdcst == B_FALSE, add a filter to drop broadcast traffic. 
+ */ + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +typedef struct ef10_filter_encap_entry_s { + uint16_t ether_type; + efx_tunnel_protocol_t encap_type; + uint32_t inner_frame_match; +} ef10_filter_encap_entry_t; + +#define EF10_ENCAP_FILTER_ENTRY(ipv, encap_type, inner_frame_match) \ + { EFX_ETHER_TYPE_##ipv, EFX_TUNNEL_PROTOCOL_##encap_type, \ + EFX_FILTER_INNER_FRAME_MATCH_UNKNOWN_##inner_frame_match } + +static ef10_filter_encap_entry_t ef10_filter_encap_list[] = { + EF10_ENCAP_FILTER_ENTRY(IPV4, VXLAN, UCAST_DST), + EF10_ENCAP_FILTER_ENTRY(IPV4, VXLAN, MCAST_DST), + EF10_ENCAP_FILTER_ENTRY(IPV6, VXLAN, UCAST_DST), + EF10_ENCAP_FILTER_ENTRY(IPV6, VXLAN, MCAST_DST), + + EF10_ENCAP_FILTER_ENTRY(IPV4, GENEVE, UCAST_DST), + EF10_ENCAP_FILTER_ENTRY(IPV4, GENEVE, MCAST_DST), + EF10_ENCAP_FILTER_ENTRY(IPV6, GENEVE, UCAST_DST), + EF10_ENCAP_FILTER_ENTRY(IPV6, GENEVE, MCAST_DST), + + EF10_ENCAP_FILTER_ENTRY(IPV4, NVGRE, UCAST_DST), + EF10_ENCAP_FILTER_ENTRY(IPV4, NVGRE, MCAST_DST), + EF10_ENCAP_FILTER_ENTRY(IPV6, NVGRE, UCAST_DST), + EF10_ENCAP_FILTER_ENTRY(IPV6, NVGRE, MCAST_DST), +}; + +#undef EF10_ENCAP_FILTER_ENTRY + +static __checkReturn efx_rc_t +ef10_filter_insert_encap_filters( + __in efx_nic_t *enp, + __in boolean_t mulcst, + __in efx_filter_flags_t filter_flags) +{ + ef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table; + uint32_t i; + efx_rc_t rc; + + EFX_STATIC_ASSERT(EFX_ARRAY_SIZE(ef10_filter_encap_list) <= + EFX_ARRAY_SIZE(table->eft_encap_filter_indexes)); + + /* + * On Medford, full-featured firmware can identify packets as being + * tunnel encapsulated, even if no encapsulated packet offloads are in + * use. When packets are identified as such, ordinary filters are not + * applied, only ones specific to encapsulated packets. Hence we need to + * insert filters for encapsulated packets in order to receive them. + * + * Separate filters need to be inserted for each ether type, + * encapsulation type, and inner frame type (unicast or multicast). To + * keep things simple and reduce the number of filters needed, catch-all + * filters for all combinations of types are inserted, even if + * all_unicst or all_mulcst have not been set. (These catch-all filters + * may well, however, fail to insert on unprivileged functions.) + */ + table->eft_encap_filter_count = 0; + for (i = 0; i < EFX_ARRAY_SIZE(ef10_filter_encap_list); i++) { + efx_filter_spec_t spec; + ef10_filter_encap_entry_t *encap_filter = + &ef10_filter_encap_list[i]; + + /* + * Skip multicast filters if we've not been asked for + * any multicast traffic. 
+ */ + if ((mulcst == B_FALSE) && + (encap_filter->inner_frame_match == + EFX_FILTER_INNER_FRAME_MATCH_UNKNOWN_MCAST_DST)) + continue; + + efx_filter_spec_init_rx(&spec, EFX_FILTER_PRI_AUTO, + filter_flags, + table->eft_default_rxq); + efx_filter_spec_set_ether_type(&spec, encap_filter->ether_type); + rc = efx_filter_spec_set_encap_type(&spec, + encap_filter->encap_type, + encap_filter->inner_frame_match); + if (rc != 0) + goto fail1; + + rc = ef10_filter_add_internal(enp, &spec, + EFX_FILTER_REPLACEMENT_NEVER, + &table->eft_encap_filter_indexes[ + table->eft_encap_filter_count]); + if (rc != 0) { + if (rc != EACCES) + goto fail2; + } else { + table->eft_encap_filter_count++; + } + } + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +static void +ef10_filter_remove_old( + __in efx_nic_t *enp) +{ + ef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table; + uint32_t i; + + for (i = 0; i < EFX_ARRAY_SIZE(table->eft_entry); i++) { + if (ef10_filter_entry_is_auto_old(table, i)) { + ef10_filter_delete_auto(enp, i); + } + } +} + + +static __checkReturn efx_rc_t +ef10_filter_get_workarounds( + __in efx_nic_t *enp) +{ + efx_nic_cfg_t *encp = &enp->en_nic_cfg; + uint32_t implemented = 0; + uint32_t enabled = 0; + efx_rc_t rc; + + rc = efx_mcdi_get_workarounds(enp, &implemented, &enabled); + if (rc == 0) { + /* Check if chained multicast filter support is enabled */ + if (implemented & enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807) + encp->enc_bug26807_workaround = B_TRUE; + else + encp->enc_bug26807_workaround = B_FALSE; + } else if (rc == ENOTSUP) { + /* + * Firmware is too old to support GET_WORKAROUNDS, and support + * for this workaround was implemented later. + */ + encp->enc_bug26807_workaround = B_FALSE; + } else { + goto fail1; + } + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); + +} + +static void +ef10_filter_remove_all_existing_filters( + __in efx_nic_t *enp) +{ + ef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table; + efx_port_t *epp = &(enp->en_port); + unsigned int i; + + for (i = 0; i < table->eft_unicst_filter_count; i++) { + ef10_filter_delete_auto(enp, + table->eft_unicst_filter_indexes[i]); + } + table->eft_unicst_filter_count = 0; + + for (i = 0; i < table->eft_mulcst_filter_count; i++) { + ef10_filter_delete_auto(enp, + table->eft_mulcst_filter_indexes[i]); + } + table->eft_mulcst_filter_count = 0; + + for (i = 0; i < table->eft_encap_filter_count; i++) { + ef10_filter_delete_auto(enp, + table->eft_encap_filter_indexes[i]); + } + table->eft_encap_filter_count = 0; + + epp->ep_all_unicst_inserted = B_FALSE; + epp->ep_all_mulcst_inserted = B_FALSE; +} + +static void +ef10_filter_mark_old_filters( + __in efx_nic_t *enp) +{ + ef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table; + unsigned int i; + + for (i = 0; i < table->eft_unicst_filter_count; i++) { + ef10_filter_set_entry_auto_old(table, + table->eft_unicst_filter_indexes[i]); + } + for (i = 0; i < table->eft_mulcst_filter_count; i++) { + ef10_filter_set_entry_auto_old(table, + table->eft_mulcst_filter_indexes[i]); + } + for (i = 0; i < table->eft_encap_filter_count; i++) { + ef10_filter_set_entry_auto_old(table, + table->eft_encap_filter_indexes[i]); + } +} + +static __checkReturn efx_rc_t +ef10_filter_insert_renew_unicst_filters( + __in efx_nic_t *enp, + __in_ecount(6) uint8_t const *mac_addr, + __in boolean_t all_unicst, + __in efx_filter_flags_t filter_flags, + __out boolean_t 
*all_unicst_inserted) +{ + ef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table; + efx_port_t *epp = &(enp->en_port); + efx_rc_t rc; + + /* + * Firmware does not perform chaining on unicast filters. As traffic is + * therefore only delivered to the first matching filter, we should + * always insert the specific filter for our MAC address, to try and + * ensure we get that traffic. + * + * (If the filter for our MAC address has already been inserted by + * another function, we won't receive traffic sent to us, even if we + * insert a unicast mismatch filter. To prevent traffic stealing, this + * therefore relies on the privilege model only allowing functions to + * insert filters for their own MAC address unless explicitly given + * additional privileges by the user. This also means that, even on a + * privileged function, inserting a unicast mismatch filter may not + * catch all traffic in multi PCI function scenarios.) + */ + table->eft_unicst_filter_count = 0; + rc = ef10_filter_insert_unicast(enp, mac_addr, filter_flags); + *all_unicst_inserted = B_FALSE; + if (all_unicst || (rc != 0)) { + efx_rc_t all_unicst_rc; + + all_unicst_rc = ef10_filter_insert_all_unicast(enp, + filter_flags); + if (all_unicst_rc == 0) { + *all_unicst_inserted = B_TRUE; + epp->ep_all_unicst_inserted = B_TRUE; + } else if (rc != 0) + goto fail1; + } + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +static __checkReturn efx_rc_t +ef10_filter_insert_renew_mulcst_filters( + __in efx_nic_t *enp, + __in boolean_t mulcst, + __in boolean_t all_mulcst, + __in boolean_t brdcst, + __in_ecount(6*count) uint8_t const *addrs, + __in uint32_t count, + __in efx_filter_flags_t filter_flags, + __in boolean_t all_unicst_inserted, + __out boolean_t *all_mulcst_inserted) +{ + ef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table; + efx_nic_cfg_t *encp = &enp->en_nic_cfg; + efx_port_t *epp = &(enp->en_port); + efx_rc_t rc; + + *all_mulcst_inserted = B_FALSE; + + if (all_mulcst == B_TRUE) { + efx_rc_t all_mulcst_rc; + + /* + * Insert the all multicast filter. If that fails, try to insert + * all of our multicast filters (but without rollback on + * failure). + */ + all_mulcst_rc = ef10_filter_insert_all_multicast(enp, + filter_flags); + if (all_mulcst_rc == 0) { + epp->ep_all_mulcst_inserted = B_TRUE; + *all_mulcst_inserted = B_TRUE; + } else { + rc = ef10_filter_insert_multicast_list(enp, B_TRUE, + brdcst, addrs, count, filter_flags, B_FALSE); + if (rc != 0) + goto fail1; + } + } else { + /* + * Insert filters for multicast addresses. + * If any insertion fails, then rollback and try to insert the + * all multicast filter instead. + * If that also fails, try to insert all of the multicast + * filters (but without rollback on failure). + */ + rc = ef10_filter_insert_multicast_list(enp, mulcst, brdcst, + addrs, count, filter_flags, B_TRUE); + if (rc != 0) { + if ((table->eft_using_all_mulcst == B_FALSE) && + (encp->enc_bug26807_workaround == B_TRUE)) { + /* + * Multicast filter chaining is on, so remove + * old filters before inserting the multicast + * all filter to avoid duplicate delivery caused + * by packets matching multiple filters. 
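+				 * (With chaining enabled, a packet that
+				 * matches several multicast filters is
+				 * replicated and delivered once per match
+				 * rather than only to the first matching
+				 * filter.)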
+ */ + ef10_filter_remove_old(enp); + if (all_unicst_inserted == B_FALSE) + epp->ep_all_unicst_inserted = B_FALSE; + if (*all_mulcst_inserted == B_FALSE) + epp->ep_all_mulcst_inserted = B_FALSE; + } + + rc = ef10_filter_insert_all_multicast(enp, + filter_flags); + if (rc == 0) { + epp->ep_all_mulcst_inserted = B_TRUE; + *all_mulcst_inserted = B_TRUE; + } else { + rc = ef10_filter_insert_multicast_list(enp, + mulcst, brdcst, + addrs, count, filter_flags, B_FALSE); + if (rc != 0) + goto fail2; + } + } + } + + return (0); + +fail2: + EFSYS_PROBE1(fail2, efx_rc_t, rc); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +/* + * Reconfigure all filters. + * If all_unicst and/or all mulcst filters cannot be applied then + * return ENOTSUP (Note the filters for the specified addresses are + * still applied in this case). + */ + __checkReturn efx_rc_t +ef10_filter_reconfigure( + __in efx_nic_t *enp, + __in_ecount(6) uint8_t const *mac_addr, + __in boolean_t all_unicst, + __in boolean_t mulcst, + __in boolean_t all_mulcst, + __in boolean_t brdcst, + __in_ecount(6*count) uint8_t const *addrs, + __in uint32_t count) +{ + efx_nic_cfg_t *encp = &enp->en_nic_cfg; + efx_port_t *epp = &(enp->en_port); + ef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table; + efx_filter_flags_t filter_flags; + unsigned int i; + boolean_t all_unicst_inserted = B_FALSE; + boolean_t all_mulcst_inserted = B_FALSE; + efx_rc_t rc; + + if (table->eft_default_rxq == NULL) { + /* + * Filters direct traffic to the default RXQ, and so cannot be + * inserted until it is available. Any currently configured + * filters must be removed (ignore errors in case the MC + * has rebooted, which removes hardware filters). + */ + ef10_filter_remove_all_existing_filters(enp); + return (0); + } + + if (table->eft_using_rss) + filter_flags = EFX_FILTER_FLAG_RX_RSS; + else + filter_flags = 0; + + /* Mark old filters which may need to be removed */ + ef10_filter_mark_old_filters(enp); + + /* Insert or renew unicast filters */ + rc = ef10_filter_insert_renew_unicst_filters(enp, mac_addr, all_unicst, + filter_flags, + &all_unicst_inserted); + if (rc != 0) + goto fail1; + + /* + * WORKAROUND_BUG26807 controls firmware support for chained multicast + * filters, and can only be enabled or disabled when the hardware filter + * table is empty. + * + * Chained multicast filters require support from the datapath firmware, + * and may not be available (e.g. low-latency variants or old Huntington + * firmware). + * + * Firmware will reset (FLR) functions which have inserted filters in + * the hardware filter table when the workaround is enabled/disabled. + * Functions without any hardware filters are not reset. + * + * Re-check if the workaround is enabled after adding unicast hardware + * filters. This ensures that encp->enc_bug26807_workaround matches the + * firmware state, and that later changes to enable/disable the + * workaround will result in this function seeing a reset (FLR). + * + * In common-code drivers, we only support multiple PCI function + * scenarios with firmware that supports multicast chaining, so we can + * assume it is enabled for such cases and hence simplify the filter + * insertion logic. Firmware that does not support multicast chaining + * does not support multiple PCI function configurations either, so + * filter insertion is much simpler and the same strategies can still be + * used. 
+ */ + if ((rc = ef10_filter_get_workarounds(enp)) != 0) + goto fail2; + + if ((table->eft_using_all_mulcst != all_mulcst) && + (encp->enc_bug26807_workaround == B_TRUE)) { + /* + * Multicast filter chaining is enabled, so traffic that matches + * more than one multicast filter will be replicated and + * delivered to multiple recipients. To avoid this duplicate + * delivery, remove old multicast filters before inserting new + * multicast filters. + */ + ef10_filter_remove_old(enp); + if (all_unicst_inserted == B_FALSE) + epp->ep_all_unicst_inserted = B_FALSE; + + epp->ep_all_mulcst_inserted = B_FALSE; + } + + /* Insert or renew multicast filters */ + rc = ef10_filter_insert_renew_mulcst_filters(enp, mulcst, all_mulcst, + brdcst, addrs, count, + filter_flags, + all_unicst_inserted, + &all_mulcst_inserted); + if (rc != 0) + goto fail3; + + if (encp->enc_tunnel_encapsulations_supported != 0) { + /* Try to insert filters for encapsulated packets. */ + (void) ef10_filter_insert_encap_filters(enp, + mulcst || all_mulcst || brdcst, + filter_flags); + } + + /* Remove old filters which were not renewed */ + ef10_filter_remove_old(enp); + if (all_unicst_inserted == B_FALSE) + epp->ep_all_unicst_inserted = B_FALSE; + if (all_mulcst_inserted == B_FALSE) + epp->ep_all_mulcst_inserted = B_FALSE; + + /* report if any optional flags were rejected */ + if (((all_unicst != B_FALSE) && (all_unicst_inserted == B_FALSE)) || + ((all_mulcst != B_FALSE) && (all_mulcst_inserted == B_FALSE))) { + rc = ENOTSUP; + } + + return (rc); + +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + /* Clear auto old flags */ + for (i = 0; i < EFX_ARRAY_SIZE(table->eft_entry); i++) { + if (ef10_filter_entry_is_auto_old(table, i)) { + ef10_filter_set_entry_not_auto_old(table, i); + } + } + + return (rc); +} + + void +ef10_filter_get_default_rxq( + __in efx_nic_t *enp, + __out efx_rxq_t **erpp, + __out boolean_t *using_rss) +{ + ef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table; + + *erpp = table->eft_default_rxq; + *using_rss = table->eft_using_rss; +} + + + void +ef10_filter_default_rxq_set( + __in efx_nic_t *enp, + __in efx_rxq_t *erp, + __in boolean_t using_rss) +{ + ef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table; + +#if EFSYS_OPT_RX_SCALE + EFSYS_ASSERT((using_rss == B_FALSE) || + (enp->en_rss_context != EF10_RSS_CONTEXT_INVALID)); + table->eft_using_rss = using_rss; +#else + EFSYS_ASSERT(using_rss == B_FALSE); + table->eft_using_rss = B_FALSE; +#endif + table->eft_default_rxq = erp; +} + + void +ef10_filter_default_rxq_clear( + __in efx_nic_t *enp) +{ + ef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table; + + table->eft_default_rxq = NULL; + table->eft_using_rss = B_FALSE; +} + + +#endif /* EFSYS_OPT_FILTER */ + +#endif /* EFX_OPTS_EF10() */ diff --git a/src/spdk/dpdk/drivers/net/sfc/base/ef10_firmware_ids.h b/src/spdk/dpdk/drivers/net/sfc/base/ef10_firmware_ids.h new file mode 100644 index 000000000..82fa7f9b5 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/base/ef10_firmware_ids.h @@ -0,0 +1,184 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2012-2019 Solarflare Communications Inc. + */ + +/* + * This is NOT the original source file. Do NOT edit it. + * To update the board and firmware ids, please edit the copy in + * the sfregistry repo and then, in that repo, + * "make id_headers" or "make export" to + * regenerate and export all types of headers. 
+ */ + +#ifndef CI_MGMT_FIRMWARE_IDS_H +#define CI_MGMT_FIRMWARE_IDS_H + +/* Reference: SF-103588-PS + * + * This header file is the input for v5s/scripts/genfwdef. So if you touch it, + * ensure that v5/scripts/genfwdef still works. + */ + +enum { + FIRMWARE_TYPE_PHY = 0, + FIRMWARE_TYPE_PHY_LOADER = 1, + FIRMWARE_TYPE_BOOTROM = 2, + FIRMWARE_TYPE_MCFW = 3, + FIRMWARE_TYPE_MCFW_BACKUP = 4, + FIRMWARE_TYPE_DISABLED_CALLISTO = 5, + FIRMWARE_TYPE_FPGA = 6, + FIRMWARE_TYPE_FPGA_BACKUP = 7, + FIRMWARE_TYPE_FCFW = 8, + FIRMWARE_TYPE_FCFW_BACKUP = 9, + FIRMWARE_TYPE_CPLD = 10, + FIRMWARE_TYPE_MUMFW = 11, + FIRMWARE_TYPE_UEFIROM = 12, + FIRMWARE_TYPE_BUNDLE = 13, + FIRMWARE_TYPE_CMCFW = 14, +}; + +enum { + FIRMWARE_PHY_SUBTYPE_SFX7101B = 0x3, + FIRMWARE_PHY_SUBTYPE_SFT9001A = 0x8, + FIRMWARE_PHY_SUBTYPE_QT2025C = 0x9, + FIRMWARE_PHY_SUBTYPE_SFT9001B = 0xa, + FIRMWARE_PHY_SUBTYPE_SFL9021 = 0x10, /* used for loader only */ + FIRMWARE_PHY_SUBTYPE_QT2025_KR = 0x11, /* QT2025 in KR rather than SFP+ mode */ + FIRMWARE_PHY_SUBTYPE_AEL3020 = 0x12, /* As seen on the R2 HP blade NIC */ +}; + +enum { + FIRMWARE_BOOTROM_SUBTYPE_FALCON = 0, + FIRMWARE_BOOTROM_SUBTYPE_BETHPAGE = 1, + FIRMWARE_BOOTROM_SUBTYPE_SIENA = 2, + FIRMWARE_BOOTROM_SUBTYPE_HUNTINGTON = 3, + FIRMWARE_BOOTROM_SUBTYPE_FARMINGDALE = 4, + FIRMWARE_BOOTROM_SUBTYPE_GREENPORT = 5, + FIRMWARE_BOOTROM_SUBTYPE_MEDFORD = 6, + FIRMWARE_BOOTROM_SUBTYPE_MEDFORD2 = 7, + FIRMWARE_BOOTROM_SUBTYPE_RIVERHEAD = 8, +}; + +enum { + FIRMWARE_MCFW_SUBTYPE_COSIM = 0, + FIRMWARE_MCFW_SUBTYPE_HALFSPEED = 6, + FIRMWARE_MCFW_SUBTYPE_FLORENCE = 7, + FIRMWARE_MCFW_SUBTYPE_ZEBEDEE = 8, + FIRMWARE_MCFW_SUBTYPE_ERMINTRUDE = 9, + FIRMWARE_MCFW_SUBTYPE_DYLAN = 10, + FIRMWARE_MCFW_SUBTYPE_BRIAN = 11, + FIRMWARE_MCFW_SUBTYPE_DOUGAL = 12, + FIRMWARE_MCFW_SUBTYPE_MR_RUSTY = 13, + FIRMWARE_MCFW_SUBTYPE_BUXTON = 14, + FIRMWARE_MCFW_SUBTYPE_HOPE = 15, + FIRMWARE_MCFW_SUBTYPE_MR_MCHENRY = 16, + FIRMWARE_MCFW_SUBTYPE_UNCLE_HAMISH = 17, + FIRMWARE_MCFW_SUBTYPE_TUTTLE = 18, + FIRMWARE_MCFW_SUBTYPE_FINLAY = 19, + FIRMWARE_MCFW_SUBTYPE_KAPTEYN = 20, + FIRMWARE_MCFW_SUBTYPE_JOHNSON = 21, + FIRMWARE_MCFW_SUBTYPE_GEHRELS = 22, + FIRMWARE_MCFW_SUBTYPE_WHIPPLE = 23, + FIRMWARE_MCFW_SUBTYPE_FORBES = 24, + FIRMWARE_MCFW_SUBTYPE_LONGMORE = 25, + FIRMWARE_MCFW_SUBTYPE_HERSCHEL = 26, + FIRMWARE_MCFW_SUBTYPE_SHOEMAKER = 27, + FIRMWARE_MCFW_SUBTYPE_IKEYA = 28, + FIRMWARE_MCFW_SUBTYPE_KOWALSKI = 29, + FIRMWARE_MCFW_SUBTYPE_NIMRUD = 30, + FIRMWARE_MCFW_SUBTYPE_SPARTA = 31, + FIRMWARE_MCFW_SUBTYPE_THEBES = 32, + FIRMWARE_MCFW_SUBTYPE_ICARUS = 33, + FIRMWARE_MCFW_SUBTYPE_JERICHO = 34, + FIRMWARE_MCFW_SUBTYPE_BYBLOS = 35, + FIRMWARE_MCFW_SUBTYPE_GROAT = 36, + FIRMWARE_MCFW_SUBTYPE_SHILLING = 37, + FIRMWARE_MCFW_SUBTYPE_FLORIN = 38, + FIRMWARE_MCFW_SUBTYPE_THREEPENCE = 39, + FIRMWARE_MCFW_SUBTYPE_CYCLOPS = 40, + FIRMWARE_MCFW_SUBTYPE_PENNY = 41, + FIRMWARE_MCFW_SUBTYPE_BOB = 42, + FIRMWARE_MCFW_SUBTYPE_HOG = 43, + FIRMWARE_MCFW_SUBTYPE_SOVEREIGN = 44, + FIRMWARE_MCFW_SUBTYPE_SOLIDUS = 45, + FIRMWARE_MCFW_SUBTYPE_SIXPENCE = 46, + FIRMWARE_MCFW_SUBTYPE_CROWN = 47, + FIRMWARE_MCFW_SUBTYPE_SOL = 48, + FIRMWARE_MCFW_SUBTYPE_TANNER = 49, + FIRMWARE_MCFW_SUBTYPE_BELUGA = 64, + FIRMWARE_MCFW_SUBTYPE_KALUGA = 65, +}; + +enum { + FIRMWARE_DISABLED_CALLISTO_SUBTYPE_ALL = 0 +}; + +enum { + FIRMWARE_FPGA_SUBTYPE_PTP = 1, /* PTP peripheral */ + FIRMWARE_FPGA_SUBTYPE_PTP_MR_MCHENRY = 2, /* PTP peripheral on R7 boards */ + FIRMWARE_FPGA_SUBTYPE_FLORENCE = 3, /* Modena FPGA */ + FIRMWARE_FPGA_SUBTYPE_UNCLE_HAMISH = 4, /* 
Modena FPGA: Unknown silicon */ + FIRMWARE_FPGA_SUBTYPE_UNCLE_HAMISH_A7 = 5, /* Modena FPGA: A7 silicon */ + FIRMWARE_FPGA_SUBTYPE_UNCLE_HAMISH_A5 = 6, /* Modena FPGA: A5 silicon */ + FIRMWARE_FPGA_SUBTYPE_SHOEMAKER = 7, /* Sorrento FPGA: Unknown silicon */ + FIRMWARE_FPGA_SUBTYPE_SHOEMAKER_A5 = 8, /* Sorrento FPGA: A5 silicon */ + FIRMWARE_FPGA_SUBTYPE_SHOEMAKER_A7 = 9, /* Sorrento FPGA: A7 silicon */ +}; + +enum { + FIRMWARE_FCFW_SUBTYPE_MODENA = 1, + FIRMWARE_FCFW_SUBTYPE_SORRENTO = 2, +}; + +enum { + FIRMWARE_CPLD_SUBTYPE_SFA6902 = 1, /* CPLD on Modena (2-port) */ +}; + +enum { + FIRMWARE_LICENSE_SUBTYPE_AOE = 1, /* AOE */ +}; + +enum { + FIRMWARE_MUMFW_SUBTYPE_MADAM_BLUE = 1, /* Sorrento MUM firmware */ + FIRMWARE_MUMFW_SUBTYPE_ICARUS = 2, /* Malaga MUM firmware */ + FIRMWARE_MUMFW_SUBTYPE_JERICHO = 3, /* Emma MUM firmware */ + FIRMWARE_MUMFW_SUBTYPE_BYBLOS = 4, /* Pagnell MUM firmware */ + FIRMWARE_MUMFW_SUBTYPE_SHILLING = 5, /* Bradford R1.x MUM firmware */ + FIRMWARE_MUMFW_SUBTYPE_FLORIN = 6, /* Bingley MUM firmware */ + FIRMWARE_MUMFW_SUBTYPE_THREEPENCE = 7, /* Baildon MUM firmware */ + FIRMWARE_MUMFW_SUBTYPE_CYCLOPS = 8, /* Talbot MUM firmware */ + FIRMWARE_MUMFW_SUBTYPE_PENNY = 9, /* Batley MUM firmware */ + FIRMWARE_MUMFW_SUBTYPE_BOB = 10, /* Bradford R2.x MUM firmware */ + FIRMWARE_MUMFW_SUBTYPE_HOG = 11, /* Roxburgh MUM firmware */ + FIRMWARE_MUMFW_SUBTYPE_SOVEREIGN = 12, /* Stirling MUM firmware */ + FIRMWARE_MUMFW_SUBTYPE_SOLIDUS = 13, /* Roxburgh R2 MUM firmware */ + FIRMWARE_MUMFW_SUBTYPE_SIXPENCE = 14, /* Melrose MUM firmware for Dell cards */ + FIRMWARE_MUMFW_SUBTYPE_CROWN = 15, /* Coldstream MUM firmware */ + FIRMWARE_MUMFW_SUBTYPE_SOL = 16, /* Roxburgh R2 MUM firmware for Dell cards with signed-bundle-update */ + FIRMWARE_MUMFW_SUBTYPE_KALUGA = 17, /* York MUM firmware */ + FIRMWARE_MUMFW_SUBTYPE_STERLET = 18, /* Bourn MUM firmware */ + FIRMWARE_MUMFW_SUBTYPE_TANNER = 19, /* Melrose MUM firmware for channel cards */ + +}; + + +#define FIRMWARE_UEFIROM_SUBTYPE_ALL FIRMWARE_UEFIROM_SUBTYPE_EF10 +enum { + FIRMWARE_UEFIROM_SUBTYPE_EF10 = 0, +}; + +enum { + FIRMWARE_BUNDLE_SUBTYPE_DELL_X2522_25G = 1, /* X2522-25G for Dell with bundle update support */ + FIRMWARE_BUNDLE_SUBTYPE_X2552 = 2, /* X2552 OCP NIC - firmware bundle */ + FIRMWARE_BUNDLE_SUBTYPE_DELL_X2562 = 3, /* X2562 OCP NIC for Dell - firmware bundle */ + FIRMWARE_BUNDLE_SUBTYPE_X2562 = 4, /* X2562 OCP NIC - firmware bundle */ +}; + +enum { + FIRMWARE_CMCFW_SUBTYPE_BELUGA = 1, /* Riverhead VCU1525 CMC firmware */ + FIRMWARE_CMCFW_SUBTYPE_KALUGA = 2, /* York (X3x42) board CMC firmware */ +}; + +#endif diff --git a/src/spdk/dpdk/drivers/net/sfc/base/ef10_image.c b/src/spdk/dpdk/drivers/net/sfc/base/ef10_image.c new file mode 100644 index 000000000..9204da13b --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/base/ef10_image.c @@ -0,0 +1,904 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2017-2019 Solarflare Communications Inc. + */ + +#include "efx.h" +#include "efx_impl.h" + +#include "ef10_firmware_ids.h" + +#if EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 + +#if EFSYS_OPT_IMAGE_LAYOUT + +/* + * Utility routines to support limited parsing of ASN.1 tags. This is not a + * general purpose ASN.1 parser, but is sufficient to locate the required + * objects in a signed image with CMS headers. 
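+ *
+ * Illustrative DER length encodings (for reference only): the bytes
+ * 30 05 ... describe a SEQUENCE with a short-form length, so hdr_size = 2
+ * and val_size = 5; the bytes 30 82 01 00 ... use a long-form length
+ * (0x82 means two length octets follow), so hdr_size = 4 and
+ * val_size = 0x0100 = 256.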
+ */ + +/* DER encodings for ASN.1 tags (see ITU-T X.690) */ +#define ASN1_TAG_INTEGER (0x02) +#define ASN1_TAG_OCTET_STRING (0x04) +#define ASN1_TAG_OBJ_ID (0x06) +#define ASN1_TAG_SEQUENCE (0x30) +#define ASN1_TAG_SET (0x31) + +#define ASN1_TAG_IS_PRIM(tag) ((tag & 0x20) == 0) + +#define ASN1_TAG_PRIM_CONTEXT(n) (0x80 + (n)) +#define ASN1_TAG_CONS_CONTEXT(n) (0xA0 + (n)) + +typedef struct efx_asn1_cursor_s { + uint8_t *buffer; + uint32_t length; + + uint8_t tag; + uint32_t hdr_size; + uint32_t val_size; +} efx_asn1_cursor_t; + + +/* Parse header of DER encoded ASN.1 TLV and match tag */ +static __checkReturn efx_rc_t +efx_asn1_parse_header_match_tag( + __inout efx_asn1_cursor_t *cursor, + __in uint8_t tag) +{ + efx_rc_t rc; + + if (cursor == NULL || cursor->buffer == NULL || cursor->length < 2) { + rc = EINVAL; + goto fail1; + } + + cursor->tag = cursor->buffer[0]; + if (cursor->tag != tag) { + /* Tag not matched */ + rc = ENOENT; + goto fail2; + } + + if ((cursor->tag & 0x1F) == 0x1F) { + /* Long tag format not used in CMS syntax */ + rc = EINVAL; + goto fail3; + } + + if ((cursor->buffer[1] & 0x80) == 0) { + /* Short form: length is 0..127 */ + cursor->hdr_size = 2; + cursor->val_size = cursor->buffer[1]; + } else { + /* Long form: length encoded as [0x80+nbytes][length bytes] */ + uint32_t nbytes = cursor->buffer[1] & 0x7F; + uint32_t offset; + + if (nbytes == 0) { + /* Indefinite length not allowed in DER encoding */ + rc = EINVAL; + goto fail4; + } + if (2 + nbytes > cursor->length) { + /* Header length overflows image buffer */ + rc = EINVAL; + goto fail6; + } + if (nbytes > sizeof (uint32_t)) { + /* Length encoding too big */ + rc = E2BIG; + goto fail5; + } + cursor->hdr_size = 2 + nbytes; + cursor->val_size = 0; + for (offset = 2; offset < cursor->hdr_size; offset++) { + cursor->val_size = + (cursor->val_size << 8) | cursor->buffer[offset]; + } + } + + if ((cursor->hdr_size + cursor->val_size) > cursor->length) { + /* Length overflows image buffer */ + rc = E2BIG; + goto fail7; + } + + return (0); + +fail7: + EFSYS_PROBE(fail7); +fail6: + EFSYS_PROBE(fail6); +fail5: + EFSYS_PROBE(fail5); +fail4: + EFSYS_PROBE(fail4); +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +/* Enter nested ASN.1 TLV (contained in value of current TLV) */ +static __checkReturn efx_rc_t +efx_asn1_enter_tag( + __inout efx_asn1_cursor_t *cursor, + __in uint8_t tag) +{ + efx_rc_t rc; + + if (cursor == NULL) { + rc = EINVAL; + goto fail1; + } + + if (ASN1_TAG_IS_PRIM(tag)) { + /* Cannot enter a primitive tag */ + rc = ENOTSUP; + goto fail2; + } + rc = efx_asn1_parse_header_match_tag(cursor, tag); + if (rc != 0) { + /* Invalid TLV or wrong tag */ + goto fail3; + } + + /* Limit cursor range to nested TLV */ + cursor->buffer += cursor->hdr_size; + cursor->length = cursor->val_size; + + return (0); + +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +/* + * Check that the current ASN.1 TLV matches the given tag and value. + * Advance cursor to next TLV on a successful match. 
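+ *
+ * For example, the signed image check below matches a contentType field
+ * against the PKCS7_SignedData OID: a successful match both validates the
+ * field and advances the cursor to the TLV that follows it.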
+ */ +static __checkReturn efx_rc_t +efx_asn1_match_tag_value( + __inout efx_asn1_cursor_t *cursor, + __in uint8_t tag, + __in const void *valp, + __in uint32_t val_size) +{ + efx_rc_t rc; + + if (cursor == NULL) { + rc = EINVAL; + goto fail1; + } + rc = efx_asn1_parse_header_match_tag(cursor, tag); + if (rc != 0) { + /* Invalid TLV or wrong tag */ + goto fail2; + } + if (cursor->val_size != val_size) { + /* Value size is different */ + rc = EINVAL; + goto fail3; + } + if (memcmp(cursor->buffer + cursor->hdr_size, valp, val_size) != 0) { + /* Value content is different */ + rc = EINVAL; + goto fail4; + } + cursor->buffer += cursor->hdr_size + cursor->val_size; + cursor->length -= cursor->hdr_size + cursor->val_size; + + return (0); + +fail4: + EFSYS_PROBE(fail4); +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +/* Advance cursor to next TLV */ +static __checkReturn efx_rc_t +efx_asn1_skip_tag( + __inout efx_asn1_cursor_t *cursor, + __in uint8_t tag) +{ + efx_rc_t rc; + + if (cursor == NULL) { + rc = EINVAL; + goto fail1; + } + + rc = efx_asn1_parse_header_match_tag(cursor, tag); + if (rc != 0) { + /* Invalid TLV or wrong tag */ + goto fail2; + } + cursor->buffer += cursor->hdr_size + cursor->val_size; + cursor->length -= cursor->hdr_size + cursor->val_size; + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +/* Return pointer to value octets and value size from current TLV */ +static __checkReturn efx_rc_t +efx_asn1_get_tag_value( + __inout efx_asn1_cursor_t *cursor, + __in uint8_t tag, + __out uint8_t **valp, + __out uint32_t *val_sizep) +{ + efx_rc_t rc; + + if (cursor == NULL || valp == NULL || val_sizep == NULL) { + rc = EINVAL; + goto fail1; + } + + rc = efx_asn1_parse_header_match_tag(cursor, tag); + if (rc != 0) { + /* Invalid TLV or wrong tag */ + goto fail2; + } + *valp = cursor->buffer + cursor->hdr_size; + *val_sizep = cursor->val_size; + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + +/* + * Utility routines for parsing CMS headers (see RFC2315, PKCS#7) + */ + +/* OID 1.2.840.113549.1.7.2 */ +static const uint8_t PKCS7_SignedData[] = +{ 0x2A, 0x86, 0x48, 0x86, 0xF7, 0x0D, 0x01, 0x07, 0x02 }; + +/* OID 1.2.840.113549.1.7.1 */ +static const uint8_t PKCS7_Data[] = +{ 0x2A, 0x86, 0x48, 0x86, 0xF7, 0x0D, 0x01, 0x07, 0x01 }; + +/* SignedData structure version */ +static const uint8_t SignedData_Version[] = +{ 0x03 }; + +/* + * Check for a valid image in signed image format. This uses CMS syntax + * (see RFC2315, PKCS#7) to provide signatures, and certificates required + * to validate the signatures. The encapsulated content is in unsigned image + * format (reflash header, image code, trailer checksum). 
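+ *
+ * Simplified sketch of the structure traversed below (see RFC2315 for the
+ * full syntax):
+ *
+ *   ContentInfo ::= SEQUENCE {
+ *       contentType          OBJECT IDENTIFIER (pkcs7-signedData),
+ *       content          [0] SignedData }
+ *   SignedData ::= SEQUENCE {
+ *       version              INTEGER,
+ *       digestAlgorithms     SET,
+ *       encapContentInfo     SEQUENCE {
+ *           eContentType     OBJECT IDENTIFIER (pkcs7-data),
+ *           eContent     [0] OCTET STRING },  -- unsigned image bytes
+ *       ... }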
+ */ +static __checkReturn efx_rc_t +efx_check_signed_image_header( + __in void *bufferp, + __in uint32_t buffer_size, + __out uint32_t *content_offsetp, + __out uint32_t *content_lengthp) +{ + efx_asn1_cursor_t cursor; + uint8_t *valp; + uint32_t val_size; + efx_rc_t rc; + + if (content_offsetp == NULL || content_lengthp == NULL) { + rc = EINVAL; + goto fail1; + } + cursor.buffer = (uint8_t *)bufferp; + cursor.length = buffer_size; + + /* ContextInfo */ + rc = efx_asn1_enter_tag(&cursor, ASN1_TAG_SEQUENCE); + if (rc != 0) + goto fail2; + + /* ContextInfo.contentType */ + rc = efx_asn1_match_tag_value(&cursor, ASN1_TAG_OBJ_ID, + PKCS7_SignedData, sizeof (PKCS7_SignedData)); + if (rc != 0) + goto fail3; + + /* ContextInfo.content */ + rc = efx_asn1_enter_tag(&cursor, ASN1_TAG_CONS_CONTEXT(0)); + if (rc != 0) + goto fail4; + + /* SignedData */ + rc = efx_asn1_enter_tag(&cursor, ASN1_TAG_SEQUENCE); + if (rc != 0) + goto fail5; + + /* SignedData.version */ + rc = efx_asn1_match_tag_value(&cursor, ASN1_TAG_INTEGER, + SignedData_Version, sizeof (SignedData_Version)); + if (rc != 0) + goto fail6; + + /* SignedData.digestAlgorithms */ + rc = efx_asn1_skip_tag(&cursor, ASN1_TAG_SET); + if (rc != 0) + goto fail7; + + /* SignedData.encapContentInfo */ + rc = efx_asn1_enter_tag(&cursor, ASN1_TAG_SEQUENCE); + if (rc != 0) + goto fail8; + + /* SignedData.encapContentInfo.econtentType */ + rc = efx_asn1_match_tag_value(&cursor, ASN1_TAG_OBJ_ID, + PKCS7_Data, sizeof (PKCS7_Data)); + if (rc != 0) + goto fail9; + + /* SignedData.encapContentInfo.econtent */ + rc = efx_asn1_enter_tag(&cursor, ASN1_TAG_CONS_CONTEXT(0)); + if (rc != 0) + goto fail10; + + /* + * The octet string contains the image header, image code bytes and + * image trailer CRC (same as unsigned image layout). + */ + valp = NULL; + val_size = 0; + rc = efx_asn1_get_tag_value(&cursor, ASN1_TAG_OCTET_STRING, + &valp, &val_size); + if (rc != 0) + goto fail11; + + if ((valp == NULL) || (val_size == 0)) { + rc = EINVAL; + goto fail12; + } + if (valp < (uint8_t *)bufferp) { + rc = EINVAL; + goto fail13; + } + if ((valp + val_size) > ((uint8_t *)bufferp + buffer_size)) { + rc = EINVAL; + goto fail14; + } + + *content_offsetp = (uint32_t)(valp - (uint8_t *)bufferp); + *content_lengthp = val_size; + + return (0); + +fail14: + EFSYS_PROBE(fail14); +fail13: + EFSYS_PROBE(fail13); +fail12: + EFSYS_PROBE(fail12); +fail11: + EFSYS_PROBE(fail11); +fail10: + EFSYS_PROBE(fail10); +fail9: + EFSYS_PROBE(fail9); +fail8: + EFSYS_PROBE(fail8); +fail7: + EFSYS_PROBE(fail7); +fail6: + EFSYS_PROBE(fail6); +fail5: + EFSYS_PROBE(fail5); +fail4: + EFSYS_PROBE(fail4); +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +static __checkReturn efx_rc_t +efx_check_unsigned_image( + __in void *bufferp, + __in uint32_t buffer_size, + __out efx_image_header_t **headerpp, + __out efx_image_trailer_t **trailerpp) +{ + efx_image_header_t *headerp; + efx_image_trailer_t *trailerp; + uint32_t crc; + efx_rc_t rc; + + EFX_STATIC_ASSERT(sizeof (*headerp) == EFX_IMAGE_HEADER_SIZE); + EFX_STATIC_ASSERT(sizeof (*trailerp) == EFX_IMAGE_TRAILER_SIZE); + + /* Must have at least enough space for required image header fields */ + if (buffer_size < (EFX_FIELD_OFFSET(efx_image_header_t, eih_size) + + sizeof (headerp->eih_size))) { + rc = ENOSPC; + goto fail1; + } + headerp = (efx_image_header_t *)bufferp; + + /* Buffer must have space for image header, code and image trailer. 
*/ + if (buffer_size < (headerp->eih_size + headerp->eih_code_size + + EFX_IMAGE_TRAILER_SIZE)) { + rc = ENOSPC; + goto fail2; + } + + trailerp = (efx_image_trailer_t *)((uint8_t *)headerp + + headerp->eih_size + headerp->eih_code_size); + + *headerpp = headerp; + *trailerpp = trailerp; + + if (headerp->eih_magic != EFX_IMAGE_HEADER_MAGIC) { + rc = EINVAL; + goto fail3; + } + + /* + * Check image header version is same or higher than lowest required + * version. + */ + if (headerp->eih_version < EFX_IMAGE_HEADER_VERSION) { + rc = EINVAL; + goto fail4; + } + + /* Check CRC from image buffer matches computed CRC. */ + crc = efx_crc32_calculate(0, (uint8_t *)headerp, + (headerp->eih_size + headerp->eih_code_size)); + + if (trailerp->eit_crc != crc) { + rc = EINVAL; + goto fail5; + } + + return (0); + +fail5: + EFSYS_PROBE(fail5); +fail4: + EFSYS_PROBE(fail4); +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +efx_check_reflash_image( + __in void *bufferp, + __in uint32_t buffer_size, + __out efx_image_info_t *infop) +{ + efx_image_format_t format = EFX_IMAGE_FORMAT_NO_IMAGE; + uint32_t image_offset; + uint32_t image_size; + void *imagep; + efx_image_header_t *headerp; + efx_image_trailer_t *trailerp; + efx_rc_t rc; + + EFSYS_ASSERT(infop != NULL); + if (infop == NULL) { + rc = EINVAL; + goto fail1; + } + memset(infop, 0, sizeof (*infop)); + + if (bufferp == NULL || buffer_size == 0) { + rc = EINVAL; + goto fail2; + } + + /* + * Check if the buffer contains an image in signed format, and if so, + * locate the image header. + */ + rc = efx_check_signed_image_header(bufferp, buffer_size, + &image_offset, &image_size); + if (rc == 0) { + /* + * Buffer holds signed image format. Check that the encapsulated + * content contains an unsigned image format header. + */ + format = EFX_IMAGE_FORMAT_SIGNED; + } else { + /* Check if the buffer holds image in unsigned image format */ + format = EFX_IMAGE_FORMAT_UNSIGNED; + image_offset = 0; + image_size = buffer_size; + } + if (image_offset + image_size > buffer_size) { + rc = E2BIG; + goto fail3; + } + imagep = (uint8_t *)bufferp + image_offset; + + /* Check image layout (image header, code, image trailer) */ + rc = efx_check_unsigned_image(imagep, image_size, &headerp, &trailerp); + if (rc != 0) + goto fail4; + + /* + * Signed images are packages consumed directly by the firmware, + * with the exception of MC firmware, where the image must be + * rearranged for booting purposes. 
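+	 *
+	 * (The rearrangement of a signed MC firmware image into the chunked
+	 * NVRAM partition layout is performed by
+	 * efx_build_signed_image_write_buffer() later in this file.)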
+ */ + if (format == EFX_IMAGE_FORMAT_SIGNED) { + if (headerp->eih_type != FIRMWARE_TYPE_MCFW) + format = EFX_IMAGE_FORMAT_SIGNED_PACKAGE; + } + + /* Return image details */ + infop->eii_format = format; + infop->eii_imagep = bufferp; + infop->eii_image_size = buffer_size; + infop->eii_headerp = (efx_image_header_t *)imagep; + + return (0); + +fail4: + EFSYS_PROBE(fail4); +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); + infop->eii_format = EFX_IMAGE_FORMAT_INVALID; + infop->eii_imagep = NULL; + infop->eii_image_size = 0; + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +efx_build_signed_image_write_buffer( + __out_bcount(buffer_size) + uint8_t *bufferp, + __in uint32_t buffer_size, + __in efx_image_info_t *infop, + __out efx_image_header_t **headerpp) +{ + signed_image_chunk_hdr_t chunk_hdr; + uint32_t hdr_offset; + struct { + uint32_t offset; + uint32_t size; + } cms_header, image_header, code, image_trailer, signature; + efx_rc_t rc; + + EFSYS_ASSERT((infop != NULL) && (headerpp != NULL)); + + if ((bufferp == NULL) || (buffer_size == 0) || + (infop == NULL) || (headerpp == NULL)) { + /* Invalid arguments */ + rc = EINVAL; + goto fail1; + } + if ((infop->eii_format != EFX_IMAGE_FORMAT_SIGNED) || + (infop->eii_imagep == NULL) || + (infop->eii_headerp == NULL) || + ((uint8_t *)infop->eii_headerp < (uint8_t *)infop->eii_imagep) || + (infop->eii_image_size < EFX_IMAGE_HEADER_SIZE) || + ((size_t)((uint8_t *)infop->eii_headerp - infop->eii_imagep) > + (infop->eii_image_size - EFX_IMAGE_HEADER_SIZE))) { + /* Invalid image info */ + rc = EINVAL; + goto fail2; + } + + /* Locate image chunks in original signed image */ + cms_header.offset = 0; + cms_header.size = + (uint32_t)((uint8_t *)infop->eii_headerp - infop->eii_imagep); + if ((cms_header.size > buffer_size) || + (cms_header.offset > (buffer_size - cms_header.size))) { + rc = EINVAL; + goto fail3; + } + + image_header.offset = cms_header.offset + cms_header.size; + image_header.size = infop->eii_headerp->eih_size; + if ((image_header.size > buffer_size) || + (image_header.offset > (buffer_size - image_header.size))) { + rc = EINVAL; + goto fail4; + } + + code.offset = image_header.offset + image_header.size; + code.size = infop->eii_headerp->eih_code_size; + if ((code.size > buffer_size) || + (code.offset > (buffer_size - code.size))) { + rc = EINVAL; + goto fail5; + } + + image_trailer.offset = code.offset + code.size; + image_trailer.size = EFX_IMAGE_TRAILER_SIZE; + if ((image_trailer.size > buffer_size) || + (image_trailer.offset > (buffer_size - image_trailer.size))) { + rc = EINVAL; + goto fail6; + } + + signature.offset = image_trailer.offset + image_trailer.size; + signature.size = (uint32_t)(infop->eii_image_size - signature.offset); + if ((signature.size > buffer_size) || + (signature.offset > (buffer_size - signature.size))) { + rc = EINVAL; + goto fail7; + } + + EFSYS_ASSERT3U(infop->eii_image_size, ==, cms_header.size + + image_header.size + code.size + image_trailer.size + + signature.size); + + /* BEGIN CSTYLED */ + /* + * Build signed image partition, inserting chunk headers. 
+ * + * Signed Image: Image in NVRAM partition: + * + * +-----------------+ +-----------------+ + * | CMS header | | mcfw.update |<----+ + * +-----------------+ | | | + * | reflash header | +-----------------+ | + * +-----------------+ | chunk header: |-->--|-+ + * | mcfw.update | | REFLASH_TRAILER | | | + * | | +-----------------+ | | + * +-----------------+ +-->| CMS header | | | + * | reflash trailer | | +-----------------+ | | + * +-----------------+ | | chunk header: |->-+ | | + * | signature | | | REFLASH_HEADER | | | | + * +-----------------+ | +-----------------+ | | | + * | | reflash header |<--+ | | + * | +-----------------+ | | + * | | chunk header: |-->--+ | + * | | IMAGE | | + * | +-----------------+ | + * | | reflash trailer |<------+ + * | +-----------------+ + * | | chunk header: | + * | | SIGNATURE |->-+ + * | +-----------------+ | + * | | signature |<--+ + * | +-----------------+ + * | | ...unused... | + * | +-----------------+ + * +-<-| chunk header: | + * >-->| CMS_HEADER | + * +-----------------+ + * + * Each chunk header gives the partition offset and length of the image + * chunk's data. The image chunk data is immediately followed by the + * chunk header for the next chunk. + * + * The data chunk for the firmware code must be at the start of the + * partition (needed for the bootloader). The first chunk header in the + * chain (for the CMS header) is stored at the end of the partition. The + * chain of chunk headers maintains the same logical order of image + * chunks as the original signed image file. This set of constraints + * results in the layout used for the data chunks and chunk headers. + */ + /* END CSTYLED */ + memset(bufferp, 0xFF, buffer_size); + + EFX_STATIC_ASSERT(sizeof (chunk_hdr) == SIGNED_IMAGE_CHUNK_HDR_LEN); + memset(&chunk_hdr, 0, SIGNED_IMAGE_CHUNK_HDR_LEN); + + /* + * CMS header + */ + if (buffer_size < SIGNED_IMAGE_CHUNK_HDR_LEN) { + rc = ENOSPC; + goto fail8; + } + hdr_offset = buffer_size - SIGNED_IMAGE_CHUNK_HDR_LEN; + + chunk_hdr.magic = SIGNED_IMAGE_CHUNK_HDR_MAGIC; + chunk_hdr.version = SIGNED_IMAGE_CHUNK_HDR_VERSION; + chunk_hdr.id = SIGNED_IMAGE_CHUNK_CMS_HEADER; + chunk_hdr.offset = code.size + SIGNED_IMAGE_CHUNK_HDR_LEN; + chunk_hdr.len = cms_header.size; + + memcpy(bufferp + hdr_offset, &chunk_hdr, sizeof (chunk_hdr)); + + if ((chunk_hdr.len > buffer_size) || + (chunk_hdr.offset > (buffer_size - chunk_hdr.len))) { + rc = ENOSPC; + goto fail9; + } + memcpy(bufferp + chunk_hdr.offset, + infop->eii_imagep + cms_header.offset, + cms_header.size); + + /* + * Image header + */ + hdr_offset = chunk_hdr.offset + chunk_hdr.len; + if (hdr_offset > (buffer_size - SIGNED_IMAGE_CHUNK_HDR_LEN)) { + rc = ENOSPC; + goto fail10; + } + chunk_hdr.magic = SIGNED_IMAGE_CHUNK_HDR_MAGIC; + chunk_hdr.version = SIGNED_IMAGE_CHUNK_HDR_VERSION; + chunk_hdr.id = SIGNED_IMAGE_CHUNK_REFLASH_HEADER; + chunk_hdr.offset = hdr_offset + SIGNED_IMAGE_CHUNK_HDR_LEN; + chunk_hdr.len = image_header.size; + + memcpy(bufferp + hdr_offset, &chunk_hdr, SIGNED_IMAGE_CHUNK_HDR_LEN); + + if ((chunk_hdr.len > buffer_size) || + (chunk_hdr.offset > (buffer_size - chunk_hdr.len))) { + rc = ENOSPC; + goto fail11; + } + memcpy(bufferp + chunk_hdr.offset, + infop->eii_imagep + image_header.offset, + image_header.size); + + *headerpp = (efx_image_header_t *)(bufferp + chunk_hdr.offset); + + /* + * Firmware code + */ + hdr_offset = chunk_hdr.offset + chunk_hdr.len; + if (hdr_offset > (buffer_size - SIGNED_IMAGE_CHUNK_HDR_LEN)) { + rc = ENOSPC; + goto fail12; + } + chunk_hdr.magic = 
SIGNED_IMAGE_CHUNK_HDR_MAGIC; + chunk_hdr.version = SIGNED_IMAGE_CHUNK_HDR_VERSION; + chunk_hdr.id = SIGNED_IMAGE_CHUNK_IMAGE; + chunk_hdr.offset = 0; + chunk_hdr.len = code.size; + + memcpy(bufferp + hdr_offset, &chunk_hdr, SIGNED_IMAGE_CHUNK_HDR_LEN); + + if ((chunk_hdr.len > buffer_size) || + (chunk_hdr.offset > (buffer_size - chunk_hdr.len))) { + rc = ENOSPC; + goto fail13; + } + memcpy(bufferp + chunk_hdr.offset, + infop->eii_imagep + code.offset, + code.size); + + /* + * Image trailer (CRC) + */ + chunk_hdr.magic = SIGNED_IMAGE_CHUNK_HDR_MAGIC; + chunk_hdr.version = SIGNED_IMAGE_CHUNK_HDR_VERSION; + chunk_hdr.id = SIGNED_IMAGE_CHUNK_REFLASH_TRAILER; + chunk_hdr.offset = hdr_offset + SIGNED_IMAGE_CHUNK_HDR_LEN; + chunk_hdr.len = image_trailer.size; + + hdr_offset = code.size; + if (hdr_offset > (buffer_size - SIGNED_IMAGE_CHUNK_HDR_LEN)) { + rc = ENOSPC; + goto fail14; + } + + memcpy(bufferp + hdr_offset, &chunk_hdr, SIGNED_IMAGE_CHUNK_HDR_LEN); + + if ((chunk_hdr.len > buffer_size) || + (chunk_hdr.offset > (buffer_size - chunk_hdr.len))) { + rc = ENOSPC; + goto fail15; + } + memcpy((uint8_t *)bufferp + chunk_hdr.offset, + infop->eii_imagep + image_trailer.offset, + image_trailer.size); + + /* + * Signature + */ + hdr_offset = chunk_hdr.offset + chunk_hdr.len; + if (hdr_offset > (buffer_size - SIGNED_IMAGE_CHUNK_HDR_LEN)) { + rc = ENOSPC; + goto fail16; + } + chunk_hdr.magic = SIGNED_IMAGE_CHUNK_HDR_MAGIC; + chunk_hdr.version = SIGNED_IMAGE_CHUNK_HDR_VERSION; + chunk_hdr.id = SIGNED_IMAGE_CHUNK_SIGNATURE; + chunk_hdr.offset = chunk_hdr.offset + SIGNED_IMAGE_CHUNK_HDR_LEN; + chunk_hdr.len = signature.size; + + memcpy(bufferp + hdr_offset, &chunk_hdr, SIGNED_IMAGE_CHUNK_HDR_LEN); + + if ((chunk_hdr.len > buffer_size) || + (chunk_hdr.offset > (buffer_size - chunk_hdr.len))) { + rc = ENOSPC; + goto fail17; + } + memcpy(bufferp + chunk_hdr.offset, + infop->eii_imagep + signature.offset, + signature.size); + + return (0); + +fail17: + EFSYS_PROBE(fail17); +fail16: + EFSYS_PROBE(fail16); +fail15: + EFSYS_PROBE(fail15); +fail14: + EFSYS_PROBE(fail14); +fail13: + EFSYS_PROBE(fail13); +fail12: + EFSYS_PROBE(fail12); +fail11: + EFSYS_PROBE(fail11); +fail10: + EFSYS_PROBE(fail10); +fail9: + EFSYS_PROBE(fail9); +fail8: + EFSYS_PROBE(fail8); +fail7: + EFSYS_PROBE(fail7); +fail6: + EFSYS_PROBE(fail6); +fail5: + EFSYS_PROBE(fail5); +fail4: + EFSYS_PROBE(fail4); +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + + +#endif /* EFSYS_OPT_IMAGE_LAYOUT */ + +#endif /* EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */ diff --git a/src/spdk/dpdk/drivers/net/sfc/base/ef10_impl.h b/src/spdk/dpdk/drivers/net/sfc/base/ef10_impl.h new file mode 100644 index 000000000..0530f62ba --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/base/ef10_impl.h @@ -0,0 +1,1470 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2015-2019 Solarflare Communications Inc. 
+ */ + +#ifndef _SYS_EF10_IMPL_H +#define _SYS_EF10_IMPL_H + +#ifdef __cplusplus +extern "C" { +#endif + +#define EF10_EVQ_MAXNEVS 32768 +#define EF10_EVQ_MINNEVS 512 + +#define EF10_RXQ_MAXNDESCS 4096 +#define EF10_RXQ_MINNDESCS 512 + +#define EF10_TXQ_MINNDESCS 512 + +#define EF10_EVQ_DESC_SIZE (sizeof (efx_qword_t)) +#define EF10_RXQ_DESC_SIZE (sizeof (efx_qword_t)) +#define EF10_TXQ_DESC_SIZE (sizeof (efx_qword_t)) + +/* Number of hardware EVQ buffers (for compile-time resource dimensions) */ +#define EF10_EVQ_MAXNBUFS (64) + +/* Maximum independent of EFX_BUG35388_WORKAROUND. */ +#define EF10_TXQ_MAXNBUFS 8 + +#if EFSYS_OPT_HUNTINGTON +# if (EF10_EVQ_MAXNBUFS < HUNT_EVQ_MAXNBUFS) +# error "EF10_EVQ_MAXNBUFS too small" +# endif +#endif /* EFSYS_OPT_HUNTINGTON */ +#if EFSYS_OPT_MEDFORD +# if (EF10_EVQ_MAXNBUFS < MEDFORD_EVQ_MAXNBUFS) +# error "EF10_EVQ_MAXNBUFS too small" +# endif +#endif /* EFSYS_OPT_MEDFORD */ +#if EFSYS_OPT_MEDFORD2 +# if (EF10_EVQ_MAXNBUFS < MEDFORD2_EVQ_MAXNBUFS) +# error "EF10_EVQ_MAXNBUFS too small" +# endif +#endif /* EFSYS_OPT_MEDFORD2 */ + +/* Number of hardware PIO buffers (for compile-time resource dimensions) */ +#define EF10_MAX_PIOBUF_NBUFS (16) + +#if EFSYS_OPT_HUNTINGTON +# if (EF10_MAX_PIOBUF_NBUFS < HUNT_PIOBUF_NBUFS) +# error "EF10_MAX_PIOBUF_NBUFS too small" +# endif +#endif /* EFSYS_OPT_HUNTINGTON */ +#if EFSYS_OPT_MEDFORD +# if (EF10_MAX_PIOBUF_NBUFS < MEDFORD_PIOBUF_NBUFS) +# error "EF10_MAX_PIOBUF_NBUFS too small" +# endif +#endif /* EFSYS_OPT_MEDFORD */ +#if EFSYS_OPT_MEDFORD2 +# if (EF10_MAX_PIOBUF_NBUFS < MEDFORD2_PIOBUF_NBUFS) +# error "EF10_MAX_PIOBUF_NBUFS too small" +# endif +#endif /* EFSYS_OPT_MEDFORD2 */ + + + +/* + * FIXME: This is just a power of 2 which fits in an MCDI v1 message, and could + * possibly be increased, or the write size reported by newer firmware used + * instead. + */ +#define EF10_NVRAM_CHUNK 0x80 + +/* + * Alignment requirement for value written to RX WPTR: the WPTR must be aligned + * to an 8 descriptor boundary. + */ +#define EF10_RX_WPTR_ALIGN 8 + +/* + * Max byte offset into the packet the TCP header must start for the hardware + * to be able to parse the packet correctly. 
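+ *
+ * For illustration: a VXLAN-encapsulated TCP/IPv4 packet without VLAN tags
+ * places the inner TCP header at offset 14 (outer Ethernet) + 20 (outer
+ * IPv4) + 8 (UDP) + 8 (VXLAN) + 14 (inner Ethernet) + 20 (inner IPv4) =
+ * 84 bytes, well within this limit.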
+ */ +#define EF10_TCP_HEADER_OFFSET_LIMIT 208 + +/* Invalid RSS context handle */ +#define EF10_RSS_CONTEXT_INVALID (0xffffffff) + + +/* EV */ + + __checkReturn efx_rc_t +ef10_ev_init( + __in efx_nic_t *enp); + + void +ef10_ev_fini( + __in efx_nic_t *enp); + + __checkReturn efx_rc_t +ef10_ev_qcreate( + __in efx_nic_t *enp, + __in unsigned int index, + __in efsys_mem_t *esmp, + __in size_t ndescs, + __in uint32_t id, + __in uint32_t us, + __in uint32_t flags, + __in efx_evq_t *eep); + + void +ef10_ev_qdestroy( + __in efx_evq_t *eep); + + __checkReturn efx_rc_t +ef10_ev_qprime( + __in efx_evq_t *eep, + __in unsigned int count); + + void +ef10_ev_qpost( + __in efx_evq_t *eep, + __in uint16_t data); + + __checkReturn efx_rc_t +ef10_ev_qmoderate( + __in efx_evq_t *eep, + __in unsigned int us); + +#if EFSYS_OPT_QSTATS + void +ef10_ev_qstats_update( + __in efx_evq_t *eep, + __inout_ecount(EV_NQSTATS) efsys_stat_t *stat); +#endif /* EFSYS_OPT_QSTATS */ + + void +ef10_ev_rxlabel_init( + __in efx_evq_t *eep, + __in efx_rxq_t *erp, + __in unsigned int label, + __in efx_rxq_type_t type); + + void +ef10_ev_rxlabel_fini( + __in efx_evq_t *eep, + __in unsigned int label); + +/* INTR */ + + __checkReturn efx_rc_t +ef10_intr_init( + __in efx_nic_t *enp, + __in efx_intr_type_t type, + __in efsys_mem_t *esmp); + + void +ef10_intr_enable( + __in efx_nic_t *enp); + + void +ef10_intr_disable( + __in efx_nic_t *enp); + + void +ef10_intr_disable_unlocked( + __in efx_nic_t *enp); + + __checkReturn efx_rc_t +ef10_intr_trigger( + __in efx_nic_t *enp, + __in unsigned int level); + + void +ef10_intr_status_line( + __in efx_nic_t *enp, + __out boolean_t *fatalp, + __out uint32_t *qmaskp); + + void +ef10_intr_status_message( + __in efx_nic_t *enp, + __in unsigned int message, + __out boolean_t *fatalp); + + void +ef10_intr_fatal( + __in efx_nic_t *enp); + void +ef10_intr_fini( + __in efx_nic_t *enp); + +/* NIC */ + +extern __checkReturn efx_rc_t +efx_mcdi_vadaptor_alloc( + __in efx_nic_t *enp, + __in uint32_t port_id); + +extern __checkReturn efx_rc_t +efx_mcdi_vadaptor_free( + __in efx_nic_t *enp, + __in uint32_t port_id); + +extern __checkReturn efx_rc_t +ef10_nic_probe( + __in efx_nic_t *enp); + +extern __checkReturn efx_rc_t +ef10_nic_set_drv_limits( + __inout efx_nic_t *enp, + __in efx_drv_limits_t *edlp); + +extern __checkReturn efx_rc_t +ef10_nic_get_vi_pool( + __in efx_nic_t *enp, + __out uint32_t *vi_countp); + +extern __checkReturn efx_rc_t +ef10_nic_get_bar_region( + __in efx_nic_t *enp, + __in efx_nic_region_t region, + __out uint32_t *offsetp, + __out size_t *sizep); + +extern __checkReturn efx_rc_t +ef10_nic_reset( + __in efx_nic_t *enp); + +extern __checkReturn efx_rc_t +ef10_nic_init( + __in efx_nic_t *enp); + +extern __checkReturn boolean_t +ef10_nic_hw_unavailable( + __in efx_nic_t *enp); + +extern void +ef10_nic_set_hw_unavailable( + __in efx_nic_t *enp); + +#if EFSYS_OPT_DIAG + +extern __checkReturn efx_rc_t +ef10_nic_register_test( + __in efx_nic_t *enp); + +#endif /* EFSYS_OPT_DIAG */ + +extern void +ef10_nic_fini( + __in efx_nic_t *enp); + +extern void +ef10_nic_unprobe( + __in efx_nic_t *enp); + + +/* MAC */ + +extern __checkReturn efx_rc_t +ef10_mac_poll( + __in efx_nic_t *enp, + __out efx_link_mode_t *link_modep); + +extern __checkReturn efx_rc_t +ef10_mac_up( + __in efx_nic_t *enp, + __out boolean_t *mac_upp); + +extern __checkReturn efx_rc_t +ef10_mac_addr_set( + __in efx_nic_t *enp); + +extern __checkReturn efx_rc_t +ef10_mac_pdu_set( + __in efx_nic_t *enp); + +extern __checkReturn efx_rc_t 
+ef10_mac_pdu_get( + __in efx_nic_t *enp, + __out size_t *pdu); + +extern __checkReturn efx_rc_t +ef10_mac_reconfigure( + __in efx_nic_t *enp); + +extern __checkReturn efx_rc_t +ef10_mac_multicast_list_set( + __in efx_nic_t *enp); + +extern __checkReturn efx_rc_t +ef10_mac_filter_default_rxq_set( + __in efx_nic_t *enp, + __in efx_rxq_t *erp, + __in boolean_t using_rss); + +extern void +ef10_mac_filter_default_rxq_clear( + __in efx_nic_t *enp); + +#if EFSYS_OPT_LOOPBACK + +extern __checkReturn efx_rc_t +ef10_mac_loopback_set( + __in efx_nic_t *enp, + __in efx_link_mode_t link_mode, + __in efx_loopback_type_t loopback_type); + +#endif /* EFSYS_OPT_LOOPBACK */ + +#if EFSYS_OPT_MAC_STATS + +extern __checkReturn efx_rc_t +ef10_mac_stats_get_mask( + __in efx_nic_t *enp, + __inout_bcount(mask_size) uint32_t *maskp, + __in size_t mask_size); + +extern __checkReturn efx_rc_t +ef10_mac_stats_update( + __in efx_nic_t *enp, + __in efsys_mem_t *esmp, + __inout_ecount(EFX_MAC_NSTATS) efsys_stat_t *stat, + __inout_opt uint32_t *generationp); + +#endif /* EFSYS_OPT_MAC_STATS */ + + +/* MCDI */ + +#if EFSYS_OPT_MCDI + +extern __checkReturn efx_rc_t +ef10_mcdi_init( + __in efx_nic_t *enp, + __in const efx_mcdi_transport_t *mtp); + +extern void +ef10_mcdi_fini( + __in efx_nic_t *enp); + +extern void +ef10_mcdi_send_request( + __in efx_nic_t *enp, + __in_bcount(hdr_len) void *hdrp, + __in size_t hdr_len, + __in_bcount(sdu_len) void *sdup, + __in size_t sdu_len); + +extern __checkReturn boolean_t +ef10_mcdi_poll_response( + __in efx_nic_t *enp); + +extern void +ef10_mcdi_read_response( + __in efx_nic_t *enp, + __out_bcount(length) void *bufferp, + __in size_t offset, + __in size_t length); + +extern efx_rc_t +ef10_mcdi_poll_reboot( + __in efx_nic_t *enp); + +extern __checkReturn efx_rc_t +ef10_mcdi_feature_supported( + __in efx_nic_t *enp, + __in efx_mcdi_feature_id_t id, + __out boolean_t *supportedp); + +extern void +ef10_mcdi_get_timeout( + __in efx_nic_t *enp, + __in efx_mcdi_req_t *emrp, + __out uint32_t *timeoutp); + +#endif /* EFSYS_OPT_MCDI */ + +/* NVRAM */ + +#if EFSYS_OPT_NVRAM || EFSYS_OPT_VPD + +extern __checkReturn efx_rc_t +ef10_nvram_buf_read_tlv( + __in efx_nic_t *enp, + __in_bcount(max_seg_size) caddr_t seg_data, + __in size_t max_seg_size, + __in uint32_t tag, + __deref_out_bcount_opt(*sizep) caddr_t *datap, + __out size_t *sizep); + +extern __checkReturn efx_rc_t +ef10_nvram_buf_write_tlv( + __inout_bcount(partn_size) caddr_t partn_data, + __in size_t partn_size, + __in uint32_t tag, + __in_bcount(tag_size) caddr_t tag_data, + __in size_t tag_size, + __out size_t *total_lengthp); + +extern __checkReturn efx_rc_t +ef10_nvram_partn_read_tlv( + __in efx_nic_t *enp, + __in uint32_t partn, + __in uint32_t tag, + __deref_out_bcount_opt(*sizep) caddr_t *datap, + __out size_t *sizep); + +extern __checkReturn efx_rc_t +ef10_nvram_partn_write_tlv( + __in efx_nic_t *enp, + __in uint32_t partn, + __in uint32_t tag, + __in_bcount(size) caddr_t data, + __in size_t size); + +extern __checkReturn efx_rc_t +ef10_nvram_partn_write_segment_tlv( + __in efx_nic_t *enp, + __in uint32_t partn, + __in uint32_t tag, + __in_bcount(size) caddr_t data, + __in size_t size, + __in boolean_t all_segments); + +extern __checkReturn efx_rc_t +ef10_nvram_partn_lock( + __in efx_nic_t *enp, + __in uint32_t partn); + +extern __checkReturn efx_rc_t +ef10_nvram_partn_unlock( + __in efx_nic_t *enp, + __in uint32_t partn, + __out_opt uint32_t *resultp); + +#endif /* EFSYS_OPT_NVRAM || EFSYS_OPT_VPD */ + +#if EFSYS_OPT_NVRAM + +#if 
EFSYS_OPT_DIAG + +extern __checkReturn efx_rc_t +ef10_nvram_test( + __in efx_nic_t *enp); + +#endif /* EFSYS_OPT_DIAG */ + +extern __checkReturn efx_rc_t +ef10_nvram_type_to_partn( + __in efx_nic_t *enp, + __in efx_nvram_type_t type, + __out uint32_t *partnp); + +extern __checkReturn efx_rc_t +ef10_nvram_partn_size( + __in efx_nic_t *enp, + __in uint32_t partn, + __out size_t *sizep); + +extern __checkReturn efx_rc_t +ef10_nvram_partn_info( + __in efx_nic_t *enp, + __in uint32_t partn, + __out efx_nvram_info_t * enip); + +extern __checkReturn efx_rc_t +ef10_nvram_partn_rw_start( + __in efx_nic_t *enp, + __in uint32_t partn, + __out size_t *chunk_sizep); + +extern __checkReturn efx_rc_t +ef10_nvram_partn_read_mode( + __in efx_nic_t *enp, + __in uint32_t partn, + __in unsigned int offset, + __out_bcount(size) caddr_t data, + __in size_t size, + __in uint32_t mode); + +extern __checkReturn efx_rc_t +ef10_nvram_partn_read( + __in efx_nic_t *enp, + __in uint32_t partn, + __in unsigned int offset, + __out_bcount(size) caddr_t data, + __in size_t size); + +extern __checkReturn efx_rc_t +ef10_nvram_partn_read_backup( + __in efx_nic_t *enp, + __in uint32_t partn, + __in unsigned int offset, + __out_bcount(size) caddr_t data, + __in size_t size); + +extern __checkReturn efx_rc_t +ef10_nvram_partn_erase( + __in efx_nic_t *enp, + __in uint32_t partn, + __in unsigned int offset, + __in size_t size); + +extern __checkReturn efx_rc_t +ef10_nvram_partn_write( + __in efx_nic_t *enp, + __in uint32_t partn, + __in unsigned int offset, + __in_bcount(size) caddr_t data, + __in size_t size); + +extern __checkReturn efx_rc_t +ef10_nvram_partn_rw_finish( + __in efx_nic_t *enp, + __in uint32_t partn, + __out_opt uint32_t *verify_resultp); + +extern __checkReturn efx_rc_t +ef10_nvram_partn_get_version( + __in efx_nic_t *enp, + __in uint32_t partn, + __out uint32_t *subtypep, + __out_ecount(4) uint16_t version[4]); + +extern __checkReturn efx_rc_t +ef10_nvram_partn_set_version( + __in efx_nic_t *enp, + __in uint32_t partn, + __in_ecount(4) uint16_t version[4]); + +extern __checkReturn efx_rc_t +ef10_nvram_buffer_validate( + __in uint32_t partn, + __in_bcount(buffer_size) + caddr_t bufferp, + __in size_t buffer_size); + +extern void +ef10_nvram_buffer_init( + __out_bcount(buffer_size) + caddr_t bufferp, + __in size_t buffer_size); + +extern __checkReturn efx_rc_t +ef10_nvram_buffer_create( + __in uint32_t partn_type, + __out_bcount(buffer_size) + caddr_t bufferp, + __in size_t buffer_size); + +extern __checkReturn efx_rc_t +ef10_nvram_buffer_find_item_start( + __in_bcount(buffer_size) + caddr_t bufferp, + __in size_t buffer_size, + __out uint32_t *startp); + +extern __checkReturn efx_rc_t +ef10_nvram_buffer_find_end( + __in_bcount(buffer_size) + caddr_t bufferp, + __in size_t buffer_size, + __in uint32_t offset, + __out uint32_t *endp); + +extern __checkReturn __success(return != B_FALSE) boolean_t +ef10_nvram_buffer_find_item( + __in_bcount(buffer_size) + caddr_t bufferp, + __in size_t buffer_size, + __in uint32_t offset, + __out uint32_t *startp, + __out uint32_t *lengthp); + +extern __checkReturn efx_rc_t +ef10_nvram_buffer_peek_item( + __in_bcount(buffer_size) + caddr_t bufferp, + __in size_t buffer_size, + __in uint32_t offset, + __out uint32_t *tagp, + __out uint32_t *lengthp, + __out uint32_t *value_offsetp); + +extern __checkReturn efx_rc_t +ef10_nvram_buffer_get_item( + __in_bcount(buffer_size) + caddr_t bufferp, + __in size_t buffer_size, + __in uint32_t offset, + __in uint32_t length, + __out uint32_t 
*tagp, + __out_bcount_part(value_max_size, *lengthp) + caddr_t valuep, + __in size_t value_max_size, + __out uint32_t *lengthp); + +extern __checkReturn efx_rc_t +ef10_nvram_buffer_insert_item( + __in_bcount(buffer_size) + caddr_t bufferp, + __in size_t buffer_size, + __in uint32_t offset, + __in uint32_t tag, + __in_bcount(length) caddr_t valuep, + __in uint32_t length, + __out uint32_t *lengthp); + +extern __checkReturn efx_rc_t +ef10_nvram_buffer_modify_item( + __in_bcount(buffer_size) + caddr_t bufferp, + __in size_t buffer_size, + __in uint32_t offset, + __in uint32_t tag, + __in_bcount(length) caddr_t valuep, + __in uint32_t length, + __out uint32_t *lengthp); + +extern __checkReturn efx_rc_t +ef10_nvram_buffer_delete_item( + __in_bcount(buffer_size) + caddr_t bufferp, + __in size_t buffer_size, + __in uint32_t offset, + __in uint32_t length, + __in uint32_t end); + +extern __checkReturn efx_rc_t +ef10_nvram_buffer_finish( + __in_bcount(buffer_size) + caddr_t bufferp, + __in size_t buffer_size); + +#endif /* EFSYS_OPT_NVRAM */ + + +/* PHY */ + +typedef struct ef10_link_state_s { + efx_phy_link_state_t epls; +#if EFSYS_OPT_LOOPBACK + efx_loopback_type_t els_loopback; +#endif + boolean_t els_mac_up; +} ef10_link_state_t; + +extern void +ef10_phy_link_ev( + __in efx_nic_t *enp, + __in efx_qword_t *eqp, + __out efx_link_mode_t *link_modep); + +extern __checkReturn efx_rc_t +ef10_phy_get_link( + __in efx_nic_t *enp, + __out ef10_link_state_t *elsp); + +extern __checkReturn efx_rc_t +ef10_phy_power( + __in efx_nic_t *enp, + __in boolean_t on); + +extern __checkReturn efx_rc_t +ef10_phy_reconfigure( + __in efx_nic_t *enp); + +extern __checkReturn efx_rc_t +ef10_phy_verify( + __in efx_nic_t *enp); + +extern __checkReturn efx_rc_t +ef10_phy_oui_get( + __in efx_nic_t *enp, + __out uint32_t *ouip); + +extern __checkReturn efx_rc_t +ef10_phy_link_state_get( + __in efx_nic_t *enp, + __out efx_phy_link_state_t *eplsp); + +#if EFSYS_OPT_PHY_STATS + +extern __checkReturn efx_rc_t +ef10_phy_stats_update( + __in efx_nic_t *enp, + __in efsys_mem_t *esmp, + __inout_ecount(EFX_PHY_NSTATS) uint32_t *stat); + +#endif /* EFSYS_OPT_PHY_STATS */ + +#if EFSYS_OPT_BIST + +extern __checkReturn efx_rc_t +ef10_bist_enable_offline( + __in efx_nic_t *enp); + +extern __checkReturn efx_rc_t +ef10_bist_start( + __in efx_nic_t *enp, + __in efx_bist_type_t type); + +extern __checkReturn efx_rc_t +ef10_bist_poll( + __in efx_nic_t *enp, + __in efx_bist_type_t type, + __out efx_bist_result_t *resultp, + __out_opt __drv_when(count > 0, __notnull) + uint32_t *value_maskp, + __out_ecount_opt(count) __drv_when(count > 0, __notnull) + unsigned long *valuesp, + __in size_t count); + +extern void +ef10_bist_stop( + __in efx_nic_t *enp, + __in efx_bist_type_t type); + +#endif /* EFSYS_OPT_BIST */ + +/* TX */ + +extern __checkReturn efx_rc_t +ef10_tx_init( + __in efx_nic_t *enp); + +extern void +ef10_tx_fini( + __in efx_nic_t *enp); + +extern __checkReturn efx_rc_t +ef10_tx_qcreate( + __in efx_nic_t *enp, + __in unsigned int index, + __in unsigned int label, + __in efsys_mem_t *esmp, + __in size_t ndescs, + __in uint32_t id, + __in uint16_t flags, + __in efx_evq_t *eep, + __in efx_txq_t *etp, + __out unsigned int *addedp); + +extern void +ef10_tx_qdestroy( + __in efx_txq_t *etp); + +extern __checkReturn efx_rc_t +ef10_tx_qpost( + __in efx_txq_t *etp, + __in_ecount(ndescs) efx_buffer_t *ebp, + __in unsigned int ndescs, + __in unsigned int completed, + __inout unsigned int *addedp); + +extern void +ef10_tx_qpush( + __in efx_txq_t 
*etp, + __in unsigned int added, + __in unsigned int pushed); + +#if EFSYS_OPT_RX_PACKED_STREAM +extern void +ef10_rx_qpush_ps_credits( + __in efx_rxq_t *erp); + +extern __checkReturn uint8_t * +ef10_rx_qps_packet_info( + __in efx_rxq_t *erp, + __in uint8_t *buffer, + __in uint32_t buffer_length, + __in uint32_t current_offset, + __out uint16_t *lengthp, + __out uint32_t *next_offsetp, + __out uint32_t *timestamp); +#endif + +extern __checkReturn efx_rc_t +ef10_tx_qpace( + __in efx_txq_t *etp, + __in unsigned int ns); + +extern __checkReturn efx_rc_t +ef10_tx_qflush( + __in efx_txq_t *etp); + +extern void +ef10_tx_qenable( + __in efx_txq_t *etp); + +extern __checkReturn efx_rc_t +ef10_tx_qpio_enable( + __in efx_txq_t *etp); + +extern void +ef10_tx_qpio_disable( + __in efx_txq_t *etp); + +extern __checkReturn efx_rc_t +ef10_tx_qpio_write( + __in efx_txq_t *etp, + __in_ecount(buf_length) uint8_t *buffer, + __in size_t buf_length, + __in size_t pio_buf_offset); + +extern __checkReturn efx_rc_t +ef10_tx_qpio_post( + __in efx_txq_t *etp, + __in size_t pkt_length, + __in unsigned int completed, + __inout unsigned int *addedp); + +extern __checkReturn efx_rc_t +ef10_tx_qdesc_post( + __in efx_txq_t *etp, + __in_ecount(n) efx_desc_t *ed, + __in unsigned int n, + __in unsigned int completed, + __inout unsigned int *addedp); + +extern void +ef10_tx_qdesc_dma_create( + __in efx_txq_t *etp, + __in efsys_dma_addr_t addr, + __in size_t size, + __in boolean_t eop, + __out efx_desc_t *edp); + +extern void +ef10_tx_qdesc_tso_create( + __in efx_txq_t *etp, + __in uint16_t ipv4_id, + __in uint32_t tcp_seq, + __in uint8_t tcp_flags, + __out efx_desc_t *edp); + +extern void +ef10_tx_qdesc_tso2_create( + __in efx_txq_t *etp, + __in uint16_t ipv4_id, + __in uint16_t outer_ipv4_id, + __in uint32_t tcp_seq, + __in uint16_t tcp_mss, + __out_ecount(count) efx_desc_t *edp, + __in int count); + +extern void +ef10_tx_qdesc_vlantci_create( + __in efx_txq_t *etp, + __in uint16_t vlan_tci, + __out efx_desc_t *edp); + +extern void +ef10_tx_qdesc_checksum_create( + __in efx_txq_t *etp, + __in uint16_t flags, + __out efx_desc_t *edp); + +#if EFSYS_OPT_QSTATS + +extern void +ef10_tx_qstats_update( + __in efx_txq_t *etp, + __inout_ecount(TX_NQSTATS) efsys_stat_t *stat); + +#endif /* EFSYS_OPT_QSTATS */ + +typedef uint32_t efx_piobuf_handle_t; + +#define EFX_PIOBUF_HANDLE_INVALID ((efx_piobuf_handle_t)-1) + +extern __checkReturn efx_rc_t +ef10_nic_pio_alloc( + __inout efx_nic_t *enp, + __out uint32_t *bufnump, + __out efx_piobuf_handle_t *handlep, + __out uint32_t *blknump, + __out uint32_t *offsetp, + __out size_t *sizep); + +extern __checkReturn efx_rc_t +ef10_nic_pio_free( + __inout efx_nic_t *enp, + __in uint32_t bufnum, + __in uint32_t blknum); + +extern __checkReturn efx_rc_t +ef10_nic_pio_link( + __inout efx_nic_t *enp, + __in uint32_t vi_index, + __in efx_piobuf_handle_t handle); + +extern __checkReturn efx_rc_t +ef10_nic_pio_unlink( + __inout efx_nic_t *enp, + __in uint32_t vi_index); + + +/* VPD */ + +#if EFSYS_OPT_VPD + +extern __checkReturn efx_rc_t +ef10_vpd_init( + __in efx_nic_t *enp); + +extern __checkReturn efx_rc_t +ef10_vpd_size( + __in efx_nic_t *enp, + __out size_t *sizep); + +extern __checkReturn efx_rc_t +ef10_vpd_read( + __in efx_nic_t *enp, + __out_bcount(size) caddr_t data, + __in size_t size); + +extern __checkReturn efx_rc_t +ef10_vpd_verify( + __in efx_nic_t *enp, + __in_bcount(size) caddr_t data, + __in size_t size); + +extern __checkReturn efx_rc_t +ef10_vpd_reinit( + __in efx_nic_t *enp, + 
__in_bcount(size) caddr_t data, + __in size_t size); + +extern __checkReturn efx_rc_t +ef10_vpd_get( + __in efx_nic_t *enp, + __in_bcount(size) caddr_t data, + __in size_t size, + __inout efx_vpd_value_t *evvp); + +extern __checkReturn efx_rc_t +ef10_vpd_set( + __in efx_nic_t *enp, + __in_bcount(size) caddr_t data, + __in size_t size, + __in efx_vpd_value_t *evvp); + +extern __checkReturn efx_rc_t +ef10_vpd_next( + __in efx_nic_t *enp, + __in_bcount(size) caddr_t data, + __in size_t size, + __out efx_vpd_value_t *evvp, + __inout unsigned int *contp); + +extern __checkReturn efx_rc_t +ef10_vpd_write( + __in efx_nic_t *enp, + __in_bcount(size) caddr_t data, + __in size_t size); + +extern void +ef10_vpd_fini( + __in efx_nic_t *enp); + +#endif /* EFSYS_OPT_VPD */ + + +/* RX */ + +extern __checkReturn efx_rc_t +ef10_rx_init( + __in efx_nic_t *enp); + +#if EFSYS_OPT_RX_SCATTER +extern __checkReturn efx_rc_t +ef10_rx_scatter_enable( + __in efx_nic_t *enp, + __in unsigned int buf_size); +#endif /* EFSYS_OPT_RX_SCATTER */ + + +#if EFSYS_OPT_RX_SCALE + +extern __checkReturn efx_rc_t +ef10_rx_scale_context_alloc( + __in efx_nic_t *enp, + __in efx_rx_scale_context_type_t type, + __in uint32_t num_queues, + __out uint32_t *rss_contextp); + +extern __checkReturn efx_rc_t +ef10_rx_scale_context_free( + __in efx_nic_t *enp, + __in uint32_t rss_context); + +extern __checkReturn efx_rc_t +ef10_rx_scale_mode_set( + __in efx_nic_t *enp, + __in uint32_t rss_context, + __in efx_rx_hash_alg_t alg, + __in efx_rx_hash_type_t type, + __in boolean_t insert); + +extern __checkReturn efx_rc_t +ef10_rx_scale_key_set( + __in efx_nic_t *enp, + __in uint32_t rss_context, + __in_ecount(n) uint8_t *key, + __in size_t n); + +extern __checkReturn efx_rc_t +ef10_rx_scale_tbl_set( + __in efx_nic_t *enp, + __in uint32_t rss_context, + __in_ecount(n) unsigned int *table, + __in size_t n); + +extern __checkReturn uint32_t +ef10_rx_prefix_hash( + __in efx_nic_t *enp, + __in efx_rx_hash_alg_t func, + __in uint8_t *buffer); + +#endif /* EFSYS_OPT_RX_SCALE */ + +extern __checkReturn efx_rc_t +ef10_rx_prefix_pktlen( + __in efx_nic_t *enp, + __in uint8_t *buffer, + __out uint16_t *lengthp); + +extern void +ef10_rx_qpost( + __in efx_rxq_t *erp, + __in_ecount(ndescs) efsys_dma_addr_t *addrp, + __in size_t size, + __in unsigned int ndescs, + __in unsigned int completed, + __in unsigned int added); + +extern void +ef10_rx_qpush( + __in efx_rxq_t *erp, + __in unsigned int added, + __inout unsigned int *pushedp); + +extern __checkReturn efx_rc_t +ef10_rx_qflush( + __in efx_rxq_t *erp); + +extern void +ef10_rx_qenable( + __in efx_rxq_t *erp); + +union efx_rxq_type_data_u; + +extern __checkReturn efx_rc_t +ef10_rx_qcreate( + __in efx_nic_t *enp, + __in unsigned int index, + __in unsigned int label, + __in efx_rxq_type_t type, + __in_opt const union efx_rxq_type_data_u *type_data, + __in efsys_mem_t *esmp, + __in size_t ndescs, + __in uint32_t id, + __in unsigned int flags, + __in efx_evq_t *eep, + __in efx_rxq_t *erp); + +extern void +ef10_rx_qdestroy( + __in efx_rxq_t *erp); + +extern void +ef10_rx_fini( + __in efx_nic_t *enp); + +#if EFSYS_OPT_FILTER + +enum efx_filter_replacement_policy_e; + +typedef struct ef10_filter_handle_s { + uint32_t efh_lo; + uint32_t efh_hi; +} ef10_filter_handle_t; + +typedef struct ef10_filter_entry_s { + uintptr_t efe_spec; /* pointer to filter spec plus busy bit */ + ef10_filter_handle_t efe_handle; +} ef10_filter_entry_t; + +/* + * BUSY flag indicates that an update is in progress. 
+ * AUTO_OLD flag is used to mark and sweep MAC packet filters. + */ +#define EFX_EF10_FILTER_FLAG_BUSY 1U +#define EFX_EF10_FILTER_FLAG_AUTO_OLD 2U +#define EFX_EF10_FILTER_FLAGS 3U + +/* + * Size of the hash table used by the driver. Doesn't need to be the + * same size as the hardware's table. + */ +#define EFX_EF10_FILTER_TBL_ROWS 8192 + +/* Only need to allow for one directed and one unknown unicast filter */ +#define EFX_EF10_FILTER_UNICAST_FILTERS_MAX 2 + +/* Allow for the broadcast address to be added to the multicast list */ +#define EFX_EF10_FILTER_MULTICAST_FILTERS_MAX (EFX_MAC_MULTICAST_LIST_MAX + 1) + +/* + * For encapsulated packets, there is one filter each for each combination of + * IPv4 or IPv6 outer frame, VXLAN, GENEVE or NVGRE packet type, and unicast or + * multicast inner frames. + */ +#define EFX_EF10_FILTER_ENCAP_FILTERS_MAX 12 + +typedef struct ef10_filter_table_s { + ef10_filter_entry_t eft_entry[EFX_EF10_FILTER_TBL_ROWS]; + efx_rxq_t *eft_default_rxq; + boolean_t eft_using_rss; + uint32_t eft_unicst_filter_indexes[ + EFX_EF10_FILTER_UNICAST_FILTERS_MAX]; + uint32_t eft_unicst_filter_count; + uint32_t eft_mulcst_filter_indexes[ + EFX_EF10_FILTER_MULTICAST_FILTERS_MAX]; + uint32_t eft_mulcst_filter_count; + boolean_t eft_using_all_mulcst; + uint32_t eft_encap_filter_indexes[ + EFX_EF10_FILTER_ENCAP_FILTERS_MAX]; + uint32_t eft_encap_filter_count; +} ef10_filter_table_t; + + __checkReturn efx_rc_t +ef10_filter_init( + __in efx_nic_t *enp); + + void +ef10_filter_fini( + __in efx_nic_t *enp); + + __checkReturn efx_rc_t +ef10_filter_restore( + __in efx_nic_t *enp); + + __checkReturn efx_rc_t +ef10_filter_add( + __in efx_nic_t *enp, + __inout efx_filter_spec_t *spec, + __in enum efx_filter_replacement_policy_e policy); + + __checkReturn efx_rc_t +ef10_filter_delete( + __in efx_nic_t *enp, + __inout efx_filter_spec_t *spec); + +extern __checkReturn efx_rc_t +ef10_filter_supported_filters( + __in efx_nic_t *enp, + __out_ecount(buffer_length) uint32_t *buffer, + __in size_t buffer_length, + __out size_t *list_lengthp); + +extern __checkReturn efx_rc_t +ef10_filter_reconfigure( + __in efx_nic_t *enp, + __in_ecount(6) uint8_t const *mac_addr, + __in boolean_t all_unicst, + __in boolean_t mulcst, + __in boolean_t all_mulcst, + __in boolean_t brdcst, + __in_ecount(6*count) uint8_t const *addrs, + __in uint32_t count); + +extern void +ef10_filter_get_default_rxq( + __in efx_nic_t *enp, + __out efx_rxq_t **erpp, + __out boolean_t *using_rss); + +extern void +ef10_filter_default_rxq_set( + __in efx_nic_t *enp, + __in efx_rxq_t *erp, + __in boolean_t using_rss); + +extern void +ef10_filter_default_rxq_clear( + __in efx_nic_t *enp); + + +#endif /* EFSYS_OPT_FILTER */ + +extern __checkReturn efx_rc_t +efx_mcdi_get_function_info( + __in efx_nic_t *enp, + __out uint32_t *pfp, + __out_opt uint32_t *vfp); + +extern __checkReturn efx_rc_t +efx_mcdi_privilege_mask( + __in efx_nic_t *enp, + __in uint32_t pf, + __in uint32_t vf, + __out uint32_t *maskp); + +extern __checkReturn efx_rc_t +efx_mcdi_get_port_assignment( + __in efx_nic_t *enp, + __out uint32_t *portp); + +extern __checkReturn efx_rc_t +efx_mcdi_get_port_modes( + __in efx_nic_t *enp, + __out uint32_t *modesp, + __out_opt uint32_t *current_modep, + __out_opt uint32_t *default_modep); + +extern __checkReturn efx_rc_t +ef10_nic_get_port_mode_bandwidth( + __in efx_nic_t *enp, + __out uint32_t *bandwidth_mbpsp); + +extern __checkReturn efx_rc_t +efx_mcdi_get_mac_address_pf( + __in efx_nic_t *enp, + __out_ecount_opt(6) uint8_t 
mac_addrp[6]); + +extern __checkReturn efx_rc_t +efx_mcdi_get_mac_address_vf( + __in efx_nic_t *enp, + __out_ecount_opt(6) uint8_t mac_addrp[6]); + +extern __checkReturn efx_rc_t +efx_mcdi_get_clock( + __in efx_nic_t *enp, + __out uint32_t *sys_freqp, + __out uint32_t *dpcpu_freqp); + + +extern __checkReturn efx_rc_t +efx_mcdi_get_rxdp_config( + __in efx_nic_t *enp, + __out uint32_t *end_paddingp); + +extern __checkReturn efx_rc_t +efx_mcdi_get_vector_cfg( + __in efx_nic_t *enp, + __out_opt uint32_t *vec_basep, + __out_opt uint32_t *pf_nvecp, + __out_opt uint32_t *vf_nvecp); + +extern __checkReturn efx_rc_t +ef10_get_privilege_mask( + __in efx_nic_t *enp, + __out uint32_t *maskp); + +#if EFSYS_OPT_FW_SUBVARIANT_AWARE + +extern __checkReturn efx_rc_t +efx_mcdi_get_nic_global( + __in efx_nic_t *enp, + __in uint32_t key, + __out uint32_t *valuep); + +extern __checkReturn efx_rc_t +efx_mcdi_set_nic_global( + __in efx_nic_t *enp, + __in uint32_t key, + __in uint32_t value); + +#endif /* EFSYS_OPT_FW_SUBVARIANT_AWARE */ + +#if EFSYS_OPT_EVB +extern __checkReturn efx_rc_t +ef10_evb_init( + __in efx_nic_t *enp); + +extern void +ef10_evb_fini( + __in efx_nic_t *enp); + +extern __checkReturn efx_rc_t +ef10_evb_vswitch_alloc( + __in efx_nic_t *enp, + __out efx_vswitch_id_t *vswitch_idp); + + +extern __checkReturn efx_rc_t +ef10_evb_vswitch_free( + __in efx_nic_t *enp, + __in efx_vswitch_id_t vswitch_id); + +extern __checkReturn efx_rc_t +ef10_evb_vport_alloc( + __in efx_nic_t *enp, + __in efx_vswitch_id_t vswitch_id, + __in efx_vport_type_t vport_type, + __in uint16_t vid, + __in boolean_t vlan_restrict, + __out efx_vport_id_t *vport_idp); + + +extern __checkReturn efx_rc_t +ef10_evb_vport_free( + __in efx_nic_t *enp, + __in efx_vswitch_id_t vswitch_id, + __in efx_vport_id_t vport_id); + +extern __checkReturn efx_rc_t +ef10_evb_vport_mac_addr_add( + __in efx_nic_t *enp, + __in efx_vswitch_id_t vswitch_id, + __in efx_vport_id_t vport_id, + __in_ecount(6) uint8_t *addrp); + +extern __checkReturn efx_rc_t +ef10_evb_vport_mac_addr_del( + __in efx_nic_t *enp, + __in efx_vswitch_id_t vswitch_id, + __in efx_vport_id_t vport_id, + __in_ecount(6) uint8_t *addrp); + +extern __checkReturn efx_rc_t +ef10_evb_vadaptor_alloc( + __in efx_nic_t *enp, + __in efx_vswitch_id_t vswitch_id, + __in efx_vport_id_t vport_id); + + +extern __checkReturn efx_rc_t +ef10_evb_vadaptor_free( + __in efx_nic_t *enp, + __in efx_vswitch_id_t vswitch_id, + __in efx_vport_id_t vport_id); + +extern __checkReturn efx_rc_t +ef10_evb_vport_assign( + __in efx_nic_t *enp, + __in efx_vswitch_id_t vswitch_id, + __in efx_vport_id_t vport_id, + __in uint32_t vf_index); + +extern __checkReturn efx_rc_t +ef10_evb_vport_reconfigure( + __in efx_nic_t *enp, + __in efx_vswitch_id_t vswitch_id, + __in efx_vport_id_t vport_id, + __in_opt uint16_t *vidp, + __in_bcount_opt(EFX_MAC_ADDR_LEN) uint8_t *addrp, + __out_opt boolean_t *fn_resetp); + +extern __checkReturn efx_rc_t +ef10_evb_vport_stats( + __in efx_nic_t *enp, + __in efx_vswitch_id_t vswitch_id, + __in efx_vport_id_t vport_id, + __out efsys_mem_t *esmp); + +#endif /* EFSYS_OPT_EVB */ + +#if EFSYS_OPT_MCDI_PROXY_AUTH_SERVER +extern __checkReturn efx_rc_t +ef10_proxy_auth_init( + __in efx_nic_t *enp); + +extern void +ef10_proxy_auth_fini( + __in efx_nic_t *enp); + +extern __checkReturn efx_rc_t +ef10_proxy_auth_mc_config( + __in efx_nic_t *enp, + __in efsys_mem_t *request_bufferp, + __in efsys_mem_t *response_bufferp, + __in efsys_mem_t *status_bufferp, + __in uint32_t block_cnt, + 
__in_ecount(op_count) uint32_t *op_listp, + __in size_t op_count); + +extern __checkReturn efx_rc_t +ef10_proxy_auth_disable( + __in efx_nic_t *enp); + +extern __checkReturn efx_rc_t +ef10_proxy_auth_privilege_modify( + __in efx_nic_t *enp, + __in uint32_t fn_group, + __in uint32_t pf_index, + __in uint32_t vf_index, + __in uint32_t add_privileges_mask, + __in uint32_t remove_privileges_mask); + + __checkReturn efx_rc_t +ef10_proxy_auth_set_privilege_mask( + __in efx_nic_t *enp, + __in uint32_t vf_index, + __in uint32_t mask, + __in uint32_t value); + + __checkReturn efx_rc_t +ef10_proxy_auth_complete_request( + __in efx_nic_t *enp, + __in uint32_t fn_index, + __in uint32_t proxy_result, + __in uint32_t handle); + + __checkReturn efx_rc_t +ef10_proxy_auth_exec_cmd( + __in efx_nic_t *enp, + __inout efx_proxy_cmd_params_t *paramsp); + + __checkReturn efx_rc_t +ef10_proxy_auth_get_privilege_mask( + __in efx_nic_t *enp, + __in uint32_t pf_index, + __in uint32_t vf_index, + __out uint32_t *maskp); + +#endif /* EFSYS_OPT_MCDI_PROXY_AUTH_SERVER */ + +#if EFSYS_OPT_RX_PACKED_STREAM + +/* Data space per credit in packed stream mode */ +#define EFX_RX_PACKED_STREAM_MEM_PER_CREDIT (1 << 16) + +/* + * Received packets are always aligned at this boundary. Also there always + * exists a gap of this size between packets. + * (see SF-112241-TC, 4.5) + */ +#define EFX_RX_PACKED_STREAM_ALIGNMENT 64 + +/* + * Size of a pseudo-header prepended to received packets + * in packed stream mode + */ +#define EFX_RX_PACKED_STREAM_RX_PREFIX_SIZE 8 + +/* Minimum space for packet in packed stream mode */ +#define EFX_RX_PACKED_STREAM_MIN_PACKET_SPACE \ + EFX_P2ROUNDUP(size_t, \ + EFX_RX_PACKED_STREAM_RX_PREFIX_SIZE + \ + EFX_MAC_PDU_MIN + \ + EFX_RX_PACKED_STREAM_ALIGNMENT, \ + EFX_RX_PACKED_STREAM_ALIGNMENT) + +/* Maximum number of credits */ +#define EFX_RX_PACKED_STREAM_MAX_CREDITS 127 + +#endif /* EFSYS_OPT_RX_PACKED_STREAM */ + +#if EFSYS_OPT_RX_ES_SUPER_BUFFER + +/* + * Maximum DMA length and buffer stride alignment. + * (see SF-119419-TC, 3.2) + */ +#define EFX_RX_ES_SUPER_BUFFER_BUF_ALIGNMENT 64 + +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* _SYS_EF10_IMPL_H */ diff --git a/src/spdk/dpdk/drivers/net/sfc/base/ef10_intr.c b/src/spdk/dpdk/drivers/net/sfc/base/ef10_intr.c new file mode 100644 index 000000000..2e88a8ea0 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/base/ef10_intr.c @@ -0,0 +1,169 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2012-2019 Solarflare Communications Inc. 
+ */ + +#include "efx.h" +#include "efx_impl.h" + + +#if EFX_OPTS_EF10() + + __checkReturn efx_rc_t +ef10_intr_init( + __in efx_nic_t *enp, + __in efx_intr_type_t type, + __in efsys_mem_t *esmp) +{ + _NOTE(ARGUNUSED(enp, type, esmp)) + return (0); +} + + + void +ef10_intr_enable( + __in efx_nic_t *enp) +{ + _NOTE(ARGUNUSED(enp)) +} + + + void +ef10_intr_disable( + __in efx_nic_t *enp) +{ + _NOTE(ARGUNUSED(enp)) +} + + + void +ef10_intr_disable_unlocked( + __in efx_nic_t *enp) +{ + _NOTE(ARGUNUSED(enp)) +} + + +static __checkReturn efx_rc_t +efx_mcdi_trigger_interrupt( + __in efx_nic_t *enp, + __in unsigned int level) +{ + efx_mcdi_req_t req; + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_TRIGGER_INTERRUPT_IN_LEN, + MC_CMD_TRIGGER_INTERRUPT_OUT_LEN); + efx_rc_t rc; + + EFSYS_ASSERT(EFX_FAMILY_IS_EF10(enp)); + + if (level >= enp->en_nic_cfg.enc_intr_limit) { + rc = EINVAL; + goto fail1; + } + + req.emr_cmd = MC_CMD_TRIGGER_INTERRUPT; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_TRIGGER_INTERRUPT_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_TRIGGER_INTERRUPT_OUT_LEN; + + MCDI_IN_SET_DWORD(req, TRIGGER_INTERRUPT_IN_INTR_LEVEL, level); + + efx_mcdi_execute(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail2; + } + + return (0); + +fail2: + EFSYS_PROBE(fail2); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +ef10_intr_trigger( + __in efx_nic_t *enp, + __in unsigned int level) +{ + efx_nic_cfg_t *encp = &(enp->en_nic_cfg); + efx_rc_t rc; + + if (encp->enc_bug41750_workaround) { + /* + * bug 41750: Test interrupts don't work on Greenport + * bug 50084: Test interrupts don't work on VFs + */ + rc = ENOTSUP; + goto fail1; + } + + if ((rc = efx_mcdi_trigger_interrupt(enp, level)) != 0) + goto fail2; + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + void +ef10_intr_status_line( + __in efx_nic_t *enp, + __out boolean_t *fatalp, + __out uint32_t *qmaskp) +{ + efx_dword_t dword; + + EFSYS_ASSERT(EFX_FAMILY_IS_EF10(enp)); + + /* Read the queue mask and implicitly acknowledge the interrupt. */ + EFX_BAR_READD(enp, ER_DZ_BIU_INT_ISR_REG, &dword, B_FALSE); + *qmaskp = EFX_DWORD_FIELD(dword, EFX_DWORD_0); + + EFSYS_PROBE1(qmask, uint32_t, *qmaskp); + + *fatalp = B_FALSE; +} + + void +ef10_intr_status_message( + __in efx_nic_t *enp, + __in unsigned int message, + __out boolean_t *fatalp) +{ + EFSYS_ASSERT(EFX_FAMILY_IS_EF10(enp)); + + _NOTE(ARGUNUSED(enp, message)) + + /* EF10 fatal errors are reported via events */ + *fatalp = B_FALSE; +} + + void +ef10_intr_fatal( + __in efx_nic_t *enp) +{ + /* EF10 fatal errors are reported via events */ + _NOTE(ARGUNUSED(enp)) +} + + void +ef10_intr_fini( + __in efx_nic_t *enp) +{ + _NOTE(ARGUNUSED(enp)) +} + +#endif /* EFX_OPTS_EF10() */ diff --git a/src/spdk/dpdk/drivers/net/sfc/base/ef10_mac.c b/src/spdk/dpdk/drivers/net/sfc/base/ef10_mac.c new file mode 100644 index 000000000..7e89f34a6 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/base/ef10_mac.c @@ -0,0 +1,1042 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2012-2019 Solarflare Communications Inc. 
+ */ + +#include "efx.h" +#include "efx_impl.h" + + +#if EFX_OPTS_EF10() + + __checkReturn efx_rc_t +ef10_mac_poll( + __in efx_nic_t *enp, + __out efx_link_mode_t *link_modep) +{ + efx_port_t *epp = &(enp->en_port); + ef10_link_state_t els; + efx_rc_t rc; + + if ((rc = ef10_phy_get_link(enp, &els)) != 0) + goto fail1; + + epp->ep_adv_cap_mask = els.epls.epls_adv_cap_mask; + epp->ep_fcntl = els.epls.epls_fcntl; + + *link_modep = els.epls.epls_link_mode; + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + *link_modep = EFX_LINK_UNKNOWN; + + return (rc); +} + + __checkReturn efx_rc_t +ef10_mac_up( + __in efx_nic_t *enp, + __out boolean_t *mac_upp) +{ + ef10_link_state_t els; + efx_rc_t rc; + + /* + * Because EF10 doesn't *require* polling, we can't rely on + * ef10_mac_poll() being executed to populate epp->ep_mac_up. + */ + if ((rc = ef10_phy_get_link(enp, &els)) != 0) + goto fail1; + + *mac_upp = els.els_mac_up; + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +/* + * EF10 adapters use MC_CMD_VADAPTOR_SET_MAC to set the + * MAC address; the address field in MC_CMD_SET_MAC has no + * effect. + * MC_CMD_VADAPTOR_SET_MAC requires mac-spoofing privilege and + * the port to have no filters or queues active. + */ +static __checkReturn efx_rc_t +efx_mcdi_vadapter_set_mac( + __in efx_nic_t *enp) +{ + efx_port_t *epp = &(enp->en_port); + efx_mcdi_req_t req; + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_VADAPTOR_SET_MAC_IN_LEN, + MC_CMD_VADAPTOR_SET_MAC_OUT_LEN); + efx_rc_t rc; + + req.emr_cmd = MC_CMD_VADAPTOR_SET_MAC; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_VADAPTOR_SET_MAC_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_VADAPTOR_SET_MAC_OUT_LEN; + + MCDI_IN_SET_DWORD(req, VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID, + enp->en_vport_id); + EFX_MAC_ADDR_COPY(MCDI_IN2(req, uint8_t, VADAPTOR_SET_MAC_IN_MACADDR), + epp->ep_mac_addr); + + efx_mcdi_execute(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail1; + } + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +ef10_mac_addr_set( + __in efx_nic_t *enp) +{ + efx_rc_t rc; + + if ((rc = efx_mcdi_vadapter_set_mac(enp)) != 0) { + if (rc != ENOTSUP) + goto fail1; + + /* + * Fallback for older Huntington firmware without Vadapter + * support. 
+ */ + if ((rc = ef10_mac_reconfigure(enp)) != 0) + goto fail2; + } + + return (0); + +fail2: + EFSYS_PROBE(fail2); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +static __checkReturn efx_rc_t +efx_mcdi_mtu_set( + __in efx_nic_t *enp, + __in uint32_t mtu) +{ + efx_mcdi_req_t req; + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_SET_MAC_EXT_IN_LEN, + MC_CMD_SET_MAC_OUT_LEN); + efx_rc_t rc; + + req.emr_cmd = MC_CMD_SET_MAC; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_SET_MAC_EXT_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_SET_MAC_OUT_LEN; + + /* Only configure the MTU in this call to MC_CMD_SET_MAC */ + MCDI_IN_SET_DWORD(req, SET_MAC_EXT_IN_MTU, mtu); + MCDI_IN_POPULATE_DWORD_1(req, SET_MAC_EXT_IN_CONTROL, + SET_MAC_EXT_IN_CFG_MTU, 1); + + efx_mcdi_execute(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail1; + } + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +static __checkReturn efx_rc_t +efx_mcdi_mtu_get( + __in efx_nic_t *enp, + __out size_t *mtu) +{ + efx_mcdi_req_t req; + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_SET_MAC_EXT_IN_LEN, + MC_CMD_SET_MAC_V2_OUT_LEN); + efx_rc_t rc; + + req.emr_cmd = MC_CMD_SET_MAC; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_SET_MAC_EXT_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_SET_MAC_V2_OUT_LEN; + + /* + * With MC_CMD_SET_MAC_EXT_IN_CONTROL set to 0, this just queries the + * MTU. This should always be supported on Medford, but it is not + * supported on older Huntington firmware. + */ + MCDI_IN_SET_DWORD(req, SET_MAC_EXT_IN_CONTROL, 0); + + efx_mcdi_execute(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail1; + } + if (req.emr_out_length_used < MC_CMD_SET_MAC_V2_OUT_MTU_OFST + 4) { + rc = EMSGSIZE; + goto fail2; + } + + *mtu = MCDI_OUT_DWORD(req, SET_MAC_V2_OUT_MTU); + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +ef10_mac_pdu_set( + __in efx_nic_t *enp) +{ + efx_port_t *epp = &(enp->en_port); + efx_nic_cfg_t *encp = &(enp->en_nic_cfg); + efx_rc_t rc; + + if (encp->enc_enhanced_set_mac_supported) { + if ((rc = efx_mcdi_mtu_set(enp, epp->ep_mac_pdu)) != 0) + goto fail1; + } else { + /* + * Fallback for older Huntington firmware, which always + * configure all of the parameters to MC_CMD_SET_MAC. This isn't + * suitable for setting the MTU on unpriviliged functions. + */ + if ((rc = ef10_mac_reconfigure(enp)) != 0) + goto fail2; + } + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +ef10_mac_pdu_get( + __in efx_nic_t *enp, + __out size_t *pdu) +{ + efx_rc_t rc; + + if ((rc = efx_mcdi_mtu_get(enp, pdu)) != 0) + goto fail1; + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +__checkReturn efx_rc_t +ef10_mac_reconfigure( + __in efx_nic_t *enp) +{ + efx_port_t *epp = &(enp->en_port); + efx_mcdi_req_t req; + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_SET_MAC_IN_LEN, + MC_CMD_SET_MAC_OUT_LEN); + efx_rc_t rc; + + req.emr_cmd = MC_CMD_SET_MAC; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_SET_MAC_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_SET_MAC_OUT_LEN; + + MCDI_IN_SET_DWORD(req, SET_MAC_IN_MTU, epp->ep_mac_pdu); + MCDI_IN_SET_DWORD(req, SET_MAC_IN_DRAIN, epp->ep_mac_drain ? 
1 : 0); + EFX_MAC_ADDR_COPY(MCDI_IN2(req, uint8_t, SET_MAC_IN_ADDR), + epp->ep_mac_addr); + + /* + * Note: The Huntington MAC does not support REJECT_BRDCST. + * The REJECT_UNCST flag will also prevent multicast traffic + * from reaching the filters. As Huntington filters drop any + * traffic that does not match a filter it is ok to leave the + * MAC running in promiscuous mode. See bug41141. + * + * FIXME: Does REJECT_UNCST behave the same way on Medford? + */ + MCDI_IN_POPULATE_DWORD_2(req, SET_MAC_IN_REJECT, + SET_MAC_IN_REJECT_UNCST, 0, + SET_MAC_IN_REJECT_BRDCST, 0); + + /* + * Flow control, whether it is auto-negotiated or not, + * is set via the PHY advertised capabilities. When set to + * automatic the MAC will use the PHY settings to determine + * the flow control settings. + */ + MCDI_IN_SET_DWORD(req, SET_MAC_IN_FCNTL, MC_CMD_FCNTL_AUTO); + + /* Do not include the Ethernet frame checksum in RX packets */ + MCDI_IN_POPULATE_DWORD_1(req, SET_MAC_IN_FLAGS, + SET_MAC_IN_FLAG_INCLUDE_FCS, 0); + + efx_mcdi_execute_quiet(enp, &req); + + if (req.emr_rc != 0) { + /* + * Unprivileged functions cannot control link state, + * but still need to configure filters. + */ + if (req.emr_rc != EACCES) { + rc = req.emr_rc; + goto fail1; + } + } + + /* + * Apply the filters for the MAC configuration. + * If the NIC isn't ready to accept filters this may + * return success without setting anything. + */ + rc = efx_filter_reconfigure(enp, epp->ep_mac_addr, + epp->ep_all_unicst, epp->ep_mulcst, + epp->ep_all_mulcst, epp->ep_brdcst, + epp->ep_mulcst_addr_list, + epp->ep_mulcst_addr_count); + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +ef10_mac_multicast_list_set( + __in efx_nic_t *enp) +{ + efx_port_t *epp = &(enp->en_port); + const efx_mac_ops_t *emop = epp->ep_emop; + efx_rc_t rc; + + EFSYS_ASSERT(EFX_FAMILY_IS_EF10(enp)); + + if ((rc = emop->emo_reconfigure(enp)) != 0) + goto fail1; + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +ef10_mac_filter_default_rxq_set( + __in efx_nic_t *enp, + __in efx_rxq_t *erp, + __in boolean_t using_rss) +{ + efx_port_t *epp = &(enp->en_port); + efx_rxq_t *old_rxq; + boolean_t old_using_rss; + efx_rc_t rc; + + ef10_filter_get_default_rxq(enp, &old_rxq, &old_using_rss); + + ef10_filter_default_rxq_set(enp, erp, using_rss); + + rc = efx_filter_reconfigure(enp, epp->ep_mac_addr, + epp->ep_all_unicst, epp->ep_mulcst, + epp->ep_all_mulcst, epp->ep_brdcst, + epp->ep_mulcst_addr_list, + epp->ep_mulcst_addr_count); + + if (rc != 0) + goto fail1; + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + ef10_filter_default_rxq_set(enp, old_rxq, old_using_rss); + + return (rc); +} + + void +ef10_mac_filter_default_rxq_clear( + __in efx_nic_t *enp) +{ + efx_port_t *epp = &(enp->en_port); + + ef10_filter_default_rxq_clear(enp); + + (void) efx_filter_reconfigure(enp, epp->ep_mac_addr, + epp->ep_all_unicst, epp->ep_mulcst, + epp->ep_all_mulcst, epp->ep_brdcst, + epp->ep_mulcst_addr_list, + epp->ep_mulcst_addr_count); +} + + +#if EFSYS_OPT_LOOPBACK + + __checkReturn efx_rc_t +ef10_mac_loopback_set( + __in efx_nic_t *enp, + __in efx_link_mode_t link_mode, + __in efx_loopback_type_t loopback_type) +{ + efx_port_t *epp = &(enp->en_port); + const efx_phy_ops_t *epop = epp->ep_epop; + efx_loopback_type_t old_loopback_type; + efx_link_mode_t old_loopback_link_mode; + efx_rc_t rc; + + /* The PHY object handles this on EF10 */ + old_loopback_type = 
epp->ep_loopback_type; + old_loopback_link_mode = epp->ep_loopback_link_mode; + epp->ep_loopback_type = loopback_type; + epp->ep_loopback_link_mode = link_mode; + + if ((rc = epop->epo_reconfigure(enp)) != 0) + goto fail1; + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + epp->ep_loopback_type = old_loopback_type; + epp->ep_loopback_link_mode = old_loopback_link_mode; + + return (rc); +} + +#endif /* EFSYS_OPT_LOOPBACK */ + +#if EFSYS_OPT_MAC_STATS + + __checkReturn efx_rc_t +ef10_mac_stats_get_mask( + __in efx_nic_t *enp, + __inout_bcount(mask_size) uint32_t *maskp, + __in size_t mask_size) +{ + const struct efx_mac_stats_range ef10_common[] = { + { EFX_MAC_RX_OCTETS, EFX_MAC_RX_GE_15XX_PKTS }, + { EFX_MAC_RX_FCS_ERRORS, EFX_MAC_RX_DROP_EVENTS }, + { EFX_MAC_RX_JABBER_PKTS, EFX_MAC_RX_JABBER_PKTS }, + { EFX_MAC_RX_NODESC_DROP_CNT, EFX_MAC_TX_PAUSE_PKTS }, + }; + const struct efx_mac_stats_range ef10_tx_size_bins[] = { + { EFX_MAC_TX_LE_64_PKTS, EFX_MAC_TX_GE_15XX_PKTS }, + }; + efx_nic_cfg_t *encp = &(enp->en_nic_cfg); + efx_port_t *epp = &(enp->en_port); + efx_rc_t rc; + + if ((rc = efx_mac_stats_mask_add_ranges(maskp, mask_size, + ef10_common, EFX_ARRAY_SIZE(ef10_common))) != 0) + goto fail1; + + if (epp->ep_phy_cap_mask & (1 << MC_CMD_PHY_CAP_40000FDX_LBN)) { + const struct efx_mac_stats_range ef10_40g_extra[] = { + { EFX_MAC_RX_ALIGN_ERRORS, EFX_MAC_RX_ALIGN_ERRORS }, + }; + + if ((rc = efx_mac_stats_mask_add_ranges(maskp, mask_size, + ef10_40g_extra, EFX_ARRAY_SIZE(ef10_40g_extra))) != 0) + goto fail2; + + if (encp->enc_mac_stats_40g_tx_size_bins) { + if ((rc = efx_mac_stats_mask_add_ranges(maskp, + mask_size, ef10_tx_size_bins, + EFX_ARRAY_SIZE(ef10_tx_size_bins))) != 0) + goto fail3; + } + } else { + if ((rc = efx_mac_stats_mask_add_ranges(maskp, mask_size, + ef10_tx_size_bins, EFX_ARRAY_SIZE(ef10_tx_size_bins))) != 0) + goto fail4; + } + + if (encp->enc_pm_and_rxdp_counters) { + const struct efx_mac_stats_range ef10_pm_and_rxdp[] = { + { EFX_MAC_PM_TRUNC_BB_OVERFLOW, EFX_MAC_RXDP_HLB_WAIT }, + }; + + if ((rc = efx_mac_stats_mask_add_ranges(maskp, mask_size, + ef10_pm_and_rxdp, EFX_ARRAY_SIZE(ef10_pm_and_rxdp))) != 0) + goto fail5; + } + + if (encp->enc_datapath_cap_evb) { + const struct efx_mac_stats_range ef10_vadaptor[] = { + { EFX_MAC_VADAPTER_RX_UNICAST_PACKETS, + EFX_MAC_VADAPTER_TX_OVERFLOW }, + }; + + if ((rc = efx_mac_stats_mask_add_ranges(maskp, mask_size, + ef10_vadaptor, EFX_ARRAY_SIZE(ef10_vadaptor))) != 0) + goto fail6; + } + + if (encp->enc_fec_counters) { + const struct efx_mac_stats_range ef10_fec[] = { + { EFX_MAC_FEC_UNCORRECTED_ERRORS, + EFX_MAC_FEC_CORRECTED_SYMBOLS_LANE3 }, + }; + if ((rc = efx_mac_stats_mask_add_ranges(maskp, mask_size, + ef10_fec, EFX_ARRAY_SIZE(ef10_fec))) != 0) + goto fail7; + } + + if (encp->enc_mac_stats_nstats >= MC_CMD_MAC_NSTATS_V4) { + const struct efx_mac_stats_range ef10_rxdp_sdt[] = { + { EFX_MAC_RXDP_SCATTER_DISABLED_TRUNC, + EFX_MAC_RXDP_SCATTER_DISABLED_TRUNC }, + }; + + if ((rc = efx_mac_stats_mask_add_ranges(maskp, mask_size, + ef10_rxdp_sdt, EFX_ARRAY_SIZE(ef10_rxdp_sdt))) != 0) + goto fail8; + } + + if (encp->enc_hlb_counters) { + const struct efx_mac_stats_range ef10_hlb[] = { + { EFX_MAC_RXDP_HLB_IDLE, EFX_MAC_RXDP_HLB_TIMEOUT }, + }; + if ((rc = efx_mac_stats_mask_add_ranges(maskp, mask_size, + ef10_hlb, EFX_ARRAY_SIZE(ef10_hlb))) != 0) + goto fail9; + } + + return (0); + +fail9: + EFSYS_PROBE(fail9); +fail8: + EFSYS_PROBE(fail8); +fail7: + EFSYS_PROBE(fail7); +fail6: + EFSYS_PROBE(fail6); +fail5: + 
EFSYS_PROBE(fail5); +fail4: + EFSYS_PROBE(fail4); +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +#define EF10_MAC_STAT_READ(_esmp, _field, _eqp) \ + EFSYS_MEM_READQ((_esmp), (_field) * sizeof (efx_qword_t), _eqp) + + + __checkReturn efx_rc_t +ef10_mac_stats_update( + __in efx_nic_t *enp, + __in efsys_mem_t *esmp, + __inout_ecount(EFX_MAC_NSTATS) efsys_stat_t *stat, + __inout_opt uint32_t *generationp) +{ + const efx_nic_cfg_t *encp = &enp->en_nic_cfg; + efx_qword_t generation_start; + efx_qword_t generation_end; + efx_qword_t value; + efx_rc_t rc; + + /* + * The MAC_STATS contain start and end generation counters used to + * detect when the DMA buffer has been updated during stats decode. + * All stats counters are 64bit unsigned values. + * + * Siena-compatible MAC stats contain MC_CMD_MAC_NSTATS 64bit counters. + * The generation end counter is at index MC_CMD_MAC_GENERATION_END + * (same as MC_CMD_MAC_NSTATS-1). + * + * Medford2 and later use a larger DMA buffer: MAC_STATS_NUM_STATS from + * MC_CMD_GET_CAPABILITIES_V4_OUT reports the number of 64bit counters. + * + * Firmware writes the generation end counter as the last counter in the + * DMA buffer. Do not use MC_CMD_MAC_GENERATION_END, as that is only + * correct for legacy Siena-compatible MAC stats. + */ + + if (encp->enc_mac_stats_nstats < MC_CMD_MAC_NSTATS) { + /* MAC stats count too small for legacy MAC stats */ + rc = ENOSPC; + goto fail1; + } + if (EFSYS_MEM_SIZE(esmp) < + (encp->enc_mac_stats_nstats * sizeof (efx_qword_t))) { + /* DMA buffer too small */ + rc = ENOSPC; + goto fail2; + } + + /* Read END first so we don't race with the MC */ + EFSYS_DMA_SYNC_FOR_KERNEL(esmp, 0, EFSYS_MEM_SIZE(esmp)); + EF10_MAC_STAT_READ(esmp, (encp->enc_mac_stats_nstats - 1), + &generation_end); + EFSYS_MEM_READ_BARRIER(); + + /* TX */ + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_PKTS, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_PKTS]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_CONTROL_PKTS, &value); + EFSYS_STAT_SUBR_QWORD(&(stat[EFX_MAC_TX_PKTS]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_PAUSE_PKTS, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_PAUSE_PKTS]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_UNICAST_PKTS, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_UNICST_PKTS]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_MULTICAST_PKTS, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_MULTICST_PKTS]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_BROADCAST_PKTS, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_BRDCST_PKTS]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_BYTES, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_OCTETS]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_LT64_PKTS, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_LE_64_PKTS]), &value); + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_64_PKTS, &value); + EFSYS_STAT_INCR_QWORD(&(stat[EFX_MAC_TX_LE_64_PKTS]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_65_TO_127_PKTS, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_65_TO_127_PKTS]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_128_TO_255_PKTS, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_128_TO_255_PKTS]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_256_TO_511_PKTS, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_256_TO_511_PKTS]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_512_TO_1023_PKTS, &value); + 
EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_512_TO_1023_PKTS]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_1024_TO_15XX_PKTS, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_1024_TO_15XX_PKTS]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_15XX_TO_JUMBO_PKTS, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_GE_15XX_PKTS]), &value); + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_GTJUMBO_PKTS, &value); + EFSYS_STAT_INCR_QWORD(&(stat[EFX_MAC_TX_GE_15XX_PKTS]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_BAD_FCS_PKTS, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_ERRORS]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_SINGLE_COLLISION_PKTS, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_SGL_COL_PKTS]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_MULTIPLE_COLLISION_PKTS, + &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_MULT_COL_PKTS]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_EXCESSIVE_COLLISION_PKTS, + &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_EX_COL_PKTS]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_LATE_COLLISION_PKTS, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_LATE_COL_PKTS]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_DEFERRED_PKTS, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_DEF_PKTS]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_EXCESSIVE_DEFERRED_PKTS, + &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_EX_DEF_PKTS]), &value); + + /* RX */ + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_BYTES, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_OCTETS]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_PKTS, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_PKTS]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_UNICAST_PKTS, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_UNICST_PKTS]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_MULTICAST_PKTS, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_MULTICST_PKTS]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_BROADCAST_PKTS, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_BRDCST_PKTS]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_PAUSE_PKTS, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_PAUSE_PKTS]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_UNDERSIZE_PKTS, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_LE_64_PKTS]), &value); + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_64_PKTS, &value); + EFSYS_STAT_INCR_QWORD(&(stat[EFX_MAC_RX_LE_64_PKTS]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_65_TO_127_PKTS, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_65_TO_127_PKTS]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_128_TO_255_PKTS, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_128_TO_255_PKTS]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_256_TO_511_PKTS, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_256_TO_511_PKTS]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_512_TO_1023_PKTS, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_512_TO_1023_PKTS]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_1024_TO_15XX_PKTS, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_1024_TO_15XX_PKTS]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_15XX_TO_JUMBO_PKTS, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_GE_15XX_PKTS]), &value); + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_GTJUMBO_PKTS, &value); + EFSYS_STAT_INCR_QWORD(&(stat[EFX_MAC_RX_GE_15XX_PKTS]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_BAD_FCS_PKTS, &value); + 
EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_FCS_ERRORS]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_OVERFLOW_PKTS, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_DROP_EVENTS]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_FALSE_CARRIER_PKTS, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_FALSE_CARRIER_ERRORS]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_SYMBOL_ERROR_PKTS, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_SYMBOL_ERRORS]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_ALIGN_ERROR_PKTS, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_ALIGN_ERRORS]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_INTERNAL_ERROR_PKTS, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_INTERNAL_ERRORS]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_JABBER_PKTS, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_JABBER_PKTS]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_LANES01_CHAR_ERR, &value); + EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE0_CHAR_ERR]), + &(value.eq_dword[0])); + EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE1_CHAR_ERR]), + &(value.eq_dword[1])); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_LANES23_CHAR_ERR, &value); + EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE2_CHAR_ERR]), + &(value.eq_dword[0])); + EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE3_CHAR_ERR]), + &(value.eq_dword[1])); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_LANES01_DISP_ERR, &value); + EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE0_DISP_ERR]), + &(value.eq_dword[0])); + EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE1_DISP_ERR]), + &(value.eq_dword[1])); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_LANES23_DISP_ERR, &value); + EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE2_DISP_ERR]), + &(value.eq_dword[0])); + EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE3_DISP_ERR]), + &(value.eq_dword[1])); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_MATCH_FAULT, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_MATCH_FAULT]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_NODESC_DROPS, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_NODESC_DROP_CNT]), &value); + + /* Packet memory (EF10 only) */ + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_PM_TRUNC_BB_OVERFLOW, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_PM_TRUNC_BB_OVERFLOW]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_PM_DISCARD_BB_OVERFLOW, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_PM_DISCARD_BB_OVERFLOW]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_PM_TRUNC_VFIFO_FULL, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_PM_TRUNC_VFIFO_FULL]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_PM_DISCARD_VFIFO_FULL, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_PM_DISCARD_VFIFO_FULL]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_PM_TRUNC_QBB, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_PM_TRUNC_QBB]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_PM_DISCARD_QBB, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_PM_DISCARD_QBB]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_PM_DISCARD_MAPPING, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_PM_DISCARD_MAPPING]), &value); + + /* RX datapath */ + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RXDP_Q_DISABLED_PKTS, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RXDP_Q_DISABLED_PKTS]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RXDP_DI_DROPPED_PKTS, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RXDP_DI_DROPPED_PKTS]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RXDP_STREAMING_PKTS, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RXDP_STREAMING_PKTS]), &value); + + 
EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RXDP_HLB_FETCH_CONDITIONS, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RXDP_HLB_FETCH]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RXDP_HLB_WAIT_CONDITIONS, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RXDP_HLB_WAIT]), &value); + + + /* VADAPTER RX */ + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_RX_UNICAST_PACKETS, + &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_RX_UNICAST_PACKETS]), + &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_RX_UNICAST_BYTES, + &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_RX_UNICAST_BYTES]), + &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_RX_MULTICAST_PACKETS, + &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_RX_MULTICAST_PACKETS]), + &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_RX_MULTICAST_BYTES, + &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_RX_MULTICAST_BYTES]), + &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_RX_BROADCAST_PACKETS, + &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_RX_BROADCAST_PACKETS]), + &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_RX_BROADCAST_BYTES, + &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_RX_BROADCAST_BYTES]), + &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_RX_BAD_PACKETS, + &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_RX_BAD_PACKETS]), + &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_RX_BAD_BYTES, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_RX_BAD_BYTES]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_RX_OVERFLOW, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_RX_OVERFLOW]), &value); + + /* VADAPTER TX */ + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_TX_UNICAST_PACKETS, + &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_TX_UNICAST_PACKETS]), + &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_TX_UNICAST_BYTES, + &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_TX_UNICAST_BYTES]), + &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_TX_MULTICAST_PACKETS, + &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_TX_MULTICAST_PACKETS]), + &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_TX_MULTICAST_BYTES, + &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_TX_MULTICAST_BYTES]), + &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_TX_BROADCAST_PACKETS, + &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_TX_BROADCAST_PACKETS]), + &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_TX_BROADCAST_BYTES, + &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_TX_BROADCAST_BYTES]), + &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_TX_BAD_PACKETS, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_TX_BAD_PACKETS]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_TX_BAD_BYTES, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_TX_BAD_BYTES]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_TX_OVERFLOW, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_TX_OVERFLOW]), &value); + + + if (encp->enc_mac_stats_nstats < MC_CMD_MAC_NSTATS_V2) + goto done; + + /* FEC */ + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_FEC_UNCORRECTED_ERRORS, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_FEC_UNCORRECTED_ERRORS]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_FEC_CORRECTED_ERRORS, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_FEC_CORRECTED_ERRORS]), &value); + + EF10_MAC_STAT_READ(esmp, 
MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE0, + &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_FEC_CORRECTED_SYMBOLS_LANE0]), + &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE1, + &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_FEC_CORRECTED_SYMBOLS_LANE1]), + &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE2, + &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_FEC_CORRECTED_SYMBOLS_LANE2]), + &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE3, + &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_FEC_CORRECTED_SYMBOLS_LANE3]), + &value); + + if (encp->enc_mac_stats_nstats < MC_CMD_MAC_NSTATS_V3) + goto done; + + /* CTPIO exceptions */ + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_VI_BUSY_FALLBACK, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_VI_BUSY_FALLBACK]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_LONG_WRITE_SUCCESS, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_LONG_WRITE_SUCCESS]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_MISSING_DBELL_FAIL, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_MISSING_DBELL_FAIL]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_OVERFLOW_FAIL, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_OVERFLOW_FAIL]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_UNDERFLOW_FAIL, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_UNDERFLOW_FAIL]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_TIMEOUT_FAIL, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_TIMEOUT_FAIL]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_NONCONTIG_WR_FAIL, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_NONCONTIG_WR_FAIL]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_FRM_CLOBBER_FAIL, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_FRM_CLOBBER_FAIL]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_INVALID_WR_FAIL, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_INVALID_WR_FAIL]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_VI_CLOBBER_FALLBACK, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_VI_CLOBBER_FALLBACK]), + &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_UNQUALIFIED_FALLBACK, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_UNQUALIFIED_FALLBACK]), + &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_RUNT_FALLBACK, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_RUNT_FALLBACK]), &value); + + /* CTPIO per-port stats */ + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_SUCCESS, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_SUCCESS]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_FALLBACK, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_FALLBACK]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_POISON, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_POISON]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_ERASE, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_ERASE]), &value); + + if (encp->enc_mac_stats_nstats < MC_CMD_MAC_NSTATS_V4) + goto done; + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RXDP_SCATTER_DISABLED_TRUNC, + &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RXDP_SCATTER_DISABLED_TRUNC]), + &value); + + /* Head-of-line blocking */ + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RXDP_HLB_IDLE, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RXDP_HLB_IDLE]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RXDP_HLB_TIMEOUT, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RXDP_HLB_TIMEOUT]), &value); + +done: + /* Read START 
generation counter */ + EFSYS_DMA_SYNC_FOR_KERNEL(esmp, 0, EFSYS_MEM_SIZE(esmp)); + EFSYS_MEM_READ_BARRIER(); + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_GENERATION_START, + &generation_start); + + /* Check that we didn't read the stats in the middle of a DMA */ + /* Not a good enough check ? */ + if (memcmp(&generation_start, &generation_end, + sizeof (generation_start))) + return (EAGAIN); + + if (generationp) + *generationp = EFX_QWORD_FIELD(generation_start, EFX_DWORD_0); + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +#endif /* EFSYS_OPT_MAC_STATS */ + +#endif /* EFX_OPTS_EF10() */ diff --git a/src/spdk/dpdk/drivers/net/sfc/base/ef10_mcdi.c b/src/spdk/dpdk/drivers/net/sfc/base/ef10_mcdi.c new file mode 100644 index 000000000..9e8a0c1f6 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/base/ef10_mcdi.c @@ -0,0 +1,319 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2012-2019 Solarflare Communications Inc. + */ + +#include "efx.h" +#include "efx_impl.h" + + +#if EFX_OPTS_EF10() + +#if EFSYS_OPT_MCDI + +#ifndef WITH_MCDI_V2 +#error "WITH_MCDI_V2 required for EF10 MCDIv2 commands." +#endif + + + __checkReturn efx_rc_t +ef10_mcdi_init( + __in efx_nic_t *enp, + __in const efx_mcdi_transport_t *emtp) +{ + efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); + efsys_mem_t *esmp = emtp->emt_dma_mem; + efx_dword_t dword; + efx_rc_t rc; + + EFSYS_ASSERT(EFX_FAMILY_IS_EF10(enp)); + EFSYS_ASSERT(enp->en_features & EFX_FEATURE_MCDI_DMA); + + /* + * All EF10 firmware supports MCDIv2 and MCDIv1. + * Medford BootROM supports MCDIv2 and MCDIv1. + * Huntington BootROM supports MCDIv1 only. + */ + emip->emi_max_version = 2; + + /* A host DMA buffer is required for EF10 MCDI */ + if (esmp == NULL) { + rc = EINVAL; + goto fail1; + } + + /* + * Ensure that the MC doorbell is in a known state before issuing MCDI + * commands. The recovery algorithm requires that the MC command buffer + * must be 256 byte aligned. See bug24769. + */ + if ((EFSYS_MEM_ADDR(esmp) & 0xFF) != 0) { + rc = EINVAL; + goto fail2; + } + EFX_POPULATE_DWORD_1(dword, EFX_DWORD_0, 1); + EFX_BAR_WRITED(enp, ER_DZ_MC_DB_HWRD_REG, &dword, B_FALSE); + + /* Save initial MC reboot status */ + (void) ef10_mcdi_poll_reboot(enp); + + /* Start a new epoch (allow fresh MCDI requests to succeed) */ + efx_mcdi_new_epoch(enp); + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + void +ef10_mcdi_fini( + __in efx_nic_t *enp) +{ + efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); + + emip->emi_new_epoch = B_FALSE; +} + +/* + * In older firmware all commands are processed in a single thread, so a long + * running command for one PCIe function can block processing for another + * function (see bug 61269). + * + * In newer firmware that supports multithreaded MCDI processing, we can extend + * the timeout for long-running requests which we know firmware may choose to + * process in a background thread. 
+ */ +#define EF10_MCDI_CMD_TIMEOUT_US (10 * 1000 * 1000) +#define EF10_MCDI_CMD_LONG_TIMEOUT_US (60 * 1000 * 1000) + + void +ef10_mcdi_get_timeout( + __in efx_nic_t *enp, + __in efx_mcdi_req_t *emrp, + __out uint32_t *timeoutp) +{ + efx_nic_cfg_t *encp = &(enp->en_nic_cfg); + + switch (emrp->emr_cmd) { + case MC_CMD_POLL_BIST: + case MC_CMD_NVRAM_ERASE: + case MC_CMD_LICENSING_V3: + case MC_CMD_NVRAM_UPDATE_FINISH: + if (encp->enc_nvram_update_verify_result_supported != B_FALSE) { + /* + * Potentially longer running commands, which firmware + * may choose to process in a background thread. + */ + *timeoutp = EF10_MCDI_CMD_LONG_TIMEOUT_US; + break; + } + /* FALLTHRU */ + default: + *timeoutp = EF10_MCDI_CMD_TIMEOUT_US; + break; + } +} + + void +ef10_mcdi_send_request( + __in efx_nic_t *enp, + __in_bcount(hdr_len) void *hdrp, + __in size_t hdr_len, + __in_bcount(sdu_len) void *sdup, + __in size_t sdu_len) +{ + const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp; + efsys_mem_t *esmp = emtp->emt_dma_mem; + efx_dword_t dword; + unsigned int pos; + + EFSYS_ASSERT(EFX_FAMILY_IS_EF10(enp)); + + /* Write the header */ + for (pos = 0; pos < hdr_len; pos += sizeof (efx_dword_t)) { + dword = *(efx_dword_t *)((uint8_t *)hdrp + pos); + EFSYS_MEM_WRITED(esmp, pos, &dword); + } + + /* Write the payload */ + for (pos = 0; pos < sdu_len; pos += sizeof (efx_dword_t)) { + dword = *(efx_dword_t *)((uint8_t *)sdup + pos); + EFSYS_MEM_WRITED(esmp, hdr_len + pos, &dword); + } + + /* Guarantee ordering of memory (MCDI request) and PIO (MC doorbell) */ + EFSYS_DMA_SYNC_FOR_DEVICE(esmp, 0, hdr_len + sdu_len); + EFSYS_PIO_WRITE_BARRIER(); + + /* Ring the doorbell to post the command DMA address to the MC */ + EFX_POPULATE_DWORD_1(dword, EFX_DWORD_0, + EFSYS_MEM_ADDR(esmp) >> 32); + EFX_BAR_WRITED(enp, ER_DZ_MC_DB_LWRD_REG, &dword, B_FALSE); + + EFX_POPULATE_DWORD_1(dword, EFX_DWORD_0, + EFSYS_MEM_ADDR(esmp) & 0xffffffff); + EFX_BAR_WRITED(enp, ER_DZ_MC_DB_HWRD_REG, &dword, B_FALSE); +} + + __checkReturn boolean_t +ef10_mcdi_poll_response( + __in efx_nic_t *enp) +{ + const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp; + efsys_mem_t *esmp = emtp->emt_dma_mem; + efx_dword_t hdr; + + EFSYS_MEM_READD(esmp, 0, &hdr); + EFSYS_MEM_READ_BARRIER(); + + return (EFX_DWORD_FIELD(hdr, MCDI_HEADER_RESPONSE) ? B_TRUE : B_FALSE); +} + + void +ef10_mcdi_read_response( + __in efx_nic_t *enp, + __out_bcount(length) void *bufferp, + __in size_t offset, + __in size_t length) +{ + const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp; + efsys_mem_t *esmp = emtp->emt_dma_mem; + unsigned int pos = 0; + efx_dword_t data; + size_t remaining = length; + + while (remaining > 0) { + size_t chunk = MIN(remaining, sizeof (data)); + + EFSYS_MEM_READD(esmp, offset + pos, &data); + memcpy((uint8_t *)bufferp + pos, &data, chunk); + pos += chunk; + remaining -= chunk; + } +} + + efx_rc_t +ef10_mcdi_poll_reboot( + __in efx_nic_t *enp) +{ + efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); + efx_dword_t dword; + uint32_t old_status; + uint32_t new_status; + efx_rc_t rc; + + old_status = emip->emi_mc_reboot_status; + + /* Update MC reboot status word */ + EFX_BAR_TBL_READD(enp, ER_DZ_BIU_MC_SFT_STATUS_REG, 0, &dword, B_FALSE); + new_status = dword.ed_u32[0]; + + /* MC has rebooted if the value has changed */ + if (new_status != old_status) { + emip->emi_mc_reboot_status = new_status; + + /* + * FIXME: Ignore detected MC REBOOT for now. 
+ * + * The Siena support for checking for MC reboot from status + * flags is broken - see comments in siena_mcdi_poll_reboot(). + * As the generic MCDI code is shared the EF10 reboot + * detection suffers similar problems. + * + * Do not report an error when the boot status changes until + * this can be handled by common code drivers (and reworked to + * support Siena too). + */ + _NOTE(CONSTANTCONDITION) + if (B_FALSE) { + rc = EIO; + goto fail1; + } + } + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +ef10_mcdi_feature_supported( + __in efx_nic_t *enp, + __in efx_mcdi_feature_id_t id, + __out boolean_t *supportedp) +{ + efx_nic_cfg_t *encp = &(enp->en_nic_cfg); + uint32_t privilege_mask = encp->enc_privilege_mask; + efx_rc_t rc; + + EFSYS_ASSERT(EFX_FAMILY_IS_EF10(enp)); + + /* + * Use privilege mask state at MCDI attach. + */ + + switch (id) { + case EFX_MCDI_FEATURE_FW_UPDATE: + /* + * Admin privilege must be used prior to introduction of + * specific flag. + */ + *supportedp = + EFX_MCDI_HAVE_PRIVILEGE(privilege_mask, ADMIN); + break; + case EFX_MCDI_FEATURE_LINK_CONTROL: + /* + * Admin privilege used prior to introduction of + * specific flag. + */ + *supportedp = + EFX_MCDI_HAVE_PRIVILEGE(privilege_mask, LINK) || + EFX_MCDI_HAVE_PRIVILEGE(privilege_mask, ADMIN); + break; + case EFX_MCDI_FEATURE_MACADDR_CHANGE: + /* + * Admin privilege must be used prior to introduction of + * mac spoofing privilege (at v4.6), which is used up to + * introduction of change mac spoofing privilege (at v4.7) + */ + *supportedp = + EFX_MCDI_HAVE_PRIVILEGE(privilege_mask, CHANGE_MAC) || + EFX_MCDI_HAVE_PRIVILEGE(privilege_mask, MAC_SPOOFING) || + EFX_MCDI_HAVE_PRIVILEGE(privilege_mask, ADMIN); + break; + case EFX_MCDI_FEATURE_MAC_SPOOFING: + /* + * Admin privilege must be used prior to introduction of + * mac spoofing privilege (at v4.6), which is used up to + * introduction of mac spoofing TX privilege (at v4.7) + */ + *supportedp = + EFX_MCDI_HAVE_PRIVILEGE(privilege_mask, MAC_SPOOFING_TX) || + EFX_MCDI_HAVE_PRIVILEGE(privilege_mask, MAC_SPOOFING) || + EFX_MCDI_HAVE_PRIVILEGE(privilege_mask, ADMIN); + break; + default: + rc = ENOTSUP; + goto fail1; + } + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +#endif /* EFSYS_OPT_MCDI */ + +#endif /* EFX_OPTS_EF10() */ diff --git a/src/spdk/dpdk/drivers/net/sfc/base/ef10_nic.c b/src/spdk/dpdk/drivers/net/sfc/base/ef10_nic.c new file mode 100644 index 000000000..34fa45e8c --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/base/ef10_nic.c @@ -0,0 +1,2672 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2012-2019 Solarflare Communications Inc. 
+ */ + +#include "efx.h" +#include "efx_impl.h" +#if EFSYS_OPT_MON_MCDI +#include "mcdi_mon.h" +#endif + +#if EFX_OPTS_EF10() + +#include "ef10_tlv_layout.h" + + __checkReturn efx_rc_t +efx_mcdi_get_port_assignment( + __in efx_nic_t *enp, + __out uint32_t *portp) +{ + efx_mcdi_req_t req; + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_PORT_ASSIGNMENT_IN_LEN, + MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN); + efx_rc_t rc; + + EFSYS_ASSERT(EFX_FAMILY_IS_EF10(enp)); + + req.emr_cmd = MC_CMD_GET_PORT_ASSIGNMENT; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_GET_PORT_ASSIGNMENT_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN; + + efx_mcdi_execute(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail1; + } + + if (req.emr_out_length_used < MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN) { + rc = EMSGSIZE; + goto fail2; + } + + *portp = MCDI_OUT_DWORD(req, GET_PORT_ASSIGNMENT_OUT_PORT); + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +efx_mcdi_get_port_modes( + __in efx_nic_t *enp, + __out uint32_t *modesp, + __out_opt uint32_t *current_modep, + __out_opt uint32_t *default_modep) +{ + efx_mcdi_req_t req; + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_PORT_MODES_IN_LEN, + MC_CMD_GET_PORT_MODES_OUT_LEN); + efx_rc_t rc; + + EFSYS_ASSERT(EFX_FAMILY_IS_EF10(enp)); + + req.emr_cmd = MC_CMD_GET_PORT_MODES; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_GET_PORT_MODES_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_GET_PORT_MODES_OUT_LEN; + + efx_mcdi_execute(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail1; + } + + /* + * Require only Modes and DefaultMode fields, unless the current mode + * was requested (CurrentMode field was added for Medford). + */ + if (req.emr_out_length_used < + MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_OFST) { + rc = EMSGSIZE; + goto fail2; + } + if ((current_modep != NULL) && (req.emr_out_length_used < + MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_OFST + 4)) { + rc = EMSGSIZE; + goto fail3; + } + + *modesp = MCDI_OUT_DWORD(req, GET_PORT_MODES_OUT_MODES); + + if (current_modep != NULL) { + *current_modep = MCDI_OUT_DWORD(req, + GET_PORT_MODES_OUT_CURRENT_MODE); + } + + if (default_modep != NULL) { + *default_modep = MCDI_OUT_DWORD(req, + GET_PORT_MODES_OUT_DEFAULT_MODE); + } + + return (0); + +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +ef10_nic_get_port_mode_bandwidth( + __in efx_nic_t *enp, + __out uint32_t *bandwidth_mbpsp) +{ + uint32_t port_modes; + uint32_t current_mode; + efx_port_t *epp = &(enp->en_port); + + uint32_t single_lane; + uint32_t dual_lane; + uint32_t quad_lane; + uint32_t bandwidth; + efx_rc_t rc; + + if ((rc = efx_mcdi_get_port_modes(enp, &port_modes, + ¤t_mode, NULL)) != 0) { + /* No port mode info available. 
*/ + goto fail1; + } + + if (epp->ep_phy_cap_mask & (1 << EFX_PHY_CAP_25000FDX)) + single_lane = 25000; + else + single_lane = 10000; + + if (epp->ep_phy_cap_mask & (1 << EFX_PHY_CAP_50000FDX)) + dual_lane = 50000; + else + dual_lane = 20000; + + if (epp->ep_phy_cap_mask & (1 << EFX_PHY_CAP_100000FDX)) + quad_lane = 100000; + else + quad_lane = 40000; + + switch (current_mode) { + case TLV_PORT_MODE_1x1_NA: /* mode 0 */ + bandwidth = single_lane; + break; + case TLV_PORT_MODE_1x2_NA: /* mode 10 */ + case TLV_PORT_MODE_NA_1x2: /* mode 11 */ + bandwidth = dual_lane; + break; + case TLV_PORT_MODE_1x1_1x1: /* mode 2 */ + bandwidth = single_lane + single_lane; + break; + case TLV_PORT_MODE_4x1_NA: /* mode 4 */ + case TLV_PORT_MODE_NA_4x1: /* mode 8 */ + bandwidth = 4 * single_lane; + break; + case TLV_PORT_MODE_2x1_2x1: /* mode 5 */ + bandwidth = (2 * single_lane) + (2 * single_lane); + break; + case TLV_PORT_MODE_1x2_1x2: /* mode 12 */ + bandwidth = dual_lane + dual_lane; + break; + case TLV_PORT_MODE_1x2_2x1: /* mode 17 */ + case TLV_PORT_MODE_2x1_1x2: /* mode 18 */ + bandwidth = dual_lane + (2 * single_lane); + break; + /* Legacy Medford-only mode. Do not use (see bug63270) */ + case TLV_PORT_MODE_10G_10G_10G_10G_Q1_Q2: /* mode 9 */ + bandwidth = 4 * single_lane; + break; + case TLV_PORT_MODE_1x4_NA: /* mode 1 */ + case TLV_PORT_MODE_NA_1x4: /* mode 22 */ + bandwidth = quad_lane; + break; + case TLV_PORT_MODE_2x2_NA: /* mode 13 */ + case TLV_PORT_MODE_NA_2x2: /* mode 14 */ + bandwidth = 2 * dual_lane; + break; + case TLV_PORT_MODE_1x4_2x1: /* mode 6 */ + case TLV_PORT_MODE_2x1_1x4: /* mode 7 */ + bandwidth = quad_lane + (2 * single_lane); + break; + case TLV_PORT_MODE_1x4_1x2: /* mode 15 */ + case TLV_PORT_MODE_1x2_1x4: /* mode 16 */ + bandwidth = quad_lane + dual_lane; + break; + case TLV_PORT_MODE_1x4_1x4: /* mode 3 */ + bandwidth = quad_lane + quad_lane; + break; + default: + rc = EINVAL; + goto fail2; + } + + *bandwidth_mbpsp = bandwidth; + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +efx_mcdi_vadaptor_alloc( + __in efx_nic_t *enp, + __in uint32_t port_id) +{ + efx_mcdi_req_t req; + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_VADAPTOR_ALLOC_IN_LEN, + MC_CMD_VADAPTOR_ALLOC_OUT_LEN); + efx_rc_t rc; + + req.emr_cmd = MC_CMD_VADAPTOR_ALLOC; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_VADAPTOR_ALLOC_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_VADAPTOR_ALLOC_OUT_LEN; + + MCDI_IN_SET_DWORD(req, VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID, port_id); + MCDI_IN_POPULATE_DWORD_1(req, VADAPTOR_ALLOC_IN_FLAGS, + VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED, + enp->en_nic_cfg.enc_allow_set_mac_with_installed_filters ? 
1 : 0); + + efx_mcdi_execute(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail1; + } + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +efx_mcdi_vadaptor_free( + __in efx_nic_t *enp, + __in uint32_t port_id) +{ + efx_mcdi_req_t req; + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_VADAPTOR_FREE_IN_LEN, + MC_CMD_VADAPTOR_FREE_OUT_LEN); + efx_rc_t rc; + + req.emr_cmd = MC_CMD_VADAPTOR_FREE; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_VADAPTOR_FREE_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_VADAPTOR_FREE_OUT_LEN; + + MCDI_IN_SET_DWORD(req, VADAPTOR_FREE_IN_UPSTREAM_PORT_ID, port_id); + + efx_mcdi_execute(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail1; + } + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +efx_mcdi_get_mac_address_pf( + __in efx_nic_t *enp, + __out_ecount_opt(6) uint8_t mac_addrp[6]) +{ + efx_mcdi_req_t req; + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_MAC_ADDRESSES_IN_LEN, + MC_CMD_GET_MAC_ADDRESSES_OUT_LEN); + efx_rc_t rc; + + EFSYS_ASSERT(EFX_FAMILY_IS_EF10(enp)); + + req.emr_cmd = MC_CMD_GET_MAC_ADDRESSES; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_GET_MAC_ADDRESSES_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_GET_MAC_ADDRESSES_OUT_LEN; + + efx_mcdi_execute(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail1; + } + + if (req.emr_out_length_used < MC_CMD_GET_MAC_ADDRESSES_OUT_LEN) { + rc = EMSGSIZE; + goto fail2; + } + + if (MCDI_OUT_DWORD(req, GET_MAC_ADDRESSES_OUT_MAC_COUNT) < 1) { + rc = ENOENT; + goto fail3; + } + + if (mac_addrp != NULL) { + uint8_t *addrp; + + addrp = MCDI_OUT2(req, uint8_t, + GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE); + + EFX_MAC_ADDR_COPY(mac_addrp, addrp); + } + + return (0); + +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +efx_mcdi_get_mac_address_vf( + __in efx_nic_t *enp, + __out_ecount_opt(6) uint8_t mac_addrp[6]) +{ + efx_mcdi_req_t req; + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN, + MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX); + efx_rc_t rc; + + EFSYS_ASSERT(EFX_FAMILY_IS_EF10(enp)); + + req.emr_cmd = MC_CMD_VPORT_GET_MAC_ADDRESSES; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX; + + MCDI_IN_SET_DWORD(req, VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID, + EVB_PORT_ID_ASSIGNED); + + efx_mcdi_execute(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail1; + } + + if (req.emr_out_length_used < + MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN) { + rc = EMSGSIZE; + goto fail2; + } + + if (MCDI_OUT_DWORD(req, + VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT) < 1) { + rc = ENOENT; + goto fail3; + } + + if (mac_addrp != NULL) { + uint8_t *addrp; + + addrp = MCDI_OUT2(req, uint8_t, + VPORT_GET_MAC_ADDRESSES_OUT_MACADDR); + + EFX_MAC_ADDR_COPY(mac_addrp, addrp); + } + + return (0); + +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +efx_mcdi_get_clock( + __in efx_nic_t *enp, + __out uint32_t *sys_freqp, + __out uint32_t *dpcpu_freqp) +{ + efx_mcdi_req_t req; + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_CLOCK_IN_LEN, + MC_CMD_GET_CLOCK_OUT_LEN); + efx_rc_t rc; + 
+ EFSYS_ASSERT(EFX_FAMILY_IS_EF10(enp)); + + req.emr_cmd = MC_CMD_GET_CLOCK; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_GET_CLOCK_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_GET_CLOCK_OUT_LEN; + + efx_mcdi_execute(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail1; + } + + if (req.emr_out_length_used < MC_CMD_GET_CLOCK_OUT_LEN) { + rc = EMSGSIZE; + goto fail2; + } + + *sys_freqp = MCDI_OUT_DWORD(req, GET_CLOCK_OUT_SYS_FREQ); + if (*sys_freqp == 0) { + rc = EINVAL; + goto fail3; + } + *dpcpu_freqp = MCDI_OUT_DWORD(req, GET_CLOCK_OUT_DPCPU_FREQ); + if (*dpcpu_freqp == 0) { + rc = EINVAL; + goto fail4; + } + + return (0); + +fail4: + EFSYS_PROBE(fail4); +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +efx_mcdi_get_rxdp_config( + __in efx_nic_t *enp, + __out uint32_t *end_paddingp) +{ + efx_mcdi_req_t req; + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_RXDP_CONFIG_IN_LEN, + MC_CMD_GET_RXDP_CONFIG_OUT_LEN); + uint32_t end_padding; + efx_rc_t rc; + + req.emr_cmd = MC_CMD_GET_RXDP_CONFIG; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_GET_RXDP_CONFIG_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_GET_RXDP_CONFIG_OUT_LEN; + + efx_mcdi_execute(enp, &req); + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail1; + } + + if (MCDI_OUT_DWORD_FIELD(req, GET_RXDP_CONFIG_OUT_DATA, + GET_RXDP_CONFIG_OUT_PAD_HOST_DMA) == 0) { + /* RX DMA end padding is disabled */ + end_padding = 0; + } else { + switch (MCDI_OUT_DWORD_FIELD(req, GET_RXDP_CONFIG_OUT_DATA, + GET_RXDP_CONFIG_OUT_PAD_HOST_LEN)) { + case MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_64: + end_padding = 64; + break; + case MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_128: + end_padding = 128; + break; + case MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_256: + end_padding = 256; + break; + default: + rc = ENOTSUP; + goto fail2; + } + } + + *end_paddingp = end_padding; + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +efx_mcdi_get_vector_cfg( + __in efx_nic_t *enp, + __out_opt uint32_t *vec_basep, + __out_opt uint32_t *pf_nvecp, + __out_opt uint32_t *vf_nvecp) +{ + efx_mcdi_req_t req; + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_VECTOR_CFG_IN_LEN, + MC_CMD_GET_VECTOR_CFG_OUT_LEN); + efx_rc_t rc; + + req.emr_cmd = MC_CMD_GET_VECTOR_CFG; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_GET_VECTOR_CFG_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_GET_VECTOR_CFG_OUT_LEN; + + efx_mcdi_execute(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail1; + } + + if (req.emr_out_length_used < MC_CMD_GET_VECTOR_CFG_OUT_LEN) { + rc = EMSGSIZE; + goto fail2; + } + + if (vec_basep != NULL) + *vec_basep = MCDI_OUT_DWORD(req, GET_VECTOR_CFG_OUT_VEC_BASE); + if (pf_nvecp != NULL) + *pf_nvecp = MCDI_OUT_DWORD(req, GET_VECTOR_CFG_OUT_VECS_PER_PF); + if (vf_nvecp != NULL) + *vf_nvecp = MCDI_OUT_DWORD(req, GET_VECTOR_CFG_OUT_VECS_PER_VF); + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +static __checkReturn efx_rc_t +efx_mcdi_alloc_vis( + __in efx_nic_t *enp, + __in uint32_t min_vi_count, + __in uint32_t max_vi_count, + __out uint32_t *vi_basep, + __out uint32_t *vi_countp, + __out uint32_t *vi_shiftp) +{ + efx_mcdi_req_t req; + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_ALLOC_VIS_IN_LEN, + MC_CMD_ALLOC_VIS_EXT_OUT_LEN); + 
efx_rc_t rc; + + if (vi_countp == NULL) { + rc = EINVAL; + goto fail1; + } + + req.emr_cmd = MC_CMD_ALLOC_VIS; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_ALLOC_VIS_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_ALLOC_VIS_EXT_OUT_LEN; + + MCDI_IN_SET_DWORD(req, ALLOC_VIS_IN_MIN_VI_COUNT, min_vi_count); + MCDI_IN_SET_DWORD(req, ALLOC_VIS_IN_MAX_VI_COUNT, max_vi_count); + + efx_mcdi_execute(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail2; + } + + if (req.emr_out_length_used < MC_CMD_ALLOC_VIS_OUT_LEN) { + rc = EMSGSIZE; + goto fail3; + } + + *vi_basep = MCDI_OUT_DWORD(req, ALLOC_VIS_OUT_VI_BASE); + *vi_countp = MCDI_OUT_DWORD(req, ALLOC_VIS_OUT_VI_COUNT); + + /* Report VI_SHIFT if available (always zero for Huntington) */ + if (req.emr_out_length_used < MC_CMD_ALLOC_VIS_EXT_OUT_LEN) + *vi_shiftp = 0; + else + *vi_shiftp = MCDI_OUT_DWORD(req, ALLOC_VIS_EXT_OUT_VI_SHIFT); + + return (0); + +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + +static __checkReturn efx_rc_t +efx_mcdi_free_vis( + __in efx_nic_t *enp) +{ + efx_mcdi_req_t req; + efx_rc_t rc; + + EFX_STATIC_ASSERT(MC_CMD_FREE_VIS_IN_LEN == 0); + EFX_STATIC_ASSERT(MC_CMD_FREE_VIS_OUT_LEN == 0); + + req.emr_cmd = MC_CMD_FREE_VIS; + req.emr_in_buf = NULL; + req.emr_in_length = 0; + req.emr_out_buf = NULL; + req.emr_out_length = 0; + + efx_mcdi_execute_quiet(enp, &req); + + /* Ignore ELREADY (no allocated VIs, so nothing to free) */ + if ((req.emr_rc != 0) && (req.emr_rc != EALREADY)) { + rc = req.emr_rc; + goto fail1; + } + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + +static __checkReturn efx_rc_t +efx_mcdi_alloc_piobuf( + __in efx_nic_t *enp, + __out efx_piobuf_handle_t *handlep) +{ + efx_mcdi_req_t req; + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_ALLOC_PIOBUF_IN_LEN, + MC_CMD_ALLOC_PIOBUF_OUT_LEN); + efx_rc_t rc; + + if (handlep == NULL) { + rc = EINVAL; + goto fail1; + } + + req.emr_cmd = MC_CMD_ALLOC_PIOBUF; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_ALLOC_PIOBUF_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_ALLOC_PIOBUF_OUT_LEN; + + efx_mcdi_execute_quiet(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail2; + } + + if (req.emr_out_length_used < MC_CMD_ALLOC_PIOBUF_OUT_LEN) { + rc = EMSGSIZE; + goto fail3; + } + + *handlep = MCDI_OUT_DWORD(req, ALLOC_PIOBUF_OUT_PIOBUF_HANDLE); + + return (0); + +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +static __checkReturn efx_rc_t +efx_mcdi_free_piobuf( + __in efx_nic_t *enp, + __in efx_piobuf_handle_t handle) +{ + efx_mcdi_req_t req; + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_FREE_PIOBUF_IN_LEN, + MC_CMD_FREE_PIOBUF_OUT_LEN); + efx_rc_t rc; + + req.emr_cmd = MC_CMD_FREE_PIOBUF; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_FREE_PIOBUF_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_FREE_PIOBUF_OUT_LEN; + + MCDI_IN_SET_DWORD(req, FREE_PIOBUF_IN_PIOBUF_HANDLE, handle); + + efx_mcdi_execute_quiet(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail1; + } + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +static __checkReturn efx_rc_t +efx_mcdi_link_piobuf( + __in efx_nic_t *enp, + __in uint32_t vi_index, + __in efx_piobuf_handle_t handle) +{ + efx_mcdi_req_t req; + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_LINK_PIOBUF_IN_LEN, 
+ MC_CMD_LINK_PIOBUF_OUT_LEN); + efx_rc_t rc; + + req.emr_cmd = MC_CMD_LINK_PIOBUF; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_LINK_PIOBUF_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_LINK_PIOBUF_OUT_LEN; + + MCDI_IN_SET_DWORD(req, LINK_PIOBUF_IN_PIOBUF_HANDLE, handle); + MCDI_IN_SET_DWORD(req, LINK_PIOBUF_IN_TXQ_INSTANCE, vi_index); + + efx_mcdi_execute(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail1; + } + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +static __checkReturn efx_rc_t +efx_mcdi_unlink_piobuf( + __in efx_nic_t *enp, + __in uint32_t vi_index) +{ + efx_mcdi_req_t req; + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_UNLINK_PIOBUF_IN_LEN, + MC_CMD_UNLINK_PIOBUF_OUT_LEN); + efx_rc_t rc; + + req.emr_cmd = MC_CMD_UNLINK_PIOBUF; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_UNLINK_PIOBUF_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_UNLINK_PIOBUF_OUT_LEN; + + MCDI_IN_SET_DWORD(req, UNLINK_PIOBUF_IN_TXQ_INSTANCE, vi_index); + + efx_mcdi_execute_quiet(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail1; + } + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +static void +ef10_nic_alloc_piobufs( + __in efx_nic_t *enp, + __in uint32_t max_piobuf_count) +{ + efx_piobuf_handle_t *handlep; + unsigned int i; + + EFSYS_ASSERT3U(max_piobuf_count, <=, + EFX_ARRAY_SIZE(enp->en_arch.ef10.ena_piobuf_handle)); + + enp->en_arch.ef10.ena_piobuf_count = 0; + + for (i = 0; i < max_piobuf_count; i++) { + handlep = &enp->en_arch.ef10.ena_piobuf_handle[i]; + + if (efx_mcdi_alloc_piobuf(enp, handlep) != 0) + goto fail1; + + enp->en_arch.ef10.ena_pio_alloc_map[i] = 0; + enp->en_arch.ef10.ena_piobuf_count++; + } + + return; + +fail1: + for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) { + handlep = &enp->en_arch.ef10.ena_piobuf_handle[i]; + + (void) efx_mcdi_free_piobuf(enp, *handlep); + *handlep = EFX_PIOBUF_HANDLE_INVALID; + } + enp->en_arch.ef10.ena_piobuf_count = 0; +} + + +static void +ef10_nic_free_piobufs( + __in efx_nic_t *enp) +{ + efx_piobuf_handle_t *handlep; + unsigned int i; + + for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) { + handlep = &enp->en_arch.ef10.ena_piobuf_handle[i]; + + (void) efx_mcdi_free_piobuf(enp, *handlep); + *handlep = EFX_PIOBUF_HANDLE_INVALID; + } + enp->en_arch.ef10.ena_piobuf_count = 0; +} + +/* Sub-allocate a block from a piobuf */ + __checkReturn efx_rc_t +ef10_nic_pio_alloc( + __inout efx_nic_t *enp, + __out uint32_t *bufnump, + __out efx_piobuf_handle_t *handlep, + __out uint32_t *blknump, + __out uint32_t *offsetp, + __out size_t *sizep) +{ + efx_nic_cfg_t *encp = &enp->en_nic_cfg; + efx_drv_cfg_t *edcp = &enp->en_drv_cfg; + uint32_t blk_per_buf; + uint32_t buf, blk; + efx_rc_t rc; + + EFSYS_ASSERT(EFX_FAMILY_IS_EF10(enp)); + EFSYS_ASSERT(bufnump); + EFSYS_ASSERT(handlep); + EFSYS_ASSERT(blknump); + EFSYS_ASSERT(offsetp); + EFSYS_ASSERT(sizep); + + if ((edcp->edc_pio_alloc_size == 0) || + (enp->en_arch.ef10.ena_piobuf_count == 0)) { + rc = ENOMEM; + goto fail1; + } + blk_per_buf = encp->enc_piobuf_size / edcp->edc_pio_alloc_size; + + for (buf = 0; buf < enp->en_arch.ef10.ena_piobuf_count; buf++) { + uint32_t *map = &enp->en_arch.ef10.ena_pio_alloc_map[buf]; + + if (~(*map) == 0) + continue; + + EFSYS_ASSERT3U(blk_per_buf, <=, (8 * sizeof (*map))); + for (blk = 0; blk < blk_per_buf; blk++) { + if ((*map & (1u << blk)) == 0) { + *map |= (1u << blk); + goto done; + } + } + } + rc = 
ENOMEM; + goto fail2; + +done: + *handlep = enp->en_arch.ef10.ena_piobuf_handle[buf]; + *bufnump = buf; + *blknump = blk; + *sizep = edcp->edc_pio_alloc_size; + *offsetp = blk * (*sizep); + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +/* Free a piobuf sub-allocated block */ + __checkReturn efx_rc_t +ef10_nic_pio_free( + __inout efx_nic_t *enp, + __in uint32_t bufnum, + __in uint32_t blknum) +{ + uint32_t *map; + efx_rc_t rc; + + if ((bufnum >= enp->en_arch.ef10.ena_piobuf_count) || + (blknum >= (8 * sizeof (*map)))) { + rc = EINVAL; + goto fail1; + } + + map = &enp->en_arch.ef10.ena_pio_alloc_map[bufnum]; + if ((*map & (1u << blknum)) == 0) { + rc = ENOENT; + goto fail2; + } + *map &= ~(1u << blknum); + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +ef10_nic_pio_link( + __inout efx_nic_t *enp, + __in uint32_t vi_index, + __in efx_piobuf_handle_t handle) +{ + return (efx_mcdi_link_piobuf(enp, vi_index, handle)); +} + + __checkReturn efx_rc_t +ef10_nic_pio_unlink( + __inout efx_nic_t *enp, + __in uint32_t vi_index) +{ + return (efx_mcdi_unlink_piobuf(enp, vi_index)); +} + +static __checkReturn efx_rc_t +ef10_mcdi_get_pf_count( + __in efx_nic_t *enp, + __out uint32_t *pf_countp) +{ + efx_mcdi_req_t req; + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_PF_COUNT_IN_LEN, + MC_CMD_GET_PF_COUNT_OUT_LEN); + efx_rc_t rc; + + req.emr_cmd = MC_CMD_GET_PF_COUNT; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_GET_PF_COUNT_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_GET_PF_COUNT_OUT_LEN; + + efx_mcdi_execute(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail1; + } + + if (req.emr_out_length_used < MC_CMD_GET_PF_COUNT_OUT_LEN) { + rc = EMSGSIZE; + goto fail2; + } + + *pf_countp = *MCDI_OUT(req, uint8_t, + MC_CMD_GET_PF_COUNT_OUT_PF_COUNT_OFST); + + EFSYS_ASSERT(*pf_countp != 0); + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +static __checkReturn efx_rc_t +ef10_get_datapath_caps( + __in efx_nic_t *enp) +{ + efx_nic_cfg_t *encp = &(enp->en_nic_cfg); + efx_mcdi_req_t req; + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_CAPABILITIES_IN_LEN, + MC_CMD_GET_CAPABILITIES_V5_OUT_LEN); + efx_rc_t rc; + + if ((rc = ef10_mcdi_get_pf_count(enp, &encp->enc_hw_pf_count)) != 0) + goto fail1; + + + req.emr_cmd = MC_CMD_GET_CAPABILITIES; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_GET_CAPABILITIES_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_GET_CAPABILITIES_V5_OUT_LEN; + + efx_mcdi_execute_quiet(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail2; + } + + if (req.emr_out_length_used < MC_CMD_GET_CAPABILITIES_OUT_LEN) { + rc = EMSGSIZE; + goto fail3; + } + +#define CAP_FLAGS1(_req, _flag) \ + (MCDI_OUT_DWORD((_req), GET_CAPABILITIES_OUT_FLAGS1) & \ + (1u << (MC_CMD_GET_CAPABILITIES_V2_OUT_ ## _flag ## _LBN))) + +#define CAP_FLAGS2(_req, _flag) \ + (((_req).emr_out_length_used >= MC_CMD_GET_CAPABILITIES_V2_OUT_LEN) && \ + (MCDI_OUT_DWORD((_req), GET_CAPABILITIES_V2_OUT_FLAGS2) & \ + (1u << (MC_CMD_GET_CAPABILITIES_V2_OUT_ ## _flag ## _LBN)))) + + /* + * Huntington RXDP firmware inserts a 0 or 14 byte prefix. + * We only support the 14 byte prefix here. 
+ */ + if (CAP_FLAGS1(req, RX_PREFIX_LEN_14) == 0) { + rc = ENOTSUP; + goto fail4; + } + encp->enc_rx_prefix_size = 14; + +#if EFSYS_OPT_RX_SCALE + /* Check if the firmware supports additional RSS modes */ + if (CAP_FLAGS1(req, ADDITIONAL_RSS_MODES)) + encp->enc_rx_scale_additional_modes_supported = B_TRUE; + else + encp->enc_rx_scale_additional_modes_supported = B_FALSE; +#endif /* EFSYS_OPT_RX_SCALE */ + + /* Check if the firmware supports TSO */ + if (CAP_FLAGS1(req, TX_TSO)) + encp->enc_fw_assisted_tso_enabled = B_TRUE; + else + encp->enc_fw_assisted_tso_enabled = B_FALSE; + + /* Check if the firmware supports FATSOv2 */ + if (CAP_FLAGS2(req, TX_TSO_V2)) { + encp->enc_fw_assisted_tso_v2_enabled = B_TRUE; + encp->enc_fw_assisted_tso_v2_n_contexts = MCDI_OUT_WORD(req, + GET_CAPABILITIES_V2_OUT_TX_TSO_V2_N_CONTEXTS); + } else { + encp->enc_fw_assisted_tso_v2_enabled = B_FALSE; + encp->enc_fw_assisted_tso_v2_n_contexts = 0; + } + + /* Check if the firmware supports FATSOv2 encap */ + if (CAP_FLAGS2(req, TX_TSO_V2_ENCAP)) + encp->enc_fw_assisted_tso_v2_encap_enabled = B_TRUE; + else + encp->enc_fw_assisted_tso_v2_encap_enabled = B_FALSE; + + /* Check if the firmware has vadapter/vport/vswitch support */ + if (CAP_FLAGS1(req, EVB)) + encp->enc_datapath_cap_evb = B_TRUE; + else + encp->enc_datapath_cap_evb = B_FALSE; + + /* Check if the firmware supports vport reconfiguration */ + if (CAP_FLAGS1(req, VPORT_RECONFIGURE)) + encp->enc_vport_reconfigure_supported = B_TRUE; + else + encp->enc_vport_reconfigure_supported = B_FALSE; + + /* Check if the firmware supports VLAN insertion */ + if (CAP_FLAGS1(req, TX_VLAN_INSERTION)) + encp->enc_hw_tx_insert_vlan_enabled = B_TRUE; + else + encp->enc_hw_tx_insert_vlan_enabled = B_FALSE; + + /* Check if the firmware supports RX event batching */ + if (CAP_FLAGS1(req, RX_BATCHING)) + encp->enc_rx_batching_enabled = B_TRUE; + else + encp->enc_rx_batching_enabled = B_FALSE; + + /* + * Even if batching isn't reported as supported, we may still get + * batched events (see bug61153). 
+ */ + encp->enc_rx_batch_max = 16; + + /* Check if the firmware supports disabling scatter on RXQs */ + if (CAP_FLAGS1(req, RX_DISABLE_SCATTER)) + encp->enc_rx_disable_scatter_supported = B_TRUE; + else + encp->enc_rx_disable_scatter_supported = B_FALSE; + + /* Check if the firmware supports packed stream mode */ + if (CAP_FLAGS1(req, RX_PACKED_STREAM)) + encp->enc_rx_packed_stream_supported = B_TRUE; + else + encp->enc_rx_packed_stream_supported = B_FALSE; + + /* + * Check if the firmware supports configurable buffer sizes + * for packed stream mode (otherwise buffer size is 1Mbyte) + */ + if (CAP_FLAGS1(req, RX_PACKED_STREAM_VAR_BUFFERS)) + encp->enc_rx_var_packed_stream_supported = B_TRUE; + else + encp->enc_rx_var_packed_stream_supported = B_FALSE; + + /* Check if the firmware supports equal stride super-buffer mode */ + if (CAP_FLAGS2(req, EQUAL_STRIDE_SUPER_BUFFER)) + encp->enc_rx_es_super_buffer_supported = B_TRUE; + else + encp->enc_rx_es_super_buffer_supported = B_FALSE; + + /* Check if the firmware supports FW subvariant w/o Tx checksumming */ + if (CAP_FLAGS2(req, FW_SUBVARIANT_NO_TX_CSUM)) + encp->enc_fw_subvariant_no_tx_csum_supported = B_TRUE; + else + encp->enc_fw_subvariant_no_tx_csum_supported = B_FALSE; + + /* Check if the firmware supports set mac with running filters */ + if (CAP_FLAGS1(req, VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED)) + encp->enc_allow_set_mac_with_installed_filters = B_TRUE; + else + encp->enc_allow_set_mac_with_installed_filters = B_FALSE; + + /* + * Check if firmware supports the extended MC_CMD_SET_MAC, which allows + * specifying which parameters to configure. + */ + if (CAP_FLAGS1(req, SET_MAC_ENHANCED)) + encp->enc_enhanced_set_mac_supported = B_TRUE; + else + encp->enc_enhanced_set_mac_supported = B_FALSE; + + /* + * Check if firmware supports version 2 of MC_CMD_INIT_EVQ, which allows + * us to let the firmware choose the settings to use on an EVQ. + */ + if (CAP_FLAGS2(req, INIT_EVQ_V2)) + encp->enc_init_evq_v2_supported = B_TRUE; + else + encp->enc_init_evq_v2_supported = B_FALSE; + + /* + * Check if the NO_CONT_EV mode for RX events is supported. + */ + if (CAP_FLAGS2(req, INIT_RXQ_NO_CONT_EV)) + encp->enc_no_cont_ev_mode_supported = B_TRUE; + else + encp->enc_no_cont_ev_mode_supported = B_FALSE; + + /* + * Check if buffer size may and must be specified on INIT_RXQ. + * It may be always specified to efx_rx_qcreate(), but will be + * just kept libefx internal if MCDI does not support it. + */ + if (CAP_FLAGS2(req, INIT_RXQ_WITH_BUFFER_SIZE)) + encp->enc_init_rxq_with_buffer_size = B_TRUE; + else + encp->enc_init_rxq_with_buffer_size = B_FALSE; + + /* + * Check if firmware-verified NVRAM updates must be used. + * + * The firmware trusted installer requires all NVRAM updates to use + * version 2 of MC_CMD_NVRAM_UPDATE_START (to enable verified update) + * and version 2 of MC_CMD_NVRAM_UPDATE_FINISH (to verify the updated + * partition and report the result). 
+ */ + if (CAP_FLAGS2(req, NVRAM_UPDATE_REPORT_VERIFY_RESULT)) + encp->enc_nvram_update_verify_result_supported = B_TRUE; + else + encp->enc_nvram_update_verify_result_supported = B_FALSE; + + if (CAP_FLAGS2(req, NVRAM_UPDATE_POLL_VERIFY_RESULT)) + encp->enc_nvram_update_poll_verify_result_supported = B_TRUE; + else + encp->enc_nvram_update_poll_verify_result_supported = B_FALSE; + + /* + * Check if firmware update via the BUNDLE partition is supported + */ + if (CAP_FLAGS2(req, BUNDLE_UPDATE)) + encp->enc_nvram_bundle_update_supported = B_TRUE; + else + encp->enc_nvram_bundle_update_supported = B_FALSE; + + /* + * Check if firmware provides packet memory and Rx datapath + * counters. + */ + if (CAP_FLAGS1(req, PM_AND_RXDP_COUNTERS)) + encp->enc_pm_and_rxdp_counters = B_TRUE; + else + encp->enc_pm_and_rxdp_counters = B_FALSE; + + /* + * Check if the 40G MAC hardware is capable of reporting + * statistics for Tx size bins. + */ + if (CAP_FLAGS2(req, MAC_STATS_40G_TX_SIZE_BINS)) + encp->enc_mac_stats_40g_tx_size_bins = B_TRUE; + else + encp->enc_mac_stats_40g_tx_size_bins = B_FALSE; + + /* + * Check if firmware supports VXLAN and NVGRE tunnels. + * The capability indicates Geneve protocol support as well. + */ + if (CAP_FLAGS1(req, VXLAN_NVGRE)) { + encp->enc_tunnel_encapsulations_supported = + (1u << EFX_TUNNEL_PROTOCOL_VXLAN) | + (1u << EFX_TUNNEL_PROTOCOL_GENEVE) | + (1u << EFX_TUNNEL_PROTOCOL_NVGRE); + + EFX_STATIC_ASSERT(EFX_TUNNEL_MAXNENTRIES == + MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_MAXNUM); + encp->enc_tunnel_config_udp_entries_max = + EFX_TUNNEL_MAXNENTRIES; + } else { + encp->enc_tunnel_config_udp_entries_max = 0; + } + + /* + * Check if firmware reports the VI window mode. + * Medford2 has a variable VI window size (8K, 16K or 64K). + * Medford and Huntington have a fixed 8K VI window size. + */ + if (req.emr_out_length_used >= MC_CMD_GET_CAPABILITIES_V3_OUT_LEN) { + uint8_t mode = + MCDI_OUT_BYTE(req, GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE); + + switch (mode) { + case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_8K: + encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_8K; + break; + case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_16K: + encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_16K; + break; + case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_64K: + encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_64K; + break; + default: + encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_INVALID; + break; + } + } else if ((enp->en_family == EFX_FAMILY_HUNTINGTON) || + (enp->en_family == EFX_FAMILY_MEDFORD)) { + /* Huntington and Medford have fixed 8K window size */ + encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_8K; + } else { + encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_INVALID; + } + + /* Check if firmware supports extended MAC stats. 
*/ + if (req.emr_out_length_used >= MC_CMD_GET_CAPABILITIES_V4_OUT_LEN) { + /* Extended stats buffer supported */ + encp->enc_mac_stats_nstats = MCDI_OUT_WORD(req, + GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS); + } else { + /* Use Siena-compatible legacy MAC stats */ + encp->enc_mac_stats_nstats = MC_CMD_MAC_NSTATS; + } + + if (encp->enc_mac_stats_nstats >= MC_CMD_MAC_NSTATS_V2) + encp->enc_fec_counters = B_TRUE; + else + encp->enc_fec_counters = B_FALSE; + + /* Check if the firmware provides head-of-line blocking counters */ + if (CAP_FLAGS2(req, RXDP_HLB_IDLE)) + encp->enc_hlb_counters = B_TRUE; + else + encp->enc_hlb_counters = B_FALSE; + +#if EFSYS_OPT_RX_SCALE + if (CAP_FLAGS1(req, RX_RSS_LIMITED)) { + /* Only one exclusive RSS context is available per port. */ + encp->enc_rx_scale_max_exclusive_contexts = 1; + + switch (enp->en_family) { + case EFX_FAMILY_MEDFORD2: + encp->enc_rx_scale_hash_alg_mask = + (1U << EFX_RX_HASHALG_TOEPLITZ); + break; + + case EFX_FAMILY_MEDFORD: + case EFX_FAMILY_HUNTINGTON: + /* + * Packed stream firmware variant maintains a + * non-standard algorithm for hash computation. + * It implies explicit XORing together + * source + destination IP addresses (or last + * four bytes in the case of IPv6) and using the + * resulting value as the input to a Toeplitz hash. + */ + encp->enc_rx_scale_hash_alg_mask = + (1U << EFX_RX_HASHALG_PACKED_STREAM); + break; + + default: + rc = EINVAL; + goto fail5; + } + + /* Port numbers cannot contribute to the hash value */ + encp->enc_rx_scale_l4_hash_supported = B_FALSE; + } else { + /* + * Maximum number of exclusive RSS contexts. + * EF10 hardware supports 64 in total, but 6 are reserved + * for shared contexts. They are a global resource so + * not all may be available. + */ + encp->enc_rx_scale_max_exclusive_contexts = 64 - 6; + + encp->enc_rx_scale_hash_alg_mask = + (1U << EFX_RX_HASHALG_TOEPLITZ); + + /* + * It is possible to use port numbers as + * the input data for hash computation. 
+ */ + encp->enc_rx_scale_l4_hash_supported = B_TRUE; + } +#endif /* EFSYS_OPT_RX_SCALE */ + + /* Check if the firmware supports "FLAG" and "MARK" filter actions */ + if (CAP_FLAGS2(req, FILTER_ACTION_FLAG)) + encp->enc_filter_action_flag_supported = B_TRUE; + else + encp->enc_filter_action_flag_supported = B_FALSE; + + if (CAP_FLAGS2(req, FILTER_ACTION_MARK)) + encp->enc_filter_action_mark_supported = B_TRUE; + else + encp->enc_filter_action_mark_supported = B_FALSE; + + /* Get maximum supported value for "MARK" filter action */ + if (req.emr_out_length_used >= MC_CMD_GET_CAPABILITIES_V5_OUT_LEN) + encp->enc_filter_action_mark_max = MCDI_OUT_DWORD(req, + GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_MAX); + else + encp->enc_filter_action_mark_max = 0; + +#undef CAP_FLAGS1 +#undef CAP_FLAGS2 + + return (0); + +#if EFSYS_OPT_RX_SCALE +fail5: + EFSYS_PROBE(fail5); +#endif /* EFSYS_OPT_RX_SCALE */ +fail4: + EFSYS_PROBE(fail4); +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + +#define EF10_LEGACY_PF_PRIVILEGE_MASK \ + (MC_CMD_PRIVILEGE_MASK_IN_GRP_ADMIN | \ + MC_CMD_PRIVILEGE_MASK_IN_GRP_LINK | \ + MC_CMD_PRIVILEGE_MASK_IN_GRP_ONLOAD | \ + MC_CMD_PRIVILEGE_MASK_IN_GRP_PTP | \ + MC_CMD_PRIVILEGE_MASK_IN_GRP_INSECURE_FILTERS | \ + MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING | \ + MC_CMD_PRIVILEGE_MASK_IN_GRP_UNICAST | \ + MC_CMD_PRIVILEGE_MASK_IN_GRP_MULTICAST | \ + MC_CMD_PRIVILEGE_MASK_IN_GRP_BROADCAST | \ + MC_CMD_PRIVILEGE_MASK_IN_GRP_ALL_MULTICAST | \ + MC_CMD_PRIVILEGE_MASK_IN_GRP_PROMISCUOUS) + +#define EF10_LEGACY_VF_PRIVILEGE_MASK 0 + + + __checkReturn efx_rc_t +ef10_get_privilege_mask( + __in efx_nic_t *enp, + __out uint32_t *maskp) +{ + efx_nic_cfg_t *encp = &(enp->en_nic_cfg); + uint32_t mask; + efx_rc_t rc; + + if ((rc = efx_mcdi_privilege_mask(enp, encp->enc_pf, encp->enc_vf, + &mask)) != 0) { + if (rc != ENOTSUP) + goto fail1; + + /* Fallback for old firmware without privilege mask support */ + if (EFX_PCI_FUNCTION_IS_PF(encp)) { + /* Assume PF has admin privilege */ + mask = EF10_LEGACY_PF_PRIVILEGE_MASK; + } else { + /* VF is always unprivileged by default */ + mask = EF10_LEGACY_VF_PRIVILEGE_MASK; + } + } + + *maskp = mask; + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + +#define EFX_EXT_PORT_MAX 4 +#define EFX_EXT_PORT_NA 0xFF + +/* + * Table of mapping schemes from port number to external number. + * + * Each port number ultimately corresponds to a connector: either as part of + * a cable assembly attached to a module inserted in an SFP+/QSFP+ cage on + * the board, or fixed to the board (e.g. 10GBASE-T magjack on SFN5121T + * "Salina"). In general: + * + * Port number (0-based) + * | + * port mapping (n:1) + * | + * v + * External port number (1-based) + * | + * fixed (1:1) or cable assembly (1:m) + * | + * v + * Connector + * + * The external numbering refers to the cages or magjacks on the board, + * as visibly annotated on the board or back panel. This table describes + * how to determine which external cage/magjack corresponds to the port + * numbers used by the driver. + * + * The count of consecutive port numbers that map to each external number, + * is determined by the chip family and the current port mode. 
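+ * For example, an entry with base_port = { 0, 2, NA, NA } maps port numbers 0 and 1 to external port 1, and port numbers 2 and 3 to external port 2.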
+ * + * For the Huntington family, the current port mode cannot be discovered, + * but a single mapping is used by all modes for a given chip variant, + * so the mapping used is instead the last match in the table to the full + * set of port modes to which the NIC can be configured. Therefore the + * ordering of entries in the mapping table is significant. + */ +static struct ef10_external_port_map_s { + efx_family_t family; + uint32_t modes_mask; + uint8_t base_port[EFX_EXT_PORT_MAX]; +} __ef10_external_port_mappings[] = { + /* + * Modes used by Huntington family controllers where each port + * number maps to a separate cage. + * SFN7x22F (Torino): + * port 0 -> cage 1 + * port 1 -> cage 2 + * SFN7xx4F (Pavia): + * port 0 -> cage 1 + * port 1 -> cage 2 + * port 2 -> cage 3 + * port 3 -> cage 4 + */ + { + EFX_FAMILY_HUNTINGTON, + (1U << TLV_PORT_MODE_10G) | /* mode 0 */ + (1U << TLV_PORT_MODE_10G_10G) | /* mode 2 */ + (1U << TLV_PORT_MODE_10G_10G_10G_10G), /* mode 4 */ + { 0, 1, 2, 3 } + }, + /* + * Modes which for Huntington identify a chip variant where 2 + * adjacent port numbers map to each cage. + * SFN7x42Q (Monza): + * port 0 -> cage 1 + * port 1 -> cage 1 + * port 2 -> cage 2 + * port 3 -> cage 2 + */ + { + EFX_FAMILY_HUNTINGTON, + (1U << TLV_PORT_MODE_40G) | /* mode 1 */ + (1U << TLV_PORT_MODE_40G_40G) | /* mode 3 */ + (1U << TLV_PORT_MODE_40G_10G_10G) | /* mode 6 */ + (1U << TLV_PORT_MODE_10G_10G_40G), /* mode 7 */ + { 0, 2, EFX_EXT_PORT_NA, EFX_EXT_PORT_NA } + }, + /* + * Modes that on Medford allocate each port number to a separate + * cage. + * port 0 -> cage 1 + * port 1 -> cage 2 + * port 2 -> cage 3 + * port 3 -> cage 4 + */ + { + EFX_FAMILY_MEDFORD, + (1U << TLV_PORT_MODE_1x1_NA) | /* mode 0 */ + (1U << TLV_PORT_MODE_1x4_NA) | /* mode 1 */ + (1U << TLV_PORT_MODE_1x1_1x1), /* mode 2 */ + { 0, 1, 2, 3 } + }, + /* + * Modes that on Medford allocate 2 adjacent port numbers to each + * cage. + * port 0 -> cage 1 + * port 1 -> cage 1 + * port 2 -> cage 2 + * port 3 -> cage 2 + */ + { + EFX_FAMILY_MEDFORD, + (1U << TLV_PORT_MODE_1x4_1x4) | /* mode 3 */ + (1U << TLV_PORT_MODE_2x1_2x1) | /* mode 5 */ + (1U << TLV_PORT_MODE_1x4_2x1) | /* mode 6 */ + (1U << TLV_PORT_MODE_2x1_1x4) | /* mode 7 */ + /* Do not use 10G_10G_10G_10G_Q1_Q2 (see bug63270) */ + (1U << TLV_PORT_MODE_10G_10G_10G_10G_Q1_Q2), /* mode 9 */ + { 0, 2, EFX_EXT_PORT_NA, EFX_EXT_PORT_NA } + }, + /* + * Modes that on Medford allocate 4 adjacent port numbers to + * cage 1. + * port 0 -> cage 1 + * port 1 -> cage 1 + * port 2 -> cage 1 + * port 3 -> cage 1 + */ + { + EFX_FAMILY_MEDFORD, + /* Do not use 10G_10G_10G_10G_Q1 (see bug63270) */ + (1U << TLV_PORT_MODE_4x1_NA), /* mode 4 */ + { 0, EFX_EXT_PORT_NA, EFX_EXT_PORT_NA, EFX_EXT_PORT_NA } + }, + /* + * Modes that on Medford allocate 4 adjacent port numbers to + * cage 2. + * port 0 -> cage 2 + * port 1 -> cage 2 + * port 2 -> cage 2 + * port 3 -> cage 2 + */ + { + EFX_FAMILY_MEDFORD, + (1U << TLV_PORT_MODE_NA_4x1), /* mode 8 */ + { EFX_EXT_PORT_NA, 0, EFX_EXT_PORT_NA, EFX_EXT_PORT_NA } + }, + /* + * Modes that on Medford2 allocate each port number to a separate + * cage. 
+ * port 0 -> cage 1 + * port 1 -> cage 2 + * port 2 -> cage 3 + * port 3 -> cage 4 + */ + { + EFX_FAMILY_MEDFORD2, + (1U << TLV_PORT_MODE_1x1_NA) | /* mode 0 */ + (1U << TLV_PORT_MODE_1x4_NA) | /* mode 1 */ + (1U << TLV_PORT_MODE_1x1_1x1) | /* mode 2 */ + (1U << TLV_PORT_MODE_1x4_1x4) | /* mode 3 */ + (1U << TLV_PORT_MODE_1x2_NA) | /* mode 10 */ + (1U << TLV_PORT_MODE_1x2_1x2) | /* mode 12 */ + (1U << TLV_PORT_MODE_1x4_1x2) | /* mode 15 */ + (1U << TLV_PORT_MODE_1x2_1x4), /* mode 16 */ + { 0, 1, 2, 3 } + }, + /* + * Modes that on Medford2 allocate 1 port to cage 1 and the rest + * to cage 2. + * port 0 -> cage 1 + * port 1 -> cage 2 + * port 2 -> cage 2 + */ + { + EFX_FAMILY_MEDFORD2, + (1U << TLV_PORT_MODE_1x2_2x1) | /* mode 17 */ + (1U << TLV_PORT_MODE_1x4_2x1), /* mode 6 */ + { 0, 1, EFX_EXT_PORT_NA, EFX_EXT_PORT_NA } + }, + /* + * Modes that on Medford2 allocate 2 adjacent port numbers to cage 1 + * and the rest to cage 2. + * port 0 -> cage 1 + * port 1 -> cage 1 + * port 2 -> cage 2 + * port 3 -> cage 2 + */ + { + EFX_FAMILY_MEDFORD2, + (1U << TLV_PORT_MODE_2x1_2x1) | /* mode 4 */ + (1U << TLV_PORT_MODE_2x1_1x4) | /* mode 7 */ + (1U << TLV_PORT_MODE_2x2_NA) | /* mode 13 */ + (1U << TLV_PORT_MODE_2x1_1x2), /* mode 18 */ + { 0, 2, EFX_EXT_PORT_NA, EFX_EXT_PORT_NA } + }, + /* + * Modes that on Medford2 allocate up to 4 adjacent port numbers + * to cage 1. + * port 0 -> cage 1 + * port 1 -> cage 1 + * port 2 -> cage 1 + * port 3 -> cage 1 + */ + { + EFX_FAMILY_MEDFORD2, + (1U << TLV_PORT_MODE_4x1_NA), /* mode 5 */ + { 0, EFX_EXT_PORT_NA, EFX_EXT_PORT_NA, EFX_EXT_PORT_NA } + }, + /* + * Modes that on Medford2 allocate up to 4 adjacent port numbers + * to cage 2. + * port 0 -> cage 2 + * port 1 -> cage 2 + * port 2 -> cage 2 + * port 3 -> cage 2 + */ + { + EFX_FAMILY_MEDFORD2, + (1U << TLV_PORT_MODE_NA_4x1) | /* mode 8 */ + (1U << TLV_PORT_MODE_NA_1x2) | /* mode 11 */ + (1U << TLV_PORT_MODE_NA_2x2), /* mode 14 */ + { EFX_EXT_PORT_NA, 0, EFX_EXT_PORT_NA, EFX_EXT_PORT_NA } + }, +}; + +static __checkReturn efx_rc_t +ef10_external_port_mapping( + __in efx_nic_t *enp, + __in uint32_t port, + __out uint8_t *external_portp) +{ + efx_rc_t rc; + int i; + uint32_t port_modes; + uint32_t matches; + uint32_t current; + struct ef10_external_port_map_s *mapp = NULL; + int ext_index = port; /* Default 1-1 mapping */ + + if ((rc = efx_mcdi_get_port_modes(enp, &port_modes, ¤t, + NULL)) != 0) { + /* + * No current port mode information (i.e. Huntington) + * - infer mapping from available modes + */ + if ((rc = efx_mcdi_get_port_modes(enp, + &port_modes, NULL, NULL)) != 0) { + /* + * No port mode information available + * - use default mapping + */ + goto out; + } + } else { + /* Only need to scan the current mode */ + port_modes = 1 << current; + } + + /* + * Infer the internal port -> external number mapping from + * the possible port modes for this NIC. + */ + for (i = 0; i < EFX_ARRAY_SIZE(__ef10_external_port_mappings); ++i) { + struct ef10_external_port_map_s *eepmp = + &__ef10_external_port_mappings[i]; + if (eepmp->family != enp->en_family) + continue; + matches = (eepmp->modes_mask & port_modes); + if (matches != 0) { + /* + * Some modes match. For some Huntington boards + * there will be multiple matches. The mapping on the + * last match is used. 
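+ * (There is deliberately no break here: scanning continues so that a later matching table entry overrides an earlier one.)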
+ */ + mapp = eepmp; + port_modes &= ~matches; + } + } + + if (port_modes != 0) { + /* Some advertised modes are not supported */ + rc = ENOTSUP; + goto fail1; + } + +out: + if (mapp != NULL) { + /* + * External ports are assigned a sequence of consecutive + * port numbers, so find the one with the closest base_port. + */ + uint32_t delta = EFX_EXT_PORT_NA; + + for (i = 0; i < EFX_EXT_PORT_MAX; i++) { + uint32_t base = mapp->base_port[i]; + if ((base != EFX_EXT_PORT_NA) && (base <= port)) { + if ((port - base) < delta) { + delta = (port - base); + ext_index = i; + } + } + } + } + *external_portp = (uint8_t)(ext_index + 1); + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +static __checkReturn efx_rc_t +ef10_set_workaround_bug26807( + __in efx_nic_t *enp) +{ + efx_nic_cfg_t *encp = &(enp->en_nic_cfg); + uint32_t flags; + efx_rc_t rc; + + /* + * If the bug26807 workaround is enabled, then firmware has enabled + * support for chained multicast filters. Firmware will reset (FLR) + * functions which have filters in the hardware filter table when the + * workaround is enabled/disabled. + * + * We must recheck if the workaround is enabled after inserting the + * first hardware filter, in case it has been changed since this check. + */ + rc = efx_mcdi_set_workaround(enp, MC_CMD_WORKAROUND_BUG26807, + B_TRUE, &flags); + if (rc == 0) { + encp->enc_bug26807_workaround = B_TRUE; + if (flags & (1 << MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN)) { + /* + * Other functions had installed filters before the + * workaround was enabled, and they have been reset + * by firmware. + */ + EFSYS_PROBE(bug26807_workaround_flr_done); + /* FIXME: bump MC warm boot count ? */ + } + } else if (rc == EACCES) { + /* + * Unprivileged functions cannot enable the workaround in older + * firmware. + */ + encp->enc_bug26807_workaround = B_FALSE; + } else if ((rc == ENOTSUP) || (rc == ENOENT)) { + encp->enc_bug26807_workaround = B_FALSE; + } else { + goto fail1; + } + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +static __checkReturn efx_rc_t +ef10_nic_board_cfg( + __in efx_nic_t *enp) +{ + const efx_nic_ops_t *enop = enp->en_enop; + efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); + efx_nic_cfg_t *encp = &(enp->en_nic_cfg); + ef10_link_state_t els; + efx_port_t *epp = &(enp->en_port); + uint32_t board_type = 0; + uint32_t base, nvec; + uint32_t port; + uint32_t mask; + uint32_t pf; + uint32_t vf; + uint8_t mac_addr[6] = { 0 }; + efx_rc_t rc; + + /* Get the (zero-based) MCDI port number */ + if ((rc = efx_mcdi_get_port_assignment(enp, &port)) != 0) + goto fail1; + + /* EFX MCDI interface uses one-based port numbers */ + emip->emi_port = port + 1; + + encp->enc_assigned_port = port; + + if ((rc = ef10_external_port_mapping(enp, port, + &encp->enc_external_port)) != 0) + goto fail2; + + /* + * Get PCIe function number from firmware (used for + * per-function privilege and dynamic config info). + * - PCIe PF: pf = PF number, vf = 0xffff. + * - PCIe VF: pf = parent PF, vf = VF number. + */ + if ((rc = efx_mcdi_get_function_info(enp, &pf, &vf)) != 0) + goto fail3; + + encp->enc_pf = pf; + encp->enc_vf = vf; + + /* MAC address for this function */ + if (EFX_PCI_FUNCTION_IS_PF(encp)) { + rc = efx_mcdi_get_mac_address_pf(enp, mac_addr); +#if EFSYS_OPT_ALLOW_UNCONFIGURED_NIC + /* + * Disable static config checking, ONLY for manufacturing test + * and setup at the factory, to allow the static config to be + * installed. 
+ */ +#else /* EFSYS_OPT_ALLOW_UNCONFIGURED_NIC */ + if ((rc == 0) && (mac_addr[0] & 0x02)) { + /* + * If the static config does not include a global MAC + * address pool then the board may return a locally + * administered MAC address (this should only happen on + * incorrectly programmed boards). + */ + rc = EINVAL; + } +#endif /* EFSYS_OPT_ALLOW_UNCONFIGURED_NIC */ + } else { + rc = efx_mcdi_get_mac_address_vf(enp, mac_addr); + } + if (rc != 0) + goto fail4; + + EFX_MAC_ADDR_COPY(encp->enc_mac_addr, mac_addr); + + /* Board configuration (legacy) */ + rc = efx_mcdi_get_board_cfg(enp, &board_type, NULL, NULL); + if (rc != 0) { + /* Unprivileged functions may not be able to read board cfg */ + if (rc == EACCES) + board_type = 0; + else + goto fail5; + } + + encp->enc_board_type = board_type; + encp->enc_clk_mult = 1; /* not used for EF10 */ + + /* Fill out fields in enp->en_port and enp->en_nic_cfg from MCDI */ + if ((rc = efx_mcdi_get_phy_cfg(enp)) != 0) + goto fail6; + + /* + * Firmware with support for *_FEC capability bits does not + * report that the corresponding *_FEC_REQUESTED bits are supported. + * Add them here so that drivers understand that they are supported. + */ + if (epp->ep_phy_cap_mask & (1u << EFX_PHY_CAP_BASER_FEC)) + epp->ep_phy_cap_mask |= + (1u << EFX_PHY_CAP_BASER_FEC_REQUESTED); + if (epp->ep_phy_cap_mask & (1u << EFX_PHY_CAP_RS_FEC)) + epp->ep_phy_cap_mask |= + (1u << EFX_PHY_CAP_RS_FEC_REQUESTED); + if (epp->ep_phy_cap_mask & (1u << EFX_PHY_CAP_25G_BASER_FEC)) + epp->ep_phy_cap_mask |= + (1u << EFX_PHY_CAP_25G_BASER_FEC_REQUESTED); + + /* Obtain the default PHY advertised capabilities */ + if ((rc = ef10_phy_get_link(enp, &els)) != 0) + goto fail7; + epp->ep_default_adv_cap_mask = els.epls.epls_adv_cap_mask; + epp->ep_adv_cap_mask = els.epls.epls_adv_cap_mask; + + /* Check capabilities of running datapath firmware */ + if ((rc = ef10_get_datapath_caps(enp)) != 0) + goto fail8; + + /* Alignment for WPTR updates */ + encp->enc_rx_push_align = EF10_RX_WPTR_ALIGN; + + encp->enc_tx_dma_desc_size_max = EFX_MASK32(ESF_DZ_RX_KER_BYTE_CNT); + /* No boundary crossing limits */ + encp->enc_tx_dma_desc_boundary = 0; + + /* + * Maximum number of bytes into the frame the TCP header can start for + * firmware assisted TSO to work. + */ + encp->enc_tx_tso_tcp_header_offset_limit = EF10_TCP_HEADER_OFFSET_LIMIT; + + /* + * Set resource limits for MC_CMD_ALLOC_VIS. Note that we cannot use + * MC_CMD_GET_RESOURCE_LIMITS here as that reports the available + * resources (allocated to this PCIe function), which is zero until + * after we have allocated VIs. + */ + encp->enc_evq_limit = 1024; + encp->enc_rxq_limit = EFX_RXQ_LIMIT_TARGET; + encp->enc_txq_limit = EFX_TXQ_LIMIT_TARGET; + + encp->enc_buftbl_limit = UINT32_MAX; + + /* Get interrupt vector limits */ + if ((rc = efx_mcdi_get_vector_cfg(enp, &base, &nvec, NULL)) != 0) { + if (EFX_PCI_FUNCTION_IS_PF(encp)) + goto fail9; + + /* Ignore error (cannot query vector limits from a VF). */ + base = 0; + nvec = 1024; + } + encp->enc_intr_vec_base = base; + encp->enc_intr_limit = nvec; + + /* + * Get the current privilege mask. Note that this may be modified + * dynamically, so this value is informational only. DO NOT use + * the privilege mask to check for sufficient privileges, as that + * can result in time-of-check/time-of-use bugs. 
+ */ + if ((rc = ef10_get_privilege_mask(enp, &mask)) != 0) + goto fail10; + encp->enc_privilege_mask = mask; + + if ((rc = ef10_set_workaround_bug26807(enp)) != 0) + goto fail11; + + /* Get remaining controller-specific board config */ + if ((rc = enop->eno_board_cfg(enp)) != 0) + if (rc != EACCES) + goto fail12; + + return (0); + +fail12: + EFSYS_PROBE(fail12); +fail11: + EFSYS_PROBE(fail11); +fail10: + EFSYS_PROBE(fail10); +fail9: + EFSYS_PROBE(fail9); +fail8: + EFSYS_PROBE(fail8); +fail7: + EFSYS_PROBE(fail7); +fail6: + EFSYS_PROBE(fail6); +fail5: + EFSYS_PROBE(fail5); +fail4: + EFSYS_PROBE(fail4); +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +ef10_nic_probe( + __in efx_nic_t *enp) +{ + efx_nic_cfg_t *encp = &(enp->en_nic_cfg); + efx_drv_cfg_t *edcp = &(enp->en_drv_cfg); + efx_rc_t rc; + + EFSYS_ASSERT(EFX_FAMILY_IS_EF10(enp)); + + /* Read and clear any assertion state */ + if ((rc = efx_mcdi_read_assertion(enp)) != 0) + goto fail1; + + /* Exit the assertion handler */ + if ((rc = efx_mcdi_exit_assertion_handler(enp)) != 0) + if (rc != EACCES) + goto fail2; + + if ((rc = efx_mcdi_drv_attach(enp, B_TRUE)) != 0) + goto fail3; + + if ((rc = ef10_nic_board_cfg(enp)) != 0) + goto fail4; + + /* + * Set default driver config limits (based on board config). + * + * FIXME: For now allocate a fixed number of VIs which is likely to be + * sufficient and small enough to allow multiple functions on the same + * port. + */ + edcp->edc_min_vi_count = edcp->edc_max_vi_count = + MIN(128, MAX(encp->enc_rxq_limit, encp->enc_txq_limit)); + + /* The client driver must configure and enable PIO buffer support */ + edcp->edc_max_piobuf_count = 0; + edcp->edc_pio_alloc_size = 0; + +#if EFSYS_OPT_MAC_STATS + /* Wipe the MAC statistics */ + if ((rc = efx_mcdi_mac_stats_clear(enp)) != 0) + goto fail5; +#endif + +#if EFSYS_OPT_LOOPBACK + if ((rc = efx_mcdi_get_loopback_modes(enp)) != 0) + goto fail6; +#endif + +#if EFSYS_OPT_MON_STATS + if ((rc = mcdi_mon_cfg_build(enp)) != 0) { + /* Unprivileged functions do not have access to sensors */ + if (rc != EACCES) + goto fail7; + } +#endif + + encp->enc_features = enp->en_features; + + return (0); + +#if EFSYS_OPT_MON_STATS +fail7: + EFSYS_PROBE(fail7); +#endif +#if EFSYS_OPT_LOOPBACK +fail6: + EFSYS_PROBE(fail6); +#endif +#if EFSYS_OPT_MAC_STATS +fail5: + EFSYS_PROBE(fail5); +#endif +fail4: + EFSYS_PROBE(fail4); +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +ef10_nic_set_drv_limits( + __inout efx_nic_t *enp, + __in efx_drv_limits_t *edlp) +{ + efx_nic_cfg_t *encp = &(enp->en_nic_cfg); + efx_drv_cfg_t *edcp = &(enp->en_drv_cfg); + uint32_t min_evq_count, max_evq_count; + uint32_t min_rxq_count, max_rxq_count; + uint32_t min_txq_count, max_txq_count; + efx_rc_t rc; + + if (edlp == NULL) { + rc = EINVAL; + goto fail1; + } + + /* Get minimum required and maximum usable VI limits */ + min_evq_count = MIN(edlp->edl_min_evq_count, encp->enc_evq_limit); + min_rxq_count = MIN(edlp->edl_min_rxq_count, encp->enc_rxq_limit); + min_txq_count = MIN(edlp->edl_min_txq_count, encp->enc_txq_limit); + + edcp->edc_min_vi_count = + MAX(min_evq_count, MAX(min_rxq_count, min_txq_count)); + + max_evq_count = MIN(edlp->edl_max_evq_count, encp->enc_evq_limit); + max_rxq_count = MIN(edlp->edl_max_rxq_count, encp->enc_rxq_limit); + max_txq_count = MIN(edlp->edl_max_txq_count, 
encp->enc_txq_limit); + + edcp->edc_max_vi_count = + MAX(max_evq_count, MAX(max_rxq_count, max_txq_count)); + + /* + * Check limits for sub-allocated piobuf blocks. + * PIO is optional, so don't fail if the limits are incorrect. + */ + if ((encp->enc_piobuf_size == 0) || + (encp->enc_piobuf_limit == 0) || + (edlp->edl_min_pio_alloc_size == 0) || + (edlp->edl_min_pio_alloc_size > encp->enc_piobuf_size)) { + /* Disable PIO */ + edcp->edc_max_piobuf_count = 0; + edcp->edc_pio_alloc_size = 0; + } else { + uint32_t blk_size, blk_count, blks_per_piobuf; + + blk_size = + MAX(edlp->edl_min_pio_alloc_size, + encp->enc_piobuf_min_alloc_size); + + blks_per_piobuf = encp->enc_piobuf_size / blk_size; + EFSYS_ASSERT3U(blks_per_piobuf, <=, 32); + + blk_count = (encp->enc_piobuf_limit * blks_per_piobuf); + + /* A zero max pio alloc count means unlimited */ + if ((edlp->edl_max_pio_alloc_count > 0) && + (edlp->edl_max_pio_alloc_count < blk_count)) { + blk_count = edlp->edl_max_pio_alloc_count; + } + + edcp->edc_pio_alloc_size = blk_size; + edcp->edc_max_piobuf_count = + (blk_count + (blks_per_piobuf - 1)) / blks_per_piobuf; + } + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + + __checkReturn efx_rc_t +ef10_nic_reset( + __in efx_nic_t *enp) +{ + efx_mcdi_req_t req; + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_ENTITY_RESET_IN_LEN, + MC_CMD_ENTITY_RESET_OUT_LEN); + efx_rc_t rc; + + /* ef10_nic_reset() is called to recover from BADASSERT failures. */ + if ((rc = efx_mcdi_read_assertion(enp)) != 0) + goto fail1; + if ((rc = efx_mcdi_exit_assertion_handler(enp)) != 0) + goto fail2; + + req.emr_cmd = MC_CMD_ENTITY_RESET; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_ENTITY_RESET_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_ENTITY_RESET_OUT_LEN; + + MCDI_IN_POPULATE_DWORD_1(req, ENTITY_RESET_IN_FLAG, + ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET, 1); + + efx_mcdi_execute(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail3; + } + + /* Clear RX/TX DMA queue errors */ + enp->en_reset_flags &= ~(EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR); + + return (0); + +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +static __checkReturn efx_rc_t +ef10_upstream_port_vadaptor_alloc( + __in efx_nic_t *enp) +{ + uint32_t retry; + uint32_t delay_us; + efx_rc_t rc; + + /* + * On a VF, this may fail with MC_CMD_ERR_NO_EVB_PORT (ENOENT) if the PF + * driver has yet to bring up the EVB port. See bug 56147. In this case, + * retry the request several times after waiting a while. The wait time + * between retries starts small (10ms) and exponentially increases. + * Total wait time is a little over two seconds. Retry logic in the + * client driver may mean this whole loop is repeated if it continues to + * fail. + */ + retry = 0; + delay_us = 10000; + while ((rc = efx_mcdi_vadaptor_alloc(enp, EVB_PORT_ID_ASSIGNED)) != 0) { + if (EFX_PCI_FUNCTION_IS_PF(&enp->en_nic_cfg) || + (rc != ENOENT)) { + /* + * Do not retry alloc for PF, or for other errors on + * a VF. + */ + goto fail1; + } + + /* VF startup before PF is ready. Retry allocation. 
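+ * (With delay_us starting at 10000 and quadrupling while below 500000, the sleeps work out to roughly 10ms, 40ms, 160ms and then 640ms for the last three attempts, about 2.1 seconds in total.)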
*/ + if (retry > 5) { + /* Too many attempts */ + rc = EINVAL; + goto fail2; + } + EFSYS_PROBE1(mcdi_no_evb_port_retry, int, retry); + EFSYS_SLEEP(delay_us); + retry++; + if (delay_us < 500000) + delay_us <<= 2; + } + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +ef10_nic_init( + __in efx_nic_t *enp) +{ + efx_drv_cfg_t *edcp = &(enp->en_drv_cfg); + uint32_t min_vi_count, max_vi_count; + uint32_t vi_count, vi_base, vi_shift; + uint32_t i; + uint32_t vi_window_size; + efx_rc_t rc; + boolean_t alloc_vadaptor = B_TRUE; + + EFSYS_ASSERT(EFX_FAMILY_IS_EF10(enp)); + + /* Enable reporting of some events (e.g. link change) */ + if ((rc = efx_mcdi_log_ctrl(enp)) != 0) + goto fail1; + + /* Allocate (optional) on-chip PIO buffers */ + ef10_nic_alloc_piobufs(enp, edcp->edc_max_piobuf_count); + + /* + * For best performance, PIO writes should use a write-combined + * (WC) memory mapping. Using a separate WC mapping for the PIO + * aperture of each VI would be a burden to drivers (and not + * possible if the host page size is >4Kbyte). + * + * To avoid this we use a single uncached (UC) mapping for VI + * register access, and a single WC mapping for extra VIs used + * for PIO writes. + * + * Each piobuf must be linked to a VI in the WC mapping, and to + * each VI that is using a sub-allocated block from the piobuf. + */ + min_vi_count = edcp->edc_min_vi_count; + max_vi_count = + edcp->edc_max_vi_count + enp->en_arch.ef10.ena_piobuf_count; + + /* Ensure that the previously attached driver's VIs are freed */ + if ((rc = efx_mcdi_free_vis(enp)) != 0) + goto fail2; + + /* + * Reserve VI resources (EVQ+RXQ+TXQ) for this PCIe function. If this + * fails then retrying the request for fewer VI resources may succeed. 
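+ * (The maximum requested here already includes the extra VIs needed to map the piobufs allocated above.)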
+ */ + vi_count = 0; + if ((rc = efx_mcdi_alloc_vis(enp, min_vi_count, max_vi_count, + &vi_base, &vi_count, &vi_shift)) != 0) + goto fail3; + + EFSYS_PROBE2(vi_alloc, uint32_t, vi_base, uint32_t, vi_count); + + if (vi_count < min_vi_count) { + rc = ENOMEM; + goto fail4; + } + + enp->en_arch.ef10.ena_vi_base = vi_base; + enp->en_arch.ef10.ena_vi_count = vi_count; + enp->en_arch.ef10.ena_vi_shift = vi_shift; + + if (vi_count < min_vi_count + enp->en_arch.ef10.ena_piobuf_count) { + /* Not enough extra VIs to map piobufs */ + ef10_nic_free_piobufs(enp); + } + + enp->en_arch.ef10.ena_pio_write_vi_base = + vi_count - enp->en_arch.ef10.ena_piobuf_count; + + EFSYS_ASSERT3U(enp->en_nic_cfg.enc_vi_window_shift, !=, + EFX_VI_WINDOW_SHIFT_INVALID); + EFSYS_ASSERT3U(enp->en_nic_cfg.enc_vi_window_shift, <=, + EFX_VI_WINDOW_SHIFT_64K); + vi_window_size = 1U << enp->en_nic_cfg.enc_vi_window_shift; + + /* Save UC memory mapping details */ + enp->en_arch.ef10.ena_uc_mem_map_offset = 0; + if (enp->en_arch.ef10.ena_piobuf_count > 0) { + enp->en_arch.ef10.ena_uc_mem_map_size = + (vi_window_size * + enp->en_arch.ef10.ena_pio_write_vi_base); + } else { + enp->en_arch.ef10.ena_uc_mem_map_size = + (vi_window_size * + enp->en_arch.ef10.ena_vi_count); + } + + /* Save WC memory mapping details */ + enp->en_arch.ef10.ena_wc_mem_map_offset = + enp->en_arch.ef10.ena_uc_mem_map_offset + + enp->en_arch.ef10.ena_uc_mem_map_size; + + enp->en_arch.ef10.ena_wc_mem_map_size = + (vi_window_size * + enp->en_arch.ef10.ena_piobuf_count); + + /* Link piobufs to extra VIs in WC mapping */ + if (enp->en_arch.ef10.ena_piobuf_count > 0) { + for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) { + rc = efx_mcdi_link_piobuf(enp, + enp->en_arch.ef10.ena_pio_write_vi_base + i, + enp->en_arch.ef10.ena_piobuf_handle[i]); + if (rc != 0) + break; + } + } + + /* + * For SR-IOV use case, vAdaptor is allocated for PF and associated VFs + * during NIC initialization when vSwitch is created and vports are + * allocated. Hence, skip vAdaptor allocation for EVB and update vport + * id in NIC structure with the one allocated for PF. + */ + + enp->en_vport_id = EVB_PORT_ID_ASSIGNED; +#if EFSYS_OPT_EVB + if ((enp->en_vswitchp != NULL) && (enp->en_vswitchp->ev_evcp != NULL)) { + /* For EVB use vport allocated on vswitch */ + enp->en_vport_id = enp->en_vswitchp->ev_evcp->evc_vport_id; + alloc_vadaptor = B_FALSE; + } +#endif + if (alloc_vadaptor != B_FALSE) { + /* Allocate a vAdaptor attached to our upstream vPort/pPort */ + if ((rc = ef10_upstream_port_vadaptor_alloc(enp)) != 0) + goto fail5; + } + enp->en_nic_cfg.enc_mcdi_max_payload_length = MCDI_CTL_SDU_LEN_MAX_V2; + + return (0); + +fail5: + EFSYS_PROBE(fail5); +fail4: + EFSYS_PROBE(fail4); +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); + + ef10_nic_free_piobufs(enp); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +ef10_nic_get_vi_pool( + __in efx_nic_t *enp, + __out uint32_t *vi_countp) +{ + EFSYS_ASSERT(EFX_FAMILY_IS_EF10(enp)); + + /* + * Report VIs that the client driver can use. + * Do not include VIs used for PIO buffer writes. 
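+ * (The extra VIs used for PIO writes sit at the top of the allocated range, so ena_pio_write_vi_base is also the number of VIs available to the client; it equals ena_vi_count when no piobufs were allocated.)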
+ */ + *vi_countp = enp->en_arch.ef10.ena_pio_write_vi_base; + + return (0); +} + + __checkReturn efx_rc_t +ef10_nic_get_bar_region( + __in efx_nic_t *enp, + __in efx_nic_region_t region, + __out uint32_t *offsetp, + __out size_t *sizep) +{ + efx_rc_t rc; + + EFSYS_ASSERT(EFX_FAMILY_IS_EF10(enp)); + + /* + * TODO: Specify host memory mapping alignment and granularity + * in efx_drv_limits_t so that they can be taken into account + * when allocating extra VIs for PIO writes. + */ + switch (region) { + case EFX_REGION_VI: + /* UC mapped memory BAR region for VI registers */ + *offsetp = enp->en_arch.ef10.ena_uc_mem_map_offset; + *sizep = enp->en_arch.ef10.ena_uc_mem_map_size; + break; + + case EFX_REGION_PIO_WRITE_VI: + /* WC mapped memory BAR region for piobuf writes */ + *offsetp = enp->en_arch.ef10.ena_wc_mem_map_offset; + *sizep = enp->en_arch.ef10.ena_wc_mem_map_size; + break; + + default: + rc = EINVAL; + goto fail1; + } + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn boolean_t +ef10_nic_hw_unavailable( + __in efx_nic_t *enp) +{ + efx_dword_t dword; + + if (enp->en_reset_flags & EFX_RESET_HW_UNAVAIL) + return (B_TRUE); + + EFX_BAR_READD(enp, ER_DZ_BIU_MC_SFT_STATUS_REG, &dword, B_FALSE); + if (EFX_DWORD_FIELD(dword, EFX_DWORD_0) == 0xffffffff) + goto unavail; + + return (B_FALSE); + +unavail: + ef10_nic_set_hw_unavailable(enp); + + return (B_TRUE); +} + + void +ef10_nic_set_hw_unavailable( + __in efx_nic_t *enp) +{ + EFSYS_PROBE(hw_unavail); + enp->en_reset_flags |= EFX_RESET_HW_UNAVAIL; +} + + + void +ef10_nic_fini( + __in efx_nic_t *enp) +{ + uint32_t i; + efx_rc_t rc; + boolean_t do_vadaptor_free = B_TRUE; + +#if EFSYS_OPT_EVB + if (enp->en_vswitchp != NULL) { + /* + * For SR-IOV the vAdaptor is freed with the vswitch, + * so do not free it here. 
+ */ + do_vadaptor_free = B_FALSE; + } +#endif + if (do_vadaptor_free != B_FALSE) { + (void) efx_mcdi_vadaptor_free(enp, enp->en_vport_id); + enp->en_vport_id = EVB_PORT_ID_NULL; + } + + /* Unlink piobufs from extra VIs in WC mapping */ + if (enp->en_arch.ef10.ena_piobuf_count > 0) { + for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) { + rc = efx_mcdi_unlink_piobuf(enp, + enp->en_arch.ef10.ena_pio_write_vi_base + i); + if (rc != 0) + break; + } + } + + ef10_nic_free_piobufs(enp); + + (void) efx_mcdi_free_vis(enp); + enp->en_arch.ef10.ena_vi_count = 0; +} + + void +ef10_nic_unprobe( + __in efx_nic_t *enp) +{ +#if EFSYS_OPT_MON_STATS + mcdi_mon_cfg_free(enp); +#endif /* EFSYS_OPT_MON_STATS */ + (void) efx_mcdi_drv_attach(enp, B_FALSE); +} + +#if EFSYS_OPT_DIAG + + __checkReturn efx_rc_t +ef10_nic_register_test( + __in efx_nic_t *enp) +{ + efx_rc_t rc; + + /* FIXME */ + _NOTE(ARGUNUSED(enp)) + _NOTE(CONSTANTCONDITION) + if (B_FALSE) { + rc = ENOTSUP; + goto fail1; + } + /* FIXME */ + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +#endif /* EFSYS_OPT_DIAG */ + +#if EFSYS_OPT_FW_SUBVARIANT_AWARE + + __checkReturn efx_rc_t +efx_mcdi_get_nic_global( + __in efx_nic_t *enp, + __in uint32_t key, + __out uint32_t *valuep) +{ + efx_mcdi_req_t req; + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_NIC_GLOBAL_IN_LEN, + MC_CMD_GET_NIC_GLOBAL_OUT_LEN); + efx_rc_t rc; + + req.emr_cmd = MC_CMD_GET_NIC_GLOBAL; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_GET_NIC_GLOBAL_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_GET_NIC_GLOBAL_OUT_LEN; + + MCDI_IN_SET_DWORD(req, GET_NIC_GLOBAL_IN_KEY, key); + + efx_mcdi_execute(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail1; + } + + if (req.emr_out_length_used != MC_CMD_GET_NIC_GLOBAL_OUT_LEN) { + rc = EMSGSIZE; + goto fail2; + } + + *valuep = MCDI_OUT_DWORD(req, GET_NIC_GLOBAL_OUT_VALUE); + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +efx_mcdi_set_nic_global( + __in efx_nic_t *enp, + __in uint32_t key, + __in uint32_t value) +{ + efx_mcdi_req_t req; + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_SET_NIC_GLOBAL_IN_LEN, 0); + efx_rc_t rc; + + req.emr_cmd = MC_CMD_SET_NIC_GLOBAL; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_SET_NIC_GLOBAL_IN_LEN; + req.emr_out_buf = NULL; + req.emr_out_length = 0; + + MCDI_IN_SET_DWORD(req, SET_NIC_GLOBAL_IN_KEY, key); + MCDI_IN_SET_DWORD(req, SET_NIC_GLOBAL_IN_VALUE, value); + + efx_mcdi_execute(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail1; + } + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +#endif /* EFSYS_OPT_FW_SUBVARIANT_AWARE */ + +#endif /* EFX_OPTS_EF10() */ diff --git a/src/spdk/dpdk/drivers/net/sfc/base/ef10_nvram.c b/src/spdk/dpdk/drivers/net/sfc/base/ef10_nvram.c new file mode 100644 index 000000000..2fa564605 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/base/ef10_nvram.c @@ -0,0 +1,2561 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2012-2019 Solarflare Communications Inc. 
+ */ + +#include "efx.h" +#include "efx_impl.h" + +#if EFX_OPTS_EF10() + +#if EFSYS_OPT_VPD || EFSYS_OPT_NVRAM + +#include "ef10_tlv_layout.h" + +/* Cursor for TLV partition format */ +typedef struct tlv_cursor_s { + uint32_t *block; /* Base of data block */ + uint32_t *current; /* Cursor position */ + uint32_t *end; /* End tag position */ + uint32_t *limit; /* Last dword of data block */ +} tlv_cursor_t; + +typedef struct nvram_partition_s { + uint16_t type; + uint8_t chip_select; + uint8_t flags; + /* + * The full length of the NVRAM partition. + * This is different from tlv_partition_header.total_length, + * which can be smaller. + */ + uint32_t length; + uint32_t erase_size; + uint32_t *data; + tlv_cursor_t tlv_cursor; +} nvram_partition_t; + + +static __checkReturn efx_rc_t +tlv_validate_state( + __inout tlv_cursor_t *cursor); + + +static void +tlv_init_block( + __out uint32_t *block) +{ + *block = __CPU_TO_LE_32(TLV_TAG_END); +} + +static uint32_t +tlv_tag( + __in tlv_cursor_t *cursor) +{ + uint32_t dword, tag; + + dword = cursor->current[0]; + tag = __LE_TO_CPU_32(dword); + + return (tag); +} + +static size_t +tlv_length( + __in tlv_cursor_t *cursor) +{ + uint32_t dword, length; + + if (tlv_tag(cursor) == TLV_TAG_END) + return (0); + + dword = cursor->current[1]; + length = __LE_TO_CPU_32(dword); + + return ((size_t)length); +} + +static uint8_t * +tlv_value( + __in tlv_cursor_t *cursor) +{ + if (tlv_tag(cursor) == TLV_TAG_END) + return (NULL); + + return ((uint8_t *)(&cursor->current[2])); +} + +static uint8_t * +tlv_item( + __in tlv_cursor_t *cursor) +{ + if (tlv_tag(cursor) == TLV_TAG_END) + return (NULL); + + return ((uint8_t *)cursor->current); +} + +/* + * TLV item DWORD length is tag + length + value (rounded up to DWORD) + * equivalent to tlv_n_words_for_len in mc-comms tlv.c + */ +#define TLV_DWORD_COUNT(length) \ + (1 + 1 + (((length) + sizeof (uint32_t) - 1) / sizeof (uint32_t))) + + +static uint32_t * +tlv_next_item_ptr( + __in tlv_cursor_t *cursor) +{ + uint32_t length; + + length = tlv_length(cursor); + + return (cursor->current + TLV_DWORD_COUNT(length)); +} + +static __checkReturn efx_rc_t +tlv_advance( + __inout tlv_cursor_t *cursor) +{ + efx_rc_t rc; + + if ((rc = tlv_validate_state(cursor)) != 0) + goto fail1; + + if (cursor->current == cursor->end) { + /* No more tags after END tag */ + cursor->current = NULL; + rc = ENOENT; + goto fail2; + } + + /* Advance to next item and validate */ + cursor->current = tlv_next_item_ptr(cursor); + + if ((rc = tlv_validate_state(cursor)) != 0) + goto fail3; + + return (0); + +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +static efx_rc_t +tlv_rewind( + __in tlv_cursor_t *cursor) +{ + efx_rc_t rc; + + cursor->current = cursor->block; + + if ((rc = tlv_validate_state(cursor)) != 0) + goto fail1; + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +static efx_rc_t +tlv_find( + __inout tlv_cursor_t *cursor, + __in uint32_t tag) +{ + efx_rc_t rc; + + rc = tlv_rewind(cursor); + while (rc == 0) { + if (tlv_tag(cursor) == tag) + break; + + rc = tlv_advance(cursor); + } + return (rc); +} + +static __checkReturn efx_rc_t +tlv_validate_state( + __inout tlv_cursor_t *cursor) +{ + efx_rc_t rc; + + /* Check cursor position */ + if (cursor->current < cursor->block) { + rc = EINVAL; + goto fail1; + } + if (cursor->current > cursor->limit) { + rc = EINVAL; + goto fail2; + } + + if (tlv_tag(cursor) != TLV_TAG_END) { + /* Check current 
item has space for tag and length */ + if (cursor->current > (cursor->limit - 1)) { + cursor->current = NULL; + rc = EFAULT; + goto fail3; + } + + /* Check we have value data for current item and an END tag */ + if (tlv_next_item_ptr(cursor) > cursor->limit) { + cursor->current = NULL; + rc = EFAULT; + goto fail4; + } + } + + return (0); + +fail4: + EFSYS_PROBE(fail4); +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +static efx_rc_t +tlv_init_cursor( + __out tlv_cursor_t *cursor, + __in uint32_t *block, + __in uint32_t *limit, + __in uint32_t *current) +{ + cursor->block = block; + cursor->limit = limit; + + cursor->current = current; + cursor->end = NULL; + + return (tlv_validate_state(cursor)); +} + +static __checkReturn efx_rc_t +tlv_init_cursor_from_size( + __out tlv_cursor_t *cursor, + __in_bcount(size) + uint8_t *block, + __in size_t size) +{ + uint32_t *limit; + limit = (uint32_t *)(block + size - sizeof (uint32_t)); + return (tlv_init_cursor(cursor, (uint32_t *)block, + limit, (uint32_t *)block)); +} + +static __checkReturn efx_rc_t +tlv_init_cursor_at_offset( + __out tlv_cursor_t *cursor, + __in_bcount(size) + uint8_t *block, + __in size_t size, + __in size_t offset) +{ + uint32_t *limit; + uint32_t *current; + limit = (uint32_t *)(block + size - sizeof (uint32_t)); + current = (uint32_t *)(block + offset); + return (tlv_init_cursor(cursor, (uint32_t *)block, limit, current)); +} + +static __checkReturn efx_rc_t +tlv_require_end( + __inout tlv_cursor_t *cursor) +{ + uint32_t *pos; + efx_rc_t rc; + + if (cursor->end == NULL) { + pos = cursor->current; + if ((rc = tlv_find(cursor, TLV_TAG_END)) != 0) + goto fail1; + + cursor->end = cursor->current; + cursor->current = pos; + } + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +static size_t +tlv_block_length_used( + __inout tlv_cursor_t *cursor) +{ + efx_rc_t rc; + + if ((rc = tlv_validate_state(cursor)) != 0) + goto fail1; + + if ((rc = tlv_require_end(cursor)) != 0) + goto fail2; + + /* Return space used (including the END tag) */ + return (cursor->end + 1 - cursor->block) * sizeof (uint32_t); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (0); +} + +static uint32_t * +tlv_last_segment_end( + __in tlv_cursor_t *cursor) +{ + tlv_cursor_t segment_cursor; + uint32_t *last_segment_end = cursor->block; + uint32_t *segment_start = cursor->block; + + /* + * Go through each segment and check that it has an end tag. If there + * is no end tag then the previous segment was the last valid one, + * so return the pointer to its end tag. 
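+ * If even the first segment has no end tag, the initial value (the start
+ * of the block) is returned.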
+ */ + for (;;) { + if (tlv_init_cursor(&segment_cursor, segment_start, + cursor->limit, segment_start) != 0) + break; + if (tlv_require_end(&segment_cursor) != 0) + break; + last_segment_end = segment_cursor.end; + segment_start = segment_cursor.end + 1; + } + + return (last_segment_end); +} + + +static uint32_t * +tlv_write( + __in tlv_cursor_t *cursor, + __in uint32_t tag, + __in_bcount(size) uint8_t *data, + __in size_t size) +{ + uint32_t len = size; + uint32_t *ptr; + + ptr = cursor->current; + + *ptr++ = __CPU_TO_LE_32(tag); + *ptr++ = __CPU_TO_LE_32(len); + + if (len > 0) { + ptr[(len - 1) / sizeof (uint32_t)] = 0; + memcpy(ptr, data, len); + ptr += EFX_P2ROUNDUP(uint32_t, len, + sizeof (uint32_t)) / sizeof (*ptr); + } + + return (ptr); +} + +static __checkReturn efx_rc_t +tlv_insert( + __inout tlv_cursor_t *cursor, + __in uint32_t tag, + __in_bcount(size) + uint8_t *data, + __in size_t size) +{ + unsigned int delta; + uint32_t *last_segment_end; + efx_rc_t rc; + + if ((rc = tlv_validate_state(cursor)) != 0) + goto fail1; + + if ((rc = tlv_require_end(cursor)) != 0) + goto fail2; + + if (tag == TLV_TAG_END) { + rc = EINVAL; + goto fail3; + } + + last_segment_end = tlv_last_segment_end(cursor); + + delta = TLV_DWORD_COUNT(size); + if (last_segment_end + 1 + delta > cursor->limit) { + rc = ENOSPC; + goto fail4; + } + + /* Move data up: new space at cursor->current */ + memmove(cursor->current + delta, cursor->current, + (last_segment_end + 1 - cursor->current) * sizeof (uint32_t)); + + /* Adjust the end pointer */ + cursor->end += delta; + + /* Write new TLV item */ + tlv_write(cursor, tag, data, size); + + return (0); + +fail4: + EFSYS_PROBE(fail4); +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +static __checkReturn efx_rc_t +tlv_delete( + __inout tlv_cursor_t *cursor) +{ + unsigned int delta; + uint32_t *last_segment_end; + efx_rc_t rc; + + if ((rc = tlv_validate_state(cursor)) != 0) + goto fail1; + + if (tlv_tag(cursor) == TLV_TAG_END) { + rc = EINVAL; + goto fail2; + } + + delta = TLV_DWORD_COUNT(tlv_length(cursor)); + + if ((rc = tlv_require_end(cursor)) != 0) + goto fail3; + + last_segment_end = tlv_last_segment_end(cursor); + + /* Shuffle things down, destroying the item at cursor->current */ + memmove(cursor->current, cursor->current + delta, + (last_segment_end + 1 - cursor->current) * sizeof (uint32_t)); + /* Zero the new space at the end of the TLV chain */ + memset(last_segment_end + 1 - delta, 0, delta * sizeof (uint32_t)); + /* Adjust the end pointer */ + cursor->end -= delta; + + return (0); + +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +static __checkReturn efx_rc_t +tlv_modify( + __inout tlv_cursor_t *cursor, + __in uint32_t tag, + __in_bcount(size) + uint8_t *data, + __in size_t size) +{ + uint32_t *pos; + unsigned int old_ndwords; + unsigned int new_ndwords; + unsigned int delta; + uint32_t *last_segment_end; + efx_rc_t rc; + + if ((rc = tlv_validate_state(cursor)) != 0) + goto fail1; + + if (tlv_tag(cursor) == TLV_TAG_END) { + rc = EINVAL; + goto fail2; + } + if (tlv_tag(cursor) != tag) { + rc = EINVAL; + goto fail3; + } + + old_ndwords = TLV_DWORD_COUNT(tlv_length(cursor)); + new_ndwords = TLV_DWORD_COUNT(size); + + if ((rc = tlv_require_end(cursor)) != 0) + goto fail4; + + last_segment_end = tlv_last_segment_end(cursor); + + if (new_ndwords > old_ndwords) { + /* Expand space used for TLV item */ + 
delta = new_ndwords - old_ndwords; + pos = cursor->current + old_ndwords; + + if (last_segment_end + 1 + delta > cursor->limit) { + rc = ENOSPC; + goto fail5; + } + + /* Move up: new space at (cursor->current + old_ndwords) */ + memmove(pos + delta, pos, + (last_segment_end + 1 - pos) * sizeof (uint32_t)); + + /* Adjust the end pointer */ + cursor->end += delta; + + } else if (new_ndwords < old_ndwords) { + /* Shrink space used for TLV item */ + delta = old_ndwords - new_ndwords; + pos = cursor->current + new_ndwords; + + /* Move down: remove words at (cursor->current + new_ndwords) */ + memmove(pos, pos + delta, + (last_segment_end + 1 - pos) * sizeof (uint32_t)); + + /* Zero the new space at the end of the TLV chain */ + memset(last_segment_end + 1 - delta, 0, + delta * sizeof (uint32_t)); + + /* Adjust the end pointer */ + cursor->end -= delta; + } + + /* Write new data */ + tlv_write(cursor, tag, data, size); + + return (0); + +fail5: + EFSYS_PROBE(fail5); +fail4: + EFSYS_PROBE(fail4); +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +static uint32_t checksum_tlv_partition( + __in nvram_partition_t *partition) +{ + tlv_cursor_t *cursor; + uint32_t *ptr; + uint32_t *end; + uint32_t csum; + size_t len; + + cursor = &partition->tlv_cursor; + len = tlv_block_length_used(cursor); + EFSYS_ASSERT3U((len & 3), ==, 0); + + csum = 0; + ptr = partition->data; + end = &ptr[len >> 2]; + + while (ptr < end) + csum += __LE_TO_CPU_32(*ptr++); + + return (csum); +} + +static __checkReturn efx_rc_t +tlv_update_partition_len_and_cks( + __in tlv_cursor_t *cursor) +{ + efx_rc_t rc; + nvram_partition_t partition; + struct tlv_partition_header *header; + struct tlv_partition_trailer *trailer; + size_t new_len; + + /* + * We just modified the partition, so the total length may not be + * valid. Don't use tlv_find(), which performs some sanity checks + * that may fail here. + */ + partition.data = cursor->block; + memcpy(&partition.tlv_cursor, cursor, sizeof (*cursor)); + header = (struct tlv_partition_header *)partition.data; + /* Sanity check. */ + if (__LE_TO_CPU_32(header->tag) != TLV_TAG_PARTITION_HEADER) { + rc = EFAULT; + goto fail1; + } + new_len = tlv_block_length_used(&partition.tlv_cursor); + if (new_len == 0) { + rc = EFAULT; + goto fail2; + } + header->total_length = __CPU_TO_LE_32(new_len); + /* Ensure the modified partition always has a new generation count. 
*/ + header->generation = __CPU_TO_LE_32( + __LE_TO_CPU_32(header->generation) + 1); + + trailer = (struct tlv_partition_trailer *)((uint8_t *)header + + new_len - sizeof (*trailer) - sizeof (uint32_t)); + trailer->generation = header->generation; + trailer->checksum = __CPU_TO_LE_32( + __LE_TO_CPU_32(trailer->checksum) - + checksum_tlv_partition(&partition)); + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +/* Validate buffer contents (before writing to flash) */ + __checkReturn efx_rc_t +ef10_nvram_buffer_validate( + __in uint32_t partn, + __in_bcount(partn_size) caddr_t partn_data, + __in size_t partn_size) +{ + tlv_cursor_t cursor; + struct tlv_partition_header *header; + struct tlv_partition_trailer *trailer; + size_t total_length; + uint32_t cksum; + int pos; + efx_rc_t rc; + + EFX_STATIC_ASSERT(sizeof (*header) <= EF10_NVRAM_CHUNK); + + if ((partn_data == NULL) || (partn_size == 0)) { + rc = EINVAL; + goto fail1; + } + + /* The partition header must be the first item (at offset zero) */ + if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)partn_data, + partn_size)) != 0) { + rc = EFAULT; + goto fail2; + } + if (tlv_tag(&cursor) != TLV_TAG_PARTITION_HEADER) { + rc = EINVAL; + goto fail3; + } + header = (struct tlv_partition_header *)tlv_item(&cursor); + + /* Check TLV partition length (includes the END tag) */ + total_length = __LE_TO_CPU_32(header->total_length); + if (total_length > partn_size) { + rc = EFBIG; + goto fail4; + } + + /* Check partition header matches partn */ + if (__LE_TO_CPU_16(header->type_id) != partn) { + rc = EINVAL; + goto fail5; + } + + /* Check partition ends with PARTITION_TRAILER and END tags */ + if ((rc = tlv_find(&cursor, TLV_TAG_PARTITION_TRAILER)) != 0) { + rc = EINVAL; + goto fail6; + } + trailer = (struct tlv_partition_trailer *)tlv_item(&cursor); + + if ((rc = tlv_advance(&cursor)) != 0) { + rc = EINVAL; + goto fail7; + } + if (tlv_tag(&cursor) != TLV_TAG_END) { + rc = EINVAL; + goto fail8; + } + + /* Check generation counts are consistent */ + if (trailer->generation != header->generation) { + rc = EINVAL; + goto fail9; + } + + /* Verify partition checksum */ + cksum = 0; + for (pos = 0; (size_t)pos < total_length; pos += sizeof (uint32_t)) { + cksum += *((uint32_t *)(partn_data + pos)); + } + if (cksum != 0) { + rc = EINVAL; + goto fail10; + } + + return (0); + +fail10: + EFSYS_PROBE(fail10); +fail9: + EFSYS_PROBE(fail9); +fail8: + EFSYS_PROBE(fail8); +fail7: + EFSYS_PROBE(fail7); +fail6: + EFSYS_PROBE(fail6); +fail5: + EFSYS_PROBE(fail5); +fail4: + EFSYS_PROBE(fail4); +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + void +ef10_nvram_buffer_init( + __out_bcount(buffer_size) + caddr_t bufferp, + __in size_t buffer_size) +{ + uint32_t *buf = (uint32_t *)bufferp; + + memset(buf, 0xff, buffer_size); + + tlv_init_block(buf); +} + + __checkReturn efx_rc_t +ef10_nvram_buffer_create( + __in uint32_t partn_type, + __out_bcount(partn_size) + caddr_t partn_data, + __in size_t partn_size) +{ + uint32_t *buf = (uint32_t *)partn_data; + efx_rc_t rc; + tlv_cursor_t cursor; + struct tlv_partition_header header; + struct tlv_partition_trailer trailer; + + unsigned int min_buf_size = sizeof (struct tlv_partition_header) + + sizeof (struct tlv_partition_trailer); + if (partn_size < min_buf_size) { + rc = EINVAL; + goto fail1; + } + + ef10_nvram_buffer_init(partn_data, partn_size); + + if ((rc = 
tlv_init_cursor(&cursor, buf, + (uint32_t *)((uint8_t *)buf + partn_size), + buf)) != 0) { + goto fail2; + } + + header.tag = __CPU_TO_LE_32(TLV_TAG_PARTITION_HEADER); + header.length = __CPU_TO_LE_32(sizeof (header) - 8); + header.type_id = __CPU_TO_LE_16(partn_type); + header.preset = 0; + header.generation = __CPU_TO_LE_32(1); + header.total_length = 0; /* This will be fixed below. */ + if ((rc = tlv_insert( + &cursor, TLV_TAG_PARTITION_HEADER, + (uint8_t *)&header.type_id, sizeof (header) - 8)) != 0) + goto fail3; + if ((rc = tlv_advance(&cursor)) != 0) + goto fail4; + + trailer.tag = __CPU_TO_LE_32(TLV_TAG_PARTITION_TRAILER); + trailer.length = __CPU_TO_LE_32(sizeof (trailer) - 8); + trailer.generation = header.generation; + trailer.checksum = 0; /* This will be fixed below. */ + if ((rc = tlv_insert(&cursor, TLV_TAG_PARTITION_TRAILER, + (uint8_t *)&trailer.generation, sizeof (trailer) - 8)) != 0) + goto fail5; + + if ((rc = tlv_update_partition_len_and_cks(&cursor)) != 0) + goto fail6; + + /* Check that the partition is valid. */ + if ((rc = ef10_nvram_buffer_validate(partn_type, + partn_data, partn_size)) != 0) + goto fail7; + + return (0); + +fail7: + EFSYS_PROBE(fail7); +fail6: + EFSYS_PROBE(fail6); +fail5: + EFSYS_PROBE(fail5); +fail4: + EFSYS_PROBE(fail4); +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +static uint32_t +byte_offset( + __in uint32_t *position, + __in uint32_t *base) +{ + return (uint32_t)((uint8_t *)position - (uint8_t *)base); +} + + __checkReturn efx_rc_t +ef10_nvram_buffer_find_item_start( + __in_bcount(buffer_size) + caddr_t bufferp, + __in size_t buffer_size, + __out uint32_t *startp) +{ + /* Read past partition header to find start address of the first key */ + tlv_cursor_t cursor; + efx_rc_t rc; + + /* A PARTITION_HEADER tag must be the first item (at offset zero) */ + if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)bufferp, + buffer_size)) != 0) { + rc = EFAULT; + goto fail1; + } + if (tlv_tag(&cursor) != TLV_TAG_PARTITION_HEADER) { + rc = EINVAL; + goto fail2; + } + + if ((rc = tlv_advance(&cursor)) != 0) { + rc = EINVAL; + goto fail3; + } + *startp = byte_offset(cursor.current, cursor.block); + + if ((rc = tlv_require_end(&cursor)) != 0) + goto fail4; + + return (0); + +fail4: + EFSYS_PROBE(fail4); +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +ef10_nvram_buffer_find_end( + __in_bcount(buffer_size) + caddr_t bufferp, + __in size_t buffer_size, + __in uint32_t offset, + __out uint32_t *endp) +{ + /* Read to end of partition */ + tlv_cursor_t cursor; + efx_rc_t rc; + uint32_t *segment_used; + + _NOTE(ARGUNUSED(offset)) + + if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)bufferp, + buffer_size)) != 0) { + rc = EFAULT; + goto fail1; + } + + segment_used = cursor.block; + + /* + * Go through each segment and check that it has an end tag. If there + * is no end tag then the previous segment was the last valid one, + * so return the used space including that end tag. + */ + while (tlv_tag(&cursor) == TLV_TAG_PARTITION_HEADER) { + if (tlv_require_end(&cursor) != 0) { + if (segment_used == cursor.block) { + /* + * First segment is corrupt, so there is + * no valid data in partition. 
+ */ + rc = EINVAL; + goto fail2; + } + break; + } + segment_used = cursor.end + 1; + + cursor.current = segment_used; + } + /* Return space used (including the END tag) */ + *endp = (segment_used - cursor.block) * sizeof (uint32_t); + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn __success(return != B_FALSE) boolean_t +ef10_nvram_buffer_find_item( + __in_bcount(buffer_size) + caddr_t bufferp, + __in size_t buffer_size, + __in uint32_t offset, + __out uint32_t *startp, + __out uint32_t *lengthp) +{ + /* Find TLV at offset and return key start and length */ + tlv_cursor_t cursor; + uint8_t *key; + uint32_t tag; + + if (tlv_init_cursor_at_offset(&cursor, (uint8_t *)bufferp, + buffer_size, offset) != 0) { + return (B_FALSE); + } + + while ((key = tlv_item(&cursor)) != NULL) { + tag = tlv_tag(&cursor); + if (tag == TLV_TAG_PARTITION_HEADER || + tag == TLV_TAG_PARTITION_TRAILER) { + if (tlv_advance(&cursor) != 0) { + break; + } + continue; + } + *startp = byte_offset(cursor.current, cursor.block); + *lengthp = byte_offset(tlv_next_item_ptr(&cursor), + cursor.current); + return (B_TRUE); + } + + return (B_FALSE); +} + + __checkReturn efx_rc_t +ef10_nvram_buffer_peek_item( + __in_bcount(buffer_size) + caddr_t bufferp, + __in size_t buffer_size, + __in uint32_t offset, + __out uint32_t *tagp, + __out uint32_t *lengthp, + __out uint32_t *value_offsetp) +{ + efx_rc_t rc; + tlv_cursor_t cursor; + uint32_t tag; + + if ((rc = tlv_init_cursor_at_offset(&cursor, (uint8_t *)bufferp, + buffer_size, offset)) != 0) { + goto fail1; + } + + tag = tlv_tag(&cursor); + *tagp = tag; + if (tag == TLV_TAG_END) { + /* + * To allow stepping over the END tag, report the full tag + * length and a zero length value. 
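+ *
+ * Callers can therefore always advance to the next item by adding the
+ * reported length to their current offset, e.g. (illustrative sketch with
+ * hypothetical local variables, not part of the driver):
+ *
+ *   offset = start;
+ *   while (ef10_nvram_buffer_peek_item(bufferp, buffer_size, offset,
+ *       &tag, &length, &value_offset) == 0 && tag != TLV_TAG_END)
+ *       offset += length;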
+ */ + *lengthp = sizeof (tag); + *value_offsetp = sizeof (tag); + } else { + *lengthp = byte_offset(tlv_next_item_ptr(&cursor), + cursor.current); + *value_offsetp = byte_offset((uint32_t *)tlv_value(&cursor), + cursor.current); + } + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +ef10_nvram_buffer_get_item( + __in_bcount(buffer_size) + caddr_t bufferp, + __in size_t buffer_size, + __in uint32_t offset, + __in uint32_t length, + __out uint32_t *tagp, + __out_bcount_part(value_max_size, *lengthp) + caddr_t valuep, + __in size_t value_max_size, + __out uint32_t *lengthp) +{ + efx_rc_t rc; + tlv_cursor_t cursor; + uint32_t value_length; + + if (buffer_size < (offset + length)) { + rc = ENOSPC; + goto fail1; + } + + if ((rc = tlv_init_cursor_at_offset(&cursor, (uint8_t *)bufferp, + buffer_size, offset)) != 0) { + goto fail2; + } + + value_length = tlv_length(&cursor); + if (value_max_size < value_length) { + rc = ENOSPC; + goto fail3; + } + memcpy(valuep, tlv_value(&cursor), value_length); + + *tagp = tlv_tag(&cursor); + *lengthp = value_length; + + return (0); + +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +ef10_nvram_buffer_insert_item( + __in_bcount(buffer_size) + caddr_t bufferp, + __in size_t buffer_size, + __in uint32_t offset, + __in uint32_t tag, + __in_bcount(length) caddr_t valuep, + __in uint32_t length, + __out uint32_t *lengthp) +{ + efx_rc_t rc; + tlv_cursor_t cursor; + + if ((rc = tlv_init_cursor_at_offset(&cursor, (uint8_t *)bufferp, + buffer_size, offset)) != 0) { + goto fail1; + } + + rc = tlv_insert(&cursor, tag, (uint8_t *)valuep, length); + + if (rc != 0) + goto fail2; + + *lengthp = byte_offset(tlv_next_item_ptr(&cursor), + cursor.current); + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +ef10_nvram_buffer_modify_item( + __in_bcount(buffer_size) + caddr_t bufferp, + __in size_t buffer_size, + __in uint32_t offset, + __in uint32_t tag, + __in_bcount(length) caddr_t valuep, + __in uint32_t length, + __out uint32_t *lengthp) +{ + efx_rc_t rc; + tlv_cursor_t cursor; + + if ((rc = tlv_init_cursor_at_offset(&cursor, (uint8_t *)bufferp, + buffer_size, offset)) != 0) { + goto fail1; + } + + rc = tlv_modify(&cursor, tag, (uint8_t *)valuep, length); + + if (rc != 0) { + goto fail2; + } + + *lengthp = byte_offset(tlv_next_item_ptr(&cursor), + cursor.current); + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + + __checkReturn efx_rc_t +ef10_nvram_buffer_delete_item( + __in_bcount(buffer_size) + caddr_t bufferp, + __in size_t buffer_size, + __in uint32_t offset, + __in uint32_t length, + __in uint32_t end) +{ + efx_rc_t rc; + tlv_cursor_t cursor; + + _NOTE(ARGUNUSED(length, end)) + + if ((rc = tlv_init_cursor_at_offset(&cursor, (uint8_t *)bufferp, + buffer_size, offset)) != 0) { + goto fail1; + } + + if ((rc = tlv_delete(&cursor)) != 0) + goto fail2; + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +ef10_nvram_buffer_finish( + __in_bcount(buffer_size) + caddr_t bufferp, + __in size_t buffer_size) +{ + efx_rc_t rc; + tlv_cursor_t cursor; + + if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)bufferp, + buffer_size)) != 0) { + rc = EFAULT; + goto fail1; + } + + if ((rc = 
tlv_require_end(&cursor)) != 0) + goto fail2; + + if ((rc = tlv_update_partition_len_and_cks(&cursor)) != 0) + goto fail3; + + return (0); + +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + + +/* + * Read and validate a segment from a partition. A segment is a complete + * tlv chain between PARTITION_HEADER and PARTITION_END tags. There may + * be multiple segments in a partition, so seg_offset allows segments + * beyond the first to be read. + */ +static __checkReturn efx_rc_t +ef10_nvram_read_tlv_segment( + __in efx_nic_t *enp, + __in uint32_t partn, + __in size_t seg_offset, + __in_bcount(max_seg_size) caddr_t seg_data, + __in size_t max_seg_size) +{ + tlv_cursor_t cursor; + struct tlv_partition_header *header; + struct tlv_partition_trailer *trailer; + size_t total_length; + uint32_t cksum; + int pos; + efx_rc_t rc; + + EFX_STATIC_ASSERT(sizeof (*header) <= EF10_NVRAM_CHUNK); + + if ((seg_data == NULL) || (max_seg_size == 0)) { + rc = EINVAL; + goto fail1; + } + + /* Read initial chunk of the segment, starting at offset */ + if ((rc = ef10_nvram_partn_read_mode(enp, partn, seg_offset, seg_data, + EF10_NVRAM_CHUNK, + MC_CMD_NVRAM_READ_IN_V2_TARGET_CURRENT)) != 0) { + goto fail2; + } + + /* A PARTITION_HEADER tag must be the first item at the given offset */ + if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)seg_data, + max_seg_size)) != 0) { + rc = EFAULT; + goto fail3; + } + if (tlv_tag(&cursor) != TLV_TAG_PARTITION_HEADER) { + rc = EINVAL; + goto fail4; + } + header = (struct tlv_partition_header *)tlv_item(&cursor); + + /* Check TLV segment length (includes the END tag) */ + total_length = __LE_TO_CPU_32(header->total_length); + if (total_length > max_seg_size) { + rc = EFBIG; + goto fail5; + } + + /* Read the remaining segment content */ + if (total_length > EF10_NVRAM_CHUNK) { + if ((rc = ef10_nvram_partn_read_mode(enp, partn, + seg_offset + EF10_NVRAM_CHUNK, + seg_data + EF10_NVRAM_CHUNK, + total_length - EF10_NVRAM_CHUNK, + MC_CMD_NVRAM_READ_IN_V2_TARGET_CURRENT)) != 0) + goto fail6; + } + + /* Check segment ends with PARTITION_TRAILER and END tags */ + if ((rc = tlv_find(&cursor, TLV_TAG_PARTITION_TRAILER)) != 0) { + rc = EINVAL; + goto fail7; + } + trailer = (struct tlv_partition_trailer *)tlv_item(&cursor); + + if ((rc = tlv_advance(&cursor)) != 0) { + rc = EINVAL; + goto fail8; + } + if (tlv_tag(&cursor) != TLV_TAG_END) { + rc = EINVAL; + goto fail9; + } + + /* Check data read from segment is consistent */ + if (trailer->generation != header->generation) { + /* + * The partition data may have been modified between successive + * MCDI NVRAM_READ requests by the MC or another PCI function. + * + * The caller must retry to obtain consistent partition data. + */ + rc = EAGAIN; + goto fail10; + } + + /* Verify segment checksum */ + cksum = 0; + for (pos = 0; (size_t)pos < total_length; pos += sizeof (uint32_t)) { + cksum += *((uint32_t *)(seg_data + pos)); + } + if (cksum != 0) { + rc = EINVAL; + goto fail11; + } + + return (0); + +fail11: + EFSYS_PROBE(fail11); +fail10: + EFSYS_PROBE(fail10); +fail9: + EFSYS_PROBE(fail9); +fail8: + EFSYS_PROBE(fail8); +fail7: + EFSYS_PROBE(fail7); +fail6: + EFSYS_PROBE(fail6); +fail5: + EFSYS_PROBE(fail5); +fail4: + EFSYS_PROBE(fail4); +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +/* + * Read a single TLV item from a host memory + * buffer containing a TLV formatted segment. 
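+ * The value is copied into a buffer allocated with EFSYS_KMEM_ALLOC, which
+ * the caller is responsible for freeing; a zero-length value is returned as
+ * a NULL pointer with *sizep set to 0.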
+ */ + __checkReturn efx_rc_t +ef10_nvram_buf_read_tlv( + __in efx_nic_t *enp, + __in_bcount(max_seg_size) caddr_t seg_data, + __in size_t max_seg_size, + __in uint32_t tag, + __deref_out_bcount_opt(*sizep) caddr_t *datap, + __out size_t *sizep) +{ + tlv_cursor_t cursor; + caddr_t data; + size_t length; + caddr_t value; + efx_rc_t rc; + + _NOTE(ARGUNUSED(enp)) + + if ((seg_data == NULL) || (max_seg_size == 0)) { + rc = EINVAL; + goto fail1; + } + + /* Find requested TLV tag in segment data */ + if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)seg_data, + max_seg_size)) != 0) { + rc = EFAULT; + goto fail2; + } + if ((rc = tlv_find(&cursor, tag)) != 0) { + rc = ENOENT; + goto fail3; + } + value = (caddr_t)tlv_value(&cursor); + length = tlv_length(&cursor); + + if (length == 0) + data = NULL; + else { + /* Copy out data from TLV item */ + EFSYS_KMEM_ALLOC(enp->en_esip, length, data); + if (data == NULL) { + rc = ENOMEM; + goto fail4; + } + memcpy(data, value, length); + } + + *datap = data; + *sizep = length; + + return (0); + +fail4: + EFSYS_PROBE(fail4); +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +/* Read a single TLV item from the first segment in a TLV formatted partition */ + __checkReturn efx_rc_t +ef10_nvram_partn_read_tlv( + __in efx_nic_t *enp, + __in uint32_t partn, + __in uint32_t tag, + __deref_out_bcount_opt(*seg_sizep) caddr_t *seg_datap, + __out size_t *seg_sizep) +{ + caddr_t seg_data = NULL; + size_t partn_size = 0; + size_t length; + caddr_t data; + int retry; + efx_rc_t rc; + + /* Allocate sufficient memory for the entire partition */ + if ((rc = ef10_nvram_partn_size(enp, partn, &partn_size)) != 0) + goto fail1; + + if (partn_size == 0) { + rc = ENOENT; + goto fail2; + } + + EFSYS_KMEM_ALLOC(enp->en_esip, partn_size, seg_data); + if (seg_data == NULL) { + rc = ENOMEM; + goto fail3; + } + + /* + * Read the first segment in a TLV partition. Retry until consistent + * segment contents are returned. Inconsistent data may be read if: + * a) the segment contents are invalid + * b) the MC has rebooted while we were reading the partition + * c) the partition has been modified while we were reading it + * Limit retry attempts to ensure forward progress. + */ + retry = 10; + do { + if ((rc = ef10_nvram_read_tlv_segment(enp, partn, 0, + seg_data, partn_size)) != 0) + --retry; + } while ((rc == EAGAIN) && (retry > 0)); + + if (rc != 0) { + /* Failed to obtain consistent segment data */ + if (rc == EAGAIN) + rc = EIO; + + goto fail4; + } + + if ((rc = ef10_nvram_buf_read_tlv(enp, seg_data, partn_size, + tag, &data, &length)) != 0) + goto fail5; + + EFSYS_KMEM_FREE(enp->en_esip, partn_size, seg_data); + + *seg_datap = data; + *seg_sizep = length; + + return (0); + +fail5: + EFSYS_PROBE(fail5); +fail4: + EFSYS_PROBE(fail4); + + EFSYS_KMEM_FREE(enp->en_esip, partn_size, seg_data); +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +/* Compute the size of a segment. 
*/ + static __checkReturn efx_rc_t +ef10_nvram_buf_segment_size( + __in caddr_t seg_data, + __in size_t max_seg_size, + __out size_t *seg_sizep) +{ + efx_rc_t rc; + tlv_cursor_t cursor; + struct tlv_partition_header *header; + uint32_t cksum; + int pos; + uint32_t *end_tag_position; + uint32_t segment_length; + + /* A PARTITION_HEADER tag must be the first item at the given offset */ + if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)seg_data, + max_seg_size)) != 0) { + rc = EFAULT; + goto fail1; + } + if (tlv_tag(&cursor) != TLV_TAG_PARTITION_HEADER) { + rc = EINVAL; + goto fail2; + } + header = (struct tlv_partition_header *)tlv_item(&cursor); + + /* Check TLV segment length (includes the END tag) */ + *seg_sizep = __LE_TO_CPU_32(header->total_length); + if (*seg_sizep > max_seg_size) { + rc = EFBIG; + goto fail3; + } + + /* Check segment ends with PARTITION_TRAILER and END tags */ + if ((rc = tlv_find(&cursor, TLV_TAG_PARTITION_TRAILER)) != 0) { + rc = EINVAL; + goto fail4; + } + + if ((rc = tlv_advance(&cursor)) != 0) { + rc = EINVAL; + goto fail5; + } + if (tlv_tag(&cursor) != TLV_TAG_END) { + rc = EINVAL; + goto fail6; + } + end_tag_position = cursor.current; + + /* Verify segment checksum */ + cksum = 0; + for (pos = 0; (size_t)pos < *seg_sizep; pos += sizeof (uint32_t)) { + cksum += *((uint32_t *)(seg_data + pos)); + } + if (cksum != 0) { + rc = EINVAL; + goto fail7; + } + + /* + * Calculate total length from HEADER to END tags and compare to + * max_seg_size and the total_length field in the HEADER tag. + */ + segment_length = tlv_block_length_used(&cursor); + + if (segment_length > max_seg_size) { + rc = EINVAL; + goto fail8; + } + + if (segment_length != *seg_sizep) { + rc = EINVAL; + goto fail9; + } + + /* Skip over the first HEADER tag. */ + rc = tlv_rewind(&cursor); + rc = tlv_advance(&cursor); + + while (rc == 0) { + if (tlv_tag(&cursor) == TLV_TAG_END) { + /* Check that the END tag is the one found earlier. */ + if (cursor.current != end_tag_position) + goto fail10; + break; + } + /* Check for duplicate HEADER tags before the END tag. */ + if (tlv_tag(&cursor) == TLV_TAG_PARTITION_HEADER) { + rc = EINVAL; + goto fail11; + } + + rc = tlv_advance(&cursor); + } + if (rc != 0) + goto fail12; + + return (0); + +fail12: + EFSYS_PROBE(fail12); +fail11: + EFSYS_PROBE(fail11); +fail10: + EFSYS_PROBE(fail10); +fail9: + EFSYS_PROBE(fail9); +fail8: + EFSYS_PROBE(fail8); +fail7: + EFSYS_PROBE(fail7); +fail6: + EFSYS_PROBE(fail6); +fail5: + EFSYS_PROBE(fail5); +fail4: + EFSYS_PROBE(fail4); +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +/* + * Add or update a single TLV item in a host memory buffer containing a TLV + * formatted segment. Historically partitions consisted of only one segment. 
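+ * The partition header and trailer are updated so that the modified segment
+ * remains self-consistent: total_length and the generation count are
+ * refreshed and the trailer checksum is recomputed.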
+ */ + __checkReturn efx_rc_t +ef10_nvram_buf_write_tlv( + __inout_bcount(max_seg_size) caddr_t seg_data, + __in size_t max_seg_size, + __in uint32_t tag, + __in_bcount(tag_size) caddr_t tag_data, + __in size_t tag_size, + __out size_t *total_lengthp) +{ + tlv_cursor_t cursor; + struct tlv_partition_header *header; + struct tlv_partition_trailer *trailer; + uint32_t generation; + uint32_t cksum; + int pos; + efx_rc_t rc; + + /* A PARTITION_HEADER tag must be the first item (at offset zero) */ + if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)seg_data, + max_seg_size)) != 0) { + rc = EFAULT; + goto fail1; + } + if (tlv_tag(&cursor) != TLV_TAG_PARTITION_HEADER) { + rc = EINVAL; + goto fail2; + } + header = (struct tlv_partition_header *)tlv_item(&cursor); + + /* Update the TLV chain to contain the new data */ + if ((rc = tlv_find(&cursor, tag)) == 0) { + /* Modify existing TLV item */ + if ((rc = tlv_modify(&cursor, tag, + (uint8_t *)tag_data, tag_size)) != 0) + goto fail3; + } else { + /* Insert a new TLV item before the PARTITION_TRAILER */ + rc = tlv_find(&cursor, TLV_TAG_PARTITION_TRAILER); + if (rc != 0) { + rc = EINVAL; + goto fail4; + } + if ((rc = tlv_insert(&cursor, tag, + (uint8_t *)tag_data, tag_size)) != 0) { + rc = EINVAL; + goto fail5; + } + } + + /* Find the trailer tag */ + if ((rc = tlv_find(&cursor, TLV_TAG_PARTITION_TRAILER)) != 0) { + rc = EINVAL; + goto fail6; + } + trailer = (struct tlv_partition_trailer *)tlv_item(&cursor); + + /* Update PARTITION_HEADER and PARTITION_TRAILER fields */ + *total_lengthp = tlv_block_length_used(&cursor); + if (*total_lengthp > max_seg_size) { + rc = ENOSPC; + goto fail7; + } + generation = __LE_TO_CPU_32(header->generation) + 1; + + header->total_length = __CPU_TO_LE_32(*total_lengthp); + header->generation = __CPU_TO_LE_32(generation); + trailer->generation = __CPU_TO_LE_32(generation); + + /* Recompute PARTITION_TRAILER checksum */ + trailer->checksum = 0; + cksum = 0; + for (pos = 0; (size_t)pos < *total_lengthp; pos += sizeof (uint32_t)) { + cksum += *((uint32_t *)(seg_data + pos)); + } + trailer->checksum = ~cksum + 1; + + return (0); + +fail7: + EFSYS_PROBE(fail7); +fail6: + EFSYS_PROBE(fail6); +fail5: + EFSYS_PROBE(fail5); +fail4: + EFSYS_PROBE(fail4); +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +/* + * Add or update a single TLV item in the first segment of a TLV formatted + * dynamic config partition. The first segment is the current active + * configuration. + */ + __checkReturn efx_rc_t +ef10_nvram_partn_write_tlv( + __in efx_nic_t *enp, + __in uint32_t partn, + __in uint32_t tag, + __in_bcount(size) caddr_t data, + __in size_t size) +{ + return ef10_nvram_partn_write_segment_tlv(enp, partn, tag, data, + size, B_FALSE); +} + +/* + * Read a segment from nvram at the given offset into a buffer (segment_data) + * and optionally write a new tag to it. + */ +static __checkReturn efx_rc_t +ef10_nvram_segment_write_tlv( + __in efx_nic_t *enp, + __in uint32_t partn, + __in uint32_t tag, + __in_bcount(size) caddr_t data, + __in size_t size, + __inout caddr_t *seg_datap, + __inout size_t *partn_offsetp, + __inout size_t *src_remain_lenp, + __inout size_t *dest_remain_lenp, + __in boolean_t write) +{ + efx_rc_t rc; + efx_rc_t status; + size_t original_segment_size; + size_t modified_segment_size; + + /* + * Read the segment from NVRAM into the segment_data buffer and validate + * it, returning if it does not validate. 
This is not a failure unless + * this is the first segment in a partition. In this case the caller + * must propagate the error. + */ + status = ef10_nvram_read_tlv_segment(enp, partn, *partn_offsetp, + *seg_datap, *src_remain_lenp); + if (status != 0) { + rc = EINVAL; + goto fail1; + } + + status = ef10_nvram_buf_segment_size(*seg_datap, + *src_remain_lenp, &original_segment_size); + if (status != 0) { + rc = EINVAL; + goto fail2; + } + + if (write) { + /* Update the contents of the segment in the buffer */ + if ((rc = ef10_nvram_buf_write_tlv(*seg_datap, + *dest_remain_lenp, tag, data, size, + &modified_segment_size)) != 0) { + goto fail3; + } + *dest_remain_lenp -= modified_segment_size; + *seg_datap += modified_segment_size; + } else { + /* + * We won't modify this segment, but still need to update the + * remaining lengths and pointers. + */ + *dest_remain_lenp -= original_segment_size; + *seg_datap += original_segment_size; + } + + *partn_offsetp += original_segment_size; + *src_remain_lenp -= original_segment_size; + + return (0); + +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +/* + * Add or update a single TLV item in either the first segment or in all + * segments in a TLV formatted dynamic config partition. Dynamic config + * partitions on boards that support RFID are divided into a number of segments, + * each formatted like a partition, with header, trailer and end tags. The first + * segment is the current active configuration. + * + * The segments are initialised by manftest and each contain a different + * configuration e.g. firmware variant. The firmware can be instructed + * via RFID to copy a segment to replace the first segment, hence changing the + * active configuration. This allows ops to change the configuration of a board + * prior to shipment using RFID. + * + * Changes to the dynamic config may need to be written to all segments (e.g. + * firmware versions) or just the first segment (changes to the active + * configuration). See SF-111324-SW "The use of RFID in Solarflare Products". + * If only the first segment is written the code still needs to be aware of the + * possible presence of subsequent segments as writing to a segment may cause + * its size to increase, which would overwrite the subsequent segments and + * invalidate them. + */ + __checkReturn efx_rc_t +ef10_nvram_partn_write_segment_tlv( + __in efx_nic_t *enp, + __in uint32_t partn, + __in uint32_t tag, + __in_bcount(size) caddr_t data, + __in size_t size, + __in boolean_t all_segments) +{ + size_t partn_size = 0; + caddr_t partn_data; + size_t total_length = 0; + efx_rc_t rc; + size_t current_offset = 0; + size_t remaining_original_length; + size_t remaining_modified_length; + caddr_t segment_data; + + EFSYS_ASSERT3U(partn, ==, NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG); + + /* Allocate sufficient memory for the entire partition */ + if ((rc = ef10_nvram_partn_size(enp, partn, &partn_size)) != 0) + goto fail1; + + EFSYS_KMEM_ALLOC(enp->en_esip, partn_size, partn_data); + if (partn_data == NULL) { + rc = ENOMEM; + goto fail2; + } + + remaining_original_length = partn_size; + remaining_modified_length = partn_size; + segment_data = partn_data; + + /* Lock the partition */ + if ((rc = ef10_nvram_partn_lock(enp, partn)) != 0) + goto fail3; + + /* Iterate over each (potential) segment to update it. 
*/ + do { + boolean_t write = all_segments || current_offset == 0; + + rc = ef10_nvram_segment_write_tlv(enp, partn, tag, data, size, + &segment_data, ¤t_offset, &remaining_original_length, + &remaining_modified_length, write); + if (rc != 0) { + if (current_offset == 0) { + /* + * If no data has been read then the first + * segment is invalid, which is an error. + */ + goto fail4; + } + break; + } + } while (current_offset < partn_size); + + total_length = segment_data - partn_data; + + /* + * We've run out of space. This should actually be dealt with by + * ef10_nvram_buf_write_tlv returning ENOSPC. + */ + if (total_length > partn_size) { + rc = ENOSPC; + goto fail5; + } + + /* Erase the whole partition in NVRAM */ + if ((rc = ef10_nvram_partn_erase(enp, partn, 0, partn_size)) != 0) + goto fail6; + + /* Write new partition contents from the buffer to NVRAM */ + if ((rc = ef10_nvram_partn_write(enp, partn, 0, partn_data, + total_length)) != 0) + goto fail7; + + /* Unlock the partition */ + (void) ef10_nvram_partn_unlock(enp, partn, NULL); + + EFSYS_KMEM_FREE(enp->en_esip, partn_size, partn_data); + + return (0); + +fail7: + EFSYS_PROBE(fail7); +fail6: + EFSYS_PROBE(fail6); +fail5: + EFSYS_PROBE(fail5); +fail4: + EFSYS_PROBE(fail4); + + (void) ef10_nvram_partn_unlock(enp, partn, NULL); +fail3: + EFSYS_PROBE(fail3); + + EFSYS_KMEM_FREE(enp->en_esip, partn_size, partn_data); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +/* + * Get the size of a NVRAM partition. This is the total size allocated in nvram, + * not the data used by the segments in the partition. + */ + __checkReturn efx_rc_t +ef10_nvram_partn_size( + __in efx_nic_t *enp, + __in uint32_t partn, + __out size_t *sizep) +{ + efx_rc_t rc; + efx_nvram_info_t eni = { 0 }; + + if ((rc = efx_mcdi_nvram_info(enp, partn, &eni)) != 0) + goto fail1; + + *sizep = eni.eni_partn_size; + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +ef10_nvram_partn_info( + __in efx_nic_t *enp, + __in uint32_t partn, + __out efx_nvram_info_t *enip) +{ + efx_rc_t rc; + + if ((rc = efx_mcdi_nvram_info(enp, partn, enip)) != 0) + goto fail1; + + if (enip->eni_write_size == 0) + enip->eni_write_size = EF10_NVRAM_CHUNK; + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + + __checkReturn efx_rc_t +ef10_nvram_partn_lock( + __in efx_nic_t *enp, + __in uint32_t partn) +{ + efx_rc_t rc; + + if ((rc = efx_mcdi_nvram_update_start(enp, partn)) != 0) + goto fail1; + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +ef10_nvram_partn_read_mode( + __in efx_nic_t *enp, + __in uint32_t partn, + __in unsigned int offset, + __out_bcount(size) caddr_t data, + __in size_t size, + __in uint32_t mode) +{ + size_t chunk; + efx_rc_t rc; + + while (size > 0) { + chunk = MIN(size, EF10_NVRAM_CHUNK); + + if ((rc = efx_mcdi_nvram_read(enp, partn, offset, + data, chunk, mode)) != 0) { + goto fail1; + } + + size -= chunk; + data += chunk; + offset += chunk; + } + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +ef10_nvram_partn_read( + __in efx_nic_t *enp, + __in uint32_t partn, + __in unsigned int offset, + __out_bcount(size) caddr_t data, + __in size_t size) +{ + /* + * An A/B partition has two data stores (current and backup). 
+ * Read requests which come in through the EFX API expect to read the + * current, active store of an A/B partition. For non A/B partitions, + * there is only a single store and so the mode param is ignored. + */ + return ef10_nvram_partn_read_mode(enp, partn, offset, data, size, + MC_CMD_NVRAM_READ_IN_V2_TARGET_CURRENT); +} + + __checkReturn efx_rc_t +ef10_nvram_partn_read_backup( + __in efx_nic_t *enp, + __in uint32_t partn, + __in unsigned int offset, + __out_bcount(size) caddr_t data, + __in size_t size) +{ + /* + * An A/B partition has two data stores (current and backup). + * Read the backup store of an A/B partition (i.e. the store currently + * being written to if the partition is locked). + * + * This is needed when comparing the existing partition content to avoid + * unnecessary writes, or to read back what has been written to check + * that the writes have succeeded. + */ + return ef10_nvram_partn_read_mode(enp, partn, offset, data, size, + MC_CMD_NVRAM_READ_IN_V2_TARGET_BACKUP); +} + + __checkReturn efx_rc_t +ef10_nvram_partn_erase( + __in efx_nic_t *enp, + __in uint32_t partn, + __in unsigned int offset, + __in size_t size) +{ + efx_rc_t rc; + efx_nvram_info_t eni = { 0 }; + uint32_t erase_size; + + if ((rc = efx_mcdi_nvram_info(enp, partn, &eni)) != 0) + goto fail1; + + erase_size = eni.eni_erase_size; + + if (erase_size == 0) { + if ((rc = efx_mcdi_nvram_erase(enp, partn, offset, size)) != 0) + goto fail2; + } else { + if (size % erase_size != 0) { + rc = EINVAL; + goto fail3; + } + while (size > 0) { + if ((rc = efx_mcdi_nvram_erase(enp, partn, offset, + erase_size)) != 0) + goto fail4; + offset += erase_size; + size -= erase_size; + } + } + + return (0); + +fail4: + EFSYS_PROBE(fail4); +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +ef10_nvram_partn_write( + __in efx_nic_t *enp, + __in uint32_t partn, + __in unsigned int offset, + __in_bcount(size) caddr_t data, + __in size_t size) +{ + size_t chunk; + efx_nvram_info_t eni = { 0 }; + uint32_t write_size; + efx_rc_t rc; + + if ((rc = efx_mcdi_nvram_info(enp, partn, &eni)) != 0) + goto fail1; + + write_size = eni.eni_write_size; + + if (write_size != 0) { + /* + * Check that the size is a multiple of the write chunk size if + * the write chunk size is available. + */ + if (size % write_size != 0) { + rc = EINVAL; + goto fail2; + } + } else { + write_size = EF10_NVRAM_CHUNK; + } + + while (size > 0) { + chunk = MIN(size, write_size); + + if ((rc = efx_mcdi_nvram_write(enp, partn, offset, + data, chunk)) != 0) { + goto fail3; + } + + size -= chunk; + data += chunk; + offset += chunk; + } + + return (0); + +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +#define EF10_NVRAM_INITIAL_POLL_DELAY_US 10000 +#define EF10_NVRAM_MAX_POLL_DELAY_US 1000000 +#define EF10_NVRAM_POLL_RETRIES 100 + + __checkReturn efx_rc_t +ef10_nvram_partn_unlock( + __in efx_nic_t *enp, + __in uint32_t partn, + __out_opt uint32_t *verify_resultp) +{ + boolean_t reboot = B_FALSE; + uint32_t poll_delay_us = EF10_NVRAM_INITIAL_POLL_DELAY_US; + uint32_t poll_retry = 0; + uint32_t verify_result = MC_CMD_NVRAM_VERIFY_RC_UNKNOWN; + efx_rc_t rc; + + rc = efx_mcdi_nvram_update_finish(enp, partn, reboot, + EFX_NVRAM_UPDATE_FLAGS_BACKGROUND, &verify_result); + + /* + * NVRAM updates can take a long time (e.g. up to 1 minute for bundle + * images). 
Polling for NVRAM update completion ensures that other MCDI + * commands can be issued before the background NVRAM update completes. + * + * Without polling, other MCDI commands can only be issued before the + * NVRAM update completes if the MCDI transport and the firmware + * support the Asynchronous MCDI protocol extensions in SF-116575-PS. + * + * The initial call either completes the update synchronously, or + * returns RC_PENDING to indicate processing is continuing. In the + * latter case, we poll for at least 1 minute, at increasing intervals + * (10ms, 100ms, 1s). + */ + while (verify_result == MC_CMD_NVRAM_VERIFY_RC_PENDING) { + + if (poll_retry > EF10_NVRAM_POLL_RETRIES) { + rc = ETIMEDOUT; + goto fail1; + } + poll_retry++; + + EFSYS_SLEEP(poll_delay_us); + if (poll_delay_us < EF10_NVRAM_MAX_POLL_DELAY_US) + poll_delay_us *= 10; + + /* Poll for completion of background NVRAM update. */ + verify_result = MC_CMD_NVRAM_VERIFY_RC_UNKNOWN; + + rc = efx_mcdi_nvram_update_finish(enp, partn, reboot, + EFX_NVRAM_UPDATE_FLAGS_POLL, &verify_result); + if (rc != 0) { + /* Poll failed, so assume NVRAM update failed. */ + goto fail2; + } + } + + if (verify_resultp != NULL) + *verify_resultp = verify_result; + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +ef10_nvram_partn_set_version( + __in efx_nic_t *enp, + __in uint32_t partn, + __in_ecount(4) uint16_t version[4]) +{ + struct tlv_partition_version partn_version; + size_t size; + efx_rc_t rc; + + /* Add or modify partition version TLV item */ + partn_version.version_w = __CPU_TO_LE_16(version[0]); + partn_version.version_x = __CPU_TO_LE_16(version[1]); + partn_version.version_y = __CPU_TO_LE_16(version[2]); + partn_version.version_z = __CPU_TO_LE_16(version[3]); + + size = sizeof (partn_version) - (2 * sizeof (uint32_t)); + + /* Write the version number to all segments in the partition */ + if ((rc = ef10_nvram_partn_write_segment_tlv(enp, + NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, + TLV_TAG_PARTITION_VERSION(partn), + (caddr_t)&partn_version.version_w, size, B_TRUE)) != 0) + goto fail1; + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +#endif /* EFSYS_OPT_VPD || EFSYS_OPT_NVRAM */ + +#if EFSYS_OPT_NVRAM + +typedef struct ef10_parttbl_entry_s { + unsigned int partn; + unsigned int port_mask; + efx_nvram_type_t nvtype; +} ef10_parttbl_entry_t; + +/* Port mask values */ +#define PORT_1 (1u << 1) +#define PORT_2 (1u << 2) +#define PORT_3 (1u << 3) +#define PORT_4 (1u << 4) +#define PORT_ALL (0xffffffffu) + +#define PARTN_MAP_ENTRY(partn, port_mask, nvtype) \ +{ (NVRAM_PARTITION_TYPE_##partn), (PORT_##port_mask), (EFX_NVRAM_##nvtype) } + +/* Translate EFX NVRAM types to firmware partition types */ +static ef10_parttbl_entry_t hunt_parttbl[] = { + /* partn ports nvtype */ + PARTN_MAP_ENTRY(MC_FIRMWARE, ALL, MC_FIRMWARE), + PARTN_MAP_ENTRY(MC_FIRMWARE_BACKUP, ALL, MC_GOLDEN), + PARTN_MAP_ENTRY(EXPANSION_ROM, ALL, BOOTROM), + PARTN_MAP_ENTRY(EXPROM_CONFIG_PORT0, 1, BOOTROM_CFG), + PARTN_MAP_ENTRY(EXPROM_CONFIG_PORT1, 2, BOOTROM_CFG), + PARTN_MAP_ENTRY(EXPROM_CONFIG_PORT2, 3, BOOTROM_CFG), + PARTN_MAP_ENTRY(EXPROM_CONFIG_PORT3, 4, BOOTROM_CFG), + PARTN_MAP_ENTRY(DYNAMIC_CONFIG, ALL, DYNAMIC_CFG), + PARTN_MAP_ENTRY(FPGA, ALL, FPGA), + PARTN_MAP_ENTRY(FPGA_BACKUP, ALL, FPGA_BACKUP), + PARTN_MAP_ENTRY(LICENSE, ALL, LICENSE), +}; + +static ef10_parttbl_entry_t medford_parttbl[] = { + /* partn ports nvtype */ + 
PARTN_MAP_ENTRY(MC_FIRMWARE, ALL, MC_FIRMWARE), + PARTN_MAP_ENTRY(MC_FIRMWARE_BACKUP, ALL, MC_GOLDEN), + PARTN_MAP_ENTRY(EXPANSION_ROM, ALL, BOOTROM), + PARTN_MAP_ENTRY(EXPROM_CONFIG, ALL, BOOTROM_CFG), + PARTN_MAP_ENTRY(DYNAMIC_CONFIG, ALL, DYNAMIC_CFG), + PARTN_MAP_ENTRY(FPGA, ALL, FPGA), + PARTN_MAP_ENTRY(FPGA_BACKUP, ALL, FPGA_BACKUP), + PARTN_MAP_ENTRY(LICENSE, ALL, LICENSE), + PARTN_MAP_ENTRY(EXPANSION_UEFI, ALL, UEFIROM), + PARTN_MAP_ENTRY(MUM_FIRMWARE, ALL, MUM_FIRMWARE), +}; + +static ef10_parttbl_entry_t medford2_parttbl[] = { + /* partn ports nvtype */ + PARTN_MAP_ENTRY(MC_FIRMWARE, ALL, MC_FIRMWARE), + PARTN_MAP_ENTRY(MC_FIRMWARE_BACKUP, ALL, MC_GOLDEN), + PARTN_MAP_ENTRY(EXPANSION_ROM, ALL, BOOTROM), + PARTN_MAP_ENTRY(EXPROM_CONFIG, ALL, BOOTROM_CFG), + PARTN_MAP_ENTRY(DYNAMIC_CONFIG, ALL, DYNAMIC_CFG), + PARTN_MAP_ENTRY(FPGA, ALL, FPGA), + PARTN_MAP_ENTRY(FPGA_BACKUP, ALL, FPGA_BACKUP), + PARTN_MAP_ENTRY(LICENSE, ALL, LICENSE), + PARTN_MAP_ENTRY(EXPANSION_UEFI, ALL, UEFIROM), + PARTN_MAP_ENTRY(MUM_FIRMWARE, ALL, MUM_FIRMWARE), + PARTN_MAP_ENTRY(DYNCONFIG_DEFAULTS, ALL, DYNCONFIG_DEFAULTS), + PARTN_MAP_ENTRY(ROMCONFIG_DEFAULTS, ALL, ROMCONFIG_DEFAULTS), + PARTN_MAP_ENTRY(BUNDLE, ALL, BUNDLE), + PARTN_MAP_ENTRY(BUNDLE_METADATA, ALL, BUNDLE_METADATA), +}; + +static __checkReturn efx_rc_t +ef10_parttbl_get( + __in efx_nic_t *enp, + __out ef10_parttbl_entry_t **parttblp, + __out size_t *parttbl_rowsp) +{ + switch (enp->en_family) { + case EFX_FAMILY_HUNTINGTON: + *parttblp = hunt_parttbl; + *parttbl_rowsp = EFX_ARRAY_SIZE(hunt_parttbl); + break; + + case EFX_FAMILY_MEDFORD: + *parttblp = medford_parttbl; + *parttbl_rowsp = EFX_ARRAY_SIZE(medford_parttbl); + break; + + case EFX_FAMILY_MEDFORD2: + *parttblp = medford2_parttbl; + *parttbl_rowsp = EFX_ARRAY_SIZE(medford2_parttbl); + break; + + default: + EFSYS_ASSERT(B_FALSE); + return (EINVAL); + } + return (0); +} + + __checkReturn efx_rc_t +ef10_nvram_type_to_partn( + __in efx_nic_t *enp, + __in efx_nvram_type_t type, + __out uint32_t *partnp) +{ + efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); + ef10_parttbl_entry_t *parttbl = NULL; + size_t parttbl_rows = 0; + unsigned int i; + + EFSYS_ASSERT3U(type, !=, EFX_NVRAM_INVALID); + EFSYS_ASSERT3U(type, <, EFX_NVRAM_NTYPES); + EFSYS_ASSERT(partnp != NULL); + + if (ef10_parttbl_get(enp, &parttbl, &parttbl_rows) == 0) { + for (i = 0; i < parttbl_rows; i++) { + ef10_parttbl_entry_t *entry = &parttbl[i]; + + if ((entry->nvtype == type) && + (entry->port_mask & (1u << emip->emi_port))) { + *partnp = entry->partn; + return (0); + } + } + } + + return (ENOTSUP); +} + +#if EFSYS_OPT_DIAG + +static __checkReturn efx_rc_t +ef10_nvram_partn_to_type( + __in efx_nic_t *enp, + __in uint32_t partn, + __out efx_nvram_type_t *typep) +{ + efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); + ef10_parttbl_entry_t *parttbl = NULL; + size_t parttbl_rows = 0; + unsigned int i; + + EFSYS_ASSERT(typep != NULL); + + if (ef10_parttbl_get(enp, &parttbl, &parttbl_rows) == 0) { + for (i = 0; i < parttbl_rows; i++) { + ef10_parttbl_entry_t *entry = &parttbl[i]; + + if ((entry->partn == partn) && + (entry->port_mask & (1u << emip->emi_port))) { + *typep = entry->nvtype; + return (0); + } + } + } + + return (ENOTSUP); +} + + __checkReturn efx_rc_t +ef10_nvram_test( + __in efx_nic_t *enp) +{ + efx_nvram_type_t type; + unsigned int npartns = 0; + uint32_t *partns = NULL; + size_t size; + unsigned int i; + efx_rc_t rc; + + /* Read available partitions from NVRAM partition map */ + size = 
MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_MAXNUM * sizeof (uint32_t); + EFSYS_KMEM_ALLOC(enp->en_esip, size, partns); + if (partns == NULL) { + rc = ENOMEM; + goto fail1; + } + + if ((rc = efx_mcdi_nvram_partitions(enp, (caddr_t)partns, size, + &npartns)) != 0) { + goto fail2; + } + + for (i = 0; i < npartns; i++) { + /* Check if the partition is supported for this port */ + if ((rc = ef10_nvram_partn_to_type(enp, partns[i], &type)) != 0) + continue; + + if ((rc = efx_mcdi_nvram_test(enp, partns[i])) != 0) + goto fail3; + } + + EFSYS_KMEM_FREE(enp->en_esip, size, partns); + return (0); + +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); + EFSYS_KMEM_FREE(enp->en_esip, size, partns); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + return (rc); +} + +#endif /* EFSYS_OPT_DIAG */ + + __checkReturn efx_rc_t +ef10_nvram_partn_get_version( + __in efx_nic_t *enp, + __in uint32_t partn, + __out uint32_t *subtypep, + __out_ecount(4) uint16_t version[4]) +{ + efx_rc_t rc; + + /* FIXME: get highest partn version from all ports */ + /* FIXME: return partn description if available */ + + if ((rc = efx_mcdi_nvram_metadata(enp, partn, subtypep, + version, NULL, 0)) != 0) + goto fail1; + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +ef10_nvram_partn_rw_start( + __in efx_nic_t *enp, + __in uint32_t partn, + __out size_t *chunk_sizep) +{ + efx_nvram_info_t eni = { 0 }; + efx_rc_t rc; + + if ((rc = ef10_nvram_partn_info(enp, partn, &eni)) != 0) + goto fail1; + + if ((rc = ef10_nvram_partn_lock(enp, partn)) != 0) + goto fail2; + + if (chunk_sizep != NULL) + *chunk_sizep = eni.eni_write_size; + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +ef10_nvram_partn_rw_finish( + __in efx_nic_t *enp, + __in uint32_t partn, + __out_opt uint32_t *verify_resultp) +{ + efx_rc_t rc; + + if ((rc = ef10_nvram_partn_unlock(enp, partn, verify_resultp)) != 0) + goto fail1; + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +#endif /* EFSYS_OPT_NVRAM */ + +#endif /* EFX_OPTS_EF10() */ diff --git a/src/spdk/dpdk/drivers/net/sfc/base/ef10_phy.c b/src/spdk/dpdk/drivers/net/sfc/base/ef10_phy.c new file mode 100644 index 000000000..622374f7b --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/base/ef10_phy.c @@ -0,0 +1,758 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2012-2019 Solarflare Communications Inc. 
+ */ + +#include "efx.h" +#include "efx_impl.h" + +#if EFX_OPTS_EF10() + +static void +mcdi_phy_decode_cap( + __in uint32_t mcdi_cap, + __out uint32_t *maskp) +{ + uint32_t mask; + +#define CHECK_CAP(_cap) \ + EFX_STATIC_ASSERT(EFX_PHY_CAP_##_cap == MC_CMD_PHY_CAP_##_cap##_LBN) + + CHECK_CAP(10HDX); + CHECK_CAP(10FDX); + CHECK_CAP(100HDX); + CHECK_CAP(100FDX); + CHECK_CAP(1000HDX); + CHECK_CAP(1000FDX); + CHECK_CAP(10000FDX); + CHECK_CAP(25000FDX); + CHECK_CAP(40000FDX); + CHECK_CAP(50000FDX); + CHECK_CAP(100000FDX); + CHECK_CAP(PAUSE); + CHECK_CAP(ASYM); + CHECK_CAP(AN); + CHECK_CAP(DDM); + CHECK_CAP(BASER_FEC); + CHECK_CAP(BASER_FEC_REQUESTED); + CHECK_CAP(RS_FEC); + CHECK_CAP(RS_FEC_REQUESTED); + CHECK_CAP(25G_BASER_FEC); + CHECK_CAP(25G_BASER_FEC_REQUESTED); +#undef CHECK_CAP + + mask = 0; + if (mcdi_cap & (1 << MC_CMD_PHY_CAP_10HDX_LBN)) + mask |= (1 << EFX_PHY_CAP_10HDX); + if (mcdi_cap & (1 << MC_CMD_PHY_CAP_10FDX_LBN)) + mask |= (1 << EFX_PHY_CAP_10FDX); + if (mcdi_cap & (1 << MC_CMD_PHY_CAP_100HDX_LBN)) + mask |= (1 << EFX_PHY_CAP_100HDX); + if (mcdi_cap & (1 << MC_CMD_PHY_CAP_100FDX_LBN)) + mask |= (1 << EFX_PHY_CAP_100FDX); + if (mcdi_cap & (1 << MC_CMD_PHY_CAP_1000HDX_LBN)) + mask |= (1 << EFX_PHY_CAP_1000HDX); + if (mcdi_cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN)) + mask |= (1 << EFX_PHY_CAP_1000FDX); + if (mcdi_cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN)) + mask |= (1 << EFX_PHY_CAP_10000FDX); + if (mcdi_cap & (1 << MC_CMD_PHY_CAP_25000FDX_LBN)) + mask |= (1 << EFX_PHY_CAP_25000FDX); + if (mcdi_cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN)) + mask |= (1 << EFX_PHY_CAP_40000FDX); + if (mcdi_cap & (1 << MC_CMD_PHY_CAP_50000FDX_LBN)) + mask |= (1 << EFX_PHY_CAP_50000FDX); + if (mcdi_cap & (1 << MC_CMD_PHY_CAP_100000FDX_LBN)) + mask |= (1 << EFX_PHY_CAP_100000FDX); + + if (mcdi_cap & (1 << MC_CMD_PHY_CAP_PAUSE_LBN)) + mask |= (1 << EFX_PHY_CAP_PAUSE); + if (mcdi_cap & (1 << MC_CMD_PHY_CAP_ASYM_LBN)) + mask |= (1 << EFX_PHY_CAP_ASYM); + if (mcdi_cap & (1 << MC_CMD_PHY_CAP_AN_LBN)) + mask |= (1 << EFX_PHY_CAP_AN); + + /* FEC caps (supported on Medford2 and later) */ + if (mcdi_cap & (1 << MC_CMD_PHY_CAP_BASER_FEC_LBN)) + mask |= (1 << EFX_PHY_CAP_BASER_FEC); + if (mcdi_cap & (1 << MC_CMD_PHY_CAP_BASER_FEC_REQUESTED_LBN)) + mask |= (1 << EFX_PHY_CAP_BASER_FEC_REQUESTED); + + if (mcdi_cap & (1 << MC_CMD_PHY_CAP_RS_FEC_LBN)) + mask |= (1 << EFX_PHY_CAP_RS_FEC); + if (mcdi_cap & (1 << MC_CMD_PHY_CAP_RS_FEC_REQUESTED_LBN)) + mask |= (1 << EFX_PHY_CAP_RS_FEC_REQUESTED); + + if (mcdi_cap & (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_LBN)) + mask |= (1 << EFX_PHY_CAP_25G_BASER_FEC); + if (mcdi_cap & (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_LBN)) + mask |= (1 << EFX_PHY_CAP_25G_BASER_FEC_REQUESTED); + + *maskp = mask; +} + +static void +mcdi_phy_decode_link_mode( + __in efx_nic_t *enp, + __in uint32_t link_flags, + __in unsigned int speed, + __in unsigned int fcntl, + __in uint32_t fec, + __out efx_link_mode_t *link_modep, + __out unsigned int *fcntlp, + __out efx_phy_fec_type_t *fecp) +{ + boolean_t fd = !!(link_flags & + (1 << MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN)); + boolean_t up = !!(link_flags & + (1 << MC_CMD_GET_LINK_OUT_LINK_UP_LBN)); + + _NOTE(ARGUNUSED(enp)) + + if (!up) + *link_modep = EFX_LINK_DOWN; + else if (speed == 100000 && fd) + *link_modep = EFX_LINK_100000FDX; + else if (speed == 50000 && fd) + *link_modep = EFX_LINK_50000FDX; + else if (speed == 40000 && fd) + *link_modep = EFX_LINK_40000FDX; + else if (speed == 25000 && fd) + *link_modep = EFX_LINK_25000FDX; + else if (speed == 10000 
&& fd) + *link_modep = EFX_LINK_10000FDX; + else if (speed == 1000) + *link_modep = fd ? EFX_LINK_1000FDX : EFX_LINK_1000HDX; + else if (speed == 100) + *link_modep = fd ? EFX_LINK_100FDX : EFX_LINK_100HDX; + else if (speed == 10) + *link_modep = fd ? EFX_LINK_10FDX : EFX_LINK_10HDX; + else + *link_modep = EFX_LINK_UNKNOWN; + + if (fcntl == MC_CMD_FCNTL_OFF) + *fcntlp = 0; + else if (fcntl == MC_CMD_FCNTL_RESPOND) + *fcntlp = EFX_FCNTL_RESPOND; + else if (fcntl == MC_CMD_FCNTL_GENERATE) + *fcntlp = EFX_FCNTL_GENERATE; + else if (fcntl == MC_CMD_FCNTL_BIDIR) + *fcntlp = EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE; + else { + EFSYS_PROBE1(mc_pcol_error, int, fcntl); + *fcntlp = 0; + } + + switch (fec) { + case MC_CMD_FEC_NONE: + *fecp = EFX_PHY_FEC_NONE; + break; + case MC_CMD_FEC_BASER: + *fecp = EFX_PHY_FEC_BASER; + break; + case MC_CMD_FEC_RS: + *fecp = EFX_PHY_FEC_RS; + break; + default: + EFSYS_PROBE1(mc_pcol_error, int, fec); + *fecp = EFX_PHY_FEC_NONE; + break; + } +} + + + void +ef10_phy_link_ev( + __in efx_nic_t *enp, + __in efx_qword_t *eqp, + __out efx_link_mode_t *link_modep) +{ + efx_port_t *epp = &(enp->en_port); + unsigned int link_flags; + unsigned int speed; + unsigned int fcntl; + efx_phy_fec_type_t fec = MC_CMD_FEC_NONE; + efx_link_mode_t link_mode; + uint32_t lp_cap_mask; + + /* + * Convert the LINKCHANGE speed enumeration into mbit/s, in the + * same way as GET_LINK encodes the speed + */ + switch (MCDI_EV_FIELD(eqp, LINKCHANGE_SPEED)) { + case MCDI_EVENT_LINKCHANGE_SPEED_100M: + speed = 100; + break; + case MCDI_EVENT_LINKCHANGE_SPEED_1G: + speed = 1000; + break; + case MCDI_EVENT_LINKCHANGE_SPEED_10G: + speed = 10000; + break; + case MCDI_EVENT_LINKCHANGE_SPEED_25G: + speed = 25000; + break; + case MCDI_EVENT_LINKCHANGE_SPEED_40G: + speed = 40000; + break; + case MCDI_EVENT_LINKCHANGE_SPEED_50G: + speed = 50000; + break; + case MCDI_EVENT_LINKCHANGE_SPEED_100G: + speed = 100000; + break; + default: + speed = 0; + break; + } + + link_flags = MCDI_EV_FIELD(eqp, LINKCHANGE_LINK_FLAGS); + mcdi_phy_decode_link_mode(enp, link_flags, speed, + MCDI_EV_FIELD(eqp, LINKCHANGE_FCNTL), + MC_CMD_FEC_NONE, &link_mode, + &fcntl, &fec); + mcdi_phy_decode_cap(MCDI_EV_FIELD(eqp, LINKCHANGE_LP_CAP), + &lp_cap_mask); + + /* + * It's safe to update ep_lp_cap_mask without the driver's port lock + * because presumably any concurrently running efx_port_poll() is + * only going to arrive at the same value. + * + * ep_fcntl has two meanings. It's either the link common fcntl + * (if the PHY supports AN), or it's the forced link state. If + * the former, it's safe to update the value for the same reason as + * for ep_lp_cap_mask. If the latter, then just ignore the value, + * because we can race with efx_mac_fcntl_set(). 
+ */ + epp->ep_lp_cap_mask = lp_cap_mask; + epp->ep_fcntl = fcntl; + + *link_modep = link_mode; +} + + __checkReturn efx_rc_t +ef10_phy_power( + __in efx_nic_t *enp, + __in boolean_t power) +{ + efx_rc_t rc; + + if (!power) + return (0); + + /* Check if the PHY is a zombie */ + if ((rc = ef10_phy_verify(enp)) != 0) + goto fail1; + + enp->en_reset_flags |= EFX_RESET_PHY; + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +ef10_phy_get_link( + __in efx_nic_t *enp, + __out ef10_link_state_t *elsp) +{ + efx_mcdi_req_t req; + uint32_t fec; + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_LINK_IN_LEN, + MC_CMD_GET_LINK_OUT_V2_LEN); + efx_rc_t rc; + + req.emr_cmd = MC_CMD_GET_LINK; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_GET_LINK_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_GET_LINK_OUT_V2_LEN; + + efx_mcdi_execute(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail1; + } + + if (req.emr_out_length_used < MC_CMD_GET_LINK_OUT_LEN) { + rc = EMSGSIZE; + goto fail2; + } + + mcdi_phy_decode_cap(MCDI_OUT_DWORD(req, GET_LINK_OUT_CAP), + &elsp->epls.epls_adv_cap_mask); + mcdi_phy_decode_cap(MCDI_OUT_DWORD(req, GET_LINK_OUT_LP_CAP), + &elsp->epls.epls_lp_cap_mask); + + if (req.emr_out_length_used < MC_CMD_GET_LINK_OUT_V2_LEN) + fec = MC_CMD_FEC_NONE; + else + fec = MCDI_OUT_DWORD(req, GET_LINK_OUT_V2_FEC_TYPE); + + mcdi_phy_decode_link_mode(enp, MCDI_OUT_DWORD(req, GET_LINK_OUT_FLAGS), + MCDI_OUT_DWORD(req, GET_LINK_OUT_LINK_SPEED), + MCDI_OUT_DWORD(req, GET_LINK_OUT_FCNTL), + fec, &elsp->epls.epls_link_mode, + &elsp->epls.epls_fcntl, &elsp->epls.epls_fec); + + if (req.emr_out_length_used < MC_CMD_GET_LINK_OUT_V2_LEN) { + elsp->epls.epls_ld_cap_mask = 0; + } else { + mcdi_phy_decode_cap(MCDI_OUT_DWORD(req, GET_LINK_OUT_V2_LD_CAP), + &elsp->epls.epls_ld_cap_mask); + } + + +#if EFSYS_OPT_LOOPBACK + /* + * MC_CMD_LOOPBACK and EFX_LOOPBACK names are equivalent, so use the + * MCDI value directly. Agreement is checked in efx_loopback_mask(). 
+ */ + elsp->els_loopback = MCDI_OUT_DWORD(req, GET_LINK_OUT_LOOPBACK_MODE); +#endif /* EFSYS_OPT_LOOPBACK */ + + elsp->els_mac_up = MCDI_OUT_DWORD(req, GET_LINK_OUT_MAC_FAULT) == 0; + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +ef10_phy_reconfigure( + __in efx_nic_t *enp) +{ + efx_port_t *epp = &(enp->en_port); + efx_mcdi_req_t req; + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_SET_LINK_IN_LEN, + MC_CMD_SET_LINK_OUT_LEN); + uint32_t cap_mask; +#if EFSYS_OPT_PHY_LED_CONTROL + unsigned int led_mode; +#endif + unsigned int speed; + boolean_t supported; + efx_rc_t rc; + + if ((rc = efx_mcdi_link_control_supported(enp, &supported)) != 0) + goto fail1; + if (supported == B_FALSE) + goto out; + + req.emr_cmd = MC_CMD_SET_LINK; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_SET_LINK_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_SET_LINK_OUT_LEN; + + cap_mask = epp->ep_adv_cap_mask; + MCDI_IN_POPULATE_DWORD_10(req, SET_LINK_IN_CAP, + PHY_CAP_10HDX, (cap_mask >> EFX_PHY_CAP_10HDX) & 0x1, + PHY_CAP_10FDX, (cap_mask >> EFX_PHY_CAP_10FDX) & 0x1, + PHY_CAP_100HDX, (cap_mask >> EFX_PHY_CAP_100HDX) & 0x1, + PHY_CAP_100FDX, (cap_mask >> EFX_PHY_CAP_100FDX) & 0x1, + PHY_CAP_1000HDX, (cap_mask >> EFX_PHY_CAP_1000HDX) & 0x1, + PHY_CAP_1000FDX, (cap_mask >> EFX_PHY_CAP_1000FDX) & 0x1, + PHY_CAP_10000FDX, (cap_mask >> EFX_PHY_CAP_10000FDX) & 0x1, + PHY_CAP_PAUSE, (cap_mask >> EFX_PHY_CAP_PAUSE) & 0x1, + PHY_CAP_ASYM, (cap_mask >> EFX_PHY_CAP_ASYM) & 0x1, + PHY_CAP_AN, (cap_mask >> EFX_PHY_CAP_AN) & 0x1); + /* Too many fields for POPULATE macros, so insert this afterwards */ + MCDI_IN_SET_DWORD_FIELD(req, SET_LINK_IN_CAP, + PHY_CAP_25000FDX, (cap_mask >> EFX_PHY_CAP_25000FDX) & 0x1); + MCDI_IN_SET_DWORD_FIELD(req, SET_LINK_IN_CAP, + PHY_CAP_40000FDX, (cap_mask >> EFX_PHY_CAP_40000FDX) & 0x1); + MCDI_IN_SET_DWORD_FIELD(req, SET_LINK_IN_CAP, + PHY_CAP_50000FDX, (cap_mask >> EFX_PHY_CAP_50000FDX) & 0x1); + MCDI_IN_SET_DWORD_FIELD(req, SET_LINK_IN_CAP, + PHY_CAP_100000FDX, (cap_mask >> EFX_PHY_CAP_100000FDX) & 0x1); + + MCDI_IN_SET_DWORD_FIELD(req, SET_LINK_IN_CAP, + PHY_CAP_BASER_FEC, (cap_mask >> EFX_PHY_CAP_BASER_FEC) & 0x1); + MCDI_IN_SET_DWORD_FIELD(req, SET_LINK_IN_CAP, + PHY_CAP_BASER_FEC_REQUESTED, + (cap_mask >> EFX_PHY_CAP_BASER_FEC_REQUESTED) & 0x1); + + MCDI_IN_SET_DWORD_FIELD(req, SET_LINK_IN_CAP, + PHY_CAP_RS_FEC, (cap_mask >> EFX_PHY_CAP_RS_FEC) & 0x1); + MCDI_IN_SET_DWORD_FIELD(req, SET_LINK_IN_CAP, + PHY_CAP_RS_FEC_REQUESTED, + (cap_mask >> EFX_PHY_CAP_RS_FEC_REQUESTED) & 0x1); + + MCDI_IN_SET_DWORD_FIELD(req, SET_LINK_IN_CAP, + PHY_CAP_25G_BASER_FEC, + (cap_mask >> EFX_PHY_CAP_25G_BASER_FEC) & 0x1); + MCDI_IN_SET_DWORD_FIELD(req, SET_LINK_IN_CAP, + PHY_CAP_25G_BASER_FEC_REQUESTED, + (cap_mask >> EFX_PHY_CAP_25G_BASER_FEC_REQUESTED) & 0x1); + +#if EFSYS_OPT_LOOPBACK + MCDI_IN_SET_DWORD(req, SET_LINK_IN_LOOPBACK_MODE, + epp->ep_loopback_type); + switch (epp->ep_loopback_link_mode) { + case EFX_LINK_100FDX: + speed = 100; + break; + case EFX_LINK_1000FDX: + speed = 1000; + break; + case EFX_LINK_10000FDX: + speed = 10000; + break; + case EFX_LINK_25000FDX: + speed = 25000; + break; + case EFX_LINK_40000FDX: + speed = 40000; + break; + case EFX_LINK_50000FDX: + speed = 50000; + break; + case EFX_LINK_100000FDX: + speed = 100000; + break; + default: + speed = 0; + } +#else + MCDI_IN_SET_DWORD(req, SET_LINK_IN_LOOPBACK_MODE, MC_CMD_LOOPBACK_NONE); + speed = 0; +#endif /* 
EFSYS_OPT_LOOPBACK */ + MCDI_IN_SET_DWORD(req, SET_LINK_IN_LOOPBACK_SPEED, speed); + +#if EFSYS_OPT_PHY_FLAGS + MCDI_IN_SET_DWORD(req, SET_LINK_IN_FLAGS, epp->ep_phy_flags); +#else + MCDI_IN_SET_DWORD(req, SET_LINK_IN_FLAGS, 0); +#endif /* EFSYS_OPT_PHY_FLAGS */ + + efx_mcdi_execute(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail2; + } + + /* And set the blink mode */ + (void) memset(payload, 0, sizeof (payload)); + req.emr_cmd = MC_CMD_SET_ID_LED; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_SET_ID_LED_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_SET_ID_LED_OUT_LEN; + +#if EFSYS_OPT_PHY_LED_CONTROL + switch (epp->ep_phy_led_mode) { + case EFX_PHY_LED_DEFAULT: + led_mode = MC_CMD_LED_DEFAULT; + break; + case EFX_PHY_LED_OFF: + led_mode = MC_CMD_LED_OFF; + break; + case EFX_PHY_LED_ON: + led_mode = MC_CMD_LED_ON; + break; + default: + EFSYS_ASSERT(0); + led_mode = MC_CMD_LED_DEFAULT; + } + + MCDI_IN_SET_DWORD(req, SET_ID_LED_IN_STATE, led_mode); +#else + MCDI_IN_SET_DWORD(req, SET_ID_LED_IN_STATE, MC_CMD_LED_DEFAULT); +#endif /* EFSYS_OPT_PHY_LED_CONTROL */ + + efx_mcdi_execute(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail3; + } +out: + return (0); + +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +ef10_phy_verify( + __in efx_nic_t *enp) +{ + efx_mcdi_req_t req; + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_PHY_STATE_IN_LEN, + MC_CMD_GET_PHY_STATE_OUT_LEN); + uint32_t state; + efx_rc_t rc; + + req.emr_cmd = MC_CMD_GET_PHY_STATE; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_GET_PHY_STATE_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_GET_PHY_STATE_OUT_LEN; + + efx_mcdi_execute(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail1; + } + + if (req.emr_out_length_used < MC_CMD_GET_PHY_STATE_OUT_LEN) { + rc = EMSGSIZE; + goto fail2; + } + + state = MCDI_OUT_DWORD(req, GET_PHY_STATE_OUT_STATE); + if (state != MC_CMD_PHY_STATE_OK) { + if (state != MC_CMD_PHY_STATE_ZOMBIE) + EFSYS_PROBE1(mc_pcol_error, int, state); + rc = ENOTACTIVE; + goto fail3; + } + + return (0); + +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +ef10_phy_oui_get( + __in efx_nic_t *enp, + __out uint32_t *ouip) +{ + _NOTE(ARGUNUSED(enp, ouip)) + + return (ENOTSUP); +} + + __checkReturn efx_rc_t +ef10_phy_link_state_get( + __in efx_nic_t *enp, + __out efx_phy_link_state_t *eplsp) +{ + efx_rc_t rc; + ef10_link_state_t els; + + /* Obtain the active link state */ + if ((rc = ef10_phy_get_link(enp, &els)) != 0) + goto fail1; + + *eplsp = els.epls; + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + +#if EFSYS_OPT_PHY_STATS + + __checkReturn efx_rc_t +ef10_phy_stats_update( + __in efx_nic_t *enp, + __in efsys_mem_t *esmp, + __inout_ecount(EFX_PHY_NSTATS) uint32_t *stat) +{ + /* TBD: no stats support in firmware yet */ + _NOTE(ARGUNUSED(enp, esmp)) + memset(stat, 0, EFX_PHY_NSTATS * sizeof (*stat)); + + return (0); +} + +#endif /* EFSYS_OPT_PHY_STATS */ + +#if EFSYS_OPT_BIST + + __checkReturn efx_rc_t +ef10_bist_enable_offline( + __in efx_nic_t *enp) +{ + efx_rc_t rc; + + if ((rc = efx_mcdi_bist_enable_offline(enp)) != 0) + goto fail1; + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +ef10_bist_start( + __in 
efx_nic_t *enp, + __in efx_bist_type_t type) +{ + efx_rc_t rc; + + if ((rc = efx_mcdi_bist_start(enp, type)) != 0) + goto fail1; + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +ef10_bist_poll( + __in efx_nic_t *enp, + __in efx_bist_type_t type, + __out efx_bist_result_t *resultp, + __out_opt __drv_when(count > 0, __notnull) + uint32_t *value_maskp, + __out_ecount_opt(count) __drv_when(count > 0, __notnull) + unsigned long *valuesp, + __in size_t count) +{ + /* + * MCDI_CTL_SDU_LEN_MAX_V1 is large enough to cover all BIST results, + * whilst not wasting stack. + */ + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_POLL_BIST_IN_LEN, + MCDI_CTL_SDU_LEN_MAX_V1); + efx_nic_cfg_t *encp = &(enp->en_nic_cfg); + efx_mcdi_req_t req; + uint32_t value_mask = 0; + uint32_t result; + efx_rc_t rc; + + EFX_STATIC_ASSERT(MC_CMD_POLL_BIST_OUT_LEN <= + MCDI_CTL_SDU_LEN_MAX_V1); + EFX_STATIC_ASSERT(MC_CMD_POLL_BIST_OUT_SFT9001_LEN <= + MCDI_CTL_SDU_LEN_MAX_V1); + EFX_STATIC_ASSERT(MC_CMD_POLL_BIST_OUT_MRSFP_LEN <= + MCDI_CTL_SDU_LEN_MAX_V1); + EFX_STATIC_ASSERT(MC_CMD_POLL_BIST_OUT_MEM_LEN <= + MCDI_CTL_SDU_LEN_MAX_V1); + + _NOTE(ARGUNUSED(type)) + + req.emr_cmd = MC_CMD_POLL_BIST; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_POLL_BIST_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MCDI_CTL_SDU_LEN_MAX_V1; + + efx_mcdi_execute(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail1; + } + + if (req.emr_out_length_used < MC_CMD_POLL_BIST_OUT_RESULT_OFST + 4) { + rc = EMSGSIZE; + goto fail2; + } + + if (count > 0) + (void) memset(valuesp, '\0', count * sizeof (unsigned long)); + + result = MCDI_OUT_DWORD(req, POLL_BIST_OUT_RESULT); + + if (result == MC_CMD_POLL_BIST_FAILED && + req.emr_out_length >= MC_CMD_POLL_BIST_OUT_MEM_LEN && + count > EFX_BIST_MEM_ECC_FATAL) { + if (valuesp != NULL) { + valuesp[EFX_BIST_MEM_TEST] = + MCDI_OUT_DWORD(req, POLL_BIST_OUT_MEM_TEST); + valuesp[EFX_BIST_MEM_ADDR] = + MCDI_OUT_DWORD(req, POLL_BIST_OUT_MEM_ADDR); + valuesp[EFX_BIST_MEM_BUS] = + MCDI_OUT_DWORD(req, POLL_BIST_OUT_MEM_BUS); + valuesp[EFX_BIST_MEM_EXPECT] = + MCDI_OUT_DWORD(req, POLL_BIST_OUT_MEM_EXPECT); + valuesp[EFX_BIST_MEM_ACTUAL] = + MCDI_OUT_DWORD(req, POLL_BIST_OUT_MEM_ACTUAL); + valuesp[EFX_BIST_MEM_ECC] = + MCDI_OUT_DWORD(req, POLL_BIST_OUT_MEM_ECC); + valuesp[EFX_BIST_MEM_ECC_PARITY] = + MCDI_OUT_DWORD(req, POLL_BIST_OUT_MEM_ECC_PARITY); + valuesp[EFX_BIST_MEM_ECC_FATAL] = + MCDI_OUT_DWORD(req, POLL_BIST_OUT_MEM_ECC_FATAL); + } + value_mask |= (1 << EFX_BIST_MEM_TEST) | + (1 << EFX_BIST_MEM_ADDR) | + (1 << EFX_BIST_MEM_BUS) | + (1 << EFX_BIST_MEM_EXPECT) | + (1 << EFX_BIST_MEM_ACTUAL) | + (1 << EFX_BIST_MEM_ECC) | + (1 << EFX_BIST_MEM_ECC_PARITY) | + (1 << EFX_BIST_MEM_ECC_FATAL); + } else if (result == MC_CMD_POLL_BIST_FAILED && + encp->enc_phy_type == EFX_PHY_XFI_FARMI && + req.emr_out_length >= MC_CMD_POLL_BIST_OUT_MRSFP_LEN && + count > EFX_BIST_FAULT_CODE) { + if (valuesp != NULL) + valuesp[EFX_BIST_FAULT_CODE] = + MCDI_OUT_DWORD(req, POLL_BIST_OUT_MRSFP_TEST); + value_mask |= 1 << EFX_BIST_FAULT_CODE; + } + + if (value_maskp != NULL) + *value_maskp = value_mask; + + EFSYS_ASSERT(resultp != NULL); + if (result == MC_CMD_POLL_BIST_RUNNING) + *resultp = EFX_BIST_RESULT_RUNNING; + else if (result == MC_CMD_POLL_BIST_PASSED) + *resultp = EFX_BIST_RESULT_PASSED; + else + *resultp = EFX_BIST_RESULT_FAILED; + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + 
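The BIST entry points above (ef10_bist_enable_offline, ef10_bist_start, ef10_bist_poll) are intended to be driven as a start-then-poll sequence, with the outcome returned through resultp and the optional valuesp/value_maskp pair. The caller-side sketch below illustrates that flow directly against the ef10_* functions defined in this file; the helper name, the EIO mapping and the absence of a delay between polls are illustrative assumptions, not part of the patched driver.

/* Hypothetical helper, for illustration only; not part of the driver */
static	__checkReturn	efx_rc_t
ef10_bist_run_example(
	__in		efx_nic_t *enp,
	__in		efx_bist_type_t type)
{
	/* Large enough for the memory BIST results decoded by ef10_bist_poll() */
	unsigned long values[EFX_BIST_MEM_ECC_FATAL + 1];
	uint32_t value_mask = 0;
	efx_bist_result_t result;
	efx_rc_t rc;

	/* Allow offline tests to run (see ef10_bist_enable_offline above) */
	if ((rc = ef10_bist_enable_offline(enp)) != 0)
		return (rc);

	if ((rc = ef10_bist_start(enp, type)) != 0)
		return (rc);

	/* Poll until the firmware reports a terminal result */
	do {
		/* A real caller would sleep or yield between polls */
		rc = ef10_bist_poll(enp, type, &result, &value_mask,
		    values, EFX_ARRAY_SIZE(values));
		if (rc != 0)
			return (rc);
	} while (result == EFX_BIST_RESULT_RUNNING);

	/* On failure, value_mask says which values[] entries were filled in */
	return ((result == EFX_BIST_RESULT_PASSED) ? 0 : EIO);
}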
void +ef10_bist_stop( + __in efx_nic_t *enp, + __in efx_bist_type_t type) +{ + /* There is no way to stop BIST on EF10. */ + _NOTE(ARGUNUSED(enp, type)) +} + +#endif /* EFSYS_OPT_BIST */ + +#endif /* EFX_OPTS_EF10() */ diff --git a/src/spdk/dpdk/drivers/net/sfc/base/ef10_proxy.c b/src/spdk/dpdk/drivers/net/sfc/base/ef10_proxy.c new file mode 100644 index 000000000..19c11c6eb --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/base/ef10_proxy.c @@ -0,0 +1,470 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2018-2019 Solarflare Communications Inc. + */ + +#include "efx.h" +#include "efx_impl.h" + +#if EFSYS_OPT_MCDI_PROXY_AUTH_SERVER + + __checkReturn efx_rc_t +ef10_proxy_auth_init( + __in efx_nic_t *enp) +{ + EFSYS_ASSERT(EFX_FAMILY_IS_EF10(enp)); + + return (0); +} + + void +ef10_proxy_auth_fini( + __in efx_nic_t *enp) +{ + EFSYS_ASSERT(EFX_FAMILY_IS_EF10(enp)); +} + +static __checkReturn efx_rc_t +efx_mcdi_proxy_configure( + __in efx_nic_t *enp, + __in boolean_t disable_proxy, + __in uint64_t req_buffer_addr, + __in uint64_t resp_buffer_addr, + __in uint64_t stat_buffer_addr, + __in size_t req_size, + __in size_t resp_size, + __in uint32_t block_cnt, + __in uint8_t *op_maskp, + __in size_t op_mask_size) +{ + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_PROXY_CONFIGURE_EXT_IN_LEN, + MC_CMD_PROXY_CONFIGURE_OUT_LEN); + efx_mcdi_req_t req; + efx_rc_t rc; + + req.emr_cmd = MC_CMD_PROXY_CONFIGURE; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_PROXY_CONFIGURE_EXT_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_PROXY_CONFIGURE_OUT_LEN; + + if (!disable_proxy) { + MCDI_IN_SET_DWORD(req, PROXY_CONFIGURE_IN_FLAGS, 1); + MCDI_IN_SET_DWORD(req, PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_LO, + req_buffer_addr & 0xffffffff); + MCDI_IN_SET_DWORD(req, PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_HI, + req_buffer_addr >> 32); + MCDI_IN_SET_DWORD(req, PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_LO, + resp_buffer_addr & 0xffffffff); + MCDI_IN_SET_DWORD(req, PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_HI, + resp_buffer_addr >> 32); + MCDI_IN_SET_DWORD(req, PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_LO, + stat_buffer_addr & 0xffffffff); + MCDI_IN_SET_DWORD(req, PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_HI, + stat_buffer_addr >> 32); + MCDI_IN_SET_DWORD(req, PROXY_CONFIGURE_IN_REQUEST_BLOCK_SIZE, + req_size); + MCDI_IN_SET_DWORD(req, PROXY_CONFIGURE_IN_REPLY_BLOCK_SIZE, + resp_size); + MCDI_IN_SET_DWORD(req, PROXY_CONFIGURE_IN_STATUS_BLOCK_SIZE, + MC_PROXY_STATUS_BUFFER_LEN); + MCDI_IN_SET_DWORD(req, PROXY_CONFIGURE_IN_NUM_BLOCKS, + block_cnt); + memcpy(MCDI_IN2(req, efx_byte_t, + PROXY_CONFIGURE_IN_ALLOWED_MCDI_MASK), + op_maskp, op_mask_size); + MCDI_IN_SET_DWORD(req, PROXY_CONFIGURE_EXT_IN_RESERVED, + EFX_PROXY_CONFIGURE_MAGIC); + } + + efx_mcdi_execute(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail1; + } + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + return (rc); +} + +static __checkReturn efx_rc_t +efx_mcdi_privilege_modify( + __in efx_nic_t *enp, + __in uint32_t fn_group, + __in uint32_t pf_index, + __in uint32_t vf_index, + __in uint32_t add_privileges_mask, + __in uint32_t remove_privileges_mask) +{ + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_PRIVILEGE_MODIFY_IN_LEN, + MC_CMD_PRIVILEGE_MODIFY_OUT_LEN); + efx_mcdi_req_t req; + efx_rc_t rc; + + req.emr_cmd = MC_CMD_PRIVILEGE_MODIFY; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_PRIVILEGE_MODIFY_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = 
MC_CMD_PRIVILEGE_MODIFY_OUT_LEN; + + EFSYS_ASSERT(fn_group <= MC_CMD_PRIVILEGE_MODIFY_IN_ONE); + + MCDI_IN_SET_DWORD(req, PRIVILEGE_MODIFY_IN_FN_GROUP, fn_group); + + if ((fn_group == MC_CMD_PRIVILEGE_MODIFY_IN_ONE) || + (fn_group == MC_CMD_PRIVILEGE_MODIFY_IN_VFS_OF_PF)) { + MCDI_IN_POPULATE_DWORD_2(req, + PRIVILEGE_MODIFY_IN_FUNCTION, + PRIVILEGE_MODIFY_IN_FUNCTION_PF, pf_index, + PRIVILEGE_MODIFY_IN_FUNCTION_VF, vf_index); + } + + MCDI_IN_SET_DWORD(req, PRIVILEGE_MODIFY_IN_ADD_MASK, + add_privileges_mask); + MCDI_IN_SET_DWORD(req, PRIVILEGE_MODIFY_IN_REMOVE_MASK, + remove_privileges_mask); + + efx_mcdi_execute(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail1; + } + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + return (rc); +} + +static __checkReturn efx_rc_t +efx_proxy_auth_fill_op_mask( + __in_ecount(op_count) uint32_t *op_listp, + __in size_t op_count, + __out_ecount(op_mask_size) uint32_t *op_maskp, + __in size_t op_mask_size) +{ + efx_rc_t rc; + uint32_t op; + + if ((op_listp == NULL) || (op_maskp == NULL)) { + rc = EINVAL; + goto fail1; + } + + while (op_count--) { + op = *op_listp++; + if (op > op_mask_size * 32) { + rc = EINVAL; + goto fail2; + } + op_maskp[op / 32] |= 1u << (op & 31); + } + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + return (rc); +} + + __checkReturn efx_rc_t +ef10_proxy_auth_mc_config( + __in efx_nic_t *enp, + __in_ecount(block_cnt) efsys_mem_t *request_bufferp, + __in_ecount(block_cnt) efsys_mem_t *response_bufferp, + __in_ecount(block_cnt) efsys_mem_t *status_bufferp, + __in uint32_t block_cnt, + __in_ecount(op_count) uint32_t *op_listp, + __in size_t op_count) +{ +#define PROXY_OPS_MASK_SIZE \ + (EFX_DIV_ROUND_UP( \ + MC_CMD_PROXY_CONFIGURE_IN_ALLOWED_MCDI_MASK_LEN, \ + sizeof (uint32_t))) + + efx_rc_t rc; + uint32_t op_mask[PROXY_OPS_MASK_SIZE] = {0}; + + /* Prepare the operation mask from operation list array */ + if ((rc = efx_proxy_auth_fill_op_mask(op_listp, op_count, + op_mask, PROXY_OPS_MASK_SIZE) != 0)) + goto fail1; + + if ((rc = efx_mcdi_proxy_configure(enp, B_FALSE, + EFSYS_MEM_ADDR(request_bufferp), + EFSYS_MEM_ADDR(response_bufferp), + EFSYS_MEM_ADDR(status_bufferp), + EFSYS_MEM_SIZE(request_bufferp) / block_cnt, + EFSYS_MEM_SIZE(response_bufferp) / block_cnt, + block_cnt, (uint8_t *)&op_mask, + sizeof (op_mask))) != 0) + goto fail2; + + return (0); + +fail2: + EFSYS_PROBE(fail2); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + return (rc); +} + + __checkReturn efx_rc_t +ef10_proxy_auth_disable( + __in efx_nic_t *enp) +{ + efx_rc_t rc; + + if ((rc = efx_mcdi_proxy_configure(enp, B_TRUE, + 0, 0, 0, 0, 0, 0, NULL, 0) != 0)) + goto fail1; + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + return (rc); +} + + __checkReturn efx_rc_t +ef10_proxy_auth_privilege_modify( + __in efx_nic_t *enp, + __in uint32_t fn_group, + __in uint32_t pf_index, + __in uint32_t vf_index, + __in uint32_t add_privileges_mask, + __in uint32_t remove_privileges_mask) +{ + return (efx_mcdi_privilege_modify(enp, fn_group, pf_index, vf_index, + add_privileges_mask, remove_privileges_mask)); +} + +static __checkReturn efx_rc_t +efx_mcdi_privilege_mask_set( + __in efx_nic_t *enp, + __in uint32_t vf_index, + __in uint32_t mask, + __in uint32_t value) +{ + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_PRIVILEGE_MASK_IN_LEN, + MC_CMD_PRIVILEGE_MASK_OUT_LEN); + efx_nic_cfg_t *encp = &(enp->en_nic_cfg); + efx_mcdi_req_t req; + efx_rc_t rc; + uint32_t old_mask = 0; + uint32_t new_mask = 0; + + 
EFSYS_ASSERT((value & ~mask) == 0); + + req.emr_cmd = MC_CMD_PRIVILEGE_MASK; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_PRIVILEGE_MASK_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_PRIVILEGE_MASK_OUT_LEN; + + /* Get privilege mask */ + MCDI_IN_POPULATE_DWORD_2(req, PRIVILEGE_MASK_IN_FUNCTION, + PRIVILEGE_MASK_IN_FUNCTION_PF, encp->enc_pf, + PRIVILEGE_MASK_IN_FUNCTION_VF, vf_index); + + efx_mcdi_execute(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail1; + } + + if (req.emr_out_length_used != MC_CMD_PRIVILEGE_MASK_OUT_LEN) { + rc = EMSGSIZE; + goto fail2; + } + + old_mask = *MCDI_OUT2(req, uint32_t, PRIVILEGE_MASK_OUT_OLD_MASK); + new_mask = old_mask & ~mask; + new_mask |= (value & mask); + + if (new_mask == old_mask) + return (0); + + new_mask |= MC_CMD_PRIVILEGE_MASK_IN_DO_CHANGE; + memset(payload, 0, sizeof (payload)); + + req.emr_cmd = MC_CMD_PRIVILEGE_MASK; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_PRIVILEGE_MASK_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_PRIVILEGE_MASK_OUT_LEN; + + /* Set privilege mask */ + MCDI_IN_SET_DWORD(req, PRIVILEGE_MASK_IN_NEW_MASK, new_mask); + + efx_mcdi_execute(enp, &req); + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail3; + } + + if (req.emr_out_length_used != MC_CMD_PRIVILEGE_MASK_OUT_LEN) { + rc = EMSGSIZE; + goto fail4; + } + + return (0); + +fail4: + EFSYS_PROBE(fail4); +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + return (rc); +} + + __checkReturn efx_rc_t +ef10_proxy_auth_set_privilege_mask( + __in efx_nic_t *enp, + __in uint32_t vf_index, + __in uint32_t mask, + __in uint32_t value) +{ + return (efx_mcdi_privilege_mask_set(enp, vf_index, + mask, value)); +} + +static __checkReturn efx_rc_t +efx_mcdi_proxy_complete( + __in efx_nic_t *enp, + __in uint32_t fn_index, + __in uint32_t proxy_result, + __in uint32_t handle) +{ + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_PROXY_COMPLETE_IN_LEN, + MC_CMD_PROXY_COMPLETE_OUT_LEN); + efx_mcdi_req_t req; + efx_rc_t rc; + + req.emr_cmd = MC_CMD_PROXY_COMPLETE; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_PROXY_COMPLETE_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_PROXY_COMPLETE_OUT_LEN; + + MCDI_IN_SET_DWORD(req, PROXY_COMPLETE_IN_BLOCK_INDEX, fn_index); + MCDI_IN_SET_DWORD(req, PROXY_COMPLETE_IN_STATUS, proxy_result); + MCDI_IN_SET_DWORD(req, PROXY_COMPLETE_IN_HANDLE, handle); + + efx_mcdi_execute(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail1; + } + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + return (rc); +} + + __checkReturn efx_rc_t +ef10_proxy_auth_complete_request( + __in efx_nic_t *enp, + __in uint32_t fn_index, + __in uint32_t proxy_result, + __in uint32_t handle) +{ + return (efx_mcdi_proxy_complete(enp, fn_index, + proxy_result, handle)); +} + +static __checkReturn efx_rc_t +efx_mcdi_proxy_cmd( + __in efx_nic_t *enp, + __in uint32_t pf_index, + __in uint32_t vf_index, + __in_bcount(request_size) uint8_t *request_bufferp, + __in size_t request_size, + __out_bcount(response_size) uint8_t *response_bufferp, + __in size_t response_size, + __out_opt size_t *response_size_actualp) +{ + efx_dword_t *inbufp; + efx_mcdi_req_t req; + efx_rc_t rc; + + if (request_size % sizeof (*inbufp) != 0) { + rc = EINVAL; + goto fail1; + } + + EFSYS_KMEM_ALLOC(enp, (MC_CMD_PROXY_CMD_IN_LEN + request_size), inbufp); + + req.emr_cmd = MC_CMD_PROXY_CMD; + req.emr_in_buf = (uint8_t *) inbufp; + 
req.emr_in_length = MC_CMD_PROXY_CMD_IN_LEN + request_size; + req.emr_out_buf = response_bufferp; + req.emr_out_length = response_size; + + MCDI_IN_POPULATE_DWORD_2(req, PROXY_CMD_IN_TARGET, + PROXY_CMD_IN_TARGET_PF, pf_index, + PROXY_CMD_IN_TARGET_VF, vf_index); + + /* Proxied command should be located just after PROXY_CMD */ + memcpy(&inbufp[MC_CMD_PROXY_CMD_IN_LEN / sizeof (*inbufp)], + request_bufferp, request_size); + + efx_mcdi_execute(enp, &req); + + EFSYS_KMEM_FREE(enp, (MC_CMD_PROXY_CMD_IN_LEN + request_size), inbufp); + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail2; + } + + if (response_size_actualp != NULL) + *response_size_actualp = req.emr_out_length_used; + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + return (rc); +} + + __checkReturn efx_rc_t +ef10_proxy_auth_get_privilege_mask( + __in efx_nic_t *enp, + __in uint32_t pf_index, + __in uint32_t vf_index, + __out uint32_t *maskp) +{ + return (efx_mcdi_privilege_mask(enp, pf_index, vf_index, maskp)); +} + + + __checkReturn efx_rc_t +ef10_proxy_auth_exec_cmd( + __in efx_nic_t *enp, + __inout efx_proxy_cmd_params_t *paramsp) +{ + return (efx_mcdi_proxy_cmd(enp, paramsp->pf_index, paramsp->vf_index, + paramsp->request_bufferp, paramsp->request_size, + paramsp->response_bufferp, paramsp->response_size, + paramsp->response_size_actualp)); +} +#endif /* EFSYS_OPT_MCDI_PROXY_AUTH_SERVER */ diff --git a/src/spdk/dpdk/drivers/net/sfc/base/ef10_rx.c b/src/spdk/dpdk/drivers/net/sfc/base/ef10_rx.c new file mode 100644 index 000000000..bfa55337c --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/base/ef10_rx.c @@ -0,0 +1,1229 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2012-2019 Solarflare Communications Inc. + */ + +#include "efx.h" +#include "efx_impl.h" + + +#if EFX_OPTS_EF10() + + +static __checkReturn efx_rc_t +efx_mcdi_init_rxq( + __in efx_nic_t *enp, + __in uint32_t ndescs, + __in efx_evq_t *eep, + __in uint32_t label, + __in uint32_t instance, + __in efsys_mem_t *esmp, + __in boolean_t disable_scatter, + __in boolean_t want_inner_classes, + __in uint32_t buf_size, + __in uint32_t ps_bufsize, + __in uint32_t es_bufs_per_desc, + __in uint32_t es_max_dma_len, + __in uint32_t es_buf_stride, + __in uint32_t hol_block_timeout) +{ + efx_nic_cfg_t *encp = &(enp->en_nic_cfg); + efx_mcdi_req_t req; + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_INIT_RXQ_V4_IN_LEN, + MC_CMD_INIT_RXQ_V4_OUT_LEN); + int npages = efx_rxq_nbufs(enp, ndescs); + int i; + efx_qword_t *dma_addr; + uint64_t addr; + efx_rc_t rc; + uint32_t dma_mode; + boolean_t want_outer_classes; + boolean_t no_cont_ev; + + EFSYS_ASSERT3U(ndescs, <=, encp->enc_rxq_max_ndescs); + + if ((esmp == NULL) || + (EFSYS_MEM_SIZE(esmp) < efx_rxq_size(enp, ndescs))) { + rc = EINVAL; + goto fail1; + } + + no_cont_ev = (eep->ee_flags & EFX_EVQ_FLAGS_NO_CONT_EV); + if ((no_cont_ev == B_TRUE) && (disable_scatter == B_FALSE)) { + /* TODO: Support scatter in NO_CONT_EV mode */ + rc = EINVAL; + goto fail2; + } + + if (ps_bufsize > 0) + dma_mode = MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM; + else if (es_bufs_per_desc > 0) + dma_mode = MC_CMD_INIT_RXQ_V3_IN_EQUAL_STRIDE_SUPER_BUFFER; + else + dma_mode = MC_CMD_INIT_RXQ_EXT_IN_SINGLE_PACKET; + + if (encp->enc_tunnel_encapsulations_supported != 0 && + !want_inner_classes) { + /* + * WANT_OUTER_CLASSES can only be specified on hardware which + * supports tunnel encapsulation offloads, even though it is + * effectively the behaviour the hardware gives. 
+ * + * Also, on hardware which does support such offloads, older + * firmware rejects the flag if the offloads are not supported + * by the current firmware variant, which means this may fail if + * the capabilities are not updated when the firmware variant + * changes. This is not an issue on newer firmware, as it was + * changed in bug 69842 (v6.4.2.1007) to permit this flag to be + * specified on all firmware variants. + */ + want_outer_classes = B_TRUE; + } else { + want_outer_classes = B_FALSE; + } + + req.emr_cmd = MC_CMD_INIT_RXQ; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_INIT_RXQ_V4_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_INIT_RXQ_V4_OUT_LEN; + + MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_SIZE, ndescs); + MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_TARGET_EVQ, eep->ee_index); + MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_LABEL, label); + MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_INSTANCE, instance); + MCDI_IN_POPULATE_DWORD_10(req, INIT_RXQ_EXT_IN_FLAGS, + INIT_RXQ_EXT_IN_FLAG_BUFF_MODE, 0, + INIT_RXQ_EXT_IN_FLAG_HDR_SPLIT, 0, + INIT_RXQ_EXT_IN_FLAG_TIMESTAMP, 0, + INIT_RXQ_EXT_IN_CRC_MODE, 0, + INIT_RXQ_EXT_IN_FLAG_PREFIX, 1, + INIT_RXQ_EXT_IN_FLAG_DISABLE_SCATTER, disable_scatter, + INIT_RXQ_EXT_IN_DMA_MODE, + dma_mode, + INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE, ps_bufsize, + INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES, want_outer_classes, + INIT_RXQ_EXT_IN_FLAG_NO_CONT_EV, no_cont_ev); + MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_OWNER_ID, 0); + MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_PORT_ID, enp->en_vport_id); + + if (es_bufs_per_desc > 0) { + MCDI_IN_SET_DWORD(req, + INIT_RXQ_V3_IN_ES_PACKET_BUFFERS_PER_BUCKET, + es_bufs_per_desc); + MCDI_IN_SET_DWORD(req, + INIT_RXQ_V3_IN_ES_MAX_DMA_LEN, es_max_dma_len); + MCDI_IN_SET_DWORD(req, + INIT_RXQ_V3_IN_ES_PACKET_STRIDE, es_buf_stride); + MCDI_IN_SET_DWORD(req, + INIT_RXQ_V3_IN_ES_HEAD_OF_LINE_BLOCK_TIMEOUT, + hol_block_timeout); + } + + if (encp->enc_init_rxq_with_buffer_size) + MCDI_IN_SET_DWORD(req, INIT_RXQ_V4_IN_BUFFER_SIZE_BYTES, + buf_size); + + dma_addr = MCDI_IN2(req, efx_qword_t, INIT_RXQ_IN_DMA_ADDR); + addr = EFSYS_MEM_ADDR(esmp); + + for (i = 0; i < npages; i++) { + EFX_POPULATE_QWORD_2(*dma_addr, + EFX_DWORD_1, (uint32_t)(addr >> 32), + EFX_DWORD_0, (uint32_t)(addr & 0xffffffff)); + + dma_addr++; + addr += EFX_BUF_SIZE; + } + + efx_mcdi_execute(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail3; + } + + return (0); + +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +static __checkReturn efx_rc_t +efx_mcdi_fini_rxq( + __in efx_nic_t *enp, + __in uint32_t instance) +{ + efx_mcdi_req_t req; + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_FINI_RXQ_IN_LEN, + MC_CMD_FINI_RXQ_OUT_LEN); + efx_rc_t rc; + + req.emr_cmd = MC_CMD_FINI_RXQ; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_FINI_RXQ_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_FINI_RXQ_OUT_LEN; + + MCDI_IN_SET_DWORD(req, FINI_RXQ_IN_INSTANCE, instance); + + efx_mcdi_execute_quiet(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail1; + } + + return (0); + +fail1: + /* + * EALREADY is not an error, but indicates that the MC has rebooted and + * that the RXQ has already been destroyed. 
+ */ + if (rc != EALREADY) + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +#if EFSYS_OPT_RX_SCALE +static __checkReturn efx_rc_t +efx_mcdi_rss_context_alloc( + __in efx_nic_t *enp, + __in efx_rx_scale_context_type_t type, + __in uint32_t num_queues, + __out uint32_t *rss_contextp) +{ + efx_mcdi_req_t req; + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN, + MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN); + uint32_t rss_context; + uint32_t context_type; + efx_rc_t rc; + + if (num_queues > EFX_MAXRSS) { + rc = EINVAL; + goto fail1; + } + + switch (type) { + case EFX_RX_SCALE_EXCLUSIVE: + context_type = MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE; + break; + case EFX_RX_SCALE_SHARED: + context_type = MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_SHARED; + break; + default: + rc = EINVAL; + goto fail2; + } + + req.emr_cmd = MC_CMD_RSS_CONTEXT_ALLOC; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN; + + MCDI_IN_SET_DWORD(req, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID, + enp->en_vport_id); + MCDI_IN_SET_DWORD(req, RSS_CONTEXT_ALLOC_IN_TYPE, context_type); + + /* + * For exclusive contexts, NUM_QUEUES is only used to validate + * indirection table offsets. + * For shared contexts, the provided context will spread traffic over + * NUM_QUEUES many queues. + */ + MCDI_IN_SET_DWORD(req, RSS_CONTEXT_ALLOC_IN_NUM_QUEUES, num_queues); + + efx_mcdi_execute(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail3; + } + + if (req.emr_out_length_used < MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN) { + rc = EMSGSIZE; + goto fail4; + } + + rss_context = MCDI_OUT_DWORD(req, RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID); + if (rss_context == EF10_RSS_CONTEXT_INVALID) { + rc = ENOENT; + goto fail5; + } + + *rss_contextp = rss_context; + + return (0); + +fail5: + EFSYS_PROBE(fail5); +fail4: + EFSYS_PROBE(fail4); +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} +#endif /* EFSYS_OPT_RX_SCALE */ + +#if EFSYS_OPT_RX_SCALE +static efx_rc_t +efx_mcdi_rss_context_free( + __in efx_nic_t *enp, + __in uint32_t rss_context) +{ + efx_mcdi_req_t req; + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_RSS_CONTEXT_FREE_IN_LEN, + MC_CMD_RSS_CONTEXT_FREE_OUT_LEN); + efx_rc_t rc; + + if (rss_context == EF10_RSS_CONTEXT_INVALID) { + rc = EINVAL; + goto fail1; + } + + req.emr_cmd = MC_CMD_RSS_CONTEXT_FREE; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_RSS_CONTEXT_FREE_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_RSS_CONTEXT_FREE_OUT_LEN; + + MCDI_IN_SET_DWORD(req, RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID, rss_context); + + efx_mcdi_execute_quiet(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail2; + } + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} +#endif /* EFSYS_OPT_RX_SCALE */ + +#if EFSYS_OPT_RX_SCALE +static efx_rc_t +efx_mcdi_rss_context_set_flags( + __in efx_nic_t *enp, + __in uint32_t rss_context, + __in efx_rx_hash_type_t type) +{ + efx_nic_cfg_t *encp = &enp->en_nic_cfg; + efx_mcdi_req_t req; + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN, + MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT_LEN); + efx_rc_t rc; + + EFX_STATIC_ASSERT(EFX_RX_CLASS_IPV4_TCP_LBN == + MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE_LBN); + EFX_STATIC_ASSERT(EFX_RX_CLASS_IPV4_TCP_WIDTH == + MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE_WIDTH); + 
EFX_STATIC_ASSERT(EFX_RX_CLASS_IPV4_LBN == + MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV4_RSS_MODE_LBN); + EFX_STATIC_ASSERT(EFX_RX_CLASS_IPV4_WIDTH == + MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV4_RSS_MODE_WIDTH); + EFX_STATIC_ASSERT(EFX_RX_CLASS_IPV6_TCP_LBN == + MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV6_RSS_MODE_LBN); + EFX_STATIC_ASSERT(EFX_RX_CLASS_IPV6_TCP_WIDTH == + MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV6_RSS_MODE_WIDTH); + EFX_STATIC_ASSERT(EFX_RX_CLASS_IPV6_LBN == + MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV6_RSS_MODE_LBN); + EFX_STATIC_ASSERT(EFX_RX_CLASS_IPV6_WIDTH == + MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV6_RSS_MODE_WIDTH); + + if (rss_context == EF10_RSS_CONTEXT_INVALID) { + rc = EINVAL; + goto fail1; + } + + req.emr_cmd = MC_CMD_RSS_CONTEXT_SET_FLAGS; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT_LEN; + + MCDI_IN_SET_DWORD(req, RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID, + rss_context); + + /* + * If the firmware lacks support for additional modes, RSS_MODE + * fields must contain zeros, otherwise the operation will fail. + */ + if (encp->enc_rx_scale_additional_modes_supported == B_FALSE) + type &= EFX_RX_HASH_LEGACY_MASK; + + MCDI_IN_POPULATE_DWORD_10(req, RSS_CONTEXT_SET_FLAGS_IN_FLAGS, + RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN, + (type & EFX_RX_HASH_IPV4) ? 1 : 0, + RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN, + (type & EFX_RX_HASH_TCPIPV4) ? 1 : 0, + RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV6_EN, + (type & EFX_RX_HASH_IPV6) ? 1 : 0, + RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN, + (type & EFX_RX_HASH_TCPIPV6) ? 1 : 0, + RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE, + (type >> EFX_RX_CLASS_IPV4_TCP_LBN) & + EFX_MASK32(EFX_RX_CLASS_IPV4_TCP), + RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV4_RSS_MODE, + (type >> EFX_RX_CLASS_IPV4_UDP_LBN) & + EFX_MASK32(EFX_RX_CLASS_IPV4_UDP), + RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV4_RSS_MODE, + (type >> EFX_RX_CLASS_IPV4_LBN) & EFX_MASK32(EFX_RX_CLASS_IPV4), + RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV6_RSS_MODE, + (type >> EFX_RX_CLASS_IPV6_TCP_LBN) & + EFX_MASK32(EFX_RX_CLASS_IPV6_TCP), + RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV6_RSS_MODE, + (type >> EFX_RX_CLASS_IPV6_UDP_LBN) & + EFX_MASK32(EFX_RX_CLASS_IPV6_UDP), + RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV6_RSS_MODE, + (type >> EFX_RX_CLASS_IPV6_LBN) & EFX_MASK32(EFX_RX_CLASS_IPV6)); + + efx_mcdi_execute(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail2; + } + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} +#endif /* EFSYS_OPT_RX_SCALE */ + +#if EFSYS_OPT_RX_SCALE +static efx_rc_t +efx_mcdi_rss_context_set_key( + __in efx_nic_t *enp, + __in uint32_t rss_context, + __in_ecount(n) uint8_t *key, + __in size_t n) +{ + efx_mcdi_req_t req; + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN, + MC_CMD_RSS_CONTEXT_SET_KEY_OUT_LEN); + efx_rc_t rc; + + if (rss_context == EF10_RSS_CONTEXT_INVALID) { + rc = EINVAL; + goto fail1; + } + + req.emr_cmd = MC_CMD_RSS_CONTEXT_SET_KEY; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_RSS_CONTEXT_SET_KEY_OUT_LEN; + + MCDI_IN_SET_DWORD(req, RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID, + rss_context); + + EFSYS_ASSERT3U(n, ==, MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN); + if (n != MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN) { + rc = EINVAL; + goto fail2; + } + + memcpy(MCDI_IN2(req, 
uint8_t, RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY), + key, n); + + efx_mcdi_execute(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail3; + } + + return (0); + +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} +#endif /* EFSYS_OPT_RX_SCALE */ + +#if EFSYS_OPT_RX_SCALE +static efx_rc_t +efx_mcdi_rss_context_set_table( + __in efx_nic_t *enp, + __in uint32_t rss_context, + __in_ecount(n) unsigned int *table, + __in size_t n) +{ + efx_mcdi_req_t req; + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN, + MC_CMD_RSS_CONTEXT_SET_TABLE_OUT_LEN); + uint8_t *req_table; + int i, rc; + + if (rss_context == EF10_RSS_CONTEXT_INVALID) { + rc = EINVAL; + goto fail1; + } + + req.emr_cmd = MC_CMD_RSS_CONTEXT_SET_TABLE; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_RSS_CONTEXT_SET_TABLE_OUT_LEN; + + MCDI_IN_SET_DWORD(req, RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID, + rss_context); + + req_table = + MCDI_IN2(req, uint8_t, RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE); + + for (i = 0; + i < MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN; + i++) { + req_table[i] = (n > 0) ? (uint8_t)table[i % n] : 0; + } + + efx_mcdi_execute(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail2; + } + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} +#endif /* EFSYS_OPT_RX_SCALE */ + + + __checkReturn efx_rc_t +ef10_rx_init( + __in efx_nic_t *enp) +{ +#if EFSYS_OPT_RX_SCALE + + if (efx_mcdi_rss_context_alloc(enp, EFX_RX_SCALE_EXCLUSIVE, EFX_MAXRSS, + &enp->en_rss_context) == 0) { + /* + * Allocated an exclusive RSS context, which allows both the + * indirection table and key to be modified. + */ + enp->en_rss_context_type = EFX_RX_SCALE_EXCLUSIVE; + enp->en_hash_support = EFX_RX_HASH_AVAILABLE; + } else { + /* + * Failed to allocate an exclusive RSS context. Continue + * operation without support for RSS. The pseudo-header in + * received packets will not contain a Toeplitz hash value. 
+ */ + enp->en_rss_context_type = EFX_RX_SCALE_UNAVAILABLE; + enp->en_hash_support = EFX_RX_HASH_UNAVAILABLE; + } + +#endif /* EFSYS_OPT_RX_SCALE */ + + return (0); +} + +#if EFSYS_OPT_RX_SCATTER + __checkReturn efx_rc_t +ef10_rx_scatter_enable( + __in efx_nic_t *enp, + __in unsigned int buf_size) +{ + _NOTE(ARGUNUSED(enp, buf_size)) + return (0); +} +#endif /* EFSYS_OPT_RX_SCATTER */ + +#if EFSYS_OPT_RX_SCALE + __checkReturn efx_rc_t +ef10_rx_scale_context_alloc( + __in efx_nic_t *enp, + __in efx_rx_scale_context_type_t type, + __in uint32_t num_queues, + __out uint32_t *rss_contextp) +{ + efx_rc_t rc; + + rc = efx_mcdi_rss_context_alloc(enp, type, num_queues, rss_contextp); + if (rc != 0) + goto fail1; + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + return (rc); +} +#endif /* EFSYS_OPT_RX_SCALE */ + +#if EFSYS_OPT_RX_SCALE + __checkReturn efx_rc_t +ef10_rx_scale_context_free( + __in efx_nic_t *enp, + __in uint32_t rss_context) +{ + efx_rc_t rc; + + rc = efx_mcdi_rss_context_free(enp, rss_context); + if (rc != 0) + goto fail1; + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + return (rc); +} +#endif /* EFSYS_OPT_RX_SCALE */ + +#if EFSYS_OPT_RX_SCALE + __checkReturn efx_rc_t +ef10_rx_scale_mode_set( + __in efx_nic_t *enp, + __in uint32_t rss_context, + __in efx_rx_hash_alg_t alg, + __in efx_rx_hash_type_t type, + __in boolean_t insert) +{ + efx_nic_cfg_t *encp = &enp->en_nic_cfg; + efx_rc_t rc; + + EFSYS_ASSERT3U(insert, ==, B_TRUE); + + if ((encp->enc_rx_scale_hash_alg_mask & (1U << alg)) == 0 || + insert == B_FALSE) { + rc = EINVAL; + goto fail1; + } + + if (rss_context == EFX_RSS_CONTEXT_DEFAULT) { + if (enp->en_rss_context_type == EFX_RX_SCALE_UNAVAILABLE) { + rc = ENOTSUP; + goto fail2; + } + rss_context = enp->en_rss_context; + } + + if ((rc = efx_mcdi_rss_context_set_flags(enp, + rss_context, type)) != 0) + goto fail3; + + return (0); + +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} +#endif /* EFSYS_OPT_RX_SCALE */ + +#if EFSYS_OPT_RX_SCALE + __checkReturn efx_rc_t +ef10_rx_scale_key_set( + __in efx_nic_t *enp, + __in uint32_t rss_context, + __in_ecount(n) uint8_t *key, + __in size_t n) +{ + efx_rc_t rc; + + EFX_STATIC_ASSERT(EFX_RSS_KEY_SIZE == + MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN); + + if (rss_context == EFX_RSS_CONTEXT_DEFAULT) { + if (enp->en_rss_context_type == EFX_RX_SCALE_UNAVAILABLE) { + rc = ENOTSUP; + goto fail1; + } + rss_context = enp->en_rss_context; + } + + if ((rc = efx_mcdi_rss_context_set_key(enp, rss_context, key, n)) != 0) + goto fail2; + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} +#endif /* EFSYS_OPT_RX_SCALE */ + +#if EFSYS_OPT_RX_SCALE + __checkReturn efx_rc_t +ef10_rx_scale_tbl_set( + __in efx_nic_t *enp, + __in uint32_t rss_context, + __in_ecount(n) unsigned int *table, + __in size_t n) +{ + efx_rc_t rc; + + + if (rss_context == EFX_RSS_CONTEXT_DEFAULT) { + if (enp->en_rss_context_type == EFX_RX_SCALE_UNAVAILABLE) { + rc = ENOTSUP; + goto fail1; + } + rss_context = enp->en_rss_context; + } + + if ((rc = efx_mcdi_rss_context_set_table(enp, + rss_context, table, n)) != 0) + goto fail2; + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} +#endif /* EFSYS_OPT_RX_SCALE */ + + +/* + * EF10 RX pseudo-header + * --------------------- + * + * Receive packets are prefixed by an (optional) 14 byte pseudo-header: + * + * 
+00: Toeplitz hash value. + * (32bit little-endian) + * +04: Outer VLAN tag. Zero if the packet did not have an outer VLAN tag. + * (16bit big-endian) + * +06: Inner VLAN tag. Zero if the packet did not have an inner VLAN tag. + * (16bit big-endian) + * +08: Packet Length. Zero if the RX datapath was in cut-through mode. + * (16bit little-endian) + * +10: MAC timestamp. Zero if timestamping is not enabled. + * (32bit little-endian) + * + * See "The RX Pseudo-header" in SF-109306-TC. + */ + + __checkReturn efx_rc_t +ef10_rx_prefix_pktlen( + __in efx_nic_t *enp, + __in uint8_t *buffer, + __out uint16_t *lengthp) +{ + _NOTE(ARGUNUSED(enp)) + + /* + * The RX pseudo-header contains the packet length, excluding the + * pseudo-header. If the hardware receive datapath was operating in + * cut-through mode then the length in the RX pseudo-header will be + * zero, and the packet length must be obtained from the DMA length + * reported in the RX event. + */ + *lengthp = buffer[8] | (buffer[9] << 8); + return (0); +} + +#if EFSYS_OPT_RX_SCALE + __checkReturn uint32_t +ef10_rx_prefix_hash( + __in efx_nic_t *enp, + __in efx_rx_hash_alg_t func, + __in uint8_t *buffer) +{ + _NOTE(ARGUNUSED(enp)) + + switch (func) { + case EFX_RX_HASHALG_PACKED_STREAM: + case EFX_RX_HASHALG_TOEPLITZ: + return (buffer[0] | + (buffer[1] << 8) | + (buffer[2] << 16) | + (buffer[3] << 24)); + + default: + EFSYS_ASSERT(0); + return (0); + } +} +#endif /* EFSYS_OPT_RX_SCALE */ + +#if EFSYS_OPT_RX_PACKED_STREAM +/* + * Fake length for RXQ descriptors in packed stream mode + * to make hardware happy + */ +#define EFX_RXQ_PACKED_STREAM_FAKE_BUF_SIZE 32 +#endif + + void +ef10_rx_qpost( + __in efx_rxq_t *erp, + __in_ecount(ndescs) efsys_dma_addr_t *addrp, + __in size_t size, + __in unsigned int ndescs, + __in unsigned int completed, + __in unsigned int added) +{ + efx_qword_t qword; + unsigned int i; + unsigned int offset; + unsigned int id; + + _NOTE(ARGUNUSED(completed)) + +#if EFSYS_OPT_RX_PACKED_STREAM + /* + * Real size of the buffer does not fit into ESF_DZ_RX_KER_BYTE_CNT + * and equal to 0 after applying mask. Hardware does not like it. 
+ */ + if (erp->er_ev_qstate->eers_rx_packed_stream) + size = EFX_RXQ_PACKED_STREAM_FAKE_BUF_SIZE; +#endif + + /* The client driver must not overfill the queue */ + EFSYS_ASSERT3U(added - completed + ndescs, <=, + EFX_RXQ_LIMIT(erp->er_mask + 1)); + + id = added & (erp->er_mask); + for (i = 0; i < ndescs; i++) { + EFSYS_PROBE4(rx_post, unsigned int, erp->er_index, + unsigned int, id, efsys_dma_addr_t, addrp[i], + size_t, size); + + EFX_POPULATE_QWORD_3(qword, + ESF_DZ_RX_KER_BYTE_CNT, (uint32_t)(size), + ESF_DZ_RX_KER_BUF_ADDR_DW0, + (uint32_t)(addrp[i] & 0xffffffff), + ESF_DZ_RX_KER_BUF_ADDR_DW1, + (uint32_t)(addrp[i] >> 32)); + + offset = id * sizeof (efx_qword_t); + EFSYS_MEM_WRITEQ(erp->er_esmp, offset, &qword); + + id = (id + 1) & (erp->er_mask); + } +} + + void +ef10_rx_qpush( + __in efx_rxq_t *erp, + __in unsigned int added, + __inout unsigned int *pushedp) +{ + efx_nic_t *enp = erp->er_enp; + unsigned int pushed = *pushedp; + uint32_t wptr; + efx_dword_t dword; + + /* Hardware has alignment restriction for WPTR */ + wptr = EFX_P2ALIGN(unsigned int, added, EF10_RX_WPTR_ALIGN); + if (pushed == wptr) + return; + + *pushedp = wptr; + + /* Push the populated descriptors out */ + wptr &= erp->er_mask; + + EFX_POPULATE_DWORD_1(dword, ERF_DZ_RX_DESC_WPTR, wptr); + + /* Guarantee ordering of memory (descriptors) and PIO (doorbell) */ + EFX_DMA_SYNC_QUEUE_FOR_DEVICE(erp->er_esmp, erp->er_mask + 1, + wptr, pushed & erp->er_mask); + EFSYS_PIO_WRITE_BARRIER(); + EFX_BAR_VI_WRITED(enp, ER_DZ_RX_DESC_UPD_REG, + erp->er_index, &dword, B_FALSE); +} + +#if EFSYS_OPT_RX_PACKED_STREAM + + void +ef10_rx_qpush_ps_credits( + __in efx_rxq_t *erp) +{ + efx_nic_t *enp = erp->er_enp; + efx_dword_t dword; + efx_evq_rxq_state_t *rxq_state = erp->er_ev_qstate; + uint32_t credits; + + EFSYS_ASSERT(rxq_state->eers_rx_packed_stream); + + if (rxq_state->eers_rx_packed_stream_credits == 0) + return; + + /* + * It is a bug if we think that FW has utilized more + * credits than it is allowed to have (maximum). However, + * make sure that we do not credit more than maximum anyway. 
+ */ + credits = MIN(rxq_state->eers_rx_packed_stream_credits, + EFX_RX_PACKED_STREAM_MAX_CREDITS); + EFX_POPULATE_DWORD_3(dword, + ERF_DZ_RX_DESC_MAGIC_DOORBELL, 1, + ERF_DZ_RX_DESC_MAGIC_CMD, + ERE_DZ_RX_DESC_MAGIC_CMD_PS_CREDITS, + ERF_DZ_RX_DESC_MAGIC_DATA, credits); + EFX_BAR_VI_WRITED(enp, ER_DZ_RX_DESC_UPD_REG, + erp->er_index, &dword, B_FALSE); + + rxq_state->eers_rx_packed_stream_credits = 0; +} + +/* + * In accordance with SF-112241-TC the received data has the following layout: + * - 8 byte pseudo-header which consists of: + * - 4 byte little-endian timestamp + * - 2 byte little-endian captured length in bytes + * - 2 byte little-endian original packet length in bytes + * - captured packet bytes + * - optional padding to align to a 64 byte boundary + * - 64 bytes scratch space for the host software + */ + __checkReturn uint8_t * +ef10_rx_qps_packet_info( + __in efx_rxq_t *erp, + __in uint8_t *buffer, + __in uint32_t buffer_length, + __in uint32_t current_offset, + __out uint16_t *lengthp, + __out uint32_t *next_offsetp, + __out uint32_t *timestamp) +{ + uint16_t buf_len; + uint8_t *pkt_start; + efx_qword_t *qwordp; + efx_evq_rxq_state_t *rxq_state = erp->er_ev_qstate; + + EFSYS_ASSERT(rxq_state->eers_rx_packed_stream); + + buffer += current_offset; + pkt_start = buffer + EFX_RX_PACKED_STREAM_RX_PREFIX_SIZE; + + qwordp = (efx_qword_t *)buffer; + *timestamp = EFX_QWORD_FIELD(*qwordp, ES_DZ_PS_RX_PREFIX_TSTAMP); + *lengthp = EFX_QWORD_FIELD(*qwordp, ES_DZ_PS_RX_PREFIX_ORIG_LEN); + buf_len = EFX_QWORD_FIELD(*qwordp, ES_DZ_PS_RX_PREFIX_CAP_LEN); + + buf_len = EFX_P2ROUNDUP(uint16_t, + buf_len + EFX_RX_PACKED_STREAM_RX_PREFIX_SIZE, + EFX_RX_PACKED_STREAM_ALIGNMENT); + *next_offsetp = + current_offset + buf_len + EFX_RX_PACKED_STREAM_ALIGNMENT; + + EFSYS_ASSERT3U(*next_offsetp, <=, buffer_length); + EFSYS_ASSERT3U(current_offset + *lengthp, <, *next_offsetp); + + if ((*next_offsetp ^ current_offset) & + EFX_RX_PACKED_STREAM_MEM_PER_CREDIT) + rxq_state->eers_rx_packed_stream_credits++; + + return (pkt_start); +} + + +#endif + + __checkReturn efx_rc_t +ef10_rx_qflush( + __in efx_rxq_t *erp) +{ + efx_nic_t *enp = erp->er_enp; + efx_rc_t rc; + + if ((rc = efx_mcdi_fini_rxq(enp, erp->er_index)) != 0) + goto fail1; + + return (0); + +fail1: + /* + * EALREADY is not an error, but indicates that the MC has rebooted and + * that the RXQ has already been destroyed. Callers need to know that + * the RXQ flush has completed to avoid waiting until timeout for a + * flush done event that will not be delivered. 
+ */ + if (rc != EALREADY) + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + void +ef10_rx_qenable( + __in efx_rxq_t *erp) +{ + /* FIXME */ + _NOTE(ARGUNUSED(erp)) + /* FIXME */ +} + + __checkReturn efx_rc_t +ef10_rx_qcreate( + __in efx_nic_t *enp, + __in unsigned int index, + __in unsigned int label, + __in efx_rxq_type_t type, + __in_opt const efx_rxq_type_data_t *type_data, + __in efsys_mem_t *esmp, + __in size_t ndescs, + __in uint32_t id, + __in unsigned int flags, + __in efx_evq_t *eep, + __in efx_rxq_t *erp) +{ + efx_nic_cfg_t *encp = &(enp->en_nic_cfg); + efx_rc_t rc; + boolean_t disable_scatter; + boolean_t want_inner_classes; + unsigned int ps_buf_size; + uint32_t es_bufs_per_desc = 0; + uint32_t es_max_dma_len = 0; + uint32_t es_buf_stride = 0; + uint32_t hol_block_timeout = 0; + + _NOTE(ARGUNUSED(id, erp)) + + EFX_STATIC_ASSERT(EFX_EV_RX_NLABELS == (1 << ESF_DZ_RX_QLABEL_WIDTH)); + EFSYS_ASSERT3U(label, <, EFX_EV_RX_NLABELS); + EFSYS_ASSERT3U(enp->en_rx_qcount + 1, <, encp->enc_rxq_limit); + + if (index >= encp->enc_rxq_limit) { + rc = EINVAL; + goto fail1; + } + + switch (type) { + case EFX_RXQ_TYPE_DEFAULT: + if (type_data == NULL) { + rc = EINVAL; + goto fail2; + } + erp->er_buf_size = type_data->ertd_default.ed_buf_size; + ps_buf_size = 0; + break; +#if EFSYS_OPT_RX_PACKED_STREAM + case EFX_RXQ_TYPE_PACKED_STREAM: + if (type_data == NULL) { + rc = EINVAL; + goto fail3; + } + switch (type_data->ertd_packed_stream.eps_buf_size) { + case EFX_RXQ_PACKED_STREAM_BUF_SIZE_1M: + ps_buf_size = MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_1M; + break; + case EFX_RXQ_PACKED_STREAM_BUF_SIZE_512K: + ps_buf_size = MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_512K; + break; + case EFX_RXQ_PACKED_STREAM_BUF_SIZE_256K: + ps_buf_size = MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_256K; + break; + case EFX_RXQ_PACKED_STREAM_BUF_SIZE_128K: + ps_buf_size = MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_128K; + break; + case EFX_RXQ_PACKED_STREAM_BUF_SIZE_64K: + ps_buf_size = MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_64K; + break; + default: + rc = ENOTSUP; + goto fail4; + } + erp->er_buf_size = type_data->ertd_packed_stream.eps_buf_size; + break; +#endif /* EFSYS_OPT_RX_PACKED_STREAM */ +#if EFSYS_OPT_RX_ES_SUPER_BUFFER + case EFX_RXQ_TYPE_ES_SUPER_BUFFER: + if (type_data == NULL) { + rc = EINVAL; + goto fail5; + } + ps_buf_size = 0; + es_bufs_per_desc = + type_data->ertd_es_super_buffer.eessb_bufs_per_desc; + es_max_dma_len = + type_data->ertd_es_super_buffer.eessb_max_dma_len; + es_buf_stride = + type_data->ertd_es_super_buffer.eessb_buf_stride; + hol_block_timeout = + type_data->ertd_es_super_buffer.eessb_hol_block_timeout; + break; +#endif /* EFSYS_OPT_RX_ES_SUPER_BUFFER */ + default: + rc = ENOTSUP; + goto fail6; + } + +#if EFSYS_OPT_RX_PACKED_STREAM + if (ps_buf_size != 0) { + /* Check if datapath firmware supports packed stream mode */ + if (encp->enc_rx_packed_stream_supported == B_FALSE) { + rc = ENOTSUP; + goto fail7; + } + /* Check if packed stream allows configurable buffer sizes */ + if ((ps_buf_size != MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_1M) && + (encp->enc_rx_var_packed_stream_supported == B_FALSE)) { + rc = ENOTSUP; + goto fail8; + } + } +#else /* EFSYS_OPT_RX_PACKED_STREAM */ + EFSYS_ASSERT(ps_buf_size == 0); +#endif /* EFSYS_OPT_RX_PACKED_STREAM */ + +#if EFSYS_OPT_RX_ES_SUPER_BUFFER + if (es_bufs_per_desc > 0) { + if (encp->enc_rx_es_super_buffer_supported == B_FALSE) { + rc = ENOTSUP; + goto fail9; + } + if (!EFX_IS_P2ALIGNED(uint32_t, es_max_dma_len, + EFX_RX_ES_SUPER_BUFFER_BUF_ALIGNMENT)) { + rc = EINVAL; + goto fail10; + } + if 
(!EFX_IS_P2ALIGNED(uint32_t, es_buf_stride, + EFX_RX_ES_SUPER_BUFFER_BUF_ALIGNMENT)) { + rc = EINVAL; + goto fail11; + } + } +#else /* EFSYS_OPT_RX_ES_SUPER_BUFFER */ + EFSYS_ASSERT(es_bufs_per_desc == 0); +#endif /* EFSYS_OPT_RX_ES_SUPER_BUFFER */ + + /* Scatter can only be disabled if the firmware supports doing so */ + if (flags & EFX_RXQ_FLAG_SCATTER) + disable_scatter = B_FALSE; + else + disable_scatter = encp->enc_rx_disable_scatter_supported; + + if (flags & EFX_RXQ_FLAG_INNER_CLASSES) + want_inner_classes = B_TRUE; + else + want_inner_classes = B_FALSE; + + if ((rc = efx_mcdi_init_rxq(enp, ndescs, eep, label, index, + esmp, disable_scatter, want_inner_classes, erp->er_buf_size, + ps_buf_size, es_bufs_per_desc, es_max_dma_len, + es_buf_stride, hol_block_timeout)) != 0) + goto fail12; + + erp->er_eep = eep; + erp->er_label = label; + + ef10_ev_rxlabel_init(eep, erp, label, type); + + erp->er_ev_qstate = &erp->er_eep->ee_rxq_state[label]; + + return (0); + +fail12: + EFSYS_PROBE(fail12); +#if EFSYS_OPT_RX_ES_SUPER_BUFFER +fail11: + EFSYS_PROBE(fail11); +fail10: + EFSYS_PROBE(fail10); +fail9: + EFSYS_PROBE(fail9); +#endif /* EFSYS_OPT_RX_ES_SUPER_BUFFER */ +#if EFSYS_OPT_RX_PACKED_STREAM +fail8: + EFSYS_PROBE(fail8); +fail7: + EFSYS_PROBE(fail7); +#endif /* EFSYS_OPT_RX_PACKED_STREAM */ +fail6: + EFSYS_PROBE(fail6); +#if EFSYS_OPT_RX_ES_SUPER_BUFFER +fail5: + EFSYS_PROBE(fail5); +#endif /* EFSYS_OPT_RX_ES_SUPER_BUFFER */ +#if EFSYS_OPT_RX_PACKED_STREAM +fail4: + EFSYS_PROBE(fail4); +fail3: + EFSYS_PROBE(fail3); +#endif /* EFSYS_OPT_RX_PACKED_STREAM */ +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + void +ef10_rx_qdestroy( + __in efx_rxq_t *erp) +{ + efx_nic_t *enp = erp->er_enp; + efx_evq_t *eep = erp->er_eep; + unsigned int label = erp->er_label; + + ef10_ev_rxlabel_fini(eep, label); + + EFSYS_ASSERT(enp->en_rx_qcount != 0); + --enp->en_rx_qcount; + + EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_rxq_t), erp); +} + + void +ef10_rx_fini( + __in efx_nic_t *enp) +{ +#if EFSYS_OPT_RX_SCALE + if (enp->en_rss_context_type != EFX_RX_SCALE_UNAVAILABLE) + (void) efx_mcdi_rss_context_free(enp, enp->en_rss_context); + enp->en_rss_context = 0; + enp->en_rss_context_type = EFX_RX_SCALE_UNAVAILABLE; +#else + _NOTE(ARGUNUSED(enp)) +#endif /* EFSYS_OPT_RX_SCALE */ +} + +#endif /* EFX_OPTS_EF10() */ diff --git a/src/spdk/dpdk/drivers/net/sfc/base/ef10_signed_image_layout.h b/src/spdk/dpdk/drivers/net/sfc/base/ef10_signed_image_layout.h new file mode 100644 index 000000000..f2064a5fc --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/base/ef10_signed_image_layout.h @@ -0,0 +1,70 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2016-2019 Solarflare Communications Inc. + */ + +/* + * This is NOT the original source file. Do NOT edit it. + * To update the image layout headers, please edit the copy in + * the sfregistry repo and then, in that repo, + * "make layout_headers" or "make export" to + * regenerate and export all types of headers. + */ + +/* These structures define the layouts for the signed firmware image binary + * saved in NVRAM. The original image is in the Cryptographic message + * syntax (CMS) format which contains the bootable firmware binary plus the + * signatures. The entire image is written into NVRAM to enable the firmware + * to validate the signatures. However, the bootrom still requires the + * bootable-image to start at offset 0 of the NVRAM partition. 
Hence the image + * is parsed upfront by host utilities (sfupdate) and written into nvram as + * 'signed_image_chunks' described by a header. + * + * This file is used by the MC as well as host-utilities (sfupdate). + */ + + +#ifndef CI_MGMT_SIGNED_IMAGE_LAYOUT_H +#define CI_MGMT_SIGNED_IMAGE_LAYOUT_H + +/* Signed image chunk type identifiers */ +enum { + SIGNED_IMAGE_CHUNK_CMS_HEADER, /* CMS header describing the signed data */ + SIGNED_IMAGE_CHUNK_REFLASH_HEADER, /* Reflash header */ + SIGNED_IMAGE_CHUNK_IMAGE, /* Bootable binary image */ + SIGNED_IMAGE_CHUNK_REFLASH_TRAILER, /* Reflash trailer */ + SIGNED_IMAGE_CHUNK_SIGNATURE, /* Remaining contents of the signed image, + * including the certifiates and signature */ + NUM_SIGNED_IMAGE_CHUNKS, +}; + +/* Magic */ +#define SIGNED_IMAGE_CHUNK_HDR_MAGIC 0xEF105161 /* EF10 SIGned Image */ + +/* Initial version definition - version 1 */ +#define SIGNED_IMAGE_CHUNK_HDR_VERSION 0x1 + +/* Header length is 32 bytes */ +#define SIGNED_IMAGE_CHUNK_HDR_LEN 32 +/* Structure describing the header of each chunk of signed image + * as stored in nvram + */ +typedef struct signed_image_chunk_hdr_e { + /* Magic field to recognise a valid entry + * should match SIGNED_IMAGE_CHUNK_HDR_MAGIC + */ + uint32_t magic; + /* Version number of this header */ + uint32_t version; + /* Chunk type identifier */ + uint32_t id; + /* Chunk offset */ + uint32_t offset; + /* Chunk length */ + uint32_t len; + /* Reserved for future expansion of this structure - always set to zeros */ + uint32_t reserved[3]; +} signed_image_chunk_hdr_t; + +#endif /* CI_MGMT_SIGNED_IMAGE_LAYOUT_H */ diff --git a/src/spdk/dpdk/drivers/net/sfc/base/ef10_tlv_layout.h b/src/spdk/dpdk/drivers/net/sfc/base/ef10_tlv_layout.h new file mode 100644 index 000000000..e964476be --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/base/ef10_tlv_layout.h @@ -0,0 +1,1063 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2012-2019 Solarflare Communications Inc. + */ + +/* + * This is NOT the original source file. Do NOT edit it. + * To update the tlv layout, please edit the copy in + * the sfregistry repo and then, in that repo, + * "make tlv_headers" or "make export" to + * regenerate and export all types of headers. + */ + +/* These structures define the layouts for the TLV items stored in static and + * dynamic configuration partitions in NVRAM for EF10 (Huntington etc.). + * + * They contain the same sort of information that was kept in the + * siena_mc_static_config_hdr_t and siena_mc_dynamic_config_hdr_t structures + * (defined in and ) for + * Siena. + * + * These are used directly by the MC and should also be usable directly on host + * systems which are little-endian and do not do strange things with structure + * padding. (Big-endian host systems will require some byte-swapping.) + * + * ----- + * + * Please refer to SF-108797-SW for a general overview of the TLV partition + * format. + * + * ----- + * + * The current tag IDs have a general structure: with the exception of the + * special values defined in the document, they are of the form 0xLTTTNNNN, + * where: + * + * - L is a location, indicating where this tag is expected to be found: + * 0: static configuration + * 1: dynamic configuration + * 2: firmware internal use + * 3: license partition + * 4: tsa configuration + * 5: bundle update + * + * - TTT is a type, which is just a unique value. 
The same type value + * might appear in both locations, indicating a relationship between + * the items (e.g. static and dynamic VPD below). + * + * - NNNN is an index of some form. Some item types are per-port, some + * are per-PF, some are per-partition-type. + * + * ----- + * + * As with the previous Siena structures, each structure here is laid out + * carefully: values are aligned to their natural boundary, with explicit + * padding fields added where necessary. (No, technically this does not + * absolutely guarantee portability. But, in practice, compilers are generally + * sensible enough not to introduce completely pointless padding, and it works + * well enough.) + */ + + +#ifndef CI_MGMT_TLV_LAYOUT_H +#define CI_MGMT_TLV_LAYOUT_H + + +/* ---------------------------------------------------------------------------- + * General structure (defined by SF-108797-SW) + * ---------------------------------------------------------------------------- + */ + + +/* The "end" tag. + * + * (Note that this is *not* followed by length or value fields: anything after + * the tag itself is irrelevant.) + */ + +#define TLV_TAG_END (0xEEEEEEEE) + + +/* Other special reserved tag values. + */ + +#define TLV_TAG_SKIP (0x00000000) +#define TLV_TAG_INVALID (0xFFFFFFFF) + + +/* TLV start. + * + * Marks the start of a TLV layout within a partition that may/may-not be + * a TLV partition. i.e. if a portion of data (at any offset) within a + * partition is expected to be in TLV format, then the first tag in this + * layout is expected to be TLV_TAG_START. + * + * This tag is not used in TLV layouts where the entire partition is TLV. + * Please continue using TLV_TAG_PARTITION_HEADER to indicate the start + * of TLV layout in such cases. + */ + +#define TLV_TAG_START (0xEF10BA5E) + +struct tlv_start { + uint32_t tag; + uint32_t length; + /* Length of the TLV structure following this tag - includes length of all tags + * within the TLV layout starting with this TLV_TAG_START. + * Includes TLV_TAG_END. Does not include TLV_TAG_START + */ + uint32_t tlv_layout_len; +}; + +/* TLV partition header. + * + * In a TLV partition, this must be the first item in the sequence, at offset + * 0. + */ + +#define TLV_TAG_PARTITION_HEADER (0xEF10DA7A) + +struct tlv_partition_header { + uint32_t tag; + uint32_t length; + uint16_t type_id; +/* 0 indicates the default segment (always located at offset 0), while other values + * are for RFID-selectable presets that should immediately follow the default segment. + * The default segment may also have preset > 0, which means that it is a preset + * selected through an RFID command and copied by FW to the location at offset 0. */ + uint16_t preset; + uint32_t generation; + uint32_t total_length; +}; + + +/* TLV partition trailer. + * + * In a TLV partition, this must be the last item in the sequence, immediately + * preceding the TLV_TAG_END word. + */ + +#define TLV_TAG_PARTITION_TRAILER (0xEF101A57) + +struct tlv_partition_trailer { + uint32_t tag; + uint32_t length; + uint32_t generation; + uint32_t checksum; +}; + + +/* Appendable TLV partition header. + * + * In an appendable TLV partition, this must be the first item in the sequence, + * at offset 0. (Note that, unlike the configuration partitions, there is no + * trailer before the TLV_TAG_END word.) 
+ */ + +#define TLV_TAG_APPENDABLE_PARTITION_HEADER (0xEF10ADA7) + +struct tlv_appendable_partition_header { + uint32_t tag; + uint32_t length; + uint16_t type_id; + uint16_t reserved; +}; + + +/* ---------------------------------------------------------------------------- + * Configuration items + * ---------------------------------------------------------------------------- + */ + + +/* NIC global capabilities. + */ + +#define TLV_TAG_GLOBAL_CAPABILITIES (0x00010000) + +struct tlv_global_capabilities { + uint32_t tag; + uint32_t length; + uint32_t flags; +}; + + +/* Siena-style per-port MAC address allocation. + * + * There are addresses, starting at and incrementing + * by adding to the low-order byte(s). + * + * (See also TLV_TAG_GLOBAL_MAC for an alternative, specifying a global pool + * of contiguous MAC addresses for the firmware to allocate as it sees fit.) + */ + +#define TLV_TAG_PORT_MAC(port) (0x00020000 + (port)) + +struct tlv_port_mac { + uint32_t tag; + uint32_t length; + uint8_t base_address[6]; + uint16_t reserved; + uint16_t count; + uint16_t stride; +}; + + +/* Static VPD. + * + * This is the portion of VPD which is set at manufacturing time and not + * expected to change. It is formatted as a standard PCI VPD block. There are + * global and per-pf TLVs for this, the global TLV is new for Medford and is + * used in preference to the per-pf TLV. + */ + +#define TLV_TAG_PF_STATIC_VPD(pf) (0x00030000 + (pf)) + +struct tlv_pf_static_vpd { + uint32_t tag; + uint32_t length; + uint8_t bytes[]; +}; + +#define TLV_TAG_GLOBAL_STATIC_VPD (0x001f0000) + +struct tlv_global_static_vpd { + uint32_t tag; + uint32_t length; + uint8_t bytes[]; +}; + + +/* Dynamic VPD. + * + * This is the portion of VPD which may be changed (e.g. by firmware updates). + * It is formatted as a standard PCI VPD block. There are global and per-pf TLVs + * for this, the global TLV is new for Medford and is used in preference to the + * per-pf TLV. + */ + +#define TLV_TAG_PF_DYNAMIC_VPD(pf) (0x10030000 + (pf)) + +struct tlv_pf_dynamic_vpd { + uint32_t tag; + uint32_t length; + uint8_t bytes[]; +}; + +#define TLV_TAG_GLOBAL_DYNAMIC_VPD (0x10200000) + +struct tlv_global_dynamic_vpd { + uint32_t tag; + uint32_t length; + uint8_t bytes[]; +}; + + +/* "DBI" PCI config space changes. + * + * This is a set of edits made to the default PCI config space values before + * the device is allowed to enumerate. There are global and per-pf TLVs for + * this, the global TLV is new for Medford and is used in preference to the + * per-pf TLV. + */ + +#define TLV_TAG_PF_DBI(pf) (0x00040000 + (pf)) + +struct tlv_pf_dbi { + uint32_t tag; + uint32_t length; + struct { + uint16_t addr; + uint16_t byte_enables; + uint32_t value; + } items[]; +}; + + +#define TLV_TAG_GLOBAL_DBI (0x00210000) + +struct tlv_global_dbi { + uint32_t tag; + uint32_t length; + struct { + uint16_t addr; + uint16_t byte_enables; + uint32_t value; + } items[]; +}; + + +/* Partition subtype codes. + * + * A subtype may optionally be stored for each type of partition present in + * the NVRAM. For example, this may be used to allow a generic firmware update + * utility to select a specific variant of firmware for a specific variant of + * board. + * + * The description[] field is an optional string which is returned in the + * MC_CMD_NVRAM_METADATA response if present. 
+ */ + +#define TLV_TAG_PARTITION_SUBTYPE(type) (0x00050000 + (type)) + +struct tlv_partition_subtype { + uint32_t tag; + uint32_t length; + uint32_t subtype; + uint8_t description[]; +}; + + +/* Partition version codes. + * + * A version may optionally be stored for each type of partition present in + * the NVRAM. This provides a standard way of tracking the currently stored + * version of each of the various component images. + */ + +#define TLV_TAG_PARTITION_VERSION(type) (0x10060000 + (type)) + +struct tlv_partition_version { + uint32_t tag; + uint32_t length; + uint16_t version_w; + uint16_t version_x; + uint16_t version_y; + uint16_t version_z; +}; + +/* Global PCIe configuration */ + +#define TLV_TAG_GLOBAL_PCIE_CONFIG (0x10070000) + +struct tlv_pcie_config { + uint32_t tag; + uint32_t length; + int16_t max_pf_number; /**< Largest PF RID (lower PFs may be hidden) */ + uint16_t pf_aper; /**< BIU aperture for PF BAR2 */ + uint16_t vf_aper; /**< BIU aperture for VF BAR0 */ + uint16_t int_aper; /**< BIU aperture for PF BAR4 and VF BAR2 */ +#define TLV_MAX_PF_DEFAULT (-1) /* Use FW default for largest PF RID */ +#define TLV_APER_DEFAULT (0xFFFF) /* Use FW default for a given aperture */ +}; + +/* Per-PF configuration. Note that not all these fields are necessarily useful + * as the apertures are constrained by the BIU settings (the one case we do + * use is to make BAR2 bigger than the BIU thinks to reserve space), but we can + * tidy things up later */ + +#define TLV_TAG_PF_PCIE_CONFIG(pf) (0x10080000 + (pf)) + +struct tlv_per_pf_pcie_config { + uint32_t tag; + uint32_t length; + uint8_t vfs_total; + uint8_t port_allocation; + uint16_t vectors_per_pf; + uint16_t vectors_per_vf; + uint8_t pf_bar0_aperture; + uint8_t pf_bar2_aperture; + uint8_t vf_bar0_aperture; + uint8_t vf_base; + uint16_t supp_pagesz; + uint16_t msix_vec_base; +}; + + +/* Development ONLY. This is a single TLV tag for all the gubbins + * that can be set through the MC command-line other than the PCIe + * settings. This is a temporary measure. */ +#define TLV_TAG_TMP_GUBBINS (0x10090000) /* legacy symbol - do not use */ +#define TLV_TAG_TMP_GUBBINS_HUNT TLV_TAG_TMP_GUBBINS + +struct tlv_tmp_gubbins { + uint32_t tag; + uint32_t length; + /* Consumed by dpcpu.c */ + uint64_t tx0_tags; /* Bitmap */ + uint64_t tx1_tags; /* Bitmap */ + uint64_t dl_tags; /* Bitmap */ + uint32_t flags; +#define TLV_DPCPU_TX_STRIPE (1) /* No longer used, has no effect */ +#define TLV_DPCPU_BIU_TAGS (2) /* Use BIU tag manager */ +#define TLV_DPCPU_TX0_TAGS (4) /* tx0_tags is valid */ +#define TLV_DPCPU_TX1_TAGS (8) /* tx1_tags is valid */ +#define TLV_DPCPU_DL_TAGS (16) /* dl_tags is valid */ + /* Consumed by features.c */ + uint32_t dut_features; /* All 1s -> leave alone */ + int8_t with_rmon; /* 0 -> off, 1 -> on, -1 -> leave alone */ + /* Consumed by clocks_hunt.c */ + int8_t clk_mode; /* 0 -> off, 1 -> on, -1 -> leave alone */ + /* No longer used, superseded by TLV_TAG_DESCRIPTOR_CACHE_CONFIG. */ + int8_t rx_dc_size; /* -1 -> leave alone */ + int8_t tx_dc_size; + int16_t num_q_allocs; +}; + +/* Global port configuration + * + * This is now deprecated in favour of a platform-provided default + * and dynamic config override via tlv_global_port_options. + */ +#define TLV_TAG_GLOBAL_PORT_CONFIG (0x000a0000) + +struct tlv_global_port_config { + uint32_t tag; + uint32_t length; + uint32_t ports_per_core; + uint32_t max_port_speed; +}; + + +/* Firmware options. 
+ * + * This is intended for user-configurable selection of optional firmware + * features and variants. + * + * Initially, this consists only of the satellite CPU firmware variant + * selection, but this tag could be extended in the future (using the + * tag length to determine whether additional fields are present). + */ + +#define TLV_TAG_FIRMWARE_OPTIONS (0x100b0000) + +struct tlv_firmware_options { + uint32_t tag; + uint32_t length; + uint32_t firmware_variant; +#define TLV_FIRMWARE_VARIANT_DRIVER_SELECTED (0xffffffff) + +/* These are the values for overriding the driver's choice; the definitions + * are taken from MCDI so that they don't get out of step. Include + * or the equivalent from your driver's tree if + * you need to use these constants. + */ +#define TLV_FIRMWARE_VARIANT_FULL_FEATURED MC_CMD_FW_FULL_FEATURED +#define TLV_FIRMWARE_VARIANT_LOW_LATENCY MC_CMD_FW_LOW_LATENCY +#define TLV_FIRMWARE_VARIANT_PACKED_STREAM MC_CMD_FW_PACKED_STREAM +#define TLV_FIRMWARE_VARIANT_HIGH_TX_RATE MC_CMD_FW_HIGH_TX_RATE +#define TLV_FIRMWARE_VARIANT_PACKED_STREAM_HASH_MODE_1 \ + MC_CMD_FW_PACKED_STREAM_HASH_MODE_1 +#define TLV_FIRMWARE_VARIANT_RULES_ENGINE MC_CMD_FW_RULES_ENGINE +#define TLV_FIRMWARE_VARIANT_DPDK MC_CMD_FW_DPDK +#define TLV_FIRMWARE_VARIANT_L3XUDP MC_CMD_FW_L3XUDP +}; + +/* Voltage settings + * + * Intended for boards with A0 silicon where the core voltage may + * need tweaking. Most likely set once when the pass voltage is + * determined. */ + +#define TLV_TAG_0V9_SETTINGS (0x000c0000) + +struct tlv_0v9_settings { + uint32_t tag; + uint32_t length; + uint16_t flags; /* Boards with high 0v9 settings may need active cooling */ +#define TLV_TAG_0V9_REQUIRES_FAN (1) + uint16_t target_voltage; /* In millivolts */ + /* Since the limits are meant to be centred to the target (and must at least + * contain it) they need setting as well. */ + uint16_t warn_low; /* In millivolts */ + uint16_t warn_high; /* In millivolts */ + uint16_t panic_low; /* In millivolts */ + uint16_t panic_high; /* In millivolts */ +}; + + +/* Clock configuration */ + +#define TLV_TAG_CLOCK_CONFIG (0x000d0000) /* legacy symbol - do not use */ +#define TLV_TAG_CLOCK_CONFIG_HUNT TLV_TAG_CLOCK_CONFIG + +struct tlv_clock_config { + uint32_t tag; + uint32_t length; + uint16_t clk_sys; /* MHz */ + uint16_t clk_dpcpu; /* MHz */ + uint16_t clk_icore; /* MHz */ + uint16_t clk_pcs; /* MHz */ +}; + +#define TLV_TAG_CLOCK_CONFIG_MEDFORD (0x00100000) + +struct tlv_clock_config_medford { + uint32_t tag; + uint32_t length; + uint16_t clk_sys; /* MHz */ + uint16_t clk_mc; /* MHz */ + uint16_t clk_rmon; /* MHz */ + uint16_t clk_vswitch; /* MHz */ + uint16_t clk_dpcpu; /* MHz */ + uint16_t clk_pcs; /* MHz */ +}; + + +/* EF10-style global pool of MAC addresses. + * + * There are addresses, starting at , which are + * contiguous. Firmware is responsible for allocating addresses from this + * pool to ports / PFs as appropriate. 
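+ *
+ * For example, a pool with base_address 02:00:00:00:10:00 and count 4
+ * covers the consecutive addresses 02:00:00:00:10:00 through
+ * 02:00:00:00:10:03.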
+ */ + +#define TLV_TAG_GLOBAL_MAC (0x000e0000) + +struct tlv_global_mac { + uint32_t tag; + uint32_t length; + uint8_t base_address[6]; + uint16_t reserved1; + uint16_t count; + uint16_t reserved2; +}; + +#define TLV_TAG_ATB_0V9_TARGET (0x000f0000) /* legacy symbol - do not use */ +#define TLV_TAG_ATB_0V9_TARGET_HUNT TLV_TAG_ATB_0V9_TARGET + +/* The target value for the 0v9 power rail measured on-chip at the + * analogue test bus */ +struct tlv_0v9_atb_target { + uint32_t tag; + uint32_t length; + uint16_t millivolts; + uint16_t reserved; +}; + +/* Factory settings for amplitude calibration of the PCIE TX serdes */ +#define TLV_TAG_TX_PCIE_AMP_CONFIG (0x00220000) +struct tlv_pcie_tx_amp_config { + uint32_t tag; + uint32_t length; + uint8_t quad_tx_imp2k[4]; + uint8_t quad_tx_imp50[4]; + uint8_t lane_amp[16]; +}; + +/* Enum to select an OEM and enable additional functionality related to this OEM + * (e.g. vendor extensions to VPD, NC-SI etc.) */ +#define TLV_TAG_OEM (0x00230000) +struct tlv_oem { + uint32_t tag; + uint32_t length; + uint8_t oem; +}; +#define TLV_OEM_NONE 0 +#define TLV_OEM_DELL 1 + +/* Global PCIe configuration, second revision. This represents the visible PFs + * by a bitmap rather than having the number of the highest visible one. As such + * it can (for a 16-PF chip) represent a superset of what TLV_TAG_GLOBAL_PCIE_CONFIG + * can and it should be used in place of that tag in future (but compatibility with + * the old tag will be left in the firmware indefinitely). */ + +#define TLV_TAG_GLOBAL_PCIE_CONFIG_R2 (0x10100000) + +struct tlv_pcie_config_r2 { + uint32_t tag; + uint32_t length; + uint16_t visible_pfs; /**< Bitmap of visible PFs */ + uint16_t pf_aper; /**< BIU aperture for PF BAR2 */ + uint16_t vf_aper; /**< BIU aperture for VF BAR0 */ + uint16_t int_aper; /**< BIU aperture for PF BAR4 and VF BAR2 */ +}; + +/* Dynamic port mode. + * + * Allows selecting alternate port configuration for platforms that support it + * (e.g. 1x40G vs 2x10G on Milano, 1x40G vs 4x10G on Medford). This affects the + * number of externally visible ports (and, hence, PF to port mapping), so must + * be done at boot time. + * + * Port mode naming convention is + * + * [nports_on_cage0]x[port_lane_width]_[nports_on_cage1]x[port_lane_width] + * + * Port lane width determines the capabilities (speeds) of the ports, subject + * to architecture capabilities (e.g. 25G support) and switch bandwidth + * constraints: + * - single lane ports can do 25G/10G/1G + * - dual lane ports can do 50G/25G/10G/1G (with fallback to 1 lane) + * - quad lane ports can do 100G/40G/50G/25G/10G/1G (with fallback to 2 or 1 lanes) + + * This tag supercedes tlv_global_port_config. 
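+ *
+ * For example, "2x1_2x1" is two single-lane (10G/25G) ports on cage 0 and
+ * two on cage 1, while "1x4_NA" is a single quad-lane (100G/40G) port on
+ * cage 0 with nothing on cage 1.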
+ */ + +#define TLV_TAG_GLOBAL_PORT_MODE (0x10110000) + +struct tlv_global_port_mode { + uint32_t tag; + uint32_t length; + uint32_t port_mode; +#define TLV_PORT_MODE_DEFAULT (0xffffffff) /* Default for given platform */ + +/* Huntington port modes */ +#define TLV_PORT_MODE_10G (0) +#define TLV_PORT_MODE_40G (1) +#define TLV_PORT_MODE_10G_10G (2) +#define TLV_PORT_MODE_40G_40G (3) +#define TLV_PORT_MODE_10G_10G_10G_10G (4) +#define TLV_PORT_MODE_40G_10G_10G (6) +#define TLV_PORT_MODE_10G_10G_40G (7) + +/* Medford (and later) port modes */ +#define TLV_PORT_MODE_1x1_NA (0) /* Single 10G/25G on mdi0 */ +#define TLV_PORT_MODE_1x4_NA (1) /* Single 100G/40G on mdi0 */ +#define TLV_PORT_MODE_NA_1x4 (22) /* Single 100G/40G on mdi1 */ +#define TLV_PORT_MODE_1x2_NA (10) /* Single 50G on mdi0 */ +#define TLV_PORT_MODE_NA_1x2 (11) /* Single 50G on mdi1 */ +#define TLV_PORT_MODE_1x1_1x1 (2) /* Single 10G/25G on mdi0, single 10G/25G on mdi1 */ +#define TLV_PORT_MODE_1x4_1x4 (3) /* Single 40G on mdi0, single 40G on mdi1 */ +#define TLV_PORT_MODE_2x1_2x1 (5) /* Dual 10G/25G on mdi0, dual 10G/25G on mdi1 */ +#define TLV_PORT_MODE_4x1_NA (4) /* Quad 10G/25G on mdi0 */ +#define TLV_PORT_MODE_NA_4x1 (8) /* Quad 10G/25G on mdi1 */ +#define TLV_PORT_MODE_1x4_2x1 (6) /* Single 40G on mdi0, dual 10G/25G on mdi1 */ +#define TLV_PORT_MODE_2x1_1x4 (7) /* Dual 10G/25G on mdi0, single 40G on mdi1 */ +#define TLV_PORT_MODE_1x2_1x2 (12) /* Single 50G on mdi0, single 50G on mdi1 */ +#define TLV_PORT_MODE_2x2_NA (13) /* Dual 50G on mdi0 */ +#define TLV_PORT_MODE_NA_2x2 (14) /* Dual 50G on mdi1 */ +#define TLV_PORT_MODE_1x4_1x2 (15) /* Single 40G on mdi0, single 50G on mdi1 */ +#define TLV_PORT_MODE_1x2_1x4 (16) /* Single 50G on mdi0, single 40G on mdi1 */ +#define TLV_PORT_MODE_1x2_2x1 (17) /* Single 50G on mdi0, dual 10G/25G on mdi1 */ +#define TLV_PORT_MODE_2x1_1x2 (18) /* Dual 10G/25G on mdi0, single 50G on mdi1 */ + +/* Snapper-only Medford2 port modes. + * These modes are eftest only, to allow snapper explicit + * selection between multi-channel and LLPCS. In production, + * this selection is automatic and outside world should not + * care about LLPCS. 
+ */ +#define TLV_PORT_MODE_2x1_2x1_LL (19) /* Dual 10G/25G on mdi0, dual 10G/25G on mdi1, low-latency PCS */ +#define TLV_PORT_MODE_4x1_NA_LL (20) /* Quad 10G/25G on mdi0, low-latency PCS */ +#define TLV_PORT_MODE_NA_4x1_LL (21) /* Quad 10G/25G on mdi1, low-latency PCS */ +#define TLV_PORT_MODE_1x1_NA_LL (23) /* Single 10G/25G on mdi0, low-latency PCS */ +#define TLV_PORT_MODE_1x1_1x1_LL (24) /* Single 10G/25G on mdi0, single 10G/25G on mdi1, low-latency PCS */ +#define TLV_PORT_MODE_BUG63720_DO_NOT_USE (9) /* bug63720: Do not use */ +#define TLV_PORT_MODE_MAX TLV_PORT_MODE_1x1_1x1_LL + +/* Deprecated Medford aliases - DO NOT USE IN NEW CODE */ +#define TLV_PORT_MODE_10G_10G_10G_10G_Q (5) +#define TLV_PORT_MODE_10G_10G_10G_10G_Q1 (4) +#define TLV_PORT_MODE_10G_10G_10G_10G_Q2 (8) +#define TLV_PORT_MODE_10G_10G_10G_10G_Q1_Q2 (9) + +#define TLV_PORT_MODE_MAX TLV_PORT_MODE_1x1_1x1_LL +}; + +/* Type of the v-switch created implicitly by the firmware */ + +#define TLV_TAG_VSWITCH_TYPE(port) (0x10120000 + (port)) + +struct tlv_vswitch_type { + uint32_t tag; + uint32_t length; + uint32_t vswitch_type; +#define TLV_VSWITCH_TYPE_DEFAULT (0xffffffff) /* Firmware default; equivalent to no TLV present for a given port */ +#define TLV_VSWITCH_TYPE_NONE (0) +#define TLV_VSWITCH_TYPE_VLAN (1) +#define TLV_VSWITCH_TYPE_VEB (2) +#define TLV_VSWITCH_TYPE_VEPA (3) +#define TLV_VSWITCH_TYPE_MUX (4) +#define TLV_VSWITCH_TYPE_TEST (5) +}; + +/* A VLAN tag for the v-port created implicitly by the firmware */ + +#define TLV_TAG_VPORT_VLAN_TAG(pf) (0x10130000 + (pf)) + +struct tlv_vport_vlan_tag { + uint32_t tag; + uint32_t length; + uint32_t vlan_tag; +#define TLV_VPORT_NO_VLAN_TAG (0xFFFFFFFF) /* Default in the absence of TLV for a given PF */ +}; + +/* Offset to be applied to the 0v9 setting, wherever it came from */ + +#define TLV_TAG_ATB_0V9_OFFSET (0x10140000) + +struct tlv_0v9_atb_offset { + uint32_t tag; + uint32_t length; + int16_t offset_millivolts; + uint16_t reserved; +}; + +/* A privilege mask given on reset to all non-admin PCIe functions (that is other than first-PF-per-port). + * The meaning of particular bits is defined in mcdi_ef10.yml under MC_CMD_PRIVILEGE_MASK, see also bug 44583. + * TLV_TAG_PRIVILEGE_MASK_ADD specifies bits that should be added (ORed) to firmware default while + * TLV_TAG_PRIVILEGE_MASK_REM specifies bits that should be removed (ANDed) from firmware default: + * Initial_privilege_mask = (firmware_default_mask | privilege_mask_add) & ~privilege_mask_rem */ + +#define TLV_TAG_PRIVILEGE_MASK (0x10150000) /* legacy symbol - do not use */ + +struct tlv_privilege_mask { /* legacy structure - do not use */ + uint32_t tag; + uint32_t length; + uint32_t privilege_mask; +}; + +#define TLV_TAG_PRIVILEGE_MASK_ADD (0x10150000) + +struct tlv_privilege_mask_add { + uint32_t tag; + uint32_t length; + uint32_t privilege_mask_add; +}; + +#define TLV_TAG_PRIVILEGE_MASK_REM (0x10160000) + +struct tlv_privilege_mask_rem { + uint32_t tag; + uint32_t length; + uint32_t privilege_mask_rem; +}; + +/* Additional privileges given to all PFs. + * This tag takes precedence over TLV_TAG_PRIVILEGE_MASK_REM. */ + +#define TLV_TAG_PRIVILEGE_MASK_ADD_ALL_PFS (0x10190000) + +struct tlv_privilege_mask_add_all_pfs { + uint32_t tag; + uint32_t length; + uint32_t privilege_mask_add; +}; + +/* Additional privileges given to a selected PF. + * This tag takes precedence over TLV_TAG_PRIVILEGE_MASK_REM. 
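+ * In other words, a privilege bit set both here and in
+ * TLV_TAG_PRIVILEGE_MASK_REM is still granted to the selected PF.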
*/ + +#define TLV_TAG_PRIVILEGE_MASK_ADD_SINGLE_PF(pf) (0x101A0000 + (pf)) + +struct tlv_privilege_mask_add_single_pf { + uint32_t tag; + uint32_t length; + uint32_t privilege_mask_add; +}; + +/* Turning on/off the PFIOV mode. + * This tag only takes effect if TLV_TAG_VSWITCH_TYPE is missing or set to DEFAULT. */ + +#define TLV_TAG_PFIOV(port) (0x10170000 + (port)) + +struct tlv_pfiov { + uint32_t tag; + uint32_t length; + uint32_t pfiov; +#define TLV_PFIOV_OFF (0) /* Default */ +#define TLV_PFIOV_ON (1) +}; + +/* Multicast filter chaining mode selection. + * + * When enabled, multicast packets are delivered to all recipients of all + * matching multicast filters, with the exception that IP multicast filters + * will steal traffic from MAC multicast filters on a per-function basis. + * (New behaviour.) + * + * When disabled, multicast packets will always be delivered only to the + * recipients of the highest priority matching multicast filter. + * (Legacy behaviour.) + * + * The DEFAULT mode (which is the same as the tag not being present at all) + * is equivalent to ENABLED in production builds, and DISABLED in eftest + * builds. + * + * This option is intended to provide run-time control over this feature + * while it is being stabilised and may be withdrawn at some point in the + * future; the new behaviour is intended to become the standard behaviour. + */ + +#define TLV_TAG_MCAST_FILTER_CHAINING (0x10180000) + +struct tlv_mcast_filter_chaining { + uint32_t tag; + uint32_t length; + uint32_t mode; +#define TLV_MCAST_FILTER_CHAINING_DEFAULT (0xffffffff) +#define TLV_MCAST_FILTER_CHAINING_DISABLED (0) +#define TLV_MCAST_FILTER_CHAINING_ENABLED (1) +}; + +/* Pacer rate limit per PF */ +#define TLV_TAG_RATE_LIMIT(pf) (0x101b0000 + (pf)) + +struct tlv_rate_limit { + uint32_t tag; + uint32_t length; + uint32_t rate_mbps; +}; + +/* OCSD Enable/Disable + * + * This setting allows OCSD to be disabled. This is a requirement for HP + * servers to support PCI passthrough for virtualization. + * + * The DEFAULT mode (which is the same as the tag not being present) is + * equivalent to ENABLED. + * + * This option is not used by the MCFW, and is entirely handled by the various + * drivers that support OCSD, by reading the setting before they attempt + * to enable OCSD. + * + * bit0: OCSD Disabled/Enabled + */ + +#define TLV_TAG_OCSD (0x101C0000) + +struct tlv_ocsd { + uint32_t tag; + uint32_t length; + uint32_t mode; +#define TLV_OCSD_DISABLED 0 +#define TLV_OCSD_ENABLED 1 /* Default */ +}; + +/* Descriptor cache config. + * + * Sets the sizes of the TX and RX descriptor caches as a power of 2. It also + * sets the total number of VIs. When the number of VIs is reduced VIs are taken + * away from the highest numbered port first, so a vi_count of 1024 means 1024 + * VIs on the first port and 0 on the second (on a Torino). + */ + +#define TLV_TAG_DESCRIPTOR_CACHE_CONFIG (0x101d0000) + +struct tlv_descriptor_cache_config { + uint32_t tag; + uint32_t length; + uint8_t rx_desc_cache_size; + uint8_t tx_desc_cache_size; + uint16_t vi_count; +}; +#define TLV_DESC_CACHE_DEFAULT (0xff) +#define TLV_VI_COUNT_DEFAULT (0xffff) + +/* RX event merging config (read batching). + * + * Sets the global maximum number of events for the merging bins, and the + * global timeout configuration for the bins. 
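+ *
+ * (The 0xffffffff *_DEFAULT values defined below are assumed to leave the
+ * firmware's own defaults in place, as with the other DEFAULT values in
+ * this file.)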
+ */ + +#define TLV_TAG_RX_EVENT_MERGING_CONFIG (0x101e0000) + +struct tlv_rx_event_merging_config { + uint32_t tag; + uint32_t length; + uint32_t max_events; +#define TLV_RX_EVENT_MERGING_CONFIG_MAX_EVENTS_MAX ((1 << 4) - 1) + uint32_t timeout_ns; +}; +#define TLV_RX_EVENT_MERGING_MAX_EVENTS_DEFAULT (0xffffffff) +#define TLV_RX_EVENT_MERGING_TIMEOUT_NS_DEFAULT (0xffffffff) + +#define TLV_TAG_PCIE_LINK_SETTINGS (0x101f0000) +struct tlv_pcie_link_settings { + uint32_t tag; + uint32_t length; + uint16_t gen; /* Target PCIe generation: 1, 2, 3 */ + uint16_t width; /* Number of lanes */ +}; + +/* TX event merging config. + * + * Sets the global maximum number of events for the merging bins, and the + * global timeout configuration for the bins, and the global timeout for + * empty queues. + */ +#define TLV_TAG_TX_EVENT_MERGING_CONFIG (0x10210000) +struct tlv_tx_event_merging_config { + uint32_t tag; + uint32_t length; + uint32_t max_events; +#define TLV_TX_EVENT_MERGING_CONFIG_MAX_EVENTS_MAX ((1 << 4) - 1) + uint32_t timeout_ns; + uint32_t qempty_timeout_ns; /* Medford only */ +}; +#define TLV_TX_EVENT_MERGING_MAX_EVENTS_DEFAULT (0xffffffff) +#define TLV_TX_EVENT_MERGING_TIMEOUT_NS_DEFAULT (0xffffffff) +#define TLV_TX_EVENT_MERGING_QEMPTY_TIMEOUT_NS_DEFAULT (0xffffffff) + +#define TLV_TAG_LICENSE (0x30800000) + +typedef struct tlv_license { + uint32_t tag; + uint32_t length; + uint8_t data[]; +} tlv_license_t; + +/* TSA NIC IP address configuration (DEPRECATED) + * + * Sets the TSA NIC IP address statically via configuration tool or dynamically + * via DHCP via snooping based on the mode selection (0=Static, 1=DHCP, 2=Snoop) + * + * NOTE: This TAG is temporarily placed in the dynamic config partition and will + * be moved to a private partition during TSA development. It is not used in any + * released code yet. + */ + +#define TLV_TAG_TMP_TSAN_CONFIG (0x10220000) /* DEPRECATED */ + +#define TLV_TSAN_IP_MODE_STATIC (0) +#define TLV_TSAN_IP_MODE_DHCP (1) +#define TLV_TSAN_IP_MODE_SNOOP (2) +typedef struct tlv_tsan_config { + uint32_t tag; + uint32_t length; + uint32_t mode; + uint32_t ip; + uint32_t netmask; + uint32_t gateway; + uint32_t port; + uint32_t bind_retry; /* DEPRECATED */ + uint32_t bind_bkout; /* DEPRECATED */ +} tlv_tsan_config_t; + +/* TSA Controller IP address configuration (DEPRECATED) + * + * Sets the TSA Controller IP address statically via configuration tool + * + * NOTE: This TAG is temporarily placed in the dynamic config partition and will + * be moved to a private partition during TSA development. It is not used in any + * released code yet. + */ + +#define TLV_TAG_TMP_TSAC_CONFIG (0x10230000) /* DEPRECATED */ + +#define TLV_MAX_TSACS (4) +typedef struct tlv_tsac_config { + uint32_t tag; + uint32_t length; + uint32_t num_tsacs; + uint32_t ip[TLV_MAX_TSACS]; + uint32_t port[TLV_MAX_TSACS]; +} tlv_tsac_config_t; + +/* Binding ticket (DEPRECATED) + * + * Sets the TSA NIC binding ticket used for binding process between the TSA NIC + * and the TSA Controller + * + * NOTE: This TAG is temporarily placed in the dynamic config partition and will + * be moved to a private partition during TSA development. It is not used in any + * released code yet. 
+ */ + +#define TLV_TAG_TMP_BINDING_TICKET (0x10240000) /* DEPRECATED */ + +typedef struct tlv_binding_ticket { + uint32_t tag; + uint32_t length; + uint8_t bytes[]; +} tlv_binding_ticket_t; + +/* Solarflare private key (DEPRECATED) + * + * Sets the Solareflare private key used for signing during the binding process + * + * NOTE: This TAG is temporarily placed in the dynamic config partition and will + * be moved to a private partition during TSA development. It is not used in any + * released code yet. + */ + +#define TLV_TAG_TMP_PIK_SF (0x10250000) /* DEPRECATED */ + +typedef struct tlv_pik_sf { + uint32_t tag; + uint32_t length; + uint8_t bytes[]; +} tlv_pik_sf_t; + +/* CA root certificate (DEPRECATED) + * + * Sets the CA root certificate used for TSA Controller verfication during + * TLS connection setup between the TSA NIC and the TSA Controller + * + * NOTE: This TAG is temporarily placed in the dynamic config partition and will + * be moved to a private partition during TSA development. It is not used in any + * released code yet. + */ + +#define TLV_TAG_TMP_CA_ROOT_CERT (0x10260000) /* DEPRECATED */ + +typedef struct tlv_ca_root_cert { + uint32_t tag; + uint32_t length; + uint8_t bytes[]; +} tlv_ca_root_cert_t; + +/* Tx vFIFO Low latency configuration + * + * To keep the desired booting behaviour for the switch, it just requires to + * know if the low latency mode is enabled. + */ + +#define TLV_TAG_TX_VFIFO_ULL_MODE (0x10270000) +struct tlv_tx_vfifo_ull_mode { + uint32_t tag; + uint32_t length; + uint8_t mode; +#define TLV_TX_VFIFO_ULL_MODE_DEFAULT 0 +}; + +/* BIU mode + * + * Medford2 tag for selecting VI window decode (see values below) + */ +#define TLV_TAG_BIU_VI_WINDOW_MODE (0x10280000) +struct tlv_biu_vi_window_mode { + uint32_t tag; + uint32_t length; + uint8_t mode; +#define TLV_BIU_VI_WINDOW_MODE_8K 0 /* 8k per VI, CTPIO not mapped, medford/hunt compatible */ +#define TLV_BIU_VI_WINDOW_MODE_16K 1 /* 16k per VI, CTPIO mapped */ +#define TLV_BIU_VI_WINDOW_MODE_64K 2 /* 64k per VI, CTPIO mapped, POWER-friendly */ +}; + +/* FastPD mode + * + * Medford2 tag for configuring the FastPD mode (see values below) + */ +#define TLV_TAG_FASTPD_MODE(port) (0x10290000 + (port)) +struct tlv_fastpd_mode { + uint32_t tag; + uint32_t length; + uint8_t mode; +#define TLV_FASTPD_MODE_SOFT_ALL 0 /* All packets to the SoftPD */ +#define TLV_FASTPD_MODE_FAST_ALL 1 /* All packets to the FastPD */ +#define TLV_FASTPD_MODE_FAST_SUPPORTED 2 /* Supported packet types to the FastPD; everything else to the SoftPD */ +}; + +/* L3xUDP datapath firmware UDP port configuration + * + * Sets the list of UDP ports on which the encapsulation will be handled. + * The number of ports in the list is implied by the length of the TLV item. + */ +#define TLV_TAG_L3XUDP_PORTS (0x102a0000) +struct tlv_l3xudp_ports { + uint32_t tag; + uint32_t length; + uint16_t ports[]; +#define TLV_TAG_L3XUDP_PORTS_MAX_NUM_PORTS 16 +}; + +/* Wake on LAN setting + * + * Enables the Wake On Lan (WoL) functionality on the given port. This will be + * a persistent setting for manageability firmware. Drivers have direct access + * to WoL using MCDI. 
+ */ +#define TLV_TAG_WAKE_ON_LAN(port) (0x102b0000 + (port)) +struct tlv_wake_on_lan { + uint32_t tag; + uint32_t length; + uint8_t mode; + uint8_t bytes[]; +#define TLV_WAKE_ON_LAN_MODE_DISABLED 0 +#define TLV_WAKE_ON_LAN_MODE_MAGIC_PACKET 1 +#define TLV_WAKE_ON_LAN_MAX_NUM_BYTES 255 +}; + +#endif /* CI_MGMT_TLV_LAYOUT_H */ diff --git a/src/spdk/dpdk/drivers/net/sfc/base/ef10_tx.c b/src/spdk/dpdk/drivers/net/sfc/base/ef10_tx.c new file mode 100644 index 000000000..e2f9ebac4 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/base/ef10_tx.c @@ -0,0 +1,772 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2012-2019 Solarflare Communications Inc. + */ + +#include "efx.h" +#include "efx_impl.h" + + +#if EFX_OPTS_EF10() + +#if EFSYS_OPT_QSTATS +#define EFX_TX_QSTAT_INCR(_etp, _stat) \ + do { \ + (_etp)->et_stat[_stat]++; \ + _NOTE(CONSTANTCONDITION) \ + } while (B_FALSE) +#else +#define EFX_TX_QSTAT_INCR(_etp, _stat) +#endif + +static __checkReturn efx_rc_t +efx_mcdi_init_txq( + __in efx_nic_t *enp, + __in uint32_t ndescs, + __in uint32_t target_evq, + __in uint32_t label, + __in uint32_t instance, + __in uint16_t flags, + __in efsys_mem_t *esmp) +{ + efx_mcdi_req_t req; + EFX_MCDI_DECLARE_BUF(payload, + MC_CMD_INIT_TXQ_IN_LEN(EF10_TXQ_MAXNBUFS), + MC_CMD_INIT_TXQ_OUT_LEN); + efx_qword_t *dma_addr; + uint64_t addr; + int npages; + int i; + efx_rc_t rc; + + EFSYS_ASSERT(EF10_TXQ_MAXNBUFS >= + efx_txq_nbufs(enp, enp->en_nic_cfg.enc_txq_max_ndescs)); + + if ((esmp == NULL) || + (EFSYS_MEM_SIZE(esmp) < efx_txq_size(enp, ndescs))) { + rc = EINVAL; + goto fail1; + } + + npages = efx_txq_nbufs(enp, ndescs); + if (MC_CMD_INIT_TXQ_IN_LEN(npages) > sizeof (payload)) { + rc = EINVAL; + goto fail2; + } + + req.emr_cmd = MC_CMD_INIT_TXQ; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_INIT_TXQ_IN_LEN(npages); + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_INIT_TXQ_OUT_LEN; + + MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_SIZE, ndescs); + MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_TARGET_EVQ, target_evq); + MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_LABEL, label); + MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_INSTANCE, instance); + + MCDI_IN_POPULATE_DWORD_9(req, INIT_TXQ_IN_FLAGS, + INIT_TXQ_IN_FLAG_BUFF_MODE, 0, + INIT_TXQ_IN_FLAG_IP_CSUM_DIS, + (flags & EFX_TXQ_CKSUM_IPV4) ? 0 : 1, + INIT_TXQ_IN_FLAG_TCP_CSUM_DIS, + (flags & EFX_TXQ_CKSUM_TCPUDP) ? 0 : 1, + INIT_TXQ_EXT_IN_FLAG_INNER_IP_CSUM_EN, + (flags & EFX_TXQ_CKSUM_INNER_IPV4) ? 1 : 0, + INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN, + (flags & EFX_TXQ_CKSUM_INNER_TCPUDP) ? 1 : 0, + INIT_TXQ_EXT_IN_FLAG_TSOV2_EN, (flags & EFX_TXQ_FATSOV2) ? 
1 : 0, + INIT_TXQ_IN_FLAG_TCP_UDP_ONLY, 0, + INIT_TXQ_IN_CRC_MODE, 0, + INIT_TXQ_IN_FLAG_TIMESTAMP, 0); + + MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_OWNER_ID, 0); + MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_PORT_ID, enp->en_vport_id); + + dma_addr = MCDI_IN2(req, efx_qword_t, INIT_TXQ_IN_DMA_ADDR); + addr = EFSYS_MEM_ADDR(esmp); + + for (i = 0; i < npages; i++) { + EFX_POPULATE_QWORD_2(*dma_addr, + EFX_DWORD_1, (uint32_t)(addr >> 32), + EFX_DWORD_0, (uint32_t)(addr & 0xffffffff)); + + dma_addr++; + addr += EFX_BUF_SIZE; + } + + efx_mcdi_execute(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail3; + } + + return (0); + +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +static __checkReturn efx_rc_t +efx_mcdi_fini_txq( + __in efx_nic_t *enp, + __in uint32_t instance) +{ + efx_mcdi_req_t req; + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_FINI_TXQ_IN_LEN, + MC_CMD_FINI_TXQ_OUT_LEN); + efx_rc_t rc; + + req.emr_cmd = MC_CMD_FINI_TXQ; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_FINI_TXQ_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_FINI_TXQ_OUT_LEN; + + MCDI_IN_SET_DWORD(req, FINI_TXQ_IN_INSTANCE, instance); + + efx_mcdi_execute_quiet(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail1; + } + + return (0); + +fail1: + /* + * EALREADY is not an error, but indicates that the MC has rebooted and + * that the TXQ has already been destroyed. + */ + if (rc != EALREADY) + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +ef10_tx_init( + __in efx_nic_t *enp) +{ + _NOTE(ARGUNUSED(enp)) + return (0); +} + + void +ef10_tx_fini( + __in efx_nic_t *enp) +{ + _NOTE(ARGUNUSED(enp)) +} + + __checkReturn efx_rc_t +ef10_tx_qcreate( + __in efx_nic_t *enp, + __in unsigned int index, + __in unsigned int label, + __in efsys_mem_t *esmp, + __in size_t ndescs, + __in uint32_t id, + __in uint16_t flags, + __in efx_evq_t *eep, + __in efx_txq_t *etp, + __out unsigned int *addedp) +{ + efx_nic_cfg_t *encp = &enp->en_nic_cfg; + uint16_t inner_csum; + efx_desc_t desc; + efx_rc_t rc; + + _NOTE(ARGUNUSED(id)) + + inner_csum = EFX_TXQ_CKSUM_INNER_IPV4 | EFX_TXQ_CKSUM_INNER_TCPUDP; + if (((flags & inner_csum) != 0) && + (encp->enc_tunnel_encapsulations_supported == 0)) { + rc = EINVAL; + goto fail1; + } + + if ((rc = efx_mcdi_init_txq(enp, ndescs, eep->ee_index, label, index, + flags, esmp)) != 0) + goto fail2; + + /* + * A previous user of this TX queue may have written a descriptor to the + * TX push collector, but not pushed the doorbell (e.g. after a crash). + * The next doorbell write would then push the stale descriptor. + * + * Ensure the (per network port) TX push collector is cleared by writing + * a no-op TX option descriptor. See bug29981 for details. 
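+ *
+ * The no-op used here is a checksum option descriptor built from the
+ * queue's checksum flags; it is written at descriptor index 0 and then
+ * pushed via ef10_tx_qpush().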
+ */ + *addedp = 1; + ef10_tx_qdesc_checksum_create(etp, flags, &desc); + + EFSYS_MEM_WRITEQ(etp->et_esmp, 0, &desc.ed_eq); + ef10_tx_qpush(etp, *addedp, 0); + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + void +ef10_tx_qdestroy( + __in efx_txq_t *etp) +{ + /* FIXME */ + _NOTE(ARGUNUSED(etp)) + /* FIXME */ +} + + __checkReturn efx_rc_t +ef10_tx_qpio_enable( + __in efx_txq_t *etp) +{ + efx_nic_t *enp = etp->et_enp; + efx_piobuf_handle_t handle; + efx_rc_t rc; + + if (etp->et_pio_size != 0) { + rc = EALREADY; + goto fail1; + } + + /* Sub-allocate a PIO block from a piobuf */ + if ((rc = ef10_nic_pio_alloc(enp, + &etp->et_pio_bufnum, + &handle, + &etp->et_pio_blknum, + &etp->et_pio_offset, + &etp->et_pio_size)) != 0) { + goto fail2; + } + EFSYS_ASSERT3U(etp->et_pio_size, !=, 0); + + /* Link the piobuf to this TXQ */ + if ((rc = ef10_nic_pio_link(enp, etp->et_index, handle)) != 0) { + goto fail3; + } + + /* + * et_pio_offset is the offset of the sub-allocated block within the + * hardware PIO buffer. It is used as the buffer address in the PIO + * option descriptor. + * + * et_pio_write_offset is the offset of the sub-allocated block from the + * start of the write-combined memory mapping, and is used for writing + * data into the PIO buffer. + */ + etp->et_pio_write_offset = + (etp->et_pio_bufnum * ER_DZ_TX_PIOBUF_STEP) + + ER_DZ_TX_PIOBUF_OFST + etp->et_pio_offset; + + return (0); + +fail3: + EFSYS_PROBE(fail3); + (void) ef10_nic_pio_free(enp, etp->et_pio_bufnum, etp->et_pio_blknum); +fail2: + EFSYS_PROBE(fail2); + etp->et_pio_size = 0; +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + void +ef10_tx_qpio_disable( + __in efx_txq_t *etp) +{ + efx_nic_t *enp = etp->et_enp; + + if (etp->et_pio_size != 0) { + /* Unlink the piobuf from this TXQ */ + if (ef10_nic_pio_unlink(enp, etp->et_index) != 0) + return; + + /* Free the sub-allocated PIO block */ + (void) ef10_nic_pio_free(enp, etp->et_pio_bufnum, + etp->et_pio_blknum); + etp->et_pio_size = 0; + etp->et_pio_write_offset = 0; + } +} + + __checkReturn efx_rc_t +ef10_tx_qpio_write( + __in efx_txq_t *etp, + __in_ecount(length) uint8_t *buffer, + __in size_t length, + __in size_t offset) +{ + efx_nic_t *enp = etp->et_enp; + efsys_bar_t *esbp = enp->en_esbp; + uint32_t write_offset; + uint32_t write_offset_limit; + efx_qword_t *eqp; + efx_rc_t rc; + + EFSYS_ASSERT(length % sizeof (efx_qword_t) == 0); + + if (etp->et_pio_size == 0) { + rc = ENOENT; + goto fail1; + } + if (offset + length > etp->et_pio_size) { + rc = ENOSPC; + goto fail2; + } + + /* + * Writes to PIO buffers must be 64 bit aligned, and multiples of + * 64 bits. 
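+ * The loop below therefore copies the source buffer one efx_qword_t
+ * (64 bits) at a time into the write-combined BAR mapping.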
+ */ + write_offset = etp->et_pio_write_offset + offset; + write_offset_limit = write_offset + length; + eqp = (efx_qword_t *)buffer; + while (write_offset < write_offset_limit) { + EFSYS_BAR_WC_WRITEQ(esbp, write_offset, eqp); + eqp++; + write_offset += sizeof (efx_qword_t); + } + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +ef10_tx_qpio_post( + __in efx_txq_t *etp, + __in size_t pkt_length, + __in unsigned int completed, + __inout unsigned int *addedp) +{ + efx_qword_t pio_desc; + unsigned int id; + size_t offset; + unsigned int added = *addedp; + efx_rc_t rc; + + + if (added - completed + 1 > EFX_TXQ_LIMIT(etp->et_mask + 1)) { + rc = ENOSPC; + goto fail1; + } + + if (etp->et_pio_size == 0) { + rc = ENOENT; + goto fail2; + } + + id = added++ & etp->et_mask; + offset = id * sizeof (efx_qword_t); + + EFSYS_PROBE4(tx_pio_post, unsigned int, etp->et_index, + unsigned int, id, uint32_t, etp->et_pio_offset, + size_t, pkt_length); + + EFX_POPULATE_QWORD_5(pio_desc, + ESF_DZ_TX_DESC_IS_OPT, 1, + ESF_DZ_TX_OPTION_TYPE, 1, + ESF_DZ_TX_PIO_CONT, 0, + ESF_DZ_TX_PIO_BYTE_CNT, pkt_length, + ESF_DZ_TX_PIO_BUF_ADDR, etp->et_pio_offset); + + EFSYS_MEM_WRITEQ(etp->et_esmp, offset, &pio_desc); + + EFX_TX_QSTAT_INCR(etp, TX_POST_PIO); + + *addedp = added; + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +ef10_tx_qpost( + __in efx_txq_t *etp, + __in_ecount(ndescs) efx_buffer_t *eb, + __in unsigned int ndescs, + __in unsigned int completed, + __inout unsigned int *addedp) +{ + unsigned int added = *addedp; + unsigned int i; + efx_rc_t rc; + + if (added - completed + ndescs > EFX_TXQ_LIMIT(etp->et_mask + 1)) { + rc = ENOSPC; + goto fail1; + } + + for (i = 0; i < ndescs; i++) { + efx_buffer_t *ebp = &eb[i]; + efsys_dma_addr_t addr = ebp->eb_addr; + size_t size = ebp->eb_size; + boolean_t eop = ebp->eb_eop; + unsigned int id; + size_t offset; + efx_qword_t qword; + + /* No limitations on boundary crossing */ + EFSYS_ASSERT(size <= + etp->et_enp->en_nic_cfg.enc_tx_dma_desc_size_max); + + id = added++ & etp->et_mask; + offset = id * sizeof (efx_qword_t); + + EFSYS_PROBE5(tx_post, unsigned int, etp->et_index, + unsigned int, id, efsys_dma_addr_t, addr, + size_t, size, boolean_t, eop); + + EFX_POPULATE_QWORD_5(qword, + ESF_DZ_TX_KER_TYPE, 0, + ESF_DZ_TX_KER_CONT, (eop) ? 0 : 1, + ESF_DZ_TX_KER_BYTE_CNT, (uint32_t)(size), + ESF_DZ_TX_KER_BUF_ADDR_DW0, (uint32_t)(addr & 0xffffffff), + ESF_DZ_TX_KER_BUF_ADDR_DW1, (uint32_t)(addr >> 32)); + + EFSYS_MEM_WRITEQ(etp->et_esmp, offset, &qword); + } + + EFX_TX_QSTAT_INCR(etp, TX_POST); + + *addedp = added; + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +/* + * This improves performance by, when possible, pushing a TX descriptor at the + * same time as the doorbell. The descriptor must be added to the TXQ, so that + * can be used if the hardware decides not to use the pushed descriptor. 
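+ *
+ * The exception is a TSO option descriptor, which is never pushed (see the
+ * bug 65776 note below); in that case only the write pointer is updated.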
+ */ + void +ef10_tx_qpush( + __in efx_txq_t *etp, + __in unsigned int added, + __in unsigned int pushed) +{ + efx_nic_t *enp = etp->et_enp; + unsigned int wptr; + unsigned int id; + size_t offset; + efx_qword_t desc; + efx_oword_t oword; + + wptr = added & etp->et_mask; + id = pushed & etp->et_mask; + offset = id * sizeof (efx_qword_t); + + EFSYS_MEM_READQ(etp->et_esmp, offset, &desc); + + /* + * Bug 65776: TSO option descriptors cannot be pushed if pacer bypass is + * enabled on the event queue this transmit queue is attached to. + * + * To ensure the code is safe, it is easiest to simply test the type of + * the descriptor to push, and only push it is if it not a TSO option + * descriptor. + */ + if ((EFX_QWORD_FIELD(desc, ESF_DZ_TX_DESC_IS_OPT) != 1) || + (EFX_QWORD_FIELD(desc, ESF_DZ_TX_OPTION_TYPE) != + ESE_DZ_TX_OPTION_DESC_TSO)) { + /* Push the descriptor and update the wptr. */ + EFX_POPULATE_OWORD_3(oword, ERF_DZ_TX_DESC_WPTR, wptr, + ERF_DZ_TX_DESC_HWORD, EFX_QWORD_FIELD(desc, EFX_DWORD_1), + ERF_DZ_TX_DESC_LWORD, EFX_QWORD_FIELD(desc, EFX_DWORD_0)); + + /* Ensure ordering of memory (descriptors) and PIO (doorbell) */ + EFX_DMA_SYNC_QUEUE_FOR_DEVICE(etp->et_esmp, etp->et_mask + 1, + wptr, id); + EFSYS_PIO_WRITE_BARRIER(); + EFX_BAR_VI_DOORBELL_WRITEO(enp, ER_DZ_TX_DESC_UPD_REG, + etp->et_index, &oword); + } else { + efx_dword_t dword; + + /* + * Only update the wptr. This is signalled to the hardware by + * only writing one DWORD of the doorbell register. + */ + EFX_POPULATE_OWORD_1(oword, ERF_DZ_TX_DESC_WPTR, wptr); + dword = oword.eo_dword[2]; + + /* Ensure ordering of memory (descriptors) and PIO (doorbell) */ + EFX_DMA_SYNC_QUEUE_FOR_DEVICE(etp->et_esmp, etp->et_mask + 1, + wptr, id); + EFSYS_PIO_WRITE_BARRIER(); + EFX_BAR_VI_WRITED2(enp, ER_DZ_TX_DESC_UPD_REG, + etp->et_index, &dword, B_FALSE); + } +} + + __checkReturn efx_rc_t +ef10_tx_qdesc_post( + __in efx_txq_t *etp, + __in_ecount(ndescs) efx_desc_t *ed, + __in unsigned int ndescs, + __in unsigned int completed, + __inout unsigned int *addedp) +{ + unsigned int added = *addedp; + unsigned int i; + + if (added - completed + ndescs > EFX_TXQ_LIMIT(etp->et_mask + 1)) + return (ENOSPC); + + for (i = 0; i < ndescs; i++) { + efx_desc_t *edp = &ed[i]; + unsigned int id; + size_t offset; + + id = added++ & etp->et_mask; + offset = id * sizeof (efx_desc_t); + + EFSYS_MEM_WRITEQ(etp->et_esmp, offset, &edp->ed_eq); + } + + EFSYS_PROBE3(tx_desc_post, unsigned int, etp->et_index, + unsigned int, added, unsigned int, ndescs); + + EFX_TX_QSTAT_INCR(etp, TX_POST); + + *addedp = added; + return (0); +} + + void +ef10_tx_qdesc_dma_create( + __in efx_txq_t *etp, + __in efsys_dma_addr_t addr, + __in size_t size, + __in boolean_t eop, + __out efx_desc_t *edp) +{ + _NOTE(ARGUNUSED(etp)) + + /* No limitations on boundary crossing */ + EFSYS_ASSERT(size <= etp->et_enp->en_nic_cfg.enc_tx_dma_desc_size_max); + + EFSYS_PROBE4(tx_desc_dma_create, unsigned int, etp->et_index, + efsys_dma_addr_t, addr, + size_t, size, boolean_t, eop); + + EFX_POPULATE_QWORD_5(edp->ed_eq, + ESF_DZ_TX_KER_TYPE, 0, + ESF_DZ_TX_KER_CONT, (eop) ? 
0 : 1, + ESF_DZ_TX_KER_BYTE_CNT, (uint32_t)(size), + ESF_DZ_TX_KER_BUF_ADDR_DW0, (uint32_t)(addr & 0xffffffff), + ESF_DZ_TX_KER_BUF_ADDR_DW1, (uint32_t)(addr >> 32)); +} + + void +ef10_tx_qdesc_tso_create( + __in efx_txq_t *etp, + __in uint16_t ipv4_id, + __in uint32_t tcp_seq, + __in uint8_t tcp_flags, + __out efx_desc_t *edp) +{ + _NOTE(ARGUNUSED(etp)) + + EFSYS_PROBE4(tx_desc_tso_create, unsigned int, etp->et_index, + uint16_t, ipv4_id, uint32_t, tcp_seq, + uint8_t, tcp_flags); + + EFX_POPULATE_QWORD_5(edp->ed_eq, + ESF_DZ_TX_DESC_IS_OPT, 1, + ESF_DZ_TX_OPTION_TYPE, + ESE_DZ_TX_OPTION_DESC_TSO, + ESF_DZ_TX_TSO_TCP_FLAGS, tcp_flags, + ESF_DZ_TX_TSO_IP_ID, ipv4_id, + ESF_DZ_TX_TSO_TCP_SEQNO, tcp_seq); +} + + void +ef10_tx_qdesc_tso2_create( + __in efx_txq_t *etp, + __in uint16_t ipv4_id, + __in uint16_t outer_ipv4_id, + __in uint32_t tcp_seq, + __in uint16_t tcp_mss, + __out_ecount(count) efx_desc_t *edp, + __in int count) +{ + _NOTE(ARGUNUSED(etp, count)) + + EFSYS_PROBE4(tx_desc_tso2_create, unsigned int, etp->et_index, + uint16_t, ipv4_id, uint32_t, tcp_seq, + uint16_t, tcp_mss); + + EFSYS_ASSERT(count >= EFX_TX_FATSOV2_OPT_NDESCS); + + EFX_POPULATE_QWORD_5(edp[0].ed_eq, + ESF_DZ_TX_DESC_IS_OPT, 1, + ESF_DZ_TX_OPTION_TYPE, + ESE_DZ_TX_OPTION_DESC_TSO, + ESF_DZ_TX_TSO_OPTION_TYPE, + ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A, + ESF_DZ_TX_TSO_IP_ID, ipv4_id, + ESF_DZ_TX_TSO_TCP_SEQNO, tcp_seq); + EFX_POPULATE_QWORD_5(edp[1].ed_eq, + ESF_DZ_TX_DESC_IS_OPT, 1, + ESF_DZ_TX_OPTION_TYPE, + ESE_DZ_TX_OPTION_DESC_TSO, + ESF_DZ_TX_TSO_OPTION_TYPE, + ESE_DZ_TX_TSO_OPTION_DESC_FATSO2B, + ESF_DZ_TX_TSO_TCP_MSS, tcp_mss, + ESF_DZ_TX_TSO_OUTER_IPID, outer_ipv4_id); +} + + void +ef10_tx_qdesc_vlantci_create( + __in efx_txq_t *etp, + __in uint16_t tci, + __out efx_desc_t *edp) +{ + _NOTE(ARGUNUSED(etp)) + + EFSYS_PROBE2(tx_desc_vlantci_create, unsigned int, etp->et_index, + uint16_t, tci); + + EFX_POPULATE_QWORD_4(edp->ed_eq, + ESF_DZ_TX_DESC_IS_OPT, 1, + ESF_DZ_TX_OPTION_TYPE, + ESE_DZ_TX_OPTION_DESC_VLAN, + ESF_DZ_TX_VLAN_OP, tci ? 1 : 0, + ESF_DZ_TX_VLAN_TAG1, tci); +} + + void +ef10_tx_qdesc_checksum_create( + __in efx_txq_t *etp, + __in uint16_t flags, + __out efx_desc_t *edp) +{ + _NOTE(ARGUNUSED(etp)); + + EFSYS_PROBE2(tx_desc_checksum_create, unsigned int, etp->et_index, + uint32_t, flags); + + EFX_POPULATE_QWORD_6(edp->ed_eq, + ESF_DZ_TX_DESC_IS_OPT, 1, + ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_CRC_CSUM, + ESF_DZ_TX_OPTION_UDP_TCP_CSUM, + (flags & EFX_TXQ_CKSUM_TCPUDP) ? 1 : 0, + ESF_DZ_TX_OPTION_IP_CSUM, + (flags & EFX_TXQ_CKSUM_IPV4) ? 1 : 0, + ESF_DZ_TX_OPTION_INNER_UDP_TCP_CSUM, + (flags & EFX_TXQ_CKSUM_INNER_TCPUDP) ? 1 : 0, + ESF_DZ_TX_OPTION_INNER_IP_CSUM, + (flags & EFX_TXQ_CKSUM_INNER_IPV4) ? 1 : 0); +} + + + __checkReturn efx_rc_t +ef10_tx_qpace( + __in efx_txq_t *etp, + __in unsigned int ns) +{ + efx_rc_t rc; + + /* FIXME */ + _NOTE(ARGUNUSED(etp, ns)) + _NOTE(CONSTANTCONDITION) + if (B_FALSE) { + rc = ENOTSUP; + goto fail1; + } + /* FIXME */ + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +ef10_tx_qflush( + __in efx_txq_t *etp) +{ + efx_nic_t *enp = etp->et_enp; + efx_rc_t rc; + + if ((rc = efx_mcdi_fini_txq(enp, etp->et_index)) != 0) + goto fail1; + + return (0); + +fail1: + /* + * EALREADY is not an error, but indicates that the MC has rebooted and + * that the TXQ has already been destroyed. 
Callers need to know that + * the TXQ flush has completed to avoid waiting until timeout for a + * flush done event that will not be delivered. + */ + if (rc != EALREADY) + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + void +ef10_tx_qenable( + __in efx_txq_t *etp) +{ + /* FIXME */ + _NOTE(ARGUNUSED(etp)) + /* FIXME */ +} + +#if EFSYS_OPT_QSTATS + void +ef10_tx_qstats_update( + __in efx_txq_t *etp, + __inout_ecount(TX_NQSTATS) efsys_stat_t *stat) +{ + unsigned int id; + + for (id = 0; id < TX_NQSTATS; id++) { + efsys_stat_t *essp = &stat[id]; + + EFSYS_STAT_INCR(essp, etp->et_stat[id]); + etp->et_stat[id] = 0; + } +} + +#endif /* EFSYS_OPT_QSTATS */ + +#endif /* EFX_OPTS_EF10() */ diff --git a/src/spdk/dpdk/drivers/net/sfc/base/ef10_vpd.c b/src/spdk/dpdk/drivers/net/sfc/base/ef10_vpd.c new file mode 100644 index 000000000..63a5ea7de --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/base/ef10_vpd.c @@ -0,0 +1,437 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2009-2019 Solarflare Communications Inc. + */ + +#include "efx.h" +#include "efx_impl.h" + + +#if EFSYS_OPT_VPD + +#if EFX_OPTS_EF10() + +#include "ef10_tlv_layout.h" + + __checkReturn efx_rc_t +ef10_vpd_init( + __in efx_nic_t *enp) +{ + caddr_t svpd; + size_t svpd_size; + uint32_t pci_pf; + uint32_t tag; + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); + EFSYS_ASSERT(EFX_FAMILY_IS_EF10(enp)); + + if (enp->en_nic_cfg.enc_vpd_is_global) { + tag = TLV_TAG_GLOBAL_STATIC_VPD; + } else { + pci_pf = enp->en_nic_cfg.enc_pf; + tag = TLV_TAG_PF_STATIC_VPD(pci_pf); + } + + /* + * The VPD interface exposes VPD resources from the combined static and + * dynamic VPD storage. As the static VPD configuration should *never* + * change, we can cache it. + */ + svpd = NULL; + svpd_size = 0; + rc = ef10_nvram_partn_read_tlv(enp, + NVRAM_PARTITION_TYPE_STATIC_CONFIG, + tag, &svpd, &svpd_size); + if (rc != 0) { + if (rc == EACCES) { + /* Unprivileged functions cannot access VPD */ + goto out; + } + goto fail1; + } + + if (svpd != NULL && svpd_size > 0) { + if ((rc = efx_vpd_hunk_verify(svpd, svpd_size, NULL)) != 0) + goto fail2; + } + + enp->en_arch.ef10.ena_svpd = svpd; + enp->en_arch.ef10.ena_svpd_length = svpd_size; + +out: + return (0); + +fail2: + EFSYS_PROBE(fail2); + + EFSYS_KMEM_FREE(enp->en_esip, svpd_size, svpd); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +ef10_vpd_size( + __in efx_nic_t *enp, + __out size_t *sizep) +{ + efx_rc_t rc; + efx_nvram_info_t eni = { 0 }; + + EFSYS_ASSERT(EFX_FAMILY_IS_EF10(enp)); + + /* + * This function returns the total size the user should allocate + * for all VPD operations. We've already cached the static vpd, + * so we just need to return an upper bound on the dynamic vpd, + * which is the size of the DYNAMIC_CONFIG partition. 
+ */ + if ((rc = efx_mcdi_nvram_info(enp, + NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, &eni)) != 0) + goto fail1; + + *sizep = eni.eni_partn_size; + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +ef10_vpd_read( + __in efx_nic_t *enp, + __out_bcount(size) caddr_t data, + __in size_t size) +{ + caddr_t dvpd; + size_t dvpd_size; + uint32_t pci_pf; + uint32_t tag; + efx_rc_t rc; + + EFSYS_ASSERT(EFX_FAMILY_IS_EF10(enp)); + + if (enp->en_nic_cfg.enc_vpd_is_global) { + tag = TLV_TAG_GLOBAL_DYNAMIC_VPD; + } else { + pci_pf = enp->en_nic_cfg.enc_pf; + tag = TLV_TAG_PF_DYNAMIC_VPD(pci_pf); + } + + if ((rc = ef10_nvram_partn_read_tlv(enp, + NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, + tag, &dvpd, &dvpd_size)) != 0) + goto fail1; + + if (dvpd_size > size) { + rc = ENOSPC; + goto fail2; + } + if (dvpd != NULL) + memcpy(data, dvpd, dvpd_size); + + /* Pad data with all-1s, consistent with update operations */ + memset(data + dvpd_size, 0xff, size - dvpd_size); + + if (dvpd != NULL) + EFSYS_KMEM_FREE(enp->en_esip, dvpd_size, dvpd); + + return (0); + +fail2: + EFSYS_PROBE(fail2); + + if (dvpd != NULL) + EFSYS_KMEM_FREE(enp->en_esip, dvpd_size, dvpd); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +ef10_vpd_verify( + __in efx_nic_t *enp, + __in_bcount(size) caddr_t data, + __in size_t size) +{ + efx_vpd_tag_t stag; + efx_vpd_tag_t dtag; + efx_vpd_keyword_t skey; + efx_vpd_keyword_t dkey; + unsigned int scont; + unsigned int dcont; + efx_rc_t rc; + + EFSYS_ASSERT(EFX_FAMILY_IS_EF10(enp)); + + /* + * Strictly you could take the view that dynamic vpd is optional. + * Instead, to conform more closely to the read/verify/reinit() + * paradigm, we require dynamic vpd. ef10_vpd_reinit() will + * reinitialize it as required. + */ + if ((rc = efx_vpd_hunk_verify(data, size, NULL)) != 0) + goto fail1; + + /* + * Verify that there is no duplication between the static and + * dynamic cfg sectors. + */ + if (enp->en_arch.ef10.ena_svpd_length == 0) + goto done; + + dcont = 0; + _NOTE(CONSTANTCONDITION) + while (1) { + if ((rc = efx_vpd_hunk_next(data, size, &dtag, + &dkey, NULL, NULL, &dcont)) != 0) + goto fail2; + if (dcont == 0) + break; + + /* + * Skip the RV keyword. It should be present in both the static + * and dynamic cfg sectors. 
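+ * (RV is the read-only checksum keyword defined by the PCI VPD format,
+ * so each sector legitimately carries its own copy.)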
+ */ + if (dtag == EFX_VPD_RO && dkey == EFX_VPD_KEYWORD('R', 'V')) + continue; + + scont = 0; + _NOTE(CONSTANTCONDITION) + while (1) { + if ((rc = efx_vpd_hunk_next( + enp->en_arch.ef10.ena_svpd, + enp->en_arch.ef10.ena_svpd_length, &stag, &skey, + NULL, NULL, &scont)) != 0) + goto fail3; + if (scont == 0) + break; + + if (stag == dtag && skey == dkey) { + rc = EEXIST; + goto fail4; + } + } + } + +done: + return (0); + +fail4: + EFSYS_PROBE(fail4); +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +ef10_vpd_reinit( + __in efx_nic_t *enp, + __in_bcount(size) caddr_t data, + __in size_t size) +{ + boolean_t wantpid; + efx_rc_t rc; + + /* + * Only create an ID string if the dynamic cfg doesn't have one + */ + if (enp->en_arch.ef10.ena_svpd_length == 0) + wantpid = B_TRUE; + else { + unsigned int offset; + uint8_t length; + + rc = efx_vpd_hunk_get(enp->en_arch.ef10.ena_svpd, + enp->en_arch.ef10.ena_svpd_length, + EFX_VPD_ID, 0, &offset, &length); + if (rc == 0) + wantpid = B_FALSE; + else if (rc == ENOENT) + wantpid = B_TRUE; + else + goto fail1; + } + + if ((rc = efx_vpd_hunk_reinit(data, size, wantpid)) != 0) + goto fail2; + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +ef10_vpd_get( + __in efx_nic_t *enp, + __in_bcount(size) caddr_t data, + __in size_t size, + __inout efx_vpd_value_t *evvp) +{ + unsigned int offset; + uint8_t length; + efx_rc_t rc; + + EFSYS_ASSERT(EFX_FAMILY_IS_EF10(enp)); + + /* Attempt to satisfy the request from svpd first */ + if (enp->en_arch.ef10.ena_svpd_length > 0) { + if ((rc = efx_vpd_hunk_get(enp->en_arch.ef10.ena_svpd, + enp->en_arch.ef10.ena_svpd_length, evvp->evv_tag, + evvp->evv_keyword, &offset, &length)) == 0) { + evvp->evv_length = length; + memcpy(evvp->evv_value, + enp->en_arch.ef10.ena_svpd + offset, length); + return (0); + } else if (rc != ENOENT) + goto fail1; + } + + /* And then from the provided data buffer */ + if ((rc = efx_vpd_hunk_get(data, size, evvp->evv_tag, + evvp->evv_keyword, &offset, &length)) != 0) { + if (rc == ENOENT) + return (rc); + goto fail2; + } + + evvp->evv_length = length; + memcpy(evvp->evv_value, data + offset, length); + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +ef10_vpd_set( + __in efx_nic_t *enp, + __in_bcount(size) caddr_t data, + __in size_t size, + __in efx_vpd_value_t *evvp) +{ + efx_rc_t rc; + + EFSYS_ASSERT(EFX_FAMILY_IS_EF10(enp)); + + /* If the provided (tag,keyword) exists in svpd, then it is readonly */ + if (enp->en_arch.ef10.ena_svpd_length > 0) { + unsigned int offset; + uint8_t length; + + if ((rc = efx_vpd_hunk_get(enp->en_arch.ef10.ena_svpd, + enp->en_arch.ef10.ena_svpd_length, evvp->evv_tag, + evvp->evv_keyword, &offset, &length)) == 0) { + rc = EACCES; + goto fail1; + } + } + + if ((rc = efx_vpd_hunk_set(data, size, evvp)) != 0) + goto fail2; + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +ef10_vpd_next( + __in efx_nic_t *enp, + __in_bcount(size) caddr_t data, + __in size_t size, + __out efx_vpd_value_t *evvp, + __inout unsigned int *contp) +{ + _NOTE(ARGUNUSED(enp, data, size, evvp, contp)) + + return (ENOTSUP); +} + + __checkReturn efx_rc_t +ef10_vpd_write( + __in efx_nic_t *enp, + __in_bcount(size) caddr_t data, + __in 
size_t size) +{ + size_t vpd_length; + uint32_t pci_pf; + uint32_t tag; + efx_rc_t rc; + + EFSYS_ASSERT(EFX_FAMILY_IS_EF10(enp)); + + if (enp->en_nic_cfg.enc_vpd_is_global) { + tag = TLV_TAG_GLOBAL_DYNAMIC_VPD; + } else { + pci_pf = enp->en_nic_cfg.enc_pf; + tag = TLV_TAG_PF_DYNAMIC_VPD(pci_pf); + } + + /* Determine total length of new dynamic VPD */ + if ((rc = efx_vpd_hunk_length(data, size, &vpd_length)) != 0) + goto fail1; + + /* Store new dynamic VPD in all segments in DYNAMIC_CONFIG partition */ + if ((rc = ef10_nvram_partn_write_segment_tlv(enp, + NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, + tag, data, vpd_length, B_TRUE)) != 0) { + goto fail2; + } + + return (0); + +fail2: + EFSYS_PROBE(fail2); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + void +ef10_vpd_fini( + __in efx_nic_t *enp) +{ + EFSYS_ASSERT(EFX_FAMILY_IS_EF10(enp)); + + if (enp->en_arch.ef10.ena_svpd_length > 0) { + EFSYS_KMEM_FREE(enp->en_esip, enp->en_arch.ef10.ena_svpd_length, + enp->en_arch.ef10.ena_svpd); + + enp->en_arch.ef10.ena_svpd = NULL; + enp->en_arch.ef10.ena_svpd_length = 0; + } +} + +#endif /* EFX_OPTS_EF10() */ + +#endif /* EFSYS_OPT_VPD */ diff --git a/src/spdk/dpdk/drivers/net/sfc/base/efx.h b/src/spdk/dpdk/drivers/net/sfc/base/efx.h new file mode 100644 index 000000000..07cd0bb39 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/base/efx.h @@ -0,0 +1,3559 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2006-2019 Solarflare Communications Inc. + */ + +#ifndef _SYS_EFX_H +#define _SYS_EFX_H + +#include "efx_annote.h" +#include "efsys.h" +#include "efx_types.h" +#include "efx_check.h" +#include "efx_phy_ids.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define EFX_STATIC_ASSERT(_cond) \ + ((void)sizeof (char[(_cond) ? 1 : -1])) + +#define EFX_ARRAY_SIZE(_array) \ + (sizeof (_array) / sizeof ((_array)[0])) + +#define EFX_FIELD_OFFSET(_type, _field) \ + ((size_t)&(((_type *)0)->_field)) + +/* The macro expands divider twice */ +#define EFX_DIV_ROUND_UP(_n, _d) (((_n) + (_d) - 1) / (_d)) + +/* Round value up to the nearest power of two. */ +#define EFX_P2ROUNDUP(_type, _value, _align) \ + (-(-(_type)(_value) & -(_type)(_align))) + +/* Align value down to the nearest power of two. */ +#define EFX_P2ALIGN(_type, _value, _align) \ + ((_type)(_value) & -(_type)(_align)) + +/* Test if value is power of 2 aligned. 
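+ * For example, EFX_IS_P2ALIGNED(uint32_t, 0x1000, 0x100) is true and
+ * EFX_IS_P2ALIGNED(uint32_t, 0x1004, 0x100) is false; likewise, for the
+ * macros above, EFX_P2ROUNDUP(uint32_t, 100, 64) yields 128 and
+ * EFX_P2ALIGN(uint32_t, 100, 64) yields 64.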
*/ +#define EFX_IS_P2ALIGNED(_type, _value, _align) \ + ((((_type)(_value)) & ((_type)(_align) - 1)) == 0) + +/* Return codes */ + +typedef __success(return == 0) int efx_rc_t; + + +/* Chip families */ + +typedef enum efx_family_e { + EFX_FAMILY_INVALID, + EFX_FAMILY_FALCON, /* Obsolete and not supported */ + EFX_FAMILY_SIENA, + EFX_FAMILY_HUNTINGTON, + EFX_FAMILY_MEDFORD, + EFX_FAMILY_MEDFORD2, + EFX_FAMILY_NTYPES +} efx_family_t; + +extern __checkReturn efx_rc_t +efx_family( + __in uint16_t venid, + __in uint16_t devid, + __out efx_family_t *efp, + __out unsigned int *membarp); + + +#define EFX_PCI_VENID_SFC 0x1924 + +#define EFX_PCI_DEVID_FALCON 0x0710 /* SFC4000 */ + +#define EFX_PCI_DEVID_BETHPAGE 0x0803 /* SFC9020 */ +#define EFX_PCI_DEVID_SIENA 0x0813 /* SFL9021 */ +#define EFX_PCI_DEVID_SIENA_F1_UNINIT 0x0810 + +#define EFX_PCI_DEVID_HUNTINGTON_PF_UNINIT 0x0901 +#define EFX_PCI_DEVID_FARMINGDALE 0x0903 /* SFC9120 PF */ +#define EFX_PCI_DEVID_GREENPORT 0x0923 /* SFC9140 PF */ + +#define EFX_PCI_DEVID_FARMINGDALE_VF 0x1903 /* SFC9120 VF */ +#define EFX_PCI_DEVID_GREENPORT_VF 0x1923 /* SFC9140 VF */ + +#define EFX_PCI_DEVID_MEDFORD_PF_UNINIT 0x0913 +#define EFX_PCI_DEVID_MEDFORD 0x0A03 /* SFC9240 PF */ +#define EFX_PCI_DEVID_MEDFORD_VF 0x1A03 /* SFC9240 VF */ + +#define EFX_PCI_DEVID_MEDFORD2_PF_UNINIT 0x0B13 +#define EFX_PCI_DEVID_MEDFORD2 0x0B03 /* SFC9250 PF */ +#define EFX_PCI_DEVID_MEDFORD2_VF 0x1B03 /* SFC9250 VF */ + + +#define EFX_MEM_BAR_SIENA 2 + +#define EFX_MEM_BAR_HUNTINGTON_PF 2 +#define EFX_MEM_BAR_HUNTINGTON_VF 0 + +#define EFX_MEM_BAR_MEDFORD_PF 2 +#define EFX_MEM_BAR_MEDFORD_VF 0 + +#define EFX_MEM_BAR_MEDFORD2 0 + + +/* Error codes */ + +enum { + EFX_ERR_INVALID, + EFX_ERR_SRAM_OOB, + EFX_ERR_BUFID_DC_OOB, + EFX_ERR_MEM_PERR, + EFX_ERR_RBUF_OWN, + EFX_ERR_TBUF_OWN, + EFX_ERR_RDESQ_OWN, + EFX_ERR_TDESQ_OWN, + EFX_ERR_EVQ_OWN, + EFX_ERR_EVFF_OFLO, + EFX_ERR_ILL_ADDR, + EFX_ERR_SRAM_PERR, + EFX_ERR_NCODES +}; + +/* Calculate the IEEE 802.3 CRC32 of a MAC addr */ +extern __checkReturn uint32_t +efx_crc32_calculate( + __in uint32_t crc_init, + __in_ecount(length) uint8_t const *input, + __in int length); + + +/* Type prototypes */ + +typedef struct efx_rxq_s efx_rxq_t; + +/* NIC */ + +typedef struct efx_nic_s efx_nic_t; + +extern __checkReturn efx_rc_t +efx_nic_create( + __in efx_family_t family, + __in efsys_identifier_t *esip, + __in efsys_bar_t *esbp, + __in efsys_lock_t *eslp, + __deref_out efx_nic_t **enpp); + +/* EFX_FW_VARIANT codes map one to one on MC_CMD_FW codes */ +typedef enum efx_fw_variant_e { + EFX_FW_VARIANT_FULL_FEATURED, + EFX_FW_VARIANT_LOW_LATENCY, + EFX_FW_VARIANT_PACKED_STREAM, + EFX_FW_VARIANT_HIGH_TX_RATE, + EFX_FW_VARIANT_PACKED_STREAM_HASH_MODE_1, + EFX_FW_VARIANT_RULES_ENGINE, + EFX_FW_VARIANT_DPDK, + EFX_FW_VARIANT_DONT_CARE = 0xffffffff +} efx_fw_variant_t; + +extern __checkReturn efx_rc_t +efx_nic_probe( + __in efx_nic_t *enp, + __in efx_fw_variant_t efv); + +extern __checkReturn efx_rc_t +efx_nic_init( + __in efx_nic_t *enp); + +extern __checkReturn efx_rc_t +efx_nic_reset( + __in efx_nic_t *enp); + +extern __checkReturn boolean_t +efx_nic_hw_unavailable( + __in efx_nic_t *enp); + +extern void +efx_nic_set_hw_unavailable( + __in efx_nic_t *enp); + +#if EFSYS_OPT_DIAG + +extern __checkReturn efx_rc_t +efx_nic_register_test( + __in efx_nic_t *enp); + +#endif /* EFSYS_OPT_DIAG */ + +extern void +efx_nic_fini( + __in efx_nic_t *enp); + +extern void +efx_nic_unprobe( + __in efx_nic_t *enp); + +extern void +efx_nic_destroy( + __in efx_nic_t 
*enp); + +#define EFX_PCIE_LINK_SPEED_GEN1 1 +#define EFX_PCIE_LINK_SPEED_GEN2 2 +#define EFX_PCIE_LINK_SPEED_GEN3 3 + +typedef enum efx_pcie_link_performance_e { + EFX_PCIE_LINK_PERFORMANCE_UNKNOWN_BANDWIDTH, + EFX_PCIE_LINK_PERFORMANCE_SUBOPTIMAL_BANDWIDTH, + EFX_PCIE_LINK_PERFORMANCE_SUBOPTIMAL_LATENCY, + EFX_PCIE_LINK_PERFORMANCE_OPTIMAL +} efx_pcie_link_performance_t; + +extern __checkReturn efx_rc_t +efx_nic_calculate_pcie_link_bandwidth( + __in uint32_t pcie_link_width, + __in uint32_t pcie_link_gen, + __out uint32_t *bandwidth_mbpsp); + +extern __checkReturn efx_rc_t +efx_nic_check_pcie_link_speed( + __in efx_nic_t *enp, + __in uint32_t pcie_link_width, + __in uint32_t pcie_link_gen, + __out efx_pcie_link_performance_t *resultp); + +#if EFSYS_OPT_MCDI + +#if EFX_OPTS_EF10() +/* EF10 architecture NICs require MCDIv2 commands */ +#define WITH_MCDI_V2 1 +#endif + +typedef struct efx_mcdi_req_s efx_mcdi_req_t; + +typedef enum efx_mcdi_exception_e { + EFX_MCDI_EXCEPTION_MC_REBOOT, + EFX_MCDI_EXCEPTION_MC_BADASSERT, +} efx_mcdi_exception_t; + +#if EFSYS_OPT_MCDI_LOGGING +typedef enum efx_log_msg_e { + EFX_LOG_INVALID, + EFX_LOG_MCDI_REQUEST, + EFX_LOG_MCDI_RESPONSE, +} efx_log_msg_t; +#endif /* EFSYS_OPT_MCDI_LOGGING */ + +typedef struct efx_mcdi_transport_s { + void *emt_context; + efsys_mem_t *emt_dma_mem; + void (*emt_execute)(void *, efx_mcdi_req_t *); + void (*emt_ev_cpl)(void *); + void (*emt_exception)(void *, efx_mcdi_exception_t); +#if EFSYS_OPT_MCDI_LOGGING + void (*emt_logger)(void *, efx_log_msg_t, + void *, size_t, void *, size_t); +#endif /* EFSYS_OPT_MCDI_LOGGING */ +#if EFSYS_OPT_MCDI_PROXY_AUTH + void (*emt_ev_proxy_response)(void *, uint32_t, efx_rc_t); +#endif /* EFSYS_OPT_MCDI_PROXY_AUTH */ +#if EFSYS_OPT_MCDI_PROXY_AUTH_SERVER + void (*emt_ev_proxy_request)(void *, uint32_t); +#endif /* EFSYS_OPT_MCDI_PROXY_AUTH_SERVER */ +} efx_mcdi_transport_t; + +extern __checkReturn efx_rc_t +efx_mcdi_init( + __in efx_nic_t *enp, + __in const efx_mcdi_transport_t *mtp); + +extern __checkReturn efx_rc_t +efx_mcdi_reboot( + __in efx_nic_t *enp); + + void +efx_mcdi_new_epoch( + __in efx_nic_t *enp); + +extern void +efx_mcdi_get_timeout( + __in efx_nic_t *enp, + __in efx_mcdi_req_t *emrp, + __out uint32_t *usec_timeoutp); + +extern void +efx_mcdi_request_start( + __in efx_nic_t *enp, + __in efx_mcdi_req_t *emrp, + __in boolean_t ev_cpl); + +extern __checkReturn boolean_t +efx_mcdi_request_poll( + __in efx_nic_t *enp); + +extern __checkReturn boolean_t +efx_mcdi_request_abort( + __in efx_nic_t *enp); + +extern void +efx_mcdi_fini( + __in efx_nic_t *enp); + +#endif /* EFSYS_OPT_MCDI */ + +/* INTR */ + +#define EFX_NINTR_SIENA 1024 + +typedef enum efx_intr_type_e { + EFX_INTR_INVALID = 0, + EFX_INTR_LINE, + EFX_INTR_MESSAGE, + EFX_INTR_NTYPES +} efx_intr_type_t; + +#define EFX_INTR_SIZE (sizeof (efx_oword_t)) + +extern __checkReturn efx_rc_t +efx_intr_init( + __in efx_nic_t *enp, + __in efx_intr_type_t type, + __in_opt efsys_mem_t *esmp); + +extern void +efx_intr_enable( + __in efx_nic_t *enp); + +extern void +efx_intr_disable( + __in efx_nic_t *enp); + +extern void +efx_intr_disable_unlocked( + __in efx_nic_t *enp); + +#define EFX_INTR_NEVQS 32 + +extern __checkReturn efx_rc_t +efx_intr_trigger( + __in efx_nic_t *enp, + __in unsigned int level); + +extern void +efx_intr_status_line( + __in efx_nic_t *enp, + __out boolean_t *fatalp, + __out uint32_t *maskp); + +extern void +efx_intr_status_message( + __in efx_nic_t *enp, + __in unsigned int message, + __out boolean_t *fatalp); + +extern 
void +efx_intr_fatal( + __in efx_nic_t *enp); + +extern void +efx_intr_fini( + __in efx_nic_t *enp); + +/* MAC */ + +#if EFSYS_OPT_MAC_STATS + +/* START MKCONFIG GENERATED EfxHeaderMacBlock ea466a9bc8789994 */ +typedef enum efx_mac_stat_e { + EFX_MAC_RX_OCTETS, + EFX_MAC_RX_PKTS, + EFX_MAC_RX_UNICST_PKTS, + EFX_MAC_RX_MULTICST_PKTS, + EFX_MAC_RX_BRDCST_PKTS, + EFX_MAC_RX_PAUSE_PKTS, + EFX_MAC_RX_LE_64_PKTS, + EFX_MAC_RX_65_TO_127_PKTS, + EFX_MAC_RX_128_TO_255_PKTS, + EFX_MAC_RX_256_TO_511_PKTS, + EFX_MAC_RX_512_TO_1023_PKTS, + EFX_MAC_RX_1024_TO_15XX_PKTS, + EFX_MAC_RX_GE_15XX_PKTS, + EFX_MAC_RX_ERRORS, + EFX_MAC_RX_FCS_ERRORS, + EFX_MAC_RX_DROP_EVENTS, + EFX_MAC_RX_FALSE_CARRIER_ERRORS, + EFX_MAC_RX_SYMBOL_ERRORS, + EFX_MAC_RX_ALIGN_ERRORS, + EFX_MAC_RX_INTERNAL_ERRORS, + EFX_MAC_RX_JABBER_PKTS, + EFX_MAC_RX_LANE0_CHAR_ERR, + EFX_MAC_RX_LANE1_CHAR_ERR, + EFX_MAC_RX_LANE2_CHAR_ERR, + EFX_MAC_RX_LANE3_CHAR_ERR, + EFX_MAC_RX_LANE0_DISP_ERR, + EFX_MAC_RX_LANE1_DISP_ERR, + EFX_MAC_RX_LANE2_DISP_ERR, + EFX_MAC_RX_LANE3_DISP_ERR, + EFX_MAC_RX_MATCH_FAULT, + EFX_MAC_RX_NODESC_DROP_CNT, + EFX_MAC_TX_OCTETS, + EFX_MAC_TX_PKTS, + EFX_MAC_TX_UNICST_PKTS, + EFX_MAC_TX_MULTICST_PKTS, + EFX_MAC_TX_BRDCST_PKTS, + EFX_MAC_TX_PAUSE_PKTS, + EFX_MAC_TX_LE_64_PKTS, + EFX_MAC_TX_65_TO_127_PKTS, + EFX_MAC_TX_128_TO_255_PKTS, + EFX_MAC_TX_256_TO_511_PKTS, + EFX_MAC_TX_512_TO_1023_PKTS, + EFX_MAC_TX_1024_TO_15XX_PKTS, + EFX_MAC_TX_GE_15XX_PKTS, + EFX_MAC_TX_ERRORS, + EFX_MAC_TX_SGL_COL_PKTS, + EFX_MAC_TX_MULT_COL_PKTS, + EFX_MAC_TX_EX_COL_PKTS, + EFX_MAC_TX_LATE_COL_PKTS, + EFX_MAC_TX_DEF_PKTS, + EFX_MAC_TX_EX_DEF_PKTS, + EFX_MAC_PM_TRUNC_BB_OVERFLOW, + EFX_MAC_PM_DISCARD_BB_OVERFLOW, + EFX_MAC_PM_TRUNC_VFIFO_FULL, + EFX_MAC_PM_DISCARD_VFIFO_FULL, + EFX_MAC_PM_TRUNC_QBB, + EFX_MAC_PM_DISCARD_QBB, + EFX_MAC_PM_DISCARD_MAPPING, + EFX_MAC_RXDP_Q_DISABLED_PKTS, + EFX_MAC_RXDP_DI_DROPPED_PKTS, + EFX_MAC_RXDP_STREAMING_PKTS, + EFX_MAC_RXDP_HLB_FETCH, + EFX_MAC_RXDP_HLB_WAIT, + EFX_MAC_VADAPTER_RX_UNICAST_PACKETS, + EFX_MAC_VADAPTER_RX_UNICAST_BYTES, + EFX_MAC_VADAPTER_RX_MULTICAST_PACKETS, + EFX_MAC_VADAPTER_RX_MULTICAST_BYTES, + EFX_MAC_VADAPTER_RX_BROADCAST_PACKETS, + EFX_MAC_VADAPTER_RX_BROADCAST_BYTES, + EFX_MAC_VADAPTER_RX_BAD_PACKETS, + EFX_MAC_VADAPTER_RX_BAD_BYTES, + EFX_MAC_VADAPTER_RX_OVERFLOW, + EFX_MAC_VADAPTER_TX_UNICAST_PACKETS, + EFX_MAC_VADAPTER_TX_UNICAST_BYTES, + EFX_MAC_VADAPTER_TX_MULTICAST_PACKETS, + EFX_MAC_VADAPTER_TX_MULTICAST_BYTES, + EFX_MAC_VADAPTER_TX_BROADCAST_PACKETS, + EFX_MAC_VADAPTER_TX_BROADCAST_BYTES, + EFX_MAC_VADAPTER_TX_BAD_PACKETS, + EFX_MAC_VADAPTER_TX_BAD_BYTES, + EFX_MAC_VADAPTER_TX_OVERFLOW, + EFX_MAC_FEC_UNCORRECTED_ERRORS, + EFX_MAC_FEC_CORRECTED_ERRORS, + EFX_MAC_FEC_CORRECTED_SYMBOLS_LANE0, + EFX_MAC_FEC_CORRECTED_SYMBOLS_LANE1, + EFX_MAC_FEC_CORRECTED_SYMBOLS_LANE2, + EFX_MAC_FEC_CORRECTED_SYMBOLS_LANE3, + EFX_MAC_CTPIO_VI_BUSY_FALLBACK, + EFX_MAC_CTPIO_LONG_WRITE_SUCCESS, + EFX_MAC_CTPIO_MISSING_DBELL_FAIL, + EFX_MAC_CTPIO_OVERFLOW_FAIL, + EFX_MAC_CTPIO_UNDERFLOW_FAIL, + EFX_MAC_CTPIO_TIMEOUT_FAIL, + EFX_MAC_CTPIO_NONCONTIG_WR_FAIL, + EFX_MAC_CTPIO_FRM_CLOBBER_FAIL, + EFX_MAC_CTPIO_INVALID_WR_FAIL, + EFX_MAC_CTPIO_VI_CLOBBER_FALLBACK, + EFX_MAC_CTPIO_UNQUALIFIED_FALLBACK, + EFX_MAC_CTPIO_RUNT_FALLBACK, + EFX_MAC_CTPIO_SUCCESS, + EFX_MAC_CTPIO_FALLBACK, + EFX_MAC_CTPIO_POISON, + EFX_MAC_CTPIO_ERASE, + EFX_MAC_RXDP_SCATTER_DISABLED_TRUNC, + EFX_MAC_RXDP_HLB_IDLE, + EFX_MAC_RXDP_HLB_TIMEOUT, + EFX_MAC_NSTATS +} efx_mac_stat_t; + +/* END MKCONFIG GENERATED EfxHeaderMacBlock 
*/ + +#endif /* EFSYS_OPT_MAC_STATS */ + +typedef enum efx_link_mode_e { + EFX_LINK_UNKNOWN = 0, + EFX_LINK_DOWN, + EFX_LINK_10HDX, + EFX_LINK_10FDX, + EFX_LINK_100HDX, + EFX_LINK_100FDX, + EFX_LINK_1000HDX, + EFX_LINK_1000FDX, + EFX_LINK_10000FDX, + EFX_LINK_40000FDX, + EFX_LINK_25000FDX, + EFX_LINK_50000FDX, + EFX_LINK_100000FDX, + EFX_LINK_NMODES +} efx_link_mode_t; + +#define EFX_MAC_ADDR_LEN 6 + +#define EFX_VNI_OR_VSID_LEN 3 + +#define EFX_MAC_ADDR_IS_MULTICAST(_address) (((uint8_t *)_address)[0] & 0x01) + +#define EFX_MAC_MULTICAST_LIST_MAX 256 + +#define EFX_MAC_SDU_MAX 9202 + +#define EFX_MAC_PDU_ADJUSTMENT \ + (/* EtherII */ 14 \ + + /* VLAN */ 4 \ + + /* CRC */ 4 \ + + /* bug16011 */ 16) \ + +#define EFX_MAC_PDU(_sdu) \ + EFX_P2ROUNDUP(size_t, (_sdu) + EFX_MAC_PDU_ADJUSTMENT, 8) + +/* + * Due to the EFX_P2ROUNDUP in EFX_MAC_PDU(), EFX_MAC_SDU_FROM_PDU() may give + * the SDU rounded up slightly. + */ +#define EFX_MAC_SDU_FROM_PDU(_pdu) ((_pdu) - EFX_MAC_PDU_ADJUSTMENT) + +#define EFX_MAC_PDU_MIN 60 +#define EFX_MAC_PDU_MAX EFX_MAC_PDU(EFX_MAC_SDU_MAX) + +extern __checkReturn efx_rc_t +efx_mac_pdu_get( + __in efx_nic_t *enp, + __out size_t *pdu); + +extern __checkReturn efx_rc_t +efx_mac_pdu_set( + __in efx_nic_t *enp, + __in size_t pdu); + +extern __checkReturn efx_rc_t +efx_mac_addr_set( + __in efx_nic_t *enp, + __in uint8_t *addr); + +extern __checkReturn efx_rc_t +efx_mac_filter_set( + __in efx_nic_t *enp, + __in boolean_t all_unicst, + __in boolean_t mulcst, + __in boolean_t all_mulcst, + __in boolean_t brdcst); + +extern void +efx_mac_filter_get_all_ucast_mcast( + __in efx_nic_t *enp, + __out boolean_t *all_unicst, + __out boolean_t *all_mulcst); + +extern __checkReturn efx_rc_t +efx_mac_multicast_list_set( + __in efx_nic_t *enp, + __in_ecount(6*count) uint8_t const *addrs, + __in int count); + +extern __checkReturn efx_rc_t +efx_mac_filter_default_rxq_set( + __in efx_nic_t *enp, + __in efx_rxq_t *erp, + __in boolean_t using_rss); + +extern void +efx_mac_filter_default_rxq_clear( + __in efx_nic_t *enp); + +extern __checkReturn efx_rc_t +efx_mac_drain( + __in efx_nic_t *enp, + __in boolean_t enabled); + +extern __checkReturn efx_rc_t +efx_mac_up( + __in efx_nic_t *enp, + __out boolean_t *mac_upp); + +#define EFX_FCNTL_RESPOND 0x00000001 +#define EFX_FCNTL_GENERATE 0x00000002 + +extern __checkReturn efx_rc_t +efx_mac_fcntl_set( + __in efx_nic_t *enp, + __in unsigned int fcntl, + __in boolean_t autoneg); + +extern void +efx_mac_fcntl_get( + __in efx_nic_t *enp, + __out unsigned int *fcntl_wantedp, + __out unsigned int *fcntl_linkp); + + +#if EFSYS_OPT_MAC_STATS + +#if EFSYS_OPT_NAMES + +extern __checkReturn const char * +efx_mac_stat_name( + __in efx_nic_t *enp, + __in unsigned int id); + +#endif /* EFSYS_OPT_NAMES */ + +#define EFX_MAC_STATS_MASK_BITS_PER_PAGE (8 * sizeof (uint32_t)) + +#define EFX_MAC_STATS_MASK_NPAGES \ + (EFX_P2ROUNDUP(uint32_t, EFX_MAC_NSTATS, \ + EFX_MAC_STATS_MASK_BITS_PER_PAGE) / \ + EFX_MAC_STATS_MASK_BITS_PER_PAGE) + +/* + * Get mask of MAC statistics supported by the hardware. + * + * If mask_size is insufficient to return the mask, EINVAL error is + * returned. EFX_MAC_STATS_MASK_NPAGES multiplied by size of the page + * (which is sizeof (uint32_t)) is sufficient. 
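+ *
+ * A minimal illustrative sketch (assuming "enp" is a probed NIC):
+ *
+ *	uint32_t mask[EFX_MAC_STATS_MASK_NPAGES];
+ *
+ *	if (efx_mac_stats_get_mask(enp, mask, sizeof (mask)) == 0 &&
+ *	    EFX_MAC_STAT_SUPPORTED(mask, EFX_MAC_RX_PKTS)) {
+ *		... EFX_MAC_RX_PKTS is reported by this NIC ...
+ *	}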
+ */ +extern __checkReturn efx_rc_t +efx_mac_stats_get_mask( + __in efx_nic_t *enp, + __out_bcount(mask_size) uint32_t *maskp, + __in size_t mask_size); + +#define EFX_MAC_STAT_SUPPORTED(_mask, _stat) \ + ((_mask)[(_stat) / EFX_MAC_STATS_MASK_BITS_PER_PAGE] & \ + (1ULL << ((_stat) & (EFX_MAC_STATS_MASK_BITS_PER_PAGE - 1)))) + + +extern __checkReturn efx_rc_t +efx_mac_stats_clear( + __in efx_nic_t *enp); + +/* + * Upload mac statistics supported by the hardware into the given buffer. + * + * The DMA buffer must be 4Kbyte aligned and sized to hold at least + * efx_nic_cfg_t::enc_mac_stats_nstats 64bit counters. + * + * The hardware will only DMA statistics that it understands (of course). + * Drivers should not make any assumptions about which statistics are + * supported, especially when the statistics are generated by firmware. + * + * Thus, drivers should zero this buffer before use, so that not-understood + * statistics read back as zero. + */ +extern __checkReturn efx_rc_t +efx_mac_stats_upload( + __in efx_nic_t *enp, + __in efsys_mem_t *esmp); + +extern __checkReturn efx_rc_t +efx_mac_stats_periodic( + __in efx_nic_t *enp, + __in efsys_mem_t *esmp, + __in uint16_t period_ms, + __in boolean_t events); + +extern __checkReturn efx_rc_t +efx_mac_stats_update( + __in efx_nic_t *enp, + __in efsys_mem_t *esmp, + __inout_ecount(EFX_MAC_NSTATS) efsys_stat_t *stat, + __inout_opt uint32_t *generationp); + +#endif /* EFSYS_OPT_MAC_STATS */ + +/* MON */ + +typedef enum efx_mon_type_e { + EFX_MON_INVALID = 0, + EFX_MON_SFC90X0, + EFX_MON_SFC91X0, + EFX_MON_SFC92X0, + EFX_MON_NTYPES +} efx_mon_type_t; + +#if EFSYS_OPT_NAMES + +extern const char * +efx_mon_name( + __in efx_nic_t *enp); + +#endif /* EFSYS_OPT_NAMES */ + +extern __checkReturn efx_rc_t +efx_mon_init( + __in efx_nic_t *enp); + +#if EFSYS_OPT_MON_STATS + +#define EFX_MON_STATS_PAGE_SIZE 0x100 +#define EFX_MON_MASK_ELEMENT_SIZE 32 + +/* START MKCONFIG GENERATED MonitorHeaderStatsBlock 78b65c8d5af9747b */ +typedef enum efx_mon_stat_e { + EFX_MON_STAT_CONTROLLER_TEMP, + EFX_MON_STAT_PHY_COMMON_TEMP, + EFX_MON_STAT_CONTROLLER_COOLING, + EFX_MON_STAT_PHY0_TEMP, + EFX_MON_STAT_PHY0_COOLING, + EFX_MON_STAT_PHY1_TEMP, + EFX_MON_STAT_PHY1_COOLING, + EFX_MON_STAT_IN_1V0, + EFX_MON_STAT_IN_1V2, + EFX_MON_STAT_IN_1V8, + EFX_MON_STAT_IN_2V5, + EFX_MON_STAT_IN_3V3, + EFX_MON_STAT_IN_12V0, + EFX_MON_STAT_IN_1V2A, + EFX_MON_STAT_IN_VREF, + EFX_MON_STAT_OUT_VAOE, + EFX_MON_STAT_AOE_TEMP, + EFX_MON_STAT_PSU_AOE_TEMP, + EFX_MON_STAT_PSU_TEMP, + EFX_MON_STAT_FAN_0, + EFX_MON_STAT_FAN_1, + EFX_MON_STAT_FAN_2, + EFX_MON_STAT_FAN_3, + EFX_MON_STAT_FAN_4, + EFX_MON_STAT_IN_VAOE, + EFX_MON_STAT_OUT_IAOE, + EFX_MON_STAT_IN_IAOE, + EFX_MON_STAT_NIC_POWER, + EFX_MON_STAT_IN_0V9, + EFX_MON_STAT_IN_I0V9, + EFX_MON_STAT_IN_I1V2, + EFX_MON_STAT_IN_0V9_ADC, + EFX_MON_STAT_CONTROLLER_2_TEMP, + EFX_MON_STAT_VREG_INTERNAL_TEMP, + EFX_MON_STAT_VREG_0V9_TEMP, + EFX_MON_STAT_VREG_1V2_TEMP, + EFX_MON_STAT_CONTROLLER_VPTAT, + EFX_MON_STAT_CONTROLLER_INTERNAL_TEMP, + EFX_MON_STAT_CONTROLLER_VPTAT_EXTADC, + EFX_MON_STAT_CONTROLLER_INTERNAL_TEMP_EXTADC, + EFX_MON_STAT_AMBIENT_TEMP, + EFX_MON_STAT_AIRFLOW, + EFX_MON_STAT_VDD08D_VSS08D_CSR, + EFX_MON_STAT_VDD08D_VSS08D_CSR_EXTADC, + EFX_MON_STAT_HOTPOINT_TEMP, + EFX_MON_STAT_PHY_POWER_PORT0, + EFX_MON_STAT_PHY_POWER_PORT1, + EFX_MON_STAT_MUM_VCC, + EFX_MON_STAT_IN_0V9_A, + EFX_MON_STAT_IN_I0V9_A, + EFX_MON_STAT_VREG_0V9_A_TEMP, + EFX_MON_STAT_IN_0V9_B, + EFX_MON_STAT_IN_I0V9_B, + EFX_MON_STAT_VREG_0V9_B_TEMP, + 
EFX_MON_STAT_CCOM_AVREG_1V2_SUPPLY, + EFX_MON_STAT_CCOM_AVREG_1V2_SUPPLY_EXTADC, + EFX_MON_STAT_CCOM_AVREG_1V8_SUPPLY, + EFX_MON_STAT_CCOM_AVREG_1V8_SUPPLY_EXTADC, + EFX_MON_STAT_CONTROLLER_MASTER_VPTAT, + EFX_MON_STAT_CONTROLLER_MASTER_INTERNAL_TEMP, + EFX_MON_STAT_CONTROLLER_MASTER_VPTAT_EXTADC, + EFX_MON_STAT_CONTROLLER_MASTER_INTERNAL_TEMP_EXTADC, + EFX_MON_STAT_CONTROLLER_SLAVE_VPTAT, + EFX_MON_STAT_CONTROLLER_SLAVE_INTERNAL_TEMP, + EFX_MON_STAT_CONTROLLER_SLAVE_VPTAT_EXTADC, + EFX_MON_STAT_CONTROLLER_SLAVE_INTERNAL_TEMP_EXTADC, + EFX_MON_STAT_SODIMM_VOUT, + EFX_MON_STAT_SODIMM_0_TEMP, + EFX_MON_STAT_SODIMM_1_TEMP, + EFX_MON_STAT_PHY0_VCC, + EFX_MON_STAT_PHY1_VCC, + EFX_MON_STAT_CONTROLLER_TDIODE_TEMP, + EFX_MON_STAT_BOARD_FRONT_TEMP, + EFX_MON_STAT_BOARD_BACK_TEMP, + EFX_MON_STAT_IN_I1V8, + EFX_MON_STAT_IN_I2V5, + EFX_MON_STAT_IN_I3V3, + EFX_MON_STAT_IN_I12V0, + EFX_MON_STAT_IN_1V3, + EFX_MON_STAT_IN_I1V3, + EFX_MON_NSTATS +} efx_mon_stat_t; + +/* END MKCONFIG GENERATED MonitorHeaderStatsBlock */ + +typedef enum efx_mon_stat_state_e { + EFX_MON_STAT_STATE_OK = 0, + EFX_MON_STAT_STATE_WARNING = 1, + EFX_MON_STAT_STATE_FATAL = 2, + EFX_MON_STAT_STATE_BROKEN = 3, + EFX_MON_STAT_STATE_NO_READING = 4, +} efx_mon_stat_state_t; + +typedef enum efx_mon_stat_unit_e { + EFX_MON_STAT_UNIT_UNKNOWN = 0, + EFX_MON_STAT_UNIT_BOOL, + EFX_MON_STAT_UNIT_TEMP_C, + EFX_MON_STAT_UNIT_VOLTAGE_MV, + EFX_MON_STAT_UNIT_CURRENT_MA, + EFX_MON_STAT_UNIT_POWER_W, + EFX_MON_STAT_UNIT_RPM, + EFX_MON_NUNITS +} efx_mon_stat_unit_t; + +typedef struct efx_mon_stat_value_s { + uint16_t emsv_value; + efx_mon_stat_state_t emsv_state; + efx_mon_stat_unit_t emsv_unit; +} efx_mon_stat_value_t; + +typedef struct efx_mon_limit_value_s { + uint16_t emlv_warning_min; + uint16_t emlv_warning_max; + uint16_t emlv_fatal_min; + uint16_t emlv_fatal_max; +} efx_mon_stat_limits_t; + +typedef enum efx_mon_stat_portmask_e { + EFX_MON_STAT_PORTMAP_NONE = 0, + EFX_MON_STAT_PORTMAP_PORT0 = 1, + EFX_MON_STAT_PORTMAP_PORT1 = 2, + EFX_MON_STAT_PORTMAP_PORT2 = 3, + EFX_MON_STAT_PORTMAP_PORT3 = 4, + EFX_MON_STAT_PORTMAP_ALL = (-1), + EFX_MON_STAT_PORTMAP_UNKNOWN = (-2) +} efx_mon_stat_portmask_t; + +#if EFSYS_OPT_NAMES + +extern const char * +efx_mon_stat_name( + __in efx_nic_t *enp, + __in efx_mon_stat_t id); + +extern const char * +efx_mon_stat_description( + __in efx_nic_t *enp, + __in efx_mon_stat_t id); + +#endif /* EFSYS_OPT_NAMES */ + +extern __checkReturn boolean_t +efx_mon_mcdi_to_efx_stat( + __in int mcdi_index, + __out efx_mon_stat_t *statp); + +extern __checkReturn boolean_t +efx_mon_get_stat_unit( + __in efx_mon_stat_t stat, + __out efx_mon_stat_unit_t *unitp); + +extern __checkReturn boolean_t +efx_mon_get_stat_portmap( + __in efx_mon_stat_t stat, + __out efx_mon_stat_portmask_t *maskp); + +extern __checkReturn efx_rc_t +efx_mon_stats_update( + __in efx_nic_t *enp, + __in efsys_mem_t *esmp, + __inout_ecount(EFX_MON_NSTATS) efx_mon_stat_value_t *values); + +extern __checkReturn efx_rc_t +efx_mon_limits_update( + __in efx_nic_t *enp, + __inout_ecount(EFX_MON_NSTATS) efx_mon_stat_limits_t *values); + +#endif /* EFSYS_OPT_MON_STATS */ + +extern void +efx_mon_fini( + __in efx_nic_t *enp); + +/* PHY */ + +extern __checkReturn efx_rc_t +efx_phy_verify( + __in efx_nic_t *enp); + +#if EFSYS_OPT_PHY_LED_CONTROL + +typedef enum efx_phy_led_mode_e { + EFX_PHY_LED_DEFAULT = 0, + EFX_PHY_LED_OFF, + EFX_PHY_LED_ON, + EFX_PHY_LED_FLASH, + EFX_PHY_LED_NMODES +} efx_phy_led_mode_t; + +extern __checkReturn efx_rc_t +efx_phy_led_set( + __in efx_nic_t 
*enp, + __in efx_phy_led_mode_t mode); + +#endif /* EFSYS_OPT_PHY_LED_CONTROL */ + +extern __checkReturn efx_rc_t +efx_port_init( + __in efx_nic_t *enp); + +#if EFSYS_OPT_LOOPBACK + +typedef enum efx_loopback_type_e { + EFX_LOOPBACK_OFF = 0, + EFX_LOOPBACK_DATA = 1, + EFX_LOOPBACK_GMAC = 2, + EFX_LOOPBACK_XGMII = 3, + EFX_LOOPBACK_XGXS = 4, + EFX_LOOPBACK_XAUI = 5, + EFX_LOOPBACK_GMII = 6, + EFX_LOOPBACK_SGMII = 7, + EFX_LOOPBACK_XGBR = 8, + EFX_LOOPBACK_XFI = 9, + EFX_LOOPBACK_XAUI_FAR = 10, + EFX_LOOPBACK_GMII_FAR = 11, + EFX_LOOPBACK_SGMII_FAR = 12, + EFX_LOOPBACK_XFI_FAR = 13, + EFX_LOOPBACK_GPHY = 14, + EFX_LOOPBACK_PHY_XS = 15, + EFX_LOOPBACK_PCS = 16, + EFX_LOOPBACK_PMA_PMD = 17, + EFX_LOOPBACK_XPORT = 18, + EFX_LOOPBACK_XGMII_WS = 19, + EFX_LOOPBACK_XAUI_WS = 20, + EFX_LOOPBACK_XAUI_WS_FAR = 21, + EFX_LOOPBACK_XAUI_WS_NEAR = 22, + EFX_LOOPBACK_GMII_WS = 23, + EFX_LOOPBACK_XFI_WS = 24, + EFX_LOOPBACK_XFI_WS_FAR = 25, + EFX_LOOPBACK_PHYXS_WS = 26, + EFX_LOOPBACK_PMA_INT = 27, + EFX_LOOPBACK_SD_NEAR = 28, + EFX_LOOPBACK_SD_FAR = 29, + EFX_LOOPBACK_PMA_INT_WS = 30, + EFX_LOOPBACK_SD_FEP2_WS = 31, + EFX_LOOPBACK_SD_FEP1_5_WS = 32, + EFX_LOOPBACK_SD_FEP_WS = 33, + EFX_LOOPBACK_SD_FES_WS = 34, + EFX_LOOPBACK_AOE_INT_NEAR = 35, + EFX_LOOPBACK_DATA_WS = 36, + EFX_LOOPBACK_FORCE_EXT_LINK = 37, + EFX_LOOPBACK_NTYPES +} efx_loopback_type_t; + +typedef enum efx_loopback_kind_e { + EFX_LOOPBACK_KIND_OFF = 0, + EFX_LOOPBACK_KIND_ALL, + EFX_LOOPBACK_KIND_MAC, + EFX_LOOPBACK_KIND_PHY, + EFX_LOOPBACK_NKINDS +} efx_loopback_kind_t; + +extern void +efx_loopback_mask( + __in efx_loopback_kind_t loopback_kind, + __out efx_qword_t *maskp); + +extern __checkReturn efx_rc_t +efx_port_loopback_set( + __in efx_nic_t *enp, + __in efx_link_mode_t link_mode, + __in efx_loopback_type_t type); + +#if EFSYS_OPT_NAMES + +extern __checkReturn const char * +efx_loopback_type_name( + __in efx_nic_t *enp, + __in efx_loopback_type_t type); + +#endif /* EFSYS_OPT_NAMES */ + +#endif /* EFSYS_OPT_LOOPBACK */ + +extern __checkReturn efx_rc_t +efx_port_poll( + __in efx_nic_t *enp, + __out_opt efx_link_mode_t *link_modep); + +extern void +efx_port_fini( + __in efx_nic_t *enp); + +typedef enum efx_phy_cap_type_e { + EFX_PHY_CAP_INVALID = 0, + EFX_PHY_CAP_10HDX, + EFX_PHY_CAP_10FDX, + EFX_PHY_CAP_100HDX, + EFX_PHY_CAP_100FDX, + EFX_PHY_CAP_1000HDX, + EFX_PHY_CAP_1000FDX, + EFX_PHY_CAP_10000FDX, + EFX_PHY_CAP_PAUSE, + EFX_PHY_CAP_ASYM, + EFX_PHY_CAP_AN, + EFX_PHY_CAP_40000FDX, + EFX_PHY_CAP_DDM, + EFX_PHY_CAP_100000FDX, + EFX_PHY_CAP_25000FDX, + EFX_PHY_CAP_50000FDX, + EFX_PHY_CAP_BASER_FEC, + EFX_PHY_CAP_BASER_FEC_REQUESTED, + EFX_PHY_CAP_RS_FEC, + EFX_PHY_CAP_RS_FEC_REQUESTED, + EFX_PHY_CAP_25G_BASER_FEC, + EFX_PHY_CAP_25G_BASER_FEC_REQUESTED, + EFX_PHY_CAP_NTYPES +} efx_phy_cap_type_t; + + +#define EFX_PHY_CAP_CURRENT 0x00000000 +#define EFX_PHY_CAP_DEFAULT 0x00000001 +#define EFX_PHY_CAP_PERM 0x00000002 + +extern void +efx_phy_adv_cap_get( + __in efx_nic_t *enp, + __in uint32_t flag, + __out uint32_t *maskp); + +extern __checkReturn efx_rc_t +efx_phy_adv_cap_set( + __in efx_nic_t *enp, + __in uint32_t mask); + +extern void +efx_phy_lp_cap_get( + __in efx_nic_t *enp, + __out uint32_t *maskp); + +extern __checkReturn efx_rc_t +efx_phy_oui_get( + __in efx_nic_t *enp, + __out uint32_t *ouip); + +typedef enum efx_phy_media_type_e { + EFX_PHY_MEDIA_INVALID = 0, + EFX_PHY_MEDIA_XAUI, + EFX_PHY_MEDIA_CX4, + EFX_PHY_MEDIA_KX4, + EFX_PHY_MEDIA_XFP, + EFX_PHY_MEDIA_SFP_PLUS, + EFX_PHY_MEDIA_BASE_T, + EFX_PHY_MEDIA_QSFP_PLUS, + 
EFX_PHY_MEDIA_NTYPES +} efx_phy_media_type_t; + +/* + * Get the type of medium currently used. If the board has ports for + * modules, a module is present, and we recognise the media type of + * the module, then this will be the media type of the module. + * Otherwise it will be the media type of the port. + */ +extern void +efx_phy_media_type_get( + __in efx_nic_t *enp, + __out efx_phy_media_type_t *typep); + +/* + * 2-wire device address of the base information in accordance with SFF-8472 + * Diagnostic Monitoring Interface for Optical Transceivers section + * 4 Memory Organization. + */ +#define EFX_PHY_MEDIA_INFO_DEV_ADDR_SFP_BASE 0xA0 + +/* + * 2-wire device address of the digital diagnostics monitoring interface + * in accordance with SFF-8472 Diagnostic Monitoring Interface for Optical + * Transceivers section 4 Memory Organization. + */ +#define EFX_PHY_MEDIA_INFO_DEV_ADDR_SFP_DDM 0xA2 + +/* + * Hard wired 2-wire device address for QSFP+ in accordance with SFF-8436 + * QSFP+ 10 Gbs 4X PLUGGABLE TRANSCEIVER section 7.4 Device Addressing and + * Operation. + */ +#define EFX_PHY_MEDIA_INFO_DEV_ADDR_QSFP 0xA0 + +/* + * Maximum accessible data offset for PHY module information. + */ +#define EFX_PHY_MEDIA_INFO_MAX_OFFSET 0x100 + + +extern __checkReturn efx_rc_t +efx_phy_module_get_info( + __in efx_nic_t *enp, + __in uint8_t dev_addr, + __in size_t offset, + __in size_t len, + __out_bcount(len) uint8_t *data); + +#if EFSYS_OPT_PHY_STATS + +/* START MKCONFIG GENERATED PhyHeaderStatsBlock 30ed56ad501f8e36 */ +typedef enum efx_phy_stat_e { + EFX_PHY_STAT_OUI, + EFX_PHY_STAT_PMA_PMD_LINK_UP, + EFX_PHY_STAT_PMA_PMD_RX_FAULT, + EFX_PHY_STAT_PMA_PMD_TX_FAULT, + EFX_PHY_STAT_PMA_PMD_REV_A, + EFX_PHY_STAT_PMA_PMD_REV_B, + EFX_PHY_STAT_PMA_PMD_REV_C, + EFX_PHY_STAT_PMA_PMD_REV_D, + EFX_PHY_STAT_PCS_LINK_UP, + EFX_PHY_STAT_PCS_RX_FAULT, + EFX_PHY_STAT_PCS_TX_FAULT, + EFX_PHY_STAT_PCS_BER, + EFX_PHY_STAT_PCS_BLOCK_ERRORS, + EFX_PHY_STAT_PHY_XS_LINK_UP, + EFX_PHY_STAT_PHY_XS_RX_FAULT, + EFX_PHY_STAT_PHY_XS_TX_FAULT, + EFX_PHY_STAT_PHY_XS_ALIGN, + EFX_PHY_STAT_PHY_XS_SYNC_A, + EFX_PHY_STAT_PHY_XS_SYNC_B, + EFX_PHY_STAT_PHY_XS_SYNC_C, + EFX_PHY_STAT_PHY_XS_SYNC_D, + EFX_PHY_STAT_AN_LINK_UP, + EFX_PHY_STAT_AN_MASTER, + EFX_PHY_STAT_AN_LOCAL_RX_OK, + EFX_PHY_STAT_AN_REMOTE_RX_OK, + EFX_PHY_STAT_CL22EXT_LINK_UP, + EFX_PHY_STAT_SNR_A, + EFX_PHY_STAT_SNR_B, + EFX_PHY_STAT_SNR_C, + EFX_PHY_STAT_SNR_D, + EFX_PHY_STAT_PMA_PMD_SIGNAL_A, + EFX_PHY_STAT_PMA_PMD_SIGNAL_B, + EFX_PHY_STAT_PMA_PMD_SIGNAL_C, + EFX_PHY_STAT_PMA_PMD_SIGNAL_D, + EFX_PHY_STAT_AN_COMPLETE, + EFX_PHY_STAT_PMA_PMD_REV_MAJOR, + EFX_PHY_STAT_PMA_PMD_REV_MINOR, + EFX_PHY_STAT_PMA_PMD_REV_MICRO, + EFX_PHY_STAT_PCS_FW_VERSION_0, + EFX_PHY_STAT_PCS_FW_VERSION_1, + EFX_PHY_STAT_PCS_FW_VERSION_2, + EFX_PHY_STAT_PCS_FW_VERSION_3, + EFX_PHY_STAT_PCS_FW_BUILD_YY, + EFX_PHY_STAT_PCS_FW_BUILD_MM, + EFX_PHY_STAT_PCS_FW_BUILD_DD, + EFX_PHY_STAT_PCS_OP_MODE, + EFX_PHY_NSTATS +} efx_phy_stat_t; + +/* END MKCONFIG GENERATED PhyHeaderStatsBlock */ + +#if EFSYS_OPT_NAMES + +extern const char * +efx_phy_stat_name( + __in efx_nic_t *enp, + __in efx_phy_stat_t stat); + +#endif /* EFSYS_OPT_NAMES */ + +#define EFX_PHY_STATS_SIZE 0x100 + +extern __checkReturn efx_rc_t +efx_phy_stats_update( + __in efx_nic_t *enp, + __in efsys_mem_t *esmp, + __inout_ecount(EFX_PHY_NSTATS) uint32_t *stat); + +#endif /* EFSYS_OPT_PHY_STATS */ + + +#if EFSYS_OPT_BIST + +typedef enum efx_bist_type_e { + EFX_BIST_TYPE_UNKNOWN, + EFX_BIST_TYPE_PHY_NORMAL, + EFX_BIST_TYPE_PHY_CABLE_SHORT, + 
EFX_BIST_TYPE_PHY_CABLE_LONG, + EFX_BIST_TYPE_MC_MEM, /* Test the MC DMEM and IMEM */ + EFX_BIST_TYPE_SAT_MEM, /* Test the DMEM and IMEM of satellite cpus */ + EFX_BIST_TYPE_REG, /* Test the register memories */ + EFX_BIST_TYPE_NTYPES, +} efx_bist_type_t; + +typedef enum efx_bist_result_e { + EFX_BIST_RESULT_UNKNOWN, + EFX_BIST_RESULT_RUNNING, + EFX_BIST_RESULT_PASSED, + EFX_BIST_RESULT_FAILED, +} efx_bist_result_t; + +typedef enum efx_phy_cable_status_e { + EFX_PHY_CABLE_STATUS_OK, + EFX_PHY_CABLE_STATUS_INVALID, + EFX_PHY_CABLE_STATUS_OPEN, + EFX_PHY_CABLE_STATUS_INTRAPAIRSHORT, + EFX_PHY_CABLE_STATUS_INTERPAIRSHORT, + EFX_PHY_CABLE_STATUS_BUSY, +} efx_phy_cable_status_t; + +typedef enum efx_bist_value_e { + EFX_BIST_PHY_CABLE_LENGTH_A, + EFX_BIST_PHY_CABLE_LENGTH_B, + EFX_BIST_PHY_CABLE_LENGTH_C, + EFX_BIST_PHY_CABLE_LENGTH_D, + EFX_BIST_PHY_CABLE_STATUS_A, + EFX_BIST_PHY_CABLE_STATUS_B, + EFX_BIST_PHY_CABLE_STATUS_C, + EFX_BIST_PHY_CABLE_STATUS_D, + EFX_BIST_FAULT_CODE, + /* + * Memory BIST specific values. These match to the MC_CMD_BIST_POLL + * response. + */ + EFX_BIST_MEM_TEST, + EFX_BIST_MEM_ADDR, + EFX_BIST_MEM_BUS, + EFX_BIST_MEM_EXPECT, + EFX_BIST_MEM_ACTUAL, + EFX_BIST_MEM_ECC, + EFX_BIST_MEM_ECC_PARITY, + EFX_BIST_MEM_ECC_FATAL, + EFX_BIST_NVALUES, +} efx_bist_value_t; + +extern __checkReturn efx_rc_t +efx_bist_enable_offline( + __in efx_nic_t *enp); + +extern __checkReturn efx_rc_t +efx_bist_start( + __in efx_nic_t *enp, + __in efx_bist_type_t type); + +extern __checkReturn efx_rc_t +efx_bist_poll( + __in efx_nic_t *enp, + __in efx_bist_type_t type, + __out efx_bist_result_t *resultp, + __out_opt uint32_t *value_maskp, + __out_ecount_opt(count) unsigned long *valuesp, + __in size_t count); + +extern void +efx_bist_stop( + __in efx_nic_t *enp, + __in efx_bist_type_t type); + +#endif /* EFSYS_OPT_BIST */ + +#define EFX_FEATURE_IPV6 0x00000001 +#define EFX_FEATURE_LFSR_HASH_INSERT 0x00000002 +#define EFX_FEATURE_LINK_EVENTS 0x00000004 +#define EFX_FEATURE_PERIODIC_MAC_STATS 0x00000008 +#define EFX_FEATURE_MCDI 0x00000020 +#define EFX_FEATURE_LOOKAHEAD_SPLIT 0x00000040 +#define EFX_FEATURE_MAC_HEADER_FILTERS 0x00000080 +#define EFX_FEATURE_TURBO 0x00000100 +#define EFX_FEATURE_MCDI_DMA 0x00000200 +#define EFX_FEATURE_TX_SRC_FILTERS 0x00000400 +#define EFX_FEATURE_PIO_BUFFERS 0x00000800 +#define EFX_FEATURE_FW_ASSISTED_TSO 0x00001000 +#define EFX_FEATURE_FW_ASSISTED_TSO_V2 0x00002000 +#define EFX_FEATURE_PACKED_STREAM 0x00004000 +#define EFX_FEATURE_TXQ_CKSUM_OP_DESC 0x00008000 + +typedef enum efx_tunnel_protocol_e { + EFX_TUNNEL_PROTOCOL_NONE = 0, + EFX_TUNNEL_PROTOCOL_VXLAN, + EFX_TUNNEL_PROTOCOL_GENEVE, + EFX_TUNNEL_PROTOCOL_NVGRE, + EFX_TUNNEL_NPROTOS +} efx_tunnel_protocol_t; + +typedef enum efx_vi_window_shift_e { + EFX_VI_WINDOW_SHIFT_INVALID = 0, + EFX_VI_WINDOW_SHIFT_8K = 13, + EFX_VI_WINDOW_SHIFT_16K = 14, + EFX_VI_WINDOW_SHIFT_64K = 16, +} efx_vi_window_shift_t; + +typedef struct efx_nic_cfg_s { + uint32_t enc_board_type; + uint32_t enc_phy_type; +#if EFSYS_OPT_NAMES + char enc_phy_name[21]; +#endif + char enc_phy_revision[21]; + efx_mon_type_t enc_mon_type; +#if EFSYS_OPT_MON_STATS + uint32_t enc_mon_stat_dma_buf_size; + uint32_t enc_mon_stat_mask[(EFX_MON_NSTATS + 31) / 32]; +#endif + unsigned int enc_features; + efx_vi_window_shift_t enc_vi_window_shift; + uint8_t enc_mac_addr[6]; + uint8_t enc_port; /* PHY port number */ + uint32_t enc_intr_vec_base; + uint32_t enc_intr_limit; + uint32_t enc_evq_limit; + uint32_t enc_txq_limit; + uint32_t enc_rxq_limit; + uint32_t 
enc_evq_max_nevs; + uint32_t enc_evq_min_nevs; + uint32_t enc_rxq_max_ndescs; + uint32_t enc_rxq_min_ndescs; + uint32_t enc_txq_max_ndescs; + uint32_t enc_txq_min_ndescs; + uint32_t enc_buftbl_limit; + uint32_t enc_piobuf_limit; + uint32_t enc_piobuf_size; + uint32_t enc_piobuf_min_alloc_size; + uint32_t enc_evq_timer_quantum_ns; + uint32_t enc_evq_timer_max_us; + uint32_t enc_clk_mult; + uint32_t enc_ev_desc_size; + uint32_t enc_rx_desc_size; + uint32_t enc_tx_desc_size; + uint32_t enc_rx_prefix_size; + uint32_t enc_rx_buf_align_start; + uint32_t enc_rx_buf_align_end; +#if EFSYS_OPT_RX_SCALE + uint32_t enc_rx_scale_max_exclusive_contexts; + /* + * Mask of supported hash algorithms. + * Hash algorithm types are used as the bit indices. + */ + uint32_t enc_rx_scale_hash_alg_mask; + /* + * Indicates whether port numbers can be included to the + * input data for hash computation. + */ + boolean_t enc_rx_scale_l4_hash_supported; + boolean_t enc_rx_scale_additional_modes_supported; +#endif /* EFSYS_OPT_RX_SCALE */ +#if EFSYS_OPT_LOOPBACK + efx_qword_t enc_loopback_types[EFX_LINK_NMODES]; +#endif /* EFSYS_OPT_LOOPBACK */ +#if EFSYS_OPT_PHY_FLAGS + uint32_t enc_phy_flags_mask; +#endif /* EFSYS_OPT_PHY_FLAGS */ +#if EFSYS_OPT_PHY_LED_CONTROL + uint32_t enc_led_mask; +#endif /* EFSYS_OPT_PHY_LED_CONTROL */ +#if EFSYS_OPT_PHY_STATS + uint64_t enc_phy_stat_mask; +#endif /* EFSYS_OPT_PHY_STATS */ +#if EFSYS_OPT_MCDI + uint8_t enc_mcdi_mdio_channel; +#if EFSYS_OPT_PHY_STATS + uint32_t enc_mcdi_phy_stat_mask; +#endif /* EFSYS_OPT_PHY_STATS */ +#if EFSYS_OPT_MON_STATS + uint32_t *enc_mcdi_sensor_maskp; + uint32_t enc_mcdi_sensor_mask_size; +#endif /* EFSYS_OPT_MON_STATS */ +#endif /* EFSYS_OPT_MCDI */ +#if EFSYS_OPT_BIST + uint32_t enc_bist_mask; +#endif /* EFSYS_OPT_BIST */ +#if EFX_OPTS_EF10() + uint32_t enc_pf; + uint32_t enc_vf; + uint32_t enc_privilege_mask; +#endif /* EFX_OPTS_EF10() */ + boolean_t enc_bug26807_workaround; + boolean_t enc_bug35388_workaround; + boolean_t enc_bug41750_workaround; + boolean_t enc_bug61265_workaround; + boolean_t enc_bug61297_workaround; + boolean_t enc_rx_batching_enabled; + /* Maximum number of descriptors completed in an rx event. */ + uint32_t enc_rx_batch_max; + /* Number of rx descriptors the hardware requires for a push. */ + uint32_t enc_rx_push_align; + /* Maximum amount of data in DMA descriptor */ + uint32_t enc_tx_dma_desc_size_max; + /* + * Boundary which DMA descriptor data must not cross or 0 if no + * limitation. + */ + uint32_t enc_tx_dma_desc_boundary; + /* + * Maximum number of bytes into the packet the TCP header can start for + * the hardware to apply TSO packet edits. 
+ */ + uint32_t enc_tx_tso_tcp_header_offset_limit; + boolean_t enc_fw_assisted_tso_enabled; + boolean_t enc_fw_assisted_tso_v2_enabled; + boolean_t enc_fw_assisted_tso_v2_encap_enabled; + /* Number of TSO contexts on the NIC (FATSOv2) */ + uint32_t enc_fw_assisted_tso_v2_n_contexts; + boolean_t enc_hw_tx_insert_vlan_enabled; + /* Number of PFs on the NIC */ + uint32_t enc_hw_pf_count; + /* Datapath firmware vadapter/vport/vswitch support */ + boolean_t enc_datapath_cap_evb; + /* Datapath firmware vport reconfigure support */ + boolean_t enc_vport_reconfigure_supported; + boolean_t enc_rx_disable_scatter_supported; + boolean_t enc_allow_set_mac_with_installed_filters; + boolean_t enc_enhanced_set_mac_supported; + boolean_t enc_init_evq_v2_supported; + boolean_t enc_no_cont_ev_mode_supported; + boolean_t enc_init_rxq_with_buffer_size; + boolean_t enc_rx_packed_stream_supported; + boolean_t enc_rx_var_packed_stream_supported; + boolean_t enc_rx_es_super_buffer_supported; + boolean_t enc_fw_subvariant_no_tx_csum_supported; + boolean_t enc_pm_and_rxdp_counters; + boolean_t enc_mac_stats_40g_tx_size_bins; + uint32_t enc_tunnel_encapsulations_supported; + /* + * NIC global maximum for unique UDP tunnel ports shared by all + * functions. + */ + uint32_t enc_tunnel_config_udp_entries_max; + /* External port identifier */ + uint8_t enc_external_port; + uint32_t enc_mcdi_max_payload_length; + /* VPD may be per-PF or global */ + boolean_t enc_vpd_is_global; + /* Minimum unidirectional bandwidth in Mb/s to max out all ports */ + uint32_t enc_required_pcie_bandwidth_mbps; + uint32_t enc_max_pcie_link_gen; + /* Firmware verifies integrity of NVRAM updates */ + boolean_t enc_nvram_update_verify_result_supported; + /* Firmware supports polled NVRAM updates on select partitions */ + boolean_t enc_nvram_update_poll_verify_result_supported; + /* Firmware accepts updates via the BUNDLE partition */ + boolean_t enc_nvram_bundle_update_supported; + /* Firmware support for extended MAC_STATS buffer */ + uint32_t enc_mac_stats_nstats; + boolean_t enc_fec_counters; + boolean_t enc_hlb_counters; + /* Firmware support for "FLAG" and "MARK" filter actions */ + boolean_t enc_filter_action_flag_supported; + boolean_t enc_filter_action_mark_supported; + uint32_t enc_filter_action_mark_max; + /* Port assigned to this PCI function */ + uint32_t enc_assigned_port; +} efx_nic_cfg_t; + +#define EFX_VPORT_PCI_FUNCTION_IS_PF(configp) \ + ((configp)->evc_function == 0xffff) + +#define EFX_PCI_FUNCTION_IS_PF(_encp) ((_encp)->enc_vf == 0xffff) +#define EFX_PCI_FUNCTION_IS_VF(_encp) ((_encp)->enc_vf != 0xffff) + +#define EFX_PCI_FUNCTION(_encp) \ + (EFX_PCI_FUNCTION_IS_PF(_encp) ? 
(_encp)->enc_pf : (_encp)->enc_vf) + +#define EFX_PCI_VF_PARENT(_encp) ((_encp)->enc_pf) + +extern const efx_nic_cfg_t * +efx_nic_cfg_get( + __in const efx_nic_t *enp); + +/* RxDPCPU firmware id values by which FW variant can be identified */ +#define EFX_RXDP_FULL_FEATURED_FW_ID 0x0 +#define EFX_RXDP_LOW_LATENCY_FW_ID 0x1 +#define EFX_RXDP_PACKED_STREAM_FW_ID 0x2 +#define EFX_RXDP_RULES_ENGINE_FW_ID 0x5 +#define EFX_RXDP_DPDK_FW_ID 0x6 + +typedef struct efx_nic_fw_info_s { + /* Basic FW version information */ + uint16_t enfi_mc_fw_version[4]; + /* + * If datapath capabilities can be detected, + * additional FW information is to be shown + */ + boolean_t enfi_dpcpu_fw_ids_valid; + /* Rx and Tx datapath CPU FW IDs */ + uint16_t enfi_rx_dpcpu_fw_id; + uint16_t enfi_tx_dpcpu_fw_id; +} efx_nic_fw_info_t; + +extern __checkReturn efx_rc_t +efx_nic_get_fw_version( + __in efx_nic_t *enp, + __out efx_nic_fw_info_t *enfip); + +/* Driver resource limits (minimum required/maximum usable). */ +typedef struct efx_drv_limits_s { + uint32_t edl_min_evq_count; + uint32_t edl_max_evq_count; + + uint32_t edl_min_rxq_count; + uint32_t edl_max_rxq_count; + + uint32_t edl_min_txq_count; + uint32_t edl_max_txq_count; + + /* PIO blocks (sub-allocated from piobuf) */ + uint32_t edl_min_pio_alloc_size; + uint32_t edl_max_pio_alloc_count; +} efx_drv_limits_t; + +extern __checkReturn efx_rc_t +efx_nic_set_drv_limits( + __inout efx_nic_t *enp, + __in efx_drv_limits_t *edlp); + +/* + * Register the OS driver version string for management agents + * (e.g. via NC-SI). The content length is provided (i.e. no + * NUL terminator). Use length 0 to indicate no version string + * should be advertised. It is valid to set the version string + * only before efx_nic_probe() is called. + */ +extern __checkReturn efx_rc_t +efx_nic_set_drv_version( + __inout efx_nic_t *enp, + __in_ecount(length) char const *verp, + __in size_t length); + +typedef enum efx_nic_region_e { + EFX_REGION_VI, /* Memory BAR UC mapping */ + EFX_REGION_PIO_WRITE_VI, /* Memory BAR WC mapping */ +} efx_nic_region_t; + +extern __checkReturn efx_rc_t +efx_nic_get_bar_region( + __in efx_nic_t *enp, + __in efx_nic_region_t region, + __out uint32_t *offsetp, + __out size_t *sizep); + +extern __checkReturn efx_rc_t +efx_nic_get_vi_pool( + __in efx_nic_t *enp, + __out uint32_t *evq_countp, + __out uint32_t *rxq_countp, + __out uint32_t *txq_countp); + + +#if EFSYS_OPT_VPD + +typedef enum efx_vpd_tag_e { + EFX_VPD_ID = 0x02, + EFX_VPD_END = 0x0f, + EFX_VPD_RO = 0x10, + EFX_VPD_RW = 0x11, +} efx_vpd_tag_t; + +typedef uint16_t efx_vpd_keyword_t; + +typedef struct efx_vpd_value_s { + efx_vpd_tag_t evv_tag; + efx_vpd_keyword_t evv_keyword; + uint8_t evv_length; + uint8_t evv_value[0x100]; +} efx_vpd_value_t; + + +#define EFX_VPD_KEYWORD(x, y) ((x) | ((y) << 8)) + +extern __checkReturn efx_rc_t +efx_vpd_init( + __in efx_nic_t *enp); + +extern __checkReturn efx_rc_t +efx_vpd_size( + __in efx_nic_t *enp, + __out size_t *sizep); + +extern __checkReturn efx_rc_t +efx_vpd_read( + __in efx_nic_t *enp, + __out_bcount(size) caddr_t data, + __in size_t size); + +extern __checkReturn efx_rc_t +efx_vpd_verify( + __in efx_nic_t *enp, + __in_bcount(size) caddr_t data, + __in size_t size); + +extern __checkReturn efx_rc_t +efx_vpd_reinit( + __in efx_nic_t *enp, + __in_bcount(size) caddr_t data, + __in size_t size); + +extern __checkReturn efx_rc_t +efx_vpd_get( + __in efx_nic_t *enp, + __in_bcount(size) caddr_t data, + __in size_t size, + __inout efx_vpd_value_t *evvp); + +extern 
__checkReturn efx_rc_t +efx_vpd_set( + __in efx_nic_t *enp, + __inout_bcount(size) caddr_t data, + __in size_t size, + __in efx_vpd_value_t *evvp); + +extern __checkReturn efx_rc_t +efx_vpd_next( + __in efx_nic_t *enp, + __inout_bcount(size) caddr_t data, + __in size_t size, + __out efx_vpd_value_t *evvp, + __inout unsigned int *contp); + +extern __checkReturn efx_rc_t +efx_vpd_write( + __in efx_nic_t *enp, + __in_bcount(size) caddr_t data, + __in size_t size); + +extern void +efx_vpd_fini( + __in efx_nic_t *enp); + +#endif /* EFSYS_OPT_VPD */ + +/* NVRAM */ + +#if EFSYS_OPT_NVRAM + +typedef enum efx_nvram_type_e { + EFX_NVRAM_INVALID = 0, + EFX_NVRAM_BOOTROM, + EFX_NVRAM_BOOTROM_CFG, + EFX_NVRAM_MC_FIRMWARE, + EFX_NVRAM_MC_GOLDEN, + EFX_NVRAM_PHY, + EFX_NVRAM_NULLPHY, + EFX_NVRAM_FPGA, + EFX_NVRAM_FCFW, + EFX_NVRAM_CPLD, + EFX_NVRAM_FPGA_BACKUP, + EFX_NVRAM_DYNAMIC_CFG, + EFX_NVRAM_LICENSE, + EFX_NVRAM_UEFIROM, + EFX_NVRAM_MUM_FIRMWARE, + EFX_NVRAM_DYNCONFIG_DEFAULTS, + EFX_NVRAM_ROMCONFIG_DEFAULTS, + EFX_NVRAM_BUNDLE, + EFX_NVRAM_BUNDLE_METADATA, + EFX_NVRAM_NTYPES, +} efx_nvram_type_t; + +typedef struct efx_nvram_info_s { + uint32_t eni_flags; + uint32_t eni_partn_size; + uint32_t eni_address; + uint32_t eni_erase_size; + uint32_t eni_write_size; +} efx_nvram_info_t; + +#define EFX_NVRAM_FLAG_READ_ONLY (1 << 0) + +extern __checkReturn efx_rc_t +efx_nvram_init( + __in efx_nic_t *enp); + +#if EFSYS_OPT_DIAG + +extern __checkReturn efx_rc_t +efx_nvram_test( + __in efx_nic_t *enp); + +#endif /* EFSYS_OPT_DIAG */ + +extern __checkReturn efx_rc_t +efx_nvram_size( + __in efx_nic_t *enp, + __in efx_nvram_type_t type, + __out size_t *sizep); + +extern __checkReturn efx_rc_t +efx_nvram_info( + __in efx_nic_t *enp, + __in efx_nvram_type_t type, + __out efx_nvram_info_t *enip); + +extern __checkReturn efx_rc_t +efx_nvram_rw_start( + __in efx_nic_t *enp, + __in efx_nvram_type_t type, + __out_opt size_t *pref_chunkp); + +extern __checkReturn efx_rc_t +efx_nvram_rw_finish( + __in efx_nic_t *enp, + __in efx_nvram_type_t type, + __out_opt uint32_t *verify_resultp); + +extern __checkReturn efx_rc_t +efx_nvram_get_version( + __in efx_nic_t *enp, + __in efx_nvram_type_t type, + __out uint32_t *subtypep, + __out_ecount(4) uint16_t version[4]); + +extern __checkReturn efx_rc_t +efx_nvram_read_chunk( + __in efx_nic_t *enp, + __in efx_nvram_type_t type, + __in unsigned int offset, + __out_bcount(size) caddr_t data, + __in size_t size); + +extern __checkReturn efx_rc_t +efx_nvram_read_backup( + __in efx_nic_t *enp, + __in efx_nvram_type_t type, + __in unsigned int offset, + __out_bcount(size) caddr_t data, + __in size_t size); + +extern __checkReturn efx_rc_t +efx_nvram_set_version( + __in efx_nic_t *enp, + __in efx_nvram_type_t type, + __in_ecount(4) uint16_t version[4]); + +extern __checkReturn efx_rc_t +efx_nvram_validate( + __in efx_nic_t *enp, + __in efx_nvram_type_t type, + __in_bcount(partn_size) caddr_t partn_data, + __in size_t partn_size); + +extern __checkReturn efx_rc_t +efx_nvram_erase( + __in efx_nic_t *enp, + __in efx_nvram_type_t type); + +extern __checkReturn efx_rc_t +efx_nvram_write_chunk( + __in efx_nic_t *enp, + __in efx_nvram_type_t type, + __in unsigned int offset, + __in_bcount(size) caddr_t data, + __in size_t size); + +extern void +efx_nvram_fini( + __in efx_nic_t *enp); + +#endif /* EFSYS_OPT_NVRAM */ + +#if EFSYS_OPT_BOOTCFG + +/* Report size and offset of bootcfg sector in NVRAM partition. 
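+ * An illustrative call (with "enp" a probed NIC and "pf" the PCI
+ * function of interest):
+ *
+ *	size_t offset, max_size;
+ *
+ *	if (efx_bootcfg_sector_info(enp, pf, NULL, &offset, &max_size) == 0)
+ *		... the sector starts at "offset" and holds at most
+ *		    "max_size" bytes ...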
*/ +extern __checkReturn efx_rc_t +efx_bootcfg_sector_info( + __in efx_nic_t *enp, + __in uint32_t pf, + __out_opt uint32_t *sector_countp, + __out size_t *offsetp, + __out size_t *max_sizep); + +/* + * Copy bootcfg sector data to a target buffer which may differ in size. + * Optionally corrects format errors in source buffer. + */ +extern efx_rc_t +efx_bootcfg_copy_sector( + __in efx_nic_t *enp, + __inout_bcount(sector_length) + uint8_t *sector, + __in size_t sector_length, + __out_bcount(data_size) uint8_t *data, + __in size_t data_size, + __in boolean_t handle_format_errors); + +extern efx_rc_t +efx_bootcfg_read( + __in efx_nic_t *enp, + __out_bcount(size) uint8_t *data, + __in size_t size); + +extern efx_rc_t +efx_bootcfg_write( + __in efx_nic_t *enp, + __in_bcount(size) uint8_t *data, + __in size_t size); + + +/* + * Processing routines for buffers arranged in the DHCP/BOOTP option format + * (see https://tools.ietf.org/html/rfc1533) + * + * Summarising the format: the buffer is a sequence of options. All options + * begin with a tag octet, which uniquely identifies the option. Fixed- + * length options without data consist of only a tag octet. Only options PAD + * (0) and END (255) are fixed length. All other options are variable-length + * with a length octet following the tag octet. The value of the length + * octet does not include the two octets specifying the tag and length. The + * length octet is followed by "length" octets of data. + * + * Option data may be a sequence of sub-options in the same format. The data + * content of the encapsulating option is one or more encapsulated sub-options, + * with no terminating END tag is required. + * + * To be valid, the top-level sequence of options should be terminated by an + * END tag. The buffer should be padded with the PAD byte. + * + * When stored to NVRAM, the DHCP option format buffer is preceded by a + * checksum octet. The full buffer (including after the END tag) contributes + * to the checksum, hence the need to fill the buffer to the end with PAD. 
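+ *
+ * For example, an option with (illustrative) tag 0xC3 carrying the two
+ * value octets 0x01 and 0x02 is encoded as the four octets C3 02 01 02;
+ * a minimal valid buffer is then C3 02 01 02 FF 00 00 ... (END followed
+ * by PAD octets to the end of the buffer).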
+ */ + +#define EFX_DHCP_END ((uint8_t)0xff) +#define EFX_DHCP_PAD ((uint8_t)0) + +#define EFX_DHCP_ENCAP_OPT(encapsulator, encapsulated) \ + (uint16_t)(((encapsulator) << 8) | (encapsulated)) + +extern __checkReturn uint8_t +efx_dhcp_csum( + __in_bcount(size) uint8_t const *data, + __in size_t size); + +extern __checkReturn efx_rc_t +efx_dhcp_verify( + __in_bcount(size) uint8_t const *data, + __in size_t size, + __out_opt size_t *usedp); + +extern __checkReturn efx_rc_t +efx_dhcp_find_tag( + __in_bcount(buffer_length) uint8_t *bufferp, + __in size_t buffer_length, + __in uint16_t opt, + __deref_out uint8_t **valuepp, + __out size_t *value_lengthp); + +extern __checkReturn efx_rc_t +efx_dhcp_find_end( + __in_bcount(buffer_length) uint8_t *bufferp, + __in size_t buffer_length, + __deref_out uint8_t **endpp); + + +extern __checkReturn efx_rc_t +efx_dhcp_delete_tag( + __inout_bcount(buffer_length) uint8_t *bufferp, + __in size_t buffer_length, + __in uint16_t opt); + +extern __checkReturn efx_rc_t +efx_dhcp_add_tag( + __inout_bcount(buffer_length) uint8_t *bufferp, + __in size_t buffer_length, + __in uint16_t opt, + __in_bcount_opt(value_length) uint8_t *valuep, + __in size_t value_length); + +extern __checkReturn efx_rc_t +efx_dhcp_update_tag( + __inout_bcount(buffer_length) uint8_t *bufferp, + __in size_t buffer_length, + __in uint16_t opt, + __in uint8_t *value_locationp, + __in_bcount_opt(value_length) uint8_t *valuep, + __in size_t value_length); + + +#endif /* EFSYS_OPT_BOOTCFG */ + +#if EFSYS_OPT_IMAGE_LAYOUT + +#include "ef10_signed_image_layout.h" + +/* + * Image header used in unsigned and signed image layouts (see SF-102785-PS). + * + * NOTE: + * The image header format is extensible. However, older drivers require an + * exact match of image header version and header length when validating and + * writing firmware images. + * + * To avoid breaking backward compatibility, we use the upper bits of the + * controller version fields to contain an extra version number used for + * combined bootROM and UEFI ROM images on EF10 and later (to hold the UEFI ROM + * version). See bug39254 and SF-102785-PS for details. 
+ */ +typedef struct efx_image_header_s { + uint32_t eih_magic; + uint32_t eih_version; + uint32_t eih_type; + uint32_t eih_subtype; + uint32_t eih_code_size; + uint32_t eih_size; + union { + uint32_t eih_controller_version_min; + struct { + uint16_t eih_controller_version_min_short; + uint8_t eih_extra_version_a; + uint8_t eih_extra_version_b; + }; + }; + union { + uint32_t eih_controller_version_max; + struct { + uint16_t eih_controller_version_max_short; + uint8_t eih_extra_version_c; + uint8_t eih_extra_version_d; + }; + }; + uint16_t eih_code_version_a; + uint16_t eih_code_version_b; + uint16_t eih_code_version_c; + uint16_t eih_code_version_d; +} efx_image_header_t; + +#define EFX_IMAGE_HEADER_SIZE (40) +#define EFX_IMAGE_HEADER_VERSION (4) +#define EFX_IMAGE_HEADER_MAGIC (0x106F1A5) + + +typedef struct efx_image_trailer_s { + uint32_t eit_crc; +} efx_image_trailer_t; + +#define EFX_IMAGE_TRAILER_SIZE (4) + +typedef enum efx_image_format_e { + EFX_IMAGE_FORMAT_NO_IMAGE, + EFX_IMAGE_FORMAT_INVALID, + EFX_IMAGE_FORMAT_UNSIGNED, + EFX_IMAGE_FORMAT_SIGNED, + EFX_IMAGE_FORMAT_SIGNED_PACKAGE +} efx_image_format_t; + +typedef struct efx_image_info_s { + efx_image_format_t eii_format; + uint8_t * eii_imagep; + size_t eii_image_size; + efx_image_header_t * eii_headerp; +} efx_image_info_t; + +extern __checkReturn efx_rc_t +efx_check_reflash_image( + __in void *bufferp, + __in uint32_t buffer_size, + __out efx_image_info_t *infop); + +extern __checkReturn efx_rc_t +efx_build_signed_image_write_buffer( + __out_bcount(buffer_size) + uint8_t *bufferp, + __in uint32_t buffer_size, + __in efx_image_info_t *infop, + __out efx_image_header_t **headerpp); + +#endif /* EFSYS_OPT_IMAGE_LAYOUT */ + +#if EFSYS_OPT_DIAG + +typedef enum efx_pattern_type_t { + EFX_PATTERN_BYTE_INCREMENT = 0, + EFX_PATTERN_ALL_THE_SAME, + EFX_PATTERN_BIT_ALTERNATE, + EFX_PATTERN_BYTE_ALTERNATE, + EFX_PATTERN_BYTE_CHANGING, + EFX_PATTERN_BIT_SWEEP, + EFX_PATTERN_NTYPES +} efx_pattern_type_t; + +typedef void +(*efx_sram_pattern_fn_t)( + __in size_t row, + __in boolean_t negate, + __out efx_qword_t *eqp); + +extern __checkReturn efx_rc_t +efx_sram_test( + __in efx_nic_t *enp, + __in efx_pattern_type_t type); + +#endif /* EFSYS_OPT_DIAG */ + +extern __checkReturn efx_rc_t +efx_sram_buf_tbl_set( + __in efx_nic_t *enp, + __in uint32_t id, + __in efsys_mem_t *esmp, + __in size_t n); + +extern void +efx_sram_buf_tbl_clear( + __in efx_nic_t *enp, + __in uint32_t id, + __in size_t n); + +#define EFX_BUF_TBL_SIZE 0x20000 + +#define EFX_BUF_SIZE 4096 + +/* EV */ + +typedef struct efx_evq_s efx_evq_t; + +#if EFSYS_OPT_QSTATS + +/* START MKCONFIG GENERATED EfxHeaderEventQueueBlock 0a147ace40844969 */ +typedef enum efx_ev_qstat_e { + EV_ALL, + EV_RX, + EV_RX_OK, + EV_RX_FRM_TRUNC, + EV_RX_TOBE_DISC, + EV_RX_PAUSE_FRM_ERR, + EV_RX_BUF_OWNER_ID_ERR, + EV_RX_IPV4_HDR_CHKSUM_ERR, + EV_RX_TCP_UDP_CHKSUM_ERR, + EV_RX_ETH_CRC_ERR, + EV_RX_IP_FRAG_ERR, + EV_RX_MCAST_PKT, + EV_RX_MCAST_HASH_MATCH, + EV_RX_TCP_IPV4, + EV_RX_TCP_IPV6, + EV_RX_UDP_IPV4, + EV_RX_UDP_IPV6, + EV_RX_OTHER_IPV4, + EV_RX_OTHER_IPV6, + EV_RX_NON_IP, + EV_RX_BATCH, + EV_TX, + EV_TX_WQ_FF_FULL, + EV_TX_PKT_ERR, + EV_TX_PKT_TOO_BIG, + EV_TX_UNEXPECTED, + EV_GLOBAL, + EV_GLOBAL_MNT, + EV_DRIVER, + EV_DRIVER_SRM_UPD_DONE, + EV_DRIVER_TX_DESCQ_FLS_DONE, + EV_DRIVER_RX_DESCQ_FLS_DONE, + EV_DRIVER_RX_DESCQ_FLS_FAILED, + EV_DRIVER_RX_DSC_ERROR, + EV_DRIVER_TX_DSC_ERROR, + EV_DRV_GEN, + EV_MCDI_RESPONSE, + EV_RX_PARSE_INCOMPLETE, + EV_NQSTATS +} efx_ev_qstat_t; + +/* END MKCONFIG 
GENERATED EfxHeaderEventQueueBlock */ + +#endif /* EFSYS_OPT_QSTATS */ + +extern __checkReturn efx_rc_t +efx_ev_init( + __in efx_nic_t *enp); + +extern void +efx_ev_fini( + __in efx_nic_t *enp); + +extern __checkReturn size_t +efx_evq_size( + __in const efx_nic_t *enp, + __in unsigned int ndescs); + +extern __checkReturn unsigned int +efx_evq_nbufs( + __in const efx_nic_t *enp, + __in unsigned int ndescs); + +#define EFX_EVQ_FLAGS_TYPE_MASK (0x3) +#define EFX_EVQ_FLAGS_TYPE_AUTO (0x0) +#define EFX_EVQ_FLAGS_TYPE_THROUGHPUT (0x1) +#define EFX_EVQ_FLAGS_TYPE_LOW_LATENCY (0x2) + +#define EFX_EVQ_FLAGS_NOTIFY_MASK (0xC) +#define EFX_EVQ_FLAGS_NOTIFY_INTERRUPT (0x0) /* Interrupting (default) */ +#define EFX_EVQ_FLAGS_NOTIFY_DISABLED (0x4) /* Non-interrupting */ + +/* + * Use the NO_CONT_EV RX event format, which allows the firmware to operate more + * efficiently at high data rates. See SF-109306-TC 5.11 "Events for RXQs in + * NO_CONT_EV mode". + * + * NO_CONT_EV requires EVQ_RX_MERGE and RXQ_FORCED_EV_MERGING to both be set, + * which is the case when an event queue is set to THROUGHPUT mode. + */ +#define EFX_EVQ_FLAGS_NO_CONT_EV (0x10) + +extern __checkReturn efx_rc_t +efx_ev_qcreate( + __in efx_nic_t *enp, + __in unsigned int index, + __in efsys_mem_t *esmp, + __in size_t ndescs, + __in uint32_t id, + __in uint32_t us, + __in uint32_t flags, + __deref_out efx_evq_t **eepp); + +extern void +efx_ev_qpost( + __in efx_evq_t *eep, + __in uint16_t data); + +typedef __checkReturn boolean_t +(*efx_initialized_ev_t)( + __in_opt void *arg); + +#define EFX_PKT_UNICAST 0x0004 +#define EFX_PKT_START 0x0008 + +#define EFX_PKT_VLAN_TAGGED 0x0010 +#define EFX_CKSUM_TCPUDP 0x0020 +#define EFX_CKSUM_IPV4 0x0040 +#define EFX_PKT_CONT 0x0080 + +#define EFX_CHECK_VLAN 0x0100 +#define EFX_PKT_TCP 0x0200 +#define EFX_PKT_UDP 0x0400 +#define EFX_PKT_IPV4 0x0800 + +#define EFX_PKT_IPV6 0x1000 +#define EFX_PKT_PREFIX_LEN 0x2000 +#define EFX_ADDR_MISMATCH 0x4000 +#define EFX_DISCARD 0x8000 + +/* + * The following flags are used only for packed stream + * mode. The values for the flags are reused to fit into 16 bit, + * since EFX_PKT_START and EFX_PKT_CONT are never used in + * packed stream mode + */ +#define EFX_PKT_PACKED_STREAM_NEW_BUFFER EFX_PKT_START +#define EFX_PKT_PACKED_STREAM_PARSE_INCOMPLETE EFX_PKT_CONT + + +#define EFX_EV_RX_NLABELS 32 +#define EFX_EV_TX_NLABELS 32 + +typedef __checkReturn boolean_t +(*efx_rx_ev_t)( + __in_opt void *arg, + __in uint32_t label, + __in uint32_t id, + __in uint32_t size, + __in uint16_t flags); + +#if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER + +/* + * Packed stream mode is documented in SF-112241-TC. + * The general idea is that, instead of putting each incoming + * packet into a separate buffer which is specified in a RX + * descriptor, a large buffer is provided to the hardware and + * packets are put there in a continuous stream. + * The main advantage of such an approach is that RX queue refilling + * happens much less frequently. + * + * Equal stride packed stream mode is documented in SF-119419-TC. + * The general idea is to utilize advantages of the packed stream, + * but avoid indirection in packets representation. + * The main advantage of such an approach is that RX queue refilling + * happens much less frequently and packets buffers are independent + * from upper layers point of view. 
+ */ + +typedef __checkReturn boolean_t +(*efx_rx_ps_ev_t)( + __in_opt void *arg, + __in uint32_t label, + __in uint32_t id, + __in uint32_t pkt_count, + __in uint16_t flags); + +#endif + +typedef __checkReturn boolean_t +(*efx_tx_ev_t)( + __in_opt void *arg, + __in uint32_t label, + __in uint32_t id); + +#define EFX_EXCEPTION_RX_RECOVERY 0x00000001 +#define EFX_EXCEPTION_RX_DSC_ERROR 0x00000002 +#define EFX_EXCEPTION_TX_DSC_ERROR 0x00000003 +#define EFX_EXCEPTION_UNKNOWN_SENSOREVT 0x00000004 +#define EFX_EXCEPTION_FWALERT_SRAM 0x00000005 +#define EFX_EXCEPTION_UNKNOWN_FWALERT 0x00000006 +#define EFX_EXCEPTION_RX_ERROR 0x00000007 +#define EFX_EXCEPTION_TX_ERROR 0x00000008 +#define EFX_EXCEPTION_EV_ERROR 0x00000009 + +typedef __checkReturn boolean_t +(*efx_exception_ev_t)( + __in_opt void *arg, + __in uint32_t label, + __in uint32_t data); + +typedef __checkReturn boolean_t +(*efx_rxq_flush_done_ev_t)( + __in_opt void *arg, + __in uint32_t rxq_index); + +typedef __checkReturn boolean_t +(*efx_rxq_flush_failed_ev_t)( + __in_opt void *arg, + __in uint32_t rxq_index); + +typedef __checkReturn boolean_t +(*efx_txq_flush_done_ev_t)( + __in_opt void *arg, + __in uint32_t txq_index); + +typedef __checkReturn boolean_t +(*efx_software_ev_t)( + __in_opt void *arg, + __in uint16_t magic); + +typedef __checkReturn boolean_t +(*efx_sram_ev_t)( + __in_opt void *arg, + __in uint32_t code); + +#define EFX_SRAM_CLEAR 0 +#define EFX_SRAM_UPDATE 1 +#define EFX_SRAM_ILLEGAL_CLEAR 2 + +typedef __checkReturn boolean_t +(*efx_wake_up_ev_t)( + __in_opt void *arg, + __in uint32_t label); + +typedef __checkReturn boolean_t +(*efx_timer_ev_t)( + __in_opt void *arg, + __in uint32_t label); + +typedef __checkReturn boolean_t +(*efx_link_change_ev_t)( + __in_opt void *arg, + __in efx_link_mode_t link_mode); + +#if EFSYS_OPT_MON_STATS + +typedef __checkReturn boolean_t +(*efx_monitor_ev_t)( + __in_opt void *arg, + __in efx_mon_stat_t id, + __in efx_mon_stat_value_t value); + +#endif /* EFSYS_OPT_MON_STATS */ + +#if EFSYS_OPT_MAC_STATS + +typedef __checkReturn boolean_t +(*efx_mac_stats_ev_t)( + __in_opt void *arg, + __in uint32_t generation); + +#endif /* EFSYS_OPT_MAC_STATS */ + +typedef struct efx_ev_callbacks_s { + efx_initialized_ev_t eec_initialized; + efx_rx_ev_t eec_rx; +#if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER + efx_rx_ps_ev_t eec_rx_ps; +#endif + efx_tx_ev_t eec_tx; + efx_exception_ev_t eec_exception; + efx_rxq_flush_done_ev_t eec_rxq_flush_done; + efx_rxq_flush_failed_ev_t eec_rxq_flush_failed; + efx_txq_flush_done_ev_t eec_txq_flush_done; + efx_software_ev_t eec_software; + efx_sram_ev_t eec_sram; + efx_wake_up_ev_t eec_wake_up; + efx_timer_ev_t eec_timer; + efx_link_change_ev_t eec_link_change; +#if EFSYS_OPT_MON_STATS + efx_monitor_ev_t eec_monitor; +#endif /* EFSYS_OPT_MON_STATS */ +#if EFSYS_OPT_MAC_STATS + efx_mac_stats_ev_t eec_mac_stats; +#endif /* EFSYS_OPT_MAC_STATS */ +} efx_ev_callbacks_t; + +extern __checkReturn boolean_t +efx_ev_qpending( + __in efx_evq_t *eep, + __in unsigned int count); + +#if EFSYS_OPT_EV_PREFETCH + +extern void +efx_ev_qprefetch( + __in efx_evq_t *eep, + __in unsigned int count); + +#endif /* EFSYS_OPT_EV_PREFETCH */ + +extern void +efx_ev_qpoll( + __in efx_evq_t *eep, + __inout unsigned int *countp, + __in const efx_ev_callbacks_t *eecp, + __in_opt void *arg); + +extern __checkReturn efx_rc_t +efx_ev_usecs_to_ticks( + __in efx_nic_t *enp, + __in unsigned int usecs, + __out unsigned int *ticksp); + +extern __checkReturn efx_rc_t +efx_ev_qmoderate( + __in 
efx_evq_t *eep, + __in unsigned int us); + +extern __checkReturn efx_rc_t +efx_ev_qprime( + __in efx_evq_t *eep, + __in unsigned int count); + +#if EFSYS_OPT_QSTATS + +#if EFSYS_OPT_NAMES + +extern const char * +efx_ev_qstat_name( + __in efx_nic_t *enp, + __in unsigned int id); + +#endif /* EFSYS_OPT_NAMES */ + +extern void +efx_ev_qstats_update( + __in efx_evq_t *eep, + __inout_ecount(EV_NQSTATS) efsys_stat_t *stat); + +#endif /* EFSYS_OPT_QSTATS */ + +extern void +efx_ev_qdestroy( + __in efx_evq_t *eep); + +/* RX */ + +extern __checkReturn efx_rc_t +efx_rx_init( + __inout efx_nic_t *enp); + +extern void +efx_rx_fini( + __in efx_nic_t *enp); + +#if EFSYS_OPT_RX_SCATTER + __checkReturn efx_rc_t +efx_rx_scatter_enable( + __in efx_nic_t *enp, + __in unsigned int buf_size); +#endif /* EFSYS_OPT_RX_SCATTER */ + +/* Handle to represent use of the default RSS context. */ +#define EFX_RSS_CONTEXT_DEFAULT 0xffffffff + +#if EFSYS_OPT_RX_SCALE + +typedef enum efx_rx_hash_alg_e { + EFX_RX_HASHALG_LFSR = 0, + EFX_RX_HASHALG_TOEPLITZ, + EFX_RX_HASHALG_PACKED_STREAM, + EFX_RX_NHASHALGS +} efx_rx_hash_alg_t; + +/* + * Legacy hash type flags. + * + * They represent standard tuples for distinct traffic classes. + */ +#define EFX_RX_HASH_IPV4 (1U << 0) +#define EFX_RX_HASH_TCPIPV4 (1U << 1) +#define EFX_RX_HASH_IPV6 (1U << 2) +#define EFX_RX_HASH_TCPIPV6 (1U << 3) + +#define EFX_RX_HASH_LEGACY_MASK \ + (EFX_RX_HASH_IPV4 | \ + EFX_RX_HASH_TCPIPV4 | \ + EFX_RX_HASH_IPV6 | \ + EFX_RX_HASH_TCPIPV6) + +/* + * The type of the argument used by efx_rx_scale_mode_set() to + * provide a means for the client drivers to configure hashing. + * + * A properly constructed value can either be: + * - a combination of legacy flags + * - a combination of EFX_RX_HASH() flags + */ +typedef uint32_t efx_rx_hash_type_t; + +typedef enum efx_rx_hash_support_e { + EFX_RX_HASH_UNAVAILABLE = 0, /* Hardware hash not inserted */ + EFX_RX_HASH_AVAILABLE /* Insert hash with/without RSS */ +} efx_rx_hash_support_t; + +#define EFX_RSS_KEY_SIZE 40 /* RSS key size (bytes) */ +#define EFX_RSS_TBL_SIZE 128 /* Rows in RX indirection table */ +#define EFX_MAXRSS 64 /* RX indirection entry range */ +#define EFX_MAXRSS_LEGACY 16 /* See bug16611 and bug17213 */ + +typedef enum efx_rx_scale_context_type_e { + EFX_RX_SCALE_UNAVAILABLE = 0, /* No RX scale context */ + EFX_RX_SCALE_EXCLUSIVE, /* Writable key/indirection table */ + EFX_RX_SCALE_SHARED /* Read-only key/indirection table */ +} efx_rx_scale_context_type_t; + +/* + * Traffic classes eligible for hash computation. + * + * Select packet headers used in computing the receive hash. + * This uses the same encoding as the RSS_MODES field of + * MC_CMD_RSS_CONTEXT_SET_FLAGS. + */ +#define EFX_RX_CLASS_IPV4_TCP_LBN 8 +#define EFX_RX_CLASS_IPV4_TCP_WIDTH 4 +#define EFX_RX_CLASS_IPV4_UDP_LBN 12 +#define EFX_RX_CLASS_IPV4_UDP_WIDTH 4 +#define EFX_RX_CLASS_IPV4_LBN 16 +#define EFX_RX_CLASS_IPV4_WIDTH 4 +#define EFX_RX_CLASS_IPV6_TCP_LBN 20 +#define EFX_RX_CLASS_IPV6_TCP_WIDTH 4 +#define EFX_RX_CLASS_IPV6_UDP_LBN 24 +#define EFX_RX_CLASS_IPV6_UDP_WIDTH 4 +#define EFX_RX_CLASS_IPV6_LBN 28 +#define EFX_RX_CLASS_IPV6_WIDTH 4 + +#define EFX_RX_NCLASSES 6 + +/* + * Ancillary flags used to construct generic hash tuples. + * This uses the same encoding as RSS_MODE_HASH_SELECTOR. + */ +#define EFX_RX_CLASS_HASH_SRC_ADDR (1U << 0) +#define EFX_RX_CLASS_HASH_DST_ADDR (1U << 1) +#define EFX_RX_CLASS_HASH_SRC_PORT (1U << 2) +#define EFX_RX_CLASS_HASH_DST_PORT (1U << 3) + +/* + * Generic hash tuples. 
+ * + * They express combinations of packet fields + * which can contribute to the hash value for + * a particular traffic class. + */ +#define EFX_RX_CLASS_HASH_DISABLE 0 + +#define EFX_RX_CLASS_HASH_1TUPLE_SRC EFX_RX_CLASS_HASH_SRC_ADDR +#define EFX_RX_CLASS_HASH_1TUPLE_DST EFX_RX_CLASS_HASH_DST_ADDR + +#define EFX_RX_CLASS_HASH_2TUPLE \ + (EFX_RX_CLASS_HASH_SRC_ADDR | \ + EFX_RX_CLASS_HASH_DST_ADDR) + +#define EFX_RX_CLASS_HASH_2TUPLE_SRC \ + (EFX_RX_CLASS_HASH_SRC_ADDR | \ + EFX_RX_CLASS_HASH_SRC_PORT) + +#define EFX_RX_CLASS_HASH_2TUPLE_DST \ + (EFX_RX_CLASS_HASH_DST_ADDR | \ + EFX_RX_CLASS_HASH_DST_PORT) + +#define EFX_RX_CLASS_HASH_4TUPLE \ + (EFX_RX_CLASS_HASH_SRC_ADDR | \ + EFX_RX_CLASS_HASH_DST_ADDR | \ + EFX_RX_CLASS_HASH_SRC_PORT | \ + EFX_RX_CLASS_HASH_DST_PORT) + +#define EFX_RX_CLASS_HASH_NTUPLES 7 + +/* + * Hash flag constructor. + * + * Resulting flags encode hash tuples for specific traffic classes. + * The client drivers are encouraged to use these flags to form + * a hash type value. + */ +#define EFX_RX_HASH(_class, _tuple) \ + EFX_INSERT_FIELD_NATIVE32(0, 31, \ + EFX_RX_CLASS_##_class, EFX_RX_CLASS_HASH_##_tuple) + +/* + * The maximum number of EFX_RX_HASH() flags. + */ +#define EFX_RX_HASH_NFLAGS (EFX_RX_NCLASSES * EFX_RX_CLASS_HASH_NTUPLES) + +extern __checkReturn efx_rc_t +efx_rx_scale_hash_flags_get( + __in efx_nic_t *enp, + __in efx_rx_hash_alg_t hash_alg, + __out_ecount_part(max_nflags, *nflagsp) unsigned int *flagsp, + __in unsigned int max_nflags, + __out unsigned int *nflagsp); + +extern __checkReturn efx_rc_t +efx_rx_hash_default_support_get( + __in efx_nic_t *enp, + __out efx_rx_hash_support_t *supportp); + + +extern __checkReturn efx_rc_t +efx_rx_scale_default_support_get( + __in efx_nic_t *enp, + __out efx_rx_scale_context_type_t *typep); + +extern __checkReturn efx_rc_t +efx_rx_scale_context_alloc( + __in efx_nic_t *enp, + __in efx_rx_scale_context_type_t type, + __in uint32_t num_queues, + __out uint32_t *rss_contextp); + +extern __checkReturn efx_rc_t +efx_rx_scale_context_free( + __in efx_nic_t *enp, + __in uint32_t rss_context); + +extern __checkReturn efx_rc_t +efx_rx_scale_mode_set( + __in efx_nic_t *enp, + __in uint32_t rss_context, + __in efx_rx_hash_alg_t alg, + __in efx_rx_hash_type_t type, + __in boolean_t insert); + +extern __checkReturn efx_rc_t +efx_rx_scale_tbl_set( + __in efx_nic_t *enp, + __in uint32_t rss_context, + __in_ecount(n) unsigned int *table, + __in size_t n); + +extern __checkReturn efx_rc_t +efx_rx_scale_key_set( + __in efx_nic_t *enp, + __in uint32_t rss_context, + __in_ecount(n) uint8_t *key, + __in size_t n); + +extern __checkReturn uint32_t +efx_pseudo_hdr_hash_get( + __in efx_rxq_t *erp, + __in efx_rx_hash_alg_t func, + __in uint8_t *buffer); + +#endif /* EFSYS_OPT_RX_SCALE */ + +extern __checkReturn efx_rc_t +efx_pseudo_hdr_pkt_length_get( + __in efx_rxq_t *erp, + __in uint8_t *buffer, + __out uint16_t *pkt_lengthp); + +extern __checkReturn size_t +efx_rxq_size( + __in const efx_nic_t *enp, + __in unsigned int ndescs); + +extern __checkReturn unsigned int +efx_rxq_nbufs( + __in const efx_nic_t *enp, + __in unsigned int ndescs); + +#define EFX_RXQ_LIMIT(_ndescs) ((_ndescs) - 16) + +typedef enum efx_rxq_type_e { + EFX_RXQ_TYPE_DEFAULT, + EFX_RXQ_TYPE_PACKED_STREAM, + EFX_RXQ_TYPE_ES_SUPER_BUFFER, + EFX_RXQ_NTYPES +} efx_rxq_type_t; + +/* + * Dummy flag to be used instead of 0 to make it clear that the argument + * is receive queue flags. 
+ */ +#define EFX_RXQ_FLAG_NONE 0x0 +#define EFX_RXQ_FLAG_SCATTER 0x1 +/* + * If tunnels are supported and Rx event can provide information about + * either outer or inner packet classes (e.g. SFN8xxx adapters with + * full-feature firmware variant running), outer classes are requested by + * default. However, if the driver supports tunnels, the flag allows to + * request inner classes which are required to be able to interpret inner + * Rx checksum offload results. + */ +#define EFX_RXQ_FLAG_INNER_CLASSES 0x2 + +extern __checkReturn efx_rc_t +efx_rx_qcreate( + __in efx_nic_t *enp, + __in unsigned int index, + __in unsigned int label, + __in efx_rxq_type_t type, + __in size_t buf_size, + __in efsys_mem_t *esmp, + __in size_t ndescs, + __in uint32_t id, + __in unsigned int flags, + __in efx_evq_t *eep, + __deref_out efx_rxq_t **erpp); + +#if EFSYS_OPT_RX_PACKED_STREAM + +#define EFX_RXQ_PACKED_STREAM_BUF_SIZE_1M (1U * 1024 * 1024) +#define EFX_RXQ_PACKED_STREAM_BUF_SIZE_512K (512U * 1024) +#define EFX_RXQ_PACKED_STREAM_BUF_SIZE_256K (256U * 1024) +#define EFX_RXQ_PACKED_STREAM_BUF_SIZE_128K (128U * 1024) +#define EFX_RXQ_PACKED_STREAM_BUF_SIZE_64K (64U * 1024) + +extern __checkReturn efx_rc_t +efx_rx_qcreate_packed_stream( + __in efx_nic_t *enp, + __in unsigned int index, + __in unsigned int label, + __in uint32_t ps_buf_size, + __in efsys_mem_t *esmp, + __in size_t ndescs, + __in efx_evq_t *eep, + __deref_out efx_rxq_t **erpp); + +#endif + +#if EFSYS_OPT_RX_ES_SUPER_BUFFER + +/* Maximum head-of-line block timeout in nanoseconds */ +#define EFX_RXQ_ES_SUPER_BUFFER_HOL_BLOCK_MAX (400U * 1000 * 1000) + +extern __checkReturn efx_rc_t +efx_rx_qcreate_es_super_buffer( + __in efx_nic_t *enp, + __in unsigned int index, + __in unsigned int label, + __in uint32_t n_bufs_per_desc, + __in uint32_t max_dma_len, + __in uint32_t buf_stride, + __in uint32_t hol_block_timeout, + __in efsys_mem_t *esmp, + __in size_t ndescs, + __in unsigned int flags, + __in efx_evq_t *eep, + __deref_out efx_rxq_t **erpp); + +#endif + +typedef struct efx_buffer_s { + efsys_dma_addr_t eb_addr; + size_t eb_size; + boolean_t eb_eop; +} efx_buffer_t; + +typedef struct efx_desc_s { + efx_qword_t ed_eq; +} efx_desc_t; + +extern void +efx_rx_qpost( + __in efx_rxq_t *erp, + __in_ecount(ndescs) efsys_dma_addr_t *addrp, + __in size_t size, + __in unsigned int ndescs, + __in unsigned int completed, + __in unsigned int added); + +extern void +efx_rx_qpush( + __in efx_rxq_t *erp, + __in unsigned int added, + __inout unsigned int *pushedp); + +#if EFSYS_OPT_RX_PACKED_STREAM + +extern void +efx_rx_qpush_ps_credits( + __in efx_rxq_t *erp); + +extern __checkReturn uint8_t * +efx_rx_qps_packet_info( + __in efx_rxq_t *erp, + __in uint8_t *buffer, + __in uint32_t buffer_length, + __in uint32_t current_offset, + __out uint16_t *lengthp, + __out uint32_t *next_offsetp, + __out uint32_t *timestamp); +#endif + +extern __checkReturn efx_rc_t +efx_rx_qflush( + __in efx_rxq_t *erp); + +extern void +efx_rx_qenable( + __in efx_rxq_t *erp); + +extern void +efx_rx_qdestroy( + __in efx_rxq_t *erp); + +/* TX */ + +typedef struct efx_txq_s efx_txq_t; + +#if EFSYS_OPT_QSTATS + +/* START MKCONFIG GENERATED EfxHeaderTransmitQueueBlock 12dff8778598b2db */ +typedef enum efx_tx_qstat_e { + TX_POST, + TX_POST_PIO, + TX_NQSTATS +} efx_tx_qstat_t; + +/* END MKCONFIG GENERATED EfxHeaderTransmitQueueBlock */ + +#endif /* EFSYS_OPT_QSTATS */ + +extern __checkReturn efx_rc_t +efx_tx_init( + __in efx_nic_t *enp); + +extern void +efx_tx_fini( + __in efx_nic_t *enp); + 
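+/*
+ * Illustrative Tx path outline (a minimal sketch only, not a complete
+ * example): it shows the intended ordering of the Tx queue calls declared
+ * in this section. Here "enp" is the usual NIC handle, while "esmp"
+ * (DMA-safe descriptor memory), "eep" (an event queue), "ndescs",
+ * "pkt_addr", "pkt_len" (a mapped packet buffer) and "completed" (the
+ * count reaped from Tx completion events) are caller-provided
+ * placeholders, not names defined by this header.
+ *
+ *    efx_txq_t *etp;
+ *    efx_buffer_t eb;
+ *    unsigned int added, pushed;
+ *    efx_rc_t rc;
+ *
+ *    if ((rc = efx_tx_init(enp)) != 0)
+ *        return (rc);
+ *    rc = efx_tx_qcreate(enp, 0, 0, esmp, ndescs, 0,
+ *        EFX_TXQ_CKSUM_IPV4 | EFX_TXQ_CKSUM_TCPUDP, eep, &etp, &added);
+ *    if (rc != 0)
+ *        return (rc);
+ *
+ *    eb.eb_addr = pkt_addr;
+ *    eb.eb_size = pkt_len;
+ *    eb.eb_eop = B_TRUE;
+ *    pushed = added;
+ *    if (efx_tx_qpost(etp, &eb, 1, completed, &added) == 0)
+ *        efx_tx_qpush(etp, added, pushed);
+ *
+ *    rc = efx_tx_qflush(etp);
+ *    ... wait for the flush-done notification on "eep" ...
+ *    efx_tx_qdestroy(etp);
+ *    efx_tx_fini(enp);
+ */
+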
+extern __checkReturn size_t +efx_txq_size( + __in const efx_nic_t *enp, + __in unsigned int ndescs); + +extern __checkReturn unsigned int +efx_txq_nbufs( + __in const efx_nic_t *enp, + __in unsigned int ndescs); + +#define EFX_TXQ_LIMIT(_ndescs) ((_ndescs) - 16) + +#define EFX_TXQ_CKSUM_IPV4 0x0001 +#define EFX_TXQ_CKSUM_TCPUDP 0x0002 +#define EFX_TXQ_FATSOV2 0x0004 +#define EFX_TXQ_CKSUM_INNER_IPV4 0x0008 +#define EFX_TXQ_CKSUM_INNER_TCPUDP 0x0010 + +extern __checkReturn efx_rc_t +efx_tx_qcreate( + __in efx_nic_t *enp, + __in unsigned int index, + __in unsigned int label, + __in efsys_mem_t *esmp, + __in size_t n, + __in uint32_t id, + __in uint16_t flags, + __in efx_evq_t *eep, + __deref_out efx_txq_t **etpp, + __out unsigned int *addedp); + +extern __checkReturn efx_rc_t +efx_tx_qpost( + __in efx_txq_t *etp, + __in_ecount(ndescs) efx_buffer_t *eb, + __in unsigned int ndescs, + __in unsigned int completed, + __inout unsigned int *addedp); + +extern __checkReturn efx_rc_t +efx_tx_qpace( + __in efx_txq_t *etp, + __in unsigned int ns); + +extern void +efx_tx_qpush( + __in efx_txq_t *etp, + __in unsigned int added, + __in unsigned int pushed); + +extern __checkReturn efx_rc_t +efx_tx_qflush( + __in efx_txq_t *etp); + +extern void +efx_tx_qenable( + __in efx_txq_t *etp); + +extern __checkReturn efx_rc_t +efx_tx_qpio_enable( + __in efx_txq_t *etp); + +extern void +efx_tx_qpio_disable( + __in efx_txq_t *etp); + +extern __checkReturn efx_rc_t +efx_tx_qpio_write( + __in efx_txq_t *etp, + __in_ecount(buf_length) uint8_t *buffer, + __in size_t buf_length, + __in size_t pio_buf_offset); + +extern __checkReturn efx_rc_t +efx_tx_qpio_post( + __in efx_txq_t *etp, + __in size_t pkt_length, + __in unsigned int completed, + __inout unsigned int *addedp); + +extern __checkReturn efx_rc_t +efx_tx_qdesc_post( + __in efx_txq_t *etp, + __in_ecount(n) efx_desc_t *ed, + __in unsigned int n, + __in unsigned int completed, + __inout unsigned int *addedp); + +extern void +efx_tx_qdesc_dma_create( + __in efx_txq_t *etp, + __in efsys_dma_addr_t addr, + __in size_t size, + __in boolean_t eop, + __out efx_desc_t *edp); + +extern void +efx_tx_qdesc_tso_create( + __in efx_txq_t *etp, + __in uint16_t ipv4_id, + __in uint32_t tcp_seq, + __in uint8_t tcp_flags, + __out efx_desc_t *edp); + +/* Number of FATSOv2 option descriptors */ +#define EFX_TX_FATSOV2_OPT_NDESCS 2 + +/* Maximum number of DMA segments per TSO packet (not superframe) */ +#define EFX_TX_FATSOV2_DMA_SEGS_PER_PKT_MAX 24 + +extern void +efx_tx_qdesc_tso2_create( + __in efx_txq_t *etp, + __in uint16_t ipv4_id, + __in uint16_t outer_ipv4_id, + __in uint32_t tcp_seq, + __in uint16_t tcp_mss, + __out_ecount(count) efx_desc_t *edp, + __in int count); + +extern void +efx_tx_qdesc_vlantci_create( + __in efx_txq_t *etp, + __in uint16_t tci, + __out efx_desc_t *edp); + +extern void +efx_tx_qdesc_checksum_create( + __in efx_txq_t *etp, + __in uint16_t flags, + __out efx_desc_t *edp); + +#if EFSYS_OPT_QSTATS + +#if EFSYS_OPT_NAMES + +extern const char * +efx_tx_qstat_name( + __in efx_nic_t *etp, + __in unsigned int id); + +#endif /* EFSYS_OPT_NAMES */ + +extern void +efx_tx_qstats_update( + __in efx_txq_t *etp, + __inout_ecount(TX_NQSTATS) efsys_stat_t *stat); + +#endif /* EFSYS_OPT_QSTATS */ + +extern void +efx_tx_qdestroy( + __in efx_txq_t *etp); + + +/* FILTER */ + +#if EFSYS_OPT_FILTER + +#define EFX_ETHER_TYPE_IPV4 0x0800 +#define EFX_ETHER_TYPE_IPV6 0x86DD + +#define EFX_IPPROTO_TCP 6 +#define EFX_IPPROTO_UDP 17 +#define EFX_IPPROTO_GRE 47 + +/* Use RSS to spread 
across multiple queues */
+#define EFX_FILTER_FLAG_RX_RSS 0x01
+/* Enable RX scatter */
+#define EFX_FILTER_FLAG_RX_SCATTER 0x02
+/*
+ * Override an automatic filter (priority EFX_FILTER_PRI_AUTO).
+ * May only be set by the filter implementation for each type.
+ * A removal request will restore the automatic filter in its place.
+ */
+#define EFX_FILTER_FLAG_RX_OVER_AUTO 0x04
+/* Filter is for RX */
+#define EFX_FILTER_FLAG_RX 0x08
+/* Filter is for TX */
+#define EFX_FILTER_FLAG_TX 0x10
+/* Set match flag on the received packet */
+#define EFX_FILTER_FLAG_ACTION_FLAG 0x20
+/* Set match mark on the received packet */
+#define EFX_FILTER_FLAG_ACTION_MARK 0x40
+
+typedef uint8_t efx_filter_flags_t;
+
+/*
+ * Flags which specify the fields to match on. The values are the same as in the
+ * MC_CMD_FILTER_OP/MC_CMD_FILTER_OP_EXT commands.
+ */
+
+/* Match by remote IP host address */
+#define EFX_FILTER_MATCH_REM_HOST 0x00000001
+/* Match by local IP host address */
+#define EFX_FILTER_MATCH_LOC_HOST 0x00000002
+/* Match by remote MAC address */
+#define EFX_FILTER_MATCH_REM_MAC 0x00000004
+/* Match by remote TCP/UDP port */
+#define EFX_FILTER_MATCH_REM_PORT 0x00000008
+/* Match by local MAC address */
+#define EFX_FILTER_MATCH_LOC_MAC 0x00000010
+/* Match by local TCP/UDP port */
+#define EFX_FILTER_MATCH_LOC_PORT 0x00000020
+/* Match by Ether-type */
+#define EFX_FILTER_MATCH_ETHER_TYPE 0x00000040
+/* Match by inner VLAN ID */
+#define EFX_FILTER_MATCH_INNER_VID 0x00000080
+/* Match by outer VLAN ID */
+#define EFX_FILTER_MATCH_OUTER_VID 0x00000100
+/* Match by IP transport protocol */
+#define EFX_FILTER_MATCH_IP_PROTO 0x00000200
+/* Match by VNI or VSID */
+#define EFX_FILTER_MATCH_VNI_OR_VSID 0x00000800
+/* For encapsulated packets, match by inner frame local MAC address */
+#define EFX_FILTER_MATCH_IFRM_LOC_MAC 0x00010000
+/* For encapsulated packets, match all multicast inner frames */
+#define EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST 0x01000000
+/* For encapsulated packets, match all unicast inner frames */
+#define EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST 0x02000000
+/*
+ * Match by encap type; this flag does not correspond to
+ * the MCDI match flags, and any unoccupied value may be used
+ */
+#define EFX_FILTER_MATCH_ENCAP_TYPE 0x20000000
+/* Match otherwise-unmatched multicast and broadcast packets */
+#define EFX_FILTER_MATCH_UNKNOWN_MCAST_DST 0x40000000
+/* Match otherwise-unmatched unicast packets */
+#define EFX_FILTER_MATCH_UNKNOWN_UCAST_DST 0x80000000
+
+typedef uint32_t efx_filter_match_flags_t;
+
+/* Filter priority from lowest to highest */
+typedef enum efx_filter_priority_s {
+ EFX_FILTER_PRI_AUTO = 0, /* Automatic filter based on device
+ * address list or hardware
+ * requirements. This may only be used
+ * by the filter implementation for
+ * each NIC type. */
+ EFX_FILTER_PRI_MANUAL, /* Manually configured filter */
+ EFX_FILTER_NPRI,
+} efx_filter_priority_t;
+
+/*
+ * FIXME: All these fields are assumed to be in little-endian byte order.
+ * It may be better for some to be big-endian. See bug42804.
+ */
+
+typedef struct efx_filter_spec_s {
+ efx_filter_match_flags_t efs_match_flags;
+ uint8_t efs_priority;
+ efx_filter_flags_t efs_flags;
+ uint16_t efs_dmaq_id;
+ uint32_t efs_rss_context;
+ uint32_t efs_mark;
+ /*
+ * Saved lower-priority filter. If it is set, it is restored on
+ * filter delete operation.
+ */ + struct efx_filter_spec_s *efs_overridden_spec; + /* Fields below here are hashed for software filter lookup */ + uint16_t efs_outer_vid; + uint16_t efs_inner_vid; + uint8_t efs_loc_mac[EFX_MAC_ADDR_LEN]; + uint8_t efs_rem_mac[EFX_MAC_ADDR_LEN]; + uint16_t efs_ether_type; + uint8_t efs_ip_proto; + efx_tunnel_protocol_t efs_encap_type; + uint16_t efs_loc_port; + uint16_t efs_rem_port; + efx_oword_t efs_rem_host; + efx_oword_t efs_loc_host; + uint8_t efs_vni_or_vsid[EFX_VNI_OR_VSID_LEN]; + uint8_t efs_ifrm_loc_mac[EFX_MAC_ADDR_LEN]; +} efx_filter_spec_t; + + +/* Default values for use in filter specifications */ +#define EFX_FILTER_SPEC_RX_DMAQ_ID_DROP 0xfff +#define EFX_FILTER_SPEC_VID_UNSPEC 0xffff + +extern __checkReturn efx_rc_t +efx_filter_init( + __in efx_nic_t *enp); + +extern void +efx_filter_fini( + __in efx_nic_t *enp); + +extern __checkReturn efx_rc_t +efx_filter_insert( + __in efx_nic_t *enp, + __inout efx_filter_spec_t *spec); + +extern __checkReturn efx_rc_t +efx_filter_remove( + __in efx_nic_t *enp, + __inout efx_filter_spec_t *spec); + +extern __checkReturn efx_rc_t +efx_filter_restore( + __in efx_nic_t *enp); + +extern __checkReturn efx_rc_t +efx_filter_supported_filters( + __in efx_nic_t *enp, + __out_ecount(buffer_length) uint32_t *buffer, + __in size_t buffer_length, + __out size_t *list_lengthp); + +extern void +efx_filter_spec_init_rx( + __out efx_filter_spec_t *spec, + __in efx_filter_priority_t priority, + __in efx_filter_flags_t flags, + __in efx_rxq_t *erp); + +extern void +efx_filter_spec_init_tx( + __out efx_filter_spec_t *spec, + __in efx_txq_t *etp); + +extern __checkReturn efx_rc_t +efx_filter_spec_set_ipv4_local( + __inout efx_filter_spec_t *spec, + __in uint8_t proto, + __in uint32_t host, + __in uint16_t port); + +extern __checkReturn efx_rc_t +efx_filter_spec_set_ipv4_full( + __inout efx_filter_spec_t *spec, + __in uint8_t proto, + __in uint32_t lhost, + __in uint16_t lport, + __in uint32_t rhost, + __in uint16_t rport); + +extern __checkReturn efx_rc_t +efx_filter_spec_set_eth_local( + __inout efx_filter_spec_t *spec, + __in uint16_t vid, + __in const uint8_t *addr); + +extern void +efx_filter_spec_set_ether_type( + __inout efx_filter_spec_t *spec, + __in uint16_t ether_type); + +extern __checkReturn efx_rc_t +efx_filter_spec_set_uc_def( + __inout efx_filter_spec_t *spec); + +extern __checkReturn efx_rc_t +efx_filter_spec_set_mc_def( + __inout efx_filter_spec_t *spec); + +typedef enum efx_filter_inner_frame_match_e { + EFX_FILTER_INNER_FRAME_MATCH_OTHER = 0, + EFX_FILTER_INNER_FRAME_MATCH_UNKNOWN_MCAST_DST, + EFX_FILTER_INNER_FRAME_MATCH_UNKNOWN_UCAST_DST +} efx_filter_inner_frame_match_t; + +extern __checkReturn efx_rc_t +efx_filter_spec_set_encap_type( + __inout efx_filter_spec_t *spec, + __in efx_tunnel_protocol_t encap_type, + __in efx_filter_inner_frame_match_t inner_frame_match); + +extern __checkReturn efx_rc_t +efx_filter_spec_set_vxlan( + __inout efx_filter_spec_t *spec, + __in const uint8_t *vni, + __in const uint8_t *inner_addr, + __in const uint8_t *outer_addr); + +extern __checkReturn efx_rc_t +efx_filter_spec_set_geneve( + __inout efx_filter_spec_t *spec, + __in const uint8_t *vni, + __in const uint8_t *inner_addr, + __in const uint8_t *outer_addr); + +extern __checkReturn efx_rc_t +efx_filter_spec_set_nvgre( + __inout efx_filter_spec_t *spec, + __in const uint8_t *vsid, + __in const uint8_t *inner_addr, + __in const uint8_t *outer_addr); + +#if EFSYS_OPT_RX_SCALE +extern __checkReturn efx_rc_t +efx_filter_spec_set_rss_context( + 
__inout efx_filter_spec_t *spec, + __in uint32_t rss_context); +#endif +#endif /* EFSYS_OPT_FILTER */ + +/* HASH */ + +extern __checkReturn uint32_t +efx_hash_dwords( + __in_ecount(count) uint32_t const *input, + __in size_t count, + __in uint32_t init); + +extern __checkReturn uint32_t +efx_hash_bytes( + __in_ecount(length) uint8_t const *input, + __in size_t length, + __in uint32_t init); + +#if EFSYS_OPT_LICENSING + +/* LICENSING */ + +typedef struct efx_key_stats_s { + uint32_t eks_valid; + uint32_t eks_invalid; + uint32_t eks_blacklisted; + uint32_t eks_unverifiable; + uint32_t eks_wrong_node; + uint32_t eks_licensed_apps_lo; + uint32_t eks_licensed_apps_hi; + uint32_t eks_licensed_features_lo; + uint32_t eks_licensed_features_hi; +} efx_key_stats_t; + +extern __checkReturn efx_rc_t +efx_lic_init( + __in efx_nic_t *enp); + +extern void +efx_lic_fini( + __in efx_nic_t *enp); + +extern __checkReturn boolean_t +efx_lic_check_support( + __in efx_nic_t *enp); + +extern __checkReturn efx_rc_t +efx_lic_update_licenses( + __in efx_nic_t *enp); + +extern __checkReturn efx_rc_t +efx_lic_get_key_stats( + __in efx_nic_t *enp, + __out efx_key_stats_t *ksp); + +extern __checkReturn efx_rc_t +efx_lic_app_state( + __in efx_nic_t *enp, + __in uint64_t app_id, + __out boolean_t *licensedp); + +extern __checkReturn efx_rc_t +efx_lic_get_id( + __in efx_nic_t *enp, + __in size_t buffer_size, + __out uint32_t *typep, + __out size_t *lengthp, + __out_opt uint8_t *bufferp); + + +extern __checkReturn efx_rc_t +efx_lic_find_start( + __in efx_nic_t *enp, + __in_bcount(buffer_size) + caddr_t bufferp, + __in size_t buffer_size, + __out uint32_t *startp); + +extern __checkReturn efx_rc_t +efx_lic_find_end( + __in efx_nic_t *enp, + __in_bcount(buffer_size) + caddr_t bufferp, + __in size_t buffer_size, + __in uint32_t offset, + __out uint32_t *endp); + +extern __checkReturn __success(return != B_FALSE) boolean_t +efx_lic_find_key( + __in efx_nic_t *enp, + __in_bcount(buffer_size) + caddr_t bufferp, + __in size_t buffer_size, + __in uint32_t offset, + __out uint32_t *startp, + __out uint32_t *lengthp); + +extern __checkReturn __success(return != B_FALSE) boolean_t +efx_lic_validate_key( + __in efx_nic_t *enp, + __in_bcount(length) caddr_t keyp, + __in uint32_t length); + +extern __checkReturn efx_rc_t +efx_lic_read_key( + __in efx_nic_t *enp, + __in_bcount(buffer_size) + caddr_t bufferp, + __in size_t buffer_size, + __in uint32_t offset, + __in uint32_t length, + __out_bcount_part(key_max_size, *lengthp) + caddr_t keyp, + __in size_t key_max_size, + __out uint32_t *lengthp); + +extern __checkReturn efx_rc_t +efx_lic_write_key( + __in efx_nic_t *enp, + __in_bcount(buffer_size) + caddr_t bufferp, + __in size_t buffer_size, + __in uint32_t offset, + __in_bcount(length) caddr_t keyp, + __in uint32_t length, + __out uint32_t *lengthp); + + __checkReturn efx_rc_t +efx_lic_delete_key( + __in efx_nic_t *enp, + __in_bcount(buffer_size) + caddr_t bufferp, + __in size_t buffer_size, + __in uint32_t offset, + __in uint32_t length, + __in uint32_t end, + __out uint32_t *deltap); + +extern __checkReturn efx_rc_t +efx_lic_create_partition( + __in efx_nic_t *enp, + __in_bcount(buffer_size) + caddr_t bufferp, + __in size_t buffer_size); + +extern __checkReturn efx_rc_t +efx_lic_finish_partition( + __in efx_nic_t *enp, + __in_bcount(buffer_size) + caddr_t bufferp, + __in size_t buffer_size); + +#endif /* EFSYS_OPT_LICENSING */ + +/* TUNNEL */ + +#if EFSYS_OPT_TUNNEL + +extern __checkReturn efx_rc_t +efx_tunnel_init( + __in efx_nic_t 
*enp);
+
+extern void
+efx_tunnel_fini(
+ __in efx_nic_t *enp);
+
+/*
+ * For overlay network encapsulation using UDP, the firmware needs to know
+ * the configured UDP port for the overlay so it can decode encapsulated
+ * frames correctly.
+ * The UDP port/protocol list is global.
+ */
+
+extern __checkReturn efx_rc_t
+efx_tunnel_config_udp_add(
+ __in efx_nic_t *enp,
+ __in uint16_t port /* host/cpu-endian */,
+ __in efx_tunnel_protocol_t protocol);
+
+extern __checkReturn efx_rc_t
+efx_tunnel_config_udp_remove(
+ __in efx_nic_t *enp,
+ __in uint16_t port /* host/cpu-endian */,
+ __in efx_tunnel_protocol_t protocol);
+
+extern void
+efx_tunnel_config_clear(
+ __in efx_nic_t *enp);
+
+/**
+ * Apply tunnel UDP ports configuration to hardware.
+ *
+ * EAGAIN is returned if hardware will be reset (datapath and management CPU
+ * reboot).
+ */
+extern __checkReturn efx_rc_t
+efx_tunnel_reconfigure(
+ __in efx_nic_t *enp);
+
+#endif /* EFSYS_OPT_TUNNEL */
+
+#if EFSYS_OPT_FW_SUBVARIANT_AWARE
+
+/**
+ * Firmware subvariant choice options.
+ *
+ * It may be switched to no Tx checksum if attached drivers are either
+ * preboot or firmware subvariant aware and no VIS are allocated.
+ * It may always be switched to default explicitly using a set request, or
+ * implicitly if an unaware driver is attaching. If switching is done when
+ * a driver is attached, it gets an MC_REBOOT event and should recreate its
+ * datapath.
+ *
+ * See SF-119419-TC DPDK Firmware Driver Interface and
+ * SF-109306-TC EF10 for Driver Writers for details.
+ */
+typedef enum efx_nic_fw_subvariant_e {
+ EFX_NIC_FW_SUBVARIANT_DEFAULT = 0,
+ EFX_NIC_FW_SUBVARIANT_NO_TX_CSUM = 1,
+ EFX_NIC_FW_SUBVARIANT_NTYPES
+} efx_nic_fw_subvariant_t;
+
+extern __checkReturn efx_rc_t
+efx_nic_get_fw_subvariant(
+ __in efx_nic_t *enp,
+ __out efx_nic_fw_subvariant_t *subvariantp);
+
+extern __checkReturn efx_rc_t
+efx_nic_set_fw_subvariant(
+ __in efx_nic_t *enp,
+ __in efx_nic_fw_subvariant_t subvariant);
+
+#endif /* EFSYS_OPT_FW_SUBVARIANT_AWARE */
+
+typedef enum efx_phy_fec_type_e {
+ EFX_PHY_FEC_NONE = 0,
+ EFX_PHY_FEC_BASER,
+ EFX_PHY_FEC_RS
+} efx_phy_fec_type_t;
+
+extern __checkReturn efx_rc_t
+efx_phy_fec_type_get(
+ __in efx_nic_t *enp,
+ __out efx_phy_fec_type_t *typep);
+
+typedef struct efx_phy_link_state_s {
+ uint32_t epls_adv_cap_mask;
+ uint32_t epls_lp_cap_mask;
+ uint32_t epls_ld_cap_mask;
+ unsigned int epls_fcntl;
+ efx_phy_fec_type_t epls_fec;
+ efx_link_mode_t epls_link_mode;
+} efx_phy_link_state_t;
+
+extern __checkReturn efx_rc_t
+efx_phy_link_state_get(
+ __in efx_nic_t *enp,
+ __out efx_phy_link_state_t *eplsp);
+
+
+#if EFSYS_OPT_EVB
+
+typedef uint32_t efx_vswitch_id_t;
+typedef uint32_t efx_vport_id_t;
+
+typedef enum efx_vswitch_type_e {
+ EFX_VSWITCH_TYPE_VLAN = 1,
+ EFX_VSWITCH_TYPE_VEB,
+ /* VSWITCH_TYPE_VEPA: obsolete */
+ EFX_VSWITCH_TYPE_MUX = 4,
+} efx_vswitch_type_t;
+
+typedef enum efx_vport_type_e {
+ EFX_VPORT_TYPE_NORMAL = 4,
+ EFX_VPORT_TYPE_EXPANSION,
+ EFX_VPORT_TYPE_TEST,
+} efx_vport_type_t;
+
+/* Unspecified VLAN ID to support disabling of VLAN filtering */
+#define EFX_FILTER_VID_UNSPEC 0xffff
+#define EFX_DEFAULT_VSWITCH_ID 1
+
+/* Default VF VLAN ID on creation */
+#define EFX_VF_VID_DEFAULT EFX_FILTER_VID_UNSPEC
+#define EFX_VPORT_ID_INVALID 0
+
+typedef struct efx_vport_config_s {
+ /* Either VF index or 0xffff for PF */
+ uint16_t evc_function;
+ /* VLAN ID of the associated function */
+ uint16_t evc_vid;
+ /* vport id shared with client driver */
+ efx_vport_id_t evc_vport_id;
+ /* MAC
address of the associated function */ + uint8_t evc_mac_addr[EFX_MAC_ADDR_LEN]; + /* + * vports created with this flag set may only transfer traffic on the + * VLANs permitted by the vport. Also, an attempt to install filter with + * VLAN will be refused unless requesting function has VLAN privilege. + */ + boolean_t evc_vlan_restrict; + /* Whether this function is assigned or not */ + boolean_t evc_vport_assigned; +} efx_vport_config_t; + +typedef struct efx_vswitch_s efx_vswitch_t; + +extern __checkReturn efx_rc_t +efx_evb_init( + __in efx_nic_t *enp); + +extern void +efx_evb_fini( + __in efx_nic_t *enp); + +extern __checkReturn efx_rc_t +efx_evb_vswitch_create( + __in efx_nic_t *enp, + __in uint32_t num_vports, + __inout_ecount(num_vports) efx_vport_config_t *vport_configp, + __deref_out efx_vswitch_t **evpp); + +extern __checkReturn efx_rc_t +efx_evb_vswitch_destroy( + __in efx_nic_t *enp, + __in efx_vswitch_t *evp); + +extern __checkReturn efx_rc_t +efx_evb_vport_mac_set( + __in efx_nic_t *enp, + __in efx_vswitch_t *evp, + __in efx_vport_id_t vport_id, + __in_bcount(EFX_MAC_ADDR_LEN) uint8_t *addrp); + +extern __checkReturn efx_rc_t +efx_evb_vport_vlan_set( + __in efx_nic_t *enp, + __in efx_vswitch_t *evp, + __in efx_vport_id_t vport_id, + __in uint16_t vid); + +extern __checkReturn efx_rc_t +efx_evb_vport_reset( + __in efx_nic_t *enp, + __in efx_vswitch_t *evp, + __in efx_vport_id_t vport_id, + __in_bcount(EFX_MAC_ADDR_LEN) uint8_t *addrp, + __in uint16_t vid, + __out boolean_t *is_fn_resetp); + +extern __checkReturn efx_rc_t +efx_evb_vport_stats( + __in efx_nic_t *enp, + __in efx_vswitch_t *evp, + __in efx_vport_id_t vport_id, + __out efsys_mem_t *stats_bufferp); + +#endif /* EFSYS_OPT_EVB */ + +#if EFSYS_OPT_MCDI_PROXY_AUTH_SERVER + +typedef struct efx_proxy_auth_config_s { + efsys_mem_t *request_bufferp; + efsys_mem_t *response_bufferp; + efsys_mem_t *status_bufferp; + uint32_t block_cnt; + uint32_t *op_listp; + size_t op_count; + uint32_t handled_privileges; +} efx_proxy_auth_config_t; + +typedef struct efx_proxy_cmd_params_s { + uint32_t pf_index; + uint32_t vf_index; + uint8_t *request_bufferp; + size_t request_size; + uint8_t *response_bufferp; + size_t response_size; + size_t *response_size_actualp; +} efx_proxy_cmd_params_t; + +extern __checkReturn efx_rc_t +efx_proxy_auth_init( + __in efx_nic_t *enp); + +extern void +efx_proxy_auth_fini( + __in efx_nic_t *enp); + +extern __checkReturn efx_rc_t +efx_proxy_auth_configure( + __in efx_nic_t *enp, + __in efx_proxy_auth_config_t *configp); + + __checkReturn efx_rc_t +efx_proxy_auth_destroy( + __in efx_nic_t *enp, + __in uint32_t handled_privileges); + + __checkReturn efx_rc_t +efx_proxy_auth_complete_request( + __in efx_nic_t *enp, + __in uint32_t fn_index, + __in uint32_t proxy_result, + __in uint32_t handle); + + __checkReturn efx_rc_t +efx_proxy_auth_exec_cmd( + __in efx_nic_t *enp, + __inout efx_proxy_cmd_params_t *paramsp); + + __checkReturn efx_rc_t +efx_proxy_auth_set_privilege_mask( + __in efx_nic_t *enp, + __in uint32_t vf_index, + __in uint32_t mask, + __in uint32_t value); + + __checkReturn efx_rc_t +efx_proxy_auth_privilege_mask_get( + __in efx_nic_t *enp, + __in uint32_t pf_index, + __in uint32_t vf_index, + __out uint32_t *maskp); + + __checkReturn efx_rc_t +efx_proxy_auth_privilege_modify( + __in efx_nic_t *enp, + __in uint32_t pf_index, + __in uint32_t vf_index, + __in uint32_t add_privileges_mask, + __in uint32_t remove_privileges_mask); + +#endif /* EFSYS_OPT_MCDI_PROXY_AUTH_SERVER */ + +#ifdef __cplusplus +} 
+#endif + +#endif /* _SYS_EFX_H */ diff --git a/src/spdk/dpdk/drivers/net/sfc/base/efx_annote.h b/src/spdk/dpdk/drivers/net/sfc/base/efx_annote.h new file mode 100644 index 000000000..64d3d1c17 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/base/efx_annote.h @@ -0,0 +1,103 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2018-2019 Solarflare Communications Inc. + */ + +#ifndef _SYS_EFX_ANNOTE_H +#define _SYS_EFX_ANNOTE_H + +#if defined(_WIN32) || defined(_WIN64) +#define EFX_HAVE_WINDOWS_ANNOTATIONS 1 +#else +#define EFX_HAVE_WINDOWS_ANNOTATIONS 0 +#endif /* defined(_WIN32) || defined(_WIN64) */ + +#if defined(__sun) +#define EFX_HAVE_SOLARIS_ANNOTATIONS 1 +#else +#define EFX_HAVE_SOLARIS_ANNOTATIONS 0 +#endif /* defined(__sun) */ + +#if !EFX_HAVE_WINDOWS_ANNOTATIONS + +/* Ignore Windows SAL annotations on other platforms */ +#define __in +#define __in_opt +#define __in_ecount(_n) +#define __in_ecount_opt(_n) +#define __in_bcount(_n) +#define __in_bcount_opt(_n) + +#define __out +#define __out_opt +#define __out_ecount(_n) +#define __out_ecount_opt(_n) +#define __out_ecount_part(_n, _l) +#define __out_bcount(_n) +#define __out_bcount_opt(_n) +#define __out_bcount_part(_n, _l) +#define __out_bcount_part_opt(_n, _l) + +#define __deref_out +#define __deref_inout + +#define __inout +#define __inout_opt +#define __inout_ecount(_n) +#define __inout_ecount_opt(_n) +#define __inout_bcount(_n) +#define __inout_bcount_opt(_n) +#define __inout_bcount_full_opt(_n) + +#define __deref_out_bcount_opt(n) + +#define __checkReturn +#define __success(_x) + +#define __drv_when(_p, _c) + +#endif /* !EFX_HAVE_WINDOWS_ANNOTATIONS */ + +#if !EFX_HAVE_SOLARIS_ANNOTATIONS + +#if EFX_HAVE_WINDOWS_ANNOTATIONS + +/* + * Support some SunOS/Solaris style _NOTE() annotations + * + * At present with the facilities provided in the WDL and the SAL we can only + * easily act upon _NOTE(ARGUNUSED(arglist)) annotations. + * + * Intermediate macros to expand individual _NOTE annotation types into + * something the WDK or SAL can understand. They shouldn't be used directly, + * for example EFX_NOTE_ARGUNUSED() is only used as an intermediate step on the + * transformation of _NOTE(ARGUNSED(arg1, arg2)) into + * UNREFERENCED_PARAMETER((arg1, arg2)); + */ +#define EFX_NOTE_ALIGNMENT(_fname, _n) +#define EFX_NOTE_ARGUNUSED(...) UNREFERENCED_PARAMETER((__VA_ARGS__)); +#define EFX_NOTE_CONSTANTCONDITION +#define EFX_NOTE_CONSTCOND +#define EFX_NOTE_EMPTY +#define EFX_NOTE_FALLTHROUGH +#define EFX_NOTE_FALLTHRU +#define EFX_NOTE_LINTED(_msg) +#define EFX_NOTE_NOTREACHED +#define EFX_NOTE_PRINTFLIKE(_n) +#define EFX_NOTE_SCANFLIKE(_n) +#define EFX_NOTE_VARARGS(_n) + +#define _NOTE(_annotation) EFX_NOTE_ ## _annotation + +#else + +/* Ignore Solaris annotations on other platforms */ + +#define _NOTE(_annotation) + +#endif /* EFX_HAVE_WINDOWS_ANNOTATIONS */ + +#endif /* !EFX_HAVE_SOLARIS_ANNOTATIONS */ + +#endif /* _SYS_EFX_ANNOTE_H */ diff --git a/src/spdk/dpdk/drivers/net/sfc/base/efx_bootcfg.c b/src/spdk/dpdk/drivers/net/sfc/base/efx_bootcfg.c new file mode 100644 index 000000000..fa22d994e --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/base/efx_bootcfg.c @@ -0,0 +1,1125 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2009-2019 Solarflare Communications Inc. + */ + +#include "efx.h" +#include "efx_impl.h" + +#if EFSYS_OPT_BOOTCFG + +/* + * Maximum size of BOOTCFG block across all nics as understood by SFCgPXE. 
+ * NOTE: This is larger than the Medford per-PF bootcfg sector. + */ +#define BOOTCFG_MAX_SIZE 0x1000 + +/* Medford per-PF bootcfg sector */ +#define BOOTCFG_PER_PF 0x800 +#define BOOTCFG_PF_COUNT 16 + +#define DHCP_OPT_HAS_VALUE(opt) \ + (((opt) > EFX_DHCP_PAD) && ((opt) < EFX_DHCP_END)) + +#define DHCP_MAX_VALUE 255 + +#define DHCP_ENCAPSULATOR(encap_opt) ((encap_opt) >> 8) +#define DHCP_ENCAPSULATED(encap_opt) ((encap_opt) & 0xff) +#define DHCP_IS_ENCAP_OPT(opt) DHCP_OPT_HAS_VALUE(DHCP_ENCAPSULATOR(opt)) + +typedef struct efx_dhcp_tag_hdr_s { + uint8_t tag; + uint8_t length; +} efx_dhcp_tag_hdr_t; + +/* + * Length calculations for tags with value field. PAD and END + * have a fixed length of 1, with no length or value field. + */ +#define DHCP_FULL_TAG_LENGTH(hdr) \ + (sizeof (efx_dhcp_tag_hdr_t) + (hdr)->length) + +#define DHCP_NEXT_TAG(hdr) \ + ((efx_dhcp_tag_hdr_t *)(((uint8_t *)(hdr)) + \ + DHCP_FULL_TAG_LENGTH((hdr)))) + +#define DHCP_CALC_TAG_LENGTH(payload_len) \ + ((payload_len) + sizeof (efx_dhcp_tag_hdr_t)) + + +/* Report the layout of bootcfg sectors in NVRAM partition. */ + __checkReturn efx_rc_t +efx_bootcfg_sector_info( + __in efx_nic_t *enp, + __in uint32_t pf, + __out_opt uint32_t *sector_countp, + __out size_t *offsetp, + __out size_t *max_sizep) +{ + uint32_t count; + size_t max_size; + size_t offset; + int rc; + + switch (enp->en_family) { +#if EFSYS_OPT_SIENA + case EFX_FAMILY_SIENA: + max_size = BOOTCFG_MAX_SIZE; + offset = 0; + count = 1; + break; +#endif /* EFSYS_OPT_SIENA */ + +#if EFSYS_OPT_HUNTINGTON + case EFX_FAMILY_HUNTINGTON: + max_size = BOOTCFG_MAX_SIZE; + offset = 0; + count = 1; + break; +#endif /* EFSYS_OPT_HUNTINGTON */ + +#if EFSYS_OPT_MEDFORD + case EFX_FAMILY_MEDFORD: { + /* Shared partition (array indexed by PF) */ + max_size = BOOTCFG_PER_PF; + count = BOOTCFG_PF_COUNT; + if (pf >= count) { + rc = EINVAL; + goto fail2; + } + offset = max_size * pf; + break; + } +#endif /* EFSYS_OPT_MEDFORD */ + +#if EFSYS_OPT_MEDFORD2 + case EFX_FAMILY_MEDFORD2: { + /* Shared partition (array indexed by PF) */ + max_size = BOOTCFG_PER_PF; + count = BOOTCFG_PF_COUNT; + if (pf >= count) { + rc = EINVAL; + goto fail3; + } + offset = max_size * pf; + break; + } +#endif /* EFSYS_OPT_MEDFORD2 */ + + default: + EFSYS_ASSERT(0); + rc = ENOTSUP; + goto fail1; + } + EFSYS_ASSERT3U(max_size, <=, BOOTCFG_MAX_SIZE); + + if (sector_countp != NULL) + *sector_countp = count; + *offsetp = offset; + *max_sizep = max_size; + + return (0); + +#if EFSYS_OPT_MEDFORD2 +fail3: + EFSYS_PROBE(fail3); +#endif +#if EFSYS_OPT_MEDFORD +fail2: + EFSYS_PROBE(fail2); +#endif +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + return (rc); +} + + + __checkReturn uint8_t +efx_dhcp_csum( + __in_bcount(size) uint8_t const *data, + __in size_t size) +{ + unsigned int pos; + uint8_t checksum = 0; + + for (pos = 0; pos < size; pos++) + checksum += data[pos]; + return (checksum); +} + + __checkReturn efx_rc_t +efx_dhcp_verify( + __in_bcount(size) uint8_t const *data, + __in size_t size, + __out_opt size_t *usedp) +{ + size_t offset = 0; + size_t used = 0; + efx_rc_t rc; + + /* Start parsing tags immediately after the checksum */ + for (offset = 1; offset < size; ) { + uint8_t tag; + uint8_t length; + + /* Consume tag */ + tag = data[offset]; + if (tag == EFX_DHCP_END) { + offset++; + used = offset; + break; + } + if (tag == EFX_DHCP_PAD) { + offset++; + continue; + } + + /* Consume length */ + if (offset + 1 >= size) { + rc = ENOSPC; + goto fail1; + } + length = data[offset + 1]; + + /* Consume *length */ + 
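+ /* The value occupies data[offset + 2] .. data[offset + 1 + length]; its last byte must still lie within the buffer */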
if (offset + 1 + length >= size) { + rc = ENOSPC; + goto fail2; + } + + offset += 2 + length; + used = offset; + } + + /* Checksum the entire sector, including bytes after any EFX_DHCP_END */ + if (efx_dhcp_csum(data, size) != 0) { + rc = EINVAL; + goto fail3; + } + + if (usedp != NULL) + *usedp = used; + + return (0); + +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +/* + * Walk the entire tag set looking for option. The sought option may be + * encapsulated. ENOENT indicates the walk completed without finding the + * option. If we run out of buffer during the walk the function will return + * ENOSPC. + */ +static efx_rc_t +efx_dhcp_walk_tags( + __deref_inout uint8_t **tagpp, + __inout size_t *buffer_sizep, + __in uint16_t opt) +{ + efx_rc_t rc = 0; + boolean_t is_encap = B_FALSE; + + if (DHCP_IS_ENCAP_OPT(opt)) { + /* + * Look for the encapsulator and, if found, limit ourselves + * to its payload. If it's not found then the entire tag + * cannot be found, so the encapsulated opt search is + * skipped. + */ + rc = efx_dhcp_walk_tags(tagpp, buffer_sizep, + DHCP_ENCAPSULATOR(opt)); + if (rc == 0) { + *buffer_sizep = ((efx_dhcp_tag_hdr_t *)*tagpp)->length; + (*tagpp) += sizeof (efx_dhcp_tag_hdr_t); + } + opt = DHCP_ENCAPSULATED(opt); + is_encap = B_TRUE; + } + + EFSYS_ASSERT(!DHCP_IS_ENCAP_OPT(opt)); + + while (rc == 0) { + size_t size; + + if (*buffer_sizep == 0) { + rc = ENOSPC; + goto fail1; + } + + if (DHCP_ENCAPSULATED(**tagpp) == opt) + break; + + if ((**tagpp) == EFX_DHCP_END) { + rc = ENOENT; + break; + } else if ((**tagpp) == EFX_DHCP_PAD) { + size = 1; + } else { + if (*buffer_sizep < sizeof (efx_dhcp_tag_hdr_t)) { + rc = ENOSPC; + goto fail2; + } + + size = + DHCP_FULL_TAG_LENGTH((efx_dhcp_tag_hdr_t *)*tagpp); + } + + if (size > *buffer_sizep) { + rc = ENOSPC; + goto fail3; + } + + (*tagpp) += size; + (*buffer_sizep) -= size; + + if ((*buffer_sizep == 0) && is_encap) { + /* Search within encapulator tag finished */ + rc = ENOENT; + break; + } + } + + /* + * Returns 0 if found otherwise ENOENT indicating search finished + * correctly + */ + return (rc); + +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +/* + * Locate value buffer for option in the given buffer. + * Returns 0 if found, ENOENT indicating search finished + * correctly, otherwise search failed before completion. + */ + __checkReturn efx_rc_t +efx_dhcp_find_tag( + __in_bcount(buffer_length) uint8_t *bufferp, + __in size_t buffer_length, + __in uint16_t opt, + __deref_out uint8_t **valuepp, + __out size_t *value_lengthp) +{ + efx_rc_t rc; + uint8_t *tagp = bufferp; + size_t len = buffer_length; + + rc = efx_dhcp_walk_tags(&tagp, &len, opt); + if (rc == 0) { + efx_dhcp_tag_hdr_t *hdrp; + + hdrp = (efx_dhcp_tag_hdr_t *)tagp; + *valuepp = (uint8_t *)(&hdrp[1]); + *value_lengthp = hdrp->length; + } else if (rc != ENOENT) { + goto fail1; + } + + return (rc); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +/* + * Locate the end tag in the given buffer. + * Returns 0 if found, ENOENT indicating search finished + * correctly but end tag was not found; otherwise search + * failed before completion. 
+ */ + __checkReturn efx_rc_t +efx_dhcp_find_end( + __in_bcount(buffer_length) uint8_t *bufferp, + __in size_t buffer_length, + __deref_out uint8_t **endpp) +{ + efx_rc_t rc; + uint8_t *endp = bufferp; + size_t len = buffer_length; + + rc = efx_dhcp_walk_tags(&endp, &len, EFX_DHCP_END); + if (rc == 0) + *endpp = endp; + else if (rc != ENOENT) + goto fail1; + + return (rc); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + +/* + * Delete the given tag from anywhere in the buffer. Copes with + * encapsulated tags, and updates or deletes the encapsulating opt as + * necessary. + */ + __checkReturn efx_rc_t +efx_dhcp_delete_tag( + __inout_bcount(buffer_length) uint8_t *bufferp, + __in size_t buffer_length, + __in uint16_t opt) +{ + efx_rc_t rc; + efx_dhcp_tag_hdr_t *hdrp; + size_t len; + uint8_t *startp; + uint8_t *endp; + + len = buffer_length; + startp = bufferp; + + if (!DHCP_OPT_HAS_VALUE(DHCP_ENCAPSULATED(opt))) { + rc = EINVAL; + goto fail1; + } + + rc = efx_dhcp_walk_tags(&startp, &len, opt); + if (rc != 0) + goto fail1; + + hdrp = (efx_dhcp_tag_hdr_t *)startp; + + if (DHCP_IS_ENCAP_OPT(opt)) { + uint8_t tag_length = DHCP_FULL_TAG_LENGTH(hdrp); + uint8_t *encapp = bufferp; + efx_dhcp_tag_hdr_t *encap_hdrp; + + len = buffer_length; + rc = efx_dhcp_walk_tags(&encapp, &len, + DHCP_ENCAPSULATOR(opt)); + if (rc != 0) + goto fail2; + + encap_hdrp = (efx_dhcp_tag_hdr_t *)encapp; + if (encap_hdrp->length > tag_length) { + encap_hdrp->length = (uint8_t)( + (size_t)encap_hdrp->length - tag_length); + } else { + /* delete the encapsulating tag */ + hdrp = encap_hdrp; + } + } + + startp = (uint8_t *)hdrp; + endp = (uint8_t *)DHCP_NEXT_TAG(hdrp); + + if (startp < bufferp) { + rc = EINVAL; + goto fail3; + } + + if (endp > &bufferp[buffer_length]) { + rc = EINVAL; + goto fail4; + } + + memmove(startp, endp, + buffer_length - (endp - bufferp)); + + return (0); + +fail4: + EFSYS_PROBE(fail4); +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +/* + * Write the tag header into write_pointp and optionally copies the payload + * into the space following. + */ +static void +efx_dhcp_write_tag( + __in uint8_t *write_pointp, + __in uint16_t opt, + __in_bcount_opt(value_length) + uint8_t *valuep, + __in size_t value_length) +{ + efx_dhcp_tag_hdr_t *hdrp = (efx_dhcp_tag_hdr_t *)write_pointp; + hdrp->tag = DHCP_ENCAPSULATED(opt); + hdrp->length = (uint8_t)value_length; + if ((value_length > 0) && (valuep != NULL)) + memcpy(&hdrp[1], valuep, value_length); +} + +/* + * Add the given tag to the end of the buffer. Copes with creating an + * encapsulated tag, and updates or creates the encapsulating opt as + * necessary. 
+ */ + __checkReturn efx_rc_t +efx_dhcp_add_tag( + __inout_bcount(buffer_length) uint8_t *bufferp, + __in size_t buffer_length, + __in uint16_t opt, + __in_bcount_opt(value_length) uint8_t *valuep, + __in size_t value_length) +{ + efx_rc_t rc; + efx_dhcp_tag_hdr_t *encap_hdrp = NULL; + uint8_t *insert_pointp = NULL; + uint8_t *endp; + size_t available_space; + size_t added_length; + size_t search_size; + uint8_t *searchp; + + if (!DHCP_OPT_HAS_VALUE(DHCP_ENCAPSULATED(opt))) { + rc = EINVAL; + goto fail1; + } + + if (value_length > DHCP_MAX_VALUE) { + rc = EINVAL; + goto fail2; + } + + if ((value_length > 0) && (valuep == NULL)) { + rc = EINVAL; + goto fail3; + } + + endp = bufferp; + available_space = buffer_length; + rc = efx_dhcp_walk_tags(&endp, &available_space, EFX_DHCP_END); + if (rc != 0) + goto fail4; + + searchp = bufferp; + search_size = buffer_length; + if (DHCP_IS_ENCAP_OPT(opt)) { + rc = efx_dhcp_walk_tags(&searchp, &search_size, + DHCP_ENCAPSULATOR(opt)); + if (rc == 0) { + encap_hdrp = (efx_dhcp_tag_hdr_t *)searchp; + + /* Check encapsulated tag is not present */ + search_size = encap_hdrp->length; + rc = efx_dhcp_walk_tags(&searchp, &search_size, + opt); + if (rc != ENOENT) { + rc = EINVAL; + goto fail5; + } + + /* Check encapsulator will not overflow */ + if (((size_t)encap_hdrp->length + + DHCP_CALC_TAG_LENGTH(value_length)) > + DHCP_MAX_VALUE) { + rc = E2BIG; + goto fail6; + } + + /* Insert at start of existing encapsulator */ + insert_pointp = (uint8_t *)&encap_hdrp[1]; + opt = DHCP_ENCAPSULATED(opt); + } else if (rc == ENOENT) { + encap_hdrp = NULL; + } else { + goto fail7; + } + } else { + /* Check unencapsulated tag is not present */ + rc = efx_dhcp_walk_tags(&searchp, &search_size, + opt); + if (rc != ENOENT) { + rc = EINVAL; + goto fail8; + } + } + + if (insert_pointp == NULL) { + /* Insert at end of existing tags */ + insert_pointp = endp; + } + + /* Includes the new encapsulator tag hdr if required */ + added_length = DHCP_CALC_TAG_LENGTH(value_length) + + (DHCP_IS_ENCAP_OPT(opt) ? sizeof (efx_dhcp_tag_hdr_t) : 0); + + if (available_space <= added_length) { + rc = ENOMEM; + goto fail9; + } + + memmove(insert_pointp + added_length, insert_pointp, + available_space - added_length); + + if (DHCP_IS_ENCAP_OPT(opt)) { + /* Create new encapsulator header */ + added_length -= sizeof (efx_dhcp_tag_hdr_t); + efx_dhcp_write_tag(insert_pointp, + DHCP_ENCAPSULATOR(opt), NULL, added_length); + insert_pointp += sizeof (efx_dhcp_tag_hdr_t); + } else if (encap_hdrp) + /* Modify existing encapsulator header */ + encap_hdrp->length += + ((uint8_t)DHCP_CALC_TAG_LENGTH(value_length)); + + efx_dhcp_write_tag(insert_pointp, opt, valuep, value_length); + + return (0); + +fail9: + EFSYS_PROBE(fail9); +fail8: + EFSYS_PROBE(fail8); +fail7: + EFSYS_PROBE(fail7); +fail6: + EFSYS_PROBE(fail6); +fail5: + EFSYS_PROBE(fail5); +fail4: + EFSYS_PROBE(fail4); +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +/* + * Update an existing tag to the new value. Copes with encapsulated + * tags, and updates the encapsulating opt as necessary. 
+ */ + __checkReturn efx_rc_t +efx_dhcp_update_tag( + __inout_bcount(buffer_length) uint8_t *bufferp, + __in size_t buffer_length, + __in uint16_t opt, + __in uint8_t *value_locationp, + __in_bcount_opt(value_length) uint8_t *valuep, + __in size_t value_length) +{ + efx_rc_t rc; + uint8_t *write_pointp = value_locationp - sizeof (efx_dhcp_tag_hdr_t); + efx_dhcp_tag_hdr_t *hdrp = (efx_dhcp_tag_hdr_t *)write_pointp; + efx_dhcp_tag_hdr_t *encap_hdrp = NULL; + size_t old_length; + + if (!DHCP_OPT_HAS_VALUE(DHCP_ENCAPSULATED(opt))) { + rc = EINVAL; + goto fail1; + } + + if (value_length > DHCP_MAX_VALUE) { + rc = EINVAL; + goto fail2; + } + + if ((value_length > 0) && (valuep == NULL)) { + rc = EINVAL; + goto fail3; + } + + old_length = hdrp->length; + + if (old_length < value_length) { + uint8_t *endp = bufferp; + size_t available_space = buffer_length; + + rc = efx_dhcp_walk_tags(&endp, &available_space, + EFX_DHCP_END); + if (rc != 0) + goto fail4; + + if (available_space < (value_length - old_length)) { + rc = EINVAL; + goto fail5; + } + } + + if (DHCP_IS_ENCAP_OPT(opt)) { + uint8_t *encapp = bufferp; + size_t following_encap = buffer_length; + size_t new_length; + + rc = efx_dhcp_walk_tags(&encapp, &following_encap, + DHCP_ENCAPSULATOR(opt)); + if (rc != 0) + goto fail6; + + encap_hdrp = (efx_dhcp_tag_hdr_t *)encapp; + + new_length = ((size_t)encap_hdrp->length + + value_length - old_length); + /* Check encapsulator will not overflow */ + if (new_length > DHCP_MAX_VALUE) { + rc = E2BIG; + goto fail7; + } + + encap_hdrp->length = (uint8_t)new_length; + } + + /* + * Move the following data up/down to accomodate the new payload + * length. + */ + if (old_length != value_length) { + uint8_t *destp = (uint8_t *)DHCP_NEXT_TAG(hdrp) + + value_length - old_length; + size_t count = &bufferp[buffer_length] - + (uint8_t *)DHCP_NEXT_TAG(hdrp); + + memmove(destp, DHCP_NEXT_TAG(hdrp), count); + } + + EFSYS_ASSERT(hdrp->tag == DHCP_ENCAPSULATED(opt)); + efx_dhcp_write_tag(write_pointp, opt, valuep, value_length); + + return (0); + +fail7: + EFSYS_PROBE(fail7); +fail6: + EFSYS_PROBE(fail6); +fail5: + EFSYS_PROBE(fail5); +fail4: + EFSYS_PROBE(fail4); +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + +/* + * Copy bootcfg sector data to a target buffer which may differ in size. + * Optionally corrects format errors in source buffer. 
+ */ + efx_rc_t +efx_bootcfg_copy_sector( + __in efx_nic_t *enp, + __inout_bcount(sector_length) + uint8_t *sector, + __in size_t sector_length, + __out_bcount(data_size) uint8_t *data, + __in size_t data_size, + __in boolean_t handle_format_errors) +{ + _NOTE(ARGUNUSED(enp)) + + size_t used_bytes; + efx_rc_t rc; + + /* Minimum buffer is checksum byte and EFX_DHCP_END terminator */ + if (data_size < 2) { + rc = ENOSPC; + goto fail1; + } + + /* Verify that the area is correctly formatted and checksummed */ + rc = efx_dhcp_verify(sector, sector_length, + &used_bytes); + + if (!handle_format_errors) { + if (rc != 0) + goto fail2; + + if ((used_bytes < 2) || + (sector[used_bytes - 1] != EFX_DHCP_END)) { + /* Block too short, or EFX_DHCP_END missing */ + rc = ENOENT; + goto fail3; + } + } + + /* Synthesize empty format on verification failure */ + if (rc != 0 || used_bytes == 0) { + sector[0] = 0; + sector[1] = EFX_DHCP_END; + used_bytes = 2; + } + EFSYS_ASSERT(used_bytes >= 2); /* checksum and EFX_DHCP_END */ + EFSYS_ASSERT(used_bytes <= sector_length); + EFSYS_ASSERT(sector_length >= 2); + + /* + * Legacy bootcfg sectors don't terminate with an EFX_DHCP_END + * character. Modify the returned payload so it does. + * Reinitialise the sector if there isn't room for the character. + */ + if (sector[used_bytes - 1] != EFX_DHCP_END) { + if (used_bytes >= sector_length) { + sector[0] = 0; + used_bytes = 1; + } + sector[used_bytes] = EFX_DHCP_END; + ++used_bytes; + } + + /* + * Verify that the target buffer is large enough for the + * entire used bootcfg area, then copy into the target buffer. + */ + if (used_bytes > data_size) { + rc = ENOSPC; + goto fail4; + } + + data[0] = 0; /* checksum, updated below */ + + /* Copy all after the checksum to the target buffer */ + memcpy(data + 1, sector + 1, used_bytes - 1); + + /* Zero out the unused portion of the target buffer */ + if (used_bytes < data_size) + (void) memset(data + used_bytes, 0, data_size - used_bytes); + + /* + * The checksum includes trailing data after any EFX_DHCP_END + * character, which we've just modified (by truncation or appending + * EFX_DHCP_END). 
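+	 * Recompute the checksum byte over the entire target buffer.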
+ */ + data[0] -= efx_dhcp_csum(data, data_size); + + return (0); + +fail4: + EFSYS_PROBE(fail4); +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + efx_rc_t +efx_bootcfg_read( + __in efx_nic_t *enp, + __out_bcount(size) uint8_t *data, + __in size_t size) +{ + uint8_t *payload = NULL; + size_t used_bytes; + size_t partn_length; + size_t sector_length; + size_t sector_offset; + efx_rc_t rc; + uint32_t sector_number; + + /* Minimum buffer is checksum byte and EFX_DHCP_END terminator */ + if (size < 2) { + rc = ENOSPC; + goto fail1; + } + +#if EFX_OPTS_EF10() + sector_number = enp->en_nic_cfg.enc_pf; +#else + sector_number = 0; +#endif + rc = efx_nvram_size(enp, EFX_NVRAM_BOOTROM_CFG, &partn_length); + if (rc != 0) + goto fail2; + + /* The bootcfg sector may be stored in a (larger) shared partition */ + rc = efx_bootcfg_sector_info(enp, sector_number, + NULL, §or_offset, §or_length); + if (rc != 0) + goto fail3; + + if (sector_length < 2) { + rc = EINVAL; + goto fail4; + } + + if (sector_length > BOOTCFG_MAX_SIZE) + sector_length = BOOTCFG_MAX_SIZE; + + if (sector_offset + sector_length > partn_length) { + /* Partition is too small */ + rc = EFBIG; + goto fail5; + } + + /* + * We need to read the entire BOOTCFG sector to ensure we read all + * tags, because legacy bootcfg sectors are not guaranteed to end + * with an EFX_DHCP_END character. If the user hasn't supplied a + * sufficiently large buffer then use our own buffer. + */ + if (sector_length > size) { + EFSYS_KMEM_ALLOC(enp->en_esip, sector_length, payload); + if (payload == NULL) { + rc = ENOMEM; + goto fail6; + } + } else + payload = (uint8_t *)data; + + if ((rc = efx_nvram_rw_start(enp, EFX_NVRAM_BOOTROM_CFG, NULL)) != 0) + goto fail7; + + if ((rc = efx_nvram_read_chunk(enp, EFX_NVRAM_BOOTROM_CFG, + sector_offset, (caddr_t)payload, sector_length)) != 0) { + (void) efx_nvram_rw_finish(enp, EFX_NVRAM_BOOTROM_CFG, NULL); + goto fail8; + } + + if ((rc = efx_nvram_rw_finish(enp, EFX_NVRAM_BOOTROM_CFG, NULL)) != 0) + goto fail9; + + /* Verify that the area is correctly formatted and checksummed */ + rc = efx_dhcp_verify(payload, sector_length, + &used_bytes); + if (rc != 0 || used_bytes == 0) { + payload[0] = 0; + payload[1] = EFX_DHCP_END; + used_bytes = 2; + } + + EFSYS_ASSERT(used_bytes >= 2); /* checksum and EFX_DHCP_END */ + EFSYS_ASSERT(used_bytes <= sector_length); + + /* + * Legacy bootcfg sectors don't terminate with an EFX_DHCP_END + * character. Modify the returned payload so it does. + * BOOTCFG_MAX_SIZE is by definition large enough for any valid + * (per-port) bootcfg sector, so reinitialise the sector if there + * isn't room for the character. + */ + if (payload[used_bytes - 1] != EFX_DHCP_END) { + if (used_bytes >= sector_length) + used_bytes = 1; + + payload[used_bytes] = EFX_DHCP_END; + ++used_bytes; + } + + /* + * Verify that the user supplied buffer is large enough for the + * entire used bootcfg area, then copy into the user supplied buffer. 
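+	 * A bounce buffer was allocated above when the sector is larger than
+	 * the caller's buffer; it is freed once the used bytes are copied out.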
+ */ + if (used_bytes > size) { + rc = ENOSPC; + goto fail10; + } + + data[0] = 0; /* checksum, updated below */ + + if (sector_length > size) { + /* Copy all after the checksum to the target buffer */ + memcpy(data + 1, payload + 1, used_bytes - 1); + EFSYS_KMEM_FREE(enp->en_esip, sector_length, payload); + } + + /* Zero out the unused portion of the user buffer */ + if (used_bytes < size) + (void) memset(data + used_bytes, 0, size - used_bytes); + + /* + * The checksum includes trailing data after any EFX_DHCP_END character, + * which we've just modified (by truncation or appending EFX_DHCP_END). + */ + data[0] -= efx_dhcp_csum(data, size); + + return (0); + +fail10: + EFSYS_PROBE(fail10); +fail9: + EFSYS_PROBE(fail9); +fail8: + EFSYS_PROBE(fail8); +fail7: + EFSYS_PROBE(fail7); + if (sector_length > size) + EFSYS_KMEM_FREE(enp->en_esip, sector_length, payload); +fail6: + EFSYS_PROBE(fail6); +fail5: + EFSYS_PROBE(fail5); +fail4: + EFSYS_PROBE(fail4); +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + efx_rc_t +efx_bootcfg_write( + __in efx_nic_t *enp, + __in_bcount(size) uint8_t *data, + __in size_t size) +{ + uint8_t *partn_data; + uint8_t checksum; + size_t partn_length; + size_t sector_length; + size_t sector_offset; + size_t used_bytes; + efx_rc_t rc; + uint32_t sector_number; + +#if EFX_OPTS_EF10() + sector_number = enp->en_nic_cfg.enc_pf; +#else + sector_number = 0; +#endif + + rc = efx_nvram_size(enp, EFX_NVRAM_BOOTROM_CFG, &partn_length); + if (rc != 0) + goto fail1; + + /* The bootcfg sector may be stored in a (larger) shared partition */ + rc = efx_bootcfg_sector_info(enp, sector_number, + NULL, §or_offset, §or_length); + if (rc != 0) + goto fail2; + + if (sector_length > BOOTCFG_MAX_SIZE) + sector_length = BOOTCFG_MAX_SIZE; + + if (sector_offset + sector_length > partn_length) { + /* Partition is too small */ + rc = EFBIG; + goto fail3; + } + + if ((rc = efx_dhcp_verify(data, size, &used_bytes)) != 0) + goto fail4; + + /* + * The caller *must* terminate their block with a EFX_DHCP_END + * character + */ + if ((used_bytes < 2) || ((uint8_t)data[used_bytes - 1] != + EFX_DHCP_END)) { + /* Block too short or EFX_DHCP_END missing */ + rc = ENOENT; + goto fail5; + } + + /* Check that the hardware has support for this much data */ + if (used_bytes > MIN(sector_length, BOOTCFG_MAX_SIZE)) { + rc = ENOSPC; + goto fail6; + } + + /* + * If the BOOTCFG sector is stored in a shared partition, then we must + * read the whole partition and insert the updated bootcfg sector at the + * correct offset. + */ + EFSYS_KMEM_ALLOC(enp->en_esip, partn_length, partn_data); + if (partn_data == NULL) { + rc = ENOMEM; + goto fail7; + } + + rc = efx_nvram_rw_start(enp, EFX_NVRAM_BOOTROM_CFG, NULL); + if (rc != 0) + goto fail8; + + /* Read the entire partition */ + rc = efx_nvram_read_chunk(enp, EFX_NVRAM_BOOTROM_CFG, 0, + (caddr_t)partn_data, partn_length); + if (rc != 0) + goto fail9; + + /* + * Insert the BOOTCFG sector into the partition, Zero out all data + * after the EFX_DHCP_END tag, and adjust the checksum. 
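+	 * The whole partition image is then erased and rewritten in one pass.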
+ */ + (void) memset(partn_data + sector_offset, 0x0, sector_length); + (void) memcpy(partn_data + sector_offset, data, used_bytes); + + checksum = efx_dhcp_csum(data, used_bytes); + partn_data[sector_offset] -= checksum; + + if ((rc = efx_nvram_erase(enp, EFX_NVRAM_BOOTROM_CFG)) != 0) + goto fail10; + + if ((rc = efx_nvram_write_chunk(enp, EFX_NVRAM_BOOTROM_CFG, + 0, (caddr_t)partn_data, partn_length)) != 0) + goto fail11; + + if ((rc = efx_nvram_rw_finish(enp, EFX_NVRAM_BOOTROM_CFG, NULL)) != 0) + goto fail12; + + EFSYS_KMEM_FREE(enp->en_esip, partn_length, partn_data); + + return (0); + +fail12: + EFSYS_PROBE(fail12); +fail11: + EFSYS_PROBE(fail11); +fail10: + EFSYS_PROBE(fail10); +fail9: + EFSYS_PROBE(fail9); + + (void) efx_nvram_rw_finish(enp, EFX_NVRAM_BOOTROM_CFG, NULL); +fail8: + EFSYS_PROBE(fail8); + + EFSYS_KMEM_FREE(enp->en_esip, partn_length, partn_data); +fail7: + EFSYS_PROBE(fail7); +fail6: + EFSYS_PROBE(fail6); +fail5: + EFSYS_PROBE(fail5); +fail4: + EFSYS_PROBE(fail4); +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +#endif /* EFSYS_OPT_BOOTCFG */ diff --git a/src/spdk/dpdk/drivers/net/sfc/base/efx_check.h b/src/spdk/dpdk/drivers/net/sfc/base/efx_check.h new file mode 100644 index 000000000..596524756 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/base/efx_check.h @@ -0,0 +1,368 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2012-2019 Solarflare Communications Inc. + */ + +#ifndef _SYS_EFX_CHECK_H +#define _SYS_EFX_CHECK_H + +#include "efsys.h" + +/* + * Check that the efsys.h header in client code has a valid combination of + * EFSYS_OPT_xxx options. + * + * NOTE: Keep checks for obsolete options here to ensure that they are removed + * from client code (and do not reappear in merges from other branches). + */ + +/* Check family options for EF10 architecture controllers. */ +#define EFX_OPTS_EF10() \ + (EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2) + +#ifdef EFSYS_OPT_FALCON +# error "FALCON is obsolete and is not supported." +#endif + +#if EFSYS_OPT_BOOTCFG +/* Support NVRAM based boot config */ +# if !EFSYS_OPT_NVRAM +# error "BOOTCFG requires NVRAM" +# endif +#endif /* EFSYS_OPT_BOOTCFG */ + +#if EFSYS_OPT_CHECK_REG +/* Verify chip implements accessed registers */ +# if !(EFX_OPTS_EF10() || EFSYS_OPT_SIENA) +# error "CHECK_REG requires EF10 arch or SIENA" +# endif +#endif /* EFSYS_OPT_CHECK_REG */ + +#if EFSYS_OPT_DECODE_INTR_FATAL +/* Decode fatal errors */ +# if !EFSYS_OPT_SIENA +# error "INTR_FATAL requires SIENA" +# endif +#endif /* EFSYS_OPT_DECODE_INTR_FATAL */ + +#if EFSYS_OPT_DIAG +/* Support diagnostic hardware tests */ +# if !(EFX_OPTS_EF10() || EFSYS_OPT_SIENA) +# error "DIAG requires EF10 arch or SIENA" +# endif +#endif /* EFSYS_OPT_DIAG */ + +#if EFSYS_OPT_EV_PREFETCH +/* Support optimized EVQ data access */ +# if !(EFX_OPTS_EF10() || EFSYS_OPT_SIENA) +# error "EV_PREFETCH requires EF10 arch or SIENA" +# endif +#endif /* EFSYS_OPT_EV_PREFETCH */ + +#ifdef EFSYS_OPT_FALCON_NIC_CFG_OVERRIDE +# error "FALCON_NIC_CFG_OVERRIDE is obsolete and is not supported." 
+#endif + +#if EFSYS_OPT_FILTER +/* Support hardware packet filters */ +# if !(EFX_OPTS_EF10() || EFSYS_OPT_SIENA) +# error "FILTER requires EF10 arch or SIENA" +# endif +#endif /* EFSYS_OPT_FILTER */ + +#if EFX_OPTS_EF10() +# if !EFSYS_OPT_FILTER +# error "EF10 arch requires FILTER" +# endif +#endif /* EFX_OPTS_EF10() */ + +#if EFSYS_OPT_LOOPBACK +/* Support hardware loopback modes */ +# if !(EFX_OPTS_EF10() || EFSYS_OPT_SIENA) +# error "LOOPBACK requires EF10 arch or SIENA" +# endif +#endif /* EFSYS_OPT_LOOPBACK */ + +#ifdef EFSYS_OPT_MAC_FALCON_GMAC +# error "MAC_FALCON_GMAC is obsolete and is not supported." +#endif + +#ifdef EFSYS_OPT_MAC_FALCON_XMAC +# error "MAC_FALCON_XMAC is obsolete and is not supported." +#endif + +#if EFSYS_OPT_MAC_STATS +/* Support MAC statistics */ +# if !(EFX_OPTS_EF10() || EFSYS_OPT_SIENA) +# error "MAC_STATS requires EF10 arch or SIENA" +# endif +#endif /* EFSYS_OPT_MAC_STATS */ + +#if EFSYS_OPT_MCDI +/* Support management controller messages */ +# if !(EFX_OPTS_EF10() || EFSYS_OPT_SIENA) +# error "MCDI requires EF10 arch or SIENA" +# endif +#endif /* EFSYS_OPT_MCDI */ + +#if (EFX_OPTS_EF10() || EFSYS_OPT_SIENA) +# if !EFSYS_OPT_MCDI +# error "EF10 arch or SIENA requires MCDI" +# endif +#endif + +#if EFSYS_OPT_MCDI_LOGGING +/* Support MCDI logging */ +# if !EFSYS_OPT_MCDI +# error "MCDI_LOGGING requires MCDI" +# endif +#endif /* EFSYS_OPT_MCDI_LOGGING */ + +#if EFSYS_OPT_MCDI_PROXY_AUTH_SERVER +/* Support MCDI proxy authorization (server) */ +# if !EFSYS_OPT_MCDI_PROXY_AUTH +# error "MCDI_PROXY_AUTH_SERVER requires MCDI_PROXY_AUTH" +# endif +#endif /* EFSYS_OPT_MCDI_PROXY_AUTH_SERVER */ + +#if EFSYS_OPT_MCDI_PROXY_AUTH +/* Support MCDI proxy authorization (client) */ +# if !EFSYS_OPT_MCDI +# error "MCDI_PROXY_AUTH requires MCDI" +# endif +#endif /* EFSYS_OPT_MCDI_PROXY_AUTH */ + +#ifdef EFSYS_OPT_MON_LM87 +# error "MON_LM87 is obsolete and is not supported." +#endif + +#ifdef EFSYS_OPT_MON_MAX6647 +# error "MON_MAX6647 is obsolete and is not supported." +#endif + +#ifdef EFSYS_OPT_MON_NULL +# error "MON_NULL is obsolete and is not supported." +#endif + +#ifdef EFSYS_OPT_MON_SIENA +# error "MON_SIENA is obsolete (replaced by MON_MCDI)." +#endif + +#ifdef EFSYS_OPT_MON_HUNTINGTON +# error "MON_HUNTINGTON is obsolete (replaced by MON_MCDI)." +#endif + +#if EFSYS_OPT_MON_STATS +/* Support monitor statistics (voltage/temperature) */ +# if !(EFX_OPTS_EF10() || EFSYS_OPT_SIENA) +# error "MON_STATS requires EF10 arch or SIENA" +# endif +#endif /* EFSYS_OPT_MON_STATS */ + +#if EFSYS_OPT_MON_MCDI +/* Support Monitor via mcdi */ +# if !(EFX_OPTS_EF10() || EFSYS_OPT_SIENA) +# error "MON_MCDI requires EF10 arch or SIENA" +# endif +#endif /* EFSYS_OPT_MON_MCDI*/ + +#if EFSYS_OPT_NAMES +/* Support printable names for statistics */ +# if !(EFSYS_OPT_LOOPBACK || EFSYS_OPT_MAC_STATS || EFSYS_OPT_MCDI || \ + EFSYS_MON_STATS || EFSYS_OPT_PHY_STATS || EFSYS_OPT_QSTATS) +# error "NAMES requires LOOPBACK or xxxSTATS or MCDI" +# endif +#endif /* EFSYS_OPT_NAMES */ + +#if EFSYS_OPT_NVRAM +/* Support non volatile configuration */ +# if !(EFX_OPTS_EF10() || EFSYS_OPT_SIENA) +# error "NVRAM requires EF10 arch or SIENA" +# endif +#endif /* EFSYS_OPT_NVRAM */ + +#if EFSYS_OPT_IMAGE_LAYOUT +/* Support signed image layout handling */ +# if !(EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2) +# error "IMAGE_LAYOUT requires MEDFORD or MEDFORD2" +# endif +#endif /* EFSYS_OPT_IMAGE_LAYOUT */ + +#ifdef EFSYS_OPT_NVRAM_FALCON_BOOTROM +# error "NVRAM_FALCON_BOOTROM is obsolete and is not supported." 
+#endif + +#ifdef EFSYS_OPT_NVRAM_SFT9001 +# error "NVRAM_SFT9001 is obsolete and is not supported." +#endif + +#ifdef EFSYS_OPT_NVRAM_SFX7101 +# error "NVRAM_SFX7101 is obsolete and is not supported." +#endif + +#ifdef EFSYS_OPT_PCIE_TUNE +# error "PCIE_TUNE is obsolete and is not supported." +#endif + +#ifdef EFSYS_OPT_PHY_BIST +# error "PHY_BIST is obsolete (replaced by BIST)." +#endif + +#if EFSYS_OPT_PHY_FLAGS +/* Support PHY flags */ +# if !EFSYS_OPT_SIENA +# error "PHY_FLAGS requires SIENA" +# endif +#endif /* EFSYS_OPT_PHY_FLAGS */ + +#if EFSYS_OPT_PHY_LED_CONTROL +/* Support for PHY LED control */ +# if !(EFX_OPTS_EF10() || EFSYS_OPT_SIENA) +# error "PHY_LED_CONTROL requires EF10 arch or SIENA" +# endif +#endif /* EFSYS_OPT_PHY_LED_CONTROL */ + +#ifdef EFSYS_OPT_PHY_NULL +# error "PHY_NULL is obsolete and is not supported." +#endif + +#ifdef EFSYS_OPT_PHY_PM8358 +# error "PHY_PM8358 is obsolete and is not supported." +#endif + +#ifdef EFSYS_OPT_PHY_PROPS +# error "PHY_PROPS is obsolete and is not supported." +#endif + +#ifdef EFSYS_OPT_PHY_QT2022C2 +# error "PHY_QT2022C2 is obsolete and is not supported." +#endif + +#ifdef EFSYS_OPT_PHY_QT2025C +# error "PHY_QT2025C is obsolete and is not supported." +#endif + +#ifdef EFSYS_OPT_PHY_SFT9001 +# error "PHY_SFT9001 is obsolete and is not supported." +#endif + +#ifdef EFSYS_OPT_PHY_SFX7101 +# error "PHY_SFX7101 is obsolete and is not supported." +#endif + +#if EFSYS_OPT_PHY_STATS +/* Support PHY statistics */ +# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD) +# error "PHY_STATS requires SIENA or HUNTINGTON or MEDFORD" +# endif +#endif /* EFSYS_OPT_PHY_STATS */ + +#ifdef EFSYS_OPT_PHY_TXC43128 +# error "PHY_TXC43128 is obsolete and is not supported." +#endif + +#if EFSYS_OPT_QSTATS +/* Support EVQ/RXQ/TXQ statistics */ +# if !(EFX_OPTS_EF10() || EFSYS_OPT_SIENA) +# error "QSTATS requires EF10 arch or SIENA" +# endif +#endif /* EFSYS_OPT_QSTATS */ + +#ifdef EFSYS_OPT_RX_HDR_SPLIT +# error "RX_HDR_SPLIT is obsolete and is not supported" +#endif + +#if EFSYS_OPT_RX_SCALE +/* Support receive scaling (RSS) */ +# if !(EFX_OPTS_EF10() || EFSYS_OPT_SIENA) +# error "RX_SCALE requires EF10 arch or SIENA" +# endif +#endif /* EFSYS_OPT_RX_SCALE */ + +#if EFSYS_OPT_RX_SCATTER +/* Support receive scatter DMA */ +# if !(EFX_OPTS_EF10() || EFSYS_OPT_SIENA) +# error "RX_SCATTER requires EF10 arch or SIENA" +# endif +#endif /* EFSYS_OPT_RX_SCATTER */ + +#ifdef EFSYS_OPT_STAT_NAME +# error "STAT_NAME is obsolete (replaced by NAMES)." 
+#endif + +#if EFSYS_OPT_VPD +/* Support PCI Vital Product Data (VPD) */ +# if !(EFX_OPTS_EF10() || EFSYS_OPT_SIENA) +# error "VPD requires EF10 arch or SIENA" +# endif +#endif /* EFSYS_OPT_VPD */ + +#ifdef EFSYS_OPT_WOL +# error "WOL is obsolete and is not supported" +#endif /* EFSYS_OPT_WOL */ + +#ifdef EFSYS_OPT_MCAST_FILTER_LIST +# error "MCAST_FILTER_LIST is obsolete and is not supported" +#endif + +#if EFSYS_OPT_BIST +/* Support BIST */ +# if !(EFX_OPTS_EF10() || EFSYS_OPT_SIENA) +# error "BIST requires EF10 arch or SIENA" +# endif +#endif /* EFSYS_OPT_BIST */ + +#if EFSYS_OPT_LICENSING +/* Support MCDI licensing API */ +# if !EFSYS_OPT_MCDI +# error "LICENSING requires MCDI" +# endif +# if !EFSYS_HAS_UINT64 +# error "LICENSING requires UINT64" +# endif +#endif /* EFSYS_OPT_LICENSING */ + +#if EFSYS_OPT_ALLOW_UNCONFIGURED_NIC +/* Support adapters with missing static config (for factory use only) */ +# if !(EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2) +# error "ALLOW_UNCONFIGURED_NIC requires MEDFORD or MEDFORD2" +# endif +#endif /* EFSYS_OPT_ALLOW_UNCONFIGURED_NIC */ + +#if EFSYS_OPT_RX_PACKED_STREAM +/* Support packed stream mode */ +# if !EFX_OPTS_EF10() +# error "PACKED_STREAM requires EF10 arch" +# endif +#endif + +#if EFSYS_OPT_RX_ES_SUPER_BUFFER +/* Support equal stride super-buffer mode */ +# if !(EFSYS_OPT_MEDFORD2) +# error "ES_SUPER_BUFFER requires MEDFORD2" +# endif +#endif + +/* Support hardware assistance for tunnels */ +#if EFSYS_OPT_TUNNEL +# if !(EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2) +# error "TUNNEL requires MEDFORD or MEDFORD2" +# endif +#endif /* EFSYS_OPT_TUNNEL */ + +#if EFSYS_OPT_FW_SUBVARIANT_AWARE +/* Advertise that the driver is firmware subvariant aware */ +# if !(EFSYS_OPT_MEDFORD2) +# error "FW_SUBVARIANT_AWARE requires MEDFORD2" +# endif +#endif + +#if EFSYS_OPT_EVB +/* Support enterprise virtual bridging */ +# if !(EFX_OPTS_EF10()) +# error "EVB requires EF10 arch" +# endif +#endif /* EFSYS_OPT_EVB */ + +#endif /* _SYS_EFX_CHECK_H */ diff --git a/src/spdk/dpdk/drivers/net/sfc/base/efx_crc32.c b/src/spdk/dpdk/drivers/net/sfc/base/efx_crc32.c new file mode 100644 index 000000000..476d39375 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/base/efx_crc32.c @@ -0,0 +1,98 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2013-2019 Solarflare Communications Inc. 
+ */ + +#include "efx.h" +#include "efx_impl.h" + +/* + * Precomputed table for computing IEEE 802.3 CRC32 + * with polynomial 0x04c11db7 (bit-reversed 0xedb88320) + */ + +static const uint32_t efx_crc32_table[256] = { + 0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, + 0x076dc419, 0x706af48f, 0xe963a535, 0x9e6495a3, + 0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988, + 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91, + 0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de, + 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7, + 0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, + 0x14015c4f, 0x63066cd9, 0xfa0f3d63, 0x8d080df5, + 0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172, + 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b, + 0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940, + 0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59, + 0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116, + 0x21b4f4b5, 0x56b3c423, 0xcfba9599, 0xb8bda50f, + 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924, + 0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d, + 0x76dc4190, 0x01db7106, 0x98d220bc, 0xefd5102a, + 0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433, + 0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818, + 0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01, + 0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e, + 0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457, + 0x65b0d9c6, 0x12b7e950, 0x8bbeb8ea, 0xfcb9887c, + 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65, + 0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2, + 0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb, + 0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0, + 0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9, + 0x5005713c, 0x270241aa, 0xbe0b1010, 0xc90c2086, + 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f, + 0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, + 0x59b33d17, 0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad, + 0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a, + 0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683, + 0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8, + 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1, + 0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe, + 0xf762575d, 0x806567cb, 0x196c3671, 0x6e6b06e7, + 0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc, + 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5, + 0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252, + 0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b, + 0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60, + 0xdf60efc3, 0xa867df55, 0x316e8eef, 0x4669be79, + 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236, + 0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f, + 0xc5ba3bbe, 0xb2bd0b28, 0x2bb45a92, 0x5cb36a04, + 0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d, + 0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a, + 0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713, + 0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38, + 0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21, + 0x86d3d2d4, 0xf1d4e242, 0x68ddb3f8, 0x1fda836e, + 0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777, + 0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c, + 0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45, + 0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2, + 0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db, + 0xaed16a4a, 0xd9d65adc, 0x40df0b66, 0x37d83bf0, + 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9, + 0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, + 0xbad03605, 0xcdd70693, 0x54de5729, 0x23d967bf, + 0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94, + 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d +}; + +/* Calculate the IEEE 802.3 CRC32 of a MAC addr */ + __checkReturn uint32_t +efx_crc32_calculate( + __in uint32_t crc_init, + 
__in_ecount(length) uint8_t const *input, + __in int length) +{ + int index; + uint32_t crc = crc_init; + + for (index = 0; index < length; index++) { + uint32_t data = *(input++); + crc = (crc >> 8) ^ efx_crc32_table[(crc ^ data) & 0xff]; + } + + return (crc); +} diff --git a/src/spdk/dpdk/drivers/net/sfc/base/efx_ev.c b/src/spdk/dpdk/drivers/net/sfc/base/efx_ev.c new file mode 100644 index 000000000..e6a8d4ca1 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/base/efx_ev.c @@ -0,0 +1,1486 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2007-2019 Solarflare Communications Inc. + */ + +#include "efx.h" +#include "efx_impl.h" +#if EFSYS_OPT_MON_MCDI +#include "mcdi_mon.h" +#endif + +#define EFX_EV_PRESENT(_qword) \ + (EFX_QWORD_FIELD((_qword), EFX_DWORD_0) != 0xffffffff && \ + EFX_QWORD_FIELD((_qword), EFX_DWORD_1) != 0xffffffff) + + + +#if EFSYS_OPT_SIENA + +static __checkReturn efx_rc_t +siena_ev_init( + __in efx_nic_t *enp); + +static void +siena_ev_fini( + __in efx_nic_t *enp); + +static __checkReturn efx_rc_t +siena_ev_qcreate( + __in efx_nic_t *enp, + __in unsigned int index, + __in efsys_mem_t *esmp, + __in size_t ndescs, + __in uint32_t id, + __in uint32_t us, + __in uint32_t flags, + __in efx_evq_t *eep); + +static void +siena_ev_qdestroy( + __in efx_evq_t *eep); + +static __checkReturn efx_rc_t +siena_ev_qprime( + __in efx_evq_t *eep, + __in unsigned int count); + +static void +siena_ev_qpost( + __in efx_evq_t *eep, + __in uint16_t data); + +static __checkReturn efx_rc_t +siena_ev_qmoderate( + __in efx_evq_t *eep, + __in unsigned int us); + +#if EFSYS_OPT_QSTATS +static void +siena_ev_qstats_update( + __in efx_evq_t *eep, + __inout_ecount(EV_NQSTATS) efsys_stat_t *stat); + +#endif + +#endif /* EFSYS_OPT_SIENA */ + +#if EFSYS_OPT_SIENA +static const efx_ev_ops_t __efx_ev_siena_ops = { + siena_ev_init, /* eevo_init */ + siena_ev_fini, /* eevo_fini */ + siena_ev_qcreate, /* eevo_qcreate */ + siena_ev_qdestroy, /* eevo_qdestroy */ + siena_ev_qprime, /* eevo_qprime */ + siena_ev_qpost, /* eevo_qpost */ + siena_ev_qmoderate, /* eevo_qmoderate */ +#if EFSYS_OPT_QSTATS + siena_ev_qstats_update, /* eevo_qstats_update */ +#endif +}; +#endif /* EFSYS_OPT_SIENA */ + +#if EFX_OPTS_EF10() +static const efx_ev_ops_t __efx_ev_ef10_ops = { + ef10_ev_init, /* eevo_init */ + ef10_ev_fini, /* eevo_fini */ + ef10_ev_qcreate, /* eevo_qcreate */ + ef10_ev_qdestroy, /* eevo_qdestroy */ + ef10_ev_qprime, /* eevo_qprime */ + ef10_ev_qpost, /* eevo_qpost */ + ef10_ev_qmoderate, /* eevo_qmoderate */ +#if EFSYS_OPT_QSTATS + ef10_ev_qstats_update, /* eevo_qstats_update */ +#endif +}; +#endif /* EFX_OPTS_EF10() */ + + + __checkReturn efx_rc_t +efx_ev_init( + __in efx_nic_t *enp) +{ + const efx_ev_ops_t *eevop; + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR); + + if (enp->en_mod_flags & EFX_MOD_EV) { + rc = EINVAL; + goto fail1; + } + + switch (enp->en_family) { +#if EFSYS_OPT_SIENA + case EFX_FAMILY_SIENA: + eevop = &__efx_ev_siena_ops; + break; +#endif /* EFSYS_OPT_SIENA */ + +#if EFSYS_OPT_HUNTINGTON + case EFX_FAMILY_HUNTINGTON: + eevop = &__efx_ev_ef10_ops; + break; +#endif /* EFSYS_OPT_HUNTINGTON */ + +#if EFSYS_OPT_MEDFORD + case EFX_FAMILY_MEDFORD: + eevop = &__efx_ev_ef10_ops; + break; +#endif /* EFSYS_OPT_MEDFORD */ + +#if EFSYS_OPT_MEDFORD2 + case EFX_FAMILY_MEDFORD2: + eevop = &__efx_ev_ef10_ops; + break; +#endif /* EFSYS_OPT_MEDFORD2 */ + + default: + EFSYS_ASSERT(0); + 
rc = ENOTSUP; + goto fail1; + } + + EFSYS_ASSERT3U(enp->en_ev_qcount, ==, 0); + + if ((rc = eevop->eevo_init(enp)) != 0) + goto fail2; + + enp->en_eevop = eevop; + enp->en_mod_flags |= EFX_MOD_EV; + return (0); + +fail2: + EFSYS_PROBE(fail2); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + enp->en_eevop = NULL; + enp->en_mod_flags &= ~EFX_MOD_EV; + return (rc); +} + + __checkReturn size_t +efx_evq_size( + __in const efx_nic_t *enp, + __in unsigned int ndescs) +{ + const efx_nic_cfg_t *encp = efx_nic_cfg_get(enp); + + return (ndescs * encp->enc_ev_desc_size); +} + + __checkReturn unsigned int +efx_evq_nbufs( + __in const efx_nic_t *enp, + __in unsigned int ndescs) +{ + return (EFX_DIV_ROUND_UP(efx_evq_size(enp, ndescs), EFX_BUF_SIZE)); +} + + void +efx_ev_fini( + __in efx_nic_t *enp) +{ + const efx_ev_ops_t *eevop = enp->en_eevop; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_EV); + EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_RX)); + EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_TX)); + EFSYS_ASSERT3U(enp->en_ev_qcount, ==, 0); + + eevop->eevo_fini(enp); + + enp->en_eevop = NULL; + enp->en_mod_flags &= ~EFX_MOD_EV; +} + + + __checkReturn efx_rc_t +efx_ev_qcreate( + __in efx_nic_t *enp, + __in unsigned int index, + __in efsys_mem_t *esmp, + __in size_t ndescs, + __in uint32_t id, + __in uint32_t us, + __in uint32_t flags, + __deref_out efx_evq_t **eepp) +{ + const efx_ev_ops_t *eevop = enp->en_eevop; + efx_evq_t *eep; + const efx_nic_cfg_t *encp = efx_nic_cfg_get(enp); + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_EV); + + EFSYS_ASSERT3U(enp->en_ev_qcount + 1, <, + enp->en_nic_cfg.enc_evq_limit); + + switch (flags & EFX_EVQ_FLAGS_NOTIFY_MASK) { + case EFX_EVQ_FLAGS_NOTIFY_INTERRUPT: + break; + case EFX_EVQ_FLAGS_NOTIFY_DISABLED: + if (us != 0) { + rc = EINVAL; + goto fail1; + } + break; + default: + rc = EINVAL; + goto fail2; + } + + EFSYS_ASSERT(ISP2(encp->enc_evq_max_nevs)); + EFSYS_ASSERT(ISP2(encp->enc_evq_min_nevs)); + + if (!ISP2(ndescs) || + ndescs < encp->enc_evq_min_nevs || + ndescs > encp->enc_evq_max_nevs) { + rc = EINVAL; + goto fail3; + } + + /* Allocate an EVQ object */ + EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (efx_evq_t), eep); + if (eep == NULL) { + rc = ENOMEM; + goto fail4; + } + + eep->ee_magic = EFX_EVQ_MAGIC; + eep->ee_enp = enp; + eep->ee_index = index; + eep->ee_mask = ndescs - 1; + eep->ee_flags = flags; + eep->ee_esmp = esmp; + + /* + * Set outputs before the queue is created because interrupts may be + * raised for events immediately after the queue is created, before the + * function call below returns. See bug58606. + * + * The eepp pointer passed in by the client must therefore point to data + * shared with the client's event processing context. 
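+	 * If the hardware queue creation below fails, both outputs are
+	 * rolled back before returning.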
+ */ + enp->en_ev_qcount++; + *eepp = eep; + + if ((rc = eevop->eevo_qcreate(enp, index, esmp, ndescs, id, us, flags, + eep)) != 0) + goto fail5; + + return (0); + +fail5: + EFSYS_PROBE(fail5); + + *eepp = NULL; + enp->en_ev_qcount--; + EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_evq_t), eep); +fail4: + EFSYS_PROBE(fail4); +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + return (rc); +} + + void +efx_ev_qdestroy( + __in efx_evq_t *eep) +{ + efx_nic_t *enp = eep->ee_enp; + const efx_ev_ops_t *eevop = enp->en_eevop; + + EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC); + + EFSYS_ASSERT(enp->en_ev_qcount != 0); + --enp->en_ev_qcount; + + eevop->eevo_qdestroy(eep); + + /* Free the EVQ object */ + EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_evq_t), eep); +} + + __checkReturn efx_rc_t +efx_ev_qprime( + __in efx_evq_t *eep, + __in unsigned int count) +{ + efx_nic_t *enp = eep->ee_enp; + const efx_ev_ops_t *eevop = enp->en_eevop; + efx_rc_t rc; + + EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC); + + if (!(enp->en_mod_flags & EFX_MOD_INTR)) { + rc = EINVAL; + goto fail1; + } + + if ((rc = eevop->eevo_qprime(eep, count)) != 0) + goto fail2; + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + return (rc); +} + + __checkReturn boolean_t +efx_ev_qpending( + __in efx_evq_t *eep, + __in unsigned int count) +{ + size_t offset; + efx_qword_t qword; + + EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC); + + offset = (count & eep->ee_mask) * sizeof (efx_qword_t); + EFSYS_MEM_READQ(eep->ee_esmp, offset, &qword); + + return (EFX_EV_PRESENT(qword)); +} + +#if EFSYS_OPT_EV_PREFETCH + + void +efx_ev_qprefetch( + __in efx_evq_t *eep, + __in unsigned int count) +{ + unsigned int offset; + + EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC); + + offset = (count & eep->ee_mask) * sizeof (efx_qword_t); + EFSYS_MEM_PREFETCH(eep->ee_esmp, offset); +} + +#endif /* EFSYS_OPT_EV_PREFETCH */ + +#define EFX_EV_BATCH 8 + + void +efx_ev_qpoll( + __in efx_evq_t *eep, + __inout unsigned int *countp, + __in const efx_ev_callbacks_t *eecp, + __in_opt void *arg) +{ + efx_qword_t ev[EFX_EV_BATCH]; + unsigned int batch; + unsigned int total; + unsigned int count; + unsigned int index; + size_t offset; + + /* Ensure events codes match for EF10 (Huntington/Medford) and Siena */ + EFX_STATIC_ASSERT(ESF_DZ_EV_CODE_LBN == FSF_AZ_EV_CODE_LBN); + EFX_STATIC_ASSERT(ESF_DZ_EV_CODE_WIDTH == FSF_AZ_EV_CODE_WIDTH); + + EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_RX_EV == FSE_AZ_EV_CODE_RX_EV); + EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_TX_EV == FSE_AZ_EV_CODE_TX_EV); + EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_DRIVER_EV == FSE_AZ_EV_CODE_DRIVER_EV); + EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_DRV_GEN_EV == + FSE_AZ_EV_CODE_DRV_GEN_EV); +#if EFSYS_OPT_MCDI + EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_MCDI_EV == + FSE_AZ_EV_CODE_MCDI_EVRESPONSE); +#endif + + EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC); + EFSYS_ASSERT(countp != NULL); + EFSYS_ASSERT(eecp != NULL); + + count = *countp; + do { + /* Read up until the end of the batch period */ + batch = EFX_EV_BATCH - (count & (EFX_EV_BATCH - 1)); + offset = (count & eep->ee_mask) * sizeof (efx_qword_t); + for (total = 0; total < batch; ++total) { + EFSYS_MEM_READQ(eep->ee_esmp, offset, &(ev[total])); + + if (!EFX_EV_PRESENT(ev[total])) + break; + + EFSYS_PROBE3(event, unsigned int, eep->ee_index, + uint32_t, EFX_QWORD_FIELD(ev[total], EFX_DWORD_1), + uint32_t, EFX_QWORD_FIELD(ev[total], EFX_DWORD_0)); + + offset += sizeof (efx_qword_t); + } + 
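+		/*
+		 * 'total' now holds the number of events present in this
+		 * batch; it is less than 'batch' if the queue drained first.
+		 */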
+#if EFSYS_OPT_EV_PREFETCH && (EFSYS_OPT_EV_PREFETCH_PERIOD > 1) + /* + * Prefetch the next batch when we get within PREFETCH_PERIOD + * of a completed batch. If the batch is smaller, then prefetch + * immediately. + */ + if (total == batch && total < EFSYS_OPT_EV_PREFETCH_PERIOD) + EFSYS_MEM_PREFETCH(eep->ee_esmp, offset); +#endif /* EFSYS_OPT_EV_PREFETCH */ + + /* Process the batch of events */ + for (index = 0; index < total; ++index) { + boolean_t should_abort; + uint32_t code; + +#if EFSYS_OPT_EV_PREFETCH + /* Prefetch if we've now reached the batch period */ + if (total == batch && + index + EFSYS_OPT_EV_PREFETCH_PERIOD == total) { + offset = (count + batch) & eep->ee_mask; + offset *= sizeof (efx_qword_t); + + EFSYS_MEM_PREFETCH(eep->ee_esmp, offset); + } +#endif /* EFSYS_OPT_EV_PREFETCH */ + + EFX_EV_QSTAT_INCR(eep, EV_ALL); + + code = EFX_QWORD_FIELD(ev[index], FSF_AZ_EV_CODE); + switch (code) { + case FSE_AZ_EV_CODE_RX_EV: + should_abort = eep->ee_rx(eep, + &(ev[index]), eecp, arg); + break; + case FSE_AZ_EV_CODE_TX_EV: + should_abort = eep->ee_tx(eep, + &(ev[index]), eecp, arg); + break; + case FSE_AZ_EV_CODE_DRIVER_EV: + should_abort = eep->ee_driver(eep, + &(ev[index]), eecp, arg); + break; + case FSE_AZ_EV_CODE_DRV_GEN_EV: + should_abort = eep->ee_drv_gen(eep, + &(ev[index]), eecp, arg); + break; +#if EFSYS_OPT_MCDI + case FSE_AZ_EV_CODE_MCDI_EVRESPONSE: + should_abort = eep->ee_mcdi(eep, + &(ev[index]), eecp, arg); + break; +#endif + case FSE_AZ_EV_CODE_GLOBAL_EV: + if (eep->ee_global) { + should_abort = eep->ee_global(eep, + &(ev[index]), eecp, arg); + break; + } + /* else fallthrough */ + default: + EFSYS_PROBE3(bad_event, + unsigned int, eep->ee_index, + uint32_t, + EFX_QWORD_FIELD(ev[index], EFX_DWORD_1), + uint32_t, + EFX_QWORD_FIELD(ev[index], EFX_DWORD_0)); + + EFSYS_ASSERT(eecp->eec_exception != NULL); + (void) eecp->eec_exception(arg, + EFX_EXCEPTION_EV_ERROR, code); + should_abort = B_TRUE; + } + if (should_abort) { + /* Ignore subsequent events */ + total = index + 1; + + /* + * Poison batch to ensure the outer + * loop is broken out of. + */ + EFSYS_ASSERT(batch <= EFX_EV_BATCH); + batch += (EFX_EV_BATCH << 1); + EFSYS_ASSERT(total != batch); + break; + } + } + + /* + * Now that the hardware has most likely moved onto dma'ing + * into the next cache line, clear the processed events. 
Take + * care to only clear out events that we've processed + */ + EFX_SET_QWORD(ev[0]); + offset = (count & eep->ee_mask) * sizeof (efx_qword_t); + for (index = 0; index < total; ++index) { + EFSYS_MEM_WRITEQ(eep->ee_esmp, offset, &(ev[0])); + offset += sizeof (efx_qword_t); + } + + count += total; + + } while (total == batch); + + *countp = count; +} + + void +efx_ev_qpost( + __in efx_evq_t *eep, + __in uint16_t data) +{ + efx_nic_t *enp = eep->ee_enp; + const efx_ev_ops_t *eevop = enp->en_eevop; + + EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC); + + EFSYS_ASSERT(eevop != NULL && + eevop->eevo_qpost != NULL); + + eevop->eevo_qpost(eep, data); +} + + __checkReturn efx_rc_t +efx_ev_usecs_to_ticks( + __in efx_nic_t *enp, + __in unsigned int us, + __out unsigned int *ticksp) +{ + efx_nic_cfg_t *encp = &(enp->en_nic_cfg); + unsigned int ticks; + efx_rc_t rc; + + if (encp->enc_evq_timer_quantum_ns == 0) { + rc = ENOTSUP; + goto fail1; + } + + /* Convert microseconds to a timer tick count */ + if (us == 0) + ticks = 0; + else if (us * 1000 < encp->enc_evq_timer_quantum_ns) + ticks = 1; /* Never round down to zero */ + else + ticks = us * 1000 / encp->enc_evq_timer_quantum_ns; + + *ticksp = ticks; + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + return (rc); +} + + __checkReturn efx_rc_t +efx_ev_qmoderate( + __in efx_evq_t *eep, + __in unsigned int us) +{ + efx_nic_t *enp = eep->ee_enp; + const efx_ev_ops_t *eevop = enp->en_eevop; + efx_rc_t rc; + + EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC); + + if ((eep->ee_flags & EFX_EVQ_FLAGS_NOTIFY_MASK) == + EFX_EVQ_FLAGS_NOTIFY_DISABLED) { + rc = EINVAL; + goto fail1; + } + + if ((rc = eevop->eevo_qmoderate(eep, us)) != 0) + goto fail2; + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + return (rc); +} + +#if EFSYS_OPT_QSTATS + void +efx_ev_qstats_update( + __in efx_evq_t *eep, + __inout_ecount(EV_NQSTATS) efsys_stat_t *stat) + +{ efx_nic_t *enp = eep->ee_enp; + const efx_ev_ops_t *eevop = enp->en_eevop; + + EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC); + + eevop->eevo_qstats_update(eep, stat); +} + +#endif /* EFSYS_OPT_QSTATS */ + +#if EFSYS_OPT_SIENA + +static __checkReturn efx_rc_t +siena_ev_init( + __in efx_nic_t *enp) +{ + efx_oword_t oword; + + /* + * Program the event queue for receive and transmit queue + * flush events. + */ + EFX_BAR_READO(enp, FR_AZ_DP_CTRL_REG, &oword); + EFX_SET_OWORD_FIELD(oword, FRF_AZ_FLS_EVQ_ID, 0); + EFX_BAR_WRITEO(enp, FR_AZ_DP_CTRL_REG, &oword); + + return (0); + +} + +static __checkReturn boolean_t +siena_ev_rx_not_ok( + __in efx_evq_t *eep, + __in efx_qword_t *eqp, + __in uint32_t label, + __in uint32_t id, + __inout uint16_t *flagsp) +{ + boolean_t ignore = B_FALSE; + + if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_TOBE_DISC) != 0) { + EFX_EV_QSTAT_INCR(eep, EV_RX_TOBE_DISC); + EFSYS_PROBE(tobe_disc); + /* + * Assume this is a unicast address mismatch, unless below + * we find either FSF_AZ_RX_EV_ETH_CRC_ERR or + * EV_RX_PAUSE_FRM_ERR is set. + */ + (*flagsp) |= EFX_ADDR_MISMATCH; + } + + if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_FRM_TRUNC) != 0) { + EFSYS_PROBE2(frm_trunc, uint32_t, label, uint32_t, id); + EFX_EV_QSTAT_INCR(eep, EV_RX_FRM_TRUNC); + (*flagsp) |= EFX_DISCARD; + +#if EFSYS_OPT_RX_SCATTER + /* + * Lookout for payload queue ran dry errors and ignore them. + * + * Sadly for the header/data split cases, the descriptor + * pointer in this event refers to the header queue and + * therefore cannot be easily detected as duplicate. 
+ * So we drop these and rely on the receive processing seeing + * a subsequent packet with FSF_AZ_RX_EV_SOP set to discard + * the partially received packet. + */ + if ((EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_SOP) == 0) && + (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_JUMBO_CONT) == 0) && + (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_BYTE_CNT) == 0)) + ignore = B_TRUE; +#endif /* EFSYS_OPT_RX_SCATTER */ + } + + if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_ETH_CRC_ERR) != 0) { + EFX_EV_QSTAT_INCR(eep, EV_RX_ETH_CRC_ERR); + EFSYS_PROBE(crc_err); + (*flagsp) &= ~EFX_ADDR_MISMATCH; + (*flagsp) |= EFX_DISCARD; + } + + if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_PAUSE_FRM_ERR) != 0) { + EFX_EV_QSTAT_INCR(eep, EV_RX_PAUSE_FRM_ERR); + EFSYS_PROBE(pause_frm_err); + (*flagsp) &= ~EFX_ADDR_MISMATCH; + (*flagsp) |= EFX_DISCARD; + } + + if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_BUF_OWNER_ID_ERR) != 0) { + EFX_EV_QSTAT_INCR(eep, EV_RX_BUF_OWNER_ID_ERR); + EFSYS_PROBE(owner_id_err); + (*flagsp) |= EFX_DISCARD; + } + + if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR) != 0) { + EFX_EV_QSTAT_INCR(eep, EV_RX_IPV4_HDR_CHKSUM_ERR); + EFSYS_PROBE(ipv4_err); + (*flagsp) &= ~EFX_CKSUM_IPV4; + } + + if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR) != 0) { + EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_UDP_CHKSUM_ERR); + EFSYS_PROBE(udp_chk_err); + (*flagsp) &= ~EFX_CKSUM_TCPUDP; + } + + if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_IP_FRAG_ERR) != 0) { + EFX_EV_QSTAT_INCR(eep, EV_RX_IP_FRAG_ERR); + + /* + * If IP is fragmented FSF_AZ_RX_EV_IP_FRAG_ERR is set. This + * causes FSF_AZ_RX_EV_PKT_OK to be clear. This is not an error + * condition. + */ + (*flagsp) &= ~(EFX_PKT_TCP | EFX_PKT_UDP | EFX_CKSUM_TCPUDP); + } + + return (ignore); +} + +static __checkReturn boolean_t +siena_ev_rx( + __in efx_evq_t *eep, + __in efx_qword_t *eqp, + __in const efx_ev_callbacks_t *eecp, + __in_opt void *arg) +{ + uint32_t id; + uint32_t size; + uint32_t label; + boolean_t ok; +#if EFSYS_OPT_RX_SCATTER + boolean_t sop; + boolean_t jumbo_cont; +#endif /* EFSYS_OPT_RX_SCATTER */ + uint32_t hdr_type; + boolean_t is_v6; + uint16_t flags; + boolean_t ignore; + boolean_t should_abort; + + EFX_EV_QSTAT_INCR(eep, EV_RX); + + /* Basic packet information */ + id = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_DESC_PTR); + size = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_BYTE_CNT); + label = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_Q_LABEL); + ok = (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_PKT_OK) != 0); + +#if EFSYS_OPT_RX_SCATTER + sop = (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_SOP) != 0); + jumbo_cont = (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_JUMBO_CONT) != 0); +#endif /* EFSYS_OPT_RX_SCATTER */ + + hdr_type = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_HDR_TYPE); + + is_v6 = (EFX_QWORD_FIELD(*eqp, FSF_CZ_RX_EV_IPV6_PKT) != 0); + + /* + * If packet is marked as OK and packet type is TCP/IP or + * UDP/IP or other IP, then we can rely on the hardware checksums. 
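+	 * The switch below records which checksums (IPv4 header and/or
+	 * TCP/UDP payload) the hardware has validated for each header type.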
+ */ + switch (hdr_type) { + case FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_TCP: + flags = EFX_PKT_TCP | EFX_CKSUM_TCPUDP; + if (is_v6) { + EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV6); + flags |= EFX_PKT_IPV6; + } else { + EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV4); + flags |= EFX_PKT_IPV4 | EFX_CKSUM_IPV4; + } + break; + + case FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_UDP: + flags = EFX_PKT_UDP | EFX_CKSUM_TCPUDP; + if (is_v6) { + EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV6); + flags |= EFX_PKT_IPV6; + } else { + EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV4); + flags |= EFX_PKT_IPV4 | EFX_CKSUM_IPV4; + } + break; + + case FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_OTHER: + if (is_v6) { + EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV6); + flags = EFX_PKT_IPV6; + } else { + EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV4); + flags = EFX_PKT_IPV4 | EFX_CKSUM_IPV4; + } + break; + + case FSE_AZ_RX_EV_HDR_TYPE_OTHER: + EFX_EV_QSTAT_INCR(eep, EV_RX_NON_IP); + flags = 0; + break; + + default: + EFSYS_ASSERT(B_FALSE); + flags = 0; + break; + } + +#if EFSYS_OPT_RX_SCATTER + /* Report scatter and header/lookahead split buffer flags */ + if (sop) + flags |= EFX_PKT_START; + if (jumbo_cont) + flags |= EFX_PKT_CONT; +#endif /* EFSYS_OPT_RX_SCATTER */ + + /* Detect errors included in the FSF_AZ_RX_EV_PKT_OK indication */ + if (!ok) { + ignore = siena_ev_rx_not_ok(eep, eqp, label, id, &flags); + if (ignore) { + EFSYS_PROBE4(rx_complete, uint32_t, label, uint32_t, id, + uint32_t, size, uint16_t, flags); + + return (B_FALSE); + } + } + + /* If we're not discarding the packet then it is ok */ + if (~flags & EFX_DISCARD) + EFX_EV_QSTAT_INCR(eep, EV_RX_OK); + + /* Detect multicast packets that didn't match the filter */ + if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_MCAST_PKT) != 0) { + EFX_EV_QSTAT_INCR(eep, EV_RX_MCAST_PKT); + + if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_MCAST_HASH_MATCH) != 0) { + EFX_EV_QSTAT_INCR(eep, EV_RX_MCAST_HASH_MATCH); + } else { + EFSYS_PROBE(mcast_mismatch); + flags |= EFX_ADDR_MISMATCH; + } + } else { + flags |= EFX_PKT_UNICAST; + } + + /* + * The packet parser in Siena can abort parsing packets under + * certain error conditions, setting the PKT_NOT_PARSED bit + * (which clears PKT_OK). If this is set, then don't trust + * the PKT_TYPE field. 
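+	 * In that case EFX_CHECK_VLAN is set in the reported flags instead
+	 * of deriving EFX_PKT_VLAN_TAGGED from PKT_TYPE.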
+ */ + if (!ok) { + uint32_t parse_err; + + parse_err = EFX_QWORD_FIELD(*eqp, FSF_CZ_RX_EV_PKT_NOT_PARSED); + if (parse_err != 0) + flags |= EFX_CHECK_VLAN; + } + + if (~flags & EFX_CHECK_VLAN) { + uint32_t pkt_type; + + pkt_type = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_PKT_TYPE); + if (pkt_type >= FSE_AZ_RX_EV_PKT_TYPE_VLAN) + flags |= EFX_PKT_VLAN_TAGGED; + } + + EFSYS_PROBE4(rx_complete, uint32_t, label, uint32_t, id, + uint32_t, size, uint16_t, flags); + + EFSYS_ASSERT(eecp->eec_rx != NULL); + should_abort = eecp->eec_rx(arg, label, id, size, flags); + + return (should_abort); +} + +static __checkReturn boolean_t +siena_ev_tx( + __in efx_evq_t *eep, + __in efx_qword_t *eqp, + __in const efx_ev_callbacks_t *eecp, + __in_opt void *arg) +{ + uint32_t id; + uint32_t label; + boolean_t should_abort; + + EFX_EV_QSTAT_INCR(eep, EV_TX); + + if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_COMP) != 0 && + EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_ERR) == 0 && + EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_TOO_BIG) == 0 && + EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_WQ_FF_FULL) == 0) { + + id = EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_DESC_PTR); + label = EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_Q_LABEL); + + EFSYS_PROBE2(tx_complete, uint32_t, label, uint32_t, id); + + EFSYS_ASSERT(eecp->eec_tx != NULL); + should_abort = eecp->eec_tx(arg, label, id); + + return (should_abort); + } + + if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_COMP) != 0) + EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index, + uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1), + uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0)); + + if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_ERR) != 0) + EFX_EV_QSTAT_INCR(eep, EV_TX_PKT_ERR); + + if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_TOO_BIG) != 0) + EFX_EV_QSTAT_INCR(eep, EV_TX_PKT_TOO_BIG); + + if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_WQ_FF_FULL) != 0) + EFX_EV_QSTAT_INCR(eep, EV_TX_WQ_FF_FULL); + + EFX_EV_QSTAT_INCR(eep, EV_TX_UNEXPECTED); + return (B_FALSE); +} + +static __checkReturn boolean_t +siena_ev_global( + __in efx_evq_t *eep, + __in efx_qword_t *eqp, + __in const efx_ev_callbacks_t *eecp, + __in_opt void *arg) +{ + _NOTE(ARGUNUSED(eqp, eecp, arg)) + + EFX_EV_QSTAT_INCR(eep, EV_GLOBAL); + + return (B_FALSE); +} + +static __checkReturn boolean_t +siena_ev_driver( + __in efx_evq_t *eep, + __in efx_qword_t *eqp, + __in const efx_ev_callbacks_t *eecp, + __in_opt void *arg) +{ + boolean_t should_abort; + + EFX_EV_QSTAT_INCR(eep, EV_DRIVER); + should_abort = B_FALSE; + + switch (EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBCODE)) { + case FSE_AZ_TX_DESCQ_FLS_DONE_EV: { + uint32_t txq_index; + + EFX_EV_QSTAT_INCR(eep, EV_DRIVER_TX_DESCQ_FLS_DONE); + + txq_index = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA); + + EFSYS_PROBE1(tx_descq_fls_done, uint32_t, txq_index); + + EFSYS_ASSERT(eecp->eec_txq_flush_done != NULL); + should_abort = eecp->eec_txq_flush_done(arg, txq_index); + + break; + } + case FSE_AZ_RX_DESCQ_FLS_DONE_EV: { + uint32_t rxq_index; + uint32_t failed; + + rxq_index = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_RX_DESCQ_ID); + failed = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL); + + EFSYS_ASSERT(eecp->eec_rxq_flush_done != NULL); + EFSYS_ASSERT(eecp->eec_rxq_flush_failed != NULL); + + if (failed) { + EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DESCQ_FLS_FAILED); + + EFSYS_PROBE1(rx_descq_fls_failed, uint32_t, rxq_index); + + should_abort = eecp->eec_rxq_flush_failed(arg, + rxq_index); + } else { + EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DESCQ_FLS_DONE); + + EFSYS_PROBE1(rx_descq_fls_done, uint32_t, rxq_index); + + should_abort = 
eecp->eec_rxq_flush_done(arg, rxq_index); + } + + break; + } + case FSE_AZ_EVQ_INIT_DONE_EV: + EFSYS_ASSERT(eecp->eec_initialized != NULL); + should_abort = eecp->eec_initialized(arg); + + break; + + case FSE_AZ_EVQ_NOT_EN_EV: + EFSYS_PROBE(evq_not_en); + break; + + case FSE_AZ_SRM_UPD_DONE_EV: { + uint32_t code; + + EFX_EV_QSTAT_INCR(eep, EV_DRIVER_SRM_UPD_DONE); + + code = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA); + + EFSYS_ASSERT(eecp->eec_sram != NULL); + should_abort = eecp->eec_sram(arg, code); + + break; + } + case FSE_AZ_WAKE_UP_EV: { + uint32_t id; + + id = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA); + + EFSYS_ASSERT(eecp->eec_wake_up != NULL); + should_abort = eecp->eec_wake_up(arg, id); + + break; + } + case FSE_AZ_TX_PKT_NON_TCP_UDP: + EFSYS_PROBE(tx_pkt_non_tcp_udp); + break; + + case FSE_AZ_TIMER_EV: { + uint32_t id; + + id = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA); + + EFSYS_ASSERT(eecp->eec_timer != NULL); + should_abort = eecp->eec_timer(arg, id); + + break; + } + case FSE_AZ_RX_DSC_ERROR_EV: + EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DSC_ERROR); + + EFSYS_PROBE(rx_dsc_error); + + EFSYS_ASSERT(eecp->eec_exception != NULL); + should_abort = eecp->eec_exception(arg, + EFX_EXCEPTION_RX_DSC_ERROR, 0); + + break; + + case FSE_AZ_TX_DSC_ERROR_EV: + EFX_EV_QSTAT_INCR(eep, EV_DRIVER_TX_DSC_ERROR); + + EFSYS_PROBE(tx_dsc_error); + + EFSYS_ASSERT(eecp->eec_exception != NULL); + should_abort = eecp->eec_exception(arg, + EFX_EXCEPTION_TX_DSC_ERROR, 0); + + break; + + default: + break; + } + + return (should_abort); +} + +static __checkReturn boolean_t +siena_ev_drv_gen( + __in efx_evq_t *eep, + __in efx_qword_t *eqp, + __in const efx_ev_callbacks_t *eecp, + __in_opt void *arg) +{ + uint32_t data; + boolean_t should_abort; + + EFX_EV_QSTAT_INCR(eep, EV_DRV_GEN); + + data = EFX_QWORD_FIELD(*eqp, FSF_AZ_EV_DATA_DW0); + if (data >= ((uint32_t)1 << 16)) { + EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index, + uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1), + uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0)); + return (B_TRUE); + } + + EFSYS_ASSERT(eecp->eec_software != NULL); + should_abort = eecp->eec_software(arg, (uint16_t)data); + + return (should_abort); +} + +#if EFSYS_OPT_MCDI + +static __checkReturn boolean_t +siena_ev_mcdi( + __in efx_evq_t *eep, + __in efx_qword_t *eqp, + __in const efx_ev_callbacks_t *eecp, + __in_opt void *arg) +{ + efx_nic_t *enp = eep->ee_enp; + unsigned int code; + boolean_t should_abort = B_FALSE; + + EFSYS_ASSERT3U(enp->en_family, ==, EFX_FAMILY_SIENA); + + if (enp->en_family != EFX_FAMILY_SIENA) + goto out; + + EFSYS_ASSERT(eecp->eec_link_change != NULL); + EFSYS_ASSERT(eecp->eec_exception != NULL); +#if EFSYS_OPT_MON_STATS + EFSYS_ASSERT(eecp->eec_monitor != NULL); +#endif + + EFX_EV_QSTAT_INCR(eep, EV_MCDI_RESPONSE); + + code = EFX_QWORD_FIELD(*eqp, MCDI_EVENT_CODE); + switch (code) { + case MCDI_EVENT_CODE_BADSSERT: + efx_mcdi_ev_death(enp, EINTR); + break; + + case MCDI_EVENT_CODE_CMDDONE: + efx_mcdi_ev_cpl(enp, + MCDI_EV_FIELD(eqp, CMDDONE_SEQ), + MCDI_EV_FIELD(eqp, CMDDONE_DATALEN), + MCDI_EV_FIELD(eqp, CMDDONE_ERRNO)); + break; + + case MCDI_EVENT_CODE_LINKCHANGE: { + efx_link_mode_t link_mode; + + siena_phy_link_ev(enp, eqp, &link_mode); + should_abort = eecp->eec_link_change(arg, link_mode); + break; + } + case MCDI_EVENT_CODE_SENSOREVT: { +#if EFSYS_OPT_MON_STATS + efx_mon_stat_t id; + efx_mon_stat_value_t value; + efx_rc_t rc; + + if ((rc = mcdi_mon_ev(enp, eqp, &id, &value)) == 0) + should_abort = eecp->eec_monitor(arg, id, value); 
+ else if (rc == ENOTSUP) { + should_abort = eecp->eec_exception(arg, + EFX_EXCEPTION_UNKNOWN_SENSOREVT, + MCDI_EV_FIELD(eqp, DATA)); + } else + EFSYS_ASSERT(rc == ENODEV); /* Wrong port */ +#else + should_abort = B_FALSE; +#endif + break; + } + case MCDI_EVENT_CODE_SCHEDERR: + /* Informational only */ + break; + + case MCDI_EVENT_CODE_REBOOT: + efx_mcdi_ev_death(enp, EIO); + break; + + case MCDI_EVENT_CODE_MAC_STATS_DMA: +#if EFSYS_OPT_MAC_STATS + if (eecp->eec_mac_stats != NULL) { + eecp->eec_mac_stats(arg, + MCDI_EV_FIELD(eqp, MAC_STATS_DMA_GENERATION)); + } +#endif + break; + + case MCDI_EVENT_CODE_FWALERT: { + uint32_t reason = MCDI_EV_FIELD(eqp, FWALERT_REASON); + + if (reason == MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS) + should_abort = eecp->eec_exception(arg, + EFX_EXCEPTION_FWALERT_SRAM, + MCDI_EV_FIELD(eqp, FWALERT_DATA)); + else + should_abort = eecp->eec_exception(arg, + EFX_EXCEPTION_UNKNOWN_FWALERT, + MCDI_EV_FIELD(eqp, DATA)); + break; + } + + default: + EFSYS_PROBE1(mc_pcol_error, int, code); + break; + } + +out: + return (should_abort); +} + +#endif /* EFSYS_OPT_MCDI */ + +static __checkReturn efx_rc_t +siena_ev_qprime( + __in efx_evq_t *eep, + __in unsigned int count) +{ + efx_nic_t *enp = eep->ee_enp; + uint32_t rptr; + efx_dword_t dword; + + rptr = count & eep->ee_mask; + + EFX_POPULATE_DWORD_1(dword, FRF_AZ_EVQ_RPTR, rptr); + + EFX_BAR_TBL_WRITED(enp, FR_AZ_EVQ_RPTR_REG, eep->ee_index, + &dword, B_FALSE); + + return (0); +} + +static void +siena_ev_qpost( + __in efx_evq_t *eep, + __in uint16_t data) +{ + efx_nic_t *enp = eep->ee_enp; + efx_qword_t ev; + efx_oword_t oword; + + EFX_POPULATE_QWORD_2(ev, FSF_AZ_EV_CODE, FSE_AZ_EV_CODE_DRV_GEN_EV, + FSF_AZ_EV_DATA_DW0, (uint32_t)data); + + EFX_POPULATE_OWORD_3(oword, FRF_AZ_DRV_EV_QID, eep->ee_index, + EFX_DWORD_0, EFX_QWORD_FIELD(ev, EFX_DWORD_0), + EFX_DWORD_1, EFX_QWORD_FIELD(ev, EFX_DWORD_1)); + + EFX_BAR_WRITEO(enp, FR_AZ_DRV_EV_REG, &oword); +} + +static __checkReturn efx_rc_t +siena_ev_qmoderate( + __in efx_evq_t *eep, + __in unsigned int us) +{ + efx_nic_t *enp = eep->ee_enp; + efx_nic_cfg_t *encp = &(enp->en_nic_cfg); + unsigned int locked; + efx_dword_t dword; + efx_rc_t rc; + + if (us > encp->enc_evq_timer_max_us) { + rc = EINVAL; + goto fail1; + } + + /* If the value is zero then disable the timer */ + if (us == 0) { + EFX_POPULATE_DWORD_2(dword, + FRF_CZ_TC_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS, + FRF_CZ_TC_TIMER_VAL, 0); + } else { + unsigned int ticks; + + if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0) + goto fail2; + + EFSYS_ASSERT(ticks > 0); + EFX_POPULATE_DWORD_2(dword, + FRF_CZ_TC_TIMER_MODE, FFE_CZ_TIMER_MODE_INT_HLDOFF, + FRF_CZ_TC_TIMER_VAL, ticks - 1); + } + + locked = (eep->ee_index == 0) ? 
1 : 0; + + EFX_BAR_TBL_WRITED(enp, FR_BZ_TIMER_COMMAND_REGP0, + eep->ee_index, &dword, locked); + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +static __checkReturn efx_rc_t +siena_ev_qcreate( + __in efx_nic_t *enp, + __in unsigned int index, + __in efsys_mem_t *esmp, + __in size_t ndescs, + __in uint32_t id, + __in uint32_t us, + __in uint32_t flags, + __in efx_evq_t *eep) +{ + efx_nic_cfg_t *encp = &(enp->en_nic_cfg); + uint32_t size; + efx_oword_t oword; + efx_rc_t rc; + boolean_t notify_mode; + + _NOTE(ARGUNUSED(esmp)) + + if (index >= encp->enc_evq_limit) { + rc = EINVAL; + goto fail1; + } +#if EFSYS_OPT_RX_SCALE + if (enp->en_intr.ei_type == EFX_INTR_LINE && + index >= EFX_MAXRSS_LEGACY) { + rc = EINVAL; + goto fail2; + } +#endif + for (size = 0; + (1U << size) <= encp->enc_evq_max_nevs / encp->enc_evq_min_nevs; + size++) + if ((1U << size) == (uint32_t)ndescs / encp->enc_evq_min_nevs) + break; + if (id + (1 << size) >= encp->enc_buftbl_limit) { + rc = EINVAL; + goto fail3; + } + + /* Set up the handler table */ + eep->ee_rx = siena_ev_rx; + eep->ee_tx = siena_ev_tx; + eep->ee_driver = siena_ev_driver; + eep->ee_global = siena_ev_global; + eep->ee_drv_gen = siena_ev_drv_gen; +#if EFSYS_OPT_MCDI + eep->ee_mcdi = siena_ev_mcdi; +#endif /* EFSYS_OPT_MCDI */ + + notify_mode = ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) != + EFX_EVQ_FLAGS_NOTIFY_INTERRUPT); + + /* Set up the new event queue */ + EFX_POPULATE_OWORD_3(oword, FRF_CZ_TIMER_Q_EN, 1, + FRF_CZ_HOST_NOTIFY_MODE, notify_mode, + FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS); + EFX_BAR_TBL_WRITEO(enp, FR_AZ_TIMER_TBL, index, &oword, B_TRUE); + + EFX_POPULATE_OWORD_3(oword, FRF_AZ_EVQ_EN, 1, FRF_AZ_EVQ_SIZE, size, + FRF_AZ_EVQ_BUF_BASE_ID, id); + + EFX_BAR_TBL_WRITEO(enp, FR_AZ_EVQ_PTR_TBL, index, &oword, B_TRUE); + + /* Set initial interrupt moderation */ + siena_ev_qmoderate(eep, us); + + return (0); + +fail3: + EFSYS_PROBE(fail3); +#if EFSYS_OPT_RX_SCALE +fail2: + EFSYS_PROBE(fail2); +#endif +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +#endif /* EFSYS_OPT_SIENA */ + +#if EFSYS_OPT_QSTATS +#if EFSYS_OPT_NAMES +/* START MKCONFIG GENERATED EfxEventQueueStatNamesBlock ac223f7134058b4f */ +static const char * const __efx_ev_qstat_name[] = { + "all", + "rx", + "rx_ok", + "rx_frm_trunc", + "rx_tobe_disc", + "rx_pause_frm_err", + "rx_buf_owner_id_err", + "rx_ipv4_hdr_chksum_err", + "rx_tcp_udp_chksum_err", + "rx_eth_crc_err", + "rx_ip_frag_err", + "rx_mcast_pkt", + "rx_mcast_hash_match", + "rx_tcp_ipv4", + "rx_tcp_ipv6", + "rx_udp_ipv4", + "rx_udp_ipv6", + "rx_other_ipv4", + "rx_other_ipv6", + "rx_non_ip", + "rx_batch", + "tx", + "tx_wq_ff_full", + "tx_pkt_err", + "tx_pkt_too_big", + "tx_unexpected", + "global", + "global_mnt", + "driver", + "driver_srm_upd_done", + "driver_tx_descq_fls_done", + "driver_rx_descq_fls_done", + "driver_rx_descq_fls_failed", + "driver_rx_dsc_error", + "driver_tx_dsc_error", + "drv_gen", + "mcdi_response", + "rx_parse_incomplete", +}; +/* END MKCONFIG GENERATED EfxEventQueueStatNamesBlock */ + + const char * +efx_ev_qstat_name( + __in efx_nic_t *enp, + __in unsigned int id) +{ + _NOTE(ARGUNUSED(enp)) + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(id, <, EV_NQSTATS); + + return (__efx_ev_qstat_name[id]); +} +#endif /* EFSYS_OPT_NAMES */ +#endif /* EFSYS_OPT_QSTATS */ + +#if EFSYS_OPT_SIENA + +#if EFSYS_OPT_QSTATS +static void +siena_ev_qstats_update( + __in efx_evq_t *eep, + __inout_ecount(EV_NQSTATS) 
efsys_stat_t *stat) +{ + unsigned int id; + + for (id = 0; id < EV_NQSTATS; id++) { + efsys_stat_t *essp = &stat[id]; + + EFSYS_STAT_INCR(essp, eep->ee_stat[id]); + eep->ee_stat[id] = 0; + } +} +#endif /* EFSYS_OPT_QSTATS */ + +static void +siena_ev_qdestroy( + __in efx_evq_t *eep) +{ + efx_nic_t *enp = eep->ee_enp; + efx_oword_t oword; + + /* Purge event queue */ + EFX_ZERO_OWORD(oword); + + EFX_BAR_TBL_WRITEO(enp, FR_AZ_EVQ_PTR_TBL, + eep->ee_index, &oword, B_TRUE); + + EFX_ZERO_OWORD(oword); + EFX_BAR_TBL_WRITEO(enp, FR_AZ_TIMER_TBL, eep->ee_index, &oword, B_TRUE); +} + +static void +siena_ev_fini( + __in efx_nic_t *enp) +{ + _NOTE(ARGUNUSED(enp)) +} + +#endif /* EFSYS_OPT_SIENA */ diff --git a/src/spdk/dpdk/drivers/net/sfc/base/efx_evb.c b/src/spdk/dpdk/drivers/net/sfc/base/efx_evb.c new file mode 100644 index 000000000..17318b7e1 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/base/efx_evb.c @@ -0,0 +1,544 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2018-2019 Solarflare Communications Inc. + */ + +#include "efx.h" +#include "efx_impl.h" + + +#if EFSYS_OPT_EVB + +#if EFSYS_OPT_SIENA +static const efx_evb_ops_t __efx_evb_dummy_ops = { + NULL, /* eeo_init */ + NULL, /* eeo_fini */ + NULL, /* eeo_vswitch_alloc */ + NULL, /* eeo_vswitch_free */ + NULL, /* eeo_vport_alloc */ + NULL, /* eeo_vport_free */ + NULL, /* eeo_vport_mac_addr_add */ + NULL, /* eeo_vport_mac_addr_del */ + NULL, /* eeo_vadaptor_alloc */ + NULL, /* eeo_vadaptor_free */ + NULL, /* eeo_vport_assign */ + NULL, /* eeo_vport_reconfigure */ + NULL, /* eeo_vport_stats */ +}; +#endif /* EFSYS_OPT_SIENA */ + +#if EFX_OPTS_EF10() +static const efx_evb_ops_t __efx_evb_ef10_ops = { + ef10_evb_init, /* eeo_init */ + ef10_evb_fini, /* eeo_fini */ + ef10_evb_vswitch_alloc, /* eeo_vswitch_alloc */ + ef10_evb_vswitch_free, /* eeo_vswitch_free */ + ef10_evb_vport_alloc, /* eeo_vport_alloc */ + ef10_evb_vport_free, /* eeo_vport_free */ + ef10_evb_vport_mac_addr_add, /* eeo_vport_mac_addr_add */ + ef10_evb_vport_mac_addr_del, /* eeo_vport_mac_addr_del */ + ef10_evb_vadaptor_alloc, /* eeo_vadaptor_alloc */ + ef10_evb_vadaptor_free, /* eeo_vadaptor_free */ + ef10_evb_vport_assign, /* eeo_vport_assign */ + ef10_evb_vport_reconfigure, /* eeo_vport_reconfigure */ + ef10_evb_vport_stats, /* eeo_vport_stats */ +}; +#endif /* EFX_OPTS_EF10() */ + + __checkReturn efx_rc_t +efx_evb_init( + __in efx_nic_t *enp) +{ + const efx_evb_ops_t *eeop; + efx_rc_t rc; + efx_nic_cfg_t *encp = &(enp->en_nic_cfg); + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); + EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_EVB)); + + switch (enp->en_family) { +#if EFSYS_OPT_SIENA + case EFX_FAMILY_SIENA: + eeop = &__efx_evb_dummy_ops; + break; +#endif /* EFSYS_OPT_SIENA */ + +#if EFSYS_OPT_HUNTINGTON + case EFX_FAMILY_HUNTINGTON: + eeop = &__efx_evb_ef10_ops; + break; +#endif /* EFSYS_OPT_HUNTINGTON */ + +#if EFSYS_OPT_MEDFORD + case EFX_FAMILY_MEDFORD: + eeop = &__efx_evb_ef10_ops; + break; +#endif /* EFSYS_OPT_MEDFORD */ + +#if EFSYS_OPT_MEDFORD2 + case EFX_FAMILY_MEDFORD2: + eeop = &__efx_evb_ef10_ops; + break; +#endif /* EFSYS_OPT_MEDFORD2 */ + + default: + EFSYS_ASSERT(0); + rc = ENOTSUP; + goto fail1; + } + + if (!encp->enc_datapath_cap_evb || !eeop->eeo_init) { + rc = ENOTSUP; + goto fail2; + } + + if ((rc = eeop->eeo_init(enp)) != 0) + goto fail3; + + enp->en_eeop = eeop; + enp->en_mod_flags |= EFX_MOD_EVB; + return (0); + +fail3: + 
EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + void +efx_evb_fini( + __in efx_nic_t *enp) +{ + const efx_evb_ops_t *eeop = enp->en_eeop; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT(enp->en_mod_flags & EFX_MOD_PROBE); + EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_RX)); + EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_TX)); + + if (eeop && eeop->eeo_fini) + eeop->eeo_fini(enp); + + enp->en_eeop = NULL; + enp->en_mod_flags &= ~EFX_MOD_EVB; +} + +/* + * efx_is_zero_eth_addr returns TRUE if the passed MAC address has all bytes + * equal to zero. A vport is assigned a MAC address after creation and this + * function checks if that has happened. It is called in the clean-up function + * before calling eeo_vport_mac_addr_del to ensure that the vport actually had + * an allocated MAC address. + */ + +__checkReturn boolean_t +efx_is_zero_eth_addr( + __in_bcount(EFX_MAC_ADDR_LEN) const uint8_t *addrp) +{ + return (!(addrp[0] | addrp[1] | addrp[2] | + addrp[3] | addrp[4] | addrp[5])); +} + +static void +efx_evb_free_vport( + __in efx_nic_t *enp, + __in efx_vswitch_id_t vswitch_id, + __inout efx_vport_config_t *configp) +{ + const efx_evb_ops_t *eeop = enp->en_eeop; + + /* If any callback fails, continue clean-up with others functions */ + if (EFX_VPORT_PCI_FUNCTION_IS_PF(configp)) { + /* free vadaptor */ + if ((configp->evc_vport_id != EFX_VPORT_ID_INVALID) && + (eeop->eeo_vadaptor_free(enp, vswitch_id, + configp->evc_vport_id) != 0)) { + EFSYS_PROBE2(eeo_vadaptor_free, + uint16_t, configp->evc_function, + uint32_t, configp->evc_vport_id); + } + } else { + if (configp->evc_vport_assigned == B_TRUE) { + if (eeop->eeo_vport_assign(enp, vswitch_id, + EVB_PORT_ID_NULL, + configp->evc_function) != 0) { + EFSYS_PROBE1(eeo_vport_assign, + uint16_t, configp->evc_function); + } + configp->evc_vport_assigned = B_FALSE; + } + } + + /* + * Call eeo_vport_mac_addr_del after checking that this vport is + * actually allocated a MAC address in call to efx_evb_configure_vport + */ + if (!efx_is_zero_eth_addr(configp->evc_mac_addr)) { + if (eeop->eeo_vport_mac_addr_del(enp, vswitch_id, + configp->evc_vport_id, + configp->evc_mac_addr) != 0) { + EFSYS_PROBE1(eeo_vport_mac_addr_del, + uint16_t, configp->evc_function); + } + memset(configp->evc_mac_addr, 0x00, EFX_MAC_ADDR_LEN); + } + + if (configp->evc_vport_id != EFX_VPORT_ID_INVALID) { + if (eeop->eeo_vport_free(enp, vswitch_id, + configp->evc_vport_id) != 0) { + EFSYS_PROBE1(eeo_vport_free, + uint16_t, configp->evc_function); + } + configp->evc_vport_id = EFX_VPORT_ID_INVALID; + } +} + +static void +efx_evb_free_vports( + __in efx_nic_t *enp, + __in efx_vswitch_id_t vswitch_id, + __in uint32_t num_vports, + __inout_ecount(num_vports) efx_vport_config_t *vport_configp) +{ + efx_vport_config_t *configp; + uint32_t i; + + if (vport_configp == NULL) { + EFSYS_PROBE(null_vport_config); + return; + } + + for (i = 0; i < num_vports; i++) { + configp = vport_configp + i; + efx_evb_free_vport(enp, vswitch_id, configp); + } +} + +static __checkReturn efx_rc_t +efx_evb_configure_vport( + __in efx_nic_t *enp, + __in efx_vswitch_id_t vswitch_id, + __in const efx_evb_ops_t *eeop, + __inout efx_vport_config_t *configp) +{ + efx_rc_t rc; + efx_vport_id_t vport_id; + + if ((rc = eeop->eeo_vport_alloc(enp, vswitch_id, + EFX_VPORT_TYPE_NORMAL, configp->evc_vid, + configp->evc_vlan_restrict, &vport_id)) != 0) + goto fail1; + + configp->evc_vport_id = vport_id; + + if ((rc = 
eeop->eeo_vport_mac_addr_add(enp, vswitch_id, + configp->evc_vport_id, + configp->evc_mac_addr)) != 0) + goto fail2; + + if (EFX_VPORT_PCI_FUNCTION_IS_PF(configp)) { + if ((rc = eeop->eeo_vadaptor_alloc(enp, vswitch_id, + configp->evc_vport_id)) != 0) + goto fail3; + } else { + if ((rc = eeop->eeo_vport_assign(enp, vswitch_id, + configp->evc_vport_id, + configp->evc_function)) != 0) + goto fail4; + configp->evc_vport_assigned = B_TRUE; + } + + return (0); + +fail4: + EFSYS_PROBE(fail4); +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +efx_evb_vswitch_create( + __in efx_nic_t *enp, + __in uint32_t num_vports, + __inout_ecount(num_vports) efx_vport_config_t *vport_configp, + __deref_out efx_vswitch_t **evpp) +{ + efx_vswitch_t *evp; + efx_rc_t rc; + efx_vswitch_id_t vswitch_id; + efx_vport_config_t *configp; + const efx_evb_ops_t *eeop = enp->en_eeop; + uint32_t i; + + /* vport_configp is a caller allocated array filled in with vports + * configuration. Index 0 carries the PF vport configuration and next + * num_vports - 1 indices carry VFs configuration. + */ + EFSYS_ASSERT((num_vports != 0) && (vport_configp != NULL) && + (evpp != NULL)); + EFSYS_ASSERT(enp->en_mod_flags & EFX_MOD_EVB); + EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_NIC)); + + if ((eeop->eeo_vswitch_alloc == NULL) || + (eeop->eeo_vport_alloc == NULL) || + (eeop->eeo_vport_free == NULL) || + (eeop->eeo_vport_mac_addr_add == NULL) || + (eeop->eeo_vport_mac_addr_del == NULL) || + (eeop->eeo_vadaptor_alloc == NULL) || + (eeop->eeo_vadaptor_free == NULL) || + (eeop->eeo_vport_assign == NULL) || + (eeop->eeo_vswitch_free == NULL)) { + rc = ENOTSUP; + goto fail1; + } + + /* Allocate a vSwitch object */ + EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (efx_vswitch_t), evp); + + if (evp == NULL) { + rc = ENOMEM; + goto fail2; + } + + if ((rc = eeop->eeo_vswitch_alloc(enp, &vswitch_id)) != 0) + goto fail3; + + evp->ev_enp = enp; + evp->ev_num_vports = num_vports; + evp->ev_evcp = vport_configp; + evp->ev_vswitch_id = vswitch_id; + + for (i = 0; i < num_vports; i++) { + configp = vport_configp + i; + + if ((rc = efx_evb_configure_vport(enp, vswitch_id, eeop, + configp)) != 0) + goto fail4; + } + + enp->en_vswitchp = evp; + *evpp = evp; + return (0); + +fail4: + EFSYS_PROBE(fail4); + efx_evb_free_vports(enp, vswitch_id, i + 1, vport_configp); + /* Free the vSwitch */ + eeop->eeo_vswitch_free(enp, vswitch_id); + +fail3: + EFSYS_PROBE(fail3); + /* Free the vSwitch object */ + EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_vswitch_t), evp); + +fail2: + EFSYS_PROBE(fail2); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +efx_evb_vport_mac_set( + __in efx_nic_t *enp, + __in efx_vswitch_t *evp, + __in efx_vport_id_t vport_id, + __in_bcount(EFX_MAC_ADDR_LEN) uint8_t *addrp) +{ + const efx_evb_ops_t *eeop = enp->en_eeop; + efx_rc_t rc; + + EFSYS_ASSERT(enp->en_mod_flags & EFX_MOD_EVB); + + if (eeop->eeo_vport_reconfigure == NULL) { + rc = ENOTSUP; + goto fail1; + } + + if (addrp == NULL) { + rc = EINVAL; + goto fail2; + } + + rc = eeop->eeo_vport_reconfigure(enp, evp->ev_vswitch_id, vport_id, + NULL, addrp, NULL); + if (rc != 0) + goto fail3; + + return (0); + +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + return (rc); +} + + __checkReturn efx_rc_t +efx_evb_vport_vlan_set( + __in efx_nic_t *enp, + __in efx_vswitch_t *evp, + __in efx_vport_id_t 
vport_id, + __in uint16_t vid) +{ + const efx_evb_ops_t *eeop = enp->en_eeop; + efx_rc_t rc; + + EFSYS_ASSERT(enp->en_mod_flags & EFX_MOD_EVB); + + if (eeop->eeo_vport_reconfigure == NULL) { + rc = ENOTSUP; + goto fail1; + } + + rc = eeop->eeo_vport_reconfigure(enp, evp->ev_vswitch_id, vport_id, + &vid, NULL, NULL); + if (rc != 0) + goto fail2; + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + return (rc); +} + + __checkReturn efx_rc_t +efx_evb_vport_reset( + __in efx_nic_t *enp, + __in efx_vswitch_t *evp, + __in efx_vport_id_t vport_id, + __in_bcount(EFX_MAC_ADDR_LEN) uint8_t *addrp, + __in uint16_t vid, + __out boolean_t *is_fn_resetp) +{ + const efx_evb_ops_t *eeop = enp->en_eeop; + efx_rc_t rc; + + EFSYS_ASSERT(enp->en_mod_flags & EFX_MOD_EVB); + + if (eeop->eeo_vport_reconfigure == NULL) { + rc = ENOTSUP; + goto fail1; + } + + if (is_fn_resetp == NULL) { + rc = EINVAL; + goto fail2; + } + + rc = eeop->eeo_vport_reconfigure(enp, evp->ev_vswitch_id, vport_id, + &vid, addrp, is_fn_resetp); + if (rc != 0) + goto fail3; + + return (0); + +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + return (rc); +} + __checkReturn efx_rc_t +efx_evb_vswitch_destroy( + __in efx_nic_t *enp, + __in efx_vswitch_t *evp) +{ + const efx_evb_ops_t *eeop = enp->en_eeop; + efx_vswitch_id_t vswitch_id; + efx_rc_t rc; + + EFSYS_ASSERT(evp != NULL); + EFSYS_ASSERT(enp->en_mod_flags & EFX_MOD_EVB); + + if ((eeop->eeo_vport_mac_addr_del == NULL) || + (eeop->eeo_vadaptor_free == NULL) || + (eeop->eeo_vport_assign == NULL) || + (eeop->eeo_vport_free == NULL) || + (eeop->eeo_vswitch_free == NULL)) { + rc = ENOTSUP; + goto fail1; + } + + vswitch_id = evp->ev_vswitch_id; + efx_evb_free_vports(enp, vswitch_id, + evp->ev_num_vports, evp->ev_evcp); + + /* Free the vSwitch object */ + EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_vswitch_t), evp); + enp->en_vswitchp = NULL; + + /* Free the vSwitch */ + if ((rc = eeop->eeo_vswitch_free(enp, vswitch_id)) != 0) + goto fail2; + + return (0); + +fail2: + EFSYS_PROBE(fail2); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + return (rc); +} + + __checkReturn efx_rc_t +efx_evb_vport_stats( + __in efx_nic_t *enp, + __in efx_vswitch_t *evp, + __in efx_vport_id_t vport_id, + __out efsys_mem_t *stats_bufferp) +{ + efx_rc_t rc; + const efx_evb_ops_t *eeop = enp->en_eeop; + + EFSYS_ASSERT(enp->en_mod_flags & EFX_MOD_EVB); + + if (eeop->eeo_vport_stats == NULL) { + rc = ENOTSUP; + goto fail1; + } + + if (stats_bufferp == NULL) { + rc = EINVAL; + goto fail2; + } + + rc = eeop->eeo_vport_stats(enp, evp->ev_vswitch_id, + vport_id, stats_bufferp); + if (rc != 0) + goto fail3; + + return (0); + +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + return (rc); +} + +#endif diff --git a/src/spdk/dpdk/drivers/net/sfc/base/efx_filter.c b/src/spdk/dpdk/drivers/net/sfc/base/efx_filter.c new file mode 100644 index 000000000..3310b738e --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/base/efx_filter.c @@ -0,0 +1,1638 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2007-2019 Solarflare Communications Inc. 
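+ *
+ * Common filter module: dispatches to either the Siena software-managed
+ * filter tables implemented in this file or the EF10-family
+ * implementation (ef10_filter_*).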
+ */ + +#include "efx.h" +#include "efx_impl.h" + + +#if EFSYS_OPT_FILTER + +#if EFSYS_OPT_SIENA + +static __checkReturn efx_rc_t +siena_filter_init( + __in efx_nic_t *enp); + +static void +siena_filter_fini( + __in efx_nic_t *enp); + +static __checkReturn efx_rc_t +siena_filter_restore( + __in efx_nic_t *enp); + +static __checkReturn efx_rc_t +siena_filter_add( + __in efx_nic_t *enp, + __inout efx_filter_spec_t *spec, + __in efx_filter_replacement_policy_t policy); + +static __checkReturn efx_rc_t +siena_filter_delete( + __in efx_nic_t *enp, + __inout efx_filter_spec_t *spec); + +static __checkReturn efx_rc_t +siena_filter_supported_filters( + __in efx_nic_t *enp, + __out_ecount(buffer_length) uint32_t *buffer, + __in size_t buffer_length, + __out size_t *list_lengthp); + +#endif /* EFSYS_OPT_SIENA */ + +#if EFSYS_OPT_SIENA +static const efx_filter_ops_t __efx_filter_siena_ops = { + siena_filter_init, /* efo_init */ + siena_filter_fini, /* efo_fini */ + siena_filter_restore, /* efo_restore */ + siena_filter_add, /* efo_add */ + siena_filter_delete, /* efo_delete */ + siena_filter_supported_filters, /* efo_supported_filters */ + NULL, /* efo_reconfigure */ +}; +#endif /* EFSYS_OPT_SIENA */ + +#if EFX_OPTS_EF10() +static const efx_filter_ops_t __efx_filter_ef10_ops = { + ef10_filter_init, /* efo_init */ + ef10_filter_fini, /* efo_fini */ + ef10_filter_restore, /* efo_restore */ + ef10_filter_add, /* efo_add */ + ef10_filter_delete, /* efo_delete */ + ef10_filter_supported_filters, /* efo_supported_filters */ + ef10_filter_reconfigure, /* efo_reconfigure */ +}; +#endif /* EFX_OPTS_EF10() */ + + __checkReturn efx_rc_t +efx_filter_insert( + __in efx_nic_t *enp, + __inout efx_filter_spec_t *spec) +{ + const efx_filter_ops_t *efop = enp->en_efop; + efx_nic_cfg_t *encp = &(enp->en_nic_cfg); + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_FILTER); + EFSYS_ASSERT3P(spec, !=, NULL); + EFSYS_ASSERT3U(spec->efs_flags, &, EFX_FILTER_FLAG_RX); + + if ((spec->efs_flags & EFX_FILTER_FLAG_ACTION_MARK) && + !encp->enc_filter_action_mark_supported) { + rc = ENOTSUP; + goto fail1; + } + + if ((spec->efs_flags & EFX_FILTER_FLAG_ACTION_FLAG) && + !encp->enc_filter_action_flag_supported) { + rc = ENOTSUP; + goto fail2; + } + + if (spec->efs_priority == EFX_FILTER_PRI_AUTO) { + rc = EINVAL; + goto fail3; + } + + return (efop->efo_add(enp, spec, + EFX_FILTER_REPLACEMENT_HIGHER_PRIORITY)); + +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +efx_filter_remove( + __in efx_nic_t *enp, + __inout efx_filter_spec_t *spec) +{ + const efx_filter_ops_t *efop = enp->en_efop; + + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_FILTER); + EFSYS_ASSERT3P(spec, !=, NULL); + EFSYS_ASSERT3U(spec->efs_flags, &, EFX_FILTER_FLAG_RX); + + return (efop->efo_delete(enp, spec)); +} + + __checkReturn efx_rc_t +efx_filter_restore( + __in efx_nic_t *enp) +{ + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_FILTER); + + if ((rc = enp->en_efop->efo_restore(enp)) != 0) + goto fail1; + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +efx_filter_init( + __in efx_nic_t *enp) +{ + const efx_filter_ops_t *efop; + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); + EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_FILTER)); + + switch (enp->en_family) { +#if EFSYS_OPT_SIENA + case EFX_FAMILY_SIENA: 
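+		/* Siena uses the software-managed filter tables implemented later in this file */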
+ efop = &__efx_filter_siena_ops; + break; +#endif /* EFSYS_OPT_SIENA */ + +#if EFSYS_OPT_HUNTINGTON + case EFX_FAMILY_HUNTINGTON: + efop = &__efx_filter_ef10_ops; + break; +#endif /* EFSYS_OPT_HUNTINGTON */ + +#if EFSYS_OPT_MEDFORD + case EFX_FAMILY_MEDFORD: + efop = &__efx_filter_ef10_ops; + break; +#endif /* EFSYS_OPT_MEDFORD */ + +#if EFSYS_OPT_MEDFORD2 + case EFX_FAMILY_MEDFORD2: + efop = &__efx_filter_ef10_ops; + break; +#endif /* EFSYS_OPT_MEDFORD2 */ + + default: + EFSYS_ASSERT(0); + rc = ENOTSUP; + goto fail1; + } + + if ((rc = efop->efo_init(enp)) != 0) + goto fail2; + + enp->en_efop = efop; + enp->en_mod_flags |= EFX_MOD_FILTER; + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + enp->en_efop = NULL; + enp->en_mod_flags &= ~EFX_MOD_FILTER; + return (rc); +} + + void +efx_filter_fini( + __in efx_nic_t *enp) +{ + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_FILTER); + + enp->en_efop->efo_fini(enp); + + enp->en_efop = NULL; + enp->en_mod_flags &= ~EFX_MOD_FILTER; +} + +/* + * Query the possible combinations of match flags which can be filtered on. + * These are returned as a list, of which each 32 bit element is a bitmask + * formed of EFX_FILTER_MATCH flags. + * + * The combinations are ordered in priority from highest to lowest. + * + * If the provided buffer is too short to hold the list, the call with fail with + * ENOSPC and *list_lengthp will be set to the buffer length required. + */ + __checkReturn efx_rc_t +efx_filter_supported_filters( + __in efx_nic_t *enp, + __out_ecount(buffer_length) uint32_t *buffer, + __in size_t buffer_length, + __out size_t *list_lengthp) +{ + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_FILTER); + EFSYS_ASSERT(enp->en_efop->efo_supported_filters != NULL); + + if (buffer == NULL) { + rc = EINVAL; + goto fail1; + } + + rc = enp->en_efop->efo_supported_filters(enp, buffer, buffer_length, + list_lengthp); + if (rc != 0) + goto fail2; + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +efx_filter_reconfigure( + __in efx_nic_t *enp, + __in_ecount(6) uint8_t const *mac_addr, + __in boolean_t all_unicst, + __in boolean_t mulcst, + __in boolean_t all_mulcst, + __in boolean_t brdcst, + __in_ecount(6*count) uint8_t const *addrs, + __in uint32_t count) +{ + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_FILTER); + + if (enp->en_efop->efo_reconfigure != NULL) { + if ((rc = enp->en_efop->efo_reconfigure(enp, mac_addr, + all_unicst, mulcst, + all_mulcst, brdcst, + addrs, count)) != 0) + goto fail1; + } + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + void +efx_filter_spec_init_rx( + __out efx_filter_spec_t *spec, + __in efx_filter_priority_t priority, + __in efx_filter_flags_t flags, + __in efx_rxq_t *erp) +{ + EFSYS_ASSERT3P(spec, !=, NULL); + EFSYS_ASSERT3P(erp, !=, NULL); + EFSYS_ASSERT((flags & ~(EFX_FILTER_FLAG_RX_RSS | + EFX_FILTER_FLAG_RX_SCATTER)) == 0); + + memset(spec, 0, sizeof (*spec)); + spec->efs_priority = priority; + spec->efs_flags = EFX_FILTER_FLAG_RX | flags; + spec->efs_rss_context = EFX_RSS_CONTEXT_DEFAULT; + spec->efs_dmaq_id 
= (uint16_t)erp->er_index; +} + + void +efx_filter_spec_init_tx( + __out efx_filter_spec_t *spec, + __in efx_txq_t *etp) +{ + EFSYS_ASSERT3P(spec, !=, NULL); + EFSYS_ASSERT3P(etp, !=, NULL); + + memset(spec, 0, sizeof (*spec)); + spec->efs_priority = EFX_FILTER_PRI_MANUAL; + spec->efs_flags = EFX_FILTER_FLAG_TX; + spec->efs_dmaq_id = (uint16_t)etp->et_index; +} + + +/* + * Specify IPv4 host, transport protocol and port in a filter specification + */ +__checkReturn efx_rc_t +efx_filter_spec_set_ipv4_local( + __inout efx_filter_spec_t *spec, + __in uint8_t proto, + __in uint32_t host, + __in uint16_t port) +{ + EFSYS_ASSERT3P(spec, !=, NULL); + + spec->efs_match_flags |= + EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO | + EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT; + spec->efs_ether_type = EFX_ETHER_TYPE_IPV4; + spec->efs_ip_proto = proto; + spec->efs_loc_host.eo_u32[0] = host; + spec->efs_loc_port = port; + return (0); +} + +/* + * Specify IPv4 hosts, transport protocol and ports in a filter specification + */ +__checkReturn efx_rc_t +efx_filter_spec_set_ipv4_full( + __inout efx_filter_spec_t *spec, + __in uint8_t proto, + __in uint32_t lhost, + __in uint16_t lport, + __in uint32_t rhost, + __in uint16_t rport) +{ + EFSYS_ASSERT3P(spec, !=, NULL); + + spec->efs_match_flags |= + EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO | + EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT | + EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT; + spec->efs_ether_type = EFX_ETHER_TYPE_IPV4; + spec->efs_ip_proto = proto; + spec->efs_loc_host.eo_u32[0] = lhost; + spec->efs_loc_port = lport; + spec->efs_rem_host.eo_u32[0] = rhost; + spec->efs_rem_port = rport; + return (0); +} + +/* + * Specify local Ethernet address and/or VID in filter specification + */ +__checkReturn efx_rc_t +efx_filter_spec_set_eth_local( + __inout efx_filter_spec_t *spec, + __in uint16_t vid, + __in const uint8_t *addr) +{ + EFSYS_ASSERT3P(spec, !=, NULL); + EFSYS_ASSERT3P(addr, !=, NULL); + + if (vid == EFX_FILTER_SPEC_VID_UNSPEC && addr == NULL) + return (EINVAL); + + if (vid != EFX_FILTER_SPEC_VID_UNSPEC) { + spec->efs_match_flags |= EFX_FILTER_MATCH_OUTER_VID; + spec->efs_outer_vid = vid; + } + if (addr != NULL) { + spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_MAC; + memcpy(spec->efs_loc_mac, addr, EFX_MAC_ADDR_LEN); + } + return (0); +} + + void +efx_filter_spec_set_ether_type( + __inout efx_filter_spec_t *spec, + __in uint16_t ether_type) +{ + EFSYS_ASSERT3P(spec, !=, NULL); + + spec->efs_ether_type = ether_type; + spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE; +} + +/* + * Specify matching otherwise-unmatched unicast in a filter specification + */ +__checkReturn efx_rc_t +efx_filter_spec_set_uc_def( + __inout efx_filter_spec_t *spec) +{ + EFSYS_ASSERT3P(spec, !=, NULL); + + spec->efs_match_flags |= EFX_FILTER_MATCH_UNKNOWN_UCAST_DST; + return (0); +} + +/* + * Specify matching otherwise-unmatched multicast in a filter specification + */ +__checkReturn efx_rc_t +efx_filter_spec_set_mc_def( + __inout efx_filter_spec_t *spec) +{ + EFSYS_ASSERT3P(spec, !=, NULL); + + spec->efs_match_flags |= EFX_FILTER_MATCH_UNKNOWN_MCAST_DST; + return (0); +} + + +__checkReturn efx_rc_t +efx_filter_spec_set_encap_type( + __inout efx_filter_spec_t *spec, + __in efx_tunnel_protocol_t encap_type, + __in efx_filter_inner_frame_match_t inner_frame_match) +{ + uint32_t match_flags = EFX_FILTER_MATCH_ENCAP_TYPE; + uint8_t ip_proto; + efx_rc_t rc; + + EFSYS_ASSERT3P(spec, !=, NULL); + + switch (encap_type) { + 
case EFX_TUNNEL_PROTOCOL_VXLAN: + case EFX_TUNNEL_PROTOCOL_GENEVE: + ip_proto = EFX_IPPROTO_UDP; + break; + case EFX_TUNNEL_PROTOCOL_NVGRE: + ip_proto = EFX_IPPROTO_GRE; + break; + default: + EFSYS_ASSERT(0); + rc = EINVAL; + goto fail1; + } + + switch (inner_frame_match) { + case EFX_FILTER_INNER_FRAME_MATCH_UNKNOWN_MCAST_DST: + match_flags |= EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST; + break; + case EFX_FILTER_INNER_FRAME_MATCH_UNKNOWN_UCAST_DST: + match_flags |= EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST; + break; + case EFX_FILTER_INNER_FRAME_MATCH_OTHER: + /* This is for when specific inner frames are to be matched. */ + break; + default: + EFSYS_ASSERT(0); + rc = EINVAL; + goto fail2; + } + + spec->efs_encap_type = encap_type; + spec->efs_ip_proto = ip_proto; + spec->efs_match_flags |= (match_flags | EFX_FILTER_MATCH_IP_PROTO); + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +/* + * Specify inner and outer Ethernet address and VNI or VSID in tunnel filter + * specification. + */ +static __checkReturn efx_rc_t +efx_filter_spec_set_tunnel( + __inout efx_filter_spec_t *spec, + __in efx_tunnel_protocol_t encap_type, + __in const uint8_t *vni_or_vsid, + __in const uint8_t *inner_addr, + __in const uint8_t *outer_addr) +{ + efx_rc_t rc; + + EFSYS_ASSERT3P(spec, !=, NULL); + EFSYS_ASSERT3P(vni_or_vsid, !=, NULL); + EFSYS_ASSERT3P(inner_addr, !=, NULL); + EFSYS_ASSERT3P(outer_addr, !=, NULL); + + switch (encap_type) { + case EFX_TUNNEL_PROTOCOL_VXLAN: + case EFX_TUNNEL_PROTOCOL_GENEVE: + case EFX_TUNNEL_PROTOCOL_NVGRE: + break; + default: + rc = EINVAL; + goto fail1; + } + + if ((inner_addr == NULL) && (outer_addr == NULL)) { + rc = EINVAL; + goto fail2; + } + + if (vni_or_vsid != NULL) { + spec->efs_match_flags |= EFX_FILTER_MATCH_VNI_OR_VSID; + memcpy(spec->efs_vni_or_vsid, vni_or_vsid, EFX_VNI_OR_VSID_LEN); + } + if (outer_addr != NULL) { + spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_MAC; + memcpy(spec->efs_loc_mac, outer_addr, EFX_MAC_ADDR_LEN); + } + if (inner_addr != NULL) { + spec->efs_match_flags |= EFX_FILTER_MATCH_IFRM_LOC_MAC; + memcpy(spec->efs_ifrm_loc_mac, inner_addr, EFX_MAC_ADDR_LEN); + } + + spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE; + spec->efs_encap_type = encap_type; + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +/* + * Specify inner and outer Ethernet address and VNI in VXLAN filter + * specification. + */ +__checkReturn efx_rc_t +efx_filter_spec_set_vxlan( + __inout efx_filter_spec_t *spec, + __in const uint8_t *vni, + __in const uint8_t *inner_addr, + __in const uint8_t *outer_addr) +{ + return efx_filter_spec_set_tunnel(spec, EFX_TUNNEL_PROTOCOL_VXLAN, + vni, inner_addr, outer_addr); +} + +/* + * Specify inner and outer Ethernet address and VNI in Geneve filter + * specification. + */ +__checkReturn efx_rc_t +efx_filter_spec_set_geneve( + __inout efx_filter_spec_t *spec, + __in const uint8_t *vni, + __in const uint8_t *inner_addr, + __in const uint8_t *outer_addr) +{ + return efx_filter_spec_set_tunnel(spec, EFX_TUNNEL_PROTOCOL_GENEVE, + vni, inner_addr, outer_addr); +} + +/* + * Specify inner and outer Ethernet address and vsid in NVGRE filter + * specification. 
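+ * The VSID is passed in the same fixed-length (EFX_VNI_OR_VSID_LEN)
+ * buffer that would otherwise carry a VXLAN or Geneve VNI.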
+ */ +__checkReturn efx_rc_t +efx_filter_spec_set_nvgre( + __inout efx_filter_spec_t *spec, + __in const uint8_t *vsid, + __in const uint8_t *inner_addr, + __in const uint8_t *outer_addr) +{ + return efx_filter_spec_set_tunnel(spec, EFX_TUNNEL_PROTOCOL_NVGRE, + vsid, inner_addr, outer_addr); +} + +#if EFSYS_OPT_RX_SCALE + __checkReturn efx_rc_t +efx_filter_spec_set_rss_context( + __inout efx_filter_spec_t *spec, + __in uint32_t rss_context) +{ + efx_rc_t rc; + + EFSYS_ASSERT3P(spec, !=, NULL); + + /* The filter must have been created with EFX_FILTER_FLAG_RX_RSS. */ + if ((spec->efs_flags & EFX_FILTER_FLAG_RX_RSS) == 0) { + rc = EINVAL; + goto fail1; + } + + spec->efs_rss_context = rss_context; + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} +#endif + +#if EFSYS_OPT_SIENA + +/* + * "Fudge factors" - difference between programmed value and actual depth. + * Due to pipelined implementation we need to program H/W with a value that + * is larger than the hop limit we want. + */ +#define FILTER_CTL_SRCH_FUDGE_WILD 3 +#define FILTER_CTL_SRCH_FUDGE_FULL 1 + +/* + * Hard maximum hop limit. Hardware will time-out beyond 200-something. + * We also need to avoid infinite loops in efx_filter_search() when the + * table is full. + */ +#define FILTER_CTL_SRCH_MAX 200 + +static __checkReturn efx_rc_t +siena_filter_spec_from_gen_spec( + __out siena_filter_spec_t *sf_spec, + __in efx_filter_spec_t *gen_spec) +{ + efx_rc_t rc; + boolean_t is_full = B_FALSE; + + if (gen_spec->efs_flags & EFX_FILTER_FLAG_TX) + EFSYS_ASSERT3U(gen_spec->efs_flags, ==, EFX_FILTER_FLAG_TX); + else + EFSYS_ASSERT3U(gen_spec->efs_flags, &, EFX_FILTER_FLAG_RX); + + /* Siena only has one RSS context */ + if ((gen_spec->efs_flags & EFX_FILTER_FLAG_RX_RSS) && + gen_spec->efs_rss_context != EFX_RSS_CONTEXT_DEFAULT) { + rc = EINVAL; + goto fail1; + } + + sf_spec->sfs_flags = gen_spec->efs_flags; + sf_spec->sfs_dmaq_id = gen_spec->efs_dmaq_id; + + switch (gen_spec->efs_match_flags) { + case EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO | + EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT | + EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT: + is_full = B_TRUE; + /* Fall through */ + case EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO | + EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT: { + uint32_t rhost, host1, host2; + uint16_t rport, port1, port2; + + if (gen_spec->efs_ether_type != EFX_ETHER_TYPE_IPV4) { + rc = ENOTSUP; + goto fail2; + } + if (gen_spec->efs_loc_port == 0 || + (is_full && gen_spec->efs_rem_port == 0)) { + rc = EINVAL; + goto fail3; + } + switch (gen_spec->efs_ip_proto) { + case EFX_IPPROTO_TCP: + if (gen_spec->efs_flags & EFX_FILTER_FLAG_TX) { + sf_spec->sfs_type = (is_full ? + EFX_SIENA_FILTER_TX_TCP_FULL : + EFX_SIENA_FILTER_TX_TCP_WILD); + } else { + sf_spec->sfs_type = (is_full ? + EFX_SIENA_FILTER_RX_TCP_FULL : + EFX_SIENA_FILTER_RX_TCP_WILD); + } + break; + case EFX_IPPROTO_UDP: + if (gen_spec->efs_flags & EFX_FILTER_FLAG_TX) { + sf_spec->sfs_type = (is_full ? + EFX_SIENA_FILTER_TX_UDP_FULL : + EFX_SIENA_FILTER_TX_UDP_WILD); + } else { + sf_spec->sfs_type = (is_full ? + EFX_SIENA_FILTER_RX_UDP_FULL : + EFX_SIENA_FILTER_RX_UDP_WILD); + } + break; + default: + rc = ENOTSUP; + goto fail4; + } + /* + * The filter is constructed in terms of source and destination, + * with the odd wrinkle that the ports are swapped in a UDP + * wildcard filter. We need to convert from local and remote + * addresses (zero for a wildcard). + */ + rhost = is_full ? 
gen_spec->efs_rem_host.eo_u32[0] : 0; + rport = is_full ? gen_spec->efs_rem_port : 0; + if (gen_spec->efs_flags & EFX_FILTER_FLAG_TX) { + host1 = gen_spec->efs_loc_host.eo_u32[0]; + host2 = rhost; + } else { + host1 = rhost; + host2 = gen_spec->efs_loc_host.eo_u32[0]; + } + if (gen_spec->efs_flags & EFX_FILTER_FLAG_TX) { + if (sf_spec->sfs_type == + EFX_SIENA_FILTER_TX_UDP_WILD) { + port1 = rport; + port2 = gen_spec->efs_loc_port; + } else { + port1 = gen_spec->efs_loc_port; + port2 = rport; + } + } else { + if (sf_spec->sfs_type == + EFX_SIENA_FILTER_RX_UDP_WILD) { + port1 = gen_spec->efs_loc_port; + port2 = rport; + } else { + port1 = rport; + port2 = gen_spec->efs_loc_port; + } + } + sf_spec->sfs_dword[0] = (host1 << 16) | port1; + sf_spec->sfs_dword[1] = (port2 << 16) | (host1 >> 16); + sf_spec->sfs_dword[2] = host2; + break; + } + + case EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_OUTER_VID: + is_full = B_TRUE; + /* Fall through */ + case EFX_FILTER_MATCH_LOC_MAC: + if (gen_spec->efs_flags & EFX_FILTER_FLAG_TX) { + sf_spec->sfs_type = (is_full ? + EFX_SIENA_FILTER_TX_MAC_FULL : + EFX_SIENA_FILTER_TX_MAC_WILD); + } else { + sf_spec->sfs_type = (is_full ? + EFX_SIENA_FILTER_RX_MAC_FULL : + EFX_SIENA_FILTER_RX_MAC_WILD); + } + sf_spec->sfs_dword[0] = is_full ? gen_spec->efs_outer_vid : 0; + sf_spec->sfs_dword[1] = + gen_spec->efs_loc_mac[2] << 24 | + gen_spec->efs_loc_mac[3] << 16 | + gen_spec->efs_loc_mac[4] << 8 | + gen_spec->efs_loc_mac[5]; + sf_spec->sfs_dword[2] = + gen_spec->efs_loc_mac[0] << 8 | + gen_spec->efs_loc_mac[1]; + break; + + default: + EFSYS_ASSERT(B_FALSE); + rc = ENOTSUP; + goto fail5; + } + + return (0); + +fail5: + EFSYS_PROBE(fail5); +fail4: + EFSYS_PROBE(fail4); +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +/* + * The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit + * key derived from the n-tuple. + */ +static uint16_t +siena_filter_tbl_hash( + __in uint32_t key) +{ + uint16_t tmp; + + /* First 16 rounds */ + tmp = 0x1fff ^ (uint16_t)(key >> 16); + tmp = tmp ^ tmp >> 3 ^ tmp >> 6; + tmp = tmp ^ tmp >> 9; + + /* Last 16 rounds */ + tmp = tmp ^ tmp << 13 ^ (uint16_t)(key & 0xffff); + tmp = tmp ^ tmp >> 3 ^ tmp >> 6; + tmp = tmp ^ tmp >> 9; + + return (tmp); +} + +/* + * To allow for hash collisions, filter search continues at these + * increments from the first possible entry selected by the hash. 
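+ * The first probe examines slot (hash & (size - 1)); after k further
+ * steps the search is at ((hash + k * increment) & (size - 1)). The
+ * increment (key * 2 - 1) is always odd, so with a power-of-two table
+ * size every slot is eventually reachable.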
+ */ +static uint16_t +siena_filter_tbl_increment( + __in uint32_t key) +{ + return ((uint16_t)(key * 2 - 1)); +} + +static __checkReturn boolean_t +siena_filter_test_used( + __in siena_filter_tbl_t *sftp, + __in unsigned int index) +{ + EFSYS_ASSERT3P(sftp->sft_bitmap, !=, NULL); + return ((sftp->sft_bitmap[index / 32] & (1 << (index % 32))) != 0); +} + +static void +siena_filter_set_used( + __in siena_filter_tbl_t *sftp, + __in unsigned int index) +{ + EFSYS_ASSERT3P(sftp->sft_bitmap, !=, NULL); + sftp->sft_bitmap[index / 32] |= (1 << (index % 32)); + ++sftp->sft_used; +} + +static void +siena_filter_clear_used( + __in siena_filter_tbl_t *sftp, + __in unsigned int index) +{ + EFSYS_ASSERT3P(sftp->sft_bitmap, !=, NULL); + sftp->sft_bitmap[index / 32] &= ~(1 << (index % 32)); + + --sftp->sft_used; + EFSYS_ASSERT3U(sftp->sft_used, >=, 0); +} + + +static siena_filter_tbl_id_t +siena_filter_tbl_id( + __in siena_filter_type_t type) +{ + siena_filter_tbl_id_t tbl_id; + + switch (type) { + case EFX_SIENA_FILTER_RX_TCP_FULL: + case EFX_SIENA_FILTER_RX_TCP_WILD: + case EFX_SIENA_FILTER_RX_UDP_FULL: + case EFX_SIENA_FILTER_RX_UDP_WILD: + tbl_id = EFX_SIENA_FILTER_TBL_RX_IP; + break; + + case EFX_SIENA_FILTER_RX_MAC_FULL: + case EFX_SIENA_FILTER_RX_MAC_WILD: + tbl_id = EFX_SIENA_FILTER_TBL_RX_MAC; + break; + + case EFX_SIENA_FILTER_TX_TCP_FULL: + case EFX_SIENA_FILTER_TX_TCP_WILD: + case EFX_SIENA_FILTER_TX_UDP_FULL: + case EFX_SIENA_FILTER_TX_UDP_WILD: + tbl_id = EFX_SIENA_FILTER_TBL_TX_IP; + break; + + case EFX_SIENA_FILTER_TX_MAC_FULL: + case EFX_SIENA_FILTER_TX_MAC_WILD: + tbl_id = EFX_SIENA_FILTER_TBL_TX_MAC; + break; + + default: + EFSYS_ASSERT(B_FALSE); + tbl_id = EFX_SIENA_FILTER_NTBLS; + break; + } + return (tbl_id); +} + +static void +siena_filter_reset_search_depth( + __inout siena_filter_t *sfp, + __in siena_filter_tbl_id_t tbl_id) +{ + switch (tbl_id) { + case EFX_SIENA_FILTER_TBL_RX_IP: + sfp->sf_depth[EFX_SIENA_FILTER_RX_TCP_FULL] = 0; + sfp->sf_depth[EFX_SIENA_FILTER_RX_TCP_WILD] = 0; + sfp->sf_depth[EFX_SIENA_FILTER_RX_UDP_FULL] = 0; + sfp->sf_depth[EFX_SIENA_FILTER_RX_UDP_WILD] = 0; + break; + + case EFX_SIENA_FILTER_TBL_RX_MAC: + sfp->sf_depth[EFX_SIENA_FILTER_RX_MAC_FULL] = 0; + sfp->sf_depth[EFX_SIENA_FILTER_RX_MAC_WILD] = 0; + break; + + case EFX_SIENA_FILTER_TBL_TX_IP: + sfp->sf_depth[EFX_SIENA_FILTER_TX_TCP_FULL] = 0; + sfp->sf_depth[EFX_SIENA_FILTER_TX_TCP_WILD] = 0; + sfp->sf_depth[EFX_SIENA_FILTER_TX_UDP_FULL] = 0; + sfp->sf_depth[EFX_SIENA_FILTER_TX_UDP_WILD] = 0; + break; + + case EFX_SIENA_FILTER_TBL_TX_MAC: + sfp->sf_depth[EFX_SIENA_FILTER_TX_MAC_FULL] = 0; + sfp->sf_depth[EFX_SIENA_FILTER_TX_MAC_WILD] = 0; + break; + + default: + EFSYS_ASSERT(B_FALSE); + break; + } +} + +static void +siena_filter_push_rx_limits( + __in efx_nic_t *enp) +{ + siena_filter_t *sfp = enp->en_filter.ef_siena_filter; + efx_oword_t oword; + + EFX_BAR_READO(enp, FR_AZ_RX_FILTER_CTL_REG, &oword); + + EFX_SET_OWORD_FIELD(oword, FRF_AZ_TCP_FULL_SRCH_LIMIT, + sfp->sf_depth[EFX_SIENA_FILTER_RX_TCP_FULL] + + FILTER_CTL_SRCH_FUDGE_FULL); + EFX_SET_OWORD_FIELD(oword, FRF_AZ_TCP_WILD_SRCH_LIMIT, + sfp->sf_depth[EFX_SIENA_FILTER_RX_TCP_WILD] + + FILTER_CTL_SRCH_FUDGE_WILD); + EFX_SET_OWORD_FIELD(oword, FRF_AZ_UDP_FULL_SRCH_LIMIT, + sfp->sf_depth[EFX_SIENA_FILTER_RX_UDP_FULL] + + FILTER_CTL_SRCH_FUDGE_FULL); + EFX_SET_OWORD_FIELD(oword, FRF_AZ_UDP_WILD_SRCH_LIMIT, + sfp->sf_depth[EFX_SIENA_FILTER_RX_UDP_WILD] + + FILTER_CTL_SRCH_FUDGE_WILD); + + if (sfp->sf_tbl[EFX_SIENA_FILTER_TBL_RX_MAC].sft_size) { + 
EFX_SET_OWORD_FIELD(oword, + FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT, + sfp->sf_depth[EFX_SIENA_FILTER_RX_MAC_FULL] + + FILTER_CTL_SRCH_FUDGE_FULL); + EFX_SET_OWORD_FIELD(oword, + FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT, + sfp->sf_depth[EFX_SIENA_FILTER_RX_MAC_WILD] + + FILTER_CTL_SRCH_FUDGE_WILD); + } + + EFX_BAR_WRITEO(enp, FR_AZ_RX_FILTER_CTL_REG, &oword); +} + +static void +siena_filter_push_tx_limits( + __in efx_nic_t *enp) +{ + siena_filter_t *sfp = enp->en_filter.ef_siena_filter; + efx_oword_t oword; + + EFX_BAR_READO(enp, FR_AZ_TX_CFG_REG, &oword); + + if (sfp->sf_tbl[EFX_SIENA_FILTER_TBL_TX_IP].sft_size != 0) { + EFX_SET_OWORD_FIELD(oword, + FRF_CZ_TX_TCPIP_FILTER_FULL_SEARCH_RANGE, + sfp->sf_depth[EFX_SIENA_FILTER_TX_TCP_FULL] + + FILTER_CTL_SRCH_FUDGE_FULL); + EFX_SET_OWORD_FIELD(oword, + FRF_CZ_TX_TCPIP_FILTER_WILD_SEARCH_RANGE, + sfp->sf_depth[EFX_SIENA_FILTER_TX_TCP_WILD] + + FILTER_CTL_SRCH_FUDGE_WILD); + EFX_SET_OWORD_FIELD(oword, + FRF_CZ_TX_UDPIP_FILTER_FULL_SEARCH_RANGE, + sfp->sf_depth[EFX_SIENA_FILTER_TX_UDP_FULL] + + FILTER_CTL_SRCH_FUDGE_FULL); + EFX_SET_OWORD_FIELD(oword, + FRF_CZ_TX_UDPIP_FILTER_WILD_SEARCH_RANGE, + sfp->sf_depth[EFX_SIENA_FILTER_TX_UDP_WILD] + + FILTER_CTL_SRCH_FUDGE_WILD); + } + + if (sfp->sf_tbl[EFX_SIENA_FILTER_TBL_TX_MAC].sft_size != 0) { + EFX_SET_OWORD_FIELD( + oword, FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE, + sfp->sf_depth[EFX_SIENA_FILTER_TX_MAC_FULL] + + FILTER_CTL_SRCH_FUDGE_FULL); + EFX_SET_OWORD_FIELD( + oword, FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE, + sfp->sf_depth[EFX_SIENA_FILTER_TX_MAC_WILD] + + FILTER_CTL_SRCH_FUDGE_WILD); + } + + EFX_BAR_WRITEO(enp, FR_AZ_TX_CFG_REG, &oword); +} + +/* Build a filter entry and return its n-tuple key. */ +static __checkReturn uint32_t +siena_filter_build( + __out efx_oword_t *filter, + __in siena_filter_spec_t *spec) +{ + uint32_t dword3; + uint32_t key; + uint8_t type = spec->sfs_type; + uint32_t flags = spec->sfs_flags; + + switch (siena_filter_tbl_id(type)) { + case EFX_SIENA_FILTER_TBL_RX_IP: { + boolean_t is_udp = (type == EFX_SIENA_FILTER_RX_UDP_FULL || + type == EFX_SIENA_FILTER_RX_UDP_WILD); + EFX_POPULATE_OWORD_7(*filter, + FRF_BZ_RSS_EN, + (flags & EFX_FILTER_FLAG_RX_RSS) ? 1 : 0, + FRF_BZ_SCATTER_EN, + (flags & EFX_FILTER_FLAG_RX_SCATTER) ? 1 : 0, + FRF_AZ_TCP_UDP, is_udp, + FRF_AZ_RXQ_ID, spec->sfs_dmaq_id, + EFX_DWORD_2, spec->sfs_dword[2], + EFX_DWORD_1, spec->sfs_dword[1], + EFX_DWORD_0, spec->sfs_dword[0]); + dword3 = is_udp; + break; + } + + case EFX_SIENA_FILTER_TBL_RX_MAC: { + boolean_t is_wild = (type == EFX_SIENA_FILTER_RX_MAC_WILD); + EFX_POPULATE_OWORD_7(*filter, + FRF_CZ_RMFT_RSS_EN, + (flags & EFX_FILTER_FLAG_RX_RSS) ? 1 : 0, + FRF_CZ_RMFT_SCATTER_EN, + (flags & EFX_FILTER_FLAG_RX_SCATTER) ? 
1 : 0, + FRF_CZ_RMFT_RXQ_ID, spec->sfs_dmaq_id, + FRF_CZ_RMFT_WILDCARD_MATCH, is_wild, + FRF_CZ_RMFT_DEST_MAC_DW1, spec->sfs_dword[2], + FRF_CZ_RMFT_DEST_MAC_DW0, spec->sfs_dword[1], + FRF_CZ_RMFT_VLAN_ID, spec->sfs_dword[0]); + dword3 = is_wild; + break; + } + + case EFX_SIENA_FILTER_TBL_TX_IP: { + boolean_t is_udp = (type == EFX_SIENA_FILTER_TX_UDP_FULL || + type == EFX_SIENA_FILTER_TX_UDP_WILD); + EFX_POPULATE_OWORD_5(*filter, + FRF_CZ_TIFT_TCP_UDP, is_udp, + FRF_CZ_TIFT_TXQ_ID, spec->sfs_dmaq_id, + EFX_DWORD_2, spec->sfs_dword[2], + EFX_DWORD_1, spec->sfs_dword[1], + EFX_DWORD_0, spec->sfs_dword[0]); + dword3 = is_udp | spec->sfs_dmaq_id << 1; + break; + } + + case EFX_SIENA_FILTER_TBL_TX_MAC: { + boolean_t is_wild = (type == EFX_SIENA_FILTER_TX_MAC_WILD); + EFX_POPULATE_OWORD_5(*filter, + FRF_CZ_TMFT_TXQ_ID, spec->sfs_dmaq_id, + FRF_CZ_TMFT_WILDCARD_MATCH, is_wild, + FRF_CZ_TMFT_SRC_MAC_DW1, spec->sfs_dword[2], + FRF_CZ_TMFT_SRC_MAC_DW0, spec->sfs_dword[1], + FRF_CZ_TMFT_VLAN_ID, spec->sfs_dword[0]); + dword3 = is_wild | spec->sfs_dmaq_id << 1; + break; + } + + default: + EFSYS_ASSERT(B_FALSE); + EFX_ZERO_OWORD(*filter); + return (0); + } + + key = + spec->sfs_dword[0] ^ + spec->sfs_dword[1] ^ + spec->sfs_dword[2] ^ + dword3; + + return (key); +} + +static __checkReturn efx_rc_t +siena_filter_push_entry( + __inout efx_nic_t *enp, + __in siena_filter_type_t type, + __in int index, + __in efx_oword_t *eop) +{ + efx_rc_t rc; + + switch (type) { + case EFX_SIENA_FILTER_RX_TCP_FULL: + case EFX_SIENA_FILTER_RX_TCP_WILD: + case EFX_SIENA_FILTER_RX_UDP_FULL: + case EFX_SIENA_FILTER_RX_UDP_WILD: + EFX_BAR_TBL_WRITEO(enp, FR_AZ_RX_FILTER_TBL0, index, + eop, B_TRUE); + break; + + case EFX_SIENA_FILTER_RX_MAC_FULL: + case EFX_SIENA_FILTER_RX_MAC_WILD: + EFX_BAR_TBL_WRITEO(enp, FR_CZ_RX_MAC_FILTER_TBL0, index, + eop, B_TRUE); + break; + + case EFX_SIENA_FILTER_TX_TCP_FULL: + case EFX_SIENA_FILTER_TX_TCP_WILD: + case EFX_SIENA_FILTER_TX_UDP_FULL: + case EFX_SIENA_FILTER_TX_UDP_WILD: + EFX_BAR_TBL_WRITEO(enp, FR_CZ_TX_FILTER_TBL0, index, + eop, B_TRUE); + break; + + case EFX_SIENA_FILTER_TX_MAC_FULL: + case EFX_SIENA_FILTER_TX_MAC_WILD: + EFX_BAR_TBL_WRITEO(enp, FR_CZ_TX_MAC_FILTER_TBL0, index, + eop, B_TRUE); + break; + + default: + EFSYS_ASSERT(B_FALSE); + rc = ENOTSUP; + goto fail1; + } + return (0); + +fail1: + return (rc); +} + + +static __checkReturn boolean_t +siena_filter_equal( + __in const siena_filter_spec_t *left, + __in const siena_filter_spec_t *right) +{ + siena_filter_tbl_id_t tbl_id; + + tbl_id = siena_filter_tbl_id(left->sfs_type); + + + if (left->sfs_type != right->sfs_type) + return (B_FALSE); + + if (memcmp(left->sfs_dword, right->sfs_dword, + sizeof (left->sfs_dword))) + return (B_FALSE); + + if ((tbl_id == EFX_SIENA_FILTER_TBL_TX_IP || + tbl_id == EFX_SIENA_FILTER_TBL_TX_MAC) && + left->sfs_dmaq_id != right->sfs_dmaq_id) + return (B_FALSE); + + return (B_TRUE); +} + +static __checkReturn efx_rc_t +siena_filter_search( + __in siena_filter_tbl_t *sftp, + __in siena_filter_spec_t *spec, + __in uint32_t key, + __in boolean_t for_insert, + __out int *filter_index, + __out unsigned int *depth_required) +{ + unsigned int hash, incr, filter_idx, depth; + + hash = siena_filter_tbl_hash(key); + incr = siena_filter_tbl_increment(key); + + filter_idx = hash & (sftp->sft_size - 1); + depth = 1; + + for (;;) { + /* + * Return success if entry is used and matches this spec + * or entry is unused and we are trying to insert. + */ + if (siena_filter_test_used(sftp, filter_idx) ? 
+ siena_filter_equal(spec, + &sftp->sft_spec[filter_idx]) : + for_insert) { + *filter_index = filter_idx; + *depth_required = depth; + return (0); + } + + /* Return failure if we reached the maximum search depth */ + if (depth == FILTER_CTL_SRCH_MAX) + return (for_insert ? EBUSY : ENOENT); + + filter_idx = (filter_idx + incr) & (sftp->sft_size - 1); + ++depth; + } +} + +static void +siena_filter_clear_entry( + __in efx_nic_t *enp, + __in siena_filter_tbl_t *sftp, + __in int index) +{ + efx_oword_t filter; + + if (siena_filter_test_used(sftp, index)) { + siena_filter_clear_used(sftp, index); + + EFX_ZERO_OWORD(filter); + siena_filter_push_entry(enp, + sftp->sft_spec[index].sfs_type, + index, &filter); + + memset(&sftp->sft_spec[index], + 0, sizeof (sftp->sft_spec[0])); + } +} + + void +siena_filter_tbl_clear( + __in efx_nic_t *enp, + __in siena_filter_tbl_id_t tbl_id) +{ + siena_filter_t *sfp = enp->en_filter.ef_siena_filter; + siena_filter_tbl_t *sftp = &sfp->sf_tbl[tbl_id]; + int index; + efsys_lock_state_t state; + + EFSYS_LOCK(enp->en_eslp, state); + + for (index = 0; index < sftp->sft_size; ++index) { + siena_filter_clear_entry(enp, sftp, index); + } + + if (sftp->sft_used == 0) + siena_filter_reset_search_depth(sfp, tbl_id); + + EFSYS_UNLOCK(enp->en_eslp, state); +} + +static __checkReturn efx_rc_t +siena_filter_init( + __in efx_nic_t *enp) +{ + siena_filter_t *sfp; + siena_filter_tbl_t *sftp; + int tbl_id; + efx_rc_t rc; + + EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (siena_filter_t), sfp); + + if (!sfp) { + rc = ENOMEM; + goto fail1; + } + + enp->en_filter.ef_siena_filter = sfp; + + switch (enp->en_family) { + case EFX_FAMILY_SIENA: + sftp = &sfp->sf_tbl[EFX_SIENA_FILTER_TBL_RX_IP]; + sftp->sft_size = FR_AZ_RX_FILTER_TBL0_ROWS; + + sftp = &sfp->sf_tbl[EFX_SIENA_FILTER_TBL_RX_MAC]; + sftp->sft_size = FR_CZ_RX_MAC_FILTER_TBL0_ROWS; + + sftp = &sfp->sf_tbl[EFX_SIENA_FILTER_TBL_TX_IP]; + sftp->sft_size = FR_CZ_TX_FILTER_TBL0_ROWS; + + sftp = &sfp->sf_tbl[EFX_SIENA_FILTER_TBL_TX_MAC]; + sftp->sft_size = FR_CZ_TX_MAC_FILTER_TBL0_ROWS; + break; + + default: + rc = ENOTSUP; + goto fail2; + } + + for (tbl_id = 0; tbl_id < EFX_SIENA_FILTER_NTBLS; tbl_id++) { + unsigned int bitmap_size; + + sftp = &sfp->sf_tbl[tbl_id]; + if (sftp->sft_size == 0) + continue; + + EFX_STATIC_ASSERT(sizeof (sftp->sft_bitmap[0]) == + sizeof (uint32_t)); + bitmap_size = + (sftp->sft_size + (sizeof (uint32_t) * 8) - 1) / 8; + + EFSYS_KMEM_ALLOC(enp->en_esip, bitmap_size, sftp->sft_bitmap); + if (!sftp->sft_bitmap) { + rc = ENOMEM; + goto fail3; + } + + EFSYS_KMEM_ALLOC(enp->en_esip, + sftp->sft_size * sizeof (*sftp->sft_spec), + sftp->sft_spec); + if (!sftp->sft_spec) { + rc = ENOMEM; + goto fail4; + } + memset(sftp->sft_spec, 0, + sftp->sft_size * sizeof (*sftp->sft_spec)); + } + + return (0); + +fail4: + EFSYS_PROBE(fail4); + +fail3: + EFSYS_PROBE(fail3); + +fail2: + EFSYS_PROBE(fail2); + siena_filter_fini(enp); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + return (rc); +} + +static void +siena_filter_fini( + __in efx_nic_t *enp) +{ + siena_filter_t *sfp = enp->en_filter.ef_siena_filter; + siena_filter_tbl_id_t tbl_id; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); + + if (sfp == NULL) + return; + + for (tbl_id = 0; tbl_id < EFX_SIENA_FILTER_NTBLS; tbl_id++) { + siena_filter_tbl_t *sftp = &sfp->sf_tbl[tbl_id]; + unsigned int bitmap_size; + + EFX_STATIC_ASSERT(sizeof (sftp->sft_bitmap[0]) == + sizeof (uint32_t)); + bitmap_size = + (sftp->sft_size + (sizeof 
(uint32_t) * 8) - 1) / 8; + + if (sftp->sft_bitmap != NULL) { + EFSYS_KMEM_FREE(enp->en_esip, bitmap_size, + sftp->sft_bitmap); + sftp->sft_bitmap = NULL; + } + + if (sftp->sft_spec != NULL) { + EFSYS_KMEM_FREE(enp->en_esip, sftp->sft_size * + sizeof (*sftp->sft_spec), sftp->sft_spec); + sftp->sft_spec = NULL; + } + } + + EFSYS_KMEM_FREE(enp->en_esip, sizeof (siena_filter_t), + enp->en_filter.ef_siena_filter); +} + +/* Restore filter state after a reset */ +static __checkReturn efx_rc_t +siena_filter_restore( + __in efx_nic_t *enp) +{ + siena_filter_t *sfp = enp->en_filter.ef_siena_filter; + siena_filter_tbl_id_t tbl_id; + siena_filter_tbl_t *sftp; + siena_filter_spec_t *spec; + efx_oword_t filter; + int filter_idx; + efsys_lock_state_t state; + uint32_t key; + efx_rc_t rc; + + EFSYS_LOCK(enp->en_eslp, state); + + for (tbl_id = 0; tbl_id < EFX_SIENA_FILTER_NTBLS; tbl_id++) { + sftp = &sfp->sf_tbl[tbl_id]; + for (filter_idx = 0; + filter_idx < sftp->sft_size; + filter_idx++) { + if (!siena_filter_test_used(sftp, filter_idx)) + continue; + + spec = &sftp->sft_spec[filter_idx]; + if ((key = siena_filter_build(&filter, spec)) == 0) { + rc = EINVAL; + goto fail1; + } + if ((rc = siena_filter_push_entry(enp, + spec->sfs_type, filter_idx, &filter)) != 0) + goto fail2; + } + } + + siena_filter_push_rx_limits(enp); + siena_filter_push_tx_limits(enp); + + EFSYS_UNLOCK(enp->en_eslp, state); + + return (0); + +fail2: + EFSYS_PROBE(fail2); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + EFSYS_UNLOCK(enp->en_eslp, state); + + return (rc); +} + +static __checkReturn efx_rc_t +siena_filter_add( + __in efx_nic_t *enp, + __inout efx_filter_spec_t *spec, + __in efx_filter_replacement_policy_t policy) +{ + efx_rc_t rc; + siena_filter_spec_t sf_spec; + siena_filter_t *sfp = enp->en_filter.ef_siena_filter; + siena_filter_tbl_id_t tbl_id; + siena_filter_tbl_t *sftp; + siena_filter_spec_t *saved_sf_spec; + efx_oword_t filter; + int filter_idx; + unsigned int depth; + efsys_lock_state_t state; + uint32_t key; + + + EFSYS_ASSERT3P(spec, !=, NULL); + + if ((rc = siena_filter_spec_from_gen_spec(&sf_spec, spec)) != 0) + goto fail1; + + tbl_id = siena_filter_tbl_id(sf_spec.sfs_type); + sftp = &sfp->sf_tbl[tbl_id]; + + if (sftp->sft_size == 0) { + rc = EINVAL; + goto fail2; + } + + key = siena_filter_build(&filter, &sf_spec); + + EFSYS_LOCK(enp->en_eslp, state); + + rc = siena_filter_search(sftp, &sf_spec, key, B_TRUE, + &filter_idx, &depth); + if (rc != 0) + goto fail3; + + EFSYS_ASSERT3U(filter_idx, <, sftp->sft_size); + saved_sf_spec = &sftp->sft_spec[filter_idx]; + + if (siena_filter_test_used(sftp, filter_idx)) { + /* All Siena filter are considered the same priority */ + switch (policy) { + case EFX_FILTER_REPLACEMENT_NEVER: + case EFX_FILTER_REPLACEMENT_HIGHER_PRIORITY: + rc = EEXIST; + goto fail4; + case EFX_FILTER_REPLACEMENT_HIGHER_OR_EQUAL_PRIORITY: + break; + default: + EFSYS_ASSERT(0); + break; + } + } + siena_filter_set_used(sftp, filter_idx); + *saved_sf_spec = sf_spec; + + if (sfp->sf_depth[sf_spec.sfs_type] < depth) { + sfp->sf_depth[sf_spec.sfs_type] = depth; + if (tbl_id == EFX_SIENA_FILTER_TBL_TX_IP || + tbl_id == EFX_SIENA_FILTER_TBL_TX_MAC) + siena_filter_push_tx_limits(enp); + else + siena_filter_push_rx_limits(enp); + } + + siena_filter_push_entry(enp, sf_spec.sfs_type, + filter_idx, &filter); + + EFSYS_UNLOCK(enp->en_eslp, state); + return (0); + +fail4: + EFSYS_PROBE(fail4); + +fail3: + EFSYS_UNLOCK(enp->en_eslp, state); + EFSYS_PROBE(fail3); + +fail2: + EFSYS_PROBE(fail2); + +fail1: + 
EFSYS_PROBE1(fail1, efx_rc_t, rc); + return (rc); +} + +static __checkReturn efx_rc_t +siena_filter_delete( + __in efx_nic_t *enp, + __inout efx_filter_spec_t *spec) +{ + efx_rc_t rc; + siena_filter_spec_t sf_spec; + siena_filter_t *sfp = enp->en_filter.ef_siena_filter; + siena_filter_tbl_id_t tbl_id; + siena_filter_tbl_t *sftp; + efx_oword_t filter; + int filter_idx; + unsigned int depth; + efsys_lock_state_t state; + uint32_t key; + + EFSYS_ASSERT3P(spec, !=, NULL); + + if ((rc = siena_filter_spec_from_gen_spec(&sf_spec, spec)) != 0) + goto fail1; + + tbl_id = siena_filter_tbl_id(sf_spec.sfs_type); + sftp = &sfp->sf_tbl[tbl_id]; + + key = siena_filter_build(&filter, &sf_spec); + + EFSYS_LOCK(enp->en_eslp, state); + + rc = siena_filter_search(sftp, &sf_spec, key, B_FALSE, + &filter_idx, &depth); + if (rc != 0) + goto fail2; + + siena_filter_clear_entry(enp, sftp, filter_idx); + if (sftp->sft_used == 0) + siena_filter_reset_search_depth(sfp, tbl_id); + + EFSYS_UNLOCK(enp->en_eslp, state); + return (0); + +fail2: + EFSYS_UNLOCK(enp->en_eslp, state); + EFSYS_PROBE(fail2); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + return (rc); +} + +#define SIENA_MAX_SUPPORTED_MATCHES 4 + +static __checkReturn efx_rc_t +siena_filter_supported_filters( + __in efx_nic_t *enp, + __out_ecount(buffer_length) uint32_t *buffer, + __in size_t buffer_length, + __out size_t *list_lengthp) +{ + uint32_t index = 0; + uint32_t rx_matches[SIENA_MAX_SUPPORTED_MATCHES]; + size_t list_length; + efx_rc_t rc; + + rx_matches[index++] = + EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO | + EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT | + EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT; + + rx_matches[index++] = + EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO | + EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT; + + if (enp->en_features & EFX_FEATURE_MAC_HEADER_FILTERS) { + rx_matches[index++] = + EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_LOC_MAC; + + rx_matches[index++] = EFX_FILTER_MATCH_LOC_MAC; + } + + EFSYS_ASSERT3U(index, <=, SIENA_MAX_SUPPORTED_MATCHES); + list_length = index; + + *list_lengthp = list_length; + + if (buffer_length < list_length) { + rc = ENOSPC; + goto fail1; + } + + memcpy(buffer, rx_matches, list_length * sizeof (rx_matches[0])); + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +#undef MAX_SUPPORTED + +#endif /* EFSYS_OPT_SIENA */ + +#endif /* EFSYS_OPT_FILTER */ diff --git a/src/spdk/dpdk/drivers/net/sfc/base/efx_hash.c b/src/spdk/dpdk/drivers/net/sfc/base/efx_hash.c new file mode 100644 index 000000000..fd235620c --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/base/efx_hash.c @@ -0,0 +1,304 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2014-2019 Solarflare Communications Inc. + * + * Copyright 2006 Bob Jenkins + * + * Derived from public domain source, see + * : + * + * "lookup3.c, by Bob Jenkins, May 2006, Public Domain. + * + * These are functions for producing 32-bit hashes for hash table lookup... + * ...You can use this free for any purpose. It's in the public domain. + * It has no warranty." + */ + +#include "efx.h" +#include "efx_impl.h" + +/* Hash initial value */ +#define EFX_HASH_INITIAL_VALUE 0xdeadbeef + +/* + * Rotate a 32-bit value left + * + * Allow platform to provide an intrinsic or optimised routine and + * fall-back to a simple shift based implementation. 
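+ *
+ * For example, rotating 0x80000001 left by one bit yields 0x00000003.
+ * The fall-back form assumes 0 < _shift < 32 (a shift of 0 or 32 would
+ * be undefined behaviour), which holds for every shift count used below.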
+ */ +#if EFSYS_HAS_ROTL_DWORD + +#define EFX_HASH_ROTATE(_value, _shift) \ + EFSYS_ROTL_DWORD(_value, _shift) + +#else + +#define EFX_HASH_ROTATE(_value, _shift) \ + (((_value) << (_shift)) | ((_value) >> (32 - (_shift)))) + +#endif + +/* Mix three 32-bit values reversibly */ +#define EFX_HASH_MIX(_a, _b, _c) \ + do { \ + _a -= _c; \ + _a ^= EFX_HASH_ROTATE(_c, 4); \ + _c += _b; \ + _b -= _a; \ + _b ^= EFX_HASH_ROTATE(_a, 6); \ + _a += _c; \ + _c -= _b; \ + _c ^= EFX_HASH_ROTATE(_b, 8); \ + _b += _a; \ + _a -= _c; \ + _a ^= EFX_HASH_ROTATE(_c, 16); \ + _c += _b; \ + _b -= _a; \ + _b ^= EFX_HASH_ROTATE(_a, 19); \ + _a += _c; \ + _c -= _b; \ + _c ^= EFX_HASH_ROTATE(_b, 4); \ + _b += _a; \ + _NOTE(CONSTANTCONDITION) \ + } while (B_FALSE) + +/* Final mixing of three 32-bit values into one (_c) */ +#define EFX_HASH_FINALISE(_a, _b, _c) \ + do { \ + _c ^= _b; \ + _c -= EFX_HASH_ROTATE(_b, 14); \ + _a ^= _c; \ + _a -= EFX_HASH_ROTATE(_c, 11); \ + _b ^= _a; \ + _b -= EFX_HASH_ROTATE(_a, 25); \ + _c ^= _b; \ + _c -= EFX_HASH_ROTATE(_b, 16); \ + _a ^= _c; \ + _a -= EFX_HASH_ROTATE(_c, 4); \ + _b ^= _a; \ + _b -= EFX_HASH_ROTATE(_a, 14); \ + _c ^= _b; \ + _c -= EFX_HASH_ROTATE(_b, 24); \ + _NOTE(CONSTANTCONDITION) \ + } while (B_FALSE) + + +/* Produce a 32-bit hash from 32-bit aligned input */ + __checkReturn uint32_t +efx_hash_dwords( + __in_ecount(count) uint32_t const *input, + __in size_t count, + __in uint32_t init) +{ + uint32_t a; + uint32_t b; + uint32_t c; + + /* Set up the initial internal state */ + a = b = c = EFX_HASH_INITIAL_VALUE + + (((uint32_t)count) * sizeof (uint32_t)) + init; + + /* Handle all but the last three dwords of the input */ + while (count > 3) { + a += input[0]; + b += input[1]; + c += input[2]; + EFX_HASH_MIX(a, b, c); + + count -= 3; + input += 3; + } + + /* Handle the left-overs */ + switch (count) { + case 3: + c += input[2]; + /* Fall-through */ + case 2: + b += input[1]; + /* Fall-through */ + case 1: + a += input[0]; + EFX_HASH_FINALISE(a, b, c); + break; + + case 0: + /* Should only get here if count parameter was zero */ + break; + } + + return (c); +} + +#if EFSYS_IS_BIG_ENDIAN + +/* Produce a 32-bit hash from arbitrarily aligned input */ + __checkReturn uint32_t +efx_hash_bytes( + __in_ecount(length) uint8_t const *input, + __in size_t length, + __in uint32_t init) +{ + uint32_t a; + uint32_t b; + uint32_t c; + + /* Set up the initial internal state */ + a = b = c = EFX_HASH_INITIAL_VALUE + (uint32_t)length + init; + + /* Handle all but the last twelve bytes of the input */ + while (length > 12) { + a += ((uint32_t)input[0]) << 24; + a += ((uint32_t)input[1]) << 16; + a += ((uint32_t)input[2]) << 8; + a += ((uint32_t)input[3]); + b += ((uint32_t)input[4]) << 24; + b += ((uint32_t)input[5]) << 16; + b += ((uint32_t)input[6]) << 8; + b += ((uint32_t)input[7]); + c += ((uint32_t)input[8]) << 24; + c += ((uint32_t)input[9]) << 16; + c += ((uint32_t)input[10]) << 8; + c += ((uint32_t)input[11]); + EFX_HASH_MIX(a, b, c); + length -= 12; + input += 12; + } + + /* Handle the left-overs */ + switch (length) { + case 12: + c += ((uint32_t)input[11]); + /* Fall-through */ + case 11: + c += ((uint32_t)input[10]) << 8; + /* Fall-through */ + case 10: + c += ((uint32_t)input[9]) << 16; + /* Fall-through */ + case 9: + c += ((uint32_t)input[8]) << 24; + /* Fall-through */ + case 8: + b += ((uint32_t)input[7]); + /* Fall-through */ + case 7: + b += ((uint32_t)input[6]) << 8; + /* Fall-through */ + case 6: + b += ((uint32_t)input[5]) << 16; + /* Fall-through */ + case 5: + b 
+= ((uint32_t)input[4]) << 24; + /* Fall-through */ + case 4: + a += ((uint32_t)input[3]); + /* Fall-through */ + case 3: + a += ((uint32_t)input[2]) << 8; + /* Fall-through */ + case 2: + a += ((uint32_t)input[1]) << 16; + /* Fall-through */ + case 1: + a += ((uint32_t)input[0]) << 24; + EFX_HASH_FINALISE(a, b, c); + break; + + case 0: + /* Should only get here if length parameter was zero */ + break; + } + + return (c); +} + +#elif EFSYS_IS_LITTLE_ENDIAN + +/* Produce a 32-bit hash from arbitrarily aligned input */ + __checkReturn uint32_t +efx_hash_bytes( + __in_ecount(length) uint8_t const *input, + __in size_t length, + __in uint32_t init) +{ + uint32_t a; + uint32_t b; + uint32_t c; + + /* Set up the initial internal state */ + a = b = c = EFX_HASH_INITIAL_VALUE + (uint32_t)length + init; + + /* Handle all but the last twelve bytes of the input */ + while (length > 12) { + a += ((uint32_t)input[0]); + a += ((uint32_t)input[1]) << 8; + a += ((uint32_t)input[2]) << 16; + a += ((uint32_t)input[3]) << 24; + b += ((uint32_t)input[4]); + b += ((uint32_t)input[5]) << 8; + b += ((uint32_t)input[6]) << 16; + b += ((uint32_t)input[7]) << 24; + c += ((uint32_t)input[8]); + c += ((uint32_t)input[9]) << 8; + c += ((uint32_t)input[10]) << 16; + c += ((uint32_t)input[11]) << 24; + EFX_HASH_MIX(a, b, c); + length -= 12; + input += 12; + } + + /* Handle the left-overs */ + switch (length) { + case 12: + c += ((uint32_t)input[11]) << 24; + /* Fall-through */ + case 11: + c += ((uint32_t)input[10]) << 16; + /* Fall-through */ + case 10: + c += ((uint32_t)input[9]) << 8; + /* Fall-through */ + case 9: + c += ((uint32_t)input[8]); + /* Fall-through */ + case 8: + b += ((uint32_t)input[7]) << 24; + /* Fall-through */ + case 7: + b += ((uint32_t)input[6]) << 16; + /* Fall-through */ + case 6: + b += ((uint32_t)input[5]) << 8; + /* Fall-through */ + case 5: + b += ((uint32_t)input[4]); + /* Fall-through */ + case 4: + a += ((uint32_t)input[3]) << 24; + /* Fall-through */ + case 3: + a += ((uint32_t)input[2]) << 16; + /* Fall-through */ + case 2: + a += ((uint32_t)input[1]) << 8; + /* Fall-through */ + case 1: + a += ((uint32_t)input[0]); + EFX_HASH_FINALISE(a, b, c); + break; + + case 0: + /* Should only get here if length parameter was zero */ + break; + } + + return (c); +} + +#else + +#error "Neither of EFSYS_IS_{BIG,LITTLE}_ENDIAN is set" + +#endif diff --git a/src/spdk/dpdk/drivers/net/sfc/base/efx_impl.h b/src/spdk/dpdk/drivers/net/sfc/base/efx_impl.h new file mode 100644 index 000000000..c4718dba7 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/base/efx_impl.h @@ -0,0 +1,1415 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2007-2019 Solarflare Communications Inc. 
+ */ + +#ifndef _SYS_EFX_IMPL_H +#define _SYS_EFX_IMPL_H + +#include "efx.h" +#include "efx_regs.h" +#include "efx_regs_ef10.h" +#if EFSYS_OPT_MCDI +#include "efx_mcdi.h" +#endif /* EFSYS_OPT_MCDI */ + +/* FIXME: Add definition for driver generated software events */ +#ifndef ESE_DZ_EV_CODE_DRV_GEN_EV +#define ESE_DZ_EV_CODE_DRV_GEN_EV FSE_AZ_EV_CODE_DRV_GEN_EV +#endif + + +#if EFSYS_OPT_SIENA +#include "siena_impl.h" +#endif /* EFSYS_OPT_SIENA */ + +#if EFSYS_OPT_HUNTINGTON +#include "hunt_impl.h" +#endif /* EFSYS_OPT_HUNTINGTON */ + +#if EFSYS_OPT_MEDFORD +#include "medford_impl.h" +#endif /* EFSYS_OPT_MEDFORD */ + +#if EFSYS_OPT_MEDFORD2 +#include "medford2_impl.h" +#endif /* EFSYS_OPT_MEDFORD2 */ + +#if EFX_OPTS_EF10() +#include "ef10_impl.h" +#endif /* EFX_OPTS_EF10() */ + +#ifdef __cplusplus +extern "C" { +#endif + +#define EFX_MOD_MCDI 0x00000001 +#define EFX_MOD_PROBE 0x00000002 +#define EFX_MOD_NVRAM 0x00000004 +#define EFX_MOD_VPD 0x00000008 +#define EFX_MOD_NIC 0x00000010 +#define EFX_MOD_INTR 0x00000020 +#define EFX_MOD_EV 0x00000040 +#define EFX_MOD_RX 0x00000080 +#define EFX_MOD_TX 0x00000100 +#define EFX_MOD_PORT 0x00000200 +#define EFX_MOD_MON 0x00000400 +#define EFX_MOD_FILTER 0x00001000 +#define EFX_MOD_LIC 0x00002000 +#define EFX_MOD_TUNNEL 0x00004000 +#define EFX_MOD_EVB 0x00008000 +#define EFX_MOD_PROXY 0x00010000 + +#define EFX_RESET_PHY 0x00000001 +#define EFX_RESET_RXQ_ERR 0x00000002 +#define EFX_RESET_TXQ_ERR 0x00000004 +#define EFX_RESET_HW_UNAVAIL 0x00000008 + +typedef enum efx_mac_type_e { + EFX_MAC_INVALID = 0, + EFX_MAC_SIENA, + EFX_MAC_HUNTINGTON, + EFX_MAC_MEDFORD, + EFX_MAC_MEDFORD2, + EFX_MAC_NTYPES +} efx_mac_type_t; + +typedef struct efx_ev_ops_s { + efx_rc_t (*eevo_init)(efx_nic_t *); + void (*eevo_fini)(efx_nic_t *); + efx_rc_t (*eevo_qcreate)(efx_nic_t *, unsigned int, + efsys_mem_t *, size_t, uint32_t, + uint32_t, uint32_t, efx_evq_t *); + void (*eevo_qdestroy)(efx_evq_t *); + efx_rc_t (*eevo_qprime)(efx_evq_t *, unsigned int); + void (*eevo_qpost)(efx_evq_t *, uint16_t); + efx_rc_t (*eevo_qmoderate)(efx_evq_t *, unsigned int); +#if EFSYS_OPT_QSTATS + void (*eevo_qstats_update)(efx_evq_t *, efsys_stat_t *); +#endif +} efx_ev_ops_t; + +typedef struct efx_tx_ops_s { + efx_rc_t (*etxo_init)(efx_nic_t *); + void (*etxo_fini)(efx_nic_t *); + efx_rc_t (*etxo_qcreate)(efx_nic_t *, + unsigned int, unsigned int, + efsys_mem_t *, size_t, + uint32_t, uint16_t, + efx_evq_t *, efx_txq_t *, + unsigned int *); + void (*etxo_qdestroy)(efx_txq_t *); + efx_rc_t (*etxo_qpost)(efx_txq_t *, efx_buffer_t *, + unsigned int, unsigned int, + unsigned int *); + void (*etxo_qpush)(efx_txq_t *, unsigned int, unsigned int); + efx_rc_t (*etxo_qpace)(efx_txq_t *, unsigned int); + efx_rc_t (*etxo_qflush)(efx_txq_t *); + void (*etxo_qenable)(efx_txq_t *); + efx_rc_t (*etxo_qpio_enable)(efx_txq_t *); + void (*etxo_qpio_disable)(efx_txq_t *); + efx_rc_t (*etxo_qpio_write)(efx_txq_t *, uint8_t *, size_t, + size_t); + efx_rc_t (*etxo_qpio_post)(efx_txq_t *, size_t, unsigned int, + unsigned int *); + efx_rc_t (*etxo_qdesc_post)(efx_txq_t *, efx_desc_t *, + unsigned int, unsigned int, + unsigned int *); + void (*etxo_qdesc_dma_create)(efx_txq_t *, efsys_dma_addr_t, + size_t, boolean_t, + efx_desc_t *); + void (*etxo_qdesc_tso_create)(efx_txq_t *, uint16_t, + uint32_t, uint8_t, + efx_desc_t *); + void (*etxo_qdesc_tso2_create)(efx_txq_t *, uint16_t, + uint16_t, uint32_t, uint16_t, + efx_desc_t *, int); + void (*etxo_qdesc_vlantci_create)(efx_txq_t *, uint16_t, + efx_desc_t *); + void 
(*etxo_qdesc_checksum_create)(efx_txq_t *, uint16_t, + efx_desc_t *); +#if EFSYS_OPT_QSTATS + void (*etxo_qstats_update)(efx_txq_t *, + efsys_stat_t *); +#endif +} efx_tx_ops_t; + +typedef union efx_rxq_type_data_u { + struct { + size_t ed_buf_size; + } ertd_default; +#if EFSYS_OPT_RX_PACKED_STREAM + struct { + uint32_t eps_buf_size; + } ertd_packed_stream; +#endif +#if EFSYS_OPT_RX_ES_SUPER_BUFFER + struct { + uint32_t eessb_bufs_per_desc; + uint32_t eessb_max_dma_len; + uint32_t eessb_buf_stride; + uint32_t eessb_hol_block_timeout; + } ertd_es_super_buffer; +#endif +} efx_rxq_type_data_t; + +typedef struct efx_rx_ops_s { + efx_rc_t (*erxo_init)(efx_nic_t *); + void (*erxo_fini)(efx_nic_t *); +#if EFSYS_OPT_RX_SCATTER + efx_rc_t (*erxo_scatter_enable)(efx_nic_t *, unsigned int); +#endif +#if EFSYS_OPT_RX_SCALE + efx_rc_t (*erxo_scale_context_alloc)(efx_nic_t *, + efx_rx_scale_context_type_t, + uint32_t, uint32_t *); + efx_rc_t (*erxo_scale_context_free)(efx_nic_t *, uint32_t); + efx_rc_t (*erxo_scale_mode_set)(efx_nic_t *, uint32_t, + efx_rx_hash_alg_t, + efx_rx_hash_type_t, boolean_t); + efx_rc_t (*erxo_scale_key_set)(efx_nic_t *, uint32_t, + uint8_t *, size_t); + efx_rc_t (*erxo_scale_tbl_set)(efx_nic_t *, uint32_t, + unsigned int *, size_t); + uint32_t (*erxo_prefix_hash)(efx_nic_t *, efx_rx_hash_alg_t, + uint8_t *); +#endif /* EFSYS_OPT_RX_SCALE */ + efx_rc_t (*erxo_prefix_pktlen)(efx_nic_t *, uint8_t *, + uint16_t *); + void (*erxo_qpost)(efx_rxq_t *, efsys_dma_addr_t *, size_t, + unsigned int, unsigned int, + unsigned int); + void (*erxo_qpush)(efx_rxq_t *, unsigned int, unsigned int *); +#if EFSYS_OPT_RX_PACKED_STREAM + void (*erxo_qpush_ps_credits)(efx_rxq_t *); + uint8_t * (*erxo_qps_packet_info)(efx_rxq_t *, uint8_t *, + uint32_t, uint32_t, + uint16_t *, uint32_t *, uint32_t *); +#endif + efx_rc_t (*erxo_qflush)(efx_rxq_t *); + void (*erxo_qenable)(efx_rxq_t *); + efx_rc_t (*erxo_qcreate)(efx_nic_t *enp, unsigned int, + unsigned int, efx_rxq_type_t, + const efx_rxq_type_data_t *, + efsys_mem_t *, size_t, uint32_t, + unsigned int, + efx_evq_t *, efx_rxq_t *); + void (*erxo_qdestroy)(efx_rxq_t *); +} efx_rx_ops_t; + +typedef struct efx_mac_ops_s { + efx_rc_t (*emo_poll)(efx_nic_t *, efx_link_mode_t *); + efx_rc_t (*emo_up)(efx_nic_t *, boolean_t *); + efx_rc_t (*emo_addr_set)(efx_nic_t *); + efx_rc_t (*emo_pdu_set)(efx_nic_t *); + efx_rc_t (*emo_pdu_get)(efx_nic_t *, size_t *); + efx_rc_t (*emo_reconfigure)(efx_nic_t *); + efx_rc_t (*emo_multicast_list_set)(efx_nic_t *); + efx_rc_t (*emo_filter_default_rxq_set)(efx_nic_t *, + efx_rxq_t *, boolean_t); + void (*emo_filter_default_rxq_clear)(efx_nic_t *); +#if EFSYS_OPT_LOOPBACK + efx_rc_t (*emo_loopback_set)(efx_nic_t *, efx_link_mode_t, + efx_loopback_type_t); +#endif /* EFSYS_OPT_LOOPBACK */ +#if EFSYS_OPT_MAC_STATS + efx_rc_t (*emo_stats_get_mask)(efx_nic_t *, uint32_t *, size_t); + efx_rc_t (*emo_stats_clear)(efx_nic_t *); + efx_rc_t (*emo_stats_upload)(efx_nic_t *, efsys_mem_t *); + efx_rc_t (*emo_stats_periodic)(efx_nic_t *, efsys_mem_t *, + uint16_t, boolean_t); + efx_rc_t (*emo_stats_update)(efx_nic_t *, efsys_mem_t *, + efsys_stat_t *, uint32_t *); +#endif /* EFSYS_OPT_MAC_STATS */ +} efx_mac_ops_t; + +typedef struct efx_phy_ops_s { + efx_rc_t (*epo_power)(efx_nic_t *, boolean_t); /* optional */ + efx_rc_t (*epo_reset)(efx_nic_t *); + efx_rc_t (*epo_reconfigure)(efx_nic_t *); + efx_rc_t (*epo_verify)(efx_nic_t *); + efx_rc_t (*epo_oui_get)(efx_nic_t *, uint32_t *); + efx_rc_t (*epo_link_state_get)(efx_nic_t *, 
efx_phy_link_state_t *); +#if EFSYS_OPT_PHY_STATS + efx_rc_t (*epo_stats_update)(efx_nic_t *, efsys_mem_t *, + uint32_t *); +#endif /* EFSYS_OPT_PHY_STATS */ +#if EFSYS_OPT_BIST + efx_rc_t (*epo_bist_enable_offline)(efx_nic_t *); + efx_rc_t (*epo_bist_start)(efx_nic_t *, efx_bist_type_t); + efx_rc_t (*epo_bist_poll)(efx_nic_t *, efx_bist_type_t, + efx_bist_result_t *, uint32_t *, + unsigned long *, size_t); + void (*epo_bist_stop)(efx_nic_t *, efx_bist_type_t); +#endif /* EFSYS_OPT_BIST */ +} efx_phy_ops_t; + +#if EFSYS_OPT_FILTER + +/* + * Policy for replacing existing filter when inserting a new one. + * Note that all policies allow for storing the new lower priority + * filters as overridden by existing higher priority ones. It is needed + * to restore the lower priority filters on higher priority ones removal. + */ +typedef enum efx_filter_replacement_policy_e { + /* Cannot replace existing filter */ + EFX_FILTER_REPLACEMENT_NEVER, + /* Higher priority filters can replace lower priotiry ones */ + EFX_FILTER_REPLACEMENT_HIGHER_PRIORITY, + /* + * Higher priority filters can replace lower priority ones and + * equal priority filters can replace each other. + */ + EFX_FILTER_REPLACEMENT_HIGHER_OR_EQUAL_PRIORITY, +} efx_filter_replacement_policy_t; + +typedef struct efx_filter_ops_s { + efx_rc_t (*efo_init)(efx_nic_t *); + void (*efo_fini)(efx_nic_t *); + efx_rc_t (*efo_restore)(efx_nic_t *); + efx_rc_t (*efo_add)(efx_nic_t *, efx_filter_spec_t *, + efx_filter_replacement_policy_t policy); + efx_rc_t (*efo_delete)(efx_nic_t *, efx_filter_spec_t *); + efx_rc_t (*efo_supported_filters)(efx_nic_t *, uint32_t *, + size_t, size_t *); + efx_rc_t (*efo_reconfigure)(efx_nic_t *, uint8_t const *, boolean_t, + boolean_t, boolean_t, boolean_t, + uint8_t const *, uint32_t); +} efx_filter_ops_t; + +extern __checkReturn efx_rc_t +efx_filter_reconfigure( + __in efx_nic_t *enp, + __in_ecount(6) uint8_t const *mac_addr, + __in boolean_t all_unicst, + __in boolean_t mulcst, + __in boolean_t all_mulcst, + __in boolean_t brdcst, + __in_ecount(6*count) uint8_t const *addrs, + __in uint32_t count); + +#endif /* EFSYS_OPT_FILTER */ + +#if EFSYS_OPT_TUNNEL +typedef struct efx_tunnel_ops_s { + boolean_t (*eto_udp_encap_supported)(efx_nic_t *); + efx_rc_t (*eto_reconfigure)(efx_nic_t *); +} efx_tunnel_ops_t; +#endif /* EFSYS_OPT_TUNNEL */ + +typedef struct efx_port_s { + efx_mac_type_t ep_mac_type; + uint32_t ep_phy_type; + uint8_t ep_port; + uint32_t ep_mac_pdu; + uint8_t ep_mac_addr[6]; + efx_link_mode_t ep_link_mode; + boolean_t ep_all_unicst; + boolean_t ep_all_unicst_inserted; + boolean_t ep_mulcst; + boolean_t ep_all_mulcst; + boolean_t ep_all_mulcst_inserted; + boolean_t ep_brdcst; + unsigned int ep_fcntl; + boolean_t ep_fcntl_autoneg; + efx_oword_t ep_multicst_hash[2]; + uint8_t ep_mulcst_addr_list[EFX_MAC_ADDR_LEN * + EFX_MAC_MULTICAST_LIST_MAX]; + uint32_t ep_mulcst_addr_count; +#if EFSYS_OPT_LOOPBACK + efx_loopback_type_t ep_loopback_type; + efx_link_mode_t ep_loopback_link_mode; +#endif /* EFSYS_OPT_LOOPBACK */ +#if EFSYS_OPT_PHY_FLAGS + uint32_t ep_phy_flags; +#endif /* EFSYS_OPT_PHY_FLAGS */ +#if EFSYS_OPT_PHY_LED_CONTROL + efx_phy_led_mode_t ep_phy_led_mode; +#endif /* EFSYS_OPT_PHY_LED_CONTROL */ + efx_phy_media_type_t ep_fixed_port_type; + efx_phy_media_type_t ep_module_type; + uint32_t ep_adv_cap_mask; + uint32_t ep_lp_cap_mask; + uint32_t ep_default_adv_cap_mask; + uint32_t ep_phy_cap_mask; + boolean_t ep_mac_drain; +#if EFSYS_OPT_BIST + efx_bist_type_t ep_current_bist; +#endif + const 
efx_mac_ops_t *ep_emop; + const efx_phy_ops_t *ep_epop; +} efx_port_t; + +typedef struct efx_mon_ops_s { +#if EFSYS_OPT_MON_STATS + efx_rc_t (*emo_stats_update)(efx_nic_t *, efsys_mem_t *, + efx_mon_stat_value_t *); + efx_rc_t (*emo_limits_update)(efx_nic_t *, + efx_mon_stat_limits_t *); +#endif /* EFSYS_OPT_MON_STATS */ +} efx_mon_ops_t; + +typedef struct efx_mon_s { + efx_mon_type_t em_type; + const efx_mon_ops_t *em_emop; +} efx_mon_t; + +typedef struct efx_intr_ops_s { + efx_rc_t (*eio_init)(efx_nic_t *, efx_intr_type_t, efsys_mem_t *); + void (*eio_enable)(efx_nic_t *); + void (*eio_disable)(efx_nic_t *); + void (*eio_disable_unlocked)(efx_nic_t *); + efx_rc_t (*eio_trigger)(efx_nic_t *, unsigned int); + void (*eio_status_line)(efx_nic_t *, boolean_t *, uint32_t *); + void (*eio_status_message)(efx_nic_t *, unsigned int, + boolean_t *); + void (*eio_fatal)(efx_nic_t *); + void (*eio_fini)(efx_nic_t *); +} efx_intr_ops_t; + +typedef struct efx_intr_s { + const efx_intr_ops_t *ei_eiop; + efsys_mem_t *ei_esmp; + efx_intr_type_t ei_type; + unsigned int ei_level; +} efx_intr_t; + +typedef struct efx_nic_ops_s { + efx_rc_t (*eno_probe)(efx_nic_t *); + efx_rc_t (*eno_board_cfg)(efx_nic_t *); + efx_rc_t (*eno_set_drv_limits)(efx_nic_t *, efx_drv_limits_t*); + efx_rc_t (*eno_reset)(efx_nic_t *); + efx_rc_t (*eno_init)(efx_nic_t *); + efx_rc_t (*eno_get_vi_pool)(efx_nic_t *, uint32_t *); + efx_rc_t (*eno_get_bar_region)(efx_nic_t *, efx_nic_region_t, + uint32_t *, size_t *); + boolean_t (*eno_hw_unavailable)(efx_nic_t *); + void (*eno_set_hw_unavailable)(efx_nic_t *); +#if EFSYS_OPT_DIAG + efx_rc_t (*eno_register_test)(efx_nic_t *); +#endif /* EFSYS_OPT_DIAG */ + void (*eno_fini)(efx_nic_t *); + void (*eno_unprobe)(efx_nic_t *); +} efx_nic_ops_t; + +#ifndef EFX_TXQ_LIMIT_TARGET +#define EFX_TXQ_LIMIT_TARGET 259 +#endif +#ifndef EFX_RXQ_LIMIT_TARGET +#define EFX_RXQ_LIMIT_TARGET 512 +#endif + + +#if EFSYS_OPT_FILTER + +#if EFSYS_OPT_SIENA + +typedef struct siena_filter_spec_s { + uint8_t sfs_type; + uint32_t sfs_flags; + uint32_t sfs_dmaq_id; + uint32_t sfs_dword[3]; +} siena_filter_spec_t; + +typedef enum siena_filter_type_e { + EFX_SIENA_FILTER_RX_TCP_FULL, /* TCP/IPv4 {dIP,dTCP,sIP,sTCP} */ + EFX_SIENA_FILTER_RX_TCP_WILD, /* TCP/IPv4 {dIP,dTCP, -, -} */ + EFX_SIENA_FILTER_RX_UDP_FULL, /* UDP/IPv4 {dIP,dUDP,sIP,sUDP} */ + EFX_SIENA_FILTER_RX_UDP_WILD, /* UDP/IPv4 {dIP,dUDP, -, -} */ + EFX_SIENA_FILTER_RX_MAC_FULL, /* Ethernet {dMAC,VLAN} */ + EFX_SIENA_FILTER_RX_MAC_WILD, /* Ethernet {dMAC, -} */ + + EFX_SIENA_FILTER_TX_TCP_FULL, /* TCP/IPv4 {dIP,dTCP,sIP,sTCP} */ + EFX_SIENA_FILTER_TX_TCP_WILD, /* TCP/IPv4 { -, -,sIP,sTCP} */ + EFX_SIENA_FILTER_TX_UDP_FULL, /* UDP/IPv4 {dIP,dTCP,sIP,sTCP} */ + EFX_SIENA_FILTER_TX_UDP_WILD, /* UDP/IPv4 { -, -,sIP,sUDP} */ + EFX_SIENA_FILTER_TX_MAC_FULL, /* Ethernet {sMAC,VLAN} */ + EFX_SIENA_FILTER_TX_MAC_WILD, /* Ethernet {sMAC, -} */ + + EFX_SIENA_FILTER_NTYPES +} siena_filter_type_t; + +typedef enum siena_filter_tbl_id_e { + EFX_SIENA_FILTER_TBL_RX_IP = 0, + EFX_SIENA_FILTER_TBL_RX_MAC, + EFX_SIENA_FILTER_TBL_TX_IP, + EFX_SIENA_FILTER_TBL_TX_MAC, + EFX_SIENA_FILTER_NTBLS +} siena_filter_tbl_id_t; + +typedef struct siena_filter_tbl_s { + int sft_size; /* number of entries */ + int sft_used; /* active count */ + uint32_t *sft_bitmap; /* active bitmap */ + siena_filter_spec_t *sft_spec; /* array of saved specs */ +} siena_filter_tbl_t; + +typedef struct siena_filter_s { + siena_filter_tbl_t sf_tbl[EFX_SIENA_FILTER_NTBLS]; + unsigned int 
sf_depth[EFX_SIENA_FILTER_NTYPES]; +} siena_filter_t; + +#endif /* EFSYS_OPT_SIENA */ + +typedef struct efx_filter_s { +#if EFSYS_OPT_SIENA + siena_filter_t *ef_siena_filter; +#endif /* EFSYS_OPT_SIENA */ +#if EFX_OPTS_EF10() + ef10_filter_table_t *ef_ef10_filter_table; +#endif /* EFX_OPTS_EF10() */ +} efx_filter_t; + +#if EFSYS_OPT_SIENA + +extern void +siena_filter_tbl_clear( + __in efx_nic_t *enp, + __in siena_filter_tbl_id_t tbl); + +#endif /* EFSYS_OPT_SIENA */ + +#endif /* EFSYS_OPT_FILTER */ + +#if EFSYS_OPT_MCDI + +#define EFX_TUNNEL_MAXNENTRIES (16) + +#if EFSYS_OPT_TUNNEL + +typedef struct efx_tunnel_udp_entry_s { + uint16_t etue_port; /* host/cpu-endian */ + uint16_t etue_protocol; +} efx_tunnel_udp_entry_t; + +typedef struct efx_tunnel_cfg_s { + efx_tunnel_udp_entry_t etc_udp_entries[EFX_TUNNEL_MAXNENTRIES]; + unsigned int etc_udp_entries_num; +} efx_tunnel_cfg_t; + +#endif /* EFSYS_OPT_TUNNEL */ + +typedef struct efx_mcdi_ops_s { + efx_rc_t (*emco_init)(efx_nic_t *, const efx_mcdi_transport_t *); + void (*emco_send_request)(efx_nic_t *, void *, size_t, + void *, size_t); + efx_rc_t (*emco_poll_reboot)(efx_nic_t *); + boolean_t (*emco_poll_response)(efx_nic_t *); + void (*emco_read_response)(efx_nic_t *, void *, size_t, size_t); + void (*emco_fini)(efx_nic_t *); + efx_rc_t (*emco_feature_supported)(efx_nic_t *, + efx_mcdi_feature_id_t, boolean_t *); + void (*emco_get_timeout)(efx_nic_t *, efx_mcdi_req_t *, + uint32_t *); +} efx_mcdi_ops_t; + +typedef struct efx_mcdi_s { + const efx_mcdi_ops_t *em_emcop; + const efx_mcdi_transport_t *em_emtp; + efx_mcdi_iface_t em_emip; +} efx_mcdi_t; + +#endif /* EFSYS_OPT_MCDI */ + +#if EFSYS_OPT_NVRAM + +/* Invalid partition ID for en_nvram_partn_locked field of efx_nc_t */ +#define EFX_NVRAM_PARTN_INVALID (0xffffffffu) + +typedef struct efx_nvram_ops_s { +#if EFSYS_OPT_DIAG + efx_rc_t (*envo_test)(efx_nic_t *); +#endif /* EFSYS_OPT_DIAG */ + efx_rc_t (*envo_type_to_partn)(efx_nic_t *, efx_nvram_type_t, + uint32_t *); + efx_rc_t (*envo_partn_info)(efx_nic_t *, uint32_t, + efx_nvram_info_t *); + efx_rc_t (*envo_partn_rw_start)(efx_nic_t *, uint32_t, size_t *); + efx_rc_t (*envo_partn_read)(efx_nic_t *, uint32_t, + unsigned int, caddr_t, size_t); + efx_rc_t (*envo_partn_read_backup)(efx_nic_t *, uint32_t, + unsigned int, caddr_t, size_t); + efx_rc_t (*envo_partn_erase)(efx_nic_t *, uint32_t, + unsigned int, size_t); + efx_rc_t (*envo_partn_write)(efx_nic_t *, uint32_t, + unsigned int, caddr_t, size_t); + efx_rc_t (*envo_partn_rw_finish)(efx_nic_t *, uint32_t, + uint32_t *); + efx_rc_t (*envo_partn_get_version)(efx_nic_t *, uint32_t, + uint32_t *, uint16_t *); + efx_rc_t (*envo_partn_set_version)(efx_nic_t *, uint32_t, + uint16_t *); + efx_rc_t (*envo_buffer_validate)(uint32_t, + caddr_t, size_t); +} efx_nvram_ops_t; +#endif /* EFSYS_OPT_NVRAM */ + +#if EFSYS_OPT_VPD +typedef struct efx_vpd_ops_s { + efx_rc_t (*evpdo_init)(efx_nic_t *); + efx_rc_t (*evpdo_size)(efx_nic_t *, size_t *); + efx_rc_t (*evpdo_read)(efx_nic_t *, caddr_t, size_t); + efx_rc_t (*evpdo_verify)(efx_nic_t *, caddr_t, size_t); + efx_rc_t (*evpdo_reinit)(efx_nic_t *, caddr_t, size_t); + efx_rc_t (*evpdo_get)(efx_nic_t *, caddr_t, size_t, + efx_vpd_value_t *); + efx_rc_t (*evpdo_set)(efx_nic_t *, caddr_t, size_t, + efx_vpd_value_t *); + efx_rc_t (*evpdo_next)(efx_nic_t *, caddr_t, size_t, + efx_vpd_value_t *, unsigned int *); + efx_rc_t (*evpdo_write)(efx_nic_t *, caddr_t, size_t); + void (*evpdo_fini)(efx_nic_t *); +} efx_vpd_ops_t; +#endif /* EFSYS_OPT_VPD */ + +#if 
EFSYS_OPT_VPD || EFSYS_OPT_NVRAM + + __checkReturn efx_rc_t +efx_mcdi_nvram_partitions( + __in efx_nic_t *enp, + __out_bcount(size) caddr_t data, + __in size_t size, + __out unsigned int *npartnp); + + __checkReturn efx_rc_t +efx_mcdi_nvram_metadata( + __in efx_nic_t *enp, + __in uint32_t partn, + __out uint32_t *subtypep, + __out_ecount(4) uint16_t version[4], + __out_bcount_opt(size) char *descp, + __in size_t size); + + __checkReturn efx_rc_t +efx_mcdi_nvram_info( + __in efx_nic_t *enp, + __in uint32_t partn, + __out efx_nvram_info_t *eni); + + __checkReturn efx_rc_t +efx_mcdi_nvram_update_start( + __in efx_nic_t *enp, + __in uint32_t partn); + + __checkReturn efx_rc_t +efx_mcdi_nvram_read( + __in efx_nic_t *enp, + __in uint32_t partn, + __in uint32_t offset, + __out_bcount(size) caddr_t data, + __in size_t size, + __in uint32_t mode); + + __checkReturn efx_rc_t +efx_mcdi_nvram_erase( + __in efx_nic_t *enp, + __in uint32_t partn, + __in uint32_t offset, + __in size_t size); + + __checkReturn efx_rc_t +efx_mcdi_nvram_write( + __in efx_nic_t *enp, + __in uint32_t partn, + __in uint32_t offset, + __in_bcount(size) caddr_t data, + __in size_t size); + +#define EFX_NVRAM_UPDATE_FLAGS_BACKGROUND 0x00000001 +#define EFX_NVRAM_UPDATE_FLAGS_POLL 0x00000002 + + __checkReturn efx_rc_t +efx_mcdi_nvram_update_finish( + __in efx_nic_t *enp, + __in uint32_t partn, + __in boolean_t reboot, + __in uint32_t flags, + __out_opt uint32_t *verify_resultp); + +#if EFSYS_OPT_DIAG + + __checkReturn efx_rc_t +efx_mcdi_nvram_test( + __in efx_nic_t *enp, + __in uint32_t partn); + +#endif /* EFSYS_OPT_DIAG */ + +#endif /* EFSYS_OPT_VPD || EFSYS_OPT_NVRAM */ + +#if EFSYS_OPT_LICENSING + +typedef struct efx_lic_ops_s { + efx_rc_t (*elo_update_licenses)(efx_nic_t *); + efx_rc_t (*elo_get_key_stats)(efx_nic_t *, efx_key_stats_t *); + efx_rc_t (*elo_app_state)(efx_nic_t *, uint64_t, boolean_t *); + efx_rc_t (*elo_get_id)(efx_nic_t *, size_t, uint32_t *, + size_t *, uint8_t *); + efx_rc_t (*elo_find_start) + (efx_nic_t *, caddr_t, size_t, uint32_t *); + efx_rc_t (*elo_find_end)(efx_nic_t *, caddr_t, size_t, + uint32_t, uint32_t *); + boolean_t (*elo_find_key)(efx_nic_t *, caddr_t, size_t, + uint32_t, uint32_t *, uint32_t *); + boolean_t (*elo_validate_key)(efx_nic_t *, + caddr_t, uint32_t); + efx_rc_t (*elo_read_key)(efx_nic_t *, + caddr_t, size_t, uint32_t, uint32_t, + caddr_t, size_t, uint32_t *); + efx_rc_t (*elo_write_key)(efx_nic_t *, + caddr_t, size_t, uint32_t, + caddr_t, uint32_t, uint32_t *); + efx_rc_t (*elo_delete_key)(efx_nic_t *, + caddr_t, size_t, uint32_t, + uint32_t, uint32_t, uint32_t *); + efx_rc_t (*elo_create_partition)(efx_nic_t *, + caddr_t, size_t); + efx_rc_t (*elo_finish_partition)(efx_nic_t *, + caddr_t, size_t); +} efx_lic_ops_t; + +#endif + +#if EFSYS_OPT_EVB + +struct efx_vswitch_s { + efx_nic_t *ev_enp; + efx_vswitch_id_t ev_vswitch_id; + uint32_t ev_num_vports; + /* + * Vport configuration array: index 0 to store PF configuration + * and next ev_num_vports-1 entries hold VFs configuration. 
+ */ + efx_vport_config_t *ev_evcp; +}; + +typedef struct efx_evb_ops_s { + efx_rc_t (*eeo_init)(efx_nic_t *); + void (*eeo_fini)(efx_nic_t *); + efx_rc_t (*eeo_vswitch_alloc)(efx_nic_t *, efx_vswitch_id_t *); + efx_rc_t (*eeo_vswitch_free)(efx_nic_t *, efx_vswitch_id_t); + efx_rc_t (*eeo_vport_alloc)(efx_nic_t *, efx_vswitch_id_t, + efx_vport_type_t, uint16_t, + boolean_t, efx_vport_id_t *); + efx_rc_t (*eeo_vport_free)(efx_nic_t *, efx_vswitch_id_t, + efx_vport_id_t); + efx_rc_t (*eeo_vport_mac_addr_add)(efx_nic_t *, efx_vswitch_id_t, + efx_vport_id_t, uint8_t *); + efx_rc_t (*eeo_vport_mac_addr_del)(efx_nic_t *, efx_vswitch_id_t, + efx_vport_id_t, uint8_t *); + efx_rc_t (*eeo_vadaptor_alloc)(efx_nic_t *, efx_vswitch_id_t, + efx_vport_id_t); + efx_rc_t (*eeo_vadaptor_free)(efx_nic_t *, efx_vswitch_id_t, + efx_vport_id_t); + efx_rc_t (*eeo_vport_assign)(efx_nic_t *, efx_vswitch_id_t, + efx_vport_id_t, uint32_t); + efx_rc_t (*eeo_vport_reconfigure)(efx_nic_t *, efx_vswitch_id_t, + efx_vport_id_t, + uint16_t *, uint8_t *, + boolean_t *); + efx_rc_t (*eeo_vport_stats)(efx_nic_t *, efx_vswitch_id_t, + efx_vport_id_t, efsys_mem_t *); +} efx_evb_ops_t; + +extern __checkReturn boolean_t +efx_is_zero_eth_addr( + __in_bcount(EFX_MAC_ADDR_LEN) const uint8_t *addrp); + +#endif /* EFSYS_OPT_EVB */ + +#if EFSYS_OPT_MCDI_PROXY_AUTH_SERVER + +#define EFX_PROXY_CONFIGURE_MAGIC 0xAB2015EF + + +typedef struct efx_proxy_ops_s { + efx_rc_t (*epo_init)(efx_nic_t *); + void (*epo_fini)(efx_nic_t *); + efx_rc_t (*epo_mc_config)(efx_nic_t *, efsys_mem_t *, + efsys_mem_t *, efsys_mem_t *, + uint32_t, uint32_t *, size_t); + efx_rc_t (*epo_disable)(efx_nic_t *); + efx_rc_t (*epo_privilege_modify)(efx_nic_t *, uint32_t, uint32_t, + uint32_t, uint32_t, uint32_t); + efx_rc_t (*epo_set_privilege_mask)(efx_nic_t *, uint32_t, + uint32_t, uint32_t); + efx_rc_t (*epo_complete_request)(efx_nic_t *, uint32_t, + uint32_t, uint32_t); + efx_rc_t (*epo_exec_cmd)(efx_nic_t *, efx_proxy_cmd_params_t *); + efx_rc_t (*epo_get_privilege_mask)(efx_nic_t *, uint32_t, + uint32_t, uint32_t *); +} efx_proxy_ops_t; + +#endif /* EFSYS_OPT_MCDI_PROXY_AUTH_SERVER */ + +#define EFX_DRV_VER_MAX 20 + +typedef struct efx_drv_cfg_s { + uint32_t edc_min_vi_count; + uint32_t edc_max_vi_count; + + uint32_t edc_max_piobuf_count; + uint32_t edc_pio_alloc_size; +} efx_drv_cfg_t; + +struct efx_nic_s { + uint32_t en_magic; + efx_family_t en_family; + uint32_t en_features; + efsys_identifier_t *en_esip; + efsys_lock_t *en_eslp; + efsys_bar_t *en_esbp; + unsigned int en_mod_flags; + unsigned int en_reset_flags; + efx_nic_cfg_t en_nic_cfg; + efx_drv_cfg_t en_drv_cfg; + efx_port_t en_port; + efx_mon_t en_mon; + efx_intr_t en_intr; + uint32_t en_ev_qcount; + uint32_t en_rx_qcount; + uint32_t en_tx_qcount; + const efx_nic_ops_t *en_enop; + const efx_ev_ops_t *en_eevop; + const efx_tx_ops_t *en_etxop; + const efx_rx_ops_t *en_erxop; + efx_fw_variant_t efv; + char en_drv_version[EFX_DRV_VER_MAX]; +#if EFSYS_OPT_FILTER + efx_filter_t en_filter; + const efx_filter_ops_t *en_efop; +#endif /* EFSYS_OPT_FILTER */ +#if EFSYS_OPT_TUNNEL + efx_tunnel_cfg_t en_tunnel_cfg; + const efx_tunnel_ops_t *en_etop; +#endif /* EFSYS_OPT_TUNNEL */ +#if EFSYS_OPT_MCDI + efx_mcdi_t en_mcdi; +#endif /* EFSYS_OPT_MCDI */ +#if EFSYS_OPT_NVRAM + uint32_t en_nvram_partn_locked; + const efx_nvram_ops_t *en_envop; +#endif /* EFSYS_OPT_NVRAM */ +#if EFSYS_OPT_VPD + const efx_vpd_ops_t *en_evpdop; +#endif /* EFSYS_OPT_VPD */ +#if EFSYS_OPT_RX_SCALE + efx_rx_hash_support_t en_hash_support; + 
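+	/* Type and handle of the default RSS context */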
efx_rx_scale_context_type_t en_rss_context_type; + uint32_t en_rss_context; +#endif /* EFSYS_OPT_RX_SCALE */ + uint32_t en_vport_id; +#if EFSYS_OPT_LICENSING + const efx_lic_ops_t *en_elop; + boolean_t en_licensing_supported; +#endif + union { +#if EFSYS_OPT_SIENA + struct { +#if EFSYS_OPT_NVRAM || EFSYS_OPT_VPD + unsigned int enu_partn_mask; +#endif /* EFSYS_OPT_NVRAM || EFSYS_OPT_VPD */ +#if EFSYS_OPT_VPD + caddr_t enu_svpd; + size_t enu_svpd_length; +#endif /* EFSYS_OPT_VPD */ + int enu_unused; + } siena; +#endif /* EFSYS_OPT_SIENA */ + int enu_unused; + } en_u; +#if EFX_OPTS_EF10() + union en_arch { + struct { + int ena_vi_base; + int ena_vi_count; + int ena_vi_shift; +#if EFSYS_OPT_VPD + caddr_t ena_svpd; + size_t ena_svpd_length; +#endif /* EFSYS_OPT_VPD */ + efx_piobuf_handle_t ena_piobuf_handle[EF10_MAX_PIOBUF_NBUFS]; + uint32_t ena_piobuf_count; + uint32_t ena_pio_alloc_map[EF10_MAX_PIOBUF_NBUFS]; + uint32_t ena_pio_write_vi_base; + /* Memory BAR mapping regions */ + uint32_t ena_uc_mem_map_offset; + size_t ena_uc_mem_map_size; + uint32_t ena_wc_mem_map_offset; + size_t ena_wc_mem_map_size; + } ef10; + } en_arch; +#endif /* EFX_OPTS_EF10() */ +#if EFSYS_OPT_EVB + const efx_evb_ops_t *en_eeop; + struct efx_vswitch_s *en_vswitchp; +#endif /* EFSYS_OPT_EVB */ +#if EFSYS_OPT_MCDI_PROXY_AUTH_SERVER + const efx_proxy_ops_t *en_epop; +#endif /* EFSYS_OPT_MCDI_PROXY_AUTH_SERVER */ +}; + +#define EFX_FAMILY_IS_EF10(_enp) \ + ((_enp)->en_family == EFX_FAMILY_MEDFORD2 || \ + (_enp)->en_family == EFX_FAMILY_MEDFORD || \ + (_enp)->en_family == EFX_FAMILY_HUNTINGTON) + + +#define EFX_NIC_MAGIC 0x02121996 + +typedef boolean_t (*efx_ev_handler_t)(efx_evq_t *, efx_qword_t *, + const efx_ev_callbacks_t *, void *); + +typedef struct efx_evq_rxq_state_s { + unsigned int eers_rx_read_ptr; + unsigned int eers_rx_mask; +#if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER + unsigned int eers_rx_stream_npackets; + boolean_t eers_rx_packed_stream; +#endif +#if EFSYS_OPT_RX_PACKED_STREAM + unsigned int eers_rx_packed_stream_credits; +#endif +} efx_evq_rxq_state_t; + +struct efx_evq_s { + uint32_t ee_magic; + uint32_t ee_flags; + efx_nic_t *ee_enp; + unsigned int ee_index; + unsigned int ee_mask; + efsys_mem_t *ee_esmp; +#if EFSYS_OPT_QSTATS + uint32_t ee_stat[EV_NQSTATS]; +#endif /* EFSYS_OPT_QSTATS */ + + efx_ev_handler_t ee_rx; + efx_ev_handler_t ee_tx; + efx_ev_handler_t ee_driver; + efx_ev_handler_t ee_global; + efx_ev_handler_t ee_drv_gen; +#if EFSYS_OPT_MCDI + efx_ev_handler_t ee_mcdi; +#endif /* EFSYS_OPT_MCDI */ + + efx_evq_rxq_state_t ee_rxq_state[EFX_EV_RX_NLABELS]; +}; + +#define EFX_EVQ_MAGIC 0x08081997 + +#define EFX_EVQ_SIENA_TIMER_QUANTUM_NS 6144 /* 768 cycles */ + +#if EFSYS_OPT_QSTATS +#define EFX_EV_QSTAT_INCR(_eep, _stat) \ + do { \ + (_eep)->ee_stat[_stat]++; \ + _NOTE(CONSTANTCONDITION) \ + } while (B_FALSE) +#else +#define EFX_EV_QSTAT_INCR(_eep, _stat) +#endif + +struct efx_rxq_s { + uint32_t er_magic; + efx_nic_t *er_enp; + efx_evq_t *er_eep; + unsigned int er_index; + unsigned int er_label; + unsigned int er_mask; + size_t er_buf_size; + efsys_mem_t *er_esmp; + efx_evq_rxq_state_t *er_ev_qstate; +}; + +#define EFX_RXQ_MAGIC 0x15022005 + +struct efx_txq_s { + uint32_t et_magic; + efx_nic_t *et_enp; + unsigned int et_index; + unsigned int et_mask; + efsys_mem_t *et_esmp; +#if EFSYS_OPT_HUNTINGTON + uint32_t et_pio_bufnum; + uint32_t et_pio_blknum; + uint32_t et_pio_write_offset; + uint32_t et_pio_offset; + size_t et_pio_size; +#endif +#if EFSYS_OPT_QSTATS + uint32_t 
et_stat[TX_NQSTATS]; +#endif /* EFSYS_OPT_QSTATS */ +}; + +#define EFX_TXQ_MAGIC 0x05092005 + +#define EFX_MAC_ADDR_COPY(_dst, _src) \ + do { \ + (_dst)[0] = (_src)[0]; \ + (_dst)[1] = (_src)[1]; \ + (_dst)[2] = (_src)[2]; \ + (_dst)[3] = (_src)[3]; \ + (_dst)[4] = (_src)[4]; \ + (_dst)[5] = (_src)[5]; \ + _NOTE(CONSTANTCONDITION) \ + } while (B_FALSE) + +#define EFX_MAC_BROADCAST_ADDR_SET(_dst) \ + do { \ + uint16_t *_d = (uint16_t *)(_dst); \ + _d[0] = 0xffff; \ + _d[1] = 0xffff; \ + _d[2] = 0xffff; \ + _NOTE(CONSTANTCONDITION) \ + } while (B_FALSE) + +#if EFSYS_OPT_CHECK_REG +#define EFX_CHECK_REG(_enp, _reg) \ + do { \ + const char *name = #_reg; \ + char min = name[4]; \ + char max = name[5]; \ + char rev; \ + \ + switch ((_enp)->en_family) { \ + case EFX_FAMILY_SIENA: \ + rev = 'C'; \ + break; \ + \ + case EFX_FAMILY_HUNTINGTON: \ + rev = 'D'; \ + break; \ + \ + case EFX_FAMILY_MEDFORD: \ + rev = 'E'; \ + break; \ + \ + case EFX_FAMILY_MEDFORD2: \ + rev = 'F'; \ + break; \ + \ + default: \ + rev = '?'; \ + break; \ + } \ + \ + EFSYS_ASSERT3S(rev, >=, min); \ + EFSYS_ASSERT3S(rev, <=, max); \ + \ + _NOTE(CONSTANTCONDITION) \ + } while (B_FALSE) +#else +#define EFX_CHECK_REG(_enp, _reg) do { \ + _NOTE(CONSTANTCONDITION) \ + } while (B_FALSE) +#endif + +#define EFX_BAR_READD(_enp, _reg, _edp, _lock) \ + do { \ + EFX_CHECK_REG((_enp), (_reg)); \ + EFSYS_BAR_READD((_enp)->en_esbp, _reg ## _OFST, \ + (_edp), (_lock)); \ + EFSYS_PROBE3(efx_bar_readd, const char *, #_reg, \ + uint32_t, _reg ## _OFST, \ + uint32_t, (_edp)->ed_u32[0]); \ + _NOTE(CONSTANTCONDITION) \ + } while (B_FALSE) + +#define EFX_BAR_WRITED(_enp, _reg, _edp, _lock) \ + do { \ + EFX_CHECK_REG((_enp), (_reg)); \ + EFSYS_PROBE3(efx_bar_writed, const char *, #_reg, \ + uint32_t, _reg ## _OFST, \ + uint32_t, (_edp)->ed_u32[0]); \ + EFSYS_BAR_WRITED((_enp)->en_esbp, _reg ## _OFST, \ + (_edp), (_lock)); \ + _NOTE(CONSTANTCONDITION) \ + } while (B_FALSE) + +#define EFX_BAR_READQ(_enp, _reg, _eqp) \ + do { \ + EFX_CHECK_REG((_enp), (_reg)); \ + EFSYS_BAR_READQ((_enp)->en_esbp, _reg ## _OFST, \ + (_eqp)); \ + EFSYS_PROBE4(efx_bar_readq, const char *, #_reg, \ + uint32_t, _reg ## _OFST, \ + uint32_t, (_eqp)->eq_u32[1], \ + uint32_t, (_eqp)->eq_u32[0]); \ + _NOTE(CONSTANTCONDITION) \ + } while (B_FALSE) + +#define EFX_BAR_WRITEQ(_enp, _reg, _eqp) \ + do { \ + EFX_CHECK_REG((_enp), (_reg)); \ + EFSYS_PROBE4(efx_bar_writeq, const char *, #_reg, \ + uint32_t, _reg ## _OFST, \ + uint32_t, (_eqp)->eq_u32[1], \ + uint32_t, (_eqp)->eq_u32[0]); \ + EFSYS_BAR_WRITEQ((_enp)->en_esbp, _reg ## _OFST, \ + (_eqp)); \ + _NOTE(CONSTANTCONDITION) \ + } while (B_FALSE) + +#define EFX_BAR_READO(_enp, _reg, _eop) \ + do { \ + EFX_CHECK_REG((_enp), (_reg)); \ + EFSYS_BAR_READO((_enp)->en_esbp, _reg ## _OFST, \ + (_eop), B_TRUE); \ + EFSYS_PROBE6(efx_bar_reado, const char *, #_reg, \ + uint32_t, _reg ## _OFST, \ + uint32_t, (_eop)->eo_u32[3], \ + uint32_t, (_eop)->eo_u32[2], \ + uint32_t, (_eop)->eo_u32[1], \ + uint32_t, (_eop)->eo_u32[0]); \ + _NOTE(CONSTANTCONDITION) \ + } while (B_FALSE) + +#define EFX_BAR_WRITEO(_enp, _reg, _eop) \ + do { \ + EFX_CHECK_REG((_enp), (_reg)); \ + EFSYS_PROBE6(efx_bar_writeo, const char *, #_reg, \ + uint32_t, _reg ## _OFST, \ + uint32_t, (_eop)->eo_u32[3], \ + uint32_t, (_eop)->eo_u32[2], \ + uint32_t, (_eop)->eo_u32[1], \ + uint32_t, (_eop)->eo_u32[0]); \ + EFSYS_BAR_WRITEO((_enp)->en_esbp, _reg ## _OFST, \ + (_eop), B_TRUE); \ + _NOTE(CONSTANTCONDITION) \ + } while (B_FALSE) + +/* + * Accessors for memory BAR non-VI 
tables. + * + * Code used on EF10 *must* use EFX_BAR_VI_*() macros for per-VI registers, + * to ensure the correct runtime VI window size is used on Medford2. + * + * Siena-only code may continue using EFX_BAR_TBL_*() macros for VI registers. + */ + +#define EFX_BAR_TBL_READD(_enp, _reg, _index, _edp, _lock) \ + do { \ + EFX_CHECK_REG((_enp), (_reg)); \ + EFSYS_BAR_READD((_enp)->en_esbp, \ + (_reg ## _OFST + ((_index) * _reg ## _STEP)), \ + (_edp), (_lock)); \ + EFSYS_PROBE4(efx_bar_tbl_readd, const char *, #_reg, \ + uint32_t, (_index), \ + uint32_t, _reg ## _OFST, \ + uint32_t, (_edp)->ed_u32[0]); \ + _NOTE(CONSTANTCONDITION) \ + } while (B_FALSE) + +#define EFX_BAR_TBL_WRITED(_enp, _reg, _index, _edp, _lock) \ + do { \ + EFX_CHECK_REG((_enp), (_reg)); \ + EFSYS_PROBE4(efx_bar_tbl_writed, const char *, #_reg, \ + uint32_t, (_index), \ + uint32_t, _reg ## _OFST, \ + uint32_t, (_edp)->ed_u32[0]); \ + EFSYS_BAR_WRITED((_enp)->en_esbp, \ + (_reg ## _OFST + ((_index) * _reg ## _STEP)), \ + (_edp), (_lock)); \ + _NOTE(CONSTANTCONDITION) \ + } while (B_FALSE) + +#define EFX_BAR_TBL_WRITED3(_enp, _reg, _index, _edp, _lock) \ + do { \ + EFX_CHECK_REG((_enp), (_reg)); \ + EFSYS_PROBE4(efx_bar_tbl_writed, const char *, #_reg, \ + uint32_t, (_index), \ + uint32_t, _reg ## _OFST, \ + uint32_t, (_edp)->ed_u32[0]); \ + EFSYS_BAR_WRITED((_enp)->en_esbp, \ + (_reg ## _OFST + \ + (3 * sizeof (efx_dword_t)) + \ + ((_index) * _reg ## _STEP)), \ + (_edp), (_lock)); \ + _NOTE(CONSTANTCONDITION) \ + } while (B_FALSE) + +#define EFX_BAR_TBL_READQ(_enp, _reg, _index, _eqp) \ + do { \ + EFX_CHECK_REG((_enp), (_reg)); \ + EFSYS_BAR_READQ((_enp)->en_esbp, \ + (_reg ## _OFST + ((_index) * _reg ## _STEP)), \ + (_eqp)); \ + EFSYS_PROBE5(efx_bar_tbl_readq, const char *, #_reg, \ + uint32_t, (_index), \ + uint32_t, _reg ## _OFST, \ + uint32_t, (_eqp)->eq_u32[1], \ + uint32_t, (_eqp)->eq_u32[0]); \ + _NOTE(CONSTANTCONDITION) \ + } while (B_FALSE) + +#define EFX_BAR_TBL_WRITEQ(_enp, _reg, _index, _eqp) \ + do { \ + EFX_CHECK_REG((_enp), (_reg)); \ + EFSYS_PROBE5(efx_bar_tbl_writeq, const char *, #_reg, \ + uint32_t, (_index), \ + uint32_t, _reg ## _OFST, \ + uint32_t, (_eqp)->eq_u32[1], \ + uint32_t, (_eqp)->eq_u32[0]); \ + EFSYS_BAR_WRITEQ((_enp)->en_esbp, \ + (_reg ## _OFST + ((_index) * _reg ## _STEP)), \ + (_eqp)); \ + _NOTE(CONSTANTCONDITION) \ + } while (B_FALSE) + +#define EFX_BAR_TBL_READO(_enp, _reg, _index, _eop, _lock) \ + do { \ + EFX_CHECK_REG((_enp), (_reg)); \ + EFSYS_BAR_READO((_enp)->en_esbp, \ + (_reg ## _OFST + ((_index) * _reg ## _STEP)), \ + (_eop), (_lock)); \ + EFSYS_PROBE7(efx_bar_tbl_reado, const char *, #_reg, \ + uint32_t, (_index), \ + uint32_t, _reg ## _OFST, \ + uint32_t, (_eop)->eo_u32[3], \ + uint32_t, (_eop)->eo_u32[2], \ + uint32_t, (_eop)->eo_u32[1], \ + uint32_t, (_eop)->eo_u32[0]); \ + _NOTE(CONSTANTCONDITION) \ + } while (B_FALSE) + +#define EFX_BAR_TBL_WRITEO(_enp, _reg, _index, _eop, _lock) \ + do { \ + EFX_CHECK_REG((_enp), (_reg)); \ + EFSYS_PROBE7(efx_bar_tbl_writeo, const char *, #_reg, \ + uint32_t, (_index), \ + uint32_t, _reg ## _OFST, \ + uint32_t, (_eop)->eo_u32[3], \ + uint32_t, (_eop)->eo_u32[2], \ + uint32_t, (_eop)->eo_u32[1], \ + uint32_t, (_eop)->eo_u32[0]); \ + EFSYS_BAR_WRITEO((_enp)->en_esbp, \ + (_reg ## _OFST + ((_index) * _reg ## _STEP)), \ + (_eop), (_lock)); \ + _NOTE(CONSTANTCONDITION) \ + } while (B_FALSE) + +/* + * Accessors for memory BAR per-VI registers. + * + * The VI window size is 8KB for Medford and all earlier controllers. 
+ * For Medford2, the VI window size can be 8KB, 16KB or 64KB. + */ + +#define EFX_BAR_VI_READD(_enp, _reg, _index, _edp, _lock) \ + do { \ + EFX_CHECK_REG((_enp), (_reg)); \ + EFSYS_BAR_READD((_enp)->en_esbp, \ + ((_reg ## _OFST) + \ + ((_index) << (_enp)->en_nic_cfg.enc_vi_window_shift)), \ + (_edp), (_lock)); \ + EFSYS_PROBE4(efx_bar_vi_readd, const char *, #_reg, \ + uint32_t, (_index), \ + uint32_t, _reg ## _OFST, \ + uint32_t, (_edp)->ed_u32[0]); \ + _NOTE(CONSTANTCONDITION) \ + } while (B_FALSE) + +#define EFX_BAR_VI_WRITED(_enp, _reg, _index, _edp, _lock) \ + do { \ + EFX_CHECK_REG((_enp), (_reg)); \ + EFSYS_PROBE4(efx_bar_vi_writed, const char *, #_reg, \ + uint32_t, (_index), \ + uint32_t, _reg ## _OFST, \ + uint32_t, (_edp)->ed_u32[0]); \ + EFSYS_BAR_WRITED((_enp)->en_esbp, \ + ((_reg ## _OFST) + \ + ((_index) << (_enp)->en_nic_cfg.enc_vi_window_shift)), \ + (_edp), (_lock)); \ + _NOTE(CONSTANTCONDITION) \ + } while (B_FALSE) + +#define EFX_BAR_VI_WRITED2(_enp, _reg, _index, _edp, _lock) \ + do { \ + EFX_CHECK_REG((_enp), (_reg)); \ + EFSYS_PROBE4(efx_bar_vi_writed, const char *, #_reg, \ + uint32_t, (_index), \ + uint32_t, _reg ## _OFST, \ + uint32_t, (_edp)->ed_u32[0]); \ + EFSYS_BAR_WRITED((_enp)->en_esbp, \ + ((_reg ## _OFST) + \ + (2 * sizeof (efx_dword_t)) + \ + ((_index) << (_enp)->en_nic_cfg.enc_vi_window_shift)), \ + (_edp), (_lock)); \ + _NOTE(CONSTANTCONDITION) \ + } while (B_FALSE) + +/* + * Allow drivers to perform optimised 128-bit VI doorbell writes. + * The DMA descriptor pointers (RX_DESC_UPD and TX_DESC_UPD) are + * special-cased in the BIU on the Falcon/Siena and EF10 architectures to avoid + * the need for locking in the host, and are the only ones known to be safe to + * use 128-bites write with. + */ +#define EFX_BAR_VI_DOORBELL_WRITEO(_enp, _reg, _index, _eop) \ + do { \ + EFX_CHECK_REG((_enp), (_reg)); \ + EFSYS_PROBE7(efx_bar_vi_doorbell_writeo, \ + const char *, #_reg, \ + uint32_t, (_index), \ + uint32_t, _reg ## _OFST, \ + uint32_t, (_eop)->eo_u32[3], \ + uint32_t, (_eop)->eo_u32[2], \ + uint32_t, (_eop)->eo_u32[1], \ + uint32_t, (_eop)->eo_u32[0]); \ + EFSYS_BAR_DOORBELL_WRITEO((_enp)->en_esbp, \ + (_reg ## _OFST + \ + ((_index) << (_enp)->en_nic_cfg.enc_vi_window_shift)), \ + (_eop)); \ + _NOTE(CONSTANTCONDITION) \ + } while (B_FALSE) + +#define EFX_DMA_SYNC_QUEUE_FOR_DEVICE(_esmp, _entries, _wptr, _owptr) \ + do { \ + unsigned int _new = (_wptr); \ + unsigned int _old = (_owptr); \ + \ + if ((_new) >= (_old)) \ + EFSYS_DMA_SYNC_FOR_DEVICE((_esmp), \ + (_old) * sizeof (efx_desc_t), \ + ((_new) - (_old)) * sizeof (efx_desc_t)); \ + else \ + /* \ + * It is cheaper to sync entire map than sync \ + * two parts especially when offset/size are \ + * ignored and entire map is synced in any case.\ + */ \ + EFSYS_DMA_SYNC_FOR_DEVICE((_esmp), \ + 0, \ + (_entries) * sizeof (efx_desc_t)); \ + _NOTE(CONSTANTCONDITION) \ + } while (B_FALSE) + +extern __checkReturn efx_rc_t +efx_mac_select( + __in efx_nic_t *enp); + +extern void +efx_mac_multicast_hash_compute( + __in_ecount(6*count) uint8_t const *addrs, + __in int count, + __out efx_oword_t *hash_low, + __out efx_oword_t *hash_high); + +extern __checkReturn efx_rc_t +efx_phy_probe( + __in efx_nic_t *enp); + +extern void +efx_phy_unprobe( + __in efx_nic_t *enp); + +#if EFSYS_OPT_VPD + +/* VPD utility functions */ + +extern __checkReturn efx_rc_t +efx_vpd_hunk_length( + __in_bcount(size) caddr_t data, + __in size_t size, + __out size_t *lengthp); + +extern __checkReturn efx_rc_t +efx_vpd_hunk_verify( + 
__in_bcount(size) caddr_t data, + __in size_t size, + __out_opt boolean_t *cksummedp); + +extern __checkReturn efx_rc_t +efx_vpd_hunk_reinit( + __in_bcount(size) caddr_t data, + __in size_t size, + __in boolean_t wantpid); + +extern __checkReturn efx_rc_t +efx_vpd_hunk_get( + __in_bcount(size) caddr_t data, + __in size_t size, + __in efx_vpd_tag_t tag, + __in efx_vpd_keyword_t keyword, + __out unsigned int *payloadp, + __out uint8_t *paylenp); + +extern __checkReturn efx_rc_t +efx_vpd_hunk_next( + __in_bcount(size) caddr_t data, + __in size_t size, + __out efx_vpd_tag_t *tagp, + __out efx_vpd_keyword_t *keyword, + __out_opt unsigned int *payloadp, + __out_opt uint8_t *paylenp, + __inout unsigned int *contp); + +extern __checkReturn efx_rc_t +efx_vpd_hunk_set( + __in_bcount(size) caddr_t data, + __in size_t size, + __in efx_vpd_value_t *evvp); + +#endif /* EFSYS_OPT_VPD */ + +#if EFSYS_OPT_MCDI + +extern __checkReturn efx_rc_t +efx_mcdi_set_workaround( + __in efx_nic_t *enp, + __in uint32_t type, + __in boolean_t enabled, + __out_opt uint32_t *flagsp); + +extern __checkReturn efx_rc_t +efx_mcdi_get_workarounds( + __in efx_nic_t *enp, + __out_opt uint32_t *implementedp, + __out_opt uint32_t *enabledp); + +#endif /* EFSYS_OPT_MCDI */ + +#if EFSYS_OPT_MAC_STATS + +/* + * Closed range of stats (i.e. the first and the last are included). + * The last must be greater or equal (if the range is one item only) to + * the first. + */ +struct efx_mac_stats_range { + efx_mac_stat_t first; + efx_mac_stat_t last; +}; + +typedef enum efx_stats_action_e { + EFX_STATS_CLEAR, + EFX_STATS_UPLOAD, + EFX_STATS_ENABLE_NOEVENTS, + EFX_STATS_ENABLE_EVENTS, + EFX_STATS_DISABLE, +} efx_stats_action_t; + +extern efx_rc_t +efx_mac_stats_mask_add_ranges( + __inout_bcount(mask_size) uint32_t *maskp, + __in size_t mask_size, + __in_ecount(rng_count) const struct efx_mac_stats_range *rngp, + __in unsigned int rng_count); + +extern __checkReturn efx_rc_t +efx_mcdi_mac_stats( + __in efx_nic_t *enp, + __in uint32_t vport_id, + __in_opt efsys_mem_t *esmp, + __in efx_stats_action_t action, + __in uint16_t period_ms); + +#endif /* EFSYS_OPT_MAC_STATS */ + +#ifdef __cplusplus +} +#endif + +#endif /* _SYS_EFX_IMPL_H */ diff --git a/src/spdk/dpdk/drivers/net/sfc/base/efx_intr.c b/src/spdk/dpdk/drivers/net/sfc/base/efx_intr.c new file mode 100644 index 000000000..7e3cc3c6a --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/base/efx_intr.c @@ -0,0 +1,565 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2007-2019 Solarflare Communications Inc. 
+ */ + +#include "efx.h" +#include "efx_impl.h" + + +#if EFSYS_OPT_SIENA + +static __checkReturn efx_rc_t +siena_intr_init( + __in efx_nic_t *enp, + __in efx_intr_type_t type, + __in efsys_mem_t *esmp); + +static void +siena_intr_enable( + __in efx_nic_t *enp); + +static void +siena_intr_disable( + __in efx_nic_t *enp); + +static void +siena_intr_disable_unlocked( + __in efx_nic_t *enp); + +static __checkReturn efx_rc_t +siena_intr_trigger( + __in efx_nic_t *enp, + __in unsigned int level); + +static void +siena_intr_fini( + __in efx_nic_t *enp); + +static void +siena_intr_status_line( + __in efx_nic_t *enp, + __out boolean_t *fatalp, + __out uint32_t *qmaskp); + +static void +siena_intr_status_message( + __in efx_nic_t *enp, + __in unsigned int message, + __out boolean_t *fatalp); + +static void +siena_intr_fatal( + __in efx_nic_t *enp); + +static __checkReturn boolean_t +siena_intr_check_fatal( + __in efx_nic_t *enp); + + +#endif /* EFSYS_OPT_SIENA */ + + +#if EFSYS_OPT_SIENA +static const efx_intr_ops_t __efx_intr_siena_ops = { + siena_intr_init, /* eio_init */ + siena_intr_enable, /* eio_enable */ + siena_intr_disable, /* eio_disable */ + siena_intr_disable_unlocked, /* eio_disable_unlocked */ + siena_intr_trigger, /* eio_trigger */ + siena_intr_status_line, /* eio_status_line */ + siena_intr_status_message, /* eio_status_message */ + siena_intr_fatal, /* eio_fatal */ + siena_intr_fini, /* eio_fini */ +}; +#endif /* EFSYS_OPT_SIENA */ + +#if EFX_OPTS_EF10() +static const efx_intr_ops_t __efx_intr_ef10_ops = { + ef10_intr_init, /* eio_init */ + ef10_intr_enable, /* eio_enable */ + ef10_intr_disable, /* eio_disable */ + ef10_intr_disable_unlocked, /* eio_disable_unlocked */ + ef10_intr_trigger, /* eio_trigger */ + ef10_intr_status_line, /* eio_status_line */ + ef10_intr_status_message, /* eio_status_message */ + ef10_intr_fatal, /* eio_fatal */ + ef10_intr_fini, /* eio_fini */ +}; +#endif /* EFX_OPTS_EF10() */ + + __checkReturn efx_rc_t +efx_intr_init( + __in efx_nic_t *enp, + __in efx_intr_type_t type, + __in_opt efsys_mem_t *esmp) +{ + efx_intr_t *eip = &(enp->en_intr); + const efx_intr_ops_t *eiop; + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC); + + if (enp->en_mod_flags & EFX_MOD_INTR) { + rc = EINVAL; + goto fail1; + } + + eip->ei_esmp = esmp; + eip->ei_type = type; + eip->ei_level = 0; + + enp->en_mod_flags |= EFX_MOD_INTR; + + switch (enp->en_family) { +#if EFSYS_OPT_SIENA + case EFX_FAMILY_SIENA: + eiop = &__efx_intr_siena_ops; + break; +#endif /* EFSYS_OPT_SIENA */ + +#if EFSYS_OPT_HUNTINGTON + case EFX_FAMILY_HUNTINGTON: + eiop = &__efx_intr_ef10_ops; + break; +#endif /* EFSYS_OPT_HUNTINGTON */ + +#if EFSYS_OPT_MEDFORD + case EFX_FAMILY_MEDFORD: + eiop = &__efx_intr_ef10_ops; + break; +#endif /* EFSYS_OPT_MEDFORD */ + +#if EFSYS_OPT_MEDFORD2 + case EFX_FAMILY_MEDFORD2: + eiop = &__efx_intr_ef10_ops; + break; +#endif /* EFSYS_OPT_MEDFORD2 */ + + default: + EFSYS_ASSERT(B_FALSE); + rc = ENOTSUP; + goto fail2; + } + + if ((rc = eiop->eio_init(enp, type, esmp)) != 0) + goto fail3; + + eip->ei_eiop = eiop; + + return (0); + +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + void +efx_intr_fini( + __in efx_nic_t *enp) +{ + efx_intr_t *eip = &(enp->en_intr); + const efx_intr_ops_t *eiop = eip->ei_eiop; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC); + 
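+	/* The INTR module must have been initialised by efx_intr_init() */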
EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR); + + eiop->eio_fini(enp); + + enp->en_mod_flags &= ~EFX_MOD_INTR; +} + + void +efx_intr_enable( + __in efx_nic_t *enp) +{ + efx_intr_t *eip = &(enp->en_intr); + const efx_intr_ops_t *eiop = eip->ei_eiop; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR); + + eiop->eio_enable(enp); +} + + void +efx_intr_disable( + __in efx_nic_t *enp) +{ + efx_intr_t *eip = &(enp->en_intr); + const efx_intr_ops_t *eiop = eip->ei_eiop; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR); + + eiop->eio_disable(enp); +} + + void +efx_intr_disable_unlocked( + __in efx_nic_t *enp) +{ + efx_intr_t *eip = &(enp->en_intr); + const efx_intr_ops_t *eiop = eip->ei_eiop; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR); + + eiop->eio_disable_unlocked(enp); +} + + + __checkReturn efx_rc_t +efx_intr_trigger( + __in efx_nic_t *enp, + __in unsigned int level) +{ + efx_intr_t *eip = &(enp->en_intr); + const efx_intr_ops_t *eiop = eip->ei_eiop; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR); + + return (eiop->eio_trigger(enp, level)); +} + + void +efx_intr_status_line( + __in efx_nic_t *enp, + __out boolean_t *fatalp, + __out uint32_t *qmaskp) +{ + efx_intr_t *eip = &(enp->en_intr); + const efx_intr_ops_t *eiop = eip->ei_eiop; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR); + + eiop->eio_status_line(enp, fatalp, qmaskp); +} + + void +efx_intr_status_message( + __in efx_nic_t *enp, + __in unsigned int message, + __out boolean_t *fatalp) +{ + efx_intr_t *eip = &(enp->en_intr); + const efx_intr_ops_t *eiop = eip->ei_eiop; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR); + + eiop->eio_status_message(enp, message, fatalp); +} + + void +efx_intr_fatal( + __in efx_nic_t *enp) +{ + efx_intr_t *eip = &(enp->en_intr); + const efx_intr_ops_t *eiop = eip->ei_eiop; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR); + + eiop->eio_fatal(enp); +} + + +/* ************************************************************************* */ +/* ************************************************************************* */ +/* ************************************************************************* */ + +#if EFSYS_OPT_SIENA + +static __checkReturn efx_rc_t +siena_intr_init( + __in efx_nic_t *enp, + __in efx_intr_type_t type, + __in efsys_mem_t *esmp) +{ + efx_intr_t *eip = &(enp->en_intr); + efx_oword_t oword; + efx_rc_t rc; + + if ((esmp == NULL) || (EFSYS_MEM_SIZE(esmp) < EFX_INTR_SIZE)) { + rc = EINVAL; + goto fail1; + } + + /* + * bug17213 workaround. + * + * Under legacy interrupts, don't share a level between fatal + * interrupts and event queue interrupts. Under MSI-X, they + * must share, or we won't get an interrupt. 
+ */ + if (enp->en_family == EFX_FAMILY_SIENA && + eip->ei_type == EFX_INTR_LINE) + eip->ei_level = 0x1f; + else + eip->ei_level = 0; + + /* Enable all the genuinely fatal interrupts */ + EFX_SET_OWORD(oword); + EFX_SET_OWORD_FIELD(oword, FRF_AZ_ILL_ADR_INT_KER_EN, 0); + EFX_SET_OWORD_FIELD(oword, FRF_AZ_RBUF_OWN_INT_KER_EN, 0); + EFX_SET_OWORD_FIELD(oword, FRF_AZ_TBUF_OWN_INT_KER_EN, 0); + if (enp->en_family >= EFX_FAMILY_SIENA) + EFX_SET_OWORD_FIELD(oword, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 0); + EFX_BAR_WRITEO(enp, FR_AZ_FATAL_INTR_REG_KER, &oword); + + /* Set up the interrupt address register */ + EFX_POPULATE_OWORD_3(oword, + FRF_AZ_NORM_INT_VEC_DIS_KER, (type == EFX_INTR_MESSAGE) ? 1 : 0, + FRF_AZ_INT_ADR_KER_DW0, EFSYS_MEM_ADDR(esmp) & 0xffffffff, + FRF_AZ_INT_ADR_KER_DW1, EFSYS_MEM_ADDR(esmp) >> 32); + EFX_BAR_WRITEO(enp, FR_AZ_INT_ADR_REG_KER, &oword); + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +static void +siena_intr_enable( + __in efx_nic_t *enp) +{ + efx_intr_t *eip = &(enp->en_intr); + efx_oword_t oword; + + EFX_BAR_READO(enp, FR_AZ_INT_EN_REG_KER, &oword); + + EFX_SET_OWORD_FIELD(oword, FRF_AZ_KER_INT_LEVE_SEL, eip->ei_level); + EFX_SET_OWORD_FIELD(oword, FRF_AZ_DRV_INT_EN_KER, 1); + EFX_BAR_WRITEO(enp, FR_AZ_INT_EN_REG_KER, &oword); +} + +static void +siena_intr_disable( + __in efx_nic_t *enp) +{ + efx_oword_t oword; + + EFX_BAR_READO(enp, FR_AZ_INT_EN_REG_KER, &oword); + EFX_SET_OWORD_FIELD(oword, FRF_AZ_DRV_INT_EN_KER, 0); + EFX_BAR_WRITEO(enp, FR_AZ_INT_EN_REG_KER, &oword); + + EFSYS_SPIN(10); +} + +static void +siena_intr_disable_unlocked( + __in efx_nic_t *enp) +{ + efx_oword_t oword; + + EFSYS_BAR_READO(enp->en_esbp, FR_AZ_INT_EN_REG_KER_OFST, + &oword, B_FALSE); + EFX_SET_OWORD_FIELD(oword, FRF_AZ_DRV_INT_EN_KER, 0); + EFSYS_BAR_WRITEO(enp->en_esbp, FR_AZ_INT_EN_REG_KER_OFST, + &oword, B_FALSE); +} + +static __checkReturn efx_rc_t +siena_intr_trigger( + __in efx_nic_t *enp, + __in unsigned int level) +{ + efx_intr_t *eip = &(enp->en_intr); + efx_oword_t oword; + unsigned int count; + uint32_t sel; + efx_rc_t rc; + + /* bug16757: No event queues can be initialized */ + EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_EV)); + + if (level >= EFX_NINTR_SIENA) { + rc = EINVAL; + goto fail1; + } + + if (level > EFX_MASK32(FRF_AZ_KER_INT_LEVE_SEL)) + return (ENOTSUP); /* avoid EFSYS_PROBE() */ + + sel = level; + + /* Trigger a test interrupt */ + EFX_BAR_READO(enp, FR_AZ_INT_EN_REG_KER, &oword); + EFX_SET_OWORD_FIELD(oword, FRF_AZ_KER_INT_LEVE_SEL, sel); + EFX_SET_OWORD_FIELD(oword, FRF_AZ_KER_INT_KER, 1); + EFX_BAR_WRITEO(enp, FR_AZ_INT_EN_REG_KER, &oword); + + /* + * Wait up to 100ms for the interrupt to be raised before restoring + * KER_INT_LEVE_SEL. 
Ignore a failure to raise (the caller will + * observe this soon enough anyway), but always reset KER_INT_LEVE_SEL + */ + count = 0; + do { + EFSYS_SPIN(100); /* 100us */ + + EFX_BAR_READO(enp, FR_AZ_INT_EN_REG_KER, &oword); + } while (EFX_OWORD_FIELD(oword, FRF_AZ_KER_INT_KER) && ++count < 1000); + + EFX_SET_OWORD_FIELD(oword, FRF_AZ_KER_INT_LEVE_SEL, eip->ei_level); + EFX_BAR_WRITEO(enp, FR_AZ_INT_EN_REG_KER, &oword); + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +static __checkReturn boolean_t +siena_intr_check_fatal( + __in efx_nic_t *enp) +{ + efx_intr_t *eip = &(enp->en_intr); + efsys_mem_t *esmp = eip->ei_esmp; + efx_oword_t oword; + + /* Read the syndrome */ + EFSYS_MEM_READO(esmp, 0, &oword); + + if (EFX_OWORD_FIELD(oword, FSF_AZ_NET_IVEC_FATAL_INT) != 0) { + EFSYS_PROBE(fatal); + + /* Clear the fatal interrupt condition */ + EFX_SET_OWORD_FIELD(oword, FSF_AZ_NET_IVEC_FATAL_INT, 0); + EFSYS_MEM_WRITEO(esmp, 0, &oword); + + return (B_TRUE); + } + + return (B_FALSE); +} + +static void +siena_intr_status_line( + __in efx_nic_t *enp, + __out boolean_t *fatalp, + __out uint32_t *qmaskp) +{ + efx_intr_t *eip = &(enp->en_intr); + efx_dword_t dword; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR); + + /* + * Read the queue mask and implicitly acknowledge the + * interrupt. + */ + EFX_BAR_READD(enp, FR_BZ_INT_ISR0_REG, &dword, B_FALSE); + *qmaskp = EFX_DWORD_FIELD(dword, EFX_DWORD_0); + + EFSYS_PROBE1(qmask, uint32_t, *qmaskp); + + if (*qmaskp & (1U << eip->ei_level)) + *fatalp = siena_intr_check_fatal(enp); + else + *fatalp = B_FALSE; +} + +static void +siena_intr_status_message( + __in efx_nic_t *enp, + __in unsigned int message, + __out boolean_t *fatalp) +{ + efx_intr_t *eip = &(enp->en_intr); + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR); + + if (message == eip->ei_level) + *fatalp = siena_intr_check_fatal(enp); + else + *fatalp = B_FALSE; +} + + +static void +siena_intr_fatal( + __in efx_nic_t *enp) +{ +#if EFSYS_OPT_DECODE_INTR_FATAL + efx_oword_t fatal; + efx_oword_t mem_per; + + EFX_BAR_READO(enp, FR_AZ_FATAL_INTR_REG_KER, &fatal); + EFX_ZERO_OWORD(mem_per); + + if (EFX_OWORD_FIELD(fatal, FRF_AZ_SRM_PERR_INT_KER) != 0 || + EFX_OWORD_FIELD(fatal, FRF_AZ_MEM_PERR_INT_KER) != 0) + EFX_BAR_READO(enp, FR_AZ_MEM_STAT_REG, &mem_per); + + if (EFX_OWORD_FIELD(fatal, FRF_AZ_SRAM_OOB_INT_KER) != 0) + EFSYS_ERR(enp->en_esip, EFX_ERR_SRAM_OOB, 0, 0); + + if (EFX_OWORD_FIELD(fatal, FRF_AZ_BUFID_DC_OOB_INT_KER) != 0) + EFSYS_ERR(enp->en_esip, EFX_ERR_BUFID_DC_OOB, 0, 0); + + if (EFX_OWORD_FIELD(fatal, FRF_AZ_MEM_PERR_INT_KER) != 0) + EFSYS_ERR(enp->en_esip, EFX_ERR_MEM_PERR, + EFX_OWORD_FIELD(mem_per, EFX_DWORD_0), + EFX_OWORD_FIELD(mem_per, EFX_DWORD_1)); + + if (EFX_OWORD_FIELD(fatal, FRF_AZ_RBUF_OWN_INT_KER) != 0) + EFSYS_ERR(enp->en_esip, EFX_ERR_RBUF_OWN, 0, 0); + + if (EFX_OWORD_FIELD(fatal, FRF_AZ_TBUF_OWN_INT_KER) != 0) + EFSYS_ERR(enp->en_esip, EFX_ERR_TBUF_OWN, 0, 0); + + if (EFX_OWORD_FIELD(fatal, FRF_AZ_RDESCQ_OWN_INT_KER) != 0) + EFSYS_ERR(enp->en_esip, EFX_ERR_RDESQ_OWN, 0, 0); + + if (EFX_OWORD_FIELD(fatal, FRF_AZ_TDESCQ_OWN_INT_KER) != 0) + EFSYS_ERR(enp->en_esip, EFX_ERR_TDESQ_OWN, 0, 0); + + if (EFX_OWORD_FIELD(fatal, FRF_AZ_EVQ_OWN_INT_KER) != 0) + EFSYS_ERR(enp->en_esip, EFX_ERR_EVQ_OWN, 0, 0); + + if (EFX_OWORD_FIELD(fatal, FRF_AZ_EVF_OFLO_INT_KER) != 0) + EFSYS_ERR(enp->en_esip, EFX_ERR_EVFF_OFLO, 0, 0); + + if 
(EFX_OWORD_FIELD(fatal, FRF_AZ_ILL_ADR_INT_KER) != 0) + EFSYS_ERR(enp->en_esip, EFX_ERR_ILL_ADDR, 0, 0); + + if (EFX_OWORD_FIELD(fatal, FRF_AZ_SRM_PERR_INT_KER) != 0) + EFSYS_ERR(enp->en_esip, EFX_ERR_SRAM_PERR, + EFX_OWORD_FIELD(mem_per, EFX_DWORD_0), + EFX_OWORD_FIELD(mem_per, EFX_DWORD_1)); +#else + EFSYS_ASSERT(0); +#endif +} + +static void +siena_intr_fini( + __in efx_nic_t *enp) +{ + efx_oword_t oword; + + /* Clear the interrupt address register */ + EFX_ZERO_OWORD(oword); + EFX_BAR_WRITEO(enp, FR_AZ_INT_ADR_REG_KER, &oword); +} + +#endif /* EFSYS_OPT_SIENA */ diff --git a/src/spdk/dpdk/drivers/net/sfc/base/efx_lic.c b/src/spdk/dpdk/drivers/net/sfc/base/efx_lic.c new file mode 100644 index 000000000..f962fd63b --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/base/efx_lic.c @@ -0,0 +1,1680 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2009-2019 Solarflare Communications Inc. + */ + +#include "efx.h" +#include "efx_impl.h" + +#if EFSYS_OPT_LICENSING + +#include "ef10_tlv_layout.h" +#if EFSYS_OPT_SIENA +#include "efx_regs_mcdi_aoe.h" +#endif + +#if EFSYS_OPT_SIENA | EFSYS_OPT_HUNTINGTON + + __checkReturn efx_rc_t +efx_lic_v1v2_find_start( + __in efx_nic_t *enp, + __in_bcount(buffer_size) + caddr_t bufferp, + __in size_t buffer_size, + __out uint32_t *startp); + + __checkReturn efx_rc_t +efx_lic_v1v2_find_end( + __in efx_nic_t *enp, + __in_bcount(buffer_size) + caddr_t bufferp, + __in size_t buffer_size, + __in uint32_t offset, + __out uint32_t *endp); + + __checkReturn __success(return != B_FALSE) boolean_t +efx_lic_v1v2_find_key( + __in efx_nic_t *enp, + __in_bcount(buffer_size) + caddr_t bufferp, + __in size_t buffer_size, + __in uint32_t offset, + __out uint32_t *startp, + __out uint32_t *lengthp); + + __checkReturn __success(return != B_FALSE) boolean_t +efx_lic_v1v2_validate_key( + __in efx_nic_t *enp, + __in_bcount(length) caddr_t keyp, + __in uint32_t length); + + __checkReturn efx_rc_t +efx_lic_v1v2_read_key( + __in efx_nic_t *enp, + __in_bcount(buffer_size) + caddr_t bufferp, + __in size_t buffer_size, + __in uint32_t offset, + __in uint32_t length, + __out_bcount_part(key_max_size, *lengthp) + caddr_t keyp, + __in size_t key_max_size, + __out uint32_t *lengthp); + + __checkReturn efx_rc_t +efx_lic_v1v2_write_key( + __in efx_nic_t *enp, + __in_bcount(buffer_size) + caddr_t bufferp, + __in size_t buffer_size, + __in uint32_t offset, + __in_bcount(length) caddr_t keyp, + __in uint32_t length, + __out uint32_t *lengthp); + + __checkReturn efx_rc_t +efx_lic_v1v2_delete_key( + __in efx_nic_t *enp, + __in_bcount(buffer_size) + caddr_t bufferp, + __in size_t buffer_size, + __in uint32_t offset, + __in uint32_t length, + __in uint32_t end, + __out uint32_t *deltap); + + __checkReturn efx_rc_t +efx_lic_v1v2_create_partition( + __in efx_nic_t *enp, + __in_bcount(buffer_size) + caddr_t bufferp, + __in size_t buffer_size); + + __checkReturn efx_rc_t +efx_lic_v1v2_finish_partition( + __in efx_nic_t *enp, + __in_bcount(buffer_size) + caddr_t bufferp, + __in size_t buffer_size); + +#endif /* EFSYS_OPT_HUNTINGTON | EFSYS_OPT_SIENA */ + + +#if EFSYS_OPT_SIENA + +static __checkReturn efx_rc_t +efx_mcdi_fc_license_update_license( + __in efx_nic_t *enp); + +static __checkReturn efx_rc_t +efx_mcdi_fc_license_get_key_stats( + __in efx_nic_t *enp, + __out efx_key_stats_t *eksp); + +static const efx_lic_ops_t __efx_lic_v1_ops = { + efx_mcdi_fc_license_update_license, /* elo_update_licenses */ + efx_mcdi_fc_license_get_key_stats, /* 
elo_get_key_stats */ + NULL, /* elo_app_state */ + NULL, /* elo_get_id */ + efx_lic_v1v2_find_start, /* elo_find_start */ + efx_lic_v1v2_find_end, /* elo_find_end */ + efx_lic_v1v2_find_key, /* elo_find_key */ + efx_lic_v1v2_validate_key, /* elo_validate_key */ + efx_lic_v1v2_read_key, /* elo_read_key */ + efx_lic_v1v2_write_key, /* elo_write_key */ + efx_lic_v1v2_delete_key, /* elo_delete_key */ + efx_lic_v1v2_create_partition, /* elo_create_partition */ + efx_lic_v1v2_finish_partition, /* elo_finish_partition */ +}; + +#endif /* EFSYS_OPT_SIENA */ + +#if EFSYS_OPT_HUNTINGTON + +static __checkReturn efx_rc_t +efx_mcdi_licensing_update_licenses( + __in efx_nic_t *enp); + +static __checkReturn efx_rc_t +efx_mcdi_licensing_get_key_stats( + __in efx_nic_t *enp, + __out efx_key_stats_t *eksp); + +static __checkReturn efx_rc_t +efx_mcdi_licensed_app_state( + __in efx_nic_t *enp, + __in uint64_t app_id, + __out boolean_t *licensedp); + +static const efx_lic_ops_t __efx_lic_v2_ops = { + efx_mcdi_licensing_update_licenses, /* elo_update_licenses */ + efx_mcdi_licensing_get_key_stats, /* elo_get_key_stats */ + efx_mcdi_licensed_app_state, /* elo_app_state */ + NULL, /* elo_get_id */ + efx_lic_v1v2_find_start, /* elo_find_start */ + efx_lic_v1v2_find_end, /* elo_find_end */ + efx_lic_v1v2_find_key, /* elo_find_key */ + efx_lic_v1v2_validate_key, /* elo_validate_key */ + efx_lic_v1v2_read_key, /* elo_read_key */ + efx_lic_v1v2_write_key, /* elo_write_key */ + efx_lic_v1v2_delete_key, /* elo_delete_key */ + efx_lic_v1v2_create_partition, /* elo_create_partition */ + efx_lic_v1v2_finish_partition, /* elo_finish_partition */ +}; + +#endif /* EFSYS_OPT_HUNTINGTON */ + +#if EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 + +static __checkReturn efx_rc_t +efx_mcdi_licensing_v3_update_licenses( + __in efx_nic_t *enp); + +static __checkReturn efx_rc_t +efx_mcdi_licensing_v3_report_license( + __in efx_nic_t *enp, + __out efx_key_stats_t *eksp); + +static __checkReturn efx_rc_t +efx_mcdi_licensing_v3_app_state( + __in efx_nic_t *enp, + __in uint64_t app_id, + __out boolean_t *licensedp); + +static __checkReturn efx_rc_t +efx_mcdi_licensing_v3_get_id( + __in efx_nic_t *enp, + __in size_t buffer_size, + __out uint32_t *typep, + __out size_t *lengthp, + __out_bcount_part_opt(buffer_size, *lengthp) + uint8_t *bufferp); + + __checkReturn efx_rc_t +efx_lic_v3_find_start( + __in efx_nic_t *enp, + __in_bcount(buffer_size) + caddr_t bufferp, + __in size_t buffer_size, + __out uint32_t *startp); + + __checkReturn efx_rc_t +efx_lic_v3_find_end( + __in efx_nic_t *enp, + __in_bcount(buffer_size) + caddr_t bufferp, + __in size_t buffer_size, + __in uint32_t offset, + __out uint32_t *endp); + + __checkReturn __success(return != B_FALSE) boolean_t +efx_lic_v3_find_key( + __in efx_nic_t *enp, + __in_bcount(buffer_size) + caddr_t bufferp, + __in size_t buffer_size, + __in uint32_t offset, + __out uint32_t *startp, + __out uint32_t *lengthp); + + __checkReturn __success(return != B_FALSE) boolean_t +efx_lic_v3_validate_key( + __in efx_nic_t *enp, + __in_bcount(length) caddr_t keyp, + __in uint32_t length); + + __checkReturn efx_rc_t +efx_lic_v3_read_key( + __in efx_nic_t *enp, + __in_bcount(buffer_size) + caddr_t bufferp, + __in size_t buffer_size, + __in uint32_t offset, + __in uint32_t length, + __out_bcount_part(key_max_size, *lengthp) + caddr_t keyp, + __in size_t key_max_size, + __out uint32_t *lengthp); + + __checkReturn efx_rc_t +efx_lic_v3_write_key( + __in efx_nic_t *enp, + __in_bcount(buffer_size) + caddr_t bufferp, + __in 
size_t buffer_size, + __in uint32_t offset, + __in_bcount(length) caddr_t keyp, + __in uint32_t length, + __out uint32_t *lengthp); + + __checkReturn efx_rc_t +efx_lic_v3_delete_key( + __in efx_nic_t *enp, + __in_bcount(buffer_size) + caddr_t bufferp, + __in size_t buffer_size, + __in uint32_t offset, + __in uint32_t length, + __in uint32_t end, + __out uint32_t *deltap); + + __checkReturn efx_rc_t +efx_lic_v3_create_partition( + __in efx_nic_t *enp, + __in_bcount(buffer_size) + caddr_t bufferp, + __in size_t buffer_size); + + __checkReturn efx_rc_t +efx_lic_v3_finish_partition( + __in efx_nic_t *enp, + __in_bcount(buffer_size) + caddr_t bufferp, + __in size_t buffer_size); + +static const efx_lic_ops_t __efx_lic_v3_ops = { + efx_mcdi_licensing_v3_update_licenses, /* elo_update_licenses */ + efx_mcdi_licensing_v3_report_license, /* elo_get_key_stats */ + efx_mcdi_licensing_v3_app_state, /* elo_app_state */ + efx_mcdi_licensing_v3_get_id, /* elo_get_id */ + efx_lic_v3_find_start, /* elo_find_start */ + efx_lic_v3_find_end, /* elo_find_end */ + efx_lic_v3_find_key, /* elo_find_key */ + efx_lic_v3_validate_key, /* elo_validate_key */ + efx_lic_v3_read_key, /* elo_read_key */ + efx_lic_v3_write_key, /* elo_write_key */ + efx_lic_v3_delete_key, /* elo_delete_key */ + efx_lic_v3_create_partition, /* elo_create_partition */ + efx_lic_v3_finish_partition, /* elo_finish_partition */ +}; + +#endif /* EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */ + + +/* V1 Licensing - used in Siena Modena only */ + +#if EFSYS_OPT_SIENA + +static __checkReturn efx_rc_t +efx_mcdi_fc_license_update_license( + __in efx_nic_t *enp) +{ + efx_mcdi_req_t req; + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_FC_IN_LICENSE_LEN, 0); + efx_rc_t rc; + + EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA); + + req.emr_cmd = MC_CMD_FC; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_FC_IN_LICENSE_LEN; + req.emr_out_buf = payload; + req.emr_out_length = 0; + + MCDI_IN_SET_DWORD(req, FC_IN_CMD, + MC_CMD_FC_OP_LICENSE); + + MCDI_IN_SET_DWORD(req, FC_IN_LICENSE_OP, + MC_CMD_FC_IN_LICENSE_UPDATE_LICENSE); + + efx_mcdi_execute(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail1; + } + + if (req.emr_out_length_used != 0) { + rc = EIO; + goto fail2; + } + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +static __checkReturn efx_rc_t +efx_mcdi_fc_license_get_key_stats( + __in efx_nic_t *enp, + __out efx_key_stats_t *eksp) +{ + efx_mcdi_req_t req; + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_FC_IN_LICENSE_LEN, + MC_CMD_FC_OUT_LICENSE_LEN); + efx_rc_t rc; + + EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA); + + req.emr_cmd = MC_CMD_FC; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_FC_IN_LICENSE_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_FC_OUT_LICENSE_LEN; + + MCDI_IN_SET_DWORD(req, FC_IN_CMD, + MC_CMD_FC_OP_LICENSE); + + MCDI_IN_SET_DWORD(req, FC_IN_LICENSE_OP, + MC_CMD_FC_IN_LICENSE_GET_KEY_STATS); + + efx_mcdi_execute_quiet(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail1; + } + + if (req.emr_out_length_used < MC_CMD_FC_OUT_LICENSE_LEN) { + rc = EMSGSIZE; + goto fail2; + } + + eksp->eks_valid = + MCDI_OUT_DWORD(req, FC_OUT_LICENSE_VALID_KEYS); + eksp->eks_invalid = + MCDI_OUT_DWORD(req, FC_OUT_LICENSE_INVALID_KEYS); + eksp->eks_blacklisted = + MCDI_OUT_DWORD(req, FC_OUT_LICENSE_BLACKLISTED_KEYS); + eksp->eks_unverifiable = 0; + eksp->eks_wrong_node = 0; + eksp->eks_licensed_apps_lo = 0; + eksp->eks_licensed_apps_hi 
= 0; + eksp->eks_licensed_features_lo = 0; + eksp->eks_licensed_features_hi = 0; + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +#endif /* EFSYS_OPT_SIENA */ + +/* V1 and V2 Partition format - based on a 16-bit TLV format */ + +#if EFSYS_OPT_SIENA | EFSYS_OPT_HUNTINGTON + +/* + * V1/V2 format - defined in SF-108542-TC section 4.2: + * Type (T): 16bit - revision/HMAC algorithm + * Length (L): 16bit - value length in bytes + * Value (V): L bytes - payload + */ +#define EFX_LICENSE_V1V2_PAYLOAD_LENGTH_MAX (256) +#define EFX_LICENSE_V1V2_HEADER_LENGTH (2 * sizeof (uint16_t)) + + __checkReturn efx_rc_t +efx_lic_v1v2_find_start( + __in efx_nic_t *enp, + __in_bcount(buffer_size) + caddr_t bufferp, + __in size_t buffer_size, + __out uint32_t *startp) +{ + _NOTE(ARGUNUSED(enp, bufferp, buffer_size)) + + *startp = 0; + return (0); +} + + __checkReturn efx_rc_t +efx_lic_v1v2_find_end( + __in efx_nic_t *enp, + __in_bcount(buffer_size) + caddr_t bufferp, + __in size_t buffer_size, + __in uint32_t offset, + __out uint32_t *endp) +{ + _NOTE(ARGUNUSED(enp, bufferp, buffer_size)) + + *endp = offset + EFX_LICENSE_V1V2_HEADER_LENGTH; + return (0); +} + + __checkReturn __success(return != B_FALSE) boolean_t +efx_lic_v1v2_find_key( + __in efx_nic_t *enp, + __in_bcount(buffer_size) + caddr_t bufferp, + __in size_t buffer_size, + __in uint32_t offset, + __out uint32_t *startp, + __out uint32_t *lengthp) +{ + boolean_t found; + uint16_t tlv_type; + uint16_t tlv_length; + + _NOTE(ARGUNUSED(enp)) + + if ((size_t)buffer_size - offset < EFX_LICENSE_V1V2_HEADER_LENGTH) + goto fail1; + + tlv_type = __LE_TO_CPU_16(((uint16_t *)&bufferp[offset])[0]); + tlv_length = __LE_TO_CPU_16(((uint16_t *)&bufferp[offset])[1]); + if ((tlv_length > EFX_LICENSE_V1V2_PAYLOAD_LENGTH_MAX) || + (tlv_type == 0 && tlv_length == 0)) { + found = B_FALSE; + } else { + *startp = offset; + *lengthp = tlv_length + EFX_LICENSE_V1V2_HEADER_LENGTH; + found = B_TRUE; + } + return (found); + +fail1: + EFSYS_PROBE1(fail1, boolean_t, B_FALSE); + + return (B_FALSE); +} + + __checkReturn __success(return != B_FALSE) boolean_t +efx_lic_v1v2_validate_key( + __in efx_nic_t *enp, + __in_bcount(length) caddr_t keyp, + __in uint32_t length) +{ + uint16_t tlv_type; + uint16_t tlv_length; + + _NOTE(ARGUNUSED(enp)) + + if (length < EFX_LICENSE_V1V2_HEADER_LENGTH) { + goto fail1; + } + + tlv_type = __LE_TO_CPU_16(((uint16_t *)keyp)[0]); + tlv_length = __LE_TO_CPU_16(((uint16_t *)keyp)[1]); + + if (tlv_length > EFX_LICENSE_V1V2_PAYLOAD_LENGTH_MAX) { + goto fail2; + } + if (tlv_type == 0) { + goto fail3; + } + if ((tlv_length + EFX_LICENSE_V1V2_HEADER_LENGTH) != length) { + goto fail4; + } + + return (B_TRUE); + +fail4: + EFSYS_PROBE(fail4); +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, boolean_t, B_FALSE); + + return (B_FALSE); +} + + + __checkReturn efx_rc_t +efx_lic_v1v2_read_key( + __in efx_nic_t *enp, + __in_bcount(buffer_size) + caddr_t bufferp, + __in size_t buffer_size, + __in uint32_t offset, + __in uint32_t length, + __out_bcount_part(key_max_size, *lengthp) + caddr_t keyp, + __in size_t key_max_size, + __out uint32_t *lengthp) +{ + efx_rc_t rc; + + _NOTE(ARGUNUSED(enp, buffer_size)) + EFSYS_ASSERT(length <= (EFX_LICENSE_V1V2_PAYLOAD_LENGTH_MAX + + EFX_LICENSE_V1V2_HEADER_LENGTH)); + + if (key_max_size < length) { + rc = ENOSPC; + goto fail1; + } + memcpy(keyp, &bufferp[offset], length); + + *lengthp = length; + + return (0); + +fail1: + 
EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +efx_lic_v1v2_write_key( + __in efx_nic_t *enp, + __in_bcount(buffer_size) + caddr_t bufferp, + __in size_t buffer_size, + __in uint32_t offset, + __in_bcount(length) caddr_t keyp, + __in uint32_t length, + __out uint32_t *lengthp) +{ + efx_rc_t rc; + + _NOTE(ARGUNUSED(enp)) + EFSYS_ASSERT(length <= (EFX_LICENSE_V1V2_PAYLOAD_LENGTH_MAX + + EFX_LICENSE_V1V2_HEADER_LENGTH)); + + /* Ensure space for terminator remains */ + if ((offset + length) > + (buffer_size - EFX_LICENSE_V1V2_HEADER_LENGTH)) { + rc = ENOSPC; + goto fail1; + } + + memcpy(bufferp + offset, keyp, length); + + *lengthp = length; + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +efx_lic_v1v2_delete_key( + __in efx_nic_t *enp, + __in_bcount(buffer_size) + caddr_t bufferp, + __in size_t buffer_size, + __in uint32_t offset, + __in uint32_t length, + __in uint32_t end, + __out uint32_t *deltap) +{ + uint32_t move_start = offset + length; + uint32_t move_length = end - move_start; + + _NOTE(ARGUNUSED(enp, buffer_size)) + EFSYS_ASSERT(end <= buffer_size); + + /* Shift everything after the key down */ + memmove(bufferp + offset, bufferp + move_start, move_length); + + *deltap = length; + + return (0); +} + + __checkReturn efx_rc_t +efx_lic_v1v2_create_partition( + __in efx_nic_t *enp, + __in_bcount(buffer_size) + caddr_t bufferp, + __in size_t buffer_size) +{ + _NOTE(ARGUNUSED(enp, buffer_size)) + EFSYS_ASSERT(EFX_LICENSE_V1V2_HEADER_LENGTH <= buffer_size); + + /* Write terminator */ + memset(bufferp, '\0', EFX_LICENSE_V1V2_HEADER_LENGTH); + return (0); +} + + + __checkReturn efx_rc_t +efx_lic_v1v2_finish_partition( + __in efx_nic_t *enp, + __in_bcount(buffer_size) + caddr_t bufferp, + __in size_t buffer_size) +{ + _NOTE(ARGUNUSED(enp, bufferp, buffer_size)) + + return (0); +} + +#endif /* EFSYS_OPT_HUNTINGTON | EFSYS_OPT_SIENA */ + + +/* V2 Licensing - used by Huntington family only. 
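 *
 * Callers normally reach this support through the generic efx_lic_app_state()
 * wrapper (after efx_lic_init()) rather than by invoking the MCDI helper
 * below directly. A minimal, hypothetical sketch -- the application id value
 * and the feature hook are illustrative only:
 *
 *	boolean_t licensed;
 *
 *	if (efx_lic_app_state(enp, 0x1ULL, &licensed) == 0 &&
 *	    licensed != B_FALSE)
 *		enable_licensed_feature(enp);
 *
 * Note that this scheme accepts 32-bit application ids only; wider ids are
 * rejected with EINVAL.
 *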
See SF-113611-TC */ + +#if EFSYS_OPT_HUNTINGTON + +static __checkReturn efx_rc_t +efx_mcdi_licensed_app_state( + __in efx_nic_t *enp, + __in uint64_t app_id, + __out boolean_t *licensedp) +{ + efx_mcdi_req_t req; + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_LICENSED_APP_STATE_IN_LEN, + MC_CMD_GET_LICENSED_APP_STATE_OUT_LEN); + uint32_t app_state; + efx_rc_t rc; + + EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON); + + /* V2 licensing supports 32bit app id only */ + if ((app_id >> 32) != 0) { + rc = EINVAL; + goto fail1; + } + + req.emr_cmd = MC_CMD_GET_LICENSED_APP_STATE; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_GET_LICENSED_APP_STATE_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_GET_LICENSED_APP_STATE_OUT_LEN; + + MCDI_IN_SET_DWORD(req, GET_LICENSED_APP_STATE_IN_APP_ID, + app_id & 0xffffffff); + + efx_mcdi_execute(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail2; + } + + if (req.emr_out_length_used < MC_CMD_GET_LICENSED_APP_STATE_OUT_LEN) { + rc = EMSGSIZE; + goto fail3; + } + + app_state = (MCDI_OUT_DWORD(req, GET_LICENSED_APP_STATE_OUT_STATE)); + if (app_state != MC_CMD_GET_LICENSED_APP_STATE_OUT_NOT_LICENSED) { + *licensedp = B_TRUE; + } else { + *licensedp = B_FALSE; + } + + return (0); + +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +static __checkReturn efx_rc_t +efx_mcdi_licensing_update_licenses( + __in efx_nic_t *enp) +{ + efx_mcdi_req_t req; + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_LICENSING_IN_LEN, 0); + efx_rc_t rc; + + EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON); + + req.emr_cmd = MC_CMD_LICENSING; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_LICENSING_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = 0; + + MCDI_IN_SET_DWORD(req, LICENSING_IN_OP, + MC_CMD_LICENSING_IN_OP_UPDATE_LICENSE); + + efx_mcdi_execute(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail1; + } + + if (req.emr_out_length_used != 0) { + rc = EIO; + goto fail2; + } + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +static __checkReturn efx_rc_t +efx_mcdi_licensing_get_key_stats( + __in efx_nic_t *enp, + __out efx_key_stats_t *eksp) +{ + efx_mcdi_req_t req; + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_LICENSING_IN_LEN, + MC_CMD_LICENSING_OUT_LEN); + efx_rc_t rc; + + EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON); + + req.emr_cmd = MC_CMD_LICENSING; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_LICENSING_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_LICENSING_OUT_LEN; + + MCDI_IN_SET_DWORD(req, LICENSING_IN_OP, + MC_CMD_LICENSING_IN_OP_GET_KEY_STATS); + + efx_mcdi_execute(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail1; + } + + if (req.emr_out_length_used < MC_CMD_LICENSING_OUT_LEN) { + rc = EMSGSIZE; + goto fail2; + } + + eksp->eks_valid = + MCDI_OUT_DWORD(req, LICENSING_OUT_VALID_APP_KEYS); + eksp->eks_invalid = + MCDI_OUT_DWORD(req, LICENSING_OUT_INVALID_APP_KEYS); + eksp->eks_blacklisted = + MCDI_OUT_DWORD(req, LICENSING_OUT_BLACKLISTED_APP_KEYS); + eksp->eks_unverifiable = + MCDI_OUT_DWORD(req, LICENSING_OUT_UNVERIFIABLE_APP_KEYS); + eksp->eks_wrong_node = + MCDI_OUT_DWORD(req, LICENSING_OUT_WRONG_NODE_APP_KEYS); + eksp->eks_licensed_apps_lo = 0; + eksp->eks_licensed_apps_hi = 0; + eksp->eks_licensed_features_lo = 0; + eksp->eks_licensed_features_hi = 0; + + return (0); + +fail2: + 
EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +#endif /* EFSYS_OPT_HUNTINGTON */ + +/* V3 Licensing - used starting from Medford family. See SF-114884-SW */ + +#if EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 + +static __checkReturn efx_rc_t +efx_mcdi_licensing_v3_update_licenses( + __in efx_nic_t *enp) +{ + efx_mcdi_req_t req; + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_LICENSING_V3_IN_LEN, 0); + efx_rc_t rc; + + EFSYS_ASSERT((enp->en_family == EFX_FAMILY_MEDFORD) || + (enp->en_family == EFX_FAMILY_MEDFORD2)); + + req.emr_cmd = MC_CMD_LICENSING_V3; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_LICENSING_V3_IN_LEN; + req.emr_out_buf = NULL; + req.emr_out_length = 0; + + MCDI_IN_SET_DWORD(req, LICENSING_V3_IN_OP, + MC_CMD_LICENSING_V3_IN_OP_UPDATE_LICENSE); + + efx_mcdi_execute(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail1; + } + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +static __checkReturn efx_rc_t +efx_mcdi_licensing_v3_report_license( + __in efx_nic_t *enp, + __out efx_key_stats_t *eksp) +{ + efx_mcdi_req_t req; + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_LICENSING_V3_IN_LEN, + MC_CMD_LICENSING_V3_OUT_LEN); + efx_rc_t rc; + + EFSYS_ASSERT((enp->en_family == EFX_FAMILY_MEDFORD) || + (enp->en_family == EFX_FAMILY_MEDFORD2)); + + req.emr_cmd = MC_CMD_LICENSING_V3; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_LICENSING_V3_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_LICENSING_V3_OUT_LEN; + + MCDI_IN_SET_DWORD(req, LICENSING_V3_IN_OP, + MC_CMD_LICENSING_V3_IN_OP_REPORT_LICENSE); + + efx_mcdi_execute_quiet(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail1; + } + + if (req.emr_out_length_used < MC_CMD_LICENSING_V3_OUT_LEN) { + rc = EMSGSIZE; + goto fail2; + } + + eksp->eks_valid = + MCDI_OUT_DWORD(req, LICENSING_V3_OUT_VALID_KEYS); + eksp->eks_invalid = + MCDI_OUT_DWORD(req, LICENSING_V3_OUT_INVALID_KEYS); + eksp->eks_blacklisted = 0; + eksp->eks_unverifiable = + MCDI_OUT_DWORD(req, LICENSING_V3_OUT_UNVERIFIABLE_KEYS); + eksp->eks_wrong_node = + MCDI_OUT_DWORD(req, LICENSING_V3_OUT_WRONG_NODE_KEYS); + eksp->eks_licensed_apps_lo = + MCDI_OUT_DWORD(req, LICENSING_V3_OUT_LICENSED_APPS_LO); + eksp->eks_licensed_apps_hi = + MCDI_OUT_DWORD(req, LICENSING_V3_OUT_LICENSED_APPS_HI); + eksp->eks_licensed_features_lo = + MCDI_OUT_DWORD(req, LICENSING_V3_OUT_LICENSED_FEATURES_LO); + eksp->eks_licensed_features_hi = + MCDI_OUT_DWORD(req, LICENSING_V3_OUT_LICENSED_FEATURES_HI); + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +static __checkReturn efx_rc_t +efx_mcdi_licensing_v3_app_state( + __in efx_nic_t *enp, + __in uint64_t app_id, + __out boolean_t *licensedp) +{ + efx_mcdi_req_t req; + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_LICENSED_V3_APP_STATE_IN_LEN, + MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LEN); + uint32_t app_state; + efx_rc_t rc; + + EFSYS_ASSERT((enp->en_family == EFX_FAMILY_MEDFORD) || + (enp->en_family == EFX_FAMILY_MEDFORD2)); + + req.emr_cmd = MC_CMD_GET_LICENSED_V3_APP_STATE; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_GET_LICENSED_V3_APP_STATE_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LEN; + + MCDI_IN_SET_DWORD(req, GET_LICENSED_V3_APP_STATE_IN_APP_ID_LO, + app_id & 0xffffffff); + MCDI_IN_SET_DWORD(req, GET_LICENSED_V3_APP_STATE_IN_APP_ID_HI, + app_id >> 32); + + efx_mcdi_execute(enp, &req); + + if 
(req.emr_rc != 0) { + rc = req.emr_rc; + goto fail1; + } + + if (req.emr_out_length_used < + MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LEN) { + rc = EMSGSIZE; + goto fail2; + } + + app_state = (MCDI_OUT_DWORD(req, GET_LICENSED_V3_APP_STATE_OUT_STATE)); + if (app_state != MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_NOT_LICENSED) { + *licensedp = B_TRUE; + } else { + *licensedp = B_FALSE; + } + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +static __checkReturn efx_rc_t +efx_mcdi_licensing_v3_get_id( + __in efx_nic_t *enp, + __in size_t buffer_size, + __out uint32_t *typep, + __out size_t *lengthp, + __out_bcount_part_opt(buffer_size, *lengthp) + uint8_t *bufferp) +{ + efx_mcdi_req_t req; + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_LICENSING_GET_ID_V3_IN_LEN, + MC_CMD_LICENSING_GET_ID_V3_OUT_LENMAX); + efx_rc_t rc; + + req.emr_cmd = MC_CMD_LICENSING_GET_ID_V3; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_LICENSING_GET_ID_V3_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_LICENSING_GET_ID_V3_OUT_LENMAX; + + efx_mcdi_execute_quiet(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail1; + } + + if (req.emr_out_length_used < MC_CMD_LICENSING_GET_ID_V3_OUT_LENMIN) { + rc = EMSGSIZE; + goto fail2; + } + + *typep = MCDI_OUT_DWORD(req, LICENSING_GET_ID_V3_OUT_LICENSE_TYPE); + *lengthp = + MCDI_OUT_DWORD(req, LICENSING_GET_ID_V3_OUT_LICENSE_ID_LENGTH); + + if (bufferp != NULL) { + memcpy(bufferp, + payload + MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_OFST, + MIN(buffer_size, *lengthp)); + } + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +/* V3 format uses Huntington TLV format partition. See SF-108797-SW */ +#define EFX_LICENSE_V3_KEY_LENGTH_MIN (64) +#define EFX_LICENSE_V3_KEY_LENGTH_MAX (160) + + __checkReturn efx_rc_t +efx_lic_v3_find_start( + __in efx_nic_t *enp, + __in_bcount(buffer_size) + caddr_t bufferp, + __in size_t buffer_size, + __out uint32_t *startp) +{ + _NOTE(ARGUNUSED(enp)) + + return (ef10_nvram_buffer_find_item_start(bufferp, buffer_size, + startp)); +} + + __checkReturn efx_rc_t +efx_lic_v3_find_end( + __in efx_nic_t *enp, + __in_bcount(buffer_size) + caddr_t bufferp, + __in size_t buffer_size, + __in uint32_t offset, + __out uint32_t *endp) +{ + _NOTE(ARGUNUSED(enp)) + + return (ef10_nvram_buffer_find_end(bufferp, buffer_size, offset, endp)); +} + + __checkReturn __success(return != B_FALSE) boolean_t +efx_lic_v3_find_key( + __in efx_nic_t *enp, + __in_bcount(buffer_size) + caddr_t bufferp, + __in size_t buffer_size, + __in uint32_t offset, + __out uint32_t *startp, + __out uint32_t *lengthp) +{ + _NOTE(ARGUNUSED(enp)) + + return ef10_nvram_buffer_find_item(bufferp, buffer_size, + offset, startp, lengthp); +} + + __checkReturn __success(return != B_FALSE) boolean_t +efx_lic_v3_validate_key( + __in efx_nic_t *enp, + __in_bcount(length) caddr_t keyp, + __in uint32_t length) +{ + /* Check key is a valid V3 key */ + uint8_t key_type; + uint8_t key_length; + + _NOTE(ARGUNUSED(enp)) + + if (length < EFX_LICENSE_V3_KEY_LENGTH_MIN) { + goto fail1; + } + + if (length > EFX_LICENSE_V3_KEY_LENGTH_MAX) { + goto fail2; + } + + key_type = ((uint8_t *)keyp)[0]; + key_length = ((uint8_t *)keyp)[1]; + + if (key_type < 3) { + goto fail3; + } + if (key_length > length) { + goto fail4; + } + return (B_TRUE); + +fail4: + EFSYS_PROBE(fail4); +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, boolean_t, 
B_FALSE); + + return (B_FALSE); +} + + __checkReturn efx_rc_t +efx_lic_v3_read_key( + __in efx_nic_t *enp, + __in_bcount(buffer_size) + caddr_t bufferp, + __in size_t buffer_size, + __in uint32_t offset, + __in uint32_t length, + __out_bcount_part(key_max_size, *lengthp) + caddr_t keyp, + __in size_t key_max_size, + __out uint32_t *lengthp) +{ + uint32_t tag; + + _NOTE(ARGUNUSED(enp)) + + return ef10_nvram_buffer_get_item(bufferp, buffer_size, + offset, length, &tag, keyp, key_max_size, lengthp); +} + + __checkReturn efx_rc_t +efx_lic_v3_write_key( + __in efx_nic_t *enp, + __in_bcount(buffer_size) + caddr_t bufferp, + __in size_t buffer_size, + __in uint32_t offset, + __in_bcount(length) caddr_t keyp, + __in uint32_t length, + __out uint32_t *lengthp) +{ + _NOTE(ARGUNUSED(enp)) + EFSYS_ASSERT(length <= EFX_LICENSE_V3_KEY_LENGTH_MAX); + + return ef10_nvram_buffer_insert_item(bufferp, buffer_size, + offset, TLV_TAG_LICENSE, keyp, length, lengthp); +} + + __checkReturn efx_rc_t +efx_lic_v3_delete_key( + __in efx_nic_t *enp, + __in_bcount(buffer_size) + caddr_t bufferp, + __in size_t buffer_size, + __in uint32_t offset, + __in uint32_t length, + __in uint32_t end, + __out uint32_t *deltap) +{ + efx_rc_t rc; + + _NOTE(ARGUNUSED(enp)) + + if ((rc = ef10_nvram_buffer_delete_item(bufferp, + buffer_size, offset, length, end)) != 0) { + goto fail1; + } + + *deltap = length; + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +efx_lic_v3_create_partition( + __in efx_nic_t *enp, + __in_bcount(buffer_size) + caddr_t bufferp, + __in size_t buffer_size) +{ + efx_rc_t rc; + + _NOTE(ARGUNUSED(enp)) + + /* Construct empty partition */ + if ((rc = ef10_nvram_buffer_create( + NVRAM_PARTITION_TYPE_LICENSE, + bufferp, buffer_size)) != 0) { + rc = EFAULT; + goto fail1; + } + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +efx_lic_v3_finish_partition( + __in efx_nic_t *enp, + __in_bcount(buffer_size) + caddr_t bufferp, + __in size_t buffer_size) +{ + efx_rc_t rc; + + _NOTE(ARGUNUSED(enp)) + + if ((rc = ef10_nvram_buffer_finish(bufferp, + buffer_size)) != 0) { + goto fail1; + } + + /* Validate completed partition */ + if ((rc = ef10_nvram_buffer_validate( + NVRAM_PARTITION_TYPE_LICENSE, + bufferp, buffer_size)) != 0) { + goto fail2; + } + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + +#endif /* EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */ + + __checkReturn efx_rc_t +efx_lic_init( + __in efx_nic_t *enp) +{ + const efx_lic_ops_t *elop; + efx_key_stats_t eks; + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); + EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_LIC)); + + switch (enp->en_family) { + +#if EFSYS_OPT_SIENA + case EFX_FAMILY_SIENA: + elop = &__efx_lic_v1_ops; + break; +#endif /* EFSYS_OPT_SIENA */ + +#if EFSYS_OPT_HUNTINGTON + case EFX_FAMILY_HUNTINGTON: + elop = &__efx_lic_v2_ops; + break; +#endif /* EFSYS_OPT_HUNTINGTON */ + +#if EFSYS_OPT_MEDFORD + case EFX_FAMILY_MEDFORD: + elop = &__efx_lic_v3_ops; + break; +#endif /* EFSYS_OPT_MEDFORD */ + +#if EFSYS_OPT_MEDFORD2 + case EFX_FAMILY_MEDFORD2: + elop = &__efx_lic_v3_ops; + break; +#endif /* EFSYS_OPT_MEDFORD2 */ + + default: + EFSYS_ASSERT(0); + rc = ENOTSUP; + goto fail1; + } + + enp->en_elop = elop; + enp->en_mod_flags |= EFX_MOD_LIC; + + /* Probe for support */ + if (efx_lic_get_key_stats(enp, &eks) == 
0) { + enp->en_licensing_supported = B_TRUE; + } else { + enp->en_licensing_supported = B_FALSE; + } + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +extern __checkReturn boolean_t +efx_lic_check_support( + __in efx_nic_t *enp) +{ + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC); + + return (enp->en_licensing_supported); +} + + void +efx_lic_fini( + __in efx_nic_t *enp) +{ + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC); + + enp->en_elop = NULL; + enp->en_mod_flags &= ~EFX_MOD_LIC; +} + + + __checkReturn efx_rc_t +efx_lic_update_licenses( + __in efx_nic_t *enp) +{ + const efx_lic_ops_t *elop = enp->en_elop; + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC); + + if ((rc = elop->elo_update_licenses(enp)) != 0) + goto fail1; + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +efx_lic_get_key_stats( + __in efx_nic_t *enp, + __out efx_key_stats_t *eksp) +{ + const efx_lic_ops_t *elop = enp->en_elop; + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC); + + if ((rc = elop->elo_get_key_stats(enp, eksp)) != 0) + goto fail1; + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +efx_lic_app_state( + __in efx_nic_t *enp, + __in uint64_t app_id, + __out boolean_t *licensedp) +{ + const efx_lic_ops_t *elop = enp->en_elop; + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC); + + if (elop->elo_app_state == NULL) + return (ENOTSUP); + + if ((rc = elop->elo_app_state(enp, app_id, licensedp)) != 0) + goto fail1; + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +efx_lic_get_id( + __in efx_nic_t *enp, + __in size_t buffer_size, + __out uint32_t *typep, + __out size_t *lengthp, + __out_opt uint8_t *bufferp) +{ + const efx_lic_ops_t *elop = enp->en_elop; + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC); + + if (elop->elo_get_id == NULL) + return (ENOTSUP); + + if ((rc = elop->elo_get_id(enp, buffer_size, typep, + lengthp, bufferp)) != 0) + goto fail1; + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +/* + * Buffer management API - abstracts varying TLV format used for License + * partition. 
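 *
 * The usual call pattern is to read the whole partition into a host buffer
 * and then walk it key by key. A minimal iteration sketch (hypothetical
 * caller; bufferp/buffer_size come from an NVRAM read done elsewhere, and
 * error handling is omitted for brevity):
 *
 *	uint32_t offset, start, length, end;
 *
 *	efx_lic_find_start(enp, bufferp, buffer_size, &offset);
 *	while (efx_lic_find_key(enp, bufferp, buffer_size, offset,
 *		    &start, &length) != B_FALSE)
 *		offset = start + length;	(start/length cover one whole key)
 *	efx_lic_find_end(enp, bufferp, buffer_size, offset, &end);
 *
 * efx_lic_write_key() appends a key at a given offset, efx_lic_delete_key()
 * removes one and reports the resulting shrinkage in *deltap, and
 * efx_lic_finish_partition() completes the image (a no-op for V1/V2, TLV
 * finish plus validation for V3) before it is written back.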
+ */ + + __checkReturn efx_rc_t +efx_lic_find_start( + __in efx_nic_t *enp, + __in_bcount(buffer_size) + caddr_t bufferp, + __in size_t buffer_size, + __out uint32_t *startp) +{ + const efx_lic_ops_t *elop = enp->en_elop; + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC); + + if ((rc = elop->elo_find_start(enp, bufferp, buffer_size, startp)) != 0) + goto fail1; + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +efx_lic_find_end( + __in efx_nic_t *enp, + __in_bcount(buffer_size) + caddr_t bufferp, + __in size_t buffer_size, + __in uint32_t offset, + __out uint32_t *endp) +{ + const efx_lic_ops_t *elop = enp->en_elop; + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC); + + rc = elop->elo_find_end(enp, bufferp, buffer_size, offset, endp); + if (rc != 0) + goto fail1; + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn __success(return != B_FALSE) boolean_t +efx_lic_find_key( + __in efx_nic_t *enp, + __in_bcount(buffer_size) + caddr_t bufferp, + __in size_t buffer_size, + __in uint32_t offset, + __out uint32_t *startp, + __out uint32_t *lengthp) +{ + const efx_lic_ops_t *elop = enp->en_elop; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC); + + EFSYS_ASSERT(bufferp); + EFSYS_ASSERT(startp); + EFSYS_ASSERT(lengthp); + + return (elop->elo_find_key(enp, bufferp, buffer_size, offset, + startp, lengthp)); +} + + +/* + * Validate that the buffer contains a single key in a recognised format. + * An empty or terminator buffer is not accepted as a valid key. + */ + __checkReturn __success(return != B_FALSE) boolean_t +efx_lic_validate_key( + __in efx_nic_t *enp, + __in_bcount(length) caddr_t keyp, + __in uint32_t length) +{ + const efx_lic_ops_t *elop = enp->en_elop; + boolean_t rc; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC); + + if ((rc = elop->elo_validate_key(enp, keyp, length)) == B_FALSE) + goto fail1; + + return (B_TRUE); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +efx_lic_read_key( + __in efx_nic_t *enp, + __in_bcount(buffer_size) + caddr_t bufferp, + __in size_t buffer_size, + __in uint32_t offset, + __in uint32_t length, + __out_bcount_part(key_max_size, *lengthp) + caddr_t keyp, + __in size_t key_max_size, + __out uint32_t *lengthp) +{ + const efx_lic_ops_t *elop = enp->en_elop; + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC); + + if ((rc = elop->elo_read_key(enp, bufferp, buffer_size, offset, + length, keyp, key_max_size, lengthp)) != 0) + goto fail1; + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +efx_lic_write_key( + __in efx_nic_t *enp, + __in_bcount(buffer_size) + caddr_t bufferp, + __in size_t buffer_size, + __in uint32_t offset, + __in_bcount(length) caddr_t keyp, + __in uint32_t length, + __out uint32_t *lengthp) +{ + const efx_lic_ops_t *elop = enp->en_elop; + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC); + + if ((rc = elop->elo_write_key(enp, bufferp, buffer_size, offset, + keyp, length, lengthp)) != 0) + goto fail1; + + return (0); + +fail1: + EFSYS_PROBE1(fail1, 
efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +efx_lic_delete_key( + __in efx_nic_t *enp, + __in_bcount(buffer_size) + caddr_t bufferp, + __in size_t buffer_size, + __in uint32_t offset, + __in uint32_t length, + __in uint32_t end, + __out uint32_t *deltap) +{ + const efx_lic_ops_t *elop = enp->en_elop; + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC); + + if ((rc = elop->elo_delete_key(enp, bufferp, buffer_size, offset, + length, end, deltap)) != 0) + goto fail1; + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +efx_lic_create_partition( + __in efx_nic_t *enp, + __in_bcount(buffer_size) + caddr_t bufferp, + __in size_t buffer_size) +{ + const efx_lic_ops_t *elop = enp->en_elop; + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC); + + if ((rc = elop->elo_create_partition(enp, bufferp, buffer_size)) != 0) + goto fail1; + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + + __checkReturn efx_rc_t +efx_lic_finish_partition( + __in efx_nic_t *enp, + __in_bcount(buffer_size) + caddr_t bufferp, + __in size_t buffer_size) +{ + const efx_lic_ops_t *elop = enp->en_elop; + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC); + + if ((rc = elop->elo_finish_partition(enp, bufferp, buffer_size)) != 0) + goto fail1; + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +#endif /* EFSYS_OPT_LICENSING */ diff --git a/src/spdk/dpdk/drivers/net/sfc/base/efx_mac.c b/src/spdk/dpdk/drivers/net/sfc/base/efx_mac.c new file mode 100644 index 000000000..ae23aad4b --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/base/efx_mac.c @@ -0,0 +1,965 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2007-2019 Solarflare Communications Inc. 
+ */ + +#include "efx.h" +#include "efx_impl.h" + +#if EFSYS_OPT_SIENA + +static __checkReturn efx_rc_t +siena_mac_multicast_list_set( + __in efx_nic_t *enp); + +#endif /* EFSYS_OPT_SIENA */ + +#if EFSYS_OPT_SIENA +static const efx_mac_ops_t __efx_mac_siena_ops = { + siena_mac_poll, /* emo_poll */ + siena_mac_up, /* emo_up */ + siena_mac_reconfigure, /* emo_addr_set */ + siena_mac_reconfigure, /* emo_pdu_set */ + siena_mac_pdu_get, /* emo_pdu_get */ + siena_mac_reconfigure, /* emo_reconfigure */ + siena_mac_multicast_list_set, /* emo_multicast_list_set */ + NULL, /* emo_filter_set_default_rxq */ + NULL, /* emo_filter_default_rxq_clear */ +#if EFSYS_OPT_LOOPBACK + siena_mac_loopback_set, /* emo_loopback_set */ +#endif /* EFSYS_OPT_LOOPBACK */ +#if EFSYS_OPT_MAC_STATS + siena_mac_stats_get_mask, /* emo_stats_get_mask */ + efx_mcdi_mac_stats_clear, /* emo_stats_clear */ + efx_mcdi_mac_stats_upload, /* emo_stats_upload */ + efx_mcdi_mac_stats_periodic, /* emo_stats_periodic */ + siena_mac_stats_update /* emo_stats_update */ +#endif /* EFSYS_OPT_MAC_STATS */ +}; +#endif /* EFSYS_OPT_SIENA */ + +#if EFX_OPTS_EF10() +static const efx_mac_ops_t __efx_mac_ef10_ops = { + ef10_mac_poll, /* emo_poll */ + ef10_mac_up, /* emo_up */ + ef10_mac_addr_set, /* emo_addr_set */ + ef10_mac_pdu_set, /* emo_pdu_set */ + ef10_mac_pdu_get, /* emo_pdu_get */ + ef10_mac_reconfigure, /* emo_reconfigure */ + ef10_mac_multicast_list_set, /* emo_multicast_list_set */ + ef10_mac_filter_default_rxq_set, /* emo_filter_default_rxq_set */ + ef10_mac_filter_default_rxq_clear, + /* emo_filter_default_rxq_clear */ +#if EFSYS_OPT_LOOPBACK + ef10_mac_loopback_set, /* emo_loopback_set */ +#endif /* EFSYS_OPT_LOOPBACK */ +#if EFSYS_OPT_MAC_STATS + ef10_mac_stats_get_mask, /* emo_stats_get_mask */ + efx_mcdi_mac_stats_clear, /* emo_stats_clear */ + efx_mcdi_mac_stats_upload, /* emo_stats_upload */ + efx_mcdi_mac_stats_periodic, /* emo_stats_periodic */ + ef10_mac_stats_update /* emo_stats_update */ +#endif /* EFSYS_OPT_MAC_STATS */ +}; +#endif /* EFX_OPTS_EF10() */ + + __checkReturn efx_rc_t +efx_mac_pdu_set( + __in efx_nic_t *enp, + __in size_t pdu) +{ + efx_port_t *epp = &(enp->en_port); + const efx_mac_ops_t *emop = epp->ep_emop; + uint32_t old_pdu; + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT); + EFSYS_ASSERT(emop != NULL); + + if (pdu < EFX_MAC_PDU_MIN) { + rc = EINVAL; + goto fail1; + } + + if (pdu > EFX_MAC_PDU_MAX) { + rc = EINVAL; + goto fail2; + } + + old_pdu = epp->ep_mac_pdu; + epp->ep_mac_pdu = (uint32_t)pdu; + if ((rc = emop->emo_pdu_set(enp)) != 0) + goto fail3; + + return (0); + +fail3: + EFSYS_PROBE(fail3); + + epp->ep_mac_pdu = old_pdu; + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +efx_mac_pdu_get( + __in efx_nic_t *enp, + __out size_t *pdu) +{ + efx_port_t *epp = &(enp->en_port); + const efx_mac_ops_t *emop = epp->ep_emop; + efx_rc_t rc; + + if ((rc = emop->emo_pdu_get(enp, pdu)) != 0) + goto fail1; + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +efx_mac_addr_set( + __in efx_nic_t *enp, + __in uint8_t *addr) +{ + efx_port_t *epp = &(enp->en_port); + const efx_mac_ops_t *emop = epp->ep_emop; + uint8_t old_addr[6]; + uint32_t oui; + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT); + + if (EFX_MAC_ADDR_IS_MULTICAST(addr)) { + rc = 
EINVAL; + goto fail1; + } + + oui = addr[0] << 16 | addr[1] << 8 | addr[2]; + if (oui == 0x000000) { + rc = EINVAL; + goto fail2; + } + + EFX_MAC_ADDR_COPY(old_addr, epp->ep_mac_addr); + EFX_MAC_ADDR_COPY(epp->ep_mac_addr, addr); + if ((rc = emop->emo_addr_set(enp)) != 0) + goto fail3; + + return (0); + +fail3: + EFSYS_PROBE(fail3); + + EFX_MAC_ADDR_COPY(epp->ep_mac_addr, old_addr); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +efx_mac_filter_set( + __in efx_nic_t *enp, + __in boolean_t all_unicst, + __in boolean_t mulcst, + __in boolean_t all_mulcst, + __in boolean_t brdcst) +{ + efx_port_t *epp = &(enp->en_port); + const efx_mac_ops_t *emop = epp->ep_emop; + boolean_t old_all_unicst; + boolean_t old_mulcst; + boolean_t old_all_mulcst; + boolean_t old_brdcst; + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT); + + old_all_unicst = epp->ep_all_unicst; + old_mulcst = epp->ep_mulcst; + old_all_mulcst = epp->ep_all_mulcst; + old_brdcst = epp->ep_brdcst; + + epp->ep_all_unicst = all_unicst; + epp->ep_mulcst = mulcst; + epp->ep_all_mulcst = all_mulcst; + epp->ep_brdcst = brdcst; + + if ((rc = emop->emo_reconfigure(enp)) != 0) + goto fail1; + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + epp->ep_all_unicst = old_all_unicst; + epp->ep_mulcst = old_mulcst; + epp->ep_all_mulcst = old_all_mulcst; + epp->ep_brdcst = old_brdcst; + + return (rc); +} + + void +efx_mac_filter_get_all_ucast_mcast( + __in efx_nic_t *enp, + __out boolean_t *all_unicst, + __out boolean_t *all_mulcst) +{ + efx_port_t *epp = &(enp->en_port); + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT); + + *all_unicst = epp->ep_all_unicst_inserted; + *all_mulcst = epp->ep_all_mulcst_inserted; +} + + __checkReturn efx_rc_t +efx_mac_drain( + __in efx_nic_t *enp, + __in boolean_t enabled) +{ + efx_port_t *epp = &(enp->en_port); + const efx_mac_ops_t *emop = epp->ep_emop; + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT); + EFSYS_ASSERT(emop != NULL); + + if (epp->ep_mac_drain == enabled) + return (0); + + epp->ep_mac_drain = enabled; + + if ((rc = emop->emo_reconfigure(enp)) != 0) + goto fail1; + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +efx_mac_up( + __in efx_nic_t *enp, + __out boolean_t *mac_upp) +{ + efx_port_t *epp = &(enp->en_port); + const efx_mac_ops_t *emop = epp->ep_emop; + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT); + + if ((rc = emop->emo_up(enp, mac_upp)) != 0) + goto fail1; + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +efx_mac_fcntl_set( + __in efx_nic_t *enp, + __in unsigned int fcntl, + __in boolean_t autoneg) +{ + efx_port_t *epp = &(enp->en_port); + const efx_mac_ops_t *emop = epp->ep_emop; + const efx_phy_ops_t *epop = epp->ep_epop; + unsigned int old_fcntl; + boolean_t old_autoneg; + unsigned int old_adv_cap; + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT); + + if ((fcntl & ~(EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE)) != 0) { + rc = EINVAL; + goto fail1; + } + + /* + * Ignore a request to set flow control auto-negotiation + * if the PHY doesn't support 
it. + */ + if (~epp->ep_phy_cap_mask & (1 << EFX_PHY_CAP_AN)) + autoneg = B_FALSE; + + old_fcntl = epp->ep_fcntl; + old_autoneg = epp->ep_fcntl_autoneg; + old_adv_cap = epp->ep_adv_cap_mask; + + epp->ep_fcntl = fcntl; + epp->ep_fcntl_autoneg = autoneg; + + /* + * Always encode the flow control settings in the advertised + * capabilities even if we are not trying to auto-negotiate + * them and reconfigure both the PHY and the MAC. + */ + if (fcntl & EFX_FCNTL_RESPOND) + epp->ep_adv_cap_mask |= (1 << EFX_PHY_CAP_PAUSE | + 1 << EFX_PHY_CAP_ASYM); + else + epp->ep_adv_cap_mask &= ~(1 << EFX_PHY_CAP_PAUSE | + 1 << EFX_PHY_CAP_ASYM); + + if (fcntl & EFX_FCNTL_GENERATE) + epp->ep_adv_cap_mask ^= (1 << EFX_PHY_CAP_ASYM); + + if ((rc = epop->epo_reconfigure(enp)) != 0) + goto fail2; + + if ((rc = emop->emo_reconfigure(enp)) != 0) + goto fail3; + + return (0); + +fail3: + EFSYS_PROBE(fail3); + +fail2: + EFSYS_PROBE(fail2); + + epp->ep_fcntl = old_fcntl; + epp->ep_fcntl_autoneg = old_autoneg; + epp->ep_adv_cap_mask = old_adv_cap; + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + void +efx_mac_fcntl_get( + __in efx_nic_t *enp, + __out unsigned int *fcntl_wantedp, + __out unsigned int *fcntl_linkp) +{ + efx_port_t *epp = &(enp->en_port); + unsigned int wanted = 0; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT); + + /* + * Decode the requested flow control settings from the PHY + * advertised capabilities. + */ + if (epp->ep_adv_cap_mask & (1 << EFX_PHY_CAP_PAUSE)) + wanted = EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE; + if (epp->ep_adv_cap_mask & (1 << EFX_PHY_CAP_ASYM)) + wanted ^= EFX_FCNTL_GENERATE; + + *fcntl_linkp = epp->ep_fcntl; + *fcntl_wantedp = wanted; +} + + __checkReturn efx_rc_t +efx_mac_multicast_list_set( + __in efx_nic_t *enp, + __in_ecount(6*count) uint8_t const *addrs, + __in int count) +{ + efx_port_t *epp = &(enp->en_port); + const efx_mac_ops_t *emop = epp->ep_emop; + uint8_t *old_mulcst_addr_list = NULL; + uint32_t old_mulcst_addr_count; + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT); + + if (count > EFX_MAC_MULTICAST_LIST_MAX) { + rc = EINVAL; + goto fail1; + } + + old_mulcst_addr_count = epp->ep_mulcst_addr_count; + if (old_mulcst_addr_count > 0) { + /* Allocate memory to store old list (instead of using stack) */ + EFSYS_KMEM_ALLOC(enp->en_esip, + old_mulcst_addr_count * EFX_MAC_ADDR_LEN, + old_mulcst_addr_list); + if (old_mulcst_addr_list == NULL) { + rc = ENOMEM; + goto fail2; + } + + /* Save the old list in case we need to rollback */ + memcpy(old_mulcst_addr_list, epp->ep_mulcst_addr_list, + old_mulcst_addr_count * EFX_MAC_ADDR_LEN); + } + + /* Store the new list */ + memcpy(epp->ep_mulcst_addr_list, addrs, + count * EFX_MAC_ADDR_LEN); + epp->ep_mulcst_addr_count = count; + + if ((rc = emop->emo_multicast_list_set(enp)) != 0) + goto fail3; + + if (old_mulcst_addr_count > 0) { + EFSYS_KMEM_FREE(enp->en_esip, + old_mulcst_addr_count * EFX_MAC_ADDR_LEN, + old_mulcst_addr_list); + } + + return (0); + +fail3: + EFSYS_PROBE(fail3); + + /* Restore original list on failure */ + epp->ep_mulcst_addr_count = old_mulcst_addr_count; + if (old_mulcst_addr_count > 0) { + memcpy(epp->ep_mulcst_addr_list, old_mulcst_addr_list, + old_mulcst_addr_count * EFX_MAC_ADDR_LEN); + + EFSYS_KMEM_FREE(enp->en_esip, + old_mulcst_addr_count * EFX_MAC_ADDR_LEN, + old_mulcst_addr_list); + } + +fail2: + EFSYS_PROBE(fail2); + +fail1: + EFSYS_PROBE1(fail1, 
efx_rc_t, rc); + + return (rc); + +} + + __checkReturn efx_rc_t +efx_mac_filter_default_rxq_set( + __in efx_nic_t *enp, + __in efx_rxq_t *erp, + __in boolean_t using_rss) +{ + efx_port_t *epp = &(enp->en_port); + const efx_mac_ops_t *emop = epp->ep_emop; + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT); + + if (emop->emo_filter_default_rxq_set != NULL) { + rc = emop->emo_filter_default_rxq_set(enp, erp, using_rss); + if (rc != 0) + goto fail1; + } + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + void +efx_mac_filter_default_rxq_clear( + __in efx_nic_t *enp) +{ + efx_port_t *epp = &(enp->en_port); + const efx_mac_ops_t *emop = epp->ep_emop; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT); + + if (emop->emo_filter_default_rxq_clear != NULL) + emop->emo_filter_default_rxq_clear(enp); +} + + +#if EFSYS_OPT_MAC_STATS + +#if EFSYS_OPT_NAMES + +/* START MKCONFIG GENERATED EfxMacStatNamesBlock 1a45a82fcfb30c1b */ +static const char * const __efx_mac_stat_name[] = { + "rx_octets", + "rx_pkts", + "rx_unicst_pkts", + "rx_multicst_pkts", + "rx_brdcst_pkts", + "rx_pause_pkts", + "rx_le_64_pkts", + "rx_65_to_127_pkts", + "rx_128_to_255_pkts", + "rx_256_to_511_pkts", + "rx_512_to_1023_pkts", + "rx_1024_to_15xx_pkts", + "rx_ge_15xx_pkts", + "rx_errors", + "rx_fcs_errors", + "rx_drop_events", + "rx_false_carrier_errors", + "rx_symbol_errors", + "rx_align_errors", + "rx_internal_errors", + "rx_jabber_pkts", + "rx_lane0_char_err", + "rx_lane1_char_err", + "rx_lane2_char_err", + "rx_lane3_char_err", + "rx_lane0_disp_err", + "rx_lane1_disp_err", + "rx_lane2_disp_err", + "rx_lane3_disp_err", + "rx_match_fault", + "rx_nodesc_drop_cnt", + "tx_octets", + "tx_pkts", + "tx_unicst_pkts", + "tx_multicst_pkts", + "tx_brdcst_pkts", + "tx_pause_pkts", + "tx_le_64_pkts", + "tx_65_to_127_pkts", + "tx_128_to_255_pkts", + "tx_256_to_511_pkts", + "tx_512_to_1023_pkts", + "tx_1024_to_15xx_pkts", + "tx_ge_15xx_pkts", + "tx_errors", + "tx_sgl_col_pkts", + "tx_mult_col_pkts", + "tx_ex_col_pkts", + "tx_late_col_pkts", + "tx_def_pkts", + "tx_ex_def_pkts", + "pm_trunc_bb_overflow", + "pm_discard_bb_overflow", + "pm_trunc_vfifo_full", + "pm_discard_vfifo_full", + "pm_trunc_qbb", + "pm_discard_qbb", + "pm_discard_mapping", + "rxdp_q_disabled_pkts", + "rxdp_di_dropped_pkts", + "rxdp_streaming_pkts", + "rxdp_hlb_fetch", + "rxdp_hlb_wait", + "vadapter_rx_unicast_packets", + "vadapter_rx_unicast_bytes", + "vadapter_rx_multicast_packets", + "vadapter_rx_multicast_bytes", + "vadapter_rx_broadcast_packets", + "vadapter_rx_broadcast_bytes", + "vadapter_rx_bad_packets", + "vadapter_rx_bad_bytes", + "vadapter_rx_overflow", + "vadapter_tx_unicast_packets", + "vadapter_tx_unicast_bytes", + "vadapter_tx_multicast_packets", + "vadapter_tx_multicast_bytes", + "vadapter_tx_broadcast_packets", + "vadapter_tx_broadcast_bytes", + "vadapter_tx_bad_packets", + "vadapter_tx_bad_bytes", + "vadapter_tx_overflow", + "fec_uncorrected_errors", + "fec_corrected_errors", + "fec_corrected_symbols_lane0", + "fec_corrected_symbols_lane1", + "fec_corrected_symbols_lane2", + "fec_corrected_symbols_lane3", + "ctpio_vi_busy_fallback", + "ctpio_long_write_success", + "ctpio_missing_dbell_fail", + "ctpio_overflow_fail", + "ctpio_underflow_fail", + "ctpio_timeout_fail", + "ctpio_noncontig_wr_fail", + "ctpio_frm_clobber_fail", + "ctpio_invalid_wr_fail", + "ctpio_vi_clobber_fallback", + 
"ctpio_unqualified_fallback", + "ctpio_runt_fallback", + "ctpio_success", + "ctpio_fallback", + "ctpio_poison", + "ctpio_erase", + "rxdp_scatter_disabled_trunc", + "rxdp_hlb_idle", + "rxdp_hlb_timeout", +}; +/* END MKCONFIG GENERATED EfxMacStatNamesBlock */ + + __checkReturn const char * +efx_mac_stat_name( + __in efx_nic_t *enp, + __in unsigned int id) +{ + _NOTE(ARGUNUSED(enp)) + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + + EFSYS_ASSERT3U(id, <, EFX_MAC_NSTATS); + return (__efx_mac_stat_name[id]); +} + +#endif /* EFSYS_OPT_NAMES */ + +static efx_rc_t +efx_mac_stats_mask_add_range( + __inout_bcount(mask_size) uint32_t *maskp, + __in size_t mask_size, + __in const struct efx_mac_stats_range *rngp) +{ + unsigned int mask_npages = mask_size / sizeof (*maskp); + unsigned int el; + unsigned int el_min; + unsigned int el_max; + unsigned int low; + unsigned int high; + unsigned int width; + efx_rc_t rc; + + if ((mask_npages * EFX_MAC_STATS_MASK_BITS_PER_PAGE) <= + (unsigned int)rngp->last) { + rc = EINVAL; + goto fail1; + } + + EFSYS_ASSERT3U(rngp->first, <=, rngp->last); + EFSYS_ASSERT3U(rngp->last, <, EFX_MAC_NSTATS); + + for (el = 0; el < mask_npages; ++el) { + el_min = el * EFX_MAC_STATS_MASK_BITS_PER_PAGE; + el_max = + el_min + (EFX_MAC_STATS_MASK_BITS_PER_PAGE - 1); + if ((unsigned int)rngp->first > el_max || + (unsigned int)rngp->last < el_min) + continue; + low = MAX((unsigned int)rngp->first, el_min); + high = MIN((unsigned int)rngp->last, el_max); + width = high - low + 1; + maskp[el] |= + (width == EFX_MAC_STATS_MASK_BITS_PER_PAGE) ? + (~0ULL) : (((1ULL << width) - 1) << (low - el_min)); + } + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + efx_rc_t +efx_mac_stats_mask_add_ranges( + __inout_bcount(mask_size) uint32_t *maskp, + __in size_t mask_size, + __in_ecount(rng_count) const struct efx_mac_stats_range *rngp, + __in unsigned int rng_count) +{ + unsigned int i; + efx_rc_t rc; + + for (i = 0; i < rng_count; ++i) { + if ((rc = efx_mac_stats_mask_add_range(maskp, mask_size, + &rngp[i])) != 0) + goto fail1; + } + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +efx_mac_stats_get_mask( + __in efx_nic_t *enp, + __out_bcount(mask_size) uint32_t *maskp, + __in size_t mask_size) +{ + efx_port_t *epp = &(enp->en_port); + const efx_mac_ops_t *emop = epp->ep_emop; + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); + EFSYS_ASSERT(maskp != NULL); + EFSYS_ASSERT(mask_size % sizeof (maskp[0]) == 0); + + (void) memset(maskp, 0, mask_size); + + if ((rc = emop->emo_stats_get_mask(enp, maskp, mask_size)) != 0) + goto fail1; + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +efx_mac_stats_clear( + __in efx_nic_t *enp) +{ + efx_port_t *epp = &(enp->en_port); + const efx_mac_ops_t *emop = epp->ep_emop; + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT); + EFSYS_ASSERT(emop != NULL); + + if ((rc = emop->emo_stats_clear(enp)) != 0) + goto fail1; + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +efx_mac_stats_upload( + __in efx_nic_t *enp, + __in efsys_mem_t *esmp) +{ + efx_port_t *epp = &(enp->en_port); + const efx_mac_ops_t *emop = epp->ep_emop; + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + 
EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT); + EFSYS_ASSERT(emop != NULL); + + if ((rc = emop->emo_stats_upload(enp, esmp)) != 0) + goto fail1; + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +efx_mac_stats_periodic( + __in efx_nic_t *enp, + __in efsys_mem_t *esmp, + __in uint16_t period_ms, + __in boolean_t events) +{ + efx_port_t *epp = &(enp->en_port); + const efx_mac_ops_t *emop = epp->ep_emop; + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT); + + EFSYS_ASSERT(emop != NULL); + + if (emop->emo_stats_periodic == NULL) { + rc = EINVAL; + goto fail1; + } + + if ((rc = emop->emo_stats_periodic(enp, esmp, period_ms, events)) != 0) + goto fail2; + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + + __checkReturn efx_rc_t +efx_mac_stats_update( + __in efx_nic_t *enp, + __in efsys_mem_t *esmp, + __inout_ecount(EFX_MAC_NSTATS) efsys_stat_t *essp, + __inout_opt uint32_t *generationp) +{ + efx_port_t *epp = &(enp->en_port); + const efx_mac_ops_t *emop = epp->ep_emop; + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT); + EFSYS_ASSERT(emop != NULL); + + rc = emop->emo_stats_update(enp, esmp, essp, generationp); + + return (rc); +} + +#endif /* EFSYS_OPT_MAC_STATS */ + + __checkReturn efx_rc_t +efx_mac_select( + __in efx_nic_t *enp) +{ + efx_port_t *epp = &(enp->en_port); + efx_mac_type_t type = EFX_MAC_INVALID; + const efx_mac_ops_t *emop; + int rc = EINVAL; + + switch (enp->en_family) { +#if EFSYS_OPT_SIENA + case EFX_FAMILY_SIENA: + emop = &__efx_mac_siena_ops; + type = EFX_MAC_SIENA; + break; +#endif /* EFSYS_OPT_SIENA */ + +#if EFSYS_OPT_HUNTINGTON + case EFX_FAMILY_HUNTINGTON: + emop = &__efx_mac_ef10_ops; + type = EFX_MAC_HUNTINGTON; + break; +#endif /* EFSYS_OPT_HUNTINGTON */ + +#if EFSYS_OPT_MEDFORD + case EFX_FAMILY_MEDFORD: + emop = &__efx_mac_ef10_ops; + type = EFX_MAC_MEDFORD; + break; +#endif /* EFSYS_OPT_MEDFORD */ + +#if EFSYS_OPT_MEDFORD2 + case EFX_FAMILY_MEDFORD2: + emop = &__efx_mac_ef10_ops; + type = EFX_MAC_MEDFORD2; + break; +#endif /* EFSYS_OPT_MEDFORD2 */ + + default: + rc = EINVAL; + goto fail1; + } + + EFSYS_ASSERT(type != EFX_MAC_INVALID); + EFSYS_ASSERT3U(type, <, EFX_MAC_NTYPES); + EFSYS_ASSERT(emop != NULL); + + epp->ep_emop = emop; + epp->ep_mac_type = type; + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + +#if EFSYS_OPT_SIENA + +#define EFX_MAC_HASH_BITS (1 << 8) + +/* Compute the multicast hash as used on Falcon and Siena. 
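 * Each address selects one of EFX_MAC_HASH_BITS (256) buckets: the bucket is
 * the IEEE 802.3 CRC-32 of the 6-byte address taken modulo 256, with buckets
 * 0-127 landing in hash_low and buckets 128-255 in hash_high. A sketch of the
 * per-address step (hypothetical standalone helper, equivalent to one loop
 * iteration below):
 *
 *	static unsigned int
 *	siena_mac_hash_bucket(const uint8_t *addr)
 *	{
 *		uint32_t crc;
 *
 *		crc = efx_crc32_calculate(0xffffffff, addr, EFX_MAC_ADDR_LEN);
 *		return (crc % EFX_MAC_HASH_BITS);
 *	}
 *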
*/ +static void +siena_mac_multicast_hash_compute( + __in_ecount(6*count) uint8_t const *addrs, + __in int count, + __out efx_oword_t *hash_low, + __out efx_oword_t *hash_high) +{ + uint32_t crc, index; + int i; + + EFSYS_ASSERT(hash_low != NULL); + EFSYS_ASSERT(hash_high != NULL); + + EFX_ZERO_OWORD(*hash_low); + EFX_ZERO_OWORD(*hash_high); + + for (i = 0; i < count; i++) { + /* Calculate hash bucket (IEEE 802.3 CRC32 of the MAC addr) */ + crc = efx_crc32_calculate(0xffffffff, addrs, EFX_MAC_ADDR_LEN); + index = crc % EFX_MAC_HASH_BITS; + if (index < 128) { + EFX_SET_OWORD_BIT(*hash_low, index); + } else { + EFX_SET_OWORD_BIT(*hash_high, index - 128); + } + + addrs += EFX_MAC_ADDR_LEN; + } +} + +static __checkReturn efx_rc_t +siena_mac_multicast_list_set( + __in efx_nic_t *enp) +{ + efx_port_t *epp = &(enp->en_port); + const efx_mac_ops_t *emop = epp->ep_emop; + efx_oword_t old_hash[2]; + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT); + + memcpy(old_hash, epp->ep_multicst_hash, sizeof (old_hash)); + + siena_mac_multicast_hash_compute( + epp->ep_mulcst_addr_list, + epp->ep_mulcst_addr_count, + &epp->ep_multicst_hash[0], + &epp->ep_multicst_hash[1]); + + if ((rc = emop->emo_reconfigure(enp)) != 0) + goto fail1; + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + memcpy(epp->ep_multicst_hash, old_hash, sizeof (old_hash)); + + return (rc); +} + +#endif /* EFSYS_OPT_SIENA */ diff --git a/src/spdk/dpdk/drivers/net/sfc/base/efx_mcdi.c b/src/spdk/dpdk/drivers/net/sfc/base/efx_mcdi.c new file mode 100644 index 000000000..8cd651f52 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/base/efx_mcdi.c @@ -0,0 +1,2425 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2008-2019 Solarflare Communications Inc. + */ + +#include "efx.h" +#include "efx_impl.h" + +#if EFSYS_OPT_MCDI + +/* + * There are three versions of the MCDI interface: + * - MCDIv0: Siena BootROM. Transport uses MCDIv1 headers. + * - MCDIv1: Siena firmware and Huntington BootROM. + * - MCDIv2: EF10 firmware (Huntington/Medford) and Medford BootROM. + * Transport uses MCDIv2 headers. + * + * MCDIv2 Header NOT_EPOCH flag + * ---------------------------- + * A new epoch begins at initial startup or after an MC reboot, and defines when + * the MC should reject stale MCDI requests. + * + * The first MCDI request sent by the host should contain NOT_EPOCH=0, and all + * subsequent requests (until the next MC reboot) should contain NOT_EPOCH=1. + * + * After rebooting the MC will fail all requests with NOT_EPOCH=1 by writing a + * response with ERROR=1 and DATALEN=0 until a request is seen with NOT_EPOCH=0. 
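 *
 * Epoch state is tracked per interface in emi_new_epoch: while it is B_TRUE
 * the next request is built with NOT_EPOCH=0 (see efx_mcdi_request_start()
 * below), otherwise with NOT_EPOCH=1. A client that detects an MC reboot can
 * therefore restart the handshake with efx_mcdi_new_epoch(); a minimal
 * recovery sketch (the reboot-detection hook is hypothetical and
 * client-specific):
 *
 *	if (client_detected_mc_reboot(enp)) {
 *		efx_mcdi_new_epoch(enp);
 *		... re-issue or fail any request that was outstanding ...
 *	}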
+ */ + + + +#if EFSYS_OPT_SIENA + +static const efx_mcdi_ops_t __efx_mcdi_siena_ops = { + siena_mcdi_init, /* emco_init */ + siena_mcdi_send_request, /* emco_send_request */ + siena_mcdi_poll_reboot, /* emco_poll_reboot */ + siena_mcdi_poll_response, /* emco_poll_response */ + siena_mcdi_read_response, /* emco_read_response */ + siena_mcdi_fini, /* emco_fini */ + siena_mcdi_feature_supported, /* emco_feature_supported */ + siena_mcdi_get_timeout, /* emco_get_timeout */ +}; + +#endif /* EFSYS_OPT_SIENA */ + +#if EFX_OPTS_EF10() + +static const efx_mcdi_ops_t __efx_mcdi_ef10_ops = { + ef10_mcdi_init, /* emco_init */ + ef10_mcdi_send_request, /* emco_send_request */ + ef10_mcdi_poll_reboot, /* emco_poll_reboot */ + ef10_mcdi_poll_response, /* emco_poll_response */ + ef10_mcdi_read_response, /* emco_read_response */ + ef10_mcdi_fini, /* emco_fini */ + ef10_mcdi_feature_supported, /* emco_feature_supported */ + ef10_mcdi_get_timeout, /* emco_get_timeout */ +}; + +#endif /* EFX_OPTS_EF10() */ + + + + __checkReturn efx_rc_t +efx_mcdi_init( + __in efx_nic_t *enp, + __in const efx_mcdi_transport_t *emtp) +{ + const efx_mcdi_ops_t *emcop; + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, ==, 0); + + switch (enp->en_family) { +#if EFSYS_OPT_SIENA + case EFX_FAMILY_SIENA: + emcop = &__efx_mcdi_siena_ops; + break; +#endif /* EFSYS_OPT_SIENA */ + +#if EFSYS_OPT_HUNTINGTON + case EFX_FAMILY_HUNTINGTON: + emcop = &__efx_mcdi_ef10_ops; + break; +#endif /* EFSYS_OPT_HUNTINGTON */ + +#if EFSYS_OPT_MEDFORD + case EFX_FAMILY_MEDFORD: + emcop = &__efx_mcdi_ef10_ops; + break; +#endif /* EFSYS_OPT_MEDFORD */ + +#if EFSYS_OPT_MEDFORD2 + case EFX_FAMILY_MEDFORD2: + emcop = &__efx_mcdi_ef10_ops; + break; +#endif /* EFSYS_OPT_MEDFORD2 */ + + default: + EFSYS_ASSERT(0); + rc = ENOTSUP; + goto fail1; + } + + if (enp->en_features & EFX_FEATURE_MCDI_DMA) { + /* MCDI requires a DMA buffer in host memory */ + if ((emtp == NULL) || (emtp->emt_dma_mem) == NULL) { + rc = EINVAL; + goto fail2; + } + } + enp->en_mcdi.em_emtp = emtp; + + if (emcop != NULL && emcop->emco_init != NULL) { + if ((rc = emcop->emco_init(enp, emtp)) != 0) + goto fail3; + } + + enp->en_mcdi.em_emcop = emcop; + enp->en_mod_flags |= EFX_MOD_MCDI; + + return (0); + +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + enp->en_mcdi.em_emcop = NULL; + enp->en_mcdi.em_emtp = NULL; + enp->en_mod_flags &= ~EFX_MOD_MCDI; + + return (rc); +} + + void +efx_mcdi_fini( + __in efx_nic_t *enp) +{ + efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); + const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, ==, EFX_MOD_MCDI); + + if (emcop != NULL && emcop->emco_fini != NULL) + emcop->emco_fini(enp); + + emip->emi_port = 0; + emip->emi_aborted = 0; + + enp->en_mcdi.em_emcop = NULL; + enp->en_mod_flags &= ~EFX_MOD_MCDI; +} + + void +efx_mcdi_new_epoch( + __in efx_nic_t *enp) +{ + efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); + efsys_lock_state_t state; + + /* Start a new epoch (allow fresh MCDI requests to succeed) */ + EFSYS_LOCK(enp->en_eslp, state); + emip->emi_new_epoch = B_TRUE; + EFSYS_UNLOCK(enp->en_eslp, state); +} + +static void +efx_mcdi_send_request( + __in efx_nic_t *enp, + __in void *hdrp, + __in size_t hdr_len, + __in void *sdup, + __in size_t sdu_len) +{ + const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop; + + emcop->emco_send_request(enp, hdrp, hdr_len, 
sdup, sdu_len); +} + +static efx_rc_t +efx_mcdi_poll_reboot( + __in efx_nic_t *enp) +{ + const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop; + efx_rc_t rc; + + rc = emcop->emco_poll_reboot(enp); + return (rc); +} + +static boolean_t +efx_mcdi_poll_response( + __in efx_nic_t *enp) +{ + const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop; + boolean_t available; + + available = emcop->emco_poll_response(enp); + return (available); +} + +static void +efx_mcdi_read_response( + __in efx_nic_t *enp, + __out void *bufferp, + __in size_t offset, + __in size_t length) +{ + const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop; + + emcop->emco_read_response(enp, bufferp, offset, length); +} + + void +efx_mcdi_request_start( + __in efx_nic_t *enp, + __in efx_mcdi_req_t *emrp, + __in boolean_t ev_cpl) +{ +#if EFSYS_OPT_MCDI_LOGGING + const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp; +#endif + efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); + efx_dword_t hdr[2]; + size_t hdr_len; + unsigned int max_version; + unsigned int seq; + unsigned int xflags; + boolean_t new_epoch; + efsys_lock_state_t state; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI); + EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI); + + /* + * efx_mcdi_request_start() is naturally serialised against both + * efx_mcdi_request_poll() and efx_mcdi_ev_cpl()/efx_mcdi_ev_death(), + * by virtue of there only being one outstanding MCDI request. + * Unfortunately, upper layers may also call efx_mcdi_request_abort() + * at any time, to timeout a pending mcdi request, That request may + * then subsequently complete, meaning efx_mcdi_ev_cpl() or + * efx_mcdi_ev_death() may end up running in parallel with + * efx_mcdi_request_start(). This race is handled by ensuring that + * %emi_pending_req, %emi_ev_cpl and %emi_seq are protected by the + * en_eslp lock. + */ + EFSYS_LOCK(enp->en_eslp, state); + EFSYS_ASSERT(emip->emi_pending_req == NULL); + emip->emi_pending_req = emrp; + emip->emi_ev_cpl = ev_cpl; + emip->emi_poll_cnt = 0; + seq = emip->emi_seq++ & EFX_MASK32(MCDI_HEADER_SEQ); + new_epoch = emip->emi_new_epoch; + max_version = emip->emi_max_version; + EFSYS_UNLOCK(enp->en_eslp, state); + + xflags = 0; + if (ev_cpl) + xflags |= MCDI_HEADER_XFLAGS_EVREQ; + + /* + * Huntington firmware supports MCDIv2, but the Huntington BootROM only + * supports MCDIv1. Use MCDIv1 headers for MCDIv1 commands where + * possible to support this. + */ + if ((max_version >= 2) && + ((emrp->emr_cmd > MC_CMD_CMD_SPACE_ESCAPE_7) || + (emrp->emr_in_length > MCDI_CTL_SDU_LEN_MAX_V1) || + (emrp->emr_out_length > MCDI_CTL_SDU_LEN_MAX_V1))) { + /* Construct MCDI v2 header */ + hdr_len = sizeof (hdr); + EFX_POPULATE_DWORD_8(hdr[0], + MCDI_HEADER_CODE, MC_CMD_V2_EXTN, + MCDI_HEADER_RESYNC, 1, + MCDI_HEADER_DATALEN, 0, + MCDI_HEADER_SEQ, seq, + MCDI_HEADER_NOT_EPOCH, new_epoch ? 0 : 1, + MCDI_HEADER_ERROR, 0, + MCDI_HEADER_RESPONSE, 0, + MCDI_HEADER_XFLAGS, xflags); + + EFX_POPULATE_DWORD_2(hdr[1], + MC_CMD_V2_EXTN_IN_EXTENDED_CMD, emrp->emr_cmd, + MC_CMD_V2_EXTN_IN_ACTUAL_LEN, emrp->emr_in_length); + } else { + /* Construct MCDI v1 header */ + hdr_len = sizeof (hdr[0]); + EFX_POPULATE_DWORD_8(hdr[0], + MCDI_HEADER_CODE, emrp->emr_cmd, + MCDI_HEADER_RESYNC, 1, + MCDI_HEADER_DATALEN, emrp->emr_in_length, + MCDI_HEADER_SEQ, seq, + MCDI_HEADER_NOT_EPOCH, new_epoch ? 
0 : 1, + MCDI_HEADER_ERROR, 0, + MCDI_HEADER_RESPONSE, 0, + MCDI_HEADER_XFLAGS, xflags); + } + +#if EFSYS_OPT_MCDI_LOGGING + if (emtp->emt_logger != NULL) { + emtp->emt_logger(emtp->emt_context, EFX_LOG_MCDI_REQUEST, + &hdr[0], hdr_len, + emrp->emr_in_buf, emrp->emr_in_length); + } +#endif /* EFSYS_OPT_MCDI_LOGGING */ + + efx_mcdi_send_request(enp, &hdr[0], hdr_len, + emrp->emr_in_buf, emrp->emr_in_length); +} + + +static void +efx_mcdi_read_response_header( + __in efx_nic_t *enp, + __inout efx_mcdi_req_t *emrp) +{ +#if EFSYS_OPT_MCDI_LOGGING + const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp; +#endif /* EFSYS_OPT_MCDI_LOGGING */ + efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); + efx_dword_t hdr[2]; + unsigned int hdr_len; + unsigned int data_len; + unsigned int seq; + unsigned int cmd; + unsigned int error; + efx_rc_t rc; + + EFSYS_ASSERT(emrp != NULL); + + efx_mcdi_read_response(enp, &hdr[0], 0, sizeof (hdr[0])); + hdr_len = sizeof (hdr[0]); + + cmd = EFX_DWORD_FIELD(hdr[0], MCDI_HEADER_CODE); + seq = EFX_DWORD_FIELD(hdr[0], MCDI_HEADER_SEQ); + error = EFX_DWORD_FIELD(hdr[0], MCDI_HEADER_ERROR); + + if (cmd != MC_CMD_V2_EXTN) { + data_len = EFX_DWORD_FIELD(hdr[0], MCDI_HEADER_DATALEN); + } else { + efx_mcdi_read_response(enp, &hdr[1], hdr_len, sizeof (hdr[1])); + hdr_len += sizeof (hdr[1]); + + cmd = EFX_DWORD_FIELD(hdr[1], MC_CMD_V2_EXTN_IN_EXTENDED_CMD); + data_len = + EFX_DWORD_FIELD(hdr[1], MC_CMD_V2_EXTN_IN_ACTUAL_LEN); + } + + if (error && (data_len == 0)) { + /* The MC has rebooted since the request was sent. */ + EFSYS_SPIN(EFX_MCDI_STATUS_SLEEP_US); + efx_mcdi_poll_reboot(enp); + rc = EIO; + goto fail1; + } +#if EFSYS_OPT_MCDI_PROXY_AUTH_SERVER + if (((cmd != emrp->emr_cmd) && (emrp->emr_cmd != MC_CMD_PROXY_CMD)) || +#else + if ((cmd != emrp->emr_cmd) || +#endif + (seq != ((emip->emi_seq - 1) & EFX_MASK32(MCDI_HEADER_SEQ)))) { + /* Response is for a different request */ + rc = EIO; + goto fail2; + } + if (error) { + efx_dword_t err[2]; + unsigned int err_len = MIN(data_len, sizeof (err)); + int err_code = MC_CMD_ERR_EPROTO; + int err_arg = 0; + + /* Read error code (and arg num for MCDI v2 commands) */ + efx_mcdi_read_response(enp, &err, hdr_len, err_len); + + if (err_len >= (MC_CMD_ERR_CODE_OFST + sizeof (efx_dword_t))) + err_code = EFX_DWORD_FIELD(err[0], EFX_DWORD_0); +#ifdef WITH_MCDI_V2 + if (err_len >= (MC_CMD_ERR_ARG_OFST + sizeof (efx_dword_t))) + err_arg = EFX_DWORD_FIELD(err[1], EFX_DWORD_0); +#endif + emrp->emr_err_code = err_code; + emrp->emr_err_arg = err_arg; + +#if EFSYS_OPT_MCDI_PROXY_AUTH + if ((err_code == MC_CMD_ERR_PROXY_PENDING) && + (err_len == sizeof (err))) { + /* + * The MCDI request would normally fail with EPERM, but + * firmware has forwarded it to an authorization agent + * attached to a privileged PF. + * + * Save the authorization request handle. The client + * must wait for a PROXY_RESPONSE event, or timeout. 
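+ *
+ * For example, once the request has failed the client could recover
+ * the handle with:
+ *
+ *   uint32_t handle;
+ *
+ *   if (efx_mcdi_get_proxy_handle(enp, emrp, &handle) == 0)
+ *           ...wait for efx_mcdi_ev_proxy_response() for this handle...
+ *
+ * and then either re-issue the original request (authorization was
+ * granted) or complete it with the status reported by the event.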
+ */ + emrp->emr_proxy_handle = err_arg; + } +#endif /* EFSYS_OPT_MCDI_PROXY_AUTH */ + +#if EFSYS_OPT_MCDI_LOGGING + if (emtp->emt_logger != NULL) { + emtp->emt_logger(emtp->emt_context, + EFX_LOG_MCDI_RESPONSE, + &hdr[0], hdr_len, + &err[0], err_len); + } +#endif /* EFSYS_OPT_MCDI_LOGGING */ + + if (!emrp->emr_quiet) { + EFSYS_PROBE3(mcdi_err_arg, int, emrp->emr_cmd, + int, err_code, int, err_arg); + } + + rc = efx_mcdi_request_errcode(err_code); + goto fail3; + } + + emrp->emr_rc = 0; + emrp->emr_out_length_used = data_len; +#if EFSYS_OPT_MCDI_PROXY_AUTH + emrp->emr_proxy_handle = 0; +#endif /* EFSYS_OPT_MCDI_PROXY_AUTH */ + return; + +fail3: +fail2: +fail1: + emrp->emr_rc = rc; + emrp->emr_out_length_used = 0; +} + +static void +efx_mcdi_finish_response( + __in efx_nic_t *enp, + __in efx_mcdi_req_t *emrp) +{ +#if EFSYS_OPT_MCDI_LOGGING + const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp; +#endif /* EFSYS_OPT_MCDI_LOGGING */ + efx_dword_t hdr[2]; + unsigned int hdr_len; + size_t bytes; + unsigned int resp_off; +#if EFSYS_OPT_MCDI_PROXY_AUTH_SERVER + unsigned int resp_cmd; + boolean_t proxied_cmd_resp = B_FALSE; +#endif /* EFSYS_OPT_MCDI_PROXY_AUTH_SERVER */ + + if (emrp->emr_out_buf == NULL) + return; + + /* Read the command header to detect MCDI response format */ + hdr_len = sizeof (hdr[0]); + efx_mcdi_read_response(enp, &hdr[0], 0, hdr_len); + if (EFX_DWORD_FIELD(hdr[0], MCDI_HEADER_CODE) == MC_CMD_V2_EXTN) { + /* + * Read the actual payload length. The length given in the event + * is only correct for responses with the V1 format. + */ + efx_mcdi_read_response(enp, &hdr[1], hdr_len, sizeof (hdr[1])); + hdr_len += sizeof (hdr[1]); + resp_off = hdr_len; + + emrp->emr_out_length_used = EFX_DWORD_FIELD(hdr[1], + MC_CMD_V2_EXTN_IN_ACTUAL_LEN); +#if EFSYS_OPT_MCDI_PROXY_AUTH_SERVER + /* + * A proxy MCDI command is executed by PF on behalf of + * one of its VFs. The command to be proxied follows + * immediately afterward in the host buffer. + * PROXY_CMD inner call complete response should be copied to + * output buffer so that it can be returned to the requesting + * function in MC_CMD_PROXY_COMPLETE payload. 
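+ *
+ * In that case the copy below starts at offset 0 instead of at the
+ * payload, roughly:
+ *
+ *   resp_off = 0;
+ *   emrp->emr_out_length_used += hdr_len;
+ *
+ * so the caller receives the inner response complete with its MCDI
+ * headers.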
+ */ + resp_cmd = + EFX_DWORD_FIELD(hdr[1], MC_CMD_V2_EXTN_IN_EXTENDED_CMD); + proxied_cmd_resp = ((emrp->emr_cmd == MC_CMD_PROXY_CMD) && + (resp_cmd != MC_CMD_PROXY_CMD)); + if (proxied_cmd_resp) { + resp_off = 0; + emrp->emr_out_length_used += hdr_len; + } +#endif /* EFSYS_OPT_MCDI_PROXY_AUTH_SERVER */ + } else { + resp_off = hdr_len; + } + + /* Copy payload out into caller supplied buffer */ + bytes = MIN(emrp->emr_out_length_used, emrp->emr_out_length); + efx_mcdi_read_response(enp, emrp->emr_out_buf, resp_off, bytes); + +#if EFSYS_OPT_MCDI_LOGGING + if (emtp->emt_logger != NULL) { + emtp->emt_logger(emtp->emt_context, + EFX_LOG_MCDI_RESPONSE, + &hdr[0], hdr_len, + emrp->emr_out_buf, bytes); + } +#endif /* EFSYS_OPT_MCDI_LOGGING */ +} + + + __checkReturn boolean_t +efx_mcdi_request_poll( + __in efx_nic_t *enp) +{ + efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); + efx_mcdi_req_t *emrp; + efsys_lock_state_t state; + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI); + EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI); + + /* Serialise against post-watchdog efx_mcdi_ev* */ + EFSYS_LOCK(enp->en_eslp, state); + + EFSYS_ASSERT(emip->emi_pending_req != NULL); + EFSYS_ASSERT(!emip->emi_ev_cpl); + emrp = emip->emi_pending_req; + + /* Check if hardware is unavailable */ + if (efx_nic_hw_unavailable(enp)) { + EFSYS_UNLOCK(enp->en_eslp, state); + return (B_FALSE); + } + + /* Check for reboot atomically w.r.t efx_mcdi_request_start */ + if (emip->emi_poll_cnt++ == 0) { + if ((rc = efx_mcdi_poll_reboot(enp)) != 0) { + emip->emi_pending_req = NULL; + EFSYS_UNLOCK(enp->en_eslp, state); + + /* Reboot/Assertion */ + if (rc == EIO || rc == EINTR) + efx_mcdi_raise_exception(enp, emrp, rc); + + goto fail1; + } + } + + /* Check if a response is available */ + if (efx_mcdi_poll_response(enp) == B_FALSE) { + EFSYS_UNLOCK(enp->en_eslp, state); + return (B_FALSE); + } + + /* Read the response header */ + efx_mcdi_read_response_header(enp, emrp); + + /* Request complete */ + emip->emi_pending_req = NULL; + + /* Ensure stale MCDI requests fail after an MC reboot. */ + emip->emi_new_epoch = B_FALSE; + + EFSYS_UNLOCK(enp->en_eslp, state); + + if ((rc = emrp->emr_rc) != 0) + goto fail2; + + efx_mcdi_finish_response(enp, emrp); + return (B_TRUE); + +fail2: + if (!emrp->emr_quiet) + EFSYS_PROBE(fail2); +fail1: + if (!emrp->emr_quiet) + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (B_TRUE); +} + + __checkReturn boolean_t +efx_mcdi_request_abort( + __in efx_nic_t *enp) +{ + efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); + efx_mcdi_req_t *emrp; + boolean_t aborted; + efsys_lock_state_t state; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI); + EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI); + + /* + * efx_mcdi_ev_* may have already completed this event, and be + * spinning/blocked on the upper layer lock. So it *is* legitimate + * to for emi_pending_req to be NULL. If there is a pending event + * completed request, then provide a "credit" to allow + * efx_mcdi_ev_cpl() to accept a single spurious completion. 
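+ *
+ * The credit is consumed in efx_mcdi_ev_cpl(); on a completion with
+ * no matching pending request it does roughly:
+ *
+ *   if (emip->emi_aborted > 0)
+ *           --emip->emi_aborted;
+ *
+ * i.e. one late completion is swallowed instead of being matched
+ * against a newer request.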
+ */ + EFSYS_LOCK(enp->en_eslp, state); + emrp = emip->emi_pending_req; + aborted = (emrp != NULL); + if (aborted) { + emip->emi_pending_req = NULL; + + /* Error the request */ + emrp->emr_out_length_used = 0; + emrp->emr_rc = ETIMEDOUT; + + /* Provide a credit for seqno/emr_pending_req mismatches */ + if (emip->emi_ev_cpl) + ++emip->emi_aborted; + + /* + * The upper layer has called us, so we don't + * need to complete the request. + */ + } + EFSYS_UNLOCK(enp->en_eslp, state); + + return (aborted); +} + + void +efx_mcdi_get_timeout( + __in efx_nic_t *enp, + __in efx_mcdi_req_t *emrp, + __out uint32_t *timeoutp) +{ + const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop; + + emcop->emco_get_timeout(enp, emrp, timeoutp); +} + + __checkReturn efx_rc_t +efx_mcdi_request_errcode( + __in unsigned int err) +{ + + switch (err) { + /* MCDI v1 */ + case MC_CMD_ERR_EPERM: + return (EACCES); + case MC_CMD_ERR_ENOENT: + return (ENOENT); + case MC_CMD_ERR_EINTR: + return (EINTR); + case MC_CMD_ERR_EACCES: + return (EACCES); + case MC_CMD_ERR_EBUSY: + return (EBUSY); + case MC_CMD_ERR_EINVAL: + return (EINVAL); + case MC_CMD_ERR_EDEADLK: + return (EDEADLK); + case MC_CMD_ERR_ENOSYS: + return (ENOTSUP); + case MC_CMD_ERR_ETIME: + return (ETIMEDOUT); + case MC_CMD_ERR_ENOTSUP: + return (ENOTSUP); + case MC_CMD_ERR_EALREADY: + return (EALREADY); + + /* MCDI v2 */ + case MC_CMD_ERR_EEXIST: + return (EEXIST); +#ifdef MC_CMD_ERR_EAGAIN + case MC_CMD_ERR_EAGAIN: + return (EAGAIN); +#endif +#ifdef MC_CMD_ERR_ENOSPC + case MC_CMD_ERR_ENOSPC: + return (ENOSPC); +#endif + case MC_CMD_ERR_ERANGE: + return (ERANGE); + + case MC_CMD_ERR_ALLOC_FAIL: + return (ENOMEM); + case MC_CMD_ERR_NO_VADAPTOR: + return (ENOENT); + case MC_CMD_ERR_NO_EVB_PORT: + return (ENOENT); + case MC_CMD_ERR_NO_VSWITCH: + return (ENODEV); + case MC_CMD_ERR_VLAN_LIMIT: + return (EINVAL); + case MC_CMD_ERR_BAD_PCI_FUNC: + return (ENODEV); + case MC_CMD_ERR_BAD_VLAN_MODE: + return (EINVAL); + case MC_CMD_ERR_BAD_VSWITCH_TYPE: + return (EINVAL); + case MC_CMD_ERR_BAD_VPORT_TYPE: + return (EINVAL); + case MC_CMD_ERR_MAC_EXIST: + return (EEXIST); + + case MC_CMD_ERR_PROXY_PENDING: + return (EAGAIN); + + default: + EFSYS_PROBE1(mc_pcol_error, int, err); + return (EIO); + } +} + + void +efx_mcdi_raise_exception( + __in efx_nic_t *enp, + __in_opt efx_mcdi_req_t *emrp, + __in int rc) +{ + const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp; + efx_mcdi_exception_t exception; + + /* Reboot or Assertion failure only */ + EFSYS_ASSERT(rc == EIO || rc == EINTR); + + /* + * If MC_CMD_REBOOT causes a reboot (dependent on parameters), + * then the EIO is not worthy of an exception. + */ + if (emrp != NULL && emrp->emr_cmd == MC_CMD_REBOOT && rc == EIO) + return; + + exception = (rc == EIO) + ? 
EFX_MCDI_EXCEPTION_MC_REBOOT + : EFX_MCDI_EXCEPTION_MC_BADASSERT; + + emtp->emt_exception(emtp->emt_context, exception); +} + + void +efx_mcdi_execute( + __in efx_nic_t *enp, + __inout efx_mcdi_req_t *emrp) +{ + const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp; + + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI); + EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI); + + emrp->emr_quiet = B_FALSE; + emtp->emt_execute(emtp->emt_context, emrp); +} + + void +efx_mcdi_execute_quiet( + __in efx_nic_t *enp, + __inout efx_mcdi_req_t *emrp) +{ + const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp; + + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI); + EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI); + + emrp->emr_quiet = B_TRUE; + emtp->emt_execute(emtp->emt_context, emrp); +} + + void +efx_mcdi_ev_cpl( + __in efx_nic_t *enp, + __in unsigned int seq, + __in unsigned int outlen, + __in int errcode) +{ + efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); + const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp; + efx_mcdi_req_t *emrp; + efsys_lock_state_t state; + + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI); + EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI); + + /* + * Serialise against efx_mcdi_request_poll()/efx_mcdi_request_start() + * when we're completing an aborted request. + */ + EFSYS_LOCK(enp->en_eslp, state); + if (emip->emi_pending_req == NULL || !emip->emi_ev_cpl || + (seq != ((emip->emi_seq - 1) & EFX_MASK32(MCDI_HEADER_SEQ)))) { + EFSYS_ASSERT(emip->emi_aborted > 0); + if (emip->emi_aborted > 0) + --emip->emi_aborted; + EFSYS_UNLOCK(enp->en_eslp, state); + return; + } + + emrp = emip->emi_pending_req; + emip->emi_pending_req = NULL; + EFSYS_UNLOCK(enp->en_eslp, state); + + if (emip->emi_max_version >= 2) { + /* MCDIv2 response details do not fit into an event. */ + efx_mcdi_read_response_header(enp, emrp); + } else { + if (errcode != 0) { + if (!emrp->emr_quiet) { + EFSYS_PROBE2(mcdi_err, int, emrp->emr_cmd, + int, errcode); + } + emrp->emr_out_length_used = 0; + emrp->emr_rc = efx_mcdi_request_errcode(errcode); + } else { + emrp->emr_out_length_used = outlen; + emrp->emr_rc = 0; + } + } + if (emrp->emr_rc == 0) + efx_mcdi_finish_response(enp, emrp); + + emtp->emt_ev_cpl(emtp->emt_context); +} + +#if EFSYS_OPT_MCDI_PROXY_AUTH + + __checkReturn efx_rc_t +efx_mcdi_get_proxy_handle( + __in efx_nic_t *enp, + __in efx_mcdi_req_t *emrp, + __out uint32_t *handlep) +{ + efx_rc_t rc; + + _NOTE(ARGUNUSED(enp)) + + /* + * Return proxy handle from MCDI request that returned with error + * MC_MCD_ERR_PROXY_PENDING. This handle is used to wait for a matching + * PROXY_RESPONSE event. + */ + if ((emrp == NULL) || (handlep == NULL)) { + rc = EINVAL; + goto fail1; + } + if ((emrp->emr_rc != 0) && + (emrp->emr_err_code == MC_CMD_ERR_PROXY_PENDING)) { + *handlep = emrp->emr_proxy_handle; + rc = 0; + } else { + *handlep = 0; + rc = ENOENT; + } + return (rc); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + return (rc); +} + + void +efx_mcdi_ev_proxy_response( + __in efx_nic_t *enp, + __in unsigned int handle, + __in unsigned int status) +{ + const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp; + efx_rc_t rc; + + /* + * Handle results of an authorization request for a privileged MCDI + * command. If authorization was granted then we must re-issue the + * original MCDI request. If authorization failed or timed out, + * then the original MCDI request should be completed with the + * result code from this event. + */ + rc = (status == 0) ? 
0 : efx_mcdi_request_errcode(status); + + emtp->emt_ev_proxy_response(emtp->emt_context, handle, rc); +} +#endif /* EFSYS_OPT_MCDI_PROXY_AUTH */ + +#if EFSYS_OPT_MCDI_PROXY_AUTH_SERVER + void +efx_mcdi_ev_proxy_request( + __in efx_nic_t *enp, + __in unsigned int index) +{ + const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp; + + if (emtp->emt_ev_proxy_request != NULL) + emtp->emt_ev_proxy_request(emtp->emt_context, index); +} +#endif /* EFSYS_OPT_MCDI_PROXY_AUTH_SERVER */ + void +efx_mcdi_ev_death( + __in efx_nic_t *enp, + __in int rc) +{ + efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); + const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp; + efx_mcdi_req_t *emrp = NULL; + boolean_t ev_cpl; + efsys_lock_state_t state; + + /* + * The MCDI request (if there is one) has been terminated, either + * by a BADASSERT or REBOOT event. + * + * If there is an outstanding event-completed MCDI operation, then we + * will never receive the completion event (because both MCDI + * completions and BADASSERT events are sent to the same evq). So + * complete this MCDI op. + * + * This function might run in parallel with efx_mcdi_request_poll() + * for poll completed mcdi requests, and also with + * efx_mcdi_request_start() for post-watchdog completions. + */ + EFSYS_LOCK(enp->en_eslp, state); + emrp = emip->emi_pending_req; + ev_cpl = emip->emi_ev_cpl; + if (emrp != NULL && emip->emi_ev_cpl) { + emip->emi_pending_req = NULL; + + emrp->emr_out_length_used = 0; + emrp->emr_rc = rc; + ++emip->emi_aborted; + } + + /* + * Since we're running in parallel with a request, consume the + * status word before dropping the lock. + */ + if (rc == EIO || rc == EINTR) { + EFSYS_SPIN(EFX_MCDI_STATUS_SLEEP_US); + (void) efx_mcdi_poll_reboot(enp); + emip->emi_new_epoch = B_TRUE; + } + + EFSYS_UNLOCK(enp->en_eslp, state); + + efx_mcdi_raise_exception(enp, emrp, rc); + + if (emrp != NULL && ev_cpl) + emtp->emt_ev_cpl(emtp->emt_context); +} + + __checkReturn efx_rc_t +efx_mcdi_version( + __in efx_nic_t *enp, + __out_ecount_opt(4) uint16_t versionp[4], + __out_opt uint32_t *buildp, + __out_opt efx_mcdi_boot_t *statusp) +{ + efx_mcdi_req_t req; + EFX_MCDI_DECLARE_BUF(payload, + MAX(MC_CMD_GET_VERSION_IN_LEN, MC_CMD_GET_BOOT_STATUS_IN_LEN), + MAX(MC_CMD_GET_VERSION_OUT_LEN, + MC_CMD_GET_BOOT_STATUS_OUT_LEN)); + efx_word_t *ver_words; + uint16_t version[4]; + uint32_t build; + efx_mcdi_boot_t status; + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI); + + req.emr_cmd = MC_CMD_GET_VERSION; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_GET_VERSION_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_GET_VERSION_OUT_LEN; + + efx_mcdi_execute(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail1; + } + + /* bootrom support */ + if (req.emr_out_length_used == MC_CMD_GET_VERSION_V0_OUT_LEN) { + version[0] = version[1] = version[2] = version[3] = 0; + build = MCDI_OUT_DWORD(req, GET_VERSION_OUT_FIRMWARE); + + goto version; + } + + if (req.emr_out_length_used < MC_CMD_GET_VERSION_OUT_LEN) { + rc = EMSGSIZE; + goto fail2; + } + + ver_words = MCDI_OUT2(req, efx_word_t, GET_VERSION_OUT_VERSION); + version[0] = EFX_WORD_FIELD(ver_words[0], EFX_WORD_0); + version[1] = EFX_WORD_FIELD(ver_words[1], EFX_WORD_0); + version[2] = EFX_WORD_FIELD(ver_words[2], EFX_WORD_0); + version[3] = EFX_WORD_FIELD(ver_words[3], EFX_WORD_0); + build = MCDI_OUT_DWORD(req, GET_VERSION_OUT_FIRMWARE); + +version: + /* The bootrom doesn't understand BOOT_STATUS */ + if 
(MC_FW_VERSION_IS_BOOTLOADER(build)) { + status = EFX_MCDI_BOOT_ROM; + goto out; + } + + (void) memset(payload, 0, sizeof (payload)); + req.emr_cmd = MC_CMD_GET_BOOT_STATUS; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_GET_BOOT_STATUS_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_GET_BOOT_STATUS_OUT_LEN; + + efx_mcdi_execute_quiet(enp, &req); + + if (req.emr_rc == EACCES) { + /* Unprivileged functions cannot access BOOT_STATUS */ + status = EFX_MCDI_BOOT_PRIMARY; + version[0] = version[1] = version[2] = version[3] = 0; + build = 0; + goto out; + } + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail3; + } + + if (req.emr_out_length_used < MC_CMD_GET_BOOT_STATUS_OUT_LEN) { + rc = EMSGSIZE; + goto fail4; + } + + if (MCDI_OUT_DWORD_FIELD(req, GET_BOOT_STATUS_OUT_FLAGS, + GET_BOOT_STATUS_OUT_FLAGS_PRIMARY)) + status = EFX_MCDI_BOOT_PRIMARY; + else + status = EFX_MCDI_BOOT_SECONDARY; + +out: + if (versionp != NULL) + memcpy(versionp, version, sizeof (version)); + if (buildp != NULL) + *buildp = build; + if (statusp != NULL) + *statusp = status; + + return (0); + +fail4: + EFSYS_PROBE(fail4); +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +efx_mcdi_get_capabilities( + __in efx_nic_t *enp, + __out_opt uint32_t *flagsp, + __out_opt uint16_t *rx_dpcpu_fw_idp, + __out_opt uint16_t *tx_dpcpu_fw_idp, + __out_opt uint32_t *flags2p, + __out_opt uint32_t *tso2ncp) +{ + efx_mcdi_req_t req; + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_CAPABILITIES_IN_LEN, + MC_CMD_GET_CAPABILITIES_V2_OUT_LEN); + boolean_t v2_capable; + efx_rc_t rc; + + req.emr_cmd = MC_CMD_GET_CAPABILITIES; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_GET_CAPABILITIES_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_GET_CAPABILITIES_V2_OUT_LEN; + + efx_mcdi_execute_quiet(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail1; + } + + if (req.emr_out_length_used < MC_CMD_GET_CAPABILITIES_OUT_LEN) { + rc = EMSGSIZE; + goto fail2; + } + + if (flagsp != NULL) + *flagsp = MCDI_OUT_DWORD(req, GET_CAPABILITIES_OUT_FLAGS1); + + if (rx_dpcpu_fw_idp != NULL) + *rx_dpcpu_fw_idp = MCDI_OUT_WORD(req, + GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID); + + if (tx_dpcpu_fw_idp != NULL) + *tx_dpcpu_fw_idp = MCDI_OUT_WORD(req, + GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID); + + if (req.emr_out_length_used < MC_CMD_GET_CAPABILITIES_V2_OUT_LEN) + v2_capable = B_FALSE; + else + v2_capable = B_TRUE; + + if (flags2p != NULL) { + *flags2p = (v2_capable) ? + MCDI_OUT_DWORD(req, GET_CAPABILITIES_V2_OUT_FLAGS2) : + 0; + } + + if (tso2ncp != NULL) { + *tso2ncp = (v2_capable) ? + MCDI_OUT_WORD(req, + GET_CAPABILITIES_V2_OUT_TX_TSO_V2_N_CONTEXTS) : + 0; + } + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +static __checkReturn efx_rc_t +efx_mcdi_do_reboot( + __in efx_nic_t *enp, + __in boolean_t after_assertion) +{ + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_REBOOT_IN_LEN, + MC_CMD_REBOOT_OUT_LEN); + efx_mcdi_req_t req; + efx_rc_t rc; + + /* + * We could require the caller to have caused en_mod_flags=0 to + * call this function. This doesn't help the other port though, + * who's about to get the MC ripped out from underneath them. + * Since they have to cope with the subsequent fallout of MCDI + * failures, we should as well. 
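+ *
+ * Note also that a successful reboot request is reported as EIO rather
+ * than 0, so the error handling below treats EIO as success:
+ *
+ *   if (req.emr_rc != 0 && req.emr_rc != EIO)
+ *           rc = req.emr_rc;
+ *
+ * and only other error codes are propagated to the caller.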
+ */ + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + + req.emr_cmd = MC_CMD_REBOOT; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_REBOOT_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_REBOOT_OUT_LEN; + + MCDI_IN_SET_DWORD(req, REBOOT_IN_FLAGS, + (after_assertion ? MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION : 0)); + + efx_mcdi_execute_quiet(enp, &req); + + if (req.emr_rc == EACCES) { + /* Unprivileged functions cannot reboot the MC. */ + goto out; + } + + /* A successful reboot request returns EIO. */ + if (req.emr_rc != 0 && req.emr_rc != EIO) { + rc = req.emr_rc; + goto fail1; + } + +out: + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +efx_mcdi_reboot( + __in efx_nic_t *enp) +{ + return (efx_mcdi_do_reboot(enp, B_FALSE)); +} + + __checkReturn efx_rc_t +efx_mcdi_exit_assertion_handler( + __in efx_nic_t *enp) +{ + return (efx_mcdi_do_reboot(enp, B_TRUE)); +} + + __checkReturn efx_rc_t +efx_mcdi_read_assertion( + __in efx_nic_t *enp) +{ + efx_mcdi_req_t req; + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_ASSERTS_IN_LEN, + MC_CMD_GET_ASSERTS_OUT_LEN); + const char *reason; + unsigned int flags; + unsigned int index; + unsigned int ofst; + int retry; + efx_rc_t rc; + + /* + * Before we attempt to chat to the MC, we should verify that the MC + * isn't in it's assertion handler, either due to a previous reboot, + * or because we're reinitializing due to an eec_exception(). + * + * Use GET_ASSERTS to read any assertion state that may be present. + * Retry this command twice. Once because a boot-time assertion failure + * might cause the 1st MCDI request to fail. And once again because + * we might race with efx_mcdi_exit_assertion_handler() running on + * partner port(s) on the same NIC. + */ + retry = 2; + do { + (void) memset(payload, 0, sizeof (payload)); + req.emr_cmd = MC_CMD_GET_ASSERTS; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_GET_ASSERTS_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_GET_ASSERTS_OUT_LEN; + + MCDI_IN_SET_DWORD(req, GET_ASSERTS_IN_CLEAR, 1); + efx_mcdi_execute_quiet(enp, &req); + + } while ((req.emr_rc == EINTR || req.emr_rc == EIO) && retry-- > 0); + + if (req.emr_rc != 0) { + if (req.emr_rc == EACCES) { + /* Unprivileged functions cannot clear assertions. */ + goto out; + } + rc = req.emr_rc; + goto fail1; + } + + if (req.emr_out_length_used < MC_CMD_GET_ASSERTS_OUT_LEN) { + rc = EMSGSIZE; + goto fail2; + } + + /* Print out any assertion state recorded */ + flags = MCDI_OUT_DWORD(req, GET_ASSERTS_OUT_GLOBAL_FLAGS); + if (flags == MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS) + return (0); + + reason = (flags == MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL) + ? "system-level assertion" + : (flags == MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL) + ? "thread-level assertion" + : (flags == MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED) + ? "watchdog reset" + : (flags == MC_CMD_GET_ASSERTS_FLAGS_ADDR_TRAP) + ? "illegal address trap" + : "unknown assertion"; + EFSYS_PROBE3(mcpu_assertion, + const char *, reason, unsigned int, + MCDI_OUT_DWORD(req, GET_ASSERTS_OUT_SAVED_PC_OFFS), + unsigned int, + MCDI_OUT_DWORD(req, GET_ASSERTS_OUT_THREAD_OFFS)); + + /* Print out the registers (r1 ... 
r31) */ + ofst = MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST; + for (index = 1; + index < 1 + MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM; + index++) { + EFSYS_PROBE2(mcpu_register, unsigned int, index, unsigned int, + EFX_DWORD_FIELD(*MCDI_OUT(req, efx_dword_t, ofst), + EFX_DWORD_0)); + ofst += sizeof (efx_dword_t); + } + EFSYS_ASSERT(ofst <= MC_CMD_GET_ASSERTS_OUT_LEN); + +out: + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + +/* + * Internal routines for for specific MCDI requests. + */ + + __checkReturn efx_rc_t +efx_mcdi_drv_attach( + __in efx_nic_t *enp, + __in boolean_t attach) +{ + efx_mcdi_req_t req; + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_DRV_ATTACH_IN_V2_LEN, + MC_CMD_DRV_ATTACH_EXT_OUT_LEN); + efx_rc_t rc; + + req.emr_cmd = MC_CMD_DRV_ATTACH; + req.emr_in_buf = payload; + if (enp->en_drv_version[0] == '\0') { + req.emr_in_length = MC_CMD_DRV_ATTACH_IN_LEN; + } else { + req.emr_in_length = MC_CMD_DRV_ATTACH_IN_V2_LEN; + } + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_DRV_ATTACH_EXT_OUT_LEN; + + /* + * Typically, client drivers use DONT_CARE for the datapath firmware + * type to ensure that the driver can attach to an unprivileged + * function. The datapath firmware type to use is controlled by the + * 'sfboot' utility. + * If a client driver wishes to attach with a specific datapath firmware + * type, that can be passed in second argument of efx_nic_probe API. One + * such example is the ESXi native driver that attempts attaching with + * FULL_FEATURED datapath firmware type first and fall backs to + * DONT_CARE datapath firmware type if MC_CMD_DRV_ATTACH fails. + */ + MCDI_IN_POPULATE_DWORD_2(req, DRV_ATTACH_IN_NEW_STATE, + DRV_ATTACH_IN_ATTACH, attach ? 1 : 0, + DRV_ATTACH_IN_SUBVARIANT_AWARE, EFSYS_OPT_FW_SUBVARIANT_AWARE); + MCDI_IN_SET_DWORD(req, DRV_ATTACH_IN_UPDATE, 1); + MCDI_IN_SET_DWORD(req, DRV_ATTACH_IN_FIRMWARE_ID, enp->efv); + + if (req.emr_in_length >= MC_CMD_DRV_ATTACH_IN_V2_LEN) { + EFX_STATIC_ASSERT(sizeof (enp->en_drv_version) == + MC_CMD_DRV_ATTACH_IN_V2_DRIVER_VERSION_LEN); + memcpy(MCDI_IN2(req, char, DRV_ATTACH_IN_V2_DRIVER_VERSION), + enp->en_drv_version, + MC_CMD_DRV_ATTACH_IN_V2_DRIVER_VERSION_LEN); + } + + efx_mcdi_execute(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail1; + } + + if (req.emr_out_length_used < MC_CMD_DRV_ATTACH_OUT_LEN) { + rc = EMSGSIZE; + goto fail2; + } + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +efx_mcdi_get_board_cfg( + __in efx_nic_t *enp, + __out_opt uint32_t *board_typep, + __out_opt efx_dword_t *capabilitiesp, + __out_ecount_opt(6) uint8_t mac_addrp[6]) +{ + efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); + efx_mcdi_req_t req; + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_BOARD_CFG_IN_LEN, + MC_CMD_GET_BOARD_CFG_OUT_LENMIN); + efx_rc_t rc; + + req.emr_cmd = MC_CMD_GET_BOARD_CFG; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_GET_BOARD_CFG_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_GET_BOARD_CFG_OUT_LENMIN; + + efx_mcdi_execute(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail1; + } + + if (req.emr_out_length_used < MC_CMD_GET_BOARD_CFG_OUT_LENMIN) { + rc = EMSGSIZE; + goto fail2; + } + + if (mac_addrp != NULL) { + uint8_t *addrp; + + if (emip->emi_port == 1) { + addrp = MCDI_OUT2(req, uint8_t, + GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0); + } else if (emip->emi_port == 2) { + addrp = 
MCDI_OUT2(req, uint8_t, + GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1); + } else { + rc = EINVAL; + goto fail3; + } + + EFX_MAC_ADDR_COPY(mac_addrp, addrp); + } + + if (capabilitiesp != NULL) { + if (emip->emi_port == 1) { + *capabilitiesp = *MCDI_OUT2(req, efx_dword_t, + GET_BOARD_CFG_OUT_CAPABILITIES_PORT0); + } else if (emip->emi_port == 2) { + *capabilitiesp = *MCDI_OUT2(req, efx_dword_t, + GET_BOARD_CFG_OUT_CAPABILITIES_PORT1); + } else { + rc = EINVAL; + goto fail4; + } + } + + if (board_typep != NULL) { + *board_typep = MCDI_OUT_DWORD(req, + GET_BOARD_CFG_OUT_BOARD_TYPE); + } + + return (0); + +fail4: + EFSYS_PROBE(fail4); +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +efx_mcdi_get_resource_limits( + __in efx_nic_t *enp, + __out_opt uint32_t *nevqp, + __out_opt uint32_t *nrxqp, + __out_opt uint32_t *ntxqp) +{ + efx_mcdi_req_t req; + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_RESOURCE_LIMITS_IN_LEN, + MC_CMD_GET_RESOURCE_LIMITS_OUT_LEN); + efx_rc_t rc; + + req.emr_cmd = MC_CMD_GET_RESOURCE_LIMITS; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_GET_RESOURCE_LIMITS_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_GET_RESOURCE_LIMITS_OUT_LEN; + + efx_mcdi_execute(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail1; + } + + if (req.emr_out_length_used < MC_CMD_GET_RESOURCE_LIMITS_OUT_LEN) { + rc = EMSGSIZE; + goto fail2; + } + + if (nevqp != NULL) + *nevqp = MCDI_OUT_DWORD(req, GET_RESOURCE_LIMITS_OUT_EVQ); + if (nrxqp != NULL) + *nrxqp = MCDI_OUT_DWORD(req, GET_RESOURCE_LIMITS_OUT_RXQ); + if (ntxqp != NULL) + *ntxqp = MCDI_OUT_DWORD(req, GET_RESOURCE_LIMITS_OUT_TXQ); + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +efx_mcdi_get_phy_cfg( + __in efx_nic_t *enp) +{ + efx_port_t *epp = &(enp->en_port); + efx_nic_cfg_t *encp = &(enp->en_nic_cfg); + efx_mcdi_req_t req; + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_PHY_CFG_IN_LEN, + MC_CMD_GET_PHY_CFG_OUT_LEN); +#if EFSYS_OPT_NAMES + const char *namep; + size_t namelen; +#endif + uint32_t phy_media_type; + efx_rc_t rc; + + req.emr_cmd = MC_CMD_GET_PHY_CFG; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_GET_PHY_CFG_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_GET_PHY_CFG_OUT_LEN; + + efx_mcdi_execute(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail1; + } + + if (req.emr_out_length_used < MC_CMD_GET_PHY_CFG_OUT_LEN) { + rc = EMSGSIZE; + goto fail2; + } + + encp->enc_phy_type = MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_TYPE); +#if EFSYS_OPT_NAMES + namep = MCDI_OUT2(req, char, GET_PHY_CFG_OUT_NAME); + namelen = MIN(sizeof (encp->enc_phy_name) - 1, + strnlen(namep, MC_CMD_GET_PHY_CFG_OUT_NAME_LEN)); + (void) memset(encp->enc_phy_name, 0, + sizeof (encp->enc_phy_name)); + memcpy(encp->enc_phy_name, namep, namelen); +#endif /* EFSYS_OPT_NAMES */ + (void) memset(encp->enc_phy_revision, 0, + sizeof (encp->enc_phy_revision)); + memcpy(encp->enc_phy_revision, + MCDI_OUT2(req, char, GET_PHY_CFG_OUT_REVISION), + MIN(sizeof (encp->enc_phy_revision) - 1, + MC_CMD_GET_PHY_CFG_OUT_REVISION_LEN)); +#if EFSYS_OPT_PHY_LED_CONTROL + encp->enc_led_mask = ((1 << EFX_PHY_LED_DEFAULT) | + (1 << EFX_PHY_LED_OFF) | + (1 << EFX_PHY_LED_ON)); +#endif /* EFSYS_OPT_PHY_LED_CONTROL */ + + /* Get the media type of the fixed port, if recognised. 
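+ *
+ * The MCDI encoding is checked against efx_phy_media_type_t by the
+ * static asserts below, so the reported value is used directly:
+ *
+ *   epp->ep_fixed_port_type = (efx_phy_media_type_t)
+ *       MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_MEDIA_TYPE);
+ *
+ * with anything at or beyond EFX_PHY_MEDIA_NTYPES mapped to
+ * EFX_PHY_MEDIA_INVALID.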
*/ + EFX_STATIC_ASSERT(MC_CMD_MEDIA_XAUI == EFX_PHY_MEDIA_XAUI); + EFX_STATIC_ASSERT(MC_CMD_MEDIA_CX4 == EFX_PHY_MEDIA_CX4); + EFX_STATIC_ASSERT(MC_CMD_MEDIA_KX4 == EFX_PHY_MEDIA_KX4); + EFX_STATIC_ASSERT(MC_CMD_MEDIA_XFP == EFX_PHY_MEDIA_XFP); + EFX_STATIC_ASSERT(MC_CMD_MEDIA_SFP_PLUS == EFX_PHY_MEDIA_SFP_PLUS); + EFX_STATIC_ASSERT(MC_CMD_MEDIA_BASE_T == EFX_PHY_MEDIA_BASE_T); + EFX_STATIC_ASSERT(MC_CMD_MEDIA_QSFP_PLUS == EFX_PHY_MEDIA_QSFP_PLUS); + phy_media_type = MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_MEDIA_TYPE); + epp->ep_fixed_port_type = (efx_phy_media_type_t)phy_media_type; + if (epp->ep_fixed_port_type >= EFX_PHY_MEDIA_NTYPES) + epp->ep_fixed_port_type = EFX_PHY_MEDIA_INVALID; + + epp->ep_phy_cap_mask = + MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_SUPPORTED_CAP); +#if EFSYS_OPT_PHY_FLAGS + encp->enc_phy_flags_mask = MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_FLAGS); +#endif /* EFSYS_OPT_PHY_FLAGS */ + + encp->enc_port = (uint8_t)MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_PRT); + + /* Populate internal state */ + encp->enc_mcdi_mdio_channel = + (uint8_t)MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_CHANNEL); + +#if EFSYS_OPT_PHY_STATS + encp->enc_mcdi_phy_stat_mask = + MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_STATS_MASK); +#endif /* EFSYS_OPT_PHY_STATS */ + +#if EFSYS_OPT_BIST + encp->enc_bist_mask = 0; + if (MCDI_OUT_DWORD_FIELD(req, GET_PHY_CFG_OUT_FLAGS, + GET_PHY_CFG_OUT_BIST_CABLE_SHORT)) + encp->enc_bist_mask |= (1 << EFX_BIST_TYPE_PHY_CABLE_SHORT); + if (MCDI_OUT_DWORD_FIELD(req, GET_PHY_CFG_OUT_FLAGS, + GET_PHY_CFG_OUT_BIST_CABLE_LONG)) + encp->enc_bist_mask |= (1 << EFX_BIST_TYPE_PHY_CABLE_LONG); + if (MCDI_OUT_DWORD_FIELD(req, GET_PHY_CFG_OUT_FLAGS, + GET_PHY_CFG_OUT_BIST)) + encp->enc_bist_mask |= (1 << EFX_BIST_TYPE_PHY_NORMAL); +#endif /* EFSYS_OPT_BIST */ + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +efx_mcdi_firmware_update_supported( + __in efx_nic_t *enp, + __out boolean_t *supportedp) +{ + const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop; + efx_rc_t rc; + + if (emcop != NULL) { + if ((rc = emcop->emco_feature_supported(enp, + EFX_MCDI_FEATURE_FW_UPDATE, supportedp)) != 0) + goto fail1; + } else { + /* Earlier devices always supported updates */ + *supportedp = B_TRUE; + } + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +efx_mcdi_macaddr_change_supported( + __in efx_nic_t *enp, + __out boolean_t *supportedp) +{ + const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop; + efx_rc_t rc; + + if (emcop != NULL) { + if ((rc = emcop->emco_feature_supported(enp, + EFX_MCDI_FEATURE_MACADDR_CHANGE, supportedp)) != 0) + goto fail1; + } else { + /* Earlier devices always supported MAC changes */ + *supportedp = B_TRUE; + } + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +efx_mcdi_link_control_supported( + __in efx_nic_t *enp, + __out boolean_t *supportedp) +{ + const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop; + efx_rc_t rc; + + if (emcop != NULL) { + if ((rc = emcop->emco_feature_supported(enp, + EFX_MCDI_FEATURE_LINK_CONTROL, supportedp)) != 0) + goto fail1; + } else { + /* Earlier devices always supported link control */ + *supportedp = B_TRUE; + } + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +efx_mcdi_mac_spoofing_supported( + __in efx_nic_t *enp, + __out boolean_t *supportedp) +{ + const efx_mcdi_ops_t *emcop = 
enp->en_mcdi.em_emcop; + efx_rc_t rc; + + if (emcop != NULL) { + if ((rc = emcop->emco_feature_supported(enp, + EFX_MCDI_FEATURE_MAC_SPOOFING, supportedp)) != 0) + goto fail1; + } else { + /* Earlier devices always supported MAC spoofing */ + *supportedp = B_TRUE; + } + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +#if EFSYS_OPT_BIST + +#if EFX_OPTS_EF10() +/* + * Enter bist offline mode. This is a fw mode which puts the NIC into a state + * where memory BIST tests can be run and not much else can interfere or happen. + * A reboot is required to exit this mode. + */ + __checkReturn efx_rc_t +efx_mcdi_bist_enable_offline( + __in efx_nic_t *enp) +{ + efx_mcdi_req_t req; + efx_rc_t rc; + + EFX_STATIC_ASSERT(MC_CMD_ENABLE_OFFLINE_BIST_IN_LEN == 0); + EFX_STATIC_ASSERT(MC_CMD_ENABLE_OFFLINE_BIST_OUT_LEN == 0); + + req.emr_cmd = MC_CMD_ENABLE_OFFLINE_BIST; + req.emr_in_buf = NULL; + req.emr_in_length = 0; + req.emr_out_buf = NULL; + req.emr_out_length = 0; + + efx_mcdi_execute(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail1; + } + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} +#endif /* EFX_OPTS_EF10() */ + + __checkReturn efx_rc_t +efx_mcdi_bist_start( + __in efx_nic_t *enp, + __in efx_bist_type_t type) +{ + efx_mcdi_req_t req; + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_START_BIST_IN_LEN, + MC_CMD_START_BIST_OUT_LEN); + efx_rc_t rc; + + req.emr_cmd = MC_CMD_START_BIST; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_START_BIST_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_START_BIST_OUT_LEN; + + switch (type) { + case EFX_BIST_TYPE_PHY_NORMAL: + MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE, MC_CMD_PHY_BIST); + break; + case EFX_BIST_TYPE_PHY_CABLE_SHORT: + MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE, + MC_CMD_PHY_BIST_CABLE_SHORT); + break; + case EFX_BIST_TYPE_PHY_CABLE_LONG: + MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE, + MC_CMD_PHY_BIST_CABLE_LONG); + break; + case EFX_BIST_TYPE_MC_MEM: + MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE, + MC_CMD_MC_MEM_BIST); + break; + case EFX_BIST_TYPE_SAT_MEM: + MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE, + MC_CMD_PORT_MEM_BIST); + break; + case EFX_BIST_TYPE_REG: + MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE, + MC_CMD_REG_BIST); + break; + default: + EFSYS_ASSERT(0); + } + + efx_mcdi_execute(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail1; + } + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +#endif /* EFSYS_OPT_BIST */ + + +/* Enable logging of some events (e.g. 
link state changes) */ + __checkReturn efx_rc_t +efx_mcdi_log_ctrl( + __in efx_nic_t *enp) +{ + efx_mcdi_req_t req; + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_LOG_CTRL_IN_LEN, + MC_CMD_LOG_CTRL_OUT_LEN); + efx_rc_t rc; + + req.emr_cmd = MC_CMD_LOG_CTRL; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_LOG_CTRL_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_LOG_CTRL_OUT_LEN; + + MCDI_IN_SET_DWORD(req, LOG_CTRL_IN_LOG_DEST, + MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ); + MCDI_IN_SET_DWORD(req, LOG_CTRL_IN_LOG_DEST_EVQ, 0); + + efx_mcdi_execute(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail1; + } + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + +#if EFSYS_OPT_MAC_STATS + + __checkReturn efx_rc_t +efx_mcdi_mac_stats( + __in efx_nic_t *enp, + __in uint32_t vport_id, + __in_opt efsys_mem_t *esmp, + __in efx_stats_action_t action, + __in uint16_t period_ms) +{ + efx_mcdi_req_t req; + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_MAC_STATS_IN_LEN, + MC_CMD_MAC_STATS_V2_OUT_DMA_LEN); + int clear = (action == EFX_STATS_CLEAR); + int upload = (action == EFX_STATS_UPLOAD); + int enable = (action == EFX_STATS_ENABLE_NOEVENTS); + int events = (action == EFX_STATS_ENABLE_EVENTS); + int disable = (action == EFX_STATS_DISABLE); + efx_rc_t rc; + + req.emr_cmd = MC_CMD_MAC_STATS; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_MAC_STATS_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_MAC_STATS_V2_OUT_DMA_LEN; + + MCDI_IN_POPULATE_DWORD_6(req, MAC_STATS_IN_CMD, + MAC_STATS_IN_DMA, upload, + MAC_STATS_IN_CLEAR, clear, + MAC_STATS_IN_PERIODIC_CHANGE, enable | events | disable, + MAC_STATS_IN_PERIODIC_ENABLE, enable | events, + MAC_STATS_IN_PERIODIC_NOEVENT, !events, + MAC_STATS_IN_PERIOD_MS, (enable | events) ? period_ms : 0); + + if (enable || events || upload) { + const efx_nic_cfg_t *encp = &enp->en_nic_cfg; + uint32_t bytes; + + /* Periodic stats or stats upload require a DMA buffer */ + if (esmp == NULL) { + rc = EINVAL; + goto fail1; + } + + if (encp->enc_mac_stats_nstats < MC_CMD_MAC_NSTATS) { + /* MAC stats count too small for legacy MAC stats */ + rc = ENOSPC; + goto fail2; + } + + bytes = encp->enc_mac_stats_nstats * sizeof (efx_qword_t); + + if (EFSYS_MEM_SIZE(esmp) < bytes) { + /* DMA buffer too small */ + rc = ENOSPC; + goto fail3; + } + + MCDI_IN_SET_DWORD(req, MAC_STATS_IN_DMA_ADDR_LO, + EFSYS_MEM_ADDR(esmp) & 0xffffffff); + MCDI_IN_SET_DWORD(req, MAC_STATS_IN_DMA_ADDR_HI, + EFSYS_MEM_ADDR(esmp) >> 32); + MCDI_IN_SET_DWORD(req, MAC_STATS_IN_DMA_LEN, bytes); + } + + /* + * NOTE: Do not use EVB_PORT_ID_ASSIGNED when disabling periodic stats, + * as this may fail (and leave periodic DMA enabled) if the + * vadapter has already been deleted. + */ + MCDI_IN_SET_DWORD(req, MAC_STATS_IN_PORT_ID, + (disable ? 
EVB_PORT_ID_NULL : vport_id)); + + efx_mcdi_execute(enp, &req); + + if (req.emr_rc != 0) { + /* EF10: Expect ENOENT if no DMA queues are initialised */ + if ((req.emr_rc != ENOENT) || + (enp->en_rx_qcount + enp->en_tx_qcount != 0)) { + rc = req.emr_rc; + goto fail4; + } + } + + return (0); + +fail4: + EFSYS_PROBE(fail4); +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +efx_mcdi_mac_stats_clear( + __in efx_nic_t *enp) +{ + efx_rc_t rc; + + if ((rc = efx_mcdi_mac_stats(enp, enp->en_vport_id, NULL, + EFX_STATS_CLEAR, 0)) != 0) + goto fail1; + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +efx_mcdi_mac_stats_upload( + __in efx_nic_t *enp, + __in efsys_mem_t *esmp) +{ + efx_rc_t rc; + + /* + * The MC DMAs aggregate statistics for our convenience, so we can + * avoid having to pull the statistics buffer into the cache to + * maintain cumulative statistics. + */ + if ((rc = efx_mcdi_mac_stats(enp, enp->en_vport_id, esmp, + EFX_STATS_UPLOAD, 0)) != 0) + goto fail1; + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +efx_mcdi_mac_stats_periodic( + __in efx_nic_t *enp, + __in efsys_mem_t *esmp, + __in uint16_t period_ms, + __in boolean_t events) +{ + efx_rc_t rc; + + /* + * The MC DMAs aggregate statistics for our convenience, so we can + * avoid having to pull the statistics buffer into the cache to + * maintain cumulative statistics. + * Huntington uses a fixed 1sec period. + * Medford uses a fixed 1sec period before v6.2.1.1033 firmware. + */ + if (period_ms == 0) + rc = efx_mcdi_mac_stats(enp, enp->en_vport_id, NULL, + EFX_STATS_DISABLE, 0); + else if (events) + rc = efx_mcdi_mac_stats(enp, enp->en_vport_id, esmp, + EFX_STATS_ENABLE_EVENTS, period_ms); + else + rc = efx_mcdi_mac_stats(enp, enp->en_vport_id, esmp, + EFX_STATS_ENABLE_NOEVENTS, period_ms); + + if (rc != 0) + goto fail1; + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +#endif /* EFSYS_OPT_MAC_STATS */ + +#if EFX_OPTS_EF10() + +/* + * This function returns the pf and vf number of a function. If it is a pf the + * vf number is 0xffff. The vf number is the index of the vf on that + * function. So if you have 3 vfs on pf 0 the 3 vfs will return (pf=0,vf=0), + * (pf=0,vf=1), (pf=0,vf=2) aand the pf will return (pf=0, vf=0xffff). 
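+ *
+ * The pair can be fed straight into other per-function MCDI requests,
+ * for example to query this function's privilege mask:
+ *
+ *   uint32_t pf, vf, mask;
+ *
+ *   if (efx_mcdi_get_function_info(enp, &pf, &vf) == 0)
+ *           (void) efx_mcdi_privilege_mask(enp, pf, vf, &mask);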
+ */ + __checkReturn efx_rc_t +efx_mcdi_get_function_info( + __in efx_nic_t *enp, + __out uint32_t *pfp, + __out_opt uint32_t *vfp) +{ + efx_mcdi_req_t req; + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_FUNCTION_INFO_IN_LEN, + MC_CMD_GET_FUNCTION_INFO_OUT_LEN); + efx_rc_t rc; + + req.emr_cmd = MC_CMD_GET_FUNCTION_INFO; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_GET_FUNCTION_INFO_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_GET_FUNCTION_INFO_OUT_LEN; + + efx_mcdi_execute(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail1; + } + + if (req.emr_out_length_used < MC_CMD_GET_FUNCTION_INFO_OUT_LEN) { + rc = EMSGSIZE; + goto fail2; + } + + *pfp = MCDI_OUT_DWORD(req, GET_FUNCTION_INFO_OUT_PF); + if (vfp != NULL) + *vfp = MCDI_OUT_DWORD(req, GET_FUNCTION_INFO_OUT_VF); + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +efx_mcdi_privilege_mask( + __in efx_nic_t *enp, + __in uint32_t pf, + __in uint32_t vf, + __out uint32_t *maskp) +{ + efx_mcdi_req_t req; + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_PRIVILEGE_MASK_IN_LEN, + MC_CMD_PRIVILEGE_MASK_OUT_LEN); + efx_rc_t rc; + + req.emr_cmd = MC_CMD_PRIVILEGE_MASK; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_PRIVILEGE_MASK_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_PRIVILEGE_MASK_OUT_LEN; + + MCDI_IN_POPULATE_DWORD_2(req, PRIVILEGE_MASK_IN_FUNCTION, + PRIVILEGE_MASK_IN_FUNCTION_PF, pf, + PRIVILEGE_MASK_IN_FUNCTION_VF, vf); + + efx_mcdi_execute(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail1; + } + + if (req.emr_out_length_used < MC_CMD_PRIVILEGE_MASK_OUT_LEN) { + rc = EMSGSIZE; + goto fail2; + } + + *maskp = MCDI_OUT_DWORD(req, PRIVILEGE_MASK_OUT_OLD_MASK); + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +#endif /* EFX_OPTS_EF10() */ + + __checkReturn efx_rc_t +efx_mcdi_set_workaround( + __in efx_nic_t *enp, + __in uint32_t type, + __in boolean_t enabled, + __out_opt uint32_t *flagsp) +{ + efx_mcdi_req_t req; + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_WORKAROUND_IN_LEN, + MC_CMD_WORKAROUND_EXT_OUT_LEN); + efx_rc_t rc; + + req.emr_cmd = MC_CMD_WORKAROUND; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_WORKAROUND_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_WORKAROUND_OUT_LEN; + + MCDI_IN_SET_DWORD(req, WORKAROUND_IN_TYPE, type); + MCDI_IN_SET_DWORD(req, WORKAROUND_IN_ENABLED, enabled ? 
1 : 0); + + efx_mcdi_execute_quiet(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail1; + } + + if (flagsp != NULL) { + if (req.emr_out_length_used >= MC_CMD_WORKAROUND_EXT_OUT_LEN) + *flagsp = MCDI_OUT_DWORD(req, WORKAROUND_EXT_OUT_FLAGS); + else + *flagsp = 0; + } + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + + __checkReturn efx_rc_t +efx_mcdi_get_workarounds( + __in efx_nic_t *enp, + __out_opt uint32_t *implementedp, + __out_opt uint32_t *enabledp) +{ + efx_mcdi_req_t req; + EFX_MCDI_DECLARE_BUF(payload, 0, MC_CMD_GET_WORKAROUNDS_OUT_LEN); + efx_rc_t rc; + + req.emr_cmd = MC_CMD_GET_WORKAROUNDS; + req.emr_in_buf = NULL; + req.emr_in_length = 0; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_GET_WORKAROUNDS_OUT_LEN; + + efx_mcdi_execute(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail1; + } + + if (implementedp != NULL) { + *implementedp = + MCDI_OUT_DWORD(req, GET_WORKAROUNDS_OUT_IMPLEMENTED); + } + + if (enabledp != NULL) { + *enabledp = MCDI_OUT_DWORD(req, GET_WORKAROUNDS_OUT_ENABLED); + } + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +/* + * Size of media information page in accordance with SFF-8472 and SFF-8436. + * It is used in MCDI interface as well. + */ +#define EFX_PHY_MEDIA_INFO_PAGE_SIZE 0x80 + +/* + * Transceiver identifiers from SFF-8024 Table 4-1. + */ +#define EFX_SFF_TRANSCEIVER_ID_SFP 0x03 /* SFP/SFP+/SFP28 */ +#define EFX_SFF_TRANSCEIVER_ID_QSFP 0x0c /* QSFP */ +#define EFX_SFF_TRANSCEIVER_ID_QSFP_PLUS 0x0d /* QSFP+ or later */ +#define EFX_SFF_TRANSCEIVER_ID_QSFP28 0x11 /* QSFP28 or later */ + +static __checkReturn efx_rc_t +efx_mcdi_get_phy_media_info( + __in efx_nic_t *enp, + __in uint32_t mcdi_page, + __in uint8_t offset, + __in uint8_t len, + __out_bcount(len) uint8_t *data) +{ + efx_mcdi_req_t req; + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN, + MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN( + EFX_PHY_MEDIA_INFO_PAGE_SIZE)); + efx_rc_t rc; + + EFSYS_ASSERT((uint32_t)offset + len <= EFX_PHY_MEDIA_INFO_PAGE_SIZE); + + req.emr_cmd = MC_CMD_GET_PHY_MEDIA_INFO; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = + MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(EFX_PHY_MEDIA_INFO_PAGE_SIZE); + + MCDI_IN_SET_DWORD(req, GET_PHY_MEDIA_INFO_IN_PAGE, mcdi_page); + + efx_mcdi_execute(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail1; + } + + if (req.emr_out_length_used != + MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(EFX_PHY_MEDIA_INFO_PAGE_SIZE)) { + rc = EMSGSIZE; + goto fail2; + } + + if (MCDI_OUT_DWORD(req, GET_PHY_MEDIA_INFO_OUT_DATALEN) != + EFX_PHY_MEDIA_INFO_PAGE_SIZE) { + rc = EIO; + goto fail3; + } + + memcpy(data, + MCDI_OUT2(req, uint8_t, GET_PHY_MEDIA_INFO_OUT_DATA) + offset, + len); + + return (0); + +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +efx_mcdi_phy_module_get_info( + __in efx_nic_t *enp, + __in uint8_t dev_addr, + __in size_t offset, + __in size_t len, + __out_bcount(len) uint8_t *data) +{ + efx_port_t *epp = &(enp->en_port); + efx_rc_t rc; + uint32_t mcdi_lower_page; + uint32_t mcdi_upper_page; + uint8_t id; + + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); + + /* + * Map device address to MC_CMD_GET_PHY_MEDIA_INFO pages. + * Offset plus length interface allows to access page 0 only. + * I.e. 
non-zero upper pages are not accessible. + * See SFF-8472 section 4 Memory Organization and SFF-8436 section 7.6 + * QSFP+ Memory Map for details on how information is structured + * and accessible. + */ + switch (epp->ep_fixed_port_type) { + case EFX_PHY_MEDIA_SFP_PLUS: + case EFX_PHY_MEDIA_QSFP_PLUS: + /* Port type supports modules */ + break; + default: + rc = ENOTSUP; + goto fail1; + } + + /* + * For all supported port types, MCDI page 0 offset 0 holds the + * transceiver identifier. Probe to determine the data layout. + * Definitions from SFF-8024 Table 4-1. + */ + rc = efx_mcdi_get_phy_media_info(enp, + 0, 0, sizeof(id), &id); + if (rc != 0) + goto fail2; + + switch (id) { + case EFX_SFF_TRANSCEIVER_ID_SFP: + /* + * In accordance with SFF-8472 Diagnostic Monitoring + * Interface for Optical Transceivers section 4 Memory + * Organization two 2-wire addresses are defined. + */ + switch (dev_addr) { + /* Base information */ + case EFX_PHY_MEDIA_INFO_DEV_ADDR_SFP_BASE: + /* + * MCDI page 0 should be used to access lower + * page 0 (0x00 - 0x7f) at the device address 0xA0. + */ + mcdi_lower_page = 0; + /* + * MCDI page 1 should be used to access upper + * page 0 (0x80 - 0xff) at the device address 0xA0. + */ + mcdi_upper_page = 1; + break; + /* Diagnostics */ + case EFX_PHY_MEDIA_INFO_DEV_ADDR_SFP_DDM: + /* + * MCDI page 2 should be used to access lower + * page 0 (0x00 - 0x7f) at the device address 0xA2. + */ + mcdi_lower_page = 2; + /* + * MCDI page 3 should be used to access upper + * page 0 (0x80 - 0xff) at the device address 0xA2. + */ + mcdi_upper_page = 3; + break; + default: + rc = ENOTSUP; + goto fail3; + } + break; + case EFX_SFF_TRANSCEIVER_ID_QSFP: + case EFX_SFF_TRANSCEIVER_ID_QSFP_PLUS: + case EFX_SFF_TRANSCEIVER_ID_QSFP28: + switch (dev_addr) { + case EFX_PHY_MEDIA_INFO_DEV_ADDR_QSFP: + /* + * MCDI page -1 should be used to access lower page 0 + * (0x00 - 0x7f). + */ + mcdi_lower_page = (uint32_t)-1; + /* + * MCDI page 0 should be used to access upper page 0 + * (0x80h - 0xff). + */ + mcdi_upper_page = 0; + break; + default: + rc = ENOTSUP; + goto fail3; + } + break; + default: + rc = ENOTSUP; + goto fail3; + } + + EFX_STATIC_ASSERT(EFX_PHY_MEDIA_INFO_PAGE_SIZE <= 0xFF); + + if (offset < EFX_PHY_MEDIA_INFO_PAGE_SIZE) { + size_t read_len = + MIN(len, EFX_PHY_MEDIA_INFO_PAGE_SIZE - offset); + + rc = efx_mcdi_get_phy_media_info(enp, + mcdi_lower_page, (uint8_t)offset, (uint8_t)read_len, data); + if (rc != 0) + goto fail4; + + data += read_len; + len -= read_len; + + offset = 0; + } else { + offset -= EFX_PHY_MEDIA_INFO_PAGE_SIZE; + } + + if (len > 0) { + EFSYS_ASSERT3U(len, <=, EFX_PHY_MEDIA_INFO_PAGE_SIZE); + EFSYS_ASSERT3U(offset, <, EFX_PHY_MEDIA_INFO_PAGE_SIZE); + + rc = efx_mcdi_get_phy_media_info(enp, + mcdi_upper_page, (uint8_t)offset, (uint8_t)len, data); + if (rc != 0) + goto fail5; + } + + return (0); + +fail5: + EFSYS_PROBE(fail5); +fail4: + EFSYS_PROBE(fail4); +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +#endif /* EFSYS_OPT_MCDI */ diff --git a/src/spdk/dpdk/drivers/net/sfc/base/efx_mcdi.h b/src/spdk/dpdk/drivers/net/sfc/base/efx_mcdi.h new file mode 100644 index 000000000..d199060a4 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/base/efx_mcdi.h @@ -0,0 +1,420 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2009-2019 Solarflare Communications Inc. 
+ */ + +#ifndef _SYS_EFX_MCDI_H +#define _SYS_EFX_MCDI_H + +#include "efx.h" +#include "efx_regs_mcdi.h" + +#if EFSYS_OPT_NAMES +#include "efx_regs_mcdi_strs.h" +#endif /* EFSYS_OPT_NAMES */ + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * A reboot/assertion causes the MCDI status word to be set after the + * command word is set or a REBOOT event is sent. If we notice a reboot + * via these mechanisms then wait 10ms for the status word to be set. + */ +#define EFX_MCDI_STATUS_SLEEP_US 10000 + +struct efx_mcdi_req_s { + boolean_t emr_quiet; + /* Inputs: Command #, input buffer and length */ + unsigned int emr_cmd; + uint8_t *emr_in_buf; + size_t emr_in_length; + /* Outputs: retcode, buffer, length and length used */ + efx_rc_t emr_rc; + uint8_t *emr_out_buf; + size_t emr_out_length; + size_t emr_out_length_used; + /* Internals: low level transport details */ + unsigned int emr_err_code; + unsigned int emr_err_arg; +#if EFSYS_OPT_MCDI_PROXY_AUTH + uint32_t emr_proxy_handle; +#endif +}; + +typedef struct efx_mcdi_iface_s { + unsigned int emi_port; + unsigned int emi_max_version; + unsigned int emi_seq; + efx_mcdi_req_t *emi_pending_req; + boolean_t emi_ev_cpl; + boolean_t emi_new_epoch; + int emi_aborted; + uint32_t emi_poll_cnt; + uint32_t emi_mc_reboot_status; +} efx_mcdi_iface_t; + +extern void +efx_mcdi_execute( + __in efx_nic_t *enp, + __inout efx_mcdi_req_t *emrp); + +extern void +efx_mcdi_execute_quiet( + __in efx_nic_t *enp, + __inout efx_mcdi_req_t *emrp); + +extern void +efx_mcdi_ev_cpl( + __in efx_nic_t *enp, + __in unsigned int seq, + __in unsigned int outlen, + __in int errcode); + +#if EFSYS_OPT_MCDI_PROXY_AUTH +extern __checkReturn efx_rc_t +efx_mcdi_get_proxy_handle( + __in efx_nic_t *enp, + __in efx_mcdi_req_t *emrp, + __out uint32_t *handlep); + +extern void +efx_mcdi_ev_proxy_response( + __in efx_nic_t *enp, + __in unsigned int handle, + __in unsigned int status); +#endif + +#if EFSYS_OPT_MCDI_PROXY_AUTH_SERVER +extern void +efx_mcdi_ev_proxy_request( + __in efx_nic_t *enp, + __in unsigned int index); +#endif /* EFSYS_OPT_MCDI_PROXY_AUTH_SERVER */ + +extern void +efx_mcdi_ev_death( + __in efx_nic_t *enp, + __in int rc); + +extern __checkReturn efx_rc_t +efx_mcdi_request_errcode( + __in unsigned int err); + +extern void +efx_mcdi_raise_exception( + __in efx_nic_t *enp, + __in_opt efx_mcdi_req_t *emrp, + __in int rc); + +typedef enum efx_mcdi_boot_e { + EFX_MCDI_BOOT_PRIMARY, + EFX_MCDI_BOOT_SECONDARY, + EFX_MCDI_BOOT_ROM, +} efx_mcdi_boot_t; + +extern __checkReturn efx_rc_t +efx_mcdi_version( + __in efx_nic_t *enp, + __out_ecount_opt(4) uint16_t versionp[4], + __out_opt uint32_t *buildp, + __out_opt efx_mcdi_boot_t *statusp); + +extern __checkReturn efx_rc_t +efx_mcdi_get_capabilities( + __in efx_nic_t *enp, + __out_opt uint32_t *flagsp, + __out_opt uint16_t *rx_dpcpu_fw_idp, + __out_opt uint16_t *tx_dpcpu_fw_idp, + __out_opt uint32_t *flags2p, + __out_opt uint32_t *tso2ncp); + +extern __checkReturn efx_rc_t +efx_mcdi_read_assertion( + __in efx_nic_t *enp); + +extern __checkReturn efx_rc_t +efx_mcdi_exit_assertion_handler( + __in efx_nic_t *enp); + +extern __checkReturn efx_rc_t +efx_mcdi_drv_attach( + __in efx_nic_t *enp, + __in boolean_t attach); + +extern __checkReturn efx_rc_t +efx_mcdi_get_board_cfg( + __in efx_nic_t *enp, + __out_opt uint32_t *board_typep, + __out_opt efx_dword_t *capabilitiesp, + __out_ecount_opt(6) uint8_t mac_addrp[6]); + +extern __checkReturn efx_rc_t +efx_mcdi_get_phy_cfg( + __in efx_nic_t *enp); + +extern __checkReturn efx_rc_t 
+efx_mcdi_firmware_update_supported( + __in efx_nic_t *enp, + __out boolean_t *supportedp); + +extern __checkReturn efx_rc_t +efx_mcdi_macaddr_change_supported( + __in efx_nic_t *enp, + __out boolean_t *supportedp); + +extern __checkReturn efx_rc_t +efx_mcdi_link_control_supported( + __in efx_nic_t *enp, + __out boolean_t *supportedp); + +extern __checkReturn efx_rc_t +efx_mcdi_mac_spoofing_supported( + __in efx_nic_t *enp, + __out boolean_t *supportedp); + + +#if EFSYS_OPT_BIST +#if EFX_OPTS_EF10() +extern __checkReturn efx_rc_t +efx_mcdi_bist_enable_offline( + __in efx_nic_t *enp); +#endif /* EFX_OPTS_EF10() */ +extern __checkReturn efx_rc_t +efx_mcdi_bist_start( + __in efx_nic_t *enp, + __in efx_bist_type_t type); +#endif /* EFSYS_OPT_BIST */ + +extern __checkReturn efx_rc_t +efx_mcdi_get_resource_limits( + __in efx_nic_t *enp, + __out_opt uint32_t *nevqp, + __out_opt uint32_t *nrxqp, + __out_opt uint32_t *ntxqp); + +extern __checkReturn efx_rc_t +efx_mcdi_log_ctrl( + __in efx_nic_t *enp); + +extern __checkReturn efx_rc_t +efx_mcdi_mac_stats_clear( + __in efx_nic_t *enp); + +extern __checkReturn efx_rc_t +efx_mcdi_mac_stats_upload( + __in efx_nic_t *enp, + __in efsys_mem_t *esmp); + +extern __checkReturn efx_rc_t +efx_mcdi_mac_stats_periodic( + __in efx_nic_t *enp, + __in efsys_mem_t *esmp, + __in uint16_t period_ms, + __in boolean_t events); + + +#if EFSYS_OPT_LOOPBACK +extern __checkReturn efx_rc_t +efx_mcdi_get_loopback_modes( + __in efx_nic_t *enp); +#endif /* EFSYS_OPT_LOOPBACK */ + +extern __checkReturn efx_rc_t +efx_mcdi_phy_module_get_info( + __in efx_nic_t *enp, + __in uint8_t dev_addr, + __in size_t offset, + __in size_t len, + __out_bcount(len) uint8_t *data); + +#define MCDI_IN(_emr, _type, _ofst) \ + ((_type *)((_emr).emr_in_buf + (_ofst))) + +#define MCDI_IN2(_emr, _type, _ofst) \ + MCDI_IN(_emr, _type, MC_CMD_ ## _ofst ## _OFST) + +#define MCDI_IN_SET_BYTE(_emr, _ofst, _value) \ + EFX_POPULATE_BYTE_1(*MCDI_IN2(_emr, efx_byte_t, _ofst), \ + EFX_BYTE_0, _value) + +#define MCDI_IN_SET_WORD(_emr, _ofst, _value) \ + EFX_POPULATE_WORD_1(*MCDI_IN2(_emr, efx_word_t, _ofst), \ + EFX_WORD_0, _value) + +#define MCDI_IN_SET_DWORD(_emr, _ofst, _value) \ + EFX_POPULATE_DWORD_1(*MCDI_IN2(_emr, efx_dword_t, _ofst), \ + EFX_DWORD_0, _value) + +#define MCDI_IN_SET_DWORD_FIELD(_emr, _ofst, _field, _value) \ + EFX_SET_DWORD_FIELD(*MCDI_IN2(_emr, efx_dword_t, _ofst), \ + MC_CMD_ ## _field, _value) + +#define MCDI_IN_POPULATE_DWORD_1(_emr, _ofst, _field1, _value1) \ + EFX_POPULATE_DWORD_1(*MCDI_IN2(_emr, efx_dword_t, _ofst), \ + MC_CMD_ ## _field1, _value1) + +#define MCDI_IN_POPULATE_DWORD_2(_emr, _ofst, _field1, _value1, \ + _field2, _value2) \ + EFX_POPULATE_DWORD_2(*MCDI_IN2(_emr, efx_dword_t, _ofst), \ + MC_CMD_ ## _field1, _value1, \ + MC_CMD_ ## _field2, _value2) + +#define MCDI_IN_POPULATE_DWORD_3(_emr, _ofst, _field1, _value1, \ + _field2, _value2, _field3, _value3) \ + EFX_POPULATE_DWORD_3(*MCDI_IN2(_emr, efx_dword_t, _ofst), \ + MC_CMD_ ## _field1, _value1, \ + MC_CMD_ ## _field2, _value2, \ + MC_CMD_ ## _field3, _value3) + +#define MCDI_IN_POPULATE_DWORD_4(_emr, _ofst, _field1, _value1, \ + _field2, _value2, _field3, _value3, _field4, _value4) \ + EFX_POPULATE_DWORD_4(*MCDI_IN2(_emr, efx_dword_t, _ofst), \ + MC_CMD_ ## _field1, _value1, \ + MC_CMD_ ## _field2, _value2, \ + MC_CMD_ ## _field3, _value3, \ + MC_CMD_ ## _field4, _value4) + +#define MCDI_IN_POPULATE_DWORD_5(_emr, _ofst, _field1, _value1, \ + _field2, _value2, _field3, _value3, _field4, _value4, \ + _field5, _value5) \ + 
EFX_POPULATE_DWORD_5(*MCDI_IN2(_emr, efx_dword_t, _ofst), \ + MC_CMD_ ## _field1, _value1, \ + MC_CMD_ ## _field2, _value2, \ + MC_CMD_ ## _field3, _value3, \ + MC_CMD_ ## _field4, _value4, \ + MC_CMD_ ## _field5, _value5) + +#define MCDI_IN_POPULATE_DWORD_6(_emr, _ofst, _field1, _value1, \ + _field2, _value2, _field3, _value3, _field4, _value4, \ + _field5, _value5, _field6, _value6) \ + EFX_POPULATE_DWORD_6(*MCDI_IN2(_emr, efx_dword_t, _ofst), \ + MC_CMD_ ## _field1, _value1, \ + MC_CMD_ ## _field2, _value2, \ + MC_CMD_ ## _field3, _value3, \ + MC_CMD_ ## _field4, _value4, \ + MC_CMD_ ## _field5, _value5, \ + MC_CMD_ ## _field6, _value6) + +#define MCDI_IN_POPULATE_DWORD_7(_emr, _ofst, _field1, _value1, \ + _field2, _value2, _field3, _value3, _field4, _value4, \ + _field5, _value5, _field6, _value6, _field7, _value7) \ + EFX_POPULATE_DWORD_7(*MCDI_IN2(_emr, efx_dword_t, _ofst), \ + MC_CMD_ ## _field1, _value1, \ + MC_CMD_ ## _field2, _value2, \ + MC_CMD_ ## _field3, _value3, \ + MC_CMD_ ## _field4, _value4, \ + MC_CMD_ ## _field5, _value5, \ + MC_CMD_ ## _field6, _value6, \ + MC_CMD_ ## _field7, _value7) + +#define MCDI_IN_POPULATE_DWORD_8(_emr, _ofst, _field1, _value1, \ + _field2, _value2, _field3, _value3, _field4, _value4, \ + _field5, _value5, _field6, _value6, _field7, _value7, \ + _field8, _value8) \ + EFX_POPULATE_DWORD_8(*MCDI_IN2(_emr, efx_dword_t, _ofst), \ + MC_CMD_ ## _field1, _value1, \ + MC_CMD_ ## _field2, _value2, \ + MC_CMD_ ## _field3, _value3, \ + MC_CMD_ ## _field4, _value4, \ + MC_CMD_ ## _field5, _value5, \ + MC_CMD_ ## _field6, _value6, \ + MC_CMD_ ## _field7, _value7, \ + MC_CMD_ ## _field8, _value8) + +#define MCDI_IN_POPULATE_DWORD_9(_emr, _ofst, _field1, _value1, \ + _field2, _value2, _field3, _value3, _field4, _value4, \ + _field5, _value5, _field6, _value6, _field7, _value7, \ + _field8, _value8, _field9, _value9) \ + EFX_POPULATE_DWORD_9(*MCDI_IN2(_emr, efx_dword_t, _ofst), \ + MC_CMD_ ## _field1, _value1, \ + MC_CMD_ ## _field2, _value2, \ + MC_CMD_ ## _field3, _value3, \ + MC_CMD_ ## _field4, _value4, \ + MC_CMD_ ## _field5, _value5, \ + MC_CMD_ ## _field6, _value6, \ + MC_CMD_ ## _field7, _value7, \ + MC_CMD_ ## _field8, _value8, \ + MC_CMD_ ## _field9, _value9) + +#define MCDI_IN_POPULATE_DWORD_10(_emr, _ofst, _field1, _value1, \ + _field2, _value2, _field3, _value3, _field4, _value4, \ + _field5, _value5, _field6, _value6, _field7, _value7, \ + _field8, _value8, _field9, _value9, _field10, _value10) \ + EFX_POPULATE_DWORD_10(*MCDI_IN2(_emr, efx_dword_t, _ofst), \ + MC_CMD_ ## _field1, _value1, \ + MC_CMD_ ## _field2, _value2, \ + MC_CMD_ ## _field3, _value3, \ + MC_CMD_ ## _field4, _value4, \ + MC_CMD_ ## _field5, _value5, \ + MC_CMD_ ## _field6, _value6, \ + MC_CMD_ ## _field7, _value7, \ + MC_CMD_ ## _field8, _value8, \ + MC_CMD_ ## _field9, _value9, \ + MC_CMD_ ## _field10, _value10) + +#define MCDI_OUT(_emr, _type, _ofst) \ + ((_type *)((_emr).emr_out_buf + (_ofst))) + +#define MCDI_OUT2(_emr, _type, _ofst) \ + MCDI_OUT(_emr, _type, MC_CMD_ ## _ofst ## _OFST) + +#define MCDI_OUT_BYTE(_emr, _ofst) \ + EFX_BYTE_FIELD(*MCDI_OUT2(_emr, efx_byte_t, _ofst), \ + EFX_BYTE_0) + +#define MCDI_OUT_WORD(_emr, _ofst) \ + EFX_WORD_FIELD(*MCDI_OUT2(_emr, efx_word_t, _ofst), \ + EFX_WORD_0) + +#define MCDI_OUT_WORD_FIELD(_emr, _ofst, _field) \ + EFX_WORD_FIELD(*MCDI_OUT2(_emr, efx_word_t, _ofst), \ + MC_CMD_ ## _field) + +#define MCDI_OUT_DWORD(_emr, _ofst) \ + EFX_DWORD_FIELD(*MCDI_OUT2(_emr, efx_dword_t, _ofst), \ + EFX_DWORD_0) + +#define 
MCDI_OUT_DWORD_FIELD(_emr, _ofst, _field) \ + EFX_DWORD_FIELD(*MCDI_OUT2(_emr, efx_dword_t, _ofst), \ + MC_CMD_ ## _field) + +#define MCDI_EV_FIELD(_eqp, _field) \ + EFX_QWORD_FIELD(*_eqp, MCDI_EVENT_ ## _field) + +#define MCDI_CMD_DWORD_FIELD(_edp, _field) \ + EFX_DWORD_FIELD(*_edp, MC_CMD_ ## _field) + +#define EFX_MCDI_HAVE_PRIVILEGE(mask, priv) \ + (((mask) & (MC_CMD_PRIVILEGE_MASK_IN_GRP_ ## priv)) == \ + (MC_CMD_PRIVILEGE_MASK_IN_GRP_ ## priv)) + +#define EFX_MCDI_BUF_SIZE(_in_len, _out_len) \ + EFX_P2ROUNDUP(size_t, \ + MAX(MAX(_in_len, _out_len), (2 * sizeof (efx_dword_t))),\ + sizeof (efx_dword_t)) + +/* + * The buffer size must be a multiple of dword to ensure that MCDI works + * properly with Siena based boards (which use on-chip buffer). Also, it + * should be at minimum the size of two dwords to allow space for extended + * error responses if the request/response buffer sizes are smaller. + */ +#define EFX_MCDI_DECLARE_BUF(_name, _in_len, _out_len) \ + uint8_t _name[EFX_MCDI_BUF_SIZE(_in_len, _out_len)] = {0} + +typedef enum efx_mcdi_feature_id_e { + EFX_MCDI_FEATURE_FW_UPDATE = 0, + EFX_MCDI_FEATURE_LINK_CONTROL, + EFX_MCDI_FEATURE_MACADDR_CHANGE, + EFX_MCDI_FEATURE_MAC_SPOOFING, + EFX_MCDI_FEATURE_NIDS +} efx_mcdi_feature_id_t; + +#ifdef __cplusplus +} +#endif + +#endif /* _SYS_EFX_MCDI_H */ diff --git a/src/spdk/dpdk/drivers/net/sfc/base/efx_mon.c b/src/spdk/dpdk/drivers/net/sfc/base/efx_mon.c new file mode 100644 index 000000000..60212a59e --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/base/efx_mon.c @@ -0,0 +1,850 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2007-2019 Solarflare Communications Inc. + */ + +#include "efx.h" +#include "efx_impl.h" + +#if EFSYS_OPT_MON_MCDI +#include "mcdi_mon.h" +#endif + +#if EFSYS_OPT_NAMES + +static const char * const __efx_mon_name[] = { + "", + "sfx90x0", + "sfx91x0", + "sfx92x0" +}; + + const char * +efx_mon_name( + __in efx_nic_t *enp) +{ + efx_nic_cfg_t *encp = &(enp->en_nic_cfg); + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + + EFSYS_ASSERT(encp->enc_mon_type != EFX_MON_INVALID); + EFSYS_ASSERT3U(encp->enc_mon_type, <, EFX_MON_NTYPES); + return (__efx_mon_name[encp->enc_mon_type]); +} + +#endif /* EFSYS_OPT_NAMES */ + +#if EFSYS_OPT_MON_MCDI +static const efx_mon_ops_t __efx_mon_mcdi_ops = { +#if EFSYS_OPT_MON_STATS + mcdi_mon_stats_update, /* emo_stats_update */ + mcdi_mon_limits_update, /* emo_limits_update */ +#endif /* EFSYS_OPT_MON_STATS */ +}; +#endif + + + __checkReturn efx_rc_t +efx_mon_init( + __in efx_nic_t *enp) +{ + efx_nic_cfg_t *encp = &(enp->en_nic_cfg); + efx_mon_t *emp = &(enp->en_mon); + const efx_mon_ops_t *emop; + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); + + if (enp->en_mod_flags & EFX_MOD_MON) { + rc = EINVAL; + goto fail1; + } + + enp->en_mod_flags |= EFX_MOD_MON; + + emp->em_type = encp->enc_mon_type; + + EFSYS_ASSERT(encp->enc_mon_type != EFX_MON_INVALID); + switch (emp->em_type) { +#if EFSYS_OPT_MON_MCDI + case EFX_MON_SFC90X0: + case EFX_MON_SFC91X0: + case EFX_MON_SFC92X0: + emop = &__efx_mon_mcdi_ops; + break; +#endif + default: + rc = ENOTSUP; + goto fail2; + } + + emp->em_emop = emop; + return (0); + +fail2: + EFSYS_PROBE(fail2); + + emp->em_type = EFX_MON_INVALID; + + enp->en_mod_flags &= ~EFX_MOD_MON; + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +#if EFSYS_OPT_MON_STATS + +#if EFSYS_OPT_NAMES + +/* START MKCONFIG GENERATED 
MonitorStatNamesBlock 277c17eda1a6d1a4 */ +static const char * const __mon_stat_name[] = { + "controller_temp", + "phy_common_temp", + "controller_cooling", + "phy0_temp", + "phy0_cooling", + "phy1_temp", + "phy1_cooling", + "in_1v0", + "in_1v2", + "in_1v8", + "in_2v5", + "in_3v3", + "in_12v0", + "in_1v2a", + "in_vref", + "out_vaoe", + "aoe_temp", + "psu_aoe_temp", + "psu_temp", + "fan_0", + "fan_1", + "fan_2", + "fan_3", + "fan_4", + "in_vaoe", + "out_iaoe", + "in_iaoe", + "nic_power", + "in_0v9", + "in_i0v9", + "in_i1v2", + "in_0v9_adc", + "controller_2_temp", + "vreg_internal_temp", + "vreg_0v9_temp", + "vreg_1v2_temp", + "controller_vptat", + "controller_internal_temp", + "controller_vptat_extadc", + "controller_internal_temp_extadc", + "ambient_temp", + "airflow", + "vdd08d_vss08d_csr", + "vdd08d_vss08d_csr_extadc", + "hotpoint_temp", + "phy_power_port0", + "phy_power_port1", + "mum_vcc", + "in_0v9_a", + "in_i0v9_a", + "vreg_0v9_a_temp", + "in_0v9_b", + "in_i0v9_b", + "vreg_0v9_b_temp", + "ccom_avreg_1v2_supply", + "ccom_avreg_1v2_supply_extadc", + "ccom_avreg_1v8_supply", + "ccom_avreg_1v8_supply_extadc", + "controller_master_vptat", + "controller_master_internal_temp", + "controller_master_vptat_extadc", + "controller_master_internal_temp_extadc", + "controller_slave_vptat", + "controller_slave_internal_temp", + "controller_slave_vptat_extadc", + "controller_slave_internal_temp_extadc", + "sodimm_vout", + "sodimm_0_temp", + "sodimm_1_temp", + "phy0_vcc", + "phy1_vcc", + "controller_tdiode_temp", + "board_front_temp", + "board_back_temp", + "in_i1v8", + "in_i2v5", + "in_i3v3", + "in_i12v0", + "in_1v3", + "in_i1v3", +}; + +/* END MKCONFIG GENERATED MonitorStatNamesBlock */ + + const char * +efx_mon_stat_name( + __in efx_nic_t *enp, + __in efx_mon_stat_t id) +{ + _NOTE(ARGUNUSED(enp)) + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + + EFSYS_ASSERT3U(id, <, EFX_MON_NSTATS); + return (__mon_stat_name[id]); +} + +typedef struct _stat_description_t { + efx_mon_stat_t stat; + const char *desc; +} stat_description_t; + +/* START MKCONFIG GENERATED MonitorStatDescriptionsBlock f072138f16d2e1f8 */ +static const char *__mon_stat_description[] = { + MC_CMD_SENSOR_CONTROLLER_TEMP_ENUM_STR, + MC_CMD_SENSOR_PHY_COMMON_TEMP_ENUM_STR, + MC_CMD_SENSOR_CONTROLLER_COOLING_ENUM_STR, + MC_CMD_SENSOR_PHY0_TEMP_ENUM_STR, + MC_CMD_SENSOR_PHY0_COOLING_ENUM_STR, + MC_CMD_SENSOR_PHY1_TEMP_ENUM_STR, + MC_CMD_SENSOR_PHY1_COOLING_ENUM_STR, + MC_CMD_SENSOR_IN_1V0_ENUM_STR, + MC_CMD_SENSOR_IN_1V2_ENUM_STR, + MC_CMD_SENSOR_IN_1V8_ENUM_STR, + MC_CMD_SENSOR_IN_2V5_ENUM_STR, + MC_CMD_SENSOR_IN_3V3_ENUM_STR, + MC_CMD_SENSOR_IN_12V0_ENUM_STR, + MC_CMD_SENSOR_IN_1V2A_ENUM_STR, + MC_CMD_SENSOR_IN_VREF_ENUM_STR, + MC_CMD_SENSOR_OUT_VAOE_ENUM_STR, + MC_CMD_SENSOR_AOE_TEMP_ENUM_STR, + MC_CMD_SENSOR_PSU_AOE_TEMP_ENUM_STR, + MC_CMD_SENSOR_PSU_TEMP_ENUM_STR, + MC_CMD_SENSOR_FAN_0_ENUM_STR, + MC_CMD_SENSOR_FAN_1_ENUM_STR, + MC_CMD_SENSOR_FAN_2_ENUM_STR, + MC_CMD_SENSOR_FAN_3_ENUM_STR, + MC_CMD_SENSOR_FAN_4_ENUM_STR, + MC_CMD_SENSOR_IN_VAOE_ENUM_STR, + MC_CMD_SENSOR_OUT_IAOE_ENUM_STR, + MC_CMD_SENSOR_IN_IAOE_ENUM_STR, + MC_CMD_SENSOR_NIC_POWER_ENUM_STR, + MC_CMD_SENSOR_IN_0V9_ENUM_STR, + MC_CMD_SENSOR_IN_I0V9_ENUM_STR, + MC_CMD_SENSOR_IN_I1V2_ENUM_STR, + MC_CMD_SENSOR_IN_0V9_ADC_ENUM_STR, + MC_CMD_SENSOR_CONTROLLER_2_TEMP_ENUM_STR, + MC_CMD_SENSOR_VREG_INTERNAL_TEMP_ENUM_STR, + MC_CMD_SENSOR_VREG_0V9_TEMP_ENUM_STR, + MC_CMD_SENSOR_VREG_1V2_TEMP_ENUM_STR, + MC_CMD_SENSOR_CONTROLLER_VPTAT_ENUM_STR, + 
MC_CMD_SENSOR_CONTROLLER_INTERNAL_TEMP_ENUM_STR, + MC_CMD_SENSOR_CONTROLLER_VPTAT_EXTADC_ENUM_STR, + MC_CMD_SENSOR_CONTROLLER_INTERNAL_TEMP_EXTADC_ENUM_STR, + MC_CMD_SENSOR_AMBIENT_TEMP_ENUM_STR, + MC_CMD_SENSOR_AIRFLOW_ENUM_STR, + MC_CMD_SENSOR_VDD08D_VSS08D_CSR_ENUM_STR, + MC_CMD_SENSOR_VDD08D_VSS08D_CSR_EXTADC_ENUM_STR, + MC_CMD_SENSOR_HOTPOINT_TEMP_ENUM_STR, + MC_CMD_SENSOR_PHY_POWER_PORT0_ENUM_STR, + MC_CMD_SENSOR_PHY_POWER_PORT1_ENUM_STR, + MC_CMD_SENSOR_MUM_VCC_ENUM_STR, + MC_CMD_SENSOR_IN_0V9_A_ENUM_STR, + MC_CMD_SENSOR_IN_I0V9_A_ENUM_STR, + MC_CMD_SENSOR_VREG_0V9_A_TEMP_ENUM_STR, + MC_CMD_SENSOR_IN_0V9_B_ENUM_STR, + MC_CMD_SENSOR_IN_I0V9_B_ENUM_STR, + MC_CMD_SENSOR_VREG_0V9_B_TEMP_ENUM_STR, + MC_CMD_SENSOR_CCOM_AVREG_1V2_SUPPLY_ENUM_STR, + MC_CMD_SENSOR_CCOM_AVREG_1V2_SUPPLY_EXTADC_ENUM_STR, + MC_CMD_SENSOR_CCOM_AVREG_1V8_SUPPLY_ENUM_STR, + MC_CMD_SENSOR_CCOM_AVREG_1V8_SUPPLY_EXTADC_ENUM_STR, + MC_CMD_SENSOR_CONTROLLER_MASTER_VPTAT_ENUM_STR, + MC_CMD_SENSOR_CONTROLLER_MASTER_INTERNAL_TEMP_ENUM_STR, + MC_CMD_SENSOR_CONTROLLER_MASTER_VPTAT_EXTADC_ENUM_STR, + MC_CMD_SENSOR_CONTROLLER_MASTER_INTERNAL_TEMP_EXTADC_ENUM_STR, + MC_CMD_SENSOR_CONTROLLER_SLAVE_VPTAT_ENUM_STR, + MC_CMD_SENSOR_CONTROLLER_SLAVE_INTERNAL_TEMP_ENUM_STR, + MC_CMD_SENSOR_CONTROLLER_SLAVE_VPTAT_EXTADC_ENUM_STR, + MC_CMD_SENSOR_CONTROLLER_SLAVE_INTERNAL_TEMP_EXTADC_ENUM_STR, + MC_CMD_SENSOR_SODIMM_VOUT_ENUM_STR, + MC_CMD_SENSOR_SODIMM_0_TEMP_ENUM_STR, + MC_CMD_SENSOR_SODIMM_1_TEMP_ENUM_STR, + MC_CMD_SENSOR_PHY0_VCC_ENUM_STR, + MC_CMD_SENSOR_PHY1_VCC_ENUM_STR, + MC_CMD_SENSOR_CONTROLLER_TDIODE_TEMP_ENUM_STR, + MC_CMD_SENSOR_BOARD_FRONT_TEMP_ENUM_STR, + MC_CMD_SENSOR_BOARD_BACK_TEMP_ENUM_STR, + MC_CMD_SENSOR_IN_I1V8_ENUM_STR, + MC_CMD_SENSOR_IN_I2V5_ENUM_STR, + MC_CMD_SENSOR_IN_I3V3_ENUM_STR, + MC_CMD_SENSOR_IN_I12V0_ENUM_STR, + MC_CMD_SENSOR_IN_1V3_ENUM_STR, + MC_CMD_SENSOR_IN_I1V3_ENUM_STR, +}; + +/* END MKCONFIG GENERATED MonitorStatDescriptionsBlock */ + + const char * +efx_mon_stat_description( + __in efx_nic_t *enp, + __in efx_mon_stat_t id) +{ + _NOTE(ARGUNUSED(enp)) + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + + EFSYS_ASSERT3U(id, <, EFX_MON_NSTATS); + return (__mon_stat_description[id]); +} + +#endif /* EFSYS_OPT_NAMES */ + +/* START MKCONFIG GENERATED MonitorMcdiMappingBlock 173eee0a5599996a */ + __checkReturn boolean_t +efx_mon_mcdi_to_efx_stat( + __in int mcdi_index, + __out efx_mon_stat_t *statp) +{ + + if ((mcdi_index % (MC_CMD_SENSOR_PAGE0_NEXT + 1)) == + MC_CMD_SENSOR_PAGE0_NEXT) { + *statp = EFX_MON_NSTATS; + return (B_FALSE); + } + + switch (mcdi_index) { + case MC_CMD_SENSOR_IN_I0V9: + *statp = EFX_MON_STAT_IN_I0V9; + break; + case MC_CMD_SENSOR_CONTROLLER_SLAVE_VPTAT_EXTADC: + *statp = EFX_MON_STAT_CONTROLLER_SLAVE_VPTAT_EXTADC; + break; + case MC_CMD_SENSOR_CONTROLLER_SLAVE_VPTAT: + *statp = EFX_MON_STAT_CONTROLLER_SLAVE_VPTAT; + break; + case MC_CMD_SENSOR_PSU_TEMP: + *statp = EFX_MON_STAT_PSU_TEMP; + break; + case MC_CMD_SENSOR_FAN_2: + *statp = EFX_MON_STAT_FAN_2; + break; + case MC_CMD_SENSOR_CONTROLLER_INTERNAL_TEMP_EXTADC: + *statp = EFX_MON_STAT_CONTROLLER_INTERNAL_TEMP_EXTADC; + break; + case MC_CMD_SENSOR_BOARD_BACK_TEMP: + *statp = EFX_MON_STAT_BOARD_BACK_TEMP; + break; + case MC_CMD_SENSOR_IN_1V3: + *statp = EFX_MON_STAT_IN_1V3; + break; + case MC_CMD_SENSOR_CONTROLLER_TDIODE_TEMP: + *statp = EFX_MON_STAT_CONTROLLER_TDIODE_TEMP; + break; + case MC_CMD_SENSOR_IN_2V5: + *statp = EFX_MON_STAT_IN_2V5; + break; + case MC_CMD_SENSOR_PHY_COMMON_TEMP: + *statp = 
EFX_MON_STAT_PHY_COMMON_TEMP; + break; + case MC_CMD_SENSOR_PHY1_TEMP: + *statp = EFX_MON_STAT_PHY1_TEMP; + break; + case MC_CMD_SENSOR_VREG_INTERNAL_TEMP: + *statp = EFX_MON_STAT_VREG_INTERNAL_TEMP; + break; + case MC_CMD_SENSOR_IN_1V0: + *statp = EFX_MON_STAT_IN_1V0; + break; + case MC_CMD_SENSOR_FAN_1: + *statp = EFX_MON_STAT_FAN_1; + break; + case MC_CMD_SENSOR_IN_1V2: + *statp = EFX_MON_STAT_IN_1V2; + break; + case MC_CMD_SENSOR_FAN_3: + *statp = EFX_MON_STAT_FAN_3; + break; + case MC_CMD_SENSOR_IN_1V2A: + *statp = EFX_MON_STAT_IN_1V2A; + break; + case MC_CMD_SENSOR_SODIMM_0_TEMP: + *statp = EFX_MON_STAT_SODIMM_0_TEMP; + break; + case MC_CMD_SENSOR_IN_1V8: + *statp = EFX_MON_STAT_IN_1V8; + break; + case MC_CMD_SENSOR_IN_VREF: + *statp = EFX_MON_STAT_IN_VREF; + break; + case MC_CMD_SENSOR_SODIMM_VOUT: + *statp = EFX_MON_STAT_SODIMM_VOUT; + break; + case MC_CMD_SENSOR_CCOM_AVREG_1V2_SUPPLY: + *statp = EFX_MON_STAT_CCOM_AVREG_1V2_SUPPLY; + break; + case MC_CMD_SENSOR_IN_I1V2: + *statp = EFX_MON_STAT_IN_I1V2; + break; + case MC_CMD_SENSOR_IN_I1V3: + *statp = EFX_MON_STAT_IN_I1V3; + break; + case MC_CMD_SENSOR_AIRFLOW: + *statp = EFX_MON_STAT_AIRFLOW; + break; + case MC_CMD_SENSOR_HOTPOINT_TEMP: + *statp = EFX_MON_STAT_HOTPOINT_TEMP; + break; + case MC_CMD_SENSOR_VDD08D_VSS08D_CSR: + *statp = EFX_MON_STAT_VDD08D_VSS08D_CSR; + break; + case MC_CMD_SENSOR_AOE_TEMP: + *statp = EFX_MON_STAT_AOE_TEMP; + break; + case MC_CMD_SENSOR_IN_I1V8: + *statp = EFX_MON_STAT_IN_I1V8; + break; + case MC_CMD_SENSOR_IN_I2V5: + *statp = EFX_MON_STAT_IN_I2V5; + break; + case MC_CMD_SENSOR_PHY1_COOLING: + *statp = EFX_MON_STAT_PHY1_COOLING; + break; + case MC_CMD_SENSOR_CCOM_AVREG_1V8_SUPPLY_EXTADC: + *statp = EFX_MON_STAT_CCOM_AVREG_1V8_SUPPLY_EXTADC; + break; + case MC_CMD_SENSOR_IN_0V9_ADC: + *statp = EFX_MON_STAT_IN_0V9_ADC; + break; + case MC_CMD_SENSOR_VREG_0V9_A_TEMP: + *statp = EFX_MON_STAT_VREG_0V9_A_TEMP; + break; + case MC_CMD_SENSOR_CONTROLLER_MASTER_VPTAT: + *statp = EFX_MON_STAT_CONTROLLER_MASTER_VPTAT; + break; + case MC_CMD_SENSOR_PHY0_VCC: + *statp = EFX_MON_STAT_PHY0_VCC; + break; + case MC_CMD_SENSOR_PHY0_COOLING: + *statp = EFX_MON_STAT_PHY0_COOLING; + break; + case MC_CMD_SENSOR_PSU_AOE_TEMP: + *statp = EFX_MON_STAT_PSU_AOE_TEMP; + break; + case MC_CMD_SENSOR_VREG_0V9_TEMP: + *statp = EFX_MON_STAT_VREG_0V9_TEMP; + break; + case MC_CMD_SENSOR_IN_I0V9_A: + *statp = EFX_MON_STAT_IN_I0V9_A; + break; + case MC_CMD_SENSOR_IN_I3V3: + *statp = EFX_MON_STAT_IN_I3V3; + break; + case MC_CMD_SENSOR_BOARD_FRONT_TEMP: + *statp = EFX_MON_STAT_BOARD_FRONT_TEMP; + break; + case MC_CMD_SENSOR_OUT_VAOE: + *statp = EFX_MON_STAT_OUT_VAOE; + break; + case MC_CMD_SENSOR_VDD08D_VSS08D_CSR_EXTADC: + *statp = EFX_MON_STAT_VDD08D_VSS08D_CSR_EXTADC; + break; + case MC_CMD_SENSOR_IN_I12V0: + *statp = EFX_MON_STAT_IN_I12V0; + break; + case MC_CMD_SENSOR_PHY_POWER_PORT1: + *statp = EFX_MON_STAT_PHY_POWER_PORT1; + break; + case MC_CMD_SENSOR_PHY_POWER_PORT0: + *statp = EFX_MON_STAT_PHY_POWER_PORT0; + break; + case MC_CMD_SENSOR_CONTROLLER_SLAVE_INTERNAL_TEMP_EXTADC: + *statp = EFX_MON_STAT_CONTROLLER_SLAVE_INTERNAL_TEMP_EXTADC; + break; + case MC_CMD_SENSOR_CONTROLLER_MASTER_INTERNAL_TEMP: + *statp = EFX_MON_STAT_CONTROLLER_MASTER_INTERNAL_TEMP; + break; + case MC_CMD_SENSOR_CONTROLLER_TEMP: + *statp = EFX_MON_STAT_CONTROLLER_TEMP; + break; + case MC_CMD_SENSOR_IN_IAOE: + *statp = EFX_MON_STAT_IN_IAOE; + break; + case MC_CMD_SENSOR_IN_VAOE: + *statp = EFX_MON_STAT_IN_VAOE; + break; + case 
MC_CMD_SENSOR_CONTROLLER_MASTER_VPTAT_EXTADC: + *statp = EFX_MON_STAT_CONTROLLER_MASTER_VPTAT_EXTADC; + break; + case MC_CMD_SENSOR_CCOM_AVREG_1V8_SUPPLY: + *statp = EFX_MON_STAT_CCOM_AVREG_1V8_SUPPLY; + break; + case MC_CMD_SENSOR_PHY1_VCC: + *statp = EFX_MON_STAT_PHY1_VCC; + break; + case MC_CMD_SENSOR_CONTROLLER_COOLING: + *statp = EFX_MON_STAT_CONTROLLER_COOLING; + break; + case MC_CMD_SENSOR_AMBIENT_TEMP: + *statp = EFX_MON_STAT_AMBIENT_TEMP; + break; + case MC_CMD_SENSOR_IN_3V3: + *statp = EFX_MON_STAT_IN_3V3; + break; + case MC_CMD_SENSOR_PHY0_TEMP: + *statp = EFX_MON_STAT_PHY0_TEMP; + break; + case MC_CMD_SENSOR_SODIMM_1_TEMP: + *statp = EFX_MON_STAT_SODIMM_1_TEMP; + break; + case MC_CMD_SENSOR_MUM_VCC: + *statp = EFX_MON_STAT_MUM_VCC; + break; + case MC_CMD_SENSOR_VREG_0V9_B_TEMP: + *statp = EFX_MON_STAT_VREG_0V9_B_TEMP; + break; + case MC_CMD_SENSOR_CONTROLLER_SLAVE_INTERNAL_TEMP: + *statp = EFX_MON_STAT_CONTROLLER_SLAVE_INTERNAL_TEMP; + break; + case MC_CMD_SENSOR_FAN_4: + *statp = EFX_MON_STAT_FAN_4; + break; + case MC_CMD_SENSOR_CONTROLLER_2_TEMP: + *statp = EFX_MON_STAT_CONTROLLER_2_TEMP; + break; + case MC_CMD_SENSOR_CCOM_AVREG_1V2_SUPPLY_EXTADC: + *statp = EFX_MON_STAT_CCOM_AVREG_1V2_SUPPLY_EXTADC; + break; + case MC_CMD_SENSOR_IN_0V9_A: + *statp = EFX_MON_STAT_IN_0V9_A; + break; + case MC_CMD_SENSOR_CONTROLLER_VPTAT_EXTADC: + *statp = EFX_MON_STAT_CONTROLLER_VPTAT_EXTADC; + break; + case MC_CMD_SENSOR_IN_0V9: + *statp = EFX_MON_STAT_IN_0V9; + break; + case MC_CMD_SENSOR_IN_I0V9_B: + *statp = EFX_MON_STAT_IN_I0V9_B; + break; + case MC_CMD_SENSOR_NIC_POWER: + *statp = EFX_MON_STAT_NIC_POWER; + break; + case MC_CMD_SENSOR_IN_12V0: + *statp = EFX_MON_STAT_IN_12V0; + break; + case MC_CMD_SENSOR_OUT_IAOE: + *statp = EFX_MON_STAT_OUT_IAOE; + break; + case MC_CMD_SENSOR_CONTROLLER_VPTAT: + *statp = EFX_MON_STAT_CONTROLLER_VPTAT; + break; + case MC_CMD_SENSOR_CONTROLLER_MASTER_INTERNAL_TEMP_EXTADC: + *statp = EFX_MON_STAT_CONTROLLER_MASTER_INTERNAL_TEMP_EXTADC; + break; + case MC_CMD_SENSOR_CONTROLLER_INTERNAL_TEMP: + *statp = EFX_MON_STAT_CONTROLLER_INTERNAL_TEMP; + break; + case MC_CMD_SENSOR_FAN_0: + *statp = EFX_MON_STAT_FAN_0; + break; + case MC_CMD_SENSOR_VREG_1V2_TEMP: + *statp = EFX_MON_STAT_VREG_1V2_TEMP; + break; + case MC_CMD_SENSOR_IN_0V9_B: + *statp = EFX_MON_STAT_IN_0V9_B; + break; + default: + *statp = EFX_MON_NSTATS; + break; + }; + + if (*statp == EFX_MON_NSTATS) + goto fail1; + + return (B_TRUE); + +fail1: + EFSYS_PROBE1(fail1, boolean_t, B_TRUE); + return (B_FALSE); +}; + +/* END MKCONFIG GENERATED MonitorMcdiMappingBlock */ + +/* START MKCONFIG GENERATED MonitorStatisticUnitsBlock 2d447c656cc2d01d */ + __checkReturn boolean_t +efx_mon_get_stat_unit( + __in efx_mon_stat_t stat, + __out efx_mon_stat_unit_t *unitp) +{ + switch (stat) { + case EFX_MON_STAT_IN_1V0: + case EFX_MON_STAT_IN_1V2: + case EFX_MON_STAT_IN_1V8: + case EFX_MON_STAT_IN_2V5: + case EFX_MON_STAT_IN_3V3: + case EFX_MON_STAT_IN_12V0: + case EFX_MON_STAT_IN_1V2A: + case EFX_MON_STAT_IN_VREF: + case EFX_MON_STAT_OUT_VAOE: + case EFX_MON_STAT_IN_VAOE: + case EFX_MON_STAT_IN_0V9: + case EFX_MON_STAT_IN_0V9_ADC: + case EFX_MON_STAT_CONTROLLER_VPTAT_EXTADC: + case EFX_MON_STAT_VDD08D_VSS08D_CSR: + case EFX_MON_STAT_VDD08D_VSS08D_CSR_EXTADC: + case EFX_MON_STAT_MUM_VCC: + case EFX_MON_STAT_IN_0V9_A: + case EFX_MON_STAT_IN_0V9_B: + case EFX_MON_STAT_CCOM_AVREG_1V2_SUPPLY: + case EFX_MON_STAT_CCOM_AVREG_1V2_SUPPLY_EXTADC: + case EFX_MON_STAT_CCOM_AVREG_1V8_SUPPLY: + case 
EFX_MON_STAT_CCOM_AVREG_1V8_SUPPLY_EXTADC: + case EFX_MON_STAT_CONTROLLER_MASTER_VPTAT: + case EFX_MON_STAT_CONTROLLER_MASTER_VPTAT_EXTADC: + case EFX_MON_STAT_CONTROLLER_SLAVE_VPTAT: + case EFX_MON_STAT_CONTROLLER_SLAVE_VPTAT_EXTADC: + case EFX_MON_STAT_SODIMM_VOUT: + case EFX_MON_STAT_PHY0_VCC: + case EFX_MON_STAT_PHY1_VCC: + case EFX_MON_STAT_IN_1V3: + *unitp = EFX_MON_STAT_UNIT_VOLTAGE_MV; + break; + case EFX_MON_STAT_CONTROLLER_TEMP: + case EFX_MON_STAT_PHY_COMMON_TEMP: + case EFX_MON_STAT_PHY0_TEMP: + case EFX_MON_STAT_PHY1_TEMP: + case EFX_MON_STAT_AOE_TEMP: + case EFX_MON_STAT_PSU_AOE_TEMP: + case EFX_MON_STAT_PSU_TEMP: + case EFX_MON_STAT_CONTROLLER_2_TEMP: + case EFX_MON_STAT_VREG_INTERNAL_TEMP: + case EFX_MON_STAT_VREG_0V9_TEMP: + case EFX_MON_STAT_VREG_1V2_TEMP: + case EFX_MON_STAT_CONTROLLER_VPTAT: + case EFX_MON_STAT_CONTROLLER_INTERNAL_TEMP: + case EFX_MON_STAT_CONTROLLER_INTERNAL_TEMP_EXTADC: + case EFX_MON_STAT_AMBIENT_TEMP: + case EFX_MON_STAT_HOTPOINT_TEMP: + case EFX_MON_STAT_VREG_0V9_A_TEMP: + case EFX_MON_STAT_VREG_0V9_B_TEMP: + case EFX_MON_STAT_CONTROLLER_MASTER_INTERNAL_TEMP: + case EFX_MON_STAT_CONTROLLER_MASTER_INTERNAL_TEMP_EXTADC: + case EFX_MON_STAT_CONTROLLER_SLAVE_INTERNAL_TEMP: + case EFX_MON_STAT_CONTROLLER_SLAVE_INTERNAL_TEMP_EXTADC: + case EFX_MON_STAT_SODIMM_0_TEMP: + case EFX_MON_STAT_SODIMM_1_TEMP: + case EFX_MON_STAT_CONTROLLER_TDIODE_TEMP: + case EFX_MON_STAT_BOARD_FRONT_TEMP: + case EFX_MON_STAT_BOARD_BACK_TEMP: + *unitp = EFX_MON_STAT_UNIT_TEMP_C; + break; + case EFX_MON_STAT_CONTROLLER_COOLING: + case EFX_MON_STAT_PHY0_COOLING: + case EFX_MON_STAT_PHY1_COOLING: + case EFX_MON_STAT_AIRFLOW: + case EFX_MON_STAT_PHY_POWER_PORT0: + case EFX_MON_STAT_PHY_POWER_PORT1: + *unitp = EFX_MON_STAT_UNIT_BOOL; + break; + case EFX_MON_STAT_NIC_POWER: + *unitp = EFX_MON_STAT_UNIT_POWER_W; + break; + case EFX_MON_STAT_OUT_IAOE: + case EFX_MON_STAT_IN_IAOE: + case EFX_MON_STAT_IN_I0V9: + case EFX_MON_STAT_IN_I1V2: + case EFX_MON_STAT_IN_I0V9_A: + case EFX_MON_STAT_IN_I0V9_B: + case EFX_MON_STAT_IN_I1V8: + case EFX_MON_STAT_IN_I2V5: + case EFX_MON_STAT_IN_I3V3: + case EFX_MON_STAT_IN_I12V0: + case EFX_MON_STAT_IN_I1V3: + *unitp = EFX_MON_STAT_UNIT_CURRENT_MA; + break; + case EFX_MON_STAT_FAN_0: + case EFX_MON_STAT_FAN_1: + case EFX_MON_STAT_FAN_2: + case EFX_MON_STAT_FAN_3: + case EFX_MON_STAT_FAN_4: + *unitp = EFX_MON_STAT_UNIT_RPM; + break; + default: + *unitp = EFX_MON_STAT_UNIT_UNKNOWN; + break; + }; + + if (*unitp == EFX_MON_STAT_UNIT_UNKNOWN) + goto fail1; + + return (B_TRUE); + +fail1: + EFSYS_PROBE1(fail1, boolean_t, B_TRUE); + return (B_FALSE); +}; + +/* END MKCONFIG GENERATED MonitorStatisticUnitsBlock */ + +/* START MKCONFIG GENERATED MonitorStatisticPortsBlock 1719b751d842534f */ + __checkReturn boolean_t +efx_mon_get_stat_portmap( + __in efx_mon_stat_t stat, + __out efx_mon_stat_portmask_t *maskp) +{ + + switch (stat) { + case EFX_MON_STAT_PHY1_TEMP: + case EFX_MON_STAT_PHY1_COOLING: + case EFX_MON_STAT_PHY_POWER_PORT1: + *maskp = EFX_MON_STAT_PORTMAP_PORT1; + break; + case EFX_MON_STAT_CONTROLLER_TEMP: + case EFX_MON_STAT_PHY_COMMON_TEMP: + case EFX_MON_STAT_CONTROLLER_COOLING: + case EFX_MON_STAT_IN_1V0: + case EFX_MON_STAT_IN_1V2: + case EFX_MON_STAT_IN_1V8: + case EFX_MON_STAT_IN_2V5: + case EFX_MON_STAT_IN_3V3: + case EFX_MON_STAT_IN_12V0: + case EFX_MON_STAT_IN_1V2A: + case EFX_MON_STAT_IN_VREF: + case EFX_MON_STAT_OUT_VAOE: + case EFX_MON_STAT_AOE_TEMP: + case EFX_MON_STAT_PSU_AOE_TEMP: + case EFX_MON_STAT_PSU_TEMP: + case EFX_MON_STAT_FAN_0: + 
case EFX_MON_STAT_FAN_1: + case EFX_MON_STAT_FAN_2: + case EFX_MON_STAT_FAN_3: + case EFX_MON_STAT_FAN_4: + case EFX_MON_STAT_IN_VAOE: + case EFX_MON_STAT_OUT_IAOE: + case EFX_MON_STAT_IN_IAOE: + case EFX_MON_STAT_NIC_POWER: + case EFX_MON_STAT_IN_0V9: + case EFX_MON_STAT_IN_I0V9: + case EFX_MON_STAT_IN_I1V2: + case EFX_MON_STAT_IN_0V9_ADC: + case EFX_MON_STAT_CONTROLLER_2_TEMP: + case EFX_MON_STAT_VREG_INTERNAL_TEMP: + case EFX_MON_STAT_VREG_0V9_TEMP: + case EFX_MON_STAT_VREG_1V2_TEMP: + case EFX_MON_STAT_CONTROLLER_VPTAT: + case EFX_MON_STAT_CONTROLLER_INTERNAL_TEMP: + case EFX_MON_STAT_CONTROLLER_VPTAT_EXTADC: + case EFX_MON_STAT_CONTROLLER_INTERNAL_TEMP_EXTADC: + case EFX_MON_STAT_AMBIENT_TEMP: + case EFX_MON_STAT_AIRFLOW: + case EFX_MON_STAT_VDD08D_VSS08D_CSR: + case EFX_MON_STAT_VDD08D_VSS08D_CSR_EXTADC: + case EFX_MON_STAT_HOTPOINT_TEMP: + case EFX_MON_STAT_MUM_VCC: + case EFX_MON_STAT_IN_0V9_A: + case EFX_MON_STAT_IN_I0V9_A: + case EFX_MON_STAT_VREG_0V9_A_TEMP: + case EFX_MON_STAT_IN_0V9_B: + case EFX_MON_STAT_IN_I0V9_B: + case EFX_MON_STAT_VREG_0V9_B_TEMP: + case EFX_MON_STAT_CCOM_AVREG_1V2_SUPPLY: + case EFX_MON_STAT_CCOM_AVREG_1V2_SUPPLY_EXTADC: + case EFX_MON_STAT_CCOM_AVREG_1V8_SUPPLY: + case EFX_MON_STAT_CCOM_AVREG_1V8_SUPPLY_EXTADC: + case EFX_MON_STAT_CONTROLLER_MASTER_VPTAT: + case EFX_MON_STAT_CONTROLLER_MASTER_INTERNAL_TEMP: + case EFX_MON_STAT_CONTROLLER_MASTER_VPTAT_EXTADC: + case EFX_MON_STAT_CONTROLLER_MASTER_INTERNAL_TEMP_EXTADC: + case EFX_MON_STAT_CONTROLLER_SLAVE_VPTAT: + case EFX_MON_STAT_CONTROLLER_SLAVE_INTERNAL_TEMP: + case EFX_MON_STAT_CONTROLLER_SLAVE_VPTAT_EXTADC: + case EFX_MON_STAT_CONTROLLER_SLAVE_INTERNAL_TEMP_EXTADC: + case EFX_MON_STAT_SODIMM_VOUT: + case EFX_MON_STAT_SODIMM_0_TEMP: + case EFX_MON_STAT_SODIMM_1_TEMP: + case EFX_MON_STAT_PHY0_VCC: + case EFX_MON_STAT_PHY1_VCC: + case EFX_MON_STAT_CONTROLLER_TDIODE_TEMP: + case EFX_MON_STAT_BOARD_FRONT_TEMP: + case EFX_MON_STAT_BOARD_BACK_TEMP: + case EFX_MON_STAT_IN_I1V8: + case EFX_MON_STAT_IN_I2V5: + case EFX_MON_STAT_IN_I3V3: + case EFX_MON_STAT_IN_I12V0: + case EFX_MON_STAT_IN_1V3: + case EFX_MON_STAT_IN_I1V3: + *maskp = EFX_MON_STAT_PORTMAP_ALL; + break; + case EFX_MON_STAT_PHY0_TEMP: + case EFX_MON_STAT_PHY0_COOLING: + case EFX_MON_STAT_PHY_POWER_PORT0: + *maskp = EFX_MON_STAT_PORTMAP_PORT0; + break; + default: + *maskp = EFX_MON_STAT_PORTMAP_UNKNOWN; + break; + }; + + if (*maskp == EFX_MON_STAT_PORTMAP_UNKNOWN) + goto fail1; + + return (B_TRUE); + +fail1: + EFSYS_PROBE1(fail1, boolean_t, B_TRUE); + return (B_FALSE); +}; + +/* END MKCONFIG GENERATED MonitorStatisticPortsBlock */ + + __checkReturn efx_rc_t +efx_mon_stats_update( + __in efx_nic_t *enp, + __in efsys_mem_t *esmp, + __inout_ecount(EFX_MON_NSTATS) efx_mon_stat_value_t *values) +{ + efx_mon_t *emp = &(enp->en_mon); + const efx_mon_ops_t *emop = emp->em_emop; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MON); + + return (emop->emo_stats_update(enp, esmp, values)); +} + + __checkReturn efx_rc_t +efx_mon_limits_update( + __in efx_nic_t *enp, + __inout_ecount(EFX_MON_NSTATS) efx_mon_stat_limits_t *values) +{ + efx_mon_t *emp = &(enp->en_mon); + const efx_mon_ops_t *emop = emp->em_emop; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MON); + + return (emop->emo_limits_update(enp, values)); +} + +#endif /* EFSYS_OPT_MON_STATS */ + + void +efx_mon_fini( + __in efx_nic_t *enp) +{ + efx_mon_t *emp = &(enp->en_mon); + + 
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MON); + + emp->em_emop = NULL; + + emp->em_type = EFX_MON_INVALID; + + enp->en_mod_flags &= ~EFX_MOD_MON; +} diff --git a/src/spdk/dpdk/drivers/net/sfc/base/efx_nic.c b/src/spdk/dpdk/drivers/net/sfc/base/efx_nic.c new file mode 100644 index 000000000..267d01052 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/base/efx_nic.c @@ -0,0 +1,1154 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2007-2019 Solarflare Communications Inc. + */ + +#include "efx.h" +#include "efx_impl.h" + + + __checkReturn efx_rc_t +efx_family( + __in uint16_t venid, + __in uint16_t devid, + __out efx_family_t *efp, + __out unsigned int *membarp) +{ + if (venid == EFX_PCI_VENID_SFC) { + switch (devid) { +#if EFSYS_OPT_SIENA + case EFX_PCI_DEVID_SIENA_F1_UNINIT: + /* + * Hardware default for PF0 of uninitialised Siena. + * manftest must be able to cope with this device id. + */ + case EFX_PCI_DEVID_BETHPAGE: + case EFX_PCI_DEVID_SIENA: + *efp = EFX_FAMILY_SIENA; + *membarp = EFX_MEM_BAR_SIENA; + return (0); +#endif /* EFSYS_OPT_SIENA */ + +#if EFSYS_OPT_HUNTINGTON + case EFX_PCI_DEVID_HUNTINGTON_PF_UNINIT: + /* + * Hardware default for PF0 of uninitialised Huntington. + * manftest must be able to cope with this device id. + */ + case EFX_PCI_DEVID_FARMINGDALE: + case EFX_PCI_DEVID_GREENPORT: + *efp = EFX_FAMILY_HUNTINGTON; + *membarp = EFX_MEM_BAR_HUNTINGTON_PF; + return (0); + + case EFX_PCI_DEVID_FARMINGDALE_VF: + case EFX_PCI_DEVID_GREENPORT_VF: + *efp = EFX_FAMILY_HUNTINGTON; + *membarp = EFX_MEM_BAR_HUNTINGTON_VF; + return (0); +#endif /* EFSYS_OPT_HUNTINGTON */ + +#if EFSYS_OPT_MEDFORD + case EFX_PCI_DEVID_MEDFORD_PF_UNINIT: + /* + * Hardware default for PF0 of uninitialised Medford. + * manftest must be able to cope with this device id. + */ + case EFX_PCI_DEVID_MEDFORD: + *efp = EFX_FAMILY_MEDFORD; + *membarp = EFX_MEM_BAR_MEDFORD_PF; + return (0); + + case EFX_PCI_DEVID_MEDFORD_VF: + *efp = EFX_FAMILY_MEDFORD; + *membarp = EFX_MEM_BAR_MEDFORD_VF; + return (0); +#endif /* EFSYS_OPT_MEDFORD */ + +#if EFSYS_OPT_MEDFORD2 + case EFX_PCI_DEVID_MEDFORD2_PF_UNINIT: + /* + * Hardware default for PF0 of uninitialised Medford2. + * manftest must be able to cope with this device id. 
+ */ + case EFX_PCI_DEVID_MEDFORD2: + case EFX_PCI_DEVID_MEDFORD2_VF: + *efp = EFX_FAMILY_MEDFORD2; + *membarp = EFX_MEM_BAR_MEDFORD2; + return (0); +#endif /* EFSYS_OPT_MEDFORD2 */ + + case EFX_PCI_DEVID_FALCON: /* Obsolete, not supported */ + default: + break; + } + } + + *efp = EFX_FAMILY_INVALID; + return (ENOTSUP); +} + + +#if EFSYS_OPT_SIENA + +static const efx_nic_ops_t __efx_nic_siena_ops = { + siena_nic_probe, /* eno_probe */ + NULL, /* eno_board_cfg */ + NULL, /* eno_set_drv_limits */ + siena_nic_reset, /* eno_reset */ + siena_nic_init, /* eno_init */ + NULL, /* eno_get_vi_pool */ + NULL, /* eno_get_bar_region */ + NULL, /* eno_hw_unavailable */ + NULL, /* eno_set_hw_unavailable */ +#if EFSYS_OPT_DIAG + siena_nic_register_test, /* eno_register_test */ +#endif /* EFSYS_OPT_DIAG */ + siena_nic_fini, /* eno_fini */ + siena_nic_unprobe, /* eno_unprobe */ +}; + +#endif /* EFSYS_OPT_SIENA */ + +#if EFSYS_OPT_HUNTINGTON + +static const efx_nic_ops_t __efx_nic_hunt_ops = { + ef10_nic_probe, /* eno_probe */ + hunt_board_cfg, /* eno_board_cfg */ + ef10_nic_set_drv_limits, /* eno_set_drv_limits */ + ef10_nic_reset, /* eno_reset */ + ef10_nic_init, /* eno_init */ + ef10_nic_get_vi_pool, /* eno_get_vi_pool */ + ef10_nic_get_bar_region, /* eno_get_bar_region */ + ef10_nic_hw_unavailable, /* eno_hw_unavailable */ + ef10_nic_set_hw_unavailable, /* eno_set_hw_unavailable */ +#if EFSYS_OPT_DIAG + ef10_nic_register_test, /* eno_register_test */ +#endif /* EFSYS_OPT_DIAG */ + ef10_nic_fini, /* eno_fini */ + ef10_nic_unprobe, /* eno_unprobe */ +}; + +#endif /* EFSYS_OPT_HUNTINGTON */ + +#if EFSYS_OPT_MEDFORD + +static const efx_nic_ops_t __efx_nic_medford_ops = { + ef10_nic_probe, /* eno_probe */ + medford_board_cfg, /* eno_board_cfg */ + ef10_nic_set_drv_limits, /* eno_set_drv_limits */ + ef10_nic_reset, /* eno_reset */ + ef10_nic_init, /* eno_init */ + ef10_nic_get_vi_pool, /* eno_get_vi_pool */ + ef10_nic_get_bar_region, /* eno_get_bar_region */ + ef10_nic_hw_unavailable, /* eno_hw_unavailable */ + ef10_nic_set_hw_unavailable, /* eno_set_hw_unavailable */ +#if EFSYS_OPT_DIAG + ef10_nic_register_test, /* eno_register_test */ +#endif /* EFSYS_OPT_DIAG */ + ef10_nic_fini, /* eno_fini */ + ef10_nic_unprobe, /* eno_unprobe */ +}; + +#endif /* EFSYS_OPT_MEDFORD */ + +#if EFSYS_OPT_MEDFORD2 + +static const efx_nic_ops_t __efx_nic_medford2_ops = { + ef10_nic_probe, /* eno_probe */ + medford2_board_cfg, /* eno_board_cfg */ + ef10_nic_set_drv_limits, /* eno_set_drv_limits */ + ef10_nic_reset, /* eno_reset */ + ef10_nic_init, /* eno_init */ + ef10_nic_get_vi_pool, /* eno_get_vi_pool */ + ef10_nic_get_bar_region, /* eno_get_bar_region */ + ef10_nic_hw_unavailable, /* eno_hw_unavailable */ + ef10_nic_set_hw_unavailable, /* eno_set_hw_unavailable */ +#if EFSYS_OPT_DIAG + ef10_nic_register_test, /* eno_register_test */ +#endif /* EFSYS_OPT_DIAG */ + ef10_nic_fini, /* eno_fini */ + ef10_nic_unprobe, /* eno_unprobe */ +}; + +#endif /* EFSYS_OPT_MEDFORD2 */ + + + __checkReturn efx_rc_t +efx_nic_create( + __in efx_family_t family, + __in efsys_identifier_t *esip, + __in efsys_bar_t *esbp, + __in efsys_lock_t *eslp, + __deref_out efx_nic_t **enpp) +{ + efx_nic_t *enp; + efx_rc_t rc; + + EFSYS_ASSERT3U(family, >, EFX_FAMILY_INVALID); + EFSYS_ASSERT3U(family, <, EFX_FAMILY_NTYPES); + + /* Allocate a NIC object */ + EFSYS_KMEM_ALLOC(esip, sizeof (efx_nic_t), enp); + + if (enp == NULL) { + rc = ENOMEM; + goto fail1; + } + + enp->en_magic = EFX_NIC_MAGIC; + + switch (family) { +#if EFSYS_OPT_SIENA + case EFX_FAMILY_SIENA: 
+ enp->en_enop = &__efx_nic_siena_ops; + enp->en_features = + EFX_FEATURE_IPV6 | + EFX_FEATURE_LFSR_HASH_INSERT | + EFX_FEATURE_LINK_EVENTS | + EFX_FEATURE_PERIODIC_MAC_STATS | + EFX_FEATURE_MCDI | + EFX_FEATURE_LOOKAHEAD_SPLIT | + EFX_FEATURE_MAC_HEADER_FILTERS | + EFX_FEATURE_TX_SRC_FILTERS; + break; +#endif /* EFSYS_OPT_SIENA */ + +#if EFSYS_OPT_HUNTINGTON + case EFX_FAMILY_HUNTINGTON: + enp->en_enop = &__efx_nic_hunt_ops; + enp->en_features = + EFX_FEATURE_IPV6 | + EFX_FEATURE_LINK_EVENTS | + EFX_FEATURE_PERIODIC_MAC_STATS | + EFX_FEATURE_MCDI | + EFX_FEATURE_MAC_HEADER_FILTERS | + EFX_FEATURE_MCDI_DMA | + EFX_FEATURE_PIO_BUFFERS | + EFX_FEATURE_FW_ASSISTED_TSO | + EFX_FEATURE_FW_ASSISTED_TSO_V2 | + EFX_FEATURE_PACKED_STREAM | + EFX_FEATURE_TXQ_CKSUM_OP_DESC; + break; +#endif /* EFSYS_OPT_HUNTINGTON */ + +#if EFSYS_OPT_MEDFORD + case EFX_FAMILY_MEDFORD: + enp->en_enop = &__efx_nic_medford_ops; + /* + * FW_ASSISTED_TSO omitted as Medford only supports firmware + * assisted TSO version 2, not the v1 scheme used on Huntington. + */ + enp->en_features = + EFX_FEATURE_IPV6 | + EFX_FEATURE_LINK_EVENTS | + EFX_FEATURE_PERIODIC_MAC_STATS | + EFX_FEATURE_MCDI | + EFX_FEATURE_MAC_HEADER_FILTERS | + EFX_FEATURE_MCDI_DMA | + EFX_FEATURE_PIO_BUFFERS | + EFX_FEATURE_FW_ASSISTED_TSO_V2 | + EFX_FEATURE_PACKED_STREAM | + EFX_FEATURE_TXQ_CKSUM_OP_DESC; + break; +#endif /* EFSYS_OPT_MEDFORD */ + +#if EFSYS_OPT_MEDFORD2 + case EFX_FAMILY_MEDFORD2: + enp->en_enop = &__efx_nic_medford2_ops; + enp->en_features = + EFX_FEATURE_IPV6 | + EFX_FEATURE_LINK_EVENTS | + EFX_FEATURE_PERIODIC_MAC_STATS | + EFX_FEATURE_MCDI | + EFX_FEATURE_MAC_HEADER_FILTERS | + EFX_FEATURE_MCDI_DMA | + EFX_FEATURE_PIO_BUFFERS | + EFX_FEATURE_FW_ASSISTED_TSO_V2 | + EFX_FEATURE_PACKED_STREAM | + EFX_FEATURE_TXQ_CKSUM_OP_DESC; + break; +#endif /* EFSYS_OPT_MEDFORD2 */ + + default: + rc = ENOTSUP; + goto fail2; + } + + enp->en_family = family; + enp->en_esip = esip; + enp->en_esbp = esbp; + enp->en_eslp = eslp; + + *enpp = enp; + + return (0); + +fail2: + EFSYS_PROBE(fail2); + + enp->en_magic = 0; + + /* Free the NIC object */ + EFSYS_KMEM_FREE(esip, sizeof (efx_nic_t), enp); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +efx_nic_probe( + __in efx_nic_t *enp, + __in efx_fw_variant_t efv) +{ + const efx_nic_ops_t *enop; + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); +#if EFSYS_OPT_MCDI + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI); +#endif /* EFSYS_OPT_MCDI */ + EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_PROBE)); + + /* Ensure FW variant codes match with MC_CMD_FW codes */ + EFX_STATIC_ASSERT(EFX_FW_VARIANT_FULL_FEATURED == + MC_CMD_FW_FULL_FEATURED); + EFX_STATIC_ASSERT(EFX_FW_VARIANT_LOW_LATENCY == + MC_CMD_FW_LOW_LATENCY); + EFX_STATIC_ASSERT(EFX_FW_VARIANT_PACKED_STREAM == + MC_CMD_FW_PACKED_STREAM); + EFX_STATIC_ASSERT(EFX_FW_VARIANT_HIGH_TX_RATE == + MC_CMD_FW_HIGH_TX_RATE); + EFX_STATIC_ASSERT(EFX_FW_VARIANT_PACKED_STREAM_HASH_MODE_1 == + MC_CMD_FW_PACKED_STREAM_HASH_MODE_1); + EFX_STATIC_ASSERT(EFX_FW_VARIANT_RULES_ENGINE == + MC_CMD_FW_RULES_ENGINE); + EFX_STATIC_ASSERT(EFX_FW_VARIANT_DPDK == + MC_CMD_FW_DPDK); + EFX_STATIC_ASSERT(EFX_FW_VARIANT_DONT_CARE == + (int)MC_CMD_FW_DONT_CARE); + + enop = enp->en_enop; + enp->efv = efv; + + if ((rc = enop->eno_probe(enp)) != 0) + goto fail1; + + if ((rc = efx_phy_probe(enp)) != 0) + goto fail2; + + enp->en_mod_flags |= EFX_MOD_PROBE; + + return (0); + +fail2: + EFSYS_PROBE(fail2); + + enop->eno_unprobe(enp); + 
+fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +efx_nic_set_drv_limits( + __inout efx_nic_t *enp, + __in efx_drv_limits_t *edlp) +{ + const efx_nic_ops_t *enop = enp->en_enop; + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); + + if (enop->eno_set_drv_limits != NULL) { + if ((rc = enop->eno_set_drv_limits(enp, edlp)) != 0) + goto fail1; + } + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +efx_nic_set_drv_version( + __inout efx_nic_t *enp, + __in_ecount(length) char const *verp, + __in size_t length) +{ + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_PROBE)); + + /* + * length is the string content length in bytes. + * Accept any content which fits into the version + * buffer, excluding the last byte. This is reserved + * for an appended NUL terminator. + */ + if (length >= sizeof (enp->en_drv_version)) { + rc = E2BIG; + goto fail1; + } + + (void) memset(enp->en_drv_version, 0, + sizeof (enp->en_drv_version)); + memcpy(enp->en_drv_version, verp, length); + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + + __checkReturn efx_rc_t +efx_nic_get_bar_region( + __in efx_nic_t *enp, + __in efx_nic_region_t region, + __out uint32_t *offsetp, + __out size_t *sizep) +{ + const efx_nic_ops_t *enop = enp->en_enop; + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC); + + if (enop->eno_get_bar_region == NULL) { + rc = ENOTSUP; + goto fail1; + } + if ((rc = (enop->eno_get_bar_region)(enp, + region, offsetp, sizep)) != 0) { + goto fail2; + } + + return (0); + +fail2: + EFSYS_PROBE(fail2); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + + __checkReturn efx_rc_t +efx_nic_get_vi_pool( + __in efx_nic_t *enp, + __out uint32_t *evq_countp, + __out uint32_t *rxq_countp, + __out uint32_t *txq_countp) +{ + const efx_nic_ops_t *enop = enp->en_enop; + efx_nic_cfg_t *encp = &enp->en_nic_cfg; + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC); + + if (enop->eno_get_vi_pool != NULL) { + uint32_t vi_count = 0; + + if ((rc = (enop->eno_get_vi_pool)(enp, &vi_count)) != 0) + goto fail1; + + *evq_countp = vi_count; + *rxq_countp = vi_count; + *txq_countp = vi_count; + } else { + /* Use NIC limits as default value */ + *evq_countp = encp->enc_evq_limit; + *rxq_countp = encp->enc_rxq_limit; + *txq_countp = encp->enc_txq_limit; + } + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + + __checkReturn efx_rc_t +efx_nic_init( + __in efx_nic_t *enp) +{ + const efx_nic_ops_t *enop = enp->en_enop; + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); + + if (enp->en_mod_flags & EFX_MOD_NIC) { + rc = EINVAL; + goto fail1; + } + + if ((rc = enop->eno_init(enp)) != 0) + goto fail2; + + enp->en_mod_flags |= EFX_MOD_NIC; + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + void +efx_nic_fini( + __in efx_nic_t *enp) +{ + const efx_nic_ops_t *enop = enp->en_enop; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + 
EFSYS_ASSERT(enp->en_mod_flags & EFX_MOD_PROBE); + EFSYS_ASSERT(enp->en_mod_flags & EFX_MOD_NIC); + EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_INTR)); + EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_EV)); + EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_RX)); + EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_TX)); + + enop->eno_fini(enp); + + enp->en_mod_flags &= ~EFX_MOD_NIC; +} + + void +efx_nic_unprobe( + __in efx_nic_t *enp) +{ + const efx_nic_ops_t *enop = enp->en_enop; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); +#if EFSYS_OPT_MCDI + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI); +#endif /* EFSYS_OPT_MCDI */ + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); + EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_NIC)); + EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_INTR)); + EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_EV)); + EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_RX)); + EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_TX)); + + efx_phy_unprobe(enp); + + enop->eno_unprobe(enp); + + enp->en_mod_flags &= ~EFX_MOD_PROBE; +} + + void +efx_nic_destroy( + __in efx_nic_t *enp) +{ + efsys_identifier_t *esip = enp->en_esip; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, ==, 0); + + enp->en_family = EFX_FAMILY_INVALID; + enp->en_esip = NULL; + enp->en_esbp = NULL; + enp->en_eslp = NULL; + + enp->en_enop = NULL; + + enp->en_magic = 0; + + /* Free the NIC object */ + EFSYS_KMEM_FREE(esip, sizeof (efx_nic_t), enp); +} + + __checkReturn efx_rc_t +efx_nic_reset( + __in efx_nic_t *enp) +{ + const efx_nic_ops_t *enop = enp->en_enop; + unsigned int mod_flags; + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT(enp->en_mod_flags & EFX_MOD_PROBE); + /* + * All modules except the MCDI, PROBE, NVRAM, VPD, MON, TUNNEL + * (which we do not reset here) must have been shut down or never + * initialized. + * + * A rule of thumb here is: If the controller or MC reboots, is *any* + * state lost. If it's lost and needs reapplying, then the module + * *must* not be initialised during the reset. 
+ */ + mod_flags = enp->en_mod_flags; + mod_flags &= ~(EFX_MOD_MCDI | EFX_MOD_PROBE | EFX_MOD_NVRAM | + EFX_MOD_VPD | EFX_MOD_MON); +#if EFSYS_OPT_TUNNEL + mod_flags &= ~EFX_MOD_TUNNEL; +#endif /* EFSYS_OPT_TUNNEL */ + EFSYS_ASSERT3U(mod_flags, ==, 0); + if (mod_flags != 0) { + rc = EINVAL; + goto fail1; + } + + if ((rc = enop->eno_reset(enp)) != 0) + goto fail2; + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + const efx_nic_cfg_t * +efx_nic_cfg_get( + __in const efx_nic_t *enp) +{ + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); + + return (&(enp->en_nic_cfg)); +} + + __checkReturn efx_rc_t +efx_nic_get_fw_version( + __in efx_nic_t *enp, + __out efx_nic_fw_info_t *enfip) +{ + uint16_t mc_fw_version[4]; + efx_rc_t rc; + + if (enfip == NULL) { + rc = EINVAL; + goto fail1; + } + + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI); + EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI); + + /* Ensure RXDP_FW_ID codes match with MC_CMD_GET_CAPABILITIES codes */ + EFX_STATIC_ASSERT(EFX_RXDP_FULL_FEATURED_FW_ID == + MC_CMD_GET_CAPABILITIES_OUT_RXDP); + EFX_STATIC_ASSERT(EFX_RXDP_LOW_LATENCY_FW_ID == + MC_CMD_GET_CAPABILITIES_OUT_RXDP_LOW_LATENCY); + EFX_STATIC_ASSERT(EFX_RXDP_PACKED_STREAM_FW_ID == + MC_CMD_GET_CAPABILITIES_OUT_RXDP_PACKED_STREAM); + EFX_STATIC_ASSERT(EFX_RXDP_RULES_ENGINE_FW_ID == + MC_CMD_GET_CAPABILITIES_OUT_RXDP_RULES_ENGINE); + EFX_STATIC_ASSERT(EFX_RXDP_DPDK_FW_ID == + MC_CMD_GET_CAPABILITIES_OUT_RXDP_DPDK); + + rc = efx_mcdi_version(enp, mc_fw_version, NULL, NULL); + if (rc != 0) + goto fail2; + + rc = efx_mcdi_get_capabilities(enp, NULL, + &enfip->enfi_rx_dpcpu_fw_id, + &enfip->enfi_tx_dpcpu_fw_id, + NULL, NULL); + if (rc == 0) { + enfip->enfi_dpcpu_fw_ids_valid = B_TRUE; + } else if (rc == ENOTSUP) { + enfip->enfi_dpcpu_fw_ids_valid = B_FALSE; + enfip->enfi_rx_dpcpu_fw_id = 0; + enfip->enfi_tx_dpcpu_fw_id = 0; + } else { + goto fail3; + } + + memcpy(enfip->enfi_mc_fw_version, mc_fw_version, + sizeof (mc_fw_version)); + + return (0); + +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn boolean_t +efx_nic_hw_unavailable( + __in efx_nic_t *enp) +{ + const efx_nic_ops_t *enop = enp->en_enop; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + /* NOTE: can be used by MCDI before NIC probe */ + + if (enop->eno_hw_unavailable != NULL) { + if ((enop->eno_hw_unavailable)(enp) != B_FALSE) + goto unavail; + } + + return (B_FALSE); + +unavail: + return (B_TRUE); +} + + void +efx_nic_set_hw_unavailable( + __in efx_nic_t *enp) +{ + const efx_nic_ops_t *enop = enp->en_enop; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + + if (enop->eno_set_hw_unavailable != NULL) + enop->eno_set_hw_unavailable(enp); +} + + +#if EFSYS_OPT_DIAG + + __checkReturn efx_rc_t +efx_nic_register_test( + __in efx_nic_t *enp) +{ + const efx_nic_ops_t *enop = enp->en_enop; + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); + EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_NIC)); + + if ((rc = enop->eno_register_test(enp)) != 0) + goto fail1; + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +#endif /* EFSYS_OPT_DIAG */ + +#if EFSYS_OPT_LOOPBACK + +extern void +efx_loopback_mask( + __in efx_loopback_kind_t loopback_kind, + __out efx_qword_t *maskp) +{ + efx_qword_t mask; + + 
EFSYS_ASSERT3U(loopback_kind, <, EFX_LOOPBACK_NKINDS); + EFSYS_ASSERT(maskp != NULL); + + /* Assert the MC_CMD_LOOPBACK and EFX_LOOPBACK namespaces agree */ +#define LOOPBACK_CHECK(_mcdi, _efx) \ + EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_##_mcdi == EFX_LOOPBACK_##_efx) + + LOOPBACK_CHECK(NONE, OFF); + LOOPBACK_CHECK(DATA, DATA); + LOOPBACK_CHECK(GMAC, GMAC); + LOOPBACK_CHECK(XGMII, XGMII); + LOOPBACK_CHECK(XGXS, XGXS); + LOOPBACK_CHECK(XAUI, XAUI); + LOOPBACK_CHECK(GMII, GMII); + LOOPBACK_CHECK(SGMII, SGMII); + LOOPBACK_CHECK(XGBR, XGBR); + LOOPBACK_CHECK(XFI, XFI); + LOOPBACK_CHECK(XAUI_FAR, XAUI_FAR); + LOOPBACK_CHECK(GMII_FAR, GMII_FAR); + LOOPBACK_CHECK(SGMII_FAR, SGMII_FAR); + LOOPBACK_CHECK(XFI_FAR, XFI_FAR); + LOOPBACK_CHECK(GPHY, GPHY); + LOOPBACK_CHECK(PHYXS, PHY_XS); + LOOPBACK_CHECK(PCS, PCS); + LOOPBACK_CHECK(PMAPMD, PMA_PMD); + LOOPBACK_CHECK(XPORT, XPORT); + LOOPBACK_CHECK(XGMII_WS, XGMII_WS); + LOOPBACK_CHECK(XAUI_WS, XAUI_WS); + LOOPBACK_CHECK(XAUI_WS_FAR, XAUI_WS_FAR); + LOOPBACK_CHECK(XAUI_WS_NEAR, XAUI_WS_NEAR); + LOOPBACK_CHECK(GMII_WS, GMII_WS); + LOOPBACK_CHECK(XFI_WS, XFI_WS); + LOOPBACK_CHECK(XFI_WS_FAR, XFI_WS_FAR); + LOOPBACK_CHECK(PHYXS_WS, PHYXS_WS); + LOOPBACK_CHECK(PMA_INT, PMA_INT); + LOOPBACK_CHECK(SD_NEAR, SD_NEAR); + LOOPBACK_CHECK(SD_FAR, SD_FAR); + LOOPBACK_CHECK(PMA_INT_WS, PMA_INT_WS); + LOOPBACK_CHECK(SD_FEP2_WS, SD_FEP2_WS); + LOOPBACK_CHECK(SD_FEP1_5_WS, SD_FEP1_5_WS); + LOOPBACK_CHECK(SD_FEP_WS, SD_FEP_WS); + LOOPBACK_CHECK(SD_FES_WS, SD_FES_WS); + LOOPBACK_CHECK(AOE_INT_NEAR, AOE_INT_NEAR); + LOOPBACK_CHECK(DATA_WS, DATA_WS); + LOOPBACK_CHECK(FORCE_EXT_LINK, FORCE_EXT_LINK); +#undef LOOPBACK_CHECK + + /* Build bitmask of possible loopback types */ + EFX_ZERO_QWORD(mask); + + if ((loopback_kind == EFX_LOOPBACK_KIND_OFF) || + (loopback_kind == EFX_LOOPBACK_KIND_ALL)) { + EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_OFF); + } + + if ((loopback_kind == EFX_LOOPBACK_KIND_MAC) || + (loopback_kind == EFX_LOOPBACK_KIND_ALL)) { + /* + * The "MAC" grouping has historically been used by drivers to + * mean loopbacks supported by on-chip hardware. Keep that + * meaning here, and include on-chip PHY layer loopbacks. + */ + EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_DATA); + EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_GMAC); + EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_XGMII); + EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_XGXS); + EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_XAUI); + EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_GMII); + EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_SGMII); + EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_XGBR); + EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_XFI); + EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_XAUI_FAR); + EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_GMII_FAR); + EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_SGMII_FAR); + EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_XFI_FAR); + EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_PMA_INT); + EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_SD_NEAR); + EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_SD_FAR); + } + + if ((loopback_kind == EFX_LOOPBACK_KIND_PHY) || + (loopback_kind == EFX_LOOPBACK_KIND_ALL)) { + /* + * The "PHY" grouping has historically been used by drivers to + * mean loopbacks supported by off-chip hardware. Keep that + * meaning here. 
+ */ + EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_GPHY); + EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_PHY_XS); + EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_PCS); + EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_PMA_PMD); + } + + *maskp = mask; +} + + __checkReturn efx_rc_t +efx_mcdi_get_loopback_modes( + __in efx_nic_t *enp) +{ + efx_nic_cfg_t *encp = &(enp->en_nic_cfg); + efx_mcdi_req_t req; + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_LOOPBACK_MODES_IN_LEN, + MC_CMD_GET_LOOPBACK_MODES_OUT_V2_LEN); + efx_qword_t mask; + efx_qword_t modes; + efx_rc_t rc; + + req.emr_cmd = MC_CMD_GET_LOOPBACK_MODES; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_GET_LOOPBACK_MODES_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_GET_LOOPBACK_MODES_OUT_V2_LEN; + + efx_mcdi_execute(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail1; + } + + if (req.emr_out_length_used < + MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_OFST + + MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LEN) { + rc = EMSGSIZE; + goto fail2; + } + + /* + * We assert the MC_CMD_LOOPBACK and EFX_LOOPBACK namespaces agree + * in efx_loopback_mask() and in siena_phy.c:siena_phy_get_link(). + */ + efx_loopback_mask(EFX_LOOPBACK_KIND_ALL, &mask); + + EFX_AND_QWORD(mask, + *MCDI_OUT2(req, efx_qword_t, GET_LOOPBACK_MODES_OUT_SUGGESTED)); + + modes = *MCDI_OUT2(req, efx_qword_t, GET_LOOPBACK_MODES_OUT_100M); + EFX_AND_QWORD(modes, mask); + encp->enc_loopback_types[EFX_LINK_100FDX] = modes; + + modes = *MCDI_OUT2(req, efx_qword_t, GET_LOOPBACK_MODES_OUT_1G); + EFX_AND_QWORD(modes, mask); + encp->enc_loopback_types[EFX_LINK_1000FDX] = modes; + + modes = *MCDI_OUT2(req, efx_qword_t, GET_LOOPBACK_MODES_OUT_10G); + EFX_AND_QWORD(modes, mask); + encp->enc_loopback_types[EFX_LINK_10000FDX] = modes; + + if (req.emr_out_length_used >= + MC_CMD_GET_LOOPBACK_MODES_OUT_40G_OFST + + MC_CMD_GET_LOOPBACK_MODES_OUT_40G_LEN) { + /* Response includes 40G loopback modes */ + modes = *MCDI_OUT2(req, efx_qword_t, + GET_LOOPBACK_MODES_OUT_40G); + EFX_AND_QWORD(modes, mask); + encp->enc_loopback_types[EFX_LINK_40000FDX] = modes; + } + + if (req.emr_out_length_used >= + MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_OFST + + MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_LEN) { + /* Response includes 25G loopback modes */ + modes = *MCDI_OUT2(req, efx_qword_t, + GET_LOOPBACK_MODES_OUT_V2_25G); + EFX_AND_QWORD(modes, mask); + encp->enc_loopback_types[EFX_LINK_25000FDX] = modes; + } + + if (req.emr_out_length_used >= + MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_OFST + + MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_LEN) { + /* Response includes 50G loopback modes */ + modes = *MCDI_OUT2(req, efx_qword_t, + GET_LOOPBACK_MODES_OUT_V2_50G); + EFX_AND_QWORD(modes, mask); + encp->enc_loopback_types[EFX_LINK_50000FDX] = modes; + } + + if (req.emr_out_length_used >= + MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_OFST + + MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_LEN) { + /* Response includes 100G loopback modes */ + modes = *MCDI_OUT2(req, efx_qword_t, + GET_LOOPBACK_MODES_OUT_V2_100G); + EFX_AND_QWORD(modes, mask); + encp->enc_loopback_types[EFX_LINK_100000FDX] = modes; + } + + EFX_ZERO_QWORD(modes); + EFX_SET_QWORD_BIT(modes, EFX_LOOPBACK_OFF); + EFX_OR_QWORD(modes, encp->enc_loopback_types[EFX_LINK_100FDX]); + EFX_OR_QWORD(modes, encp->enc_loopback_types[EFX_LINK_1000FDX]); + EFX_OR_QWORD(modes, encp->enc_loopback_types[EFX_LINK_10000FDX]); + EFX_OR_QWORD(modes, encp->enc_loopback_types[EFX_LINK_40000FDX]); + EFX_OR_QWORD(modes, encp->enc_loopback_types[EFX_LINK_25000FDX]); + EFX_OR_QWORD(modes, 
encp->enc_loopback_types[EFX_LINK_50000FDX]); + EFX_OR_QWORD(modes, encp->enc_loopback_types[EFX_LINK_100000FDX]); + encp->enc_loopback_types[EFX_LINK_UNKNOWN] = modes; + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +#endif /* EFSYS_OPT_LOOPBACK */ + + __checkReturn efx_rc_t +efx_nic_calculate_pcie_link_bandwidth( + __in uint32_t pcie_link_width, + __in uint32_t pcie_link_gen, + __out uint32_t *bandwidth_mbpsp) +{ + uint32_t lane_bandwidth; + uint32_t total_bandwidth; + efx_rc_t rc; + + if ((pcie_link_width == 0) || (pcie_link_width > 16) || + !ISP2(pcie_link_width)) { + rc = EINVAL; + goto fail1; + } + + switch (pcie_link_gen) { + case EFX_PCIE_LINK_SPEED_GEN1: + /* 2.5 Gb/s raw bandwidth with 8b/10b encoding */ + lane_bandwidth = 2000; + break; + case EFX_PCIE_LINK_SPEED_GEN2: + /* 5.0 Gb/s raw bandwidth with 8b/10b encoding */ + lane_bandwidth = 4000; + break; + case EFX_PCIE_LINK_SPEED_GEN3: + /* 8.0 Gb/s raw bandwidth with 128b/130b encoding */ + lane_bandwidth = 7877; + break; + default: + rc = EINVAL; + goto fail2; + } + + total_bandwidth = lane_bandwidth * pcie_link_width; + *bandwidth_mbpsp = total_bandwidth; + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +#if EFSYS_OPT_FW_SUBVARIANT_AWARE + + __checkReturn efx_rc_t +efx_nic_get_fw_subvariant( + __in efx_nic_t *enp, + __out efx_nic_fw_subvariant_t *subvariantp) +{ + efx_rc_t rc; + uint32_t value; + + rc = efx_mcdi_get_nic_global(enp, + MC_CMD_SET_NIC_GLOBAL_IN_FIRMWARE_SUBVARIANT, &value); + if (rc != 0) + goto fail1; + + /* Mapping is not required since values match MCDI */ + EFX_STATIC_ASSERT(EFX_NIC_FW_SUBVARIANT_DEFAULT == + MC_CMD_SET_NIC_GLOBAL_IN_FW_SUBVARIANT_DEFAULT); + EFX_STATIC_ASSERT(EFX_NIC_FW_SUBVARIANT_NO_TX_CSUM == + MC_CMD_SET_NIC_GLOBAL_IN_FW_SUBVARIANT_NO_TX_CSUM); + + switch (value) { + case MC_CMD_SET_NIC_GLOBAL_IN_FW_SUBVARIANT_DEFAULT: + case MC_CMD_SET_NIC_GLOBAL_IN_FW_SUBVARIANT_NO_TX_CSUM: + *subvariantp = value; + break; + default: + rc = EINVAL; + goto fail2; + } + + return (0); + +fail2: + EFSYS_PROBE(fail2); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +efx_nic_set_fw_subvariant( + __in efx_nic_t *enp, + __in efx_nic_fw_subvariant_t subvariant) +{ + efx_rc_t rc; + + switch (subvariant) { + case EFX_NIC_FW_SUBVARIANT_DEFAULT: + case EFX_NIC_FW_SUBVARIANT_NO_TX_CSUM: + /* Mapping is not required since values match MCDI */ + break; + default: + rc = EINVAL; + goto fail1; + } + + rc = efx_mcdi_set_nic_global(enp, + MC_CMD_SET_NIC_GLOBAL_IN_FIRMWARE_SUBVARIANT, subvariant); + if (rc != 0) + goto fail2; + + return (0); + +fail2: + EFSYS_PROBE(fail2); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +#endif /* EFSYS_OPT_FW_SUBVARIANT_AWARE */ + + __checkReturn efx_rc_t +efx_nic_check_pcie_link_speed( + __in efx_nic_t *enp, + __in uint32_t pcie_link_width, + __in uint32_t pcie_link_gen, + __out efx_pcie_link_performance_t *resultp) +{ + efx_nic_cfg_t *encp = &(enp->en_nic_cfg); + uint32_t bandwidth; + efx_pcie_link_performance_t result; + efx_rc_t rc; + + if ((encp->enc_required_pcie_bandwidth_mbps == 0) || + (pcie_link_width == 0) || (pcie_link_width == 32) || + (pcie_link_gen == 0)) { + /* + * No usable info on what is required and/or in use. In virtual + * machines, sometimes the PCIe link width is reported as 0 or + * 32, or the speed as 0. 
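+		 * In that case, skip the bandwidth check below and report the
+		 * link performance as unknown rather than guessing.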
+ */ + result = EFX_PCIE_LINK_PERFORMANCE_UNKNOWN_BANDWIDTH; + goto out; + } + + /* Calculate the available bandwidth in megabits per second */ + rc = efx_nic_calculate_pcie_link_bandwidth(pcie_link_width, + pcie_link_gen, &bandwidth); + if (rc != 0) + goto fail1; + + if (bandwidth < encp->enc_required_pcie_bandwidth_mbps) { + result = EFX_PCIE_LINK_PERFORMANCE_SUBOPTIMAL_BANDWIDTH; + } else if (pcie_link_gen < encp->enc_max_pcie_link_gen) { + /* The link provides enough bandwidth but not optimal latency */ + result = EFX_PCIE_LINK_PERFORMANCE_SUBOPTIMAL_LATENCY; + } else { + result = EFX_PCIE_LINK_PERFORMANCE_OPTIMAL; + } + +out: + *resultp = result; + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} diff --git a/src/spdk/dpdk/drivers/net/sfc/base/efx_nvram.c b/src/spdk/dpdk/drivers/net/sfc/base/efx_nvram.c new file mode 100644 index 000000000..f5d8ae0b0 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/base/efx_nvram.c @@ -0,0 +1,1105 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2009-2019 Solarflare Communications Inc. + */ + +#include "efx.h" +#include "efx_impl.h" + +#if EFSYS_OPT_NVRAM + +#if EFSYS_OPT_SIENA + +static const efx_nvram_ops_t __efx_nvram_siena_ops = { +#if EFSYS_OPT_DIAG + siena_nvram_test, /* envo_test */ +#endif /* EFSYS_OPT_DIAG */ + siena_nvram_type_to_partn, /* envo_type_to_partn */ + siena_nvram_partn_info, /* envo_partn_info */ + siena_nvram_partn_rw_start, /* envo_partn_rw_start */ + siena_nvram_partn_read, /* envo_partn_read */ + siena_nvram_partn_read, /* envo_partn_read_backup */ + siena_nvram_partn_erase, /* envo_partn_erase */ + siena_nvram_partn_write, /* envo_partn_write */ + siena_nvram_partn_rw_finish, /* envo_partn_rw_finish */ + siena_nvram_partn_get_version, /* envo_partn_get_version */ + siena_nvram_partn_set_version, /* envo_partn_set_version */ + NULL, /* envo_partn_validate */ +}; + +#endif /* EFSYS_OPT_SIENA */ + +#if EFX_OPTS_EF10() + +static const efx_nvram_ops_t __efx_nvram_ef10_ops = { +#if EFSYS_OPT_DIAG + ef10_nvram_test, /* envo_test */ +#endif /* EFSYS_OPT_DIAG */ + ef10_nvram_type_to_partn, /* envo_type_to_partn */ + ef10_nvram_partn_info, /* envo_partn_info */ + ef10_nvram_partn_rw_start, /* envo_partn_rw_start */ + ef10_nvram_partn_read, /* envo_partn_read */ + ef10_nvram_partn_read_backup, /* envo_partn_read_backup */ + ef10_nvram_partn_erase, /* envo_partn_erase */ + ef10_nvram_partn_write, /* envo_partn_write */ + ef10_nvram_partn_rw_finish, /* envo_partn_rw_finish */ + ef10_nvram_partn_get_version, /* envo_partn_get_version */ + ef10_nvram_partn_set_version, /* envo_partn_set_version */ + ef10_nvram_buffer_validate, /* envo_buffer_validate */ +}; + +#endif /* EFX_OPTS_EF10() */ + + __checkReturn efx_rc_t +efx_nvram_init( + __in efx_nic_t *enp) +{ + const efx_nvram_ops_t *envop; + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); + EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_NVRAM)); + + switch (enp->en_family) { +#if EFSYS_OPT_SIENA + case EFX_FAMILY_SIENA: + envop = &__efx_nvram_siena_ops; + break; +#endif /* EFSYS_OPT_SIENA */ + +#if EFSYS_OPT_HUNTINGTON + case EFX_FAMILY_HUNTINGTON: + envop = &__efx_nvram_ef10_ops; + break; +#endif /* EFSYS_OPT_HUNTINGTON */ + +#if EFSYS_OPT_MEDFORD + case EFX_FAMILY_MEDFORD: + envop = &__efx_nvram_ef10_ops; + break; +#endif /* EFSYS_OPT_MEDFORD */ + +#if EFSYS_OPT_MEDFORD2 + case EFX_FAMILY_MEDFORD2: + envop = &__efx_nvram_ef10_ops; 
+ break; +#endif /* EFSYS_OPT_MEDFORD2 */ + + default: + EFSYS_ASSERT(0); + rc = ENOTSUP; + goto fail1; + } + + enp->en_envop = envop; + enp->en_mod_flags |= EFX_MOD_NVRAM; + + enp->en_nvram_partn_locked = EFX_NVRAM_PARTN_INVALID; + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +#if EFSYS_OPT_DIAG + + __checkReturn efx_rc_t +efx_nvram_test( + __in efx_nic_t *enp) +{ + const efx_nvram_ops_t *envop = enp->en_envop; + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM); + + if ((rc = envop->envo_test(enp)) != 0) + goto fail1; + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +#endif /* EFSYS_OPT_DIAG */ + + __checkReturn efx_rc_t +efx_nvram_size( + __in efx_nic_t *enp, + __in efx_nvram_type_t type, + __out size_t *sizep) +{ + const efx_nvram_ops_t *envop = enp->en_envop; + efx_nvram_info_t eni = { 0 }; + uint32_t partn; + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM); + + if ((rc = envop->envo_type_to_partn(enp, type, &partn)) != 0) + goto fail1; + + if ((rc = envop->envo_partn_info(enp, partn, &eni)) != 0) + goto fail2; + + *sizep = eni.eni_partn_size; + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + *sizep = 0; + + return (rc); +} + +extern __checkReturn efx_rc_t +efx_nvram_info( + __in efx_nic_t *enp, + __in efx_nvram_type_t type, + __out efx_nvram_info_t *enip) +{ + const efx_nvram_ops_t *envop = enp->en_envop; + uint32_t partn; + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM); + + if ((rc = envop->envo_type_to_partn(enp, type, &partn)) != 0) + goto fail1; + + if ((rc = envop->envo_partn_info(enp, partn, enip)) != 0) + goto fail2; + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + + __checkReturn efx_rc_t +efx_nvram_get_version( + __in efx_nic_t *enp, + __in efx_nvram_type_t type, + __out uint32_t *subtypep, + __out_ecount(4) uint16_t version[4]) +{ + const efx_nvram_ops_t *envop = enp->en_envop; + uint32_t partn; + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM); + + if ((rc = envop->envo_type_to_partn(enp, type, &partn)) != 0) + goto fail1; + + if ((rc = envop->envo_partn_get_version(enp, partn, + subtypep, version)) != 0) + goto fail2; + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +efx_nvram_rw_start( + __in efx_nic_t *enp, + __in efx_nvram_type_t type, + __out_opt size_t *chunk_sizep) +{ + const efx_nvram_ops_t *envop = enp->en_envop; + uint32_t partn; + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM); + + if ((rc = envop->envo_type_to_partn(enp, type, &partn)) != 0) + goto fail1; + + EFSYS_ASSERT3U(enp->en_nvram_partn_locked, ==, EFX_NVRAM_PARTN_INVALID); + + if ((rc = envop->envo_partn_rw_start(enp, partn, chunk_sizep)) != 0) + goto fail2; + + enp->en_nvram_partn_locked = partn; + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +efx_nvram_read_chunk( + __in efx_nic_t *enp, + __in efx_nvram_type_t type, + __in 
unsigned int offset, + __out_bcount(size) caddr_t data, + __in size_t size) +{ + const efx_nvram_ops_t *envop = enp->en_envop; + uint32_t partn; + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM); + + if ((rc = envop->envo_type_to_partn(enp, type, &partn)) != 0) + goto fail1; + + EFSYS_ASSERT3U(enp->en_nvram_partn_locked, ==, partn); + + if ((rc = envop->envo_partn_read(enp, partn, offset, data, size)) != 0) + goto fail2; + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +/* + * Read from the backup (writeable) store of an A/B partition. + * For non A/B partitions, there is only a single store, and so this + * function has the same behaviour as efx_nvram_read_chunk(). + */ + __checkReturn efx_rc_t +efx_nvram_read_backup( + __in efx_nic_t *enp, + __in efx_nvram_type_t type, + __in unsigned int offset, + __out_bcount(size) caddr_t data, + __in size_t size) +{ + const efx_nvram_ops_t *envop = enp->en_envop; + uint32_t partn; + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM); + + if ((rc = envop->envo_type_to_partn(enp, type, &partn)) != 0) + goto fail1; + + EFSYS_ASSERT3U(enp->en_nvram_partn_locked, ==, partn); + + if ((rc = envop->envo_partn_read_backup(enp, partn, offset, + data, size)) != 0) + goto fail2; + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +efx_nvram_erase( + __in efx_nic_t *enp, + __in efx_nvram_type_t type) +{ + const efx_nvram_ops_t *envop = enp->en_envop; + unsigned int offset = 0; + efx_nvram_info_t eni = { 0 }; + uint32_t partn; + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM); + + if ((rc = envop->envo_type_to_partn(enp, type, &partn)) != 0) + goto fail1; + + EFSYS_ASSERT3U(enp->en_nvram_partn_locked, ==, partn); + + if ((rc = envop->envo_partn_info(enp, partn, &eni)) != 0) + goto fail2; + + if ((rc = envop->envo_partn_erase(enp, partn, offset, + eni.eni_partn_size)) != 0) + goto fail3; + + return (0); + +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +efx_nvram_write_chunk( + __in efx_nic_t *enp, + __in efx_nvram_type_t type, + __in unsigned int offset, + __in_bcount(size) caddr_t data, + __in size_t size) +{ + const efx_nvram_ops_t *envop = enp->en_envop; + uint32_t partn; + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM); + + if ((rc = envop->envo_type_to_partn(enp, type, &partn)) != 0) + goto fail1; + + EFSYS_ASSERT3U(enp->en_nvram_partn_locked, ==, partn); + + if ((rc = envop->envo_partn_write(enp, partn, offset, data, size)) != 0) + goto fail2; + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +efx_nvram_rw_finish( + __in efx_nic_t *enp, + __in efx_nvram_type_t type, + __out_opt uint32_t *verify_resultp) +{ + const efx_nvram_ops_t *envop = enp->en_envop; + uint32_t partn; + uint32_t verify_result = 0; + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM); + + if ((rc = envop->envo_type_to_partn(enp, type, &partn)) != 0) + goto fail1; + + 
EFSYS_ASSERT3U(enp->en_nvram_partn_locked, ==, partn); + + if ((rc = envop->envo_partn_rw_finish(enp, partn, &verify_result)) != 0) + goto fail2; + + enp->en_nvram_partn_locked = EFX_NVRAM_PARTN_INVALID; + + if (verify_resultp != NULL) + *verify_resultp = verify_result; + + return (0); + +fail2: + EFSYS_PROBE(fail2); + enp->en_nvram_partn_locked = EFX_NVRAM_PARTN_INVALID; + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + /* Always report verification result */ + if (verify_resultp != NULL) + *verify_resultp = verify_result; + + return (rc); +} + + __checkReturn efx_rc_t +efx_nvram_set_version( + __in efx_nic_t *enp, + __in efx_nvram_type_t type, + __in_ecount(4) uint16_t version[4]) +{ + const efx_nvram_ops_t *envop = enp->en_envop; + uint32_t partn; + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM); + + if ((rc = envop->envo_type_to_partn(enp, type, &partn)) != 0) + goto fail1; + + /* + * The Siena implementation of envo_set_version() will attempt to + * acquire the NVRAM_UPDATE lock for the DYNAMIC_CONFIG partition. + * Therefore, you can't have already acquired the NVRAM_UPDATE lock. + */ + EFSYS_ASSERT3U(enp->en_nvram_partn_locked, ==, EFX_NVRAM_PARTN_INVALID); + + if ((rc = envop->envo_partn_set_version(enp, partn, version)) != 0) + goto fail2; + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +/* Validate buffer contents (before writing to flash) */ + __checkReturn efx_rc_t +efx_nvram_validate( + __in efx_nic_t *enp, + __in efx_nvram_type_t type, + __in_bcount(partn_size) caddr_t partn_data, + __in size_t partn_size) +{ + const efx_nvram_ops_t *envop = enp->en_envop; + uint32_t partn; + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM); + + if ((rc = envop->envo_type_to_partn(enp, type, &partn)) != 0) + goto fail1; + + if (envop->envo_buffer_validate != NULL) { + if ((rc = envop->envo_buffer_validate(partn, + partn_data, partn_size)) != 0) + goto fail2; + } + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + +void +efx_nvram_fini( + __in efx_nic_t *enp) +{ + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM); + + EFSYS_ASSERT3U(enp->en_nvram_partn_locked, ==, EFX_NVRAM_PARTN_INVALID); + + enp->en_envop = NULL; + enp->en_mod_flags &= ~EFX_MOD_NVRAM; +} + +#endif /* EFSYS_OPT_NVRAM */ + +#if EFSYS_OPT_NVRAM || EFSYS_OPT_VPD + +/* + * Internal MCDI request handling + */ + + __checkReturn efx_rc_t +efx_mcdi_nvram_partitions( + __in efx_nic_t *enp, + __out_bcount(size) caddr_t data, + __in size_t size, + __out unsigned int *npartnp) +{ + efx_mcdi_req_t req; + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_NVRAM_PARTITIONS_IN_LEN, + MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX); + unsigned int npartn; + efx_rc_t rc; + + req.emr_cmd = MC_CMD_NVRAM_PARTITIONS; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_NVRAM_PARTITIONS_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX; + + efx_mcdi_execute(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail1; + } + + if (req.emr_out_length_used < MC_CMD_NVRAM_PARTITIONS_OUT_LENMIN) { + rc = EMSGSIZE; + goto fail2; + } 
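+	/* Response carries the partition count followed by the TYPE_ID array */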
+ npartn = MCDI_OUT_DWORD(req, NVRAM_PARTITIONS_OUT_NUM_PARTITIONS); + + if (req.emr_out_length_used < MC_CMD_NVRAM_PARTITIONS_OUT_LEN(npartn)) { + rc = ENOENT; + goto fail3; + } + + if (size < npartn * sizeof (uint32_t)) { + rc = ENOSPC; + goto fail3; + } + + *npartnp = npartn; + + memcpy(data, + MCDI_OUT2(req, uint32_t, NVRAM_PARTITIONS_OUT_TYPE_ID), + (npartn * sizeof (uint32_t))); + + return (0); + +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +efx_mcdi_nvram_metadata( + __in efx_nic_t *enp, + __in uint32_t partn, + __out uint32_t *subtypep, + __out_ecount(4) uint16_t version[4], + __out_bcount_opt(size) char *descp, + __in size_t size) +{ + efx_mcdi_req_t req; + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_NVRAM_METADATA_IN_LEN, + MC_CMD_NVRAM_METADATA_OUT_LENMAX); + efx_rc_t rc; + + req.emr_cmd = MC_CMD_NVRAM_METADATA; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_NVRAM_METADATA_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_NVRAM_METADATA_OUT_LENMAX; + + MCDI_IN_SET_DWORD(req, NVRAM_METADATA_IN_TYPE, partn); + + efx_mcdi_execute_quiet(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail1; + } + + if (req.emr_out_length_used < MC_CMD_NVRAM_METADATA_OUT_LENMIN) { + rc = EMSGSIZE; + goto fail2; + } + + if (MCDI_OUT_DWORD_FIELD(req, NVRAM_METADATA_OUT_FLAGS, + NVRAM_METADATA_OUT_SUBTYPE_VALID)) { + *subtypep = MCDI_OUT_DWORD(req, NVRAM_METADATA_OUT_SUBTYPE); + } else { + *subtypep = 0; + } + + if (MCDI_OUT_DWORD_FIELD(req, NVRAM_METADATA_OUT_FLAGS, + NVRAM_METADATA_OUT_VERSION_VALID)) { + version[0] = MCDI_OUT_WORD(req, NVRAM_METADATA_OUT_VERSION_W); + version[1] = MCDI_OUT_WORD(req, NVRAM_METADATA_OUT_VERSION_X); + version[2] = MCDI_OUT_WORD(req, NVRAM_METADATA_OUT_VERSION_Y); + version[3] = MCDI_OUT_WORD(req, NVRAM_METADATA_OUT_VERSION_Z); + } else { + version[0] = version[1] = version[2] = version[3] = 0; + } + + if (MCDI_OUT_DWORD_FIELD(req, NVRAM_METADATA_OUT_FLAGS, + NVRAM_METADATA_OUT_DESCRIPTION_VALID)) { + /* Return optional description string */ + if ((descp != NULL) && (size > 0)) { + size_t desclen; + + descp[0] = '\0'; + desclen = (req.emr_out_length_used + - MC_CMD_NVRAM_METADATA_OUT_LEN(0)); + + EFSYS_ASSERT3U(desclen, <=, + MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_MAXNUM); + + if (size < desclen) { + rc = ENOSPC; + goto fail3; + } + + memcpy(descp, MCDI_OUT2(req, char, + NVRAM_METADATA_OUT_DESCRIPTION), + desclen); + + /* Ensure string is NUL terminated */ + descp[desclen] = '\0'; + } + } + + return (0); + +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +efx_mcdi_nvram_info( + __in efx_nic_t *enp, + __in uint32_t partn, + __out efx_nvram_info_t *enip) +{ + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_NVRAM_INFO_IN_LEN, + MC_CMD_NVRAM_INFO_V2_OUT_LEN); + efx_mcdi_req_t req; + efx_rc_t rc; + + req.emr_cmd = MC_CMD_NVRAM_INFO; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_NVRAM_INFO_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_NVRAM_INFO_V2_OUT_LEN; + + MCDI_IN_SET_DWORD(req, NVRAM_INFO_IN_TYPE, partn); + + efx_mcdi_execute_quiet(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail1; + } + + if (req.emr_out_length_used < MC_CMD_NVRAM_INFO_OUT_LEN) { + rc = EMSGSIZE; + goto fail2; + } + + enip->eni_partn_size = MCDI_OUT_DWORD(req, NVRAM_INFO_OUT_SIZE); + + enip->eni_address = 
MCDI_OUT_DWORD(req, NVRAM_INFO_OUT_PHYSADDR); + + enip->eni_erase_size = MCDI_OUT_DWORD(req, NVRAM_INFO_OUT_ERASESIZE); + + enip->eni_write_size = + (req.emr_out_length_used < + MC_CMD_NVRAM_INFO_V2_OUT_LEN) ? + 0 : MCDI_OUT_DWORD(req, NVRAM_INFO_V2_OUT_WRITESIZE); + + enip->eni_flags = 0; + + if (MCDI_OUT_DWORD_FIELD(req, NVRAM_INFO_OUT_FLAGS, + NVRAM_INFO_OUT_PROTECTED)) + enip->eni_flags |= EFX_NVRAM_FLAG_READ_ONLY; + + if (MCDI_OUT_DWORD_FIELD(req, NVRAM_INFO_OUT_FLAGS, + NVRAM_INFO_OUT_READ_ONLY)) + enip->eni_flags |= EFX_NVRAM_FLAG_READ_ONLY; + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +/* + * MC_CMD_NVRAM_UPDATE_START_V2 must be used to support firmware-verified + * NVRAM updates. Older firmware will ignore the flags field in the request. + */ + __checkReturn efx_rc_t +efx_mcdi_nvram_update_start( + __in efx_nic_t *enp, + __in uint32_t partn) +{ + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_NVRAM_UPDATE_START_V2_IN_LEN, + MC_CMD_NVRAM_UPDATE_START_OUT_LEN); + efx_mcdi_req_t req; + efx_rc_t rc; + + req.emr_cmd = MC_CMD_NVRAM_UPDATE_START; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_NVRAM_UPDATE_START_V2_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_NVRAM_UPDATE_START_OUT_LEN; + + MCDI_IN_SET_DWORD(req, NVRAM_UPDATE_START_V2_IN_TYPE, partn); + + MCDI_IN_POPULATE_DWORD_1(req, NVRAM_UPDATE_START_V2_IN_FLAGS, + NVRAM_UPDATE_START_V2_IN_FLAG_REPORT_VERIFY_RESULT, 1); + + efx_mcdi_execute(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail1; + } + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +efx_mcdi_nvram_read( + __in efx_nic_t *enp, + __in uint32_t partn, + __in uint32_t offset, + __out_bcount(size) caddr_t data, + __in size_t size, + __in uint32_t mode) +{ + efx_mcdi_req_t req; + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_NVRAM_READ_IN_V2_LEN, + MC_CMD_NVRAM_READ_OUT_LENMAX); + efx_rc_t rc; + + if (size > MC_CMD_NVRAM_READ_OUT_LENMAX) { + rc = EINVAL; + goto fail1; + } + + req.emr_cmd = MC_CMD_NVRAM_READ; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_NVRAM_READ_IN_V2_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_NVRAM_READ_OUT_LENMAX; + + MCDI_IN_SET_DWORD(req, NVRAM_READ_IN_V2_TYPE, partn); + MCDI_IN_SET_DWORD(req, NVRAM_READ_IN_V2_OFFSET, offset); + MCDI_IN_SET_DWORD(req, NVRAM_READ_IN_V2_LENGTH, size); + MCDI_IN_SET_DWORD(req, NVRAM_READ_IN_V2_MODE, mode); + + efx_mcdi_execute(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail1; + } + + if (req.emr_out_length_used < MC_CMD_NVRAM_READ_OUT_LEN(size)) { + rc = EMSGSIZE; + goto fail2; + } + + memcpy(data, + MCDI_OUT2(req, uint8_t, NVRAM_READ_OUT_READ_BUFFER), + size); + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +efx_mcdi_nvram_erase( + __in efx_nic_t *enp, + __in uint32_t partn, + __in uint32_t offset, + __in size_t size) +{ + efx_mcdi_req_t req; + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_NVRAM_ERASE_IN_LEN, + MC_CMD_NVRAM_ERASE_OUT_LEN); + efx_rc_t rc; + + req.emr_cmd = MC_CMD_NVRAM_ERASE; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_NVRAM_ERASE_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_NVRAM_ERASE_OUT_LEN; + + MCDI_IN_SET_DWORD(req, NVRAM_ERASE_IN_TYPE, partn); + MCDI_IN_SET_DWORD(req, NVRAM_ERASE_IN_OFFSET, offset); + MCDI_IN_SET_DWORD(req, NVRAM_ERASE_IN_LENGTH, size); + + 
efx_mcdi_execute(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail1; + } + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +/* + * The NVRAM_WRITE MCDI command is a V1 command and so is supported by both + * Siena and EF10 based boards. However, EF10 based boards support the use + * of this command with payloads up to the maximum MCDI V2 payload length. + */ + __checkReturn efx_rc_t +efx_mcdi_nvram_write( + __in efx_nic_t *enp, + __in uint32_t partn, + __in uint32_t offset, + __in_bcount(size) caddr_t data, + __in size_t size) +{ + efx_mcdi_req_t req; + uint8_t *payload; + efx_rc_t rc; + size_t max_data_size; + size_t payload_len = enp->en_nic_cfg.enc_mcdi_max_payload_length; + + max_data_size = payload_len - MC_CMD_NVRAM_WRITE_IN_LEN(0); + EFSYS_ASSERT3U(payload_len, >, 0); + EFSYS_ASSERT3U(max_data_size, <, payload_len); + + if (size > max_data_size) { + rc = EINVAL; + goto fail1; + } + + EFSYS_KMEM_ALLOC(enp->en_esip, payload_len, payload); + if (payload == NULL) { + rc = ENOMEM; + goto fail2; + } + + (void) memset(payload, 0, payload_len); + req.emr_cmd = MC_CMD_NVRAM_WRITE; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_NVRAM_WRITE_IN_LEN(size); + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_NVRAM_WRITE_OUT_LEN; + + MCDI_IN_SET_DWORD(req, NVRAM_WRITE_IN_TYPE, partn); + MCDI_IN_SET_DWORD(req, NVRAM_WRITE_IN_OFFSET, offset); + MCDI_IN_SET_DWORD(req, NVRAM_WRITE_IN_LENGTH, size); + + memcpy(MCDI_IN2(req, uint8_t, NVRAM_WRITE_IN_WRITE_BUFFER), + data, size); + + efx_mcdi_execute(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail3; + } + + EFSYS_KMEM_FREE(enp->en_esip, payload_len, payload); + + return (0); + +fail3: + EFSYS_PROBE(fail3); + EFSYS_KMEM_FREE(enp->en_esip, payload_len, payload); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + +/* + * MC_CMD_NVRAM_UPDATE_FINISH_V2 must be used to support firmware-verified + * NVRAM updates. Older firmware will ignore the flags field in the request. + */ + __checkReturn efx_rc_t +efx_mcdi_nvram_update_finish( + __in efx_nic_t *enp, + __in uint32_t partn, + __in boolean_t reboot, + __in uint32_t flags, + __out_opt uint32_t *verify_resultp) +{ + const efx_nic_cfg_t *encp = &enp->en_nic_cfg; + efx_mcdi_req_t req; + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_LEN, + MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN); + uint32_t verify_result = MC_CMD_NVRAM_VERIFY_RC_UNKNOWN; + efx_rc_t rc = 0; + + req.emr_cmd = MC_CMD_NVRAM_UPDATE_FINISH; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN; + + MCDI_IN_SET_DWORD(req, NVRAM_UPDATE_FINISH_V2_IN_TYPE, partn); + MCDI_IN_SET_DWORD(req, NVRAM_UPDATE_FINISH_V2_IN_REBOOT, reboot); + + if (!encp->enc_nvram_update_poll_verify_result_supported) { + flags &= ~EFX_NVRAM_UPDATE_FLAGS_BACKGROUND; + flags &= ~EFX_NVRAM_UPDATE_FLAGS_POLL; + } + + MCDI_IN_POPULATE_DWORD_3(req, NVRAM_UPDATE_FINISH_V2_IN_FLAGS, + NVRAM_UPDATE_FINISH_V2_IN_FLAG_REPORT_VERIFY_RESULT, + 1, + NVRAM_UPDATE_FINISH_V2_IN_FLAG_RUN_IN_BACKGROUND, + (flags & EFX_NVRAM_UPDATE_FLAGS_BACKGROUND) ? 1 : 0, + NVRAM_UPDATE_FINISH_V2_IN_FLAG_POLL_VERIFY_RESULT, + (flags & EFX_NVRAM_UPDATE_FLAGS_POLL) ? 
1 : 0 + ); + + efx_mcdi_execute(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail1; + } + + if (req.emr_out_length_used < MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN) { + verify_result = MC_CMD_NVRAM_VERIFY_RC_UNKNOWN; + if (encp->enc_nvram_update_verify_result_supported) { + /* Result of update verification is missing */ + rc = EMSGSIZE; + goto fail2; + } + } else { + verify_result = + MCDI_OUT_DWORD(req, NVRAM_UPDATE_FINISH_V2_OUT_RESULT_CODE); + } + + if (encp->enc_nvram_update_verify_result_supported) { + if ((verify_result != MC_CMD_NVRAM_VERIFY_RC_SUCCESS) && + (verify_result != MC_CMD_NVRAM_VERIFY_RC_PENDING)) { + /* Update verification failed */ + rc = EINVAL; + goto fail3; + } + } + + if (verify_resultp != NULL) + *verify_resultp = verify_result; + + return (0); + +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + /* Always report verification result */ + if (verify_resultp != NULL) + *verify_resultp = verify_result; + + return (rc); +} + +#if EFSYS_OPT_DIAG + + __checkReturn efx_rc_t +efx_mcdi_nvram_test( + __in efx_nic_t *enp, + __in uint32_t partn) +{ + efx_mcdi_req_t req; + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_NVRAM_TEST_IN_LEN, + MC_CMD_NVRAM_TEST_OUT_LEN); + int result; + efx_rc_t rc; + + req.emr_cmd = MC_CMD_NVRAM_TEST; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_NVRAM_TEST_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_NVRAM_TEST_OUT_LEN; + + MCDI_IN_SET_DWORD(req, NVRAM_TEST_IN_TYPE, partn); + + efx_mcdi_execute(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail1; + } + + if (req.emr_out_length_used < MC_CMD_NVRAM_TEST_OUT_LEN) { + rc = EMSGSIZE; + goto fail2; + } + + result = MCDI_OUT_DWORD(req, NVRAM_TEST_OUT_RESULT); + if (result == MC_CMD_NVRAM_TEST_FAIL) { + + EFSYS_PROBE1(nvram_test_failure, int, partn); + + rc = (EINVAL); + goto fail3; + } + + return (0); + +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +#endif /* EFSYS_OPT_DIAG */ + + +#endif /* EFSYS_OPT_NVRAM || EFSYS_OPT_VPD */ diff --git a/src/spdk/dpdk/drivers/net/sfc/base/efx_phy.c b/src/spdk/dpdk/drivers/net/sfc/base/efx_phy.c new file mode 100644 index 000000000..841d14885 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/base/efx_phy.c @@ -0,0 +1,601 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2007-2019 Solarflare Communications Inc. 
+ */ + +#include "efx.h" +#include "efx_impl.h" + + +#if EFSYS_OPT_SIENA +static const efx_phy_ops_t __efx_phy_siena_ops = { + siena_phy_power, /* epo_power */ + NULL, /* epo_reset */ + siena_phy_reconfigure, /* epo_reconfigure */ + siena_phy_verify, /* epo_verify */ + siena_phy_oui_get, /* epo_oui_get */ + NULL, /* epo_link_state_get */ +#if EFSYS_OPT_PHY_STATS + siena_phy_stats_update, /* epo_stats_update */ +#endif /* EFSYS_OPT_PHY_STATS */ +#if EFSYS_OPT_BIST + NULL, /* epo_bist_enable_offline */ + siena_phy_bist_start, /* epo_bist_start */ + siena_phy_bist_poll, /* epo_bist_poll */ + siena_phy_bist_stop, /* epo_bist_stop */ +#endif /* EFSYS_OPT_BIST */ +}; +#endif /* EFSYS_OPT_SIENA */ + +#if EFX_OPTS_EF10() +static const efx_phy_ops_t __efx_phy_ef10_ops = { + ef10_phy_power, /* epo_power */ + NULL, /* epo_reset */ + ef10_phy_reconfigure, /* epo_reconfigure */ + ef10_phy_verify, /* epo_verify */ + ef10_phy_oui_get, /* epo_oui_get */ + ef10_phy_link_state_get, /* epo_link_state_get */ +#if EFSYS_OPT_PHY_STATS + ef10_phy_stats_update, /* epo_stats_update */ +#endif /* EFSYS_OPT_PHY_STATS */ +#if EFSYS_OPT_BIST + ef10_bist_enable_offline, /* epo_bist_enable_offline */ + ef10_bist_start, /* epo_bist_start */ + ef10_bist_poll, /* epo_bist_poll */ + ef10_bist_stop, /* epo_bist_stop */ +#endif /* EFSYS_OPT_BIST */ +}; +#endif /* EFX_OPTS_EF10() */ + + __checkReturn efx_rc_t +efx_phy_probe( + __in efx_nic_t *enp) +{ + efx_port_t *epp = &(enp->en_port); + efx_nic_cfg_t *encp = &(enp->en_nic_cfg); + const efx_phy_ops_t *epop; + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + + epp->ep_port = encp->enc_port; + epp->ep_phy_type = encp->enc_phy_type; + + /* Hook in operations structure */ + switch (enp->en_family) { +#if EFSYS_OPT_SIENA + case EFX_FAMILY_SIENA: + epop = &__efx_phy_siena_ops; + break; +#endif /* EFSYS_OPT_SIENA */ + +#if EFSYS_OPT_HUNTINGTON + case EFX_FAMILY_HUNTINGTON: + epop = &__efx_phy_ef10_ops; + break; +#endif /* EFSYS_OPT_HUNTINGTON */ + +#if EFSYS_OPT_MEDFORD + case EFX_FAMILY_MEDFORD: + epop = &__efx_phy_ef10_ops; + break; +#endif /* EFSYS_OPT_MEDFORD */ + +#if EFSYS_OPT_MEDFORD2 + case EFX_FAMILY_MEDFORD2: + epop = &__efx_phy_ef10_ops; + break; +#endif /* EFSYS_OPT_MEDFORD2 */ + + default: + rc = ENOTSUP; + goto fail1; + } + + epp->ep_epop = epop; + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + epp->ep_port = 0; + epp->ep_phy_type = 0; + + return (rc); +} + + __checkReturn efx_rc_t +efx_phy_verify( + __in efx_nic_t *enp) +{ + efx_port_t *epp = &(enp->en_port); + const efx_phy_ops_t *epop = epp->ep_epop; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT); + + return (epop->epo_verify(enp)); +} + +#if EFSYS_OPT_PHY_LED_CONTROL + + __checkReturn efx_rc_t +efx_phy_led_set( + __in efx_nic_t *enp, + __in efx_phy_led_mode_t mode) +{ + efx_nic_cfg_t *encp = (&enp->en_nic_cfg); + efx_port_t *epp = &(enp->en_port); + const efx_phy_ops_t *epop = epp->ep_epop; + uint32_t mask; + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT); + + if (epp->ep_phy_led_mode == mode) + goto done; + + mask = (1 << EFX_PHY_LED_DEFAULT); + mask |= encp->enc_led_mask; + + if (!((1 << mode) & mask)) { + rc = ENOTSUP; + goto fail1; + } + + EFSYS_ASSERT3U(mode, <, EFX_PHY_LED_NMODES); + epp->ep_phy_led_mode = mode; + + if ((rc = epop->epo_reconfigure(enp)) != 0) + goto fail2; + +done: + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + 
EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} +#endif /* EFSYS_OPT_PHY_LED_CONTROL */ + + void +efx_phy_adv_cap_get( + __in efx_nic_t *enp, + __in uint32_t flag, + __out uint32_t *maskp) +{ + efx_port_t *epp = &(enp->en_port); + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); + + switch (flag) { + case EFX_PHY_CAP_CURRENT: + *maskp = epp->ep_adv_cap_mask; + break; + case EFX_PHY_CAP_DEFAULT: + *maskp = epp->ep_default_adv_cap_mask; + break; + case EFX_PHY_CAP_PERM: + *maskp = epp->ep_phy_cap_mask; + break; + default: + EFSYS_ASSERT(B_FALSE); + *maskp = 0; + break; + } +} + + __checkReturn efx_rc_t +efx_phy_adv_cap_set( + __in efx_nic_t *enp, + __in uint32_t mask) +{ + efx_port_t *epp = &(enp->en_port); + const efx_phy_ops_t *epop = epp->ep_epop; + uint32_t old_mask; + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT); + + if ((mask & ~epp->ep_phy_cap_mask) != 0) { + rc = ENOTSUP; + goto fail1; + } + + if (epp->ep_adv_cap_mask == mask) + goto done; + + old_mask = epp->ep_adv_cap_mask; + epp->ep_adv_cap_mask = mask; + + if ((rc = epop->epo_reconfigure(enp)) != 0) + goto fail2; + +done: + return (0); + +fail2: + EFSYS_PROBE(fail2); + + epp->ep_adv_cap_mask = old_mask; + /* Reconfigure for robustness */ + if (epop->epo_reconfigure(enp) != 0) { + /* + * We may have an inconsistent view of our advertised speed + * capabilities. + */ + EFSYS_ASSERT(0); + } + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + void +efx_phy_lp_cap_get( + __in efx_nic_t *enp, + __out uint32_t *maskp) +{ + efx_port_t *epp = &(enp->en_port); + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT); + + *maskp = epp->ep_lp_cap_mask; +} + + __checkReturn efx_rc_t +efx_phy_oui_get( + __in efx_nic_t *enp, + __out uint32_t *ouip) +{ + efx_port_t *epp = &(enp->en_port); + const efx_phy_ops_t *epop = epp->ep_epop; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT); + + return (epop->epo_oui_get(enp, ouip)); +} + + void +efx_phy_media_type_get( + __in efx_nic_t *enp, + __out efx_phy_media_type_t *typep) +{ + efx_port_t *epp = &(enp->en_port); + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT); + + if (epp->ep_module_type != EFX_PHY_MEDIA_INVALID) + *typep = epp->ep_module_type; + else + *typep = epp->ep_fixed_port_type; +} + + __checkReturn efx_rc_t +efx_phy_module_get_info( + __in efx_nic_t *enp, + __in uint8_t dev_addr, + __in size_t offset, + __in size_t len, + __out_bcount(len) uint8_t *data) +{ + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT(data != NULL); + + if ((offset > EFX_PHY_MEDIA_INFO_MAX_OFFSET) || + ((offset + len) > EFX_PHY_MEDIA_INFO_MAX_OFFSET)) { + rc = EINVAL; + goto fail1; + } + + if ((rc = efx_mcdi_phy_module_get_info(enp, dev_addr, + offset, len, data)) != 0) + goto fail2; + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +efx_phy_fec_type_get( + __in efx_nic_t *enp, + __out efx_phy_fec_type_t *typep) +{ + efx_rc_t rc; + efx_phy_link_state_t epls; + + if ((rc = efx_phy_link_state_get(enp, &epls)) != 0) + goto fail1; + + *typep = epls.epls_fec; + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t 
+efx_phy_link_state_get( + __in efx_nic_t *enp, + __out efx_phy_link_state_t *eplsp) +{ + efx_port_t *epp = &(enp->en_port); + const efx_phy_ops_t *epop = epp->ep_epop; + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); + + if (epop->epo_link_state_get == NULL) { + rc = ENOTSUP; + goto fail1; + } + + if ((rc = epop->epo_link_state_get(enp, eplsp)) != 0) + goto fail2; + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +#if EFSYS_OPT_PHY_STATS + +#if EFSYS_OPT_NAMES + +/* START MKCONFIG GENERATED PhyStatNamesBlock af9ffa24da3bc100 */ +static const char * const __efx_phy_stat_name[] = { + "oui", + "pma_pmd_link_up", + "pma_pmd_rx_fault", + "pma_pmd_tx_fault", + "pma_pmd_rev_a", + "pma_pmd_rev_b", + "pma_pmd_rev_c", + "pma_pmd_rev_d", + "pcs_link_up", + "pcs_rx_fault", + "pcs_tx_fault", + "pcs_ber", + "pcs_block_errors", + "phy_xs_link_up", + "phy_xs_rx_fault", + "phy_xs_tx_fault", + "phy_xs_align", + "phy_xs_sync_a", + "phy_xs_sync_b", + "phy_xs_sync_c", + "phy_xs_sync_d", + "an_link_up", + "an_master", + "an_local_rx_ok", + "an_remote_rx_ok", + "cl22ext_link_up", + "snr_a", + "snr_b", + "snr_c", + "snr_d", + "pma_pmd_signal_a", + "pma_pmd_signal_b", + "pma_pmd_signal_c", + "pma_pmd_signal_d", + "an_complete", + "pma_pmd_rev_major", + "pma_pmd_rev_minor", + "pma_pmd_rev_micro", + "pcs_fw_version_0", + "pcs_fw_version_1", + "pcs_fw_version_2", + "pcs_fw_version_3", + "pcs_fw_build_yy", + "pcs_fw_build_mm", + "pcs_fw_build_dd", + "pcs_op_mode", +}; + +/* END MKCONFIG GENERATED PhyStatNamesBlock */ + + const char * +efx_phy_stat_name( + __in efx_nic_t *enp, + __in efx_phy_stat_t type) +{ + _NOTE(ARGUNUSED(enp)) + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(type, <, EFX_PHY_NSTATS); + + return (__efx_phy_stat_name[type]); +} + +#endif /* EFSYS_OPT_NAMES */ + + __checkReturn efx_rc_t +efx_phy_stats_update( + __in efx_nic_t *enp, + __in efsys_mem_t *esmp, + __inout_ecount(EFX_PHY_NSTATS) uint32_t *stat) +{ + efx_port_t *epp = &(enp->en_port); + const efx_phy_ops_t *epop = epp->ep_epop; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT); + + return (epop->epo_stats_update(enp, esmp, stat)); +} + +#endif /* EFSYS_OPT_PHY_STATS */ + + +#if EFSYS_OPT_BIST + + __checkReturn efx_rc_t +efx_bist_enable_offline( + __in efx_nic_t *enp) +{ + efx_port_t *epp = &(enp->en_port); + const efx_phy_ops_t *epop = epp->ep_epop; + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + + if (epop->epo_bist_enable_offline == NULL) { + rc = ENOTSUP; + goto fail1; + } + + if ((rc = epop->epo_bist_enable_offline(enp)) != 0) + goto fail2; + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); + +} + + __checkReturn efx_rc_t +efx_bist_start( + __in efx_nic_t *enp, + __in efx_bist_type_t type) +{ + efx_port_t *epp = &(enp->en_port); + const efx_phy_ops_t *epop = epp->ep_epop; + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + + EFSYS_ASSERT3U(type, !=, EFX_BIST_TYPE_UNKNOWN); + EFSYS_ASSERT3U(type, <, EFX_BIST_TYPE_NTYPES); + EFSYS_ASSERT3U(epp->ep_current_bist, ==, EFX_BIST_TYPE_UNKNOWN); + + if (epop->epo_bist_start == NULL) { + rc = ENOTSUP; + goto fail1; + } + + if ((rc = epop->epo_bist_start(enp, type)) != 0) + goto fail2; + + epp->ep_current_bist = type; + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + 
EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +efx_bist_poll( + __in efx_nic_t *enp, + __in efx_bist_type_t type, + __out efx_bist_result_t *resultp, + __out_opt uint32_t *value_maskp, + __out_ecount_opt(count) unsigned long *valuesp, + __in size_t count) +{ + efx_port_t *epp = &(enp->en_port); + const efx_phy_ops_t *epop = epp->ep_epop; + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + + EFSYS_ASSERT3U(type, !=, EFX_BIST_TYPE_UNKNOWN); + EFSYS_ASSERT3U(type, <, EFX_BIST_TYPE_NTYPES); + EFSYS_ASSERT3U(epp->ep_current_bist, ==, type); + + EFSYS_ASSERT(epop->epo_bist_poll != NULL); + if (epop->epo_bist_poll == NULL) { + rc = ENOTSUP; + goto fail1; + } + + if ((rc = epop->epo_bist_poll(enp, type, resultp, value_maskp, + valuesp, count)) != 0) + goto fail2; + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + void +efx_bist_stop( + __in efx_nic_t *enp, + __in efx_bist_type_t type) +{ + efx_port_t *epp = &(enp->en_port); + const efx_phy_ops_t *epop = epp->ep_epop; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + + EFSYS_ASSERT3U(type, !=, EFX_BIST_TYPE_UNKNOWN); + EFSYS_ASSERT3U(type, <, EFX_BIST_TYPE_NTYPES); + EFSYS_ASSERT3U(epp->ep_current_bist, ==, type); + + EFSYS_ASSERT(epop->epo_bist_stop != NULL); + + if (epop->epo_bist_stop != NULL) + epop->epo_bist_stop(enp, type); + + epp->ep_current_bist = EFX_BIST_TYPE_UNKNOWN; +} + +#endif /* EFSYS_OPT_BIST */ + void +efx_phy_unprobe( + __in efx_nic_t *enp) +{ + efx_port_t *epp = &(enp->en_port); + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + + epp->ep_epop = NULL; + + epp->ep_adv_cap_mask = 0; + + epp->ep_port = 0; + epp->ep_phy_type = 0; +} diff --git a/src/spdk/dpdk/drivers/net/sfc/base/efx_phy_ids.h b/src/spdk/dpdk/drivers/net/sfc/base/efx_phy_ids.h new file mode 100644 index 000000000..1ff97cf99 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/base/efx_phy_ids.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2013-2019 Solarflare Communications Inc. + */ + +#ifndef _SYS_EFX_PHY_IDS_H +#define _SYS_EFX_PHY_IDS_H + +#define EFX_PHY_NULL 0 + +typedef enum efx_phy_type_e { /* GENERATED BY scripts/genfwdef */ + EFX_PHY_TXC43128 = 1, + EFX_PHY_SFX7101 = 3, + EFX_PHY_QT2022C2 = 4, + EFX_PHY_PM8358 = 6, + EFX_PHY_SFT9001A = 8, + EFX_PHY_QT2025C = 9, + EFX_PHY_SFT9001B = 10, + EFX_PHY_QLX111V = 12, + EFX_PHY_QT2025_KR = 17, + EFX_PHY_AEL3020 = 18, + EFX_PHY_XFI_FARMI = 19, +} efx_phy_type_t; + + +#endif /* _SYS_EFX_PHY_IDS_H */ diff --git a/src/spdk/dpdk/drivers/net/sfc/base/efx_port.c b/src/spdk/dpdk/drivers/net/sfc/base/efx_port.c new file mode 100644 index 000000000..844603654 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/base/efx_port.c @@ -0,0 +1,230 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2009-2019 Solarflare Communications Inc. 
+ */ + +#include "efx.h" +#include "efx_impl.h" + + __checkReturn efx_rc_t +efx_port_init( + __in efx_nic_t *enp) +{ + efx_port_t *epp = &(enp->en_port); + const efx_phy_ops_t *epop = epp->ep_epop; + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC); + + if (enp->en_mod_flags & EFX_MOD_PORT) { + rc = EINVAL; + goto fail1; + } + + enp->en_mod_flags |= EFX_MOD_PORT; + + epp->ep_mac_type = EFX_MAC_INVALID; + epp->ep_link_mode = EFX_LINK_UNKNOWN; + epp->ep_mac_drain = B_TRUE; + + /* Configure the MAC */ + if ((rc = efx_mac_select(enp)) != 0) + goto fail1; + + epp->ep_emop->emo_reconfigure(enp); + + /* Pick up current phy capabilities */ + (void) efx_port_poll(enp, NULL); + + /* + * Turn on the PHY if available, otherwise reset it, and + * reconfigure it with the current configuration. + */ + if (epop->epo_power != NULL) { + if ((rc = epop->epo_power(enp, B_TRUE)) != 0) + goto fail2; + } else { + if ((rc = epop->epo_reset(enp)) != 0) + goto fail2; + } + + EFSYS_ASSERT(enp->en_reset_flags & EFX_RESET_PHY); + enp->en_reset_flags &= ~EFX_RESET_PHY; + + if ((rc = epop->epo_reconfigure(enp)) != 0) + goto fail3; + + return (0); + +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + enp->en_mod_flags &= ~EFX_MOD_PORT; + + return (rc); +} + + __checkReturn efx_rc_t +efx_port_poll( + __in efx_nic_t *enp, + __out_opt efx_link_mode_t *link_modep) +{ + efx_port_t *epp = &(enp->en_port); + const efx_mac_ops_t *emop = epp->ep_emop; + efx_link_mode_t ignore_link_mode; + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT); + + EFSYS_ASSERT(emop != NULL); + + if (link_modep == NULL) + link_modep = &ignore_link_mode; + + if ((rc = emop->emo_poll(enp, link_modep)) != 0) + goto fail1; + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +#if EFSYS_OPT_LOOPBACK + + __checkReturn efx_rc_t +efx_port_loopback_set( + __in efx_nic_t *enp, + __in efx_link_mode_t link_mode, + __in efx_loopback_type_t loopback_type) +{ + efx_port_t *epp = &(enp->en_port); + efx_nic_cfg_t *encp = &(enp->en_nic_cfg); + const efx_mac_ops_t *emop = epp->ep_emop; + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT); + EFSYS_ASSERT(emop != NULL); + + EFSYS_ASSERT(link_mode < EFX_LINK_NMODES); + + if (EFX_TEST_QWORD_BIT(encp->enc_loopback_types[link_mode], + (int)loopback_type) == 0) { + rc = ENOTSUP; + goto fail1; + } + + if (epp->ep_loopback_type == loopback_type && + epp->ep_loopback_link_mode == link_mode) + return (0); + + if ((rc = emop->emo_loopback_set(enp, link_mode, loopback_type)) != 0) + goto fail2; + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +#if EFSYS_OPT_NAMES + +static const char * const __efx_loopback_type_name[] = { + "OFF", + "DATA", + "GMAC", + "XGMII", + "XGXS", + "XAUI", + "GMII", + "SGMII", + "XGBR", + "XFI", + "XAUI_FAR", + "GMII_FAR", + "SGMII_FAR", + "XFI_FAR", + "GPHY", + "PHY_XS", + "PCS", + "PMA_PMD", + "XPORT", + "XGMII_WS", + "XAUI_WS", + "XAUI_WS_FAR", + "XAUI_WS_NEAR", + "GMII_WS", + "XFI_WS", + "XFI_WS_FAR", + "PHYXS_WS", + "PMA_INT", + "SD_NEAR", + "SD_FAR", + "PMA_INT_WS", + "SD_FEP2_WS", + "SD_FEP1_5_WS", + "SD_FEP_WS", + "SD_FES_WS", + "AOE_INT_NEAR", + "DATA_WS", + "FORCE_EXT_LINK", +}; + + 
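+/*
+ * Note: the loopback name table above is indexed by efx_loopback_type_t;
+ * its size is checked against EFX_LOOPBACK_NTYPES by the static assert in
+ * efx_loopback_type_name() below.
+ */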
__checkReturn const char * +efx_loopback_type_name( + __in efx_nic_t *enp, + __in efx_loopback_type_t type) +{ + EFX_STATIC_ASSERT(EFX_ARRAY_SIZE(__efx_loopback_type_name) == + EFX_LOOPBACK_NTYPES); + + _NOTE(ARGUNUSED(enp)) + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(type, <, EFX_LOOPBACK_NTYPES); + + return (__efx_loopback_type_name[type]); +} + +#endif /* EFSYS_OPT_NAMES */ + +#endif /* EFSYS_OPT_LOOPBACK */ + + void +efx_port_fini( + __in efx_nic_t *enp) +{ + efx_port_t *epp = &(enp->en_port); + const efx_phy_ops_t *epop = epp->ep_epop; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT); + + EFSYS_ASSERT(epp->ep_mac_drain); + + epp->ep_emop = NULL; + epp->ep_mac_type = EFX_MAC_INVALID; + epp->ep_mac_drain = B_FALSE; + + /* Turn off the PHY */ + if (epop->epo_power != NULL) + (void) epop->epo_power(enp, B_FALSE); + + enp->en_mod_flags &= ~EFX_MOD_PORT; +} diff --git a/src/spdk/dpdk/drivers/net/sfc/base/efx_proxy.c b/src/spdk/dpdk/drivers/net/sfc/base/efx_proxy.c new file mode 100644 index 000000000..24baa5a3a --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/base/efx_proxy.c @@ -0,0 +1,364 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2018-2019 Solarflare Communications Inc. + */ + +#include "efx.h" +#include "efx_impl.h" + +#if EFSYS_OPT_MCDI_PROXY_AUTH_SERVER + +#if EFSYS_OPT_SIENA +static const efx_proxy_ops_t __efx_proxy_dummy_ops = { + NULL, /* epo_init */ + NULL, /* epo_fini */ + NULL, /* epo_mc_config */ + NULL, /* epo_disable */ + NULL, /* epo_privilege_modify */ + NULL, /* epo_set_privilege_mask */ + NULL, /* epo_complete_request */ + NULL, /* epo_exec_cmd */ + NULL, /* epo_get_privilege_mask */ +}; +#endif /* EFSYS_OPT_SIENA */ + +#if EFX_OPTS_EF10() +static const efx_proxy_ops_t __efx_proxy_ef10_ops = { + ef10_proxy_auth_init, /* epo_init */ + ef10_proxy_auth_fini, /* epo_fini */ + ef10_proxy_auth_mc_config, /* epo_mc_config */ + ef10_proxy_auth_disable, /* epo_disable */ + ef10_proxy_auth_privilege_modify, /* epo_privilege_modify */ + ef10_proxy_auth_set_privilege_mask, /* epo_set_privilege_mask */ + ef10_proxy_auth_complete_request, /* epo_complete_request */ + ef10_proxy_auth_exec_cmd, /* epo_exec_cmd */ + ef10_proxy_auth_get_privilege_mask, /* epo_get_privilege_mask */ +}; +#endif /* EFX_OPTS_EF10() */ + + __checkReturn efx_rc_t +efx_proxy_auth_init( + __in efx_nic_t *enp) +{ + const efx_proxy_ops_t *epop; + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); + EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_PROXY)); + + switch (enp->en_family) { +#if EFSYS_OPT_SIENA + case EFX_FAMILY_SIENA: + epop = &__efx_proxy_dummy_ops; + break; +#endif /* EFSYS_OPT_SIENA */ + +#if EFSYS_OPT_HUNTINGTON + case EFX_FAMILY_HUNTINGTON: + epop = &__efx_proxy_ef10_ops; + break; +#endif /* EFSYS_OPT_HUNTINGTON */ + +#if EFSYS_OPT_MEDFORD + case EFX_FAMILY_MEDFORD: + epop = &__efx_proxy_ef10_ops; + break; +#endif /* EFSYS_OPT_MEDFORD */ + +#if EFSYS_OPT_MEDFORD2 + case EFX_FAMILY_MEDFORD2: + epop = &__efx_proxy_ef10_ops; + break; +#endif /* EFSYS_OPT_MEDFORD2 */ + + default: + EFSYS_ASSERT(0); + rc = ENOTSUP; + goto fail1; + } + + if (epop->epo_init == NULL) { + rc = ENOTSUP; + goto fail2; + } + + if ((rc = epop->epo_init(enp)) != 0) + goto fail3; + + enp->en_epop = epop; + 
enp->en_mod_flags |= EFX_MOD_PROXY;
+	return (0);
+
+fail3:
+	EFSYS_PROBE(fail3);
+fail2:
+	EFSYS_PROBE(fail2);
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+	return (rc);
+}
+
+			void
+efx_proxy_auth_fini(
+	__in		efx_nic_t *enp)
+{
+	const efx_proxy_ops_t *epop = enp->en_epop;
+
+	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+	EFSYS_ASSERT(enp->en_mod_flags & EFX_MOD_PROBE);
+	EFSYS_ASSERT(enp->en_mod_flags & EFX_MOD_PROXY);
+
+	if ((epop != NULL) && (epop->epo_fini != NULL))
+		epop->epo_fini(enp);
+
+	enp->en_epop = NULL;
+	enp->en_mod_flags &= ~EFX_MOD_PROXY;
+}
+
+	__checkReturn	efx_rc_t
+efx_proxy_auth_configure(
+	__in		efx_nic_t *enp,
+	__in		efx_proxy_auth_config_t *configp)
+{
+	const efx_proxy_ops_t *epop = enp->en_epop;
+	efx_rc_t rc;
+
+	EFSYS_ASSERT(enp->en_mod_flags & EFX_MOD_PROXY);
+
+	if ((configp == NULL) ||
+	    (configp->request_bufferp == NULL) ||
+	    (configp->response_bufferp == NULL) ||
+	    (configp->status_bufferp == NULL) ||
+	    (configp->op_listp == NULL) ||
+	    (configp->block_cnt == 0)) {
+		rc = EINVAL;
+		goto fail1;
+	}
+
+	if ((epop->epo_mc_config == NULL) ||
+	    (epop->epo_privilege_modify == NULL)) {
+		rc = ENOTSUP;
+		goto fail2;
+	}
+
+	rc = epop->epo_mc_config(enp, configp->request_bufferp,
+	    configp->response_bufferp, configp->status_bufferp,
+	    configp->block_cnt, configp->op_listp,
+	    configp->op_count);
+	if (rc != 0)
+		goto fail3;
+
+	rc = epop->epo_privilege_modify(enp, MC_CMD_PRIVILEGE_MODIFY_IN_ALL,
+	    0, 0, 0, configp->handled_privileges);
+	if (rc != 0)
+		goto fail4;
+
+	return (0);
+
+fail4:
+	EFSYS_PROBE(fail4);
+fail3:
+	EFSYS_PROBE(fail3);
+fail2:
+	EFSYS_PROBE(fail2);
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+	return (rc);
+}
+
+	__checkReturn	efx_rc_t
+efx_proxy_auth_destroy(
+	__in		efx_nic_t *enp,
+	__in		uint32_t handled_privileges)
+{
+	const efx_proxy_ops_t *epop = enp->en_epop;
+	efx_rc_t rc;
+
+	EFSYS_ASSERT(enp->en_mod_flags & EFX_MOD_PROXY);
+
+	if ((epop->epo_disable == NULL) ||
+	    (epop->epo_privilege_modify == NULL)) {
+		rc = ENOTSUP;
+		goto fail1;
+	}
+
+	rc = epop->epo_privilege_modify(enp, MC_CMD_PRIVILEGE_MODIFY_IN_ALL,
+	    0, 0, handled_privileges, 0);
+	if (rc != 0)
+		goto fail2;
+
+	rc = epop->epo_disable(enp);
+	if (rc != 0)
+		goto fail3;
+
+	return (0);
+
+fail3:
+	EFSYS_PROBE(fail3);
+fail2:
+	EFSYS_PROBE(fail2);
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+	return (rc);
+}
+
+	__checkReturn	efx_rc_t
+efx_proxy_auth_complete_request(
+	__in		efx_nic_t *enp,
+	__in		uint32_t fn_index,
+	__in		uint32_t proxy_result,
+	__in		uint32_t handle)
+{
+	const efx_proxy_ops_t *epop = enp->en_epop;
+	efx_rc_t rc;
+
+	EFSYS_ASSERT(enp->en_mod_flags & EFX_MOD_PROXY);
+
+	if (epop->epo_complete_request == NULL) {
+		rc = ENOTSUP;
+		goto fail1;
+	}
+
+	rc = epop->epo_complete_request(enp, fn_index, proxy_result, handle);
+	if (rc != 0)
+		goto fail2;
+
+	return (0);
+fail2:
+	EFSYS_PROBE(fail2);
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+	return (rc);
+}
+
+	__checkReturn	efx_rc_t
+efx_proxy_auth_exec_cmd(
+	__in		efx_nic_t *enp,
+	__inout		efx_proxy_cmd_params_t *paramsp)
+{
+	const efx_proxy_ops_t *epop = enp->en_epop;
+	efx_rc_t rc;
+
+	EFSYS_ASSERT(enp->en_mod_flags & EFX_MOD_PROXY);
+
+	if (paramsp == NULL) {
+		rc = EINVAL;
+		goto fail1;
+	}
+
+	if (epop->epo_exec_cmd == NULL) {
+		rc = ENOTSUP;
+		goto fail2;
+	}
+
+	rc = epop->epo_exec_cmd(enp, paramsp);
+	if (rc != 0)
+		goto fail3;
+
+	return (0);
+fail3:
+	EFSYS_PROBE(fail3);
+fail2:
+	EFSYS_PROBE(fail2);
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+	return (rc);
+}
+
+	__checkReturn	efx_rc_t
+efx_proxy_auth_set_privilege_mask(
+	__in		efx_nic_t *enp,
+	__in		uint32_t vf_index,
+	__in		uint32_t mask,
+	__in		uint32_t value)
+{
+	const efx_proxy_ops_t *epop = enp->en_epop;
+	efx_rc_t rc;
+
+	EFSYS_ASSERT(enp->en_mod_flags & EFX_MOD_PROXY);
+
+	if (epop->epo_set_privilege_mask == NULL) {
+		rc = ENOTSUP;
+		goto fail1;
+	}
+
+	rc = epop->epo_set_privilege_mask(enp, vf_index, mask, value);
+	if (rc != 0)
+		goto fail2;
+
+	return (0);
+
+fail2:
+	EFSYS_PROBE(fail2);
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+	return (rc);
+}
+
+	__checkReturn	efx_rc_t
+efx_proxy_auth_privilege_mask_get(
+	__in		efx_nic_t *enp,
+	__in		uint32_t pf_index,
+	__in		uint32_t vf_index,
+	__out		uint32_t *maskp)
+{
+	const efx_proxy_ops_t *epop = enp->en_epop;
+	efx_rc_t rc;
+
+	EFSYS_ASSERT(enp->en_mod_flags & EFX_MOD_PROXY);
+
+	if (epop->epo_get_privilege_mask == NULL) {
+		rc = ENOTSUP;
+		goto fail1;
+	}
+
+	rc = epop->epo_get_privilege_mask(enp, pf_index, vf_index, maskp);
+	if (rc != 0)
+		goto fail2;
+
+	return (0);
+
+fail2:
+	EFSYS_PROBE(fail2);
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+	return (rc);
+}
+
+	__checkReturn	efx_rc_t
+efx_proxy_auth_privilege_modify(
+	__in		efx_nic_t *enp,
+	__in		uint32_t pf_index,
+	__in		uint32_t vf_index,
+	__in		uint32_t add_privileges_mask,
+	__in		uint32_t remove_privileges_mask)
+{
+	const efx_proxy_ops_t *epop = enp->en_epop;
+	efx_rc_t rc;
+
+	EFSYS_ASSERT(enp->en_mod_flags & EFX_MOD_PROXY);
+
+	if (epop->epo_privilege_modify == NULL) {
+		rc = ENOTSUP;
+		goto fail1;
+	}
+
+	rc = epop->epo_privilege_modify(enp, MC_CMD_PRIVILEGE_MODIFY_IN_ONE,
+	    pf_index, vf_index, add_privileges_mask,
+	    remove_privileges_mask);
+	if (rc != 0)
+		goto fail2;
+
+	return (0);
+
+fail2:
+	EFSYS_PROBE(fail2);
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+	return (rc);
+}
+
+#endif /* EFSYS_OPT_MCDI_PROXY_AUTH_SERVER */
diff --git a/src/spdk/dpdk/drivers/net/sfc/base/efx_regs.h b/src/spdk/dpdk/drivers/net/sfc/base/efx_regs.h
new file mode 100644
index 000000000..53115fa70
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/sfc/base/efx_regs.h
@@ -0,0 +1,3846 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright(c) 2019-2020 Xilinx, Inc.
+ * Copyright(c) 2007-2019 Solarflare Communications Inc.
+ */ + +#ifndef _SYS_EFX_REGS_H +#define _SYS_EFX_REGS_H + + +#ifdef __cplusplus +extern "C" { +#endif + + +/************************************************************************** + * + * Falcon/Siena registers and descriptors + * + ************************************************************************** + */ + +/* + * FR_AB_EE_VPD_CFG0_REG_SF(128bit): + * SPI/VPD configuration register 0 + */ +#define FR_AB_EE_VPD_CFG0_REG_SF_OFST 0x00000300 +/* falcona0,falconb0=eeprom_flash */ +/* + * FR_AB_EE_VPD_CFG0_REG(128bit): + * SPI/VPD configuration register 0 + */ +#define FR_AB_EE_VPD_CFG0_REG_OFST 0x00000140 +/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */ + +#define FRF_AB_EE_SF_FASTRD_EN_LBN 127 +#define FRF_AB_EE_SF_FASTRD_EN_WIDTH 1 +#define FRF_AB_EE_SF_CLOCK_DIV_LBN 120 +#define FRF_AB_EE_SF_CLOCK_DIV_WIDTH 7 +#define FRF_AB_EE_VPD_WIP_POLL_LBN 119 +#define FRF_AB_EE_VPD_WIP_POLL_WIDTH 1 +#define FRF_AB_EE_EE_CLOCK_DIV_LBN 112 +#define FRF_AB_EE_EE_CLOCK_DIV_WIDTH 7 +#define FRF_AB_EE_EE_WR_TMR_VALUE_LBN 96 +#define FRF_AB_EE_EE_WR_TMR_VALUE_WIDTH 16 +#define FRF_AB_EE_VPDW_LENGTH_LBN 80 +#define FRF_AB_EE_VPDW_LENGTH_WIDTH 15 +#define FRF_AB_EE_VPDW_BASE_LBN 64 +#define FRF_AB_EE_VPDW_BASE_WIDTH 15 +#define FRF_AB_EE_VPD_WR_CMD_EN_LBN 56 +#define FRF_AB_EE_VPD_WR_CMD_EN_WIDTH 8 +#define FRF_AB_EE_VPD_BASE_LBN 32 +#define FRF_AB_EE_VPD_BASE_WIDTH 24 +#define FRF_AB_EE_VPD_LENGTH_LBN 16 +#define FRF_AB_EE_VPD_LENGTH_WIDTH 15 +#define FRF_AB_EE_VPD_AD_SIZE_LBN 8 +#define FRF_AB_EE_VPD_AD_SIZE_WIDTH 5 +#define FRF_AB_EE_VPD_ACCESS_ON_LBN 5 +#define FRF_AB_EE_VPD_ACCESS_ON_WIDTH 1 +#define FRF_AB_EE_VPD_ACCESS_BLOCK_LBN 4 +#define FRF_AB_EE_VPD_ACCESS_BLOCK_WIDTH 1 +#define FRF_AB_EE_VPD_DEV_SF_SEL_LBN 2 +#define FRF_AB_EE_VPD_DEV_SF_SEL_WIDTH 1 +#define FRF_AB_EE_VPD_EN_AD9_MODE_LBN 1 +#define FRF_AB_EE_VPD_EN_AD9_MODE_WIDTH 1 +#define FRF_AB_EE_VPD_EN_LBN 0 +#define FRF_AB_EE_VPD_EN_WIDTH 1 + + +/* + * FR_AB_PCIE_SD_CTL0123_REG_SF(128bit): + * PCIE SerDes control register 0 to 3 + */ +#define FR_AB_PCIE_SD_CTL0123_REG_SF_OFST 0x00000320 +/* falcona0,falconb0=eeprom_flash */ +/* + * FR_AB_PCIE_SD_CTL0123_REG(128bit): + * PCIE SerDes control register 0 to 3 + */ +#define FR_AB_PCIE_SD_CTL0123_REG_OFST 0x00000320 +/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */ + +#define FRF_AB_PCIE_TESTSIG_H_LBN 96 +#define FRF_AB_PCIE_TESTSIG_H_WIDTH 19 +#define FRF_AB_PCIE_TESTSIG_L_LBN 64 +#define FRF_AB_PCIE_TESTSIG_L_WIDTH 19 +#define FRF_AB_PCIE_OFFSET_LBN 56 +#define FRF_AB_PCIE_OFFSET_WIDTH 8 +#define FRF_AB_PCIE_OFFSETEN_H_LBN 55 +#define FRF_AB_PCIE_OFFSETEN_H_WIDTH 1 +#define FRF_AB_PCIE_OFFSETEN_L_LBN 54 +#define FRF_AB_PCIE_OFFSETEN_L_WIDTH 1 +#define FRF_AB_PCIE_HIVMODE_H_LBN 53 +#define FRF_AB_PCIE_HIVMODE_H_WIDTH 1 +#define FRF_AB_PCIE_HIVMODE_L_LBN 52 +#define FRF_AB_PCIE_HIVMODE_L_WIDTH 1 +#define FRF_AB_PCIE_PARRESET_H_LBN 51 +#define FRF_AB_PCIE_PARRESET_H_WIDTH 1 +#define FRF_AB_PCIE_PARRESET_L_LBN 50 +#define FRF_AB_PCIE_PARRESET_L_WIDTH 1 +#define FRF_AB_PCIE_LPBKWDRV_H_LBN 49 +#define FRF_AB_PCIE_LPBKWDRV_H_WIDTH 1 +#define FRF_AB_PCIE_LPBKWDRV_L_LBN 48 +#define FRF_AB_PCIE_LPBKWDRV_L_WIDTH 1 +#define FRF_AB_PCIE_LPBK_LBN 40 +#define FRF_AB_PCIE_LPBK_WIDTH 8 +#define FRF_AB_PCIE_PARLPBK_LBN 32 +#define FRF_AB_PCIE_PARLPBK_WIDTH 8 +#define FRF_AB_PCIE_RXTERMADJ_H_LBN 30 +#define FRF_AB_PCIE_RXTERMADJ_H_WIDTH 2 +#define FRF_AB_PCIE_RXTERMADJ_L_LBN 28 +#define FRF_AB_PCIE_RXTERMADJ_L_WIDTH 2 +#define FFE_AB_PCIE_RXTERMADJ_MIN15PCNT 3 +#define 
FFE_AB_PCIE_RXTERMADJ_PL10PCNT 2 +#define FFE_AB_PCIE_RXTERMADJ_MIN17PCNT 1 +#define FFE_AB_PCIE_RXTERMADJ_NOMNL 0 +#define FRF_AB_PCIE_TXTERMADJ_H_LBN 26 +#define FRF_AB_PCIE_TXTERMADJ_H_WIDTH 2 +#define FRF_AB_PCIE_TXTERMADJ_L_LBN 24 +#define FRF_AB_PCIE_TXTERMADJ_L_WIDTH 2 +#define FFE_AB_PCIE_TXTERMADJ_MIN15PCNT 3 +#define FFE_AB_PCIE_TXTERMADJ_PL10PCNT 2 +#define FFE_AB_PCIE_TXTERMADJ_MIN17PCNT 1 +#define FFE_AB_PCIE_TXTERMADJ_NOMNL 0 +#define FRF_AB_PCIE_RXEQCTL_H_LBN 18 +#define FRF_AB_PCIE_RXEQCTL_H_WIDTH 2 +#define FRF_AB_PCIE_RXEQCTL_L_LBN 16 +#define FRF_AB_PCIE_RXEQCTL_L_WIDTH 2 +#define FFE_AB_PCIE_RXEQCTL_OFF_ALT 3 +#define FFE_AB_PCIE_RXEQCTL_OFF 2 +#define FFE_AB_PCIE_RXEQCTL_MIN 1 +#define FFE_AB_PCIE_RXEQCTL_MAX 0 +#define FRF_AB_PCIE_HIDRV_LBN 8 +#define FRF_AB_PCIE_HIDRV_WIDTH 8 +#define FRF_AB_PCIE_LODRV_LBN 0 +#define FRF_AB_PCIE_LODRV_WIDTH 8 + + +/* + * FR_AB_PCIE_SD_CTL45_REG_SF(128bit): + * PCIE SerDes control register 4 and 5 + */ +#define FR_AB_PCIE_SD_CTL45_REG_SF_OFST 0x00000330 +/* falcona0,falconb0=eeprom_flash */ +/* + * FR_AB_PCIE_SD_CTL45_REG(128bit): + * PCIE SerDes control register 4 and 5 + */ +#define FR_AB_PCIE_SD_CTL45_REG_OFST 0x00000330 +/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */ + +#define FRF_AB_PCIE_DTX7_LBN 60 +#define FRF_AB_PCIE_DTX7_WIDTH 4 +#define FRF_AB_PCIE_DTX6_LBN 56 +#define FRF_AB_PCIE_DTX6_WIDTH 4 +#define FRF_AB_PCIE_DTX5_LBN 52 +#define FRF_AB_PCIE_DTX5_WIDTH 4 +#define FRF_AB_PCIE_DTX4_LBN 48 +#define FRF_AB_PCIE_DTX4_WIDTH 4 +#define FRF_AB_PCIE_DTX3_LBN 44 +#define FRF_AB_PCIE_DTX3_WIDTH 4 +#define FRF_AB_PCIE_DTX2_LBN 40 +#define FRF_AB_PCIE_DTX2_WIDTH 4 +#define FRF_AB_PCIE_DTX1_LBN 36 +#define FRF_AB_PCIE_DTX1_WIDTH 4 +#define FRF_AB_PCIE_DTX0_LBN 32 +#define FRF_AB_PCIE_DTX0_WIDTH 4 +#define FRF_AB_PCIE_DEQ7_LBN 28 +#define FRF_AB_PCIE_DEQ7_WIDTH 4 +#define FRF_AB_PCIE_DEQ6_LBN 24 +#define FRF_AB_PCIE_DEQ6_WIDTH 4 +#define FRF_AB_PCIE_DEQ5_LBN 20 +#define FRF_AB_PCIE_DEQ5_WIDTH 4 +#define FRF_AB_PCIE_DEQ4_LBN 16 +#define FRF_AB_PCIE_DEQ4_WIDTH 4 +#define FRF_AB_PCIE_DEQ3_LBN 12 +#define FRF_AB_PCIE_DEQ3_WIDTH 4 +#define FRF_AB_PCIE_DEQ2_LBN 8 +#define FRF_AB_PCIE_DEQ2_WIDTH 4 +#define FRF_AB_PCIE_DEQ1_LBN 4 +#define FRF_AB_PCIE_DEQ1_WIDTH 4 +#define FRF_AB_PCIE_DEQ0_LBN 0 +#define FRF_AB_PCIE_DEQ0_WIDTH 4 + + +/* + * FR_AB_PCIE_PCS_CTL_STAT_REG_SF(128bit): + * PCIE PCS control and status register + */ +#define FR_AB_PCIE_PCS_CTL_STAT_REG_SF_OFST 0x00000340 +/* falcona0,falconb0=eeprom_flash */ +/* + * FR_AB_PCIE_PCS_CTL_STAT_REG(128bit): + * PCIE PCS control and status register + */ +#define FR_AB_PCIE_PCS_CTL_STAT_REG_OFST 0x00000340 +/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */ + +#define FRF_AB_PCIE_PRBSERRCOUNT0_H_LBN 52 +#define FRF_AB_PCIE_PRBSERRCOUNT0_H_WIDTH 4 +#define FRF_AB_PCIE_PRBSERRCOUNT0_L_LBN 48 +#define FRF_AB_PCIE_PRBSERRCOUNT0_L_WIDTH 4 +#define FRF_AB_PCIE_PRBSERR_LBN 40 +#define FRF_AB_PCIE_PRBSERR_WIDTH 8 +#define FRF_AB_PCIE_PRBSERRH0_LBN 32 +#define FRF_AB_PCIE_PRBSERRH0_WIDTH 8 +#define FRF_AB_PCIE_FASTINIT_H_LBN 15 +#define FRF_AB_PCIE_FASTINIT_H_WIDTH 1 +#define FRF_AB_PCIE_FASTINIT_L_LBN 14 +#define FRF_AB_PCIE_FASTINIT_L_WIDTH 1 +#define FRF_AB_PCIE_CTCDISABLE_H_LBN 13 +#define FRF_AB_PCIE_CTCDISABLE_H_WIDTH 1 +#define FRF_AB_PCIE_CTCDISABLE_L_LBN 12 +#define FRF_AB_PCIE_CTCDISABLE_L_WIDTH 1 +#define FRF_AB_PCIE_PRBSSYNC_H_LBN 11 +#define FRF_AB_PCIE_PRBSSYNC_H_WIDTH 1 +#define FRF_AB_PCIE_PRBSSYNC_L_LBN 10 +#define FRF_AB_PCIE_PRBSSYNC_L_WIDTH 1 +#define 
FRF_AB_PCIE_PRBSERRACK_H_LBN 9 +#define FRF_AB_PCIE_PRBSERRACK_H_WIDTH 1 +#define FRF_AB_PCIE_PRBSERRACK_L_LBN 8 +#define FRF_AB_PCIE_PRBSERRACK_L_WIDTH 1 +#define FRF_AB_PCIE_PRBSSEL_LBN 0 +#define FRF_AB_PCIE_PRBSSEL_WIDTH 8 + + +/* + * FR_AB_HW_INIT_REG_SF(128bit): + * Hardware initialization register + */ +#define FR_AB_HW_INIT_REG_SF_OFST 0x00000350 +/* falcona0,falconb0=eeprom_flash */ +/* + * FR_AZ_HW_INIT_REG(128bit): + * Hardware initialization register + */ +#define FR_AZ_HW_INIT_REG_OFST 0x000000c0 +/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */ + +#define FRF_BB_BDMRD_CPLF_FULL_LBN 124 +#define FRF_BB_BDMRD_CPLF_FULL_WIDTH 1 +#define FRF_BB_PCIE_CPL_TIMEOUT_CTRL_LBN 121 +#define FRF_BB_PCIE_CPL_TIMEOUT_CTRL_WIDTH 3 +#define FRF_CZ_TX_MRG_TAGS_LBN 120 +#define FRF_CZ_TX_MRG_TAGS_WIDTH 1 +#define FRF_AZ_TRGT_MASK_ALL_LBN 100 +#define FRF_AZ_TRGT_MASK_ALL_WIDTH 1 +#define FRF_AZ_DOORBELL_DROP_LBN 92 +#define FRF_AZ_DOORBELL_DROP_WIDTH 8 +#define FRF_AB_TX_RREQ_MASK_EN_LBN 76 +#define FRF_AB_TX_RREQ_MASK_EN_WIDTH 1 +#define FRF_AB_PE_EIDLE_DIS_LBN 75 +#define FRF_AB_PE_EIDLE_DIS_WIDTH 1 +#define FRF_AZ_FC_BLOCKING_EN_LBN 45 +#define FRF_AZ_FC_BLOCKING_EN_WIDTH 1 +#define FRF_AZ_B2B_REQ_EN_LBN 44 +#define FRF_AZ_B2B_REQ_EN_WIDTH 1 +#define FRF_AZ_POST_WR_MASK_LBN 40 +#define FRF_AZ_POST_WR_MASK_WIDTH 4 +#define FRF_AZ_TLP_TC_LBN 34 +#define FRF_AZ_TLP_TC_WIDTH 3 +#define FRF_AZ_TLP_ATTR_LBN 32 +#define FRF_AZ_TLP_ATTR_WIDTH 2 +#define FRF_AB_INTB_VEC_LBN 24 +#define FRF_AB_INTB_VEC_WIDTH 5 +#define FRF_AB_INTA_VEC_LBN 16 +#define FRF_AB_INTA_VEC_WIDTH 5 +#define FRF_AZ_WD_TIMER_LBN 8 +#define FRF_AZ_WD_TIMER_WIDTH 8 +#define FRF_AZ_US_DISABLE_LBN 5 +#define FRF_AZ_US_DISABLE_WIDTH 1 +#define FRF_AZ_TLP_EP_LBN 4 +#define FRF_AZ_TLP_EP_WIDTH 1 +#define FRF_AZ_ATTR_SEL_LBN 3 +#define FRF_AZ_ATTR_SEL_WIDTH 1 +#define FRF_AZ_TD_SEL_LBN 1 +#define FRF_AZ_TD_SEL_WIDTH 1 +#define FRF_AZ_TLP_TD_LBN 0 +#define FRF_AZ_TLP_TD_WIDTH 1 + + +/* + * FR_AB_NIC_STAT_REG_SF(128bit): + * NIC status register + */ +#define FR_AB_NIC_STAT_REG_SF_OFST 0x00000360 +/* falcona0,falconb0=eeprom_flash */ +/* + * FR_AB_NIC_STAT_REG(128bit): + * NIC status register + */ +#define FR_AB_NIC_STAT_REG_OFST 0x00000200 +/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */ + +#define FRF_BB_AER_DIS_LBN 34 +#define FRF_BB_AER_DIS_WIDTH 1 +#define FRF_BB_EE_STRAP_EN_LBN 31 +#define FRF_BB_EE_STRAP_EN_WIDTH 1 +#define FRF_BB_EE_STRAP_LBN 24 +#define FRF_BB_EE_STRAP_WIDTH 4 +#define FRF_BB_REVISION_ID_LBN 17 +#define FRF_BB_REVISION_ID_WIDTH 7 +#define FRF_AB_ONCHIP_SRAM_LBN 16 +#define FRF_AB_ONCHIP_SRAM_WIDTH 1 +#define FRF_AB_SF_PRST_LBN 9 +#define FRF_AB_SF_PRST_WIDTH 1 +#define FRF_AB_EE_PRST_LBN 8 +#define FRF_AB_EE_PRST_WIDTH 1 +#define FRF_AB_ATE_MODE_LBN 3 +#define FRF_AB_ATE_MODE_WIDTH 1 +#define FRF_AB_STRAP_PINS_LBN 0 +#define FRF_AB_STRAP_PINS_WIDTH 3 + + +/* + * FR_AB_GLB_CTL_REG_SF(128bit): + * Global control register + */ +#define FR_AB_GLB_CTL_REG_SF_OFST 0x00000370 +/* falcona0,falconb0=eeprom_flash */ +/* + * FR_AB_GLB_CTL_REG(128bit): + * Global control register + */ +#define FR_AB_GLB_CTL_REG_OFST 0x00000220 +/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */ + +#define FRF_AB_EXT_PHY_RST_CTL_LBN 63 +#define FRF_AB_EXT_PHY_RST_CTL_WIDTH 1 +#define FRF_AB_XAUI_SD_RST_CTL_LBN 62 +#define FRF_AB_XAUI_SD_RST_CTL_WIDTH 1 +#define FRF_AB_PCIE_SD_RST_CTL_LBN 61 +#define FRF_AB_PCIE_SD_RST_CTL_WIDTH 1 +#define FRF_AA_PCIX_RST_CTL_LBN 60 +#define 
FRF_AA_PCIX_RST_CTL_WIDTH 1 +#define FRF_BB_BIU_RST_CTL_LBN 60 +#define FRF_BB_BIU_RST_CTL_WIDTH 1 +#define FRF_AB_PCIE_STKY_RST_CTL_LBN 59 +#define FRF_AB_PCIE_STKY_RST_CTL_WIDTH 1 +#define FRF_AB_PCIE_NSTKY_RST_CTL_LBN 58 +#define FRF_AB_PCIE_NSTKY_RST_CTL_WIDTH 1 +#define FRF_AB_PCIE_CORE_RST_CTL_LBN 57 +#define FRF_AB_PCIE_CORE_RST_CTL_WIDTH 1 +#define FRF_AB_XGRX_RST_CTL_LBN 56 +#define FRF_AB_XGRX_RST_CTL_WIDTH 1 +#define FRF_AB_XGTX_RST_CTL_LBN 55 +#define FRF_AB_XGTX_RST_CTL_WIDTH 1 +#define FRF_AB_EM_RST_CTL_LBN 54 +#define FRF_AB_EM_RST_CTL_WIDTH 1 +#define FRF_AB_EV_RST_CTL_LBN 53 +#define FRF_AB_EV_RST_CTL_WIDTH 1 +#define FRF_AB_SR_RST_CTL_LBN 52 +#define FRF_AB_SR_RST_CTL_WIDTH 1 +#define FRF_AB_RX_RST_CTL_LBN 51 +#define FRF_AB_RX_RST_CTL_WIDTH 1 +#define FRF_AB_TX_RST_CTL_LBN 50 +#define FRF_AB_TX_RST_CTL_WIDTH 1 +#define FRF_AB_EE_RST_CTL_LBN 49 +#define FRF_AB_EE_RST_CTL_WIDTH 1 +#define FRF_AB_CS_RST_CTL_LBN 48 +#define FRF_AB_CS_RST_CTL_WIDTH 1 +#define FRF_AB_HOT_RST_CTL_LBN 40 +#define FRF_AB_HOT_RST_CTL_WIDTH 2 +#define FRF_AB_RST_EXT_PHY_LBN 31 +#define FRF_AB_RST_EXT_PHY_WIDTH 1 +#define FRF_AB_RST_XAUI_SD_LBN 30 +#define FRF_AB_RST_XAUI_SD_WIDTH 1 +#define FRF_AB_RST_PCIE_SD_LBN 29 +#define FRF_AB_RST_PCIE_SD_WIDTH 1 +#define FRF_AA_RST_PCIX_LBN 28 +#define FRF_AA_RST_PCIX_WIDTH 1 +#define FRF_BB_RST_BIU_LBN 28 +#define FRF_BB_RST_BIU_WIDTH 1 +#define FRF_AB_RST_PCIE_STKY_LBN 27 +#define FRF_AB_RST_PCIE_STKY_WIDTH 1 +#define FRF_AB_RST_PCIE_NSTKY_LBN 26 +#define FRF_AB_RST_PCIE_NSTKY_WIDTH 1 +#define FRF_AB_RST_PCIE_CORE_LBN 25 +#define FRF_AB_RST_PCIE_CORE_WIDTH 1 +#define FRF_AB_RST_XGRX_LBN 24 +#define FRF_AB_RST_XGRX_WIDTH 1 +#define FRF_AB_RST_XGTX_LBN 23 +#define FRF_AB_RST_XGTX_WIDTH 1 +#define FRF_AB_RST_EM_LBN 22 +#define FRF_AB_RST_EM_WIDTH 1 +#define FRF_AB_RST_EV_LBN 21 +#define FRF_AB_RST_EV_WIDTH 1 +#define FRF_AB_RST_SR_LBN 20 +#define FRF_AB_RST_SR_WIDTH 1 +#define FRF_AB_RST_RX_LBN 19 +#define FRF_AB_RST_RX_WIDTH 1 +#define FRF_AB_RST_TX_LBN 18 +#define FRF_AB_RST_TX_WIDTH 1 +#define FRF_AB_RST_SF_LBN 17 +#define FRF_AB_RST_SF_WIDTH 1 +#define FRF_AB_RST_CS_LBN 16 +#define FRF_AB_RST_CS_WIDTH 1 +#define FRF_AB_INT_RST_DUR_LBN 4 +#define FRF_AB_INT_RST_DUR_WIDTH 3 +#define FRF_AB_EXT_PHY_RST_DUR_LBN 1 +#define FRF_AB_EXT_PHY_RST_DUR_WIDTH 3 +#define FFE_AB_EXT_PHY_RST_DUR_10240US 7 +#define FFE_AB_EXT_PHY_RST_DUR_5120US 6 +#define FFE_AB_EXT_PHY_RST_DUR_2560US 5 +#define FFE_AB_EXT_PHY_RST_DUR_1280US 4 +#define FFE_AB_EXT_PHY_RST_DUR_640US 3 +#define FFE_AB_EXT_PHY_RST_DUR_320US 2 +#define FFE_AB_EXT_PHY_RST_DUR_160US 1 +#define FFE_AB_EXT_PHY_RST_DUR_80US 0 +#define FRF_AB_SWRST_LBN 0 +#define FRF_AB_SWRST_WIDTH 1 + + +/* + * FR_AZ_IOM_IND_ADR_REG(32bit): + * IO-mapped indirect access address register + */ +#define FR_AZ_IOM_IND_ADR_REG_OFST 0x00000000 +/* falcona0,falconb0,sienaa0=net_func_bar0 */ + +#define FRF_AZ_IOM_AUTO_ADR_INC_EN_LBN 24 +#define FRF_AZ_IOM_AUTO_ADR_INC_EN_WIDTH 1 +#define FRF_AZ_IOM_IND_ADR_LBN 0 +#define FRF_AZ_IOM_IND_ADR_WIDTH 24 + + +/* + * FR_AZ_IOM_IND_DAT_REG(32bit): + * IO-mapped indirect access data register + */ +#define FR_AZ_IOM_IND_DAT_REG_OFST 0x00000004 +/* falcona0,falconb0,sienaa0=net_func_bar0 */ + +#define FRF_AZ_IOM_IND_DAT_LBN 0 +#define FRF_AZ_IOM_IND_DAT_WIDTH 32 + + +/* + * FR_AZ_ADR_REGION_REG(128bit): + * Address region register + */ +#define FR_AZ_ADR_REGION_REG_OFST 0x00000000 +/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */ + +#define FRF_AZ_ADR_REGION3_LBN 96 +#define 
FRF_AZ_ADR_REGION3_WIDTH 18 +#define FRF_AZ_ADR_REGION2_LBN 64 +#define FRF_AZ_ADR_REGION2_WIDTH 18 +#define FRF_AZ_ADR_REGION1_LBN 32 +#define FRF_AZ_ADR_REGION1_WIDTH 18 +#define FRF_AZ_ADR_REGION0_LBN 0 +#define FRF_AZ_ADR_REGION0_WIDTH 18 + + +/* + * FR_AZ_INT_EN_REG_KER(128bit): + * Kernel driver Interrupt enable register + */ +#define FR_AZ_INT_EN_REG_KER_OFST 0x00000010 +/* falcona0,falconb0,sienaa0=net_func_bar2 */ + +#define FRF_AZ_KER_INT_LEVE_SEL_LBN 8 +#define FRF_AZ_KER_INT_LEVE_SEL_WIDTH 6 +#define FRF_AZ_KER_INT_CHAR_LBN 4 +#define FRF_AZ_KER_INT_CHAR_WIDTH 1 +#define FRF_AZ_KER_INT_KER_LBN 3 +#define FRF_AZ_KER_INT_KER_WIDTH 1 +#define FRF_AZ_DRV_INT_EN_KER_LBN 0 +#define FRF_AZ_DRV_INT_EN_KER_WIDTH 1 + + +/* + * FR_AZ_INT_EN_REG_CHAR(128bit): + * Char Driver interrupt enable register + */ +#define FR_AZ_INT_EN_REG_CHAR_OFST 0x00000020 +/* falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */ + +#define FRF_AZ_CHAR_INT_LEVE_SEL_LBN 8 +#define FRF_AZ_CHAR_INT_LEVE_SEL_WIDTH 6 +#define FRF_AZ_CHAR_INT_CHAR_LBN 4 +#define FRF_AZ_CHAR_INT_CHAR_WIDTH 1 +#define FRF_AZ_CHAR_INT_KER_LBN 3 +#define FRF_AZ_CHAR_INT_KER_WIDTH 1 +#define FRF_AZ_DRV_INT_EN_CHAR_LBN 0 +#define FRF_AZ_DRV_INT_EN_CHAR_WIDTH 1 + + +/* + * FR_AZ_INT_ADR_REG_KER(128bit): + * Interrupt host address for Kernel driver + */ +#define FR_AZ_INT_ADR_REG_KER_OFST 0x00000030 +/* falcona0,falconb0,sienaa0=net_func_bar2 */ + +#define FRF_AZ_NORM_INT_VEC_DIS_KER_LBN 64 +#define FRF_AZ_NORM_INT_VEC_DIS_KER_WIDTH 1 +#define FRF_AZ_INT_ADR_KER_LBN 0 +#define FRF_AZ_INT_ADR_KER_WIDTH 64 +#define FRF_AZ_INT_ADR_KER_DW0_LBN 0 +#define FRF_AZ_INT_ADR_KER_DW0_WIDTH 32 +#define FRF_AZ_INT_ADR_KER_DW1_LBN 32 +#define FRF_AZ_INT_ADR_KER_DW1_WIDTH 32 + + +/* + * FR_AZ_INT_ADR_REG_CHAR(128bit): + * Interrupt host address for Char driver + */ +#define FR_AZ_INT_ADR_REG_CHAR_OFST 0x00000040 +/* falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */ + +#define FRF_AZ_NORM_INT_VEC_DIS_CHAR_LBN 64 +#define FRF_AZ_NORM_INT_VEC_DIS_CHAR_WIDTH 1 +#define FRF_AZ_INT_ADR_CHAR_LBN 0 +#define FRF_AZ_INT_ADR_CHAR_WIDTH 64 +#define FRF_AZ_INT_ADR_CHAR_DW0_LBN 0 +#define FRF_AZ_INT_ADR_CHAR_DW0_WIDTH 32 +#define FRF_AZ_INT_ADR_CHAR_DW1_LBN 32 +#define FRF_AZ_INT_ADR_CHAR_DW1_WIDTH 32 + + +/* + * FR_AA_INT_ACK_KER(32bit): + * Kernel interrupt acknowledge register + */ +#define FR_AA_INT_ACK_KER_OFST 0x00000050 +/* falcona0=net_func_bar2 */ + +#define FRF_AA_INT_ACK_KER_FIELD_LBN 0 +#define FRF_AA_INT_ACK_KER_FIELD_WIDTH 32 + + +/* + * FR_BZ_INT_ISR0_REG(128bit): + * Function 0 Interrupt Acknowlege Status register + */ +#define FR_BZ_INT_ISR0_REG_OFST 0x00000090 +/* falconb0,sienaa0=net_func_bar2 */ + +#define FRF_BZ_INT_ISR_REG_LBN 0 +#define FRF_BZ_INT_ISR_REG_WIDTH 64 +#define FRF_BZ_INT_ISR_REG_DW0_LBN 0 +#define FRF_BZ_INT_ISR_REG_DW0_WIDTH 32 +#define FRF_BZ_INT_ISR_REG_DW1_LBN 32 +#define FRF_BZ_INT_ISR_REG_DW1_WIDTH 32 + + +/* + * FR_AB_EE_SPI_HCMD_REG(128bit): + * SPI host command register + */ +#define FR_AB_EE_SPI_HCMD_REG_OFST 0x00000100 +/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */ + +#define FRF_AB_EE_SPI_HCMD_CMD_EN_LBN 31 +#define FRF_AB_EE_SPI_HCMD_CMD_EN_WIDTH 1 +#define FRF_AB_EE_WR_TIMER_ACTIVE_LBN 28 +#define FRF_AB_EE_WR_TIMER_ACTIVE_WIDTH 1 +#define FRF_AB_EE_SPI_HCMD_SF_SEL_LBN 24 +#define FRF_AB_EE_SPI_HCMD_SF_SEL_WIDTH 1 +#define FRF_AB_EE_SPI_HCMD_DABCNT_LBN 16 +#define FRF_AB_EE_SPI_HCMD_DABCNT_WIDTH 5 +#define FRF_AB_EE_SPI_HCMD_READ_LBN 15 +#define FRF_AB_EE_SPI_HCMD_READ_WIDTH 1 +#define 
FRF_AB_EE_SPI_HCMD_DUBCNT_LBN 12 +#define FRF_AB_EE_SPI_HCMD_DUBCNT_WIDTH 2 +#define FRF_AB_EE_SPI_HCMD_ADBCNT_LBN 8 +#define FRF_AB_EE_SPI_HCMD_ADBCNT_WIDTH 2 +#define FRF_AB_EE_SPI_HCMD_ENC_LBN 0 +#define FRF_AB_EE_SPI_HCMD_ENC_WIDTH 8 + + +/* + * FR_CZ_USR_EV_CFG(32bit): + * User Level Event Configuration register + */ +#define FR_CZ_USR_EV_CFG_OFST 0x00000100 +/* sienaa0=net_func_bar2 */ + +#define FRF_CZ_USREV_DIS_LBN 16 +#define FRF_CZ_USREV_DIS_WIDTH 1 +#define FRF_CZ_DFLT_EVQ_LBN 0 +#define FRF_CZ_DFLT_EVQ_WIDTH 10 + + +/* + * FR_AB_EE_SPI_HADR_REG(128bit): + * SPI host address register + */ +#define FR_AB_EE_SPI_HADR_REG_OFST 0x00000110 +/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */ + +#define FRF_AB_EE_SPI_HADR_DUBYTE_LBN 24 +#define FRF_AB_EE_SPI_HADR_DUBYTE_WIDTH 8 +#define FRF_AB_EE_SPI_HADR_ADR_LBN 0 +#define FRF_AB_EE_SPI_HADR_ADR_WIDTH 24 + + +/* + * FR_AB_EE_SPI_HDATA_REG(128bit): + * SPI host data register + */ +#define FR_AB_EE_SPI_HDATA_REG_OFST 0x00000120 +/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */ + +#define FRF_AB_EE_SPI_HDATA3_LBN 96 +#define FRF_AB_EE_SPI_HDATA3_WIDTH 32 +#define FRF_AB_EE_SPI_HDATA2_LBN 64 +#define FRF_AB_EE_SPI_HDATA2_WIDTH 32 +#define FRF_AB_EE_SPI_HDATA1_LBN 32 +#define FRF_AB_EE_SPI_HDATA1_WIDTH 32 +#define FRF_AB_EE_SPI_HDATA0_LBN 0 +#define FRF_AB_EE_SPI_HDATA0_WIDTH 32 + + +/* + * FR_AB_EE_BASE_PAGE_REG(128bit): + * Expansion ROM base mirror register + */ +#define FR_AB_EE_BASE_PAGE_REG_OFST 0x00000130 +/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */ + +#define FRF_AB_EE_EXPROM_MASK_LBN 16 +#define FRF_AB_EE_EXPROM_MASK_WIDTH 13 +#define FRF_AB_EE_EXP_ROM_WINDOW_BASE_LBN 0 +#define FRF_AB_EE_EXP_ROM_WINDOW_BASE_WIDTH 13 + + +/* + * FR_AB_EE_VPD_SW_CNTL_REG(128bit): + * VPD access SW control register + */ +#define FR_AB_EE_VPD_SW_CNTL_REG_OFST 0x00000150 +/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */ + +#define FRF_AB_EE_VPD_CYCLE_PENDING_LBN 31 +#define FRF_AB_EE_VPD_CYCLE_PENDING_WIDTH 1 +#define FRF_AB_EE_VPD_CYC_WRITE_LBN 28 +#define FRF_AB_EE_VPD_CYC_WRITE_WIDTH 1 +#define FRF_AB_EE_VPD_CYC_ADR_LBN 0 +#define FRF_AB_EE_VPD_CYC_ADR_WIDTH 15 + + +/* + * FR_AB_EE_VPD_SW_DATA_REG(128bit): + * VPD access SW data register + */ +#define FR_AB_EE_VPD_SW_DATA_REG_OFST 0x00000160 +/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */ + +#define FRF_AB_EE_VPD_CYC_DAT_LBN 0 +#define FRF_AB_EE_VPD_CYC_DAT_WIDTH 32 + + +/* + * FR_BB_PCIE_CORE_INDIRECT_REG(64bit): + * Indirect Access to PCIE Core registers + */ +#define FR_BB_PCIE_CORE_INDIRECT_REG_OFST 0x000001f0 +/* falconb0=net_func_bar2 */ + +#define FRF_BB_PCIE_CORE_TARGET_DATA_LBN 32 +#define FRF_BB_PCIE_CORE_TARGET_DATA_WIDTH 32 +#define FRF_BB_PCIE_CORE_INDIRECT_ACCESS_DIR_LBN 15 +#define FRF_BB_PCIE_CORE_INDIRECT_ACCESS_DIR_WIDTH 1 +#define FRF_BB_PCIE_CORE_TARGET_REG_ADRS_LBN 0 +#define FRF_BB_PCIE_CORE_TARGET_REG_ADRS_WIDTH 12 + + +/* + * FR_AB_GPIO_CTL_REG(128bit): + * GPIO control register + */ +#define FR_AB_GPIO_CTL_REG_OFST 0x00000210 +/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */ + +#define FRF_AB_GPIO15_OEN_LBN 63 +#define FRF_AB_GPIO15_OEN_WIDTH 1 +#define FRF_AB_GPIO14_OEN_LBN 62 +#define FRF_AB_GPIO14_OEN_WIDTH 1 +#define FRF_AB_GPIO13_OEN_LBN 61 +#define FRF_AB_GPIO13_OEN_WIDTH 1 +#define FRF_AB_GPIO12_OEN_LBN 60 +#define FRF_AB_GPIO12_OEN_WIDTH 1 +#define FRF_AB_GPIO11_OEN_LBN 59 +#define FRF_AB_GPIO11_OEN_WIDTH 1 +#define FRF_AB_GPIO10_OEN_LBN 58 +#define FRF_AB_GPIO10_OEN_WIDTH 1 +#define 
FRF_AB_GPIO9_OEN_LBN 57 +#define FRF_AB_GPIO9_OEN_WIDTH 1 +#define FRF_AB_GPIO8_OEN_LBN 56 +#define FRF_AB_GPIO8_OEN_WIDTH 1 +#define FRF_AB_GPIO15_OUT_LBN 55 +#define FRF_AB_GPIO15_OUT_WIDTH 1 +#define FRF_AB_GPIO14_OUT_LBN 54 +#define FRF_AB_GPIO14_OUT_WIDTH 1 +#define FRF_AB_GPIO13_OUT_LBN 53 +#define FRF_AB_GPIO13_OUT_WIDTH 1 +#define FRF_AB_GPIO12_OUT_LBN 52 +#define FRF_AB_GPIO12_OUT_WIDTH 1 +#define FRF_AB_GPIO11_OUT_LBN 51 +#define FRF_AB_GPIO11_OUT_WIDTH 1 +#define FRF_AB_GPIO10_OUT_LBN 50 +#define FRF_AB_GPIO10_OUT_WIDTH 1 +#define FRF_AB_GPIO9_OUT_LBN 49 +#define FRF_AB_GPIO9_OUT_WIDTH 1 +#define FRF_AB_GPIO8_OUT_LBN 48 +#define FRF_AB_GPIO8_OUT_WIDTH 1 +#define FRF_AB_GPIO15_IN_LBN 47 +#define FRF_AB_GPIO15_IN_WIDTH 1 +#define FRF_AB_GPIO14_IN_LBN 46 +#define FRF_AB_GPIO14_IN_WIDTH 1 +#define FRF_AB_GPIO13_IN_LBN 45 +#define FRF_AB_GPIO13_IN_WIDTH 1 +#define FRF_AB_GPIO12_IN_LBN 44 +#define FRF_AB_GPIO12_IN_WIDTH 1 +#define FRF_AB_GPIO11_IN_LBN 43 +#define FRF_AB_GPIO11_IN_WIDTH 1 +#define FRF_AB_GPIO10_IN_LBN 42 +#define FRF_AB_GPIO10_IN_WIDTH 1 +#define FRF_AB_GPIO9_IN_LBN 41 +#define FRF_AB_GPIO9_IN_WIDTH 1 +#define FRF_AB_GPIO8_IN_LBN 40 +#define FRF_AB_GPIO8_IN_WIDTH 1 +#define FRF_AB_GPIO15_PWRUP_VALUE_LBN 39 +#define FRF_AB_GPIO15_PWRUP_VALUE_WIDTH 1 +#define FRF_AB_GPIO14_PWRUP_VALUE_LBN 38 +#define FRF_AB_GPIO14_PWRUP_VALUE_WIDTH 1 +#define FRF_AB_GPIO13_PWRUP_VALUE_LBN 37 +#define FRF_AB_GPIO13_PWRUP_VALUE_WIDTH 1 +#define FRF_AB_GPIO12_PWRUP_VALUE_LBN 36 +#define FRF_AB_GPIO12_PWRUP_VALUE_WIDTH 1 +#define FRF_AB_GPIO11_PWRUP_VALUE_LBN 35 +#define FRF_AB_GPIO11_PWRUP_VALUE_WIDTH 1 +#define FRF_AB_GPIO10_PWRUP_VALUE_LBN 34 +#define FRF_AB_GPIO10_PWRUP_VALUE_WIDTH 1 +#define FRF_AB_GPIO9_PWRUP_VALUE_LBN 33 +#define FRF_AB_GPIO9_PWRUP_VALUE_WIDTH 1 +#define FRF_AB_GPIO8_PWRUP_VALUE_LBN 32 +#define FRF_AB_GPIO8_PWRUP_VALUE_WIDTH 1 +#define FRF_BB_CLK156_OUT_EN_LBN 31 +#define FRF_BB_CLK156_OUT_EN_WIDTH 1 +#define FRF_BB_USE_NIC_CLK_LBN 30 +#define FRF_BB_USE_NIC_CLK_WIDTH 1 +#define FRF_AB_GPIO5_OEN_LBN 29 +#define FRF_AB_GPIO5_OEN_WIDTH 1 +#define FRF_AB_GPIO4_OEN_LBN 28 +#define FRF_AB_GPIO4_OEN_WIDTH 1 +#define FRF_AB_GPIO3_OEN_LBN 27 +#define FRF_AB_GPIO3_OEN_WIDTH 1 +#define FRF_AB_GPIO2_OEN_LBN 26 +#define FRF_AB_GPIO2_OEN_WIDTH 1 +#define FRF_AB_GPIO1_OEN_LBN 25 +#define FRF_AB_GPIO1_OEN_WIDTH 1 +#define FRF_AB_GPIO0_OEN_LBN 24 +#define FRF_AB_GPIO0_OEN_WIDTH 1 +#define FRF_AB_GPIO5_OUT_LBN 21 +#define FRF_AB_GPIO5_OUT_WIDTH 1 +#define FRF_AB_GPIO4_OUT_LBN 20 +#define FRF_AB_GPIO4_OUT_WIDTH 1 +#define FRF_AB_GPIO3_OUT_LBN 19 +#define FRF_AB_GPIO3_OUT_WIDTH 1 +#define FRF_AB_GPIO2_OUT_LBN 18 +#define FRF_AB_GPIO2_OUT_WIDTH 1 +#define FRF_AB_GPIO1_OUT_LBN 17 +#define FRF_AB_GPIO1_OUT_WIDTH 1 +#define FRF_AB_GPIO0_OUT_LBN 16 +#define FRF_AB_GPIO0_OUT_WIDTH 1 +#define FRF_AB_GPIO5_IN_LBN 13 +#define FRF_AB_GPIO5_IN_WIDTH 1 +#define FRF_AB_GPIO4_IN_LBN 12 +#define FRF_AB_GPIO4_IN_WIDTH 1 +#define FRF_AB_GPIO3_IN_LBN 11 +#define FRF_AB_GPIO3_IN_WIDTH 1 +#define FRF_AB_GPIO2_IN_LBN 10 +#define FRF_AB_GPIO2_IN_WIDTH 1 +#define FRF_AB_GPIO1_IN_LBN 9 +#define FRF_AB_GPIO1_IN_WIDTH 1 +#define FRF_AB_GPIO0_IN_LBN 8 +#define FRF_AB_GPIO0_IN_WIDTH 1 +#define FRF_AB_GPIO5_PWRUP_VALUE_LBN 5 +#define FRF_AB_GPIO5_PWRUP_VALUE_WIDTH 1 +#define FRF_AB_GPIO4_PWRUP_VALUE_LBN 4 +#define FRF_AB_GPIO4_PWRUP_VALUE_WIDTH 1 +#define FRF_AB_GPIO3_PWRUP_VALUE_LBN 3 +#define FRF_AB_GPIO3_PWRUP_VALUE_WIDTH 1 +#define FRF_AB_GPIO2_PWRUP_VALUE_LBN 2 +#define FRF_AB_GPIO2_PWRUP_VALUE_WIDTH 1 
+#define FRF_AB_GPIO1_PWRUP_VALUE_LBN 1 +#define FRF_AB_GPIO1_PWRUP_VALUE_WIDTH 1 +#define FRF_AB_GPIO0_PWRUP_VALUE_LBN 0 +#define FRF_AB_GPIO0_PWRUP_VALUE_WIDTH 1 + + +/* + * FR_AZ_FATAL_INTR_REG_KER(128bit): + * Fatal interrupt register for Kernel + */ +#define FR_AZ_FATAL_INTR_REG_KER_OFST 0x00000230 +/* falcona0,falconb0,sienaa0=net_func_bar2 */ + +#define FRF_CZ_SRAM_PERR_INT_P_KER_EN_LBN 44 +#define FRF_CZ_SRAM_PERR_INT_P_KER_EN_WIDTH 1 +#define FRF_AB_PCI_BUSERR_INT_KER_EN_LBN 43 +#define FRF_AB_PCI_BUSERR_INT_KER_EN_WIDTH 1 +#define FRF_CZ_MBU_PERR_INT_KER_EN_LBN 43 +#define FRF_CZ_MBU_PERR_INT_KER_EN_WIDTH 1 +#define FRF_AZ_SRAM_OOB_INT_KER_EN_LBN 42 +#define FRF_AZ_SRAM_OOB_INT_KER_EN_WIDTH 1 +#define FRF_AZ_BUFID_OOB_INT_KER_EN_LBN 41 +#define FRF_AZ_BUFID_OOB_INT_KER_EN_WIDTH 1 +#define FRF_AZ_MEM_PERR_INT_KER_EN_LBN 40 +#define FRF_AZ_MEM_PERR_INT_KER_EN_WIDTH 1 +#define FRF_AZ_RBUF_OWN_INT_KER_EN_LBN 39 +#define FRF_AZ_RBUF_OWN_INT_KER_EN_WIDTH 1 +#define FRF_AZ_TBUF_OWN_INT_KER_EN_LBN 38 +#define FRF_AZ_TBUF_OWN_INT_KER_EN_WIDTH 1 +#define FRF_AZ_RDESCQ_OWN_INT_KER_EN_LBN 37 +#define FRF_AZ_RDESCQ_OWN_INT_KER_EN_WIDTH 1 +#define FRF_AZ_TDESCQ_OWN_INT_KER_EN_LBN 36 +#define FRF_AZ_TDESCQ_OWN_INT_KER_EN_WIDTH 1 +#define FRF_AZ_EVQ_OWN_INT_KER_EN_LBN 35 +#define FRF_AZ_EVQ_OWN_INT_KER_EN_WIDTH 1 +#define FRF_AZ_EVF_OFLO_INT_KER_EN_LBN 34 +#define FRF_AZ_EVF_OFLO_INT_KER_EN_WIDTH 1 +#define FRF_AZ_ILL_ADR_INT_KER_EN_LBN 33 +#define FRF_AZ_ILL_ADR_INT_KER_EN_WIDTH 1 +#define FRF_AZ_SRM_PERR_INT_KER_EN_LBN 32 +#define FRF_AZ_SRM_PERR_INT_KER_EN_WIDTH 1 +#define FRF_CZ_SRAM_PERR_INT_P_KER_LBN 12 +#define FRF_CZ_SRAM_PERR_INT_P_KER_WIDTH 1 +#define FRF_AB_PCI_BUSERR_INT_KER_LBN 11 +#define FRF_AB_PCI_BUSERR_INT_KER_WIDTH 1 +#define FRF_CZ_MBU_PERR_INT_KER_LBN 11 +#define FRF_CZ_MBU_PERR_INT_KER_WIDTH 1 +#define FRF_AZ_SRAM_OOB_INT_KER_LBN 10 +#define FRF_AZ_SRAM_OOB_INT_KER_WIDTH 1 +#define FRF_AZ_BUFID_DC_OOB_INT_KER_LBN 9 +#define FRF_AZ_BUFID_DC_OOB_INT_KER_WIDTH 1 +#define FRF_AZ_MEM_PERR_INT_KER_LBN 8 +#define FRF_AZ_MEM_PERR_INT_KER_WIDTH 1 +#define FRF_AZ_RBUF_OWN_INT_KER_LBN 7 +#define FRF_AZ_RBUF_OWN_INT_KER_WIDTH 1 +#define FRF_AZ_TBUF_OWN_INT_KER_LBN 6 +#define FRF_AZ_TBUF_OWN_INT_KER_WIDTH 1 +#define FRF_AZ_RDESCQ_OWN_INT_KER_LBN 5 +#define FRF_AZ_RDESCQ_OWN_INT_KER_WIDTH 1 +#define FRF_AZ_TDESCQ_OWN_INT_KER_LBN 4 +#define FRF_AZ_TDESCQ_OWN_INT_KER_WIDTH 1 +#define FRF_AZ_EVQ_OWN_INT_KER_LBN 3 +#define FRF_AZ_EVQ_OWN_INT_KER_WIDTH 1 +#define FRF_AZ_EVF_OFLO_INT_KER_LBN 2 +#define FRF_AZ_EVF_OFLO_INT_KER_WIDTH 1 +#define FRF_AZ_ILL_ADR_INT_KER_LBN 1 +#define FRF_AZ_ILL_ADR_INT_KER_WIDTH 1 +#define FRF_AZ_SRM_PERR_INT_KER_LBN 0 +#define FRF_AZ_SRM_PERR_INT_KER_WIDTH 1 + + +/* + * FR_AZ_FATAL_INTR_REG_CHAR(128bit): + * Fatal interrupt register for Char + */ +#define FR_AZ_FATAL_INTR_REG_CHAR_OFST 0x00000240 +/* falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */ + +#define FRF_CZ_SRAM_PERR_INT_P_CHAR_EN_LBN 44 +#define FRF_CZ_SRAM_PERR_INT_P_CHAR_EN_WIDTH 1 +#define FRF_AB_PCI_BUSERR_INT_CHAR_EN_LBN 43 +#define FRF_AB_PCI_BUSERR_INT_CHAR_EN_WIDTH 1 +#define FRF_CZ_MBU_PERR_INT_CHAR_EN_LBN 43 +#define FRF_CZ_MBU_PERR_INT_CHAR_EN_WIDTH 1 +#define FRF_AZ_SRAM_OOB_INT_CHAR_EN_LBN 42 +#define FRF_AZ_SRAM_OOB_INT_CHAR_EN_WIDTH 1 +#define FRF_AZ_BUFID_OOB_INT_CHAR_EN_LBN 41 +#define FRF_AZ_BUFID_OOB_INT_CHAR_EN_WIDTH 1 +#define FRF_AZ_MEM_PERR_INT_CHAR_EN_LBN 40 +#define FRF_AZ_MEM_PERR_INT_CHAR_EN_WIDTH 1 +#define FRF_AZ_RBUF_OWN_INT_CHAR_EN_LBN 39 +#define 
FRF_AZ_RBUF_OWN_INT_CHAR_EN_WIDTH 1 +#define FRF_AZ_TBUF_OWN_INT_CHAR_EN_LBN 38 +#define FRF_AZ_TBUF_OWN_INT_CHAR_EN_WIDTH 1 +#define FRF_AZ_RDESCQ_OWN_INT_CHAR_EN_LBN 37 +#define FRF_AZ_RDESCQ_OWN_INT_CHAR_EN_WIDTH 1 +#define FRF_AZ_TDESCQ_OWN_INT_CHAR_EN_LBN 36 +#define FRF_AZ_TDESCQ_OWN_INT_CHAR_EN_WIDTH 1 +#define FRF_AZ_EVQ_OWN_INT_CHAR_EN_LBN 35 +#define FRF_AZ_EVQ_OWN_INT_CHAR_EN_WIDTH 1 +#define FRF_AZ_EVF_OFLO_INT_CHAR_EN_LBN 34 +#define FRF_AZ_EVF_OFLO_INT_CHAR_EN_WIDTH 1 +#define FRF_AZ_ILL_ADR_INT_CHAR_EN_LBN 33 +#define FRF_AZ_ILL_ADR_INT_CHAR_EN_WIDTH 1 +#define FRF_AZ_SRM_PERR_INT_CHAR_EN_LBN 32 +#define FRF_AZ_SRM_PERR_INT_CHAR_EN_WIDTH 1 +#define FRF_CZ_SRAM_PERR_INT_P_CHAR_LBN 12 +#define FRF_CZ_SRAM_PERR_INT_P_CHAR_WIDTH 1 +#define FRF_AB_PCI_BUSERR_INT_CHAR_LBN 11 +#define FRF_AB_PCI_BUSERR_INT_CHAR_WIDTH 1 +#define FRF_CZ_MBU_PERR_INT_CHAR_LBN 11 +#define FRF_CZ_MBU_PERR_INT_CHAR_WIDTH 1 +#define FRF_AZ_SRAM_OOB_INT_CHAR_LBN 10 +#define FRF_AZ_SRAM_OOB_INT_CHAR_WIDTH 1 +#define FRF_AZ_BUFID_DC_OOB_INT_CHAR_LBN 9 +#define FRF_AZ_BUFID_DC_OOB_INT_CHAR_WIDTH 1 +#define FRF_AZ_MEM_PERR_INT_CHAR_LBN 8 +#define FRF_AZ_MEM_PERR_INT_CHAR_WIDTH 1 +#define FRF_AZ_RBUF_OWN_INT_CHAR_LBN 7 +#define FRF_AZ_RBUF_OWN_INT_CHAR_WIDTH 1 +#define FRF_AZ_TBUF_OWN_INT_CHAR_LBN 6 +#define FRF_AZ_TBUF_OWN_INT_CHAR_WIDTH 1 +#define FRF_AZ_RDESCQ_OWN_INT_CHAR_LBN 5 +#define FRF_AZ_RDESCQ_OWN_INT_CHAR_WIDTH 1 +#define FRF_AZ_TDESCQ_OWN_INT_CHAR_LBN 4 +#define FRF_AZ_TDESCQ_OWN_INT_CHAR_WIDTH 1 +#define FRF_AZ_EVQ_OWN_INT_CHAR_LBN 3 +#define FRF_AZ_EVQ_OWN_INT_CHAR_WIDTH 1 +#define FRF_AZ_EVF_OFLO_INT_CHAR_LBN 2 +#define FRF_AZ_EVF_OFLO_INT_CHAR_WIDTH 1 +#define FRF_AZ_ILL_ADR_INT_CHAR_LBN 1 +#define FRF_AZ_ILL_ADR_INT_CHAR_WIDTH 1 +#define FRF_AZ_SRM_PERR_INT_CHAR_LBN 0 +#define FRF_AZ_SRM_PERR_INT_CHAR_WIDTH 1 + + +/* + * FR_AZ_DP_CTRL_REG(128bit): + * Datapath control register + */ +#define FR_AZ_DP_CTRL_REG_OFST 0x00000250 +/* falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */ + +#define FRF_AZ_FLS_EVQ_ID_LBN 0 +#define FRF_AZ_FLS_EVQ_ID_WIDTH 12 + + +/* + * FR_AZ_MEM_STAT_REG(128bit): + * Memory status register + */ +#define FR_AZ_MEM_STAT_REG_OFST 0x00000260 +/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */ + +#define FRF_AB_MEM_PERR_VEC_LBN 53 +#define FRF_AB_MEM_PERR_VEC_WIDTH 40 +#define FRF_AB_MEM_PERR_VEC_DW0_LBN 53 +#define FRF_AB_MEM_PERR_VEC_DW0_WIDTH 32 +#define FRF_AB_MEM_PERR_VEC_DW1_LBN 85 +#define FRF_AB_MEM_PERR_VEC_DW1_WIDTH 6 +#define FRF_AB_MBIST_CORR_LBN 38 +#define FRF_AB_MBIST_CORR_WIDTH 15 +#define FRF_AB_MBIST_ERR_LBN 0 +#define FRF_AB_MBIST_ERR_WIDTH 40 +#define FRF_AB_MBIST_ERR_DW0_LBN 0 +#define FRF_AB_MBIST_ERR_DW0_WIDTH 32 +#define FRF_AB_MBIST_ERR_DW1_LBN 32 +#define FRF_AB_MBIST_ERR_DW1_WIDTH 6 +#define FRF_CZ_MEM_PERR_VEC_LBN 0 +#define FRF_CZ_MEM_PERR_VEC_WIDTH 35 +#define FRF_CZ_MEM_PERR_VEC_DW0_LBN 0 +#define FRF_CZ_MEM_PERR_VEC_DW0_WIDTH 32 +#define FRF_CZ_MEM_PERR_VEC_DW1_LBN 32 +#define FRF_CZ_MEM_PERR_VEC_DW1_WIDTH 3 + + +/* + * FR_PORT0_CS_DEBUG_REG(128bit): + * Debug register + */ + +#define FR_AZ_CS_DEBUG_REG_OFST 0x00000270 +/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */ + +#define FRF_AB_GLB_DEBUG2_SEL_LBN 50 +#define FRF_AB_GLB_DEBUG2_SEL_WIDTH 3 +#define FRF_AB_DEBUG_BLK_SEL2_LBN 47 +#define FRF_AB_DEBUG_BLK_SEL2_WIDTH 3 +#define FRF_AB_DEBUG_BLK_SEL1_LBN 44 +#define FRF_AB_DEBUG_BLK_SEL1_WIDTH 3 +#define FRF_AB_DEBUG_BLK_SEL0_LBN 41 +#define FRF_AB_DEBUG_BLK_SEL0_WIDTH 3 +#define 
FRF_CZ_CS_PORT_NUM_LBN 40 +#define FRF_CZ_CS_PORT_NUM_WIDTH 2 +#define FRF_AB_MISC_DEBUG_ADDR_LBN 36 +#define FRF_AB_MISC_DEBUG_ADDR_WIDTH 5 +#define FRF_CZ_CS_RESERVED_LBN 36 +#define FRF_CZ_CS_RESERVED_WIDTH 4 +#define FRF_AB_SERDES_DEBUG_ADDR_LBN 31 +#define FRF_AB_SERDES_DEBUG_ADDR_WIDTH 5 +#define FRF_CZ_CS_PORT_FPE_DW0_LBN 1 +#define FRF_CZ_CS_PORT_FPE_DW0_WIDTH 32 +#define FRF_CZ_CS_PORT_FPE_DW1_LBN 33 +#define FRF_CZ_CS_PORT_FPE_DW1_WIDTH 3 +#define FRF_CZ_CS_PORT_FPE_LBN 1 +#define FRF_CZ_CS_PORT_FPE_WIDTH 35 +#define FRF_AB_EM_DEBUG_ADDR_LBN 26 +#define FRF_AB_EM_DEBUG_ADDR_WIDTH 5 +#define FRF_AB_SR_DEBUG_ADDR_LBN 21 +#define FRF_AB_SR_DEBUG_ADDR_WIDTH 5 +#define FRF_AB_EV_DEBUG_ADDR_LBN 16 +#define FRF_AB_EV_DEBUG_ADDR_WIDTH 5 +#define FRF_AB_RX_DEBUG_ADDR_LBN 11 +#define FRF_AB_RX_DEBUG_ADDR_WIDTH 5 +#define FRF_AB_TX_DEBUG_ADDR_LBN 6 +#define FRF_AB_TX_DEBUG_ADDR_WIDTH 5 +#define FRF_AB_CS_BIU_DEBUG_ADDR_LBN 1 +#define FRF_AB_CS_BIU_DEBUG_ADDR_WIDTH 5 +#define FRF_AZ_CS_DEBUG_EN_LBN 0 +#define FRF_AZ_CS_DEBUG_EN_WIDTH 1 + + +/* + * FR_AZ_DRIVER_REG(128bit): + * Driver scratch register [0-7] + */ +#define FR_AZ_DRIVER_REG_OFST 0x00000280 +/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */ +#define FR_AZ_DRIVER_REG_STEP 16 +#define FR_AZ_DRIVER_REG_ROWS 8 + +#define FRF_AZ_DRIVER_DW0_LBN 0 +#define FRF_AZ_DRIVER_DW0_WIDTH 32 + + +/* + * FR_AZ_ALTERA_BUILD_REG(128bit): + * Altera build register + */ +#define FR_AZ_ALTERA_BUILD_REG_OFST 0x00000300 +/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */ + +#define FRF_AZ_ALTERA_BUILD_VER_LBN 0 +#define FRF_AZ_ALTERA_BUILD_VER_WIDTH 32 + + +/* + * FR_AZ_CSR_SPARE_REG(128bit): + * Spare register + */ +#define FR_AZ_CSR_SPARE_REG_OFST 0x00000310 +/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */ + +#define FRF_AZ_MEM_PERR_EN_TX_DATA_LBN 72 +#define FRF_AZ_MEM_PERR_EN_TX_DATA_WIDTH 2 +#define FRF_AZ_MEM_PERR_EN_LBN 64 +#define FRF_AZ_MEM_PERR_EN_WIDTH 38 +#define FRF_AZ_MEM_PERR_EN_DW0_LBN 64 +#define FRF_AZ_MEM_PERR_EN_DW0_WIDTH 32 +#define FRF_AZ_MEM_PERR_EN_DW1_LBN 96 +#define FRF_AZ_MEM_PERR_EN_DW1_WIDTH 6 +#define FRF_AZ_CSR_SPARE_BITS_LBN 0 +#define FRF_AZ_CSR_SPARE_BITS_WIDTH 32 + + +/* + * FR_BZ_DEBUG_DATA_OUT_REG(128bit): + * Live Debug and Debug 2 out ports + */ +#define FR_BZ_DEBUG_DATA_OUT_REG_OFST 0x00000350 +/* falconb0,sienaa0=net_func_bar2 */ + +#define FRF_BZ_DEBUG2_PORT_LBN 25 +#define FRF_BZ_DEBUG2_PORT_WIDTH 15 +#define FRF_BZ_DEBUG1_PORT_LBN 0 +#define FRF_BZ_DEBUG1_PORT_WIDTH 25 + + +/* + * FR_BZ_EVQ_RPTR_REGP0(32bit): + * Event queue read pointer register + */ +#define FR_BZ_EVQ_RPTR_REGP0_OFST 0x00000400 +/* falconb0,sienaa0=net_func_bar2 */ +#define FR_BZ_EVQ_RPTR_REGP0_STEP 8192 +#define FR_BZ_EVQ_RPTR_REGP0_ROWS 1024 +/* + * FR_AA_EVQ_RPTR_REG_KER(32bit): + * Event queue read pointer register + */ +#define FR_AA_EVQ_RPTR_REG_KER_OFST 0x00011b00 +/* falcona0=net_func_bar2 */ +#define FR_AA_EVQ_RPTR_REG_KER_STEP 4 +#define FR_AA_EVQ_RPTR_REG_KER_ROWS 4 +/* + * FR_AZ_EVQ_RPTR_REG(32bit): + * Event queue read pointer register + */ +#define FR_AZ_EVQ_RPTR_REG_OFST 0x00fa0000 +/* falconb0=net_func_bar2,sienaa0=net_func_bar2,falcona0=char_func_bar0 */ +#define FR_AZ_EVQ_RPTR_REG_STEP 16 +#define FR_AB_EVQ_RPTR_REG_ROWS 4096 +#define FR_CZ_EVQ_RPTR_REG_ROWS 1024 +/* + * FR_BB_EVQ_RPTR_REGP123(32bit): + * Event queue read pointer register + */ +#define FR_BB_EVQ_RPTR_REGP123_OFST 0x01000400 +/* falconb0=net_func_bar2 */ +#define FR_BB_EVQ_RPTR_REGP123_STEP 8192 
+#define FR_BB_EVQ_RPTR_REGP123_ROWS 3072 + +#define FRF_AZ_EVQ_RPTR_VLD_LBN 15 +#define FRF_AZ_EVQ_RPTR_VLD_WIDTH 1 +#define FRF_AZ_EVQ_RPTR_LBN 0 +#define FRF_AZ_EVQ_RPTR_WIDTH 15 + + +/* + * FR_BZ_TIMER_COMMAND_REGP0(128bit): + * Timer Command Registers + */ +#define FR_BZ_TIMER_COMMAND_REGP0_OFST 0x00000420 +/* falconb0,sienaa0=net_func_bar2 */ +#define FR_BZ_TIMER_COMMAND_REGP0_STEP 8192 +#define FR_BZ_TIMER_COMMAND_REGP0_ROWS 1024 +/* + * FR_AA_TIMER_COMMAND_REG_KER(128bit): + * Timer Command Registers + */ +#define FR_AA_TIMER_COMMAND_REG_KER_OFST 0x00000420 +/* falcona0=net_func_bar2 */ +#define FR_AA_TIMER_COMMAND_REG_KER_STEP 8192 +#define FR_AA_TIMER_COMMAND_REG_KER_ROWS 4 +/* + * FR_AB_TIMER_COMMAND_REGP123(128bit): + * Timer Command Registers + */ +#define FR_AB_TIMER_COMMAND_REGP123_OFST 0x01000420 +/* falconb0=net_func_bar2,falcona0=char_func_bar0 */ +#define FR_AB_TIMER_COMMAND_REGP123_STEP 8192 +#define FR_AB_TIMER_COMMAND_REGP123_ROWS 3072 +/* + * FR_AA_TIMER_COMMAND_REGP0(128bit): + * Timer Command Registers + */ +#define FR_AA_TIMER_COMMAND_REGP0_OFST 0x00008420 +/* falcona0=char_func_bar0 */ +#define FR_AA_TIMER_COMMAND_REGP0_STEP 8192 +#define FR_AA_TIMER_COMMAND_REGP0_ROWS 1020 + +#define FRF_CZ_TC_TIMER_MODE_LBN 14 +#define FRF_CZ_TC_TIMER_MODE_WIDTH 2 +#define FRF_AB_TC_TIMER_MODE_LBN 12 +#define FRF_AB_TC_TIMER_MODE_WIDTH 2 +#define FRF_CZ_TC_TIMER_VAL_LBN 0 +#define FRF_CZ_TC_TIMER_VAL_WIDTH 14 +#define FRF_AB_TC_TIMER_VAL_LBN 0 +#define FRF_AB_TC_TIMER_VAL_WIDTH 12 + + +/* + * FR_AZ_DRV_EV_REG(128bit): + * Driver generated event register + */ +#define FR_AZ_DRV_EV_REG_OFST 0x00000440 +/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */ + +#define FRF_AZ_DRV_EV_QID_LBN 64 +#define FRF_AZ_DRV_EV_QID_WIDTH 12 +#define FRF_AZ_DRV_EV_DATA_LBN 0 +#define FRF_AZ_DRV_EV_DATA_WIDTH 64 +#define FRF_AZ_DRV_EV_DATA_DW0_LBN 0 +#define FRF_AZ_DRV_EV_DATA_DW0_WIDTH 32 +#define FRF_AZ_DRV_EV_DATA_DW1_LBN 32 +#define FRF_AZ_DRV_EV_DATA_DW1_WIDTH 32 + + +/* + * FR_AZ_EVQ_CTL_REG(128bit): + * Event queue control register + */ +#define FR_AZ_EVQ_CTL_REG_OFST 0x00000450 +/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */ + +#define FRF_CZ_RX_EVQ_WAKEUP_MASK_LBN 15 +#define FRF_CZ_RX_EVQ_WAKEUP_MASK_WIDTH 10 +#define FRF_BB_RX_EVQ_WAKEUP_MASK_LBN 15 +#define FRF_BB_RX_EVQ_WAKEUP_MASK_WIDTH 6 +#define FRF_AZ_EVQ_OWNERR_CTL_LBN 14 +#define FRF_AZ_EVQ_OWNERR_CTL_WIDTH 1 +#define FRF_AZ_EVQ_FIFO_AF_TH_LBN 7 +#define FRF_AZ_EVQ_FIFO_AF_TH_WIDTH 7 +#define FRF_AZ_EVQ_FIFO_NOTAF_TH_LBN 0 +#define FRF_AZ_EVQ_FIFO_NOTAF_TH_WIDTH 7 + + +/* + * FR_AZ_EVQ_CNT1_REG(128bit): + * Event counter 1 register + */ +#define FR_AZ_EVQ_CNT1_REG_OFST 0x00000460 +/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */ + +#define FRF_AZ_EVQ_CNT_PRE_FIFO_LBN 120 +#define FRF_AZ_EVQ_CNT_PRE_FIFO_WIDTH 7 +#define FRF_AZ_EVQ_CNT_TOBIU_LBN 100 +#define FRF_AZ_EVQ_CNT_TOBIU_WIDTH 20 +#define FRF_AZ_EVQ_TX_REQ_CNT_LBN 80 +#define FRF_AZ_EVQ_TX_REQ_CNT_WIDTH 20 +#define FRF_AZ_EVQ_RX_REQ_CNT_LBN 60 +#define FRF_AZ_EVQ_RX_REQ_CNT_WIDTH 20 +#define FRF_AZ_EVQ_EM_REQ_CNT_LBN 40 +#define FRF_AZ_EVQ_EM_REQ_CNT_WIDTH 20 +#define FRF_AZ_EVQ_CSR_REQ_CNT_LBN 20 +#define FRF_AZ_EVQ_CSR_REQ_CNT_WIDTH 20 +#define FRF_AZ_EVQ_ERR_REQ_CNT_LBN 0 +#define FRF_AZ_EVQ_ERR_REQ_CNT_WIDTH 20 + + +/* + * FR_AZ_EVQ_CNT2_REG(128bit): + * Event counter 2 register + */ +#define FR_AZ_EVQ_CNT2_REG_OFST 0x00000470 +/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */ + +#define 
FRF_AZ_EVQ_UPD_REQ_CNT_LBN 104 +#define FRF_AZ_EVQ_UPD_REQ_CNT_WIDTH 20 +#define FRF_AZ_EVQ_CLR_REQ_CNT_LBN 84 +#define FRF_AZ_EVQ_CLR_REQ_CNT_WIDTH 20 +#define FRF_AZ_EVQ_RDY_CNT_LBN 80 +#define FRF_AZ_EVQ_RDY_CNT_WIDTH 4 +#define FRF_AZ_EVQ_WU_REQ_CNT_LBN 60 +#define FRF_AZ_EVQ_WU_REQ_CNT_WIDTH 20 +#define FRF_AZ_EVQ_WET_REQ_CNT_LBN 40 +#define FRF_AZ_EVQ_WET_REQ_CNT_WIDTH 20 +#define FRF_AZ_EVQ_INIT_REQ_CNT_LBN 20 +#define FRF_AZ_EVQ_INIT_REQ_CNT_WIDTH 20 +#define FRF_AZ_EVQ_TM_REQ_CNT_LBN 0 +#define FRF_AZ_EVQ_TM_REQ_CNT_WIDTH 20 + + +/* + * FR_CZ_USR_EV_REG(32bit): + * Event mailbox register + */ +#define FR_CZ_USR_EV_REG_OFST 0x00000540 +/* sienaa0=net_func_bar2 */ +#define FR_CZ_USR_EV_REG_STEP 8192 +#define FR_CZ_USR_EV_REG_ROWS 1024 + +#define FRF_CZ_USR_EV_DATA_LBN 0 +#define FRF_CZ_USR_EV_DATA_WIDTH 32 + + +/* + * FR_AZ_BUF_TBL_CFG_REG(128bit): + * Buffer table configuration register + */ +#define FR_AZ_BUF_TBL_CFG_REG_OFST 0x00000600 +/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */ + +#define FRF_AZ_BUF_TBL_MODE_LBN 3 +#define FRF_AZ_BUF_TBL_MODE_WIDTH 1 + + +/* + * FR_AZ_SRM_RX_DC_CFG_REG(128bit): + * SRAM receive descriptor cache configuration register + */ +#define FR_AZ_SRM_RX_DC_CFG_REG_OFST 0x00000610 +/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */ + +#define FRF_AZ_SRM_CLK_TMP_EN_LBN 21 +#define FRF_AZ_SRM_CLK_TMP_EN_WIDTH 1 +#define FRF_AZ_SRM_RX_DC_BASE_ADR_LBN 0 +#define FRF_AZ_SRM_RX_DC_BASE_ADR_WIDTH 21 + + +/* + * FR_AZ_SRM_TX_DC_CFG_REG(128bit): + * SRAM transmit descriptor cache configuration register + */ +#define FR_AZ_SRM_TX_DC_CFG_REG_OFST 0x00000620 +/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */ + +#define FRF_AZ_SRM_TX_DC_BASE_ADR_LBN 0 +#define FRF_AZ_SRM_TX_DC_BASE_ADR_WIDTH 21 + + +/* + * FR_AZ_SRM_CFG_REG(128bit): + * SRAM configuration register + */ +#define FR_AZ_SRM_CFG_REG_SF_OFST 0x00000380 +/* falcona0,falconb0=eeprom_flash */ +/* + * FR_AZ_SRM_CFG_REG(128bit): + * SRAM configuration register + */ +#define FR_AZ_SRM_CFG_REG_OFST 0x00000630 +/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */ + +#define FRF_AZ_SRM_OOB_ADR_INTEN_LBN 5 +#define FRF_AZ_SRM_OOB_ADR_INTEN_WIDTH 1 +#define FRF_AZ_SRM_OOB_BUF_INTEN_LBN 4 +#define FRF_AZ_SRM_OOB_BUF_INTEN_WIDTH 1 +#define FRF_AZ_SRM_INIT_EN_LBN 3 +#define FRF_AZ_SRM_INIT_EN_WIDTH 1 +#define FRF_AZ_SRM_NUM_BANK_LBN 2 +#define FRF_AZ_SRM_NUM_BANK_WIDTH 1 +#define FRF_AZ_SRM_BANK_SIZE_LBN 0 +#define FRF_AZ_SRM_BANK_SIZE_WIDTH 2 + + +/* + * FR_AZ_BUF_TBL_UPD_REG(128bit): + * Buffer table update register + */ +#define FR_AZ_BUF_TBL_UPD_REG_OFST 0x00000650 +/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */ + +#define FRF_AZ_BUF_UPD_CMD_LBN 63 +#define FRF_AZ_BUF_UPD_CMD_WIDTH 1 +#define FRF_AZ_BUF_CLR_CMD_LBN 62 +#define FRF_AZ_BUF_CLR_CMD_WIDTH 1 +#define FRF_AZ_BUF_CLR_END_ID_LBN 32 +#define FRF_AZ_BUF_CLR_END_ID_WIDTH 20 +#define FRF_AZ_BUF_CLR_START_ID_LBN 0 +#define FRF_AZ_BUF_CLR_START_ID_WIDTH 20 + + +/* + * FR_AZ_SRM_UPD_EVQ_REG(128bit): + * Buffer table update register + */ +#define FR_AZ_SRM_UPD_EVQ_REG_OFST 0x00000660 +/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */ + +#define FRF_AZ_SRM_UPD_EVQ_ID_LBN 0 +#define FRF_AZ_SRM_UPD_EVQ_ID_WIDTH 12 + + +/* + * FR_AZ_SRAM_PARITY_REG(128bit): + * SRAM parity register. 
+ */ +#define FR_AZ_SRAM_PARITY_REG_OFST 0x00000670 +/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */ + +#define FRF_CZ_BYPASS_ECC_LBN 3 +#define FRF_CZ_BYPASS_ECC_WIDTH 1 +#define FRF_CZ_SEC_INT_LBN 2 +#define FRF_CZ_SEC_INT_WIDTH 1 +#define FRF_CZ_FORCE_SRAM_DOUBLE_ERR_LBN 1 +#define FRF_CZ_FORCE_SRAM_DOUBLE_ERR_WIDTH 1 +#define FRF_CZ_FORCE_SRAM_SINGLE_ERR_LBN 0 +#define FRF_CZ_FORCE_SRAM_SINGLE_ERR_WIDTH 1 +#define FRF_AB_FORCE_SRAM_PERR_LBN 0 +#define FRF_AB_FORCE_SRAM_PERR_WIDTH 1 + + +/* + * FR_AZ_RX_CFG_REG(128bit): + * Receive configuration register + */ +#define FR_AZ_RX_CFG_REG_OFST 0x00000800 +/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */ + +#define FRF_CZ_RX_HDR_SPLIT_EN_LBN 71 +#define FRF_CZ_RX_HDR_SPLIT_EN_WIDTH 1 +#define FRF_CZ_RX_HDR_SPLIT_PLD_BUF_SIZE_LBN 62 +#define FRF_CZ_RX_HDR_SPLIT_PLD_BUF_SIZE_WIDTH 9 +#define FRF_CZ_RX_HDR_SPLIT_HDR_BUF_SIZE_LBN 53 +#define FRF_CZ_RX_HDR_SPLIT_HDR_BUF_SIZE_WIDTH 9 +#define FRF_CZ_RX_PRE_RFF_IPG_LBN 49 +#define FRF_CZ_RX_PRE_RFF_IPG_WIDTH 4 +#define FRF_BZ_RX_TCP_SUP_LBN 48 +#define FRF_BZ_RX_TCP_SUP_WIDTH 1 +#define FRF_BZ_RX_INGR_EN_LBN 47 +#define FRF_BZ_RX_INGR_EN_WIDTH 1 +#define FRF_BZ_RX_IP_HASH_LBN 46 +#define FRF_BZ_RX_IP_HASH_WIDTH 1 +#define FRF_BZ_RX_HASH_ALG_LBN 45 +#define FRF_BZ_RX_HASH_ALG_WIDTH 1 +#define FRF_BZ_RX_HASH_INSRT_HDR_LBN 44 +#define FRF_BZ_RX_HASH_INSRT_HDR_WIDTH 1 +#define FRF_BZ_RX_DESC_PUSH_EN_LBN 43 +#define FRF_BZ_RX_DESC_PUSH_EN_WIDTH 1 +#define FRF_BZ_RX_RDW_PATCH_EN_LBN 42 +#define FRF_BZ_RX_RDW_PATCH_EN_WIDTH 1 +#define FRF_BB_RX_PCI_BURST_SIZE_LBN 39 +#define FRF_BB_RX_PCI_BURST_SIZE_WIDTH 3 +#define FRF_BZ_RX_OWNERR_CTL_LBN 38 +#define FRF_BZ_RX_OWNERR_CTL_WIDTH 1 +#define FRF_BZ_RX_XON_TX_TH_LBN 33 +#define FRF_BZ_RX_XON_TX_TH_WIDTH 5 +#define FRF_AA_RX_DESC_PUSH_EN_LBN 35 +#define FRF_AA_RX_DESC_PUSH_EN_WIDTH 1 +#define FRF_AA_RX_RDW_PATCH_EN_LBN 34 +#define FRF_AA_RX_RDW_PATCH_EN_WIDTH 1 +#define FRF_AA_RX_PCI_BURST_SIZE_LBN 31 +#define FRF_AA_RX_PCI_BURST_SIZE_WIDTH 3 +#define FRF_BZ_RX_XOFF_TX_TH_LBN 28 +#define FRF_BZ_RX_XOFF_TX_TH_WIDTH 5 +#define FRF_AA_RX_OWNERR_CTL_LBN 30 +#define FRF_AA_RX_OWNERR_CTL_WIDTH 1 +#define FRF_AA_RX_XON_TX_TH_LBN 25 +#define FRF_AA_RX_XON_TX_TH_WIDTH 5 +#define FRF_BZ_RX_USR_BUF_SIZE_LBN 19 +#define FRF_BZ_RX_USR_BUF_SIZE_WIDTH 9 +#define FRF_AA_RX_XOFF_TX_TH_LBN 20 +#define FRF_AA_RX_XOFF_TX_TH_WIDTH 5 +#define FRF_AA_RX_USR_BUF_SIZE_LBN 11 +#define FRF_AA_RX_USR_BUF_SIZE_WIDTH 9 +#define FRF_BZ_RX_XON_MAC_TH_LBN 10 +#define FRF_BZ_RX_XON_MAC_TH_WIDTH 9 +#define FRF_AA_RX_XON_MAC_TH_LBN 6 +#define FRF_AA_RX_XON_MAC_TH_WIDTH 5 +#define FRF_BZ_RX_XOFF_MAC_TH_LBN 1 +#define FRF_BZ_RX_XOFF_MAC_TH_WIDTH 9 +#define FRF_AA_RX_XOFF_MAC_TH_LBN 1 +#define FRF_AA_RX_XOFF_MAC_TH_WIDTH 5 +#define FRF_AZ_RX_XOFF_MAC_EN_LBN 0 +#define FRF_AZ_RX_XOFF_MAC_EN_WIDTH 1 + + +/* + * FR_AZ_RX_FILTER_CTL_REG(128bit): + * Receive filter control registers + */ +#define FR_AZ_RX_FILTER_CTL_REG_OFST 0x00000810 +/* falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */ + +#define FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT_LBN 94 +#define FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT_WIDTH 8 +#define FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT_LBN 86 +#define FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT_WIDTH 8 +#define FRF_CZ_RX_FILTER_ALL_VLAN_ETHERTYPES_LBN 85 +#define FRF_CZ_RX_FILTER_ALL_VLAN_ETHERTYPES_WIDTH 1 +#define FRF_CZ_RX_VLAN_MATCH_ETHERTYPE_LBN 69 +#define FRF_CZ_RX_VLAN_MATCH_ETHERTYPE_WIDTH 16 +#define FRF_CZ_MULTICAST_NOMATCH_Q_ID_LBN 57 +#define 
FRF_CZ_MULTICAST_NOMATCH_Q_ID_WIDTH 12 +#define FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED_LBN 56 +#define FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED_WIDTH 1 +#define FRF_CZ_MULTICAST_NOMATCH_IP_OVERRIDE_LBN 55 +#define FRF_CZ_MULTICAST_NOMATCH_IP_OVERRIDE_WIDTH 1 +#define FRF_CZ_UNICAST_NOMATCH_Q_ID_LBN 43 +#define FRF_CZ_UNICAST_NOMATCH_Q_ID_WIDTH 12 +#define FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED_LBN 42 +#define FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED_WIDTH 1 +#define FRF_CZ_UNICAST_NOMATCH_IP_OVERRIDE_LBN 41 +#define FRF_CZ_UNICAST_NOMATCH_IP_OVERRIDE_WIDTH 1 +#define FRF_BZ_SCATTER_ENBL_NO_MATCH_Q_LBN 40 +#define FRF_BZ_SCATTER_ENBL_NO_MATCH_Q_WIDTH 1 +#define FRF_AZ_UDP_FULL_SRCH_LIMIT_LBN 32 +#define FRF_AZ_UDP_FULL_SRCH_LIMIT_WIDTH 8 +#define FRF_AZ_NUM_KER_LBN 24 +#define FRF_AZ_NUM_KER_WIDTH 2 +#define FRF_AZ_UDP_WILD_SRCH_LIMIT_LBN 16 +#define FRF_AZ_UDP_WILD_SRCH_LIMIT_WIDTH 8 +#define FRF_AZ_TCP_WILD_SRCH_LIMIT_LBN 8 +#define FRF_AZ_TCP_WILD_SRCH_LIMIT_WIDTH 8 +#define FRF_AZ_TCP_FULL_SRCH_LIMIT_LBN 0 +#define FRF_AZ_TCP_FULL_SRCH_LIMIT_WIDTH 8 + + +/* + * FR_AZ_RX_FLUSH_DESCQ_REG(128bit): + * Receive flush descriptor queue register + */ +#define FR_AZ_RX_FLUSH_DESCQ_REG_OFST 0x00000820 +/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */ + +#define FRF_AZ_RX_FLUSH_DESCQ_CMD_LBN 24 +#define FRF_AZ_RX_FLUSH_DESCQ_CMD_WIDTH 1 +#define FRF_AZ_RX_FLUSH_DESCQ_LBN 0 +#define FRF_AZ_RX_FLUSH_DESCQ_WIDTH 12 + + +/* + * FR_BZ_RX_DESC_UPD_REGP0(128bit): + * Receive descriptor update register. + */ +#define FR_BZ_RX_DESC_UPD_REGP0_OFST 0x00000830 +/* falconb0,sienaa0=net_func_bar2 */ +#define FR_BZ_RX_DESC_UPD_REGP0_STEP 8192 +#define FR_BZ_RX_DESC_UPD_REGP0_ROWS 1024 +/* + * FR_AA_RX_DESC_UPD_REG_KER(128bit): + * Receive descriptor update register. + */ +#define FR_AA_RX_DESC_UPD_REG_KER_OFST 0x00000830 +/* falcona0=net_func_bar2 */ +#define FR_AA_RX_DESC_UPD_REG_KER_STEP 8192 +#define FR_AA_RX_DESC_UPD_REG_KER_ROWS 4 +/* + * FR_AB_RX_DESC_UPD_REGP123(128bit): + * Receive descriptor update register. + */ +#define FR_AB_RX_DESC_UPD_REGP123_OFST 0x01000830 +/* falconb0=net_func_bar2,falcona0=char_func_bar0 */ +#define FR_AB_RX_DESC_UPD_REGP123_STEP 8192 +#define FR_AB_RX_DESC_UPD_REGP123_ROWS 3072 +/* + * FR_AA_RX_DESC_UPD_REGP0(128bit): + * Receive descriptor update register. 
+ */ +#define FR_AA_RX_DESC_UPD_REGP0_OFST 0x00008830 +/* falcona0=char_func_bar0 */ +#define FR_AA_RX_DESC_UPD_REGP0_STEP 8192 +#define FR_AA_RX_DESC_UPD_REGP0_ROWS 1020 + +#define FRF_AZ_RX_DESC_WPTR_LBN 96 +#define FRF_AZ_RX_DESC_WPTR_WIDTH 12 +#define FRF_AZ_RX_DESC_PUSH_CMD_LBN 95 +#define FRF_AZ_RX_DESC_PUSH_CMD_WIDTH 1 +#define FRF_AZ_RX_DESC_LBN 0 +#define FRF_AZ_RX_DESC_WIDTH 64 +#define FRF_AZ_RX_DESC_DW0_LBN 0 +#define FRF_AZ_RX_DESC_DW0_WIDTH 32 +#define FRF_AZ_RX_DESC_DW1_LBN 32 +#define FRF_AZ_RX_DESC_DW1_WIDTH 32 + + +/* + * FR_AZ_RX_DC_CFG_REG(128bit): + * Receive descriptor cache configuration register + */ +#define FR_AZ_RX_DC_CFG_REG_OFST 0x00000840 +/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */ + +#define FRF_AZ_RX_MAX_PF_LBN 2 +#define FRF_AZ_RX_MAX_PF_WIDTH 2 +#define FRF_AZ_RX_DC_SIZE_LBN 0 +#define FRF_AZ_RX_DC_SIZE_WIDTH 2 +#define FFE_AZ_RX_DC_SIZE_64 3 +#define FFE_AZ_RX_DC_SIZE_32 2 +#define FFE_AZ_RX_DC_SIZE_16 1 +#define FFE_AZ_RX_DC_SIZE_8 0 + + +/* + * FR_AZ_RX_DC_PF_WM_REG(128bit): + * Receive descriptor cache pre-fetch watermark register + */ +#define FR_AZ_RX_DC_PF_WM_REG_OFST 0x00000850 +/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */ + +#define FRF_AZ_RX_DC_PF_HWM_LBN 6 +#define FRF_AZ_RX_DC_PF_HWM_WIDTH 6 +#define FRF_AZ_RX_DC_PF_LWM_LBN 0 +#define FRF_AZ_RX_DC_PF_LWM_WIDTH 6 + + +/* + * FR_BZ_RX_RSS_TKEY_REG(128bit): + * RSS Toeplitz hash key + */ +#define FR_BZ_RX_RSS_TKEY_REG_OFST 0x00000860 +/* falconb0,sienaa0=net_func_bar2 */ + +#define FRF_BZ_RX_RSS_TKEY_LBN 96 +#define FRF_BZ_RX_RSS_TKEY_WIDTH 32 +#define FRF_BZ_RX_RSS_TKEY_DW3_LBN 96 +#define FRF_BZ_RX_RSS_TKEY_DW3_WIDTH 32 +#define FRF_BZ_RX_RSS_TKEY_DW2_LBN 64 +#define FRF_BZ_RX_RSS_TKEY_DW2_WIDTH 32 +#define FRF_BZ_RX_RSS_TKEY_DW1_LBN 32 +#define FRF_BZ_RX_RSS_TKEY_DW1_WIDTH 32 +#define FRF_BZ_RX_RSS_TKEY_DW0_LBN 0 +#define FRF_BZ_RX_RSS_TKEY_DW0_WIDTH 32 + + +/* + * FR_AZ_RX_NODESC_DROP_REG(128bit): + * Receive dropped packet counter register + */ +#define FR_AZ_RX_NODESC_DROP_REG_OFST 0x00000880 +/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */ + +#define FRF_AZ_RX_NODESC_DROP_CNT_LBN 0 +#define FRF_AZ_RX_NODESC_DROP_CNT_WIDTH 16 + + +/* + * FR_AZ_RX_SELF_RST_REG(128bit): + * Receive self reset register + */ +#define FR_AZ_RX_SELF_RST_REG_OFST 0x00000890 +/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */ + +#define FRF_AZ_RX_ISCSI_DIS_LBN 17 +#define FRF_AZ_RX_ISCSI_DIS_WIDTH 1 +#define FRF_AB_RX_SW_RST_REG_LBN 16 +#define FRF_AB_RX_SW_RST_REG_WIDTH 1 +#define FRF_AB_RX_SELF_RST_EN_LBN 8 +#define FRF_AB_RX_SELF_RST_EN_WIDTH 1 +#define FRF_AZ_RX_MAX_PF_LAT_LBN 4 +#define FRF_AZ_RX_MAX_PF_LAT_WIDTH 4 +#define FRF_AZ_RX_MAX_LU_LAT_LBN 0 +#define FRF_AZ_RX_MAX_LU_LAT_WIDTH 4 + + +/* + * FR_AZ_RX_DEBUG_REG(128bit): + * undocumented register + */ +#define FR_AZ_RX_DEBUG_REG_OFST 0x000008a0 +/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */ + +#define FRF_AZ_RX_DEBUG_LBN 0 +#define FRF_AZ_RX_DEBUG_WIDTH 64 +#define FRF_AZ_RX_DEBUG_DW0_LBN 0 +#define FRF_AZ_RX_DEBUG_DW0_WIDTH 32 +#define FRF_AZ_RX_DEBUG_DW1_LBN 32 +#define FRF_AZ_RX_DEBUG_DW1_WIDTH 32 + + +/* + * FR_AZ_RX_PUSH_DROP_REG(128bit): + * Receive descriptor push dropped counter register + */ +#define FR_AZ_RX_PUSH_DROP_REG_OFST 0x000008b0 +/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */ + +#define FRF_AZ_RX_PUSH_DROP_CNT_LBN 0 +#define FRF_AZ_RX_PUSH_DROP_CNT_WIDTH 32 + + +/* + * 
FR_CZ_RX_RSS_IPV6_REG1(128bit): + * IPv6 RSS Toeplitz hash key low bytes + */ +#define FR_CZ_RX_RSS_IPV6_REG1_OFST 0x000008d0 +/* sienaa0=net_func_bar2 */ + +#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_LBN 0 +#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_WIDTH 128 +#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_DW0_LBN 0 +#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_DW0_WIDTH 32 +#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_DW1_LBN 32 +#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_DW1_WIDTH 32 +#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_DW2_LBN 64 +#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_DW2_WIDTH 32 +#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_DW3_LBN 96 +#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_DW3_WIDTH 32 + + +/* + * FR_CZ_RX_RSS_IPV6_REG2(128bit): + * IPv6 RSS Toeplitz hash key middle bytes + */ +#define FR_CZ_RX_RSS_IPV6_REG2_OFST 0x000008e0 +/* sienaa0=net_func_bar2 */ + +#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_LBN 0 +#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_WIDTH 128 +#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_DW0_LBN 0 +#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_DW0_WIDTH 32 +#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_DW1_LBN 32 +#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_DW1_WIDTH 32 +#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_DW2_LBN 64 +#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_DW2_WIDTH 32 +#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_DW3_LBN 96 +#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_DW3_WIDTH 32 + + +/* + * FR_CZ_RX_RSS_IPV6_REG3(128bit): + * IPv6 RSS Toeplitz hash key upper bytes and IPv6 RSS settings + */ +#define FR_CZ_RX_RSS_IPV6_REG3_OFST 0x000008f0 +/* sienaa0=net_func_bar2 */ + +#define FRF_CZ_RX_RSS_IPV6_THASH_ENABLE_LBN 66 +#define FRF_CZ_RX_RSS_IPV6_THASH_ENABLE_WIDTH 1 +#define FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE_LBN 65 +#define FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE_WIDTH 1 +#define FRF_CZ_RX_RSS_IPV6_TCP_SUPPRESS_LBN 64 +#define FRF_CZ_RX_RSS_IPV6_TCP_SUPPRESS_WIDTH 1 +#define FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN 0 +#define FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH 64 +#define FRF_CZ_RX_RSS_IPV6_TKEY_HI_DW0_LBN 0 +#define FRF_CZ_RX_RSS_IPV6_TKEY_HI_DW0_WIDTH 32 +#define FRF_CZ_RX_RSS_IPV6_TKEY_HI_DW1_LBN 32 +#define FRF_CZ_RX_RSS_IPV6_TKEY_HI_DW1_WIDTH 32 + + +/* + * FR_AZ_TX_FLUSH_DESCQ_REG(128bit): + * Transmit flush descriptor queue register + */ +#define FR_AZ_TX_FLUSH_DESCQ_REG_OFST 0x00000a00 +/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */ + +#define FRF_AZ_TX_FLUSH_DESCQ_CMD_LBN 12 +#define FRF_AZ_TX_FLUSH_DESCQ_CMD_WIDTH 1 +#define FRF_AZ_TX_FLUSH_DESCQ_LBN 0 +#define FRF_AZ_TX_FLUSH_DESCQ_WIDTH 12 + + +/* + * FR_BZ_TX_DESC_UPD_REGP0(128bit): + * Transmit descriptor update register. + */ +#define FR_BZ_TX_DESC_UPD_REGP0_OFST 0x00000a10 +/* falconb0,sienaa0=net_func_bar2 */ +#define FR_BZ_TX_DESC_UPD_REGP0_STEP 8192 +#define FR_BZ_TX_DESC_UPD_REGP0_ROWS 1024 +/* + * FR_AA_TX_DESC_UPD_REG_KER(128bit): + * Transmit descriptor update register. + */ +#define FR_AA_TX_DESC_UPD_REG_KER_OFST 0x00000a10 +/* falcona0=net_func_bar2 */ +#define FR_AA_TX_DESC_UPD_REG_KER_STEP 8192 +#define FR_AA_TX_DESC_UPD_REG_KER_ROWS 8 +/* + * FR_AB_TX_DESC_UPD_REGP123(128bit): + * Transmit descriptor update register. + */ +#define FR_AB_TX_DESC_UPD_REGP123_OFST 0x01000a10 +/* falconb0=net_func_bar2,falcona0=char_func_bar0 */ +#define FR_AB_TX_DESC_UPD_REGP123_STEP 8192 +#define FR_AB_TX_DESC_UPD_REGP123_ROWS 3072 +/* + * FR_AA_TX_DESC_UPD_REGP0(128bit): + * Transmit descriptor update register. 
+ */ +#define FR_AA_TX_DESC_UPD_REGP0_OFST 0x00008a10 +/* falcona0=char_func_bar0 */ +#define FR_AA_TX_DESC_UPD_REGP0_STEP 8192 +#define FR_AA_TX_DESC_UPD_REGP0_ROWS 1020 + +#define FRF_AZ_TX_DESC_WPTR_LBN 96 +#define FRF_AZ_TX_DESC_WPTR_WIDTH 12 +#define FRF_AZ_TX_DESC_PUSH_CMD_LBN 95 +#define FRF_AZ_TX_DESC_PUSH_CMD_WIDTH 1 +#define FRF_AZ_TX_DESC_LBN 0 +#define FRF_AZ_TX_DESC_WIDTH 95 +#define FRF_AZ_TX_DESC_DW0_LBN 0 +#define FRF_AZ_TX_DESC_DW0_WIDTH 32 +#define FRF_AZ_TX_DESC_DW1_LBN 32 +#define FRF_AZ_TX_DESC_DW1_WIDTH 32 +#define FRF_AZ_TX_DESC_DW2_LBN 64 +#define FRF_AZ_TX_DESC_DW2_WIDTH 31 + + +/* + * FR_AZ_TX_DC_CFG_REG(128bit): + * Transmit descriptor cache configuration register + */ +#define FR_AZ_TX_DC_CFG_REG_OFST 0x00000a20 +/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */ + +#define FRF_AZ_TX_DC_SIZE_LBN 0 +#define FRF_AZ_TX_DC_SIZE_WIDTH 2 +#define FFE_AZ_TX_DC_SIZE_32 2 +#define FFE_AZ_TX_DC_SIZE_16 1 +#define FFE_AZ_TX_DC_SIZE_8 0 + + +/* + * FR_AA_TX_CHKSM_CFG_REG(128bit): + * Transmit checksum configuration register + */ +#define FR_AA_TX_CHKSM_CFG_REG_OFST 0x00000a30 +/* falcona0=net_func_bar2,falcona0=char_func_bar0 */ + +#define FRF_AA_TX_Q_CHKSM_DIS_96_127_LBN 96 +#define FRF_AA_TX_Q_CHKSM_DIS_96_127_WIDTH 32 +#define FRF_AA_TX_Q_CHKSM_DIS_64_95_LBN 64 +#define FRF_AA_TX_Q_CHKSM_DIS_64_95_WIDTH 32 +#define FRF_AA_TX_Q_CHKSM_DIS_32_63_LBN 32 +#define FRF_AA_TX_Q_CHKSM_DIS_32_63_WIDTH 32 +#define FRF_AA_TX_Q_CHKSM_DIS_0_31_LBN 0 +#define FRF_AA_TX_Q_CHKSM_DIS_0_31_WIDTH 32 + + +/* + * FR_AZ_TX_CFG_REG(128bit): + * Transmit configuration register + */ +#define FR_AZ_TX_CFG_REG_OFST 0x00000a50 +/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */ + +#define FRF_CZ_TX_CONT_LOOKUP_THRESH_RANGE_LBN 114 +#define FRF_CZ_TX_CONT_LOOKUP_THRESH_RANGE_WIDTH 8 +#define FRF_CZ_TX_FILTER_TEST_MODE_BIT_LBN 113 +#define FRF_CZ_TX_FILTER_TEST_MODE_BIT_WIDTH 1 +#define FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE_LBN 105 +#define FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE_WIDTH 8 +#define FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE_LBN 97 +#define FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE_WIDTH 8 +#define FRF_CZ_TX_UDPIP_FILTER_WILD_SEARCH_RANGE_LBN 89 +#define FRF_CZ_TX_UDPIP_FILTER_WILD_SEARCH_RANGE_WIDTH 8 +#define FRF_CZ_TX_UDPIP_FILTER_FULL_SEARCH_RANGE_LBN 81 +#define FRF_CZ_TX_UDPIP_FILTER_FULL_SEARCH_RANGE_WIDTH 8 +#define FRF_CZ_TX_TCPIP_FILTER_WILD_SEARCH_RANGE_LBN 73 +#define FRF_CZ_TX_TCPIP_FILTER_WILD_SEARCH_RANGE_WIDTH 8 +#define FRF_CZ_TX_TCPIP_FILTER_FULL_SEARCH_RANGE_LBN 65 +#define FRF_CZ_TX_TCPIP_FILTER_FULL_SEARCH_RANGE_WIDTH 8 +#define FRF_CZ_TX_FILTER_ALL_VLAN_ETHERTYPES_BIT_LBN 64 +#define FRF_CZ_TX_FILTER_ALL_VLAN_ETHERTYPES_BIT_WIDTH 1 +#define FRF_CZ_TX_VLAN_MATCH_ETHERTYPE_RANGE_LBN 48 +#define FRF_CZ_TX_VLAN_MATCH_ETHERTYPE_RANGE_WIDTH 16 +#define FRF_CZ_TX_FILTER_EN_BIT_LBN 47 +#define FRF_CZ_TX_FILTER_EN_BIT_WIDTH 1 +#define FRF_AZ_TX_IP_ID_P0_OFS_LBN 16 +#define FRF_AZ_TX_IP_ID_P0_OFS_WIDTH 15 +#define FRF_AZ_TX_NO_EOP_DISC_EN_LBN 5 +#define FRF_AZ_TX_NO_EOP_DISC_EN_WIDTH 1 +#define FRF_AZ_TX_P1_PRI_EN_LBN 4 +#define FRF_AZ_TX_P1_PRI_EN_WIDTH 1 +#define FRF_AZ_TX_OWNERR_CTL_LBN 2 +#define FRF_AZ_TX_OWNERR_CTL_WIDTH 1 +#define FRF_AA_TX_NON_IP_DROP_DIS_LBN 1 +#define FRF_AA_TX_NON_IP_DROP_DIS_WIDTH 1 +#define FRF_AZ_TX_IP_ID_REP_EN_LBN 0 +#define FRF_AZ_TX_IP_ID_REP_EN_WIDTH 1 + + +/* + * FR_AZ_TX_PUSH_DROP_REG(128bit): + * Transmit push dropped register + */ +#define FR_AZ_TX_PUSH_DROP_REG_OFST 0x00000a60 +/* 
falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */ + +#define FRF_AZ_TX_PUSH_DROP_CNT_LBN 0 +#define FRF_AZ_TX_PUSH_DROP_CNT_WIDTH 32 + + +/* + * FR_AZ_TX_RESERVED_REG(128bit): + * Transmit configuration register + */ +#define FR_AZ_TX_RESERVED_REG_OFST 0x00000a80 +/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */ + +#define FRF_AZ_TX_EVT_CNT_LBN 121 +#define FRF_AZ_TX_EVT_CNT_WIDTH 7 +#define FRF_AZ_TX_PREF_AGE_CNT_LBN 119 +#define FRF_AZ_TX_PREF_AGE_CNT_WIDTH 2 +#define FRF_AZ_TX_RD_COMP_TMR_LBN 96 +#define FRF_AZ_TX_RD_COMP_TMR_WIDTH 23 +#define FRF_AZ_TX_PUSH_EN_LBN 89 +#define FRF_AZ_TX_PUSH_EN_WIDTH 1 +#define FRF_AZ_TX_PUSH_CHK_DIS_LBN 88 +#define FRF_AZ_TX_PUSH_CHK_DIS_WIDTH 1 +#define FRF_AZ_TX_D_FF_FULL_P0_LBN 85 +#define FRF_AZ_TX_D_FF_FULL_P0_WIDTH 1 +#define FRF_AZ_TX_DMAR_ST_P0_LBN 81 +#define FRF_AZ_TX_DMAR_ST_P0_WIDTH 1 +#define FRF_AZ_TX_DMAQ_ST_LBN 78 +#define FRF_AZ_TX_DMAQ_ST_WIDTH 1 +#define FRF_AZ_TX_RX_SPACER_LBN 64 +#define FRF_AZ_TX_RX_SPACER_WIDTH 8 +#define FRF_AZ_TX_DROP_ABORT_EN_LBN 60 +#define FRF_AZ_TX_DROP_ABORT_EN_WIDTH 1 +#define FRF_AZ_TX_SOFT_EVT_EN_LBN 59 +#define FRF_AZ_TX_SOFT_EVT_EN_WIDTH 1 +#define FRF_AZ_TX_PS_EVT_DIS_LBN 58 +#define FRF_AZ_TX_PS_EVT_DIS_WIDTH 1 +#define FRF_AZ_TX_RX_SPACER_EN_LBN 57 +#define FRF_AZ_TX_RX_SPACER_EN_WIDTH 1 +#define FRF_AZ_TX_XP_TIMER_LBN 52 +#define FRF_AZ_TX_XP_TIMER_WIDTH 5 +#define FRF_AZ_TX_PREF_SPACER_LBN 44 +#define FRF_AZ_TX_PREF_SPACER_WIDTH 8 +#define FRF_AZ_TX_PREF_WD_TMR_LBN 22 +#define FRF_AZ_TX_PREF_WD_TMR_WIDTH 22 +#define FRF_AZ_TX_ONLY1TAG_LBN 21 +#define FRF_AZ_TX_ONLY1TAG_WIDTH 1 +#define FRF_AZ_TX_PREF_THRESHOLD_LBN 19 +#define FRF_AZ_TX_PREF_THRESHOLD_WIDTH 2 +#define FRF_AZ_TX_ONE_PKT_PER_Q_LBN 18 +#define FRF_AZ_TX_ONE_PKT_PER_Q_WIDTH 1 +#define FRF_AZ_TX_DIS_NON_IP_EV_LBN 17 +#define FRF_AZ_TX_DIS_NON_IP_EV_WIDTH 1 +#define FRF_AA_TX_DMA_FF_THR_LBN 16 +#define FRF_AA_TX_DMA_FF_THR_WIDTH 1 +#define FRF_AZ_TX_DMA_SPACER_LBN 8 +#define FRF_AZ_TX_DMA_SPACER_WIDTH 8 +#define FRF_AA_TX_TCP_DIS_LBN 7 +#define FRF_AA_TX_TCP_DIS_WIDTH 1 +#define FRF_BZ_TX_FLUSH_MIN_LEN_EN_LBN 7 +#define FRF_BZ_TX_FLUSH_MIN_LEN_EN_WIDTH 1 +#define FRF_AA_TX_IP_DIS_LBN 6 +#define FRF_AA_TX_IP_DIS_WIDTH 1 +#define FRF_AZ_TX_MAX_CPL_LBN 2 +#define FRF_AZ_TX_MAX_CPL_WIDTH 2 +#define FFE_AZ_TX_MAX_CPL_16 3 +#define FFE_AZ_TX_MAX_CPL_8 2 +#define FFE_AZ_TX_MAX_CPL_4 1 +#define FFE_AZ_TX_MAX_CPL_NOLIMIT 0 +#define FRF_AZ_TX_MAX_PREF_LBN 0 +#define FRF_AZ_TX_MAX_PREF_WIDTH 2 +#define FFE_AZ_TX_MAX_PREF_32 3 +#define FFE_AZ_TX_MAX_PREF_16 2 +#define FFE_AZ_TX_MAX_PREF_8 1 +#define FFE_AZ_TX_MAX_PREF_OFF 0 + + +/* + * FR_BZ_TX_PACE_REG(128bit): + * Transmit pace control register + */ +#define FR_BZ_TX_PACE_REG_OFST 0x00000a90 +/* falconb0,sienaa0=net_func_bar2 */ +/* + * FR_AA_TX_PACE_REG(128bit): + * Transmit pace control register + */ +#define FR_AA_TX_PACE_REG_OFST 0x00f80000 +/* falcona0=char_func_bar0 */ + +#define FRF_AZ_TX_PACE_SB_NOT_AF_LBN 19 +#define FRF_AZ_TX_PACE_SB_NOT_AF_WIDTH 10 +#define FRF_AZ_TX_PACE_SB_AF_LBN 9 +#define FRF_AZ_TX_PACE_SB_AF_WIDTH 10 +#define FRF_AZ_TX_PACE_FB_BASE_LBN 5 +#define FRF_AZ_TX_PACE_FB_BASE_WIDTH 4 +#define FRF_AZ_TX_PACE_BIN_TH_LBN 0 +#define FRF_AZ_TX_PACE_BIN_TH_WIDTH 5 + + +/* + * FR_AZ_TX_PACE_DROP_QID_REG(128bit): + * PACE Drop QID Counter + */ +#define FR_AZ_TX_PACE_DROP_QID_REG_OFST 0x00000aa0 +/* falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */ + +#define FRF_AZ_TX_PACE_QID_DRP_CNT_LBN 0 +#define FRF_AZ_TX_PACE_QID_DRP_CNT_WIDTH 
16 + + +/* + * FR_AB_TX_VLAN_REG(128bit): + * Transmit VLAN tag register + */ +#define FR_AB_TX_VLAN_REG_OFST 0x00000ae0 +/* falconb0=net_func_bar2,falcona0=char_func_bar0 */ + +#define FRF_AB_TX_VLAN_EN_LBN 127 +#define FRF_AB_TX_VLAN_EN_WIDTH 1 +#define FRF_AB_TX_VLAN7_PORT1_EN_LBN 125 +#define FRF_AB_TX_VLAN7_PORT1_EN_WIDTH 1 +#define FRF_AB_TX_VLAN7_PORT0_EN_LBN 124 +#define FRF_AB_TX_VLAN7_PORT0_EN_WIDTH 1 +#define FRF_AB_TX_VLAN7_LBN 112 +#define FRF_AB_TX_VLAN7_WIDTH 12 +#define FRF_AB_TX_VLAN6_PORT1_EN_LBN 109 +#define FRF_AB_TX_VLAN6_PORT1_EN_WIDTH 1 +#define FRF_AB_TX_VLAN6_PORT0_EN_LBN 108 +#define FRF_AB_TX_VLAN6_PORT0_EN_WIDTH 1 +#define FRF_AB_TX_VLAN6_LBN 96 +#define FRF_AB_TX_VLAN6_WIDTH 12 +#define FRF_AB_TX_VLAN5_PORT1_EN_LBN 93 +#define FRF_AB_TX_VLAN5_PORT1_EN_WIDTH 1 +#define FRF_AB_TX_VLAN5_PORT0_EN_LBN 92 +#define FRF_AB_TX_VLAN5_PORT0_EN_WIDTH 1 +#define FRF_AB_TX_VLAN5_LBN 80 +#define FRF_AB_TX_VLAN5_WIDTH 12 +#define FRF_AB_TX_VLAN4_PORT1_EN_LBN 77 +#define FRF_AB_TX_VLAN4_PORT1_EN_WIDTH 1 +#define FRF_AB_TX_VLAN4_PORT0_EN_LBN 76 +#define FRF_AB_TX_VLAN4_PORT0_EN_WIDTH 1 +#define FRF_AB_TX_VLAN4_LBN 64 +#define FRF_AB_TX_VLAN4_WIDTH 12 +#define FRF_AB_TX_VLAN3_PORT1_EN_LBN 61 +#define FRF_AB_TX_VLAN3_PORT1_EN_WIDTH 1 +#define FRF_AB_TX_VLAN3_PORT0_EN_LBN 60 +#define FRF_AB_TX_VLAN3_PORT0_EN_WIDTH 1 +#define FRF_AB_TX_VLAN3_LBN 48 +#define FRF_AB_TX_VLAN3_WIDTH 12 +#define FRF_AB_TX_VLAN2_PORT1_EN_LBN 45 +#define FRF_AB_TX_VLAN2_PORT1_EN_WIDTH 1 +#define FRF_AB_TX_VLAN2_PORT0_EN_LBN 44 +#define FRF_AB_TX_VLAN2_PORT0_EN_WIDTH 1 +#define FRF_AB_TX_VLAN2_LBN 32 +#define FRF_AB_TX_VLAN2_WIDTH 12 +#define FRF_AB_TX_VLAN1_PORT1_EN_LBN 29 +#define FRF_AB_TX_VLAN1_PORT1_EN_WIDTH 1 +#define FRF_AB_TX_VLAN1_PORT0_EN_LBN 28 +#define FRF_AB_TX_VLAN1_PORT0_EN_WIDTH 1 +#define FRF_AB_TX_VLAN1_LBN 16 +#define FRF_AB_TX_VLAN1_WIDTH 12 +#define FRF_AB_TX_VLAN0_PORT1_EN_LBN 13 +#define FRF_AB_TX_VLAN0_PORT1_EN_WIDTH 1 +#define FRF_AB_TX_VLAN0_PORT0_EN_LBN 12 +#define FRF_AB_TX_VLAN0_PORT0_EN_WIDTH 1 +#define FRF_AB_TX_VLAN0_LBN 0 +#define FRF_AB_TX_VLAN0_WIDTH 12 + + +/* + * FR_AZ_TX_IPFIL_PORTEN_REG(128bit): + * Transmit filter control register + */ +#define FR_AZ_TX_IPFIL_PORTEN_REG_OFST 0x00000af0 +/* falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */ + +#define FRF_AZ_TX_MADR0_FIL_EN_LBN 64 +#define FRF_AZ_TX_MADR0_FIL_EN_WIDTH 1 +#define FRF_AB_TX_IPFIL31_PORT_EN_LBN 62 +#define FRF_AB_TX_IPFIL31_PORT_EN_WIDTH 1 +#define FRF_AB_TX_IPFIL30_PORT_EN_LBN 60 +#define FRF_AB_TX_IPFIL30_PORT_EN_WIDTH 1 +#define FRF_AB_TX_IPFIL29_PORT_EN_LBN 58 +#define FRF_AB_TX_IPFIL29_PORT_EN_WIDTH 1 +#define FRF_AB_TX_IPFIL28_PORT_EN_LBN 56 +#define FRF_AB_TX_IPFIL28_PORT_EN_WIDTH 1 +#define FRF_AB_TX_IPFIL27_PORT_EN_LBN 54 +#define FRF_AB_TX_IPFIL27_PORT_EN_WIDTH 1 +#define FRF_AB_TX_IPFIL26_PORT_EN_LBN 52 +#define FRF_AB_TX_IPFIL26_PORT_EN_WIDTH 1 +#define FRF_AB_TX_IPFIL25_PORT_EN_LBN 50 +#define FRF_AB_TX_IPFIL25_PORT_EN_WIDTH 1 +#define FRF_AB_TX_IPFIL24_PORT_EN_LBN 48 +#define FRF_AB_TX_IPFIL24_PORT_EN_WIDTH 1 +#define FRF_AB_TX_IPFIL23_PORT_EN_LBN 46 +#define FRF_AB_TX_IPFIL23_PORT_EN_WIDTH 1 +#define FRF_AB_TX_IPFIL22_PORT_EN_LBN 44 +#define FRF_AB_TX_IPFIL22_PORT_EN_WIDTH 1 +#define FRF_AB_TX_IPFIL21_PORT_EN_LBN 42 +#define FRF_AB_TX_IPFIL21_PORT_EN_WIDTH 1 +#define FRF_AB_TX_IPFIL20_PORT_EN_LBN 40 +#define FRF_AB_TX_IPFIL20_PORT_EN_WIDTH 1 +#define FRF_AB_TX_IPFIL19_PORT_EN_LBN 38 +#define FRF_AB_TX_IPFIL19_PORT_EN_WIDTH 1 +#define FRF_AB_TX_IPFIL18_PORT_EN_LBN 36 +#define 
FRF_AB_TX_IPFIL18_PORT_EN_WIDTH 1 +#define FRF_AB_TX_IPFIL17_PORT_EN_LBN 34 +#define FRF_AB_TX_IPFIL17_PORT_EN_WIDTH 1 +#define FRF_AB_TX_IPFIL16_PORT_EN_LBN 32 +#define FRF_AB_TX_IPFIL16_PORT_EN_WIDTH 1 +#define FRF_AB_TX_IPFIL15_PORT_EN_LBN 30 +#define FRF_AB_TX_IPFIL15_PORT_EN_WIDTH 1 +#define FRF_AB_TX_IPFIL14_PORT_EN_LBN 28 +#define FRF_AB_TX_IPFIL14_PORT_EN_WIDTH 1 +#define FRF_AB_TX_IPFIL13_PORT_EN_LBN 26 +#define FRF_AB_TX_IPFIL13_PORT_EN_WIDTH 1 +#define FRF_AB_TX_IPFIL12_PORT_EN_LBN 24 +#define FRF_AB_TX_IPFIL12_PORT_EN_WIDTH 1 +#define FRF_AB_TX_IPFIL11_PORT_EN_LBN 22 +#define FRF_AB_TX_IPFIL11_PORT_EN_WIDTH 1 +#define FRF_AB_TX_IPFIL10_PORT_EN_LBN 20 +#define FRF_AB_TX_IPFIL10_PORT_EN_WIDTH 1 +#define FRF_AB_TX_IPFIL9_PORT_EN_LBN 18 +#define FRF_AB_TX_IPFIL9_PORT_EN_WIDTH 1 +#define FRF_AB_TX_IPFIL8_PORT_EN_LBN 16 +#define FRF_AB_TX_IPFIL8_PORT_EN_WIDTH 1 +#define FRF_AB_TX_IPFIL7_PORT_EN_LBN 14 +#define FRF_AB_TX_IPFIL7_PORT_EN_WIDTH 1 +#define FRF_AB_TX_IPFIL6_PORT_EN_LBN 12 +#define FRF_AB_TX_IPFIL6_PORT_EN_WIDTH 1 +#define FRF_AB_TX_IPFIL5_PORT_EN_LBN 10 +#define FRF_AB_TX_IPFIL5_PORT_EN_WIDTH 1 +#define FRF_AB_TX_IPFIL4_PORT_EN_LBN 8 +#define FRF_AB_TX_IPFIL4_PORT_EN_WIDTH 1 +#define FRF_AB_TX_IPFIL3_PORT_EN_LBN 6 +#define FRF_AB_TX_IPFIL3_PORT_EN_WIDTH 1 +#define FRF_AB_TX_IPFIL2_PORT_EN_LBN 4 +#define FRF_AB_TX_IPFIL2_PORT_EN_WIDTH 1 +#define FRF_AB_TX_IPFIL1_PORT_EN_LBN 2 +#define FRF_AB_TX_IPFIL1_PORT_EN_WIDTH 1 +#define FRF_AB_TX_IPFIL0_PORT_EN_LBN 0 +#define FRF_AB_TX_IPFIL0_PORT_EN_WIDTH 1 + + +/* + * FR_AB_TX_IPFIL_TBL(128bit): + * Transmit IP source address filter table + */ +#define FR_AB_TX_IPFIL_TBL_OFST 0x00000b00 +/* falconb0=net_func_bar2,falcona0=char_func_bar0 */ +#define FR_AB_TX_IPFIL_TBL_STEP 16 +#define FR_AB_TX_IPFIL_TBL_ROWS 16 + +#define FRF_AB_TX_IPFIL_MASK_1_LBN 96 +#define FRF_AB_TX_IPFIL_MASK_1_WIDTH 32 +#define FRF_AB_TX_IP_SRC_ADR_1_LBN 64 +#define FRF_AB_TX_IP_SRC_ADR_1_WIDTH 32 +#define FRF_AB_TX_IPFIL_MASK_0_LBN 32 +#define FRF_AB_TX_IPFIL_MASK_0_WIDTH 32 +#define FRF_AB_TX_IP_SRC_ADR_0_LBN 0 +#define FRF_AB_TX_IP_SRC_ADR_0_WIDTH 32 + + +/* + * FR_AB_MD_TXD_REG(128bit): + * PHY management transmit data register + */ +#define FR_AB_MD_TXD_REG_OFST 0x00000c00 +/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */ + +#define FRF_AB_MD_TXD_LBN 0 +#define FRF_AB_MD_TXD_WIDTH 16 + + +/* + * FR_AB_MD_RXD_REG(128bit): + * PHY management receive data register + */ +#define FR_AB_MD_RXD_REG_OFST 0x00000c10 +/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */ + +#define FRF_AB_MD_RXD_LBN 0 +#define FRF_AB_MD_RXD_WIDTH 16 + + +/* + * FR_AB_MD_CS_REG(128bit): + * PHY management configuration & status register + */ +#define FR_AB_MD_CS_REG_OFST 0x00000c20 +/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */ + +#define FRF_AB_MD_RD_EN_LBN 15 +#define FRF_AB_MD_RD_EN_WIDTH 1 +#define FRF_AB_MD_WR_EN_LBN 14 +#define FRF_AB_MD_WR_EN_WIDTH 1 +#define FRF_AB_MD_ADDR_CMD_LBN 13 +#define FRF_AB_MD_ADDR_CMD_WIDTH 1 +#define FRF_AB_MD_PT_LBN 7 +#define FRF_AB_MD_PT_WIDTH 3 +#define FRF_AB_MD_PL_LBN 6 +#define FRF_AB_MD_PL_WIDTH 1 +#define FRF_AB_MD_INT_CLR_LBN 5 +#define FRF_AB_MD_INT_CLR_WIDTH 1 +#define FRF_AB_MD_GC_LBN 4 +#define FRF_AB_MD_GC_WIDTH 1 +#define FRF_AB_MD_PRSP_LBN 3 +#define FRF_AB_MD_PRSP_WIDTH 1 +#define FRF_AB_MD_RIC_LBN 2 +#define FRF_AB_MD_RIC_WIDTH 1 +#define FRF_AB_MD_RDC_LBN 1 +#define FRF_AB_MD_RDC_WIDTH 1 +#define FRF_AB_MD_WRC_LBN 0 +#define FRF_AB_MD_WRC_WIDTH 1 + + +/* + * FR_AB_MD_PHY_ADR_REG(128bit): + * 
PHY management PHY address register + */ +#define FR_AB_MD_PHY_ADR_REG_OFST 0x00000c30 +/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */ + +#define FRF_AB_MD_PHY_ADR_LBN 0 +#define FRF_AB_MD_PHY_ADR_WIDTH 16 + + +/* + * FR_AB_MD_ID_REG(128bit): + * PHY management ID register + */ +#define FR_AB_MD_ID_REG_OFST 0x00000c40 +/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */ + +#define FRF_AB_MD_PRT_ADR_LBN 11 +#define FRF_AB_MD_PRT_ADR_WIDTH 5 +#define FRF_AB_MD_DEV_ADR_LBN 6 +#define FRF_AB_MD_DEV_ADR_WIDTH 5 + + +/* + * FR_AB_MD_STAT_REG(128bit): + * PHY management status & mask register + */ +#define FR_AB_MD_STAT_REG_OFST 0x00000c50 +/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */ + +#define FRF_AB_MD_PINT_LBN 4 +#define FRF_AB_MD_PINT_WIDTH 1 +#define FRF_AB_MD_DONE_LBN 3 +#define FRF_AB_MD_DONE_WIDTH 1 +#define FRF_AB_MD_BSERR_LBN 2 +#define FRF_AB_MD_BSERR_WIDTH 1 +#define FRF_AB_MD_LNFL_LBN 1 +#define FRF_AB_MD_LNFL_WIDTH 1 +#define FRF_AB_MD_BSY_LBN 0 +#define FRF_AB_MD_BSY_WIDTH 1 + + +/* + * FR_AB_MAC_STAT_DMA_REG(128bit): + * Port MAC statistical counter DMA register + */ +#define FR_AB_MAC_STAT_DMA_REG_OFST 0x00000c60 +/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */ + +#define FRF_AB_MAC_STAT_DMA_CMD_LBN 48 +#define FRF_AB_MAC_STAT_DMA_CMD_WIDTH 1 +#define FRF_AB_MAC_STAT_DMA_ADR_LBN 0 +#define FRF_AB_MAC_STAT_DMA_ADR_WIDTH 48 +#define FRF_AB_MAC_STAT_DMA_ADR_DW0_LBN 0 +#define FRF_AB_MAC_STAT_DMA_ADR_DW0_WIDTH 32 +#define FRF_AB_MAC_STAT_DMA_ADR_DW1_LBN 32 +#define FRF_AB_MAC_STAT_DMA_ADR_DW1_WIDTH 16 + + +/* + * FR_AB_MAC_CTRL_REG(128bit): + * Port MAC control register + */ +#define FR_AB_MAC_CTRL_REG_OFST 0x00000c80 +/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */ + +#define FRF_AB_MAC_XOFF_VAL_LBN 16 +#define FRF_AB_MAC_XOFF_VAL_WIDTH 16 +#define FRF_BB_TXFIFO_DRAIN_EN_LBN 7 +#define FRF_BB_TXFIFO_DRAIN_EN_WIDTH 1 +#define FRF_AB_MAC_XG_DISTXCRC_LBN 5 +#define FRF_AB_MAC_XG_DISTXCRC_WIDTH 1 +#define FRF_AB_MAC_BCAD_ACPT_LBN 4 +#define FRF_AB_MAC_BCAD_ACPT_WIDTH 1 +#define FRF_AB_MAC_UC_PROM_LBN 3 +#define FRF_AB_MAC_UC_PROM_WIDTH 1 +#define FRF_AB_MAC_LINK_STATUS_LBN 2 +#define FRF_AB_MAC_LINK_STATUS_WIDTH 1 +#define FRF_AB_MAC_SPEED_LBN 0 +#define FRF_AB_MAC_SPEED_WIDTH 2 +#define FRF_AB_MAC_SPEED_10M 0 +#define FRF_AB_MAC_SPEED_100M 1 +#define FRF_AB_MAC_SPEED_1G 2 +#define FRF_AB_MAC_SPEED_10G 3 + +/* + * FR_BB_GEN_MODE_REG(128bit): + * General Purpose mode register (external interrupt mask) + */ +#define FR_BB_GEN_MODE_REG_OFST 0x00000c90 +/* falconb0=net_func_bar2 */ + +#define FRF_BB_XFP_PHY_INT_POL_SEL_LBN 3 +#define FRF_BB_XFP_PHY_INT_POL_SEL_WIDTH 1 +#define FRF_BB_XG_PHY_INT_POL_SEL_LBN 2 +#define FRF_BB_XG_PHY_INT_POL_SEL_WIDTH 1 +#define FRF_BB_XFP_PHY_INT_MASK_LBN 1 +#define FRF_BB_XFP_PHY_INT_MASK_WIDTH 1 +#define FRF_BB_XG_PHY_INT_MASK_LBN 0 +#define FRF_BB_XG_PHY_INT_MASK_WIDTH 1 + + +/* + * FR_AB_MAC_MC_HASH_REG0(128bit): + * Multicast address hash table + */ +#define FR_AB_MAC_MC_HASH0_REG_OFST 0x00000ca0 +/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */ + +#define FRF_AB_MAC_MCAST_HASH0_LBN 0 +#define FRF_AB_MAC_MCAST_HASH0_WIDTH 128 +#define FRF_AB_MAC_MCAST_HASH0_DW0_LBN 0 +#define FRF_AB_MAC_MCAST_HASH0_DW0_WIDTH 32 +#define FRF_AB_MAC_MCAST_HASH0_DW1_LBN 32 +#define FRF_AB_MAC_MCAST_HASH0_DW1_WIDTH 32 +#define FRF_AB_MAC_MCAST_HASH0_DW2_LBN 64 +#define FRF_AB_MAC_MCAST_HASH0_DW2_WIDTH 32 +#define FRF_AB_MAC_MCAST_HASH0_DW3_LBN 96 +#define FRF_AB_MAC_MCAST_HASH0_DW3_WIDTH 32 + 
+ +/* + * FR_AB_MAC_MC_HASH_REG1(128bit): + * Multicast address hash table + */ +#define FR_AB_MAC_MC_HASH1_REG_OFST 0x00000cb0 +/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */ + +#define FRF_AB_MAC_MCAST_HASH1_LBN 0 +#define FRF_AB_MAC_MCAST_HASH1_WIDTH 128 +#define FRF_AB_MAC_MCAST_HASH1_DW0_LBN 0 +#define FRF_AB_MAC_MCAST_HASH1_DW0_WIDTH 32 +#define FRF_AB_MAC_MCAST_HASH1_DW1_LBN 32 +#define FRF_AB_MAC_MCAST_HASH1_DW1_WIDTH 32 +#define FRF_AB_MAC_MCAST_HASH1_DW2_LBN 64 +#define FRF_AB_MAC_MCAST_HASH1_DW2_WIDTH 32 +#define FRF_AB_MAC_MCAST_HASH1_DW3_LBN 96 +#define FRF_AB_MAC_MCAST_HASH1_DW3_WIDTH 32 + + +/* + * FR_AB_GM_CFG1_REG(32bit): + * GMAC configuration register 1 + */ +#define FR_AB_GM_CFG1_REG_OFST 0x00000e00 +/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */ + +#define FRF_AB_GM_SW_RST_LBN 31 +#define FRF_AB_GM_SW_RST_WIDTH 1 +#define FRF_AB_GM_SIM_RST_LBN 30 +#define FRF_AB_GM_SIM_RST_WIDTH 1 +#define FRF_AB_GM_RST_RX_MAC_CTL_LBN 19 +#define FRF_AB_GM_RST_RX_MAC_CTL_WIDTH 1 +#define FRF_AB_GM_RST_TX_MAC_CTL_LBN 18 +#define FRF_AB_GM_RST_TX_MAC_CTL_WIDTH 1 +#define FRF_AB_GM_RST_RX_FUNC_LBN 17 +#define FRF_AB_GM_RST_RX_FUNC_WIDTH 1 +#define FRF_AB_GM_RST_TX_FUNC_LBN 16 +#define FRF_AB_GM_RST_TX_FUNC_WIDTH 1 +#define FRF_AB_GM_LOOP_LBN 8 +#define FRF_AB_GM_LOOP_WIDTH 1 +#define FRF_AB_GM_RX_FC_EN_LBN 5 +#define FRF_AB_GM_RX_FC_EN_WIDTH 1 +#define FRF_AB_GM_TX_FC_EN_LBN 4 +#define FRF_AB_GM_TX_FC_EN_WIDTH 1 +#define FRF_AB_GM_SYNC_RXEN_LBN 3 +#define FRF_AB_GM_SYNC_RXEN_WIDTH 1 +#define FRF_AB_GM_RX_EN_LBN 2 +#define FRF_AB_GM_RX_EN_WIDTH 1 +#define FRF_AB_GM_SYNC_TXEN_LBN 1 +#define FRF_AB_GM_SYNC_TXEN_WIDTH 1 +#define FRF_AB_GM_TX_EN_LBN 0 +#define FRF_AB_GM_TX_EN_WIDTH 1 + + +/* + * FR_AB_GM_CFG2_REG(32bit): + * GMAC configuration register 2 + */ +#define FR_AB_GM_CFG2_REG_OFST 0x00000e10 +/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */ + +#define FRF_AB_GM_PAMBL_LEN_LBN 12 +#define FRF_AB_GM_PAMBL_LEN_WIDTH 4 +#define FRF_AB_GM_IF_MODE_LBN 8 +#define FRF_AB_GM_IF_MODE_WIDTH 2 +#define FRF_AB_GM_IF_MODE_BYTE_MODE 2 +#define FRF_AB_GM_IF_MODE_NIBBLE_MODE 1 +#define FRF_AB_GM_HUGE_FRM_EN_LBN 5 +#define FRF_AB_GM_HUGE_FRM_EN_WIDTH 1 +#define FRF_AB_GM_LEN_CHK_LBN 4 +#define FRF_AB_GM_LEN_CHK_WIDTH 1 +#define FRF_AB_GM_PAD_CRC_EN_LBN 2 +#define FRF_AB_GM_PAD_CRC_EN_WIDTH 1 +#define FRF_AB_GM_CRC_EN_LBN 1 +#define FRF_AB_GM_CRC_EN_WIDTH 1 +#define FRF_AB_GM_FD_LBN 0 +#define FRF_AB_GM_FD_WIDTH 1 + + +/* + * FR_AB_GM_IPG_REG(32bit): + * GMAC IPG register + */ +#define FR_AB_GM_IPG_REG_OFST 0x00000e20 +/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */ + +#define FRF_AB_GM_NONB2B_IPG1_LBN 24 +#define FRF_AB_GM_NONB2B_IPG1_WIDTH 7 +#define FRF_AB_GM_NONB2B_IPG2_LBN 16 +#define FRF_AB_GM_NONB2B_IPG2_WIDTH 7 +#define FRF_AB_GM_MIN_IPG_ENF_LBN 8 +#define FRF_AB_GM_MIN_IPG_ENF_WIDTH 8 +#define FRF_AB_GM_B2B_IPG_LBN 0 +#define FRF_AB_GM_B2B_IPG_WIDTH 7 + + +/* + * FR_AB_GM_HD_REG(32bit): + * GMAC half duplex register + */ +#define FR_AB_GM_HD_REG_OFST 0x00000e30 +/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */ + +#define FRF_AB_GM_ALT_BOFF_VAL_LBN 20 +#define FRF_AB_GM_ALT_BOFF_VAL_WIDTH 4 +#define FRF_AB_GM_ALT_BOFF_EN_LBN 19 +#define FRF_AB_GM_ALT_BOFF_EN_WIDTH 1 +#define FRF_AB_GM_BP_NO_BOFF_LBN 18 +#define FRF_AB_GM_BP_NO_BOFF_WIDTH 1 +#define FRF_AB_GM_DIS_BOFF_LBN 17 +#define FRF_AB_GM_DIS_BOFF_WIDTH 1 +#define FRF_AB_GM_EXDEF_TX_EN_LBN 16 +#define FRF_AB_GM_EXDEF_TX_EN_WIDTH 1 +#define FRF_AB_GM_RTRY_LIMIT_LBN 12 
+#define FRF_AB_GM_RTRY_LIMIT_WIDTH 4 +#define FRF_AB_GM_COL_WIN_LBN 0 +#define FRF_AB_GM_COL_WIN_WIDTH 10 + + +/* + * FR_AB_GM_MAX_FLEN_REG(32bit): + * GMAC maximum frame length register + */ +#define FR_AB_GM_MAX_FLEN_REG_OFST 0x00000e40 +/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */ + +#define FRF_AB_GM_MAX_FLEN_LBN 0 +#define FRF_AB_GM_MAX_FLEN_WIDTH 16 + + +/* + * FR_AB_GM_TEST_REG(32bit): + * GMAC test register + */ +#define FR_AB_GM_TEST_REG_OFST 0x00000e70 +/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */ + +#define FRF_AB_GM_MAX_BOFF_LBN 3 +#define FRF_AB_GM_MAX_BOFF_WIDTH 1 +#define FRF_AB_GM_REG_TX_FLOW_EN_LBN 2 +#define FRF_AB_GM_REG_TX_FLOW_EN_WIDTH 1 +#define FRF_AB_GM_TEST_PAUSE_LBN 1 +#define FRF_AB_GM_TEST_PAUSE_WIDTH 1 +#define FRF_AB_GM_SHORT_SLOT_LBN 0 +#define FRF_AB_GM_SHORT_SLOT_WIDTH 1 + + +/* + * FR_AB_GM_ADR1_REG(32bit): + * GMAC station address register 1 + */ +#define FR_AB_GM_ADR1_REG_OFST 0x00000f00 +/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */ + +#define FRF_AB_GM_ADR_B0_LBN 24 +#define FRF_AB_GM_ADR_B0_WIDTH 8 +#define FRF_AB_GM_ADR_B1_LBN 16 +#define FRF_AB_GM_ADR_B1_WIDTH 8 +#define FRF_AB_GM_ADR_B2_LBN 8 +#define FRF_AB_GM_ADR_B2_WIDTH 8 +#define FRF_AB_GM_ADR_B3_LBN 0 +#define FRF_AB_GM_ADR_B3_WIDTH 8 + + +/* + * FR_AB_GM_ADR2_REG(32bit): + * GMAC station address register 2 + */ +#define FR_AB_GM_ADR2_REG_OFST 0x00000f10 +/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */ + +#define FRF_AB_GM_ADR_B4_LBN 24 +#define FRF_AB_GM_ADR_B4_WIDTH 8 +#define FRF_AB_GM_ADR_B5_LBN 16 +#define FRF_AB_GM_ADR_B5_WIDTH 8 + + +/* + * FR_AB_GMF_CFG0_REG(32bit): + * GMAC FIFO configuration register 0 + */ +#define FR_AB_GMF_CFG0_REG_OFST 0x00000f20 +/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */ + +#define FRF_AB_GMF_FTFENRPLY_LBN 20 +#define FRF_AB_GMF_FTFENRPLY_WIDTH 1 +#define FRF_AB_GMF_STFENRPLY_LBN 19 +#define FRF_AB_GMF_STFENRPLY_WIDTH 1 +#define FRF_AB_GMF_FRFENRPLY_LBN 18 +#define FRF_AB_GMF_FRFENRPLY_WIDTH 1 +#define FRF_AB_GMF_SRFENRPLY_LBN 17 +#define FRF_AB_GMF_SRFENRPLY_WIDTH 1 +#define FRF_AB_GMF_WTMENRPLY_LBN 16 +#define FRF_AB_GMF_WTMENRPLY_WIDTH 1 +#define FRF_AB_GMF_FTFENREQ_LBN 12 +#define FRF_AB_GMF_FTFENREQ_WIDTH 1 +#define FRF_AB_GMF_STFENREQ_LBN 11 +#define FRF_AB_GMF_STFENREQ_WIDTH 1 +#define FRF_AB_GMF_FRFENREQ_LBN 10 +#define FRF_AB_GMF_FRFENREQ_WIDTH 1 +#define FRF_AB_GMF_SRFENREQ_LBN 9 +#define FRF_AB_GMF_SRFENREQ_WIDTH 1 +#define FRF_AB_GMF_WTMENREQ_LBN 8 +#define FRF_AB_GMF_WTMENREQ_WIDTH 1 +#define FRF_AB_GMF_HSTRSTFT_LBN 4 +#define FRF_AB_GMF_HSTRSTFT_WIDTH 1 +#define FRF_AB_GMF_HSTRSTST_LBN 3 +#define FRF_AB_GMF_HSTRSTST_WIDTH 1 +#define FRF_AB_GMF_HSTRSTFR_LBN 2 +#define FRF_AB_GMF_HSTRSTFR_WIDTH 1 +#define FRF_AB_GMF_HSTRSTSR_LBN 1 +#define FRF_AB_GMF_HSTRSTSR_WIDTH 1 +#define FRF_AB_GMF_HSTRSTWT_LBN 0 +#define FRF_AB_GMF_HSTRSTWT_WIDTH 1 + + +/* + * FR_AB_GMF_CFG1_REG(32bit): + * GMAC FIFO configuration register 1 + */ +#define FR_AB_GMF_CFG1_REG_OFST 0x00000f30 +/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */ + +#define FRF_AB_GMF_CFGFRTH_LBN 16 +#define FRF_AB_GMF_CFGFRTH_WIDTH 5 +#define FRF_AB_GMF_CFGXOFFRTX_LBN 0 +#define FRF_AB_GMF_CFGXOFFRTX_WIDTH 16 + + +/* + * FR_AB_GMF_CFG2_REG(32bit): + * GMAC FIFO configuration register 2 + */ +#define FR_AB_GMF_CFG2_REG_OFST 0x00000f40 +/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */ + +#define FRF_AB_GMF_CFGHWM_LBN 16 +#define FRF_AB_GMF_CFGHWM_WIDTH 6 +#define FRF_AB_GMF_CFGLWM_LBN 0 
+#define FRF_AB_GMF_CFGLWM_WIDTH 6 + + +/* + * FR_AB_GMF_CFG3_REG(32bit): + * GMAC FIFO configuration register 3 + */ +#define FR_AB_GMF_CFG3_REG_OFST 0x00000f50 +/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */ + +#define FRF_AB_GMF_CFGHWMFT_LBN 16 +#define FRF_AB_GMF_CFGHWMFT_WIDTH 6 +#define FRF_AB_GMF_CFGFTTH_LBN 0 +#define FRF_AB_GMF_CFGFTTH_WIDTH 6 + + +/* + * FR_AB_GMF_CFG4_REG(32bit): + * GMAC FIFO configuration register 4 + */ +#define FR_AB_GMF_CFG4_REG_OFST 0x00000f60 +/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */ + +#define FRF_AB_GMF_HSTFLTRFRM_LBN 0 +#define FRF_AB_GMF_HSTFLTRFRM_WIDTH 18 + + +/* + * FR_AB_GMF_CFG5_REG(32bit): + * GMAC FIFO configuration register 5 + */ +#define FR_AB_GMF_CFG5_REG_OFST 0x00000f70 +/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */ + +#define FRF_AB_GMF_CFGHDPLX_LBN 22 +#define FRF_AB_GMF_CFGHDPLX_WIDTH 1 +#define FRF_AB_GMF_SRFULL_LBN 21 +#define FRF_AB_GMF_SRFULL_WIDTH 1 +#define FRF_AB_GMF_HSTSRFULLCLR_LBN 20 +#define FRF_AB_GMF_HSTSRFULLCLR_WIDTH 1 +#define FRF_AB_GMF_CFGBYTMODE_LBN 19 +#define FRF_AB_GMF_CFGBYTMODE_WIDTH 1 +#define FRF_AB_GMF_HSTDRPLT64_LBN 18 +#define FRF_AB_GMF_HSTDRPLT64_WIDTH 1 +#define FRF_AB_GMF_HSTFLTRFRMDC_LBN 0 +#define FRF_AB_GMF_HSTFLTRFRMDC_WIDTH 18 + + +/* + * FR_BB_TX_SRC_MAC_TBL(128bit): + * Transmit IP source address filter table + */ +#define FR_BB_TX_SRC_MAC_TBL_OFST 0x00001000 +/* falconb0=net_func_bar2 */ +#define FR_BB_TX_SRC_MAC_TBL_STEP 16 +#define FR_BB_TX_SRC_MAC_TBL_ROWS 16 + +#define FRF_BB_TX_SRC_MAC_ADR_1_LBN 64 +#define FRF_BB_TX_SRC_MAC_ADR_1_WIDTH 48 +#define FRF_BB_TX_SRC_MAC_ADR_1_DW0_LBN 64 +#define FRF_BB_TX_SRC_MAC_ADR_1_DW0_WIDTH 32 +#define FRF_BB_TX_SRC_MAC_ADR_1_DW1_LBN 96 +#define FRF_BB_TX_SRC_MAC_ADR_1_DW1_WIDTH 16 +#define FRF_BB_TX_SRC_MAC_ADR_0_LBN 0 +#define FRF_BB_TX_SRC_MAC_ADR_0_WIDTH 48 +#define FRF_BB_TX_SRC_MAC_ADR_0_DW0_LBN 0 +#define FRF_BB_TX_SRC_MAC_ADR_0_DW0_WIDTH 32 +#define FRF_BB_TX_SRC_MAC_ADR_0_DW1_LBN 32 +#define FRF_BB_TX_SRC_MAC_ADR_0_DW1_WIDTH 16 + + +/* + * FR_BB_TX_SRC_MAC_CTL_REG(128bit): + * Transmit MAC source address filter control + */ +#define FR_BB_TX_SRC_MAC_CTL_REG_OFST 0x00001100 +/* falconb0=net_func_bar2 */ + +#define FRF_BB_TX_SRC_DROP_CTR_LBN 16 +#define FRF_BB_TX_SRC_DROP_CTR_WIDTH 16 +#define FRF_BB_TX_SRC_FLTR_EN_LBN 15 +#define FRF_BB_TX_SRC_FLTR_EN_WIDTH 1 +#define FRF_BB_TX_DROP_CTR_CLR_LBN 12 +#define FRF_BB_TX_DROP_CTR_CLR_WIDTH 1 +#define FRF_BB_TX_MAC_QID_SEL_LBN 0 +#define FRF_BB_TX_MAC_QID_SEL_WIDTH 3 + + +/* + * FR_AB_XM_ADR_LO_REG(128bit): + * XGMAC address register low + */ +#define FR_AB_XM_ADR_LO_REG_OFST 0x00001200 +/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */ + +#define FRF_AB_XM_ADR_LO_LBN 0 +#define FRF_AB_XM_ADR_LO_WIDTH 32 + + +/* + * FR_AB_XM_ADR_HI_REG(128bit): + * XGMAC address register high + */ +#define FR_AB_XM_ADR_HI_REG_OFST 0x00001210 +/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */ + +#define FRF_AB_XM_ADR_HI_LBN 0 +#define FRF_AB_XM_ADR_HI_WIDTH 16 + + +/* + * FR_AB_XM_GLB_CFG_REG(128bit): + * XGMAC global configuration + */ +#define FR_AB_XM_GLB_CFG_REG_OFST 0x00001220 +/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */ + +#define FRF_AB_XM_RMTFLT_GEN_LBN 17 +#define FRF_AB_XM_RMTFLT_GEN_WIDTH 1 +#define FRF_AB_XM_DEBUG_MODE_LBN 16 +#define FRF_AB_XM_DEBUG_MODE_WIDTH 1 +#define FRF_AB_XM_RX_STAT_EN_LBN 11 +#define FRF_AB_XM_RX_STAT_EN_WIDTH 1 +#define FRF_AB_XM_TX_STAT_EN_LBN 10 +#define FRF_AB_XM_TX_STAT_EN_WIDTH 1 +#define 
FRF_AB_XM_RX_JUMBO_MODE_LBN 6 +#define FRF_AB_XM_RX_JUMBO_MODE_WIDTH 1 +#define FRF_AB_XM_WAN_MODE_LBN 5 +#define FRF_AB_XM_WAN_MODE_WIDTH 1 +#define FRF_AB_XM_INTCLR_MODE_LBN 3 +#define FRF_AB_XM_INTCLR_MODE_WIDTH 1 +#define FRF_AB_XM_CORE_RST_LBN 0 +#define FRF_AB_XM_CORE_RST_WIDTH 1 + + +/* + * FR_AB_XM_TX_CFG_REG(128bit): + * XGMAC transmit configuration + */ +#define FR_AB_XM_TX_CFG_REG_OFST 0x00001230 +/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */ + +#define FRF_AB_XM_TX_PROG_LBN 24 +#define FRF_AB_XM_TX_PROG_WIDTH 1 +#define FRF_AB_XM_IPG_LBN 16 +#define FRF_AB_XM_IPG_WIDTH 4 +#define FRF_AB_XM_FCNTL_LBN 10 +#define FRF_AB_XM_FCNTL_WIDTH 1 +#define FRF_AB_XM_TXCRC_LBN 8 +#define FRF_AB_XM_TXCRC_WIDTH 1 +#define FRF_AB_XM_EDRC_LBN 6 +#define FRF_AB_XM_EDRC_WIDTH 1 +#define FRF_AB_XM_AUTO_PAD_LBN 5 +#define FRF_AB_XM_AUTO_PAD_WIDTH 1 +#define FRF_AB_XM_TX_PRMBL_LBN 2 +#define FRF_AB_XM_TX_PRMBL_WIDTH 1 +#define FRF_AB_XM_TXEN_LBN 1 +#define FRF_AB_XM_TXEN_WIDTH 1 +#define FRF_AB_XM_TX_RST_LBN 0 +#define FRF_AB_XM_TX_RST_WIDTH 1 + + +/* + * FR_AB_XM_RX_CFG_REG(128bit): + * XGMAC receive configuration + */ +#define FR_AB_XM_RX_CFG_REG_OFST 0x00001240 +/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */ + +#define FRF_AB_XM_PASS_LENERR_LBN 26 +#define FRF_AB_XM_PASS_LENERR_WIDTH 1 +#define FRF_AB_XM_PASS_CRC_ERR_LBN 25 +#define FRF_AB_XM_PASS_CRC_ERR_WIDTH 1 +#define FRF_AB_XM_PASS_PRMBLE_ERR_LBN 24 +#define FRF_AB_XM_PASS_PRMBLE_ERR_WIDTH 1 +#define FRF_AB_XM_REJ_BCAST_LBN 20 +#define FRF_AB_XM_REJ_BCAST_WIDTH 1 +#define FRF_AB_XM_ACPT_ALL_MCAST_LBN 11 +#define FRF_AB_XM_ACPT_ALL_MCAST_WIDTH 1 +#define FRF_AB_XM_ACPT_ALL_UCAST_LBN 9 +#define FRF_AB_XM_ACPT_ALL_UCAST_WIDTH 1 +#define FRF_AB_XM_AUTO_DEPAD_LBN 8 +#define FRF_AB_XM_AUTO_DEPAD_WIDTH 1 +#define FRF_AB_XM_RXCRC_LBN 3 +#define FRF_AB_XM_RXCRC_WIDTH 1 +#define FRF_AB_XM_RX_PRMBL_LBN 2 +#define FRF_AB_XM_RX_PRMBL_WIDTH 1 +#define FRF_AB_XM_RXEN_LBN 1 +#define FRF_AB_XM_RXEN_WIDTH 1 +#define FRF_AB_XM_RX_RST_LBN 0 +#define FRF_AB_XM_RX_RST_WIDTH 1 + + +/* + * FR_AB_XM_MGT_INT_MASK(128bit): + * documentation to be written for sum_XM_MGT_INT_MASK + */ +#define FR_AB_XM_MGT_INT_MASK_OFST 0x00001250 +/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */ + +#define FRF_AB_XM_MSK_STA_INTR_LBN 16 +#define FRF_AB_XM_MSK_STA_INTR_WIDTH 1 +#define FRF_AB_XM_MSK_STAT_CNTR_HF_LBN 9 +#define FRF_AB_XM_MSK_STAT_CNTR_HF_WIDTH 1 +#define FRF_AB_XM_MSK_STAT_CNTR_OF_LBN 8 +#define FRF_AB_XM_MSK_STAT_CNTR_OF_WIDTH 1 +#define FRF_AB_XM_MSK_PRMBLE_ERR_LBN 2 +#define FRF_AB_XM_MSK_PRMBLE_ERR_WIDTH 1 +#define FRF_AB_XM_MSK_RMTFLT_LBN 1 +#define FRF_AB_XM_MSK_RMTFLT_WIDTH 1 +#define FRF_AB_XM_MSK_LCLFLT_LBN 0 +#define FRF_AB_XM_MSK_LCLFLT_WIDTH 1 + + +/* + * FR_AB_XM_FC_REG(128bit): + * XGMAC flow control register + */ +#define FR_AB_XM_FC_REG_OFST 0x00001270 +/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */ + +#define FRF_AB_XM_PAUSE_TIME_LBN 16 +#define FRF_AB_XM_PAUSE_TIME_WIDTH 16 +#define FRF_AB_XM_RX_MAC_STAT_LBN 11 +#define FRF_AB_XM_RX_MAC_STAT_WIDTH 1 +#define FRF_AB_XM_TX_MAC_STAT_LBN 10 +#define FRF_AB_XM_TX_MAC_STAT_WIDTH 1 +#define FRF_AB_XM_MCNTL_PASS_LBN 8 +#define FRF_AB_XM_MCNTL_PASS_WIDTH 2 +#define FRF_AB_XM_REJ_CNTL_UCAST_LBN 6 +#define FRF_AB_XM_REJ_CNTL_UCAST_WIDTH 1 +#define FRF_AB_XM_REJ_CNTL_MCAST_LBN 5 +#define FRF_AB_XM_REJ_CNTL_MCAST_WIDTH 1 +#define FRF_AB_XM_ZPAUSE_LBN 2 +#define FRF_AB_XM_ZPAUSE_WIDTH 1 +#define FRF_AB_XM_XMIT_PAUSE_LBN 1 +#define FRF_AB_XM_XMIT_PAUSE_WIDTH 1 
+#define FRF_AB_XM_DIS_FCNTL_LBN 0 +#define FRF_AB_XM_DIS_FCNTL_WIDTH 1 + + +/* + * FR_AB_XM_PAUSE_TIME_REG(128bit): + * XGMAC pause time register + */ +#define FR_AB_XM_PAUSE_TIME_REG_OFST 0x00001290 +/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */ + +#define FRF_AB_XM_TX_PAUSE_CNT_LBN 16 +#define FRF_AB_XM_TX_PAUSE_CNT_WIDTH 16 +#define FRF_AB_XM_RX_PAUSE_CNT_LBN 0 +#define FRF_AB_XM_RX_PAUSE_CNT_WIDTH 16 + + +/* + * FR_AB_XM_TX_PARAM_REG(128bit): + * XGMAC transmit parameter register + */ +#define FR_AB_XM_TX_PARAM_REG_OFST 0x000012d0 +/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */ + +#define FRF_AB_XM_TX_JUMBO_MODE_LBN 31 +#define FRF_AB_XM_TX_JUMBO_MODE_WIDTH 1 +#define FRF_AB_XM_MAX_TX_FRM_SIZE_HI_LBN 19 +#define FRF_AB_XM_MAX_TX_FRM_SIZE_HI_WIDTH 11 +#define FRF_AB_XM_MAX_TX_FRM_SIZE_LO_LBN 16 +#define FRF_AB_XM_MAX_TX_FRM_SIZE_LO_WIDTH 3 +#define FRF_AB_XM_PAD_CHAR_LBN 0 +#define FRF_AB_XM_PAD_CHAR_WIDTH 8 + + +/* + * FR_AB_XM_RX_PARAM_REG(128bit): + * XGMAC receive parameter register + */ +#define FR_AB_XM_RX_PARAM_REG_OFST 0x000012e0 +/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */ + +#define FRF_AB_XM_MAX_RX_FRM_SIZE_HI_LBN 3 +#define FRF_AB_XM_MAX_RX_FRM_SIZE_HI_WIDTH 11 +#define FRF_AB_XM_MAX_RX_FRM_SIZE_LO_LBN 0 +#define FRF_AB_XM_MAX_RX_FRM_SIZE_LO_WIDTH 3 + + +/* + * FR_AB_XM_MGT_INT_MSK_REG(128bit): + * XGMAC management interrupt mask register + */ +#define FR_AB_XM_MGT_INT_REG_OFST 0x000012f0 +/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */ + +#define FRF_AB_XM_STAT_CNTR_OF_LBN 9 +#define FRF_AB_XM_STAT_CNTR_OF_WIDTH 1 +#define FRF_AB_XM_STAT_CNTR_HF_LBN 8 +#define FRF_AB_XM_STAT_CNTR_HF_WIDTH 1 +#define FRF_AB_XM_PRMBLE_ERR_LBN 2 +#define FRF_AB_XM_PRMBLE_ERR_WIDTH 1 +#define FRF_AB_XM_RMTFLT_LBN 1 +#define FRF_AB_XM_RMTFLT_WIDTH 1 +#define FRF_AB_XM_LCLFLT_LBN 0 +#define FRF_AB_XM_LCLFLT_WIDTH 1 + + +/* + * FR_AB_XX_PWR_RST_REG(128bit): + * XGXS/XAUI powerdown/reset register + */ +#define FR_AB_XX_PWR_RST_REG_OFST 0x00001300 +/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */ + +#define FRF_AB_XX_PWRDND_SIG_LBN 31 +#define FRF_AB_XX_PWRDND_SIG_WIDTH 1 +#define FRF_AB_XX_PWRDNC_SIG_LBN 30 +#define FRF_AB_XX_PWRDNC_SIG_WIDTH 1 +#define FRF_AB_XX_PWRDNB_SIG_LBN 29 +#define FRF_AB_XX_PWRDNB_SIG_WIDTH 1 +#define FRF_AB_XX_PWRDNA_SIG_LBN 28 +#define FRF_AB_XX_PWRDNA_SIG_WIDTH 1 +#define FRF_AB_XX_SIM_MODE_LBN 27 +#define FRF_AB_XX_SIM_MODE_WIDTH 1 +#define FRF_AB_XX_RSTPLLCD_SIG_LBN 25 +#define FRF_AB_XX_RSTPLLCD_SIG_WIDTH 1 +#define FRF_AB_XX_RSTPLLAB_SIG_LBN 24 +#define FRF_AB_XX_RSTPLLAB_SIG_WIDTH 1 +#define FRF_AB_XX_RESETD_SIG_LBN 23 +#define FRF_AB_XX_RESETD_SIG_WIDTH 1 +#define FRF_AB_XX_RESETC_SIG_LBN 22 +#define FRF_AB_XX_RESETC_SIG_WIDTH 1 +#define FRF_AB_XX_RESETB_SIG_LBN 21 +#define FRF_AB_XX_RESETB_SIG_WIDTH 1 +#define FRF_AB_XX_RESETA_SIG_LBN 20 +#define FRF_AB_XX_RESETA_SIG_WIDTH 1 +#define FRF_AB_XX_RSTXGXSRX_SIG_LBN 18 +#define FRF_AB_XX_RSTXGXSRX_SIG_WIDTH 1 +#define FRF_AB_XX_RSTXGXSTX_SIG_LBN 17 +#define FRF_AB_XX_RSTXGXSTX_SIG_WIDTH 1 +#define FRF_AB_XX_SD_RST_ACT_LBN 16 +#define FRF_AB_XX_SD_RST_ACT_WIDTH 1 +#define FRF_AB_XX_PWRDND_EN_LBN 15 +#define FRF_AB_XX_PWRDND_EN_WIDTH 1 +#define FRF_AB_XX_PWRDNC_EN_LBN 14 +#define FRF_AB_XX_PWRDNC_EN_WIDTH 1 +#define FRF_AB_XX_PWRDNB_EN_LBN 13 +#define FRF_AB_XX_PWRDNB_EN_WIDTH 1 +#define FRF_AB_XX_PWRDNA_EN_LBN 12 +#define FRF_AB_XX_PWRDNA_EN_WIDTH 1 +#define FRF_AB_XX_RSTPLLCD_EN_LBN 9 +#define FRF_AB_XX_RSTPLLCD_EN_WIDTH 1 +#define 
FRF_AB_XX_RSTPLLAB_EN_LBN 8 +#define FRF_AB_XX_RSTPLLAB_EN_WIDTH 1 +#define FRF_AB_XX_RESETD_EN_LBN 7 +#define FRF_AB_XX_RESETD_EN_WIDTH 1 +#define FRF_AB_XX_RESETC_EN_LBN 6 +#define FRF_AB_XX_RESETC_EN_WIDTH 1 +#define FRF_AB_XX_RESETB_EN_LBN 5 +#define FRF_AB_XX_RESETB_EN_WIDTH 1 +#define FRF_AB_XX_RESETA_EN_LBN 4 +#define FRF_AB_XX_RESETA_EN_WIDTH 1 +#define FRF_AB_XX_RSTXGXSRX_EN_LBN 2 +#define FRF_AB_XX_RSTXGXSRX_EN_WIDTH 1 +#define FRF_AB_XX_RSTXGXSTX_EN_LBN 1 +#define FRF_AB_XX_RSTXGXSTX_EN_WIDTH 1 +#define FRF_AB_XX_RST_XX_EN_LBN 0 +#define FRF_AB_XX_RST_XX_EN_WIDTH 1 + + +/* + * FR_AB_XX_SD_CTL_REG(128bit): + * XGXS/XAUI powerdown/reset control register + */ +#define FR_AB_XX_SD_CTL_REG_OFST 0x00001310 +/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */ + +#define FRF_AB_XX_TERMADJ1_LBN 17 +#define FRF_AB_XX_TERMADJ1_WIDTH 1 +#define FRF_AB_XX_TERMADJ0_LBN 16 +#define FRF_AB_XX_TERMADJ0_WIDTH 1 +#define FRF_AB_XX_HIDRVD_LBN 15 +#define FRF_AB_XX_HIDRVD_WIDTH 1 +#define FRF_AB_XX_LODRVD_LBN 14 +#define FRF_AB_XX_LODRVD_WIDTH 1 +#define FRF_AB_XX_HIDRVC_LBN 13 +#define FRF_AB_XX_HIDRVC_WIDTH 1 +#define FRF_AB_XX_LODRVC_LBN 12 +#define FRF_AB_XX_LODRVC_WIDTH 1 +#define FRF_AB_XX_HIDRVB_LBN 11 +#define FRF_AB_XX_HIDRVB_WIDTH 1 +#define FRF_AB_XX_LODRVB_LBN 10 +#define FRF_AB_XX_LODRVB_WIDTH 1 +#define FRF_AB_XX_HIDRVA_LBN 9 +#define FRF_AB_XX_HIDRVA_WIDTH 1 +#define FRF_AB_XX_LODRVA_LBN 8 +#define FRF_AB_XX_LODRVA_WIDTH 1 +#define FRF_AB_XX_LPBKD_LBN 3 +#define FRF_AB_XX_LPBKD_WIDTH 1 +#define FRF_AB_XX_LPBKC_LBN 2 +#define FRF_AB_XX_LPBKC_WIDTH 1 +#define FRF_AB_XX_LPBKB_LBN 1 +#define FRF_AB_XX_LPBKB_WIDTH 1 +#define FRF_AB_XX_LPBKA_LBN 0 +#define FRF_AB_XX_LPBKA_WIDTH 1 + + +/* + * FR_AB_XX_TXDRV_CTL_REG(128bit): + * XAUI SerDes transmit drive control register + */ +#define FR_AB_XX_TXDRV_CTL_REG_OFST 0x00001320 +/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */ + +#define FRF_AB_XX_DEQD_LBN 28 +#define FRF_AB_XX_DEQD_WIDTH 4 +#define FRF_AB_XX_DEQC_LBN 24 +#define FRF_AB_XX_DEQC_WIDTH 4 +#define FRF_AB_XX_DEQB_LBN 20 +#define FRF_AB_XX_DEQB_WIDTH 4 +#define FRF_AB_XX_DEQA_LBN 16 +#define FRF_AB_XX_DEQA_WIDTH 4 +#define FRF_AB_XX_DTXD_LBN 12 +#define FRF_AB_XX_DTXD_WIDTH 4 +#define FRF_AB_XX_DTXC_LBN 8 +#define FRF_AB_XX_DTXC_WIDTH 4 +#define FRF_AB_XX_DTXB_LBN 4 +#define FRF_AB_XX_DTXB_WIDTH 4 +#define FRF_AB_XX_DTXA_LBN 0 +#define FRF_AB_XX_DTXA_WIDTH 4 + + +/* + * FR_AB_XX_PRBS_CTL_REG(128bit): + * documentation to be written for sum_XX_PRBS_CTL_REG + */ +#define FR_AB_XX_PRBS_CTL_REG_OFST 0x00001330 +/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */ + +#define FRF_AB_XX_CH3_RX_PRBS_SEL_LBN 30 +#define FRF_AB_XX_CH3_RX_PRBS_SEL_WIDTH 2 +#define FRF_AB_XX_CH3_RX_PRBS_INV_LBN 29 +#define FRF_AB_XX_CH3_RX_PRBS_INV_WIDTH 1 +#define FRF_AB_XX_CH3_RX_PRBS_CHKEN_LBN 28 +#define FRF_AB_XX_CH3_RX_PRBS_CHKEN_WIDTH 1 +#define FRF_AB_XX_CH2_RX_PRBS_SEL_LBN 26 +#define FRF_AB_XX_CH2_RX_PRBS_SEL_WIDTH 2 +#define FRF_AB_XX_CH2_RX_PRBS_INV_LBN 25 +#define FRF_AB_XX_CH2_RX_PRBS_INV_WIDTH 1 +#define FRF_AB_XX_CH2_RX_PRBS_CHKEN_LBN 24 +#define FRF_AB_XX_CH2_RX_PRBS_CHKEN_WIDTH 1 +#define FRF_AB_XX_CH1_RX_PRBS_SEL_LBN 22 +#define FRF_AB_XX_CH1_RX_PRBS_SEL_WIDTH 2 +#define FRF_AB_XX_CH1_RX_PRBS_INV_LBN 21 +#define FRF_AB_XX_CH1_RX_PRBS_INV_WIDTH 1 +#define FRF_AB_XX_CH1_RX_PRBS_CHKEN_LBN 20 +#define FRF_AB_XX_CH1_RX_PRBS_CHKEN_WIDTH 1 +#define FRF_AB_XX_CH0_RX_PRBS_SEL_LBN 18 +#define FRF_AB_XX_CH0_RX_PRBS_SEL_WIDTH 2 +#define FRF_AB_XX_CH0_RX_PRBS_INV_LBN 17 
+#define FRF_AB_XX_CH0_RX_PRBS_INV_WIDTH 1 +#define FRF_AB_XX_CH0_RX_PRBS_CHKEN_LBN 16 +#define FRF_AB_XX_CH0_RX_PRBS_CHKEN_WIDTH 1 +#define FRF_AB_XX_CH3_TX_PRBS_SEL_LBN 14 +#define FRF_AB_XX_CH3_TX_PRBS_SEL_WIDTH 2 +#define FRF_AB_XX_CH3_TX_PRBS_INV_LBN 13 +#define FRF_AB_XX_CH3_TX_PRBS_INV_WIDTH 1 +#define FRF_AB_XX_CH3_TX_PRBS_CHKEN_LBN 12 +#define FRF_AB_XX_CH3_TX_PRBS_CHKEN_WIDTH 1 +#define FRF_AB_XX_CH2_TX_PRBS_SEL_LBN 10 +#define FRF_AB_XX_CH2_TX_PRBS_SEL_WIDTH 2 +#define FRF_AB_XX_CH2_TX_PRBS_INV_LBN 9 +#define FRF_AB_XX_CH2_TX_PRBS_INV_WIDTH 1 +#define FRF_AB_XX_CH2_TX_PRBS_CHKEN_LBN 8 +#define FRF_AB_XX_CH2_TX_PRBS_CHKEN_WIDTH 1 +#define FRF_AB_XX_CH1_TX_PRBS_SEL_LBN 6 +#define FRF_AB_XX_CH1_TX_PRBS_SEL_WIDTH 2 +#define FRF_AB_XX_CH1_TX_PRBS_INV_LBN 5 +#define FRF_AB_XX_CH1_TX_PRBS_INV_WIDTH 1 +#define FRF_AB_XX_CH1_TX_PRBS_CHKEN_LBN 4 +#define FRF_AB_XX_CH1_TX_PRBS_CHKEN_WIDTH 1 +#define FRF_AB_XX_CH0_TX_PRBS_SEL_LBN 2 +#define FRF_AB_XX_CH0_TX_PRBS_SEL_WIDTH 2 +#define FRF_AB_XX_CH0_TX_PRBS_INV_LBN 1 +#define FRF_AB_XX_CH0_TX_PRBS_INV_WIDTH 1 +#define FRF_AB_XX_CH0_TX_PRBS_CHKEN_LBN 0 +#define FRF_AB_XX_CH0_TX_PRBS_CHKEN_WIDTH 1 + + +/* + * FR_AB_XX_PRBS_CHK_REG(128bit): + * documentation to be written for sum_XX_PRBS_CHK_REG + */ +#define FR_AB_XX_PRBS_CHK_REG_OFST 0x00001340 +/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */ + +#define FRF_AB_XX_REV_LB_EN_LBN 16 +#define FRF_AB_XX_REV_LB_EN_WIDTH 1 +#define FRF_AB_XX_CH3_DEG_DET_LBN 15 +#define FRF_AB_XX_CH3_DEG_DET_WIDTH 1 +#define FRF_AB_XX_CH3_LFSR_LOCK_IND_LBN 14 +#define FRF_AB_XX_CH3_LFSR_LOCK_IND_WIDTH 1 +#define FRF_AB_XX_CH3_PRBS_FRUN_LBN 13 +#define FRF_AB_XX_CH3_PRBS_FRUN_WIDTH 1 +#define FRF_AB_XX_CH3_ERR_CHK_LBN 12 +#define FRF_AB_XX_CH3_ERR_CHK_WIDTH 1 +#define FRF_AB_XX_CH2_DEG_DET_LBN 11 +#define FRF_AB_XX_CH2_DEG_DET_WIDTH 1 +#define FRF_AB_XX_CH2_LFSR_LOCK_IND_LBN 10 +#define FRF_AB_XX_CH2_LFSR_LOCK_IND_WIDTH 1 +#define FRF_AB_XX_CH2_PRBS_FRUN_LBN 9 +#define FRF_AB_XX_CH2_PRBS_FRUN_WIDTH 1 +#define FRF_AB_XX_CH2_ERR_CHK_LBN 8 +#define FRF_AB_XX_CH2_ERR_CHK_WIDTH 1 +#define FRF_AB_XX_CH1_DEG_DET_LBN 7 +#define FRF_AB_XX_CH1_DEG_DET_WIDTH 1 +#define FRF_AB_XX_CH1_LFSR_LOCK_IND_LBN 6 +#define FRF_AB_XX_CH1_LFSR_LOCK_IND_WIDTH 1 +#define FRF_AB_XX_CH1_PRBS_FRUN_LBN 5 +#define FRF_AB_XX_CH1_PRBS_FRUN_WIDTH 1 +#define FRF_AB_XX_CH1_ERR_CHK_LBN 4 +#define FRF_AB_XX_CH1_ERR_CHK_WIDTH 1 +#define FRF_AB_XX_CH0_DEG_DET_LBN 3 +#define FRF_AB_XX_CH0_DEG_DET_WIDTH 1 +#define FRF_AB_XX_CH0_LFSR_LOCK_IND_LBN 2 +#define FRF_AB_XX_CH0_LFSR_LOCK_IND_WIDTH 1 +#define FRF_AB_XX_CH0_PRBS_FRUN_LBN 1 +#define FRF_AB_XX_CH0_PRBS_FRUN_WIDTH 1 +#define FRF_AB_XX_CH0_ERR_CHK_LBN 0 +#define FRF_AB_XX_CH0_ERR_CHK_WIDTH 1 + + +/* + * FR_AB_XX_PRBS_ERR_REG(128bit): + * documentation to be written for sum_XX_PRBS_ERR_REG + */ +#define FR_AB_XX_PRBS_ERR_REG_OFST 0x00001350 +/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */ + +#define FRF_AB_XX_CH3_PRBS_ERR_CNT_LBN 24 +#define FRF_AB_XX_CH3_PRBS_ERR_CNT_WIDTH 8 +#define FRF_AB_XX_CH2_PRBS_ERR_CNT_LBN 16 +#define FRF_AB_XX_CH2_PRBS_ERR_CNT_WIDTH 8 +#define FRF_AB_XX_CH1_PRBS_ERR_CNT_LBN 8 +#define FRF_AB_XX_CH1_PRBS_ERR_CNT_WIDTH 8 +#define FRF_AB_XX_CH0_PRBS_ERR_CNT_LBN 0 +#define FRF_AB_XX_CH0_PRBS_ERR_CNT_WIDTH 8 + + +/* + * FR_AB_XX_CORE_STAT_REG(128bit): + * XAUI XGXS core status register + */ +#define FR_AB_XX_CORE_STAT_REG_OFST 0x00001360 +/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */ + +#define FRF_AB_XX_FORCE_SIG3_LBN 31 +#define 
FRF_AB_XX_FORCE_SIG3_WIDTH 1 +#define FRF_AB_XX_FORCE_SIG3_VAL_LBN 30 +#define FRF_AB_XX_FORCE_SIG3_VAL_WIDTH 1 +#define FRF_AB_XX_FORCE_SIG2_LBN 29 +#define FRF_AB_XX_FORCE_SIG2_WIDTH 1 +#define FRF_AB_XX_FORCE_SIG2_VAL_LBN 28 +#define FRF_AB_XX_FORCE_SIG2_VAL_WIDTH 1 +#define FRF_AB_XX_FORCE_SIG1_LBN 27 +#define FRF_AB_XX_FORCE_SIG1_WIDTH 1 +#define FRF_AB_XX_FORCE_SIG1_VAL_LBN 26 +#define FRF_AB_XX_FORCE_SIG1_VAL_WIDTH 1 +#define FRF_AB_XX_FORCE_SIG0_LBN 25 +#define FRF_AB_XX_FORCE_SIG0_WIDTH 1 +#define FRF_AB_XX_FORCE_SIG0_VAL_LBN 24 +#define FRF_AB_XX_FORCE_SIG0_VAL_WIDTH 1 +#define FRF_AB_XX_XGXS_LB_EN_LBN 23 +#define FRF_AB_XX_XGXS_LB_EN_WIDTH 1 +#define FRF_AB_XX_XGMII_LB_EN_LBN 22 +#define FRF_AB_XX_XGMII_LB_EN_WIDTH 1 +#define FRF_AB_XX_MATCH_FAULT_LBN 21 +#define FRF_AB_XX_MATCH_FAULT_WIDTH 1 +#define FRF_AB_XX_ALIGN_DONE_LBN 20 +#define FRF_AB_XX_ALIGN_DONE_WIDTH 1 +#define FRF_AB_XX_SYNC_STAT3_LBN 19 +#define FRF_AB_XX_SYNC_STAT3_WIDTH 1 +#define FRF_AB_XX_SYNC_STAT2_LBN 18 +#define FRF_AB_XX_SYNC_STAT2_WIDTH 1 +#define FRF_AB_XX_SYNC_STAT1_LBN 17 +#define FRF_AB_XX_SYNC_STAT1_WIDTH 1 +#define FRF_AB_XX_SYNC_STAT0_LBN 16 +#define FRF_AB_XX_SYNC_STAT0_WIDTH 1 +#define FRF_AB_XX_COMMA_DET_CH3_LBN 15 +#define FRF_AB_XX_COMMA_DET_CH3_WIDTH 1 +#define FRF_AB_XX_COMMA_DET_CH2_LBN 14 +#define FRF_AB_XX_COMMA_DET_CH2_WIDTH 1 +#define FRF_AB_XX_COMMA_DET_CH1_LBN 13 +#define FRF_AB_XX_COMMA_DET_CH1_WIDTH 1 +#define FRF_AB_XX_COMMA_DET_CH0_LBN 12 +#define FRF_AB_XX_COMMA_DET_CH0_WIDTH 1 +#define FRF_AB_XX_CGRP_ALIGN_CH3_LBN 11 +#define FRF_AB_XX_CGRP_ALIGN_CH3_WIDTH 1 +#define FRF_AB_XX_CGRP_ALIGN_CH2_LBN 10 +#define FRF_AB_XX_CGRP_ALIGN_CH2_WIDTH 1 +#define FRF_AB_XX_CGRP_ALIGN_CH1_LBN 9 +#define FRF_AB_XX_CGRP_ALIGN_CH1_WIDTH 1 +#define FRF_AB_XX_CGRP_ALIGN_CH0_LBN 8 +#define FRF_AB_XX_CGRP_ALIGN_CH0_WIDTH 1 +#define FRF_AB_XX_CHAR_ERR_CH3_LBN 7 +#define FRF_AB_XX_CHAR_ERR_CH3_WIDTH 1 +#define FRF_AB_XX_CHAR_ERR_CH2_LBN 6 +#define FRF_AB_XX_CHAR_ERR_CH2_WIDTH 1 +#define FRF_AB_XX_CHAR_ERR_CH1_LBN 5 +#define FRF_AB_XX_CHAR_ERR_CH1_WIDTH 1 +#define FRF_AB_XX_CHAR_ERR_CH0_LBN 4 +#define FRF_AB_XX_CHAR_ERR_CH0_WIDTH 1 +#define FRF_AB_XX_DISPERR_CH3_LBN 3 +#define FRF_AB_XX_DISPERR_CH3_WIDTH 1 +#define FRF_AB_XX_DISPERR_CH2_LBN 2 +#define FRF_AB_XX_DISPERR_CH2_WIDTH 1 +#define FRF_AB_XX_DISPERR_CH1_LBN 1 +#define FRF_AB_XX_DISPERR_CH1_WIDTH 1 +#define FRF_AB_XX_DISPERR_CH0_LBN 0 +#define FRF_AB_XX_DISPERR_CH0_WIDTH 1 + + +/* + * FR_AA_RX_DESC_PTR_TBL_KER(128bit): + * Receive descriptor pointer table + */ +#define FR_AA_RX_DESC_PTR_TBL_KER_OFST 0x00011800 +/* falcona0=net_func_bar2 */ +#define FR_AA_RX_DESC_PTR_TBL_KER_STEP 16 +#define FR_AA_RX_DESC_PTR_TBL_KER_ROWS 4 +/* + * FR_AZ_RX_DESC_PTR_TBL(128bit): + * Receive descriptor pointer table + */ +#define FR_AZ_RX_DESC_PTR_TBL_OFST 0x00f40000 +/* sienaa0=net_func_bar2,falconb0=net_func_bar2,falcona0=char_func_bar0 */ +#define FR_AZ_RX_DESC_PTR_TBL_STEP 16 +#define FR_CZ_RX_DESC_PTR_TBL_ROWS 1024 +#define FR_AB_RX_DESC_PTR_TBL_ROWS 4096 + +#define FRF_CZ_RX_HDR_SPLIT_LBN 90 +#define FRF_CZ_RX_HDR_SPLIT_WIDTH 1 +#define FRF_AZ_RX_RESET_LBN 89 +#define FRF_AZ_RX_RESET_WIDTH 1 +#define FRF_AZ_RX_ISCSI_DDIG_EN_LBN 88 +#define FRF_AZ_RX_ISCSI_DDIG_EN_WIDTH 1 +#define FRF_AZ_RX_ISCSI_HDIG_EN_LBN 87 +#define FRF_AZ_RX_ISCSI_HDIG_EN_WIDTH 1 +#define FRF_AZ_RX_DESC_PREF_ACT_LBN 86 +#define FRF_AZ_RX_DESC_PREF_ACT_WIDTH 1 +#define FRF_AZ_RX_DC_HW_RPTR_LBN 80 +#define FRF_AZ_RX_DC_HW_RPTR_WIDTH 6 +#define FRF_AZ_RX_DESCQ_HW_RPTR_LBN 68 +#define 
FRF_AZ_RX_DESCQ_HW_RPTR_WIDTH 12 +#define FRF_AZ_RX_DESCQ_SW_WPTR_LBN 56 +#define FRF_AZ_RX_DESCQ_SW_WPTR_WIDTH 12 +#define FRF_AZ_RX_DESCQ_BUF_BASE_ID_LBN 36 +#define FRF_AZ_RX_DESCQ_BUF_BASE_ID_WIDTH 20 +#define FRF_AZ_RX_DESCQ_EVQ_ID_LBN 24 +#define FRF_AZ_RX_DESCQ_EVQ_ID_WIDTH 12 +#define FRF_AZ_RX_DESCQ_OWNER_ID_LBN 10 +#define FRF_AZ_RX_DESCQ_OWNER_ID_WIDTH 14 +#define FRF_AZ_RX_DESCQ_LABEL_LBN 5 +#define FRF_AZ_RX_DESCQ_LABEL_WIDTH 5 +#define FRF_AZ_RX_DESCQ_SIZE_LBN 3 +#define FRF_AZ_RX_DESCQ_SIZE_WIDTH 2 +#define FFE_AZ_RX_DESCQ_SIZE_4K 3 +#define FFE_AZ_RX_DESCQ_SIZE_2K 2 +#define FFE_AZ_RX_DESCQ_SIZE_1K 1 +#define FFE_AZ_RX_DESCQ_SIZE_512 0 +#define FRF_AZ_RX_DESCQ_TYPE_LBN 2 +#define FRF_AZ_RX_DESCQ_TYPE_WIDTH 1 +#define FRF_AZ_RX_DESCQ_JUMBO_LBN 1 +#define FRF_AZ_RX_DESCQ_JUMBO_WIDTH 1 +#define FRF_AZ_RX_DESCQ_EN_LBN 0 +#define FRF_AZ_RX_DESCQ_EN_WIDTH 1 + + +/* + * FR_AA_TX_DESC_PTR_TBL_KER(128bit): + * Transmit descriptor pointer + */ +#define FR_AA_TX_DESC_PTR_TBL_KER_OFST 0x00011900 +/* falcona0=net_func_bar2 */ +#define FR_AA_TX_DESC_PTR_TBL_KER_STEP 16 +#define FR_AA_TX_DESC_PTR_TBL_KER_ROWS 8 +/* + * FR_AZ_TX_DESC_PTR_TBL(128bit): + * Transmit descriptor pointer + */ +#define FR_AZ_TX_DESC_PTR_TBL_OFST 0x00f50000 +/* falconb0=net_func_bar2,sienaa0=net_func_bar2,falcona0=char_func_bar0 */ +#define FR_AZ_TX_DESC_PTR_TBL_STEP 16 +#define FR_AB_TX_DESC_PTR_TBL_ROWS 4096 +#define FR_CZ_TX_DESC_PTR_TBL_ROWS 1024 + +#define FRF_CZ_TX_DPT_Q_MASK_WIDTH_LBN 94 +#define FRF_CZ_TX_DPT_Q_MASK_WIDTH_WIDTH 2 +#define FRF_CZ_TX_DPT_ETH_FILT_EN_LBN 93 +#define FRF_CZ_TX_DPT_ETH_FILT_EN_WIDTH 1 +#define FRF_CZ_TX_DPT_IP_FILT_EN_LBN 92 +#define FRF_CZ_TX_DPT_IP_FILT_EN_WIDTH 1 +#define FRF_BZ_TX_NON_IP_DROP_DIS_LBN 91 +#define FRF_BZ_TX_NON_IP_DROP_DIS_WIDTH 1 +#define FRF_BZ_TX_IP_CHKSM_DIS_LBN 90 +#define FRF_BZ_TX_IP_CHKSM_DIS_WIDTH 1 +#define FRF_BZ_TX_TCP_CHKSM_DIS_LBN 89 +#define FRF_BZ_TX_TCP_CHKSM_DIS_WIDTH 1 +#define FRF_AZ_TX_DESCQ_EN_LBN 88 +#define FRF_AZ_TX_DESCQ_EN_WIDTH 1 +#define FRF_AZ_TX_ISCSI_DDIG_EN_LBN 87 +#define FRF_AZ_TX_ISCSI_DDIG_EN_WIDTH 1 +#define FRF_AZ_TX_ISCSI_HDIG_EN_LBN 86 +#define FRF_AZ_TX_ISCSI_HDIG_EN_WIDTH 1 +#define FRF_AZ_TX_DC_HW_RPTR_LBN 80 +#define FRF_AZ_TX_DC_HW_RPTR_WIDTH 6 +#define FRF_AZ_TX_DESCQ_HW_RPTR_LBN 68 +#define FRF_AZ_TX_DESCQ_HW_RPTR_WIDTH 12 +#define FRF_AZ_TX_DESCQ_SW_WPTR_LBN 56 +#define FRF_AZ_TX_DESCQ_SW_WPTR_WIDTH 12 +#define FRF_AZ_TX_DESCQ_BUF_BASE_ID_LBN 36 +#define FRF_AZ_TX_DESCQ_BUF_BASE_ID_WIDTH 20 +#define FRF_AZ_TX_DESCQ_EVQ_ID_LBN 24 +#define FRF_AZ_TX_DESCQ_EVQ_ID_WIDTH 12 +#define FRF_AZ_TX_DESCQ_OWNER_ID_LBN 10 +#define FRF_AZ_TX_DESCQ_OWNER_ID_WIDTH 14 +#define FRF_AZ_TX_DESCQ_LABEL_LBN 5 +#define FRF_AZ_TX_DESCQ_LABEL_WIDTH 5 +#define FRF_AZ_TX_DESCQ_SIZE_LBN 3 +#define FRF_AZ_TX_DESCQ_SIZE_WIDTH 2 +#define FFE_AZ_TX_DESCQ_SIZE_4K 3 +#define FFE_AZ_TX_DESCQ_SIZE_2K 2 +#define FFE_AZ_TX_DESCQ_SIZE_1K 1 +#define FFE_AZ_TX_DESCQ_SIZE_512 0 +#define FRF_AZ_TX_DESCQ_TYPE_LBN 1 +#define FRF_AZ_TX_DESCQ_TYPE_WIDTH 2 +#define FRF_AZ_TX_DESCQ_FLUSH_LBN 0 +#define FRF_AZ_TX_DESCQ_FLUSH_WIDTH 1 + + +/* + * FR_AA_EVQ_PTR_TBL_KER(128bit): + * Event queue pointer table + */ +#define FR_AA_EVQ_PTR_TBL_KER_OFST 0x00011a00 +/* falcona0=net_func_bar2 */ +#define FR_AA_EVQ_PTR_TBL_KER_STEP 16 +#define FR_AA_EVQ_PTR_TBL_KER_ROWS 4 +/* + * FR_AZ_EVQ_PTR_TBL(128bit): + * Event queue pointer table + */ +#define FR_AZ_EVQ_PTR_TBL_OFST 0x00f60000 +/* sienaa0=net_func_bar2,falconb0=net_func_bar2,falcona0=char_func_bar0 */ +#define 
FR_AZ_EVQ_PTR_TBL_STEP 16 +#define FR_CZ_EVQ_PTR_TBL_ROWS 1024 +#define FR_AB_EVQ_PTR_TBL_ROWS 4096 + +#define FRF_BZ_EVQ_RPTR_IGN_LBN 40 +#define FRF_BZ_EVQ_RPTR_IGN_WIDTH 1 +#define FRF_AZ_EVQ_WKUP_OR_INT_EN_LBN 39 +#define FRF_AZ_EVQ_WKUP_OR_INT_EN_WIDTH 1 +#define FRF_AZ_EVQ_NXT_WPTR_LBN 24 +#define FRF_AZ_EVQ_NXT_WPTR_WIDTH 15 +#define FRF_AZ_EVQ_EN_LBN 23 +#define FRF_AZ_EVQ_EN_WIDTH 1 +#define FRF_AZ_EVQ_SIZE_LBN 20 +#define FRF_AZ_EVQ_SIZE_WIDTH 3 +#define FFE_AZ_EVQ_SIZE_32K 6 +#define FFE_AZ_EVQ_SIZE_16K 5 +#define FFE_AZ_EVQ_SIZE_8K 4 +#define FFE_AZ_EVQ_SIZE_4K 3 +#define FFE_AZ_EVQ_SIZE_2K 2 +#define FFE_AZ_EVQ_SIZE_1K 1 +#define FFE_AZ_EVQ_SIZE_512 0 +#define FRF_AZ_EVQ_BUF_BASE_ID_LBN 0 +#define FRF_AZ_EVQ_BUF_BASE_ID_WIDTH 20 + + +/* + * FR_AA_BUF_HALF_TBL_KER(64bit): + * Buffer table in half buffer table mode direct access by driver + */ +#define FR_AA_BUF_HALF_TBL_KER_OFST 0x00018000 +/* falcona0=net_func_bar2 */ +#define FR_AA_BUF_HALF_TBL_KER_STEP 8 +#define FR_AA_BUF_HALF_TBL_KER_ROWS 4096 +/* + * FR_AZ_BUF_HALF_TBL(64bit): + * Buffer table in half buffer table mode direct access by driver + */ +#define FR_AZ_BUF_HALF_TBL_OFST 0x00800000 +/* sienaa0=net_func_bar2,falconb0=net_func_bar2,falcona0=char_func_bar0 */ +#define FR_AZ_BUF_HALF_TBL_STEP 8 +#define FR_CZ_BUF_HALF_TBL_ROWS 147456 +#define FR_AB_BUF_HALF_TBL_ROWS 524288 + +#define FRF_AZ_BUF_ADR_HBUF_ODD_LBN 44 +#define FRF_AZ_BUF_ADR_HBUF_ODD_WIDTH 20 +#define FRF_AZ_BUF_OWNER_ID_HBUF_ODD_LBN 32 +#define FRF_AZ_BUF_OWNER_ID_HBUF_ODD_WIDTH 12 +#define FRF_AZ_BUF_ADR_HBUF_EVEN_LBN 12 +#define FRF_AZ_BUF_ADR_HBUF_EVEN_WIDTH 20 +#define FRF_AZ_BUF_OWNER_ID_HBUF_EVEN_LBN 0 +#define FRF_AZ_BUF_OWNER_ID_HBUF_EVEN_WIDTH 12 + + +/* + * FR_AA_BUF_FULL_TBL_KER(64bit): + * Buffer table in full buffer table mode direct access by driver + */ +#define FR_AA_BUF_FULL_TBL_KER_OFST 0x00018000 +/* falcona0=net_func_bar2 */ +#define FR_AA_BUF_FULL_TBL_KER_STEP 8 +#define FR_AA_BUF_FULL_TBL_KER_ROWS 4096 +/* + * FR_AZ_BUF_FULL_TBL(64bit): + * Buffer table in full buffer table mode direct access by driver + */ +#define FR_AZ_BUF_FULL_TBL_OFST 0x00800000 +/* sienaa0=net_func_bar2,falconb0=net_func_bar2,falcona0=char_func_bar0 */ +#define FR_AZ_BUF_FULL_TBL_STEP 8 + +#define FR_CZ_BUF_FULL_TBL_ROWS 147456 +#define FR_AB_BUF_FULL_TBL_ROWS 917504 + +#define FRF_AZ_BUF_FULL_UNUSED_LBN 51 +#define FRF_AZ_BUF_FULL_UNUSED_WIDTH 13 +#define FRF_AZ_IP_DAT_BUF_SIZE_LBN 50 +#define FRF_AZ_IP_DAT_BUF_SIZE_WIDTH 1 +#define FRF_AZ_BUF_ADR_REGION_LBN 48 +#define FRF_AZ_BUF_ADR_REGION_WIDTH 2 +#define FFE_AZ_BUF_ADR_REGN3 3 +#define FFE_AZ_BUF_ADR_REGN2 2 +#define FFE_AZ_BUF_ADR_REGN1 1 +#define FFE_AZ_BUF_ADR_REGN0 0 +#define FRF_AZ_BUF_ADR_FBUF_LBN 14 +#define FRF_AZ_BUF_ADR_FBUF_WIDTH 34 +#define FRF_AZ_BUF_ADR_FBUF_DW0_LBN 14 +#define FRF_AZ_BUF_ADR_FBUF_DW0_WIDTH 32 +#define FRF_AZ_BUF_ADR_FBUF_DW1_LBN 46 +#define FRF_AZ_BUF_ADR_FBUF_DW1_WIDTH 2 +#define FRF_AZ_BUF_OWNER_ID_FBUF_LBN 0 +#define FRF_AZ_BUF_OWNER_ID_FBUF_WIDTH 14 + + +/* + * FR_AZ_RX_FILTER_TBL0(128bit): + * TCP/IPv4 Receive filter table + */ +#define FR_AZ_RX_FILTER_TBL0_OFST 0x00f00000 +/* falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */ +#define FR_AZ_RX_FILTER_TBL0_STEP 32 +#define FR_AZ_RX_FILTER_TBL0_ROWS 8192 +/* + * FR_AB_RX_FILTER_TBL1(128bit): + * TCP/IPv4 Receive filter table + */ +#define FR_AB_RX_FILTER_TBL1_OFST 0x00f00010 +/* falconb0=net_func_bar2,falcona0=char_func_bar0 */ +#define FR_AB_RX_FILTER_TBL1_STEP 32 +#define FR_AB_RX_FILTER_TBL1_ROWS 8192 + 
+#define FRF_BZ_RSS_EN_LBN 110 +#define FRF_BZ_RSS_EN_WIDTH 1 +#define FRF_BZ_SCATTER_EN_LBN 109 +#define FRF_BZ_SCATTER_EN_WIDTH 1 +#define FRF_AZ_TCP_UDP_LBN 108 +#define FRF_AZ_TCP_UDP_WIDTH 1 +#define FRF_AZ_RXQ_ID_LBN 96 +#define FRF_AZ_RXQ_ID_WIDTH 12 +#define FRF_AZ_DEST_IP_LBN 64 +#define FRF_AZ_DEST_IP_WIDTH 32 +#define FRF_AZ_DEST_PORT_TCP_LBN 48 +#define FRF_AZ_DEST_PORT_TCP_WIDTH 16 +#define FRF_AZ_SRC_IP_LBN 16 +#define FRF_AZ_SRC_IP_WIDTH 32 +#define FRF_AZ_SRC_TCP_DEST_UDP_LBN 0 +#define FRF_AZ_SRC_TCP_DEST_UDP_WIDTH 16 + + +/* + * FR_CZ_RX_MAC_FILTER_TBL0(128bit): + * Receive Ethernet filter table + */ +#define FR_CZ_RX_MAC_FILTER_TBL0_OFST 0x00f00010 +/* sienaa0=net_func_bar2 */ +#define FR_CZ_RX_MAC_FILTER_TBL0_STEP 32 +#define FR_CZ_RX_MAC_FILTER_TBL0_ROWS 512 + +#define FRF_CZ_RMFT_RSS_EN_LBN 75 +#define FRF_CZ_RMFT_RSS_EN_WIDTH 1 +#define FRF_CZ_RMFT_SCATTER_EN_LBN 74 +#define FRF_CZ_RMFT_SCATTER_EN_WIDTH 1 +#define FRF_CZ_RMFT_IP_OVERRIDE_LBN 73 +#define FRF_CZ_RMFT_IP_OVERRIDE_WIDTH 1 +#define FRF_CZ_RMFT_RXQ_ID_LBN 61 +#define FRF_CZ_RMFT_RXQ_ID_WIDTH 12 +#define FRF_CZ_RMFT_WILDCARD_MATCH_LBN 60 +#define FRF_CZ_RMFT_WILDCARD_MATCH_WIDTH 1 +#define FRF_CZ_RMFT_DEST_MAC_LBN 12 +#define FRF_CZ_RMFT_DEST_MAC_WIDTH 48 +#define FRF_CZ_RMFT_DEST_MAC_DW0_LBN 12 +#define FRF_CZ_RMFT_DEST_MAC_DW0_WIDTH 32 +#define FRF_CZ_RMFT_DEST_MAC_DW1_LBN 44 +#define FRF_CZ_RMFT_DEST_MAC_DW1_WIDTH 16 +#define FRF_CZ_RMFT_VLAN_ID_LBN 0 +#define FRF_CZ_RMFT_VLAN_ID_WIDTH 12 + + +/* + * FR_AZ_TIMER_TBL(128bit): + * Timer table + */ +#define FR_AZ_TIMER_TBL_OFST 0x00f70000 +/* sienaa0=net_func_bar2,falconb0=net_func_bar2,falcona0=char_func_bar0 */ +#define FR_AZ_TIMER_TBL_STEP 16 +#define FR_CZ_TIMER_TBL_ROWS 1024 +#define FR_AB_TIMER_TBL_ROWS 4096 + +#define FRF_CZ_TIMER_Q_EN_LBN 33 +#define FRF_CZ_TIMER_Q_EN_WIDTH 1 +#define FRF_CZ_INT_ARMD_LBN 32 +#define FRF_CZ_INT_ARMD_WIDTH 1 +#define FRF_CZ_INT_PEND_LBN 31 +#define FRF_CZ_INT_PEND_WIDTH 1 +#define FRF_CZ_HOST_NOTIFY_MODE_LBN 30 +#define FRF_CZ_HOST_NOTIFY_MODE_WIDTH 1 +#define FRF_CZ_RELOAD_TIMER_VAL_LBN 16 +#define FRF_CZ_RELOAD_TIMER_VAL_WIDTH 14 +#define FRF_CZ_TIMER_MODE_LBN 14 +#define FRF_CZ_TIMER_MODE_WIDTH 2 +#define FFE_CZ_TIMER_MODE_INT_HLDOFF 3 +#define FFE_CZ_TIMER_MODE_TRIG_START 2 +#define FFE_CZ_TIMER_MODE_IMMED_START 1 +#define FFE_CZ_TIMER_MODE_DIS 0 +#define FRF_AB_TIMER_MODE_LBN 12 +#define FRF_AB_TIMER_MODE_WIDTH 2 +#define FFE_AB_TIMER_MODE_INT_HLDOFF 2 +#define FFE_AB_TIMER_MODE_TRIG_START 2 +#define FFE_AB_TIMER_MODE_IMMED_START 1 +#define FFE_AB_TIMER_MODE_DIS 0 +#define FRF_CZ_TIMER_VAL_LBN 0 +#define FRF_CZ_TIMER_VAL_WIDTH 14 +#define FRF_AB_TIMER_VAL_LBN 0 +#define FRF_AB_TIMER_VAL_WIDTH 12 + + +/* + * FR_BZ_TX_PACE_TBL(128bit): + * Transmit pacing table + */ +#define FR_BZ_TX_PACE_TBL_OFST 0x00f80000 +/* sienaa0=net_func_bar2,falconb0=net_func_bar2 */ +#define FR_AZ_TX_PACE_TBL_STEP 16 +#define FR_CZ_TX_PACE_TBL_ROWS 1024 +#define FR_BB_TX_PACE_TBL_ROWS 4096 +/* + * FR_AA_TX_PACE_TBL(128bit): + * Transmit pacing table + */ +#define FR_AA_TX_PACE_TBL_OFST 0x00f80040 +/* falcona0=char_func_bar0 */ +/* FR_AZ_TX_PACE_TBL_STEP 16 */ +#define FR_AA_TX_PACE_TBL_ROWS 4092 + +#define FRF_AZ_TX_PACE_LBN 0 +#define FRF_AZ_TX_PACE_WIDTH 5 + + +/* + * FR_BZ_RX_INDIRECTION_TBL(7bit): + * RX Indirection Table + */ +#define FR_BZ_RX_INDIRECTION_TBL_OFST 0x00fb0000 +/* falconb0,sienaa0=net_func_bar2 */ +#define FR_BZ_RX_INDIRECTION_TBL_STEP 16 +#define FR_BZ_RX_INDIRECTION_TBL_ROWS 128 + +#define FRF_BZ_IT_QUEUE_LBN 0 
+#define FRF_BZ_IT_QUEUE_WIDTH 6 + + +/* + * FR_CZ_TX_FILTER_TBL0(128bit): + * TCP/IPv4 Transmit filter table + */ +#define FR_CZ_TX_FILTER_TBL0_OFST 0x00fc0000 +/* sienaa0=net_func_bar2 */ +#define FR_CZ_TX_FILTER_TBL0_STEP 16 +#define FR_CZ_TX_FILTER_TBL0_ROWS 8192 + +#define FRF_CZ_TIFT_TCP_UDP_LBN 108 +#define FRF_CZ_TIFT_TCP_UDP_WIDTH 1 +#define FRF_CZ_TIFT_TXQ_ID_LBN 96 +#define FRF_CZ_TIFT_TXQ_ID_WIDTH 12 +#define FRF_CZ_TIFT_DEST_IP_LBN 64 +#define FRF_CZ_TIFT_DEST_IP_WIDTH 32 +#define FRF_CZ_TIFT_DEST_PORT_TCP_LBN 48 +#define FRF_CZ_TIFT_DEST_PORT_TCP_WIDTH 16 +#define FRF_CZ_TIFT_SRC_IP_LBN 16 +#define FRF_CZ_TIFT_SRC_IP_WIDTH 32 +#define FRF_CZ_TIFT_SRC_TCP_DEST_UDP_LBN 0 +#define FRF_CZ_TIFT_SRC_TCP_DEST_UDP_WIDTH 16 + + +/* + * FR_CZ_TX_MAC_FILTER_TBL0(128bit): + * Transmit Ethernet filter table + */ +#define FR_CZ_TX_MAC_FILTER_TBL0_OFST 0x00fe0000 +/* sienaa0=net_func_bar2 */ +#define FR_CZ_TX_MAC_FILTER_TBL0_STEP 16 +#define FR_CZ_TX_MAC_FILTER_TBL0_ROWS 512 + +#define FRF_CZ_TMFT_TXQ_ID_LBN 61 +#define FRF_CZ_TMFT_TXQ_ID_WIDTH 12 +#define FRF_CZ_TMFT_WILDCARD_MATCH_LBN 60 +#define FRF_CZ_TMFT_WILDCARD_MATCH_WIDTH 1 +#define FRF_CZ_TMFT_SRC_MAC_LBN 12 +#define FRF_CZ_TMFT_SRC_MAC_WIDTH 48 +#define FRF_CZ_TMFT_SRC_MAC_DW0_LBN 12 +#define FRF_CZ_TMFT_SRC_MAC_DW0_WIDTH 32 +#define FRF_CZ_TMFT_SRC_MAC_DW1_LBN 44 +#define FRF_CZ_TMFT_SRC_MAC_DW1_WIDTH 16 +#define FRF_CZ_TMFT_VLAN_ID_LBN 0 +#define FRF_CZ_TMFT_VLAN_ID_WIDTH 12 + + +/* + * FR_CZ_MC_TREG_SMEM(32bit): + * MC Shared Memory + */ +#define FR_CZ_MC_TREG_SMEM_OFST 0x00ff0000 +/* sienaa0=net_func_bar2 */ +#define FR_CZ_MC_TREG_SMEM_STEP 4 +#define FR_CZ_MC_TREG_SMEM_ROWS 512 + +#define FRF_CZ_MC_TREG_SMEM_ROW_LBN 0 +#define FRF_CZ_MC_TREG_SMEM_ROW_WIDTH 32 + + +/* + * FR_BB_MSIX_VECTOR_TABLE(128bit): + * MSIX Vector Table + */ +#define FR_BB_MSIX_VECTOR_TABLE_OFST 0x00ff0000 +/* falconb0=net_func_bar2 */ +#define FR_BZ_MSIX_VECTOR_TABLE_STEP 16 +#define FR_BB_MSIX_VECTOR_TABLE_ROWS 64 +/* + * FR_CZ_MSIX_VECTOR_TABLE(128bit): + * MSIX Vector Table + */ +#define FR_CZ_MSIX_VECTOR_TABLE_OFST 0x00000000 +/* sienaa0=pci_f0_bar4 */ +/* FR_BZ_MSIX_VECTOR_TABLE_STEP 16 */ +#define FR_CZ_MSIX_VECTOR_TABLE_ROWS 1024 + +#define FRF_BZ_MSIX_VECTOR_RESERVED_LBN 97 +#define FRF_BZ_MSIX_VECTOR_RESERVED_WIDTH 31 +#define FRF_BZ_MSIX_VECTOR_MASK_LBN 96 +#define FRF_BZ_MSIX_VECTOR_MASK_WIDTH 1 +#define FRF_BZ_MSIX_MESSAGE_DATA_LBN 64 +#define FRF_BZ_MSIX_MESSAGE_DATA_WIDTH 32 +#define FRF_BZ_MSIX_MESSAGE_ADDRESS_HI_LBN 32 +#define FRF_BZ_MSIX_MESSAGE_ADDRESS_HI_WIDTH 32 +#define FRF_BZ_MSIX_MESSAGE_ADDRESS_LO_LBN 0 +#define FRF_BZ_MSIX_MESSAGE_ADDRESS_LO_WIDTH 32 + + +/* + * FR_BB_MSIX_PBA_TABLE(32bit): + * MSIX Pending Bit Array + */ +#define FR_BB_MSIX_PBA_TABLE_OFST 0x00ff2000 +/* falconb0=net_func_bar2 */ +#define FR_BZ_MSIX_PBA_TABLE_STEP 4 +#define FR_BB_MSIX_PBA_TABLE_ROWS 2 +/* + * FR_CZ_MSIX_PBA_TABLE(32bit): + * MSIX Pending Bit Array + */ +#define FR_CZ_MSIX_PBA_TABLE_OFST 0x00008000 +/* sienaa0=pci_f0_bar4 */ +/* FR_BZ_MSIX_PBA_TABLE_STEP 4 */ +#define FR_CZ_MSIX_PBA_TABLE_ROWS 32 + +#define FRF_BZ_MSIX_PBA_PEND_DWORD_LBN 0 +#define FRF_BZ_MSIX_PBA_PEND_DWORD_WIDTH 32 + + +/* + * FR_AZ_SRM_DBG_REG(64bit): + * SRAM debug access + */ +#define FR_AZ_SRM_DBG_REG_OFST 0x03000000 +/* sienaa0=net_func_bar2,falconb0=net_func_bar2,falcona0=char_func_bar0 */ +#define FR_AZ_SRM_DBG_REG_STEP 8 + +#define FR_CZ_SRM_DBG_REG_ROWS 262144 +#define FR_AB_SRM_DBG_REG_ROWS 2097152 + +#define FRF_AZ_SRM_DBG_LBN 0 +#define FRF_AZ_SRM_DBG_WIDTH 64 
+#define FRF_AZ_SRM_DBG_DW0_LBN 0 +#define FRF_AZ_SRM_DBG_DW0_WIDTH 32 +#define FRF_AZ_SRM_DBG_DW1_LBN 32 +#define FRF_AZ_SRM_DBG_DW1_WIDTH 32 + + +/* + * FR_AA_INT_ACK_CHAR(32bit): + * CHAR interrupt acknowledge register + */ +#define FR_AA_INT_ACK_CHAR_OFST 0x00000060 +/* falcona0=char_func_bar0 */ + +#define FRF_AA_INT_ACK_CHAR_FIELD_LBN 0 +#define FRF_AA_INT_ACK_CHAR_FIELD_WIDTH 32 + + +/* FS_DRIVER_EV */ +#define FSF_AZ_DRIVER_EV_SUBCODE_LBN 56 +#define FSF_AZ_DRIVER_EV_SUBCODE_WIDTH 4 +#define FSE_AZ_TX_DSC_ERROR_EV 15 +#define FSE_AZ_RX_DSC_ERROR_EV 14 +#define FSE_AZ_RX_RECOVER_EV 11 +#define FSE_AZ_TIMER_EV 10 +#define FSE_AZ_TX_PKT_NON_TCP_UDP 9 +#define FSE_AZ_WAKE_UP_EV 6 +#define FSE_AZ_SRM_UPD_DONE_EV 5 +#define FSE_AZ_EVQ_NOT_EN_EV 3 +#define FSE_AZ_EVQ_INIT_DONE_EV 2 +#define FSE_AZ_RX_DESCQ_FLS_DONE_EV 1 +#define FSE_AZ_TX_DESCQ_FLS_DONE_EV 0 +#define FSF_AZ_DRIVER_EV_SUBDATA_LBN 0 +#define FSF_AZ_DRIVER_EV_SUBDATA_WIDTH 14 + + +/* FS_EVENT_ENTRY */ +#define FSF_AZ_EV_CODE_LBN 60 +#define FSF_AZ_EV_CODE_WIDTH 4 +#define FSE_AZ_EV_CODE_USER_EV 8 +#define FSE_AZ_EV_CODE_DRV_GEN_EV 7 +#define FSE_AZ_EV_CODE_GLOBAL_EV 6 +#define FSE_AZ_EV_CODE_DRIVER_EV 5 +#define FSE_AZ_EV_CODE_TX_EV 2 +#define FSE_AZ_EV_CODE_RX_EV 0 +#define FSF_AZ_EV_DATA_LBN 0 +#define FSF_AZ_EV_DATA_WIDTH 60 +#define FSF_AZ_EV_DATA_DW0_LBN 0 +#define FSF_AZ_EV_DATA_DW0_WIDTH 32 +#define FSF_AZ_EV_DATA_DW1_LBN 32 +#define FSF_AZ_EV_DATA_DW1_WIDTH 28 + + +/* FS_GLOBAL_EV */ +#define FSF_AA_GLB_EV_RX_RECOVERY_LBN 12 +#define FSF_AA_GLB_EV_RX_RECOVERY_WIDTH 1 +#define FSF_BZ_GLB_EV_XG_MNT_INTR_LBN 11 +#define FSF_BZ_GLB_EV_XG_MNT_INTR_WIDTH 1 +#define FSF_AZ_GLB_EV_XFP_PHY0_INTR_LBN 10 +#define FSF_AZ_GLB_EV_XFP_PHY0_INTR_WIDTH 1 +#define FSF_AZ_GLB_EV_XG_PHY0_INTR_LBN 9 +#define FSF_AZ_GLB_EV_XG_PHY0_INTR_WIDTH 1 +#define FSF_AZ_GLB_EV_G_PHY0_INTR_LBN 7 +#define FSF_AZ_GLB_EV_G_PHY0_INTR_WIDTH 1 + + +/* FS_RX_EV */ +#define FSF_CZ_RX_EV_PKT_NOT_PARSED_LBN 58 +#define FSF_CZ_RX_EV_PKT_NOT_PARSED_WIDTH 1 +#define FSF_CZ_RX_EV_IPV6_PKT_LBN 57 +#define FSF_CZ_RX_EV_IPV6_PKT_WIDTH 1 +#define FSF_AZ_RX_EV_PKT_OK_LBN 56 +#define FSF_AZ_RX_EV_PKT_OK_WIDTH 1 +#define FSF_AZ_RX_EV_PAUSE_FRM_ERR_LBN 55 +#define FSF_AZ_RX_EV_PAUSE_FRM_ERR_WIDTH 1 +#define FSF_AZ_RX_EV_BUF_OWNER_ID_ERR_LBN 54 +#define FSF_AZ_RX_EV_BUF_OWNER_ID_ERR_WIDTH 1 +#define FSF_AZ_RX_EV_IP_FRAG_ERR_LBN 53 +#define FSF_AZ_RX_EV_IP_FRAG_ERR_WIDTH 1 +#define FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR_LBN 52 +#define FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR_WIDTH 1 +#define FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR_LBN 51 +#define FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR_WIDTH 1 +#define FSF_AZ_RX_EV_ETH_CRC_ERR_LBN 50 +#define FSF_AZ_RX_EV_ETH_CRC_ERR_WIDTH 1 +#define FSF_AZ_RX_EV_FRM_TRUNC_LBN 49 +#define FSF_AZ_RX_EV_FRM_TRUNC_WIDTH 1 +#define FSF_AZ_RX_EV_TOBE_DISC_LBN 47 +#define FSF_AZ_RX_EV_TOBE_DISC_WIDTH 1 +#define FSF_AZ_RX_EV_PKT_TYPE_LBN 44 +#define FSF_AZ_RX_EV_PKT_TYPE_WIDTH 3 +#define FSE_AZ_RX_EV_PKT_TYPE_VLAN_JUMBO 5 +#define FSE_AZ_RX_EV_PKT_TYPE_VLAN_LLC 4 +#define FSE_AZ_RX_EV_PKT_TYPE_VLAN 3 +#define FSE_AZ_RX_EV_PKT_TYPE_JUMBO 2 +#define FSE_AZ_RX_EV_PKT_TYPE_LLC 1 +#define FSE_AZ_RX_EV_PKT_TYPE_ETH 0 +#define FSF_AZ_RX_EV_HDR_TYPE_LBN 42 +#define FSF_AZ_RX_EV_HDR_TYPE_WIDTH 2 +#define FSE_AZ_RX_EV_HDR_TYPE_OTHER 3 +#define FSE_AZ_RX_EV_HDR_TYPE_IPV4_OTHER 2 +#define FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_OTHER 2 +#define FSE_AZ_RX_EV_HDR_TYPE_IPV4_UDP 1 +#define FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_UDP 1 +#define FSE_AZ_RX_EV_HDR_TYPE_IPV4_TCP 0 +#define FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_TCP 
0 +#define FSF_AZ_RX_EV_DESC_Q_EMPTY_LBN 41 +#define FSF_AZ_RX_EV_DESC_Q_EMPTY_WIDTH 1 +#define FSF_AZ_RX_EV_MCAST_HASH_MATCH_LBN 40 +#define FSF_AZ_RX_EV_MCAST_HASH_MATCH_WIDTH 1 +#define FSF_AZ_RX_EV_MCAST_PKT_LBN 39 +#define FSF_AZ_RX_EV_MCAST_PKT_WIDTH 1 +#define FSF_AA_RX_EV_RECOVERY_FLAG_LBN 37 +#define FSF_AA_RX_EV_RECOVERY_FLAG_WIDTH 1 +#define FSF_AZ_RX_EV_Q_LABEL_LBN 32 +#define FSF_AZ_RX_EV_Q_LABEL_WIDTH 5 +#define FSF_AZ_RX_EV_JUMBO_CONT_LBN 31 +#define FSF_AZ_RX_EV_JUMBO_CONT_WIDTH 1 +#define FSF_AZ_RX_EV_PORT_LBN 30 +#define FSF_AZ_RX_EV_PORT_WIDTH 1 +#define FSF_AZ_RX_EV_BYTE_CNT_LBN 16 +#define FSF_AZ_RX_EV_BYTE_CNT_WIDTH 14 +#define FSF_AZ_RX_EV_SOP_LBN 15 +#define FSF_AZ_RX_EV_SOP_WIDTH 1 +#define FSF_AZ_RX_EV_ISCSI_PKT_OK_LBN 14 +#define FSF_AZ_RX_EV_ISCSI_PKT_OK_WIDTH 1 +#define FSF_AZ_RX_EV_ISCSI_DDIG_ERR_LBN 13 +#define FSF_AZ_RX_EV_ISCSI_DDIG_ERR_WIDTH 1 +#define FSF_AZ_RX_EV_ISCSI_HDIG_ERR_LBN 12 +#define FSF_AZ_RX_EV_ISCSI_HDIG_ERR_WIDTH 1 +#define FSF_AZ_RX_EV_DESC_PTR_LBN 0 +#define FSF_AZ_RX_EV_DESC_PTR_WIDTH 12 + + +/* FS_RX_KER_DESC */ +#define FSF_AZ_RX_KER_BUF_SIZE_LBN 48 +#define FSF_AZ_RX_KER_BUF_SIZE_WIDTH 14 +#define FSF_AZ_RX_KER_BUF_REGION_LBN 46 +#define FSF_AZ_RX_KER_BUF_REGION_WIDTH 2 +#define FSF_AZ_RX_KER_BUF_ADDR_LBN 0 +#define FSF_AZ_RX_KER_BUF_ADDR_WIDTH 46 +#define FSF_AZ_RX_KER_BUF_ADDR_DW0_LBN 0 +#define FSF_AZ_RX_KER_BUF_ADDR_DW0_WIDTH 32 +#define FSF_AZ_RX_KER_BUF_ADDR_DW1_LBN 32 +#define FSF_AZ_RX_KER_BUF_ADDR_DW1_WIDTH 14 + + +/* FS_RX_USER_DESC */ +#define FSF_AZ_RX_USER_2BYTE_OFFSET_LBN 20 +#define FSF_AZ_RX_USER_2BYTE_OFFSET_WIDTH 12 +#define FSF_AZ_RX_USER_BUF_ID_LBN 0 +#define FSF_AZ_RX_USER_BUF_ID_WIDTH 20 + + +/* FS_TX_EV */ +#define FSF_AZ_TX_EV_PKT_ERR_LBN 38 +#define FSF_AZ_TX_EV_PKT_ERR_WIDTH 1 +#define FSF_AZ_TX_EV_PKT_TOO_BIG_LBN 37 +#define FSF_AZ_TX_EV_PKT_TOO_BIG_WIDTH 1 +#define FSF_AZ_TX_EV_Q_LABEL_LBN 32 +#define FSF_AZ_TX_EV_Q_LABEL_WIDTH 5 +#define FSF_AZ_TX_EV_PORT_LBN 16 +#define FSF_AZ_TX_EV_PORT_WIDTH 1 +#define FSF_AZ_TX_EV_WQ_FF_FULL_LBN 15 +#define FSF_AZ_TX_EV_WQ_FF_FULL_WIDTH 1 +#define FSF_AZ_TX_EV_BUF_OWNER_ID_ERR_LBN 14 +#define FSF_AZ_TX_EV_BUF_OWNER_ID_ERR_WIDTH 1 +#define FSF_AZ_TX_EV_COMP_LBN 12 +#define FSF_AZ_TX_EV_COMP_WIDTH 1 +#define FSF_AZ_TX_EV_DESC_PTR_LBN 0 +#define FSF_AZ_TX_EV_DESC_PTR_WIDTH 12 + + +/* FS_TX_KER_DESC */ +#define FSF_AZ_TX_KER_CONT_LBN 62 +#define FSF_AZ_TX_KER_CONT_WIDTH 1 +#define FSF_AZ_TX_KER_BYTE_COUNT_LBN 48 +#define FSF_AZ_TX_KER_BYTE_COUNT_WIDTH 14 +#define FSF_AZ_TX_KER_BUF_REGION_LBN 46 +#define FSF_AZ_TX_KER_BUF_REGION_WIDTH 2 +#define FSF_AZ_TX_KER_BUF_ADDR_LBN 0 +#define FSF_AZ_TX_KER_BUF_ADDR_WIDTH 46 +#define FSF_AZ_TX_KER_BUF_ADDR_DW0_LBN 0 +#define FSF_AZ_TX_KER_BUF_ADDR_DW0_WIDTH 32 +#define FSF_AZ_TX_KER_BUF_ADDR_DW1_LBN 32 +#define FSF_AZ_TX_KER_BUF_ADDR_DW1_WIDTH 14 + + +/* FS_TX_USER_DESC */ +#define FSF_AZ_TX_USER_SW_EV_EN_LBN 48 +#define FSF_AZ_TX_USER_SW_EV_EN_WIDTH 1 +#define FSF_AZ_TX_USER_CONT_LBN 46 +#define FSF_AZ_TX_USER_CONT_WIDTH 1 +#define FSF_AZ_TX_USER_BYTE_CNT_LBN 33 +#define FSF_AZ_TX_USER_BYTE_CNT_WIDTH 13 +#define FSF_AZ_TX_USER_BUF_ID_LBN 13 +#define FSF_AZ_TX_USER_BUF_ID_WIDTH 20 +#define FSF_AZ_TX_USER_BYTE_OFS_LBN 0 +#define FSF_AZ_TX_USER_BYTE_OFS_WIDTH 13 + + +/* FS_USER_EV */ +#define FSF_CZ_USER_QID_LBN 32 +#define FSF_CZ_USER_QID_WIDTH 10 +#define FSF_CZ_USER_EV_REG_VALUE_LBN 0 +#define FSF_CZ_USER_EV_REG_VALUE_WIDTH 32 + + +/* FS_NET_IVEC */ +#define FSF_AZ_NET_IVEC_FATAL_INT_LBN 64 +#define FSF_AZ_NET_IVEC_FATAL_INT_WIDTH 1 
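The FS_EVENT_ENTRY and FS_RX_EV definitions above describe how a 64-bit Falcon/Siena event decomposes: bits 63:60 hold the event code, and for receive completions the remaining bits carry the packet status, byte count and descriptor pointer. A minimal decoding sketch, assuming a hypothetical shift-and-mask helper rather than the driver's own field macros (illustration only, not part of the patch):

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical helper: extract an LBN/WIDTH field from a 64-bit event. */
static inline uint64_t
ev_field(uint64_t ev, unsigned int lbn, unsigned int width)
{
	return (ev >> lbn) & ((UINT64_C(1) << width) - 1);
}

/* Returns true and fills in the RX completion details if 'ev' is an RX event. */
static inline bool
decode_rx_event(uint64_t ev, uint16_t *byte_cnt, uint16_t *desc_ptr, bool *pkt_ok)
{
	if (ev_field(ev, FSF_AZ_EV_CODE_LBN, FSF_AZ_EV_CODE_WIDTH) !=
	    FSE_AZ_EV_CODE_RX_EV)
		return false;

	*pkt_ok = ev_field(ev, FSF_AZ_RX_EV_PKT_OK_LBN,
	    FSF_AZ_RX_EV_PKT_OK_WIDTH) != 0;
	*byte_cnt = ev_field(ev, FSF_AZ_RX_EV_BYTE_CNT_LBN,
	    FSF_AZ_RX_EV_BYTE_CNT_WIDTH);
	*desc_ptr = ev_field(ev, FSF_AZ_RX_EV_DESC_PTR_LBN,
	    FSF_AZ_RX_EV_DESC_PTR_WIDTH);
	return true;
}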
+#define FSF_AZ_NET_IVEC_INT_Q_LBN 40 +#define FSF_AZ_NET_IVEC_INT_Q_WIDTH 4 +#define FSF_AZ_NET_IVEC_INT_FLAG_LBN 32 +#define FSF_AZ_NET_IVEC_INT_FLAG_WIDTH 1 +#define FSF_AZ_NET_IVEC_EVQ_FIFO_HF_LBN 1 +#define FSF_AZ_NET_IVEC_EVQ_FIFO_HF_WIDTH 1 +#define FSF_AZ_NET_IVEC_EVQ_FIFO_AF_LBN 0 +#define FSF_AZ_NET_IVEC_EVQ_FIFO_AF_WIDTH 1 + + +/* DRIVER_EV */ +/* Sub-fields of an RX flush completion event */ +#define FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL_LBN 12 +#define FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL_WIDTH 1 +#define FSF_AZ_DRIVER_EV_RX_DESCQ_ID_LBN 0 +#define FSF_AZ_DRIVER_EV_RX_DESCQ_ID_WIDTH 12 + + + +/************************************************************************** + * + * Falcon non-volatile configuration + * + ************************************************************************** + */ + + +#define FR_AZ_TX_PACE_TBL_OFST FR_BZ_TX_PACE_TBL_OFST + + +#ifdef __cplusplus +} +#endif + + + + +#endif /* _SYS_EFX_REGS_H */ diff --git a/src/spdk/dpdk/drivers/net/sfc/base/efx_regs_ef10.h b/src/spdk/dpdk/drivers/net/sfc/base/efx_regs_ef10.h new file mode 100644 index 000000000..81ac80192 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/base/efx_regs_ef10.h @@ -0,0 +1,727 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2007-2019 Solarflare Communications Inc. + */ + +#ifndef _SYS_EFX_EF10_REGS_H +#define _SYS_EFX_EF10_REGS_H + +#ifdef __cplusplus +extern "C" { +#endif + +/************************************************************************** + * NOTE: the line below marks the start of the autogenerated section + * EF10 registers and descriptors + * + ************************************************************************** + */ + +/* + * BIU_HW_REV_ID_REG(32bit): + * + */ + +#define ER_DZ_BIU_HW_REV_ID_REG_OFST 0x00000000 +/* hunta0,medforda0,medford2a0=pf_dbell_bar */ +#define ER_DZ_BIU_HW_REV_ID_REG_RESET 0xeb14face + + +#define ERF_DZ_HW_REV_ID_LBN 0 +#define ERF_DZ_HW_REV_ID_WIDTH 32 + + +/* + * BIU_MC_SFT_STATUS_REG(32bit): + * + */ + +#define ER_DZ_BIU_MC_SFT_STATUS_REG_OFST 0x00000010 +/* hunta0,medforda0,medford2a0=pf_dbell_bar */ +#define ER_DZ_BIU_MC_SFT_STATUS_REG_STEP 4 +#define ER_DZ_BIU_MC_SFT_STATUS_REG_ROWS 8 +#define ER_DZ_BIU_MC_SFT_STATUS_REG_RESET 0x1111face + + +#define ERF_DZ_MC_SFT_STATUS_LBN 0 +#define ERF_DZ_MC_SFT_STATUS_WIDTH 32 + + +/* + * BIU_INT_ISR_REG(32bit): + * + */ + +#define ER_DZ_BIU_INT_ISR_REG_OFST 0x00000090 +/* hunta0,medforda0,medford2a0=pf_dbell_bar */ +#define ER_DZ_BIU_INT_ISR_REG_RESET 0x0 + + +#define ERF_DZ_ISR_REG_LBN 0 +#define ERF_DZ_ISR_REG_WIDTH 32 + + +/* + * MC_DB_LWRD_REG(32bit): + * + */ + +#define ER_DZ_MC_DB_LWRD_REG_OFST 0x00000200 +/* hunta0,medforda0,medford2a0=pf_dbell_bar */ +#define ER_DZ_MC_DB_LWRD_REG_RESET 0x0 + + +#define ERF_DZ_MC_DOORBELL_L_LBN 0 +#define ERF_DZ_MC_DOORBELL_L_WIDTH 32 + + +/* + * MC_DB_HWRD_REG(32bit): + * + */ + +#define ER_DZ_MC_DB_HWRD_REG_OFST 0x00000204 +/* hunta0,medforda0,medford2a0=pf_dbell_bar */ +#define ER_DZ_MC_DB_HWRD_REG_RESET 0x0 + + +#define ERF_DZ_MC_DOORBELL_H_LBN 0 +#define ERF_DZ_MC_DOORBELL_H_WIDTH 32 + + +/* + * EVQ_RPTR_REG(32bit): + * + */ + +#define ER_DZ_EVQ_RPTR_REG_OFST 0x00000400 +/* hunta0,medforda0,medford2a0=pf_dbell_bar */ +#define ER_DZ_EVQ_RPTR_REG_STEP 8192 +#define ER_DZ_EVQ_RPTR_REG_ROWS 2048 +#define ER_DZ_EVQ_RPTR_REG_RESET 0x0 + + +#define ERF_DZ_EVQ_RPTR_VLD_LBN 15 +#define ERF_DZ_EVQ_RPTR_VLD_WIDTH 1 +#define ERF_DZ_EVQ_RPTR_LBN 0 +#define ERF_DZ_EVQ_RPTR_WIDTH 15 + + +/* + * EVQ_RPTR_REG_64K(32bit): + 
* + */ + +#define ER_FZ_EVQ_RPTR_REG_64K_OFST 0x00000400 +/* medford2a0=pf_dbell_bar */ +#define ER_FZ_EVQ_RPTR_REG_64K_STEP 65536 +#define ER_FZ_EVQ_RPTR_REG_64K_ROWS 2048 +#define ER_FZ_EVQ_RPTR_REG_64K_RESET 0x0 + + +#define ERF_FZ_EVQ_RPTR_VLD_LBN 15 +#define ERF_FZ_EVQ_RPTR_VLD_WIDTH 1 +#define ERF_FZ_EVQ_RPTR_LBN 0 +#define ERF_FZ_EVQ_RPTR_WIDTH 15 + + +/* + * EVQ_RPTR_REG_16K(32bit): + * + */ + +#define ER_FZ_EVQ_RPTR_REG_16K_OFST 0x00000400 +/* medford2a0=pf_dbell_bar */ +#define ER_FZ_EVQ_RPTR_REG_16K_STEP 16384 +#define ER_FZ_EVQ_RPTR_REG_16K_ROWS 2048 +#define ER_FZ_EVQ_RPTR_REG_16K_RESET 0x0 + + +/* defined as ERF_FZ_EVQ_RPTR_VLD_LBN 15; */ +/* defined as ERF_FZ_EVQ_RPTR_VLD_WIDTH 1 */ +/* defined as ERF_FZ_EVQ_RPTR_LBN 0; */ +/* defined as ERF_FZ_EVQ_RPTR_WIDTH 15 */ + + +/* + * EVQ_TMR_REG_64K(32bit): + * + */ + +#define ER_FZ_EVQ_TMR_REG_64K_OFST 0x00000420 +/* medford2a0=pf_dbell_bar */ +#define ER_FZ_EVQ_TMR_REG_64K_STEP 65536 +#define ER_FZ_EVQ_TMR_REG_64K_ROWS 2048 +#define ER_FZ_EVQ_TMR_REG_64K_RESET 0x0 + + +#define ERF_FZ_TC_TMR_REL_VAL_LBN 16 +#define ERF_FZ_TC_TMR_REL_VAL_WIDTH 14 +#define ERF_FZ_TC_TIMER_MODE_LBN 14 +#define ERF_FZ_TC_TIMER_MODE_WIDTH 2 +#define ERF_FZ_TC_TIMER_VAL_LBN 0 +#define ERF_FZ_TC_TIMER_VAL_WIDTH 14 + + +/* + * EVQ_TMR_REG_16K(32bit): + * + */ + +#define ER_FZ_EVQ_TMR_REG_16K_OFST 0x00000420 +/* medford2a0=pf_dbell_bar */ +#define ER_FZ_EVQ_TMR_REG_16K_STEP 16384 +#define ER_FZ_EVQ_TMR_REG_16K_ROWS 2048 +#define ER_FZ_EVQ_TMR_REG_16K_RESET 0x0 + + +/* defined as ERF_FZ_TC_TMR_REL_VAL_LBN 16; */ +/* defined as ERF_FZ_TC_TMR_REL_VAL_WIDTH 14 */ +/* defined as ERF_FZ_TC_TIMER_MODE_LBN 14; */ +/* defined as ERF_FZ_TC_TIMER_MODE_WIDTH 2 */ +/* defined as ERF_FZ_TC_TIMER_VAL_LBN 0; */ +/* defined as ERF_FZ_TC_TIMER_VAL_WIDTH 14 */ + + +/* + * EVQ_TMR_REG(32bit): + * + */ + +#define ER_DZ_EVQ_TMR_REG_OFST 0x00000420 +/* hunta0,medforda0,medford2a0=pf_dbell_bar */ +#define ER_DZ_EVQ_TMR_REG_STEP 8192 +#define ER_DZ_EVQ_TMR_REG_ROWS 2048 +#define ER_DZ_EVQ_TMR_REG_RESET 0x0 + + +/* defined as ERF_FZ_TC_TMR_REL_VAL_LBN 16; */ +/* defined as ERF_FZ_TC_TMR_REL_VAL_WIDTH 14 */ +#define ERF_DZ_TC_TIMER_MODE_LBN 14 +#define ERF_DZ_TC_TIMER_MODE_WIDTH 2 +#define ERF_DZ_TC_TIMER_VAL_LBN 0 +#define ERF_DZ_TC_TIMER_VAL_WIDTH 14 + + +/* + * RX_DESC_UPD_REG_16K(32bit): + * + */ + +#define ER_FZ_RX_DESC_UPD_REG_16K_OFST 0x00000830 +/* medford2a0=pf_dbell_bar */ +#define ER_FZ_RX_DESC_UPD_REG_16K_STEP 16384 +#define ER_FZ_RX_DESC_UPD_REG_16K_ROWS 2048 +#define ER_FZ_RX_DESC_UPD_REG_16K_RESET 0x0 + + +#define ERF_FZ_RX_DESC_WPTR_LBN 0 +#define ERF_FZ_RX_DESC_WPTR_WIDTH 12 + + +/* + * RX_DESC_UPD_REG(32bit): + * + */ + +#define ER_DZ_RX_DESC_UPD_REG_OFST 0x00000830 +/* hunta0,medforda0,medford2a0=pf_dbell_bar */ +#define ER_DZ_RX_DESC_UPD_REG_STEP 8192 +#define ER_DZ_RX_DESC_UPD_REG_ROWS 2048 +#define ER_DZ_RX_DESC_UPD_REG_RESET 0x0 + + +#define ERF_DZ_RX_DESC_WPTR_LBN 0 +#define ERF_DZ_RX_DESC_WPTR_WIDTH 12 + + +/* + * RX_DESC_UPD_REG_64K(32bit): + * + */ + +#define ER_FZ_RX_DESC_UPD_REG_64K_OFST 0x00000830 +/* medford2a0=pf_dbell_bar */ +#define ER_FZ_RX_DESC_UPD_REG_64K_STEP 65536 +#define ER_FZ_RX_DESC_UPD_REG_64K_ROWS 2048 +#define ER_FZ_RX_DESC_UPD_REG_64K_RESET 0x0 + + +/* defined as ERF_FZ_RX_DESC_WPTR_LBN 0; */ +/* defined as ERF_FZ_RX_DESC_WPTR_WIDTH 12 */ + + +/* + * TX_DESC_UPD_REG_64K(96bit): + * + */ + +#define ER_FZ_TX_DESC_UPD_REG_64K_OFST 0x00000a10 +/* medford2a0=pf_dbell_bar */ +#define ER_FZ_TX_DESC_UPD_REG_64K_STEP 65536 +#define 
ER_FZ_TX_DESC_UPD_REG_64K_ROWS 2048 +#define ER_FZ_TX_DESC_UPD_REG_64K_RESET 0x0 + + +#define ERF_FZ_RSVD_LBN 76 +#define ERF_FZ_RSVD_WIDTH 20 +#define ERF_FZ_TX_DESC_WPTR_LBN 64 +#define ERF_FZ_TX_DESC_WPTR_WIDTH 12 +#define ERF_FZ_TX_DESC_HWORD_LBN 32 +#define ERF_FZ_TX_DESC_HWORD_WIDTH 32 +#define ERF_FZ_TX_DESC_LWORD_LBN 0 +#define ERF_FZ_TX_DESC_LWORD_WIDTH 32 + + +/* + * TX_DESC_UPD_REG_16K(96bit): + * + */ + +#define ER_FZ_TX_DESC_UPD_REG_16K_OFST 0x00000a10 +/* medford2a0=pf_dbell_bar */ +#define ER_FZ_TX_DESC_UPD_REG_16K_STEP 16384 +#define ER_FZ_TX_DESC_UPD_REG_16K_ROWS 2048 +#define ER_FZ_TX_DESC_UPD_REG_16K_RESET 0x0 + + +/* defined as ERF_FZ_RSVD_LBN 76; */ +/* defined as ERF_FZ_RSVD_WIDTH 20 */ +/* defined as ERF_FZ_TX_DESC_WPTR_LBN 64; */ +/* defined as ERF_FZ_TX_DESC_WPTR_WIDTH 12 */ +/* defined as ERF_FZ_TX_DESC_HWORD_LBN 32; */ +/* defined as ERF_FZ_TX_DESC_HWORD_WIDTH 32 */ +/* defined as ERF_FZ_TX_DESC_LWORD_LBN 0; */ +/* defined as ERF_FZ_TX_DESC_LWORD_WIDTH 32 */ + + +/* + * TX_DESC_UPD_REG(96bit): + * + */ + +#define ER_DZ_TX_DESC_UPD_REG_OFST 0x00000a10 +/* hunta0,medforda0,medford2a0=pf_dbell_bar */ +#define ER_DZ_TX_DESC_UPD_REG_STEP 8192 +#define ER_DZ_TX_DESC_UPD_REG_ROWS 2048 +#define ER_DZ_TX_DESC_UPD_REG_RESET 0x0 + + +#define ERF_DZ_RSVD_LBN 76 +#define ERF_DZ_RSVD_WIDTH 20 +#define ERF_DZ_TX_DESC_WPTR_LBN 64 +#define ERF_DZ_TX_DESC_WPTR_WIDTH 12 +#define ERF_DZ_TX_DESC_HWORD_LBN 32 +#define ERF_DZ_TX_DESC_HWORD_WIDTH 32 +#define ERF_DZ_TX_DESC_LWORD_LBN 0 +#define ERF_DZ_TX_DESC_LWORD_WIDTH 32 + + +/* ES_DRIVER_EV */ +#define ESF_DZ_DRV_CODE_LBN 60 +#define ESF_DZ_DRV_CODE_WIDTH 4 +#define ESF_DZ_DRV_SUB_CODE_LBN 56 +#define ESF_DZ_DRV_SUB_CODE_WIDTH 4 +#define ESE_DZ_DRV_TIMER_EV 3 +#define ESE_DZ_DRV_START_UP_EV 2 +#define ESE_DZ_DRV_WAKE_UP_EV 1 +#define ESF_DZ_DRV_SUB_DATA_DW0_LBN 0 +#define ESF_DZ_DRV_SUB_DATA_DW0_WIDTH 32 +#define ESF_DZ_DRV_SUB_DATA_DW1_LBN 32 +#define ESF_DZ_DRV_SUB_DATA_DW1_WIDTH 24 +#define ESF_DZ_DRV_SUB_DATA_LBN 0 +#define ESF_DZ_DRV_SUB_DATA_WIDTH 56 +#define ESF_DZ_DRV_EVQ_ID_LBN 0 +#define ESF_DZ_DRV_EVQ_ID_WIDTH 14 +#define ESF_DZ_DRV_TMR_ID_LBN 0 +#define ESF_DZ_DRV_TMR_ID_WIDTH 14 + + +/* ES_EVENT_ENTRY */ +#define ESF_DZ_EV_CODE_LBN 60 +#define ESF_DZ_EV_CODE_WIDTH 4 +#define ESE_DZ_EV_CODE_MCDI_EV 12 +#define ESE_DZ_EV_CODE_DRIVER_EV 5 +#define ESE_DZ_EV_CODE_TX_EV 2 +#define ESE_DZ_EV_CODE_RX_EV 0 +#define ESE_DZ_OTHER other +#define ESF_DZ_EV_DATA_DW0_LBN 0 +#define ESF_DZ_EV_DATA_DW0_WIDTH 32 +#define ESF_DZ_EV_DATA_DW1_LBN 32 +#define ESF_DZ_EV_DATA_DW1_WIDTH 28 +#define ESF_DZ_EV_DATA_LBN 0 +#define ESF_DZ_EV_DATA_WIDTH 60 + + +/* ES_MC_EVENT */ +#define ESF_DZ_MC_CODE_LBN 60 +#define ESF_DZ_MC_CODE_WIDTH 4 +#define ESF_DZ_MC_OVERRIDE_HOLDOFF_LBN 59 +#define ESF_DZ_MC_OVERRIDE_HOLDOFF_WIDTH 1 +#define ESF_DZ_MC_DROP_EVENT_LBN 58 +#define ESF_DZ_MC_DROP_EVENT_WIDTH 1 +#define ESF_DZ_MC_SOFT_DW0_LBN 0 +#define ESF_DZ_MC_SOFT_DW0_WIDTH 32 +#define ESF_DZ_MC_SOFT_DW1_LBN 32 +#define ESF_DZ_MC_SOFT_DW1_WIDTH 26 +#define ESF_DZ_MC_SOFT_LBN 0 +#define ESF_DZ_MC_SOFT_WIDTH 58 + + +/* ES_RX_EVENT */ +#define ESF_DZ_RX_CODE_LBN 60 +#define ESF_DZ_RX_CODE_WIDTH 4 +#define ESF_DZ_RX_OVERRIDE_HOLDOFF_LBN 59 +#define ESF_DZ_RX_OVERRIDE_HOLDOFF_WIDTH 1 +#define ESF_DZ_RX_DROP_EVENT_LBN 58 +#define ESF_DZ_RX_DROP_EVENT_WIDTH 1 +#define ESF_DD_RX_EV_RSVD2_LBN 54 +#define ESF_DD_RX_EV_RSVD2_WIDTH 4 +#define ESF_EZ_RX_TCP_UDP_INNER_CHKSUM_ERR_LBN 57 +#define ESF_EZ_RX_TCP_UDP_INNER_CHKSUM_ERR_WIDTH 1 +#define 
ESF_EZ_RX_IP_INNER_CHKSUM_ERR_LBN 56 +#define ESF_EZ_RX_IP_INNER_CHKSUM_ERR_WIDTH 1 +#define ESF_EZ_RX_EV_RSVD2_LBN 54 +#define ESF_EZ_RX_EV_RSVD2_WIDTH 2 +#define ESF_DZ_RX_EV_SOFT2_LBN 52 +#define ESF_DZ_RX_EV_SOFT2_WIDTH 2 +#define ESF_DZ_RX_DSC_PTR_LBITS_LBN 48 +#define ESF_DZ_RX_DSC_PTR_LBITS_WIDTH 4 +#define ESF_DE_RX_L4_CLASS_LBN 45 +#define ESF_DE_RX_L4_CLASS_WIDTH 3 +#define ESE_DE_L4_CLASS_RSVD7 7 +#define ESE_DE_L4_CLASS_RSVD6 6 +#define ESE_DE_L4_CLASS_RSVD5 5 +#define ESE_DE_L4_CLASS_RSVD4 4 +#define ESE_DE_L4_CLASS_RSVD3 3 +#define ESE_DE_L4_CLASS_UDP 2 +#define ESE_DE_L4_CLASS_TCP 1 +#define ESE_DE_L4_CLASS_UNKNOWN 0 +#define ESF_FZ_RX_FASTPD_INDCTR_LBN 47 +#define ESF_FZ_RX_FASTPD_INDCTR_WIDTH 1 +#define ESF_FZ_RX_L4_CLASS_LBN 45 +#define ESF_FZ_RX_L4_CLASS_WIDTH 2 +#define ESE_FZ_L4_CLASS_RSVD3 3 +#define ESE_FZ_L4_CLASS_UDP 2 +#define ESE_FZ_L4_CLASS_TCP 1 +#define ESE_FZ_L4_CLASS_UNKNOWN 0 +#define ESF_DZ_RX_L3_CLASS_LBN 42 +#define ESF_DZ_RX_L3_CLASS_WIDTH 3 +#define ESE_DZ_L3_CLASS_RSVD7 7 +#define ESE_DZ_L3_CLASS_IP6_FRAG 6 +#define ESE_DZ_L3_CLASS_ARP 5 +#define ESE_DZ_L3_CLASS_IP4_FRAG 4 +#define ESE_DZ_L3_CLASS_FCOE 3 +#define ESE_DZ_L3_CLASS_IP6 2 +#define ESE_DZ_L3_CLASS_IP4 1 +#define ESE_DZ_L3_CLASS_UNKNOWN 0 +#define ESF_DZ_RX_ETH_TAG_CLASS_LBN 39 +#define ESF_DZ_RX_ETH_TAG_CLASS_WIDTH 3 +#define ESE_DZ_ETH_TAG_CLASS_RSVD7 7 +#define ESE_DZ_ETH_TAG_CLASS_RSVD6 6 +#define ESE_DZ_ETH_TAG_CLASS_RSVD5 5 +#define ESE_DZ_ETH_TAG_CLASS_RSVD4 4 +#define ESE_DZ_ETH_TAG_CLASS_RSVD3 3 +#define ESE_DZ_ETH_TAG_CLASS_VLAN2 2 +#define ESE_DZ_ETH_TAG_CLASS_VLAN1 1 +#define ESE_DZ_ETH_TAG_CLASS_NONE 0 +#define ESF_DZ_RX_ETH_BASE_CLASS_LBN 36 +#define ESF_DZ_RX_ETH_BASE_CLASS_WIDTH 3 +#define ESE_DZ_ETH_BASE_CLASS_LLC_SNAP 2 +#define ESE_DZ_ETH_BASE_CLASS_LLC 1 +#define ESE_DZ_ETH_BASE_CLASS_ETH2 0 +#define ESF_DZ_RX_MAC_CLASS_LBN 35 +#define ESF_DZ_RX_MAC_CLASS_WIDTH 1 +#define ESE_DZ_MAC_CLASS_MCAST 1 +#define ESE_DZ_MAC_CLASS_UCAST 0 +#define ESF_DD_RX_EV_SOFT1_LBN 32 +#define ESF_DD_RX_EV_SOFT1_WIDTH 3 +#define ESF_EZ_RX_EV_SOFT1_LBN 34 +#define ESF_EZ_RX_EV_SOFT1_WIDTH 1 +#define ESF_EZ_RX_ENCAP_HDR_LBN 32 +#define ESF_EZ_RX_ENCAP_HDR_WIDTH 2 +#define ESE_EZ_ENCAP_HDR_GRE 2 +#define ESE_EZ_ENCAP_HDR_VXLAN 1 +#define ESE_EZ_ENCAP_HDR_NONE 0 +#define ESF_DD_RX_EV_RSVD1_LBN 30 +#define ESF_DD_RX_EV_RSVD1_WIDTH 2 +#define ESF_EZ_RX_EV_RSVD1_LBN 31 +#define ESF_EZ_RX_EV_RSVD1_WIDTH 1 +#define ESF_EZ_RX_ABORT_LBN 30 +#define ESF_EZ_RX_ABORT_WIDTH 1 +#define ESF_DZ_RX_ECC_ERR_LBN 29 +#define ESF_DZ_RX_ECC_ERR_WIDTH 1 +#define ESF_DZ_RX_TRUNC_ERR_LBN 29 +#define ESF_DZ_RX_TRUNC_ERR_WIDTH 1 +#define ESF_DZ_RX_CRC1_ERR_LBN 28 +#define ESF_DZ_RX_CRC1_ERR_WIDTH 1 +#define ESF_DZ_RX_CRC0_ERR_LBN 27 +#define ESF_DZ_RX_CRC0_ERR_WIDTH 1 +#define ESF_DZ_RX_TCPUDP_CKSUM_ERR_LBN 26 +#define ESF_DZ_RX_TCPUDP_CKSUM_ERR_WIDTH 1 +#define ESF_DZ_RX_IPCKSUM_ERR_LBN 25 +#define ESF_DZ_RX_IPCKSUM_ERR_WIDTH 1 +#define ESF_DZ_RX_ECRC_ERR_LBN 24 +#define ESF_DZ_RX_ECRC_ERR_WIDTH 1 +#define ESF_DZ_RX_QLABEL_LBN 16 +#define ESF_DZ_RX_QLABEL_WIDTH 5 +#define ESF_DZ_RX_PARSE_INCOMPLETE_LBN 15 +#define ESF_DZ_RX_PARSE_INCOMPLETE_WIDTH 1 +#define ESF_DZ_RX_CONT_LBN 14 +#define ESF_DZ_RX_CONT_WIDTH 1 +#define ESF_DZ_RX_BYTES_LBN 0 +#define ESF_DZ_RX_BYTES_WIDTH 14 + + +/* ES_RX_KER_DESC */ +#define ESF_DZ_RX_KER_RESERVED_LBN 62 +#define ESF_DZ_RX_KER_RESERVED_WIDTH 2 +#define ESF_DZ_RX_KER_BYTE_CNT_LBN 48 +#define ESF_DZ_RX_KER_BYTE_CNT_WIDTH 14 +#define ESF_DZ_RX_KER_BUF_ADDR_DW0_LBN 0 +#define 
ESF_DZ_RX_KER_BUF_ADDR_DW0_WIDTH 32 +#define ESF_DZ_RX_KER_BUF_ADDR_DW1_LBN 32 +#define ESF_DZ_RX_KER_BUF_ADDR_DW1_WIDTH 16 +#define ESF_DZ_RX_KER_BUF_ADDR_LBN 0 +#define ESF_DZ_RX_KER_BUF_ADDR_WIDTH 48 + + +/* ES_TX_CSUM_TSTAMP_DESC */ +#define ESF_DZ_TX_DESC_IS_OPT_LBN 63 +#define ESF_DZ_TX_DESC_IS_OPT_WIDTH 1 +#define ESF_DZ_TX_OPTION_TYPE_LBN 60 +#define ESF_DZ_TX_OPTION_TYPE_WIDTH 3 +#define ESE_DZ_TX_OPTION_DESC_TSO 7 +#define ESE_DZ_TX_OPTION_DESC_VLAN 6 +#define ESE_DZ_TX_OPTION_DESC_CRC_CSUM 0 +#define ESF_DZ_TX_OPTION_TS_AT_TXDP_LBN 8 +#define ESF_DZ_TX_OPTION_TS_AT_TXDP_WIDTH 1 +#define ESF_DZ_TX_OPTION_INNER_UDP_TCP_CSUM_LBN 7 +#define ESF_DZ_TX_OPTION_INNER_UDP_TCP_CSUM_WIDTH 1 +#define ESF_DZ_TX_OPTION_INNER_IP_CSUM_LBN 6 +#define ESF_DZ_TX_OPTION_INNER_IP_CSUM_WIDTH 1 +#define ESF_DZ_TX_TIMESTAMP_LBN 5 +#define ESF_DZ_TX_TIMESTAMP_WIDTH 1 +#define ESF_DZ_TX_OPTION_CRC_MODE_LBN 2 +#define ESF_DZ_TX_OPTION_CRC_MODE_WIDTH 3 +#define ESE_DZ_TX_OPTION_CRC_FCOIP_MPA 5 +#define ESE_DZ_TX_OPTION_CRC_FCOIP_FCOE 4 +#define ESE_DZ_TX_OPTION_CRC_ISCSI_HDR_AND_PYLD 3 +#define ESE_DZ_TX_OPTION_CRC_ISCSI_HDR 2 +#define ESE_DZ_TX_OPTION_CRC_FCOE 1 +#define ESE_DZ_TX_OPTION_CRC_OFF 0 +#define ESF_DZ_TX_OPTION_UDP_TCP_CSUM_LBN 1 +#define ESF_DZ_TX_OPTION_UDP_TCP_CSUM_WIDTH 1 +#define ESF_DZ_TX_OPTION_IP_CSUM_LBN 0 +#define ESF_DZ_TX_OPTION_IP_CSUM_WIDTH 1 + + +/* ES_TX_EVENT */ +#define ESF_DZ_TX_CODE_LBN 60 +#define ESF_DZ_TX_CODE_WIDTH 4 +#define ESF_DZ_TX_OVERRIDE_HOLDOFF_LBN 59 +#define ESF_DZ_TX_OVERRIDE_HOLDOFF_WIDTH 1 +#define ESF_DZ_TX_DROP_EVENT_LBN 58 +#define ESF_DZ_TX_DROP_EVENT_WIDTH 1 +#define ESF_DD_TX_EV_RSVD_LBN 48 +#define ESF_DD_TX_EV_RSVD_WIDTH 10 +#define ESF_EZ_TCP_UDP_INNER_CHKSUM_ERR_LBN 57 +#define ESF_EZ_TCP_UDP_INNER_CHKSUM_ERR_WIDTH 1 +#define ESF_EZ_IP_INNER_CHKSUM_ERR_LBN 56 +#define ESF_EZ_IP_INNER_CHKSUM_ERR_WIDTH 1 +#define ESF_EZ_TX_EV_RSVD_LBN 48 +#define ESF_EZ_TX_EV_RSVD_WIDTH 8 +#define ESF_DZ_TX_SOFT2_LBN 32 +#define ESF_DZ_TX_SOFT2_WIDTH 16 +#define ESF_DD_TX_SOFT1_LBN 24 +#define ESF_DD_TX_SOFT1_WIDTH 8 +#define ESF_EZ_TX_CAN_MERGE_LBN 31 +#define ESF_EZ_TX_CAN_MERGE_WIDTH 1 +#define ESF_EZ_TX_SOFT1_LBN 24 +#define ESF_EZ_TX_SOFT1_WIDTH 7 +#define ESF_DZ_TX_QLABEL_LBN 16 +#define ESF_DZ_TX_QLABEL_WIDTH 5 +#define ESF_DZ_TX_DESCR_INDX_LBN 0 +#define ESF_DZ_TX_DESCR_INDX_WIDTH 16 + + +/* ES_TX_KER_DESC */ +#define ESF_DZ_TX_KER_TYPE_LBN 63 +#define ESF_DZ_TX_KER_TYPE_WIDTH 1 +#define ESF_DZ_TX_KER_CONT_LBN 62 +#define ESF_DZ_TX_KER_CONT_WIDTH 1 +#define ESF_DZ_TX_KER_BYTE_CNT_LBN 48 +#define ESF_DZ_TX_KER_BYTE_CNT_WIDTH 14 +#define ESF_DZ_TX_KER_BUF_ADDR_DW0_LBN 0 +#define ESF_DZ_TX_KER_BUF_ADDR_DW0_WIDTH 32 +#define ESF_DZ_TX_KER_BUF_ADDR_DW1_LBN 32 +#define ESF_DZ_TX_KER_BUF_ADDR_DW1_WIDTH 16 +#define ESF_DZ_TX_KER_BUF_ADDR_LBN 0 +#define ESF_DZ_TX_KER_BUF_ADDR_WIDTH 48 + + +/* ES_TX_PIO_DESC */ +#define ESF_DZ_TX_PIO_TYPE_LBN 63 +#define ESF_DZ_TX_PIO_TYPE_WIDTH 1 +#define ESF_DZ_TX_PIO_OPT_LBN 60 +#define ESF_DZ_TX_PIO_OPT_WIDTH 3 +#define ESF_DZ_TX_PIO_CONT_LBN 59 +#define ESF_DZ_TX_PIO_CONT_WIDTH 1 +#define ESF_DZ_TX_PIO_BYTE_CNT_LBN 32 +#define ESF_DZ_TX_PIO_BYTE_CNT_WIDTH 12 +#define ESF_DZ_TX_PIO_BUF_ADDR_LBN 0 +#define ESF_DZ_TX_PIO_BUF_ADDR_WIDTH 12 + + +/* ES_TX_TSO_DESC */ +#define ESF_DZ_TX_DESC_IS_OPT_LBN 63 +#define ESF_DZ_TX_DESC_IS_OPT_WIDTH 1 +#define ESF_DZ_TX_OPTION_TYPE_LBN 60 +#define ESF_DZ_TX_OPTION_TYPE_WIDTH 3 +#define ESE_DZ_TX_OPTION_DESC_TSO 7 +#define ESE_DZ_TX_OPTION_DESC_VLAN 6 +#define ESE_DZ_TX_OPTION_DESC_CRC_CSUM 0 
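The ES_TX_KER_DESC fields above define the layout of an EF10 physical-address ("KER") transmit descriptor: the buffer address occupies bits 47:0, the byte count bits 61:48, with CONT and TYPE in the top two bits (option descriptors, by contrast, set bit 63 via ESF_DZ_TX_DESC_IS_OPT). A minimal packing sketch, assuming a hypothetical field-insertion helper rather than the driver's own descriptor-population macros (illustration only, not part of the patch):

#include <stdint.h>

/* Hypothetical helper: place 'value' into an LBN/WIDTH field of a 64-bit word. */
static inline uint64_t
desc_field(uint64_t value, unsigned int lbn, unsigned int width)
{
	return (value & ((UINT64_C(1) << width) - 1)) << lbn;
}

/* Build one physical-address TX descriptor: TYPE=0 selects a DMA (non-option)
 * descriptor and CONT=1 indicates further fragments of the same packet follow. */
static inline uint64_t
ef10_tx_ker_desc(uint64_t dma_addr, uint16_t len, int cont)
{
	return desc_field(0, ESF_DZ_TX_KER_TYPE_LBN, ESF_DZ_TX_KER_TYPE_WIDTH) |
	    desc_field(cont != 0, ESF_DZ_TX_KER_CONT_LBN,
		ESF_DZ_TX_KER_CONT_WIDTH) |
	    desc_field(len, ESF_DZ_TX_KER_BYTE_CNT_LBN,
		ESF_DZ_TX_KER_BYTE_CNT_WIDTH) |
	    desc_field(dma_addr, ESF_DZ_TX_KER_BUF_ADDR_LBN,
		ESF_DZ_TX_KER_BUF_ADDR_WIDTH);
}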
+#define ESF_DZ_TX_TSO_OPTION_TYPE_LBN 56 +#define ESF_DZ_TX_TSO_OPTION_TYPE_WIDTH 4 +#define ESE_DZ_TX_TSO_OPTION_DESC_FATSO2B 3 +#define ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A 2 +#define ESE_DZ_TX_TSO_OPTION_DESC_ENCAP 1 +#define ESE_DZ_TX_TSO_OPTION_DESC_NORMAL 0 +#define ESF_DZ_TX_TSO_TCP_FLAGS_LBN 48 +#define ESF_DZ_TX_TSO_TCP_FLAGS_WIDTH 8 +#define ESF_DZ_TX_TSO_IP_ID_LBN 32 +#define ESF_DZ_TX_TSO_IP_ID_WIDTH 16 +#define ESF_DZ_TX_TSO_TCP_SEQNO_LBN 0 +#define ESF_DZ_TX_TSO_TCP_SEQNO_WIDTH 32 + + +/* ES_TX_TSO_V2_DESC_A */ +#define ESF_DZ_TX_DESC_IS_OPT_LBN 63 +#define ESF_DZ_TX_DESC_IS_OPT_WIDTH 1 +#define ESF_DZ_TX_OPTION_TYPE_LBN 60 +#define ESF_DZ_TX_OPTION_TYPE_WIDTH 3 +#define ESE_DZ_TX_OPTION_DESC_TSO 7 +#define ESE_DZ_TX_OPTION_DESC_VLAN 6 +#define ESE_DZ_TX_OPTION_DESC_CRC_CSUM 0 +#define ESF_DZ_TX_TSO_OPTION_TYPE_LBN 56 +#define ESF_DZ_TX_TSO_OPTION_TYPE_WIDTH 4 +#define ESE_DZ_TX_TSO_OPTION_DESC_FATSO2B 3 +#define ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A 2 +#define ESE_DZ_TX_TSO_OPTION_DESC_ENCAP 1 +#define ESE_DZ_TX_TSO_OPTION_DESC_NORMAL 0 +#define ESF_DZ_TX_TSO_IP_ID_LBN 32 +#define ESF_DZ_TX_TSO_IP_ID_WIDTH 16 +#define ESF_DZ_TX_TSO_TCP_SEQNO_LBN 0 +#define ESF_DZ_TX_TSO_TCP_SEQNO_WIDTH 32 + + +/* ES_TX_TSO_V2_DESC_B */ +#define ESF_DZ_TX_DESC_IS_OPT_LBN 63 +#define ESF_DZ_TX_DESC_IS_OPT_WIDTH 1 +#define ESF_DZ_TX_OPTION_TYPE_LBN 60 +#define ESF_DZ_TX_OPTION_TYPE_WIDTH 3 +#define ESE_DZ_TX_OPTION_DESC_TSO 7 +#define ESE_DZ_TX_OPTION_DESC_VLAN 6 +#define ESE_DZ_TX_OPTION_DESC_CRC_CSUM 0 +#define ESF_DZ_TX_TSO_OPTION_TYPE_LBN 56 +#define ESF_DZ_TX_TSO_OPTION_TYPE_WIDTH 4 +#define ESE_DZ_TX_TSO_OPTION_DESC_FATSO2B 3 +#define ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A 2 +#define ESE_DZ_TX_TSO_OPTION_DESC_ENCAP 1 +#define ESE_DZ_TX_TSO_OPTION_DESC_NORMAL 0 +#define ESF_DZ_TX_TSO_TCP_MSS_LBN 32 +#define ESF_DZ_TX_TSO_TCP_MSS_WIDTH 16 +#define ESF_DZ_TX_TSO_OUTER_IPID_LBN 0 +#define ESF_DZ_TX_TSO_OUTER_IPID_WIDTH 16 + + +/* ES_TX_VLAN_DESC */ +#define ESF_DZ_TX_DESC_IS_OPT_LBN 63 +#define ESF_DZ_TX_DESC_IS_OPT_WIDTH 1 +#define ESF_DZ_TX_OPTION_TYPE_LBN 60 +#define ESF_DZ_TX_OPTION_TYPE_WIDTH 3 +#define ESE_DZ_TX_OPTION_DESC_TSO 7 +#define ESE_DZ_TX_OPTION_DESC_VLAN 6 +#define ESE_DZ_TX_OPTION_DESC_CRC_CSUM 0 +#define ESF_DZ_TX_VLAN_OP_LBN 32 +#define ESF_DZ_TX_VLAN_OP_WIDTH 2 +#define ESF_DZ_TX_VLAN_TAG2_LBN 16 +#define ESF_DZ_TX_VLAN_TAG2_WIDTH 16 +#define ESF_DZ_TX_VLAN_TAG1_LBN 0 +#define ESF_DZ_TX_VLAN_TAG1_WIDTH 16 + + +/************************************************************************* + * NOTE: the comment line above marks the end of the autogenerated section + */ + +/* + * The workaround for bug 35388 requires multiplexing writes through + * the ERF_DZ_TX_DESC_WPTR address. 
+ * TX_DESC_UPD: 0ppppppppppp (bit 11 lost) + * EVQ_RPTR: 1000hhhhhhhh, 1001llllllll (split into high and low bits) + * EVQ_TMR: 11mmvvvvvvvv (bits 8:13 of value lost) + */ +#define ER_DD_EVQ_INDIRECT_OFST (ER_DZ_TX_DESC_UPD_REG_OFST + 2 * 4) +#define ER_DD_EVQ_INDIRECT_STEP ER_DZ_TX_DESC_UPD_REG_STEP +#define ERF_DD_EVQ_IND_RPTR_FLAGS_LBN 8 +#define ERF_DD_EVQ_IND_RPTR_FLAGS_WIDTH 4 +#define EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH 8 +#define EFE_DD_EVQ_IND_RPTR_FLAGS_LOW 9 +#define ERF_DD_EVQ_IND_RPTR_LBN 0 +#define ERF_DD_EVQ_IND_RPTR_WIDTH 8 +#define ERF_DD_EVQ_IND_TIMER_FLAGS_LBN 10 +#define ERF_DD_EVQ_IND_TIMER_FLAGS_WIDTH 2 +#define EFE_DD_EVQ_IND_TIMER_FLAGS 3 +#define ERF_DD_EVQ_IND_TIMER_MODE_LBN 8 +#define ERF_DD_EVQ_IND_TIMER_MODE_WIDTH 2 +#define ERF_DD_EVQ_IND_TIMER_VAL_LBN 0 +#define ERF_DD_EVQ_IND_TIMER_VAL_WIDTH 8 + +/* Packed stream magic doorbell command */ +#define ERF_DZ_RX_DESC_MAGIC_DOORBELL_LBN 11 +#define ERF_DZ_RX_DESC_MAGIC_DOORBELL_WIDTH 1 + +#define ERF_DZ_RX_DESC_MAGIC_CMD_LBN 8 +#define ERF_DZ_RX_DESC_MAGIC_CMD_WIDTH 3 +#define ERE_DZ_RX_DESC_MAGIC_CMD_PS_CREDITS 0 + +#define ERF_DZ_RX_DESC_MAGIC_DATA_LBN 0 +#define ERF_DZ_RX_DESC_MAGIC_DATA_WIDTH 8 + +/* Packed stream RX packet prefix */ +#define ES_DZ_PS_RX_PREFIX_TSTAMP_LBN 0 +#define ES_DZ_PS_RX_PREFIX_TSTAMP_WIDTH 32 +#define ES_DZ_PS_RX_PREFIX_CAP_LEN_LBN 32 +#define ES_DZ_PS_RX_PREFIX_CAP_LEN_WIDTH 16 +#define ES_DZ_PS_RX_PREFIX_ORIG_LEN_LBN 48 +#define ES_DZ_PS_RX_PREFIX_ORIG_LEN_WIDTH 16 + +/* Equal stride super-buffer RX packet prefix (see SF-119419-TC) */ +#define ES_EZ_ESSB_RX_PREFIX_LEN 8 +#define ES_EZ_ESSB_RX_PREFIX_DATA_LEN_LBN 0 +#define ES_EZ_ESSB_RX_PREFIX_DATA_LEN_WIDTH 16 +#define ES_EZ_ESSB_RX_PREFIX_MARK_LBN 16 +#define ES_EZ_ESSB_RX_PREFIX_MARK_WIDTH 8 +#define ES_EZ_ESSB_RX_PREFIX_HASH_VALID_LBN 28 +#define ES_EZ_ESSB_RX_PREFIX_HASH_VALID_WIDTH 1 +#define ES_EZ_ESSB_RX_PREFIX_MARK_VALID_LBN 29 +#define ES_EZ_ESSB_RX_PREFIX_MARK_VALID_WIDTH 1 +#define ES_EZ_ESSB_RX_PREFIX_MATCH_FLAG_LBN 30 +#define ES_EZ_ESSB_RX_PREFIX_MATCH_FLAG_WIDTH 1 +#define ES_EZ_ESSB_RX_PREFIX_HASH_LBN 32 +#define ES_EZ_ESSB_RX_PREFIX_HASH_WIDTH 32 + +/* + * An extra flag for the packed stream mode, + * signalling the start of a new buffer + */ +#define ESF_DZ_RX_EV_ROTATE_LBN 53 +#define ESF_DZ_RX_EV_ROTATE_WIDTH 1 + +#ifdef __cplusplus +} +#endif + +#endif /* _SYS_EFX_EF10_REGS_H */ diff --git a/src/spdk/dpdk/drivers/net/sfc/base/efx_regs_mcdi.h b/src/spdk/dpdk/drivers/net/sfc/base/efx_regs_mcdi.h new file mode 100644 index 000000000..ffb9a9b02 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/base/efx_regs_mcdi.h @@ -0,0 +1,20757 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2008-2019 Solarflare Communications Inc. + */ + +/* + * This file is automatically generated. DO NOT EDIT IT. + * To make changes, edit the .yml files in sfregistry under doc/mcdi/ and + * rebuild this file with "make -C doc mcdiheaders". + */ + +#ifndef _SIENA_MC_DRIVER_PCOL_H +#define _SIENA_MC_DRIVER_PCOL_H + + +/* Values to be written into FMCR_CZ_RESET_STATE_REG to control boot. */ +/* Power-on reset state */ +#define MC_FW_STATE_POR (1) +/* If this is set in MC_RESET_STATE_REG then it should be + * possible to jump into IMEM without loading code from flash. */ +#define MC_FW_WARM_BOOT_OK (2) +/* The MC main image has started to boot. */ +#define MC_FW_STATE_BOOTING (4) +/* The Scheduler has started. 
*/ +#define MC_FW_STATE_SCHED (8) +/* If this is set in MC_RESET_STATE_REG then it should be + * possible to jump into IMEM without loading code from flash. + * Unlike a warm boot, assume DMEM has been reloaded, so that + * the MC persistent data must be reinitialised. */ +#define MC_FW_TEPID_BOOT_OK (16) +/* We have entered the main firmware via recovery mode. This + * means that MC persistent data must be reinitialised, but that + * we shouldn't touch PCIe config. */ +#define MC_FW_RECOVERY_MODE_PCIE_INIT_OK (32) +/* BIST state has been initialized */ +#define MC_FW_BIST_INIT_OK (128) + +/* Siena MC shared memmory offsets */ +/* The 'doorbell' addresses are hard-wired to alert the MC when written */ +#define MC_SMEM_P0_DOORBELL_OFST 0x000 +#define MC_SMEM_P1_DOORBELL_OFST 0x004 +/* The rest of these are firmware-defined */ +#define MC_SMEM_P0_PDU_OFST 0x008 +#define MC_SMEM_P1_PDU_OFST 0x108 +#define MC_SMEM_PDU_LEN 0x100 +#define MC_SMEM_P0_PTP_TIME_OFST 0x7f0 +#define MC_SMEM_P0_STATUS_OFST 0x7f8 +#define MC_SMEM_P1_STATUS_OFST 0x7fc + +/* Values to be written to the per-port status dword in shared + * memory on reboot and assert */ +#define MC_STATUS_DWORD_REBOOT (0xb007b007) +#define MC_STATUS_DWORD_ASSERT (0xdeaddead) + +/* Check whether an mcfw version (in host order) belongs to a bootloader */ +#define MC_FW_VERSION_IS_BOOTLOADER(_v) (((_v) >> 16) == 0xb007) + +/* The current version of the MCDI protocol. + * + * Note that the ROM burnt into the card only talks V0, so at the very + * least every driver must support version 0 and MCDI_PCOL_VERSION + */ +#ifdef WITH_MCDI_V2 +#define MCDI_PCOL_VERSION 2 +#else +#define MCDI_PCOL_VERSION 1 +#endif + +/* Unused commands: 0x23, 0x27, 0x30, 0x31 */ + +/* MCDI version 1 + * + * Each MCDI request starts with an MCDI_HEADER, which is a 32bit + * structure, filled in by the client. + * + * 0 7 8 16 20 22 23 24 31 + * | CODE | R | LEN | SEQ | Rsvd | E | R | XFLAGS | + * | | | + * | | \--- Response + * | \------- Error + * \------------------------------ Resync (always set) + * + * The client writes it's request into MC shared memory, and rings the + * doorbell. Each request is completed by either by the MC writting + * back into shared memory, or by writting out an event. + * + * All MCDI commands support completion by shared memory response. Each + * request may also contain additional data (accounted for by HEADER.LEN), + * and some response's may also contain additional data (again, accounted + * for by HEADER.LEN). + * + * Some MCDI commands support completion by event, in which any associated + * response data is included in the event. + * + * The protocol requires one response to be delivered for every request, a + * request should not be sent unless the response for the previous request + * has been received (either by polling shared memory, or by receiving + * an event). 
+ */ + +/** Request/Response structure */ +#define MCDI_HEADER_OFST 0 +#define MCDI_HEADER_CODE_LBN 0 +#define MCDI_HEADER_CODE_WIDTH 7 +#define MCDI_HEADER_RESYNC_LBN 7 +#define MCDI_HEADER_RESYNC_WIDTH 1 +#define MCDI_HEADER_DATALEN_LBN 8 +#define MCDI_HEADER_DATALEN_WIDTH 8 +#define MCDI_HEADER_SEQ_LBN 16 +#define MCDI_HEADER_SEQ_WIDTH 4 +#define MCDI_HEADER_RSVD_LBN 20 +#define MCDI_HEADER_RSVD_WIDTH 1 +#define MCDI_HEADER_NOT_EPOCH_LBN 21 +#define MCDI_HEADER_NOT_EPOCH_WIDTH 1 +#define MCDI_HEADER_ERROR_LBN 22 +#define MCDI_HEADER_ERROR_WIDTH 1 +#define MCDI_HEADER_RESPONSE_LBN 23 +#define MCDI_HEADER_RESPONSE_WIDTH 1 +#define MCDI_HEADER_XFLAGS_LBN 24 +#define MCDI_HEADER_XFLAGS_WIDTH 8 +/* Request response using event */ +#define MCDI_HEADER_XFLAGS_EVREQ 0x01 +/* Request (and signal) early doorbell return */ +#define MCDI_HEADER_XFLAGS_DBRET 0x02 + +/* Maximum number of payload bytes */ +#define MCDI_CTL_SDU_LEN_MAX_V1 0xfc +#define MCDI_CTL_SDU_LEN_MAX_V2 0x400 + +#ifdef WITH_MCDI_V2 +#define MCDI_CTL_SDU_LEN_MAX MCDI_CTL_SDU_LEN_MAX_V2 +#else +#define MCDI_CTL_SDU_LEN_MAX MCDI_CTL_SDU_LEN_MAX_V1 +#endif + + +/* The MC can generate events for two reasons: + * - To advance a shared memory request if XFLAGS_EVREQ was set + * - As a notification (link state, i2c event), controlled + * via MC_CMD_LOG_CTRL + * + * Both events share a common structure: + * + * 0 32 33 36 44 52 60 + * | Data | Cont | Level | Src | Code | Rsvd | + * | + * \ There is another event pending in this notification + * + * If Code==CMDDONE, then the fields are further interpreted as: + * + * - LEVEL==INFO Command succeeded + * - LEVEL==ERR Command failed + * + * 0 8 16 24 32 + * | Seq | Datalen | Errno | Rsvd | + * + * These fields are taken directly out of the standard MCDI header, i.e., + * LEVEL==ERR, Datalen == 0 => Reboot + * + * Events can be squirted out of the UART (using LOG_CTRL) without a + * MCDI header. An event can be distinguished from a MCDI response by + * examining the first byte which is 0xc0. This corresponds to the + * non-existent MCDI command MC_CMD_DEBUG_LOG. + * + * 0 7 8 + * | command | Resync | = 0xc0 + * + * Since the event is written in big-endian byte order, this works + * providing bits 56-63 of the event are 0xc0. + * + * 56 60 63 + * | Rsvd | Code | = 0xc0 + * + * Which means for convenience the event code is 0xc for all MC + * generated events. + */ +#define FSE_AZ_EV_CODE_MCDI_EVRESPONSE 0xc + + + +#define MC_CMD_ERR_CODE_OFST 0 +#define MC_CMD_ERR_PROXY_PENDING_HANDLE_OFST 4 + +/* We define 8 "escape" commands to allow + for command number space extension */ + +#define MC_CMD_CMD_SPACE_ESCAPE_0 0x78 +#define MC_CMD_CMD_SPACE_ESCAPE_1 0x79 +#define MC_CMD_CMD_SPACE_ESCAPE_2 0x7A +#define MC_CMD_CMD_SPACE_ESCAPE_3 0x7B +#define MC_CMD_CMD_SPACE_ESCAPE_4 0x7C +#define MC_CMD_CMD_SPACE_ESCAPE_5 0x7D +#define MC_CMD_CMD_SPACE_ESCAPE_6 0x7E +#define MC_CMD_CMD_SPACE_ESCAPE_7 0x7F + +/* Vectors in the boot ROM */ +/* Point to the copycode entry point. */ +#define SIENA_MC_BOOTROM_COPYCODE_VEC (0x800 - 3 * 0x4) +#define HUNT_MC_BOOTROM_COPYCODE_VEC (0x8000 - 3 * 0x4) +#define MEDFORD_MC_BOOTROM_COPYCODE_VEC (0x10000 - 3 * 0x4) +/* Points to the recovery mode entry point. Misnamed but kept for compatibility. */ +#define SIENA_MC_BOOTROM_NOFLASH_VEC (0x800 - 2 * 0x4) +#define HUNT_MC_BOOTROM_NOFLASH_VEC (0x8000 - 2 * 0x4) +#define MEDFORD_MC_BOOTROM_NOFLASH_VEC (0x10000 - 2 * 0x4) +/* Points to the recovery mode entry point. Same as above, but the right name. 
*/ +#define SIENA_MC_BOOTROM_RECOVERY_VEC (0x800 - 2 * 0x4) +#define HUNT_MC_BOOTROM_RECOVERY_VEC (0x8000 - 2 * 0x4) +#define MEDFORD_MC_BOOTROM_RECOVERY_VEC (0x10000 - 2 * 0x4) + +/* Points to noflash mode entry point. */ +#define MEDFORD_MC_BOOTROM_REAL_NOFLASH_VEC (0x10000 - 4 * 0x4) + +/* The command set exported by the boot ROM (MCDI v0) */ +#define MC_CMD_GET_VERSION_V0_SUPPORTED_FUNCS { \ + (1 << MC_CMD_READ32) | \ + (1 << MC_CMD_WRITE32) | \ + (1 << MC_CMD_COPYCODE) | \ + (1 << MC_CMD_GET_VERSION), \ + 0, 0, 0 } + +#define MC_CMD_SENSOR_INFO_OUT_OFFSET_OFST(_x) \ + (MC_CMD_SENSOR_ENTRY_OFST + (_x)) + +#define MC_CMD_DBI_WRITE_IN_ADDRESS_OFST(n) \ + (MC_CMD_DBI_WRITE_IN_DBIWROP_OFST + \ + MC_CMD_DBIWROP_TYPEDEF_ADDRESS_OFST + \ + (n) * MC_CMD_DBIWROP_TYPEDEF_LEN) + +#define MC_CMD_DBI_WRITE_IN_BYTE_MASK_OFST(n) \ + (MC_CMD_DBI_WRITE_IN_DBIWROP_OFST + \ + MC_CMD_DBIWROP_TYPEDEF_BYTE_MASK_OFST + \ + (n) * MC_CMD_DBIWROP_TYPEDEF_LEN) + +#define MC_CMD_DBI_WRITE_IN_VALUE_OFST(n) \ + (MC_CMD_DBI_WRITE_IN_DBIWROP_OFST + \ + MC_CMD_DBIWROP_TYPEDEF_VALUE_OFST + \ + (n) * MC_CMD_DBIWROP_TYPEDEF_LEN) + +/* This may be ORed with an EVB_PORT_ID_xxx constant to pass a non-default + * stack ID (which must be in the range 1-255) along with an EVB port ID. + */ +#define EVB_STACK_ID(n) (((n) & 0xff) << 16) + + +#ifdef WITH_MCDI_V2 + +/* Version 2 adds an optional argument to error returns: the errno value + * may be followed by the (0-based) number of the first argument that + * could not be processed. + */ +#define MC_CMD_ERR_ARG_OFST 4 + +#endif + +/* MC_CMD_ERR enum: Public MCDI error codes. Error codes that correspond to + * POSIX errnos should use the same numeric values that linux does. Error codes + * specific to Solarflare firmware should use values in the range 0x1000 - + * 0x10ff. The range 0x2000 - 0x20ff is reserved for private error codes (see + * MC_CMD_ERR_PRIV below). + */ +/* enum: Operation not permitted. */ +#define MC_CMD_ERR_EPERM 0x1 +/* enum: Non-existent command target */ +#define MC_CMD_ERR_ENOENT 0x2 +/* enum: assert() has killed the MC */ +#define MC_CMD_ERR_EINTR 0x4 +/* enum: I/O failure */ +#define MC_CMD_ERR_EIO 0x5 +/* enum: Already exists */ +#define MC_CMD_ERR_EEXIST 0x6 +/* enum: Try again */ +#define MC_CMD_ERR_EAGAIN 0xb +/* enum: Out of memory */ +#define MC_CMD_ERR_ENOMEM 0xc +/* enum: Caller does not hold required locks */ +#define MC_CMD_ERR_EACCES 0xd +/* enum: Resource is currently unavailable (e.g. 
lock contention) */ +#define MC_CMD_ERR_EBUSY 0x10 +/* enum: No such device */ +#define MC_CMD_ERR_ENODEV 0x13 +/* enum: Invalid argument to target */ +#define MC_CMD_ERR_EINVAL 0x16 +/* enum: No space */ +#define MC_CMD_ERR_ENOSPC 0x1c +/* enum: Read-only */ +#define MC_CMD_ERR_EROFS 0x1e +/* enum: Broken pipe */ +#define MC_CMD_ERR_EPIPE 0x20 +/* enum: Out of range */ +#define MC_CMD_ERR_ERANGE 0x22 +/* enum: Non-recursive resource is already acquired */ +#define MC_CMD_ERR_EDEADLK 0x23 +/* enum: Operation not implemented */ +#define MC_CMD_ERR_ENOSYS 0x26 +/* enum: Operation timed out */ +#define MC_CMD_ERR_ETIME 0x3e +/* enum: Link has been severed */ +#define MC_CMD_ERR_ENOLINK 0x43 +/* enum: Protocol error */ +#define MC_CMD_ERR_EPROTO 0x47 +/* enum: Bad message */ +#define MC_CMD_ERR_EBADMSG 0x4a +/* enum: Operation not supported */ +#define MC_CMD_ERR_ENOTSUP 0x5f +/* enum: Address not available */ +#define MC_CMD_ERR_EADDRNOTAVAIL 0x63 +/* enum: Not connected */ +#define MC_CMD_ERR_ENOTCONN 0x6b +/* enum: Operation already in progress */ +#define MC_CMD_ERR_EALREADY 0x72 +/* enum: Stale handle. The handle references a resource that no longer exists. + */ +#define MC_CMD_ERR_ESTALE 0x74 +/* enum: Resource allocation failed. */ +#define MC_CMD_ERR_ALLOC_FAIL 0x1000 +/* enum: V-adaptor not found. */ +#define MC_CMD_ERR_NO_VADAPTOR 0x1001 +/* enum: EVB port not found. */ +#define MC_CMD_ERR_NO_EVB_PORT 0x1002 +/* enum: V-switch not found. */ +#define MC_CMD_ERR_NO_VSWITCH 0x1003 +/* enum: Too many VLAN tags. */ +#define MC_CMD_ERR_VLAN_LIMIT 0x1004 +/* enum: Bad PCI function number. */ +#define MC_CMD_ERR_BAD_PCI_FUNC 0x1005 +/* enum: Invalid VLAN mode. */ +#define MC_CMD_ERR_BAD_VLAN_MODE 0x1006 +/* enum: Invalid v-switch type. */ +#define MC_CMD_ERR_BAD_VSWITCH_TYPE 0x1007 +/* enum: Invalid v-port type. */ +#define MC_CMD_ERR_BAD_VPORT_TYPE 0x1008 +/* enum: MAC address exists. */ +#define MC_CMD_ERR_MAC_EXIST 0x1009 +/* enum: Slave core not present */ +#define MC_CMD_ERR_SLAVE_NOT_PRESENT 0x100a +/* enum: The datapath is disabled. */ +#define MC_CMD_ERR_DATAPATH_DISABLED 0x100b +/* enum: The requesting client is not a function */ +#define MC_CMD_ERR_CLIENT_NOT_FN 0x100c +/* enum: The requested operation might require the command to be passed between + * MCs, and thetransport doesn't support that. Should only ever been seen over + * the UART. + */ +#define MC_CMD_ERR_TRANSPORT_NOPROXY 0x100d +/* enum: VLAN tag(s) exists */ +#define MC_CMD_ERR_VLAN_EXIST 0x100e +/* enum: No MAC address assigned to an EVB port */ +#define MC_CMD_ERR_NO_MAC_ADDR 0x100f +/* enum: Notifies the driver that the request has been relayed to an admin + * function for authorization. The driver should wait for a PROXY_RESPONSE + * event and then resend its request. This error code is followed by a 32-bit + * handle that helps matching it with the respective PROXY_RESPONSE event. + */ +#define MC_CMD_ERR_PROXY_PENDING 0x1010 +/* enum: The request cannot be passed for authorization because another request + * from the same function is currently being authorized. The drvier should try + * again later. + */ +#define MC_CMD_ERR_PROXY_INPROGRESS 0x1011 +/* enum: Returned by MC_CMD_PROXY_COMPLETE if the caller is not the function + * that has enabled proxying or BLOCK_INDEX points to a function that doesn't + * await an authorization. + */ +#define MC_CMD_ERR_PROXY_UNEXPECTED 0x1012 +/* enum: This code is currently only used internally in FW. Its meaning is that + * an operation failed due to lack of SR-IOV privilege. 
Normally it is + * translated to EPERM by send_cmd_err(), but it may also be used to trigger + * some special mechanism for handling such case, e.g. to relay the failed + * request to a designated admin function for authorization. + */ +#define MC_CMD_ERR_NO_PRIVILEGE 0x1013 +/* enum: Workaround 26807 could not be turned on/off because some functions + * have already installed filters. See the comment at + * MC_CMD_WORKAROUND_BUG26807. May also returned for other operations such as + * sub-variant switching. + */ +#define MC_CMD_ERR_FILTERS_PRESENT 0x1014 +/* enum: The clock whose frequency you've attempted to set set doesn't exist on + * this NIC + */ +#define MC_CMD_ERR_NO_CLOCK 0x1015 +/* enum: Returned by MC_CMD_TESTASSERT if the action that should have caused an + * assertion failed to do so. + */ +#define MC_CMD_ERR_UNREACHABLE 0x1016 +/* enum: This command needs to be processed in the background but there were no + * resources to do so. Send it again after a command has completed. + */ +#define MC_CMD_ERR_QUEUE_FULL 0x1017 +/* enum: The operation could not be completed because the PCIe link has gone + * away. This error code is never expected to be returned over the TLP + * transport. + */ +#define MC_CMD_ERR_NO_PCIE 0x1018 +/* enum: The operation could not be completed because the datapath has gone + * away. This is distinct from MC_CMD_ERR_DATAPATH_DISABLED in that the + * datapath absence may be temporary + */ +#define MC_CMD_ERR_NO_DATAPATH 0x1019 +/* enum: The operation could not complete because some VIs are allocated */ +#define MC_CMD_ERR_VIS_PRESENT 0x101a +/* enum: The operation could not complete because some PIO buffers are + * allocated + */ +#define MC_CMD_ERR_PIOBUFS_PRESENT 0x101b + +/* MCDI_EVENT structuredef: The structure of an MCDI_EVENT on Siena/EF10 + * platforms + */ +#define MCDI_EVENT_LEN 8 +#define MCDI_EVENT_CONT_LBN 32 +#define MCDI_EVENT_CONT_WIDTH 1 +#define MCDI_EVENT_LEVEL_LBN 33 +#define MCDI_EVENT_LEVEL_WIDTH 3 +/* enum: Info. */ +#define MCDI_EVENT_LEVEL_INFO 0x0 +/* enum: Warning. */ +#define MCDI_EVENT_LEVEL_WARN 0x1 +/* enum: Error. */ +#define MCDI_EVENT_LEVEL_ERR 0x2 +/* enum: Fatal. 
*/ +#define MCDI_EVENT_LEVEL_FATAL 0x3 +#define MCDI_EVENT_DATA_OFST 0 +#define MCDI_EVENT_DATA_LEN 4 +#define MCDI_EVENT_CMDDONE_SEQ_LBN 0 +#define MCDI_EVENT_CMDDONE_SEQ_WIDTH 8 +#define MCDI_EVENT_CMDDONE_DATALEN_LBN 8 +#define MCDI_EVENT_CMDDONE_DATALEN_WIDTH 8 +#define MCDI_EVENT_CMDDONE_ERRNO_LBN 16 +#define MCDI_EVENT_CMDDONE_ERRNO_WIDTH 8 +#define MCDI_EVENT_LINKCHANGE_LP_CAP_LBN 0 +#define MCDI_EVENT_LINKCHANGE_LP_CAP_WIDTH 16 +#define MCDI_EVENT_LINKCHANGE_SPEED_LBN 16 +#define MCDI_EVENT_LINKCHANGE_SPEED_WIDTH 4 +/* enum: Link is down or link speed could not be determined */ +#define MCDI_EVENT_LINKCHANGE_SPEED_UNKNOWN 0x0 +/* enum: 100Mbs */ +#define MCDI_EVENT_LINKCHANGE_SPEED_100M 0x1 +/* enum: 1Gbs */ +#define MCDI_EVENT_LINKCHANGE_SPEED_1G 0x2 +/* enum: 10Gbs */ +#define MCDI_EVENT_LINKCHANGE_SPEED_10G 0x3 +/* enum: 40Gbs */ +#define MCDI_EVENT_LINKCHANGE_SPEED_40G 0x4 +/* enum: 25Gbs */ +#define MCDI_EVENT_LINKCHANGE_SPEED_25G 0x5 +/* enum: 50Gbs */ +#define MCDI_EVENT_LINKCHANGE_SPEED_50G 0x6 +/* enum: 100Gbs */ +#define MCDI_EVENT_LINKCHANGE_SPEED_100G 0x7 +#define MCDI_EVENT_LINKCHANGE_FCNTL_LBN 20 +#define MCDI_EVENT_LINKCHANGE_FCNTL_WIDTH 4 +#define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_LBN 24 +#define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_WIDTH 8 +#define MCDI_EVENT_SENSOREVT_MONITOR_LBN 0 +#define MCDI_EVENT_SENSOREVT_MONITOR_WIDTH 8 +#define MCDI_EVENT_SENSOREVT_STATE_LBN 8 +#define MCDI_EVENT_SENSOREVT_STATE_WIDTH 8 +#define MCDI_EVENT_SENSOREVT_VALUE_LBN 16 +#define MCDI_EVENT_SENSOREVT_VALUE_WIDTH 16 +#define MCDI_EVENT_FWALERT_DATA_LBN 8 +#define MCDI_EVENT_FWALERT_DATA_WIDTH 24 +#define MCDI_EVENT_FWALERT_REASON_LBN 0 +#define MCDI_EVENT_FWALERT_REASON_WIDTH 8 +/* enum: SRAM Access. */ +#define MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS 0x1 +#define MCDI_EVENT_FLR_VF_LBN 0 +#define MCDI_EVENT_FLR_VF_WIDTH 8 +#define MCDI_EVENT_TX_ERR_TXQ_LBN 0 +#define MCDI_EVENT_TX_ERR_TXQ_WIDTH 12 +#define MCDI_EVENT_TX_ERR_TYPE_LBN 12 +#define MCDI_EVENT_TX_ERR_TYPE_WIDTH 4 +/* enum: Descriptor loader reported failure */ +#define MCDI_EVENT_TX_ERR_DL_FAIL 0x1 +/* enum: Descriptor ring empty and no EOP seen for packet */ +#define MCDI_EVENT_TX_ERR_NO_EOP 0x2 +/* enum: Overlength packet */ +#define MCDI_EVENT_TX_ERR_2BIG 0x3 +/* enum: Malformed option descriptor */ +#define MCDI_EVENT_TX_BAD_OPTDESC 0x5 +/* enum: Option descriptor part way through a packet */ +#define MCDI_EVENT_TX_OPT_IN_PKT 0x8 +/* enum: DMA or PIO data access error */ +#define MCDI_EVENT_TX_ERR_BAD_DMA_OR_PIO 0x9 +#define MCDI_EVENT_TX_ERR_INFO_LBN 16 +#define MCDI_EVENT_TX_ERR_INFO_WIDTH 16 +#define MCDI_EVENT_TX_FLUSH_TO_DRIVER_LBN 12 +#define MCDI_EVENT_TX_FLUSH_TO_DRIVER_WIDTH 1 +#define MCDI_EVENT_TX_FLUSH_TXQ_LBN 0 +#define MCDI_EVENT_TX_FLUSH_TXQ_WIDTH 12 +#define MCDI_EVENT_PTP_ERR_TYPE_LBN 0 +#define MCDI_EVENT_PTP_ERR_TYPE_WIDTH 8 +/* enum: PLL lost lock */ +#define MCDI_EVENT_PTP_ERR_PLL_LOST 0x1 +/* enum: Filter overflow (PDMA) */ +#define MCDI_EVENT_PTP_ERR_FILTER 0x2 +/* enum: FIFO overflow (FPGA) */ +#define MCDI_EVENT_PTP_ERR_FIFO 0x3 +/* enum: Merge queue overflow */ +#define MCDI_EVENT_PTP_ERR_QUEUE 0x4 +#define MCDI_EVENT_AOE_ERR_TYPE_LBN 0 +#define MCDI_EVENT_AOE_ERR_TYPE_WIDTH 8 +/* enum: AOE failed to load - no valid image? 
*/ +#define MCDI_EVENT_AOE_NO_LOAD 0x1 +/* enum: AOE FC reported an exception */ +#define MCDI_EVENT_AOE_FC_ASSERT 0x2 +/* enum: AOE FC watchdogged */ +#define MCDI_EVENT_AOE_FC_WATCHDOG 0x3 +/* enum: AOE FC failed to start */ +#define MCDI_EVENT_AOE_FC_NO_START 0x4 +/* enum: Generic AOE fault - likely to have been reported via other means too + * but intended for use by aoex driver. + */ +#define MCDI_EVENT_AOE_FAULT 0x5 +/* enum: Results of reprogramming the CPLD (status in AOE_ERR_DATA) */ +#define MCDI_EVENT_AOE_CPLD_REPROGRAMMED 0x6 +/* enum: AOE loaded successfully */ +#define MCDI_EVENT_AOE_LOAD 0x7 +/* enum: AOE DMA operation completed (LSB of HOST_HANDLE in AOE_ERR_DATA) */ +#define MCDI_EVENT_AOE_DMA 0x8 +/* enum: AOE byteblaster connected/disconnected (Connection status in + * AOE_ERR_DATA) + */ +#define MCDI_EVENT_AOE_BYTEBLASTER 0x9 +/* enum: DDR ECC status update */ +#define MCDI_EVENT_AOE_DDR_ECC_STATUS 0xa +/* enum: PTP status update */ +#define MCDI_EVENT_AOE_PTP_STATUS 0xb +/* enum: FPGA header incorrect */ +#define MCDI_EVENT_AOE_FPGA_LOAD_HEADER_ERR 0xc +/* enum: FPGA Powered Off due to error in powering up FPGA */ +#define MCDI_EVENT_AOE_FPGA_POWER_OFF 0xd +/* enum: AOE FPGA load failed due to MC to MUM communication failure */ +#define MCDI_EVENT_AOE_FPGA_LOAD_FAILED 0xe +/* enum: Notify that invalid flash type detected */ +#define MCDI_EVENT_AOE_INVALID_FPGA_FLASH_TYPE 0xf +/* enum: Notify that the attempt to run FPGA Controller firmware timedout */ +#define MCDI_EVENT_AOE_FC_RUN_TIMEDOUT 0x10 +/* enum: Failure to probe one or more FPGA boot flash chips */ +#define MCDI_EVENT_AOE_FPGA_BOOT_FLASH_INVALID 0x11 +/* enum: FPGA boot-flash contains an invalid image header */ +#define MCDI_EVENT_AOE_FPGA_BOOT_FLASH_HDR_INVALID 0x12 +/* enum: Failed to program clocks required by the FPGA */ +#define MCDI_EVENT_AOE_FPGA_CLOCKS_PROGRAM_FAILED 0x13 +/* enum: Notify that FPGA Controller is alive to serve MCDI requests */ +#define MCDI_EVENT_AOE_FC_RUNNING 0x14 +#define MCDI_EVENT_AOE_ERR_DATA_LBN 8 +#define MCDI_EVENT_AOE_ERR_DATA_WIDTH 8 +#define MCDI_EVENT_AOE_ERR_FC_ASSERT_INFO_LBN 8 +#define MCDI_EVENT_AOE_ERR_FC_ASSERT_INFO_WIDTH 8 +/* enum: FC Assert happened, but the register information is not available */ +#define MCDI_EVENT_AOE_ERR_FC_ASSERT_SEEN 0x0 +/* enum: The register information for FC Assert is ready for readinng by driver + */ +#define MCDI_EVENT_AOE_ERR_FC_ASSERT_DATA_READY 0x1 +#define MCDI_EVENT_AOE_ERR_CODE_FPGA_HEADER_VERIFY_FAILED_LBN 8 +#define MCDI_EVENT_AOE_ERR_CODE_FPGA_HEADER_VERIFY_FAILED_WIDTH 8 +/* enum: Reading from NV failed */ +#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_NV_READ_FAIL 0x0 +/* enum: Invalid Magic Number if FPGA header */ +#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_MAGIC_FAIL 0x1 +/* enum: Invalid Silicon type detected in header */ +#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_SILICON_TYPE 0x2 +/* enum: Unsupported VRatio */ +#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_VRATIO 0x3 +/* enum: Unsupported DDR Type */ +#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_TYPE 0x4 +/* enum: DDR Voltage out of supported range */ +#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_VOLTAGE 0x5 +/* enum: Unsupported DDR speed */ +#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_SPEED 0x6 +/* enum: Unsupported DDR size */ +#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_SIZE 0x7 +/* enum: Unsupported DDR rank */ +#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_RANK 0x8 +#define MCDI_EVENT_AOE_ERR_CODE_INVALID_FPGA_FLASH_TYPE_INFO_LBN 8 +#define 
MCDI_EVENT_AOE_ERR_CODE_INVALID_FPGA_FLASH_TYPE_INFO_WIDTH 8 +/* enum: Primary boot flash */ +#define MCDI_EVENT_AOE_FLASH_TYPE_BOOT_PRIMARY 0x0 +/* enum: Secondary boot flash */ +#define MCDI_EVENT_AOE_FLASH_TYPE_BOOT_SECONDARY 0x1 +#define MCDI_EVENT_AOE_ERR_CODE_FPGA_POWER_OFF_LBN 8 +#define MCDI_EVENT_AOE_ERR_CODE_FPGA_POWER_OFF_WIDTH 8 +#define MCDI_EVENT_AOE_ERR_CODE_FPGA_LOAD_FAILED_LBN 8 +#define MCDI_EVENT_AOE_ERR_CODE_FPGA_LOAD_FAILED_WIDTH 8 +#define MCDI_EVENT_RX_ERR_RXQ_LBN 0 +#define MCDI_EVENT_RX_ERR_RXQ_WIDTH 12 +#define MCDI_EVENT_RX_ERR_TYPE_LBN 12 +#define MCDI_EVENT_RX_ERR_TYPE_WIDTH 4 +#define MCDI_EVENT_RX_ERR_INFO_LBN 16 +#define MCDI_EVENT_RX_ERR_INFO_WIDTH 16 +#define MCDI_EVENT_RX_FLUSH_TO_DRIVER_LBN 12 +#define MCDI_EVENT_RX_FLUSH_TO_DRIVER_WIDTH 1 +#define MCDI_EVENT_RX_FLUSH_RXQ_LBN 0 +#define MCDI_EVENT_RX_FLUSH_RXQ_WIDTH 12 +#define MCDI_EVENT_MC_REBOOT_COUNT_LBN 0 +#define MCDI_EVENT_MC_REBOOT_COUNT_WIDTH 16 +#define MCDI_EVENT_MUM_ERR_TYPE_LBN 0 +#define MCDI_EVENT_MUM_ERR_TYPE_WIDTH 8 +/* enum: MUM failed to load - no valid image? */ +#define MCDI_EVENT_MUM_NO_LOAD 0x1 +/* enum: MUM f/w reported an exception */ +#define MCDI_EVENT_MUM_ASSERT 0x2 +/* enum: MUM not kicking watchdog */ +#define MCDI_EVENT_MUM_WATCHDOG 0x3 +#define MCDI_EVENT_MUM_ERR_DATA_LBN 8 +#define MCDI_EVENT_MUM_ERR_DATA_WIDTH 8 +#define MCDI_EVENT_DBRET_SEQ_LBN 0 +#define MCDI_EVENT_DBRET_SEQ_WIDTH 8 +#define MCDI_EVENT_SUC_ERR_TYPE_LBN 0 +#define MCDI_EVENT_SUC_ERR_TYPE_WIDTH 8 +/* enum: Corrupted or bad SUC application. */ +#define MCDI_EVENT_SUC_BAD_APP 0x1 +/* enum: SUC application reported an assert. */ +#define MCDI_EVENT_SUC_ASSERT 0x2 +/* enum: SUC application reported an exception. */ +#define MCDI_EVENT_SUC_EXCEPTION 0x3 +/* enum: SUC watchdog timer expired. */ +#define MCDI_EVENT_SUC_WATCHDOG 0x4 +#define MCDI_EVENT_SUC_ERR_ADDRESS_LBN 8 +#define MCDI_EVENT_SUC_ERR_ADDRESS_WIDTH 24 +#define MCDI_EVENT_SUC_ERR_DATA_LBN 8 +#define MCDI_EVENT_SUC_ERR_DATA_WIDTH 24 +#define MCDI_EVENT_LINKCHANGE_V2_LP_CAP_LBN 0 +#define MCDI_EVENT_LINKCHANGE_V2_LP_CAP_WIDTH 24 +#define MCDI_EVENT_LINKCHANGE_V2_SPEED_LBN 24 +#define MCDI_EVENT_LINKCHANGE_V2_SPEED_WIDTH 4 +/* Enum values, see field(s): */ +/* MCDI_EVENT/LINKCHANGE_SPEED */ +#define MCDI_EVENT_LINKCHANGE_V2_FLAGS_LINK_UP_LBN 28 +#define MCDI_EVENT_LINKCHANGE_V2_FLAGS_LINK_UP_WIDTH 1 +#define MCDI_EVENT_LINKCHANGE_V2_FCNTL_LBN 29 +#define MCDI_EVENT_LINKCHANGE_V2_FCNTL_WIDTH 3 +/* Enum values, see field(s): */ +/* MC_CMD_SET_MAC/MC_CMD_SET_MAC_IN/FCNTL */ +#define MCDI_EVENT_MODULECHANGE_LD_CAP_LBN 0 +#define MCDI_EVENT_MODULECHANGE_LD_CAP_WIDTH 30 +#define MCDI_EVENT_MODULECHANGE_SEQ_LBN 30 +#define MCDI_EVENT_MODULECHANGE_SEQ_WIDTH 2 +#define MCDI_EVENT_DATA_LBN 0 +#define MCDI_EVENT_DATA_WIDTH 32 +#define MCDI_EVENT_SRC_LBN 36 +#define MCDI_EVENT_SRC_WIDTH 8 +#define MCDI_EVENT_EV_CODE_LBN 60 +#define MCDI_EVENT_EV_CODE_WIDTH 4 +#define MCDI_EVENT_CODE_LBN 44 +#define MCDI_EVENT_CODE_WIDTH 8 +/* enum: Event generated by host software */ +#define MCDI_EVENT_SW_EVENT 0x0 +/* enum: Bad assert. */ +#define MCDI_EVENT_CODE_BADSSERT 0x1 +/* enum: PM Notice. */ +#define MCDI_EVENT_CODE_PMNOTICE 0x2 +/* enum: Command done. */ +#define MCDI_EVENT_CODE_CMDDONE 0x3 +/* enum: Link change. */ +#define MCDI_EVENT_CODE_LINKCHANGE 0x4 +/* enum: Sensor Event. */ +#define MCDI_EVENT_CODE_SENSOREVT 0x5 +/* enum: Schedule error. */ +#define MCDI_EVENT_CODE_SCHEDERR 0x6 +/* enum: Reboot. */ +#define MCDI_EVENT_CODE_REBOOT 0x7 +/* enum: Mac stats DMA. 
*/ +#define MCDI_EVENT_CODE_MAC_STATS_DMA 0x8 +/* enum: Firmware alert. */ +#define MCDI_EVENT_CODE_FWALERT 0x9 +/* enum: Function level reset. */ +#define MCDI_EVENT_CODE_FLR 0xa +/* enum: Transmit error */ +#define MCDI_EVENT_CODE_TX_ERR 0xb +/* enum: Tx flush has completed */ +#define MCDI_EVENT_CODE_TX_FLUSH 0xc +/* enum: PTP packet received timestamp */ +#define MCDI_EVENT_CODE_PTP_RX 0xd +/* enum: PTP NIC failure */ +#define MCDI_EVENT_CODE_PTP_FAULT 0xe +/* enum: PTP PPS event */ +#define MCDI_EVENT_CODE_PTP_PPS 0xf +/* enum: Rx flush has completed */ +#define MCDI_EVENT_CODE_RX_FLUSH 0x10 +/* enum: Receive error */ +#define MCDI_EVENT_CODE_RX_ERR 0x11 +/* enum: AOE fault */ +#define MCDI_EVENT_CODE_AOE 0x12 +/* enum: Network port calibration failed (VCAL). */ +#define MCDI_EVENT_CODE_VCAL_FAIL 0x13 +/* enum: HW PPS event */ +#define MCDI_EVENT_CODE_HW_PPS 0x14 +/* enum: The MC has rebooted (huntington and later, siena uses CODE_REBOOT and + * a different format) + */ +#define MCDI_EVENT_CODE_MC_REBOOT 0x15 +/* enum: the MC has detected a parity error */ +#define MCDI_EVENT_CODE_PAR_ERR 0x16 +/* enum: the MC has detected a correctable error */ +#define MCDI_EVENT_CODE_ECC_CORR_ERR 0x17 +/* enum: the MC has detected an uncorrectable error */ +#define MCDI_EVENT_CODE_ECC_FATAL_ERR 0x18 +/* enum: The MC has entered offline BIST mode */ +#define MCDI_EVENT_CODE_MC_BIST 0x19 +/* enum: PTP tick event providing current NIC time */ +#define MCDI_EVENT_CODE_PTP_TIME 0x1a +/* enum: MUM fault */ +#define MCDI_EVENT_CODE_MUM 0x1b +/* enum: notify the designated PF of a new authorization request */ +#define MCDI_EVENT_CODE_PROXY_REQUEST 0x1c +/* enum: notify a function that awaits an authorization that its request has + * been processed and it may now resend the command + */ +#define MCDI_EVENT_CODE_PROXY_RESPONSE 0x1d +/* enum: MCDI command accepted. New commands can be issued but this command is + * not done yet. + */ +#define MCDI_EVENT_CODE_DBRET 0x1e +/* enum: The MC has detected a fault on the SUC */ +#define MCDI_EVENT_CODE_SUC 0x1f +/* enum: Link change. This event is sent instead of LINKCHANGE if + * WANT_V2_LINKCHANGES was set on driver attach. + */ +#define MCDI_EVENT_CODE_LINKCHANGE_V2 0x20 +/* enum: This event is sent if WANT_V2_LINKCHANGES was set on driver attach + * when the local device capabilities changes. This will usually correspond to + * a module change. + */ +#define MCDI_EVENT_CODE_MODULECHANGE 0x21 +/* enum: Notification that the sensors have been added and/or removed from the + * sensor table. This event includes the new sensor table generation count, if + * this does not match the driver's local copy it is expected to call + * DYNAMIC_SENSORS_LIST to refresh it. + */ +#define MCDI_EVENT_CODE_DYNAMIC_SENSORS_CHANGE 0x22 +/* enum: Notification that a sensor has changed state as a result of a reading + * crossing a threshold. This is sent as two events, the first event contains + * the handle and the sensor's state (in the SRC field), and the second + * contains the value. + */ +#define MCDI_EVENT_CODE_DYNAMIC_SENSORS_STATE_CHANGE 0x23 +/* enum: Artificial event generated by host and posted via MC for test + * purposes. 
+ */ +#define MCDI_EVENT_CODE_TESTGEN 0xfa +#define MCDI_EVENT_CMDDONE_DATA_OFST 0 +#define MCDI_EVENT_CMDDONE_DATA_LEN 4 +#define MCDI_EVENT_CMDDONE_DATA_LBN 0 +#define MCDI_EVENT_CMDDONE_DATA_WIDTH 32 +#define MCDI_EVENT_LINKCHANGE_DATA_OFST 0 +#define MCDI_EVENT_LINKCHANGE_DATA_LEN 4 +#define MCDI_EVENT_LINKCHANGE_DATA_LBN 0 +#define MCDI_EVENT_LINKCHANGE_DATA_WIDTH 32 +#define MCDI_EVENT_SENSOREVT_DATA_OFST 0 +#define MCDI_EVENT_SENSOREVT_DATA_LEN 4 +#define MCDI_EVENT_SENSOREVT_DATA_LBN 0 +#define MCDI_EVENT_SENSOREVT_DATA_WIDTH 32 +#define MCDI_EVENT_MAC_STATS_DMA_GENERATION_OFST 0 +#define MCDI_EVENT_MAC_STATS_DMA_GENERATION_LEN 4 +#define MCDI_EVENT_MAC_STATS_DMA_GENERATION_LBN 0 +#define MCDI_EVENT_MAC_STATS_DMA_GENERATION_WIDTH 32 +#define MCDI_EVENT_TX_ERR_DATA_OFST 0 +#define MCDI_EVENT_TX_ERR_DATA_LEN 4 +#define MCDI_EVENT_TX_ERR_DATA_LBN 0 +#define MCDI_EVENT_TX_ERR_DATA_WIDTH 32 +/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the seconds field of + * timestamp + */ +#define MCDI_EVENT_PTP_SECONDS_OFST 0 +#define MCDI_EVENT_PTP_SECONDS_LEN 4 +#define MCDI_EVENT_PTP_SECONDS_LBN 0 +#define MCDI_EVENT_PTP_SECONDS_WIDTH 32 +/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the major field of + * timestamp + */ +#define MCDI_EVENT_PTP_MAJOR_OFST 0 +#define MCDI_EVENT_PTP_MAJOR_LEN 4 +#define MCDI_EVENT_PTP_MAJOR_LBN 0 +#define MCDI_EVENT_PTP_MAJOR_WIDTH 32 +/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the nanoseconds field + * of timestamp + */ +#define MCDI_EVENT_PTP_NANOSECONDS_OFST 0 +#define MCDI_EVENT_PTP_NANOSECONDS_LEN 4 +#define MCDI_EVENT_PTP_NANOSECONDS_LBN 0 +#define MCDI_EVENT_PTP_NANOSECONDS_WIDTH 32 +/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the minor field of + * timestamp + */ +#define MCDI_EVENT_PTP_MINOR_OFST 0 +#define MCDI_EVENT_PTP_MINOR_LEN 4 +#define MCDI_EVENT_PTP_MINOR_LBN 0 +#define MCDI_EVENT_PTP_MINOR_WIDTH 32 +/* For CODE_PTP_RX events, the lowest four bytes of sourceUUID from PTP packet + */ +#define MCDI_EVENT_PTP_UUID_OFST 0 +#define MCDI_EVENT_PTP_UUID_LEN 4 +#define MCDI_EVENT_PTP_UUID_LBN 0 +#define MCDI_EVENT_PTP_UUID_WIDTH 32 +#define MCDI_EVENT_RX_ERR_DATA_OFST 0 +#define MCDI_EVENT_RX_ERR_DATA_LEN 4 +#define MCDI_EVENT_RX_ERR_DATA_LBN 0 +#define MCDI_EVENT_RX_ERR_DATA_WIDTH 32 +#define MCDI_EVENT_PAR_ERR_DATA_OFST 0 +#define MCDI_EVENT_PAR_ERR_DATA_LEN 4 +#define MCDI_EVENT_PAR_ERR_DATA_LBN 0 +#define MCDI_EVENT_PAR_ERR_DATA_WIDTH 32 +#define MCDI_EVENT_ECC_CORR_ERR_DATA_OFST 0 +#define MCDI_EVENT_ECC_CORR_ERR_DATA_LEN 4 +#define MCDI_EVENT_ECC_CORR_ERR_DATA_LBN 0 +#define MCDI_EVENT_ECC_CORR_ERR_DATA_WIDTH 32 +#define MCDI_EVENT_ECC_FATAL_ERR_DATA_OFST 0 +#define MCDI_EVENT_ECC_FATAL_ERR_DATA_LEN 4 +#define MCDI_EVENT_ECC_FATAL_ERR_DATA_LBN 0 +#define MCDI_EVENT_ECC_FATAL_ERR_DATA_WIDTH 32 +/* For CODE_PTP_TIME events, the major value of the PTP clock */ +#define MCDI_EVENT_PTP_TIME_MAJOR_OFST 0 +#define MCDI_EVENT_PTP_TIME_MAJOR_LEN 4 +#define MCDI_EVENT_PTP_TIME_MAJOR_LBN 0 +#define MCDI_EVENT_PTP_TIME_MAJOR_WIDTH 32 +/* For CODE_PTP_TIME events, bits 19-26 of the minor value of the PTP clock */ +#define MCDI_EVENT_PTP_TIME_MINOR_26_19_LBN 36 +#define MCDI_EVENT_PTP_TIME_MINOR_26_19_WIDTH 8 +/* For CODE_PTP_TIME events, most significant bits of the minor value of the + * PTP clock. This is a more generic equivalent of PTP_TIME_MINOR_26_19. 
+ */ +#define MCDI_EVENT_PTP_TIME_MINOR_MS_8BITS_LBN 36 +#define MCDI_EVENT_PTP_TIME_MINOR_MS_8BITS_WIDTH 8 +/* For CODE_PTP_TIME events where report sync status is enabled, indicates + * whether the NIC clock has ever been set + */ +#define MCDI_EVENT_PTP_TIME_NIC_CLOCK_VALID_LBN 36 +#define MCDI_EVENT_PTP_TIME_NIC_CLOCK_VALID_WIDTH 1 +/* For CODE_PTP_TIME events where report sync status is enabled, indicates + * whether the NIC and System clocks are in sync + */ +#define MCDI_EVENT_PTP_TIME_HOST_NIC_IN_SYNC_LBN 37 +#define MCDI_EVENT_PTP_TIME_HOST_NIC_IN_SYNC_WIDTH 1 +/* For CODE_PTP_TIME events where report sync status is enabled, bits 21-26 of + * the minor value of the PTP clock + */ +#define MCDI_EVENT_PTP_TIME_MINOR_26_21_LBN 38 +#define MCDI_EVENT_PTP_TIME_MINOR_26_21_WIDTH 6 +/* For CODE_PTP_TIME events, most significant bits of the minor value of the + * PTP clock. This is a more generic equivalent of PTP_TIME_MINOR_26_21. + */ +#define MCDI_EVENT_PTP_TIME_MINOR_MS_6BITS_LBN 38 +#define MCDI_EVENT_PTP_TIME_MINOR_MS_6BITS_WIDTH 6 +#define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_OFST 0 +#define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_LEN 4 +#define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_LBN 0 +#define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_WIDTH 32 +#define MCDI_EVENT_PROXY_RESPONSE_HANDLE_OFST 0 +#define MCDI_EVENT_PROXY_RESPONSE_HANDLE_LEN 4 +#define MCDI_EVENT_PROXY_RESPONSE_HANDLE_LBN 0 +#define MCDI_EVENT_PROXY_RESPONSE_HANDLE_WIDTH 32 +/* Zero means that the request has been completed or authorized, and the driver + * should resend it. A non-zero value means that the authorization has been + * denied, and gives the reason. Typically it will be EPERM. + */ +#define MCDI_EVENT_PROXY_RESPONSE_RC_LBN 36 +#define MCDI_EVENT_PROXY_RESPONSE_RC_WIDTH 8 +#define MCDI_EVENT_DBRET_DATA_OFST 0 +#define MCDI_EVENT_DBRET_DATA_LEN 4 +#define MCDI_EVENT_DBRET_DATA_LBN 0 +#define MCDI_EVENT_DBRET_DATA_WIDTH 32 +#define MCDI_EVENT_LINKCHANGE_V2_DATA_OFST 0 +#define MCDI_EVENT_LINKCHANGE_V2_DATA_LEN 4 +#define MCDI_EVENT_LINKCHANGE_V2_DATA_LBN 0 +#define MCDI_EVENT_LINKCHANGE_V2_DATA_WIDTH 32 +#define MCDI_EVENT_MODULECHANGE_DATA_OFST 0 +#define MCDI_EVENT_MODULECHANGE_DATA_LEN 4 +#define MCDI_EVENT_MODULECHANGE_DATA_LBN 0 +#define MCDI_EVENT_MODULECHANGE_DATA_WIDTH 32 +/* The new generation count after a sensor has been added or deleted. */ +#define MCDI_EVENT_DYNAMIC_SENSORS_GENERATION_OFST 0 +#define MCDI_EVENT_DYNAMIC_SENSORS_GENERATION_LEN 4 +#define MCDI_EVENT_DYNAMIC_SENSORS_GENERATION_LBN 0 +#define MCDI_EVENT_DYNAMIC_SENSORS_GENERATION_WIDTH 32 +/* The handle of a dynamic sensor. */ +#define MCDI_EVENT_DYNAMIC_SENSORS_HANDLE_OFST 0 +#define MCDI_EVENT_DYNAMIC_SENSORS_HANDLE_LEN 4 +#define MCDI_EVENT_DYNAMIC_SENSORS_HANDLE_LBN 0 +#define MCDI_EVENT_DYNAMIC_SENSORS_HANDLE_WIDTH 32 +/* The current values of a sensor. */ +#define MCDI_EVENT_DYNAMIC_SENSORS_VALUE_OFST 0 +#define MCDI_EVENT_DYNAMIC_SENSORS_VALUE_LEN 4 +#define MCDI_EVENT_DYNAMIC_SENSORS_VALUE_LBN 0 +#define MCDI_EVENT_DYNAMIC_SENSORS_VALUE_WIDTH 32 +/* The current state of a sensor. */ +#define MCDI_EVENT_DYNAMIC_SENSORS_STATE_LBN 36 +#define MCDI_EVENT_DYNAMIC_SENSORS_STATE_WIDTH 8 + +/* FCDI_EVENT structuredef */ +#define FCDI_EVENT_LEN 8 +#define FCDI_EVENT_CONT_LBN 32 +#define FCDI_EVENT_CONT_WIDTH 1 +#define FCDI_EVENT_LEVEL_LBN 33 +#define FCDI_EVENT_LEVEL_WIDTH 3 +/* enum: Info. */ +#define FCDI_EVENT_LEVEL_INFO 0x0 +/* enum: Warning. */ +#define FCDI_EVENT_LEVEL_WARN 0x1 +/* enum: Error. 
*/ +#define FCDI_EVENT_LEVEL_ERR 0x2 +/* enum: Fatal. */ +#define FCDI_EVENT_LEVEL_FATAL 0x3 +#define FCDI_EVENT_DATA_OFST 0 +#define FCDI_EVENT_DATA_LEN 4 +#define FCDI_EVENT_LINK_STATE_STATUS_LBN 0 +#define FCDI_EVENT_LINK_STATE_STATUS_WIDTH 1 +#define FCDI_EVENT_LINK_DOWN 0x0 /* enum */ +#define FCDI_EVENT_LINK_UP 0x1 /* enum */ +#define FCDI_EVENT_DATA_LBN 0 +#define FCDI_EVENT_DATA_WIDTH 32 +#define FCDI_EVENT_SRC_LBN 36 +#define FCDI_EVENT_SRC_WIDTH 8 +#define FCDI_EVENT_EV_CODE_LBN 60 +#define FCDI_EVENT_EV_CODE_WIDTH 4 +#define FCDI_EVENT_CODE_LBN 44 +#define FCDI_EVENT_CODE_WIDTH 8 +/* enum: The FC was rebooted. */ +#define FCDI_EVENT_CODE_REBOOT 0x1 +/* enum: Bad assert. */ +#define FCDI_EVENT_CODE_ASSERT 0x2 +/* enum: DDR3 test result. */ +#define FCDI_EVENT_CODE_DDR_TEST_RESULT 0x3 +/* enum: Link status. */ +#define FCDI_EVENT_CODE_LINK_STATE 0x4 +/* enum: A timed read is ready to be serviced. */ +#define FCDI_EVENT_CODE_TIMED_READ 0x5 +/* enum: One or more PPS IN events */ +#define FCDI_EVENT_CODE_PPS_IN 0x6 +/* enum: Tick event from PTP clock */ +#define FCDI_EVENT_CODE_PTP_TICK 0x7 +/* enum: ECC error counters */ +#define FCDI_EVENT_CODE_DDR_ECC_STATUS 0x8 +/* enum: Current status of PTP */ +#define FCDI_EVENT_CODE_PTP_STATUS 0x9 +/* enum: Port id config to map MC-FC port idx */ +#define FCDI_EVENT_CODE_PORT_CONFIG 0xa +/* enum: Boot result or error code */ +#define FCDI_EVENT_CODE_BOOT_RESULT 0xb +#define FCDI_EVENT_REBOOT_SRC_LBN 36 +#define FCDI_EVENT_REBOOT_SRC_WIDTH 8 +#define FCDI_EVENT_REBOOT_FC_FW 0x0 /* enum */ +#define FCDI_EVENT_REBOOT_FC_BOOTLOADER 0x1 /* enum */ +#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_OFST 0 +#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_LEN 4 +#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_LBN 0 +#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_WIDTH 32 +#define FCDI_EVENT_ASSERT_TYPE_LBN 36 +#define FCDI_EVENT_ASSERT_TYPE_WIDTH 8 +#define FCDI_EVENT_DDR_TEST_RESULT_STATUS_CODE_LBN 36 +#define FCDI_EVENT_DDR_TEST_RESULT_STATUS_CODE_WIDTH 8 +#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_OFST 0 +#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_LEN 4 +#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_LBN 0 +#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_WIDTH 32 +#define FCDI_EVENT_LINK_STATE_DATA_OFST 0 +#define FCDI_EVENT_LINK_STATE_DATA_LEN 4 +#define FCDI_EVENT_LINK_STATE_DATA_LBN 0 +#define FCDI_EVENT_LINK_STATE_DATA_WIDTH 32 +#define FCDI_EVENT_PTP_STATE_OFST 0 +#define FCDI_EVENT_PTP_STATE_LEN 4 +#define FCDI_EVENT_PTP_UNDEFINED 0x0 /* enum */ +#define FCDI_EVENT_PTP_SETUP_FAILED 0x1 /* enum */ +#define FCDI_EVENT_PTP_OPERATIONAL 0x2 /* enum */ +#define FCDI_EVENT_PTP_STATE_LBN 0 +#define FCDI_EVENT_PTP_STATE_WIDTH 32 +#define FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_LBN 36 +#define FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_WIDTH 8 +#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_OFST 0 +#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_LEN 4 +#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_LBN 0 +#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_WIDTH 32 +/* Index of MC port being referred to */ +#define FCDI_EVENT_PORT_CONFIG_SRC_LBN 36 +#define FCDI_EVENT_PORT_CONFIG_SRC_WIDTH 8 +/* FC Port index that matches the MC port index in SRC */ +#define FCDI_EVENT_PORT_CONFIG_DATA_OFST 0 +#define FCDI_EVENT_PORT_CONFIG_DATA_LEN 4 +#define FCDI_EVENT_PORT_CONFIG_DATA_LBN 0 +#define FCDI_EVENT_PORT_CONFIG_DATA_WIDTH 32 +#define FCDI_EVENT_BOOT_RESULT_OFST 0 +#define FCDI_EVENT_BOOT_RESULT_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_AOE/MC_CMD_AOE_OUT_INFO/FC_BOOT_RESULT */ +#define FCDI_EVENT_BOOT_RESULT_LBN 0 +#define 
FCDI_EVENT_BOOT_RESULT_WIDTH 32 + +/* FCDI_EXTENDED_EVENT_PPS structuredef: Extended FCDI event to send PPS events + * to the MC. Note that this structure is overlaid over a normal FCDI event + * such that bits 32-63 containing event code, level, source etc remain the + * same. In this case the data field of the header is defined to be the + * number of timestamps + */ +#define FCDI_EXTENDED_EVENT_PPS_LENMIN 16 +#define FCDI_EXTENDED_EVENT_PPS_LENMAX 248 +#define FCDI_EXTENDED_EVENT_PPS_LENMAX_MCDI2 1016 +#define FCDI_EXTENDED_EVENT_PPS_LEN(num) (8+8*(num)) +#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_NUM(len) (((len)-8)/8) +/* Number of timestamps following */ +#define FCDI_EXTENDED_EVENT_PPS_COUNT_OFST 0 +#define FCDI_EXTENDED_EVENT_PPS_COUNT_LEN 4 +#define FCDI_EXTENDED_EVENT_PPS_COUNT_LBN 0 +#define FCDI_EXTENDED_EVENT_PPS_COUNT_WIDTH 32 +/* Seconds field of a timestamp record */ +#define FCDI_EXTENDED_EVENT_PPS_SECONDS_OFST 8 +#define FCDI_EXTENDED_EVENT_PPS_SECONDS_LEN 4 +#define FCDI_EXTENDED_EVENT_PPS_SECONDS_LBN 64 +#define FCDI_EXTENDED_EVENT_PPS_SECONDS_WIDTH 32 +/* Nanoseconds field of a timestamp record */ +#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_OFST 12 +#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_LEN 4 +#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_LBN 96 +#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_WIDTH 32 +/* Timestamp records comprising the event */ +#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_OFST 8 +#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LEN 8 +#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LO_OFST 8 +#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_HI_OFST 12 +#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_MINNUM 1 +#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_MAXNUM 30 +#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_MAXNUM_MCDI2 126 +#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LBN 64 +#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_WIDTH 64 + +/* MUM_EVENT structuredef */ +#define MUM_EVENT_LEN 8 +#define MUM_EVENT_CONT_LBN 32 +#define MUM_EVENT_CONT_WIDTH 1 +#define MUM_EVENT_LEVEL_LBN 33 +#define MUM_EVENT_LEVEL_WIDTH 3 +/* enum: Info. */ +#define MUM_EVENT_LEVEL_INFO 0x0 +/* enum: Warning. */ +#define MUM_EVENT_LEVEL_WARN 0x1 +/* enum: Error. */ +#define MUM_EVENT_LEVEL_ERR 0x2 +/* enum: Fatal. */ +#define MUM_EVENT_LEVEL_FATAL 0x3 +#define MUM_EVENT_DATA_OFST 0 +#define MUM_EVENT_DATA_LEN 4 +#define MUM_EVENT_SENSOR_ID_LBN 0 +#define MUM_EVENT_SENSOR_ID_WIDTH 8 +/* Enum values, see field(s): */ +/* MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */ +#define MUM_EVENT_SENSOR_STATE_LBN 8 +#define MUM_EVENT_SENSOR_STATE_WIDTH 8 +#define MUM_EVENT_PORT_PHY_READY_LBN 0 +#define MUM_EVENT_PORT_PHY_READY_WIDTH 1 +#define MUM_EVENT_PORT_PHY_LINK_UP_LBN 1 +#define MUM_EVENT_PORT_PHY_LINK_UP_WIDTH 1 +#define MUM_EVENT_PORT_PHY_TX_LOL_LBN 2 +#define MUM_EVENT_PORT_PHY_TX_LOL_WIDTH 1 +#define MUM_EVENT_PORT_PHY_RX_LOL_LBN 3 +#define MUM_EVENT_PORT_PHY_RX_LOL_WIDTH 1 +#define MUM_EVENT_PORT_PHY_TX_LOS_LBN 4 +#define MUM_EVENT_PORT_PHY_TX_LOS_WIDTH 1 +#define MUM_EVENT_PORT_PHY_RX_LOS_LBN 5 +#define MUM_EVENT_PORT_PHY_RX_LOS_WIDTH 1 +#define MUM_EVENT_PORT_PHY_TX_FAULT_LBN 6 +#define MUM_EVENT_PORT_PHY_TX_FAULT_WIDTH 1 +#define MUM_EVENT_DATA_LBN 0 +#define MUM_EVENT_DATA_WIDTH 32 +#define MUM_EVENT_SRC_LBN 36 +#define MUM_EVENT_SRC_WIDTH 8 +#define MUM_EVENT_EV_CODE_LBN 60 +#define MUM_EVENT_EV_CODE_WIDTH 4 +#define MUM_EVENT_CODE_LBN 44 +#define MUM_EVENT_CODE_WIDTH 8 +/* enum: The MUM was rebooted. */ +#define MUM_EVENT_CODE_REBOOT 0x1 +/* enum: Bad assert. 
*/ +#define MUM_EVENT_CODE_ASSERT 0x2 +/* enum: Sensor failure. */ +#define MUM_EVENT_CODE_SENSOR 0x3 +/* enum: Link fault has been asserted, or has cleared. */ +#define MUM_EVENT_CODE_QSFP_LASI_INTERRUPT 0x4 +#define MUM_EVENT_SENSOR_DATA_OFST 0 +#define MUM_EVENT_SENSOR_DATA_LEN 4 +#define MUM_EVENT_SENSOR_DATA_LBN 0 +#define MUM_EVENT_SENSOR_DATA_WIDTH 32 +#define MUM_EVENT_PORT_PHY_FLAGS_OFST 0 +#define MUM_EVENT_PORT_PHY_FLAGS_LEN 4 +#define MUM_EVENT_PORT_PHY_FLAGS_LBN 0 +#define MUM_EVENT_PORT_PHY_FLAGS_WIDTH 32 +#define MUM_EVENT_PORT_PHY_COPPER_LEN_OFST 0 +#define MUM_EVENT_PORT_PHY_COPPER_LEN_LEN 4 +#define MUM_EVENT_PORT_PHY_COPPER_LEN_LBN 0 +#define MUM_EVENT_PORT_PHY_COPPER_LEN_WIDTH 32 +#define MUM_EVENT_PORT_PHY_CAPS_OFST 0 +#define MUM_EVENT_PORT_PHY_CAPS_LEN 4 +#define MUM_EVENT_PORT_PHY_CAPS_LBN 0 +#define MUM_EVENT_PORT_PHY_CAPS_WIDTH 32 +#define MUM_EVENT_PORT_PHY_TECH_OFST 0 +#define MUM_EVENT_PORT_PHY_TECH_LEN 4 +#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_UNKNOWN 0x0 /* enum */ +#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_OPTICAL 0x1 /* enum */ +#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_PASSIVE 0x2 /* enum */ +#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_PASSIVE_EQUALIZED 0x3 /* enum */ +#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_ACTIVE_LIMITING 0x4 /* enum */ +#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_ACTIVE_LINEAR 0x5 /* enum */ +#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_BASE_T 0x6 /* enum */ +#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_LOOPBACK_PASSIVE 0x7 /* enum */ +#define MUM_EVENT_PORT_PHY_TECH_LBN 0 +#define MUM_EVENT_PORT_PHY_TECH_WIDTH 32 +#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_LBN 36 +#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_WIDTH 4 +#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_FLAGS 0x0 /* enum */ +#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_COPPER_LEN 0x1 /* enum */ +#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_CAPS 0x2 /* enum */ +#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_TECH 0x3 /* enum */ +#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_MAX 0x4 /* enum */ +#define MUM_EVENT_PORT_PHY_SRC_PORT_NO_LBN 40 +#define MUM_EVENT_PORT_PHY_SRC_PORT_NO_WIDTH 4 + + +/***********************************/ +/* MC_CMD_READ32 + * Read multiple 32byte words from MC memory. Note - this command really + * belongs to INSECURE category but is required by shmboot. The command handler + * has additional checks to reject insecure calls. + */ +#define MC_CMD_READ32 0x1 +#undef MC_CMD_0x1_PRIVILEGE_CTG + +#define MC_CMD_0x1_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_READ32_IN msgrequest */ +#define MC_CMD_READ32_IN_LEN 8 +#define MC_CMD_READ32_IN_ADDR_OFST 0 +#define MC_CMD_READ32_IN_ADDR_LEN 4 +#define MC_CMD_READ32_IN_NUMWORDS_OFST 4 +#define MC_CMD_READ32_IN_NUMWORDS_LEN 4 + +/* MC_CMD_READ32_OUT msgresponse */ +#define MC_CMD_READ32_OUT_LENMIN 4 +#define MC_CMD_READ32_OUT_LENMAX 252 +#define MC_CMD_READ32_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_READ32_OUT_LEN(num) (0+4*(num)) +#define MC_CMD_READ32_OUT_BUFFER_NUM(len) (((len)-0)/4) +#define MC_CMD_READ32_OUT_BUFFER_OFST 0 +#define MC_CMD_READ32_OUT_BUFFER_LEN 4 +#define MC_CMD_READ32_OUT_BUFFER_MINNUM 1 +#define MC_CMD_READ32_OUT_BUFFER_MAXNUM 63 +#define MC_CMD_READ32_OUT_BUFFER_MAXNUM_MCDI2 255 + + +/***********************************/ +/* MC_CMD_WRITE32 + * Write multiple 32byte words to MC memory. 
+ */ +#define MC_CMD_WRITE32 0x2 +#undef MC_CMD_0x2_PRIVILEGE_CTG + +#define MC_CMD_0x2_PRIVILEGE_CTG SRIOV_CTG_INSECURE + +/* MC_CMD_WRITE32_IN msgrequest */ +#define MC_CMD_WRITE32_IN_LENMIN 8 +#define MC_CMD_WRITE32_IN_LENMAX 252 +#define MC_CMD_WRITE32_IN_LENMAX_MCDI2 1020 +#define MC_CMD_WRITE32_IN_LEN(num) (4+4*(num)) +#define MC_CMD_WRITE32_IN_BUFFER_NUM(len) (((len)-4)/4) +#define MC_CMD_WRITE32_IN_ADDR_OFST 0 +#define MC_CMD_WRITE32_IN_ADDR_LEN 4 +#define MC_CMD_WRITE32_IN_BUFFER_OFST 4 +#define MC_CMD_WRITE32_IN_BUFFER_LEN 4 +#define MC_CMD_WRITE32_IN_BUFFER_MINNUM 1 +#define MC_CMD_WRITE32_IN_BUFFER_MAXNUM 62 +#define MC_CMD_WRITE32_IN_BUFFER_MAXNUM_MCDI2 254 + +/* MC_CMD_WRITE32_OUT msgresponse */ +#define MC_CMD_WRITE32_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_COPYCODE + * Copy MC code between two locations and jump. Note - this command really + * belongs to INSECURE category but is required by shmboot. The command handler + * has additional checks to reject insecure calls. + */ +#define MC_CMD_COPYCODE 0x3 +#undef MC_CMD_0x3_PRIVILEGE_CTG + +#define MC_CMD_0x3_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND + +/* MC_CMD_COPYCODE_IN msgrequest */ +#define MC_CMD_COPYCODE_IN_LEN 16 +/* Source address + * + * The main image should be entered via a copy of a single word from and to a + * magic address, which controls various aspects of the boot. The magic address + * is a bitfield, with each bit as documented below. + */ +#define MC_CMD_COPYCODE_IN_SRC_ADDR_OFST 0 +#define MC_CMD_COPYCODE_IN_SRC_ADDR_LEN 4 +/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT (see below) */ +#define MC_CMD_COPYCODE_HUNT_NO_MAGIC_ADDR 0x10000 +/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT and + * BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED (see below) + */ +#define MC_CMD_COPYCODE_HUNT_NO_DATAPATH_MAGIC_ADDR 0x1d0d0 +/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT, + * BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED and BOOT_MAGIC_IGNORE_CONFIG (see + * below) + */ +#define MC_CMD_COPYCODE_HUNT_IGNORE_CONFIG_MAGIC_ADDR 0x1badc +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_PRESENT_LBN 17 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_PRESENT_WIDTH 1 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED_LBN 2 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED_WIDTH 1 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_IGNORE_CONFIG_LBN 3 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_IGNORE_CONFIG_WIDTH 1 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SKIP_BOOT_ICORE_SYNC_LBN 4 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SKIP_BOOT_ICORE_SYNC_WIDTH 1 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_FORCE_STANDALONE_LBN 5 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_FORCE_STANDALONE_WIDTH 1 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_DISABLE_XIP_LBN 6 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_DISABLE_XIP_WIDTH 1 +/* Destination address */ +#define MC_CMD_COPYCODE_IN_DEST_ADDR_OFST 4 +#define MC_CMD_COPYCODE_IN_DEST_ADDR_LEN 4 +#define MC_CMD_COPYCODE_IN_NUMWORDS_OFST 8 +#define MC_CMD_COPYCODE_IN_NUMWORDS_LEN 4 +/* Address of where to jump after copy. */ +#define MC_CMD_COPYCODE_IN_JUMP_OFST 12 +#define MC_CMD_COPYCODE_IN_JUMP_LEN 4 +/* enum: Control should return to the caller rather than jumping */ +#define MC_CMD_COPYCODE_JUMP_NONE 0x1 + +/* MC_CMD_COPYCODE_OUT msgresponse */ +#define MC_CMD_COPYCODE_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_SET_FUNC + * Select function for function-specific commands. 
+ */ +#define MC_CMD_SET_FUNC 0x4 +#undef MC_CMD_0x4_PRIVILEGE_CTG + +#define MC_CMD_0x4_PRIVILEGE_CTG SRIOV_CTG_INSECURE + +/* MC_CMD_SET_FUNC_IN msgrequest */ +#define MC_CMD_SET_FUNC_IN_LEN 4 +/* Set function */ +#define MC_CMD_SET_FUNC_IN_FUNC_OFST 0 +#define MC_CMD_SET_FUNC_IN_FUNC_LEN 4 + +/* MC_CMD_SET_FUNC_OUT msgresponse */ +#define MC_CMD_SET_FUNC_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_GET_BOOT_STATUS + * Get the instruction address from which the MC booted. + */ +#define MC_CMD_GET_BOOT_STATUS 0x5 +#undef MC_CMD_0x5_PRIVILEGE_CTG + +#define MC_CMD_0x5_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_BOOT_STATUS_IN msgrequest */ +#define MC_CMD_GET_BOOT_STATUS_IN_LEN 0 + +/* MC_CMD_GET_BOOT_STATUS_OUT msgresponse */ +#define MC_CMD_GET_BOOT_STATUS_OUT_LEN 8 +/* ?? */ +#define MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_OFST 0 +#define MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_LEN 4 +/* enum: indicates that the MC wasn't flash booted */ +#define MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_NULL 0xdeadbeef +#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_OFST 4 +#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_LEN 4 +#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_LBN 0 +#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_WIDTH 1 +#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_PRIMARY_LBN 1 +#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_PRIMARY_WIDTH 1 +#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_BACKUP_LBN 2 +#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_BACKUP_WIDTH 1 + + +/***********************************/ +/* MC_CMD_GET_ASSERTS + * Get (and optionally clear) the current assertion status. Only + * OUT.GLOBAL_FLAGS is guaranteed to exist in the completion payload. The other + * fields will only be present if OUT.GLOBAL_FLAGS != NO_FAILS + */ +#define MC_CMD_GET_ASSERTS 0x6 +#undef MC_CMD_0x6_PRIVILEGE_CTG + +#define MC_CMD_0x6_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_GET_ASSERTS_IN msgrequest */ +#define MC_CMD_GET_ASSERTS_IN_LEN 4 +/* Set to clear assertion */ +#define MC_CMD_GET_ASSERTS_IN_CLEAR_OFST 0 +#define MC_CMD_GET_ASSERTS_IN_CLEAR_LEN 4 + +/* MC_CMD_GET_ASSERTS_OUT msgresponse */ +#define MC_CMD_GET_ASSERTS_OUT_LEN 140 +/* Assertion status flag. */ +#define MC_CMD_GET_ASSERTS_OUT_GLOBAL_FLAGS_OFST 0 +#define MC_CMD_GET_ASSERTS_OUT_GLOBAL_FLAGS_LEN 4 +/* enum: No assertions have failed. */ +#define MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS 0x1 +/* enum: A system-level assertion has failed. */ +#define MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL 0x2 +/* enum: A thread-level assertion has failed. */ +#define MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL 0x3 +/* enum: The system was reset by the watchdog. */ +#define MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED 0x4 +/* enum: An illegal address trap stopped the system (huntington and later) */ +#define MC_CMD_GET_ASSERTS_FLAGS_ADDR_TRAP 0x5 +/* Failing PC value */ +#define MC_CMD_GET_ASSERTS_OUT_SAVED_PC_OFFS_OFST 4 +#define MC_CMD_GET_ASSERTS_OUT_SAVED_PC_OFFS_LEN 4 +/* Saved GP regs */ +#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST 8 +#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_LEN 4 +#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM 31 +/* enum: A magic value hinting that the value in this register at the time of + * the failure has likely been lost. 
+ */ +#define MC_CMD_GET_ASSERTS_REG_NO_DATA 0xda7a1057 +/* Failing thread address */ +#define MC_CMD_GET_ASSERTS_OUT_THREAD_OFFS_OFST 132 +#define MC_CMD_GET_ASSERTS_OUT_THREAD_OFFS_LEN 4 +#define MC_CMD_GET_ASSERTS_OUT_RESERVED_OFST 136 +#define MC_CMD_GET_ASSERTS_OUT_RESERVED_LEN 4 + +/* MC_CMD_GET_ASSERTS_OUT_V2 msgresponse: Extended response for MicroBlaze CPUs + * found on Riverhead designs + */ +#define MC_CMD_GET_ASSERTS_OUT_V2_LEN 240 +/* Assertion status flag. */ +#define MC_CMD_GET_ASSERTS_OUT_V2_GLOBAL_FLAGS_OFST 0 +#define MC_CMD_GET_ASSERTS_OUT_V2_GLOBAL_FLAGS_LEN 4 +/* enum: No assertions have failed. */ +/* MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS 0x1 */ +/* enum: A system-level assertion has failed. */ +/* MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL 0x2 */ +/* enum: A thread-level assertion has failed. */ +/* MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL 0x3 */ +/* enum: The system was reset by the watchdog. */ +/* MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED 0x4 */ +/* enum: An illegal address trap stopped the system (huntington and later) */ +/* MC_CMD_GET_ASSERTS_FLAGS_ADDR_TRAP 0x5 */ +/* Failing PC value */ +#define MC_CMD_GET_ASSERTS_OUT_V2_SAVED_PC_OFFS_OFST 4 +#define MC_CMD_GET_ASSERTS_OUT_V2_SAVED_PC_OFFS_LEN 4 +/* Saved GP regs */ +#define MC_CMD_GET_ASSERTS_OUT_V2_GP_REGS_OFFS_OFST 8 +#define MC_CMD_GET_ASSERTS_OUT_V2_GP_REGS_OFFS_LEN 4 +#define MC_CMD_GET_ASSERTS_OUT_V2_GP_REGS_OFFS_NUM 31 +/* enum: A magic value hinting that the value in this register at the time of + * the failure has likely been lost. + */ +/* MC_CMD_GET_ASSERTS_REG_NO_DATA 0xda7a1057 */ +/* Failing thread address */ +#define MC_CMD_GET_ASSERTS_OUT_V2_THREAD_OFFS_OFST 132 +#define MC_CMD_GET_ASSERTS_OUT_V2_THREAD_OFFS_LEN 4 +#define MC_CMD_GET_ASSERTS_OUT_V2_RESERVED_OFST 136 +#define MC_CMD_GET_ASSERTS_OUT_V2_RESERVED_LEN 4 +/* Saved Special Function Registers */ +#define MC_CMD_GET_ASSERTS_OUT_V2_SF_REGS_OFFS_OFST 136 +#define MC_CMD_GET_ASSERTS_OUT_V2_SF_REGS_OFFS_LEN 4 +#define MC_CMD_GET_ASSERTS_OUT_V2_SF_REGS_OFFS_NUM 26 + +/* MC_CMD_GET_ASSERTS_OUT_V3 msgresponse: Extended response with asserted + * firmware version information + */ +#define MC_CMD_GET_ASSERTS_OUT_V3_LEN 360 +/* Assertion status flag. */ +#define MC_CMD_GET_ASSERTS_OUT_V3_GLOBAL_FLAGS_OFST 0 +#define MC_CMD_GET_ASSERTS_OUT_V3_GLOBAL_FLAGS_LEN 4 +/* enum: No assertions have failed. */ +/* MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS 0x1 */ +/* enum: A system-level assertion has failed. */ +/* MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL 0x2 */ +/* enum: A thread-level assertion has failed. */ +/* MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL 0x3 */ +/* enum: The system was reset by the watchdog. */ +/* MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED 0x4 */ +/* enum: An illegal address trap stopped the system (huntington and later) */ +/* MC_CMD_GET_ASSERTS_FLAGS_ADDR_TRAP 0x5 */ +/* Failing PC value */ +#define MC_CMD_GET_ASSERTS_OUT_V3_SAVED_PC_OFFS_OFST 4 +#define MC_CMD_GET_ASSERTS_OUT_V3_SAVED_PC_OFFS_LEN 4 +/* Saved GP regs */ +#define MC_CMD_GET_ASSERTS_OUT_V3_GP_REGS_OFFS_OFST 8 +#define MC_CMD_GET_ASSERTS_OUT_V3_GP_REGS_OFFS_LEN 4 +#define MC_CMD_GET_ASSERTS_OUT_V3_GP_REGS_OFFS_NUM 31 +/* enum: A magic value hinting that the value in this register at the time of + * the failure has likely been lost. 
+ */ +/* MC_CMD_GET_ASSERTS_REG_NO_DATA 0xda7a1057 */ +/* Failing thread address */ +#define MC_CMD_GET_ASSERTS_OUT_V3_THREAD_OFFS_OFST 132 +#define MC_CMD_GET_ASSERTS_OUT_V3_THREAD_OFFS_LEN 4 +#define MC_CMD_GET_ASSERTS_OUT_V3_RESERVED_OFST 136 +#define MC_CMD_GET_ASSERTS_OUT_V3_RESERVED_LEN 4 +/* Saved Special Function Registers */ +#define MC_CMD_GET_ASSERTS_OUT_V3_SF_REGS_OFFS_OFST 136 +#define MC_CMD_GET_ASSERTS_OUT_V3_SF_REGS_OFFS_LEN 4 +#define MC_CMD_GET_ASSERTS_OUT_V3_SF_REGS_OFFS_NUM 26 +/* MC firmware unique build ID (as binary SHA-1 value) */ +#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_ID_OFST 240 +#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_ID_LEN 20 +/* MC firmware build date (as Unix timestamp) */ +#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_TIMESTAMP_OFST 260 +#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_TIMESTAMP_LEN 8 +#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_TIMESTAMP_LO_OFST 260 +#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_TIMESTAMP_HI_OFST 264 +/* MC firmware version number */ +#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_VERSION_OFST 268 +#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_VERSION_LEN 8 +#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_VERSION_LO_OFST 268 +#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_VERSION_HI_OFST 272 +/* MC firmware security level */ +#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_SECURITY_LEVEL_OFST 276 +#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_SECURITY_LEVEL_LEN 4 +/* MC firmware extra version info (as null-terminated US-ASCII string) */ +#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_EXTRA_INFO_OFST 280 +#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_EXTRA_INFO_LEN 16 +/* MC firmware build name (as null-terminated US-ASCII string) */ +#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_NAME_OFST 296 +#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_NAME_LEN 64 + + +/***********************************/ +/* MC_CMD_LOG_CTRL + * Configure the output stream for log events such as link state changes, + * sensor notifications and MCDI completions + */ +#define MC_CMD_LOG_CTRL 0x7 +#undef MC_CMD_0x7_PRIVILEGE_CTG + +#define MC_CMD_0x7_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_LOG_CTRL_IN msgrequest */ +#define MC_CMD_LOG_CTRL_IN_LEN 8 +/* Log destination */ +#define MC_CMD_LOG_CTRL_IN_LOG_DEST_OFST 0 +#define MC_CMD_LOG_CTRL_IN_LOG_DEST_LEN 4 +/* enum: UART. */ +#define MC_CMD_LOG_CTRL_IN_LOG_DEST_UART 0x1 +/* enum: Event queue. */ +#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ 0x2 +/* Legacy argument. Must be zero. */ +#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ_OFST 4 +#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ_LEN 4 + +/* MC_CMD_LOG_CTRL_OUT msgresponse */ +#define MC_CMD_LOG_CTRL_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_GET_VERSION + * Get version information about adapter components. + */ +#define MC_CMD_GET_VERSION 0x8 +#undef MC_CMD_0x8_PRIVILEGE_CTG + +#define MC_CMD_0x8_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_VERSION_IN msgrequest */ +#define MC_CMD_GET_VERSION_IN_LEN 0 + +/* MC_CMD_GET_VERSION_EXT_IN msgrequest: Asks for the extended version */ +#define MC_CMD_GET_VERSION_EXT_IN_LEN 4 +/* placeholder, set to 0 */ +#define MC_CMD_GET_VERSION_EXT_IN_EXT_FLAGS_OFST 0 +#define MC_CMD_GET_VERSION_EXT_IN_EXT_FLAGS_LEN 4 + +/* MC_CMD_GET_VERSION_V0_OUT msgresponse: deprecated version format */ +#define MC_CMD_GET_VERSION_V0_OUT_LEN 4 +#define MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 +#define MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 +/* enum: Reserved version number to indicate "any" version. 
*/ +#define MC_CMD_GET_VERSION_OUT_FIRMWARE_ANY 0xffffffff +/* enum: Bootrom version value for Siena. */ +#define MC_CMD_GET_VERSION_OUT_FIRMWARE_SIENA_BOOTROM 0xb0070000 +/* enum: Bootrom version value for Huntington. */ +#define MC_CMD_GET_VERSION_OUT_FIRMWARE_HUNT_BOOTROM 0xb0070001 +/* enum: Bootrom version value for Medford2. */ +#define MC_CMD_GET_VERSION_OUT_FIRMWARE_MEDFORD2_BOOTROM 0xb0070002 + +/* MC_CMD_GET_VERSION_OUT msgresponse */ +#define MC_CMD_GET_VERSION_OUT_LEN 32 +/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */ +/* MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */ +/* Enum values, see field(s): */ +/* MC_CMD_GET_VERSION_V0_OUT/MC_CMD_GET_VERSION_OUT_FIRMWARE */ +#define MC_CMD_GET_VERSION_OUT_PCOL_OFST 4 +#define MC_CMD_GET_VERSION_OUT_PCOL_LEN 4 +/* 128bit mask of functions supported by the current firmware */ +#define MC_CMD_GET_VERSION_OUT_SUPPORTED_FUNCS_OFST 8 +#define MC_CMD_GET_VERSION_OUT_SUPPORTED_FUNCS_LEN 16 +#define MC_CMD_GET_VERSION_OUT_VERSION_OFST 24 +#define MC_CMD_GET_VERSION_OUT_VERSION_LEN 8 +#define MC_CMD_GET_VERSION_OUT_VERSION_LO_OFST 24 +#define MC_CMD_GET_VERSION_OUT_VERSION_HI_OFST 28 + +/* MC_CMD_GET_VERSION_EXT_OUT msgresponse */ +#define MC_CMD_GET_VERSION_EXT_OUT_LEN 48 +/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */ +/* MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */ +/* Enum values, see field(s): */ +/* MC_CMD_GET_VERSION_V0_OUT/MC_CMD_GET_VERSION_OUT_FIRMWARE */ +#define MC_CMD_GET_VERSION_EXT_OUT_PCOL_OFST 4 +#define MC_CMD_GET_VERSION_EXT_OUT_PCOL_LEN 4 +/* 128bit mask of functions supported by the current firmware */ +#define MC_CMD_GET_VERSION_EXT_OUT_SUPPORTED_FUNCS_OFST 8 +#define MC_CMD_GET_VERSION_EXT_OUT_SUPPORTED_FUNCS_LEN 16 +#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_OFST 24 +#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_LEN 8 +#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_LO_OFST 24 +#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_HI_OFST 28 +/* extra info */ +#define MC_CMD_GET_VERSION_EXT_OUT_EXTRA_OFST 32 +#define MC_CMD_GET_VERSION_EXT_OUT_EXTRA_LEN 16 + +/* MC_CMD_GET_VERSION_V2_OUT msgresponse: Extended response providing version + * information for all adapter components. For Riverhead based designs, base MC + * firmware version fields refer to NMC firmware, while CMC firmware data is in + * dedicated CMC fields. 
Flags indicate which data is present in the response + * (depending on which components exist on a particular adapter) + */ +#define MC_CMD_GET_VERSION_V2_OUT_LEN 304 +/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */ +/* MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */ +/* Enum values, see field(s): */ +/* MC_CMD_GET_VERSION_V0_OUT/MC_CMD_GET_VERSION_OUT_FIRMWARE */ +#define MC_CMD_GET_VERSION_V2_OUT_PCOL_OFST 4 +#define MC_CMD_GET_VERSION_V2_OUT_PCOL_LEN 4 +/* 128bit mask of functions supported by the current firmware */ +#define MC_CMD_GET_VERSION_V2_OUT_SUPPORTED_FUNCS_OFST 8 +#define MC_CMD_GET_VERSION_V2_OUT_SUPPORTED_FUNCS_LEN 16 +#define MC_CMD_GET_VERSION_V2_OUT_VERSION_OFST 24 +#define MC_CMD_GET_VERSION_V2_OUT_VERSION_LEN 8 +#define MC_CMD_GET_VERSION_V2_OUT_VERSION_LO_OFST 24 +#define MC_CMD_GET_VERSION_V2_OUT_VERSION_HI_OFST 28 +/* extra info */ +#define MC_CMD_GET_VERSION_V2_OUT_EXTRA_OFST 32 +#define MC_CMD_GET_VERSION_V2_OUT_EXTRA_LEN 16 +/* Flags indicating which extended fields are valid */ +#define MC_CMD_GET_VERSION_V2_OUT_FLAGS_OFST 48 +#define MC_CMD_GET_VERSION_V2_OUT_FLAGS_LEN 4 +#define MC_CMD_GET_VERSION_V2_OUT_MCFW_EXT_INFO_PRESENT_LBN 0 +#define MC_CMD_GET_VERSION_V2_OUT_MCFW_EXT_INFO_PRESENT_WIDTH 1 +#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_EXT_INFO_PRESENT_LBN 1 +#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_EXT_INFO_PRESENT_WIDTH 1 +#define MC_CMD_GET_VERSION_V2_OUT_CMC_EXT_INFO_PRESENT_LBN 2 +#define MC_CMD_GET_VERSION_V2_OUT_CMC_EXT_INFO_PRESENT_WIDTH 1 +#define MC_CMD_GET_VERSION_V2_OUT_FPGA_EXT_INFO_PRESENT_LBN 3 +#define MC_CMD_GET_VERSION_V2_OUT_FPGA_EXT_INFO_PRESENT_WIDTH 1 +#define MC_CMD_GET_VERSION_V2_OUT_BOARD_EXT_INFO_PRESENT_LBN 4 +#define MC_CMD_GET_VERSION_V2_OUT_BOARD_EXT_INFO_PRESENT_WIDTH 1 +/* MC firmware unique build ID (as binary SHA-1 value) */ +#define MC_CMD_GET_VERSION_V2_OUT_MCFW_BUILD_ID_OFST 52 +#define MC_CMD_GET_VERSION_V2_OUT_MCFW_BUILD_ID_LEN 20 +/* MC firmware security level */ +#define MC_CMD_GET_VERSION_V2_OUT_MCFW_SECURITY_LEVEL_OFST 72 +#define MC_CMD_GET_VERSION_V2_OUT_MCFW_SECURITY_LEVEL_LEN 4 +/* MC firmware build name (as null-terminated US-ASCII string) */ +#define MC_CMD_GET_VERSION_V2_OUT_MCFW_BUILD_NAME_OFST 76 +#define MC_CMD_GET_VERSION_V2_OUT_MCFW_BUILD_NAME_LEN 64 +/* The SUC firmware version as four numbers - a.b.c.d */ +#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_VERSION_OFST 140 +#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_VERSION_LEN 4 +#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_VERSION_NUM 4 +/* SUC firmware build date (as 64-bit Unix timestamp) */ +#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_BUILD_DATE_OFST 156 +#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_BUILD_DATE_LEN 8 +#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_BUILD_DATE_LO_OFST 156 +#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_BUILD_DATE_HI_OFST 160 +/* The ID of the SUC chip. This is specific to the platform but typically + * indicates family, memory sizes etc. See SF-116728-SW for further details. 
+ */ +#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_CHIP_ID_OFST 164 +#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_CHIP_ID_LEN 4 +/* The CMC firmware version as four numbers - a.b.c.d */ +#define MC_CMD_GET_VERSION_V2_OUT_CMCFW_VERSION_OFST 168 +#define MC_CMD_GET_VERSION_V2_OUT_CMCFW_VERSION_LEN 4 +#define MC_CMD_GET_VERSION_V2_OUT_CMCFW_VERSION_NUM 4 +/* CMC firmware build date (as 64-bit Unix timestamp) */ +#define MC_CMD_GET_VERSION_V2_OUT_CMCFW_BUILD_DATE_OFST 184 +#define MC_CMD_GET_VERSION_V2_OUT_CMCFW_BUILD_DATE_LEN 8 +#define MC_CMD_GET_VERSION_V2_OUT_CMCFW_BUILD_DATE_LO_OFST 184 +#define MC_CMD_GET_VERSION_V2_OUT_CMCFW_BUILD_DATE_HI_OFST 188 +/* FPGA version as three numbers. On Riverhead based systems this field uses + * the same encoding as hardware version ID registers (MC_FPGA_BUILD_HWRD_REG): + * FPGA_VERSION[0]: x => Image H{x} FPGA_VERSION[1]: Revision letter (0 => A, 1 + * => B, ...) FPGA_VERSION[2]: Sub-revision number + */ +#define MC_CMD_GET_VERSION_V2_OUT_FPGA_VERSION_OFST 192 +#define MC_CMD_GET_VERSION_V2_OUT_FPGA_VERSION_LEN 4 +#define MC_CMD_GET_VERSION_V2_OUT_FPGA_VERSION_NUM 3 +/* Extra FPGA revision information (as null-terminated US-ASCII string) */ +#define MC_CMD_GET_VERSION_V2_OUT_FPGA_EXTRA_OFST 204 +#define MC_CMD_GET_VERSION_V2_OUT_FPGA_EXTRA_LEN 16 +/* Board name / adapter model (as null-terminated US-ASCII string) */ +#define MC_CMD_GET_VERSION_V2_OUT_BOARD_NAME_OFST 220 +#define MC_CMD_GET_VERSION_V2_OUT_BOARD_NAME_LEN 16 +/* Board revision number */ +#define MC_CMD_GET_VERSION_V2_OUT_BOARD_REVISION_OFST 236 +#define MC_CMD_GET_VERSION_V2_OUT_BOARD_REVISION_LEN 4 +/* Board serial number (as null-terminated US-ASCII string) */ +#define MC_CMD_GET_VERSION_V2_OUT_BOARD_SERIAL_OFST 240 +#define MC_CMD_GET_VERSION_V2_OUT_BOARD_SERIAL_LEN 64 + + +/***********************************/ +/* MC_CMD_PTP + * Perform PTP operation + */ +#define MC_CMD_PTP 0xb +#undef MC_CMD_0xb_PRIVILEGE_CTG + +#define MC_CMD_0xb_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_PTP_IN msgrequest */ +#define MC_CMD_PTP_IN_LEN 1 +/* PTP operation code */ +#define MC_CMD_PTP_IN_OP_OFST 0 +#define MC_CMD_PTP_IN_OP_LEN 1 +/* enum: Enable PTP packet timestamping operation. */ +#define MC_CMD_PTP_OP_ENABLE 0x1 +/* enum: Disable PTP packet timestamping operation. */ +#define MC_CMD_PTP_OP_DISABLE 0x2 +/* enum: Send a PTP packet. This operation is used on Siena and Huntington. + * From Medford onwards it is not supported: on those platforms PTP transmit + * timestamping is done using the fast path. + */ +#define MC_CMD_PTP_OP_TRANSMIT 0x3 +/* enum: Read the current NIC time. */ +#define MC_CMD_PTP_OP_READ_NIC_TIME 0x4 +/* enum: Get the current PTP status. Note that the clock frequency returned (in + * Hz) is rounded to the nearest MHz (e.g. 666000000 for 666666666). + */ +#define MC_CMD_PTP_OP_STATUS 0x5 +/* enum: Adjust the PTP NIC's time. */ +#define MC_CMD_PTP_OP_ADJUST 0x6 +/* enum: Synchronize host and NIC time. */ +#define MC_CMD_PTP_OP_SYNCHRONIZE 0x7 +/* enum: Basic manufacturing tests. Siena PTP adapters only. */ +#define MC_CMD_PTP_OP_MANFTEST_BASIC 0x8 +/* enum: Packet based manufacturing tests. Siena PTP adapters only. */ +#define MC_CMD_PTP_OP_MANFTEST_PACKET 0x9 +/* enum: Reset some of the PTP related statistics */ +#define MC_CMD_PTP_OP_RESET_STATS 0xa +/* enum: Debug operations to MC. */ +#define MC_CMD_PTP_OP_DEBUG 0xb +/* enum: Read an FPGA register. Siena PTP adapters only. */ +#define MC_CMD_PTP_OP_FPGAREAD 0xc +/* enum: Write an FPGA register. Siena PTP adapters only. 
*/ +#define MC_CMD_PTP_OP_FPGAWRITE 0xd +/* enum: Apply an offset to the NIC clock */ +#define MC_CMD_PTP_OP_CLOCK_OFFSET_ADJUST 0xe +/* enum: Change the frequency correction applied to the NIC clock */ +#define MC_CMD_PTP_OP_CLOCK_FREQ_ADJUST 0xf +/* enum: Set the MC packet filter VLAN tags for received PTP packets. + * Deprecated for Huntington onwards. + */ +#define MC_CMD_PTP_OP_RX_SET_VLAN_FILTER 0x10 +/* enum: Set the MC packet filter UUID for received PTP packets. Deprecated for + * Huntington onwards. + */ +#define MC_CMD_PTP_OP_RX_SET_UUID_FILTER 0x11 +/* enum: Set the MC packet filter Domain for received PTP packets. Deprecated + * for Huntington onwards. + */ +#define MC_CMD_PTP_OP_RX_SET_DOMAIN_FILTER 0x12 +/* enum: Set the clock source. Required for snapper tests on Huntington and + * Medford. Not implemented for Siena or Medford2. + */ +#define MC_CMD_PTP_OP_SET_CLK_SRC 0x13 +/* enum: Reset value of Timer Reg. Not implemented. */ +#define MC_CMD_PTP_OP_RST_CLK 0x14 +/* enum: Enable the forwarding of PPS events to the host */ +#define MC_CMD_PTP_OP_PPS_ENABLE 0x15 +/* enum: Get the time format used by this NIC for PTP operations */ +#define MC_CMD_PTP_OP_GET_TIME_FORMAT 0x16 +/* enum: Get the clock attributes. NOTE- extended version of + * MC_CMD_PTP_OP_GET_TIME_FORMAT + */ +#define MC_CMD_PTP_OP_GET_ATTRIBUTES 0x16 +/* enum: Get corrections that should be applied to the various different + * timestamps + */ +#define MC_CMD_PTP_OP_GET_TIMESTAMP_CORRECTIONS 0x17 +/* enum: Subscribe to receive periodic time events indicating the current NIC + * time + */ +#define MC_CMD_PTP_OP_TIME_EVENT_SUBSCRIBE 0x18 +/* enum: Unsubscribe to stop receiving time events */ +#define MC_CMD_PTP_OP_TIME_EVENT_UNSUBSCRIBE 0x19 +/* enum: PPS based manfacturing tests. Requires PPS output to be looped to PPS + * input on the same NIC. Siena PTP adapters only. + */ +#define MC_CMD_PTP_OP_MANFTEST_PPS 0x1a +/* enum: Set the PTP sync status. Status is used by firmware to report to event + * subscribers. + */ +#define MC_CMD_PTP_OP_SET_SYNC_STATUS 0x1b +/* enum: Above this for future use. */ +#define MC_CMD_PTP_OP_MAX 0x1c + +/* MC_CMD_PTP_IN_ENABLE msgrequest */ +#define MC_CMD_PTP_IN_ENABLE_LEN 16 +#define MC_CMD_PTP_IN_CMD_OFST 0 +#define MC_CMD_PTP_IN_CMD_LEN 4 +#define MC_CMD_PTP_IN_PERIPH_ID_OFST 4 +#define MC_CMD_PTP_IN_PERIPH_ID_LEN 4 +/* Not used. Events are always sent to function relative queue 0. */ +#define MC_CMD_PTP_IN_ENABLE_QUEUE_OFST 8 +#define MC_CMD_PTP_IN_ENABLE_QUEUE_LEN 4 +/* PTP timestamping mode. Not used from Huntington onwards. 
*/ +#define MC_CMD_PTP_IN_ENABLE_MODE_OFST 12 +#define MC_CMD_PTP_IN_ENABLE_MODE_LEN 4 +/* enum: PTP, version 1 */ +#define MC_CMD_PTP_MODE_V1 0x0 +/* enum: PTP, version 1, with VLAN headers - deprecated */ +#define MC_CMD_PTP_MODE_V1_VLAN 0x1 +/* enum: PTP, version 2 */ +#define MC_CMD_PTP_MODE_V2 0x2 +/* enum: PTP, version 2, with VLAN headers - deprecated */ +#define MC_CMD_PTP_MODE_V2_VLAN 0x3 +/* enum: PTP, version 2, with improved UUID filtering */ +#define MC_CMD_PTP_MODE_V2_ENHANCED 0x4 +/* enum: FCoE (seconds and microseconds) */ +#define MC_CMD_PTP_MODE_FCOE 0x5 + +/* MC_CMD_PTP_IN_DISABLE msgrequest */ +#define MC_CMD_PTP_IN_DISABLE_LEN 8 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ + +/* MC_CMD_PTP_IN_TRANSMIT msgrequest */ +#define MC_CMD_PTP_IN_TRANSMIT_LENMIN 13 +#define MC_CMD_PTP_IN_TRANSMIT_LENMAX 252 +#define MC_CMD_PTP_IN_TRANSMIT_LENMAX_MCDI2 1020 +#define MC_CMD_PTP_IN_TRANSMIT_LEN(num) (12+1*(num)) +#define MC_CMD_PTP_IN_TRANSMIT_PACKET_NUM(len) (((len)-12)/1) +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ +/* Transmit packet length */ +#define MC_CMD_PTP_IN_TRANSMIT_LENGTH_OFST 8 +#define MC_CMD_PTP_IN_TRANSMIT_LENGTH_LEN 4 +/* Transmit packet data */ +#define MC_CMD_PTP_IN_TRANSMIT_PACKET_OFST 12 +#define MC_CMD_PTP_IN_TRANSMIT_PACKET_LEN 1 +#define MC_CMD_PTP_IN_TRANSMIT_PACKET_MINNUM 1 +#define MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM 240 +#define MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM_MCDI2 1008 + +/* MC_CMD_PTP_IN_READ_NIC_TIME msgrequest */ +#define MC_CMD_PTP_IN_READ_NIC_TIME_LEN 8 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ + +/* MC_CMD_PTP_IN_READ_NIC_TIME_V2 msgrequest */ +#define MC_CMD_PTP_IN_READ_NIC_TIME_V2_LEN 8 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ + +/* MC_CMD_PTP_IN_STATUS msgrequest */ +#define MC_CMD_PTP_IN_STATUS_LEN 8 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ + +/* MC_CMD_PTP_IN_ADJUST msgrequest */ +#define MC_CMD_PTP_IN_ADJUST_LEN 24 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ +/* Frequency adjustment 40 bit fixed point ns */ +#define MC_CMD_PTP_IN_ADJUST_FREQ_OFST 8 +#define MC_CMD_PTP_IN_ADJUST_FREQ_LEN 8 +#define MC_CMD_PTP_IN_ADJUST_FREQ_LO_OFST 8 +#define MC_CMD_PTP_IN_ADJUST_FREQ_HI_OFST 12 +/* enum: Number of fractional bits in frequency adjustment */ +#define MC_CMD_PTP_IN_ADJUST_BITS 0x28 +/* enum: Number of fractional bits in frequency adjustment when FP44_FREQ_ADJ + * is indicated in the MC_CMD_PTP_OUT_GET_ATTRIBUTES command CAPABILITIES + * field. 
+ */ +#define MC_CMD_PTP_IN_ADJUST_BITS_FP44 0x2c +/* Time adjustment in seconds */ +#define MC_CMD_PTP_IN_ADJUST_SECONDS_OFST 16 +#define MC_CMD_PTP_IN_ADJUST_SECONDS_LEN 4 +/* Time adjustment major value */ +#define MC_CMD_PTP_IN_ADJUST_MAJOR_OFST 16 +#define MC_CMD_PTP_IN_ADJUST_MAJOR_LEN 4 +/* Time adjustment in nanoseconds */ +#define MC_CMD_PTP_IN_ADJUST_NANOSECONDS_OFST 20 +#define MC_CMD_PTP_IN_ADJUST_NANOSECONDS_LEN 4 +/* Time adjustment minor value */ +#define MC_CMD_PTP_IN_ADJUST_MINOR_OFST 20 +#define MC_CMD_PTP_IN_ADJUST_MINOR_LEN 4 + +/* MC_CMD_PTP_IN_ADJUST_V2 msgrequest */ +#define MC_CMD_PTP_IN_ADJUST_V2_LEN 28 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ +/* Frequency adjustment 40 bit fixed point ns */ +#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_OFST 8 +#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_LEN 8 +#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_LO_OFST 8 +#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_HI_OFST 12 +/* enum: Number of fractional bits in frequency adjustment */ +/* MC_CMD_PTP_IN_ADJUST_BITS 0x28 */ +/* enum: Number of fractional bits in frequency adjustment when FP44_FREQ_ADJ + * is indicated in the MC_CMD_PTP_OUT_GET_ATTRIBUTES command CAPABILITIES + * field. + */ +/* MC_CMD_PTP_IN_ADJUST_BITS_FP44 0x2c */ +/* Time adjustment in seconds */ +#define MC_CMD_PTP_IN_ADJUST_V2_SECONDS_OFST 16 +#define MC_CMD_PTP_IN_ADJUST_V2_SECONDS_LEN 4 +/* Time adjustment major value */ +#define MC_CMD_PTP_IN_ADJUST_V2_MAJOR_OFST 16 +#define MC_CMD_PTP_IN_ADJUST_V2_MAJOR_LEN 4 +/* Time adjustment in nanoseconds */ +#define MC_CMD_PTP_IN_ADJUST_V2_NANOSECONDS_OFST 20 +#define MC_CMD_PTP_IN_ADJUST_V2_NANOSECONDS_LEN 4 +/* Time adjustment minor value */ +#define MC_CMD_PTP_IN_ADJUST_V2_MINOR_OFST 20 +#define MC_CMD_PTP_IN_ADJUST_V2_MINOR_LEN 4 +/* Upper 32bits of major time offset adjustment */ +#define MC_CMD_PTP_IN_ADJUST_V2_MAJOR_HI_OFST 24 +#define MC_CMD_PTP_IN_ADJUST_V2_MAJOR_HI_LEN 4 + +/* MC_CMD_PTP_IN_SYNCHRONIZE msgrequest */ +#define MC_CMD_PTP_IN_SYNCHRONIZE_LEN 20 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ +/* Number of time readings to capture */ +#define MC_CMD_PTP_IN_SYNCHRONIZE_NUMTIMESETS_OFST 8 +#define MC_CMD_PTP_IN_SYNCHRONIZE_NUMTIMESETS_LEN 4 +/* Host address in which to write "synchronization started" indication (64 + * bits) + */ +#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_OFST 12 +#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_LEN 8 +#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_LO_OFST 12 +#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_HI_OFST 16 + +/* MC_CMD_PTP_IN_MANFTEST_BASIC msgrequest */ +#define MC_CMD_PTP_IN_MANFTEST_BASIC_LEN 8 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ + +/* MC_CMD_PTP_IN_MANFTEST_PACKET msgrequest */ +#define MC_CMD_PTP_IN_MANFTEST_PACKET_LEN 12 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ +/* Enable or disable packet testing */ +#define MC_CMD_PTP_IN_MANFTEST_PACKET_TEST_ENABLE_OFST 8 +#define MC_CMD_PTP_IN_MANFTEST_PACKET_TEST_ENABLE_LEN 4 + +/* MC_CMD_PTP_IN_RESET_STATS msgrequest: Reset PTP statistics */ +#define MC_CMD_PTP_IN_RESET_STATS_LEN 8 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ + 
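The *_OFST/*_LEN pairs in these definitions are byte offsets and lengths within a little-endian MCDI payload, and the *_LBN/*_WIDTH pairs give a field's least-significant bit number and bit width. The following is a minimal editorial sketch (not part of the generated header) of how such constants are consumed, hand-assembling an MC_CMD_PTP_IN_ADJUST request and decoding one event field; the put_le32()/put_le64() helpers, the bare byte buffer and the zero PERIPH_ID are illustrative assumptions, and the drivers in this patch use their own MCDI accessor macros rather than this code.

#include <stdint.h>
#include <stddef.h>

/* Illustrative helpers: MCDI payload fields are stored little-endian. */
static void put_le32(uint8_t *buf, size_t ofst, uint32_t v)
{
	buf[ofst + 0] = (uint8_t)v;
	buf[ofst + 1] = (uint8_t)(v >> 8);
	buf[ofst + 2] = (uint8_t)(v >> 16);
	buf[ofst + 3] = (uint8_t)(v >> 24);
}

static void put_le64(uint8_t *buf, size_t ofst, uint64_t v)
{
	put_le32(buf, ofst, (uint32_t)v);
	put_le32(buf, ofst + 4, (uint32_t)(v >> 32));
}

/*
 * Fill an MC_CMD_PTP_IN_ADJUST payload (MC_CMD_PTP_IN_ADJUST_LEN = 24 bytes).
 * 'freq' must already be encoded in the fixed-point format described by
 * MC_CMD_PTP_IN_ADJUST_BITS; 'major'/'minor' carry the time adjustment.
 * PERIPH_ID is left at zero here purely for illustration.
 */
static void ptp_adjust_build(uint8_t *buf, int64_t freq,
			     uint32_t major, uint32_t minor)
{
	put_le32(buf, MC_CMD_PTP_IN_CMD_OFST, MC_CMD_PTP_OP_ADJUST);
	put_le32(buf, MC_CMD_PTP_IN_PERIPH_ID_OFST, 0);
	put_le64(buf, MC_CMD_PTP_IN_ADJUST_FREQ_OFST, (uint64_t)freq);
	put_le32(buf, MC_CMD_PTP_IN_ADJUST_MAJOR_OFST, major);
	put_le32(buf, MC_CMD_PTP_IN_ADJUST_MINOR_OFST, minor);
}

/*
 * A field of a 64-bit event word is recovered by shifting by its LBN and
 * masking to its WIDTH, e.g. the MCDI event code defined earlier in this
 * header.
 */
static uint32_t mcdi_ev_code(uint64_t event)
{
	return (uint32_t)((event >> MCDI_EVENT_CODE_LBN) &
			  ((1ULL << MCDI_EVENT_CODE_WIDTH) - 1));
}

The same pattern applies to every msgrequest/msgresponse in this file; only the OFST/LEN and LBN/WIDTH constants change.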
+/* MC_CMD_PTP_IN_DEBUG msgrequest */ +#define MC_CMD_PTP_IN_DEBUG_LEN 12 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ +/* Debug operations */ +#define MC_CMD_PTP_IN_DEBUG_DEBUG_PARAM_OFST 8 +#define MC_CMD_PTP_IN_DEBUG_DEBUG_PARAM_LEN 4 + +/* MC_CMD_PTP_IN_FPGAREAD msgrequest */ +#define MC_CMD_PTP_IN_FPGAREAD_LEN 16 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ +#define MC_CMD_PTP_IN_FPGAREAD_ADDR_OFST 8 +#define MC_CMD_PTP_IN_FPGAREAD_ADDR_LEN 4 +#define MC_CMD_PTP_IN_FPGAREAD_NUMBYTES_OFST 12 +#define MC_CMD_PTP_IN_FPGAREAD_NUMBYTES_LEN 4 + +/* MC_CMD_PTP_IN_FPGAWRITE msgrequest */ +#define MC_CMD_PTP_IN_FPGAWRITE_LENMIN 13 +#define MC_CMD_PTP_IN_FPGAWRITE_LENMAX 252 +#define MC_CMD_PTP_IN_FPGAWRITE_LENMAX_MCDI2 1020 +#define MC_CMD_PTP_IN_FPGAWRITE_LEN(num) (12+1*(num)) +#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_NUM(len) (((len)-12)/1) +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ +#define MC_CMD_PTP_IN_FPGAWRITE_ADDR_OFST 8 +#define MC_CMD_PTP_IN_FPGAWRITE_ADDR_LEN 4 +#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_OFST 12 +#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_LEN 1 +#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_MINNUM 1 +#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_MAXNUM 240 +#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_MAXNUM_MCDI2 1008 + +/* MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST msgrequest */ +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_LEN 16 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ +/* Time adjustment in seconds */ +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_SECONDS_OFST 8 +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_SECONDS_LEN 4 +/* Time adjustment major value */ +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MAJOR_OFST 8 +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MAJOR_LEN 4 +/* Time adjustment in nanoseconds */ +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_NANOSECONDS_OFST 12 +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_NANOSECONDS_LEN 4 +/* Time adjustment minor value */ +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MINOR_OFST 12 +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MINOR_LEN 4 + +/* MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2 msgrequest */ +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_LEN 20 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ +/* Time adjustment in seconds */ +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_SECONDS_OFST 8 +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_SECONDS_LEN 4 +/* Time adjustment major value */ +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MAJOR_OFST 8 +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MAJOR_LEN 4 +/* Time adjustment in nanoseconds */ +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_NANOSECONDS_OFST 12 +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_NANOSECONDS_LEN 4 +/* Time adjustment minor value */ +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MINOR_OFST 12 +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MINOR_LEN 4 +/* Upper 32bits of major time offset adjustment */ +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MAJOR_HI_OFST 16 +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MAJOR_HI_LEN 4 + +/* MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST msgrequest */ +#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_LEN 16 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* 
MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ +/* Frequency adjustment 40 bit fixed point ns */ +#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_OFST 8 +#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_LEN 8 +#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_LO_OFST 8 +#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_HI_OFST 12 +/* Enum values, see field(s): */ +/* MC_CMD_PTP/MC_CMD_PTP_IN_ADJUST/FREQ */ + +/* MC_CMD_PTP_IN_RX_SET_VLAN_FILTER msgrequest */ +#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_LEN 24 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ +/* Number of VLAN tags, 0 if not VLAN */ +#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_NUM_VLAN_TAGS_OFST 8 +#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_NUM_VLAN_TAGS_LEN 4 +/* Set of VLAN tags to filter against */ +#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_VLAN_TAG_OFST 12 +#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_VLAN_TAG_LEN 4 +#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_VLAN_TAG_NUM 3 + +/* MC_CMD_PTP_IN_RX_SET_UUID_FILTER msgrequest */ +#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_LEN 20 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ +/* 1 to enable UUID filtering, 0 to disable */ +#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_ENABLE_OFST 8 +#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_ENABLE_LEN 4 +/* UUID to filter against */ +#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_OFST 12 +#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_LEN 8 +#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_LO_OFST 12 +#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_HI_OFST 16 + +/* MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER msgrequest */ +#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_LEN 16 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ +/* 1 to enable Domain filtering, 0 to disable */ +#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_ENABLE_OFST 8 +#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_ENABLE_LEN 4 +/* Domain number to filter against */ +#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_DOMAIN_OFST 12 +#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_DOMAIN_LEN 4 + +/* MC_CMD_PTP_IN_SET_CLK_SRC msgrequest */ +#define MC_CMD_PTP_IN_SET_CLK_SRC_LEN 12 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ +/* Set the clock source. */ +#define MC_CMD_PTP_IN_SET_CLK_SRC_CLK_OFST 8 +#define MC_CMD_PTP_IN_SET_CLK_SRC_CLK_LEN 4 +/* enum: Internal. */ +#define MC_CMD_PTP_CLK_SRC_INTERNAL 0x0 +/* enum: External. */ +#define MC_CMD_PTP_CLK_SRC_EXTERNAL 0x1 + +/* MC_CMD_PTP_IN_RST_CLK msgrequest: Reset value of Timer Reg. */ +#define MC_CMD_PTP_IN_RST_CLK_LEN 8 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ + +/* MC_CMD_PTP_IN_PPS_ENABLE msgrequest */ +#define MC_CMD_PTP_IN_PPS_ENABLE_LEN 12 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* Enable or disable */ +#define MC_CMD_PTP_IN_PPS_ENABLE_OP_OFST 4 +#define MC_CMD_PTP_IN_PPS_ENABLE_OP_LEN 4 +/* enum: Enable */ +#define MC_CMD_PTP_ENABLE_PPS 0x0 +/* enum: Disable */ +#define MC_CMD_PTP_DISABLE_PPS 0x1 +/* Not used. Events are always sent to function relative queue 0. 
*/ +#define MC_CMD_PTP_IN_PPS_ENABLE_QUEUE_ID_OFST 8 +#define MC_CMD_PTP_IN_PPS_ENABLE_QUEUE_ID_LEN 4 + +/* MC_CMD_PTP_IN_GET_TIME_FORMAT msgrequest */ +#define MC_CMD_PTP_IN_GET_TIME_FORMAT_LEN 8 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ + +/* MC_CMD_PTP_IN_GET_ATTRIBUTES msgrequest */ +#define MC_CMD_PTP_IN_GET_ATTRIBUTES_LEN 8 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ + +/* MC_CMD_PTP_IN_GET_TIMESTAMP_CORRECTIONS msgrequest */ +#define MC_CMD_PTP_IN_GET_TIMESTAMP_CORRECTIONS_LEN 8 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ + +/* MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE msgrequest */ +#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_LEN 12 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ +/* Original field containing queue ID. Now extended to include flags. */ +#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_OFST 8 +#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_LEN 4 +#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_ID_LBN 0 +#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_ID_WIDTH 16 +#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_REPORT_SYNC_STATUS_LBN 31 +#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_REPORT_SYNC_STATUS_WIDTH 1 + +/* MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE msgrequest */ +#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_LEN 16 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ +/* Unsubscribe options */ +#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_CONTROL_OFST 8 +#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_CONTROL_LEN 4 +/* enum: Unsubscribe a single queue */ +#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_SINGLE 0x0 +/* enum: Unsubscribe all queues */ +#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_ALL 0x1 +/* Event queue ID */ +#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE_OFST 12 +#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE_LEN 4 + +/* MC_CMD_PTP_IN_MANFTEST_PPS msgrequest */ +#define MC_CMD_PTP_IN_MANFTEST_PPS_LEN 12 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ +/* 1 to enable PPS test mode, 0 to disable and return result. */ +#define MC_CMD_PTP_IN_MANFTEST_PPS_TEST_ENABLE_OFST 8 +#define MC_CMD_PTP_IN_MANFTEST_PPS_TEST_ENABLE_LEN 4 + +/* MC_CMD_PTP_IN_SET_SYNC_STATUS msgrequest */ +#define MC_CMD_PTP_IN_SET_SYNC_STATUS_LEN 24 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ +/* NIC - Host System Clock Synchronization status */ +#define MC_CMD_PTP_IN_SET_SYNC_STATUS_STATUS_OFST 8 +#define MC_CMD_PTP_IN_SET_SYNC_STATUS_STATUS_LEN 4 +/* enum: Host System clock and NIC clock are not in sync */ +#define MC_CMD_PTP_IN_SET_SYNC_STATUS_NOT_IN_SYNC 0x0 +/* enum: Host System clock and NIC clock are synchronized */ +#define MC_CMD_PTP_IN_SET_SYNC_STATUS_IN_SYNC 0x1 +/* If synchronized, number of seconds until clocks should be considered to be + * no longer in sync. 
+ */ +#define MC_CMD_PTP_IN_SET_SYNC_STATUS_TIMEOUT_OFST 12 +#define MC_CMD_PTP_IN_SET_SYNC_STATUS_TIMEOUT_LEN 4 +#define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED0_OFST 16 +#define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED0_LEN 4 +#define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED1_OFST 20 +#define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED1_LEN 4 + +/* MC_CMD_PTP_OUT msgresponse */ +#define MC_CMD_PTP_OUT_LEN 0 + +/* MC_CMD_PTP_OUT_TRANSMIT msgresponse */ +#define MC_CMD_PTP_OUT_TRANSMIT_LEN 8 +/* Value of seconds timestamp */ +#define MC_CMD_PTP_OUT_TRANSMIT_SECONDS_OFST 0 +#define MC_CMD_PTP_OUT_TRANSMIT_SECONDS_LEN 4 +/* Timestamp major value */ +#define MC_CMD_PTP_OUT_TRANSMIT_MAJOR_OFST 0 +#define MC_CMD_PTP_OUT_TRANSMIT_MAJOR_LEN 4 +/* Value of nanoseconds timestamp */ +#define MC_CMD_PTP_OUT_TRANSMIT_NANOSECONDS_OFST 4 +#define MC_CMD_PTP_OUT_TRANSMIT_NANOSECONDS_LEN 4 +/* Timestamp minor value */ +#define MC_CMD_PTP_OUT_TRANSMIT_MINOR_OFST 4 +#define MC_CMD_PTP_OUT_TRANSMIT_MINOR_LEN 4 + +/* MC_CMD_PTP_OUT_TIME_EVENT_SUBSCRIBE msgresponse */ +#define MC_CMD_PTP_OUT_TIME_EVENT_SUBSCRIBE_LEN 0 + +/* MC_CMD_PTP_OUT_TIME_EVENT_UNSUBSCRIBE msgresponse */ +#define MC_CMD_PTP_OUT_TIME_EVENT_UNSUBSCRIBE_LEN 0 + +/* MC_CMD_PTP_OUT_READ_NIC_TIME msgresponse */ +#define MC_CMD_PTP_OUT_READ_NIC_TIME_LEN 8 +/* Value of seconds timestamp */ +#define MC_CMD_PTP_OUT_READ_NIC_TIME_SECONDS_OFST 0 +#define MC_CMD_PTP_OUT_READ_NIC_TIME_SECONDS_LEN 4 +/* Timestamp major value */ +#define MC_CMD_PTP_OUT_READ_NIC_TIME_MAJOR_OFST 0 +#define MC_CMD_PTP_OUT_READ_NIC_TIME_MAJOR_LEN 4 +/* Value of nanoseconds timestamp */ +#define MC_CMD_PTP_OUT_READ_NIC_TIME_NANOSECONDS_OFST 4 +#define MC_CMD_PTP_OUT_READ_NIC_TIME_NANOSECONDS_LEN 4 +/* Timestamp minor value */ +#define MC_CMD_PTP_OUT_READ_NIC_TIME_MINOR_OFST 4 +#define MC_CMD_PTP_OUT_READ_NIC_TIME_MINOR_LEN 4 + +/* MC_CMD_PTP_OUT_READ_NIC_TIME_V2 msgresponse */ +#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_LEN 12 +/* Value of seconds timestamp */ +#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_SECONDS_OFST 0 +#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_SECONDS_LEN 4 +/* Timestamp major value */ +#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MAJOR_OFST 0 +#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MAJOR_LEN 4 +/* Value of nanoseconds timestamp */ +#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_NANOSECONDS_OFST 4 +#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_NANOSECONDS_LEN 4 +/* Timestamp minor value */ +#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MINOR_OFST 4 +#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MINOR_LEN 4 +/* Upper 32bits of major timestamp value */ +#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MAJOR_HI_OFST 8 +#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MAJOR_HI_LEN 4 + +/* MC_CMD_PTP_OUT_STATUS msgresponse */ +#define MC_CMD_PTP_OUT_STATUS_LEN 64 +/* Frequency of NIC's hardware clock */ +#define MC_CMD_PTP_OUT_STATUS_CLOCK_FREQ_OFST 0 +#define MC_CMD_PTP_OUT_STATUS_CLOCK_FREQ_LEN 4 +/* Number of packets transmitted and timestamped */ +#define MC_CMD_PTP_OUT_STATUS_STATS_TX_OFST 4 +#define MC_CMD_PTP_OUT_STATUS_STATS_TX_LEN 4 +/* Number of packets received and timestamped */ +#define MC_CMD_PTP_OUT_STATUS_STATS_RX_OFST 8 +#define MC_CMD_PTP_OUT_STATUS_STATS_RX_LEN 4 +/* Number of packets timestamped by the FPGA */ +#define MC_CMD_PTP_OUT_STATUS_STATS_TS_OFST 12 +#define MC_CMD_PTP_OUT_STATUS_STATS_TS_LEN 4 +/* Number of packets filter matched */ +#define MC_CMD_PTP_OUT_STATUS_STATS_FM_OFST 16 +#define MC_CMD_PTP_OUT_STATUS_STATS_FM_LEN 4 +/* Number of packets not filter matched */ +#define 
MC_CMD_PTP_OUT_STATUS_STATS_NFM_OFST 20 +#define MC_CMD_PTP_OUT_STATUS_STATS_NFM_LEN 4 +/* Number of PPS overflows (noise on input?) */ +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFLOW_OFST 24 +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFLOW_LEN 4 +/* Number of PPS bad periods */ +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_BAD_OFST 28 +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_BAD_LEN 4 +/* Minimum period of PPS pulse in nanoseconds */ +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MIN_OFST 32 +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MIN_LEN 4 +/* Maximum period of PPS pulse in nanoseconds */ +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MAX_OFST 36 +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MAX_LEN 4 +/* Last period of PPS pulse in nanoseconds */ +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_LAST_OFST 40 +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_LAST_LEN 4 +/* Mean period of PPS pulse in nanoseconds */ +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MEAN_OFST 44 +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MEAN_LEN 4 +/* Minimum offset of PPS pulse in nanoseconds (signed) */ +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MIN_OFST 48 +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MIN_LEN 4 +/* Maximum offset of PPS pulse in nanoseconds (signed) */ +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MAX_OFST 52 +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MAX_LEN 4 +/* Last offset of PPS pulse in nanoseconds (signed) */ +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_LAST_OFST 56 +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_LAST_LEN 4 +/* Mean offset of PPS pulse in nanoseconds (signed) */ +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MEAN_OFST 60 +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MEAN_LEN 4 + +/* MC_CMD_PTP_OUT_SYNCHRONIZE msgresponse */ +#define MC_CMD_PTP_OUT_SYNCHRONIZE_LENMIN 20 +#define MC_CMD_PTP_OUT_SYNCHRONIZE_LENMAX 240 +#define MC_CMD_PTP_OUT_SYNCHRONIZE_LENMAX_MCDI2 1020 +#define MC_CMD_PTP_OUT_SYNCHRONIZE_LEN(num) (0+20*(num)) +#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_NUM(len) (((len)-0)/20) +/* A set of host and NIC times */ +#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_OFST 0 +#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_LEN 20 +#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MINNUM 1 +#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MAXNUM 12 +#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MAXNUM_MCDI2 51 +/* Host time immediately before NIC's hardware clock read */ +#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTSTART_OFST 0 +#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTSTART_LEN 4 +/* Value of seconds timestamp */ +#define MC_CMD_PTP_OUT_SYNCHRONIZE_SECONDS_OFST 4 +#define MC_CMD_PTP_OUT_SYNCHRONIZE_SECONDS_LEN 4 +/* Timestamp major value */ +#define MC_CMD_PTP_OUT_SYNCHRONIZE_MAJOR_OFST 4 +#define MC_CMD_PTP_OUT_SYNCHRONIZE_MAJOR_LEN 4 +/* Value of nanoseconds timestamp */ +#define MC_CMD_PTP_OUT_SYNCHRONIZE_NANOSECONDS_OFST 8 +#define MC_CMD_PTP_OUT_SYNCHRONIZE_NANOSECONDS_LEN 4 +/* Timestamp minor value */ +#define MC_CMD_PTP_OUT_SYNCHRONIZE_MINOR_OFST 8 +#define MC_CMD_PTP_OUT_SYNCHRONIZE_MINOR_LEN 4 +/* Host time immediately after NIC's hardware clock read */ +#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTEND_OFST 12 +#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTEND_LEN 4 +/* Number of nanoseconds waited after reading NIC's hardware clock */ +#define MC_CMD_PTP_OUT_SYNCHRONIZE_WAITNS_OFST 16 +#define MC_CMD_PTP_OUT_SYNCHRONIZE_WAITNS_LEN 4 + +/* MC_CMD_PTP_OUT_MANFTEST_BASIC msgresponse */ +#define MC_CMD_PTP_OUT_MANFTEST_BASIC_LEN 8 +/* Results of testing */ +#define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_RESULT_OFST 0 
+#define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_RESULT_LEN 4 +/* enum: Successful test */ +#define MC_CMD_PTP_MANF_SUCCESS 0x0 +/* enum: FPGA load failed */ +#define MC_CMD_PTP_MANF_FPGA_LOAD 0x1 +/* enum: FPGA version invalid */ +#define MC_CMD_PTP_MANF_FPGA_VERSION 0x2 +/* enum: FPGA registers incorrect */ +#define MC_CMD_PTP_MANF_FPGA_REGISTERS 0x3 +/* enum: Oscillator possibly not working? */ +#define MC_CMD_PTP_MANF_OSCILLATOR 0x4 +/* enum: Timestamps not increasing */ +#define MC_CMD_PTP_MANF_TIMESTAMPS 0x5 +/* enum: Mismatched packet count */ +#define MC_CMD_PTP_MANF_PACKET_COUNT 0x6 +/* enum: Mismatched packet count (Siena filter and FPGA) */ +#define MC_CMD_PTP_MANF_FILTER_COUNT 0x7 +/* enum: Not enough packets to perform timestamp check */ +#define MC_CMD_PTP_MANF_PACKET_ENOUGH 0x8 +/* enum: Timestamp trigger GPIO not working */ +#define MC_CMD_PTP_MANF_GPIO_TRIGGER 0x9 +/* enum: Insufficient PPS events to perform checks */ +#define MC_CMD_PTP_MANF_PPS_ENOUGH 0xa +/* enum: PPS time event period not sufficiently close to 1s. */ +#define MC_CMD_PTP_MANF_PPS_PERIOD 0xb +/* enum: PPS time event nS reading not sufficiently close to zero. */ +#define MC_CMD_PTP_MANF_PPS_NS 0xc +/* enum: PTP peripheral registers incorrect */ +#define MC_CMD_PTP_MANF_REGISTERS 0xd +/* enum: Failed to read time from PTP peripheral */ +#define MC_CMD_PTP_MANF_CLOCK_READ 0xe +/* Presence of external oscillator */ +#define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_EXTOSC_OFST 4 +#define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_EXTOSC_LEN 4 + +/* MC_CMD_PTP_OUT_MANFTEST_PACKET msgresponse */ +#define MC_CMD_PTP_OUT_MANFTEST_PACKET_LEN 12 +/* Results of testing */ +#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_RESULT_OFST 0 +#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_RESULT_LEN 4 +/* Number of packets received by FPGA */ +#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FPGACOUNT_OFST 4 +#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FPGACOUNT_LEN 4 +/* Number of packets received by Siena filters */ +#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FILTERCOUNT_OFST 8 +#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FILTERCOUNT_LEN 4 + +/* MC_CMD_PTP_OUT_FPGAREAD msgresponse */ +#define MC_CMD_PTP_OUT_FPGAREAD_LENMIN 1 +#define MC_CMD_PTP_OUT_FPGAREAD_LENMAX 252 +#define MC_CMD_PTP_OUT_FPGAREAD_LENMAX_MCDI2 1020 +#define MC_CMD_PTP_OUT_FPGAREAD_LEN(num) (0+1*(num)) +#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_NUM(len) (((len)-0)/1) +#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_OFST 0 +#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_LEN 1 +#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_MINNUM 1 +#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_MAXNUM 252 +#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_MAXNUM_MCDI2 1020 + +/* MC_CMD_PTP_OUT_GET_TIME_FORMAT msgresponse */ +#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_LEN 4 +/* Time format required/used by for this NIC. Applies to all PTP MCDI + * operations that pass times between the host and firmware. If this operation + * is not supported (older firmware) a format of seconds and nanoseconds should + * be assumed. Note this enum is deprecated. Do not add to it- use the + * TIME_FORMAT field in MC_CMD_PTP_OUT_GET_ATTRIBUTES instead. 
+ */ +#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_FORMAT_OFST 0 +#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_FORMAT_LEN 4 +/* enum: Times are in seconds and nanoseconds */ +#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_SECONDS_NANOSECONDS 0x0 +/* enum: Major register has units of 16 second per tick, minor 8 ns per tick */ +#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_16SECONDS_8NANOSECONDS 0x1 +/* enum: Major register has units of seconds, minor 2^-27s per tick */ +#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_SECONDS_27FRACTION 0x2 + +/* MC_CMD_PTP_OUT_GET_ATTRIBUTES msgresponse */ +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_LEN 24 +/* Time format required/used by for this NIC. Applies to all PTP MCDI + * operations that pass times between the host and firmware. If this operation + * is not supported (older firmware) a format of seconds and nanoseconds should + * be assumed. + */ +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_TIME_FORMAT_OFST 0 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_TIME_FORMAT_LEN 4 +/* enum: Times are in seconds and nanoseconds */ +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_NANOSECONDS 0x0 +/* enum: Major register has units of 16 second per tick, minor 8 ns per tick */ +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_16SECONDS_8NANOSECONDS 0x1 +/* enum: Major register has units of seconds, minor 2^-27s per tick */ +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_27FRACTION 0x2 +/* enum: Major register units are seconds, minor units are quarter nanoseconds + */ +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_QTR_NANOSECONDS 0x3 +/* Minimum acceptable value for a corrected synchronization timeset. When + * comparing host and NIC clock times, the MC returns a set of samples that + * contain the host start and end time, the MC time when the host start was + * detected and the time the MC waited between reading the time and detecting + * the host end. The corrected sync window is the difference between the host + * end and start times minus the time that the MC waited for host end. 
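+ * For example, a TIMESET entry with HOSTSTART=1000 ns, HOSTEND=1850 ns and + * WAITNS=600 ns gives a corrected sync window of (1850 - 1000) - 600 = 250 ns; + * samples whose corrected window falls below SYNC_WINDOW_MIN are not + * acceptable.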
+ */ +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SYNC_WINDOW_MIN_OFST 4 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SYNC_WINDOW_MIN_LEN 4 +/* Various PTP capabilities */ +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_CAPABILITIES_OFST 8 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_CAPABILITIES_LEN 4 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_REPORT_SYNC_STATUS_LBN 0 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_REPORT_SYNC_STATUS_WIDTH 1 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RX_TSTAMP_OOB_LBN 1 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RX_TSTAMP_OOB_WIDTH 1 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_64BIT_SECONDS_LBN 2 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_64BIT_SECONDS_WIDTH 1 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_FP44_FREQ_ADJ_LBN 3 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_FP44_FREQ_ADJ_WIDTH 1 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED0_OFST 12 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED0_LEN 4 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED1_OFST 16 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED1_LEN 4 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED2_OFST 20 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED2_LEN 4 + +/* MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS msgresponse */ +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_LEN 16 +/* Uncorrected error on PTP transmit timestamps in NIC clock format */ +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_TRANSMIT_OFST 0 +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_TRANSMIT_LEN 4 +/* Uncorrected error on PTP receive timestamps in NIC clock format */ +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_RECEIVE_OFST 4 +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_RECEIVE_LEN 4 +/* Uncorrected error on PPS output in NIC clock format */ +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_OUT_OFST 8 +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_OUT_LEN 4 +/* Uncorrected error on PPS input in NIC clock format */ +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_IN_OFST 12 +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_IN_LEN 4 + +/* MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2 msgresponse */ +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_LEN 24 +/* Uncorrected error on PTP transmit timestamps in NIC clock format */ +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_TX_OFST 0 +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_TX_LEN 4 +/* Uncorrected error on PTP receive timestamps in NIC clock format */ +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_RX_OFST 4 +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_RX_LEN 4 +/* Uncorrected error on PPS output in NIC clock format */ +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_OUT_OFST 8 +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_OUT_LEN 4 +/* Uncorrected error on PPS input in NIC clock format */ +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_IN_OFST 12 +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_IN_LEN 4 +/* Uncorrected error on non-PTP transmit timestamps in NIC clock format */ +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_TX_OFST 16 +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_TX_LEN 4 +/* Uncorrected error on non-PTP receive timestamps in NIC clock format */ +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_RX_OFST 20 +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_RX_LEN 4 + +/* MC_CMD_PTP_OUT_MANFTEST_PPS msgresponse */ +#define MC_CMD_PTP_OUT_MANFTEST_PPS_LEN 4 +/* Results of testing */ +#define MC_CMD_PTP_OUT_MANFTEST_PPS_TEST_RESULT_OFST 0 +#define 
MC_CMD_PTP_OUT_MANFTEST_PPS_TEST_RESULT_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_PTP_OUT_MANFTEST_BASIC/TEST_RESULT */ + +/* MC_CMD_PTP_OUT_SET_SYNC_STATUS msgresponse */ +#define MC_CMD_PTP_OUT_SET_SYNC_STATUS_LEN 0 + + +/***********************************/ +/* MC_CMD_CSR_READ32 + * Read 32bit words from the indirect memory map. + */ +#define MC_CMD_CSR_READ32 0xc +#undef MC_CMD_0xc_PRIVILEGE_CTG + +#define MC_CMD_0xc_PRIVILEGE_CTG SRIOV_CTG_INSECURE + +/* MC_CMD_CSR_READ32_IN msgrequest */ +#define MC_CMD_CSR_READ32_IN_LEN 12 +/* Address */ +#define MC_CMD_CSR_READ32_IN_ADDR_OFST 0 +#define MC_CMD_CSR_READ32_IN_ADDR_LEN 4 +#define MC_CMD_CSR_READ32_IN_STEP_OFST 4 +#define MC_CMD_CSR_READ32_IN_STEP_LEN 4 +#define MC_CMD_CSR_READ32_IN_NUMWORDS_OFST 8 +#define MC_CMD_CSR_READ32_IN_NUMWORDS_LEN 4 + +/* MC_CMD_CSR_READ32_OUT msgresponse */ +#define MC_CMD_CSR_READ32_OUT_LENMIN 4 +#define MC_CMD_CSR_READ32_OUT_LENMAX 252 +#define MC_CMD_CSR_READ32_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_CSR_READ32_OUT_LEN(num) (0+4*(num)) +#define MC_CMD_CSR_READ32_OUT_BUFFER_NUM(len) (((len)-0)/4) +/* The last dword is the status, not a value read */ +#define MC_CMD_CSR_READ32_OUT_BUFFER_OFST 0 +#define MC_CMD_CSR_READ32_OUT_BUFFER_LEN 4 +#define MC_CMD_CSR_READ32_OUT_BUFFER_MINNUM 1 +#define MC_CMD_CSR_READ32_OUT_BUFFER_MAXNUM 63 +#define MC_CMD_CSR_READ32_OUT_BUFFER_MAXNUM_MCDI2 255 + + +/***********************************/ +/* MC_CMD_CSR_WRITE32 + * Write 32bit dwords to the indirect memory map. + */ +#define MC_CMD_CSR_WRITE32 0xd +#undef MC_CMD_0xd_PRIVILEGE_CTG + +#define MC_CMD_0xd_PRIVILEGE_CTG SRIOV_CTG_INSECURE + +/* MC_CMD_CSR_WRITE32_IN msgrequest */ +#define MC_CMD_CSR_WRITE32_IN_LENMIN 12 +#define MC_CMD_CSR_WRITE32_IN_LENMAX 252 +#define MC_CMD_CSR_WRITE32_IN_LENMAX_MCDI2 1020 +#define MC_CMD_CSR_WRITE32_IN_LEN(num) (8+4*(num)) +#define MC_CMD_CSR_WRITE32_IN_BUFFER_NUM(len) (((len)-8)/4) +/* Address */ +#define MC_CMD_CSR_WRITE32_IN_ADDR_OFST 0 +#define MC_CMD_CSR_WRITE32_IN_ADDR_LEN 4 +#define MC_CMD_CSR_WRITE32_IN_STEP_OFST 4 +#define MC_CMD_CSR_WRITE32_IN_STEP_LEN 4 +#define MC_CMD_CSR_WRITE32_IN_BUFFER_OFST 8 +#define MC_CMD_CSR_WRITE32_IN_BUFFER_LEN 4 +#define MC_CMD_CSR_WRITE32_IN_BUFFER_MINNUM 1 +#define MC_CMD_CSR_WRITE32_IN_BUFFER_MAXNUM 61 +#define MC_CMD_CSR_WRITE32_IN_BUFFER_MAXNUM_MCDI2 253 + +/* MC_CMD_CSR_WRITE32_OUT msgresponse */ +#define MC_CMD_CSR_WRITE32_OUT_LEN 4 +#define MC_CMD_CSR_WRITE32_OUT_STATUS_OFST 0 +#define MC_CMD_CSR_WRITE32_OUT_STATUS_LEN 4 + + +/***********************************/ +/* MC_CMD_HP + * These commands are used for HP related features. They are grouped under one + * MCDI command to avoid creating too many MCDI commands. + */ +#define MC_CMD_HP 0x54 +#undef MC_CMD_0x54_PRIVILEGE_CTG + +#define MC_CMD_0x54_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND + +/* MC_CMD_HP_IN msgrequest */ +#define MC_CMD_HP_IN_LEN 16 +/* HP OCSD sub-command. When address is not NULL, request activation of OCSD at + * the specified address with the specified interval.When address is NULL, + * INTERVAL is interpreted as a command: 0: stop OCSD / 1: Report OCSD current + * state / 2: (debug) Show temperature reported by one of the supported + * sensors. + */ +#define MC_CMD_HP_IN_SUBCMD_OFST 0 +#define MC_CMD_HP_IN_SUBCMD_LEN 4 +/* enum: OCSD (Option Card Sensor Data) sub-command. */ +#define MC_CMD_HP_IN_OCSD_SUBCMD 0x0 +/* enum: Last known valid HP sub-command. */ +#define MC_CMD_HP_IN_LAST_SUBCMD 0x0 +/* The address to the array of sensor fields. 
(Or NULL to use a sub-command.) + */ +#define MC_CMD_HP_IN_OCSD_ADDR_OFST 4 +#define MC_CMD_HP_IN_OCSD_ADDR_LEN 8 +#define MC_CMD_HP_IN_OCSD_ADDR_LO_OFST 4 +#define MC_CMD_HP_IN_OCSD_ADDR_HI_OFST 8 +/* The requested update interval, in seconds. (Or the sub-command if ADDR is + * NULL.) + */ +#define MC_CMD_HP_IN_OCSD_INTERVAL_OFST 12 +#define MC_CMD_HP_IN_OCSD_INTERVAL_LEN 4 + +/* MC_CMD_HP_OUT msgresponse */ +#define MC_CMD_HP_OUT_LEN 4 +#define MC_CMD_HP_OUT_OCSD_STATUS_OFST 0 +#define MC_CMD_HP_OUT_OCSD_STATUS_LEN 4 +/* enum: OCSD stopped for this card. */ +#define MC_CMD_HP_OUT_OCSD_STOPPED 0x1 +/* enum: OCSD was successfully started with the address provided. */ +#define MC_CMD_HP_OUT_OCSD_STARTED 0x2 +/* enum: OCSD was already started for this card. */ +#define MC_CMD_HP_OUT_OCSD_ALREADY_STARTED 0x3 + + +/***********************************/ +/* MC_CMD_STACKINFO + * Get stack information. + */ +#define MC_CMD_STACKINFO 0xf +#undef MC_CMD_0xf_PRIVILEGE_CTG + +#define MC_CMD_0xf_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_STACKINFO_IN msgrequest */ +#define MC_CMD_STACKINFO_IN_LEN 0 + +/* MC_CMD_STACKINFO_OUT msgresponse */ +#define MC_CMD_STACKINFO_OUT_LENMIN 12 +#define MC_CMD_STACKINFO_OUT_LENMAX 252 +#define MC_CMD_STACKINFO_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_STACKINFO_OUT_LEN(num) (0+12*(num)) +#define MC_CMD_STACKINFO_OUT_THREAD_INFO_NUM(len) (((len)-0)/12) +/* (thread ptr, stack size, free space) for each thread in system */ +#define MC_CMD_STACKINFO_OUT_THREAD_INFO_OFST 0 +#define MC_CMD_STACKINFO_OUT_THREAD_INFO_LEN 12 +#define MC_CMD_STACKINFO_OUT_THREAD_INFO_MINNUM 1 +#define MC_CMD_STACKINFO_OUT_THREAD_INFO_MAXNUM 21 +#define MC_CMD_STACKINFO_OUT_THREAD_INFO_MAXNUM_MCDI2 85 + + +/***********************************/ +/* MC_CMD_MDIO_READ + * MDIO register read. + */ +#define MC_CMD_MDIO_READ 0x10 +#undef MC_CMD_0x10_PRIVILEGE_CTG + +#define MC_CMD_0x10_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_MDIO_READ_IN msgrequest */ +#define MC_CMD_MDIO_READ_IN_LEN 16 +/* Bus number; there are two MDIO buses: one for the internal PHY, and one for + * external devices. + */ +#define MC_CMD_MDIO_READ_IN_BUS_OFST 0 +#define MC_CMD_MDIO_READ_IN_BUS_LEN 4 +/* enum: Internal. */ +#define MC_CMD_MDIO_BUS_INTERNAL 0x0 +/* enum: External. */ +#define MC_CMD_MDIO_BUS_EXTERNAL 0x1 +/* Port address */ +#define MC_CMD_MDIO_READ_IN_PRTAD_OFST 4 +#define MC_CMD_MDIO_READ_IN_PRTAD_LEN 4 +/* Device Address or clause 22. */ +#define MC_CMD_MDIO_READ_IN_DEVAD_OFST 8 +#define MC_CMD_MDIO_READ_IN_DEVAD_LEN 4 +/* enum: By default all the MCDI MDIO operations perform clause45 mode. If you + * want to use clause22 then set DEVAD = MC_CMD_MDIO_CLAUSE22. + */ +#define MC_CMD_MDIO_CLAUSE22 0x20 +/* Address */ +#define MC_CMD_MDIO_READ_IN_ADDR_OFST 12 +#define MC_CMD_MDIO_READ_IN_ADDR_LEN 4 + +/* MC_CMD_MDIO_READ_OUT msgresponse */ +#define MC_CMD_MDIO_READ_OUT_LEN 8 +/* Value */ +#define MC_CMD_MDIO_READ_OUT_VALUE_OFST 0 +#define MC_CMD_MDIO_READ_OUT_VALUE_LEN 4 +/* Status the MDIO commands return the raw status bits from the MDIO block. A + * "good" transaction should have the DONE bit set and all other bits clear. + */ +#define MC_CMD_MDIO_READ_OUT_STATUS_OFST 4 +#define MC_CMD_MDIO_READ_OUT_STATUS_LEN 4 +/* enum: Good. */ +#define MC_CMD_MDIO_STATUS_GOOD 0x8 + + +/***********************************/ +/* MC_CMD_MDIO_WRITE + * MDIO register write. 
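+ * As with MC_CMD_MDIO_READ, a clause-45 access sets DEVAD to the target MMD + * (for example 1 for PMA/PMD) and ADDR to the register address within that + * MMD, while a clause-22 access sets DEVAD to MC_CMD_MDIO_CLAUSE22 (0x20) and + * uses ADDR as the 5-bit register number.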
+ */ +#define MC_CMD_MDIO_WRITE 0x11 +#undef MC_CMD_0x11_PRIVILEGE_CTG + +#define MC_CMD_0x11_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_MDIO_WRITE_IN msgrequest */ +#define MC_CMD_MDIO_WRITE_IN_LEN 20 +/* Bus number; there are two MDIO buses: one for the internal PHY, and one for + * external devices. + */ +#define MC_CMD_MDIO_WRITE_IN_BUS_OFST 0 +#define MC_CMD_MDIO_WRITE_IN_BUS_LEN 4 +/* enum: Internal. */ +/* MC_CMD_MDIO_BUS_INTERNAL 0x0 */ +/* enum: External. */ +/* MC_CMD_MDIO_BUS_EXTERNAL 0x1 */ +/* Port address */ +#define MC_CMD_MDIO_WRITE_IN_PRTAD_OFST 4 +#define MC_CMD_MDIO_WRITE_IN_PRTAD_LEN 4 +/* Device Address or clause 22. */ +#define MC_CMD_MDIO_WRITE_IN_DEVAD_OFST 8 +#define MC_CMD_MDIO_WRITE_IN_DEVAD_LEN 4 +/* enum: By default all the MCDI MDIO operations perform clause45 mode. If you + * want to use clause22 then set DEVAD = MC_CMD_MDIO_CLAUSE22. + */ +/* MC_CMD_MDIO_CLAUSE22 0x20 */ +/* Address */ +#define MC_CMD_MDIO_WRITE_IN_ADDR_OFST 12 +#define MC_CMD_MDIO_WRITE_IN_ADDR_LEN 4 +/* Value */ +#define MC_CMD_MDIO_WRITE_IN_VALUE_OFST 16 +#define MC_CMD_MDIO_WRITE_IN_VALUE_LEN 4 + +/* MC_CMD_MDIO_WRITE_OUT msgresponse */ +#define MC_CMD_MDIO_WRITE_OUT_LEN 4 +/* Status; the MDIO commands return the raw status bits from the MDIO block. A + * "good" transaction should have the DONE bit set and all other bits clear. + */ +#define MC_CMD_MDIO_WRITE_OUT_STATUS_OFST 0 +#define MC_CMD_MDIO_WRITE_OUT_STATUS_LEN 4 +/* enum: Good. */ +/* MC_CMD_MDIO_STATUS_GOOD 0x8 */ + + +/***********************************/ +/* MC_CMD_DBI_WRITE + * Write DBI register(s). + */ +#define MC_CMD_DBI_WRITE 0x12 +#undef MC_CMD_0x12_PRIVILEGE_CTG + +#define MC_CMD_0x12_PRIVILEGE_CTG SRIOV_CTG_INSECURE + +/* MC_CMD_DBI_WRITE_IN msgrequest */ +#define MC_CMD_DBI_WRITE_IN_LENMIN 12 +#define MC_CMD_DBI_WRITE_IN_LENMAX 252 +#define MC_CMD_DBI_WRITE_IN_LENMAX_MCDI2 1020 +#define MC_CMD_DBI_WRITE_IN_LEN(num) (0+12*(num)) +#define MC_CMD_DBI_WRITE_IN_DBIWROP_NUM(len) (((len)-0)/12) +/* Each write op consists of an address (offset 0), byte enable/VF/CS2 (offset + * 32) and value (offset 64). See MC_CMD_DBIWROP_TYPEDEF. + */ +#define MC_CMD_DBI_WRITE_IN_DBIWROP_OFST 0 +#define MC_CMD_DBI_WRITE_IN_DBIWROP_LEN 12 +#define MC_CMD_DBI_WRITE_IN_DBIWROP_MINNUM 1 +#define MC_CMD_DBI_WRITE_IN_DBIWROP_MAXNUM 21 +#define MC_CMD_DBI_WRITE_IN_DBIWROP_MAXNUM_MCDI2 85 + +/* MC_CMD_DBI_WRITE_OUT msgresponse */ +#define MC_CMD_DBI_WRITE_OUT_LEN 0 + +/* MC_CMD_DBIWROP_TYPEDEF structuredef */ +#define MC_CMD_DBIWROP_TYPEDEF_LEN 12 +#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_OFST 0 +#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_LEN 4 +#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_LBN 0 +#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_WIDTH 32 +#define MC_CMD_DBIWROP_TYPEDEF_PARMS_OFST 4 +#define MC_CMD_DBIWROP_TYPEDEF_PARMS_LEN 4 +#define MC_CMD_DBIWROP_TYPEDEF_VF_NUM_LBN 16 +#define MC_CMD_DBIWROP_TYPEDEF_VF_NUM_WIDTH 16 +#define MC_CMD_DBIWROP_TYPEDEF_VF_ACTIVE_LBN 15 +#define MC_CMD_DBIWROP_TYPEDEF_VF_ACTIVE_WIDTH 1 +#define MC_CMD_DBIWROP_TYPEDEF_CS2_LBN 14 +#define MC_CMD_DBIWROP_TYPEDEF_CS2_WIDTH 1 +#define MC_CMD_DBIWROP_TYPEDEF_PARMS_LBN 32 +#define MC_CMD_DBIWROP_TYPEDEF_PARMS_WIDTH 32 +#define MC_CMD_DBIWROP_TYPEDEF_VALUE_OFST 8 +#define MC_CMD_DBIWROP_TYPEDEF_VALUE_LEN 4 +#define MC_CMD_DBIWROP_TYPEDEF_VALUE_LBN 64 +#define MC_CMD_DBIWROP_TYPEDEF_VALUE_WIDTH 32 + + +/***********************************/ +/* MC_CMD_PORT_READ32 + * Read a 32-bit register from the indirect port register map. 
The port to + * access is implied by the Shared memory channel used. + */ +#define MC_CMD_PORT_READ32 0x14 + +/* MC_CMD_PORT_READ32_IN msgrequest */ +#define MC_CMD_PORT_READ32_IN_LEN 4 +/* Address */ +#define MC_CMD_PORT_READ32_IN_ADDR_OFST 0 +#define MC_CMD_PORT_READ32_IN_ADDR_LEN 4 + +/* MC_CMD_PORT_READ32_OUT msgresponse */ +#define MC_CMD_PORT_READ32_OUT_LEN 8 +/* Value */ +#define MC_CMD_PORT_READ32_OUT_VALUE_OFST 0 +#define MC_CMD_PORT_READ32_OUT_VALUE_LEN 4 +/* Status */ +#define MC_CMD_PORT_READ32_OUT_STATUS_OFST 4 +#define MC_CMD_PORT_READ32_OUT_STATUS_LEN 4 + + +/***********************************/ +/* MC_CMD_PORT_WRITE32 + * Write a 32-bit register to the indirect port register map. The port to + * access is implied by the Shared memory channel used. + */ +#define MC_CMD_PORT_WRITE32 0x15 + +/* MC_CMD_PORT_WRITE32_IN msgrequest */ +#define MC_CMD_PORT_WRITE32_IN_LEN 8 +/* Address */ +#define MC_CMD_PORT_WRITE32_IN_ADDR_OFST 0 +#define MC_CMD_PORT_WRITE32_IN_ADDR_LEN 4 +/* Value */ +#define MC_CMD_PORT_WRITE32_IN_VALUE_OFST 4 +#define MC_CMD_PORT_WRITE32_IN_VALUE_LEN 4 + +/* MC_CMD_PORT_WRITE32_OUT msgresponse */ +#define MC_CMD_PORT_WRITE32_OUT_LEN 4 +/* Status */ +#define MC_CMD_PORT_WRITE32_OUT_STATUS_OFST 0 +#define MC_CMD_PORT_WRITE32_OUT_STATUS_LEN 4 + + +/***********************************/ +/* MC_CMD_PORT_READ128 + * Read a 128-bit register from the indirect port register map. The port to + * access is implied by the Shared memory channel used. + */ +#define MC_CMD_PORT_READ128 0x16 + +/* MC_CMD_PORT_READ128_IN msgrequest */ +#define MC_CMD_PORT_READ128_IN_LEN 4 +/* Address */ +#define MC_CMD_PORT_READ128_IN_ADDR_OFST 0 +#define MC_CMD_PORT_READ128_IN_ADDR_LEN 4 + +/* MC_CMD_PORT_READ128_OUT msgresponse */ +#define MC_CMD_PORT_READ128_OUT_LEN 20 +/* Value */ +#define MC_CMD_PORT_READ128_OUT_VALUE_OFST 0 +#define MC_CMD_PORT_READ128_OUT_VALUE_LEN 16 +/* Status */ +#define MC_CMD_PORT_READ128_OUT_STATUS_OFST 16 +#define MC_CMD_PORT_READ128_OUT_STATUS_LEN 4 + + +/***********************************/ +/* MC_CMD_PORT_WRITE128 + * Write a 128-bit register to the indirect port register map. The port to + * access is implied by the Shared memory channel used. + */ +#define MC_CMD_PORT_WRITE128 0x17 + +/* MC_CMD_PORT_WRITE128_IN msgrequest */ +#define MC_CMD_PORT_WRITE128_IN_LEN 20 +/* Address */ +#define MC_CMD_PORT_WRITE128_IN_ADDR_OFST 0 +#define MC_CMD_PORT_WRITE128_IN_ADDR_LEN 4 +/* Value */ +#define MC_CMD_PORT_WRITE128_IN_VALUE_OFST 4 +#define MC_CMD_PORT_WRITE128_IN_VALUE_LEN 16 + +/* MC_CMD_PORT_WRITE128_OUT msgresponse */ +#define MC_CMD_PORT_WRITE128_OUT_LEN 4 +/* Status */ +#define MC_CMD_PORT_WRITE128_OUT_STATUS_OFST 0 +#define MC_CMD_PORT_WRITE128_OUT_STATUS_LEN 4 + +/* MC_CMD_CAPABILITIES structuredef */ +#define MC_CMD_CAPABILITIES_LEN 4 +/* Small buf table. */ +#define MC_CMD_CAPABILITIES_SMALL_BUF_TBL_LBN 0 +#define MC_CMD_CAPABILITIES_SMALL_BUF_TBL_WIDTH 1 +/* Turbo mode (for Maranello). */ +#define MC_CMD_CAPABILITIES_TURBO_LBN 1 +#define MC_CMD_CAPABILITIES_TURBO_WIDTH 1 +/* Turbo mode active (for Maranello). */ +#define MC_CMD_CAPABILITIES_TURBO_ACTIVE_LBN 2 +#define MC_CMD_CAPABILITIES_TURBO_ACTIVE_WIDTH 1 +/* PTP offload. */ +#define MC_CMD_CAPABILITIES_PTP_LBN 3 +#define MC_CMD_CAPABILITIES_PTP_WIDTH 1 +/* AOE mode. */ +#define MC_CMD_CAPABILITIES_AOE_LBN 4 +#define MC_CMD_CAPABILITIES_AOE_WIDTH 1 +/* AOE mode active. */ +#define MC_CMD_CAPABILITIES_AOE_ACTIVE_LBN 5 +#define MC_CMD_CAPABILITIES_AOE_ACTIVE_WIDTH 1 +/* AOE mode active. 
*/ +#define MC_CMD_CAPABILITIES_FC_ACTIVE_LBN 6 +#define MC_CMD_CAPABILITIES_FC_ACTIVE_WIDTH 1 +#define MC_CMD_CAPABILITIES_RESERVED_LBN 7 +#define MC_CMD_CAPABILITIES_RESERVED_WIDTH 25 + + +/***********************************/ +/* MC_CMD_GET_BOARD_CFG + * Returns the MC firmware configuration structure. + */ +#define MC_CMD_GET_BOARD_CFG 0x18 +#undef MC_CMD_0x18_PRIVILEGE_CTG + +#define MC_CMD_0x18_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_BOARD_CFG_IN msgrequest */ +#define MC_CMD_GET_BOARD_CFG_IN_LEN 0 + +/* MC_CMD_GET_BOARD_CFG_OUT msgresponse */ +#define MC_CMD_GET_BOARD_CFG_OUT_LENMIN 96 +#define MC_CMD_GET_BOARD_CFG_OUT_LENMAX 136 +#define MC_CMD_GET_BOARD_CFG_OUT_LENMAX_MCDI2 136 +#define MC_CMD_GET_BOARD_CFG_OUT_LEN(num) (72+2*(num)) +#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_NUM(len) (((len)-72)/2) +#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_TYPE_OFST 0 +#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_TYPE_LEN 4 +#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_NAME_OFST 4 +#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_NAME_LEN 32 +/* Capabilities for Siena Port0 (see struct MC_CMD_CAPABILITIES). Unused on + * EF10 and later (use MC_CMD_GET_CAPABILITIES). + */ +#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT0_OFST 36 +#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT0_LEN 4 +/* Capabilities for Siena Port1 (see struct MC_CMD_CAPABILITIES). Unused on + * EF10 and later (use MC_CMD_GET_CAPABILITIES). + */ +#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT1_OFST 40 +#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT1_LEN 4 +/* Base MAC address for Siena Port0. Unused on EF10 and later (use + * MC_CMD_GET_MAC_ADDRESSES). + */ +#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST 44 +#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_LEN 6 +/* Base MAC address for Siena Port1. Unused on EF10 and later (use + * MC_CMD_GET_MAC_ADDRESSES). + */ +#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST 50 +#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_LEN 6 +/* Size of MAC address pool for Siena Port0. Unused on EF10 and later (use + * MC_CMD_GET_MAC_ADDRESSES). + */ +#define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT0_OFST 56 +#define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT0_LEN 4 +/* Size of MAC address pool for Siena Port1. Unused on EF10 and later (use + * MC_CMD_GET_MAC_ADDRESSES). + */ +#define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT1_OFST 60 +#define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT1_LEN 4 +/* Increment between addresses in MAC address pool for Siena Port0. Unused on + * EF10 and later (use MC_CMD_GET_MAC_ADDRESSES). + */ +#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT0_OFST 64 +#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT0_LEN 4 +/* Increment between addresses in MAC address pool for Siena Port1. Unused on + * EF10 and later (use MC_CMD_GET_MAC_ADDRESSES). + */ +#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT1_OFST 68 +#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT1_LEN 4 +/* Siena only. This field contains a 16-bit value for each of the types of + * NVRAM area. The values are defined in the firmware/mc/platform/.c file for a + * specific board type, but otherwise have no meaning to the MC; they are used + * by the driver to manage selection of appropriate firmware updates. Unused on + * EF10 and later (use MC_CMD_NVRAM_METADATA). 
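+ * The response length follows MC_CMD_GET_BOARD_CFG_OUT_LEN(num): the minimum + * of 12 subtype words gives 72 + 2 * 12 = 96 bytes (LENMIN) and the maximum of + * 32 words gives 72 + 2 * 32 = 136 bytes (LENMAX).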
+ */ +#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST 72 +#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN 2 +#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MINNUM 12 +#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM 32 +#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM_MCDI2 32 + + +/***********************************/ +/* MC_CMD_DBI_READX + * Read DBI register(s) -- extended functionality + */ +#define MC_CMD_DBI_READX 0x19 +#undef MC_CMD_0x19_PRIVILEGE_CTG + +#define MC_CMD_0x19_PRIVILEGE_CTG SRIOV_CTG_INSECURE + +/* MC_CMD_DBI_READX_IN msgrequest */ +#define MC_CMD_DBI_READX_IN_LENMIN 8 +#define MC_CMD_DBI_READX_IN_LENMAX 248 +#define MC_CMD_DBI_READX_IN_LENMAX_MCDI2 1016 +#define MC_CMD_DBI_READX_IN_LEN(num) (0+8*(num)) +#define MC_CMD_DBI_READX_IN_DBIRDOP_NUM(len) (((len)-0)/8) +/* Each Read op consists of an address (offset 0), VF/CS2) */ +#define MC_CMD_DBI_READX_IN_DBIRDOP_OFST 0 +#define MC_CMD_DBI_READX_IN_DBIRDOP_LEN 8 +#define MC_CMD_DBI_READX_IN_DBIRDOP_LO_OFST 0 +#define MC_CMD_DBI_READX_IN_DBIRDOP_HI_OFST 4 +#define MC_CMD_DBI_READX_IN_DBIRDOP_MINNUM 1 +#define MC_CMD_DBI_READX_IN_DBIRDOP_MAXNUM 31 +#define MC_CMD_DBI_READX_IN_DBIRDOP_MAXNUM_MCDI2 127 + +/* MC_CMD_DBI_READX_OUT msgresponse */ +#define MC_CMD_DBI_READX_OUT_LENMIN 4 +#define MC_CMD_DBI_READX_OUT_LENMAX 252 +#define MC_CMD_DBI_READX_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_DBI_READX_OUT_LEN(num) (0+4*(num)) +#define MC_CMD_DBI_READX_OUT_VALUE_NUM(len) (((len)-0)/4) +/* Value */ +#define MC_CMD_DBI_READX_OUT_VALUE_OFST 0 +#define MC_CMD_DBI_READX_OUT_VALUE_LEN 4 +#define MC_CMD_DBI_READX_OUT_VALUE_MINNUM 1 +#define MC_CMD_DBI_READX_OUT_VALUE_MAXNUM 63 +#define MC_CMD_DBI_READX_OUT_VALUE_MAXNUM_MCDI2 255 + +/* MC_CMD_DBIRDOP_TYPEDEF structuredef */ +#define MC_CMD_DBIRDOP_TYPEDEF_LEN 8 +#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_OFST 0 +#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_LEN 4 +#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_LBN 0 +#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_WIDTH 32 +#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_OFST 4 +#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_LEN 4 +#define MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_LBN 16 +#define MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_WIDTH 16 +#define MC_CMD_DBIRDOP_TYPEDEF_VF_ACTIVE_LBN 15 +#define MC_CMD_DBIRDOP_TYPEDEF_VF_ACTIVE_WIDTH 1 +#define MC_CMD_DBIRDOP_TYPEDEF_CS2_LBN 14 +#define MC_CMD_DBIRDOP_TYPEDEF_CS2_WIDTH 1 +#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_LBN 32 +#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_WIDTH 32 + + +/***********************************/ +/* MC_CMD_SET_RAND_SEED + * Set the 16byte seed for the MC pseudo-random generator. + */ +#define MC_CMD_SET_RAND_SEED 0x1a +#undef MC_CMD_0x1a_PRIVILEGE_CTG + +#define MC_CMD_0x1a_PRIVILEGE_CTG SRIOV_CTG_INSECURE + +/* MC_CMD_SET_RAND_SEED_IN msgrequest */ +#define MC_CMD_SET_RAND_SEED_IN_LEN 16 +/* Seed value. */ +#define MC_CMD_SET_RAND_SEED_IN_SEED_OFST 0 +#define MC_CMD_SET_RAND_SEED_IN_SEED_LEN 16 + +/* MC_CMD_SET_RAND_SEED_OUT msgresponse */ +#define MC_CMD_SET_RAND_SEED_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_LTSSM_HIST + * Retrieve the history of the LTSSM, if the build supports it. 
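+ * (LTSSM: the PCIe Link Training and Status State Machine.)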
+ */ +#define MC_CMD_LTSSM_HIST 0x1b + +/* MC_CMD_LTSSM_HIST_IN msgrequest */ +#define MC_CMD_LTSSM_HIST_IN_LEN 0 + +/* MC_CMD_LTSSM_HIST_OUT msgresponse */ +#define MC_CMD_LTSSM_HIST_OUT_LENMIN 0 +#define MC_CMD_LTSSM_HIST_OUT_LENMAX 252 +#define MC_CMD_LTSSM_HIST_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_LTSSM_HIST_OUT_LEN(num) (0+4*(num)) +#define MC_CMD_LTSSM_HIST_OUT_DATA_NUM(len) (((len)-0)/4) +/* variable number of LTSSM values, as bytes. The history is read-to-clear. */ +#define MC_CMD_LTSSM_HIST_OUT_DATA_OFST 0 +#define MC_CMD_LTSSM_HIST_OUT_DATA_LEN 4 +#define MC_CMD_LTSSM_HIST_OUT_DATA_MINNUM 0 +#define MC_CMD_LTSSM_HIST_OUT_DATA_MAXNUM 63 +#define MC_CMD_LTSSM_HIST_OUT_DATA_MAXNUM_MCDI2 255 + + +/***********************************/ +/* MC_CMD_DRV_ATTACH + * Inform MCPU that this port is managed on the host (i.e. driver active). For + * Huntington, also request the preferred datapath firmware to use if possible + * (it may not be possible for this request to be fulfilled; the driver must + * issue a subsequent MC_CMD_GET_CAPABILITIES command to determine which + * features are actually available). The FIRMWARE_ID field is ignored by older + * platforms. + */ +#define MC_CMD_DRV_ATTACH 0x1c +#undef MC_CMD_0x1c_PRIVILEGE_CTG + +#define MC_CMD_0x1c_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_DRV_ATTACH_IN msgrequest */ +#define MC_CMD_DRV_ATTACH_IN_LEN 12 +/* new state to set if UPDATE=1 */ +#define MC_CMD_DRV_ATTACH_IN_NEW_STATE_OFST 0 +#define MC_CMD_DRV_ATTACH_IN_NEW_STATE_LEN 4 +#define MC_CMD_DRV_ATTACH_LBN 0 +#define MC_CMD_DRV_ATTACH_WIDTH 1 +#define MC_CMD_DRV_ATTACH_IN_ATTACH_LBN 0 +#define MC_CMD_DRV_ATTACH_IN_ATTACH_WIDTH 1 +#define MC_CMD_DRV_PREBOOT_LBN 1 +#define MC_CMD_DRV_PREBOOT_WIDTH 1 +#define MC_CMD_DRV_ATTACH_IN_PREBOOT_LBN 1 +#define MC_CMD_DRV_ATTACH_IN_PREBOOT_WIDTH 1 +#define MC_CMD_DRV_ATTACH_IN_SUBVARIANT_AWARE_LBN 2 +#define MC_CMD_DRV_ATTACH_IN_SUBVARIANT_AWARE_WIDTH 1 +#define MC_CMD_DRV_ATTACH_IN_WANT_VI_SPREADING_LBN 3 +#define MC_CMD_DRV_ATTACH_IN_WANT_VI_SPREADING_WIDTH 1 +#define MC_CMD_DRV_ATTACH_IN_WANT_V2_LINKCHANGES_LBN 4 +#define MC_CMD_DRV_ATTACH_IN_WANT_V2_LINKCHANGES_WIDTH 1 +#define MC_CMD_DRV_ATTACH_IN_WANT_RX_VI_SPREADING_INHIBIT_LBN 5 +#define MC_CMD_DRV_ATTACH_IN_WANT_RX_VI_SPREADING_INHIBIT_WIDTH 1 +#define MC_CMD_DRV_ATTACH_IN_WANT_TX_ONLY_SPREADING_LBN 5 +#define MC_CMD_DRV_ATTACH_IN_WANT_TX_ONLY_SPREADING_WIDTH 1 +/* 1 to set new state, or 0 to just report the existing state */ +#define MC_CMD_DRV_ATTACH_IN_UPDATE_OFST 4 +#define MC_CMD_DRV_ATTACH_IN_UPDATE_LEN 4 +/* preferred datapath firmware (for Huntington; ignored for Siena) */ +#define MC_CMD_DRV_ATTACH_IN_FIRMWARE_ID_OFST 8 +#define MC_CMD_DRV_ATTACH_IN_FIRMWARE_ID_LEN 4 +/* enum: Prefer to use full featured firmware */ +#define MC_CMD_FW_FULL_FEATURED 0x0 +/* enum: Prefer to use firmware with fewer features but lower latency */ +#define MC_CMD_FW_LOW_LATENCY 0x1 +/* enum: Prefer to use firmware for SolarCapture packed stream mode */ +#define MC_CMD_FW_PACKED_STREAM 0x2 +/* enum: Prefer to use firmware with fewer features and simpler TX event + * batching but higher TX packet rate + */ +#define MC_CMD_FW_HIGH_TX_RATE 0x3 +/* enum: Reserved value */ +#define MC_CMD_FW_PACKED_STREAM_HASH_MODE_1 0x4 +/* enum: Prefer to use firmware with additional "rules engine" filtering + * support + */ +#define MC_CMD_FW_RULES_ENGINE 0x5 +/* enum: Prefer to use firmware with additional DPDK support */ +#define MC_CMD_FW_DPDK 0x6 +/* enum: Prefer to use "l3xudp" custom datapath firmware (see 
SF-119495-PD and + * bug69716) + */ +#define MC_CMD_FW_L3XUDP 0x7 +/* enum: Requests that the MC keep whatever datapath firmware is currently + * running. It's used for test purposes, where we want to be able to shmboot + * special test firmware variants. This option is only recognised in eftest + * (i.e. non-production) builds. + */ +#define MC_CMD_FW_KEEP_CURRENT_EFTEST_ONLY 0xfffffffe +/* enum: Only this option is allowed for non-admin functions */ +#define MC_CMD_FW_DONT_CARE 0xffffffff + +/* MC_CMD_DRV_ATTACH_IN_V2 msgrequest: Updated DRV_ATTACH to include driver + * version + */ +#define MC_CMD_DRV_ATTACH_IN_V2_LEN 32 +/* new state to set if UPDATE=1 */ +#define MC_CMD_DRV_ATTACH_IN_V2_NEW_STATE_OFST 0 +#define MC_CMD_DRV_ATTACH_IN_V2_NEW_STATE_LEN 4 +/* MC_CMD_DRV_ATTACH_LBN 0 */ +/* MC_CMD_DRV_ATTACH_WIDTH 1 */ +#define MC_CMD_DRV_ATTACH_IN_V2_ATTACH_LBN 0 +#define MC_CMD_DRV_ATTACH_IN_V2_ATTACH_WIDTH 1 +/* MC_CMD_DRV_PREBOOT_LBN 1 */ +/* MC_CMD_DRV_PREBOOT_WIDTH 1 */ +#define MC_CMD_DRV_ATTACH_IN_V2_PREBOOT_LBN 1 +#define MC_CMD_DRV_ATTACH_IN_V2_PREBOOT_WIDTH 1 +#define MC_CMD_DRV_ATTACH_IN_V2_SUBVARIANT_AWARE_LBN 2 +#define MC_CMD_DRV_ATTACH_IN_V2_SUBVARIANT_AWARE_WIDTH 1 +#define MC_CMD_DRV_ATTACH_IN_V2_WANT_VI_SPREADING_LBN 3 +#define MC_CMD_DRV_ATTACH_IN_V2_WANT_VI_SPREADING_WIDTH 1 +#define MC_CMD_DRV_ATTACH_IN_V2_WANT_V2_LINKCHANGES_LBN 4 +#define MC_CMD_DRV_ATTACH_IN_V2_WANT_V2_LINKCHANGES_WIDTH 1 +#define MC_CMD_DRV_ATTACH_IN_V2_WANT_RX_VI_SPREADING_INHIBIT_LBN 5 +#define MC_CMD_DRV_ATTACH_IN_V2_WANT_RX_VI_SPREADING_INHIBIT_WIDTH 1 +#define MC_CMD_DRV_ATTACH_IN_V2_WANT_TX_ONLY_SPREADING_LBN 5 +#define MC_CMD_DRV_ATTACH_IN_V2_WANT_TX_ONLY_SPREADING_WIDTH 1 +/* 1 to set new state, or 0 to just report the existing state */ +#define MC_CMD_DRV_ATTACH_IN_V2_UPDATE_OFST 4 +#define MC_CMD_DRV_ATTACH_IN_V2_UPDATE_LEN 4 +/* preferred datapath firmware (for Huntington; ignored for Siena) */ +#define MC_CMD_DRV_ATTACH_IN_V2_FIRMWARE_ID_OFST 8 +#define MC_CMD_DRV_ATTACH_IN_V2_FIRMWARE_ID_LEN 4 +/* enum: Prefer to use full featured firmware */ +/* MC_CMD_FW_FULL_FEATURED 0x0 */ +/* enum: Prefer to use firmware with fewer features but lower latency */ +/* MC_CMD_FW_LOW_LATENCY 0x1 */ +/* enum: Prefer to use firmware for SolarCapture packed stream mode */ +/* MC_CMD_FW_PACKED_STREAM 0x2 */ +/* enum: Prefer to use firmware with fewer features and simpler TX event + * batching but higher TX packet rate + */ +/* MC_CMD_FW_HIGH_TX_RATE 0x3 */ +/* enum: Reserved value */ +/* MC_CMD_FW_PACKED_STREAM_HASH_MODE_1 0x4 */ +/* enum: Prefer to use firmware with additional "rules engine" filtering + * support + */ +/* MC_CMD_FW_RULES_ENGINE 0x5 */ +/* enum: Prefer to use firmware with additional DPDK support */ +/* MC_CMD_FW_DPDK 0x6 */ +/* enum: Prefer to use "l3xudp" custom datapath firmware (see SF-119495-PD and + * bug69716) + */ +/* MC_CMD_FW_L3XUDP 0x7 */ +/* enum: Requests that the MC keep whatever datapath firmware is currently + * running. It's used for test purposes, where we want to be able to shmboot + * special test firmware variants. This option is only recognised in eftest + * (i.e. non-production) builds. + */ +/* MC_CMD_FW_KEEP_CURRENT_EFTEST_ONLY 0xfffffffe */ +/* enum: Only this option is allowed for non-admin functions */ +/* MC_CMD_FW_DONT_CARE 0xffffffff */ +/* Version of the driver to be reported by management protocols (e.g. NC-SI) + * handled by the NIC. This is a zero-terminated ASCII string. 
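+ * As the field is 20 bytes long, the string, including its terminating NUL, + * must fit within 20 bytes, i.e. at most 19 characters of version text.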
+ */ +#define MC_CMD_DRV_ATTACH_IN_V2_DRIVER_VERSION_OFST 12 +#define MC_CMD_DRV_ATTACH_IN_V2_DRIVER_VERSION_LEN 20 + +/* MC_CMD_DRV_ATTACH_OUT msgresponse */ +#define MC_CMD_DRV_ATTACH_OUT_LEN 4 +/* previous or existing state, see the bitmask at NEW_STATE */ +#define MC_CMD_DRV_ATTACH_OUT_OLD_STATE_OFST 0 +#define MC_CMD_DRV_ATTACH_OUT_OLD_STATE_LEN 4 + +/* MC_CMD_DRV_ATTACH_EXT_OUT msgresponse */ +#define MC_CMD_DRV_ATTACH_EXT_OUT_LEN 8 +/* previous or existing state, see the bitmask at NEW_STATE */ +#define MC_CMD_DRV_ATTACH_EXT_OUT_OLD_STATE_OFST 0 +#define MC_CMD_DRV_ATTACH_EXT_OUT_OLD_STATE_LEN 4 +/* Flags associated with this function */ +#define MC_CMD_DRV_ATTACH_EXT_OUT_FUNC_FLAGS_OFST 4 +#define MC_CMD_DRV_ATTACH_EXT_OUT_FUNC_FLAGS_LEN 4 +/* enum: Labels the lowest-numbered function visible to the OS */ +#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY 0x0 +/* enum: The function can control the link state of the physical port it is + * bound to. + */ +#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL 0x1 +/* enum: The function can perform privileged operations */ +#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED 0x2 +/* enum: The function does not have an active port associated with it. The port + * refers to the Sorrento external FPGA port. + */ +#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_NO_ACTIVE_PORT 0x3 +/* enum: If set, indicates that VI spreading is currently enabled. Will always + * indicate the current state, regardless of the value in the WANT_VI_SPREADING + * input. + */ +#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_VI_SPREADING_ENABLED 0x4 +/* enum: Used during development only. Should no longer be used. */ +#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_RX_VI_SPREADING_INHIBITED 0x5 +/* enum: If set, indicates that TX only spreading is enabled. Even-numbered + * TXQs will use one engine, and odd-numbered TXQs will use the other. This + * also has the effect that only even-numbered RXQs will receive traffic. + */ +#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TX_ONLY_VI_SPREADING_ENABLED 0x5 + + +/***********************************/ +/* MC_CMD_SHMUART + * Route UART output to circular buffer in shared memory instead. + */ +#define MC_CMD_SHMUART 0x1f + +/* MC_CMD_SHMUART_IN msgrequest */ +#define MC_CMD_SHMUART_IN_LEN 4 +/* ??? */ +#define MC_CMD_SHMUART_IN_FLAG_OFST 0 +#define MC_CMD_SHMUART_IN_FLAG_LEN 4 + +/* MC_CMD_SHMUART_OUT msgresponse */ +#define MC_CMD_SHMUART_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_PORT_RESET + * Generic per-port reset. There is no equivalent for per-board reset. Locks + * required: None; Return code: 0, ETIME. NOTE: This command is deprecated - + * use MC_CMD_ENTITY_RESET instead. + */ +#define MC_CMD_PORT_RESET 0x20 +#undef MC_CMD_0x20_PRIVILEGE_CTG + +#define MC_CMD_0x20_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_PORT_RESET_IN msgrequest */ +#define MC_CMD_PORT_RESET_IN_LEN 0 + +/* MC_CMD_PORT_RESET_OUT msgresponse */ +#define MC_CMD_PORT_RESET_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_ENTITY_RESET + * Generic per-resource reset. There is no equivalent for per-board reset. + * Locks required: None; Return code: 0, ETIME. NOTE: This command is an + * extended version of the deprecated MC_CMD_PORT_RESET with added fields. + */ +#define MC_CMD_ENTITY_RESET 0x20 +/* MC_CMD_0x20_PRIVILEGE_CTG SRIOV_CTG_GENERAL */ + +/* MC_CMD_ENTITY_RESET_IN msgrequest */ +#define MC_CMD_ENTITY_RESET_IN_LEN 4 +/* Optional flags field. Omitting this will perform a "legacy" reset action + * (TBD). 
+ */ +#define MC_CMD_ENTITY_RESET_IN_FLAG_OFST 0 +#define MC_CMD_ENTITY_RESET_IN_FLAG_LEN 4 +#define MC_CMD_ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET_LBN 0 +#define MC_CMD_ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET_WIDTH 1 + +/* MC_CMD_ENTITY_RESET_OUT msgresponse */ +#define MC_CMD_ENTITY_RESET_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_PCIE_CREDITS + * Read instantaneous and minimum flow control thresholds. + */ +#define MC_CMD_PCIE_CREDITS 0x21 + +/* MC_CMD_PCIE_CREDITS_IN msgrequest */ +#define MC_CMD_PCIE_CREDITS_IN_LEN 8 +/* poll period. 0 is disabled */ +#define MC_CMD_PCIE_CREDITS_IN_POLL_PERIOD_OFST 0 +#define MC_CMD_PCIE_CREDITS_IN_POLL_PERIOD_LEN 4 +/* wipe statistics */ +#define MC_CMD_PCIE_CREDITS_IN_WIPE_OFST 4 +#define MC_CMD_PCIE_CREDITS_IN_WIPE_LEN 4 + +/* MC_CMD_PCIE_CREDITS_OUT msgresponse */ +#define MC_CMD_PCIE_CREDITS_OUT_LEN 16 +#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_HDR_OFST 0 +#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_HDR_LEN 2 +#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_DATA_OFST 2 +#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_DATA_LEN 2 +#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_HDR_OFST 4 +#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_HDR_LEN 2 +#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_DATA_OFST 6 +#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_DATA_LEN 2 +#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_HDR_OFST 8 +#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_HDR_LEN 2 +#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_DATA_OFST 10 +#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_DATA_LEN 2 +#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_HDR_OFST 12 +#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_HDR_LEN 2 +#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_DATA_OFST 14 +#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_DATA_LEN 2 + + +/***********************************/ +/* MC_CMD_RXD_MONITOR + * Get histogram of RX queue fill level. 
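+ * The RING_LT_n and CACHE_LT_n fields in the response appear to form + * power-of-two histogram buckets (samples seen with a fill level below n), + * with RING_GE_256 and CACHE_GE_32 collecting everything larger.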
+ */ +#define MC_CMD_RXD_MONITOR 0x22 + +/* MC_CMD_RXD_MONITOR_IN msgrequest */ +#define MC_CMD_RXD_MONITOR_IN_LEN 12 +#define MC_CMD_RXD_MONITOR_IN_QID_OFST 0 +#define MC_CMD_RXD_MONITOR_IN_QID_LEN 4 +#define MC_CMD_RXD_MONITOR_IN_POLL_PERIOD_OFST 4 +#define MC_CMD_RXD_MONITOR_IN_POLL_PERIOD_LEN 4 +#define MC_CMD_RXD_MONITOR_IN_WIPE_OFST 8 +#define MC_CMD_RXD_MONITOR_IN_WIPE_LEN 4 + +/* MC_CMD_RXD_MONITOR_OUT msgresponse */ +#define MC_CMD_RXD_MONITOR_OUT_LEN 80 +#define MC_CMD_RXD_MONITOR_OUT_QID_OFST 0 +#define MC_CMD_RXD_MONITOR_OUT_QID_LEN 4 +#define MC_CMD_RXD_MONITOR_OUT_RING_FILL_OFST 4 +#define MC_CMD_RXD_MONITOR_OUT_RING_FILL_LEN 4 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_FILL_OFST 8 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_FILL_LEN 4 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_1_OFST 12 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_1_LEN 4 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_2_OFST 16 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_2_LEN 4 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_4_OFST 20 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_4_LEN 4 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_8_OFST 24 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_8_LEN 4 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_16_OFST 28 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_16_LEN 4 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_32_OFST 32 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_32_LEN 4 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_64_OFST 36 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_64_LEN 4 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_128_OFST 40 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_128_LEN 4 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_256_OFST 44 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_256_LEN 4 +#define MC_CMD_RXD_MONITOR_OUT_RING_GE_256_OFST 48 +#define MC_CMD_RXD_MONITOR_OUT_RING_GE_256_LEN 4 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_1_OFST 52 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_1_LEN 4 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_2_OFST 56 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_2_LEN 4 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_4_OFST 60 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_4_LEN 4 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_8_OFST 64 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_8_LEN 4 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_16_OFST 68 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_16_LEN 4 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_32_OFST 72 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_32_LEN 4 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_GE_32_OFST 76 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_GE_32_LEN 4 + + +/***********************************/ +/* MC_CMD_PUTS + * Copy the given ASCII string out onto UART and/or out of the network port. 
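+ * The request length follows MC_CMD_PUTS_IN_LEN(num): 12 bytes of header plus + * one byte per string character, so LENMIN of 13 corresponds to a single-byte + * string. The UART and PORT bits in DEST select the destination(s); DHOST + * presumably carries the destination MAC address when PORT is set.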
+ */ +#define MC_CMD_PUTS 0x23 +#undef MC_CMD_0x23_PRIVILEGE_CTG + +#define MC_CMD_0x23_PRIVILEGE_CTG SRIOV_CTG_INSECURE + +/* MC_CMD_PUTS_IN msgrequest */ +#define MC_CMD_PUTS_IN_LENMIN 13 +#define MC_CMD_PUTS_IN_LENMAX 252 +#define MC_CMD_PUTS_IN_LENMAX_MCDI2 1020 +#define MC_CMD_PUTS_IN_LEN(num) (12+1*(num)) +#define MC_CMD_PUTS_IN_STRING_NUM(len) (((len)-12)/1) +#define MC_CMD_PUTS_IN_DEST_OFST 0 +#define MC_CMD_PUTS_IN_DEST_LEN 4 +#define MC_CMD_PUTS_IN_UART_LBN 0 +#define MC_CMD_PUTS_IN_UART_WIDTH 1 +#define MC_CMD_PUTS_IN_PORT_LBN 1 +#define MC_CMD_PUTS_IN_PORT_WIDTH 1 +#define MC_CMD_PUTS_IN_DHOST_OFST 4 +#define MC_CMD_PUTS_IN_DHOST_LEN 6 +#define MC_CMD_PUTS_IN_STRING_OFST 12 +#define MC_CMD_PUTS_IN_STRING_LEN 1 +#define MC_CMD_PUTS_IN_STRING_MINNUM 1 +#define MC_CMD_PUTS_IN_STRING_MAXNUM 240 +#define MC_CMD_PUTS_IN_STRING_MAXNUM_MCDI2 1008 + +/* MC_CMD_PUTS_OUT msgresponse */ +#define MC_CMD_PUTS_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_GET_PHY_CFG + * Report PHY configuration. This guarantees to succeed even if the PHY is in a + * 'zombie' state. Locks required: None + */ +#define MC_CMD_GET_PHY_CFG 0x24 +#undef MC_CMD_0x24_PRIVILEGE_CTG + +#define MC_CMD_0x24_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_PHY_CFG_IN msgrequest */ +#define MC_CMD_GET_PHY_CFG_IN_LEN 0 + +/* MC_CMD_GET_PHY_CFG_OUT msgresponse */ +#define MC_CMD_GET_PHY_CFG_OUT_LEN 72 +/* flags */ +#define MC_CMD_GET_PHY_CFG_OUT_FLAGS_OFST 0 +#define MC_CMD_GET_PHY_CFG_OUT_FLAGS_LEN 4 +#define MC_CMD_GET_PHY_CFG_OUT_PRESENT_LBN 0 +#define MC_CMD_GET_PHY_CFG_OUT_PRESENT_WIDTH 1 +#define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_LBN 1 +#define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_WIDTH 1 +#define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_LBN 2 +#define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_WIDTH 1 +#define MC_CMD_GET_PHY_CFG_OUT_LOWPOWER_LBN 3 +#define MC_CMD_GET_PHY_CFG_OUT_LOWPOWER_WIDTH 1 +#define MC_CMD_GET_PHY_CFG_OUT_POWEROFF_LBN 4 +#define MC_CMD_GET_PHY_CFG_OUT_POWEROFF_WIDTH 1 +#define MC_CMD_GET_PHY_CFG_OUT_TXDIS_LBN 5 +#define MC_CMD_GET_PHY_CFG_OUT_TXDIS_WIDTH 1 +#define MC_CMD_GET_PHY_CFG_OUT_BIST_LBN 6 +#define MC_CMD_GET_PHY_CFG_OUT_BIST_WIDTH 1 +/* ?? 
*/ +#define MC_CMD_GET_PHY_CFG_OUT_TYPE_OFST 4 +#define MC_CMD_GET_PHY_CFG_OUT_TYPE_LEN 4 +/* Bitmask of supported capabilities */ +#define MC_CMD_GET_PHY_CFG_OUT_SUPPORTED_CAP_OFST 8 +#define MC_CMD_GET_PHY_CFG_OUT_SUPPORTED_CAP_LEN 4 +#define MC_CMD_PHY_CAP_10HDX_LBN 1 +#define MC_CMD_PHY_CAP_10HDX_WIDTH 1 +#define MC_CMD_PHY_CAP_10FDX_LBN 2 +#define MC_CMD_PHY_CAP_10FDX_WIDTH 1 +#define MC_CMD_PHY_CAP_100HDX_LBN 3 +#define MC_CMD_PHY_CAP_100HDX_WIDTH 1 +#define MC_CMD_PHY_CAP_100FDX_LBN 4 +#define MC_CMD_PHY_CAP_100FDX_WIDTH 1 +#define MC_CMD_PHY_CAP_1000HDX_LBN 5 +#define MC_CMD_PHY_CAP_1000HDX_WIDTH 1 +#define MC_CMD_PHY_CAP_1000FDX_LBN 6 +#define MC_CMD_PHY_CAP_1000FDX_WIDTH 1 +#define MC_CMD_PHY_CAP_10000FDX_LBN 7 +#define MC_CMD_PHY_CAP_10000FDX_WIDTH 1 +#define MC_CMD_PHY_CAP_PAUSE_LBN 8 +#define MC_CMD_PHY_CAP_PAUSE_WIDTH 1 +#define MC_CMD_PHY_CAP_ASYM_LBN 9 +#define MC_CMD_PHY_CAP_ASYM_WIDTH 1 +#define MC_CMD_PHY_CAP_AN_LBN 10 +#define MC_CMD_PHY_CAP_AN_WIDTH 1 +#define MC_CMD_PHY_CAP_40000FDX_LBN 11 +#define MC_CMD_PHY_CAP_40000FDX_WIDTH 1 +#define MC_CMD_PHY_CAP_DDM_LBN 12 +#define MC_CMD_PHY_CAP_DDM_WIDTH 1 +#define MC_CMD_PHY_CAP_100000FDX_LBN 13 +#define MC_CMD_PHY_CAP_100000FDX_WIDTH 1 +#define MC_CMD_PHY_CAP_25000FDX_LBN 14 +#define MC_CMD_PHY_CAP_25000FDX_WIDTH 1 +#define MC_CMD_PHY_CAP_50000FDX_LBN 15 +#define MC_CMD_PHY_CAP_50000FDX_WIDTH 1 +#define MC_CMD_PHY_CAP_BASER_FEC_LBN 16 +#define MC_CMD_PHY_CAP_BASER_FEC_WIDTH 1 +#define MC_CMD_PHY_CAP_BASER_FEC_REQUESTED_LBN 17 +#define MC_CMD_PHY_CAP_BASER_FEC_REQUESTED_WIDTH 1 +#define MC_CMD_PHY_CAP_RS_FEC_LBN 18 +#define MC_CMD_PHY_CAP_RS_FEC_WIDTH 1 +#define MC_CMD_PHY_CAP_RS_FEC_REQUESTED_LBN 19 +#define MC_CMD_PHY_CAP_RS_FEC_REQUESTED_WIDTH 1 +#define MC_CMD_PHY_CAP_25G_BASER_FEC_LBN 20 +#define MC_CMD_PHY_CAP_25G_BASER_FEC_WIDTH 1 +#define MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_LBN 21 +#define MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_WIDTH 1 +/* ?? */ +#define MC_CMD_GET_PHY_CFG_OUT_CHANNEL_OFST 12 +#define MC_CMD_GET_PHY_CFG_OUT_CHANNEL_LEN 4 +/* ?? */ +#define MC_CMD_GET_PHY_CFG_OUT_PRT_OFST 16 +#define MC_CMD_GET_PHY_CFG_OUT_PRT_LEN 4 +/* ?? */ +#define MC_CMD_GET_PHY_CFG_OUT_STATS_MASK_OFST 20 +#define MC_CMD_GET_PHY_CFG_OUT_STATS_MASK_LEN 4 +/* ?? */ +#define MC_CMD_GET_PHY_CFG_OUT_NAME_OFST 24 +#define MC_CMD_GET_PHY_CFG_OUT_NAME_LEN 20 +/* ?? */ +#define MC_CMD_GET_PHY_CFG_OUT_MEDIA_TYPE_OFST 44 +#define MC_CMD_GET_PHY_CFG_OUT_MEDIA_TYPE_LEN 4 +/* enum: Xaui. */ +#define MC_CMD_MEDIA_XAUI 0x1 +/* enum: CX4. */ +#define MC_CMD_MEDIA_CX4 0x2 +/* enum: KX4. */ +#define MC_CMD_MEDIA_KX4 0x3 +/* enum: XFP Far. */ +#define MC_CMD_MEDIA_XFP 0x4 +/* enum: SFP+. */ +#define MC_CMD_MEDIA_SFP_PLUS 0x5 +/* enum: 10GBaseT. */ +#define MC_CMD_MEDIA_BASE_T 0x6 +/* enum: QSFP+. */ +#define MC_CMD_MEDIA_QSFP_PLUS 0x7 +#define MC_CMD_GET_PHY_CFG_OUT_MMD_MASK_OFST 48 +#define MC_CMD_GET_PHY_CFG_OUT_MMD_MASK_LEN 4 +/* enum: Native clause 22 */ +#define MC_CMD_MMD_CLAUSE22 0x0 +#define MC_CMD_MMD_CLAUSE45_PMAPMD 0x1 /* enum */ +#define MC_CMD_MMD_CLAUSE45_WIS 0x2 /* enum */ +#define MC_CMD_MMD_CLAUSE45_PCS 0x3 /* enum */ +#define MC_CMD_MMD_CLAUSE45_PHYXS 0x4 /* enum */ +#define MC_CMD_MMD_CLAUSE45_DTEXS 0x5 /* enum */ +#define MC_CMD_MMD_CLAUSE45_TC 0x6 /* enum */ +#define MC_CMD_MMD_CLAUSE45_AN 0x7 /* enum */ +/* enum: Clause22 proxied over clause45 by PHY. 
*/ +#define MC_CMD_MMD_CLAUSE45_C22EXT 0x1d +#define MC_CMD_MMD_CLAUSE45_VEND1 0x1e /* enum */ +#define MC_CMD_MMD_CLAUSE45_VEND2 0x1f /* enum */ +#define MC_CMD_GET_PHY_CFG_OUT_REVISION_OFST 52 +#define MC_CMD_GET_PHY_CFG_OUT_REVISION_LEN 20 + + +/***********************************/ +/* MC_CMD_START_BIST + * Start a BIST test on the PHY. Locks required: PHY_LOCK if doing a PHY BIST + * Return code: 0, EINVAL, EACCES (if PHY_LOCK is not held) + */ +#define MC_CMD_START_BIST 0x25 +#undef MC_CMD_0x25_PRIVILEGE_CTG + +#define MC_CMD_0x25_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND + +/* MC_CMD_START_BIST_IN msgrequest */ +#define MC_CMD_START_BIST_IN_LEN 4 +/* Type of test. */ +#define MC_CMD_START_BIST_IN_TYPE_OFST 0 +#define MC_CMD_START_BIST_IN_TYPE_LEN 4 +/* enum: Run the PHY's short cable BIST. */ +#define MC_CMD_PHY_BIST_CABLE_SHORT 0x1 +/* enum: Run the PHY's long cable BIST. */ +#define MC_CMD_PHY_BIST_CABLE_LONG 0x2 +/* enum: Run BIST on the currently selected BPX Serdes (XAUI or XFI) . */ +#define MC_CMD_BPX_SERDES_BIST 0x3 +/* enum: Run the MC loopback tests. */ +#define MC_CMD_MC_LOOPBACK_BIST 0x4 +/* enum: Run the PHY's standard BIST. */ +#define MC_CMD_PHY_BIST 0x5 +/* enum: Run MC RAM test. */ +#define MC_CMD_MC_MEM_BIST 0x6 +/* enum: Run Port RAM test. */ +#define MC_CMD_PORT_MEM_BIST 0x7 +/* enum: Run register test. */ +#define MC_CMD_REG_BIST 0x8 + +/* MC_CMD_START_BIST_OUT msgresponse */ +#define MC_CMD_START_BIST_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_POLL_BIST + * Poll for BIST completion. Returns a single status code, and optionally some + * PHY specific bist output. The driver should only consume the BIST output + * after validating OUTLEN and MC_CMD_GET_PHY_CFG.TYPE. If a driver can't + * successfully parse the BIST output, it should still respect the pass/Fail in + * OUT.RESULT. Locks required: PHY_LOCK if doing a PHY BIST. Return code: 0, + * EACCES (if PHY_LOCK is not held). + */ +#define MC_CMD_POLL_BIST 0x26 +#undef MC_CMD_0x26_PRIVILEGE_CTG + +#define MC_CMD_0x26_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND + +/* MC_CMD_POLL_BIST_IN msgrequest */ +#define MC_CMD_POLL_BIST_IN_LEN 0 + +/* MC_CMD_POLL_BIST_OUT msgresponse */ +#define MC_CMD_POLL_BIST_OUT_LEN 8 +/* result */ +#define MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 +#define MC_CMD_POLL_BIST_OUT_RESULT_LEN 4 +/* enum: Running. */ +#define MC_CMD_POLL_BIST_RUNNING 0x1 +/* enum: Passed. */ +#define MC_CMD_POLL_BIST_PASSED 0x2 +/* enum: Failed. */ +#define MC_CMD_POLL_BIST_FAILED 0x3 +/* enum: Timed-out. 
*/ +#define MC_CMD_POLL_BIST_TIMEOUT 0x4 +#define MC_CMD_POLL_BIST_OUT_PRIVATE_OFST 4 +#define MC_CMD_POLL_BIST_OUT_PRIVATE_LEN 4 + +/* MC_CMD_POLL_BIST_OUT_SFT9001 msgresponse */ +#define MC_CMD_POLL_BIST_OUT_SFT9001_LEN 36 +/* result */ +/* MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */ +/* MC_CMD_POLL_BIST_OUT_RESULT_LEN 4 */ +/* Enum values, see field(s): */ +/* MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */ +#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A_OFST 4 +#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A_LEN 4 +#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_B_OFST 8 +#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_B_LEN 4 +#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_C_OFST 12 +#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_C_LEN 4 +#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_D_OFST 16 +#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_D_LEN 4 +/* Status of each channel A */ +#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_A_OFST 20 +#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_A_LEN 4 +/* enum: Ok. */ +#define MC_CMD_POLL_BIST_SFT9001_PAIR_OK 0x1 +/* enum: Open. */ +#define MC_CMD_POLL_BIST_SFT9001_PAIR_OPEN 0x2 +/* enum: Intra-pair short. */ +#define MC_CMD_POLL_BIST_SFT9001_INTRA_PAIR_SHORT 0x3 +/* enum: Inter-pair short. */ +#define MC_CMD_POLL_BIST_SFT9001_INTER_PAIR_SHORT 0x4 +/* enum: Busy. */ +#define MC_CMD_POLL_BIST_SFT9001_PAIR_BUSY 0x9 +/* Status of each channel B */ +#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_B_OFST 24 +#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_B_LEN 4 +/* Enum values, see field(s): */ +/* CABLE_STATUS_A */ +/* Status of each channel C */ +#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_C_OFST 28 +#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_C_LEN 4 +/* Enum values, see field(s): */ +/* CABLE_STATUS_A */ +/* Status of each channel D */ +#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_D_OFST 32 +#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_D_LEN 4 +/* Enum values, see field(s): */ +/* CABLE_STATUS_A */ + +/* MC_CMD_POLL_BIST_OUT_MRSFP msgresponse */ +#define MC_CMD_POLL_BIST_OUT_MRSFP_LEN 8 +/* result */ +/* MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */ +/* MC_CMD_POLL_BIST_OUT_RESULT_LEN 4 */ +/* Enum values, see field(s): */ +/* MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */ +#define MC_CMD_POLL_BIST_OUT_MRSFP_TEST_OFST 4 +#define MC_CMD_POLL_BIST_OUT_MRSFP_TEST_LEN 4 +/* enum: Complete. */ +#define MC_CMD_POLL_BIST_MRSFP_TEST_COMPLETE 0x0 +/* enum: Bus switch off I2C write. */ +#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_WRITE 0x1 +/* enum: Bus switch off I2C no access IO exp. */ +#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_NO_ACCESS_IO_EXP 0x2 +/* enum: Bus switch off I2C no access module. */ +#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_NO_ACCESS_MODULE 0x3 +/* enum: IO exp I2C configure. */ +#define MC_CMD_POLL_BIST_MRSFP_TEST_IO_EXP_I2C_CONFIGURE 0x4 +/* enum: Bus switch I2C no cross talk. */ +#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_I2C_NO_CROSSTALK 0x5 +/* enum: Module presence. */ +#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_PRESENCE 0x6 +/* enum: Module ID I2C access. */ +#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_ID_I2C_ACCESS 0x7 +/* enum: Module ID sane value. 
*/ +#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_ID_SANE_VALUE 0x8 + +/* MC_CMD_POLL_BIST_OUT_MEM msgresponse */ +#define MC_CMD_POLL_BIST_OUT_MEM_LEN 36 +/* result */ +/* MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */ +/* MC_CMD_POLL_BIST_OUT_RESULT_LEN 4 */ +/* Enum values, see field(s): */ +/* MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */ +#define MC_CMD_POLL_BIST_OUT_MEM_TEST_OFST 4 +#define MC_CMD_POLL_BIST_OUT_MEM_TEST_LEN 4 +/* enum: Test has completed. */ +#define MC_CMD_POLL_BIST_MEM_COMPLETE 0x0 +/* enum: RAM test - walk ones. */ +#define MC_CMD_POLL_BIST_MEM_MEM_WALK_ONES 0x1 +/* enum: RAM test - walk zeros. */ +#define MC_CMD_POLL_BIST_MEM_MEM_WALK_ZEROS 0x2 +/* enum: RAM test - walking inversions zeros/ones. */ +#define MC_CMD_POLL_BIST_MEM_MEM_INV_ZERO_ONE 0x3 +/* enum: RAM test - walking inversions checkerboard. */ +#define MC_CMD_POLL_BIST_MEM_MEM_INV_CHKBOARD 0x4 +/* enum: Register test - set / clear individual bits. */ +#define MC_CMD_POLL_BIST_MEM_REG 0x5 +/* enum: ECC error detected. */ +#define MC_CMD_POLL_BIST_MEM_ECC 0x6 +/* Failure address, only valid if result is POLL_BIST_FAILED */ +#define MC_CMD_POLL_BIST_OUT_MEM_ADDR_OFST 8 +#define MC_CMD_POLL_BIST_OUT_MEM_ADDR_LEN 4 +/* Bus or address space to which the failure address corresponds */ +#define MC_CMD_POLL_BIST_OUT_MEM_BUS_OFST 12 +#define MC_CMD_POLL_BIST_OUT_MEM_BUS_LEN 4 +/* enum: MC MIPS bus. */ +#define MC_CMD_POLL_BIST_MEM_BUS_MC 0x0 +/* enum: CSR IREG bus. */ +#define MC_CMD_POLL_BIST_MEM_BUS_CSR 0x1 +/* enum: RX0 DPCPU bus. */ +#define MC_CMD_POLL_BIST_MEM_BUS_DPCPU_RX 0x2 +/* enum: TX0 DPCPU bus. */ +#define MC_CMD_POLL_BIST_MEM_BUS_DPCPU_TX0 0x3 +/* enum: TX1 DPCPU bus. */ +#define MC_CMD_POLL_BIST_MEM_BUS_DPCPU_TX1 0x4 +/* enum: RX0 DICPU bus. */ +#define MC_CMD_POLL_BIST_MEM_BUS_DICPU_RX 0x5 +/* enum: TX DICPU bus. */ +#define MC_CMD_POLL_BIST_MEM_BUS_DICPU_TX 0x6 +/* enum: RX1 DPCPU bus. */ +#define MC_CMD_POLL_BIST_MEM_BUS_DPCPU_RX1 0x7 +/* enum: RX1 DICPU bus. */ +#define MC_CMD_POLL_BIST_MEM_BUS_DICPU_RX1 0x8 +/* Pattern written to RAM / register */ +#define MC_CMD_POLL_BIST_OUT_MEM_EXPECT_OFST 16 +#define MC_CMD_POLL_BIST_OUT_MEM_EXPECT_LEN 4 +/* Actual value read from RAM / register */ +#define MC_CMD_POLL_BIST_OUT_MEM_ACTUAL_OFST 20 +#define MC_CMD_POLL_BIST_OUT_MEM_ACTUAL_LEN 4 +/* ECC error mask */ +#define MC_CMD_POLL_BIST_OUT_MEM_ECC_OFST 24 +#define MC_CMD_POLL_BIST_OUT_MEM_ECC_LEN 4 +/* ECC parity error mask */ +#define MC_CMD_POLL_BIST_OUT_MEM_ECC_PARITY_OFST 28 +#define MC_CMD_POLL_BIST_OUT_MEM_ECC_PARITY_LEN 4 +/* ECC fatal error mask */ +#define MC_CMD_POLL_BIST_OUT_MEM_ECC_FATAL_OFST 32 +#define MC_CMD_POLL_BIST_OUT_MEM_ECC_FATAL_LEN 4 + + +/***********************************/ +/* MC_CMD_FLUSH_RX_QUEUES + * Flush receive queue(s). If SRIOV is enabled (via MC_CMD_SRIOV), then RXQ + * flushes should be initiated via this MCDI operation, rather than via + * directly writing FLUSH_CMD. + * + * The flush is completed (either done/fail) asynchronously (after this command + * returns). The driver must still wait for flush done/failure events as usual. 
+ */ +#define MC_CMD_FLUSH_RX_QUEUES 0x27 + +/* MC_CMD_FLUSH_RX_QUEUES_IN msgrequest */ +#define MC_CMD_FLUSH_RX_QUEUES_IN_LENMIN 4 +#define MC_CMD_FLUSH_RX_QUEUES_IN_LENMAX 252 +#define MC_CMD_FLUSH_RX_QUEUES_IN_LENMAX_MCDI2 1020 +#define MC_CMD_FLUSH_RX_QUEUES_IN_LEN(num) (0+4*(num)) +#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_NUM(len) (((len)-0)/4) +#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_OFST 0 +#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_LEN 4 +#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MINNUM 1 +#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM 63 +#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM_MCDI2 255 + +/* MC_CMD_FLUSH_RX_QUEUES_OUT msgresponse */ +#define MC_CMD_FLUSH_RX_QUEUES_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_GET_LOOPBACK_MODES + * Returns a bitmask of loopback modes available at each speed. + */ +#define MC_CMD_GET_LOOPBACK_MODES 0x28 +#undef MC_CMD_0x28_PRIVILEGE_CTG + +#define MC_CMD_0x28_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_LOOPBACK_MODES_IN msgrequest */ +#define MC_CMD_GET_LOOPBACK_MODES_IN_LEN 0 + +/* MC_CMD_GET_LOOPBACK_MODES_OUT msgresponse */ +#define MC_CMD_GET_LOOPBACK_MODES_OUT_LEN 40 +/* Supported loopbacks. */ +#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_OFST 0 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_LEN 8 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_LO_OFST 0 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_HI_OFST 4 +/* enum: None. */ +#define MC_CMD_LOOPBACK_NONE 0x0 +/* enum: Data. */ +#define MC_CMD_LOOPBACK_DATA 0x1 +/* enum: GMAC. */ +#define MC_CMD_LOOPBACK_GMAC 0x2 +/* enum: XGMII. */ +#define MC_CMD_LOOPBACK_XGMII 0x3 +/* enum: XGXS. */ +#define MC_CMD_LOOPBACK_XGXS 0x4 +/* enum: XAUI. */ +#define MC_CMD_LOOPBACK_XAUI 0x5 +/* enum: GMII. */ +#define MC_CMD_LOOPBACK_GMII 0x6 +/* enum: SGMII. */ +#define MC_CMD_LOOPBACK_SGMII 0x7 +/* enum: XGBR. */ +#define MC_CMD_LOOPBACK_XGBR 0x8 +/* enum: XFI. */ +#define MC_CMD_LOOPBACK_XFI 0x9 +/* enum: XAUI Far. */ +#define MC_CMD_LOOPBACK_XAUI_FAR 0xa +/* enum: GMII Far. */ +#define MC_CMD_LOOPBACK_GMII_FAR 0xb +/* enum: SGMII Far. */ +#define MC_CMD_LOOPBACK_SGMII_FAR 0xc +/* enum: XFI Far. */ +#define MC_CMD_LOOPBACK_XFI_FAR 0xd +/* enum: GPhy. */ +#define MC_CMD_LOOPBACK_GPHY 0xe +/* enum: PhyXS. */ +#define MC_CMD_LOOPBACK_PHYXS 0xf +/* enum: PCS. */ +#define MC_CMD_LOOPBACK_PCS 0x10 +/* enum: PMA-PMD. */ +#define MC_CMD_LOOPBACK_PMAPMD 0x11 +/* enum: Cross-Port. */ +#define MC_CMD_LOOPBACK_XPORT 0x12 +/* enum: XGMII-Wireside. */ +#define MC_CMD_LOOPBACK_XGMII_WS 0x13 +/* enum: XAUI Wireside. */ +#define MC_CMD_LOOPBACK_XAUI_WS 0x14 +/* enum: XAUI Wireside Far. */ +#define MC_CMD_LOOPBACK_XAUI_WS_FAR 0x15 +/* enum: XAUI Wireside near. */ +#define MC_CMD_LOOPBACK_XAUI_WS_NEAR 0x16 +/* enum: GMII Wireside. */ +#define MC_CMD_LOOPBACK_GMII_WS 0x17 +/* enum: XFI Wireside. */ +#define MC_CMD_LOOPBACK_XFI_WS 0x18 +/* enum: XFI Wireside Far. */ +#define MC_CMD_LOOPBACK_XFI_WS_FAR 0x19 +/* enum: PhyXS Wireside. */ +#define MC_CMD_LOOPBACK_PHYXS_WS 0x1a +/* enum: PMA lanes MAC-Serdes. */ +#define MC_CMD_LOOPBACK_PMA_INT 0x1b +/* enum: KR Serdes Parallel (Encoder). */ +#define MC_CMD_LOOPBACK_SD_NEAR 0x1c +/* enum: KR Serdes Serial. */ +#define MC_CMD_LOOPBACK_SD_FAR 0x1d +/* enum: PMA lanes MAC-Serdes Wireside. */ +#define MC_CMD_LOOPBACK_PMA_INT_WS 0x1e +/* enum: KR Serdes Parallel Wireside (Full PCS). */ +#define MC_CMD_LOOPBACK_SD_FEP2_WS 0x1f +/* enum: KR Serdes Parallel Wireside (Sym Aligner to TX). 
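+
+/* Illustrative sketch (editorial): MC_CMD_FLUSH_RX_QUEUES, defined a little
+ * earlier in this section, uses the variable-length request convention
+ * (LENMIN/LENMAX/LEN(num)). One way to build a request for 'nqueues' queue
+ * ids, assuming a little-endian host and the classic MCDI limits (not the
+ * _MCDI2 ones); the function name is an assumption.
+ */
+#include <stdint.h>
+#include <string.h>
+
+static size_t
+build_flush_rxq_request(uint8_t *buf, const uint32_t *qids,
+			unsigned int nqueues)
+{
+	unsigned int i;
+
+	if (nqueues < MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MINNUM ||
+	    nqueues > MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM)
+		return 0;	/* caller must split or reject the request */
+
+	for (i = 0; i < nqueues; i++)
+		memcpy(buf + MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_OFST +
+			     i * MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_LEN,
+		       &qids[i], MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_LEN);
+
+	/* Total request length in bytes: 4 bytes per queue id. */
+	return MC_CMD_FLUSH_RX_QUEUES_IN_LEN(nqueues);
+}
+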
*/ +#define MC_CMD_LOOPBACK_SD_FEP1_5_WS 0x20 +/* enum: KR Serdes Parallel Wireside (Deserializer to Serializer). */ +#define MC_CMD_LOOPBACK_SD_FEP_WS 0x21 +/* enum: KR Serdes Serial Wireside. */ +#define MC_CMD_LOOPBACK_SD_FES_WS 0x22 +/* enum: Near side of AOE Siena side port */ +#define MC_CMD_LOOPBACK_AOE_INT_NEAR 0x23 +/* enum: Medford Wireside datapath loopback */ +#define MC_CMD_LOOPBACK_DATA_WS 0x24 +/* enum: Force link up without setting up any physical loopback (snapper use + * only) + */ +#define MC_CMD_LOOPBACK_FORCE_EXT_LINK 0x25 +/* Supported loopbacks. */ +#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_OFST 8 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_LEN 8 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_LO_OFST 8 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_HI_OFST 12 +/* Enum values, see field(s): */ +/* 100M */ +/* Supported loopbacks. */ +#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_OFST 16 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_LEN 8 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_LO_OFST 16 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_HI_OFST 20 +/* Enum values, see field(s): */ +/* 100M */ +/* Supported loopbacks. */ +#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_OFST 24 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LEN 8 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LO_OFST 24 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_HI_OFST 28 +/* Enum values, see field(s): */ +/* 100M */ +/* Supported loopbacks. */ +#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_OFST 32 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_LEN 8 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_LO_OFST 32 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_HI_OFST 36 +/* Enum values, see field(s): */ +/* 100M */ + +/* MC_CMD_GET_LOOPBACK_MODES_OUT_V2 msgresponse: Supported loopback modes for + * newer NICs with 25G/50G/100G support + */ +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_LEN 64 +/* Supported loopbacks. */ +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_OFST 0 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_LEN 8 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_LO_OFST 0 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_HI_OFST 4 +/* enum: None. */ +/* MC_CMD_LOOPBACK_NONE 0x0 */ +/* enum: Data. */ +/* MC_CMD_LOOPBACK_DATA 0x1 */ +/* enum: GMAC. */ +/* MC_CMD_LOOPBACK_GMAC 0x2 */ +/* enum: XGMII. */ +/* MC_CMD_LOOPBACK_XGMII 0x3 */ +/* enum: XGXS. */ +/* MC_CMD_LOOPBACK_XGXS 0x4 */ +/* enum: XAUI. */ +/* MC_CMD_LOOPBACK_XAUI 0x5 */ +/* enum: GMII. */ +/* MC_CMD_LOOPBACK_GMII 0x6 */ +/* enum: SGMII. */ +/* MC_CMD_LOOPBACK_SGMII 0x7 */ +/* enum: XGBR. */ +/* MC_CMD_LOOPBACK_XGBR 0x8 */ +/* enum: XFI. */ +/* MC_CMD_LOOPBACK_XFI 0x9 */ +/* enum: XAUI Far. */ +/* MC_CMD_LOOPBACK_XAUI_FAR 0xa */ +/* enum: GMII Far. */ +/* MC_CMD_LOOPBACK_GMII_FAR 0xb */ +/* enum: SGMII Far. */ +/* MC_CMD_LOOPBACK_SGMII_FAR 0xc */ +/* enum: XFI Far. */ +/* MC_CMD_LOOPBACK_XFI_FAR 0xd */ +/* enum: GPhy. */ +/* MC_CMD_LOOPBACK_GPHY 0xe */ +/* enum: PhyXS. */ +/* MC_CMD_LOOPBACK_PHYXS 0xf */ +/* enum: PCS. */ +/* MC_CMD_LOOPBACK_PCS 0x10 */ +/* enum: PMA-PMD. */ +/* MC_CMD_LOOPBACK_PMAPMD 0x11 */ +/* enum: Cross-Port. */ +/* MC_CMD_LOOPBACK_XPORT 0x12 */ +/* enum: XGMII-Wireside. */ +/* MC_CMD_LOOPBACK_XGMII_WS 0x13 */ +/* enum: XAUI Wireside. */ +/* MC_CMD_LOOPBACK_XAUI_WS 0x14 */ +/* enum: XAUI Wireside Far. */ +/* MC_CMD_LOOPBACK_XAUI_WS_FAR 0x15 */ +/* enum: XAUI Wireside near. */ +/* MC_CMD_LOOPBACK_XAUI_WS_NEAR 0x16 */ +/* enum: GMII Wireside. */ +/* MC_CMD_LOOPBACK_GMII_WS 0x17 */ +/* enum: XFI Wireside. 
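+
+/* Illustrative sketch (editorial): each per-speed "Supported loopbacks"
+ * field above is a 64-bit mask split into LO/HI dwords. This assumes, as
+ * those bitmask fields suggest, that bit N corresponds to loopback mode N
+ * (e.g. MC_CMD_LOOPBACK_PMAPMD); little-endian host assumed, the function
+ * name is an assumption.
+ */
+#include <stdint.h>
+#include <string.h>
+
+static int
+loopback_100m_supported(const uint8_t *outbuf, unsigned int mode)
+{
+	uint32_t lo, hi;
+
+	memcpy(&lo, outbuf + MC_CMD_GET_LOOPBACK_MODES_OUT_100M_LO_OFST,
+	       sizeof(lo));
+	memcpy(&hi, outbuf + MC_CMD_GET_LOOPBACK_MODES_OUT_100M_HI_OFST,
+	       sizeof(hi));
+
+	return (int)(((((uint64_t)hi << 32) | lo) >> mode) & 1);
+}
+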
*/ +/* MC_CMD_LOOPBACK_XFI_WS 0x18 */ +/* enum: XFI Wireside Far. */ +/* MC_CMD_LOOPBACK_XFI_WS_FAR 0x19 */ +/* enum: PhyXS Wireside. */ +/* MC_CMD_LOOPBACK_PHYXS_WS 0x1a */ +/* enum: PMA lanes MAC-Serdes. */ +/* MC_CMD_LOOPBACK_PMA_INT 0x1b */ +/* enum: KR Serdes Parallel (Encoder). */ +/* MC_CMD_LOOPBACK_SD_NEAR 0x1c */ +/* enum: KR Serdes Serial. */ +/* MC_CMD_LOOPBACK_SD_FAR 0x1d */ +/* enum: PMA lanes MAC-Serdes Wireside. */ +/* MC_CMD_LOOPBACK_PMA_INT_WS 0x1e */ +/* enum: KR Serdes Parallel Wireside (Full PCS). */ +/* MC_CMD_LOOPBACK_SD_FEP2_WS 0x1f */ +/* enum: KR Serdes Parallel Wireside (Sym Aligner to TX). */ +/* MC_CMD_LOOPBACK_SD_FEP1_5_WS 0x20 */ +/* enum: KR Serdes Parallel Wireside (Deserializer to Serializer). */ +/* MC_CMD_LOOPBACK_SD_FEP_WS 0x21 */ +/* enum: KR Serdes Serial Wireside. */ +/* MC_CMD_LOOPBACK_SD_FES_WS 0x22 */ +/* enum: Near side of AOE Siena side port */ +/* MC_CMD_LOOPBACK_AOE_INT_NEAR 0x23 */ +/* enum: Medford Wireside datapath loopback */ +/* MC_CMD_LOOPBACK_DATA_WS 0x24 */ +/* enum: Force link up without setting up any physical loopback (snapper use + * only) + */ +/* MC_CMD_LOOPBACK_FORCE_EXT_LINK 0x25 */ +/* Supported loopbacks. */ +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_OFST 8 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_LEN 8 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_LO_OFST 8 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_HI_OFST 12 +/* Enum values, see field(s): */ +/* 100M */ +/* Supported loopbacks. */ +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_OFST 16 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_LEN 8 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_LO_OFST 16 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_HI_OFST 20 +/* Enum values, see field(s): */ +/* 100M */ +/* Supported loopbacks. */ +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_OFST 24 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_LEN 8 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_LO_OFST 24 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_HI_OFST 28 +/* Enum values, see field(s): */ +/* 100M */ +/* Supported loopbacks. */ +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_OFST 32 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_LEN 8 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_LO_OFST 32 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_HI_OFST 36 +/* Enum values, see field(s): */ +/* 100M */ +/* Supported 25G loopbacks. */ +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_OFST 40 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_LEN 8 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_LO_OFST 40 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_HI_OFST 44 +/* Enum values, see field(s): */ +/* 100M */ +/* Supported 50 loopbacks. */ +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_OFST 48 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_LEN 8 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_LO_OFST 48 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_HI_OFST 52 +/* Enum values, see field(s): */ +/* 100M */ +/* Supported 100G loopbacks. 
*/ +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_OFST 56 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_LEN 8 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_LO_OFST 56 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_HI_OFST 60 +/* Enum values, see field(s): */ +/* 100M */ + +/* AN_TYPE structuredef: Auto-negotiation types defined in IEEE802.3 */ +#define AN_TYPE_LEN 4 +#define AN_TYPE_TYPE_OFST 0 +#define AN_TYPE_TYPE_LEN 4 +/* enum: None, AN disabled or not supported */ +#define MC_CMD_AN_NONE 0x0 +/* enum: Clause 28 - BASE-T */ +#define MC_CMD_AN_CLAUSE28 0x1 +/* enum: Clause 37 - BASE-X */ +#define MC_CMD_AN_CLAUSE37 0x2 +/* enum: Clause 73 - BASE-R startup protocol for backplane and copper cable + * assemblies. Includes Clause 72/Clause 92 link-training. + */ +#define MC_CMD_AN_CLAUSE73 0x3 +#define AN_TYPE_TYPE_LBN 0 +#define AN_TYPE_TYPE_WIDTH 32 + +/* FEC_TYPE structuredef: Forward error correction types defined in IEEE802.3 + */ +#define FEC_TYPE_LEN 4 +#define FEC_TYPE_TYPE_OFST 0 +#define FEC_TYPE_TYPE_LEN 4 +/* enum: No FEC */ +#define MC_CMD_FEC_NONE 0x0 +/* enum: Clause 74 BASE-R FEC (a.k.a Firecode) */ +#define MC_CMD_FEC_BASER 0x1 +/* enum: Clause 91/Clause 108 Reed-Solomon FEC */ +#define MC_CMD_FEC_RS 0x2 +#define FEC_TYPE_TYPE_LBN 0 +#define FEC_TYPE_TYPE_WIDTH 32 + + +/***********************************/ +/* MC_CMD_GET_LINK + * Read the unified MAC/PHY link state. Locks required: None Return code: 0, + * ETIME. + */ +#define MC_CMD_GET_LINK 0x29 +#undef MC_CMD_0x29_PRIVILEGE_CTG + +#define MC_CMD_0x29_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_LINK_IN msgrequest */ +#define MC_CMD_GET_LINK_IN_LEN 0 + +/* MC_CMD_GET_LINK_OUT msgresponse */ +#define MC_CMD_GET_LINK_OUT_LEN 28 +/* Near-side advertised capabilities. Refer to + * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions. + */ +#define MC_CMD_GET_LINK_OUT_CAP_OFST 0 +#define MC_CMD_GET_LINK_OUT_CAP_LEN 4 +/* Link-partner advertised capabilities. Refer to + * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions. + */ +#define MC_CMD_GET_LINK_OUT_LP_CAP_OFST 4 +#define MC_CMD_GET_LINK_OUT_LP_CAP_LEN 4 +/* Autonegotiated speed in mbit/s. The link may still be down even if this + * reads non-zero. + */ +#define MC_CMD_GET_LINK_OUT_LINK_SPEED_OFST 8 +#define MC_CMD_GET_LINK_OUT_LINK_SPEED_LEN 4 +/* Current loopback setting. */ +#define MC_CMD_GET_LINK_OUT_LOOPBACK_MODE_OFST 12 +#define MC_CMD_GET_LINK_OUT_LOOPBACK_MODE_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */ +#define MC_CMD_GET_LINK_OUT_FLAGS_OFST 16 +#define MC_CMD_GET_LINK_OUT_FLAGS_LEN 4 +#define MC_CMD_GET_LINK_OUT_LINK_UP_LBN 0 +#define MC_CMD_GET_LINK_OUT_LINK_UP_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN 1 +#define MC_CMD_GET_LINK_OUT_FULL_DUPLEX_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_BPX_LINK_LBN 2 +#define MC_CMD_GET_LINK_OUT_BPX_LINK_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_PHY_LINK_LBN 3 +#define MC_CMD_GET_LINK_OUT_PHY_LINK_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_LINK_FAULT_RX_LBN 6 +#define MC_CMD_GET_LINK_OUT_LINK_FAULT_RX_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_LINK_FAULT_TX_LBN 7 +#define MC_CMD_GET_LINK_OUT_LINK_FAULT_TX_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_MODULE_UP_VALID_LBN 8 +#define MC_CMD_GET_LINK_OUT_MODULE_UP_VALID_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_MODULE_UP_LBN 9 +#define MC_CMD_GET_LINK_OUT_MODULE_UP_WIDTH 1 +/* This returns the negotiated flow control value. 
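+
+/* Illustrative sketch (editorial): a minimal parse of MC_CMD_GET_LINK_OUT,
+ * reporting link-up and the autonegotiated speed. Offsets and bit positions
+ * come from the definitions above; the struct/function names and the
+ * little-endian host are assumptions.
+ */
+#include <stdbool.h>
+#include <stdint.h>
+#include <string.h>
+
+struct link_state {
+	bool up;
+	uint32_t speed_mbps;	/* may be non-zero even when the link is down */
+};
+
+static void
+parse_get_link_out(const uint8_t *outbuf, struct link_state *ls)
+{
+	uint32_t flags;
+
+	memcpy(&ls->speed_mbps,
+	       outbuf + MC_CMD_GET_LINK_OUT_LINK_SPEED_OFST,
+	       MC_CMD_GET_LINK_OUT_LINK_SPEED_LEN);
+	memcpy(&flags, outbuf + MC_CMD_GET_LINK_OUT_FLAGS_OFST,
+	       MC_CMD_GET_LINK_OUT_FLAGS_LEN);
+
+	ls->up = ((flags >> MC_CMD_GET_LINK_OUT_LINK_UP_LBN) &
+		  ((1u << MC_CMD_GET_LINK_OUT_LINK_UP_WIDTH) - 1u)) != 0;
+}
+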
*/ +#define MC_CMD_GET_LINK_OUT_FCNTL_OFST 20 +#define MC_CMD_GET_LINK_OUT_FCNTL_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_SET_MAC/MC_CMD_SET_MAC_IN/FCNTL */ +#define MC_CMD_GET_LINK_OUT_MAC_FAULT_OFST 24 +#define MC_CMD_GET_LINK_OUT_MAC_FAULT_LEN 4 +#define MC_CMD_MAC_FAULT_XGMII_LOCAL_LBN 0 +#define MC_CMD_MAC_FAULT_XGMII_LOCAL_WIDTH 1 +#define MC_CMD_MAC_FAULT_XGMII_REMOTE_LBN 1 +#define MC_CMD_MAC_FAULT_XGMII_REMOTE_WIDTH 1 +#define MC_CMD_MAC_FAULT_SGMII_REMOTE_LBN 2 +#define MC_CMD_MAC_FAULT_SGMII_REMOTE_WIDTH 1 +#define MC_CMD_MAC_FAULT_PENDING_RECONFIG_LBN 3 +#define MC_CMD_MAC_FAULT_PENDING_RECONFIG_WIDTH 1 + +/* MC_CMD_GET_LINK_OUT_V2 msgresponse: Extended link state information */ +#define MC_CMD_GET_LINK_OUT_V2_LEN 44 +/* Near-side advertised capabilities. Refer to + * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions. + */ +#define MC_CMD_GET_LINK_OUT_V2_CAP_OFST 0 +#define MC_CMD_GET_LINK_OUT_V2_CAP_LEN 4 +/* Link-partner advertised capabilities. Refer to + * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions. + */ +#define MC_CMD_GET_LINK_OUT_V2_LP_CAP_OFST 4 +#define MC_CMD_GET_LINK_OUT_V2_LP_CAP_LEN 4 +/* Autonegotiated speed in mbit/s. The link may still be down even if this + * reads non-zero. + */ +#define MC_CMD_GET_LINK_OUT_V2_LINK_SPEED_OFST 8 +#define MC_CMD_GET_LINK_OUT_V2_LINK_SPEED_LEN 4 +/* Current loopback setting. */ +#define MC_CMD_GET_LINK_OUT_V2_LOOPBACK_MODE_OFST 12 +#define MC_CMD_GET_LINK_OUT_V2_LOOPBACK_MODE_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */ +#define MC_CMD_GET_LINK_OUT_V2_FLAGS_OFST 16 +#define MC_CMD_GET_LINK_OUT_V2_FLAGS_LEN 4 +#define MC_CMD_GET_LINK_OUT_V2_LINK_UP_LBN 0 +#define MC_CMD_GET_LINK_OUT_V2_LINK_UP_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_FULL_DUPLEX_LBN 1 +#define MC_CMD_GET_LINK_OUT_V2_FULL_DUPLEX_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_BPX_LINK_LBN 2 +#define MC_CMD_GET_LINK_OUT_V2_BPX_LINK_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_PHY_LINK_LBN 3 +#define MC_CMD_GET_LINK_OUT_V2_PHY_LINK_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_LINK_FAULT_RX_LBN 6 +#define MC_CMD_GET_LINK_OUT_V2_LINK_FAULT_RX_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_LINK_FAULT_TX_LBN 7 +#define MC_CMD_GET_LINK_OUT_V2_LINK_FAULT_TX_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_MODULE_UP_VALID_LBN 8 +#define MC_CMD_GET_LINK_OUT_V2_MODULE_UP_VALID_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_MODULE_UP_LBN 9 +#define MC_CMD_GET_LINK_OUT_V2_MODULE_UP_WIDTH 1 +/* This returns the negotiated flow control value. */ +#define MC_CMD_GET_LINK_OUT_V2_FCNTL_OFST 20 +#define MC_CMD_GET_LINK_OUT_V2_FCNTL_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_SET_MAC/MC_CMD_SET_MAC_IN/FCNTL */ +#define MC_CMD_GET_LINK_OUT_V2_MAC_FAULT_OFST 24 +#define MC_CMD_GET_LINK_OUT_V2_MAC_FAULT_LEN 4 +/* MC_CMD_MAC_FAULT_XGMII_LOCAL_LBN 0 */ +/* MC_CMD_MAC_FAULT_XGMII_LOCAL_WIDTH 1 */ +/* MC_CMD_MAC_FAULT_XGMII_REMOTE_LBN 1 */ +/* MC_CMD_MAC_FAULT_XGMII_REMOTE_WIDTH 1 */ +/* MC_CMD_MAC_FAULT_SGMII_REMOTE_LBN 2 */ +/* MC_CMD_MAC_FAULT_SGMII_REMOTE_WIDTH 1 */ +/* MC_CMD_MAC_FAULT_PENDING_RECONFIG_LBN 3 */ +/* MC_CMD_MAC_FAULT_PENDING_RECONFIG_WIDTH 1 */ +/* True local device capabilities (taking into account currently used PMD/MDI, + * e.g. plugged-in module). In general, subset of + * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP, but may include extra _FEC_REQUEST + * bits, if the PMD requires FEC. 0 if unknown (e.g. module unplugged). Equal + * to SUPPORTED_CAP for non-pluggable PMDs. 
Refer to + * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions. + */ +#define MC_CMD_GET_LINK_OUT_V2_LD_CAP_OFST 28 +#define MC_CMD_GET_LINK_OUT_V2_LD_CAP_LEN 4 +/* Auto-negotiation type used on the link */ +#define MC_CMD_GET_LINK_OUT_V2_AN_TYPE_OFST 32 +#define MC_CMD_GET_LINK_OUT_V2_AN_TYPE_LEN 4 +/* Enum values, see field(s): */ +/* AN_TYPE/TYPE */ +/* Forward error correction used on the link */ +#define MC_CMD_GET_LINK_OUT_V2_FEC_TYPE_OFST 36 +#define MC_CMD_GET_LINK_OUT_V2_FEC_TYPE_LEN 4 +/* Enum values, see field(s): */ +/* FEC_TYPE/TYPE */ +#define MC_CMD_GET_LINK_OUT_V2_EXT_FLAGS_OFST 40 +#define MC_CMD_GET_LINK_OUT_V2_EXT_FLAGS_LEN 4 +#define MC_CMD_GET_LINK_OUT_V2_PMD_MDI_CONNECTED_LBN 0 +#define MC_CMD_GET_LINK_OUT_V2_PMD_MDI_CONNECTED_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_PMD_READY_LBN 1 +#define MC_CMD_GET_LINK_OUT_V2_PMD_READY_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_PMD_LINK_UP_LBN 2 +#define MC_CMD_GET_LINK_OUT_V2_PMD_LINK_UP_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_PMA_LINK_UP_LBN 3 +#define MC_CMD_GET_LINK_OUT_V2_PMA_LINK_UP_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_PCS_LOCK_LBN 4 +#define MC_CMD_GET_LINK_OUT_V2_PCS_LOCK_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_ALIGN_LOCK_LBN 5 +#define MC_CMD_GET_LINK_OUT_V2_ALIGN_LOCK_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_HI_BER_LBN 6 +#define MC_CMD_GET_LINK_OUT_V2_HI_BER_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_FEC_LOCK_LBN 7 +#define MC_CMD_GET_LINK_OUT_V2_FEC_LOCK_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_AN_DONE_LBN 8 +#define MC_CMD_GET_LINK_OUT_V2_AN_DONE_WIDTH 1 + + +/***********************************/ +/* MC_CMD_SET_LINK + * Write the unified MAC/PHY link configuration. Locks required: None. Return + * code: 0, EINVAL, ETIME, EAGAIN + */ +#define MC_CMD_SET_LINK 0x2a +#undef MC_CMD_0x2a_PRIVILEGE_CTG + +#define MC_CMD_0x2a_PRIVILEGE_CTG SRIOV_CTG_LINK + +/* MC_CMD_SET_LINK_IN msgrequest */ +#define MC_CMD_SET_LINK_IN_LEN 16 +/* Near-side advertised capabilities. Refer to + * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions. + */ +#define MC_CMD_SET_LINK_IN_CAP_OFST 0 +#define MC_CMD_SET_LINK_IN_CAP_LEN 4 +/* Flags */ +#define MC_CMD_SET_LINK_IN_FLAGS_OFST 4 +#define MC_CMD_SET_LINK_IN_FLAGS_LEN 4 +#define MC_CMD_SET_LINK_IN_LOWPOWER_LBN 0 +#define MC_CMD_SET_LINK_IN_LOWPOWER_WIDTH 1 +#define MC_CMD_SET_LINK_IN_POWEROFF_LBN 1 +#define MC_CMD_SET_LINK_IN_POWEROFF_WIDTH 1 +#define MC_CMD_SET_LINK_IN_TXDIS_LBN 2 +#define MC_CMD_SET_LINK_IN_TXDIS_WIDTH 1 +/* Loopback mode. */ +#define MC_CMD_SET_LINK_IN_LOOPBACK_MODE_OFST 8 +#define MC_CMD_SET_LINK_IN_LOOPBACK_MODE_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */ +/* A loopback speed of "0" is supported, and means (choose any available + * speed). + */ +#define MC_CMD_SET_LINK_IN_LOOPBACK_SPEED_OFST 12 +#define MC_CMD_SET_LINK_IN_LOOPBACK_SPEED_LEN 4 + +/* MC_CMD_SET_LINK_IN_V2 msgrequest: Updated SET_LINK to include sequence + * number to ensure this SET_LINK command corresponds to the latest + * MODULECHANGE event. + */ +#define MC_CMD_SET_LINK_IN_V2_LEN 17 +/* Near-side advertised capabilities. Refer to + * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions. 
+ */ +#define MC_CMD_SET_LINK_IN_V2_CAP_OFST 0 +#define MC_CMD_SET_LINK_IN_V2_CAP_LEN 4 +/* Flags */ +#define MC_CMD_SET_LINK_IN_V2_FLAGS_OFST 4 +#define MC_CMD_SET_LINK_IN_V2_FLAGS_LEN 4 +#define MC_CMD_SET_LINK_IN_V2_LOWPOWER_LBN 0 +#define MC_CMD_SET_LINK_IN_V2_LOWPOWER_WIDTH 1 +#define MC_CMD_SET_LINK_IN_V2_POWEROFF_LBN 1 +#define MC_CMD_SET_LINK_IN_V2_POWEROFF_WIDTH 1 +#define MC_CMD_SET_LINK_IN_V2_TXDIS_LBN 2 +#define MC_CMD_SET_LINK_IN_V2_TXDIS_WIDTH 1 +/* Loopback mode. */ +#define MC_CMD_SET_LINK_IN_V2_LOOPBACK_MODE_OFST 8 +#define MC_CMD_SET_LINK_IN_V2_LOOPBACK_MODE_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */ +/* A loopback speed of "0" is supported, and means (choose any available + * speed). + */ +#define MC_CMD_SET_LINK_IN_V2_LOOPBACK_SPEED_OFST 12 +#define MC_CMD_SET_LINK_IN_V2_LOOPBACK_SPEED_LEN 4 +#define MC_CMD_SET_LINK_IN_V2_MODULE_SEQ_OFST 16 +#define MC_CMD_SET_LINK_IN_V2_MODULE_SEQ_LEN 1 +#define MC_CMD_SET_LINK_IN_V2_MODULE_SEQ_NUMBER_LBN 0 +#define MC_CMD_SET_LINK_IN_V2_MODULE_SEQ_NUMBER_WIDTH 7 +#define MC_CMD_SET_LINK_IN_V2_MODULE_SEQ_IGNORE_LBN 7 +#define MC_CMD_SET_LINK_IN_V2_MODULE_SEQ_IGNORE_WIDTH 1 + +/* MC_CMD_SET_LINK_OUT msgresponse */ +#define MC_CMD_SET_LINK_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_SET_ID_LED + * Set identification LED state. Locks required: None. Return code: 0, EINVAL + */ +#define MC_CMD_SET_ID_LED 0x2b +#undef MC_CMD_0x2b_PRIVILEGE_CTG + +#define MC_CMD_0x2b_PRIVILEGE_CTG SRIOV_CTG_LINK + +/* MC_CMD_SET_ID_LED_IN msgrequest */ +#define MC_CMD_SET_ID_LED_IN_LEN 4 +/* Set LED state. */ +#define MC_CMD_SET_ID_LED_IN_STATE_OFST 0 +#define MC_CMD_SET_ID_LED_IN_STATE_LEN 4 +#define MC_CMD_LED_OFF 0x0 /* enum */ +#define MC_CMD_LED_ON 0x1 /* enum */ +#define MC_CMD_LED_DEFAULT 0x2 /* enum */ + +/* MC_CMD_SET_ID_LED_OUT msgresponse */ +#define MC_CMD_SET_ID_LED_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_SET_MAC + * Set MAC configuration. Locks required: None. Return code: 0, EINVAL + */ +#define MC_CMD_SET_MAC 0x2c +#undef MC_CMD_0x2c_PRIVILEGE_CTG + +#define MC_CMD_0x2c_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_SET_MAC_IN msgrequest */ +#define MC_CMD_SET_MAC_IN_LEN 28 +/* The MTU is the MTU programmed directly into the XMAC/GMAC (inclusive of + * EtherII, VLAN, bug16011 padding). + */ +#define MC_CMD_SET_MAC_IN_MTU_OFST 0 +#define MC_CMD_SET_MAC_IN_MTU_LEN 4 +#define MC_CMD_SET_MAC_IN_DRAIN_OFST 4 +#define MC_CMD_SET_MAC_IN_DRAIN_LEN 4 +#define MC_CMD_SET_MAC_IN_ADDR_OFST 8 +#define MC_CMD_SET_MAC_IN_ADDR_LEN 8 +#define MC_CMD_SET_MAC_IN_ADDR_LO_OFST 8 +#define MC_CMD_SET_MAC_IN_ADDR_HI_OFST 12 +#define MC_CMD_SET_MAC_IN_REJECT_OFST 16 +#define MC_CMD_SET_MAC_IN_REJECT_LEN 4 +#define MC_CMD_SET_MAC_IN_REJECT_UNCST_LBN 0 +#define MC_CMD_SET_MAC_IN_REJECT_UNCST_WIDTH 1 +#define MC_CMD_SET_MAC_IN_REJECT_BRDCST_LBN 1 +#define MC_CMD_SET_MAC_IN_REJECT_BRDCST_WIDTH 1 +#define MC_CMD_SET_MAC_IN_FCNTL_OFST 20 +#define MC_CMD_SET_MAC_IN_FCNTL_LEN 4 +/* enum: Flow control is off. */ +#define MC_CMD_FCNTL_OFF 0x0 +/* enum: Respond to flow control. */ +#define MC_CMD_FCNTL_RESPOND 0x1 +/* enum: Respond to and Issue flow control. */ +#define MC_CMD_FCNTL_BIDIR 0x2 +/* enum: Auto neg flow control. */ +#define MC_CMD_FCNTL_AUTO 0x3 +/* enum: Priority flow control (eftest builds only). */ +#define MC_CMD_FCNTL_QBB 0x4 +/* enum: Issue flow control. 
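+
+/* Illustrative sketch (editorial): packing the single-byte MODULE_SEQ field
+ * of MC_CMD_SET_LINK_IN_V2 -- a 7-bit sequence number (matching the latest
+ * MODULECHANGE event) plus an IGNORE bit for callers that do not track
+ * module changes. The function name is an assumption; bit positions come
+ * from the definitions above.
+ */
+#include <stdbool.h>
+#include <stdint.h>
+
+static uint8_t
+set_link_v2_module_seq(uint8_t seq, bool ignore)
+{
+	uint8_t v;
+
+	v = (uint8_t)((seq &
+	     ((1u << MC_CMD_SET_LINK_IN_V2_MODULE_SEQ_NUMBER_WIDTH) - 1u))
+	     << MC_CMD_SET_LINK_IN_V2_MODULE_SEQ_NUMBER_LBN);
+	if (ignore)
+		v |= (uint8_t)(1u << MC_CMD_SET_LINK_IN_V2_MODULE_SEQ_IGNORE_LBN);
+	return v;
+}
+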
*/ +#define MC_CMD_FCNTL_GENERATE 0x5 +#define MC_CMD_SET_MAC_IN_FLAGS_OFST 24 +#define MC_CMD_SET_MAC_IN_FLAGS_LEN 4 +#define MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_LBN 0 +#define MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_WIDTH 1 + +/* MC_CMD_SET_MAC_EXT_IN msgrequest */ +#define MC_CMD_SET_MAC_EXT_IN_LEN 32 +/* The MTU is the MTU programmed directly into the XMAC/GMAC (inclusive of + * EtherII, VLAN, bug16011 padding). + */ +#define MC_CMD_SET_MAC_EXT_IN_MTU_OFST 0 +#define MC_CMD_SET_MAC_EXT_IN_MTU_LEN 4 +#define MC_CMD_SET_MAC_EXT_IN_DRAIN_OFST 4 +#define MC_CMD_SET_MAC_EXT_IN_DRAIN_LEN 4 +#define MC_CMD_SET_MAC_EXT_IN_ADDR_OFST 8 +#define MC_CMD_SET_MAC_EXT_IN_ADDR_LEN 8 +#define MC_CMD_SET_MAC_EXT_IN_ADDR_LO_OFST 8 +#define MC_CMD_SET_MAC_EXT_IN_ADDR_HI_OFST 12 +#define MC_CMD_SET_MAC_EXT_IN_REJECT_OFST 16 +#define MC_CMD_SET_MAC_EXT_IN_REJECT_LEN 4 +#define MC_CMD_SET_MAC_EXT_IN_REJECT_UNCST_LBN 0 +#define MC_CMD_SET_MAC_EXT_IN_REJECT_UNCST_WIDTH 1 +#define MC_CMD_SET_MAC_EXT_IN_REJECT_BRDCST_LBN 1 +#define MC_CMD_SET_MAC_EXT_IN_REJECT_BRDCST_WIDTH 1 +#define MC_CMD_SET_MAC_EXT_IN_FCNTL_OFST 20 +#define MC_CMD_SET_MAC_EXT_IN_FCNTL_LEN 4 +/* enum: Flow control is off. */ +/* MC_CMD_FCNTL_OFF 0x0 */ +/* enum: Respond to flow control. */ +/* MC_CMD_FCNTL_RESPOND 0x1 */ +/* enum: Respond to and Issue flow control. */ +/* MC_CMD_FCNTL_BIDIR 0x2 */ +/* enum: Auto neg flow control. */ +/* MC_CMD_FCNTL_AUTO 0x3 */ +/* enum: Priority flow control (eftest builds only). */ +/* MC_CMD_FCNTL_QBB 0x4 */ +/* enum: Issue flow control. */ +/* MC_CMD_FCNTL_GENERATE 0x5 */ +#define MC_CMD_SET_MAC_EXT_IN_FLAGS_OFST 24 +#define MC_CMD_SET_MAC_EXT_IN_FLAGS_LEN 4 +#define MC_CMD_SET_MAC_EXT_IN_FLAG_INCLUDE_FCS_LBN 0 +#define MC_CMD_SET_MAC_EXT_IN_FLAG_INCLUDE_FCS_WIDTH 1 +/* Select which parameters to configure. A parameter will only be modified if + * the corresponding control flag is set. If SET_MAC_ENHANCED is not set in + * capabilities then this field is ignored (and all flags are assumed to be + * set). + */ +#define MC_CMD_SET_MAC_EXT_IN_CONTROL_OFST 28 +#define MC_CMD_SET_MAC_EXT_IN_CONTROL_LEN 4 +#define MC_CMD_SET_MAC_EXT_IN_CFG_MTU_LBN 0 +#define MC_CMD_SET_MAC_EXT_IN_CFG_MTU_WIDTH 1 +#define MC_CMD_SET_MAC_EXT_IN_CFG_DRAIN_LBN 1 +#define MC_CMD_SET_MAC_EXT_IN_CFG_DRAIN_WIDTH 1 +#define MC_CMD_SET_MAC_EXT_IN_CFG_REJECT_LBN 2 +#define MC_CMD_SET_MAC_EXT_IN_CFG_REJECT_WIDTH 1 +#define MC_CMD_SET_MAC_EXT_IN_CFG_FCNTL_LBN 3 +#define MC_CMD_SET_MAC_EXT_IN_CFG_FCNTL_WIDTH 1 +#define MC_CMD_SET_MAC_EXT_IN_CFG_FCS_LBN 4 +#define MC_CMD_SET_MAC_EXT_IN_CFG_FCS_WIDTH 1 + +/* MC_CMD_SET_MAC_OUT msgresponse */ +#define MC_CMD_SET_MAC_OUT_LEN 0 + +/* MC_CMD_SET_MAC_V2_OUT msgresponse */ +#define MC_CMD_SET_MAC_V2_OUT_LEN 4 +/* MTU as configured after processing the request. See comment at + * MC_CMD_SET_MAC_IN/MTU. To query MTU without doing any changes, set CONTROL + * to 0. + */ +#define MC_CMD_SET_MAC_V2_OUT_MTU_OFST 0 +#define MC_CMD_SET_MAC_V2_OUT_MTU_LEN 4 + + +/***********************************/ +/* MC_CMD_PHY_STATS + * Get generic PHY statistics. This call returns the statistics for a generic + * PHY in a sparse array (indexed by the enumerate). Each value is represented + * by a 32bit number. If the DMA_ADDR is 0, then no DMA is performed, and the + * statistics may be read from the message response. If DMA_ADDR != 0, then the + * statistics are dmad to that (page-aligned location). Locks required: None. 
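+
+/* Illustrative sketch (editorial): with MC_CMD_SET_MAC_EXT, only the
+ * parameters whose CONTROL bits are set are modified (given the
+ * SET_MAC_ENHANCED capability). The request below changes just the MTU;
+ * with CONTROL left at 0 the same command would only query the current MTU
+ * via MC_CMD_SET_MAC_V2_OUT. Little-endian host assumed; the function name
+ * is an assumption.
+ */
+#include <stdint.h>
+#include <string.h>
+
+static void
+build_set_mac_ext_mtu(uint8_t buf[MC_CMD_SET_MAC_EXT_IN_LEN], uint32_t mtu)
+{
+	uint32_t control = 1u << MC_CMD_SET_MAC_EXT_IN_CFG_MTU_LBN;
+
+	memset(buf, 0, MC_CMD_SET_MAC_EXT_IN_LEN);
+	memcpy(buf + MC_CMD_SET_MAC_EXT_IN_MTU_OFST, &mtu,
+	       MC_CMD_SET_MAC_EXT_IN_MTU_LEN);
+	memcpy(buf + MC_CMD_SET_MAC_EXT_IN_CONTROL_OFST, &control,
+	       MC_CMD_SET_MAC_EXT_IN_CONTROL_LEN);
+}
+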
+ * Returns: 0, ETIME + */ +#define MC_CMD_PHY_STATS 0x2d +#undef MC_CMD_0x2d_PRIVILEGE_CTG + +#define MC_CMD_0x2d_PRIVILEGE_CTG SRIOV_CTG_LINK + +/* MC_CMD_PHY_STATS_IN msgrequest */ +#define MC_CMD_PHY_STATS_IN_LEN 8 +/* ??? */ +#define MC_CMD_PHY_STATS_IN_DMA_ADDR_OFST 0 +#define MC_CMD_PHY_STATS_IN_DMA_ADDR_LEN 8 +#define MC_CMD_PHY_STATS_IN_DMA_ADDR_LO_OFST 0 +#define MC_CMD_PHY_STATS_IN_DMA_ADDR_HI_OFST 4 + +/* MC_CMD_PHY_STATS_OUT_DMA msgresponse */ +#define MC_CMD_PHY_STATS_OUT_DMA_LEN 0 + +/* MC_CMD_PHY_STATS_OUT_NO_DMA msgresponse */ +#define MC_CMD_PHY_STATS_OUT_NO_DMA_LEN (((MC_CMD_PHY_NSTATS*32))>>3) +#define MC_CMD_PHY_STATS_OUT_NO_DMA_STATISTICS_OFST 0 +#define MC_CMD_PHY_STATS_OUT_NO_DMA_STATISTICS_LEN 4 +#define MC_CMD_PHY_STATS_OUT_NO_DMA_STATISTICS_NUM MC_CMD_PHY_NSTATS +/* enum: OUI. */ +#define MC_CMD_OUI 0x0 +/* enum: PMA-PMD Link Up. */ +#define MC_CMD_PMA_PMD_LINK_UP 0x1 +/* enum: PMA-PMD RX Fault. */ +#define MC_CMD_PMA_PMD_RX_FAULT 0x2 +/* enum: PMA-PMD TX Fault. */ +#define MC_CMD_PMA_PMD_TX_FAULT 0x3 +/* enum: PMA-PMD Signal */ +#define MC_CMD_PMA_PMD_SIGNAL 0x4 +/* enum: PMA-PMD SNR A. */ +#define MC_CMD_PMA_PMD_SNR_A 0x5 +/* enum: PMA-PMD SNR B. */ +#define MC_CMD_PMA_PMD_SNR_B 0x6 +/* enum: PMA-PMD SNR C. */ +#define MC_CMD_PMA_PMD_SNR_C 0x7 +/* enum: PMA-PMD SNR D. */ +#define MC_CMD_PMA_PMD_SNR_D 0x8 +/* enum: PCS Link Up. */ +#define MC_CMD_PCS_LINK_UP 0x9 +/* enum: PCS RX Fault. */ +#define MC_CMD_PCS_RX_FAULT 0xa +/* enum: PCS TX Fault. */ +#define MC_CMD_PCS_TX_FAULT 0xb +/* enum: PCS BER. */ +#define MC_CMD_PCS_BER 0xc +/* enum: PCS Block Errors. */ +#define MC_CMD_PCS_BLOCK_ERRORS 0xd +/* enum: PhyXS Link Up. */ +#define MC_CMD_PHYXS_LINK_UP 0xe +/* enum: PhyXS RX Fault. */ +#define MC_CMD_PHYXS_RX_FAULT 0xf +/* enum: PhyXS TX Fault. */ +#define MC_CMD_PHYXS_TX_FAULT 0x10 +/* enum: PhyXS Align. */ +#define MC_CMD_PHYXS_ALIGN 0x11 +/* enum: PhyXS Sync. */ +#define MC_CMD_PHYXS_SYNC 0x12 +/* enum: AN link-up. */ +#define MC_CMD_AN_LINK_UP 0x13 +/* enum: AN Complete. */ +#define MC_CMD_AN_COMPLETE 0x14 +/* enum: AN 10GBaseT Status. */ +#define MC_CMD_AN_10GBT_STATUS 0x15 +/* enum: Clause 22 Link-Up. */ +#define MC_CMD_CL22_LINK_UP 0x16 +/* enum: (Last entry) */ +#define MC_CMD_PHY_NSTATS 0x17 + + +/***********************************/ +/* MC_CMD_MAC_STATS + * Get generic MAC statistics. This call returns unified statistics maintained + * by the MC as it switches between the GMAC and XMAC. The MC will write out + * all supported stats. The driver should zero initialise the buffer to + * guarantee consistent results. If the DMA_ADDR is 0, then no DMA is + * performed, and the statistics may be read from the message response. If + * DMA_ADDR != 0, then the statistics are dmad to that (page-aligned location). + * Locks required: None. The PERIODIC_CLEAR option is not used and now has no + * effect. Returns: 0, ETIME + */ +#define MC_CMD_MAC_STATS 0x2e +#undef MC_CMD_0x2e_PRIVILEGE_CTG + +#define MC_CMD_0x2e_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_MAC_STATS_IN msgrequest */ +#define MC_CMD_MAC_STATS_IN_LEN 20 +/* ??? 
*/ +#define MC_CMD_MAC_STATS_IN_DMA_ADDR_OFST 0 +#define MC_CMD_MAC_STATS_IN_DMA_ADDR_LEN 8 +#define MC_CMD_MAC_STATS_IN_DMA_ADDR_LO_OFST 0 +#define MC_CMD_MAC_STATS_IN_DMA_ADDR_HI_OFST 4 +#define MC_CMD_MAC_STATS_IN_CMD_OFST 8 +#define MC_CMD_MAC_STATS_IN_CMD_LEN 4 +#define MC_CMD_MAC_STATS_IN_DMA_LBN 0 +#define MC_CMD_MAC_STATS_IN_DMA_WIDTH 1 +#define MC_CMD_MAC_STATS_IN_CLEAR_LBN 1 +#define MC_CMD_MAC_STATS_IN_CLEAR_WIDTH 1 +#define MC_CMD_MAC_STATS_IN_PERIODIC_CHANGE_LBN 2 +#define MC_CMD_MAC_STATS_IN_PERIODIC_CHANGE_WIDTH 1 +#define MC_CMD_MAC_STATS_IN_PERIODIC_ENABLE_LBN 3 +#define MC_CMD_MAC_STATS_IN_PERIODIC_ENABLE_WIDTH 1 +#define MC_CMD_MAC_STATS_IN_PERIODIC_CLEAR_LBN 4 +#define MC_CMD_MAC_STATS_IN_PERIODIC_CLEAR_WIDTH 1 +#define MC_CMD_MAC_STATS_IN_PERIODIC_NOEVENT_LBN 5 +#define MC_CMD_MAC_STATS_IN_PERIODIC_NOEVENT_WIDTH 1 +#define MC_CMD_MAC_STATS_IN_PERIOD_MS_LBN 16 +#define MC_CMD_MAC_STATS_IN_PERIOD_MS_WIDTH 16 +/* DMA length. Should be set to MAC_STATS_NUM_STATS * sizeof(uint64_t), as + * returned by MC_CMD_GET_CAPABILITIES_V4_OUT. For legacy firmware not + * supporting MC_CMD_GET_CAPABILITIES_V4_OUT, DMA_LEN should be set to + * MC_CMD_MAC_NSTATS * sizeof(uint64_t) + */ +#define MC_CMD_MAC_STATS_IN_DMA_LEN_OFST 12 +#define MC_CMD_MAC_STATS_IN_DMA_LEN_LEN 4 +/* port id so vadapter stats can be provided */ +#define MC_CMD_MAC_STATS_IN_PORT_ID_OFST 16 +#define MC_CMD_MAC_STATS_IN_PORT_ID_LEN 4 + +/* MC_CMD_MAC_STATS_OUT_DMA msgresponse */ +#define MC_CMD_MAC_STATS_OUT_DMA_LEN 0 + +/* MC_CMD_MAC_STATS_OUT_NO_DMA msgresponse */ +#define MC_CMD_MAC_STATS_OUT_NO_DMA_LEN (((MC_CMD_MAC_NSTATS*64))>>3) +#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_OFST 0 +#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_LEN 8 +#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_LO_OFST 0 +#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_HI_OFST 4 +#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS +#define MC_CMD_MAC_GENERATION_START 0x0 /* enum */ +#define MC_CMD_MAC_DMABUF_START 0x1 /* enum */ +#define MC_CMD_MAC_TX_PKTS 0x1 /* enum */ +#define MC_CMD_MAC_TX_PAUSE_PKTS 0x2 /* enum */ +#define MC_CMD_MAC_TX_CONTROL_PKTS 0x3 /* enum */ +#define MC_CMD_MAC_TX_UNICAST_PKTS 0x4 /* enum */ +#define MC_CMD_MAC_TX_MULTICAST_PKTS 0x5 /* enum */ +#define MC_CMD_MAC_TX_BROADCAST_PKTS 0x6 /* enum */ +#define MC_CMD_MAC_TX_BYTES 0x7 /* enum */ +#define MC_CMD_MAC_TX_BAD_BYTES 0x8 /* enum */ +#define MC_CMD_MAC_TX_LT64_PKTS 0x9 /* enum */ +#define MC_CMD_MAC_TX_64_PKTS 0xa /* enum */ +#define MC_CMD_MAC_TX_65_TO_127_PKTS 0xb /* enum */ +#define MC_CMD_MAC_TX_128_TO_255_PKTS 0xc /* enum */ +#define MC_CMD_MAC_TX_256_TO_511_PKTS 0xd /* enum */ +#define MC_CMD_MAC_TX_512_TO_1023_PKTS 0xe /* enum */ +#define MC_CMD_MAC_TX_1024_TO_15XX_PKTS 0xf /* enum */ +#define MC_CMD_MAC_TX_15XX_TO_JUMBO_PKTS 0x10 /* enum */ +#define MC_CMD_MAC_TX_GTJUMBO_PKTS 0x11 /* enum */ +#define MC_CMD_MAC_TX_BAD_FCS_PKTS 0x12 /* enum */ +#define MC_CMD_MAC_TX_SINGLE_COLLISION_PKTS 0x13 /* enum */ +#define MC_CMD_MAC_TX_MULTIPLE_COLLISION_PKTS 0x14 /* enum */ +#define MC_CMD_MAC_TX_EXCESSIVE_COLLISION_PKTS 0x15 /* enum */ +#define MC_CMD_MAC_TX_LATE_COLLISION_PKTS 0x16 /* enum */ +#define MC_CMD_MAC_TX_DEFERRED_PKTS 0x17 /* enum */ +#define MC_CMD_MAC_TX_EXCESSIVE_DEFERRED_PKTS 0x18 /* enum */ +#define MC_CMD_MAC_TX_NON_TCPUDP_PKTS 0x19 /* enum */ +#define MC_CMD_MAC_TX_MAC_SRC_ERR_PKTS 0x1a /* enum */ +#define MC_CMD_MAC_TX_IP_SRC_ERR_PKTS 0x1b /* enum */ +#define MC_CMD_MAC_RX_PKTS 0x1c /* enum */ +#define 
MC_CMD_MAC_RX_PAUSE_PKTS 0x1d /* enum */ +#define MC_CMD_MAC_RX_GOOD_PKTS 0x1e /* enum */ +#define MC_CMD_MAC_RX_CONTROL_PKTS 0x1f /* enum */ +#define MC_CMD_MAC_RX_UNICAST_PKTS 0x20 /* enum */ +#define MC_CMD_MAC_RX_MULTICAST_PKTS 0x21 /* enum */ +#define MC_CMD_MAC_RX_BROADCAST_PKTS 0x22 /* enum */ +#define MC_CMD_MAC_RX_BYTES 0x23 /* enum */ +#define MC_CMD_MAC_RX_BAD_BYTES 0x24 /* enum */ +#define MC_CMD_MAC_RX_64_PKTS 0x25 /* enum */ +#define MC_CMD_MAC_RX_65_TO_127_PKTS 0x26 /* enum */ +#define MC_CMD_MAC_RX_128_TO_255_PKTS 0x27 /* enum */ +#define MC_CMD_MAC_RX_256_TO_511_PKTS 0x28 /* enum */ +#define MC_CMD_MAC_RX_512_TO_1023_PKTS 0x29 /* enum */ +#define MC_CMD_MAC_RX_1024_TO_15XX_PKTS 0x2a /* enum */ +#define MC_CMD_MAC_RX_15XX_TO_JUMBO_PKTS 0x2b /* enum */ +#define MC_CMD_MAC_RX_GTJUMBO_PKTS 0x2c /* enum */ +#define MC_CMD_MAC_RX_UNDERSIZE_PKTS 0x2d /* enum */ +#define MC_CMD_MAC_RX_BAD_FCS_PKTS 0x2e /* enum */ +#define MC_CMD_MAC_RX_OVERFLOW_PKTS 0x2f /* enum */ +#define MC_CMD_MAC_RX_FALSE_CARRIER_PKTS 0x30 /* enum */ +#define MC_CMD_MAC_RX_SYMBOL_ERROR_PKTS 0x31 /* enum */ +#define MC_CMD_MAC_RX_ALIGN_ERROR_PKTS 0x32 /* enum */ +#define MC_CMD_MAC_RX_LENGTH_ERROR_PKTS 0x33 /* enum */ +#define MC_CMD_MAC_RX_INTERNAL_ERROR_PKTS 0x34 /* enum */ +#define MC_CMD_MAC_RX_JABBER_PKTS 0x35 /* enum */ +#define MC_CMD_MAC_RX_NODESC_DROPS 0x36 /* enum */ +#define MC_CMD_MAC_RX_LANES01_CHAR_ERR 0x37 /* enum */ +#define MC_CMD_MAC_RX_LANES23_CHAR_ERR 0x38 /* enum */ +#define MC_CMD_MAC_RX_LANES01_DISP_ERR 0x39 /* enum */ +#define MC_CMD_MAC_RX_LANES23_DISP_ERR 0x3a /* enum */ +#define MC_CMD_MAC_RX_MATCH_FAULT 0x3b /* enum */ +/* enum: PM trunc_bb_overflow counter. Valid for EF10 with PM_AND_RXDP_COUNTERS + * capability only. + */ +#define MC_CMD_MAC_PM_TRUNC_BB_OVERFLOW 0x3c +/* enum: PM discard_bb_overflow counter. Valid for EF10 with + * PM_AND_RXDP_COUNTERS capability only. + */ +#define MC_CMD_MAC_PM_DISCARD_BB_OVERFLOW 0x3d +/* enum: PM trunc_vfifo_full counter. Valid for EF10 with PM_AND_RXDP_COUNTERS + * capability only. + */ +#define MC_CMD_MAC_PM_TRUNC_VFIFO_FULL 0x3e +/* enum: PM discard_vfifo_full counter. Valid for EF10 with + * PM_AND_RXDP_COUNTERS capability only. + */ +#define MC_CMD_MAC_PM_DISCARD_VFIFO_FULL 0x3f +/* enum: PM trunc_qbb counter. Valid for EF10 with PM_AND_RXDP_COUNTERS + * capability only. + */ +#define MC_CMD_MAC_PM_TRUNC_QBB 0x40 +/* enum: PM discard_qbb counter. Valid for EF10 with PM_AND_RXDP_COUNTERS + * capability only. + */ +#define MC_CMD_MAC_PM_DISCARD_QBB 0x41 +/* enum: PM discard_mapping counter. Valid for EF10 with PM_AND_RXDP_COUNTERS + * capability only. + */ +#define MC_CMD_MAC_PM_DISCARD_MAPPING 0x42 +/* enum: RXDP counter: Number of packets dropped due to the queue being + * disabled. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only. + */ +#define MC_CMD_MAC_RXDP_Q_DISABLED_PKTS 0x43 +/* enum: RXDP counter: Number of packets dropped by the DICPU. Valid for EF10 + * with PM_AND_RXDP_COUNTERS capability only. + */ +#define MC_CMD_MAC_RXDP_DI_DROPPED_PKTS 0x45 +/* enum: RXDP counter: Number of non-host packets. Valid for EF10 with + * PM_AND_RXDP_COUNTERS capability only. + */ +#define MC_CMD_MAC_RXDP_STREAMING_PKTS 0x46 +/* enum: RXDP counter: Number of times an hlb descriptor fetch was performed. + * Valid for EF10 with PM_AND_RXDP_COUNTERS capability only. + */ +#define MC_CMD_MAC_RXDP_HLB_FETCH_CONDITIONS 0x47 +/* enum: RXDP counter: Number of times the DPCPU waited for an existing + * descriptor fetch. 
Valid for EF10 with PM_AND_RXDP_COUNTERS capability only. + */ +#define MC_CMD_MAC_RXDP_HLB_WAIT_CONDITIONS 0x48 +#define MC_CMD_MAC_VADAPTER_RX_DMABUF_START 0x4c /* enum */ +#define MC_CMD_MAC_VADAPTER_RX_UNICAST_PACKETS 0x4c /* enum */ +#define MC_CMD_MAC_VADAPTER_RX_UNICAST_BYTES 0x4d /* enum */ +#define MC_CMD_MAC_VADAPTER_RX_MULTICAST_PACKETS 0x4e /* enum */ +#define MC_CMD_MAC_VADAPTER_RX_MULTICAST_BYTES 0x4f /* enum */ +#define MC_CMD_MAC_VADAPTER_RX_BROADCAST_PACKETS 0x50 /* enum */ +#define MC_CMD_MAC_VADAPTER_RX_BROADCAST_BYTES 0x51 /* enum */ +#define MC_CMD_MAC_VADAPTER_RX_BAD_PACKETS 0x52 /* enum */ +#define MC_CMD_MAC_VADAPTER_RX_BAD_BYTES 0x53 /* enum */ +#define MC_CMD_MAC_VADAPTER_RX_OVERFLOW 0x54 /* enum */ +#define MC_CMD_MAC_VADAPTER_TX_DMABUF_START 0x57 /* enum */ +#define MC_CMD_MAC_VADAPTER_TX_UNICAST_PACKETS 0x57 /* enum */ +#define MC_CMD_MAC_VADAPTER_TX_UNICAST_BYTES 0x58 /* enum */ +#define MC_CMD_MAC_VADAPTER_TX_MULTICAST_PACKETS 0x59 /* enum */ +#define MC_CMD_MAC_VADAPTER_TX_MULTICAST_BYTES 0x5a /* enum */ +#define MC_CMD_MAC_VADAPTER_TX_BROADCAST_PACKETS 0x5b /* enum */ +#define MC_CMD_MAC_VADAPTER_TX_BROADCAST_BYTES 0x5c /* enum */ +#define MC_CMD_MAC_VADAPTER_TX_BAD_PACKETS 0x5d /* enum */ +#define MC_CMD_MAC_VADAPTER_TX_BAD_BYTES 0x5e /* enum */ +#define MC_CMD_MAC_VADAPTER_TX_OVERFLOW 0x5f /* enum */ +/* enum: Start of GMAC stats buffer space, for Siena only. */ +#define MC_CMD_GMAC_DMABUF_START 0x40 +/* enum: End of GMAC stats buffer space, for Siena only. */ +#define MC_CMD_GMAC_DMABUF_END 0x5f +/* enum: GENERATION_END value, used together with GENERATION_START to verify + * consistency of DMAd data. For legacy firmware / drivers without extended + * stats (more precisely, when DMA_LEN == MC_CMD_MAC_NSTATS * + * sizeof(uint64_t)), this entry holds the GENERATION_END value. Otherwise, + * this value is invalid/ reserved and GENERATION_END is written as the last + * 64-bit word of the DMA buffer (at DMA_LEN - sizeof(uint64_t)). Note that + * this is consistent with the legacy behaviour, in the sense that entry 96 is + * the last 64-bit word in the buffer when DMA_LEN == MC_CMD_MAC_NSTATS * + * sizeof(uint64_t). See SF-109306-TC, Section 9.2 for details. 
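+
+/* Illustrative sketch (editorial): the GENERATION_START/GENERATION_END
+ * words described above can be used to detect a MAC stats snapshot that the
+ * MC rewrote while the host was reading it. With extended stats the end
+ * marker is the last 64-bit word of the buffer (DMA_LEN - sizeof(uint64_t)),
+ * so 'nstats' below is DMA_LEN / sizeof(uint64_t). Sketch only -- a real
+ * driver also needs the appropriate read barriers; names other than the
+ * MC_CMD_* macro are assumptions.
+ */
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
+
+static bool
+mac_stats_snapshot_consistent(const volatile uint64_t *stats, size_t nstats)
+{
+	uint64_t gen_start = stats[MC_CMD_MAC_GENERATION_START];
+	uint64_t gen_end = stats[nstats - 1];
+
+	/* A mismatch means the buffer was updated mid-read: retry. */
+	return gen_start == gen_end;
+}
+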
+ */ +#define MC_CMD_MAC_GENERATION_END 0x60 +#define MC_CMD_MAC_NSTATS 0x61 /* enum */ + +/* MC_CMD_MAC_STATS_V2_OUT_DMA msgresponse */ +#define MC_CMD_MAC_STATS_V2_OUT_DMA_LEN 0 + +/* MC_CMD_MAC_STATS_V2_OUT_NO_DMA msgresponse */ +#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_LEN (((MC_CMD_MAC_NSTATS_V2*64))>>3) +#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_OFST 0 +#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_LEN 8 +#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_LO_OFST 0 +#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_HI_OFST 4 +#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS_V2 +/* enum: Start of FEC stats buffer space, Medford2 and up */ +#define MC_CMD_MAC_FEC_DMABUF_START 0x61 +/* enum: Number of uncorrected FEC codewords on link (RS-FEC only for Medford2) + */ +#define MC_CMD_MAC_FEC_UNCORRECTED_ERRORS 0x61 +/* enum: Number of corrected FEC codewords on link (RS-FEC only for Medford2) + */ +#define MC_CMD_MAC_FEC_CORRECTED_ERRORS 0x62 +/* enum: Number of corrected 10-bit symbol errors, lane 0 (RS-FEC only) */ +#define MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE0 0x63 +/* enum: Number of corrected 10-bit symbol errors, lane 1 (RS-FEC only) */ +#define MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE1 0x64 +/* enum: Number of corrected 10-bit symbol errors, lane 2 (RS-FEC only) */ +#define MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE2 0x65 +/* enum: Number of corrected 10-bit symbol errors, lane 3 (RS-FEC only) */ +#define MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE3 0x66 +/* enum: This includes the space at offset 103 which is the final + * GENERATION_END in a MAC_STATS_V2 response and otherwise unused. + */ +#define MC_CMD_MAC_NSTATS_V2 0x68 +/* Other enum values, see field(s): */ +/* MC_CMD_MAC_STATS_OUT_NO_DMA/STATISTICS */ + +/* MC_CMD_MAC_STATS_V3_OUT_DMA msgresponse */ +#define MC_CMD_MAC_STATS_V3_OUT_DMA_LEN 0 + +/* MC_CMD_MAC_STATS_V3_OUT_NO_DMA msgresponse */ +#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_LEN (((MC_CMD_MAC_NSTATS_V3*64))>>3) +#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_OFST 0 +#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_LEN 8 +#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_LO_OFST 0 +#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_HI_OFST 4 +#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS_V3 +/* enum: Start of CTPIO stats buffer space, Medford2 and up */ +#define MC_CMD_MAC_CTPIO_DMABUF_START 0x68 +/* enum: Number of CTPIO fallbacks because a DMA packet was in progress on the + * target VI + */ +#define MC_CMD_MAC_CTPIO_VI_BUSY_FALLBACK 0x68 +/* enum: Number of times a CTPIO send wrote beyond frame end (informational + * only) + */ +#define MC_CMD_MAC_CTPIO_LONG_WRITE_SUCCESS 0x69 +/* enum: Number of CTPIO failures because the TX doorbell was written before + * the end of the frame data + */ +#define MC_CMD_MAC_CTPIO_MISSING_DBELL_FAIL 0x6a +/* enum: Number of CTPIO failures because the internal FIFO overflowed */ +#define MC_CMD_MAC_CTPIO_OVERFLOW_FAIL 0x6b +/* enum: Number of CTPIO failures because the host did not deliver data fast + * enough to avoid MAC underflow + */ +#define MC_CMD_MAC_CTPIO_UNDERFLOW_FAIL 0x6c +/* enum: Number of CTPIO failures because the host did not deliver all the + * frame data within the timeout + */ +#define MC_CMD_MAC_CTPIO_TIMEOUT_FAIL 0x6d +/* enum: Number of CTPIO failures because the frame data arrived out of order + * or with gaps + */ +#define MC_CMD_MAC_CTPIO_NONCONTIG_WR_FAIL 0x6e +/* enum: Number of CTPIO failures because the host started a new frame before + * completing the 
previous one + */ +#define MC_CMD_MAC_CTPIO_FRM_CLOBBER_FAIL 0x6f +/* enum: Number of CTPIO failures because a write was not a multiple of 32 bits + * or not 32-bit aligned + */ +#define MC_CMD_MAC_CTPIO_INVALID_WR_FAIL 0x70 +/* enum: Number of CTPIO fallbacks because another VI on the same port was + * sending a CTPIO frame + */ +#define MC_CMD_MAC_CTPIO_VI_CLOBBER_FALLBACK 0x71 +/* enum: Number of CTPIO fallbacks because target VI did not have CTPIO enabled + */ +#define MC_CMD_MAC_CTPIO_UNQUALIFIED_FALLBACK 0x72 +/* enum: Number of CTPIO fallbacks because length in header was less than 29 + * bytes + */ +#define MC_CMD_MAC_CTPIO_RUNT_FALLBACK 0x73 +/* enum: Total number of successful CTPIO sends on this port */ +#define MC_CMD_MAC_CTPIO_SUCCESS 0x74 +/* enum: Total number of CTPIO fallbacks on this port */ +#define MC_CMD_MAC_CTPIO_FALLBACK 0x75 +/* enum: Total number of CTPIO poisoned frames on this port, whether erased or + * not + */ +#define MC_CMD_MAC_CTPIO_POISON 0x76 +/* enum: Total number of CTPIO erased frames on this port */ +#define MC_CMD_MAC_CTPIO_ERASE 0x77 +/* enum: This includes the space at offset 120 which is the final + * GENERATION_END in a MAC_STATS_V3 response and otherwise unused. + */ +#define MC_CMD_MAC_NSTATS_V3 0x79 +/* Other enum values, see field(s): */ +/* MC_CMD_MAC_STATS_V2_OUT_NO_DMA/STATISTICS */ + +/* MC_CMD_MAC_STATS_V4_OUT_DMA msgresponse */ +#define MC_CMD_MAC_STATS_V4_OUT_DMA_LEN 0 + +/* MC_CMD_MAC_STATS_V4_OUT_NO_DMA msgresponse */ +#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_LEN (((MC_CMD_MAC_NSTATS_V4*64))>>3) +#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_OFST 0 +#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_LEN 8 +#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_LO_OFST 0 +#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_HI_OFST 4 +#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS_V4 +/* enum: Start of V4 stats buffer space */ +#define MC_CMD_MAC_V4_DMABUF_START 0x79 +/* enum: RXDP counter: Number of packets truncated because scattering was + * disabled. + */ +#define MC_CMD_MAC_RXDP_SCATTER_DISABLED_TRUNC 0x79 +/* enum: RXDP counter: Number of times the RXDP head of line blocked waiting + * for descriptors. Will be zero unless RXDP_HLB_IDLE capability is set. + */ +#define MC_CMD_MAC_RXDP_HLB_IDLE 0x7a +/* enum: RXDP counter: Number of times the RXDP timed out while head of line + * blocking. Will be zero unless RXDP_HLB_IDLE capability is set. + */ +#define MC_CMD_MAC_RXDP_HLB_TIMEOUT 0x7b +/* enum: This includes the space at offset 124 which is the final + * GENERATION_END in a MAC_STATS_V4 response and otherwise unused. 
+ */ +#define MC_CMD_MAC_NSTATS_V4 0x7d +/* Other enum values, see field(s): */ +/* MC_CMD_MAC_STATS_V3_OUT_NO_DMA/STATISTICS */ + + +/***********************************/ +/* MC_CMD_SRIOV + * to be documented + */ +#define MC_CMD_SRIOV 0x30 + +/* MC_CMD_SRIOV_IN msgrequest */ +#define MC_CMD_SRIOV_IN_LEN 12 +#define MC_CMD_SRIOV_IN_ENABLE_OFST 0 +#define MC_CMD_SRIOV_IN_ENABLE_LEN 4 +#define MC_CMD_SRIOV_IN_VI_BASE_OFST 4 +#define MC_CMD_SRIOV_IN_VI_BASE_LEN 4 +#define MC_CMD_SRIOV_IN_VF_COUNT_OFST 8 +#define MC_CMD_SRIOV_IN_VF_COUNT_LEN 4 + +/* MC_CMD_SRIOV_OUT msgresponse */ +#define MC_CMD_SRIOV_OUT_LEN 8 +#define MC_CMD_SRIOV_OUT_VI_SCALE_OFST 0 +#define MC_CMD_SRIOV_OUT_VI_SCALE_LEN 4 +#define MC_CMD_SRIOV_OUT_VF_TOTAL_OFST 4 +#define MC_CMD_SRIOV_OUT_VF_TOTAL_LEN 4 + +/* MC_CMD_MEMCPY_RECORD_TYPEDEF structuredef */ +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LEN 32 +/* this is only used for the first record */ +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_OFST 0 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_LEN 4 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_LBN 0 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_WIDTH 32 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_OFST 4 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_LEN 4 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_LBN 32 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_WIDTH 32 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_OFST 8 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LEN 8 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LO_OFST 8 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_HI_OFST 12 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LBN 64 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_WIDTH 64 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_OFST 16 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_LEN 4 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_RID_INLINE 0x100 /* enum */ +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_LBN 128 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_WIDTH 32 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_OFST 20 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LEN 8 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LO_OFST 20 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_HI_OFST 24 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LBN 160 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_WIDTH 64 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_OFST 28 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_LEN 4 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_LBN 224 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_WIDTH 32 + + +/***********************************/ +/* MC_CMD_MEMCPY + * DMA write data into (Rid,Addr), either by dma reading (Rid,Addr), or by data + * embedded directly in the command. + * + * A common pattern is for a client to use generation counts to signal a dma + * update of a datastructure. To facilitate this, this MCDI operation can + * contain multiple requests which are executed in strict order. Requests take + * the form of duplicating the entire MCDI request continuously (including the + * requests record, which is ignored in all but the first structure) + * + * The source data can either come from a DMA from the host, or it can be + * embedded within the request directly, thereby eliminating a DMA read. To + * indicate this, the client sets FROM_RID=%RID_INLINE, ADDR_HI=0, and + * ADDR_LO=offset, and inserts the data at %offset from the start of the + * payload. It's the callers responsibility to ensure that the embedded data + * doesn't overlap the records. 
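+
+/* Illustrative sketch (editorial): building a single MC_CMD_MEMCPY record
+ * with the source data embedded inline, per the convention described above
+ * (FROM_RID = RID_INLINE, FROM_ADDR_HI = 0, FROM_ADDR_LO = offset of the
+ * data from the start of the payload). Here the data is placed directly
+ * after the record, so that offset is just the record length. Little-endian
+ * host assumed; helper names are assumptions.
+ */
+#include <stdint.h>
+#include <string.h>
+
+static void
+put_dword(uint8_t *base, unsigned int ofst, uint32_t val)
+{
+	memcpy(base + ofst, &val, sizeof(val));
+}
+
+/* Returns the total payload length (record + inline data). */
+static size_t
+build_inline_memcpy(uint8_t *payload, uint32_t to_rid, uint64_t to_addr,
+		    const void *data, uint32_t len)
+{
+	const unsigned int data_ofst = MC_CMD_MEMCPY_RECORD_TYPEDEF_LEN;
+
+	put_dword(payload, MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_OFST, 1);
+	put_dword(payload, MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_OFST, to_rid);
+	put_dword(payload, MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LO_OFST,
+		  (uint32_t)to_addr);
+	put_dword(payload, MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_HI_OFST,
+		  (uint32_t)(to_addr >> 32));
+	put_dword(payload, MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_OFST,
+		  MC_CMD_MEMCPY_RECORD_TYPEDEF_RID_INLINE);
+	put_dword(payload, MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LO_OFST,
+		  data_ofst);
+	put_dword(payload, MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_HI_OFST, 0);
+	put_dword(payload, MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_OFST, len);
+	memcpy(payload + data_ofst, data, len);
+
+	return data_ofst + len;
+}
+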
+ * + * Returns: 0, EINVAL (invalid RID) + */ +#define MC_CMD_MEMCPY 0x31 + +/* MC_CMD_MEMCPY_IN msgrequest */ +#define MC_CMD_MEMCPY_IN_LENMIN 32 +#define MC_CMD_MEMCPY_IN_LENMAX 224 +#define MC_CMD_MEMCPY_IN_LENMAX_MCDI2 992 +#define MC_CMD_MEMCPY_IN_LEN(num) (0+32*(num)) +#define MC_CMD_MEMCPY_IN_RECORD_NUM(len) (((len)-0)/32) +/* see MC_CMD_MEMCPY_RECORD_TYPEDEF */ +#define MC_CMD_MEMCPY_IN_RECORD_OFST 0 +#define MC_CMD_MEMCPY_IN_RECORD_LEN 32 +#define MC_CMD_MEMCPY_IN_RECORD_MINNUM 1 +#define MC_CMD_MEMCPY_IN_RECORD_MAXNUM 7 +#define MC_CMD_MEMCPY_IN_RECORD_MAXNUM_MCDI2 31 + +/* MC_CMD_MEMCPY_OUT msgresponse */ +#define MC_CMD_MEMCPY_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_WOL_FILTER_SET + * Set a WoL filter. + */ +#define MC_CMD_WOL_FILTER_SET 0x32 +#undef MC_CMD_0x32_PRIVILEGE_CTG + +#define MC_CMD_0x32_PRIVILEGE_CTG SRIOV_CTG_LINK + +/* MC_CMD_WOL_FILTER_SET_IN msgrequest */ +#define MC_CMD_WOL_FILTER_SET_IN_LEN 192 +#define MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 +#define MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 +#define MC_CMD_FILTER_MODE_SIMPLE 0x0 /* enum */ +#define MC_CMD_FILTER_MODE_STRUCTURED 0xffffffff /* enum */ +/* A type value of 1 is unused. */ +#define MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 +#define MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 +/* enum: Magic */ +#define MC_CMD_WOL_TYPE_MAGIC 0x0 +/* enum: MS Windows Magic */ +#define MC_CMD_WOL_TYPE_WIN_MAGIC 0x2 +/* enum: IPv4 Syn */ +#define MC_CMD_WOL_TYPE_IPV4_SYN 0x3 +/* enum: IPv6 Syn */ +#define MC_CMD_WOL_TYPE_IPV6_SYN 0x4 +/* enum: Bitmap */ +#define MC_CMD_WOL_TYPE_BITMAP 0x5 +/* enum: Link */ +#define MC_CMD_WOL_TYPE_LINK 0x6 +/* enum: (Above this for future use) */ +#define MC_CMD_WOL_TYPE_MAX 0x7 +#define MC_CMD_WOL_FILTER_SET_IN_DATA_OFST 8 +#define MC_CMD_WOL_FILTER_SET_IN_DATA_LEN 4 +#define MC_CMD_WOL_FILTER_SET_IN_DATA_NUM 46 + +/* MC_CMD_WOL_FILTER_SET_IN_MAGIC msgrequest */ +#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_LEN 16 +/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */ +/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */ +/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */ +/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */ +#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_OFST 8 +#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_LEN 8 +#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_LO_OFST 8 +#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_HI_OFST 12 + +/* MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN msgrequest */ +#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_LEN 20 +/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */ +/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */ +/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */ +/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */ +#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_IP_OFST 8 +#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_IP_LEN 4 +#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_IP_OFST 12 +#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_IP_LEN 4 +#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_PORT_OFST 16 +#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_PORT_LEN 2 +#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_PORT_OFST 18 +#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_PORT_LEN 2 + +/* MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN msgrequest */ +#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_LEN 44 +/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */ +/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */ +/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */ +/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */ +#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_IP_OFST 8 +#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_IP_LEN 
16 +#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_IP_OFST 24 +#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_IP_LEN 16 +#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_PORT_OFST 40 +#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_PORT_LEN 2 +#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_PORT_OFST 42 +#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_PORT_LEN 2 + +/* MC_CMD_WOL_FILTER_SET_IN_BITMAP msgrequest */ +#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LEN 187 +/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */ +/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */ +/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */ +/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */ +#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_MASK_OFST 8 +#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_MASK_LEN 48 +#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_BITMAP_OFST 56 +#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_BITMAP_LEN 128 +#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LEN_OFST 184 +#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LEN_LEN 1 +#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER3_OFST 185 +#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER3_LEN 1 +#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER4_OFST 186 +#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER4_LEN 1 + +/* MC_CMD_WOL_FILTER_SET_IN_LINK msgrequest */ +#define MC_CMD_WOL_FILTER_SET_IN_LINK_LEN 12 +/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */ +/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */ +/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */ +/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */ +#define MC_CMD_WOL_FILTER_SET_IN_LINK_MASK_OFST 8 +#define MC_CMD_WOL_FILTER_SET_IN_LINK_MASK_LEN 4 +#define MC_CMD_WOL_FILTER_SET_IN_LINK_UP_LBN 0 +#define MC_CMD_WOL_FILTER_SET_IN_LINK_UP_WIDTH 1 +#define MC_CMD_WOL_FILTER_SET_IN_LINK_DOWN_LBN 1 +#define MC_CMD_WOL_FILTER_SET_IN_LINK_DOWN_WIDTH 1 + +/* MC_CMD_WOL_FILTER_SET_OUT msgresponse */ +#define MC_CMD_WOL_FILTER_SET_OUT_LEN 4 +#define MC_CMD_WOL_FILTER_SET_OUT_FILTER_ID_OFST 0 +#define MC_CMD_WOL_FILTER_SET_OUT_FILTER_ID_LEN 4 + + +/***********************************/ +/* MC_CMD_WOL_FILTER_REMOVE + * Remove a WoL filter. Locks required: None. Returns: 0, EINVAL, ENOSYS + */ +#define MC_CMD_WOL_FILTER_REMOVE 0x33 +#undef MC_CMD_0x33_PRIVILEGE_CTG + +#define MC_CMD_0x33_PRIVILEGE_CTG SRIOV_CTG_LINK + +/* MC_CMD_WOL_FILTER_REMOVE_IN msgrequest */ +#define MC_CMD_WOL_FILTER_REMOVE_IN_LEN 4 +#define MC_CMD_WOL_FILTER_REMOVE_IN_FILTER_ID_OFST 0 +#define MC_CMD_WOL_FILTER_REMOVE_IN_FILTER_ID_LEN 4 + +/* MC_CMD_WOL_FILTER_REMOVE_OUT msgresponse */ +#define MC_CMD_WOL_FILTER_REMOVE_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_WOL_FILTER_RESET + * Reset (i.e. remove all) WoL filters. Locks required: None. 
Returns: 0, + * ENOSYS + */ +#define MC_CMD_WOL_FILTER_RESET 0x34 +#undef MC_CMD_0x34_PRIVILEGE_CTG + +#define MC_CMD_0x34_PRIVILEGE_CTG SRIOV_CTG_LINK + +/* MC_CMD_WOL_FILTER_RESET_IN msgrequest */ +#define MC_CMD_WOL_FILTER_RESET_IN_LEN 4 +#define MC_CMD_WOL_FILTER_RESET_IN_MASK_OFST 0 +#define MC_CMD_WOL_FILTER_RESET_IN_MASK_LEN 4 +#define MC_CMD_WOL_FILTER_RESET_IN_WAKE_FILTERS 0x1 /* enum */ +#define MC_CMD_WOL_FILTER_RESET_IN_LIGHTSOUT_OFFLOADS 0x2 /* enum */ + +/* MC_CMD_WOL_FILTER_RESET_OUT msgresponse */ +#define MC_CMD_WOL_FILTER_RESET_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_SET_MCAST_HASH + * Set the MCAST hash value without otherwise reconfiguring the MAC + */ +#define MC_CMD_SET_MCAST_HASH 0x35 + +/* MC_CMD_SET_MCAST_HASH_IN msgrequest */ +#define MC_CMD_SET_MCAST_HASH_IN_LEN 32 +#define MC_CMD_SET_MCAST_HASH_IN_HASH0_OFST 0 +#define MC_CMD_SET_MCAST_HASH_IN_HASH0_LEN 16 +#define MC_CMD_SET_MCAST_HASH_IN_HASH1_OFST 16 +#define MC_CMD_SET_MCAST_HASH_IN_HASH1_LEN 16 + +/* MC_CMD_SET_MCAST_HASH_OUT msgresponse */ +#define MC_CMD_SET_MCAST_HASH_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_NVRAM_TYPES + * Return bitfield indicating available types of virtual NVRAM partitions. + * Locks required: none. Returns: 0 + */ +#define MC_CMD_NVRAM_TYPES 0x36 +#undef MC_CMD_0x36_PRIVILEGE_CTG + +#define MC_CMD_0x36_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_NVRAM_TYPES_IN msgrequest */ +#define MC_CMD_NVRAM_TYPES_IN_LEN 0 + +/* MC_CMD_NVRAM_TYPES_OUT msgresponse */ +#define MC_CMD_NVRAM_TYPES_OUT_LEN 4 +/* Bit mask of supported types. */ +#define MC_CMD_NVRAM_TYPES_OUT_TYPES_OFST 0 +#define MC_CMD_NVRAM_TYPES_OUT_TYPES_LEN 4 +/* enum: Disabled callisto. */ +#define MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO 0x0 +/* enum: MC firmware. */ +#define MC_CMD_NVRAM_TYPE_MC_FW 0x1 +/* enum: MC backup firmware. */ +#define MC_CMD_NVRAM_TYPE_MC_FW_BACKUP 0x2 +/* enum: Static configuration Port0. */ +#define MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT0 0x3 +/* enum: Static configuration Port1. */ +#define MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT1 0x4 +/* enum: Dynamic configuration Port0. */ +#define MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0 0x5 +/* enum: Dynamic configuration Port1. */ +#define MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1 0x6 +/* enum: Expansion Rom. */ +#define MC_CMD_NVRAM_TYPE_EXP_ROM 0x7 +/* enum: Expansion Rom Configuration Port0. */ +#define MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT0 0x8 +/* enum: Expansion Rom Configuration Port1. */ +#define MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT1 0x9 +/* enum: Phy Configuration Port0. */ +#define MC_CMD_NVRAM_TYPE_PHY_PORT0 0xa +/* enum: Phy Configuration Port1. */ +#define MC_CMD_NVRAM_TYPE_PHY_PORT1 0xb +/* enum: Log. */ +#define MC_CMD_NVRAM_TYPE_LOG 0xc +/* enum: FPGA image. */ +#define MC_CMD_NVRAM_TYPE_FPGA 0xd +/* enum: FPGA backup image */ +#define MC_CMD_NVRAM_TYPE_FPGA_BACKUP 0xe +/* enum: FC firmware. */ +#define MC_CMD_NVRAM_TYPE_FC_FW 0xf +/* enum: FC backup firmware. */ +#define MC_CMD_NVRAM_TYPE_FC_FW_BACKUP 0x10 +/* enum: CPLD image. */ +#define MC_CMD_NVRAM_TYPE_CPLD 0x11 +/* enum: Licensing information. */ +#define MC_CMD_NVRAM_TYPE_LICENSE 0x12 +/* enum: FC Log. */ +#define MC_CMD_NVRAM_TYPE_FC_LOG 0x13 +/* enum: Additional flash on FPGA. */ +#define MC_CMD_NVRAM_TYPE_FC_EXTRA 0x14 + + +/***********************************/ +/* MC_CMD_NVRAM_INFO + * Read info about a virtual NVRAM partition. Locks required: none. Returns: 0, + * EINVAL (bad type). 
+ */ +#define MC_CMD_NVRAM_INFO 0x37 +#undef MC_CMD_0x37_PRIVILEGE_CTG + +#define MC_CMD_0x37_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_NVRAM_INFO_IN msgrequest */ +#define MC_CMD_NVRAM_INFO_IN_LEN 4 +#define MC_CMD_NVRAM_INFO_IN_TYPE_OFST 0 +#define MC_CMD_NVRAM_INFO_IN_TYPE_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */ + +/* MC_CMD_NVRAM_INFO_OUT msgresponse */ +#define MC_CMD_NVRAM_INFO_OUT_LEN 24 +#define MC_CMD_NVRAM_INFO_OUT_TYPE_OFST 0 +#define MC_CMD_NVRAM_INFO_OUT_TYPE_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */ +#define MC_CMD_NVRAM_INFO_OUT_SIZE_OFST 4 +#define MC_CMD_NVRAM_INFO_OUT_SIZE_LEN 4 +#define MC_CMD_NVRAM_INFO_OUT_ERASESIZE_OFST 8 +#define MC_CMD_NVRAM_INFO_OUT_ERASESIZE_LEN 4 +#define MC_CMD_NVRAM_INFO_OUT_FLAGS_OFST 12 +#define MC_CMD_NVRAM_INFO_OUT_FLAGS_LEN 4 +#define MC_CMD_NVRAM_INFO_OUT_PROTECTED_LBN 0 +#define MC_CMD_NVRAM_INFO_OUT_PROTECTED_WIDTH 1 +#define MC_CMD_NVRAM_INFO_OUT_TLV_LBN 1 +#define MC_CMD_NVRAM_INFO_OUT_TLV_WIDTH 1 +#define MC_CMD_NVRAM_INFO_OUT_READ_ONLY_IF_TSA_BOUND_LBN 2 +#define MC_CMD_NVRAM_INFO_OUT_READ_ONLY_IF_TSA_BOUND_WIDTH 1 +#define MC_CMD_NVRAM_INFO_OUT_CRC_LBN 3 +#define MC_CMD_NVRAM_INFO_OUT_CRC_WIDTH 1 +#define MC_CMD_NVRAM_INFO_OUT_READ_ONLY_LBN 5 +#define MC_CMD_NVRAM_INFO_OUT_READ_ONLY_WIDTH 1 +#define MC_CMD_NVRAM_INFO_OUT_CMAC_LBN 6 +#define MC_CMD_NVRAM_INFO_OUT_CMAC_WIDTH 1 +#define MC_CMD_NVRAM_INFO_OUT_A_B_LBN 7 +#define MC_CMD_NVRAM_INFO_OUT_A_B_WIDTH 1 +#define MC_CMD_NVRAM_INFO_OUT_PHYSDEV_OFST 16 +#define MC_CMD_NVRAM_INFO_OUT_PHYSDEV_LEN 4 +#define MC_CMD_NVRAM_INFO_OUT_PHYSADDR_OFST 20 +#define MC_CMD_NVRAM_INFO_OUT_PHYSADDR_LEN 4 + +/* MC_CMD_NVRAM_INFO_V2_OUT msgresponse */ +#define MC_CMD_NVRAM_INFO_V2_OUT_LEN 28 +#define MC_CMD_NVRAM_INFO_V2_OUT_TYPE_OFST 0 +#define MC_CMD_NVRAM_INFO_V2_OUT_TYPE_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */ +#define MC_CMD_NVRAM_INFO_V2_OUT_SIZE_OFST 4 +#define MC_CMD_NVRAM_INFO_V2_OUT_SIZE_LEN 4 +#define MC_CMD_NVRAM_INFO_V2_OUT_ERASESIZE_OFST 8 +#define MC_CMD_NVRAM_INFO_V2_OUT_ERASESIZE_LEN 4 +#define MC_CMD_NVRAM_INFO_V2_OUT_FLAGS_OFST 12 +#define MC_CMD_NVRAM_INFO_V2_OUT_FLAGS_LEN 4 +#define MC_CMD_NVRAM_INFO_V2_OUT_PROTECTED_LBN 0 +#define MC_CMD_NVRAM_INFO_V2_OUT_PROTECTED_WIDTH 1 +#define MC_CMD_NVRAM_INFO_V2_OUT_TLV_LBN 1 +#define MC_CMD_NVRAM_INFO_V2_OUT_TLV_WIDTH 1 +#define MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_IF_TSA_BOUND_LBN 2 +#define MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_IF_TSA_BOUND_WIDTH 1 +#define MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_LBN 5 +#define MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_WIDTH 1 +#define MC_CMD_NVRAM_INFO_V2_OUT_A_B_LBN 7 +#define MC_CMD_NVRAM_INFO_V2_OUT_A_B_WIDTH 1 +#define MC_CMD_NVRAM_INFO_V2_OUT_PHYSDEV_OFST 16 +#define MC_CMD_NVRAM_INFO_V2_OUT_PHYSDEV_LEN 4 +#define MC_CMD_NVRAM_INFO_V2_OUT_PHYSADDR_OFST 20 +#define MC_CMD_NVRAM_INFO_V2_OUT_PHYSADDR_LEN 4 +/* Writes must be multiples of this size. Added to support the MUM on Sorrento. + */ +#define MC_CMD_NVRAM_INFO_V2_OUT_WRITESIZE_OFST 24 +#define MC_CMD_NVRAM_INFO_V2_OUT_WRITESIZE_LEN 4 + + +/***********************************/ +/* MC_CMD_NVRAM_UPDATE_START + * Start a group of update operations on a virtual NVRAM partition. Locks + * required: PHY_LOCK if type==*PHY*. Returns: 0, EINVAL (bad type), EACCES (if + * PHY_LOCK required and not held). 
In an adapter bound to a TSA controller, + * MC_CMD_NVRAM_UPDATE_START can only be used on a subset of partition types + * i.e. static config, dynamic config and expansion ROM config. Attempting to + * perform this operation on a restricted partition will return the error + * EPERM. + */ +#define MC_CMD_NVRAM_UPDATE_START 0x38 +#undef MC_CMD_0x38_PRIVILEGE_CTG + +#define MC_CMD_0x38_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_NVRAM_UPDATE_START_IN msgrequest: Legacy NVRAM_UPDATE_START request. + * Use NVRAM_UPDATE_START_V2_IN in new code + */ +#define MC_CMD_NVRAM_UPDATE_START_IN_LEN 4 +#define MC_CMD_NVRAM_UPDATE_START_IN_TYPE_OFST 0 +#define MC_CMD_NVRAM_UPDATE_START_IN_TYPE_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */ + +/* MC_CMD_NVRAM_UPDATE_START_V2_IN msgrequest: Extended NVRAM_UPDATE_START + * request with additional flags indicating version of command in use. See + * MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT for details of extended functionality. Use + * paired up with NVRAM_UPDATE_FINISH_V2_IN. + */ +#define MC_CMD_NVRAM_UPDATE_START_V2_IN_LEN 8 +#define MC_CMD_NVRAM_UPDATE_START_V2_IN_TYPE_OFST 0 +#define MC_CMD_NVRAM_UPDATE_START_V2_IN_TYPE_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */ +#define MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAGS_OFST 4 +#define MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAGS_LEN 4 +#define MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAG_REPORT_VERIFY_RESULT_LBN 0 +#define MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAG_REPORT_VERIFY_RESULT_WIDTH 1 + +/* MC_CMD_NVRAM_UPDATE_START_OUT msgresponse */ +#define MC_CMD_NVRAM_UPDATE_START_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_NVRAM_READ + * Read data from a virtual NVRAM partition. Locks required: PHY_LOCK if + * type==*PHY*. Returns: 0, EINVAL (bad type/offset/length), EACCES (if + * PHY_LOCK required and not held) + */ +#define MC_CMD_NVRAM_READ 0x39 +#undef MC_CMD_0x39_PRIVILEGE_CTG + +#define MC_CMD_0x39_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_NVRAM_READ_IN msgrequest */ +#define MC_CMD_NVRAM_READ_IN_LEN 12 +#define MC_CMD_NVRAM_READ_IN_TYPE_OFST 0 +#define MC_CMD_NVRAM_READ_IN_TYPE_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */ +#define MC_CMD_NVRAM_READ_IN_OFFSET_OFST 4 +#define MC_CMD_NVRAM_READ_IN_OFFSET_LEN 4 +/* amount to read in bytes */ +#define MC_CMD_NVRAM_READ_IN_LENGTH_OFST 8 +#define MC_CMD_NVRAM_READ_IN_LENGTH_LEN 4 + +/* MC_CMD_NVRAM_READ_IN_V2 msgrequest */ +#define MC_CMD_NVRAM_READ_IN_V2_LEN 16 +#define MC_CMD_NVRAM_READ_IN_V2_TYPE_OFST 0 +#define MC_CMD_NVRAM_READ_IN_V2_TYPE_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */ +#define MC_CMD_NVRAM_READ_IN_V2_OFFSET_OFST 4 +#define MC_CMD_NVRAM_READ_IN_V2_OFFSET_LEN 4 +/* amount to read in bytes */ +#define MC_CMD_NVRAM_READ_IN_V2_LENGTH_OFST 8 +#define MC_CMD_NVRAM_READ_IN_V2_LENGTH_LEN 4 +/* Optional control info. If a partition is stored with an A/B versioning + * scheme (i.e. in more than one physical partition in NVRAM) the host can set + * this to control which underlying physical partition is used to read data + * from. This allows it to perform a read-modify-write-verify with the write + * lock continuously held by calling NVRAM_UPDATE_START, reading the old + * contents using MODE=TARGET_CURRENT, overwriting the old partition and then + * verifying by reading with MODE=TARGET_BACKUP. 
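+ *
+ * As a hedged illustration of that sequence (assumed, not normative), the
+ * caller might issue: NVRAM_UPDATE_START(TYPE); NVRAM_READ_IN_V2 with
+ * MODE=TARGET_CURRENT to fetch the old contents; NVRAM_ERASE and NVRAM_WRITE
+ * to rewrite the partition; NVRAM_READ_IN_V2 with MODE=TARGET_BACKUP to
+ * verify what was written; and finally NVRAM_UPDATE_FINISH.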
+ */ +#define MC_CMD_NVRAM_READ_IN_V2_MODE_OFST 12 +#define MC_CMD_NVRAM_READ_IN_V2_MODE_LEN 4 +/* enum: Same as omitting MODE: caller sees data in current partition unless it + * holds the write lock in which case it sees data in the partition it is + * updating. + */ +#define MC_CMD_NVRAM_READ_IN_V2_DEFAULT 0x0 +/* enum: Read from the current partition of an A/B pair, even if holding the + * write lock. + */ +#define MC_CMD_NVRAM_READ_IN_V2_TARGET_CURRENT 0x1 +/* enum: Read from the non-current (i.e. to be updated) partition of an A/B + * pair + */ +#define MC_CMD_NVRAM_READ_IN_V2_TARGET_BACKUP 0x2 + +/* MC_CMD_NVRAM_READ_OUT msgresponse */ +#define MC_CMD_NVRAM_READ_OUT_LENMIN 1 +#define MC_CMD_NVRAM_READ_OUT_LENMAX 252 +#define MC_CMD_NVRAM_READ_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_NVRAM_READ_OUT_LEN(num) (0+1*(num)) +#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_NUM(len) (((len)-0)/1) +#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_OFST 0 +#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_LEN 1 +#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_MINNUM 1 +#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_MAXNUM 252 +#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_MAXNUM_MCDI2 1020 + + +/***********************************/ +/* MC_CMD_NVRAM_WRITE + * Write data to a virtual NVRAM partition. Locks required: PHY_LOCK if + * type==*PHY*. Returns: 0, EINVAL (bad type/offset/length), EACCES (if + * PHY_LOCK required and not held) + */ +#define MC_CMD_NVRAM_WRITE 0x3a +#undef MC_CMD_0x3a_PRIVILEGE_CTG + +#define MC_CMD_0x3a_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_NVRAM_WRITE_IN msgrequest */ +#define MC_CMD_NVRAM_WRITE_IN_LENMIN 13 +#define MC_CMD_NVRAM_WRITE_IN_LENMAX 252 +#define MC_CMD_NVRAM_WRITE_IN_LENMAX_MCDI2 1020 +#define MC_CMD_NVRAM_WRITE_IN_LEN(num) (12+1*(num)) +#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_NUM(len) (((len)-12)/1) +#define MC_CMD_NVRAM_WRITE_IN_TYPE_OFST 0 +#define MC_CMD_NVRAM_WRITE_IN_TYPE_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */ +#define MC_CMD_NVRAM_WRITE_IN_OFFSET_OFST 4 +#define MC_CMD_NVRAM_WRITE_IN_OFFSET_LEN 4 +#define MC_CMD_NVRAM_WRITE_IN_LENGTH_OFST 8 +#define MC_CMD_NVRAM_WRITE_IN_LENGTH_LEN 4 +#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_OFST 12 +#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_LEN 1 +#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MINNUM 1 +#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MAXNUM 240 +#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MAXNUM_MCDI2 1008 + +/* MC_CMD_NVRAM_WRITE_OUT msgresponse */ +#define MC_CMD_NVRAM_WRITE_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_NVRAM_ERASE + * Erase sector(s) from a virtual NVRAM partition. Locks required: PHY_LOCK if + * type==*PHY*. 
Returns: 0, EINVAL (bad type/offset/length), EACCES (if + * PHY_LOCK required and not held) + */ +#define MC_CMD_NVRAM_ERASE 0x3b +#undef MC_CMD_0x3b_PRIVILEGE_CTG + +#define MC_CMD_0x3b_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_NVRAM_ERASE_IN msgrequest */ +#define MC_CMD_NVRAM_ERASE_IN_LEN 12 +#define MC_CMD_NVRAM_ERASE_IN_TYPE_OFST 0 +#define MC_CMD_NVRAM_ERASE_IN_TYPE_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */ +#define MC_CMD_NVRAM_ERASE_IN_OFFSET_OFST 4 +#define MC_CMD_NVRAM_ERASE_IN_OFFSET_LEN 4 +#define MC_CMD_NVRAM_ERASE_IN_LENGTH_OFST 8 +#define MC_CMD_NVRAM_ERASE_IN_LENGTH_LEN 4 + +/* MC_CMD_NVRAM_ERASE_OUT msgresponse */ +#define MC_CMD_NVRAM_ERASE_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_NVRAM_UPDATE_FINISH + * Finish a group of update operations on a virtual NVRAM partition. Locks + * required: PHY_LOCK if type==*PHY*. Returns: 0, EINVAL (bad type/offset/ + * length), EACCES (if PHY_LOCK required and not held). In an adapter bound to + * a TSA controller, MC_CMD_NVRAM_UPDATE_FINISH can only be used on a subset of + * partition types i.e. static config, dynamic config and expansion ROM config. + * Attempting to perform this operation on a restricted partition will return + * the error EPERM. + */ +#define MC_CMD_NVRAM_UPDATE_FINISH 0x3c +#undef MC_CMD_0x3c_PRIVILEGE_CTG + +#define MC_CMD_0x3c_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_NVRAM_UPDATE_FINISH_IN msgrequest: Legacy NVRAM_UPDATE_FINISH + * request. Use NVRAM_UPDATE_FINISH_V2_IN in new code + */ +#define MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN 8 +#define MC_CMD_NVRAM_UPDATE_FINISH_IN_TYPE_OFST 0 +#define MC_CMD_NVRAM_UPDATE_FINISH_IN_TYPE_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */ +#define MC_CMD_NVRAM_UPDATE_FINISH_IN_REBOOT_OFST 4 +#define MC_CMD_NVRAM_UPDATE_FINISH_IN_REBOOT_LEN 4 + +/* MC_CMD_NVRAM_UPDATE_FINISH_V2_IN msgrequest: Extended NVRAM_UPDATE_FINISH + * request with additional flags indicating version of NVRAM_UPDATE commands in + * use. See MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT for details of extended + * functionality. Use paired up with NVRAM_UPDATE_START_V2_IN. + */ +#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_LEN 12 +#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_TYPE_OFST 0 +#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_TYPE_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */ +#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_REBOOT_OFST 4 +#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_REBOOT_LEN 4 +#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAGS_OFST 8 +#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAGS_LEN 4 +#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_REPORT_VERIFY_RESULT_LBN 0 +#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_REPORT_VERIFY_RESULT_WIDTH 1 +#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_RUN_IN_BACKGROUND_LBN 1 +#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_RUN_IN_BACKGROUND_WIDTH 1 +#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_POLL_VERIFY_RESULT_LBN 2 +#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_POLL_VERIFY_RESULT_WIDTH 1 + +/* MC_CMD_NVRAM_UPDATE_FINISH_OUT msgresponse: Legacy NVRAM_UPDATE_FINISH + * response. Use NVRAM_UPDATE_FINISH_V2_OUT in new code + */ +#define MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN 0 + +/* MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT msgresponse: + * + * Extended NVRAM_UPDATE_FINISH response that communicates the result of secure + * firmware validation where applicable back to the host. 
+ * + * Medford only: For signed firmware images, such as those for medford, the MC + * firmware verifies the signature before marking the firmware image as valid. + * This process takes a few seconds to complete. So is likely to take more than + * the MCDI timeout. Hence signature verification is initiated when + * MC_CMD_NVRAM_UPDATE_FINISH_V2_IN is received by the firmware, however, the + * MCDI command is run in a background MCDI processing thread. This response + * payload includes the results of the signature verification. Note that the + * per-partition nvram lock in firmware is only released after the verification + * has completed. + */ +#define MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN 4 +/* Result of nvram update completion processing. Result codes that indicate an + * internal build failure and therefore not expected to be seen by customers in + * the field are marked with a prefix 'Internal-error'. + */ +#define MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_RESULT_CODE_OFST 0 +#define MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_RESULT_CODE_LEN 4 +/* enum: Invalid return code; only non-zero values are defined. Defined as + * unknown for backwards compatibility with NVRAM_UPDATE_FINISH_OUT. + */ +#define MC_CMD_NVRAM_VERIFY_RC_UNKNOWN 0x0 +/* enum: Verify succeeded without any errors. */ +#define MC_CMD_NVRAM_VERIFY_RC_SUCCESS 0x1 +/* enum: CMS format verification failed due to an internal error. */ +#define MC_CMD_NVRAM_VERIFY_RC_CMS_CHECK_FAILED 0x2 +/* enum: Invalid CMS format in image metadata. */ +#define MC_CMD_NVRAM_VERIFY_RC_INVALID_CMS_FORMAT 0x3 +/* enum: Message digest verification failed due to an internal error. */ +#define MC_CMD_NVRAM_VERIFY_RC_MESSAGE_DIGEST_CHECK_FAILED 0x4 +/* enum: Error in message digest calculated over the reflash-header, payload + * and reflash-trailer. + */ +#define MC_CMD_NVRAM_VERIFY_RC_BAD_MESSAGE_DIGEST 0x5 +/* enum: Signature verification failed due to an internal error. */ +#define MC_CMD_NVRAM_VERIFY_RC_SIGNATURE_CHECK_FAILED 0x6 +/* enum: There are no valid signatures in the image. */ +#define MC_CMD_NVRAM_VERIFY_RC_NO_VALID_SIGNATURES 0x7 +/* enum: Trusted approvers verification failed due to an internal error. */ +#define MC_CMD_NVRAM_VERIFY_RC_TRUSTED_APPROVERS_CHECK_FAILED 0x8 +/* enum: The Trusted approver's list is empty. */ +#define MC_CMD_NVRAM_VERIFY_RC_NO_TRUSTED_APPROVERS 0x9 +/* enum: Signature chain verification failed due to an internal error. */ +#define MC_CMD_NVRAM_VERIFY_RC_SIGNATURE_CHAIN_CHECK_FAILED 0xa +/* enum: The signers of the signatures in the image are not listed in the + * Trusted approver's list. + */ +#define MC_CMD_NVRAM_VERIFY_RC_NO_SIGNATURE_MATCH 0xb +/* enum: The image contains a test-signed certificate, but the adapter accepts + * only production signed images. + */ +#define MC_CMD_NVRAM_VERIFY_RC_REJECT_TEST_SIGNED 0xc +/* enum: The image has a lower security level than the current firmware. */ +#define MC_CMD_NVRAM_VERIFY_RC_SECURITY_LEVEL_DOWNGRADE 0xd +/* enum: Internal-error. The signed image is missing the 'contents' section, + * where the 'contents' section holds the actual image payload to be applied. + */ +#define MC_CMD_NVRAM_VERIFY_RC_CONTENT_NOT_FOUND 0xe +/* enum: Internal-error. The bundle header is invalid. */ +#define MC_CMD_NVRAM_VERIFY_RC_BUNDLE_CONTENT_HEADER_INVALID 0xf +/* enum: Internal-error. The bundle does not have a valid reflash image layout. + */ +#define MC_CMD_NVRAM_VERIFY_RC_BUNDLE_REFLASH_IMAGE_INVALID 0x10 +/* enum: Internal-error. 
The bundle has an inconsistent layout of components or + * incorrect checksum. + */ +#define MC_CMD_NVRAM_VERIFY_RC_BUNDLE_IMAGE_LAYOUT_INVALID 0x11 +/* enum: Internal-error. The bundle manifest is inconsistent with components in + * the bundle. + */ +#define MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_INVALID 0x12 +/* enum: Internal-error. The number of components in a bundle do not match the + * number of components advertised by the bundle manifest. + */ +#define MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_NUM_COMPONENTS_MISMATCH 0x13 +/* enum: Internal-error. The bundle contains too many components for the MC + * firmware to process + */ +#define MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_TOO_MANY_COMPONENTS 0x14 +/* enum: Internal-error. The bundle manifest has an invalid/inconsistent + * component. + */ +#define MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_COMPONENT_INVALID 0x15 +/* enum: Internal-error. The hash of a component does not match the hash stored + * in the bundle manifest. + */ +#define MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_COMPONENT_HASH_MISMATCH 0x16 +/* enum: Internal-error. Component hash calculation failed. */ +#define MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_COMPONENT_HASH_FAILED 0x17 +/* enum: Internal-error. The component does not have a valid reflash image + * layout. + */ +#define MC_CMD_NVRAM_VERIFY_RC_BUNDLE_COMPONENT_REFLASH_IMAGE_INVALID 0x18 +/* enum: The bundle processing code failed to copy a component to its target + * partition. + */ +#define MC_CMD_NVRAM_VERIFY_RC_BUNDLE_COMPONENT_COPY_FAILED 0x19 +/* enum: The update operation is in-progress. */ +#define MC_CMD_NVRAM_VERIFY_RC_PENDING 0x1a + + +/***********************************/ +/* MC_CMD_REBOOT + * Reboot the MC. + * + * The AFTER_ASSERTION flag is intended to be used when the driver notices an + * assertion failure (at which point it is expected to perform a complete tear + * down and reinitialise), to allow both ports to reset the MC once in an + * atomic fashion. + * + * Production mc firmwares are generally compiled with REBOOT_ON_ASSERT=1, + * which means that they will automatically reboot out of the assertion + * handler, so this is in practise an optional operation. It is still + * recommended that drivers execute this to support custom firmwares with + * REBOOT_ON_ASSERT=0. + * + * Locks required: NONE Returns: Nothing. You get back a response with ERR=1, + * DATALEN=0 + */ +#define MC_CMD_REBOOT 0x3d +#undef MC_CMD_0x3d_PRIVILEGE_CTG + +#define MC_CMD_0x3d_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND + +/* MC_CMD_REBOOT_IN msgrequest */ +#define MC_CMD_REBOOT_IN_LEN 4 +#define MC_CMD_REBOOT_IN_FLAGS_OFST 0 +#define MC_CMD_REBOOT_IN_FLAGS_LEN 4 +#define MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION 0x1 /* enum */ + +/* MC_CMD_REBOOT_OUT msgresponse */ +#define MC_CMD_REBOOT_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_SCHEDINFO + * Request scheduler info. Locks required: NONE. Returns: An array of + * (timeslice,maximum overrun), one for each thread, in ascending order of + * thread address. 
+ */ +#define MC_CMD_SCHEDINFO 0x3e +#undef MC_CMD_0x3e_PRIVILEGE_CTG + +#define MC_CMD_0x3e_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_SCHEDINFO_IN msgrequest */ +#define MC_CMD_SCHEDINFO_IN_LEN 0 + +/* MC_CMD_SCHEDINFO_OUT msgresponse */ +#define MC_CMD_SCHEDINFO_OUT_LENMIN 4 +#define MC_CMD_SCHEDINFO_OUT_LENMAX 252 +#define MC_CMD_SCHEDINFO_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_SCHEDINFO_OUT_LEN(num) (0+4*(num)) +#define MC_CMD_SCHEDINFO_OUT_DATA_NUM(len) (((len)-0)/4) +#define MC_CMD_SCHEDINFO_OUT_DATA_OFST 0 +#define MC_CMD_SCHEDINFO_OUT_DATA_LEN 4 +#define MC_CMD_SCHEDINFO_OUT_DATA_MINNUM 1 +#define MC_CMD_SCHEDINFO_OUT_DATA_MAXNUM 63 +#define MC_CMD_SCHEDINFO_OUT_DATA_MAXNUM_MCDI2 255 + + +/***********************************/ +/* MC_CMD_REBOOT_MODE + * Set the mode for the next MC reboot. Locks required: NONE. Sets the reboot + * mode to the specified value. Returns the old mode. + */ +#define MC_CMD_REBOOT_MODE 0x3f +#undef MC_CMD_0x3f_PRIVILEGE_CTG + +#define MC_CMD_0x3f_PRIVILEGE_CTG SRIOV_CTG_INSECURE + +/* MC_CMD_REBOOT_MODE_IN msgrequest */ +#define MC_CMD_REBOOT_MODE_IN_LEN 4 +#define MC_CMD_REBOOT_MODE_IN_VALUE_OFST 0 +#define MC_CMD_REBOOT_MODE_IN_VALUE_LEN 4 +/* enum: Normal. */ +#define MC_CMD_REBOOT_MODE_NORMAL 0x0 +/* enum: Power-on Reset. */ +#define MC_CMD_REBOOT_MODE_POR 0x2 +/* enum: Snapper. */ +#define MC_CMD_REBOOT_MODE_SNAPPER 0x3 +/* enum: snapper fake POR */ +#define MC_CMD_REBOOT_MODE_SNAPPER_POR 0x4 +#define MC_CMD_REBOOT_MODE_IN_FAKE_LBN 7 +#define MC_CMD_REBOOT_MODE_IN_FAKE_WIDTH 1 + +/* MC_CMD_REBOOT_MODE_OUT msgresponse */ +#define MC_CMD_REBOOT_MODE_OUT_LEN 4 +#define MC_CMD_REBOOT_MODE_OUT_VALUE_OFST 0 +#define MC_CMD_REBOOT_MODE_OUT_VALUE_LEN 4 + + +/***********************************/ +/* MC_CMD_SENSOR_INFO + * Returns information about every available sensor. + * + * Each sensor has a single (16bit) value, and a corresponding state. The + * mapping between value and state is nominally determined by the MC, but may + * be implemented using up to 2 ranges per sensor. + * + * This call returns a mask (32bit) of the sensors that are supported by this + * platform, then an array of sensor information structures, in order of sensor + * type (but without gaps for unimplemented sensors). Each structure defines + * the ranges for the corresponding sensor. An unused range is indicated by + * equal limit values. If one range is used, a value outside that range results + * in STATE_FATAL. If two ranges are used, a value outside the second range + * results in STATE_FATAL while a value outside the first and inside the second + * range results in STATE_WARNING. + * + * Sensor masks and sensor information arrays are organised into pages. For + * backward compatibility, older host software can only use sensors in page 0. + * Bit 32 in the sensor mask was previously unused, and is now reserved for use + * as the next page flag. + * + * If the request does not contain a PAGE value then firmware will only return + * page 0 of sensor information, with bit 31 in the sensor mask cleared. + * + * If the request contains a PAGE value then firmware responds with the sensor + * mask and sensor information array for that page of sensors. In this case bit + * 31 in the mask is set if another page exists. 
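+ *
+ * For illustration (assumed usage, not upstream text): a driver that wants
+ * every sensor would typically send MC_CMD_SENSOR_INFO_EXT_IN with PAGE=0,
+ * then keep incrementing PAGE and re-issuing the request for as long as the
+ * NEXT_PAGE bit (bit 31) is set in the returned mask.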
+ * + * Locks required: None Returns: 0 + */ +#define MC_CMD_SENSOR_INFO 0x41 +#undef MC_CMD_0x41_PRIVILEGE_CTG + +#define MC_CMD_0x41_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_SENSOR_INFO_IN msgrequest */ +#define MC_CMD_SENSOR_INFO_IN_LEN 0 + +/* MC_CMD_SENSOR_INFO_EXT_IN msgrequest */ +#define MC_CMD_SENSOR_INFO_EXT_IN_LEN 4 +/* Which page of sensors to report. + * + * Page 0 contains sensors 0 to 30 (sensor 31 is the next page bit). + * + * Page 1 contains sensors 32 to 62 (sensor 63 is the next page bit). etc. + */ +#define MC_CMD_SENSOR_INFO_EXT_IN_PAGE_OFST 0 +#define MC_CMD_SENSOR_INFO_EXT_IN_PAGE_LEN 4 + +/* MC_CMD_SENSOR_INFO_EXT_IN_V2 msgrequest */ +#define MC_CMD_SENSOR_INFO_EXT_IN_V2_LEN 8 +/* Which page of sensors to report. + * + * Page 0 contains sensors 0 to 30 (sensor 31 is the next page bit). + * + * Page 1 contains sensors 32 to 62 (sensor 63 is the next page bit). etc. + */ +#define MC_CMD_SENSOR_INFO_EXT_IN_V2_PAGE_OFST 0 +#define MC_CMD_SENSOR_INFO_EXT_IN_V2_PAGE_LEN 4 +/* Flags controlling information retrieved */ +#define MC_CMD_SENSOR_INFO_EXT_IN_V2_FLAGS_OFST 4 +#define MC_CMD_SENSOR_INFO_EXT_IN_V2_FLAGS_LEN 4 +#define MC_CMD_SENSOR_INFO_EXT_IN_V2_ENGINEERING_LBN 0 +#define MC_CMD_SENSOR_INFO_EXT_IN_V2_ENGINEERING_WIDTH 1 + +/* MC_CMD_SENSOR_INFO_OUT msgresponse */ +#define MC_CMD_SENSOR_INFO_OUT_LENMIN 4 +#define MC_CMD_SENSOR_INFO_OUT_LENMAX 252 +#define MC_CMD_SENSOR_INFO_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_SENSOR_INFO_OUT_LEN(num) (4+8*(num)) +#define MC_CMD_SENSOR_INFO_OUT_MC_CMD_SENSOR_ENTRY_NUM(len) (((len)-4)/8) +#define MC_CMD_SENSOR_INFO_OUT_MASK_OFST 0 +#define MC_CMD_SENSOR_INFO_OUT_MASK_LEN 4 +/* enum: Controller temperature: degC */ +#define MC_CMD_SENSOR_CONTROLLER_TEMP 0x0 +/* enum: Phy common temperature: degC */ +#define MC_CMD_SENSOR_PHY_COMMON_TEMP 0x1 +/* enum: Controller cooling: bool */ +#define MC_CMD_SENSOR_CONTROLLER_COOLING 0x2 +/* enum: Phy 0 temperature: degC */ +#define MC_CMD_SENSOR_PHY0_TEMP 0x3 +/* enum: Phy 0 cooling: bool */ +#define MC_CMD_SENSOR_PHY0_COOLING 0x4 +/* enum: Phy 1 temperature: degC */ +#define MC_CMD_SENSOR_PHY1_TEMP 0x5 +/* enum: Phy 1 cooling: bool */ +#define MC_CMD_SENSOR_PHY1_COOLING 0x6 +/* enum: 1.0v power: mV */ +#define MC_CMD_SENSOR_IN_1V0 0x7 +/* enum: 1.2v power: mV */ +#define MC_CMD_SENSOR_IN_1V2 0x8 +/* enum: 1.8v power: mV */ +#define MC_CMD_SENSOR_IN_1V8 0x9 +/* enum: 2.5v power: mV */ +#define MC_CMD_SENSOR_IN_2V5 0xa +/* enum: 3.3v power: mV */ +#define MC_CMD_SENSOR_IN_3V3 0xb +/* enum: 12v power: mV */ +#define MC_CMD_SENSOR_IN_12V0 0xc +/* enum: 1.2v analogue power: mV */ +#define MC_CMD_SENSOR_IN_1V2A 0xd +/* enum: reference voltage: mV */ +#define MC_CMD_SENSOR_IN_VREF 0xe +/* enum: AOE FPGA power: mV */ +#define MC_CMD_SENSOR_OUT_VAOE 0xf +/* enum: AOE FPGA temperature: degC */ +#define MC_CMD_SENSOR_AOE_TEMP 0x10 +/* enum: AOE FPGA PSU temperature: degC */ +#define MC_CMD_SENSOR_PSU_AOE_TEMP 0x11 +/* enum: AOE PSU temperature: degC */ +#define MC_CMD_SENSOR_PSU_TEMP 0x12 +/* enum: Fan 0 speed: RPM */ +#define MC_CMD_SENSOR_FAN_0 0x13 +/* enum: Fan 1 speed: RPM */ +#define MC_CMD_SENSOR_FAN_1 0x14 +/* enum: Fan 2 speed: RPM */ +#define MC_CMD_SENSOR_FAN_2 0x15 +/* enum: Fan 3 speed: RPM */ +#define MC_CMD_SENSOR_FAN_3 0x16 +/* enum: Fan 4 speed: RPM */ +#define MC_CMD_SENSOR_FAN_4 0x17 +/* enum: AOE FPGA input power: mV */ +#define MC_CMD_SENSOR_IN_VAOE 0x18 +/* enum: AOE FPGA current: mA */ +#define MC_CMD_SENSOR_OUT_IAOE 0x19 +/* enum: AOE FPGA input current: mA */ +#define 
MC_CMD_SENSOR_IN_IAOE 0x1a +/* enum: NIC power consumption: W */ +#define MC_CMD_SENSOR_NIC_POWER 0x1b +/* enum: 0.9v power voltage: mV */ +#define MC_CMD_SENSOR_IN_0V9 0x1c +/* enum: 0.9v power current: mA */ +#define MC_CMD_SENSOR_IN_I0V9 0x1d +/* enum: 1.2v power current: mA */ +#define MC_CMD_SENSOR_IN_I1V2 0x1e +/* enum: Not a sensor: reserved for the next page flag */ +#define MC_CMD_SENSOR_PAGE0_NEXT 0x1f +/* enum: 0.9v power voltage (at ADC): mV */ +#define MC_CMD_SENSOR_IN_0V9_ADC 0x20 +/* enum: Controller temperature 2: degC */ +#define MC_CMD_SENSOR_CONTROLLER_2_TEMP 0x21 +/* enum: Voltage regulator internal temperature: degC */ +#define MC_CMD_SENSOR_VREG_INTERNAL_TEMP 0x22 +/* enum: 0.9V voltage regulator temperature: degC */ +#define MC_CMD_SENSOR_VREG_0V9_TEMP 0x23 +/* enum: 1.2V voltage regulator temperature: degC */ +#define MC_CMD_SENSOR_VREG_1V2_TEMP 0x24 +/* enum: controller internal temperature sensor voltage (internal ADC): mV */ +#define MC_CMD_SENSOR_CONTROLLER_VPTAT 0x25 +/* enum: controller internal temperature (internal ADC): degC */ +#define MC_CMD_SENSOR_CONTROLLER_INTERNAL_TEMP 0x26 +/* enum: controller internal temperature sensor voltage (external ADC): mV */ +#define MC_CMD_SENSOR_CONTROLLER_VPTAT_EXTADC 0x27 +/* enum: controller internal temperature (external ADC): degC */ +#define MC_CMD_SENSOR_CONTROLLER_INTERNAL_TEMP_EXTADC 0x28 +/* enum: ambient temperature: degC */ +#define MC_CMD_SENSOR_AMBIENT_TEMP 0x29 +/* enum: air flow: bool */ +#define MC_CMD_SENSOR_AIRFLOW 0x2a +/* enum: voltage between VSS08D and VSS08D at CSR: mV */ +#define MC_CMD_SENSOR_VDD08D_VSS08D_CSR 0x2b +/* enum: voltage between VSS08D and VSS08D at CSR (external ADC): mV */ +#define MC_CMD_SENSOR_VDD08D_VSS08D_CSR_EXTADC 0x2c +/* enum: Hotpoint temperature: degC */ +#define MC_CMD_SENSOR_HOTPOINT_TEMP 0x2d +/* enum: Port 0 PHY power switch over-current: bool */ +#define MC_CMD_SENSOR_PHY_POWER_PORT0 0x2e +/* enum: Port 1 PHY power switch over-current: bool */ +#define MC_CMD_SENSOR_PHY_POWER_PORT1 0x2f +/* enum: Mop-up microcontroller reference voltage: mV */ +#define MC_CMD_SENSOR_MUM_VCC 0x30 +/* enum: 0.9v power phase A voltage: mV */ +#define MC_CMD_SENSOR_IN_0V9_A 0x31 +/* enum: 0.9v power phase A current: mA */ +#define MC_CMD_SENSOR_IN_I0V9_A 0x32 +/* enum: 0.9V voltage regulator phase A temperature: degC */ +#define MC_CMD_SENSOR_VREG_0V9_A_TEMP 0x33 +/* enum: 0.9v power phase B voltage: mV */ +#define MC_CMD_SENSOR_IN_0V9_B 0x34 +/* enum: 0.9v power phase B current: mA */ +#define MC_CMD_SENSOR_IN_I0V9_B 0x35 +/* enum: 0.9V voltage regulator phase B temperature: degC */ +#define MC_CMD_SENSOR_VREG_0V9_B_TEMP 0x36 +/* enum: CCOM AVREG 1v2 supply (interval ADC): mV */ +#define MC_CMD_SENSOR_CCOM_AVREG_1V2_SUPPLY 0x37 +/* enum: CCOM AVREG 1v2 supply (external ADC): mV */ +#define MC_CMD_SENSOR_CCOM_AVREG_1V2_SUPPLY_EXTADC 0x38 +/* enum: CCOM AVREG 1v8 supply (interval ADC): mV */ +#define MC_CMD_SENSOR_CCOM_AVREG_1V8_SUPPLY 0x39 +/* enum: CCOM AVREG 1v8 supply (external ADC): mV */ +#define MC_CMD_SENSOR_CCOM_AVREG_1V8_SUPPLY_EXTADC 0x3a +/* enum: CCOM RTS temperature: degC */ +#define MC_CMD_SENSOR_CONTROLLER_RTS 0x3b +/* enum: Not a sensor: reserved for the next page flag */ +#define MC_CMD_SENSOR_PAGE1_NEXT 0x3f +/* enum: controller internal temperature sensor voltage on master core + * (internal ADC): mV + */ +#define MC_CMD_SENSOR_CONTROLLER_MASTER_VPTAT 0x40 +/* enum: controller internal temperature on master core (internal ADC): degC */ +#define 
MC_CMD_SENSOR_CONTROLLER_MASTER_INTERNAL_TEMP 0x41 +/* enum: controller internal temperature sensor voltage on master core + * (external ADC): mV + */ +#define MC_CMD_SENSOR_CONTROLLER_MASTER_VPTAT_EXTADC 0x42 +/* enum: controller internal temperature on master core (external ADC): degC */ +#define MC_CMD_SENSOR_CONTROLLER_MASTER_INTERNAL_TEMP_EXTADC 0x43 +/* enum: controller internal temperature on slave core sensor voltage (internal + * ADC): mV + */ +#define MC_CMD_SENSOR_CONTROLLER_SLAVE_VPTAT 0x44 +/* enum: controller internal temperature on slave core (internal ADC): degC */ +#define MC_CMD_SENSOR_CONTROLLER_SLAVE_INTERNAL_TEMP 0x45 +/* enum: controller internal temperature on slave core sensor voltage (external + * ADC): mV + */ +#define MC_CMD_SENSOR_CONTROLLER_SLAVE_VPTAT_EXTADC 0x46 +/* enum: controller internal temperature on slave core (external ADC): degC */ +#define MC_CMD_SENSOR_CONTROLLER_SLAVE_INTERNAL_TEMP_EXTADC 0x47 +/* enum: Voltage supplied to the SODIMMs from their power supply: mV */ +#define MC_CMD_SENSOR_SODIMM_VOUT 0x49 +/* enum: Temperature of SODIMM 0 (if installed): degC */ +#define MC_CMD_SENSOR_SODIMM_0_TEMP 0x4a +/* enum: Temperature of SODIMM 1 (if installed): degC */ +#define MC_CMD_SENSOR_SODIMM_1_TEMP 0x4b +/* enum: Voltage supplied to the QSFP #0 from their power supply: mV */ +#define MC_CMD_SENSOR_PHY0_VCC 0x4c +/* enum: Voltage supplied to the QSFP #1 from their power supply: mV */ +#define MC_CMD_SENSOR_PHY1_VCC 0x4d +/* enum: Controller die temperature (TDIODE): degC */ +#define MC_CMD_SENSOR_CONTROLLER_TDIODE_TEMP 0x4e +/* enum: Board temperature (front): degC */ +#define MC_CMD_SENSOR_BOARD_FRONT_TEMP 0x4f +/* enum: Board temperature (back): degC */ +#define MC_CMD_SENSOR_BOARD_BACK_TEMP 0x50 +/* enum: 1.8v power current: mA */ +#define MC_CMD_SENSOR_IN_I1V8 0x51 +/* enum: 2.5v power current: mA */ +#define MC_CMD_SENSOR_IN_I2V5 0x52 +/* enum: 3.3v power current: mA */ +#define MC_CMD_SENSOR_IN_I3V3 0x53 +/* enum: 12v power current: mA */ +#define MC_CMD_SENSOR_IN_I12V0 0x54 +/* enum: 1.3v power: mV */ +#define MC_CMD_SENSOR_IN_1V3 0x55 +/* enum: 1.3v power current: mA */ +#define MC_CMD_SENSOR_IN_I1V3 0x56 +/* enum: Engineering sensor 1 */ +#define MC_CMD_SENSOR_ENGINEERING_1 0x57 +/* enum: Engineering sensor 2 */ +#define MC_CMD_SENSOR_ENGINEERING_2 0x58 +/* enum: Engineering sensor 3 */ +#define MC_CMD_SENSOR_ENGINEERING_3 0x59 +/* enum: Engineering sensor 4 */ +#define MC_CMD_SENSOR_ENGINEERING_4 0x5a +/* enum: Engineering sensor 5 */ +#define MC_CMD_SENSOR_ENGINEERING_5 0x5b +/* enum: Engineering sensor 6 */ +#define MC_CMD_SENSOR_ENGINEERING_6 0x5c +/* enum: Engineering sensor 7 */ +#define MC_CMD_SENSOR_ENGINEERING_7 0x5d +/* enum: Engineering sensor 8 */ +#define MC_CMD_SENSOR_ENGINEERING_8 0x5e +/* enum: Not a sensor: reserved for the next page flag */ +#define MC_CMD_SENSOR_PAGE2_NEXT 0x5f +/* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF */ +#define MC_CMD_SENSOR_ENTRY_OFST 4 +#define MC_CMD_SENSOR_ENTRY_LEN 8 +#define MC_CMD_SENSOR_ENTRY_LO_OFST 4 +#define MC_CMD_SENSOR_ENTRY_HI_OFST 8 +#define MC_CMD_SENSOR_ENTRY_MINNUM 0 +#define MC_CMD_SENSOR_ENTRY_MAXNUM 31 +#define MC_CMD_SENSOR_ENTRY_MAXNUM_MCDI2 127 + +/* MC_CMD_SENSOR_INFO_EXT_OUT msgresponse */ +#define MC_CMD_SENSOR_INFO_EXT_OUT_LENMIN 4 +#define MC_CMD_SENSOR_INFO_EXT_OUT_LENMAX 252 +#define MC_CMD_SENSOR_INFO_EXT_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_SENSOR_INFO_EXT_OUT_LEN(num) (4+8*(num)) +#define MC_CMD_SENSOR_INFO_EXT_OUT_MC_CMD_SENSOR_ENTRY_NUM(len) (((len)-4)/8) +#define 
MC_CMD_SENSOR_INFO_EXT_OUT_MASK_OFST 0 +#define MC_CMD_SENSOR_INFO_EXT_OUT_MASK_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_SENSOR_INFO_OUT */ +#define MC_CMD_SENSOR_INFO_EXT_OUT_NEXT_PAGE_LBN 31 +#define MC_CMD_SENSOR_INFO_EXT_OUT_NEXT_PAGE_WIDTH 1 +/* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF */ +/* MC_CMD_SENSOR_ENTRY_OFST 4 */ +/* MC_CMD_SENSOR_ENTRY_LEN 8 */ +/* MC_CMD_SENSOR_ENTRY_LO_OFST 4 */ +/* MC_CMD_SENSOR_ENTRY_HI_OFST 8 */ +/* MC_CMD_SENSOR_ENTRY_MINNUM 0 */ +/* MC_CMD_SENSOR_ENTRY_MAXNUM 31 */ +/* MC_CMD_SENSOR_ENTRY_MAXNUM_MCDI2 127 */ + +/* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF structuredef */ +#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_LEN 8 +#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN1_OFST 0 +#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN1_LEN 2 +#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN1_LBN 0 +#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN1_WIDTH 16 +#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX1_OFST 2 +#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX1_LEN 2 +#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX1_LBN 16 +#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX1_WIDTH 16 +#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN2_OFST 4 +#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN2_LEN 2 +#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN2_LBN 32 +#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN2_WIDTH 16 +#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX2_OFST 6 +#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX2_LEN 2 +#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX2_LBN 48 +#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX2_WIDTH 16 + + +/***********************************/ +/* MC_CMD_READ_SENSORS + * Returns the current reading from each sensor. DMAs an array of sensor + * readings, in order of sensor type (but without gaps for unimplemented + * sensors), into host memory. Each array element is a + * MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF dword. + * + * If the request does not contain the LENGTH field then only sensors 0 to 30 + * are reported, to avoid DMA buffer overflow in older host software. If the + * sensor readings require more space than the LENGTH allows, then EINVAL is + * returned. + * + * The MC will send a SENSOREVT event every time any sensor changes state. The + * driver is responsible for ensuring that it doesn't miss any events. The + * board will function normally if all sensors are in STATE_OK or + * STATE_WARNING. Otherwise the board should not be expected to function. + */ +#define MC_CMD_READ_SENSORS 0x42 +#undef MC_CMD_0x42_PRIVILEGE_CTG + +#define MC_CMD_0x42_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_READ_SENSORS_IN msgrequest */ +#define MC_CMD_READ_SENSORS_IN_LEN 8 +/* DMA address of host buffer for sensor readings (must be 4Kbyte aligned). */ +#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_OFST 0 +#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_LEN 8 +#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_LO_OFST 0 +#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_HI_OFST 4 + +/* MC_CMD_READ_SENSORS_EXT_IN msgrequest */ +#define MC_CMD_READ_SENSORS_EXT_IN_LEN 12 +/* DMA address of host buffer for sensor readings (must be 4Kbyte aligned). */ +#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_OFST 0 +#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_LEN 8 +#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_LO_OFST 0 +#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_HI_OFST 4 +/* Size in bytes of host buffer. 
*/ +#define MC_CMD_READ_SENSORS_EXT_IN_LENGTH_OFST 8 +#define MC_CMD_READ_SENSORS_EXT_IN_LENGTH_LEN 4 + +/* MC_CMD_READ_SENSORS_EXT_IN_V2 msgrequest */ +#define MC_CMD_READ_SENSORS_EXT_IN_V2_LEN 16 +/* DMA address of host buffer for sensor readings (must be 4Kbyte aligned). */ +#define MC_CMD_READ_SENSORS_EXT_IN_V2_DMA_ADDR_OFST 0 +#define MC_CMD_READ_SENSORS_EXT_IN_V2_DMA_ADDR_LEN 8 +#define MC_CMD_READ_SENSORS_EXT_IN_V2_DMA_ADDR_LO_OFST 0 +#define MC_CMD_READ_SENSORS_EXT_IN_V2_DMA_ADDR_HI_OFST 4 +/* Size in bytes of host buffer. */ +#define MC_CMD_READ_SENSORS_EXT_IN_V2_LENGTH_OFST 8 +#define MC_CMD_READ_SENSORS_EXT_IN_V2_LENGTH_LEN 4 +/* Flags controlling information retrieved */ +#define MC_CMD_READ_SENSORS_EXT_IN_V2_FLAGS_OFST 12 +#define MC_CMD_READ_SENSORS_EXT_IN_V2_FLAGS_LEN 4 +#define MC_CMD_READ_SENSORS_EXT_IN_V2_ENGINEERING_LBN 0 +#define MC_CMD_READ_SENSORS_EXT_IN_V2_ENGINEERING_WIDTH 1 + +/* MC_CMD_READ_SENSORS_OUT msgresponse */ +#define MC_CMD_READ_SENSORS_OUT_LEN 0 + +/* MC_CMD_READ_SENSORS_EXT_OUT msgresponse */ +#define MC_CMD_READ_SENSORS_EXT_OUT_LEN 0 + +/* MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF structuredef */ +#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_LEN 4 +#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_OFST 0 +#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_LEN 2 +#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_LBN 0 +#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_WIDTH 16 +#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_OFST 2 +#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_LEN 1 +/* enum: Ok. */ +#define MC_CMD_SENSOR_STATE_OK 0x0 +/* enum: Breached warning threshold. */ +#define MC_CMD_SENSOR_STATE_WARNING 0x1 +/* enum: Breached fatal threshold. */ +#define MC_CMD_SENSOR_STATE_FATAL 0x2 +/* enum: Fault with sensor. */ +#define MC_CMD_SENSOR_STATE_BROKEN 0x3 +/* enum: Sensor is working but does not currently have a reading. */ +#define MC_CMD_SENSOR_STATE_NO_READING 0x4 +/* enum: Sensor initialisation failed. */ +#define MC_CMD_SENSOR_STATE_INIT_FAILED 0x5 +#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_LBN 16 +#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_WIDTH 8 +#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_OFST 3 +#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_LEN 1 +/* Enum values, see field(s): */ +/* MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */ +#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_LBN 24 +#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_WIDTH 8 + + +/***********************************/ +/* MC_CMD_GET_PHY_STATE + * Report current state of PHY. A 'zombie' PHY is a PHY that has failed to boot + * (e.g. due to missing or corrupted firmware). Locks required: None. Return + * code: 0 + */ +#define MC_CMD_GET_PHY_STATE 0x43 +#undef MC_CMD_0x43_PRIVILEGE_CTG + +#define MC_CMD_0x43_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_PHY_STATE_IN msgrequest */ +#define MC_CMD_GET_PHY_STATE_IN_LEN 0 + +/* MC_CMD_GET_PHY_STATE_OUT msgresponse */ +#define MC_CMD_GET_PHY_STATE_OUT_LEN 4 +#define MC_CMD_GET_PHY_STATE_OUT_STATE_OFST 0 +#define MC_CMD_GET_PHY_STATE_OUT_STATE_LEN 4 +/* enum: Ok. */ +#define MC_CMD_PHY_STATE_OK 0x1 +/* enum: Faulty. */ +#define MC_CMD_PHY_STATE_ZOMBIE 0x2 + + +/***********************************/ +/* MC_CMD_SETUP_8021QBB + * 802.1Qbb control. 8 Tx queues that map to priorities 0 - 7. Use all 1s to + * disable 802.1Qbb for a given priority. 
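+ *
+ * (Assumed interpretation, for illustration only: TXQS is a 32-byte field,
+ * which would give one 32-bit entry per priority 0-7; writing 0xffffffff to
+ * an entry would then disable 802.1Qbb for that priority.)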
+ */ +#define MC_CMD_SETUP_8021QBB 0x44 + +/* MC_CMD_SETUP_8021QBB_IN msgrequest */ +#define MC_CMD_SETUP_8021QBB_IN_LEN 32 +#define MC_CMD_SETUP_8021QBB_IN_TXQS_OFST 0 +#define MC_CMD_SETUP_8021QBB_IN_TXQS_LEN 32 + +/* MC_CMD_SETUP_8021QBB_OUT msgresponse */ +#define MC_CMD_SETUP_8021QBB_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_WOL_FILTER_GET + * Retrieve ID of any WoL filters. Locks required: None. Returns: 0, ENOSYS + */ +#define MC_CMD_WOL_FILTER_GET 0x45 +#undef MC_CMD_0x45_PRIVILEGE_CTG + +#define MC_CMD_0x45_PRIVILEGE_CTG SRIOV_CTG_LINK + +/* MC_CMD_WOL_FILTER_GET_IN msgrequest */ +#define MC_CMD_WOL_FILTER_GET_IN_LEN 0 + +/* MC_CMD_WOL_FILTER_GET_OUT msgresponse */ +#define MC_CMD_WOL_FILTER_GET_OUT_LEN 4 +#define MC_CMD_WOL_FILTER_GET_OUT_FILTER_ID_OFST 0 +#define MC_CMD_WOL_FILTER_GET_OUT_FILTER_ID_LEN 4 + + +/***********************************/ +/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD + * Add a protocol offload to NIC for lights-out state. Locks required: None. + * Returns: 0, ENOSYS + */ +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD 0x46 +#undef MC_CMD_0x46_PRIVILEGE_CTG + +#define MC_CMD_0x46_PRIVILEGE_CTG SRIOV_CTG_LINK + +/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN msgrequest */ +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMIN 8 +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMAX 252 +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMAX_MCDI2 1020 +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LEN(num) (4+4*(num)) +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_NUM(len) (((len)-4)/4) +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4 +#define MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_ARP 0x1 /* enum */ +#define MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_NS 0x2 /* enum */ +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_OFST 4 +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_LEN 4 +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_MINNUM 1 +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_MAXNUM 62 +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_MAXNUM_MCDI2 254 + +/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP msgrequest */ +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_LEN 14 +/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 */ +/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4 */ +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_MAC_OFST 4 +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_MAC_LEN 6 +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_IP_OFST 10 +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_IP_LEN 4 + +/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS msgrequest */ +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_LEN 42 +/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 */ +/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4 */ +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_MAC_OFST 4 +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_MAC_LEN 6 +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_SNIPV6_OFST 10 +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_SNIPV6_LEN 16 +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_IPV6_OFST 26 +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_IPV6_LEN 16 + +/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT msgresponse */ +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_LEN 4 +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_FILTER_ID_OFST 0 +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_FILTER_ID_LEN 4 + + +/***********************************/ +/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD + * Remove a protocol offload from NIC for lights-out state. Locks required: + * None. 
Returns: 0, ENOSYS + */ +#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD 0x47 +#undef MC_CMD_0x47_PRIVILEGE_CTG + +#define MC_CMD_0x47_PRIVILEGE_CTG SRIOV_CTG_LINK + +/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN msgrequest */ +#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_LEN 8 +#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 +#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4 +#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_FILTER_ID_OFST 4 +#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_FILTER_ID_LEN 4 + +/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_OUT msgresponse */ +#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_MAC_RESET_RESTORE + * Restore MAC after block reset. Locks required: None. Returns: 0. + */ +#define MC_CMD_MAC_RESET_RESTORE 0x48 + +/* MC_CMD_MAC_RESET_RESTORE_IN msgrequest */ +#define MC_CMD_MAC_RESET_RESTORE_IN_LEN 0 + +/* MC_CMD_MAC_RESET_RESTORE_OUT msgresponse */ +#define MC_CMD_MAC_RESET_RESTORE_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_TESTASSERT + * Deliberately trigger an assert-detonation in the firmware for testing + * purposes (i.e. to allow tests that the driver copes gracefully). Locks + * required: None Returns: 0 + */ +#define MC_CMD_TESTASSERT 0x49 +#undef MC_CMD_0x49_PRIVILEGE_CTG + +#define MC_CMD_0x49_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND + +/* MC_CMD_TESTASSERT_IN msgrequest */ +#define MC_CMD_TESTASSERT_IN_LEN 0 + +/* MC_CMD_TESTASSERT_OUT msgresponse */ +#define MC_CMD_TESTASSERT_OUT_LEN 0 + +/* MC_CMD_TESTASSERT_V2_IN msgrequest */ +#define MC_CMD_TESTASSERT_V2_IN_LEN 4 +/* How to provoke the assertion */ +#define MC_CMD_TESTASSERT_V2_IN_TYPE_OFST 0 +#define MC_CMD_TESTASSERT_V2_IN_TYPE_LEN 4 +/* enum: Assert using the FAIL_ASSERTION_WITH_USEFUL_VALUES macro. Unless + * you're testing firmware, this is what you want. + */ +#define MC_CMD_TESTASSERT_V2_IN_FAIL_ASSERTION_WITH_USEFUL_VALUES 0x0 +/* enum: Assert using assert(0); */ +#define MC_CMD_TESTASSERT_V2_IN_ASSERT_FALSE 0x1 +/* enum: Deliberately trigger a watchdog */ +#define MC_CMD_TESTASSERT_V2_IN_WATCHDOG 0x2 +/* enum: Deliberately trigger a trap by loading from an invalid address */ +#define MC_CMD_TESTASSERT_V2_IN_LOAD_TRAP 0x3 +/* enum: Deliberately trigger a trap by storing to an invalid address */ +#define MC_CMD_TESTASSERT_V2_IN_STORE_TRAP 0x4 +/* enum: Jump to an invalid address */ +#define MC_CMD_TESTASSERT_V2_IN_JUMP_TRAP 0x5 + +/* MC_CMD_TESTASSERT_V2_OUT msgresponse */ +#define MC_CMD_TESTASSERT_V2_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_WORKAROUND + * Enable/Disable a given workaround. The mcfw will return EINVAL if it doesn't + * understand the given workaround number - which should not be treated as a + * hard error by client code. This op does not imply any semantics about each + * workaround, that's between the driver and the mcfw on a per-workaround + * basis. Locks required: None. Returns: 0, EINVAL . + */ +#define MC_CMD_WORKAROUND 0x4a +#undef MC_CMD_0x4a_PRIVILEGE_CTG + +#define MC_CMD_0x4a_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_WORKAROUND_IN msgrequest */ +#define MC_CMD_WORKAROUND_IN_LEN 8 +/* The enums here must correspond with those in MC_CMD_GET_WORKAROUND. */ +#define MC_CMD_WORKAROUND_IN_TYPE_OFST 0 +#define MC_CMD_WORKAROUND_IN_TYPE_LEN 4 +/* enum: Bug 17230 work around. */ +#define MC_CMD_WORKAROUND_BUG17230 0x1 +/* enum: Bug 35388 work around (unsafe EVQ writes). 
*/ +#define MC_CMD_WORKAROUND_BUG35388 0x2 +/* enum: Bug35017 workaround (A64 tables must be identity map) */ +#define MC_CMD_WORKAROUND_BUG35017 0x3 +/* enum: Bug 41750 present (MC_CMD_TRIGGER_INTERRUPT won't work) */ +#define MC_CMD_WORKAROUND_BUG41750 0x4 +/* enum: Bug 42008 present (Interrupts can overtake associated events). Caution + * - before adding code that queries this workaround, remember that there's + * released Monza firmware that doesn't understand MC_CMD_WORKAROUND_BUG42008, + * and will hence (incorrectly) report that the bug doesn't exist. + */ +#define MC_CMD_WORKAROUND_BUG42008 0x5 +/* enum: Bug 26807 features present in firmware (multicast filter chaining) + * This feature cannot be turned on/off while there are any filters already + * present. The behaviour in such case depends on the acting client's privilege + * level. If the client has the admin privilege, then all functions that have + * filters installed will be FLRed and the FLR_DONE flag will be set. Otherwise + * the command will fail with MC_CMD_ERR_FILTERS_PRESENT. + */ +#define MC_CMD_WORKAROUND_BUG26807 0x6 +/* enum: Bug 61265 work around (broken EVQ TMR writes). */ +#define MC_CMD_WORKAROUND_BUG61265 0x7 +/* 0 = disable the workaround indicated by TYPE; any non-zero value = enable + * the workaround + */ +#define MC_CMD_WORKAROUND_IN_ENABLED_OFST 4 +#define MC_CMD_WORKAROUND_IN_ENABLED_LEN 4 + +/* MC_CMD_WORKAROUND_OUT msgresponse */ +#define MC_CMD_WORKAROUND_OUT_LEN 0 + +/* MC_CMD_WORKAROUND_EXT_OUT msgresponse: This response format will be used + * when (TYPE == MC_CMD_WORKAROUND_BUG26807) + */ +#define MC_CMD_WORKAROUND_EXT_OUT_LEN 4 +#define MC_CMD_WORKAROUND_EXT_OUT_FLAGS_OFST 0 +#define MC_CMD_WORKAROUND_EXT_OUT_FLAGS_LEN 4 +#define MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN 0 +#define MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_WIDTH 1 + + +/***********************************/ +/* MC_CMD_GET_PHY_MEDIA_INFO + * Read media-specific data from PHY (e.g. SFP/SFP+ module ID information for + * SFP+ PHYs). The 'media type' can be found via GET_PHY_CFG + * (GET_PHY_CFG_OUT_MEDIA_TYPE); the valid 'page number' input values, and the + * output data, are interpreted on a per-type basis. For SFP+: PAGE=0 or 1 + * returns a 128-byte block read from module I2C address 0xA0 offset 0 or 0x80. + * Anything else: currently undefined. Locks required: None. Return code: 0. 
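+ *
+ * Hedged example (not from the upstream documentation): to dump the first
+ * 256 bytes of an SFP+ module's ID EEPROM, a caller could issue this command
+ * twice with PAGE=0 and PAGE=1 and concatenate the two 128-byte DATA blocks
+ * returned in MC_CMD_GET_PHY_MEDIA_INFO_OUT.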
+ */ +#define MC_CMD_GET_PHY_MEDIA_INFO 0x4b +#undef MC_CMD_0x4b_PRIVILEGE_CTG + +#define MC_CMD_0x4b_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_GET_PHY_MEDIA_INFO_IN msgrequest */ +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN 4 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_PAGE_OFST 0 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_PAGE_LEN 4 + +/* MC_CMD_GET_PHY_MEDIA_INFO_OUT msgresponse */ +#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMIN 5 +#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX 252 +#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(num) (4+1*(num)) +#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_NUM(len) (((len)-4)/1) +/* in bytes */ +#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATALEN_OFST 0 +#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATALEN_LEN 4 +#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST 4 +#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_LEN 1 +#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_MINNUM 1 +#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_MAXNUM 248 +#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_MAXNUM_MCDI2 1016 + + +/***********************************/ +/* MC_CMD_NVRAM_TEST + * Test a particular NVRAM partition for valid contents (where "valid" depends + * on the type of partition). + */ +#define MC_CMD_NVRAM_TEST 0x4c +#undef MC_CMD_0x4c_PRIVILEGE_CTG + +#define MC_CMD_0x4c_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND + +/* MC_CMD_NVRAM_TEST_IN msgrequest */ +#define MC_CMD_NVRAM_TEST_IN_LEN 4 +#define MC_CMD_NVRAM_TEST_IN_TYPE_OFST 0 +#define MC_CMD_NVRAM_TEST_IN_TYPE_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */ + +/* MC_CMD_NVRAM_TEST_OUT msgresponse */ +#define MC_CMD_NVRAM_TEST_OUT_LEN 4 +#define MC_CMD_NVRAM_TEST_OUT_RESULT_OFST 0 +#define MC_CMD_NVRAM_TEST_OUT_RESULT_LEN 4 +/* enum: Passed. */ +#define MC_CMD_NVRAM_TEST_PASS 0x0 +/* enum: Failed. */ +#define MC_CMD_NVRAM_TEST_FAIL 0x1 +/* enum: Not supported. */ +#define MC_CMD_NVRAM_TEST_NOTSUPP 0x2 + + +/***********************************/ +/* MC_CMD_MRSFP_TWEAK + * Read status and/or set parameters for the 'mrsfp' driver in mr_rusty builds. + * I2C I/O expander bits are always read; if equaliser parameters are supplied, + * they are configured first. Locks required: None. Return code: 0, EINVAL. + */ +#define MC_CMD_MRSFP_TWEAK 0x4d + +/* MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG msgrequest */ +#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_LEN 16 +/* 0-6 low->high de-emph. 
*/ +#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_LEVEL_OFST 0 +#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_LEVEL_LEN 4 +/* 0-8 low->high ref.V */ +#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_DT_CFG_OFST 4 +#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_DT_CFG_LEN 4 +/* 0-8 low->high boost */ +#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_BOOST_OFST 8 +#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_BOOST_LEN 4 +/* 0-8 low->high ref.V */ +#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_DT_CFG_OFST 12 +#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_DT_CFG_LEN 4 + +/* MC_CMD_MRSFP_TWEAK_IN_READ_ONLY msgrequest */ +#define MC_CMD_MRSFP_TWEAK_IN_READ_ONLY_LEN 0 + +/* MC_CMD_MRSFP_TWEAK_OUT msgresponse */ +#define MC_CMD_MRSFP_TWEAK_OUT_LEN 12 +/* input bits */ +#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_INPUTS_OFST 0 +#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_INPUTS_LEN 4 +/* output bits */ +#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_OUTPUTS_OFST 4 +#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_OUTPUTS_LEN 4 +/* direction */ +#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OFST 8 +#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_LEN 4 +/* enum: Out. */ +#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OUT 0x0 +/* enum: In. */ +#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_IN 0x1 + + +/***********************************/ +/* MC_CMD_SENSOR_SET_LIMS + * Adjusts the sensor limits. This is a warranty-voiding operation. Returns: + * ENOENT if the sensor specified does not exist, EINVAL if the limits are out + * of range. + */ +#define MC_CMD_SENSOR_SET_LIMS 0x4e +#undef MC_CMD_0x4e_PRIVILEGE_CTG + +#define MC_CMD_0x4e_PRIVILEGE_CTG SRIOV_CTG_INSECURE + +/* MC_CMD_SENSOR_SET_LIMS_IN msgrequest */ +#define MC_CMD_SENSOR_SET_LIMS_IN_LEN 20 +#define MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_OFST 0 +#define MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */ +/* interpretation is sensor-specific. */ +#define MC_CMD_SENSOR_SET_LIMS_IN_LOW0_OFST 4 +#define MC_CMD_SENSOR_SET_LIMS_IN_LOW0_LEN 4 +/* interpretation is sensor-specific. */ +#define MC_CMD_SENSOR_SET_LIMS_IN_HI0_OFST 8 +#define MC_CMD_SENSOR_SET_LIMS_IN_HI0_LEN 4 +/* interpretation is sensor-specific. */ +#define MC_CMD_SENSOR_SET_LIMS_IN_LOW1_OFST 12 +#define MC_CMD_SENSOR_SET_LIMS_IN_LOW1_LEN 4 +/* interpretation is sensor-specific. */ +#define MC_CMD_SENSOR_SET_LIMS_IN_HI1_OFST 16 +#define MC_CMD_SENSOR_SET_LIMS_IN_HI1_LEN 4 + +/* MC_CMD_SENSOR_SET_LIMS_OUT msgresponse */ +#define MC_CMD_SENSOR_SET_LIMS_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_GET_RESOURCE_LIMITS + */ +#define MC_CMD_GET_RESOURCE_LIMITS 0x4f + +/* MC_CMD_GET_RESOURCE_LIMITS_IN msgrequest */ +#define MC_CMD_GET_RESOURCE_LIMITS_IN_LEN 0 + +/* MC_CMD_GET_RESOURCE_LIMITS_OUT msgresponse */ +#define MC_CMD_GET_RESOURCE_LIMITS_OUT_LEN 16 +#define MC_CMD_GET_RESOURCE_LIMITS_OUT_BUFTBL_OFST 0 +#define MC_CMD_GET_RESOURCE_LIMITS_OUT_BUFTBL_LEN 4 +#define MC_CMD_GET_RESOURCE_LIMITS_OUT_EVQ_OFST 4 +#define MC_CMD_GET_RESOURCE_LIMITS_OUT_EVQ_LEN 4 +#define MC_CMD_GET_RESOURCE_LIMITS_OUT_RXQ_OFST 8 +#define MC_CMD_GET_RESOURCE_LIMITS_OUT_RXQ_LEN 4 +#define MC_CMD_GET_RESOURCE_LIMITS_OUT_TXQ_OFST 12 +#define MC_CMD_GET_RESOURCE_LIMITS_OUT_TXQ_LEN 4 + + +/***********************************/ +/* MC_CMD_NVRAM_PARTITIONS + * Reads the list of available virtual NVRAM partition types. Locks required: + * none. Returns: 0, EINVAL (bad type). 
+ */ +#define MC_CMD_NVRAM_PARTITIONS 0x51 +#undef MC_CMD_0x51_PRIVILEGE_CTG + +#define MC_CMD_0x51_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_NVRAM_PARTITIONS_IN msgrequest */ +#define MC_CMD_NVRAM_PARTITIONS_IN_LEN 0 + +/* MC_CMD_NVRAM_PARTITIONS_OUT msgresponse */ +#define MC_CMD_NVRAM_PARTITIONS_OUT_LENMIN 4 +#define MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX 252 +#define MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_NVRAM_PARTITIONS_OUT_LEN(num) (4+4*(num)) +#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_NUM(len) (((len)-4)/4) +/* total number of partitions */ +#define MC_CMD_NVRAM_PARTITIONS_OUT_NUM_PARTITIONS_OFST 0 +#define MC_CMD_NVRAM_PARTITIONS_OUT_NUM_PARTITIONS_LEN 4 +/* type ID code for each of NUM_PARTITIONS partitions */ +#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_OFST 4 +#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_LEN 4 +#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_MINNUM 0 +#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_MAXNUM 62 +#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_MAXNUM_MCDI2 254 + + +/***********************************/ +/* MC_CMD_NVRAM_METADATA + * Reads soft metadata for a virtual NVRAM partition type. Locks required: + * none. Returns: 0, EINVAL (bad type). + */ +#define MC_CMD_NVRAM_METADATA 0x52 +#undef MC_CMD_0x52_PRIVILEGE_CTG + +#define MC_CMD_0x52_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_NVRAM_METADATA_IN msgrequest */ +#define MC_CMD_NVRAM_METADATA_IN_LEN 4 +/* Partition type ID code */ +#define MC_CMD_NVRAM_METADATA_IN_TYPE_OFST 0 +#define MC_CMD_NVRAM_METADATA_IN_TYPE_LEN 4 + +/* MC_CMD_NVRAM_METADATA_OUT msgresponse */ +#define MC_CMD_NVRAM_METADATA_OUT_LENMIN 20 +#define MC_CMD_NVRAM_METADATA_OUT_LENMAX 252 +#define MC_CMD_NVRAM_METADATA_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_NVRAM_METADATA_OUT_LEN(num) (20+1*(num)) +#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_NUM(len) (((len)-20)/1) +/* Partition type ID code */ +#define MC_CMD_NVRAM_METADATA_OUT_TYPE_OFST 0 +#define MC_CMD_NVRAM_METADATA_OUT_TYPE_LEN 4 +#define MC_CMD_NVRAM_METADATA_OUT_FLAGS_OFST 4 +#define MC_CMD_NVRAM_METADATA_OUT_FLAGS_LEN 4 +#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_LBN 0 +#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_WIDTH 1 +#define MC_CMD_NVRAM_METADATA_OUT_VERSION_VALID_LBN 1 +#define MC_CMD_NVRAM_METADATA_OUT_VERSION_VALID_WIDTH 1 +#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_VALID_LBN 2 +#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_VALID_WIDTH 1 +/* Subtype ID code for content of this partition */ +#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_OFST 8 +#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_LEN 4 +/* 1st component of W.X.Y.Z version number for content of this partition */ +#define MC_CMD_NVRAM_METADATA_OUT_VERSION_W_OFST 12 +#define MC_CMD_NVRAM_METADATA_OUT_VERSION_W_LEN 2 +/* 2nd component of W.X.Y.Z version number for content of this partition */ +#define MC_CMD_NVRAM_METADATA_OUT_VERSION_X_OFST 14 +#define MC_CMD_NVRAM_METADATA_OUT_VERSION_X_LEN 2 +/* 3rd component of W.X.Y.Z version number for content of this partition */ +#define MC_CMD_NVRAM_METADATA_OUT_VERSION_Y_OFST 16 +#define MC_CMD_NVRAM_METADATA_OUT_VERSION_Y_LEN 2 +/* 4th component of W.X.Y.Z version number for content of this partition */ +#define MC_CMD_NVRAM_METADATA_OUT_VERSION_Z_OFST 18 +#define MC_CMD_NVRAM_METADATA_OUT_VERSION_Z_LEN 2 +/* Zero-terminated string describing the content of this partition */ +#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_OFST 20 +#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_LEN 1 +#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_MINNUM 0 +#define 
MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_MAXNUM 232 +#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_MAXNUM_MCDI2 1000 + + +/***********************************/ +/* MC_CMD_GET_MAC_ADDRESSES + * Returns the base MAC, count and stride for the requesting function + */ +#define MC_CMD_GET_MAC_ADDRESSES 0x55 +#undef MC_CMD_0x55_PRIVILEGE_CTG + +#define MC_CMD_0x55_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_MAC_ADDRESSES_IN msgrequest */ +#define MC_CMD_GET_MAC_ADDRESSES_IN_LEN 0 + +/* MC_CMD_GET_MAC_ADDRESSES_OUT msgresponse */ +#define MC_CMD_GET_MAC_ADDRESSES_OUT_LEN 16 +/* Base MAC address */ +#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE_OFST 0 +#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE_LEN 6 +/* Padding */ +#define MC_CMD_GET_MAC_ADDRESSES_OUT_RESERVED_OFST 6 +#define MC_CMD_GET_MAC_ADDRESSES_OUT_RESERVED_LEN 2 +/* Number of allocated MAC addresses */ +#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_COUNT_OFST 8 +#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_COUNT_LEN 4 +/* Spacing of allocated MAC addresses */ +#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_STRIDE_OFST 12 +#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_STRIDE_LEN 4 + + +/***********************************/ +/* MC_CMD_CLP + * Perform a CLP related operation, see SF-110495-PS for details of CLP + * processing. This command has been extended to accommodate the requirements of + * different manufacturers which are to be found in SF-119187-TC, SF-119186-TC, + * SF-120509-TC and SF-117282-PS. + */ +#define MC_CMD_CLP 0x56 +#undef MC_CMD_0x56_PRIVILEGE_CTG + +#define MC_CMD_0x56_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND + +/* MC_CMD_CLP_IN msgrequest */ +#define MC_CMD_CLP_IN_LEN 4 +/* Sub operation */ +#define MC_CMD_CLP_IN_OP_OFST 0 +#define MC_CMD_CLP_IN_OP_LEN 4 +/* enum: Return to factory default settings */ +#define MC_CMD_CLP_OP_DEFAULT 0x1 +/* enum: Set MAC address */ +#define MC_CMD_CLP_OP_SET_MAC 0x2 +/* enum: Get MAC address */ +#define MC_CMD_CLP_OP_GET_MAC 0x3 +/* enum: Set UEFI/GPXE boot mode */ +#define MC_CMD_CLP_OP_SET_BOOT 0x4 +/* enum: Get UEFI/GPXE boot mode */ +#define MC_CMD_CLP_OP_GET_BOOT 0x5 + +/* MC_CMD_CLP_OUT msgresponse */ +#define MC_CMD_CLP_OUT_LEN 0 + +/* MC_CMD_CLP_IN_DEFAULT msgrequest */ +#define MC_CMD_CLP_IN_DEFAULT_LEN 4 +/* MC_CMD_CLP_IN_OP_OFST 0 */ +/* MC_CMD_CLP_IN_OP_LEN 4 */ + +/* MC_CMD_CLP_OUT_DEFAULT msgresponse */ +#define MC_CMD_CLP_OUT_DEFAULT_LEN 0 + +/* MC_CMD_CLP_IN_SET_MAC msgrequest */ +#define MC_CMD_CLP_IN_SET_MAC_LEN 12 +/* MC_CMD_CLP_IN_OP_OFST 0 */ +/* MC_CMD_CLP_IN_OP_LEN 4 */ +/* The MAC address assigned to port. A zero MAC address of 00:00:00:00:00:00 + * restores the permanent (factory-programmed) MAC address associated with the + * port. A non-zero MAC address persists until a PCIe reset or a power cycle. + */ +#define MC_CMD_CLP_IN_SET_MAC_ADDR_OFST 4 +#define MC_CMD_CLP_IN_SET_MAC_ADDR_LEN 6 +/* Padding */ +#define MC_CMD_CLP_IN_SET_MAC_RESERVED_OFST 10 +#define MC_CMD_CLP_IN_SET_MAC_RESERVED_LEN 2 + +/* MC_CMD_CLP_OUT_SET_MAC msgresponse */ +#define MC_CMD_CLP_OUT_SET_MAC_LEN 0 + +/* MC_CMD_CLP_IN_SET_MAC_V2 msgrequest */ +#define MC_CMD_CLP_IN_SET_MAC_V2_LEN 16 +/* MC_CMD_CLP_IN_OP_OFST 0 */ +/* MC_CMD_CLP_IN_OP_LEN 4 */ +/* The MAC address assigned to port. A zero MAC address of 00:00:00:00:00:00 + * restores the permanent (factory-programmed) MAC address associated with the + * port. A non-zero MAC address persists until a PCIe reset or a power cycle. 
+ */ +#define MC_CMD_CLP_IN_SET_MAC_V2_ADDR_OFST 4 +#define MC_CMD_CLP_IN_SET_MAC_V2_ADDR_LEN 6 +/* Padding */ +#define MC_CMD_CLP_IN_SET_MAC_V2_RESERVED_OFST 10 +#define MC_CMD_CLP_IN_SET_MAC_V2_RESERVED_LEN 2 +#define MC_CMD_CLP_IN_SET_MAC_V2_FLAGS_OFST 12 +#define MC_CMD_CLP_IN_SET_MAC_V2_FLAGS_LEN 4 +#define MC_CMD_CLP_IN_SET_MAC_V2_VIRTUAL_LBN 0 +#define MC_CMD_CLP_IN_SET_MAC_V2_VIRTUAL_WIDTH 1 + +/* MC_CMD_CLP_IN_GET_MAC msgrequest */ +#define MC_CMD_CLP_IN_GET_MAC_LEN 4 +/* MC_CMD_CLP_IN_OP_OFST 0 */ +/* MC_CMD_CLP_IN_OP_LEN 4 */ + +/* MC_CMD_CLP_IN_GET_MAC_V2 msgrequest */ +#define MC_CMD_CLP_IN_GET_MAC_V2_LEN 8 +/* MC_CMD_CLP_IN_OP_OFST 0 */ +/* MC_CMD_CLP_IN_OP_LEN 4 */ +#define MC_CMD_CLP_IN_GET_MAC_V2_FLAGS_OFST 4 +#define MC_CMD_CLP_IN_GET_MAC_V2_FLAGS_LEN 4 +#define MC_CMD_CLP_IN_GET_MAC_V2_PERMANENT_LBN 0 +#define MC_CMD_CLP_IN_GET_MAC_V2_PERMANENT_WIDTH 1 + +/* MC_CMD_CLP_OUT_GET_MAC msgresponse */ +#define MC_CMD_CLP_OUT_GET_MAC_LEN 8 +/* MAC address assigned to port */ +#define MC_CMD_CLP_OUT_GET_MAC_ADDR_OFST 0 +#define MC_CMD_CLP_OUT_GET_MAC_ADDR_LEN 6 +/* Padding */ +#define MC_CMD_CLP_OUT_GET_MAC_RESERVED_OFST 6 +#define MC_CMD_CLP_OUT_GET_MAC_RESERVED_LEN 2 + +/* MC_CMD_CLP_IN_SET_BOOT msgrequest */ +#define MC_CMD_CLP_IN_SET_BOOT_LEN 5 +/* MC_CMD_CLP_IN_OP_OFST 0 */ +/* MC_CMD_CLP_IN_OP_LEN 4 */ +/* Boot flag */ +#define MC_CMD_CLP_IN_SET_BOOT_FLAG_OFST 4 +#define MC_CMD_CLP_IN_SET_BOOT_FLAG_LEN 1 + +/* MC_CMD_CLP_OUT_SET_BOOT msgresponse */ +#define MC_CMD_CLP_OUT_SET_BOOT_LEN 0 + +/* MC_CMD_CLP_IN_GET_BOOT msgrequest */ +#define MC_CMD_CLP_IN_GET_BOOT_LEN 4 +/* MC_CMD_CLP_IN_OP_OFST 0 */ +/* MC_CMD_CLP_IN_OP_LEN 4 */ + +/* MC_CMD_CLP_OUT_GET_BOOT msgresponse */ +#define MC_CMD_CLP_OUT_GET_BOOT_LEN 4 +/* Boot flag */ +#define MC_CMD_CLP_OUT_GET_BOOT_FLAG_OFST 0 +#define MC_CMD_CLP_OUT_GET_BOOT_FLAG_LEN 1 +/* Padding */ +#define MC_CMD_CLP_OUT_GET_BOOT_RESERVED_OFST 1 +#define MC_CMD_CLP_OUT_GET_BOOT_RESERVED_LEN 3 + + +/***********************************/ +/* MC_CMD_MUM + * Perform a MUM operation + */ +#define MC_CMD_MUM 0x57 +#undef MC_CMD_0x57_PRIVILEGE_CTG + +#define MC_CMD_0x57_PRIVILEGE_CTG SRIOV_CTG_INSECURE + +/* MC_CMD_MUM_IN msgrequest */ +#define MC_CMD_MUM_IN_LEN 4 +#define MC_CMD_MUM_IN_OP_HDR_OFST 0 +#define MC_CMD_MUM_IN_OP_HDR_LEN 4 +#define MC_CMD_MUM_IN_OP_LBN 0 +#define MC_CMD_MUM_IN_OP_WIDTH 8 +/* enum: NULL MCDI command to MUM */ +#define MC_CMD_MUM_OP_NULL 0x1 +/* enum: Get MUM version */ +#define MC_CMD_MUM_OP_GET_VERSION 0x2 +/* enum: Issue raw I2C command to MUM */ +#define MC_CMD_MUM_OP_RAW_CMD 0x3 +/* enum: Read from registers on devices connected to MUM. */ +#define MC_CMD_MUM_OP_READ 0x4 +/* enum: Write to registers on devices connected to MUM. */ +#define MC_CMD_MUM_OP_WRITE 0x5 +/* enum: Control UART logging. 
*/ +#define MC_CMD_MUM_OP_LOG 0x6 +/* enum: Operations on MUM GPIO lines */ +#define MC_CMD_MUM_OP_GPIO 0x7 +/* enum: Get sensor readings from MUM */ +#define MC_CMD_MUM_OP_READ_SENSORS 0x8 +/* enum: Initiate clock programming on the MUM */ +#define MC_CMD_MUM_OP_PROGRAM_CLOCKS 0x9 +/* enum: Initiate FPGA load from flash on the MUM */ +#define MC_CMD_MUM_OP_FPGA_LOAD 0xa +/* enum: Request sensor reading from MUM ADC resulting from earlier request via + * MUM ATB + */ +#define MC_CMD_MUM_OP_READ_ATB_SENSOR 0xb +/* enum: Send commands relating to the QSFP ports via the MUM for PHY + * operations + */ +#define MC_CMD_MUM_OP_QSFP 0xc +/* enum: Request discrete and SODIMM DDR info (type, size, speed grade, voltage + * level) from MUM + */ +#define MC_CMD_MUM_OP_READ_DDR_INFO 0xd + +/* MC_CMD_MUM_IN_NULL msgrequest */ +#define MC_CMD_MUM_IN_NULL_LEN 4 +/* MUM cmd header */ +#define MC_CMD_MUM_IN_CMD_OFST 0 +#define MC_CMD_MUM_IN_CMD_LEN 4 + +/* MC_CMD_MUM_IN_GET_VERSION msgrequest */ +#define MC_CMD_MUM_IN_GET_VERSION_LEN 4 +/* MUM cmd header */ +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ + +/* MC_CMD_MUM_IN_READ msgrequest */ +#define MC_CMD_MUM_IN_READ_LEN 16 +/* MUM cmd header */ +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +/* ID of (device connected to MUM) to read from registers of */ +#define MC_CMD_MUM_IN_READ_DEVICE_OFST 4 +#define MC_CMD_MUM_IN_READ_DEVICE_LEN 4 +/* enum: Hittite HMC1035 clock generator on Sorrento board */ +#define MC_CMD_MUM_DEV_HITTITE 0x1 +/* enum: Hittite HMC1035 clock generator for NIC-side on Sorrento board */ +#define MC_CMD_MUM_DEV_HITTITE_NIC 0x2 +/* 32-bit address to read from */ +#define MC_CMD_MUM_IN_READ_ADDR_OFST 8 +#define MC_CMD_MUM_IN_READ_ADDR_LEN 4 +/* Number of words to read. 
*/ +#define MC_CMD_MUM_IN_READ_NUMWORDS_OFST 12 +#define MC_CMD_MUM_IN_READ_NUMWORDS_LEN 4 + +/* MC_CMD_MUM_IN_WRITE msgrequest */ +#define MC_CMD_MUM_IN_WRITE_LENMIN 16 +#define MC_CMD_MUM_IN_WRITE_LENMAX 252 +#define MC_CMD_MUM_IN_WRITE_LENMAX_MCDI2 1020 +#define MC_CMD_MUM_IN_WRITE_LEN(num) (12+4*(num)) +#define MC_CMD_MUM_IN_WRITE_BUFFER_NUM(len) (((len)-12)/4) +/* MUM cmd header */ +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +/* ID of (device connected to MUM) to write to registers of */ +#define MC_CMD_MUM_IN_WRITE_DEVICE_OFST 4 +#define MC_CMD_MUM_IN_WRITE_DEVICE_LEN 4 +/* enum: Hittite HMC1035 clock generator on Sorrento board */ +/* MC_CMD_MUM_DEV_HITTITE 0x1 */ +/* 32-bit address to write to */ +#define MC_CMD_MUM_IN_WRITE_ADDR_OFST 8 +#define MC_CMD_MUM_IN_WRITE_ADDR_LEN 4 +/* Words to write */ +#define MC_CMD_MUM_IN_WRITE_BUFFER_OFST 12 +#define MC_CMD_MUM_IN_WRITE_BUFFER_LEN 4 +#define MC_CMD_MUM_IN_WRITE_BUFFER_MINNUM 1 +#define MC_CMD_MUM_IN_WRITE_BUFFER_MAXNUM 60 +#define MC_CMD_MUM_IN_WRITE_BUFFER_MAXNUM_MCDI2 252 + +/* MC_CMD_MUM_IN_RAW_CMD msgrequest */ +#define MC_CMD_MUM_IN_RAW_CMD_LENMIN 17 +#define MC_CMD_MUM_IN_RAW_CMD_LENMAX 252 +#define MC_CMD_MUM_IN_RAW_CMD_LENMAX_MCDI2 1020 +#define MC_CMD_MUM_IN_RAW_CMD_LEN(num) (16+1*(num)) +#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_NUM(len) (((len)-16)/1) +/* MUM cmd header */ +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +/* MUM I2C cmd code */ +#define MC_CMD_MUM_IN_RAW_CMD_CMD_CODE_OFST 4 +#define MC_CMD_MUM_IN_RAW_CMD_CMD_CODE_LEN 4 +/* Number of bytes to write */ +#define MC_CMD_MUM_IN_RAW_CMD_NUM_WRITE_OFST 8 +#define MC_CMD_MUM_IN_RAW_CMD_NUM_WRITE_LEN 4 +/* Number of bytes to read */ +#define MC_CMD_MUM_IN_RAW_CMD_NUM_READ_OFST 12 +#define MC_CMD_MUM_IN_RAW_CMD_NUM_READ_LEN 4 +/* Bytes to write */ +#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_OFST 16 +#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_LEN 1 +#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_MINNUM 1 +#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_MAXNUM 236 +#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_MAXNUM_MCDI2 1004 + +/* MC_CMD_MUM_IN_LOG msgrequest */ +#define MC_CMD_MUM_IN_LOG_LEN 8 +/* MUM cmd header */ +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +#define MC_CMD_MUM_IN_LOG_OP_OFST 4 +#define MC_CMD_MUM_IN_LOG_OP_LEN 4 +#define MC_CMD_MUM_IN_LOG_OP_UART 0x1 /* enum */ + +/* MC_CMD_MUM_IN_LOG_OP_UART msgrequest */ +#define MC_CMD_MUM_IN_LOG_OP_UART_LEN 12 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +/* MC_CMD_MUM_IN_LOG_OP_OFST 4 */ +/* MC_CMD_MUM_IN_LOG_OP_LEN 4 */ +/* Enable/disable debug output to UART */ +#define MC_CMD_MUM_IN_LOG_OP_UART_ENABLE_OFST 8 +#define MC_CMD_MUM_IN_LOG_OP_UART_ENABLE_LEN 4 + +/* MC_CMD_MUM_IN_GPIO msgrequest */ +#define MC_CMD_MUM_IN_GPIO_LEN 8 +/* MUM cmd header */ +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +#define MC_CMD_MUM_IN_GPIO_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_HDR_LEN 4 +#define MC_CMD_MUM_IN_GPIO_OPCODE_LBN 0 +#define MC_CMD_MUM_IN_GPIO_OPCODE_WIDTH 8 +#define MC_CMD_MUM_IN_GPIO_IN_READ 0x0 /* enum */ +#define MC_CMD_MUM_IN_GPIO_OUT_WRITE 0x1 /* enum */ +#define MC_CMD_MUM_IN_GPIO_OUT_READ 0x2 /* enum */ +#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE 0x3 /* enum */ +#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ 0x4 /* enum */ +#define MC_CMD_MUM_IN_GPIO_OP 0x5 /* enum */ + +/* MC_CMD_MUM_IN_GPIO_IN_READ msgrequest */ +#define MC_CMD_MUM_IN_GPIO_IN_READ_LEN 8 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +#define 
MC_CMD_MUM_IN_GPIO_IN_READ_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_IN_READ_HDR_LEN 4 + +/* MC_CMD_MUM_IN_GPIO_OUT_WRITE msgrequest */ +#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_LEN 16 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_HDR_LEN 4 +/* The first 32-bit word to be written to the GPIO OUT register. */ +#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK1_OFST 8 +#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK1_LEN 4 +/* The second 32-bit word to be written to the GPIO OUT register. */ +#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK2_OFST 12 +#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK2_LEN 4 + +/* MC_CMD_MUM_IN_GPIO_OUT_READ msgrequest */ +#define MC_CMD_MUM_IN_GPIO_OUT_READ_LEN 8 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +#define MC_CMD_MUM_IN_GPIO_OUT_READ_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OUT_READ_HDR_LEN 4 + +/* MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE msgrequest */ +#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_LEN 16 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_HDR_LEN 4 +/* The first 32-bit word to be written to the GPIO OUT ENABLE register. */ +#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK1_OFST 8 +#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK1_LEN 4 +/* The second 32-bit word to be written to the GPIO OUT ENABLE register. */ +#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK2_OFST 12 +#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK2_LEN 4 + +/* MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ msgrequest */ +#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_LEN 8 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_HDR_LEN 4 + +/* MC_CMD_MUM_IN_GPIO_OP msgrequest */ +#define MC_CMD_MUM_IN_GPIO_OP_LEN 8 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +#define MC_CMD_MUM_IN_GPIO_OP_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OP_HDR_LEN 4 +#define MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_LBN 8 +#define MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_WIDTH 8 +#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ 0x0 /* enum */ +#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE 0x1 /* enum */ +#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG 0x2 /* enum */ +#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE 0x3 /* enum */ +#define MC_CMD_MUM_IN_GPIO_OP_GPIO_NUMBER_LBN 16 +#define MC_CMD_MUM_IN_GPIO_OP_GPIO_NUMBER_WIDTH 8 + +/* MC_CMD_MUM_IN_GPIO_OP_OUT_READ msgrequest */ +#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ_LEN 8 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ_HDR_LEN 4 + +/* MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE msgrequest */ +#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_LEN 8 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_HDR_LEN 4 +#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_LBN 24 +#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_WIDTH 8 + +/* MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG msgrequest */ +#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_LEN 8 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_HDR_LEN 4 +#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_LBN 24 +#define 
MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_WIDTH 8 + +/* MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE msgrequest */ +#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_LEN 8 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_HDR_LEN 4 +#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_LBN 24 +#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_WIDTH 8 + +/* MC_CMD_MUM_IN_READ_SENSORS msgrequest */ +#define MC_CMD_MUM_IN_READ_SENSORS_LEN 8 +/* MUM cmd header */ +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +#define MC_CMD_MUM_IN_READ_SENSORS_PARAMS_OFST 4 +#define MC_CMD_MUM_IN_READ_SENSORS_PARAMS_LEN 4 +#define MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_LBN 0 +#define MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_WIDTH 8 +#define MC_CMD_MUM_IN_READ_SENSORS_NUM_SENSORS_LBN 8 +#define MC_CMD_MUM_IN_READ_SENSORS_NUM_SENSORS_WIDTH 8 + +/* MC_CMD_MUM_IN_PROGRAM_CLOCKS msgrequest */ +#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_LEN 12 +/* MUM cmd header */ +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +/* Bit-mask of clocks to be programmed */ +#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_MASK_OFST 4 +#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_MASK_LEN 4 +#define MC_CMD_MUM_CLOCK_ID_FPGA 0x0 /* enum */ +#define MC_CMD_MUM_CLOCK_ID_DDR 0x1 /* enum */ +#define MC_CMD_MUM_CLOCK_ID_NIC 0x2 /* enum */ +/* Control flags for clock programming */ +#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_FLAGS_OFST 8 +#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_FLAGS_LEN 4 +#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_LBN 0 +#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_WIDTH 1 +#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_NIC_FROM_FPGA_LBN 1 +#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_NIC_FROM_FPGA_WIDTH 1 +#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_REF_FROM_XO_LBN 2 +#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_REF_FROM_XO_WIDTH 1 + +/* MC_CMD_MUM_IN_FPGA_LOAD msgrequest */ +#define MC_CMD_MUM_IN_FPGA_LOAD_LEN 8 +/* MUM cmd header */ +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +/* Enable/Disable FPGA config from flash */ +#define MC_CMD_MUM_IN_FPGA_LOAD_ENABLE_OFST 4 +#define MC_CMD_MUM_IN_FPGA_LOAD_ENABLE_LEN 4 + +/* MC_CMD_MUM_IN_READ_ATB_SENSOR msgrequest */ +#define MC_CMD_MUM_IN_READ_ATB_SENSOR_LEN 4 +/* MUM cmd header */ +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ + +/* MC_CMD_MUM_IN_QSFP msgrequest */ +#define MC_CMD_MUM_IN_QSFP_LEN 12 +/* MUM cmd header */ +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +#define MC_CMD_MUM_IN_QSFP_HDR_OFST 4 +#define MC_CMD_MUM_IN_QSFP_HDR_LEN 4 +#define MC_CMD_MUM_IN_QSFP_OPCODE_LBN 0 +#define MC_CMD_MUM_IN_QSFP_OPCODE_WIDTH 4 +#define MC_CMD_MUM_IN_QSFP_INIT 0x0 /* enum */ +#define MC_CMD_MUM_IN_QSFP_RECONFIGURE 0x1 /* enum */ +#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP 0x2 /* enum */ +#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO 0x3 /* enum */ +#define MC_CMD_MUM_IN_QSFP_FILL_STATS 0x4 /* enum */ +#define MC_CMD_MUM_IN_QSFP_POLL_BIST 0x5 /* enum */ +#define MC_CMD_MUM_IN_QSFP_IDX_OFST 8 +#define MC_CMD_MUM_IN_QSFP_IDX_LEN 4 + +/* MC_CMD_MUM_IN_QSFP_INIT msgrequest */ +#define MC_CMD_MUM_IN_QSFP_INIT_LEN 16 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +#define MC_CMD_MUM_IN_QSFP_INIT_HDR_OFST 4 +#define MC_CMD_MUM_IN_QSFP_INIT_HDR_LEN 4 +#define MC_CMD_MUM_IN_QSFP_INIT_IDX_OFST 8 +#define MC_CMD_MUM_IN_QSFP_INIT_IDX_LEN 4 +#define MC_CMD_MUM_IN_QSFP_INIT_CAGE_OFST 12 +#define MC_CMD_MUM_IN_QSFP_INIT_CAGE_LEN 4 + +/* 
MC_CMD_MUM_IN_QSFP_RECONFIGURE msgrequest */ +#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_LEN 24 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_HDR_OFST 4 +#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_HDR_LEN 4 +#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_IDX_OFST 8 +#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_IDX_LEN 4 +#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_TX_DISABLE_OFST 12 +#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_TX_DISABLE_LEN 4 +#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LANES_OFST 16 +#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LANES_LEN 4 +#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LINK_SPEED_OFST 20 +#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LINK_SPEED_LEN 4 + +/* MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP msgrequest */ +#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_LEN 12 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_HDR_OFST 4 +#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_HDR_LEN 4 +#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_IDX_OFST 8 +#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_IDX_LEN 4 + +/* MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO msgrequest */ +#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_LEN 16 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_HDR_OFST 4 +#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_HDR_LEN 4 +#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_IDX_OFST 8 +#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_IDX_LEN 4 +#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_PAGE_OFST 12 +#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_PAGE_LEN 4 + +/* MC_CMD_MUM_IN_QSFP_FILL_STATS msgrequest */ +#define MC_CMD_MUM_IN_QSFP_FILL_STATS_LEN 12 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +#define MC_CMD_MUM_IN_QSFP_FILL_STATS_HDR_OFST 4 +#define MC_CMD_MUM_IN_QSFP_FILL_STATS_HDR_LEN 4 +#define MC_CMD_MUM_IN_QSFP_FILL_STATS_IDX_OFST 8 +#define MC_CMD_MUM_IN_QSFP_FILL_STATS_IDX_LEN 4 + +/* MC_CMD_MUM_IN_QSFP_POLL_BIST msgrequest */ +#define MC_CMD_MUM_IN_QSFP_POLL_BIST_LEN 12 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +#define MC_CMD_MUM_IN_QSFP_POLL_BIST_HDR_OFST 4 +#define MC_CMD_MUM_IN_QSFP_POLL_BIST_HDR_LEN 4 +#define MC_CMD_MUM_IN_QSFP_POLL_BIST_IDX_OFST 8 +#define MC_CMD_MUM_IN_QSFP_POLL_BIST_IDX_LEN 4 + +/* MC_CMD_MUM_IN_READ_DDR_INFO msgrequest */ +#define MC_CMD_MUM_IN_READ_DDR_INFO_LEN 4 +/* MUM cmd header */ +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ + +/* MC_CMD_MUM_OUT msgresponse */ +#define MC_CMD_MUM_OUT_LEN 0 + +/* MC_CMD_MUM_OUT_NULL msgresponse */ +#define MC_CMD_MUM_OUT_NULL_LEN 0 + +/* MC_CMD_MUM_OUT_GET_VERSION msgresponse */ +#define MC_CMD_MUM_OUT_GET_VERSION_LEN 12 +#define MC_CMD_MUM_OUT_GET_VERSION_FIRMWARE_OFST 0 +#define MC_CMD_MUM_OUT_GET_VERSION_FIRMWARE_LEN 4 +#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_OFST 4 +#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LEN 8 +#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LO_OFST 4 +#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_HI_OFST 8 + +/* MC_CMD_MUM_OUT_RAW_CMD msgresponse */ +#define MC_CMD_MUM_OUT_RAW_CMD_LENMIN 1 +#define MC_CMD_MUM_OUT_RAW_CMD_LENMAX 252 +#define MC_CMD_MUM_OUT_RAW_CMD_LENMAX_MCDI2 1020 +#define MC_CMD_MUM_OUT_RAW_CMD_LEN(num) (0+1*(num)) +#define MC_CMD_MUM_OUT_RAW_CMD_DATA_NUM(len) (((len)-0)/1) +/* returned data */ +#define MC_CMD_MUM_OUT_RAW_CMD_DATA_OFST 0 +#define MC_CMD_MUM_OUT_RAW_CMD_DATA_LEN 1 +#define MC_CMD_MUM_OUT_RAW_CMD_DATA_MINNUM 1 +#define MC_CMD_MUM_OUT_RAW_CMD_DATA_MAXNUM 252 +#define 
MC_CMD_MUM_OUT_RAW_CMD_DATA_MAXNUM_MCDI2 1020 + +/* MC_CMD_MUM_OUT_READ msgresponse */ +#define MC_CMD_MUM_OUT_READ_LENMIN 4 +#define MC_CMD_MUM_OUT_READ_LENMAX 252 +#define MC_CMD_MUM_OUT_READ_LENMAX_MCDI2 1020 +#define MC_CMD_MUM_OUT_READ_LEN(num) (0+4*(num)) +#define MC_CMD_MUM_OUT_READ_BUFFER_NUM(len) (((len)-0)/4) +#define MC_CMD_MUM_OUT_READ_BUFFER_OFST 0 +#define MC_CMD_MUM_OUT_READ_BUFFER_LEN 4 +#define MC_CMD_MUM_OUT_READ_BUFFER_MINNUM 1 +#define MC_CMD_MUM_OUT_READ_BUFFER_MAXNUM 63 +#define MC_CMD_MUM_OUT_READ_BUFFER_MAXNUM_MCDI2 255 + +/* MC_CMD_MUM_OUT_WRITE msgresponse */ +#define MC_CMD_MUM_OUT_WRITE_LEN 0 + +/* MC_CMD_MUM_OUT_LOG msgresponse */ +#define MC_CMD_MUM_OUT_LOG_LEN 0 + +/* MC_CMD_MUM_OUT_LOG_OP_UART msgresponse */ +#define MC_CMD_MUM_OUT_LOG_OP_UART_LEN 0 + +/* MC_CMD_MUM_OUT_GPIO_IN_READ msgresponse */ +#define MC_CMD_MUM_OUT_GPIO_IN_READ_LEN 8 +/* The first 32-bit word read from the GPIO IN register. */ +#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK1_OFST 0 +#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK1_LEN 4 +/* The second 32-bit word read from the GPIO IN register. */ +#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK2_OFST 4 +#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK2_LEN 4 + +/* MC_CMD_MUM_OUT_GPIO_OUT_WRITE msgresponse */ +#define MC_CMD_MUM_OUT_GPIO_OUT_WRITE_LEN 0 + +/* MC_CMD_MUM_OUT_GPIO_OUT_READ msgresponse */ +#define MC_CMD_MUM_OUT_GPIO_OUT_READ_LEN 8 +/* The first 32-bit word read from the GPIO OUT register. */ +#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK1_OFST 0 +#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK1_LEN 4 +/* The second 32-bit word read from the GPIO OUT register. */ +#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK2_OFST 4 +#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK2_LEN 4 + +/* MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_WRITE msgresponse */ +#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_WRITE_LEN 0 + +/* MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ msgresponse */ +#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_LEN 8 +#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK1_OFST 0 +#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK1_LEN 4 +#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK2_OFST 4 +#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK2_LEN 4 + +/* MC_CMD_MUM_OUT_GPIO_OP_OUT_READ msgresponse */ +#define MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_LEN 4 +#define MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_BIT_READ_OFST 0 +#define MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_BIT_READ_LEN 4 + +/* MC_CMD_MUM_OUT_GPIO_OP_OUT_WRITE msgresponse */ +#define MC_CMD_MUM_OUT_GPIO_OP_OUT_WRITE_LEN 0 + +/* MC_CMD_MUM_OUT_GPIO_OP_OUT_CONFIG msgresponse */ +#define MC_CMD_MUM_OUT_GPIO_OP_OUT_CONFIG_LEN 0 + +/* MC_CMD_MUM_OUT_GPIO_OP_OUT_ENABLE msgresponse */ +#define MC_CMD_MUM_OUT_GPIO_OP_OUT_ENABLE_LEN 0 + +/* MC_CMD_MUM_OUT_READ_SENSORS msgresponse */ +#define MC_CMD_MUM_OUT_READ_SENSORS_LENMIN 4 +#define MC_CMD_MUM_OUT_READ_SENSORS_LENMAX 252 +#define MC_CMD_MUM_OUT_READ_SENSORS_LENMAX_MCDI2 1020 +#define MC_CMD_MUM_OUT_READ_SENSORS_LEN(num) (0+4*(num)) +#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_NUM(len) (((len)-0)/4) +#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_OFST 0 +#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_LEN 4 +#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_MINNUM 1 +#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_MAXNUM 63 +#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_MAXNUM_MCDI2 255 +#define MC_CMD_MUM_OUT_READ_SENSORS_READING_LBN 0 +#define MC_CMD_MUM_OUT_READ_SENSORS_READING_WIDTH 16 +#define MC_CMD_MUM_OUT_READ_SENSORS_STATE_LBN 16 +#define MC_CMD_MUM_OUT_READ_SENSORS_STATE_WIDTH 8 +#define 
MC_CMD_MUM_OUT_READ_SENSORS_TYPE_LBN 24 +#define MC_CMD_MUM_OUT_READ_SENSORS_TYPE_WIDTH 8 + +/* MC_CMD_MUM_OUT_PROGRAM_CLOCKS msgresponse */ +#define MC_CMD_MUM_OUT_PROGRAM_CLOCKS_LEN 4 +#define MC_CMD_MUM_OUT_PROGRAM_CLOCKS_OK_MASK_OFST 0 +#define MC_CMD_MUM_OUT_PROGRAM_CLOCKS_OK_MASK_LEN 4 + +/* MC_CMD_MUM_OUT_FPGA_LOAD msgresponse */ +#define MC_CMD_MUM_OUT_FPGA_LOAD_LEN 0 + +/* MC_CMD_MUM_OUT_READ_ATB_SENSOR msgresponse */ +#define MC_CMD_MUM_OUT_READ_ATB_SENSOR_LEN 4 +#define MC_CMD_MUM_OUT_READ_ATB_SENSOR_RESULT_OFST 0 +#define MC_CMD_MUM_OUT_READ_ATB_SENSOR_RESULT_LEN 4 + +/* MC_CMD_MUM_OUT_QSFP_INIT msgresponse */ +#define MC_CMD_MUM_OUT_QSFP_INIT_LEN 0 + +/* MC_CMD_MUM_OUT_QSFP_RECONFIGURE msgresponse */ +#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_LEN 8 +#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LP_CAP_OFST 0 +#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LP_CAP_LEN 4 +#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_FLAGS_OFST 4 +#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_FLAGS_LEN 4 +#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_LBN 0 +#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_WIDTH 1 +#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LINK_UP_LBN 1 +#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LINK_UP_WIDTH 1 + +/* MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP msgresponse */ +#define MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_LEN 4 +#define MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_PORT_PHY_LP_CAP_OFST 0 +#define MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_PORT_PHY_LP_CAP_LEN 4 + +/* MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO msgresponse */ +#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LENMIN 5 +#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LENMAX 252 +#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LENMAX_MCDI2 1020 +#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LEN(num) (4+1*(num)) +#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_NUM(len) (((len)-4)/1) +/* in bytes */ +#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATALEN_OFST 0 +#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATALEN_LEN 4 +#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_OFST 4 +#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_LEN 1 +#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_MINNUM 1 +#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_MAXNUM 248 +#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_MAXNUM_MCDI2 1016 + +/* MC_CMD_MUM_OUT_QSFP_FILL_STATS msgresponse */ +#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_LEN 8 +#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PMA_PMD_LINK_UP_OFST 0 +#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PMA_PMD_LINK_UP_LEN 4 +#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PCS_LINK_UP_OFST 4 +#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PCS_LINK_UP_LEN 4 + +/* MC_CMD_MUM_OUT_QSFP_POLL_BIST msgresponse */ +#define MC_CMD_MUM_OUT_QSFP_POLL_BIST_LEN 4 +#define MC_CMD_MUM_OUT_QSFP_POLL_BIST_TEST_OFST 0 +#define MC_CMD_MUM_OUT_QSFP_POLL_BIST_TEST_LEN 4 + +/* MC_CMD_MUM_OUT_READ_DDR_INFO msgresponse */ +#define MC_CMD_MUM_OUT_READ_DDR_INFO_LENMIN 24 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_LENMAX 248 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_LENMAX_MCDI2 1016 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_LEN(num) (8+8*(num)) +#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_NUM(len) (((len)-8)/8) +/* Discrete (soldered) DDR resistor strap info */ +#define MC_CMD_MUM_OUT_READ_DDR_INFO_DISCRETE_DDR_INFO_OFST 0 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_DISCRETE_DDR_INFO_LEN 4 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_LBN 0 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_WIDTH 16 +#define 
MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED1_LBN 16 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED1_WIDTH 16 +/* Number of SODIMM info records */ +#define MC_CMD_MUM_OUT_READ_DDR_INFO_NUM_RECORDS_OFST 4 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_NUM_RECORDS_LEN 4 +/* Array of SODIMM info records */ +#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_OFST 8 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LEN 8 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LO_OFST 8 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_HI_OFST 12 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_MINNUM 2 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_MAXNUM 30 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_MAXNUM_MCDI2 126 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK_ID_LBN 0 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK_ID_WIDTH 8 +/* enum: SODIMM bank 1 (Top SODIMM for Sorrento) */ +#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK1 0x0 +/* enum: SODIMM bank 2 (Bottom SODIMM for Sorrento) */ +#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK2 0x1 +/* enum: Total number of SODIMM banks */ +#define MC_CMD_MUM_OUT_READ_DDR_INFO_NUM_BANKS 0x2 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_TYPE_LBN 8 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_TYPE_WIDTH 8 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_RANK_LBN 16 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_RANK_WIDTH 4 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_VOLTAGE_LBN 20 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_VOLTAGE_WIDTH 4 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_NOT_POWERED 0x0 /* enum */ +#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V25 0x1 /* enum */ +#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V35 0x2 /* enum */ +#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V5 0x3 /* enum */ +/* enum: Values 5-15 are reserved for future usage */ +#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V8 0x4 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_SIZE_LBN 24 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_SIZE_WIDTH 8 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_SPEED_LBN 32 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_SPEED_WIDTH 16 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_STATE_LBN 48 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_STATE_WIDTH 4 +/* enum: No module present */ +#define MC_CMD_MUM_OUT_READ_DDR_INFO_ABSENT 0x0 +/* enum: Module present, supported and powered on */ +#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_POWERED 0x1 +/* enum: Module present but bad type */ +#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_TYPE 0x2 +/* enum: Module present but incompatible voltage */ +#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_VOLTAGE 0x3 +/* enum: Module present but unknown SPD */ +#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_SPD 0x4 +/* enum: Module present but slot cannot support it */ +#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_SLOT 0x5 +/* enum: Modules may or may not be present, but cannot establish contact by I2C + */ +#define MC_CMD_MUM_OUT_READ_DDR_INFO_NOT_REACHABLE 0x6 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED2_LBN 52 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED2_WIDTH 12 + +/* MC_CMD_DYNAMIC_SENSORS_LIMITS structuredef: Set of sensor limits. This + * should match the equivalent structure in the sensor_query SPHINX service. + */ +#define MC_CMD_DYNAMIC_SENSORS_LIMITS_LEN 24 +/* A value below this will trigger a warning event. */ +#define MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_WARNING_OFST 0 +#define MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_WARNING_LEN 4 +#define MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_WARNING_LBN 0 +#define MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_WARNING_WIDTH 32 +/* A value below this will trigger a critical event. 
*/ +#define MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_CRITICAL_OFST 4 +#define MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_CRITICAL_LEN 4 +#define MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_CRITICAL_LBN 32 +#define MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_CRITICAL_WIDTH 32 +/* A value below this will shut down the card. */ +#define MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_FATAL_OFST 8 +#define MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_FATAL_LEN 4 +#define MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_FATAL_LBN 64 +#define MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_FATAL_WIDTH 32 +/* A value above this will trigger a warning event. */ +#define MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_WARNING_OFST 12 +#define MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_WARNING_LEN 4 +#define MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_WARNING_LBN 96 +#define MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_WARNING_WIDTH 32 +/* A value above this will trigger a critical event. */ +#define MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_CRITICAL_OFST 16 +#define MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_CRITICAL_LEN 4 +#define MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_CRITICAL_LBN 128 +#define MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_CRITICAL_WIDTH 32 +/* A value above this will shut down the card. */ +#define MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_FATAL_OFST 20 +#define MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_FATAL_LEN 4 +#define MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_FATAL_LBN 160 +#define MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_FATAL_WIDTH 32 + +/* MC_CMD_DYNAMIC_SENSORS_DESCRIPTION structuredef: Description of a sensor. + * This should match the equivalent structure in the sensor_query SPHINX + * service. + */ +#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_LEN 64 +/* The handle used to identify the sensor in calls to + * MC_CMD_DYNAMIC_SENSORS_GET_VALUES + */ +#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_HANDLE_OFST 0 +#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_HANDLE_LEN 4 +#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_HANDLE_LBN 0 +#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_HANDLE_WIDTH 32 +/* A human-readable name for the sensor (zero terminated string, max 32 bytes) + */ +#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_NAME_OFST 4 +#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_NAME_LEN 32 +#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_NAME_LBN 32 +#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_NAME_WIDTH 256 +/* The type of the sensor device, and by implication the unit that the + * values will be reported in + */ +#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_TYPE_OFST 36 +#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_TYPE_LEN 4 +/* enum: A voltage sensor. Unit is mV */ +#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_VOLTAGE 0x0 +/* enum: A current sensor. Unit is mA */ +#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_CURRENT 0x1 +/* enum: A power sensor. Unit is mW */ +#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_POWER 0x2 +/* enum: A temperature sensor. Unit is Celsius */ +#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_TEMPERATURE 0x3 +/* enum: A cooling fan sensor. Unit is RPM */ +#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_FAN 0x4 +#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_TYPE_LBN 288 +#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_TYPE_WIDTH 32 +/* A single MC_CMD_DYNAMIC_SENSORS_LIMITS structure */ +#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_LIMITS_OFST 40 +#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_LIMITS_LEN 24 +#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_LIMITS_LBN 320 +#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_LIMITS_WIDTH 192 + +/* MC_CMD_DYNAMIC_SENSORS_READING structuredef: State and value of a sensor. + * This should match the equivalent structure in the sensor_query SPHINX + * service. 
+ */ +#define MC_CMD_DYNAMIC_SENSORS_READING_LEN 12 +/* The handle used to identify the sensor */ +#define MC_CMD_DYNAMIC_SENSORS_READING_HANDLE_OFST 0 +#define MC_CMD_DYNAMIC_SENSORS_READING_HANDLE_LEN 4 +#define MC_CMD_DYNAMIC_SENSORS_READING_HANDLE_LBN 0 +#define MC_CMD_DYNAMIC_SENSORS_READING_HANDLE_WIDTH 32 +/* The current value of the sensor */ +#define MC_CMD_DYNAMIC_SENSORS_READING_VALUE_OFST 4 +#define MC_CMD_DYNAMIC_SENSORS_READING_VALUE_LEN 4 +#define MC_CMD_DYNAMIC_SENSORS_READING_VALUE_LBN 32 +#define MC_CMD_DYNAMIC_SENSORS_READING_VALUE_WIDTH 32 +/* The sensor's condition, e.g. good, broken or removed */ +#define MC_CMD_DYNAMIC_SENSORS_READING_STATE_OFST 8 +#define MC_CMD_DYNAMIC_SENSORS_READING_STATE_LEN 4 +/* enum: Sensor working normally within limits */ +#define MC_CMD_DYNAMIC_SENSORS_READING_OK 0x0 +/* enum: Warning threshold breached */ +#define MC_CMD_DYNAMIC_SENSORS_READING_WARNING 0x1 +/* enum: Critical threshold breached */ +#define MC_CMD_DYNAMIC_SENSORS_READING_CRITICAL 0x2 +/* enum: Fatal threshold breached */ +#define MC_CMD_DYNAMIC_SENSORS_READING_FATAL 0x3 +/* enum: Sensor not working */ +#define MC_CMD_DYNAMIC_SENSORS_READING_BROKEN 0x4 +/* enum: Sensor working but no reading available */ +#define MC_CMD_DYNAMIC_SENSORS_READING_NO_READING 0x5 +/* enum: Sensor initialization failed */ +#define MC_CMD_DYNAMIC_SENSORS_READING_INIT_FAILED 0x6 +#define MC_CMD_DYNAMIC_SENSORS_READING_STATE_LBN 64 +#define MC_CMD_DYNAMIC_SENSORS_READING_STATE_WIDTH 32 + + +/***********************************/ +/* MC_CMD_DYNAMIC_SENSORS_LIST + * Return a complete list of handles for sensors currently managed by the MC, + * and a generation count for this version of the sensor table. On systems + * advertising the DYNAMIC_SENSORS capability bit, this replaces the + * MC_CMD_READ_SENSORS command. On multi-MC systems this may include sensors + * added by the NMC. + * + * Sensor handles are persistent for the lifetime of the sensor and are used to + * identify sensors in MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS and + * MC_CMD_DYNAMIC_SENSORS_GET_VALUES. + * + * The generation count is maintained by the MC, is persistent across reboots + * and will be incremented each time the sensor table is modified. When the + * table is modified, a CODE_DYNAMIC_SENSORS_CHANGE event will be generated + * containing the new generation count. The driver should compare this against + * the current generation count, and if it is different, call + * MC_CMD_DYNAMIC_SENSORS_LIST again to update its copy of the sensor table. + * + * The sensor count is provided to allow a future path to supporting more than + * MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_HANDLES_MAXNUM_MCDI2 sensors, i.e. + * the maximum number that will fit in a single response. As this is a fairly + * large number (253) it is not anticipated that this will be needed in the + * near future, so can currently be ignored. + * + * On Riverhead this command is implemented as a wrapper for `list` in the + * sensor_query SPHINX service. 
+ */ +#define MC_CMD_DYNAMIC_SENSORS_LIST 0x66 +#undef MC_CMD_0x66_PRIVILEGE_CTG + +#define MC_CMD_0x66_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_DYNAMIC_SENSORS_LIST_IN msgrequest */ +#define MC_CMD_DYNAMIC_SENSORS_LIST_IN_LEN 0 + +/* MC_CMD_DYNAMIC_SENSORS_LIST_OUT msgresponse */ +#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_LENMIN 8 +#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_LENMAX 252 +#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_LEN(num) (8+4*(num)) +#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_HANDLES_NUM(len) (((len)-8)/4) +/* Generation count, which will be updated each time a sensor is added to or + * removed from the MC sensor table. + */ +#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_GENERATION_OFST 0 +#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_GENERATION_LEN 4 +/* Number of sensors managed by the MC. Note that in principle, this can be + * larger than the size of the HANDLES array. + */ +#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_COUNT_OFST 4 +#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_COUNT_LEN 4 +/* Array of sensor handles */ +#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_HANDLES_OFST 8 +#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_HANDLES_LEN 4 +#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_HANDLES_MINNUM 0 +#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_HANDLES_MAXNUM 61 +#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_HANDLES_MAXNUM_MCDI2 253 + + +/***********************************/ +/* MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS + * Get descriptions for a set of sensors, specified as an array of sensor + * handles as returned by MC_CMD_DYNAMIC_SENSORS_LIST + * + * Any handles which do not correspond to a sensor currently managed by the MC + * will be dropped from the response. This may happen when a sensor table + * update is in progress, and effectively means the set of usable sensors is + * the intersection between the sets of sensors known to the driver and the MC. + * + * On Riverhead this command is implemented as a wrapper for + * `get_descriptions` in the sensor_query SPHINX service. 
+ */ +#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS 0x67 +#undef MC_CMD_0x67_PRIVILEGE_CTG + +#define MC_CMD_0x67_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN msgrequest */ +#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_LENMIN 0 +#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_LENMAX 252 +#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_LENMAX_MCDI2 1020 +#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_LEN(num) (0+4*(num)) +#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_HANDLES_NUM(len) (((len)-0)/4) +/* Array of sensor handles */ +#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_HANDLES_OFST 0 +#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_HANDLES_LEN 4 +#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_HANDLES_MINNUM 0 +#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_HANDLES_MAXNUM 63 +#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_HANDLES_MAXNUM_MCDI2 255 + +/* MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT msgresponse */ +#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_LENMIN 0 +#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_LENMAX 192 +#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_LENMAX_MCDI2 960 +#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_LEN(num) (0+64*(num)) +#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_SENSORS_NUM(len) (((len)-0)/64) +/* Array of MC_CMD_DYNAMIC_SENSORS_DESCRIPTION structures */ +#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_SENSORS_OFST 0 +#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_SENSORS_LEN 64 +#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_SENSORS_MINNUM 0 +#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_SENSORS_MAXNUM 3 +#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_SENSORS_MAXNUM_MCDI2 15 + + +/***********************************/ +/* MC_CMD_DYNAMIC_SENSORS_GET_READINGS + * Read the state and value for a set of sensors, specified as an array of + * sensor handles as returned by MC_CMD_DYNAMIC_SENSORS_LIST. + * + * In the case of a broken sensor, the state of the response's + * MC_CMD_DYNAMIC_SENSORS_VALUE entry will be set to BROKEN, and any value + * provided should be treated as erroneous. + * + * Any handles which do not correspond to a sensor currently managed by the MC + * will be dropped from the response. This may happen when a sensor table + * update is in progress, and effectively means the set of usable sensors is + * the intersection between the sets of sensors known to the driver and the MC. + * + * On Riverhead this command is implemented as a wrapper for `get_readings` + * in the sensor_query SPHINX service. 
+ */ +#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS 0x68 +#undef MC_CMD_0x68_PRIVILEGE_CTG + +#define MC_CMD_0x68_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN msgrequest */ +#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_LENMIN 0 +#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_LENMAX 252 +#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_LENMAX_MCDI2 1020 +#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_LEN(num) (0+4*(num)) +#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_HANDLES_NUM(len) (((len)-0)/4) +/* Array of sensor handles */ +#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_HANDLES_OFST 0 +#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_HANDLES_LEN 4 +#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_HANDLES_MINNUM 0 +#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_HANDLES_MAXNUM 63 +#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_HANDLES_MAXNUM_MCDI2 255 + +/* MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT msgresponse */ +#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_LENMIN 0 +#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_LENMAX 252 +#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_LEN(num) (0+12*(num)) +#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_VALUES_NUM(len) (((len)-0)/12) +/* Array of MC_CMD_DYNAMIC_SENSORS_READING structures */ +#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_VALUES_OFST 0 +#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_VALUES_LEN 12 +#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_VALUES_MINNUM 0 +#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_VALUES_MAXNUM 21 +#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_VALUES_MAXNUM_MCDI2 85 + + +/***********************************/ +/* MC_CMD_EVENT_CTRL + * Configure which categories of unsolicited events the driver expects to + * receive (Riverhead). + */ +#define MC_CMD_EVENT_CTRL 0x69 +#undef MC_CMD_0x69_PRIVILEGE_CTG + +#define MC_CMD_0x69_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_EVENT_CTRL_IN msgrequest */ +#define MC_CMD_EVENT_CTRL_IN_LENMIN 0 +#define MC_CMD_EVENT_CTRL_IN_LENMAX 252 +#define MC_CMD_EVENT_CTRL_IN_LENMAX_MCDI2 1020 +#define MC_CMD_EVENT_CTRL_IN_LEN(num) (0+4*(num)) +#define MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_NUM(len) (((len)-0)/4) +/* Array of event categories for which the driver wishes to receive events. */ +#define MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_OFST 0 +#define MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_LEN 4 +#define MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_MINNUM 0 +#define MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_MAXNUM 63 +#define MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_MAXNUM_MCDI2 255 +/* enum: Driver wishes to receive LINKCHANGE events. */ +#define MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_LINKCHANGE 0x0 +/* enum: Driver wishes to receive SENSOR_CHANGE and SENSOR_STATE_CHANGE events. + */ +#define MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_SENSOREVT 0x1 +/* enum: Driver wishes to receive receive errors. */ +#define MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_RX_ERR 0x2 +/* enum: Driver wishes to receive transmit errors. */ +#define MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_TX_ERR 0x3 +/* enum: Driver wishes to receive firmware alerts. */ +#define MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_FWALERT 0x4 +/* enum: Driver wishes to receive reboot events. 
*/ +#define MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_MC_REBOOT 0x5 + +/* MC_CMD_EVENT_CTRL_OUT msgresponse */ +#define MC_CMD_EVENT_CTRL_OUT_LEN 0 + +/* MC_CMD_RESOURCE_SPECIFIER enum */ +/* enum: Any */ +#define MC_CMD_RESOURCE_INSTANCE_ANY 0xffffffff +/* enum: None */ +#define MC_CMD_RESOURCE_INSTANCE_NONE 0xfffffffe + +/* EVB_PORT_ID structuredef */ +#define EVB_PORT_ID_LEN 4 +#define EVB_PORT_ID_PORT_ID_OFST 0 +#define EVB_PORT_ID_PORT_ID_LEN 4 +/* enum: An invalid port handle. */ +#define EVB_PORT_ID_NULL 0x0 +/* enum: The port assigned to this function. */ +#define EVB_PORT_ID_ASSIGNED 0x1000000 +/* enum: External network port 0 */ +#define EVB_PORT_ID_MAC0 0x2000000 +/* enum: External network port 1 */ +#define EVB_PORT_ID_MAC1 0x2000001 +/* enum: External network port 2 */ +#define EVB_PORT_ID_MAC2 0x2000002 +/* enum: External network port 3 */ +#define EVB_PORT_ID_MAC3 0x2000003 +#define EVB_PORT_ID_PORT_ID_LBN 0 +#define EVB_PORT_ID_PORT_ID_WIDTH 32 + +/* EVB_VLAN_TAG structuredef */ +#define EVB_VLAN_TAG_LEN 2 +/* The VLAN tag value */ +#define EVB_VLAN_TAG_VLAN_ID_LBN 0 +#define EVB_VLAN_TAG_VLAN_ID_WIDTH 12 +#define EVB_VLAN_TAG_MODE_LBN 12 +#define EVB_VLAN_TAG_MODE_WIDTH 4 +/* enum: Insert the VLAN. */ +#define EVB_VLAN_TAG_INSERT 0x0 +/* enum: Replace the VLAN if already present. */ +#define EVB_VLAN_TAG_REPLACE 0x1 + +/* BUFTBL_ENTRY structuredef */ +#define BUFTBL_ENTRY_LEN 12 +/* the owner ID */ +#define BUFTBL_ENTRY_OID_OFST 0 +#define BUFTBL_ENTRY_OID_LEN 2 +#define BUFTBL_ENTRY_OID_LBN 0 +#define BUFTBL_ENTRY_OID_WIDTH 16 +/* the page parameter as one of ESE_DZ_SMC_PAGE_SIZE_ */ +#define BUFTBL_ENTRY_PGSZ_OFST 2 +#define BUFTBL_ENTRY_PGSZ_LEN 2 +#define BUFTBL_ENTRY_PGSZ_LBN 16 +#define BUFTBL_ENTRY_PGSZ_WIDTH 16 +/* the raw 64-bit address field from the SMC, not adjusted for page size */ +#define BUFTBL_ENTRY_RAWADDR_OFST 4 +#define BUFTBL_ENTRY_RAWADDR_LEN 8 +#define BUFTBL_ENTRY_RAWADDR_LO_OFST 4 +#define BUFTBL_ENTRY_RAWADDR_HI_OFST 8 +#define BUFTBL_ENTRY_RAWADDR_LBN 32 +#define BUFTBL_ENTRY_RAWADDR_WIDTH 64 + +/* NVRAM_PARTITION_TYPE structuredef */ +#define NVRAM_PARTITION_TYPE_LEN 2 +#define NVRAM_PARTITION_TYPE_ID_OFST 0 +#define NVRAM_PARTITION_TYPE_ID_LEN 2 +/* enum: Primary MC firmware partition */ +#define NVRAM_PARTITION_TYPE_MC_FIRMWARE 0x100 +/* enum: Secondary MC firmware partition */ +#define NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP 0x200 +/* enum: Expansion ROM partition */ +#define NVRAM_PARTITION_TYPE_EXPANSION_ROM 0x300 +/* enum: Static configuration TLV partition */ +#define NVRAM_PARTITION_TYPE_STATIC_CONFIG 0x400 +/* enum: Dynamic configuration TLV partition */ +#define NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG 0x500 +/* enum: Expansion ROM configuration data for port 0 */ +#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0 0x600 +/* enum: Synonym for EXPROM_CONFIG_PORT0 as used in pmap files */ +#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG 0x600 +/* enum: Expansion ROM configuration data for port 1 */ +#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1 0x601 +/* enum: Expansion ROM configuration data for port 2 */ +#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT2 0x602 +/* enum: Expansion ROM configuration data for port 3 */ +#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3 0x603 +/* enum: Non-volatile log output partition */ +#define NVRAM_PARTITION_TYPE_LOG 0x700 +/* enum: Non-volatile log output of second core on dual-core device */ +#define NVRAM_PARTITION_TYPE_LOG_SLAVE 0x701 +/* enum: Device state dump output partition */ +#define 
NVRAM_PARTITION_TYPE_DUMP 0x800 +/* enum: Application license key storage partition */ +#define NVRAM_PARTITION_TYPE_LICENSE 0x900 +/* enum: Start of range used for PHY partitions (low 8 bits are the PHY ID) */ +#define NVRAM_PARTITION_TYPE_PHY_MIN 0xa00 +/* enum: End of range used for PHY partitions (low 8 bits are the PHY ID) */ +#define NVRAM_PARTITION_TYPE_PHY_MAX 0xaff +/* enum: Primary FPGA partition */ +#define NVRAM_PARTITION_TYPE_FPGA 0xb00 +/* enum: Secondary FPGA partition */ +#define NVRAM_PARTITION_TYPE_FPGA_BACKUP 0xb01 +/* enum: FC firmware partition */ +#define NVRAM_PARTITION_TYPE_FC_FIRMWARE 0xb02 +/* enum: FC License partition */ +#define NVRAM_PARTITION_TYPE_FC_LICENSE 0xb03 +/* enum: Non-volatile log output partition for FC */ +#define NVRAM_PARTITION_TYPE_FC_LOG 0xb04 +/* enum: MUM firmware partition */ +#define NVRAM_PARTITION_TYPE_MUM_FIRMWARE 0xc00 +/* enum: SUC firmware partition (this is intentionally an alias of + * MUM_FIRMWARE) + */ +#define NVRAM_PARTITION_TYPE_SUC_FIRMWARE 0xc00 +/* enum: MUM Non-volatile log output partition. */ +#define NVRAM_PARTITION_TYPE_MUM_LOG 0xc01 +/* enum: MUM Application table partition. */ +#define NVRAM_PARTITION_TYPE_MUM_APPTABLE 0xc02 +/* enum: MUM boot rom partition. */ +#define NVRAM_PARTITION_TYPE_MUM_BOOT_ROM 0xc03 +/* enum: MUM production signatures & calibration rom partition. */ +#define NVRAM_PARTITION_TYPE_MUM_PROD_ROM 0xc04 +/* enum: MUM user signatures & calibration rom partition. */ +#define NVRAM_PARTITION_TYPE_MUM_USER_ROM 0xc05 +/* enum: MUM fuses and lockbits partition. */ +#define NVRAM_PARTITION_TYPE_MUM_FUSELOCK 0xc06 +/* enum: UEFI expansion ROM if separate from PXE */ +#define NVRAM_PARTITION_TYPE_EXPANSION_UEFI 0xd00 +/* enum: Used by the expansion ROM for logging */ +#define NVRAM_PARTITION_TYPE_PXE_LOG 0x1000 +/* enum: Used for XIP code of shmbooted images */ +#define NVRAM_PARTITION_TYPE_XIP_SCRATCH 0x1100 +/* enum: Spare partition 2 */ +#define NVRAM_PARTITION_TYPE_SPARE_2 0x1200 +/* enum: Manufacturing partition. Used during manufacture to pass information + * between XJTAG and Manftest. + */ +#define NVRAM_PARTITION_TYPE_MANUFACTURING 0x1300 +/* enum: Spare partition 4 */ +#define NVRAM_PARTITION_TYPE_SPARE_4 0x1400 +/* enum: Spare partition 5 */ +#define NVRAM_PARTITION_TYPE_SPARE_5 0x1500 +/* enum: Partition for reporting MC status. See mc_flash_layout.h + * medford_mc_status_hdr_t for layout on Medford. + */ +#define NVRAM_PARTITION_TYPE_STATUS 0x1600 +/* enum: Spare partition 13 */ +#define NVRAM_PARTITION_TYPE_SPARE_13 0x1700 +/* enum: Spare partition 14 */ +#define NVRAM_PARTITION_TYPE_SPARE_14 0x1800 +/* enum: Spare partition 15 */ +#define NVRAM_PARTITION_TYPE_SPARE_15 0x1900 +/* enum: Spare partition 16 */ +#define NVRAM_PARTITION_TYPE_SPARE_16 0x1a00 +/* enum: Factory defaults for dynamic configuration */ +#define NVRAM_PARTITION_TYPE_DYNCONFIG_DEFAULTS 0x1b00 +/* enum: Factory defaults for expansion ROM configuration */ +#define NVRAM_PARTITION_TYPE_ROMCONFIG_DEFAULTS 0x1c00 +/* enum: Field Replaceable Unit inventory information for use on IPMI + * platforms. See SF-119124-PS. The STATIC_CONFIG partition may contain a + * subset of the information stored in this partition. 
+ */ +#define NVRAM_PARTITION_TYPE_FRU_INFORMATION 0x1d00 +/* enum: Bundle image partition */ +#define NVRAM_PARTITION_TYPE_BUNDLE 0x1e00 +/* enum: Bundle metadata partition that holds additional information related to + * a bundle update in TLV format + */ +#define NVRAM_PARTITION_TYPE_BUNDLE_METADATA 0x1e01 +/* enum: Bundle update non-volatile log output partition */ +#define NVRAM_PARTITION_TYPE_BUNDLE_LOG 0x1e02 +/* enum: Partition for Solarflare gPXE bootrom installed via Bundle update. */ +#define NVRAM_PARTITION_TYPE_EXPANSION_ROM_INTERNAL 0x1e03 +/* enum: Start of reserved value range (firmware may use for any purpose) */ +#define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MIN 0xff00 +/* enum: End of reserved value range (firmware may use for any purpose) */ +#define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MAX 0xfffd +/* enum: Recovery partition map (provided if real map is missing or corrupt) */ +#define NVRAM_PARTITION_TYPE_RECOVERY_MAP 0xfffe +/* enum: Partition map (real map as stored in flash) */ +#define NVRAM_PARTITION_TYPE_PARTITION_MAP 0xffff +#define NVRAM_PARTITION_TYPE_ID_LBN 0 +#define NVRAM_PARTITION_TYPE_ID_WIDTH 16 + +/* LICENSED_APP_ID structuredef */ +#define LICENSED_APP_ID_LEN 4 +#define LICENSED_APP_ID_ID_OFST 0 +#define LICENSED_APP_ID_ID_LEN 4 +/* enum: OpenOnload */ +#define LICENSED_APP_ID_ONLOAD 0x1 +/* enum: PTP timestamping */ +#define LICENSED_APP_ID_PTP 0x2 +/* enum: SolarCapture Pro */ +#define LICENSED_APP_ID_SOLARCAPTURE_PRO 0x4 +/* enum: SolarSecure filter engine */ +#define LICENSED_APP_ID_SOLARSECURE 0x8 +/* enum: Performance monitor */ +#define LICENSED_APP_ID_PERF_MONITOR 0x10 +/* enum: SolarCapture Live */ +#define LICENSED_APP_ID_SOLARCAPTURE_LIVE 0x20 +/* enum: Capture SolarSystem */ +#define LICENSED_APP_ID_CAPTURE_SOLARSYSTEM 0x40 +/* enum: Network Access Control */ +#define LICENSED_APP_ID_NETWORK_ACCESS_CONTROL 0x80 +/* enum: TCP Direct */ +#define LICENSED_APP_ID_TCP_DIRECT 0x100 +/* enum: Low Latency */ +#define LICENSED_APP_ID_LOW_LATENCY 0x200 +/* enum: SolarCapture Tap */ +#define LICENSED_APP_ID_SOLARCAPTURE_TAP 0x400 +/* enum: Capture SolarSystem 40G */ +#define LICENSED_APP_ID_CAPTURE_SOLARSYSTEM_40G 0x800 +/* enum: Capture SolarSystem 1G */ +#define LICENSED_APP_ID_CAPTURE_SOLARSYSTEM_1G 0x1000 +/* enum: ScaleOut Onload */ +#define LICENSED_APP_ID_SCALEOUT_ONLOAD 0x2000 +/* enum: SCS Network Analytics Dashboard */ +#define LICENSED_APP_ID_DSHBRD 0x4000 +/* enum: SolarCapture Trading Analytics */ +#define LICENSED_APP_ID_SCATRD 0x8000 +#define LICENSED_APP_ID_ID_LBN 0 +#define LICENSED_APP_ID_ID_WIDTH 32 + +/* LICENSED_FEATURES structuredef */ +#define LICENSED_FEATURES_LEN 8 +/* Bitmask of licensed firmware features */ +#define LICENSED_FEATURES_MASK_OFST 0 +#define LICENSED_FEATURES_MASK_LEN 8 +#define LICENSED_FEATURES_MASK_LO_OFST 0 +#define LICENSED_FEATURES_MASK_HI_OFST 4 +#define LICENSED_FEATURES_RX_CUT_THROUGH_LBN 0 +#define LICENSED_FEATURES_RX_CUT_THROUGH_WIDTH 1 +#define LICENSED_FEATURES_PIO_LBN 1 +#define LICENSED_FEATURES_PIO_WIDTH 1 +#define LICENSED_FEATURES_EVQ_TIMER_LBN 2 +#define LICENSED_FEATURES_EVQ_TIMER_WIDTH 1 +#define LICENSED_FEATURES_CLOCK_LBN 3 +#define LICENSED_FEATURES_CLOCK_WIDTH 1 +#define LICENSED_FEATURES_RX_TIMESTAMPS_LBN 4 +#define LICENSED_FEATURES_RX_TIMESTAMPS_WIDTH 1 +#define LICENSED_FEATURES_TX_TIMESTAMPS_LBN 5 +#define LICENSED_FEATURES_TX_TIMESTAMPS_WIDTH 1 +#define LICENSED_FEATURES_RX_SNIFF_LBN 6 +#define LICENSED_FEATURES_RX_SNIFF_WIDTH 1 +#define LICENSED_FEATURES_TX_SNIFF_LBN 7 +#define 
LICENSED_FEATURES_TX_SNIFF_WIDTH 1 +#define LICENSED_FEATURES_PROXY_FILTER_OPS_LBN 8 +#define LICENSED_FEATURES_PROXY_FILTER_OPS_WIDTH 1 +#define LICENSED_FEATURES_EVENT_CUT_THROUGH_LBN 9 +#define LICENSED_FEATURES_EVENT_CUT_THROUGH_WIDTH 1 +#define LICENSED_FEATURES_MASK_LBN 0 +#define LICENSED_FEATURES_MASK_WIDTH 64 + +/* LICENSED_V3_APPS structuredef */ +#define LICENSED_V3_APPS_LEN 8 +/* Bitmask of licensed applications */ +#define LICENSED_V3_APPS_MASK_OFST 0 +#define LICENSED_V3_APPS_MASK_LEN 8 +#define LICENSED_V3_APPS_MASK_LO_OFST 0 +#define LICENSED_V3_APPS_MASK_HI_OFST 4 +#define LICENSED_V3_APPS_ONLOAD_LBN 0 +#define LICENSED_V3_APPS_ONLOAD_WIDTH 1 +#define LICENSED_V3_APPS_PTP_LBN 1 +#define LICENSED_V3_APPS_PTP_WIDTH 1 +#define LICENSED_V3_APPS_SOLARCAPTURE_PRO_LBN 2 +#define LICENSED_V3_APPS_SOLARCAPTURE_PRO_WIDTH 1 +#define LICENSED_V3_APPS_SOLARSECURE_LBN 3 +#define LICENSED_V3_APPS_SOLARSECURE_WIDTH 1 +#define LICENSED_V3_APPS_PERF_MONITOR_LBN 4 +#define LICENSED_V3_APPS_PERF_MONITOR_WIDTH 1 +#define LICENSED_V3_APPS_SOLARCAPTURE_LIVE_LBN 5 +#define LICENSED_V3_APPS_SOLARCAPTURE_LIVE_WIDTH 1 +#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_LBN 6 +#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_WIDTH 1 +#define LICENSED_V3_APPS_NETWORK_ACCESS_CONTROL_LBN 7 +#define LICENSED_V3_APPS_NETWORK_ACCESS_CONTROL_WIDTH 1 +#define LICENSED_V3_APPS_TCP_DIRECT_LBN 8 +#define LICENSED_V3_APPS_TCP_DIRECT_WIDTH 1 +#define LICENSED_V3_APPS_LOW_LATENCY_LBN 9 +#define LICENSED_V3_APPS_LOW_LATENCY_WIDTH 1 +#define LICENSED_V3_APPS_SOLARCAPTURE_TAP_LBN 10 +#define LICENSED_V3_APPS_SOLARCAPTURE_TAP_WIDTH 1 +#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_40G_LBN 11 +#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_40G_WIDTH 1 +#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_1G_LBN 12 +#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_1G_WIDTH 1 +#define LICENSED_V3_APPS_SCALEOUT_ONLOAD_LBN 13 +#define LICENSED_V3_APPS_SCALEOUT_ONLOAD_WIDTH 1 +#define LICENSED_V3_APPS_DSHBRD_LBN 14 +#define LICENSED_V3_APPS_DSHBRD_WIDTH 1 +#define LICENSED_V3_APPS_SCATRD_LBN 15 +#define LICENSED_V3_APPS_SCATRD_WIDTH 1 +#define LICENSED_V3_APPS_MASK_LBN 0 +#define LICENSED_V3_APPS_MASK_WIDTH 64 + +/* LICENSED_V3_FEATURES structuredef */ +#define LICENSED_V3_FEATURES_LEN 8 +/* Bitmask of licensed firmware features */ +#define LICENSED_V3_FEATURES_MASK_OFST 0 +#define LICENSED_V3_FEATURES_MASK_LEN 8 +#define LICENSED_V3_FEATURES_MASK_LO_OFST 0 +#define LICENSED_V3_FEATURES_MASK_HI_OFST 4 +#define LICENSED_V3_FEATURES_RX_CUT_THROUGH_LBN 0 +#define LICENSED_V3_FEATURES_RX_CUT_THROUGH_WIDTH 1 +#define LICENSED_V3_FEATURES_PIO_LBN 1 +#define LICENSED_V3_FEATURES_PIO_WIDTH 1 +#define LICENSED_V3_FEATURES_EVQ_TIMER_LBN 2 +#define LICENSED_V3_FEATURES_EVQ_TIMER_WIDTH 1 +#define LICENSED_V3_FEATURES_CLOCK_LBN 3 +#define LICENSED_V3_FEATURES_CLOCK_WIDTH 1 +#define LICENSED_V3_FEATURES_RX_TIMESTAMPS_LBN 4 +#define LICENSED_V3_FEATURES_RX_TIMESTAMPS_WIDTH 1 +#define LICENSED_V3_FEATURES_TX_TIMESTAMPS_LBN 5 +#define LICENSED_V3_FEATURES_TX_TIMESTAMPS_WIDTH 1 +#define LICENSED_V3_FEATURES_RX_SNIFF_LBN 6 +#define LICENSED_V3_FEATURES_RX_SNIFF_WIDTH 1 +#define LICENSED_V3_FEATURES_TX_SNIFF_LBN 7 +#define LICENSED_V3_FEATURES_TX_SNIFF_WIDTH 1 +#define LICENSED_V3_FEATURES_PROXY_FILTER_OPS_LBN 8 +#define LICENSED_V3_FEATURES_PROXY_FILTER_OPS_WIDTH 1 +#define LICENSED_V3_FEATURES_EVENT_CUT_THROUGH_LBN 9 +#define LICENSED_V3_FEATURES_EVENT_CUT_THROUGH_WIDTH 1 +#define LICENSED_V3_FEATURES_MASK_LBN 0 +#define LICENSED_V3_FEATURES_MASK_WIDTH 64 + +/* 
TX_TIMESTAMP_EVENT structuredef */ +#define TX_TIMESTAMP_EVENT_LEN 6 +/* lower 16 bits of timestamp data */ +#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_OFST 0 +#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_LEN 2 +#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_LBN 0 +#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_WIDTH 16 +/* Type of TX event, ordinary TX completion, low or high part of TX timestamp + */ +#define TX_TIMESTAMP_EVENT_TX_EV_TYPE_OFST 3 +#define TX_TIMESTAMP_EVENT_TX_EV_TYPE_LEN 1 +/* enum: This is a TX completion event, not a timestamp */ +#define TX_TIMESTAMP_EVENT_TX_EV_COMPLETION 0x0 +/* enum: This is a TX completion event for a CTPIO transmit. The event format + * is the same as for TX_EV_COMPLETION. + */ +#define TX_TIMESTAMP_EVENT_TX_EV_CTPIO_COMPLETION 0x11 +/* enum: This is the low part of a TX timestamp for a CTPIO transmission. The + * event format is the same as for TX_EV_TSTAMP_LO + */ +#define TX_TIMESTAMP_EVENT_TX_EV_CTPIO_TS_LO 0x12 +/* enum: This is the high part of a TX timestamp for a CTPIO transmission. The + * event format is the same as for TX_EV_TSTAMP_HI + */ +#define TX_TIMESTAMP_EVENT_TX_EV_CTPIO_TS_HI 0x13 +/* enum: This is the low part of a TX timestamp event */ +#define TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_LO 0x51 +/* enum: This is the high part of a TX timestamp event */ +#define TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_HI 0x52 +#define TX_TIMESTAMP_EVENT_TX_EV_TYPE_LBN 24 +#define TX_TIMESTAMP_EVENT_TX_EV_TYPE_WIDTH 8 +/* upper 16 bits of timestamp data */ +#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_OFST 4 +#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_LEN 2 +#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_LBN 32 +#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_WIDTH 16 + +/* RSS_MODE structuredef */ +#define RSS_MODE_LEN 1 +/* The RSS mode for a particular packet type is a value from 0 - 15 which can + * be considered as 4 bits selecting which fields are included in the hash. (A + * value 0 effectively disables RSS spreading for the packet type.) The YAML + * generation tools require this structure to be a whole number of bytes wide, + * but only 4 bits are relevant. + */ +#define RSS_MODE_HASH_SELECTOR_OFST 0 +#define RSS_MODE_HASH_SELECTOR_LEN 1 +#define RSS_MODE_HASH_SRC_ADDR_LBN 0 +#define RSS_MODE_HASH_SRC_ADDR_WIDTH 1 +#define RSS_MODE_HASH_DST_ADDR_LBN 1 +#define RSS_MODE_HASH_DST_ADDR_WIDTH 1 +#define RSS_MODE_HASH_SRC_PORT_LBN 2 +#define RSS_MODE_HASH_SRC_PORT_WIDTH 1 +#define RSS_MODE_HASH_DST_PORT_LBN 3 +#define RSS_MODE_HASH_DST_PORT_WIDTH 1 +#define RSS_MODE_HASH_SELECTOR_LBN 0 +#define RSS_MODE_HASH_SELECTOR_WIDTH 8 + +/* CTPIO_STATS_MAP structuredef */ +#define CTPIO_STATS_MAP_LEN 4 +/* The (function relative) VI number */ +#define CTPIO_STATS_MAP_VI_OFST 0 +#define CTPIO_STATS_MAP_VI_LEN 2 +#define CTPIO_STATS_MAP_VI_LBN 0 +#define CTPIO_STATS_MAP_VI_WIDTH 16 +/* The target bucket for the VI */ +#define CTPIO_STATS_MAP_BUCKET_OFST 2 +#define CTPIO_STATS_MAP_BUCKET_LEN 2 +#define CTPIO_STATS_MAP_BUCKET_LBN 16 +#define CTPIO_STATS_MAP_BUCKET_WIDTH 16 + +/* MESSAGE_TYPE structuredef: When present this defines the meaning of a + * message, and is used to protect against chosen message attacks in signed + * messages, regardless their origin. The message type also defines the + * signature cryptographic algorithm, encoding, and message fields included in + * the signature. The values are used in different commands but must be unique + * across all commands, e.g. MC_CMD_TSA_BIND_IN_SECURE_UNBIND uses different + * message type than MC_CMD_SECURE_NIC_INFO_IN_STATUS. 
+ */ +#define MESSAGE_TYPE_LEN 4 +#define MESSAGE_TYPE_MESSAGE_TYPE_OFST 0 +#define MESSAGE_TYPE_MESSAGE_TYPE_LEN 4 +#define MESSAGE_TYPE_UNUSED 0x0 /* enum */ +/* enum: Message type value for the response to a + * MC_CMD_TSA_BIND_IN_SECURE_UNBIND message. TSA_SECURE_UNBIND messages are + * ECDSA SECP384R1 signed using SHA384 message digest algorithm over fields + * MESSAGE_TYPE, TSANID, TSAID, and UNBINDTOKEN, and encoded as suggested by + * RFC6979 (section 2.4). + */ +#define MESSAGE_TYPE_TSA_SECURE_UNBIND 0x1 +/* enum: Message type value for the response to a + * MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION message. TSA_SECURE_DECOMMISSION + * messages are ECDSA SECP384R1 signed using SHA384 message digest algorithm + * over fields MESSAGE_TYPE, TSAID, USER, and REASON, and encoded as suggested + * by RFC6979 (section 2.4). + */ +#define MESSAGE_TYPE_TSA_SECURE_DECOMMISSION 0x2 +/* enum: Message type value for the response to a + * MC_CMD_SECURE_NIC_INFO_IN_STATUS message. This enum value is not sequential + * to other message types for backwards compatibility as the message type for + * MC_CMD_SECURE_NIC_INFO_IN_STATUS was defined before the existence of this + * global enum. + */ +#define MESSAGE_TYPE_SECURE_NIC_INFO_STATUS 0xdb4 +#define MESSAGE_TYPE_MESSAGE_TYPE_LBN 0 +#define MESSAGE_TYPE_MESSAGE_TYPE_WIDTH 32 + + +/***********************************/ +/* MC_CMD_READ_REGS + * Get a dump of the MCPU registers + */ +#define MC_CMD_READ_REGS 0x50 +#undef MC_CMD_0x50_PRIVILEGE_CTG + +#define MC_CMD_0x50_PRIVILEGE_CTG SRIOV_CTG_INSECURE + +/* MC_CMD_READ_REGS_IN msgrequest */ +#define MC_CMD_READ_REGS_IN_LEN 0 + +/* MC_CMD_READ_REGS_OUT msgresponse */ +#define MC_CMD_READ_REGS_OUT_LEN 308 +/* Whether the corresponding register entry contains a valid value */ +#define MC_CMD_READ_REGS_OUT_MASK_OFST 0 +#define MC_CMD_READ_REGS_OUT_MASK_LEN 16 +/* Same order as MIPS GDB (r0-r31, sr, lo, hi, bad, cause, 32 x float, fsr, + * fir, fp) + */ +#define MC_CMD_READ_REGS_OUT_REGS_OFST 16 +#define MC_CMD_READ_REGS_OUT_REGS_LEN 4 +#define MC_CMD_READ_REGS_OUT_REGS_NUM 73 + + +/***********************************/ +/* MC_CMD_INIT_EVQ + * Set up an event queue according to the supplied parameters. The IN arguments + * end with an address for each 4k of host memory required to back the EVQ. + */ +#define MC_CMD_INIT_EVQ 0x80 +#undef MC_CMD_0x80_PRIVILEGE_CTG + +#define MC_CMD_0x80_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_INIT_EVQ_IN msgrequest */ +#define MC_CMD_INIT_EVQ_IN_LENMIN 44 +#define MC_CMD_INIT_EVQ_IN_LENMAX 548 +#define MC_CMD_INIT_EVQ_IN_LENMAX_MCDI2 548 +#define MC_CMD_INIT_EVQ_IN_LEN(num) (36+8*(num)) +#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_NUM(len) (((len)-36)/8) +/* Size, in entries */ +#define MC_CMD_INIT_EVQ_IN_SIZE_OFST 0 +#define MC_CMD_INIT_EVQ_IN_SIZE_LEN 4 +/* Desired instance. Must be set to a specific instance, which is a function + * local queue index. + */ +#define MC_CMD_INIT_EVQ_IN_INSTANCE_OFST 4 +#define MC_CMD_INIT_EVQ_IN_INSTANCE_LEN 4 +/* The initial timer value. The load value is ignored if the timer mode is DIS. 
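+ *
+ * (Sizing aside for the surrounding MC_CMD_INIT_EVQ_IN message, illustrative
+ * only and assuming the usual 8-byte event descriptors: an EVQ of 8192
+ * entries occupies 64 KiB of host memory, i.e. sixteen 4 KiB pages, so the
+ * request carries 16 DMA_ADDR entries and a total length of
+ * MC_CMD_INIT_EVQ_IN_LEN(16) = 36 + 8 * 16 = 164 bytes.)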
+ */ +#define MC_CMD_INIT_EVQ_IN_TMR_LOAD_OFST 8 +#define MC_CMD_INIT_EVQ_IN_TMR_LOAD_LEN 4 +/* The reload value is ignored in one-shot modes */ +#define MC_CMD_INIT_EVQ_IN_TMR_RELOAD_OFST 12 +#define MC_CMD_INIT_EVQ_IN_TMR_RELOAD_LEN 4 +/* tbd */ +#define MC_CMD_INIT_EVQ_IN_FLAGS_OFST 16 +#define MC_CMD_INIT_EVQ_IN_FLAGS_LEN 4 +#define MC_CMD_INIT_EVQ_IN_FLAG_INTERRUPTING_LBN 0 +#define MC_CMD_INIT_EVQ_IN_FLAG_INTERRUPTING_WIDTH 1 +#define MC_CMD_INIT_EVQ_IN_FLAG_RPTR_DOS_LBN 1 +#define MC_CMD_INIT_EVQ_IN_FLAG_RPTR_DOS_WIDTH 1 +#define MC_CMD_INIT_EVQ_IN_FLAG_INT_ARMD_LBN 2 +#define MC_CMD_INIT_EVQ_IN_FLAG_INT_ARMD_WIDTH 1 +#define MC_CMD_INIT_EVQ_IN_FLAG_CUT_THRU_LBN 3 +#define MC_CMD_INIT_EVQ_IN_FLAG_CUT_THRU_WIDTH 1 +#define MC_CMD_INIT_EVQ_IN_FLAG_RX_MERGE_LBN 4 +#define MC_CMD_INIT_EVQ_IN_FLAG_RX_MERGE_WIDTH 1 +#define MC_CMD_INIT_EVQ_IN_FLAG_TX_MERGE_LBN 5 +#define MC_CMD_INIT_EVQ_IN_FLAG_TX_MERGE_WIDTH 1 +#define MC_CMD_INIT_EVQ_IN_FLAG_USE_TIMER_LBN 6 +#define MC_CMD_INIT_EVQ_IN_FLAG_USE_TIMER_WIDTH 1 +#define MC_CMD_INIT_EVQ_IN_TMR_MODE_OFST 20 +#define MC_CMD_INIT_EVQ_IN_TMR_MODE_LEN 4 +/* enum: Disabled */ +#define MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS 0x0 +/* enum: Immediate */ +#define MC_CMD_INIT_EVQ_IN_TMR_IMMED_START 0x1 +/* enum: Triggered */ +#define MC_CMD_INIT_EVQ_IN_TMR_TRIG_START 0x2 +/* enum: Hold-off */ +#define MC_CMD_INIT_EVQ_IN_TMR_INT_HLDOFF 0x3 +/* Target EVQ for wakeups if in wakeup mode. */ +#define MC_CMD_INIT_EVQ_IN_TARGET_EVQ_OFST 24 +#define MC_CMD_INIT_EVQ_IN_TARGET_EVQ_LEN 4 +/* Target interrupt if in interrupting mode (note union with target EVQ). Use + * MC_CMD_RESOURCE_INSTANCE_ANY unless a specific one required for test + * purposes. + */ +#define MC_CMD_INIT_EVQ_IN_IRQ_NUM_OFST 24 +#define MC_CMD_INIT_EVQ_IN_IRQ_NUM_LEN 4 +/* Event Counter Mode. */ +#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_OFST 28 +#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_LEN 4 +/* enum: Disabled */ +#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS 0x0 +/* enum: Disabled */ +#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_RX 0x1 +/* enum: Disabled */ +#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_TX 0x2 +/* enum: Disabled */ +#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_RXTX 0x3 +/* Event queue packet count threshold. */ +#define MC_CMD_INIT_EVQ_IN_COUNT_THRSHLD_OFST 32 +#define MC_CMD_INIT_EVQ_IN_COUNT_THRSHLD_LEN 4 +/* 64-bit address of 4k of 4k-aligned host memory buffer */ +#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_OFST 36 +#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_LEN 8 +#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_LO_OFST 36 +#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_HI_OFST 40 +#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_MINNUM 1 +#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_MAXNUM 64 +#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_MAXNUM_MCDI2 64 + +/* MC_CMD_INIT_EVQ_OUT msgresponse */ +#define MC_CMD_INIT_EVQ_OUT_LEN 4 +/* Only valid if INTRFLAG was true */ +#define MC_CMD_INIT_EVQ_OUT_IRQ_OFST 0 +#define MC_CMD_INIT_EVQ_OUT_IRQ_LEN 4 + +/* MC_CMD_INIT_EVQ_V2_IN msgrequest */ +#define MC_CMD_INIT_EVQ_V2_IN_LENMIN 44 +#define MC_CMD_INIT_EVQ_V2_IN_LENMAX 548 +#define MC_CMD_INIT_EVQ_V2_IN_LENMAX_MCDI2 548 +#define MC_CMD_INIT_EVQ_V2_IN_LEN(num) (36+8*(num)) +#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_NUM(len) (((len)-36)/8) +/* Size, in entries */ +#define MC_CMD_INIT_EVQ_V2_IN_SIZE_OFST 0 +#define MC_CMD_INIT_EVQ_V2_IN_SIZE_LEN 4 +/* Desired instance. Must be set to a specific instance, which is a function + * local queue index. + */ +#define MC_CMD_INIT_EVQ_V2_IN_INSTANCE_OFST 4 +#define MC_CMD_INIT_EVQ_V2_IN_INSTANCE_LEN 4 +/* The initial timer value. 
The load value is ignored if the timer mode is DIS. + */ +#define MC_CMD_INIT_EVQ_V2_IN_TMR_LOAD_OFST 8 +#define MC_CMD_INIT_EVQ_V2_IN_TMR_LOAD_LEN 4 +/* The reload value is ignored in one-shot modes */ +#define MC_CMD_INIT_EVQ_V2_IN_TMR_RELOAD_OFST 12 +#define MC_CMD_INIT_EVQ_V2_IN_TMR_RELOAD_LEN 4 +/* tbd */ +#define MC_CMD_INIT_EVQ_V2_IN_FLAGS_OFST 16 +#define MC_CMD_INIT_EVQ_V2_IN_FLAGS_LEN 4 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_INTERRUPTING_LBN 0 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_INTERRUPTING_WIDTH 1 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_RPTR_DOS_LBN 1 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_RPTR_DOS_WIDTH 1 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_INT_ARMD_LBN 2 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_INT_ARMD_WIDTH 1 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_CUT_THRU_LBN 3 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_CUT_THRU_WIDTH 1 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_RX_MERGE_LBN 4 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_RX_MERGE_WIDTH 1 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TX_MERGE_LBN 5 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TX_MERGE_WIDTH 1 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_USE_TIMER_LBN 6 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_USE_TIMER_WIDTH 1 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_LBN 7 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_WIDTH 4 +/* enum: All initialisation flags specified by host. */ +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_MANUAL 0x0 +/* enum: MEDFORD only. Certain initialisation flags specified by host may be + * over-ridden by firmware based on licenses and firmware variant in order to + * provide the lowest latency achievable. See + * MC_CMD_INIT_EVQ_V2/MC_CMD_INIT_EVQ_V2_OUT/FLAGS for list of affected flags. + */ +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_LOW_LATENCY 0x1 +/* enum: MEDFORD only. Certain initialisation flags specified by host may be + * over-ridden by firmware based on licenses and firmware variant in order to + * provide the best throughput achievable. See + * MC_CMD_INIT_EVQ_V2/MC_CMD_INIT_EVQ_V2_OUT/FLAGS for list of affected flags. + */ +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_THROUGHPUT 0x2 +/* enum: MEDFORD only. Certain initialisation flags may be over-ridden by + * firmware based on licenses and firmware variant. See + * MC_CMD_INIT_EVQ_V2/MC_CMD_INIT_EVQ_V2_OUT/FLAGS for list of affected flags. + */ +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO 0x3 +#define MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_OFST 20 +#define MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_LEN 4 +/* enum: Disabled */ +#define MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_DIS 0x0 +/* enum: Immediate */ +#define MC_CMD_INIT_EVQ_V2_IN_TMR_IMMED_START 0x1 +/* enum: Triggered */ +#define MC_CMD_INIT_EVQ_V2_IN_TMR_TRIG_START 0x2 +/* enum: Hold-off */ +#define MC_CMD_INIT_EVQ_V2_IN_TMR_INT_HLDOFF 0x3 +/* Target EVQ for wakeups if in wakeup mode. */ +#define MC_CMD_INIT_EVQ_V2_IN_TARGET_EVQ_OFST 24 +#define MC_CMD_INIT_EVQ_V2_IN_TARGET_EVQ_LEN 4 +/* Target interrupt if in interrupting mode (note union with target EVQ). Use + * MC_CMD_RESOURCE_INSTANCE_ANY unless a specific one required for test + * purposes. + */ +#define MC_CMD_INIT_EVQ_V2_IN_IRQ_NUM_OFST 24 +#define MC_CMD_INIT_EVQ_V2_IN_IRQ_NUM_LEN 4 +/* Event Counter Mode. */ +#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_OFST 28 +#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_LEN 4 +/* enum: Disabled */ +#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_DIS 0x0 +/* enum: Disabled */ +#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_RX 0x1 +/* enum: Disabled */ +#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_TX 0x2 +/* enum: Disabled */ +#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_RXTX 0x3 +/* Event queue packet count threshold. 
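+ *
+ * (Note on the V2 flags above, restating the FLAG_TYPE descriptions: with
+ * LOW_LATENCY, THROUGHPUT or AUTO the firmware may override the requested
+ * cut-through/merge flags, so the configuration actually applied should be
+ * read back from MC_CMD_INIT_EVQ_V2_OUT_FLAGS.)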
*/ +#define MC_CMD_INIT_EVQ_V2_IN_COUNT_THRSHLD_OFST 32 +#define MC_CMD_INIT_EVQ_V2_IN_COUNT_THRSHLD_LEN 4 +/* 64-bit address of 4k of 4k-aligned host memory buffer */ +#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_OFST 36 +#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_LEN 8 +#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_LO_OFST 36 +#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_HI_OFST 40 +#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_MINNUM 1 +#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_MAXNUM 64 +#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_MAXNUM_MCDI2 64 + +/* MC_CMD_INIT_EVQ_V2_OUT msgresponse */ +#define MC_CMD_INIT_EVQ_V2_OUT_LEN 8 +/* Only valid if INTRFLAG was true */ +#define MC_CMD_INIT_EVQ_V2_OUT_IRQ_OFST 0 +#define MC_CMD_INIT_EVQ_V2_OUT_IRQ_LEN 4 +/* Actual configuration applied on the card */ +#define MC_CMD_INIT_EVQ_V2_OUT_FLAGS_OFST 4 +#define MC_CMD_INIT_EVQ_V2_OUT_FLAGS_LEN 4 +#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_CUT_THRU_LBN 0 +#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_CUT_THRU_WIDTH 1 +#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_RX_MERGE_LBN 1 +#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_RX_MERGE_WIDTH 1 +#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_TX_MERGE_LBN 2 +#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_TX_MERGE_WIDTH 1 +#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_RXQ_FORCE_EV_MERGING_LBN 3 +#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_RXQ_FORCE_EV_MERGING_WIDTH 1 + +/* QUEUE_CRC_MODE structuredef */ +#define QUEUE_CRC_MODE_LEN 1 +#define QUEUE_CRC_MODE_MODE_LBN 0 +#define QUEUE_CRC_MODE_MODE_WIDTH 4 +/* enum: No CRC. */ +#define QUEUE_CRC_MODE_NONE 0x0 +/* enum: CRC Fiber channel over ethernet. */ +#define QUEUE_CRC_MODE_FCOE 0x1 +/* enum: CRC (digest) iSCSI header only. */ +#define QUEUE_CRC_MODE_ISCSI_HDR 0x2 +/* enum: CRC (digest) iSCSI header and payload. */ +#define QUEUE_CRC_MODE_ISCSI 0x3 +/* enum: CRC Fiber channel over IP over ethernet. */ +#define QUEUE_CRC_MODE_FCOIPOE 0x4 +/* enum: CRC MPA. */ +#define QUEUE_CRC_MODE_MPA 0x5 +#define QUEUE_CRC_MODE_SPARE_LBN 4 +#define QUEUE_CRC_MODE_SPARE_WIDTH 4 + + +/***********************************/ +/* MC_CMD_INIT_RXQ + * set up a receive queue according to the supplied parameters. The IN + * arguments end with an address for each 4k of host memory required to back + * the RXQ. + */ +#define MC_CMD_INIT_RXQ 0x81 +#undef MC_CMD_0x81_PRIVILEGE_CTG + +#define MC_CMD_0x81_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_INIT_RXQ_IN msgrequest: Legacy RXQ_INIT request. Use extended version + * in new code. + */ +#define MC_CMD_INIT_RXQ_IN_LENMIN 36 +#define MC_CMD_INIT_RXQ_IN_LENMAX 252 +#define MC_CMD_INIT_RXQ_IN_LENMAX_MCDI2 1020 +#define MC_CMD_INIT_RXQ_IN_LEN(num) (28+8*(num)) +#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_NUM(len) (((len)-28)/8) +/* Size, in entries */ +#define MC_CMD_INIT_RXQ_IN_SIZE_OFST 0 +#define MC_CMD_INIT_RXQ_IN_SIZE_LEN 4 +/* The EVQ to send events to. This is an index originally specified to INIT_EVQ + */ +#define MC_CMD_INIT_RXQ_IN_TARGET_EVQ_OFST 4 +#define MC_CMD_INIT_RXQ_IN_TARGET_EVQ_LEN 4 +/* The value to put in the event data. Check hardware spec. for valid range. */ +#define MC_CMD_INIT_RXQ_IN_LABEL_OFST 8 +#define MC_CMD_INIT_RXQ_IN_LABEL_LEN 4 +/* Desired instance. Must be set to a specific instance, which is a function + * local queue index. + */ +#define MC_CMD_INIT_RXQ_IN_INSTANCE_OFST 12 +#define MC_CMD_INIT_RXQ_IN_INSTANCE_LEN 4 +/* There will be more flags here. 
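+ *
+ * (Illustrative note on the _LBN/_WIDTH pairs below, not part of the
+ * generated definitions: each flag occupies WIDTH bits of this 32-bit word
+ * starting at bit LBN, so a value is inserted as
+ *     flags |= (value & ((1u << WIDTH) - 1)) << LBN;
+ * e.g. requesting DISABLE_SCATTER sets bit 9 of FLAGS.)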
*/ +#define MC_CMD_INIT_RXQ_IN_FLAGS_OFST 16 +#define MC_CMD_INIT_RXQ_IN_FLAGS_LEN 4 +#define MC_CMD_INIT_RXQ_IN_FLAG_BUFF_MODE_LBN 0 +#define MC_CMD_INIT_RXQ_IN_FLAG_BUFF_MODE_WIDTH 1 +#define MC_CMD_INIT_RXQ_IN_FLAG_HDR_SPLIT_LBN 1 +#define MC_CMD_INIT_RXQ_IN_FLAG_HDR_SPLIT_WIDTH 1 +#define MC_CMD_INIT_RXQ_IN_FLAG_TIMESTAMP_LBN 2 +#define MC_CMD_INIT_RXQ_IN_FLAG_TIMESTAMP_WIDTH 1 +#define MC_CMD_INIT_RXQ_IN_CRC_MODE_LBN 3 +#define MC_CMD_INIT_RXQ_IN_CRC_MODE_WIDTH 4 +#define MC_CMD_INIT_RXQ_IN_FLAG_CHAIN_LBN 7 +#define MC_CMD_INIT_RXQ_IN_FLAG_CHAIN_WIDTH 1 +#define MC_CMD_INIT_RXQ_IN_FLAG_PREFIX_LBN 8 +#define MC_CMD_INIT_RXQ_IN_FLAG_PREFIX_WIDTH 1 +#define MC_CMD_INIT_RXQ_IN_FLAG_DISABLE_SCATTER_LBN 9 +#define MC_CMD_INIT_RXQ_IN_FLAG_DISABLE_SCATTER_WIDTH 1 +#define MC_CMD_INIT_RXQ_IN_UNUSED_LBN 10 +#define MC_CMD_INIT_RXQ_IN_UNUSED_WIDTH 1 +/* Owner ID to use if in buffer mode (zero if physical) */ +#define MC_CMD_INIT_RXQ_IN_OWNER_ID_OFST 20 +#define MC_CMD_INIT_RXQ_IN_OWNER_ID_LEN 4 +/* The port ID associated with the v-adaptor which should contain this DMAQ. */ +#define MC_CMD_INIT_RXQ_IN_PORT_ID_OFST 24 +#define MC_CMD_INIT_RXQ_IN_PORT_ID_LEN 4 +/* 64-bit address of 4k of 4k-aligned host memory buffer */ +#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_OFST 28 +#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_LEN 8 +#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_LO_OFST 28 +#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_HI_OFST 32 +#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_MINNUM 1 +#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_MAXNUM 28 +#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_MAXNUM_MCDI2 124 + +/* MC_CMD_INIT_RXQ_EXT_IN msgrequest: Extended RXQ_INIT with additional mode + * flags + */ +#define MC_CMD_INIT_RXQ_EXT_IN_LEN 544 +/* Size, in entries */ +#define MC_CMD_INIT_RXQ_EXT_IN_SIZE_OFST 0 +#define MC_CMD_INIT_RXQ_EXT_IN_SIZE_LEN 4 +/* The EVQ to send events to. This is an index originally specified to + * INIT_EVQ. If DMA_MODE == PACKED_STREAM this must be equal to INSTANCE. + */ +#define MC_CMD_INIT_RXQ_EXT_IN_TARGET_EVQ_OFST 4 +#define MC_CMD_INIT_RXQ_EXT_IN_TARGET_EVQ_LEN 4 +/* The value to put in the event data. Check hardware spec. for valid range. + * This field is ignored if DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER or DMA_MODE + * == PACKED_STREAM. + */ +#define MC_CMD_INIT_RXQ_EXT_IN_LABEL_OFST 8 +#define MC_CMD_INIT_RXQ_EXT_IN_LABEL_LEN 4 +/* Desired instance. Must be set to a specific instance, which is a function + * local queue index. + */ +#define MC_CMD_INIT_RXQ_EXT_IN_INSTANCE_OFST 12 +#define MC_CMD_INIT_RXQ_EXT_IN_INSTANCE_LEN 4 +/* There will be more flags here. 
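+ *
+ * (Illustrative note: the 4-bit DMA_MODE field below selects the descriptor
+ * format. SINGLE_PACKET is the normal networking mode; PACKED_STREAM and the
+ * equal-stride modes add the constraints on TARGET_EVQ and LABEL noted above,
+ * and EQUAL_STRIDE_SUPER_BUFFER is described here as supported only by the
+ * "dpdk" datapath firmware.)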
*/ +#define MC_CMD_INIT_RXQ_EXT_IN_FLAGS_OFST 16 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAGS_LEN 4 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_BUFF_MODE_LBN 0 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_BUFF_MODE_WIDTH 1 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_HDR_SPLIT_LBN 1 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_HDR_SPLIT_WIDTH 1 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_TIMESTAMP_LBN 2 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_TIMESTAMP_WIDTH 1 +#define MC_CMD_INIT_RXQ_EXT_IN_CRC_MODE_LBN 3 +#define MC_CMD_INIT_RXQ_EXT_IN_CRC_MODE_WIDTH 4 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_CHAIN_LBN 7 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_CHAIN_WIDTH 1 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_PREFIX_LBN 8 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_PREFIX_WIDTH 1 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_DISABLE_SCATTER_LBN 9 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_DISABLE_SCATTER_WIDTH 1 +#define MC_CMD_INIT_RXQ_EXT_IN_DMA_MODE_LBN 10 +#define MC_CMD_INIT_RXQ_EXT_IN_DMA_MODE_WIDTH 4 +/* enum: One packet per descriptor (for normal networking) */ +#define MC_CMD_INIT_RXQ_EXT_IN_SINGLE_PACKET 0x0 +/* enum: Pack multiple packets into large descriptors (for SolarCapture) */ +#define MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM 0x1 +/* enum: Pack multiple packets into large descriptors using the format designed + * to maximise packet rate. This mode uses 1 "bucket" per descriptor with + * multiple fixed-size packet buffers within each bucket. For a full + * description see SF-119419-TC. This mode is only supported by "dpdk" datapath + * firmware. + */ +#define MC_CMD_INIT_RXQ_EXT_IN_EQUAL_STRIDE_SUPER_BUFFER 0x2 +/* enum: Deprecated name for EQUAL_STRIDE_SUPER_BUFFER. */ +#define MC_CMD_INIT_RXQ_EXT_IN_EQUAL_STRIDE_PACKED_STREAM 0x2 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_SNAPSHOT_MODE_LBN 14 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_SNAPSHOT_MODE_WIDTH 1 +#define MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE_LBN 15 +#define MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE_WIDTH 3 +#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_1M 0x0 /* enum */ +#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_512K 0x1 /* enum */ +#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_256K 0x2 /* enum */ +#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_128K 0x3 /* enum */ +#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_64K 0x4 /* enum */ +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES_LBN 18 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES_WIDTH 1 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_FORCE_EV_MERGING_LBN 19 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_FORCE_EV_MERGING_WIDTH 1 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_NO_CONT_EV_LBN 20 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_NO_CONT_EV_WIDTH 1 +/* Owner ID to use if in buffer mode (zero if physical) */ +#define MC_CMD_INIT_RXQ_EXT_IN_OWNER_ID_OFST 20 +#define MC_CMD_INIT_RXQ_EXT_IN_OWNER_ID_LEN 4 +/* The port ID associated with the v-adaptor which should contain this DMAQ. 
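+ *
+ * (Illustrative note: a driver that simply wants the queue attached to its
+ * own function's v-adaptor can pass EVB_PORT_ID_ASSIGNED, defined earlier in
+ * this file as the port assigned to this function; specific external ports
+ * can be named with EVB_PORT_ID_MAC0..MAC3.)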
*/ +#define MC_CMD_INIT_RXQ_EXT_IN_PORT_ID_OFST 24 +#define MC_CMD_INIT_RXQ_EXT_IN_PORT_ID_LEN 4 +/* 64-bit address of 4k of 4k-aligned host memory buffer */ +#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_OFST 28 +#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_LEN 8 +#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_LO_OFST 28 +#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_HI_OFST 32 +#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_NUM 64 +/* Maximum length of packet to receive, if SNAPSHOT_MODE flag is set */ +#define MC_CMD_INIT_RXQ_EXT_IN_SNAPSHOT_LENGTH_OFST 540 +#define MC_CMD_INIT_RXQ_EXT_IN_SNAPSHOT_LENGTH_LEN 4 + +/* MC_CMD_INIT_RXQ_V3_IN msgrequest */ +#define MC_CMD_INIT_RXQ_V3_IN_LEN 560 +/* Size, in entries */ +#define MC_CMD_INIT_RXQ_V3_IN_SIZE_OFST 0 +#define MC_CMD_INIT_RXQ_V3_IN_SIZE_LEN 4 +/* The EVQ to send events to. This is an index originally specified to + * INIT_EVQ. If DMA_MODE == PACKED_STREAM this must be equal to INSTANCE. + */ +#define MC_CMD_INIT_RXQ_V3_IN_TARGET_EVQ_OFST 4 +#define MC_CMD_INIT_RXQ_V3_IN_TARGET_EVQ_LEN 4 +/* The value to put in the event data. Check hardware spec. for valid range. + * This field is ignored if DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER or DMA_MODE + * == PACKED_STREAM. + */ +#define MC_CMD_INIT_RXQ_V3_IN_LABEL_OFST 8 +#define MC_CMD_INIT_RXQ_V3_IN_LABEL_LEN 4 +/* Desired instance. Must be set to a specific instance, which is a function + * local queue index. + */ +#define MC_CMD_INIT_RXQ_V3_IN_INSTANCE_OFST 12 +#define MC_CMD_INIT_RXQ_V3_IN_INSTANCE_LEN 4 +/* There will be more flags here. */ +#define MC_CMD_INIT_RXQ_V3_IN_FLAGS_OFST 16 +#define MC_CMD_INIT_RXQ_V3_IN_FLAGS_LEN 4 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_BUFF_MODE_LBN 0 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_BUFF_MODE_WIDTH 1 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_HDR_SPLIT_LBN 1 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_HDR_SPLIT_WIDTH 1 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_TIMESTAMP_LBN 2 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_TIMESTAMP_WIDTH 1 +#define MC_CMD_INIT_RXQ_V3_IN_CRC_MODE_LBN 3 +#define MC_CMD_INIT_RXQ_V3_IN_CRC_MODE_WIDTH 4 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_CHAIN_LBN 7 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_CHAIN_WIDTH 1 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_PREFIX_LBN 8 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_PREFIX_WIDTH 1 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_DISABLE_SCATTER_LBN 9 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_DISABLE_SCATTER_WIDTH 1 +#define MC_CMD_INIT_RXQ_V3_IN_DMA_MODE_LBN 10 +#define MC_CMD_INIT_RXQ_V3_IN_DMA_MODE_WIDTH 4 +/* enum: One packet per descriptor (for normal networking) */ +#define MC_CMD_INIT_RXQ_V3_IN_SINGLE_PACKET 0x0 +/* enum: Pack multiple packets into large descriptors (for SolarCapture) */ +#define MC_CMD_INIT_RXQ_V3_IN_PACKED_STREAM 0x1 +/* enum: Pack multiple packets into large descriptors using the format designed + * to maximise packet rate. This mode uses 1 "bucket" per descriptor with + * multiple fixed-size packet buffers within each bucket. For a full + * description see SF-119419-TC. This mode is only supported by "dpdk" datapath + * firmware. + */ +#define MC_CMD_INIT_RXQ_V3_IN_EQUAL_STRIDE_SUPER_BUFFER 0x2 +/* enum: Deprecated name for EQUAL_STRIDE_SUPER_BUFFER. 
*/ +#define MC_CMD_INIT_RXQ_V3_IN_EQUAL_STRIDE_PACKED_STREAM 0x2 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_SNAPSHOT_MODE_LBN 14 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_SNAPSHOT_MODE_WIDTH 1 +#define MC_CMD_INIT_RXQ_V3_IN_PACKED_STREAM_BUFF_SIZE_LBN 15 +#define MC_CMD_INIT_RXQ_V3_IN_PACKED_STREAM_BUFF_SIZE_WIDTH 3 +#define MC_CMD_INIT_RXQ_V3_IN_PS_BUFF_1M 0x0 /* enum */ +#define MC_CMD_INIT_RXQ_V3_IN_PS_BUFF_512K 0x1 /* enum */ +#define MC_CMD_INIT_RXQ_V3_IN_PS_BUFF_256K 0x2 /* enum */ +#define MC_CMD_INIT_RXQ_V3_IN_PS_BUFF_128K 0x3 /* enum */ +#define MC_CMD_INIT_RXQ_V3_IN_PS_BUFF_64K 0x4 /* enum */ +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_WANT_OUTER_CLASSES_LBN 18 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_WANT_OUTER_CLASSES_WIDTH 1 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_FORCE_EV_MERGING_LBN 19 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_FORCE_EV_MERGING_WIDTH 1 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_NO_CONT_EV_LBN 20 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_NO_CONT_EV_WIDTH 1 +/* Owner ID to use if in buffer mode (zero if physical) */ +#define MC_CMD_INIT_RXQ_V3_IN_OWNER_ID_OFST 20 +#define MC_CMD_INIT_RXQ_V3_IN_OWNER_ID_LEN 4 +/* The port ID associated with the v-adaptor which should contain this DMAQ. */ +#define MC_CMD_INIT_RXQ_V3_IN_PORT_ID_OFST 24 +#define MC_CMD_INIT_RXQ_V3_IN_PORT_ID_LEN 4 +/* 64-bit address of 4k of 4k-aligned host memory buffer */ +#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_OFST 28 +#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_LEN 8 +#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_LO_OFST 28 +#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_HI_OFST 32 +#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_NUM 64 +/* Maximum length of packet to receive, if SNAPSHOT_MODE flag is set */ +#define MC_CMD_INIT_RXQ_V3_IN_SNAPSHOT_LENGTH_OFST 540 +#define MC_CMD_INIT_RXQ_V3_IN_SNAPSHOT_LENGTH_LEN 4 +/* The number of packet buffers that will be contained within each + * EQUAL_STRIDE_SUPER_BUFFER format bucket supplied by the driver. This field + * is ignored unless DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER. + */ +#define MC_CMD_INIT_RXQ_V3_IN_ES_PACKET_BUFFERS_PER_BUCKET_OFST 544 +#define MC_CMD_INIT_RXQ_V3_IN_ES_PACKET_BUFFERS_PER_BUCKET_LEN 4 +/* The length in bytes of the area in each packet buffer that can be written to + * by the adapter. This is used to store the packet prefix and the packet + * payload. This length does not include any end padding added by the driver. + * This field is ignored unless DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER. + */ +#define MC_CMD_INIT_RXQ_V3_IN_ES_MAX_DMA_LEN_OFST 548 +#define MC_CMD_INIT_RXQ_V3_IN_ES_MAX_DMA_LEN_LEN 4 +/* The length in bytes of a single packet buffer within a + * EQUAL_STRIDE_SUPER_BUFFER format bucket. This field is ignored unless + * DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER. + */ +#define MC_CMD_INIT_RXQ_V3_IN_ES_PACKET_STRIDE_OFST 552 +#define MC_CMD_INIT_RXQ_V3_IN_ES_PACKET_STRIDE_LEN 4 +/* The maximum time in nanoseconds that the datapath will be backpressured if + * there are no RX descriptors available. If the timeout is reached and there + * are still no descriptors then the packet will be dropped. A timeout of 0 + * means the datapath will never be blocked. This field is ignored unless + * DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER. 
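+ *
+ * (Geometry aside for the ES_* fields above, an inference from the field
+ * descriptions rather than from the specification: a bucket holds
+ * ES_PACKET_BUFFERS_PER_BUCKET buffers of ES_PACKET_STRIDE bytes each, of
+ * which at most ES_MAX_DMA_LEN bytes per buffer are written by the adapter,
+ * so ES_MAX_DMA_LEN would normally not exceed ES_PACKET_STRIDE.)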
+ */ +#define MC_CMD_INIT_RXQ_V3_IN_ES_HEAD_OF_LINE_BLOCK_TIMEOUT_OFST 556 +#define MC_CMD_INIT_RXQ_V3_IN_ES_HEAD_OF_LINE_BLOCK_TIMEOUT_LEN 4 + +/* MC_CMD_INIT_RXQ_V4_IN msgrequest: INIT_RXQ request with new field required + * for systems with a QDMA (currently, Riverhead) + */ +#define MC_CMD_INIT_RXQ_V4_IN_LEN 564 +/* Size, in entries */ +#define MC_CMD_INIT_RXQ_V4_IN_SIZE_OFST 0 +#define MC_CMD_INIT_RXQ_V4_IN_SIZE_LEN 4 +/* The EVQ to send events to. This is an index originally specified to + * INIT_EVQ. If DMA_MODE == PACKED_STREAM this must be equal to INSTANCE. + */ +#define MC_CMD_INIT_RXQ_V4_IN_TARGET_EVQ_OFST 4 +#define MC_CMD_INIT_RXQ_V4_IN_TARGET_EVQ_LEN 4 +/* The value to put in the event data. Check hardware spec. for valid range. + * This field is ignored if DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER or DMA_MODE + * == PACKED_STREAM. + */ +#define MC_CMD_INIT_RXQ_V4_IN_LABEL_OFST 8 +#define MC_CMD_INIT_RXQ_V4_IN_LABEL_LEN 4 +/* Desired instance. Must be set to a specific instance, which is a function + * local queue index. + */ +#define MC_CMD_INIT_RXQ_V4_IN_INSTANCE_OFST 12 +#define MC_CMD_INIT_RXQ_V4_IN_INSTANCE_LEN 4 +/* There will be more flags here. */ +#define MC_CMD_INIT_RXQ_V4_IN_FLAGS_OFST 16 +#define MC_CMD_INIT_RXQ_V4_IN_FLAGS_LEN 4 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_BUFF_MODE_LBN 0 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_BUFF_MODE_WIDTH 1 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_HDR_SPLIT_LBN 1 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_HDR_SPLIT_WIDTH 1 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_TIMESTAMP_LBN 2 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_TIMESTAMP_WIDTH 1 +#define MC_CMD_INIT_RXQ_V4_IN_CRC_MODE_LBN 3 +#define MC_CMD_INIT_RXQ_V4_IN_CRC_MODE_WIDTH 4 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_CHAIN_LBN 7 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_CHAIN_WIDTH 1 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_PREFIX_LBN 8 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_PREFIX_WIDTH 1 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_DISABLE_SCATTER_LBN 9 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_DISABLE_SCATTER_WIDTH 1 +#define MC_CMD_INIT_RXQ_V4_IN_DMA_MODE_LBN 10 +#define MC_CMD_INIT_RXQ_V4_IN_DMA_MODE_WIDTH 4 +/* enum: One packet per descriptor (for normal networking) */ +#define MC_CMD_INIT_RXQ_V4_IN_SINGLE_PACKET 0x0 +/* enum: Pack multiple packets into large descriptors (for SolarCapture) */ +#define MC_CMD_INIT_RXQ_V4_IN_PACKED_STREAM 0x1 +/* enum: Pack multiple packets into large descriptors using the format designed + * to maximise packet rate. This mode uses 1 "bucket" per descriptor with + * multiple fixed-size packet buffers within each bucket. For a full + * description see SF-119419-TC. This mode is only supported by "dpdk" datapath + * firmware. + */ +#define MC_CMD_INIT_RXQ_V4_IN_EQUAL_STRIDE_SUPER_BUFFER 0x2 +/* enum: Deprecated name for EQUAL_STRIDE_SUPER_BUFFER. 
*/ +#define MC_CMD_INIT_RXQ_V4_IN_EQUAL_STRIDE_PACKED_STREAM 0x2 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_SNAPSHOT_MODE_LBN 14 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_SNAPSHOT_MODE_WIDTH 1 +#define MC_CMD_INIT_RXQ_V4_IN_PACKED_STREAM_BUFF_SIZE_LBN 15 +#define MC_CMD_INIT_RXQ_V4_IN_PACKED_STREAM_BUFF_SIZE_WIDTH 3 +#define MC_CMD_INIT_RXQ_V4_IN_PS_BUFF_1M 0x0 /* enum */ +#define MC_CMD_INIT_RXQ_V4_IN_PS_BUFF_512K 0x1 /* enum */ +#define MC_CMD_INIT_RXQ_V4_IN_PS_BUFF_256K 0x2 /* enum */ +#define MC_CMD_INIT_RXQ_V4_IN_PS_BUFF_128K 0x3 /* enum */ +#define MC_CMD_INIT_RXQ_V4_IN_PS_BUFF_64K 0x4 /* enum */ +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_WANT_OUTER_CLASSES_LBN 18 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_WANT_OUTER_CLASSES_WIDTH 1 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_FORCE_EV_MERGING_LBN 19 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_FORCE_EV_MERGING_WIDTH 1 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_NO_CONT_EV_LBN 20 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_NO_CONT_EV_WIDTH 1 +/* Owner ID to use if in buffer mode (zero if physical) */ +#define MC_CMD_INIT_RXQ_V4_IN_OWNER_ID_OFST 20 +#define MC_CMD_INIT_RXQ_V4_IN_OWNER_ID_LEN 4 +/* The port ID associated with the v-adaptor which should contain this DMAQ. */ +#define MC_CMD_INIT_RXQ_V4_IN_PORT_ID_OFST 24 +#define MC_CMD_INIT_RXQ_V4_IN_PORT_ID_LEN 4 +/* 64-bit address of 4k of 4k-aligned host memory buffer */ +#define MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_OFST 28 +#define MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_LEN 8 +#define MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_LO_OFST 28 +#define MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_HI_OFST 32 +#define MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_NUM 64 +/* Maximum length of packet to receive, if SNAPSHOT_MODE flag is set */ +#define MC_CMD_INIT_RXQ_V4_IN_SNAPSHOT_LENGTH_OFST 540 +#define MC_CMD_INIT_RXQ_V4_IN_SNAPSHOT_LENGTH_LEN 4 +/* The number of packet buffers that will be contained within each + * EQUAL_STRIDE_SUPER_BUFFER format bucket supplied by the driver. This field + * is ignored unless DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER. + */ +#define MC_CMD_INIT_RXQ_V4_IN_ES_PACKET_BUFFERS_PER_BUCKET_OFST 544 +#define MC_CMD_INIT_RXQ_V4_IN_ES_PACKET_BUFFERS_PER_BUCKET_LEN 4 +/* The length in bytes of the area in each packet buffer that can be written to + * by the adapter. This is used to store the packet prefix and the packet + * payload. This length does not include any end padding added by the driver. + * This field is ignored unless DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER. + */ +#define MC_CMD_INIT_RXQ_V4_IN_ES_MAX_DMA_LEN_OFST 548 +#define MC_CMD_INIT_RXQ_V4_IN_ES_MAX_DMA_LEN_LEN 4 +/* The length in bytes of a single packet buffer within a + * EQUAL_STRIDE_SUPER_BUFFER format bucket. This field is ignored unless + * DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER. + */ +#define MC_CMD_INIT_RXQ_V4_IN_ES_PACKET_STRIDE_OFST 552 +#define MC_CMD_INIT_RXQ_V4_IN_ES_PACKET_STRIDE_LEN 4 +/* The maximum time in nanoseconds that the datapath will be backpressured if + * there are no RX descriptors available. If the timeout is reached and there + * are still no descriptors then the packet will be dropped. A timeout of 0 + * means the datapath will never be blocked. This field is ignored unless + * DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER. + */ +#define MC_CMD_INIT_RXQ_V4_IN_ES_HEAD_OF_LINE_BLOCK_TIMEOUT_OFST 556 +#define MC_CMD_INIT_RXQ_V4_IN_ES_HEAD_OF_LINE_BLOCK_TIMEOUT_LEN 4 +/* V4 message data */ +#define MC_CMD_INIT_RXQ_V4_IN_V4_DATA_OFST 560 +#define MC_CMD_INIT_RXQ_V4_IN_V4_DATA_LEN 4 +/* Size in bytes of buffers attached to descriptors posted to this queue. 
Set + * to zero if using this message on non-QDMA based platforms. Currently in + * Riverhead there is a global limit of eight different buffer sizes across all + * active queues. A 2KB and 4KB buffer is guaranteed to be available, but a + * request for a different buffer size will fail if there are already eight + * other buffer sizes in use. In future Riverhead this limit will go away and + * any size will be accepted. + */ +#define MC_CMD_INIT_RXQ_V4_IN_BUFFER_SIZE_BYTES_OFST 560 +#define MC_CMD_INIT_RXQ_V4_IN_BUFFER_SIZE_BYTES_LEN 4 + +/* MC_CMD_INIT_RXQ_V5_IN msgrequest: INIT_RXQ request with ability to request a + * different RX packet prefix + */ +#define MC_CMD_INIT_RXQ_V5_IN_LEN 568 +/* Size, in entries */ +#define MC_CMD_INIT_RXQ_V5_IN_SIZE_OFST 0 +#define MC_CMD_INIT_RXQ_V5_IN_SIZE_LEN 4 +/* The EVQ to send events to. This is an index originally specified to + * INIT_EVQ. If DMA_MODE == PACKED_STREAM this must be equal to INSTANCE. + */ +#define MC_CMD_INIT_RXQ_V5_IN_TARGET_EVQ_OFST 4 +#define MC_CMD_INIT_RXQ_V5_IN_TARGET_EVQ_LEN 4 +/* The value to put in the event data. Check hardware spec. for valid range. + * This field is ignored if DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER or DMA_MODE + * == PACKED_STREAM. + */ +#define MC_CMD_INIT_RXQ_V5_IN_LABEL_OFST 8 +#define MC_CMD_INIT_RXQ_V5_IN_LABEL_LEN 4 +/* Desired instance. Must be set to a specific instance, which is a function + * local queue index. + */ +#define MC_CMD_INIT_RXQ_V5_IN_INSTANCE_OFST 12 +#define MC_CMD_INIT_RXQ_V5_IN_INSTANCE_LEN 4 +/* There will be more flags here. */ +#define MC_CMD_INIT_RXQ_V5_IN_FLAGS_OFST 16 +#define MC_CMD_INIT_RXQ_V5_IN_FLAGS_LEN 4 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_BUFF_MODE_LBN 0 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_BUFF_MODE_WIDTH 1 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_HDR_SPLIT_LBN 1 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_HDR_SPLIT_WIDTH 1 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_TIMESTAMP_LBN 2 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_TIMESTAMP_WIDTH 1 +#define MC_CMD_INIT_RXQ_V5_IN_CRC_MODE_LBN 3 +#define MC_CMD_INIT_RXQ_V5_IN_CRC_MODE_WIDTH 4 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_CHAIN_LBN 7 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_CHAIN_WIDTH 1 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_PREFIX_LBN 8 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_PREFIX_WIDTH 1 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_DISABLE_SCATTER_LBN 9 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_DISABLE_SCATTER_WIDTH 1 +#define MC_CMD_INIT_RXQ_V5_IN_DMA_MODE_LBN 10 +#define MC_CMD_INIT_RXQ_V5_IN_DMA_MODE_WIDTH 4 +/* enum: One packet per descriptor (for normal networking) */ +#define MC_CMD_INIT_RXQ_V5_IN_SINGLE_PACKET 0x0 +/* enum: Pack multiple packets into large descriptors (for SolarCapture) */ +#define MC_CMD_INIT_RXQ_V5_IN_PACKED_STREAM 0x1 +/* enum: Pack multiple packets into large descriptors using the format designed + * to maximise packet rate. This mode uses 1 "bucket" per descriptor with + * multiple fixed-size packet buffers within each bucket. For a full + * description see SF-119419-TC. This mode is only supported by "dpdk" datapath + * firmware. + */ +#define MC_CMD_INIT_RXQ_V5_IN_EQUAL_STRIDE_SUPER_BUFFER 0x2 +/* enum: Deprecated name for EQUAL_STRIDE_SUPER_BUFFER. 
*/ +#define MC_CMD_INIT_RXQ_V5_IN_EQUAL_STRIDE_PACKED_STREAM 0x2 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_SNAPSHOT_MODE_LBN 14 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_SNAPSHOT_MODE_WIDTH 1 +#define MC_CMD_INIT_RXQ_V5_IN_PACKED_STREAM_BUFF_SIZE_LBN 15 +#define MC_CMD_INIT_RXQ_V5_IN_PACKED_STREAM_BUFF_SIZE_WIDTH 3 +#define MC_CMD_INIT_RXQ_V5_IN_PS_BUFF_1M 0x0 /* enum */ +#define MC_CMD_INIT_RXQ_V5_IN_PS_BUFF_512K 0x1 /* enum */ +#define MC_CMD_INIT_RXQ_V5_IN_PS_BUFF_256K 0x2 /* enum */ +#define MC_CMD_INIT_RXQ_V5_IN_PS_BUFF_128K 0x3 /* enum */ +#define MC_CMD_INIT_RXQ_V5_IN_PS_BUFF_64K 0x4 /* enum */ +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_WANT_OUTER_CLASSES_LBN 18 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_WANT_OUTER_CLASSES_WIDTH 1 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_FORCE_EV_MERGING_LBN 19 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_FORCE_EV_MERGING_WIDTH 1 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_NO_CONT_EV_LBN 20 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_NO_CONT_EV_WIDTH 1 +/* Owner ID to use if in buffer mode (zero if physical) */ +#define MC_CMD_INIT_RXQ_V5_IN_OWNER_ID_OFST 20 +#define MC_CMD_INIT_RXQ_V5_IN_OWNER_ID_LEN 4 +/* The port ID associated with the v-adaptor which should contain this DMAQ. */ +#define MC_CMD_INIT_RXQ_V5_IN_PORT_ID_OFST 24 +#define MC_CMD_INIT_RXQ_V5_IN_PORT_ID_LEN 4 +/* 64-bit address of 4k of 4k-aligned host memory buffer */ +#define MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_OFST 28 +#define MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_LEN 8 +#define MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_LO_OFST 28 +#define MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_HI_OFST 32 +#define MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_NUM 64 +/* Maximum length of packet to receive, if SNAPSHOT_MODE flag is set */ +#define MC_CMD_INIT_RXQ_V5_IN_SNAPSHOT_LENGTH_OFST 540 +#define MC_CMD_INIT_RXQ_V5_IN_SNAPSHOT_LENGTH_LEN 4 +/* The number of packet buffers that will be contained within each + * EQUAL_STRIDE_SUPER_BUFFER format bucket supplied by the driver. This field + * is ignored unless DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER. + */ +#define MC_CMD_INIT_RXQ_V5_IN_ES_PACKET_BUFFERS_PER_BUCKET_OFST 544 +#define MC_CMD_INIT_RXQ_V5_IN_ES_PACKET_BUFFERS_PER_BUCKET_LEN 4 +/* The length in bytes of the area in each packet buffer that can be written to + * by the adapter. This is used to store the packet prefix and the packet + * payload. This length does not include any end padding added by the driver. + * This field is ignored unless DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER. + */ +#define MC_CMD_INIT_RXQ_V5_IN_ES_MAX_DMA_LEN_OFST 548 +#define MC_CMD_INIT_RXQ_V5_IN_ES_MAX_DMA_LEN_LEN 4 +/* The length in bytes of a single packet buffer within a + * EQUAL_STRIDE_SUPER_BUFFER format bucket. This field is ignored unless + * DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER. + */ +#define MC_CMD_INIT_RXQ_V5_IN_ES_PACKET_STRIDE_OFST 552 +#define MC_CMD_INIT_RXQ_V5_IN_ES_PACKET_STRIDE_LEN 4 +/* The maximum time in nanoseconds that the datapath will be backpressured if + * there are no RX descriptors available. If the timeout is reached and there + * are still no descriptors then the packet will be dropped. A timeout of 0 + * means the datapath will never be blocked. This field is ignored unless + * DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER. + */ +#define MC_CMD_INIT_RXQ_V5_IN_ES_HEAD_OF_LINE_BLOCK_TIMEOUT_OFST 556 +#define MC_CMD_INIT_RXQ_V5_IN_ES_HEAD_OF_LINE_BLOCK_TIMEOUT_LEN 4 +/* V4 message data */ +#define MC_CMD_INIT_RXQ_V5_IN_V4_DATA_OFST 560 +#define MC_CMD_INIT_RXQ_V5_IN_V4_DATA_LEN 4 +/* Size in bytes of buffers attached to descriptors posted to this queue. 
Set + * to zero if using this message on non-QDMA based platforms. Currently in + * Riverhead there is a global limit of eight different buffer sizes across all + * active queues. A 2KB and 4KB buffer is guaranteed to be available, but a + * request for a different buffer size will fail if there are already eight + * other buffer sizes in use. In future Riverhead this limit will go away and + * any size will be accepted. + */ +#define MC_CMD_INIT_RXQ_V5_IN_BUFFER_SIZE_BYTES_OFST 560 +#define MC_CMD_INIT_RXQ_V5_IN_BUFFER_SIZE_BYTES_LEN 4 +/* Prefix id for the RX prefix format to use on packets delivered this queue. + * Zero is always a valid prefix id and means the default prefix format + * documented for the platform. Other prefix ids can be obtained by calling + * MC_CMD_GET_RX_PREFIX_ID with a requested set of prefix fields. + */ +#define MC_CMD_INIT_RXQ_V5_IN_RX_PREFIX_ID_OFST 564 +#define MC_CMD_INIT_RXQ_V5_IN_RX_PREFIX_ID_LEN 4 + +/* MC_CMD_INIT_RXQ_OUT msgresponse */ +#define MC_CMD_INIT_RXQ_OUT_LEN 0 + +/* MC_CMD_INIT_RXQ_EXT_OUT msgresponse */ +#define MC_CMD_INIT_RXQ_EXT_OUT_LEN 0 + +/* MC_CMD_INIT_RXQ_V3_OUT msgresponse */ +#define MC_CMD_INIT_RXQ_V3_OUT_LEN 0 + +/* MC_CMD_INIT_RXQ_V4_OUT msgresponse */ +#define MC_CMD_INIT_RXQ_V4_OUT_LEN 0 + +/* MC_CMD_INIT_RXQ_V5_OUT msgresponse */ +#define MC_CMD_INIT_RXQ_V5_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_INIT_TXQ + */ +#define MC_CMD_INIT_TXQ 0x82 +#undef MC_CMD_0x82_PRIVILEGE_CTG + +#define MC_CMD_0x82_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_INIT_TXQ_IN msgrequest: Legacy INIT_TXQ request. Use extended version + * in new code. + */ +#define MC_CMD_INIT_TXQ_IN_LENMIN 36 +#define MC_CMD_INIT_TXQ_IN_LENMAX 252 +#define MC_CMD_INIT_TXQ_IN_LENMAX_MCDI2 1020 +#define MC_CMD_INIT_TXQ_IN_LEN(num) (28+8*(num)) +#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_NUM(len) (((len)-28)/8) +/* Size, in entries */ +#define MC_CMD_INIT_TXQ_IN_SIZE_OFST 0 +#define MC_CMD_INIT_TXQ_IN_SIZE_LEN 4 +/* The EVQ to send events to. This is an index originally specified to + * INIT_EVQ. + */ +#define MC_CMD_INIT_TXQ_IN_TARGET_EVQ_OFST 4 +#define MC_CMD_INIT_TXQ_IN_TARGET_EVQ_LEN 4 +/* The value to put in the event data. Check hardware spec. for valid range. */ +#define MC_CMD_INIT_TXQ_IN_LABEL_OFST 8 +#define MC_CMD_INIT_TXQ_IN_LABEL_LEN 4 +/* Desired instance. Must be set to a specific instance, which is a function + * local queue index. + */ +#define MC_CMD_INIT_TXQ_IN_INSTANCE_OFST 12 +#define MC_CMD_INIT_TXQ_IN_INSTANCE_LEN 4 +/* There will be more flags here. 
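+ *
+ * (Illustrative note: IP_CSUM_DIS and TCP_CSUM_DIS below are disable bits,
+ * so leaving them clear keeps IP and TCP checksum insertion enabled; a plain
+ * physical-mode queue with checksum offload left on would typically pass all
+ * of these flags as zero.)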
*/ +#define MC_CMD_INIT_TXQ_IN_FLAGS_OFST 16 +#define MC_CMD_INIT_TXQ_IN_FLAGS_LEN 4 +#define MC_CMD_INIT_TXQ_IN_FLAG_BUFF_MODE_LBN 0 +#define MC_CMD_INIT_TXQ_IN_FLAG_BUFF_MODE_WIDTH 1 +#define MC_CMD_INIT_TXQ_IN_FLAG_IP_CSUM_DIS_LBN 1 +#define MC_CMD_INIT_TXQ_IN_FLAG_IP_CSUM_DIS_WIDTH 1 +#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_CSUM_DIS_LBN 2 +#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_CSUM_DIS_WIDTH 1 +#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_UDP_ONLY_LBN 3 +#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_UDP_ONLY_WIDTH 1 +#define MC_CMD_INIT_TXQ_IN_CRC_MODE_LBN 4 +#define MC_CMD_INIT_TXQ_IN_CRC_MODE_WIDTH 4 +#define MC_CMD_INIT_TXQ_IN_FLAG_TIMESTAMP_LBN 8 +#define MC_CMD_INIT_TXQ_IN_FLAG_TIMESTAMP_WIDTH 1 +#define MC_CMD_INIT_TXQ_IN_FLAG_PACER_BYPASS_LBN 9 +#define MC_CMD_INIT_TXQ_IN_FLAG_PACER_BYPASS_WIDTH 1 +#define MC_CMD_INIT_TXQ_IN_FLAG_INNER_IP_CSUM_EN_LBN 10 +#define MC_CMD_INIT_TXQ_IN_FLAG_INNER_IP_CSUM_EN_WIDTH 1 +#define MC_CMD_INIT_TXQ_IN_FLAG_INNER_TCP_CSUM_EN_LBN 11 +#define MC_CMD_INIT_TXQ_IN_FLAG_INNER_TCP_CSUM_EN_WIDTH 1 +/* Owner ID to use if in buffer mode (zero if physical) */ +#define MC_CMD_INIT_TXQ_IN_OWNER_ID_OFST 20 +#define MC_CMD_INIT_TXQ_IN_OWNER_ID_LEN 4 +/* The port ID associated with the v-adaptor which should contain this DMAQ. */ +#define MC_CMD_INIT_TXQ_IN_PORT_ID_OFST 24 +#define MC_CMD_INIT_TXQ_IN_PORT_ID_LEN 4 +/* 64-bit address of 4k of 4k-aligned host memory buffer */ +#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_OFST 28 +#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_LEN 8 +#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_LO_OFST 28 +#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_HI_OFST 32 +#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_MINNUM 1 +#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_MAXNUM 28 +#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_MAXNUM_MCDI2 124 + +/* MC_CMD_INIT_TXQ_EXT_IN msgrequest: Extended INIT_TXQ with additional mode + * flags + */ +#define MC_CMD_INIT_TXQ_EXT_IN_LEN 544 +/* Size, in entries */ +#define MC_CMD_INIT_TXQ_EXT_IN_SIZE_OFST 0 +#define MC_CMD_INIT_TXQ_EXT_IN_SIZE_LEN 4 +/* The EVQ to send events to. This is an index originally specified to + * INIT_EVQ. + */ +#define MC_CMD_INIT_TXQ_EXT_IN_TARGET_EVQ_OFST 4 +#define MC_CMD_INIT_TXQ_EXT_IN_TARGET_EVQ_LEN 4 +/* The value to put in the event data. Check hardware spec. for valid range. */ +#define MC_CMD_INIT_TXQ_EXT_IN_LABEL_OFST 8 +#define MC_CMD_INIT_TXQ_EXT_IN_LABEL_LEN 4 +/* Desired instance. Must be set to a specific instance, which is a function + * local queue index. + */ +#define MC_CMD_INIT_TXQ_EXT_IN_INSTANCE_OFST 12 +#define MC_CMD_INIT_TXQ_EXT_IN_INSTANCE_LEN 4 +/* There will be more flags here. 
*/ +#define MC_CMD_INIT_TXQ_EXT_IN_FLAGS_OFST 16 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAGS_LEN 4 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_BUFF_MODE_LBN 0 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_BUFF_MODE_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_IP_CSUM_DIS_LBN 1 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_IP_CSUM_DIS_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_CSUM_DIS_LBN 2 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_CSUM_DIS_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_UDP_ONLY_LBN 3 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_UDP_ONLY_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_CRC_MODE_LBN 4 +#define MC_CMD_INIT_TXQ_EXT_IN_CRC_MODE_WIDTH 4 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TIMESTAMP_LBN 8 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TIMESTAMP_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_PACER_BYPASS_LBN 9 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_PACER_BYPASS_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_IP_CSUM_EN_LBN 10 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_IP_CSUM_EN_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN_LBN 11 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TSOV2_EN_LBN 12 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TSOV2_EN_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_LBN 13 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_UTHRESH_LBN 14 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_UTHRESH_WIDTH 1 +/* Owner ID to use if in buffer mode (zero if physical) */ +#define MC_CMD_INIT_TXQ_EXT_IN_OWNER_ID_OFST 20 +#define MC_CMD_INIT_TXQ_EXT_IN_OWNER_ID_LEN 4 +/* The port ID associated with the v-adaptor which should contain this DMAQ. */ +#define MC_CMD_INIT_TXQ_EXT_IN_PORT_ID_OFST 24 +#define MC_CMD_INIT_TXQ_EXT_IN_PORT_ID_LEN 4 +/* 64-bit address of 4k of 4k-aligned host memory buffer */ +#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_OFST 28 +#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_LEN 8 +#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_LO_OFST 28 +#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_HI_OFST 32 +#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_MINNUM 1 +#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_MAXNUM 64 +#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_MAXNUM_MCDI2 64 +/* Flags related to Qbb flow control mode. */ +#define MC_CMD_INIT_TXQ_EXT_IN_QBB_FLAGS_OFST 540 +#define MC_CMD_INIT_TXQ_EXT_IN_QBB_FLAGS_LEN 4 +#define MC_CMD_INIT_TXQ_EXT_IN_QBB_ENABLE_LBN 0 +#define MC_CMD_INIT_TXQ_EXT_IN_QBB_ENABLE_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_QBB_PRIORITY_LBN 1 +#define MC_CMD_INIT_TXQ_EXT_IN_QBB_PRIORITY_WIDTH 3 + +/* MC_CMD_INIT_TXQ_OUT msgresponse */ +#define MC_CMD_INIT_TXQ_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_FINI_EVQ + * Teardown an EVQ. + * + * All DMAQs or EVQs that point to the EVQ to tear down must be torn down first + * or the operation will fail with EBUSY + */ +#define MC_CMD_FINI_EVQ 0x83 +#undef MC_CMD_0x83_PRIVILEGE_CTG + +#define MC_CMD_0x83_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_FINI_EVQ_IN msgrequest */ +#define MC_CMD_FINI_EVQ_IN_LEN 4 +/* Instance of EVQ to destroy. Should be the same instance as that previously + * passed to INIT_EVQ + */ +#define MC_CMD_FINI_EVQ_IN_INSTANCE_OFST 0 +#define MC_CMD_FINI_EVQ_IN_INSTANCE_LEN 4 + +/* MC_CMD_FINI_EVQ_OUT msgresponse */ +#define MC_CMD_FINI_EVQ_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_FINI_RXQ + * Teardown a RXQ. 
+ */ +#define MC_CMD_FINI_RXQ 0x84 +#undef MC_CMD_0x84_PRIVILEGE_CTG + +#define MC_CMD_0x84_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_FINI_RXQ_IN msgrequest */ +#define MC_CMD_FINI_RXQ_IN_LEN 4 +/* Instance of RXQ to destroy */ +#define MC_CMD_FINI_RXQ_IN_INSTANCE_OFST 0 +#define MC_CMD_FINI_RXQ_IN_INSTANCE_LEN 4 + +/* MC_CMD_FINI_RXQ_OUT msgresponse */ +#define MC_CMD_FINI_RXQ_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_FINI_TXQ + * Teardown a TXQ. + */ +#define MC_CMD_FINI_TXQ 0x85 +#undef MC_CMD_0x85_PRIVILEGE_CTG + +#define MC_CMD_0x85_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_FINI_TXQ_IN msgrequest */ +#define MC_CMD_FINI_TXQ_IN_LEN 4 +/* Instance of TXQ to destroy */ +#define MC_CMD_FINI_TXQ_IN_INSTANCE_OFST 0 +#define MC_CMD_FINI_TXQ_IN_INSTANCE_LEN 4 + +/* MC_CMD_FINI_TXQ_OUT msgresponse */ +#define MC_CMD_FINI_TXQ_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_DRIVER_EVENT + * Generate an event on an EVQ belonging to the function issuing the command. + */ +#define MC_CMD_DRIVER_EVENT 0x86 +#undef MC_CMD_0x86_PRIVILEGE_CTG + +#define MC_CMD_0x86_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_DRIVER_EVENT_IN msgrequest */ +#define MC_CMD_DRIVER_EVENT_IN_LEN 12 +/* Handle of target EVQ */ +#define MC_CMD_DRIVER_EVENT_IN_EVQ_OFST 0 +#define MC_CMD_DRIVER_EVENT_IN_EVQ_LEN 4 +/* Bits 0 - 63 of event */ +#define MC_CMD_DRIVER_EVENT_IN_DATA_OFST 4 +#define MC_CMD_DRIVER_EVENT_IN_DATA_LEN 8 +#define MC_CMD_DRIVER_EVENT_IN_DATA_LO_OFST 4 +#define MC_CMD_DRIVER_EVENT_IN_DATA_HI_OFST 8 + +/* MC_CMD_DRIVER_EVENT_OUT msgresponse */ +#define MC_CMD_DRIVER_EVENT_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_PROXY_CMD + * Execute an arbitrary MCDI command on behalf of a different function, subject + * to security restrictions. The command to be proxied follows immediately + * afterward in the host buffer (or on the UART). This command supercedes + * MC_CMD_SET_FUNC, which remains available for Siena but now deprecated. + */ +#define MC_CMD_PROXY_CMD 0x5b +#undef MC_CMD_0x5b_PRIVILEGE_CTG + +#define MC_CMD_0x5b_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_PROXY_CMD_IN msgrequest */ +#define MC_CMD_PROXY_CMD_IN_LEN 4 +/* The handle of the target function. */ +#define MC_CMD_PROXY_CMD_IN_TARGET_OFST 0 +#define MC_CMD_PROXY_CMD_IN_TARGET_LEN 4 +#define MC_CMD_PROXY_CMD_IN_TARGET_PF_LBN 0 +#define MC_CMD_PROXY_CMD_IN_TARGET_PF_WIDTH 16 +#define MC_CMD_PROXY_CMD_IN_TARGET_VF_LBN 16 +#define MC_CMD_PROXY_CMD_IN_TARGET_VF_WIDTH 16 +#define MC_CMD_PROXY_CMD_IN_VF_NULL 0xffff /* enum */ + +/* MC_CMD_PROXY_CMD_OUT msgresponse */ +#define MC_CMD_PROXY_CMD_OUT_LEN 0 + +/* MC_PROXY_STATUS_BUFFER structuredef: Host memory status buffer used to + * manage proxied requests + */ +#define MC_PROXY_STATUS_BUFFER_LEN 16 +/* Handle allocated by the firmware for this proxy transaction */ +#define MC_PROXY_STATUS_BUFFER_HANDLE_OFST 0 +#define MC_PROXY_STATUS_BUFFER_HANDLE_LEN 4 +/* enum: An invalid handle. */ +#define MC_PROXY_STATUS_BUFFER_HANDLE_INVALID 0x0 +#define MC_PROXY_STATUS_BUFFER_HANDLE_LBN 0 +#define MC_PROXY_STATUS_BUFFER_HANDLE_WIDTH 32 +/* The requesting physical function number */ +#define MC_PROXY_STATUS_BUFFER_PF_OFST 4 +#define MC_PROXY_STATUS_BUFFER_PF_LEN 2 +#define MC_PROXY_STATUS_BUFFER_PF_LBN 32 +#define MC_PROXY_STATUS_BUFFER_PF_WIDTH 16 +/* The requesting virtual function number. Set to VF_NULL if the target is a + * PF. 
+ */ +#define MC_PROXY_STATUS_BUFFER_VF_OFST 6 +#define MC_PROXY_STATUS_BUFFER_VF_LEN 2 +#define MC_PROXY_STATUS_BUFFER_VF_LBN 48 +#define MC_PROXY_STATUS_BUFFER_VF_WIDTH 16 +/* The target function RID. */ +#define MC_PROXY_STATUS_BUFFER_RID_OFST 8 +#define MC_PROXY_STATUS_BUFFER_RID_LEN 2 +#define MC_PROXY_STATUS_BUFFER_RID_LBN 64 +#define MC_PROXY_STATUS_BUFFER_RID_WIDTH 16 +/* The status of the proxy as described in MC_CMD_PROXY_COMPLETE. */ +#define MC_PROXY_STATUS_BUFFER_STATUS_OFST 10 +#define MC_PROXY_STATUS_BUFFER_STATUS_LEN 2 +#define MC_PROXY_STATUS_BUFFER_STATUS_LBN 80 +#define MC_PROXY_STATUS_BUFFER_STATUS_WIDTH 16 +/* If a request is authorized rather than carried out by the host, this is the + * elevated privilege mask granted to the requesting function. + */ +#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_OFST 12 +#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_LEN 4 +#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_LBN 96 +#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_WIDTH 32 + + +/***********************************/ +/* MC_CMD_PROXY_CONFIGURE + * Enable/disable authorization of MCDI requests from unprivileged functions by + * a designated admin function + */ +#define MC_CMD_PROXY_CONFIGURE 0x58 +#undef MC_CMD_0x58_PRIVILEGE_CTG + +#define MC_CMD_0x58_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_PROXY_CONFIGURE_IN msgrequest */ +#define MC_CMD_PROXY_CONFIGURE_IN_LEN 108 +#define MC_CMD_PROXY_CONFIGURE_IN_FLAGS_OFST 0 +#define MC_CMD_PROXY_CONFIGURE_IN_FLAGS_LEN 4 +#define MC_CMD_PROXY_CONFIGURE_IN_ENABLE_LBN 0 +#define MC_CMD_PROXY_CONFIGURE_IN_ENABLE_WIDTH 1 +/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS + * of blocks, each of the size REQUEST_BLOCK_SIZE. + */ +#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_OFST 4 +#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_LEN 8 +#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_LO_OFST 4 +#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_HI_OFST 8 +/* Must be a power of 2 */ +#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BLOCK_SIZE_OFST 12 +#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BLOCK_SIZE_LEN 4 +/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS + * of blocks, each of the size REPLY_BLOCK_SIZE. + */ +#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_OFST 16 +#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_LEN 8 +#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_LO_OFST 16 +#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_HI_OFST 20 +/* Must be a power of 2 */ +#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BLOCK_SIZE_OFST 24 +#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BLOCK_SIZE_LEN 4 +/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS + * of blocks, each of the size STATUS_BLOCK_SIZE. This buffer is only needed if + * host intends to complete proxied operations by using MC_CMD_PROXY_CMD. 
+ */ +#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_OFST 28 +#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_LEN 8 +#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_LO_OFST 28 +#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_HI_OFST 32 +/* Must be a power of 2, or zero if this buffer is not provided */ +#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BLOCK_SIZE_OFST 36 +#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BLOCK_SIZE_LEN 4 +/* Applies to all three buffers */ +#define MC_CMD_PROXY_CONFIGURE_IN_NUM_BLOCKS_OFST 40 +#define MC_CMD_PROXY_CONFIGURE_IN_NUM_BLOCKS_LEN 4 +/* A bit mask defining which MCDI operations may be proxied */ +#define MC_CMD_PROXY_CONFIGURE_IN_ALLOWED_MCDI_MASK_OFST 44 +#define MC_CMD_PROXY_CONFIGURE_IN_ALLOWED_MCDI_MASK_LEN 64 + +/* MC_CMD_PROXY_CONFIGURE_EXT_IN msgrequest */ +#define MC_CMD_PROXY_CONFIGURE_EXT_IN_LEN 112 +#define MC_CMD_PROXY_CONFIGURE_EXT_IN_FLAGS_OFST 0 +#define MC_CMD_PROXY_CONFIGURE_EXT_IN_FLAGS_LEN 4 +#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ENABLE_LBN 0 +#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ENABLE_WIDTH 1 +/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS + * of blocks, each of the size REQUEST_BLOCK_SIZE. + */ +#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_OFST 4 +#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_LEN 8 +#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_LO_OFST 4 +#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_HI_OFST 8 +/* Must be a power of 2 */ +#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BLOCK_SIZE_OFST 12 +#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BLOCK_SIZE_LEN 4 +/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS + * of blocks, each of the size REPLY_BLOCK_SIZE. + */ +#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_OFST 16 +#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_LEN 8 +#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_LO_OFST 16 +#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_HI_OFST 20 +/* Must be a power of 2 */ +#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BLOCK_SIZE_OFST 24 +#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BLOCK_SIZE_LEN 4 +/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS + * of blocks, each of the size STATUS_BLOCK_SIZE. This buffer is only needed if + * host intends to complete proxied operations by using MC_CMD_PROXY_CMD. 
+ */ +#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_OFST 28 +#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_LEN 8 +#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_LO_OFST 28 +#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_HI_OFST 32 +/* Must be a power of 2, or zero if this buffer is not provided */ +#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BLOCK_SIZE_OFST 36 +#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BLOCK_SIZE_LEN 4 +/* Applies to all three buffers */ +#define MC_CMD_PROXY_CONFIGURE_EXT_IN_NUM_BLOCKS_OFST 40 +#define MC_CMD_PROXY_CONFIGURE_EXT_IN_NUM_BLOCKS_LEN 4 +/* A bit mask defining which MCDI operations may be proxied */ +#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ALLOWED_MCDI_MASK_OFST 44 +#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ALLOWED_MCDI_MASK_LEN 64 +#define MC_CMD_PROXY_CONFIGURE_EXT_IN_RESERVED_OFST 108 +#define MC_CMD_PROXY_CONFIGURE_EXT_IN_RESERVED_LEN 4 + +/* MC_CMD_PROXY_CONFIGURE_OUT msgresponse */ +#define MC_CMD_PROXY_CONFIGURE_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_PROXY_COMPLETE + * Tells FW that a requested proxy operation has either been completed (by + * using MC_CMD_PROXY_CMD) or authorized/declined. May only be sent by the + * function that enabled proxying/authorization (by using + * MC_CMD_PROXY_CONFIGURE). + */ +#define MC_CMD_PROXY_COMPLETE 0x5f +#undef MC_CMD_0x5f_PRIVILEGE_CTG + +#define MC_CMD_0x5f_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_PROXY_COMPLETE_IN msgrequest */ +#define MC_CMD_PROXY_COMPLETE_IN_LEN 12 +#define MC_CMD_PROXY_COMPLETE_IN_BLOCK_INDEX_OFST 0 +#define MC_CMD_PROXY_COMPLETE_IN_BLOCK_INDEX_LEN 4 +#define MC_CMD_PROXY_COMPLETE_IN_STATUS_OFST 4 +#define MC_CMD_PROXY_COMPLETE_IN_STATUS_LEN 4 +/* enum: The operation has been completed by using MC_CMD_PROXY_CMD, the reply + * is stored in the REPLY_BUFF. + */ +#define MC_CMD_PROXY_COMPLETE_IN_COMPLETE 0x0 +/* enum: The operation has been authorized. The originating function may now + * try again. + */ +#define MC_CMD_PROXY_COMPLETE_IN_AUTHORIZED 0x1 +/* enum: The operation has been declined. */ +#define MC_CMD_PROXY_COMPLETE_IN_DECLINED 0x2 +/* enum: The authorization failed because the relevant application did not + * respond in time. + */ +#define MC_CMD_PROXY_COMPLETE_IN_TIMEDOUT 0x3 +#define MC_CMD_PROXY_COMPLETE_IN_HANDLE_OFST 8 +#define MC_CMD_PROXY_COMPLETE_IN_HANDLE_LEN 4 + +/* MC_CMD_PROXY_COMPLETE_OUT msgresponse */ +#define MC_CMD_PROXY_COMPLETE_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_ALLOC_BUFTBL_CHUNK + * Allocate a set of buffer table entries using the specified owner ID. This + * operation allocates the required buffer table entries (and fails if it + * cannot do so). The buffer table entries will initially be zeroed. + */ +#define MC_CMD_ALLOC_BUFTBL_CHUNK 0x87 +#undef MC_CMD_0x87_PRIVILEGE_CTG + +#define MC_CMD_0x87_PRIVILEGE_CTG SRIOV_CTG_ONLOAD + +/* MC_CMD_ALLOC_BUFTBL_CHUNK_IN msgrequest */ +#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_LEN 8 +/* Owner ID to use */ +#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_OWNER_OFST 0 +#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_OWNER_LEN 4 +/* Size of buffer table pages to use, in bytes (note that only a few values are + * legal on any specific hardware). 
+ */ +#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_PAGE_SIZE_OFST 4 +#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_PAGE_SIZE_LEN 4 + +/* MC_CMD_ALLOC_BUFTBL_CHUNK_OUT msgresponse */ +#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_LEN 12 +#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_HANDLE_OFST 0 +#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_HANDLE_LEN 4 +#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_NUMENTRIES_OFST 4 +#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_NUMENTRIES_LEN 4 +/* Buffer table IDs for use in DMA descriptors. */ +#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_ID_OFST 8 +#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_ID_LEN 4 + + +/***********************************/ +/* MC_CMD_PROGRAM_BUFTBL_ENTRIES + * Reprogram a set of buffer table entries in the specified chunk. + */ +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES 0x88 +#undef MC_CMD_0x88_PRIVILEGE_CTG + +#define MC_CMD_0x88_PRIVILEGE_CTG SRIOV_CTG_ONLOAD + +/* MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN msgrequest */ +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMIN 20 +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMAX 268 +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMAX_MCDI2 268 +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LEN(num) (12+8*(num)) +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_NUM(len) (((len)-12)/8) +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_HANDLE_OFST 0 +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_HANDLE_LEN 4 +/* ID */ +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_FIRSTID_OFST 4 +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_FIRSTID_LEN 4 +/* Num entries */ +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_NUMENTRIES_OFST 8 +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_NUMENTRIES_LEN 4 +/* Buffer table entry address */ +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_OFST 12 +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LEN 8 +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LO_OFST 12 +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_HI_OFST 16 +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MINNUM 1 +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MAXNUM 32 +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MAXNUM_MCDI2 32 + +/* MC_CMD_PROGRAM_BUFTBL_ENTRIES_OUT msgresponse */ +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_FREE_BUFTBL_CHUNK + */ +#define MC_CMD_FREE_BUFTBL_CHUNK 0x89 +#undef MC_CMD_0x89_PRIVILEGE_CTG + +#define MC_CMD_0x89_PRIVILEGE_CTG SRIOV_CTG_ONLOAD + +/* MC_CMD_FREE_BUFTBL_CHUNK_IN msgrequest */ +#define MC_CMD_FREE_BUFTBL_CHUNK_IN_LEN 4 +#define MC_CMD_FREE_BUFTBL_CHUNK_IN_HANDLE_OFST 0 +#define MC_CMD_FREE_BUFTBL_CHUNK_IN_HANDLE_LEN 4 + +/* MC_CMD_FREE_BUFTBL_CHUNK_OUT msgresponse */ +#define MC_CMD_FREE_BUFTBL_CHUNK_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_FILTER_OP + * Multiplexed MCDI call for filter operations + */ +#define MC_CMD_FILTER_OP 0x8a +#undef MC_CMD_0x8a_PRIVILEGE_CTG + +#define MC_CMD_0x8a_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_FILTER_OP_IN msgrequest */ +#define MC_CMD_FILTER_OP_IN_LEN 108 +/* identifies the type of operation requested */ +#define MC_CMD_FILTER_OP_IN_OP_OFST 0 +#define MC_CMD_FILTER_OP_IN_OP_LEN 4 +/* enum: single-recipient filter insert */ +#define MC_CMD_FILTER_OP_IN_OP_INSERT 0x0 +/* enum: single-recipient filter remove */ +#define MC_CMD_FILTER_OP_IN_OP_REMOVE 0x1 +/* enum: multi-recipient filter subscribe */ +#define MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE 0x2 +/* enum: multi-recipient filter unsubscribe */ +#define MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE 0x3 +/* enum: replace one recipient with another (warning - the filter handle may + * change) + */ +#define 
MC_CMD_FILTER_OP_IN_OP_REPLACE 0x4 +/* filter handle (for remove / unsubscribe operations) */ +#define MC_CMD_FILTER_OP_IN_HANDLE_OFST 4 +#define MC_CMD_FILTER_OP_IN_HANDLE_LEN 8 +#define MC_CMD_FILTER_OP_IN_HANDLE_LO_OFST 4 +#define MC_CMD_FILTER_OP_IN_HANDLE_HI_OFST 8 +/* The port ID associated with the v-adaptor which should contain this filter. + */ +#define MC_CMD_FILTER_OP_IN_PORT_ID_OFST 12 +#define MC_CMD_FILTER_OP_IN_PORT_ID_LEN 4 +/* fields to include in match criteria */ +#define MC_CMD_FILTER_OP_IN_MATCH_FIELDS_OFST 16 +#define MC_CMD_FILTER_OP_IN_MATCH_FIELDS_LEN 4 +#define MC_CMD_FILTER_OP_IN_MATCH_SRC_IP_LBN 0 +#define MC_CMD_FILTER_OP_IN_MATCH_SRC_IP_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_MATCH_DST_IP_LBN 1 +#define MC_CMD_FILTER_OP_IN_MATCH_DST_IP_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_MATCH_SRC_MAC_LBN 2 +#define MC_CMD_FILTER_OP_IN_MATCH_SRC_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_MATCH_SRC_PORT_LBN 3 +#define MC_CMD_FILTER_OP_IN_MATCH_SRC_PORT_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_MATCH_DST_MAC_LBN 4 +#define MC_CMD_FILTER_OP_IN_MATCH_DST_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_MATCH_DST_PORT_LBN 5 +#define MC_CMD_FILTER_OP_IN_MATCH_DST_PORT_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_MATCH_ETHER_TYPE_LBN 6 +#define MC_CMD_FILTER_OP_IN_MATCH_ETHER_TYPE_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_MATCH_INNER_VLAN_LBN 7 +#define MC_CMD_FILTER_OP_IN_MATCH_INNER_VLAN_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_MATCH_OUTER_VLAN_LBN 8 +#define MC_CMD_FILTER_OP_IN_MATCH_OUTER_VLAN_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_MATCH_IP_PROTO_LBN 9 +#define MC_CMD_FILTER_OP_IN_MATCH_IP_PROTO_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF0_LBN 10 +#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF0_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF1_LBN 11 +#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF1_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_LBN 30 +#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_LBN 31 +#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_WIDTH 1 +/* receive destination */ +#define MC_CMD_FILTER_OP_IN_RX_DEST_OFST 20 +#define MC_CMD_FILTER_OP_IN_RX_DEST_LEN 4 +/* enum: drop packets */ +#define MC_CMD_FILTER_OP_IN_RX_DEST_DROP 0x0 +/* enum: receive to host */ +#define MC_CMD_FILTER_OP_IN_RX_DEST_HOST 0x1 +/* enum: receive to MC */ +#define MC_CMD_FILTER_OP_IN_RX_DEST_MC 0x2 +/* enum: loop back to TXDP 0 */ +#define MC_CMD_FILTER_OP_IN_RX_DEST_TX0 0x3 +/* enum: loop back to TXDP 1 */ +#define MC_CMD_FILTER_OP_IN_RX_DEST_TX1 0x4 +/* receive queue handle (for multiple queue modes, this is the base queue) */ +#define MC_CMD_FILTER_OP_IN_RX_QUEUE_OFST 24 +#define MC_CMD_FILTER_OP_IN_RX_QUEUE_LEN 4 +/* receive mode */ +#define MC_CMD_FILTER_OP_IN_RX_MODE_OFST 28 +#define MC_CMD_FILTER_OP_IN_RX_MODE_LEN 4 +/* enum: receive to just the specified queue */ +#define MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE 0x0 +/* enum: receive to multiple queues using RSS context */ +#define MC_CMD_FILTER_OP_IN_RX_MODE_RSS 0x1 +/* enum: receive to multiple queues using .1p mapping */ +#define MC_CMD_FILTER_OP_IN_RX_MODE_DOT1P_MAPPING 0x2 +/* enum: install a filter entry that will never match; for test purposes only + */ +#define MC_CMD_FILTER_OP_IN_RX_MODE_TEST_NEVER_MATCH 0x80000000 +/* RSS context (for RX_MODE_RSS) or .1p mapping handle (for + * RX_MODE_DOT1P_MAPPING), as returned by MC_CMD_RSS_CONTEXT_ALLOC or + * MC_CMD_DOT1P_MAPPING_ALLOC. 
+ */ +#define MC_CMD_FILTER_OP_IN_RX_CONTEXT_OFST 32 +#define MC_CMD_FILTER_OP_IN_RX_CONTEXT_LEN 4 +/* transmit domain (reserved; set to 0) */ +#define MC_CMD_FILTER_OP_IN_TX_DOMAIN_OFST 36 +#define MC_CMD_FILTER_OP_IN_TX_DOMAIN_LEN 4 +/* transmit destination (either set the MAC and/or PM bits for explicit + * control, or set this field to TX_DEST_DEFAULT for sensible default + * behaviour) + */ +#define MC_CMD_FILTER_OP_IN_TX_DEST_OFST 40 +#define MC_CMD_FILTER_OP_IN_TX_DEST_LEN 4 +/* enum: request default behaviour (based on filter type) */ +#define MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT 0xffffffff +#define MC_CMD_FILTER_OP_IN_TX_DEST_MAC_LBN 0 +#define MC_CMD_FILTER_OP_IN_TX_DEST_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_TX_DEST_PM_LBN 1 +#define MC_CMD_FILTER_OP_IN_TX_DEST_PM_WIDTH 1 +/* source MAC address to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_IN_SRC_MAC_OFST 44 +#define MC_CMD_FILTER_OP_IN_SRC_MAC_LEN 6 +/* source port to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_IN_SRC_PORT_OFST 50 +#define MC_CMD_FILTER_OP_IN_SRC_PORT_LEN 2 +/* destination MAC address to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_IN_DST_MAC_OFST 52 +#define MC_CMD_FILTER_OP_IN_DST_MAC_LEN 6 +/* destination port to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_IN_DST_PORT_OFST 58 +#define MC_CMD_FILTER_OP_IN_DST_PORT_LEN 2 +/* Ethernet type to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_IN_ETHER_TYPE_OFST 60 +#define MC_CMD_FILTER_OP_IN_ETHER_TYPE_LEN 2 +/* Inner VLAN tag to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_IN_INNER_VLAN_OFST 62 +#define MC_CMD_FILTER_OP_IN_INNER_VLAN_LEN 2 +/* Outer VLAN tag to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_IN_OUTER_VLAN_OFST 64 +#define MC_CMD_FILTER_OP_IN_OUTER_VLAN_LEN 2 +/* IP protocol to match (in low byte; set high byte to 0) */ +#define MC_CMD_FILTER_OP_IN_IP_PROTO_OFST 66 +#define MC_CMD_FILTER_OP_IN_IP_PROTO_LEN 2 +/* Firmware defined register 0 to match (reserved; set to 0) */ +#define MC_CMD_FILTER_OP_IN_FWDEF0_OFST 68 +#define MC_CMD_FILTER_OP_IN_FWDEF0_LEN 4 +/* Firmware defined register 1 to match (reserved; set to 0) */ +#define MC_CMD_FILTER_OP_IN_FWDEF1_OFST 72 +#define MC_CMD_FILTER_OP_IN_FWDEF1_LEN 4 +/* source IP address to match (as bytes in network order; set last 12 bytes to + * 0 for IPv4 address) + */ +#define MC_CMD_FILTER_OP_IN_SRC_IP_OFST 76 +#define MC_CMD_FILTER_OP_IN_SRC_IP_LEN 16 +/* destination IP address to match (as bytes in network order; set last 12 + * bytes to 0 for IPv4 address) + */ +#define MC_CMD_FILTER_OP_IN_DST_IP_OFST 92 +#define MC_CMD_FILTER_OP_IN_DST_IP_LEN 16 + +/* MC_CMD_FILTER_OP_EXT_IN msgrequest: Extension to MC_CMD_FILTER_OP_IN to + * include handling of VXLAN/NVGRE encapsulated frame filtering (which is + * supported on Medford only). + */ +#define MC_CMD_FILTER_OP_EXT_IN_LEN 172 +/* identifies the type of operation requested */ +#define MC_CMD_FILTER_OP_EXT_IN_OP_OFST 0 +#define MC_CMD_FILTER_OP_EXT_IN_OP_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_FILTER_OP_IN/OP */ +/* filter handle (for remove / unsubscribe operations) */ +#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_OFST 4 +#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_LEN 8 +#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_LO_OFST 4 +#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_HI_OFST 8 +/* The port ID associated with the v-adaptor which should contain this filter. 
+ */ +#define MC_CMD_FILTER_OP_EXT_IN_PORT_ID_OFST 12 +#define MC_CMD_FILTER_OP_EXT_IN_PORT_ID_LEN 4 +/* fields to include in match criteria */ +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_FIELDS_OFST 16 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_FIELDS_LEN 4 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_IP_LBN 0 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_IP_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_IP_LBN 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_IP_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_MAC_LBN 2 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_PORT_LBN 3 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_PORT_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_MAC_LBN 4 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_PORT_LBN 5 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_PORT_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_LBN 6 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_INNER_VLAN_LBN 7 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_INNER_VLAN_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_OUTER_VLAN_LBN 8 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_OUTER_VLAN_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_LBN 9 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_FWDEF0_LBN 10 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_FWDEF0_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_VNI_OR_VSID_LBN 11 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_VNI_OR_VSID_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_IP_LBN 12 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_IP_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_IP_LBN 13 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_IP_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_MAC_LBN 14 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_PORT_LBN 15 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_PORT_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_MAC_LBN 16 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_PORT_LBN 17 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_PORT_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_ETHER_TYPE_LBN 18 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_ETHER_TYPE_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_INNER_VLAN_LBN 19 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_INNER_VLAN_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_OUTER_VLAN_LBN 20 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_OUTER_VLAN_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_IP_PROTO_LBN 21 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_IP_PROTO_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF0_LBN 22 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF0_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF1_LBN 23 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF1_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_LBN 24 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN 25 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_LBN 30 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_LBN 31 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_WIDTH 1 
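A minimal, self-contained sketch of how the _OFST/_LEN/_LBN constants of MC_CMD_FILTER_OP_EXT_IN are typically combined to populate a request buffer. This is illustrative only and not taken from the upstream header or the sfc driver; the helper names are hypothetical, the handful of macro values used are copied from the definitions in this section so the snippet compiles on its own, and little-endian encoding of MCDI dwords is an assumption of the sketch.

#include <stdint.h>
#include <string.h>

/* Values copied verbatim from the MC_CMD_FILTER_OP definitions in this
 * header so the sketch stands alone; a real driver would include the
 * generated header instead of repeating them. */
#define MC_CMD_FILTER_OP_IN_OP_INSERT                 0x0
#define MC_CMD_FILTER_OP_EXT_IN_LEN                   172
#define MC_CMD_FILTER_OP_EXT_IN_OP_OFST               0
#define MC_CMD_FILTER_OP_EXT_IN_MATCH_FIELDS_OFST     16
#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_MAC_LBN     4
#define MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_LBN  6
#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_OFST          20
#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_HOST          0x1
#define MC_CMD_FILTER_OP_EXT_IN_RX_QUEUE_OFST         24
#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_OFST          28
#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_SIMPLE        0x0
#define MC_CMD_FILTER_OP_EXT_IN_DST_MAC_OFST          52
#define MC_CMD_FILTER_OP_EXT_IN_ETHER_TYPE_OFST       60

/* Write one 32-bit MCDI dword at a byte offset; little-endian encoding is
 * assumed here. */
static void
mcdi_write_dword(uint8_t *buf, unsigned int ofst, uint32_t value)
{
	buf[ofst + 0] = (uint8_t)(value & 0xff);
	buf[ofst + 1] = (uint8_t)((value >> 8) & 0xff);
	buf[ofst + 2] = (uint8_t)((value >> 16) & 0xff);
	buf[ofst + 3] = (uint8_t)((value >> 24) & 0xff);
}

/* Build a hypothetical MC_CMD_FILTER_OP_EXT_IN INSERT request that matches
 * on destination MAC and Ethernet type and delivers matching packets to a
 * single host RX queue. PORT_ID and all unused match fields are simply left
 * at zero for brevity. */
static void
build_filter_insert(uint8_t buf[MC_CMD_FILTER_OP_EXT_IN_LEN],
		    const uint8_t dst_mac[6], uint16_t ether_type,
		    uint32_t rx_queue)
{
	/* MATCH_FIELDS is a bitmask; the _LBN values above give each bit. */
	uint32_t match_fields =
	    (1u << MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_MAC_LBN) |
	    (1u << MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_LBN);

	memset(buf, 0, MC_CMD_FILTER_OP_EXT_IN_LEN);
	mcdi_write_dword(buf, MC_CMD_FILTER_OP_EXT_IN_OP_OFST,
			 MC_CMD_FILTER_OP_IN_OP_INSERT);
	mcdi_write_dword(buf, MC_CMD_FILTER_OP_EXT_IN_MATCH_FIELDS_OFST,
			 match_fields);
	mcdi_write_dword(buf, MC_CMD_FILTER_OP_EXT_IN_RX_DEST_OFST,
			 MC_CMD_FILTER_OP_EXT_IN_RX_DEST_HOST);
	mcdi_write_dword(buf, MC_CMD_FILTER_OP_EXT_IN_RX_QUEUE_OFST, rx_queue);
	mcdi_write_dword(buf, MC_CMD_FILTER_OP_EXT_IN_RX_MODE_OFST,
			 MC_CMD_FILTER_OP_EXT_IN_RX_MODE_SIMPLE);
	/* Match values are carried "as bytes in network order". */
	memcpy(buf + MC_CMD_FILTER_OP_EXT_IN_DST_MAC_OFST, dst_mac, 6);
	buf[MC_CMD_FILTER_OP_EXT_IN_ETHER_TYPE_OFST + 0] =
	    (uint8_t)(ether_type >> 8);
	buf[MC_CMD_FILTER_OP_EXT_IN_ETHER_TYPE_OFST + 1] =
	    (uint8_t)(ether_type & 0xff);
}

The same pattern (dword writes at _OFST offsets plus bit manipulation via _LBN/_WIDTH) applies to the remaining fields of this request and to the other MCDI messages defined in this header.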
+/* receive destination */ +#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_OFST 20 +#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_LEN 4 +/* enum: drop packets */ +#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_DROP 0x0 +/* enum: receive to host */ +#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_HOST 0x1 +/* enum: receive to MC */ +#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_MC 0x2 +/* enum: loop back to TXDP 0 */ +#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_TX0 0x3 +/* enum: loop back to TXDP 1 */ +#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_TX1 0x4 +/* receive queue handle (for multiple queue modes, this is the base queue) */ +#define MC_CMD_FILTER_OP_EXT_IN_RX_QUEUE_OFST 24 +#define MC_CMD_FILTER_OP_EXT_IN_RX_QUEUE_LEN 4 +/* receive mode */ +#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_OFST 28 +#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_LEN 4 +/* enum: receive to just the specified queue */ +#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_SIMPLE 0x0 +/* enum: receive to multiple queues using RSS context */ +#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_RSS 0x1 +/* enum: receive to multiple queues using .1p mapping */ +#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_DOT1P_MAPPING 0x2 +/* enum: install a filter entry that will never match; for test purposes only + */ +#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_TEST_NEVER_MATCH 0x80000000 +/* RSS context (for RX_MODE_RSS) or .1p mapping handle (for + * RX_MODE_DOT1P_MAPPING), as returned by MC_CMD_RSS_CONTEXT_ALLOC or + * MC_CMD_DOT1P_MAPPING_ALLOC. + */ +#define MC_CMD_FILTER_OP_EXT_IN_RX_CONTEXT_OFST 32 +#define MC_CMD_FILTER_OP_EXT_IN_RX_CONTEXT_LEN 4 +/* transmit domain (reserved; set to 0) */ +#define MC_CMD_FILTER_OP_EXT_IN_TX_DOMAIN_OFST 36 +#define MC_CMD_FILTER_OP_EXT_IN_TX_DOMAIN_LEN 4 +/* transmit destination (either set the MAC and/or PM bits for explicit + * control, or set this field to TX_DEST_DEFAULT for sensible default + * behaviour) + */ +#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_OFST 40 +#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_LEN 4 +/* enum: request default behaviour (based on filter type) */ +#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_DEFAULT 0xffffffff +#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_MAC_LBN 0 +#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_PM_LBN 1 +#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_PM_WIDTH 1 +/* source MAC address to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_EXT_IN_SRC_MAC_OFST 44 +#define MC_CMD_FILTER_OP_EXT_IN_SRC_MAC_LEN 6 +/* source port to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_EXT_IN_SRC_PORT_OFST 50 +#define MC_CMD_FILTER_OP_EXT_IN_SRC_PORT_LEN 2 +/* destination MAC address to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_EXT_IN_DST_MAC_OFST 52 +#define MC_CMD_FILTER_OP_EXT_IN_DST_MAC_LEN 6 +/* destination port to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_EXT_IN_DST_PORT_OFST 58 +#define MC_CMD_FILTER_OP_EXT_IN_DST_PORT_LEN 2 +/* Ethernet type to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_EXT_IN_ETHER_TYPE_OFST 60 +#define MC_CMD_FILTER_OP_EXT_IN_ETHER_TYPE_LEN 2 +/* Inner VLAN tag to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_EXT_IN_INNER_VLAN_OFST 62 +#define MC_CMD_FILTER_OP_EXT_IN_INNER_VLAN_LEN 2 +/* Outer VLAN tag to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_EXT_IN_OUTER_VLAN_OFST 64 +#define MC_CMD_FILTER_OP_EXT_IN_OUTER_VLAN_LEN 2 +/* IP protocol to match (in low byte; set high byte to 0) */ +#define MC_CMD_FILTER_OP_EXT_IN_IP_PROTO_OFST 66 +#define 
MC_CMD_FILTER_OP_EXT_IN_IP_PROTO_LEN 2 +/* Firmware defined register 0 to match (reserved; set to 0) */ +#define MC_CMD_FILTER_OP_EXT_IN_FWDEF0_OFST 68 +#define MC_CMD_FILTER_OP_EXT_IN_FWDEF0_LEN 4 +/* VNI (for VXLAN/Geneve, when IP protocol is UDP) or VSID (for NVGRE, when IP + * protocol is GRE) to match (as bytes in network order; set last byte to 0 for + * VXLAN/NVGRE, or 1 for Geneve) + */ +#define MC_CMD_FILTER_OP_EXT_IN_VNI_OR_VSID_OFST 72 +#define MC_CMD_FILTER_OP_EXT_IN_VNI_OR_VSID_LEN 4 +#define MC_CMD_FILTER_OP_EXT_IN_VNI_VALUE_LBN 0 +#define MC_CMD_FILTER_OP_EXT_IN_VNI_VALUE_WIDTH 24 +#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_LBN 24 +#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_WIDTH 8 +/* enum: Match VXLAN traffic with this VNI */ +#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_VXLAN 0x0 +/* enum: Match Geneve traffic with this VNI */ +#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_GENEVE 0x1 +/* enum: Reserved for experimental development use */ +#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_EXPERIMENTAL 0xfe +#define MC_CMD_FILTER_OP_EXT_IN_VSID_VALUE_LBN 0 +#define MC_CMD_FILTER_OP_EXT_IN_VSID_VALUE_WIDTH 24 +#define MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_LBN 24 +#define MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_WIDTH 8 +/* enum: Match NVGRE traffic with this VSID */ +#define MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_NVGRE 0x0 +/* source IP address to match (as bytes in network order; set last 12 bytes to + * 0 for IPv4 address) + */ +#define MC_CMD_FILTER_OP_EXT_IN_SRC_IP_OFST 76 +#define MC_CMD_FILTER_OP_EXT_IN_SRC_IP_LEN 16 +/* destination IP address to match (as bytes in network order; set last 12 + * bytes to 0 for IPv4 address) + */ +#define MC_CMD_FILTER_OP_EXT_IN_DST_IP_OFST 92 +#define MC_CMD_FILTER_OP_EXT_IN_DST_IP_LEN 16 +/* VXLAN/NVGRE inner frame source MAC address to match (as bytes in network + * order) + */ +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_MAC_OFST 108 +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_MAC_LEN 6 +/* VXLAN/NVGRE inner frame source port to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_PORT_OFST 114 +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_PORT_LEN 2 +/* VXLAN/NVGRE inner frame destination MAC address to match (as bytes in + * network order) + */ +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_MAC_OFST 116 +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_MAC_LEN 6 +/* VXLAN/NVGRE inner frame destination port to match (as bytes in network + * order) + */ +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_PORT_OFST 122 +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_PORT_LEN 2 +/* VXLAN/NVGRE inner frame Ethernet type to match (as bytes in network order) + */ +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_ETHER_TYPE_OFST 124 +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_ETHER_TYPE_LEN 2 +/* VXLAN/NVGRE inner frame Inner VLAN tag to match (as bytes in network order) + */ +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_INNER_VLAN_OFST 126 +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_INNER_VLAN_LEN 2 +/* VXLAN/NVGRE inner frame Outer VLAN tag to match (as bytes in network order) + */ +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_OUTER_VLAN_OFST 128 +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_OUTER_VLAN_LEN 2 +/* VXLAN/NVGRE inner frame IP protocol to match (in low byte; set high byte to + * 0) + */ +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_IP_PROTO_OFST 130 +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_IP_PROTO_LEN 2 +/* VXLAN/NVGRE inner frame Firmware defined register 0 to match (reserved; set + * to 0) + */ +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF0_OFST 132 +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF0_LEN 4 +/* VXLAN/NVGRE inner frame 
Firmware defined register 1 to match (reserved; set + * to 0) + */ +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF1_OFST 136 +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF1_LEN 4 +/* VXLAN/NVGRE inner frame source IP address to match (as bytes in network + * order; set last 12 bytes to 0 for IPv4 address) + */ +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_IP_OFST 140 +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_IP_LEN 16 +/* VXLAN/NVGRE inner frame destination IP address to match (as bytes in network + * order; set last 12 bytes to 0 for IPv4 address) + */ +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_IP_OFST 156 +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_IP_LEN 16 + +/* MC_CMD_FILTER_OP_V3_IN msgrequest: FILTER_OP extension to support additional + * filter actions for Intel's DPDK (Data Plane Development Kit, dpdk.org) via + * its rte_flow API. This extension is only useful with the sfc_efx driver + * included as part of DPDK, used in conjunction with the dpdk datapath + * firmware variant. + */ +#define MC_CMD_FILTER_OP_V3_IN_LEN 180 +/* identifies the type of operation requested */ +#define MC_CMD_FILTER_OP_V3_IN_OP_OFST 0 +#define MC_CMD_FILTER_OP_V3_IN_OP_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_FILTER_OP_IN/OP */ +/* filter handle (for remove / unsubscribe operations) */ +#define MC_CMD_FILTER_OP_V3_IN_HANDLE_OFST 4 +#define MC_CMD_FILTER_OP_V3_IN_HANDLE_LEN 8 +#define MC_CMD_FILTER_OP_V3_IN_HANDLE_LO_OFST 4 +#define MC_CMD_FILTER_OP_V3_IN_HANDLE_HI_OFST 8 +/* The port ID associated with the v-adaptor which should contain this filter. + */ +#define MC_CMD_FILTER_OP_V3_IN_PORT_ID_OFST 12 +#define MC_CMD_FILTER_OP_V3_IN_PORT_ID_LEN 4 +/* fields to include in match criteria */ +#define MC_CMD_FILTER_OP_V3_IN_MATCH_FIELDS_OFST 16 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_FIELDS_LEN 4 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_IP_LBN 0 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_IP_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_IP_LBN 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_IP_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_MAC_LBN 2 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_PORT_LBN 3 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_PORT_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_MAC_LBN 4 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_PORT_LBN 5 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_PORT_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_ETHER_TYPE_LBN 6 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_ETHER_TYPE_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_INNER_VLAN_LBN 7 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_INNER_VLAN_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_OUTER_VLAN_LBN 8 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_OUTER_VLAN_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IP_PROTO_LBN 9 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IP_PROTO_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_FWDEF0_LBN 10 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_FWDEF0_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_VNI_OR_VSID_LBN 11 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_VNI_OR_VSID_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_IP_LBN 12 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_IP_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_IP_LBN 13 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_IP_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_MAC_LBN 14 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_PORT_LBN 15 +#define 
MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_PORT_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_MAC_LBN 16 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_PORT_LBN 17 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_PORT_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_ETHER_TYPE_LBN 18 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_ETHER_TYPE_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_INNER_VLAN_LBN 19 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_INNER_VLAN_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_OUTER_VLAN_LBN 20 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_OUTER_VLAN_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_IP_PROTO_LBN 21 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_IP_PROTO_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_FWDEF0_LBN 22 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_FWDEF0_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_FWDEF1_LBN 23 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_FWDEF1_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_LBN 24 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN 25 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_MCAST_DST_LBN 30 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_MCAST_DST_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_UCAST_DST_LBN 31 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_UCAST_DST_WIDTH 1 +/* receive destination */ +#define MC_CMD_FILTER_OP_V3_IN_RX_DEST_OFST 20 +#define MC_CMD_FILTER_OP_V3_IN_RX_DEST_LEN 4 +/* enum: drop packets */ +#define MC_CMD_FILTER_OP_V3_IN_RX_DEST_DROP 0x0 +/* enum: receive to host */ +#define MC_CMD_FILTER_OP_V3_IN_RX_DEST_HOST 0x1 +/* enum: receive to MC */ +#define MC_CMD_FILTER_OP_V3_IN_RX_DEST_MC 0x2 +/* enum: loop back to TXDP 0 */ +#define MC_CMD_FILTER_OP_V3_IN_RX_DEST_TX0 0x3 +/* enum: loop back to TXDP 1 */ +#define MC_CMD_FILTER_OP_V3_IN_RX_DEST_TX1 0x4 +/* receive queue handle (for multiple queue modes, this is the base queue) */ +#define MC_CMD_FILTER_OP_V3_IN_RX_QUEUE_OFST 24 +#define MC_CMD_FILTER_OP_V3_IN_RX_QUEUE_LEN 4 +/* receive mode */ +#define MC_CMD_FILTER_OP_V3_IN_RX_MODE_OFST 28 +#define MC_CMD_FILTER_OP_V3_IN_RX_MODE_LEN 4 +/* enum: receive to just the specified queue */ +#define MC_CMD_FILTER_OP_V3_IN_RX_MODE_SIMPLE 0x0 +/* enum: receive to multiple queues using RSS context */ +#define MC_CMD_FILTER_OP_V3_IN_RX_MODE_RSS 0x1 +/* enum: receive to multiple queues using .1p mapping */ +#define MC_CMD_FILTER_OP_V3_IN_RX_MODE_DOT1P_MAPPING 0x2 +/* enum: install a filter entry that will never match; for test purposes only + */ +#define MC_CMD_FILTER_OP_V3_IN_RX_MODE_TEST_NEVER_MATCH 0x80000000 +/* RSS context (for RX_MODE_RSS) or .1p mapping handle (for + * RX_MODE_DOT1P_MAPPING), as returned by MC_CMD_RSS_CONTEXT_ALLOC or + * MC_CMD_DOT1P_MAPPING_ALLOC. 
+ */ +#define MC_CMD_FILTER_OP_V3_IN_RX_CONTEXT_OFST 32 +#define MC_CMD_FILTER_OP_V3_IN_RX_CONTEXT_LEN 4 +/* transmit domain (reserved; set to 0) */ +#define MC_CMD_FILTER_OP_V3_IN_TX_DOMAIN_OFST 36 +#define MC_CMD_FILTER_OP_V3_IN_TX_DOMAIN_LEN 4 +/* transmit destination (either set the MAC and/or PM bits for explicit + * control, or set this field to TX_DEST_DEFAULT for sensible default + * behaviour) + */ +#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_OFST 40 +#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_LEN 4 +/* enum: request default behaviour (based on filter type) */ +#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_DEFAULT 0xffffffff +#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_MAC_LBN 0 +#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_PM_LBN 1 +#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_PM_WIDTH 1 +/* source MAC address to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_V3_IN_SRC_MAC_OFST 44 +#define MC_CMD_FILTER_OP_V3_IN_SRC_MAC_LEN 6 +/* source port to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_V3_IN_SRC_PORT_OFST 50 +#define MC_CMD_FILTER_OP_V3_IN_SRC_PORT_LEN 2 +/* destination MAC address to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_V3_IN_DST_MAC_OFST 52 +#define MC_CMD_FILTER_OP_V3_IN_DST_MAC_LEN 6 +/* destination port to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_V3_IN_DST_PORT_OFST 58 +#define MC_CMD_FILTER_OP_V3_IN_DST_PORT_LEN 2 +/* Ethernet type to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_V3_IN_ETHER_TYPE_OFST 60 +#define MC_CMD_FILTER_OP_V3_IN_ETHER_TYPE_LEN 2 +/* Inner VLAN tag to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_V3_IN_INNER_VLAN_OFST 62 +#define MC_CMD_FILTER_OP_V3_IN_INNER_VLAN_LEN 2 +/* Outer VLAN tag to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_V3_IN_OUTER_VLAN_OFST 64 +#define MC_CMD_FILTER_OP_V3_IN_OUTER_VLAN_LEN 2 +/* IP protocol to match (in low byte; set high byte to 0) */ +#define MC_CMD_FILTER_OP_V3_IN_IP_PROTO_OFST 66 +#define MC_CMD_FILTER_OP_V3_IN_IP_PROTO_LEN 2 +/* Firmware defined register 0 to match (reserved; set to 0) */ +#define MC_CMD_FILTER_OP_V3_IN_FWDEF0_OFST 68 +#define MC_CMD_FILTER_OP_V3_IN_FWDEF0_LEN 4 +/* VNI (for VXLAN/Geneve, when IP protocol is UDP) or VSID (for NVGRE, when IP + * protocol is GRE) to match (as bytes in network order; set last byte to 0 for + * VXLAN/NVGRE, or 1 for Geneve) + */ +#define MC_CMD_FILTER_OP_V3_IN_VNI_OR_VSID_OFST 72 +#define MC_CMD_FILTER_OP_V3_IN_VNI_OR_VSID_LEN 4 +#define MC_CMD_FILTER_OP_V3_IN_VNI_VALUE_LBN 0 +#define MC_CMD_FILTER_OP_V3_IN_VNI_VALUE_WIDTH 24 +#define MC_CMD_FILTER_OP_V3_IN_VNI_TYPE_LBN 24 +#define MC_CMD_FILTER_OP_V3_IN_VNI_TYPE_WIDTH 8 +/* enum: Match VXLAN traffic with this VNI */ +#define MC_CMD_FILTER_OP_V3_IN_VNI_TYPE_VXLAN 0x0 +/* enum: Match Geneve traffic with this VNI */ +#define MC_CMD_FILTER_OP_V3_IN_VNI_TYPE_GENEVE 0x1 +/* enum: Reserved for experimental development use */ +#define MC_CMD_FILTER_OP_V3_IN_VNI_TYPE_EXPERIMENTAL 0xfe +#define MC_CMD_FILTER_OP_V3_IN_VSID_VALUE_LBN 0 +#define MC_CMD_FILTER_OP_V3_IN_VSID_VALUE_WIDTH 24 +#define MC_CMD_FILTER_OP_V3_IN_VSID_TYPE_LBN 24 +#define MC_CMD_FILTER_OP_V3_IN_VSID_TYPE_WIDTH 8 +/* enum: Match NVGRE traffic with this VSID */ +#define MC_CMD_FILTER_OP_V3_IN_VSID_TYPE_NVGRE 0x0 +/* source IP address to match (as bytes in network order; set last 12 bytes to + * 0 for IPv4 address) + */ +#define MC_CMD_FILTER_OP_V3_IN_SRC_IP_OFST 76 +#define 
MC_CMD_FILTER_OP_V3_IN_SRC_IP_LEN 16 +/* destination IP address to match (as bytes in network order; set last 12 + * bytes to 0 for IPv4 address) + */ +#define MC_CMD_FILTER_OP_V3_IN_DST_IP_OFST 92 +#define MC_CMD_FILTER_OP_V3_IN_DST_IP_LEN 16 +/* VXLAN/NVGRE inner frame source MAC address to match (as bytes in network + * order) + */ +#define MC_CMD_FILTER_OP_V3_IN_IFRM_SRC_MAC_OFST 108 +#define MC_CMD_FILTER_OP_V3_IN_IFRM_SRC_MAC_LEN 6 +/* VXLAN/NVGRE inner frame source port to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_V3_IN_IFRM_SRC_PORT_OFST 114 +#define MC_CMD_FILTER_OP_V3_IN_IFRM_SRC_PORT_LEN 2 +/* VXLAN/NVGRE inner frame destination MAC address to match (as bytes in + * network order) + */ +#define MC_CMD_FILTER_OP_V3_IN_IFRM_DST_MAC_OFST 116 +#define MC_CMD_FILTER_OP_V3_IN_IFRM_DST_MAC_LEN 6 +/* VXLAN/NVGRE inner frame destination port to match (as bytes in network + * order) + */ +#define MC_CMD_FILTER_OP_V3_IN_IFRM_DST_PORT_OFST 122 +#define MC_CMD_FILTER_OP_V3_IN_IFRM_DST_PORT_LEN 2 +/* VXLAN/NVGRE inner frame Ethernet type to match (as bytes in network order) + */ +#define MC_CMD_FILTER_OP_V3_IN_IFRM_ETHER_TYPE_OFST 124 +#define MC_CMD_FILTER_OP_V3_IN_IFRM_ETHER_TYPE_LEN 2 +/* VXLAN/NVGRE inner frame Inner VLAN tag to match (as bytes in network order) + */ +#define MC_CMD_FILTER_OP_V3_IN_IFRM_INNER_VLAN_OFST 126 +#define MC_CMD_FILTER_OP_V3_IN_IFRM_INNER_VLAN_LEN 2 +/* VXLAN/NVGRE inner frame Outer VLAN tag to match (as bytes in network order) + */ +#define MC_CMD_FILTER_OP_V3_IN_IFRM_OUTER_VLAN_OFST 128 +#define MC_CMD_FILTER_OP_V3_IN_IFRM_OUTER_VLAN_LEN 2 +/* VXLAN/NVGRE inner frame IP protocol to match (in low byte; set high byte to + * 0) + */ +#define MC_CMD_FILTER_OP_V3_IN_IFRM_IP_PROTO_OFST 130 +#define MC_CMD_FILTER_OP_V3_IN_IFRM_IP_PROTO_LEN 2 +/* VXLAN/NVGRE inner frame Firmware defined register 0 to match (reserved; set + * to 0) + */ +#define MC_CMD_FILTER_OP_V3_IN_IFRM_FWDEF0_OFST 132 +#define MC_CMD_FILTER_OP_V3_IN_IFRM_FWDEF0_LEN 4 +/* VXLAN/NVGRE inner frame Firmware defined register 1 to match (reserved; set + * to 0) + */ +#define MC_CMD_FILTER_OP_V3_IN_IFRM_FWDEF1_OFST 136 +#define MC_CMD_FILTER_OP_V3_IN_IFRM_FWDEF1_LEN 4 +/* VXLAN/NVGRE inner frame source IP address to match (as bytes in network + * order; set last 12 bytes to 0 for IPv4 address) + */ +#define MC_CMD_FILTER_OP_V3_IN_IFRM_SRC_IP_OFST 140 +#define MC_CMD_FILTER_OP_V3_IN_IFRM_SRC_IP_LEN 16 +/* VXLAN/NVGRE inner frame destination IP address to match (as bytes in network + * order; set last 12 bytes to 0 for IPv4 address) + */ +#define MC_CMD_FILTER_OP_V3_IN_IFRM_DST_IP_OFST 156 +#define MC_CMD_FILTER_OP_V3_IN_IFRM_DST_IP_LEN 16 +/* Set an action for all packets matching this filter. The DPDK driver and dpdk + * f/w variant use their own specific delivery structures, which are documented + * in the DPDK Firmware Driver Interface (SF-119419-TC). Requesting anything + * other than MATCH_ACTION_NONE when the NIC is running another f/w variant + * will cause the filter insertion to fail with ENOTSUP. + */ +#define MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_OFST 172 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_LEN 4 +/* enum: do nothing extra */ +#define MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_NONE 0x0 +/* enum: Set the match flag in the packet prefix for packets matching the + * filter (only with dpdk firmware, otherwise fails with ENOTSUP). Used to + * support the DPDK rte_flow "FLAG" action. 
+ */ +#define MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_FLAG 0x1 +/* enum: Insert MATCH_MARK_VALUE into the packet prefix for packets matching + * the filter (only with dpdk firmware, otherwise fails with ENOTSUP). Used to + * support the DPDK rte_flow "MARK" action. + */ +#define MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_MARK 0x2 +/* the mark value for MATCH_ACTION_MARK. Requesting a value larger than the + * maximum (obtained from MC_CMD_GET_CAPABILITIES_V5/FILTER_ACTION_MARK_MAX) + * will cause the filter insertion to fail with EINVAL. + */ +#define MC_CMD_FILTER_OP_V3_IN_MATCH_MARK_VALUE_OFST 176 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_MARK_VALUE_LEN 4 + +/* MC_CMD_FILTER_OP_OUT msgresponse */ +#define MC_CMD_FILTER_OP_OUT_LEN 12 +/* identifies the type of operation requested */ +#define MC_CMD_FILTER_OP_OUT_OP_OFST 0 +#define MC_CMD_FILTER_OP_OUT_OP_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_FILTER_OP_IN/OP */ +/* Returned filter handle (for insert / subscribe operations). Note that these + * handles should be considered opaque to the host, although a value of + * 0xFFFFFFFF_FFFFFFFF is guaranteed never to be a valid handle. + */ +#define MC_CMD_FILTER_OP_OUT_HANDLE_OFST 4 +#define MC_CMD_FILTER_OP_OUT_HANDLE_LEN 8 +#define MC_CMD_FILTER_OP_OUT_HANDLE_LO_OFST 4 +#define MC_CMD_FILTER_OP_OUT_HANDLE_HI_OFST 8 +/* enum: guaranteed invalid filter handle (low 32 bits) */ +#define MC_CMD_FILTER_OP_OUT_HANDLE_LO_INVALID 0xffffffff +/* enum: guaranteed invalid filter handle (high 32 bits) */ +#define MC_CMD_FILTER_OP_OUT_HANDLE_HI_INVALID 0xffffffff + +/* MC_CMD_FILTER_OP_EXT_OUT msgresponse */ +#define MC_CMD_FILTER_OP_EXT_OUT_LEN 12 +/* identifies the type of operation requested */ +#define MC_CMD_FILTER_OP_EXT_OUT_OP_OFST 0 +#define MC_CMD_FILTER_OP_EXT_OUT_OP_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_FILTER_OP_EXT_IN/OP */ +/* Returned filter handle (for insert / subscribe operations). Note that these + * handles should be considered opaque to the host, although a value of + * 0xFFFFFFFF_FFFFFFFF is guaranteed never to be a valid handle. + */ +#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_OFST 4 +#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_LEN 8 +#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_LO_OFST 4 +#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_HI_OFST 8 +/* Enum values, see field(s): */ +/* MC_CMD_FILTER_OP_OUT/HANDLE */ + + +/***********************************/ +/* MC_CMD_GET_PARSER_DISP_INFO + * Get information related to the parser-dispatcher subsystem + */ +#define MC_CMD_GET_PARSER_DISP_INFO 0xe4 +#undef MC_CMD_0xe4_PRIVILEGE_CTG + +#define MC_CMD_0xe4_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_PARSER_DISP_INFO_IN msgrequest */ +#define MC_CMD_GET_PARSER_DISP_INFO_IN_LEN 4 +/* identifies the type of operation requested */ +#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_OFST 0 +#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_LEN 4 +/* enum: read the list of supported RX filter matches */ +#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES 0x1 +/* enum: read flags indicating restrictions on filter insertion for the calling + * client + */ +#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_RESTRICTIONS 0x2 +/* enum: read properties relating to security rules (Medford-only; for use by + * SolarSecure apps, not directly by drivers. See SF-114946-SW.) 
+ */ +#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SECURITY_RULE_INFO 0x3 +/* enum: read the list of supported RX filter matches for VXLAN/NVGRE + * encapsulated frames, which follow a different match sequence to normal + * frames (Medford only) + */ +#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_ENCAP_RX_MATCHES 0x4 + +/* MC_CMD_GET_PARSER_DISP_INFO_OUT msgresponse */ +#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMIN 8 +#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX 252 +#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LEN(num) (8+4*(num)) +#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_NUM(len) (((len)-8)/4) +/* identifies the type of operation requested */ +#define MC_CMD_GET_PARSER_DISP_INFO_OUT_OP_OFST 0 +#define MC_CMD_GET_PARSER_DISP_INFO_OUT_OP_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_GET_PARSER_DISP_INFO_IN/OP */ +/* number of supported match types */ +#define MC_CMD_GET_PARSER_DISP_INFO_OUT_NUM_SUPPORTED_MATCHES_OFST 4 +#define MC_CMD_GET_PARSER_DISP_INFO_OUT_NUM_SUPPORTED_MATCHES_LEN 4 +/* array of supported match types (valid MATCH_FIELDS values for + * MC_CMD_FILTER_OP) sorted in decreasing priority order + */ +#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_OFST 8 +#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_LEN 4 +#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MINNUM 0 +#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM 61 +#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM_MCDI2 253 + +/* MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT msgresponse */ +#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_LEN 8 +/* identifies the type of operation requested */ +#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_OP_OFST 0 +#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_OP_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_GET_PARSER_DISP_INFO_IN/OP */ +/* bitfield of filter insertion restrictions */ +#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_RESTRICTION_FLAGS_OFST 4 +#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_RESTRICTION_FLAGS_LEN 4 +#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_LBN 0 +#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_WIDTH 1 + +/* MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT msgresponse: + * GET_PARSER_DISP_INFO response format for OP_GET_SECURITY_RULE_INFO. + * (Medford-only; for use by SolarSecure apps, not directly by drivers. See + * SF-114946-SW.) NOTE - this message definition is provisional. It has not yet + * been used in any released code and may change during development. This note + * will be removed once it is regarded as stable. 
+ */ +#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_LEN 36 +/* identifies the type of operation requested */ +#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_OP_OFST 0 +#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_OP_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_GET_PARSER_DISP_INFO_IN/OP */ +/* a version number representing the set of rule lookups that are implemented + * by the currently running firmware + */ +#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_RULES_VERSION_OFST 4 +#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_RULES_VERSION_LEN 4 +/* enum: implements lookup sequences described in SF-114946-SW draft C */ +#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_RULES_VERSION_SF_114946_SW_C 0x0 +/* the number of nodes in the subnet map */ +#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_MAP_NUM_NODES_OFST 8 +#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_MAP_NUM_NODES_LEN 4 +/* the number of entries in one subnet map node */ +#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_MAP_NUM_ENTRIES_PER_NODE_OFST 12 +#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_MAP_NUM_ENTRIES_PER_NODE_LEN 4 +/* minimum valid value for a subnet ID in a subnet map leaf */ +#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_ID_MIN_OFST 16 +#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_ID_MIN_LEN 4 +/* maximum valid value for a subnet ID in a subnet map leaf */ +#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_ID_MAX_OFST 20 +#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_ID_MAX_LEN 4 +/* the number of entries in the local and remote port range maps */ +#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_TREE_NUM_ENTRIES_OFST 24 +#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_TREE_NUM_ENTRIES_LEN 4 +/* minimum valid value for a portrange ID in a port range map leaf */ +#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_ID_MIN_OFST 28 +#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_ID_MIN_LEN 4 +/* maximum valid value for a portrange ID in a port range map leaf */ +#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_ID_MAX_OFST 32 +#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_ID_MAX_LEN 4 + + +/***********************************/ +/* MC_CMD_PARSER_DISP_RW + * Direct read/write of parser-dispatcher state (DICPUs and LUE) for debugging. + * Please note that this interface is only of use to debug tools which have + * knowledge of firmware and hardware data structures; nothing here is intended + * for use by normal driver code. Note that although this command is in the + * Admin privilege group, in tamperproof adapters, only read operations are + * permitted. + */ +#define MC_CMD_PARSER_DISP_RW 0xe5 +#undef MC_CMD_0xe5_PRIVILEGE_CTG + +#define MC_CMD_0xe5_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_PARSER_DISP_RW_IN msgrequest */ +#define MC_CMD_PARSER_DISP_RW_IN_LEN 32 +/* identifies the target of the operation */ +#define MC_CMD_PARSER_DISP_RW_IN_TARGET_OFST 0 +#define MC_CMD_PARSER_DISP_RW_IN_TARGET_LEN 4 +/* enum: RX dispatcher CPU */ +#define MC_CMD_PARSER_DISP_RW_IN_RX_DICPU 0x0 +/* enum: TX dispatcher CPU */ +#define MC_CMD_PARSER_DISP_RW_IN_TX_DICPU 0x1 +/* enum: Lookup engine (with original metadata format). Deprecated; used only + * by cmdclient as a fallback for very old Huntington firmware, and not + * supported in firmware beyond v6.4.0.1005. 
Use LUE_VERSIONED_METADATA + * instead. + */ +#define MC_CMD_PARSER_DISP_RW_IN_LUE 0x2 +/* enum: Lookup engine (with requested metadata format) */ +#define MC_CMD_PARSER_DISP_RW_IN_LUE_VERSIONED_METADATA 0x3 +/* enum: RX0 dispatcher CPU (alias for RX_DICPU; Medford has 2 RX DICPUs) */ +#define MC_CMD_PARSER_DISP_RW_IN_RX0_DICPU 0x0 +/* enum: RX1 dispatcher CPU (only valid for Medford) */ +#define MC_CMD_PARSER_DISP_RW_IN_RX1_DICPU 0x4 +/* enum: Miscellaneous other state (only valid for Medford) */ +#define MC_CMD_PARSER_DISP_RW_IN_MISC_STATE 0x5 +/* identifies the type of operation requested */ +#define MC_CMD_PARSER_DISP_RW_IN_OP_OFST 4 +#define MC_CMD_PARSER_DISP_RW_IN_OP_LEN 4 +/* enum: Read a word of DICPU DMEM or a LUE entry */ +#define MC_CMD_PARSER_DISP_RW_IN_READ 0x0 +/* enum: Write a word of DICPU DMEM or a LUE entry. Not permitted on + * tamperproof adapters. + */ +#define MC_CMD_PARSER_DISP_RW_IN_WRITE 0x1 +/* enum: Read-modify-write a word of DICPU DMEM (not valid for LUE). Not + * permitted on tamperproof adapters. + */ +#define MC_CMD_PARSER_DISP_RW_IN_RMW 0x2 +/* data memory address (DICPU targets) or LUE index (LUE targets) */ +#define MC_CMD_PARSER_DISP_RW_IN_ADDRESS_OFST 8 +#define MC_CMD_PARSER_DISP_RW_IN_ADDRESS_LEN 4 +/* selector (for MISC_STATE target) */ +#define MC_CMD_PARSER_DISP_RW_IN_SELECTOR_OFST 8 +#define MC_CMD_PARSER_DISP_RW_IN_SELECTOR_LEN 4 +/* enum: Port to datapath mapping */ +#define MC_CMD_PARSER_DISP_RW_IN_PORT_DP_MAPPING 0x1 +/* value to write (for DMEM writes) */ +#define MC_CMD_PARSER_DISP_RW_IN_DMEM_WRITE_VALUE_OFST 12 +#define MC_CMD_PARSER_DISP_RW_IN_DMEM_WRITE_VALUE_LEN 4 +/* XOR value (for DMEM read-modify-writes: new = (old & mask) ^ value) */ +#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_XOR_VALUE_OFST 12 +#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_XOR_VALUE_LEN 4 +/* AND mask (for DMEM read-modify-writes: new = (old & mask) ^ value) */ +#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_AND_MASK_OFST 16 +#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_AND_MASK_LEN 4 +/* metadata format (for LUE reads using LUE_VERSIONED_METADATA) */ +#define MC_CMD_PARSER_DISP_RW_IN_LUE_READ_METADATA_VERSION_OFST 12 +#define MC_CMD_PARSER_DISP_RW_IN_LUE_READ_METADATA_VERSION_LEN 4 +/* value to write (for LUE writes) */ +#define MC_CMD_PARSER_DISP_RW_IN_LUE_WRITE_VALUE_OFST 12 +#define MC_CMD_PARSER_DISP_RW_IN_LUE_WRITE_VALUE_LEN 20 + +/* MC_CMD_PARSER_DISP_RW_OUT msgresponse */ +#define MC_CMD_PARSER_DISP_RW_OUT_LEN 52 +/* value read (for DMEM reads) */ +#define MC_CMD_PARSER_DISP_RW_OUT_DMEM_READ_VALUE_OFST 0 +#define MC_CMD_PARSER_DISP_RW_OUT_DMEM_READ_VALUE_LEN 4 +/* value read (for LUE reads) */ +#define MC_CMD_PARSER_DISP_RW_OUT_LUE_READ_VALUE_OFST 0 +#define MC_CMD_PARSER_DISP_RW_OUT_LUE_READ_VALUE_LEN 20 +/* up to 8 32-bit words of additional soft state from the LUE manager (the + * exact content is firmware-dependent and intended only for debug use) + */ +#define MC_CMD_PARSER_DISP_RW_OUT_LUE_MGR_STATE_OFST 20 +#define MC_CMD_PARSER_DISP_RW_OUT_LUE_MGR_STATE_LEN 32 +/* datapath(s) used for each port (for MISC_STATE PORT_DP_MAPPING selector) */ +#define MC_CMD_PARSER_DISP_RW_OUT_PORT_DP_MAPPING_OFST 0 +#define MC_CMD_PARSER_DISP_RW_OUT_PORT_DP_MAPPING_LEN 4 +#define MC_CMD_PARSER_DISP_RW_OUT_PORT_DP_MAPPING_NUM 4 +#define MC_CMD_PARSER_DISP_RW_OUT_DP0 0x1 /* enum */ +#define MC_CMD_PARSER_DISP_RW_OUT_DP1 0x2 /* enum */ + + +/***********************************/ +/* MC_CMD_GET_PF_COUNT + * Get number of PFs on the device. 
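For the DMEM read-modify-write operation above, the firmware applies new = (old & mask) ^ value using the DMEM_RMW_AND_MASK and DMEM_RMW_XOR_VALUE fields, which is enough to set, clear or toggle individual bits in one request. A self-contained illustration of that arithmetic, independent of the MCDI transport (the helper name is illustrative):

#include <assert.h>
#include <stdint.h>

/* new = (old & mask) ^ value, per MC_CMD_PARSER_DISP_RW's RMW fields. */
static uint32_t
dmem_rmw(uint32_t old, uint32_t and_mask, uint32_t xor_value)
{
	return (old & and_mask) ^ xor_value;
}

int
main(void)
{
	uint32_t old = 0x12345678;

	/* Set bit 0: clear it with the mask, then XOR it back in. */
	assert(dmem_rmw(old, ~UINT32_C(1), UINT32_C(1)) == (old | 1));
	/* Clear bit 3: mask it out and XOR in nothing. */
	assert(dmem_rmw(old, ~UINT32_C(8), 0) == (old & ~UINT32_C(8)));
	/* Toggle bit 1: keep every bit and XOR the one of interest. */
	assert(dmem_rmw(old, UINT32_MAX, UINT32_C(2)) == (old ^ 2));
	return 0;
}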
+ */ +#define MC_CMD_GET_PF_COUNT 0xb6 +#undef MC_CMD_0xb6_PRIVILEGE_CTG + +#define MC_CMD_0xb6_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_PF_COUNT_IN msgrequest */ +#define MC_CMD_GET_PF_COUNT_IN_LEN 0 + +/* MC_CMD_GET_PF_COUNT_OUT msgresponse */ +#define MC_CMD_GET_PF_COUNT_OUT_LEN 1 +/* Identifies the number of PFs on the device. */ +#define MC_CMD_GET_PF_COUNT_OUT_PF_COUNT_OFST 0 +#define MC_CMD_GET_PF_COUNT_OUT_PF_COUNT_LEN 1 + + +/***********************************/ +/* MC_CMD_SET_PF_COUNT + * Set number of PFs on the device. + */ +#define MC_CMD_SET_PF_COUNT 0xb7 + +/* MC_CMD_SET_PF_COUNT_IN msgrequest */ +#define MC_CMD_SET_PF_COUNT_IN_LEN 4 +/* New number of PFs on the device. */ +#define MC_CMD_SET_PF_COUNT_IN_PF_COUNT_OFST 0 +#define MC_CMD_SET_PF_COUNT_IN_PF_COUNT_LEN 4 + +/* MC_CMD_SET_PF_COUNT_OUT msgresponse */ +#define MC_CMD_SET_PF_COUNT_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_GET_PORT_ASSIGNMENT + * Get port assignment for current PCI function. + */ +#define MC_CMD_GET_PORT_ASSIGNMENT 0xb8 +#undef MC_CMD_0xb8_PRIVILEGE_CTG + +#define MC_CMD_0xb8_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_PORT_ASSIGNMENT_IN msgrequest */ +#define MC_CMD_GET_PORT_ASSIGNMENT_IN_LEN 0 + +/* MC_CMD_GET_PORT_ASSIGNMENT_OUT msgresponse */ +#define MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN 4 +/* Identifies the port assignment for this function. */ +#define MC_CMD_GET_PORT_ASSIGNMENT_OUT_PORT_OFST 0 +#define MC_CMD_GET_PORT_ASSIGNMENT_OUT_PORT_LEN 4 + + +/***********************************/ +/* MC_CMD_SET_PORT_ASSIGNMENT + * Set port assignment for current PCI function. + */ +#define MC_CMD_SET_PORT_ASSIGNMENT 0xb9 +#undef MC_CMD_0xb9_PRIVILEGE_CTG + +#define MC_CMD_0xb9_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_SET_PORT_ASSIGNMENT_IN msgrequest */ +#define MC_CMD_SET_PORT_ASSIGNMENT_IN_LEN 4 +/* Identifies the port assignment for this function. */ +#define MC_CMD_SET_PORT_ASSIGNMENT_IN_PORT_OFST 0 +#define MC_CMD_SET_PORT_ASSIGNMENT_IN_PORT_LEN 4 + +/* MC_CMD_SET_PORT_ASSIGNMENT_OUT msgresponse */ +#define MC_CMD_SET_PORT_ASSIGNMENT_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_ALLOC_VIS + * Allocate VIs for current PCI function. + */ +#define MC_CMD_ALLOC_VIS 0x8b +#undef MC_CMD_0x8b_PRIVILEGE_CTG + +#define MC_CMD_0x8b_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_ALLOC_VIS_IN msgrequest */ +#define MC_CMD_ALLOC_VIS_IN_LEN 8 +/* The minimum number of VIs that is acceptable */ +#define MC_CMD_ALLOC_VIS_IN_MIN_VI_COUNT_OFST 0 +#define MC_CMD_ALLOC_VIS_IN_MIN_VI_COUNT_LEN 4 +/* The maximum number of VIs that would be useful */ +#define MC_CMD_ALLOC_VIS_IN_MAX_VI_COUNT_OFST 4 +#define MC_CMD_ALLOC_VIS_IN_MAX_VI_COUNT_LEN 4 + +/* MC_CMD_ALLOC_VIS_OUT msgresponse: Huntington-compatible VI_ALLOC request. + * Use extended version in new code. + */ +#define MC_CMD_ALLOC_VIS_OUT_LEN 8 +/* The number of VIs allocated on this function */ +#define MC_CMD_ALLOC_VIS_OUT_VI_COUNT_OFST 0 +#define MC_CMD_ALLOC_VIS_OUT_VI_COUNT_LEN 4 +/* The base absolute VI number allocated to this function. Required to + * correctly interpret wakeup events. + */ +#define MC_CMD_ALLOC_VIS_OUT_VI_BASE_OFST 4 +#define MC_CMD_ALLOC_VIS_OUT_VI_BASE_LEN 4 + +/* MC_CMD_ALLOC_VIS_EXT_OUT msgresponse */ +#define MC_CMD_ALLOC_VIS_EXT_OUT_LEN 12 +/* The number of VIs allocated on this function */ +#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_COUNT_OFST 0 +#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_COUNT_LEN 4 +/* The base absolute VI number allocated to this function. 
Required to + * correctly interpret wakeup events. + */ +#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_BASE_OFST 4 +#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_BASE_LEN 4 +/* Function's port vi_shift value (always 0 on Huntington) */ +#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_SHIFT_OFST 8 +#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_SHIFT_LEN 4 + + +/***********************************/ +/* MC_CMD_FREE_VIS + * Free VIs for current PCI function. Any linked PIO buffers will be unlinked, + * but not freed. + */ +#define MC_CMD_FREE_VIS 0x8c +#undef MC_CMD_0x8c_PRIVILEGE_CTG + +#define MC_CMD_0x8c_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_FREE_VIS_IN msgrequest */ +#define MC_CMD_FREE_VIS_IN_LEN 0 + +/* MC_CMD_FREE_VIS_OUT msgresponse */ +#define MC_CMD_FREE_VIS_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_GET_SRIOV_CFG + * Get SRIOV config for this PF. + */ +#define MC_CMD_GET_SRIOV_CFG 0xba +#undef MC_CMD_0xba_PRIVILEGE_CTG + +#define MC_CMD_0xba_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_SRIOV_CFG_IN msgrequest */ +#define MC_CMD_GET_SRIOV_CFG_IN_LEN 0 + +/* MC_CMD_GET_SRIOV_CFG_OUT msgresponse */ +#define MC_CMD_GET_SRIOV_CFG_OUT_LEN 20 +/* Number of VFs currently enabled. */ +#define MC_CMD_GET_SRIOV_CFG_OUT_VF_CURRENT_OFST 0 +#define MC_CMD_GET_SRIOV_CFG_OUT_VF_CURRENT_LEN 4 +/* Max number of VFs before sriov stride and offset may need to be changed. */ +#define MC_CMD_GET_SRIOV_CFG_OUT_VF_MAX_OFST 4 +#define MC_CMD_GET_SRIOV_CFG_OUT_VF_MAX_LEN 4 +#define MC_CMD_GET_SRIOV_CFG_OUT_FLAGS_OFST 8 +#define MC_CMD_GET_SRIOV_CFG_OUT_FLAGS_LEN 4 +#define MC_CMD_GET_SRIOV_CFG_OUT_VF_ENABLED_LBN 0 +#define MC_CMD_GET_SRIOV_CFG_OUT_VF_ENABLED_WIDTH 1 +/* RID offset of first VF from PF. */ +#define MC_CMD_GET_SRIOV_CFG_OUT_VF_OFFSET_OFST 12 +#define MC_CMD_GET_SRIOV_CFG_OUT_VF_OFFSET_LEN 4 +/* RID offset of each subsequent VF from the previous. */ +#define MC_CMD_GET_SRIOV_CFG_OUT_VF_STRIDE_OFST 16 +#define MC_CMD_GET_SRIOV_CFG_OUT_VF_STRIDE_LEN 4 + + +/***********************************/ +/* MC_CMD_SET_SRIOV_CFG + * Set SRIOV config for this PF. + */ +#define MC_CMD_SET_SRIOV_CFG 0xbb +#undef MC_CMD_0xbb_PRIVILEGE_CTG + +#define MC_CMD_0xbb_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_SET_SRIOV_CFG_IN msgrequest */ +#define MC_CMD_SET_SRIOV_CFG_IN_LEN 20 +/* Number of VFs currently enabled. */ +#define MC_CMD_SET_SRIOV_CFG_IN_VF_CURRENT_OFST 0 +#define MC_CMD_SET_SRIOV_CFG_IN_VF_CURRENT_LEN 4 +/* Max number of VFs before sriov stride and offset may need to be changed. */ +#define MC_CMD_SET_SRIOV_CFG_IN_VF_MAX_OFST 4 +#define MC_CMD_SET_SRIOV_CFG_IN_VF_MAX_LEN 4 +#define MC_CMD_SET_SRIOV_CFG_IN_FLAGS_OFST 8 +#define MC_CMD_SET_SRIOV_CFG_IN_FLAGS_LEN 4 +#define MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_LBN 0 +#define MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_WIDTH 1 +/* RID offset of first VF from PF, or 0 for no change, or + * MC_CMD_RESOURCE_INSTANCE_ANY to allow the system to allocate an offset. + */ +#define MC_CMD_SET_SRIOV_CFG_IN_VF_OFFSET_OFST 12 +#define MC_CMD_SET_SRIOV_CFG_IN_VF_OFFSET_LEN 4 +/* RID offset of each subsequent VF from the previous, 0 for no change, or + * MC_CMD_RESOURCE_INSTANCE_ANY to allow the system to allocate a stride. + */ +#define MC_CMD_SET_SRIOV_CFG_IN_VF_STRIDE_OFST 16 +#define MC_CMD_SET_SRIOV_CFG_IN_VF_STRIDE_LEN 4 + +/* MC_CMD_SET_SRIOV_CFG_OUT msgresponse */ +#define MC_CMD_SET_SRIOV_CFG_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_GET_VI_ALLOC_INFO + * Get information about number of VI's and base VI number allocated to this + * function. 
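VF_OFFSET and VF_STRIDE in the SRIOV configuration above describe how VF routing IDs (RIDs) are laid out relative to the PF: the n-th VF sits at PF RID + VF_OFFSET + n * VF_STRIDE. A small worked sketch of that arithmetic (the PF RID, offset and stride values are made-up examples, not values this interface guarantees):

#include <stdint.h>
#include <stdio.h>

/* RID of VF n, given the PF RID and MC_CMD_GET_SRIOV_CFG's offset/stride. */
static uint16_t
vf_rid(uint16_t pf_rid, uint32_t vf_offset, uint32_t vf_stride, uint32_t n)
{
	return (uint16_t)(pf_rid + vf_offset + n * vf_stride);
}

int
main(void)
{
	/* e.g. PF at 02:00.0 (RID 0x0200), offset 16, stride 1 */
	printf("VF0 RID = %#x\n", vf_rid(0x0200, 16, 1, 0));	/* 0x0210 -> 02:02.0 */
	printf("VF3 RID = %#x\n", vf_rid(0x0200, 16, 1, 3));	/* 0x0213 -> 02:02.3 */
	return 0;
}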
+ */ +#define MC_CMD_GET_VI_ALLOC_INFO 0x8d +#undef MC_CMD_0x8d_PRIVILEGE_CTG + +#define MC_CMD_0x8d_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_VI_ALLOC_INFO_IN msgrequest */ +#define MC_CMD_GET_VI_ALLOC_INFO_IN_LEN 0 + +/* MC_CMD_GET_VI_ALLOC_INFO_OUT msgresponse */ +#define MC_CMD_GET_VI_ALLOC_INFO_OUT_LEN 12 +/* The number of VIs allocated on this function */ +#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_COUNT_OFST 0 +#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_COUNT_LEN 4 +/* The base absolute VI number allocated to this function. Required to + * correctly interpret wakeup events. + */ +#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_BASE_OFST 4 +#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_BASE_LEN 4 +/* Function's port vi_shift value (always 0 on Huntington) */ +#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_SHIFT_OFST 8 +#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_SHIFT_LEN 4 + + +/***********************************/ +/* MC_CMD_DUMP_VI_STATE + * For CmdClient use. Dump pertinent information on a specific absolute VI. + */ +#define MC_CMD_DUMP_VI_STATE 0x8e +#undef MC_CMD_0x8e_PRIVILEGE_CTG + +#define MC_CMD_0x8e_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_DUMP_VI_STATE_IN msgrequest */ +#define MC_CMD_DUMP_VI_STATE_IN_LEN 4 +/* The VI number to query. */ +#define MC_CMD_DUMP_VI_STATE_IN_VI_NUMBER_OFST 0 +#define MC_CMD_DUMP_VI_STATE_IN_VI_NUMBER_LEN 4 + +/* MC_CMD_DUMP_VI_STATE_OUT msgresponse */ +#define MC_CMD_DUMP_VI_STATE_OUT_LEN 96 +/* The PF part of the function owning this VI. */ +#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_PF_OFST 0 +#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_PF_LEN 2 +/* The VF part of the function owning this VI. */ +#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_VF_OFST 2 +#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_VF_LEN 2 +/* Base of VIs allocated to this function. */ +#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_BASE_OFST 4 +#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_BASE_LEN 2 +/* Count of VIs allocated to the owner function. */ +#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_COUNT_OFST 6 +#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_COUNT_LEN 2 +/* Base interrupt vector allocated to this function. */ +#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_BASE_OFST 8 +#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_BASE_LEN 2 +/* Number of interrupt vectors allocated to this function. */ +#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_COUNT_OFST 10 +#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_COUNT_LEN 2 +/* Raw evq ptr table data. */ +#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_OFST 12 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LEN 8 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LO_OFST 12 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_HI_OFST 16 +/* Raw evq timer table data. */ +#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_OFST 20 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_LEN 8 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_LO_OFST 20 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_HI_OFST 24 +/* Combined metadata field. */ +#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_OFST 28 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_LEN 4 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_LBN 0 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_WIDTH 16 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_NPAGES_LBN 16 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_NPAGES_WIDTH 8 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_WKUP_REF_LBN 24 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_WKUP_REF_WIDTH 8 +/* TXDPCPU raw table data for queue. 
*/ +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_OFST 32 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_LEN 8 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_LO_OFST 32 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_HI_OFST 36 +/* TXDPCPU raw table data for queue. */ +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_OFST 40 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_LEN 8 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_LO_OFST 40 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_HI_OFST 44 +/* TXDPCPU raw table data for queue. */ +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_OFST 48 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_LEN 8 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_LO_OFST 48 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_HI_OFST 52 +/* Combined metadata field. */ +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_OFST 56 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LEN 8 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LO_OFST 56 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_HI_OFST 60 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_BASE_LBN 0 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_BASE_WIDTH 16 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_NPAGES_LBN 16 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_NPAGES_WIDTH 8 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_QSTATE_LBN 24 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_QSTATE_WIDTH 8 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_WAITCOUNT_LBN 32 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_WAITCOUNT_WIDTH 8 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_PADDING_LBN 40 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_PADDING_WIDTH 24 +/* RXDPCPU raw table data for queue. */ +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_OFST 64 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_LEN 8 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_LO_OFST 64 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_HI_OFST 68 +/* RXDPCPU raw table data for queue. */ +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_OFST 72 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_LEN 8 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_LO_OFST 72 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_HI_OFST 76 +/* Reserved, currently 0. */ +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_OFST 80 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_LEN 8 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_LO_OFST 80 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_HI_OFST 84 +/* Combined metadata field. */ +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_OFST 88 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LEN 8 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LO_OFST 88 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_HI_OFST 92 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_BASE_LBN 0 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_BASE_WIDTH 16 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_NPAGES_LBN 16 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_NPAGES_WIDTH 8 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_QSTATE_LBN 24 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_QSTATE_WIDTH 8 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_WAITCOUNT_LBN 32 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_WAITCOUNT_WIDTH 8 + + +/***********************************/ +/* MC_CMD_ALLOC_PIOBUF + * Allocate a push I/O buffer for later use with a tx queue. 
+ */ +#define MC_CMD_ALLOC_PIOBUF 0x8f +#undef MC_CMD_0x8f_PRIVILEGE_CTG + +#define MC_CMD_0x8f_PRIVILEGE_CTG SRIOV_CTG_ONLOAD + +/* MC_CMD_ALLOC_PIOBUF_IN msgrequest */ +#define MC_CMD_ALLOC_PIOBUF_IN_LEN 0 + +/* MC_CMD_ALLOC_PIOBUF_OUT msgresponse */ +#define MC_CMD_ALLOC_PIOBUF_OUT_LEN 4 +/* Handle for allocated push I/O buffer. */ +#define MC_CMD_ALLOC_PIOBUF_OUT_PIOBUF_HANDLE_OFST 0 +#define MC_CMD_ALLOC_PIOBUF_OUT_PIOBUF_HANDLE_LEN 4 + + +/***********************************/ +/* MC_CMD_FREE_PIOBUF + * Free a push I/O buffer. + */ +#define MC_CMD_FREE_PIOBUF 0x90 +#undef MC_CMD_0x90_PRIVILEGE_CTG + +#define MC_CMD_0x90_PRIVILEGE_CTG SRIOV_CTG_ONLOAD + +/* MC_CMD_FREE_PIOBUF_IN msgrequest */ +#define MC_CMD_FREE_PIOBUF_IN_LEN 4 +/* Handle for allocated push I/O buffer. */ +#define MC_CMD_FREE_PIOBUF_IN_PIOBUF_HANDLE_OFST 0 +#define MC_CMD_FREE_PIOBUF_IN_PIOBUF_HANDLE_LEN 4 + +/* MC_CMD_FREE_PIOBUF_OUT msgresponse */ +#define MC_CMD_FREE_PIOBUF_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_GET_VI_TLP_PROCESSING + * Get TLP steering and ordering information for a VI. + */ +#define MC_CMD_GET_VI_TLP_PROCESSING 0xb0 +#undef MC_CMD_0xb0_PRIVILEGE_CTG + +#define MC_CMD_0xb0_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_VI_TLP_PROCESSING_IN msgrequest */ +#define MC_CMD_GET_VI_TLP_PROCESSING_IN_LEN 4 +/* VI number to get information for. */ +#define MC_CMD_GET_VI_TLP_PROCESSING_IN_INSTANCE_OFST 0 +#define MC_CMD_GET_VI_TLP_PROCESSING_IN_INSTANCE_LEN 4 + +/* MC_CMD_GET_VI_TLP_PROCESSING_OUT msgresponse */ +#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_LEN 4 +/* Transaction processing steering hint 1 for use with the Rx Queue. */ +#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG1_RX_OFST 0 +#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG1_RX_LEN 1 +/* Transaction processing steering hint 2 for use with the Ev Queue. */ +#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG2_EV_OFST 1 +#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG2_EV_LEN 1 +/* Use Relaxed ordering model for TLPs on this VI. */ +#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_RELAXED_ORDERING_LBN 16 +#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_RELAXED_ORDERING_WIDTH 1 +/* Use ID based ordering for TLPs on this VI. */ +#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_ID_BASED_ORDERING_LBN 17 +#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_ID_BASED_ORDERING_WIDTH 1 +/* Set no snoop bit for TLPs on this VI. */ +#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_NO_SNOOP_LBN 18 +#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_NO_SNOOP_WIDTH 1 +/* Enable TPH for TLPs on this VI. */ +#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_ON_LBN 19 +#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_ON_WIDTH 1 +#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_DATA_OFST 0 +#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_DATA_LEN 4 + + +/***********************************/ +/* MC_CMD_SET_VI_TLP_PROCESSING + * Set TLP steering and ordering information for a VI. + */ +#define MC_CMD_SET_VI_TLP_PROCESSING 0xb1 +#undef MC_CMD_0xb1_PRIVILEGE_CTG + +#define MC_CMD_0xb1_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_SET_VI_TLP_PROCESSING_IN msgrequest */ +#define MC_CMD_SET_VI_TLP_PROCESSING_IN_LEN 8 +/* VI number to set information for. */ +#define MC_CMD_SET_VI_TLP_PROCESSING_IN_INSTANCE_OFST 0 +#define MC_CMD_SET_VI_TLP_PROCESSING_IN_INSTANCE_LEN 4 +/* Transaction processing steering hint 1 for use with the Rx Queue. 
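The GET_VI_TLP_PROCESSING response above packs the two TPH tags into the low bytes of a single 32-bit DATA word and the ordering/no-snoop/TPH enables into bits 16-19, as its _LBN/_WIDTH pairs indicate. A sketch of decoding it, assuming the definitions above are in scope and a little-endian host (the resp buffer and helper names are hypothetical):

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

static bool
vi_tlp_relaxed_ordering(const uint8_t *resp)
{
	uint32_t data;

	memcpy(&data, resp + MC_CMD_GET_VI_TLP_PROCESSING_OUT_DATA_OFST,
	       sizeof(data));
	return (data >> MC_CMD_GET_VI_TLP_PROCESSING_OUT_RELAXED_ORDERING_LBN) & 1u;
}

static uint8_t
vi_tlp_tph_tag1_rx(const uint8_t *resp)
{
	/* One-byte field at offset 0 of the response. */
	return resp[MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG1_RX_OFST];
}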
*/ +#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG1_RX_OFST 4 +#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG1_RX_LEN 1 +/* Transaction processing steering hint 2 for use with the Ev Queue. */ +#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG2_EV_OFST 5 +#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG2_EV_LEN 1 +/* Use Relaxed ordering model for TLPs on this VI. */ +#define MC_CMD_SET_VI_TLP_PROCESSING_IN_RELAXED_ORDERING_LBN 48 +#define MC_CMD_SET_VI_TLP_PROCESSING_IN_RELAXED_ORDERING_WIDTH 1 +/* Use ID based ordering for TLPs on this VI. */ +#define MC_CMD_SET_VI_TLP_PROCESSING_IN_ID_BASED_ORDERING_LBN 49 +#define MC_CMD_SET_VI_TLP_PROCESSING_IN_ID_BASED_ORDERING_WIDTH 1 +/* Set the no snoop bit for TLPs on this VI. */ +#define MC_CMD_SET_VI_TLP_PROCESSING_IN_NO_SNOOP_LBN 50 +#define MC_CMD_SET_VI_TLP_PROCESSING_IN_NO_SNOOP_WIDTH 1 +/* Enable TPH for TLPs on this VI. */ +#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_ON_LBN 51 +#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_ON_WIDTH 1 +#define MC_CMD_SET_VI_TLP_PROCESSING_IN_DATA_OFST 4 +#define MC_CMD_SET_VI_TLP_PROCESSING_IN_DATA_LEN 4 + +/* MC_CMD_SET_VI_TLP_PROCESSING_OUT msgresponse */ +#define MC_CMD_SET_VI_TLP_PROCESSING_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_GET_TLP_PROCESSING_GLOBALS + * Get global PCIe steering and transaction processing configuration. + */ +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS 0xbc +#undef MC_CMD_0xbc_PRIVILEGE_CTG + +#define MC_CMD_0xbc_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN msgrequest */ +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_LEN 4 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_OFST 0 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_LEN 4 +/* enum: MISC. */ +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_MISC 0x0 +/* enum: IDO. */ +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_IDO 0x1 +/* enum: RO. */ +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_RO 0x2 +/* enum: TPH Type. */ +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_TPH_TYPE 0x3 + +/* MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT msgresponse */ +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_LEN 8 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_GLOBAL_CATEGORY_OFST 0 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_GLOBAL_CATEGORY_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN/TLP_GLOBAL_CATEGORY */ +/* Amalgamated TLP info word. 
*/ +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_WORD_OFST 4 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_WORD_LEN 4 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_WTAG_EN_LBN 0 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_WTAG_EN_WIDTH 1 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_SPARE_LBN 1 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_SPARE_WIDTH 31 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_DL_EN_LBN 0 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_DL_EN_WIDTH 1 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_TX_EN_LBN 1 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_TX_EN_WIDTH 1 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_EV_EN_LBN 2 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_EV_EN_WIDTH 1 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_RX_EN_LBN 3 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_RX_EN_WIDTH 1 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_SPARE_LBN 4 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_SPARE_WIDTH 28 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_RXDMA_EN_LBN 0 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_RXDMA_EN_WIDTH 1 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_TXDMA_EN_LBN 1 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_TXDMA_EN_WIDTH 1 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_DL_EN_LBN 2 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_DL_EN_WIDTH 1 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_SPARE_LBN 3 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_SPARE_WIDTH 29 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_MSIX_LBN 0 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_MSIX_WIDTH 2 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_DL_LBN 2 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_DL_WIDTH 2 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_TX_LBN 4 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_TX_WIDTH 2 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_EV_LBN 6 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_EV_WIDTH 2 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_RX_LBN 8 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_RX_WIDTH 2 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TLP_TYPE_SPARE_LBN 9 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TLP_TYPE_SPARE_WIDTH 23 + + +/***********************************/ +/* MC_CMD_SET_TLP_PROCESSING_GLOBALS + * Set global PCIe steering and transaction processing configuration. + */ +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS 0xbd +#undef MC_CMD_0xbd_PRIVILEGE_CTG + +#define MC_CMD_0xbd_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN msgrequest */ +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_LEN 8 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_OFST 0 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_GET_TLP_PROCESSING_GLOBALS/MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN/TLP_GLOBAL_CATEGORY */ +/* Amalgamated TLP info word. 
*/ +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_WORD_OFST 4 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_WORD_LEN 4 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_MISC_WTAG_EN_LBN 0 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_MISC_WTAG_EN_WIDTH 1 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_DL_EN_LBN 0 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_DL_EN_WIDTH 1 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_TX_EN_LBN 1 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_TX_EN_WIDTH 1 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_EV_EN_LBN 2 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_EV_EN_WIDTH 1 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_RX_EN_LBN 3 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_RX_EN_WIDTH 1 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_RXDMA_EN_LBN 0 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_RXDMA_EN_WIDTH 1 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_TXDMA_EN_LBN 1 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_TXDMA_EN_WIDTH 1 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_DL_EN_LBN 2 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_DL_EN_WIDTH 1 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_MSIX_LBN 0 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_MSIX_WIDTH 2 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_DL_LBN 2 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_DL_WIDTH 2 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_TX_LBN 4 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_TX_WIDTH 2 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_EV_LBN 6 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_EV_WIDTH 2 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_RX_LBN 8 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_RX_WIDTH 2 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_SPARE_LBN 10 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_SPARE_WIDTH 22 + +/* MC_CMD_SET_TLP_PROCESSING_GLOBALS_OUT msgresponse */ +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_SATELLITE_DOWNLOAD + * Download a new set of images to the satellite CPUs from the host. + */ +#define MC_CMD_SATELLITE_DOWNLOAD 0x91 +#undef MC_CMD_0x91_PRIVILEGE_CTG + +#define MC_CMD_0x91_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND + +/* MC_CMD_SATELLITE_DOWNLOAD_IN msgrequest: The reset requirements for the CPUs + * are subtle, and so downloads must proceed in a number of phases. + * + * 1) PHASE_RESET with a target of TARGET_ALL and chunk ID/length of 0. + * + * 2) PHASE_IMEMS for each of the IMEM targets (target IDs 0-11). Each download + * may consist of multiple chunks. The final chunk (with CHUNK_ID_LAST) should + * be a checksum (a simple 32-bit sum) of the transferred data. An individual + * download may be aborted using CHUNK_ID_ABORT. + * + * 3) PHASE_VECTORS for each of the vector table targets (target IDs 12-15), + * similar to PHASE_IMEMS. + * + * 4) PHASE_READY with a target of TARGET_ALL and chunk ID/length of 0. + * + * After any error (a requested abort is not considered to be an error) the + * sequence must be restarted from PHASE_RESET. 
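The final CHUNK_ID_LAST chunk of each PHASE_IMEMS / PHASE_VECTORS download described above carries a checksum rather than payload, stated as a simple 32-bit sum of the transferred data. A sketch of that sum, assuming (since the text does not spell it out) that it is taken over the 32-bit CHUNK_DATA words already sent (the function name is illustrative):

#include <stddef.h>
#include <stdint.h>

/*
 * Simple 32-bit sum of the download payload, to be sent as the
 * CHUNK_ID_LAST chunk; wraps naturally modulo 2^32.
 */
static uint32_t
satellite_download_checksum(const uint32_t *words, size_t nwords)
{
	uint32_t sum = 0;
	size_t i;

	for (i = 0; i < nwords; i++)
		sum += words[i];
	return sum;
}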
+ */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_LENMIN 20 +#define MC_CMD_SATELLITE_DOWNLOAD_IN_LENMAX 252 +#define MC_CMD_SATELLITE_DOWNLOAD_IN_LENMAX_MCDI2 1020 +#define MC_CMD_SATELLITE_DOWNLOAD_IN_LEN(num) (16+4*(num)) +#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_NUM(len) (((len)-16)/4) +/* Download phase. (Note: the IDLE phase is used internally and is never valid + * in a command from the host.) + */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_OFST 0 +#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_LEN 4 +#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_IDLE 0x0 /* enum */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_RESET 0x1 /* enum */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_IMEMS 0x2 /* enum */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_VECTORS 0x3 /* enum */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_READY 0x4 /* enum */ +/* Target for download. (These match the blob numbers defined in + * mc_flash_layout.h.) + */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_OFST 4 +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_LEN 4 +/* enum: Valid in phase 2 (PHASE_IMEMS) only */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_TEXT 0x0 +/* enum: Valid in phase 2 (PHASE_IMEMS) only */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_TEXT 0x1 +/* enum: Valid in phase 2 (PHASE_IMEMS) only */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDP_TEXT 0x2 +/* enum: Valid in phase 2 (PHASE_IMEMS) only */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDP_TEXT 0x3 +/* enum: Valid in phase 2 (PHASE_IMEMS) only */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_LUT 0x4 +/* enum: Valid in phase 2 (PHASE_IMEMS) only */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_LUT_CFG 0x5 +/* enum: Valid in phase 2 (PHASE_IMEMS) only */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_LUT 0x6 +/* enum: Valid in phase 2 (PHASE_IMEMS) only */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_LUT_CFG 0x7 +/* enum: Valid in phase 2 (PHASE_IMEMS) only */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_PGM 0x8 +/* enum: Valid in phase 2 (PHASE_IMEMS) only */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_SL_PGM 0x9 +/* enum: Valid in phase 2 (PHASE_IMEMS) only */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_PGM 0xa +/* enum: Valid in phase 2 (PHASE_IMEMS) only */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_SL_PGM 0xb +/* enum: Valid in phase 3 (PHASE_VECTORS) only */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_VTBL0 0xc +/* enum: Valid in phase 3 (PHASE_VECTORS) only */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_VTBL0 0xd +/* enum: Valid in phase 3 (PHASE_VECTORS) only */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_VTBL1 0xe +/* enum: Valid in phase 3 (PHASE_VECTORS) only */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_VTBL1 0xf +/* enum: Valid in phases 1 (PHASE_RESET) and 4 (PHASE_READY) only */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_ALL 0xffffffff +/* Chunk ID, or CHUNK_ID_LAST or CHUNK_ID_ABORT */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_OFST 8 +#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_LEN 4 +/* enum: Last chunk, containing checksum rather than data */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_LAST 0xffffffff +/* enum: Abort download of this item */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_ABORT 0xfffffffe +/* Length of this chunk in bytes */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_LEN_OFST 12 +#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_LEN_LEN 4 +/* Data for this chunk */ +#define 
MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_OFST 16 +#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_LEN 4 +#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_MINNUM 1 +#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_MAXNUM 59 +#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_MAXNUM_MCDI2 251 + +/* MC_CMD_SATELLITE_DOWNLOAD_OUT msgresponse */ +#define MC_CMD_SATELLITE_DOWNLOAD_OUT_LEN 8 +/* Same as MC_CMD_ERR field, but included as 0 in success cases */ +#define MC_CMD_SATELLITE_DOWNLOAD_OUT_RESULT_OFST 0 +#define MC_CMD_SATELLITE_DOWNLOAD_OUT_RESULT_LEN 4 +/* Extra status information */ +#define MC_CMD_SATELLITE_DOWNLOAD_OUT_INFO_OFST 4 +#define MC_CMD_SATELLITE_DOWNLOAD_OUT_INFO_LEN 4 +/* enum: Code download OK, completed. */ +#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_COMPLETE 0x0 +/* enum: Code download aborted as requested. */ +#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_ABORTED 0x1 +/* enum: Code download OK so far, send next chunk. */ +#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_NEXT_CHUNK 0x2 +/* enum: Download phases out of sequence */ +#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_PHASE 0x100 +/* enum: Bad target for this phase */ +#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_TARGET 0x101 +/* enum: Chunk ID out of sequence */ +#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHUNK_ID 0x200 +/* enum: Chunk length zero or too large */ +#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHUNK_LEN 0x201 +/* enum: Checksum was incorrect */ +#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHECKSUM 0x300 + + +/***********************************/ +/* MC_CMD_GET_CAPABILITIES + * Get device capabilities. + * + * This is supplementary to the MC_CMD_GET_BOARD_CFG command, and intended to + * reference inherent device capabilities as opposed to current NVRAM config. + */ +#define MC_CMD_GET_CAPABILITIES 0xbe +#undef MC_CMD_0xbe_PRIVILEGE_CTG + +#define MC_CMD_0xbe_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_CAPABILITIES_IN msgrequest */ +#define MC_CMD_GET_CAPABILITIES_IN_LEN 0 + +/* MC_CMD_GET_CAPABILITIES_OUT msgresponse */ +#define MC_CMD_GET_CAPABILITIES_OUT_LEN 20 +/* First word of flags. 
*/ +#define MC_CMD_GET_CAPABILITIES_OUT_FLAGS1_OFST 0 +#define MC_CMD_GET_CAPABILITIES_OUT_FLAGS1_LEN 4 +#define MC_CMD_GET_CAPABILITIES_OUT_VPORT_RECONFIGURE_LBN 3 +#define MC_CMD_GET_CAPABILITIES_OUT_VPORT_RECONFIGURE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_TX_STRIPING_LBN 4 +#define MC_CMD_GET_CAPABILITIES_OUT_TX_STRIPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_QUERY_LBN 5 +#define MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_QUERY_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6 +#define MC_CMD_GET_CAPABILITIES_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_DRV_ATTACH_PREBOOT_LBN 7 +#define MC_CMD_GET_CAPABILITIES_OUT_DRV_ATTACH_PREBOOT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_FORCE_EVENT_MERGING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_SET_MAC_ENHANCED_LBN 9 +#define MC_CMD_GET_CAPABILITIES_OUT_SET_MAC_ENHANCED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10 +#define MC_CMD_GET_CAPABILITIES_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11 +#define MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_TX_MAC_SECURITY_FILTERING_LBN 12 +#define MC_CMD_GET_CAPABILITIES_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_ADDITIONAL_RSS_MODES_LBN 13 +#define MC_CMD_GET_CAPABILITIES_OUT_ADDITIONAL_RSS_MODES_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_QBB_LBN 14 +#define MC_CMD_GET_CAPABILITIES_OUT_QBB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_RSS_LIMITED_LBN 16 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_RSS_LIMITED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_LBN 17 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_INCLUDE_FCS_LBN 18 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_INCLUDE_FCS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_TX_VLAN_INSERTION_LBN 19 +#define MC_CMD_GET_CAPABILITIES_OUT_TX_VLAN_INSERTION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_VLAN_STRIPPING_LBN 20 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_VLAN_STRIPPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN 21 +#define MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_0_LBN 22 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_0_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_LBN 23 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_TIMESTAMP_LBN 24 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN 25 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_LBN 26 +#define MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN 27 +#define MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_DISABLE_SCATTER_LBN 28 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_DISABLE_SCATTER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29 +#define 
MC_CMD_GET_CAPABILITIES_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN 30 +#define MC_CMD_GET_CAPABILITIES_OUT_EVB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN 31 +#define MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_WIDTH 1 +/* RxDPCPU firmware id. */ +#define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_OFST 4 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_LEN 2 +/* enum: Standard RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP 0x0 +/* enum: Low latency RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_LOW_LATENCY 0x1 +/* enum: Packed stream RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_PACKED_STREAM 0x2 +/* enum: Rules engine RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_RULES_ENGINE 0x5 +/* enum: DPDK RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_DPDK 0x6 +/* enum: BIST RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_BIST 0x10a +/* enum: RXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101 +/* enum: RXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102 +/* enum: RXDP Test firmware image 3 */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103 +/* enum: RXDP Test firmware image 4 */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104 +/* enum: RXDP Test firmware image 5 */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_BACKPRESSURE 0x105 +/* enum: RXDP Test firmware image 6 */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106 +/* enum: RXDP Test firmware image 7 */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107 +/* enum: RXDP Test firmware image 8 */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_DISABLE_DL 0x108 +/* enum: RXDP Test firmware image 9 */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b +/* enum: RXDP Test firmware image 10 */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_SLOW 0x10c +/* TxDPCPU firmware id. 
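Each capability flag above is an _LBN (lowest bit number) / _WIDTH pair inside the 32-bit FLAGS1 word, and the datapath firmware IDs are plain little-endian fields at their _OFST. A sketch of testing one flag and reading the RX datapath firmware ID from a GET_CAPABILITIES response, assuming the definitions above are in scope and a little-endian host (the resp buffer and helper names are illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

/* Extract a WIDTH-bit field (WIDTH < 32 here) starting at bit LBN. */
static uint32_t
mcdi_field(uint32_t word, unsigned int lbn, unsigned int width)
{
	return (word >> lbn) & ((1u << width) - 1u);
}

static bool
caps_rx_prefix_len_14(const uint8_t *resp)
{
	uint32_t flags1;

	memcpy(&flags1, resp + MC_CMD_GET_CAPABILITIES_OUT_FLAGS1_OFST,
	       sizeof(flags1));
	return mcdi_field(flags1,
			  MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_LBN,
			  MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_WIDTH) != 0;
}

static uint16_t
caps_rx_dpcpu_fw_id(const uint8_t *resp)
{
	uint16_t id;	/* e.g. MC_CMD_GET_CAPABILITIES_OUT_RXDP_LOW_LATENCY */

	memcpy(&id, resp + MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_OFST,
	       sizeof(id));
	return id;
}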
*/ +#define MC_CMD_GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID_OFST 6 +#define MC_CMD_GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID_LEN 2 +/* enum: Standard TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXDP 0x0 +/* enum: Low latency TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_LOW_LATENCY 0x1 +/* enum: High packet rate TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_HIGH_PACKET_RATE 0x3 +/* enum: Rules engine TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_RULES_ENGINE 0x5 +/* enum: DPDK TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_DPDK 0x6 +/* enum: BIST TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_BIST 0x12d +/* enum: TXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_TSO_EDIT 0x101 +/* enum: TXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102 +/* enum: TXDP CSR bus test firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_CSR 0x103 +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_OFST 8 +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4 +/* enum: reserved value - do not use (may indicate alternative interpretation + * of REV field in future) + */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_RESERVED 0x0 +/* enum: Trivial RX PD firmware for early Huntington development (Huntington + * development only) + */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: RX PD firmware for telemetry prototyping (Medford2 development only) + */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 +/* enum: RX PD firmware with approximately Siena-compatible behaviour + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 +/* enum: Full featured RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3 +/* enum: (deprecated original name for the FULL_FEATURED variant) */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_VSWITCH 0x3 +/* enum: siena_compat variant RX PD firmware using PM rather than MAC + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +/* enum: Low latency RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 +/* enum: Packed stream RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6 +/* enum: RX PD firmware handling layer 2 only for high packet rate performance + * tests (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7 +/* enum: Rules engine RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_DPDK 0xa +/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +/* enum: RX PD firmware parsing but not filtering network overlay tunnel + * encapsulations (Medford development only) + */ +#define 
MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_OFST 10 +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4 +/* enum: reserved value - do not use (may indicate alternative interpretation + * of REV field in future) + */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_RESERVED 0x0 +/* enum: Trivial TX PD firmware for early Huntington development (Huntington + * development only) + */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: TX PD firmware for telemetry prototyping (Medford2 development only) + */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 +/* enum: TX PD firmware with approximately Siena-compatible behaviour + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2 +/* enum: Full featured TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3 +/* enum: (deprecated original name for the FULL_FEATURED variant) */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_VSWITCH 0x3 +/* enum: siena_compat variant TX PD firmware using PM rather than MAC + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */ +/* enum: TX PD firmware handling layer 2 only for high packet rate performance + * tests (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7 +/* enum: Rules engine TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_DPDK 0xa +/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +/* Hardware capabilities of NIC */ +#define MC_CMD_GET_CAPABILITIES_OUT_HW_CAPABILITIES_OFST 12 +#define MC_CMD_GET_CAPABILITIES_OUT_HW_CAPABILITIES_LEN 4 +/* Licensed capabilities */ +#define MC_CMD_GET_CAPABILITIES_OUT_LICENSE_CAPABILITIES_OFST 16 +#define MC_CMD_GET_CAPABILITIES_OUT_LICENSE_CAPABILITIES_LEN 4 + +/* MC_CMD_GET_CAPABILITIES_V2_IN msgrequest */ +#define MC_CMD_GET_CAPABILITIES_V2_IN_LEN 0 + +/* MC_CMD_GET_CAPABILITIES_V2_OUT msgresponse */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_LEN 72 +/* First word of flags. 
*/ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS1_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS1_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_VPORT_RECONFIGURE_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_VPORT_RECONFIGURE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_STRIPING_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_STRIPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_QUERY_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_QUERY_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_DRV_ATTACH_PREBOOT_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_DRV_ATTACH_PREBOOT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_FORCE_EVENT_MERGING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_SET_MAC_ENHANCED_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_SET_MAC_ENHANCED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_SECURITY_FILTERING_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_ADDITIONAL_RSS_MODES_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_ADDITIONAL_RSS_MODES_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_QBB_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_QBB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_RSS_LIMITED_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_RSS_LIMITED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_INCLUDE_FCS_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_INCLUDE_FCS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VLAN_INSERTION_LBN 19 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VLAN_INSERTION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_VLAN_STRIPPING_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_VLAN_STRIPPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_0_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_0_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_14_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_14_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_TIMESTAMP_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_BATCHING_LBN 25 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_BATCHING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCAST_FILTER_CHAINING_LBN 26 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCAST_FILTER_CHAINING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_PM_AND_RXDP_COUNTERS_LBN 27 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DISABLE_SCATTER_LBN 28 +#define 
MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DISABLE_SCATTER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_LBN 30 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_VXLAN_NVGRE_LBN 31 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_VXLAN_NVGRE_WIDTH 1 +/* RxDPCPU firmware id. */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DPCPU_FW_ID_OFST 4 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DPCPU_FW_ID_LEN 2 +/* enum: Standard RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP 0x0 +/* enum: Low latency RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_LOW_LATENCY 0x1 +/* enum: Packed stream RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_PACKED_STREAM 0x2 +/* enum: Rules engine RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_RULES_ENGINE 0x5 +/* enum: DPDK RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_DPDK 0x6 +/* enum: BIST RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_BIST 0x10a +/* enum: RXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101 +/* enum: RXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102 +/* enum: RXDP Test firmware image 3 */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103 +/* enum: RXDP Test firmware image 4 */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104 +/* enum: RXDP Test firmware image 5 */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_BACKPRESSURE 0x105 +/* enum: RXDP Test firmware image 6 */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106 +/* enum: RXDP Test firmware image 7 */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107 +/* enum: RXDP Test firmware image 8 */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_DISABLE_DL 0x108 +/* enum: RXDP Test firmware image 9 */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b +/* enum: RXDP Test firmware image 10 */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_SLOW 0x10c +/* TxDPCPU firmware id. 
*/ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DPCPU_FW_ID_OFST 6 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DPCPU_FW_ID_LEN 2 +/* enum: Standard TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP 0x0 +/* enum: Low latency TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_LOW_LATENCY 0x1 +/* enum: High packet rate TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_HIGH_PACKET_RATE 0x3 +/* enum: Rules engine TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_RULES_ENGINE 0x5 +/* enum: DPDK TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_DPDK 0x6 +/* enum: BIST TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_BIST 0x12d +/* enum: TXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_TEST_FW_TSO_EDIT 0x101 +/* enum: TXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102 +/* enum: TXDP CSR bus test firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_TEST_FW_CSR 0x103 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4 +/* enum: reserved value - do not use (may indicate alternative interpretation + * of REV field in future) + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_RESERVED 0x0 +/* enum: Trivial RX PD firmware for early Huntington development (Huntington + * development only) + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: RX PD firmware for telemetry prototyping (Medford2 development only) + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 +/* enum: RX PD firmware with approximately Siena-compatible behaviour + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 +/* enum: Full featured RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3 +/* enum: (deprecated original name for the FULL_FEATURED variant) */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_VSWITCH 0x3 +/* enum: siena_compat variant RX PD firmware using PM rather than MAC + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +/* enum: Low latency RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 +/* enum: Packed stream RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6 +/* enum: RX PD firmware handling layer 2 only for high packet rate performance + * tests (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7 +/* enum: Rules engine RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_DPDK 0xa +/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +/* enum: RX PD firmware parsing but not filtering 
network overlay tunnel + * encapsulations (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4 +/* enum: reserved value - do not use (may indicate alternative interpretation + * of REV field in future) + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_RESERVED 0x0 +/* enum: Trivial TX PD firmware for early Huntington development (Huntington + * development only) + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: TX PD firmware for telemetry prototyping (Medford2 development only) + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 +/* enum: TX PD firmware with approximately Siena-compatible behaviour + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2 +/* enum: Full featured TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3 +/* enum: (deprecated original name for the FULL_FEATURED variant) */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_VSWITCH 0x3 +/* enum: siena_compat variant TX PD firmware using PM rather than MAC + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */ +/* enum: TX PD firmware handling layer 2 only for high packet rate performance + * tests (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7 +/* enum: Rules engine TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_DPDK 0xa +/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +/* Hardware capabilities of NIC */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_HW_CAPABILITIES_OFST 12 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_HW_CAPABILITIES_LEN 4 +/* Licensed capabilities */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_LICENSE_CAPABILITIES_OFST 16 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_LICENSE_CAPABILITIES_LEN 4 +/* Second word of flags. Not present on older firmware (check the length). 
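The RXPD/TXPD firmware version words above pack a 12-bit revision and a 4-bit type code, as described by the _REV and _TYPE LBN/WIDTH pairs. A small sketch of splitting such a 16-bit value; the CAP_* shorthands and the pd_fw_version struct are hypothetical, with values mirrored from those definitions.

#include <stdint.h>

/* Hypothetical shorthands; values mirrored from the definitions above. */
#define CAP_FW_VERSION_REV_LBN     0
#define CAP_FW_VERSION_REV_WIDTH   12
#define CAP_FW_VERSION_TYPE_LBN    12
#define CAP_FW_VERSION_TYPE_WIDTH  4

struct pd_fw_version {
        uint16_t rev;   /* revision, bits 0..11 */
        uint8_t  type;  /* type code, bits 12..15 (e.g. 0x3 = full featured) */
};

/* Split a raw 16-bit RXPD/TXPD firmware version into its two subfields. */
static struct pd_fw_version cap_decode_pd_version(uint16_t raw)
{
        struct pd_fw_version v;

        v.rev  = (raw >> CAP_FW_VERSION_REV_LBN) &
                 ((1u << CAP_FW_VERSION_REV_WIDTH) - 1);
        v.type = (raw >> CAP_FW_VERSION_TYPE_LBN) &
                 ((1u << CAP_FW_VERSION_TYPE_WIDTH) - 1);
        return v;
}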
*/ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS2_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_ENCAP_LBN 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_ENCAP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVQ_TIMER_CTRL_LBN 2 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVQ_TIMER_CTRL_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVENT_CUT_THROUGH_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVENT_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_CUT_THROUGH_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VFIFO_ULL_MODE_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VFIFO_ULL_MODE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_V2_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_TIMESTAMPING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_TIMESTAMPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TIMESTAMP_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_SNIFF_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_SNIFF_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_BACKGROUND_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_BACKGROUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_DB_RETURN_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_DB_RETURN_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_CTPIO_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_CTPIO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_SUPPORT_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_SUPPORT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_BOUND_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_BOUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_FILTER_ACTION_FLAG_LBN 19 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_FILTER_ACTION_FLAG_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_FILTER_ACTION_MARK_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_FILTER_ACTION_MARK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_L3XUDP_SUPPORT_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_L3XUDP_SUPPORT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_VI_SPREADING_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_VI_SPREADING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_HLB_IDLE_LBN 25 +#define 
MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_HLB_IDLE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_RXQ_NO_CONT_EV_LBN 26 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_BUNDLE_UPDATE_LBN 28 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_BUNDLE_UPDATE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V3_LBN 29 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V3_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_DYNAMIC_SENSORS_LBN 30 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_DYNAMIC_SENSORS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1 +/* Number of FATSOv2 contexts per datapath supported by this NIC (when + * TX_TSO_V2 == 1). Not present on older firmware (check the length). + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2 +/* One byte per PF containing the number of the external port assigned to this + * PF, indexed by PF number. Special values indicate that a PF is either not + * present or not assigned. + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16 +/* enum: The caller is not permitted to access information on this PF. */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_ACCESS_NOT_PERMITTED 0xff +/* enum: PF does not exist. */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_PRESENT 0xfe +/* enum: PF does exist but is not assigned to any external port. */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_ASSIGNED 0xfd +/* enum: This value indicates that PF is assigned, but it cannot be expressed + * in this field. It is intended for a possible future situation where a more + * complex scheme of PFs to ports mapping is being used. The future driver + * should look for a new field supporting the new scheme. The current/old + * driver should treat this value as PF_NOT_ASSIGNED. + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc +/* One byte per PF containing the number of its VFs, indexed by PF number. A + * special value indicates that a PF is not present. + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VFS_PER_PF_OFST 42 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VFS_PER_PF_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VFS_PER_PF_NUM 16 +/* enum: The caller is not permitted to access information on this PF. */ +/* MC_CMD_GET_CAPABILITIES_V2_OUT_ACCESS_NOT_PERMITTED 0xff */ +/* enum: PF does not exist. 
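The comment introducing FLAGS2 stresses that the second flags word is only present when the firmware returns a long enough response, and each capability bit is described by an _LBN (least significant bit number) and _WIDTH pair. A minimal sketch of a length-checked flag test follows; the CAP_* shorthands are hypothetical, their values mirror the V2 definitions above, and rsp/rsp_len are assumed to describe the raw little-endian response.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Hypothetical shorthands; values mirrored from the V2 definitions above. */
#define CAP_V2_FLAGS2_OFST   20
#define CAP_V2_FLAGS2_LEN    4
#define CAP_V2_TX_TSO_V2_LBN 0
#define CAP_V2_CTPIO_LBN     15

/* Assemble a 32-bit flags word from a little-endian byte buffer. */
static uint32_t cap_get32(const uint8_t *rsp, size_t ofst)
{
        return (uint32_t)rsp[ofst] |
               ((uint32_t)rsp[ofst + 1] << 8) |
               ((uint32_t)rsp[ofst + 2] << 16) |
               ((uint32_t)rsp[ofst + 3] << 24);
}

/* A FLAGS2 capability is usable only if the response actually contains
 * FLAGS2 ("check the length") and the corresponding bit is set. */
static bool cap_flag2(const uint8_t *rsp, size_t rsp_len, unsigned int lbn)
{
        if (rsp_len < CAP_V2_FLAGS2_OFST + CAP_V2_FLAGS2_LEN)
                return false;   /* older firmware: flag absent */
        return (cap_get32(rsp, CAP_V2_FLAGS2_OFST) >> lbn) & 1u;
}

For example, cap_flag2(rsp, rsp_len, CAP_V2_TX_TSO_V2_LBN) would report whether FATSOv2 is advertised before the driver attempts to read the TX_TSO_V2_N_CONTEXTS field.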
*/ +/* MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_PRESENT 0xfe */ +/* Number of VIs available for each external port */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VIS_PER_PORT_OFST 58 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VIS_PER_PORT_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VIS_PER_PORT_NUM 4 +/* Size of RX descriptor cache expressed as binary logarithm The actual size + * equals (2 ^ RX_DESC_CACHE_SIZE) + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DESC_CACHE_SIZE_OFST 66 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DESC_CACHE_SIZE_LEN 1 +/* Size of TX descriptor cache expressed as binary logarithm The actual size + * equals (2 ^ TX_DESC_CACHE_SIZE) + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DESC_CACHE_SIZE_OFST 67 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DESC_CACHE_SIZE_LEN 1 +/* Total number of available PIO buffers */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_PIO_BUFFS_OFST 68 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_PIO_BUFFS_LEN 2 +/* Size of a single PIO buffer */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_SIZE_PIO_BUFF_OFST 70 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_SIZE_PIO_BUFF_LEN 2 + +/* MC_CMD_GET_CAPABILITIES_V3_OUT msgresponse */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_LEN 76 +/* First word of flags. */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS1_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS1_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VPORT_RECONFIGURE_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VPORT_RECONFIGURE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_STRIPING_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_STRIPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_QUERY_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_QUERY_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_DRV_ATTACH_PREBOOT_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_DRV_ATTACH_PREBOOT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_FORCE_EVENT_MERGING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_SET_MAC_ENHANCED_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_SET_MAC_ENHANCED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_SECURITY_FILTERING_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_ADDITIONAL_RSS_MODES_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_ADDITIONAL_RSS_MODES_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_QBB_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_QBB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_RSS_LIMITED_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_RSS_LIMITED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_INCLUDE_FCS_LBN 18 +#define 
MC_CMD_GET_CAPABILITIES_V3_OUT_RX_INCLUDE_FCS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VLAN_INSERTION_LBN 19 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VLAN_INSERTION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_VLAN_STRIPPING_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_VLAN_STRIPPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_0_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_0_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_14_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_14_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_TIMESTAMP_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_BATCHING_LBN 25 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_BATCHING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCAST_FILTER_CHAINING_LBN 26 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCAST_FILTER_CHAINING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_PM_AND_RXDP_COUNTERS_LBN 27 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DISABLE_SCATTER_LBN 28 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DISABLE_SCATTER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_LBN 30 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VXLAN_NVGRE_LBN 31 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VXLAN_NVGRE_WIDTH 1 +/* RxDPCPU firmware id. */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DPCPU_FW_ID_OFST 4 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DPCPU_FW_ID_LEN 2 +/* enum: Standard RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP 0x0 +/* enum: Low latency RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_LOW_LATENCY 0x1 +/* enum: Packed stream RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_PACKED_STREAM 0x2 +/* enum: Rules engine RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_RULES_ENGINE 0x5 +/* enum: DPDK RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_DPDK 0x6 +/* enum: BIST RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_BIST 0x10a +/* enum: RXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101 +/* enum: RXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102 +/* enum: RXDP Test firmware image 3 */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103 +/* enum: RXDP Test firmware image 4 */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104 +/* enum: RXDP Test firmware image 5 */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_BACKPRESSURE 0x105 +/* enum: RXDP Test firmware image 6 */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106 +/* enum: RXDP Test firmware image 7 */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107 +/* enum: RXDP Test firmware image 8 */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_DISABLE_DL 0x108 +/* enum: RXDP Test firmware image 9 */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b +/* enum: RXDP Test firmware image 10 */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_SLOW 0x10c +/* 
TxDPCPU firmware id. */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_DPCPU_FW_ID_OFST 6 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_DPCPU_FW_ID_LEN 2 +/* enum: Standard TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP 0x0 +/* enum: Low latency TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_LOW_LATENCY 0x1 +/* enum: High packet rate TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_HIGH_PACKET_RATE 0x3 +/* enum: Rules engine TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_RULES_ENGINE 0x5 +/* enum: DPDK TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_DPDK 0x6 +/* enum: BIST TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_BIST 0x12d +/* enum: TXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_TEST_FW_TSO_EDIT 0x101 +/* enum: TXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102 +/* enum: TXDP CSR bus test firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_TEST_FW_CSR 0x103 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4 +/* enum: reserved value - do not use (may indicate alternative interpretation + * of REV field in future) + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_RESERVED 0x0 +/* enum: Trivial RX PD firmware for early Huntington development (Huntington + * development only) + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: RX PD firmware for telemetry prototyping (Medford2 development only) + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 +/* enum: RX PD firmware with approximately Siena-compatible behaviour + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 +/* enum: Full featured RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3 +/* enum: (deprecated original name for the FULL_FEATURED variant) */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_VSWITCH 0x3 +/* enum: siena_compat variant RX PD firmware using PM rather than MAC + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +/* enum: Low latency RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 +/* enum: Packed stream RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6 +/* enum: RX PD firmware handling layer 2 only for high packet rate performance + * tests (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7 +/* enum: Rules engine RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_DPDK 0xa +/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +/* enum: RX PD firmware 
parsing but not filtering network overlay tunnel + * encapsulations (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4 +/* enum: reserved value - do not use (may indicate alternative interpretation + * of REV field in future) + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_RESERVED 0x0 +/* enum: Trivial TX PD firmware for early Huntington development (Huntington + * development only) + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: TX PD firmware for telemetry prototyping (Medford2 development only) + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 +/* enum: TX PD firmware with approximately Siena-compatible behaviour + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2 +/* enum: Full featured TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3 +/* enum: (deprecated original name for the FULL_FEATURED variant) */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_VSWITCH 0x3 +/* enum: siena_compat variant TX PD firmware using PM rather than MAC + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */ +/* enum: TX PD firmware handling layer 2 only for high packet rate performance + * tests (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7 +/* enum: Rules engine TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_DPDK 0xa +/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +/* Hardware capabilities of NIC */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_HW_CAPABILITIES_OFST 12 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_HW_CAPABILITIES_LEN 4 +/* Licensed capabilities */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_LICENSE_CAPABILITIES_OFST 16 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_LICENSE_CAPABILITIES_LEN 4 +/* Second word of flags. Not present on older firmware (check the length). 
*/ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS2_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_ENCAP_LBN 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_ENCAP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVQ_TIMER_CTRL_LBN 2 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVQ_TIMER_CTRL_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVENT_CUT_THROUGH_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVENT_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_CUT_THROUGH_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VFIFO_ULL_MODE_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VFIFO_ULL_MODE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_EVQ_V2_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_EVQ_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_TIMESTAMPING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_TIMESTAMPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TIMESTAMP_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_SNIFF_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_SNIFF_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_BACKGROUND_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_BACKGROUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_DB_RETURN_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_DB_RETURN_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_CTPIO_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_CTPIO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_SUPPORT_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_SUPPORT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_BOUND_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_BOUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_FILTER_ACTION_FLAG_LBN 19 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_FILTER_ACTION_FLAG_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_FILTER_ACTION_MARK_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_FILTER_ACTION_MARK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_L3XUDP_SUPPORT_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_L3XUDP_SUPPORT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_SPREADING_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_SPREADING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_HLB_IDLE_LBN 25 +#define 
MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_HLB_IDLE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_RXQ_NO_CONT_EV_LBN 26 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_BUNDLE_UPDATE_LBN 28 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_BUNDLE_UPDATE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V3_LBN 29 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V3_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_DYNAMIC_SENSORS_LBN 30 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_DYNAMIC_SENSORS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1 +/* Number of FATSOv2 contexts per datapath supported by this NIC (when + * TX_TSO_V2 == 1). Not present on older firmware (check the length). + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2 +/* One byte per PF containing the number of the external port assigned to this + * PF, indexed by PF number. Special values indicate that a PF is either not + * present or not assigned. + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16 +/* enum: The caller is not permitted to access information on this PF. */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_ACCESS_NOT_PERMITTED 0xff +/* enum: PF does not exist. */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_PF_NOT_PRESENT 0xfe +/* enum: PF does exist but is not assigned to any external port. */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_PF_NOT_ASSIGNED 0xfd +/* enum: This value indicates that PF is assigned, but it cannot be expressed + * in this field. It is intended for a possible future situation where a more + * complex scheme of PFs to ports mapping is being used. The future driver + * should look for a new field supporting the new scheme. The current/old + * driver should treat this value as PF_NOT_ASSIGNED. + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc +/* One byte per PF containing the number of its VFs, indexed by PF number. A + * special value indicates that a PF is not present. + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VFS_PER_PF_OFST 42 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VFS_PER_PF_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VFS_PER_PF_NUM 16 +/* enum: The caller is not permitted to access information on this PF. */ +/* MC_CMD_GET_CAPABILITIES_V3_OUT_ACCESS_NOT_PERMITTED 0xff */ +/* enum: PF does not exist. 
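The per-PF port assignment array above uses high byte values as sentinels, and the comment asks current drivers to fold INCOMPATIBLE_ASSIGNMENT into the not-assigned case. A sketch of that decoding follows; the CAP_* shorthands are hypothetical, their values mirror the definitions above, and the caller is assumed to have verified that the response covers offsets 26..41.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical shorthands; values mirrored from the definitions above. */
#define CAP_PFS_TO_PORTS_OFST       26
#define CAP_PFS_TO_PORTS_NUM        16
#define CAP_ACCESS_NOT_PERMITTED    0xff
#define CAP_PF_NOT_PRESENT          0xfe
#define CAP_PF_NOT_ASSIGNED         0xfd
#define CAP_INCOMPATIBLE_ASSIGNMENT 0xfc

/* Print the external port assigned to each PF, honouring the sentinels. */
static void cap_dump_pf_ports(const uint8_t *rsp)
{
        unsigned int pf;

        for (pf = 0; pf < CAP_PFS_TO_PORTS_NUM; pf++) {
                uint8_t v = rsp[CAP_PFS_TO_PORTS_OFST + pf];

                switch (v) {
                case CAP_ACCESS_NOT_PERMITTED:
                        printf("PF%u: access not permitted\n", pf);
                        break;
                case CAP_PF_NOT_PRESENT:
                        printf("PF%u: not present\n", pf);
                        break;
                case CAP_PF_NOT_ASSIGNED:
                case CAP_INCOMPATIBLE_ASSIGNMENT:
                        /* Per the comment above, an incompatible assignment
                         * is treated the same as "not assigned". */
                        printf("PF%u: no external port\n", pf);
                        break;
                default:
                        printf("PF%u: external port %u\n", pf, v);
                        break;
                }
        }
}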
*/ +/* MC_CMD_GET_CAPABILITIES_V3_OUT_PF_NOT_PRESENT 0xfe */ +/* Number of VIs available for each external port */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VIS_PER_PORT_OFST 58 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VIS_PER_PORT_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VIS_PER_PORT_NUM 4 +/* Size of RX descriptor cache expressed as binary logarithm The actual size + * equals (2 ^ RX_DESC_CACHE_SIZE) + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DESC_CACHE_SIZE_OFST 66 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DESC_CACHE_SIZE_LEN 1 +/* Size of TX descriptor cache expressed as binary logarithm The actual size + * equals (2 ^ TX_DESC_CACHE_SIZE) + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_DESC_CACHE_SIZE_OFST 67 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_DESC_CACHE_SIZE_LEN 1 +/* Total number of available PIO buffers */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_PIO_BUFFS_OFST 68 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_PIO_BUFFS_LEN 2 +/* Size of a single PIO buffer */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_SIZE_PIO_BUFF_OFST 70 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_SIZE_PIO_BUFF_LEN 2 +/* On chips later than Medford the amount of address space assigned to each VI + * is configurable. This is a global setting that the driver must query to + * discover the VI to address mapping. Cut-through PIO (CTPIO) is not available + * with 8k VI windows. + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_OFST 72 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_LEN 1 +/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k. + * CTPIO is not mapped. + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_8K 0x0 +/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_16K 0x1 +/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_64K 0x2 +/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing + * (SF-115995-SW) in the present configuration of firmware and port mode. + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1 +/* Number of buffers per adapter that can be used for VFIFO Stuffing + * (SF-115995-SW) in the present configuration of firmware and port mode. + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2 + +/* MC_CMD_GET_CAPABILITIES_V4_OUT msgresponse */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_LEN 78 +/* First word of flags. 
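Two of the fields above need arithmetic rather than a straight read: the descriptor cache sizes are binary logarithms (actual size = 2 ^ value), and VI_WINDOW_MODE selects how much address space each VI occupies. A sketch of both conversions; the CAP_* shorthands are hypothetical, their values mirror the V3 offsets and enum values above, and rsp is assumed to be the raw response buffer.

#include <stdint.h>

/* Hypothetical shorthands; values mirrored from the V3 definitions above. */
#define CAP_RX_DESC_CACHE_SIZE_OFST 66
#define CAP_VI_WINDOW_MODE_OFST     72
#define CAP_VI_WINDOW_MODE_8K       0x0
#define CAP_VI_WINDOW_MODE_16K      0x1
#define CAP_VI_WINDOW_MODE_64K      0x2

/* The cache size field is a binary logarithm: the real entry count is
 * 2 ^ RX_DESC_CACHE_SIZE. */
static uint32_t cap_rx_desc_cache_entries(const uint8_t *rsp)
{
        return 1u << rsp[CAP_RX_DESC_CACHE_SIZE_OFST];
}

/* Per-VI address-space size implied by VI_WINDOW_MODE; 0 means the mode is
 * unknown to this sketch.  Note CTPIO is unavailable with 8k VI windows. */
static uint32_t cap_vi_window_bytes(const uint8_t *rsp)
{
        switch (rsp[CAP_VI_WINDOW_MODE_OFST]) {
        case CAP_VI_WINDOW_MODE_8K:  return 8 * 1024;
        case CAP_VI_WINDOW_MODE_16K: return 16 * 1024;
        case CAP_VI_WINDOW_MODE_64K: return 64 * 1024;
        default:                     return 0;
        }
}

The window size is what a driver would use to step from one VI's registers to the next when mapping BAR space, which is why the comment above calls it a global setting that must be queried.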
*/ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS1_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS1_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VPORT_RECONFIGURE_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VPORT_RECONFIGURE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_STRIPING_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_STRIPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_QUERY_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_QUERY_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_DRV_ATTACH_PREBOOT_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_DRV_ATTACH_PREBOOT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_FORCE_EVENT_MERGING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_SET_MAC_ENHANCED_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_SET_MAC_ENHANCED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_SECURITY_FILTERING_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_ADDITIONAL_RSS_MODES_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_ADDITIONAL_RSS_MODES_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_QBB_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_QBB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_RSS_LIMITED_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_RSS_LIMITED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_INCLUDE_FCS_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_INCLUDE_FCS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VLAN_INSERTION_LBN 19 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VLAN_INSERTION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_VLAN_STRIPPING_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_VLAN_STRIPPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_0_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_0_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_14_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_14_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_TIMESTAMP_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_BATCHING_LBN 25 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_BATCHING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCAST_FILTER_CHAINING_LBN 26 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCAST_FILTER_CHAINING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_PM_AND_RXDP_COUNTERS_LBN 27 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DISABLE_SCATTER_LBN 28 +#define 
MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DISABLE_SCATTER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_LBN 30 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VXLAN_NVGRE_LBN 31 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VXLAN_NVGRE_WIDTH 1 +/* RxDPCPU firmware id. */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DPCPU_FW_ID_OFST 4 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DPCPU_FW_ID_LEN 2 +/* enum: Standard RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP 0x0 +/* enum: Low latency RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_LOW_LATENCY 0x1 +/* enum: Packed stream RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_PACKED_STREAM 0x2 +/* enum: Rules engine RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_RULES_ENGINE 0x5 +/* enum: DPDK RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_DPDK 0x6 +/* enum: BIST RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_BIST 0x10a +/* enum: RXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101 +/* enum: RXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102 +/* enum: RXDP Test firmware image 3 */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103 +/* enum: RXDP Test firmware image 4 */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104 +/* enum: RXDP Test firmware image 5 */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_BACKPRESSURE 0x105 +/* enum: RXDP Test firmware image 6 */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106 +/* enum: RXDP Test firmware image 7 */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107 +/* enum: RXDP Test firmware image 8 */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_DISABLE_DL 0x108 +/* enum: RXDP Test firmware image 9 */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b +/* enum: RXDP Test firmware image 10 */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_SLOW 0x10c +/* TxDPCPU firmware id. 
*/ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_DPCPU_FW_ID_OFST 6 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_DPCPU_FW_ID_LEN 2 +/* enum: Standard TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP 0x0 +/* enum: Low latency TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_LOW_LATENCY 0x1 +/* enum: High packet rate TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_HIGH_PACKET_RATE 0x3 +/* enum: Rules engine TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_RULES_ENGINE 0x5 +/* enum: DPDK TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_DPDK 0x6 +/* enum: BIST TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_BIST 0x12d +/* enum: TXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_TEST_FW_TSO_EDIT 0x101 +/* enum: TXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102 +/* enum: TXDP CSR bus test firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_TEST_FW_CSR 0x103 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4 +/* enum: reserved value - do not use (may indicate alternative interpretation + * of REV field in future) + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_RESERVED 0x0 +/* enum: Trivial RX PD firmware for early Huntington development (Huntington + * development only) + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: RX PD firmware for telemetry prototyping (Medford2 development only) + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 +/* enum: RX PD firmware with approximately Siena-compatible behaviour + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 +/* enum: Full featured RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3 +/* enum: (deprecated original name for the FULL_FEATURED variant) */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_VSWITCH 0x3 +/* enum: siena_compat variant RX PD firmware using PM rather than MAC + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +/* enum: Low latency RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 +/* enum: Packed stream RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6 +/* enum: RX PD firmware handling layer 2 only for high packet rate performance + * tests (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7 +/* enum: Rules engine RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_DPDK 0xa +/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +/* enum: RX PD firmware parsing but not filtering 
network overlay tunnel + * encapsulations (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4 +/* enum: reserved value - do not use (may indicate alternative interpretation + * of REV field in future) + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_RESERVED 0x0 +/* enum: Trivial TX PD firmware for early Huntington development (Huntington + * development only) + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: TX PD firmware for telemetry prototyping (Medford2 development only) + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 +/* enum: TX PD firmware with approximately Siena-compatible behaviour + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2 +/* enum: Full featured TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3 +/* enum: (deprecated original name for the FULL_FEATURED variant) */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_VSWITCH 0x3 +/* enum: siena_compat variant TX PD firmware using PM rather than MAC + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */ +/* enum: TX PD firmware handling layer 2 only for high packet rate performance + * tests (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7 +/* enum: Rules engine TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_DPDK 0xa +/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +/* Hardware capabilities of NIC */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_HW_CAPABILITIES_OFST 12 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_HW_CAPABILITIES_LEN 4 +/* Licensed capabilities */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_LICENSE_CAPABILITIES_OFST 16 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_LICENSE_CAPABILITIES_LEN 4 +/* Second word of flags. Not present on older firmware (check the length). 
*/ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS2_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_ENCAP_LBN 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_ENCAP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVQ_TIMER_CTRL_LBN 2 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVQ_TIMER_CTRL_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVENT_CUT_THROUGH_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVENT_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_CUT_THROUGH_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VFIFO_ULL_MODE_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VFIFO_ULL_MODE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_EVQ_V2_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_EVQ_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_TIMESTAMPING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_TIMESTAMPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TIMESTAMP_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_SNIFF_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_SNIFF_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_BACKGROUND_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_BACKGROUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_DB_RETURN_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_DB_RETURN_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_CTPIO_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_CTPIO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_SUPPORT_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_SUPPORT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_BOUND_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_BOUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_FILTER_ACTION_FLAG_LBN 19 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_FILTER_ACTION_FLAG_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_FILTER_ACTION_MARK_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_FILTER_ACTION_MARK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_L3XUDP_SUPPORT_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_L3XUDP_SUPPORT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_SPREADING_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_SPREADING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_HLB_IDLE_LBN 25 +#define 
MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_HLB_IDLE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_RXQ_NO_CONT_EV_LBN 26 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_BUNDLE_UPDATE_LBN 28 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_BUNDLE_UPDATE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V3_LBN 29 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V3_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_DYNAMIC_SENSORS_LBN 30 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_DYNAMIC_SENSORS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1 +/* Number of FATSOv2 contexts per datapath supported by this NIC (when + * TX_TSO_V2 == 1). Not present on older firmware (check the length). + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2 +/* One byte per PF containing the number of the external port assigned to this + * PF, indexed by PF number. Special values indicate that a PF is either not + * present or not assigned. + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16 +/* enum: The caller is not permitted to access information on this PF. */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_ACCESS_NOT_PERMITTED 0xff +/* enum: PF does not exist. */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_PF_NOT_PRESENT 0xfe +/* enum: PF does exist but is not assigned to any external port. */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_PF_NOT_ASSIGNED 0xfd +/* enum: This value indicates that PF is assigned, but it cannot be expressed + * in this field. It is intended for a possible future situation where a more + * complex scheme of PFs to ports mapping is being used. The future driver + * should look for a new field supporting the new scheme. The current/old + * driver should treat this value as PF_NOT_ASSIGNED. + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc +/* One byte per PF containing the number of its VFs, indexed by PF number. A + * special value indicates that a PF is not present. + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VFS_PER_PF_OFST 42 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VFS_PER_PF_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VFS_PER_PF_NUM 16 +/* enum: The caller is not permitted to access information on this PF. */ +/* MC_CMD_GET_CAPABILITIES_V4_OUT_ACCESS_NOT_PERMITTED 0xff */ +/* enum: PF does not exist. 
*/ +/* MC_CMD_GET_CAPABILITIES_V4_OUT_PF_NOT_PRESENT 0xfe */ +/* Number of VIs available for each external port */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VIS_PER_PORT_OFST 58 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VIS_PER_PORT_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VIS_PER_PORT_NUM 4 +/* Size of RX descriptor cache expressed as binary logarithm The actual size + * equals (2 ^ RX_DESC_CACHE_SIZE) + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DESC_CACHE_SIZE_OFST 66 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DESC_CACHE_SIZE_LEN 1 +/* Size of TX descriptor cache expressed as binary logarithm The actual size + * equals (2 ^ TX_DESC_CACHE_SIZE) + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_DESC_CACHE_SIZE_OFST 67 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_DESC_CACHE_SIZE_LEN 1 +/* Total number of available PIO buffers */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_PIO_BUFFS_OFST 68 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_PIO_BUFFS_LEN 2 +/* Size of a single PIO buffer */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_SIZE_PIO_BUFF_OFST 70 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_SIZE_PIO_BUFF_LEN 2 +/* On chips later than Medford the amount of address space assigned to each VI + * is configurable. This is a global setting that the driver must query to + * discover the VI to address mapping. Cut-through PIO (CTPIO) is not available + * with 8k VI windows. + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_OFST 72 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_LEN 1 +/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k. + * CTPIO is not mapped. + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_8K 0x0 +/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_16K 0x1 +/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_64K 0x2 +/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing + * (SF-115995-SW) in the present configuration of firmware and port mode. + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1 +/* Number of buffers per adapter that can be used for VFIFO Stuffing + * (SF-115995-SW) in the present configuration of firmware and port mode. + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2 +/* Entry count in the MAC stats array, including the final GENERATION_END + * entry. For MAC stats DMA, drivers should allocate a buffer large enough to + * hold at least this many 64-bit stats values, if they wish to receive all + * available stats. If the buffer is shorter than MAC_STATS_NUM_STATS * 8, the + * stats array returned will be truncated. + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS_OFST 76 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS_LEN 2 + +/* MC_CMD_GET_CAPABILITIES_V5_OUT msgresponse */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_LEN 84 +/* First word of flags. 
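MAC_STATS_NUM_STATS above tells the driver how large a MAC stats DMA buffer must be: at least that many 64-bit entries (including the GENERATION_END entry), otherwise the returned array is truncated. A sketch of the sizing rule follows; the CAP_* shorthands are hypothetical, their values mirror the V4 definitions above, and rsp/rsp_len are assumed to describe the raw little-endian response, with a length guard for firmware that predates this field.

#include <stddef.h>
#include <stdint.h>

/* Hypothetical shorthands; values mirrored from the V4 definitions above. */
#define CAP_V4_OUT_LEN                  78
#define CAP_V4_MAC_STATS_NUM_STATS_OFST 76

static uint16_t cap_get16(const uint8_t *rsp, size_t ofst)
{
        return (uint16_t)(rsp[ofst] | ((uint16_t)rsp[ofst + 1] << 8));
}

/* Minimum DMA buffer size (in bytes) needed to receive the full MAC stats
 * array: NUM_STATS 64-bit counters.  Returns 0 when the firmware response
 * is too short to carry this field. */
static size_t mac_stats_buf_bytes(const uint8_t *rsp, size_t rsp_len)
{
        if (rsp_len < CAP_V4_OUT_LEN)
                return 0;   /* field not reported by this firmware */
        return (size_t)cap_get16(rsp, CAP_V4_MAC_STATS_NUM_STATS_OFST) *
               sizeof(uint64_t);
}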
*/ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FLAGS1_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FLAGS1_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VPORT_RECONFIGURE_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VPORT_RECONFIGURE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_STRIPING_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_STRIPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VADAPTOR_QUERY_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VADAPTOR_QUERY_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_DRV_ATTACH_PREBOOT_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_DRV_ATTACH_PREBOOT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_FORCE_EVENT_MERGING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_SET_MAC_ENHANCED_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_SET_MAC_ENHANCED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MAC_SECURITY_FILTERING_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_ADDITIONAL_RSS_MODES_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_ADDITIONAL_RSS_MODES_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_QBB_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_QBB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_RSS_LIMITED_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_RSS_LIMITED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PACKED_STREAM_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_INCLUDE_FCS_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_INCLUDE_FCS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_VLAN_INSERTION_LBN 19 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_VLAN_INSERTION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_VLAN_STRIPPING_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_VLAN_STRIPPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PREFIX_LEN_0_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PREFIX_LEN_0_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PREFIX_LEN_14_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PREFIX_LEN_14_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_TIMESTAMP_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_BATCHING_LBN 25 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_BATCHING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCAST_FILTER_CHAINING_LBN 26 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCAST_FILTER_CHAINING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_PM_AND_RXDP_COUNTERS_LBN 27 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DISABLE_SCATTER_LBN 28 +#define 
MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DISABLE_SCATTER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVB_LBN 30 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VXLAN_NVGRE_LBN 31 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VXLAN_NVGRE_WIDTH 1 +/* RxDPCPU firmware id. */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DPCPU_FW_ID_OFST 4 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DPCPU_FW_ID_LEN 2 +/* enum: Standard RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP 0x0 +/* enum: Low latency RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_LOW_LATENCY 0x1 +/* enum: Packed stream RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_PACKED_STREAM 0x2 +/* enum: Rules engine RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_RULES_ENGINE 0x5 +/* enum: DPDK RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_DPDK 0x6 +/* enum: BIST RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_BIST 0x10a +/* enum: RXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101 +/* enum: RXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102 +/* enum: RXDP Test firmware image 3 */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103 +/* enum: RXDP Test firmware image 4 */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104 +/* enum: RXDP Test firmware image 5 */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_BACKPRESSURE 0x105 +/* enum: RXDP Test firmware image 6 */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106 +/* enum: RXDP Test firmware image 7 */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107 +/* enum: RXDP Test firmware image 8 */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_DISABLE_DL 0x108 +/* enum: RXDP Test firmware image 9 */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b +/* enum: RXDP Test firmware image 10 */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_SLOW 0x10c +/* TxDPCPU firmware id. 
*/ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_DPCPU_FW_ID_OFST 6 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_DPCPU_FW_ID_LEN 2 +/* enum: Standard TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP 0x0 +/* enum: Low latency TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_LOW_LATENCY 0x1 +/* enum: High packet rate TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_HIGH_PACKET_RATE 0x3 +/* enum: Rules engine TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_RULES_ENGINE 0x5 +/* enum: DPDK TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_DPDK 0x6 +/* enum: BIST TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_BIST 0x12d +/* enum: TXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_TEST_FW_TSO_EDIT 0x101 +/* enum: TXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102 +/* enum: TXDP CSR bus test firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_TEST_FW_CSR 0x103 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4 +/* enum: reserved value - do not use (may indicate alternative interpretation + * of REV field in future) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_RESERVED 0x0 +/* enum: Trivial RX PD firmware for early Huntington development (Huntington + * development only) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: RX PD firmware for telemetry prototyping (Medford2 development only) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 +/* enum: RX PD firmware with approximately Siena-compatible behaviour + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 +/* enum: Full featured RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3 +/* enum: (deprecated original name for the FULL_FEATURED variant) */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_VSWITCH 0x3 +/* enum: siena_compat variant RX PD firmware using PM rather than MAC + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +/* enum: Low latency RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 +/* enum: Packed stream RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6 +/* enum: RX PD firmware handling layer 2 only for high packet rate performance + * tests (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7 +/* enum: Rules engine RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_DPDK 0xa +/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +/* enum: RX PD firmware parsing but not filtering 
network overlay tunnel + * encapsulations (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4 +/* enum: reserved value - do not use (may indicate alternative interpretation + * of REV field in future) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_RESERVED 0x0 +/* enum: Trivial TX PD firmware for early Huntington development (Huntington + * development only) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: TX PD firmware for telemetry prototyping (Medford2 development only) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 +/* enum: TX PD firmware with approximately Siena-compatible behaviour + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2 +/* enum: Full featured TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3 +/* enum: (deprecated original name for the FULL_FEATURED variant) */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_VSWITCH 0x3 +/* enum: siena_compat variant TX PD firmware using PM rather than MAC + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */ +/* enum: TX PD firmware handling layer 2 only for high packet rate performance + * tests (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7 +/* enum: Rules engine TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_DPDK 0xa +/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +/* Hardware capabilities of NIC */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_HW_CAPABILITIES_OFST 12 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_HW_CAPABILITIES_LEN 4 +/* Licensed capabilities */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_LICENSE_CAPABILITIES_OFST 16 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_LICENSE_CAPABILITIES_LEN 4 +/* Second word of flags. Not present on older firmware (check the length). 
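+ * As an illustrative sketch only (not part of the upstream header), a driver
+ * that has already copied this MCDI response into a byte buffer resp of
+ * length resp_len might perform the length check and then test an individual
+ * FLAGS2 bit using the _OFST/_LEN/_LBN values, assuming DPDK's
+ * rte_le_to_cpu_32() is available for byte-order conversion:
+ *
+ *   uint32_t flags2 = 0;
+ *   if (resp_len >= MC_CMD_GET_CAPABILITIES_V5_OUT_FLAGS2_OFST +
+ *       MC_CMD_GET_CAPABILITIES_V5_OUT_FLAGS2_LEN) {
+ *       memcpy(&flags2, resp + MC_CMD_GET_CAPABILITIES_V5_OUT_FLAGS2_OFST,
+ *              MC_CMD_GET_CAPABILITIES_V5_OUT_FLAGS2_LEN);
+ *       flags2 = rte_le_to_cpu_32(flags2);
+ *   }
+ *   int has_tso_v2 =
+ *       (flags2 >> MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_LBN) & 1;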
*/ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FLAGS2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FLAGS2_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_ENCAP_LBN 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_ENCAP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVQ_TIMER_CTRL_LBN 2 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVQ_TIMER_CTRL_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVENT_CUT_THROUGH_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVENT_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_CUT_THROUGH_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_VFIFO_ULL_MODE_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_VFIFO_ULL_MODE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_EVQ_V2_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_EVQ_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MAC_TIMESTAMPING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MAC_TIMESTAMPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TIMESTAMP_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_SNIFF_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_SNIFF_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCDI_BACKGROUND_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCDI_BACKGROUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCDI_DB_RETURN_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCDI_DB_RETURN_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_CTPIO_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_CTPIO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TSA_SUPPORT_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TSA_SUPPORT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TSA_BOUND_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TSA_BOUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_FLAG_LBN 19 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_FLAG_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_L3XUDP_SUPPORT_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_L3XUDP_SUPPORT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_SPREADING_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_SPREADING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_HLB_IDLE_LBN 25 +#define 
MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_HLB_IDLE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_RXQ_NO_CONT_EV_LBN 26 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_BUNDLE_UPDATE_LBN 28 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_BUNDLE_UPDATE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V3_LBN 29 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V3_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_DYNAMIC_SENSORS_LBN 30 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_DYNAMIC_SENSORS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1 +/* Number of FATSOv2 contexts per datapath supported by this NIC (when + * TX_TSO_V2 == 1). Not present on older firmware (check the length). + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2 +/* One byte per PF containing the number of the external port assigned to this + * PF, indexed by PF number. Special values indicate that a PF is either not + * present or not assigned. + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16 +/* enum: The caller is not permitted to access information on this PF. */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_ACCESS_NOT_PERMITTED 0xff +/* enum: PF does not exist. */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_PF_NOT_PRESENT 0xfe +/* enum: PF does exist but is not assigned to any external port. */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_PF_NOT_ASSIGNED 0xfd +/* enum: This value indicates that PF is assigned, but it cannot be expressed + * in this field. It is intended for a possible future situation where a more + * complex scheme of PFs to ports mapping is being used. The future driver + * should look for a new field supporting the new scheme. The current/old + * driver should treat this value as PF_NOT_ASSIGNED. + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc +/* One byte per PF containing the number of its VFs, indexed by PF number. A + * special value indicates that a PF is not present. + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VFS_PER_PF_OFST 42 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VFS_PER_PF_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VFS_PER_PF_NUM 16 +/* enum: The caller is not permitted to access information on this PF. */ +/* MC_CMD_GET_CAPABILITIES_V5_OUT_ACCESS_NOT_PERMITTED 0xff */ +/* enum: PF does not exist. 
*/ +/* MC_CMD_GET_CAPABILITIES_V5_OUT_PF_NOT_PRESENT 0xfe */ +/* Number of VIs available for each external port */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VIS_PER_PORT_OFST 58 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VIS_PER_PORT_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VIS_PER_PORT_NUM 4 +/* Size of RX descriptor cache expressed as binary logarithm The actual size + * equals (2 ^ RX_DESC_CACHE_SIZE) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DESC_CACHE_SIZE_OFST 66 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DESC_CACHE_SIZE_LEN 1 +/* Size of TX descriptor cache expressed as binary logarithm The actual size + * equals (2 ^ TX_DESC_CACHE_SIZE) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_DESC_CACHE_SIZE_OFST 67 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_DESC_CACHE_SIZE_LEN 1 +/* Total number of available PIO buffers */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_PIO_BUFFS_OFST 68 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_PIO_BUFFS_LEN 2 +/* Size of a single PIO buffer */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_SIZE_PIO_BUFF_OFST 70 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_SIZE_PIO_BUFF_LEN 2 +/* On chips later than Medford the amount of address space assigned to each VI + * is configurable. This is a global setting that the driver must query to + * discover the VI to address mapping. Cut-through PIO (CTPIO) is not available + * with 8k VI windows. + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_WINDOW_MODE_OFST 72 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_WINDOW_MODE_LEN 1 +/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k. + * CTPIO is not mapped. + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_WINDOW_MODE_8K 0x0 +/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_WINDOW_MODE_16K 0x1 +/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_WINDOW_MODE_64K 0x2 +/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing + * (SF-115995-SW) in the present configuration of firmware and port mode. + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1 +/* Number of buffers per adapter that can be used for VFIFO Stuffing + * (SF-115995-SW) in the present configuration of firmware and port mode. + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2 +/* Entry count in the MAC stats array, including the final GENERATION_END + * entry. For MAC stats DMA, drivers should allocate a buffer large enough to + * hold at least this many 64-bit stats values, if they wish to receive all + * available stats. If the buffer is shorter than MAC_STATS_NUM_STATS * 8, the + * stats array returned will be truncated. + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MAC_STATS_NUM_STATS_OFST 76 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MAC_STATS_NUM_STATS_LEN 2 +/* Maximum supported value for MC_CMD_FILTER_OP_V3/MATCH_MARK_VALUE. This field + * will only be non-zero if MC_CMD_GET_CAPABILITIES/FILTER_ACTION_MARK is set. + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_MAX_OFST 80 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_MAX_LEN 4 + +/* MC_CMD_GET_CAPABILITIES_V6_OUT msgresponse */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_LEN 148 +/* First word of flags. 
*/ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_FLAGS1_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_FLAGS1_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VPORT_RECONFIGURE_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VPORT_RECONFIGURE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_STRIPING_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_STRIPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VADAPTOR_QUERY_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VADAPTOR_QUERY_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_DRV_ATTACH_PREBOOT_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_DRV_ATTACH_PREBOOT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_FORCE_EVENT_MERGING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_SET_MAC_ENHANCED_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_SET_MAC_ENHANCED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MAC_SECURITY_FILTERING_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_ADDITIONAL_RSS_MODES_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_ADDITIONAL_RSS_MODES_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_QBB_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_QBB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_RSS_LIMITED_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_RSS_LIMITED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PACKED_STREAM_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_INCLUDE_FCS_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_INCLUDE_FCS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_VLAN_INSERTION_LBN 19 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_VLAN_INSERTION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_VLAN_STRIPPING_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_VLAN_STRIPPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PREFIX_LEN_0_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PREFIX_LEN_0_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PREFIX_LEN_14_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PREFIX_LEN_14_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_TIMESTAMP_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_BATCHING_LBN 25 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_BATCHING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_MCAST_FILTER_CHAINING_LBN 26 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_MCAST_FILTER_CHAINING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_PM_AND_RXDP_COUNTERS_LBN 27 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_DISABLE_SCATTER_LBN 28 +#define 
MC_CMD_GET_CAPABILITIES_V6_OUT_RX_DISABLE_SCATTER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVB_LBN 30 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VXLAN_NVGRE_LBN 31 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VXLAN_NVGRE_WIDTH 1 +/* RxDPCPU firmware id. */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_DPCPU_FW_ID_OFST 4 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_DPCPU_FW_ID_LEN 2 +/* enum: Standard RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP 0x0 +/* enum: Low latency RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_LOW_LATENCY 0x1 +/* enum: Packed stream RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_PACKED_STREAM 0x2 +/* enum: Rules engine RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_RULES_ENGINE 0x5 +/* enum: DPDK RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_DPDK 0x6 +/* enum: BIST RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_BIST 0x10a +/* enum: RXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101 +/* enum: RXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102 +/* enum: RXDP Test firmware image 3 */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103 +/* enum: RXDP Test firmware image 4 */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104 +/* enum: RXDP Test firmware image 5 */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_BACKPRESSURE 0x105 +/* enum: RXDP Test firmware image 6 */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106 +/* enum: RXDP Test firmware image 7 */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107 +/* enum: RXDP Test firmware image 8 */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_FW_DISABLE_DL 0x108 +/* enum: RXDP Test firmware image 9 */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b +/* enum: RXDP Test firmware image 10 */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_FW_SLOW 0x10c +/* TxDPCPU firmware id. 
*/ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_DPCPU_FW_ID_OFST 6 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_DPCPU_FW_ID_LEN 2 +/* enum: Standard TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP 0x0 +/* enum: Low latency TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP_LOW_LATENCY 0x1 +/* enum: High packet rate TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP_HIGH_PACKET_RATE 0x3 +/* enum: Rules engine TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP_RULES_ENGINE 0x5 +/* enum: DPDK TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP_DPDK 0x6 +/* enum: BIST TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP_BIST 0x12d +/* enum: TXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP_TEST_FW_TSO_EDIT 0x101 +/* enum: TXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102 +/* enum: TXDP CSR bus test firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP_TEST_FW_CSR 0x103 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_VERSION_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4 +/* enum: reserved value - do not use (may indicate alternative interpretation + * of REV field in future) + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_RESERVED 0x0 +/* enum: Trivial RX PD firmware for early Huntington development (Huntington + * development only) + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: RX PD firmware for telemetry prototyping (Medford2 development only) + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 +/* enum: RX PD firmware with approximately Siena-compatible behaviour + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 +/* enum: Full featured RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3 +/* enum: (deprecated original name for the FULL_FEATURED variant) */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_VSWITCH 0x3 +/* enum: siena_compat variant RX PD firmware using PM rather than MAC + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +/* enum: Low latency RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 +/* enum: Packed stream RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6 +/* enum: RX PD firmware handling layer 2 only for high packet rate performance + * tests (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7 +/* enum: Rules engine RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_DPDK 0xa +/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +/* enum: RX PD firmware parsing but not filtering 
network overlay tunnel + * encapsulations (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_VERSION_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4 +/* enum: reserved value - do not use (may indicate alternative interpretation + * of REV field in future) + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_RESERVED 0x0 +/* enum: Trivial TX PD firmware for early Huntington development (Huntington + * development only) + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: TX PD firmware for telemetry prototyping (Medford2 development only) + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 +/* enum: TX PD firmware with approximately Siena-compatible behaviour + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2 +/* enum: Full featured TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3 +/* enum: (deprecated original name for the FULL_FEATURED variant) */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_VSWITCH 0x3 +/* enum: siena_compat variant TX PD firmware using PM rather than MAC + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */ +/* enum: TX PD firmware handling layer 2 only for high packet rate performance + * tests (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7 +/* enum: Rules engine TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_DPDK 0xa +/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +/* Hardware capabilities of NIC */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_HW_CAPABILITIES_OFST 12 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_HW_CAPABILITIES_LEN 4 +/* Licensed capabilities */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_LICENSE_CAPABILITIES_OFST 16 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_LICENSE_CAPABILITIES_LEN 4 +/* Second word of flags. Not present on older firmware (check the length). 
*/ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_FLAGS2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_FLAGS2_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V2_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V2_ENCAP_LBN 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V2_ENCAP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVQ_TIMER_CTRL_LBN 2 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVQ_TIMER_CTRL_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVENT_CUT_THROUGH_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVENT_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_CUT_THROUGH_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_VFIFO_ULL_MODE_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_VFIFO_ULL_MODE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_EVQ_V2_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_EVQ_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MAC_TIMESTAMPING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MAC_TIMESTAMPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TIMESTAMP_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_SNIFF_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_SNIFF_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_MCDI_BACKGROUND_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_MCDI_BACKGROUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_MCDI_DB_RETURN_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_MCDI_DB_RETURN_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_CTPIO_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_CTPIO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TSA_SUPPORT_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TSA_SUPPORT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TSA_BOUND_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TSA_BOUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_FILTER_ACTION_FLAG_LBN 19 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_FILTER_ACTION_FLAG_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_FILTER_ACTION_MARK_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_FILTER_ACTION_MARK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_L3XUDP_SUPPORT_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_L3XUDP_SUPPORT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VI_SPREADING_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VI_SPREADING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_HLB_IDLE_LBN 25 +#define 
MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_HLB_IDLE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_RXQ_NO_CONT_EV_LBN 26 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_BUNDLE_UPDATE_LBN 28 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_BUNDLE_UPDATE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V3_LBN 29 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V3_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_DYNAMIC_SENSORS_LBN 30 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_DYNAMIC_SENSORS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1 +/* Number of FATSOv2 contexts per datapath supported by this NIC (when + * TX_TSO_V2 == 1). Not present on older firmware (check the length). + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2 +/* One byte per PF containing the number of the external port assigned to this + * PF, indexed by PF number. Special values indicate that a PF is either not + * present or not assigned. + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16 +/* enum: The caller is not permitted to access information on this PF. */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_ACCESS_NOT_PERMITTED 0xff +/* enum: PF does not exist. */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_PF_NOT_PRESENT 0xfe +/* enum: PF does exist but is not assigned to any external port. */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_PF_NOT_ASSIGNED 0xfd +/* enum: This value indicates that PF is assigned, but it cannot be expressed + * in this field. It is intended for a possible future situation where a more + * complex scheme of PFs to ports mapping is being used. The future driver + * should look for a new field supporting the new scheme. The current/old + * driver should treat this value as PF_NOT_ASSIGNED. + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc +/* One byte per PF containing the number of its VFs, indexed by PF number. A + * special value indicates that a PF is not present. + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_VFS_PER_PF_OFST 42 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_VFS_PER_PF_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_VFS_PER_PF_NUM 16 +/* enum: The caller is not permitted to access information on this PF. */ +/* MC_CMD_GET_CAPABILITIES_V6_OUT_ACCESS_NOT_PERMITTED 0xff */ +/* enum: PF does not exist. 
*/ +/* MC_CMD_GET_CAPABILITIES_V6_OUT_PF_NOT_PRESENT 0xfe */ +/* Number of VIs available for each external port */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_VIS_PER_PORT_OFST 58 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_VIS_PER_PORT_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_VIS_PER_PORT_NUM 4 +/* Size of RX descriptor cache expressed as binary logarithm The actual size + * equals (2 ^ RX_DESC_CACHE_SIZE) + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_DESC_CACHE_SIZE_OFST 66 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_DESC_CACHE_SIZE_LEN 1 +/* Size of TX descriptor cache expressed as binary logarithm The actual size + * equals (2 ^ TX_DESC_CACHE_SIZE) + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_DESC_CACHE_SIZE_OFST 67 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_DESC_CACHE_SIZE_LEN 1 +/* Total number of available PIO buffers */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_PIO_BUFFS_OFST 68 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_PIO_BUFFS_LEN 2 +/* Size of a single PIO buffer */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_SIZE_PIO_BUFF_OFST 70 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_SIZE_PIO_BUFF_LEN 2 +/* On chips later than Medford the amount of address space assigned to each VI + * is configurable. This is a global setting that the driver must query to + * discover the VI to address mapping. Cut-through PIO (CTPIO) is not available + * with 8k VI windows. + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VI_WINDOW_MODE_OFST 72 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VI_WINDOW_MODE_LEN 1 +/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k. + * CTPIO is not mapped. + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VI_WINDOW_MODE_8K 0x0 +/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VI_WINDOW_MODE_16K 0x1 +/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VI_WINDOW_MODE_64K 0x2 +/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing + * (SF-115995-SW) in the present configuration of firmware and port mode. + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1 +/* Number of buffers per adapter that can be used for VFIFO Stuffing + * (SF-115995-SW) in the present configuration of firmware and port mode. + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2 +/* Entry count in the MAC stats array, including the final GENERATION_END + * entry. For MAC stats DMA, drivers should allocate a buffer large enough to + * hold at least this many 64-bit stats values, if they wish to receive all + * available stats. If the buffer is shorter than MAC_STATS_NUM_STATS * 8, the + * stats array returned will be truncated. + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_MAC_STATS_NUM_STATS_OFST 76 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_MAC_STATS_NUM_STATS_LEN 2 +/* Maximum supported value for MC_CMD_FILTER_OP_V3/MATCH_MARK_VALUE. This field + * will only be non-zero if MC_CMD_GET_CAPABILITIES/FILTER_ACTION_MARK is set. + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_FILTER_ACTION_MARK_MAX_OFST 80 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_FILTER_ACTION_MARK_MAX_LEN 4 +/* On devices where the INIT_RXQ_WITH_BUFFER_SIZE flag (in + * GET_CAPABILITIES_OUT_V2) is set, drivers have to specify a buffer size when + * they create an RX queue. 
Due to hardware limitations, only a small number of + * different buffer sizes may be available concurrently. Nonzero entries in + * this array are the sizes of buffers which the system guarantees will be + * available for use. If the list is empty, there are no limitations on + * concurrent buffer sizes. + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_GUARANTEED_RX_BUFFER_SIZES_OFST 84 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_GUARANTEED_RX_BUFFER_SIZES_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_GUARANTEED_RX_BUFFER_SIZES_NUM 16 + + +/***********************************/ +/* MC_CMD_V2_EXTN + * Encapsulation for a v2 extended command + */ +#define MC_CMD_V2_EXTN 0x7f + +/* MC_CMD_V2_EXTN_IN msgrequest */ +#define MC_CMD_V2_EXTN_IN_LEN 4 +/* the extended command number */ +#define MC_CMD_V2_EXTN_IN_EXTENDED_CMD_LBN 0 +#define MC_CMD_V2_EXTN_IN_EXTENDED_CMD_WIDTH 15 +#define MC_CMD_V2_EXTN_IN_UNUSED_LBN 15 +#define MC_CMD_V2_EXTN_IN_UNUSED_WIDTH 1 +/* the actual length of the encapsulated command (which is not in the v1 + * header) + */ +#define MC_CMD_V2_EXTN_IN_ACTUAL_LEN_LBN 16 +#define MC_CMD_V2_EXTN_IN_ACTUAL_LEN_WIDTH 10 +#define MC_CMD_V2_EXTN_IN_UNUSED2_LBN 26 +#define MC_CMD_V2_EXTN_IN_UNUSED2_WIDTH 2 +/* Type of command/response */ +#define MC_CMD_V2_EXTN_IN_MESSAGE_TYPE_LBN 28 +#define MC_CMD_V2_EXTN_IN_MESSAGE_TYPE_WIDTH 4 +/* enum: MCDI command directed to or response originating from the MC. */ +#define MC_CMD_V2_EXTN_IN_MCDI_MESSAGE_TYPE_MC 0x0 +/* enum: MCDI command directed to a TSA controller. MCDI responses of this type + * are not defined. + */ +#define MC_CMD_V2_EXTN_IN_MCDI_MESSAGE_TYPE_TSA 0x1 + + +/***********************************/ +/* MC_CMD_TCM_BUCKET_ALLOC + * Allocate a pacer bucket (for qau rp or a snapper test) + */ +#define MC_CMD_TCM_BUCKET_ALLOC 0xb2 +#undef MC_CMD_0xb2_PRIVILEGE_CTG + +#define MC_CMD_0xb2_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_TCM_BUCKET_ALLOC_IN msgrequest */ +#define MC_CMD_TCM_BUCKET_ALLOC_IN_LEN 0 + +/* MC_CMD_TCM_BUCKET_ALLOC_OUT msgresponse */ +#define MC_CMD_TCM_BUCKET_ALLOC_OUT_LEN 4 +/* the bucket id */ +#define MC_CMD_TCM_BUCKET_ALLOC_OUT_BUCKET_OFST 0 +#define MC_CMD_TCM_BUCKET_ALLOC_OUT_BUCKET_LEN 4 + + +/***********************************/ +/* MC_CMD_TCM_BUCKET_FREE + * Free a pacer bucket + */ +#define MC_CMD_TCM_BUCKET_FREE 0xb3 +#undef MC_CMD_0xb3_PRIVILEGE_CTG + +#define MC_CMD_0xb3_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_TCM_BUCKET_FREE_IN msgrequest */ +#define MC_CMD_TCM_BUCKET_FREE_IN_LEN 4 +/* the bucket id */ +#define MC_CMD_TCM_BUCKET_FREE_IN_BUCKET_OFST 0 +#define MC_CMD_TCM_BUCKET_FREE_IN_BUCKET_LEN 4 + +/* MC_CMD_TCM_BUCKET_FREE_OUT msgresponse */ +#define MC_CMD_TCM_BUCKET_FREE_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_TCM_BUCKET_INIT + * Initialise pacer bucket with a given rate + */ +#define MC_CMD_TCM_BUCKET_INIT 0xb4 +#undef MC_CMD_0xb4_PRIVILEGE_CTG + +#define MC_CMD_0xb4_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_TCM_BUCKET_INIT_IN msgrequest */ +#define MC_CMD_TCM_BUCKET_INIT_IN_LEN 8 +/* the bucket id */ +#define MC_CMD_TCM_BUCKET_INIT_IN_BUCKET_OFST 0 +#define MC_CMD_TCM_BUCKET_INIT_IN_BUCKET_LEN 4 +/* the rate in mbps */ +#define MC_CMD_TCM_BUCKET_INIT_IN_RATE_OFST 4 +#define MC_CMD_TCM_BUCKET_INIT_IN_RATE_LEN 4 + +/* MC_CMD_TCM_BUCKET_INIT_EXT_IN msgrequest */ +#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_LEN 12 +/* the bucket id */ +#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_BUCKET_OFST 0 +#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_BUCKET_LEN 4 +/* the rate in mbps */ +#define 
MC_CMD_TCM_BUCKET_INIT_EXT_IN_RATE_OFST 4 +#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_RATE_LEN 4 +/* the desired maximum fill level */ +#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_MAX_FILL_OFST 8 +#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_MAX_FILL_LEN 4 + +/* MC_CMD_TCM_BUCKET_INIT_OUT msgresponse */ +#define MC_CMD_TCM_BUCKET_INIT_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_TCM_TXQ_INIT + * Initialise txq in pacer with given options or set options + */ +#define MC_CMD_TCM_TXQ_INIT 0xb5 +#undef MC_CMD_0xb5_PRIVILEGE_CTG + +#define MC_CMD_0xb5_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_TCM_TXQ_INIT_IN msgrequest */ +#define MC_CMD_TCM_TXQ_INIT_IN_LEN 28 +/* the txq id */ +#define MC_CMD_TCM_TXQ_INIT_IN_QID_OFST 0 +#define MC_CMD_TCM_TXQ_INIT_IN_QID_LEN 4 +/* the static priority associated with the txq */ +#define MC_CMD_TCM_TXQ_INIT_IN_LABEL_OFST 4 +#define MC_CMD_TCM_TXQ_INIT_IN_LABEL_LEN 4 +/* bitmask of the priority queues this txq is inserted into when inserted. */ +#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAGS_OFST 8 +#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAGS_LEN 4 +#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_GUARANTEED_LBN 0 +#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_GUARANTEED_WIDTH 1 +#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_NORMAL_LBN 1 +#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_NORMAL_WIDTH 1 +#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_LOW_LBN 2 +#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_LOW_WIDTH 1 +/* the reaction point (RP) bucket */ +#define MC_CMD_TCM_TXQ_INIT_IN_RP_BKT_OFST 12 +#define MC_CMD_TCM_TXQ_INIT_IN_RP_BKT_LEN 4 +/* an already reserved bucket (typically set to bucket associated with outer + * vswitch) + */ +#define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT1_OFST 16 +#define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT1_LEN 4 +/* an already reserved bucket (typically set to bucket associated with inner + * vswitch) + */ +#define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT2_OFST 20 +#define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT2_LEN 4 +/* the min bucket (typically for ETS/minimum bandwidth) */ +#define MC_CMD_TCM_TXQ_INIT_IN_MIN_BKT_OFST 24 +#define MC_CMD_TCM_TXQ_INIT_IN_MIN_BKT_LEN 4 + +/* MC_CMD_TCM_TXQ_INIT_EXT_IN msgrequest */ +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LEN 32 +/* the txq id */ +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_QID_OFST 0 +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_QID_LEN 4 +/* the static priority associated with the txq */ +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_NORMAL_OFST 4 +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_NORMAL_LEN 4 +/* bitmask of the priority queues this txq is inserted into when inserted. 
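+ * As an illustrative sketch only (not part of the upstream header), this
+ * PQ_FLAGS word is an ordinary bitmask built from the _LBN values, e.g. to
+ * request insertion into the guaranteed and normal priority queues but not
+ * the low priority queue:
+ *
+ *   uint32_t pq_flags =
+ *       (1u << MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_GUARANTEED_LBN) |
+ *       (1u << MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_NORMAL_LBN);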
*/ +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAGS_OFST 8 +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAGS_LEN 4 +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_GUARANTEED_LBN 0 +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_GUARANTEED_WIDTH 1 +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_NORMAL_LBN 1 +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_NORMAL_WIDTH 1 +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_LOW_LBN 2 +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_LOW_WIDTH 1 +/* the reaction point (RP) bucket */ +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_RP_BKT_OFST 12 +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_RP_BKT_LEN 4 +/* an already reserved bucket (typically set to bucket associated with outer + * vswitch) + */ +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT1_OFST 16 +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT1_LEN 4 +/* an already reserved bucket (typically set to bucket associated with inner + * vswitch) + */ +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT2_OFST 20 +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT2_LEN 4 +/* the min bucket (typically for ETS/minimum bandwidth) */ +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MIN_BKT_OFST 24 +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MIN_BKT_LEN 4 +/* the static priority associated with the txq */ +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_GUARANTEED_OFST 28 +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_GUARANTEED_LEN 4 + +/* MC_CMD_TCM_TXQ_INIT_OUT msgresponse */ +#define MC_CMD_TCM_TXQ_INIT_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_LINK_PIOBUF + * Link a push I/O buffer to a TxQ + */ +#define MC_CMD_LINK_PIOBUF 0x92 +#undef MC_CMD_0x92_PRIVILEGE_CTG + +#define MC_CMD_0x92_PRIVILEGE_CTG SRIOV_CTG_ONLOAD + +/* MC_CMD_LINK_PIOBUF_IN msgrequest */ +#define MC_CMD_LINK_PIOBUF_IN_LEN 8 +/* Handle for allocated push I/O buffer. */ +#define MC_CMD_LINK_PIOBUF_IN_PIOBUF_HANDLE_OFST 0 +#define MC_CMD_LINK_PIOBUF_IN_PIOBUF_HANDLE_LEN 4 +/* Function Local Instance (VI) number. */ +#define MC_CMD_LINK_PIOBUF_IN_TXQ_INSTANCE_OFST 4 +#define MC_CMD_LINK_PIOBUF_IN_TXQ_INSTANCE_LEN 4 + +/* MC_CMD_LINK_PIOBUF_OUT msgresponse */ +#define MC_CMD_LINK_PIOBUF_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_UNLINK_PIOBUF + * Unlink a push I/O buffer from a TxQ + */ +#define MC_CMD_UNLINK_PIOBUF 0x93 +#undef MC_CMD_0x93_PRIVILEGE_CTG + +#define MC_CMD_0x93_PRIVILEGE_CTG SRIOV_CTG_ONLOAD + +/* MC_CMD_UNLINK_PIOBUF_IN msgrequest */ +#define MC_CMD_UNLINK_PIOBUF_IN_LEN 4 +/* Function Local Instance (VI) number. */ +#define MC_CMD_UNLINK_PIOBUF_IN_TXQ_INSTANCE_OFST 0 +#define MC_CMD_UNLINK_PIOBUF_IN_TXQ_INSTANCE_LEN 4 + +/* MC_CMD_UNLINK_PIOBUF_OUT msgresponse */ +#define MC_CMD_UNLINK_PIOBUF_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_VSWITCH_ALLOC + * allocate and initialise a v-switch. + */ +#define MC_CMD_VSWITCH_ALLOC 0x94 +#undef MC_CMD_0x94_PRIVILEGE_CTG + +#define MC_CMD_0x94_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_VSWITCH_ALLOC_IN msgrequest */ +#define MC_CMD_VSWITCH_ALLOC_IN_LEN 16 +/* The port to connect to the v-switch's upstream port. */ +#define MC_CMD_VSWITCH_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0 +#define MC_CMD_VSWITCH_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4 +/* The type of v-switch to create. 
*/
+#define MC_CMD_VSWITCH_ALLOC_IN_TYPE_OFST 4
+#define MC_CMD_VSWITCH_ALLOC_IN_TYPE_LEN 4
+/* enum: VLAN */
+#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VLAN 0x1
+/* enum: VEB */
+#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEB 0x2
+/* enum: VEPA (obsolete) */
+#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEPA 0x3
+/* enum: MUX */
+#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_MUX 0x4
+/* enum: Snapper specific; semantics TBD */
+#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_TEST 0x5
+/* Flags controlling v-port creation */
+#define MC_CMD_VSWITCH_ALLOC_IN_FLAGS_OFST 8
+#define MC_CMD_VSWITCH_ALLOC_IN_FLAGS_LEN 4
+#define MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_LBN 0
+#define MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1
+/* The number of VLAN tags to allow for attached v-ports. For VLAN aggregators,
+ * this must be one or greater, and the attached v-ports must have exactly this
+ * number of tags. For other v-switch types, this must be zero or greater, and
+ * is an upper limit on the number of VLAN tags for attached v-ports. An error
+ * will be returned if existing configuration means we can't support attached
+ * v-ports with this number of tags.
+ */
+#define MC_CMD_VSWITCH_ALLOC_IN_NUM_VLAN_TAGS_OFST 12
+#define MC_CMD_VSWITCH_ALLOC_IN_NUM_VLAN_TAGS_LEN 4
+
+/* MC_CMD_VSWITCH_ALLOC_OUT msgresponse */
+#define MC_CMD_VSWITCH_ALLOC_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VSWITCH_FREE
+ * de-allocate a v-switch.
+ */
+#define MC_CMD_VSWITCH_FREE 0x95
+#undef MC_CMD_0x95_PRIVILEGE_CTG
+
+#define MC_CMD_0x95_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VSWITCH_FREE_IN msgrequest */
+#define MC_CMD_VSWITCH_FREE_IN_LEN 4
+/* The port to which the v-switch is connected. */
+#define MC_CMD_VSWITCH_FREE_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_VSWITCH_FREE_IN_UPSTREAM_PORT_ID_LEN 4
+
+/* MC_CMD_VSWITCH_FREE_OUT msgresponse */
+#define MC_CMD_VSWITCH_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VSWITCH_QUERY
+ * read some config of v-switch. For now this command is an empty placeholder.
+ * It may be used to check if a v-switch is connected to a given EVB port (if
+ * not, then the command returns ENOENT).
+ */
+#define MC_CMD_VSWITCH_QUERY 0x63
+#undef MC_CMD_0x63_PRIVILEGE_CTG
+
+#define MC_CMD_0x63_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VSWITCH_QUERY_IN msgrequest */
+#define MC_CMD_VSWITCH_QUERY_IN_LEN 4
+/* The port to which the v-switch is connected. */
+#define MC_CMD_VSWITCH_QUERY_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_VSWITCH_QUERY_IN_UPSTREAM_PORT_ID_LEN 4
+
+/* MC_CMD_VSWITCH_QUERY_OUT msgresponse */
+#define MC_CMD_VSWITCH_QUERY_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VPORT_ALLOC
+ * allocate a v-port.
+ */
+#define MC_CMD_VPORT_ALLOC 0x96
+#undef MC_CMD_0x96_PRIVILEGE_CTG
+
+#define MC_CMD_0x96_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VPORT_ALLOC_IN msgrequest */
+#define MC_CMD_VPORT_ALLOC_IN_LEN 20
+/* The port to which the v-switch is connected. */
+#define MC_CMD_VPORT_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_VPORT_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
+/* The type of the new v-port.
*/
+#define MC_CMD_VPORT_ALLOC_IN_TYPE_OFST 4
+#define MC_CMD_VPORT_ALLOC_IN_TYPE_LEN 4
+/* enum: VLAN (obsolete) */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VLAN 0x1
+/* enum: VEB (obsolete) */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VEB 0x2
+/* enum: VEPA (obsolete) */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VEPA 0x3
+/* enum: A normal v-port receives packets which match a specified MAC and/or
+ * VLAN.
+ */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_NORMAL 0x4
+/* enum: An expansion v-port receives packets which don't match any other
+ * v-port.
+ */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_EXPANSION 0x5
+/* enum: A test v-port receives packets which match any filters installed by
+ * its downstream components.
+ */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_TEST 0x6
+/* Flags controlling v-port creation */
+#define MC_CMD_VPORT_ALLOC_IN_FLAGS_OFST 8
+#define MC_CMD_VPORT_ALLOC_IN_FLAGS_LEN 4
+#define MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_LBN 0
+#define MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1
+#define MC_CMD_VPORT_ALLOC_IN_FLAG_VLAN_RESTRICT_LBN 1
+#define MC_CMD_VPORT_ALLOC_IN_FLAG_VLAN_RESTRICT_WIDTH 1
+/* The number of VLAN tags to insert/remove. An error will be returned if
+ * incompatible with the number of VLAN tags specified for the upstream
+ * v-switch.
+ */
+#define MC_CMD_VPORT_ALLOC_IN_NUM_VLAN_TAGS_OFST 12
+#define MC_CMD_VPORT_ALLOC_IN_NUM_VLAN_TAGS_LEN 4
+/* The actual VLAN tags to insert/remove */
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAGS_OFST 16
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAGS_LEN 4
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_0_LBN 0
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_0_WIDTH 16
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_1_LBN 16
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_1_WIDTH 16
+
+/* MC_CMD_VPORT_ALLOC_OUT msgresponse */
+#define MC_CMD_VPORT_ALLOC_OUT_LEN 4
+/* The handle of the new v-port */
+#define MC_CMD_VPORT_ALLOC_OUT_VPORT_ID_OFST 0
+#define MC_CMD_VPORT_ALLOC_OUT_VPORT_ID_LEN 4
+
+
+/***********************************/
+/* MC_CMD_VPORT_FREE
+ * de-allocate a v-port.
+ */
+#define MC_CMD_VPORT_FREE 0x97
+#undef MC_CMD_0x97_PRIVILEGE_CTG
+
+#define MC_CMD_0x97_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VPORT_FREE_IN msgrequest */
+#define MC_CMD_VPORT_FREE_IN_LEN 4
+/* The handle of the v-port */
+#define MC_CMD_VPORT_FREE_IN_VPORT_ID_OFST 0
+#define MC_CMD_VPORT_FREE_IN_VPORT_ID_LEN 4
+
+/* MC_CMD_VPORT_FREE_OUT msgresponse */
+#define MC_CMD_VPORT_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VADAPTOR_ALLOC
+ * allocate a v-adaptor.
+ */
+#define MC_CMD_VADAPTOR_ALLOC 0x98
+#undef MC_CMD_0x98_PRIVILEGE_CTG
+
+#define MC_CMD_0x98_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VADAPTOR_ALLOC_IN msgrequest */
+#define MC_CMD_VADAPTOR_ALLOC_IN_LEN 30
+/* The port to connect to the v-adaptor's port.
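+ * As an illustrative sketch only (not part of the upstream header), the two
+ * optional VLAN tags of MC_CMD_VPORT_ALLOC above are packed into the single
+ * 32-bit VLAN_TAGS word using the _LBN values, e.g. for two 16-bit host
+ * values tag0 and tag1 (hypothetical variable names):
+ *
+ *   uint32_t vlan_tags =
+ *       ((uint32_t)tag0 << MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_0_LBN) |
+ *       ((uint32_t)tag1 << MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_1_LBN);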
*/ +#define MC_CMD_VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0 +#define MC_CMD_VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4 +/* Flags controlling v-adaptor creation */ +#define MC_CMD_VADAPTOR_ALLOC_IN_FLAGS_OFST 8 +#define MC_CMD_VADAPTOR_ALLOC_IN_FLAGS_LEN 4 +#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_LBN 0 +#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_WIDTH 1 +#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 1 +#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1 +/* The number of VLAN tags to strip on receive */ +#define MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLANS_OFST 12 +#define MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLANS_LEN 4 +/* The number of VLAN tags to transparently insert/remove. */ +#define MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLAN_TAGS_OFST 16 +#define MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLAN_TAGS_LEN 4 +/* The actual VLAN tags to insert/remove */ +#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAGS_OFST 20 +#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAGS_LEN 4 +#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_0_LBN 0 +#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_0_WIDTH 16 +#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_1_LBN 16 +#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_1_WIDTH 16 +/* The MAC address to assign to this v-adaptor */ +#define MC_CMD_VADAPTOR_ALLOC_IN_MACADDR_OFST 24 +#define MC_CMD_VADAPTOR_ALLOC_IN_MACADDR_LEN 6 +/* enum: Derive the MAC address from the upstream port */ +#define MC_CMD_VADAPTOR_ALLOC_IN_AUTO_MAC 0x0 + +/* MC_CMD_VADAPTOR_ALLOC_OUT msgresponse */ +#define MC_CMD_VADAPTOR_ALLOC_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_VADAPTOR_FREE + * de-allocate a v-adaptor. + */ +#define MC_CMD_VADAPTOR_FREE 0x99 +#undef MC_CMD_0x99_PRIVILEGE_CTG + +#define MC_CMD_0x99_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_VADAPTOR_FREE_IN msgrequest */ +#define MC_CMD_VADAPTOR_FREE_IN_LEN 4 +/* The port to which the v-adaptor is connected. */ +#define MC_CMD_VADAPTOR_FREE_IN_UPSTREAM_PORT_ID_OFST 0 +#define MC_CMD_VADAPTOR_FREE_IN_UPSTREAM_PORT_ID_LEN 4 + +/* MC_CMD_VADAPTOR_FREE_OUT msgresponse */ +#define MC_CMD_VADAPTOR_FREE_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_VADAPTOR_SET_MAC + * assign a new MAC address to a v-adaptor. + */ +#define MC_CMD_VADAPTOR_SET_MAC 0x5d +#undef MC_CMD_0x5d_PRIVILEGE_CTG + +#define MC_CMD_0x5d_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_VADAPTOR_SET_MAC_IN msgrequest */ +#define MC_CMD_VADAPTOR_SET_MAC_IN_LEN 10 +/* The port to which the v-adaptor is connected. */ +#define MC_CMD_VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID_OFST 0 +#define MC_CMD_VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID_LEN 4 +/* The new MAC address to assign to this v-adaptor */ +#define MC_CMD_VADAPTOR_SET_MAC_IN_MACADDR_OFST 4 +#define MC_CMD_VADAPTOR_SET_MAC_IN_MACADDR_LEN 6 + +/* MC_CMD_VADAPTOR_SET_MAC_OUT msgresponse */ +#define MC_CMD_VADAPTOR_SET_MAC_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_VADAPTOR_GET_MAC + * read the MAC address assigned to a v-adaptor. + */ +#define MC_CMD_VADAPTOR_GET_MAC 0x5e +#undef MC_CMD_0x5e_PRIVILEGE_CTG + +#define MC_CMD_0x5e_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_VADAPTOR_GET_MAC_IN msgrequest */ +#define MC_CMD_VADAPTOR_GET_MAC_IN_LEN 4 +/* The port to which the v-adaptor is connected. 
*/ +#define MC_CMD_VADAPTOR_GET_MAC_IN_UPSTREAM_PORT_ID_OFST 0 +#define MC_CMD_VADAPTOR_GET_MAC_IN_UPSTREAM_PORT_ID_LEN 4 + +/* MC_CMD_VADAPTOR_GET_MAC_OUT msgresponse */ +#define MC_CMD_VADAPTOR_GET_MAC_OUT_LEN 6 +/* The MAC address assigned to this v-adaptor */ +#define MC_CMD_VADAPTOR_GET_MAC_OUT_MACADDR_OFST 0 +#define MC_CMD_VADAPTOR_GET_MAC_OUT_MACADDR_LEN 6 + + +/***********************************/ +/* MC_CMD_VADAPTOR_QUERY + * read some config of v-adaptor. + */ +#define MC_CMD_VADAPTOR_QUERY 0x61 +#undef MC_CMD_0x61_PRIVILEGE_CTG + +#define MC_CMD_0x61_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_VADAPTOR_QUERY_IN msgrequest */ +#define MC_CMD_VADAPTOR_QUERY_IN_LEN 4 +/* The port to which the v-adaptor is connected. */ +#define MC_CMD_VADAPTOR_QUERY_IN_UPSTREAM_PORT_ID_OFST 0 +#define MC_CMD_VADAPTOR_QUERY_IN_UPSTREAM_PORT_ID_LEN 4 + +/* MC_CMD_VADAPTOR_QUERY_OUT msgresponse */ +#define MC_CMD_VADAPTOR_QUERY_OUT_LEN 12 +/* The EVB port flags as defined at MC_CMD_VPORT_ALLOC. */ +#define MC_CMD_VADAPTOR_QUERY_OUT_PORT_FLAGS_OFST 0 +#define MC_CMD_VADAPTOR_QUERY_OUT_PORT_FLAGS_LEN 4 +/* The v-adaptor flags as defined at MC_CMD_VADAPTOR_ALLOC. */ +#define MC_CMD_VADAPTOR_QUERY_OUT_VADAPTOR_FLAGS_OFST 4 +#define MC_CMD_VADAPTOR_QUERY_OUT_VADAPTOR_FLAGS_LEN 4 +/* The number of VLAN tags that may still be added */ +#define MC_CMD_VADAPTOR_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_OFST 8 +#define MC_CMD_VADAPTOR_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_LEN 4 + + +/***********************************/ +/* MC_CMD_EVB_PORT_ASSIGN + * assign a port to a PCI function. + */ +#define MC_CMD_EVB_PORT_ASSIGN 0x9a +#undef MC_CMD_0x9a_PRIVILEGE_CTG + +#define MC_CMD_0x9a_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_EVB_PORT_ASSIGN_IN msgrequest */ +#define MC_CMD_EVB_PORT_ASSIGN_IN_LEN 8 +/* The port to assign. */ +#define MC_CMD_EVB_PORT_ASSIGN_IN_PORT_ID_OFST 0 +#define MC_CMD_EVB_PORT_ASSIGN_IN_PORT_ID_LEN 4 +/* The target function to modify. */ +#define MC_CMD_EVB_PORT_ASSIGN_IN_FUNCTION_OFST 4 +#define MC_CMD_EVB_PORT_ASSIGN_IN_FUNCTION_LEN 4 +#define MC_CMD_EVB_PORT_ASSIGN_IN_PF_LBN 0 +#define MC_CMD_EVB_PORT_ASSIGN_IN_PF_WIDTH 16 +#define MC_CMD_EVB_PORT_ASSIGN_IN_VF_LBN 16 +#define MC_CMD_EVB_PORT_ASSIGN_IN_VF_WIDTH 16 + +/* MC_CMD_EVB_PORT_ASSIGN_OUT msgresponse */ +#define MC_CMD_EVB_PORT_ASSIGN_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_RDWR_A64_REGIONS + * Assign the 64 bit region addresses. + */ +#define MC_CMD_RDWR_A64_REGIONS 0x9b +#undef MC_CMD_0x9b_PRIVILEGE_CTG + +#define MC_CMD_0x9b_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_RDWR_A64_REGIONS_IN msgrequest */ +#define MC_CMD_RDWR_A64_REGIONS_IN_LEN 17 +#define MC_CMD_RDWR_A64_REGIONS_IN_REGION0_OFST 0 +#define MC_CMD_RDWR_A64_REGIONS_IN_REGION0_LEN 4 +#define MC_CMD_RDWR_A64_REGIONS_IN_REGION1_OFST 4 +#define MC_CMD_RDWR_A64_REGIONS_IN_REGION1_LEN 4 +#define MC_CMD_RDWR_A64_REGIONS_IN_REGION2_OFST 8 +#define MC_CMD_RDWR_A64_REGIONS_IN_REGION2_LEN 4 +#define MC_CMD_RDWR_A64_REGIONS_IN_REGION3_OFST 12 +#define MC_CMD_RDWR_A64_REGIONS_IN_REGION3_LEN 4 +/* Write enable bits 0-3, set to write, clear to read. */ +#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_LBN 128 +#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_WIDTH 4 +#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_BYTE_OFST 16 +#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_BYTE_LEN 1 + +/* MC_CMD_RDWR_A64_REGIONS_OUT msgresponse: This data always included + * regardless of state of write bits in the request. 
+ */
+#define MC_CMD_RDWR_A64_REGIONS_OUT_LEN 16
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION0_OFST 0
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION0_LEN 4
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION1_OFST 4
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION1_LEN 4
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION2_OFST 8
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION2_LEN 4
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION3_OFST 12
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION3_LEN 4
+
+
+/***********************************/
+/* MC_CMD_ONLOAD_STACK_ALLOC
+ * Allocate an Onload stack ID.
+ */
+#define MC_CMD_ONLOAD_STACK_ALLOC 0x9c
+#undef MC_CMD_0x9c_PRIVILEGE_CTG
+
+#define MC_CMD_0x9c_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
+/* MC_CMD_ONLOAD_STACK_ALLOC_IN msgrequest */
+#define MC_CMD_ONLOAD_STACK_ALLOC_IN_LEN 4
+/* The handle of the owning upstream port */
+#define MC_CMD_ONLOAD_STACK_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_ONLOAD_STACK_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
+
+/* MC_CMD_ONLOAD_STACK_ALLOC_OUT msgresponse */
+#define MC_CMD_ONLOAD_STACK_ALLOC_OUT_LEN 4
+/* The handle of the new Onload stack */
+#define MC_CMD_ONLOAD_STACK_ALLOC_OUT_ONLOAD_STACK_ID_OFST 0
+#define MC_CMD_ONLOAD_STACK_ALLOC_OUT_ONLOAD_STACK_ID_LEN 4
+
+
+/***********************************/
+/* MC_CMD_ONLOAD_STACK_FREE
+ * Free an Onload stack ID.
+ */
+#define MC_CMD_ONLOAD_STACK_FREE 0x9d
+#undef MC_CMD_0x9d_PRIVILEGE_CTG
+
+#define MC_CMD_0x9d_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
+/* MC_CMD_ONLOAD_STACK_FREE_IN msgrequest */
+#define MC_CMD_ONLOAD_STACK_FREE_IN_LEN 4
+/* The handle of the Onload stack */
+#define MC_CMD_ONLOAD_STACK_FREE_IN_ONLOAD_STACK_ID_OFST 0
+#define MC_CMD_ONLOAD_STACK_FREE_IN_ONLOAD_STACK_ID_LEN 4
+
+/* MC_CMD_ONLOAD_STACK_FREE_OUT msgresponse */
+#define MC_CMD_ONLOAD_STACK_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_ALLOC
+ * Allocate an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC 0x9e
+#undef MC_CMD_0x9e_PRIVILEGE_CTG
+
+#define MC_CMD_0x9e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_RSS_CONTEXT_ALLOC_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN 12
+/* The handle of the owning upstream port */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
+/* The type of context to allocate */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_OFST 4
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_LEN 4
+/* enum: Allocate a context for exclusive use. The key and indirection table
+ * must be explicitly configured.
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE 0x0
+/* enum: Allocate a context for shared use; this will spread across a range of
+ * queues, but the key and indirection table are pre-configured and may not be
+ * changed. For this mode, NUM_QUEUES must be 2, 4, 8, 16, 32 or 64.
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_SHARED 0x1
+/* Number of queues spanned by this context, in the range 1-64; valid offsets
+ * in the indirection table will be in the range 0 to NUM_QUEUES-1.
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_NUM_QUEUES_OFST 8
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_NUM_QUEUES_LEN 4
+
+/* MC_CMD_RSS_CONTEXT_ALLOC_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN 4
+/* The handle of the new RSS context. This should be considered opaque to the
+ * host, although a value of 0xFFFFFFFF is guaranteed never to be a valid
+ * handle.
+ */ +#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_OFST 0 +#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_LEN 4 +/* enum: guaranteed invalid RSS context handle value */ +#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_INVALID 0xffffffff + + +/***********************************/ +/* MC_CMD_RSS_CONTEXT_FREE + * Free an RSS context. + */ +#define MC_CMD_RSS_CONTEXT_FREE 0x9f +#undef MC_CMD_0x9f_PRIVILEGE_CTG + +#define MC_CMD_0x9f_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_RSS_CONTEXT_FREE_IN msgrequest */ +#define MC_CMD_RSS_CONTEXT_FREE_IN_LEN 4 +/* The handle of the RSS context */ +#define MC_CMD_RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID_OFST 0 +#define MC_CMD_RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID_LEN 4 + +/* MC_CMD_RSS_CONTEXT_FREE_OUT msgresponse */ +#define MC_CMD_RSS_CONTEXT_FREE_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_RSS_CONTEXT_SET_KEY + * Set the Toeplitz hash key for an RSS context. + */ +#define MC_CMD_RSS_CONTEXT_SET_KEY 0xa0 +#undef MC_CMD_0xa0_PRIVILEGE_CTG + +#define MC_CMD_0xa0_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_RSS_CONTEXT_SET_KEY_IN msgrequest */ +#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN 44 +/* The handle of the RSS context */ +#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID_OFST 0 +#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID_LEN 4 +/* The 40-byte Toeplitz hash key (TBD endianness issues?) */ +#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_OFST 4 +#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN 40 + +/* MC_CMD_RSS_CONTEXT_SET_KEY_OUT msgresponse */ +#define MC_CMD_RSS_CONTEXT_SET_KEY_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_RSS_CONTEXT_GET_KEY + * Get the Toeplitz hash key for an RSS context. + */ +#define MC_CMD_RSS_CONTEXT_GET_KEY 0xa1 +#undef MC_CMD_0xa1_PRIVILEGE_CTG + +#define MC_CMD_0xa1_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_RSS_CONTEXT_GET_KEY_IN msgrequest */ +#define MC_CMD_RSS_CONTEXT_GET_KEY_IN_LEN 4 +/* The handle of the RSS context */ +#define MC_CMD_RSS_CONTEXT_GET_KEY_IN_RSS_CONTEXT_ID_OFST 0 +#define MC_CMD_RSS_CONTEXT_GET_KEY_IN_RSS_CONTEXT_ID_LEN 4 + +/* MC_CMD_RSS_CONTEXT_GET_KEY_OUT msgresponse */ +#define MC_CMD_RSS_CONTEXT_GET_KEY_OUT_LEN 44 +/* The 40-byte Toeplitz hash key (TBD endianness issues?) */ +#define MC_CMD_RSS_CONTEXT_GET_KEY_OUT_TOEPLITZ_KEY_OFST 4 +#define MC_CMD_RSS_CONTEXT_GET_KEY_OUT_TOEPLITZ_KEY_LEN 40 + + +/***********************************/ +/* MC_CMD_RSS_CONTEXT_SET_TABLE + * Set the indirection table for an RSS context. + */ +#define MC_CMD_RSS_CONTEXT_SET_TABLE 0xa2 +#undef MC_CMD_0xa2_PRIVILEGE_CTG + +#define MC_CMD_0xa2_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_RSS_CONTEXT_SET_TABLE_IN msgrequest */ +#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN 132 +/* The handle of the RSS context */ +#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID_OFST 0 +#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID_LEN 4 +/* The 128-byte indirection table (1 byte per entry) */ +#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_OFST 4 +#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN 128 + +/* MC_CMD_RSS_CONTEXT_SET_TABLE_OUT msgresponse */ +#define MC_CMD_RSS_CONTEXT_SET_TABLE_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_RSS_CONTEXT_GET_TABLE + * Get the indirection table for an RSS context. 
+ */ +#define MC_CMD_RSS_CONTEXT_GET_TABLE 0xa3 +#undef MC_CMD_0xa3_PRIVILEGE_CTG + +#define MC_CMD_0xa3_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_RSS_CONTEXT_GET_TABLE_IN msgrequest */ +#define MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN 4 +/* The handle of the RSS context */ +#define MC_CMD_RSS_CONTEXT_GET_TABLE_IN_RSS_CONTEXT_ID_OFST 0 +#define MC_CMD_RSS_CONTEXT_GET_TABLE_IN_RSS_CONTEXT_ID_LEN 4 + +/* MC_CMD_RSS_CONTEXT_GET_TABLE_OUT msgresponse */ +#define MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_LEN 132 +/* The 128-byte indirection table (1 byte per entry) */ +#define MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE_OFST 4 +#define MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE_LEN 128 + + +/***********************************/ +/* MC_CMD_RSS_CONTEXT_SET_FLAGS + * Set various control flags for an RSS context. + */ +#define MC_CMD_RSS_CONTEXT_SET_FLAGS 0xe1 +#undef MC_CMD_0xe1_PRIVILEGE_CTG + +#define MC_CMD_0xe1_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_RSS_CONTEXT_SET_FLAGS_IN msgrequest */ +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN 8 +/* The handle of the RSS context */ +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID_OFST 0 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID_LEN 4 +/* Hash control flags. The _EN bits are always supported, but new modes are + * available when ADDITIONAL_RSS_MODES is reported by MC_CMD_GET_CAPABILITIES: + * in this case, the MODE fields may be set to non-zero values, and will take + * effect regardless of the settings of the _EN flags. See the RSS_MODE + * structure for the meaning of the mode bits. Drivers must check the + * capability before trying to set any _MODE fields, as older firmware will + * reject any attempt to set the FLAGS field to a value > 0xff with EINVAL. In + * the case where all the _MODE flags are zero, the _EN flags take effect, + * providing backward compatibility for existing drivers. (Setting all _MODE + * *and* all _EN flags to zero is valid, to disable RSS spreading for that + * particular packet type.) 
+ */ +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_FLAGS_OFST 4 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_FLAGS_LEN 4 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_LBN 0 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_WIDTH 1 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN_LBN 1 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN_WIDTH 1 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV6_EN_LBN 2 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV6_EN_WIDTH 1 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN_LBN 3 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN_WIDTH 1 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RESERVED_LBN 4 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RESERVED_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE_LBN 8 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV4_RSS_MODE_LBN 12 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV4_RSS_MODE_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV4_RSS_MODE_LBN 16 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV4_RSS_MODE_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV6_RSS_MODE_LBN 20 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV6_RSS_MODE_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV6_RSS_MODE_LBN 24 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV6_RSS_MODE_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV6_RSS_MODE_LBN 28 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV6_RSS_MODE_WIDTH 4 + +/* MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT msgresponse */ +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_RSS_CONTEXT_GET_FLAGS + * Get various control flags for an RSS context. + */ +#define MC_CMD_RSS_CONTEXT_GET_FLAGS 0xe2 +#undef MC_CMD_0xe2_PRIVILEGE_CTG + +#define MC_CMD_0xe2_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_RSS_CONTEXT_GET_FLAGS_IN msgrequest */ +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_LEN 4 +/* The handle of the RSS context */ +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_RSS_CONTEXT_ID_OFST 0 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_RSS_CONTEXT_ID_LEN 4 + +/* MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT msgresponse */ +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN 8 +/* Hash control flags. If all _MODE bits are zero (which will always be true + * for older firmware which does not report the ADDITIONAL_RSS_MODES + * capability), the _EN bits report the state. If any _MODE bits are non-zero + * (which will only be true when the firmware reports ADDITIONAL_RSS_MODES) + * then the _EN bits should be disregarded, although the _MODE flags are + * guaranteed to be consistent with the _EN flags for a freshly-allocated RSS + * context and in the case where the _EN flags were used in the SET. This + * provides backward compatibility: old drivers will not be attempting to + * derive any meaning from the _MODE bits (and can never set them to any value + * not representable by the _EN bits); new drivers can always determine the + * mode by looking only at the _MODE bits; the value returned by a GET can + * always be used for a SET regardless of old/new driver vs. old/new firmware. 
+ */ +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_OFST 4 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_LEN 4 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_LBN 0 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_WIDTH 1 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV4_EN_LBN 1 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV4_EN_WIDTH 1 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV6_EN_LBN 2 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV6_EN_WIDTH 1 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_LBN 3 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_WIDTH 1 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_RESERVED_LBN 4 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_RESERVED_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV4_RSS_MODE_LBN 8 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV4_RSS_MODE_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_LBN 12 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV4_RSS_MODE_LBN 16 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV4_RSS_MODE_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV6_RSS_MODE_LBN 20 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV6_RSS_MODE_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_LBN 24 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV6_RSS_MODE_LBN 28 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV6_RSS_MODE_WIDTH 4 + + +/***********************************/ +/* MC_CMD_DOT1P_MAPPING_ALLOC + * Allocate a .1p mapping. + */ +#define MC_CMD_DOT1P_MAPPING_ALLOC 0xa4 +#undef MC_CMD_0xa4_PRIVILEGE_CTG + +#define MC_CMD_0xa4_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_DOT1P_MAPPING_ALLOC_IN msgrequest */ +#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_LEN 8 +/* The handle of the owning upstream port */ +#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0 +#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4 +/* Number of queues spanned by this mapping, in the range 1-64; valid fixed + * offsets in the mapping table will be in the range 0 to NUM_QUEUES-1, and + * referenced RSS contexts must span no more than this number. + */ +#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_NUM_QUEUES_OFST 4 +#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_NUM_QUEUES_LEN 4 + +/* MC_CMD_DOT1P_MAPPING_ALLOC_OUT msgresponse */ +#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_LEN 4 +/* The handle of the new .1p mapping. This should be considered opaque to the + * host, although a value of 0xFFFFFFFF is guaranteed never to be a valid + * handle. + */ +#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_OFST 0 +#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_LEN 4 +/* enum: guaranteed invalid .1p mapping handle value */ +#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_INVALID 0xffffffff + + +/***********************************/ +/* MC_CMD_DOT1P_MAPPING_FREE + * Free a .1p mapping. 
+ */ +#define MC_CMD_DOT1P_MAPPING_FREE 0xa5 +#undef MC_CMD_0xa5_PRIVILEGE_CTG + +#define MC_CMD_0xa5_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_DOT1P_MAPPING_FREE_IN msgrequest */ +#define MC_CMD_DOT1P_MAPPING_FREE_IN_LEN 4 +/* The handle of the .1p mapping */ +#define MC_CMD_DOT1P_MAPPING_FREE_IN_DOT1P_MAPPING_ID_OFST 0 +#define MC_CMD_DOT1P_MAPPING_FREE_IN_DOT1P_MAPPING_ID_LEN 4 + +/* MC_CMD_DOT1P_MAPPING_FREE_OUT msgresponse */ +#define MC_CMD_DOT1P_MAPPING_FREE_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_DOT1P_MAPPING_SET_TABLE + * Set the mapping table for a .1p mapping. + */ +#define MC_CMD_DOT1P_MAPPING_SET_TABLE 0xa6 +#undef MC_CMD_0xa6_PRIVILEGE_CTG + +#define MC_CMD_0xa6_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_DOT1P_MAPPING_SET_TABLE_IN msgrequest */ +#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_LEN 36 +/* The handle of the .1p mapping */ +#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_DOT1P_MAPPING_ID_OFST 0 +#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_DOT1P_MAPPING_ID_LEN 4 +/* Per-priority mappings (1 32-bit word per entry - an offset or RSS context + * handle) + */ +#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_MAPPING_TABLE_OFST 4 +#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_MAPPING_TABLE_LEN 32 + +/* MC_CMD_DOT1P_MAPPING_SET_TABLE_OUT msgresponse */ +#define MC_CMD_DOT1P_MAPPING_SET_TABLE_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_DOT1P_MAPPING_GET_TABLE + * Get the mapping table for a .1p mapping. + */ +#define MC_CMD_DOT1P_MAPPING_GET_TABLE 0xa7 +#undef MC_CMD_0xa7_PRIVILEGE_CTG + +#define MC_CMD_0xa7_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_DOT1P_MAPPING_GET_TABLE_IN msgrequest */ +#define MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_LEN 4 +/* The handle of the .1p mapping */ +#define MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_DOT1P_MAPPING_ID_OFST 0 +#define MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_DOT1P_MAPPING_ID_LEN 4 + +/* MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT msgresponse */ +#define MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT_LEN 36 +/* Per-priority mappings (1 32-bit word per entry - an offset or RSS context + * handle) + */ +#define MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT_MAPPING_TABLE_OFST 4 +#define MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT_MAPPING_TABLE_LEN 32 + + +/***********************************/ +/* MC_CMD_GET_VECTOR_CFG + * Get Interrupt Vector config for this PF. + */ +#define MC_CMD_GET_VECTOR_CFG 0xbf +#undef MC_CMD_0xbf_PRIVILEGE_CTG + +#define MC_CMD_0xbf_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_VECTOR_CFG_IN msgrequest */ +#define MC_CMD_GET_VECTOR_CFG_IN_LEN 0 + +/* MC_CMD_GET_VECTOR_CFG_OUT msgresponse */ +#define MC_CMD_GET_VECTOR_CFG_OUT_LEN 12 +/* Base absolute interrupt vector number. */ +#define MC_CMD_GET_VECTOR_CFG_OUT_VEC_BASE_OFST 0 +#define MC_CMD_GET_VECTOR_CFG_OUT_VEC_BASE_LEN 4 +/* Number of interrupt vectors allocate to this PF. */ +#define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_PF_OFST 4 +#define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_PF_LEN 4 +/* Number of interrupt vectors to allocate per VF. */ +#define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_VF_OFST 8 +#define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_VF_LEN 4 + + +/***********************************/ +/* MC_CMD_SET_VECTOR_CFG + * Set Interrupt Vector config for this PF. + */ +#define MC_CMD_SET_VECTOR_CFG 0xc0 +#undef MC_CMD_0xc0_PRIVILEGE_CTG + +#define MC_CMD_0xc0_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_SET_VECTOR_CFG_IN msgrequest */ +#define MC_CMD_SET_VECTOR_CFG_IN_LEN 12 +/* Base absolute interrupt vector number, or MC_CMD_RESOURCE_INSTANCE_ANY to + * let the system find a suitable base. 
+ */
+#define MC_CMD_SET_VECTOR_CFG_IN_VEC_BASE_OFST 0
+#define MC_CMD_SET_VECTOR_CFG_IN_VEC_BASE_LEN 4
+/* Number of interrupt vectors to allocate to this PF. */
+#define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_PF_OFST 4
+#define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_PF_LEN 4
+/* Number of interrupt vectors to allocate per VF. */
+#define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_VF_OFST 8
+#define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_VF_LEN 4
+
+/* MC_CMD_SET_VECTOR_CFG_OUT msgresponse */
+#define MC_CMD_SET_VECTOR_CFG_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VPORT_ADD_MAC_ADDRESS
+ * Add a MAC address to a v-port
+ */
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS 0xa8
+#undef MC_CMD_0xa8_PRIVILEGE_CTG
+
+#define MC_CMD_0xa8_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VPORT_ADD_MAC_ADDRESS_IN msgrequest */
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN 10
+/* The handle of the v-port */
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID_OFST 0
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID_LEN 4
+/* MAC address to add */
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_MACADDR_OFST 4
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_MACADDR_LEN 6
+
+/* MC_CMD_VPORT_ADD_MAC_ADDRESS_OUT msgresponse */
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VPORT_DEL_MAC_ADDRESS
+ * Delete a MAC address from a v-port
+ */
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS 0xa9
+#undef MC_CMD_0xa9_PRIVILEGE_CTG
+
+#define MC_CMD_0xa9_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VPORT_DEL_MAC_ADDRESS_IN msgrequest */
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN 10
+/* The handle of the v-port */
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID_OFST 0
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID_LEN 4
+/* MAC address to delete */
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_MACADDR_OFST 4
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_MACADDR_LEN 6
+
+/* MC_CMD_VPORT_DEL_MAC_ADDRESS_OUT msgresponse */
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VPORT_GET_MAC_ADDRESSES
+ * Get the MAC addresses assigned to a v-port
+ */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES 0xaa
+#undef MC_CMD_0xaa_PRIVILEGE_CTG
+
+#define MC_CMD_0xaa_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VPORT_GET_MAC_ADDRESSES_IN msgrequest */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN 4
+/* The handle of the v-port */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID_OFST 0
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID_LEN 4
+
+/* MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT msgresponse */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN 4
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX 250
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX_MCDI2 1018
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LEN(num) (4+6*(num))
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_NUM(len) (((len)-4)/6)
+/* The number of MAC addresses returned */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT_OFST 0
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT_LEN 4
+/* Array of MAC addresses */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_OFST 4
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_LEN 6
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_MINNUM 0
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_MAXNUM 41
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_MAXNUM_MCDI2 169
+
+
+/***********************************/
+/* MC_CMD_VPORT_RECONFIGURE
+ * Replace VLAN tags and/or MAC addresses of an existing v-port.
If the v-port + * has already been passed to another function (v-port's user), then that + * function will be reset before applying the changes. + */ +#define MC_CMD_VPORT_RECONFIGURE 0xeb +#undef MC_CMD_0xeb_PRIVILEGE_CTG + +#define MC_CMD_0xeb_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_VPORT_RECONFIGURE_IN msgrequest */ +#define MC_CMD_VPORT_RECONFIGURE_IN_LEN 44 +/* The handle of the v-port */ +#define MC_CMD_VPORT_RECONFIGURE_IN_VPORT_ID_OFST 0 +#define MC_CMD_VPORT_RECONFIGURE_IN_VPORT_ID_LEN 4 +/* Flags requesting what should be changed. */ +#define MC_CMD_VPORT_RECONFIGURE_IN_FLAGS_OFST 4 +#define MC_CMD_VPORT_RECONFIGURE_IN_FLAGS_LEN 4 +#define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_VLAN_TAGS_LBN 0 +#define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_VLAN_TAGS_WIDTH 1 +#define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_MACADDRS_LBN 1 +#define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_MACADDRS_WIDTH 1 +/* The number of VLAN tags to insert/remove. An error will be returned if + * incompatible with the number of VLAN tags specified for the upstream + * v-switch. + */ +#define MC_CMD_VPORT_RECONFIGURE_IN_NUM_VLAN_TAGS_OFST 8 +#define MC_CMD_VPORT_RECONFIGURE_IN_NUM_VLAN_TAGS_LEN 4 +/* The actual VLAN tags to insert/remove */ +#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAGS_OFST 12 +#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAGS_LEN 4 +#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_0_LBN 0 +#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_0_WIDTH 16 +#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_1_LBN 16 +#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_1_WIDTH 16 +/* The number of MAC addresses to add */ +#define MC_CMD_VPORT_RECONFIGURE_IN_NUM_MACADDRS_OFST 16 +#define MC_CMD_VPORT_RECONFIGURE_IN_NUM_MACADDRS_LEN 4 +/* MAC addresses to add */ +#define MC_CMD_VPORT_RECONFIGURE_IN_MACADDRS_OFST 20 +#define MC_CMD_VPORT_RECONFIGURE_IN_MACADDRS_LEN 6 +#define MC_CMD_VPORT_RECONFIGURE_IN_MACADDRS_NUM 4 + +/* MC_CMD_VPORT_RECONFIGURE_OUT msgresponse */ +#define MC_CMD_VPORT_RECONFIGURE_OUT_LEN 4 +#define MC_CMD_VPORT_RECONFIGURE_OUT_FLAGS_OFST 0 +#define MC_CMD_VPORT_RECONFIGURE_OUT_FLAGS_LEN 4 +#define MC_CMD_VPORT_RECONFIGURE_OUT_RESET_DONE_LBN 0 +#define MC_CMD_VPORT_RECONFIGURE_OUT_RESET_DONE_WIDTH 1 + + +/***********************************/ +/* MC_CMD_EVB_PORT_QUERY + * read some config of v-port. + */ +#define MC_CMD_EVB_PORT_QUERY 0x62 +#undef MC_CMD_0x62_PRIVILEGE_CTG + +#define MC_CMD_0x62_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_EVB_PORT_QUERY_IN msgrequest */ +#define MC_CMD_EVB_PORT_QUERY_IN_LEN 4 +/* The handle of the v-port */ +#define MC_CMD_EVB_PORT_QUERY_IN_PORT_ID_OFST 0 +#define MC_CMD_EVB_PORT_QUERY_IN_PORT_ID_LEN 4 + +/* MC_CMD_EVB_PORT_QUERY_OUT msgresponse */ +#define MC_CMD_EVB_PORT_QUERY_OUT_LEN 8 +/* The EVB port flags as defined at MC_CMD_VPORT_ALLOC. */ +#define MC_CMD_EVB_PORT_QUERY_OUT_PORT_FLAGS_OFST 0 +#define MC_CMD_EVB_PORT_QUERY_OUT_PORT_FLAGS_LEN 4 +/* The number of VLAN tags that may be used on a v-adaptor connected to this + * EVB port. + */ +#define MC_CMD_EVB_PORT_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_OFST 4 +#define MC_CMD_EVB_PORT_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_LEN 4 + + +/***********************************/ +/* MC_CMD_DUMP_BUFTBL_ENTRIES + * Dump buffer table entries, mainly for command client debug use. Dumps + * absolute entries, and does not use chunk handles. All entries must be in + * range, and used for q page mapping, Although the latter restriction may be + * lifted in future. 
+ */
+#define MC_CMD_DUMP_BUFTBL_ENTRIES 0xab
+#undef MC_CMD_0xab_PRIVILEGE_CTG
+
+#define MC_CMD_0xab_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_DUMP_BUFTBL_ENTRIES_IN msgrequest */
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_LEN 8
+/* Index of the first buffer table entry. */
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_FIRSTID_OFST 0
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_FIRSTID_LEN 4
+/* Number of buffer table entries to dump. */
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_NUMENTRIES_OFST 4
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_NUMENTRIES_LEN 4
+
+/* MC_CMD_DUMP_BUFTBL_ENTRIES_OUT msgresponse */
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMIN 12
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMAX 252
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LEN(num) (0+12*(num))
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_NUM(len) (((len)-0)/12)
+/* Raw buffer table entries, laid out as BUFTBL_ENTRY. */
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_OFST 0
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_LEN 12
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_MINNUM 1
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_MAXNUM 21
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_MAXNUM_MCDI2 85
+
+
+/***********************************/
+/* MC_CMD_SET_RXDP_CONFIG
+ * Set global RXDP configuration settings
+ */
+#define MC_CMD_SET_RXDP_CONFIG 0xc1
+#undef MC_CMD_0xc1_PRIVILEGE_CTG
+
+#define MC_CMD_0xc1_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SET_RXDP_CONFIG_IN msgrequest */
+#define MC_CMD_SET_RXDP_CONFIG_IN_LEN 4
+#define MC_CMD_SET_RXDP_CONFIG_IN_DATA_OFST 0
+#define MC_CMD_SET_RXDP_CONFIG_IN_DATA_LEN 4
+#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_LBN 0
+#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_WIDTH 1
+#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_LEN_LBN 1
+#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_LEN_WIDTH 2
+/* enum: pad to 64 bytes */
+#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_64 0x0
+/* enum: pad to 128 bytes (Medford only) */
+#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_128 0x1
+/* enum: pad to 256 bytes (Medford only) */
+#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_256 0x2
+
+/* MC_CMD_SET_RXDP_CONFIG_OUT msgresponse */
+#define MC_CMD_SET_RXDP_CONFIG_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_RXDP_CONFIG
+ * Get global RXDP configuration settings
+ */
+#define MC_CMD_GET_RXDP_CONFIG 0xc2
+#undef MC_CMD_0xc2_PRIVILEGE_CTG
+
+#define MC_CMD_0xc2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_RXDP_CONFIG_IN msgrequest */
+#define MC_CMD_GET_RXDP_CONFIG_IN_LEN 0
+
+/* MC_CMD_GET_RXDP_CONFIG_OUT msgresponse */
+#define MC_CMD_GET_RXDP_CONFIG_OUT_LEN 4
+#define MC_CMD_GET_RXDP_CONFIG_OUT_DATA_OFST 0
+#define MC_CMD_GET_RXDP_CONFIG_OUT_DATA_LEN 4
+#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_LBN 0
+#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_WIDTH 1
+#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_LEN_LBN 1
+#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_LEN_WIDTH 2
+/* Enum values, see field(s): */
+/* MC_CMD_SET_RXDP_CONFIG/MC_CMD_SET_RXDP_CONFIG_IN/PAD_HOST_LEN */
+
+
+/***********************************/
+/* MC_CMD_GET_CLOCK
+ * Return the system and DPCPU clock frequencies.
+ */ +#define MC_CMD_GET_CLOCK 0xac +#undef MC_CMD_0xac_PRIVILEGE_CTG + +#define MC_CMD_0xac_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_CLOCK_IN msgrequest */ +#define MC_CMD_GET_CLOCK_IN_LEN 0 + +/* MC_CMD_GET_CLOCK_OUT msgresponse */ +#define MC_CMD_GET_CLOCK_OUT_LEN 8 +/* System frequency, MHz */ +#define MC_CMD_GET_CLOCK_OUT_SYS_FREQ_OFST 0 +#define MC_CMD_GET_CLOCK_OUT_SYS_FREQ_LEN 4 +/* DPCPU frequency, MHz */ +#define MC_CMD_GET_CLOCK_OUT_DPCPU_FREQ_OFST 4 +#define MC_CMD_GET_CLOCK_OUT_DPCPU_FREQ_LEN 4 + + +/***********************************/ +/* MC_CMD_SET_CLOCK + * Control the system and DPCPU clock frequencies. Changes are lost reboot. + */ +#define MC_CMD_SET_CLOCK 0xad +#undef MC_CMD_0xad_PRIVILEGE_CTG + +#define MC_CMD_0xad_PRIVILEGE_CTG SRIOV_CTG_INSECURE + +/* MC_CMD_SET_CLOCK_IN msgrequest */ +#define MC_CMD_SET_CLOCK_IN_LEN 28 +/* Requested frequency in MHz for system clock domain */ +#define MC_CMD_SET_CLOCK_IN_SYS_FREQ_OFST 0 +#define MC_CMD_SET_CLOCK_IN_SYS_FREQ_LEN 4 +/* enum: Leave the system clock domain frequency unchanged */ +#define MC_CMD_SET_CLOCK_IN_SYS_DOMAIN_DONT_CHANGE 0x0 +/* Requested frequency in MHz for inter-core clock domain */ +#define MC_CMD_SET_CLOCK_IN_ICORE_FREQ_OFST 4 +#define MC_CMD_SET_CLOCK_IN_ICORE_FREQ_LEN 4 +/* enum: Leave the inter-core clock domain frequency unchanged */ +#define MC_CMD_SET_CLOCK_IN_ICORE_DOMAIN_DONT_CHANGE 0x0 +/* Requested frequency in MHz for DPCPU clock domain */ +#define MC_CMD_SET_CLOCK_IN_DPCPU_FREQ_OFST 8 +#define MC_CMD_SET_CLOCK_IN_DPCPU_FREQ_LEN 4 +/* enum: Leave the DPCPU clock domain frequency unchanged */ +#define MC_CMD_SET_CLOCK_IN_DPCPU_DOMAIN_DONT_CHANGE 0x0 +/* Requested frequency in MHz for PCS clock domain */ +#define MC_CMD_SET_CLOCK_IN_PCS_FREQ_OFST 12 +#define MC_CMD_SET_CLOCK_IN_PCS_FREQ_LEN 4 +/* enum: Leave the PCS clock domain frequency unchanged */ +#define MC_CMD_SET_CLOCK_IN_PCS_DOMAIN_DONT_CHANGE 0x0 +/* Requested frequency in MHz for MC clock domain */ +#define MC_CMD_SET_CLOCK_IN_MC_FREQ_OFST 16 +#define MC_CMD_SET_CLOCK_IN_MC_FREQ_LEN 4 +/* enum: Leave the MC clock domain frequency unchanged */ +#define MC_CMD_SET_CLOCK_IN_MC_DOMAIN_DONT_CHANGE 0x0 +/* Requested frequency in MHz for rmon clock domain */ +#define MC_CMD_SET_CLOCK_IN_RMON_FREQ_OFST 20 +#define MC_CMD_SET_CLOCK_IN_RMON_FREQ_LEN 4 +/* enum: Leave the rmon clock domain frequency unchanged */ +#define MC_CMD_SET_CLOCK_IN_RMON_DOMAIN_DONT_CHANGE 0x0 +/* Requested frequency in MHz for vswitch clock domain */ +#define MC_CMD_SET_CLOCK_IN_VSWITCH_FREQ_OFST 24 +#define MC_CMD_SET_CLOCK_IN_VSWITCH_FREQ_LEN 4 +/* enum: Leave the vswitch clock domain frequency unchanged */ +#define MC_CMD_SET_CLOCK_IN_VSWITCH_DOMAIN_DONT_CHANGE 0x0 + +/* MC_CMD_SET_CLOCK_OUT msgresponse */ +#define MC_CMD_SET_CLOCK_OUT_LEN 28 +/* Resulting system frequency in MHz */ +#define MC_CMD_SET_CLOCK_OUT_SYS_FREQ_OFST 0 +#define MC_CMD_SET_CLOCK_OUT_SYS_FREQ_LEN 4 +/* enum: The system clock domain doesn't exist */ +#define MC_CMD_SET_CLOCK_OUT_SYS_DOMAIN_UNSUPPORTED 0x0 +/* Resulting inter-core frequency in MHz */ +#define MC_CMD_SET_CLOCK_OUT_ICORE_FREQ_OFST 4 +#define MC_CMD_SET_CLOCK_OUT_ICORE_FREQ_LEN 4 +/* enum: The inter-core clock domain doesn't exist / isn't used */ +#define MC_CMD_SET_CLOCK_OUT_ICORE_DOMAIN_UNSUPPORTED 0x0 +/* Resulting DPCPU frequency in MHz */ +#define MC_CMD_SET_CLOCK_OUT_DPCPU_FREQ_OFST 8 +#define MC_CMD_SET_CLOCK_OUT_DPCPU_FREQ_LEN 4 +/* enum: The dpcpu clock domain doesn't exist */ +#define 
MC_CMD_SET_CLOCK_OUT_DPCPU_DOMAIN_UNSUPPORTED 0x0 +/* Resulting PCS frequency in MHz */ +#define MC_CMD_SET_CLOCK_OUT_PCS_FREQ_OFST 12 +#define MC_CMD_SET_CLOCK_OUT_PCS_FREQ_LEN 4 +/* enum: The PCS clock domain doesn't exist / isn't controlled */ +#define MC_CMD_SET_CLOCK_OUT_PCS_DOMAIN_UNSUPPORTED 0x0 +/* Resulting MC frequency in MHz */ +#define MC_CMD_SET_CLOCK_OUT_MC_FREQ_OFST 16 +#define MC_CMD_SET_CLOCK_OUT_MC_FREQ_LEN 4 +/* enum: The MC clock domain doesn't exist / isn't controlled */ +#define MC_CMD_SET_CLOCK_OUT_MC_DOMAIN_UNSUPPORTED 0x0 +/* Resulting rmon frequency in MHz */ +#define MC_CMD_SET_CLOCK_OUT_RMON_FREQ_OFST 20 +#define MC_CMD_SET_CLOCK_OUT_RMON_FREQ_LEN 4 +/* enum: The rmon clock domain doesn't exist / isn't controlled */ +#define MC_CMD_SET_CLOCK_OUT_RMON_DOMAIN_UNSUPPORTED 0x0 +/* Resulting vswitch frequency in MHz */ +#define MC_CMD_SET_CLOCK_OUT_VSWITCH_FREQ_OFST 24 +#define MC_CMD_SET_CLOCK_OUT_VSWITCH_FREQ_LEN 4 +/* enum: The vswitch clock domain doesn't exist / isn't controlled */ +#define MC_CMD_SET_CLOCK_OUT_VSWITCH_DOMAIN_UNSUPPORTED 0x0 + + +/***********************************/ +/* MC_CMD_DPCPU_RPC + * Send an arbitrary DPCPU message. + */ +#define MC_CMD_DPCPU_RPC 0xae +#undef MC_CMD_0xae_PRIVILEGE_CTG + +#define MC_CMD_0xae_PRIVILEGE_CTG SRIOV_CTG_INSECURE + +/* MC_CMD_DPCPU_RPC_IN msgrequest */ +#define MC_CMD_DPCPU_RPC_IN_LEN 36 +#define MC_CMD_DPCPU_RPC_IN_CPU_OFST 0 +#define MC_CMD_DPCPU_RPC_IN_CPU_LEN 4 +/* enum: RxDPCPU0 */ +#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX0 0x0 +/* enum: TxDPCPU0 */ +#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX0 0x1 +/* enum: TxDPCPU1 */ +#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX1 0x2 +/* enum: RxDPCPU1 (Medford only) */ +#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX1 0x3 +/* enum: RxDPCPU (will be for the calling function; for now, just an alias of + * DPCPU_RX0) + */ +#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX 0x80 +/* enum: TxDPCPU (will be for the calling function; for now, just an alias of + * DPCPU_TX0) + */ +#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX 0x81 +/* First 8 bits [39:32] of DATA are consumed by MC-DPCPU protocol and must be + * initialised to zero + */ +#define MC_CMD_DPCPU_RPC_IN_DATA_OFST 4 +#define MC_CMD_DPCPU_RPC_IN_DATA_LEN 32 +#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_CMDNUM_LBN 8 +#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_CMDNUM_WIDTH 8 +#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_READ 0x6 /* enum */ +#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_WRITE 0x7 /* enum */ +#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_SELF_TEST 0xc /* enum */ +#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_CSR_ACCESS 0xe /* enum */ +#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_READ 0x46 /* enum */ +#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_WRITE 0x47 /* enum */ +#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_SELF_TEST 0x4a /* enum */ +#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_CSR_ACCESS 0x4c /* enum */ +#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_SET_MC_REPLAY_CNTXT 0x4d /* enum */ +#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_OBJID_LBN 16 +#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_OBJID_WIDTH 16 +#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_ADDR_LBN 16 +#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_ADDR_WIDTH 16 +#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_COUNT_LBN 48 +#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_COUNT_WIDTH 16 +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_INFO_LBN 16 +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_INFO_WIDTH 240 +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_LBN 16 +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_WIDTH 16 +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_STOP_RETURN_RESULT 0x0 /* enum 
*/ +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_READ 0x1 /* enum */ +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_WRITE 0x2 /* enum */ +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_WRITE_READ 0x3 /* enum */ +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_PIPELINED_READ 0x4 /* enum */ +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_START_DELAY_LBN 48 +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_START_DELAY_WIDTH 16 +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_RPT_COUNT_LBN 64 +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_RPT_COUNT_WIDTH 16 +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_GAP_DELAY_LBN 80 +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_GAP_DELAY_WIDTH 16 +#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_LBN 16 +#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_WIDTH 16 +#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_CUT_THROUGH 0x1 /* enum */ +#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_STORE_FORWARD 0x2 /* enum */ +#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_STORE_FORWARD_FIRST 0x3 /* enum */ +#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_CNTXT_LBN 64 +#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_CNTXT_WIDTH 16 +#define MC_CMD_DPCPU_RPC_IN_WDATA_OFST 12 +#define MC_CMD_DPCPU_RPC_IN_WDATA_LEN 24 +/* Register data to write. Only valid in write/write-read. */ +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_DATA_OFST 16 +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_DATA_LEN 4 +/* Register address. */ +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_ADDRESS_OFST 20 +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_ADDRESS_LEN 4 + +/* MC_CMD_DPCPU_RPC_OUT msgresponse */ +#define MC_CMD_DPCPU_RPC_OUT_LEN 36 +#define MC_CMD_DPCPU_RPC_OUT_RC_OFST 0 +#define MC_CMD_DPCPU_RPC_OUT_RC_LEN 4 +/* DATA */ +#define MC_CMD_DPCPU_RPC_OUT_DATA_OFST 4 +#define MC_CMD_DPCPU_RPC_OUT_DATA_LEN 32 +#define MC_CMD_DPCPU_RPC_OUT_HDR_CMD_RESP_ERRCODE_LBN 32 +#define MC_CMD_DPCPU_RPC_OUT_HDR_CMD_RESP_ERRCODE_WIDTH 16 +#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_COUNT_LBN 48 +#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_COUNT_WIDTH 16 +#define MC_CMD_DPCPU_RPC_OUT_RDATA_OFST 12 +#define MC_CMD_DPCPU_RPC_OUT_RDATA_LEN 24 +#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_1_OFST 12 +#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_1_LEN 4 +#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_2_OFST 16 +#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_2_LEN 4 +#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_3_OFST 20 +#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_3_LEN 4 +#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_4_OFST 24 +#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_4_LEN 4 + + +/***********************************/ +/* MC_CMD_TRIGGER_INTERRUPT + * Trigger an interrupt by prodding the BIU. + */ +#define MC_CMD_TRIGGER_INTERRUPT 0xe3 +#undef MC_CMD_0xe3_PRIVILEGE_CTG + +#define MC_CMD_0xe3_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_TRIGGER_INTERRUPT_IN msgrequest */ +#define MC_CMD_TRIGGER_INTERRUPT_IN_LEN 4 +/* Interrupt level relative to base for function. */ +#define MC_CMD_TRIGGER_INTERRUPT_IN_INTR_LEVEL_OFST 0 +#define MC_CMD_TRIGGER_INTERRUPT_IN_INTR_LEVEL_LEN 4 + +/* MC_CMD_TRIGGER_INTERRUPT_OUT msgresponse */ +#define MC_CMD_TRIGGER_INTERRUPT_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_SHMBOOT_OP + * Special operations to support (for now) shmboot. 
+ */ +#define MC_CMD_SHMBOOT_OP 0xe6 +#undef MC_CMD_0xe6_PRIVILEGE_CTG + +#define MC_CMD_0xe6_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND + +/* MC_CMD_SHMBOOT_OP_IN msgrequest */ +#define MC_CMD_SHMBOOT_OP_IN_LEN 4 +/* Identifies the operation to perform */ +#define MC_CMD_SHMBOOT_OP_IN_SHMBOOT_OP_OFST 0 +#define MC_CMD_SHMBOOT_OP_IN_SHMBOOT_OP_LEN 4 +/* enum: Copy slave_data section to the slave core. (Greenport only) */ +#define MC_CMD_SHMBOOT_OP_IN_PUSH_SLAVE_DATA 0x0 + +/* MC_CMD_SHMBOOT_OP_OUT msgresponse */ +#define MC_CMD_SHMBOOT_OP_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_CAP_BLK_READ + * Read multiple 64bit words from capture block memory + */ +#define MC_CMD_CAP_BLK_READ 0xe7 +#undef MC_CMD_0xe7_PRIVILEGE_CTG + +#define MC_CMD_0xe7_PRIVILEGE_CTG SRIOV_CTG_INSECURE + +/* MC_CMD_CAP_BLK_READ_IN msgrequest */ +#define MC_CMD_CAP_BLK_READ_IN_LEN 12 +#define MC_CMD_CAP_BLK_READ_IN_CAP_REG_OFST 0 +#define MC_CMD_CAP_BLK_READ_IN_CAP_REG_LEN 4 +#define MC_CMD_CAP_BLK_READ_IN_ADDR_OFST 4 +#define MC_CMD_CAP_BLK_READ_IN_ADDR_LEN 4 +#define MC_CMD_CAP_BLK_READ_IN_COUNT_OFST 8 +#define MC_CMD_CAP_BLK_READ_IN_COUNT_LEN 4 + +/* MC_CMD_CAP_BLK_READ_OUT msgresponse */ +#define MC_CMD_CAP_BLK_READ_OUT_LENMIN 8 +#define MC_CMD_CAP_BLK_READ_OUT_LENMAX 248 +#define MC_CMD_CAP_BLK_READ_OUT_LENMAX_MCDI2 1016 +#define MC_CMD_CAP_BLK_READ_OUT_LEN(num) (0+8*(num)) +#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_NUM(len) (((len)-0)/8) +#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_OFST 0 +#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_LEN 8 +#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_LO_OFST 0 +#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_HI_OFST 4 +#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_MINNUM 1 +#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_MAXNUM 31 +#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_MAXNUM_MCDI2 127 + + +/***********************************/ +/* MC_CMD_DUMP_DO + * Take a dump of the DUT state + */ +#define MC_CMD_DUMP_DO 0xe8 +#undef MC_CMD_0xe8_PRIVILEGE_CTG + +#define MC_CMD_0xe8_PRIVILEGE_CTG SRIOV_CTG_INSECURE + +/* MC_CMD_DUMP_DO_IN msgrequest */ +#define MC_CMD_DUMP_DO_IN_LEN 52 +#define MC_CMD_DUMP_DO_IN_PADDING_OFST 0 +#define MC_CMD_DUMP_DO_IN_PADDING_LEN 4 +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_OFST 4 +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_LEN 4 +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM 0x0 /* enum */ +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_DEFAULT 0x1 /* enum */ +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_TYPE_OFST 8 +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_TYPE_LEN 4 +#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_NVRAM 0x1 /* enum */ +#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_HOST_MEMORY 0x2 /* enum */ +#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_HOST_MEMORY_MLI 0x3 /* enum */ +#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_UART 0x4 /* enum */ +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 12 +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_PARTITION_TYPE_ID_LEN 4 +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_OFFSET_OFST 16 +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_OFFSET_LEN 4 +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 12 +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_LO_LEN 4 +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 16 +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_HI_LEN 4 +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 12 +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_LEN 4 +#define 
MC_CMD_DUMP_DO_IN_HOST_MEMORY_MLI_PAGE_SIZE 0x1000 /* enum */ +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 16 +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_LEN 4 +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 20 +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_LEN 4 +#define MC_CMD_DUMP_DO_IN_HOST_MEMORY_MLI_MAX_DEPTH 0x2 /* enum */ +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_OFST 12 +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_LEN 4 +/* enum: The uart port this command was received over (if using a uart + * transport) + */ +#define MC_CMD_DUMP_DO_IN_UART_PORT_SRC 0xff +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_SIZE_OFST 24 +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_SIZE_LEN 4 +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_OFST 28 +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_LEN 4 +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM 0x0 /* enum */ +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_NVRAM_DUMP_PARTITION 0x1 /* enum */ +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_TYPE_OFST 32 +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_TYPE_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC_CUSTOM_TYPE */ +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 36 +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_NVRAM_PARTITION_TYPE_ID_LEN 4 +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_NVRAM_OFFSET_OFST 40 +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_NVRAM_OFFSET_LEN 4 +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 36 +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_LO_LEN 4 +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 40 +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_HI_LEN 4 +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 36 +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_LEN 4 +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 40 +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_LEN 4 +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 44 +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_DEPTH_LEN 4 +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_UART_PORT_OFST 36 +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_UART_PORT_LEN 4 +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_SIZE_OFST 48 +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_SIZE_LEN 4 + +/* MC_CMD_DUMP_DO_OUT msgresponse */ +#define MC_CMD_DUMP_DO_OUT_LEN 4 +#define MC_CMD_DUMP_DO_OUT_DUMPFILE_SIZE_OFST 0 +#define MC_CMD_DUMP_DO_OUT_DUMPFILE_SIZE_LEN 4 + + +/***********************************/ +/* MC_CMD_DUMP_CONFIGURE_UNSOLICITED + * Configure unsolicited dumps + */ +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED 0xe9 +#undef MC_CMD_0xe9_PRIVILEGE_CTG + +#define MC_CMD_0xe9_PRIVILEGE_CTG SRIOV_CTG_INSECURE + +/* MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN msgrequest */ +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_LEN 52 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_ENABLE_OFST 0 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_ENABLE_LEN 4 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_OFST 4 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC */ +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_TYPE_OFST 8 +#define 
MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_TYPE_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC_CUSTOM_TYPE */ +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 12 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_PARTITION_TYPE_ID_LEN 4 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_OFFSET_OFST 16 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_OFFSET_LEN 4 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 12 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_LO_LEN 4 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 16 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_HI_LEN 4 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 12 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_LEN 4 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 16 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_LEN 4 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 20 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_LEN 4 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_OFST 12 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_LEN 4 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_SIZE_OFST 24 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_SIZE_LEN 4 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_OFST 28 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPFILE_DST */ +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_TYPE_OFST 32 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_TYPE_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC_CUSTOM_TYPE */ +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 36 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_NVRAM_PARTITION_TYPE_ID_LEN 4 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_NVRAM_OFFSET_OFST 40 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_NVRAM_OFFSET_LEN 4 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 36 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_LO_LEN 4 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 40 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_HI_LEN 4 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 36 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_LEN 4 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 40 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_LEN 4 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 44 +#define 
MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_DEPTH_LEN 4 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_UART_PORT_OFST 36 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_UART_PORT_LEN 4 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_SIZE_OFST 48 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_SIZE_LEN 4 + + +/***********************************/ +/* MC_CMD_SET_PSU + * Adjusts power supply parameters. This is a warranty-voiding operation. + * Returns: ENOENT if the parameter or rail specified does not exist, EINVAL if + * the parameter is out of range. + */ +#define MC_CMD_SET_PSU 0xea +#undef MC_CMD_0xea_PRIVILEGE_CTG + +#define MC_CMD_0xea_PRIVILEGE_CTG SRIOV_CTG_INSECURE + +/* MC_CMD_SET_PSU_IN msgrequest */ +#define MC_CMD_SET_PSU_IN_LEN 12 +#define MC_CMD_SET_PSU_IN_PARAM_OFST 0 +#define MC_CMD_SET_PSU_IN_PARAM_LEN 4 +#define MC_CMD_SET_PSU_IN_PARAM_SUPPLY_VOLTAGE 0x0 /* enum */ +#define MC_CMD_SET_PSU_IN_RAIL_OFST 4 +#define MC_CMD_SET_PSU_IN_RAIL_LEN 4 +#define MC_CMD_SET_PSU_IN_RAIL_0V9 0x0 /* enum */ +#define MC_CMD_SET_PSU_IN_RAIL_1V2 0x1 /* enum */ +/* desired value, eg voltage in mV */ +#define MC_CMD_SET_PSU_IN_VALUE_OFST 8 +#define MC_CMD_SET_PSU_IN_VALUE_LEN 4 + +/* MC_CMD_SET_PSU_OUT msgresponse */ +#define MC_CMD_SET_PSU_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_GET_FUNCTION_INFO + * Get function information. PF and VF number. + */ +#define MC_CMD_GET_FUNCTION_INFO 0xec +#undef MC_CMD_0xec_PRIVILEGE_CTG + +#define MC_CMD_0xec_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_FUNCTION_INFO_IN msgrequest */ +#define MC_CMD_GET_FUNCTION_INFO_IN_LEN 0 + +/* MC_CMD_GET_FUNCTION_INFO_OUT msgresponse */ +#define MC_CMD_GET_FUNCTION_INFO_OUT_LEN 8 +#define MC_CMD_GET_FUNCTION_INFO_OUT_PF_OFST 0 +#define MC_CMD_GET_FUNCTION_INFO_OUT_PF_LEN 4 +#define MC_CMD_GET_FUNCTION_INFO_OUT_VF_OFST 4 +#define MC_CMD_GET_FUNCTION_INFO_OUT_VF_LEN 4 + + +/***********************************/ +/* MC_CMD_ENABLE_OFFLINE_BIST + * Enters offline BIST mode. All queues are torn down, chip enters quiescent + * mode, calling function gets exclusive MCDI ownership. The only way out is + * reboot. + */ +#define MC_CMD_ENABLE_OFFLINE_BIST 0xed +#undef MC_CMD_0xed_PRIVILEGE_CTG + +#define MC_CMD_0xed_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND + +/* MC_CMD_ENABLE_OFFLINE_BIST_IN msgrequest */ +#define MC_CMD_ENABLE_OFFLINE_BIST_IN_LEN 0 + +/* MC_CMD_ENABLE_OFFLINE_BIST_OUT msgresponse */ +#define MC_CMD_ENABLE_OFFLINE_BIST_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_UART_SEND_DATA + * Send checksummed[sic] block of data over the uart. Response is a placeholder + * should we wish to make this reliable; currently requests are fire-and- + * forget. 
+ */ +#define MC_CMD_UART_SEND_DATA 0xee +#undef MC_CMD_0xee_PRIVILEGE_CTG + +#define MC_CMD_0xee_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_UART_SEND_DATA_OUT msgrequest */ +#define MC_CMD_UART_SEND_DATA_OUT_LENMIN 16 +#define MC_CMD_UART_SEND_DATA_OUT_LENMAX 252 +#define MC_CMD_UART_SEND_DATA_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_UART_SEND_DATA_OUT_LEN(num) (16+1*(num)) +#define MC_CMD_UART_SEND_DATA_OUT_DATA_NUM(len) (((len)-16)/1) +/* CRC32 over OFFSET, LENGTH, RESERVED, DATA */ +#define MC_CMD_UART_SEND_DATA_OUT_CHECKSUM_OFST 0 +#define MC_CMD_UART_SEND_DATA_OUT_CHECKSUM_LEN 4 +/* Offset at which to write the data */ +#define MC_CMD_UART_SEND_DATA_OUT_OFFSET_OFST 4 +#define MC_CMD_UART_SEND_DATA_OUT_OFFSET_LEN 4 +/* Length of data */ +#define MC_CMD_UART_SEND_DATA_OUT_LENGTH_OFST 8 +#define MC_CMD_UART_SEND_DATA_OUT_LENGTH_LEN 4 +/* Reserved for future use */ +#define MC_CMD_UART_SEND_DATA_OUT_RESERVED_OFST 12 +#define MC_CMD_UART_SEND_DATA_OUT_RESERVED_LEN 4 +#define MC_CMD_UART_SEND_DATA_OUT_DATA_OFST 16 +#define MC_CMD_UART_SEND_DATA_OUT_DATA_LEN 1 +#define MC_CMD_UART_SEND_DATA_OUT_DATA_MINNUM 0 +#define MC_CMD_UART_SEND_DATA_OUT_DATA_MAXNUM 236 +#define MC_CMD_UART_SEND_DATA_OUT_DATA_MAXNUM_MCDI2 1004 + +/* MC_CMD_UART_SEND_DATA_IN msgresponse */ +#define MC_CMD_UART_SEND_DATA_IN_LEN 0 + + +/***********************************/ +/* MC_CMD_UART_RECV_DATA + * Request checksummed[sic] block of data over the uart. Only a placeholder, + * subject to change and not currently implemented. + */ +#define MC_CMD_UART_RECV_DATA 0xef +#undef MC_CMD_0xef_PRIVILEGE_CTG + +#define MC_CMD_0xef_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_UART_RECV_DATA_OUT msgrequest */ +#define MC_CMD_UART_RECV_DATA_OUT_LEN 16 +/* CRC32 over OFFSET, LENGTH, RESERVED */ +#define MC_CMD_UART_RECV_DATA_OUT_CHECKSUM_OFST 0 +#define MC_CMD_UART_RECV_DATA_OUT_CHECKSUM_LEN 4 +/* Offset from which to read the data */ +#define MC_CMD_UART_RECV_DATA_OUT_OFFSET_OFST 4 +#define MC_CMD_UART_RECV_DATA_OUT_OFFSET_LEN 4 +/* Length of data */ +#define MC_CMD_UART_RECV_DATA_OUT_LENGTH_OFST 8 +#define MC_CMD_UART_RECV_DATA_OUT_LENGTH_LEN 4 +/* Reserved for future use */ +#define MC_CMD_UART_RECV_DATA_OUT_RESERVED_OFST 12 +#define MC_CMD_UART_RECV_DATA_OUT_RESERVED_LEN 4 + +/* MC_CMD_UART_RECV_DATA_IN msgresponse */ +#define MC_CMD_UART_RECV_DATA_IN_LENMIN 16 +#define MC_CMD_UART_RECV_DATA_IN_LENMAX 252 +#define MC_CMD_UART_RECV_DATA_IN_LENMAX_MCDI2 1020 +#define MC_CMD_UART_RECV_DATA_IN_LEN(num) (16+1*(num)) +#define MC_CMD_UART_RECV_DATA_IN_DATA_NUM(len) (((len)-16)/1) +/* CRC32 over RESERVED1, RESERVED2, RESERVED3, DATA */ +#define MC_CMD_UART_RECV_DATA_IN_CHECKSUM_OFST 0 +#define MC_CMD_UART_RECV_DATA_IN_CHECKSUM_LEN 4 +/* Offset at which to write the data */ +#define MC_CMD_UART_RECV_DATA_IN_RESERVED1_OFST 4 +#define MC_CMD_UART_RECV_DATA_IN_RESERVED1_LEN 4 +/* Length of data */ +#define MC_CMD_UART_RECV_DATA_IN_RESERVED2_OFST 8 +#define MC_CMD_UART_RECV_DATA_IN_RESERVED2_LEN 4 +/* Reserved for future use */ +#define MC_CMD_UART_RECV_DATA_IN_RESERVED3_OFST 12 +#define MC_CMD_UART_RECV_DATA_IN_RESERVED3_LEN 4 +#define MC_CMD_UART_RECV_DATA_IN_DATA_OFST 16 +#define MC_CMD_UART_RECV_DATA_IN_DATA_LEN 1 +#define MC_CMD_UART_RECV_DATA_IN_DATA_MINNUM 0 +#define MC_CMD_UART_RECV_DATA_IN_DATA_MAXNUM 236 +#define MC_CMD_UART_RECV_DATA_IN_DATA_MAXNUM_MCDI2 1004 + + +/***********************************/ +/* MC_CMD_READ_FUSES + * Read data programmed into the device One-Time-Programmable (OTP) Fuses + */ +#define MC_CMD_READ_FUSES 0xf0 
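+
+/* Editor's illustrative sketch, not part of the generated MCDI definitions:
+ * responses with a variable-length trailing array, such as
+ * MC_CMD_READ_FUSES_OUT defined just below, follow the
+ * _LENMIN/_LENMAX/_LEN(num)/_NUM(len) macro convention used throughout this
+ * file. The hypothetical helper below shows one way a host could derive the
+ * number of valid OTP data bytes, assuming the little-endian byte order used
+ * for MCDI payloads and a response of at least MC_CMD_READ_FUSES_OUT_LENMIN
+ * bytes.
+ */
+static inline unsigned int
+mcdi_read_fuses_out_data_bytes(const unsigned char *resp, unsigned int resp_len)
+{
+        /* LENGTH field: 4 bytes, little-endian, at offset 0
+         * (MC_CMD_READ_FUSES_OUT_LENGTH_OFST).
+         */
+        unsigned int reported = (unsigned int)resp[0] |
+                                ((unsigned int)resp[1] << 8) |
+                                ((unsigned int)resp[2] << 16) |
+                                ((unsigned int)resp[3] << 24);
+        /* DATA starts at offset 4 (MC_CMD_READ_FUSES_OUT_DATA_OFST); the count
+         * implied by the total response length mirrors
+         * MC_CMD_READ_FUSES_OUT_DATA_NUM(resp_len) = ((resp_len) - 4) / 1.
+         */
+        unsigned int derived = resp_len - 4;
+
+        return reported < derived ? reported : derived;
+}
+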
+#undef MC_CMD_0xf0_PRIVILEGE_CTG + +#define MC_CMD_0xf0_PRIVILEGE_CTG SRIOV_CTG_INSECURE + +/* MC_CMD_READ_FUSES_IN msgrequest */ +#define MC_CMD_READ_FUSES_IN_LEN 8 +/* Offset in OTP to read */ +#define MC_CMD_READ_FUSES_IN_OFFSET_OFST 0 +#define MC_CMD_READ_FUSES_IN_OFFSET_LEN 4 +/* Length of data to read in bytes */ +#define MC_CMD_READ_FUSES_IN_LENGTH_OFST 4 +#define MC_CMD_READ_FUSES_IN_LENGTH_LEN 4 + +/* MC_CMD_READ_FUSES_OUT msgresponse */ +#define MC_CMD_READ_FUSES_OUT_LENMIN 4 +#define MC_CMD_READ_FUSES_OUT_LENMAX 252 +#define MC_CMD_READ_FUSES_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_READ_FUSES_OUT_LEN(num) (4+1*(num)) +#define MC_CMD_READ_FUSES_OUT_DATA_NUM(len) (((len)-4)/1) +/* Length of returned OTP data in bytes */ +#define MC_CMD_READ_FUSES_OUT_LENGTH_OFST 0 +#define MC_CMD_READ_FUSES_OUT_LENGTH_LEN 4 +/* Returned data */ +#define MC_CMD_READ_FUSES_OUT_DATA_OFST 4 +#define MC_CMD_READ_FUSES_OUT_DATA_LEN 1 +#define MC_CMD_READ_FUSES_OUT_DATA_MINNUM 0 +#define MC_CMD_READ_FUSES_OUT_DATA_MAXNUM 248 +#define MC_CMD_READ_FUSES_OUT_DATA_MAXNUM_MCDI2 1016 + + +/***********************************/ +/* MC_CMD_KR_TUNE + * Get or set KR Serdes RXEQ and TX Driver settings + */ +#define MC_CMD_KR_TUNE 0xf1 +#undef MC_CMD_0xf1_PRIVILEGE_CTG + +#define MC_CMD_0xf1_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND + +/* MC_CMD_KR_TUNE_IN msgrequest */ +#define MC_CMD_KR_TUNE_IN_LENMIN 4 +#define MC_CMD_KR_TUNE_IN_LENMAX 252 +#define MC_CMD_KR_TUNE_IN_LENMAX_MCDI2 1020 +#define MC_CMD_KR_TUNE_IN_LEN(num) (4+4*(num)) +#define MC_CMD_KR_TUNE_IN_KR_TUNE_ARGS_NUM(len) (((len)-4)/4) +/* Requested operation */ +#define MC_CMD_KR_TUNE_IN_KR_TUNE_OP_OFST 0 +#define MC_CMD_KR_TUNE_IN_KR_TUNE_OP_LEN 1 +/* enum: Get current RXEQ settings */ +#define MC_CMD_KR_TUNE_IN_RXEQ_GET 0x0 +/* enum: Override RXEQ settings */ +#define MC_CMD_KR_TUNE_IN_RXEQ_SET 0x1 +/* enum: Get current TX Driver settings */ +#define MC_CMD_KR_TUNE_IN_TXEQ_GET 0x2 +/* enum: Override TX Driver settings */ +#define MC_CMD_KR_TUNE_IN_TXEQ_SET 0x3 +/* enum: Force KR Serdes reset / recalibration */ +#define MC_CMD_KR_TUNE_IN_RECAL 0x4 +/* enum: Start KR Serdes Eye diagram plot on a given lane. Lane must have valid + * signal. + */ +#define MC_CMD_KR_TUNE_IN_START_EYE_PLOT 0x5 +/* enum: Poll KR Serdes Eye diagram plot. Returns one row of BER data. The + * caller should call this command repeatedly after starting eye plot, until no + * more data is returned. + */ +#define MC_CMD_KR_TUNE_IN_POLL_EYE_PLOT 0x6 +/* enum: Read Figure Of Merit (eye quality, higher is better). 
*/ +#define MC_CMD_KR_TUNE_IN_READ_FOM 0x7 +/* enum: Start/stop link training frames */ +#define MC_CMD_KR_TUNE_IN_LINK_TRAIN_RUN 0x8 +/* enum: Issue KR link training command (control training coefficients) */ +#define MC_CMD_KR_TUNE_IN_LINK_TRAIN_CMD 0x9 +/* Align the arguments to 32 bits */ +#define MC_CMD_KR_TUNE_IN_KR_TUNE_RSVD_OFST 1 +#define MC_CMD_KR_TUNE_IN_KR_TUNE_RSVD_LEN 3 +/* Arguments specific to the operation */ +#define MC_CMD_KR_TUNE_IN_KR_TUNE_ARGS_OFST 4 +#define MC_CMD_KR_TUNE_IN_KR_TUNE_ARGS_LEN 4 +#define MC_CMD_KR_TUNE_IN_KR_TUNE_ARGS_MINNUM 0 +#define MC_CMD_KR_TUNE_IN_KR_TUNE_ARGS_MAXNUM 62 +#define MC_CMD_KR_TUNE_IN_KR_TUNE_ARGS_MAXNUM_MCDI2 254 + +/* MC_CMD_KR_TUNE_OUT msgresponse */ +#define MC_CMD_KR_TUNE_OUT_LEN 0 + +/* MC_CMD_KR_TUNE_RXEQ_GET_IN msgrequest */ +#define MC_CMD_KR_TUNE_RXEQ_GET_IN_LEN 4 +/* Requested operation */ +#define MC_CMD_KR_TUNE_RXEQ_GET_IN_KR_TUNE_OP_OFST 0 +#define MC_CMD_KR_TUNE_RXEQ_GET_IN_KR_TUNE_OP_LEN 1 +/* Align the arguments to 32 bits */ +#define MC_CMD_KR_TUNE_RXEQ_GET_IN_KR_TUNE_RSVD_OFST 1 +#define MC_CMD_KR_TUNE_RXEQ_GET_IN_KR_TUNE_RSVD_LEN 3 + +/* MC_CMD_KR_TUNE_RXEQ_GET_OUT msgresponse */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LENMIN 4 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LENMAX 252 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LEN(num) (0+4*(num)) +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_NUM(len) (((len)-0)/4) +/* RXEQ Parameter */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_OFST 0 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LEN 4 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_MINNUM 1 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_MAXNUM 63 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_MAXNUM_MCDI2 255 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_ID_LBN 0 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_ID_WIDTH 8 +/* enum: Attenuation (0-15, Huntington) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_ATT 0x0 +/* enum: CTLE Boost (0-15, Huntington) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_BOOST 0x1 +/* enum: Edge DFE Tap1 (Huntington - 0 - max negative, 64 - zero, 127 - max + * positive, Medford - 0-31) + */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP1 0x2 +/* enum: Edge DFE Tap2 (Huntington - 0 - max negative, 32 - zero, 63 - max + * positive, Medford - 0-31) + */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP2 0x3 +/* enum: Edge DFE Tap3 (Huntington - 0 - max negative, 32 - zero, 63 - max + * positive, Medford - 0-16) + */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP3 0x4 +/* enum: Edge DFE Tap4 (Huntington - 0 - max negative, 32 - zero, 63 - max + * positive, Medford - 0-16) + */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP4 0x5 +/* enum: Edge DFE Tap5 (Huntington - 0 - max negative, 32 - zero, 63 - max + * positive, Medford - 0-16) + */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP5 0x6 +/* enum: Edge DFE DLEV (0-128 for Medford) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_DLEV 0x7 +/* enum: Variable Gain Amplifier (0-15, Medford) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_VGA 0x8 +/* enum: CTLE EQ Capacitor (0-15, Medford) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_EQC 0x9 +/* enum: CTLE EQ Resistor (0-7, Medford) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_EQRES 0xa +/* enum: CTLE gain (0-31, Medford2) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_GAIN 0xb +/* enum: CTLE pole (0-31, Medford2) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_POLE 0xc +/* enum: CTLE peaking (0-31, Medford2) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_PEAK 0xd +/* enum: DFE Tap1 - even path (Medford2 - 6 bit signed 
(-29 - +29)) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP1_EVEN 0xe +/* enum: DFE Tap1 - odd path (Medford2 - 6 bit signed (-29 - +29)) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP1_ODD 0xf +/* enum: DFE Tap2 (Medford2 - 6 bit signed (-20 - +20)) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP2 0x10 +/* enum: DFE Tap3 (Medford2 - 6 bit signed (-20 - +20)) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP3 0x11 +/* enum: DFE Tap4 (Medford2 - 6 bit signed (-20 - +20)) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP4 0x12 +/* enum: DFE Tap5 (Medford2 - 6 bit signed (-24 - +24)) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP5 0x13 +/* enum: DFE Tap6 (Medford2 - 6 bit signed (-24 - +24)) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP6 0x14 +/* enum: DFE Tap7 (Medford2 - 6 bit signed (-24 - +24)) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP7 0x15 +/* enum: DFE Tap8 (Medford2 - 6 bit signed (-24 - +24)) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP8 0x16 +/* enum: DFE Tap9 (Medford2 - 6 bit signed (-24 - +24)) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP9 0x17 +/* enum: DFE Tap10 (Medford2 - 6 bit signed (-24 - +24)) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP10 0x18 +/* enum: DFE Tap11 (Medford2 - 6 bit signed (-24 - +24)) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP11 0x19 +/* enum: DFE Tap12 (Medford2 - 6 bit signed (-24 - +24)) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP12 0x1a +/* enum: I/Q clk offset (Medford2 - 4 bit signed (-5 - +5))) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_IQ_OFF 0x1b +/* enum: Negative h1 polarity data sampler offset calibration code, even path + * (Medford2 - 6 bit signed (-29 - +29))) + */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_H1N_OFF_EVEN 0x1c +/* enum: Negative h1 polarity data sampler offset calibration code, odd path + * (Medford2 - 6 bit signed (-29 - +29))) + */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_H1N_OFF_ODD 0x1d +/* enum: Positive h1 polarity data sampler offset calibration code, even path + * (Medford2 - 6 bit signed (-29 - +29))) + */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_H1P_OFF_EVEN 0x1e +/* enum: Positive h1 polarity data sampler offset calibration code, odd path + * (Medford2 - 6 bit signed (-29 - +29))) + */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_H1P_OFF_ODD 0x1f +/* enum: CDR calibration loop code (Medford2) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CDR_PVT 0x20 +/* enum: CDR integral loop code (Medford2) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CDR_INTEG 0x21 +/* enum: CTLE Boost stages - retimer lineside (Medford2 with DS250x retimer - 4 + * stages, 2 bits per stage) + */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_BOOST_RT_LS 0x22 +/* enum: DFE Tap1 - retimer lineside (Medford2 with DS250x retimer (-31 - 31)) + */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP1_RT_LS 0x23 +/* enum: DFE Tap2 - retimer lineside (Medford2 with DS250x retimer (-15 - 15)) + */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP2_RT_LS 0x24 +/* enum: DFE Tap3 - retimer lineside (Medford2 with DS250x retimer (-15 - 15)) + */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP3_RT_LS 0x25 +/* enum: DFE Tap4 - retimer lineside (Medford2 with DS250x retimer (-15 - 15)) + */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP4_RT_LS 0x26 +/* enum: DFE Tap5 - retimer lineside (Medford2 with DS250x retimer (-15 - 15)) + */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP5_RT_LS 0x27 +/* enum: CTLE Boost stages - retimer hostside (Medford2 with DS250x retimer - 4 + * stages, 2 bits per stage) + */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_BOOST_RT_HS 0x28 +/* enum: DFE Tap1 - retimer hostside 
(Medford2 with DS250x retimer (-31 - 31)) + */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP1_RT_HS 0x29 +/* enum: DFE Tap2 - retimer hostside (Medford2 with DS250x retimer (-15 - 15)) + */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP2_RT_HS 0x2a +/* enum: DFE Tap3 - retimer hostside (Medford2 with DS250x retimer (-15 - 15)) + */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP3_RT_HS 0x2b +/* enum: DFE Tap4 - retimer hostside (Medford2 with DS250x retimer (-15 - 15)) + */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP4_RT_HS 0x2c +/* enum: DFE Tap5 - retimer hostside (Medford2 with DS250x retimer (-15 - 15)) + */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP5_RT_HS 0x2d +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_LBN 8 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH 3 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_0 0x0 /* enum */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_1 0x1 /* enum */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_2 0x2 /* enum */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_3 0x3 /* enum */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_ALL 0x4 /* enum */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_LBN 11 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_WIDTH 1 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_RESERVED_LBN 12 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_RESERVED_WIDTH 4 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_INITIAL_LBN 16 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_INITIAL_WIDTH 8 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_LBN 24 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_WIDTH 8 + +/* MC_CMD_KR_TUNE_RXEQ_SET_IN msgrequest */ +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_LENMIN 8 +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_LENMAX 252 +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_LENMAX_MCDI2 1020 +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_LEN(num) (4+4*(num)) +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_NUM(len) (((len)-4)/4) +/* Requested operation */ +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_KR_TUNE_OP_OFST 0 +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_KR_TUNE_OP_LEN 1 +/* Align the arguments to 32 bits */ +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_KR_TUNE_RSVD_OFST 1 +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_KR_TUNE_RSVD_LEN 3 +/* RXEQ Parameter */ +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_OFST 4 +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_LEN 4 +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_MINNUM 1 +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_MAXNUM 62 +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_MAXNUM_MCDI2 254 +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_ID_LBN 0 +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_ID_WIDTH 8 +/* Enum values, see field(s): */ +/* MC_CMD_KR_TUNE_RXEQ_GET_OUT/PARAM_ID */ +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_LANE_LBN 8 +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_LANE_WIDTH 3 +/* Enum values, see field(s): */ +/* MC_CMD_KR_TUNE_RXEQ_GET_OUT/PARAM_LANE */ +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_LBN 11 +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_WIDTH 1 +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_RESERVED_LBN 12 +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_RESERVED_WIDTH 4 +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_INITIAL_LBN 16 +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_INITIAL_WIDTH 8 +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_RESERVED2_LBN 24 +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_RESERVED2_WIDTH 8 + +/* MC_CMD_KR_TUNE_RXEQ_SET_OUT msgresponse */ +#define MC_CMD_KR_TUNE_RXEQ_SET_OUT_LEN 0 + +/* MC_CMD_KR_TUNE_TXEQ_GET_IN msgrequest */ +#define MC_CMD_KR_TUNE_TXEQ_GET_IN_LEN 4 +/* Requested operation */ +#define MC_CMD_KR_TUNE_TXEQ_GET_IN_KR_TUNE_OP_OFST 0 +#define MC_CMD_KR_TUNE_TXEQ_GET_IN_KR_TUNE_OP_LEN 1 +/* Align 
the arguments to 32 bits */ +#define MC_CMD_KR_TUNE_TXEQ_GET_IN_KR_TUNE_RSVD_OFST 1 +#define MC_CMD_KR_TUNE_TXEQ_GET_IN_KR_TUNE_RSVD_LEN 3 + +/* MC_CMD_KR_TUNE_TXEQ_GET_OUT msgresponse */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LENMIN 4 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LENMAX 252 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LEN(num) (0+4*(num)) +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_NUM(len) (((len)-0)/4) +/* TXEQ Parameter */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_OFST 0 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LEN 4 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_MINNUM 1 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_MAXNUM 63 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_MAXNUM_MCDI2 255 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_ID_LBN 0 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_ID_WIDTH 8 +/* enum: TX Amplitude (Huntington, Medford, Medford2) */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_LEV 0x0 +/* enum: De-Emphasis Tap1 Magnitude (0-7) (Huntington) */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_MODE 0x1 +/* enum: De-Emphasis Tap1 Fine */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_DTLEV 0x2 +/* enum: De-Emphasis Tap2 Magnitude (0-6) (Huntington) */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_D2 0x3 +/* enum: De-Emphasis Tap2 Fine (Huntington) */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_D2TLEV 0x4 +/* enum: Pre-Emphasis Magnitude (Huntington) */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_E 0x5 +/* enum: Pre-Emphasis Fine (Huntington) */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_ETLEV 0x6 +/* enum: TX Slew Rate Coarse control (Huntington) */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_PREDRV_DLY 0x7 +/* enum: TX Slew Rate Fine control (Huntington) */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_SR_SET 0x8 +/* enum: TX Termination Impedance control (Huntington) */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_RT_SET 0x9 +/* enum: TX Amplitude Fine control (Medford) */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_LEV_FINE 0xa +/* enum: Pre-cursor Tap (Medford, Medford2) */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TAP_ADV 0xb +/* enum: Post-cursor Tap (Medford, Medford2) */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TAP_DLY 0xc +/* enum: TX Amplitude (Retimer Lineside) */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_LEV_RT_LS 0xd +/* enum: Pre-cursor Tap (Retimer Lineside) */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TAP_ADV_RT_LS 0xe +/* enum: Post-cursor Tap (Retimer Lineside) */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TAP_DLY_RT_LS 0xf +/* enum: TX Amplitude (Retimer Hostside) */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_LEV_RT_HS 0x10 +/* enum: Pre-cursor Tap (Retimer Hostside) */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TAP_ADV_RT_HS 0x11 +/* enum: Post-cursor Tap (Retimer Hostside) */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TAP_DLY_RT_HS 0x12 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_LBN 8 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_WIDTH 3 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_0 0x0 /* enum */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_1 0x1 /* enum */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_2 0x2 /* enum */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_3 0x3 /* enum */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_ALL 0x4 /* enum */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_RESERVED_LBN 11 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_RESERVED_WIDTH 5 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_INITIAL_LBN 16 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_INITIAL_WIDTH 8 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_RESERVED2_LBN 24 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_RESERVED2_WIDTH 8 + +/* MC_CMD_KR_TUNE_TXEQ_SET_IN 
msgrequest */ +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_LENMIN 8 +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_LENMAX 252 +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_LENMAX_MCDI2 1020 +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_LEN(num) (4+4*(num)) +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_NUM(len) (((len)-4)/4) +/* Requested operation */ +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_KR_TUNE_OP_OFST 0 +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_KR_TUNE_OP_LEN 1 +/* Align the arguments to 32 bits */ +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_KR_TUNE_RSVD_OFST 1 +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_KR_TUNE_RSVD_LEN 3 +/* TXEQ Parameter */ +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_OFST 4 +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_LEN 4 +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_MINNUM 1 +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_MAXNUM 62 +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_MAXNUM_MCDI2 254 +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_ID_LBN 0 +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_ID_WIDTH 8 +/* Enum values, see field(s): */ +/* MC_CMD_KR_TUNE_TXEQ_GET_OUT/PARAM_ID */ +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_LANE_LBN 8 +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_LANE_WIDTH 3 +/* Enum values, see field(s): */ +/* MC_CMD_KR_TUNE_TXEQ_GET_OUT/PARAM_LANE */ +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_RESERVED_LBN 11 +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_RESERVED_WIDTH 5 +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_INITIAL_LBN 16 +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_INITIAL_WIDTH 8 +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_RESERVED2_LBN 24 +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_RESERVED2_WIDTH 8 + +/* MC_CMD_KR_TUNE_TXEQ_SET_OUT msgresponse */ +#define MC_CMD_KR_TUNE_TXEQ_SET_OUT_LEN 0 + +/* MC_CMD_KR_TUNE_RECAL_IN msgrequest */ +#define MC_CMD_KR_TUNE_RECAL_IN_LEN 4 +/* Requested operation */ +#define MC_CMD_KR_TUNE_RECAL_IN_KR_TUNE_OP_OFST 0 +#define MC_CMD_KR_TUNE_RECAL_IN_KR_TUNE_OP_LEN 1 +/* Align the arguments to 32 bits */ +#define MC_CMD_KR_TUNE_RECAL_IN_KR_TUNE_RSVD_OFST 1 +#define MC_CMD_KR_TUNE_RECAL_IN_KR_TUNE_RSVD_LEN 3 + +/* MC_CMD_KR_TUNE_RECAL_OUT msgresponse */ +#define MC_CMD_KR_TUNE_RECAL_OUT_LEN 0 + +/* MC_CMD_KR_TUNE_START_EYE_PLOT_IN msgrequest */ +#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_LEN 8 +/* Requested operation */ +#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_KR_TUNE_OP_OFST 0 +#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_KR_TUNE_OP_LEN 1 +/* Align the arguments to 32 bits */ +#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_KR_TUNE_RSVD_OFST 1 +#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_KR_TUNE_RSVD_LEN 3 +/* Port-relative lane to scan eye on */ +#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_LANE_OFST 4 +#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_LANE_LEN 4 + +/* MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN msgrequest */ +#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LEN 12 +/* Requested operation */ +#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_KR_TUNE_OP_OFST 0 +#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_KR_TUNE_OP_LEN 1 +/* Align the arguments to 32 bits */ +#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_KR_TUNE_RSVD_OFST 1 +#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_KR_TUNE_RSVD_LEN 3 +#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_OFST 4 +#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_LEN 4 +#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_NUM_LBN 0 +#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_NUM_WIDTH 8 +#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_ABS_REL_LBN 31 +#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_ABS_REL_WIDTH 1 +/* Scan duration / cycle count */ +#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_BER_OFST 8 +#define 
MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_BER_LEN 4 + +/* MC_CMD_KR_TUNE_START_EYE_PLOT_OUT msgresponse */ +#define MC_CMD_KR_TUNE_START_EYE_PLOT_OUT_LEN 0 + +/* MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN msgrequest */ +#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN_LEN 4 +/* Requested operation */ +#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN_KR_TUNE_OP_OFST 0 +#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN_KR_TUNE_OP_LEN 1 +/* Align the arguments to 32 bits */ +#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN_KR_TUNE_RSVD_OFST 1 +#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN_KR_TUNE_RSVD_LEN 3 + +/* MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT msgresponse */ +#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_LENMIN 0 +#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_LENMAX 252 +#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_LEN(num) (0+2*(num)) +#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_NUM(len) (((len)-0)/2) +#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_OFST 0 +#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_LEN 2 +#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MINNUM 0 +#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MAXNUM 126 +#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MAXNUM_MCDI2 510 + +/* MC_CMD_KR_TUNE_READ_FOM_IN msgrequest */ +#define MC_CMD_KR_TUNE_READ_FOM_IN_LEN 8 +/* Requested operation */ +#define MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_OP_OFST 0 +#define MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_OP_LEN 1 +/* Align the arguments to 32 bits */ +#define MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_RSVD_OFST 1 +#define MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_RSVD_LEN 3 +#define MC_CMD_KR_TUNE_READ_FOM_IN_LANE_OFST 4 +#define MC_CMD_KR_TUNE_READ_FOM_IN_LANE_LEN 4 +#define MC_CMD_KR_TUNE_READ_FOM_IN_LANE_NUM_LBN 0 +#define MC_CMD_KR_TUNE_READ_FOM_IN_LANE_NUM_WIDTH 8 +#define MC_CMD_KR_TUNE_READ_FOM_IN_LANE_ABS_REL_LBN 31 +#define MC_CMD_KR_TUNE_READ_FOM_IN_LANE_ABS_REL_WIDTH 1 + +/* MC_CMD_KR_TUNE_READ_FOM_OUT msgresponse */ +#define MC_CMD_KR_TUNE_READ_FOM_OUT_LEN 4 +#define MC_CMD_KR_TUNE_READ_FOM_OUT_FOM_OFST 0 +#define MC_CMD_KR_TUNE_READ_FOM_OUT_FOM_LEN 4 + +/* MC_CMD_KR_TUNE_LINK_TRAIN_RUN_IN msgrequest */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_RUN_IN_LEN 8 +/* Requested operation */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_RUN_IN_KR_TUNE_OP_OFST 0 +#define MC_CMD_KR_TUNE_LINK_TRAIN_RUN_IN_KR_TUNE_OP_LEN 1 +/* Align the arguments to 32 bits */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_RUN_IN_KR_TUNE_RSVD_OFST 1 +#define MC_CMD_KR_TUNE_LINK_TRAIN_RUN_IN_KR_TUNE_RSVD_LEN 3 +#define MC_CMD_KR_TUNE_LINK_TRAIN_RUN_IN_RUN_OFST 4 +#define MC_CMD_KR_TUNE_LINK_TRAIN_RUN_IN_RUN_LEN 4 +#define MC_CMD_KR_TUNE_LINK_TRAIN_RUN_IN_STOP 0x0 /* enum */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_RUN_IN_START 0x1 /* enum */ + +/* MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN msgrequest */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_LEN 28 +/* Requested operation */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_KR_TUNE_OP_OFST 0 +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_KR_TUNE_OP_LEN 1 +/* Align the arguments to 32 bits */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_KR_TUNE_RSVD_OFST 1 +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_KR_TUNE_RSVD_LEN 3 +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_LANE_OFST 4 +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_LANE_LEN 4 +/* Set INITIALIZE state */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_INITIALIZE_OFST 8 +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_INITIALIZE_LEN 4 +/* Set PRESET state */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_PRESET_OFST 12 +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_PRESET_LEN 4 +/* C(-1) request */ +#define 
MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_CM1_OFST 16 +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_CM1_LEN 4 +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_REQ_HOLD 0x0 /* enum */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_REQ_INCREMENT 0x1 /* enum */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_REQ_DECREMENT 0x2 /* enum */ +/* C(0) request */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_C0_OFST 20 +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_C0_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN/CM1 */ +/* C(+1) request */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_CP1_OFST 24 +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_CP1_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN/CM1 */ + +/* MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT msgresponse */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_LEN 24 +/* C(-1) status */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_CM1_STATUS_OFST 0 +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_CM1_STATUS_LEN 4 +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_STATUS_NOT_UPDATED 0x0 /* enum */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_STATUS_UPDATED 0x1 /* enum */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_STATUS_MINIMUM 0x2 /* enum */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_STATUS_MAXIMUM 0x3 /* enum */ +/* C(0) status */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_C0_STATUS_OFST 4 +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_C0_STATUS_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN/CM1 */ +/* C(+1) status */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_CP1_STATUS_OFST 8 +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_CP1_STATUS_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN/CM1 */ +/* C(-1) value */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_CM1_VALUE_OFST 12 +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_CM1_VALUE_LEN 4 +/* C(0) value */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_C0_VALUE_OFST 16 +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_C0_VALUE_LEN 4 +/* C(+1) status */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_CP1_VALUE_OFST 20 +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_CP1_VALUE_LEN 4 + + +/***********************************/ +/* MC_CMD_PCIE_TUNE + * Get or set PCIE Serdes RXEQ and TX Driver settings + */ +#define MC_CMD_PCIE_TUNE 0xf2 +#undef MC_CMD_0xf2_PRIVILEGE_CTG + +#define MC_CMD_0xf2_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND + +/* MC_CMD_PCIE_TUNE_IN msgrequest */ +#define MC_CMD_PCIE_TUNE_IN_LENMIN 4 +#define MC_CMD_PCIE_TUNE_IN_LENMAX 252 +#define MC_CMD_PCIE_TUNE_IN_LENMAX_MCDI2 1020 +#define MC_CMD_PCIE_TUNE_IN_LEN(num) (4+4*(num)) +#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_NUM(len) (((len)-4)/4) +/* Requested operation */ +#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_OP_OFST 0 +#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_OP_LEN 1 +/* enum: Get current RXEQ settings */ +#define MC_CMD_PCIE_TUNE_IN_RXEQ_GET 0x0 +/* enum: Override RXEQ settings */ +#define MC_CMD_PCIE_TUNE_IN_RXEQ_SET 0x1 +/* enum: Get current TX Driver settings */ +#define MC_CMD_PCIE_TUNE_IN_TXEQ_GET 0x2 +/* enum: Override TX Driver settings */ +#define MC_CMD_PCIE_TUNE_IN_TXEQ_SET 0x3 +/* enum: Start PCIe Serdes Eye diagram plot on a given lane. */ +#define MC_CMD_PCIE_TUNE_IN_START_EYE_PLOT 0x5 +/* enum: Poll PCIe Serdes Eye diagram plot. Returns one row of BER data. The + * caller should call this command repeatedly after starting eye plot, until no + * more data is returned. 
+ */ +#define MC_CMD_PCIE_TUNE_IN_POLL_EYE_PLOT 0x6 +/* enum: Enable the SERDES BIST and set it to generate a 200MHz square wave */ +#define MC_CMD_PCIE_TUNE_IN_BIST_SQUARE_WAVE 0x7 +/* Align the arguments to 32 bits */ +#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_RSVD_OFST 1 +#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_RSVD_LEN 3 +/* Arguments specific to the operation */ +#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_OFST 4 +#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_LEN 4 +#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_MINNUM 0 +#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_MAXNUM 62 +#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_MAXNUM_MCDI2 254 + +/* MC_CMD_PCIE_TUNE_OUT msgresponse */ +#define MC_CMD_PCIE_TUNE_OUT_LEN 0 + +/* MC_CMD_PCIE_TUNE_RXEQ_GET_IN msgrequest */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_LEN 4 +/* Requested operation */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_OP_OFST 0 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_OP_LEN 1 +/* Align the arguments to 32 bits */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_RSVD_OFST 1 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_RSVD_LEN 3 + +/* MC_CMD_PCIE_TUNE_RXEQ_GET_OUT msgresponse */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LENMIN 4 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LENMAX 252 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LEN(num) (0+4*(num)) +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_NUM(len) (((len)-0)/4) +/* RXEQ Parameter */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_OFST 0 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LEN 4 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_MINNUM 1 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_MAXNUM 63 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_MAXNUM_MCDI2 255 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_ID_LBN 0 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_ID_WIDTH 8 +/* enum: Attenuation (0-15) */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_ATT 0x0 +/* enum: CTLE Boost (0-15) */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_BOOST 0x1 +/* enum: DFE Tap1 (0 - max negative, 64 - zero, 127 - max positive) */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP1 0x2 +/* enum: DFE Tap2 (0 - max negative, 32 - zero, 63 - max positive) */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP2 0x3 +/* enum: DFE Tap3 (0 - max negative, 32 - zero, 63 - max positive) */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP3 0x4 +/* enum: DFE Tap4 (0 - max negative, 32 - zero, 63 - max positive) */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP4 0x5 +/* enum: DFE Tap5 (0 - max negative, 32 - zero, 63 - max positive) */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP5 0x6 +/* enum: DFE DLev */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_DLEV 0x7 +/* enum: Figure of Merit */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_FOM 0x8 +/* enum: CTLE EQ Capacitor (HF Gain) */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_CTLE_EQC 0x9 +/* enum: CTLE EQ Resistor (DC Gain) */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_CTLE_EQRES 0xa +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_LBN 8 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH 5 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_0 0x0 /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_1 0x1 /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_2 0x2 /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_3 0x3 /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_4 0x4 /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_5 0x5 /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_6 0x6 /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_7 0x7 /* enum 
*/ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_8 0x8 /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_9 0x9 /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_10 0xa /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_11 0xb /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_12 0xc /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_13 0xd /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_14 0xe /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_15 0xf /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_ALL 0x10 /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_LBN 13 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_WIDTH 1 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_RESERVED_LBN 14 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_RESERVED_WIDTH 10 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_LBN 24 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_WIDTH 8 + +/* MC_CMD_PCIE_TUNE_RXEQ_SET_IN msgrequest */ +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_LENMIN 8 +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_LENMAX 252 +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_LENMAX_MCDI2 1020 +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_LEN(num) (4+4*(num)) +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_NUM(len) (((len)-4)/4) +/* Requested operation */ +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PCIE_TUNE_OP_OFST 0 +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PCIE_TUNE_OP_LEN 1 +/* Align the arguments to 32 bits */ +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PCIE_TUNE_RSVD_OFST 1 +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PCIE_TUNE_RSVD_LEN 3 +/* RXEQ Parameter */ +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_OFST 4 +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_LEN 4 +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_MINNUM 1 +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_MAXNUM 62 +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_MAXNUM_MCDI2 254 +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_ID_LBN 0 +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_ID_WIDTH 8 +/* Enum values, see field(s): */ +/* MC_CMD_PCIE_TUNE_RXEQ_GET_OUT/PARAM_ID */ +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_LANE_LBN 8 +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_LANE_WIDTH 5 +/* Enum values, see field(s): */ +/* MC_CMD_PCIE_TUNE_RXEQ_GET_OUT/PARAM_LANE */ +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_LBN 13 +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_WIDTH 1 +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED_LBN 14 +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED_WIDTH 2 +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_INITIAL_LBN 16 +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_INITIAL_WIDTH 8 +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED2_LBN 24 +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED2_WIDTH 8 + +/* MC_CMD_PCIE_TUNE_RXEQ_SET_OUT msgresponse */ +#define MC_CMD_PCIE_TUNE_RXEQ_SET_OUT_LEN 0 + +/* MC_CMD_PCIE_TUNE_TXEQ_GET_IN msgrequest */ +#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_LEN 4 +/* Requested operation */ +#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_OP_OFST 0 +#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_OP_LEN 1 +/* Align the arguments to 32 bits */ +#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_RSVD_OFST 1 +#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_RSVD_LEN 3 + +/* MC_CMD_PCIE_TUNE_TXEQ_GET_OUT msgresponse */ +#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_LENMIN 4 +#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_LENMAX 252 +#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_LEN(num) (0+4*(num)) +#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_NUM(len) (((len)-0)/4) +/* RXEQ Parameter */ +#define 
MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_OFST 0 +#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LEN 4 +#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_MINNUM 1 +#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_MAXNUM 63 +#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_MAXNUM_MCDI2 255 +#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_ID_LBN 0 +#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_ID_WIDTH 8 +/* enum: TxMargin (PIPE) */ +#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_TXMARGIN 0x0 +/* enum: TxSwing (PIPE) */ +#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_TXSWING 0x1 +/* enum: De-emphasis coefficient C(-1) (PIPE) */ +#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_CM1 0x2 +/* enum: De-emphasis coefficient C(0) (PIPE) */ +#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_C0 0x3 +/* enum: De-emphasis coefficient C(+1) (PIPE) */ +#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_CP1 0x4 +#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LANE_LBN 8 +#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LANE_WIDTH 4 +/* Enum values, see field(s): */ +/* MC_CMD_PCIE_TUNE_RXEQ_GET_OUT/PARAM_LANE */ +#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_RESERVED_LBN 12 +#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_RESERVED_WIDTH 12 +#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_CURRENT_LBN 24 +#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_CURRENT_WIDTH 8 + +/* MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN msgrequest */ +#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_LEN 8 +/* Requested operation */ +#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_OP_OFST 0 +#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_OP_LEN 1 +/* Align the arguments to 32 bits */ +#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_RSVD_OFST 1 +#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_RSVD_LEN 3 +#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_LANE_OFST 4 +#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_LANE_LEN 4 + +/* MC_CMD_PCIE_TUNE_START_EYE_PLOT_OUT msgresponse */ +#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_OUT_LEN 0 + +/* MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN msgrequest */ +#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_LEN 4 +/* Requested operation */ +#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_OP_OFST 0 +#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_OP_LEN 1 +/* Align the arguments to 32 bits */ +#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_RSVD_OFST 1 +#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_RSVD_LEN 3 + +/* MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT msgresponse */ +#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_LENMIN 0 +#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_LENMAX 252 +#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_LEN(num) (0+2*(num)) +#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_NUM(len) (((len)-0)/2) +#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_OFST 0 +#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_LEN 2 +#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MINNUM 0 +#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MAXNUM 126 +#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MAXNUM_MCDI2 510 + +/* MC_CMD_PCIE_TUNE_BIST_SQUARE_WAVE_IN msgrequest */ +#define MC_CMD_PCIE_TUNE_BIST_SQUARE_WAVE_IN_LEN 0 + +/* MC_CMD_PCIE_TUNE_BIST_SQUARE_WAVE_OUT msgrequest */ +#define MC_CMD_PCIE_TUNE_BIST_SQUARE_WAVE_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_LICENSING + * Operations on the NVRAM_PARTITION_TYPE_LICENSE application license partition + * - not used for V3 licensing + */ +#define MC_CMD_LICENSING 0xf3 +#undef MC_CMD_0xf3_PRIVILEGE_CTG + +#define MC_CMD_0xf3_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_LICENSING_IN 
msgrequest */ +#define MC_CMD_LICENSING_IN_LEN 4 +/* identifies the type of operation requested */ +#define MC_CMD_LICENSING_IN_OP_OFST 0 +#define MC_CMD_LICENSING_IN_OP_LEN 4 +/* enum: re-read and apply licenses after a license key partition update; note + * that this operation returns a zero-length response + */ +#define MC_CMD_LICENSING_IN_OP_UPDATE_LICENSE 0x0 +/* enum: report counts of installed licenses */ +#define MC_CMD_LICENSING_IN_OP_GET_KEY_STATS 0x1 + +/* MC_CMD_LICENSING_OUT msgresponse */ +#define MC_CMD_LICENSING_OUT_LEN 28 +/* count of application keys which are valid */ +#define MC_CMD_LICENSING_OUT_VALID_APP_KEYS_OFST 0 +#define MC_CMD_LICENSING_OUT_VALID_APP_KEYS_LEN 4 +/* sum of UNVERIFIABLE_APP_KEYS + WRONG_NODE_APP_KEYS (for compatibility with + * MC_CMD_FC_OP_LICENSE) + */ +#define MC_CMD_LICENSING_OUT_INVALID_APP_KEYS_OFST 4 +#define MC_CMD_LICENSING_OUT_INVALID_APP_KEYS_LEN 4 +/* count of application keys which are invalid due to being blacklisted */ +#define MC_CMD_LICENSING_OUT_BLACKLISTED_APP_KEYS_OFST 8 +#define MC_CMD_LICENSING_OUT_BLACKLISTED_APP_KEYS_LEN 4 +/* count of application keys which are invalid due to being unverifiable */ +#define MC_CMD_LICENSING_OUT_UNVERIFIABLE_APP_KEYS_OFST 12 +#define MC_CMD_LICENSING_OUT_UNVERIFIABLE_APP_KEYS_LEN 4 +/* count of application keys which are invalid due to being for the wrong node + */ +#define MC_CMD_LICENSING_OUT_WRONG_NODE_APP_KEYS_OFST 16 +#define MC_CMD_LICENSING_OUT_WRONG_NODE_APP_KEYS_LEN 4 +/* licensing state (for diagnostics; the exact meaning of the bits in this + * field are private to the firmware) + */ +#define MC_CMD_LICENSING_OUT_LICENSING_STATE_OFST 20 +#define MC_CMD_LICENSING_OUT_LICENSING_STATE_LEN 4 +/* licensing subsystem self-test report (for manftest) */ +#define MC_CMD_LICENSING_OUT_LICENSING_SELF_TEST_OFST 24 +#define MC_CMD_LICENSING_OUT_LICENSING_SELF_TEST_LEN 4 +/* enum: licensing subsystem self-test failed */ +#define MC_CMD_LICENSING_OUT_SELF_TEST_FAIL 0x0 +/* enum: licensing subsystem self-test passed */ +#define MC_CMD_LICENSING_OUT_SELF_TEST_PASS 0x1 + + +/***********************************/ +/* MC_CMD_LICENSING_V3 + * Operations on the NVRAM_PARTITION_TYPE_LICENSE application license partition + * - V3 licensing (Medford) + */ +#define MC_CMD_LICENSING_V3 0xd0 +#undef MC_CMD_0xd0_PRIVILEGE_CTG + +#define MC_CMD_0xd0_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_LICENSING_V3_IN msgrequest */ +#define MC_CMD_LICENSING_V3_IN_LEN 4 +/* identifies the type of operation requested */ +#define MC_CMD_LICENSING_V3_IN_OP_OFST 0 +#define MC_CMD_LICENSING_V3_IN_OP_LEN 4 +/* enum: re-read and apply licenses after a license key partition update; note + * that this operation returns a zero-length response + */ +#define MC_CMD_LICENSING_V3_IN_OP_UPDATE_LICENSE 0x0 +/* enum: report counts of installed licenses Returns EAGAIN if license + * processing (updating) has been started but not yet completed. 
+ */ +#define MC_CMD_LICENSING_V3_IN_OP_REPORT_LICENSE 0x1 + +/* MC_CMD_LICENSING_V3_OUT msgresponse */ +#define MC_CMD_LICENSING_V3_OUT_LEN 88 +/* count of keys which are valid */ +#define MC_CMD_LICENSING_V3_OUT_VALID_KEYS_OFST 0 +#define MC_CMD_LICENSING_V3_OUT_VALID_KEYS_LEN 4 +/* sum of UNVERIFIABLE_KEYS + WRONG_NODE_KEYS (for compatibility with + * MC_CMD_FC_OP_LICENSE) + */ +#define MC_CMD_LICENSING_V3_OUT_INVALID_KEYS_OFST 4 +#define MC_CMD_LICENSING_V3_OUT_INVALID_KEYS_LEN 4 +/* count of keys which are invalid due to being unverifiable */ +#define MC_CMD_LICENSING_V3_OUT_UNVERIFIABLE_KEYS_OFST 8 +#define MC_CMD_LICENSING_V3_OUT_UNVERIFIABLE_KEYS_LEN 4 +/* count of keys which are invalid due to being for the wrong node */ +#define MC_CMD_LICENSING_V3_OUT_WRONG_NODE_KEYS_OFST 12 +#define MC_CMD_LICENSING_V3_OUT_WRONG_NODE_KEYS_LEN 4 +/* licensing state (for diagnostics; the exact meaning of the bits in this + * field are private to the firmware) + */ +#define MC_CMD_LICENSING_V3_OUT_LICENSING_STATE_OFST 16 +#define MC_CMD_LICENSING_V3_OUT_LICENSING_STATE_LEN 4 +/* licensing subsystem self-test report (for manftest) */ +#define MC_CMD_LICENSING_V3_OUT_LICENSING_SELF_TEST_OFST 20 +#define MC_CMD_LICENSING_V3_OUT_LICENSING_SELF_TEST_LEN 4 +/* enum: licensing subsystem self-test failed */ +#define MC_CMD_LICENSING_V3_OUT_SELF_TEST_FAIL 0x0 +/* enum: licensing subsystem self-test passed */ +#define MC_CMD_LICENSING_V3_OUT_SELF_TEST_PASS 0x1 +/* bitmask of licensed applications */ +#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_OFST 24 +#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_LEN 8 +#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_LO_OFST 24 +#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_HI_OFST 28 +/* reserved for future use */ +#define MC_CMD_LICENSING_V3_OUT_RESERVED_0_OFST 32 +#define MC_CMD_LICENSING_V3_OUT_RESERVED_0_LEN 24 +/* bitmask of licensed features */ +#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_OFST 56 +#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_LEN 8 +#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_LO_OFST 56 +#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_HI_OFST 60 +/* reserved for future use */ +#define MC_CMD_LICENSING_V3_OUT_RESERVED_1_OFST 64 +#define MC_CMD_LICENSING_V3_OUT_RESERVED_1_LEN 24 + + +/***********************************/ +/* MC_CMD_LICENSING_GET_ID_V3 + * Get ID and type from the NVRAM_PARTITION_TYPE_LICENSE application license + * partition - V3 licensing (Medford) + */ +#define MC_CMD_LICENSING_GET_ID_V3 0xd1 +#undef MC_CMD_0xd1_PRIVILEGE_CTG + +#define MC_CMD_0xd1_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_LICENSING_GET_ID_V3_IN msgrequest */ +#define MC_CMD_LICENSING_GET_ID_V3_IN_LEN 0 + +/* MC_CMD_LICENSING_GET_ID_V3_OUT msgresponse */ +#define MC_CMD_LICENSING_GET_ID_V3_OUT_LENMIN 8 +#define MC_CMD_LICENSING_GET_ID_V3_OUT_LENMAX 252 +#define MC_CMD_LICENSING_GET_ID_V3_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_LICENSING_GET_ID_V3_OUT_LEN(num) (8+1*(num)) +#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_NUM(len) (((len)-8)/1) +/* type of license (eg 3) */ +#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_TYPE_OFST 0 +#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_TYPE_LEN 4 +/* length of the license ID (in bytes) */ +#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LENGTH_OFST 4 +#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LENGTH_LEN 4 +/* the unique license ID of the adapter */ +#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_OFST 8 +#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LEN 1 +#define 
MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_MINNUM 0 +#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_MAXNUM 244 +#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_MAXNUM_MCDI2 1012 + + +/***********************************/ +/* MC_CMD_MC2MC_PROXY + * Execute an arbitrary MCDI command on the slave MC of a dual-core device. + * This will fail on a single-core system. + */ +#define MC_CMD_MC2MC_PROXY 0xf4 +#undef MC_CMD_0xf4_PRIVILEGE_CTG + +#define MC_CMD_0xf4_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_MC2MC_PROXY_IN msgrequest */ +#define MC_CMD_MC2MC_PROXY_IN_LEN 0 + +/* MC_CMD_MC2MC_PROXY_OUT msgresponse */ +#define MC_CMD_MC2MC_PROXY_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_GET_LICENSED_APP_STATE + * Query the state of an individual licensed application. (Note that the actual + * state may be invalidated by the MC_CMD_LICENSING OP_UPDATE_LICENSE operation + * or a reboot of the MC.) Not used for V3 licensing + */ +#define MC_CMD_GET_LICENSED_APP_STATE 0xf5 +#undef MC_CMD_0xf5_PRIVILEGE_CTG + +#define MC_CMD_0xf5_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_LICENSED_APP_STATE_IN msgrequest */ +#define MC_CMD_GET_LICENSED_APP_STATE_IN_LEN 4 +/* application ID to query (LICENSED_APP_ID_xxx) */ +#define MC_CMD_GET_LICENSED_APP_STATE_IN_APP_ID_OFST 0 +#define MC_CMD_GET_LICENSED_APP_STATE_IN_APP_ID_LEN 4 + +/* MC_CMD_GET_LICENSED_APP_STATE_OUT msgresponse */ +#define MC_CMD_GET_LICENSED_APP_STATE_OUT_LEN 4 +/* state of this application */ +#define MC_CMD_GET_LICENSED_APP_STATE_OUT_STATE_OFST 0 +#define MC_CMD_GET_LICENSED_APP_STATE_OUT_STATE_LEN 4 +/* enum: no (or invalid) license is present for the application */ +#define MC_CMD_GET_LICENSED_APP_STATE_OUT_NOT_LICENSED 0x0 +/* enum: a valid license is present for the application */ +#define MC_CMD_GET_LICENSED_APP_STATE_OUT_LICENSED 0x1 + + +/***********************************/ +/* MC_CMD_GET_LICENSED_V3_APP_STATE + * Query the state of an individual licensed application. (Note that the actual + * state may be invalidated by the MC_CMD_LICENSING_V3 OP_UPDATE_LICENSE + * operation or a reboot of the MC.) Used for V3 licensing (Medford) + */ +#define MC_CMD_GET_LICENSED_V3_APP_STATE 0xd2 +#undef MC_CMD_0xd2_PRIVILEGE_CTG + +#define MC_CMD_0xd2_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_LICENSED_V3_APP_STATE_IN msgrequest */ +#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_LEN 8 +/* application ID to query (LICENSED_V3_APPS_xxx) expressed as a single bit + * mask + */ +#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_OFST 0 +#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_LEN 8 +#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_LO_OFST 0 +#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_HI_OFST 4 + +/* MC_CMD_GET_LICENSED_V3_APP_STATE_OUT msgresponse */ +#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LEN 4 +/* state of this application */ +#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_STATE_OFST 0 +#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_STATE_LEN 4 +/* enum: no (or invalid) license is present for the application */ +#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_NOT_LICENSED 0x0 +/* enum: a valid license is present for the application */ +#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LICENSED 0x1 + + +/***********************************/ +/* MC_CMD_GET_LICENSED_V3_FEATURE_STATES + * Query the state of one or more licensed features. (Note that the actual + * state may be invalidated by the MC_CMD_LICENSING_V3 OP_UPDATE_LICENSE + * operation or a reboot of the MC.)
Used for V3 licensing (Medford) + */ +#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES 0xd3 +#undef MC_CMD_0xd3_PRIVILEGE_CTG + +#define MC_CMD_0xd3_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN msgrequest */ +#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_LEN 8 +/* features to query (LICENSED_V3_FEATURES_xxx) expressed as a mask with one or + * more bits set + */ +#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_OFST 0 +#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_LEN 8 +#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_LO_OFST 0 +#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_HI_OFST 4 + +/* MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT msgresponse */ +#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_LEN 8 +/* states of these features - bit set for licensed, clear for not licensed */ +#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_OFST 0 +#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_LEN 8 +#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_LO_OFST 0 +#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_HI_OFST 4 + + +/***********************************/ +/* MC_CMD_LICENSED_APP_OP + * Perform an action for an individual licensed application - not used for V3 + * licensing. + */ +#define MC_CMD_LICENSED_APP_OP 0xf6 +#undef MC_CMD_0xf6_PRIVILEGE_CTG + +#define MC_CMD_0xf6_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_LICENSED_APP_OP_IN msgrequest */ +#define MC_CMD_LICENSED_APP_OP_IN_LENMIN 8 +#define MC_CMD_LICENSED_APP_OP_IN_LENMAX 252 +#define MC_CMD_LICENSED_APP_OP_IN_LENMAX_MCDI2 1020 +#define MC_CMD_LICENSED_APP_OP_IN_LEN(num) (8+4*(num)) +#define MC_CMD_LICENSED_APP_OP_IN_ARGS_NUM(len) (((len)-8)/4) +/* application ID */ +#define MC_CMD_LICENSED_APP_OP_IN_APP_ID_OFST 0 +#define MC_CMD_LICENSED_APP_OP_IN_APP_ID_LEN 4 +/* the type of operation requested */ +#define MC_CMD_LICENSED_APP_OP_IN_OP_OFST 4 +#define MC_CMD_LICENSED_APP_OP_IN_OP_LEN 4 +/* enum: validate application */ +#define MC_CMD_LICENSED_APP_OP_IN_OP_VALIDATE 0x0 +/* enum: mask application */ +#define MC_CMD_LICENSED_APP_OP_IN_OP_MASK 0x1 +/* arguments specific to this particular operation */ +#define MC_CMD_LICENSED_APP_OP_IN_ARGS_OFST 8 +#define MC_CMD_LICENSED_APP_OP_IN_ARGS_LEN 4 +#define MC_CMD_LICENSED_APP_OP_IN_ARGS_MINNUM 0 +#define MC_CMD_LICENSED_APP_OP_IN_ARGS_MAXNUM 61 +#define MC_CMD_LICENSED_APP_OP_IN_ARGS_MAXNUM_MCDI2 253 + +/* MC_CMD_LICENSED_APP_OP_OUT msgresponse */ +#define MC_CMD_LICENSED_APP_OP_OUT_LENMIN 0 +#define MC_CMD_LICENSED_APP_OP_OUT_LENMAX 252 +#define MC_CMD_LICENSED_APP_OP_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_LICENSED_APP_OP_OUT_LEN(num) (0+4*(num)) +#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_NUM(len) (((len)-0)/4) +/* result specific to this particular operation */ +#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_OFST 0 +#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_LEN 4 +#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_MINNUM 0 +#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_MAXNUM 63 +#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_MAXNUM_MCDI2 255 + +/* MC_CMD_LICENSED_APP_OP_VALIDATE_IN msgrequest */ +#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_LEN 72 +/* application ID */ +#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_APP_ID_OFST 0 +#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_APP_ID_LEN 4 +/* the type of operation requested */ +#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_OP_OFST 4 +#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_OP_LEN 4 +/* validation challenge */ +#define 
MC_CMD_LICENSED_APP_OP_VALIDATE_IN_CHALLENGE_OFST 8 +#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_CHALLENGE_LEN 64 + +/* MC_CMD_LICENSED_APP_OP_VALIDATE_OUT msgresponse */ +#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_LEN 68 +/* feature expiry (time_t) */ +#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_EXPIRY_OFST 0 +#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_EXPIRY_LEN 4 +/* validation response */ +#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_OFST 4 +#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_LEN 64 + +/* MC_CMD_LICENSED_APP_OP_MASK_IN msgrequest */ +#define MC_CMD_LICENSED_APP_OP_MASK_IN_LEN 12 +/* application ID */ +#define MC_CMD_LICENSED_APP_OP_MASK_IN_APP_ID_OFST 0 +#define MC_CMD_LICENSED_APP_OP_MASK_IN_APP_ID_LEN 4 +/* the type of operation requested */ +#define MC_CMD_LICENSED_APP_OP_MASK_IN_OP_OFST 4 +#define MC_CMD_LICENSED_APP_OP_MASK_IN_OP_LEN 4 +/* flag */ +#define MC_CMD_LICENSED_APP_OP_MASK_IN_FLAG_OFST 8 +#define MC_CMD_LICENSED_APP_OP_MASK_IN_FLAG_LEN 4 + +/* MC_CMD_LICENSED_APP_OP_MASK_OUT msgresponse */ +#define MC_CMD_LICENSED_APP_OP_MASK_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_LICENSED_V3_VALIDATE_APP + * Perform validation for an individual licensed application - V3 licensing + * (Medford) + */ +#define MC_CMD_LICENSED_V3_VALIDATE_APP 0xd4 +#undef MC_CMD_0xd4_PRIVILEGE_CTG + +#define MC_CMD_0xd4_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_LICENSED_V3_VALIDATE_APP_IN msgrequest */ +#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_LEN 56 +/* challenge for validation (384 bits) */ +#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_CHALLENGE_OFST 0 +#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_CHALLENGE_LEN 48 +/* application ID expressed as a single bit mask */ +#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_OFST 48 +#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_LEN 8 +#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_LO_OFST 48 +#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_HI_OFST 52 + +/* MC_CMD_LICENSED_V3_VALIDATE_APP_OUT msgresponse */ +#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_LEN 116 +/* validation response to challenge in the form of ECDSA signature consisting + * of two 384-bit integers, r and s, in big-endian order. The signature signs a + * SHA-384 digest of a message constructed from the concatenation of the input + * message and the remaining fields of this output message, e.g. challenge[48 + * bytes] ... expiry_time[4 bytes] ... + */ +#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_RESPONSE_OFST 0 +#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_RESPONSE_LEN 96 +/* application expiry time */ +#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_TIME_OFST 96 +#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_TIME_LEN 4 +/* application expiry units */ +#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNITS_OFST 100 +#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNITS_LEN 4 +/* enum: expiry units are accounting units */ +#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNIT_ACC 0x0 +/* enum: expiry units are calendar days */ +#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNIT_DAYS 0x1 +/* base MAC address of the NIC stored in NVRAM (note that this is a constant + * value for a given NIC regardless which function is calling, effectively this + * is PF0 base MAC address) + */ +#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_BASE_MACADDR_OFST 104 +#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_BASE_MACADDR_LEN 6 +/* MAC address of v-adaptor associated with the client. 
If no such v-adaptor + * exists, then the field is filled with 0xFF. + */ +#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_VADAPTOR_MACADDR_OFST 110 +#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_VADAPTOR_MACADDR_LEN 6 + + +/***********************************/ +/* MC_CMD_LICENSED_V3_MASK_FEATURES + * Mask features - V3 licensing (Medford) + */ +#define MC_CMD_LICENSED_V3_MASK_FEATURES 0xd5 +#undef MC_CMD_0xd5_PRIVILEGE_CTG + +#define MC_CMD_0xd5_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_LICENSED_V3_MASK_FEATURES_IN msgrequest */ +#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_LEN 12 +/* mask to be applied to features to be changed */ +#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_OFST 0 +#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_LEN 8 +#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_LO_OFST 0 +#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_HI_OFST 4 +/* whether to turn on or turn off the masked features */ +#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_FLAG_OFST 8 +#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_FLAG_LEN 4 +/* enum: turn the features off */ +#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_OFF 0x0 +/* enum: turn the features back on */ +#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_ON 0x1 + +/* MC_CMD_LICENSED_V3_MASK_FEATURES_OUT msgresponse */ +#define MC_CMD_LICENSED_V3_MASK_FEATURES_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_LICENSING_V3_TEMPORARY + * Perform operations to support installation of a single temporary license in + * the adapter, in addition to those found in the licensing partition. See + * SF-116124-SW for an overview of how this could be used. The license is + * stored in MC persistent data and so will survive an MC reboot, but will be + * erased when the adapter is power cycled + */ +#define MC_CMD_LICENSING_V3_TEMPORARY 0xd6 +#undef MC_CMD_0xd6_PRIVILEGE_CTG + +#define MC_CMD_0xd6_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND + +/* MC_CMD_LICENSING_V3_TEMPORARY_IN msgrequest */ +#define MC_CMD_LICENSING_V3_TEMPORARY_IN_LEN 4 +/* operation code */ +#define MC_CMD_LICENSING_V3_TEMPORARY_IN_OP_OFST 0 +#define MC_CMD_LICENSING_V3_TEMPORARY_IN_OP_LEN 4 +/* enum: install a new license, overwriting any existing temporary license.
+ * This is an asynchronous operation owing to the time taken to validate an + * ECDSA license + */ +#define MC_CMD_LICENSING_V3_TEMPORARY_SET 0x0 +/* enum: clear the license immediately rather than waiting for the next power + * cycle + */ +#define MC_CMD_LICENSING_V3_TEMPORARY_CLEAR 0x1 +/* enum: get the status of the asynchronous MC_CMD_LICENSING_V3_TEMPORARY_SET + * operation + */ +#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS 0x2 + +/* MC_CMD_LICENSING_V3_TEMPORARY_IN_SET msgrequest */ +#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LEN 164 +#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_OP_OFST 0 +#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_OP_LEN 4 +/* ECDSA license and signature */ +#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LICENSE_OFST 4 +#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LICENSE_LEN 160 + +/* MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR msgrequest */ +#define MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR_LEN 4 +#define MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR_OP_OFST 0 +#define MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR_OP_LEN 4 + +/* MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS msgrequest */ +#define MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_LEN 4 +#define MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_OP_OFST 0 +#define MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_OP_LEN 4 + +/* MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS msgresponse */ +#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LEN 12 +/* status code */ +#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_STATUS_OFST 0 +#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_STATUS_LEN 4 +/* enum: finished validating and installing license */ +#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS_OK 0x0 +/* enum: license validation and installation in progress */ +#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS_IN_PROGRESS 0x1 +/* enum: licensing error. More specific error messages are not provided to + * avoid exposing details of the licensing system to the client + */ +#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS_ERROR 0x2 +/* bitmask of licensed features */ +#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_OFST 4 +#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_LEN 8 +#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_LO_OFST 4 +#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_HI_OFST 8 + + +/***********************************/ +/* MC_CMD_SET_PORT_SNIFF_CONFIG + * Configure RX port sniffing for the physical port associated with the calling + * function. Only a privileged function may change the port sniffing + * configuration. A copy of all traffic delivered to the host (non-promiscuous + * mode) or all traffic arriving at the port (promiscuous mode) may be + * delivered to a specific queue, or a set of queues with RSS. 
+ */ +#define MC_CMD_SET_PORT_SNIFF_CONFIG 0xf7 +#undef MC_CMD_0xf7_PRIVILEGE_CTG + +#define MC_CMD_0xf7_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_SET_PORT_SNIFF_CONFIG_IN msgrequest */ +#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_LEN 16 +/* configuration flags */ +#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_FLAGS_OFST 0 +#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_FLAGS_LEN 4 +#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_LBN 0 +#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_WIDTH 1 +#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_LBN 1 +#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_WIDTH 1 +/* receive queue handle (for RSS mode, this is the base queue) */ +#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_QUEUE_OFST 4 +#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_QUEUE_LEN 4 +/* receive mode */ +#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_OFST 8 +#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_LEN 4 +/* enum: receive to just the specified queue */ +#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE 0x0 +/* enum: receive to multiple queues using RSS context */ +#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_RSS 0x1 +/* RSS context (for RX_MODE_RSS) as returned by MC_CMD_RSS_CONTEXT_ALLOC. Note + * that these handles should be considered opaque to the host, although a value + * of 0xFFFFFFFF is guaranteed never to be a valid handle. + */ +#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_OFST 12 +#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_LEN 4 + +/* MC_CMD_SET_PORT_SNIFF_CONFIG_OUT msgresponse */ +#define MC_CMD_SET_PORT_SNIFF_CONFIG_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_GET_PORT_SNIFF_CONFIG + * Obtain the current RX port sniffing configuration for the physical port + * associated with the calling function. Only a privileged function may read + * the configuration. + */ +#define MC_CMD_GET_PORT_SNIFF_CONFIG 0xf8 +#undef MC_CMD_0xf8_PRIVILEGE_CTG + +#define MC_CMD_0xf8_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_PORT_SNIFF_CONFIG_IN msgrequest */ +#define MC_CMD_GET_PORT_SNIFF_CONFIG_IN_LEN 0 + +/* MC_CMD_GET_PORT_SNIFF_CONFIG_OUT msgresponse */ +#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_LEN 16 +/* configuration flags */ +#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_FLAGS_OFST 0 +#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_FLAGS_LEN 4 +#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_ENABLE_LBN 0 +#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_ENABLE_WIDTH 1 +#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_PROMISCUOUS_LBN 1 +#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_PROMISCUOUS_WIDTH 1 +/* receiving queue handle (for RSS mode, this is the base queue) */ +#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_OFST 4 +#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_LEN 4 +/* receive mode */ +#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_OFST 8 +#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_LEN 4 +/* enum: receiving to just the specified queue */ +#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_SIMPLE 0x0 +/* enum: receiving to multiple queues using RSS context */ +#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_RSS 0x1 +/* RSS context (for RX_MODE_RSS) */ +#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_OFST 12 +#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_LEN 4 + + +/***********************************/ +/* MC_CMD_SET_PARSER_DISP_CONFIG + * Change configuration related to the parser-dispatcher subsystem. 
+ */ +#define MC_CMD_SET_PARSER_DISP_CONFIG 0xf9 +#undef MC_CMD_0xf9_PRIVILEGE_CTG + +#define MC_CMD_0xf9_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_SET_PARSER_DISP_CONFIG_IN msgrequest */ +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_LENMIN 12 +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_LENMAX 252 +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_LENMAX_MCDI2 1020 +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_LEN(num) (8+4*(num)) +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_NUM(len) (((len)-8)/4) +/* the type of configuration setting to change */ +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_TYPE_OFST 0 +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_TYPE_LEN 4 +/* enum: Per-TXQ enable for multicast UDP destination lookup for possible + * internal loopback. (ENTITY is a queue handle, VALUE is a single boolean.) + */ +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_TXQ_MCAST_UDP_DST_LOOKUP_EN 0x0 +/* enum: Per-v-adaptor enable for suppression of self-transmissions on the + * internal loopback path. (ENTITY is an EVB_PORT_ID, VALUE is a single + * boolean.) + */ +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VADAPTOR_SUPPRESS_SELF_TX 0x1 +/* handle for the entity to update: queue handle, EVB port ID, etc. depending + * on the type of configuration setting being changed + */ +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_ENTITY_OFST 4 +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_ENTITY_LEN 4 +/* new value: the details depend on the type of configuration setting being + * changed + */ +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_OFST 8 +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_LEN 4 +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_MINNUM 1 +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_MAXNUM 61 +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_MAXNUM_MCDI2 253 + +/* MC_CMD_SET_PARSER_DISP_CONFIG_OUT msgresponse */ +#define MC_CMD_SET_PARSER_DISP_CONFIG_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_GET_PARSER_DISP_CONFIG + * Read configuration related to the parser-dispatcher subsystem. + */ +#define MC_CMD_GET_PARSER_DISP_CONFIG 0xfa +#undef MC_CMD_0xfa_PRIVILEGE_CTG + +#define MC_CMD_0xfa_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_PARSER_DISP_CONFIG_IN msgrequest */ +#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_LEN 8 +/* the type of configuration setting to read */ +#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_TYPE_OFST 0 +#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_TYPE_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_SET_PARSER_DISP_CONFIG/MC_CMD_SET_PARSER_DISP_CONFIG_IN/TYPE */ +/* handle for the entity to query: queue handle, EVB port ID, etc. 
depending on + * the type of configuration setting being read + */ +#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_ENTITY_OFST 4 +#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_ENTITY_LEN 4 + +/* MC_CMD_GET_PARSER_DISP_CONFIG_OUT msgresponse */ +#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LENMIN 4 +#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LENMAX 252 +#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LEN(num) (0+4*(num)) +#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_NUM(len) (((len)-0)/4) +/* current value: the details depend on the type of configuration setting being + * read + */ +#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_OFST 0 +#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_LEN 4 +#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_MINNUM 1 +#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_MAXNUM 63 +#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_MAXNUM_MCDI2 255 + + +/***********************************/ +/* MC_CMD_SET_TX_PORT_SNIFF_CONFIG + * Configure TX port sniffing for the physical port associated with the calling + * function. Only a privileged function may change the port sniffing + * configuration. A copy of all traffic transmitted through the port may be + * delivered to a specific queue, or a set of queues with RSS. Note that these + * packets are delivered with transmit timestamps in the packet prefix, not + * receive timestamps, so it is likely that the queue(s) will need to be + * dedicated as TX sniff receivers. + */ +#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG 0xfb +#undef MC_CMD_0xfb_PRIVILEGE_CTG + +#define MC_CMD_0xfb_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN msgrequest */ +#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_LEN 16 +/* configuration flags */ +#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_FLAGS_OFST 0 +#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_FLAGS_LEN 4 +#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE_LBN 0 +#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE_WIDTH 1 +/* receive queue handle (for RSS mode, this is the base queue) */ +#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_QUEUE_OFST 4 +#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_QUEUE_LEN 4 +/* receive mode */ +#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_OFST 8 +#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_LEN 4 +/* enum: receive to just the specified queue */ +#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE 0x0 +/* enum: receive to multiple queues using RSS context */ +#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_RSS 0x1 +/* RSS context (for RX_MODE_RSS) as returned by MC_CMD_RSS_CONTEXT_ALLOC. Note + * that these handles should be considered opaque to the host, although a value + * of 0xFFFFFFFF is guaranteed never to be a valid handle. + */ +#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_OFST 12 +#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_LEN 4 + +/* MC_CMD_SET_TX_PORT_SNIFF_CONFIG_OUT msgresponse */ +#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_GET_TX_PORT_SNIFF_CONFIG + * Obtain the current TX port sniffing configuration for the physical port + * associated with the calling function. Only a privileged function may read + * the configuration. 
+ */ +#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG 0xfc +#undef MC_CMD_0xfc_PRIVILEGE_CTG + +#define MC_CMD_0xfc_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_TX_PORT_SNIFF_CONFIG_IN msgrequest */ +#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_IN_LEN 0 + +/* MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT msgresponse */ +#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_LEN 16 +/* configuration flags */ +#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_FLAGS_OFST 0 +#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_FLAGS_LEN 4 +#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_ENABLE_LBN 0 +#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_ENABLE_WIDTH 1 +/* receiving queue handle (for RSS mode, this is the base queue) */ +#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_OFST 4 +#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_LEN 4 +/* receive mode */ +#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_OFST 8 +#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_LEN 4 +/* enum: receiving to just the specified queue */ +#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_SIMPLE 0x0 +/* enum: receiving to multiple queues using RSS context */ +#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_RSS 0x1 +/* RSS context (for RX_MODE_RSS) */ +#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_OFST 12 +#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_LEN 4 + + +/***********************************/ +/* MC_CMD_RMON_STATS_RX_ERRORS + * Per queue rx error stats. + */ +#define MC_CMD_RMON_STATS_RX_ERRORS 0xfe +#undef MC_CMD_0xfe_PRIVILEGE_CTG + +#define MC_CMD_0xfe_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_RMON_STATS_RX_ERRORS_IN msgrequest */ +#define MC_CMD_RMON_STATS_RX_ERRORS_IN_LEN 8 +/* The rx queue to get stats for. */ +#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RX_QUEUE_OFST 0 +#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RX_QUEUE_LEN 4 +#define MC_CMD_RMON_STATS_RX_ERRORS_IN_FLAGS_OFST 4 +#define MC_CMD_RMON_STATS_RX_ERRORS_IN_FLAGS_LEN 4 +#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RST_LBN 0 +#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RST_WIDTH 1 + +/* MC_CMD_RMON_STATS_RX_ERRORS_OUT msgresponse */ +#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_LEN 16 +#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_CRC_ERRORS_OFST 0 +#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_CRC_ERRORS_LEN 4 +#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_TRUNC_ERRORS_OFST 4 +#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_TRUNC_ERRORS_LEN 4 +#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_NO_DESC_DROPS_OFST 8 +#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_NO_DESC_DROPS_LEN 4 +#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_ABORT_OFST 12 +#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_ABORT_LEN 4 + + +/***********************************/ +/* MC_CMD_GET_PCIE_RESOURCE_INFO + * Find out about available PCIE resources + */ +#define MC_CMD_GET_PCIE_RESOURCE_INFO 0xfd +#undef MC_CMD_0xfd_PRIVILEGE_CTG + +#define MC_CMD_0xfd_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_PCIE_RESOURCE_INFO_IN msgrequest */ +#define MC_CMD_GET_PCIE_RESOURCE_INFO_IN_LEN 0 + +/* MC_CMD_GET_PCIE_RESOURCE_INFO_OUT msgresponse */ +#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_LEN 28 +/* The maximum number of PFs the device can expose */ +#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PFS_OFST 0 +#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PFS_LEN 4 +/* The maximum number of VFs the device can expose in total */ +#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VFS_OFST 4 +#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VFS_LEN 4 +/* The maximum number of MSI-X vectors the device can provide in total */ +#define 
MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VECTORS_OFST 8 +#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VECTORS_LEN 4 +/* the number of MSI-X vectors the device will allocate by default to each PF + */ +#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_PF_VECTORS_OFST 12 +#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_PF_VECTORS_LEN 4 +/* the number of MSI-X vectors the device will allocate by default to each VF + */ +#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_VF_VECTORS_OFST 16 +#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_VF_VECTORS_LEN 4 +/* the maximum number of MSI-X vectors the device can allocate to any one PF */ +#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PF_VECTORS_OFST 20 +#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PF_VECTORS_LEN 4 +/* the maximum number of MSI-X vectors the device can allocate to any one VF */ +#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VF_VECTORS_OFST 24 +#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VF_VECTORS_LEN 4 + + +/***********************************/ +/* MC_CMD_GET_PORT_MODES + * Find out about available port modes + */ +#define MC_CMD_GET_PORT_MODES 0xff +#undef MC_CMD_0xff_PRIVILEGE_CTG + +#define MC_CMD_0xff_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_PORT_MODES_IN msgrequest */ +#define MC_CMD_GET_PORT_MODES_IN_LEN 0 + +/* MC_CMD_GET_PORT_MODES_OUT msgresponse */ +#define MC_CMD_GET_PORT_MODES_OUT_LEN 12 +/* Bitmask of port modes available on the board (indexed by TLV_PORT_MODE_*) + * that are supported for customer use in production firmware. + */ +#define MC_CMD_GET_PORT_MODES_OUT_MODES_OFST 0 +#define MC_CMD_GET_PORT_MODES_OUT_MODES_LEN 4 +/* Default (canonical) board mode */ +#define MC_CMD_GET_PORT_MODES_OUT_DEFAULT_MODE_OFST 4 +#define MC_CMD_GET_PORT_MODES_OUT_DEFAULT_MODE_LEN 4 +/* Current board mode */ +#define MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_OFST 8 +#define MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_LEN 4 + +/* MC_CMD_GET_PORT_MODES_OUT_V2 msgresponse */ +#define MC_CMD_GET_PORT_MODES_OUT_V2_LEN 16 +/* Bitmask of port modes available on the board (indexed by TLV_PORT_MODE_*) + * that are supported for customer use in production firmware. + */ +#define MC_CMD_GET_PORT_MODES_OUT_V2_MODES_OFST 0 +#define MC_CMD_GET_PORT_MODES_OUT_V2_MODES_LEN 4 +/* Default (canonical) board mode */ +#define MC_CMD_GET_PORT_MODES_OUT_V2_DEFAULT_MODE_OFST 4 +#define MC_CMD_GET_PORT_MODES_OUT_V2_DEFAULT_MODE_LEN 4 +/* Current board mode */ +#define MC_CMD_GET_PORT_MODES_OUT_V2_CURRENT_MODE_OFST 8 +#define MC_CMD_GET_PORT_MODES_OUT_V2_CURRENT_MODE_LEN 4 +/* Bitmask of engineering port modes available on the board (indexed by + * TLV_PORT_MODE_*). A superset of MC_CMD_GET_PORT_MODES_OUT/MODES that + * contains all modes implemented in firmware for a particular board. Modes + * listed in MODES are considered production modes and should be exposed in + * userland tools. Modes listed in in ENGINEERING_MODES, but not in MODES + * should be considered hidden (not to be exposed in userland tools) and for + * engineering use only. There are no other semantic differences and any mode + * listed in either MODES or ENGINEERING_MODES can be set on the board. + */ +#define MC_CMD_GET_PORT_MODES_OUT_V2_ENGINEERING_MODES_OFST 12 +#define MC_CMD_GET_PORT_MODES_OUT_V2_ENGINEERING_MODES_LEN 4 + + +/***********************************/ +/* MC_CMD_OVERRIDE_PORT_MODE + * Override flash config port mode for subsequent MC reboot(s). Override data + * is stored in the presistent data section of DMEM and activated on next MC + * warm reboot. 
A cold reboot resets the override. It is assumed that a + * sufficient number of PFs are available and that port mapping is valid for + * the new port mode, as the override does not affect PF configuration. + */ +#define MC_CMD_OVERRIDE_PORT_MODE 0x137 +#undef MC_CMD_0x137_PRIVILEGE_CTG + +#define MC_CMD_0x137_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_OVERRIDE_PORT_MODE_IN msgrequest */ +#define MC_CMD_OVERRIDE_PORT_MODE_IN_LEN 8 +#define MC_CMD_OVERRIDE_PORT_MODE_IN_FLAGS_OFST 0 +#define MC_CMD_OVERRIDE_PORT_MODE_IN_FLAGS_LEN 4 +#define MC_CMD_OVERRIDE_PORT_MODE_IN_ENABLE_LBN 0 +#define MC_CMD_OVERRIDE_PORT_MODE_IN_ENABLE_WIDTH 1 +/* New mode (TLV_PORT_MODE_*) to set, if override enabled */ +#define MC_CMD_OVERRIDE_PORT_MODE_IN_MODE_OFST 4 +#define MC_CMD_OVERRIDE_PORT_MODE_IN_MODE_LEN 4 + +/* MC_CMD_OVERRIDE_PORT_MODE_OUT msgresponse */ +#define MC_CMD_OVERRIDE_PORT_MODE_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_READ_ATB + * Sample voltages on the ATB + */ +#define MC_CMD_READ_ATB 0x100 +#undef MC_CMD_0x100_PRIVILEGE_CTG + +#define MC_CMD_0x100_PRIVILEGE_CTG SRIOV_CTG_INSECURE + +/* MC_CMD_READ_ATB_IN msgrequest */ +#define MC_CMD_READ_ATB_IN_LEN 16 +#define MC_CMD_READ_ATB_IN_SIGNAL_BUS_OFST 0 +#define MC_CMD_READ_ATB_IN_SIGNAL_BUS_LEN 4 +#define MC_CMD_READ_ATB_IN_BUS_CCOM 0x0 /* enum */ +#define MC_CMD_READ_ATB_IN_BUS_CKR 0x1 /* enum */ +#define MC_CMD_READ_ATB_IN_BUS_CPCIE 0x8 /* enum */ +#define MC_CMD_READ_ATB_IN_SIGNAL_EN_BITNO_OFST 4 +#define MC_CMD_READ_ATB_IN_SIGNAL_EN_BITNO_LEN 4 +#define MC_CMD_READ_ATB_IN_SIGNAL_SEL_OFST 8 +#define MC_CMD_READ_ATB_IN_SIGNAL_SEL_LEN 4 +#define MC_CMD_READ_ATB_IN_SETTLING_TIME_US_OFST 12 +#define MC_CMD_READ_ATB_IN_SETTLING_TIME_US_LEN 4 + +/* MC_CMD_READ_ATB_OUT msgresponse */ +#define MC_CMD_READ_ATB_OUT_LEN 4 +#define MC_CMD_READ_ATB_OUT_SAMPLE_MV_OFST 0 +#define MC_CMD_READ_ATB_OUT_SAMPLE_MV_LEN 4 + + +/***********************************/ +/* MC_CMD_GET_WORKAROUNDS + * Read the list of all implemented and all currently enabled workarounds. The + * enums here must correspond with those in MC_CMD_WORKAROUND. + */ +#define MC_CMD_GET_WORKAROUNDS 0x59 +#undef MC_CMD_0x59_PRIVILEGE_CTG + +#define MC_CMD_0x59_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_WORKAROUNDS_OUT msgresponse */ +#define MC_CMD_GET_WORKAROUNDS_OUT_LEN 8 +/* Each workaround is represented by a single bit according to the enums below. + */ +#define MC_CMD_GET_WORKAROUNDS_OUT_IMPLEMENTED_OFST 0 +#define MC_CMD_GET_WORKAROUNDS_OUT_IMPLEMENTED_LEN 4 +#define MC_CMD_GET_WORKAROUNDS_OUT_ENABLED_OFST 4 +#define MC_CMD_GET_WORKAROUNDS_OUT_ENABLED_LEN 4 +/* enum: Bug 17230 work around. */ +#define MC_CMD_GET_WORKAROUNDS_OUT_BUG17230 0x2 +/* enum: Bug 35388 work around (unsafe EVQ writes). */ +#define MC_CMD_GET_WORKAROUNDS_OUT_BUG35388 0x4 +/* enum: Bug35017 workaround (A64 tables must be identity map) */ +#define MC_CMD_GET_WORKAROUNDS_OUT_BUG35017 0x8 +/* enum: Bug 41750 present (MC_CMD_TRIGGER_INTERRUPT won't work) */ +#define MC_CMD_GET_WORKAROUNDS_OUT_BUG41750 0x10 +/* enum: Bug 42008 present (Interrupts can overtake associated events). Caution + * - before adding code that queries this workaround, remember that there's + * released Monza firmware that doesn't understand MC_CMD_WORKAROUND_BUG42008, + * and will hence (incorrectly) report that the bug doesn't exist. 
+ */ +#define MC_CMD_GET_WORKAROUNDS_OUT_BUG42008 0x20 +/* enum: Bug 26807 features present in firmware (multicast filter chaining) */ +#define MC_CMD_GET_WORKAROUNDS_OUT_BUG26807 0x40 +/* enum: Bug 61265 work around (broken EVQ TMR writes). */ +#define MC_CMD_GET_WORKAROUNDS_OUT_BUG61265 0x80 + + +/***********************************/ +/* MC_CMD_PRIVILEGE_MASK + * Read/set privileges of an arbitrary PCIe function + */ +#define MC_CMD_PRIVILEGE_MASK 0x5a +#undef MC_CMD_0x5a_PRIVILEGE_CTG + +#define MC_CMD_0x5a_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_PRIVILEGE_MASK_IN msgrequest */ +#define MC_CMD_PRIVILEGE_MASK_IN_LEN 8 +/* The target function to have its mask read or set e.g. PF 0 = 0xFFFF0000, VF + * 1,3 = 0x00030001 + */ +#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_OFST 0 +#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_LEN 4 +#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_PF_LBN 0 +#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_PF_WIDTH 16 +#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_VF_LBN 16 +#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_VF_WIDTH 16 +#define MC_CMD_PRIVILEGE_MASK_IN_VF_NULL 0xffff /* enum */ +/* New privilege mask to be set. The mask will only be changed if the MSB is + * set to 1. + */ +#define MC_CMD_PRIVILEGE_MASK_IN_NEW_MASK_OFST 4 +#define MC_CMD_PRIVILEGE_MASK_IN_NEW_MASK_LEN 4 +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ADMIN 0x1 /* enum */ +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_LINK 0x2 /* enum */ +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ONLOAD 0x4 /* enum */ +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_PTP 0x8 /* enum */ +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_INSECURE_FILTERS 0x10 /* enum */ +/* enum: Deprecated. Equivalent to MAC_SPOOFING_TX combined with CHANGE_MAC. */ +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING 0x20 +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_UNICAST 0x40 /* enum */ +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MULTICAST 0x80 /* enum */ +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_BROADCAST 0x100 /* enum */ +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ALL_MULTICAST 0x200 /* enum */ +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_PROMISCUOUS 0x400 /* enum */ +/* enum: Allows to set the TX packets' source MAC address to any arbitrary MAC + * adress. + */ +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING_TX 0x800 +/* enum: Privilege that allows a Function to change the MAC address configured + * in its associated vAdapter/vPort. + */ +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_CHANGE_MAC 0x1000 +/* enum: Privilege that allows a Function to install filters that specify VLANs + * that are not in the permit list for the associated vPort. This privilege is + * primarily to support ESX where vPorts are created that restrict traffic to + * only a set of permitted VLANs. See the vPort flag FLAG_VLAN_RESTRICT. + */ +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_UNRESTRICTED_VLAN 0x2000 +/* enum: Privilege for insecure commands. Commands that belong to this group + * are not permitted on secure adapters regardless of the privilege mask. + */ +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_INSECURE 0x4000 +/* enum: Trusted Server Adapter (TSA) / ServerLock. Privilege for + * administrator-level operations that are not allowed from the local host once + * an adapter has Bound to a remote ServerLock Controller (see doxbox + * SF-117064-DG for background). + */ +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ADMIN_TSA_UNBOUND 0x8000 +/* enum: Set this bit to indicate that a new privilege mask is to be set, + * otherwise the command will only read the existing mask. 
+ */ +#define MC_CMD_PRIVILEGE_MASK_IN_DO_CHANGE 0x80000000 + +/* MC_CMD_PRIVILEGE_MASK_OUT msgresponse */ +#define MC_CMD_PRIVILEGE_MASK_OUT_LEN 4 +/* For an admin function, always all the privileges are reported. */ +#define MC_CMD_PRIVILEGE_MASK_OUT_OLD_MASK_OFST 0 +#define MC_CMD_PRIVILEGE_MASK_OUT_OLD_MASK_LEN 4 + + +/***********************************/ +/* MC_CMD_LINK_STATE_MODE + * Read/set link state mode of a VF + */ +#define MC_CMD_LINK_STATE_MODE 0x5c +#undef MC_CMD_0x5c_PRIVILEGE_CTG + +#define MC_CMD_0x5c_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_LINK_STATE_MODE_IN msgrequest */ +#define MC_CMD_LINK_STATE_MODE_IN_LEN 8 +/* The target function to have its link state mode read or set, must be a VF + * e.g. VF 1,3 = 0x00030001 + */ +#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_OFST 0 +#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_LEN 4 +#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_LBN 0 +#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_WIDTH 16 +#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_LBN 16 +#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_WIDTH 16 +/* New link state mode to be set */ +#define MC_CMD_LINK_STATE_MODE_IN_NEW_MODE_OFST 4 +#define MC_CMD_LINK_STATE_MODE_IN_NEW_MODE_LEN 4 +#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_AUTO 0x0 /* enum */ +#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_UP 0x1 /* enum */ +#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_DOWN 0x2 /* enum */ +/* enum: Use this value to just read the existing setting without modifying it. + */ +#define MC_CMD_LINK_STATE_MODE_IN_DO_NOT_CHANGE 0xffffffff + +/* MC_CMD_LINK_STATE_MODE_OUT msgresponse */ +#define MC_CMD_LINK_STATE_MODE_OUT_LEN 4 +#define MC_CMD_LINK_STATE_MODE_OUT_OLD_MODE_OFST 0 +#define MC_CMD_LINK_STATE_MODE_OUT_OLD_MODE_LEN 4 + + +/***********************************/ +/* MC_CMD_GET_SNAPSHOT_LENGTH + * Obtain the current range of allowable values for the SNAPSHOT_LENGTH + * parameter to MC_CMD_INIT_RXQ. + */ +#define MC_CMD_GET_SNAPSHOT_LENGTH 0x101 +#undef MC_CMD_0x101_PRIVILEGE_CTG + +#define MC_CMD_0x101_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_SNAPSHOT_LENGTH_IN msgrequest */ +#define MC_CMD_GET_SNAPSHOT_LENGTH_IN_LEN 0 + +/* MC_CMD_GET_SNAPSHOT_LENGTH_OUT msgresponse */ +#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_LEN 8 +/* Minimum acceptable snapshot length. */ +#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MIN_OFST 0 +#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MIN_LEN 4 +/* Maximum acceptable snapshot length. 
*/ +#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MAX_OFST 4 +#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MAX_LEN 4 + + +/***********************************/ +/* MC_CMD_FUSE_DIAGS + * Additional fuse diagnostics + */ +#define MC_CMD_FUSE_DIAGS 0x102 +#undef MC_CMD_0x102_PRIVILEGE_CTG + +#define MC_CMD_0x102_PRIVILEGE_CTG SRIOV_CTG_INSECURE + +/* MC_CMD_FUSE_DIAGS_IN msgrequest */ +#define MC_CMD_FUSE_DIAGS_IN_LEN 0 + +/* MC_CMD_FUSE_DIAGS_OUT msgresponse */ +#define MC_CMD_FUSE_DIAGS_OUT_LEN 48 +/* Total number of mismatched bits between pairs in area 0 */ +#define MC_CMD_FUSE_DIAGS_OUT_AREA0_MISMATCH_BITS_OFST 0 +#define MC_CMD_FUSE_DIAGS_OUT_AREA0_MISMATCH_BITS_LEN 4 +/* Total number of unexpectedly clear (set in B but not A) bits in area 0 */ +#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_A_BAD_BITS_OFST 4 +#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_A_BAD_BITS_LEN 4 +/* Total number of unexpectedly clear (set in A but not B) bits in area 0 */ +#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_B_BAD_BITS_OFST 8 +#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_B_BAD_BITS_LEN 4 +/* Checksum of data after logical OR of pairs in area 0 */ +#define MC_CMD_FUSE_DIAGS_OUT_AREA0_CHECKSUM_OFST 12 +#define MC_CMD_FUSE_DIAGS_OUT_AREA0_CHECKSUM_LEN 4 +/* Total number of mismatched bits between pairs in area 1 */ +#define MC_CMD_FUSE_DIAGS_OUT_AREA1_MISMATCH_BITS_OFST 16 +#define MC_CMD_FUSE_DIAGS_OUT_AREA1_MISMATCH_BITS_LEN 4 +/* Total number of unexpectedly clear (set in B but not A) bits in area 1 */ +#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_A_BAD_BITS_OFST 20 +#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_A_BAD_BITS_LEN 4 +/* Total number of unexpectedly clear (set in A but not B) bits in area 1 */ +#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_B_BAD_BITS_OFST 24 +#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_B_BAD_BITS_LEN 4 +/* Checksum of data after logical OR of pairs in area 1 */ +#define MC_CMD_FUSE_DIAGS_OUT_AREA1_CHECKSUM_OFST 28 +#define MC_CMD_FUSE_DIAGS_OUT_AREA1_CHECKSUM_LEN 4 +/* Total number of mismatched bits between pairs in area 2 */ +#define MC_CMD_FUSE_DIAGS_OUT_AREA2_MISMATCH_BITS_OFST 32 +#define MC_CMD_FUSE_DIAGS_OUT_AREA2_MISMATCH_BITS_LEN 4 +/* Total number of unexpectedly clear (set in B but not A) bits in area 2 */ +#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_A_BAD_BITS_OFST 36 +#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_A_BAD_BITS_LEN 4 +/* Total number of unexpectedly clear (set in A but not B) bits in area 2 */ +#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_B_BAD_BITS_OFST 40 +#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_B_BAD_BITS_LEN 4 +/* Checksum of data after logical OR of pairs in area 2 */ +#define MC_CMD_FUSE_DIAGS_OUT_AREA2_CHECKSUM_OFST 44 +#define MC_CMD_FUSE_DIAGS_OUT_AREA2_CHECKSUM_LEN 4 + + +/***********************************/ +/* MC_CMD_PRIVILEGE_MODIFY + * Modify the privileges of a set of PCIe functions. Note that this operation + * only effects non-admin functions unless the admin privilege itself is + * included in one of the masks provided. + */ +#define MC_CMD_PRIVILEGE_MODIFY 0x60 +#undef MC_CMD_0x60_PRIVILEGE_CTG + +#define MC_CMD_0x60_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_PRIVILEGE_MODIFY_IN msgrequest */ +#define MC_CMD_PRIVILEGE_MODIFY_IN_LEN 16 +/* The groups of functions to have their privilege masks modified. 
*/ +#define MC_CMD_PRIVILEGE_MODIFY_IN_FN_GROUP_OFST 0 +#define MC_CMD_PRIVILEGE_MODIFY_IN_FN_GROUP_LEN 4 +#define MC_CMD_PRIVILEGE_MODIFY_IN_NONE 0x0 /* enum */ +#define MC_CMD_PRIVILEGE_MODIFY_IN_ALL 0x1 /* enum */ +#define MC_CMD_PRIVILEGE_MODIFY_IN_PFS_ONLY 0x2 /* enum */ +#define MC_CMD_PRIVILEGE_MODIFY_IN_VFS_ONLY 0x3 /* enum */ +#define MC_CMD_PRIVILEGE_MODIFY_IN_VFS_OF_PF 0x4 /* enum */ +#define MC_CMD_PRIVILEGE_MODIFY_IN_ONE 0x5 /* enum */ +/* For VFS_OF_PF specify the PF, for ONE specify the target function */ +#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_OFST 4 +#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_LEN 4 +#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_LBN 0 +#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_WIDTH 16 +#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_VF_LBN 16 +#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_VF_WIDTH 16 +/* Privileges to be added to the target functions. For privilege definitions + * refer to the command MC_CMD_PRIVILEGE_MASK + */ +#define MC_CMD_PRIVILEGE_MODIFY_IN_ADD_MASK_OFST 8 +#define MC_CMD_PRIVILEGE_MODIFY_IN_ADD_MASK_LEN 4 +/* Privileges to be removed from the target functions. For privilege + * definitions refer to the command MC_CMD_PRIVILEGE_MASK + */ +#define MC_CMD_PRIVILEGE_MODIFY_IN_REMOVE_MASK_OFST 12 +#define MC_CMD_PRIVILEGE_MODIFY_IN_REMOVE_MASK_LEN 4 + +/* MC_CMD_PRIVILEGE_MODIFY_OUT msgresponse */ +#define MC_CMD_PRIVILEGE_MODIFY_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_XPM_READ_BYTES + * Read XPM memory + */ +#define MC_CMD_XPM_READ_BYTES 0x103 +#undef MC_CMD_0x103_PRIVILEGE_CTG + +#define MC_CMD_0x103_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_XPM_READ_BYTES_IN msgrequest */ +#define MC_CMD_XPM_READ_BYTES_IN_LEN 8 +/* Start address (byte) */ +#define MC_CMD_XPM_READ_BYTES_IN_ADDR_OFST 0 +#define MC_CMD_XPM_READ_BYTES_IN_ADDR_LEN 4 +/* Count (bytes) */ +#define MC_CMD_XPM_READ_BYTES_IN_COUNT_OFST 4 +#define MC_CMD_XPM_READ_BYTES_IN_COUNT_LEN 4 + +/* MC_CMD_XPM_READ_BYTES_OUT msgresponse */ +#define MC_CMD_XPM_READ_BYTES_OUT_LENMIN 0 +#define MC_CMD_XPM_READ_BYTES_OUT_LENMAX 252 +#define MC_CMD_XPM_READ_BYTES_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_XPM_READ_BYTES_OUT_LEN(num) (0+1*(num)) +#define MC_CMD_XPM_READ_BYTES_OUT_DATA_NUM(len) (((len)-0)/1) +/* Data */ +#define MC_CMD_XPM_READ_BYTES_OUT_DATA_OFST 0 +#define MC_CMD_XPM_READ_BYTES_OUT_DATA_LEN 1 +#define MC_CMD_XPM_READ_BYTES_OUT_DATA_MINNUM 0 +#define MC_CMD_XPM_READ_BYTES_OUT_DATA_MAXNUM 252 +#define MC_CMD_XPM_READ_BYTES_OUT_DATA_MAXNUM_MCDI2 1020 + + +/***********************************/ +/* MC_CMD_XPM_WRITE_BYTES + * Write XPM memory + */ +#define MC_CMD_XPM_WRITE_BYTES 0x104 +#undef MC_CMD_0x104_PRIVILEGE_CTG + +#define MC_CMD_0x104_PRIVILEGE_CTG SRIOV_CTG_INSECURE + +/* MC_CMD_XPM_WRITE_BYTES_IN msgrequest */ +#define MC_CMD_XPM_WRITE_BYTES_IN_LENMIN 8 +#define MC_CMD_XPM_WRITE_BYTES_IN_LENMAX 252 +#define MC_CMD_XPM_WRITE_BYTES_IN_LENMAX_MCDI2 1020 +#define MC_CMD_XPM_WRITE_BYTES_IN_LEN(num) (8+1*(num)) +#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_NUM(len) (((len)-8)/1) +/* Start address (byte) */ +#define MC_CMD_XPM_WRITE_BYTES_IN_ADDR_OFST 0 +#define MC_CMD_XPM_WRITE_BYTES_IN_ADDR_LEN 4 +/* Count (bytes) */ +#define MC_CMD_XPM_WRITE_BYTES_IN_COUNT_OFST 4 +#define MC_CMD_XPM_WRITE_BYTES_IN_COUNT_LEN 4 +/* Data */ +#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_OFST 8 +#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_LEN 1 +#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_MINNUM 0 +#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_MAXNUM 244 +#define 
MC_CMD_XPM_WRITE_BYTES_IN_DATA_MAXNUM_MCDI2 1012 + +/* MC_CMD_XPM_WRITE_BYTES_OUT msgresponse */ +#define MC_CMD_XPM_WRITE_BYTES_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_XPM_READ_SECTOR + * Read XPM sector + */ +#define MC_CMD_XPM_READ_SECTOR 0x105 +#undef MC_CMD_0x105_PRIVILEGE_CTG + +#define MC_CMD_0x105_PRIVILEGE_CTG SRIOV_CTG_INSECURE + +/* MC_CMD_XPM_READ_SECTOR_IN msgrequest */ +#define MC_CMD_XPM_READ_SECTOR_IN_LEN 8 +/* Sector index */ +#define MC_CMD_XPM_READ_SECTOR_IN_INDEX_OFST 0 +#define MC_CMD_XPM_READ_SECTOR_IN_INDEX_LEN 4 +/* Sector size */ +#define MC_CMD_XPM_READ_SECTOR_IN_SIZE_OFST 4 +#define MC_CMD_XPM_READ_SECTOR_IN_SIZE_LEN 4 + +/* MC_CMD_XPM_READ_SECTOR_OUT msgresponse */ +#define MC_CMD_XPM_READ_SECTOR_OUT_LENMIN 4 +#define MC_CMD_XPM_READ_SECTOR_OUT_LENMAX 36 +#define MC_CMD_XPM_READ_SECTOR_OUT_LENMAX_MCDI2 36 +#define MC_CMD_XPM_READ_SECTOR_OUT_LEN(num) (4+1*(num)) +#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_NUM(len) (((len)-4)/1) +/* Sector type */ +#define MC_CMD_XPM_READ_SECTOR_OUT_TYPE_OFST 0 +#define MC_CMD_XPM_READ_SECTOR_OUT_TYPE_LEN 4 +#define MC_CMD_XPM_READ_SECTOR_OUT_BLANK 0x0 /* enum */ +#define MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_KEY_128 0x1 /* enum */ +#define MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_KEY_256 0x2 /* enum */ +#define MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_DATA 0x3 /* enum */ +#define MC_CMD_XPM_READ_SECTOR_OUT_INVALID 0xff /* enum */ +/* Sector data */ +#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_OFST 4 +#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_LEN 1 +#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_MINNUM 0 +#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_MAXNUM 32 +#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_MAXNUM_MCDI2 32 + + +/***********************************/ +/* MC_CMD_XPM_WRITE_SECTOR + * Write XPM sector + */ +#define MC_CMD_XPM_WRITE_SECTOR 0x106 +#undef MC_CMD_0x106_PRIVILEGE_CTG + +#define MC_CMD_0x106_PRIVILEGE_CTG SRIOV_CTG_INSECURE + +/* MC_CMD_XPM_WRITE_SECTOR_IN msgrequest */ +#define MC_CMD_XPM_WRITE_SECTOR_IN_LENMIN 12 +#define MC_CMD_XPM_WRITE_SECTOR_IN_LENMAX 44 +#define MC_CMD_XPM_WRITE_SECTOR_IN_LENMAX_MCDI2 44 +#define MC_CMD_XPM_WRITE_SECTOR_IN_LEN(num) (12+1*(num)) +#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_NUM(len) (((len)-12)/1) +/* If writing fails due to an uncorrectable error, try up to RETRIES following + * sectors (or until no more space available). If 0, only one write attempt is + * made. Note that uncorrectable errors are unlikely, thanks to XPM self-repair + * mechanism. 
+ */ +#define MC_CMD_XPM_WRITE_SECTOR_IN_RETRIES_OFST 0 +#define MC_CMD_XPM_WRITE_SECTOR_IN_RETRIES_LEN 1 +#define MC_CMD_XPM_WRITE_SECTOR_IN_RESERVED_OFST 1 +#define MC_CMD_XPM_WRITE_SECTOR_IN_RESERVED_LEN 3 +/* Sector type */ +#define MC_CMD_XPM_WRITE_SECTOR_IN_TYPE_OFST 4 +#define MC_CMD_XPM_WRITE_SECTOR_IN_TYPE_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_XPM_READ_SECTOR/MC_CMD_XPM_READ_SECTOR_OUT/TYPE */ +/* Sector size */ +#define MC_CMD_XPM_WRITE_SECTOR_IN_SIZE_OFST 8 +#define MC_CMD_XPM_WRITE_SECTOR_IN_SIZE_LEN 4 +/* Sector data */ +#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_OFST 12 +#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_LEN 1 +#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_MINNUM 0 +#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_MAXNUM 32 +#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_MAXNUM_MCDI2 32 + +/* MC_CMD_XPM_WRITE_SECTOR_OUT msgresponse */ +#define MC_CMD_XPM_WRITE_SECTOR_OUT_LEN 4 +/* New sector index */ +#define MC_CMD_XPM_WRITE_SECTOR_OUT_INDEX_OFST 0 +#define MC_CMD_XPM_WRITE_SECTOR_OUT_INDEX_LEN 4 + + +/***********************************/ +/* MC_CMD_XPM_INVALIDATE_SECTOR + * Invalidate XPM sector + */ +#define MC_CMD_XPM_INVALIDATE_SECTOR 0x107 +#undef MC_CMD_0x107_PRIVILEGE_CTG + +#define MC_CMD_0x107_PRIVILEGE_CTG SRIOV_CTG_INSECURE + +/* MC_CMD_XPM_INVALIDATE_SECTOR_IN msgrequest */ +#define MC_CMD_XPM_INVALIDATE_SECTOR_IN_LEN 4 +/* Sector index */ +#define MC_CMD_XPM_INVALIDATE_SECTOR_IN_INDEX_OFST 0 +#define MC_CMD_XPM_INVALIDATE_SECTOR_IN_INDEX_LEN 4 + +/* MC_CMD_XPM_INVALIDATE_SECTOR_OUT msgresponse */ +#define MC_CMD_XPM_INVALIDATE_SECTOR_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_XPM_BLANK_CHECK + * Blank-check XPM memory and report bad locations + */ +#define MC_CMD_XPM_BLANK_CHECK 0x108 +#undef MC_CMD_0x108_PRIVILEGE_CTG + +#define MC_CMD_0x108_PRIVILEGE_CTG SRIOV_CTG_INSECURE + +/* MC_CMD_XPM_BLANK_CHECK_IN msgrequest */ +#define MC_CMD_XPM_BLANK_CHECK_IN_LEN 8 +/* Start address (byte) */ +#define MC_CMD_XPM_BLANK_CHECK_IN_ADDR_OFST 0 +#define MC_CMD_XPM_BLANK_CHECK_IN_ADDR_LEN 4 +/* Count (bytes) */ +#define MC_CMD_XPM_BLANK_CHECK_IN_COUNT_OFST 4 +#define MC_CMD_XPM_BLANK_CHECK_IN_COUNT_LEN 4 + +/* MC_CMD_XPM_BLANK_CHECK_OUT msgresponse */ +#define MC_CMD_XPM_BLANK_CHECK_OUT_LENMIN 4 +#define MC_CMD_XPM_BLANK_CHECK_OUT_LENMAX 252 +#define MC_CMD_XPM_BLANK_CHECK_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_XPM_BLANK_CHECK_OUT_LEN(num) (4+2*(num)) +#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_NUM(len) (((len)-4)/2) +/* Total number of bad (non-blank) locations */ +#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_COUNT_OFST 0 +#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_COUNT_LEN 4 +/* Addresses of bad locations (may be less than BAD_COUNT, if all cannot fit + * into MCDI response) + */ +#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_OFST 4 +#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_LEN 2 +#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_MINNUM 0 +#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_MAXNUM 124 +#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_MAXNUM_MCDI2 508 + + +/***********************************/ +/* MC_CMD_XPM_REPAIR + * Blank-check and repair XPM memory + */ +#define MC_CMD_XPM_REPAIR 0x109 +#undef MC_CMD_0x109_PRIVILEGE_CTG + +#define MC_CMD_0x109_PRIVILEGE_CTG SRIOV_CTG_INSECURE + +/* MC_CMD_XPM_REPAIR_IN msgrequest */ +#define MC_CMD_XPM_REPAIR_IN_LEN 8 +/* Start address (byte) */ +#define MC_CMD_XPM_REPAIR_IN_ADDR_OFST 0 +#define MC_CMD_XPM_REPAIR_IN_ADDR_LEN 4 +/* Count (bytes) */ +#define MC_CMD_XPM_REPAIR_IN_COUNT_OFST 4 +#define MC_CMD_XPM_REPAIR_IN_COUNT_LEN 4 
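These XPM blank-check/repair messages follow the same encoding convention as every other MCDI command in this header: each msgrequest/msgresponse is a flat byte buffer, the _OFST/_LEN pairs give the byte offset and size of each field, and variable-length responses are sized with the matching _NUM(len) macro. As a minimal illustration only (not the driver's actual MCDI plumbing), the C sketch below encodes an MC_CMD_XPM_BLANK_CHECK request and decodes the bad-address list from its response. It assumes the usual little-endian MCDI field encoding and that the definitions above are in scope; the mcdi_put_dword()/mcdi_get_word() helpers are hypothetical stand-ins for whatever accessors the transport layer really provides.

#include <stdint.h>
#include <string.h>

/* Hypothetical little-endian accessors: MCDI payloads are byte buffers with
 * fields placed at the _OFST offsets defined in this header. */
static void mcdi_put_dword(uint8_t *buf, unsigned int ofst, uint32_t value)
{
	buf[ofst + 0] = (uint8_t)(value & 0xff);
	buf[ofst + 1] = (uint8_t)((value >> 8) & 0xff);
	buf[ofst + 2] = (uint8_t)((value >> 16) & 0xff);
	buf[ofst + 3] = (uint8_t)((value >> 24) & 0xff);
}

static uint16_t mcdi_get_word(const uint8_t *buf, unsigned int ofst)
{
	return (uint16_t)((uint16_t)buf[ofst] | ((uint16_t)buf[ofst + 1] << 8));
}

/* Encode an MC_CMD_XPM_BLANK_CHECK request covering [addr, addr + count). */
static void xpm_blank_check_encode(uint8_t inbuf[MC_CMD_XPM_BLANK_CHECK_IN_LEN],
				   uint32_t addr, uint32_t count)
{
	memset(inbuf, 0, MC_CMD_XPM_BLANK_CHECK_IN_LEN);
	mcdi_put_dword(inbuf, MC_CMD_XPM_BLANK_CHECK_IN_ADDR_OFST, addr);
	mcdi_put_dword(inbuf, MC_CMD_XPM_BLANK_CHECK_IN_COUNT_OFST, count);
}

/* Decode the 16-bit bad-location entries from a response of 'outlen' bytes;
 * the entry count follows from the actual response length via _NUM(len). */
static unsigned int xpm_blank_check_bad_addrs(const uint8_t *outbuf,
					      size_t outlen,
					      uint16_t *bad, unsigned int max)
{
	unsigned int i;
	unsigned int n =
	    (unsigned int)MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_NUM(outlen);

	if (n > max)
		n = max;
	for (i = 0; i < n; i++)
		bad[i] = mcdi_get_word(outbuf,
		    MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_OFST +
		    i * MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_LEN);
	return n;
}

The same offset-based pattern applies to the MC_CMD_XPM_REPAIR_IN request immediately above, whose ADDR and COUNT fields sit at the same byte offsets (0 and 4).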
+ +/* MC_CMD_XPM_REPAIR_OUT msgresponse */ +#define MC_CMD_XPM_REPAIR_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_XPM_DECODER_TEST + * Test XPM memory address decoders for gross manufacturing defects. Can only + * be performed on an unprogrammed part. + */ +#define MC_CMD_XPM_DECODER_TEST 0x10a +#undef MC_CMD_0x10a_PRIVILEGE_CTG + +#define MC_CMD_0x10a_PRIVILEGE_CTG SRIOV_CTG_INSECURE + +/* MC_CMD_XPM_DECODER_TEST_IN msgrequest */ +#define MC_CMD_XPM_DECODER_TEST_IN_LEN 0 + +/* MC_CMD_XPM_DECODER_TEST_OUT msgresponse */ +#define MC_CMD_XPM_DECODER_TEST_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_XPM_WRITE_TEST + * XPM memory write test. Test XPM write logic for gross manufacturing defects + * by writing to a dedicated test row. There are 16 locations in the test row + * and the test can only be performed on locations that have not been + * previously used (i.e. can be run at most 16 times). The test will pick the + * first available location to use, or fail with ENOSPC if none left. + */ +#define MC_CMD_XPM_WRITE_TEST 0x10b +#undef MC_CMD_0x10b_PRIVILEGE_CTG + +#define MC_CMD_0x10b_PRIVILEGE_CTG SRIOV_CTG_INSECURE + +/* MC_CMD_XPM_WRITE_TEST_IN msgrequest */ +#define MC_CMD_XPM_WRITE_TEST_IN_LEN 0 + +/* MC_CMD_XPM_WRITE_TEST_OUT msgresponse */ +#define MC_CMD_XPM_WRITE_TEST_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_EXEC_SIGNED + * Check the CMAC of the contents of IMEM and DMEM against the value supplied + * and if correct begin execution from the start of IMEM. The caller supplies a + * key ID, the length of IMEM and DMEM to validate and the expected CMAC. CMAC + * computation runs from the start of IMEM, and from the start of DMEM + 16k, + * to match flash booting. The command will respond with EINVAL if the CMAC + * does match, otherwise it will respond with success before it jumps to IMEM. + */ +#define MC_CMD_EXEC_SIGNED 0x10c +#undef MC_CMD_0x10c_PRIVILEGE_CTG + +#define MC_CMD_0x10c_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND + +/* MC_CMD_EXEC_SIGNED_IN msgrequest */ +#define MC_CMD_EXEC_SIGNED_IN_LEN 28 +/* the length of code to include in the CMAC */ +#define MC_CMD_EXEC_SIGNED_IN_CODELEN_OFST 0 +#define MC_CMD_EXEC_SIGNED_IN_CODELEN_LEN 4 +/* the length of date to include in the CMAC */ +#define MC_CMD_EXEC_SIGNED_IN_DATALEN_OFST 4 +#define MC_CMD_EXEC_SIGNED_IN_DATALEN_LEN 4 +/* the XPM sector containing the key to use */ +#define MC_CMD_EXEC_SIGNED_IN_KEYSECTOR_OFST 8 +#define MC_CMD_EXEC_SIGNED_IN_KEYSECTOR_LEN 4 +/* the expected CMAC value */ +#define MC_CMD_EXEC_SIGNED_IN_CMAC_OFST 12 +#define MC_CMD_EXEC_SIGNED_IN_CMAC_LEN 16 + +/* MC_CMD_EXEC_SIGNED_OUT msgresponse */ +#define MC_CMD_EXEC_SIGNED_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_PREPARE_SIGNED + * Prepare to upload a signed image. This will scrub the specified length of + * the data region, which must be at least as large as the DATALEN supplied to + * MC_CMD_EXEC_SIGNED. 
+ */ +#define MC_CMD_PREPARE_SIGNED 0x10d +#undef MC_CMD_0x10d_PRIVILEGE_CTG + +#define MC_CMD_0x10d_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND + +/* MC_CMD_PREPARE_SIGNED_IN msgrequest */ +#define MC_CMD_PREPARE_SIGNED_IN_LEN 4 +/* the length of data area to clear */ +#define MC_CMD_PREPARE_SIGNED_IN_DATALEN_OFST 0 +#define MC_CMD_PREPARE_SIGNED_IN_DATALEN_LEN 4 + +/* MC_CMD_PREPARE_SIGNED_OUT msgresponse */ +#define MC_CMD_PREPARE_SIGNED_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_SET_SECURITY_RULE + * Set blacklist and/or whitelist action for a particular match criteria. + * (Medford-only; for use by SolarSecure apps, not directly by drivers. See + * SF-114946-SW.) NOTE - this message definition is provisional. It has not yet + * been used in any released code and may change during development. This note + * will be removed once it is regarded as stable. + */ +#define MC_CMD_SET_SECURITY_RULE 0x10f +#undef MC_CMD_0x10f_PRIVILEGE_CTG + +#define MC_CMD_0x10f_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND + +/* MC_CMD_SET_SECURITY_RULE_IN msgrequest */ +#define MC_CMD_SET_SECURITY_RULE_IN_LEN 92 +/* fields to include in match criteria */ +#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_FIELDS_OFST 0 +#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_FIELDS_LEN 4 +#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_IP_LBN 0 +#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_IP_WIDTH 1 +#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_LOCAL_IP_LBN 1 +#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_LOCAL_IP_WIDTH 1 +#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_MAC_LBN 2 +#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_MAC_WIDTH 1 +#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_PORT_LBN 3 +#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_PORT_WIDTH 1 +#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_LOCAL_MAC_LBN 4 +#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_LOCAL_MAC_WIDTH 1 +#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_LOCAL_PORT_LBN 5 +#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_LOCAL_PORT_WIDTH 1 +#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_ETHER_TYPE_LBN 6 +#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_ETHER_TYPE_WIDTH 1 +#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_INNER_VLAN_LBN 7 +#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_INNER_VLAN_WIDTH 1 +#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_OUTER_VLAN_LBN 8 +#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_OUTER_VLAN_WIDTH 1 +#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_IP_PROTO_LBN 9 +#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_IP_PROTO_WIDTH 1 +#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_PHYSICAL_PORT_LBN 10 +#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_PHYSICAL_PORT_WIDTH 1 +#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_RESERVED_LBN 11 +#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_RESERVED_WIDTH 1 +#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_SUBNET_ID_LBN 12 +#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_SUBNET_ID_WIDTH 1 +#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_PORTRANGE_ID_LBN 13 +#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_PORTRANGE_ID_WIDTH 1 +#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_LOCAL_PORTRANGE_ID_LBN 14 +#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_LOCAL_PORTRANGE_ID_WIDTH 1 +/* remote MAC address to match (as bytes in network order) */ +#define MC_CMD_SET_SECURITY_RULE_IN_REMOTE_MAC_OFST 4 +#define MC_CMD_SET_SECURITY_RULE_IN_REMOTE_MAC_LEN 6 +/* remote port to match (as bytes in network order) */ +#define MC_CMD_SET_SECURITY_RULE_IN_REMOTE_PORT_OFST 10 +#define MC_CMD_SET_SECURITY_RULE_IN_REMOTE_PORT_LEN 2 +/* local MAC address to match (as bytes in network order) 
*/ +#define MC_CMD_SET_SECURITY_RULE_IN_LOCAL_MAC_OFST 12 +#define MC_CMD_SET_SECURITY_RULE_IN_LOCAL_MAC_LEN 6 +/* local port to match (as bytes in network order) */ +#define MC_CMD_SET_SECURITY_RULE_IN_LOCAL_PORT_OFST 18 +#define MC_CMD_SET_SECURITY_RULE_IN_LOCAL_PORT_LEN 2 +/* Ethernet type to match (as bytes in network order) */ +#define MC_CMD_SET_SECURITY_RULE_IN_ETHER_TYPE_OFST 20 +#define MC_CMD_SET_SECURITY_RULE_IN_ETHER_TYPE_LEN 2 +/* Inner VLAN tag to match (as bytes in network order) */ +#define MC_CMD_SET_SECURITY_RULE_IN_INNER_VLAN_OFST 22 +#define MC_CMD_SET_SECURITY_RULE_IN_INNER_VLAN_LEN 2 +/* Outer VLAN tag to match (as bytes in network order) */ +#define MC_CMD_SET_SECURITY_RULE_IN_OUTER_VLAN_OFST 24 +#define MC_CMD_SET_SECURITY_RULE_IN_OUTER_VLAN_LEN 2 +/* IP protocol to match (in low byte; set high byte to 0) */ +#define MC_CMD_SET_SECURITY_RULE_IN_IP_PROTO_OFST 26 +#define MC_CMD_SET_SECURITY_RULE_IN_IP_PROTO_LEN 2 +/* Physical port to match (as little-endian 32-bit value) */ +#define MC_CMD_SET_SECURITY_RULE_IN_PHYSICAL_PORT_OFST 28 +#define MC_CMD_SET_SECURITY_RULE_IN_PHYSICAL_PORT_LEN 4 +/* Reserved; set to 0 */ +#define MC_CMD_SET_SECURITY_RULE_IN_RESERVED_OFST 32 +#define MC_CMD_SET_SECURITY_RULE_IN_RESERVED_LEN 4 +/* remote IP address to match (as bytes in network order; set last 12 bytes to + * 0 for IPv4 address) + */ +#define MC_CMD_SET_SECURITY_RULE_IN_REMOTE_IP_OFST 36 +#define MC_CMD_SET_SECURITY_RULE_IN_REMOTE_IP_LEN 16 +/* local IP address to match (as bytes in network order; set last 12 bytes to 0 + * for IPv4 address) + */ +#define MC_CMD_SET_SECURITY_RULE_IN_LOCAL_IP_OFST 52 +#define MC_CMD_SET_SECURITY_RULE_IN_LOCAL_IP_LEN 16 +/* remote subnet ID to match (as little-endian 32-bit value); note that remote + * subnets are matched by mapping the remote IP address to a "subnet ID" via a + * data structure which must already have been configured using + * MC_CMD_SUBNET_MAP_SET_NODE appropriately + */ +#define MC_CMD_SET_SECURITY_RULE_IN_REMOTE_SUBNET_ID_OFST 68 +#define MC_CMD_SET_SECURITY_RULE_IN_REMOTE_SUBNET_ID_LEN 4 +/* remote portrange ID to match (as little-endian 32-bit value); note that + * remote port ranges are matched by mapping the remote port to a "portrange + * ID" via a data structure which must already have been configured using + * MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE + */ +#define MC_CMD_SET_SECURITY_RULE_IN_REMOTE_PORTRANGE_ID_OFST 72 +#define MC_CMD_SET_SECURITY_RULE_IN_REMOTE_PORTRANGE_ID_LEN 4 +/* local portrange ID to match (as little-endian 32-bit value); note that local + * port ranges are matched by mapping the local port to a "portrange ID" via a + * data structure which must already have been configured using + * MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE + */ +#define MC_CMD_SET_SECURITY_RULE_IN_LOCAL_PORTRANGE_ID_OFST 76 +#define MC_CMD_SET_SECURITY_RULE_IN_LOCAL_PORTRANGE_ID_LEN 4 +/* set the action for transmitted packets matching this rule */ +#define MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_OFST 80 +#define MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_LEN 4 +/* enum: make no decision */ +#define MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_NONE 0x0 +/* enum: decide to accept the packet */ +#define MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_WHITELIST 0x1 +/* enum: decide to drop the packet */ +#define MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_BLACKLIST 0x2 +/* enum: inform the TSA controller about some sample of packets matching this + * rule (via MC_CMD_TSA_INFO_IN_PKT_SAMPLE messages); may be bitwise-ORed with + * either the WHITELIST or BLACKLIST action 
+ */ +#define MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_SAMPLE 0x4 +/* enum: do not change the current TX action */ +#define MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_UNCHANGED 0xffffffff +/* set the action for received packets matching this rule */ +#define MC_CMD_SET_SECURITY_RULE_IN_RX_ACTION_OFST 84 +#define MC_CMD_SET_SECURITY_RULE_IN_RX_ACTION_LEN 4 +/* enum: make no decision */ +#define MC_CMD_SET_SECURITY_RULE_IN_RX_ACTION_NONE 0x0 +/* enum: decide to accept the packet */ +#define MC_CMD_SET_SECURITY_RULE_IN_RX_ACTION_WHITELIST 0x1 +/* enum: decide to drop the packet */ +#define MC_CMD_SET_SECURITY_RULE_IN_RX_ACTION_BLACKLIST 0x2 +/* enum: inform the TSA controller about some sample of packets matching this + * rule (via MC_CMD_TSA_INFO_IN_PKT_SAMPLE messages); may be bitwise-ORed with + * either the WHITELIST or BLACKLIST action + */ +#define MC_CMD_SET_SECURITY_RULE_IN_RX_ACTION_SAMPLE 0x4 +/* enum: do not change the current RX action */ +#define MC_CMD_SET_SECURITY_RULE_IN_RX_ACTION_UNCHANGED 0xffffffff +/* counter ID to associate with this rule; IDs are allocated using + * MC_CMD_SECURITY_RULE_COUNTER_ALLOC + */ +#define MC_CMD_SET_SECURITY_RULE_IN_COUNTER_ID_OFST 88 +#define MC_CMD_SET_SECURITY_RULE_IN_COUNTER_ID_LEN 4 +/* enum: special value for the null counter ID */ +#define MC_CMD_SET_SECURITY_RULE_IN_COUNTER_ID_NONE 0x0 +/* enum: special value to tell the MC to allocate an available counter */ +#define MC_CMD_SET_SECURITY_RULE_IN_COUNTER_ID_SW_AUTO 0xeeeeeeee +/* enum: special value to request use of hardware counter (Medford2 only) */ +#define MC_CMD_SET_SECURITY_RULE_IN_COUNTER_ID_HW 0xffffffff + +/* MC_CMD_SET_SECURITY_RULE_OUT msgresponse */ +#define MC_CMD_SET_SECURITY_RULE_OUT_LEN 32 +/* new reference count for uses of counter ID */ +#define MC_CMD_SET_SECURITY_RULE_OUT_COUNTER_REFCNT_OFST 0 +#define MC_CMD_SET_SECURITY_RULE_OUT_COUNTER_REFCNT_LEN 4 +/* constructed match bits for this rule (as a tracing aid only) */ +#define MC_CMD_SET_SECURITY_RULE_OUT_LUE_MATCH_BITS_OFST 4 +#define MC_CMD_SET_SECURITY_RULE_OUT_LUE_MATCH_BITS_LEN 12 +/* constructed discriminator bits for this rule (as a tracing aid only) */ +#define MC_CMD_SET_SECURITY_RULE_OUT_LUE_DISCRIMINATOR_OFST 16 +#define MC_CMD_SET_SECURITY_RULE_OUT_LUE_DISCRIMINATOR_LEN 4 +/* base location for probes for this rule (as a tracing aid only) */ +#define MC_CMD_SET_SECURITY_RULE_OUT_LUE_PROBE_BASE_OFST 20 +#define MC_CMD_SET_SECURITY_RULE_OUT_LUE_PROBE_BASE_LEN 4 +/* step for probes for this rule (as a tracing aid only) */ +#define MC_CMD_SET_SECURITY_RULE_OUT_LUE_PROBE_STEP_OFST 24 +#define MC_CMD_SET_SECURITY_RULE_OUT_LUE_PROBE_STEP_LEN 4 +/* ID for reading back the counter */ +#define MC_CMD_SET_SECURITY_RULE_OUT_COUNTER_ID_OFST 28 +#define MC_CMD_SET_SECURITY_RULE_OUT_COUNTER_ID_LEN 4 + + +/***********************************/ +/* MC_CMD_RESET_SECURITY_RULES + * Reset all blacklist and whitelist actions for a particular physical port, or + * all ports. (Medford-only; for use by SolarSecure apps, not directly by + * drivers. See SF-114946-SW.) NOTE - this message definition is provisional. + * It has not yet been used in any released code and may change during + * development. This note will be removed once it is regarded as stable. 
+ */ +#define MC_CMD_RESET_SECURITY_RULES 0x110 +#undef MC_CMD_0x110_PRIVILEGE_CTG + +#define MC_CMD_0x110_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND + +/* MC_CMD_RESET_SECURITY_RULES_IN msgrequest */ +#define MC_CMD_RESET_SECURITY_RULES_IN_LEN 4 +/* index of physical port to reset (or ALL_PHYSICAL_PORTS to reset all) */ +#define MC_CMD_RESET_SECURITY_RULES_IN_PHYSICAL_PORT_OFST 0 +#define MC_CMD_RESET_SECURITY_RULES_IN_PHYSICAL_PORT_LEN 4 +/* enum: special value to reset all physical ports */ +#define MC_CMD_RESET_SECURITY_RULES_IN_ALL_PHYSICAL_PORTS 0xffffffff + +/* MC_CMD_RESET_SECURITY_RULES_OUT msgresponse */ +#define MC_CMD_RESET_SECURITY_RULES_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_GET_SECURITY_RULESET_VERSION + * Return a large hash value representing a "version" of the complete set of + * currently active blacklist / whitelist rules and associated data structures. + * (Medford-only; for use by SolarSecure apps, not directly by drivers. See + * SF-114946-SW.) NOTE - this message definition is provisional. It has not yet + * been used in any released code and may change during development. This note + * will be removed once it is regarded as stable. + */ +#define MC_CMD_GET_SECURITY_RULESET_VERSION 0x111 +#undef MC_CMD_0x111_PRIVILEGE_CTG + +#define MC_CMD_0x111_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_GET_SECURITY_RULESET_VERSION_IN msgrequest */ +#define MC_CMD_GET_SECURITY_RULESET_VERSION_IN_LEN 0 + +/* MC_CMD_GET_SECURITY_RULESET_VERSION_OUT msgresponse */ +#define MC_CMD_GET_SECURITY_RULESET_VERSION_OUT_LENMIN 1 +#define MC_CMD_GET_SECURITY_RULESET_VERSION_OUT_LENMAX 252 +#define MC_CMD_GET_SECURITY_RULESET_VERSION_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_GET_SECURITY_RULESET_VERSION_OUT_LEN(num) (0+1*(num)) +#define MC_CMD_GET_SECURITY_RULESET_VERSION_OUT_VERSION_NUM(len) (((len)-0)/1) +/* Opaque hash value; length may vary depending on the hash scheme used */ +#define MC_CMD_GET_SECURITY_RULESET_VERSION_OUT_VERSION_OFST 0 +#define MC_CMD_GET_SECURITY_RULESET_VERSION_OUT_VERSION_LEN 1 +#define MC_CMD_GET_SECURITY_RULESET_VERSION_OUT_VERSION_MINNUM 1 +#define MC_CMD_GET_SECURITY_RULESET_VERSION_OUT_VERSION_MAXNUM 252 +#define MC_CMD_GET_SECURITY_RULESET_VERSION_OUT_VERSION_MAXNUM_MCDI2 1020 + + +/***********************************/ +/* MC_CMD_SECURITY_RULE_COUNTER_ALLOC + * Allocate counters for use with blacklist / whitelist rules. (Medford-only; + * for use by SolarSecure apps, not directly by drivers. See SF-114946-SW.) + * NOTE - this message definition is provisional. It has not yet been used in + * any released code and may change during development. This note will be + * removed once it is regarded as stable. 
+ */
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC 0x112
+#undef MC_CMD_0x112_PRIVILEGE_CTG
+
+#define MC_CMD_0x112_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
+
+/* MC_CMD_SECURITY_RULE_COUNTER_ALLOC_IN msgrequest */
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_IN_LEN 4
+/* the number of new counter IDs to request */
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_IN_NUM_COUNTERS_OFST 0
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_IN_NUM_COUNTERS_LEN 4
+
+/* MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT msgresponse */
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT_LENMIN 4
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT_LENMAX 252
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT_LEN(num) (4+4*(num))
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT_COUNTER_ID_NUM(len) (((len)-4)/4)
+/* the number of new counter IDs allocated (may be less than the number
+ * requested if resources are unavailable)
+ */
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT_NUM_COUNTERS_OFST 0
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT_NUM_COUNTERS_LEN 4
+/* new counter ID(s) */
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT_COUNTER_ID_OFST 4
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT_COUNTER_ID_LEN 4
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT_COUNTER_ID_MINNUM 0
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT_COUNTER_ID_MAXNUM 62
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT_COUNTER_ID_MAXNUM_MCDI2 254
+
+
+/***********************************/
+/* MC_CMD_SECURITY_RULE_COUNTER_FREE
+ * Free counters previously allocated for use with blacklist / whitelist
+ * rules. (Medford-only; for use by SolarSecure apps, not directly by drivers.
+ * See SF-114946-SW.) NOTE - this message definition is provisional. It has not
+ * yet been used in any released code and may change during development. This
+ * note will be removed once it is regarded as stable.
+ */
+#define MC_CMD_SECURITY_RULE_COUNTER_FREE 0x113
+#undef MC_CMD_0x113_PRIVILEGE_CTG
+
+#define MC_CMD_0x113_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
+
+/* MC_CMD_SECURITY_RULE_COUNTER_FREE_IN msgrequest */
+#define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_LENMIN 4
+#define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_LENMAX 252
+#define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_LENMAX_MCDI2 1020
+#define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_LEN(num) (4+4*(num))
+#define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_COUNTER_ID_NUM(len) (((len)-4)/4)
+/* the number of counter IDs to free */
+#define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_NUM_COUNTERS_OFST 0
+#define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_NUM_COUNTERS_LEN 4
+/* the counter ID(s) to free */
+#define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_COUNTER_ID_OFST 4
+#define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_COUNTER_ID_LEN 4
+#define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_COUNTER_ID_MINNUM 0
+#define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_COUNTER_ID_MAXNUM 62
+#define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_COUNTER_ID_MAXNUM_MCDI2 254
+
+/* MC_CMD_SECURITY_RULE_COUNTER_FREE_OUT msgresponse */
+#define MC_CMD_SECURITY_RULE_COUNTER_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SUBNET_MAP_SET_NODE
+ * Atomically update a trie node in the map of subnets to subnet IDs. The
+ * constants in the descriptions of the fields of this message may be retrieved
+ * by the GET_SECURITY_RULE_INFO op of MC_CMD_GET_PARSER_DISP_INFO. (Medford-
+ * only; for use by SolarSecure apps, not directly by drivers. See
+ * SF-114946-SW.) NOTE - this message definition is provisional.
It has not yet + * been used in any released code and may change during development. This note + * will be removed once it is regarded as stable. + */ +#define MC_CMD_SUBNET_MAP_SET_NODE 0x114 +#undef MC_CMD_0x114_PRIVILEGE_CTG + +#define MC_CMD_0x114_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND + +/* MC_CMD_SUBNET_MAP_SET_NODE_IN msgrequest */ +#define MC_CMD_SUBNET_MAP_SET_NODE_IN_LENMIN 6 +#define MC_CMD_SUBNET_MAP_SET_NODE_IN_LENMAX 252 +#define MC_CMD_SUBNET_MAP_SET_NODE_IN_LENMAX_MCDI2 1020 +#define MC_CMD_SUBNET_MAP_SET_NODE_IN_LEN(num) (4+2*(num)) +#define MC_CMD_SUBNET_MAP_SET_NODE_IN_ENTRY_NUM(len) (((len)-4)/2) +/* node to update in the range 0 .. SUBNET_MAP_NUM_NODES-1 */ +#define MC_CMD_SUBNET_MAP_SET_NODE_IN_NODE_ID_OFST 0 +#define MC_CMD_SUBNET_MAP_SET_NODE_IN_NODE_ID_LEN 4 +/* SUBNET_MAP_NUM_ENTRIES_PER_NODE new entries; each entry is either a pointer + * to the next node, expressed as an offset in the trie memory (i.e. node ID + * multiplied by SUBNET_MAP_NUM_ENTRIES_PER_NODE), or a leaf value in the range + * SUBNET_ID_MIN .. SUBNET_ID_MAX + */ +#define MC_CMD_SUBNET_MAP_SET_NODE_IN_ENTRY_OFST 4 +#define MC_CMD_SUBNET_MAP_SET_NODE_IN_ENTRY_LEN 2 +#define MC_CMD_SUBNET_MAP_SET_NODE_IN_ENTRY_MINNUM 1 +#define MC_CMD_SUBNET_MAP_SET_NODE_IN_ENTRY_MAXNUM 124 +#define MC_CMD_SUBNET_MAP_SET_NODE_IN_ENTRY_MAXNUM_MCDI2 508 + +/* MC_CMD_SUBNET_MAP_SET_NODE_OUT msgresponse */ +#define MC_CMD_SUBNET_MAP_SET_NODE_OUT_LEN 0 + +/* PORTRANGE_TREE_ENTRY structuredef */ +#define PORTRANGE_TREE_ENTRY_LEN 4 +/* key for branch nodes (<= key takes left branch, > key takes right branch), + * or magic value for leaf nodes + */ +#define PORTRANGE_TREE_ENTRY_BRANCH_KEY_OFST 0 +#define PORTRANGE_TREE_ENTRY_BRANCH_KEY_LEN 2 +#define PORTRANGE_TREE_ENTRY_LEAF_NODE_KEY 0xffff /* enum */ +#define PORTRANGE_TREE_ENTRY_BRANCH_KEY_LBN 0 +#define PORTRANGE_TREE_ENTRY_BRANCH_KEY_WIDTH 16 +/* final portrange ID for leaf nodes (don't care for branch nodes) */ +#define PORTRANGE_TREE_ENTRY_LEAF_PORTRANGE_ID_OFST 2 +#define PORTRANGE_TREE_ENTRY_LEAF_PORTRANGE_ID_LEN 2 +#define PORTRANGE_TREE_ENTRY_LEAF_PORTRANGE_ID_LBN 16 +#define PORTRANGE_TREE_ENTRY_LEAF_PORTRANGE_ID_WIDTH 16 + + +/***********************************/ +/* MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE + * Atomically update the entire tree mapping remote port ranges to portrange + * IDs. The constants in the descriptions of the fields of this message may be + * retrieved by the GET_SECURITY_RULE_INFO op of MC_CMD_GET_PARSER_DISP_INFO. + * (Medford-only; for use by SolarSecure apps, not directly by drivers. See + * SF-114946-SW.) NOTE - this message definition is provisional. It has not yet + * been used in any released code and may change during development. This note + * will be removed once it is regarded as stable. 
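Illustration only: a sketch of packing PORTRANGE_TREE_ENTRY values for the SET_TREE requests below, following the LBN/WIDTH layout above (BRANCH_KEY in bits 0..15, LEAF_PORTRANGE_ID in bits 16..31, leaf nodes marked with the 0xffff magic key); the helper names are illustrative only.

#include <stdint.h>

/* Branch node: <= key takes the left branch, > key takes the right branch. */
static uint32_t portrange_branch_entry(uint16_t key)
{
	return (uint32_t)key << PORTRANGE_TREE_ENTRY_BRANCH_KEY_LBN;
}

/* Leaf node: carries the final portrange ID. */
static uint32_t portrange_leaf_entry(uint16_t portrange_id)
{
	return ((uint32_t)PORTRANGE_TREE_ENTRY_LEAF_NODE_KEY <<
		PORTRANGE_TREE_ENTRY_BRANCH_KEY_LBN) |
	       ((uint32_t)portrange_id <<
		PORTRANGE_TREE_ENTRY_LEAF_PORTRANGE_ID_LBN);
}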
+ */
+#define MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE 0x115
+#undef MC_CMD_0x115_PRIVILEGE_CTG
+
+#define MC_CMD_0x115_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
+
+/* MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_IN msgrequest */
+#define MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_IN_LENMIN 4
+#define MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_IN_LENMAX 252
+#define MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_IN_LENMAX_MCDI2 1020
+#define MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_IN_LEN(num) (0+4*(num))
+#define MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_IN_ENTRIES_NUM(len) (((len)-0)/4)
+/* PORTRANGE_TREE_NUM_ENTRIES new entries, each laid out as a
+ * PORTRANGE_TREE_ENTRY
+ */
+#define MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_IN_ENTRIES_OFST 0
+#define MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_IN_ENTRIES_LEN 4
+#define MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_IN_ENTRIES_MINNUM 1
+#define MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_IN_ENTRIES_MAXNUM 63
+#define MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_IN_ENTRIES_MAXNUM_MCDI2 255
+
+/* MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_OUT msgresponse */
+#define MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE
+ * Atomically update the entire tree mapping local port ranges to portrange
+ * IDs. The constants in the descriptions of the fields of this message may be
+ * retrieved by the GET_SECURITY_RULE_INFO op of MC_CMD_GET_PARSER_DISP_INFO.
+ * (Medford-only; for use by SolarSecure apps, not directly by drivers. See
+ * SF-114946-SW.) NOTE - this message definition is provisional. It has not yet
+ * been used in any released code and may change during development. This note
+ * will be removed once it is regarded as stable.
+ */
+#define MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE 0x116
+#undef MC_CMD_0x116_PRIVILEGE_CTG
+
+#define MC_CMD_0x116_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
+
+/* MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE_IN msgrequest */
+#define MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE_IN_LENMIN 4
+#define MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE_IN_LENMAX 252
+#define MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE_IN_LENMAX_MCDI2 1020
+#define MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE_IN_LEN(num) (0+4*(num))
+#define MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE_IN_ENTRIES_NUM(len) (((len)-0)/4)
+/* PORTRANGE_TREE_NUM_ENTRIES new entries, each laid out as a
+ * PORTRANGE_TREE_ENTRY
+ */
+#define MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE_IN_ENTRIES_OFST 0
+#define MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE_IN_ENTRIES_LEN 4
+#define MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE_IN_ENTRIES_MINNUM 1
+#define MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE_IN_ENTRIES_MAXNUM 63
+#define MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE_IN_ENTRIES_MAXNUM_MCDI2 255
+
+/* MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE_OUT msgresponse */
+#define MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE_OUT_LEN 0
+
+/* TUNNEL_ENCAP_UDP_PORT_ENTRY structuredef */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_LEN 4
+/* UDP port (the standard ports are named below but any port may be used) */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_OFST 0
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_LEN 2
+/* enum: the IANA allocated UDP port for VXLAN */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_IANA_VXLAN_UDP_PORT 0x12b5
+/* enum: the IANA allocated UDP port for Geneve */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_IANA_GENEVE_UDP_PORT 0x17c1
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_LBN 0
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_WIDTH 16
+/* tunnel encapsulation protocol (only those named below are supported) */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_OFST 2
+#define
TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_LEN 2 +/* enum: This port will be used for VXLAN on both IPv4 and IPv6 */ +#define TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN 0x0 +/* enum: This port will be used for Geneve on both IPv4 and IPv6 */ +#define TUNNEL_ENCAP_UDP_PORT_ENTRY_GENEVE 0x1 +#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_LBN 16 +#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_WIDTH 16 + + +/***********************************/ +/* MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS + * Configure UDP ports for tunnel encapsulation hardware acceleration. The + * parser-dispatcher will attempt to parse traffic on these ports as tunnel + * encapsulation PDUs and filter them using the tunnel encapsulation filter + * chain rather than the standard filter chain. Note that this command can + * cause all functions to see a reset. (Available on Medford only.) + */ +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS 0x117 +#undef MC_CMD_0x117_PRIVILEGE_CTG + +#define MC_CMD_0x117_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN msgrequest */ +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LENMIN 4 +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LENMAX 68 +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LENMAX_MCDI2 68 +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LEN(num) (4+4*(num)) +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_NUM(len) (((len)-4)/4) +/* Flags */ +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS_OFST 0 +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS_LEN 2 +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_UNLOADING_LBN 0 +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_UNLOADING_WIDTH 1 +/* The number of entries in the ENTRIES array */ +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_NUM_ENTRIES_OFST 2 +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_NUM_ENTRIES_LEN 2 +/* Entries defining the UDP port to protocol mapping, each laid out as a + * TUNNEL_ENCAP_UDP_PORT_ENTRY + */ +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_OFST 4 +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_LEN 4 +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_MINNUM 0 +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_MAXNUM 16 +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_MAXNUM_MCDI2 16 + +/* MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT msgresponse */ +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_LEN 2 +/* Flags */ +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_FLAGS_OFST 0 +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_FLAGS_LEN 2 +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_LBN 0 +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_WIDTH 1 + + +/***********************************/ +/* MC_CMD_RX_BALANCING + * Configure a port upconverter to distribute the packets on both RX engines. + * Packets are distributed based on a table with the destination vFIFO. The + * index of the table is a hash of source and destination of IPV4 and VLAN + * priority. 
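Illustration only: a sketch of building a MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS request that declares the IANA VXLAN port, assuming the definitions above are in scope and the usual little-endian MCDI encoding; per the description above, the caller should also be prepared for the RESETTING flag in the response. put_le16()/put_le32() are local helpers, not an existing API.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

static void put_le16(uint8_t *p, uint16_t v)
{
	p[0] = (uint8_t)v;
	p[1] = (uint8_t)(v >> 8);
}

static void put_le32(uint8_t *p, uint32_t v)
{
	put_le16(p, (uint16_t)v);
	put_le16(p + 2, (uint16_t)(v >> 16));
}

/* Build a request with a single VXLAN entry; returns the request length. */
static size_t build_tunnel_ports_req(uint8_t *req)
{
	uint32_t entry;

	memset(req, 0, MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LEN(1));
	put_le16(req + MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_NUM_ENTRIES_OFST, 1);

	entry = ((uint32_t)TUNNEL_ENCAP_UDP_PORT_ENTRY_IANA_VXLAN_UDP_PORT <<
		 TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_LBN) |
		((uint32_t)TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN <<
		 TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_LBN);
	put_le32(req + MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_OFST, entry);

	return MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LEN(1);
}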
+ */ +#define MC_CMD_RX_BALANCING 0x118 +#undef MC_CMD_0x118_PRIVILEGE_CTG + +#define MC_CMD_0x118_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND + +/* MC_CMD_RX_BALANCING_IN msgrequest */ +#define MC_CMD_RX_BALANCING_IN_LEN 16 +/* The RX port whose upconverter table will be modified */ +#define MC_CMD_RX_BALANCING_IN_PORT_OFST 0 +#define MC_CMD_RX_BALANCING_IN_PORT_LEN 4 +/* The VLAN priority associated to the table index and vFIFO */ +#define MC_CMD_RX_BALANCING_IN_PRIORITY_OFST 4 +#define MC_CMD_RX_BALANCING_IN_PRIORITY_LEN 4 +/* The resulting bit of SRC^DST for indexing the table */ +#define MC_CMD_RX_BALANCING_IN_SRC_DST_OFST 8 +#define MC_CMD_RX_BALANCING_IN_SRC_DST_LEN 4 +/* The RX engine to which the vFIFO in the table entry will point to */ +#define MC_CMD_RX_BALANCING_IN_ENG_OFST 12 +#define MC_CMD_RX_BALANCING_IN_ENG_LEN 4 + +/* MC_CMD_RX_BALANCING_OUT msgresponse */ +#define MC_CMD_RX_BALANCING_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_TSA_BIND + * TSAN - TSAC binding communication protocol. Refer to SF-115479-TC for more + * info in respect to the binding protocol. + */ +#define MC_CMD_TSA_BIND 0x119 +#undef MC_CMD_0x119_PRIVILEGE_CTG + +#define MC_CMD_0x119_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_TSA_BIND_IN msgrequest: Protocol operation code */ +#define MC_CMD_TSA_BIND_IN_LEN 4 +#define MC_CMD_TSA_BIND_IN_OP_OFST 0 +#define MC_CMD_TSA_BIND_IN_OP_LEN 4 +/* enum: Obsolete. Use MC_CMD_SECURE_NIC_INFO_IN_STATUS. */ +#define MC_CMD_TSA_BIND_OP_GET_ID 0x1 +/* enum: Get a binding ticket from the TSAN. The binding ticket is used as part + * of the binding procedure to authorize the binding of an adapter to a TSAID. + * Refer to SF-114946-SW for more information. This sub-command is only + * available over a TLS secure connection between the TSAN and TSAC. + */ +#define MC_CMD_TSA_BIND_OP_GET_TICKET 0x2 +/* enum: Opcode associated with the propagation of a private key that TSAN uses + * as part of post-binding authentication procedure. More specifically, TSAN + * uses this key for a signing operation. TSAC uses the counterpart public key + * to verify the signature. Note - The post-binding authentication occurs when + * the TSAN-TSAC connection terminates and TSAN tries to reconnect. Refer to + * SF-114946-SW for more information. This sub-command is only available over a + * TLS secure connection between the TSAN and TSAC. + */ +#define MC_CMD_TSA_BIND_OP_SET_KEY 0x3 +/* enum: Request an insecure unbinding operation. This sub-command is available + * for any privileged client. + */ +#define MC_CMD_TSA_BIND_OP_UNBIND 0x4 +/* enum: Obsolete. Use MC_CMD_TSA_BIND_OP_SECURE_UNBIND. */ +#define MC_CMD_TSA_BIND_OP_UNBIND_EXT 0x5 +/* enum: Opcode associated with the propagation of the unbinding secret token. + * TSAN persists the unbinding secret token. Refer to SF-115479-TC for more + * information. This sub-command is only available over a TLS secure connection + * between the TSAN and TSAC. + */ +#define MC_CMD_TSA_BIND_OP_SET_UNBINDTOKEN 0x6 +/* enum: Obsolete. Use MC_CMD_TSA_BIND_OP_SECURE_DECOMMISSION. */ +#define MC_CMD_TSA_BIND_OP_DECOMMISSION 0x7 +/* enum: Obsolete. Use MC_CMD_GET_CERTIFICATE. */ +#define MC_CMD_TSA_BIND_OP_GET_CERTIFICATE 0x8 +/* enum: Request a secure unbinding operation using unbinding token. This sub- + * command is available for any privileged client. + */ +#define MC_CMD_TSA_BIND_OP_SECURE_UNBIND 0x9 +/* enum: Request a secure decommissioning operation. This sub-command is + * available for any privileged client. 
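Illustration only: a sketch of filling in the fixed 16-byte MC_CMD_RX_BALANCING request defined above, assuming little-endian MCDI encoding; put_le32() is a local helper, not an existing API.

#include <stdint.h>
#include <string.h>

static void put_le32(uint8_t *p, uint32_t v)
{
	p[0] = (uint8_t)v;
	p[1] = (uint8_t)(v >> 8);
	p[2] = (uint8_t)(v >> 16);
	p[3] = (uint8_t)(v >> 24);
}

static void build_rx_balancing_req(uint8_t *req, uint32_t port,
				   uint32_t priority, uint32_t src_dst_bit,
				   uint32_t rx_engine)
{
	memset(req, 0, MC_CMD_RX_BALANCING_IN_LEN);
	put_le32(req + MC_CMD_RX_BALANCING_IN_PORT_OFST, port);
	put_le32(req + MC_CMD_RX_BALANCING_IN_PRIORITY_OFST, priority);
	put_le32(req + MC_CMD_RX_BALANCING_IN_SRC_DST_OFST, src_dst_bit);
	put_le32(req + MC_CMD_RX_BALANCING_IN_ENG_OFST, rx_engine);
}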
+ */ +#define MC_CMD_TSA_BIND_OP_SECURE_DECOMMISSION 0xa +/* enum: Test facility that allows an adapter to be configured to behave as if + * Bound to a TSA controller with restricted MCDI administrator operations. + * This operation is primarily intended to aid host driver development. + */ +#define MC_CMD_TSA_BIND_OP_TEST_MCDI 0xb + +/* MC_CMD_TSA_BIND_IN_GET_ID msgrequest: Obsolete. Use + * MC_CMD_SECURE_NIC_INFO_IN_STATUS. + */ +#define MC_CMD_TSA_BIND_IN_GET_ID_LEN 20 +/* The operation requested. */ +#define MC_CMD_TSA_BIND_IN_GET_ID_OP_OFST 0 +#define MC_CMD_TSA_BIND_IN_GET_ID_OP_LEN 4 +/* Cryptographic nonce that TSAC generates and sends to TSAN. TSAC generates + * the nonce every time as part of the TSAN post-binding authentication + * procedure when the TSAN-TSAC connection terminates and TSAN does need to re- + * connect to the TSAC. Refer to SF-114946-SW for more information. + */ +#define MC_CMD_TSA_BIND_IN_GET_ID_NONCE_OFST 4 +#define MC_CMD_TSA_BIND_IN_GET_ID_NONCE_LEN 16 + +/* MC_CMD_TSA_BIND_IN_GET_TICKET msgrequest */ +#define MC_CMD_TSA_BIND_IN_GET_TICKET_LEN 4 +/* The operation requested. */ +#define MC_CMD_TSA_BIND_IN_GET_TICKET_OP_OFST 0 +#define MC_CMD_TSA_BIND_IN_GET_TICKET_OP_LEN 4 + +/* MC_CMD_TSA_BIND_IN_SET_KEY msgrequest */ +#define MC_CMD_TSA_BIND_IN_SET_KEY_LENMIN 5 +#define MC_CMD_TSA_BIND_IN_SET_KEY_LENMAX 252 +#define MC_CMD_TSA_BIND_IN_SET_KEY_LENMAX_MCDI2 1020 +#define MC_CMD_TSA_BIND_IN_SET_KEY_LEN(num) (4+1*(num)) +#define MC_CMD_TSA_BIND_IN_SET_KEY_DATKEY_NUM(len) (((len)-4)/1) +/* The operation requested. */ +#define MC_CMD_TSA_BIND_IN_SET_KEY_OP_OFST 0 +#define MC_CMD_TSA_BIND_IN_SET_KEY_OP_LEN 4 +/* This data blob contains the private key generated by the TSAC. TSAN uses + * this key for a signing operation. Note- This private key is used in + * conjunction with the post-binding TSAN authentication procedure that occurs + * when the TSAN-TSAC connection terminates and TSAN tries to reconnect. Refer + * to SF-114946-SW for more information. + */ +#define MC_CMD_TSA_BIND_IN_SET_KEY_DATKEY_OFST 4 +#define MC_CMD_TSA_BIND_IN_SET_KEY_DATKEY_LEN 1 +#define MC_CMD_TSA_BIND_IN_SET_KEY_DATKEY_MINNUM 1 +#define MC_CMD_TSA_BIND_IN_SET_KEY_DATKEY_MAXNUM 248 +#define MC_CMD_TSA_BIND_IN_SET_KEY_DATKEY_MAXNUM_MCDI2 1016 + +/* MC_CMD_TSA_BIND_IN_UNBIND msgrequest: Request an insecure unbinding + * operation. + */ +#define MC_CMD_TSA_BIND_IN_UNBIND_LEN 10 +/* The operation requested. */ +#define MC_CMD_TSA_BIND_IN_UNBIND_OP_OFST 0 +#define MC_CMD_TSA_BIND_IN_UNBIND_OP_LEN 4 +/* TSAN unique identifier for the network adapter */ +#define MC_CMD_TSA_BIND_IN_UNBIND_TSANID_OFST 4 +#define MC_CMD_TSA_BIND_IN_UNBIND_TSANID_LEN 6 + +/* MC_CMD_TSA_BIND_IN_UNBIND_EXT msgrequest: Obsolete. Use + * MC_CMD_TSA_BIND_IN_SECURE_UNBIND. + */ +#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_LENMIN 93 +#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_LENMAX 252 +#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_LENMAX_MCDI2 1020 +#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_LEN(num) (92+1*(num)) +#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_SIG_NUM(len) (((len)-92)/1) +/* The operation requested. 
*/ +#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_OP_OFST 0 +#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_OP_LEN 4 +/* TSAN unique identifier for the network adapter */ +#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_TSANID_OFST 4 +#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_TSANID_LEN 6 +/* Align the arguments to 32 bits */ +#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_TSANID_RSVD_OFST 10 +#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_TSANID_RSVD_LEN 2 +/* This attribute identifies the TSA infrastructure domain. The length of the + * TSAID attribute is limited to 64 bytes. This is how TSA SDK defines the max + * length. Note- The TSAID is the Organizational Unit Name filed as part of the + * root and server certificates. + */ +#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_TSAID_OFST 12 +#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_TSAID_LEN 1 +#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_TSAID_NUM 64 +/* Unbinding secret token. The adapter validates this unbinding token by + * comparing it against the one stored on the adapter as part of the + * MC_CMD_TSA_BIND_IN_SET_UNBINDTOKEN msgrequest. Refer to SF-115479-TC for + * more information. + */ +#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_UNBINDTOKEN_OFST 76 +#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_UNBINDTOKEN_LEN 16 +/* This is the signature of the above mentioned fields- TSANID, TSAID and + * UNBINDTOKEN. As per current requirements, the SIG opaque data blob contains + * ECDSA ECC-384 based signature. The ECC curve is secp384r1. The signature is + * also ASN-1 encoded. Note- The signature is verified based on the public key + * stored into the root certificate that is provisioned on the adapter side. + * This key is known as the PUKtsaid. Refer to SF-115479-TC for more + * information. + */ +#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_SIG_OFST 92 +#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_SIG_LEN 1 +#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_SIG_MINNUM 1 +#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_SIG_MAXNUM 160 +#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_SIG_MAXNUM_MCDI2 928 + +/* MC_CMD_TSA_BIND_IN_SET_UNBINDTOKEN msgrequest */ +#define MC_CMD_TSA_BIND_IN_SET_UNBINDTOKEN_LEN 20 +/* The operation requested. */ +#define MC_CMD_TSA_BIND_IN_SET_UNBINDTOKEN_OP_OFST 0 +#define MC_CMD_TSA_BIND_IN_SET_UNBINDTOKEN_OP_LEN 4 +/* Unbinding secret token. TSAN persists the unbinding secret token. Refer to + * SF-115479-TC for more information. + */ +#define MC_CMD_TSA_BIND_IN_SET_UNBINDTOKEN_UNBINDTOKEN_OFST 4 +#define MC_CMD_TSA_BIND_IN_SET_UNBINDTOKEN_UNBINDTOKEN_LEN 16 +/* enum: There are situations when the binding process does not complete + * successfully due to key, other attributes corruption at the database level + * (Controller). Adapter can't connect to the controller anymore. To recover, + * make usage of the decommission command that forces the adapter into + * unbinding state. + */ +#define MC_CMD_TSA_BIND_IN_SET_UNBINDTOKEN_ADAPTER_BINDING_FAILURE 0x1 + +/* MC_CMD_TSA_BIND_IN_DECOMMISSION msgrequest: Obsolete. Use + * MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION. + */ +#define MC_CMD_TSA_BIND_IN_DECOMMISSION_LENMIN 109 +#define MC_CMD_TSA_BIND_IN_DECOMMISSION_LENMAX 252 +#define MC_CMD_TSA_BIND_IN_DECOMMISSION_LENMAX_MCDI2 1020 +#define MC_CMD_TSA_BIND_IN_DECOMMISSION_LEN(num) (108+1*(num)) +#define MC_CMD_TSA_BIND_IN_DECOMMISSION_SIG_NUM(len) (((len)-108)/1) +/* This is the signature of the above mentioned fields- TSAID, USER and REASON. + * As per current requirements, the SIG opaque data blob contains ECDSA ECC-384 + * based signature. The ECC curve is secp384r1. The signature is also ASN-1 + * encoded . 
Note- The signature is verified based on the public key stored + * into the root certificate that is provisioned on the adapter side. This key + * is known as the PUKtsaid. Refer to SF-115479-TC for more information. + */ +#define MC_CMD_TSA_BIND_IN_DECOMMISSION_SIG_OFST 108 +#define MC_CMD_TSA_BIND_IN_DECOMMISSION_SIG_LEN 1 +#define MC_CMD_TSA_BIND_IN_DECOMMISSION_SIG_MINNUM 1 +#define MC_CMD_TSA_BIND_IN_DECOMMISSION_SIG_MAXNUM 144 +#define MC_CMD_TSA_BIND_IN_DECOMMISSION_SIG_MAXNUM_MCDI2 912 +/* The operation requested. */ +#define MC_CMD_TSA_BIND_IN_DECOMMISSION_OP_OFST 0 +#define MC_CMD_TSA_BIND_IN_DECOMMISSION_OP_LEN 4 +/* This attribute identifies the TSA infrastructure domain. The length of the + * TSAID attribute is limited to 64 bytes. This is how TSA SDK defines the max + * length. Note- The TSAID is the Organizational Unit Name filed as part of the + * root and server certificates. + */ +#define MC_CMD_TSA_BIND_IN_DECOMMISSION_TSAID_OFST 4 +#define MC_CMD_TSA_BIND_IN_DECOMMISSION_TSAID_LEN 1 +#define MC_CMD_TSA_BIND_IN_DECOMMISSION_TSAID_NUM 64 +/* User ID that comes, as an example, from the Controller. Note- The 33 byte + * length of this attribute is max length of the linux user name plus null + * character. + */ +#define MC_CMD_TSA_BIND_IN_DECOMMISSION_USER_OFST 68 +#define MC_CMD_TSA_BIND_IN_DECOMMISSION_USER_LEN 1 +#define MC_CMD_TSA_BIND_IN_DECOMMISSION_USER_NUM 33 +/* Align the arguments to 32 bits */ +#define MC_CMD_TSA_BIND_IN_DECOMMISSION_USER_RSVD_OFST 101 +#define MC_CMD_TSA_BIND_IN_DECOMMISSION_USER_RSVD_LEN 3 +/* Reason of why decommissioning happens Note- The list of reasons, defined as + * part of the enumeration below, can be extended. + */ +#define MC_CMD_TSA_BIND_IN_DECOMMISSION_REASON_OFST 104 +#define MC_CMD_TSA_BIND_IN_DECOMMISSION_REASON_LEN 4 + +/* MC_CMD_TSA_BIND_IN_GET_CERTIFICATE msgrequest: Obsolete. Use + * MC_CMD_GET_CERTIFICATE. + */ +#define MC_CMD_TSA_BIND_IN_GET_CERTIFICATE_LEN 8 +/* The operation requested, must be MC_CMD_TSA_BIND_OP_GET_CERTIFICATE. */ +#define MC_CMD_TSA_BIND_IN_GET_CERTIFICATE_OP_OFST 0 +#define MC_CMD_TSA_BIND_IN_GET_CERTIFICATE_OP_LEN 4 +/* Type of the certificate to be retrieved. */ +#define MC_CMD_TSA_BIND_IN_GET_CERTIFICATE_TYPE_OFST 4 +#define MC_CMD_TSA_BIND_IN_GET_CERTIFICATE_TYPE_LEN 4 +#define MC_CMD_TSA_BIND_IN_GET_CERTIFICATE_UNUSED 0x0 /* enum */ +/* enum: Adapter Authentication Certificate (AAC). The AAC is used by the + * controller to verify the authenticity of the adapter. + */ +#define MC_CMD_TSA_BIND_IN_GET_CERTIFICATE_AAC 0x1 +/* enum: Adapter Authentication Signing Certificate (AASC). The AASC is used by + * the controller to verify the validity of AAC. + */ +#define MC_CMD_TSA_BIND_IN_GET_CERTIFICATE_AASC 0x2 + +/* MC_CMD_TSA_BIND_IN_SECURE_UNBIND msgrequest: Request a secure unbinding + * operation using unbinding token. + */ +#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_LENMIN 97 +#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_LENMAX 200 +#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_LENMAX_MCDI2 200 +#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_LEN(num) (96+1*(num)) +#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_SIG_NUM(len) (((len)-96)/1) +/* The operation requested, must be MC_CMD_TSA_BIND_OP_SECURE_UNBIND. */ +#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_OP_OFST 0 +#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_OP_LEN 4 +/* Type of the message. (MESSAGE_TYPE_xxx) Must be + * MESSAGE_TYPE_TSA_SECURE_UNBIND. 
+ */ +#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_MESSAGE_TYPE_OFST 4 +#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_MESSAGE_TYPE_LEN 4 +/* TSAN unique identifier for the network adapter */ +#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_TSANID_OFST 8 +#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_TSANID_LEN 6 +/* Align the arguments to 32 bits */ +#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_TSANID_RSVD_OFST 14 +#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_TSANID_RSVD_LEN 2 +/* A NUL padded US-ASCII string identifying the TSA infrastructure domain. This + * field is for information only, and not used by the firmware. Note- The TSAID + * is the Organizational Unit Name field as part of the root and server + * certificates. + */ +#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_TSAID_OFST 16 +#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_TSAID_LEN 1 +#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_TSAID_NUM 64 +/* Unbinding secret token. The adapter validates this unbinding token by + * comparing it against the one stored on the adapter as part of the + * MC_CMD_TSA_BIND_IN_SET_UNBINDTOKEN msgrequest. Refer to SF-115479-TC for + * more information. + */ +#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_UNBINDTOKEN_OFST 80 +#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_UNBINDTOKEN_LEN 16 +/* The signature computed and encoded as specified by MESSAGE_TYPE. */ +#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_SIG_OFST 96 +#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_SIG_LEN 1 +#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_SIG_MINNUM 1 +#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_SIG_MAXNUM 104 +#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_SIG_MAXNUM_MCDI2 104 + +/* MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION msgrequest: Request a secure + * decommissioning operation. + */ +#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_LENMIN 113 +#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_LENMAX 216 +#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_LENMAX_MCDI2 216 +#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_LEN(num) (112+1*(num)) +#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_SIG_NUM(len) (((len)-112)/1) +/* The operation requested, must be MC_CMD_TSA_BIND_OP_SECURE_DECOMMISSION. */ +#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_OP_OFST 0 +#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_OP_LEN 4 +/* Type of the message. (MESSAGE_TYPE_xxx) Must be + * MESSAGE_TYPE_SECURE_DECOMMISSION. + */ +#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_MESSAGE_TYPE_OFST 4 +#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_MESSAGE_TYPE_LEN 4 +/* A NUL padded US-ASCII string identifying the TSA infrastructure domain. This + * field is for information only, and not used by the firmware. Note- The TSAID + * is the Organizational Unit Name field as part of the root and server + * certificates. + */ +#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_TSAID_OFST 8 +#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_TSAID_LEN 1 +#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_TSAID_NUM 64 +/* A NUL padded US-ASCII string containing user name of the creator of the + * decommissioning ticket. This field is for information only, and not used by + * the firmware. 
+ */ +#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_USER_OFST 72 +#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_USER_LEN 1 +#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_USER_NUM 36 +/* Reason of why decommissioning happens */ +#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_REASON_OFST 108 +#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_REASON_LEN 4 +/* enum: There are situations when the binding process does not complete + * successfully due to key, other attributes corruption at the database level + * (Controller). Adapter can't connect to the controller anymore. To recover, + * use the decommission command to force the adapter into unbound state. + */ +#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_ADAPTER_BINDING_FAILURE 0x1 +/* The signature computed and encoded as specified by MESSAGE_TYPE. */ +#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_SIG_OFST 112 +#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_SIG_LEN 1 +#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_SIG_MINNUM 1 +#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_SIG_MAXNUM 104 +#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_SIG_MAXNUM_MCDI2 104 + +/* MC_CMD_TSA_BIND_IN_TEST_MCDI msgrequest: Test mode that emulates MCDI + * interface restrictions of a bound adapter. This operation is intended for + * test use on adapters that are not deployed and bound to a TSA Controller. + * Using it on a Bound adapter will succeed but will not alter the MCDI + * privileges as MCDI operations will already be restricted. + */ +#define MC_CMD_TSA_BIND_IN_TEST_MCDI_LEN 8 +/* The operation requested must be MC_CMD_TSA_BIND_OP_TEST_MCDI. */ +#define MC_CMD_TSA_BIND_IN_TEST_MCDI_OP_OFST 0 +#define MC_CMD_TSA_BIND_IN_TEST_MCDI_OP_LEN 4 +/* Enable or disable emulation of bound adapter */ +#define MC_CMD_TSA_BIND_IN_TEST_MCDI_CTRL_OFST 4 +#define MC_CMD_TSA_BIND_IN_TEST_MCDI_CTRL_LEN 4 +#define MC_CMD_TSA_BIND_IN_TEST_MCDI_DISABLE 0x0 /* enum */ +#define MC_CMD_TSA_BIND_IN_TEST_MCDI_ENABLE 0x1 /* enum */ + +/* MC_CMD_TSA_BIND_OUT_GET_ID msgresponse: Obsolete. Use + * MC_CMD_SECURE_NIC_INFO_OUT_STATUS. + */ +#define MC_CMD_TSA_BIND_OUT_GET_ID_LENMIN 15 +#define MC_CMD_TSA_BIND_OUT_GET_ID_LENMAX 252 +#define MC_CMD_TSA_BIND_OUT_GET_ID_LENMAX_MCDI2 1020 +#define MC_CMD_TSA_BIND_OUT_GET_ID_LEN(num) (14+1*(num)) +#define MC_CMD_TSA_BIND_OUT_GET_ID_SIG_NUM(len) (((len)-14)/1) +/* The protocol operation code MC_CMD_TSA_BIND_OP_GET_ID that is sent back to + * the caller. + */ +#define MC_CMD_TSA_BIND_OUT_GET_ID_OP_OFST 0 +#define MC_CMD_TSA_BIND_OUT_GET_ID_OP_LEN 4 +/* Rules engine type. Note- The rules engine type allows TSAC to further + * identify the connected endpoint (e.g. TSAN, NIC Emulator) type and take the + * proper action accordingly. As an example, TSAC uses the rules engine type to + * select the SF key that differs in the case of TSAN vs. NIC Emulator. + */ +#define MC_CMD_TSA_BIND_OUT_GET_ID_RULE_ENGINE_OFST 4 +#define MC_CMD_TSA_BIND_OUT_GET_ID_RULE_ENGINE_LEN 4 +/* enum: Hardware rules engine. */ +#define MC_CMD_TSA_BIND_OUT_GET_ID_RULE_ENGINE_TSAN 0x1 +/* enum: Nic emulator rules engine. */ +#define MC_CMD_TSA_BIND_OUT_GET_ID_RULE_ENGINE_NEMU 0x2 +/* enum: SSFE. */ +#define MC_CMD_TSA_BIND_OUT_GET_ID_RULE_ENGINE_SSFE 0x3 +/* TSAN unique identifier for the network adapter */ +#define MC_CMD_TSA_BIND_OUT_GET_ID_TSANID_OFST 8 +#define MC_CMD_TSA_BIND_OUT_GET_ID_TSANID_LEN 6 +/* The signature data blob. The signature is computed against the message + * formed by TSAN ID concatenated with the NONCE value. 
Refer to SF-115479-TC + * for more information also in respect to the private keys that are used to + * sign the message based on TSAN pre/post-binding authentication procedure. + */ +#define MC_CMD_TSA_BIND_OUT_GET_ID_SIG_OFST 14 +#define MC_CMD_TSA_BIND_OUT_GET_ID_SIG_LEN 1 +#define MC_CMD_TSA_BIND_OUT_GET_ID_SIG_MINNUM 1 +#define MC_CMD_TSA_BIND_OUT_GET_ID_SIG_MAXNUM 238 +#define MC_CMD_TSA_BIND_OUT_GET_ID_SIG_MAXNUM_MCDI2 1006 + +/* MC_CMD_TSA_BIND_OUT_GET_TICKET msgresponse */ +#define MC_CMD_TSA_BIND_OUT_GET_TICKET_LENMIN 5 +#define MC_CMD_TSA_BIND_OUT_GET_TICKET_LENMAX 252 +#define MC_CMD_TSA_BIND_OUT_GET_TICKET_LENMAX_MCDI2 1020 +#define MC_CMD_TSA_BIND_OUT_GET_TICKET_LEN(num) (4+1*(num)) +#define MC_CMD_TSA_BIND_OUT_GET_TICKET_TICKET_NUM(len) (((len)-4)/1) +/* The protocol operation code MC_CMD_TSA_BIND_OP_GET_TICKET that is sent back + * to the caller. + */ +#define MC_CMD_TSA_BIND_OUT_GET_TICKET_OP_OFST 0 +#define MC_CMD_TSA_BIND_OUT_GET_TICKET_OP_LEN 4 +/* The ticket represents the data blob construct that TSAN sends to TSAC as + * part of the binding protocol. From the TSAN perspective the ticket is an + * opaque construct. For more info refer to SF-115479-TC. + */ +#define MC_CMD_TSA_BIND_OUT_GET_TICKET_TICKET_OFST 4 +#define MC_CMD_TSA_BIND_OUT_GET_TICKET_TICKET_LEN 1 +#define MC_CMD_TSA_BIND_OUT_GET_TICKET_TICKET_MINNUM 1 +#define MC_CMD_TSA_BIND_OUT_GET_TICKET_TICKET_MAXNUM 248 +#define MC_CMD_TSA_BIND_OUT_GET_TICKET_TICKET_MAXNUM_MCDI2 1016 + +/* MC_CMD_TSA_BIND_OUT_SET_KEY msgresponse */ +#define MC_CMD_TSA_BIND_OUT_SET_KEY_LEN 4 +/* The protocol operation code MC_CMD_TSA_BIND_OP_SET_KEY that is sent back to + * the caller. + */ +#define MC_CMD_TSA_BIND_OUT_SET_KEY_OP_OFST 0 +#define MC_CMD_TSA_BIND_OUT_SET_KEY_OP_LEN 4 + +/* MC_CMD_TSA_BIND_OUT_UNBIND msgresponse: Response to insecure unbind request. + */ +#define MC_CMD_TSA_BIND_OUT_UNBIND_LEN 8 +/* Same as MC_CMD_ERR field, but included as 0 in success cases */ +#define MC_CMD_TSA_BIND_OUT_UNBIND_RESULT_OFST 0 +#define MC_CMD_TSA_BIND_OUT_UNBIND_RESULT_LEN 4 +/* Extra status information */ +#define MC_CMD_TSA_BIND_OUT_UNBIND_INFO_OFST 4 +#define MC_CMD_TSA_BIND_OUT_UNBIND_INFO_LEN 4 +/* enum: Unbind successful. */ +#define MC_CMD_TSA_BIND_OUT_UNBIND_OK_UNBOUND 0x0 +/* enum: TSANID mismatch */ +#define MC_CMD_TSA_BIND_OUT_UNBIND_ERR_BAD_TSANID 0x1 +/* enum: Unable to remove the binding ticket from persistent storage. */ +#define MC_CMD_TSA_BIND_OUT_UNBIND_ERR_REMOVE_TICKET 0x2 +/* enum: TSAN is not bound to a binding ticket. */ +#define MC_CMD_TSA_BIND_OUT_UNBIND_ERR_NOT_BOUND 0x3 + +/* MC_CMD_TSA_BIND_OUT_UNBIND_EXT msgresponse: Obsolete. Use + * MC_CMD_TSA_BIND_OUT_SECURE_UNBIND. + */ +#define MC_CMD_TSA_BIND_OUT_UNBIND_EXT_LEN 8 +/* Same as MC_CMD_ERR field, but included as 0 in success cases */ +#define MC_CMD_TSA_BIND_OUT_UNBIND_EXT_RESULT_OFST 0 +#define MC_CMD_TSA_BIND_OUT_UNBIND_EXT_RESULT_LEN 4 +/* Extra status information */ +#define MC_CMD_TSA_BIND_OUT_UNBIND_EXT_INFO_OFST 4 +#define MC_CMD_TSA_BIND_OUT_UNBIND_EXT_INFO_LEN 4 +/* enum: Unbind successful. */ +#define MC_CMD_TSA_BIND_OUT_UNBIND_EXT_OK_UNBOUND 0x0 +/* enum: TSANID mismatch */ +#define MC_CMD_TSA_BIND_OUT_UNBIND_EXT_ERR_BAD_TSANID 0x1 +/* enum: Unable to remove the binding ticket from persistent storage. */ +#define MC_CMD_TSA_BIND_OUT_UNBIND_EXT_ERR_REMOVE_TICKET 0x2 +/* enum: TSAN is not bound to a binding ticket. 
*/ +#define MC_CMD_TSA_BIND_OUT_UNBIND_EXT_ERR_NOT_BOUND 0x3 +/* enum: Invalid unbind token */ +#define MC_CMD_TSA_BIND_OUT_UNBIND_EXT_ERR_BAD_TOKEN 0x4 +/* enum: Invalid signature */ +#define MC_CMD_TSA_BIND_OUT_UNBIND_EXT_ERR_BAD_SIGNATURE 0x5 + +/* MC_CMD_TSA_BIND_OUT_SET_UNBINDTOKEN msgresponse */ +#define MC_CMD_TSA_BIND_OUT_SET_UNBINDTOKEN_LEN 4 +/* The protocol operation code MC_CMD_TSA_BIND_OP_SET_UNBINDTOKEN that is sent + * back to the caller. + */ +#define MC_CMD_TSA_BIND_OUT_SET_UNBINDTOKEN_OP_OFST 0 +#define MC_CMD_TSA_BIND_OUT_SET_UNBINDTOKEN_OP_LEN 4 + +/* MC_CMD_TSA_BIND_OUT_DECOMMISSION msgresponse: Obsolete. Use + * MC_CMD_TSA_BIND_OUT_SECURE_DECOMMISSION. + */ +#define MC_CMD_TSA_BIND_OUT_DECOMMISSION_LEN 4 +/* The protocol operation code MC_CMD_TSA_BIND_OP_DECOMMISSION that is sent + * back to the caller. + */ +#define MC_CMD_TSA_BIND_OUT_DECOMMISSION_OP_OFST 0 +#define MC_CMD_TSA_BIND_OUT_DECOMMISSION_OP_LEN 4 + +/* MC_CMD_TSA_BIND_OUT_GET_CERTIFICATE msgresponse */ +#define MC_CMD_TSA_BIND_OUT_GET_CERTIFICATE_LENMIN 9 +#define MC_CMD_TSA_BIND_OUT_GET_CERTIFICATE_LENMAX 252 +#define MC_CMD_TSA_BIND_OUT_GET_CERTIFICATE_LENMAX_MCDI2 1020 +#define MC_CMD_TSA_BIND_OUT_GET_CERTIFICATE_LEN(num) (8+1*(num)) +#define MC_CMD_TSA_BIND_OUT_GET_CERTIFICATE_DATA_NUM(len) (((len)-8)/1) +/* The protocol operation code MC_CMD_TSA_BIND_OP_GET_CERTIFICATE that is sent + * back to the caller. + */ +#define MC_CMD_TSA_BIND_OUT_GET_CERTIFICATE_OP_OFST 0 +#define MC_CMD_TSA_BIND_OUT_GET_CERTIFICATE_OP_LEN 4 +/* Type of the certificate. */ +#define MC_CMD_TSA_BIND_OUT_GET_CERTIFICATE_TYPE_OFST 4 +#define MC_CMD_TSA_BIND_OUT_GET_CERTIFICATE_TYPE_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_TSA_BIND_IN_GET_CERTIFICATE/TYPE */ +/* The certificate data. */ +#define MC_CMD_TSA_BIND_OUT_GET_CERTIFICATE_DATA_OFST 8 +#define MC_CMD_TSA_BIND_OUT_GET_CERTIFICATE_DATA_LEN 1 +#define MC_CMD_TSA_BIND_OUT_GET_CERTIFICATE_DATA_MINNUM 1 +#define MC_CMD_TSA_BIND_OUT_GET_CERTIFICATE_DATA_MAXNUM 244 +#define MC_CMD_TSA_BIND_OUT_GET_CERTIFICATE_DATA_MAXNUM_MCDI2 1012 + +/* MC_CMD_TSA_BIND_OUT_SECURE_UNBIND msgresponse: Response to secure unbind + * request. + */ +#define MC_CMD_TSA_BIND_OUT_SECURE_UNBIND_LEN 8 +/* The protocol operation code that is sent back to the caller. */ +#define MC_CMD_TSA_BIND_OUT_SECURE_UNBIND_OP_OFST 0 +#define MC_CMD_TSA_BIND_OUT_SECURE_UNBIND_OP_LEN 4 +#define MC_CMD_TSA_BIND_OUT_SECURE_UNBIND_RESULT_OFST 4 +#define MC_CMD_TSA_BIND_OUT_SECURE_UNBIND_RESULT_LEN 4 +/* enum: Unbind successful. */ +#define MC_CMD_TSA_BIND_OUT_SECURE_UNBIND_OK_UNBOUND 0x0 +/* enum: TSANID mismatch */ +#define MC_CMD_TSA_BIND_OUT_SECURE_UNBIND_ERR_BAD_TSANID 0x1 +/* enum: Unable to remove the binding ticket from persistent storage. */ +#define MC_CMD_TSA_BIND_OUT_SECURE_UNBIND_ERR_REMOVE_TICKET 0x2 +/* enum: TSAN is not bound to a domain. */ +#define MC_CMD_TSA_BIND_OUT_SECURE_UNBIND_ERR_NOT_BOUND 0x3 +/* enum: Invalid unbind token */ +#define MC_CMD_TSA_BIND_OUT_SECURE_UNBIND_ERR_BAD_TOKEN 0x4 +/* enum: Invalid signature */ +#define MC_CMD_TSA_BIND_OUT_SECURE_UNBIND_ERR_BAD_SIGNATURE 0x5 + +/* MC_CMD_TSA_BIND_OUT_SECURE_DECOMMISSION msgresponse: Response to secure + * decommission request. + */ +#define MC_CMD_TSA_BIND_OUT_SECURE_DECOMMISSION_LEN 8 +/* The protocol operation code that is sent back to the caller. 
*/ +#define MC_CMD_TSA_BIND_OUT_SECURE_DECOMMISSION_OP_OFST 0 +#define MC_CMD_TSA_BIND_OUT_SECURE_DECOMMISSION_OP_LEN 4 +#define MC_CMD_TSA_BIND_OUT_SECURE_DECOMMISSION_RESULT_OFST 4 +#define MC_CMD_TSA_BIND_OUT_SECURE_DECOMMISSION_RESULT_LEN 4 +/* enum: Unbind successful. */ +#define MC_CMD_TSA_BIND_OUT_SECURE_DECOMMISSION_OK_UNBOUND 0x0 +/* enum: TSANID mismatch */ +#define MC_CMD_TSA_BIND_OUT_SECURE_DECOMMISSION_ERR_BAD_TSANID 0x1 +/* enum: Unable to remove the binding ticket from persistent storage. */ +#define MC_CMD_TSA_BIND_OUT_SECURE_DECOMMISSION_ERR_REMOVE_TICKET 0x2 +/* enum: TSAN is not bound to a domain. */ +#define MC_CMD_TSA_BIND_OUT_SECURE_DECOMMISSION_ERR_NOT_BOUND 0x3 +/* enum: Invalid unbind token */ +#define MC_CMD_TSA_BIND_OUT_SECURE_DECOMMISSION_ERR_BAD_TOKEN 0x4 +/* enum: Invalid signature */ +#define MC_CMD_TSA_BIND_OUT_SECURE_DECOMMISSION_ERR_BAD_SIGNATURE 0x5 + +/* MC_CMD_TSA_BIND_OUT_TEST_MCDI msgrequest */ +#define MC_CMD_TSA_BIND_OUT_TEST_MCDI_LEN 4 +/* The protocol operation code MC_CMD_TSA_BIND_OP_TEST_MCDI that is sent back + * to the caller. + */ +#define MC_CMD_TSA_BIND_OUT_TEST_MCDI_OP_OFST 0 +#define MC_CMD_TSA_BIND_OUT_TEST_MCDI_OP_LEN 4 + + +/***********************************/ +/* MC_CMD_MANAGE_SECURITY_RULESET_CACHE + * Manage the persistent NVRAM cache of security rules created with + * MC_CMD_SET_SECURITY_RULE. Note that the cache is not automatically updated + * as rules are added or removed; the active ruleset must be explicitly + * committed to the cache. The cache may also be explicitly invalidated, + * without affecting the currently active ruleset. When the cache is valid, it + * will be loaded at power on or MC reboot, instead of the default ruleset. + * Rollback of the currently active ruleset to the cached version (when it is + * valid) is also supported. (Medford-only; for use by SolarSecure apps, not + * directly by drivers. See SF-114946-SW.) NOTE - The only sub-operation + * allowed in an adapter bound to a TSA controller from the local host is + * OP_GET_CACHED_VERSION. All other sub-operations are prohibited. + */ +#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE 0x11a +#undef MC_CMD_0x11a_PRIVILEGE_CTG + +#define MC_CMD_0x11a_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN msgrequest */ +#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN_LEN 4 +/* the operation to perform */ +#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN_OP_OFST 0 +#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN_OP_LEN 4 +/* enum: reports the ruleset version that is cached in persistent storage but + * performs no other action + */ +#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN_OP_GET_CACHED_VERSION 0x0 +/* enum: rolls back the active state to the cached version. (May fail with + * ENOENT if there is no valid cached version.) 
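Illustration only: a sketch of interpreting a MC_CMD_MANAGE_SECURITY_RULESET_CACHE response (fields defined below), for example after issuing OP_GET_CACHED_VERSION; get_le32() is a local helper and the response buffer is assumed to come from the caller's MCDI transport.

#include <stddef.h>
#include <stdint.h>

static uint32_t get_le32(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

/* Returns 1 and the cached version length if the persistent cache is
 * valid, otherwise 0.
 */
static int ruleset_cache_valid(const uint8_t *resp, size_t resp_len,
			       size_t *version_lenp)
{
	if (resp_len < MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_LENMIN)
		return 0;
	if (get_le32(resp + MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_STATE_OFST) !=
	    MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_STATE_VALID)
		return 0;
	*version_lenp =
	    MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_VERSION_NUM(resp_len);
	return 1;
}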
+ */ +#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN_OP_ROLLBACK 0x1 +/* enum: commits the active state to the persistent cache */ +#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN_OP_COMMIT 0x2 +/* enum: invalidates the persistent cache without affecting the active state */ +#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN_OP_INVALIDATE 0x3 + +/* MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT msgresponse */ +#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_LENMIN 5 +#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_LENMAX 252 +#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_LEN(num) (4+1*(num)) +#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_VERSION_NUM(len) (((len)-4)/1) +/* indicates whether the persistent cache is valid (after completion of the + * requested operation in the case of rollback, commit, or invalidate) + */ +#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_STATE_OFST 0 +#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_STATE_LEN 4 +/* enum: persistent cache is invalid (the VERSION field will be empty in this + * case) + */ +#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_STATE_INVALID 0x0 +/* enum: persistent cache is valid */ +#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_STATE_VALID 0x1 +/* cached ruleset version (after completion of the requested operation, in the + * case of rollback, commit, or invalidate) as an opaque hash value in the same + * form as MC_CMD_GET_SECURITY_RULESET_VERSION_OUT_VERSION + */ +#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_VERSION_OFST 4 +#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_VERSION_LEN 1 +#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_VERSION_MINNUM 1 +#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_VERSION_MAXNUM 248 +#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_VERSION_MAXNUM_MCDI2 1016 + + +/***********************************/ +/* MC_CMD_NVRAM_PRIVATE_APPEND + * Append a single TLV to the MC_USAGE_TLV partition. Returns MC_CMD_ERR_EEXIST + * if the tag is already present. + */ +#define MC_CMD_NVRAM_PRIVATE_APPEND 0x11c +#undef MC_CMD_0x11c_PRIVILEGE_CTG + +#define MC_CMD_0x11c_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND + +/* MC_CMD_NVRAM_PRIVATE_APPEND_IN msgrequest */ +#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENMIN 9 +#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENMAX 252 +#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENMAX_MCDI2 1020 +#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LEN(num) (8+1*(num)) +#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_NUM(len) (((len)-8)/1) +/* The tag to be appended */ +#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_TAG_OFST 0 +#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_TAG_LEN 4 +/* The length of the data */ +#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENGTH_OFST 4 +#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENGTH_LEN 4 +/* The data to be contained in the TLV structure */ +#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_OFST 8 +#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_LEN 1 +#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_MINNUM 1 +#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_MAXNUM 244 +#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_MAXNUM_MCDI2 1012 + +/* MC_CMD_NVRAM_PRIVATE_APPEND_OUT msgresponse */ +#define MC_CMD_NVRAM_PRIVATE_APPEND_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_XPM_VERIFY_CONTENTS + * Verify that the contents of the XPM memory is correct (Medford only). This + * is used during manufacture to check that the XPM memory has been programmed + * correctly at ATE. 
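Illustration only: a sketch of building a MC_CMD_NVRAM_PRIVATE_APPEND request for a single TLV, assuming little-endian MCDI encoding and the non-MCDI2 size limits above; put_le32() is a local helper. Per the description above, the command fails with MC_CMD_ERR_EEXIST if the tag is already present.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

static void put_le32(uint8_t *p, uint32_t v)
{
	p[0] = (uint8_t)v;
	p[1] = (uint8_t)(v >> 8);
	p[2] = (uint8_t)(v >> 16);
	p[3] = (uint8_t)(v >> 24);
}

/* Returns the request length in bytes, or 0 if the data does not fit. */
static size_t build_private_append_req(uint8_t *req, uint32_t tag,
				       const uint8_t *data, size_t len)
{
	if (len < MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_MINNUM ||
	    len > MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_MAXNUM)
		return 0;
	put_le32(req + MC_CMD_NVRAM_PRIVATE_APPEND_IN_TAG_OFST, tag);
	put_le32(req + MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENGTH_OFST, (uint32_t)len);
	memcpy(req + MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_OFST, data, len);
	return MC_CMD_NVRAM_PRIVATE_APPEND_IN_LEN(len);
}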
+ */ +#define MC_CMD_XPM_VERIFY_CONTENTS 0x11b +#undef MC_CMD_0x11b_PRIVILEGE_CTG + +#define MC_CMD_0x11b_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_XPM_VERIFY_CONTENTS_IN msgrequest */ +#define MC_CMD_XPM_VERIFY_CONTENTS_IN_LEN 4 +/* Data type to be checked */ +#define MC_CMD_XPM_VERIFY_CONTENTS_IN_DATA_TYPE_OFST 0 +#define MC_CMD_XPM_VERIFY_CONTENTS_IN_DATA_TYPE_LEN 4 + +/* MC_CMD_XPM_VERIFY_CONTENTS_OUT msgresponse */ +#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_LENMIN 12 +#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_LENMAX 252 +#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_LEN(num) (12+1*(num)) +#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_NUM(len) (((len)-12)/1) +/* Number of sectors found (test builds only) */ +#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_SECTORS_OFST 0 +#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_SECTORS_LEN 4 +/* Number of bytes found (test builds only) */ +#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_BYTES_OFST 4 +#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_BYTES_LEN 4 +/* Length of signature */ +#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIG_LENGTH_OFST 8 +#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIG_LENGTH_LEN 4 +/* Signature */ +#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_OFST 12 +#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_LEN 1 +#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_MINNUM 0 +#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_MAXNUM 240 +#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_MAXNUM_MCDI2 1008 + + +/***********************************/ +/* MC_CMD_SET_EVQ_TMR + * Update the timer load, timer reload and timer mode values for a given EVQ. + * The requested timer values (in TMR_LOAD_REQ_NS and TMR_RELOAD_REQ_NS) will + * be rounded up to the granularity supported by the hardware, then truncated + * to the range supported by the hardware. The resulting value after the + * rounding and truncation will be returned to the caller (in TMR_LOAD_ACT_NS + * and TMR_RELOAD_ACT_NS). + */ +#define MC_CMD_SET_EVQ_TMR 0x120 +#undef MC_CMD_0x120_PRIVILEGE_CTG + +#define MC_CMD_0x120_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_SET_EVQ_TMR_IN msgrequest */ +#define MC_CMD_SET_EVQ_TMR_IN_LEN 16 +/* Function-relative queue instance */ +#define MC_CMD_SET_EVQ_TMR_IN_INSTANCE_OFST 0 +#define MC_CMD_SET_EVQ_TMR_IN_INSTANCE_LEN 4 +/* Requested value for timer load (in nanoseconds) */ +#define MC_CMD_SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS_OFST 4 +#define MC_CMD_SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS_LEN 4 +/* Requested value for timer reload (in nanoseconds) */ +#define MC_CMD_SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS_OFST 8 +#define MC_CMD_SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS_LEN 4 +/* Timer mode. 
Meanings as per EVQ_TMR_REG.TC_TIMER_VAL */ +#define MC_CMD_SET_EVQ_TMR_IN_TMR_MODE_OFST 12 +#define MC_CMD_SET_EVQ_TMR_IN_TMR_MODE_LEN 4 +#define MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_DIS 0x0 /* enum */ +#define MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_IMMED_START 0x1 /* enum */ +#define MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_TRIG_START 0x2 /* enum */ +#define MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_INT_HLDOFF 0x3 /* enum */ + +/* MC_CMD_SET_EVQ_TMR_OUT msgresponse */ +#define MC_CMD_SET_EVQ_TMR_OUT_LEN 8 +/* Actual value for timer load (in nanoseconds) */ +#define MC_CMD_SET_EVQ_TMR_OUT_TMR_LOAD_ACT_NS_OFST 0 +#define MC_CMD_SET_EVQ_TMR_OUT_TMR_LOAD_ACT_NS_LEN 4 +/* Actual value for timer reload (in nanoseconds) */ +#define MC_CMD_SET_EVQ_TMR_OUT_TMR_RELOAD_ACT_NS_OFST 4 +#define MC_CMD_SET_EVQ_TMR_OUT_TMR_RELOAD_ACT_NS_LEN 4 + + +/***********************************/ +/* MC_CMD_GET_EVQ_TMR_PROPERTIES + * Query properties about the event queue timers. + */ +#define MC_CMD_GET_EVQ_TMR_PROPERTIES 0x122 +#undef MC_CMD_0x122_PRIVILEGE_CTG + +#define MC_CMD_0x122_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_EVQ_TMR_PROPERTIES_IN msgrequest */ +#define MC_CMD_GET_EVQ_TMR_PROPERTIES_IN_LEN 0 + +/* MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT msgresponse */ +#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_LEN 36 +/* Reserved for future use. */ +#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_FLAGS_OFST 0 +#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_FLAGS_LEN 4 +/* For timers updated via writes to EVQ_TMR_REG, this is the time interval (in + * nanoseconds) for each increment of the timer load/reload count. The + * requested duration of a timer is this value multiplied by the timer + * load/reload count. + */ +#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_NS_PER_COUNT_OFST 4 +#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_NS_PER_COUNT_LEN 4 +/* For timers updated via writes to EVQ_TMR_REG, this is the maximum value + * allowed for timer load/reload counts. + */ +#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_MAX_COUNT_OFST 8 +#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_MAX_COUNT_LEN 4 +/* For timers updated via writes to EVQ_TMR_REG, timer load/reload counts not a + * multiple of this step size will be rounded in an implementation defined + * manner. + */ +#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_STEP_OFST 12 +#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_STEP_LEN 4 +/* Maximum timer duration (in nanoseconds) for timers updated via MCDI. Only + * meaningful if MC_CMD_SET_EVQ_TMR is implemented. + */ +#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_MAX_NS_OFST 16 +#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_MAX_NS_LEN 4 +/* Timer durations requested via MCDI that are not a multiple of this step size + * will be rounded up. Only meaningful if MC_CMD_SET_EVQ_TMR is implemented. + */ +#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_STEP_NS_OFST 20 +#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_STEP_NS_LEN 4 +/* For timers updated using the bug35388 workaround, this is the time interval + * (in nanoseconds) for each increment of the timer load/reload count. The + * requested duration of a timer is this value multiplied by the timer + * load/reload count. This field is only meaningful if the bug35388 workaround + * is enabled. + */ +#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_NS_PER_COUNT_OFST 24 +#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_NS_PER_COUNT_LEN 4 +/* For timers updated using the bug35388 workaround, this is the maximum value + * allowed for timer load/reload counts. 
This field is only meaningful if the + * bug35388 workaround is enabled. + */ +#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_MAX_COUNT_OFST 28 +#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_MAX_COUNT_LEN 4 +/* For timers updated using the bug35388 workaround, timer load/reload counts + * not a multiple of this step size will be rounded in an implementation + * defined manner. This field is only meaningful if the bug35388 workaround is + * enabled. + */ +#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_STEP_OFST 32 +#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_STEP_LEN 4 + + +/***********************************/ +/* MC_CMD_ALLOCATE_TX_VFIFO_CP + * When we use the TX_vFIFO_ULL mode, we can allocate common pools using the + * non used switch buffers. + */ +#define MC_CMD_ALLOCATE_TX_VFIFO_CP 0x11d +#undef MC_CMD_0x11d_PRIVILEGE_CTG + +#define MC_CMD_0x11d_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_ALLOCATE_TX_VFIFO_CP_IN msgrequest */ +#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_LEN 20 +/* Desired instance. Must be set to a specific instance, which is a function + * local queue index. + */ +#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INSTANCE_OFST 0 +#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INSTANCE_LEN 4 +/* Will the common pool be used as TX_vFIFO_ULL (1) */ +#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_MODE_OFST 4 +#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_MODE_LEN 4 +#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_ENABLED 0x1 /* enum */ +/* enum: Using this interface without TX_vFIFO_ULL is not supported for now */ +#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_DISABLED 0x0 +/* Number of buffers to reserve for the common pool */ +#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_SIZE_OFST 8 +#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_SIZE_LEN 4 +/* TX datapath to which the Common Pool is connected to. */ +#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INGRESS_OFST 12 +#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INGRESS_LEN 4 +/* enum: Extracts information from function */ +#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_USE_FUNCTION_VALUE -0x1 +/* Network port or RX Engine to which the common pool connects. */ +#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_EGRESS_OFST 16 +#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_EGRESS_LEN 4 +/* enum: Extracts information from function */ +/* MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_USE_FUNCTION_VALUE -0x1 */ +#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT0 0x0 /* enum */ +#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT1 0x1 /* enum */ +#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT2 0x2 /* enum */ +#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT3 0x3 /* enum */ +/* enum: To enable Switch loopback with Rx engine 0 */ +#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_RX_ENGINE0 0x4 +/* enum: To enable Switch loopback with Rx engine 1 */ +#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_RX_ENGINE1 0x5 + +/* MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT msgresponse */ +#define MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT_LEN 4 +/* ID of the common pool allocated */ +#define MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT_CP_ID_OFST 0 +#define MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT_CP_ID_LEN 4 + + +/***********************************/ +/* MC_CMD_ALLOCATE_TX_VFIFO_VFIFO + * When we use the TX_vFIFO_ULL mode, we can allocate vFIFOs using the + * previously allocated common pools. 
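Illustration only: a sketch of building the MC_CMD_ALLOCATE_TX_VFIFO_CP request defined above, letting the firmware derive the ingress datapath from the function and connecting the egress to network port 0; assumes little-endian MCDI encoding, with put_le32() as a local helper.

#include <stdint.h>
#include <string.h>

static void put_le32(uint8_t *p, uint32_t v)
{
	p[0] = (uint8_t)v;
	p[1] = (uint8_t)(v >> 8);
	p[2] = (uint8_t)(v >> 16);
	p[3] = (uint8_t)(v >> 24);
}

static void build_cp_alloc_req(uint8_t *req, uint32_t instance,
			       uint32_t num_buffers)
{
	memset(req, 0, MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_LEN);
	put_le32(req + MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INSTANCE_OFST, instance);
	put_le32(req + MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_MODE_OFST,
		 MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_ENABLED);
	put_le32(req + MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_SIZE_OFST, num_buffers);
	put_le32(req + MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INGRESS_OFST,
		 (uint32_t)MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_USE_FUNCTION_VALUE);
	put_le32(req + MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_EGRESS_OFST,
		 MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT0);
}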
+ */ +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO 0x11e +#undef MC_CMD_0x11e_PRIVILEGE_CTG + +#define MC_CMD_0x11e_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN msgrequest */ +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_LEN 20 +/* Common pool previously allocated to which the new vFIFO will be associated + */ +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_CP_OFST 0 +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_CP_LEN 4 +/* Port or RX engine to associate the vFIFO egress */ +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_EGRESS_OFST 4 +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_EGRESS_LEN 4 +/* enum: Extracts information from common pool */ +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_USE_CP_VALUE -0x1 +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT0 0x0 /* enum */ +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT1 0x1 /* enum */ +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT2 0x2 /* enum */ +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT3 0x3 /* enum */ +/* enum: To enable Switch loopback with Rx engine 0 */ +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_RX_ENGINE0 0x4 +/* enum: To enable Switch loopback with Rx engine 1 */ +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_RX_ENGINE1 0x5 +/* Minimum number of buffers that the pool must have */ +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_SIZE_OFST 8 +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_SIZE_LEN 4 +/* enum: Do not check the space available */ +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_NO_MINIMUM 0x0 +/* Will the vFIFO be used as TX_vFIFO_ULL */ +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_MODE_OFST 12 +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_MODE_LEN 4 +/* Network priority of the vFIFO,if applicable */ +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PRIORITY_OFST 16 +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PRIORITY_LEN 4 +/* enum: Search for the lowest unused priority */ +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_LOWEST_AVAILABLE -0x1 + +/* MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT msgresponse */ +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_LEN 8 +/* Short vFIFO ID */ +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_VID_OFST 0 +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_VID_LEN 4 +/* Network priority of the vFIFO */ +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_PRIORITY_OFST 4 +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_PRIORITY_LEN 4 + + +/***********************************/ +/* MC_CMD_TEARDOWN_TX_VFIFO_VF + * This interface clears the configuration of the given vFIFO and leaves it + * ready to be re-used. + */ +#define MC_CMD_TEARDOWN_TX_VFIFO_VF 0x11f +#undef MC_CMD_0x11f_PRIVILEGE_CTG + +#define MC_CMD_0x11f_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_TEARDOWN_TX_VFIFO_VF_IN msgrequest */ +#define MC_CMD_TEARDOWN_TX_VFIFO_VF_IN_LEN 4 +/* Short vFIFO ID */ +#define MC_CMD_TEARDOWN_TX_VFIFO_VF_IN_VFIFO_OFST 0 +#define MC_CMD_TEARDOWN_TX_VFIFO_VF_IN_VFIFO_LEN 4 + +/* MC_CMD_TEARDOWN_TX_VFIFO_VF_OUT msgresponse */ +#define MC_CMD_TEARDOWN_TX_VFIFO_VF_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_DEALLOCATE_TX_VFIFO_CP + * This interface clears the configuration of the given common pool and leaves + * it ready to be re-used. 
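Illustration only: a companion sketch for the MC_CMD_ALLOCATE_TX_VFIFO_VFIFO request above, attaching a vFIFO to a previously allocated common pool, inheriting the egress from the pool and letting the firmware pick the lowest unused priority; 'ull_mode' is simply passed through by the caller. Assumes little-endian MCDI encoding; put_le32() is a local helper.

#include <stdint.h>
#include <string.h>

static void put_le32(uint8_t *p, uint32_t v)
{
	p[0] = (uint8_t)v;
	p[1] = (uint8_t)(v >> 8);
	p[2] = (uint8_t)(v >> 16);
	p[3] = (uint8_t)(v >> 24);
}

static void build_vfifo_alloc_req(uint8_t *req, uint32_t cp_id,
				  uint32_t min_buffers, uint32_t ull_mode)
{
	memset(req, 0, MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_LEN);
	put_le32(req + MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_CP_OFST, cp_id);
	put_le32(req + MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_EGRESS_OFST,
		 (uint32_t)MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_USE_CP_VALUE);
	put_le32(req + MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_SIZE_OFST, min_buffers);
	put_le32(req + MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_MODE_OFST, ull_mode);
	put_le32(req + MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PRIORITY_OFST,
		 (uint32_t)MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_LOWEST_AVAILABLE);
}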
+ */ +#define MC_CMD_DEALLOCATE_TX_VFIFO_CP 0x121 +#undef MC_CMD_0x121_PRIVILEGE_CTG + +#define MC_CMD_0x121_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN msgrequest */ +#define MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN_LEN 4 +/* Common pool ID given when pool allocated */ +#define MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN_POOL_ID_OFST 0 +#define MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN_POOL_ID_LEN 4 + +/* MC_CMD_DEALLOCATE_TX_VFIFO_CP_OUT msgresponse */ +#define MC_CMD_DEALLOCATE_TX_VFIFO_CP_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_REKEY + * This request causes the NIC to generate a new per-NIC key and program it + * into the write-once memory. During the process all flash partitions that are + * protected with a CMAC are verified with the old per-NIC key and then signed + * with the new per-NIC key. If the NIC has already reached its rekey limit the + * REKEY op will return MC_CMD_ERR_ERANGE. The REKEY op may block until + * completion or it may return 0 and continue processing, therefore the caller + * must poll at least once to confirm that the rekeying has completed. The POLL + * operation returns MC_CMD_ERR_EBUSY if the rekey process is still running + * otherwise it will return the result of the last completed rekey operation, + * or 0 if there has not been a previous rekey. + */ +#define MC_CMD_REKEY 0x123 +#undef MC_CMD_0x123_PRIVILEGE_CTG + +#define MC_CMD_0x123_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND + +/* MC_CMD_REKEY_IN msgrequest */ +#define MC_CMD_REKEY_IN_LEN 4 +/* the type of operation requested */ +#define MC_CMD_REKEY_IN_OP_OFST 0 +#define MC_CMD_REKEY_IN_OP_LEN 4 +/* enum: Start the rekeying operation */ +#define MC_CMD_REKEY_IN_OP_REKEY 0x0 +/* enum: Poll for completion of the rekeying operation */ +#define MC_CMD_REKEY_IN_OP_POLL 0x1 + +/* MC_CMD_REKEY_OUT msgresponse */ +#define MC_CMD_REKEY_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS + * This interface allows the host to find out how many common pool buffers are + * not yet assigned. + */ +#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS 0x124 +#undef MC_CMD_0x124_PRIVILEGE_CTG + +#define MC_CMD_0x124_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_IN msgrequest */ +#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_IN_LEN 0 + +/* MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT msgresponse */ +#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_LEN 8 +/* Available buffers for the ENG to NET vFIFOs. */ +#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_NET_OFST 0 +#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_NET_LEN 4 +/* Available buffers for the ENG to ENG and NET to ENG vFIFOs. */ +#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_ENG_OFST 4 +#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_ENG_LEN 4 + + +/***********************************/ +/* MC_CMD_SET_SECURITY_FUSES + * Change the security level of the adapter by setting bits in the write-once + * memory. The firmware maps each flag in the message to a set of one or more + * hardware-defined or software-defined bits and sets these bits in the write- + * once memory. For Medford the hardware-defined bits are defined in + * SF-112079-PS 5.3, the software-defined bits are defined in xpm.h. Returns 0 + * if all of the required bits were set and returns MC_CMD_ERR_EIO if any of + * the required bits were not set. 
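+ *
+ * Usage sketch (illustrative only) of the start-then-poll sequence that the
+ * MC_CMD_REKEY description above calls for, reusing the hypothetical
+ * mcdi_put_dword() and mcdi_rpc() helpers sketched alongside
+ * MC_CMD_ALLOCATE_TX_VFIFO_CP.  It assumes the transport maps
+ * MC_CMD_ERR_EBUSY onto -EBUSY; adjust for the actual error mapping in use.
+ *
+ *   #include <errno.h>
+ *
+ *   static int rekey_and_wait(void)
+ *   {
+ *           uint8_t in[MC_CMD_REKEY_IN_LEN] = {0};
+ *           int rc;
+ *
+ *           mcdi_put_dword(in, MC_CMD_REKEY_IN_OP_OFST, MC_CMD_REKEY_IN_OP_REKEY);
+ *           rc = mcdi_rpc(MC_CMD_REKEY, in, sizeof(in), NULL, 0);
+ *           if (rc != 0)
+ *                   return rc;
+ *
+ *           // The REKEY op may return before the rekey has finished, so poll
+ *           // until the firmware stops reporting that it is busy.
+ *           do {
+ *                   mcdi_put_dword(in, MC_CMD_REKEY_IN_OP_OFST,
+ *                                  MC_CMD_REKEY_IN_OP_POLL);
+ *                   rc = mcdi_rpc(MC_CMD_REKEY, in, sizeof(in), NULL, 0);
+ *           } while (rc == -EBUSY);
+ *           return rc;
+ *   }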
+ */
+#define MC_CMD_SET_SECURITY_FUSES 0x126
+#undef MC_CMD_0x126_PRIVILEGE_CTG
+
+#define MC_CMD_0x126_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
+
+/* MC_CMD_SET_SECURITY_FUSES_IN msgrequest */
+#define MC_CMD_SET_SECURITY_FUSES_IN_LEN 4
+/* Flags specifying what type of security features are being set */
+#define MC_CMD_SET_SECURITY_FUSES_IN_FLAGS_OFST 0
+#define MC_CMD_SET_SECURITY_FUSES_IN_FLAGS_LEN 4
+#define MC_CMD_SET_SECURITY_FUSES_IN_SECURE_BOOT_LBN 0
+#define MC_CMD_SET_SECURITY_FUSES_IN_SECURE_BOOT_WIDTH 1
+#define MC_CMD_SET_SECURITY_FUSES_IN_REJECT_TEST_SIGNED_LBN 1
+#define MC_CMD_SET_SECURITY_FUSES_IN_REJECT_TEST_SIGNED_WIDTH 1
+#define MC_CMD_SET_SECURITY_FUSES_IN_SOFT_CONFIG_LBN 31
+#define MC_CMD_SET_SECURITY_FUSES_IN_SOFT_CONFIG_WIDTH 1
+
+/* MC_CMD_SET_SECURITY_FUSES_OUT msgresponse */
+#define MC_CMD_SET_SECURITY_FUSES_OUT_LEN 0
+
+/* MC_CMD_SET_SECURITY_FUSES_V2_OUT msgresponse */
+#define MC_CMD_SET_SECURITY_FUSES_V2_OUT_LEN 4
+/* Flags specifying which security features are enforced on the NIC after the
+ * flags in the request have been applied. See
+ * MC_CMD_SET_SECURITY_FUSES_IN/FLAGS for flag definitions.
+ */
+#define MC_CMD_SET_SECURITY_FUSES_V2_OUT_FLAGS_OFST 0
+#define MC_CMD_SET_SECURITY_FUSES_V2_OUT_FLAGS_LEN 4
+
+
+/***********************************/
+/* MC_CMD_TSA_INFO
+ * Messages sent from TSA adapter to TSA controller. This command is only valid
+ * when the MCDI header has MESSAGE_TYPE set to MCDI_MESSAGE_TYPE_TSA. This
+ * command is not sent by the driver to the MC; it is sent from the MC to a TSA
+ * controller, and is treated as an alert message rather than a command; hence
+ * the MC does not expect a response in return. Doxbox reference SF-117371-SW
+ */
+#define MC_CMD_TSA_INFO 0x127
+#undef MC_CMD_0x127_PRIVILEGE_CTG
+
+#define MC_CMD_0x127_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
+
+/* MC_CMD_TSA_INFO_IN msgrequest */
+#define MC_CMD_TSA_INFO_IN_LEN 4
+#define MC_CMD_TSA_INFO_IN_OP_HDR_OFST 0
+#define MC_CMD_TSA_INFO_IN_OP_HDR_LEN 4
+#define MC_CMD_TSA_INFO_IN_OP_LBN 0
+#define MC_CMD_TSA_INFO_IN_OP_WIDTH 16
+/* enum: Information about a recently discovered local IP address of the
+ * adapter
+ */
+#define MC_CMD_TSA_INFO_OP_LOCAL_IP 0x1
+/* enum: Information about a sampled packet that either - did not match any
+ * black/white-list filters and was allowed by the default filter or - did not
+ * match any black/white-list filters and was denied by the default filter
+ */
+#define MC_CMD_TSA_INFO_OP_PKT_SAMPLE 0x2
+/* enum: Information about an unbind or decommission attempt. */
+#define MC_CMD_TSA_INFO_OP_UNBIND 0x3
+
+/* MC_CMD_TSA_INFO_IN_LOCAL_IP msgrequest:
+ *
+ * The TSA controller maintains a list of IP addresses valid for each port of a
+ * TSA adapter. The TSA controller requires information from the adapter
+ * in order to learn new IP addresses assigned to a physical port and to
+ * identify those that are no longer assigned to the physical port. For this
+ * purpose, the TSA adapter snoops ARP replies, gratuitous ARP requests and ARP
+ * probe packets seen on each physical port. This definition describes the
+ * format of the notification message sent from a TSA adapter to a TSA
+ * controller relating to a change in IP address assignment for a port. Doxbox
+ * reference SF-117371.
+ *
+ * There may be a possibility of combining multiple notifications in a single
+ * message in the future.
+ * When that happens, a new flag can be defined using the
+ * reserved bits to describe the extended format of this notification.
+ */
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_LEN 18
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_OP_HDR_OFST 0
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_OP_HDR_LEN 4
+/* Additional metadata describing the IP address information such as source of
+ * information retrieval, type of IP address, physical port number.
+ */
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_META_OFST 4
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_META_LEN 4
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_META_PORT_INDEX_LBN 0
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_META_PORT_INDEX_WIDTH 8
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_RESERVED_LBN 8
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_RESERVED_WIDTH 8
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_META_REASON_LBN 16
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_META_REASON_WIDTH 8
+/* enum: ARP reply sent out of the physical port */
+#define MC_CMD_TSA_INFO_IP_REASON_TX_ARP 0x0
+/* enum: ARP probe packet received on the physical port */
+#define MC_CMD_TSA_INFO_IP_REASON_RX_ARP_PROBE 0x1
+/* enum: Gratuitous ARP packet received on the physical port */
+#define MC_CMD_TSA_INFO_IP_REASON_RX_GRATUITOUS_ARP 0x2
+/* enum: DHCP ACK packet received on the physical port */
+#define MC_CMD_TSA_INFO_IP_REASON_RX_DHCP_ACK 0x3
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_META_IPV4_LBN 24
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_META_IPV4_WIDTH 1
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_RESERVED1_LBN 25
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_RESERVED1_WIDTH 7
+/* IPV4 address retrieved from the sampled packets. This field is relevant only
+ * when META_IPV4 is set to 1.
+ */
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_IPV4_ADDR_OFST 8
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_IPV4_ADDR_LEN 4
+/* Target MAC address retrieved from the sampled packet. */
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_MAC_ADDR_OFST 12
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_MAC_ADDR_LEN 1
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_MAC_ADDR_NUM 6
+
+/* MC_CMD_TSA_INFO_IN_PKT_SAMPLE msgrequest:
+ *
+ * It is desirable for the TSA controller to learn the traffic pattern of
+ * packets seen at the network port being monitored. In order to learn about
+ * the traffic pattern, the TSA controller may want to sample packets seen at
+ * the network port. Based on the packet samples that the TSA controller
+ * receives from the adapter, the controller may choose to configure additional
+ * black-list or white-list rules to allow or block packets as required.
+ *
+ * Although the entire sampled packet as seen on the network port is available
+ * to the MC, the length of the sampled packet sent to the controller is
+ * restricted by the MCDI payload size. Besides, the TSA controller does not
+ * require the entire packet to make decisions about filter updates. Hence the
+ * packet sample passed to the controller is truncated to 128 bytes. This
+ * length is large enough to hold the Ethernet header, IP header and the
+ * maximum length of supported L4 protocol headers (IPv4 only, but it can hold
+ * an IPv6 header too, if required in the future).
+ *
+ * The intention is that any future changes to this message format that are not
+ * backwards compatible will be defined with a new operation code.
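+ *
+ * Decoding sketch (illustrative only) for the MC_CMD_TSA_INFO_IN_LOCAL_IP
+ * layout defined above, showing how the _LBN/_WIDTH pairs describe sub-fields
+ * of the little-endian META dword.  mcdi_get_dword() is the hypothetical
+ * helper sketched alongside MC_CMD_ALLOCATE_TX_VFIFO_CP; everything else
+ * comes from this header.
+ *
+ *   static uint32_t mcdi_get_bits(const uint8_t *buf, size_t ofst,
+ *                                 unsigned int lbn, unsigned int width)
+ *   {
+ *           return (mcdi_get_dword(buf, ofst) >> lbn) & ((1u << width) - 1u);
+ *   }
+ *
+ *   struct local_ip_info {
+ *           uint32_t port_index;
+ *           uint32_t reason;      // MC_CMD_TSA_INFO_IP_REASON_*
+ *           uint32_t ipv4_addr;   // valid only when META_IPV4 is set
+ *   };
+ *
+ *   static void parse_local_ip(const uint8_t *msg, struct local_ip_info *info)
+ *   {
+ *           size_t meta = MC_CMD_TSA_INFO_IN_LOCAL_IP_META_OFST;
+ *
+ *           info->port_index = mcdi_get_bits(msg, meta,
+ *               MC_CMD_TSA_INFO_IN_LOCAL_IP_META_PORT_INDEX_LBN,
+ *               MC_CMD_TSA_INFO_IN_LOCAL_IP_META_PORT_INDEX_WIDTH);
+ *           info->reason = mcdi_get_bits(msg, meta,
+ *               MC_CMD_TSA_INFO_IN_LOCAL_IP_META_REASON_LBN,
+ *               MC_CMD_TSA_INFO_IN_LOCAL_IP_META_REASON_WIDTH);
+ *           info->ipv4_addr = 0;
+ *           if (mcdi_get_bits(msg, meta,
+ *                             MC_CMD_TSA_INFO_IN_LOCAL_IP_META_IPV4_LBN,
+ *                             MC_CMD_TSA_INFO_IN_LOCAL_IP_META_IPV4_WIDTH))
+ *                   info->ipv4_addr = mcdi_get_dword(msg,
+ *                       MC_CMD_TSA_INFO_IN_LOCAL_IP_IPV4_ADDR_OFST);
+ *   }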
+ */
+#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_LEN 136
+#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_OP_HDR_OFST 0
+#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_OP_HDR_LEN 4
+/* Additional metadata describing the sampled packet */
+#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_OFST 4
+#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_LEN 4
+#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_PORT_INDEX_LBN 0
+#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_PORT_INDEX_WIDTH 8
+#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_DIRECTION_LBN 8
+#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_DIRECTION_WIDTH 1
+#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_RESERVED_LBN 9
+#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_RESERVED_WIDTH 7
+#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_ACTION_MASK_LBN 16
+#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_ACTION_MASK_WIDTH 4
+#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_ACTION_ALLOW_LBN 16
+#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_ACTION_ALLOW_WIDTH 1
+#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_ACTION_DENY_LBN 17
+#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_ACTION_DENY_WIDTH 1
+#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_ACTION_COUNT_LBN 18
+#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_ACTION_COUNT_WIDTH 1
+/* 128-byte raw prefix of the sampled packet which includes the Ethernet
+ * header, IP header and L4 protocol header (only IPv4 supported initially).
+ * This provides the controller with enough information about the packet sample
+ * to report traffic patterns seen on a network port and to make decisions
+ * concerning rule-set updates.
+ */
+#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_PACKET_DATA_OFST 8
+#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_PACKET_DATA_LEN 1
+#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_PACKET_DATA_NUM 128
+
+/* MC_CMD_TSA_INFO_IN_UNBIND msgrequest: Information about an unbind or
+ * decommission attempt. The purpose of this event is to let the controller
+ * know about unbind and decommission attempts (both successful and failed)
+ * received from the adapter host. The event is not sent if the unbind or
+ * decommission request was received from the controller.
+ */
+#define MC_CMD_TSA_INFO_IN_UNBIND_LEN 12
+#define MC_CMD_TSA_INFO_IN_UNBIND_OP_HDR_OFST 0
+#define MC_CMD_TSA_INFO_IN_UNBIND_OP_HDR_LEN 4
+#define MC_CMD_TSA_INFO_IN_UNBIND_OP_LBN 0
+#define MC_CMD_TSA_INFO_IN_UNBIND_OP_WIDTH 16
+/* Type of the unbind attempt. */
+#define MC_CMD_TSA_INFO_IN_UNBIND_TYPE_OFST 4
+#define MC_CMD_TSA_INFO_IN_UNBIND_TYPE_LEN 4
+/* enum: This event is sent because MC_CMD_TSA_BIND_OP_SECURE_UNBIND was
+ * received from the adapter local host.
+ */
+#define MC_CMD_TSA_INFO_UNBIND_TYPE_SECURE_UNBIND 0x1
+/* enum: This event is sent because MC_CMD_TSA_BIND_OP_SECURE_DECOMMISSION was
+ * received from the adapter local host.
+ */
+#define MC_CMD_TSA_INFO_UNBIND_TYPE_SECURE_DECOMMISSION 0x2
+/* Result of the attempt. */
+#define MC_CMD_TSA_INFO_IN_UNBIND_RESULT_OFST 8
+#define MC_CMD_TSA_INFO_IN_UNBIND_RESULT_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_TSA_BIND/MC_CMD_TSA_BIND_OUT_SECURE_UNBIND/RESULT */
+
+/* MC_CMD_TSA_INFO_OUT msgresponse */
+#define MC_CMD_TSA_INFO_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_HOST_INFO
+ * Commands to apply or retrieve host-related information from an adapter.
+ * Doxbox reference SF-117371-SW + */ +#define MC_CMD_HOST_INFO 0x128 +#undef MC_CMD_0x128_PRIVILEGE_CTG + +#define MC_CMD_0x128_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_HOST_INFO_IN msgrequest */ +#define MC_CMD_HOST_INFO_IN_LEN 4 +/* sub-operation code info */ +#define MC_CMD_HOST_INFO_IN_OP_HDR_OFST 0 +#define MC_CMD_HOST_INFO_IN_OP_HDR_LEN 4 +#define MC_CMD_HOST_INFO_IN_OP_LBN 0 +#define MC_CMD_HOST_INFO_IN_OP_WIDTH 16 +/* enum: Read a 16-byte unique host identifier from the adapter. This UUID + * helps to identify the host that an adapter is plugged into. This identifier + * is ideally the system UUID retrieved and set by the UEFI driver. If the UEFI + * driver is unable to extract the system UUID, it would still set a random + * 16-byte value into each supported SF adapter plugged into it. Host UUIDs may + * change if the system is power-cycled, however, they persist across adapter + * resets. If the host UUID was not set on an adapter, due to an unsupported + * version of UEFI driver, then this command returns an error. Doxbox reference + * - SF-117371-SW section 'Host UUID'. + */ +#define MC_CMD_HOST_INFO_OP_GET_UUID 0x0 +/* enum: Set a 16-byte unique host identifier on the adapter to identify the + * host that the adapter is plugged into. See MC_CMD_HOST_INFO_OP_GET_UUID for + * further details. + */ +#define MC_CMD_HOST_INFO_OP_SET_UUID 0x1 + +/* MC_CMD_HOST_INFO_IN_GET_UUID msgrequest */ +#define MC_CMD_HOST_INFO_IN_GET_UUID_LEN 4 +/* sub-operation code info */ +#define MC_CMD_HOST_INFO_IN_GET_UUID_OP_HDR_OFST 0 +#define MC_CMD_HOST_INFO_IN_GET_UUID_OP_HDR_LEN 4 + +/* MC_CMD_HOST_INFO_OUT_GET_UUID msgresponse */ +#define MC_CMD_HOST_INFO_OUT_GET_UUID_LEN 16 +/* 16-byte host UUID read out of the adapter. See MC_CMD_HOST_INFO_OP_GET_UUID + * for further details. + */ +#define MC_CMD_HOST_INFO_OUT_GET_UUID_HOST_UUID_OFST 0 +#define MC_CMD_HOST_INFO_OUT_GET_UUID_HOST_UUID_LEN 1 +#define MC_CMD_HOST_INFO_OUT_GET_UUID_HOST_UUID_NUM 16 + +/* MC_CMD_HOST_INFO_IN_SET_UUID msgrequest */ +#define MC_CMD_HOST_INFO_IN_SET_UUID_LEN 20 +/* sub-operation code info */ +#define MC_CMD_HOST_INFO_IN_SET_UUID_OP_HDR_OFST 0 +#define MC_CMD_HOST_INFO_IN_SET_UUID_OP_HDR_LEN 4 +/* 16-byte host UUID set on the adapter. See MC_CMD_HOST_INFO_OP_GET_UUID for + * further details. + */ +#define MC_CMD_HOST_INFO_IN_SET_UUID_HOST_UUID_OFST 4 +#define MC_CMD_HOST_INFO_IN_SET_UUID_HOST_UUID_LEN 1 +#define MC_CMD_HOST_INFO_IN_SET_UUID_HOST_UUID_NUM 16 + +/* MC_CMD_HOST_INFO_OUT_SET_UUID msgresponse */ +#define MC_CMD_HOST_INFO_OUT_SET_UUID_LEN 0 + + +/***********************************/ +/* MC_CMD_TSAN_INFO + * Get TSA adapter information. TSA controllers query each TSA adapter to learn + * some configuration parameters of each adapter. Doxbox reference SF-117371-SW + * section 'Adapter Information' + */ +#define MC_CMD_TSAN_INFO 0x129 +#undef MC_CMD_0x129_PRIVILEGE_CTG + +#define MC_CMD_0x129_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_TSAN_INFO_IN msgrequest */ +#define MC_CMD_TSAN_INFO_IN_LEN 4 +/* sub-operation code info */ +#define MC_CMD_TSAN_INFO_IN_OP_HDR_OFST 0 +#define MC_CMD_TSAN_INFO_IN_OP_HDR_LEN 4 +#define MC_CMD_TSAN_INFO_IN_OP_LBN 0 +#define MC_CMD_TSAN_INFO_IN_OP_WIDTH 16 +/* enum: Read configuration parameters and IDs that uniquely identify an + * adapter. The parameters include - host identification, adapter + * identification string and number of physical ports on the adapter. 
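+ *
+ * Usage sketch (illustrative only) for the MC_CMD_HOST_INFO operations defined
+ * above, reusing the hypothetical mcdi_put_dword()/mcdi_rpc() helpers sketched
+ * alongside MC_CMD_ALLOCATE_TX_VFIFO_CP.  The sub-operation code occupies the
+ * low 16 bits of the OP_HDR dword, so writing the enum value into that dword
+ * is sufficient here.
+ *
+ *   static int host_info_set_uuid(const uint8_t uuid[16])
+ *   {
+ *           uint8_t in[MC_CMD_HOST_INFO_IN_SET_UUID_LEN] = {0};
+ *
+ *           mcdi_put_dword(in, MC_CMD_HOST_INFO_IN_SET_UUID_OP_HDR_OFST,
+ *                          MC_CMD_HOST_INFO_OP_SET_UUID);
+ *           memcpy(in + MC_CMD_HOST_INFO_IN_SET_UUID_HOST_UUID_OFST, uuid,
+ *                  MC_CMD_HOST_INFO_IN_SET_UUID_HOST_UUID_NUM);
+ *           return mcdi_rpc(MC_CMD_HOST_INFO, in, sizeof(in), NULL, 0);
+ *   }
+ *
+ *   static int host_info_get_uuid(uint8_t uuid[16])
+ *   {
+ *           uint8_t in[MC_CMD_HOST_INFO_IN_GET_UUID_LEN] = {0};
+ *           uint8_t out[MC_CMD_HOST_INFO_OUT_GET_UUID_LEN] = {0};
+ *           int rc;
+ *
+ *           mcdi_put_dword(in, MC_CMD_HOST_INFO_IN_GET_UUID_OP_HDR_OFST,
+ *                          MC_CMD_HOST_INFO_OP_GET_UUID);
+ *           rc = mcdi_rpc(MC_CMD_HOST_INFO, in, sizeof(in), out, sizeof(out));
+ *           if (rc == 0)
+ *                   memcpy(uuid,
+ *                          out + MC_CMD_HOST_INFO_OUT_GET_UUID_HOST_UUID_OFST,
+ *                          MC_CMD_HOST_INFO_OUT_GET_UUID_HOST_UUID_NUM);
+ *           return rc;
+ *   }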
+ */ +#define MC_CMD_TSAN_INFO_OP_GET_CFG 0x0 + +/* MC_CMD_TSAN_INFO_IN_GET_CFG msgrequest */ +#define MC_CMD_TSAN_INFO_IN_GET_CFG_LEN 4 +/* sub-operation code info */ +#define MC_CMD_TSAN_INFO_IN_GET_CFG_OP_HDR_OFST 0 +#define MC_CMD_TSAN_INFO_IN_GET_CFG_OP_HDR_LEN 4 + +/* MC_CMD_TSAN_INFO_OUT_GET_CFG msgresponse */ +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_LEN 26 +/* Information about the configuration parameters returned in this response. */ +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_CONFIG_WORD_OFST 0 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_CONFIG_WORD_LEN 4 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_CAP_FLAGS_LBN 0 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_CAP_FLAGS_WIDTH 16 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_FLAG_HOST_UUID_VALID_LBN 0 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_FLAG_HOST_UUID_VALID_WIDTH 1 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_NUM_PORTS_LBN 16 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_NUM_PORTS_WIDTH 8 +/* 16-byte host UUID read out of the adapter. See MC_CMD_HOST_INFO_OP_GET_UUID + * for further details. + */ +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_HOST_UUID_OFST 4 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_HOST_UUID_LEN 1 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_HOST_UUID_NUM 16 +/* A unique identifier per adapter. The base MAC address of the card is used + * for this purpose. + */ +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_GUID_OFST 20 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_GUID_LEN 1 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_GUID_NUM 6 + +/* MC_CMD_TSAN_INFO_OUT_GET_CFG_V2 msgresponse */ +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_LEN 36 +/* Information about the configuration parameters returned in this response. */ +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_CONFIG_WORD_OFST 0 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_CONFIG_WORD_LEN 4 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_CAP_FLAGS_LBN 0 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_CAP_FLAGS_WIDTH 16 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_FLAG_HOST_UUID_VALID_LBN 0 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_FLAG_HOST_UUID_VALID_WIDTH 1 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_NUM_PORTS_LBN 16 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_NUM_PORTS_WIDTH 8 +/* 16-byte host UUID read out of the adapter. See MC_CMD_HOST_INFO_OP_GET_UUID + * for further details. + */ +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_HOST_UUID_OFST 4 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_HOST_UUID_LEN 1 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_HOST_UUID_NUM 16 +/* A unique identifier per adapter. The base MAC address of the card is used + * for this purpose. + */ +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_GUID_OFST 20 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_GUID_LEN 1 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_GUID_NUM 6 +/* Unused bytes, defined for 32-bit alignment of new fields. */ +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_UNUSED_OFST 26 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_UNUSED_LEN 2 +/* Maximum number of TSA statistics counters in each direction of dataflow + * supported on the card. Note that the statistics counters are always + * allocated in pairs, i.e. a counter ID is associated with one Tx and one Rx + * counter. + */ +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_MAX_STATS_OFST 28 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_MAX_STATS_LEN 4 +/* Width of each statistics counter (represented in bits). This gives an + * indication of wrap point to the user. + */ +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_STATS_WIDTH_OFST 32 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_STATS_WIDTH_LEN 4 + + +/***********************************/ +/* MC_CMD_TSA_STATISTICS + * TSA adapter statistics operations. 
+ */ +#define MC_CMD_TSA_STATISTICS 0x130 +#undef MC_CMD_0x130_PRIVILEGE_CTG + +#define MC_CMD_0x130_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND + +/* MC_CMD_TSA_STATISTICS_IN msgrequest */ +#define MC_CMD_TSA_STATISTICS_IN_LEN 4 +/* TSA statistics sub-operation code */ +#define MC_CMD_TSA_STATISTICS_IN_OP_CODE_OFST 0 +#define MC_CMD_TSA_STATISTICS_IN_OP_CODE_LEN 4 +/* enum: Get the configuration parameters that describe the TSA statistics + * layout on the adapter. + */ +#define MC_CMD_TSA_STATISTICS_OP_GET_CONFIG 0x0 +/* enum: Read and/or clear TSA statistics counters. */ +#define MC_CMD_TSA_STATISTICS_OP_READ_CLEAR 0x1 + +/* MC_CMD_TSA_STATISTICS_IN_GET_CONFIG msgrequest */ +#define MC_CMD_TSA_STATISTICS_IN_GET_CONFIG_LEN 4 +/* TSA statistics sub-operation code */ +#define MC_CMD_TSA_STATISTICS_IN_GET_CONFIG_OP_CODE_OFST 0 +#define MC_CMD_TSA_STATISTICS_IN_GET_CONFIG_OP_CODE_LEN 4 + +/* MC_CMD_TSA_STATISTICS_OUT_GET_CONFIG msgresponse */ +#define MC_CMD_TSA_STATISTICS_OUT_GET_CONFIG_LEN 8 +/* Maximum number of TSA statistics counters in each direction of dataflow + * supported on the card. Note that the statistics counters are always + * allocated in pairs, i.e. a counter ID is associated with one Tx and one Rx + * counter. + */ +#define MC_CMD_TSA_STATISTICS_OUT_GET_CONFIG_MAX_STATS_OFST 0 +#define MC_CMD_TSA_STATISTICS_OUT_GET_CONFIG_MAX_STATS_LEN 4 +/* Width of each statistics counter (represented in bits). This gives an + * indication of wrap point to the user. + */ +#define MC_CMD_TSA_STATISTICS_OUT_GET_CONFIG_STATS_WIDTH_OFST 4 +#define MC_CMD_TSA_STATISTICS_OUT_GET_CONFIG_STATS_WIDTH_LEN 4 + +/* MC_CMD_TSA_STATISTICS_IN_READ_CLEAR msgrequest */ +#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_LENMIN 20 +#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_LENMAX 252 +#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_LENMAX_MCDI2 1020 +#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_LEN(num) (16+4*(num)) +#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_COUNTER_ID_NUM(len) (((len)-16)/4) +/* TSA statistics sub-operation code */ +#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_OP_CODE_OFST 0 +#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_OP_CODE_LEN 4 +/* Parameters describing the statistics operation */ +#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_FLAGS_OFST 4 +#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_FLAGS_LEN 4 +#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_READ_LBN 0 +#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_READ_WIDTH 1 +#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_CLEAR_LBN 1 +#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_CLEAR_WIDTH 1 +/* Counter ID list specification type */ +#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_MODE_OFST 8 +#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_MODE_LEN 4 +/* enum: The statistics counters are specified as an unordered list of + * individual counter ID. + */ +#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_LIST 0x0 +/* enum: The statistics counters are specified as a range of consecutive + * counter IDs. + */ +#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_RANGE 0x1 +/* Number of statistics counters */ +#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_NUM_STATS_OFST 12 +#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_NUM_STATS_LEN 4 +/* Counter IDs to be read/cleared. When mode is set to LIST, this entry holds a + * list of counter IDs to be operated on. When mode is set to RANGE, this entry + * holds a single counter ID representing the start of the range of counter IDs + * to be operated on. 
+ */ +#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_COUNTER_ID_OFST 16 +#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_COUNTER_ID_LEN 4 +#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_COUNTER_ID_MINNUM 1 +#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_COUNTER_ID_MAXNUM 59 +#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_COUNTER_ID_MAXNUM_MCDI2 251 + +/* MC_CMD_TSA_STATISTICS_OUT_READ_CLEAR msgresponse */ +#define MC_CMD_TSA_STATISTICS_OUT_READ_CLEAR_LENMIN 24 +#define MC_CMD_TSA_STATISTICS_OUT_READ_CLEAR_LENMAX 248 +#define MC_CMD_TSA_STATISTICS_OUT_READ_CLEAR_LENMAX_MCDI2 1016 +#define MC_CMD_TSA_STATISTICS_OUT_READ_CLEAR_LEN(num) (8+16*(num)) +#define MC_CMD_TSA_STATISTICS_OUT_READ_CLEAR_STATS_COUNTERS_NUM(len) (((len)-8)/16) +/* Number of statistics counters returned in this response */ +#define MC_CMD_TSA_STATISTICS_OUT_READ_CLEAR_NUM_STATS_OFST 0 +#define MC_CMD_TSA_STATISTICS_OUT_READ_CLEAR_NUM_STATS_LEN 4 +/* MC_TSA_STATISTICS_ENTRY Note that this field is expected to start at a + * 64-bit aligned offset + */ +#define MC_CMD_TSA_STATISTICS_OUT_READ_CLEAR_STATS_COUNTERS_OFST 8 +#define MC_CMD_TSA_STATISTICS_OUT_READ_CLEAR_STATS_COUNTERS_LEN 16 +#define MC_CMD_TSA_STATISTICS_OUT_READ_CLEAR_STATS_COUNTERS_MINNUM 1 +#define MC_CMD_TSA_STATISTICS_OUT_READ_CLEAR_STATS_COUNTERS_MAXNUM 15 +#define MC_CMD_TSA_STATISTICS_OUT_READ_CLEAR_STATS_COUNTERS_MAXNUM_MCDI2 63 + +/* MC_TSA_STATISTICS_ENTRY structuredef */ +#define MC_TSA_STATISTICS_ENTRY_LEN 16 +/* Tx statistics counter */ +#define MC_TSA_STATISTICS_ENTRY_TX_STAT_OFST 0 +#define MC_TSA_STATISTICS_ENTRY_TX_STAT_LEN 8 +#define MC_TSA_STATISTICS_ENTRY_TX_STAT_LO_OFST 0 +#define MC_TSA_STATISTICS_ENTRY_TX_STAT_HI_OFST 4 +#define MC_TSA_STATISTICS_ENTRY_TX_STAT_LBN 0 +#define MC_TSA_STATISTICS_ENTRY_TX_STAT_WIDTH 64 +/* Rx statistics counter */ +#define MC_TSA_STATISTICS_ENTRY_RX_STAT_OFST 8 +#define MC_TSA_STATISTICS_ENTRY_RX_STAT_LEN 8 +#define MC_TSA_STATISTICS_ENTRY_RX_STAT_LO_OFST 8 +#define MC_TSA_STATISTICS_ENTRY_RX_STAT_HI_OFST 12 +#define MC_TSA_STATISTICS_ENTRY_RX_STAT_LBN 64 +#define MC_TSA_STATISTICS_ENTRY_RX_STAT_WIDTH 64 + + +/***********************************/ +/* MC_CMD_ERASE_INITIAL_NIC_SECRET + * This request causes the NIC to find the initial NIC secret (programmed + * during ATE) in XPM memory and if and only if the NIC has already been + * rekeyed with MC_CMD_REKEY, erase it. This is used by manftest after + * installing TSA binding certificates. See SF-117631-TC. + */ +#define MC_CMD_ERASE_INITIAL_NIC_SECRET 0x131 +#undef MC_CMD_0x131_PRIVILEGE_CTG + +#define MC_CMD_0x131_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND + +/* MC_CMD_ERASE_INITIAL_NIC_SECRET_IN msgrequest */ +#define MC_CMD_ERASE_INITIAL_NIC_SECRET_IN_LEN 0 + +/* MC_CMD_ERASE_INITIAL_NIC_SECRET_OUT msgresponse */ +#define MC_CMD_ERASE_INITIAL_NIC_SECRET_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_TSA_CONFIG + * TSA adapter configuration operations. This command is used to prepare the + * NIC for TSA binding. + */ +#define MC_CMD_TSA_CONFIG 0x64 +#undef MC_CMD_0x64_PRIVILEGE_CTG + +#define MC_CMD_0x64_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_TSA_CONFIG_IN msgrequest */ +#define MC_CMD_TSA_CONFIG_IN_LEN 4 +/* TSA configuration sub-operation code */ +#define MC_CMD_TSA_CONFIG_IN_OP_OFST 0 +#define MC_CMD_TSA_CONFIG_IN_OP_LEN 4 +/* enum: Append a single item to the tsa_config partition. Items will be + * encrypted unless they are declared as non-sensitive. Returns + * MC_CMD_ERR_EEXIST if the tag is already present. 
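+ *
+ * Usage sketch (illustrative only) for the variable-length
+ * MC_CMD_TSA_STATISTICS_IN_READ_CLEAR request defined above: the _LEN(num)
+ * macro sizes the buffer and the counter IDs follow the fixed header at
+ * COUNTER_ID_OFST.  It reuses the hypothetical mcdi_put_dword(),
+ * mcdi_get_dword() and mcdi_rpc() helpers sketched alongside
+ * MC_CMD_ALLOCATE_TX_VFIFO_CP.
+ *
+ *   static int tsa_stats_read_range(uint32_t first_id, uint32_t count)
+ *   {
+ *           uint8_t in[MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_LEN(1)] = {0};
+ *           uint8_t out[MC_CMD_TSA_STATISTICS_OUT_READ_CLEAR_LENMAX] = {0};
+ *           int rc;
+ *
+ *           mcdi_put_dword(in, MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_OP_CODE_OFST,
+ *                          MC_CMD_TSA_STATISTICS_OP_READ_CLEAR);
+ *           // Read only (bit 0); the CLEAR bit (bit 1) is left unset.
+ *           mcdi_put_dword(in, MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_FLAGS_OFST,
+ *                          1u << MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_READ_LBN);
+ *           mcdi_put_dword(in, MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_MODE_OFST,
+ *                          MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_RANGE);
+ *           mcdi_put_dword(in, MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_NUM_STATS_OFST,
+ *                          count);
+ *           // In RANGE mode a single counter ID marks the start of the range.
+ *           mcdi_put_dword(in, MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_COUNTER_ID_OFST,
+ *                          first_id);
+ *           rc = mcdi_rpc(MC_CMD_TSA_STATISTICS, in, sizeof(in),
+ *                         out, sizeof(out));
+ *           if (rc == 0)
+ *                   return (int)mcdi_get_dword(out,
+ *                       MC_CMD_TSA_STATISTICS_OUT_READ_CLEAR_NUM_STATS_OFST);
+ *           return rc;
+ *   }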
+ */ +#define MC_CMD_TSA_CONFIG_OP_APPEND 0x1 +/* enum: Reset the tsa_config partition to a clean state. */ +#define MC_CMD_TSA_CONFIG_OP_RESET 0x2 +/* enum: Read back a configured item from tsa_config partition. Returns + * MC_CMD_ERR_ENOENT if the item doesn't exist, or MC_CMD_ERR_EPERM if the item + * is declared as sensitive (i.e. is encrypted). + */ +#define MC_CMD_TSA_CONFIG_OP_READ 0x3 + +/* MC_CMD_TSA_CONFIG_IN_APPEND msgrequest */ +#define MC_CMD_TSA_CONFIG_IN_APPEND_LENMIN 12 +#define MC_CMD_TSA_CONFIG_IN_APPEND_LENMAX 252 +#define MC_CMD_TSA_CONFIG_IN_APPEND_LENMAX_MCDI2 1020 +#define MC_CMD_TSA_CONFIG_IN_APPEND_LEN(num) (12+1*(num)) +#define MC_CMD_TSA_CONFIG_IN_APPEND_DATA_NUM(len) (((len)-12)/1) +/* TSA configuration sub-operation code. The value shall be + * MC_CMD_TSA_CONFIG_OP_APPEND. + */ +#define MC_CMD_TSA_CONFIG_IN_APPEND_OP_OFST 0 +#define MC_CMD_TSA_CONFIG_IN_APPEND_OP_LEN 4 +/* The tag to be appended */ +#define MC_CMD_TSA_CONFIG_IN_APPEND_TAG_OFST 4 +#define MC_CMD_TSA_CONFIG_IN_APPEND_TAG_LEN 4 +/* The length of the data in bytes */ +#define MC_CMD_TSA_CONFIG_IN_APPEND_LENGTH_OFST 8 +#define MC_CMD_TSA_CONFIG_IN_APPEND_LENGTH_LEN 4 +/* The item data */ +#define MC_CMD_TSA_CONFIG_IN_APPEND_DATA_OFST 12 +#define MC_CMD_TSA_CONFIG_IN_APPEND_DATA_LEN 1 +#define MC_CMD_TSA_CONFIG_IN_APPEND_DATA_MINNUM 0 +#define MC_CMD_TSA_CONFIG_IN_APPEND_DATA_MAXNUM 240 +#define MC_CMD_TSA_CONFIG_IN_APPEND_DATA_MAXNUM_MCDI2 1008 + +/* MC_CMD_TSA_CONFIG_OUT_APPEND msgresponse */ +#define MC_CMD_TSA_CONFIG_OUT_APPEND_LEN 0 + +/* MC_CMD_TSA_CONFIG_IN_RESET msgrequest */ +#define MC_CMD_TSA_CONFIG_IN_RESET_LEN 4 +/* TSA configuration sub-operation code. The value shall be + * MC_CMD_TSA_CONFIG_OP_RESET. + */ +#define MC_CMD_TSA_CONFIG_IN_RESET_OP_OFST 0 +#define MC_CMD_TSA_CONFIG_IN_RESET_OP_LEN 4 + +/* MC_CMD_TSA_CONFIG_OUT_RESET msgresponse */ +#define MC_CMD_TSA_CONFIG_OUT_RESET_LEN 0 + +/* MC_CMD_TSA_CONFIG_IN_READ msgrequest */ +#define MC_CMD_TSA_CONFIG_IN_READ_LEN 8 +/* TSA configuration sub-operation code. The value shall be + * MC_CMD_TSA_CONFIG_OP_READ. + */ +#define MC_CMD_TSA_CONFIG_IN_READ_OP_OFST 0 +#define MC_CMD_TSA_CONFIG_IN_READ_OP_LEN 4 +/* The tag to be read */ +#define MC_CMD_TSA_CONFIG_IN_READ_TAG_OFST 4 +#define MC_CMD_TSA_CONFIG_IN_READ_TAG_LEN 4 + +/* MC_CMD_TSA_CONFIG_OUT_READ msgresponse */ +#define MC_CMD_TSA_CONFIG_OUT_READ_LENMIN 8 +#define MC_CMD_TSA_CONFIG_OUT_READ_LENMAX 252 +#define MC_CMD_TSA_CONFIG_OUT_READ_LENMAX_MCDI2 1020 +#define MC_CMD_TSA_CONFIG_OUT_READ_LEN(num) (8+1*(num)) +#define MC_CMD_TSA_CONFIG_OUT_READ_DATA_NUM(len) (((len)-8)/1) +/* The tag that was read */ +#define MC_CMD_TSA_CONFIG_OUT_READ_TAG_OFST 0 +#define MC_CMD_TSA_CONFIG_OUT_READ_TAG_LEN 4 +/* The length of the data in bytes */ +#define MC_CMD_TSA_CONFIG_OUT_READ_LENGTH_OFST 4 +#define MC_CMD_TSA_CONFIG_OUT_READ_LENGTH_LEN 4 +/* The data of the item. */ +#define MC_CMD_TSA_CONFIG_OUT_READ_DATA_OFST 8 +#define MC_CMD_TSA_CONFIG_OUT_READ_DATA_LEN 1 +#define MC_CMD_TSA_CONFIG_OUT_READ_DATA_MINNUM 0 +#define MC_CMD_TSA_CONFIG_OUT_READ_DATA_MAXNUM 244 +#define MC_CMD_TSA_CONFIG_OUT_READ_DATA_MAXNUM_MCDI2 1012 + +/* MC_TSA_IPV4_ITEM structuredef */ +#define MC_TSA_IPV4_ITEM_LEN 8 +/* Additional metadata describing the IP address information such as the + * physical port number the address is being used on. Unused space in this + * field is reserved for future expansion. 
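+ *
+ * Usage sketch (illustrative only) for MC_CMD_TSA_CONFIG_OP_APPEND as defined
+ * above; the item data is a plain byte array that follows the fixed 12-byte
+ * header.  Helpers as sketched alongside MC_CMD_ALLOCATE_TX_VFIFO_CP.
+ *
+ *   static int tsa_config_append(uint32_t tag, const uint8_t *data, uint32_t len)
+ *   {
+ *           uint8_t in[MC_CMD_TSA_CONFIG_IN_APPEND_LENMAX] = {0};
+ *
+ *           if (len > MC_CMD_TSA_CONFIG_IN_APPEND_DATA_MAXNUM)
+ *                   return -1;   // item too long for a single request
+ *           mcdi_put_dword(in, MC_CMD_TSA_CONFIG_IN_APPEND_OP_OFST,
+ *                          MC_CMD_TSA_CONFIG_OP_APPEND);
+ *           mcdi_put_dword(in, MC_CMD_TSA_CONFIG_IN_APPEND_TAG_OFST, tag);
+ *           mcdi_put_dword(in, MC_CMD_TSA_CONFIG_IN_APPEND_LENGTH_OFST, len);
+ *           memcpy(in + MC_CMD_TSA_CONFIG_IN_APPEND_DATA_OFST, data, len);
+ *           return mcdi_rpc(MC_CMD_TSA_CONFIG, in,
+ *                           MC_CMD_TSA_CONFIG_IN_APPEND_LEN(len), NULL, 0);
+ *   }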
+ */ +#define MC_TSA_IPV4_ITEM_IPV4_ADDR_META_OFST 0 +#define MC_TSA_IPV4_ITEM_IPV4_ADDR_META_LEN 4 +#define MC_TSA_IPV4_ITEM_PORT_IDX_LBN 0 +#define MC_TSA_IPV4_ITEM_PORT_IDX_WIDTH 8 +#define MC_TSA_IPV4_ITEM_IPV4_ADDR_META_LBN 0 +#define MC_TSA_IPV4_ITEM_IPV4_ADDR_META_WIDTH 32 +/* The IPv4 address in little endian byte order. */ +#define MC_TSA_IPV4_ITEM_IPV4_ADDR_OFST 4 +#define MC_TSA_IPV4_ITEM_IPV4_ADDR_LEN 4 +#define MC_TSA_IPV4_ITEM_IPV4_ADDR_LBN 32 +#define MC_TSA_IPV4_ITEM_IPV4_ADDR_WIDTH 32 + + +/***********************************/ +/* MC_CMD_TSA_IPADDR + * TSA operations relating to the monitoring and expiry of local IP addresses + * discovered by the controller. These commands are sent from a TSA controller + * to a TSA adapter. + */ +#define MC_CMD_TSA_IPADDR 0x65 +#undef MC_CMD_0x65_PRIVILEGE_CTG + +#define MC_CMD_0x65_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND + +/* MC_CMD_TSA_IPADDR_IN msgrequest */ +#define MC_CMD_TSA_IPADDR_IN_LEN 4 +/* Header containing information to identify which sub-operation of this + * command to perform. The header contains a 16-bit op-code. Unused space in + * this field is reserved for future expansion. + */ +#define MC_CMD_TSA_IPADDR_IN_OP_HDR_OFST 0 +#define MC_CMD_TSA_IPADDR_IN_OP_HDR_LEN 4 +#define MC_CMD_TSA_IPADDR_IN_OP_LBN 0 +#define MC_CMD_TSA_IPADDR_IN_OP_WIDTH 16 +/* enum: Request that the adapter verifies that the IPv4 addresses supplied are + * still in use by the host by sending ARP probes to the host. The MC does not + * wait for a response to the probes and sends an MCDI response to the + * controller once the probes have been sent to the host. The response to the + * probes (if there are any) will be forwarded to the controller using + * MC_CMD_TSA_INFO alerts. + */ +#define MC_CMD_TSA_IPADDR_OP_VALIDATE_IPV4 0x1 +/* enum: Notify the adapter that one or more IPv4 addresses are no longer valid + * for the host of the adapter. The adapter should remove the IPv4 addresses + * from its local cache. + */ +#define MC_CMD_TSA_IPADDR_OP_REMOVE_IPV4 0x2 + +/* MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4 msgrequest */ +#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_LENMIN 16 +#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_LENMAX 248 +#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_LENMAX_MCDI2 1016 +#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_LEN(num) (8+8*(num)) +#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_IPV4_ITEM_NUM(len) (((len)-8)/8) +/* Header containing information to identify which sub-operation of this + * command to perform. The header contains a 16-bit op-code. Unused space in + * this field is reserved for future expansion. + */ +#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_OP_HDR_OFST 0 +#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_OP_HDR_LEN 4 +#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_OP_LBN 0 +#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_OP_WIDTH 16 +/* Number of IPv4 addresses to validate. */ +#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_NUM_ITEMS_OFST 4 +#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_NUM_ITEMS_LEN 4 +/* The IPv4 addresses to validate, in struct MC_TSA_IPV4_ITEM format. 
*/ +#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_IPV4_ITEM_OFST 8 +#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_IPV4_ITEM_LEN 8 +#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_IPV4_ITEM_LO_OFST 8 +#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_IPV4_ITEM_HI_OFST 12 +#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_IPV4_ITEM_MINNUM 1 +#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_IPV4_ITEM_MAXNUM 30 +#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_IPV4_ITEM_MAXNUM_MCDI2 126 + +/* MC_CMD_TSA_IPADDR_OUT_VALIDATE_IPV4 msgresponse */ +#define MC_CMD_TSA_IPADDR_OUT_VALIDATE_IPV4_LEN 0 + +/* MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4 msgrequest */ +#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_LENMIN 16 +#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_LENMAX 248 +#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_LENMAX_MCDI2 1016 +#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_LEN(num) (8+8*(num)) +#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_IPV4_ITEM_NUM(len) (((len)-8)/8) +/* Header containing information to identify which sub-operation of this + * command to perform. The header contains a 16-bit op-code. Unused space in + * this field is reserved for future expansion. + */ +#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_OP_HDR_OFST 0 +#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_OP_HDR_LEN 4 +#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_OP_LBN 0 +#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_OP_WIDTH 16 +/* Number of IPv4 addresses to remove. */ +#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_NUM_ITEMS_OFST 4 +#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_NUM_ITEMS_LEN 4 +/* The IPv4 addresses that have expired, in struct MC_TSA_IPV4_ITEM format. */ +#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_IPV4_ITEM_OFST 8 +#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_IPV4_ITEM_LEN 8 +#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_IPV4_ITEM_LO_OFST 8 +#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_IPV4_ITEM_HI_OFST 12 +#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_IPV4_ITEM_MINNUM 1 +#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_IPV4_ITEM_MAXNUM 30 +#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_IPV4_ITEM_MAXNUM_MCDI2 126 + +/* MC_CMD_TSA_IPADDR_OUT_REMOVE_IPV4 msgresponse */ +#define MC_CMD_TSA_IPADDR_OUT_REMOVE_IPV4_LEN 0 + + +/***********************************/ +/* MC_CMD_SECURE_NIC_INFO + * Get secure NIC information. While many of the features reported by these + * commands are related to TSA, they must be supported in firmware where TSA is + * disabled. + */ +#define MC_CMD_SECURE_NIC_INFO 0x132 +#undef MC_CMD_0x132_PRIVILEGE_CTG + +#define MC_CMD_0x132_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_SECURE_NIC_INFO_IN msgrequest */ +#define MC_CMD_SECURE_NIC_INFO_IN_LEN 4 +/* sub-operation code info */ +#define MC_CMD_SECURE_NIC_INFO_IN_OP_HDR_OFST 0 +#define MC_CMD_SECURE_NIC_INFO_IN_OP_HDR_LEN 4 +#define MC_CMD_SECURE_NIC_INFO_IN_OP_LBN 0 +#define MC_CMD_SECURE_NIC_INFO_IN_OP_WIDTH 16 +/* enum: Get the status of various security settings, all signed along with a + * challenge chosen by the host. + */ +#define MC_CMD_SECURE_NIC_INFO_OP_STATUS 0x0 + +/* MC_CMD_SECURE_NIC_INFO_IN_STATUS msgrequest */ +#define MC_CMD_SECURE_NIC_INFO_IN_STATUS_LEN 24 +/* sub-operation code, must be MC_CMD_SECURE_NIC_INFO_OP_STATUS */ +#define MC_CMD_SECURE_NIC_INFO_IN_STATUS_OP_HDR_OFST 0 +#define MC_CMD_SECURE_NIC_INFO_IN_STATUS_OP_HDR_LEN 4 +/* Type of key to be used to sign response. */ +#define MC_CMD_SECURE_NIC_INFO_IN_STATUS_KEY_TYPE_OFST 4 +#define MC_CMD_SECURE_NIC_INFO_IN_STATUS_KEY_TYPE_LEN 4 +#define MC_CMD_SECURE_NIC_INFO_IN_STATUS_UNUSED 0x0 /* enum */ +/* enum: Solarflare adapter authentication key, installed by Manftest. 
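+ *
+ * Usage sketch (illustrative only) for MC_CMD_TSA_IPADDR_OP_VALIDATE_IPV4 as
+ * defined above: each 8-byte MC_TSA_IPV4_ITEM carries a META dword (port index
+ * in the low 8 bits) followed by the IPv4 address.  Helpers as sketched
+ * alongside MC_CMD_ALLOCATE_TX_VFIFO_CP; the addresses are assumed to be
+ * passed already in the little-endian byte order the structure expects.
+ *
+ *   static int tsa_validate_ipv4(uint8_t port, const uint32_t *addrs_le,
+ *                                uint32_t naddrs)
+ *   {
+ *           uint8_t in[MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_LENMAX] = {0};
+ *           size_t ofst;
+ *           uint32_t i;
+ *
+ *           if (naddrs > MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_IPV4_ITEM_MAXNUM)
+ *                   return -1;
+ *           mcdi_put_dword(in, MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_OP_HDR_OFST,
+ *                          MC_CMD_TSA_IPADDR_OP_VALIDATE_IPV4);
+ *           mcdi_put_dword(in, MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_NUM_ITEMS_OFST,
+ *                          naddrs);
+ *           ofst = MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_IPV4_ITEM_OFST;
+ *           for (i = 0; i < naddrs; i++, ofst += MC_TSA_IPV4_ITEM_LEN) {
+ *                   mcdi_put_dword(in,
+ *                                  ofst + MC_TSA_IPV4_ITEM_IPV4_ADDR_META_OFST,
+ *                                  port);
+ *                   memcpy(in + ofst + MC_TSA_IPV4_ITEM_IPV4_ADDR_OFST,
+ *                          &addrs_le[i], 4);
+ *           }
+ *           return mcdi_rpc(MC_CMD_TSA_IPADDR, in,
+ *                           MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_LEN(naddrs),
+ *                           NULL, 0);
+ *   }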
+ */
+#define MC_CMD_SECURE_NIC_INFO_IN_STATUS_SF_ADAPTER_AUTH 0x1
+/* enum: TSA binding key, installed after adapter is bound to a TSA controller.
+ * This is not supported in firmware which does not support TSA.
+ */
+#define MC_CMD_SECURE_NIC_INFO_IN_STATUS_TSA_BINDING 0x2
+/* enum: Customer adapter authentication key. Installed by the customer in the
+ * field, but otherwise similar to the Solarflare adapter authentication key.
+ */
+#define MC_CMD_SECURE_NIC_INFO_IN_STATUS_CUSTOMER_ADAPTER_AUTH 0x3
+/* Random challenge generated by the host. */
+#define MC_CMD_SECURE_NIC_INFO_IN_STATUS_CHALLENGE_OFST 8
+#define MC_CMD_SECURE_NIC_INFO_IN_STATUS_CHALLENGE_LEN 16
+
+/* MC_CMD_SECURE_NIC_INFO_OUT_STATUS msgresponse */
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_LEN 420
+/* Length of the signature in MSG_SIGNATURE. */
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_MSG_SIGNATURE_LEN_OFST 0
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_MSG_SIGNATURE_LEN_LEN 4
+/* Signature over the message, starting at MESSAGE_TYPE and continuing to the
+ * end of the MCDI response, allowing the message format to be extended. The
+ * signature uses ECDSA 384 encoding in ASN.1 format. It has variable length,
+ * with a maximum of 384 bytes.
+ */
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_MSG_SIGNATURE_OFST 4
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_MSG_SIGNATURE_LEN 384
+/* Enum value indicating the type of response. This protects against chosen
+ * message attacks. The enum values are random rather than sequential to make
+ * it unlikely that values will be reused should other commands in a different
+ * namespace need to create signed messages.
+ */
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_MESSAGE_TYPE_OFST 388
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_MESSAGE_TYPE_LEN 4
+/* enum: Message type value for the response to a
+ * MC_CMD_SECURE_NIC_INFO_IN_STATUS message.
+ */
+#define MC_CMD_SECURE_NIC_INFO_STATUS 0xdb4
+/* The challenge provided by the host in the MC_CMD_SECURE_NIC_INFO_IN_STATUS
+ * message
+ */
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_CHALLENGE_OFST 392
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_CHALLENGE_LEN 16
+/* The first 32 bits of XPM memory, which include security and flag bits, die
+ * ID and chip ID revision. The meaning of these bits is defined in
+ * mc/include/mc/xpm.h in the firmwaresrc repository.
+ */
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_XPM_STATUS_BITS_OFST 408
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_XPM_STATUS_BITS_LEN 4
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_FIRMWARE_VERSION_A_OFST 412
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_FIRMWARE_VERSION_A_LEN 2
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_FIRMWARE_VERSION_B_OFST 414
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_FIRMWARE_VERSION_B_LEN 2
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_FIRMWARE_VERSION_C_OFST 416
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_FIRMWARE_VERSION_C_LEN 2
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_FIRMWARE_VERSION_D_OFST 418
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_FIRMWARE_VERSION_D_LEN 2
+
+
+/***********************************/
+/* MC_CMD_TSA_TEST
+ * A simple ping-pong command just to test the adapter<>controller MCDI
+ * communication channel. This command makes no changes to the TSA adapter's
+ * internal state. It is used by the controller just to verify that the MCDI
+ * communication channel is working fine. This command takes no additional
+ * parameters in request or response.
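+ *
+ * Usage sketch (illustrative only) for MC_CMD_SECURE_NIC_INFO_OP_STATUS as
+ * defined above: the caller supplies a random 16-byte challenge and should
+ * check that the signed response echoes it back.  Verifying the signature
+ * itself is out of scope for this sketch.  Helpers as sketched alongside
+ * MC_CMD_ALLOCATE_TX_VFIFO_CP.
+ *
+ *   static int secure_nic_info_status(const uint8_t challenge[16],
+ *                                     uint8_t *out, size_t out_len)
+ *   {
+ *           uint8_t in[MC_CMD_SECURE_NIC_INFO_IN_STATUS_LEN] = {0};
+ *           int rc;
+ *
+ *           if (out_len < MC_CMD_SECURE_NIC_INFO_OUT_STATUS_LEN)
+ *                   return -1;
+ *           mcdi_put_dword(in, MC_CMD_SECURE_NIC_INFO_IN_STATUS_OP_HDR_OFST,
+ *                          MC_CMD_SECURE_NIC_INFO_OP_STATUS);
+ *           mcdi_put_dword(in, MC_CMD_SECURE_NIC_INFO_IN_STATUS_KEY_TYPE_OFST,
+ *                          MC_CMD_SECURE_NIC_INFO_IN_STATUS_SF_ADAPTER_AUTH);
+ *           memcpy(in + MC_CMD_SECURE_NIC_INFO_IN_STATUS_CHALLENGE_OFST,
+ *                  challenge, MC_CMD_SECURE_NIC_INFO_IN_STATUS_CHALLENGE_LEN);
+ *           rc = mcdi_rpc(MC_CMD_SECURE_NIC_INFO, in, sizeof(in),
+ *                         out, MC_CMD_SECURE_NIC_INFO_OUT_STATUS_LEN);
+ *           if (rc != 0)
+ *                   return rc;
+ *           // Reject replies that do not echo our challenge.
+ *           if (memcmp(out + MC_CMD_SECURE_NIC_INFO_OUT_STATUS_CHALLENGE_OFST,
+ *                      challenge,
+ *                      MC_CMD_SECURE_NIC_INFO_OUT_STATUS_CHALLENGE_LEN) != 0)
+ *                   return -1;
+ *           return 0;
+ *   }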
+ */
+#define MC_CMD_TSA_TEST 0x125
+#undef MC_CMD_0x125_PRIVILEGE_CTG
+
+#define MC_CMD_0x125_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
+
+/* MC_CMD_TSA_TEST_IN msgrequest */
+#define MC_CMD_TSA_TEST_IN_LEN 0
+
+/* MC_CMD_TSA_TEST_OUT msgresponse */
+#define MC_CMD_TSA_TEST_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_TSA_RULESET_OVERRIDE
+ * Override the TSA ruleset that is currently active on the adapter. This
+ * operation does not modify the ruleset itself. This operation provides a
+ * mechanism to apply an allow-all or deny-all operation on all packets,
+ * thereby completely ignoring the rule-set configured on the adapter. The main
+ * purpose of this operation is to provide a deterministic state to the TSA
+ * firewall during rule-set transitions.
+ */
+#define MC_CMD_TSA_RULESET_OVERRIDE 0x12a
+#undef MC_CMD_0x12a_PRIVILEGE_CTG
+
+#define MC_CMD_0x12a_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
+
+/* MC_CMD_TSA_RULESET_OVERRIDE_IN msgrequest */
+#define MC_CMD_TSA_RULESET_OVERRIDE_IN_LEN 4
+/* The override state to apply. */
+#define MC_CMD_TSA_RULESET_OVERRIDE_IN_STATE_OFST 0
+#define MC_CMD_TSA_RULESET_OVERRIDE_IN_STATE_LEN 4
+/* enum: No override in place - the existing ruleset is in operation. */
+#define MC_CMD_TSA_RULESET_OVERRIDE_NONE 0x0
+/* enum: Block all packets seen on all datapath channels except those packets
+ * required for basic configuration of the TSA NIC such as ARPs and TSA-
+ * communication traffic. Such exceptional traffic is handled differently
+ * compared to TSA rulesets.
+ */
+#define MC_CMD_TSA_RULESET_OVERRIDE_BLOCK 0x1
+/* enum: Allow all packets through all datapath channels. The TSA adapter
+ * behaves like a normal NIC without any firewalls.
+ */
+#define MC_CMD_TSA_RULESET_OVERRIDE_ALLOW 0x2
+
+/* MC_CMD_TSA_RULESET_OVERRIDE_OUT msgresponse */
+#define MC_CMD_TSA_RULESET_OVERRIDE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_TSAC_REQUEST
+ * Generic command to send requests from a TSA controller to a TSA adapter.
+ * Specific usage is determined by the TYPE field.
+ */
+#define MC_CMD_TSAC_REQUEST 0x12b
+#undef MC_CMD_0x12b_PRIVILEGE_CTG
+
+#define MC_CMD_0x12b_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
+
+/* MC_CMD_TSAC_REQUEST_IN msgrequest */
+#define MC_CMD_TSAC_REQUEST_IN_LEN 4
+/* The type of request from the controller. */
+#define MC_CMD_TSAC_REQUEST_IN_TYPE_OFST 0
+#define MC_CMD_TSAC_REQUEST_IN_TYPE_LEN 4
+/* enum: Request the adapter to resend localIP information from its cache. The
+ * command does not return any IP address information; IP addresses are sent as
+ * TSA notifications as described in MC_CMD_TSA_INFO_IN_LOCAL_IP.
+ */
+#define MC_CMD_TSAC_REQUEST_LOCALIP 0x0
+
+/* MC_CMD_TSAC_REQUEST_OUT msgresponse */
+#define MC_CMD_TSAC_REQUEST_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SUC_VERSION
+ * Get the version of the SUC
+ */
+#define MC_CMD_SUC_VERSION 0x134
+#undef MC_CMD_0x134_PRIVILEGE_CTG
+
+#define MC_CMD_0x134_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_SUC_VERSION_IN msgrequest */
+#define MC_CMD_SUC_VERSION_IN_LEN 0
+
+/* MC_CMD_SUC_VERSION_OUT msgresponse */
+#define MC_CMD_SUC_VERSION_OUT_LEN 24
+/* The SUC firmware version as four numbers - a.b.c.d */
+#define MC_CMD_SUC_VERSION_OUT_VERSION_OFST 0
+#define MC_CMD_SUC_VERSION_OUT_VERSION_LEN 4
+#define MC_CMD_SUC_VERSION_OUT_VERSION_NUM 4
+/* The date, in seconds since the Unix epoch, when the firmware image was
+ * built.
+ */ +#define MC_CMD_SUC_VERSION_OUT_BUILD_DATE_OFST 16 +#define MC_CMD_SUC_VERSION_OUT_BUILD_DATE_LEN 4 +/* The ID of the SUC chip. This is specific to the platform but typically + * indicates family, memory sizes etc. See SF-116728-SW for further details. + */ +#define MC_CMD_SUC_VERSION_OUT_CHIP_ID_OFST 20 +#define MC_CMD_SUC_VERSION_OUT_CHIP_ID_LEN 4 + +/* MC_CMD_SUC_BOOT_VERSION_IN msgrequest: Get the version of the SUC boot + * loader. + */ +#define MC_CMD_SUC_BOOT_VERSION_IN_LEN 4 +#define MC_CMD_SUC_BOOT_VERSION_IN_MAGIC_OFST 0 +#define MC_CMD_SUC_BOOT_VERSION_IN_MAGIC_LEN 4 +/* enum: Requests the SUC boot version. */ +#define MC_CMD_SUC_VERSION_GET_BOOT_VERSION 0xb007700b + +/* MC_CMD_SUC_BOOT_VERSION_OUT msgresponse */ +#define MC_CMD_SUC_BOOT_VERSION_OUT_LEN 4 +/* The SUC boot version */ +#define MC_CMD_SUC_BOOT_VERSION_OUT_VERSION_OFST 0 +#define MC_CMD_SUC_BOOT_VERSION_OUT_VERSION_LEN 4 + + +/***********************************/ +/* MC_CMD_SUC_MANFTEST + * Operations to support manftest on SUC based systems. + */ +#define MC_CMD_SUC_MANFTEST 0x135 +#undef MC_CMD_0x135_PRIVILEGE_CTG + +#define MC_CMD_0x135_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND + +/* MC_CMD_SUC_MANFTEST_IN msgrequest */ +#define MC_CMD_SUC_MANFTEST_IN_LEN 4 +/* The manftest operation to be performed. */ +#define MC_CMD_SUC_MANFTEST_IN_OP_OFST 0 +#define MC_CMD_SUC_MANFTEST_IN_OP_LEN 4 +/* enum: Read serial number and use count. */ +#define MC_CMD_SUC_MANFTEST_WEAROUT_READ 0x0 +/* enum: Update use count on wearout adapter. */ +#define MC_CMD_SUC_MANFTEST_WEAROUT_UPDATE 0x1 +/* enum: Start an ADC calibration. */ +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_START 0x2 +/* enum: Read the status of an ADC calibration. */ +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS 0x3 +/* enum: Read the results of an ADC calibration. */ +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_RESULT 0x4 +/* enum: Read the PCIe configuration. */ +#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_READ 0x5 +/* enum: Write the PCIe configuration. */ +#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_WRITE 0x6 +/* enum: Write FRU information to SUC. The FRU information is taken from the + * FRU_INFORMATION partition. Attempts to write to read-only FRUs are rejected. + */ +#define MC_CMD_SUC_MANFTEST_FRU_WRITE 0x7 + +/* MC_CMD_SUC_MANFTEST_OUT msgresponse */ +#define MC_CMD_SUC_MANFTEST_OUT_LEN 0 + +/* MC_CMD_SUC_MANFTEST_WEAROUT_READ_IN msgrequest */ +#define MC_CMD_SUC_MANFTEST_WEAROUT_READ_IN_LEN 4 +/* The manftest operation to be performed. This must be + * MC_CMD_SUC_MANFTEST_WEAROUT_READ. + */ +#define MC_CMD_SUC_MANFTEST_WEAROUT_READ_IN_OP_OFST 0 +#define MC_CMD_SUC_MANFTEST_WEAROUT_READ_IN_OP_LEN 4 + +/* MC_CMD_SUC_MANFTEST_WEAROUT_READ_OUT msgresponse */ +#define MC_CMD_SUC_MANFTEST_WEAROUT_READ_OUT_LEN 20 +/* The serial number of the wearout adapter, see SF-112717-PR for format. */ +#define MC_CMD_SUC_MANFTEST_WEAROUT_READ_OUT_SERIAL_NUMBER_OFST 0 +#define MC_CMD_SUC_MANFTEST_WEAROUT_READ_OUT_SERIAL_NUMBER_LEN 16 +/* The use count of the wearout adapter. */ +#define MC_CMD_SUC_MANFTEST_WEAROUT_READ_OUT_USE_COUNT_OFST 16 +#define MC_CMD_SUC_MANFTEST_WEAROUT_READ_OUT_USE_COUNT_LEN 4 + +/* MC_CMD_SUC_MANFTEST_WEAROUT_UPDATE_IN msgrequest */ +#define MC_CMD_SUC_MANFTEST_WEAROUT_UPDATE_IN_LEN 4 +/* The manftest operation to be performed. This must be + * MC_CMD_SUC_MANFTEST_WEAROUT_UPDATE. 
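+ *
+ * Usage sketch (illustrative only) for MC_CMD_SUC_VERSION as defined above:
+ * the request is empty and the response packs the four version components,
+ * the build date and the chip ID at fixed offsets.  Helpers as sketched
+ * alongside MC_CMD_ALLOCATE_TX_VFIFO_CP.
+ *
+ *   static int suc_version(uint32_t ver[4], uint32_t *build_date,
+ *                          uint32_t *chip_id)
+ *   {
+ *           uint8_t out[MC_CMD_SUC_VERSION_OUT_LEN] = {0};
+ *           unsigned int i;
+ *           int rc;
+ *
+ *           rc = mcdi_rpc(MC_CMD_SUC_VERSION, NULL, 0, out, sizeof(out));
+ *           if (rc != 0)
+ *                   return rc;
+ *           for (i = 0; i < MC_CMD_SUC_VERSION_OUT_VERSION_NUM; i++)
+ *                   ver[i] = mcdi_get_dword(out,
+ *                       MC_CMD_SUC_VERSION_OUT_VERSION_OFST +
+ *                       i * MC_CMD_SUC_VERSION_OUT_VERSION_LEN);
+ *           *build_date = mcdi_get_dword(out,
+ *               MC_CMD_SUC_VERSION_OUT_BUILD_DATE_OFST);
+ *           *chip_id = mcdi_get_dword(out, MC_CMD_SUC_VERSION_OUT_CHIP_ID_OFST);
+ *           return 0;
+ *   }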
+ */ +#define MC_CMD_SUC_MANFTEST_WEAROUT_UPDATE_IN_OP_OFST 0 +#define MC_CMD_SUC_MANFTEST_WEAROUT_UPDATE_IN_OP_LEN 4 + +/* MC_CMD_SUC_MANFTEST_WEAROUT_UPDATE_OUT msgresponse */ +#define MC_CMD_SUC_MANFTEST_WEAROUT_UPDATE_OUT_LEN 0 + +/* MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_START_IN msgrequest */ +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_START_IN_LEN 4 +/* The manftest operation to be performed. This must be + * MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_START. + */ +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_START_IN_OP_OFST 0 +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_START_IN_OP_LEN 4 + +/* MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_START_OUT msgresponse */ +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_START_OUT_LEN 0 + +/* MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_IN msgrequest */ +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_IN_LEN 4 +/* The manftest operation to be performed. This must be + * MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS. + */ +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_IN_OP_OFST 0 +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_IN_OP_LEN 4 + +/* MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_OUT msgresponse */ +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_OUT_LEN 4 +/* The combined status of the calibration operation. */ +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_OUT_FLAGS_OFST 0 +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_OUT_FLAGS_LEN 4 +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_OUT_CALIBRATING_LBN 0 +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_OUT_CALIBRATING_WIDTH 1 +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_OUT_FAILED_LBN 1 +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_OUT_FAILED_WIDTH 1 +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_OUT_RESULT_LBN 2 +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_OUT_RESULT_WIDTH 4 +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_OUT_INDEX_LBN 6 +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_OUT_INDEX_WIDTH 2 + +/* MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_RESULT_IN msgrequest */ +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_RESULT_IN_LEN 4 +/* The manftest operation to be performed. This must be + * MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_RESULT. + */ +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_RESULT_IN_OP_OFST 0 +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_RESULT_IN_OP_LEN 4 + +/* MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_RESULT_OUT msgresponse */ +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_RESULT_OUT_LEN 12 +/* The set of calibration results. */ +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_RESULT_OUT_VALUE_OFST 0 +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_RESULT_OUT_VALUE_LEN 4 +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_RESULT_OUT_VALUE_NUM 3 + +/* MC_CMD_SUC_MANFTEST_CONFIG_PCIE_READ_IN msgrequest */ +#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_READ_IN_LEN 4 +/* The manftest operation to be performed. This must be + * MC_CMD_SUC_MANFTEST_CONFIG_PCIE_READ. + */ +#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_READ_IN_OP_OFST 0 +#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_READ_IN_OP_LEN 4 + +/* MC_CMD_SUC_MANFTEST_CONFIG_PCIE_READ_OUT msgresponse */ +#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_READ_OUT_LEN 4 +/* The PCIe vendor ID. */ +#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_READ_OUT_VENDOR_ID_OFST 0 +#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_READ_OUT_VENDOR_ID_LEN 2 +/* The PCIe device ID. 
*/ +#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_READ_OUT_DEVICE_ID_OFST 2 +#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_READ_OUT_DEVICE_ID_LEN 2 + +/* MC_CMD_SUC_MANFTEST_CONFIG_PCIE_WRITE_IN msgrequest */ +#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_WRITE_IN_LEN 8 +/* The manftest operation to be performed. This must be + * MC_CMD_SUC_MANFTEST_CONFIG_PCIE_WRITE. + */ +#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_WRITE_IN_OP_OFST 0 +#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_WRITE_IN_OP_LEN 4 +/* The PCIe vendor ID. */ +#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_WRITE_IN_VENDOR_ID_OFST 4 +#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_WRITE_IN_VENDOR_ID_LEN 2 +/* The PCIe device ID. */ +#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_WRITE_IN_DEVICE_ID_OFST 6 +#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_WRITE_IN_DEVICE_ID_LEN 2 + +/* MC_CMD_SUC_MANFTEST_CONFIG_PCIE_WRITE_OUT msgresponse */ +#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_WRITE_OUT_LEN 0 + +/* MC_CMD_SUC_MANFTEST_FRU_WRITE_IN msgrequest */ +#define MC_CMD_SUC_MANFTEST_FRU_WRITE_IN_LEN 4 +/* The manftest operation to be performed. This must be + * MC_CMD_SUC_MANFTEST_FRU_WRITE + */ +#define MC_CMD_SUC_MANFTEST_FRU_WRITE_IN_OP_OFST 0 +#define MC_CMD_SUC_MANFTEST_FRU_WRITE_IN_OP_LEN 4 + +/* MC_CMD_SUC_MANFTEST_FRU_WRITE_OUT msgresponse */ +#define MC_CMD_SUC_MANFTEST_FRU_WRITE_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_GET_CERTIFICATE + * Request a certificate. + */ +#define MC_CMD_GET_CERTIFICATE 0x12c +#undef MC_CMD_0x12c_PRIVILEGE_CTG + +#define MC_CMD_0x12c_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_CERTIFICATE_IN msgrequest */ +#define MC_CMD_GET_CERTIFICATE_IN_LEN 8 +/* Type of the certificate to be retrieved. */ +#define MC_CMD_GET_CERTIFICATE_IN_TYPE_OFST 0 +#define MC_CMD_GET_CERTIFICATE_IN_TYPE_LEN 4 +#define MC_CMD_GET_CERTIFICATE_IN_UNUSED 0x0 /* enum */ +#define MC_CMD_GET_CERTIFICATE_IN_AAC 0x1 /* enum */ +/* enum: Adapter Authentication Certificate (AAC). The AAC is unique to each + * adapter and is used to verify its authenticity. It is installed by Manftest. + */ +#define MC_CMD_GET_CERTIFICATE_IN_ADAPTER_AUTH 0x1 +#define MC_CMD_GET_CERTIFICATE_IN_AASC 0x2 /* enum */ +/* enum: Adapter Authentication Signing Certificate (AASC). The AASC is shared + * by a group of adapters (typically a purchase order) and is used to verify + * the validity of AAC along with the SF root certificate. It is installed by + * Manftest. + */ +#define MC_CMD_GET_CERTIFICATE_IN_ADAPTER_AUTH_SIGNING 0x2 +#define MC_CMD_GET_CERTIFICATE_IN_CUSTOMER_AAC 0x3 /* enum */ +/* enum: Customer Adapter Authentication Certificate. The Customer AAC is + * unique to each adapter and is used to verify its authenticity in cases where + * either the AAC is not installed or a customer desires to use their own + * certificate chain. It is installed by the customer. + */ +#define MC_CMD_GET_CERTIFICATE_IN_CUSTOMER_ADAPTER_AUTH 0x3 +#define MC_CMD_GET_CERTIFICATE_IN_CUSTOMER_AASC 0x4 /* enum */ +/* enum: Customer Adapter Authentication Certificate. The Customer AASC is + * shared by a group of adapters and is used to verify the validity of the + * Customer AAC along with the customers root certificate. It is installed by + * the customer. + */ +#define MC_CMD_GET_CERTIFICATE_IN_CUSTOMER_ADAPTER_AUTH_SIGNING 0x4 +/* Offset, measured in bytes, relative to the start of the certificate data + * from which the certificate is to be retrieved. 
+ */ +#define MC_CMD_GET_CERTIFICATE_IN_OFFSET_OFST 4 +#define MC_CMD_GET_CERTIFICATE_IN_OFFSET_LEN 4 + +/* MC_CMD_GET_CERTIFICATE_OUT msgresponse */ +#define MC_CMD_GET_CERTIFICATE_OUT_LENMIN 13 +#define MC_CMD_GET_CERTIFICATE_OUT_LENMAX 252 +#define MC_CMD_GET_CERTIFICATE_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_GET_CERTIFICATE_OUT_LEN(num) (12+1*(num)) +#define MC_CMD_GET_CERTIFICATE_OUT_DATA_NUM(len) (((len)-12)/1) +/* Type of the certificate. */ +#define MC_CMD_GET_CERTIFICATE_OUT_TYPE_OFST 0 +#define MC_CMD_GET_CERTIFICATE_OUT_TYPE_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_GET_CERTIFICATE_IN/TYPE */ +/* Offset, measured in bytes, relative to the start of the certificate data + * from which data in this message starts. + */ +#define MC_CMD_GET_CERTIFICATE_OUT_OFFSET_OFST 4 +#define MC_CMD_GET_CERTIFICATE_OUT_OFFSET_LEN 4 +/* Total length of the certificate data. */ +#define MC_CMD_GET_CERTIFICATE_OUT_TOTAL_LENGTH_OFST 8 +#define MC_CMD_GET_CERTIFICATE_OUT_TOTAL_LENGTH_LEN 4 +/* The certificate data. */ +#define MC_CMD_GET_CERTIFICATE_OUT_DATA_OFST 12 +#define MC_CMD_GET_CERTIFICATE_OUT_DATA_LEN 1 +#define MC_CMD_GET_CERTIFICATE_OUT_DATA_MINNUM 1 +#define MC_CMD_GET_CERTIFICATE_OUT_DATA_MAXNUM 240 +#define MC_CMD_GET_CERTIFICATE_OUT_DATA_MAXNUM_MCDI2 1008 + + +/***********************************/ +/* MC_CMD_GET_NIC_GLOBAL + * Get a global value which applies to all PCI functions + */ +#define MC_CMD_GET_NIC_GLOBAL 0x12d +#undef MC_CMD_0x12d_PRIVILEGE_CTG + +#define MC_CMD_0x12d_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_NIC_GLOBAL_IN msgrequest */ +#define MC_CMD_GET_NIC_GLOBAL_IN_LEN 4 +/* Key to request value for, see enum values in MC_CMD_SET_NIC_GLOBAL. If the + * given key is unknown to the current firmware, the call will fail with + * ENOENT. + */ +#define MC_CMD_GET_NIC_GLOBAL_IN_KEY_OFST 0 +#define MC_CMD_GET_NIC_GLOBAL_IN_KEY_LEN 4 + +/* MC_CMD_GET_NIC_GLOBAL_OUT msgresponse */ +#define MC_CMD_GET_NIC_GLOBAL_OUT_LEN 4 +/* Value of requested key, see key descriptions below. */ +#define MC_CMD_GET_NIC_GLOBAL_OUT_VALUE_OFST 0 +#define MC_CMD_GET_NIC_GLOBAL_OUT_VALUE_LEN 4 + + +/***********************************/ +/* MC_CMD_SET_NIC_GLOBAL + * Set a global value which applies to all PCI functions. Most global values + * can only be changed under specific conditions, and this call will return an + * appropriate error otherwise (see key descriptions). + */ +#define MC_CMD_SET_NIC_GLOBAL 0x12e +#undef MC_CMD_0x12e_PRIVILEGE_CTG + +#define MC_CMD_0x12e_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_SET_NIC_GLOBAL_IN msgrequest */ +#define MC_CMD_SET_NIC_GLOBAL_IN_LEN 8 +/* Key to change value of. Firmware will return ENOENT for keys it doesn't know + * about. + */ +#define MC_CMD_SET_NIC_GLOBAL_IN_KEY_OFST 0 +#define MC_CMD_SET_NIC_GLOBAL_IN_KEY_LEN 4 +/* enum: Request switching the datapath firmware sub-variant. Currently only + * useful when running the DPDK f/w variant. See key values below, and the DPDK + * section of the EF10 Driver Writers Guide. Note that any driver attaching + * with the SUBVARIANT_AWARE flag cleared is implicitly considered as a request + * to switch back to the default sub-variant, and will thus reset this value. + * If a sub-variant switch happens, all other PCI functions will get their + * resources reset (they will see an MC reboot). + */ +#define MC_CMD_SET_NIC_GLOBAL_IN_FIRMWARE_SUBVARIANT 0x1 +/* New value to set, see key descriptions above. 
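+ *
+ * Usage sketch (illustrative only) of the chunked read that
+ * MC_CMD_GET_CERTIFICATE above is designed for: the caller advances OFFSET
+ * until TOTAL_LENGTH bytes have been gathered.  mcdi_rpc_len() is a
+ * hypothetical variant of the transport that also reports the response
+ * length; mcdi_put_dword()/mcdi_get_dword() as sketched alongside
+ * MC_CMD_ALLOCATE_TX_VFIFO_CP.
+ *
+ *   extern int mcdi_rpc_len(unsigned int cmd, const void *in, size_t in_len,
+ *                           void *out, size_t out_len, size_t *out_len_used);
+ *
+ *   static int get_certificate(uint32_t type, uint8_t *buf, size_t buf_len,
+ *                              size_t *cert_len)
+ *   {
+ *           uint8_t in[MC_CMD_GET_CERTIFICATE_IN_LEN] = {0};
+ *           uint8_t out[MC_CMD_GET_CERTIFICATE_OUT_LENMAX] = {0};
+ *           uint32_t offset = 0, total;
+ *           size_t used;
+ *           int rc;
+ *
+ *           do {
+ *                   mcdi_put_dword(in, MC_CMD_GET_CERTIFICATE_IN_TYPE_OFST, type);
+ *                   mcdi_put_dword(in, MC_CMD_GET_CERTIFICATE_IN_OFFSET_OFST,
+ *                                  offset);
+ *                   rc = mcdi_rpc_len(MC_CMD_GET_CERTIFICATE, in, sizeof(in),
+ *                                     out, sizeof(out), &used);
+ *                   if (rc != 0)
+ *                           return rc;
+ *                   if (used < MC_CMD_GET_CERTIFICATE_OUT_LENMIN)
+ *                           return -1;
+ *                   total = mcdi_get_dword(out,
+ *                       MC_CMD_GET_CERTIFICATE_OUT_TOTAL_LENGTH_OFST);
+ *                   if (total > buf_len)
+ *                           return -1;
+ *                   used = MC_CMD_GET_CERTIFICATE_OUT_DATA_NUM(used);
+ *                   if (used > total - offset)
+ *                           used = total - offset;
+ *                   memcpy(buf + offset,
+ *                          out + MC_CMD_GET_CERTIFICATE_OUT_DATA_OFST, used);
+ *                   offset += used;
+ *           } while (offset < total);
+ *           *cert_len = total;
+ *           return 0;
+ *   }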
*/
+#define MC_CMD_SET_NIC_GLOBAL_IN_VALUE_OFST 4
+#define MC_CMD_SET_NIC_GLOBAL_IN_VALUE_LEN 4
+/* enum: Only if KEY = FIRMWARE_SUBVARIANT. Default sub-variant with support
+ * for maximum features for the current f/w variant. A request from a
+ * privileged function to set this particular value will always succeed.
+ */
+#define MC_CMD_SET_NIC_GLOBAL_IN_FW_SUBVARIANT_DEFAULT 0x0
+/* enum: Only if KEY = FIRMWARE_SUBVARIANT. Increases packet rate at the cost
+ * of not supporting any TX checksum offloads. Only supported when running some
+ * f/w variants, others will return ENOTSUP (as reported by the homonymous bit
+ * in MC_CMD_GET_CAPABILITIES_V2). Can only be set when no other drivers are
+ * attached, and the calling driver must have no resources allocated. See the
+ * DPDK section of the EF10 Driver Writers Guide for a more detailed
+ * description with possible error codes.
+ */
+#define MC_CMD_SET_NIC_GLOBAL_IN_FW_SUBVARIANT_NO_TX_CSUM 0x1
+
+
+/***********************************/
+/* MC_CMD_LTSSM_TRACE_POLL
+ * Medford2 hardware has support for logging all LTSSM state transitions to a
+ * hardware buffer. When built with WITH_LTSSM_TRACE=1, the firmware will
+ * periodically dump the contents of this hardware buffer to an internal
+ * firmware buffer for later extraction.
+ */
+#define MC_CMD_LTSSM_TRACE_POLL 0x12f
+#undef MC_CMD_0x12f_PRIVILEGE_CTG
+
+#define MC_CMD_0x12f_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_LTSSM_TRACE_POLL_IN msgrequest: Read transitions from the firmware
+ * internal buffer.
+ */
+#define MC_CMD_LTSSM_TRACE_POLL_IN_LEN 4
+/* The maximum number of rows that the caller can accept. The format of each row
+ * is defined in MC_CMD_LTSSM_TRACE_POLL_OUT.
+ */
+#define MC_CMD_LTSSM_TRACE_POLL_IN_MAX_ROW_COUNT_OFST 0
+#define MC_CMD_LTSSM_TRACE_POLL_IN_MAX_ROW_COUNT_LEN 4
+
+/* MC_CMD_LTSSM_TRACE_POLL_OUT msgresponse */
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_LENMIN 16
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_LENMAX 248
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_LENMAX_MCDI2 1016
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_LEN(num) (8+8*(num))
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_ROWS_NUM(len) (((len)-8)/8)
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_FLAGS_OFST 0
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_FLAGS_LEN 4
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_HW_BUFFER_OVERFLOW_LBN 0
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_HW_BUFFER_OVERFLOW_WIDTH 1
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_FW_BUFFER_OVERFLOW_LBN 1
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_FW_BUFFER_OVERFLOW_WIDTH 1
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_CONTINUES_LBN 31
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_CONTINUES_WIDTH 1
+/* The number of rows present in this response. 
*/ +#define MC_CMD_LTSSM_TRACE_POLL_OUT_ROW_COUNT_OFST 4 +#define MC_CMD_LTSSM_TRACE_POLL_OUT_ROW_COUNT_LEN 4 +#define MC_CMD_LTSSM_TRACE_POLL_OUT_ROWS_OFST 8 +#define MC_CMD_LTSSM_TRACE_POLL_OUT_ROWS_LEN 8 +#define MC_CMD_LTSSM_TRACE_POLL_OUT_ROWS_LO_OFST 8 +#define MC_CMD_LTSSM_TRACE_POLL_OUT_ROWS_HI_OFST 12 +#define MC_CMD_LTSSM_TRACE_POLL_OUT_ROWS_MINNUM 0 +#define MC_CMD_LTSSM_TRACE_POLL_OUT_ROWS_MAXNUM 30 +#define MC_CMD_LTSSM_TRACE_POLL_OUT_ROWS_MAXNUM_MCDI2 126 +#define MC_CMD_LTSSM_TRACE_POLL_OUT_LTSSM_STATE_LBN 0 +#define MC_CMD_LTSSM_TRACE_POLL_OUT_LTSSM_STATE_WIDTH 6 +#define MC_CMD_LTSSM_TRACE_POLL_OUT_RDLH_LINK_UP_LBN 6 +#define MC_CMD_LTSSM_TRACE_POLL_OUT_RDLH_LINK_UP_WIDTH 1 +#define MC_CMD_LTSSM_TRACE_POLL_OUT_WAKE_N_LBN 7 +#define MC_CMD_LTSSM_TRACE_POLL_OUT_WAKE_N_WIDTH 1 +#define MC_CMD_LTSSM_TRACE_POLL_OUT_TIMESTAMP_PS_LBN 8 +#define MC_CMD_LTSSM_TRACE_POLL_OUT_TIMESTAMP_PS_WIDTH 24 +/* The time of the LTSSM transition. Times are reported as fractional + * microseconds since MC boot (wrapping at 2^32us). The fractional part is + * reported in picoseconds. 0 <= TIMESTAMP_PS < 1000000 timestamp in seconds = + * ((TIMESTAMP_US + TIMESTAMP_PS / 1000000) / 1000000) + */ +#define MC_CMD_LTSSM_TRACE_POLL_OUT_TIMESTAMP_US_OFST 12 +#define MC_CMD_LTSSM_TRACE_POLL_OUT_TIMESTAMP_US_LEN 4 + + +/***********************************/ +/* MC_CMD_TELEMETRY_ENABLE + * This command enables telemetry processing of packets, allowing a remote host + * to gather information and analytics passing on the card. Enabling telemetry + * will have a performance cost. Not supported on all hardware and datapath + * variants. As of writing, only supported on Medford2 running full-featured + * firmware variant. + */ +#define MC_CMD_TELEMETRY_ENABLE 0x138 +#undef MC_CMD_0x138_PRIVILEGE_CTG + +#define MC_CMD_0x138_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_TELEMETRY_ENABLE_IN msgrequest */ +#define MC_CMD_TELEMETRY_ENABLE_IN_LEN 4 +#define MC_CMD_TELEMETRY_ENABLE_IN_STATE_OFST 0 +#define MC_CMD_TELEMETRY_ENABLE_IN_STATE_LEN 4 +/* enum: Disables telemetry functionality, returns the card to default + * behaviour of the configured datapath variant. + */ +#define MC_CMD_TELEMETRY_ENABLE_IN_DISABLE 0x0 +/* enum: Enables telemetry functionality on the currently configured datapath + * variant if supported. + */ +#define MC_CMD_TELEMETRY_ENABLE_IN_ENABLE 0x1 + +/* MC_CMD_TELEMETRY_ENABLE_OUT msgresponse */ +#define MC_CMD_TELEMETRY_ENABLE_OUT_LEN 0 + +/* TELEMETRY_CONFIG structuredef */ +#define TELEMETRY_CONFIG_LEN 36 +/* Bitfields to identify the list of config parameters included in the command. + * A bit-value of 1 indicates that the relevant config parameter field is + * valid; 0 indicates invalid and the config parameter field must be ignored by + * firmware. Firmware may however apply some default values for certain + * parameters. 
+ */ +#define TELEMETRY_CONFIG_FLAGS_OFST 0 +#define TELEMETRY_CONFIG_FLAGS_LEN 4 +#define TELEMETRY_CONFIG_METRICS_COLLECTOR_IP_VALID_LBN 0 +#define TELEMETRY_CONFIG_METRICS_COLLECTOR_IP_VALID_WIDTH 1 +#define TELEMETRY_CONFIG_METRICS_COLLECTOR_PORT_VALID_LBN 1 +#define TELEMETRY_CONFIG_METRICS_COLLECTOR_PORT_VALID_WIDTH 1 +#define TELEMETRY_CONFIG_MONITOR_TIMEOUT_MS_VALID_LBN 2 +#define TELEMETRY_CONFIG_MONITOR_TIMEOUT_MS_VALID_WIDTH 1 +#define TELEMETRY_CONFIG_MAX_METRICS_COUNT_VALID_LBN 3 +#define TELEMETRY_CONFIG_MAX_METRICS_COUNT_VALID_WIDTH 1 +#define TELEMETRY_CONFIG_RESERVED1_LBN 4 +#define TELEMETRY_CONFIG_RESERVED1_WIDTH 28 +#define TELEMETRY_CONFIG_FLAGS_LBN 0 +#define TELEMETRY_CONFIG_FLAGS_WIDTH 32 +/* Collector IPv4/IPv6 address to which latency measurements are forwarded from + * the adapter (as bytes in network order; set last 12 bytes to 0 for IPv4 + * address). + */ +#define TELEMETRY_CONFIG_METRICS_COLLECTOR_IP_OFST 4 +#define TELEMETRY_CONFIG_METRICS_COLLECTOR_IP_LEN 16 +#define TELEMETRY_CONFIG_METRICS_COLLECTOR_IP_LBN 32 +#define TELEMETRY_CONFIG_METRICS_COLLECTOR_IP_WIDTH 128 +/* Collector Port number to which latency measurements are forwarded from the + * adapter (as bytes in network order). + */ +#define TELEMETRY_CONFIG_METRICS_COLLECTOR_PORT_OFST 20 +#define TELEMETRY_CONFIG_METRICS_COLLECTOR_PORT_LEN 2 +#define TELEMETRY_CONFIG_METRICS_COLLECTOR_PORT_LBN 160 +#define TELEMETRY_CONFIG_METRICS_COLLECTOR_PORT_WIDTH 16 +/* Unused - set to 0. */ +#define TELEMETRY_CONFIG_RESERVED2_OFST 22 +#define TELEMETRY_CONFIG_RESERVED2_LEN 2 +#define TELEMETRY_CONFIG_RESERVED2_LBN 176 +#define TELEMETRY_CONFIG_RESERVED2_WIDTH 16 +/* MAC address of the collector (as bytes in network order). */ +#define TELEMETRY_CONFIG_METRICS_COLLECTOR_MAC_ADDR_OFST 24 +#define TELEMETRY_CONFIG_METRICS_COLLECTOR_MAC_ADDR_LEN 6 +#define TELEMETRY_CONFIG_METRICS_COLLECTOR_MAC_ADDR_LBN 192 +#define TELEMETRY_CONFIG_METRICS_COLLECTOR_MAC_ADDR_WIDTH 48 +/* Maximum number of latency measurements to be made on a telemetry flow. */ +#define TELEMETRY_CONFIG_MAX_METRICS_COUNT_OFST 30 +#define TELEMETRY_CONFIG_MAX_METRICS_COUNT_LEN 2 +#define TELEMETRY_CONFIG_MAX_METRICS_COUNT_LBN 240 +#define TELEMETRY_CONFIG_MAX_METRICS_COUNT_WIDTH 16 +/* Maximum duration for which a telemetry flow is monitored (in millisecs). */ +#define TELEMETRY_CONFIG_MONITOR_TIMEOUT_MS_OFST 32 +#define TELEMETRY_CONFIG_MONITOR_TIMEOUT_MS_LEN 4 +#define TELEMETRY_CONFIG_MONITOR_TIMEOUT_MS_LBN 256 +#define TELEMETRY_CONFIG_MONITOR_TIMEOUT_MS_WIDTH 32 + + +/***********************************/ +/* MC_CMD_TELEMETRY_CONFIG + * This top-level command includes various sub-opcodes that are used to apply + * (and read-back) telemetry related configuration parameters on the NIC. + * Reference - SF-120569-SW Telemetry Firmware Design. + */ +#define MC_CMD_TELEMETRY_CONFIG 0x139 +#undef MC_CMD_0x139_PRIVILEGE_CTG + +#define MC_CMD_0x139_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_TELEMETRY_CONFIG_IN msgrequest */ +#define MC_CMD_TELEMETRY_CONFIG_IN_LEN 4 +/* Telemetry configuration sub-operation code */ +#define MC_CMD_TELEMETRY_CONFIG_IN_OP_OFST 0 +#define MC_CMD_TELEMETRY_CONFIG_IN_OP_LEN 4 +/* enum: Configure parameters for telemetry measurements. */ +#define MC_CMD_TELEMETRY_CONFIG_OP_SET 0x1 +/* enum: Read current values of parameters for telemetry measurements. */ +#define MC_CMD_TELEMETRY_CONFIG_OP_GET 0x2 + +/* MC_CMD_TELEMETRY_CONFIG_IN_SET msgrequest: This command configures the + * parameters necessary for tcp-latency measurements. 
The adapter adds a filter + * for every new tcp flow seen in both tx and rx directions and tracks the + * telemetry measurements related to the flow in a tracking table. Entries in + * the tracking table live as long as N measurements are made on the flow or + * the flow has been in the tracking table for the maximum configured duration. + * Telemetry measurements in this command refer to tcp-latency measurements for + * data-to-ack latency as well as data-to-data latency. All telemetry + * measurements are bundled into a UDP packet and forwarded to a collector + * whose IP address is configured using this command. + */ +#define MC_CMD_TELEMETRY_CONFIG_IN_SET_LEN 40 +/* Telemetry configuration sub-operation code. Must be set to + * MC_CMD_TELEMETRY_CONFIG_OP_SET. + */ +#define MC_CMD_TELEMETRY_CONFIG_IN_SET_OP_OFST 0 +#define MC_CMD_TELEMETRY_CONFIG_IN_SET_OP_LEN 4 +/* struct of type TELEMETRY_CONFIG. */ +#define MC_CMD_TELEMETRY_CONFIG_IN_SET_PARAMETERS_OFST 4 +#define MC_CMD_TELEMETRY_CONFIG_IN_SET_PARAMETERS_LEN 36 + +/* MC_CMD_TELEMETRY_CONFIG_OUT_SET msgresponse */ +#define MC_CMD_TELEMETRY_CONFIG_OUT_SET_LEN 0 + +/* MC_CMD_TELEMETRY_CONFIG_IN_GET msgrequest: This command reads out the + * current values of config parameters necessary for tcp-latency measurements. + * See MC_CMD_TELEMETRY_SET_CONFIG for more information about the configuration + * parameters. + */ +#define MC_CMD_TELEMETRY_CONFIG_IN_GET_LEN 4 +/* Telemetry configuration sub-operation code. Must be set to + * MC_CMD_TELEMETRY_CONFIG_OP_GET. + */ +#define MC_CMD_TELEMETRY_CONFIG_IN_GET_OP_OFST 0 +#define MC_CMD_TELEMETRY_CONFIG_IN_GET_OP_LEN 4 + +/* MC_CMD_TELEMETRY_CONFIG_OUT_GET msgresponse */ +#define MC_CMD_TELEMETRY_CONFIG_OUT_GET_LEN 36 +/* struct of type TELEMETRY_CONFIG. */ +#define MC_CMD_TELEMETRY_CONFIG_OUT_GET_PARAMETERS_OFST 0 +#define MC_CMD_TELEMETRY_CONFIG_OUT_GET_PARAMETERS_LEN 36 + + +/***********************************/ +/* MC_CMD_GET_RX_PREFIX_ID + * This command is part of the mechanism for configuring the format of the RX + * packet prefix. It takes as input a bitmask of the fields the host would like + * to be in the prefix. If the hardware supports RX prefixes with that + * combination of fields, then this command returns a list of prefix-ids, + * opaque identifiers suitable for use in the RX_PREFIX_ID field of a + * MC_CMD_INIT_RXQ_V5_IN message. If the combination of fields is not + * supported, returns ENOTSUP. If the firmware can't create any new prefix-ids + * due to resource constraints, returns ENOSPC. + */ +#define MC_CMD_GET_RX_PREFIX_ID 0x13b +#undef MC_CMD_0x13b_PRIVILEGE_CTG + +#define MC_CMD_0x13b_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_RX_PREFIX_ID_IN msgrequest */ +#define MC_CMD_GET_RX_PREFIX_ID_IN_LEN 8 +/* Field bitmask. 
*/ +#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_OFST 0 +#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_LEN 8 +#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_LO_OFST 0 +#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_HI_OFST 4 +#define MC_CMD_GET_RX_PREFIX_ID_IN_LENGTH_LBN 0 +#define MC_CMD_GET_RX_PREFIX_ID_IN_LENGTH_WIDTH 1 +#define MC_CMD_GET_RX_PREFIX_ID_IN_RSS_HASH_VALID_LBN 1 +#define MC_CMD_GET_RX_PREFIX_ID_IN_RSS_HASH_VALID_WIDTH 1 +#define MC_CMD_GET_RX_PREFIX_ID_IN_USER_FLAG_LBN 2 +#define MC_CMD_GET_RX_PREFIX_ID_IN_USER_FLAG_WIDTH 1 +#define MC_CMD_GET_RX_PREFIX_ID_IN_CLASS_LBN 3 +#define MC_CMD_GET_RX_PREFIX_ID_IN_CLASS_WIDTH 1 +#define MC_CMD_GET_RX_PREFIX_ID_IN_PARTIAL_TSTAMP_LBN 4 +#define MC_CMD_GET_RX_PREFIX_ID_IN_PARTIAL_TSTAMP_WIDTH 1 +#define MC_CMD_GET_RX_PREFIX_ID_IN_RSS_HASH_LBN 5 +#define MC_CMD_GET_RX_PREFIX_ID_IN_RSS_HASH_WIDTH 1 +#define MC_CMD_GET_RX_PREFIX_ID_IN_USER_MARK_LBN 6 +#define MC_CMD_GET_RX_PREFIX_ID_IN_USER_MARK_WIDTH 1 +#define MC_CMD_GET_RX_PREFIX_ID_IN_INGRESS_VPORT_LBN 7 +#define MC_CMD_GET_RX_PREFIX_ID_IN_INGRESS_VPORT_WIDTH 1 +#define MC_CMD_GET_RX_PREFIX_ID_IN_CSUM_FRAME_LBN 8 +#define MC_CMD_GET_RX_PREFIX_ID_IN_CSUM_FRAME_WIDTH 1 +#define MC_CMD_GET_RX_PREFIX_ID_IN_VLAN_STRIP_TCI_LBN 9 +#define MC_CMD_GET_RX_PREFIX_ID_IN_VLAN_STRIP_TCI_WIDTH 1 + +/* MC_CMD_GET_RX_PREFIX_ID_OUT msgresponse */ +#define MC_CMD_GET_RX_PREFIX_ID_OUT_LENMIN 8 +#define MC_CMD_GET_RX_PREFIX_ID_OUT_LENMAX 252 +#define MC_CMD_GET_RX_PREFIX_ID_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_GET_RX_PREFIX_ID_OUT_LEN(num) (4+4*(num)) +#define MC_CMD_GET_RX_PREFIX_ID_OUT_RX_PREFIX_ID_NUM(len) (((len)-4)/4) +/* Number of prefix-ids returned */ +#define MC_CMD_GET_RX_PREFIX_ID_OUT_NUM_RX_PREFIX_IDS_OFST 0 +#define MC_CMD_GET_RX_PREFIX_ID_OUT_NUM_RX_PREFIX_IDS_LEN 4 +/* Opaque prefix identifiers which can be passed into MC_CMD_INIT_RXQ_V5 or + * MC_CMD_QUERY_PREFIX_ID + */ +#define MC_CMD_GET_RX_PREFIX_ID_OUT_RX_PREFIX_ID_OFST 4 +#define MC_CMD_GET_RX_PREFIX_ID_OUT_RX_PREFIX_ID_LEN 4 +#define MC_CMD_GET_RX_PREFIX_ID_OUT_RX_PREFIX_ID_MINNUM 1 +#define MC_CMD_GET_RX_PREFIX_ID_OUT_RX_PREFIX_ID_MAXNUM 62 +#define MC_CMD_GET_RX_PREFIX_ID_OUT_RX_PREFIX_ID_MAXNUM_MCDI2 254 + +/* RX_PREFIX_FIELD_INFO structuredef: Information about a single RX prefix + * field + */ +#define RX_PREFIX_FIELD_INFO_LEN 4 +/* The offset of the field from the start of the prefix, in bits */ +#define RX_PREFIX_FIELD_INFO_OFFSET_BITS_OFST 0 +#define RX_PREFIX_FIELD_INFO_OFFSET_BITS_LEN 2 +#define RX_PREFIX_FIELD_INFO_OFFSET_BITS_LBN 0 +#define RX_PREFIX_FIELD_INFO_OFFSET_BITS_WIDTH 16 +/* The width of the field, in bits */ +#define RX_PREFIX_FIELD_INFO_WIDTH_BITS_OFST 2 +#define RX_PREFIX_FIELD_INFO_WIDTH_BITS_LEN 1 +#define RX_PREFIX_FIELD_INFO_WIDTH_BITS_LBN 16 +#define RX_PREFIX_FIELD_INFO_WIDTH_BITS_WIDTH 8 +/* The type of the field. 
These enum values are in the same order as the fields
+ * in the MC_CMD_GET_RX_PREFIX_ID_IN bitmask
+ */
+#define RX_PREFIX_FIELD_INFO_TYPE_OFST 3
+#define RX_PREFIX_FIELD_INFO_TYPE_LEN 1
+#define RX_PREFIX_FIELD_INFO_LENGTH 0x0 /* enum */
+#define RX_PREFIX_FIELD_INFO_RSS_HASH_VALID 0x1 /* enum */
+#define RX_PREFIX_FIELD_INFO_USER_FLAG 0x2 /* enum */
+#define RX_PREFIX_FIELD_INFO_CLASS 0x3 /* enum */
+#define RX_PREFIX_FIELD_INFO_PARTIAL_TSTAMP 0x4 /* enum */
+#define RX_PREFIX_FIELD_INFO_RSS_HASH 0x5 /* enum */
+#define RX_PREFIX_FIELD_INFO_USER_MARK 0x6 /* enum */
+#define RX_PREFIX_FIELD_INFO_INGRESS_VPORT 0x7 /* enum */
+#define RX_PREFIX_FIELD_INFO_CSUM_FRAME 0x8 /* enum */
+#define RX_PREFIX_FIELD_INFO_VLAN_STRIP_TCI 0x9 /* enum */
+#define RX_PREFIX_FIELD_INFO_TYPE_LBN 24
+#define RX_PREFIX_FIELD_INFO_TYPE_WIDTH 8
+
+/* RX_PREFIX_FIXED_RESPONSE structuredef: Information about an RX prefix in
+ * which every field has a fixed offset and width
+ */
+#define RX_PREFIX_FIXED_RESPONSE_LENMIN 4
+#define RX_PREFIX_FIXED_RESPONSE_LENMAX 252
+#define RX_PREFIX_FIXED_RESPONSE_LENMAX_MCDI2 1020
+#define RX_PREFIX_FIXED_RESPONSE_LEN(num) (4+4*(num))
+#define RX_PREFIX_FIXED_RESPONSE_FIELDS_NUM(len) (((len)-4)/4)
+/* Length of the RX prefix in bytes */
+#define RX_PREFIX_FIXED_RESPONSE_PREFIX_LENGTH_BYTES_OFST 0
+#define RX_PREFIX_FIXED_RESPONSE_PREFIX_LENGTH_BYTES_LEN 1
+#define RX_PREFIX_FIXED_RESPONSE_PREFIX_LENGTH_BYTES_LBN 0
+#define RX_PREFIX_FIXED_RESPONSE_PREFIX_LENGTH_BYTES_WIDTH 8
+/* Number of fields present in the prefix */
+#define RX_PREFIX_FIXED_RESPONSE_FIELD_COUNT_OFST 1
+#define RX_PREFIX_FIXED_RESPONSE_FIELD_COUNT_LEN 1
+#define RX_PREFIX_FIXED_RESPONSE_FIELD_COUNT_LBN 8
+#define RX_PREFIX_FIXED_RESPONSE_FIELD_COUNT_WIDTH 8
+#define RX_PREFIX_FIXED_RESPONSE_RESERVED_OFST 2
+#define RX_PREFIX_FIXED_RESPONSE_RESERVED_LEN 2
+#define RX_PREFIX_FIXED_RESPONSE_RESERVED_LBN 16
+#define RX_PREFIX_FIXED_RESPONSE_RESERVED_WIDTH 16
+/* Array of RX_PREFIX_FIELD_INFO structures, of length FIELD_COUNT */
+#define RX_PREFIX_FIXED_RESPONSE_FIELDS_OFST 4
+#define RX_PREFIX_FIXED_RESPONSE_FIELDS_LEN 4
+#define RX_PREFIX_FIXED_RESPONSE_FIELDS_MINNUM 0
+#define RX_PREFIX_FIXED_RESPONSE_FIELDS_MAXNUM 62
+#define RX_PREFIX_FIXED_RESPONSE_FIELDS_MAXNUM_MCDI2 254
+#define RX_PREFIX_FIXED_RESPONSE_FIELDS_LBN 32
+#define RX_PREFIX_FIXED_RESPONSE_FIELDS_WIDTH 32
+
+
+/***********************************/
+/* MC_CMD_QUERY_RX_PREFIX_ID
+ * This command takes an RX prefix id (obtained from MC_CMD_GET_RX_PREFIX_ID)
+ * and returns a description of the RX prefix of packets delivered to an RXQ
+ * created with that prefix id
+ */
+#define MC_CMD_QUERY_RX_PREFIX_ID 0x13c
+#undef MC_CMD_0x13c_PRIVILEGE_CTG
+
+#define MC_CMD_0x13c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_QUERY_RX_PREFIX_ID_IN msgrequest */
+#define MC_CMD_QUERY_RX_PREFIX_ID_IN_LEN 4
+/* Prefix id to query */
+#define MC_CMD_QUERY_RX_PREFIX_ID_IN_RX_PREFIX_ID_OFST 0
+#define MC_CMD_QUERY_RX_PREFIX_ID_IN_RX_PREFIX_ID_LEN 4
+
+/* MC_CMD_QUERY_RX_PREFIX_ID_OUT msgresponse */
+#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_LENMIN 4
+#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_LENMAX 252
+#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_LEN(num) (4+1*(num))
+#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_NUM(len) (((len)-4)/1)
+/* An enum describing the structure of this response. */
+#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_TYPE_OFST 0
+#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_TYPE_LEN 1
+/* enum: The response is of format RX_PREFIX_FIXED_RESPONSE */
+#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_TYPE_FIXED 0x0
+#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESERVED_OFST 1
+#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESERVED_LEN 3
+/* The response. Its format is as defined by the RESPONSE_TYPE value */
+#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_OFST 4
+#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_LEN 1
+#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_MINNUM 0
+#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_MAXNUM 248
+#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_MAXNUM_MCDI2 1016
+
+
+/***********************************/
+/* MC_CMD_BUNDLE
+ * A command to perform various bundle-related operations on insecure cards.
+ */
+#define MC_CMD_BUNDLE 0x13d
+#undef MC_CMD_0x13d_PRIVILEGE_CTG
+
+#define MC_CMD_0x13d_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_BUNDLE_IN msgrequest */
+#define MC_CMD_BUNDLE_IN_LEN 4
+/* Sub-command code */
+#define MC_CMD_BUNDLE_IN_OP_OFST 0
+#define MC_CMD_BUNDLE_IN_OP_LEN 4
+/* enum: Get the current host access mode set on component partitions. */
+#define MC_CMD_BUNDLE_IN_OP_COMPONENT_ACCESS_GET 0x0
+/* enum: Set the host access mode set on component partitions. */
+#define MC_CMD_BUNDLE_IN_OP_COMPONENT_ACCESS_SET 0x1
+
+/* MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_IN msgrequest: Retrieve the current
+ * access mode on component partitions such as MC_FIRMWARE, SUC_FIRMWARE and
+ * EXPANSION_UEFI. This command only works on engineering (insecure) cards. On
+ * secure adapters, this command returns MC_CMD_ERR_EPERM.
+ */
+#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_IN_LEN 4
+/* Sub-command code. Must be OP_COMPONENT_ACCESS_GET. */
+#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_IN_OP_OFST 0
+#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_IN_OP_LEN 4
+
+/* MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_OUT msgresponse: Returns the access
+ * control mode.
+ */
+#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_OUT_LEN 4
+/* Access mode of component partitions. */
+#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_OUT_ACCESS_MODE_OFST 0
+#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_OUT_ACCESS_MODE_LEN 4
+/* enum: Component partitions are read-only from the host. */
+#define MC_CMD_BUNDLE_COMPONENTS_READ_ONLY 0x0
+/* enum: Component partitions can be read from and written to by the host. */
+#define MC_CMD_BUNDLE_COMPONENTS_READ_WRITE 0x1
+
+/* MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_IN msgrequest: The component
+ * partitions such as MC_FIRMWARE, SUC_FIRMWARE, EXPANSION_UEFI are set as
+ * read-only on firmware built with bundle support. This command marks these
+ * partitions as read/writeable. The access status set by this command does not
+ * persist across MC reboots. This command only works on engineering (insecure)
+ * cards. On secure adapters, this command returns MC_CMD_ERR_EPERM.
+ */
+#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_IN_LEN 8
+/* Sub-command code. Must be OP_COMPONENT_ACCESS_SET. */
+#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_IN_OP_OFST 0
+#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_IN_OP_LEN 4
+/* Access mode of component partitions. */
+#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_IN_ACCESS_MODE_OFST 4
+#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_IN_ACCESS_MODE_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_OUT/ACCESS_MODE */
+
+/* MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_OUT msgresponse */
+#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_VPD
+ * Read all VPD starting from a given address
+ */
+#define MC_CMD_GET_VPD 0x165
+#undef MC_CMD_0x165_PRIVILEGE_CTG
+
+#define MC_CMD_0x165_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_VPD_IN msgrequest */
+#define MC_CMD_GET_VPD_IN_LEN 4
+/* To request only VPD tags from a certain origin. */
+#define MC_CMD_GET_VPD_IN_STORAGE_TYPE_OFST 0
+#define MC_CMD_GET_VPD_IN_STORAGE_TYPE_LEN 2
+/* enum: Return all VPD regardless of origin. */
+#define MC_CMD_GET_VPD_IN_STORAGE_TYPE_ALL 0x0
+/* enum: Return only VPD tags generated by MCFW (not stored in NVRAM) */
+#define MC_CMD_GET_VPD_IN_STORAGE_TYPE_LIVE 0x1
+/* enum: Return only VPD tags stored in NVRAM (not generated by MCFW) */
+#define MC_CMD_GET_VPD_IN_STORAGE_TYPE_NVRAM 0x2
+/* VPD address to start from. In case VPD is longer than MCDI buffer
+ * (unlikely), user can make multiple calls with different starting addresses.
+ */
+#define MC_CMD_GET_VPD_IN_ADDR_OFST 2
+#define MC_CMD_GET_VPD_IN_ADDR_LEN 2
+
+/* MC_CMD_GET_VPD_OUT msgresponse */
+#define MC_CMD_GET_VPD_OUT_LENMIN 5
+#define MC_CMD_GET_VPD_OUT_LENMAX 252
+#define MC_CMD_GET_VPD_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_GET_VPD_OUT_LEN(num) (4+1*(num))
+#define MC_CMD_GET_VPD_OUT_DATA_NUM(len) (((len)-4)/1)
+/* Length of VPD data returned. */
+#define MC_CMD_GET_VPD_OUT_DATALEN_OFST 0
+#define MC_CMD_GET_VPD_OUT_DATALEN_LEN 4
+/* VPD data returned. */
+#define MC_CMD_GET_VPD_OUT_DATA_OFST 4
+#define MC_CMD_GET_VPD_OUT_DATA_LEN 1
+#define MC_CMD_GET_VPD_OUT_DATA_MINNUM 1
+#define MC_CMD_GET_VPD_OUT_DATA_MAXNUM 248
+#define MC_CMD_GET_VPD_OUT_DATA_MAXNUM_MCDI2 1016
+
+
+/***********************************/
+/* MC_CMD_GET_NCSI_INFO
+ * Provide information about the NC-SI stack
+ */
+#define MC_CMD_GET_NCSI_INFO 0x167
+#undef MC_CMD_0x167_PRIVILEGE_CTG
+
+#define MC_CMD_0x167_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_NCSI_INFO_IN msgrequest */
+#define MC_CMD_GET_NCSI_INFO_IN_LEN 8
+/* Operation to be performed */
+#define MC_CMD_GET_NCSI_INFO_IN_OP_OFST 0
+#define MC_CMD_GET_NCSI_INFO_IN_OP_LEN 4
+/* enum: Information on the link settings. */
+#define MC_CMD_GET_NCSI_INFO_IN_OP_LINK 0x0
+/* enum: Statistics associated with the channel */
+#define MC_CMD_GET_NCSI_INFO_IN_OP_STATISTICS 0x1
+/* The NC-SI channel on which the operation is to be performed */
+#define MC_CMD_GET_NCSI_INFO_IN_CHANNEL_OFST 4
+#define MC_CMD_GET_NCSI_INFO_IN_CHANNEL_LEN 4
+
+/* MC_CMD_GET_NCSI_INFO_LINK_OUT msgresponse */
+#define MC_CMD_GET_NCSI_INFO_LINK_OUT_LEN 12
+/* Settings as received from BMC. */
+#define MC_CMD_GET_NCSI_INFO_LINK_OUT_SETTINGS_OFST 0
+#define MC_CMD_GET_NCSI_INFO_LINK_OUT_SETTINGS_LEN 4
+/* Advertised capabilities applied to channel. */
+#define MC_CMD_GET_NCSI_INFO_LINK_OUT_ADV_CAP_OFST 4
+#define MC_CMD_GET_NCSI_INFO_LINK_OUT_ADV_CAP_LEN 4
+/* General status */
+#define MC_CMD_GET_NCSI_INFO_LINK_OUT_STATUS_OFST 8
+#define MC_CMD_GET_NCSI_INFO_LINK_OUT_STATUS_LEN 4
+#define MC_CMD_GET_NCSI_INFO_LINK_OUT_STATE_LBN 0
+#define MC_CMD_GET_NCSI_INFO_LINK_OUT_STATE_WIDTH 2
+#define MC_CMD_GET_NCSI_INFO_LINK_OUT_ENABLE_LBN 2
+#define MC_CMD_GET_NCSI_INFO_LINK_OUT_ENABLE_WIDTH 1
+#define MC_CMD_GET_NCSI_INFO_LINK_OUT_NETWORK_TX_LBN 3
+#define MC_CMD_GET_NCSI_INFO_LINK_OUT_NETWORK_TX_WIDTH 1
+#define MC_CMD_GET_NCSI_INFO_LINK_OUT_ATTACHED_LBN 4
+#define MC_CMD_GET_NCSI_INFO_LINK_OUT_ATTACHED_WIDTH 1
+
+/* MC_CMD_GET_NCSI_INFO_STATISTICS_OUT msgresponse */
+#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_LEN 28
+/* The number of NC-SI commands received. */
+#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_CMDS_RX_OFST 0
+#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_CMDS_RX_LEN 4
+/* The number of NC-SI commands dropped. */
+#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_PKTS_DROPPED_OFST 4
+#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_PKTS_DROPPED_LEN 4
+/* The number of invalid NC-SI commands received. */
+#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_CMD_TYPE_ERRS_OFST 8
+#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_CMD_TYPE_ERRS_LEN 4
+/* The number of checksum errors seen. */
+#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_CMD_CSUM_ERRS_OFST 12
+#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_CMD_CSUM_ERRS_LEN 4
+/* The number of NC-SI requests received. */
+#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_RX_PKTS_OFST 16
+#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_RX_PKTS_LEN 4
+/* The number of NC-SI responses sent (includes AENs) */
+#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_TX_PKTS_OFST 20
+#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_TX_PKTS_LEN 4
+/* The number of NC-SI AENs sent */
+#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_AENS_SENT_OFST 24
+#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_AENS_SENT_LEN 4
+
+/* EF100_MCDI_EVENT structuredef: The structure of an MCDI_EVENT on EF100
+ * platforms
+ */
+#define EF100_MCDI_EVENT_LEN 8
+/* Defined by QDMA. Will be 1 for all SFC events */
+#define EF100_MCDI_EVENT_EV_DATA_FORMAT_LBN 0
+#define EF100_MCDI_EVENT_EV_DATA_FORMAT_WIDTH 1
+/* Defined by QDMA. The phase bit, changes each time round the event ring */
+#define EF100_MCDI_EVENT_EV_EVQ_PHASE_LBN 1
+#define EF100_MCDI_EVENT_EV_EVQ_PHASE_WIDTH 1
+/* Defined by QDMA. Meaning unclear. */
+#define EF100_MCDI_EVENT_EV_ERROR_LBN 2
+#define EF100_MCDI_EVENT_EV_ERROR_WIDTH 1
+/* Defined by QDMA. Indicates a descriptor was consumed. */
+#define EF100_MCDI_EVENT_EV_DESC_USED_LBN 3
+#define EF100_MCDI_EVENT_EV_DESC_USED_WIDTH 1
+/* Indicates the top-level type of the event. Event types are as documented in
+ * SF-119689-TC and defined in events.yml. For MCDI events it's always
+ * EF100_EV_MCDI. HW can generate other event types for its events.
+ */ +#define EF100_MCDI_EVENT_EV_TYPE_LBN 4 +#define EF100_MCDI_EVENT_EV_TYPE_WIDTH 4 +#define EF100_MCDI_EVENT_CODE_OFST 1 +#define EF100_MCDI_EVENT_CODE_LEN 1 +/* Enum values, see field(s): */ +/* MCDI_EVENT/CODE */ +#define EF100_MCDI_EVENT_CODE_LBN 8 +#define EF100_MCDI_EVENT_CODE_WIDTH 8 +/* Data associated with PTP events which doesn't fit into the main DATA field + */ +#define EF100_MCDI_EVENT_PTP_DATA_OFST 2 +#define EF100_MCDI_EVENT_PTP_DATA_LEN 1 +#define EF100_MCDI_EVENT_PTP_DATA_LBN 16 +#define EF100_MCDI_EVENT_PTP_DATA_WIDTH 8 +/* Alias for PTP_DATA. Nobody uses SRC to mean the source of anything, but + * there's code that uses it to refer to ptp data + */ +#define EF100_MCDI_EVENT_SRC_OFST 2 +#define EF100_MCDI_EVENT_SRC_LEN 1 +#define EF100_MCDI_EVENT_SRC_LBN 16 +#define EF100_MCDI_EVENT_SRC_WIDTH 8 +/* Set if this message continues into another event */ +#define EF100_MCDI_EVENT_CONT_LBN 24 +#define EF100_MCDI_EVENT_CONT_WIDTH 1 +#define EF100_MCDI_EVENT_LEVEL_LBN 25 +#define EF100_MCDI_EVENT_LEVEL_WIDTH 3 +/* Enum values, see field(s): */ +/* MCDI_EVENT/LEVEL */ +/* Data associated with this event. Format depends on the event code. */ +#define EF100_MCDI_EVENT_DATA_OFST 4 +#define EF100_MCDI_EVENT_DATA_LEN 4 +#define EF100_MCDI_EVENT_DATA_LBN 32 +#define EF100_MCDI_EVENT_DATA_WIDTH 32 + +/* CLOCK_INFO structuredef: Information about a single hardware clock */ +#define CLOCK_INFO_LEN 28 +/* Enumeration that uniquely identifies the clock */ +#define CLOCK_INFO_CLOCK_ID_OFST 0 +#define CLOCK_INFO_CLOCK_ID_LEN 2 +/* enum: The Riverhead CMC (card MC) */ +#define CLOCK_INFO_CLOCK_CMC 0x0 +/* enum: The Riverhead NMC (network MC) */ +#define CLOCK_INFO_CLOCK_NMC 0x1 +/* enum: The Riverhead SDNET slice main logic */ +#define CLOCK_INFO_CLOCK_SDNET 0x2 +/* enum: The Riverhead SDNET LUT */ +#define CLOCK_INFO_CLOCK_SDNET_LUT 0x3 +/* enum: The Riverhead SDNET control logic */ +#define CLOCK_INFO_CLOCK_SDNET_CTRL 0x4 +/* enum: The Riverhead Streaming SubSystem */ +#define CLOCK_INFO_CLOCK_SSS 0x5 +/* enum: The Riverhead network MAC and associated CSR registers */ +#define CLOCK_INFO_CLOCK_MAC 0x6 +#define CLOCK_INFO_CLOCK_ID_LBN 0 +#define CLOCK_INFO_CLOCK_ID_WIDTH 16 +/* Assorted flags */ +#define CLOCK_INFO_FLAGS_OFST 2 +#define CLOCK_INFO_FLAGS_LEN 2 +#define CLOCK_INFO_SETTABLE_LBN 0 +#define CLOCK_INFO_SETTABLE_WIDTH 1 +#define CLOCK_INFO_FLAGS_LBN 16 +#define CLOCK_INFO_FLAGS_WIDTH 16 +/* The frequency in HZ */ +#define CLOCK_INFO_FREQUENCY_OFST 4 +#define CLOCK_INFO_FREQUENCY_LEN 8 +#define CLOCK_INFO_FREQUENCY_LO_OFST 4 +#define CLOCK_INFO_FREQUENCY_HI_OFST 8 +#define CLOCK_INFO_FREQUENCY_LBN 32 +#define CLOCK_INFO_FREQUENCY_WIDTH 64 +/* Human-readable ASCII name for clock, with NUL termination */ +#define CLOCK_INFO_NAME_OFST 12 +#define CLOCK_INFO_NAME_LEN 1 +#define CLOCK_INFO_NAME_NUM 16 +#define CLOCK_INFO_NAME_LBN 96 +#define CLOCK_INFO_NAME_WIDTH 8 + + +/***********************************/ +/* MC_CMD_GET_CLOCKS_INFO + * Get information about the device clocks + */ +#define MC_CMD_GET_CLOCKS_INFO 0x166 +#undef MC_CMD_0x166_PRIVILEGE_CTG + +#define MC_CMD_0x166_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_CLOCKS_INFO_IN msgrequest */ +#define MC_CMD_GET_CLOCKS_INFO_IN_LEN 0 + +/* MC_CMD_GET_CLOCKS_INFO_OUT msgresponse */ +#define MC_CMD_GET_CLOCKS_INFO_OUT_LENMIN 0 +#define MC_CMD_GET_CLOCKS_INFO_OUT_LENMAX 252 +#define MC_CMD_GET_CLOCKS_INFO_OUT_LENMAX_MCDI2 1008 +#define MC_CMD_GET_CLOCKS_INFO_OUT_LEN(num) (0+28*(num)) +#define 
MC_CMD_GET_CLOCKS_INFO_OUT_INFOS_NUM(len) (((len)-0)/28) +/* An array of CLOCK_INFO structures. */ +#define MC_CMD_GET_CLOCKS_INFO_OUT_INFOS_OFST 0 +#define MC_CMD_GET_CLOCKS_INFO_OUT_INFOS_LEN 28 +#define MC_CMD_GET_CLOCKS_INFO_OUT_INFOS_MINNUM 0 +#define MC_CMD_GET_CLOCKS_INFO_OUT_INFOS_MAXNUM 9 +#define MC_CMD_GET_CLOCKS_INFO_OUT_INFOS_MAXNUM_MCDI2 36 + +#endif /* _SIENA_MC_DRIVER_PCOL_H */ diff --git a/src/spdk/dpdk/drivers/net/sfc/base/efx_regs_mcdi_aoe.h b/src/spdk/dpdk/drivers/net/sfc/base/efx_regs_mcdi_aoe.h new file mode 100644 index 000000000..f15c7b206 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/base/efx_regs_mcdi_aoe.h @@ -0,0 +1,2977 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2008-2019 Solarflare Communications Inc. + */ + +/* + * This file is automatically generated. DO NOT EDIT IT. + * To make changes, edit the .yml files in sfregistry under doc/mcdi/ and + * rebuild this file with "make -C doc mcdiheaders". + */ + +#ifndef _SIENA_MC_DRIVER_PCOL_AOE_H +#define _SIENA_MC_DRIVER_PCOL_AOE_H + + + +/***********************************/ +/* MC_CMD_FC + * Perform an FC operation + */ +#define MC_CMD_FC 0x9 + +/* MC_CMD_FC_IN msgrequest */ +#define MC_CMD_FC_IN_LEN 4 +#define MC_CMD_FC_IN_OP_HDR_OFST 0 +#define MC_CMD_FC_IN_OP_HDR_LEN 4 +#define MC_CMD_FC_IN_OP_LBN 0 +#define MC_CMD_FC_IN_OP_WIDTH 8 +/* enum: NULL MCDI command to FC. */ +#define MC_CMD_FC_OP_NULL 0x1 +/* enum: Unused opcode */ +#define MC_CMD_FC_OP_UNUSED 0x2 +/* enum: MAC driver commands */ +#define MC_CMD_FC_OP_MAC 0x3 +/* enum: Read FC memory */ +#define MC_CMD_FC_OP_READ32 0x4 +/* enum: Write to FC memory */ +#define MC_CMD_FC_OP_WRITE32 0x5 +/* enum: Read FC memory */ +#define MC_CMD_FC_OP_TRC_READ 0x6 +/* enum: Write to FC memory */ +#define MC_CMD_FC_OP_TRC_WRITE 0x7 +/* enum: FC firmware Version */ +#define MC_CMD_FC_OP_GET_VERSION 0x8 +/* enum: Read FC memory */ +#define MC_CMD_FC_OP_TRC_RX_READ 0x9 +/* enum: Write to FC memory */ +#define MC_CMD_FC_OP_TRC_RX_WRITE 0xa +/* enum: SFP parameters */ +#define MC_CMD_FC_OP_SFP 0xb +/* enum: DDR3 test */ +#define MC_CMD_FC_OP_DDR_TEST 0xc +/* enum: Get Crash context from FC */ +#define MC_CMD_FC_OP_GET_ASSERT 0xd +/* enum: Get FPGA Build registers */ +#define MC_CMD_FC_OP_FPGA_BUILD 0xe +/* enum: Read map support commands */ +#define MC_CMD_FC_OP_READ_MAP 0xf +/* enum: FC Capabilities */ +#define MC_CMD_FC_OP_CAPABILITIES 0x10 +/* enum: FC Global flags */ +#define MC_CMD_FC_OP_GLOBAL_FLAGS 0x11 +/* enum: FC IO using relative addressing modes */ +#define MC_CMD_FC_OP_IO_REL 0x12 +/* enum: FPGA link information */ +#define MC_CMD_FC_OP_UHLINK 0x13 +/* enum: Configure loopbacks and link on FPGA ports */ +#define MC_CMD_FC_OP_SET_LINK 0x14 +/* enum: Licensing operations relating to AOE */ +#define MC_CMD_FC_OP_LICENSE 0x15 +/* enum: Startup information to the FC */ +#define MC_CMD_FC_OP_STARTUP 0x16 +/* enum: Configure a DMA read */ +#define MC_CMD_FC_OP_DMA 0x17 +/* enum: Configure a timed read */ +#define MC_CMD_FC_OP_TIMED_READ 0x18 +/* enum: Control UART logging */ +#define MC_CMD_FC_OP_LOG 0x19 +/* enum: Get the value of a given clock_id */ +#define MC_CMD_FC_OP_CLOCK 0x1a +/* enum: DDR3/QDR3 parameters */ +#define MC_CMD_FC_OP_DDR 0x1b +/* enum: PTP and timestamp control */ +#define MC_CMD_FC_OP_TIMESTAMP 0x1c +/* enum: Commands for SPI Flash interface */ +#define MC_CMD_FC_OP_SPI 0x1d +/* enum: Commands for diagnostic components */ +#define MC_CMD_FC_OP_DIAG 0x1e +/* enum: External AOE port. 
*/ +#define MC_CMD_FC_IN_PORT_EXT_OFST 0x0 +/* enum: Internal AOE port. */ +#define MC_CMD_FC_IN_PORT_INT_OFST 0x40 + +/* MC_CMD_FC_IN_NULL msgrequest */ +#define MC_CMD_FC_IN_NULL_LEN 4 +#define MC_CMD_FC_IN_CMD_OFST 0 +#define MC_CMD_FC_IN_CMD_LEN 4 + +/* MC_CMD_FC_IN_PHY msgrequest */ +#define MC_CMD_FC_IN_PHY_LEN 5 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* FC PHY driver operation code */ +#define MC_CMD_FC_IN_PHY_OP_OFST 4 +#define MC_CMD_FC_IN_PHY_OP_LEN 1 +/* enum: PHY init handler */ +#define MC_CMD_FC_OP_PHY_OP_INIT 0x1 +/* enum: PHY reconfigure handler */ +#define MC_CMD_FC_OP_PHY_OP_RECONFIGURE 0x2 +/* enum: PHY reboot handler */ +#define MC_CMD_FC_OP_PHY_OP_REBOOT 0x3 +/* enum: PHY get_supported_cap handler */ +#define MC_CMD_FC_OP_PHY_OP_GET_SUPPORTED_CAP 0x4 +/* enum: PHY get_config handler */ +#define MC_CMD_FC_OP_PHY_OP_GET_CONFIG 0x5 +/* enum: PHY get_media_info handler */ +#define MC_CMD_FC_OP_PHY_OP_GET_MEDIA_INFO 0x6 +/* enum: PHY set_led handler */ +#define MC_CMD_FC_OP_PHY_OP_SET_LED 0x7 +/* enum: PHY lasi_interrupt handler */ +#define MC_CMD_FC_OP_PHY_OP_LASI_INTERRUPT 0x8 +/* enum: PHY check_link handler */ +#define MC_CMD_FC_OP_PHY_OP_CHECK_LINK 0x9 +/* enum: PHY fill_stats handler */ +#define MC_CMD_FC_OP_PHY_OP_FILL_STATS 0xa +/* enum: PHY bpx_link_state_changed handler */ +#define MC_CMD_FC_OP_PHY_OP_BPX_LINK_STATE_CHANGED 0xb +/* enum: PHY get_state handler */ +#define MC_CMD_FC_OP_PHY_OP_GET_STATE 0xc +/* enum: PHY start_bist handler */ +#define MC_CMD_FC_OP_PHY_OP_START_BIST 0xd +/* enum: PHY poll_bist handler */ +#define MC_CMD_FC_OP_PHY_OP_POLL_BIST 0xe +/* enum: PHY nvram_test handler */ +#define MC_CMD_FC_OP_PHY_OP_NVRAM_TEST 0xf +/* enum: PHY relinquish handler */ +#define MC_CMD_FC_OP_PHY_OP_RELINQUISH_SPI 0x10 +/* enum: PHY read connection from FC - may be not required */ +#define MC_CMD_FC_OP_PHY_OP_GET_CONNECTION 0x11 +/* enum: PHY read flags from FC - may be not required */ +#define MC_CMD_FC_OP_PHY_OP_GET_FLAGS 0x12 + +/* MC_CMD_FC_IN_PHY_INIT msgrequest */ +#define MC_CMD_FC_IN_PHY_INIT_LEN 4 +#define MC_CMD_FC_IN_PHY_CMD_OFST 0 +#define MC_CMD_FC_IN_PHY_CMD_LEN 4 + +/* MC_CMD_FC_IN_MAC msgrequest */ +#define MC_CMD_FC_IN_MAC_LEN 8 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_MAC_HEADER_OFST 4 +#define MC_CMD_FC_IN_MAC_HEADER_LEN 4 +#define MC_CMD_FC_IN_MAC_OP_LBN 0 +#define MC_CMD_FC_IN_MAC_OP_WIDTH 8 +/* enum: MAC reconfigure handler */ +#define MC_CMD_FC_OP_MAC_OP_RECONFIGURE 0x1 +/* enum: MAC Set command - same as MC_CMD_SET_MAC */ +#define MC_CMD_FC_OP_MAC_OP_SET_LINK 0x2 +/* enum: MAC statistics */ +#define MC_CMD_FC_OP_MAC_OP_GET_STATS 0x3 +/* enum: MAC RX statistics */ +#define MC_CMD_FC_OP_MAC_OP_GET_RX_STATS 0x6 +/* enum: MAC TX statistics */ +#define MC_CMD_FC_OP_MAC_OP_GET_TX_STATS 0x7 +/* enum: MAC Read status */ +#define MC_CMD_FC_OP_MAC_OP_READ_STATUS 0x8 +#define MC_CMD_FC_IN_MAC_PORT_TYPE_LBN 8 +#define MC_CMD_FC_IN_MAC_PORT_TYPE_WIDTH 8 +/* enum: External FPGA port. */ +#define MC_CMD_FC_PORT_EXT 0x0 +/* enum: Internal Siena-facing FPGA ports. */ +#define MC_CMD_FC_PORT_INT 0x1 +#define MC_CMD_FC_IN_MAC_PORT_IDX_LBN 16 +#define MC_CMD_FC_IN_MAC_PORT_IDX_WIDTH 8 +#define MC_CMD_FC_IN_MAC_CMD_FORMAT_LBN 24 +#define MC_CMD_FC_IN_MAC_CMD_FORMAT_WIDTH 8 +/* enum: Default FC command format; the fields PORT_TYPE and PORT_IDX are + * irrelevant. Port number is derived from pci_fn; passed in FC header. 
+ */ +#define MC_CMD_FC_OP_MAC_CMD_FORMAT_DEFAULT 0x0 +/* enum: Override default port number. Port number determined by fields + * PORT_TYPE and PORT_IDX. + */ +#define MC_CMD_FC_OP_MAC_CMD_FORMAT_PORT_OVERRIDE 0x1 + +/* MC_CMD_FC_IN_MAC_RECONFIGURE msgrequest */ +#define MC_CMD_FC_IN_MAC_RECONFIGURE_LEN 8 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* MC_CMD_FC_IN_MAC_HEADER_OFST 4 */ +/* MC_CMD_FC_IN_MAC_HEADER_LEN 4 */ + +/* MC_CMD_FC_IN_MAC_SET_LINK msgrequest */ +#define MC_CMD_FC_IN_MAC_SET_LINK_LEN 32 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* MC_CMD_FC_IN_MAC_HEADER_OFST 4 */ +/* MC_CMD_FC_IN_MAC_HEADER_LEN 4 */ +/* MTU size */ +#define MC_CMD_FC_IN_MAC_SET_LINK_MTU_OFST 8 +#define MC_CMD_FC_IN_MAC_SET_LINK_MTU_LEN 4 +/* Drain Tx FIFO */ +#define MC_CMD_FC_IN_MAC_SET_LINK_DRAIN_OFST 12 +#define MC_CMD_FC_IN_MAC_SET_LINK_DRAIN_LEN 4 +#define MC_CMD_FC_IN_MAC_SET_LINK_ADDR_OFST 16 +#define MC_CMD_FC_IN_MAC_SET_LINK_ADDR_LEN 8 +#define MC_CMD_FC_IN_MAC_SET_LINK_ADDR_LO_OFST 16 +#define MC_CMD_FC_IN_MAC_SET_LINK_ADDR_HI_OFST 20 +#define MC_CMD_FC_IN_MAC_SET_LINK_REJECT_OFST 24 +#define MC_CMD_FC_IN_MAC_SET_LINK_REJECT_LEN 4 +#define MC_CMD_FC_IN_MAC_SET_LINK_REJECT_UNICAST_LBN 0 +#define MC_CMD_FC_IN_MAC_SET_LINK_REJECT_UNICAST_WIDTH 1 +#define MC_CMD_FC_IN_MAC_SET_LINK_REJECT_BRDCAST_LBN 1 +#define MC_CMD_FC_IN_MAC_SET_LINK_REJECT_BRDCAST_WIDTH 1 +#define MC_CMD_FC_IN_MAC_SET_LINK_FCNTL_OFST 28 +#define MC_CMD_FC_IN_MAC_SET_LINK_FCNTL_LEN 4 + +/* MC_CMD_FC_IN_MAC_READ_STATUS msgrequest */ +#define MC_CMD_FC_IN_MAC_READ_STATUS_LEN 8 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* MC_CMD_FC_IN_MAC_HEADER_OFST 4 */ +/* MC_CMD_FC_IN_MAC_HEADER_LEN 4 */ + +/* MC_CMD_FC_IN_MAC_GET_RX_STATS msgrequest */ +#define MC_CMD_FC_IN_MAC_GET_RX_STATS_LEN 8 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* MC_CMD_FC_IN_MAC_HEADER_OFST 4 */ +/* MC_CMD_FC_IN_MAC_HEADER_LEN 4 */ + +/* MC_CMD_FC_IN_MAC_GET_TX_STATS msgrequest */ +#define MC_CMD_FC_IN_MAC_GET_TX_STATS_LEN 8 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* MC_CMD_FC_IN_MAC_HEADER_OFST 4 */ +/* MC_CMD_FC_IN_MAC_HEADER_LEN 4 */ + +/* MC_CMD_FC_IN_MAC_GET_STATS msgrequest */ +#define MC_CMD_FC_IN_MAC_GET_STATS_LEN 20 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* MC_CMD_FC_IN_MAC_HEADER_OFST 4 */ +/* MC_CMD_FC_IN_MAC_HEADER_LEN 4 */ +/* MC Statistics index */ +#define MC_CMD_FC_IN_MAC_GET_STATS_STATS_INDEX_OFST 8 +#define MC_CMD_FC_IN_MAC_GET_STATS_STATS_INDEX_LEN 4 +#define MC_CMD_FC_IN_MAC_GET_STATS_FLAGS_OFST 12 +#define MC_CMD_FC_IN_MAC_GET_STATS_FLAGS_LEN 4 +#define MC_CMD_FC_IN_MAC_GET_STATS_CLEAR_ALL_LBN 0 +#define MC_CMD_FC_IN_MAC_GET_STATS_CLEAR_ALL_WIDTH 1 +#define MC_CMD_FC_IN_MAC_GET_STATS_CLEAR_LBN 1 +#define MC_CMD_FC_IN_MAC_GET_STATS_CLEAR_WIDTH 1 +#define MC_CMD_FC_IN_MAC_GET_STATS_UPDATE_LBN 2 +#define MC_CMD_FC_IN_MAC_GET_STATS_UPDATE_WIDTH 1 +/* Number of statistics to read */ +#define MC_CMD_FC_IN_MAC_GET_STATS_NUM_OFST 16 +#define MC_CMD_FC_IN_MAC_GET_STATS_NUM_LEN 4 +#define MC_CMD_FC_MAC_NSTATS_PER_BLOCK 0x1e /* enum */ +#define MC_CMD_FC_MAC_NBYTES_PER_STAT 0x8 /* enum */ + +/* MC_CMD_FC_IN_READ32 msgrequest */ +#define MC_CMD_FC_IN_READ32_LEN 16 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_READ32_ADDR_HI_OFST 4 +#define MC_CMD_FC_IN_READ32_ADDR_HI_LEN 4 +#define MC_CMD_FC_IN_READ32_ADDR_LO_OFST 8 +#define MC_CMD_FC_IN_READ32_ADDR_LO_LEN 4 +#define 
MC_CMD_FC_IN_READ32_NUMWORDS_OFST 12 +#define MC_CMD_FC_IN_READ32_NUMWORDS_LEN 4 + +/* MC_CMD_FC_IN_WRITE32 msgrequest */ +#define MC_CMD_FC_IN_WRITE32_LENMIN 16 +#define MC_CMD_FC_IN_WRITE32_LENMAX 252 +#define MC_CMD_FC_IN_WRITE32_LENMAX_MCDI2 1020 +#define MC_CMD_FC_IN_WRITE32_LEN(num) (12+4*(num)) +#define MC_CMD_FC_IN_WRITE32_BUFFER_NUM(len) (((len)-12)/4) +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_WRITE32_ADDR_HI_OFST 4 +#define MC_CMD_FC_IN_WRITE32_ADDR_HI_LEN 4 +#define MC_CMD_FC_IN_WRITE32_ADDR_LO_OFST 8 +#define MC_CMD_FC_IN_WRITE32_ADDR_LO_LEN 4 +#define MC_CMD_FC_IN_WRITE32_BUFFER_OFST 12 +#define MC_CMD_FC_IN_WRITE32_BUFFER_LEN 4 +#define MC_CMD_FC_IN_WRITE32_BUFFER_MINNUM 1 +#define MC_CMD_FC_IN_WRITE32_BUFFER_MAXNUM 60 +#define MC_CMD_FC_IN_WRITE32_BUFFER_MAXNUM_MCDI2 252 + +/* MC_CMD_FC_IN_TRC_READ msgrequest */ +#define MC_CMD_FC_IN_TRC_READ_LEN 12 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_TRC_READ_TRC_OFST 4 +#define MC_CMD_FC_IN_TRC_READ_TRC_LEN 4 +#define MC_CMD_FC_IN_TRC_READ_CHANNEL_OFST 8 +#define MC_CMD_FC_IN_TRC_READ_CHANNEL_LEN 4 + +/* MC_CMD_FC_IN_TRC_WRITE msgrequest */ +#define MC_CMD_FC_IN_TRC_WRITE_LEN 28 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_TRC_WRITE_TRC_OFST 4 +#define MC_CMD_FC_IN_TRC_WRITE_TRC_LEN 4 +#define MC_CMD_FC_IN_TRC_WRITE_CHANNEL_OFST 8 +#define MC_CMD_FC_IN_TRC_WRITE_CHANNEL_LEN 4 +#define MC_CMD_FC_IN_TRC_WRITE_DATA_OFST 12 +#define MC_CMD_FC_IN_TRC_WRITE_DATA_LEN 4 +#define MC_CMD_FC_IN_TRC_WRITE_DATA_NUM 4 + +/* MC_CMD_FC_IN_GET_VERSION msgrequest */ +#define MC_CMD_FC_IN_GET_VERSION_LEN 4 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ + +/* MC_CMD_FC_IN_TRC_RX_READ msgrequest */ +#define MC_CMD_FC_IN_TRC_RX_READ_LEN 12 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_TRC_RX_READ_TRC_OFST 4 +#define MC_CMD_FC_IN_TRC_RX_READ_TRC_LEN 4 +#define MC_CMD_FC_IN_TRC_RX_READ_CHANNEL_OFST 8 +#define MC_CMD_FC_IN_TRC_RX_READ_CHANNEL_LEN 4 + +/* MC_CMD_FC_IN_TRC_RX_WRITE msgrequest */ +#define MC_CMD_FC_IN_TRC_RX_WRITE_LEN 20 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_TRC_RX_WRITE_TRC_OFST 4 +#define MC_CMD_FC_IN_TRC_RX_WRITE_TRC_LEN 4 +#define MC_CMD_FC_IN_TRC_RX_WRITE_CHANNEL_OFST 8 +#define MC_CMD_FC_IN_TRC_RX_WRITE_CHANNEL_LEN 4 +#define MC_CMD_FC_IN_TRC_RX_WRITE_DATA_OFST 12 +#define MC_CMD_FC_IN_TRC_RX_WRITE_DATA_LEN 4 +#define MC_CMD_FC_IN_TRC_RX_WRITE_DATA_NUM 2 + +/* MC_CMD_FC_IN_SFP msgrequest */ +#define MC_CMD_FC_IN_SFP_LEN 28 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* Link speed is 100, 1000, 10000, 40000 */ +#define MC_CMD_FC_IN_SFP_SPEED_OFST 4 +#define MC_CMD_FC_IN_SFP_SPEED_LEN 4 +/* Length of copper cable - zero when not relevant (e.g. if cable is fibre) */ +#define MC_CMD_FC_IN_SFP_COPPER_LEN_OFST 8 +#define MC_CMD_FC_IN_SFP_COPPER_LEN_LEN 4 +/* Not relevant for cards with QSFP modules. For older cards, true if module is + * a dual speed SFP+ module. + */ +#define MC_CMD_FC_IN_SFP_DUAL_SPEED_OFST 12 +#define MC_CMD_FC_IN_SFP_DUAL_SPEED_LEN 4 +/* True if an SFP Module is present (other fields valid when true) */ +#define MC_CMD_FC_IN_SFP_PRESENT_OFST 16 +#define MC_CMD_FC_IN_SFP_PRESENT_LEN 4 +/* The type of the SFP+ Module. For later cards with QSFP modules, this field + * is unused and the type is communicated by other means. 
+ */ +#define MC_CMD_FC_IN_SFP_TYPE_OFST 20 +#define MC_CMD_FC_IN_SFP_TYPE_LEN 4 +/* Capabilities corresponding to 1 bits. */ +#define MC_CMD_FC_IN_SFP_CAPS_OFST 24 +#define MC_CMD_FC_IN_SFP_CAPS_LEN 4 + +/* MC_CMD_FC_IN_DDR_TEST msgrequest */ +#define MC_CMD_FC_IN_DDR_TEST_LEN 8 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_DDR_TEST_HEADER_OFST 4 +#define MC_CMD_FC_IN_DDR_TEST_HEADER_LEN 4 +#define MC_CMD_FC_IN_DDR_TEST_OP_LBN 0 +#define MC_CMD_FC_IN_DDR_TEST_OP_WIDTH 8 +/* enum: DRAM Test Start */ +#define MC_CMD_FC_OP_DDR_TEST_START 0x1 +/* enum: DRAM Test Poll */ +#define MC_CMD_FC_OP_DDR_TEST_POLL 0x2 + +/* MC_CMD_FC_IN_DDR_TEST_START msgrequest */ +#define MC_CMD_FC_IN_DDR_TEST_START_LEN 12 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* MC_CMD_FC_IN_DDR_TEST_HEADER_OFST 4 */ +/* MC_CMD_FC_IN_DDR_TEST_HEADER_LEN 4 */ +#define MC_CMD_FC_IN_DDR_TEST_START_MASK_OFST 8 +#define MC_CMD_FC_IN_DDR_TEST_START_MASK_LEN 4 +#define MC_CMD_FC_IN_DDR_TEST_START_T0_LBN 0 +#define MC_CMD_FC_IN_DDR_TEST_START_T0_WIDTH 1 +#define MC_CMD_FC_IN_DDR_TEST_START_T1_LBN 1 +#define MC_CMD_FC_IN_DDR_TEST_START_T1_WIDTH 1 +#define MC_CMD_FC_IN_DDR_TEST_START_B0_LBN 2 +#define MC_CMD_FC_IN_DDR_TEST_START_B0_WIDTH 1 +#define MC_CMD_FC_IN_DDR_TEST_START_B1_LBN 3 +#define MC_CMD_FC_IN_DDR_TEST_START_B1_WIDTH 1 + +/* MC_CMD_FC_IN_DDR_TEST_POLL msgrequest */ +#define MC_CMD_FC_IN_DDR_TEST_POLL_LEN 12 +#define MC_CMD_FC_IN_DDR_TEST_CMD_OFST 0 +#define MC_CMD_FC_IN_DDR_TEST_CMD_LEN 4 +/* MC_CMD_FC_IN_DDR_TEST_HEADER_OFST 4 */ +/* MC_CMD_FC_IN_DDR_TEST_HEADER_LEN 4 */ +/* Clear previous test result and prepare for restarting DDR test */ +#define MC_CMD_FC_IN_DDR_TEST_POLL_CLEAR_RESULT_FOR_DDR_TEST_OFST 8 +#define MC_CMD_FC_IN_DDR_TEST_POLL_CLEAR_RESULT_FOR_DDR_TEST_LEN 4 + +/* MC_CMD_FC_IN_GET_ASSERT msgrequest */ +#define MC_CMD_FC_IN_GET_ASSERT_LEN 4 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ + +/* MC_CMD_FC_IN_FPGA_BUILD msgrequest */ +#define MC_CMD_FC_IN_FPGA_BUILD_LEN 8 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* FPGA build info operation code */ +#define MC_CMD_FC_IN_FPGA_BUILD_OP_OFST 4 +#define MC_CMD_FC_IN_FPGA_BUILD_OP_LEN 4 +/* enum: Get the build registers */ +#define MC_CMD_FC_IN_FPGA_BUILD_BUILD 0x1 +/* enum: Get the services registers */ +#define MC_CMD_FC_IN_FPGA_BUILD_SERVICES 0x2 +/* enum: Get the BSP version */ +#define MC_CMD_FC_IN_FPGA_BUILD_BSP_VERSION 0x3 +/* enum: Get build register for V2 (SFA974X) */ +#define MC_CMD_FC_IN_FPGA_BUILD_BUILD_V2 0x4 +/* enum: GEt the services register for V2 (SFA974X) */ +#define MC_CMD_FC_IN_FPGA_BUILD_SERVICES_V2 0x5 + +/* MC_CMD_FC_IN_READ_MAP msgrequest */ +#define MC_CMD_FC_IN_READ_MAP_LEN 8 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_READ_MAP_HEADER_OFST 4 +#define MC_CMD_FC_IN_READ_MAP_HEADER_LEN 4 +#define MC_CMD_FC_IN_READ_MAP_OP_LBN 0 +#define MC_CMD_FC_IN_READ_MAP_OP_WIDTH 8 +/* enum: Get the number of map regions */ +#define MC_CMD_FC_OP_READ_MAP_COUNT 0x1 +/* enum: Get the specified map */ +#define MC_CMD_FC_OP_READ_MAP_INDEX 0x2 + +/* MC_CMD_FC_IN_READ_MAP_COUNT msgrequest */ +#define MC_CMD_FC_IN_READ_MAP_COUNT_LEN 8 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* MC_CMD_FC_IN_READ_MAP_HEADER_OFST 4 */ +/* MC_CMD_FC_IN_READ_MAP_HEADER_LEN 4 */ + +/* MC_CMD_FC_IN_READ_MAP_INDEX msgrequest */ +#define MC_CMD_FC_IN_READ_MAP_INDEX_LEN 12 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* 
MC_CMD_FC_IN_READ_MAP_HEADER_OFST 4 */ +/* MC_CMD_FC_IN_READ_MAP_HEADER_LEN 4 */ +#define MC_CMD_FC_IN_MAP_INDEX_OFST 8 +#define MC_CMD_FC_IN_MAP_INDEX_LEN 4 + +/* MC_CMD_FC_IN_CAPABILITIES msgrequest */ +#define MC_CMD_FC_IN_CAPABILITIES_LEN 4 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ + +/* MC_CMD_FC_IN_GLOBAL_FLAGS msgrequest */ +#define MC_CMD_FC_IN_GLOBAL_FLAGS_LEN 8 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_GLOBAL_FLAGS_FLAGS_OFST 4 +#define MC_CMD_FC_IN_GLOBAL_FLAGS_FLAGS_LEN 4 +#define MC_CMD_FC_IN_GLOBAL_FLAGS_RX_TUNING_CABLE_PLUGGED_IN_LBN 0 +#define MC_CMD_FC_IN_GLOBAL_FLAGS_RX_TUNING_CABLE_PLUGGED_IN_WIDTH 1 +#define MC_CMD_FC_IN_GLOBAL_FLAGS_RX_TUNING_LINK_MONITORING_LBN 1 +#define MC_CMD_FC_IN_GLOBAL_FLAGS_RX_TUNING_LINK_MONITORING_WIDTH 1 +#define MC_CMD_FC_IN_GLOBAL_FLAGS_DFE_ENABLE_LBN 2 +#define MC_CMD_FC_IN_GLOBAL_FLAGS_DFE_ENABLE_WIDTH 1 +#define MC_CMD_FC_IN_GLOBAL_FLAGS_1D_EYE_ENABLE_LBN 3 +#define MC_CMD_FC_IN_GLOBAL_FLAGS_1D_EYE_ENABLE_WIDTH 1 +#define MC_CMD_FC_IN_GLOBAL_FLAGS_1D_TUNING_ENABLE_LBN 4 +#define MC_CMD_FC_IN_GLOBAL_FLAGS_1D_TUNING_ENABLE_WIDTH 1 +#define MC_CMD_FC_IN_GLOBAL_FLAGS_OFFCAL_ENABLE_LBN 5 +#define MC_CMD_FC_IN_GLOBAL_FLAGS_OFFCAL_ENABLE_WIDTH 1 + +/* MC_CMD_FC_IN_IO_REL msgrequest */ +#define MC_CMD_FC_IN_IO_REL_LEN 8 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_IO_REL_HEADER_OFST 4 +#define MC_CMD_FC_IN_IO_REL_HEADER_LEN 4 +#define MC_CMD_FC_IN_IO_REL_OP_LBN 0 +#define MC_CMD_FC_IN_IO_REL_OP_WIDTH 8 +/* enum: Get the base address that the FC applies to relative commands */ +#define MC_CMD_FC_IN_IO_REL_GET_ADDR 0x1 +/* enum: Read data */ +#define MC_CMD_FC_IN_IO_REL_READ32 0x2 +/* enum: Write data */ +#define MC_CMD_FC_IN_IO_REL_WRITE32 0x3 +#define MC_CMD_FC_IN_IO_REL_COMP_TYPE_LBN 8 +#define MC_CMD_FC_IN_IO_REL_COMP_TYPE_WIDTH 8 +/* enum: Application address space */ +#define MC_CMD_FC_COMP_TYPE_APP_ADDR_SPACE 0x1 +/* enum: Flash address space */ +#define MC_CMD_FC_COMP_TYPE_FLASH 0x2 + +/* MC_CMD_FC_IN_IO_REL_GET_ADDR msgrequest */ +#define MC_CMD_FC_IN_IO_REL_GET_ADDR_LEN 8 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* MC_CMD_FC_IN_IO_REL_HEADER_OFST 4 */ +/* MC_CMD_FC_IN_IO_REL_HEADER_LEN 4 */ + +/* MC_CMD_FC_IN_IO_REL_READ32 msgrequest */ +#define MC_CMD_FC_IN_IO_REL_READ32_LEN 20 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* MC_CMD_FC_IN_IO_REL_HEADER_OFST 4 */ +/* MC_CMD_FC_IN_IO_REL_HEADER_LEN 4 */ +#define MC_CMD_FC_IN_IO_REL_READ32_ADDR_HI_OFST 8 +#define MC_CMD_FC_IN_IO_REL_READ32_ADDR_HI_LEN 4 +#define MC_CMD_FC_IN_IO_REL_READ32_ADDR_LO_OFST 12 +#define MC_CMD_FC_IN_IO_REL_READ32_ADDR_LO_LEN 4 +#define MC_CMD_FC_IN_IO_REL_READ32_NUMWORDS_OFST 16 +#define MC_CMD_FC_IN_IO_REL_READ32_NUMWORDS_LEN 4 + +/* MC_CMD_FC_IN_IO_REL_WRITE32 msgrequest */ +#define MC_CMD_FC_IN_IO_REL_WRITE32_LENMIN 20 +#define MC_CMD_FC_IN_IO_REL_WRITE32_LENMAX 252 +#define MC_CMD_FC_IN_IO_REL_WRITE32_LENMAX_MCDI2 1020 +#define MC_CMD_FC_IN_IO_REL_WRITE32_LEN(num) (16+4*(num)) +#define MC_CMD_FC_IN_IO_REL_WRITE32_BUFFER_NUM(len) (((len)-16)/4) +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* MC_CMD_FC_IN_IO_REL_HEADER_OFST 4 */ +/* MC_CMD_FC_IN_IO_REL_HEADER_LEN 4 */ +#define MC_CMD_FC_IN_IO_REL_WRITE32_ADDR_HI_OFST 8 +#define MC_CMD_FC_IN_IO_REL_WRITE32_ADDR_HI_LEN 4 +#define MC_CMD_FC_IN_IO_REL_WRITE32_ADDR_LO_OFST 12 +#define MC_CMD_FC_IN_IO_REL_WRITE32_ADDR_LO_LEN 4 +#define 
MC_CMD_FC_IN_IO_REL_WRITE32_BUFFER_OFST 16 +#define MC_CMD_FC_IN_IO_REL_WRITE32_BUFFER_LEN 4 +#define MC_CMD_FC_IN_IO_REL_WRITE32_BUFFER_MINNUM 1 +#define MC_CMD_FC_IN_IO_REL_WRITE32_BUFFER_MAXNUM 59 +#define MC_CMD_FC_IN_IO_REL_WRITE32_BUFFER_MAXNUM_MCDI2 251 + +/* MC_CMD_FC_IN_UHLINK msgrequest */ +#define MC_CMD_FC_IN_UHLINK_LEN 8 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 +#define MC_CMD_FC_IN_UHLINK_HEADER_LEN 4 +#define MC_CMD_FC_IN_UHLINK_OP_LBN 0 +#define MC_CMD_FC_IN_UHLINK_OP_WIDTH 8 +/* enum: Get PHY configuration info */ +#define MC_CMD_FC_OP_UHLINK_PHY 0x1 +/* enum: Get MAC configuration info */ +#define MC_CMD_FC_OP_UHLINK_MAC 0x2 +/* enum: Get Rx eye table */ +#define MC_CMD_FC_OP_UHLINK_RX_EYE 0x3 +/* enum: Get Rx eye plot */ +#define MC_CMD_FC_OP_UHLINK_DUMP_RX_EYE_PLOT 0x4 +/* enum: Get Rx eye plot */ +#define MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT 0x5 +/* enum: Retune Rx settings */ +#define MC_CMD_FC_OP_UHLINK_RX_TUNE 0x6 +/* enum: Set loopback mode on fpga port */ +#define MC_CMD_FC_OP_UHLINK_LOOPBACK_SET 0x7 +/* enum: Get loopback mode config state on fpga port */ +#define MC_CMD_FC_OP_UHLINK_LOOPBACK_GET 0x8 +#define MC_CMD_FC_IN_UHLINK_PORT_TYPE_LBN 8 +#define MC_CMD_FC_IN_UHLINK_PORT_TYPE_WIDTH 8 +#define MC_CMD_FC_IN_UHLINK_PORT_IDX_LBN 16 +#define MC_CMD_FC_IN_UHLINK_PORT_IDX_WIDTH 8 +#define MC_CMD_FC_IN_UHLINK_CMD_FORMAT_LBN 24 +#define MC_CMD_FC_IN_UHLINK_CMD_FORMAT_WIDTH 8 +/* enum: Default FC command format; the fields PORT_TYPE and PORT_IDX are + * irrelevant. Port number is derived from pci_fn; passed in FC header. + */ +#define MC_CMD_FC_OP_UHLINK_CMD_FORMAT_DEFAULT 0x0 +/* enum: Override default port number. Port number determined by fields + * PORT_TYPE and PORT_IDX. 
+ */ +#define MC_CMD_FC_OP_UHLINK_CMD_FORMAT_PORT_OVERRIDE 0x1 + +/* MC_CMD_FC_OP_UHLINK_PHY msgrequest */ +#define MC_CMD_FC_OP_UHLINK_PHY_LEN 8 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */ +/* MC_CMD_FC_IN_UHLINK_HEADER_LEN 4 */ + +/* MC_CMD_FC_OP_UHLINK_MAC msgrequest */ +#define MC_CMD_FC_OP_UHLINK_MAC_LEN 8 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */ +/* MC_CMD_FC_IN_UHLINK_HEADER_LEN 4 */ + +/* MC_CMD_FC_OP_UHLINK_RX_EYE msgrequest */ +#define MC_CMD_FC_OP_UHLINK_RX_EYE_LEN 12 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */ +/* MC_CMD_FC_IN_UHLINK_HEADER_LEN 4 */ +#define MC_CMD_FC_OP_UHLINK_RX_EYE_INDEX_OFST 8 +#define MC_CMD_FC_OP_UHLINK_RX_EYE_INDEX_LEN 4 +#define MC_CMD_FC_UHLINK_RX_EYE_PER_BLOCK 0x30 /* enum */ + +/* MC_CMD_FC_OP_UHLINK_DUMP_RX_EYE_PLOT msgrequest */ +#define MC_CMD_FC_OP_UHLINK_DUMP_RX_EYE_PLOT_LEN 8 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */ +/* MC_CMD_FC_IN_UHLINK_HEADER_LEN 4 */ + +/* MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT msgrequest */ +#define MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT_LEN 20 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */ +/* MC_CMD_FC_IN_UHLINK_HEADER_LEN 4 */ +#define MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT_DC_GAIN_OFST 8 +#define MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT_DC_GAIN_LEN 4 +#define MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT_EQ_CONTROL_OFST 12 +#define MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT_EQ_CONTROL_LEN 4 +#define MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT_INDEX_OFST 16 +#define MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT_INDEX_LEN 4 +#define MC_CMD_FC_UHLINK_RX_EYE_PLOT_ROWS_PER_BLOCK 0x1e /* enum */ + +/* MC_CMD_FC_OP_UHLINK_RX_TUNE msgrequest */ +#define MC_CMD_FC_OP_UHLINK_RX_TUNE_LEN 8 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */ +/* MC_CMD_FC_IN_UHLINK_HEADER_LEN 4 */ + +/* MC_CMD_FC_OP_UHLINK_LOOPBACK_SET msgrequest */ +#define MC_CMD_FC_OP_UHLINK_LOOPBACK_SET_LEN 16 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */ +/* MC_CMD_FC_IN_UHLINK_HEADER_LEN 4 */ +#define MC_CMD_FC_OP_UHLINK_LOOPBACK_SET_TYPE_OFST 8 +#define MC_CMD_FC_OP_UHLINK_LOOPBACK_SET_TYPE_LEN 4 +#define MC_CMD_FC_UHLINK_LOOPBACK_TYPE_PCS_SERIAL 0x0 /* enum */ +#define MC_CMD_FC_UHLINK_LOOPBACK_TYPE_PMA_PRE_CDR 0x1 /* enum */ +#define MC_CMD_FC_UHLINK_LOOPBACK_TYPE_PMA_POST_CDR 0x2 /* enum */ +#define MC_CMD_FC_OP_UHLINK_LOOPBACK_SET_STATE_OFST 12 +#define MC_CMD_FC_OP_UHLINK_LOOPBACK_SET_STATE_LEN 4 +#define MC_CMD_FC_UHLINK_LOOPBACK_STATE_OFF 0x0 /* enum */ +#define MC_CMD_FC_UHLINK_LOOPBACK_STATE_ON 0x1 /* enum */ + +/* MC_CMD_FC_OP_UHLINK_LOOPBACK_GET msgrequest */ +#define MC_CMD_FC_OP_UHLINK_LOOPBACK_GET_LEN 12 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */ +/* MC_CMD_FC_IN_UHLINK_HEADER_LEN 4 */ +#define MC_CMD_FC_OP_UHLINK_LOOPBACK_GET_TYPE_OFST 8 +#define MC_CMD_FC_OP_UHLINK_LOOPBACK_GET_TYPE_LEN 4 + +/* MC_CMD_FC_IN_SET_LINK msgrequest */ +#define MC_CMD_FC_IN_SET_LINK_LEN 16 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* See MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */ +#define MC_CMD_FC_IN_SET_LINK_MODE_OFST 4 +#define MC_CMD_FC_IN_SET_LINK_MODE_LEN 4 +#define MC_CMD_FC_IN_SET_LINK_SPEED_OFST 8 
+#define MC_CMD_FC_IN_SET_LINK_SPEED_LEN 4 +#define MC_CMD_FC_IN_SET_LINK_FLAGS_OFST 12 +#define MC_CMD_FC_IN_SET_LINK_FLAGS_LEN 4 +#define MC_CMD_FC_IN_SET_LINK_LOWPOWER_LBN 0 +#define MC_CMD_FC_IN_SET_LINK_LOWPOWER_WIDTH 1 +#define MC_CMD_FC_IN_SET_LINK_POWEROFF_LBN 1 +#define MC_CMD_FC_IN_SET_LINK_POWEROFF_WIDTH 1 +#define MC_CMD_FC_IN_SET_LINK_TXDIS_LBN 2 +#define MC_CMD_FC_IN_SET_LINK_TXDIS_WIDTH 1 + +/* MC_CMD_FC_IN_LICENSE msgrequest */ +#define MC_CMD_FC_IN_LICENSE_LEN 8 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_LICENSE_OP_OFST 4 +#define MC_CMD_FC_IN_LICENSE_OP_LEN 4 +#define MC_CMD_FC_IN_LICENSE_UPDATE_LICENSE 0x0 /* enum */ +#define MC_CMD_FC_IN_LICENSE_GET_KEY_STATS 0x1 /* enum */ + +/* MC_CMD_FC_IN_STARTUP msgrequest */ +#define MC_CMD_FC_IN_STARTUP_LEN 40 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_STARTUP_BASE_OFST 4 +#define MC_CMD_FC_IN_STARTUP_BASE_LEN 4 +#define MC_CMD_FC_IN_STARTUP_LENGTH_OFST 8 +#define MC_CMD_FC_IN_STARTUP_LENGTH_LEN 4 +/* Length of identifier */ +#define MC_CMD_FC_IN_STARTUP_IDLENGTH_OFST 12 +#define MC_CMD_FC_IN_STARTUP_IDLENGTH_LEN 4 +/* Identifier for AOE FPGA */ +#define MC_CMD_FC_IN_STARTUP_ID_OFST 16 +#define MC_CMD_FC_IN_STARTUP_ID_LEN 1 +#define MC_CMD_FC_IN_STARTUP_ID_NUM 24 + +/* MC_CMD_FC_IN_DMA msgrequest */ +#define MC_CMD_FC_IN_DMA_LEN 8 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_DMA_OP_OFST 4 +#define MC_CMD_FC_IN_DMA_OP_LEN 4 +#define MC_CMD_FC_IN_DMA_STOP 0x0 /* enum */ +#define MC_CMD_FC_IN_DMA_READ 0x1 /* enum */ + +/* MC_CMD_FC_IN_DMA_STOP msgrequest */ +#define MC_CMD_FC_IN_DMA_STOP_LEN 12 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* MC_CMD_FC_IN_DMA_OP_OFST 4 */ +/* MC_CMD_FC_IN_DMA_OP_LEN 4 */ +/* FC supplied handle */ +#define MC_CMD_FC_IN_DMA_STOP_FC_HANDLE_OFST 8 +#define MC_CMD_FC_IN_DMA_STOP_FC_HANDLE_LEN 4 + +/* MC_CMD_FC_IN_DMA_READ msgrequest */ +#define MC_CMD_FC_IN_DMA_READ_LEN 16 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* MC_CMD_FC_IN_DMA_OP_OFST 4 */ +/* MC_CMD_FC_IN_DMA_OP_LEN 4 */ +#define MC_CMD_FC_IN_DMA_READ_OFFSET_OFST 8 +#define MC_CMD_FC_IN_DMA_READ_OFFSET_LEN 4 +#define MC_CMD_FC_IN_DMA_READ_LENGTH_OFST 12 +#define MC_CMD_FC_IN_DMA_READ_LENGTH_LEN 4 + +/* MC_CMD_FC_IN_TIMED_READ msgrequest */ +#define MC_CMD_FC_IN_TIMED_READ_LEN 8 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_TIMED_READ_OP_OFST 4 +#define MC_CMD_FC_IN_TIMED_READ_OP_LEN 4 +#define MC_CMD_FC_IN_TIMED_READ_SET 0x0 /* enum */ +#define MC_CMD_FC_IN_TIMED_READ_GET 0x1 /* enum */ +#define MC_CMD_FC_IN_TIMED_READ_CLEAR 0x2 /* enum */ + +/* MC_CMD_FC_IN_TIMED_READ_SET msgrequest */ +#define MC_CMD_FC_IN_TIMED_READ_SET_LEN 52 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* MC_CMD_FC_IN_TIMED_READ_OP_OFST 4 */ +/* MC_CMD_FC_IN_TIMED_READ_OP_LEN 4 */ +/* Host supplied handle (unique) */ +#define MC_CMD_FC_IN_TIMED_READ_SET_HOST_HANDLE_OFST 8 +#define MC_CMD_FC_IN_TIMED_READ_SET_HOST_HANDLE_LEN 4 +/* Address into which to transfer data in host */ +#define MC_CMD_FC_IN_TIMED_READ_SET_HOST_DMA_ADDRESS_OFST 12 +#define MC_CMD_FC_IN_TIMED_READ_SET_HOST_DMA_ADDRESS_LEN 8 +#define MC_CMD_FC_IN_TIMED_READ_SET_HOST_DMA_ADDRESS_LO_OFST 12 +#define MC_CMD_FC_IN_TIMED_READ_SET_HOST_DMA_ADDRESS_HI_OFST 16 +/* AOE address from which to transfer data */ +#define MC_CMD_FC_IN_TIMED_READ_SET_AOE_ADDRESS_OFST 20 +#define 
MC_CMD_FC_IN_TIMED_READ_SET_AOE_ADDRESS_LEN 8 +#define MC_CMD_FC_IN_TIMED_READ_SET_AOE_ADDRESS_LO_OFST 20 +#define MC_CMD_FC_IN_TIMED_READ_SET_AOE_ADDRESS_HI_OFST 24 +/* Length of AOE transfer (total) */ +#define MC_CMD_FC_IN_TIMED_READ_SET_AOE_LENGTH_OFST 28 +#define MC_CMD_FC_IN_TIMED_READ_SET_AOE_LENGTH_LEN 4 +/* Length of host transfer (total) */ +#define MC_CMD_FC_IN_TIMED_READ_SET_HOST_LENGTH_OFST 32 +#define MC_CMD_FC_IN_TIMED_READ_SET_HOST_LENGTH_LEN 4 +/* Offset back from aoe_address to apply operation to */ +#define MC_CMD_FC_IN_TIMED_READ_SET_OFFSET_OFST 36 +#define MC_CMD_FC_IN_TIMED_READ_SET_OFFSET_LEN 4 +/* Data to apply at offset */ +#define MC_CMD_FC_IN_TIMED_READ_SET_DATA_OFST 40 +#define MC_CMD_FC_IN_TIMED_READ_SET_DATA_LEN 4 +#define MC_CMD_FC_IN_TIMED_READ_SET_FLAGS_OFST 44 +#define MC_CMD_FC_IN_TIMED_READ_SET_FLAGS_LEN 4 +#define MC_CMD_FC_IN_TIMED_READ_SET_INDIRECT_LBN 0 +#define MC_CMD_FC_IN_TIMED_READ_SET_INDIRECT_WIDTH 1 +#define MC_CMD_FC_IN_TIMED_READ_SET_DOUBLE_LBN 1 +#define MC_CMD_FC_IN_TIMED_READ_SET_DOUBLE_WIDTH 1 +#define MC_CMD_FC_IN_TIMED_READ_SET_EVENT_LBN 2 +#define MC_CMD_FC_IN_TIMED_READ_SET_EVENT_WIDTH 1 +#define MC_CMD_FC_IN_TIMED_READ_SET_PREREAD_LBN 3 +#define MC_CMD_FC_IN_TIMED_READ_SET_PREREAD_WIDTH 2 +#define MC_CMD_FC_IN_TIMED_READ_SET_NONE 0x0 /* enum */ +#define MC_CMD_FC_IN_TIMED_READ_SET_READ 0x1 /* enum */ +#define MC_CMD_FC_IN_TIMED_READ_SET_WRITE 0x2 /* enum */ +#define MC_CMD_FC_IN_TIMED_READ_SET_READWRITE 0x3 /* enum */ +/* Period at which reads are performed (100ms units) */ +#define MC_CMD_FC_IN_TIMED_READ_SET_PERIOD_OFST 48 +#define MC_CMD_FC_IN_TIMED_READ_SET_PERIOD_LEN 4 + +/* MC_CMD_FC_IN_TIMED_READ_GET msgrequest */ +#define MC_CMD_FC_IN_TIMED_READ_GET_LEN 12 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* MC_CMD_FC_IN_TIMED_READ_OP_OFST 4 */ +/* MC_CMD_FC_IN_TIMED_READ_OP_LEN 4 */ +/* FC supplied handle */ +#define MC_CMD_FC_IN_TIMED_READ_GET_FC_HANDLE_OFST 8 +#define MC_CMD_FC_IN_TIMED_READ_GET_FC_HANDLE_LEN 4 + +/* MC_CMD_FC_IN_TIMED_READ_CLEAR msgrequest */ +#define MC_CMD_FC_IN_TIMED_READ_CLEAR_LEN 12 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* MC_CMD_FC_IN_TIMED_READ_OP_OFST 4 */ +/* MC_CMD_FC_IN_TIMED_READ_OP_LEN 4 */ +/* FC supplied handle */ +#define MC_CMD_FC_IN_TIMED_READ_CLEAR_FC_HANDLE_OFST 8 +#define MC_CMD_FC_IN_TIMED_READ_CLEAR_FC_HANDLE_LEN 4 + +/* MC_CMD_FC_IN_LOG msgrequest */ +#define MC_CMD_FC_IN_LOG_LEN 8 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_LOG_OP_OFST 4 +#define MC_CMD_FC_IN_LOG_OP_LEN 4 +#define MC_CMD_FC_IN_LOG_ADDR_RANGE 0x0 /* enum */ +#define MC_CMD_FC_IN_LOG_JTAG_UART 0x1 /* enum */ + +/* MC_CMD_FC_IN_LOG_ADDR_RANGE msgrequest */ +#define MC_CMD_FC_IN_LOG_ADDR_RANGE_LEN 20 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* MC_CMD_FC_IN_LOG_OP_OFST 4 */ +/* MC_CMD_FC_IN_LOG_OP_LEN 4 */ +/* Partition offset into flash */ +#define MC_CMD_FC_IN_LOG_ADDR_RANGE_OFFSET_OFST 8 +#define MC_CMD_FC_IN_LOG_ADDR_RANGE_OFFSET_LEN 4 +/* Partition length */ +#define MC_CMD_FC_IN_LOG_ADDR_RANGE_LENGTH_OFST 12 +#define MC_CMD_FC_IN_LOG_ADDR_RANGE_LENGTH_LEN 4 +/* Partition erase size */ +#define MC_CMD_FC_IN_LOG_ADDR_RANGE_ERASE_SIZE_OFST 16 +#define MC_CMD_FC_IN_LOG_ADDR_RANGE_ERASE_SIZE_LEN 4 + +/* MC_CMD_FC_IN_LOG_JTAG_UART msgrequest */ +#define MC_CMD_FC_IN_LOG_JTAG_UART_LEN 12 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* MC_CMD_FC_IN_LOG_OP_OFST 4 */ +/* MC_CMD_FC_IN_LOG_OP_LEN 4 */ +/* 
Enable/disable printing to JTAG UART */ +#define MC_CMD_FC_IN_LOG_JTAG_UART_ENABLE_OFST 8 +#define MC_CMD_FC_IN_LOG_JTAG_UART_ENABLE_LEN 4 + +/* MC_CMD_FC_IN_CLOCK msgrequest: Perform a clock operation */ +#define MC_CMD_FC_IN_CLOCK_LEN 12 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_CLOCK_OP_OFST 4 +#define MC_CMD_FC_IN_CLOCK_OP_LEN 4 +#define MC_CMD_FC_IN_CLOCK_GET_TIME 0x0 /* enum */ +#define MC_CMD_FC_IN_CLOCK_SET_TIME 0x1 /* enum */ +#define MC_CMD_FC_IN_CLOCK_ID_OFST 8 +#define MC_CMD_FC_IN_CLOCK_ID_LEN 4 +#define MC_CMD_FC_IN_CLOCK_STATS 0x0 /* enum */ +#define MC_CMD_FC_IN_CLOCK_MAC 0x1 /* enum */ + +/* MC_CMD_FC_IN_CLOCK_GET_TIME msgrequest: Retrieve the clock value of the + * specified clock + */ +#define MC_CMD_FC_IN_CLOCK_GET_TIME_LEN 12 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* MC_CMD_FC_IN_CLOCK_OP_OFST 4 */ +/* MC_CMD_FC_IN_CLOCK_OP_LEN 4 */ +/* MC_CMD_FC_IN_CLOCK_ID_OFST 8 */ +/* MC_CMD_FC_IN_CLOCK_ID_LEN 4 */ + +/* MC_CMD_FC_IN_CLOCK_SET_TIME msgrequest: Set the clock value of the specified + * clock + */ +#define MC_CMD_FC_IN_CLOCK_SET_TIME_LEN 24 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* MC_CMD_FC_IN_CLOCK_OP_OFST 4 */ +/* MC_CMD_FC_IN_CLOCK_OP_LEN 4 */ +/* MC_CMD_FC_IN_CLOCK_ID_OFST 8 */ +/* MC_CMD_FC_IN_CLOCK_ID_LEN 4 */ +#define MC_CMD_FC_IN_CLOCK_SET_TIME_SECONDS_OFST 12 +#define MC_CMD_FC_IN_CLOCK_SET_TIME_SECONDS_LEN 8 +#define MC_CMD_FC_IN_CLOCK_SET_TIME_SECONDS_LO_OFST 12 +#define MC_CMD_FC_IN_CLOCK_SET_TIME_SECONDS_HI_OFST 16 +#define MC_CMD_FC_IN_CLOCK_SET_TIME_NANOSECONDS_OFST 20 +#define MC_CMD_FC_IN_CLOCK_SET_TIME_NANOSECONDS_LEN 4 + +/* MC_CMD_FC_IN_DDR msgrequest */ +#define MC_CMD_FC_IN_DDR_LEN 12 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_DDR_OP_OFST 4 +#define MC_CMD_FC_IN_DDR_OP_LEN 4 +#define MC_CMD_FC_IN_DDR_SET_SPD 0x0 /* enum */ +#define MC_CMD_FC_IN_DDR_GET_STATUS 0x1 /* enum */ +#define MC_CMD_FC_IN_DDR_SET_INFO 0x2 /* enum */ +#define MC_CMD_FC_IN_DDR_BANK_OFST 8 +#define MC_CMD_FC_IN_DDR_BANK_LEN 4 +#define MC_CMD_FC_IN_DDR_BANK_B0 0x0 /* enum */ +#define MC_CMD_FC_IN_DDR_BANK_B1 0x1 /* enum */ +#define MC_CMD_FC_IN_DDR_BANK_T0 0x2 /* enum */ +#define MC_CMD_FC_IN_DDR_BANK_T1 0x3 /* enum */ +#define MC_CMD_FC_IN_DDR_NUM_BANKS 0x4 /* enum */ + +/* MC_CMD_FC_IN_DDR_SET_SPD msgrequest */ +#define MC_CMD_FC_IN_DDR_SET_SPD_LEN 148 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* MC_CMD_FC_IN_DDR_OP_OFST 4 */ +/* MC_CMD_FC_IN_DDR_OP_LEN 4 */ +/* Affected bank */ +/* MC_CMD_FC_IN_DDR_BANK_OFST 8 */ +/* MC_CMD_FC_IN_DDR_BANK_LEN 4 */ +/* Flags */ +#define MC_CMD_FC_IN_DDR_FLAGS_OFST 12 +#define MC_CMD_FC_IN_DDR_FLAGS_LEN 4 +#define MC_CMD_FC_IN_DDR_SET_SPD_ACTIVE 0x1 /* enum */ +/* 128-byte page of serial presence detect data read from module's EEPROM */ +#define MC_CMD_FC_IN_DDR_SPD_OFST 16 +#define MC_CMD_FC_IN_DDR_SPD_LEN 1 +#define MC_CMD_FC_IN_DDR_SPD_NUM 128 +/* Page index of the spd data copied into MC_CMD_FC_IN_DDR_SPD */ +#define MC_CMD_FC_IN_DDR_SPD_PAGE_ID_OFST 144 +#define MC_CMD_FC_IN_DDR_SPD_PAGE_ID_LEN 4 + +/* MC_CMD_FC_IN_DDR_SET_INFO msgrequest */ +#define MC_CMD_FC_IN_DDR_SET_INFO_LEN 16 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* MC_CMD_FC_IN_DDR_OP_OFST 4 */ +/* MC_CMD_FC_IN_DDR_OP_LEN 4 */ +/* Affected bank */ +/* MC_CMD_FC_IN_DDR_BANK_OFST 8 */ +/* MC_CMD_FC_IN_DDR_BANK_LEN 4 */ +/* Size of DDR */ +#define MC_CMD_FC_IN_DDR_SIZE_OFST 12 +#define MC_CMD_FC_IN_DDR_SIZE_LEN 4 
+ +/* MC_CMD_FC_IN_DDR_GET_STATUS msgrequest */ +#define MC_CMD_FC_IN_DDR_GET_STATUS_LEN 12 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* MC_CMD_FC_IN_DDR_OP_OFST 4 */ +/* MC_CMD_FC_IN_DDR_OP_LEN 4 */ +/* Affected bank */ +/* MC_CMD_FC_IN_DDR_BANK_OFST 8 */ +/* MC_CMD_FC_IN_DDR_BANK_LEN 4 */ + +/* MC_CMD_FC_IN_TIMESTAMP msgrequest */ +#define MC_CMD_FC_IN_TIMESTAMP_LEN 8 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* FC timestamp operation code */ +#define MC_CMD_FC_IN_TIMESTAMP_OP_OFST 4 +#define MC_CMD_FC_IN_TIMESTAMP_OP_LEN 4 +/* enum: Read transmit timestamp(s) */ +#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT 0x0 +/* enum: Read snapshot timestamps */ +#define MC_CMD_FC_IN_TIMESTAMP_READ_SNAPSHOT 0x1 +/* enum: Clear all transmit timestamps */ +#define MC_CMD_FC_IN_TIMESTAMP_CLEAR_TRANSMIT 0x2 + +/* MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT msgrequest */ +#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_LEN 28 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_OP_OFST 4 +#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_OP_LEN 4 +/* Control filtering of the returned timestamp and sequence number specified + * here + */ +#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_FILTER_OFST 8 +#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_FILTER_LEN 4 +/* enum: Return most recent timestamp. No filtering */ +#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_LATEST 0x0 +/* enum: Match timestamp against the PTP clock ID, port number and sequence + * number specified + */ +#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_MATCH 0x1 +/* Clock identity of PTP packet for which timestamp required */ +#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_CLOCK_ID_OFST 12 +#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_CLOCK_ID_LEN 8 +#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_CLOCK_ID_LO_OFST 12 +#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_CLOCK_ID_HI_OFST 16 +/* Port number of PTP packet for which timestamp required */ +#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_PORT_NUM_OFST 20 +#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_PORT_NUM_LEN 4 +/* Sequence number of PTP packet for which timestamp required */ +#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_SEQ_NUM_OFST 24 +#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_SEQ_NUM_LEN 4 + +/* MC_CMD_FC_IN_TIMESTAMP_READ_SNAPSHOT msgrequest */ +#define MC_CMD_FC_IN_TIMESTAMP_READ_SNAPSHOT_LEN 8 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_TIMESTAMP_READ_SNAPSHOT_OP_OFST 4 +#define MC_CMD_FC_IN_TIMESTAMP_READ_SNAPSHOT_OP_LEN 4 + +/* MC_CMD_FC_IN_TIMESTAMP_CLEAR_TRANSMIT msgrequest */ +#define MC_CMD_FC_IN_TIMESTAMP_CLEAR_TRANSMIT_LEN 8 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_TIMESTAMP_CLEAR_TRANSMIT_OP_OFST 4 +#define MC_CMD_FC_IN_TIMESTAMP_CLEAR_TRANSMIT_OP_LEN 4 + +/* MC_CMD_FC_IN_SPI msgrequest */ +#define MC_CMD_FC_IN_SPI_LEN 8 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* Basic commands for SPI Flash. 
*/ +#define MC_CMD_FC_IN_SPI_OP_OFST 4 +#define MC_CMD_FC_IN_SPI_OP_LEN 4 +/* enum: SPI Flash read */ +#define MC_CMD_FC_IN_SPI_READ 0x0 +/* enum: SPI Flash write */ +#define MC_CMD_FC_IN_SPI_WRITE 0x1 +/* enum: SPI Flash erase */ +#define MC_CMD_FC_IN_SPI_ERASE 0x2 + +/* MC_CMD_FC_IN_SPI_READ msgrequest */ +#define MC_CMD_FC_IN_SPI_READ_LEN 16 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_SPI_READ_OP_OFST 4 +#define MC_CMD_FC_IN_SPI_READ_OP_LEN 4 +#define MC_CMD_FC_IN_SPI_READ_ADDR_OFST 8 +#define MC_CMD_FC_IN_SPI_READ_ADDR_LEN 4 +#define MC_CMD_FC_IN_SPI_READ_NUMBYTES_OFST 12 +#define MC_CMD_FC_IN_SPI_READ_NUMBYTES_LEN 4 + +/* MC_CMD_FC_IN_SPI_WRITE msgrequest */ +#define MC_CMD_FC_IN_SPI_WRITE_LENMIN 16 +#define MC_CMD_FC_IN_SPI_WRITE_LENMAX 252 +#define MC_CMD_FC_IN_SPI_WRITE_LENMAX_MCDI2 1020 +#define MC_CMD_FC_IN_SPI_WRITE_LEN(num) (12+4*(num)) +#define MC_CMD_FC_IN_SPI_WRITE_BUFFER_NUM(len) (((len)-12)/4) +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_SPI_WRITE_OP_OFST 4 +#define MC_CMD_FC_IN_SPI_WRITE_OP_LEN 4 +#define MC_CMD_FC_IN_SPI_WRITE_ADDR_OFST 8 +#define MC_CMD_FC_IN_SPI_WRITE_ADDR_LEN 4 +#define MC_CMD_FC_IN_SPI_WRITE_BUFFER_OFST 12 +#define MC_CMD_FC_IN_SPI_WRITE_BUFFER_LEN 4 +#define MC_CMD_FC_IN_SPI_WRITE_BUFFER_MINNUM 1 +#define MC_CMD_FC_IN_SPI_WRITE_BUFFER_MAXNUM 60 +#define MC_CMD_FC_IN_SPI_WRITE_BUFFER_MAXNUM_MCDI2 252 + +/* MC_CMD_FC_IN_SPI_ERASE msgrequest */ +#define MC_CMD_FC_IN_SPI_ERASE_LEN 16 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_SPI_ERASE_OP_OFST 4 +#define MC_CMD_FC_IN_SPI_ERASE_OP_LEN 4 +#define MC_CMD_FC_IN_SPI_ERASE_ADDR_OFST 8 +#define MC_CMD_FC_IN_SPI_ERASE_ADDR_LEN 4 +#define MC_CMD_FC_IN_SPI_ERASE_NUMBYTES_OFST 12 +#define MC_CMD_FC_IN_SPI_ERASE_NUMBYTES_LEN 4 + +/* MC_CMD_FC_IN_DIAG msgrequest */ +#define MC_CMD_FC_IN_DIAG_LEN 8 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* Operation code indicating component type */ +#define MC_CMD_FC_IN_DIAG_OP_OFST 4 +#define MC_CMD_FC_IN_DIAG_OP_LEN 4 +/* enum: Power noise generator. */ +#define MC_CMD_FC_IN_DIAG_POWER_NOISE 0x0 +/* enum: DDR soak test component. */ +#define MC_CMD_FC_IN_DIAG_DDR_SOAK 0x1 +/* enum: Diagnostics datapath control component. 
*/ +#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL 0x2 + +/* MC_CMD_FC_IN_DIAG_POWER_NOISE msgrequest */ +#define MC_CMD_FC_IN_DIAG_POWER_NOISE_LEN 12 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_DIAG_POWER_NOISE_OP_OFST 4 +#define MC_CMD_FC_IN_DIAG_POWER_NOISE_OP_LEN 4 +/* Sub-opcode describing the operation to be carried out */ +#define MC_CMD_FC_IN_DIAG_POWER_NOISE_SUB_OP_OFST 8 +#define MC_CMD_FC_IN_DIAG_POWER_NOISE_SUB_OP_LEN 4 +/* enum: Read the configuration (the 32-bit values in each of the clock enable + * count and toggle count registers) + */ +#define MC_CMD_FC_IN_DIAG_POWER_NOISE_READ_CONFIG 0x0 +/* enum: Write a new configuration to the clock enable count and toggle count + * registers + */ +#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG 0x1 + +/* MC_CMD_FC_IN_DIAG_POWER_NOISE_READ_CONFIG msgrequest */ +#define MC_CMD_FC_IN_DIAG_POWER_NOISE_READ_CONFIG_LEN 12 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_DIAG_POWER_NOISE_READ_CONFIG_OP_OFST 4 +#define MC_CMD_FC_IN_DIAG_POWER_NOISE_READ_CONFIG_OP_LEN 4 +#define MC_CMD_FC_IN_DIAG_POWER_NOISE_READ_CONFIG_SUB_OP_OFST 8 +#define MC_CMD_FC_IN_DIAG_POWER_NOISE_READ_CONFIG_SUB_OP_LEN 4 + +/* MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG msgrequest */ +#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG_LEN 20 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG_OP_OFST 4 +#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG_OP_LEN 4 +#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG_SUB_OP_OFST 8 +#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG_SUB_OP_LEN 4 +/* The 32-bit value to be written to the toggle count register */ +#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG_TOGGLE_COUNT_OFST 12 +#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG_TOGGLE_COUNT_LEN 4 +/* The 32-bit value to be written to the clock enable count register */ +#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG_CLKEN_COUNT_OFST 16 +#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG_CLKEN_COUNT_LEN 4 + +/* MC_CMD_FC_IN_DIAG_DDR_SOAK msgrequest */ +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_LEN 12 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_OP_OFST 4 +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_OP_LEN 4 +/* Sub-opcode describing the operation to be carried out */ +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_SUB_OP_OFST 8 +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_SUB_OP_LEN 4 +/* enum: Starts DDR soak test on selected banks */ +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START 0x0 +/* enum: Read status of DDR soak test */ +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT 0x1 +/* enum: Stop test */ +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP 0x2 +/* enum: Set or clear bit that triggers fake errors. These cause subsequent + * tests to fail until the bit is cleared. 
+ */ +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR 0x3 + +/* MC_CMD_FC_IN_DIAG_DDR_SOAK_START msgrequest */ +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_LEN 24 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_OP_OFST 4 +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_OP_LEN 4 +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_SUB_OP_OFST 8 +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_SUB_OP_LEN 4 +/* Mask of DDR banks to be tested */ +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_BANK_MASK_OFST 12 +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_BANK_MASK_LEN 4 +/* Pattern to use in the soak test */ +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_TEST_PATTERN_OFST 16 +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_TEST_PATTERN_LEN 4 +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_ZEROS 0x0 /* enum */ +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_ONES 0x1 /* enum */ +/* Either multiple automatic tests until a STOP command is issued, or one + * single test + */ +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_TEST_TYPE_OFST 20 +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_TEST_TYPE_LEN 4 +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_ONGOING_TEST 0x0 /* enum */ +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_SINGLE_TEST 0x1 /* enum */ + +/* MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT msgrequest */ +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT_LEN 16 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT_OP_OFST 4 +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT_OP_LEN 4 +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT_SUB_OP_OFST 8 +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT_SUB_OP_LEN 4 +/* DDR bank to read status from */ +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT_BANK_ID_OFST 12 +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT_BANK_ID_LEN 4 +#define MC_CMD_FC_DDR_BANK0 0x0 /* enum */ +#define MC_CMD_FC_DDR_BANK1 0x1 /* enum */ +#define MC_CMD_FC_DDR_BANK2 0x2 /* enum */ +#define MC_CMD_FC_DDR_BANK3 0x3 /* enum */ +#define MC_CMD_FC_DDR_AOEMEM_MAX_BANKS 0x4 /* enum */ + +/* MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP msgrequest */ +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP_LEN 16 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP_OP_OFST 4 +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP_OP_LEN 4 +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP_SUB_OP_OFST 8 +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP_SUB_OP_LEN 4 +/* Mask of DDR banks to be tested */ +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP_BANK_MASK_OFST 12 +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP_BANK_MASK_LEN 4 + +/* MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR msgrequest */ +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_LEN 20 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_OP_OFST 4 +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_OP_LEN 4 +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_SUB_OP_OFST 8 +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_SUB_OP_LEN 4 +/* Mask of DDR banks to set/clear error flag on */ +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_BANK_MASK_OFST 12 +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_BANK_MASK_LEN 4 +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_FLAG_ACTION_OFST 16 +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_FLAG_ACTION_LEN 4 +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_CLEAR 0x0 /* enum */ +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_SET 0x1 /* enum */ + +/* MC_CMD_FC_IN_DIAG_DATAPATH_CTRL msgrequest */ +#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_LEN 12 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_OP_OFST 4 +#define 
MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_OP_LEN 4 +/* Sub-opcode describing the operation to be carried out */ +#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SUB_OP_OFST 8 +#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SUB_OP_LEN 4 +/* enum: Set a known datapath configuration */ +#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE 0x0 +/* enum: Apply raw config to datapath control registers */ +#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG 0x1 + +/* MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE msgrequest */ +#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_LEN 16 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_OP_OFST 4 +#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_OP_LEN 4 +#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_SUB_OP_OFST 8 +#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_SUB_OP_LEN 4 +/* Datapath configuration identifier */ +#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_MODE_OFST 12 +#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_MODE_LEN 4 +#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_PASSTHROUGH 0x0 /* enum */ +#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_SNAKE 0x1 /* enum */ + +/* MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG msgrequest */ +#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_LEN 24 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_OP_OFST 4 +#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_OP_LEN 4 +#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_SUB_OP_OFST 8 +#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_SUB_OP_LEN 4 +/* Value to write into control register 1 */ +#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_CONTROL1_OFST 12 +#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_CONTROL1_LEN 4 +/* Value to write into control register 2 */ +#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_CONTROL2_OFST 16 +#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_CONTROL2_LEN 4 +/* Value to write into control register 3 */ +#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_CONTROL3_OFST 20 +#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_CONTROL3_LEN 4 + +/* MC_CMD_FC_OUT msgresponse */ +#define MC_CMD_FC_OUT_LEN 0 + +/* MC_CMD_FC_OUT_NULL msgresponse */ +#define MC_CMD_FC_OUT_NULL_LEN 0 + +/* MC_CMD_FC_OUT_READ32 msgresponse */ +#define MC_CMD_FC_OUT_READ32_LENMIN 4 +#define MC_CMD_FC_OUT_READ32_LENMAX 252 +#define MC_CMD_FC_OUT_READ32_LENMAX_MCDI2 1020 +#define MC_CMD_FC_OUT_READ32_LEN(num) (0+4*(num)) +#define MC_CMD_FC_OUT_READ32_BUFFER_NUM(len) (((len)-0)/4) +#define MC_CMD_FC_OUT_READ32_BUFFER_OFST 0 +#define MC_CMD_FC_OUT_READ32_BUFFER_LEN 4 +#define MC_CMD_FC_OUT_READ32_BUFFER_MINNUM 1 +#define MC_CMD_FC_OUT_READ32_BUFFER_MAXNUM 63 +#define MC_CMD_FC_OUT_READ32_BUFFER_MAXNUM_MCDI2 255 + +/* MC_CMD_FC_OUT_WRITE32 msgresponse */ +#define MC_CMD_FC_OUT_WRITE32_LEN 0 + +/* MC_CMD_FC_OUT_TRC_READ msgresponse */ +#define MC_CMD_FC_OUT_TRC_READ_LEN 16 +#define MC_CMD_FC_OUT_TRC_READ_DATA_OFST 0 +#define MC_CMD_FC_OUT_TRC_READ_DATA_LEN 4 +#define MC_CMD_FC_OUT_TRC_READ_DATA_NUM 4 + +/* MC_CMD_FC_OUT_TRC_WRITE msgresponse */ +#define MC_CMD_FC_OUT_TRC_WRITE_LEN 0 + +/* MC_CMD_FC_OUT_GET_VERSION msgresponse */ +#define MC_CMD_FC_OUT_GET_VERSION_LEN 12 +#define MC_CMD_FC_OUT_GET_VERSION_FIRMWARE_OFST 0 +#define MC_CMD_FC_OUT_GET_VERSION_FIRMWARE_LEN 4 +#define MC_CMD_FC_OUT_GET_VERSION_VERSION_OFST 4 +#define MC_CMD_FC_OUT_GET_VERSION_VERSION_LEN 8 +#define MC_CMD_FC_OUT_GET_VERSION_VERSION_LO_OFST 4 +#define 
MC_CMD_FC_OUT_GET_VERSION_VERSION_HI_OFST 8 + +/* MC_CMD_FC_OUT_TRC_RX_READ msgresponse */ +#define MC_CMD_FC_OUT_TRC_RX_READ_LEN 8 +#define MC_CMD_FC_OUT_TRC_RX_READ_DATA_OFST 0 +#define MC_CMD_FC_OUT_TRC_RX_READ_DATA_LEN 4 +#define MC_CMD_FC_OUT_TRC_RX_READ_DATA_NUM 2 + +/* MC_CMD_FC_OUT_TRC_RX_WRITE msgresponse */ +#define MC_CMD_FC_OUT_TRC_RX_WRITE_LEN 0 + +/* MC_CMD_FC_OUT_MAC_RECONFIGURE msgresponse */ +#define MC_CMD_FC_OUT_MAC_RECONFIGURE_LEN 0 + +/* MC_CMD_FC_OUT_MAC_SET_LINK msgresponse */ +#define MC_CMD_FC_OUT_MAC_SET_LINK_LEN 0 + +/* MC_CMD_FC_OUT_MAC_READ_STATUS msgresponse */ +#define MC_CMD_FC_OUT_MAC_READ_STATUS_LEN 4 +#define MC_CMD_FC_OUT_MAC_READ_STATUS_STATUS_OFST 0 +#define MC_CMD_FC_OUT_MAC_READ_STATUS_STATUS_LEN 4 + +/* MC_CMD_FC_OUT_MAC_GET_RX_STATS msgresponse */ +#define MC_CMD_FC_OUT_MAC_GET_RX_STATS_LEN ((((0-1+(64*MC_CMD_FC_MAC_RX_NSTATS))+1))>>3) +#define MC_CMD_FC_OUT_MAC_GET_RX_STATS_STATISTICS_OFST 0 +#define MC_CMD_FC_OUT_MAC_GET_RX_STATS_STATISTICS_LEN 8 +#define MC_CMD_FC_OUT_MAC_GET_RX_STATS_STATISTICS_LO_OFST 0 +#define MC_CMD_FC_OUT_MAC_GET_RX_STATS_STATISTICS_HI_OFST 4 +#define MC_CMD_FC_OUT_MAC_GET_RX_STATS_STATISTICS_NUM MC_CMD_FC_MAC_RX_NSTATS +#define MC_CMD_FC_MAC_RX_STATS_OCTETS 0x0 /* enum */ +#define MC_CMD_FC_MAC_RX_OCTETS_OK 0x1 /* enum */ +#define MC_CMD_FC_MAC_RX_ALIGNMENT_ERRORS 0x2 /* enum */ +#define MC_CMD_FC_MAC_RX_PAUSE_MAC_CTRL_FRAMES 0x3 /* enum */ +#define MC_CMD_FC_MAC_RX_FRAMES_OK 0x4 /* enum */ +#define MC_CMD_FC_MAC_RX_CRC_ERRORS 0x5 /* enum */ +#define MC_CMD_FC_MAC_RX_VLAN_OK 0x6 /* enum */ +#define MC_CMD_FC_MAC_RX_ERRORS 0x7 /* enum */ +#define MC_CMD_FC_MAC_RX_UCAST_PKTS 0x8 /* enum */ +#define MC_CMD_FC_MAC_RX_MULTICAST_PKTS 0x9 /* enum */ +#define MC_CMD_FC_MAC_RX_BROADCAST_PKTS 0xa /* enum */ +#define MC_CMD_FC_MAC_RX_STATS_DROP_EVENTS 0xb /* enum */ +#define MC_CMD_FC_MAC_RX_STATS_PKTS 0xc /* enum */ +#define MC_CMD_FC_MAC_RX_STATS_UNDERSIZE_PKTS 0xd /* enum */ +#define MC_CMD_FC_MAC_RX_STATS_PKTS_64 0xe /* enum */ +#define MC_CMD_FC_MAC_RX_STATS_PKTS_65_127 0xf /* enum */ +#define MC_CMD_FC_MAC_RX_STATS_PKTS_128_255 0x10 /* enum */ +#define MC_CMD_FC_MAC_RX_STATS_PKTS_256_511 0x11 /* enum */ +#define MC_CMD_FC_MAC_RX_STATS_PKTS_512_1023 0x12 /* enum */ +#define MC_CMD_FC_MAC_RX_STATS_PKTS_1024_1518 0x13 /* enum */ +#define MC_CMD_FC_MAC_RX_STATS_PKTS_1519_MAX 0x14 /* enum */ +#define MC_CMD_FC_MAC_RX_STATS_OVERSIZE_PKTS 0x15 /* enum */ +#define MC_CMD_FC_MAC_RX_STATS_JABBERS 0x16 /* enum */ +#define MC_CMD_FC_MAC_RX_STATS_FRAGMENTS 0x17 /* enum */ +#define MC_CMD_FC_MAC_RX_MAC_CONTROL_FRAMES 0x18 /* enum */ +/* enum: (Last entry) */ +#define MC_CMD_FC_MAC_RX_NSTATS 0x19 + +/* MC_CMD_FC_OUT_MAC_GET_TX_STATS msgresponse */ +#define MC_CMD_FC_OUT_MAC_GET_TX_STATS_LEN ((((0-1+(64*MC_CMD_FC_MAC_TX_NSTATS))+1))>>3) +#define MC_CMD_FC_OUT_MAC_GET_TX_STATS_STATISTICS_OFST 0 +#define MC_CMD_FC_OUT_MAC_GET_TX_STATS_STATISTICS_LEN 8 +#define MC_CMD_FC_OUT_MAC_GET_TX_STATS_STATISTICS_LO_OFST 0 +#define MC_CMD_FC_OUT_MAC_GET_TX_STATS_STATISTICS_HI_OFST 4 +#define MC_CMD_FC_OUT_MAC_GET_TX_STATS_STATISTICS_NUM MC_CMD_FC_MAC_TX_NSTATS +#define MC_CMD_FC_MAC_TX_STATS_OCTETS 0x0 /* enum */ +#define MC_CMD_FC_MAC_TX_OCTETS_OK 0x1 /* enum */ +#define MC_CMD_FC_MAC_TX_ALIGNMENT_ERRORS 0x2 /* enum */ +#define MC_CMD_FC_MAC_TX_PAUSE_MAC_CTRL_FRAMES 0x3 /* enum */ +#define MC_CMD_FC_MAC_TX_FRAMES_OK 0x4 /* enum */ +#define MC_CMD_FC_MAC_TX_CRC_ERRORS 0x5 /* enum */ +#define MC_CMD_FC_MAC_TX_VLAN_OK 0x6 /* enum */ +#define 
MC_CMD_FC_MAC_TX_ERRORS 0x7 /* enum */ +#define MC_CMD_FC_MAC_TX_UCAST_PKTS 0x8 /* enum */ +#define MC_CMD_FC_MAC_TX_MULTICAST_PKTS 0x9 /* enum */ +#define MC_CMD_FC_MAC_TX_BROADCAST_PKTS 0xa /* enum */ +#define MC_CMD_FC_MAC_TX_STATS_DROP_EVENTS 0xb /* enum */ +#define MC_CMD_FC_MAC_TX_STATS_PKTS 0xc /* enum */ +#define MC_CMD_FC_MAC_TX_STATS_UNDERSIZE_PKTS 0xd /* enum */ +#define MC_CMD_FC_MAC_TX_STATS_PKTS_64 0xe /* enum */ +#define MC_CMD_FC_MAC_TX_STATS_PKTS_65_127 0xf /* enum */ +#define MC_CMD_FC_MAC_TX_STATS_PKTS_128_255 0x10 /* enum */ +#define MC_CMD_FC_MAC_TX_STATS_PKTS_256_511 0x11 /* enum */ +#define MC_CMD_FC_MAC_TX_STATS_PKTS_512_1023 0x12 /* enum */ +#define MC_CMD_FC_MAC_TX_STATS_PKTS_1024_1518 0x13 /* enum */ +#define MC_CMD_FC_MAC_TX_STATS_PKTS_1519_TX_MTU 0x14 /* enum */ +#define MC_CMD_FC_MAC_TX_MAC_CONTROL_FRAMES 0x15 /* enum */ +/* enum: (Last entry) */ +#define MC_CMD_FC_MAC_TX_NSTATS 0x16 + +/* MC_CMD_FC_OUT_MAC_GET_STATS msgresponse */ +#define MC_CMD_FC_OUT_MAC_GET_STATS_LEN ((((0-1+(64*MC_CMD_FC_MAC_NSTATS_PER_BLOCK))+1))>>3) +/* MAC Statistics */ +#define MC_CMD_FC_OUT_MAC_GET_STATS_STATISTICS_OFST 0 +#define MC_CMD_FC_OUT_MAC_GET_STATS_STATISTICS_LEN 8 +#define MC_CMD_FC_OUT_MAC_GET_STATS_STATISTICS_LO_OFST 0 +#define MC_CMD_FC_OUT_MAC_GET_STATS_STATISTICS_HI_OFST 4 +#define MC_CMD_FC_OUT_MAC_GET_STATS_STATISTICS_NUM MC_CMD_FC_MAC_NSTATS_PER_BLOCK + +/* MC_CMD_FC_OUT_MAC msgresponse */ +#define MC_CMD_FC_OUT_MAC_LEN 0 + +/* MC_CMD_FC_OUT_SFP msgresponse */ +#define MC_CMD_FC_OUT_SFP_LEN 0 + +/* MC_CMD_FC_OUT_DDR_TEST_START msgresponse */ +#define MC_CMD_FC_OUT_DDR_TEST_START_LEN 0 + +/* MC_CMD_FC_OUT_DDR_TEST_POLL msgresponse */ +#define MC_CMD_FC_OUT_DDR_TEST_POLL_LEN 8 +#define MC_CMD_FC_OUT_DDR_TEST_POLL_STATUS_OFST 0 +#define MC_CMD_FC_OUT_DDR_TEST_POLL_STATUS_LEN 4 +#define MC_CMD_FC_OUT_DDR_TEST_POLL_CODE_LBN 0 +#define MC_CMD_FC_OUT_DDR_TEST_POLL_CODE_WIDTH 8 +/* enum: Test not yet initiated */ +#define MC_CMD_FC_OP_DDR_TEST_NONE 0x0 +/* enum: Test is in progress */ +#define MC_CMD_FC_OP_DDR_TEST_INPROGRESS 0x1 +/* enum: Test completed */ +#define MC_CMD_FC_OP_DDR_TEST_SUCCESS 0x2 +/* enum: Test did not complete in specified time */ +#define MC_CMD_FC_OP_DDR_TEST_TIMER_EXPIRED 0x3 +#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_T0_LBN 11 +#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_T0_WIDTH 1 +#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_T1_LBN 10 +#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_T1_WIDTH 1 +#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_B0_LBN 9 +#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_B0_WIDTH 1 +#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_B1_LBN 8 +#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_B1_WIDTH 1 +/* Test result from FPGA */ +#define MC_CMD_FC_OUT_DDR_TEST_POLL_RESULT_OFST 4 +#define MC_CMD_FC_OUT_DDR_TEST_POLL_RESULT_LEN 4 +#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_T0_LBN 31 +#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_T0_WIDTH 1 +#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_T1_LBN 30 +#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_T1_WIDTH 1 +#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_B0_LBN 29 +#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_B0_WIDTH 1 +#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_B1_LBN 28 +#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_B1_WIDTH 1 +#define MC_CMD_FC_OUT_DDR_TEST_POLL_T0_LBN 15 +#define MC_CMD_FC_OUT_DDR_TEST_POLL_T0_WIDTH 5 +#define MC_CMD_FC_OUT_DDR_TEST_POLL_T1_LBN 10 +#define MC_CMD_FC_OUT_DDR_TEST_POLL_T1_WIDTH 5 +#define 
MC_CMD_FC_OUT_DDR_TEST_POLL_B0_LBN 5 +#define MC_CMD_FC_OUT_DDR_TEST_POLL_B0_WIDTH 5 +#define MC_CMD_FC_OUT_DDR_TEST_POLL_B1_LBN 0 +#define MC_CMD_FC_OUT_DDR_TEST_POLL_B1_WIDTH 5 +#define MC_CMD_FC_OUT_DDR_TEST_POLL_TEST_COMPLETE 0x0 /* enum */ +#define MC_CMD_FC_OUT_DDR_TEST_POLL_TEST_FAIL 0x1 /* enum */ +#define MC_CMD_FC_OUT_DDR_TEST_POLL_TEST_PASS 0x2 /* enum */ +#define MC_CMD_FC_OUT_DDR_TEST_POLL_CAL_FAIL 0x3 /* enum */ +#define MC_CMD_FC_OUT_DDR_TEST_POLL_CAL_SUCCESS 0x4 /* enum */ + +/* MC_CMD_FC_OUT_DDR_TEST msgresponse */ +#define MC_CMD_FC_OUT_DDR_TEST_LEN 0 + +/* MC_CMD_FC_OUT_GET_ASSERT msgresponse */ +#define MC_CMD_FC_OUT_GET_ASSERT_LEN 144 +/* Assertion status flag. */ +#define MC_CMD_FC_OUT_GET_ASSERT_GLOBAL_FLAGS_OFST 0 +#define MC_CMD_FC_OUT_GET_ASSERT_GLOBAL_FLAGS_LEN 4 +#define MC_CMD_FC_OUT_GET_ASSERT_STATE_LBN 8 +#define MC_CMD_FC_OUT_GET_ASSERT_STATE_WIDTH 8 +/* enum: No crash data available */ +#define MC_CMD_FC_GET_ASSERT_FLAGS_STATE_CLEAR 0x0 +/* enum: New crash data available */ +#define MC_CMD_FC_GET_ASSERT_FLAGS_STATE_NEW 0x1 +/* enum: Crash data has been sent */ +#define MC_CMD_FC_GET_ASSERT_FLAGS_STATE_NOTIFIED 0x2 +#define MC_CMD_FC_OUT_GET_ASSERT_TYPE_LBN 0 +#define MC_CMD_FC_OUT_GET_ASSERT_TYPE_WIDTH 8 +/* enum: No crash has been recorded. */ +#define MC_CMD_FC_GET_ASSERT_FLAGS_TYPE_NONE 0x0 +/* enum: Crash due to exception. */ +#define MC_CMD_FC_GET_ASSERT_FLAGS_TYPE_EXCEPTION 0x1 +/* enum: Crash due to assertion. */ +#define MC_CMD_FC_GET_ASSERT_FLAGS_TYPE_ASSERTION 0x2 +/* Failing PC value */ +#define MC_CMD_FC_OUT_GET_ASSERT_SAVED_PC_OFFS_OFST 4 +#define MC_CMD_FC_OUT_GET_ASSERT_SAVED_PC_OFFS_LEN 4 +/* Saved GP regs */ +#define MC_CMD_FC_OUT_GET_ASSERT_GP_REGS_OFFS_OFST 8 +#define MC_CMD_FC_OUT_GET_ASSERT_GP_REGS_OFFS_LEN 4 +#define MC_CMD_FC_OUT_GET_ASSERT_GP_REGS_OFFS_NUM 31 +/* Exception Type */ +#define MC_CMD_FC_OUT_GET_ASSERT_EXCEPTION_TYPE_OFFS_OFST 132 +#define MC_CMD_FC_OUT_GET_ASSERT_EXCEPTION_TYPE_OFFS_LEN 4 +/* Instruction at which exception occurred */ +#define MC_CMD_FC_OUT_GET_ASSERT_EXCEPTION_PC_ADDR_OFFS_OFST 136 +#define MC_CMD_FC_OUT_GET_ASSERT_EXCEPTION_PC_ADDR_OFFS_LEN 4 +/* BAD Address that triggered address-based exception */ +#define MC_CMD_FC_OUT_GET_ASSERT_EXCEPTION_BAD_ADDR_OFFS_OFST 140 +#define MC_CMD_FC_OUT_GET_ASSERT_EXCEPTION_BAD_ADDR_OFFS_LEN 4 + +/* MC_CMD_FC_OUT_FPGA_BUILD msgresponse */ +#define MC_CMD_FC_OUT_FPGA_BUILD_LEN 32 +#define MC_CMD_FC_OUT_FPGA_BUILD_COMPONENT_INFO_OFST 0 +#define MC_CMD_FC_OUT_FPGA_BUILD_COMPONENT_INFO_LEN 4 +#define MC_CMD_FC_OUT_FPGA_BUILD_IS_APPLICATION_LBN 31 +#define MC_CMD_FC_OUT_FPGA_BUILD_IS_APPLICATION_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_IS_LICENSED_LBN 30 +#define MC_CMD_FC_OUT_FPGA_BUILD_IS_LICENSED_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_COMPONENT_ID_LBN 16 +#define MC_CMD_FC_OUT_FPGA_BUILD_COMPONENT_ID_WIDTH 14 +#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_MAJOR_LBN 12 +#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_MAJOR_WIDTH 4 +#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_MINOR_LBN 4 +#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_MINOR_WIDTH 8 +#define MC_CMD_FC_OUT_FPGA_BUILD_BUILD_NUM_LBN 0 +#define MC_CMD_FC_OUT_FPGA_BUILD_BUILD_NUM_WIDTH 4 +/* Build timestamp (seconds since epoch) */ +#define MC_CMD_FC_OUT_FPGA_BUILD_TIMESTAMP_OFST 4 +#define MC_CMD_FC_OUT_FPGA_BUILD_TIMESTAMP_LEN 4 +#define MC_CMD_FC_OUT_FPGA_BUILD_PARAMETERS_OFST 8 +#define MC_CMD_FC_OUT_FPGA_BUILD_PARAMETERS_LEN 4 +#define MC_CMD_FC_OUT_FPGA_BUILD_FPGA_TYPE_LBN 0 +#define 
MC_CMD_FC_OUT_FPGA_BUILD_FPGA_TYPE_WIDTH 8 +#define MC_CMD_FC_FPGA_TYPE_A7 0xa7 /* enum */ +#define MC_CMD_FC_FPGA_TYPE_A5 0xa5 /* enum */ +#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED1_LBN 8 +#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED1_WIDTH 10 +#define MC_CMD_FC_OUT_FPGA_BUILD_PTP_ENABLED_LBN 18 +#define MC_CMD_FC_OUT_FPGA_BUILD_PTP_ENABLED_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM1_RLDRAM_DEF_LBN 19 +#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM1_RLDRAM_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM2_RLDRAM_DEF_LBN 20 +#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM2_RLDRAM_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM3_RLDRAM_DEF_LBN 21 +#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM3_RLDRAM_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM4_RLDRAM_DEF_LBN 22 +#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM4_RLDRAM_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_T0_DDR3_DEF_LBN 23 +#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_T0_DDR3_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_T1_DDR3_DEF_LBN 24 +#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_T1_DDR3_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_B0_DDR3_DEF_LBN 25 +#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_B0_DDR3_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_B1_DDR3_DEF_LBN 26 +#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_B1_DDR3_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_DDR3_ECC_ENABLED_LBN 27 +#define MC_CMD_FC_OUT_FPGA_BUILD_DDR3_ECC_ENABLED_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_T1_QDR_DEF_LBN 28 +#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_T1_QDR_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED2_LBN 29 +#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED2_WIDTH 2 +#define MC_CMD_FC_OUT_FPGA_BUILD_CRC_APPEND_LBN 31 +#define MC_CMD_FC_OUT_FPGA_BUILD_CRC_APPEND_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_IDENTIFIER_OFST 12 +#define MC_CMD_FC_OUT_FPGA_BUILD_IDENTIFIER_LEN 4 +#define MC_CMD_FC_OUT_FPGA_BUILD_CHANGESET_LBN 0 +#define MC_CMD_FC_OUT_FPGA_BUILD_CHANGESET_WIDTH 16 +#define MC_CMD_FC_OUT_FPGA_BUILD_BUILD_FLAG_LBN 16 +#define MC_CMD_FC_OUT_FPGA_BUILD_BUILD_FLAG_WIDTH 1 +#define MC_CMD_FC_FPGA_BUILD_FLAG_INTERNAL 0x0 /* enum */ +#define MC_CMD_FC_FPGA_BUILD_FLAG_RELEASE 0x1 /* enum */ +#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED3_LBN 17 +#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED3_WIDTH 15 +#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_HI_OFST 16 +#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_HI_LEN 4 +#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_MINOR_LBN 0 +#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_MINOR_WIDTH 16 +#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_MAJOR_LBN 16 +#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_MAJOR_WIDTH 16 +#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_LO_OFST 20 +#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_LO_LEN 4 +#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_BUILD_LBN 0 +#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_BUILD_WIDTH 16 +#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_MICRO_LBN 16 +#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_MICRO_WIDTH 16 +#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED4_OFST 16 +#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED4_LEN 8 +#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED4_LO_OFST 16 +#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED4_HI_OFST 20 +#define MC_CMD_FC_OUT_FPGA_BUILD_REVISION_LO_OFST 24 +#define MC_CMD_FC_OUT_FPGA_BUILD_REVISION_LO_LEN 4 +#define MC_CMD_FC_OUT_FPGA_BUILD_REVISION_HI_OFST 28 +#define MC_CMD_FC_OUT_FPGA_BUILD_REVISION_HI_LEN 4 +#define MC_CMD_FC_OUT_FPGA_BUILD_REVISION_HIGH_LBN 0 +#define 
MC_CMD_FC_OUT_FPGA_BUILD_REVISION_HIGH_WIDTH 16 + +/* MC_CMD_FC_OUT_FPGA_BUILD_V2 msgresponse */ +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_LEN 32 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_COMPONENT_INFO_OFST 0 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_COMPONENT_INFO_LEN 4 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_IS_APPLICATION_LBN 31 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_IS_APPLICATION_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_IS_LICENSED_LBN 30 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_IS_LICENSED_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_COMPONENT_ID_LBN 16 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_COMPONENT_ID_WIDTH 14 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_MAJOR_LBN 12 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_MAJOR_WIDTH 4 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_MINOR_LBN 4 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_MINOR_WIDTH 8 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_BUILD_NUM_LBN 0 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_BUILD_NUM_WIDTH 4 +/* Build timestamp (seconds since epoch) */ +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_TIMESTAMP_OFST 4 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_TIMESTAMP_LEN 4 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_PARAMETERS_OFST 8 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_PARAMETERS_LEN 4 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_PMA_PASSTHROUGH_LBN 31 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_PMA_PASSTHROUGH_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM2_QDR_DEF_LBN 29 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM2_QDR_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM1_QDR_DEF_LBN 28 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM1_QDR_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DDR3_ECC_ENABLED_LBN 27 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DDR3_ECC_ENABLED_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE2_DDR3_DEF_LBN 26 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE2_DDR3_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE1_DDR3_DEF_LBN 25 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE1_DDR3_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM2_TO_DDR3_DEF_LBN 24 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM2_TO_DDR3_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM1_T0_DDR3_DEF_LBN 23 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM1_T0_DDR3_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE2_RLDRAM_DEF_LBN 22 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE2_RLDRAM_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE1_RLDRAM_DEF_LBN 21 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE1_RLDRAM_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM2_RLDRAM_DEF_LBN 20 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM2_RLDRAM_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM1_RLDRAM_DEF_LBN 19 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM1_RLDRAM_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC0_3_SPEED_LBN 18 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC0_3_SPEED_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC0_3_SPEED_10G 0x0 /* enum */ +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC0_3_SPEED_40G 0x1 /* enum */ +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP4_7_SPEED_LBN 17 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP4_7_SPEED_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP4_7_SPEED_10G 0x0 /* enum */ +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP4_7_SPEED_40G 0x1 /* enum */ +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP0_3_SPEED_LBN 16 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP0_3_SPEED_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP0_3_SPEED_10G 0x0 /* enum */ +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP0_3_SPEED_40G 0x1 /* enum */ +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP7_DEF_LBN 15 +#define 
MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP7_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP6_DEF_LBN 14 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP6_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP5_DEF_LBN 13 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP5_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP4_DEF_LBN 12 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP4_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP3_DEF_LBN 11 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP3_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP2_DEF_LBN 10 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP2_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP1_DEF_LBN 9 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP1_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP0_DEF_LBN 8 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP0_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC3_DEF_LBN 7 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC3_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC2_DEF_LBN 6 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC2_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC1_DEF_LBN 5 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC1_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC0_DEF_LBN 4 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC0_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_FPGA_TYPE_LBN 0 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_FPGA_TYPE_WIDTH 4 +#define MC_CMD_FC_FPGA_V2_TYPE_A3 0x0 /* enum */ +#define MC_CMD_FC_FPGA_V2_TYPE_A4 0x1 /* enum */ +#define MC_CMD_FC_FPGA_V2_TYPE_A5 0x2 /* enum */ +#define MC_CMD_FC_FPGA_V2_TYPE_A7 0x3 /* enum */ +#define MC_CMD_FC_FPGA_V2_TYPE_D3 0x8 /* enum */ +#define MC_CMD_FC_FPGA_V2_TYPE_D4 0x9 /* enum */ +#define MC_CMD_FC_FPGA_V2_TYPE_D5 0xa /* enum */ +#define MC_CMD_FC_FPGA_V2_TYPE_D7 0xb /* enum */ +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_IDENTIFIER_OFST 12 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_IDENTIFIER_LEN 4 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_CHANGESET_LBN 0 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_CHANGESET_WIDTH 16 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_BUILD_FLAG_LBN 16 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_BUILD_FLAG_WIDTH 1 +/* MC_CMD_FC_FPGA_BUILD_FLAG_INTERNAL 0x0 */ +/* MC_CMD_FC_FPGA_BUILD_FLAG_RELEASE 0x1 */ +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_HI_OFST 16 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_HI_LEN 4 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_MINOR_LBN 0 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_MINOR_WIDTH 16 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_MAJOR_LBN 16 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_MAJOR_WIDTH 16 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_LO_OFST 20 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_LO_LEN 4 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_BUILD_LBN 0 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_BUILD_WIDTH 16 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_MICRO_LBN 16 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_MICRO_WIDTH 16 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_REVISION_LO_OFST 24 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_REVISION_LO_LEN 4 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_REVISION_HI_OFST 28 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_REVISION_HI_LEN 4 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_REVISION_HIGH_LBN 0 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_REVISION_HIGH_WIDTH 16 + +/* MC_CMD_FC_OUT_FPGA_SERVICES msgresponse */ +#define MC_CMD_FC_OUT_FPGA_SERVICES_LEN 32 +#define MC_CMD_FC_OUT_FPGA_SERVICES_COMPONENT_INFO_OFST 0 +#define MC_CMD_FC_OUT_FPGA_SERVICES_COMPONENT_INFO_LEN 4 +#define MC_CMD_FC_OUT_FPGA_SERVICES_IS_APPLICATION_LBN 31 +#define 
MC_CMD_FC_OUT_FPGA_SERVICES_IS_APPLICATION_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_SERVICES_IS_LICENSED_LBN 30 +#define MC_CMD_FC_OUT_FPGA_SERVICES_IS_LICENSED_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_SERVICES_COMPONENT_ID_LBN 16 +#define MC_CMD_FC_OUT_FPGA_SERVICES_COMPONENT_ID_WIDTH 14 +#define MC_CMD_FC_OUT_FPGA_SERVICES_VERSION_MAJOR_LBN 12 +#define MC_CMD_FC_OUT_FPGA_SERVICES_VERSION_MAJOR_WIDTH 4 +#define MC_CMD_FC_OUT_FPGA_SERVICES_VERSION_MINOR_LBN 4 +#define MC_CMD_FC_OUT_FPGA_SERVICES_VERSION_MINOR_WIDTH 8 +#define MC_CMD_FC_OUT_FPGA_SERVICES_BUILD_NUM_LBN 0 +#define MC_CMD_FC_OUT_FPGA_SERVICES_BUILD_NUM_WIDTH 4 +/* Build timestamp (seconds since epoch) */ +#define MC_CMD_FC_OUT_FPGA_SERVICES_TIMESTAMP_OFST 4 +#define MC_CMD_FC_OUT_FPGA_SERVICES_TIMESTAMP_LEN 4 +#define MC_CMD_FC_OUT_FPGA_SERVICES_PARAMETERS_OFST 8 +#define MC_CMD_FC_OUT_FPGA_SERVICES_PARAMETERS_LEN 4 +#define MC_CMD_FC_OUT_FPGA_SERVICES_FC_FLASH_BOOTED_LBN 8 +#define MC_CMD_FC_OUT_FPGA_SERVICES_FC_FLASH_BOOTED_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_SERVICES_NIC0_DEF_LBN 27 +#define MC_CMD_FC_OUT_FPGA_SERVICES_NIC0_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_SERVICES_NIC1_DEF_LBN 28 +#define MC_CMD_FC_OUT_FPGA_SERVICES_NIC1_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_SERVICES_SFP0_DEF_LBN 29 +#define MC_CMD_FC_OUT_FPGA_SERVICES_SFP0_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_SERVICES_SFP1_DEF_LBN 30 +#define MC_CMD_FC_OUT_FPGA_SERVICES_SFP1_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_SERVICES_RESERVED_LBN 31 +#define MC_CMD_FC_OUT_FPGA_SERVICES_RESERVED_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_SERVICES_IDENTIFIER_OFST 12 +#define MC_CMD_FC_OUT_FPGA_SERVICES_IDENTIFIER_LEN 4 +#define MC_CMD_FC_OUT_FPGA_SERVICES_CHANGESET_LBN 0 +#define MC_CMD_FC_OUT_FPGA_SERVICES_CHANGESET_WIDTH 16 +#define MC_CMD_FC_OUT_FPGA_SERVICES_BUILD_FLAG_LBN 16 +#define MC_CMD_FC_OUT_FPGA_SERVICES_BUILD_FLAG_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_SERVICES_MEMORY_SIZE_OFST 16 +#define MC_CMD_FC_OUT_FPGA_SERVICES_MEMORY_SIZE_LEN 4 +#define MC_CMD_FC_OUT_FPGA_SERVICES_MEMORY_SIZE_WIDTH_LBN 0 +#define MC_CMD_FC_OUT_FPGA_SERVICES_MEMORY_SIZE_WIDTH_WIDTH 16 +#define MC_CMD_FC_OUT_FPGA_SERVICES_MEMORY_SIZE_COUNT_LBN 16 +#define MC_CMD_FC_OUT_FPGA_SERVICES_MEMORY_SIZE_COUNT_WIDTH 16 +#define MC_CMD_FC_OUT_FPGA_SERVICES_INSTANCE_SIZE_OFST 20 +#define MC_CMD_FC_OUT_FPGA_SERVICES_INSTANCE_SIZE_LEN 4 +#define MC_CMD_FC_OUT_FPGA_SERVICES_INSTANCE_SIZE_WIDTH_LBN 0 +#define MC_CMD_FC_OUT_FPGA_SERVICES_INSTANCE_SIZE_WIDTH_WIDTH 16 +#define MC_CMD_FC_OUT_FPGA_SERVICES_INSTANCE_SIZE_COUNT_LBN 16 +#define MC_CMD_FC_OUT_FPGA_SERVICES_INSTANCE_SIZE_COUNT_WIDTH 16 +#define MC_CMD_FC_OUT_FPGA_SERVICES_REVISION_LO_OFST 24 +#define MC_CMD_FC_OUT_FPGA_SERVICES_REVISION_LO_LEN 4 +#define MC_CMD_FC_OUT_FPGA_SERVICES_REVISION_HI_OFST 28 +#define MC_CMD_FC_OUT_FPGA_SERVICES_REVISION_HI_LEN 4 +#define MC_CMD_FC_OUT_FPGA_SERVICES_REVISION_HIGH_LBN 0 +#define MC_CMD_FC_OUT_FPGA_SERVICES_REVISION_HIGH_WIDTH 16 + +/* MC_CMD_FC_OUT_FPGA_SERVICES_V2 msgresponse */ +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_LEN 32 +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_COMPONENT_INFO_OFST 0 +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_COMPONENT_INFO_LEN 4 +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_IS_APPLICATION_LBN 31 +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_IS_APPLICATION_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_IS_LICENSED_LBN 30 +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_IS_LICENSED_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_COMPONENT_ID_LBN 16 +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_COMPONENT_ID_WIDTH 14 +#define 
MC_CMD_FC_OUT_FPGA_SERVICES_V2_VERSION_MAJOR_LBN 12 +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_VERSION_MAJOR_WIDTH 4 +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_VERSION_MINOR_LBN 4 +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_VERSION_MINOR_WIDTH 8 +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_BUILD_NUM_LBN 0 +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_BUILD_NUM_WIDTH 4 +/* Build timestamp (seconds since epoch) */ +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_TIMESTAMP_OFST 4 +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_TIMESTAMP_LEN 4 +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_PARAMETERS_OFST 8 +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_PARAMETERS_LEN 4 +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_PTP_ENABLED_LBN 0 +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_PTP_ENABLED_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_FC_FLASH_BOOTED_LBN 8 +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_FC_FLASH_BOOTED_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_IDENTIFIER_OFST 12 +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_IDENTIFIER_LEN 4 +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_CHANGESET_LBN 0 +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_CHANGESET_WIDTH 16 +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_BUILD_FLAG_LBN 16 +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_BUILD_FLAG_WIDTH 1 +/* MC_CMD_FC_FPGA_BUILD_FLAG_INTERNAL 0x0 */ +/* MC_CMD_FC_FPGA_BUILD_FLAG_RELEASE 0x1 */ +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_REVISION_LO_OFST 24 +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_REVISION_LO_LEN 4 +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_REVISION_HI_OFST 28 +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_REVISION_HI_LEN 4 +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_REVISION_HIGH_LBN 0 +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_REVISION_HIGH_WIDTH 16 + +/* MC_CMD_FC_OUT_BSP_VERSION msgresponse */ +#define MC_CMD_FC_OUT_BSP_VERSION_LEN 4 +/* Qsys system ID */ +#define MC_CMD_FC_OUT_BSP_VERSION_SYSID_OFST 0 +#define MC_CMD_FC_OUT_BSP_VERSION_SYSID_LEN 4 +#define MC_CMD_FC_OUT_BSP_VERSION_VERSION_MAJOR_LBN 12 +#define MC_CMD_FC_OUT_BSP_VERSION_VERSION_MAJOR_WIDTH 4 +#define MC_CMD_FC_OUT_BSP_VERSION_VERSION_MINOR_LBN 4 +#define MC_CMD_FC_OUT_BSP_VERSION_VERSION_MINOR_WIDTH 8 +#define MC_CMD_FC_OUT_BSP_VERSION_BUILD_NUM_LBN 0 +#define MC_CMD_FC_OUT_BSP_VERSION_BUILD_NUM_WIDTH 4 + +/* MC_CMD_FC_OUT_READ_MAP_COUNT msgresponse */ +#define MC_CMD_FC_OUT_READ_MAP_COUNT_LEN 4 +/* Number of maps */ +#define MC_CMD_FC_OUT_READ_MAP_COUNT_NUM_MAPS_OFST 0 +#define MC_CMD_FC_OUT_READ_MAP_COUNT_NUM_MAPS_LEN 4 + +/* MC_CMD_FC_OUT_READ_MAP_INDEX msgresponse */ +#define MC_CMD_FC_OUT_READ_MAP_INDEX_LEN 164 +/* Index of the map */ +#define MC_CMD_FC_OUT_READ_MAP_INDEX_INDEX_OFST 0 +#define MC_CMD_FC_OUT_READ_MAP_INDEX_INDEX_LEN 4 +/* Options for the map */ +#define MC_CMD_FC_OUT_READ_MAP_INDEX_OPTIONS_OFST 4 +#define MC_CMD_FC_OUT_READ_MAP_INDEX_OPTIONS_LEN 4 +#define MC_CMD_FC_OUT_READ_MAP_INDEX_ALIGN_8 0x0 /* enum */ +#define MC_CMD_FC_OUT_READ_MAP_INDEX_ALIGN_16 0x1 /* enum */ +#define MC_CMD_FC_OUT_READ_MAP_INDEX_ALIGN_32 0x2 /* enum */ +#define MC_CMD_FC_OUT_READ_MAP_INDEX_ALIGN_64 0x3 /* enum */ +#define MC_CMD_FC_OUT_READ_MAP_INDEX_ALIGN_MASK 0x3 /* enum */ +#define MC_CMD_FC_OUT_READ_MAP_INDEX_PATH_FC 0x4 /* enum */ +#define MC_CMD_FC_OUT_READ_MAP_INDEX_PATH_MEM 0x8 /* enum */ +#define MC_CMD_FC_OUT_READ_MAP_INDEX_PERM_READ 0x10 /* enum */ +#define MC_CMD_FC_OUT_READ_MAP_INDEX_PERM_WRITE 0x20 /* enum */ +#define MC_CMD_FC_OUT_READ_MAP_INDEX_LICENSE_FREE 0x0 /* enum */ +#define MC_CMD_FC_OUT_READ_MAP_INDEX_LICENSE_LICENSED 0x40 /* enum */ +/* Address of start of map */ +#define 
MC_CMD_FC_OUT_READ_MAP_INDEX_ADDRESS_OFST 8 +#define MC_CMD_FC_OUT_READ_MAP_INDEX_ADDRESS_LEN 8 +#define MC_CMD_FC_OUT_READ_MAP_INDEX_ADDRESS_LO_OFST 8 +#define MC_CMD_FC_OUT_READ_MAP_INDEX_ADDRESS_HI_OFST 12 +/* Length of address map */ +#define MC_CMD_FC_OUT_READ_MAP_INDEX_LEN_OFST 16 +#define MC_CMD_FC_OUT_READ_MAP_INDEX_LEN_LEN 8 +#define MC_CMD_FC_OUT_READ_MAP_INDEX_LEN_LO_OFST 16 +#define MC_CMD_FC_OUT_READ_MAP_INDEX_LEN_HI_OFST 20 +/* Component information field */ +#define MC_CMD_FC_OUT_READ_MAP_INDEX_COMP_INFO_OFST 24 +#define MC_CMD_FC_OUT_READ_MAP_INDEX_COMP_INFO_LEN 4 +/* License expiry data for map */ +#define MC_CMD_FC_OUT_READ_MAP_INDEX_LICENSE_DATE_OFST 28 +#define MC_CMD_FC_OUT_READ_MAP_INDEX_LICENSE_DATE_LEN 8 +#define MC_CMD_FC_OUT_READ_MAP_INDEX_LICENSE_DATE_LO_OFST 28 +#define MC_CMD_FC_OUT_READ_MAP_INDEX_LICENSE_DATE_HI_OFST 32 +/* Name of the component */ +#define MC_CMD_FC_OUT_READ_MAP_INDEX_NAME_OFST 36 +#define MC_CMD_FC_OUT_READ_MAP_INDEX_NAME_LEN 1 +#define MC_CMD_FC_OUT_READ_MAP_INDEX_NAME_NUM 128 + +/* MC_CMD_FC_OUT_READ_MAP msgresponse */ +#define MC_CMD_FC_OUT_READ_MAP_LEN 0 + +/* MC_CMD_FC_OUT_CAPABILITIES msgresponse */ +#define MC_CMD_FC_OUT_CAPABILITIES_LEN 8 +/* Number of internal ports */ +#define MC_CMD_FC_OUT_CAPABILITIES_INTERNAL_OFST 0 +#define MC_CMD_FC_OUT_CAPABILITIES_INTERNAL_LEN 4 +/* Number of external ports */ +#define MC_CMD_FC_OUT_CAPABILITIES_EXTERNAL_OFST 4 +#define MC_CMD_FC_OUT_CAPABILITIES_EXTERNAL_LEN 4 + +/* MC_CMD_FC_OUT_GLOBAL_FLAGS msgresponse */ +#define MC_CMD_FC_OUT_GLOBAL_FLAGS_LEN 4 +#define MC_CMD_FC_OUT_GLOBAL_FLAGS_FLAGS_OFST 0 +#define MC_CMD_FC_OUT_GLOBAL_FLAGS_FLAGS_LEN 4 + +/* MC_CMD_FC_OUT_IO_REL msgresponse */ +#define MC_CMD_FC_OUT_IO_REL_LEN 0 + +/* MC_CMD_FC_OUT_IO_REL_GET_ADDR msgresponse */ +#define MC_CMD_FC_OUT_IO_REL_GET_ADDR_LEN 8 +#define MC_CMD_FC_OUT_IO_REL_GET_ADDR_ADDR_HI_OFST 0 +#define MC_CMD_FC_OUT_IO_REL_GET_ADDR_ADDR_HI_LEN 4 +#define MC_CMD_FC_OUT_IO_REL_GET_ADDR_ADDR_LO_OFST 4 +#define MC_CMD_FC_OUT_IO_REL_GET_ADDR_ADDR_LO_LEN 4 + +/* MC_CMD_FC_OUT_IO_REL_READ32 msgresponse */ +#define MC_CMD_FC_OUT_IO_REL_READ32_LENMIN 4 +#define MC_CMD_FC_OUT_IO_REL_READ32_LENMAX 252 +#define MC_CMD_FC_OUT_IO_REL_READ32_LENMAX_MCDI2 1020 +#define MC_CMD_FC_OUT_IO_REL_READ32_LEN(num) (0+4*(num)) +#define MC_CMD_FC_OUT_IO_REL_READ32_BUFFER_NUM(len) (((len)-0)/4) +#define MC_CMD_FC_OUT_IO_REL_READ32_BUFFER_OFST 0 +#define MC_CMD_FC_OUT_IO_REL_READ32_BUFFER_LEN 4 +#define MC_CMD_FC_OUT_IO_REL_READ32_BUFFER_MINNUM 1 +#define MC_CMD_FC_OUT_IO_REL_READ32_BUFFER_MAXNUM 63 +#define MC_CMD_FC_OUT_IO_REL_READ32_BUFFER_MAXNUM_MCDI2 255 + +/* MC_CMD_FC_OUT_IO_REL_WRITE32 msgresponse */ +#define MC_CMD_FC_OUT_IO_REL_WRITE32_LEN 0 + +/* MC_CMD_FC_OUT_UHLINK_PHY msgresponse */ +#define MC_CMD_FC_OUT_UHLINK_PHY_LEN 48 +#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_SETTINGS_0_OFST 0 +#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_SETTINGS_0_LEN 4 +#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_VOD_LBN 0 +#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_VOD_WIDTH 16 +#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_PREEMP_1STPOSTTAP_LBN 16 +#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_PREEMP_1STPOSTTAP_WIDTH 16 +/* Transceiver Transmit settings */ +#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_SETTINGS_1_OFST 4 +#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_SETTINGS_1_LEN 4 +#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_PREEMP_PRETAP_LBN 0 +#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_PREEMP_PRETAP_WIDTH 16 +#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_PREEMP_2NDPOSTTAP_LBN 16 +#define 
MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_PREEMP_2NDPOSTTAP_WIDTH 16
+/* Transceiver Receive settings */
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_RX_SETTINGS_OFST 8
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_RX_SETTINGS_LEN 4
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_RX_DC_GAIN_LBN 0
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_RX_DC_GAIN_WIDTH 16
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_RX_EQ_CONTROL_LBN 16
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_RX_EQ_CONTROL_WIDTH 16
+/* Rx eye opening */
+#define MC_CMD_FC_OUT_UHLINK_PHY_RX_EYE_OFST 12
+#define MC_CMD_FC_OUT_UHLINK_PHY_RX_EYE_LEN 4
+#define MC_CMD_FC_OUT_UHLINK_PHY_RX_EYE_WIDTH_LBN 0
+#define MC_CMD_FC_OUT_UHLINK_PHY_RX_EYE_WIDTH_WIDTH 16
+#define MC_CMD_FC_OUT_UHLINK_PHY_RX_EYE_HEIGHT_LBN 16
+#define MC_CMD_FC_OUT_UHLINK_PHY_RX_EYE_HEIGHT_WIDTH 16
+/* PCS status word */
+#define MC_CMD_FC_OUT_UHLINK_PHY_PCS_STATUS_OFST 16
+#define MC_CMD_FC_OUT_UHLINK_PHY_PCS_STATUS_LEN 4
+/* Link status word */
+#define MC_CMD_FC_OUT_UHLINK_PHY_LINK_STATE_WORD_OFST 20
+#define MC_CMD_FC_OUT_UHLINK_PHY_LINK_STATE_WORD_LEN 4
+#define MC_CMD_FC_OUT_UHLINK_PHY_LINK_STATE_LBN 0
+#define MC_CMD_FC_OUT_UHLINK_PHY_LINK_STATE_WIDTH 1
+#define MC_CMD_FC_OUT_UHLINK_PHY_LINK_CONFIGURED_LBN 1
+#define MC_CMD_FC_OUT_UHLINK_PHY_LINK_CONFIGURED_WIDTH 1
+/* Current SFP parameters applied */
+#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_PARAMS_OFST 24
+#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_PARAMS_LEN 20
+/* Link speed is 100, 1000, 10000 */
+#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_SPEED_OFST 24
+#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_SPEED_LEN 4
+/* Length of copper cable - zero when not relevant */
+#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_COPPER_LEN_OFST 28
+#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_COPPER_LEN_LEN 4
+/* True if a dual speed SFP+ module */
+#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_DUAL_SPEED_OFST 32
+#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_DUAL_SPEED_LEN 4
+/* True if an SFP Module is present (other fields valid when true) */
+#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_PRESENT_OFST 36
+#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_PRESENT_LEN 4
+/* The type of the SFP+ Module */
+#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_TYPE_OFST 40
+#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_TYPE_LEN 4
+/* PHY config flags */
+#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_OFST 44
+#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_LEN 4
+#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_DFE_LBN 0
+#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_DFE_WIDTH 1
+#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_AEQ_LBN 1
+#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_AEQ_WIDTH 1
+#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_RX_TUNING_LBN 2
+#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_RX_TUNING_WIDTH 1
+
+/* MC_CMD_FC_OUT_UHLINK_MAC msgresponse */
+#define MC_CMD_FC_OUT_UHLINK_MAC_LEN 20
+/* MAC configuration applied */
+#define MC_CMD_FC_OUT_UHLINK_MAC_CONFIG_OFST 0
+#define MC_CMD_FC_OUT_UHLINK_MAC_CONFIG_LEN 4
+/* MTU size */
+#define MC_CMD_FC_OUT_UHLINK_MAC_MTU_OFST 4
+#define MC_CMD_FC_OUT_UHLINK_MAC_MTU_LEN 4
+/* IF Mode status */
+#define MC_CMD_FC_OUT_UHLINK_MAC_IF_STATUS_OFST 8
+#define MC_CMD_FC_OUT_UHLINK_MAC_IF_STATUS_LEN 4
+/* MAC address configured */
+#define MC_CMD_FC_OUT_UHLINK_MAC_ADDR_OFST 12
+#define MC_CMD_FC_OUT_UHLINK_MAC_ADDR_LEN 8
+#define MC_CMD_FC_OUT_UHLINK_MAC_ADDR_LO_OFST 12
+#define MC_CMD_FC_OUT_UHLINK_MAC_ADDR_HI_OFST 16
+
+/* MC_CMD_FC_OUT_UHLINK_RX_EYE msgresponse */
+#define MC_CMD_FC_OUT_UHLINK_RX_EYE_LEN ((((0-1+(32*MC_CMD_FC_UHLINK_RX_EYE_PER_BLOCK))+1))>>3)
+/* Rx Eye measurements */
+#define MC_CMD_FC_OUT_UHLINK_RX_EYE_RX_EYE_OFST 0
+#define
MC_CMD_FC_OUT_UHLINK_RX_EYE_RX_EYE_LEN 4 +#define MC_CMD_FC_OUT_UHLINK_RX_EYE_RX_EYE_NUM MC_CMD_FC_UHLINK_RX_EYE_PER_BLOCK + +/* MC_CMD_FC_OUT_UHLINK_DUMP_RX_EYE_PLOT msgresponse */ +#define MC_CMD_FC_OUT_UHLINK_DUMP_RX_EYE_PLOT_LEN 0 + +/* MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT msgresponse */ +#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_LEN ((((32-1+(64*MC_CMD_FC_UHLINK_RX_EYE_PLOT_ROWS_PER_BLOCK))+1))>>3) +/* Has the eye plot dump completed and data returned is valid? */ +#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_VALID_OFST 0 +#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_VALID_LEN 4 +/* Rx Eye binary plot */ +#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_ROWS_OFST 4 +#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_ROWS_LEN 8 +#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_ROWS_LO_OFST 4 +#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_ROWS_HI_OFST 8 +#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_ROWS_NUM MC_CMD_FC_UHLINK_RX_EYE_PLOT_ROWS_PER_BLOCK + +/* MC_CMD_FC_OUT_UHLINK_RX_TUNE msgresponse */ +#define MC_CMD_FC_OUT_UHLINK_RX_TUNE_LEN 0 + +/* MC_CMD_FC_OUT_UHLINK_LOOPBACK_SET msgresponse */ +#define MC_CMD_FC_OUT_UHLINK_LOOPBACK_SET_LEN 0 + +/* MC_CMD_FC_OUT_UHLINK_LOOPBACK_GET msgresponse */ +#define MC_CMD_FC_OUT_UHLINK_LOOPBACK_GET_LEN 4 +#define MC_CMD_FC_OUT_UHLINK_LOOPBACK_GET_STATE_OFST 0 +#define MC_CMD_FC_OUT_UHLINK_LOOPBACK_GET_STATE_LEN 4 + +/* MC_CMD_FC_OUT_UHLINK msgresponse */ +#define MC_CMD_FC_OUT_UHLINK_LEN 0 + +/* MC_CMD_FC_OUT_SET_LINK msgresponse */ +#define MC_CMD_FC_OUT_SET_LINK_LEN 0 + +/* MC_CMD_FC_OUT_LICENSE msgresponse */ +#define MC_CMD_FC_OUT_LICENSE_LEN 12 +/* Count of valid keys */ +#define MC_CMD_FC_OUT_LICENSE_VALID_KEYS_OFST 0 +#define MC_CMD_FC_OUT_LICENSE_VALID_KEYS_LEN 4 +/* Count of invalid keys */ +#define MC_CMD_FC_OUT_LICENSE_INVALID_KEYS_OFST 4 +#define MC_CMD_FC_OUT_LICENSE_INVALID_KEYS_LEN 4 +/* Count of blacklisted keys */ +#define MC_CMD_FC_OUT_LICENSE_BLACKLISTED_KEYS_OFST 8 +#define MC_CMD_FC_OUT_LICENSE_BLACKLISTED_KEYS_LEN 4 + +/* MC_CMD_FC_OUT_STARTUP msgresponse */ +#define MC_CMD_FC_OUT_STARTUP_LEN 4 +/* Capabilities of the FPGA/FC */ +#define MC_CMD_FC_OUT_STARTUP_CAPABILITIES_OFST 0 +#define MC_CMD_FC_OUT_STARTUP_CAPABILITIES_LEN 4 +#define MC_CMD_FC_OUT_STARTUP_CAN_ACCESS_FLASH_LBN 0 +#define MC_CMD_FC_OUT_STARTUP_CAN_ACCESS_FLASH_WIDTH 1 + +/* MC_CMD_FC_OUT_DMA_READ msgresponse */ +#define MC_CMD_FC_OUT_DMA_READ_LENMIN 1 +#define MC_CMD_FC_OUT_DMA_READ_LENMAX 252 +#define MC_CMD_FC_OUT_DMA_READ_LENMAX_MCDI2 1020 +#define MC_CMD_FC_OUT_DMA_READ_LEN(num) (0+1*(num)) +#define MC_CMD_FC_OUT_DMA_READ_DATA_NUM(len) (((len)-0)/1) +/* The data read */ +#define MC_CMD_FC_OUT_DMA_READ_DATA_OFST 0 +#define MC_CMD_FC_OUT_DMA_READ_DATA_LEN 1 +#define MC_CMD_FC_OUT_DMA_READ_DATA_MINNUM 1 +#define MC_CMD_FC_OUT_DMA_READ_DATA_MAXNUM 252 +#define MC_CMD_FC_OUT_DMA_READ_DATA_MAXNUM_MCDI2 1020 + +/* MC_CMD_FC_OUT_TIMED_READ_SET msgresponse */ +#define MC_CMD_FC_OUT_TIMED_READ_SET_LEN 4 +/* Timer handle */ +#define MC_CMD_FC_OUT_TIMED_READ_SET_FC_HANDLE_OFST 0 +#define MC_CMD_FC_OUT_TIMED_READ_SET_FC_HANDLE_LEN 4 + +/* MC_CMD_FC_OUT_TIMED_READ_GET msgresponse */ +#define MC_CMD_FC_OUT_TIMED_READ_GET_LEN 52 +/* Host supplied handle (unique) */ +#define MC_CMD_FC_OUT_TIMED_READ_GET_HOST_HANDLE_OFST 0 +#define MC_CMD_FC_OUT_TIMED_READ_GET_HOST_HANDLE_LEN 4 +/* Address into which to transfer data in host */ +#define MC_CMD_FC_OUT_TIMED_READ_GET_HOST_DMA_ADDRESS_OFST 4 +#define MC_CMD_FC_OUT_TIMED_READ_GET_HOST_DMA_ADDRESS_LEN 8 +#define 
MC_CMD_FC_OUT_TIMED_READ_GET_HOST_DMA_ADDRESS_LO_OFST 4 +#define MC_CMD_FC_OUT_TIMED_READ_GET_HOST_DMA_ADDRESS_HI_OFST 8 +/* AOE address from which to transfer data */ +#define MC_CMD_FC_OUT_TIMED_READ_GET_AOE_ADDRESS_OFST 12 +#define MC_CMD_FC_OUT_TIMED_READ_GET_AOE_ADDRESS_LEN 8 +#define MC_CMD_FC_OUT_TIMED_READ_GET_AOE_ADDRESS_LO_OFST 12 +#define MC_CMD_FC_OUT_TIMED_READ_GET_AOE_ADDRESS_HI_OFST 16 +/* Length of AOE transfer (total) */ +#define MC_CMD_FC_OUT_TIMED_READ_GET_AOE_LENGTH_OFST 20 +#define MC_CMD_FC_OUT_TIMED_READ_GET_AOE_LENGTH_LEN 4 +/* Length of host transfer (total) */ +#define MC_CMD_FC_OUT_TIMED_READ_GET_HOST_LENGTH_OFST 24 +#define MC_CMD_FC_OUT_TIMED_READ_GET_HOST_LENGTH_LEN 4 +/* See FLAGS entry for MC_CMD_FC_IN_TIMED_READ_SET */ +#define MC_CMD_FC_OUT_TIMED_READ_GET_FLAGS_OFST 28 +#define MC_CMD_FC_OUT_TIMED_READ_GET_FLAGS_LEN 4 +#define MC_CMD_FC_OUT_TIMED_READ_GET_PERIOD_OFST 32 +#define MC_CMD_FC_OUT_TIMED_READ_GET_PERIOD_LEN 4 +/* When active, start read time */ +#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_START_OFST 36 +#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_START_LEN 8 +#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_START_LO_OFST 36 +#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_START_HI_OFST 40 +/* When active, end read time */ +#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_END_OFST 44 +#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_END_LEN 8 +#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_END_LO_OFST 44 +#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_END_HI_OFST 48 + +/* MC_CMD_FC_OUT_LOG_ADDR_RANGE msgresponse */ +#define MC_CMD_FC_OUT_LOG_ADDR_RANGE_LEN 0 + +/* MC_CMD_FC_OUT_LOG msgresponse */ +#define MC_CMD_FC_OUT_LOG_LEN 0 + +/* MC_CMD_FC_OUT_CLOCK_GET_TIME msgresponse */ +#define MC_CMD_FC_OUT_CLOCK_GET_TIME_LEN 24 +#define MC_CMD_FC_OUT_CLOCK_GET_TIME_CLOCK_ID_OFST 0 +#define MC_CMD_FC_OUT_CLOCK_GET_TIME_CLOCK_ID_LEN 4 +#define MC_CMD_FC_OUT_CLOCK_GET_TIME_SECONDS_OFST 4 +#define MC_CMD_FC_OUT_CLOCK_GET_TIME_SECONDS_LEN 8 +#define MC_CMD_FC_OUT_CLOCK_GET_TIME_SECONDS_LO_OFST 4 +#define MC_CMD_FC_OUT_CLOCK_GET_TIME_SECONDS_HI_OFST 8 +#define MC_CMD_FC_OUT_CLOCK_GET_TIME_NANOSECONDS_OFST 12 +#define MC_CMD_FC_OUT_CLOCK_GET_TIME_NANOSECONDS_LEN 4 +#define MC_CMD_FC_OUT_CLOCK_GET_TIME_RANGE_OFST 16 +#define MC_CMD_FC_OUT_CLOCK_GET_TIME_RANGE_LEN 4 +#define MC_CMD_FC_OUT_CLOCK_GET_TIME_PRECISION_OFST 20 +#define MC_CMD_FC_OUT_CLOCK_GET_TIME_PRECISION_LEN 4 + +/* MC_CMD_FC_OUT_CLOCK_SET_TIME msgresponse */ +#define MC_CMD_FC_OUT_CLOCK_SET_TIME_LEN 0 + +/* MC_CMD_FC_OUT_DDR_SET_SPD msgresponse */ +#define MC_CMD_FC_OUT_DDR_SET_SPD_LEN 0 + +/* MC_CMD_FC_OUT_DDR_SET_INFO msgresponse */ +#define MC_CMD_FC_OUT_DDR_SET_INFO_LEN 0 + +/* MC_CMD_FC_OUT_DDR_GET_STATUS msgresponse */ +#define MC_CMD_FC_OUT_DDR_GET_STATUS_LEN 4 +#define MC_CMD_FC_OUT_DDR_GET_STATUS_FLAGS_OFST 0 +#define MC_CMD_FC_OUT_DDR_GET_STATUS_FLAGS_LEN 4 +#define MC_CMD_FC_OUT_DDR_GET_STATUS_READY_LBN 0 +#define MC_CMD_FC_OUT_DDR_GET_STATUS_READY_WIDTH 1 +#define MC_CMD_FC_OUT_DDR_GET_STATUS_CALIBRATED_LBN 1 +#define MC_CMD_FC_OUT_DDR_GET_STATUS_CALIBRATED_WIDTH 1 + +/* MC_CMD_FC_OUT_TIMESTAMP_READ_TRANSMIT msgresponse */ +#define MC_CMD_FC_OUT_TIMESTAMP_READ_TRANSMIT_LEN 8 +#define MC_CMD_FC_OUT_TIMESTAMP_READ_TRANSMIT_SECONDS_OFST 0 +#define MC_CMD_FC_OUT_TIMESTAMP_READ_TRANSMIT_SECONDS_LEN 4 +#define MC_CMD_FC_OUT_TIMESTAMP_READ_TRANSMIT_NANOSECONDS_OFST 4 +#define MC_CMD_FC_OUT_TIMESTAMP_READ_TRANSMIT_NANOSECONDS_LEN 4 + +/* MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT msgresponse */ +#define 
MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_LENMIN 8 +#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_LENMAX 248 +#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_LENMAX_MCDI2 1016 +#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_LEN(num) (0+8*(num)) +#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_TIMESTAMP_NUM(len) (((len)-0)/8) +#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_SECONDS_OFST 0 +#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_SECONDS_LEN 4 +#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_NANOSECONDS_OFST 4 +#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_NANOSECONDS_LEN 4 +#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_TIMESTAMP_OFST 0 +#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_TIMESTAMP_LEN 8 +#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_TIMESTAMP_LO_OFST 0 +#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_TIMESTAMP_HI_OFST 4 +#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_TIMESTAMP_MINNUM 0 +#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_TIMESTAMP_MAXNUM 31 +#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_TIMESTAMP_MAXNUM_MCDI2 127 + +/* MC_CMD_FC_OUT_SPI_READ msgresponse */ +#define MC_CMD_FC_OUT_SPI_READ_LENMIN 4 +#define MC_CMD_FC_OUT_SPI_READ_LENMAX 252 +#define MC_CMD_FC_OUT_SPI_READ_LENMAX_MCDI2 1020 +#define MC_CMD_FC_OUT_SPI_READ_LEN(num) (0+4*(num)) +#define MC_CMD_FC_OUT_SPI_READ_BUFFER_NUM(len) (((len)-0)/4) +#define MC_CMD_FC_OUT_SPI_READ_BUFFER_OFST 0 +#define MC_CMD_FC_OUT_SPI_READ_BUFFER_LEN 4 +#define MC_CMD_FC_OUT_SPI_READ_BUFFER_MINNUM 1 +#define MC_CMD_FC_OUT_SPI_READ_BUFFER_MAXNUM 63 +#define MC_CMD_FC_OUT_SPI_READ_BUFFER_MAXNUM_MCDI2 255 + +/* MC_CMD_FC_OUT_SPI_WRITE msgresponse */ +#define MC_CMD_FC_OUT_SPI_WRITE_LEN 0 + +/* MC_CMD_FC_OUT_SPI_ERASE msgresponse */ +#define MC_CMD_FC_OUT_SPI_ERASE_LEN 0 + +/* MC_CMD_FC_OUT_DIAG_POWER_NOISE_READ_CONFIG msgresponse */ +#define MC_CMD_FC_OUT_DIAG_POWER_NOISE_READ_CONFIG_LEN 8 +/* The 32-bit value read from the toggle count register */ +#define MC_CMD_FC_OUT_DIAG_POWER_NOISE_READ_CONFIG_TOGGLE_COUNT_OFST 0 +#define MC_CMD_FC_OUT_DIAG_POWER_NOISE_READ_CONFIG_TOGGLE_COUNT_LEN 4 +/* The 32-bit value read from the clock enable count register */ +#define MC_CMD_FC_OUT_DIAG_POWER_NOISE_READ_CONFIG_CLKEN_COUNT_OFST 4 +#define MC_CMD_FC_OUT_DIAG_POWER_NOISE_READ_CONFIG_CLKEN_COUNT_LEN 4 + +/* MC_CMD_FC_OUT_DIAG_POWER_NOISE_WRITE_CONFIG msgresponse */ +#define MC_CMD_FC_OUT_DIAG_POWER_NOISE_WRITE_CONFIG_LEN 0 + +/* MC_CMD_FC_OUT_DIAG_DDR_SOAK_START msgresponse */ +#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_START_LEN 0 + +/* MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT msgresponse */ +#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_LEN 8 +/* DDR soak test status word; bits [4:0] are relevant. 
*/ +#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_STATUS_OFST 0 +#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_STATUS_LEN 4 +#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_PASSED_LBN 0 +#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_PASSED_WIDTH 1 +#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_FAILED_LBN 1 +#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_FAILED_WIDTH 1 +#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_COMPLETED_LBN 2 +#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_COMPLETED_WIDTH 1 +#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_TIMEOUT_LBN 3 +#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_TIMEOUT_WIDTH 1 +#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_PNF_LBN 4 +#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_PNF_WIDTH 1 +/* DDR soak test error count */ +#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_ERR_COUNT_OFST 4 +#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_ERR_COUNT_LEN 4 + +/* MC_CMD_FC_OUT_DIAG_DDR_SOAK_STOP msgresponse */ +#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_STOP_LEN 0 + +/* MC_CMD_FC_OUT_DIAG_DDR_SOAK_ERROR msgresponse */ +#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_ERROR_LEN 0 + +/* MC_CMD_FC_OUT_DIAG_DATAPATH_CTRL_SET_MODE msgresponse */ +#define MC_CMD_FC_OUT_DIAG_DATAPATH_CTRL_SET_MODE_LEN 0 + +/* MC_CMD_FC_OUT_DIAG_DATAPATH_CTRL_RAW_CONFIG msgresponse */ +#define MC_CMD_FC_OUT_DIAG_DATAPATH_CTRL_RAW_CONFIG_LEN 0 + + +/***********************************/ +/* MC_CMD_AOE + * AOE operations on MC + */ +#define MC_CMD_AOE 0xa + +/* MC_CMD_AOE_IN msgrequest */ +#define MC_CMD_AOE_IN_LEN 4 +#define MC_CMD_AOE_IN_OP_HDR_OFST 0 +#define MC_CMD_AOE_IN_OP_HDR_LEN 4 +#define MC_CMD_AOE_IN_OP_LBN 0 +#define MC_CMD_AOE_IN_OP_WIDTH 8 +/* enum: FPGA and CPLD information */ +#define MC_CMD_AOE_OP_INFO 0x1 +/* enum: Currents and voltages read from MCP3424s; DEBUG */ +#define MC_CMD_AOE_OP_CURRENTS 0x2 +/* enum: Temperatures at locations around the PCB; DEBUG */ +#define MC_CMD_AOE_OP_TEMPERATURES 0x3 +/* enum: Set CPLD to idle */ +#define MC_CMD_AOE_OP_CPLD_IDLE 0x4 +/* enum: Read from CPLD register */ +#define MC_CMD_AOE_OP_CPLD_READ 0x5 +/* enum: Write to CPLD register */ +#define MC_CMD_AOE_OP_CPLD_WRITE 0x6 +/* enum: Execute CPLD instruction */ +#define MC_CMD_AOE_OP_CPLD_INSTRUCTION 0x7 +/* enum: Reprogram the CPLD on the AOE device */ +#define MC_CMD_AOE_OP_CPLD_REPROGRAM 0x8 +/* enum: AOE power control */ +#define MC_CMD_AOE_OP_POWER 0x9 +/* enum: AOE image loading */ +#define MC_CMD_AOE_OP_LOAD 0xa +/* enum: Fan monitoring */ +#define MC_CMD_AOE_OP_FAN_CONTROL 0xb +/* enum: Fan failures since last reset */ +#define MC_CMD_AOE_OP_FAN_FAILURES 0xc +/* enum: Get generic AOE MAC statistics */ +#define MC_CMD_AOE_OP_MAC_STATS 0xd +/* enum: Retrieve PHY specific information */ +#define MC_CMD_AOE_OP_GET_PHY_MEDIA_INFO 0xe +/* enum: Write a number of JTAG primitive commands, return will give data */ +#define MC_CMD_AOE_OP_JTAG_WRITE 0xf +/* enum: Control access to the FPGA via the Siena JTAG Chain */ +#define MC_CMD_AOE_OP_FPGA_ACCESS 0x10 +/* enum: Set the MTU offset between Siena and AOE MACs */ +#define MC_CMD_AOE_OP_SET_MTU_OFFSET 0x11 +/* enum: How link state is handled */ +#define MC_CMD_AOE_OP_LINK_STATE 0x12 +/* enum: How Siena MAC statistics are reported (deprecated - use + * MC_CMD_AOE_OP_ASIC_STATS) + */ +#define MC_CMD_AOE_OP_SIENA_STATS 0x13 +/* enum: How native ASIC MAC statistics are reported - replaces the deprecated + * command MC_CMD_AOE_OP_SIENA_STATS + */ +#define MC_CMD_AOE_OP_ASIC_STATS 0x13 +/* enum: DDR memory information */ +#define MC_CMD_AOE_OP_DDR 0x14 +/* enum: FC control */ +#define MC_CMD_AOE_OP_FC 0x15 
+/* enum: DDR ECC status reads */ +#define MC_CMD_AOE_OP_DDR_ECC_STATUS 0x16 +/* enum: Commands for MC-SPI Master emulation */ +#define MC_CMD_AOE_OP_MC_SPI_MASTER 0x17 +/* enum: Commands for FC boot control */ +#define MC_CMD_AOE_OP_FC_BOOT 0x18 +/* enum: Get number of internal ports */ +#define MC_CMD_AOE_OP_GET_ASIC_PORTS 0x19 +/* enum: Get FC assert information and register dump */ +#define MC_CMD_AOE_OP_GET_FC_ASSERT_INFO 0x1a +/* enum: Set MUM startup FUSE byte with extended delay */ +#define MC_CMD_AOE_OP_MUM_STARTUP_FUSE 0x1b + +/* MC_CMD_AOE_OUT msgresponse */ +#define MC_CMD_AOE_OUT_LEN 0 + +/* MC_CMD_AOE_IN_INFO msgrequest */ +#define MC_CMD_AOE_IN_INFO_LEN 4 +#define MC_CMD_AOE_IN_CMD_OFST 0 +#define MC_CMD_AOE_IN_CMD_LEN 4 + +/* MC_CMD_AOE_IN_CURRENTS msgrequest */ +#define MC_CMD_AOE_IN_CURRENTS_LEN 4 +/* MC_CMD_AOE_IN_CMD_OFST 0 */ +/* MC_CMD_AOE_IN_CMD_LEN 4 */ + +/* MC_CMD_AOE_IN_TEMPERATURES msgrequest */ +#define MC_CMD_AOE_IN_TEMPERATURES_LEN 4 +/* MC_CMD_AOE_IN_CMD_OFST 0 */ +/* MC_CMD_AOE_IN_CMD_LEN 4 */ + +/* MC_CMD_AOE_IN_CPLD_IDLE msgrequest */ +#define MC_CMD_AOE_IN_CPLD_IDLE_LEN 4 +/* MC_CMD_AOE_IN_CMD_OFST 0 */ +/* MC_CMD_AOE_IN_CMD_LEN 4 */ + +/* MC_CMD_AOE_IN_CPLD_READ msgrequest */ +#define MC_CMD_AOE_IN_CPLD_READ_LEN 12 +/* MC_CMD_AOE_IN_CMD_OFST 0 */ +/* MC_CMD_AOE_IN_CMD_LEN 4 */ +#define MC_CMD_AOE_IN_CPLD_READ_REGISTER_OFST 4 +#define MC_CMD_AOE_IN_CPLD_READ_REGISTER_LEN 4 +#define MC_CMD_AOE_IN_CPLD_READ_WIDTH_OFST 8 +#define MC_CMD_AOE_IN_CPLD_READ_WIDTH_LEN 4 + +/* MC_CMD_AOE_IN_CPLD_WRITE msgrequest */ +#define MC_CMD_AOE_IN_CPLD_WRITE_LEN 16 +/* MC_CMD_AOE_IN_CMD_OFST 0 */ +/* MC_CMD_AOE_IN_CMD_LEN 4 */ +#define MC_CMD_AOE_IN_CPLD_WRITE_REGISTER_OFST 4 +#define MC_CMD_AOE_IN_CPLD_WRITE_REGISTER_LEN 4 +#define MC_CMD_AOE_IN_CPLD_WRITE_WIDTH_OFST 8 +#define MC_CMD_AOE_IN_CPLD_WRITE_WIDTH_LEN 4 +#define MC_CMD_AOE_IN_CPLD_WRITE_VALUE_OFST 12 +#define MC_CMD_AOE_IN_CPLD_WRITE_VALUE_LEN 4 + +/* MC_CMD_AOE_IN_CPLD_INSTRUCTION msgrequest */ +#define MC_CMD_AOE_IN_CPLD_INSTRUCTION_LEN 8 +/* MC_CMD_AOE_IN_CMD_OFST 0 */ +/* MC_CMD_AOE_IN_CMD_LEN 4 */ +#define MC_CMD_AOE_IN_CPLD_INSTRUCTION_INSTRUCTION_OFST 4 +#define MC_CMD_AOE_IN_CPLD_INSTRUCTION_INSTRUCTION_LEN 4 + +/* MC_CMD_AOE_IN_CPLD_REPROGRAM msgrequest */ +#define MC_CMD_AOE_IN_CPLD_REPROGRAM_LEN 8 +/* MC_CMD_AOE_IN_CMD_OFST 0 */ +/* MC_CMD_AOE_IN_CMD_LEN 4 */ +#define MC_CMD_AOE_IN_CPLD_REPROGRAM_OP_OFST 4 +#define MC_CMD_AOE_IN_CPLD_REPROGRAM_OP_LEN 4 +/* enum: Reprogram CPLD, poll for completion */ +#define MC_CMD_AOE_IN_CPLD_REPROGRAM_REPROGRAM 0x1 +/* enum: Reprogram CPLD, send event on completion */ +#define MC_CMD_AOE_IN_CPLD_REPROGRAM_REPROGRAM_EVENT 0x3 +/* enum: Get status of reprogramming operation */ +#define MC_CMD_AOE_IN_CPLD_REPROGRAM_STATUS 0x4 + +/* MC_CMD_AOE_IN_POWER msgrequest */ +#define MC_CMD_AOE_IN_POWER_LEN 8 +/* MC_CMD_AOE_IN_CMD_OFST 0 */ +/* MC_CMD_AOE_IN_CMD_LEN 4 */ +/* Turn on or off AOE power */ +#define MC_CMD_AOE_IN_POWER_OP_OFST 4 +#define MC_CMD_AOE_IN_POWER_OP_LEN 4 +/* enum: Turn off FPGA power */ +#define MC_CMD_AOE_IN_POWER_OFF 0x0 +/* enum: Turn on FPGA power */ +#define MC_CMD_AOE_IN_POWER_ON 0x1 +/* enum: Clear peak power measurement */ +#define MC_CMD_AOE_IN_POWER_CLEAR 0x2 +/* enum: Show current power in sensors output */ +#define MC_CMD_AOE_IN_POWER_SHOW_CURRENT 0x3 +/* enum: Show peak power in sensors output */ +#define MC_CMD_AOE_IN_POWER_SHOW_PEAK 0x4 +/* enum: Show current DDR current */ +#define MC_CMD_AOE_IN_POWER_DDR_LAST 0x5 +/* enum: Show peak DDR 
current */ +#define MC_CMD_AOE_IN_POWER_DDR_PEAK 0x6 +/* enum: Clear peak DDR current */ +#define MC_CMD_AOE_IN_POWER_DDR_CLEAR 0x7 + +/* MC_CMD_AOE_IN_LOAD msgrequest */ +#define MC_CMD_AOE_IN_LOAD_LEN 8 +/* MC_CMD_AOE_IN_CMD_OFST 0 */ +/* MC_CMD_AOE_IN_CMD_LEN 4 */ +/* Image to be loaded (0 - main or 1 - diagnostic) to load in normal sequence + */ +#define MC_CMD_AOE_IN_LOAD_IMAGE_OFST 4 +#define MC_CMD_AOE_IN_LOAD_IMAGE_LEN 4 + +/* MC_CMD_AOE_IN_FAN_CONTROL msgrequest */ +#define MC_CMD_AOE_IN_FAN_CONTROL_LEN 8 +/* MC_CMD_AOE_IN_CMD_OFST 0 */ +/* MC_CMD_AOE_IN_CMD_LEN 4 */ +/* If non zero report measured fan RPM rather than nominal */ +#define MC_CMD_AOE_IN_FAN_CONTROL_REAL_RPM_OFST 4 +#define MC_CMD_AOE_IN_FAN_CONTROL_REAL_RPM_LEN 4 + +/* MC_CMD_AOE_IN_FAN_FAILURES msgrequest */ +#define MC_CMD_AOE_IN_FAN_FAILURES_LEN 4 +/* MC_CMD_AOE_IN_CMD_OFST 0 */ +/* MC_CMD_AOE_IN_CMD_LEN 4 */ + +/* MC_CMD_AOE_IN_MAC_STATS msgrequest */ +#define MC_CMD_AOE_IN_MAC_STATS_LEN 24 +/* MC_CMD_AOE_IN_CMD_OFST 0 */ +/* MC_CMD_AOE_IN_CMD_LEN 4 */ +/* AOE port */ +#define MC_CMD_AOE_IN_MAC_STATS_PORT_OFST 4 +#define MC_CMD_AOE_IN_MAC_STATS_PORT_LEN 4 +/* Host memory address for statistics */ +#define MC_CMD_AOE_IN_MAC_STATS_DMA_ADDR_OFST 8 +#define MC_CMD_AOE_IN_MAC_STATS_DMA_ADDR_LEN 8 +#define MC_CMD_AOE_IN_MAC_STATS_DMA_ADDR_LO_OFST 8 +#define MC_CMD_AOE_IN_MAC_STATS_DMA_ADDR_HI_OFST 12 +#define MC_CMD_AOE_IN_MAC_STATS_CMD_OFST 16 +#define MC_CMD_AOE_IN_MAC_STATS_CMD_LEN 4 +#define MC_CMD_AOE_IN_MAC_STATS_DMA_LBN 0 +#define MC_CMD_AOE_IN_MAC_STATS_DMA_WIDTH 1 +#define MC_CMD_AOE_IN_MAC_STATS_CLEAR_LBN 1 +#define MC_CMD_AOE_IN_MAC_STATS_CLEAR_WIDTH 1 +#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_CHANGE_LBN 2 +#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_CHANGE_WIDTH 1 +#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_ENABLE_LBN 3 +#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_ENABLE_WIDTH 1 +#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_CLEAR_LBN 4 +#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_CLEAR_WIDTH 1 +#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_NOEVENT_LBN 5 +#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_NOEVENT_WIDTH 1 +#define MC_CMD_AOE_IN_MAC_STATS_PERIOD_MS_LBN 16 +#define MC_CMD_AOE_IN_MAC_STATS_PERIOD_MS_WIDTH 16 +/* Length of DMA data (optional) */ +#define MC_CMD_AOE_IN_MAC_STATS_DMA_LEN_OFST 20 +#define MC_CMD_AOE_IN_MAC_STATS_DMA_LEN_LEN 4 + +/* MC_CMD_AOE_IN_GET_PHY_MEDIA_INFO msgrequest */ +#define MC_CMD_AOE_IN_GET_PHY_MEDIA_INFO_LEN 12 +/* MC_CMD_AOE_IN_CMD_OFST 0 */ +/* MC_CMD_AOE_IN_CMD_LEN 4 */ +/* AOE port */ +#define MC_CMD_AOE_IN_GET_PHY_MEDIA_INFO_PORT_OFST 4 +#define MC_CMD_AOE_IN_GET_PHY_MEDIA_INFO_PORT_LEN 4 +#define MC_CMD_AOE_IN_GET_PHY_MEDIA_INFO_PAGE_OFST 8 +#define MC_CMD_AOE_IN_GET_PHY_MEDIA_INFO_PAGE_LEN 4 + +/* MC_CMD_AOE_IN_JTAG_WRITE msgrequest */ +#define MC_CMD_AOE_IN_JTAG_WRITE_LENMIN 12 +#define MC_CMD_AOE_IN_JTAG_WRITE_LENMAX 252 +#define MC_CMD_AOE_IN_JTAG_WRITE_LENMAX_MCDI2 1020 +#define MC_CMD_AOE_IN_JTAG_WRITE_LEN(num) (8+4*(num)) +#define MC_CMD_AOE_IN_JTAG_WRITE_DATA_NUM(len) (((len)-8)/4) +/* MC_CMD_AOE_IN_CMD_OFST 0 */ +/* MC_CMD_AOE_IN_CMD_LEN 4 */ +#define MC_CMD_AOE_IN_JTAG_WRITE_DATALEN_OFST 4 +#define MC_CMD_AOE_IN_JTAG_WRITE_DATALEN_LEN 4 +#define MC_CMD_AOE_IN_JTAG_WRITE_DATA_OFST 8 +#define MC_CMD_AOE_IN_JTAG_WRITE_DATA_LEN 4 +#define MC_CMD_AOE_IN_JTAG_WRITE_DATA_MINNUM 1 +#define MC_CMD_AOE_IN_JTAG_WRITE_DATA_MAXNUM 61 +#define MC_CMD_AOE_IN_JTAG_WRITE_DATA_MAXNUM_MCDI2 253 + +/* MC_CMD_AOE_IN_FPGA_ACCESS msgrequest */ +#define MC_CMD_AOE_IN_FPGA_ACCESS_LEN 8 +/* 
MC_CMD_AOE_IN_CMD_OFST 0 */ +/* MC_CMD_AOE_IN_CMD_LEN 4 */ +/* Enable or disable access */ +#define MC_CMD_AOE_IN_FPGA_ACCESS_OP_OFST 4 +#define MC_CMD_AOE_IN_FPGA_ACCESS_OP_LEN 4 +/* enum: Enable access */ +#define MC_CMD_AOE_IN_FPGA_ACCESS_ENABLE 0x1 +/* enum: Disable access */ +#define MC_CMD_AOE_IN_FPGA_ACCESS_DISABLE 0x2 + +/* MC_CMD_AOE_IN_SET_MTU_OFFSET msgrequest */ +#define MC_CMD_AOE_IN_SET_MTU_OFFSET_LEN 12 +/* MC_CMD_AOE_IN_CMD_OFST 0 */ +/* MC_CMD_AOE_IN_CMD_LEN 4 */ +/* AOE port - when not ALL_EXTERNAL or ALL_INTERNAL specifies port number */ +#define MC_CMD_AOE_IN_SET_MTU_OFFSET_PORT_OFST 4 +#define MC_CMD_AOE_IN_SET_MTU_OFFSET_PORT_LEN 4 +/* enum: Apply to all external ports */ +#define MC_CMD_AOE_IN_SET_MTU_OFFSET_ALL_EXTERNAL 0x8000 +/* enum: Apply to all internal ports */ +#define MC_CMD_AOE_IN_SET_MTU_OFFSET_ALL_INTERNAL 0x4000 +/* The MTU offset to be applied to the external ports */ +#define MC_CMD_AOE_IN_SET_MTU_OFFSET_OFFSET_OFST 8 +#define MC_CMD_AOE_IN_SET_MTU_OFFSET_OFFSET_LEN 4 + +/* MC_CMD_AOE_IN_LINK_STATE msgrequest */ +#define MC_CMD_AOE_IN_LINK_STATE_LEN 8 +/* MC_CMD_AOE_IN_CMD_OFST 0 */ +/* MC_CMD_AOE_IN_CMD_LEN 4 */ +#define MC_CMD_AOE_IN_LINK_STATE_MODE_OFST 4 +#define MC_CMD_AOE_IN_LINK_STATE_MODE_LEN 4 +#define MC_CMD_AOE_IN_LINK_STATE_CONFIG_MODE_LBN 0 +#define MC_CMD_AOE_IN_LINK_STATE_CONFIG_MODE_WIDTH 8 +/* enum: AOE and associated external port */ +#define MC_CMD_AOE_IN_LINK_STATE_SIMPLE_SEPARATE 0x0 +/* enum: AOE and OR of all external ports */ +#define MC_CMD_AOE_IN_LINK_STATE_SIMPLE_COMBINED 0x1 +/* enum: Individual ports */ +#define MC_CMD_AOE_IN_LINK_STATE_DIAGNOSTIC 0x2 +/* enum: Configure link state mode on given AOE port */ +#define MC_CMD_AOE_IN_LINK_STATE_CUSTOM 0x3 +#define MC_CMD_AOE_IN_LINK_STATE_OPERATION_LBN 8 +#define MC_CMD_AOE_IN_LINK_STATE_OPERATION_WIDTH 8 +/* enum: No-op */ +#define MC_CMD_AOE_IN_LINK_STATE_OP_NONE 0x0 +/* enum: logical OR of all SFP ports link status */ +#define MC_CMD_AOE_IN_LINK_STATE_OP_OR 0x1 +/* enum: logical AND of all SFP ports link status */ +#define MC_CMD_AOE_IN_LINK_STATE_OP_AND 0x2 +#define MC_CMD_AOE_IN_LINK_STATE_SFP_MASK_LBN 16 +#define MC_CMD_AOE_IN_LINK_STATE_SFP_MASK_WIDTH 16 + +/* MC_CMD_AOE_IN_GET_ASIC_PORTS msgrequest */ +#define MC_CMD_AOE_IN_GET_ASIC_PORTS_LEN 4 +/* MC_CMD_AOE_IN_CMD_OFST 0 */ +/* MC_CMD_AOE_IN_CMD_LEN 4 */ + +/* MC_CMD_AOE_IN_GET_FC_ASSERT_INFO msgrequest */ +#define MC_CMD_AOE_IN_GET_FC_ASSERT_INFO_LEN 4 +/* MC_CMD_AOE_IN_CMD_OFST 0 */ +/* MC_CMD_AOE_IN_CMD_LEN 4 */ + +/* MC_CMD_AOE_IN_SIENA_STATS msgrequest */ +#define MC_CMD_AOE_IN_SIENA_STATS_LEN 8 +/* MC_CMD_AOE_IN_CMD_OFST 0 */ +/* MC_CMD_AOE_IN_CMD_LEN 4 */ +/* How MAC statistics are reported */ +#define MC_CMD_AOE_IN_SIENA_STATS_MODE_OFST 4 +#define MC_CMD_AOE_IN_SIENA_STATS_MODE_LEN 4 +/* enum: Statistics from Siena (default) */ +#define MC_CMD_AOE_IN_SIENA_STATS_STATS_SIENA 0x0 +/* enum: Statistics from AOE external ports */ +#define MC_CMD_AOE_IN_SIENA_STATS_STATS_AOE 0x1 + +/* MC_CMD_AOE_IN_ASIC_STATS msgrequest */ +#define MC_CMD_AOE_IN_ASIC_STATS_LEN 8 +/* MC_CMD_AOE_IN_CMD_OFST 0 */ +/* MC_CMD_AOE_IN_CMD_LEN 4 */ +/* How MAC statistics are reported */ +#define MC_CMD_AOE_IN_ASIC_STATS_MODE_OFST 4 +#define MC_CMD_AOE_IN_ASIC_STATS_MODE_LEN 4 +/* enum: Statistics from the ASIC (default) */ +#define MC_CMD_AOE_IN_ASIC_STATS_STATS_ASIC 0x0 +/* enum: Statistics from AOE external ports */ +#define MC_CMD_AOE_IN_ASIC_STATS_STATS_AOE 0x1 + +/* MC_CMD_AOE_IN_DDR msgrequest */ +#define MC_CMD_AOE_IN_DDR_LEN 12 +/* 
MC_CMD_AOE_IN_CMD_OFST 0 */ +/* MC_CMD_AOE_IN_CMD_LEN 4 */ +#define MC_CMD_AOE_IN_DDR_BANK_OFST 4 +#define MC_CMD_AOE_IN_DDR_BANK_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_FC/MC_CMD_FC_IN_DDR/MC_CMD_FC_IN_DDR_BANK */ +/* Page index of SPD data */ +#define MC_CMD_AOE_IN_DDR_SPD_PAGE_ID_OFST 8 +#define MC_CMD_AOE_IN_DDR_SPD_PAGE_ID_LEN 4 + +/* MC_CMD_AOE_IN_FC msgrequest */ +#define MC_CMD_AOE_IN_FC_LEN 4 +/* MC_CMD_AOE_IN_CMD_OFST 0 */ +/* MC_CMD_AOE_IN_CMD_LEN 4 */ + +/* MC_CMD_AOE_IN_DDR_ECC_STATUS msgrequest */ +#define MC_CMD_AOE_IN_DDR_ECC_STATUS_LEN 8 +/* MC_CMD_AOE_IN_CMD_OFST 0 */ +/* MC_CMD_AOE_IN_CMD_LEN 4 */ +#define MC_CMD_AOE_IN_DDR_ECC_STATUS_BANK_OFST 4 +#define MC_CMD_AOE_IN_DDR_ECC_STATUS_BANK_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_FC/MC_CMD_FC_IN_DDR/MC_CMD_FC_IN_DDR_BANK */ + +/* MC_CMD_AOE_IN_MC_SPI_MASTER msgrequest */ +#define MC_CMD_AOE_IN_MC_SPI_MASTER_LEN 8 +/* MC_CMD_AOE_IN_CMD_OFST 0 */ +/* MC_CMD_AOE_IN_CMD_LEN 4 */ +/* Basic commands for MC SPI Master emulation. */ +#define MC_CMD_AOE_IN_MC_SPI_MASTER_OP_OFST 4 +#define MC_CMD_AOE_IN_MC_SPI_MASTER_OP_LEN 4 +/* enum: MC SPI read */ +#define MC_CMD_AOE_IN_MC_SPI_MASTER_READ 0x0 +/* enum: MC SPI write */ +#define MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE 0x1 + +/* MC_CMD_AOE_IN_MC_SPI_MASTER_READ msgrequest */ +#define MC_CMD_AOE_IN_MC_SPI_MASTER_READ_LEN 12 +/* MC_CMD_AOE_IN_CMD_OFST 0 */ +/* MC_CMD_AOE_IN_CMD_LEN 4 */ +#define MC_CMD_AOE_IN_MC_SPI_MASTER_READ_OP_OFST 4 +#define MC_CMD_AOE_IN_MC_SPI_MASTER_READ_OP_LEN 4 +#define MC_CMD_AOE_IN_MC_SPI_MASTER_READ_OFFSET_OFST 8 +#define MC_CMD_AOE_IN_MC_SPI_MASTER_READ_OFFSET_LEN 4 + +/* MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE msgrequest */ +#define MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE_LEN 16 +/* MC_CMD_AOE_IN_CMD_OFST 0 */ +/* MC_CMD_AOE_IN_CMD_LEN 4 */ +#define MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE_OP_OFST 4 +#define MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE_OP_LEN 4 +#define MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE_OFFSET_OFST 8 +#define MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE_OFFSET_LEN 4 +#define MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE_DATA_OFST 12 +#define MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE_DATA_LEN 4 + +/* MC_CMD_AOE_IN_FC_BOOT msgrequest */ +#define MC_CMD_AOE_IN_FC_BOOT_LEN 8 +/* MC_CMD_AOE_IN_CMD_OFST 0 */ +/* MC_CMD_AOE_IN_CMD_LEN 4 */ +/* FC boot control flags */ +#define MC_CMD_AOE_IN_FC_BOOT_CONTROL_OFST 4 +#define MC_CMD_AOE_IN_FC_BOOT_CONTROL_LEN 4 +#define MC_CMD_AOE_IN_FC_BOOT_CONTROL_BOOT_ENABLE_LBN 0 +#define MC_CMD_AOE_IN_FC_BOOT_CONTROL_BOOT_ENABLE_WIDTH 1 + +/* MC_CMD_AOE_IN_MUM_STARTUP_FUSE msgrequest: On AOE2, set MUM startup FUSE + * byte with extended delay of 64ms. On some servers with noisy power rails, + * this ensures that the MUM IO pins do not show spurious transitions while the + * power rails are stabilising. Note that this operation requires a hard- + * powercycle to take effect. See bug76446. + */ +#define MC_CMD_AOE_IN_MUM_STARTUP_FUSE_LEN 4 +/* Must be MC_CMD_AOE_OP_MUM_STARTUP_FUSE */ +/* MC_CMD_AOE_IN_CMD_OFST 0 */ +/* MC_CMD_AOE_IN_CMD_LEN 4 */ + +/* MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO msgresponse */ +#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_LEN 144 +/* Assertion status flag. 
*/ +#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_GLOBAL_FLAGS_OFST 0 +#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_GLOBAL_FLAGS_LEN 4 +#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_STATE_LBN 8 +#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_STATE_WIDTH 8 +/* enum: No crash data available */ +/* MC_CMD_FC_GET_ASSERT_FLAGS_STATE_CLEAR 0x0 */ +/* enum: New crash data available */ +/* MC_CMD_FC_GET_ASSERT_FLAGS_STATE_NEW 0x1 */ +/* enum: Crash data has been sent */ +/* MC_CMD_FC_GET_ASSERT_FLAGS_STATE_NOTIFIED 0x2 */ +#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_TYPE_LBN 0 +#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_TYPE_WIDTH 8 +/* enum: No crash has been recorded. */ +/* MC_CMD_FC_GET_ASSERT_FLAGS_TYPE_NONE 0x0 */ +/* enum: Crash due to exception. */ +/* MC_CMD_FC_GET_ASSERT_FLAGS_TYPE_EXCEPTION 0x1 */ +/* enum: Crash due to assertion. */ +/* MC_CMD_FC_GET_ASSERT_FLAGS_TYPE_ASSERTION 0x2 */ +/* Failing PC value */ +#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_SAVED_PC_OFFS_OFST 4 +#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_SAVED_PC_OFFS_LEN 4 +/* Saved GP regs */ +#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_GP_REGS_OFFS_OFST 8 +#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_GP_REGS_OFFS_LEN 4 +#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_GP_REGS_OFFS_NUM 31 +/* Exception Type */ +#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_EXCEPTION_TYPE_OFFS_OFST 132 +#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_EXCEPTION_TYPE_OFFS_LEN 4 +/* Instruction at which exception occurred */ +#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_EXCEPTION_PC_ADDR_OFFS_OFST 136 +#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_EXCEPTION_PC_ADDR_OFFS_LEN 4 +/* BAD Address that triggered address-based exception */ +#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_EXCEPTION_BAD_ADDR_OFFS_OFST 140 +#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_EXCEPTION_BAD_ADDR_OFFS_LEN 4 + +/* MC_CMD_AOE_OUT_INFO msgresponse */ +#define MC_CMD_AOE_OUT_INFO_LEN 44 +/* JTAG IDCODE of CPLD */ +#define MC_CMD_AOE_OUT_INFO_CPLD_IDCODE_OFST 0 +#define MC_CMD_AOE_OUT_INFO_CPLD_IDCODE_LEN 4 +/* Version of CPLD */ +#define MC_CMD_AOE_OUT_INFO_CPLD_VERSION_OFST 4 +#define MC_CMD_AOE_OUT_INFO_CPLD_VERSION_LEN 4 +/* JTAG IDCODE of FPGA */ +#define MC_CMD_AOE_OUT_INFO_FPGA_IDCODE_OFST 8 +#define MC_CMD_AOE_OUT_INFO_FPGA_IDCODE_LEN 4 +/* JTAG USERCODE of FPGA */ +#define MC_CMD_AOE_OUT_INFO_FPGA_VERSION_OFST 12 +#define MC_CMD_AOE_OUT_INFO_FPGA_VERSION_LEN 4 +/* FPGA type - read from CPLD straps */ +#define MC_CMD_AOE_OUT_INFO_FPGA_TYPE_OFST 16 +#define MC_CMD_AOE_OUT_INFO_FPGA_TYPE_LEN 4 +#define MC_CMD_AOE_OUT_INFO_FPGA_TYPE_A5_C2 0x1 /* enum */ +#define MC_CMD_AOE_OUT_INFO_FPGA_TYPE_A7_C2 0x2 /* enum */ +/* FPGA state (debug) */ +#define MC_CMD_AOE_OUT_INFO_FPGA_STATE_OFST 20 +#define MC_CMD_AOE_OUT_INFO_FPGA_STATE_LEN 4 +/* FPGA image - partition from which loaded */ +#define MC_CMD_AOE_OUT_INFO_FPGA_IMAGE_OFST 24 +#define MC_CMD_AOE_OUT_INFO_FPGA_IMAGE_LEN 4 +/* FC state */ +#define MC_CMD_AOE_OUT_INFO_FC_STATE_OFST 28 +#define MC_CMD_AOE_OUT_INFO_FC_STATE_LEN 4 +/* enum: Set if watchdog working */ +#define MC_CMD_AOE_OUT_INFO_WATCHDOG 0x1 +/* enum: Set if MC-FC communications working */ +#define MC_CMD_AOE_OUT_INFO_COMMS 0x2 +/* Random pieces of information */ +#define MC_CMD_AOE_OUT_INFO_FLAGS_OFST 32 +#define MC_CMD_AOE_OUT_INFO_FLAGS_LEN 4 +/* enum: Power to FPGA supplied by PEG connector, not PCIe bus */ +#define MC_CMD_AOE_OUT_INFO_PEG_POWER 0x1 +/* enum: CPLD apparently good */ +#define MC_CMD_AOE_OUT_INFO_CPLD_GOOD 0x2 +/* enum: FPGA working normally */ +#define MC_CMD_AOE_OUT_INFO_FPGA_GOOD 0x4 +/* 
enum: FPGA is powered */ +#define MC_CMD_AOE_OUT_INFO_FPGA_POWER 0x8 +/* enum: Board has incompatible SODIMMs fitted */ +#define MC_CMD_AOE_OUT_INFO_BAD_SODIMM 0x10 +/* enum: Board has ByteBlaster connected */ +#define MC_CMD_AOE_OUT_INFO_HAS_BYTEBLASTER 0x20 +/* enum: FPGA Boot flash has an invalid header. */ +#define MC_CMD_AOE_OUT_INFO_FPGA_BAD_BOOT_HDR 0x40 +/* enum: FPGA Application flash is accessible. */ +#define MC_CMD_AOE_OUT_INFO_FPGA_APP_FLASH_GOOD 0x80 +/* Revision of Modena and Sorrento boards. Sorrento can be R1_2 or R1_3. */ +#define MC_CMD_AOE_OUT_INFO_BOARD_REVISION_OFST 36 +#define MC_CMD_AOE_OUT_INFO_BOARD_REVISION_LEN 4 +#define MC_CMD_AOE_OUT_INFO_UNKNOWN 0x0 /* enum */ +#define MC_CMD_AOE_OUT_INFO_R1_0 0x10 /* enum */ +#define MC_CMD_AOE_OUT_INFO_R1_1 0x11 /* enum */ +#define MC_CMD_AOE_OUT_INFO_R1_2 0x12 /* enum */ +#define MC_CMD_AOE_OUT_INFO_R1_3 0x13 /* enum */ +/* Result of FC booting - not valid while a ByteBlaster is connected. */ +#define MC_CMD_AOE_OUT_INFO_FC_BOOT_RESULT_OFST 40 +#define MC_CMD_AOE_OUT_INFO_FC_BOOT_RESULT_LEN 4 +/* enum: No error */ +#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_NO_ERROR 0x0 +/* enum: Bad address set in CPLD */ +#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_BAD_ADDRESS 0x1 +/* enum: Bad header */ +#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_BAD_MAGIC 0x2 +/* enum: Bad text section details */ +#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_BAD_TEXT 0x3 +/* enum: Bad checksum */ +#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_BAD_CHECKSUM 0x4 +/* enum: Bad BSP */ +#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_BAD_BSP 0x5 +/* enum: Flash mode is invalid */ +#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_INVALID_FLASH_MODE 0x6 +/* enum: FC application loaded and execution attempted */ +#define MC_CMD_AOE_OUT_INFO_FC_BOOT_APP_EXECUTE 0x80 +/* enum: FC application Started */ +#define MC_CMD_AOE_OUT_INFO_FC_BOOT_APP_STARTED 0x81 +/* enum: No bootrom in FPGA */ +#define MC_CMD_AOE_OUT_INFO_FC_BOOT_NO_BOOTROM 0xff + +/* MC_CMD_AOE_OUT_CURRENTS msgresponse */ +#define MC_CMD_AOE_OUT_CURRENTS_LEN 68 +/* Set of currents and voltages (mA or mV as appropriate) */ +#define MC_CMD_AOE_OUT_CURRENTS_VALUES_OFST 0 +#define MC_CMD_AOE_OUT_CURRENTS_VALUES_LEN 4 +#define MC_CMD_AOE_OUT_CURRENTS_VALUES_NUM 17 +#define MC_CMD_AOE_OUT_CURRENTS_I_2V5 0x0 /* enum */ +#define MC_CMD_AOE_OUT_CURRENTS_I_1V8 0x1 /* enum */ +#define MC_CMD_AOE_OUT_CURRENTS_I_GXB 0x2 /* enum */ +#define MC_CMD_AOE_OUT_CURRENTS_I_PGM 0x3 /* enum */ +#define MC_CMD_AOE_OUT_CURRENTS_I_XCVR 0x4 /* enum */ +#define MC_CMD_AOE_OUT_CURRENTS_I_1V5 0x5 /* enum */ +#define MC_CMD_AOE_OUT_CURRENTS_V_3V3 0x6 /* enum */ +#define MC_CMD_AOE_OUT_CURRENTS_V_1V5 0x7 /* enum */ +#define MC_CMD_AOE_OUT_CURRENTS_I_IN 0x8 /* enum */ +#define MC_CMD_AOE_OUT_CURRENTS_I_OUT 0x9 /* enum */ +#define MC_CMD_AOE_OUT_CURRENTS_V_IN 0xa /* enum */ +#define MC_CMD_AOE_OUT_CURRENTS_I_OUT_DDR1 0xb /* enum */ +#define MC_CMD_AOE_OUT_CURRENTS_V_OUT_DDR1 0xc /* enum */ +#define MC_CMD_AOE_OUT_CURRENTS_I_OUT_DDR2 0xd /* enum */ +#define MC_CMD_AOE_OUT_CURRENTS_V_OUT_DDR2 0xe /* enum */ +#define MC_CMD_AOE_OUT_CURRENTS_I_OUT_DDR3 0xf /* enum */ +#define MC_CMD_AOE_OUT_CURRENTS_V_OUT_DDR3 0x10 /* enum */ + +/* MC_CMD_AOE_OUT_TEMPERATURES msgresponse */ +#define MC_CMD_AOE_OUT_TEMPERATURES_LEN 40 +/* Set of temperatures */ +#define MC_CMD_AOE_OUT_TEMPERATURES_VALUES_OFST 0 +#define MC_CMD_AOE_OUT_TEMPERATURES_VALUES_LEN 4 +#define MC_CMD_AOE_OUT_TEMPERATURES_VALUES_NUM 10 +/* enum: The first set of enum values are for Modena code. 
*/ +#define MC_CMD_AOE_OUT_TEMPERATURES_MAIN_0 0x0 +#define MC_CMD_AOE_OUT_TEMPERATURES_MAIN_1 0x1 /* enum */ +#define MC_CMD_AOE_OUT_TEMPERATURES_IND_0 0x2 /* enum */ +#define MC_CMD_AOE_OUT_TEMPERATURES_IND_1 0x3 /* enum */ +#define MC_CMD_AOE_OUT_TEMPERATURES_VCCIO1 0x4 /* enum */ +#define MC_CMD_AOE_OUT_TEMPERATURES_VCCIO2 0x5 /* enum */ +#define MC_CMD_AOE_OUT_TEMPERATURES_VCCIO3 0x6 /* enum */ +#define MC_CMD_AOE_OUT_TEMPERATURES_PSU 0x7 /* enum */ +#define MC_CMD_AOE_OUT_TEMPERATURES_FPGA 0x8 /* enum */ +#define MC_CMD_AOE_OUT_TEMPERATURES_SIENA 0x9 /* enum */ +/* enum: The second set of enum values are for Sorrento code. */ +#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_MAIN_0 0x0 +#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_MAIN_1 0x1 /* enum */ +#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_IND_0 0x2 /* enum */ +#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_IND_1 0x3 /* enum */ +#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_SODIMM_0 0x4 /* enum */ +#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_SODIMM_1 0x5 /* enum */ +#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_FPGA 0x6 /* enum */ +#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_PHY0 0x7 /* enum */ +#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_PHY1 0x8 /* enum */ + +/* MC_CMD_AOE_OUT_CPLD_READ msgresponse */ +#define MC_CMD_AOE_OUT_CPLD_READ_LEN 4 +/* The value read from the CPLD */ +#define MC_CMD_AOE_OUT_CPLD_READ_VALUE_OFST 0 +#define MC_CMD_AOE_OUT_CPLD_READ_VALUE_LEN 4 + +/* MC_CMD_AOE_OUT_FAN_FAILURES msgresponse */ +#define MC_CMD_AOE_OUT_FAN_FAILURES_LENMIN 4 +#define MC_CMD_AOE_OUT_FAN_FAILURES_LENMAX 252 +#define MC_CMD_AOE_OUT_FAN_FAILURES_LENMAX_MCDI2 1020 +#define MC_CMD_AOE_OUT_FAN_FAILURES_LEN(num) (0+4*(num)) +#define MC_CMD_AOE_OUT_FAN_FAILURES_COUNT_NUM(len) (((len)-0)/4) +/* Failure counts for each fan */ +#define MC_CMD_AOE_OUT_FAN_FAILURES_COUNT_OFST 0 +#define MC_CMD_AOE_OUT_FAN_FAILURES_COUNT_LEN 4 +#define MC_CMD_AOE_OUT_FAN_FAILURES_COUNT_MINNUM 1 +#define MC_CMD_AOE_OUT_FAN_FAILURES_COUNT_MAXNUM 63 +#define MC_CMD_AOE_OUT_FAN_FAILURES_COUNT_MAXNUM_MCDI2 255 + +/* MC_CMD_AOE_OUT_CPLD_REPROGRAM msgresponse */ +#define MC_CMD_AOE_OUT_CPLD_REPROGRAM_LEN 4 +/* Results of status command (only) */ +#define MC_CMD_AOE_OUT_CPLD_REPROGRAM_STATUS_OFST 0 +#define MC_CMD_AOE_OUT_CPLD_REPROGRAM_STATUS_LEN 4 + +/* MC_CMD_AOE_OUT_POWER_OFF msgresponse */ +#define MC_CMD_AOE_OUT_POWER_OFF_LEN 0 + +/* MC_CMD_AOE_OUT_POWER_ON msgresponse */ +#define MC_CMD_AOE_OUT_POWER_ON_LEN 0 + +/* MC_CMD_AOE_OUT_LOAD msgresponse */ +#define MC_CMD_AOE_OUT_LOAD_LEN 0 + +/* MC_CMD_AOE_OUT_MAC_STATS_DMA msgresponse */ +#define MC_CMD_AOE_OUT_MAC_STATS_DMA_LEN 0 + +/* MC_CMD_AOE_OUT_MAC_STATS_NO_DMA msgresponse: See MC_CMD_MAC_STATS_OUT_NO_DMA + * for details + */ +#define MC_CMD_AOE_OUT_MAC_STATS_NO_DMA_LEN (((MC_CMD_MAC_NSTATS*64))>>3) +#define MC_CMD_AOE_OUT_MAC_STATS_NO_DMA_STATISTICS_OFST 0 +#define MC_CMD_AOE_OUT_MAC_STATS_NO_DMA_STATISTICS_LEN 8 +#define MC_CMD_AOE_OUT_MAC_STATS_NO_DMA_STATISTICS_LO_OFST 0 +#define MC_CMD_AOE_OUT_MAC_STATS_NO_DMA_STATISTICS_HI_OFST 4 +#define MC_CMD_AOE_OUT_MAC_STATS_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS + +/* MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO msgresponse */ +#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_LENMIN 5 +#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_LENMAX 252 +#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_LENMAX_MCDI2 1020 +#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_LEN(num) (4+1*(num)) +#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_DATA_NUM(len) (((len)-4)/1) +/* in bytes */ +#define 
MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_DATALEN_OFST 0 +#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_DATALEN_LEN 4 +#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_DATA_OFST 4 +#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_DATA_LEN 1 +#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_DATA_MINNUM 1 +#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_DATA_MAXNUM 248 +#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_DATA_MAXNUM_MCDI2 1016 + +/* MC_CMD_AOE_OUT_JTAG_WRITE msgresponse */ +#define MC_CMD_AOE_OUT_JTAG_WRITE_LENMIN 12 +#define MC_CMD_AOE_OUT_JTAG_WRITE_LENMAX 252 +#define MC_CMD_AOE_OUT_JTAG_WRITE_LENMAX_MCDI2 1020 +#define MC_CMD_AOE_OUT_JTAG_WRITE_LEN(num) (8+4*(num)) +#define MC_CMD_AOE_OUT_JTAG_WRITE_DATA_NUM(len) (((len)-8)/4) +/* Used to align the in and out data blocks so the MC can re-use the cmd */ +#define MC_CMD_AOE_OUT_JTAG_WRITE_DATALEN_OFST 0 +#define MC_CMD_AOE_OUT_JTAG_WRITE_DATALEN_LEN 4 +/* out bytes */ +#define MC_CMD_AOE_OUT_JTAG_WRITE_PAD_OFST 4 +#define MC_CMD_AOE_OUT_JTAG_WRITE_PAD_LEN 4 +#define MC_CMD_AOE_OUT_JTAG_WRITE_DATA_OFST 8 +#define MC_CMD_AOE_OUT_JTAG_WRITE_DATA_LEN 4 +#define MC_CMD_AOE_OUT_JTAG_WRITE_DATA_MINNUM 1 +#define MC_CMD_AOE_OUT_JTAG_WRITE_DATA_MAXNUM 61 +#define MC_CMD_AOE_OUT_JTAG_WRITE_DATA_MAXNUM_MCDI2 253 + +/* MC_CMD_AOE_OUT_FPGA_ACCESS msgresponse */ +#define MC_CMD_AOE_OUT_FPGA_ACCESS_LEN 0 + +/* MC_CMD_AOE_OUT_DDR msgresponse */ +#define MC_CMD_AOE_OUT_DDR_LENMIN 17 +#define MC_CMD_AOE_OUT_DDR_LENMAX 252 +#define MC_CMD_AOE_OUT_DDR_LENMAX_MCDI2 1020 +#define MC_CMD_AOE_OUT_DDR_LEN(num) (16+1*(num)) +#define MC_CMD_AOE_OUT_DDR_SPD_NUM(len) (((len)-16)/1) +/* Information on the module. */ +#define MC_CMD_AOE_OUT_DDR_FLAGS_OFST 0 +#define MC_CMD_AOE_OUT_DDR_FLAGS_LEN 4 +#define MC_CMD_AOE_OUT_DDR_PRESENT_LBN 0 +#define MC_CMD_AOE_OUT_DDR_PRESENT_WIDTH 1 +#define MC_CMD_AOE_OUT_DDR_POWERED_LBN 1 +#define MC_CMD_AOE_OUT_DDR_POWERED_WIDTH 1 +#define MC_CMD_AOE_OUT_DDR_OPERATIONAL_LBN 2 +#define MC_CMD_AOE_OUT_DDR_OPERATIONAL_WIDTH 1 +#define MC_CMD_AOE_OUT_DDR_NOT_REACHABLE_LBN 3 +#define MC_CMD_AOE_OUT_DDR_NOT_REACHABLE_WIDTH 1 +/* Memory size, in MB. */ +#define MC_CMD_AOE_OUT_DDR_CAPACITY_OFST 4 +#define MC_CMD_AOE_OUT_DDR_CAPACITY_LEN 4 +/* The memory type, as reported from SPD information */ +#define MC_CMD_AOE_OUT_DDR_TYPE_OFST 8 +#define MC_CMD_AOE_OUT_DDR_TYPE_LEN 4 +/* Nominal voltage of the module (as applied) */ +#define MC_CMD_AOE_OUT_DDR_VOLTAGE_OFST 12 +#define MC_CMD_AOE_OUT_DDR_VOLTAGE_LEN 4 +/* SPD data read from the module */ +#define MC_CMD_AOE_OUT_DDR_SPD_OFST 16 +#define MC_CMD_AOE_OUT_DDR_SPD_LEN 1 +#define MC_CMD_AOE_OUT_DDR_SPD_MINNUM 1 +#define MC_CMD_AOE_OUT_DDR_SPD_MAXNUM 236 +#define MC_CMD_AOE_OUT_DDR_SPD_MAXNUM_MCDI2 1004 + +/* MC_CMD_AOE_OUT_SET_MTU_OFFSET msgresponse */ +#define MC_CMD_AOE_OUT_SET_MTU_OFFSET_LEN 0 + +/* MC_CMD_AOE_OUT_LINK_STATE msgresponse */ +#define MC_CMD_AOE_OUT_LINK_STATE_LEN 0 + +/* MC_CMD_AOE_OUT_SIENA_STATS msgresponse */ +#define MC_CMD_AOE_OUT_SIENA_STATS_LEN 0 + +/* MC_CMD_AOE_OUT_ASIC_STATS msgresponse */ +#define MC_CMD_AOE_OUT_ASIC_STATS_LEN 0 + +/* MC_CMD_AOE_OUT_FC msgresponse */ +#define MC_CMD_AOE_OUT_FC_LEN 0 + +/* MC_CMD_AOE_OUT_GET_ASIC_PORTS msgresponse */ +#define MC_CMD_AOE_OUT_GET_ASIC_PORTS_LEN 4 +/* get the number of internal ports */ +#define MC_CMD_AOE_OUT_GET_ASIC_PORTS_COUNT_PORTS_OFST 0 +#define MC_CMD_AOE_OUT_GET_ASIC_PORTS_COUNT_PORTS_LEN 4 + +/* MC_CMD_AOE_OUT_DDR_ECC_STATUS msgresponse */ +#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_LEN 8 +/* Flags describing status info on the module. 
*/ +#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_FLAGS_OFST 0 +#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_FLAGS_LEN 4 +#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_VALID_LBN 0 +#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_VALID_WIDTH 1 +/* DDR ECC status on the module. */ +#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_STATUS_OFST 4 +#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_STATUS_LEN 4 +#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_SBE_LBN 0 +#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_SBE_WIDTH 1 +#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_DBE_LBN 1 +#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_DBE_WIDTH 1 +#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_CORDROP_LBN 2 +#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_CORDROP_WIDTH 1 +#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_SBE_COUNT_LBN 8 +#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_SBE_COUNT_WIDTH 8 +#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_DBE_COUNT_LBN 16 +#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_DBE_COUNT_WIDTH 8 +#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_CORDROP_COUNT_LBN 24 +#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_CORDROP_COUNT_WIDTH 8 + +/* MC_CMD_AOE_OUT_MC_SPI_MASTER_READ msgresponse */ +#define MC_CMD_AOE_OUT_MC_SPI_MASTER_READ_LEN 4 +#define MC_CMD_AOE_OUT_MC_SPI_MASTER_READ_DATA_OFST 0 +#define MC_CMD_AOE_OUT_MC_SPI_MASTER_READ_DATA_LEN 4 + +/* MC_CMD_AOE_OUT_MC_SPI_MASTER_WRITE msgresponse */ +#define MC_CMD_AOE_OUT_MC_SPI_MASTER_WRITE_LEN 0 + +/* MC_CMD_AOE_OUT_MC_SPI_MASTER msgresponse */ +#define MC_CMD_AOE_OUT_MC_SPI_MASTER_LEN 0 + +/* MC_CMD_AOE_OUT_FC_BOOT msgresponse */ +#define MC_CMD_AOE_OUT_FC_BOOT_LEN 0 + +/* MC_CMD_AOE_OUT_MUM_STARTUP_FUSE msgresponse */ +#define MC_CMD_AOE_OUT_MUM_STARTUP_FUSE_LEN 4 +/* Current value of startup FUSE byte (fusebyte#4) read back after the update + * operation. + */ +#define MC_CMD_AOE_OUT_MUM_STARTUP_FUSE_READBACK_VALUE_OFST 0 +#define MC_CMD_AOE_OUT_MUM_STARTUP_FUSE_READBACK_VALUE_LEN 4 + +#endif /* _SIENA_MC_DRIVER_PCOL_AOE_H */ diff --git a/src/spdk/dpdk/drivers/net/sfc/base/efx_regs_mcdi_strs.h b/src/spdk/dpdk/drivers/net/sfc/base/efx_regs_mcdi_strs.h new file mode 100644 index 000000000..5209b43ac --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/base/efx_regs_mcdi_strs.h @@ -0,0 +1,102 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2008-2019 Solarflare Communications Inc. + */ + +/* + * This file is automatically generated. DO NOT EDIT IT. + * To make changes, edit the .yml files in sfregistry under doc/mcdi/ and + * rebuild this file with "make -C doc mcdiheaders". + * + * The version of this file has MCDI strings really used in the libefx. 
+ */ + +#ifndef _SIENA_MC_DRIVER_PCOL_STRS_H +#define _SIENA_MC_DRIVER_PCOL_STRS_H + +#define MC_CMD_SENSOR_CONTROLLER_TEMP_ENUM_STR "Controller temperature: degC" +#define MC_CMD_SENSOR_PHY_COMMON_TEMP_ENUM_STR "Phy common temperature: degC" +#define MC_CMD_SENSOR_CONTROLLER_COOLING_ENUM_STR "Controller cooling: bool" +#define MC_CMD_SENSOR_PHY0_TEMP_ENUM_STR "Phy 0 temperature: degC" +#define MC_CMD_SENSOR_PHY0_COOLING_ENUM_STR "Phy 0 cooling: bool" +#define MC_CMD_SENSOR_PHY1_TEMP_ENUM_STR "Phy 1 temperature: degC" +#define MC_CMD_SENSOR_PHY1_COOLING_ENUM_STR "Phy 1 cooling: bool" +#define MC_CMD_SENSOR_IN_1V0_ENUM_STR "1.0v power: mV" +#define MC_CMD_SENSOR_IN_1V2_ENUM_STR "1.2v power: mV" +#define MC_CMD_SENSOR_IN_1V8_ENUM_STR "1.8v power: mV" +#define MC_CMD_SENSOR_IN_2V5_ENUM_STR "2.5v power: mV" +#define MC_CMD_SENSOR_IN_3V3_ENUM_STR "3.3v power: mV" +#define MC_CMD_SENSOR_IN_12V0_ENUM_STR "12v power: mV" +#define MC_CMD_SENSOR_IN_1V2A_ENUM_STR "1.2v analogue power: mV" +#define MC_CMD_SENSOR_IN_VREF_ENUM_STR "reference voltage: mV" +#define MC_CMD_SENSOR_OUT_VAOE_ENUM_STR "AOE FPGA power: mV" +#define MC_CMD_SENSOR_AOE_TEMP_ENUM_STR "AOE FPGA temperature: degC" +#define MC_CMD_SENSOR_PSU_AOE_TEMP_ENUM_STR "AOE FPGA PSU temperature: degC" +#define MC_CMD_SENSOR_PSU_TEMP_ENUM_STR "AOE PSU temperature: degC" +#define MC_CMD_SENSOR_FAN_0_ENUM_STR "Fan 0 speed: RPM" +#define MC_CMD_SENSOR_FAN_1_ENUM_STR "Fan 1 speed: RPM" +#define MC_CMD_SENSOR_FAN_2_ENUM_STR "Fan 2 speed: RPM" +#define MC_CMD_SENSOR_FAN_3_ENUM_STR "Fan 3 speed: RPM" +#define MC_CMD_SENSOR_FAN_4_ENUM_STR "Fan 4 speed: RPM" +#define MC_CMD_SENSOR_IN_VAOE_ENUM_STR "AOE FPGA input power: mV" +#define MC_CMD_SENSOR_OUT_IAOE_ENUM_STR "AOE FPGA current: mA" +#define MC_CMD_SENSOR_IN_IAOE_ENUM_STR "AOE FPGA input current: mA" +#define MC_CMD_SENSOR_NIC_POWER_ENUM_STR "NIC power consumption: W" +#define MC_CMD_SENSOR_IN_0V9_ENUM_STR "0.9v power voltage: mV" +#define MC_CMD_SENSOR_IN_I0V9_ENUM_STR "0.9v power current: mA" +#define MC_CMD_SENSOR_IN_I1V2_ENUM_STR "1.2v power current: mA" +#define MC_CMD_SENSOR_PAGE0_NEXT_ENUM_STR "Not a sensor: reserved for the next page flag" +#define MC_CMD_SENSOR_IN_0V9_ADC_ENUM_STR "0.9v power voltage (at ADC): mV" +#define MC_CMD_SENSOR_CONTROLLER_2_TEMP_ENUM_STR "Controller temperature 2: degC" +#define MC_CMD_SENSOR_VREG_INTERNAL_TEMP_ENUM_STR "Voltage regulator internal temperature: degC" +#define MC_CMD_SENSOR_VREG_0V9_TEMP_ENUM_STR "0.9V voltage regulator temperature: degC" +#define MC_CMD_SENSOR_VREG_1V2_TEMP_ENUM_STR "1.2V voltage regulator temperature: degC" +#define MC_CMD_SENSOR_CONTROLLER_VPTAT_ENUM_STR "controller internal temperature sensor voltage (internal ADC): mV" +#define MC_CMD_SENSOR_CONTROLLER_INTERNAL_TEMP_ENUM_STR "controller internal temperature (internal ADC): degC" +#define MC_CMD_SENSOR_CONTROLLER_VPTAT_EXTADC_ENUM_STR "controller internal temperature sensor voltage (external ADC): mV" +#define MC_CMD_SENSOR_CONTROLLER_INTERNAL_TEMP_EXTADC_ENUM_STR "controller internal temperature (external ADC): degC" +#define MC_CMD_SENSOR_AMBIENT_TEMP_ENUM_STR "ambient temperature: degC" +#define MC_CMD_SENSOR_AIRFLOW_ENUM_STR "air flow: bool" +#define MC_CMD_SENSOR_VDD08D_VSS08D_CSR_ENUM_STR "voltage between VSS08D and VSS08D at CSR: mV" +#define MC_CMD_SENSOR_VDD08D_VSS08D_CSR_EXTADC_ENUM_STR "voltage between VSS08D and VSS08D at CSR (external ADC): mV" +#define MC_CMD_SENSOR_HOTPOINT_TEMP_ENUM_STR "Hotpoint temperature: degC" +#define MC_CMD_SENSOR_PHY_POWER_PORT0_ENUM_STR 
"Port 0 PHY power switch over-current: bool" +#define MC_CMD_SENSOR_PHY_POWER_PORT1_ENUM_STR "Port 1 PHY power switch over-current: bool" +#define MC_CMD_SENSOR_MUM_VCC_ENUM_STR "Mop-up microcontroller reference voltage: mV" +#define MC_CMD_SENSOR_IN_0V9_A_ENUM_STR "0.9v power phase A voltage: mV" +#define MC_CMD_SENSOR_IN_I0V9_A_ENUM_STR "0.9v power phase A current: mA" +#define MC_CMD_SENSOR_VREG_0V9_A_TEMP_ENUM_STR "0.9V voltage regulator phase A temperature: degC" +#define MC_CMD_SENSOR_IN_0V9_B_ENUM_STR "0.9v power phase B voltage: mV" +#define MC_CMD_SENSOR_IN_I0V9_B_ENUM_STR "0.9v power phase B current: mA" +#define MC_CMD_SENSOR_VREG_0V9_B_TEMP_ENUM_STR "0.9V voltage regulator phase B temperature: degC" +#define MC_CMD_SENSOR_CCOM_AVREG_1V2_SUPPLY_ENUM_STR "CCOM AVREG 1v2 supply (interval ADC): mV" +#define MC_CMD_SENSOR_CCOM_AVREG_1V2_SUPPLY_EXTADC_ENUM_STR "CCOM AVREG 1v2 supply (external ADC): mV" +#define MC_CMD_SENSOR_CCOM_AVREG_1V8_SUPPLY_ENUM_STR "CCOM AVREG 1v8 supply (interval ADC): mV" +#define MC_CMD_SENSOR_CCOM_AVREG_1V8_SUPPLY_EXTADC_ENUM_STR "CCOM AVREG 1v8 supply (external ADC): mV" +#define MC_CMD_SENSOR_CONTROLLER_RTS_ENUM_STR "CCOM RTS temperature: degC" +#define MC_CMD_SENSOR_PAGE1_NEXT_ENUM_STR "Not a sensor: reserved for the next page flag" +#define MC_CMD_SENSOR_CONTROLLER_MASTER_VPTAT_ENUM_STR "controller internal temperature sensor voltage on master core (internal ADC): mV" +#define MC_CMD_SENSOR_CONTROLLER_MASTER_INTERNAL_TEMP_ENUM_STR "controller internal temperature on master core (internal ADC): degC" +#define MC_CMD_SENSOR_CONTROLLER_MASTER_VPTAT_EXTADC_ENUM_STR "controller internal temperature sensor voltage on master core (external ADC): mV" +#define MC_CMD_SENSOR_CONTROLLER_MASTER_INTERNAL_TEMP_EXTADC_ENUM_STR "controller internal temperature on master core (external ADC): degC" +#define MC_CMD_SENSOR_CONTROLLER_SLAVE_VPTAT_ENUM_STR "controller internal temperature on slave core sensor voltage (internal ADC): mV" +#define MC_CMD_SENSOR_CONTROLLER_SLAVE_INTERNAL_TEMP_ENUM_STR "controller internal temperature on slave core (internal ADC): degC" +#define MC_CMD_SENSOR_CONTROLLER_SLAVE_VPTAT_EXTADC_ENUM_STR "controller internal temperature on slave core sensor voltage (external ADC): mV" +#define MC_CMD_SENSOR_CONTROLLER_SLAVE_INTERNAL_TEMP_EXTADC_ENUM_STR "controller internal temperature on slave core (external ADC): degC" +#define MC_CMD_SENSOR_SODIMM_VOUT_ENUM_STR "Voltage supplied to the SODIMMs from their power supply: mV" +#define MC_CMD_SENSOR_SODIMM_0_TEMP_ENUM_STR "Temperature of SODIMM 0 (if installed): degC" +#define MC_CMD_SENSOR_SODIMM_1_TEMP_ENUM_STR "Temperature of SODIMM 1 (if installed): degC" +#define MC_CMD_SENSOR_PHY0_VCC_ENUM_STR "Voltage supplied to the QSFP #0 from their power supply: mV" +#define MC_CMD_SENSOR_PHY1_VCC_ENUM_STR "Voltage supplied to the QSFP #1 from their power supply: mV" +#define MC_CMD_SENSOR_CONTROLLER_TDIODE_TEMP_ENUM_STR "Controller die temperature (TDIODE): degC" +#define MC_CMD_SENSOR_BOARD_FRONT_TEMP_ENUM_STR "Board temperature (front): degC" +#define MC_CMD_SENSOR_BOARD_BACK_TEMP_ENUM_STR "Board temperature (back): degC" +#define MC_CMD_SENSOR_IN_I1V8_ENUM_STR "1.8v power current: mA" +#define MC_CMD_SENSOR_IN_I2V5_ENUM_STR "2.5v power current: mA" +#define MC_CMD_SENSOR_IN_I3V3_ENUM_STR "3.3v power current: mA" +#define MC_CMD_SENSOR_IN_I12V0_ENUM_STR "12v power current: mA" +#define MC_CMD_SENSOR_IN_1V3_ENUM_STR "1.3v power: mV" +#define MC_CMD_SENSOR_IN_I1V3_ENUM_STR "1.3v power current: mA" + +#endif 
/* _SIENA_MC_DRIVER_PCOL_STRS_H */ diff --git a/src/spdk/dpdk/drivers/net/sfc/base/efx_regs_pci.h b/src/spdk/dpdk/drivers/net/sfc/base/efx_regs_pci.h new file mode 100644 index 000000000..e2ba6129e --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/base/efx_regs_pci.h @@ -0,0 +1,2332 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2007-2019 Solarflare Communications Inc. + */ + +#ifndef _SYS_EFX_REGS_PCI_H +#define _SYS_EFX_REGS_PCI_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * PC_VEND_ID_REG(16bit): + * Vendor ID register + */ + +#define PCR_AZ_VEND_ID_REG 0x00000000 +/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */ + +#define PCRF_AZ_VEND_ID_LBN 0 +#define PCRF_AZ_VEND_ID_WIDTH 16 + + +/* + * PC_DEV_ID_REG(16bit): + * Device ID register + */ + +#define PCR_AZ_DEV_ID_REG 0x00000002 +/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */ + +#define PCRF_AZ_DEV_ID_LBN 0 +#define PCRF_AZ_DEV_ID_WIDTH 16 + + +/* + * PC_CMD_REG(16bit): + * Command register + */ + +#define PCR_AZ_CMD_REG 0x00000004 +/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */ + +#define PCRF_AZ_INTX_DIS_LBN 10 +#define PCRF_AZ_INTX_DIS_WIDTH 1 +#define PCRF_AZ_FB2B_EN_LBN 9 +#define PCRF_AZ_FB2B_EN_WIDTH 1 +#define PCRF_AZ_SERR_EN_LBN 8 +#define PCRF_AZ_SERR_EN_WIDTH 1 +#define PCRF_AZ_IDSEL_CTL_LBN 7 +#define PCRF_AZ_IDSEL_CTL_WIDTH 1 +#define PCRF_AZ_PERR_EN_LBN 6 +#define PCRF_AZ_PERR_EN_WIDTH 1 +#define PCRF_AZ_VGA_PAL_SNP_LBN 5 +#define PCRF_AZ_VGA_PAL_SNP_WIDTH 1 +#define PCRF_AZ_MWI_EN_LBN 4 +#define PCRF_AZ_MWI_EN_WIDTH 1 +#define PCRF_AZ_SPEC_CYC_LBN 3 +#define PCRF_AZ_SPEC_CYC_WIDTH 1 +#define PCRF_AZ_MST_EN_LBN 2 +#define PCRF_AZ_MST_EN_WIDTH 1 +#define PCRF_AZ_MEM_EN_LBN 1 +#define PCRF_AZ_MEM_EN_WIDTH 1 +#define PCRF_AZ_IO_EN_LBN 0 +#define PCRF_AZ_IO_EN_WIDTH 1 + + +/* + * PC_STAT_REG(16bit): + * Status register + */ + +#define PCR_AZ_STAT_REG 0x00000006 +/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */ + +#define PCRF_AZ_DET_PERR_LBN 15 +#define PCRF_AZ_DET_PERR_WIDTH 1 +#define PCRF_AZ_SIG_SERR_LBN 14 +#define PCRF_AZ_SIG_SERR_WIDTH 1 +#define PCRF_AZ_GOT_MABRT_LBN 13 +#define PCRF_AZ_GOT_MABRT_WIDTH 1 +#define PCRF_AZ_GOT_TABRT_LBN 12 +#define PCRF_AZ_GOT_TABRT_WIDTH 1 +#define PCRF_AZ_SIG_TABRT_LBN 11 +#define PCRF_AZ_SIG_TABRT_WIDTH 1 +#define PCRF_AZ_DEVSEL_TIM_LBN 9 +#define PCRF_AZ_DEVSEL_TIM_WIDTH 2 +#define PCRF_AZ_MDAT_PERR_LBN 8 +#define PCRF_AZ_MDAT_PERR_WIDTH 1 +#define PCRF_AZ_FB2B_CAP_LBN 7 +#define PCRF_AZ_FB2B_CAP_WIDTH 1 +#define PCRF_AZ_66MHZ_CAP_LBN 5 +#define PCRF_AZ_66MHZ_CAP_WIDTH 1 +#define PCRF_AZ_CAP_LIST_LBN 4 +#define PCRF_AZ_CAP_LIST_WIDTH 1 +#define PCRF_AZ_INTX_STAT_LBN 3 +#define PCRF_AZ_INTX_STAT_WIDTH 1 + + +/* + * PC_REV_ID_REG(8bit): + * Class code & revision ID register + */ + +#define PCR_AZ_REV_ID_REG 0x00000008 +/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */ + +#define PCRF_AZ_REV_ID_LBN 0 +#define PCRF_AZ_REV_ID_WIDTH 8 + + +/* + * PC_CC_REG(24bit): + * Class code register + */ + +#define PCR_AZ_CC_REG 0x00000009 +/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */ + +#define PCRF_AZ_BASE_CC_LBN 16 +#define PCRF_AZ_BASE_CC_WIDTH 8 +#define PCRF_AZ_SUB_CC_LBN 8 +#define PCRF_AZ_SUB_CC_WIDTH 8 +#define PCRF_AZ_PROG_IF_LBN 0 +#define PCRF_AZ_PROG_IF_WIDTH 8 + + +/* + * PC_CACHE_LSIZE_REG(8bit): + * Cache line size + */ + +#define PCR_AZ_CACHE_LSIZE_REG 0x0000000c +/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */ + +#define PCRF_AZ_CACHE_LSIZE_LBN 0 +#define PCRF_AZ_CACHE_LSIZE_WIDTH 8 
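The PCRF_*_LBN/_WIDTH pairs above (like the MCDI _LBN/_WIDTH macros earlier in this patch) name each bitfield by its lowest bit number and its width within the containing register. As a purely illustrative sketch, not part of the imported driver sources, the hypothetical helpers below show how such a pair can be combined to extract a field from a raw configuration-space read; only the PCR_/PCRF_ constants come from this header, everything else is assumed for the example.

#include <stdint.h>

/* Build a mask of 'width' bits; widths here never exceed 32. */
static inline uint32_t
example_field_mask(unsigned int width)
{
	return (width >= 32) ? 0xffffffffu : ((1u << width) - 1u);
}

/* Extract the field starting at bit 'lbn' with 'width' bits from 'reg_val'. */
static inline uint32_t
example_get_field(uint32_t reg_val, unsigned int lbn, unsigned int width)
{
	return (reg_val >> lbn) & example_field_mask(width);
}

/*
 * Example: given the value read at config offset PCR_AZ_CACHE_LSIZE_REG
 * (0x0c), extract the cache line size field defined just above.
 */
static inline uint32_t
example_cache_line_size(uint32_t cache_lsize_reg)
{
	return example_get_field(cache_lsize_reg,
	    PCRF_AZ_CACHE_LSIZE_LBN, PCRF_AZ_CACHE_LSIZE_WIDTH);
}

The same pattern applies to the MCDI response layouts earlier in this patch: each *_OFST/_LEN pair locates a dword within the response buffer, and the corresponding *_LBN/_WIDTH macros describe bit positions inside that dword.
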
+ + +/* + * PC_MST_LAT_REG(8bit): + * Master latency timer register + */ + +#define PCR_AZ_MST_LAT_REG 0x0000000d +/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */ + +#define PCRF_AZ_MST_LAT_LBN 0 +#define PCRF_AZ_MST_LAT_WIDTH 8 + + +/* + * PC_HDR_TYPE_REG(8bit): + * Header type register + */ + +#define PCR_AZ_HDR_TYPE_REG 0x0000000e +/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */ + +#define PCRF_AZ_MULT_FUNC_LBN 7 +#define PCRF_AZ_MULT_FUNC_WIDTH 1 +#define PCRF_AZ_TYPE_LBN 0 +#define PCRF_AZ_TYPE_WIDTH 7 + + +/* + * PC_BIST_REG(8bit): + * BIST register + */ + +#define PCR_AZ_BIST_REG 0x0000000f +/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */ + +#define PCRF_AZ_BIST_LBN 0 +#define PCRF_AZ_BIST_WIDTH 8 + + +/* + * PC_BAR0_REG(32bit): + * Primary function base address register 0 + */ + +#define PCR_AZ_BAR0_REG 0x00000010 +/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */ + +#define PCRF_AZ_BAR0_LBN 4 +#define PCRF_AZ_BAR0_WIDTH 28 +#define PCRF_AZ_BAR0_PREF_LBN 3 +#define PCRF_AZ_BAR0_PREF_WIDTH 1 +#define PCRF_AZ_BAR0_TYPE_LBN 1 +#define PCRF_AZ_BAR0_TYPE_WIDTH 2 +#define PCRF_AZ_BAR0_IOM_LBN 0 +#define PCRF_AZ_BAR0_IOM_WIDTH 1 + + +/* + * PC_BAR1_REG(32bit): + * Primary function base address register 1, BAR1 is not implemented so read only. + */ + +#define PCR_DZ_BAR1_REG 0x00000014 +/* hunta0=pci_f0_config */ + +#define PCRF_DZ_BAR1_LBN 0 +#define PCRF_DZ_BAR1_WIDTH 32 + + +/* + * PC_BAR2_LO_REG(32bit): + * Primary function base address register 2 low bits + */ + +#define PCR_AZ_BAR2_LO_REG 0x00000018 +/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */ + +#define PCRF_AZ_BAR2_LO_LBN 4 +#define PCRF_AZ_BAR2_LO_WIDTH 28 +#define PCRF_AZ_BAR2_PREF_LBN 3 +#define PCRF_AZ_BAR2_PREF_WIDTH 1 +#define PCRF_AZ_BAR2_TYPE_LBN 1 +#define PCRF_AZ_BAR2_TYPE_WIDTH 2 +#define PCRF_AZ_BAR2_IOM_LBN 0 +#define PCRF_AZ_BAR2_IOM_WIDTH 1 + + +/* + * PC_BAR2_HI_REG(32bit): + * Primary function base address register 2 high bits + */ + +#define PCR_AZ_BAR2_HI_REG 0x0000001c +/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */ + +#define PCRF_AZ_BAR2_HI_LBN 0 +#define PCRF_AZ_BAR2_HI_WIDTH 32 + + +/* + * PC_BAR4_LO_REG(32bit): + * Primary function base address register 2 low bits + */ + +#define PCR_CZ_BAR4_LO_REG 0x00000020 +/* sienaa0,hunta0=pci_f0_config */ + +#define PCRF_CZ_BAR4_LO_LBN 4 +#define PCRF_CZ_BAR4_LO_WIDTH 28 +#define PCRF_CZ_BAR4_PREF_LBN 3 +#define PCRF_CZ_BAR4_PREF_WIDTH 1 +#define PCRF_CZ_BAR4_TYPE_LBN 1 +#define PCRF_CZ_BAR4_TYPE_WIDTH 2 +#define PCRF_CZ_BAR4_IOM_LBN 0 +#define PCRF_CZ_BAR4_IOM_WIDTH 1 + + +/* + * PC_BAR4_HI_REG(32bit): + * Primary function base address register 2 high bits + */ + +#define PCR_CZ_BAR4_HI_REG 0x00000024 +/* sienaa0,hunta0=pci_f0_config */ + +#define PCRF_CZ_BAR4_HI_LBN 0 +#define PCRF_CZ_BAR4_HI_WIDTH 32 + + +/* + * PC_SS_VEND_ID_REG(16bit): + * Sub-system vendor ID register + */ + +#define PCR_AZ_SS_VEND_ID_REG 0x0000002c +/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */ + +#define PCRF_AZ_SS_VEND_ID_LBN 0 +#define PCRF_AZ_SS_VEND_ID_WIDTH 16 + + +/* + * PC_SS_ID_REG(16bit): + * Sub-system ID register + */ + +#define PCR_AZ_SS_ID_REG 0x0000002e +/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */ + +#define PCRF_AZ_SS_ID_LBN 0 +#define PCRF_AZ_SS_ID_WIDTH 16 + + +/* + * PC_EXPROM_BAR_REG(32bit): + * Expansion ROM base address register + */ + +#define PCR_AZ_EXPROM_BAR_REG 0x00000030 +/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */ + +#define PCRF_AZ_EXPROM_BAR_LBN 11 +#define PCRF_AZ_EXPROM_BAR_WIDTH 21 +#define 
PCRF_AB_EXPROM_MIN_SIZE_LBN 2 +#define PCRF_AB_EXPROM_MIN_SIZE_WIDTH 9 +#define PCRF_CZ_EXPROM_MIN_SIZE_LBN 1 +#define PCRF_CZ_EXPROM_MIN_SIZE_WIDTH 10 +#define PCRF_AB_EXPROM_FEATURE_ENABLE_LBN 1 +#define PCRF_AB_EXPROM_FEATURE_ENABLE_WIDTH 1 +#define PCRF_AZ_EXPROM_EN_LBN 0 +#define PCRF_AZ_EXPROM_EN_WIDTH 1 + + +/* + * PC_CAP_PTR_REG(8bit): + * Capability pointer register + */ + +#define PCR_AZ_CAP_PTR_REG 0x00000034 +/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */ + +#define PCRF_AZ_CAP_PTR_LBN 0 +#define PCRF_AZ_CAP_PTR_WIDTH 8 + + +/* + * PC_INT_LINE_REG(8bit): + * Interrupt line register + */ + +#define PCR_AZ_INT_LINE_REG 0x0000003c +/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */ + +#define PCRF_AZ_INT_LINE_LBN 0 +#define PCRF_AZ_INT_LINE_WIDTH 8 + + +/* + * PC_INT_PIN_REG(8bit): + * Interrupt pin register + */ + +#define PCR_AZ_INT_PIN_REG 0x0000003d +/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */ + +#define PCRF_AZ_INT_PIN_LBN 0 +#define PCRF_AZ_INT_PIN_WIDTH 8 +#define PCFE_DZ_INTPIN_INTD 4 +#define PCFE_DZ_INTPIN_INTC 3 +#define PCFE_DZ_INTPIN_INTB 2 +#define PCFE_DZ_INTPIN_INTA 1 + + +/* + * PC_PM_CAP_ID_REG(8bit): + * Power management capability ID + */ + +#define PCR_AZ_PM_CAP_ID_REG 0x00000040 +/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */ + +#define PCRF_AZ_PM_CAP_ID_LBN 0 +#define PCRF_AZ_PM_CAP_ID_WIDTH 8 + + +/* + * PC_PM_NXT_PTR_REG(8bit): + * Power management next item pointer + */ + +#define PCR_AZ_PM_NXT_PTR_REG 0x00000041 +/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */ + +#define PCRF_AZ_PM_NXT_PTR_LBN 0 +#define PCRF_AZ_PM_NXT_PTR_WIDTH 8 + + +/* + * PC_PM_CAP_REG(16bit): + * Power management capabilities register + */ + +#define PCR_AZ_PM_CAP_REG 0x00000042 +/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */ + +#define PCRF_AZ_PM_PME_SUPT_LBN 11 +#define PCRF_AZ_PM_PME_SUPT_WIDTH 5 +#define PCRF_AZ_PM_D2_SUPT_LBN 10 +#define PCRF_AZ_PM_D2_SUPT_WIDTH 1 +#define PCRF_AZ_PM_D1_SUPT_LBN 9 +#define PCRF_AZ_PM_D1_SUPT_WIDTH 1 +#define PCRF_AZ_PM_AUX_CURR_LBN 6 +#define PCRF_AZ_PM_AUX_CURR_WIDTH 3 +#define PCRF_AZ_PM_DSI_LBN 5 +#define PCRF_AZ_PM_DSI_WIDTH 1 +#define PCRF_AZ_PM_PME_CLK_LBN 3 +#define PCRF_AZ_PM_PME_CLK_WIDTH 1 +#define PCRF_AZ_PM_PME_VER_LBN 0 +#define PCRF_AZ_PM_PME_VER_WIDTH 3 + + +/* + * PC_PM_CS_REG(16bit): + * Power management control & status register + */ + +#define PCR_AZ_PM_CS_REG 0x00000044 +/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */ + +#define PCRF_AZ_PM_PME_STAT_LBN 15 +#define PCRF_AZ_PM_PME_STAT_WIDTH 1 +#define PCRF_AZ_PM_DAT_SCALE_LBN 13 +#define PCRF_AZ_PM_DAT_SCALE_WIDTH 2 +#define PCRF_AZ_PM_DAT_SEL_LBN 9 +#define PCRF_AZ_PM_DAT_SEL_WIDTH 4 +#define PCRF_AZ_PM_PME_EN_LBN 8 +#define PCRF_AZ_PM_PME_EN_WIDTH 1 +#define PCRF_CZ_NO_SOFT_RESET_LBN 3 +#define PCRF_CZ_NO_SOFT_RESET_WIDTH 1 +#define PCRF_AZ_PM_PWR_ST_LBN 0 +#define PCRF_AZ_PM_PWR_ST_WIDTH 2 + + +/* + * PC_MSI_CAP_ID_REG(8bit): + * MSI capability ID + */ + +#define PCR_AZ_MSI_CAP_ID_REG 0x00000050 +/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */ + +#define PCRF_AZ_MSI_CAP_ID_LBN 0 +#define PCRF_AZ_MSI_CAP_ID_WIDTH 8 + + +/* + * PC_MSI_NXT_PTR_REG(8bit): + * MSI next item pointer + */ + +#define PCR_AZ_MSI_NXT_PTR_REG 0x00000051 +/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */ + +#define PCRF_AZ_MSI_NXT_PTR_LBN 0 +#define PCRF_AZ_MSI_NXT_PTR_WIDTH 8 + + +/* + * PC_MSI_CTL_REG(16bit): + * MSI control register + */ + +#define PCR_AZ_MSI_CTL_REG 0x00000052 +/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */ + +#define 
PCRF_AZ_MSI_64_EN_LBN 7 +#define PCRF_AZ_MSI_64_EN_WIDTH 1 +#define PCRF_AZ_MSI_MULT_MSG_EN_LBN 4 +#define PCRF_AZ_MSI_MULT_MSG_EN_WIDTH 3 +#define PCRF_AZ_MSI_MULT_MSG_CAP_LBN 1 +#define PCRF_AZ_MSI_MULT_MSG_CAP_WIDTH 3 +#define PCRF_AZ_MSI_EN_LBN 0 +#define PCRF_AZ_MSI_EN_WIDTH 1 + + +/* + * PC_MSI_ADR_LO_REG(32bit): + * MSI low 32 bits address register + */ + +#define PCR_AZ_MSI_ADR_LO_REG 0x00000054 +/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */ + +#define PCRF_AZ_MSI_ADR_LO_LBN 2 +#define PCRF_AZ_MSI_ADR_LO_WIDTH 30 + + +/* + * PC_MSI_ADR_HI_REG(32bit): + * MSI high 32 bits address register + */ + +#define PCR_AZ_MSI_ADR_HI_REG 0x00000058 +/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */ + +#define PCRF_AZ_MSI_ADR_HI_LBN 0 +#define PCRF_AZ_MSI_ADR_HI_WIDTH 32 + + +/* + * PC_MSI_DAT_REG(16bit): + * MSI data register + */ + +#define PCR_AZ_MSI_DAT_REG 0x0000005c +/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */ + +#define PCRF_AZ_MSI_DAT_LBN 0 +#define PCRF_AZ_MSI_DAT_WIDTH 16 + + +/* + * PC_PCIE_CAP_LIST_REG(16bit): + * PCIe capability list register + */ + +#define PCR_AB_PCIE_CAP_LIST_REG 0x00000060 +/* falcona0,falconb0=pci_f0_config */ + +#define PCR_CZ_PCIE_CAP_LIST_REG 0x00000070 +/* sienaa0,hunta0=pci_f0_config */ + +#define PCRF_AZ_PCIE_NXT_PTR_LBN 8 +#define PCRF_AZ_PCIE_NXT_PTR_WIDTH 8 +#define PCRF_AZ_PCIE_CAP_ID_LBN 0 +#define PCRF_AZ_PCIE_CAP_ID_WIDTH 8 + + +/* + * PC_PCIE_CAP_REG(16bit): + * PCIe capability register + */ + +#define PCR_AB_PCIE_CAP_REG 0x00000062 +/* falcona0,falconb0=pci_f0_config */ + +#define PCR_CZ_PCIE_CAP_REG 0x00000072 +/* sienaa0,hunta0=pci_f0_config */ + +#define PCRF_AZ_PCIE_INT_MSG_NUM_LBN 9 +#define PCRF_AZ_PCIE_INT_MSG_NUM_WIDTH 5 +#define PCRF_AZ_PCIE_SLOT_IMP_LBN 8 +#define PCRF_AZ_PCIE_SLOT_IMP_WIDTH 1 +#define PCRF_AZ_PCIE_DEV_PORT_TYPE_LBN 4 +#define PCRF_AZ_PCIE_DEV_PORT_TYPE_WIDTH 4 +#define PCRF_AZ_PCIE_CAP_VER_LBN 0 +#define PCRF_AZ_PCIE_CAP_VER_WIDTH 4 + + +/* + * PC_DEV_CAP_REG(32bit): + * PCIe device capabilities register + */ + +#define PCR_AB_DEV_CAP_REG 0x00000064 +/* falcona0,falconb0=pci_f0_config */ + +#define PCR_CZ_DEV_CAP_REG 0x00000074 +/* sienaa0=pci_f0_config,hunta0=pci_f0_config */ + +#define PCRF_CZ_CAP_FN_LEVEL_RESET_LBN 28 +#define PCRF_CZ_CAP_FN_LEVEL_RESET_WIDTH 1 +#define PCRF_AZ_CAP_SLOT_PWR_SCL_LBN 26 +#define PCRF_AZ_CAP_SLOT_PWR_SCL_WIDTH 2 +#define PCRF_AZ_CAP_SLOT_PWR_VAL_LBN 18 +#define PCRF_AZ_CAP_SLOT_PWR_VAL_WIDTH 8 +#define PCRF_CZ_ROLE_BASE_ERR_REPORTING_LBN 15 +#define PCRF_CZ_ROLE_BASE_ERR_REPORTING_WIDTH 1 +#define PCRF_AB_PWR_IND_LBN 14 +#define PCRF_AB_PWR_IND_WIDTH 1 +#define PCRF_AB_ATTN_IND_LBN 13 +#define PCRF_AB_ATTN_IND_WIDTH 1 +#define PCRF_AB_ATTN_BUTTON_LBN 12 +#define PCRF_AB_ATTN_BUTTON_WIDTH 1 +#define PCRF_AZ_ENDPT_L1_LAT_LBN 9 +#define PCRF_AZ_ENDPT_L1_LAT_WIDTH 3 +#define PCRF_AZ_ENDPT_L0_LAT_LBN 6 +#define PCRF_AZ_ENDPT_L0_LAT_WIDTH 3 +#define PCRF_AZ_TAG_FIELD_LBN 5 +#define PCRF_AZ_TAG_FIELD_WIDTH 1 +#define PCRF_AZ_PHAN_FUNC_LBN 3 +#define PCRF_AZ_PHAN_FUNC_WIDTH 2 +#define PCRF_AZ_MAX_PAYL_SIZE_SUPT_LBN 0 +#define PCRF_AZ_MAX_PAYL_SIZE_SUPT_WIDTH 3 + + +/* + * PC_DEV_CTL_REG(16bit): + * PCIe device control register + */ + +#define PCR_AB_DEV_CTL_REG 0x00000068 +/* falcona0,falconb0=pci_f0_config */ + +#define PCR_CZ_DEV_CTL_REG 0x00000078 +/* sienaa0,hunta0=pci_f0_config */ + +#define PCRF_CZ_FN_LEVEL_RESET_LBN 15 +#define PCRF_CZ_FN_LEVEL_RESET_WIDTH 1 +#define PCRF_AZ_MAX_RD_REQ_SIZE_LBN 12 +#define PCRF_AZ_MAX_RD_REQ_SIZE_WIDTH 3 +#define 
PCFE_AZ_MAX_RD_REQ_SIZE_4096 5 +#define PCFE_AZ_MAX_RD_REQ_SIZE_2048 4 +#define PCFE_AZ_MAX_RD_REQ_SIZE_1024 3 +#define PCFE_AZ_MAX_RD_REQ_SIZE_512 2 +#define PCFE_AZ_MAX_RD_REQ_SIZE_256 1 +#define PCFE_AZ_MAX_RD_REQ_SIZE_128 0 +#define PCRF_AZ_EN_NO_SNOOP_LBN 11 +#define PCRF_AZ_EN_NO_SNOOP_WIDTH 1 +#define PCRF_AZ_AUX_PWR_PM_EN_LBN 10 +#define PCRF_AZ_AUX_PWR_PM_EN_WIDTH 1 +#define PCRF_AZ_PHAN_FUNC_EN_LBN 9 +#define PCRF_AZ_PHAN_FUNC_EN_WIDTH 1 +#define PCRF_AB_DEV_CAP_REG_RSVD0_LBN 8 +#define PCRF_AB_DEV_CAP_REG_RSVD0_WIDTH 1 +#define PCRF_CZ_EXTENDED_TAG_EN_LBN 8 +#define PCRF_CZ_EXTENDED_TAG_EN_WIDTH 1 +#define PCRF_AZ_MAX_PAYL_SIZE_LBN 5 +#define PCRF_AZ_MAX_PAYL_SIZE_WIDTH 3 +#define PCFE_AZ_MAX_PAYL_SIZE_4096 5 +#define PCFE_AZ_MAX_PAYL_SIZE_2048 4 +#define PCFE_AZ_MAX_PAYL_SIZE_1024 3 +#define PCFE_AZ_MAX_PAYL_SIZE_512 2 +#define PCFE_AZ_MAX_PAYL_SIZE_256 1 +#define PCFE_AZ_MAX_PAYL_SIZE_128 0 +#define PCRF_AZ_EN_RELAX_ORDER_LBN 4 +#define PCRF_AZ_EN_RELAX_ORDER_WIDTH 1 +#define PCRF_AZ_UNSUP_REQ_RPT_EN_LBN 3 +#define PCRF_AZ_UNSUP_REQ_RPT_EN_WIDTH 1 +#define PCRF_AZ_FATAL_ERR_RPT_EN_LBN 2 +#define PCRF_AZ_FATAL_ERR_RPT_EN_WIDTH 1 +#define PCRF_AZ_NONFATAL_ERR_RPT_EN_LBN 1 +#define PCRF_AZ_NONFATAL_ERR_RPT_EN_WIDTH 1 +#define PCRF_AZ_CORR_ERR_RPT_EN_LBN 0 +#define PCRF_AZ_CORR_ERR_RPT_EN_WIDTH 1 + + +/* + * PC_DEV_STAT_REG(16bit): + * PCIe device status register + */ + +#define PCR_AB_DEV_STAT_REG 0x0000006a +/* falcona0,falconb0=pci_f0_config */ + +#define PCR_CZ_DEV_STAT_REG 0x0000007a +/* sienaa0,hunta0=pci_f0_config */ + +#define PCRF_AZ_TRNS_PEND_LBN 5 +#define PCRF_AZ_TRNS_PEND_WIDTH 1 +#define PCRF_AZ_AUX_PWR_DET_LBN 4 +#define PCRF_AZ_AUX_PWR_DET_WIDTH 1 +#define PCRF_AZ_UNSUP_REQ_DET_LBN 3 +#define PCRF_AZ_UNSUP_REQ_DET_WIDTH 1 +#define PCRF_AZ_FATAL_ERR_DET_LBN 2 +#define PCRF_AZ_FATAL_ERR_DET_WIDTH 1 +#define PCRF_AZ_NONFATAL_ERR_DET_LBN 1 +#define PCRF_AZ_NONFATAL_ERR_DET_WIDTH 1 +#define PCRF_AZ_CORR_ERR_DET_LBN 0 +#define PCRF_AZ_CORR_ERR_DET_WIDTH 1 + + +/* + * PC_LNK_CAP_REG(32bit): + * PCIe link capabilities register + */ + +#define PCR_AB_LNK_CAP_REG 0x0000006c +/* falcona0,falconb0=pci_f0_config */ + +#define PCR_CZ_LNK_CAP_REG 0x0000007c +/* sienaa0,hunta0=pci_f0_config */ + +#define PCRF_AZ_PORT_NUM_LBN 24 +#define PCRF_AZ_PORT_NUM_WIDTH 8 +#define PCRF_DZ_ASPM_OPTIONALITY_CAP_LBN 22 +#define PCRF_DZ_ASPM_OPTIONALITY_CAP_WIDTH 1 +#define PCRF_CZ_LINK_BWDITH_NOTIF_CAP_LBN 21 +#define PCRF_CZ_LINK_BWDITH_NOTIF_CAP_WIDTH 1 +#define PCRF_CZ_DATA_LINK_ACTIVE_RPT_CAP_LBN 20 +#define PCRF_CZ_DATA_LINK_ACTIVE_RPT_CAP_WIDTH 1 +#define PCRF_CZ_SURPISE_DOWN_RPT_CAP_LBN 19 +#define PCRF_CZ_SURPISE_DOWN_RPT_CAP_WIDTH 1 +#define PCRF_CZ_CLOCK_PWR_MNGMNT_CAP_LBN 18 +#define PCRF_CZ_CLOCK_PWR_MNGMNT_CAP_WIDTH 1 +#define PCRF_AZ_DEF_L1_EXIT_LAT_LBN 15 +#define PCRF_AZ_DEF_L1_EXIT_LAT_WIDTH 3 +#define PCRF_AZ_DEF_L0_EXIT_LATPORT_NUM_LBN 12 +#define PCRF_AZ_DEF_L0_EXIT_LATPORT_NUM_WIDTH 3 +#define PCRF_AZ_AS_LNK_PM_SUPT_LBN 10 +#define PCRF_AZ_AS_LNK_PM_SUPT_WIDTH 2 +#define PCRF_AZ_MAX_LNK_WIDTH_LBN 4 +#define PCRF_AZ_MAX_LNK_WIDTH_WIDTH 6 +#define PCRF_AZ_MAX_LNK_SP_LBN 0 +#define PCRF_AZ_MAX_LNK_SP_WIDTH 4 + + +/* + * PC_LNK_CTL_REG(16bit): + * PCIe link control register + */ + +#define PCR_AB_LNK_CTL_REG 0x00000070 +/* falcona0,falconb0=pci_f0_config */ + +#define PCR_CZ_LNK_CTL_REG 0x00000080 +/* sienaa0,hunta0=pci_f0_config */ + +#define PCRF_AZ_EXT_SYNC_LBN 7 +#define PCRF_AZ_EXT_SYNC_WIDTH 1 +#define PCRF_AZ_COMM_CLK_CFG_LBN 6 +#define PCRF_AZ_COMM_CLK_CFG_WIDTH 1 
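(Editor's note, not part of the imported header.) The PCFE_AZ_MAX_RD_REQ_SIZE_* and PCFE_AZ_MAX_PAYL_SIZE_* values above follow the standard PCIe encoding, where the byte count is 128 shifted left by the encoded value (0 = 128 bytes up to 5 = 4096 bytes). A small sketch of decoding that field from a raw PC_DEV_CTL_REG value follows; it is illustrative only, assumes the definitions above are in scope, and example_max_rd_req_bytes() is a hypothetical name introduced here for illustration.

        /* Illustrative sketch only -- not part of the imported header. */
        #include <stdint.h>

        /* Decode Max_Read_Request_Size from a raw PC_DEV_CTL_REG value. */
        static inline uint32_t
        example_max_rd_req_bytes(uint16_t dev_ctl)
        {
                unsigned int code;

                code = (dev_ctl >> PCRF_AZ_MAX_RD_REQ_SIZE_LBN) &
                    ((1u << PCRF_AZ_MAX_RD_REQ_SIZE_WIDTH) - 1u);

                /* 0 -> 128, 1 -> 256, ... 5 -> 4096, matching the values above. */
                return 128u << code;
        }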
+#define PCRF_AB_LNK_CTL_REG_RSVD0_LBN 5 +#define PCRF_AB_LNK_CTL_REG_RSVD0_WIDTH 1 +#define PCRF_CZ_LNK_RETRAIN_LBN 5 +#define PCRF_CZ_LNK_RETRAIN_WIDTH 1 +#define PCRF_AZ_LNK_DIS_LBN 4 +#define PCRF_AZ_LNK_DIS_WIDTH 1 +#define PCRF_AZ_RD_COM_BDRY_LBN 3 +#define PCRF_AZ_RD_COM_BDRY_WIDTH 1 +#define PCRF_AZ_ACT_ST_LNK_PM_CTL_LBN 0 +#define PCRF_AZ_ACT_ST_LNK_PM_CTL_WIDTH 2 + + +/* + * PC_LNK_STAT_REG(16bit): + * PCIe link status register + */ + +#define PCR_AB_LNK_STAT_REG 0x00000072 +/* falcona0,falconb0=pci_f0_config */ + +#define PCR_CZ_LNK_STAT_REG 0x00000082 +/* sienaa0,hunta0=pci_f0_config */ + +#define PCRF_AZ_SLOT_CLK_CFG_LBN 12 +#define PCRF_AZ_SLOT_CLK_CFG_WIDTH 1 +#define PCRF_AZ_LNK_TRAIN_LBN 11 +#define PCRF_AZ_LNK_TRAIN_WIDTH 1 +#define PCRF_AB_TRAIN_ERR_LBN 10 +#define PCRF_AB_TRAIN_ERR_WIDTH 1 +#define PCRF_AZ_LNK_WIDTH_LBN 4 +#define PCRF_AZ_LNK_WIDTH_WIDTH 6 +#define PCRF_AZ_LNK_SP_LBN 0 +#define PCRF_AZ_LNK_SP_WIDTH 4 + + +/* + * PC_SLOT_CAP_REG(32bit): + * PCIe slot capabilities register + */ + +#define PCR_AB_SLOT_CAP_REG 0x00000074 +/* falcona0,falconb0=pci_f0_config */ + +#define PCRF_AB_SLOT_NUM_LBN 19 +#define PCRF_AB_SLOT_NUM_WIDTH 13 +#define PCRF_AB_SLOT_PWR_LIM_SCL_LBN 15 +#define PCRF_AB_SLOT_PWR_LIM_SCL_WIDTH 2 +#define PCRF_AB_SLOT_PWR_LIM_VAL_LBN 7 +#define PCRF_AB_SLOT_PWR_LIM_VAL_WIDTH 8 +#define PCRF_AB_SLOT_HP_CAP_LBN 6 +#define PCRF_AB_SLOT_HP_CAP_WIDTH 1 +#define PCRF_AB_SLOT_HP_SURP_LBN 5 +#define PCRF_AB_SLOT_HP_SURP_WIDTH 1 +#define PCRF_AB_SLOT_PWR_IND_PRST_LBN 4 +#define PCRF_AB_SLOT_PWR_IND_PRST_WIDTH 1 +#define PCRF_AB_SLOT_ATTN_IND_PRST_LBN 3 +#define PCRF_AB_SLOT_ATTN_IND_PRST_WIDTH 1 +#define PCRF_AB_SLOT_MRL_SENS_PRST_LBN 2 +#define PCRF_AB_SLOT_MRL_SENS_PRST_WIDTH 1 +#define PCRF_AB_SLOT_PWR_CTL_PRST_LBN 1 +#define PCRF_AB_SLOT_PWR_CTL_PRST_WIDTH 1 +#define PCRF_AB_SLOT_ATTN_BUT_PRST_LBN 0 +#define PCRF_AB_SLOT_ATTN_BUT_PRST_WIDTH 1 + + +/* + * PC_SLOT_CTL_REG(16bit): + * PCIe slot control register + */ + +#define PCR_AB_SLOT_CTL_REG 0x00000078 +/* falcona0,falconb0=pci_f0_config */ + +#define PCRF_AB_SLOT_PWR_CTLR_CTL_LBN 10 +#define PCRF_AB_SLOT_PWR_CTLR_CTL_WIDTH 1 +#define PCRF_AB_SLOT_PWR_IND_CTL_LBN 8 +#define PCRF_AB_SLOT_PWR_IND_CTL_WIDTH 2 +#define PCRF_AB_SLOT_ATT_IND_CTL_LBN 6 +#define PCRF_AB_SLOT_ATT_IND_CTL_WIDTH 2 +#define PCRF_AB_SLOT_HP_INT_EN_LBN 5 +#define PCRF_AB_SLOT_HP_INT_EN_WIDTH 1 +#define PCRF_AB_SLOT_CMD_COMP_INT_EN_LBN 4 +#define PCRF_AB_SLOT_CMD_COMP_INT_EN_WIDTH 1 +#define PCRF_AB_SLOT_PRES_DET_CHG_EN_LBN 3 +#define PCRF_AB_SLOT_PRES_DET_CHG_EN_WIDTH 1 +#define PCRF_AB_SLOT_MRL_SENS_CHG_EN_LBN 2 +#define PCRF_AB_SLOT_MRL_SENS_CHG_EN_WIDTH 1 +#define PCRF_AB_SLOT_PWR_FLTDET_EN_LBN 1 +#define PCRF_AB_SLOT_PWR_FLTDET_EN_WIDTH 1 +#define PCRF_AB_SLOT_ATTN_BUT_EN_LBN 0 +#define PCRF_AB_SLOT_ATTN_BUT_EN_WIDTH 1 + + +/* + * PC_SLOT_STAT_REG(16bit): + * PCIe slot status register + */ + +#define PCR_AB_SLOT_STAT_REG 0x0000007a +/* falcona0,falconb0=pci_f0_config */ + +#define PCRF_AB_PRES_DET_ST_LBN 6 +#define PCRF_AB_PRES_DET_ST_WIDTH 1 +#define PCRF_AB_MRL_SENS_ST_LBN 5 +#define PCRF_AB_MRL_SENS_ST_WIDTH 1 +#define PCRF_AB_SLOT_PWR_IND_LBN 4 +#define PCRF_AB_SLOT_PWR_IND_WIDTH 1 +#define PCRF_AB_SLOT_ATTN_IND_LBN 3 +#define PCRF_AB_SLOT_ATTN_IND_WIDTH 1 +#define PCRF_AB_SLOT_MRL_SENS_LBN 2 +#define PCRF_AB_SLOT_MRL_SENS_WIDTH 1 +#define PCRF_AB_PWR_FLTDET_LBN 1 +#define PCRF_AB_PWR_FLTDET_WIDTH 1 +#define PCRF_AB_ATTN_BUTDET_LBN 0 +#define PCRF_AB_ATTN_BUTDET_WIDTH 1 + + +/* + * PC_MSIX_CAP_ID_REG(8bit): + * MSIX 
Capability ID + */ + +#define PCR_BB_MSIX_CAP_ID_REG 0x00000090 +/* falconb0=pci_f0_config */ + +#define PCR_CZ_MSIX_CAP_ID_REG 0x000000b0 +/* sienaa0,hunta0=pci_f0_config */ + +#define PCRF_BZ_MSIX_CAP_ID_LBN 0 +#define PCRF_BZ_MSIX_CAP_ID_WIDTH 8 + + +/* + * PC_MSIX_NXT_PTR_REG(8bit): + * MSIX Capability Next Capability Ptr + */ + +#define PCR_BB_MSIX_NXT_PTR_REG 0x00000091 +/* falconb0=pci_f0_config */ + +#define PCR_CZ_MSIX_NXT_PTR_REG 0x000000b1 +/* sienaa0,hunta0=pci_f0_config */ + +#define PCRF_BZ_MSIX_NXT_PTR_LBN 0 +#define PCRF_BZ_MSIX_NXT_PTR_WIDTH 8 + + +/* + * PC_MSIX_CTL_REG(16bit): + * MSIX control register + */ + +#define PCR_BB_MSIX_CTL_REG 0x00000092 +/* falconb0=pci_f0_config */ + +#define PCR_CZ_MSIX_CTL_REG 0x000000b2 +/* sienaa0,hunta0=pci_f0_config */ + +#define PCRF_BZ_MSIX_EN_LBN 15 +#define PCRF_BZ_MSIX_EN_WIDTH 1 +#define PCRF_BZ_MSIX_FUNC_MASK_LBN 14 +#define PCRF_BZ_MSIX_FUNC_MASK_WIDTH 1 +#define PCRF_BZ_MSIX_TBL_SIZE_LBN 0 +#define PCRF_BZ_MSIX_TBL_SIZE_WIDTH 11 + + +/* + * PC_MSIX_TBL_BASE_REG(32bit): + * MSIX Capability Vector Table Base + */ + +#define PCR_BB_MSIX_TBL_BASE_REG 0x00000094 +/* falconb0=pci_f0_config */ + +#define PCR_CZ_MSIX_TBL_BASE_REG 0x000000b4 +/* sienaa0,hunta0=pci_f0_config */ + +#define PCRF_BZ_MSIX_TBL_OFF_LBN 3 +#define PCRF_BZ_MSIX_TBL_OFF_WIDTH 29 +#define PCRF_BZ_MSIX_TBL_BIR_LBN 0 +#define PCRF_BZ_MSIX_TBL_BIR_WIDTH 3 + + +/* + * PC_DEV_CAP2_REG(32bit): + * PCIe Device Capabilities 2 + */ + +#define PCR_CZ_DEV_CAP2_REG 0x00000094 +/* sienaa0=pci_f0_config,hunta0=pci_f0_config */ + +#define PCRF_DZ_OBFF_SUPPORTED_LBN 18 +#define PCRF_DZ_OBFF_SUPPORTED_WIDTH 2 +#define PCRF_DZ_TPH_CMPL_SUPPORTED_LBN 12 +#define PCRF_DZ_TPH_CMPL_SUPPORTED_WIDTH 2 +#define PCRF_DZ_LTR_M_SUPPORTED_LBN 11 +#define PCRF_DZ_LTR_M_SUPPORTED_WIDTH 1 +#define PCRF_CC_CMPL_TIMEOUT_DIS_LBN 4 +#define PCRF_CC_CMPL_TIMEOUT_DIS_WIDTH 1 +#define PCRF_DZ_CMPL_TIMEOUT_DIS_SUPPORTED_LBN 4 +#define PCRF_DZ_CMPL_TIMEOUT_DIS_SUPPORTED_WIDTH 1 +#define PCRF_CZ_CMPL_TIMEOUT_LBN 0 +#define PCRF_CZ_CMPL_TIMEOUT_WIDTH 4 +#define PCFE_CZ_CMPL_TIMEOUT_17000_TO_6400MS 14 +#define PCFE_CZ_CMPL_TIMEOUT_4000_TO_1300MS 13 +#define PCFE_CZ_CMPL_TIMEOUT_1000_TO_3500MS 10 +#define PCFE_CZ_CMPL_TIMEOUT_260_TO_900MS 9 +#define PCFE_CZ_CMPL_TIMEOUT_65_TO_210MS 6 +#define PCFE_CZ_CMPL_TIMEOUT_16_TO_55MS 5 +#define PCFE_CZ_CMPL_TIMEOUT_1_TO_10MS 2 +#define PCFE_CZ_CMPL_TIMEOUT_50_TO_100US 1 +#define PCFE_CZ_CMPL_TIMEOUT_DEFAULT 0 + + +/* + * PC_DEV_CTL2_REG(16bit): + * PCIe Device Control 2 + */ + +#define PCR_CZ_DEV_CTL2_REG 0x00000098 +/* sienaa0,hunta0=pci_f0_config */ + +#define PCRF_DZ_OBFF_ENABLE_LBN 13 +#define PCRF_DZ_OBFF_ENABLE_WIDTH 2 +#define PCRF_DZ_LTR_ENABLE_LBN 10 +#define PCRF_DZ_LTR_ENABLE_WIDTH 1 +#define PCRF_DZ_IDO_COMPLETION_ENABLE_LBN 9 +#define PCRF_DZ_IDO_COMPLETION_ENABLE_WIDTH 1 +#define PCRF_DZ_IDO_REQUEST_ENABLE_LBN 8 +#define PCRF_DZ_IDO_REQUEST_ENABLE_WIDTH 1 +#define PCRF_CZ_CMPL_TIMEOUT_DIS_CTL_LBN 4 +#define PCRF_CZ_CMPL_TIMEOUT_DIS_CTL_WIDTH 1 +#define PCRF_CZ_CMPL_TIMEOUT_CTL_LBN 0 +#define PCRF_CZ_CMPL_TIMEOUT_CTL_WIDTH 4 + + +/* + * PC_MSIX_PBA_BASE_REG(32bit): + * MSIX Capability PBA Base + */ + +#define PCR_BB_MSIX_PBA_BASE_REG 0x00000098 +/* falconb0=pci_f0_config */ + +#define PCR_CZ_MSIX_PBA_BASE_REG 0x000000b8 +/* sienaa0,hunta0=pci_f0_config */ + +#define PCRF_BZ_MSIX_PBA_OFF_LBN 3 +#define PCRF_BZ_MSIX_PBA_OFF_WIDTH 29 +#define PCRF_BZ_MSIX_PBA_BIR_LBN 0 +#define PCRF_BZ_MSIX_PBA_BIR_WIDTH 3 + + +/* + * PC_LNK_CAP2_REG(32bit): + * PCIe Link 
Capability 2 + */ + +#define PCR_DZ_LNK_CAP2_REG 0x0000009c +/* hunta0=pci_f0_config */ + +#define PCRF_DZ_LNK_SPEED_SUP_LBN 1 +#define PCRF_DZ_LNK_SPEED_SUP_WIDTH 7 + + +/* + * PC_LNK_CTL2_REG(16bit): + * PCIe Link Control 2 + */ + +#define PCR_CZ_LNK_CTL2_REG 0x000000a0 +/* sienaa0,hunta0=pci_f0_config */ + +#define PCRF_CZ_POLLING_DEEMPH_LVL_LBN 12 +#define PCRF_CZ_POLLING_DEEMPH_LVL_WIDTH 1 +#define PCRF_CZ_COMPLIANCE_SOS_CTL_LBN 11 +#define PCRF_CZ_COMPLIANCE_SOS_CTL_WIDTH 1 +#define PCRF_CZ_ENTER_MODIFIED_COMPLIANCE_CTL_LBN 10 +#define PCRF_CZ_ENTER_MODIFIED_COMPLIANCE_CTL_WIDTH 1 +#define PCRF_CZ_TRANSMIT_MARGIN_LBN 7 +#define PCRF_CZ_TRANSMIT_MARGIN_WIDTH 3 +#define PCRF_CZ_SELECT_DEEMPH_LBN 6 +#define PCRF_CZ_SELECT_DEEMPH_WIDTH 1 +#define PCRF_CZ_HW_AUTONOMOUS_SPEED_DIS_LBN 5 +#define PCRF_CZ_HW_AUTONOMOUS_SPEED_DIS_WIDTH 1 +#define PCRF_CZ_ENTER_COMPLIANCE_CTL_LBN 4 +#define PCRF_CZ_ENTER_COMPLIANCE_CTL_WIDTH 1 +#define PCRF_CZ_TGT_LNK_SPEED_CTL_LBN 0 +#define PCRF_CZ_TGT_LNK_SPEED_CTL_WIDTH 4 +#define PCFE_DZ_LCTL2_TGT_SPEED_GEN3 3 +#define PCFE_DZ_LCTL2_TGT_SPEED_GEN2 2 +#define PCFE_DZ_LCTL2_TGT_SPEED_GEN1 1 + + +/* + * PC_LNK_STAT2_REG(16bit): + * PCIe Link Status 2 + */ + +#define PCR_CZ_LNK_STAT2_REG 0x000000a2 +/* sienaa0,hunta0=pci_f0_config */ + +#define PCRF_CZ_CURRENT_DEEMPH_LBN 0 +#define PCRF_CZ_CURRENT_DEEMPH_WIDTH 1 + + +/* + * PC_VPD_CAP_ID_REG(8bit): + * VPD data register + */ + +#define PCR_AB_VPD_CAP_ID_REG 0x000000b0 +/* falcona0,falconb0=pci_f0_config */ + +#define PCRF_AB_VPD_CAP_ID_LBN 0 +#define PCRF_AB_VPD_CAP_ID_WIDTH 8 + + +/* + * PC_VPD_NXT_PTR_REG(8bit): + * VPD next item pointer + */ + +#define PCR_AB_VPD_NXT_PTR_REG 0x000000b1 +/* falcona0,falconb0=pci_f0_config */ + +#define PCRF_AB_VPD_NXT_PTR_LBN 0 +#define PCRF_AB_VPD_NXT_PTR_WIDTH 8 + + +/* + * PC_VPD_ADDR_REG(16bit): + * VPD address register + */ + +#define PCR_AB_VPD_ADDR_REG 0x000000b2 +/* falcona0,falconb0=pci_f0_config */ + +#define PCRF_AB_VPD_FLAG_LBN 15 +#define PCRF_AB_VPD_FLAG_WIDTH 1 +#define PCRF_AB_VPD_ADDR_LBN 0 +#define PCRF_AB_VPD_ADDR_WIDTH 15 + + +/* + * PC_VPD_CAP_DATA_REG(32bit): + * documentation to be written for sum_PC_VPD_CAP_DATA_REG + */ + +#define PCR_AB_VPD_CAP_DATA_REG 0x000000b4 +/* falcona0,falconb0=pci_f0_config */ + +#define PCR_CZ_VPD_CAP_DATA_REG 0x000000d4 +/* sienaa0,hunta0=pci_f0_config */ + +#define PCRF_AZ_VPD_DATA_LBN 0 +#define PCRF_AZ_VPD_DATA_WIDTH 32 + + +/* + * PC_VPD_CAP_CTL_REG(8bit): + * VPD control and capabilities register + */ + +#define PCR_CZ_VPD_CAP_CTL_REG 0x000000d0 +/* sienaa0,hunta0=pci_f0_config */ + +#define PCRF_CZ_VPD_FLAG_LBN 31 +#define PCRF_CZ_VPD_FLAG_WIDTH 1 +#define PCRF_CZ_VPD_ADDR_LBN 16 +#define PCRF_CZ_VPD_ADDR_WIDTH 15 +#define PCRF_CZ_VPD_NXT_PTR_LBN 8 +#define PCRF_CZ_VPD_NXT_PTR_WIDTH 8 +#define PCRF_CZ_VPD_CAP_ID_LBN 0 +#define PCRF_CZ_VPD_CAP_ID_WIDTH 8 + + +/* + * PC_AER_CAP_HDR_REG(32bit): + * AER capability header register + */ + +#define PCR_AZ_AER_CAP_HDR_REG 0x00000100 +/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */ + +#define PCRF_AZ_AERCAPHDR_NXT_PTR_LBN 20 +#define PCRF_AZ_AERCAPHDR_NXT_PTR_WIDTH 12 +#define PCRF_AZ_AERCAPHDR_VER_LBN 16 +#define PCRF_AZ_AERCAPHDR_VER_WIDTH 4 +#define PCRF_AZ_AERCAPHDR_ID_LBN 0 +#define PCRF_AZ_AERCAPHDR_ID_WIDTH 16 + + +/* + * PC_AER_UNCORR_ERR_STAT_REG(32bit): + * AER Uncorrectable error status register + */ + +#define PCR_AZ_AER_UNCORR_ERR_STAT_REG 0x00000104 +/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */ + +#define PCRF_AZ_UNSUPT_REQ_ERR_STAT_LBN 20 +#define 
PCRF_AZ_UNSUPT_REQ_ERR_STAT_WIDTH 1 +#define PCRF_AZ_ECRC_ERR_STAT_LBN 19 +#define PCRF_AZ_ECRC_ERR_STAT_WIDTH 1 +#define PCRF_AZ_MALF_TLP_STAT_LBN 18 +#define PCRF_AZ_MALF_TLP_STAT_WIDTH 1 +#define PCRF_AZ_RX_OVF_STAT_LBN 17 +#define PCRF_AZ_RX_OVF_STAT_WIDTH 1 +#define PCRF_AZ_UNEXP_COMP_STAT_LBN 16 +#define PCRF_AZ_UNEXP_COMP_STAT_WIDTH 1 +#define PCRF_AZ_COMP_ABRT_STAT_LBN 15 +#define PCRF_AZ_COMP_ABRT_STAT_WIDTH 1 +#define PCRF_AZ_COMP_TIMEOUT_STAT_LBN 14 +#define PCRF_AZ_COMP_TIMEOUT_STAT_WIDTH 1 +#define PCRF_AZ_FC_PROTO_ERR_STAT_LBN 13 +#define PCRF_AZ_FC_PROTO_ERR_STAT_WIDTH 1 +#define PCRF_AZ_PSON_TLP_STAT_LBN 12 +#define PCRF_AZ_PSON_TLP_STAT_WIDTH 1 +#define PCRF_AZ_DL_PROTO_ERR_STAT_LBN 4 +#define PCRF_AZ_DL_PROTO_ERR_STAT_WIDTH 1 +#define PCRF_AB_TRAIN_ERR_STAT_LBN 0 +#define PCRF_AB_TRAIN_ERR_STAT_WIDTH 1 + + +/* + * PC_AER_UNCORR_ERR_MASK_REG(32bit): + * AER Uncorrectable error mask register + */ + +#define PCR_AZ_AER_UNCORR_ERR_MASK_REG 0x00000108 +/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */ + +#define PCRF_DZ_ATOMIC_OP_EGR_BLOCKED_MASK_LBN 24 +#define PCRF_DZ_ATOMIC_OP_EGR_BLOCKED_MASK_WIDTH 1 +#define PCRF_DZ_UNCORR_INT_ERR_MASK_LBN 22 +#define PCRF_DZ_UNCORR_INT_ERR_MASK_WIDTH 1 +#define PCRF_AZ_UNSUPT_REQ_ERR_MASK_LBN 20 +#define PCRF_AZ_UNSUPT_REQ_ERR_MASK_WIDTH 1 +#define PCRF_AZ_ECRC_ERR_MASK_LBN 19 +#define PCRF_AZ_ECRC_ERR_MASK_WIDTH 1 +#define PCRF_AZ_MALF_TLP_MASK_LBN 18 +#define PCRF_AZ_MALF_TLP_MASK_WIDTH 1 +#define PCRF_AZ_RX_OVF_MASK_LBN 17 +#define PCRF_AZ_RX_OVF_MASK_WIDTH 1 +#define PCRF_AZ_UNEXP_COMP_MASK_LBN 16 +#define PCRF_AZ_UNEXP_COMP_MASK_WIDTH 1 +#define PCRF_AZ_COMP_ABRT_MASK_LBN 15 +#define PCRF_AZ_COMP_ABRT_MASK_WIDTH 1 +#define PCRF_AZ_COMP_TIMEOUT_MASK_LBN 14 +#define PCRF_AZ_COMP_TIMEOUT_MASK_WIDTH 1 +#define PCRF_AZ_FC_PROTO_ERR_MASK_LBN 13 +#define PCRF_AZ_FC_PROTO_ERR_MASK_WIDTH 1 +#define PCRF_AZ_PSON_TLP_MASK_LBN 12 +#define PCRF_AZ_PSON_TLP_MASK_WIDTH 1 +#define PCRF_AZ_DL_PROTO_ERR_MASK_LBN 4 +#define PCRF_AZ_DL_PROTO_ERR_MASK_WIDTH 1 +#define PCRF_AB_TRAIN_ERR_MASK_LBN 0 +#define PCRF_AB_TRAIN_ERR_MASK_WIDTH 1 + + +/* + * PC_AER_UNCORR_ERR_SEV_REG(32bit): + * AER Uncorrectable error severity register + */ + +#define PCR_AZ_AER_UNCORR_ERR_SEV_REG 0x0000010c +/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */ + +#define PCRF_AZ_UNSUPT_REQ_ERR_SEV_LBN 20 +#define PCRF_AZ_UNSUPT_REQ_ERR_SEV_WIDTH 1 +#define PCRF_AZ_ECRC_ERR_SEV_LBN 19 +#define PCRF_AZ_ECRC_ERR_SEV_WIDTH 1 +#define PCRF_AZ_MALF_TLP_SEV_LBN 18 +#define PCRF_AZ_MALF_TLP_SEV_WIDTH 1 +#define PCRF_AZ_RX_OVF_SEV_LBN 17 +#define PCRF_AZ_RX_OVF_SEV_WIDTH 1 +#define PCRF_AZ_UNEXP_COMP_SEV_LBN 16 +#define PCRF_AZ_UNEXP_COMP_SEV_WIDTH 1 +#define PCRF_AZ_COMP_ABRT_SEV_LBN 15 +#define PCRF_AZ_COMP_ABRT_SEV_WIDTH 1 +#define PCRF_AZ_COMP_TIMEOUT_SEV_LBN 14 +#define PCRF_AZ_COMP_TIMEOUT_SEV_WIDTH 1 +#define PCRF_AZ_FC_PROTO_ERR_SEV_LBN 13 +#define PCRF_AZ_FC_PROTO_ERR_SEV_WIDTH 1 +#define PCRF_AZ_PSON_TLP_SEV_LBN 12 +#define PCRF_AZ_PSON_TLP_SEV_WIDTH 1 +#define PCRF_AZ_DL_PROTO_ERR_SEV_LBN 4 +#define PCRF_AZ_DL_PROTO_ERR_SEV_WIDTH 1 +#define PCRF_AB_TRAIN_ERR_SEV_LBN 0 +#define PCRF_AB_TRAIN_ERR_SEV_WIDTH 1 + + +/* + * PC_AER_CORR_ERR_STAT_REG(32bit): + * AER Correctable error status register + */ + +#define PCR_AZ_AER_CORR_ERR_STAT_REG 0x00000110 +/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */ + +#define PCRF_CZ_ADVSY_NON_FATAL_STAT_LBN 13 +#define PCRF_CZ_ADVSY_NON_FATAL_STAT_WIDTH 1 +#define PCRF_AZ_RPLY_TMR_TOUT_STAT_LBN 12 +#define PCRF_AZ_RPLY_TMR_TOUT_STAT_WIDTH 
1 +#define PCRF_AZ_RPLAY_NUM_RO_STAT_LBN 8 +#define PCRF_AZ_RPLAY_NUM_RO_STAT_WIDTH 1 +#define PCRF_AZ_BAD_DLLP_STAT_LBN 7 +#define PCRF_AZ_BAD_DLLP_STAT_WIDTH 1 +#define PCRF_AZ_BAD_TLP_STAT_LBN 6 +#define PCRF_AZ_BAD_TLP_STAT_WIDTH 1 +#define PCRF_AZ_RX_ERR_STAT_LBN 0 +#define PCRF_AZ_RX_ERR_STAT_WIDTH 1 + + +/* + * PC_AER_CORR_ERR_MASK_REG(32bit): + * AER Correctable error status register + */ + +#define PCR_AZ_AER_CORR_ERR_MASK_REG 0x00000114 +/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */ + +#define PCRF_CZ_ADVSY_NON_FATAL_MASK_LBN 13 +#define PCRF_CZ_ADVSY_NON_FATAL_MASK_WIDTH 1 +#define PCRF_AZ_RPLY_TMR_TOUT_MASK_LBN 12 +#define PCRF_AZ_RPLY_TMR_TOUT_MASK_WIDTH 1 +#define PCRF_AZ_RPLAY_NUM_RO_MASK_LBN 8 +#define PCRF_AZ_RPLAY_NUM_RO_MASK_WIDTH 1 +#define PCRF_AZ_BAD_DLLP_MASK_LBN 7 +#define PCRF_AZ_BAD_DLLP_MASK_WIDTH 1 +#define PCRF_AZ_BAD_TLP_MASK_LBN 6 +#define PCRF_AZ_BAD_TLP_MASK_WIDTH 1 +#define PCRF_AZ_RX_ERR_MASK_LBN 0 +#define PCRF_AZ_RX_ERR_MASK_WIDTH 1 + + +/* + * PC_AER_CAP_CTL_REG(32bit): + * AER capability and control register + */ + +#define PCR_AZ_AER_CAP_CTL_REG 0x00000118 +/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */ + +#define PCRF_AZ_ECRC_CHK_EN_LBN 8 +#define PCRF_AZ_ECRC_CHK_EN_WIDTH 1 +#define PCRF_AZ_ECRC_CHK_CAP_LBN 7 +#define PCRF_AZ_ECRC_CHK_CAP_WIDTH 1 +#define PCRF_AZ_ECRC_GEN_EN_LBN 6 +#define PCRF_AZ_ECRC_GEN_EN_WIDTH 1 +#define PCRF_AZ_ECRC_GEN_CAP_LBN 5 +#define PCRF_AZ_ECRC_GEN_CAP_WIDTH 1 +#define PCRF_AZ_1ST_ERR_PTR_LBN 0 +#define PCRF_AZ_1ST_ERR_PTR_WIDTH 5 + + +/* + * PC_AER_HDR_LOG_REG(128bit): + * AER Header log register + */ + +#define PCR_AZ_AER_HDR_LOG_REG 0x0000011c +/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */ + +#define PCRF_AZ_HDR_LOG_LBN 0 +#define PCRF_AZ_HDR_LOG_WIDTH 128 + + +/* + * PC_DEVSN_CAP_HDR_REG(32bit): + * Device serial number capability header register + */ + +#define PCR_CZ_DEVSN_CAP_HDR_REG 0x00000140 +/* sienaa0,hunta0=pci_f0_config */ + +#define PCRF_CZ_DEVSNCAPHDR_NXT_PTR_LBN 20 +#define PCRF_CZ_DEVSNCAPHDR_NXT_PTR_WIDTH 12 +#define PCRF_CZ_DEVSNCAPHDR_VER_LBN 16 +#define PCRF_CZ_DEVSNCAPHDR_VER_WIDTH 4 +#define PCRF_CZ_DEVSNCAPHDR_ID_LBN 0 +#define PCRF_CZ_DEVSNCAPHDR_ID_WIDTH 16 + + +/* + * PC_DEVSN_DWORD0_REG(32bit): + * Device serial number DWORD0 + */ + +#define PCR_CZ_DEVSN_DWORD0_REG 0x00000144 +/* sienaa0,hunta0=pci_f0_config */ + +#define PCRF_CZ_DEVSN_DWORD0_LBN 0 +#define PCRF_CZ_DEVSN_DWORD0_WIDTH 32 + + +/* + * PC_DEVSN_DWORD1_REG(32bit): + * Device serial number DWORD0 + */ + +#define PCR_CZ_DEVSN_DWORD1_REG 0x00000148 +/* sienaa0,hunta0=pci_f0_config */ + +#define PCRF_CZ_DEVSN_DWORD1_LBN 0 +#define PCRF_CZ_DEVSN_DWORD1_WIDTH 32 + + +/* + * PC_ARI_CAP_HDR_REG(32bit): + * ARI capability header register + */ + +#define PCR_CZ_ARI_CAP_HDR_REG 0x00000150 +/* sienaa0,hunta0=pci_f0_config */ + +#define PCRF_CZ_ARICAPHDR_NXT_PTR_LBN 20 +#define PCRF_CZ_ARICAPHDR_NXT_PTR_WIDTH 12 +#define PCRF_CZ_ARICAPHDR_VER_LBN 16 +#define PCRF_CZ_ARICAPHDR_VER_WIDTH 4 +#define PCRF_CZ_ARICAPHDR_ID_LBN 0 +#define PCRF_CZ_ARICAPHDR_ID_WIDTH 16 + + +/* + * PC_ARI_CAP_REG(16bit): + * ARI Capabilities + */ + +#define PCR_CZ_ARI_CAP_REG 0x00000154 +/* sienaa0,hunta0=pci_f0_config */ + +#define PCRF_CZ_ARI_NXT_FN_NUM_LBN 8 +#define PCRF_CZ_ARI_NXT_FN_NUM_WIDTH 8 +#define PCRF_CZ_ARI_ACS_FNGRP_CAP_LBN 1 +#define PCRF_CZ_ARI_ACS_FNGRP_CAP_WIDTH 1 +#define PCRF_CZ_ARI_MFVC_FNGRP_CAP_LBN 0 +#define PCRF_CZ_ARI_MFVC_FNGRP_CAP_WIDTH 1 + + +/* + * PC_ARI_CTL_REG(16bit): + * ARI Control + */ + +#define 
PCR_CZ_ARI_CTL_REG 0x00000156 +/* sienaa0,hunta0=pci_f0_config */ + +#define PCRF_CZ_ARI_FN_GRP_LBN 4 +#define PCRF_CZ_ARI_FN_GRP_WIDTH 3 +#define PCRF_CZ_ARI_ACS_FNGRP_EN_LBN 1 +#define PCRF_CZ_ARI_ACS_FNGRP_EN_WIDTH 1 +#define PCRF_CZ_ARI_MFVC_FNGRP_EN_LBN 0 +#define PCRF_CZ_ARI_MFVC_FNGRP_EN_WIDTH 1 + + +/* + * PC_SEC_PCIE_CAP_REG(32bit): + * Secondary PCIE Capability Register + */ + +#define PCR_DZ_SEC_PCIE_CAP_REG 0x00000160 +/* hunta0=pci_f0_config */ + +#define PCRF_DZ_SEC_NXT_PTR_LBN 20 +#define PCRF_DZ_SEC_NXT_PTR_WIDTH 12 +#define PCRF_DZ_SEC_VERSION_LBN 16 +#define PCRF_DZ_SEC_VERSION_WIDTH 4 +#define PCRF_DZ_SEC_EXT_CAP_ID_LBN 0 +#define PCRF_DZ_SEC_EXT_CAP_ID_WIDTH 16 + + +/* + * PC_SRIOV_CAP_HDR_REG(32bit): + * SRIOV capability header register + */ + +#define PCR_CC_SRIOV_CAP_HDR_REG 0x00000160 +/* sienaa0=pci_f0_config */ + +#define PCR_DZ_SRIOV_CAP_HDR_REG 0x00000180 +/* hunta0=pci_f0_config */ + +#define PCRF_CZ_SRIOVCAPHDR_NXT_PTR_LBN 20 +#define PCRF_CZ_SRIOVCAPHDR_NXT_PTR_WIDTH 12 +#define PCRF_CZ_SRIOVCAPHDR_VER_LBN 16 +#define PCRF_CZ_SRIOVCAPHDR_VER_WIDTH 4 +#define PCRF_CZ_SRIOVCAPHDR_ID_LBN 0 +#define PCRF_CZ_SRIOVCAPHDR_ID_WIDTH 16 + + +/* + * PC_SRIOV_CAP_REG(32bit): + * SRIOV Capabilities + */ + +#define PCR_CC_SRIOV_CAP_REG 0x00000164 +/* sienaa0=pci_f0_config */ + +#define PCR_DZ_SRIOV_CAP_REG 0x00000184 +/* hunta0=pci_f0_config */ + +#define PCRF_CZ_VF_MIGR_INT_MSG_NUM_LBN 21 +#define PCRF_CZ_VF_MIGR_INT_MSG_NUM_WIDTH 11 +#define PCRF_DZ_VF_ARI_CAP_PRESV_LBN 1 +#define PCRF_DZ_VF_ARI_CAP_PRESV_WIDTH 1 +#define PCRF_CZ_VF_MIGR_CAP_LBN 0 +#define PCRF_CZ_VF_MIGR_CAP_WIDTH 1 + + +/* + * PC_LINK_CONTROL3_REG(32bit): + * Link Control 3. + */ + +#define PCR_DZ_LINK_CONTROL3_REG 0x00000164 +/* hunta0=pci_f0_config */ + +#define PCRF_DZ_LINK_EQ_INT_EN_LBN 1 +#define PCRF_DZ_LINK_EQ_INT_EN_WIDTH 1 +#define PCRF_DZ_PERFORM_EQL_LBN 0 +#define PCRF_DZ_PERFORM_EQL_WIDTH 1 + + +/* + * PC_LANE_ERROR_STAT_REG(32bit): + * Lane Error Status Register. + */ + +#define PCR_DZ_LANE_ERROR_STAT_REG 0x00000168 +/* hunta0=pci_f0_config */ + +#define PCRF_DZ_LANE_STATUS_LBN 0 +#define PCRF_DZ_LANE_STATUS_WIDTH 8 + + +/* + * PC_SRIOV_CTL_REG(16bit): + * SRIOV Control + */ + +#define PCR_CC_SRIOV_CTL_REG 0x00000168 +/* sienaa0=pci_f0_config */ + +#define PCR_DZ_SRIOV_CTL_REG 0x00000188 +/* hunta0=pci_f0_config */ + +#define PCRF_CZ_VF_ARI_CAP_HRCHY_LBN 4 +#define PCRF_CZ_VF_ARI_CAP_HRCHY_WIDTH 1 +#define PCRF_CZ_VF_MSE_LBN 3 +#define PCRF_CZ_VF_MSE_WIDTH 1 +#define PCRF_CZ_VF_MIGR_INT_EN_LBN 2 +#define PCRF_CZ_VF_MIGR_INT_EN_WIDTH 1 +#define PCRF_CZ_VF_MIGR_EN_LBN 1 +#define PCRF_CZ_VF_MIGR_EN_WIDTH 1 +#define PCRF_CZ_VF_EN_LBN 0 +#define PCRF_CZ_VF_EN_WIDTH 1 + + +/* + * PC_SRIOV_STAT_REG(16bit): + * SRIOV Status + */ + +#define PCR_CC_SRIOV_STAT_REG 0x0000016a +/* sienaa0=pci_f0_config */ + +#define PCR_DZ_SRIOV_STAT_REG 0x0000018a +/* hunta0=pci_f0_config */ + +#define PCRF_CZ_VF_MIGR_STAT_LBN 0 +#define PCRF_CZ_VF_MIGR_STAT_WIDTH 1 + + +/* + * PC_LANE01_EQU_CONTROL_REG(32bit): + * Lanes 0,1 Equalization Control Register. 
+ */ + +#define PCR_DZ_LANE01_EQU_CONTROL_REG 0x0000016c +/* hunta0=pci_f0_config */ + +#define PCRF_DZ_LANE1_EQ_CTRL_LBN 16 +#define PCRF_DZ_LANE1_EQ_CTRL_WIDTH 16 +#define PCRF_DZ_LANE0_EQ_CTRL_LBN 0 +#define PCRF_DZ_LANE0_EQ_CTRL_WIDTH 16 + + +/* + * PC_SRIOV_INITIALVFS_REG(16bit): + * SRIOV Initial VFs + */ + +#define PCR_CC_SRIOV_INITIALVFS_REG 0x0000016c +/* sienaa0=pci_f0_config */ + +#define PCR_DZ_SRIOV_INITIALVFS_REG 0x0000018c +/* hunta0=pci_f0_config */ + +#define PCRF_CZ_VF_INITIALVFS_LBN 0 +#define PCRF_CZ_VF_INITIALVFS_WIDTH 16 + + +/* + * PC_SRIOV_TOTALVFS_REG(10bit): + * SRIOV Total VFs + */ + +#define PCR_CC_SRIOV_TOTALVFS_REG 0x0000016e +/* sienaa0=pci_f0_config */ + +#define PCR_DZ_SRIOV_TOTALVFS_REG 0x0000018e +/* hunta0=pci_f0_config */ + +#define PCRF_CZ_VF_TOTALVFS_LBN 0 +#define PCRF_CZ_VF_TOTALVFS_WIDTH 16 + + +/* + * PC_SRIOV_NUMVFS_REG(16bit): + * SRIOV Number of VFs + */ + +#define PCR_CC_SRIOV_NUMVFS_REG 0x00000170 +/* sienaa0=pci_f0_config */ + +#define PCR_DZ_SRIOV_NUMVFS_REG 0x00000190 +/* hunta0=pci_f0_config */ + +#define PCRF_CZ_VF_NUMVFS_LBN 0 +#define PCRF_CZ_VF_NUMVFS_WIDTH 16 + + +/* + * PC_LANE23_EQU_CONTROL_REG(32bit): + * Lanes 2,3 Equalization Control Register. + */ + +#define PCR_DZ_LANE23_EQU_CONTROL_REG 0x00000170 +/* hunta0=pci_f0_config */ + +#define PCRF_DZ_LANE3_EQ_CTRL_LBN 16 +#define PCRF_DZ_LANE3_EQ_CTRL_WIDTH 16 +#define PCRF_DZ_LANE2_EQ_CTRL_LBN 0 +#define PCRF_DZ_LANE2_EQ_CTRL_WIDTH 16 + + +/* + * PC_SRIOV_FN_DPND_LNK_REG(16bit): + * SRIOV Function dependency link + */ + +#define PCR_CC_SRIOV_FN_DPND_LNK_REG 0x00000172 +/* sienaa0=pci_f0_config */ + +#define PCR_DZ_SRIOV_FN_DPND_LNK_REG 0x00000192 +/* hunta0=pci_f0_config */ + +#define PCRF_CZ_SRIOV_FN_DPND_LNK_LBN 0 +#define PCRF_CZ_SRIOV_FN_DPND_LNK_WIDTH 8 + + +/* + * PC_SRIOV_1STVF_OFFSET_REG(16bit): + * SRIOV First VF Offset + */ + +#define PCR_CC_SRIOV_1STVF_OFFSET_REG 0x00000174 +/* sienaa0=pci_f0_config */ + +#define PCR_DZ_SRIOV_1STVF_OFFSET_REG 0x00000194 +/* hunta0=pci_f0_config */ + +#define PCRF_CZ_VF_1STVF_OFFSET_LBN 0 +#define PCRF_CZ_VF_1STVF_OFFSET_WIDTH 16 + + +/* + * PC_LANE45_EQU_CONTROL_REG(32bit): + * Lanes 4,5 Equalization Control Register. + */ + +#define PCR_DZ_LANE45_EQU_CONTROL_REG 0x00000174 +/* hunta0=pci_f0_config */ + +#define PCRF_DZ_LANE5_EQ_CTRL_LBN 16 +#define PCRF_DZ_LANE5_EQ_CTRL_WIDTH 16 +#define PCRF_DZ_LANE4_EQ_CTRL_LBN 0 +#define PCRF_DZ_LANE4_EQ_CTRL_WIDTH 16 + + +/* + * PC_SRIOV_VFSTRIDE_REG(16bit): + * SRIOV VF Stride + */ + +#define PCR_CC_SRIOV_VFSTRIDE_REG 0x00000176 +/* sienaa0=pci_f0_config */ + +#define PCR_DZ_SRIOV_VFSTRIDE_REG 0x00000196 +/* hunta0=pci_f0_config */ + +#define PCRF_CZ_VF_VFSTRIDE_LBN 0 +#define PCRF_CZ_VF_VFSTRIDE_WIDTH 16 + + +/* + * PC_LANE67_EQU_CONTROL_REG(32bit): + * Lanes 6,7 Equalization Control Register. 
+ */ + +#define PCR_DZ_LANE67_EQU_CONTROL_REG 0x00000178 +/* hunta0=pci_f0_config */ + +#define PCRF_DZ_LANE7_EQ_CTRL_LBN 16 +#define PCRF_DZ_LANE7_EQ_CTRL_WIDTH 16 +#define PCRF_DZ_LANE6_EQ_CTRL_LBN 0 +#define PCRF_DZ_LANE6_EQ_CTRL_WIDTH 16 + + +/* + * PC_SRIOV_DEVID_REG(16bit): + * SRIOV VF Device ID + */ + +#define PCR_CC_SRIOV_DEVID_REG 0x0000017a +/* sienaa0=pci_f0_config */ + +#define PCR_DZ_SRIOV_DEVID_REG 0x0000019a +/* hunta0=pci_f0_config */ + +#define PCRF_CZ_VF_DEVID_LBN 0 +#define PCRF_CZ_VF_DEVID_WIDTH 16 + + +/* + * PC_SRIOV_SUP_PAGESZ_REG(16bit): + * SRIOV Supported Page Sizes + */ + +#define PCR_CC_SRIOV_SUP_PAGESZ_REG 0x0000017c +/* sienaa0=pci_f0_config */ + +#define PCR_DZ_SRIOV_SUP_PAGESZ_REG 0x0000019c +/* hunta0=pci_f0_config */ + +#define PCRF_CZ_VF_SUP_PAGESZ_LBN 0 +#define PCRF_CZ_VF_SUP_PAGESZ_WIDTH 16 + + +/* + * PC_SRIOV_SYS_PAGESZ_REG(32bit): + * SRIOV System Page Size + */ + +#define PCR_CC_SRIOV_SYS_PAGESZ_REG 0x00000180 +/* sienaa0=pci_f0_config */ + +#define PCR_DZ_SRIOV_SYS_PAGESZ_REG 0x000001a0 +/* hunta0=pci_f0_config */ + +#define PCRF_CZ_VF_SYS_PAGESZ_LBN 0 +#define PCRF_CZ_VF_SYS_PAGESZ_WIDTH 16 + + +/* + * PC_SRIOV_BAR0_REG(32bit): + * SRIOV VF Bar0 + */ + +#define PCR_CC_SRIOV_BAR0_REG 0x00000184 +/* sienaa0=pci_f0_config */ + +#define PCR_DZ_SRIOV_BAR0_REG 0x000001a4 +/* hunta0=pci_f0_config */ + +#define PCRF_CC_VF_BAR_ADDRESS_LBN 0 +#define PCRF_CC_VF_BAR_ADDRESS_WIDTH 32 +#define PCRF_DZ_VF_BAR0_ADDRESS_LBN 4 +#define PCRF_DZ_VF_BAR0_ADDRESS_WIDTH 28 +#define PCRF_DZ_VF_BAR0_PREF_LBN 3 +#define PCRF_DZ_VF_BAR0_PREF_WIDTH 1 +#define PCRF_DZ_VF_BAR0_TYPE_LBN 1 +#define PCRF_DZ_VF_BAR0_TYPE_WIDTH 2 +#define PCRF_DZ_VF_BAR0_IOM_LBN 0 +#define PCRF_DZ_VF_BAR0_IOM_WIDTH 1 + + +/* + * PC_SRIOV_BAR1_REG(32bit): + * SRIOV Bar1 + */ + +#define PCR_CC_SRIOV_BAR1_REG 0x00000188 +/* sienaa0=pci_f0_config */ + +#define PCR_DZ_SRIOV_BAR1_REG 0x000001a8 +/* hunta0=pci_f0_config */ + +/* defined as PCRF_CC_VF_BAR_ADDRESS_LBN 0; */ +/* defined as PCRF_CC_VF_BAR_ADDRESS_WIDTH 32 */ +#define PCRF_DZ_VF_BAR1_ADDRESS_LBN 0 +#define PCRF_DZ_VF_BAR1_ADDRESS_WIDTH 32 + + +/* + * PC_SRIOV_BAR2_REG(32bit): + * SRIOV Bar2 + */ + +#define PCR_CC_SRIOV_BAR2_REG 0x0000018c +/* sienaa0=pci_f0_config */ + +#define PCR_DZ_SRIOV_BAR2_REG 0x000001ac +/* hunta0=pci_f0_config */ + +/* defined as PCRF_CC_VF_BAR_ADDRESS_LBN 0; */ +/* defined as PCRF_CC_VF_BAR_ADDRESS_WIDTH 32 */ +#define PCRF_DZ_VF_BAR2_ADDRESS_LBN 4 +#define PCRF_DZ_VF_BAR2_ADDRESS_WIDTH 28 +#define PCRF_DZ_VF_BAR2_PREF_LBN 3 +#define PCRF_DZ_VF_BAR2_PREF_WIDTH 1 +#define PCRF_DZ_VF_BAR2_TYPE_LBN 1 +#define PCRF_DZ_VF_BAR2_TYPE_WIDTH 2 +#define PCRF_DZ_VF_BAR2_IOM_LBN 0 +#define PCRF_DZ_VF_BAR2_IOM_WIDTH 1 + + +/* + * PC_SRIOV_BAR3_REG(32bit): + * SRIOV Bar3 + */ + +#define PCR_CC_SRIOV_BAR3_REG 0x00000190 +/* sienaa0=pci_f0_config */ + +#define PCR_DZ_SRIOV_BAR3_REG 0x000001b0 +/* hunta0=pci_f0_config */ + +/* defined as PCRF_CC_VF_BAR_ADDRESS_LBN 0; */ +/* defined as PCRF_CC_VF_BAR_ADDRESS_WIDTH 32 */ +#define PCRF_DZ_VF_BAR3_ADDRESS_LBN 0 +#define PCRF_DZ_VF_BAR3_ADDRESS_WIDTH 32 + + +/* + * PC_SRIOV_BAR4_REG(32bit): + * SRIOV Bar4 + */ + +#define PCR_CC_SRIOV_BAR4_REG 0x00000194 +/* sienaa0=pci_f0_config */ + +#define PCR_DZ_SRIOV_BAR4_REG 0x000001b4 +/* hunta0=pci_f0_config */ + +/* defined as PCRF_CC_VF_BAR_ADDRESS_LBN 0; */ +/* defined as PCRF_CC_VF_BAR_ADDRESS_WIDTH 32 */ +#define PCRF_DZ_VF_BAR4_ADDRESS_LBN 0 +#define PCRF_DZ_VF_BAR4_ADDRESS_WIDTH 32 + + +/* + * PC_SRIOV_BAR5_REG(32bit): + * SRIOV Bar5 + */ 
+ +#define PCR_CC_SRIOV_BAR5_REG 0x00000198 +/* sienaa0=pci_f0_config */ + +#define PCR_DZ_SRIOV_BAR5_REG 0x000001b8 +/* hunta0=pci_f0_config */ + +/* defined as PCRF_CC_VF_BAR_ADDRESS_LBN 0; */ +/* defined as PCRF_CC_VF_BAR_ADDRESS_WIDTH 32 */ +#define PCRF_DZ_VF_BAR5_ADDRESS_LBN 0 +#define PCRF_DZ_VF_BAR5_ADDRESS_WIDTH 32 + + +/* + * PC_SRIOV_RSVD_REG(16bit): + * Reserved register + */ + +#define PCR_DZ_SRIOV_RSVD_REG 0x00000198 +/* hunta0=pci_f0_config */ + +#define PCRF_DZ_VF_RSVD_LBN 0 +#define PCRF_DZ_VF_RSVD_WIDTH 16 + + +/* + * PC_SRIOV_MIBR_SARRAY_OFFSET_REG(32bit): + * SRIOV VF Migration State Array Offset + */ + +#define PCR_CC_SRIOV_MIBR_SARRAY_OFFSET_REG 0x0000019c +/* sienaa0=pci_f0_config */ + +#define PCR_DZ_SRIOV_MIBR_SARRAY_OFFSET_REG 0x000001bc +/* hunta0=pci_f0_config */ + +#define PCRF_CZ_VF_MIGR_OFFSET_LBN 3 +#define PCRF_CZ_VF_MIGR_OFFSET_WIDTH 29 +#define PCRF_CZ_VF_MIGR_BIR_LBN 0 +#define PCRF_CZ_VF_MIGR_BIR_WIDTH 3 + + +/* + * PC_TPH_CAP_HDR_REG(32bit): + * TPH Capability Header Register + */ + +#define PCR_DZ_TPH_CAP_HDR_REG 0x000001c0 +/* hunta0=pci_f0_config */ + +#define PCRF_DZ_TPH_NXT_PTR_LBN 20 +#define PCRF_DZ_TPH_NXT_PTR_WIDTH 12 +#define PCRF_DZ_TPH_VERSION_LBN 16 +#define PCRF_DZ_TPH_VERSION_WIDTH 4 +#define PCRF_DZ_TPH_EXT_CAP_ID_LBN 0 +#define PCRF_DZ_TPH_EXT_CAP_ID_WIDTH 16 + + +/* + * PC_TPH_REQ_CAP_REG(32bit): + * TPH Requester Capability Register + */ + +#define PCR_DZ_TPH_REQ_CAP_REG 0x000001c4 +/* hunta0=pci_f0_config */ + +#define PCRF_DZ_ST_TBLE_SIZE_LBN 16 +#define PCRF_DZ_ST_TBLE_SIZE_WIDTH 11 +#define PCRF_DZ_ST_TBLE_LOC_LBN 9 +#define PCRF_DZ_ST_TBLE_LOC_WIDTH 2 +#define PCRF_DZ_EXT_TPH_MODE_SUP_LBN 8 +#define PCRF_DZ_EXT_TPH_MODE_SUP_WIDTH 1 +#define PCRF_DZ_TPH_DEV_MODE_SUP_LBN 2 +#define PCRF_DZ_TPH_DEV_MODE_SUP_WIDTH 1 +#define PCRF_DZ_TPH_INT_MODE_SUP_LBN 1 +#define PCRF_DZ_TPH_INT_MODE_SUP_WIDTH 1 +#define PCRF_DZ_TPH_NOST_MODE_SUP_LBN 0 +#define PCRF_DZ_TPH_NOST_MODE_SUP_WIDTH 1 + + +/* + * PC_TPH_REQ_CTL_REG(32bit): + * TPH Requester Control Register + */ + +#define PCR_DZ_TPH_REQ_CTL_REG 0x000001c8 +/* hunta0=pci_f0_config */ + +#define PCRF_DZ_TPH_REQ_ENABLE_LBN 8 +#define PCRF_DZ_TPH_REQ_ENABLE_WIDTH 2 +#define PCRF_DZ_TPH_ST_MODE_LBN 0 +#define PCRF_DZ_TPH_ST_MODE_WIDTH 3 + + +/* + * PC_LTR_CAP_HDR_REG(32bit): + * Latency Tolerance Reporting Cap Header Reg + */ + +#define PCR_DZ_LTR_CAP_HDR_REG 0x00000290 +/* hunta0=pci_f0_config */ + +#define PCRF_DZ_LTR_NXT_PTR_LBN 20 +#define PCRF_DZ_LTR_NXT_PTR_WIDTH 12 +#define PCRF_DZ_LTR_VERSION_LBN 16 +#define PCRF_DZ_LTR_VERSION_WIDTH 4 +#define PCRF_DZ_LTR_EXT_CAP_ID_LBN 0 +#define PCRF_DZ_LTR_EXT_CAP_ID_WIDTH 16 + + +/* + * PC_LTR_MAX_SNOOP_REG(32bit): + * LTR Maximum Snoop/No Snoop Register + */ + +#define PCR_DZ_LTR_MAX_SNOOP_REG 0x00000294 +/* hunta0=pci_f0_config */ + +#define PCRF_DZ_LTR_MAX_NOSNOOP_SCALE_LBN 26 +#define PCRF_DZ_LTR_MAX_NOSNOOP_SCALE_WIDTH 3 +#define PCRF_DZ_LTR_MAX_NOSNOOP_LAT_LBN 16 +#define PCRF_DZ_LTR_MAX_NOSNOOP_LAT_WIDTH 10 +#define PCRF_DZ_LTR_MAX_SNOOP_SCALE_LBN 10 +#define PCRF_DZ_LTR_MAX_SNOOP_SCALE_WIDTH 3 +#define PCRF_DZ_LTR_MAX_SNOOP_LAT_LBN 0 +#define PCRF_DZ_LTR_MAX_SNOOP_LAT_WIDTH 10 + + +/* + * PC_ACK_LAT_TMR_REG(32bit): + * ACK latency timer & replay timer register + */ + +#define PCR_AC_ACK_LAT_TMR_REG 0x00000700 +/* falcona0,falconb0,sienaa0=pci_f0_config */ + +#define PCRF_AC_RT_LBN 16 +#define PCRF_AC_RT_WIDTH 16 +#define PCRF_AC_ALT_LBN 0 +#define PCRF_AC_ALT_WIDTH 16 + + +/* + * PC_OTHER_MSG_REG(32bit): + * Other message register + */ + 
+#define PCR_AC_OTHER_MSG_REG 0x00000704 +/* falcona0,falconb0,sienaa0=pci_f0_config */ + +#define PCRF_AC_OM_CRPT3_LBN 24 +#define PCRF_AC_OM_CRPT3_WIDTH 8 +#define PCRF_AC_OM_CRPT2_LBN 16 +#define PCRF_AC_OM_CRPT2_WIDTH 8 +#define PCRF_AC_OM_CRPT1_LBN 8 +#define PCRF_AC_OM_CRPT1_WIDTH 8 +#define PCRF_AC_OM_CRPT0_LBN 0 +#define PCRF_AC_OM_CRPT0_WIDTH 8 + + +/* + * PC_FORCE_LNK_REG(24bit): + * Port force link register + */ + +#define PCR_AC_FORCE_LNK_REG 0x00000708 +/* falcona0,falconb0,sienaa0=pci_f0_config */ + +#define PCRF_AC_LFS_LBN 16 +#define PCRF_AC_LFS_WIDTH 6 +#define PCRF_AC_FL_LBN 15 +#define PCRF_AC_FL_WIDTH 1 +#define PCRF_AC_LN_LBN 0 +#define PCRF_AC_LN_WIDTH 8 + + +/* + * PC_ACK_FREQ_REG(32bit): + * ACK frequency register + */ + +#define PCR_AC_ACK_FREQ_REG 0x0000070c +/* falcona0,falconb0,sienaa0=pci_f0_config */ + +#define PCRF_CC_ALLOW_L1_WITHOUT_L0S_LBN 30 +#define PCRF_CC_ALLOW_L1_WITHOUT_L0S_WIDTH 1 +#define PCRF_AC_L1_ENTR_LAT_LBN 27 +#define PCRF_AC_L1_ENTR_LAT_WIDTH 3 +#define PCRF_AC_L0_ENTR_LAT_LBN 24 +#define PCRF_AC_L0_ENTR_LAT_WIDTH 3 +#define PCRF_CC_COMM_NFTS_LBN 16 +#define PCRF_CC_COMM_NFTS_WIDTH 8 +#define PCRF_AB_ACK_FREQ_REG_RSVD0_LBN 16 +#define PCRF_AB_ACK_FREQ_REG_RSVD0_WIDTH 3 +#define PCRF_AC_MAX_FTS_LBN 8 +#define PCRF_AC_MAX_FTS_WIDTH 8 +#define PCRF_AC_ACK_FREQ_LBN 0 +#define PCRF_AC_ACK_FREQ_WIDTH 8 + + +/* + * PC_PORT_LNK_CTL_REG(32bit): + * Port link control register + */ + +#define PCR_AC_PORT_LNK_CTL_REG 0x00000710 +/* falcona0,falconb0,sienaa0=pci_f0_config */ + +#define PCRF_AB_LRE_LBN 27 +#define PCRF_AB_LRE_WIDTH 1 +#define PCRF_AB_ESYNC_LBN 26 +#define PCRF_AB_ESYNC_WIDTH 1 +#define PCRF_AB_CRPT_LBN 25 +#define PCRF_AB_CRPT_WIDTH 1 +#define PCRF_AB_XB_LBN 24 +#define PCRF_AB_XB_WIDTH 1 +#define PCRF_AC_LC_LBN 16 +#define PCRF_AC_LC_WIDTH 6 +#define PCRF_AC_LDR_LBN 8 +#define PCRF_AC_LDR_WIDTH 4 +#define PCRF_AC_FLM_LBN 7 +#define PCRF_AC_FLM_WIDTH 1 +#define PCRF_AC_LKD_LBN 6 +#define PCRF_AC_LKD_WIDTH 1 +#define PCRF_AC_DLE_LBN 5 +#define PCRF_AC_DLE_WIDTH 1 +#define PCRF_AB_PORT_LNK_CTL_REG_RSVD0_LBN 4 +#define PCRF_AB_PORT_LNK_CTL_REG_RSVD0_WIDTH 1 +#define PCRF_AC_RA_LBN 3 +#define PCRF_AC_RA_WIDTH 1 +#define PCRF_AC_LE_LBN 2 +#define PCRF_AC_LE_WIDTH 1 +#define PCRF_AC_SD_LBN 1 +#define PCRF_AC_SD_WIDTH 1 +#define PCRF_AC_OMR_LBN 0 +#define PCRF_AC_OMR_WIDTH 1 + + +/* + * PC_LN_SKEW_REG(32bit): + * Lane skew register + */ + +#define PCR_AC_LN_SKEW_REG 0x00000714 +/* falcona0,falconb0,sienaa0=pci_f0_config */ + +#define PCRF_AC_DIS_LBN 31 +#define PCRF_AC_DIS_WIDTH 1 +#define PCRF_AB_RST_LBN 30 +#define PCRF_AB_RST_WIDTH 1 +#define PCRF_AC_AD_LBN 25 +#define PCRF_AC_AD_WIDTH 1 +#define PCRF_AC_FCD_LBN 24 +#define PCRF_AC_FCD_WIDTH 1 +#define PCRF_AC_LS2_LBN 16 +#define PCRF_AC_LS2_WIDTH 8 +#define PCRF_AC_LS1_LBN 8 +#define PCRF_AC_LS1_WIDTH 8 +#define PCRF_AC_LS0_LBN 0 +#define PCRF_AC_LS0_WIDTH 8 + + +/* + * PC_SYM_NUM_REG(16bit): + * Symbol number register + */ + +#define PCR_AC_SYM_NUM_REG 0x00000718 +/* falcona0,falconb0,sienaa0=pci_f0_config */ + +#define PCRF_CC_MAX_FUNCTIONS_LBN 29 +#define PCRF_CC_MAX_FUNCTIONS_WIDTH 3 +#define PCRF_CC_FC_WATCHDOG_TMR_LBN 24 +#define PCRF_CC_FC_WATCHDOG_TMR_WIDTH 5 +#define PCRF_CC_ACK_NAK_TMR_MOD_LBN 19 +#define PCRF_CC_ACK_NAK_TMR_MOD_WIDTH 5 +#define PCRF_CC_REPLAY_TMR_MOD_LBN 14 +#define PCRF_CC_REPLAY_TMR_MOD_WIDTH 5 +#define PCRF_AB_ES_LBN 12 +#define PCRF_AB_ES_WIDTH 3 +#define PCRF_AB_SYM_NUM_REG_RSVD0_LBN 11 +#define PCRF_AB_SYM_NUM_REG_RSVD0_WIDTH 1 +#define 
PCRF_CC_NUM_SKP_SYMS_LBN 8 +#define PCRF_CC_NUM_SKP_SYMS_WIDTH 3 +#define PCRF_AB_TS2_LBN 4 +#define PCRF_AB_TS2_WIDTH 4 +#define PCRF_AC_TS1_LBN 0 +#define PCRF_AC_TS1_WIDTH 4 + + +/* + * PC_SYM_TMR_FLT_MSK_REG(16bit): + * Symbol timer and Filter Mask Register + */ + +#define PCR_CC_SYM_TMR_FLT_MSK_REG 0x0000071c +/* sienaa0=pci_f0_config */ + +#define PCRF_CC_DEFAULT_FLT_MSK1_LBN 16 +#define PCRF_CC_DEFAULT_FLT_MSK1_WIDTH 16 +#define PCRF_CC_FC_WDOG_TMR_DIS_LBN 15 +#define PCRF_CC_FC_WDOG_TMR_DIS_WIDTH 1 +#define PCRF_CC_SI1_LBN 8 +#define PCRF_CC_SI1_WIDTH 3 +#define PCRF_CC_SKIP_INT_VAL_LBN 0 +#define PCRF_CC_SKIP_INT_VAL_WIDTH 11 +#define PCRF_CC_SI0_LBN 0 +#define PCRF_CC_SI0_WIDTH 8 + + +/* + * PC_SYM_TMR_REG(16bit): + * Symbol timer register + */ + +#define PCR_AB_SYM_TMR_REG 0x0000071c +/* falcona0,falconb0=pci_f0_config */ + +#define PCRF_AB_ET_LBN 11 +#define PCRF_AB_ET_WIDTH 4 +#define PCRF_AB_SI1_LBN 8 +#define PCRF_AB_SI1_WIDTH 3 +#define PCRF_AB_SI0_LBN 0 +#define PCRF_AB_SI0_WIDTH 8 + + +/* + * PC_FLT_MSK_REG(32bit): + * Filter Mask Register 2 + */ + +#define PCR_CC_FLT_MSK_REG 0x00000720 +/* sienaa0=pci_f0_config */ + +#define PCRF_CC_DEFAULT_FLT_MSK2_LBN 0 +#define PCRF_CC_DEFAULT_FLT_MSK2_WIDTH 32 + + +/* + * PC_PHY_STAT_REG(32bit): + * PHY status register + */ + +#define PCR_AB_PHY_STAT_REG 0x00000720 +/* falcona0,falconb0=pci_f0_config */ + +#define PCR_CC_PHY_STAT_REG 0x00000810 +/* sienaa0=pci_f0_config */ + +#define PCRF_AC_SSL_LBN 3 +#define PCRF_AC_SSL_WIDTH 1 +#define PCRF_AC_SSR_LBN 2 +#define PCRF_AC_SSR_WIDTH 1 +#define PCRF_AC_SSCL_LBN 1 +#define PCRF_AC_SSCL_WIDTH 1 +#define PCRF_AC_SSCD_LBN 0 +#define PCRF_AC_SSCD_WIDTH 1 + + +/* + * PC_PHY_CTL_REG(32bit): + * PHY control register + */ + +#define PCR_AB_PHY_CTL_REG 0x00000724 +/* falcona0,falconb0=pci_f0_config */ + +#define PCR_CC_PHY_CTL_REG 0x00000814 +/* sienaa0=pci_f0_config */ + +#define PCRF_AC_BD_LBN 31 +#define PCRF_AC_BD_WIDTH 1 +#define PCRF_AC_CDS_LBN 30 +#define PCRF_AC_CDS_WIDTH 1 +#define PCRF_AC_DWRAP_LB_LBN 29 +#define PCRF_AC_DWRAP_LB_WIDTH 1 +#define PCRF_AC_EBD_LBN 28 +#define PCRF_AC_EBD_WIDTH 1 +#define PCRF_AC_SNR_LBN 27 +#define PCRF_AC_SNR_WIDTH 1 +#define PCRF_AC_RX_NOT_DET_LBN 2 +#define PCRF_AC_RX_NOT_DET_WIDTH 1 +#define PCRF_AC_FORCE_LOS_VAL_LBN 1 +#define PCRF_AC_FORCE_LOS_VAL_WIDTH 1 +#define PCRF_AC_FORCE_LOS_EN_LBN 0 +#define PCRF_AC_FORCE_LOS_EN_WIDTH 1 + + +/* + * PC_DEBUG0_REG(32bit): + * Debug register 0 + */ + +#define PCR_AC_DEBUG0_REG 0x00000728 +/* falcona0,falconb0,sienaa0=pci_f0_config */ + +#define PCRF_AC_CDI03_LBN 24 +#define PCRF_AC_CDI03_WIDTH 8 +#define PCRF_AC_CDI0_LBN 0 +#define PCRF_AC_CDI0_WIDTH 32 +#define PCRF_AC_CDI02_LBN 16 +#define PCRF_AC_CDI02_WIDTH 8 +#define PCRF_AC_CDI01_LBN 8 +#define PCRF_AC_CDI01_WIDTH 8 +#define PCRF_AC_CDI00_LBN 0 +#define PCRF_AC_CDI00_WIDTH 8 + + +/* + * PC_DEBUG1_REG(32bit): + * Debug register 1 + */ + +#define PCR_AC_DEBUG1_REG 0x0000072c +/* falcona0,falconb0,sienaa0=pci_f0_config */ + +#define PCRF_AC_CDI13_LBN 24 +#define PCRF_AC_CDI13_WIDTH 8 +#define PCRF_AC_CDI1_LBN 0 +#define PCRF_AC_CDI1_WIDTH 32 +#define PCRF_AC_CDI12_LBN 16 +#define PCRF_AC_CDI12_WIDTH 8 +#define PCRF_AC_CDI11_LBN 8 +#define PCRF_AC_CDI11_WIDTH 8 +#define PCRF_AC_CDI10_LBN 0 +#define PCRF_AC_CDI10_WIDTH 8 + + +/* + * PC_XPFCC_STAT_REG(24bit): + * documentation to be written for sum_PC_XPFCC_STAT_REG + */ + +#define PCR_AC_XPFCC_STAT_REG 0x00000730 +/* falcona0,falconb0,sienaa0=pci_f0_config */ + +#define PCRF_AC_XPDC_LBN 12 +#define 
PCRF_AC_XPDC_WIDTH 8 +#define PCRF_AC_XPHC_LBN 0 +#define PCRF_AC_XPHC_WIDTH 12 + + +/* + * PC_XNPFCC_STAT_REG(24bit): + * documentation to be written for sum_PC_XNPFCC_STAT_REG + */ + +#define PCR_AC_XNPFCC_STAT_REG 0x00000734 +/* falcona0,falconb0,sienaa0=pci_f0_config */ + +#define PCRF_AC_XNPDC_LBN 12 +#define PCRF_AC_XNPDC_WIDTH 8 +#define PCRF_AC_XNPHC_LBN 0 +#define PCRF_AC_XNPHC_WIDTH 12 + + +/* + * PC_XCFCC_STAT_REG(24bit): + * documentation to be written for sum_PC_XCFCC_STAT_REG + */ + +#define PCR_AC_XCFCC_STAT_REG 0x00000738 +/* falcona0,falconb0,sienaa0=pci_f0_config */ + +#define PCRF_AC_XCDC_LBN 12 +#define PCRF_AC_XCDC_WIDTH 8 +#define PCRF_AC_XCHC_LBN 0 +#define PCRF_AC_XCHC_WIDTH 12 + + +/* + * PC_Q_STAT_REG(8bit): + * documentation to be written for sum_PC_Q_STAT_REG + */ + +#define PCR_AC_Q_STAT_REG 0x0000073c +/* falcona0,falconb0,sienaa0=pci_f0_config */ + +#define PCRF_AC_RQNE_LBN 2 +#define PCRF_AC_RQNE_WIDTH 1 +#define PCRF_AC_XRNE_LBN 1 +#define PCRF_AC_XRNE_WIDTH 1 +#define PCRF_AC_RCNR_LBN 0 +#define PCRF_AC_RCNR_WIDTH 1 + + +/* + * PC_VC_XMIT_ARB1_REG(32bit): + * VC Transmit Arbitration Register 1 + */ + +#define PCR_CC_VC_XMIT_ARB1_REG 0x00000740 +/* sienaa0=pci_f0_config */ + + + +/* + * PC_VC_XMIT_ARB2_REG(32bit): + * VC Transmit Arbitration Register 2 + */ + +#define PCR_CC_VC_XMIT_ARB2_REG 0x00000744 +/* sienaa0=pci_f0_config */ + + + +/* + * PC_VC0_P_RQ_CTL_REG(32bit): + * VC0 Posted Receive Queue Control + */ + +#define PCR_CC_VC0_P_RQ_CTL_REG 0x00000748 +/* sienaa0=pci_f0_config */ + + + +/* + * PC_VC0_NP_RQ_CTL_REG(32bit): + * VC0 Non-Posted Receive Queue Control + */ + +#define PCR_CC_VC0_NP_RQ_CTL_REG 0x0000074c +/* sienaa0=pci_f0_config */ + + + +/* + * PC_VC0_C_RQ_CTL_REG(32bit): + * VC0 Completion Receive Queue Control + */ + +#define PCR_CC_VC0_C_RQ_CTL_REG 0x00000750 +/* sienaa0=pci_f0_config */ + + + +/* + * PC_GEN2_REG(32bit): + * Gen2 Register + */ + +#define PCR_CC_GEN2_REG 0x0000080c +/* sienaa0=pci_f0_config */ + +#define PCRF_CC_SET_DE_EMPHASIS_LBN 20 +#define PCRF_CC_SET_DE_EMPHASIS_WIDTH 1 +#define PCRF_CC_CFG_TX_COMPLIANCE_LBN 19 +#define PCRF_CC_CFG_TX_COMPLIANCE_WIDTH 1 +#define PCRF_CC_CFG_TX_SWING_LBN 18 +#define PCRF_CC_CFG_TX_SWING_WIDTH 1 +#define PCRF_CC_DIR_SPEED_CHANGE_LBN 17 +#define PCRF_CC_DIR_SPEED_CHANGE_WIDTH 1 +#define PCRF_CC_LANE_ENABLE_LBN 8 +#define PCRF_CC_LANE_ENABLE_WIDTH 9 +#define PCRF_CC_NUM_FTS_LBN 0 +#define PCRF_CC_NUM_FTS_WIDTH 8 + + +#ifdef __cplusplus +} +#endif + +#endif /* _SYS_EFX_REGS_PCI_H */ diff --git a/src/spdk/dpdk/drivers/net/sfc/base/efx_rx.c b/src/spdk/dpdk/drivers/net/sfc/base/efx_rx.c new file mode 100644 index 000000000..cce34cfce --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/base/efx_rx.c @@ -0,0 +1,1720 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2007-2019 Solarflare Communications Inc. 
+ */ + +#include "efx.h" +#include "efx_impl.h" + + +#if EFSYS_OPT_SIENA + +static __checkReturn efx_rc_t +siena_rx_init( + __in efx_nic_t *enp); + +static void +siena_rx_fini( + __in efx_nic_t *enp); + +#if EFSYS_OPT_RX_SCATTER +static __checkReturn efx_rc_t +siena_rx_scatter_enable( + __in efx_nic_t *enp, + __in unsigned int buf_size); +#endif /* EFSYS_OPT_RX_SCATTER */ + +#if EFSYS_OPT_RX_SCALE +static __checkReturn efx_rc_t +siena_rx_scale_mode_set( + __in efx_nic_t *enp, + __in uint32_t rss_context, + __in efx_rx_hash_alg_t alg, + __in efx_rx_hash_type_t type, + __in boolean_t insert); + +static __checkReturn efx_rc_t +siena_rx_scale_key_set( + __in efx_nic_t *enp, + __in uint32_t rss_context, + __in_ecount(n) uint8_t *key, + __in size_t n); + +static __checkReturn efx_rc_t +siena_rx_scale_tbl_set( + __in efx_nic_t *enp, + __in uint32_t rss_context, + __in_ecount(n) unsigned int *table, + __in size_t n); + +static __checkReturn uint32_t +siena_rx_prefix_hash( + __in efx_nic_t *enp, + __in efx_rx_hash_alg_t func, + __in uint8_t *buffer); + +#endif /* EFSYS_OPT_RX_SCALE */ + +static __checkReturn efx_rc_t +siena_rx_prefix_pktlen( + __in efx_nic_t *enp, + __in uint8_t *buffer, + __out uint16_t *lengthp); + +static void +siena_rx_qpost( + __in efx_rxq_t *erp, + __in_ecount(ndescs) efsys_dma_addr_t *addrp, + __in size_t size, + __in unsigned int ndescs, + __in unsigned int completed, + __in unsigned int added); + +static void +siena_rx_qpush( + __in efx_rxq_t *erp, + __in unsigned int added, + __inout unsigned int *pushedp); + +#if EFSYS_OPT_RX_PACKED_STREAM +static void +siena_rx_qpush_ps_credits( + __in efx_rxq_t *erp); + +static __checkReturn uint8_t * +siena_rx_qps_packet_info( + __in efx_rxq_t *erp, + __in uint8_t *buffer, + __in uint32_t buffer_length, + __in uint32_t current_offset, + __out uint16_t *lengthp, + __out uint32_t *next_offsetp, + __out uint32_t *timestamp); +#endif + +static __checkReturn efx_rc_t +siena_rx_qflush( + __in efx_rxq_t *erp); + +static void +siena_rx_qenable( + __in efx_rxq_t *erp); + +static __checkReturn efx_rc_t +siena_rx_qcreate( + __in efx_nic_t *enp, + __in unsigned int index, + __in unsigned int label, + __in efx_rxq_type_t type, + __in_opt const efx_rxq_type_data_t *type_data, + __in efsys_mem_t *esmp, + __in size_t ndescs, + __in uint32_t id, + __in unsigned int flags, + __in efx_evq_t *eep, + __in efx_rxq_t *erp); + +static void +siena_rx_qdestroy( + __in efx_rxq_t *erp); + +#endif /* EFSYS_OPT_SIENA */ + + +#if EFSYS_OPT_SIENA +static const efx_rx_ops_t __efx_rx_siena_ops = { + siena_rx_init, /* erxo_init */ + siena_rx_fini, /* erxo_fini */ +#if EFSYS_OPT_RX_SCATTER + siena_rx_scatter_enable, /* erxo_scatter_enable */ +#endif +#if EFSYS_OPT_RX_SCALE + NULL, /* erxo_scale_context_alloc */ + NULL, /* erxo_scale_context_free */ + siena_rx_scale_mode_set, /* erxo_scale_mode_set */ + siena_rx_scale_key_set, /* erxo_scale_key_set */ + siena_rx_scale_tbl_set, /* erxo_scale_tbl_set */ + siena_rx_prefix_hash, /* erxo_prefix_hash */ +#endif + siena_rx_prefix_pktlen, /* erxo_prefix_pktlen */ + siena_rx_qpost, /* erxo_qpost */ + siena_rx_qpush, /* erxo_qpush */ +#if EFSYS_OPT_RX_PACKED_STREAM + siena_rx_qpush_ps_credits, /* erxo_qpush_ps_credits */ + siena_rx_qps_packet_info, /* erxo_qps_packet_info */ +#endif + siena_rx_qflush, /* erxo_qflush */ + siena_rx_qenable, /* erxo_qenable */ + siena_rx_qcreate, /* erxo_qcreate */ + siena_rx_qdestroy, /* erxo_qdestroy */ +}; +#endif /* EFSYS_OPT_SIENA */ + +#if EFX_OPTS_EF10() +static const efx_rx_ops_t 
__efx_rx_ef10_ops = { + ef10_rx_init, /* erxo_init */ + ef10_rx_fini, /* erxo_fini */ +#if EFSYS_OPT_RX_SCATTER + ef10_rx_scatter_enable, /* erxo_scatter_enable */ +#endif +#if EFSYS_OPT_RX_SCALE + ef10_rx_scale_context_alloc, /* erxo_scale_context_alloc */ + ef10_rx_scale_context_free, /* erxo_scale_context_free */ + ef10_rx_scale_mode_set, /* erxo_scale_mode_set */ + ef10_rx_scale_key_set, /* erxo_scale_key_set */ + ef10_rx_scale_tbl_set, /* erxo_scale_tbl_set */ + ef10_rx_prefix_hash, /* erxo_prefix_hash */ +#endif + ef10_rx_prefix_pktlen, /* erxo_prefix_pktlen */ + ef10_rx_qpost, /* erxo_qpost */ + ef10_rx_qpush, /* erxo_qpush */ +#if EFSYS_OPT_RX_PACKED_STREAM + ef10_rx_qpush_ps_credits, /* erxo_qpush_ps_credits */ + ef10_rx_qps_packet_info, /* erxo_qps_packet_info */ +#endif + ef10_rx_qflush, /* erxo_qflush */ + ef10_rx_qenable, /* erxo_qenable */ + ef10_rx_qcreate, /* erxo_qcreate */ + ef10_rx_qdestroy, /* erxo_qdestroy */ +}; +#endif /* EFX_OPTS_EF10() */ + + + __checkReturn efx_rc_t +efx_rx_init( + __inout efx_nic_t *enp) +{ + const efx_rx_ops_t *erxop; + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC); + + if (!(enp->en_mod_flags & EFX_MOD_EV)) { + rc = EINVAL; + goto fail1; + } + + if (enp->en_mod_flags & EFX_MOD_RX) { + rc = EINVAL; + goto fail2; + } + + switch (enp->en_family) { +#if EFSYS_OPT_SIENA + case EFX_FAMILY_SIENA: + erxop = &__efx_rx_siena_ops; + break; +#endif /* EFSYS_OPT_SIENA */ + +#if EFSYS_OPT_HUNTINGTON + case EFX_FAMILY_HUNTINGTON: + erxop = &__efx_rx_ef10_ops; + break; +#endif /* EFSYS_OPT_HUNTINGTON */ + +#if EFSYS_OPT_MEDFORD + case EFX_FAMILY_MEDFORD: + erxop = &__efx_rx_ef10_ops; + break; +#endif /* EFSYS_OPT_MEDFORD */ + +#if EFSYS_OPT_MEDFORD2 + case EFX_FAMILY_MEDFORD2: + erxop = &__efx_rx_ef10_ops; + break; +#endif /* EFSYS_OPT_MEDFORD2 */ + + default: + EFSYS_ASSERT(0); + rc = ENOTSUP; + goto fail3; + } + + if ((rc = erxop->erxo_init(enp)) != 0) + goto fail4; + + enp->en_erxop = erxop; + enp->en_mod_flags |= EFX_MOD_RX; + return (0); + +fail4: + EFSYS_PROBE(fail4); +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + enp->en_erxop = NULL; + enp->en_mod_flags &= ~EFX_MOD_RX; + return (rc); +} + + void +efx_rx_fini( + __in efx_nic_t *enp) +{ + const efx_rx_ops_t *erxop = enp->en_erxop; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX); + EFSYS_ASSERT3U(enp->en_rx_qcount, ==, 0); + + erxop->erxo_fini(enp); + + enp->en_erxop = NULL; + enp->en_mod_flags &= ~EFX_MOD_RX; +} + +#if EFSYS_OPT_RX_SCATTER + __checkReturn efx_rc_t +efx_rx_scatter_enable( + __in efx_nic_t *enp, + __in unsigned int buf_size) +{ + const efx_rx_ops_t *erxop = enp->en_erxop; + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX); + + if ((rc = erxop->erxo_scatter_enable(enp, buf_size)) != 0) + goto fail1; + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + return (rc); +} +#endif /* EFSYS_OPT_RX_SCATTER */ + +#if EFSYS_OPT_RX_SCALE + __checkReturn efx_rc_t +efx_rx_scale_hash_flags_get( + __in efx_nic_t *enp, + __in efx_rx_hash_alg_t hash_alg, + __out_ecount_part(max_nflags, *nflagsp) unsigned int *flagsp, + __in unsigned int max_nflags, + __out unsigned int *nflagsp) +{ + efx_nic_cfg_t *encp = &enp->en_nic_cfg; + unsigned int nflags = 0; + efx_rc_t rc; + + if (flagsp == NULL 
|| nflagsp == NULL) { + rc = EINVAL; + goto fail1; + } + + if ((encp->enc_rx_scale_hash_alg_mask & (1U << hash_alg)) == 0) { + nflags = 0; + goto done; + } + + /* Helper to add flags word to flags array without buffer overflow */ +#define INSERT_FLAGS(_flags) \ + do { \ + if (nflags >= max_nflags) { \ + rc = E2BIG; \ + goto fail2; \ + } \ + *(flagsp + nflags) = (_flags); \ + nflags++; \ + \ + _NOTE(CONSTANTCONDITION) \ + } while (B_FALSE) + + if (encp->enc_rx_scale_l4_hash_supported != B_FALSE) { + INSERT_FLAGS(EFX_RX_HASH(IPV4_TCP, 4TUPLE)); + INSERT_FLAGS(EFX_RX_HASH(IPV6_TCP, 4TUPLE)); + } + + if ((encp->enc_rx_scale_l4_hash_supported != B_FALSE) && + (encp->enc_rx_scale_additional_modes_supported != B_FALSE)) { + INSERT_FLAGS(EFX_RX_HASH(IPV4_TCP, 2TUPLE_DST)); + INSERT_FLAGS(EFX_RX_HASH(IPV4_TCP, 2TUPLE_SRC)); + + INSERT_FLAGS(EFX_RX_HASH(IPV6_TCP, 2TUPLE_DST)); + INSERT_FLAGS(EFX_RX_HASH(IPV6_TCP, 2TUPLE_SRC)); + + INSERT_FLAGS(EFX_RX_HASH(IPV4_UDP, 4TUPLE)); + INSERT_FLAGS(EFX_RX_HASH(IPV4_UDP, 2TUPLE_DST)); + INSERT_FLAGS(EFX_RX_HASH(IPV4_UDP, 2TUPLE_SRC)); + + INSERT_FLAGS(EFX_RX_HASH(IPV6_UDP, 4TUPLE)); + INSERT_FLAGS(EFX_RX_HASH(IPV6_UDP, 2TUPLE_DST)); + INSERT_FLAGS(EFX_RX_HASH(IPV6_UDP, 2TUPLE_SRC)); + } + + INSERT_FLAGS(EFX_RX_HASH(IPV4_TCP, 2TUPLE)); + INSERT_FLAGS(EFX_RX_HASH(IPV6_TCP, 2TUPLE)); + + INSERT_FLAGS(EFX_RX_HASH(IPV4, 2TUPLE)); + INSERT_FLAGS(EFX_RX_HASH(IPV6, 2TUPLE)); + + if (encp->enc_rx_scale_additional_modes_supported != B_FALSE) { + INSERT_FLAGS(EFX_RX_HASH(IPV4_TCP, 1TUPLE_DST)); + INSERT_FLAGS(EFX_RX_HASH(IPV4_TCP, 1TUPLE_SRC)); + + INSERT_FLAGS(EFX_RX_HASH(IPV6_TCP, 1TUPLE_DST)); + INSERT_FLAGS(EFX_RX_HASH(IPV6_TCP, 1TUPLE_SRC)); + + INSERT_FLAGS(EFX_RX_HASH(IPV4_UDP, 2TUPLE)); + INSERT_FLAGS(EFX_RX_HASH(IPV4_UDP, 1TUPLE_DST)); + INSERT_FLAGS(EFX_RX_HASH(IPV4_UDP, 1TUPLE_SRC)); + + INSERT_FLAGS(EFX_RX_HASH(IPV6_UDP, 2TUPLE)); + INSERT_FLAGS(EFX_RX_HASH(IPV6_UDP, 1TUPLE_DST)); + INSERT_FLAGS(EFX_RX_HASH(IPV6_UDP, 1TUPLE_SRC)); + + INSERT_FLAGS(EFX_RX_HASH(IPV4, 1TUPLE_DST)); + INSERT_FLAGS(EFX_RX_HASH(IPV4, 1TUPLE_SRC)); + + INSERT_FLAGS(EFX_RX_HASH(IPV6, 1TUPLE_DST)); + INSERT_FLAGS(EFX_RX_HASH(IPV6, 1TUPLE_SRC)); + } + + INSERT_FLAGS(EFX_RX_HASH(IPV4_TCP, DISABLE)); + INSERT_FLAGS(EFX_RX_HASH(IPV6_TCP, DISABLE)); + + INSERT_FLAGS(EFX_RX_HASH(IPV4_UDP, DISABLE)); + INSERT_FLAGS(EFX_RX_HASH(IPV6_UDP, DISABLE)); + + INSERT_FLAGS(EFX_RX_HASH(IPV4, DISABLE)); + INSERT_FLAGS(EFX_RX_HASH(IPV6, DISABLE)); + +#undef INSERT_FLAGS + +done: + *nflagsp = nflags; + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +efx_rx_hash_default_support_get( + __in efx_nic_t *enp, + __out efx_rx_hash_support_t *supportp) +{ + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX); + + if (supportp == NULL) { + rc = EINVAL; + goto fail1; + } + + /* + * Report the hashing support the client gets by default if it + * does not allocate an RSS context itself. 
+ */ + *supportp = enp->en_hash_support; + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +efx_rx_scale_default_support_get( + __in efx_nic_t *enp, + __out efx_rx_scale_context_type_t *typep) +{ + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX); + + if (typep == NULL) { + rc = EINVAL; + goto fail1; + } + + /* + * Report the RSS support the client gets by default if it + * does not allocate an RSS context itself. + */ + *typep = enp->en_rss_context_type; + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} +#endif /* EFSYS_OPT_RX_SCALE */ + +#if EFSYS_OPT_RX_SCALE + __checkReturn efx_rc_t +efx_rx_scale_context_alloc( + __in efx_nic_t *enp, + __in efx_rx_scale_context_type_t type, + __in uint32_t num_queues, + __out uint32_t *rss_contextp) +{ + const efx_rx_ops_t *erxop = enp->en_erxop; + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX); + + if (erxop->erxo_scale_context_alloc == NULL) { + rc = ENOTSUP; + goto fail1; + } + if ((rc = erxop->erxo_scale_context_alloc(enp, type, + num_queues, rss_contextp)) != 0) { + goto fail2; + } + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + return (rc); +} +#endif /* EFSYS_OPT_RX_SCALE */ + +#if EFSYS_OPT_RX_SCALE + __checkReturn efx_rc_t +efx_rx_scale_context_free( + __in efx_nic_t *enp, + __in uint32_t rss_context) +{ + const efx_rx_ops_t *erxop = enp->en_erxop; + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX); + + if (erxop->erxo_scale_context_free == NULL) { + rc = ENOTSUP; + goto fail1; + } + if ((rc = erxop->erxo_scale_context_free(enp, rss_context)) != 0) + goto fail2; + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + return (rc); +} +#endif /* EFSYS_OPT_RX_SCALE */ + +#if EFSYS_OPT_RX_SCALE + __checkReturn efx_rc_t +efx_rx_scale_mode_set( + __in efx_nic_t *enp, + __in uint32_t rss_context, + __in efx_rx_hash_alg_t alg, + __in efx_rx_hash_type_t type, + __in boolean_t insert) +{ + efx_nic_cfg_t *encp = &enp->en_nic_cfg; + const efx_rx_ops_t *erxop = enp->en_erxop; + efx_rx_hash_type_t type_check; + unsigned int i; + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX); + + /* + * Legacy flags and modern bits cannot be + * used at the same time in the hash type. + */ + if ((type & EFX_RX_HASH_LEGACY_MASK) && + (type & ~EFX_RX_HASH_LEGACY_MASK)) { + rc = EINVAL; + goto fail1; + } + + /* + * If RSS hash type is represented by additional bits + * in the value, the latter need to be verified since + * not all bit combinations are valid RSS modes. Also, + * depending on the firmware, some valid combinations + * may be unsupported. Discern additional bits in the + * type value and try to recognise valid combinations. + * If some bits remain unrecognised, report the error. 
+ */ + type_check = type & ~EFX_RX_HASH_LEGACY_MASK; + if (type_check != 0) { + unsigned int type_flags[EFX_RX_HASH_NFLAGS]; + unsigned int type_nflags; + + rc = efx_rx_scale_hash_flags_get(enp, alg, type_flags, + EFX_ARRAY_SIZE(type_flags), &type_nflags); + if (rc != 0) + goto fail2; + + for (i = 0; i < type_nflags; ++i) { + if ((type_check & type_flags[i]) == type_flags[i]) + type_check &= ~(type_flags[i]); + } + + if (type_check != 0) { + rc = EINVAL; + goto fail3; + } + } + + /* + * Translate EFX_RX_HASH() flags to their legacy counterparts + * provided that the FW claims no support for additional modes. + */ + if (encp->enc_rx_scale_additional_modes_supported == B_FALSE) { + efx_rx_hash_type_t t_ipv4 = EFX_RX_HASH(IPV4, 2TUPLE) | + EFX_RX_HASH(IPV4_TCP, 2TUPLE); + efx_rx_hash_type_t t_ipv6 = EFX_RX_HASH(IPV6, 2TUPLE) | + EFX_RX_HASH(IPV6_TCP, 2TUPLE); + efx_rx_hash_type_t t_ipv4_tcp = EFX_RX_HASH(IPV4_TCP, 4TUPLE); + efx_rx_hash_type_t t_ipv6_tcp = EFX_RX_HASH(IPV6_TCP, 4TUPLE); + + if ((type & t_ipv4) == t_ipv4) + type |= EFX_RX_HASH_IPV4; + if ((type & t_ipv6) == t_ipv6) + type |= EFX_RX_HASH_IPV6; + + if (encp->enc_rx_scale_l4_hash_supported == B_TRUE) { + if ((type & t_ipv4_tcp) == t_ipv4_tcp) + type |= EFX_RX_HASH_TCPIPV4; + if ((type & t_ipv6_tcp) == t_ipv6_tcp) + type |= EFX_RX_HASH_TCPIPV6; + } + + type &= EFX_RX_HASH_LEGACY_MASK; + } + + if (erxop->erxo_scale_mode_set != NULL) { + if ((rc = erxop->erxo_scale_mode_set(enp, rss_context, alg, + type, insert)) != 0) + goto fail4; + } + + return (0); + +fail4: + EFSYS_PROBE(fail4); +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + return (rc); +} +#endif /* EFSYS_OPT_RX_SCALE */ + +#if EFSYS_OPT_RX_SCALE + __checkReturn efx_rc_t +efx_rx_scale_key_set( + __in efx_nic_t *enp, + __in uint32_t rss_context, + __in_ecount(n) uint8_t *key, + __in size_t n) +{ + const efx_rx_ops_t *erxop = enp->en_erxop; + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX); + + if ((rc = erxop->erxo_scale_key_set(enp, rss_context, key, n)) != 0) + goto fail1; + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} +#endif /* EFSYS_OPT_RX_SCALE */ + +#if EFSYS_OPT_RX_SCALE + __checkReturn efx_rc_t +efx_rx_scale_tbl_set( + __in efx_nic_t *enp, + __in uint32_t rss_context, + __in_ecount(n) unsigned int *table, + __in size_t n) +{ + const efx_rx_ops_t *erxop = enp->en_erxop; + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX); + + if ((rc = erxop->erxo_scale_tbl_set(enp, rss_context, table, n)) != 0) + goto fail1; + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} +#endif /* EFSYS_OPT_RX_SCALE */ + + void +efx_rx_qpost( + __in efx_rxq_t *erp, + __in_ecount(ndescs) efsys_dma_addr_t *addrp, + __in size_t size, + __in unsigned int ndescs, + __in unsigned int completed, + __in unsigned int added) +{ + efx_nic_t *enp = erp->er_enp; + const efx_rx_ops_t *erxop = enp->en_erxop; + + EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC); + EFSYS_ASSERT(erp->er_buf_size == 0 || size == erp->er_buf_size); + + erxop->erxo_qpost(erp, addrp, size, ndescs, completed, added); +} + +#if EFSYS_OPT_RX_PACKED_STREAM + + void +efx_rx_qpush_ps_credits( + __in efx_rxq_t *erp) +{ + efx_nic_t *enp = erp->er_enp; + const efx_rx_ops_t *erxop = enp->en_erxop; + + EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC); + + 
erxop->erxo_qpush_ps_credits(erp); +} + + __checkReturn uint8_t * +efx_rx_qps_packet_info( + __in efx_rxq_t *erp, + __in uint8_t *buffer, + __in uint32_t buffer_length, + __in uint32_t current_offset, + __out uint16_t *lengthp, + __out uint32_t *next_offsetp, + __out uint32_t *timestamp) +{ + efx_nic_t *enp = erp->er_enp; + const efx_rx_ops_t *erxop = enp->en_erxop; + + return (erxop->erxo_qps_packet_info(erp, buffer, + buffer_length, current_offset, lengthp, + next_offsetp, timestamp)); +} + +#endif /* EFSYS_OPT_RX_PACKED_STREAM */ + + void +efx_rx_qpush( + __in efx_rxq_t *erp, + __in unsigned int added, + __inout unsigned int *pushedp) +{ + efx_nic_t *enp = erp->er_enp; + const efx_rx_ops_t *erxop = enp->en_erxop; + + EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC); + + erxop->erxo_qpush(erp, added, pushedp); +} + + __checkReturn efx_rc_t +efx_rx_qflush( + __in efx_rxq_t *erp) +{ + efx_nic_t *enp = erp->er_enp; + const efx_rx_ops_t *erxop = enp->en_erxop; + efx_rc_t rc; + + EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC); + + if ((rc = erxop->erxo_qflush(erp)) != 0) + goto fail1; + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn size_t +efx_rxq_size( + __in const efx_nic_t *enp, + __in unsigned int ndescs) +{ + const efx_nic_cfg_t *encp = efx_nic_cfg_get(enp); + + return (ndescs * encp->enc_rx_desc_size); +} + + __checkReturn unsigned int +efx_rxq_nbufs( + __in const efx_nic_t *enp, + __in unsigned int ndescs) +{ + return (EFX_DIV_ROUND_UP(efx_rxq_size(enp, ndescs), EFX_BUF_SIZE)); +} + + void +efx_rx_qenable( + __in efx_rxq_t *erp) +{ + efx_nic_t *enp = erp->er_enp; + const efx_rx_ops_t *erxop = enp->en_erxop; + + EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC); + + erxop->erxo_qenable(erp); +} + +static __checkReturn efx_rc_t +efx_rx_qcreate_internal( + __in efx_nic_t *enp, + __in unsigned int index, + __in unsigned int label, + __in efx_rxq_type_t type, + __in_opt const efx_rxq_type_data_t *type_data, + __in efsys_mem_t *esmp, + __in size_t ndescs, + __in uint32_t id, + __in unsigned int flags, + __in efx_evq_t *eep, + __deref_out efx_rxq_t **erpp) +{ + const efx_rx_ops_t *erxop = enp->en_erxop; + efx_rxq_t *erp; + const efx_nic_cfg_t *encp = efx_nic_cfg_get(enp); + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX); + + EFSYS_ASSERT(ISP2(encp->enc_rxq_max_ndescs)); + EFSYS_ASSERT(ISP2(encp->enc_rxq_min_ndescs)); + + if (!ISP2(ndescs) || + ndescs < encp->enc_rxq_min_ndescs || + ndescs > encp->enc_rxq_max_ndescs) { + rc = EINVAL; + goto fail1; + } + + /* Allocate an RXQ object */ + EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (efx_rxq_t), erp); + + if (erp == NULL) { + rc = ENOMEM; + goto fail2; + } + + erp->er_magic = EFX_RXQ_MAGIC; + erp->er_enp = enp; + erp->er_index = index; + erp->er_mask = ndescs - 1; + erp->er_esmp = esmp; + + if ((rc = erxop->erxo_qcreate(enp, index, label, type, type_data, esmp, + ndescs, id, flags, eep, erp)) != 0) + goto fail3; + + enp->en_rx_qcount++; + *erpp = erp; + + return (0); + +fail3: + EFSYS_PROBE(fail3); + + EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_rxq_t), erp); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +efx_rx_qcreate( + __in efx_nic_t *enp, + __in unsigned int index, + __in unsigned int label, + __in efx_rxq_type_t type, + __in size_t buf_size, + __in efsys_mem_t *esmp, + __in size_t ndescs, + __in uint32_t id, + __in unsigned int flags, + __in efx_evq_t 
*eep, + __deref_out efx_rxq_t **erpp) +{ + efx_rxq_type_data_t type_data; + + memset(&type_data, 0, sizeof (type_data)); + + type_data.ertd_default.ed_buf_size = buf_size; + + return efx_rx_qcreate_internal(enp, index, label, type, &type_data, + esmp, ndescs, id, flags, eep, erpp); +} + +#if EFSYS_OPT_RX_PACKED_STREAM + + __checkReturn efx_rc_t +efx_rx_qcreate_packed_stream( + __in efx_nic_t *enp, + __in unsigned int index, + __in unsigned int label, + __in uint32_t ps_buf_size, + __in efsys_mem_t *esmp, + __in size_t ndescs, + __in efx_evq_t *eep, + __deref_out efx_rxq_t **erpp) +{ + efx_rxq_type_data_t type_data; + + memset(&type_data, 0, sizeof (type_data)); + + type_data.ertd_packed_stream.eps_buf_size = ps_buf_size; + + return efx_rx_qcreate_internal(enp, index, label, + EFX_RXQ_TYPE_PACKED_STREAM, &type_data, esmp, ndescs, + 0 /* id unused on EF10 */, EFX_RXQ_FLAG_NONE, eep, erpp); +} + +#endif + +#if EFSYS_OPT_RX_ES_SUPER_BUFFER + + __checkReturn efx_rc_t +efx_rx_qcreate_es_super_buffer( + __in efx_nic_t *enp, + __in unsigned int index, + __in unsigned int label, + __in uint32_t n_bufs_per_desc, + __in uint32_t max_dma_len, + __in uint32_t buf_stride, + __in uint32_t hol_block_timeout, + __in efsys_mem_t *esmp, + __in size_t ndescs, + __in unsigned int flags, + __in efx_evq_t *eep, + __deref_out efx_rxq_t **erpp) +{ + efx_rc_t rc; + efx_rxq_type_data_t type_data; + + if (hol_block_timeout > EFX_RXQ_ES_SUPER_BUFFER_HOL_BLOCK_MAX) { + rc = EINVAL; + goto fail1; + } + + memset(&type_data, 0, sizeof (type_data)); + + type_data.ertd_es_super_buffer.eessb_bufs_per_desc = n_bufs_per_desc; + type_data.ertd_es_super_buffer.eessb_max_dma_len = max_dma_len; + type_data.ertd_es_super_buffer.eessb_buf_stride = buf_stride; + type_data.ertd_es_super_buffer.eessb_hol_block_timeout = + hol_block_timeout; + + rc = efx_rx_qcreate_internal(enp, index, label, + EFX_RXQ_TYPE_ES_SUPER_BUFFER, &type_data, esmp, ndescs, + 0 /* id unused on EF10 */, flags, eep, erpp); + if (rc != 0) + goto fail2; + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +#endif + + + void +efx_rx_qdestroy( + __in efx_rxq_t *erp) +{ + efx_nic_t *enp = erp->er_enp; + const efx_rx_ops_t *erxop = enp->en_erxop; + + EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC); + + erxop->erxo_qdestroy(erp); +} + + __checkReturn efx_rc_t +efx_pseudo_hdr_pkt_length_get( + __in efx_rxq_t *erp, + __in uint8_t *buffer, + __out uint16_t *lengthp) +{ + efx_nic_t *enp = erp->er_enp; + const efx_rx_ops_t *erxop = enp->en_erxop; + + EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC); + + return (erxop->erxo_prefix_pktlen(enp, buffer, lengthp)); +} + +#if EFSYS_OPT_RX_SCALE + __checkReturn uint32_t +efx_pseudo_hdr_hash_get( + __in efx_rxq_t *erp, + __in efx_rx_hash_alg_t func, + __in uint8_t *buffer) +{ + efx_nic_t *enp = erp->er_enp; + const efx_rx_ops_t *erxop = enp->en_erxop; + + EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC); + + EFSYS_ASSERT3U(enp->en_hash_support, ==, EFX_RX_HASH_AVAILABLE); + return (erxop->erxo_prefix_hash(enp, func, buffer)); +} +#endif /* EFSYS_OPT_RX_SCALE */ + +#if EFSYS_OPT_SIENA + +static __checkReturn efx_rc_t +siena_rx_init( + __in efx_nic_t *enp) +{ + efx_oword_t oword; + unsigned int index; + + EFX_BAR_READO(enp, FR_AZ_RX_CFG_REG, &oword); + + EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_DESC_PUSH_EN, 0); + EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_HASH_ALG, 0); + EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_IP_HASH, 0); + EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_TCP_SUP, 0); + 
EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_HASH_INSRT_HDR, 0); + EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_USR_BUF_SIZE, 0x3000 / 32); + EFX_BAR_WRITEO(enp, FR_AZ_RX_CFG_REG, &oword); + + /* Zero the RSS table */ + for (index = 0; index < FR_BZ_RX_INDIRECTION_TBL_ROWS; + index++) { + EFX_ZERO_OWORD(oword); + EFX_BAR_TBL_WRITEO(enp, FR_BZ_RX_INDIRECTION_TBL, + index, &oword, B_TRUE); + } + +#if EFSYS_OPT_RX_SCALE + /* The RSS key and indirection table are writable. */ + enp->en_rss_context_type = EFX_RX_SCALE_EXCLUSIVE; + + /* Hardware can insert RX hash with/without RSS */ + enp->en_hash_support = EFX_RX_HASH_AVAILABLE; +#endif /* EFSYS_OPT_RX_SCALE */ + + return (0); +} + +#if EFSYS_OPT_RX_SCATTER +static __checkReturn efx_rc_t +siena_rx_scatter_enable( + __in efx_nic_t *enp, + __in unsigned int buf_size) +{ + unsigned int nbuf32; + efx_oword_t oword; + efx_rc_t rc; + + nbuf32 = buf_size / 32; + if ((nbuf32 == 0) || + (nbuf32 >= (1 << FRF_BZ_RX_USR_BUF_SIZE_WIDTH)) || + ((buf_size % 32) != 0)) { + rc = EINVAL; + goto fail1; + } + + if (enp->en_rx_qcount > 0) { + rc = EBUSY; + goto fail2; + } + + /* Set scatter buffer size */ + EFX_BAR_READO(enp, FR_AZ_RX_CFG_REG, &oword); + EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_USR_BUF_SIZE, nbuf32); + EFX_BAR_WRITEO(enp, FR_AZ_RX_CFG_REG, &oword); + + /* Enable scatter for packets not matching a filter */ + EFX_BAR_READO(enp, FR_AZ_RX_FILTER_CTL_REG, &oword); + EFX_SET_OWORD_FIELD(oword, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q, 1); + EFX_BAR_WRITEO(enp, FR_AZ_RX_FILTER_CTL_REG, &oword); + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} +#endif /* EFSYS_OPT_RX_SCATTER */ + + +#define EFX_RX_LFSR_HASH(_enp, _insert) \ + do { \ + efx_oword_t oword; \ + \ + EFX_BAR_READO((_enp), FR_AZ_RX_CFG_REG, &oword); \ + EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_HASH_ALG, 0); \ + EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_IP_HASH, 0); \ + EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_TCP_SUP, 0); \ + EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_HASH_INSRT_HDR, \ + (_insert) ? 1 : 0); \ + EFX_BAR_WRITEO((_enp), FR_AZ_RX_CFG_REG, &oword); \ + \ + if ((_enp)->en_family == EFX_FAMILY_SIENA) { \ + EFX_BAR_READO((_enp), FR_CZ_RX_RSS_IPV6_REG3, \ + &oword); \ + EFX_SET_OWORD_FIELD(oword, \ + FRF_CZ_RX_RSS_IPV6_THASH_ENABLE, 0); \ + EFX_BAR_WRITEO((_enp), FR_CZ_RX_RSS_IPV6_REG3, \ + &oword); \ + } \ + \ + _NOTE(CONSTANTCONDITION) \ + } while (B_FALSE) + +#define EFX_RX_TOEPLITZ_IPV4_HASH(_enp, _insert, _ip, _tcp) \ + do { \ + efx_oword_t oword; \ + \ + EFX_BAR_READO((_enp), FR_AZ_RX_CFG_REG, &oword); \ + EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_HASH_ALG, 1); \ + EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_IP_HASH, \ + (_ip) ? 1 : 0); \ + EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_TCP_SUP, \ + (_tcp) ? 0 : 1); \ + EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_HASH_INSRT_HDR, \ + (_insert) ? 1 : 0); \ + EFX_BAR_WRITEO((_enp), FR_AZ_RX_CFG_REG, &oword); \ + \ + _NOTE(CONSTANTCONDITION) \ + } while (B_FALSE) + +#define EFX_RX_TOEPLITZ_IPV6_HASH(_enp, _ip, _tcp, _rc) \ + do { \ + efx_oword_t oword; \ + \ + EFX_BAR_READO((_enp), FR_CZ_RX_RSS_IPV6_REG3, &oword); \ + EFX_SET_OWORD_FIELD(oword, \ + FRF_CZ_RX_RSS_IPV6_THASH_ENABLE, 1); \ + EFX_SET_OWORD_FIELD(oword, \ + FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE, (_ip) ? 1 : 0); \ + EFX_SET_OWORD_FIELD(oword, \ + FRF_CZ_RX_RSS_IPV6_TCP_SUPPRESS, (_tcp) ? 
0 : 1); \ + EFX_BAR_WRITEO((_enp), FR_CZ_RX_RSS_IPV6_REG3, &oword); \ + \ + (_rc) = 0; \ + \ + _NOTE(CONSTANTCONDITION) \ + } while (B_FALSE) + + +#if EFSYS_OPT_RX_SCALE + +static __checkReturn efx_rc_t +siena_rx_scale_mode_set( + __in efx_nic_t *enp, + __in uint32_t rss_context, + __in efx_rx_hash_alg_t alg, + __in efx_rx_hash_type_t type, + __in boolean_t insert) +{ + efx_rc_t rc; + + if (rss_context != EFX_RSS_CONTEXT_DEFAULT) { + rc = EINVAL; + goto fail1; + } + + switch (alg) { + case EFX_RX_HASHALG_LFSR: + EFX_RX_LFSR_HASH(enp, insert); + break; + + case EFX_RX_HASHALG_TOEPLITZ: + EFX_RX_TOEPLITZ_IPV4_HASH(enp, insert, + (type & EFX_RX_HASH_IPV4) ? B_TRUE : B_FALSE, + (type & EFX_RX_HASH_TCPIPV4) ? B_TRUE : B_FALSE); + + EFX_RX_TOEPLITZ_IPV6_HASH(enp, + (type & EFX_RX_HASH_IPV6) ? B_TRUE : B_FALSE, + (type & EFX_RX_HASH_TCPIPV6) ? B_TRUE : B_FALSE, + rc); + if (rc != 0) + goto fail2; + + break; + + default: + rc = EINVAL; + goto fail3; + } + + return (0); + +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + EFX_RX_LFSR_HASH(enp, B_FALSE); + + return (rc); +} +#endif + +#if EFSYS_OPT_RX_SCALE +static __checkReturn efx_rc_t +siena_rx_scale_key_set( + __in efx_nic_t *enp, + __in uint32_t rss_context, + __in_ecount(n) uint8_t *key, + __in size_t n) +{ + efx_oword_t oword; + unsigned int byte; + unsigned int offset; + efx_rc_t rc; + + if (rss_context != EFX_RSS_CONTEXT_DEFAULT) { + rc = EINVAL; + goto fail1; + } + + byte = 0; + + /* Write Toeplitz IPv4 hash key */ + EFX_ZERO_OWORD(oword); + for (offset = (FRF_BZ_RX_RSS_TKEY_LBN + FRF_BZ_RX_RSS_TKEY_WIDTH) / 8; + offset > 0 && byte < n; + --offset) + oword.eo_u8[offset - 1] = key[byte++]; + + EFX_BAR_WRITEO(enp, FR_BZ_RX_RSS_TKEY_REG, &oword); + + byte = 0; + + /* Verify Toeplitz IPv4 hash key */ + EFX_BAR_READO(enp, FR_BZ_RX_RSS_TKEY_REG, &oword); + for (offset = (FRF_BZ_RX_RSS_TKEY_LBN + FRF_BZ_RX_RSS_TKEY_WIDTH) / 8; + offset > 0 && byte < n; + --offset) { + if (oword.eo_u8[offset - 1] != key[byte++]) { + rc = EFAULT; + goto fail2; + } + } + + if ((enp->en_features & EFX_FEATURE_IPV6) == 0) + goto done; + + byte = 0; + + /* Write Toeplitz IPv6 hash key 3 */ + EFX_BAR_READO(enp, FR_CZ_RX_RSS_IPV6_REG3, &oword); + for (offset = (FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN + + FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH) / 8; + offset > 0 && byte < n; + --offset) + oword.eo_u8[offset - 1] = key[byte++]; + + EFX_BAR_WRITEO(enp, FR_CZ_RX_RSS_IPV6_REG3, &oword); + + /* Write Toeplitz IPv6 hash key 2 */ + EFX_ZERO_OWORD(oword); + for (offset = (FRF_CZ_RX_RSS_IPV6_TKEY_MID_LBN + + FRF_CZ_RX_RSS_IPV6_TKEY_MID_WIDTH) / 8; + offset > 0 && byte < n; + --offset) + oword.eo_u8[offset - 1] = key[byte++]; + + EFX_BAR_WRITEO(enp, FR_CZ_RX_RSS_IPV6_REG2, &oword); + + /* Write Toeplitz IPv6 hash key 1 */ + EFX_ZERO_OWORD(oword); + for (offset = (FRF_CZ_RX_RSS_IPV6_TKEY_LO_LBN + + FRF_CZ_RX_RSS_IPV6_TKEY_LO_WIDTH) / 8; + offset > 0 && byte < n; + --offset) + oword.eo_u8[offset - 1] = key[byte++]; + + EFX_BAR_WRITEO(enp, FR_CZ_RX_RSS_IPV6_REG1, &oword); + + byte = 0; + + /* Verify Toeplitz IPv6 hash key 3 */ + EFX_BAR_READO(enp, FR_CZ_RX_RSS_IPV6_REG3, &oword); + for (offset = (FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN + + FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH) / 8; + offset > 0 && byte < n; + --offset) { + if (oword.eo_u8[offset - 1] != key[byte++]) { + rc = EFAULT; + goto fail3; + } + } + + /* Verify Toeplitz IPv6 hash key 2 */ + EFX_BAR_READO(enp, FR_CZ_RX_RSS_IPV6_REG2, &oword); + for (offset = (FRF_CZ_RX_RSS_IPV6_TKEY_MID_LBN + + 
FRF_CZ_RX_RSS_IPV6_TKEY_MID_WIDTH) / 8; + offset > 0 && byte < n; + --offset) { + if (oword.eo_u8[offset - 1] != key[byte++]) { + rc = EFAULT; + goto fail4; + } + } + + /* Verify Toeplitz IPv6 hash key 1 */ + EFX_BAR_READO(enp, FR_CZ_RX_RSS_IPV6_REG1, &oword); + for (offset = (FRF_CZ_RX_RSS_IPV6_TKEY_LO_LBN + + FRF_CZ_RX_RSS_IPV6_TKEY_LO_WIDTH) / 8; + offset > 0 && byte < n; + --offset) { + if (oword.eo_u8[offset - 1] != key[byte++]) { + rc = EFAULT; + goto fail5; + } + } + +done: + return (0); + +fail5: + EFSYS_PROBE(fail5); +fail4: + EFSYS_PROBE(fail4); +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} +#endif + +#if EFSYS_OPT_RX_SCALE +static __checkReturn efx_rc_t +siena_rx_scale_tbl_set( + __in efx_nic_t *enp, + __in uint32_t rss_context, + __in_ecount(n) unsigned int *table, + __in size_t n) +{ + efx_oword_t oword; + int index; + efx_rc_t rc; + + EFX_STATIC_ASSERT(EFX_RSS_TBL_SIZE == FR_BZ_RX_INDIRECTION_TBL_ROWS); + EFX_STATIC_ASSERT(EFX_MAXRSS == (1 << FRF_BZ_IT_QUEUE_WIDTH)); + + if (rss_context != EFX_RSS_CONTEXT_DEFAULT) { + rc = EINVAL; + goto fail1; + } + + if (n > FR_BZ_RX_INDIRECTION_TBL_ROWS) { + rc = EINVAL; + goto fail2; + } + + for (index = 0; index < FR_BZ_RX_INDIRECTION_TBL_ROWS; index++) { + uint32_t byte; + + /* Calculate the entry to place in the table */ + byte = (n > 0) ? (uint32_t)table[index % n] : 0; + + EFSYS_PROBE2(table, int, index, uint32_t, byte); + + EFX_POPULATE_OWORD_1(oword, FRF_BZ_IT_QUEUE, byte); + + /* Write the table */ + EFX_BAR_TBL_WRITEO(enp, FR_BZ_RX_INDIRECTION_TBL, + index, &oword, B_TRUE); + } + + for (index = FR_BZ_RX_INDIRECTION_TBL_ROWS - 1; index >= 0; --index) { + uint32_t byte; + + /* Determine if we're starting a new batch */ + byte = (n > 0) ? (uint32_t)table[index % n] : 0; + + /* Read the table */ + EFX_BAR_TBL_READO(enp, FR_BZ_RX_INDIRECTION_TBL, + index, &oword, B_TRUE); + + /* Verify the entry */ + if (EFX_OWORD_FIELD(oword, FRF_BZ_IT_QUEUE) != byte) { + rc = EFAULT; + goto fail3; + } + } + + return (0); + +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} +#endif + +/* + * Falcon/Siena pseudo-header + * -------------------------- + * + * Receive packets are prefixed by an optional 16 byte pseudo-header. 
+ * The pseudo-header is a byte array of one of the forms: + * + * 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 + * xx.xx.xx.xx.xx.xx.xx.xx.xx.xx.xx.xx.TT.TT.TT.TT + * xx.xx.xx.xx.xx.xx.xx.xx.xx.xx.xx.xx.xx.xx.LL.LL + * + * where: + * TT.TT.TT.TT Toeplitz hash (32-bit big-endian) + * LL.LL LFSR hash (16-bit big-endian) + */ + +#if EFSYS_OPT_RX_SCALE +static __checkReturn uint32_t +siena_rx_prefix_hash( + __in efx_nic_t *enp, + __in efx_rx_hash_alg_t func, + __in uint8_t *buffer) +{ + _NOTE(ARGUNUSED(enp)) + + switch (func) { + case EFX_RX_HASHALG_TOEPLITZ: + return ((buffer[12] << 24) | + (buffer[13] << 16) | + (buffer[14] << 8) | + buffer[15]); + + case EFX_RX_HASHALG_LFSR: + return ((buffer[14] << 8) | buffer[15]); + + default: + EFSYS_ASSERT(0); + return (0); + } +} +#endif /* EFSYS_OPT_RX_SCALE */ + +static __checkReturn efx_rc_t +siena_rx_prefix_pktlen( + __in efx_nic_t *enp, + __in uint8_t *buffer, + __out uint16_t *lengthp) +{ + _NOTE(ARGUNUSED(enp, buffer, lengthp)) + + /* Not supported by Falcon/Siena hardware */ + EFSYS_ASSERT(0); + return (ENOTSUP); +} + + +static void +siena_rx_qpost( + __in efx_rxq_t *erp, + __in_ecount(ndescs) efsys_dma_addr_t *addrp, + __in size_t size, + __in unsigned int ndescs, + __in unsigned int completed, + __in unsigned int added) +{ + efx_qword_t qword; + unsigned int i; + unsigned int offset; + unsigned int id; + + /* The client driver must not overfill the queue */ + EFSYS_ASSERT3U(added - completed + ndescs, <=, + EFX_RXQ_LIMIT(erp->er_mask + 1)); + + id = added & (erp->er_mask); + for (i = 0; i < ndescs; i++) { + EFSYS_PROBE4(rx_post, unsigned int, erp->er_index, + unsigned int, id, efsys_dma_addr_t, addrp[i], + size_t, size); + + EFX_POPULATE_QWORD_3(qword, + FSF_AZ_RX_KER_BUF_SIZE, (uint32_t)(size), + FSF_AZ_RX_KER_BUF_ADDR_DW0, + (uint32_t)(addrp[i] & 0xffffffff), + FSF_AZ_RX_KER_BUF_ADDR_DW1, + (uint32_t)(addrp[i] >> 32)); + + offset = id * sizeof (efx_qword_t); + EFSYS_MEM_WRITEQ(erp->er_esmp, offset, &qword); + + id = (id + 1) & (erp->er_mask); + } +} + +static void +siena_rx_qpush( + __in efx_rxq_t *erp, + __in unsigned int added, + __inout unsigned int *pushedp) +{ + efx_nic_t *enp = erp->er_enp; + unsigned int pushed = *pushedp; + uint32_t wptr; + efx_oword_t oword; + efx_dword_t dword; + + /* All descriptors are pushed */ + *pushedp = added; + + /* Push the populated descriptors out */ + wptr = added & erp->er_mask; + + EFX_POPULATE_OWORD_1(oword, FRF_AZ_RX_DESC_WPTR, wptr); + + /* Only write the third DWORD */ + EFX_POPULATE_DWORD_1(dword, + EFX_DWORD_0, EFX_OWORD_FIELD(oword, EFX_DWORD_3)); + + /* Guarantee ordering of memory (descriptors) and PIO (doorbell) */ + EFX_DMA_SYNC_QUEUE_FOR_DEVICE(erp->er_esmp, erp->er_mask + 1, + wptr, pushed & erp->er_mask); + EFSYS_PIO_WRITE_BARRIER(); + EFX_BAR_TBL_WRITED3(enp, FR_BZ_RX_DESC_UPD_REGP0, + erp->er_index, &dword, B_FALSE); +} + +#if EFSYS_OPT_RX_PACKED_STREAM +static void +siena_rx_qpush_ps_credits( + __in efx_rxq_t *erp) +{ + /* Not supported by Siena hardware */ + EFSYS_ASSERT(0); +} + +static uint8_t * +siena_rx_qps_packet_info( + __in efx_rxq_t *erp, + __in uint8_t *buffer, + __in uint32_t buffer_length, + __in uint32_t current_offset, + __out uint16_t *lengthp, + __out uint32_t *next_offsetp, + __out uint32_t *timestamp) +{ + /* Not supported by Siena hardware */ + EFSYS_ASSERT(0); + + return (NULL); +} +#endif /* EFSYS_OPT_RX_PACKED_STREAM */ + +static __checkReturn efx_rc_t +siena_rx_qflush( + __in efx_rxq_t *erp) +{ + efx_nic_t *enp = erp->er_enp; + efx_oword_t oword; + uint32_t label; + 
+ label = erp->er_index; + + /* Flush the queue */ + EFX_POPULATE_OWORD_2(oword, FRF_AZ_RX_FLUSH_DESCQ_CMD, 1, + FRF_AZ_RX_FLUSH_DESCQ, label); + EFX_BAR_WRITEO(enp, FR_AZ_RX_FLUSH_DESCQ_REG, &oword); + + return (0); +} + +static void +siena_rx_qenable( + __in efx_rxq_t *erp) +{ + efx_nic_t *enp = erp->er_enp; + efx_oword_t oword; + + EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC); + + EFX_BAR_TBL_READO(enp, FR_AZ_RX_DESC_PTR_TBL, + erp->er_index, &oword, B_TRUE); + + EFX_SET_OWORD_FIELD(oword, FRF_AZ_RX_DC_HW_RPTR, 0); + EFX_SET_OWORD_FIELD(oword, FRF_AZ_RX_DESCQ_HW_RPTR, 0); + EFX_SET_OWORD_FIELD(oword, FRF_AZ_RX_DESCQ_EN, 1); + + EFX_BAR_TBL_WRITEO(enp, FR_AZ_RX_DESC_PTR_TBL, + erp->er_index, &oword, B_TRUE); +} + +static __checkReturn efx_rc_t +siena_rx_qcreate( + __in efx_nic_t *enp, + __in unsigned int index, + __in unsigned int label, + __in efx_rxq_type_t type, + __in_opt const efx_rxq_type_data_t *type_data, + __in efsys_mem_t *esmp, + __in size_t ndescs, + __in uint32_t id, + __in unsigned int flags, + __in efx_evq_t *eep, + __in efx_rxq_t *erp) +{ + efx_nic_cfg_t *encp = &(enp->en_nic_cfg); + efx_oword_t oword; + uint32_t size; + boolean_t jumbo = B_FALSE; + efx_rc_t rc; + + _NOTE(ARGUNUSED(esmp)) + + EFX_STATIC_ASSERT(EFX_EV_RX_NLABELS == + (1 << FRF_AZ_RX_DESCQ_LABEL_WIDTH)); + EFSYS_ASSERT3U(label, <, EFX_EV_RX_NLABELS); + EFSYS_ASSERT3U(enp->en_rx_qcount + 1, <, encp->enc_rxq_limit); + + if (index >= encp->enc_rxq_limit) { + rc = EINVAL; + goto fail1; + } + for (size = 0; + (1U << size) <= encp->enc_rxq_max_ndescs / encp->enc_rxq_min_ndescs; + size++) + if ((1U << size) == (uint32_t)ndescs / encp->enc_rxq_min_ndescs) + break; + if (id + (1 << size) >= encp->enc_buftbl_limit) { + rc = EINVAL; + goto fail2; + } + + switch (type) { + case EFX_RXQ_TYPE_DEFAULT: + erp->er_buf_size = type_data->ertd_default.ed_buf_size; + break; + + default: + rc = EINVAL; + goto fail3; + } + + if (flags & EFX_RXQ_FLAG_SCATTER) { +#if EFSYS_OPT_RX_SCATTER + jumbo = B_TRUE; +#else + rc = EINVAL; + goto fail4; +#endif /* EFSYS_OPT_RX_SCATTER */ + } + + /* Set up the new descriptor queue */ + EFX_POPULATE_OWORD_7(oword, + FRF_AZ_RX_DESCQ_BUF_BASE_ID, id, + FRF_AZ_RX_DESCQ_EVQ_ID, eep->ee_index, + FRF_AZ_RX_DESCQ_OWNER_ID, 0, + FRF_AZ_RX_DESCQ_LABEL, label, + FRF_AZ_RX_DESCQ_SIZE, size, + FRF_AZ_RX_DESCQ_TYPE, 0, + FRF_AZ_RX_DESCQ_JUMBO, jumbo); + + EFX_BAR_TBL_WRITEO(enp, FR_AZ_RX_DESC_PTR_TBL, + erp->er_index, &oword, B_TRUE); + + return (0); + +#if !EFSYS_OPT_RX_SCATTER +fail4: + EFSYS_PROBE(fail4); +#endif +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +static void +siena_rx_qdestroy( + __in efx_rxq_t *erp) +{ + efx_nic_t *enp = erp->er_enp; + efx_oword_t oword; + + EFSYS_ASSERT(enp->en_rx_qcount != 0); + --enp->en_rx_qcount; + + /* Purge descriptor queue */ + EFX_ZERO_OWORD(oword); + + EFX_BAR_TBL_WRITEO(enp, FR_AZ_RX_DESC_PTR_TBL, + erp->er_index, &oword, B_TRUE); + + /* Free the RXQ object */ + EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_rxq_t), erp); +} + +static void +siena_rx_fini( + __in efx_nic_t *enp) +{ + _NOTE(ARGUNUSED(enp)) +} + +#endif /* EFSYS_OPT_SIENA */ diff --git a/src/spdk/dpdk/drivers/net/sfc/base/efx_sram.c b/src/spdk/dpdk/drivers/net/sfc/base/efx_sram.c new file mode 100644 index 000000000..8e35b66b0 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/base/efx_sram.c @@ -0,0 +1,305 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. 
+ * Copyright(c) 2007-2019 Solarflare Communications Inc. + */ + +#include "efx.h" +#include "efx_impl.h" + + __checkReturn efx_rc_t +efx_sram_buf_tbl_set( + __in efx_nic_t *enp, + __in uint32_t id, + __in efsys_mem_t *esmp, + __in size_t n) +{ + efx_qword_t qword; + uint32_t start = id; + uint32_t stop = start + n; + efsys_dma_addr_t addr; + efx_oword_t oword; + unsigned int count; + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC); + +#if EFX_OPTS_EF10() + if (EFX_FAMILY_IS_EF10(enp)) { + /* + * FIXME: the efx_sram_buf_tbl_*() functionality needs to be + * pulled inside the Falcon/Siena queue create/destroy code, + * and then the original functions can be removed (see bug30834 + * comment #1). But, for now, we just ensure that they are + * no-ops for EF10, to allow bringing up existing drivers + * without modification. + */ + + return (0); + } +#endif /* EFX_OPTS_EF10() */ + + if (stop >= EFX_BUF_TBL_SIZE) { + rc = EFBIG; + goto fail1; + } + + /* Add the entries into the buffer table */ + addr = EFSYS_MEM_ADDR(esmp); + for (id = start; id != stop; id++) { + EFX_POPULATE_QWORD_5(qword, + FRF_AZ_IP_DAT_BUF_SIZE, 0, FRF_AZ_BUF_ADR_REGION, 0, + FRF_AZ_BUF_ADR_FBUF_DW0, + (uint32_t)((addr >> 12) & 0xffffffff), + FRF_AZ_BUF_ADR_FBUF_DW1, + (uint32_t)((addr >> 12) >> 32), + FRF_AZ_BUF_OWNER_ID_FBUF, 0); + + EFX_BAR_TBL_WRITEQ(enp, FR_AZ_BUF_FULL_TBL, + id, &qword); + + addr += EFX_BUF_SIZE; + } + + EFSYS_PROBE2(buf, uint32_t, start, uint32_t, stop - 1); + + /* Flush the write buffer */ + EFX_POPULATE_OWORD_2(oword, FRF_AZ_BUF_UPD_CMD, 1, + FRF_AZ_BUF_CLR_CMD, 0); + EFX_BAR_WRITEO(enp, FR_AZ_BUF_TBL_UPD_REG, &oword); + + /* Poll for the last entry being written to the buffer table */ + EFSYS_ASSERT3U(id, ==, stop); + addr -= EFX_BUF_SIZE; + + count = 0; + do { + EFSYS_PROBE1(wait, unsigned int, count); + + /* Spin for 1 ms */ + EFSYS_SPIN(1000); + + EFX_BAR_TBL_READQ(enp, FR_AZ_BUF_FULL_TBL, + id - 1, &qword); + + if (EFX_QWORD_FIELD(qword, FRF_AZ_BUF_ADR_FBUF_DW0) == + (uint32_t)((addr >> 12) & 0xffffffff) && + EFX_QWORD_FIELD(qword, FRF_AZ_BUF_ADR_FBUF_DW1) == + (uint32_t)((addr >> 12) >> 32)) + goto verify; + + } while (++count < 100); + + rc = ETIMEDOUT; + goto fail2; + +verify: + /* Verify the rest of the entries in the buffer table */ + while (--id != start) { + addr -= EFX_BUF_SIZE; + + /* Read the buffer table entry */ + EFX_BAR_TBL_READQ(enp, FR_AZ_BUF_FULL_TBL, + id - 1, &qword); + + if (EFX_QWORD_FIELD(qword, FRF_AZ_BUF_ADR_FBUF_DW0) != + (uint32_t)((addr >> 12) & 0xffffffff) || + EFX_QWORD_FIELD(qword, FRF_AZ_BUF_ADR_FBUF_DW1) != + (uint32_t)((addr >> 12) >> 32)) { + rc = EFAULT; + goto fail3; + } + } + + return (0); + +fail3: + EFSYS_PROBE(fail3); + + id = stop; + +fail2: + EFSYS_PROBE(fail2); + + EFX_POPULATE_OWORD_4(oword, FRF_AZ_BUF_UPD_CMD, 0, + FRF_AZ_BUF_CLR_CMD, 1, FRF_AZ_BUF_CLR_END_ID, id - 1, + FRF_AZ_BUF_CLR_START_ID, start); + EFX_BAR_WRITEO(enp, FR_AZ_BUF_TBL_UPD_REG, &oword); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + void +efx_sram_buf_tbl_clear( + __in efx_nic_t *enp, + __in uint32_t id, + __in size_t n) +{ + efx_oword_t oword; + uint32_t start = id; + uint32_t stop = start + n; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC); + +#if EFX_OPTS_EF10() + if (EFX_FAMILY_IS_EF10(enp)) { + /* + * FIXME: the efx_sram_buf_tbl_*() functionality needs to be + * pulled inside the Falcon/Siena queue create/destroy code, + * 
and then the original functions can be removed (see bug30834 + * comment #1). But, for now, we just ensure that they are + * no-ops for EF10, to allow bringing up existing drivers + * without modification. + */ + + return; + } +#endif /* EFX_OPTS_EF10() */ + + EFSYS_ASSERT3U(stop, <, EFX_BUF_TBL_SIZE); + + EFSYS_PROBE2(buf, uint32_t, start, uint32_t, stop - 1); + + EFX_POPULATE_OWORD_4(oword, FRF_AZ_BUF_UPD_CMD, 0, + FRF_AZ_BUF_CLR_CMD, 1, FRF_AZ_BUF_CLR_END_ID, stop - 1, + FRF_AZ_BUF_CLR_START_ID, start); + EFX_BAR_WRITEO(enp, FR_AZ_BUF_TBL_UPD_REG, &oword); +} + + +#if EFSYS_OPT_DIAG + +static void +efx_sram_byte_increment_set( + __in size_t row, + __in boolean_t negate, + __out efx_qword_t *eqp) +{ + size_t offset = row * FR_AZ_SRM_DBG_REG_STEP; + unsigned int index; + + _NOTE(ARGUNUSED(negate)) + + for (index = 0; index < sizeof (efx_qword_t); index++) + eqp->eq_u8[index] = offset + index; +} + +static void +efx_sram_all_the_same_set( + __in size_t row, + __in boolean_t negate, + __out efx_qword_t *eqp) +{ + _NOTE(ARGUNUSED(row)) + + if (negate) + EFX_SET_QWORD(*eqp); + else + EFX_ZERO_QWORD(*eqp); +} + +static void +efx_sram_bit_alternate_set( + __in size_t row, + __in boolean_t negate, + __out efx_qword_t *eqp) +{ + _NOTE(ARGUNUSED(row)) + + EFX_POPULATE_QWORD_2(*eqp, + EFX_DWORD_0, (negate) ? 0x55555555 : 0xaaaaaaaa, + EFX_DWORD_1, (negate) ? 0x55555555 : 0xaaaaaaaa); +} + +static void +efx_sram_byte_alternate_set( + __in size_t row, + __in boolean_t negate, + __out efx_qword_t *eqp) +{ + _NOTE(ARGUNUSED(row)) + + EFX_POPULATE_QWORD_2(*eqp, + EFX_DWORD_0, (negate) ? 0x00ff00ff : 0xff00ff00, + EFX_DWORD_1, (negate) ? 0x00ff00ff : 0xff00ff00); +} + +static void +efx_sram_byte_changing_set( + __in size_t row, + __in boolean_t negate, + __out efx_qword_t *eqp) +{ + size_t offset = row * FR_AZ_SRM_DBG_REG_STEP; + unsigned int index; + + for (index = 0; index < sizeof (efx_qword_t); index++) { + uint8_t byte; + + if (offset / 256 == 0) + byte = (uint8_t)((offset % 257) % 256); + else + byte = (uint8_t)(~((offset - 8) % 257) % 256); + + eqp->eq_u8[index] = (negate) ? ~byte : byte; + } +} + +static void +efx_sram_bit_sweep_set( + __in size_t row, + __in boolean_t negate, + __out efx_qword_t *eqp) +{ + size_t offset = row * FR_AZ_SRM_DBG_REG_STEP; + + if (negate) { + EFX_SET_QWORD(*eqp); + EFX_CLEAR_QWORD_BIT(*eqp, (offset / sizeof (efx_qword_t)) % 64); + } else { + EFX_ZERO_QWORD(*eqp); + EFX_SET_QWORD_BIT(*eqp, (offset / sizeof (efx_qword_t)) % 64); + } +} + +efx_sram_pattern_fn_t __efx_sram_pattern_fns[] = { + efx_sram_byte_increment_set, + efx_sram_all_the_same_set, + efx_sram_bit_alternate_set, + efx_sram_byte_alternate_set, + efx_sram_byte_changing_set, + efx_sram_bit_sweep_set +}; + + __checkReturn efx_rc_t +efx_sram_test( + __in efx_nic_t *enp, + __in efx_pattern_type_t type) +{ + efx_sram_pattern_fn_t func; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC); + + EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_RX)); + EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_TX)); + EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_EV)); + + /* SRAM testing is only available on Siena. 
*/ + if (enp->en_family != EFX_FAMILY_SIENA) + return (0); + + /* Select pattern generator */ + EFSYS_ASSERT3U(type, <, EFX_PATTERN_NTYPES); + func = __efx_sram_pattern_fns[type]; + + return (siena_sram_test(enp, func)); +} + +#endif /* EFSYS_OPT_DIAG */ diff --git a/src/spdk/dpdk/drivers/net/sfc/base/efx_tunnel.c b/src/spdk/dpdk/drivers/net/sfc/base/efx_tunnel.c new file mode 100644 index 000000000..3a034412c --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/base/efx_tunnel.c @@ -0,0 +1,469 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2007-2019 Solarflare Communications Inc. + */ + +#include "efx.h" +#include "efx_impl.h" + + +#if EFSYS_OPT_TUNNEL + +#if EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON +static const efx_tunnel_ops_t __efx_tunnel_dummy_ops = { + NULL, /* eto_udp_encap_supported */ + NULL, /* eto_reconfigure */ +}; +#endif /* EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON */ + +#if EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 +static __checkReturn boolean_t +ef10_udp_encap_supported( + __in efx_nic_t *enp); + +static __checkReturn efx_rc_t +ef10_tunnel_reconfigure( + __in efx_nic_t *enp); + +static const efx_tunnel_ops_t __efx_tunnel_ef10_ops = { + ef10_udp_encap_supported, /* eto_udp_encap_supported */ + ef10_tunnel_reconfigure, /* eto_reconfigure */ +}; +#endif /* EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */ + +static __checkReturn efx_rc_t +efx_mcdi_set_tunnel_encap_udp_ports( + __in efx_nic_t *enp, + __in efx_tunnel_cfg_t *etcp, + __in boolean_t unloading, + __out boolean_t *resetting) +{ + efx_mcdi_req_t req; + EFX_MCDI_DECLARE_BUF(payload, + MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LENMAX, + MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_LEN); + efx_word_t flags; + efx_rc_t rc; + unsigned int i; + unsigned int entries_num; + + if (etcp == NULL) + entries_num = 0; + else + entries_num = etcp->etc_udp_entries_num; + + req.emr_cmd = MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS; + req.emr_in_buf = payload; + req.emr_in_length = + MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LEN(entries_num); + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_LEN; + + EFX_POPULATE_WORD_1(flags, + MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_UNLOADING, + (unloading == B_TRUE) ? 1 : 0); + MCDI_IN_SET_WORD(req, SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS, + EFX_WORD_FIELD(flags, EFX_WORD_0)); + + MCDI_IN_SET_WORD(req, SET_TUNNEL_ENCAP_UDP_PORTS_IN_NUM_ENTRIES, + entries_num); + + for (i = 0; i < entries_num; ++i) { + uint16_t mcdi_udp_protocol; + + switch (etcp->etc_udp_entries[i].etue_protocol) { + case EFX_TUNNEL_PROTOCOL_VXLAN: + mcdi_udp_protocol = TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN; + break; + case EFX_TUNNEL_PROTOCOL_GENEVE: + mcdi_udp_protocol = TUNNEL_ENCAP_UDP_PORT_ENTRY_GENEVE; + break; + default: + rc = EINVAL; + goto fail1; + } + + /* + * UDP port is MCDI native little-endian in the request + * and EFX_POPULATE_DWORD cares about conversion from + * host/CPU byte order to little-endian. 
+ */ + EFX_STATIC_ASSERT(sizeof (efx_dword_t) == + TUNNEL_ENCAP_UDP_PORT_ENTRY_LEN); + EFX_POPULATE_DWORD_2( + MCDI_IN2(req, efx_dword_t, + SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES)[i], + TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT, + etcp->etc_udp_entries[i].etue_port, + TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL, + mcdi_udp_protocol); + } + + efx_mcdi_execute(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail2; + } + + if (req.emr_out_length_used != + MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_LEN) { + rc = EMSGSIZE; + goto fail3; + } + + *resetting = MCDI_OUT_WORD_FIELD(req, + SET_TUNNEL_ENCAP_UDP_PORTS_OUT_FLAGS, + SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING); + + return (0); + +fail3: + EFSYS_PROBE(fail3); + +fail2: + EFSYS_PROBE(fail2); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +efx_tunnel_init( + __in efx_nic_t *enp) +{ + efx_tunnel_cfg_t *etcp = &enp->en_tunnel_cfg; + const efx_tunnel_ops_t *etop; + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); + EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_TUNNEL)); + + EFX_STATIC_ASSERT(EFX_TUNNEL_MAXNENTRIES == + MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_MAXNUM); + + switch (enp->en_family) { +#if EFSYS_OPT_SIENA + case EFX_FAMILY_SIENA: + etop = &__efx_tunnel_dummy_ops; + break; +#endif /* EFSYS_OPT_SIENA */ + +#if EFSYS_OPT_HUNTINGTON + case EFX_FAMILY_HUNTINGTON: + etop = &__efx_tunnel_dummy_ops; + break; +#endif /* EFSYS_OPT_HUNTINGTON */ + +#if EFSYS_OPT_MEDFORD + case EFX_FAMILY_MEDFORD: + etop = &__efx_tunnel_ef10_ops; + break; +#endif /* EFSYS_OPT_MEDFORD */ + +#if EFSYS_OPT_MEDFORD2 + case EFX_FAMILY_MEDFORD2: + etop = &__efx_tunnel_ef10_ops; + break; +#endif /* EFSYS_OPT_MEDFORD2 */ + + default: + EFSYS_ASSERT(0); + rc = ENOTSUP; + goto fail1; + } + + memset(etcp->etc_udp_entries, 0, sizeof (etcp->etc_udp_entries)); + etcp->etc_udp_entries_num = 0; + + enp->en_etop = etop; + enp->en_mod_flags |= EFX_MOD_TUNNEL; + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + enp->en_etop = NULL; + enp->en_mod_flags &= ~EFX_MOD_TUNNEL; + + return (rc); +} + + void +efx_tunnel_fini( + __in efx_nic_t *enp) +{ + boolean_t resetting; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_TUNNEL); + + if ((enp->en_etop->eto_udp_encap_supported != NULL) && + enp->en_etop->eto_udp_encap_supported(enp)) { + /* + * The UNLOADING flag allows the MC to suppress the datapath + * reset if it was set on the last call to + * MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS by all functions + */ + (void) efx_mcdi_set_tunnel_encap_udp_ports(enp, NULL, B_TRUE, + &resetting); + } + + enp->en_etop = NULL; + enp->en_mod_flags &= ~EFX_MOD_TUNNEL; +} + +static __checkReturn efx_rc_t +efx_tunnel_config_find_udp_tunnel_entry( + __in efx_tunnel_cfg_t *etcp, + __in uint16_t port, + __out unsigned int *entryp) +{ + unsigned int i; + + for (i = 0; i < etcp->etc_udp_entries_num; ++i) { + efx_tunnel_udp_entry_t *p = &etcp->etc_udp_entries[i]; + + if (p->etue_port == port) { + *entryp = i; + return (0); + } + } + + return (ENOENT); +} + + __checkReturn efx_rc_t +efx_tunnel_config_udp_add( + __in efx_nic_t *enp, + __in uint16_t port /* host/cpu-endian */, + __in efx_tunnel_protocol_t protocol) +{ + const efx_nic_cfg_t *encp = &enp->en_nic_cfg; + efx_tunnel_cfg_t *etcp = &enp->en_tunnel_cfg; + efsys_lock_state_t state; + efx_rc_t rc; + unsigned int entry; + + 
EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_TUNNEL); + + if (protocol >= EFX_TUNNEL_NPROTOS) { + rc = EINVAL; + goto fail1; + } + + if ((encp->enc_tunnel_encapsulations_supported & + (1u << protocol)) == 0) { + rc = ENOTSUP; + goto fail2; + } + + EFSYS_LOCK(enp->en_eslp, state); + + rc = efx_tunnel_config_find_udp_tunnel_entry(etcp, port, &entry); + if (rc == 0) { + rc = EEXIST; + goto fail3; + } + + if (etcp->etc_udp_entries_num == + encp->enc_tunnel_config_udp_entries_max) { + rc = ENOSPC; + goto fail4; + } + + etcp->etc_udp_entries[etcp->etc_udp_entries_num].etue_port = port; + etcp->etc_udp_entries[etcp->etc_udp_entries_num].etue_protocol = + protocol; + + etcp->etc_udp_entries_num++; + + EFSYS_UNLOCK(enp->en_eslp, state); + + return (0); + +fail4: + EFSYS_PROBE(fail4); + +fail3: + EFSYS_PROBE(fail3); + EFSYS_UNLOCK(enp->en_eslp, state); + +fail2: + EFSYS_PROBE(fail2); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +efx_tunnel_config_udp_remove( + __in efx_nic_t *enp, + __in uint16_t port /* host/cpu-endian */, + __in efx_tunnel_protocol_t protocol) +{ + efx_tunnel_cfg_t *etcp = &enp->en_tunnel_cfg; + efsys_lock_state_t state; + unsigned int entry; + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_TUNNEL); + + EFSYS_LOCK(enp->en_eslp, state); + + rc = efx_tunnel_config_find_udp_tunnel_entry(etcp, port, &entry); + if (rc != 0) + goto fail1; + + if (etcp->etc_udp_entries[entry].etue_protocol != protocol) { + rc = EINVAL; + goto fail2; + } + + EFSYS_ASSERT3U(etcp->etc_udp_entries_num, >, 0); + etcp->etc_udp_entries_num--; + + if (entry < etcp->etc_udp_entries_num) { + memmove(&etcp->etc_udp_entries[entry], + &etcp->etc_udp_entries[entry + 1], + (etcp->etc_udp_entries_num - entry) * + sizeof (etcp->etc_udp_entries[0])); + } + + memset(&etcp->etc_udp_entries[etcp->etc_udp_entries_num], 0, + sizeof (etcp->etc_udp_entries[0])); + + EFSYS_UNLOCK(enp->en_eslp, state); + + return (0); + +fail2: + EFSYS_PROBE(fail2); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + EFSYS_UNLOCK(enp->en_eslp, state); + + return (rc); +} + + void +efx_tunnel_config_clear( + __in efx_nic_t *enp) +{ + efx_tunnel_cfg_t *etcp = &enp->en_tunnel_cfg; + efsys_lock_state_t state; + + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_TUNNEL); + + EFSYS_LOCK(enp->en_eslp, state); + + etcp->etc_udp_entries_num = 0; + memset(etcp->etc_udp_entries, 0, sizeof (etcp->etc_udp_entries)); + + EFSYS_UNLOCK(enp->en_eslp, state); +} + + __checkReturn efx_rc_t +efx_tunnel_reconfigure( + __in efx_nic_t *enp) +{ + const efx_tunnel_ops_t *etop = enp->en_etop; + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_TUNNEL); + + if (etop->eto_reconfigure == NULL) { + rc = ENOTSUP; + goto fail1; + } + + if ((rc = enp->en_etop->eto_reconfigure(enp)) != 0) + goto fail2; + + return (0); + +fail2: + EFSYS_PROBE(fail2); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +#if EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 +static __checkReturn boolean_t +ef10_udp_encap_supported( + __in efx_nic_t *enp) +{ + const efx_nic_cfg_t *encp = &enp->en_nic_cfg; + uint32_t udp_tunnels_mask = 0; + + udp_tunnels_mask |= (1u << EFX_TUNNEL_PROTOCOL_VXLAN); + udp_tunnels_mask |= (1u << EFX_TUNNEL_PROTOCOL_GENEVE); + + return ((encp->enc_tunnel_encapsulations_supported & + udp_tunnels_mask) == 0 ? 
B_FALSE : B_TRUE); +} + +static __checkReturn efx_rc_t +ef10_tunnel_reconfigure( + __in efx_nic_t *enp) +{ + efx_tunnel_cfg_t *etcp = &enp->en_tunnel_cfg; + efx_rc_t rc; + boolean_t resetting; + efsys_lock_state_t state; + efx_tunnel_cfg_t etc; + + EFSYS_LOCK(enp->en_eslp, state); + memcpy(&etc, etcp, sizeof (etc)); + EFSYS_UNLOCK(enp->en_eslp, state); + + if (ef10_udp_encap_supported(enp) == B_FALSE) { + /* + * It is OK to apply empty UDP tunnel ports when UDP + * tunnel encapsulations are not supported - just nothing + * should be done. + */ + if (etc.etc_udp_entries_num == 0) + return (0); + rc = ENOTSUP; + goto fail1; + } else { + /* + * All PCI functions can see a reset upon the + * MCDI request completion + */ + rc = efx_mcdi_set_tunnel_encap_udp_ports(enp, &etc, B_FALSE, + &resetting); + if (rc != 0) + goto fail2; + + /* + * Although the caller should be able to handle MC reboot, + * it might come in handy to report the impending reboot + * by returning EAGAIN + */ + return ((resetting) ? EAGAIN : 0); + } +fail2: + EFSYS_PROBE(fail2); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} +#endif /* EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */ + +#endif /* EFSYS_OPT_TUNNEL */ diff --git a/src/spdk/dpdk/drivers/net/sfc/base/efx_tx.c b/src/spdk/dpdk/drivers/net/sfc/base/efx_tx.c new file mode 100644 index 000000000..38c64e028 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/base/efx_tx.c @@ -0,0 +1,1142 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2007-2019 Solarflare Communications Inc. + */ + +#include "efx.h" +#include "efx_impl.h" + +#if EFSYS_OPT_QSTATS +#define EFX_TX_QSTAT_INCR(_etp, _stat) \ + do { \ + (_etp)->et_stat[_stat]++; \ + _NOTE(CONSTANTCONDITION) \ + } while (B_FALSE) +#else +#define EFX_TX_QSTAT_INCR(_etp, _stat) +#endif + +#if EFSYS_OPT_SIENA + +static __checkReturn efx_rc_t +siena_tx_init( + __in efx_nic_t *enp); + +static void +siena_tx_fini( + __in efx_nic_t *enp); + +static __checkReturn efx_rc_t +siena_tx_qcreate( + __in efx_nic_t *enp, + __in unsigned int index, + __in unsigned int label, + __in efsys_mem_t *esmp, + __in size_t ndescs, + __in uint32_t id, + __in uint16_t flags, + __in efx_evq_t *eep, + __in efx_txq_t *etp, + __out unsigned int *addedp); + +static void +siena_tx_qdestroy( + __in efx_txq_t *etp); + +static __checkReturn efx_rc_t +siena_tx_qpost( + __in efx_txq_t *etp, + __in_ecount(ndescs) efx_buffer_t *eb, + __in unsigned int ndescs, + __in unsigned int completed, + __inout unsigned int *addedp); + +static void +siena_tx_qpush( + __in efx_txq_t *etp, + __in unsigned int added, + __in unsigned int pushed); + +static __checkReturn efx_rc_t +siena_tx_qpace( + __in efx_txq_t *etp, + __in unsigned int ns); + +static __checkReturn efx_rc_t +siena_tx_qflush( + __in efx_txq_t *etp); + +static void +siena_tx_qenable( + __in efx_txq_t *etp); + + __checkReturn efx_rc_t +siena_tx_qdesc_post( + __in efx_txq_t *etp, + __in_ecount(ndescs) efx_desc_t *ed, + __in unsigned int ndescs, + __in unsigned int completed, + __inout unsigned int *addedp); + + void +siena_tx_qdesc_dma_create( + __in efx_txq_t *etp, + __in efsys_dma_addr_t addr, + __in size_t size, + __in boolean_t eop, + __out efx_desc_t *edp); + +#if EFSYS_OPT_QSTATS +static void +siena_tx_qstats_update( + __in efx_txq_t *etp, + __inout_ecount(TX_NQSTATS) efsys_stat_t *stat); +#endif + +#endif /* EFSYS_OPT_SIENA */ + + +#if EFSYS_OPT_SIENA +static const efx_tx_ops_t __efx_tx_siena_ops = { + siena_tx_init, /* etxo_init */ + 
siena_tx_fini, /* etxo_fini */ + siena_tx_qcreate, /* etxo_qcreate */ + siena_tx_qdestroy, /* etxo_qdestroy */ + siena_tx_qpost, /* etxo_qpost */ + siena_tx_qpush, /* etxo_qpush */ + siena_tx_qpace, /* etxo_qpace */ + siena_tx_qflush, /* etxo_qflush */ + siena_tx_qenable, /* etxo_qenable */ + NULL, /* etxo_qpio_enable */ + NULL, /* etxo_qpio_disable */ + NULL, /* etxo_qpio_write */ + NULL, /* etxo_qpio_post */ + siena_tx_qdesc_post, /* etxo_qdesc_post */ + siena_tx_qdesc_dma_create, /* etxo_qdesc_dma_create */ + NULL, /* etxo_qdesc_tso_create */ + NULL, /* etxo_qdesc_tso2_create */ + NULL, /* etxo_qdesc_vlantci_create */ + NULL, /* etxo_qdesc_checksum_create */ +#if EFSYS_OPT_QSTATS + siena_tx_qstats_update, /* etxo_qstats_update */ +#endif +}; +#endif /* EFSYS_OPT_SIENA */ + +#if EFSYS_OPT_HUNTINGTON +static const efx_tx_ops_t __efx_tx_hunt_ops = { + ef10_tx_init, /* etxo_init */ + ef10_tx_fini, /* etxo_fini */ + ef10_tx_qcreate, /* etxo_qcreate */ + ef10_tx_qdestroy, /* etxo_qdestroy */ + ef10_tx_qpost, /* etxo_qpost */ + ef10_tx_qpush, /* etxo_qpush */ + ef10_tx_qpace, /* etxo_qpace */ + ef10_tx_qflush, /* etxo_qflush */ + ef10_tx_qenable, /* etxo_qenable */ + ef10_tx_qpio_enable, /* etxo_qpio_enable */ + ef10_tx_qpio_disable, /* etxo_qpio_disable */ + ef10_tx_qpio_write, /* etxo_qpio_write */ + ef10_tx_qpio_post, /* etxo_qpio_post */ + ef10_tx_qdesc_post, /* etxo_qdesc_post */ + ef10_tx_qdesc_dma_create, /* etxo_qdesc_dma_create */ + ef10_tx_qdesc_tso_create, /* etxo_qdesc_tso_create */ + ef10_tx_qdesc_tso2_create, /* etxo_qdesc_tso2_create */ + ef10_tx_qdesc_vlantci_create, /* etxo_qdesc_vlantci_create */ + ef10_tx_qdesc_checksum_create, /* etxo_qdesc_checksum_create */ +#if EFSYS_OPT_QSTATS + ef10_tx_qstats_update, /* etxo_qstats_update */ +#endif +}; +#endif /* EFSYS_OPT_HUNTINGTON */ + +#if EFSYS_OPT_MEDFORD +static const efx_tx_ops_t __efx_tx_medford_ops = { + ef10_tx_init, /* etxo_init */ + ef10_tx_fini, /* etxo_fini */ + ef10_tx_qcreate, /* etxo_qcreate */ + ef10_tx_qdestroy, /* etxo_qdestroy */ + ef10_tx_qpost, /* etxo_qpost */ + ef10_tx_qpush, /* etxo_qpush */ + ef10_tx_qpace, /* etxo_qpace */ + ef10_tx_qflush, /* etxo_qflush */ + ef10_tx_qenable, /* etxo_qenable */ + ef10_tx_qpio_enable, /* etxo_qpio_enable */ + ef10_tx_qpio_disable, /* etxo_qpio_disable */ + ef10_tx_qpio_write, /* etxo_qpio_write */ + ef10_tx_qpio_post, /* etxo_qpio_post */ + ef10_tx_qdesc_post, /* etxo_qdesc_post */ + ef10_tx_qdesc_dma_create, /* etxo_qdesc_dma_create */ + NULL, /* etxo_qdesc_tso_create */ + ef10_tx_qdesc_tso2_create, /* etxo_qdesc_tso2_create */ + ef10_tx_qdesc_vlantci_create, /* etxo_qdesc_vlantci_create */ + ef10_tx_qdesc_checksum_create, /* etxo_qdesc_checksum_create */ +#if EFSYS_OPT_QSTATS + ef10_tx_qstats_update, /* etxo_qstats_update */ +#endif +}; +#endif /* EFSYS_OPT_MEDFORD */ + +#if EFSYS_OPT_MEDFORD2 +static const efx_tx_ops_t __efx_tx_medford2_ops = { + ef10_tx_init, /* etxo_init */ + ef10_tx_fini, /* etxo_fini */ + ef10_tx_qcreate, /* etxo_qcreate */ + ef10_tx_qdestroy, /* etxo_qdestroy */ + ef10_tx_qpost, /* etxo_qpost */ + ef10_tx_qpush, /* etxo_qpush */ + ef10_tx_qpace, /* etxo_qpace */ + ef10_tx_qflush, /* etxo_qflush */ + ef10_tx_qenable, /* etxo_qenable */ + ef10_tx_qpio_enable, /* etxo_qpio_enable */ + ef10_tx_qpio_disable, /* etxo_qpio_disable */ + ef10_tx_qpio_write, /* etxo_qpio_write */ + ef10_tx_qpio_post, /* etxo_qpio_post */ + ef10_tx_qdesc_post, /* etxo_qdesc_post */ + ef10_tx_qdesc_dma_create, /* etxo_qdesc_dma_create */ + NULL, /* etxo_qdesc_tso_create */ + 
ef10_tx_qdesc_tso2_create, /* etxo_qdesc_tso2_create */ + ef10_tx_qdesc_vlantci_create, /* etxo_qdesc_vlantci_create */ + ef10_tx_qdesc_checksum_create, /* etxo_qdesc_checksum_create */ +#if EFSYS_OPT_QSTATS + ef10_tx_qstats_update, /* etxo_qstats_update */ +#endif +}; +#endif /* EFSYS_OPT_MEDFORD2 */ + + + __checkReturn efx_rc_t +efx_tx_init( + __in efx_nic_t *enp) +{ + const efx_tx_ops_t *etxop; + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC); + + if (!(enp->en_mod_flags & EFX_MOD_EV)) { + rc = EINVAL; + goto fail1; + } + + if (enp->en_mod_flags & EFX_MOD_TX) { + rc = EINVAL; + goto fail2; + } + + switch (enp->en_family) { +#if EFSYS_OPT_SIENA + case EFX_FAMILY_SIENA: + etxop = &__efx_tx_siena_ops; + break; +#endif /* EFSYS_OPT_SIENA */ + +#if EFSYS_OPT_HUNTINGTON + case EFX_FAMILY_HUNTINGTON: + etxop = &__efx_tx_hunt_ops; + break; +#endif /* EFSYS_OPT_HUNTINGTON */ + +#if EFSYS_OPT_MEDFORD + case EFX_FAMILY_MEDFORD: + etxop = &__efx_tx_medford_ops; + break; +#endif /* EFSYS_OPT_MEDFORD */ + +#if EFSYS_OPT_MEDFORD2 + case EFX_FAMILY_MEDFORD2: + etxop = &__efx_tx_medford2_ops; + break; +#endif /* EFSYS_OPT_MEDFORD2 */ + + default: + EFSYS_ASSERT(0); + rc = ENOTSUP; + goto fail3; + } + + EFSYS_ASSERT3U(enp->en_tx_qcount, ==, 0); + + if ((rc = etxop->etxo_init(enp)) != 0) + goto fail4; + + enp->en_etxop = etxop; + enp->en_mod_flags |= EFX_MOD_TX; + return (0); + +fail4: + EFSYS_PROBE(fail4); +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + enp->en_etxop = NULL; + enp->en_mod_flags &= ~EFX_MOD_TX; + return (rc); +} + + void +efx_tx_fini( + __in efx_nic_t *enp) +{ + const efx_tx_ops_t *etxop = enp->en_etxop; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_TX); + EFSYS_ASSERT3U(enp->en_tx_qcount, ==, 0); + + etxop->etxo_fini(enp); + + enp->en_etxop = NULL; + enp->en_mod_flags &= ~EFX_MOD_TX; +} + + __checkReturn size_t +efx_txq_size( + __in const efx_nic_t *enp, + __in unsigned int ndescs) +{ + const efx_nic_cfg_t *encp = efx_nic_cfg_get(enp); + + return (ndescs * encp->enc_tx_desc_size); +} + + __checkReturn unsigned int +efx_txq_nbufs( + __in const efx_nic_t *enp, + __in unsigned int ndescs) +{ + return (EFX_DIV_ROUND_UP(efx_txq_size(enp, ndescs), EFX_BUF_SIZE)); +} + + __checkReturn efx_rc_t +efx_tx_qcreate( + __in efx_nic_t *enp, + __in unsigned int index, + __in unsigned int label, + __in efsys_mem_t *esmp, + __in size_t ndescs, + __in uint32_t id, + __in uint16_t flags, + __in efx_evq_t *eep, + __deref_out efx_txq_t **etpp, + __out unsigned int *addedp) +{ + const efx_tx_ops_t *etxop = enp->en_etxop; + efx_txq_t *etp; + const efx_nic_cfg_t *encp = efx_nic_cfg_get(enp); + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_TX); + + EFSYS_ASSERT3U(enp->en_tx_qcount + 1, <, + enp->en_nic_cfg.enc_txq_limit); + + EFSYS_ASSERT(ISP2(encp->enc_txq_max_ndescs)); + EFSYS_ASSERT(ISP2(encp->enc_txq_min_ndescs)); + + if (!ISP2(ndescs) || + ndescs < encp->enc_txq_min_ndescs || + ndescs > encp->enc_txq_max_ndescs) { + rc = EINVAL; + goto fail1; + } + + /* Allocate an TXQ object */ + EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (efx_txq_t), etp); + + if (etp == NULL) { + rc = ENOMEM; + goto fail2; + } + + etp->et_magic = EFX_TXQ_MAGIC; + etp->et_enp = enp; + etp->et_index = index; + etp->et_mask = ndescs - 1; + 
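+	/*
+	 * Note: ndescs was checked above to be a power of 2, so
+	 * et_mask (ndescs - 1) lets descriptor indices wrap with a
+	 * plain bitwise AND in the post/push paths.
+	 */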
etp->et_esmp = esmp; + + /* Initial descriptor index may be modified by etxo_qcreate */ + *addedp = 0; + + if ((rc = etxop->etxo_qcreate(enp, index, label, esmp, + ndescs, id, flags, eep, etp, addedp)) != 0) + goto fail3; + + enp->en_tx_qcount++; + *etpp = etp; + + return (0); + +fail3: + EFSYS_PROBE(fail3); + EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_txq_t), etp); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + return (rc); +} + + void +efx_tx_qdestroy( + __in efx_txq_t *etp) +{ + efx_nic_t *enp = etp->et_enp; + const efx_tx_ops_t *etxop = enp->en_etxop; + + EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC); + + EFSYS_ASSERT(enp->en_tx_qcount != 0); + --enp->en_tx_qcount; + + etxop->etxo_qdestroy(etp); + + /* Free the TXQ object */ + EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_txq_t), etp); +} + + __checkReturn efx_rc_t +efx_tx_qpost( + __in efx_txq_t *etp, + __in_ecount(ndescs) efx_buffer_t *eb, + __in unsigned int ndescs, + __in unsigned int completed, + __inout unsigned int *addedp) +{ + efx_nic_t *enp = etp->et_enp; + const efx_tx_ops_t *etxop = enp->en_etxop; + efx_rc_t rc; + + EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC); + + if ((rc = etxop->etxo_qpost(etp, eb, ndescs, completed, addedp)) != 0) + goto fail1; + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + return (rc); +} + + void +efx_tx_qpush( + __in efx_txq_t *etp, + __in unsigned int added, + __in unsigned int pushed) +{ + efx_nic_t *enp = etp->et_enp; + const efx_tx_ops_t *etxop = enp->en_etxop; + + EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC); + + etxop->etxo_qpush(etp, added, pushed); +} + + __checkReturn efx_rc_t +efx_tx_qpace( + __in efx_txq_t *etp, + __in unsigned int ns) +{ + efx_nic_t *enp = etp->et_enp; + const efx_tx_ops_t *etxop = enp->en_etxop; + efx_rc_t rc; + + EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC); + + if ((rc = etxop->etxo_qpace(etp, ns)) != 0) + goto fail1; + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + return (rc); +} + + __checkReturn efx_rc_t +efx_tx_qflush( + __in efx_txq_t *etp) +{ + efx_nic_t *enp = etp->et_enp; + const efx_tx_ops_t *etxop = enp->en_etxop; + efx_rc_t rc; + + EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC); + + if ((rc = etxop->etxo_qflush(etp)) != 0) + goto fail1; + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + return (rc); +} + + void +efx_tx_qenable( + __in efx_txq_t *etp) +{ + efx_nic_t *enp = etp->et_enp; + const efx_tx_ops_t *etxop = enp->en_etxop; + + EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC); + + etxop->etxo_qenable(etp); +} + + __checkReturn efx_rc_t +efx_tx_qpio_enable( + __in efx_txq_t *etp) +{ + efx_nic_t *enp = etp->et_enp; + const efx_tx_ops_t *etxop = enp->en_etxop; + efx_rc_t rc; + + EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC); + + if (~enp->en_features & EFX_FEATURE_PIO_BUFFERS) { + rc = ENOTSUP; + goto fail1; + } + if (etxop->etxo_qpio_enable == NULL) { + rc = ENOTSUP; + goto fail2; + } + if ((rc = etxop->etxo_qpio_enable(etp)) != 0) + goto fail3; + + return (0); + +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + return (rc); +} + + void +efx_tx_qpio_disable( + __in efx_txq_t *etp) +{ + efx_nic_t *enp = etp->et_enp; + const efx_tx_ops_t *etxop = enp->en_etxop; + + EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC); + + if (etxop->etxo_qpio_disable != NULL) + etxop->etxo_qpio_disable(etp); +} + + __checkReturn efx_rc_t +efx_tx_qpio_write( + __in efx_txq_t *etp, + __in_ecount(buf_length) uint8_t *buffer, + __in 
size_t buf_length, + __in size_t pio_buf_offset) +{ + efx_nic_t *enp = etp->et_enp; + const efx_tx_ops_t *etxop = enp->en_etxop; + efx_rc_t rc; + + EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC); + + if (etxop->etxo_qpio_write != NULL) { + if ((rc = etxop->etxo_qpio_write(etp, buffer, buf_length, + pio_buf_offset)) != 0) + goto fail1; + return (0); + } + + return (ENOTSUP); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + return (rc); +} + + __checkReturn efx_rc_t +efx_tx_qpio_post( + __in efx_txq_t *etp, + __in size_t pkt_length, + __in unsigned int completed, + __inout unsigned int *addedp) +{ + efx_nic_t *enp = etp->et_enp; + const efx_tx_ops_t *etxop = enp->en_etxop; + efx_rc_t rc; + + EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC); + + if (etxop->etxo_qpio_post != NULL) { + if ((rc = etxop->etxo_qpio_post(etp, pkt_length, completed, + addedp)) != 0) + goto fail1; + return (0); + } + + return (ENOTSUP); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + return (rc); +} + + __checkReturn efx_rc_t +efx_tx_qdesc_post( + __in efx_txq_t *etp, + __in_ecount(ndescs) efx_desc_t *ed, + __in unsigned int ndescs, + __in unsigned int completed, + __inout unsigned int *addedp) +{ + efx_nic_t *enp = etp->et_enp; + const efx_tx_ops_t *etxop = enp->en_etxop; + + EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC); + + return (etxop->etxo_qdesc_post(etp, ed, ndescs, completed, addedp)); +} + + void +efx_tx_qdesc_dma_create( + __in efx_txq_t *etp, + __in efsys_dma_addr_t addr, + __in size_t size, + __in boolean_t eop, + __out efx_desc_t *edp) +{ + efx_nic_t *enp = etp->et_enp; + const efx_tx_ops_t *etxop = enp->en_etxop; + + EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC); + EFSYS_ASSERT(etxop->etxo_qdesc_dma_create != NULL); + + etxop->etxo_qdesc_dma_create(etp, addr, size, eop, edp); +} + + void +efx_tx_qdesc_tso_create( + __in efx_txq_t *etp, + __in uint16_t ipv4_id, + __in uint32_t tcp_seq, + __in uint8_t tcp_flags, + __out efx_desc_t *edp) +{ + efx_nic_t *enp = etp->et_enp; + const efx_tx_ops_t *etxop = enp->en_etxop; + + EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC); + EFSYS_ASSERT(etxop->etxo_qdesc_tso_create != NULL); + + etxop->etxo_qdesc_tso_create(etp, ipv4_id, tcp_seq, tcp_flags, edp); +} + + void +efx_tx_qdesc_tso2_create( + __in efx_txq_t *etp, + __in uint16_t ipv4_id, + __in uint16_t outer_ipv4_id, + __in uint32_t tcp_seq, + __in uint16_t mss, + __out_ecount(count) efx_desc_t *edp, + __in int count) +{ + efx_nic_t *enp = etp->et_enp; + const efx_tx_ops_t *etxop = enp->en_etxop; + + EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC); + EFSYS_ASSERT(etxop->etxo_qdesc_tso2_create != NULL); + + etxop->etxo_qdesc_tso2_create(etp, ipv4_id, outer_ipv4_id, + tcp_seq, mss, edp, count); +} + + void +efx_tx_qdesc_vlantci_create( + __in efx_txq_t *etp, + __in uint16_t tci, + __out efx_desc_t *edp) +{ + efx_nic_t *enp = etp->et_enp; + const efx_tx_ops_t *etxop = enp->en_etxop; + + EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC); + EFSYS_ASSERT(etxop->etxo_qdesc_vlantci_create != NULL); + + etxop->etxo_qdesc_vlantci_create(etp, tci, edp); +} + + void +efx_tx_qdesc_checksum_create( + __in efx_txq_t *etp, + __in uint16_t flags, + __out efx_desc_t *edp) +{ + efx_nic_t *enp = etp->et_enp; + const efx_tx_ops_t *etxop = enp->en_etxop; + + EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC); + EFSYS_ASSERT(etxop->etxo_qdesc_checksum_create != NULL); + + etxop->etxo_qdesc_checksum_create(etp, flags, edp); +} + + +#if EFSYS_OPT_QSTATS + void +efx_tx_qstats_update( + __in efx_txq_t *etp, + 
__inout_ecount(TX_NQSTATS) efsys_stat_t *stat) +{ + efx_nic_t *enp = etp->et_enp; + const efx_tx_ops_t *etxop = enp->en_etxop; + + EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC); + + etxop->etxo_qstats_update(etp, stat); +} +#endif + + +#if EFSYS_OPT_SIENA + +static __checkReturn efx_rc_t +siena_tx_init( + __in efx_nic_t *enp) +{ + efx_oword_t oword; + + /* + * Disable the timer-based TX DMA backoff and allow TX DMA to be + * controlled by the RX FIFO fill level (although always allow a + * minimal trickle). + */ + EFX_BAR_READO(enp, FR_AZ_TX_RESERVED_REG, &oword); + EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_RX_SPACER, 0xfe); + EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_RX_SPACER_EN, 1); + EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_ONE_PKT_PER_Q, 1); + EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_PUSH_EN, 0); + EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_DIS_NON_IP_EV, 1); + EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_PREF_THRESHOLD, 2); + EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff); + + /* + * Filter all packets less than 14 bytes to avoid parsing + * errors. + */ + EFX_SET_OWORD_FIELD(oword, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1); + EFX_BAR_WRITEO(enp, FR_AZ_TX_RESERVED_REG, &oword); + + /* + * Do not set TX_NO_EOP_DISC_EN, since it limits packets to 16 + * descriptors (which is bad). + */ + EFX_BAR_READO(enp, FR_AZ_TX_CFG_REG, &oword); + EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_NO_EOP_DISC_EN, 0); + EFX_BAR_WRITEO(enp, FR_AZ_TX_CFG_REG, &oword); + + return (0); +} + +#define EFX_TX_DESC(_etp, _addr, _size, _eop, _added) \ + do { \ + unsigned int id; \ + size_t offset; \ + efx_qword_t qword; \ + \ + id = (_added)++ & (_etp)->et_mask; \ + offset = id * sizeof (efx_qword_t); \ + \ + EFSYS_PROBE5(tx_post, unsigned int, (_etp)->et_index, \ + unsigned int, id, efsys_dma_addr_t, (_addr), \ + size_t, (_size), boolean_t, (_eop)); \ + \ + EFX_POPULATE_QWORD_4(qword, \ + FSF_AZ_TX_KER_CONT, (_eop) ? 0 : 1, \ + FSF_AZ_TX_KER_BYTE_COUNT, (uint32_t)(_size), \ + FSF_AZ_TX_KER_BUF_ADDR_DW0, \ + (uint32_t)((_addr) & 0xffffffff), \ + FSF_AZ_TX_KER_BUF_ADDR_DW1, \ + (uint32_t)((_addr) >> 32)); \ + EFSYS_MEM_WRITEQ((_etp)->et_esmp, offset, &qword); \ + \ + _NOTE(CONSTANTCONDITION) \ + } while (B_FALSE) + +static __checkReturn efx_rc_t +siena_tx_qpost( + __in efx_txq_t *etp, + __in_ecount(ndescs) efx_buffer_t *eb, + __in unsigned int ndescs, + __in unsigned int completed, + __inout unsigned int *addedp) +{ + unsigned int added = *addedp; + unsigned int i; + + if (added - completed + ndescs > EFX_TXQ_LIMIT(etp->et_mask + 1)) + return (ENOSPC); + + for (i = 0; i < ndescs; i++) { + efx_buffer_t *ebp = &eb[i]; + efsys_dma_addr_t start = ebp->eb_addr; + size_t size = ebp->eb_size; + efsys_dma_addr_t end = start + size; + + /* + * Fragments must not span 4k boundaries. + * Here it is a stricter requirement than the maximum length. 
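+		 * For example, assuming the 4 KiB boundary mentioned
+		 * above, a fragment whose DMA address is 0xff0 bytes
+		 * into a 4 KiB region may cover at most 0x10 bytes
+		 * without violating the EFSYS_ASSERT below.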
+ */ + EFSYS_ASSERT(EFX_P2ROUNDUP(efsys_dma_addr_t, start + 1, + etp->et_enp->en_nic_cfg.enc_tx_dma_desc_boundary) >= end); + + EFX_TX_DESC(etp, start, size, ebp->eb_eop, added); + } + + EFX_TX_QSTAT_INCR(etp, TX_POST); + + *addedp = added; + return (0); +} + +static void +siena_tx_qpush( + __in efx_txq_t *etp, + __in unsigned int added, + __in unsigned int pushed) +{ + efx_nic_t *enp = etp->et_enp; + uint32_t wptr; + efx_dword_t dword; + efx_oword_t oword; + + /* Push the populated descriptors out */ + wptr = added & etp->et_mask; + + EFX_POPULATE_OWORD_1(oword, FRF_AZ_TX_DESC_WPTR, wptr); + + /* Only write the third DWORD */ + EFX_POPULATE_DWORD_1(dword, + EFX_DWORD_0, EFX_OWORD_FIELD(oword, EFX_DWORD_3)); + + /* Guarantee ordering of memory (descriptors) and PIO (doorbell) */ + EFX_DMA_SYNC_QUEUE_FOR_DEVICE(etp->et_esmp, etp->et_mask + 1, + wptr, pushed & etp->et_mask); + EFSYS_PIO_WRITE_BARRIER(); + EFX_BAR_TBL_WRITED3(enp, FR_BZ_TX_DESC_UPD_REGP0, + etp->et_index, &dword, B_FALSE); +} + +#define EFX_MAX_PACE_VALUE 20 + +static __checkReturn efx_rc_t +siena_tx_qpace( + __in efx_txq_t *etp, + __in unsigned int ns) +{ + efx_nic_t *enp = etp->et_enp; + efx_nic_cfg_t *encp = &(enp->en_nic_cfg); + efx_oword_t oword; + unsigned int pace_val; + unsigned int timer_period; + efx_rc_t rc; + + if (ns == 0) { + pace_val = 0; + } else { + /* + * The pace_val to write into the table is s.t + * ns <= timer_period * (2 ^ pace_val) + */ + timer_period = 104 / encp->enc_clk_mult; + for (pace_val = 1; pace_val <= EFX_MAX_PACE_VALUE; pace_val++) { + if ((timer_period << pace_val) >= ns) + break; + } + } + if (pace_val > EFX_MAX_PACE_VALUE) { + rc = EINVAL; + goto fail1; + } + + /* Update the pacing table */ + EFX_POPULATE_OWORD_1(oword, FRF_AZ_TX_PACE, pace_val); + EFX_BAR_TBL_WRITEO(enp, FR_AZ_TX_PACE_TBL, etp->et_index, + &oword, B_TRUE); + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +static __checkReturn efx_rc_t +siena_tx_qflush( + __in efx_txq_t *etp) +{ + efx_nic_t *enp = etp->et_enp; + efx_oword_t oword; + uint32_t label; + + efx_tx_qpace(etp, 0); + + label = etp->et_index; + + /* Flush the queue */ + EFX_POPULATE_OWORD_2(oword, FRF_AZ_TX_FLUSH_DESCQ_CMD, 1, + FRF_AZ_TX_FLUSH_DESCQ, label); + EFX_BAR_WRITEO(enp, FR_AZ_TX_FLUSH_DESCQ_REG, &oword); + + return (0); +} + +static void +siena_tx_qenable( + __in efx_txq_t *etp) +{ + efx_nic_t *enp = etp->et_enp; + efx_oword_t oword; + + EFX_BAR_TBL_READO(enp, FR_AZ_TX_DESC_PTR_TBL, + etp->et_index, &oword, B_TRUE); + + EFSYS_PROBE5(tx_descq_ptr, unsigned int, etp->et_index, + uint32_t, EFX_OWORD_FIELD(oword, EFX_DWORD_3), + uint32_t, EFX_OWORD_FIELD(oword, EFX_DWORD_2), + uint32_t, EFX_OWORD_FIELD(oword, EFX_DWORD_1), + uint32_t, EFX_OWORD_FIELD(oword, EFX_DWORD_0)); + + EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_DC_HW_RPTR, 0); + EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_DESCQ_HW_RPTR, 0); + EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_DESCQ_EN, 1); + + EFX_BAR_TBL_WRITEO(enp, FR_AZ_TX_DESC_PTR_TBL, + etp->et_index, &oword, B_TRUE); +} + +static __checkReturn efx_rc_t +siena_tx_qcreate( + __in efx_nic_t *enp, + __in unsigned int index, + __in unsigned int label, + __in efsys_mem_t *esmp, + __in size_t ndescs, + __in uint32_t id, + __in uint16_t flags, + __in efx_evq_t *eep, + __in efx_txq_t *etp, + __out unsigned int *addedp) +{ + efx_nic_cfg_t *encp = &(enp->en_nic_cfg); + efx_oword_t oword; + uint32_t size; + uint16_t inner_csum; + efx_rc_t rc; + + _NOTE(ARGUNUSED(esmp)) + + EFX_STATIC_ASSERT(EFX_EV_TX_NLABELS == + (1 << 
FRF_AZ_TX_DESCQ_LABEL_WIDTH)); + EFSYS_ASSERT3U(label, <, EFX_EV_TX_NLABELS); + + if (index >= encp->enc_txq_limit) { + rc = EINVAL; + goto fail1; + } + for (size = 0; + (1U << size) <= encp->enc_txq_max_ndescs / encp->enc_txq_min_ndescs; + size++) + if ((1U << size) == (uint32_t)ndescs / encp->enc_txq_min_ndescs) + break; + if (id + (1 << size) >= encp->enc_buftbl_limit) { + rc = EINVAL; + goto fail2; + } + + inner_csum = EFX_TXQ_CKSUM_INNER_IPV4 | EFX_TXQ_CKSUM_INNER_TCPUDP; + if ((flags & inner_csum) != 0) { + rc = EINVAL; + goto fail3; + } + + /* Set up the new descriptor queue */ + *addedp = 0; + + EFX_POPULATE_OWORD_6(oword, + FRF_AZ_TX_DESCQ_BUF_BASE_ID, id, + FRF_AZ_TX_DESCQ_EVQ_ID, eep->ee_index, + FRF_AZ_TX_DESCQ_OWNER_ID, 0, + FRF_AZ_TX_DESCQ_LABEL, label, + FRF_AZ_TX_DESCQ_SIZE, size, + FRF_AZ_TX_DESCQ_TYPE, 0); + + EFX_SET_OWORD_FIELD(oword, FRF_BZ_TX_NON_IP_DROP_DIS, 1); + EFX_SET_OWORD_FIELD(oword, FRF_BZ_TX_IP_CHKSM_DIS, + (flags & EFX_TXQ_CKSUM_IPV4) ? 0 : 1); + EFX_SET_OWORD_FIELD(oword, FRF_BZ_TX_TCP_CHKSM_DIS, + (flags & EFX_TXQ_CKSUM_TCPUDP) ? 0 : 1); + + EFX_BAR_TBL_WRITEO(enp, FR_AZ_TX_DESC_PTR_TBL, + etp->et_index, &oword, B_TRUE); + + return (0); + +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +siena_tx_qdesc_post( + __in efx_txq_t *etp, + __in_ecount(ndescs) efx_desc_t *ed, + __in unsigned int ndescs, + __in unsigned int completed, + __inout unsigned int *addedp) +{ + unsigned int added = *addedp; + unsigned int i; + efx_rc_t rc; + + if (added - completed + ndescs > EFX_TXQ_LIMIT(etp->et_mask + 1)) { + rc = ENOSPC; + goto fail1; + } + + for (i = 0; i < ndescs; i++) { + efx_desc_t *edp = &ed[i]; + unsigned int id; + size_t offset; + + id = added++ & etp->et_mask; + offset = id * sizeof (efx_desc_t); + + EFSYS_MEM_WRITEQ(etp->et_esmp, offset, &edp->ed_eq); + } + + EFSYS_PROBE3(tx_desc_post, unsigned int, etp->et_index, + unsigned int, added, unsigned int, ndescs); + + EFX_TX_QSTAT_INCR(etp, TX_POST); + + *addedp = added; + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + return (rc); +} + + void +siena_tx_qdesc_dma_create( + __in efx_txq_t *etp, + __in efsys_dma_addr_t addr, + __in size_t size, + __in boolean_t eop, + __out efx_desc_t *edp) +{ + /* + * Fragments must not span 4k boundaries. + * Here it is a stricter requirement than the maximum length. + */ + EFSYS_ASSERT(EFX_P2ROUNDUP(efsys_dma_addr_t, addr + 1, + etp->et_enp->en_nic_cfg.enc_tx_dma_desc_boundary) >= addr + size); + + EFSYS_PROBE4(tx_desc_dma_create, unsigned int, etp->et_index, + efsys_dma_addr_t, addr, + size_t, size, boolean_t, eop); + + EFX_POPULATE_QWORD_4(edp->ed_eq, + FSF_AZ_TX_KER_CONT, eop ? 
0 : 1, + FSF_AZ_TX_KER_BYTE_COUNT, (uint32_t)size, + FSF_AZ_TX_KER_BUF_ADDR_DW0, + (uint32_t)(addr & 0xffffffff), + FSF_AZ_TX_KER_BUF_ADDR_DW1, + (uint32_t)(addr >> 32)); +} + +#endif /* EFSYS_OPT_SIENA */ + +#if EFSYS_OPT_QSTATS +#if EFSYS_OPT_NAMES +/* START MKCONFIG GENERATED EfxTransmitQueueStatNamesBlock 2866874ecd7a363b */ +static const char * const __efx_tx_qstat_name[] = { + "post", + "post_pio", +}; +/* END MKCONFIG GENERATED EfxTransmitQueueStatNamesBlock */ + + const char * +efx_tx_qstat_name( + __in efx_nic_t *enp, + __in unsigned int id) +{ + _NOTE(ARGUNUSED(enp)) + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(id, <, TX_NQSTATS); + + return (__efx_tx_qstat_name[id]); +} +#endif /* EFSYS_OPT_NAMES */ +#endif /* EFSYS_OPT_QSTATS */ + +#if EFSYS_OPT_SIENA + +#if EFSYS_OPT_QSTATS +static void +siena_tx_qstats_update( + __in efx_txq_t *etp, + __inout_ecount(TX_NQSTATS) efsys_stat_t *stat) +{ + unsigned int id; + + for (id = 0; id < TX_NQSTATS; id++) { + efsys_stat_t *essp = &stat[id]; + + EFSYS_STAT_INCR(essp, etp->et_stat[id]); + etp->et_stat[id] = 0; + } +} +#endif /* EFSYS_OPT_QSTATS */ + +static void +siena_tx_qdestroy( + __in efx_txq_t *etp) +{ + efx_nic_t *enp = etp->et_enp; + efx_oword_t oword; + + /* Purge descriptor queue */ + EFX_ZERO_OWORD(oword); + + EFX_BAR_TBL_WRITEO(enp, FR_AZ_TX_DESC_PTR_TBL, + etp->et_index, &oword, B_TRUE); +} + +static void +siena_tx_fini( + __in efx_nic_t *enp) +{ + _NOTE(ARGUNUSED(enp)) +} + +#endif /* EFSYS_OPT_SIENA */ diff --git a/src/spdk/dpdk/drivers/net/sfc/base/efx_types.h b/src/spdk/dpdk/drivers/net/sfc/base/efx_types.h new file mode 100644 index 000000000..4fddbbcdc --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/base/efx_types.h @@ -0,0 +1,1634 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2007-2019 Solarflare Communications Inc. + * + * Ackowledgement to Fen Systems Ltd. + */ + +#ifndef _SYS_EFX_TYPES_H +#define _SYS_EFX_TYPES_H + +#include "efsys.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * Bitfield access + * + * Solarflare NICs make extensive use of bitfields up to 128 bits + * wide. Since there is no native 128-bit datatype on most systems, + * and since 64-bit datatypes are inefficient on 32-bit systems and + * vice versa, we wrap accesses in a way that uses the most efficient + * datatype. + * + * The NICs are PCI devices and therefore little-endian. Since most + * of the quantities that we deal with are DMAed to/from host memory, + * we define our datatypes (efx_oword_t, efx_qword_t and efx_dword_t) + * to be little-endian. + * + * In the less common case of using PIO for individual register + * writes, we construct the little-endian datatype in host memory and + * then use non-swapping register access primitives, rather than + * constructing a native-endian datatype and relying on implicit + * byte-swapping. (We use a similar strategy for register reads.) + */ + +/* + * NOTE: Field definitions here and elsewhere are done in terms of a lowest + * bit number (LBN) and a width. 
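+ *
+ * For example, EFX_BYTE_1 below has LBN 8 and WIDTH 8, i.e. it names
+ * bits 8..15 of the containing datatype, and EFX_DWORD_3 (LBN 96,
+ * WIDTH 32) names bits 96..127 of an octword.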
+ */ + +#define EFX_DUMMY_FIELD_LBN 0 +#define EFX_DUMMY_FIELD_WIDTH 0 + +#define EFX_BYTE_0_LBN 0 +#define EFX_BYTE_0_WIDTH 8 + +#define EFX_BYTE_1_LBN 8 +#define EFX_BYTE_1_WIDTH 8 + +#define EFX_BYTE_2_LBN 16 +#define EFX_BYTE_2_WIDTH 8 + +#define EFX_BYTE_3_LBN 24 +#define EFX_BYTE_3_WIDTH 8 + +#define EFX_BYTE_4_LBN 32 +#define EFX_BYTE_4_WIDTH 8 + +#define EFX_BYTE_5_LBN 40 +#define EFX_BYTE_5_WIDTH 8 + +#define EFX_BYTE_6_LBN 48 +#define EFX_BYTE_6_WIDTH 8 + +#define EFX_BYTE_7_LBN 56 +#define EFX_BYTE_7_WIDTH 8 + +#define EFX_WORD_0_LBN 0 +#define EFX_WORD_0_WIDTH 16 + +#define EFX_WORD_1_LBN 16 +#define EFX_WORD_1_WIDTH 16 + +#define EFX_WORD_2_LBN 32 +#define EFX_WORD_2_WIDTH 16 + +#define EFX_WORD_3_LBN 48 +#define EFX_WORD_3_WIDTH 16 + +#define EFX_DWORD_0_LBN 0 +#define EFX_DWORD_0_WIDTH 32 + +#define EFX_DWORD_1_LBN 32 +#define EFX_DWORD_1_WIDTH 32 + +#define EFX_DWORD_2_LBN 64 +#define EFX_DWORD_2_WIDTH 32 + +#define EFX_DWORD_3_LBN 96 +#define EFX_DWORD_3_WIDTH 32 + +/* + * There are intentionally no EFX_QWORD_0 or EFX_QWORD_1 field definitions + * here as the implementaion of EFX_QWORD_FIELD and EFX_OWORD_FIELD do not + * support field widths larger than 32 bits. + */ + +/* Specified attribute (i.e. LBN ow WIDTH) of the specified field */ +#define EFX_VAL(_field, _attribute) \ + _field ## _ ## _attribute + +/* Lowest bit number of the specified field */ +#define EFX_LOW_BIT(_field) \ + EFX_VAL(_field, LBN) + +/* Width of the specified field */ +#define EFX_WIDTH(_field) \ + EFX_VAL(_field, WIDTH) + +/* Highest bit number of the specified field */ +#define EFX_HIGH_BIT(_field) \ + (EFX_LOW_BIT(_field) + EFX_WIDTH(_field) - 1) + +/* + * 64-bit mask equal in width to the specified field. + * + * For example, a field with width 5 would have a mask of 0x000000000000001f. + */ +#define EFX_MASK64(_field) \ + ((EFX_WIDTH(_field) == 64) ? ~((uint64_t)0) : \ + (((((uint64_t)1) << EFX_WIDTH(_field))) - 1)) +/* + * 32-bit mask equal in width to the specified field. + * + * For example, a field with width 5 would have a mask of 0x0000001f. + */ +#define EFX_MASK32(_field) \ + ((EFX_WIDTH(_field) == 32) ? ~((uint32_t)0) : \ + (((((uint32_t)1) << EFX_WIDTH(_field))) - 1)) + +/* + * 16-bit mask equal in width to the specified field. + * + * For example, a field with width 5 would have a mask of 0x001f. + */ +#define EFX_MASK16(_field) \ + ((EFX_WIDTH(_field) == 16) ? 0xffffu : \ + (uint16_t)((1 << EFX_WIDTH(_field)) - 1)) + +/* + * 8-bit mask equal in width to the specified field. + * + * For example, a field with width 5 would have a mask of 0x1f. + */ +#define EFX_MASK8(_field) \ + ((uint8_t)((1 << EFX_WIDTH(_field)) - 1)) + +#pragma pack(1) + +/* + * A byte (i.e. 8-bit) datatype + */ +typedef union efx_byte_u { + uint8_t eb_u8[1]; +} efx_byte_t; + +/* + * A word (i.e. 16-bit) datatype + * + * This datatype is defined to be little-endian. + */ +typedef union efx_word_u { + efx_byte_t ew_byte[2]; + uint16_t ew_u16[1]; + uint8_t ew_u8[2]; +} efx_word_t; + +/* + * A doubleword (i.e. 32-bit) datatype + * + * This datatype is defined to be little-endian. + */ +typedef union efx_dword_u { + efx_byte_t ed_byte[4]; + efx_word_t ed_word[2]; + uint32_t ed_u32[1]; + uint16_t ed_u16[2]; + uint8_t ed_u8[4]; +} efx_dword_t; + +/* + * A quadword (i.e. 64-bit) datatype + * + * This datatype is defined to be little-endian. 
+ */ +typedef union efx_qword_u { + efx_byte_t eq_byte[8]; + efx_word_t eq_word[4]; + efx_dword_t eq_dword[2]; +#if EFSYS_HAS_UINT64 + uint64_t eq_u64[1]; +#endif + uint32_t eq_u32[2]; + uint16_t eq_u16[4]; + uint8_t eq_u8[8]; +} efx_qword_t; + +/* + * An octword (i.e. 128-bit) datatype + * + * This datatype is defined to be little-endian. + */ +typedef union efx_oword_u { + efx_byte_t eo_byte[16]; + efx_word_t eo_word[8]; + efx_dword_t eo_dword[4]; + efx_qword_t eo_qword[2]; +#if EFSYS_HAS_SSE2_M128 + __m128i eo_u128[1]; +#endif +#if EFSYS_HAS_UINT64 + uint64_t eo_u64[2]; +#endif + uint32_t eo_u32[4]; + uint16_t eo_u16[8]; + uint8_t eo_u8[16]; +} efx_oword_t; + +#pragma pack() + +#define __SWAP16(_x) \ + ((((_x) & 0xff) << 8) | \ + (((_x) >> 8) & 0xff)) + +#define __SWAP32(_x) \ + ((__SWAP16((_x) & 0xffff) << 16) | \ + __SWAP16(((_x) >> 16) & 0xffff)) + +#define __SWAP64(_x) \ + ((__SWAP32((_x) & 0xffffffff) << 32) | \ + __SWAP32(((_x) >> 32) & 0xffffffff)) + +#define __NOSWAP16(_x) (_x) +#define __NOSWAP32(_x) (_x) +#define __NOSWAP64(_x) (_x) + +#if EFSYS_IS_BIG_ENDIAN + +#define __CPU_TO_LE_16(_x) ((uint16_t)__SWAP16(_x)) +#define __LE_TO_CPU_16(_x) ((uint16_t)__SWAP16(_x)) +#define __CPU_TO_BE_16(_x) ((uint16_t)__NOSWAP16(_x)) +#define __BE_TO_CPU_16(_x) ((uint16_t)__NOSWAP16(_x)) + +#define __CPU_TO_LE_32(_x) ((uint32_t)__SWAP32(_x)) +#define __LE_TO_CPU_32(_x) ((uint32_t)__SWAP32(_x)) +#define __CPU_TO_BE_32(_x) ((uint32_t)__NOSWAP32(_x)) +#define __BE_TO_CPU_32(_x) ((uint32_t)__NOSWAP32(_x)) + +#define __CPU_TO_LE_64(_x) ((uint64_t)__SWAP64(_x)) +#define __LE_TO_CPU_64(_x) ((uint64_t)__SWAP64(_x)) +#define __CPU_TO_BE_64(_x) ((uint64_t)__NOSWAP64(_x)) +#define __BE_TO_CPU_64(_x) ((uint64_t)__NOSWAP64(_x)) + +#elif EFSYS_IS_LITTLE_ENDIAN + +#define __CPU_TO_LE_16(_x) ((uint16_t)__NOSWAP16(_x)) +#define __LE_TO_CPU_16(_x) ((uint16_t)__NOSWAP16(_x)) +#define __CPU_TO_BE_16(_x) ((uint16_t)__SWAP16(_x)) +#define __BE_TO_CPU_16(_x) ((uint16_t)__SWAP16(_x)) + +#define __CPU_TO_LE_32(_x) ((uint32_t)__NOSWAP32(_x)) +#define __LE_TO_CPU_32(_x) ((uint32_t)__NOSWAP32(_x)) +#define __CPU_TO_BE_32(_x) ((uint32_t)__SWAP32(_x)) +#define __BE_TO_CPU_32(_x) ((uint32_t)__SWAP32(_x)) + +#define __CPU_TO_LE_64(_x) ((uint64_t)__NOSWAP64(_x)) +#define __LE_TO_CPU_64(_x) ((uint64_t)__NOSWAP64(_x)) +#define __CPU_TO_BE_64(_x) ((uint64_t)__SWAP64(_x)) +#define __BE_TO_CPU_64(_x) ((uint64_t)__SWAP64(_x)) + +#else + +#error "Neither of EFSYS_IS_{BIG,LITTLE}_ENDIAN is set" + +#endif + +#define __NATIVE_8(_x) (uint8_t)(_x) + +/* Format string for printing an efx_byte_t */ +#define EFX_BYTE_FMT "0x%02x" + +/* Format string for printing an efx_word_t */ +#define EFX_WORD_FMT "0x%04x" + +/* Format string for printing an efx_dword_t */ +#define EFX_DWORD_FMT "0x%08x" + +/* Format string for printing an efx_qword_t */ +#define EFX_QWORD_FMT "0x%08x:%08x" + +/* Format string for printing an efx_oword_t */ +#define EFX_OWORD_FMT "0x%08x:%08x:%08x:%08x" + +/* Parameters for printing an efx_byte_t */ +#define EFX_BYTE_VAL(_byte) \ + ((unsigned int)__NATIVE_8((_byte).eb_u8[0])) + +/* Parameters for printing an efx_word_t */ +#define EFX_WORD_VAL(_word) \ + ((unsigned int)__LE_TO_CPU_16((_word).ew_u16[0])) + +/* Parameters for printing an efx_dword_t */ +#define EFX_DWORD_VAL(_dword) \ + ((unsigned int)__LE_TO_CPU_32((_dword).ed_u32[0])) + +/* Parameters for printing an efx_qword_t */ +#define EFX_QWORD_VAL(_qword) \ + ((unsigned int)__LE_TO_CPU_32((_qword).eq_u32[1])), \ + ((unsigned 
int)__LE_TO_CPU_32((_qword).eq_u32[0])) + +/* Parameters for printing an efx_oword_t */ +#define EFX_OWORD_VAL(_oword) \ + ((unsigned int)__LE_TO_CPU_32((_oword).eo_u32[3])), \ + ((unsigned int)__LE_TO_CPU_32((_oword).eo_u32[2])), \ + ((unsigned int)__LE_TO_CPU_32((_oword).eo_u32[1])), \ + ((unsigned int)__LE_TO_CPU_32((_oword).eo_u32[0])) + +/* + * Stop lint complaining about some shifts. + */ +#ifdef __lint +extern int fix_lint; +#define FIX_LINT(_x) (_x + fix_lint) +#else +#define FIX_LINT(_x) (_x) +#endif + +/* + * Saturation arithmetic subtract with minimum equal to zero. + * + * Use saturating arithmetic to ensure a non-negative result. This + * avoids undefined behaviour (and compiler warnings) when used as a + * shift count. + */ +#define EFX_SSUB(_val, _sub) \ + ((_val) > (_sub) ? ((_val) - (_sub)) : 0) + +/* + * Extract bit field portion [low,high) from the native-endian element + * which contains bits [min,max). + * + * For example, suppose "element" represents the high 32 bits of a + * 64-bit value, and we wish to extract the bits belonging to the bit + * field occupying bits 28-45 of this 64-bit value. + * + * Then EFX_EXTRACT(_element, 32, 63, 28, 45) would give + * + * (_element) << 4 + * + * The result will contain the relevant bits filled in in the range + * [0,high-low), with garbage in bits [high-low+1,...). + */ +#define EFX_EXTRACT_NATIVE(_element, _min, _max, _low, _high) \ + ((FIX_LINT(_low > _max) || FIX_LINT(_high < _min)) ? \ + 0U : \ + ((_low > _min) ? \ + ((_element) >> EFX_SSUB(_low, _min)) : \ + ((_element) << EFX_SSUB(_min, _low)))) + +/* + * Extract bit field portion [low,high) from the 64-bit little-endian + * element which contains bits [min,max) + */ +#define EFX_EXTRACT64(_element, _min, _max, _low, _high) \ + EFX_EXTRACT_NATIVE(__LE_TO_CPU_64(_element), _min, _max, _low, _high) + +/* + * Extract bit field portion [low,high) from the 32-bit little-endian + * element which contains bits [min,max) + */ +#define EFX_EXTRACT32(_element, _min, _max, _low, _high) \ + EFX_EXTRACT_NATIVE(__LE_TO_CPU_32(_element), _min, _max, _low, _high) + +/* + * Extract bit field portion [low,high) from the 16-bit little-endian + * element which contains bits [min,max) + */ +#define EFX_EXTRACT16(_element, _min, _max, _low, _high) \ + EFX_EXTRACT_NATIVE(__LE_TO_CPU_16(_element), _min, _max, _low, _high) + +/* + * Extract bit field portion [low,high) from the 8-bit + * element which contains bits [min,max) + */ +#define EFX_EXTRACT8(_element, _min, _max, _low, _high) \ + EFX_EXTRACT_NATIVE(__NATIVE_8(_element), _min, _max, _low, _high) + +#define EFX_EXTRACT_OWORD64(_oword, _low, _high) \ + (EFX_EXTRACT64((_oword).eo_u64[0], FIX_LINT(0), FIX_LINT(63), \ + _low, _high) | \ + EFX_EXTRACT64((_oword).eo_u64[1], FIX_LINT(64), FIX_LINT(127), \ + _low, _high)) + +#define EFX_EXTRACT_OWORD32(_oword, _low, _high) \ + (EFX_EXTRACT32((_oword).eo_u32[0], FIX_LINT(0), FIX_LINT(31), \ + _low, _high) | \ + EFX_EXTRACT32((_oword).eo_u32[1], FIX_LINT(32), FIX_LINT(63), \ + _low, _high) | \ + EFX_EXTRACT32((_oword).eo_u32[2], FIX_LINT(64), FIX_LINT(95), \ + _low, _high) | \ + EFX_EXTRACT32((_oword).eo_u32[3], FIX_LINT(96), FIX_LINT(127), \ + _low, _high)) + +#define EFX_EXTRACT_QWORD64(_qword, _low, _high) \ + (EFX_EXTRACT64((_qword).eq_u64[0], FIX_LINT(0), FIX_LINT(63), \ + _low, _high)) + +#define EFX_EXTRACT_QWORD32(_qword, _low, _high) \ + (EFX_EXTRACT32((_qword).eq_u32[0], FIX_LINT(0), FIX_LINT(31), \ + _low, _high) | \ + EFX_EXTRACT32((_qword).eq_u32[1], FIX_LINT(32), FIX_LINT(63), \ + 
_low, _high)) + +#define EFX_EXTRACT_DWORD(_dword, _low, _high) \ + (EFX_EXTRACT32((_dword).ed_u32[0], FIX_LINT(0), FIX_LINT(31), \ + _low, _high)) + +#define EFX_EXTRACT_WORD(_word, _low, _high) \ + (EFX_EXTRACT16((_word).ew_u16[0], FIX_LINT(0), FIX_LINT(15), \ + _low, _high)) + +#define EFX_EXTRACT_BYTE(_byte, _low, _high) \ + (EFX_EXTRACT8((_byte).eb_u8[0], FIX_LINT(0), FIX_LINT(7), \ + _low, _high)) + + +#define EFX_OWORD_FIELD64(_oword, _field) \ + ((uint32_t)EFX_EXTRACT_OWORD64(_oword, EFX_LOW_BIT(_field), \ + EFX_HIGH_BIT(_field)) & EFX_MASK32(_field)) + +#define EFX_OWORD_FIELD32(_oword, _field) \ + (EFX_EXTRACT_OWORD32(_oword, EFX_LOW_BIT(_field), \ + EFX_HIGH_BIT(_field)) & EFX_MASK32(_field)) + +#define EFX_QWORD_FIELD64(_qword, _field) \ + ((uint32_t)EFX_EXTRACT_QWORD64(_qword, EFX_LOW_BIT(_field), \ + EFX_HIGH_BIT(_field)) & EFX_MASK32(_field)) + +#define EFX_QWORD_FIELD32(_qword, _field) \ + (EFX_EXTRACT_QWORD32(_qword, EFX_LOW_BIT(_field), \ + EFX_HIGH_BIT(_field)) & EFX_MASK32(_field)) + +#define EFX_DWORD_FIELD(_dword, _field) \ + (EFX_EXTRACT_DWORD(_dword, EFX_LOW_BIT(_field), \ + EFX_HIGH_BIT(_field)) & EFX_MASK32(_field)) + +#define EFX_WORD_FIELD(_word, _field) \ + (EFX_EXTRACT_WORD(_word, EFX_LOW_BIT(_field), \ + EFX_HIGH_BIT(_field)) & EFX_MASK16(_field)) + +#define EFX_BYTE_FIELD(_byte, _field) \ + (EFX_EXTRACT_BYTE(_byte, EFX_LOW_BIT(_field), \ + EFX_HIGH_BIT(_field)) & EFX_MASK8(_field)) + + +#define EFX_OWORD_IS_EQUAL64(_oword_a, _oword_b) \ + ((_oword_a).eo_u64[0] == (_oword_b).eo_u64[0] && \ + (_oword_a).eo_u64[1] == (_oword_b).eo_u64[1]) + +#define EFX_OWORD_IS_EQUAL32(_oword_a, _oword_b) \ + ((_oword_a).eo_u32[0] == (_oword_b).eo_u32[0] && \ + (_oword_a).eo_u32[1] == (_oword_b).eo_u32[1] && \ + (_oword_a).eo_u32[2] == (_oword_b).eo_u32[2] && \ + (_oword_a).eo_u32[3] == (_oword_b).eo_u32[3]) + +#define EFX_QWORD_IS_EQUAL64(_qword_a, _qword_b) \ + ((_qword_a).eq_u64[0] == (_qword_b).eq_u64[0]) + +#define EFX_QWORD_IS_EQUAL32(_qword_a, _qword_b) \ + ((_qword_a).eq_u32[0] == (_qword_b).eq_u32[0] && \ + (_qword_a).eq_u32[1] == (_qword_b).eq_u32[1]) + +#define EFX_DWORD_IS_EQUAL(_dword_a, _dword_b) \ + ((_dword_a).ed_u32[0] == (_dword_b).ed_u32[0]) + +#define EFX_WORD_IS_EQUAL(_word_a, _word_b) \ + ((_word_a).ew_u16[0] == (_word_b).ew_u16[0]) + +#define EFX_BYTE_IS_EQUAL(_byte_a, _byte_b) \ + ((_byte_a).eb_u8[0] == (_byte_b).eb_u8[0]) + + +#define EFX_OWORD_IS_ZERO64(_oword) \ + (((_oword).eo_u64[0] | \ + (_oword).eo_u64[1]) == 0) + +#define EFX_OWORD_IS_ZERO32(_oword) \ + (((_oword).eo_u32[0] | \ + (_oword).eo_u32[1] | \ + (_oword).eo_u32[2] | \ + (_oword).eo_u32[3]) == 0) + +#define EFX_QWORD_IS_ZERO64(_qword) \ + (((_qword).eq_u64[0]) == 0) + +#define EFX_QWORD_IS_ZERO32(_qword) \ + (((_qword).eq_u32[0] | \ + (_qword).eq_u32[1]) == 0) + +#define EFX_DWORD_IS_ZERO(_dword) \ + (((_dword).ed_u32[0]) == 0) + +#define EFX_WORD_IS_ZERO(_word) \ + (((_word).ew_u16[0]) == 0) + +#define EFX_BYTE_IS_ZERO(_byte) \ + (((_byte).eb_u8[0]) == 0) + + +#define EFX_OWORD_IS_SET64(_oword) \ + (((_oword).eo_u64[0] & \ + (_oword).eo_u64[1]) == ~((uint64_t)0)) + +#define EFX_OWORD_IS_SET32(_oword) \ + (((_oword).eo_u32[0] & \ + (_oword).eo_u32[1] & \ + (_oword).eo_u32[2] & \ + (_oword).eo_u32[3]) == ~((uint32_t)0)) + +#define EFX_QWORD_IS_SET64(_qword) \ + (((_qword).eq_u64[0]) == ~((uint64_t)0)) + +#define EFX_QWORD_IS_SET32(_qword) \ + (((_qword).eq_u32[0] & \ + (_qword).eq_u32[1]) == ~((uint32_t)0)) + +#define EFX_DWORD_IS_SET(_dword) \ + ((_dword).ed_u32[0] == ~((uint32_t)0)) + 
+#define EFX_WORD_IS_SET(_word) \ + ((_word).ew_u16[0] == ~((uint16_t)0)) + +#define EFX_BYTE_IS_SET(_byte) \ + ((_byte).eb_u8[0] == ~((uint8_t)0)) + +/* + * Construct bit field portion + * + * Creates the portion of the bit field [low,high) that lies within + * the range [min,max). + */ + +#define EFX_INSERT_NATIVE64(_min, _max, _low, _high, _value) \ + (((_low > _max) || (_high < _min)) ? \ + 0U : \ + ((_low > _min) ? \ + (((uint64_t)(_value)) << EFX_SSUB(_low, _min)) :\ + (((uint64_t)(_value)) >> EFX_SSUB(_min, _low)))) + +#define EFX_INSERT_NATIVE32(_min, _max, _low, _high, _value) \ + (((_low > _max) || (_high < _min)) ? \ + 0U : \ + ((_low > _min) ? \ + (((uint32_t)(_value)) << EFX_SSUB(_low, _min)) :\ + (((uint32_t)(_value)) >> EFX_SSUB(_min, _low)))) + +#define EFX_INSERT_NATIVE16(_min, _max, _low, _high, _value) \ + (((_low > _max) || (_high < _min)) ? \ + 0U : \ + (uint16_t)((_low > _min) ? \ + ((_value) << EFX_SSUB(_low, _min)) : \ + ((_value) >> EFX_SSUB(_min, _low)))) + +#define EFX_INSERT_NATIVE8(_min, _max, _low, _high, _value) \ + (((_low > _max) || (_high < _min)) ? \ + 0U : \ + (uint8_t)((_low > _min) ? \ + ((_value) << EFX_SSUB(_low, _min)) : \ + ((_value) >> EFX_SSUB(_min, _low)))) + +/* + * Construct bit field portion + * + * Creates the portion of the named bit field that lies within the + * range [min,max). + */ +#define EFX_INSERT_FIELD_NATIVE64(_min, _max, _field, _value) \ + EFX_INSERT_NATIVE64(_min, _max, EFX_LOW_BIT(_field), \ + EFX_HIGH_BIT(_field), _value) + +#define EFX_INSERT_FIELD_NATIVE32(_min, _max, _field, _value) \ + EFX_INSERT_NATIVE32(_min, _max, EFX_LOW_BIT(_field), \ + EFX_HIGH_BIT(_field), _value) + +#define EFX_INSERT_FIELD_NATIVE16(_min, _max, _field, _value) \ + EFX_INSERT_NATIVE16(_min, _max, EFX_LOW_BIT(_field), \ + EFX_HIGH_BIT(_field), _value) + +#define EFX_INSERT_FIELD_NATIVE8(_min, _max, _field, _value) \ + EFX_INSERT_NATIVE8(_min, _max, EFX_LOW_BIT(_field), \ + EFX_HIGH_BIT(_field), _value) + +/* + * Construct bit field + * + * Creates the portion of the named bit fields that lie within the + * range [min,max). 
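+ *
+ * All ten field/value pairs are always supplied; the EFX_POPULATE_*_<n>
+ * wrappers further below pad unused slots with EFX_DUMMY_FIELD (width
+ * 0), which contributes no bits to the result.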
+ */ +#define EFX_INSERT_FIELDS64(_min, _max, \ + _field1, _value1, _field2, _value2, _field3, _value3, \ + _field4, _value4, _field5, _value5, _field6, _value6, \ + _field7, _value7, _field8, _value8, _field9, _value9, \ + _field10, _value10) \ + __CPU_TO_LE_64( \ + EFX_INSERT_FIELD_NATIVE64(_min, _max, _field1, _value1) | \ + EFX_INSERT_FIELD_NATIVE64(_min, _max, _field2, _value2) | \ + EFX_INSERT_FIELD_NATIVE64(_min, _max, _field3, _value3) | \ + EFX_INSERT_FIELD_NATIVE64(_min, _max, _field4, _value4) | \ + EFX_INSERT_FIELD_NATIVE64(_min, _max, _field5, _value5) | \ + EFX_INSERT_FIELD_NATIVE64(_min, _max, _field6, _value6) | \ + EFX_INSERT_FIELD_NATIVE64(_min, _max, _field7, _value7) | \ + EFX_INSERT_FIELD_NATIVE64(_min, _max, _field8, _value8) | \ + EFX_INSERT_FIELD_NATIVE64(_min, _max, _field9, _value9) | \ + EFX_INSERT_FIELD_NATIVE64(_min, _max, _field10, _value10)) + +#define EFX_INSERT_FIELDS32(_min, _max, \ + _field1, _value1, _field2, _value2, _field3, _value3, \ + _field4, _value4, _field5, _value5, _field6, _value6, \ + _field7, _value7, _field8, _value8, _field9, _value9, \ + _field10, _value10) \ + __CPU_TO_LE_32( \ + EFX_INSERT_FIELD_NATIVE32(_min, _max, _field1, _value1) | \ + EFX_INSERT_FIELD_NATIVE32(_min, _max, _field2, _value2) | \ + EFX_INSERT_FIELD_NATIVE32(_min, _max, _field3, _value3) | \ + EFX_INSERT_FIELD_NATIVE32(_min, _max, _field4, _value4) | \ + EFX_INSERT_FIELD_NATIVE32(_min, _max, _field5, _value5) | \ + EFX_INSERT_FIELD_NATIVE32(_min, _max, _field6, _value6) | \ + EFX_INSERT_FIELD_NATIVE32(_min, _max, _field7, _value7) | \ + EFX_INSERT_FIELD_NATIVE32(_min, _max, _field8, _value8) | \ + EFX_INSERT_FIELD_NATIVE32(_min, _max, _field9, _value9) | \ + EFX_INSERT_FIELD_NATIVE32(_min, _max, _field10, _value10)) + +#define EFX_INSERT_FIELDS16(_min, _max, \ + _field1, _value1, _field2, _value2, _field3, _value3, \ + _field4, _value4, _field5, _value5, _field6, _value6, \ + _field7, _value7, _field8, _value8, _field9, _value9, \ + _field10, _value10) \ + __CPU_TO_LE_16( \ + EFX_INSERT_FIELD_NATIVE16(_min, _max, _field1, _value1) | \ + EFX_INSERT_FIELD_NATIVE16(_min, _max, _field2, _value2) | \ + EFX_INSERT_FIELD_NATIVE16(_min, _max, _field3, _value3) | \ + EFX_INSERT_FIELD_NATIVE16(_min, _max, _field4, _value4) | \ + EFX_INSERT_FIELD_NATIVE16(_min, _max, _field5, _value5) | \ + EFX_INSERT_FIELD_NATIVE16(_min, _max, _field6, _value6) | \ + EFX_INSERT_FIELD_NATIVE16(_min, _max, _field7, _value7) | \ + EFX_INSERT_FIELD_NATIVE16(_min, _max, _field8, _value8) | \ + EFX_INSERT_FIELD_NATIVE16(_min, _max, _field9, _value9) | \ + EFX_INSERT_FIELD_NATIVE16(_min, _max, _field10, _value10)) + +#define EFX_INSERT_FIELDS8(_min, _max, \ + _field1, _value1, _field2, _value2, _field3, _value3, \ + _field4, _value4, _field5, _value5, _field6, _value6, \ + _field7, _value7, _field8, _value8, _field9, _value9, \ + _field10, _value10) \ + __NATIVE_8( \ + EFX_INSERT_FIELD_NATIVE8(_min, _max, _field1, _value1) | \ + EFX_INSERT_FIELD_NATIVE8(_min, _max, _field2, _value2) | \ + EFX_INSERT_FIELD_NATIVE8(_min, _max, _field3, _value3) | \ + EFX_INSERT_FIELD_NATIVE8(_min, _max, _field4, _value4) | \ + EFX_INSERT_FIELD_NATIVE8(_min, _max, _field5, _value5) | \ + EFX_INSERT_FIELD_NATIVE8(_min, _max, _field6, _value6) | \ + EFX_INSERT_FIELD_NATIVE8(_min, _max, _field7, _value7) | \ + EFX_INSERT_FIELD_NATIVE8(_min, _max, _field8, _value8) | \ + EFX_INSERT_FIELD_NATIVE8(_min, _max, _field9, _value9) | \ + EFX_INSERT_FIELD_NATIVE8(_min, _max, _field10, _value10)) + +#define 
EFX_POPULATE_OWORD64(_oword, \ + _field1, _value1, _field2, _value2, _field3, _value3, \ + _field4, _value4, _field5, _value5, _field6, _value6, \ + _field7, _value7, _field8, _value8, _field9, _value9, \ + _field10, _value10) \ + do { \ + _NOTE(CONSTANTCONDITION) \ + (_oword).eo_u64[0] = EFX_INSERT_FIELDS64(0, 63, \ + _field1, _value1, _field2, _value2, \ + _field3, _value3, _field4, _value4, \ + _field5, _value5, _field6, _value6, \ + _field7, _value7, _field8, _value8, \ + _field9, _value9, _field10, _value10); \ + _NOTE(CONSTANTCONDITION) \ + (_oword).eo_u64[1] = EFX_INSERT_FIELDS64(64, 127, \ + _field1, _value1, _field2, _value2, \ + _field3, _value3, _field4, _value4, \ + _field5, _value5, _field6, _value6, \ + _field7, _value7, _field8, _value8, \ + _field9, _value9, _field10, _value10); \ + _NOTE(CONSTANTCONDITION) \ + } while (B_FALSE) + +#define EFX_POPULATE_OWORD32(_oword, \ + _field1, _value1, _field2, _value2, _field3, _value3, \ + _field4, _value4, _field5, _value5, _field6, _value6, \ + _field7, _value7, _field8, _value8, _field9, _value9, \ + _field10, _value10) \ + do { \ + _NOTE(CONSTANTCONDITION) \ + (_oword).eo_u32[0] = EFX_INSERT_FIELDS32(0, 31, \ + _field1, _value1, _field2, _value2, \ + _field3, _value3, _field4, _value4, \ + _field5, _value5, _field6, _value6, \ + _field7, _value7, _field8, _value8, \ + _field9, _value9, _field10, _value10); \ + _NOTE(CONSTANTCONDITION) \ + (_oword).eo_u32[1] = EFX_INSERT_FIELDS32(32, 63, \ + _field1, _value1, _field2, _value2, \ + _field3, _value3, _field4, _value4, \ + _field5, _value5, _field6, _value6, \ + _field7, _value7, _field8, _value8, \ + _field9, _value9, _field10, _value10); \ + _NOTE(CONSTANTCONDITION) \ + (_oword).eo_u32[2] = EFX_INSERT_FIELDS32(64, 95, \ + _field1, _value1, _field2, _value2, \ + _field3, _value3, _field4, _value4, \ + _field5, _value5, _field6, _value6, \ + _field7, _value7, _field8, _value8, \ + _field9, _value9, _field10, _value10); \ + _NOTE(CONSTANTCONDITION) \ + (_oword).eo_u32[3] = EFX_INSERT_FIELDS32(96, 127, \ + _field1, _value1, _field2, _value2, \ + _field3, _value3, _field4, _value4, \ + _field5, _value5, _field6, _value6, \ + _field7, _value7, _field8, _value8, \ + _field9, _value9, _field10, _value10); \ + _NOTE(CONSTANTCONDITION) \ + } while (B_FALSE) + +#define EFX_POPULATE_QWORD64(_qword, \ + _field1, _value1, _field2, _value2, _field3, _value3, \ + _field4, _value4, _field5, _value5, _field6, _value6, \ + _field7, _value7, _field8, _value8, _field9, _value9, \ + _field10, _value10) \ + do { \ + _NOTE(CONSTANTCONDITION) \ + (_qword).eq_u64[0] = EFX_INSERT_FIELDS64(0, 63, \ + _field1, _value1, _field2, _value2, \ + _field3, _value3, _field4, _value4, \ + _field5, _value5, _field6, _value6, \ + _field7, _value7, _field8, _value8, \ + _field9, _value9, _field10, _value10); \ + _NOTE(CONSTANTCONDITION) \ + } while (B_FALSE) + +#define EFX_POPULATE_QWORD32(_qword, \ + _field1, _value1, _field2, _value2, _field3, _value3, \ + _field4, _value4, _field5, _value5, _field6, _value6, \ + _field7, _value7, _field8, _value8, _field9, _value9, \ + _field10, _value10) \ + do { \ + _NOTE(CONSTANTCONDITION) \ + (_qword).eq_u32[0] = EFX_INSERT_FIELDS32(0, 31, \ + _field1, _value1, _field2, _value2, \ + _field3, _value3, _field4, _value4, \ + _field5, _value5, _field6, _value6, \ + _field7, _value7, _field8, _value8, \ + _field9, _value9, _field10, _value10); \ + _NOTE(CONSTANTCONDITION) \ + (_qword).eq_u32[1] = EFX_INSERT_FIELDS32(32, 63, \ + _field1, _value1, _field2, _value2, \ + _field3, _value3, 
_field4, _value4, \ + _field5, _value5, _field6, _value6, \ + _field7, _value7, _field8, _value8, \ + _field9, _value9, _field10, _value10); \ + _NOTE(CONSTANTCONDITION) \ + } while (B_FALSE) + +#define EFX_POPULATE_DWORD(_dword, \ + _field1, _value1, _field2, _value2, _field3, _value3, \ + _field4, _value4, _field5, _value5, _field6, _value6, \ + _field7, _value7, _field8, _value8, _field9, _value9, \ + _field10, _value10) \ + do { \ + _NOTE(CONSTANTCONDITION) \ + (_dword).ed_u32[0] = EFX_INSERT_FIELDS32(0, 31, \ + _field1, _value1, _field2, _value2, \ + _field3, _value3, _field4, _value4, \ + _field5, _value5, _field6, _value6, \ + _field7, _value7, _field8, _value8, \ + _field9, _value9, _field10, _value10); \ + _NOTE(CONSTANTCONDITION) \ + } while (B_FALSE) + +#define EFX_POPULATE_WORD(_word, \ + _field1, _value1, _field2, _value2, _field3, _value3, \ + _field4, _value4, _field5, _value5, _field6, _value6, \ + _field7, _value7, _field8, _value8, _field9, _value9, \ + _field10, _value10) \ + do { \ + _NOTE(CONSTANTCONDITION) \ + (_word).ew_u16[0] = EFX_INSERT_FIELDS16(0, 15, \ + _field1, _value1, _field2, _value2, \ + _field3, _value3, _field4, _value4, \ + _field5, _value5, _field6, _value6, \ + _field7, _value7, _field8, _value8, \ + _field9, _value9, _field10, _value10); \ + _NOTE(CONSTANTCONDITION) \ + } while (B_FALSE) + +#define EFX_POPULATE_BYTE(_byte, \ + _field1, _value1, _field2, _value2, _field3, _value3, \ + _field4, _value4, _field5, _value5, _field6, _value6, \ + _field7, _value7, _field8, _value8, _field9, _value9, \ + _field10, _value10) \ + do { \ + _NOTE(CONSTANTCONDITION) \ + (_byte).eb_u8[0] = EFX_INSERT_FIELDS8(0, 7, \ + _field1, _value1, _field2, _value2, \ + _field3, _value3, _field4, _value4, \ + _field5, _value5, _field6, _value6, \ + _field7, _value7, _field8, _value8, \ + _field9, _value9, _field10, _value10); \ + _NOTE(CONSTANTCONDITION) \ + } while (B_FALSE) + +/* Populate an octword field with various numbers of arguments */ +#define EFX_POPULATE_OWORD_10 EFX_POPULATE_OWORD + +#define EFX_POPULATE_OWORD_9(_oword, \ + _field1, _value1, _field2, _value2, _field3, _value3, \ + _field4, _value4, _field5, _value5, _field6, _value6, \ + _field7, _value7, _field8, _value8, _field9, _value9) \ + EFX_POPULATE_OWORD_10(_oword, EFX_DUMMY_FIELD, 0, \ + _field1, _value1, _field2, _value2, _field3, _value3, \ + _field4, _value4, _field5, _value5, _field6, _value6, \ + _field7, _value7, _field8, _value8, _field9, _value9) + +#define EFX_POPULATE_OWORD_8(_oword, \ + _field1, _value1, _field2, _value2, _field3, _value3, \ + _field4, _value4, _field5, _value5, _field6, _value6, \ + _field7, _value7, _field8, _value8) \ + EFX_POPULATE_OWORD_9(_oword, EFX_DUMMY_FIELD, 0, \ + _field1, _value1, _field2, _value2, _field3, _value3, \ + _field4, _value4, _field5, _value5, _field6, _value6, \ + _field7, _value7, _field8, _value8) + +#define EFX_POPULATE_OWORD_7(_oword, \ + _field1, _value1, _field2, _value2, _field3, _value3, \ + _field4, _value4, _field5, _value5, _field6, _value6, \ + _field7, _value7) \ + EFX_POPULATE_OWORD_8(_oword, EFX_DUMMY_FIELD, 0, \ + _field1, _value1, _field2, _value2, _field3, _value3, \ + _field4, _value4, _field5, _value5, _field6, _value6, \ + _field7, _value7) + +#define EFX_POPULATE_OWORD_6(_oword, \ + _field1, _value1, _field2, _value2, _field3, _value3, \ + _field4, _value4, _field5, _value5, _field6, _value6) \ + EFX_POPULATE_OWORD_7(_oword, EFX_DUMMY_FIELD, 0, \ + _field1, _value1, _field2, _value2, _field3, _value3, \ + _field4, _value4, 
_field5, _value5, _field6, _value6) + +#define EFX_POPULATE_OWORD_5(_oword, \ + _field1, _value1, _field2, _value2, _field3, _value3, \ + _field4, _value4, _field5, _value5) \ + EFX_POPULATE_OWORD_6(_oword, EFX_DUMMY_FIELD, 0, \ + _field1, _value1, _field2, _value2, _field3, _value3, \ + _field4, _value4, _field5, _value5) + +#define EFX_POPULATE_OWORD_4(_oword, \ + _field1, _value1, _field2, _value2, _field3, _value3, \ + _field4, _value4) \ + EFX_POPULATE_OWORD_5(_oword, EFX_DUMMY_FIELD, 0, \ + _field1, _value1, _field2, _value2, _field3, _value3, \ + _field4, _value4) + +#define EFX_POPULATE_OWORD_3(_oword, \ + _field1, _value1, _field2, _value2, _field3, _value3) \ + EFX_POPULATE_OWORD_4(_oword, EFX_DUMMY_FIELD, 0, \ + _field1, _value1, _field2, _value2, _field3, _value3) + +#define EFX_POPULATE_OWORD_2(_oword, \ + _field1, _value1, _field2, _value2) \ + EFX_POPULATE_OWORD_3(_oword, EFX_DUMMY_FIELD, 0, \ + _field1, _value1, _field2, _value2) + +#define EFX_POPULATE_OWORD_1(_oword, \ + _field1, _value1) \ + EFX_POPULATE_OWORD_2(_oword, EFX_DUMMY_FIELD, 0, \ + _field1, _value1) + +#define EFX_ZERO_OWORD(_oword) \ + EFX_POPULATE_OWORD_1(_oword, EFX_DUMMY_FIELD, 0) + +#define EFX_SET_OWORD(_oword) \ + EFX_POPULATE_OWORD_4(_oword, \ + EFX_DWORD_0, 0xffffffff, EFX_DWORD_1, 0xffffffff, \ + EFX_DWORD_2, 0xffffffff, EFX_DWORD_3, 0xffffffff) + +/* Populate a quadword field with various numbers of arguments */ +#define EFX_POPULATE_QWORD_10 EFX_POPULATE_QWORD + +#define EFX_POPULATE_QWORD_9(_qword, \ + _field1, _value1, _field2, _value2, _field3, _value3, \ + _field4, _value4, _field5, _value5, _field6, _value6, \ + _field7, _value7, _field8, _value8, _field9, _value9) \ + EFX_POPULATE_QWORD_10(_qword, EFX_DUMMY_FIELD, 0, \ + _field1, _value1, _field2, _value2, _field3, _value3, \ + _field4, _value4, _field5, _value5, _field6, _value6, \ + _field7, _value7, _field8, _value8, _field9, _value9) + +#define EFX_POPULATE_QWORD_8(_qword, \ + _field1, _value1, _field2, _value2, _field3, _value3, \ + _field4, _value4, _field5, _value5, _field6, _value6, \ + _field7, _value7, _field8, _value8) \ + EFX_POPULATE_QWORD_9(_qword, EFX_DUMMY_FIELD, 0, \ + _field1, _value1, _field2, _value2, _field3, _value3, \ + _field4, _value4, _field5, _value5, _field6, _value6, \ + _field7, _value7, _field8, _value8) + +#define EFX_POPULATE_QWORD_7(_qword, \ + _field1, _value1, _field2, _value2, _field3, _value3, \ + _field4, _value4, _field5, _value5, _field6, _value6, \ + _field7, _value7) \ + EFX_POPULATE_QWORD_8(_qword, EFX_DUMMY_FIELD, 0, \ + _field1, _value1, _field2, _value2, _field3, _value3, \ + _field4, _value4, _field5, _value5, _field6, _value6, \ + _field7, _value7) + +#define EFX_POPULATE_QWORD_6(_qword, \ + _field1, _value1, _field2, _value2, _field3, _value3, \ + _field4, _value4, _field5, _value5, _field6, _value6) \ + EFX_POPULATE_QWORD_7(_qword, EFX_DUMMY_FIELD, 0, \ + _field1, _value1, _field2, _value2, _field3, _value3, \ + _field4, _value4, _field5, _value5, _field6, _value6) + +#define EFX_POPULATE_QWORD_5(_qword, \ + _field1, _value1, _field2, _value2, _field3, _value3, \ + _field4, _value4, _field5, _value5) \ + EFX_POPULATE_QWORD_6(_qword, EFX_DUMMY_FIELD, 0, \ + _field1, _value1, _field2, _value2, _field3, _value3, \ + _field4, _value4, _field5, _value5) + +#define EFX_POPULATE_QWORD_4(_qword, \ + _field1, _value1, _field2, _value2, _field3, _value3, \ + _field4, _value4) \ + EFX_POPULATE_QWORD_5(_qword, EFX_DUMMY_FIELD, 0, \ + _field1, _value1, _field2, _value2, _field3, _value3, \ + _field4, 
_value4) + +#define EFX_POPULATE_QWORD_3(_qword, \ + _field1, _value1, _field2, _value2, _field3, _value3) \ + EFX_POPULATE_QWORD_4(_qword, EFX_DUMMY_FIELD, 0, \ + _field1, _value1, _field2, _value2, _field3, _value3) + +#define EFX_POPULATE_QWORD_2(_qword, \ + _field1, _value1, _field2, _value2) \ + EFX_POPULATE_QWORD_3(_qword, EFX_DUMMY_FIELD, 0, \ + _field1, _value1, _field2, _value2) + +#define EFX_POPULATE_QWORD_1(_qword, \ + _field1, _value1) \ + EFX_POPULATE_QWORD_2(_qword, EFX_DUMMY_FIELD, 0, \ + _field1, _value1) + +#define EFX_ZERO_QWORD(_qword) \ + EFX_POPULATE_QWORD_1(_qword, EFX_DUMMY_FIELD, 0) + +#define EFX_SET_QWORD(_qword) \ + EFX_POPULATE_QWORD_2(_qword, \ + EFX_DWORD_0, 0xffffffff, EFX_DWORD_1, 0xffffffff) + +/* Populate a dword field with various numbers of arguments */ +#define EFX_POPULATE_DWORD_10 EFX_POPULATE_DWORD + +#define EFX_POPULATE_DWORD_9(_dword, \ + _field1, _value1, _field2, _value2, _field3, _value3, \ + _field4, _value4, _field5, _value5, _field6, _value6, \ + _field7, _value7, _field8, _value8, _field9, _value9) \ + EFX_POPULATE_DWORD_10(_dword, EFX_DUMMY_FIELD, 0, \ + _field1, _value1, _field2, _value2, _field3, _value3, \ + _field4, _value4, _field5, _value5, _field6, _value6, \ + _field7, _value7, _field8, _value8, _field9, _value9) + +#define EFX_POPULATE_DWORD_8(_dword, \ + _field1, _value1, _field2, _value2, _field3, _value3, \ + _field4, _value4, _field5, _value5, _field6, _value6, \ + _field7, _value7, _field8, _value8) \ + EFX_POPULATE_DWORD_9(_dword, EFX_DUMMY_FIELD, 0, \ + _field1, _value1, _field2, _value2, _field3, _value3, \ + _field4, _value4, _field5, _value5, _field6, _value6, \ + _field7, _value7, _field8, _value8) + +#define EFX_POPULATE_DWORD_7(_dword, \ + _field1, _value1, _field2, _value2, _field3, _value3, \ + _field4, _value4, _field5, _value5, _field6, _value6, \ + _field7, _value7) \ + EFX_POPULATE_DWORD_8(_dword, EFX_DUMMY_FIELD, 0, \ + _field1, _value1, _field2, _value2, _field3, _value3, \ + _field4, _value4, _field5, _value5, _field6, _value6, \ + _field7, _value7) + +#define EFX_POPULATE_DWORD_6(_dword, \ + _field1, _value1, _field2, _value2, _field3, _value3, \ + _field4, _value4, _field5, _value5, _field6, _value6) \ + EFX_POPULATE_DWORD_7(_dword, EFX_DUMMY_FIELD, 0, \ + _field1, _value1, _field2, _value2, _field3, _value3, \ + _field4, _value4, _field5, _value5, _field6, _value6) + +#define EFX_POPULATE_DWORD_5(_dword, \ + _field1, _value1, _field2, _value2, _field3, _value3, \ + _field4, _value4, _field5, _value5) \ + EFX_POPULATE_DWORD_6(_dword, EFX_DUMMY_FIELD, 0, \ + _field1, _value1, _field2, _value2, _field3, _value3, \ + _field4, _value4, _field5, _value5) + +#define EFX_POPULATE_DWORD_4(_dword, \ + _field1, _value1, _field2, _value2, _field3, _value3, \ + _field4, _value4) \ + EFX_POPULATE_DWORD_5(_dword, EFX_DUMMY_FIELD, 0, \ + _field1, _value1, _field2, _value2, _field3, _value3, \ + _field4, _value4) + +#define EFX_POPULATE_DWORD_3(_dword, \ + _field1, _value1, _field2, _value2, _field3, _value3) \ + EFX_POPULATE_DWORD_4(_dword, EFX_DUMMY_FIELD, 0, \ + _field1, _value1, _field2, _value2, _field3, _value3) + +#define EFX_POPULATE_DWORD_2(_dword, \ + _field1, _value1, _field2, _value2) \ + EFX_POPULATE_DWORD_3(_dword, EFX_DUMMY_FIELD, 0, \ + _field1, _value1, _field2, _value2) + +#define EFX_POPULATE_DWORD_1(_dword, \ + _field1, _value1) \ + EFX_POPULATE_DWORD_2(_dword, EFX_DUMMY_FIELD, 0, \ + _field1, _value1) + +#define EFX_ZERO_DWORD(_dword) \ + EFX_POPULATE_DWORD_1(_dword, EFX_DUMMY_FIELD, 0) + +#define 
EFX_SET_DWORD(_dword) \ + EFX_POPULATE_DWORD_1(_dword, \ + EFX_DWORD_0, 0xffffffff) + +/* Populate a word field with various numbers of arguments */ +#define EFX_POPULATE_WORD_10 EFX_POPULATE_WORD + +#define EFX_POPULATE_WORD_9(_word, \ + _field1, _value1, _field2, _value2, _field3, _value3, \ + _field4, _value4, _field5, _value5, _field6, _value6, \ + _field7, _value7, _field8, _value8, _field9, _value9) \ + EFX_POPULATE_WORD_10(_word, EFX_DUMMY_FIELD, 0, \ + _field1, _value1, _field2, _value2, _field3, _value3, \ + _field4, _value4, _field5, _value5, _field6, _value6, \ + _field7, _value7, _field8, _value8, _field9, _value9) + +#define EFX_POPULATE_WORD_8(_word, \ + _field1, _value1, _field2, _value2, _field3, _value3, \ + _field4, _value4, _field5, _value5, _field6, _value6, \ + _field7, _value7, _field8, _value8) \ + EFX_POPULATE_WORD_9(_word, EFX_DUMMY_FIELD, 0, \ + _field1, _value1, _field2, _value2, _field3, _value3, \ + _field4, _value4, _field5, _value5, _field6, _value6, \ + _field7, _value7, _field8, _value8) + +#define EFX_POPULATE_WORD_7(_word, \ + _field1, _value1, _field2, _value2, _field3, _value3, \ + _field4, _value4, _field5, _value5, _field6, _value6, \ + _field7, _value7) \ + EFX_POPULATE_WORD_8(_word, EFX_DUMMY_FIELD, 0, \ + _field1, _value1, _field2, _value2, _field3, _value3, \ + _field4, _value4, _field5, _value5, _field6, _value6, \ + _field7, _value7) + +#define EFX_POPULATE_WORD_6(_word, \ + _field1, _value1, _field2, _value2, _field3, _value3, \ + _field4, _value4, _field5, _value5, _field6, _value6) \ + EFX_POPULATE_WORD_7(_word, EFX_DUMMY_FIELD, 0, \ + _field1, _value1, _field2, _value2, _field3, _value3, \ + _field4, _value4, _field5, _value5, _field6, _value6) + +#define EFX_POPULATE_WORD_5(_word, \ + _field1, _value1, _field2, _value2, _field3, _value3, \ + _field4, _value4, _field5, _value5) \ + EFX_POPULATE_WORD_6(_word, EFX_DUMMY_FIELD, 0, \ + _field1, _value1, _field2, _value2, _field3, _value3, \ + _field4, _value4, _field5, _value5) + +#define EFX_POPULATE_WORD_4(_word, \ + _field1, _value1, _field2, _value2, _field3, _value3, \ + _field4, _value4) \ + EFX_POPULATE_WORD_5(_word, EFX_DUMMY_FIELD, 0, \ + _field1, _value1, _field2, _value2, _field3, _value3, \ + _field4, _value4) + +#define EFX_POPULATE_WORD_3(_word, \ + _field1, _value1, _field2, _value2, _field3, _value3) \ + EFX_POPULATE_WORD_4(_word, EFX_DUMMY_FIELD, 0, \ + _field1, _value1, _field2, _value2, _field3, _value3) + +#define EFX_POPULATE_WORD_2(_word, \ + _field1, _value1, _field2, _value2) \ + EFX_POPULATE_WORD_3(_word, EFX_DUMMY_FIELD, 0, \ + _field1, _value1, _field2, _value2) + +#define EFX_POPULATE_WORD_1(_word, \ + _field1, _value1) \ + EFX_POPULATE_WORD_2(_word, EFX_DUMMY_FIELD, 0, \ + _field1, _value1) + +#define EFX_ZERO_WORD(_word) \ + EFX_POPULATE_WORD_1(_word, EFX_DUMMY_FIELD, 0) + +#define EFX_SET_WORD(_word) \ + EFX_POPULATE_WORD_1(_word, \ + EFX_WORD_0, 0xffff) + +/* Populate a byte field with various numbers of arguments */ +#define EFX_POPULATE_BYTE_10 EFX_POPULATE_BYTE + +#define EFX_POPULATE_BYTE_9(_byte, \ + _field1, _value1, _field2, _value2, _field3, _value3, \ + _field4, _value4, _field5, _value5, _field6, _value6, \ + _field7, _value7, _field8, _value8, _field9, _value9) \ + EFX_POPULATE_BYTE_10(_byte, EFX_DUMMY_FIELD, 0, \ + _field1, _value1, _field2, _value2, _field3, _value3, \ + _field4, _value4, _field5, _value5, _field6, _value6, \ + _field7, _value7, _field8, _value8, _field9, _value9) + +#define EFX_POPULATE_BYTE_8(_byte, \ + _field1, _value1, _field2, 
_value2, _field3, _value3, \ + _field4, _value4, _field5, _value5, _field6, _value6, \ + _field7, _value7, _field8, _value8) \ + EFX_POPULATE_BYTE_9(_byte, EFX_DUMMY_FIELD, 0, \ + _field1, _value1, _field2, _value2, _field3, _value3, \ + _field4, _value4, _field5, _value5, _field6, _value6, \ + _field7, _value7, _field8, _value8) + +#define EFX_POPULATE_BYTE_7(_byte, \ + _field1, _value1, _field2, _value2, _field3, _value3, \ + _field4, _value4, _field5, _value5, _field6, _value6, \ + _field7, _value7) \ + EFX_POPULATE_BYTE_8(_byte, EFX_DUMMY_FIELD, 0, \ + _field1, _value1, _field2, _value2, _field3, _value3, \ + _field4, _value4, _field5, _value5, _field6, _value6, \ + _field7, _value7) + +#define EFX_POPULATE_BYTE_6(_byte, \ + _field1, _value1, _field2, _value2, _field3, _value3, \ + _field4, _value4, _field5, _value5, _field6, _value6) \ + EFX_POPULATE_BYTE_7(_byte, EFX_DUMMY_FIELD, 0, \ + _field1, _value1, _field2, _value2, _field3, _value3, \ + _field4, _value4, _field5, _value5, _field6, _value6) + +#define EFX_POPULATE_BYTE_5(_byte, \ + _field1, _value1, _field2, _value2, _field3, _value3, \ + _field4, _value4, _field5, _value5) \ + EFX_POPULATE_BYTE_6(_byte, EFX_DUMMY_FIELD, 0, \ + _field1, _value1, _field2, _value2, _field3, _value3, \ + _field4, _value4, _field5, _value5) + +#define EFX_POPULATE_BYTE_4(_byte, \ + _field1, _value1, _field2, _value2, _field3, _value3, \ + _field4, _value4) \ + EFX_POPULATE_BYTE_5(_byte, EFX_DUMMY_FIELD, 0, \ + _field1, _value1, _field2, _value2, _field3, _value3, \ + _field4, _value4) + +#define EFX_POPULATE_BYTE_3(_byte, \ + _field1, _value1, _field2, _value2, _field3, _value3) \ + EFX_POPULATE_BYTE_4(_byte, EFX_DUMMY_FIELD, 0, \ + _field1, _value1, _field2, _value2, _field3, _value3) + +#define EFX_POPULATE_BYTE_2(_byte, \ + _field1, _value1, _field2, _value2) \ + EFX_POPULATE_BYTE_3(_byte, EFX_DUMMY_FIELD, 0, \ + _field1, _value1, _field2, _value2) + +#define EFX_POPULATE_BYTE_1(_byte, \ + _field1, _value1) \ + EFX_POPULATE_BYTE_2(_byte, EFX_DUMMY_FIELD, 0, \ + _field1, _value1) + +#define EFX_ZERO_BYTE(_byte) \ + EFX_POPULATE_BYTE_1(_byte, EFX_DUMMY_FIELD, 0) + +#define EFX_SET_BYTE(_byte) \ + EFX_POPULATE_BYTE_1(_byte, \ + EFX_BYTE_0, 0xff) + +/* + * Modify a named field within an already-populated structure. Used + * for read-modify-write operations. 
+ */ + +#define EFX_INSERT_FIELD64(_min, _max, _field, _value) \ + __CPU_TO_LE_64(EFX_INSERT_FIELD_NATIVE64(_min, _max, _field, _value)) + +#define EFX_INSERT_FIELD32(_min, _max, _field, _value) \ + __CPU_TO_LE_32(EFX_INSERT_FIELD_NATIVE32(_min, _max, _field, _value)) + +#define EFX_INSERT_FIELD16(_min, _max, _field, _value) \ + __CPU_TO_LE_16(EFX_INSERT_FIELD_NATIVE16(_min, _max, _field, _value)) + +#define EFX_INSERT_FIELD8(_min, _max, _field, _value) \ + __NATIVE_8(EFX_INSERT_FIELD_NATIVE8(_min, _max, _field, _value)) + +#define EFX_INPLACE_MASK64(_min, _max, _field) \ + EFX_INSERT_FIELD64(_min, _max, _field, EFX_MASK64(_field)) + +#define EFX_INPLACE_MASK32(_min, _max, _field) \ + EFX_INSERT_FIELD32(_min, _max, _field, EFX_MASK32(_field)) + +#define EFX_INPLACE_MASK16(_min, _max, _field) \ + EFX_INSERT_FIELD16(_min, _max, _field, EFX_MASK16(_field)) + +#define EFX_INPLACE_MASK8(_min, _max, _field) \ + EFX_INSERT_FIELD8(_min, _max, _field, EFX_MASK8(_field)) + +#define EFX_SET_OWORD_FIELD64(_oword, _field, _value) \ + do { \ + _NOTE(CONSTANTCONDITION) \ + (_oword).eo_u64[0] = (((_oword).eo_u64[0] & \ + ~EFX_INPLACE_MASK64(0, 63, _field)) | \ + EFX_INSERT_FIELD64(0, 63, _field, _value)); \ + _NOTE(CONSTANTCONDITION) \ + (_oword).eo_u64[1] = (((_oword).eo_u64[1] & \ + ~EFX_INPLACE_MASK64(64, 127, _field)) | \ + EFX_INSERT_FIELD64(64, 127, _field, _value)); \ + _NOTE(CONSTANTCONDITION) \ + } while (B_FALSE) + +#define EFX_SET_OWORD_FIELD32(_oword, _field, _value) \ + do { \ + _NOTE(CONSTANTCONDITION) \ + (_oword).eo_u32[0] = (((_oword).eo_u32[0] & \ + ~EFX_INPLACE_MASK32(0, 31, _field)) | \ + EFX_INSERT_FIELD32(0, 31, _field, _value)); \ + _NOTE(CONSTANTCONDITION) \ + (_oword).eo_u32[1] = (((_oword).eo_u32[1] & \ + ~EFX_INPLACE_MASK32(32, 63, _field)) | \ + EFX_INSERT_FIELD32(32, 63, _field, _value)); \ + _NOTE(CONSTANTCONDITION) \ + (_oword).eo_u32[2] = (((_oword).eo_u32[2] & \ + ~EFX_INPLACE_MASK32(64, 95, _field)) | \ + EFX_INSERT_FIELD32(64, 95, _field, _value)); \ + _NOTE(CONSTANTCONDITION) \ + (_oword).eo_u32[3] = (((_oword).eo_u32[3] & \ + ~EFX_INPLACE_MASK32(96, 127, _field)) | \ + EFX_INSERT_FIELD32(96, 127, _field, _value)); \ + _NOTE(CONSTANTCONDITION) \ + } while (B_FALSE) + +#define EFX_SET_QWORD_FIELD64(_qword, _field, _value) \ + do { \ + _NOTE(CONSTANTCONDITION) \ + (_qword).eq_u64[0] = (((_qword).eq_u64[0] & \ + ~EFX_INPLACE_MASK64(0, 63, _field)) | \ + EFX_INSERT_FIELD64(0, 63, _field, _value)); \ + _NOTE(CONSTANTCONDITION) \ + } while (B_FALSE) + +#define EFX_SET_QWORD_FIELD32(_qword, _field, _value) \ + do { \ + _NOTE(CONSTANTCONDITION) \ + (_qword).eq_u32[0] = (((_qword).eq_u32[0] & \ + ~EFX_INPLACE_MASK32(0, 31, _field)) | \ + EFX_INSERT_FIELD32(0, 31, _field, _value)); \ + _NOTE(CONSTANTCONDITION) \ + (_qword).eq_u32[1] = (((_qword).eq_u32[1] & \ + ~EFX_INPLACE_MASK32(32, 63, _field)) | \ + EFX_INSERT_FIELD32(32, 63, _field, _value)); \ + _NOTE(CONSTANTCONDITION) \ + } while (B_FALSE) + +#define EFX_SET_DWORD_FIELD(_dword, _field, _value) \ + do { \ + _NOTE(CONSTANTCONDITION) \ + (_dword).ed_u32[0] = (((_dword).ed_u32[0] & \ + ~EFX_INPLACE_MASK32(0, 31, _field)) | \ + EFX_INSERT_FIELD32(0, 31, _field, _value)); \ + _NOTE(CONSTANTCONDITION) \ + } while (B_FALSE) + +#define EFX_SET_WORD_FIELD(_word, _field, _value) \ + do { \ + _NOTE(CONSTANTCONDITION) \ + (_word).ew_u16[0] = (((_word).ew_u16[0] & \ + ~EFX_INPLACE_MASK16(0, 15, _field)) | \ + EFX_INSERT_FIELD16(0, 15, _field, _value)); \ + _NOTE(CONSTANTCONDITION) \ + } while (B_FALSE) + +#define 
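/*
 * Editorial illustration, not part of the imported driver sources: each
 * EFX_SET_*_FIELD* macro above is a read-modify-write on one lane -- the
 * field's bits are cleared with an in-place mask and the shifted new value
 * is ORed in, lane by lane, so only the lane(s) containing the field change.
 * A stand-alone model on a host-endian uint32_t (hypothetical demo_* name;
 * the byte-order conversions done by __CPU_TO_LE_32 are omitted):
 */
static inline uint32_t
demo_set_field32(uint32_t reg, unsigned int lbn, unsigned int width,
    uint32_t value)
{
	uint32_t mask = ((width >= 32) ? ~0U : ((1U << width) - 1)) << lbn;

	/* Clear the field, then insert the shifted, masked new value. */
	return ((reg & ~mask) | ((value << lbn) & mask));
}
/* Example: demo_set_field32(0xffffffff, 8, 4, 0x3) == 0xfffff3ff. */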
EFX_SET_BYTE_FIELD(_byte, _field, _value) \ + do { \ + _NOTE(CONSTANTCONDITION) \ + (_byte).eb_u8[0] = (((_byte).eb_u8[0] & \ + ~EFX_INPLACE_MASK8(0, 7, _field)) | \ + EFX_INSERT_FIELD8(0, 7, _field, _value)); \ + _NOTE(CONSTANTCONDITION) \ + } while (B_FALSE) + +/* + * Set or clear a numbered bit within an octword. + */ + +#define EFX_SHIFT64(_bit, _base) \ + (((_bit) >= (_base) && (_bit) < (_base) + 64) ? \ + ((uint64_t)1 << EFX_SSUB((_bit), (_base))) : \ + 0U) + +#define EFX_SHIFT32(_bit, _base) \ + (((_bit) >= (_base) && (_bit) < (_base) + 32) ? \ + ((uint32_t)1 << EFX_SSUB((_bit),(_base))) : \ + 0U) + +#define EFX_SHIFT16(_bit, _base) \ + (((_bit) >= (_base) && (_bit) < (_base) + 16) ? \ + (uint16_t)(1 << EFX_SSUB((_bit), (_base))) : \ + 0U) + +#define EFX_SHIFT8(_bit, _base) \ + (((_bit) >= (_base) && (_bit) < (_base) + 8) ? \ + (uint8_t)(1 << EFX_SSUB((_bit), (_base))) : \ + 0U) + +#define EFX_SET_OWORD_BIT64(_oword, _bit) \ + do { \ + _NOTE(CONSTANTCONDITION) \ + (_oword).eo_u64[0] |= \ + __CPU_TO_LE_64(EFX_SHIFT64(_bit, FIX_LINT(0))); \ + (_oword).eo_u64[1] |= \ + __CPU_TO_LE_64(EFX_SHIFT64(_bit, FIX_LINT(64))); \ + _NOTE(CONSTANTCONDITION) \ + } while (B_FALSE) + +#define EFX_SET_OWORD_BIT32(_oword, _bit) \ + do { \ + _NOTE(CONSTANTCONDITION) \ + (_oword).eo_u32[0] |= \ + __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(0))); \ + (_oword).eo_u32[1] |= \ + __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(32))); \ + (_oword).eo_u32[2] |= \ + __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(64))); \ + (_oword).eo_u32[3] |= \ + __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(96))); \ + _NOTE(CONSTANTCONDITION) \ + } while (B_FALSE) + +#define EFX_CLEAR_OWORD_BIT64(_oword, _bit) \ + do { \ + _NOTE(CONSTANTCONDITION) \ + (_oword).eo_u64[0] &= \ + __CPU_TO_LE_64(~EFX_SHIFT64(_bit, FIX_LINT(0))); \ + (_oword).eo_u64[1] &= \ + __CPU_TO_LE_64(~EFX_SHIFT64(_bit, FIX_LINT(64))); \ + _NOTE(CONSTANTCONDITION) \ + } while (B_FALSE) + +#define EFX_CLEAR_OWORD_BIT32(_oword, _bit) \ + do { \ + _NOTE(CONSTANTCONDITION) \ + (_oword).eo_u32[0] &= \ + __CPU_TO_LE_32(~EFX_SHIFT32(_bit, FIX_LINT(0))); \ + (_oword).eo_u32[1] &= \ + __CPU_TO_LE_32(~EFX_SHIFT32(_bit, FIX_LINT(32))); \ + (_oword).eo_u32[2] &= \ + __CPU_TO_LE_32(~EFX_SHIFT32(_bit, FIX_LINT(64))); \ + (_oword).eo_u32[3] &= \ + __CPU_TO_LE_32(~EFX_SHIFT32(_bit, FIX_LINT(96))); \ + _NOTE(CONSTANTCONDITION) \ + } while (B_FALSE) + +#define EFX_TEST_OWORD_BIT64(_oword, _bit) \ + (((_oword).eo_u64[0] & \ + __CPU_TO_LE_64(EFX_SHIFT64(_bit, FIX_LINT(0)))) || \ + ((_oword).eo_u64[1] & \ + __CPU_TO_LE_64(EFX_SHIFT64(_bit, FIX_LINT(64))))) + +#define EFX_TEST_OWORD_BIT32(_oword, _bit) \ + (((_oword).eo_u32[0] & \ + __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(0)))) || \ + ((_oword).eo_u32[1] & \ + __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(32)))) || \ + ((_oword).eo_u32[2] & \ + __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(64)))) || \ + ((_oword).eo_u32[3] & \ + __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(96))))) + + +#define EFX_SET_QWORD_BIT64(_qword, _bit) \ + do { \ + _NOTE(CONSTANTCONDITION) \ + (_qword).eq_u64[0] |= \ + __CPU_TO_LE_64(EFX_SHIFT64(_bit, FIX_LINT(0))); \ + _NOTE(CONSTANTCONDITION) \ + } while (B_FALSE) + +#define EFX_SET_QWORD_BIT32(_qword, _bit) \ + do { \ + _NOTE(CONSTANTCONDITION) \ + (_qword).eq_u32[0] |= \ + __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(0))); \ + (_qword).eq_u32[1] |= \ + __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(32))); \ + _NOTE(CONSTANTCONDITION) \ + } while (B_FALSE) + +#define EFX_CLEAR_QWORD_BIT64(_qword, _bit) \ + do { \ + 
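/*
 * Editorial illustration, not part of the imported driver sources:
 * EFX_SHIFT32() above evaluates to a single-bit mask when _bit falls inside
 * the 32-bit lane that starts at _base, and to 0 otherwise.  That lets
 * EFX_SET_OWORD_BIT32() OR all four lanes unconditionally while modifying
 * only the one lane that actually holds the bit.  A stand-alone model
 * (hypothetical demo_* name; byte-order conversion omitted):
 */
static inline void
demo_set_bit128(uint32_t lane[4], unsigned int bit)
{
	unsigned int i;

	for (i = 0; i < 4; i++) {
		unsigned int base = i * 32;

		/* Equivalent of lane[i] |= EFX_SHIFT32(bit, base). */
		lane[i] |= (bit >= base && bit < base + 32) ?
		    (1U << (bit - base)) : 0U;
	}
}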
_NOTE(CONSTANTCONDITION) \ + (_qword).eq_u64[0] &= \ + __CPU_TO_LE_64(~EFX_SHIFT64(_bit, FIX_LINT(0))); \ + _NOTE(CONSTANTCONDITION) \ + } while (B_FALSE) + +#define EFX_CLEAR_QWORD_BIT32(_qword, _bit) \ + do { \ + _NOTE(CONSTANTCONDITION) \ + (_qword).eq_u32[0] &= \ + __CPU_TO_LE_32(~EFX_SHIFT32(_bit, FIX_LINT(0))); \ + (_qword).eq_u32[1] &= \ + __CPU_TO_LE_32(~EFX_SHIFT32(_bit, FIX_LINT(32))); \ + _NOTE(CONSTANTCONDITION) \ + } while (B_FALSE) + +#define EFX_TEST_QWORD_BIT64(_qword, _bit) \ + (((_qword).eq_u64[0] & \ + __CPU_TO_LE_64(EFX_SHIFT64(_bit, FIX_LINT(0)))) != 0) + +#define EFX_TEST_QWORD_BIT32(_qword, _bit) \ + (((_qword).eq_u32[0] & \ + __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(0)))) || \ + ((_qword).eq_u32[1] & \ + __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(32))))) + + +#define EFX_SET_DWORD_BIT(_dword, _bit) \ + do { \ + (_dword).ed_u32[0] |= \ + __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(0))); \ + _NOTE(CONSTANTCONDITION) \ + } while (B_FALSE) + +#define EFX_CLEAR_DWORD_BIT(_dword, _bit) \ + do { \ + (_dword).ed_u32[0] &= \ + __CPU_TO_LE_32(~EFX_SHIFT32(_bit, FIX_LINT(0))); \ + _NOTE(CONSTANTCONDITION) \ + } while (B_FALSE) + +#define EFX_TEST_DWORD_BIT(_dword, _bit) \ + (((_dword).ed_u32[0] & \ + __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(0)))) != 0) + + +#define EFX_SET_WORD_BIT(_word, _bit) \ + do { \ + (_word).ew_u16[0] |= \ + __CPU_TO_LE_16(EFX_SHIFT16(_bit, FIX_LINT(0))); \ + _NOTE(CONSTANTCONDITION) \ + } while (B_FALSE) + +#define EFX_CLEAR_WORD_BIT(_word, _bit) \ + do { \ + (_word).ew_u32[0] &= \ + __CPU_TO_LE_16(~EFX_SHIFT16(_bit, FIX_LINT(0))); \ + _NOTE(CONSTANTCONDITION) \ + } while (B_FALSE) + +#define EFX_TEST_WORD_BIT(_word, _bit) \ + (((_word).ew_u16[0] & \ + __CPU_TO_LE_16(EFX_SHIFT16(_bit, FIX_LINT(0)))) != 0) + + +#define EFX_SET_BYTE_BIT(_byte, _bit) \ + do { \ + (_byte).eb_u8[0] |= \ + __NATIVE_8(EFX_SHIFT8(_bit, FIX_LINT(0))); \ + _NOTE(CONSTANTCONDITION) \ + } while (B_FALSE) + +#define EFX_CLEAR_BYTE_BIT(_byte, _bit) \ + do { \ + (_byte).eb_u8[0] &= \ + __NATIVE_8(~EFX_SHIFT8(_bit, FIX_LINT(0))); \ + _NOTE(CONSTANTCONDITION) \ + } while (B_FALSE) + +#define EFX_TEST_BYTE_BIT(_byte, _bit) \ + (((_byte).eb_u8[0] & \ + __NATIVE_8(EFX_SHIFT8(_bit, FIX_LINT(0)))) != 0) + + +#define EFX_OR_OWORD64(_oword1, _oword2) \ + do { \ + (_oword1).eo_u64[0] |= (_oword2).eo_u64[0]; \ + (_oword1).eo_u64[1] |= (_oword2).eo_u64[1]; \ + _NOTE(CONSTANTCONDITION) \ + } while (B_FALSE) + +#define EFX_OR_OWORD32(_oword1, _oword2) \ + do { \ + (_oword1).eo_u32[0] |= (_oword2).eo_u32[0]; \ + (_oword1).eo_u32[1] |= (_oword2).eo_u32[1]; \ + (_oword1).eo_u32[2] |= (_oword2).eo_u32[2]; \ + (_oword1).eo_u32[3] |= (_oword2).eo_u32[3]; \ + _NOTE(CONSTANTCONDITION) \ + } while (B_FALSE) + +#define EFX_AND_OWORD64(_oword1, _oword2) \ + do { \ + (_oword1).eo_u64[0] &= (_oword2).eo_u64[0]; \ + (_oword1).eo_u64[1] &= (_oword2).eo_u64[1]; \ + _NOTE(CONSTANTCONDITION) \ + } while (B_FALSE) + +#define EFX_AND_OWORD32(_oword1, _oword2) \ + do { \ + (_oword1).eo_u32[0] &= (_oword2).eo_u32[0]; \ + (_oword1).eo_u32[1] &= (_oword2).eo_u32[1]; \ + (_oword1).eo_u32[2] &= (_oword2).eo_u32[2]; \ + (_oword1).eo_u32[3] &= (_oword2).eo_u32[3]; \ + _NOTE(CONSTANTCONDITION) \ + } while (B_FALSE) + +#define EFX_OR_QWORD64(_qword1, _qword2) \ + do { \ + (_qword1).eq_u64[0] |= (_qword2).eq_u64[0]; \ + _NOTE(CONSTANTCONDITION) \ + } while (B_FALSE) + +#define EFX_OR_QWORD32(_qword1, _qword2) \ + do { \ + (_qword1).eq_u32[0] |= (_qword2).eq_u32[0]; \ + (_qword1).eq_u32[1] |= (_qword2).eq_u32[1]; \ + 
_NOTE(CONSTANTCONDITION) \ + } while (B_FALSE) + +#define EFX_AND_QWORD64(_qword1, _qword2) \ + do { \ + (_qword1).eq_u64[0] &= (_qword2).eq_u64[0]; \ + _NOTE(CONSTANTCONDITION) \ + } while (B_FALSE) + +#define EFX_AND_QWORD32(_qword1, _qword2) \ + do { \ + (_qword1).eq_u32[0] &= (_qword2).eq_u32[0]; \ + (_qword1).eq_u32[1] &= (_qword2).eq_u32[1]; \ + _NOTE(CONSTANTCONDITION) \ + } while (B_FALSE) + +#define EFX_OR_DWORD(_dword1, _dword2) \ + do { \ + (_dword1).ed_u32[0] |= (_dword2).ed_u32[0]; \ + _NOTE(CONSTANTCONDITION) \ + } while (B_FALSE) + +#define EFX_AND_DWORD(_dword1, _dword2) \ + do { \ + (_dword1).ed_u32[0] &= (_dword2).ed_u32[0]; \ + _NOTE(CONSTANTCONDITION) \ + } while (B_FALSE) + +#define EFX_OR_WORD(_word1, _word2) \ + do { \ + (_word1).ew_u16[0] |= (_word2).ew_u16[0]; \ + _NOTE(CONSTANTCONDITION) \ + } while (B_FALSE) + +#define EFX_AND_WORD(_word1, _word2) \ + do { \ + (_word1).ew_u16[0] &= (_word2).ew_u16[0]; \ + _NOTE(CONSTANTCONDITION) \ + } while (B_FALSE) + +#define EFX_OR_BYTE(_byte1, _byte2) \ + do { \ + (_byte1).eb_u8[0] |= (_byte2).eb_u8[0]; \ + _NOTE(CONSTANTCONDITION) \ + } while (B_FALSE) + +#define EFX_AND_BYTE(_byte1, _byte2) \ + do { \ + (_byte1).eb_u8[0] &= (_byte2).eb_u8[0]; \ + _NOTE(CONSTANTCONDITION) \ + } while (B_FALSE) + +#if EFSYS_USE_UINT64 +#define EFX_OWORD_FIELD EFX_OWORD_FIELD64 +#define EFX_QWORD_FIELD EFX_QWORD_FIELD64 +#define EFX_OWORD_IS_EQUAL EFX_OWORD_IS_EQUAL64 +#define EFX_QWORD_IS_EQUAL EFX_QWORD_IS_EQUAL64 +#define EFX_OWORD_IS_ZERO EFX_OWORD_IS_ZERO64 +#define EFX_QWORD_IS_ZERO EFX_QWORD_IS_ZERO64 +#define EFX_OWORD_IS_SET EFX_OWORD_IS_SET64 +#define EFX_QWORD_IS_SET EFX_QWORD_IS_SET64 +#define EFX_POPULATE_OWORD EFX_POPULATE_OWORD64 +#define EFX_POPULATE_QWORD EFX_POPULATE_QWORD64 +#define EFX_SET_OWORD_FIELD EFX_SET_OWORD_FIELD64 +#define EFX_SET_QWORD_FIELD EFX_SET_QWORD_FIELD64 +#define EFX_SET_OWORD_BIT EFX_SET_OWORD_BIT64 +#define EFX_CLEAR_OWORD_BIT EFX_CLEAR_OWORD_BIT64 +#define EFX_TEST_OWORD_BIT EFX_TEST_OWORD_BIT64 +#define EFX_SET_QWORD_BIT EFX_SET_QWORD_BIT64 +#define EFX_CLEAR_QWORD_BIT EFX_CLEAR_QWORD_BIT64 +#define EFX_TEST_QWORD_BIT EFX_TEST_QWORD_BIT64 +#define EFX_OR_OWORD EFX_OR_OWORD64 +#define EFX_AND_OWORD EFX_AND_OWORD64 +#define EFX_OR_QWORD EFX_OR_QWORD64 +#define EFX_AND_QWORD EFX_AND_QWORD64 +#else +#define EFX_OWORD_FIELD EFX_OWORD_FIELD32 +#define EFX_QWORD_FIELD EFX_QWORD_FIELD32 +#define EFX_OWORD_IS_EQUAL EFX_OWORD_IS_EQUAL32 +#define EFX_QWORD_IS_EQUAL EFX_QWORD_IS_EQUAL32 +#define EFX_OWORD_IS_ZERO EFX_OWORD_IS_ZERO32 +#define EFX_QWORD_IS_ZERO EFX_QWORD_IS_ZERO32 +#define EFX_OWORD_IS_SET EFX_OWORD_IS_SET32 +#define EFX_QWORD_IS_SET EFX_QWORD_IS_SET32 +#define EFX_POPULATE_OWORD EFX_POPULATE_OWORD32 +#define EFX_POPULATE_QWORD EFX_POPULATE_QWORD32 +#define EFX_SET_OWORD_FIELD EFX_SET_OWORD_FIELD32 +#define EFX_SET_QWORD_FIELD EFX_SET_QWORD_FIELD32 +#define EFX_SET_OWORD_BIT EFX_SET_OWORD_BIT32 +#define EFX_CLEAR_OWORD_BIT EFX_CLEAR_OWORD_BIT32 +#define EFX_TEST_OWORD_BIT EFX_TEST_OWORD_BIT32 +#define EFX_SET_QWORD_BIT EFX_SET_QWORD_BIT32 +#define EFX_CLEAR_QWORD_BIT EFX_CLEAR_QWORD_BIT32 +#define EFX_TEST_QWORD_BIT EFX_TEST_QWORD_BIT32 +#define EFX_OR_OWORD EFX_OR_OWORD32 +#define EFX_AND_OWORD EFX_AND_OWORD32 +#define EFX_OR_QWORD EFX_OR_QWORD32 +#define EFX_AND_QWORD EFX_AND_QWORD32 +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* _SYS_EFX_TYPES_H */ diff --git a/src/spdk/dpdk/drivers/net/sfc/base/efx_vpd.c b/src/spdk/dpdk/drivers/net/sfc/base/efx_vpd.c new file mode 100644 index 
000000000..52115c431 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/base/efx_vpd.c @@ -0,0 +1,998 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2009-2019 Solarflare Communications Inc. + */ + +#include "efx.h" +#include "efx_impl.h" + +#if EFSYS_OPT_VPD + +#define TAG_TYPE_LBN 7 +#define TAG_TYPE_WIDTH 1 +#define TAG_TYPE_LARGE_ITEM_DECODE 1 +#define TAG_TYPE_SMALL_ITEM_DECODE 0 + +#define TAG_SMALL_ITEM_NAME_LBN 3 +#define TAG_SMALL_ITEM_NAME_WIDTH 4 +#define TAG_SMALL_ITEM_SIZE_LBN 0 +#define TAG_SMALL_ITEM_SIZE_WIDTH 3 + +#define TAG_LARGE_ITEM_NAME_LBN 0 +#define TAG_LARGE_ITEM_NAME_WIDTH 7 + +#define TAG_NAME_END_DECODE 0x0f +#define TAG_NAME_ID_STRING_DECODE 0x02 +#define TAG_NAME_VPD_R_DECODE 0x10 +#define TAG_NAME_VPD_W_DECODE 0x11 + +#if EFSYS_OPT_SIENA + +static const efx_vpd_ops_t __efx_vpd_siena_ops = { + siena_vpd_init, /* evpdo_init */ + siena_vpd_size, /* evpdo_size */ + siena_vpd_read, /* evpdo_read */ + siena_vpd_verify, /* evpdo_verify */ + siena_vpd_reinit, /* evpdo_reinit */ + siena_vpd_get, /* evpdo_get */ + siena_vpd_set, /* evpdo_set */ + siena_vpd_next, /* evpdo_next */ + siena_vpd_write, /* evpdo_write */ + siena_vpd_fini, /* evpdo_fini */ +}; + +#endif /* EFSYS_OPT_SIENA */ + +#if EFX_OPTS_EF10() + +static const efx_vpd_ops_t __efx_vpd_ef10_ops = { + ef10_vpd_init, /* evpdo_init */ + ef10_vpd_size, /* evpdo_size */ + ef10_vpd_read, /* evpdo_read */ + ef10_vpd_verify, /* evpdo_verify */ + ef10_vpd_reinit, /* evpdo_reinit */ + ef10_vpd_get, /* evpdo_get */ + ef10_vpd_set, /* evpdo_set */ + ef10_vpd_next, /* evpdo_next */ + ef10_vpd_write, /* evpdo_write */ + ef10_vpd_fini, /* evpdo_fini */ +}; + +#endif /* EFX_OPTS_EF10() */ + + __checkReturn efx_rc_t +efx_vpd_init( + __in efx_nic_t *enp) +{ + const efx_vpd_ops_t *evpdop; + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); + EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_VPD)); + + switch (enp->en_family) { +#if EFSYS_OPT_SIENA + case EFX_FAMILY_SIENA: + evpdop = &__efx_vpd_siena_ops; + break; +#endif /* EFSYS_OPT_SIENA */ + +#if EFSYS_OPT_HUNTINGTON + case EFX_FAMILY_HUNTINGTON: + evpdop = &__efx_vpd_ef10_ops; + break; +#endif /* EFSYS_OPT_HUNTINGTON */ + +#if EFSYS_OPT_MEDFORD + case EFX_FAMILY_MEDFORD: + evpdop = &__efx_vpd_ef10_ops; + break; +#endif /* EFSYS_OPT_MEDFORD */ + +#if EFSYS_OPT_MEDFORD2 + case EFX_FAMILY_MEDFORD2: + evpdop = &__efx_vpd_ef10_ops; + break; +#endif /* EFSYS_OPT_MEDFORD2 */ + + default: + EFSYS_ASSERT(0); + rc = ENOTSUP; + goto fail1; + } + + if (evpdop->evpdo_init != NULL) { + if ((rc = evpdop->evpdo_init(enp)) != 0) + goto fail2; + } + + enp->en_evpdop = evpdop; + enp->en_mod_flags |= EFX_MOD_VPD; + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +efx_vpd_size( + __in efx_nic_t *enp, + __out size_t *sizep) +{ + const efx_vpd_ops_t *evpdop = enp->en_evpdop; + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_VPD); + + if ((rc = evpdop->evpdo_size(enp, sizep)) != 0) + goto fail1; + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +efx_vpd_read( + __in efx_nic_t *enp, + __out_bcount(size) caddr_t data, + __in size_t size) +{ + const efx_vpd_ops_t *evpdop = enp->en_evpdop; + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + 
EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_VPD); + + if ((rc = evpdop->evpdo_read(enp, data, size)) != 0) + goto fail1; + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +efx_vpd_verify( + __in efx_nic_t *enp, + __in_bcount(size) caddr_t data, + __in size_t size) +{ + const efx_vpd_ops_t *evpdop = enp->en_evpdop; + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_VPD); + + if ((rc = evpdop->evpdo_verify(enp, data, size)) != 0) + goto fail1; + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +efx_vpd_reinit( + __in efx_nic_t *enp, + __in_bcount(size) caddr_t data, + __in size_t size) +{ + const efx_vpd_ops_t *evpdop = enp->en_evpdop; + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_VPD); + + if (evpdop->evpdo_reinit == NULL) { + rc = ENOTSUP; + goto fail1; + } + + if ((rc = evpdop->evpdo_reinit(enp, data, size)) != 0) + goto fail2; + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +efx_vpd_get( + __in efx_nic_t *enp, + __in_bcount(size) caddr_t data, + __in size_t size, + __inout efx_vpd_value_t *evvp) +{ + const efx_vpd_ops_t *evpdop = enp->en_evpdop; + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_VPD); + + if ((rc = evpdop->evpdo_get(enp, data, size, evvp)) != 0) { + if (rc == ENOENT) + return (rc); + + goto fail1; + } + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +efx_vpd_set( + __in efx_nic_t *enp, + __inout_bcount(size) caddr_t data, + __in size_t size, + __in efx_vpd_value_t *evvp) +{ + const efx_vpd_ops_t *evpdop = enp->en_evpdop; + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_VPD); + + if ((rc = evpdop->evpdo_set(enp, data, size, evvp)) != 0) + goto fail1; + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +efx_vpd_next( + __in efx_nic_t *enp, + __inout_bcount(size) caddr_t data, + __in size_t size, + __out efx_vpd_value_t *evvp, + __inout unsigned int *contp) +{ + const efx_vpd_ops_t *evpdop = enp->en_evpdop; + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_VPD); + + if ((rc = evpdop->evpdo_next(enp, data, size, evvp, contp)) != 0) + goto fail1; + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +efx_vpd_write( + __in efx_nic_t *enp, + __in_bcount(size) caddr_t data, + __in size_t size) +{ + const efx_vpd_ops_t *evpdop = enp->en_evpdop; + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_VPD); + + if ((rc = evpdop->evpdo_write(enp, data, size)) != 0) + goto fail1; + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +static __checkReturn efx_rc_t +efx_vpd_next_tag( + __in caddr_t data, + __in size_t size, + __inout unsigned int *offsetp, + __out efx_vpd_tag_t *tagp, + __out uint16_t *lengthp) +{ + efx_byte_t byte; + efx_word_t word; + uint8_t name; + uint16_t length; + size_t headlen; + efx_rc_t rc; + + if (*offsetp >= size) { + rc = EFAULT; + goto fail1; + } + + 
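/*
 * Editorial illustration, not part of the imported driver sources: the tag
 * header decoded below follows the PCI VPD resource-data format described by
 * the TAG_* constants above -- bit 7 selects small vs. large item, a small
 * item packs a 4-bit name and a 3-bit length into the same byte, and a large
 * item carries a 7-bit name plus a 16-bit little-endian length in the next
 * two bytes.  A stand-alone decoder (hypothetical demo_* name):
 */
static int
demo_vpd_tag_header(const uint8_t *p, size_t avail,
    unsigned int *namep, unsigned int *lenp, unsigned int *headlenp)
{
	if (avail < 1)
		return (-1);

	if (p[0] & 0x80) {
		/* Large item: 7-bit name, 16-bit little-endian length. */
		if (avail < 3)
			return (-1);
		*namep = p[0] & 0x7f;
		*lenp = (unsigned int)p[1] | ((unsigned int)p[2] << 8);
		*headlenp = 3;
	} else {
		/* Small item: name in bits 6:3, length in bits 2:0. */
		*namep = (p[0] >> 3) & 0x0f;
		*lenp = p[0] & 0x07;
		*headlenp = 1;
	}
	return (0);
}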
EFX_POPULATE_BYTE_1(byte, EFX_BYTE_0, data[*offsetp]); + + switch (EFX_BYTE_FIELD(byte, TAG_TYPE)) { + case TAG_TYPE_SMALL_ITEM_DECODE: + headlen = 1; + + name = EFX_BYTE_FIELD(byte, TAG_SMALL_ITEM_NAME); + length = (uint16_t)EFX_BYTE_FIELD(byte, TAG_SMALL_ITEM_SIZE); + + break; + + case TAG_TYPE_LARGE_ITEM_DECODE: + headlen = 3; + + if (*offsetp + headlen > size) { + rc = EFAULT; + goto fail2; + } + + name = EFX_BYTE_FIELD(byte, TAG_LARGE_ITEM_NAME); + EFX_POPULATE_WORD_2(word, + EFX_BYTE_0, data[*offsetp + 1], + EFX_BYTE_1, data[*offsetp + 2]); + length = EFX_WORD_FIELD(word, EFX_WORD_0); + + break; + + default: + rc = EFAULT; + goto fail2; + } + + if (*offsetp + headlen + length > size) { + rc = EFAULT; + goto fail3; + } + + EFX_STATIC_ASSERT(TAG_NAME_END_DECODE == EFX_VPD_END); + EFX_STATIC_ASSERT(TAG_NAME_ID_STRING_DECODE == EFX_VPD_ID); + EFX_STATIC_ASSERT(TAG_NAME_VPD_R_DECODE == EFX_VPD_RO); + EFX_STATIC_ASSERT(TAG_NAME_VPD_W_DECODE == EFX_VPD_RW); + if (name != EFX_VPD_END && name != EFX_VPD_ID && + name != EFX_VPD_RO) { + rc = EFAULT; + goto fail4; + } + + *tagp = name; + *lengthp = length; + *offsetp += headlen; + + return (0); + +fail4: + EFSYS_PROBE(fail4); +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +static __checkReturn efx_rc_t +efx_vpd_next_keyword( + __in_bcount(size) caddr_t tag, + __in size_t size, + __in unsigned int pos, + __out efx_vpd_keyword_t *keywordp, + __out uint8_t *lengthp) +{ + efx_vpd_keyword_t keyword; + uint8_t length; + efx_rc_t rc; + + if (pos + 3U > size) { + rc = EFAULT; + goto fail1; + } + + keyword = EFX_VPD_KEYWORD(tag[pos], tag[pos + 1]); + length = tag[pos + 2]; + + if (length == 0 || pos + 3U + length > size) { + rc = EFAULT; + goto fail2; + } + + *keywordp = keyword; + *lengthp = length; + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +efx_vpd_hunk_length( + __in_bcount(size) caddr_t data, + __in size_t size, + __out size_t *lengthp) +{ + efx_vpd_tag_t tag; + unsigned int offset; + uint16_t taglen; + efx_rc_t rc; + + offset = 0; + _NOTE(CONSTANTCONDITION) + while (1) { + if ((rc = efx_vpd_next_tag(data, size, &offset, + &tag, &taglen)) != 0) + goto fail1; + offset += taglen; + if (tag == EFX_VPD_END) + break; + } + + *lengthp = offset; + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +efx_vpd_hunk_verify( + __in_bcount(size) caddr_t data, + __in size_t size, + __out_opt boolean_t *cksummedp) +{ + efx_vpd_tag_t tag; + efx_vpd_keyword_t keyword; + unsigned int offset; + unsigned int pos; + unsigned int i; + uint16_t taglen; + uint8_t keylen; + uint8_t cksum; + boolean_t cksummed = B_FALSE; + efx_rc_t rc; + + /* + * Parse every tag,keyword in the existing VPD. If the csum is present, + * the assert it is correct, and is the final keyword in the RO block. 
+ */ + offset = 0; + _NOTE(CONSTANTCONDITION) + while (1) { + if ((rc = efx_vpd_next_tag(data, size, &offset, + &tag, &taglen)) != 0) + goto fail1; + if (tag == EFX_VPD_END) + break; + else if (tag == EFX_VPD_ID) + goto done; + + for (pos = 0; pos != taglen; pos += 3 + keylen) { + /* RV keyword must be the last in the block */ + if (cksummed) { + rc = EFAULT; + goto fail2; + } + + if ((rc = efx_vpd_next_keyword(data + offset, + taglen, pos, &keyword, &keylen)) != 0) + goto fail3; + + if (keyword == EFX_VPD_KEYWORD('R', 'V')) { + cksum = 0; + for (i = 0; i < offset + pos + 4; i++) + cksum += data[i]; + + if (cksum != 0) { + rc = EFAULT; + goto fail4; + } + + cksummed = B_TRUE; + } + } + + done: + offset += taglen; + } + + if (!cksummed) { + rc = EFAULT; + goto fail5; + } + + if (cksummedp != NULL) + *cksummedp = cksummed; + + return (0); + +fail5: + EFSYS_PROBE(fail5); +fail4: + EFSYS_PROBE(fail4); +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +static uint8_t __efx_vpd_blank_pid[] = { + /* Large resource type ID length 1 */ + 0x82, 0x01, 0x00, + /* Product name ' ' */ + 0x32, +}; + +static uint8_t __efx_vpd_blank_r[] = { + /* Large resource type VPD-R length 4 */ + 0x90, 0x04, 0x00, + /* RV keyword length 1 */ + 'R', 'V', 0x01, + /* RV payload checksum */ + 0x00, +}; + + __checkReturn efx_rc_t +efx_vpd_hunk_reinit( + __in_bcount(size) caddr_t data, + __in size_t size, + __in boolean_t wantpid) +{ + unsigned int offset = 0; + unsigned int pos; + efx_byte_t byte; + uint8_t cksum; + efx_rc_t rc; + + if (size < 0x100) { + rc = ENOSPC; + goto fail1; + } + + if (wantpid) { + memcpy(data + offset, __efx_vpd_blank_pid, + sizeof (__efx_vpd_blank_pid)); + offset += sizeof (__efx_vpd_blank_pid); + } + + memcpy(data + offset, __efx_vpd_blank_r, sizeof (__efx_vpd_blank_r)); + offset += sizeof (__efx_vpd_blank_r); + + /* Update checksum */ + cksum = 0; + for (pos = 0; pos < offset; pos++) + cksum += data[pos]; + data[offset - 1] -= cksum; + + /* Append trailing tag */ + EFX_POPULATE_BYTE_3(byte, + TAG_TYPE, TAG_TYPE_SMALL_ITEM_DECODE, + TAG_SMALL_ITEM_NAME, TAG_NAME_END_DECODE, + TAG_SMALL_ITEM_SIZE, 0); + data[offset] = EFX_BYTE_FIELD(byte, EFX_BYTE_0); + offset++; + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +efx_vpd_hunk_next( + __in_bcount(size) caddr_t data, + __in size_t size, + __out efx_vpd_tag_t *tagp, + __out efx_vpd_keyword_t *keywordp, + __out_opt unsigned int *payloadp, + __out_opt uint8_t *paylenp, + __inout unsigned int *contp) +{ + efx_vpd_tag_t tag; + efx_vpd_keyword_t keyword = 0; + unsigned int offset; + unsigned int pos; + unsigned int index; + uint16_t taglen; + uint8_t keylen; + uint8_t paylen; + efx_rc_t rc; + + offset = index = 0; + _NOTE(CONSTANTCONDITION) + while (1) { + if ((rc = efx_vpd_next_tag(data, size, &offset, + &tag, &taglen)) != 0) + goto fail1; + + if (tag == EFX_VPD_END) { + keyword = 0; + paylen = 0; + index = 0; + break; + } + + if (tag == EFX_VPD_ID) { + if (index++ == *contp) { + EFSYS_ASSERT3U(taglen, <, 0x100); + keyword = 0; + paylen = (uint8_t)MIN(taglen, 0xff); + + goto done; + } + } else { + for (pos = 0; pos != taglen; pos += 3 + keylen) { + if ((rc = efx_vpd_next_keyword(data + offset, + taglen, pos, &keyword, &keylen)) != 0) + goto fail2; + + if (index++ == *contp) { + offset += pos + 3; + paylen = keylen; + + goto done; + } + } + } + + offset += taglen; + } + +done: + *tagp = tag; + *keywordp = keyword; + if 
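/*
 * Editorial illustration, not part of the imported driver sources: the RV
 * keyword carries a single payload byte chosen so that every byte from the
 * start of the VPD image up to and including that byte sums to zero modulo
 * 256; the verify loop above checks exactly this, and the reinit/set helpers
 * in this file recompute it after modifying the image.  A stand-alone sketch
 * of the computation (hypothetical demo_* name):
 */
static uint8_t
demo_vpd_rv_byte(const uint8_t *data, size_t rv_payload_offset)
{
	uint8_t cksum = 0;
	size_t i;

	/* Sum every byte preceding the RV payload byte ... */
	for (i = 0; i < rv_payload_offset; i++)
		cksum = (uint8_t)(cksum + data[i]);

	/* ... and return the byte that brings the running total to zero. */
	return ((uint8_t)-cksum);
}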
(payloadp != NULL) + *payloadp = offset; + if (paylenp != NULL) + *paylenp = paylen; + + *contp = index; + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +efx_vpd_hunk_get( + __in_bcount(size) caddr_t data, + __in size_t size, + __in efx_vpd_tag_t tag, + __in efx_vpd_keyword_t keyword, + __out unsigned int *payloadp, + __out uint8_t *paylenp) +{ + efx_vpd_tag_t itag; + efx_vpd_keyword_t ikeyword; + unsigned int offset; + unsigned int pos; + uint16_t taglen; + uint8_t keylen; + efx_rc_t rc; + + offset = 0; + _NOTE(CONSTANTCONDITION) + while (1) { + if ((rc = efx_vpd_next_tag(data, size, &offset, + &itag, &taglen)) != 0) + goto fail1; + if (itag == EFX_VPD_END) + break; + + if (itag == tag) { + if (itag == EFX_VPD_ID) { + EFSYS_ASSERT3U(taglen, <, 0x100); + + *paylenp = (uint8_t)MIN(taglen, 0xff); + *payloadp = offset; + return (0); + } + + for (pos = 0; pos != taglen; pos += 3 + keylen) { + if ((rc = efx_vpd_next_keyword(data + offset, + taglen, pos, &ikeyword, &keylen)) != 0) + goto fail2; + + if (ikeyword == keyword) { + *paylenp = keylen; + *payloadp = offset + pos + 3; + return (0); + } + } + } + + offset += taglen; + } + + /* Not an error */ + return (ENOENT); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +efx_vpd_hunk_set( + __in_bcount(size) caddr_t data, + __in size_t size, + __in efx_vpd_value_t *evvp) +{ + efx_word_t word; + efx_vpd_tag_t tag; + efx_vpd_keyword_t keyword; + unsigned int offset; + unsigned int pos; + unsigned int taghead; + unsigned int source; + unsigned int dest; + unsigned int i; + uint16_t taglen; + uint8_t keylen; + uint8_t cksum; + size_t used; + efx_rc_t rc; + + switch (evvp->evv_tag) { + case EFX_VPD_ID: + if (evvp->evv_keyword != 0) { + rc = EINVAL; + goto fail1; + } + + /* Can't delete the ID keyword */ + if (evvp->evv_length == 0) { + rc = EINVAL; + goto fail1; + } + break; + + case EFX_VPD_RO: + if (evvp->evv_keyword == EFX_VPD_KEYWORD('R', 'V')) { + rc = EINVAL; + goto fail1; + } + break; + + default: + rc = EINVAL; + goto fail1; + } + + /* Determine total size of all current tags */ + if ((rc = efx_vpd_hunk_length(data, size, &used)) != 0) + goto fail2; + + offset = 0; + _NOTE(CONSTANTCONDITION) + while (1) { + taghead = offset; + if ((rc = efx_vpd_next_tag(data, size, &offset, + &tag, &taglen)) != 0) + goto fail3; + if (tag == EFX_VPD_END) + break; + else if (tag != evvp->evv_tag) { + offset += taglen; + continue; + } + + /* We only support modifying large resource tags */ + if (offset - taghead != 3) { + rc = EINVAL; + goto fail4; + } + + /* + * Work out the offset of the byte immediately after the + * old (=source) and new (=dest) new keyword/tag + */ + pos = 0; + if (tag == EFX_VPD_ID) { + source = offset + taglen; + dest = offset + evvp->evv_length; + goto check_space; + } + + EFSYS_ASSERT3U(tag, ==, EFX_VPD_RO); + source = dest = 0; + for (pos = 0; pos != taglen; pos += 3 + keylen) { + if ((rc = efx_vpd_next_keyword(data + offset, + taglen, pos, &keyword, &keylen)) != 0) + goto fail5; + + if (keyword == evvp->evv_keyword && + evvp->evv_length == 0) { + /* Deleting this keyword */ + source = offset + pos + 3 + keylen; + dest = offset + pos; + break; + + } else if (keyword == evvp->evv_keyword) { + /* Adjusting this keyword */ + source = offset + pos + 3 + keylen; + dest = offset + pos + 3 + evvp->evv_length; + break; + + } else if (keyword == EFX_VPD_KEYWORD('R', 'V')) { + /* The RV 
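/*
 * Editorial illustration, not part of the imported driver sources: a VPD-R/
 * VPD-W keyword entry is two ASCII key characters, a one-byte payload
 * length, and then the payload -- the "3 + keylen" stride used by the
 * parsers in this file.  A stand-alone encoder for one entry (hypothetical
 * demo_* name; memcpy() as already used elsewhere in this file):
 */
static size_t
demo_vpd_put_keyword(uint8_t *dst, char k0, char k1,
    const uint8_t *payload, uint8_t paylen)
{
	dst[0] = (uint8_t)k0;		/* e.g. 'R' */
	dst[1] = (uint8_t)k1;		/* e.g. 'V' */
	dst[2] = paylen;		/* payload length byte */
	memcpy(dst + 3, payload, paylen);

	return ((size_t)3 + paylen);
}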
keyword must be at the end */ + EFSYS_ASSERT3U(pos + 3 + keylen, ==, taglen); + + /* + * The keyword doesn't already exist. If the + * user deleting a non-existant keyword then + * this is a no-op. + */ + if (evvp->evv_length == 0) + return (0); + + /* Insert this keyword before the RV keyword */ + source = offset + pos; + dest = offset + pos + 3 + evvp->evv_length; + break; + } + } + + check_space: + if (used + dest > size + source) { + rc = ENOSPC; + goto fail6; + } + + /* Move trailing data */ + (void) memmove(data + dest, data + source, used - source); + + /* Copy contents */ + memcpy(data + dest - evvp->evv_length, evvp->evv_value, + evvp->evv_length); + + /* Insert new keyword header if required */ + if (tag != EFX_VPD_ID && evvp->evv_length > 0) { + EFX_POPULATE_WORD_1(word, EFX_WORD_0, + evvp->evv_keyword); + data[offset + pos + 0] = + EFX_WORD_FIELD(word, EFX_BYTE_0); + data[offset + pos + 1] = + EFX_WORD_FIELD(word, EFX_BYTE_1); + data[offset + pos + 2] = evvp->evv_length; + } + + /* Modify tag length (large resource type) */ + taglen += (uint16_t)(dest - source); + EFX_POPULATE_WORD_1(word, EFX_WORD_0, taglen); + data[offset - 2] = EFX_WORD_FIELD(word, EFX_BYTE_0); + data[offset - 1] = EFX_WORD_FIELD(word, EFX_BYTE_1); + + goto checksum; + } + + /* Unable to find the matching tag */ + rc = ENOENT; + goto fail7; + +checksum: + /* Find the RV tag, and update the checksum */ + offset = 0; + _NOTE(CONSTANTCONDITION) + while (1) { + if ((rc = efx_vpd_next_tag(data, size, &offset, + &tag, &taglen)) != 0) + goto fail8; + if (tag == EFX_VPD_END) + break; + if (tag == EFX_VPD_RO) { + for (pos = 0; pos != taglen; pos += 3 + keylen) { + if ((rc = efx_vpd_next_keyword(data + offset, + taglen, pos, &keyword, &keylen)) != 0) + goto fail9; + + if (keyword == EFX_VPD_KEYWORD('R', 'V')) { + cksum = 0; + for (i = 0; i < offset + pos + 3; i++) + cksum += data[i]; + data[i] = -cksum; + break; + } + } + } + + offset += taglen; + } + + /* Zero out the unused portion */ + (void) memset(data + offset + taglen, 0xff, size - offset - taglen); + + return (0); + +fail9: + EFSYS_PROBE(fail9); +fail8: + EFSYS_PROBE(fail8); +fail7: + EFSYS_PROBE(fail7); +fail6: + EFSYS_PROBE(fail6); +fail5: + EFSYS_PROBE(fail5); +fail4: + EFSYS_PROBE(fail4); +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + void +efx_vpd_fini( + __in efx_nic_t *enp) +{ + const efx_vpd_ops_t *evpdop = enp->en_evpdop; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); + EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_VPD); + + if (evpdop->evpdo_fini != NULL) + evpdop->evpdo_fini(enp); + + enp->en_evpdop = NULL; + enp->en_mod_flags &= ~EFX_MOD_VPD; +} + +#endif /* EFSYS_OPT_VPD */ diff --git a/src/spdk/dpdk/drivers/net/sfc/base/hunt_impl.h b/src/spdk/dpdk/drivers/net/sfc/base/hunt_impl.h new file mode 100644 index 000000000..546c5d9d0 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/base/hunt_impl.h @@ -0,0 +1,54 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2012-2019 Solarflare Communications Inc. 
+ */ + +#ifndef _SYS_HUNT_IMPL_H +#define _SYS_HUNT_IMPL_H + +#include "efx.h" +#include "efx_regs.h" +#include "efx_regs_ef10.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define HUNT_TXQ_MAXNDESCS 4096 +#define HUNT_TXQ_MAXNDESCS_BUG35388_WORKAROUND 2048 + +#define HUNT_EVQ_MAXNBUFS (64) + +/* Missing register definitions */ +#ifndef ER_DZ_TX_PIOBUF_OFST +#define ER_DZ_TX_PIOBUF_OFST 0x00001000 +#endif +#ifndef ER_DZ_TX_PIOBUF_STEP +#define ER_DZ_TX_PIOBUF_STEP 8192 +#endif +#ifndef ER_DZ_TX_PIOBUF_ROWS +#define ER_DZ_TX_PIOBUF_ROWS 2048 +#endif + +#ifndef ER_DZ_TX_PIOBUF_SIZE +#define ER_DZ_TX_PIOBUF_SIZE 2048 +#endif + +#define HUNT_PIOBUF_NBUFS (16) +#define HUNT_PIOBUF_SIZE (ER_DZ_TX_PIOBUF_SIZE) + +#define HUNT_MIN_PIO_ALLOC_SIZE (HUNT_PIOBUF_SIZE / 32) + + +/* NIC */ + +extern __checkReturn efx_rc_t +hunt_board_cfg( + __in efx_nic_t *enp); + +#ifdef __cplusplus +} +#endif + +#endif /* _SYS_HUNT_IMPL_H */ diff --git a/src/spdk/dpdk/drivers/net/sfc/base/hunt_nic.c b/src/spdk/dpdk/drivers/net/sfc/base/hunt_nic.c new file mode 100644 index 000000000..75c905007 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/base/hunt_nic.c @@ -0,0 +1,204 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2012-2019 Solarflare Communications Inc. + */ + +#include "efx.h" +#include "efx_impl.h" +#if EFSYS_OPT_MON_MCDI +#include "mcdi_mon.h" +#endif + +#if EFSYS_OPT_HUNTINGTON + +#include "ef10_tlv_layout.h" + +static __checkReturn efx_rc_t +hunt_nic_get_required_pcie_bandwidth( + __in efx_nic_t *enp, + __out uint32_t *bandwidth_mbpsp) +{ + uint32_t port_modes; + uint32_t bandwidth; + efx_rc_t rc; + + /* + * On Huntington, the firmware may not give us the current port mode, so + * we need to go by the set of available port modes and assume the most + * capable mode is in use. + */ + + if ((rc = efx_mcdi_get_port_modes(enp, &port_modes, + NULL, NULL)) != 0) { + /* No port mode info available */ + bandwidth = 0; + goto out; + } + + if (port_modes & (1U << TLV_PORT_MODE_40G_40G)) { + /* + * This needs the full PCIe bandwidth (and could use + * more) - roughly 64 Gbit/s for 8 lanes of Gen3. + */ + if ((rc = efx_nic_calculate_pcie_link_bandwidth(8, + EFX_PCIE_LINK_SPEED_GEN3, &bandwidth)) != 0) + goto fail1; + } else { + if (port_modes & (1U << TLV_PORT_MODE_40G)) { + bandwidth = 40000; + } else if (port_modes & (1U << TLV_PORT_MODE_10G_10G_10G_10G)) { + bandwidth = 4 * 10000; + } else { + /* Assume two 10G ports */ + bandwidth = 2 * 10000; + } + } + +out: + *bandwidth_mbpsp = bandwidth; + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +hunt_board_cfg( + __in efx_nic_t *enp) +{ + efx_nic_cfg_t *encp = &(enp->en_nic_cfg); + efx_port_t *epp = &(enp->en_port); + uint32_t sysclk, dpcpu_clk; + uint32_t bandwidth; + efx_rc_t rc; + + /* + * Enable firmware workarounds for hardware errata. + * Expected responses are: + * - 0 (zero): + * Success: workaround enabled or disabled as requested. + * - MC_CMD_ERR_ENOSYS (reported as ENOTSUP): + * Firmware does not support the MC_CMD_WORKAROUND request. + * (assume that the workaround is not supported). + * - MC_CMD_ERR_ENOENT (reported as ENOENT): + * Firmware does not support the requested workaround. + * - MC_CMD_ERR_EPERM (reported as EACCES): + * Unprivileged function cannot enable/disable workarounds. + * + * See efx_mcdi_request_errcode() for MCDI error translations. 
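/*
 * Editorial illustration, not part of the imported driver sources: the
 * "roughly 64 Gbit/s for 8 lanes of Gen3" figure in the comment above
 * follows from 8 GT/s per lane with 128b/130b encoding.  A stand-alone
 * calculation (hypothetical demo_* name; the driver itself delegates to
 * efx_nic_calculate_pcie_link_bandwidth()):
 */
static uint32_t
demo_pcie_gen3_bandwidth_mbps(uint32_t lanes)
{
	/* 8000 MT/s per lane, 128/130 payload efficiency. */
	return (lanes * ((8000U * 128U) / 130U));
}
/* demo_pcie_gen3_bandwidth_mbps(8) == 8 * 7876 == 63008 Mbit/s. */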
+ */ + + /* + * If the bug35388 workaround is enabled, then use an indirect access + * method to avoid unsafe EVQ writes. + */ + rc = efx_mcdi_set_workaround(enp, MC_CMD_WORKAROUND_BUG35388, B_TRUE, + NULL); + if ((rc == 0) || (rc == EACCES)) + encp->enc_bug35388_workaround = B_TRUE; + else if ((rc == ENOTSUP) || (rc == ENOENT)) + encp->enc_bug35388_workaround = B_FALSE; + else + goto fail1; + + /* + * If the bug41750 workaround is enabled, then do not test interrupts, + * as the test will fail (seen with Greenport controllers). + */ + rc = efx_mcdi_set_workaround(enp, MC_CMD_WORKAROUND_BUG41750, B_TRUE, + NULL); + if (rc == 0) { + encp->enc_bug41750_workaround = B_TRUE; + } else if (rc == EACCES) { + /* Assume a controller with 40G ports needs the workaround. */ + if (epp->ep_default_adv_cap_mask & EFX_PHY_CAP_40000FDX) + encp->enc_bug41750_workaround = B_TRUE; + else + encp->enc_bug41750_workaround = B_FALSE; + } else if ((rc == ENOTSUP) || (rc == ENOENT)) { + encp->enc_bug41750_workaround = B_FALSE; + } else { + goto fail2; + } + if (EFX_PCI_FUNCTION_IS_VF(encp)) { + /* Interrupt testing does not work for VFs. See bug50084. */ + encp->enc_bug41750_workaround = B_TRUE; + } + + /* Get clock frequencies (in MHz). */ + if ((rc = efx_mcdi_get_clock(enp, &sysclk, &dpcpu_clk)) != 0) + goto fail3; + + /* + * The Huntington timer quantum is 1536 sysclk cycles, documented for + * the EV_TMR_VAL field of EV_TIMER_TBL. Scale for MHz and ns units. + */ + encp->enc_evq_timer_quantum_ns = 1536000UL / sysclk; /* 1536 cycles */ + if (encp->enc_bug35388_workaround) { + encp->enc_evq_timer_max_us = (encp->enc_evq_timer_quantum_ns << + ERF_DD_EVQ_IND_TIMER_VAL_WIDTH) / 1000; + } else { + encp->enc_evq_timer_max_us = (encp->enc_evq_timer_quantum_ns << + FRF_CZ_TC_TIMER_VAL_WIDTH) / 1000; + } + + encp->enc_bug61265_workaround = B_FALSE; /* Medford only */ + + /* Checksums for TSO sends can be incorrect on Huntington. */ + encp->enc_bug61297_workaround = B_TRUE; + + encp->enc_ev_desc_size = EF10_EVQ_DESC_SIZE; + encp->enc_rx_desc_size = EF10_RXQ_DESC_SIZE; + encp->enc_tx_desc_size = EF10_TXQ_DESC_SIZE; + + /* Alignment for receive packet DMA buffers */ + encp->enc_rx_buf_align_start = 1; + encp->enc_rx_buf_align_end = 64; /* RX DMA end padding */ + + encp->enc_evq_max_nevs = EF10_EVQ_MAXNEVS; + encp->enc_evq_min_nevs = EF10_EVQ_MINNEVS; + + encp->enc_rxq_max_ndescs = EF10_RXQ_MAXNDESCS; + encp->enc_rxq_min_ndescs = EF10_RXQ_MINNDESCS; + + /* + * The workaround for bug35388 uses the top bit of transmit queue + * descriptor writes, preventing the use of 4096 descriptor TXQs. + */ + encp->enc_txq_max_ndescs = encp->enc_bug35388_workaround ? 
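/*
 * Editorial illustration, not part of the imported driver sources: with the
 * 1536-cycle event timer quantum above, a sysclk reported in MHz gives the
 * quantum as 1536000 / sysclk nanoseconds, and the maximum timeout is that
 * quantum scaled by the width of the timer-value register field.  Worked
 * example; the 200 MHz clock and 14-bit field width are assumptions chosen
 * purely for illustration, not values taken from the hardware headers:
 */
static unsigned int
demo_evq_timer_max_us(unsigned int sysclk_mhz, unsigned int timer_val_width)
{
	unsigned int quantum_ns = 1536000U / sysclk_mhz;

	return ((quantum_ns << timer_val_width) / 1000U);
}
/* demo_evq_timer_max_us(200, 14) == (7680 << 14) / 1000 == 125829 us. */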
+ HUNT_TXQ_MAXNDESCS_BUG35388_WORKAROUND : + HUNT_TXQ_MAXNDESCS; + encp->enc_txq_min_ndescs = EF10_TXQ_MINNDESCS; + + EFX_STATIC_ASSERT(HUNT_PIOBUF_NBUFS <= EF10_MAX_PIOBUF_NBUFS); + encp->enc_piobuf_limit = HUNT_PIOBUF_NBUFS; + encp->enc_piobuf_size = HUNT_PIOBUF_SIZE; + encp->enc_piobuf_min_alloc_size = HUNT_MIN_PIO_ALLOC_SIZE; + + if ((rc = hunt_nic_get_required_pcie_bandwidth(enp, &bandwidth)) != 0) + goto fail4; + encp->enc_required_pcie_bandwidth_mbps = bandwidth; + + /* All Huntington devices have a PCIe Gen3, 8 lane connector */ + encp->enc_max_pcie_link_gen = EFX_PCIE_LINK_SPEED_GEN3; + + return (0); + +fail4: + EFSYS_PROBE(fail4); +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + +#endif /* EFSYS_OPT_HUNTINGTON */ diff --git a/src/spdk/dpdk/drivers/net/sfc/base/mcdi_mon.c b/src/spdk/dpdk/drivers/net/sfc/base/mcdi_mon.c new file mode 100644 index 000000000..bd82755eb --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/base/mcdi_mon.c @@ -0,0 +1,636 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2009-2019 Solarflare Communications Inc. + */ + +#include "efx.h" +#include "efx_impl.h" +#include "mcdi_mon.h" + +#if EFSYS_OPT_MON_MCDI + +#if EFSYS_OPT_MON_STATS + +/* Get port mask from one-based MCDI port number */ +#define MCDI_MON_PORT_MASK(_emip) (1U << ((_emip)->emi_port - 1)) + +#define MCDI_STATIC_SENSOR_ASSERT(_field) \ + EFX_STATIC_ASSERT(MC_CMD_SENSOR_STATE_ ## _field \ + == EFX_MON_STAT_STATE_ ## _field) + +static void +mcdi_mon_decode_stats( + __in efx_nic_t *enp, + __in_bcount(sensor_mask_size) uint32_t *sensor_mask, + __in size_t sensor_mask_size, + __in_opt efsys_mem_t *esmp, + __out_bcount_opt(sensor_mask_size) uint32_t *stat_maskp, + __inout_ecount_opt(EFX_MON_NSTATS) efx_mon_stat_value_t *stat) +{ + efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); + efx_mon_stat_portmask_t port_mask; + uint16_t sensor; + size_t sensor_max; + uint32_t stat_mask[(EFX_MON_NSTATS + 31) / 32]; + uint32_t idx = 0; + uint32_t page = 0; + + /* Assert the MC_CMD_SENSOR and EFX_MON_STATE namespaces agree */ + MCDI_STATIC_SENSOR_ASSERT(OK); + MCDI_STATIC_SENSOR_ASSERT(WARNING); + MCDI_STATIC_SENSOR_ASSERT(FATAL); + MCDI_STATIC_SENSOR_ASSERT(BROKEN); + MCDI_STATIC_SENSOR_ASSERT(NO_READING); + + sensor_max = 8 * sensor_mask_size; + + EFSYS_ASSERT(emip->emi_port > 0); /* MCDI port number is one-based */ + port_mask = (efx_mon_stat_portmask_t)MCDI_MON_PORT_MASK(emip); + + memset(stat_mask, 0, sizeof (stat_mask)); + + /* + * The MCDI sensor readings in the DMA buffer are a packed array of + * MC_CMD_SENSOR_VALUE_ENTRY structures, which only includes entries for + * supported sensors (bit set in sensor_mask). The sensor_mask and + * sensor readings do not include entries for the per-page NEXT_PAGE + * flag. + * + * sensor_mask may legitimately contain MCDI sensors that the driver + * does not understand. + */ + for (sensor = 0; sensor < sensor_max; ++sensor) { + efx_mon_stat_t id; + efx_mon_stat_portmask_t stat_portmask = 0; + efx_mon_stat_unit_t stat_unit; + + if ((sensor % (MC_CMD_SENSOR_PAGE0_NEXT + 1)) == + MC_CMD_SENSOR_PAGE0_NEXT) { + /* This sensor is one of the page boundary bits. */ + page++; + continue; + } + + if (~(sensor_mask[page]) & + (1U << (sensor % (sizeof (sensor_mask[page]) * 8)))) { + /* This sensor is not supported. */ + continue; + } + + /* Supported sensor, so it is present in the DMA buffer. 
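/*
 * Editorial illustration, not part of the imported driver sources: the MCDI
 * sensor bitmap is split into 32-bit pages whose final bit
 * (MC_CMD_SENSOR_PAGE0_NEXT) is an "another page follows" flag rather than a
 * sensor, which is why the decode loop skips it and why the DMA buffer holds
 * entries only for sensors whose mask bit is set.  A stand-alone check
 * (hypothetical demo_* name; assumes the next-page flag is the last bit of
 * each page, as the modulo arithmetic in the decode loop implies):
 */
static boolean_t
demo_sensor_is_supported(const uint32_t *mask, unsigned int next_page_bit,
    unsigned int sensor)
{
	unsigned int bits_per_page = next_page_bit + 1;
	unsigned int page = sensor / bits_per_page;
	unsigned int bit = sensor % bits_per_page;

	if (bit == next_page_bit)
		return (B_FALSE);	/* page-boundary flag, not a sensor */

	return ((mask[page] & (1U << bit)) != 0 ? B_TRUE : B_FALSE);
}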
*/ + idx++; + + if ((efx_mon_mcdi_to_efx_stat(sensor, &id) != B_TRUE) || + (efx_mon_get_stat_portmap(id, &stat_portmask) != B_TRUE)) { + /* The sensor is not known to the driver. */ + continue; + } + + if ((stat_portmask & port_mask) == 0) { + /* The sensor is not for this port. */ + continue; + } + + EFSYS_ASSERT(id < EFX_MON_NSTATS); + + /* + * stat_mask is a bitmask indexed by EFX_MON_* monitor statistic + * identifiers from efx_mon_stat_t (without NEXT_PAGE bits). + * + * If there is an entry in the MCDI sensor to monitor statistic + * map then the sensor reading is used for the value of the + * monitor statistic. + */ + stat_mask[id / EFX_MON_MASK_ELEMENT_SIZE] |= + (1U << (id % EFX_MON_MASK_ELEMENT_SIZE)); + + if (stat != NULL && esmp != NULL && !EFSYS_MEM_IS_NULL(esmp)) { + efx_dword_t dword; + + /* Get MCDI sensor reading from DMA buffer */ + EFSYS_MEM_READD(esmp, 4 * (idx - 1), &dword); + + /* Update EFX monitor stat from MCDI sensor reading */ + stat[id].emsv_value = (uint16_t)EFX_DWORD_FIELD(dword, + MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE); + + stat[id].emsv_state = (uint16_t)EFX_DWORD_FIELD(dword, + MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE); + + stat[id].emsv_unit = + efx_mon_get_stat_unit(id, &stat_unit) ? + stat_unit : EFX_MON_STAT_UNIT_UNKNOWN; + } + } + + if (stat_maskp != NULL) { + memcpy(stat_maskp, stat_mask, sizeof (stat_mask)); + } +} + + __checkReturn efx_rc_t +mcdi_mon_ev( + __in efx_nic_t *enp, + __in efx_qword_t *eqp, + __out efx_mon_stat_t *idp, + __out efx_mon_stat_value_t *valuep) +{ + efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); + efx_mon_stat_portmask_t port_mask, sensor_port_mask; + uint16_t sensor; + uint16_t state; + uint16_t value; + efx_mon_stat_t id; + efx_rc_t rc; + + EFSYS_ASSERT(emip->emi_port > 0); /* MCDI port number is one-based */ + port_mask = MCDI_MON_PORT_MASK(emip); + + sensor = (uint16_t)MCDI_EV_FIELD(eqp, SENSOREVT_MONITOR); + state = (uint16_t)MCDI_EV_FIELD(eqp, SENSOREVT_STATE); + value = (uint16_t)MCDI_EV_FIELD(eqp, SENSOREVT_VALUE); + + /* Hardware must support this MCDI sensor */ + EFSYS_ASSERT3U(sensor, <, + (8 * enp->en_nic_cfg.enc_mcdi_sensor_mask_size)); + EFSYS_ASSERT((sensor % (MC_CMD_SENSOR_PAGE0_NEXT + 1)) != + MC_CMD_SENSOR_PAGE0_NEXT); + EFSYS_ASSERT(enp->en_nic_cfg.enc_mcdi_sensor_maskp != NULL); + EFSYS_ASSERT((enp->en_nic_cfg.enc_mcdi_sensor_maskp[ + sensor / (MC_CMD_SENSOR_PAGE0_NEXT + 1)] & + (1U << (sensor % (MC_CMD_SENSOR_PAGE0_NEXT + 1)))) != 0); + + /* And we need to understand it, to get port-map */ + if (!efx_mon_mcdi_to_efx_stat(sensor, &id)) { + rc = ENOTSUP; + goto fail1; + } + if (!(efx_mon_get_stat_portmap(id, &sensor_port_mask) && + (port_mask && sensor_port_mask))) { + return (ENODEV); + } + EFSYS_ASSERT(id < EFX_MON_NSTATS); + + *idp = id; + valuep->emsv_value = value; + valuep->emsv_state = state; + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + +static __checkReturn efx_rc_t +efx_mcdi_read_sensors( + __in efx_nic_t *enp, + __in efsys_mem_t *esmp, + __in uint32_t size) +{ + efx_mcdi_req_t req; + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_READ_SENSORS_EXT_IN_LEN, + MC_CMD_READ_SENSORS_EXT_OUT_LEN); + uint32_t addr_lo, addr_hi; + efx_rc_t rc; + + if (EFSYS_MEM_SIZE(esmp) < size) { + rc = EINVAL; + goto fail1; + } + + req.emr_cmd = MC_CMD_READ_SENSORS; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_READ_SENSORS_EXT_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_READ_SENSORS_EXT_OUT_LEN; + + addr_lo = (uint32_t)(EFSYS_MEM_ADDR(esmp) & 
0xffffffff); + addr_hi = (uint32_t)(EFSYS_MEM_ADDR(esmp) >> 32); + + MCDI_IN_SET_DWORD(req, READ_SENSORS_EXT_IN_DMA_ADDR_LO, addr_lo); + MCDI_IN_SET_DWORD(req, READ_SENSORS_EXT_IN_DMA_ADDR_HI, addr_hi); + MCDI_IN_SET_DWORD(req, READ_SENSORS_EXT_IN_LENGTH, size); + + efx_mcdi_execute(enp, &req); + + return (req.emr_rc); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +static __checkReturn efx_rc_t +efx_mcdi_sensor_info_npages( + __in efx_nic_t *enp, + __out uint32_t *npagesp) +{ + efx_mcdi_req_t req; + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_SENSOR_INFO_EXT_IN_LEN, + MC_CMD_SENSOR_INFO_OUT_LENMAX); + int page; + efx_rc_t rc; + + EFSYS_ASSERT(npagesp != NULL); + + page = 0; + do { + (void) memset(payload, 0, sizeof (payload)); + req.emr_cmd = MC_CMD_SENSOR_INFO; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_SENSOR_INFO_EXT_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_SENSOR_INFO_OUT_LENMAX; + + MCDI_IN_SET_DWORD(req, SENSOR_INFO_EXT_IN_PAGE, page++); + + efx_mcdi_execute_quiet(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail1; + } + } while (MCDI_OUT_DWORD(req, SENSOR_INFO_OUT_MASK) & + (1U << MC_CMD_SENSOR_PAGE0_NEXT)); + + *npagesp = page; + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +static __checkReturn efx_rc_t +efx_mcdi_sensor_info( + __in efx_nic_t *enp, + __out_ecount(npages) uint32_t *sensor_maskp, + __in size_t npages) +{ + efx_mcdi_req_t req; + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_SENSOR_INFO_EXT_IN_LEN, + MC_CMD_SENSOR_INFO_OUT_LENMAX); + uint32_t page; + efx_rc_t rc; + + EFSYS_ASSERT(sensor_maskp != NULL); + + if (npages < 1) { + rc = EINVAL; + goto fail1; + } + + for (page = 0; page < npages; page++) { + uint32_t mask; + + (void) memset(payload, 0, sizeof (payload)); + req.emr_cmd = MC_CMD_SENSOR_INFO; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_SENSOR_INFO_EXT_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_SENSOR_INFO_OUT_LENMAX; + + MCDI_IN_SET_DWORD(req, SENSOR_INFO_EXT_IN_PAGE, page); + + efx_mcdi_execute(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail2; + } + + mask = MCDI_OUT_DWORD(req, SENSOR_INFO_OUT_MASK); + + if ((page != (npages - 1)) && + ((mask & (1U << MC_CMD_SENSOR_PAGE0_NEXT)) == 0)) { + rc = EINVAL; + goto fail3; + } + sensor_maskp[page] = mask; + } + + if (sensor_maskp[npages - 1] & (1U << MC_CMD_SENSOR_PAGE0_NEXT)) { + rc = EINVAL; + goto fail4; + } + + return (0); + +fail4: + EFSYS_PROBE(fail4); +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +static __checkReturn efx_rc_t +efx_mcdi_sensor_info_page( + __in efx_nic_t *enp, + __in uint32_t page, + __out uint32_t *mask_part, + __out_ecount((sizeof (*mask_part) * 8) - 1) + efx_mon_stat_limits_t *limits) +{ + efx_mcdi_req_t req; + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_SENSOR_INFO_EXT_IN_LEN, + MC_CMD_SENSOR_INFO_OUT_LENMAX); + efx_rc_t rc; + uint32_t mask_copy; + efx_dword_t *maskp; + efx_qword_t *limit_info; + + EFSYS_ASSERT(mask_part != NULL); + EFSYS_ASSERT(limits != NULL); + + memset(limits, 0, + ((sizeof (*mask_part) * 8) - 1) * sizeof (efx_mon_stat_limits_t)); + + req.emr_cmd = MC_CMD_SENSOR_INFO; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_SENSOR_INFO_EXT_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_SENSOR_INFO_OUT_LENMAX; + + MCDI_IN_SET_DWORD(req, SENSOR_INFO_EXT_IN_PAGE, page); + + efx_mcdi_execute(enp, &req); + + rc = 
req.emr_rc; + + if (rc != 0) + goto fail1; + + EFSYS_ASSERT(sizeof (*limit_info) == + MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_LEN); + maskp = MCDI_OUT2(req, efx_dword_t, SENSOR_INFO_OUT_MASK); + limit_info = (efx_qword_t *)(maskp + 1); + + *mask_part = maskp->ed_u32[0]; + mask_copy = *mask_part; + + /* Copy an entry for all but the highest bit set. */ + while (mask_copy) { + + if (mask_copy == (1U << MC_CMD_SENSOR_PAGE0_NEXT)) { + /* Only next page bit set. */ + mask_copy = 0; + } else { + /* Clear lowest bit */ + mask_copy = mask_copy & ~(mask_copy ^ (mask_copy - 1)); + /* And copy out limit entry into buffer */ + limits->emlv_warning_min = EFX_QWORD_FIELD(*limit_info, + MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN1); + + limits->emlv_warning_max = EFX_QWORD_FIELD(*limit_info, + MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX1); + + limits->emlv_fatal_min = EFX_QWORD_FIELD(*limit_info, + MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN2); + + limits->emlv_fatal_max = EFX_QWORD_FIELD(*limit_info, + MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX2); + + limits++; + limit_info++; + } + } + + return (rc); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +mcdi_mon_stats_update( + __in efx_nic_t *enp, + __in efsys_mem_t *esmp, + __inout_ecount(EFX_MON_NSTATS) efx_mon_stat_value_t *values) +{ + efx_nic_cfg_t *encp = &(enp->en_nic_cfg); + uint32_t size = encp->enc_mon_stat_dma_buf_size; + efx_rc_t rc; + + if ((rc = efx_mcdi_read_sensors(enp, esmp, size)) != 0) + goto fail1; + + EFSYS_DMA_SYNC_FOR_KERNEL(esmp, 0, size); + + mcdi_mon_decode_stats(enp, + encp->enc_mcdi_sensor_maskp, + encp->enc_mcdi_sensor_mask_size, + esmp, NULL, values); + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +static void +lowest_set_bit( + __in uint32_t input_mask, + __out uint32_t *lowest_bit_mask, + __out uint32_t *lowest_bit_num +) +{ + uint32_t x; + uint32_t set_bit, bit_index; + + x = (input_mask ^ (input_mask - 1)); + set_bit = (x + 1) >> 1; + if (!set_bit) + set_bit = (1U << 31U); + + bit_index = 0; + if (set_bit & 0xFFFF0000) + bit_index += 16; + if (set_bit & 0xFF00FF00) + bit_index += 8; + if (set_bit & 0xF0F0F0F0) + bit_index += 4; + if (set_bit & 0xCCCCCCCC) + bit_index += 2; + if (set_bit & 0xAAAAAAAA) + bit_index += 1; + + *lowest_bit_mask = set_bit; + *lowest_bit_num = bit_index; +} + + __checkReturn efx_rc_t +mcdi_mon_limits_update( + __in efx_nic_t *enp, + __inout_ecount(EFX_MON_NSTATS) efx_mon_stat_limits_t *values) +{ + efx_rc_t rc; + uint32_t page; + uint32_t page_mask; + uint32_t limit_index; + efx_mon_stat_limits_t limits[sizeof (page_mask) * 8]; + efx_mon_stat_t stat; + + page = 0; + page--; + do { + page++; + + rc = efx_mcdi_sensor_info_page(enp, page, &page_mask, limits); + if (rc != 0) + goto fail1; + + limit_index = 0; + while (page_mask) { + uint32_t set_bit; + uint32_t page_index; + uint32_t mcdi_index; + + if (page_mask == (1U << MC_CMD_SENSOR_PAGE0_NEXT)) + break; + + lowest_set_bit(page_mask, &set_bit, &page_index); + page_mask = page_mask & ~set_bit; + + mcdi_index = + page_index + (sizeof (page_mask) * 8 * page); + + /* + * This can fail if MCDI reports newer stats than the + * drivers understand, or the bit is the next page bit. + * + * Driver needs to be tolerant of this. 
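/*
 * Editorial illustration, not part of the imported driver sources:
 * lowest_set_bit() above isolates the lowest set bit with x ^ (x - 1) and
 * then recovers its index by summing the classic power-of-two mask tests.
 * A simple reference implementation that the bit-twiddling version above
 * must agree with (hypothetical demo_* name):
 */
static void
demo_lowest_set_bit(uint32_t input_mask, uint32_t *lowest_bit_mask,
    uint32_t *lowest_bit_num)
{
	uint32_t index = 0;
	uint32_t mask = 1;

	/* Walk up from bit 0 until a set bit (or bit 31) is reached. */
	while ((input_mask & mask) == 0 && index < 31) {
		mask <<= 1;
		index++;
	}
	*lowest_bit_mask = mask;
	*lowest_bit_num = index;
}
/* Example: demo_lowest_set_bit(0x00000c00, ...) yields mask 0x400, num 10. */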
+ */ + if (!efx_mon_mcdi_to_efx_stat(mcdi_index, &stat)) + continue; + + values[stat] = limits[limit_index]; + limit_index++; + } + + } while (page_mask & (1U << MC_CMD_SENSOR_PAGE0_NEXT)); + + return (rc); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +mcdi_mon_cfg_build( + __in efx_nic_t *enp) +{ + efx_nic_cfg_t *encp = &(enp->en_nic_cfg); + uint32_t npages; + efx_rc_t rc; + + switch (enp->en_family) { +#if EFSYS_OPT_SIENA + case EFX_FAMILY_SIENA: + encp->enc_mon_type = EFX_MON_SFC90X0; + break; +#endif +#if EFSYS_OPT_HUNTINGTON + case EFX_FAMILY_HUNTINGTON: + encp->enc_mon_type = EFX_MON_SFC91X0; + break; +#endif +#if EFSYS_OPT_MEDFORD + case EFX_FAMILY_MEDFORD: + encp->enc_mon_type = EFX_MON_SFC92X0; + break; +#endif +#if EFSYS_OPT_MEDFORD2 + case EFX_FAMILY_MEDFORD2: + encp->enc_mon_type = EFX_MON_SFC92X0; + break; +#endif + default: + rc = EINVAL; + goto fail1; + } + + /* Get mc sensor mask size */ + npages = 0; + if ((rc = efx_mcdi_sensor_info_npages(enp, &npages)) != 0) + goto fail2; + + encp->enc_mon_stat_dma_buf_size = npages * EFX_MON_STATS_PAGE_SIZE; + encp->enc_mcdi_sensor_mask_size = npages * sizeof (uint32_t); + + /* Allocate mc sensor mask */ + EFSYS_KMEM_ALLOC(enp->en_esip, + encp->enc_mcdi_sensor_mask_size, + encp->enc_mcdi_sensor_maskp); + + if (encp->enc_mcdi_sensor_maskp == NULL) { + rc = ENOMEM; + goto fail3; + } + + /* Read mc sensor mask */ + if ((rc = efx_mcdi_sensor_info(enp, + encp->enc_mcdi_sensor_maskp, + npages)) != 0) + goto fail4; + + /* Build monitor statistics mask */ + mcdi_mon_decode_stats(enp, + encp->enc_mcdi_sensor_maskp, + encp->enc_mcdi_sensor_mask_size, + NULL, encp->enc_mon_stat_mask, NULL); + + return (0); + +fail4: + EFSYS_PROBE(fail4); + EFSYS_KMEM_FREE(enp->en_esip, + encp->enc_mcdi_sensor_mask_size, + encp->enc_mcdi_sensor_maskp); + +fail3: + EFSYS_PROBE(fail3); + +fail2: + EFSYS_PROBE(fail2); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + void +mcdi_mon_cfg_free( + __in efx_nic_t *enp) +{ + efx_nic_cfg_t *encp = &(enp->en_nic_cfg); + + if (encp->enc_mcdi_sensor_maskp != NULL) { + EFSYS_KMEM_FREE(enp->en_esip, + encp->enc_mcdi_sensor_mask_size, + encp->enc_mcdi_sensor_maskp); + } +} + + +#endif /* EFSYS_OPT_MON_STATS */ + +#endif /* EFSYS_OPT_MON_MCDI */ diff --git a/src/spdk/dpdk/drivers/net/sfc/base/mcdi_mon.h b/src/spdk/dpdk/drivers/net/sfc/base/mcdi_mon.h new file mode 100644 index 000000000..d7fbf07c3 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/base/mcdi_mon.h @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2009-2019 Solarflare Communications Inc. 
+ */ + +#ifndef _SYS_MCDI_MON_H +#define _SYS_MCDI_MON_H + +#include "efx.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if EFSYS_OPT_MON_MCDI + +#if EFSYS_OPT_MON_STATS + + __checkReturn efx_rc_t +mcdi_mon_cfg_build( + __in efx_nic_t *enp); + + void +mcdi_mon_cfg_free( + __in efx_nic_t *enp); + + +extern __checkReturn efx_rc_t +mcdi_mon_ev( + __in efx_nic_t *enp, + __in efx_qword_t *eqp, + __out efx_mon_stat_t *idp, + __out efx_mon_stat_value_t *valuep); + +extern __checkReturn efx_rc_t +mcdi_mon_stats_update( + __in efx_nic_t *enp, + __in efsys_mem_t *esmp, + __inout_ecount(EFX_MON_NSTATS) efx_mon_stat_value_t *values); + +extern __checkReturn efx_rc_t +mcdi_mon_limits_update( + __in efx_nic_t *enp, + __inout_ecount(EFX_MON_NSTATS) efx_mon_stat_limits_t *values); + +#endif /* EFSYS_OPT_MON_STATS */ + +#endif /* EFSYS_OPT_MON_MCDI */ + +#ifdef __cplusplus +} +#endif + +#endif /* _SYS_MCDI_MON_H */ diff --git a/src/spdk/dpdk/drivers/net/sfc/base/medford2_impl.h b/src/spdk/dpdk/drivers/net/sfc/base/medford2_impl.h new file mode 100644 index 000000000..61a11829f --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/base/medford2_impl.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2015-2019 Solarflare Communications Inc. + */ + +#ifndef _SYS_MEDFORD2_IMPL_H +#define _SYS_MEDFORD2_IMPL_H + +#ifdef __cplusplus +extern "C" { +#endif + + +#define MEDFORD2_TXQ_MAXNDESCS 2048 + +#define MEDFORD2_EVQ_MAXNBUFS (64) + +#ifndef ER_EZ_TX_PIOBUF_SIZE +#define ER_EZ_TX_PIOBUF_SIZE 4096 +#endif + + +#define MEDFORD2_PIOBUF_NBUFS (16) +#define MEDFORD2_PIOBUF_SIZE (ER_EZ_TX_PIOBUF_SIZE) + +#define MEDFORD2_MIN_PIO_ALLOC_SIZE (MEDFORD2_PIOBUF_SIZE / 32) + + +extern __checkReturn efx_rc_t +medford2_board_cfg( + __in efx_nic_t *enp); + + +#ifdef __cplusplus +} +#endif + +#endif /* _SYS_MEDFORD2_IMPL_H */ diff --git a/src/spdk/dpdk/drivers/net/sfc/base/medford2_nic.c b/src/spdk/dpdk/drivers/net/sfc/base/medford2_nic.c new file mode 100644 index 000000000..1a454fc34 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/base/medford2_nic.c @@ -0,0 +1,163 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2015-2019 Solarflare Communications Inc. + */ + +#include "efx.h" +#include "efx_impl.h" + + +#if EFSYS_OPT_MEDFORD2 + +static __checkReturn efx_rc_t +medford2_nic_get_required_pcie_bandwidth( + __in efx_nic_t *enp, + __out uint32_t *bandwidth_mbpsp) +{ + uint32_t bandwidth; + efx_rc_t rc; + + /* FIXME: support new Medford2 dynamic port modes */ + + if ((rc = ef10_nic_get_port_mode_bandwidth(enp, + &bandwidth)) != 0) + goto fail1; + + *bandwidth_mbpsp = bandwidth; + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +medford2_board_cfg( + __in efx_nic_t *enp) +{ + efx_nic_cfg_t *encp = &(enp->en_nic_cfg); + uint32_t sysclk, dpcpu_clk; + uint32_t end_padding; + uint32_t bandwidth; + efx_rc_t rc; + + /* + * Enable firmware workarounds for hardware errata. + * Expected responses are: + * - 0 (zero): + * Success: workaround enabled or disabled as requested. + * - MC_CMD_ERR_ENOSYS (reported as ENOTSUP): + * Firmware does not support the MC_CMD_WORKAROUND request. + * (assume that the workaround is not supported). + * - MC_CMD_ERR_ENOENT (reported as ENOENT): + * Firmware does not support the requested workaround. + * - MC_CMD_ERR_EPERM (reported as EACCES): + * Unprivileged function cannot enable/disable workarounds. 
+ * + * See efx_mcdi_request_errcode() for MCDI error translations. + */ + + + if (EFX_PCI_FUNCTION_IS_VF(encp)) { + /* + * Interrupt testing does not work for VFs on Medford2. + * See bug50084 and bug71432 comment 21. + */ + encp->enc_bug41750_workaround = B_TRUE; + } + + /* + * If the bug61265 workaround is enabled, then interrupt holdoff timers + * cannot be controlled by timer table writes, so MCDI must be used + * (timer table writes can still be used for wakeup timers). + */ + rc = efx_mcdi_set_workaround(enp, MC_CMD_WORKAROUND_BUG61265, B_TRUE, + NULL); + if ((rc == 0) || (rc == EACCES)) + encp->enc_bug61265_workaround = B_TRUE; + else if ((rc == ENOTSUP) || (rc == ENOENT)) + encp->enc_bug61265_workaround = B_FALSE; + else + goto fail1; + + /* Checksums for TSO sends should always be correct on Medford2. */ + encp->enc_bug61297_workaround = B_FALSE; + + /* Get clock frequencies (in MHz). */ + if ((rc = efx_mcdi_get_clock(enp, &sysclk, &dpcpu_clk)) != 0) + goto fail2; + + /* + * The Medford2 timer quantum is 1536 dpcpu_clk cycles, documented for + * the EV_TMR_VAL field of EV_TIMER_TBL. Scale for MHz and ns units. + */ + encp->enc_evq_timer_quantum_ns = 1536000UL / dpcpu_clk; /* 1536 cycles */ + encp->enc_evq_timer_max_us = (encp->enc_evq_timer_quantum_ns << + FRF_CZ_TC_TIMER_VAL_WIDTH) / 1000; + + encp->enc_ev_desc_size = EF10_EVQ_DESC_SIZE; + encp->enc_rx_desc_size = EF10_RXQ_DESC_SIZE; + encp->enc_tx_desc_size = EF10_TXQ_DESC_SIZE; + + /* Alignment for receive packet DMA buffers */ + encp->enc_rx_buf_align_start = 1; + + /* Get the RX DMA end padding alignment configuration */ + if ((rc = efx_mcdi_get_rxdp_config(enp, &end_padding)) != 0) { + if (rc != EACCES) + goto fail3; + + /* Assume largest tail padding size supported by hardware */ + end_padding = 256; + } + encp->enc_rx_buf_align_end = end_padding; + + encp->enc_evq_max_nevs = EF10_EVQ_MAXNEVS; + encp->enc_evq_min_nevs = EF10_EVQ_MINNEVS; + + encp->enc_rxq_max_ndescs = EF10_RXQ_MAXNDESCS; + encp->enc_rxq_min_ndescs = EF10_RXQ_MINNDESCS; + + /* + * The maximum supported transmit queue size is 2048. TXQs with 4096 + * descriptors are not supported as the top bit is used for vfifo + * stuffing. + */ + encp->enc_txq_max_ndescs = MEDFORD2_TXQ_MAXNDESCS; + encp->enc_txq_min_ndescs = EF10_TXQ_MINNDESCS; + + EFX_STATIC_ASSERT(MEDFORD2_PIOBUF_NBUFS <= EF10_MAX_PIOBUF_NBUFS); + encp->enc_piobuf_limit = MEDFORD2_PIOBUF_NBUFS; + encp->enc_piobuf_size = MEDFORD2_PIOBUF_SIZE; + encp->enc_piobuf_min_alloc_size = MEDFORD2_MIN_PIO_ALLOC_SIZE; + + /* + * Medford2 stores a single global copy of VPD, not per-PF as on + * Huntington. + */ + encp->enc_vpd_is_global = B_TRUE; + + rc = medford2_nic_get_required_pcie_bandwidth(enp, &bandwidth); + if (rc != 0) + goto fail4; + encp->enc_required_pcie_bandwidth_mbps = bandwidth; + encp->enc_max_pcie_link_gen = EFX_PCIE_LINK_SPEED_GEN3; + + return (0); + +fail4: + EFSYS_PROBE(fail4); +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +#endif /* EFSYS_OPT_MEDFORD2 */ diff --git a/src/spdk/dpdk/drivers/net/sfc/base/medford_impl.h b/src/spdk/dpdk/drivers/net/sfc/base/medford_impl.h new file mode 100644 index 000000000..21e5652fb --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/base/medford_impl.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2015-2019 Solarflare Communications Inc. 
+ */ + +#ifndef _SYS_MEDFORD_IMPL_H +#define _SYS_MEDFORD_IMPL_H + +#ifdef __cplusplus +extern "C" { +#endif + + +#define MEDFORD_TXQ_MAXNDESCS 2048 + +#define MEDFORD_EVQ_MAXNBUFS (64) + +#ifndef ER_EZ_TX_PIOBUF_SIZE +#define ER_EZ_TX_PIOBUF_SIZE 4096 +#endif + + +#define MEDFORD_PIOBUF_NBUFS (16) +#define MEDFORD_PIOBUF_SIZE (ER_EZ_TX_PIOBUF_SIZE) + +#define MEDFORD_MIN_PIO_ALLOC_SIZE (MEDFORD_PIOBUF_SIZE / 32) + + +extern __checkReturn efx_rc_t +medford_board_cfg( + __in efx_nic_t *enp); + + +#ifdef __cplusplus +} +#endif + +#endif /* _SYS_MEDFORD_IMPL_H */ diff --git a/src/spdk/dpdk/drivers/net/sfc/base/medford_nic.c b/src/spdk/dpdk/drivers/net/sfc/base/medford_nic.c new file mode 100644 index 000000000..6c85b0c84 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/base/medford_nic.c @@ -0,0 +1,161 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2015-2019 Solarflare Communications Inc. + */ + +#include "efx.h" +#include "efx_impl.h" + + +#if EFSYS_OPT_MEDFORD + +static __checkReturn efx_rc_t +medford_nic_get_required_pcie_bandwidth( + __in efx_nic_t *enp, + __out uint32_t *bandwidth_mbpsp) +{ + uint32_t bandwidth; + efx_rc_t rc; + + if ((rc = ef10_nic_get_port_mode_bandwidth(enp, + &bandwidth)) != 0) + goto fail1; + + *bandwidth_mbpsp = bandwidth; + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +medford_board_cfg( + __in efx_nic_t *enp) +{ + efx_nic_cfg_t *encp = &(enp->en_nic_cfg); + uint32_t sysclk, dpcpu_clk; + uint32_t end_padding; + uint32_t bandwidth; + efx_rc_t rc; + + /* + * Enable firmware workarounds for hardware errata. + * Expected responses are: + * - 0 (zero): + * Success: workaround enabled or disabled as requested. + * - MC_CMD_ERR_ENOSYS (reported as ENOTSUP): + * Firmware does not support the MC_CMD_WORKAROUND request. + * (assume that the workaround is not supported). + * - MC_CMD_ERR_ENOENT (reported as ENOENT): + * Firmware does not support the requested workaround. + * - MC_CMD_ERR_EPERM (reported as EACCES): + * Unprivileged function cannot enable/disable workarounds. + * + * See efx_mcdi_request_errcode() for MCDI error translations. + */ + + + if (EFX_PCI_FUNCTION_IS_VF(encp)) { + /* + * Interrupt testing does not work for VFs. See bug50084 and + * bug71432 comment 21. + */ + encp->enc_bug41750_workaround = B_TRUE; + } + + /* + * If the bug61265 workaround is enabled, then interrupt holdoff timers + * cannot be controlled by timer table writes, so MCDI must be used + * (timer table writes can still be used for wakeup timers). + */ + rc = efx_mcdi_set_workaround(enp, MC_CMD_WORKAROUND_BUG61265, B_TRUE, + NULL); + if ((rc == 0) || (rc == EACCES)) + encp->enc_bug61265_workaround = B_TRUE; + else if ((rc == ENOTSUP) || (rc == ENOENT)) + encp->enc_bug61265_workaround = B_FALSE; + else + goto fail1; + + /* Checksums for TSO sends can be incorrect on Medford. */ + encp->enc_bug61297_workaround = B_TRUE; + + /* Get clock frequencies (in MHz). */ + if ((rc = efx_mcdi_get_clock(enp, &sysclk, &dpcpu_clk)) != 0) + goto fail2; + + /* + * The Medford timer quantum is 1536 dpcpu_clk cycles, documented for + * the EV_TMR_VAL field of EV_TIMER_TBL. Scale for MHz and ns units. 
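+	 *
+	 * For example, a dpcpu_clk of 1000 MHz (a figure used purely for
+	 * illustration) gives a quantum of 1536000 / 1000 = 1536 ns.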
+ */ + encp->enc_evq_timer_quantum_ns = 1536000UL / dpcpu_clk; /* 1536 cycles */ + encp->enc_evq_timer_max_us = (encp->enc_evq_timer_quantum_ns << + FRF_CZ_TC_TIMER_VAL_WIDTH) / 1000; + + encp->enc_ev_desc_size = EF10_EVQ_DESC_SIZE; + encp->enc_rx_desc_size = EF10_RXQ_DESC_SIZE; + encp->enc_tx_desc_size = EF10_TXQ_DESC_SIZE; + + /* Alignment for receive packet DMA buffers */ + encp->enc_rx_buf_align_start = 1; + + /* Get the RX DMA end padding alignment configuration */ + if ((rc = efx_mcdi_get_rxdp_config(enp, &end_padding)) != 0) { + if (rc != EACCES) + goto fail3; + + /* Assume largest tail padding size supported by hardware */ + end_padding = 256; + } + encp->enc_rx_buf_align_end = end_padding; + + encp->enc_evq_max_nevs = EF10_EVQ_MAXNEVS; + encp->enc_evq_min_nevs = EF10_EVQ_MINNEVS; + + encp->enc_rxq_max_ndescs = EF10_RXQ_MAXNDESCS; + encp->enc_rxq_min_ndescs = EF10_RXQ_MINNDESCS; + + /* + * The maximum supported transmit queue size is 2048. TXQs with 4096 + * descriptors are not supported as the top bit is used for vfifo + * stuffing. + */ + encp->enc_txq_max_ndescs = MEDFORD_TXQ_MAXNDESCS; + encp->enc_txq_min_ndescs = EF10_TXQ_MINNDESCS; + + EFX_STATIC_ASSERT(MEDFORD_PIOBUF_NBUFS <= EF10_MAX_PIOBUF_NBUFS); + encp->enc_piobuf_limit = MEDFORD_PIOBUF_NBUFS; + encp->enc_piobuf_size = MEDFORD_PIOBUF_SIZE; + encp->enc_piobuf_min_alloc_size = MEDFORD_MIN_PIO_ALLOC_SIZE; + + /* + * Medford stores a single global copy of VPD, not per-PF as on + * Huntington. + */ + encp->enc_vpd_is_global = B_TRUE; + + rc = medford_nic_get_required_pcie_bandwidth(enp, &bandwidth); + if (rc != 0) + goto fail4; + encp->enc_required_pcie_bandwidth_mbps = bandwidth; + encp->enc_max_pcie_link_gen = EFX_PCIE_LINK_SPEED_GEN3; + + return (0); + +fail4: + EFSYS_PROBE(fail4); +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +#endif /* EFSYS_OPT_MEDFORD */ diff --git a/src/spdk/dpdk/drivers/net/sfc/base/meson.build b/src/spdk/dpdk/drivers/net/sfc/base/meson.build new file mode 100644 index 000000000..8909525c3 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/base/meson.build @@ -0,0 +1,81 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2019-2020 Xilinx, Inc. +# Copyright(c) 2016-2019 Solarflare Communications Inc. +# +# This software was jointly developed between OKTET Labs (under contract +# for Solarflare) and Solarflare Communications, Inc. 
+ +sources = [ + 'efx_bootcfg.c', + 'efx_crc32.c', + 'efx_ev.c', + 'efx_evb.c', + 'efx_filter.c', + 'efx_hash.c', + 'efx_intr.c', + 'efx_lic.c', + 'efx_mac.c', + 'efx_mcdi.c', + 'efx_mon.c', + 'efx_nic.c', + 'efx_nvram.c', + 'efx_phy.c', + 'efx_port.c', + 'efx_proxy.c', + 'efx_rx.c', + 'efx_sram.c', + 'efx_tunnel.c', + 'efx_tx.c', + 'efx_vpd.c', + 'mcdi_mon.c', + 'siena_mac.c', + 'siena_mcdi.c', + 'siena_nic.c', + 'siena_nvram.c', + 'siena_phy.c', + 'siena_sram.c', + 'siena_vpd.c', + 'ef10_ev.c', + 'ef10_evb.c', + 'ef10_filter.c', + 'ef10_image.c', + 'ef10_intr.c', + 'ef10_mac.c', + 'ef10_mcdi.c', + 'ef10_nic.c', + 'ef10_nvram.c', + 'ef10_phy.c', + 'ef10_proxy.c', + 'ef10_rx.c', + 'ef10_tx.c', + 'ef10_vpd.c', + 'hunt_nic.c', + 'medford_nic.c', + 'medford2_nic.c' +] + +extra_flags = [ + '-Wno-sign-compare', + '-Wno-unused-parameter', + '-Wno-unused-variable', + '-Wno-empty-body', + '-Wno-unused-but-set-variable' +] + +c_args = cflags +foreach flag: extra_flags + if cc.has_argument(flag) + c_args += flag + endif +endforeach + +if build + base_lib = static_library('sfc_base', sources, + include_directories: includes, + dependencies: static_rte_eal, + c_args: c_args) + + base_objs = base_lib.extract_all_objects() +else + base_objs = [] +endif diff --git a/src/spdk/dpdk/drivers/net/sfc/base/siena_flash.h b/src/spdk/dpdk/drivers/net/sfc/base/siena_flash.h new file mode 100644 index 000000000..45fd2a598 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/base/siena_flash.h @@ -0,0 +1,204 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2007-2019 Solarflare Communications Inc. + */ + +#ifndef _SYS_SIENA_FLASH_H +#define _SYS_SIENA_FLASH_H + +#pragma pack(1) + +/* Fixed locations near the start of flash (which may be in the internal PHY + * firmware header) point to the boot header. 
+ * + * - parsed by MC boot ROM and firmware + * - reserved (but not parsed) by PHY firmware + * - opaque to driver + */ + +#define SIENA_MC_BOOT_PHY_FW_HDR_LEN (0x20) + +#define SIENA_MC_BOOT_PTR_LOCATION (0x18) /* First thing we try to boot */ +#define SIENA_MC_BOOT_ALT_PTR_LOCATION (0x1c) /* Alternative if that fails */ + +#define SIENA_MC_BOOT_HDR_LEN (0x200) + +#define SIENA_MC_BOOT_MAGIC (0x51E4A001) +#define SIENA_MC_BOOT_VERSION (1) + + +/*Structures supporting an arbitrary number of binary blobs in the flash image + intended to house code and tables for the satellite cpus*/ +/*thanks to random.org for:*/ +#define BLOBS_HEADER_MAGIC (0xBDA3BBD4) +#define BLOB_HEADER_MAGIC (0xA1478A91) + +typedef struct blobs_hdr_s { /* GENERATED BY scripts/genfwdef */ + efx_dword_t magic; + efx_dword_t no_of_blobs; +} blobs_hdr_t; + +typedef struct blob_hdr_s { /* GENERATED BY scripts/genfwdef */ + efx_dword_t magic; + efx_dword_t cpu_type; + efx_dword_t build_variant; + efx_dword_t offset; + efx_dword_t length; + efx_dword_t checksum; +} blob_hdr_t; + +#define BLOB_CPU_TYPE_TXDI_TEXT (0) +#define BLOB_CPU_TYPE_RXDI_TEXT (1) +#define BLOB_CPU_TYPE_TXDP_TEXT (2) +#define BLOB_CPU_TYPE_RXDP_TEXT (3) +#define BLOB_CPU_TYPE_RXHRSL_HR_LUT (4) +#define BLOB_CPU_TYPE_RXHRSL_HR_LUT_CFG (5) +#define BLOB_CPU_TYPE_TXHRSL_HR_LUT (6) +#define BLOB_CPU_TYPE_TXHRSL_HR_LUT_CFG (7) +#define BLOB_CPU_TYPE_RXHRSL_HR_PGM (8) +#define BLOB_CPU_TYPE_RXHRSL_SL_PGM (9) +#define BLOB_CPU_TYPE_TXHRSL_HR_PGM (10) +#define BLOB_CPU_TYPE_TXHRSL_SL_PGM (11) +#define BLOB_CPU_TYPE_RXDI_VTBL0 (12) +#define BLOB_CPU_TYPE_TXDI_VTBL0 (13) +#define BLOB_CPU_TYPE_RXDI_VTBL1 (14) +#define BLOB_CPU_TYPE_TXDI_VTBL1 (15) +#define BLOB_CPU_TYPE_DUMPSPEC (32) +#define BLOB_CPU_TYPE_MC_XIP (33) + +#define BLOB_CPU_TYPE_INVALID (31) + +/* + * The upper four bits of the CPU type field specify the compression + * algorithm used for this blob. + */ +#define BLOB_COMPRESSION_MASK (0xf0000000) +#define BLOB_CPU_TYPE_MASK (0x0fffffff) + +#define BLOB_COMPRESSION_NONE (0x00000000) /* Stored as is */ +#define BLOB_COMPRESSION_LZ (0x10000000) /* see lib/lzdecoder.c */ + +typedef struct siena_mc_boot_hdr_s { /* GENERATED BY scripts/genfwdef */ + efx_dword_t magic; /* = SIENA_MC_BOOT_MAGIC */ + efx_word_t hdr_version; /* this structure definition is version 1 */ + efx_byte_t board_type; + efx_byte_t firmware_version_a; + efx_byte_t firmware_version_b; + efx_byte_t firmware_version_c; + efx_word_t checksum; /* of whole header area + firmware image */ + efx_word_t firmware_version_d; + efx_byte_t mcfw_subtype; + efx_byte_t generation; /* MC (Medford and later): MC partition generation when */ + /* written to NVRAM. */ + /* MUM & SUC images: subtype. */ + /* (Otherwise set to 0) */ + efx_dword_t firmware_text_offset; /* offset to firmware .text */ + efx_dword_t firmware_text_size; /* length of firmware .text, in bytes */ + efx_dword_t firmware_data_offset; /* offset to firmware .data */ + efx_dword_t firmware_data_size; /* length of firmware .data, in bytes */ + efx_byte_t spi_rate; /* SPI rate for reading image, 0 is BootROM default */ + efx_byte_t spi_phase_adj; /* SPI SDO/SCL phase adjustment, 0 is default (no adj) */ + efx_word_t xpm_sector; /* XPM (MEDFORD and later): The sector that contains */ + /* the key, or 0xffff if unsigned. (Otherwise set to 0) */ + efx_byte_t mumfw_subtype; /* MUM & SUC images: subtype. 
(Otherwise set to 0) */ + efx_byte_t reserved_b[3]; /* (set to 0) */ + efx_dword_t security_level; /* This number increases every time a serious security flaw */ + /* is fixed. A secure NIC may not downgrade to any image */ + /* with a lower security level than the current image. */ + /* Note: The number in this header should only be used for */ + /* determining the level of new images, not to determine */ + /* the level of the current image as this header is not */ + /* protected by a CMAC. */ + efx_dword_t reserved_c[5]; /* (set to 0) */ +} siena_mc_boot_hdr_t; + +#define SIENA_MC_BOOT_HDR_PADDING \ + (SIENA_MC_BOOT_HDR_LEN - sizeof(siena_mc_boot_hdr_t)) + +#define SIENA_MC_STATIC_CONFIG_MAGIC (0xBDCF5555) +#define SIENA_MC_STATIC_CONFIG_VERSION (0) + +typedef struct siena_mc_static_config_hdr_s { /* GENERATED BY scripts/genfwdef */ + efx_dword_t magic; /* = SIENA_MC_STATIC_CONFIG_MAGIC */ + efx_word_t length; /* of header area (i.e. not including VPD) */ + efx_byte_t version; + efx_byte_t csum; /* over header area (i.e. not including VPD) */ + efx_dword_t static_vpd_offset; + efx_dword_t static_vpd_length; + efx_dword_t capabilities; + efx_byte_t mac_addr_base[6]; + efx_byte_t green_mode_cal; /* Green mode calibration result */ + efx_byte_t green_mode_valid; /* Whether cal holds a valid value */ + efx_word_t mac_addr_count; + efx_word_t mac_addr_stride; + efx_word_t calibrated_vref; /* Vref as measured during production */ + efx_word_t adc_vref; /* Vref as read by ADC */ + efx_dword_t reserved2[1]; /* (write as zero) */ + efx_dword_t num_dbi_items; + struct { + efx_word_t addr; + efx_word_t byte_enables; + efx_dword_t value; + } dbi[]; +} siena_mc_static_config_hdr_t; + +/* This prefixes a valid XIP partition */ +#define XIP_PARTITION_MAGIC (0x51DEC0DE) + +#define SIENA_MC_DYNAMIC_CONFIG_MAGIC (0xBDCFDDDD) +#define SIENA_MC_DYNAMIC_CONFIG_VERSION (0) + +typedef struct siena_mc_fw_version_s { /* GENERATED BY scripts/genfwdef */ + efx_dword_t fw_subtype; + efx_word_t version_w; + efx_word_t version_x; + efx_word_t version_y; + efx_word_t version_z; +} siena_mc_fw_version_t; + +typedef struct siena_mc_dynamic_config_hdr_s { /* GENERATED BY scripts/genfwdef */ + efx_dword_t magic; /* = SIENA_MC_DYNAMIC_CONFIG_MAGIC */ + efx_word_t length; /* of header area (i.e. not including VPD) */ + efx_byte_t version; + efx_byte_t csum; /* over header area (i.e. 
not including VPD) */ + efx_dword_t dynamic_vpd_offset; + efx_dword_t dynamic_vpd_length; + efx_dword_t num_fw_version_items; + siena_mc_fw_version_t fw_version[]; +} siena_mc_dynamic_config_hdr_t; + +#define SIENA_MC_EXPROM_SINGLE_MAGIC (0xAA55) /* little-endian uint16_t */ + +#define SIENA_MC_EXPROM_COMBO_MAGIC (0xB0070102) /* little-endian uint32_t */ +#define SIENA_MC_EXPROM_COMBO_V2_MAGIC (0xB0070103) /* little-endian uint32_t */ + +typedef struct siena_mc_combo_rom_hdr_s { /* GENERATED BY scripts/genfwdef */ + efx_dword_t magic; /* = SIENA_MC_EXPROM_COMBO_MAGIC or SIENA_MC_EXPROM_COMBO_V2_MAGIC */ + union { + struct { + efx_dword_t len1; /* length of first image */ + efx_dword_t len2; /* length of second image */ + efx_dword_t off1; /* offset of first byte to edit to combine images */ + efx_dword_t off2; /* offset of second byte to edit to combine images */ + efx_word_t infoblk0_off;/* infoblk offset */ + efx_word_t infoblk1_off;/* infoblk offset */ + efx_byte_t infoblk_len;/* length of space reserved for one infoblk structure */ + efx_byte_t reserved[7];/* (set to 0) */ + } v1; + struct { + efx_dword_t len1; /* length of first image */ + efx_dword_t len2; /* length of second image */ + efx_dword_t off1; /* offset of first byte to edit to combine images */ + efx_dword_t off2; /* offset of second byte to edit to combine images */ + efx_word_t infoblk_off;/* infoblk start offset */ + efx_word_t infoblk_count;/* infoblk count */ + efx_byte_t infoblk_len;/* length of space reserved for one infoblk structure */ + efx_byte_t reserved[7];/* (set to 0) */ + } v2; + } data; +} siena_mc_combo_rom_hdr_t; + +#pragma pack() + +#endif /* _SYS_SIENA_FLASH_H */ diff --git a/src/spdk/dpdk/drivers/net/sfc/base/siena_impl.h b/src/spdk/dpdk/drivers/net/sfc/base/siena_impl.h new file mode 100644 index 000000000..83f60d164 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/base/siena_impl.h @@ -0,0 +1,446 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2009-2019 Solarflare Communications Inc. 
+ */ + +#ifndef _SYS_SIENA_IMPL_H +#define _SYS_SIENA_IMPL_H + +#include "efx.h" +#include "efx_regs.h" +#include "siena_flash.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef EFX_TXQ_DC_SIZE +#define EFX_TXQ_DC_SIZE 1 /* 16 descriptors */ +#endif +#ifndef EFX_RXQ_DC_SIZE +#define EFX_RXQ_DC_SIZE 3 /* 64 descriptors */ +#endif +#define EFX_TXQ_DC_NDESCS(_dcsize) (8 << (_dcsize)) +#define EFX_RXQ_DC_NDESCS(_dcsize) (8 << (_dcsize)) + +#define SIENA_EVQ_MAXNEVS 32768 +#define SIENA_EVQ_MINNEVS 512 + +#define SIENA_TXQ_MAXNDESCS 4096 +#define SIENA_TXQ_MINNDESCS 512 + +#define SIENA_RXQ_MAXNDESCS 4096 +#define SIENA_RXQ_MINNDESCS 512 + +#define SIENA_EVQ_DESC_SIZE (sizeof (efx_qword_t)) +#define SIENA_RXQ_DESC_SIZE (sizeof (efx_qword_t)) +#define SIENA_TXQ_DESC_SIZE (sizeof (efx_qword_t)) + +#define SIENA_NVRAM_CHUNK 0x80 + + +extern __checkReturn efx_rc_t +siena_nic_probe( + __in efx_nic_t *enp); + +extern __checkReturn efx_rc_t +siena_nic_reset( + __in efx_nic_t *enp); + +extern __checkReturn efx_rc_t +siena_nic_init( + __in efx_nic_t *enp); + +#if EFSYS_OPT_DIAG + +extern efx_sram_pattern_fn_t __efx_sram_pattern_fns[]; + +typedef struct siena_register_set_s { + unsigned int address; + unsigned int step; + unsigned int rows; + efx_oword_t mask; +} siena_register_set_t; + +extern __checkReturn efx_rc_t +siena_nic_register_test( + __in efx_nic_t *enp); + +#endif /* EFSYS_OPT_DIAG */ + +extern void +siena_nic_fini( + __in efx_nic_t *enp); + +extern void +siena_nic_unprobe( + __in efx_nic_t *enp); + +#define SIENA_SRAM_ROWS 0x12000 + +extern void +siena_sram_init( + __in efx_nic_t *enp); + +#if EFSYS_OPT_DIAG + +extern __checkReturn efx_rc_t +siena_sram_test( + __in efx_nic_t *enp, + __in efx_sram_pattern_fn_t func); + +#endif /* EFSYS_OPT_DIAG */ + +#if EFSYS_OPT_MCDI + +extern __checkReturn efx_rc_t +siena_mcdi_init( + __in efx_nic_t *enp, + __in const efx_mcdi_transport_t *mtp); + +extern void +siena_mcdi_send_request( + __in efx_nic_t *enp, + __in_bcount(hdr_len) void *hdrp, + __in size_t hdr_len, + __in_bcount(sdu_len) void *sdup, + __in size_t sdu_len); + +extern __checkReturn boolean_t +siena_mcdi_poll_response( + __in efx_nic_t *enp); + +extern void +siena_mcdi_read_response( + __in efx_nic_t *enp, + __out_bcount(length) void *bufferp, + __in size_t offset, + __in size_t length); + +extern efx_rc_t +siena_mcdi_poll_reboot( + __in efx_nic_t *enp); + +extern void +siena_mcdi_fini( + __in efx_nic_t *enp); + +extern __checkReturn efx_rc_t +siena_mcdi_feature_supported( + __in efx_nic_t *enp, + __in efx_mcdi_feature_id_t id, + __out boolean_t *supportedp); + +extern void +siena_mcdi_get_timeout( + __in efx_nic_t *enp, + __in efx_mcdi_req_t *emrp, + __out uint32_t *timeoutp); + +#endif /* EFSYS_OPT_MCDI */ + +#if EFSYS_OPT_NVRAM || EFSYS_OPT_VPD + +extern __checkReturn efx_rc_t +siena_nvram_partn_lock( + __in efx_nic_t *enp, + __in uint32_t partn); + +extern __checkReturn efx_rc_t +siena_nvram_partn_unlock( + __in efx_nic_t *enp, + __in uint32_t partn, + __out_opt uint32_t *verify_resultp); + +extern __checkReturn efx_rc_t +siena_nvram_get_dynamic_cfg( + __in efx_nic_t *enp, + __in uint32_t partn, + __in boolean_t vpd, + __out siena_mc_dynamic_config_hdr_t **dcfgp, + __out size_t *sizep); + +#endif /* EFSYS_OPT_VPD || EFSYS_OPT_NVRAM */ + +#if EFSYS_OPT_NVRAM + +#if EFSYS_OPT_DIAG + +extern __checkReturn efx_rc_t +siena_nvram_test( + __in efx_nic_t *enp); + +#endif /* EFSYS_OPT_DIAG */ + +extern __checkReturn efx_rc_t +siena_nvram_get_subtype( + __in efx_nic_t *enp, + __in uint32_t 
partn, + __out uint32_t *subtypep); + +extern __checkReturn efx_rc_t +siena_nvram_type_to_partn( + __in efx_nic_t *enp, + __in efx_nvram_type_t type, + __out uint32_t *partnp); + +extern __checkReturn efx_rc_t +siena_nvram_partn_size( + __in efx_nic_t *enp, + __in uint32_t partn, + __out size_t *sizep); + +extern __checkReturn efx_rc_t +siena_nvram_partn_info( + __in efx_nic_t *enp, + __in uint32_t partn, + __out efx_nvram_info_t * enip); + +extern __checkReturn efx_rc_t +siena_nvram_partn_rw_start( + __in efx_nic_t *enp, + __in uint32_t partn, + __out size_t *chunk_sizep); + +extern __checkReturn efx_rc_t +siena_nvram_partn_read( + __in efx_nic_t *enp, + __in uint32_t partn, + __in unsigned int offset, + __out_bcount(size) caddr_t data, + __in size_t size); + +extern __checkReturn efx_rc_t +siena_nvram_partn_erase( + __in efx_nic_t *enp, + __in uint32_t partn, + __in unsigned int offset, + __in size_t size); + +extern __checkReturn efx_rc_t +siena_nvram_partn_write( + __in efx_nic_t *enp, + __in uint32_t partn, + __in unsigned int offset, + __out_bcount(size) caddr_t data, + __in size_t size); + +extern __checkReturn efx_rc_t +siena_nvram_partn_rw_finish( + __in efx_nic_t *enp, + __in uint32_t partn, + __out_opt uint32_t *verify_resultp); + +extern __checkReturn efx_rc_t +siena_nvram_partn_get_version( + __in efx_nic_t *enp, + __in uint32_t partn, + __out uint32_t *subtypep, + __out_ecount(4) uint16_t version[4]); + +extern __checkReturn efx_rc_t +siena_nvram_partn_set_version( + __in efx_nic_t *enp, + __in uint32_t partn, + __in_ecount(4) uint16_t version[4]); + +#endif /* EFSYS_OPT_NVRAM */ + +#if EFSYS_OPT_VPD + +extern __checkReturn efx_rc_t +siena_vpd_init( + __in efx_nic_t *enp); + +extern __checkReturn efx_rc_t +siena_vpd_size( + __in efx_nic_t *enp, + __out size_t *sizep); + +extern __checkReturn efx_rc_t +siena_vpd_read( + __in efx_nic_t *enp, + __out_bcount(size) caddr_t data, + __in size_t size); + +extern __checkReturn efx_rc_t +siena_vpd_verify( + __in efx_nic_t *enp, + __in_bcount(size) caddr_t data, + __in size_t size); + +extern __checkReturn efx_rc_t +siena_vpd_reinit( + __in efx_nic_t *enp, + __in_bcount(size) caddr_t data, + __in size_t size); + +extern __checkReturn efx_rc_t +siena_vpd_get( + __in efx_nic_t *enp, + __in_bcount(size) caddr_t data, + __in size_t size, + __inout efx_vpd_value_t *evvp); + +extern __checkReturn efx_rc_t +siena_vpd_set( + __in efx_nic_t *enp, + __in_bcount(size) caddr_t data, + __in size_t size, + __in efx_vpd_value_t *evvp); + +extern __checkReturn efx_rc_t +siena_vpd_next( + __in efx_nic_t *enp, + __in_bcount(size) caddr_t data, + __in size_t size, + __out efx_vpd_value_t *evvp, + __inout unsigned int *contp); + +extern __checkReturn efx_rc_t +siena_vpd_write( + __in efx_nic_t *enp, + __in_bcount(size) caddr_t data, + __in size_t size); + +extern void +siena_vpd_fini( + __in efx_nic_t *enp); + +#endif /* EFSYS_OPT_VPD */ + +typedef struct siena_link_state_s { + uint32_t sls_adv_cap_mask; + uint32_t sls_lp_cap_mask; + unsigned int sls_fcntl; + efx_link_mode_t sls_link_mode; +#if EFSYS_OPT_LOOPBACK + efx_loopback_type_t sls_loopback; +#endif + boolean_t sls_mac_up; +} siena_link_state_t; + +extern void +siena_phy_link_ev( + __in efx_nic_t *enp, + __in efx_qword_t *eqp, + __out efx_link_mode_t *link_modep); + +extern __checkReturn efx_rc_t +siena_phy_get_link( + __in efx_nic_t *enp, + __out siena_link_state_t *slsp); + +extern __checkReturn efx_rc_t +siena_phy_power( + __in efx_nic_t *enp, + __in boolean_t on); + +extern __checkReturn 
efx_rc_t +siena_phy_reconfigure( + __in efx_nic_t *enp); + +extern __checkReturn efx_rc_t +siena_phy_verify( + __in efx_nic_t *enp); + +extern __checkReturn efx_rc_t +siena_phy_oui_get( + __in efx_nic_t *enp, + __out uint32_t *ouip); + +#if EFSYS_OPT_PHY_STATS + +extern void +siena_phy_decode_stats( + __in efx_nic_t *enp, + __in uint32_t vmask, + __in_opt efsys_mem_t *esmp, + __out_opt uint64_t *smaskp, + __inout_ecount_opt(EFX_PHY_NSTATS) uint32_t *stat); + +extern __checkReturn efx_rc_t +siena_phy_stats_update( + __in efx_nic_t *enp, + __in efsys_mem_t *esmp, + __inout_ecount(EFX_PHY_NSTATS) uint32_t *stat); + +#endif /* EFSYS_OPT_PHY_STATS */ + +#if EFSYS_OPT_BIST + +extern __checkReturn efx_rc_t +siena_phy_bist_start( + __in efx_nic_t *enp, + __in efx_bist_type_t type); + +extern __checkReturn efx_rc_t +siena_phy_bist_poll( + __in efx_nic_t *enp, + __in efx_bist_type_t type, + __out efx_bist_result_t *resultp, + __out_opt __drv_when(count > 0, __notnull) + uint32_t *value_maskp, + __out_ecount_opt(count) __drv_when(count > 0, __notnull) + unsigned long *valuesp, + __in size_t count); + +extern void +siena_phy_bist_stop( + __in efx_nic_t *enp, + __in efx_bist_type_t type); + +#endif /* EFSYS_OPT_BIST */ + +extern __checkReturn efx_rc_t +siena_mac_poll( + __in efx_nic_t *enp, + __out efx_link_mode_t *link_modep); + +extern __checkReturn efx_rc_t +siena_mac_up( + __in efx_nic_t *enp, + __out boolean_t *mac_upp); + +extern __checkReturn efx_rc_t +siena_mac_reconfigure( + __in efx_nic_t *enp); + +extern __checkReturn efx_rc_t +siena_mac_pdu_get( + __in efx_nic_t *enp, + __out size_t *pdu); + +#if EFSYS_OPT_LOOPBACK + +extern __checkReturn efx_rc_t +siena_mac_loopback_set( + __in efx_nic_t *enp, + __in efx_link_mode_t link_mode, + __in efx_loopback_type_t loopback_type); + +#endif /* EFSYS_OPT_LOOPBACK */ + +#if EFSYS_OPT_MAC_STATS + +extern __checkReturn efx_rc_t +siena_mac_stats_get_mask( + __in efx_nic_t *enp, + __inout_bcount(mask_size) uint32_t *maskp, + __in size_t mask_size); + +extern __checkReturn efx_rc_t +siena_mac_stats_update( + __in efx_nic_t *enp, + __in efsys_mem_t *esmp, + __inout_ecount(EFX_MAC_NSTATS) efsys_stat_t *stat, + __inout_opt uint32_t *generationp); + +#endif /* EFSYS_OPT_MAC_STATS */ + +#ifdef __cplusplus +} +#endif + +#endif /* _SYS_SIENA_IMPL_H */ diff --git a/src/spdk/dpdk/drivers/net/sfc/base/siena_mac.c b/src/spdk/dpdk/drivers/net/sfc/base/siena_mac.c new file mode 100644 index 000000000..c4898e5d9 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/base/siena_mac.c @@ -0,0 +1,472 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2009-2019 Solarflare Communications Inc. + */ + +#include "efx.h" +#include "efx_impl.h" + +#if EFSYS_OPT_SIENA + + __checkReturn efx_rc_t +siena_mac_poll( + __in efx_nic_t *enp, + __out efx_link_mode_t *link_modep) +{ + efx_port_t *epp = &(enp->en_port); + siena_link_state_t sls; + efx_rc_t rc; + + if ((rc = siena_phy_get_link(enp, &sls)) != 0) + goto fail1; + + epp->ep_adv_cap_mask = sls.sls_adv_cap_mask; + epp->ep_fcntl = sls.sls_fcntl; + + *link_modep = sls.sls_link_mode; + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + *link_modep = EFX_LINK_UNKNOWN; + + return (rc); +} + + __checkReturn efx_rc_t +siena_mac_up( + __in efx_nic_t *enp, + __out boolean_t *mac_upp) +{ + siena_link_state_t sls; + efx_rc_t rc; + + /* + * Because Siena doesn't *require* polling, we can't rely on + * siena_mac_poll() being executed to populate epp->ep_mac_up. 
+ */ + if ((rc = siena_phy_get_link(enp, &sls)) != 0) + goto fail1; + + *mac_upp = sls.sls_mac_up; + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +siena_mac_reconfigure( + __in efx_nic_t *enp) +{ + efx_port_t *epp = &(enp->en_port); + efx_oword_t multicast_hash[2]; + efx_mcdi_req_t req; + EFX_MCDI_DECLARE_BUF(payload, + MAX(MC_CMD_SET_MAC_IN_LEN, MC_CMD_SET_MCAST_HASH_IN_LEN), + MAX(MC_CMD_SET_MAC_OUT_LEN, MC_CMD_SET_MCAST_HASH_OUT_LEN)); + + unsigned int fcntl; + efx_rc_t rc; + + req.emr_cmd = MC_CMD_SET_MAC; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_SET_MAC_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_SET_MAC_OUT_LEN; + + MCDI_IN_SET_DWORD(req, SET_MAC_IN_MTU, epp->ep_mac_pdu); + MCDI_IN_SET_DWORD(req, SET_MAC_IN_DRAIN, epp->ep_mac_drain ? 1 : 0); + EFX_MAC_ADDR_COPY(MCDI_IN2(req, uint8_t, SET_MAC_IN_ADDR), + epp->ep_mac_addr); + MCDI_IN_POPULATE_DWORD_2(req, SET_MAC_IN_REJECT, + SET_MAC_IN_REJECT_UNCST, !epp->ep_all_unicst, + SET_MAC_IN_REJECT_BRDCST, !epp->ep_brdcst); + + if (epp->ep_fcntl_autoneg) + /* efx_fcntl_set() has already set the phy capabilities */ + fcntl = MC_CMD_FCNTL_AUTO; + else if (epp->ep_fcntl & EFX_FCNTL_RESPOND) + fcntl = (epp->ep_fcntl & EFX_FCNTL_GENERATE) + ? MC_CMD_FCNTL_BIDIR + : MC_CMD_FCNTL_RESPOND; + else + fcntl = MC_CMD_FCNTL_OFF; + + MCDI_IN_SET_DWORD(req, SET_MAC_IN_FCNTL, fcntl); + + efx_mcdi_execute(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail1; + } + epp->ep_all_unicst_inserted = epp->ep_all_unicst; + + /* Push multicast hash */ + + if (epp->ep_all_mulcst) { + /* A hash matching all multicast is all 1s */ + EFX_SET_OWORD(multicast_hash[0]); + EFX_SET_OWORD(multicast_hash[1]); + } else if (epp->ep_mulcst) { + /* Use the hash set by the multicast list */ + multicast_hash[0] = epp->ep_multicst_hash[0]; + multicast_hash[1] = epp->ep_multicst_hash[1]; + } else { + /* A hash matching no traffic is simply 0 */ + EFX_ZERO_OWORD(multicast_hash[0]); + EFX_ZERO_OWORD(multicast_hash[1]); + } + + /* + * Broadcast packets go through the multicast hash filter. + * The IEEE 802.3 CRC32 of the broadcast address is 0xbe2612ff + * so we always add bit 0xff to the mask (bit 0x7f in the + * second octword). + */ + if (epp->ep_brdcst) { + /* + * NOTE: due to constant folding, some of this evaluates + * to null expressions, giving E_EXPR_NULL_EFFECT during + * lint on Illumos. No good way to fix this without + * explicit coding the individual word/bit setting. + * So just suppress lint for this one line. 
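+		 *
+		 * (Coded explicitly, a hash bucket n in 0..255 would map to
+		 * bit (n & 0x7f) of octword (n >> 7); the broadcast bucket
+		 * is 0xff, hence bit 0x7f of multicast_hash[1].)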
+ */ + /* LINTED */ + EFX_SET_OWORD_BIT(multicast_hash[1], 0x7f); + } + + (void) memset(payload, 0, sizeof (payload)); + req.emr_cmd = MC_CMD_SET_MCAST_HASH; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_SET_MCAST_HASH_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_SET_MCAST_HASH_OUT_LEN; + + memcpy(MCDI_IN2(req, uint8_t, SET_MCAST_HASH_IN_HASH0), + multicast_hash, sizeof (multicast_hash)); + + efx_mcdi_execute(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail2; + } + epp->ep_all_mulcst_inserted = epp->ep_all_mulcst; + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +#if EFSYS_OPT_LOOPBACK + + __checkReturn efx_rc_t +siena_mac_loopback_set( + __in efx_nic_t *enp, + __in efx_link_mode_t link_mode, + __in efx_loopback_type_t loopback_type) +{ + efx_port_t *epp = &(enp->en_port); + const efx_phy_ops_t *epop = epp->ep_epop; + efx_loopback_type_t old_loopback_type; + efx_link_mode_t old_loopback_link_mode; + efx_rc_t rc; + + /* The PHY object handles this on Siena */ + old_loopback_type = epp->ep_loopback_type; + old_loopback_link_mode = epp->ep_loopback_link_mode; + epp->ep_loopback_type = loopback_type; + epp->ep_loopback_link_mode = link_mode; + + if ((rc = epop->epo_reconfigure(enp)) != 0) + goto fail1; + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + epp->ep_loopback_type = old_loopback_type; + epp->ep_loopback_link_mode = old_loopback_link_mode; + + return (rc); +} + +#endif /* EFSYS_OPT_LOOPBACK */ + +#if EFSYS_OPT_MAC_STATS + + __checkReturn efx_rc_t +siena_mac_stats_get_mask( + __in efx_nic_t *enp, + __inout_bcount(mask_size) uint32_t *maskp, + __in size_t mask_size) +{ + const struct efx_mac_stats_range siena_stats[] = { + { EFX_MAC_RX_OCTETS, EFX_MAC_RX_GE_15XX_PKTS }, + /* EFX_MAC_RX_ERRORS is not supported */ + { EFX_MAC_RX_FCS_ERRORS, EFX_MAC_TX_EX_DEF_PKTS }, + }; + efx_rc_t rc; + + _NOTE(ARGUNUSED(enp)) + + if ((rc = efx_mac_stats_mask_add_ranges(maskp, mask_size, + siena_stats, EFX_ARRAY_SIZE(siena_stats))) != 0) + goto fail1; + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +#define SIENA_MAC_STAT_READ(_esmp, _field, _eqp) \ + EFSYS_MEM_READQ((_esmp), (_field) * sizeof (efx_qword_t), _eqp) + + __checkReturn efx_rc_t +siena_mac_stats_update( + __in efx_nic_t *enp, + __in efsys_mem_t *esmp, + __inout_ecount(EFX_MAC_NSTATS) efsys_stat_t *stat, + __inout_opt uint32_t *generationp) +{ + const efx_nic_cfg_t *encp = &enp->en_nic_cfg; + efx_qword_t generation_start; + efx_qword_t generation_end; + efx_qword_t value; + efx_rc_t rc; + + if (encp->enc_mac_stats_nstats < MC_CMD_MAC_NSTATS) { + /* MAC stats count too small */ + rc = ENOSPC; + goto fail1; + } + if (EFSYS_MEM_SIZE(esmp) < + (encp->enc_mac_stats_nstats * sizeof (efx_qword_t))) { + /* DMA buffer too small */ + rc = ENOSPC; + goto fail2; + } + + /* Read END first so we don't race with the MC */ + EFSYS_DMA_SYNC_FOR_KERNEL(esmp, 0, EFSYS_MEM_SIZE(esmp)); + SIENA_MAC_STAT_READ(esmp, (encp->enc_mac_stats_nstats - 1), + &generation_end); + EFSYS_MEM_READ_BARRIER(); + + /* TX */ + SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_PKTS, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_PKTS]), &value); + SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_CONTROL_PKTS, &value); + EFSYS_STAT_SUBR_QWORD(&(stat[EFX_MAC_TX_PKTS]), &value); + + SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_PAUSE_PKTS, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_PAUSE_PKTS]), &value); + + 
SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_UNICAST_PKTS, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_UNICST_PKTS]), &value); + + SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_MULTICAST_PKTS, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_MULTICST_PKTS]), &value); + + SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_BROADCAST_PKTS, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_BRDCST_PKTS]), &value); + + SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_BYTES, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_OCTETS]), &value); + + SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_LT64_PKTS, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_LE_64_PKTS]), &value); + SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_64_PKTS, &value); + EFSYS_STAT_INCR_QWORD(&(stat[EFX_MAC_TX_LE_64_PKTS]), &value); + + SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_65_TO_127_PKTS, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_65_TO_127_PKTS]), &value); + + SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_128_TO_255_PKTS, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_128_TO_255_PKTS]), &value); + + SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_256_TO_511_PKTS, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_256_TO_511_PKTS]), &value); + + SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_512_TO_1023_PKTS, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_512_TO_1023_PKTS]), &value); + + SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_1024_TO_15XX_PKTS, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_1024_TO_15XX_PKTS]), &value); + + SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_15XX_TO_JUMBO_PKTS, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_GE_15XX_PKTS]), &value); + SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_GTJUMBO_PKTS, &value); + EFSYS_STAT_INCR_QWORD(&(stat[EFX_MAC_TX_GE_15XX_PKTS]), &value); + + SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_BAD_FCS_PKTS, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_ERRORS]), &value); + + SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_SINGLE_COLLISION_PKTS, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_SGL_COL_PKTS]), &value); + + SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_MULTIPLE_COLLISION_PKTS, + &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_MULT_COL_PKTS]), &value); + + SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_EXCESSIVE_COLLISION_PKTS, + &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_EX_COL_PKTS]), &value); + + SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_LATE_COLLISION_PKTS, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_LATE_COL_PKTS]), &value); + + SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_DEFERRED_PKTS, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_DEF_PKTS]), &value); + + SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_EXCESSIVE_DEFERRED_PKTS, + &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_EX_DEF_PKTS]), &value); + + /* RX */ + SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_BYTES, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_OCTETS]), &value); + + SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_PKTS, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_PKTS]), &value); + + SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_UNICAST_PKTS, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_UNICST_PKTS]), &value); + + SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_MULTICAST_PKTS, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_MULTICST_PKTS]), &value); + + SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_BROADCAST_PKTS, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_BRDCST_PKTS]), &value); + + SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_PAUSE_PKTS, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_PAUSE_PKTS]), &value); + + SIENA_MAC_STAT_READ(esmp, 
MC_CMD_MAC_RX_UNDERSIZE_PKTS, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_LE_64_PKTS]), &value); + SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_64_PKTS, &value); + EFSYS_STAT_INCR_QWORD(&(stat[EFX_MAC_RX_LE_64_PKTS]), &value); + + SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_65_TO_127_PKTS, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_65_TO_127_PKTS]), &value); + + SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_128_TO_255_PKTS, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_128_TO_255_PKTS]), &value); + + SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_256_TO_511_PKTS, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_256_TO_511_PKTS]), &value); + + SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_512_TO_1023_PKTS, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_512_TO_1023_PKTS]), &value); + + SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_1024_TO_15XX_PKTS, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_1024_TO_15XX_PKTS]), &value); + + SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_15XX_TO_JUMBO_PKTS, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_GE_15XX_PKTS]), &value); + SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_GTJUMBO_PKTS, &value); + EFSYS_STAT_INCR_QWORD(&(stat[EFX_MAC_RX_GE_15XX_PKTS]), &value); + + SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_BAD_FCS_PKTS, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_FCS_ERRORS]), &value); + + SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_OVERFLOW_PKTS, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_DROP_EVENTS]), &value); + + SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_FALSE_CARRIER_PKTS, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_FALSE_CARRIER_ERRORS]), &value); + + SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_SYMBOL_ERROR_PKTS, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_SYMBOL_ERRORS]), &value); + + SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_ALIGN_ERROR_PKTS, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_ALIGN_ERRORS]), &value); + + SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_INTERNAL_ERROR_PKTS, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_INTERNAL_ERRORS]), &value); + + SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_JABBER_PKTS, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_JABBER_PKTS]), &value); + + SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_LANES01_CHAR_ERR, &value); + EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE0_CHAR_ERR]), + &(value.eq_dword[0])); + EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE1_CHAR_ERR]), + &(value.eq_dword[1])); + + SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_LANES23_CHAR_ERR, &value); + EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE2_CHAR_ERR]), + &(value.eq_dword[0])); + EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE3_CHAR_ERR]), + &(value.eq_dword[1])); + + SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_LANES01_DISP_ERR, &value); + EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE0_DISP_ERR]), + &(value.eq_dword[0])); + EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE1_DISP_ERR]), + &(value.eq_dword[1])); + + SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_LANES23_DISP_ERR, &value); + EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE2_DISP_ERR]), + &(value.eq_dword[0])); + EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE3_DISP_ERR]), + &(value.eq_dword[1])); + + SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_MATCH_FAULT, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_MATCH_FAULT]), &value); + + SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_NODESC_DROPS, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_NODESC_DROP_CNT]), &value); + + EFSYS_DMA_SYNC_FOR_KERNEL(esmp, 0, EFSYS_MEM_SIZE(esmp)); + EFSYS_MEM_READ_BARRIER(); + SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_GENERATION_START, + &generation_start); + + 
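+	/*
+	 * GENERATION_END was read before the statistics above and
+	 * GENERATION_START after them; if the two differ the MC rewrote
+	 * the buffer mid-read and EAGAIN is returned below. A caller can
+	 * treat that as transient and retry, for example (illustrative
+	 * sketch only, not part of this file):
+	 *
+	 *	do {
+	 *		rc = siena_mac_stats_update(enp, esmp, stat, &gen);
+	 *	} while (rc == EAGAIN);
+	 */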
/* Check that we didn't read the stats in the middle of a DMA */ + /* Not a good enough check ? */ + if (memcmp(&generation_start, &generation_end, + sizeof (generation_start))) + return (EAGAIN); + + if (generationp) + *generationp = EFX_QWORD_FIELD(generation_start, EFX_DWORD_0); + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +#endif /* EFSYS_OPT_MAC_STATS */ + + __checkReturn efx_rc_t +siena_mac_pdu_get( + __in efx_nic_t *enp, + __out size_t *pdu) +{ + return (ENOTSUP); +} + +#endif /* EFSYS_OPT_SIENA */ diff --git a/src/spdk/dpdk/drivers/net/sfc/base/siena_mcdi.c b/src/spdk/dpdk/drivers/net/sfc/base/siena_mcdi.c new file mode 100644 index 000000000..1517f701e --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/base/siena_mcdi.c @@ -0,0 +1,243 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2012-2019 Solarflare Communications Inc. + */ + +#include "efx.h" +#include "efx_impl.h" + +#if EFSYS_OPT_SIENA && EFSYS_OPT_MCDI + +#define SIENA_MCDI_PDU(_emip) \ + (((emip)->emi_port == 1) \ + ? MC_SMEM_P0_PDU_OFST >> 2 \ + : MC_SMEM_P1_PDU_OFST >> 2) + +#define SIENA_MCDI_DOORBELL(_emip) \ + (((emip)->emi_port == 1) \ + ? MC_SMEM_P0_DOORBELL_OFST >> 2 \ + : MC_SMEM_P1_DOORBELL_OFST >> 2) + +#define SIENA_MCDI_STATUS(_emip) \ + (((emip)->emi_port == 1) \ + ? MC_SMEM_P0_STATUS_OFST >> 2 \ + : MC_SMEM_P1_STATUS_OFST >> 2) + + + void +siena_mcdi_send_request( + __in efx_nic_t *enp, + __in_bcount(hdr_len) void *hdrp, + __in size_t hdr_len, + __in_bcount(sdu_len) void *sdup, + __in size_t sdu_len) +{ + efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); + efx_dword_t dword; + unsigned int pdur; + unsigned int dbr; + unsigned int pos; + + EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA); + + EFSYS_ASSERT(emip->emi_port == 1 || emip->emi_port == 2); + pdur = SIENA_MCDI_PDU(emip); + dbr = SIENA_MCDI_DOORBELL(emip); + + /* Write the header */ + EFSYS_ASSERT3U(hdr_len, ==, sizeof (efx_dword_t)); + dword = *(efx_dword_t *)hdrp; + EFX_BAR_TBL_WRITED(enp, FR_CZ_MC_TREG_SMEM, pdur, &dword, B_TRUE); + + /* Write the payload */ + for (pos = 0; pos < sdu_len; pos += sizeof (efx_dword_t)) { + dword = *(efx_dword_t *)((uint8_t *)sdup + pos); + EFX_BAR_TBL_WRITED(enp, FR_CZ_MC_TREG_SMEM, + pdur + 1 + (pos >> 2), &dword, B_FALSE); + } + + /* Ring the doorbell */ + EFX_POPULATE_DWORD_1(dword, EFX_DWORD_0, 0xd004be11); + EFX_BAR_TBL_WRITED(enp, FR_CZ_MC_TREG_SMEM, dbr, &dword, B_FALSE); +} + + efx_rc_t +siena_mcdi_poll_reboot( + __in efx_nic_t *enp) +{ +#if 1 + /* + * XXX Bug 25922, bug 26099: This function is not being used + * properly. Until its callers are fixed, it should always + * return 0. 
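+	 *
+	 * The disabled #else branch below shows the intended behaviour:
+	 * read and clear the per-port reboot status word in MC_TREG_SMEM,
+	 * reporting an MC assertion as EINTR and any other reboot as EIO.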
+ */ + _NOTE(ARGUNUSED(enp)) + return (0); +#else + efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); + unsigned int rebootr; + efx_dword_t dword; + uint32_t value; + + EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA); + EFSYS_ASSERT(emip->emi_port == 1 || emip->emi_port == 2); + rebootr = SIENA_MCDI_STATUS(emip); + + EFX_BAR_TBL_READD(enp, FR_CZ_MC_TREG_SMEM, rebootr, &dword, B_FALSE); + value = EFX_DWORD_FIELD(dword, EFX_DWORD_0); + + if (value == 0) + return (0); + + EFX_ZERO_DWORD(dword); + EFX_BAR_TBL_WRITED(enp, FR_CZ_MC_TREG_SMEM, rebootr, &dword, B_FALSE); + + if (value == MC_STATUS_DWORD_ASSERT) + return (EINTR); + else + return (EIO); +#endif +} + +extern __checkReturn boolean_t +siena_mcdi_poll_response( + __in efx_nic_t *enp) +{ + efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); + efx_dword_t hdr; + unsigned int pdur; + + EFSYS_ASSERT(emip->emi_port == 1 || emip->emi_port == 2); + pdur = SIENA_MCDI_PDU(emip); + + EFX_BAR_TBL_READD(enp, FR_CZ_MC_TREG_SMEM, pdur, &hdr, B_FALSE); + return (EFX_DWORD_FIELD(hdr, MCDI_HEADER_RESPONSE) ? B_TRUE : B_FALSE); +} + + void +siena_mcdi_read_response( + __in efx_nic_t *enp, + __out_bcount(length) void *bufferp, + __in size_t offset, + __in size_t length) +{ + efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); + unsigned int pdur; + unsigned int pos = 0; + efx_dword_t data; + size_t remaining = length; + + EFSYS_ASSERT(emip->emi_port == 1 || emip->emi_port == 2); + pdur = SIENA_MCDI_PDU(emip); + + while (remaining > 0) { + size_t chunk = MIN(remaining, sizeof (data)); + + EFX_BAR_TBL_READD(enp, FR_CZ_MC_TREG_SMEM, + pdur + ((offset + pos) >> 2), &data, B_FALSE); + memcpy((uint8_t *)bufferp + pos, &data, chunk); + pos += chunk; + remaining -= chunk; + } +} + + __checkReturn efx_rc_t +siena_mcdi_init( + __in efx_nic_t *enp, + __in const efx_mcdi_transport_t *mtp) +{ + efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); + efx_oword_t oword; + unsigned int portnum; + efx_rc_t rc; + + _NOTE(ARGUNUSED(mtp)) + + EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA); + + /* Determine the port number to use for MCDI */ + EFX_BAR_READO(enp, FR_AZ_CS_DEBUG_REG, &oword); + portnum = EFX_OWORD_FIELD(oword, FRF_CZ_CS_PORT_NUM); + + if (portnum == 0) { + /* Presumably booted from ROM; only MCDI port 1 will work */ + emip->emi_port = 1; + } else if (portnum <= 2) { + emip->emi_port = portnum; + } else { + rc = EINVAL; + goto fail1; + } + + /* Siena BootROM and firmware only support MCDIv1 */ + emip->emi_max_version = 1; + + /* + * Wipe the atomic reboot status so subsequent MCDI requests succeed. + * BOOT_STATUS is preserved so eno_nic_probe() can boot out of the + * assertion handler. + */ + (void) siena_mcdi_poll_reboot(enp); + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + void +siena_mcdi_fini( + __in efx_nic_t *enp) +{ + _NOTE(ARGUNUSED(enp)) +} + + __checkReturn efx_rc_t +siena_mcdi_feature_supported( + __in efx_nic_t *enp, + __in efx_mcdi_feature_id_t id, + __out boolean_t *supportedp) +{ + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_family, ==, EFX_FAMILY_SIENA); + + switch (id) { + case EFX_MCDI_FEATURE_FW_UPDATE: + case EFX_MCDI_FEATURE_LINK_CONTROL: + case EFX_MCDI_FEATURE_MACADDR_CHANGE: + case EFX_MCDI_FEATURE_MAC_SPOOFING: + *supportedp = B_TRUE; + break; + default: + rc = ENOTSUP; + goto fail1; + } + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +/* Default timeout for MCDI command processing. 
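+ * The value below is 10 seconds, expressed in microseconds.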
*/ +#define SIENA_MCDI_CMD_TIMEOUT_US (10 * 1000 * 1000) + + void +siena_mcdi_get_timeout( + __in efx_nic_t *enp, + __in efx_mcdi_req_t *emrp, + __out uint32_t *timeoutp) +{ + _NOTE(ARGUNUSED(enp, emrp)) + + *timeoutp = SIENA_MCDI_CMD_TIMEOUT_US; +} + + +#endif /* EFSYS_OPT_SIENA && EFSYS_OPT_MCDI */ diff --git a/src/spdk/dpdk/drivers/net/sfc/base/siena_nic.c b/src/spdk/dpdk/drivers/net/sfc/base/siena_nic.c new file mode 100644 index 000000000..bd5e8ceb8 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/base/siena_nic.c @@ -0,0 +1,807 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2009-2019 Solarflare Communications Inc. + */ + +#include "efx.h" +#include "efx_impl.h" +#include "mcdi_mon.h" + +#if EFSYS_OPT_SIENA + +#if EFSYS_OPT_VPD || EFSYS_OPT_NVRAM + +static __checkReturn efx_rc_t +siena_nic_get_partn_mask( + __in efx_nic_t *enp, + __out unsigned int *maskp) +{ + efx_mcdi_req_t req; + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_NVRAM_TYPES_IN_LEN, + MC_CMD_NVRAM_TYPES_OUT_LEN); + efx_rc_t rc; + + req.emr_cmd = MC_CMD_NVRAM_TYPES; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_NVRAM_TYPES_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_NVRAM_TYPES_OUT_LEN; + + efx_mcdi_execute(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail1; + } + + if (req.emr_out_length_used < MC_CMD_NVRAM_TYPES_OUT_LEN) { + rc = EMSGSIZE; + goto fail2; + } + + *maskp = MCDI_OUT_DWORD(req, NVRAM_TYPES_OUT_TYPES); + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +#endif /* EFSYS_OPT_VPD || EFSYS_OPT_NVRAM */ + +static __checkReturn efx_rc_t +siena_board_cfg( + __in efx_nic_t *enp) +{ + efx_nic_cfg_t *encp = &(enp->en_nic_cfg); + uint8_t mac_addr[6]; + efx_dword_t capabilities; + uint32_t board_type; + uint32_t nevq, nrxq, ntxq; + efx_rc_t rc; + + /* Siena has a fixed 8Kbyte VI window size */ + EFX_STATIC_ASSERT(1U << EFX_VI_WINDOW_SHIFT_8K == 8192); + encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_8K; + + /* External port identifier using one-based port numbering */ + encp->enc_external_port = (uint8_t)enp->en_mcdi.em_emip.emi_port; + + /* Board configuration */ + if ((rc = efx_mcdi_get_board_cfg(enp, &board_type, + &capabilities, mac_addr)) != 0) + goto fail1; + + EFX_MAC_ADDR_COPY(encp->enc_mac_addr, mac_addr); + + encp->enc_board_type = board_type; + + /* + * There is no possibility to determine the number of PFs on Siena + * by issuing MCDI request, and it is not an easy task to find the + * value based on the board type, so 'enc_hw_pf_count' is set to 1 + */ + encp->enc_hw_pf_count = 1; + + /* Additional capabilities */ + encp->enc_clk_mult = 1; + if (EFX_DWORD_FIELD(capabilities, MC_CMD_CAPABILITIES_TURBO)) { + enp->en_features |= EFX_FEATURE_TURBO; + + if (EFX_DWORD_FIELD(capabilities, + MC_CMD_CAPABILITIES_TURBO_ACTIVE)) { + encp->enc_clk_mult = 2; + } + } + + encp->enc_evq_timer_quantum_ns = + EFX_EVQ_SIENA_TIMER_QUANTUM_NS / encp->enc_clk_mult; + encp->enc_evq_timer_max_us = (encp->enc_evq_timer_quantum_ns << + FRF_CZ_TC_TIMER_VAL_WIDTH) / 1000; + + encp->enc_ev_desc_size = SIENA_EVQ_DESC_SIZE; + encp->enc_rx_desc_size = SIENA_RXQ_DESC_SIZE; + encp->enc_tx_desc_size = SIENA_TXQ_DESC_SIZE; + + /* When hash header insertion is enabled, Siena inserts 16 bytes */ + encp->enc_rx_prefix_size = 16; + + /* Alignment for receive packet DMA buffers */ + encp->enc_rx_buf_align_start = 1; + encp->enc_rx_buf_align_end = 1; + + /* Alignment for WPTR 
updates */ + encp->enc_rx_push_align = 1; + +#if EFSYS_OPT_RX_SCALE + /* There is one RSS context per function */ + encp->enc_rx_scale_max_exclusive_contexts = 1; + + encp->enc_rx_scale_hash_alg_mask |= (1U << EFX_RX_HASHALG_LFSR); + encp->enc_rx_scale_hash_alg_mask |= (1U << EFX_RX_HASHALG_TOEPLITZ); + + /* + * It is always possible to use port numbers + * as the input data for hash computation. + */ + encp->enc_rx_scale_l4_hash_supported = B_TRUE; + + /* There is no support for additional RSS modes */ + encp->enc_rx_scale_additional_modes_supported = B_FALSE; +#endif /* EFSYS_OPT_RX_SCALE */ + + encp->enc_tx_dma_desc_size_max = EFX_MASK32(FSF_AZ_TX_KER_BYTE_COUNT); + /* Fragments must not span 4k boundaries. */ + encp->enc_tx_dma_desc_boundary = 4096; + + /* Resource limits */ + rc = efx_mcdi_get_resource_limits(enp, &nevq, &nrxq, &ntxq); + if (rc != 0) { + if (rc != ENOTSUP) + goto fail2; + + nevq = 1024; + nrxq = EFX_RXQ_LIMIT_TARGET; + ntxq = EFX_TXQ_LIMIT_TARGET; + } + encp->enc_evq_limit = nevq; + encp->enc_rxq_limit = MIN(EFX_RXQ_LIMIT_TARGET, nrxq); + encp->enc_txq_limit = MIN(EFX_TXQ_LIMIT_TARGET, ntxq); + + encp->enc_evq_max_nevs = SIENA_EVQ_MAXNEVS; + encp->enc_evq_min_nevs = SIENA_EVQ_MINNEVS; + + encp->enc_rxq_max_ndescs = EF10_RXQ_MAXNDESCS; + encp->enc_rxq_min_ndescs = EF10_RXQ_MINNDESCS; + + encp->enc_txq_max_ndescs = SIENA_TXQ_MAXNDESCS; + encp->enc_txq_min_ndescs = SIENA_TXQ_MINNDESCS; + + encp->enc_buftbl_limit = SIENA_SRAM_ROWS - + (encp->enc_txq_limit * EFX_TXQ_DC_NDESCS(EFX_TXQ_DC_SIZE)) - + (encp->enc_rxq_limit * EFX_RXQ_DC_NDESCS(EFX_RXQ_DC_SIZE)); + + encp->enc_hw_tx_insert_vlan_enabled = B_FALSE; + encp->enc_fw_assisted_tso_enabled = B_FALSE; + encp->enc_fw_assisted_tso_v2_enabled = B_FALSE; + encp->enc_fw_assisted_tso_v2_n_contexts = 0; + encp->enc_allow_set_mac_with_installed_filters = B_TRUE; + encp->enc_rx_packed_stream_supported = B_FALSE; + encp->enc_rx_var_packed_stream_supported = B_FALSE; + encp->enc_rx_es_super_buffer_supported = B_FALSE; + encp->enc_fw_subvariant_no_tx_csum_supported = B_FALSE; + + /* Siena supports two 10G ports, and 8 lanes of PCIe Gen2 */ + encp->enc_required_pcie_bandwidth_mbps = 2 * 10000; + encp->enc_max_pcie_link_gen = EFX_PCIE_LINK_SPEED_GEN2; + + encp->enc_nvram_update_verify_result_supported = B_FALSE; + + encp->enc_mac_stats_nstats = MC_CMD_MAC_NSTATS; + + encp->enc_filter_action_flag_supported = B_FALSE; + encp->enc_filter_action_mark_supported = B_FALSE; + encp->enc_filter_action_mark_max = 0; + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +static __checkReturn efx_rc_t +siena_phy_cfg( + __in efx_nic_t *enp) +{ +#if EFSYS_OPT_PHY_STATS + efx_nic_cfg_t *encp = &(enp->en_nic_cfg); +#endif /* EFSYS_OPT_PHY_STATS */ + efx_rc_t rc; + + /* Fill out fields in enp->en_port and enp->en_nic_cfg from MCDI */ + if ((rc = efx_mcdi_get_phy_cfg(enp)) != 0) + goto fail1; + +#if EFSYS_OPT_PHY_STATS + /* Convert the MCDI statistic mask into the EFX_PHY_STAT mask */ + siena_phy_decode_stats(enp, encp->enc_mcdi_phy_stat_mask, + NULL, &encp->enc_phy_stat_mask, NULL); +#endif /* EFSYS_OPT_PHY_STATS */ + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +#define SIENA_BIU_MAGIC0 0x01234567 +#define SIENA_BIU_MAGIC1 0xfedcba98 + +static __checkReturn efx_rc_t +siena_nic_biu_test( + __in efx_nic_t *enp) +{ + efx_oword_t oword; + efx_rc_t rc; + + /* + * Write magic values to scratch registers 0 and 1, then + * verify that the values were written 
correctly. Interleave + * the accesses to ensure that the BIU is not just reading + * back the cached value that was last written. + */ + EFX_POPULATE_OWORD_1(oword, FRF_AZ_DRIVER_DW0, SIENA_BIU_MAGIC0); + EFX_BAR_TBL_WRITEO(enp, FR_AZ_DRIVER_REG, 0, &oword, B_TRUE); + + EFX_POPULATE_OWORD_1(oword, FRF_AZ_DRIVER_DW0, SIENA_BIU_MAGIC1); + EFX_BAR_TBL_WRITEO(enp, FR_AZ_DRIVER_REG, 1, &oword, B_TRUE); + + EFX_BAR_TBL_READO(enp, FR_AZ_DRIVER_REG, 0, &oword, B_TRUE); + if (EFX_OWORD_FIELD(oword, FRF_AZ_DRIVER_DW0) != SIENA_BIU_MAGIC0) { + rc = EIO; + goto fail1; + } + + EFX_BAR_TBL_READO(enp, FR_AZ_DRIVER_REG, 1, &oword, B_TRUE); + if (EFX_OWORD_FIELD(oword, FRF_AZ_DRIVER_DW0) != SIENA_BIU_MAGIC1) { + rc = EIO; + goto fail2; + } + + /* + * Perform the same test, with the values swapped. This + * ensures that subsequent tests don't start with the correct + * values already written into the scratch registers. + */ + EFX_POPULATE_OWORD_1(oword, FRF_AZ_DRIVER_DW0, SIENA_BIU_MAGIC1); + EFX_BAR_TBL_WRITEO(enp, FR_AZ_DRIVER_REG, 0, &oword, B_TRUE); + + EFX_POPULATE_OWORD_1(oword, FRF_AZ_DRIVER_DW0, SIENA_BIU_MAGIC0); + EFX_BAR_TBL_WRITEO(enp, FR_AZ_DRIVER_REG, 1, &oword, B_TRUE); + + EFX_BAR_TBL_READO(enp, FR_AZ_DRIVER_REG, 0, &oword, B_TRUE); + if (EFX_OWORD_FIELD(oword, FRF_AZ_DRIVER_DW0) != SIENA_BIU_MAGIC1) { + rc = EIO; + goto fail3; + } + + EFX_BAR_TBL_READO(enp, FR_AZ_DRIVER_REG, 1, &oword, B_TRUE); + if (EFX_OWORD_FIELD(oword, FRF_AZ_DRIVER_DW0) != SIENA_BIU_MAGIC0) { + rc = EIO; + goto fail4; + } + + return (0); + +fail4: + EFSYS_PROBE(fail4); +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +siena_nic_probe( + __in efx_nic_t *enp) +{ + efx_port_t *epp = &(enp->en_port); + efx_nic_cfg_t *encp = &(enp->en_nic_cfg); + siena_link_state_t sls; + unsigned int mask; + efx_oword_t oword; + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_family, ==, EFX_FAMILY_SIENA); + + /* Test BIU */ + if ((rc = siena_nic_biu_test(enp)) != 0) + goto fail1; + + /* Clear the region register */ + EFX_POPULATE_OWORD_4(oword, + FRF_AZ_ADR_REGION0, 0, + FRF_AZ_ADR_REGION1, (1 << 16), + FRF_AZ_ADR_REGION2, (2 << 16), + FRF_AZ_ADR_REGION3, (3 << 16)); + EFX_BAR_WRITEO(enp, FR_AZ_ADR_REGION_REG, &oword); + + /* Read clear any assertion state */ + if ((rc = efx_mcdi_read_assertion(enp)) != 0) + goto fail2; + + /* Exit the assertion handler */ + if ((rc = efx_mcdi_exit_assertion_handler(enp)) != 0) + goto fail3; + + /* Wrestle control from the BMC */ + if ((rc = efx_mcdi_drv_attach(enp, B_TRUE)) != 0) + goto fail4; + + if ((rc = siena_board_cfg(enp)) != 0) + goto fail5; + + if ((rc = siena_phy_cfg(enp)) != 0) + goto fail6; + + /* Obtain the default PHY advertised capabilities */ + if ((rc = siena_nic_reset(enp)) != 0) + goto fail7; + if ((rc = siena_phy_get_link(enp, &sls)) != 0) + goto fail8; + epp->ep_default_adv_cap_mask = sls.sls_adv_cap_mask; + epp->ep_adv_cap_mask = sls.sls_adv_cap_mask; + +#if EFSYS_OPT_VPD || EFSYS_OPT_NVRAM + if ((rc = siena_nic_get_partn_mask(enp, &mask)) != 0) + goto fail9; + enp->en_u.siena.enu_partn_mask = mask; +#endif + +#if EFSYS_OPT_MAC_STATS + /* Wipe the MAC statistics */ + if ((rc = efx_mcdi_mac_stats_clear(enp)) != 0) + goto fail10; +#endif + +#if EFSYS_OPT_LOOPBACK + if ((rc = efx_mcdi_get_loopback_modes(enp)) != 0) + goto fail11; +#endif + +#if EFSYS_OPT_MON_STATS + if ((rc = mcdi_mon_cfg_build(enp)) != 0) + goto fail12; +#endif + + encp->enc_features = enp->en_features; + + return (0); + 
+#if EFSYS_OPT_MON_STATS +fail12: + EFSYS_PROBE(fail12); +#endif +#if EFSYS_OPT_LOOPBACK +fail11: + EFSYS_PROBE(fail11); +#endif +#if EFSYS_OPT_MAC_STATS +fail10: + EFSYS_PROBE(fail10); +#endif +#if EFSYS_OPT_VPD || EFSYS_OPT_NVRAM +fail9: + EFSYS_PROBE(fail9); +#endif +fail8: + EFSYS_PROBE(fail8); +fail7: + EFSYS_PROBE(fail7); +fail6: + EFSYS_PROBE(fail6); +fail5: + EFSYS_PROBE(fail5); +fail4: + EFSYS_PROBE(fail4); +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +siena_nic_reset( + __in efx_nic_t *enp) +{ + efx_mcdi_req_t req; + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_family, ==, EFX_FAMILY_SIENA); + + /* siena_nic_reset() is called to recover from BADASSERT failures. */ + if ((rc = efx_mcdi_read_assertion(enp)) != 0) + goto fail1; + if ((rc = efx_mcdi_exit_assertion_handler(enp)) != 0) + goto fail2; + + /* + * Bug24908: ENTITY_RESET_IN_LEN is non zero but zero may be supplied + * for backwards compatibility with PORT_RESET_IN_LEN. + */ + EFX_STATIC_ASSERT(MC_CMD_ENTITY_RESET_OUT_LEN == 0); + + req.emr_cmd = MC_CMD_ENTITY_RESET; + req.emr_in_buf = NULL; + req.emr_in_length = 0; + req.emr_out_buf = NULL; + req.emr_out_length = 0; + + efx_mcdi_execute(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail3; + } + + return (0); + +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (0); +} + +static void +siena_nic_rx_cfg( + __in efx_nic_t *enp) +{ + efx_oword_t oword; + + /* + * RX_INGR_EN is always enabled on Siena, because we rely on + * the RX parser to be resiliant to missing SOP/EOP. + */ + EFX_BAR_READO(enp, FR_AZ_RX_CFG_REG, &oword); + EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_INGR_EN, 1); + EFX_BAR_WRITEO(enp, FR_AZ_RX_CFG_REG, &oword); + + /* Disable parsing of additional 802.1Q in Q packets */ + EFX_BAR_READO(enp, FR_AZ_RX_FILTER_CTL_REG, &oword); + EFX_SET_OWORD_FIELD(oword, FRF_CZ_RX_FILTER_ALL_VLAN_ETHERTYPES, 0); + EFX_BAR_WRITEO(enp, FR_AZ_RX_FILTER_CTL_REG, &oword); +} + +static void +siena_nic_usrev_dis( + __in efx_nic_t *enp) +{ + efx_oword_t oword; + + EFX_POPULATE_OWORD_1(oword, FRF_CZ_USREV_DIS, 1); + EFX_BAR_WRITEO(enp, FR_CZ_USR_EV_CFG, &oword); +} + + __checkReturn efx_rc_t +siena_nic_init( + __in efx_nic_t *enp) +{ + efx_rc_t rc; + + EFSYS_ASSERT3U(enp->en_family, ==, EFX_FAMILY_SIENA); + + /* Enable reporting of some events (e.g. 
link change) */ + if ((rc = efx_mcdi_log_ctrl(enp)) != 0) + goto fail1; + + siena_sram_init(enp); + + /* Configure Siena's RX block */ + siena_nic_rx_cfg(enp); + + /* Disable USR_EVents for now */ + siena_nic_usrev_dis(enp); + + /* bug17057: Ensure set_link is called */ + if ((rc = siena_phy_reconfigure(enp)) != 0) + goto fail2; + + enp->en_nic_cfg.enc_mcdi_max_payload_length = MCDI_CTL_SDU_LEN_MAX_V1; + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + void +siena_nic_fini( + __in efx_nic_t *enp) +{ + _NOTE(ARGUNUSED(enp)) +} + + void +siena_nic_unprobe( + __in efx_nic_t *enp) +{ +#if EFSYS_OPT_MON_STATS + mcdi_mon_cfg_free(enp); +#endif /* EFSYS_OPT_MON_STATS */ + (void) efx_mcdi_drv_attach(enp, B_FALSE); +} + +#if EFSYS_OPT_DIAG + +static siena_register_set_t __siena_registers[] = { + { FR_AZ_ADR_REGION_REG_OFST, 0, 1 }, + { FR_CZ_USR_EV_CFG_OFST, 0, 1 }, + { FR_AZ_RX_CFG_REG_OFST, 0, 1 }, + { FR_AZ_TX_CFG_REG_OFST, 0, 1 }, + { FR_AZ_TX_RESERVED_REG_OFST, 0, 1 }, + { FR_AZ_SRM_TX_DC_CFG_REG_OFST, 0, 1 }, + { FR_AZ_RX_DC_CFG_REG_OFST, 0, 1 }, + { FR_AZ_RX_DC_PF_WM_REG_OFST, 0, 1 }, + { FR_AZ_DP_CTRL_REG_OFST, 0, 1 }, + { FR_BZ_RX_RSS_TKEY_REG_OFST, 0, 1}, + { FR_CZ_RX_RSS_IPV6_REG1_OFST, 0, 1}, + { FR_CZ_RX_RSS_IPV6_REG2_OFST, 0, 1}, + { FR_CZ_RX_RSS_IPV6_REG3_OFST, 0, 1} +}; + +static const uint32_t __siena_register_masks[] = { + 0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF, + 0x000103FF, 0x00000000, 0x00000000, 0x00000000, + 0xFFFFFFFE, 0xFFFFFFFF, 0x0003FFFF, 0x00000000, + 0x7FFF0037, 0xFFFF8000, 0xFFFFFFFF, 0x03FFFFFF, + 0xFFFEFE80, 0x1FFFFFFF, 0x020000FE, 0x007FFFFF, + 0x001FFFFF, 0x00000000, 0x00000000, 0x00000000, + 0x00000003, 0x00000000, 0x00000000, 0x00000000, + 0x000003FF, 0x00000000, 0x00000000, 0x00000000, + 0x00000FFF, 0x00000000, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000007, 0x00000000 +}; + +static siena_register_set_t __siena_tables[] = { + { FR_AZ_RX_FILTER_TBL0_OFST, FR_AZ_RX_FILTER_TBL0_STEP, + FR_AZ_RX_FILTER_TBL0_ROWS }, + { FR_CZ_RX_MAC_FILTER_TBL0_OFST, FR_CZ_RX_MAC_FILTER_TBL0_STEP, + FR_CZ_RX_MAC_FILTER_TBL0_ROWS }, + { FR_AZ_RX_DESC_PTR_TBL_OFST, + FR_AZ_RX_DESC_PTR_TBL_STEP, FR_CZ_RX_DESC_PTR_TBL_ROWS }, + { FR_AZ_TX_DESC_PTR_TBL_OFST, + FR_AZ_TX_DESC_PTR_TBL_STEP, FR_CZ_TX_DESC_PTR_TBL_ROWS }, + { FR_AZ_TIMER_TBL_OFST, FR_AZ_TIMER_TBL_STEP, FR_CZ_TIMER_TBL_ROWS }, + { FR_CZ_TX_FILTER_TBL0_OFST, + FR_CZ_TX_FILTER_TBL0_STEP, FR_CZ_TX_FILTER_TBL0_ROWS }, + { FR_CZ_TX_MAC_FILTER_TBL0_OFST, + FR_CZ_TX_MAC_FILTER_TBL0_STEP, FR_CZ_TX_MAC_FILTER_TBL0_ROWS } +}; + +static const uint32_t __siena_table_masks[] = { + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x000003FF, + 0xFFFF0FFF, 0xFFFFFFFF, 0x00000E7F, 0x00000000, + 0xFFFFFFFE, 0x0FFFFFFF, 0x01800000, 0x00000000, + 0xFFFFFFFE, 0x0FFFFFFF, 0x0C000000, 0x00000000, + 0x3FFFFFFF, 0x00000000, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x000013FF, + 0xFFFF07FF, 0xFFFFFFFF, 0x0000007F, 0x00000000, +}; + + __checkReturn efx_rc_t +siena_nic_test_registers( + __in efx_nic_t *enp, + __in siena_register_set_t *rsp, + __in size_t count) +{ + unsigned int bit; + efx_oword_t original; + efx_oword_t reg; + efx_oword_t buf; + efx_rc_t rc; + + while (count > 0) { + /* This function is only suitable for registers */ + EFSYS_ASSERT(rsp->rows == 1); + + /* bit sweep on and off */ + 
EFSYS_BAR_READO(enp->en_esbp, rsp->address, &original, + B_TRUE); + for (bit = 0; bit < 128; bit++) { + /* Is this bit in the mask? */ + if (~(rsp->mask.eo_u32[bit >> 5]) & (1 << bit)) + continue; + + /* Test this bit can be set in isolation */ + reg = original; + EFX_AND_OWORD(reg, rsp->mask); + EFX_SET_OWORD_BIT(reg, bit); + + EFSYS_BAR_WRITEO(enp->en_esbp, rsp->address, ®, + B_TRUE); + EFSYS_BAR_READO(enp->en_esbp, rsp->address, &buf, + B_TRUE); + + EFX_AND_OWORD(buf, rsp->mask); + if (memcmp(®, &buf, sizeof (reg))) { + rc = EIO; + goto fail1; + } + + /* Test this bit can be cleared in isolation */ + EFX_OR_OWORD(reg, rsp->mask); + EFX_CLEAR_OWORD_BIT(reg, bit); + + EFSYS_BAR_WRITEO(enp->en_esbp, rsp->address, ®, + B_TRUE); + EFSYS_BAR_READO(enp->en_esbp, rsp->address, &buf, + B_TRUE); + + EFX_AND_OWORD(buf, rsp->mask); + if (memcmp(®, &buf, sizeof (reg))) { + rc = EIO; + goto fail2; + } + } + + /* Restore the old value */ + EFSYS_BAR_WRITEO(enp->en_esbp, rsp->address, &original, + B_TRUE); + + --count; + ++rsp; + } + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + /* Restore the old value */ + EFSYS_BAR_WRITEO(enp->en_esbp, rsp->address, &original, B_TRUE); + + return (rc); +} + + __checkReturn efx_rc_t +siena_nic_test_tables( + __in efx_nic_t *enp, + __in siena_register_set_t *rsp, + __in efx_pattern_type_t pattern, + __in size_t count) +{ + efx_sram_pattern_fn_t func; + unsigned int index; + unsigned int address; + efx_oword_t reg; + efx_oword_t buf; + efx_rc_t rc; + + EFSYS_ASSERT(pattern < EFX_PATTERN_NTYPES); + func = __efx_sram_pattern_fns[pattern]; + + while (count > 0) { + /* Write */ + address = rsp->address; + for (index = 0; index < rsp->rows; ++index) { + func(2 * index + 0, B_FALSE, ®.eo_qword[0]); + func(2 * index + 1, B_FALSE, ®.eo_qword[1]); + EFX_AND_OWORD(reg, rsp->mask); + EFSYS_BAR_WRITEO(enp->en_esbp, address, ®, B_TRUE); + + address += rsp->step; + } + + /* Read */ + address = rsp->address; + for (index = 0; index < rsp->rows; ++index) { + func(2 * index + 0, B_FALSE, ®.eo_qword[0]); + func(2 * index + 1, B_FALSE, ®.eo_qword[1]); + EFX_AND_OWORD(reg, rsp->mask); + EFSYS_BAR_READO(enp->en_esbp, address, &buf, B_TRUE); + if (memcmp(®, &buf, sizeof (reg))) { + rc = EIO; + goto fail1; + } + + address += rsp->step; + } + + ++rsp; + --count; + } + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + + __checkReturn efx_rc_t +siena_nic_register_test( + __in efx_nic_t *enp) +{ + siena_register_set_t *rsp; + const uint32_t *dwordp; + unsigned int nitems; + unsigned int count; + efx_rc_t rc; + + /* Fill out the register mask entries */ + EFX_STATIC_ASSERT(EFX_ARRAY_SIZE(__siena_register_masks) + == EFX_ARRAY_SIZE(__siena_registers) * 4); + + nitems = EFX_ARRAY_SIZE(__siena_registers); + dwordp = __siena_register_masks; + for (count = 0; count < nitems; ++count) { + rsp = __siena_registers + count; + rsp->mask.eo_u32[0] = *dwordp++; + rsp->mask.eo_u32[1] = *dwordp++; + rsp->mask.eo_u32[2] = *dwordp++; + rsp->mask.eo_u32[3] = *dwordp++; + } + + /* Fill out the register table entries */ + EFX_STATIC_ASSERT(EFX_ARRAY_SIZE(__siena_table_masks) + == EFX_ARRAY_SIZE(__siena_tables) * 4); + + nitems = EFX_ARRAY_SIZE(__siena_tables); + dwordp = __siena_table_masks; + for (count = 0; count < nitems; ++count) { + rsp = __siena_tables + count; + rsp->mask.eo_u32[0] = *dwordp++; + rsp->mask.eo_u32[1] = *dwordp++; + rsp->mask.eo_u32[2] = *dwordp++; + rsp->mask.eo_u32[3] = *dwordp++; + } + + if ((rc = 
siena_nic_test_registers(enp, __siena_registers, + EFX_ARRAY_SIZE(__siena_registers))) != 0) + goto fail1; + + if ((rc = siena_nic_test_tables(enp, __siena_tables, + EFX_PATTERN_BYTE_ALTERNATE, + EFX_ARRAY_SIZE(__siena_tables))) != 0) + goto fail2; + + if ((rc = siena_nic_test_tables(enp, __siena_tables, + EFX_PATTERN_BYTE_CHANGING, + EFX_ARRAY_SIZE(__siena_tables))) != 0) + goto fail3; + + if ((rc = siena_nic_test_tables(enp, __siena_tables, + EFX_PATTERN_BIT_SWEEP, EFX_ARRAY_SIZE(__siena_tables))) != 0) + goto fail4; + + return (0); + +fail4: + EFSYS_PROBE(fail4); +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +#endif /* EFSYS_OPT_DIAG */ + +#endif /* EFSYS_OPT_SIENA */ diff --git a/src/spdk/dpdk/drivers/net/sfc/base/siena_nvram.c b/src/spdk/dpdk/drivers/net/sfc/base/siena_nvram.c new file mode 100644 index 000000000..e3178da40 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/base/siena_nvram.c @@ -0,0 +1,745 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2009-2019 Solarflare Communications Inc. + */ + +#include "efx.h" +#include "efx_impl.h" + +#if EFSYS_OPT_SIENA + +#if EFSYS_OPT_VPD || EFSYS_OPT_NVRAM + + __checkReturn efx_rc_t +siena_nvram_partn_size( + __in efx_nic_t *enp, + __in uint32_t partn, + __out size_t *sizep) +{ + efx_rc_t rc; + efx_nvram_info_t eni = { 0 }; + + if ((1 << partn) & ~enp->en_u.siena.enu_partn_mask) { + rc = ENOTSUP; + goto fail1; + } + + if ((rc = efx_mcdi_nvram_info(enp, partn, &eni)) != 0) + goto fail2; + + *sizep = eni.eni_partn_size; + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +siena_nvram_partn_info( + __in efx_nic_t *enp, + __in uint32_t partn, + __out efx_nvram_info_t * enip) +{ + efx_rc_t rc; + + if ((rc = efx_mcdi_nvram_info(enp, partn, enip)) != 0) + goto fail1; + + if (enip->eni_write_size == 0) + enip->eni_write_size = SIENA_NVRAM_CHUNK; + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + + __checkReturn efx_rc_t +siena_nvram_partn_lock( + __in efx_nic_t *enp, + __in uint32_t partn) +{ + efx_rc_t rc; + + if ((rc = efx_mcdi_nvram_update_start(enp, partn)) != 0) { + goto fail1; + } + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +siena_nvram_partn_read( + __in efx_nic_t *enp, + __in uint32_t partn, + __in unsigned int offset, + __out_bcount(size) caddr_t data, + __in size_t size) +{ + size_t chunk; + efx_rc_t rc; + + while (size > 0) { + chunk = MIN(size, SIENA_NVRAM_CHUNK); + + if ((rc = efx_mcdi_nvram_read(enp, partn, offset, data, chunk, + MC_CMD_NVRAM_READ_IN_V2_DEFAULT)) != 0) { + goto fail1; + } + + size -= chunk; + data += chunk; + offset += chunk; + } + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +siena_nvram_partn_erase( + __in efx_nic_t *enp, + __in uint32_t partn, + __in unsigned int offset, + __in size_t size) +{ + efx_rc_t rc; + + if ((rc = efx_mcdi_nvram_erase(enp, partn, offset, size)) != 0) { + goto fail1; + } + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +siena_nvram_partn_write( + __in efx_nic_t *enp, + __in uint32_t partn, + __in unsigned int offset, + __out_bcount(size) caddr_t data, + __in size_t size) +{ + size_t chunk; + efx_rc_t rc; + + while (size > 0) { + chunk = 
MIN(size, SIENA_NVRAM_CHUNK); + + if ((rc = efx_mcdi_nvram_write(enp, partn, offset, + data, chunk)) != 0) { + goto fail1; + } + + size -= chunk; + data += chunk; + offset += chunk; + } + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +siena_nvram_partn_unlock( + __in efx_nic_t *enp, + __in uint32_t partn, + __out_opt uint32_t *verify_resultp) +{ + boolean_t reboot; + uint32_t flags = 0; + efx_rc_t rc; + + /* + * Reboot into the new image only for PHYs. The driver has to + * explicitly cope with an MC reboot after a firmware update. + */ + reboot = (partn == MC_CMD_NVRAM_TYPE_PHY_PORT0 || + partn == MC_CMD_NVRAM_TYPE_PHY_PORT1 || + partn == MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO); + + rc = efx_mcdi_nvram_update_finish(enp, partn, reboot, flags, + verify_resultp); + if (rc != 0) + goto fail1; + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +#endif /* EFSYS_OPT_VPD || EFSYS_OPT_NVRAM */ + +#if EFSYS_OPT_NVRAM + +typedef struct siena_parttbl_entry_s { + unsigned int partn; + unsigned int port; + efx_nvram_type_t nvtype; +} siena_parttbl_entry_t; + +static siena_parttbl_entry_t siena_parttbl[] = { + {MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO, 1, EFX_NVRAM_NULLPHY}, + {MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO, 2, EFX_NVRAM_NULLPHY}, + {MC_CMD_NVRAM_TYPE_MC_FW, 1, EFX_NVRAM_MC_FIRMWARE}, + {MC_CMD_NVRAM_TYPE_MC_FW, 2, EFX_NVRAM_MC_FIRMWARE}, + {MC_CMD_NVRAM_TYPE_MC_FW_BACKUP, 1, EFX_NVRAM_MC_GOLDEN}, + {MC_CMD_NVRAM_TYPE_MC_FW_BACKUP, 2, EFX_NVRAM_MC_GOLDEN}, + {MC_CMD_NVRAM_TYPE_EXP_ROM, 1, EFX_NVRAM_BOOTROM}, + {MC_CMD_NVRAM_TYPE_EXP_ROM, 2, EFX_NVRAM_BOOTROM}, + {MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT0, 1, EFX_NVRAM_BOOTROM_CFG}, + {MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT1, 2, EFX_NVRAM_BOOTROM_CFG}, + {MC_CMD_NVRAM_TYPE_PHY_PORT0, 1, EFX_NVRAM_PHY}, + {MC_CMD_NVRAM_TYPE_PHY_PORT1, 2, EFX_NVRAM_PHY}, + {MC_CMD_NVRAM_TYPE_FPGA, 1, EFX_NVRAM_FPGA}, + {MC_CMD_NVRAM_TYPE_FPGA, 2, EFX_NVRAM_FPGA}, + {MC_CMD_NVRAM_TYPE_FPGA_BACKUP, 1, EFX_NVRAM_FPGA_BACKUP}, + {MC_CMD_NVRAM_TYPE_FPGA_BACKUP, 2, EFX_NVRAM_FPGA_BACKUP}, + {MC_CMD_NVRAM_TYPE_FC_FW, 1, EFX_NVRAM_FCFW}, + {MC_CMD_NVRAM_TYPE_FC_FW, 2, EFX_NVRAM_FCFW}, + {MC_CMD_NVRAM_TYPE_CPLD, 1, EFX_NVRAM_CPLD}, + {MC_CMD_NVRAM_TYPE_CPLD, 2, EFX_NVRAM_CPLD}, + {MC_CMD_NVRAM_TYPE_LICENSE, 1, EFX_NVRAM_LICENSE}, + {MC_CMD_NVRAM_TYPE_LICENSE, 2, EFX_NVRAM_LICENSE} +}; + + __checkReturn efx_rc_t +siena_nvram_type_to_partn( + __in efx_nic_t *enp, + __in efx_nvram_type_t type, + __out uint32_t *partnp) +{ + efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); + unsigned int i; + + EFSYS_ASSERT3U(type, !=, EFX_NVRAM_INVALID); + EFSYS_ASSERT3U(type, <, EFX_NVRAM_NTYPES); + EFSYS_ASSERT(partnp != NULL); + + for (i = 0; i < EFX_ARRAY_SIZE(siena_parttbl); i++) { + siena_parttbl_entry_t *entry = &siena_parttbl[i]; + + if (entry->port == emip->emi_port && entry->nvtype == type) { + *partnp = entry->partn; + return (0); + } + } + + return (ENOTSUP); +} + + +#if EFSYS_OPT_DIAG + + __checkReturn efx_rc_t +siena_nvram_test( + __in efx_nic_t *enp) +{ + efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); + siena_parttbl_entry_t *entry; + unsigned int i; + efx_rc_t rc; + + /* + * Iterate over the list of supported partition types + * applicable to *this* port + */ + for (i = 0; i < EFX_ARRAY_SIZE(siena_parttbl); i++) { + entry = &siena_parttbl[i]; + + if (entry->port != emip->emi_port || + !(enp->en_u.siena.enu_partn_mask & (1 << entry->partn))) + continue; + + if ((rc = efx_mcdi_nvram_test(enp, 
entry->partn)) != 0) { + goto fail1; + } + } + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +#endif /* EFSYS_OPT_DIAG */ + + +#define SIENA_DYNAMIC_CFG_SIZE(_nitems) \ + (sizeof (siena_mc_dynamic_config_hdr_t) + ((_nitems) * \ + sizeof (((siena_mc_dynamic_config_hdr_t *)NULL)->fw_version[0]))) + + __checkReturn efx_rc_t +siena_nvram_get_dynamic_cfg( + __in efx_nic_t *enp, + __in uint32_t partn, + __in boolean_t vpd, + __out siena_mc_dynamic_config_hdr_t **dcfgp, + __out size_t *sizep) +{ + siena_mc_dynamic_config_hdr_t *dcfg = NULL; + size_t size; + uint8_t cksum; + unsigned int vpd_offset; + unsigned int vpd_length; + unsigned int hdr_length; + unsigned int nversions; + unsigned int pos; + unsigned int region; + efx_rc_t rc; + + EFSYS_ASSERT(partn == MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0 || + partn == MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1); + + /* + * Allocate sufficient memory for the entire dynamiccfg area, even + * if we're not actually going to read in the VPD. + */ + if ((rc = siena_nvram_partn_size(enp, partn, &size)) != 0) + goto fail1; + + if (size < SIENA_NVRAM_CHUNK) { + rc = EINVAL; + goto fail2; + } + + EFSYS_KMEM_ALLOC(enp->en_esip, size, dcfg); + if (dcfg == NULL) { + rc = ENOMEM; + goto fail3; + } + + if ((rc = siena_nvram_partn_read(enp, partn, 0, + (caddr_t)dcfg, SIENA_NVRAM_CHUNK)) != 0) + goto fail4; + + /* Verify the magic */ + if (EFX_DWORD_FIELD(dcfg->magic, EFX_DWORD_0) + != SIENA_MC_DYNAMIC_CONFIG_MAGIC) + goto invalid1; + + /* All future versions of the structure must be backwards compatible */ + EFX_STATIC_ASSERT(SIENA_MC_DYNAMIC_CONFIG_VERSION == 0); + + hdr_length = EFX_WORD_FIELD(dcfg->length, EFX_WORD_0); + nversions = EFX_DWORD_FIELD(dcfg->num_fw_version_items, EFX_DWORD_0); + vpd_offset = EFX_DWORD_FIELD(dcfg->dynamic_vpd_offset, EFX_DWORD_0); + vpd_length = EFX_DWORD_FIELD(dcfg->dynamic_vpd_length, EFX_DWORD_0); + + /* Verify the hdr doesn't overflow the partn size */ + if (hdr_length > size || vpd_offset > size || vpd_length > size || + vpd_length + vpd_offset > size) + goto invalid2; + + /* Verify the header has room for all it's versions */ + if (hdr_length < SIENA_DYNAMIC_CFG_SIZE(0) || + hdr_length < SIENA_DYNAMIC_CFG_SIZE(nversions)) + goto invalid3; + + /* + * Read the remaining portion of the dcfg, either including + * the whole of VPD (there is no vpd length in this structure, + * so we have to parse each tag), or just the dcfg header itself + */ + region = vpd ? vpd_offset + vpd_length : hdr_length; + if (region > SIENA_NVRAM_CHUNK) { + if ((rc = siena_nvram_partn_read(enp, partn, SIENA_NVRAM_CHUNK, + (caddr_t)dcfg + SIENA_NVRAM_CHUNK, + region - SIENA_NVRAM_CHUNK)) != 0) + goto fail5; + } + + /* Verify checksum */ + cksum = 0; + for (pos = 0; pos < hdr_length; pos++) + cksum += ((uint8_t *)dcfg)[pos]; + if (cksum != 0) + goto invalid4; + + goto done; + +invalid4: + EFSYS_PROBE(invalid4); +invalid3: + EFSYS_PROBE(invalid3); +invalid2: + EFSYS_PROBE(invalid2); +invalid1: + EFSYS_PROBE(invalid1); + + /* + * Construct a new "null" dcfg, with an empty version vector, + * and an empty VPD chunk trailing. This has the neat side effect + * of testing the exception paths in the write path. 
+ */ + EFX_POPULATE_DWORD_1(dcfg->magic, + EFX_DWORD_0, SIENA_MC_DYNAMIC_CONFIG_MAGIC); + EFX_POPULATE_WORD_1(dcfg->length, EFX_WORD_0, sizeof (*dcfg)); + EFX_POPULATE_BYTE_1(dcfg->version, EFX_BYTE_0, + SIENA_MC_DYNAMIC_CONFIG_VERSION); + EFX_POPULATE_DWORD_1(dcfg->dynamic_vpd_offset, + EFX_DWORD_0, sizeof (*dcfg)); + EFX_POPULATE_DWORD_1(dcfg->dynamic_vpd_length, EFX_DWORD_0, 0); + EFX_POPULATE_DWORD_1(dcfg->num_fw_version_items, EFX_DWORD_0, 0); + +done: + *dcfgp = dcfg; + *sizep = size; + + return (0); + +fail5: + EFSYS_PROBE(fail5); +fail4: + EFSYS_PROBE(fail4); + + EFSYS_KMEM_FREE(enp->en_esip, size, dcfg); + +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +siena_nvram_get_subtype( + __in efx_nic_t *enp, + __in uint32_t partn, + __out uint32_t *subtypep) +{ + efx_mcdi_req_t req; + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_BOARD_CFG_IN_LEN, + MC_CMD_GET_BOARD_CFG_OUT_LENMAX); + efx_word_t *fw_list; + efx_rc_t rc; + + req.emr_cmd = MC_CMD_GET_BOARD_CFG; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_GET_BOARD_CFG_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_GET_BOARD_CFG_OUT_LENMAX; + + efx_mcdi_execute(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail1; + } + + if (req.emr_out_length_used < MC_CMD_GET_BOARD_CFG_OUT_LENMIN) { + rc = EMSGSIZE; + goto fail2; + } + + if (req.emr_out_length_used < + MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST + + (partn + 1) * sizeof (efx_word_t)) { + rc = ENOENT; + goto fail3; + } + + fw_list = MCDI_OUT2(req, efx_word_t, + GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST); + *subtypep = EFX_WORD_FIELD(fw_list[partn], EFX_WORD_0); + + return (0); + +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +siena_nvram_partn_get_version( + __in efx_nic_t *enp, + __in uint32_t partn, + __out uint32_t *subtypep, + __out_ecount(4) uint16_t version[4]) +{ + siena_mc_dynamic_config_hdr_t *dcfg; + siena_parttbl_entry_t *entry; + uint32_t dcfg_partn; + unsigned int i; + efx_rc_t rc; + + if ((1 << partn) & ~enp->en_u.siena.enu_partn_mask) { + rc = ENOTSUP; + goto fail1; + } + + if ((rc = siena_nvram_get_subtype(enp, partn, subtypep)) != 0) + goto fail2; + + /* + * Some partitions are accessible from both ports (for instance BOOTROM) + * Find the highest version reported by all dcfg structures on ports + * that have access to this partition. + */ + version[0] = version[1] = version[2] = version[3] = 0; + for (i = 0; i < EFX_ARRAY_SIZE(siena_parttbl); i++) { + siena_mc_fw_version_t *verp; + unsigned int nitems; + uint16_t temp[4]; + size_t length; + + entry = &siena_parttbl[i]; + if (entry->partn != partn) + continue; + + dcfg_partn = (entry->port == 1) + ? MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0 + : MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1; + /* + * Ingore missing partitions on port 2, assuming they're due + * to to running on a single port part. 
+ */ + if ((1 << dcfg_partn) & ~enp->en_u.siena.enu_partn_mask) { + if (entry->port == 2) + continue; + } + + if ((rc = siena_nvram_get_dynamic_cfg(enp, dcfg_partn, + B_FALSE, &dcfg, &length)) != 0) + goto fail3; + + nitems = EFX_DWORD_FIELD(dcfg->num_fw_version_items, + EFX_DWORD_0); + if (nitems < entry->partn) + goto done; + + verp = &dcfg->fw_version[partn]; + temp[0] = EFX_WORD_FIELD(verp->version_w, EFX_WORD_0); + temp[1] = EFX_WORD_FIELD(verp->version_x, EFX_WORD_0); + temp[2] = EFX_WORD_FIELD(verp->version_y, EFX_WORD_0); + temp[3] = EFX_WORD_FIELD(verp->version_z, EFX_WORD_0); + if (memcmp(version, temp, sizeof (temp)) < 0) + memcpy(version, temp, sizeof (temp)); + +done: + EFSYS_KMEM_FREE(enp->en_esip, length, dcfg); + } + + return (0); + +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +siena_nvram_partn_rw_start( + __in efx_nic_t *enp, + __in uint32_t partn, + __out size_t *chunk_sizep) +{ + efx_rc_t rc; + + if ((rc = siena_nvram_partn_lock(enp, partn)) != 0) + goto fail1; + + if (chunk_sizep != NULL) + *chunk_sizep = SIENA_NVRAM_CHUNK; + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +siena_nvram_partn_rw_finish( + __in efx_nic_t *enp, + __in uint32_t partn, + __out_opt uint32_t *verify_resultp) +{ + efx_rc_t rc; + + if ((rc = siena_nvram_partn_unlock(enp, partn, verify_resultp)) != 0) + goto fail1; + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +siena_nvram_partn_set_version( + __in efx_nic_t *enp, + __in uint32_t partn, + __in_ecount(4) uint16_t version[4]) +{ + efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); + siena_mc_dynamic_config_hdr_t *dcfg = NULL; + siena_mc_fw_version_t *fwverp; + uint32_t dcfg_partn; + size_t dcfg_size; + unsigned int hdr_length; + unsigned int vpd_length; + unsigned int vpd_offset; + unsigned int nitems; + unsigned int required_hdr_length; + unsigned int pos; + uint8_t cksum; + uint32_t subtype; + size_t length; + efx_rc_t rc; + + dcfg_partn = (emip->emi_port == 1) + ? MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0 + : MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1; + + if ((rc = siena_nvram_partn_size(enp, dcfg_partn, &dcfg_size)) != 0) + goto fail1; + + if ((rc = siena_nvram_partn_lock(enp, dcfg_partn)) != 0) + goto fail2; + + if ((rc = siena_nvram_get_dynamic_cfg(enp, dcfg_partn, + B_TRUE, &dcfg, &length)) != 0) + goto fail3; + + hdr_length = EFX_WORD_FIELD(dcfg->length, EFX_WORD_0); + nitems = EFX_DWORD_FIELD(dcfg->num_fw_version_items, EFX_DWORD_0); + vpd_length = EFX_DWORD_FIELD(dcfg->dynamic_vpd_length, EFX_DWORD_0); + vpd_offset = EFX_DWORD_FIELD(dcfg->dynamic_vpd_offset, EFX_DWORD_0); + + /* + * NOTE: This function will blatt any fields trailing the version + * vector, or the VPD chunk. 
+ */ + required_hdr_length = SIENA_DYNAMIC_CFG_SIZE(partn + 1); + if (required_hdr_length + vpd_length > length) { + rc = ENOSPC; + goto fail4; + } + + if (vpd_offset < required_hdr_length) { + (void) memmove((caddr_t)dcfg + required_hdr_length, + (caddr_t)dcfg + vpd_offset, vpd_length); + vpd_offset = required_hdr_length; + EFX_POPULATE_DWORD_1(dcfg->dynamic_vpd_offset, + EFX_DWORD_0, vpd_offset); + } + + if (hdr_length < required_hdr_length) { + (void) memset((caddr_t)dcfg + hdr_length, 0, + required_hdr_length - hdr_length); + hdr_length = required_hdr_length; + EFX_POPULATE_WORD_1(dcfg->length, + EFX_WORD_0, hdr_length); + } + + /* Get the subtype to insert into the fw_subtype array */ + if ((rc = siena_nvram_get_subtype(enp, partn, &subtype)) != 0) + goto fail5; + + /* Fill out the new version */ + fwverp = &dcfg->fw_version[partn]; + EFX_POPULATE_DWORD_1(fwverp->fw_subtype, EFX_DWORD_0, subtype); + EFX_POPULATE_WORD_1(fwverp->version_w, EFX_WORD_0, version[0]); + EFX_POPULATE_WORD_1(fwverp->version_x, EFX_WORD_0, version[1]); + EFX_POPULATE_WORD_1(fwverp->version_y, EFX_WORD_0, version[2]); + EFX_POPULATE_WORD_1(fwverp->version_z, EFX_WORD_0, version[3]); + + /* Update the version count */ + if (nitems < partn + 1) { + nitems = partn + 1; + EFX_POPULATE_DWORD_1(dcfg->num_fw_version_items, + EFX_DWORD_0, nitems); + } + + /* Update the checksum */ + cksum = 0; + for (pos = 0; pos < hdr_length; pos++) + cksum += ((uint8_t *)dcfg)[pos]; + dcfg->csum.eb_u8[0] -= cksum; + + /* Erase and write the new partition */ + if ((rc = siena_nvram_partn_erase(enp, dcfg_partn, 0, dcfg_size)) != 0) + goto fail6; + + /* Write out the new structure to nvram */ + if ((rc = siena_nvram_partn_write(enp, dcfg_partn, 0, + (caddr_t)dcfg, vpd_offset + vpd_length)) != 0) + goto fail7; + + EFSYS_KMEM_FREE(enp->en_esip, length, dcfg); + + siena_nvram_partn_unlock(enp, dcfg_partn, NULL); + + return (0); + +fail7: + EFSYS_PROBE(fail7); +fail6: + EFSYS_PROBE(fail6); +fail5: + EFSYS_PROBE(fail5); +fail4: + EFSYS_PROBE(fail4); + + EFSYS_KMEM_FREE(enp->en_esip, length, dcfg); +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +#endif /* EFSYS_OPT_NVRAM */ + +#endif /* EFSYS_OPT_SIENA */ diff --git a/src/spdk/dpdk/drivers/net/sfc/base/siena_phy.c b/src/spdk/dpdk/drivers/net/sfc/base/siena_phy.c new file mode 100644 index 000000000..f3bbb3f65 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/base/siena_phy.c @@ -0,0 +1,776 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2009-2019 Solarflare Communications Inc. 
+ */ + +#include "efx.h" +#include "efx_impl.h" + +#if EFSYS_OPT_SIENA + +static void +siena_phy_decode_cap( + __in uint32_t mcdi_cap, + __out uint32_t *maskp) +{ + uint32_t mask; + + mask = 0; + if (mcdi_cap & (1 << MC_CMD_PHY_CAP_10HDX_LBN)) + mask |= (1 << EFX_PHY_CAP_10HDX); + if (mcdi_cap & (1 << MC_CMD_PHY_CAP_10FDX_LBN)) + mask |= (1 << EFX_PHY_CAP_10FDX); + if (mcdi_cap & (1 << MC_CMD_PHY_CAP_100HDX_LBN)) + mask |= (1 << EFX_PHY_CAP_100HDX); + if (mcdi_cap & (1 << MC_CMD_PHY_CAP_100FDX_LBN)) + mask |= (1 << EFX_PHY_CAP_100FDX); + if (mcdi_cap & (1 << MC_CMD_PHY_CAP_1000HDX_LBN)) + mask |= (1 << EFX_PHY_CAP_1000HDX); + if (mcdi_cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN)) + mask |= (1 << EFX_PHY_CAP_1000FDX); + if (mcdi_cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN)) + mask |= (1 << EFX_PHY_CAP_10000FDX); + if (mcdi_cap & (1 << MC_CMD_PHY_CAP_PAUSE_LBN)) + mask |= (1 << EFX_PHY_CAP_PAUSE); + if (mcdi_cap & (1 << MC_CMD_PHY_CAP_ASYM_LBN)) + mask |= (1 << EFX_PHY_CAP_ASYM); + if (mcdi_cap & (1 << MC_CMD_PHY_CAP_AN_LBN)) + mask |= (1 << EFX_PHY_CAP_AN); + + *maskp = mask; +} + +static void +siena_phy_decode_link_mode( + __in efx_nic_t *enp, + __in uint32_t link_flags, + __in unsigned int speed, + __in unsigned int fcntl, + __out efx_link_mode_t *link_modep, + __out unsigned int *fcntlp) +{ + boolean_t fd = !!(link_flags & + (1 << MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN)); + boolean_t up = !!(link_flags & + (1 << MC_CMD_GET_LINK_OUT_LINK_UP_LBN)); + + _NOTE(ARGUNUSED(enp)) + + if (!up) + *link_modep = EFX_LINK_DOWN; + else if (speed == 10000 && fd) + *link_modep = EFX_LINK_10000FDX; + else if (speed == 1000) + *link_modep = fd ? EFX_LINK_1000FDX : EFX_LINK_1000HDX; + else if (speed == 100) + *link_modep = fd ? EFX_LINK_100FDX : EFX_LINK_100HDX; + else if (speed == 10) + *link_modep = fd ? EFX_LINK_10FDX : EFX_LINK_10HDX; + else + *link_modep = EFX_LINK_UNKNOWN; + + if (fcntl == MC_CMD_FCNTL_OFF) + *fcntlp = 0; + else if (fcntl == MC_CMD_FCNTL_RESPOND) + *fcntlp = EFX_FCNTL_RESPOND; + else if (fcntl == MC_CMD_FCNTL_BIDIR) + *fcntlp = EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE; + else { + EFSYS_PROBE1(mc_pcol_error, int, fcntl); + *fcntlp = 0; + } +} + + void +siena_phy_link_ev( + __in efx_nic_t *enp, + __in efx_qword_t *eqp, + __out efx_link_mode_t *link_modep) +{ + efx_port_t *epp = &(enp->en_port); + unsigned int link_flags; + unsigned int speed; + unsigned int fcntl; + efx_link_mode_t link_mode; + uint32_t lp_cap_mask; + + /* + * Convert the LINKCHANGE speed enumeration into mbit/s, in the + * same way as GET_LINK encodes the speed + */ + switch (MCDI_EV_FIELD(eqp, LINKCHANGE_SPEED)) { + case MCDI_EVENT_LINKCHANGE_SPEED_100M: + speed = 100; + break; + case MCDI_EVENT_LINKCHANGE_SPEED_1G: + speed = 1000; + break; + case MCDI_EVENT_LINKCHANGE_SPEED_10G: + speed = 10000; + break; + default: + speed = 0; + break; + } + + link_flags = MCDI_EV_FIELD(eqp, LINKCHANGE_LINK_FLAGS); + siena_phy_decode_link_mode(enp, link_flags, speed, + MCDI_EV_FIELD(eqp, LINKCHANGE_FCNTL), + &link_mode, &fcntl); + siena_phy_decode_cap(MCDI_EV_FIELD(eqp, LINKCHANGE_LP_CAP), + &lp_cap_mask); + + /* + * It's safe to update ep_lp_cap_mask without the driver's port lock + * because presumably any concurrently running efx_port_poll() is + * only going to arrive at the same value. + * + * ep_fcntl has two meanings. It's either the link common fcntl + * (if the PHY supports AN), or it's the forced link state. If + * the former, it's safe to update the value for the same reason as + * for ep_lp_cap_mask. 
If the latter, then just ignore the value, + * because we can race with efx_mac_fcntl_set(). + */ + epp->ep_lp_cap_mask = lp_cap_mask; + if (epp->ep_phy_cap_mask & (1 << EFX_PHY_CAP_AN)) + epp->ep_fcntl = fcntl; + + *link_modep = link_mode; +} + + __checkReturn efx_rc_t +siena_phy_power( + __in efx_nic_t *enp, + __in boolean_t power) +{ + efx_rc_t rc; + + if (!power) + return (0); + + /* Check if the PHY is a zombie */ + if ((rc = siena_phy_verify(enp)) != 0) + goto fail1; + + enp->en_reset_flags |= EFX_RESET_PHY; + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +siena_phy_get_link( + __in efx_nic_t *enp, + __out siena_link_state_t *slsp) +{ + efx_mcdi_req_t req; + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_LINK_IN_LEN, + MC_CMD_GET_LINK_OUT_LEN); + efx_rc_t rc; + + req.emr_cmd = MC_CMD_GET_LINK; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_GET_LINK_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_GET_LINK_OUT_LEN; + + efx_mcdi_execute(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail1; + } + + if (req.emr_out_length_used < MC_CMD_GET_LINK_OUT_LEN) { + rc = EMSGSIZE; + goto fail2; + } + + siena_phy_decode_cap(MCDI_OUT_DWORD(req, GET_LINK_OUT_CAP), + &slsp->sls_adv_cap_mask); + siena_phy_decode_cap(MCDI_OUT_DWORD(req, GET_LINK_OUT_LP_CAP), + &slsp->sls_lp_cap_mask); + + siena_phy_decode_link_mode(enp, MCDI_OUT_DWORD(req, GET_LINK_OUT_FLAGS), + MCDI_OUT_DWORD(req, GET_LINK_OUT_LINK_SPEED), + MCDI_OUT_DWORD(req, GET_LINK_OUT_FCNTL), + &slsp->sls_link_mode, &slsp->sls_fcntl); + +#if EFSYS_OPT_LOOPBACK + /* Assert the MC_CMD_LOOPBACK and EFX_LOOPBACK namespace agree */ + EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_NONE == EFX_LOOPBACK_OFF); + EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_DATA == EFX_LOOPBACK_DATA); + EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMAC == EFX_LOOPBACK_GMAC); + EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGMII == EFX_LOOPBACK_XGMII); + EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGXS == EFX_LOOPBACK_XGXS); + EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI == EFX_LOOPBACK_XAUI); + EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMII == EFX_LOOPBACK_GMII); + EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SGMII == EFX_LOOPBACK_SGMII); + EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGBR == EFX_LOOPBACK_XGBR); + EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XFI == EFX_LOOPBACK_XFI); + EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI_FAR == EFX_LOOPBACK_XAUI_FAR); + EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMII_FAR == EFX_LOOPBACK_GMII_FAR); + EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SGMII_FAR == EFX_LOOPBACK_SGMII_FAR); + EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XFI_FAR == EFX_LOOPBACK_XFI_FAR); + EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GPHY == EFX_LOOPBACK_GPHY); + EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PHYXS == EFX_LOOPBACK_PHY_XS); + EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PCS == EFX_LOOPBACK_PCS); + EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PMAPMD == EFX_LOOPBACK_PMA_PMD); + + slsp->sls_loopback = MCDI_OUT_DWORD(req, GET_LINK_OUT_LOOPBACK_MODE); +#endif /* EFSYS_OPT_LOOPBACK */ + + slsp->sls_mac_up = MCDI_OUT_DWORD(req, GET_LINK_OUT_MAC_FAULT) == 0; + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +siena_phy_reconfigure( + __in efx_nic_t *enp) +{ + efx_port_t *epp = &(enp->en_port); + efx_mcdi_req_t req; + EFX_MCDI_DECLARE_BUF(payload, + MAX(MC_CMD_SET_ID_LED_IN_LEN, MC_CMD_SET_LINK_IN_LEN), + MAX(MC_CMD_SET_ID_LED_OUT_LEN, MC_CMD_SET_LINK_OUT_LEN)); + uint32_t cap_mask; +#if EFSYS_OPT_PHY_LED_CONTROL + unsigned int led_mode; +#endif 
+ unsigned int speed; + efx_rc_t rc; + + req.emr_cmd = MC_CMD_SET_LINK; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_SET_LINK_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_SET_LINK_OUT_LEN; + + cap_mask = epp->ep_adv_cap_mask; + MCDI_IN_POPULATE_DWORD_10(req, SET_LINK_IN_CAP, + PHY_CAP_10HDX, (cap_mask >> EFX_PHY_CAP_10HDX) & 0x1, + PHY_CAP_10FDX, (cap_mask >> EFX_PHY_CAP_10FDX) & 0x1, + PHY_CAP_100HDX, (cap_mask >> EFX_PHY_CAP_100HDX) & 0x1, + PHY_CAP_100FDX, (cap_mask >> EFX_PHY_CAP_100FDX) & 0x1, + PHY_CAP_1000HDX, (cap_mask >> EFX_PHY_CAP_1000HDX) & 0x1, + PHY_CAP_1000FDX, (cap_mask >> EFX_PHY_CAP_1000FDX) & 0x1, + PHY_CAP_10000FDX, (cap_mask >> EFX_PHY_CAP_10000FDX) & 0x1, + PHY_CAP_PAUSE, (cap_mask >> EFX_PHY_CAP_PAUSE) & 0x1, + PHY_CAP_ASYM, (cap_mask >> EFX_PHY_CAP_ASYM) & 0x1, + PHY_CAP_AN, (cap_mask >> EFX_PHY_CAP_AN) & 0x1); + +#if EFSYS_OPT_LOOPBACK + MCDI_IN_SET_DWORD(req, SET_LINK_IN_LOOPBACK_MODE, + epp->ep_loopback_type); + switch (epp->ep_loopback_link_mode) { + case EFX_LINK_100FDX: + speed = 100; + break; + case EFX_LINK_1000FDX: + speed = 1000; + break; + case EFX_LINK_10000FDX: + speed = 10000; + break; + default: + speed = 0; + } +#else + MCDI_IN_SET_DWORD(req, SET_LINK_IN_LOOPBACK_MODE, MC_CMD_LOOPBACK_NONE); + speed = 0; +#endif /* EFSYS_OPT_LOOPBACK */ + MCDI_IN_SET_DWORD(req, SET_LINK_IN_LOOPBACK_SPEED, speed); + +#if EFSYS_OPT_PHY_FLAGS + MCDI_IN_SET_DWORD(req, SET_LINK_IN_FLAGS, epp->ep_phy_flags); +#else + MCDI_IN_SET_DWORD(req, SET_LINK_IN_FLAGS, 0); +#endif /* EFSYS_OPT_PHY_FLAGS */ + + efx_mcdi_execute(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail1; + } + + /* And set the blink mode */ + (void) memset(payload, 0, sizeof (payload)); + req.emr_cmd = MC_CMD_SET_ID_LED; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_SET_ID_LED_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_SET_ID_LED_OUT_LEN; + +#if EFSYS_OPT_PHY_LED_CONTROL + switch (epp->ep_phy_led_mode) { + case EFX_PHY_LED_DEFAULT: + led_mode = MC_CMD_LED_DEFAULT; + break; + case EFX_PHY_LED_OFF: + led_mode = MC_CMD_LED_OFF; + break; + case EFX_PHY_LED_ON: + led_mode = MC_CMD_LED_ON; + break; + default: + EFSYS_ASSERT(0); + led_mode = MC_CMD_LED_DEFAULT; + } + + MCDI_IN_SET_DWORD(req, SET_ID_LED_IN_STATE, led_mode); +#else + MCDI_IN_SET_DWORD(req, SET_ID_LED_IN_STATE, MC_CMD_LED_DEFAULT); +#endif /* EFSYS_OPT_PHY_LED_CONTROL */ + + efx_mcdi_execute(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail2; + } + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +siena_phy_verify( + __in efx_nic_t *enp) +{ + efx_mcdi_req_t req; + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_PHY_STATE_IN_LEN, + MC_CMD_GET_PHY_STATE_OUT_LEN); + uint32_t state; + efx_rc_t rc; + + req.emr_cmd = MC_CMD_GET_PHY_STATE; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_GET_PHY_STATE_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_GET_PHY_STATE_OUT_LEN; + + efx_mcdi_execute(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail1; + } + + if (req.emr_out_length_used < MC_CMD_GET_PHY_STATE_OUT_LEN) { + rc = EMSGSIZE; + goto fail2; + } + + state = MCDI_OUT_DWORD(req, GET_PHY_STATE_OUT_STATE); + if (state != MC_CMD_PHY_STATE_OK) { + if (state != MC_CMD_PHY_STATE_ZOMBIE) + EFSYS_PROBE1(mc_pcol_error, int, state); + rc = ENOTACTIVE; + goto fail3; + } + + return (0); + +fail3: + EFSYS_PROBE(fail3); +fail2: + 
EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +siena_phy_oui_get( + __in efx_nic_t *enp, + __out uint32_t *ouip) +{ + _NOTE(ARGUNUSED(enp, ouip)) + + return (ENOTSUP); +} + +#if EFSYS_OPT_PHY_STATS + +#define SIENA_SIMPLE_STAT_SET(_vmask, _esmp, _smask, _stat, \ + _mc_record, _efx_record) \ + if ((_vmask) & (1ULL << (_mc_record))) { \ + (_smask) |= (1ULL << (_efx_record)); \ + if ((_stat) != NULL && !EFSYS_MEM_IS_NULL(_esmp)) { \ + efx_dword_t dword; \ + EFSYS_MEM_READD(_esmp, (_mc_record) * 4, &dword);\ + (_stat)[_efx_record] = \ + EFX_DWORD_FIELD(dword, EFX_DWORD_0); \ + } \ + } + +#define SIENA_SIMPLE_STAT_SET2(_vmask, _esmp, _smask, _stat, _record) \ + SIENA_SIMPLE_STAT_SET(_vmask, _esmp, _smask, _stat, \ + MC_CMD_ ## _record, \ + EFX_PHY_STAT_ ## _record) + + void +siena_phy_decode_stats( + __in efx_nic_t *enp, + __in uint32_t vmask, + __in_opt efsys_mem_t *esmp, + __out_opt uint64_t *smaskp, + __inout_ecount_opt(EFX_PHY_NSTATS) uint32_t *stat) +{ + uint64_t smask = 0; + + _NOTE(ARGUNUSED(enp)) + + SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, OUI); + SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, PMA_PMD_LINK_UP); + SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, PMA_PMD_RX_FAULT); + SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, PMA_PMD_TX_FAULT); + + if (vmask & (1 << MC_CMD_PMA_PMD_SIGNAL)) { + smask |= ((1ULL << EFX_PHY_STAT_PMA_PMD_SIGNAL_A) | + (1ULL << EFX_PHY_STAT_PMA_PMD_SIGNAL_B) | + (1ULL << EFX_PHY_STAT_PMA_PMD_SIGNAL_C) | + (1ULL << EFX_PHY_STAT_PMA_PMD_SIGNAL_D)); + if (stat != NULL && esmp != NULL && !EFSYS_MEM_IS_NULL(esmp)) { + efx_dword_t dword; + uint32_t sig; + EFSYS_MEM_READD(esmp, 4 * MC_CMD_PMA_PMD_SIGNAL, + &dword); + sig = EFX_DWORD_FIELD(dword, EFX_DWORD_0); + stat[EFX_PHY_STAT_PMA_PMD_SIGNAL_A] = (sig >> 1) & 1; + stat[EFX_PHY_STAT_PMA_PMD_SIGNAL_B] = (sig >> 2) & 1; + stat[EFX_PHY_STAT_PMA_PMD_SIGNAL_C] = (sig >> 3) & 1; + stat[EFX_PHY_STAT_PMA_PMD_SIGNAL_D] = (sig >> 4) & 1; + } + } + + SIENA_SIMPLE_STAT_SET(vmask, esmp, smask, stat, MC_CMD_PMA_PMD_SNR_A, + EFX_PHY_STAT_SNR_A); + SIENA_SIMPLE_STAT_SET(vmask, esmp, smask, stat, MC_CMD_PMA_PMD_SNR_B, + EFX_PHY_STAT_SNR_B); + SIENA_SIMPLE_STAT_SET(vmask, esmp, smask, stat, MC_CMD_PMA_PMD_SNR_C, + EFX_PHY_STAT_SNR_C); + SIENA_SIMPLE_STAT_SET(vmask, esmp, smask, stat, MC_CMD_PMA_PMD_SNR_D, + EFX_PHY_STAT_SNR_D); + + SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, PCS_LINK_UP); + SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, PCS_RX_FAULT); + SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, PCS_TX_FAULT); + SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, PCS_BER); + SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, PCS_BLOCK_ERRORS); + + SIENA_SIMPLE_STAT_SET(vmask, esmp, smask, stat, MC_CMD_PHYXS_LINK_UP, + EFX_PHY_STAT_PHY_XS_LINK_UP); + SIENA_SIMPLE_STAT_SET(vmask, esmp, smask, stat, MC_CMD_PHYXS_RX_FAULT, + EFX_PHY_STAT_PHY_XS_RX_FAULT); + SIENA_SIMPLE_STAT_SET(vmask, esmp, smask, stat, MC_CMD_PHYXS_TX_FAULT, + EFX_PHY_STAT_PHY_XS_TX_FAULT); + SIENA_SIMPLE_STAT_SET(vmask, esmp, smask, stat, MC_CMD_PHYXS_ALIGN, + EFX_PHY_STAT_PHY_XS_ALIGN); + + if (vmask & (1 << MC_CMD_PHYXS_SYNC)) { + smask |= ((1 << EFX_PHY_STAT_PHY_XS_SYNC_A) | + (1 << EFX_PHY_STAT_PHY_XS_SYNC_B) | + (1 << EFX_PHY_STAT_PHY_XS_SYNC_C) | + (1 << EFX_PHY_STAT_PHY_XS_SYNC_D)); + if (stat != NULL && !EFSYS_MEM_IS_NULL(esmp)) { + efx_dword_t dword; + uint32_t sync; + EFSYS_MEM_READD(esmp, 4 * MC_CMD_PHYXS_SYNC, &dword); + sync = EFX_DWORD_FIELD(dword, EFX_DWORD_0); + 
stat[EFX_PHY_STAT_PHY_XS_SYNC_A] = (sync >> 0) & 1; + stat[EFX_PHY_STAT_PHY_XS_SYNC_B] = (sync >> 1) & 1; + stat[EFX_PHY_STAT_PHY_XS_SYNC_C] = (sync >> 2) & 1; + stat[EFX_PHY_STAT_PHY_XS_SYNC_D] = (sync >> 3) & 1; + } + } + + SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, AN_LINK_UP); + SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, AN_COMPLETE); + + SIENA_SIMPLE_STAT_SET(vmask, esmp, smask, stat, MC_CMD_CL22_LINK_UP, + EFX_PHY_STAT_CL22EXT_LINK_UP); + + if (smaskp != NULL) + *smaskp = smask; +} + + __checkReturn efx_rc_t +siena_phy_stats_update( + __in efx_nic_t *enp, + __in efsys_mem_t *esmp, + __inout_ecount(EFX_PHY_NSTATS) uint32_t *stat) +{ + efx_nic_cfg_t *encp = &(enp->en_nic_cfg); + uint32_t vmask = encp->enc_mcdi_phy_stat_mask; + uint64_t smask; + efx_mcdi_req_t req; + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_PHY_STATS_IN_LEN, + MC_CMD_PHY_STATS_OUT_DMA_LEN); + efx_rc_t rc; + + if ((esmp == NULL) || (EFSYS_MEM_SIZE(esmp) < EFX_PHY_STATS_SIZE)) { + rc = EINVAL; + goto fail1; + } + + req.emr_cmd = MC_CMD_PHY_STATS; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_PHY_STATS_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_PHY_STATS_OUT_DMA_LEN; + + MCDI_IN_SET_DWORD(req, PHY_STATS_IN_DMA_ADDR_LO, + EFSYS_MEM_ADDR(esmp) & 0xffffffff); + MCDI_IN_SET_DWORD(req, PHY_STATS_IN_DMA_ADDR_HI, + EFSYS_MEM_ADDR(esmp) >> 32); + + efx_mcdi_execute(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail2; + } + EFSYS_ASSERT3U(req.emr_out_length, ==, MC_CMD_PHY_STATS_OUT_DMA_LEN); + + siena_phy_decode_stats(enp, vmask, esmp, &smask, stat); + EFSYS_ASSERT(smask == encp->enc_phy_stat_mask); + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (0); +} + +#endif /* EFSYS_OPT_PHY_STATS */ + +#if EFSYS_OPT_BIST + + __checkReturn efx_rc_t +siena_phy_bist_start( + __in efx_nic_t *enp, + __in efx_bist_type_t type) +{ + efx_rc_t rc; + + if ((rc = efx_mcdi_bist_start(enp, type)) != 0) + goto fail1; + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +static __checkReturn unsigned long +siena_phy_sft9001_bist_status( + __in uint16_t code) +{ + switch (code) { + case MC_CMD_POLL_BIST_SFT9001_PAIR_BUSY: + return (EFX_PHY_CABLE_STATUS_BUSY); + case MC_CMD_POLL_BIST_SFT9001_INTER_PAIR_SHORT: + return (EFX_PHY_CABLE_STATUS_INTERPAIRSHORT); + case MC_CMD_POLL_BIST_SFT9001_INTRA_PAIR_SHORT: + return (EFX_PHY_CABLE_STATUS_INTRAPAIRSHORT); + case MC_CMD_POLL_BIST_SFT9001_PAIR_OPEN: + return (EFX_PHY_CABLE_STATUS_OPEN); + case MC_CMD_POLL_BIST_SFT9001_PAIR_OK: + return (EFX_PHY_CABLE_STATUS_OK); + default: + return (EFX_PHY_CABLE_STATUS_INVALID); + } +} + + __checkReturn efx_rc_t +siena_phy_bist_poll( + __in efx_nic_t *enp, + __in efx_bist_type_t type, + __out efx_bist_result_t *resultp, + __out_opt __drv_when(count > 0, __notnull) + uint32_t *value_maskp, + __out_ecount_opt(count) __drv_when(count > 0, __notnull) + unsigned long *valuesp, + __in size_t count) +{ + efx_nic_cfg_t *encp = &(enp->en_nic_cfg); + EFX_MCDI_DECLARE_BUF(payload, MC_CMD_POLL_BIST_IN_LEN, + MCDI_CTL_SDU_LEN_MAX); + uint32_t value_mask = 0; + efx_mcdi_req_t req; + uint32_t result; + efx_rc_t rc; + + req.emr_cmd = MC_CMD_POLL_BIST; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_POLL_BIST_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MCDI_CTL_SDU_LEN_MAX; + + efx_mcdi_execute(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail1; + } + + if (req.emr_out_length_used < 
MC_CMD_POLL_BIST_OUT_RESULT_OFST + 4) { + rc = EMSGSIZE; + goto fail2; + } + + if (count > 0) + (void) memset(valuesp, '\0', count * sizeof (unsigned long)); + + result = MCDI_OUT_DWORD(req, POLL_BIST_OUT_RESULT); + + /* Extract PHY specific results */ + if (result == MC_CMD_POLL_BIST_PASSED && + encp->enc_phy_type == EFX_PHY_SFT9001B && + req.emr_out_length_used >= MC_CMD_POLL_BIST_OUT_SFT9001_LEN && + (type == EFX_BIST_TYPE_PHY_CABLE_SHORT || + type == EFX_BIST_TYPE_PHY_CABLE_LONG)) { + uint16_t word; + + if (count > EFX_BIST_PHY_CABLE_LENGTH_A) { + if (valuesp != NULL) + valuesp[EFX_BIST_PHY_CABLE_LENGTH_A] = + MCDI_OUT_DWORD(req, + POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A); + value_mask |= (1 << EFX_BIST_PHY_CABLE_LENGTH_A); + } + + if (count > EFX_BIST_PHY_CABLE_LENGTH_B) { + if (valuesp != NULL) + valuesp[EFX_BIST_PHY_CABLE_LENGTH_B] = + MCDI_OUT_DWORD(req, + POLL_BIST_OUT_SFT9001_CABLE_LENGTH_B); + value_mask |= (1 << EFX_BIST_PHY_CABLE_LENGTH_B); + } + + if (count > EFX_BIST_PHY_CABLE_LENGTH_C) { + if (valuesp != NULL) + valuesp[EFX_BIST_PHY_CABLE_LENGTH_C] = + MCDI_OUT_DWORD(req, + POLL_BIST_OUT_SFT9001_CABLE_LENGTH_C); + value_mask |= (1 << EFX_BIST_PHY_CABLE_LENGTH_C); + } + + if (count > EFX_BIST_PHY_CABLE_LENGTH_D) { + if (valuesp != NULL) + valuesp[EFX_BIST_PHY_CABLE_LENGTH_D] = + MCDI_OUT_DWORD(req, + POLL_BIST_OUT_SFT9001_CABLE_LENGTH_D); + value_mask |= (1 << EFX_BIST_PHY_CABLE_LENGTH_D); + } + + if (count > EFX_BIST_PHY_CABLE_STATUS_A) { + if (valuesp != NULL) { + word = MCDI_OUT_WORD(req, + POLL_BIST_OUT_SFT9001_CABLE_STATUS_A); + valuesp[EFX_BIST_PHY_CABLE_STATUS_A] = + siena_phy_sft9001_bist_status(word); + } + value_mask |= (1 << EFX_BIST_PHY_CABLE_STATUS_A); + } + + if (count > EFX_BIST_PHY_CABLE_STATUS_B) { + if (valuesp != NULL) { + word = MCDI_OUT_WORD(req, + POLL_BIST_OUT_SFT9001_CABLE_STATUS_B); + valuesp[EFX_BIST_PHY_CABLE_STATUS_B] = + siena_phy_sft9001_bist_status(word); + } + value_mask |= (1 << EFX_BIST_PHY_CABLE_STATUS_B); + } + + if (count > EFX_BIST_PHY_CABLE_STATUS_C) { + if (valuesp != NULL) { + word = MCDI_OUT_WORD(req, + POLL_BIST_OUT_SFT9001_CABLE_STATUS_C); + valuesp[EFX_BIST_PHY_CABLE_STATUS_C] = + siena_phy_sft9001_bist_status(word); + } + value_mask |= (1 << EFX_BIST_PHY_CABLE_STATUS_C); + } + + if (count > EFX_BIST_PHY_CABLE_STATUS_D) { + if (valuesp != NULL) { + word = MCDI_OUT_WORD(req, + POLL_BIST_OUT_SFT9001_CABLE_STATUS_D); + valuesp[EFX_BIST_PHY_CABLE_STATUS_D] = + siena_phy_sft9001_bist_status(word); + } + value_mask |= (1 << EFX_BIST_PHY_CABLE_STATUS_D); + } + + } else if (result == MC_CMD_POLL_BIST_FAILED && + encp->enc_phy_type == EFX_PHY_QLX111V && + req.emr_out_length >= MC_CMD_POLL_BIST_OUT_MRSFP_LEN && + count > EFX_BIST_FAULT_CODE) { + if (valuesp != NULL) + valuesp[EFX_BIST_FAULT_CODE] = + MCDI_OUT_DWORD(req, POLL_BIST_OUT_MRSFP_TEST); + value_mask |= 1 << EFX_BIST_FAULT_CODE; + } + + if (value_maskp != NULL) + *value_maskp = value_mask; + + EFSYS_ASSERT(resultp != NULL); + if (result == MC_CMD_POLL_BIST_RUNNING) + *resultp = EFX_BIST_RESULT_RUNNING; + else if (result == MC_CMD_POLL_BIST_PASSED) + *resultp = EFX_BIST_RESULT_PASSED; + else + *resultp = EFX_BIST_RESULT_FAILED; + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + void +siena_phy_bist_stop( + __in efx_nic_t *enp, + __in efx_bist_type_t type) +{ + /* There is no way to stop BIST on Siena */ + _NOTE(ARGUNUSED(enp, type)) +} + +#endif /* EFSYS_OPT_BIST */ + +#endif /* EFSYS_OPT_SIENA */ diff --git 
a/src/spdk/dpdk/drivers/net/sfc/base/siena_sram.c b/src/spdk/dpdk/drivers/net/sfc/base/siena_sram.c new file mode 100644 index 000000000..308c2630a --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/base/siena_sram.c @@ -0,0 +1,154 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2009-2019 Solarflare Communications Inc. + */ + +#include "efx.h" +#include "efx_impl.h" + +#if EFSYS_OPT_SIENA + + void +siena_sram_init( + __in efx_nic_t *enp) +{ + efx_nic_cfg_t *encp = &(enp->en_nic_cfg); + efx_oword_t oword; + uint32_t rx_base, tx_base; + + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); + EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA); + + rx_base = encp->enc_buftbl_limit; + tx_base = rx_base + (encp->enc_rxq_limit * + EFX_RXQ_DC_NDESCS(EFX_RXQ_DC_SIZE)); + + /* Initialize the transmit descriptor cache */ + EFX_POPULATE_OWORD_1(oword, FRF_AZ_SRM_TX_DC_BASE_ADR, tx_base); + EFX_BAR_WRITEO(enp, FR_AZ_SRM_TX_DC_CFG_REG, &oword); + + EFX_POPULATE_OWORD_1(oword, FRF_AZ_TX_DC_SIZE, EFX_TXQ_DC_SIZE); + EFX_BAR_WRITEO(enp, FR_AZ_TX_DC_CFG_REG, &oword); + + /* Initialize the receive descriptor cache */ + EFX_POPULATE_OWORD_1(oword, FRF_AZ_SRM_RX_DC_BASE_ADR, rx_base); + EFX_BAR_WRITEO(enp, FR_AZ_SRM_RX_DC_CFG_REG, &oword); + + EFX_POPULATE_OWORD_1(oword, FRF_AZ_RX_DC_SIZE, EFX_RXQ_DC_SIZE); + EFX_BAR_WRITEO(enp, FR_AZ_RX_DC_CFG_REG, &oword); + + /* Set receive descriptor pre-fetch low water mark */ + EFX_POPULATE_OWORD_1(oword, FRF_AZ_RX_DC_PF_LWM, 56); + EFX_BAR_WRITEO(enp, FR_AZ_RX_DC_PF_WM_REG, &oword); + + /* Set the event queue to use for SRAM updates */ + EFX_POPULATE_OWORD_1(oword, FRF_AZ_SRM_UPD_EVQ_ID, 0); + EFX_BAR_WRITEO(enp, FR_AZ_SRM_UPD_EVQ_REG, &oword); +} + +#if EFSYS_OPT_DIAG + + __checkReturn efx_rc_t +siena_sram_test( + __in efx_nic_t *enp, + __in efx_sram_pattern_fn_t func) +{ + efx_oword_t oword; + efx_qword_t qword; + efx_qword_t verify; + size_t rows; + unsigned int wptr; + unsigned int rptr; + efx_rc_t rc; + + EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA); + + /* Reconfigure into HALF buffer table mode */ + EFX_POPULATE_OWORD_1(oword, FRF_AZ_BUF_TBL_MODE, 0); + EFX_BAR_WRITEO(enp, FR_AZ_BUF_TBL_CFG_REG, &oword); + + /* + * Move the descriptor caches up to the top of SRAM, and test + * all of SRAM below them. We only miss out one row here. + */ + rows = SIENA_SRAM_ROWS - 1; + EFX_POPULATE_OWORD_1(oword, FRF_AZ_SRM_RX_DC_BASE_ADR, rows); + EFX_BAR_WRITEO(enp, FR_AZ_SRM_RX_DC_CFG_REG, &oword); + + EFX_POPULATE_OWORD_1(oword, FRF_AZ_SRM_TX_DC_BASE_ADR, rows + 1); + EFX_BAR_WRITEO(enp, FR_AZ_SRM_TX_DC_CFG_REG, &oword); + + /* + * Write the pattern through BUF_HALF_TBL. 
Write + * in 64 entry batches, waiting 1us in between each batch + * to guarantee not to overflow the SRAM fifo + */ + for (wptr = 0, rptr = 0; wptr < rows; ++wptr) { + func(wptr, B_FALSE, &qword); + EFX_BAR_TBL_WRITEQ(enp, FR_AZ_BUF_HALF_TBL, wptr, &qword); + + if ((wptr - rptr) < 64 && wptr < rows - 1) + continue; + + EFSYS_SPIN(1); + + for (; rptr <= wptr; ++rptr) { + func(rptr, B_FALSE, &qword); + EFX_BAR_TBL_READQ(enp, FR_AZ_BUF_HALF_TBL, rptr, + &verify); + + if (!EFX_QWORD_IS_EQUAL(verify, qword)) { + rc = EFAULT; + goto fail1; + } + } + } + + /* And do the same negated */ + for (wptr = 0, rptr = 0; wptr < rows; ++wptr) { + func(wptr, B_TRUE, &qword); + EFX_BAR_TBL_WRITEQ(enp, FR_AZ_BUF_HALF_TBL, wptr, &qword); + + if ((wptr - rptr) < 64 && wptr < rows - 1) + continue; + + EFSYS_SPIN(1); + + for (; rptr <= wptr; ++rptr) { + func(rptr, B_TRUE, &qword); + EFX_BAR_TBL_READQ(enp, FR_AZ_BUF_HALF_TBL, rptr, + &verify); + + if (!EFX_QWORD_IS_EQUAL(verify, qword)) { + rc = EFAULT; + goto fail2; + } + } + } + + /* Restore back to FULL buffer table mode */ + EFX_POPULATE_OWORD_1(oword, FRF_AZ_BUF_TBL_MODE, 1); + EFX_BAR_WRITEO(enp, FR_AZ_BUF_TBL_CFG_REG, &oword); + + /* + * We don't need to reconfigure SRAM again because the API + * requires efx_nic_fini() to be called after an sram test. + */ + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + /* Restore back to FULL buffer table mode */ + EFX_POPULATE_OWORD_1(oword, FRF_AZ_BUF_TBL_MODE, 1); + EFX_BAR_WRITEO(enp, FR_AZ_BUF_TBL_CFG_REG, &oword); + + return (rc); +} + +#endif /* EFSYS_OPT_DIAG */ + +#endif /* EFSYS_OPT_SIENA */ diff --git a/src/spdk/dpdk/drivers/net/sfc/base/siena_vpd.c b/src/spdk/dpdk/drivers/net/sfc/base/siena_vpd.c new file mode 100644 index 000000000..789613847 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/base/siena_vpd.c @@ -0,0 +1,601 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2009-2019 Solarflare Communications Inc. 
+ */ + +#include "efx.h" +#include "efx_impl.h" + +#if EFSYS_OPT_VPD + +#if EFSYS_OPT_SIENA + +static __checkReturn efx_rc_t +siena_vpd_get_static( + __in efx_nic_t *enp, + __in uint32_t partn, + __deref_out_bcount_opt(*sizep) caddr_t *svpdp, + __out size_t *sizep) +{ + siena_mc_static_config_hdr_t *scfg; + caddr_t svpd; + size_t size; + uint8_t cksum; + unsigned int vpd_offset; + unsigned int vpd_length; + unsigned int hdr_length; + unsigned int pos; + unsigned int region; + efx_rc_t rc; + + EFSYS_ASSERT(partn == MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT0 || + partn == MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT1); + + /* Allocate sufficient memory for the entire static cfg area */ + if ((rc = siena_nvram_partn_size(enp, partn, &size)) != 0) + goto fail1; + + if (size < SIENA_NVRAM_CHUNK) { + rc = EINVAL; + goto fail2; + } + + EFSYS_KMEM_ALLOC(enp->en_esip, size, scfg); + if (scfg == NULL) { + rc = ENOMEM; + goto fail3; + } + + if ((rc = siena_nvram_partn_read(enp, partn, 0, + (caddr_t)scfg, SIENA_NVRAM_CHUNK)) != 0) + goto fail4; + + /* Verify the magic number */ + if (EFX_DWORD_FIELD(scfg->magic, EFX_DWORD_0) != + SIENA_MC_STATIC_CONFIG_MAGIC) { + rc = EINVAL; + goto fail5; + } + + /* All future versions of the structure must be backwards compatible */ + EFX_STATIC_ASSERT(SIENA_MC_STATIC_CONFIG_VERSION == 0); + + hdr_length = EFX_WORD_FIELD(scfg->length, EFX_WORD_0); + vpd_offset = EFX_DWORD_FIELD(scfg->static_vpd_offset, EFX_DWORD_0); + vpd_length = EFX_DWORD_FIELD(scfg->static_vpd_length, EFX_DWORD_0); + + /* Verify the hdr doesn't overflow the sector size */ + if (hdr_length > size || vpd_offset > size || vpd_length > size || + vpd_length + vpd_offset > size) { + rc = EINVAL; + goto fail6; + } + + /* Read the remainder of scfg + static vpd */ + region = vpd_offset + vpd_length; + if (region > SIENA_NVRAM_CHUNK) { + if ((rc = siena_nvram_partn_read(enp, partn, SIENA_NVRAM_CHUNK, + (caddr_t)scfg + SIENA_NVRAM_CHUNK, + region - SIENA_NVRAM_CHUNK)) != 0) + goto fail7; + } + + /* Verify checksum */ + cksum = 0; + for (pos = 0; pos < hdr_length; pos++) + cksum += ((uint8_t *)scfg)[pos]; + if (cksum != 0) { + rc = EINVAL; + goto fail8; + } + + if (vpd_length == 0) + svpd = NULL; + else { + /* Copy the vpd data out */ + EFSYS_KMEM_ALLOC(enp->en_esip, vpd_length, svpd); + if (svpd == NULL) { + rc = ENOMEM; + goto fail9; + } + memcpy(svpd, (caddr_t)scfg + vpd_offset, vpd_length); + } + + EFSYS_KMEM_FREE(enp->en_esip, size, scfg); + + *svpdp = svpd; + *sizep = vpd_length; + + return (0); + +fail9: + EFSYS_PROBE(fail9); +fail8: + EFSYS_PROBE(fail8); +fail7: + EFSYS_PROBE(fail7); +fail6: + EFSYS_PROBE(fail6); +fail5: + EFSYS_PROBE(fail5); +fail4: + EFSYS_PROBE(fail4); + + EFSYS_KMEM_FREE(enp->en_esip, size, scfg); + +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +siena_vpd_init( + __in efx_nic_t *enp) +{ + efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); + caddr_t svpd = NULL; + unsigned int partn; + size_t size = 0; + efx_rc_t rc; + + EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA); + + partn = (emip->emi_port == 1) + ? MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT0 + : MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT1; + + /* + * We need the static VPD sector to present a unified static+dynamic + * VPD, that is, basically on every read, write, verify cycle. Since + * it should *never* change we can just cache it here. 
+ */ + if ((rc = siena_vpd_get_static(enp, partn, &svpd, &size)) != 0) + goto fail1; + + if (svpd != NULL && size > 0) { + if ((rc = efx_vpd_hunk_verify(svpd, size, NULL)) != 0) + goto fail2; + } + + enp->en_u.siena.enu_svpd = svpd; + enp->en_u.siena.enu_svpd_length = size; + + return (0); + +fail2: + EFSYS_PROBE(fail2); + + EFSYS_KMEM_FREE(enp->en_esip, size, svpd); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +siena_vpd_size( + __in efx_nic_t *enp, + __out size_t *sizep) +{ + efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); + uint32_t partn; + efx_rc_t rc; + + EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA); + + /* + * This function returns the total size the user should allocate + * for all VPD operations. We've already cached the static vpd, + * so we just need to return an upper bound on the dynamic vpd. + * Since the dynamic_config structure can change under our feet, + * (as version numbers are inserted), just be safe and return the + * total size of the dynamic_config *sector* + */ + partn = (emip->emi_port == 1) + ? MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0 + : MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1; + + if ((rc = siena_nvram_partn_size(enp, partn, sizep)) != 0) + goto fail1; + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +siena_vpd_read( + __in efx_nic_t *enp, + __out_bcount(size) caddr_t data, + __in size_t size) +{ + efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); + siena_mc_dynamic_config_hdr_t *dcfg = NULL; + unsigned int vpd_length; + unsigned int vpd_offset; + unsigned int dcfg_partn; + size_t dcfg_size; + efx_rc_t rc; + + EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA); + + dcfg_partn = (emip->emi_port == 1) + ? MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0 + : MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1; + + if ((rc = siena_nvram_get_dynamic_cfg(enp, dcfg_partn, + B_TRUE, &dcfg, &dcfg_size)) != 0) + goto fail1; + + vpd_length = EFX_DWORD_FIELD(dcfg->dynamic_vpd_length, EFX_DWORD_0); + vpd_offset = EFX_DWORD_FIELD(dcfg->dynamic_vpd_offset, EFX_DWORD_0); + + if (vpd_length > size) { + rc = EFAULT; /* Invalid dcfg: header bigger than sector */ + goto fail2; + } + + EFSYS_ASSERT3U(vpd_length, <=, size); + memcpy(data, (caddr_t)dcfg + vpd_offset, vpd_length); + + /* Pad data with all-1s, consistent with update operations */ + memset(data + vpd_length, 0xff, size - vpd_length); + + EFSYS_KMEM_FREE(enp->en_esip, dcfg_size, dcfg); + + return (0); + +fail2: + EFSYS_PROBE(fail2); + + EFSYS_KMEM_FREE(enp->en_esip, dcfg_size, dcfg); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +siena_vpd_verify( + __in efx_nic_t *enp, + __in_bcount(size) caddr_t data, + __in size_t size) +{ + efx_vpd_tag_t stag; + efx_vpd_tag_t dtag; + efx_vpd_keyword_t skey; + efx_vpd_keyword_t dkey; + unsigned int scont; + unsigned int dcont; + + efx_rc_t rc; + + EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA); + + /* + * Strictly you could take the view that dynamic vpd is optional. + * Instead, to conform more closely to the read/verify/reinit() + * paradigm, we require dynamic vpd. siena_vpd_reinit() will + * reinitialize it as required. + */ + if ((rc = efx_vpd_hunk_verify(data, size, NULL)) != 0) + goto fail1; + + /* + * Verify that there is no duplication between the static and + * dynamic cfg sectors. 
+ */ + if (enp->en_u.siena.enu_svpd_length == 0) + goto done; + + dcont = 0; + _NOTE(CONSTANTCONDITION) + while (1) { + if ((rc = efx_vpd_hunk_next(data, size, &dtag, + &dkey, NULL, NULL, &dcont)) != 0) + goto fail2; + if (dcont == 0) + break; + + /* + * Skip the RV keyword. It should be present in both the static + * and dynamic cfg sectors. + */ + if (dtag == EFX_VPD_RO && dkey == EFX_VPD_KEYWORD('R', 'V')) + continue; + + scont = 0; + _NOTE(CONSTANTCONDITION) + while (1) { + if ((rc = efx_vpd_hunk_next( + enp->en_u.siena.enu_svpd, + enp->en_u.siena.enu_svpd_length, &stag, &skey, + NULL, NULL, &scont)) != 0) + goto fail3; + if (scont == 0) + break; + + if (stag == dtag && skey == dkey) { + rc = EEXIST; + goto fail4; + } + } + } + +done: + return (0); + +fail4: + EFSYS_PROBE(fail4); +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +siena_vpd_reinit( + __in efx_nic_t *enp, + __in_bcount(size) caddr_t data, + __in size_t size) +{ + boolean_t wantpid; + efx_rc_t rc; + + /* + * Only create a PID if the dynamic cfg doesn't have one + */ + if (enp->en_u.siena.enu_svpd_length == 0) + wantpid = B_TRUE; + else { + unsigned int offset; + uint8_t length; + + rc = efx_vpd_hunk_get(enp->en_u.siena.enu_svpd, + enp->en_u.siena.enu_svpd_length, + EFX_VPD_ID, 0, &offset, &length); + if (rc == 0) + wantpid = B_FALSE; + else if (rc == ENOENT) + wantpid = B_TRUE; + else + goto fail1; + } + + if ((rc = efx_vpd_hunk_reinit(data, size, wantpid)) != 0) + goto fail2; + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +siena_vpd_get( + __in efx_nic_t *enp, + __in_bcount(size) caddr_t data, + __in size_t size, + __inout efx_vpd_value_t *evvp) +{ + unsigned int offset; + uint8_t length; + efx_rc_t rc; + + EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA); + + /* Attempt to satisfy the request from svpd first */ + if (enp->en_u.siena.enu_svpd_length > 0) { + if ((rc = efx_vpd_hunk_get(enp->en_u.siena.enu_svpd, + enp->en_u.siena.enu_svpd_length, evvp->evv_tag, + evvp->evv_keyword, &offset, &length)) == 0) { + evvp->evv_length = length; + memcpy(evvp->evv_value, + enp->en_u.siena.enu_svpd + offset, length); + return (0); + } else if (rc != ENOENT) + goto fail1; + } + + /* And then from the provided data buffer */ + if ((rc = efx_vpd_hunk_get(data, size, evvp->evv_tag, + evvp->evv_keyword, &offset, &length)) != 0) { + if (rc == ENOENT) + return (rc); + + goto fail2; + } + + evvp->evv_length = length; + memcpy(evvp->evv_value, data + offset, length); + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +siena_vpd_set( + __in efx_nic_t *enp, + __in_bcount(size) caddr_t data, + __in size_t size, + __in efx_vpd_value_t *evvp) +{ + efx_rc_t rc; + + EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA); + + /* If the provided (tag,keyword) exists in svpd, then it is readonly */ + if (enp->en_u.siena.enu_svpd_length > 0) { + unsigned int offset; + uint8_t length; + + if ((rc = efx_vpd_hunk_get(enp->en_u.siena.enu_svpd, + enp->en_u.siena.enu_svpd_length, evvp->evv_tag, + evvp->evv_keyword, &offset, &length)) == 0) { + rc = EACCES; + goto fail1; + } + } + + if ((rc = efx_vpd_hunk_set(data, size, evvp)) != 0) + goto fail2; + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t 
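/*
 * Illustrative sketch (not part of the upstream sources) of the lookup
 * convention used by the siena_vpd_get()/siena_vpd_set() pair above;
 * the 'SN' read-only keyword is only an assumed example:
 *
 *	efx_vpd_value_t evv;
 *
 *	memset(&evv, 0, sizeof (evv));
 *	evv.evv_tag = EFX_VPD_RO;
 *	evv.evv_keyword = EFX_VPD_KEYWORD('S', 'N');
 *	rc = siena_vpd_get(enp, data, size, &evv);
 *
 * On success evv.evv_length bytes of the value are in evv.evv_value.
 * The cached static sector is consulted before the caller's buffer, so
 * a siena_vpd_set() on a keyword found there fails with EACCES.
 */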
+siena_vpd_next( + __in efx_nic_t *enp, + __in_bcount(size) caddr_t data, + __in size_t size, + __out efx_vpd_value_t *evvp, + __inout unsigned int *contp) +{ + _NOTE(ARGUNUSED(enp, data, size, evvp, contp)) + + return (ENOTSUP); +} + + __checkReturn efx_rc_t +siena_vpd_write( + __in efx_nic_t *enp, + __in_bcount(size) caddr_t data, + __in size_t size) +{ + efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); + siena_mc_dynamic_config_hdr_t *dcfg = NULL; + unsigned int vpd_offset; + unsigned int dcfg_partn; + unsigned int hdr_length; + unsigned int pos; + uint8_t cksum; + size_t partn_size, dcfg_size; + size_t vpd_length; + efx_rc_t rc; + + EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA); + + /* Determine total length of all tags */ + if ((rc = efx_vpd_hunk_length(data, size, &vpd_length)) != 0) + goto fail1; + + /* Lock dynamic config sector for write, and read structure only */ + dcfg_partn = (emip->emi_port == 1) + ? MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0 + : MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1; + + if ((rc = siena_nvram_partn_size(enp, dcfg_partn, &partn_size)) != 0) + goto fail2; + + if ((rc = siena_nvram_partn_lock(enp, dcfg_partn)) != 0) + goto fail3; + + if ((rc = siena_nvram_get_dynamic_cfg(enp, dcfg_partn, + B_FALSE, &dcfg, &dcfg_size)) != 0) + goto fail4; + + hdr_length = EFX_WORD_FIELD(dcfg->length, EFX_WORD_0); + + /* Allocated memory should have room for the new VPD */ + if (hdr_length + vpd_length > dcfg_size) { + rc = ENOSPC; + goto fail5; + } + + /* Copy in new vpd and update header */ + vpd_offset = dcfg_size - vpd_length; + EFX_POPULATE_DWORD_1(dcfg->dynamic_vpd_offset, EFX_DWORD_0, vpd_offset); + memcpy((caddr_t)dcfg + vpd_offset, data, vpd_length); + EFX_POPULATE_DWORD_1(dcfg->dynamic_vpd_length, EFX_DWORD_0, vpd_length); + + /* Update the checksum */ + cksum = 0; + for (pos = 0; pos < hdr_length; pos++) + cksum += ((uint8_t *)dcfg)[pos]; + dcfg->csum.eb_u8[0] -= cksum; + + /* Erase and write the new sector */ + if ((rc = siena_nvram_partn_erase(enp, dcfg_partn, 0, partn_size)) != 0) + goto fail6; + + /* Write out the new structure to nvram */ + if ((rc = siena_nvram_partn_write(enp, dcfg_partn, 0, (caddr_t)dcfg, + vpd_offset + vpd_length)) != 0) + goto fail7; + + EFSYS_KMEM_FREE(enp->en_esip, dcfg_size, dcfg); + + siena_nvram_partn_unlock(enp, dcfg_partn, NULL); + + return (0); + +fail7: + EFSYS_PROBE(fail7); +fail6: + EFSYS_PROBE(fail6); +fail5: + EFSYS_PROBE(fail5); + + EFSYS_KMEM_FREE(enp->en_esip, dcfg_size, dcfg); +fail4: + EFSYS_PROBE(fail4); + + siena_nvram_partn_unlock(enp, dcfg_partn, NULL); +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + void +siena_vpd_fini( + __in efx_nic_t *enp) +{ + EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA); + + if (enp->en_u.siena.enu_svpd_length > 0) { + EFSYS_KMEM_FREE(enp->en_esip, enp->en_u.siena.enu_svpd_length, + enp->en_u.siena.enu_svpd); + + enp->en_u.siena.enu_svpd = NULL; + enp->en_u.siena.enu_svpd_length = 0; + } +} + +#endif /* EFSYS_OPT_SIENA */ + +#endif /* EFSYS_OPT_VPD */ diff --git a/src/spdk/dpdk/drivers/net/sfc/efsys.h b/src/spdk/dpdk/drivers/net/sfc/efsys.h new file mode 100644 index 000000000..c94e6c0b9 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/efsys.h @@ -0,0 +1,736 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2016-2019 Solarflare Communications Inc. 
+ * + * This software was jointly developed between OKTET Labs (under contract + * for Solarflare) and Solarflare Communications, Inc. + */ + +#ifndef _SFC_COMMON_EFSYS_H +#define _SFC_COMMON_EFSYS_H + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sfc_debug.h" +#include "sfc_log.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define EFSYS_HAS_UINT64 1 +#define EFSYS_USE_UINT64 1 +#define EFSYS_HAS_SSE2_M128 1 + +#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN +#define EFSYS_IS_BIG_ENDIAN 1 +#define EFSYS_IS_LITTLE_ENDIAN 0 +#elif RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN +#define EFSYS_IS_BIG_ENDIAN 0 +#define EFSYS_IS_LITTLE_ENDIAN 1 +#else +#error "Cannot determine system endianness" +#endif +#include "efx_types.h" + + +typedef bool boolean_t; + +#ifndef B_FALSE +#define B_FALSE false +#endif +#ifndef B_TRUE +#define B_TRUE true +#endif + +/* + * RTE_MAX() and RTE_MIN() cannot be used since braced-group within + * expression allowed only inside a function, but MAX() is used as + * a number of elements in array. + */ +#ifndef MAX +#define MAX(v1, v2) ((v1) > (v2) ? (v1) : (v2)) +#endif +#ifndef MIN +#define MIN(v1, v2) ((v1) < (v2) ? (v1) : (v2)) +#endif + +#ifndef ISP2 +#define ISP2(x) rte_is_power_of_2(x) +#endif + +#define ENOTACTIVE ENOTCONN + +static inline void +prefetch_read_many(const volatile void *addr) +{ + rte_prefetch0(addr); +} + +static inline void +prefetch_read_once(const volatile void *addr) +{ + rte_prefetch_non_temporal(addr); +} + +/* Code inclusion options */ + + +#define EFSYS_OPT_NAMES 1 + +/* Disable SFN5xxx/SFN6xxx since it requires specific support in the PMD */ +#define EFSYS_OPT_SIENA 0 +/* Enable SFN7xxx support */ +#define EFSYS_OPT_HUNTINGTON 1 +/* Enable SFN8xxx support */ +#define EFSYS_OPT_MEDFORD 1 +/* Enable SFN2xxx support */ +#define EFSYS_OPT_MEDFORD2 1 +#ifdef RTE_LIBRTE_SFC_EFX_DEBUG +#define EFSYS_OPT_CHECK_REG 1 +#else +#define EFSYS_OPT_CHECK_REG 0 +#endif + +/* MCDI is required for SFN7xxx and SFN8xx */ +#define EFSYS_OPT_MCDI 1 +#define EFSYS_OPT_MCDI_LOGGING 1 +#define EFSYS_OPT_MCDI_PROXY_AUTH 1 + +#define EFSYS_OPT_MAC_STATS 1 + +#define EFSYS_OPT_LOOPBACK 1 + +#define EFSYS_OPT_MON_MCDI 0 +#define EFSYS_OPT_MON_STATS 0 + +#define EFSYS_OPT_PHY_STATS 0 +#define EFSYS_OPT_BIST 0 +#define EFSYS_OPT_PHY_LED_CONTROL 0 +#define EFSYS_OPT_PHY_FLAGS 0 + +#define EFSYS_OPT_VPD 0 +#define EFSYS_OPT_NVRAM 0 +#define EFSYS_OPT_BOOTCFG 0 +#define EFSYS_OPT_IMAGE_LAYOUT 0 + +#define EFSYS_OPT_DIAG 0 +#define EFSYS_OPT_RX_SCALE 1 +#define EFSYS_OPT_QSTATS 0 +/* Filters support is required for SFN7xxx and SFN8xx */ +#define EFSYS_OPT_FILTER 1 +#define EFSYS_OPT_RX_SCATTER 0 + +#define EFSYS_OPT_EV_PREFETCH 0 + +#define EFSYS_OPT_DECODE_INTR_FATAL 0 + +#define EFSYS_OPT_LICENSING 0 + +#define EFSYS_OPT_ALLOW_UNCONFIGURED_NIC 0 + +#define EFSYS_OPT_RX_PACKED_STREAM 0 + +#define EFSYS_OPT_RX_ES_SUPER_BUFFER 1 + +#define EFSYS_OPT_TUNNEL 1 + +#define EFSYS_OPT_FW_SUBVARIANT_AWARE 1 + +#define EFSYS_OPT_EVB 0 + +#define EFSYS_OPT_MCDI_PROXY_AUTH_SERVER 0 + +/* ID */ + +typedef struct __efsys_identifier_s efsys_identifier_t; + + +#define EFSYS_PROBE(_name) \ + do { } while (0) + +#define EFSYS_PROBE1(_name, _type1, _arg1) \ + do { } while (0) + +#define EFSYS_PROBE2(_name, _type1, _arg1, _type2, _arg2) \ + do { } while (0) + +#define EFSYS_PROBE3(_name, _type1, _arg1, _type2, _arg2, \ + _type3, _arg3) \ + do { } while (0) + +#define EFSYS_PROBE4(_name, _type1, _arg1, _type2, 
_arg2, \ + _type3, _arg3, _type4, _arg4) \ + do { } while (0) + +#define EFSYS_PROBE5(_name, _type1, _arg1, _type2, _arg2, \ + _type3, _arg3, _type4, _arg4, _type5, _arg5) \ + do { } while (0) + +#define EFSYS_PROBE6(_name, _type1, _arg1, _type2, _arg2, \ + _type3, _arg3, _type4, _arg4, _type5, _arg5, \ + _type6, _arg6) \ + do { } while (0) + +#define EFSYS_PROBE7(_name, _type1, _arg1, _type2, _arg2, \ + _type3, _arg3, _type4, _arg4, _type5, _arg5, \ + _type6, _arg6, _type7, _arg7) \ + do { } while (0) + + +/* DMA */ + +typedef rte_iova_t efsys_dma_addr_t; + +typedef struct efsys_mem_s { + const struct rte_memzone *esm_mz; + /* + * Ideally it should have volatile qualifier to denote that + * the memory may be updated by someone else. However, it adds + * qualifier discard warnings when the pointer or its derivative + * is passed to memset() or rte_mov16(). + * So, skip the qualifier here, but make sure that it is added + * below in access macros. + */ + void *esm_base; + efsys_dma_addr_t esm_addr; +} efsys_mem_t; + + +#define EFSYS_MEM_ZERO(_esmp, _size) \ + do { \ + (void)memset((void *)(_esmp)->esm_base, 0, (_size)); \ + \ + _NOTE(CONSTANTCONDITION); \ + } while (B_FALSE) + +#define EFSYS_MEM_READD(_esmp, _offset, _edp) \ + do { \ + volatile uint8_t *_base = (_esmp)->esm_base; \ + volatile uint32_t *_addr; \ + \ + _NOTE(CONSTANTCONDITION); \ + SFC_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \ + sizeof(efx_dword_t))); \ + \ + _addr = (volatile uint32_t *)(_base + (_offset)); \ + (_edp)->ed_u32[0] = _addr[0]; \ + \ + EFSYS_PROBE2(mem_readl, unsigned int, (_offset), \ + uint32_t, (_edp)->ed_u32[0]); \ + \ + _NOTE(CONSTANTCONDITION); \ + } while (B_FALSE) + +#define EFSYS_MEM_READQ(_esmp, _offset, _eqp) \ + do { \ + volatile uint8_t *_base = (_esmp)->esm_base; \ + volatile uint64_t *_addr; \ + \ + _NOTE(CONSTANTCONDITION); \ + SFC_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \ + sizeof(efx_qword_t))); \ + \ + _addr = (volatile uint64_t *)(_base + (_offset)); \ + (_eqp)->eq_u64[0] = _addr[0]; \ + \ + EFSYS_PROBE3(mem_readq, unsigned int, (_offset), \ + uint32_t, (_eqp)->eq_u32[1], \ + uint32_t, (_eqp)->eq_u32[0]); \ + \ + _NOTE(CONSTANTCONDITION); \ + } while (B_FALSE) + +#define EFSYS_MEM_READO(_esmp, _offset, _eop) \ + do { \ + volatile uint8_t *_base = (_esmp)->esm_base; \ + volatile __m128i *_addr; \ + \ + _NOTE(CONSTANTCONDITION); \ + SFC_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \ + sizeof(efx_oword_t))); \ + \ + _addr = (volatile __m128i *)(_base + (_offset)); \ + (_eop)->eo_u128[0] = _addr[0]; \ + \ + EFSYS_PROBE5(mem_reado, unsigned int, (_offset), \ + uint32_t, (_eop)->eo_u32[3], \ + uint32_t, (_eop)->eo_u32[2], \ + uint32_t, (_eop)->eo_u32[1], \ + uint32_t, (_eop)->eo_u32[0]); \ + \ + _NOTE(CONSTANTCONDITION); \ + } while (B_FALSE) + + +#define EFSYS_MEM_WRITED(_esmp, _offset, _edp) \ + do { \ + volatile uint8_t *_base = (_esmp)->esm_base; \ + volatile uint32_t *_addr; \ + \ + _NOTE(CONSTANTCONDITION); \ + SFC_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \ + sizeof(efx_dword_t))); \ + \ + EFSYS_PROBE2(mem_writed, unsigned int, (_offset), \ + uint32_t, (_edp)->ed_u32[0]); \ + \ + _addr = (volatile uint32_t *)(_base + (_offset)); \ + _addr[0] = (_edp)->ed_u32[0]; \ + \ + _NOTE(CONSTANTCONDITION); \ + } while (B_FALSE) + +#define EFSYS_MEM_WRITEQ(_esmp, _offset, _eqp) \ + do { \ + volatile uint8_t *_base = (_esmp)->esm_base; \ + volatile uint64_t *_addr; \ + \ + _NOTE(CONSTANTCONDITION); \ + SFC_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \ + sizeof(efx_qword_t))); \ + \ + EFSYS_PROBE3(mem_writeq, 
unsigned int, (_offset), \ + uint32_t, (_eqp)->eq_u32[1], \ + uint32_t, (_eqp)->eq_u32[0]); \ + \ + _addr = (volatile uint64_t *)(_base + (_offset)); \ + _addr[0] = (_eqp)->eq_u64[0]; \ + \ + _NOTE(CONSTANTCONDITION); \ + } while (B_FALSE) + +#define EFSYS_MEM_WRITEO(_esmp, _offset, _eop) \ + do { \ + volatile uint8_t *_base = (_esmp)->esm_base; \ + volatile __m128i *_addr; \ + \ + _NOTE(CONSTANTCONDITION); \ + SFC_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \ + sizeof(efx_oword_t))); \ + \ + \ + EFSYS_PROBE5(mem_writeo, unsigned int, (_offset), \ + uint32_t, (_eop)->eo_u32[3], \ + uint32_t, (_eop)->eo_u32[2], \ + uint32_t, (_eop)->eo_u32[1], \ + uint32_t, (_eop)->eo_u32[0]); \ + \ + _addr = (volatile __m128i *)(_base + (_offset)); \ + _addr[0] = (_eop)->eo_u128[0]; \ + \ + _NOTE(CONSTANTCONDITION); \ + } while (B_FALSE) + + +#define EFSYS_MEM_SIZE(_esmp) \ + ((_esmp)->esm_mz->len) + +#define EFSYS_MEM_ADDR(_esmp) \ + ((_esmp)->esm_addr) + +#define EFSYS_MEM_IS_NULL(_esmp) \ + ((_esmp)->esm_base == NULL) + +#define EFSYS_MEM_PREFETCH(_esmp, _offset) \ + do { \ + volatile uint8_t *_base = (_esmp)->esm_base; \ + \ + rte_prefetch0(_base + (_offset)); \ + } while (0) + + +/* BAR */ + +typedef struct efsys_bar_s { + rte_spinlock_t esb_lock; + int esb_rid; + struct rte_pci_device *esb_dev; + /* + * Ideally it should have volatile qualifier to denote that + * the memory may be updated by someone else. However, it adds + * qualifier discard warnings when the pointer or its derivative + * is passed to memset() or rte_mov16(). + * So, skip the qualifier here, but make sure that it is added + * below in access macros. + */ + void *esb_base; +} efsys_bar_t; + +#define SFC_BAR_LOCK_INIT(_esbp, _ifname) \ + do { \ + rte_spinlock_init(&(_esbp)->esb_lock); \ + _NOTE(CONSTANTCONDITION); \ + } while (B_FALSE) +#define SFC_BAR_LOCK_DESTROY(_esbp) ((void)0) +#define SFC_BAR_LOCK(_esbp) rte_spinlock_lock(&(_esbp)->esb_lock) +#define SFC_BAR_UNLOCK(_esbp) rte_spinlock_unlock(&(_esbp)->esb_lock) + +#define EFSYS_BAR_READD(_esbp, _offset, _edp, _lock) \ + do { \ + volatile uint8_t *_base = (_esbp)->esb_base; \ + volatile uint32_t *_addr; \ + \ + _NOTE(CONSTANTCONDITION); \ + SFC_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \ + sizeof(efx_dword_t))); \ + _NOTE(CONSTANTCONDITION); \ + if (_lock) \ + SFC_BAR_LOCK(_esbp); \ + \ + _addr = (volatile uint32_t *)(_base + (_offset)); \ + rte_rmb(); \ + (_edp)->ed_u32[0] = rte_read32_relaxed(_addr); \ + \ + EFSYS_PROBE2(bar_readd, unsigned int, (_offset), \ + uint32_t, (_edp)->ed_u32[0]); \ + \ + _NOTE(CONSTANTCONDITION); \ + if (_lock) \ + SFC_BAR_UNLOCK(_esbp); \ + _NOTE(CONSTANTCONDITION); \ + } while (B_FALSE) + +#define EFSYS_BAR_READQ(_esbp, _offset, _eqp) \ + do { \ + volatile uint8_t *_base = (_esbp)->esb_base; \ + volatile uint64_t *_addr; \ + \ + _NOTE(CONSTANTCONDITION); \ + SFC_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \ + sizeof(efx_qword_t))); \ + \ + SFC_BAR_LOCK(_esbp); \ + \ + _addr = (volatile uint64_t *)(_base + (_offset)); \ + rte_rmb(); \ + (_eqp)->eq_u64[0] = rte_read64_relaxed(_addr); \ + \ + EFSYS_PROBE3(bar_readq, unsigned int, (_offset), \ + uint32_t, (_eqp)->eq_u32[1], \ + uint32_t, (_eqp)->eq_u32[0]); \ + \ + SFC_BAR_UNLOCK(_esbp); \ + _NOTE(CONSTANTCONDITION); \ + } while (B_FALSE) + +#define EFSYS_BAR_READO(_esbp, _offset, _eop, _lock) \ + do { \ + volatile uint8_t *_base = (_esbp)->esb_base; \ + volatile __m128i *_addr; \ + \ + _NOTE(CONSTANTCONDITION); \ + SFC_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \ + sizeof(efx_oword_t))); \ + \ + 
_NOTE(CONSTANTCONDITION); \ + if (_lock) \ + SFC_BAR_LOCK(_esbp); \ + \ + _addr = (volatile __m128i *)(_base + (_offset)); \ + rte_rmb(); \ + /* There is no rte_read128_relaxed() yet */ \ + (_eop)->eo_u128[0] = _addr[0]; \ + \ + EFSYS_PROBE5(bar_reado, unsigned int, (_offset), \ + uint32_t, (_eop)->eo_u32[3], \ + uint32_t, (_eop)->eo_u32[2], \ + uint32_t, (_eop)->eo_u32[1], \ + uint32_t, (_eop)->eo_u32[0]); \ + \ + _NOTE(CONSTANTCONDITION); \ + if (_lock) \ + SFC_BAR_UNLOCK(_esbp); \ + _NOTE(CONSTANTCONDITION); \ + } while (B_FALSE) + + +#define EFSYS_BAR_WRITED(_esbp, _offset, _edp, _lock) \ + do { \ + volatile uint8_t *_base = (_esbp)->esb_base; \ + volatile uint32_t *_addr; \ + \ + _NOTE(CONSTANTCONDITION); \ + SFC_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \ + sizeof(efx_dword_t))); \ + \ + _NOTE(CONSTANTCONDITION); \ + if (_lock) \ + SFC_BAR_LOCK(_esbp); \ + \ + EFSYS_PROBE2(bar_writed, unsigned int, (_offset), \ + uint32_t, (_edp)->ed_u32[0]); \ + \ + _addr = (volatile uint32_t *)(_base + (_offset)); \ + rte_write32_relaxed((_edp)->ed_u32[0], _addr); \ + rte_wmb(); \ + \ + _NOTE(CONSTANTCONDITION); \ + if (_lock) \ + SFC_BAR_UNLOCK(_esbp); \ + _NOTE(CONSTANTCONDITION); \ + } while (B_FALSE) + +#define EFSYS_BAR_WRITEQ(_esbp, _offset, _eqp) \ + do { \ + volatile uint8_t *_base = (_esbp)->esb_base; \ + volatile uint64_t *_addr; \ + \ + _NOTE(CONSTANTCONDITION); \ + SFC_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \ + sizeof(efx_qword_t))); \ + \ + SFC_BAR_LOCK(_esbp); \ + \ + EFSYS_PROBE3(bar_writeq, unsigned int, (_offset), \ + uint32_t, (_eqp)->eq_u32[1], \ + uint32_t, (_eqp)->eq_u32[0]); \ + \ + _addr = (volatile uint64_t *)(_base + (_offset)); \ + rte_write64_relaxed((_eqp)->eq_u64[0], _addr); \ + rte_wmb(); \ + \ + SFC_BAR_UNLOCK(_esbp); \ + _NOTE(CONSTANTCONDITION); \ + } while (B_FALSE) + +/* + * Guarantees 64bit aligned 64bit writes to write combined BAR mapping + * (required by PIO hardware). + * + * Neither VFIO, nor UIO, nor NIC UIO (on FreeBSD) support + * write-combined memory mapped to user-land, so just abort if used. 
+ */ +#define EFSYS_BAR_WC_WRITEQ(_esbp, _offset, _eqp) \ + do { \ + rte_panic("Write-combined BAR access not supported"); \ + } while (B_FALSE) + +#define EFSYS_BAR_WRITEO(_esbp, _offset, _eop, _lock) \ + do { \ + volatile uint8_t *_base = (_esbp)->esb_base; \ + volatile __m128i *_addr; \ + \ + _NOTE(CONSTANTCONDITION); \ + SFC_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \ + sizeof(efx_oword_t))); \ + \ + _NOTE(CONSTANTCONDITION); \ + if (_lock) \ + SFC_BAR_LOCK(_esbp); \ + \ + EFSYS_PROBE5(bar_writeo, unsigned int, (_offset), \ + uint32_t, (_eop)->eo_u32[3], \ + uint32_t, (_eop)->eo_u32[2], \ + uint32_t, (_eop)->eo_u32[1], \ + uint32_t, (_eop)->eo_u32[0]); \ + \ + _addr = (volatile __m128i *)(_base + (_offset)); \ + /* There is no rte_write128_relaxed() yet */ \ + _addr[0] = (_eop)->eo_u128[0]; \ + rte_wmb(); \ + \ + _NOTE(CONSTANTCONDITION); \ + if (_lock) \ + SFC_BAR_UNLOCK(_esbp); \ + _NOTE(CONSTANTCONDITION); \ + } while (B_FALSE) + +/* Use the standard octo-word write for doorbell writes */ +#define EFSYS_BAR_DOORBELL_WRITEO(_esbp, _offset, _eop) \ + do { \ + EFSYS_BAR_WRITEO((_esbp), (_offset), (_eop), B_FALSE); \ + _NOTE(CONSTANTCONDITION); \ + } while (B_FALSE) + +/* SPIN */ + +#define EFSYS_SPIN(_us) \ + do { \ + rte_delay_us(_us); \ + _NOTE(CONSTANTCONDITION); \ + } while (B_FALSE) + +#define EFSYS_SLEEP EFSYS_SPIN + +/* BARRIERS */ + +#define EFSYS_MEM_READ_BARRIER() rte_rmb() +#define EFSYS_PIO_WRITE_BARRIER() rte_io_wmb() + +/* DMA SYNC */ + +/* + * DPDK does not provide any DMA syncing API, and no PMD drivers + * have any traces of explicit DMA syncing. + * DMA mapping is assumed to be coherent. + */ + +#define EFSYS_DMA_SYNC_FOR_KERNEL(_esmp, _offset, _size) ((void)0) + +/* Just avoid store and compiler (impliciltly) reordering */ +#define EFSYS_DMA_SYNC_FOR_DEVICE(_esmp, _offset, _size) rte_wmb() + +/* TIMESTAMP */ + +typedef uint64_t efsys_timestamp_t; + +#define EFSYS_TIMESTAMP(_usp) \ + do { \ + *(_usp) = rte_get_timer_cycles() * 1000000 / \ + rte_get_timer_hz(); \ + _NOTE(CONSTANTCONDITION); \ + } while (B_FALSE) + +/* KMEM */ + +#define EFSYS_KMEM_ALLOC(_esip, _size, _p) \ + do { \ + (_esip) = (_esip); \ + (_p) = rte_zmalloc("sfc", (_size), 0); \ + _NOTE(CONSTANTCONDITION); \ + } while (B_FALSE) + +#define EFSYS_KMEM_FREE(_esip, _size, _p) \ + do { \ + (void)(_esip); \ + (void)(_size); \ + rte_free((_p)); \ + _NOTE(CONSTANTCONDITION); \ + } while (B_FALSE) + +/* LOCK */ + +typedef rte_spinlock_t efsys_lock_t; + +#define SFC_EFSYS_LOCK_INIT(_eslp, _ifname, _label) \ + rte_spinlock_init((_eslp)) +#define SFC_EFSYS_LOCK_DESTROY(_eslp) ((void)0) +#define SFC_EFSYS_LOCK(_eslp) \ + rte_spinlock_lock((_eslp)) +#define SFC_EFSYS_UNLOCK(_eslp) \ + rte_spinlock_unlock((_eslp)) +#define SFC_EFSYS_LOCK_ASSERT_OWNED(_eslp) \ + SFC_ASSERT(rte_spinlock_is_locked((_eslp))) + +typedef int efsys_lock_state_t; + +#define EFSYS_LOCK_MAGIC 0x000010c4 + +#define EFSYS_LOCK(_lockp, _state) \ + do { \ + SFC_EFSYS_LOCK(_lockp); \ + (_state) = EFSYS_LOCK_MAGIC; \ + _NOTE(CONSTANTCONDITION); \ + } while (B_FALSE) + +#define EFSYS_UNLOCK(_lockp, _state) \ + do { \ + SFC_ASSERT((_state) == EFSYS_LOCK_MAGIC); \ + SFC_EFSYS_UNLOCK(_lockp); \ + _NOTE(CONSTANTCONDITION); \ + } while (B_FALSE) + +/* STAT */ + +typedef uint64_t efsys_stat_t; + +#define EFSYS_STAT_INCR(_knp, _delta) \ + do { \ + *(_knp) += (_delta); \ + _NOTE(CONSTANTCONDITION); \ + } while (B_FALSE) + +#define EFSYS_STAT_DECR(_knp, _delta) \ + do { \ + *(_knp) -= (_delta); \ + _NOTE(CONSTANTCONDITION); \ + } while (B_FALSE) + +#define 
EFSYS_STAT_SET(_knp, _val) \ + do { \ + *(_knp) = (_val); \ + _NOTE(CONSTANTCONDITION); \ + } while (B_FALSE) + +#define EFSYS_STAT_SET_QWORD(_knp, _valp) \ + do { \ + *(_knp) = rte_le_to_cpu_64((_valp)->eq_u64[0]); \ + _NOTE(CONSTANTCONDITION); \ + } while (B_FALSE) + +#define EFSYS_STAT_SET_DWORD(_knp, _valp) \ + do { \ + *(_knp) = rte_le_to_cpu_32((_valp)->ed_u32[0]); \ + _NOTE(CONSTANTCONDITION); \ + } while (B_FALSE) + +#define EFSYS_STAT_INCR_QWORD(_knp, _valp) \ + do { \ + *(_knp) += rte_le_to_cpu_64((_valp)->eq_u64[0]); \ + _NOTE(CONSTANTCONDITION); \ + } while (B_FALSE) + +#define EFSYS_STAT_SUBR_QWORD(_knp, _valp) \ + do { \ + *(_knp) -= rte_le_to_cpu_64((_valp)->eq_u64[0]); \ + _NOTE(CONSTANTCONDITION); \ + } while (B_FALSE) + +/* ERR */ + +#if EFSYS_OPT_DECODE_INTR_FATAL +#define EFSYS_ERR(_esip, _code, _dword0, _dword1) \ + do { \ + (void)(_esip); \ + SFC_GENERIC_LOG(ERR, "FATAL ERROR #%u (0x%08x%08x)", \ + (_code), (_dword0), (_dword1)); \ + _NOTE(CONSTANTCONDITION); \ + } while (B_FALSE) +#endif + +/* ASSERT */ + +/* RTE_VERIFY from DPDK treats expressions with % operator incorrectly, + * so we re-implement it here + */ +#ifdef RTE_LIBRTE_SFC_EFX_DEBUG +#define EFSYS_ASSERT(_exp) \ + do { \ + if (unlikely(!(_exp))) \ + rte_panic("line %d\tassert \"%s\" failed\n", \ + __LINE__, (#_exp)); \ + } while (0) +#else +#define EFSYS_ASSERT(_exp) (void)(_exp) +#endif + +#define EFSYS_ASSERT3(_x, _op, _y, _t) EFSYS_ASSERT((_t)(_x) _op (_t)(_y)) + +#define EFSYS_ASSERT3U(_x, _op, _y) EFSYS_ASSERT3(_x, _op, _y, uint64_t) +#define EFSYS_ASSERT3S(_x, _op, _y) EFSYS_ASSERT3(_x, _op, _y, int64_t) +#define EFSYS_ASSERT3P(_x, _op, _y) EFSYS_ASSERT3(_x, _op, _y, uintptr_t) + +/* ROTATE */ + +#define EFSYS_HAS_ROTL_DWORD 0 + +#ifdef __cplusplus +} +#endif + +#endif /* _SFC_COMMON_EFSYS_H */ diff --git a/src/spdk/dpdk/drivers/net/sfc/meson.build b/src/spdk/dpdk/drivers/net/sfc/meson.build new file mode 100644 index 000000000..35c05ac1d --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/meson.build @@ -0,0 +1,58 @@ +# SPDX-License-Identifier: BSD-3-Clause +# +# Copyright(c) 2019-2020 Xilinx, Inc. +# Copyright(c) 2016-2019 Solarflare Communications Inc. +# +# This software was jointly developed between OKTET Labs (under contract +# for Solarflare) and Solarflare Communications, Inc. 
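#
# Summary of this build file: restrict the PMD to x86_64, probe the
# optional warning flags below with cc.has_argument() so that options a
# given compiler does not support are dropped rather than breaking the
# build, and pull in the libefx sources and headers from base/.
#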
+ +if arch_subdir != 'x86' or not dpdk_conf.get('RTE_ARCH_64') + build = false + reason = 'only supported on x86_64' +endif + +extra_flags = [] + +# Strict-aliasing rules are violated by rte_eth_link to uint64_t casts +extra_flags += '-Wno-strict-aliasing' + +# Enable more warnings +extra_flags += [ + '-Wdisabled-optimization' +] + +# Compiler and version dependent flags +extra_flags += [ + '-Waggregate-return', + '-Wbad-function-cast' +] + +foreach flag: extra_flags + if cc.has_argument(flag) + cflags += flag + endif +endforeach + +subdir('base') +objs = [base_objs] + +sources = files( + 'sfc_ethdev.c', + 'sfc_kvargs.c', + 'sfc.c', + 'sfc_mcdi.c', + 'sfc_intr.c', + 'sfc_ev.c', + 'sfc_port.c', + 'sfc_rx.c', + 'sfc_tx.c', + 'sfc_tso.c', + 'sfc_filter.c', + 'sfc_flow.c', + 'sfc_dp.c', + 'sfc_ef10_rx.c', + 'sfc_ef10_essb_rx.c', + 'sfc_ef10_tx.c' +) + +includes += include_directories('base') diff --git a/src/spdk/dpdk/drivers/net/sfc/rte_pmd_sfc_version.map b/src/spdk/dpdk/drivers/net/sfc/rte_pmd_sfc_version.map new file mode 100644 index 000000000..f9f17e4f6 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/rte_pmd_sfc_version.map @@ -0,0 +1,3 @@ +DPDK_20.0 { + local: *; +}; diff --git a/src/spdk/dpdk/drivers/net/sfc/sfc.c b/src/spdk/dpdk/drivers/net/sfc/sfc.c new file mode 100644 index 000000000..c19d81cc8 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/sfc.c @@ -0,0 +1,1147 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2016-2019 Solarflare Communications Inc. + * + * This software was jointly developed between OKTET Labs (under contract + * for Solarflare) and Solarflare Communications, Inc. + */ + +/* sysconf() */ +#include + +#include +#include + +#include "efx.h" + +#include "sfc.h" +#include "sfc_log.h" +#include "sfc_ev.h" +#include "sfc_rx.h" +#include "sfc_tx.h" +#include "sfc_kvargs.h" +#include "sfc_tweak.h" + + +int +sfc_dma_alloc(const struct sfc_adapter *sa, const char *name, uint16_t id, + size_t len, int socket_id, efsys_mem_t *esmp) +{ + const struct rte_memzone *mz; + + sfc_log_init(sa, "name=%s id=%u len=%zu socket_id=%d", + name, id, len, socket_id); + + mz = rte_eth_dma_zone_reserve(sa->eth_dev, name, id, len, + sysconf(_SC_PAGESIZE), socket_id); + if (mz == NULL) { + sfc_err(sa, "cannot reserve DMA zone for %s:%u %#x@%d: %s", + name, (unsigned int)id, (unsigned int)len, socket_id, + rte_strerror(rte_errno)); + return ENOMEM; + } + + esmp->esm_addr = mz->iova; + if (esmp->esm_addr == RTE_BAD_IOVA) { + (void)rte_memzone_free(mz); + return EFAULT; + } + + esmp->esm_mz = mz; + esmp->esm_base = mz->addr; + + return 0; +} + +void +sfc_dma_free(const struct sfc_adapter *sa, efsys_mem_t *esmp) +{ + int rc; + + sfc_log_init(sa, "name=%s", esmp->esm_mz->name); + + rc = rte_memzone_free(esmp->esm_mz); + if (rc != 0) + sfc_err(sa, "rte_memzone_free(() failed: %d", rc); + + memset(esmp, 0, sizeof(*esmp)); +} + +static uint32_t +sfc_phy_cap_from_link_speeds(uint32_t speeds) +{ + uint32_t phy_caps = 0; + + if (~speeds & ETH_LINK_SPEED_FIXED) { + phy_caps |= (1 << EFX_PHY_CAP_AN); + /* + * If no speeds are specified in the mask, any supported + * may be negotiated + */ + if (speeds == ETH_LINK_SPEED_AUTONEG) + phy_caps |= + (1 << EFX_PHY_CAP_1000FDX) | + (1 << EFX_PHY_CAP_10000FDX) | + (1 << EFX_PHY_CAP_25000FDX) | + (1 << EFX_PHY_CAP_40000FDX) | + (1 << EFX_PHY_CAP_50000FDX) | + (1 << EFX_PHY_CAP_100000FDX); + } + if (speeds & ETH_LINK_SPEED_1G) + phy_caps |= (1 << EFX_PHY_CAP_1000FDX); + if (speeds & ETH_LINK_SPEED_10G) + 
phy_caps |= (1 << EFX_PHY_CAP_10000FDX); + if (speeds & ETH_LINK_SPEED_25G) + phy_caps |= (1 << EFX_PHY_CAP_25000FDX); + if (speeds & ETH_LINK_SPEED_40G) + phy_caps |= (1 << EFX_PHY_CAP_40000FDX); + if (speeds & ETH_LINK_SPEED_50G) + phy_caps |= (1 << EFX_PHY_CAP_50000FDX); + if (speeds & ETH_LINK_SPEED_100G) + phy_caps |= (1 << EFX_PHY_CAP_100000FDX); + + return phy_caps; +} + +/* + * Check requested device level configuration. + * Receive and transmit configuration is checked in corresponding + * modules. + */ +static int +sfc_check_conf(struct sfc_adapter *sa) +{ + const struct rte_eth_conf *conf = &sa->eth_dev->data->dev_conf; + int rc = 0; + + sa->port.phy_adv_cap = + sfc_phy_cap_from_link_speeds(conf->link_speeds) & + sa->port.phy_adv_cap_mask; + if ((sa->port.phy_adv_cap & ~(1 << EFX_PHY_CAP_AN)) == 0) { + sfc_err(sa, "No link speeds from mask %#x are supported", + conf->link_speeds); + rc = EINVAL; + } + +#if !EFSYS_OPT_LOOPBACK + if (conf->lpbk_mode != 0) { + sfc_err(sa, "Loopback not supported"); + rc = EINVAL; + } +#endif + + if (conf->dcb_capability_en != 0) { + sfc_err(sa, "Priority-based flow control not supported"); + rc = EINVAL; + } + + if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) { + sfc_err(sa, "Flow Director not supported"); + rc = EINVAL; + } + + if ((conf->intr_conf.lsc != 0) && + (sa->intr.type != EFX_INTR_LINE) && + (sa->intr.type != EFX_INTR_MESSAGE)) { + sfc_err(sa, "Link status change interrupt not supported"); + rc = EINVAL; + } + + if (conf->intr_conf.rxq != 0 && + (sa->priv.dp_rx->features & SFC_DP_RX_FEAT_INTR) == 0) { + sfc_err(sa, "Receive queue interrupt not supported"); + rc = EINVAL; + } + + return rc; +} + +/* + * Find out maximum number of receive and transmit queues which could be + * advertised. + * + * NIC is kept initialized on success to allow other modules acquire + * defaults and capabilities. + */ +static int +sfc_estimate_resource_limits(struct sfc_adapter *sa) +{ + const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic); + efx_drv_limits_t limits; + int rc; + uint32_t evq_allocated; + uint32_t rxq_allocated; + uint32_t txq_allocated; + + memset(&limits, 0, sizeof(limits)); + + /* Request at least one Rx and Tx queue */ + limits.edl_min_rxq_count = 1; + limits.edl_min_txq_count = 1; + /* Management event queue plus event queue for each Tx and Rx queue */ + limits.edl_min_evq_count = + 1 + limits.edl_min_rxq_count + limits.edl_min_txq_count; + + /* Divide by number of functions to guarantee that all functions + * will get promised resources + */ + /* FIXME Divide by number of functions (not 2) below */ + limits.edl_max_evq_count = encp->enc_evq_limit / 2; + SFC_ASSERT(limits.edl_max_evq_count >= limits.edl_min_rxq_count); + + /* Split equally between receive and transmit */ + limits.edl_max_rxq_count = + MIN(encp->enc_rxq_limit, (limits.edl_max_evq_count - 1) / 2); + SFC_ASSERT(limits.edl_max_rxq_count >= limits.edl_min_rxq_count); + + limits.edl_max_txq_count = + MIN(encp->enc_txq_limit, + limits.edl_max_evq_count - 1 - limits.edl_max_rxq_count); + + if (sa->tso) + limits.edl_max_txq_count = + MIN(limits.edl_max_txq_count, + encp->enc_fw_assisted_tso_v2_n_contexts / + encp->enc_hw_pf_count); + + SFC_ASSERT(limits.edl_max_txq_count >= limits.edl_min_rxq_count); + + /* Configure the minimum required resources needed for the + * driver to operate, and the maximum desired resources that the + * driver is capable of using. 
+ */ + efx_nic_set_drv_limits(sa->nic, &limits); + + sfc_log_init(sa, "init nic"); + rc = efx_nic_init(sa->nic); + if (rc != 0) + goto fail_nic_init; + + /* Find resource dimensions assigned by firmware to this function */ + rc = efx_nic_get_vi_pool(sa->nic, &evq_allocated, &rxq_allocated, + &txq_allocated); + if (rc != 0) + goto fail_get_vi_pool; + + /* It still may allocate more than maximum, ensure limit */ + evq_allocated = MIN(evq_allocated, limits.edl_max_evq_count); + rxq_allocated = MIN(rxq_allocated, limits.edl_max_rxq_count); + txq_allocated = MIN(txq_allocated, limits.edl_max_txq_count); + + /* Subtract management EVQ not used for traffic */ + SFC_ASSERT(evq_allocated > 0); + evq_allocated--; + + /* Right now we use separate EVQ for Rx and Tx */ + sa->rxq_max = MIN(rxq_allocated, evq_allocated / 2); + sa->txq_max = MIN(txq_allocated, evq_allocated - sa->rxq_max); + + /* Keep NIC initialized */ + return 0; + +fail_get_vi_pool: + efx_nic_fini(sa->nic); +fail_nic_init: + return rc; +} + +static int +sfc_set_drv_limits(struct sfc_adapter *sa) +{ + const struct rte_eth_dev_data *data = sa->eth_dev->data; + efx_drv_limits_t lim; + + memset(&lim, 0, sizeof(lim)); + + /* Limits are strict since take into account initial estimation */ + lim.edl_min_evq_count = lim.edl_max_evq_count = + 1 + data->nb_rx_queues + data->nb_tx_queues; + lim.edl_min_rxq_count = lim.edl_max_rxq_count = data->nb_rx_queues; + lim.edl_min_txq_count = lim.edl_max_txq_count = data->nb_tx_queues; + + return efx_nic_set_drv_limits(sa->nic, &lim); +} + +static int +sfc_set_fw_subvariant(struct sfc_adapter *sa) +{ + struct sfc_adapter_shared *sas = sfc_sa2shared(sa); + const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic); + uint64_t tx_offloads = sa->eth_dev->data->dev_conf.txmode.offloads; + unsigned int txq_index; + efx_nic_fw_subvariant_t req_fw_subvariant; + efx_nic_fw_subvariant_t cur_fw_subvariant; + int rc; + + if (!encp->enc_fw_subvariant_no_tx_csum_supported) { + sfc_info(sa, "no-Tx-checksum subvariant not supported"); + return 0; + } + + for (txq_index = 0; txq_index < sas->txq_count; ++txq_index) { + struct sfc_txq_info *txq_info = &sas->txq_info[txq_index]; + + if (txq_info->state & SFC_TXQ_INITIALIZED) + tx_offloads |= txq_info->offloads; + } + + if (tx_offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM | + DEV_TX_OFFLOAD_TCP_CKSUM | + DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)) + req_fw_subvariant = EFX_NIC_FW_SUBVARIANT_DEFAULT; + else + req_fw_subvariant = EFX_NIC_FW_SUBVARIANT_NO_TX_CSUM; + + rc = efx_nic_get_fw_subvariant(sa->nic, &cur_fw_subvariant); + if (rc != 0) { + sfc_err(sa, "failed to get FW subvariant: %d", rc); + return rc; + } + sfc_info(sa, "FW subvariant is %u vs required %u", + cur_fw_subvariant, req_fw_subvariant); + + if (cur_fw_subvariant == req_fw_subvariant) + return 0; + + rc = efx_nic_set_fw_subvariant(sa->nic, req_fw_subvariant); + if (rc != 0) { + sfc_err(sa, "failed to set FW subvariant %u: %d", + req_fw_subvariant, rc); + return rc; + } + sfc_info(sa, "FW subvariant set to %u", req_fw_subvariant); + + return 0; +} + +static int +sfc_try_start(struct sfc_adapter *sa) +{ + const efx_nic_cfg_t *encp; + int rc; + + sfc_log_init(sa, "entry"); + + SFC_ASSERT(sfc_adapter_is_locked(sa)); + SFC_ASSERT(sa->state == SFC_ADAPTER_STARTING); + + sfc_log_init(sa, "set FW subvariant"); + rc = sfc_set_fw_subvariant(sa); + if (rc != 0) + goto fail_set_fw_subvariant; + + sfc_log_init(sa, "set resource limits"); + rc = sfc_set_drv_limits(sa); + if (rc != 0) + goto fail_set_drv_limits; + + 
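	/*
	 * Descriptive note with illustrative numbers (not from the upstream
	 * sources): the limits set above are strict, e.g. with 4 Rx and
	 * 4 Tx queues configured, edl_min_evq_count == edl_max_evq_count ==
	 * 1 + 4 + 4 == 9 (one management event queue plus one per data
	 * queue), so the NIC init below either gets exactly the resources
	 * the configuration needs or fails.
	 */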
sfc_log_init(sa, "init nic"); + rc = efx_nic_init(sa->nic); + if (rc != 0) + goto fail_nic_init; + + encp = efx_nic_cfg_get(sa->nic); + + /* + * Refresh (since it may change on NIC reset/restart) a copy of + * supported tunnel encapsulations in shared memory to be used + * on supported Rx packet type classes get. + */ + sa->priv.shared->tunnel_encaps = + encp->enc_tunnel_encapsulations_supported; + + if (encp->enc_tunnel_encapsulations_supported != 0) { + sfc_log_init(sa, "apply tunnel config"); + rc = efx_tunnel_reconfigure(sa->nic); + if (rc != 0) + goto fail_tunnel_reconfigure; + } + + rc = sfc_intr_start(sa); + if (rc != 0) + goto fail_intr_start; + + rc = sfc_ev_start(sa); + if (rc != 0) + goto fail_ev_start; + + rc = sfc_port_start(sa); + if (rc != 0) + goto fail_port_start; + + rc = sfc_rx_start(sa); + if (rc != 0) + goto fail_rx_start; + + rc = sfc_tx_start(sa); + if (rc != 0) + goto fail_tx_start; + + rc = sfc_flow_start(sa); + if (rc != 0) + goto fail_flows_insert; + + sfc_log_init(sa, "done"); + return 0; + +fail_flows_insert: + sfc_tx_stop(sa); + +fail_tx_start: + sfc_rx_stop(sa); + +fail_rx_start: + sfc_port_stop(sa); + +fail_port_start: + sfc_ev_stop(sa); + +fail_ev_start: + sfc_intr_stop(sa); + +fail_intr_start: +fail_tunnel_reconfigure: + efx_nic_fini(sa->nic); + +fail_nic_init: +fail_set_drv_limits: +fail_set_fw_subvariant: + sfc_log_init(sa, "failed %d", rc); + return rc; +} + +int +sfc_start(struct sfc_adapter *sa) +{ + unsigned int start_tries = 3; + int rc; + + sfc_log_init(sa, "entry"); + + SFC_ASSERT(sfc_adapter_is_locked(sa)); + + switch (sa->state) { + case SFC_ADAPTER_CONFIGURED: + break; + case SFC_ADAPTER_STARTED: + sfc_notice(sa, "already started"); + return 0; + default: + rc = EINVAL; + goto fail_bad_state; + } + + sa->state = SFC_ADAPTER_STARTING; + + do { + rc = sfc_try_start(sa); + } while ((--start_tries > 0) && + (rc == EIO || rc == EAGAIN || rc == ENOENT || rc == EINVAL)); + + if (rc != 0) + goto fail_try_start; + + sa->state = SFC_ADAPTER_STARTED; + sfc_log_init(sa, "done"); + return 0; + +fail_try_start: + sa->state = SFC_ADAPTER_CONFIGURED; +fail_bad_state: + sfc_log_init(sa, "failed %d", rc); + return rc; +} + +void +sfc_stop(struct sfc_adapter *sa) +{ + sfc_log_init(sa, "entry"); + + SFC_ASSERT(sfc_adapter_is_locked(sa)); + + switch (sa->state) { + case SFC_ADAPTER_STARTED: + break; + case SFC_ADAPTER_CONFIGURED: + sfc_notice(sa, "already stopped"); + return; + default: + sfc_err(sa, "stop in unexpected state %u", sa->state); + SFC_ASSERT(B_FALSE); + return; + } + + sa->state = SFC_ADAPTER_STOPPING; + + sfc_flow_stop(sa); + sfc_tx_stop(sa); + sfc_rx_stop(sa); + sfc_port_stop(sa); + sfc_ev_stop(sa); + sfc_intr_stop(sa); + efx_nic_fini(sa->nic); + + sa->state = SFC_ADAPTER_CONFIGURED; + sfc_log_init(sa, "done"); +} + +static int +sfc_restart(struct sfc_adapter *sa) +{ + int rc; + + SFC_ASSERT(sfc_adapter_is_locked(sa)); + + if (sa->state != SFC_ADAPTER_STARTED) + return EINVAL; + + sfc_stop(sa); + + rc = sfc_start(sa); + if (rc != 0) + sfc_err(sa, "restart failed"); + + return rc; +} + +static void +sfc_restart_if_required(void *arg) +{ + struct sfc_adapter *sa = arg; + + /* If restart is scheduled, clear the flag and do it */ + if (rte_atomic32_cmpset((volatile uint32_t *)&sa->restart_required, + 1, 0)) { + sfc_adapter_lock(sa); + if (sa->state == SFC_ADAPTER_STARTED) + (void)sfc_restart(sa); + sfc_adapter_unlock(sa); + } +} + +void +sfc_schedule_restart(struct sfc_adapter *sa) +{ + int rc; + + /* Schedule restart alarm if it is not scheduled yet */ 
+ if (!rte_atomic32_test_and_set(&sa->restart_required)) + return; + + rc = rte_eal_alarm_set(1, sfc_restart_if_required, sa); + if (rc == -ENOTSUP) + sfc_warn(sa, "alarms are not supported, restart is pending"); + else if (rc != 0) + sfc_err(sa, "cannot arm restart alarm (rc=%d)", rc); + else + sfc_notice(sa, "restart scheduled"); +} + +int +sfc_configure(struct sfc_adapter *sa) +{ + int rc; + + sfc_log_init(sa, "entry"); + + SFC_ASSERT(sfc_adapter_is_locked(sa)); + + SFC_ASSERT(sa->state == SFC_ADAPTER_INITIALIZED || + sa->state == SFC_ADAPTER_CONFIGURED); + sa->state = SFC_ADAPTER_CONFIGURING; + + rc = sfc_check_conf(sa); + if (rc != 0) + goto fail_check_conf; + + rc = sfc_intr_configure(sa); + if (rc != 0) + goto fail_intr_configure; + + rc = sfc_port_configure(sa); + if (rc != 0) + goto fail_port_configure; + + rc = sfc_rx_configure(sa); + if (rc != 0) + goto fail_rx_configure; + + rc = sfc_tx_configure(sa); + if (rc != 0) + goto fail_tx_configure; + + sa->state = SFC_ADAPTER_CONFIGURED; + sfc_log_init(sa, "done"); + return 0; + +fail_tx_configure: + sfc_rx_close(sa); + +fail_rx_configure: + sfc_port_close(sa); + +fail_port_configure: + sfc_intr_close(sa); + +fail_intr_configure: +fail_check_conf: + sa->state = SFC_ADAPTER_INITIALIZED; + sfc_log_init(sa, "failed %d", rc); + return rc; +} + +void +sfc_close(struct sfc_adapter *sa) +{ + sfc_log_init(sa, "entry"); + + SFC_ASSERT(sfc_adapter_is_locked(sa)); + + SFC_ASSERT(sa->state == SFC_ADAPTER_CONFIGURED); + sa->state = SFC_ADAPTER_CLOSING; + + sfc_tx_close(sa); + sfc_rx_close(sa); + sfc_port_close(sa); + sfc_intr_close(sa); + + sa->state = SFC_ADAPTER_INITIALIZED; + sfc_log_init(sa, "done"); +} + +static int +sfc_mem_bar_init(struct sfc_adapter *sa, unsigned int membar) +{ + struct rte_eth_dev *eth_dev = sa->eth_dev; + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); + efsys_bar_t *ebp = &sa->mem_bar; + struct rte_mem_resource *res = &pci_dev->mem_resource[membar]; + + SFC_BAR_LOCK_INIT(ebp, eth_dev->data->name); + ebp->esb_rid = membar; + ebp->esb_dev = pci_dev; + ebp->esb_base = res->addr; + return 0; +} + +static void +sfc_mem_bar_fini(struct sfc_adapter *sa) +{ + efsys_bar_t *ebp = &sa->mem_bar; + + SFC_BAR_LOCK_DESTROY(ebp); + memset(ebp, 0, sizeof(*ebp)); +} + +/* + * A fixed RSS key which has a property of being symmetric + * (symmetrical flows are distributed to the same CPU) + * and also known to give a uniform distribution + * (a good distribution of traffic between different CPUs) + */ +static const uint8_t default_rss_key[EFX_RSS_KEY_SIZE] = { + 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, + 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, + 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, + 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, + 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, +}; + +static int +sfc_rss_attach(struct sfc_adapter *sa) +{ + struct sfc_rss *rss = &sfc_sa2shared(sa)->rss; + int rc; + + rc = efx_intr_init(sa->nic, sa->intr.type, NULL); + if (rc != 0) + goto fail_intr_init; + + rc = efx_ev_init(sa->nic); + if (rc != 0) + goto fail_ev_init; + + rc = efx_rx_init(sa->nic); + if (rc != 0) + goto fail_rx_init; + + rc = efx_rx_scale_default_support_get(sa->nic, &rss->context_type); + if (rc != 0) + goto fail_scale_support_get; + + rc = efx_rx_hash_default_support_get(sa->nic, &rss->hash_support); + if (rc != 0) + goto fail_hash_support_get; + + rc = sfc_rx_hash_init(sa); + if (rc != 0) + goto fail_rx_hash_init; + + efx_rx_fini(sa->nic); + efx_ev_fini(sa->nic); + efx_intr_fini(sa->nic); + + 
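	/*
	 * Seed the adapter RSS key with the fixed pattern defined above:
	 * with the Toeplitz hash used for RSS, the repeating 0x6d,0x5a
	 * 40-byte key makes the hash symmetric, i.e. swapping source and
	 * destination address/port pairs yields the same hash value and
	 * therefore the same Rx queue.
	 */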
rte_memcpy(rss->key, default_rss_key, sizeof(rss->key)); + + return 0; + +fail_rx_hash_init: +fail_hash_support_get: +fail_scale_support_get: + efx_rx_fini(sa->nic); + +fail_rx_init: + efx_ev_fini(sa->nic); + +fail_ev_init: + efx_intr_fini(sa->nic); + +fail_intr_init: + return rc; +} + +static void +sfc_rss_detach(struct sfc_adapter *sa) +{ + sfc_rx_hash_fini(sa); +} + +int +sfc_attach(struct sfc_adapter *sa) +{ + const efx_nic_cfg_t *encp; + efx_nic_t *enp = sa->nic; + int rc; + + sfc_log_init(sa, "entry"); + + SFC_ASSERT(sfc_adapter_is_locked(sa)); + + efx_mcdi_new_epoch(enp); + + sfc_log_init(sa, "reset nic"); + rc = efx_nic_reset(enp); + if (rc != 0) + goto fail_nic_reset; + + /* + * Probed NIC is sufficient for tunnel init. + * Initialize tunnel support to be able to use libefx + * efx_tunnel_config_udp_{add,remove}() in any state and + * efx_tunnel_reconfigure() on start up. + */ + rc = efx_tunnel_init(enp); + if (rc != 0) + goto fail_tunnel_init; + + encp = efx_nic_cfg_get(sa->nic); + + /* + * Make a copy of supported tunnel encapsulations in shared + * memory to be used on supported Rx packet type classes get. + */ + sa->priv.shared->tunnel_encaps = + encp->enc_tunnel_encapsulations_supported; + + if (sfc_dp_tx_offload_capa(sa->priv.dp_tx) & DEV_TX_OFFLOAD_TCP_TSO) { + sa->tso = encp->enc_fw_assisted_tso_v2_enabled; + if (!sa->tso) + sfc_info(sa, "TSO support isn't available on this adapter"); + } + + if (sa->tso && + (sfc_dp_tx_offload_capa(sa->priv.dp_tx) & + (DEV_TX_OFFLOAD_VXLAN_TNL_TSO | + DEV_TX_OFFLOAD_GENEVE_TNL_TSO)) != 0) { + sa->tso_encap = encp->enc_fw_assisted_tso_v2_encap_enabled; + if (!sa->tso_encap) + sfc_info(sa, "Encapsulated TSO support isn't available on this adapter"); + } + + sfc_log_init(sa, "estimate resource limits"); + rc = sfc_estimate_resource_limits(sa); + if (rc != 0) + goto fail_estimate_rsrc_limits; + + sa->evq_max_entries = encp->enc_evq_max_nevs; + SFC_ASSERT(rte_is_power_of_2(sa->evq_max_entries)); + + sa->evq_min_entries = encp->enc_evq_min_nevs; + SFC_ASSERT(rte_is_power_of_2(sa->evq_min_entries)); + + sa->rxq_max_entries = encp->enc_rxq_max_ndescs; + SFC_ASSERT(rte_is_power_of_2(sa->rxq_max_entries)); + + sa->rxq_min_entries = encp->enc_rxq_min_ndescs; + SFC_ASSERT(rte_is_power_of_2(sa->rxq_min_entries)); + + sa->txq_max_entries = encp->enc_txq_max_ndescs; + SFC_ASSERT(rte_is_power_of_2(sa->txq_max_entries)); + + sa->txq_min_entries = encp->enc_txq_min_ndescs; + SFC_ASSERT(rte_is_power_of_2(sa->txq_min_entries)); + + rc = sfc_intr_attach(sa); + if (rc != 0) + goto fail_intr_attach; + + rc = sfc_ev_attach(sa); + if (rc != 0) + goto fail_ev_attach; + + rc = sfc_port_attach(sa); + if (rc != 0) + goto fail_port_attach; + + rc = sfc_rss_attach(sa); + if (rc != 0) + goto fail_rss_attach; + + rc = sfc_filter_attach(sa); + if (rc != 0) + goto fail_filter_attach; + + sfc_log_init(sa, "fini nic"); + efx_nic_fini(enp); + + sfc_flow_init(sa); + + sa->state = SFC_ADAPTER_INITIALIZED; + + sfc_log_init(sa, "done"); + return 0; + +fail_filter_attach: + sfc_rss_detach(sa); + +fail_rss_attach: + sfc_port_detach(sa); + +fail_port_attach: + sfc_ev_detach(sa); + +fail_ev_attach: + sfc_intr_detach(sa); + +fail_intr_attach: + efx_nic_fini(sa->nic); + +fail_estimate_rsrc_limits: +fail_tunnel_init: + efx_tunnel_fini(sa->nic); + +fail_nic_reset: + + sfc_log_init(sa, "failed %d", rc); + return rc; +} + +void +sfc_detach(struct sfc_adapter *sa) +{ + sfc_log_init(sa, "entry"); + + SFC_ASSERT(sfc_adapter_is_locked(sa)); + + sfc_flow_fini(sa); + + sfc_filter_detach(sa); + 
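	/*
	 * The remaining teardown mirrors sfc_attach() in reverse order:
	 * RSS, port, event queues, interrupts and finally the tunnel
	 * state set up by efx_tunnel_init().
	 */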
sfc_rss_detach(sa); + sfc_port_detach(sa); + sfc_ev_detach(sa); + sfc_intr_detach(sa); + efx_tunnel_fini(sa->nic); + + sa->state = SFC_ADAPTER_UNINITIALIZED; +} + +static int +sfc_kvarg_fv_variant_handler(__rte_unused const char *key, + const char *value_str, void *opaque) +{ + uint32_t *value = opaque; + + if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_DONT_CARE) == 0) + *value = EFX_FW_VARIANT_DONT_CARE; + else if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_FULL_FEATURED) == 0) + *value = EFX_FW_VARIANT_FULL_FEATURED; + else if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_LOW_LATENCY) == 0) + *value = EFX_FW_VARIANT_LOW_LATENCY; + else if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_PACKED_STREAM) == 0) + *value = EFX_FW_VARIANT_PACKED_STREAM; + else if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_DPDK) == 0) + *value = EFX_FW_VARIANT_DPDK; + else + return -EINVAL; + + return 0; +} + +static int +sfc_get_fw_variant(struct sfc_adapter *sa, efx_fw_variant_t *efv) +{ + efx_nic_fw_info_t enfi; + int rc; + + rc = efx_nic_get_fw_version(sa->nic, &enfi); + if (rc != 0) + return rc; + else if (!enfi.enfi_dpcpu_fw_ids_valid) + return ENOTSUP; + + /* + * Firmware variant can be uniquely identified by the RxDPCPU + * firmware id + */ + switch (enfi.enfi_rx_dpcpu_fw_id) { + case EFX_RXDP_FULL_FEATURED_FW_ID: + *efv = EFX_FW_VARIANT_FULL_FEATURED; + break; + + case EFX_RXDP_LOW_LATENCY_FW_ID: + *efv = EFX_FW_VARIANT_LOW_LATENCY; + break; + + case EFX_RXDP_PACKED_STREAM_FW_ID: + *efv = EFX_FW_VARIANT_PACKED_STREAM; + break; + + case EFX_RXDP_DPDK_FW_ID: + *efv = EFX_FW_VARIANT_DPDK; + break; + + default: + /* + * Other firmware variants are not considered, since they are + * not supported in the device parameters + */ + *efv = EFX_FW_VARIANT_DONT_CARE; + break; + } + + return 0; +} + +static const char * +sfc_fw_variant2str(efx_fw_variant_t efv) +{ + switch (efv) { + case EFX_RXDP_FULL_FEATURED_FW_ID: + return SFC_KVARG_FW_VARIANT_FULL_FEATURED; + case EFX_RXDP_LOW_LATENCY_FW_ID: + return SFC_KVARG_FW_VARIANT_LOW_LATENCY; + case EFX_RXDP_PACKED_STREAM_FW_ID: + return SFC_KVARG_FW_VARIANT_PACKED_STREAM; + case EFX_RXDP_DPDK_FW_ID: + return SFC_KVARG_FW_VARIANT_DPDK; + default: + return "unknown"; + } +} + +static int +sfc_kvarg_rxd_wait_timeout_ns(struct sfc_adapter *sa) +{ + int rc; + long value; + + value = SFC_RXD_WAIT_TIMEOUT_NS_DEF; + + rc = sfc_kvargs_process(sa, SFC_KVARG_RXD_WAIT_TIMEOUT_NS, + sfc_kvarg_long_handler, &value); + if (rc != 0) + return rc; + + if (value < 0 || + (unsigned long)value > EFX_RXQ_ES_SUPER_BUFFER_HOL_BLOCK_MAX) { + sfc_err(sa, "wrong '" SFC_KVARG_RXD_WAIT_TIMEOUT_NS "' " + "was set (%ld);", value); + sfc_err(sa, "it must not be less than 0 or greater than %u", + EFX_RXQ_ES_SUPER_BUFFER_HOL_BLOCK_MAX); + return EINVAL; + } + + sa->rxd_wait_timeout_ns = value; + return 0; +} + +static int +sfc_nic_probe(struct sfc_adapter *sa) +{ + efx_nic_t *enp = sa->nic; + efx_fw_variant_t preferred_efv; + efx_fw_variant_t efv; + int rc; + + preferred_efv = EFX_FW_VARIANT_DONT_CARE; + rc = sfc_kvargs_process(sa, SFC_KVARG_FW_VARIANT, + sfc_kvarg_fv_variant_handler, + &preferred_efv); + if (rc != 0) { + sfc_err(sa, "invalid %s parameter value", SFC_KVARG_FW_VARIANT); + return rc; + } + + rc = sfc_kvarg_rxd_wait_timeout_ns(sa); + if (rc != 0) + return rc; + + rc = efx_nic_probe(enp, preferred_efv); + if (rc == EACCES) { + /* Unprivileged functions cannot set FW variant */ + rc = efx_nic_probe(enp, EFX_FW_VARIANT_DONT_CARE); + } + if (rc != 0) + return rc; + + rc = sfc_get_fw_variant(sa, 
&efv); + if (rc == ENOTSUP) { + sfc_warn(sa, "FW variant can not be obtained"); + return 0; + } + if (rc != 0) + return rc; + + /* Check that firmware variant was changed to the requested one */ + if (preferred_efv != EFX_FW_VARIANT_DONT_CARE && preferred_efv != efv) { + sfc_warn(sa, "FW variant has not changed to the requested %s", + sfc_fw_variant2str(preferred_efv)); + } + + sfc_notice(sa, "running FW variant is %s", sfc_fw_variant2str(efv)); + + return 0; +} + +int +sfc_probe(struct sfc_adapter *sa) +{ + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(sa->eth_dev); + unsigned int membar; + efx_nic_t *enp; + int rc; + + sfc_log_init(sa, "entry"); + + SFC_ASSERT(sfc_adapter_is_locked(sa)); + + sa->socket_id = rte_socket_id(); + rte_atomic32_init(&sa->restart_required); + + sfc_log_init(sa, "get family"); + rc = efx_family(pci_dev->id.vendor_id, pci_dev->id.device_id, + &sa->family, &membar); + if (rc != 0) + goto fail_family; + sfc_log_init(sa, "family is %u, membar is %u", sa->family, membar); + + sfc_log_init(sa, "init mem bar"); + rc = sfc_mem_bar_init(sa, membar); + if (rc != 0) + goto fail_mem_bar_init; + + sfc_log_init(sa, "create nic"); + rte_spinlock_init(&sa->nic_lock); + rc = efx_nic_create(sa->family, (efsys_identifier_t *)sa, + &sa->mem_bar, &sa->nic_lock, &enp); + if (rc != 0) + goto fail_nic_create; + sa->nic = enp; + + rc = sfc_mcdi_init(sa); + if (rc != 0) + goto fail_mcdi_init; + + sfc_log_init(sa, "probe nic"); + rc = sfc_nic_probe(sa); + if (rc != 0) + goto fail_nic_probe; + + sfc_log_init(sa, "done"); + return 0; + +fail_nic_probe: + sfc_mcdi_fini(sa); + +fail_mcdi_init: + sfc_log_init(sa, "destroy nic"); + sa->nic = NULL; + efx_nic_destroy(enp); + +fail_nic_create: + sfc_mem_bar_fini(sa); + +fail_mem_bar_init: +fail_family: + sfc_log_init(sa, "failed %d", rc); + return rc; +} + +void +sfc_unprobe(struct sfc_adapter *sa) +{ + efx_nic_t *enp = sa->nic; + + sfc_log_init(sa, "entry"); + + SFC_ASSERT(sfc_adapter_is_locked(sa)); + + sfc_log_init(sa, "unprobe nic"); + efx_nic_unprobe(enp); + + sfc_mcdi_fini(sa); + + /* + * Make sure there is no pending alarm to restart since we are + * going to free device private which is passed as the callback + * opaque data. A new alarm cannot be scheduled since MCDI is + * shut down. 
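+ * Cancelling the alarm only after MCDI shutdown therefore closes
+ * the race: nothing can re-arm a restart between the cancel below
+ * and the release of the device private data.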
+ */ + rte_eal_alarm_cancel(sfc_restart_if_required, sa); + + sfc_log_init(sa, "destroy nic"); + sa->nic = NULL; + efx_nic_destroy(enp); + + sfc_mem_bar_fini(sa); + + sfc_flow_fini(sa); + sa->state = SFC_ADAPTER_UNINITIALIZED; +} + +uint32_t +sfc_register_logtype(const struct rte_pci_addr *pci_addr, + const char *lt_prefix_str, uint32_t ll_default) +{ + size_t lt_prefix_str_size = strlen(lt_prefix_str); + size_t lt_str_size_max; + char *lt_str = NULL; + int ret; + + if (SIZE_MAX - PCI_PRI_STR_SIZE - 1 > lt_prefix_str_size) { + ++lt_prefix_str_size; /* Reserve space for prefix separator */ + lt_str_size_max = lt_prefix_str_size + PCI_PRI_STR_SIZE + 1; + } else { + return sfc_logtype_driver; + } + + lt_str = rte_zmalloc("logtype_str", lt_str_size_max, 0); + if (lt_str == NULL) + return sfc_logtype_driver; + + strncpy(lt_str, lt_prefix_str, lt_prefix_str_size); + lt_str[lt_prefix_str_size - 1] = '.'; + rte_pci_device_name(pci_addr, lt_str + lt_prefix_str_size, + lt_str_size_max - lt_prefix_str_size); + lt_str[lt_str_size_max - 1] = '\0'; + + ret = rte_log_register_type_and_pick_level(lt_str, ll_default); + rte_free(lt_str); + + if (ret < 0) + return sfc_logtype_driver; + + return ret; +} diff --git a/src/spdk/dpdk/drivers/net/sfc/sfc.h b/src/spdk/dpdk/drivers/net/sfc/sfc.h new file mode 100644 index 000000000..cf95ebaf9 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/sfc.h @@ -0,0 +1,417 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2016-2019 Solarflare Communications Inc. + * + * This software was jointly developed between OKTET Labs (under contract + * for Solarflare) and Solarflare Communications, Inc. + */ + +#ifndef _SFC_H +#define _SFC_H + +#include + +#include +#include +#include +#include +#include +#include + +#include "efx.h" + +#include "sfc_filter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * +---------------+ + * | UNINITIALIZED |<-----------+ + * +---------------+ | + * |.eth_dev_init |.eth_dev_uninit + * V | + * +---------------+------------+ + * | INITIALIZED | + * +---------------+<-----------<---------------+ + * |.dev_configure | | + * V |failed | + * +---------------+------------+ | + * | CONFIGURING | | + * +---------------+----+ | + * |success | | + * | | +---------------+ + * | | | CLOSING | + * | | +---------------+ + * | | ^ + * V |.dev_configure | + * +---------------+----+ |.dev_close + * | CONFIGURED |----------------------------+ + * +---------------+<-----------+ + * |.dev_start | + * V | + * +---------------+ | + * | STARTING |------------^ + * +---------------+ failed | + * |success | + * | +---------------+ + * | | STOPPING | + * | +---------------+ + * | ^ + * V |.dev_stop + * +---------------+------------+ + * | STARTED | + * +---------------+ + */ +enum sfc_adapter_state { + SFC_ADAPTER_UNINITIALIZED = 0, + SFC_ADAPTER_INITIALIZED, + SFC_ADAPTER_CONFIGURING, + SFC_ADAPTER_CONFIGURED, + SFC_ADAPTER_CLOSING, + SFC_ADAPTER_STARTING, + SFC_ADAPTER_STARTED, + SFC_ADAPTER_STOPPING, + + SFC_ADAPTER_NSTATES +}; + +enum sfc_dev_filter_mode { + SFC_DEV_FILTER_MODE_PROMISC = 0, + SFC_DEV_FILTER_MODE_ALLMULTI, + + SFC_DEV_FILTER_NMODES +}; + +enum sfc_mcdi_state { + SFC_MCDI_UNINITIALIZED = 0, + SFC_MCDI_INITIALIZED, + SFC_MCDI_BUSY, + SFC_MCDI_COMPLETED, + + SFC_MCDI_NSTATES +}; + +struct sfc_mcdi { + rte_spinlock_t lock; + efsys_mem_t mem; + enum sfc_mcdi_state state; + efx_mcdi_transport_t transport; + uint32_t logtype; + uint32_t proxy_handle; + efx_rc_t proxy_result; +}; + +struct sfc_intr { 
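+ /*
+ * Interrupt support state: the libefx interrupt type in use, the
+ * callback that services the PCI interrupt and flags telling
+ * whether link status change and Rx queue interrupts are in use.
+ */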
+ efx_intr_type_t type; + rte_intr_callback_fn handler; + boolean_t lsc_intr; + boolean_t rxq_intr; +}; + +struct sfc_rxq; +struct sfc_txq; + +struct sfc_rxq_info; +struct sfc_txq_info; +struct sfc_dp_rx; + +struct sfc_port { + unsigned int lsc_seq; + + uint32_t phy_adv_cap_mask; + uint32_t phy_adv_cap; + + unsigned int flow_ctrl; + boolean_t flow_ctrl_autoneg; + size_t pdu; + + /* + * Flow API isolated mode overrides promisc and allmulti settings; + * they won't be applied if isolated mode is active + */ + boolean_t promisc; + boolean_t allmulti; + + struct rte_ether_addr default_mac_addr; + + unsigned int max_mcast_addrs; + unsigned int nb_mcast_addrs; + uint8_t *mcast_addrs; + + rte_spinlock_t mac_stats_lock; + uint64_t *mac_stats_buf; + unsigned int mac_stats_nb_supported; + efsys_mem_t mac_stats_dma_mem; + boolean_t mac_stats_reset_pending; + uint16_t mac_stats_update_period_ms; + uint32_t mac_stats_update_generation; + boolean_t mac_stats_periodic_dma_supported; + uint64_t mac_stats_last_request_timestamp; + + uint32_t mac_stats_mask[EFX_MAC_STATS_MASK_NPAGES]; + + uint64_t ipackets; +}; + +struct sfc_rss_hf_rte_to_efx { + uint64_t rte; + efx_rx_hash_type_t efx; +}; + +struct sfc_rss { + unsigned int channels; + efx_rx_scale_context_type_t context_type; + efx_rx_hash_support_t hash_support; + efx_rx_hash_alg_t hash_alg; + unsigned int hf_map_nb_entries; + struct sfc_rss_hf_rte_to_efx *hf_map; + + efx_rx_hash_type_t hash_types; + unsigned int tbl[EFX_RSS_TBL_SIZE]; + uint8_t key[EFX_RSS_KEY_SIZE]; +}; + +/* Adapter private data shared by primary and secondary processes */ +struct sfc_adapter_shared { + unsigned int rxq_count; + struct sfc_rxq_info *rxq_info; + + unsigned int txq_count; + struct sfc_txq_info *txq_info; + + struct sfc_rss rss; + + boolean_t isolated; + uint32_t tunnel_encaps; + + struct rte_pci_addr pci_addr; + uint16_t port_id; + + char *dp_rx_name; + char *dp_tx_name; +}; + +/* Adapter process private data */ +struct sfc_adapter_priv { + struct sfc_adapter_shared *shared; + const struct sfc_dp_rx *dp_rx; + const struct sfc_dp_tx *dp_tx; + uint32_t logtype_main; +}; + +static inline struct sfc_adapter_priv * +sfc_adapter_priv_by_eth_dev(struct rte_eth_dev *eth_dev) +{ + struct sfc_adapter_priv *sap = eth_dev->process_private; + + SFC_ASSERT(sap != NULL); + return sap; +} + +/* Adapter private data */ +struct sfc_adapter { + /* + * It must be the first field of the sfc_adapter structure since + * sfc_adapter is the primary process private data (i.e. process + * private data plus additional primary process specific data). + */ + struct sfc_adapter_priv priv; + + /* + * PMD setup and configuration is not thread safe. Since it is not + * performance sensitive, it is better to guarantee thread-safety + * and add device level lock. Adapter control operations which + * change its state should acquire the lock. 
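+ * The lock is intended to be taken and released only via the
+ * sfc_adapter_lock*() helpers defined at the end of this header,
+ * so the locking primitive can be changed in one place.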
+ */ + rte_spinlock_t lock; + enum sfc_adapter_state state; + struct rte_eth_dev *eth_dev; + struct rte_kvargs *kvargs; + int socket_id; + efsys_bar_t mem_bar; + efx_family_t family; + efx_nic_t *nic; + rte_spinlock_t nic_lock; + rte_atomic32_t restart_required; + + struct sfc_mcdi mcdi; + struct sfc_intr intr; + struct sfc_port port; + struct sfc_filter filter; + + struct sfc_flow_list flow_list; + + unsigned int rxq_max; + unsigned int txq_max; + + unsigned int rxq_max_entries; + unsigned int rxq_min_entries; + + unsigned int txq_max_entries; + unsigned int txq_min_entries; + + unsigned int evq_max_entries; + unsigned int evq_min_entries; + + uint32_t evq_flags; + unsigned int evq_count; + + unsigned int mgmt_evq_index; + /* + * The lock is used to serialise management event queue polling + * which can be done from different context. Also the lock + * guarantees that mgmt_evq_running is preserved while the lock + * is held. It is used to serialise polling and start/stop + * operations. + * + * Locks which may be held when the lock is acquired: + * - adapter lock, when: + * - device start/stop to change mgmt_evq_running + * - any control operations in client side MCDI proxy handling to + * poll management event queue waiting for proxy response + * - MCDI lock, when: + * - any control operations in client side MCDI proxy handling to + * poll management event queue waiting for proxy response + * + * Locks which are acquired with the lock held: + * - nic_lock, when: + * - MC event processing on management event queue polling + * (e.g. MC REBOOT or BADASSERT events) + */ + rte_spinlock_t mgmt_evq_lock; + bool mgmt_evq_running; + struct sfc_evq *mgmt_evq; + + struct sfc_rxq *rxq_ctrl; + struct sfc_txq *txq_ctrl; + + boolean_t tso; + boolean_t tso_encap; + + uint32_t rxd_wait_timeout_ns; +}; + +static inline struct sfc_adapter_shared * +sfc_adapter_shared_by_eth_dev(struct rte_eth_dev *eth_dev) +{ + struct sfc_adapter_shared *sas = eth_dev->data->dev_private; + + return sas; +} + +static inline struct sfc_adapter * +sfc_adapter_by_eth_dev(struct rte_eth_dev *eth_dev) +{ + struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(eth_dev); + + SFC_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY); + + return container_of(sap, struct sfc_adapter, priv); +} + +static inline struct sfc_adapter_shared * +sfc_sa2shared(struct sfc_adapter *sa) +{ + return sa->priv.shared; +} + +/* + * Add wrapper functions to acquire/release lock to be able to remove or + * change the lock in one place. 
+ */ + +static inline void +sfc_adapter_lock_init(struct sfc_adapter *sa) +{ + rte_spinlock_init(&sa->lock); +} + +static inline int +sfc_adapter_is_locked(struct sfc_adapter *sa) +{ + return rte_spinlock_is_locked(&sa->lock); +} + +static inline void +sfc_adapter_lock(struct sfc_adapter *sa) +{ + rte_spinlock_lock(&sa->lock); +} + +static inline int +sfc_adapter_trylock(struct sfc_adapter *sa) +{ + return rte_spinlock_trylock(&sa->lock); +} + +static inline void +sfc_adapter_unlock(struct sfc_adapter *sa) +{ + rte_spinlock_unlock(&sa->lock); +} + +static inline void +sfc_adapter_lock_fini(__rte_unused struct sfc_adapter *sa) +{ + /* Just for symmetry of the API */ +} + +/** Get the number of milliseconds since boot from the default timer */ +static inline uint64_t +sfc_get_system_msecs(void) +{ + return rte_get_timer_cycles() * MS_PER_S / rte_get_timer_hz(); +} + +int sfc_dma_alloc(const struct sfc_adapter *sa, const char *name, uint16_t id, + size_t len, int socket_id, efsys_mem_t *esmp); +void sfc_dma_free(const struct sfc_adapter *sa, efsys_mem_t *esmp); + +uint32_t sfc_register_logtype(const struct rte_pci_addr *pci_addr, + const char *lt_prefix_str, + uint32_t ll_default); + +int sfc_probe(struct sfc_adapter *sa); +void sfc_unprobe(struct sfc_adapter *sa); +int sfc_attach(struct sfc_adapter *sa); +void sfc_detach(struct sfc_adapter *sa); +int sfc_start(struct sfc_adapter *sa); +void sfc_stop(struct sfc_adapter *sa); + +void sfc_schedule_restart(struct sfc_adapter *sa); + +int sfc_mcdi_init(struct sfc_adapter *sa); +void sfc_mcdi_fini(struct sfc_adapter *sa); + +int sfc_configure(struct sfc_adapter *sa); +void sfc_close(struct sfc_adapter *sa); + +int sfc_intr_attach(struct sfc_adapter *sa); +void sfc_intr_detach(struct sfc_adapter *sa); +int sfc_intr_configure(struct sfc_adapter *sa); +void sfc_intr_close(struct sfc_adapter *sa); +int sfc_intr_start(struct sfc_adapter *sa); +void sfc_intr_stop(struct sfc_adapter *sa); + +int sfc_port_attach(struct sfc_adapter *sa); +void sfc_port_detach(struct sfc_adapter *sa); +int sfc_port_configure(struct sfc_adapter *sa); +void sfc_port_close(struct sfc_adapter *sa); +int sfc_port_start(struct sfc_adapter *sa); +void sfc_port_stop(struct sfc_adapter *sa); +void sfc_port_link_mode_to_info(efx_link_mode_t link_mode, + struct rte_eth_link *link_info); +int sfc_port_update_mac_stats(struct sfc_adapter *sa); +int sfc_port_reset_mac_stats(struct sfc_adapter *sa); +int sfc_set_rx_mode(struct sfc_adapter *sa); +int sfc_set_rx_mode_unchecked(struct sfc_adapter *sa); + + +#ifdef __cplusplus +} +#endif + +#endif /* _SFC_H */ diff --git a/src/spdk/dpdk/drivers/net/sfc/sfc_debug.h b/src/spdk/dpdk/drivers/net/sfc/sfc_debug.h new file mode 100644 index 000000000..8d6998924 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/sfc_debug.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2016-2019 Solarflare Communications Inc. + * + * This software was jointly developed between OKTET Labs (under contract + * for Solarflare) and Solarflare Communications, Inc. + */ + +#ifndef _SFC_DEBUG_H_ +#define _SFC_DEBUG_H_ + +#include + +#ifdef RTE_LIBRTE_SFC_EFX_DEBUG +/* Avoid dependency from RTE_LOG_DP_LEVEL to be able to enable debug check + * in the driver only. 
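+ * RTE_VERIFY() is compiled in unconditionally, so the assertion
+ * stays active whenever the driver debug option is enabled,
+ * independently of how DPDK itself was built.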
+ */ +#define SFC_ASSERT(exp) RTE_VERIFY(exp) +#else +/* If the driver debug is not enabled, follow DPDK debug/non-debug */ +#define SFC_ASSERT(exp) RTE_ASSERT(exp) +#endif + +/* Log PMD message, automatically add prefix and \n */ +#define sfc_panic(sa, fmt, args...) \ + do { \ + const struct sfc_adapter_shared *_sas; \ + \ + _sas = (sa)->priv.shared; \ + rte_panic("sfc " PCI_PRI_FMT \ + " #%" PRIu16 ": " fmt "\n", \ + _sas->pci_addr.domain, _sas->pci_addr.bus, \ + _sas->pci_addr.devid, _sas->pci_addr.function,\ + _sas->port_id, ##args); \ + } while (0) + +#endif /* _SFC_DEBUG_H_ */ diff --git a/src/spdk/dpdk/drivers/net/sfc/sfc_dp.c b/src/spdk/dpdk/drivers/net/sfc/sfc_dp.c new file mode 100644 index 000000000..027dcaba2 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/sfc_dp.c @@ -0,0 +1,79 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2017-2019 Solarflare Communications Inc. + * + * This software was jointly developed between OKTET Labs (under contract + * for Solarflare) and Solarflare Communications, Inc. + */ + +#include +#include +#include + +#include + +#include "sfc_dp.h" +#include "sfc_log.h" + +void +sfc_dp_queue_init(struct sfc_dp_queue *dpq, uint16_t port_id, uint16_t queue_id, + const struct rte_pci_addr *pci_addr) +{ + dpq->port_id = port_id; + dpq->queue_id = queue_id; + dpq->pci_addr = *pci_addr; +} + +struct sfc_dp * +sfc_dp_find_by_name(struct sfc_dp_list *head, enum sfc_dp_type type, + const char *name) +{ + struct sfc_dp *entry; + + TAILQ_FOREACH(entry, head, links) { + if (entry->type != type) + continue; + + if (strcmp(entry->name, name) == 0) + return entry; + } + + return NULL; +} + +struct sfc_dp * +sfc_dp_find_by_caps(struct sfc_dp_list *head, enum sfc_dp_type type, + unsigned int avail_caps) +{ + struct sfc_dp *entry; + + TAILQ_FOREACH(entry, head, links) { + if (entry->type != type) + continue; + + /* Take the first matching */ + if (sfc_dp_match_hw_fw_caps(entry, avail_caps)) + return entry; + } + + return NULL; +} + +int +sfc_dp_register(struct sfc_dp_list *head, struct sfc_dp *entry) +{ + if (sfc_dp_find_by_name(head, entry->type, entry->name) != NULL) { + SFC_GENERIC_LOG(ERR, + "sfc %s dapapath '%s' already registered", + entry->type == SFC_DP_RX ? "Rx" : + entry->type == SFC_DP_TX ? "Tx" : + "unknown", + entry->name); + return EEXIST; + } + + TAILQ_INSERT_TAIL(head, entry, links); + + return 0; +} diff --git a/src/spdk/dpdk/drivers/net/sfc/sfc_dp.h b/src/spdk/dpdk/drivers/net/sfc/sfc_dp.h new file mode 100644 index 000000000..a161b0b07 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/sfc_dp.h @@ -0,0 +1,106 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2017-2019 Solarflare Communications Inc. + * + * This software was jointly developed between OKTET Labs (under contract + * for Solarflare) and Solarflare Communications, Inc. + */ + +#ifndef _SFC_DP_H +#define _SFC_DP_H + +#include +#include + +#include + +#include "sfc_log.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define SFC_DIV_ROUND_UP(a, b) \ + __extension__ ({ \ + typeof(a) _a = (a); \ + typeof(b) _b = (b); \ + \ + (_a + (_b - 1)) / _b; \ + }) + +/** + * Datapath exception handler to be provided by the control path. 
+ */ +typedef void (sfc_dp_exception_t)(void *ctrl); + +enum sfc_dp_type { + SFC_DP_RX = 0, /**< Receive datapath */ + SFC_DP_TX, /**< Transmit datapath */ +}; + + +/** Datapath queue run-time information */ +struct sfc_dp_queue { + uint16_t port_id; + uint16_t queue_id; + struct rte_pci_addr pci_addr; +}; + +void sfc_dp_queue_init(struct sfc_dp_queue *dpq, + uint16_t port_id, uint16_t queue_id, + const struct rte_pci_addr *pci_addr); + +/* + * Helper macro to define datapath logging macros and have uniform + * logging. + */ +#define SFC_DP_LOG(dp_name, level, dpq, ...) \ + do { \ + const struct sfc_dp_queue *_dpq = (dpq); \ + const struct rte_pci_addr *_addr = &(_dpq)->pci_addr; \ + \ + SFC_GENERIC_LOG(level, \ + RTE_FMT("%s " PCI_PRI_FMT \ + " #%" PRIu16 ".%" PRIu16 ": " \ + RTE_FMT_HEAD(__VA_ARGS__ ,), \ + dp_name, \ + _addr->domain, _addr->bus, \ + _addr->devid, _addr->function, \ + _dpq->port_id, _dpq->queue_id, \ + RTE_FMT_TAIL(__VA_ARGS__,))); \ + } while (0) + + +/** Datapath definition */ +struct sfc_dp { + TAILQ_ENTRY(sfc_dp) links; + const char *name; + enum sfc_dp_type type; + /* Mask of required hardware/firmware capabilities */ + unsigned int hw_fw_caps; +#define SFC_DP_HW_FW_CAP_EF10 0x1 +#define SFC_DP_HW_FW_CAP_RX_ES_SUPER_BUFFER 0x2 +}; + +/** List of datapath variants */ +TAILQ_HEAD(sfc_dp_list, sfc_dp); + +/* Check if available HW/FW capabilities are sufficient for the datapath */ +static inline bool +sfc_dp_match_hw_fw_caps(const struct sfc_dp *dp, unsigned int avail_caps) +{ + return (dp->hw_fw_caps & avail_caps) == dp->hw_fw_caps; +} + +struct sfc_dp *sfc_dp_find_by_name(struct sfc_dp_list *head, + enum sfc_dp_type type, const char *name); +struct sfc_dp *sfc_dp_find_by_caps(struct sfc_dp_list *head, + enum sfc_dp_type type, + unsigned int avail_caps); +int sfc_dp_register(struct sfc_dp_list *head, struct sfc_dp *entry); + +#ifdef __cplusplus +} +#endif +#endif /* _SFC_DP_H */ diff --git a/src/spdk/dpdk/drivers/net/sfc/sfc_dp_rx.h b/src/spdk/dpdk/drivers/net/sfc/sfc_dp_rx.h new file mode 100644 index 000000000..2101fd754 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/sfc_dp_rx.h @@ -0,0 +1,273 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2017-2019 Solarflare Communications Inc. + * + * This software was jointly developed between OKTET Labs (under contract + * for Solarflare) and Solarflare Communications, Inc. + */ + +#ifndef _SFC_DP_RX_H +#define _SFC_DP_RX_H + +#include +#include + +#include "sfc_dp.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * Generic receive queue information used on data path. + * It must be kept as small as it is possible since it is built into + * the structure used on datapath. + */ +struct sfc_dp_rxq { + struct sfc_dp_queue dpq; +}; + +/** Datapath receive queue descriptor number limitations */ +struct sfc_dp_rx_hw_limits { + unsigned int rxq_max_entries; + unsigned int rxq_min_entries; + unsigned int evq_max_entries; + unsigned int evq_min_entries; +}; + +/** + * Datapath receive queue creation information. + * + * The structure is used just to pass information from control path to + * datapath. It could be just function arguments, but it would be hardly + * readable. 
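+ * The control path fills the structure in; the datapath only reads
+ * it (the qcreate callbacks take a const pointer).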
+ */ +struct sfc_dp_rx_qcreate_info { + /** Memory pool to allocate Rx buffer from */ + struct rte_mempool *refill_mb_pool; + /** Maximum number of pushed Rx descriptors in the queue */ + unsigned int max_fill_level; + /** Minimum number of unused Rx descriptors to do refill */ + unsigned int refill_threshold; + /** + * Usable mbuf data space in accordance with alignment and + * padding requirements imposed by HW. + */ + unsigned int buf_size; + + /** + * Maximum number of Rx descriptors completed in one Rx event. + * Just for sanity checks if datapath would like to do. + */ + unsigned int batch_max; + + /** Pseudo-header size */ + unsigned int prefix_size; + + /** Receive queue flags initializer */ + unsigned int flags; +#define SFC_RXQ_FLAG_RSS_HASH 0x1 + + /** Rx queue size */ + unsigned int rxq_entries; + /** DMA-mapped Rx descriptors ring */ + void *rxq_hw_ring; + + /** Event queue index in hardware */ + unsigned int evq_hw_index; + /** Associated event queue size */ + unsigned int evq_entries; + /** Hardware event ring */ + void *evq_hw_ring; + + /** The queue index in hardware (required to push right doorbell) */ + unsigned int hw_index; + /** + * Virtual address of the memory-mapped BAR to push Rx refill + * doorbell + */ + volatile void *mem_bar; + /** VI window size shift */ + unsigned int vi_window_shift; +}; + +/** + * Get Rx datapath specific device info. + * + * @param dev_info Device info to be adjusted + */ +typedef void (sfc_dp_rx_get_dev_info_t)(struct rte_eth_dev_info *dev_info); + +/** + * Test if an Rx datapath supports specific mempool ops. + * + * @param pool The name of the pool operations to test. + * + * @return Check status. + * @retval 0 Best mempool ops choice. + * @retval 1 Mempool ops are supported. + * @retval -ENOTSUP Mempool ops not supported. + */ +typedef int (sfc_dp_rx_pool_ops_supported_t)(const char *pool); + +/** + * Get size of receive and event queue rings by the number of Rx + * descriptors and mempool configuration. + * + * @param nb_rx_desc Number of Rx descriptors + * @param mb_pool mbuf pool with Rx buffers + * @param rxq_entries Location for number of Rx ring entries + * @param evq_entries Location for number of event ring entries + * @param rxq_max_fill_level Location for maximum Rx ring fill level + * + * @return 0 or positive errno. + */ +typedef int (sfc_dp_rx_qsize_up_rings_t)(uint16_t nb_rx_desc, + struct sfc_dp_rx_hw_limits *limits, + struct rte_mempool *mb_pool, + unsigned int *rxq_entries, + unsigned int *evq_entries, + unsigned int *rxq_max_fill_level); + +/** + * Allocate and initialize datapath receive queue. + * + * @param port_id The port identifier + * @param queue_id The queue identifier + * @param pci_addr PCI function address + * @param socket_id Socket identifier to allocate memory + * @param info Receive queue information + * @param dp_rxqp Location for generic datapath receive queue pointer + * + * @return 0 or positive errno. + */ +typedef int (sfc_dp_rx_qcreate_t)(uint16_t port_id, uint16_t queue_id, + const struct rte_pci_addr *pci_addr, + int socket_id, + const struct sfc_dp_rx_qcreate_info *info, + struct sfc_dp_rxq **dp_rxqp); + +/** + * Free resources allocated for datapath recevie queue. + */ +typedef void (sfc_dp_rx_qdestroy_t)(struct sfc_dp_rxq *dp_rxq); + +/** + * Receive queue start callback. + * + * It handovers EvQ to the datapath. + */ +typedef int (sfc_dp_rx_qstart_t)(struct sfc_dp_rxq *dp_rxq, + unsigned int evq_read_ptr); + +/** + * Receive queue stop function called before flush. 
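+ *
+ * It returns the event queue read pointer back to the control path
+ * via 'evq_read_ptr', mirroring the start callback which hands the
+ * event queue over to the datapath.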
+ */ +typedef void (sfc_dp_rx_qstop_t)(struct sfc_dp_rxq *dp_rxq, + unsigned int *evq_read_ptr); + +/** + * Receive event handler used during queue flush only. + */ +typedef bool (sfc_dp_rx_qrx_ev_t)(struct sfc_dp_rxq *dp_rxq, unsigned int id); + +/** + * Packed stream receive event handler used during queue flush only. + */ +typedef bool (sfc_dp_rx_qrx_ps_ev_t)(struct sfc_dp_rxq *dp_rxq, + unsigned int id); + +/** + * Receive queue purge function called after queue flush. + * + * Should be used to free unused recevie buffers. + */ +typedef void (sfc_dp_rx_qpurge_t)(struct sfc_dp_rxq *dp_rxq); + +/** Get packet types recognized/classified */ +typedef const uint32_t * (sfc_dp_rx_supported_ptypes_get_t)( + uint32_t tunnel_encaps); + +/** Get number of pending Rx descriptors */ +typedef unsigned int (sfc_dp_rx_qdesc_npending_t)(struct sfc_dp_rxq *dp_rxq); + +/** Check Rx descriptor status */ +typedef int (sfc_dp_rx_qdesc_status_t)(struct sfc_dp_rxq *dp_rxq, + uint16_t offset); +/** Enable Rx interrupts */ +typedef int (sfc_dp_rx_intr_enable_t)(struct sfc_dp_rxq *dp_rxq); + +/** Disable Rx interrupts */ +typedef int (sfc_dp_rx_intr_disable_t)(struct sfc_dp_rxq *dp_rxq); + +/** Receive datapath definition */ +struct sfc_dp_rx { + struct sfc_dp dp; + + unsigned int features; +#define SFC_DP_RX_FEAT_MULTI_PROCESS 0x1 +#define SFC_DP_RX_FEAT_FLOW_FLAG 0x2 +#define SFC_DP_RX_FEAT_FLOW_MARK 0x4 +#define SFC_DP_RX_FEAT_INTR 0x8 + /** + * Rx offload capabilities supported by the datapath on device + * level only if HW/FW supports it. + */ + uint64_t dev_offload_capa; + /** + * Rx offload capabilities supported by the datapath per-queue + * if HW/FW supports it. + */ + uint64_t queue_offload_capa; + sfc_dp_rx_get_dev_info_t *get_dev_info; + sfc_dp_rx_pool_ops_supported_t *pool_ops_supported; + sfc_dp_rx_qsize_up_rings_t *qsize_up_rings; + sfc_dp_rx_qcreate_t *qcreate; + sfc_dp_rx_qdestroy_t *qdestroy; + sfc_dp_rx_qstart_t *qstart; + sfc_dp_rx_qstop_t *qstop; + sfc_dp_rx_qrx_ev_t *qrx_ev; + sfc_dp_rx_qrx_ps_ev_t *qrx_ps_ev; + sfc_dp_rx_qpurge_t *qpurge; + sfc_dp_rx_supported_ptypes_get_t *supported_ptypes_get; + sfc_dp_rx_qdesc_npending_t *qdesc_npending; + sfc_dp_rx_qdesc_status_t *qdesc_status; + sfc_dp_rx_intr_enable_t *intr_enable; + sfc_dp_rx_intr_disable_t *intr_disable; + eth_rx_burst_t pkt_burst; +}; + +static inline struct sfc_dp_rx * +sfc_dp_find_rx_by_name(struct sfc_dp_list *head, const char *name) +{ + struct sfc_dp *p = sfc_dp_find_by_name(head, SFC_DP_RX, name); + + return (p == NULL) ? NULL : container_of(p, struct sfc_dp_rx, dp); +} + +static inline struct sfc_dp_rx * +sfc_dp_find_rx_by_caps(struct sfc_dp_list *head, unsigned int avail_caps) +{ + struct sfc_dp *p = sfc_dp_find_by_caps(head, SFC_DP_RX, avail_caps); + + return (p == NULL) ? 
NULL : container_of(p, struct sfc_dp_rx, dp); +} + +static inline uint64_t +sfc_dp_rx_offload_capa(const struct sfc_dp_rx *dp_rx) +{ + return dp_rx->dev_offload_capa | dp_rx->queue_offload_capa; +} + +/** Get Rx datapath ops by the datapath RxQ handle */ +const struct sfc_dp_rx *sfc_dp_rx_by_dp_rxq(const struct sfc_dp_rxq *dp_rxq); + +extern struct sfc_dp_rx sfc_efx_rx; +extern struct sfc_dp_rx sfc_ef10_rx; +extern struct sfc_dp_rx sfc_ef10_essb_rx; + +#ifdef __cplusplus +} +#endif +#endif /* _SFC_DP_RX_H */ diff --git a/src/spdk/dpdk/drivers/net/sfc/sfc_dp_tx.h b/src/spdk/dpdk/drivers/net/sfc/sfc_dp_tx.h new file mode 100644 index 000000000..dcad4fe58 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/sfc_dp_tx.h @@ -0,0 +1,296 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2016-2019 Solarflare Communications Inc. + * + * This software was jointly developed between OKTET Labs (under contract + * for Solarflare) and Solarflare Communications, Inc. + */ + +#ifndef _SFC_DP_TX_H +#define _SFC_DP_TX_H + +#include + +#include "sfc_dp.h" +#include "sfc_debug.h" +#include "sfc_tso.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * Generic transmit queue information used on data path. + * It must be kept as small as it is possible since it is built into + * the structure used on datapath. + */ +struct sfc_dp_txq { + struct sfc_dp_queue dpq; +}; + +/** Datapath transmit queue descriptor number limitations */ +struct sfc_dp_tx_hw_limits { + unsigned int txq_max_entries; + unsigned int txq_min_entries; +}; + +/** + * Datapath transmit queue creation information. + * + * The structure is used just to pass information from control path to + * datapath. It could be just function arguments, but it would be hardly + * readable. + */ +struct sfc_dp_tx_qcreate_info { + /** Maximum number of pushed Tx descriptors */ + unsigned int max_fill_level; + /** Minimum number of unused Tx descriptors to do reap */ + unsigned int free_thresh; + /** Offloads enabled on the transmit queue */ + uint64_t offloads; + /** Tx queue size */ + unsigned int txq_entries; + /** Maximum size of data in the DMA descriptor */ + uint16_t dma_desc_size_max; + /** DMA-mapped Tx descriptors ring */ + void *txq_hw_ring; + /** Associated event queue size */ + unsigned int evq_entries; + /** Hardware event ring */ + void *evq_hw_ring; + /** The queue index in hardware (required to push right doorbell) */ + unsigned int hw_index; + /** Virtual address of the memory-mapped BAR to push Tx doorbell */ + volatile void *mem_bar; + /** VI window size shift */ + unsigned int vi_window_shift; + /** + * Maximum number of bytes into the packet the TCP header can start for + * the hardware to apply TSO packet edits. + */ + uint16_t tso_tcp_header_offset_limit; +}; + +/** + * Get Tx datapath specific device info. + * + * @param dev_info Device info to be adjusted + */ +typedef void (sfc_dp_tx_get_dev_info_t)(struct rte_eth_dev_info *dev_info); + +/** + * Get size of transmit and event queue rings by the number of Tx + * descriptors. + * + * @param nb_tx_desc Number of Tx descriptors + * @param txq_entries Location for number of Tx ring entries + * @param evq_entries Location for number of event ring entries + * @param txq_max_fill_level Location for maximum Tx ring fill level + * + * @return 0 or positive errno. 
+ */ +typedef int (sfc_dp_tx_qsize_up_rings_t)(uint16_t nb_tx_desc, + struct sfc_dp_tx_hw_limits *limits, + unsigned int *txq_entries, + unsigned int *evq_entries, + unsigned int *txq_max_fill_level); + +/** + * Allocate and initialize datapath transmit queue. + * + * @param port_id The port identifier + * @param queue_id The queue identifier + * @param pci_addr PCI function address + * @param socket_id Socket identifier to allocate memory + * @param info Tx queue details wrapped in structure + * @param dp_txqp Location for generic datapath transmit queue pointer + * + * @return 0 or positive errno. + */ +typedef int (sfc_dp_tx_qcreate_t)(uint16_t port_id, uint16_t queue_id, + const struct rte_pci_addr *pci_addr, + int socket_id, + const struct sfc_dp_tx_qcreate_info *info, + struct sfc_dp_txq **dp_txqp); + +/** + * Free resources allocated for datapath transmit queue. + */ +typedef void (sfc_dp_tx_qdestroy_t)(struct sfc_dp_txq *dp_txq); + +/** + * Transmit queue start callback. + * + * It handovers EvQ to the datapath. + */ +typedef int (sfc_dp_tx_qstart_t)(struct sfc_dp_txq *dp_txq, + unsigned int evq_read_ptr, + unsigned int txq_desc_index); + +/** + * Transmit queue stop function called before the queue flush. + * + * It returns EvQ to the control path. + */ +typedef void (sfc_dp_tx_qstop_t)(struct sfc_dp_txq *dp_txq, + unsigned int *evq_read_ptr); + +/** + * Transmit event handler used during queue flush only. + */ +typedef bool (sfc_dp_tx_qtx_ev_t)(struct sfc_dp_txq *dp_txq, unsigned int id); + +/** + * Transmit queue function called after the queue flush. + */ +typedef void (sfc_dp_tx_qreap_t)(struct sfc_dp_txq *dp_txq); + +/** + * Check Tx descriptor status + */ +typedef int (sfc_dp_tx_qdesc_status_t)(struct sfc_dp_txq *dp_txq, + uint16_t offset); + +/** Transmit datapath definition */ +struct sfc_dp_tx { + struct sfc_dp dp; + + unsigned int features; +#define SFC_DP_TX_FEAT_MULTI_PROCESS 0x1 + /** + * Tx offload capabilities supported by the datapath on device + * level only if HW/FW supports it. + */ + uint64_t dev_offload_capa; + /** + * Tx offload capabilities supported by the datapath per-queue + * if HW/FW supports it. + */ + uint64_t queue_offload_capa; + sfc_dp_tx_get_dev_info_t *get_dev_info; + sfc_dp_tx_qsize_up_rings_t *qsize_up_rings; + sfc_dp_tx_qcreate_t *qcreate; + sfc_dp_tx_qdestroy_t *qdestroy; + sfc_dp_tx_qstart_t *qstart; + sfc_dp_tx_qstop_t *qstop; + sfc_dp_tx_qtx_ev_t *qtx_ev; + sfc_dp_tx_qreap_t *qreap; + sfc_dp_tx_qdesc_status_t *qdesc_status; + eth_tx_prep_t pkt_prepare; + eth_tx_burst_t pkt_burst; +}; + +static inline struct sfc_dp_tx * +sfc_dp_find_tx_by_name(struct sfc_dp_list *head, const char *name) +{ + struct sfc_dp *p = sfc_dp_find_by_name(head, SFC_DP_TX, name); + + return (p == NULL) ? NULL : container_of(p, struct sfc_dp_tx, dp); +} + +static inline struct sfc_dp_tx * +sfc_dp_find_tx_by_caps(struct sfc_dp_list *head, unsigned int avail_caps) +{ + struct sfc_dp *p = sfc_dp_find_by_caps(head, SFC_DP_TX, avail_caps); + + return (p == NULL) ? 
NULL : container_of(p, struct sfc_dp_tx, dp); +} + +/** Get Tx datapath ops by the datapath TxQ handle */ +const struct sfc_dp_tx *sfc_dp_tx_by_dp_txq(const struct sfc_dp_txq *dp_txq); + +static inline uint64_t +sfc_dp_tx_offload_capa(const struct sfc_dp_tx *dp_tx) +{ + return dp_tx->dev_offload_capa | dp_tx->queue_offload_capa; +} + +static inline int +sfc_dp_tx_prepare_pkt(struct rte_mbuf *m, + uint32_t tso_tcp_header_offset_limit, + unsigned int max_fill_level, + unsigned int nb_tso_descs, + unsigned int nb_vlan_descs) +{ + unsigned int descs_required = m->nb_segs; + +#ifdef RTE_LIBRTE_SFC_EFX_DEBUG + int ret; + + ret = rte_validate_tx_offload(m); + if (ret != 0) { + /* + * Negative error code is returned by rte_validate_tx_offload(), + * but positive are used inside net/sfc PMD. + */ + SFC_ASSERT(ret < 0); + return -ret; + } +#endif + + if (m->ol_flags & PKT_TX_TCP_SEG) { + unsigned int tcph_off = m->l2_len + m->l3_len; + unsigned int header_len; + + switch (m->ol_flags & PKT_TX_TUNNEL_MASK) { + case 0: + break; + case PKT_TX_TUNNEL_VXLAN: + /* FALLTHROUGH */ + case PKT_TX_TUNNEL_GENEVE: + if (!(m->ol_flags & + (PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IPV6))) + return EINVAL; + + tcph_off += m->outer_l2_len + m->outer_l3_len; + } + + header_len = tcph_off + m->l4_len; + + if (unlikely(tcph_off > tso_tcp_header_offset_limit)) + return EINVAL; + + descs_required += nb_tso_descs; + + /* + * Extra descriptor that is required when a packet header + * is separated from remaining content of the first segment. + */ + if (rte_pktmbuf_data_len(m) > header_len) { + descs_required++; + } else if (rte_pktmbuf_data_len(m) < header_len && + unlikely(header_len > SFC_TSOH_STD_LEN)) { + /* + * Header linearization is required and + * the header is too big to be linearized + */ + return EINVAL; + } + } + + /* + * The number of VLAN descriptors is added regardless of requested + * VLAN offload since VLAN is sticky and sending packet without VLAN + * insertion may require VLAN descriptor to reset the sticky to 0. + */ + descs_required += nb_vlan_descs; + + /* + * Max fill level must be sufficient to hold all required descriptors + * to send the packet entirely. + */ + if (descs_required > max_fill_level) + return ENOBUFS; + + return 0; +} + +extern struct sfc_dp_tx sfc_efx_tx; +extern struct sfc_dp_tx sfc_ef10_tx; +extern struct sfc_dp_tx sfc_ef10_simple_tx; + +#ifdef __cplusplus +} +#endif +#endif /* _SFC_DP_TX_H */ diff --git a/src/spdk/dpdk/drivers/net/sfc/sfc_ef10.h b/src/spdk/dpdk/drivers/net/sfc/sfc_ef10.h new file mode 100644 index 000000000..f138e8d9b --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/sfc_ef10.h @@ -0,0 +1,131 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2017-2019 Solarflare Communications Inc. + * + * This software was jointly developed between OKTET Labs (under contract + * for Solarflare) and Solarflare Communications, Inc. 
+ */ + +#ifndef _SFC_EF10_H +#define _SFC_EF10_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* Number of events in one cache line */ +#define SFC_EF10_EV_PER_CACHE_LINE \ + (RTE_CACHE_LINE_SIZE / sizeof(efx_qword_t)) + +#define SFC_EF10_EV_QCLEAR_MASK (~(SFC_EF10_EV_PER_CACHE_LINE - 1)) + +#if defined(SFC_EF10_EV_QCLEAR_USE_EFX) +static inline void +sfc_ef10_ev_qclear_cache_line(void *ptr) +{ + efx_qword_t *entry = ptr; + unsigned int i; + + for (i = 0; i < SFC_EF10_EV_PER_CACHE_LINE; ++i) + EFX_SET_QWORD(entry[i]); +} +#else +/* + * It is possible to do it using AVX2 and AVX512F, but it shows less + * performance. + */ +static inline void +sfc_ef10_ev_qclear_cache_line(void *ptr) +{ + const __m128i val = _mm_set1_epi64x(UINT64_MAX); + __m128i *addr = ptr; + unsigned int i; + + RTE_BUILD_BUG_ON(sizeof(val) > RTE_CACHE_LINE_SIZE); + RTE_BUILD_BUG_ON(RTE_CACHE_LINE_SIZE % sizeof(val) != 0); + + for (i = 0; i < RTE_CACHE_LINE_SIZE / sizeof(val); ++i) + _mm_store_si128(&addr[i], val); +} +#endif + +static inline void +sfc_ef10_ev_qclear(efx_qword_t *hw_ring, unsigned int ptr_mask, + unsigned int old_read_ptr, unsigned int read_ptr) +{ + const unsigned int clear_ptr = read_ptr & SFC_EF10_EV_QCLEAR_MASK; + unsigned int old_clear_ptr = old_read_ptr & SFC_EF10_EV_QCLEAR_MASK; + + while (old_clear_ptr != clear_ptr) { + sfc_ef10_ev_qclear_cache_line( + &hw_ring[old_clear_ptr & ptr_mask]); + old_clear_ptr += SFC_EF10_EV_PER_CACHE_LINE; + } + + /* + * No barriers here. + * Functions which push doorbell should care about correct + * ordering: store instructions which fill in EvQ ring should be + * retired from CPU and DMA sync before doorbell which will allow + * to use these event entries. + */ +} + +static inline bool +sfc_ef10_ev_present(const efx_qword_t ev) +{ + return ~EFX_QWORD_FIELD(ev, EFX_DWORD_0) | + ~EFX_QWORD_FIELD(ev, EFX_DWORD_1); +} + + +/** + * Alignment requirement for value written to RX WPTR: + * the WPTR must be aligned to an 8 descriptor boundary. + */ +#define SFC_EF10_RX_WPTR_ALIGN 8u + +static inline void +sfc_ef10_rx_qpush(volatile void *doorbell, unsigned int added, + unsigned int ptr_mask) +{ + efx_dword_t dword; + + /* Hardware has alignment restriction for WPTR */ + RTE_BUILD_BUG_ON(SFC_RX_REFILL_BULK % SFC_EF10_RX_WPTR_ALIGN != 0); + SFC_ASSERT(RTE_ALIGN(added, SFC_EF10_RX_WPTR_ALIGN) == added); + + EFX_POPULATE_DWORD_1(dword, ERF_DZ_RX_DESC_WPTR, added & ptr_mask); + + /* DMA sync to device is not required */ + + /* + * rte_write32() has rte_io_wmb() which guarantees that the STORE + * operations (i.e. Rx and event descriptor updates) that precede + * the rte_io_wmb() call are visible to NIC before the STORE + * operations that follow it (i.e. doorbell write). + */ + rte_write32(dword.ed_u32[0], doorbell); +} + +static inline void +sfc_ef10_ev_qprime(volatile void *qprime, unsigned int read_ptr, + unsigned int ptr_mask) +{ + efx_dword_t dword; + + EFX_POPULATE_DWORD_1(dword, ERF_DZ_EVQ_RPTR, read_ptr & ptr_mask); + + rte_write32_relaxed(dword.ed_u32[0], qprime); + rte_wmb(); +} + + +const uint32_t * sfc_ef10_supported_ptypes_get(uint32_t tunnel_encaps); + + +#ifdef __cplusplus +} +#endif +#endif /* _SFC_EF10_H */ diff --git a/src/spdk/dpdk/drivers/net/sfc/sfc_ef10_essb_rx.c b/src/spdk/dpdk/drivers/net/sfc/sfc_ef10_essb_rx.c new file mode 100644 index 000000000..3bc136e04 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/sfc_ef10_essb_rx.c @@ -0,0 +1,734 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. 
+ * Copyright(c) 2017-2019 Solarflare Communications Inc. + * + * This software was jointly developed between OKTET Labs (under contract + * for Solarflare) and Solarflare Communications, Inc. + */ + +/* EF10 equal stride packed stream receive native datapath implementation */ + +#include + +#include +#include +#include +#include + +#include "efx.h" +#include "efx_types.h" +#include "efx_regs.h" +#include "efx_regs_ef10.h" + +#include "sfc_tweak.h" +#include "sfc_dp_rx.h" +#include "sfc_kvargs.h" +#include "sfc_ef10.h" + +/* Tunnels are not supported */ +#define SFC_EF10_RX_EV_ENCAP_SUPPORT 0 +#include "sfc_ef10_rx_ev.h" + +#define sfc_ef10_essb_rx_err(dpq, ...) \ + SFC_DP_LOG(SFC_KVARG_DATAPATH_EF10_ESSB, ERR, dpq, __VA_ARGS__) + +#define sfc_ef10_essb_rx_info(dpq, ...) \ + SFC_DP_LOG(SFC_KVARG_DATAPATH_EF10_ESSB, INFO, dpq, __VA_ARGS__) + +/* + * Fake length for RXQ descriptors in equal stride super-buffer mode + * to make hardware happy. + */ +#define SFC_EF10_ESSB_RX_FAKE_BUF_SIZE 32 + +/** + * Minimum number of Rx buffers the datapath allows to use. + * + * Each HW Rx descriptor has many Rx buffers. The number of buffers + * in one HW Rx descriptor is equal to size of contiguous block + * provided by Rx buffers memory pool. The contiguous block size + * depends on CONFIG_RTE_DRIVER_MEMPOOL_BUCKET_SIZE_KB and rte_mbuf + * data size specified on the memory pool creation. Typical rte_mbuf + * data size is about 2k which makes a bit less than 32 buffers in + * contiguous block with default bucket size equal to 64k. + * Since HW Rx descriptors are pushed by 8 (see SFC_EF10_RX_WPTR_ALIGN), + * it makes about 256 as required minimum. Double it in advertised + * minimum to allow for at least 2 refill blocks. + */ +#define SFC_EF10_ESSB_RX_DESCS_MIN 512 + +/** + * Number of Rx buffers should be aligned to. + * + * There are no extra requirements on alignment since actual number of + * pushed Rx buffers will be multiple by contiguous block size which + * is unknown beforehand. + */ +#define SFC_EF10_ESSB_RX_DESCS_ALIGN 1 + +/** + * Maximum number of descriptors/buffers in the Rx ring. + * It should guarantee that corresponding event queue never overfill. 
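+ *
+ * The limit reserves one entry so the head never steps on the tail,
+ * up to SFC_EF10_EV_PER_CACHE_LINE - 1 entries which may stay unused
+ * because events are cleared per cache line, and one entry each for
+ * Rx error and flush events. For example, with a 64 byte cache line
+ * (8 events per line) and a 2048 entry event queue the limit is
+ * 2048 - 1 - 7 - 1 - 1 = 2038.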
+ */ +#define SFC_EF10_ESSB_RXQ_LIMIT(_nevs) \ + ((_nevs) - 1 /* head must not step on tail */ - \ + (SFC_EF10_EV_PER_CACHE_LINE - 1) /* max unused EvQ entries */ - \ + 1 /* Rx error */ - 1 /* flush */) + +struct sfc_ef10_essb_rx_sw_desc { + struct rte_mbuf *first_mbuf; +}; + +struct sfc_ef10_essb_rxq { + /* Used on data path */ + unsigned int flags; +#define SFC_EF10_ESSB_RXQ_STARTED 0x1 +#define SFC_EF10_ESSB_RXQ_NOT_RUNNING 0x2 +#define SFC_EF10_ESSB_RXQ_EXCEPTION 0x4 + unsigned int rxq_ptr_mask; + unsigned int block_size; + unsigned int buf_stride; + unsigned int bufs_ptr; + unsigned int completed; + unsigned int pending_id; + unsigned int bufs_pending; + unsigned int left_in_completed; + unsigned int left_in_pending; + unsigned int evq_read_ptr; + unsigned int evq_ptr_mask; + efx_qword_t *evq_hw_ring; + struct sfc_ef10_essb_rx_sw_desc *sw_ring; + uint16_t port_id; + + /* Used on refill */ + unsigned int added; + unsigned int max_fill_level; + unsigned int refill_threshold; + struct rte_mempool *refill_mb_pool; + efx_qword_t *rxq_hw_ring; + volatile void *doorbell; + + /* Datapath receive queue anchor */ + struct sfc_dp_rxq dp; +}; + +static inline struct sfc_ef10_essb_rxq * +sfc_ef10_essb_rxq_by_dp_rxq(struct sfc_dp_rxq *dp_rxq) +{ + return container_of(dp_rxq, struct sfc_ef10_essb_rxq, dp); +} + +static struct rte_mbuf * +sfc_ef10_essb_next_mbuf(const struct sfc_ef10_essb_rxq *rxq, + struct rte_mbuf *mbuf) +{ + struct rte_mbuf *m; + + m = (struct rte_mbuf *)((uintptr_t)mbuf + rxq->buf_stride); + MBUF_RAW_ALLOC_CHECK(m); + return m; +} + +static struct rte_mbuf * +sfc_ef10_essb_mbuf_by_index(const struct sfc_ef10_essb_rxq *rxq, + struct rte_mbuf *mbuf, unsigned int idx) +{ + struct rte_mbuf *m; + + m = (struct rte_mbuf *)((uintptr_t)mbuf + idx * rxq->buf_stride); + MBUF_RAW_ALLOC_CHECK(m); + return m; +} + +static struct rte_mbuf * +sfc_ef10_essb_maybe_next_completed(struct sfc_ef10_essb_rxq *rxq) +{ + const struct sfc_ef10_essb_rx_sw_desc *rxd; + + if (rxq->left_in_completed != 0) { + rxd = &rxq->sw_ring[rxq->completed & rxq->rxq_ptr_mask]; + return sfc_ef10_essb_mbuf_by_index(rxq, rxd->first_mbuf, + rxq->block_size - rxq->left_in_completed); + } else { + rxq->completed++; + rxd = &rxq->sw_ring[rxq->completed & rxq->rxq_ptr_mask]; + rxq->left_in_completed = rxq->block_size; + return rxd->first_mbuf; + } +} + +static void +sfc_ef10_essb_rx_qrefill(struct sfc_ef10_essb_rxq *rxq) +{ + const unsigned int rxq_ptr_mask = rxq->rxq_ptr_mask; + unsigned int free_space; + unsigned int bulks; + void *mbuf_blocks[SFC_EF10_RX_WPTR_ALIGN]; + unsigned int added = rxq->added; + + free_space = rxq->max_fill_level - (added - rxq->completed); + + if (free_space < rxq->refill_threshold) + return; + + bulks = free_space / RTE_DIM(mbuf_blocks); + /* refill_threshold guarantees that bulks is positive */ + SFC_ASSERT(bulks > 0); + + do { + unsigned int id; + unsigned int i; + + if (unlikely(rte_mempool_get_contig_blocks(rxq->refill_mb_pool, + mbuf_blocks, RTE_DIM(mbuf_blocks)) < 0)) { + struct rte_eth_dev_data *dev_data = + rte_eth_devices[rxq->port_id].data; + + /* + * It is hardly a safe way to increment counter + * from different contexts, but all PMDs do it. 
+ */ + dev_data->rx_mbuf_alloc_failed += RTE_DIM(mbuf_blocks); + /* Return if we have posted nothing yet */ + if (added == rxq->added) + return; + /* Push posted */ + break; + } + + for (i = 0, id = added & rxq_ptr_mask; + i < RTE_DIM(mbuf_blocks); + ++i, ++id) { + struct rte_mbuf *m = mbuf_blocks[i]; + struct sfc_ef10_essb_rx_sw_desc *rxd; + + SFC_ASSERT((id & ~rxq_ptr_mask) == 0); + rxd = &rxq->sw_ring[id]; + rxd->first_mbuf = m; + + /* RX_KER_BYTE_CNT is ignored by firmware */ + EFX_POPULATE_QWORD_2(rxq->rxq_hw_ring[id], + ESF_DZ_RX_KER_BYTE_CNT, + SFC_EF10_ESSB_RX_FAKE_BUF_SIZE, + ESF_DZ_RX_KER_BUF_ADDR, + rte_mbuf_data_iova_default(m)); + } + + added += RTE_DIM(mbuf_blocks); + + } while (--bulks > 0); + + SFC_ASSERT(rxq->added != added); + rxq->added = added; + sfc_ef10_rx_qpush(rxq->doorbell, added, rxq_ptr_mask); +} + +static bool +sfc_ef10_essb_rx_event_get(struct sfc_ef10_essb_rxq *rxq, efx_qword_t *rx_ev) +{ + *rx_ev = rxq->evq_hw_ring[rxq->evq_read_ptr & rxq->evq_ptr_mask]; + + if (!sfc_ef10_ev_present(*rx_ev)) + return false; + + if (unlikely(EFX_QWORD_FIELD(*rx_ev, FSF_AZ_EV_CODE) != + FSE_AZ_EV_CODE_RX_EV)) { + /* + * Do not move read_ptr to keep the event for exception + * handling + */ + rxq->flags |= SFC_EF10_ESSB_RXQ_EXCEPTION; + sfc_ef10_essb_rx_err(&rxq->dp.dpq, + "RxQ exception at EvQ read ptr %#x", + rxq->evq_read_ptr); + return false; + } + + rxq->evq_read_ptr++; + return true; +} + +static void +sfc_ef10_essb_rx_process_ev(struct sfc_ef10_essb_rxq *rxq, efx_qword_t rx_ev) +{ + unsigned int ready; + + ready = (EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_DSC_PTR_LBITS) - + rxq->bufs_ptr) & + EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS); + + rxq->bufs_ptr += ready; + rxq->bufs_pending += ready; + + SFC_ASSERT(ready > 0); + do { + const struct sfc_ef10_essb_rx_sw_desc *rxd; + struct rte_mbuf *m; + unsigned int todo_bufs; + struct rte_mbuf *m0; + + rxd = &rxq->sw_ring[rxq->pending_id]; + m = sfc_ef10_essb_mbuf_by_index(rxq, rxd->first_mbuf, + rxq->block_size - rxq->left_in_pending); + + if (ready < rxq->left_in_pending) { + todo_bufs = ready; + ready = 0; + rxq->left_in_pending -= todo_bufs; + } else { + todo_bufs = rxq->left_in_pending; + ready -= todo_bufs; + rxq->left_in_pending = rxq->block_size; + if (rxq->pending_id != rxq->rxq_ptr_mask) + rxq->pending_id++; + else + rxq->pending_id = 0; + } + + SFC_ASSERT(todo_bufs > 0); + --todo_bufs; + + sfc_ef10_rx_ev_to_offloads(rx_ev, m, ~0ull); + + /* Prefetch pseudo-header */ + rte_prefetch0((uint8_t *)m->buf_addr + RTE_PKTMBUF_HEADROOM); + + m0 = m; + while (todo_bufs-- > 0) { + m = sfc_ef10_essb_next_mbuf(rxq, m); + m->ol_flags = m0->ol_flags; + m->packet_type = m0->packet_type; + /* Prefetch pseudo-header */ + rte_prefetch0((uint8_t *)m->buf_addr + + RTE_PKTMBUF_HEADROOM); + } + } while (ready > 0); +} + +static unsigned int +sfc_ef10_essb_rx_get_pending(struct sfc_ef10_essb_rxq *rxq, + struct rte_mbuf **rx_pkts, uint16_t nb_pkts) +{ + unsigned int n_rx_pkts = 0; + unsigned int todo_bufs; + struct rte_mbuf *m; + + while ((todo_bufs = RTE_MIN(nb_pkts - n_rx_pkts, + rxq->bufs_pending)) > 0) { + m = sfc_ef10_essb_maybe_next_completed(rxq); + + todo_bufs = RTE_MIN(todo_bufs, rxq->left_in_completed); + + rxq->bufs_pending -= todo_bufs; + rxq->left_in_completed -= todo_bufs; + + SFC_ASSERT(todo_bufs > 0); + todo_bufs--; + + do { + const efx_qword_t *qwordp; + uint16_t pkt_len; + + /* Buffers to be discarded have 0 in packet type */ + if (unlikely(m->packet_type == 0)) { + rte_mbuf_raw_free(m); + goto next_buf; + } + + rx_pkts[n_rx_pkts++] = m; + 
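+ /*
+ * In equal stride super-buffer mode the NIC writes a per-packet
+ * prefix at the start of each Rx buffer; the packet length, RSS
+ * hash and user mark extracted below are taken from that prefix
+ * rather than from Rx events.
+ */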
+ /* Parse pseudo-header */ + qwordp = (const efx_qword_t *) + ((uint8_t *)m->buf_addr + RTE_PKTMBUF_HEADROOM); + pkt_len = + EFX_QWORD_FIELD(*qwordp, + ES_EZ_ESSB_RX_PREFIX_DATA_LEN); + + m->data_off = RTE_PKTMBUF_HEADROOM + + ES_EZ_ESSB_RX_PREFIX_LEN; + m->port = rxq->port_id; + + rte_pktmbuf_pkt_len(m) = pkt_len; + rte_pktmbuf_data_len(m) = pkt_len; + + m->ol_flags |= + (PKT_RX_RSS_HASH * + !!EFX_TEST_QWORD_BIT(*qwordp, + ES_EZ_ESSB_RX_PREFIX_HASH_VALID_LBN)) | + (PKT_RX_FDIR_ID * + !!EFX_TEST_QWORD_BIT(*qwordp, + ES_EZ_ESSB_RX_PREFIX_MARK_VALID_LBN)) | + (PKT_RX_FDIR * + !!EFX_TEST_QWORD_BIT(*qwordp, + ES_EZ_ESSB_RX_PREFIX_MATCH_FLAG_LBN)); + + /* EFX_QWORD_FIELD converts little-endian to CPU */ + m->hash.rss = + EFX_QWORD_FIELD(*qwordp, + ES_EZ_ESSB_RX_PREFIX_HASH); + m->hash.fdir.hi = + EFX_QWORD_FIELD(*qwordp, + ES_EZ_ESSB_RX_PREFIX_MARK); + +next_buf: + m = sfc_ef10_essb_next_mbuf(rxq, m); + } while (todo_bufs-- > 0); + } + + return n_rx_pkts; +} + + +static uint16_t +sfc_ef10_essb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(rx_queue); + const unsigned int evq_old_read_ptr = rxq->evq_read_ptr; + uint16_t n_rx_pkts; + efx_qword_t rx_ev; + + if (unlikely(rxq->flags & (SFC_EF10_ESSB_RXQ_NOT_RUNNING | + SFC_EF10_ESSB_RXQ_EXCEPTION))) + return 0; + + n_rx_pkts = sfc_ef10_essb_rx_get_pending(rxq, rx_pkts, nb_pkts); + + while (n_rx_pkts != nb_pkts && + sfc_ef10_essb_rx_event_get(rxq, &rx_ev)) { + /* + * DROP_EVENT is an internal to the NIC, software should + * never see it and, therefore, may ignore it. + */ + + sfc_ef10_essb_rx_process_ev(rxq, rx_ev); + n_rx_pkts += sfc_ef10_essb_rx_get_pending(rxq, + rx_pkts + n_rx_pkts, + nb_pkts - n_rx_pkts); + } + + sfc_ef10_ev_qclear(rxq->evq_hw_ring, rxq->evq_ptr_mask, + evq_old_read_ptr, rxq->evq_read_ptr); + + /* It is not a problem if we refill in the case of exception */ + sfc_ef10_essb_rx_qrefill(rxq); + + return n_rx_pkts; +} + +static sfc_dp_rx_qdesc_npending_t sfc_ef10_essb_rx_qdesc_npending; +static unsigned int +sfc_ef10_essb_rx_qdesc_npending(struct sfc_dp_rxq *dp_rxq) +{ + struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq); + const unsigned int evq_old_read_ptr = rxq->evq_read_ptr; + efx_qword_t rx_ev; + + if (unlikely(rxq->flags & (SFC_EF10_ESSB_RXQ_NOT_RUNNING | + SFC_EF10_ESSB_RXQ_EXCEPTION))) + return rxq->bufs_pending; + + while (sfc_ef10_essb_rx_event_get(rxq, &rx_ev)) { + /* + * DROP_EVENT is an internal to the NIC, software should + * never see it and, therefore, may ignore it. 
+ */ + sfc_ef10_essb_rx_process_ev(rxq, rx_ev); + } + + sfc_ef10_ev_qclear(rxq->evq_hw_ring, rxq->evq_ptr_mask, + evq_old_read_ptr, rxq->evq_read_ptr); + + return rxq->bufs_pending; +} + +static sfc_dp_rx_qdesc_status_t sfc_ef10_essb_rx_qdesc_status; +static int +sfc_ef10_essb_rx_qdesc_status(struct sfc_dp_rxq *dp_rxq, uint16_t offset) +{ + struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq); + unsigned int pending = sfc_ef10_essb_rx_qdesc_npending(dp_rxq); + + if (offset < pending) + return RTE_ETH_RX_DESC_DONE; + + if (offset < (rxq->added - rxq->completed) * rxq->block_size + + rxq->left_in_completed - rxq->block_size) + return RTE_ETH_RX_DESC_AVAIL; + + return RTE_ETH_RX_DESC_UNAVAIL; +} + +static sfc_dp_rx_get_dev_info_t sfc_ef10_essb_rx_get_dev_info; +static void +sfc_ef10_essb_rx_get_dev_info(struct rte_eth_dev_info *dev_info) +{ + /* + * Number of descriptors just defines maximum number of pushed + * descriptors (fill level). + */ + dev_info->rx_desc_lim.nb_min = SFC_EF10_ESSB_RX_DESCS_MIN; + dev_info->rx_desc_lim.nb_align = SFC_EF10_ESSB_RX_DESCS_ALIGN; +} + +static sfc_dp_rx_pool_ops_supported_t sfc_ef10_essb_rx_pool_ops_supported; +static int +sfc_ef10_essb_rx_pool_ops_supported(const char *pool) +{ + SFC_ASSERT(pool != NULL); + + if (strcmp(pool, "bucket") == 0) + return 0; + + return -ENOTSUP; +} + +static sfc_dp_rx_qsize_up_rings_t sfc_ef10_essb_rx_qsize_up_rings; +static int +sfc_ef10_essb_rx_qsize_up_rings(uint16_t nb_rx_desc, + struct sfc_dp_rx_hw_limits *limits, + struct rte_mempool *mb_pool, + unsigned int *rxq_entries, + unsigned int *evq_entries, + unsigned int *rxq_max_fill_level) +{ + int rc; + struct rte_mempool_info mp_info; + unsigned int nb_hw_rx_desc; + unsigned int max_events; + + rc = rte_mempool_ops_get_info(mb_pool, &mp_info); + if (rc != 0) + return -rc; + if (mp_info.contig_block_size == 0) + return EINVAL; + + /* + * Calculate required number of hardware Rx descriptors each + * carrying contig block size Rx buffers. + * It cannot be less than Rx write pointer alignment plus 1 + * in order to avoid cases when the ring is guaranteed to be + * empty. + */ + nb_hw_rx_desc = RTE_MAX(SFC_DIV_ROUND_UP(nb_rx_desc, + mp_info.contig_block_size), + SFC_EF10_RX_WPTR_ALIGN + 1); + if (nb_hw_rx_desc <= limits->rxq_min_entries) { + *rxq_entries = limits->rxq_min_entries; + } else { + *rxq_entries = rte_align32pow2(nb_hw_rx_desc); + if (*rxq_entries > limits->rxq_max_entries) + return EINVAL; + } + + max_events = RTE_ALIGN_FLOOR(nb_hw_rx_desc, SFC_EF10_RX_WPTR_ALIGN) * + mp_info.contig_block_size + + (SFC_EF10_EV_PER_CACHE_LINE - 1) /* max unused EvQ entries */ + + 1 /* Rx error */ + 1 /* flush */ + 1 /* head-tail space */; + + *evq_entries = rte_align32pow2(max_events); + *evq_entries = RTE_MAX(*evq_entries, limits->evq_min_entries); + *evq_entries = RTE_MIN(*evq_entries, limits->evq_max_entries); + + /* + * May be even maximum event queue size is insufficient to handle + * so many Rx descriptors. If so, we should limit Rx queue fill level. 
+ */ + *rxq_max_fill_level = RTE_MIN(nb_rx_desc, + SFC_EF10_ESSB_RXQ_LIMIT(*evq_entries)); + return 0; +} + +static sfc_dp_rx_qcreate_t sfc_ef10_essb_rx_qcreate; +static int +sfc_ef10_essb_rx_qcreate(uint16_t port_id, uint16_t queue_id, + const struct rte_pci_addr *pci_addr, int socket_id, + const struct sfc_dp_rx_qcreate_info *info, + struct sfc_dp_rxq **dp_rxqp) +{ + struct rte_mempool * const mp = info->refill_mb_pool; + struct rte_mempool_info mp_info; + struct sfc_ef10_essb_rxq *rxq; + int rc; + + rc = rte_mempool_ops_get_info(mp, &mp_info); + if (rc != 0) { + /* Positive errno is used in the driver */ + rc = -rc; + goto fail_get_contig_block_size; + } + + /* Check if the mempool provides block dequeue */ + rc = EINVAL; + if (mp_info.contig_block_size == 0) + goto fail_no_block_dequeue; + + rc = ENOMEM; + rxq = rte_zmalloc_socket("sfc-ef10-rxq", sizeof(*rxq), + RTE_CACHE_LINE_SIZE, socket_id); + if (rxq == NULL) + goto fail_rxq_alloc; + + sfc_dp_queue_init(&rxq->dp.dpq, port_id, queue_id, pci_addr); + + rc = ENOMEM; + rxq->sw_ring = rte_calloc_socket("sfc-ef10-rxq-sw_ring", + info->rxq_entries, + sizeof(*rxq->sw_ring), + RTE_CACHE_LINE_SIZE, socket_id); + if (rxq->sw_ring == NULL) + goto fail_desc_alloc; + + rxq->block_size = mp_info.contig_block_size; + rxq->buf_stride = mp->header_size + mp->elt_size + mp->trailer_size; + rxq->rxq_ptr_mask = info->rxq_entries - 1; + rxq->evq_ptr_mask = info->evq_entries - 1; + rxq->evq_hw_ring = info->evq_hw_ring; + rxq->port_id = port_id; + + rxq->max_fill_level = info->max_fill_level / mp_info.contig_block_size; + rxq->refill_threshold = + RTE_MAX(info->refill_threshold / mp_info.contig_block_size, + SFC_EF10_RX_WPTR_ALIGN); + rxq->refill_mb_pool = mp; + rxq->rxq_hw_ring = info->rxq_hw_ring; + + rxq->doorbell = (volatile uint8_t *)info->mem_bar + + ER_DZ_RX_DESC_UPD_REG_OFST + + (info->hw_index << info->vi_window_shift); + + sfc_ef10_essb_rx_info(&rxq->dp.dpq, + "block size is %u, buf stride is %u", + rxq->block_size, rxq->buf_stride); + sfc_ef10_essb_rx_info(&rxq->dp.dpq, + "max fill level is %u descs (%u bufs), " + "refill threashold %u descs (%u bufs)", + rxq->max_fill_level, + rxq->max_fill_level * rxq->block_size, + rxq->refill_threshold, + rxq->refill_threshold * rxq->block_size); + + *dp_rxqp = &rxq->dp; + return 0; + +fail_desc_alloc: + rte_free(rxq); + +fail_rxq_alloc: +fail_no_block_dequeue: +fail_get_contig_block_size: + return rc; +} + +static sfc_dp_rx_qdestroy_t sfc_ef10_essb_rx_qdestroy; +static void +sfc_ef10_essb_rx_qdestroy(struct sfc_dp_rxq *dp_rxq) +{ + struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq); + + rte_free(rxq->sw_ring); + rte_free(rxq); +} + +static sfc_dp_rx_qstart_t sfc_ef10_essb_rx_qstart; +static int +sfc_ef10_essb_rx_qstart(struct sfc_dp_rxq *dp_rxq, unsigned int evq_read_ptr) +{ + struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq); + + rxq->evq_read_ptr = evq_read_ptr; + + /* Initialize before refill */ + rxq->completed = rxq->pending_id = rxq->added = 0; + rxq->left_in_completed = rxq->left_in_pending = rxq->block_size; + rxq->bufs_ptr = UINT_MAX; + rxq->bufs_pending = 0; + + sfc_ef10_essb_rx_qrefill(rxq); + + rxq->flags |= SFC_EF10_ESSB_RXQ_STARTED; + rxq->flags &= + ~(SFC_EF10_ESSB_RXQ_NOT_RUNNING | SFC_EF10_ESSB_RXQ_EXCEPTION); + + return 0; +} + +static sfc_dp_rx_qstop_t sfc_ef10_essb_rx_qstop; +static void +sfc_ef10_essb_rx_qstop(struct sfc_dp_rxq *dp_rxq, unsigned int *evq_read_ptr) +{ + struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq); + + rxq->flags 
|= SFC_EF10_ESSB_RXQ_NOT_RUNNING; + + *evq_read_ptr = rxq->evq_read_ptr; +} + +static sfc_dp_rx_qrx_ev_t sfc_ef10_essb_rx_qrx_ev; +static bool +sfc_ef10_essb_rx_qrx_ev(struct sfc_dp_rxq *dp_rxq, __rte_unused unsigned int id) +{ + __rte_unused struct sfc_ef10_essb_rxq *rxq; + + rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq); + SFC_ASSERT(rxq->flags & SFC_EF10_ESSB_RXQ_NOT_RUNNING); + + /* + * It is safe to ignore Rx event since we free all mbufs on + * queue purge anyway. + */ + + return false; +} + +static sfc_dp_rx_qpurge_t sfc_ef10_essb_rx_qpurge; +static void +sfc_ef10_essb_rx_qpurge(struct sfc_dp_rxq *dp_rxq) +{ + struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq); + unsigned int i; + const struct sfc_ef10_essb_rx_sw_desc *rxd; + struct rte_mbuf *m; + + for (i = rxq->completed; i != rxq->added; ++i) { + rxd = &rxq->sw_ring[i & rxq->rxq_ptr_mask]; + m = sfc_ef10_essb_mbuf_by_index(rxq, rxd->first_mbuf, + rxq->block_size - rxq->left_in_completed); + while (rxq->left_in_completed > 0) { + rte_mbuf_raw_free(m); + m = sfc_ef10_essb_next_mbuf(rxq, m); + rxq->left_in_completed--; + } + rxq->left_in_completed = rxq->block_size; + } + + rxq->flags &= ~SFC_EF10_ESSB_RXQ_STARTED; +} + +struct sfc_dp_rx sfc_ef10_essb_rx = { + .dp = { + .name = SFC_KVARG_DATAPATH_EF10_ESSB, + .type = SFC_DP_RX, + .hw_fw_caps = SFC_DP_HW_FW_CAP_EF10 | + SFC_DP_HW_FW_CAP_RX_ES_SUPER_BUFFER, + }, + .features = SFC_DP_RX_FEAT_FLOW_FLAG | + SFC_DP_RX_FEAT_FLOW_MARK, + .dev_offload_capa = DEV_RX_OFFLOAD_CHECKSUM | + DEV_RX_OFFLOAD_RSS_HASH, + .queue_offload_capa = 0, + .get_dev_info = sfc_ef10_essb_rx_get_dev_info, + .pool_ops_supported = sfc_ef10_essb_rx_pool_ops_supported, + .qsize_up_rings = sfc_ef10_essb_rx_qsize_up_rings, + .qcreate = sfc_ef10_essb_rx_qcreate, + .qdestroy = sfc_ef10_essb_rx_qdestroy, + .qstart = sfc_ef10_essb_rx_qstart, + .qstop = sfc_ef10_essb_rx_qstop, + .qrx_ev = sfc_ef10_essb_rx_qrx_ev, + .qpurge = sfc_ef10_essb_rx_qpurge, + .supported_ptypes_get = sfc_ef10_supported_ptypes_get, + .qdesc_npending = sfc_ef10_essb_rx_qdesc_npending, + .qdesc_status = sfc_ef10_essb_rx_qdesc_status, + .pkt_burst = sfc_ef10_essb_recv_pkts, +}; diff --git a/src/spdk/dpdk/drivers/net/sfc/sfc_ef10_rx.c b/src/spdk/dpdk/drivers/net/sfc/sfc_ef10_rx.c new file mode 100644 index 000000000..42e205e1b --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/sfc_ef10_rx.c @@ -0,0 +1,816 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2016-2019 Solarflare Communications Inc. + * + * This software was jointly developed between OKTET Labs (under contract + * for Solarflare) and Solarflare Communications, Inc. + */ + +/* EF10 native datapath implementation */ + +#include + +#include +#include +#include +#include + +#include "efx.h" +#include "efx_types.h" +#include "efx_regs.h" +#include "efx_regs_ef10.h" + +#include "sfc_tweak.h" +#include "sfc_dp_rx.h" +#include "sfc_kvargs.h" +#include "sfc_ef10.h" + +#define SFC_EF10_RX_EV_ENCAP_SUPPORT 1 +#include "sfc_ef10_rx_ev.h" + +#define sfc_ef10_rx_err(dpq, ...) \ + SFC_DP_LOG(SFC_KVARG_DATAPATH_EF10, ERR, dpq, __VA_ARGS__) + +/** + * Maximum number of descriptors/buffers in the Rx ring. + * It should guarantee that corresponding event queue never overfill. + * EF10 native datapath uses event queue of the same size as Rx queue. + * Maximum number of events on datapath can be estimated as number of + * Rx queue entries (one event per Rx buffer in the worst case) plus + * Rx error and flush events. 
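+ * For example, assuming 8-byte events and 64-byte cache lines
+ * (i.e. SFC_EF10_EV_PER_CACHE_LINE == 8), a 512-entry ring gives
+ * SFC_EF10_RXQ_LIMIT(512) = 512 - 1 - 7 - 1 - 1 = 502 pushed buffers.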
+ */ +#define SFC_EF10_RXQ_LIMIT(_ndesc) \ + ((_ndesc) - 1 /* head must not step on tail */ - \ + (SFC_EF10_EV_PER_CACHE_LINE - 1) /* max unused EvQ entries */ - \ + 1 /* Rx error */ - 1 /* flush */) + +struct sfc_ef10_rx_sw_desc { + struct rte_mbuf *mbuf; +}; + +struct sfc_ef10_rxq { + /* Used on data path */ + unsigned int flags; +#define SFC_EF10_RXQ_STARTED 0x1 +#define SFC_EF10_RXQ_NOT_RUNNING 0x2 +#define SFC_EF10_RXQ_EXCEPTION 0x4 +#define SFC_EF10_RXQ_RSS_HASH 0x8 +#define SFC_EF10_RXQ_FLAG_INTR_EN 0x10 + unsigned int ptr_mask; + unsigned int pending; + unsigned int completed; + unsigned int evq_read_ptr; + unsigned int evq_read_ptr_primed; + efx_qword_t *evq_hw_ring; + struct sfc_ef10_rx_sw_desc *sw_ring; + uint64_t rearm_data; + struct rte_mbuf *scatter_pkt; + volatile void *evq_prime; + uint16_t prefix_size; + + /* Used on refill */ + uint16_t buf_size; + unsigned int added; + unsigned int max_fill_level; + unsigned int refill_threshold; + struct rte_mempool *refill_mb_pool; + efx_qword_t *rxq_hw_ring; + volatile void *doorbell; + + /* Datapath receive queue anchor */ + struct sfc_dp_rxq dp; +}; + +static inline struct sfc_ef10_rxq * +sfc_ef10_rxq_by_dp_rxq(struct sfc_dp_rxq *dp_rxq) +{ + return container_of(dp_rxq, struct sfc_ef10_rxq, dp); +} + +static void +sfc_ef10_rx_qprime(struct sfc_ef10_rxq *rxq) +{ + sfc_ef10_ev_qprime(rxq->evq_prime, rxq->evq_read_ptr, rxq->ptr_mask); + rxq->evq_read_ptr_primed = rxq->evq_read_ptr; +} + +static void +sfc_ef10_rx_qrefill(struct sfc_ef10_rxq *rxq) +{ + const unsigned int ptr_mask = rxq->ptr_mask; + const uint32_t buf_size = rxq->buf_size; + unsigned int free_space; + unsigned int bulks; + void *objs[SFC_RX_REFILL_BULK]; + unsigned int added = rxq->added; + + RTE_BUILD_BUG_ON(SFC_RX_REFILL_BULK % SFC_EF10_RX_WPTR_ALIGN != 0); + + free_space = rxq->max_fill_level - (added - rxq->completed); + + if (free_space < rxq->refill_threshold) + return; + + bulks = free_space / RTE_DIM(objs); + /* refill_threshold guarantees that bulks is positive */ + SFC_ASSERT(bulks > 0); + + do { + unsigned int id; + unsigned int i; + + if (unlikely(rte_mempool_get_bulk(rxq->refill_mb_pool, objs, + RTE_DIM(objs)) < 0)) { + struct rte_eth_dev_data *dev_data = + rte_eth_devices[rxq->dp.dpq.port_id].data; + + /* + * It is hardly a safe way to increment counter + * from different contexts, but all PMDs do it. + */ + dev_data->rx_mbuf_alloc_failed += RTE_DIM(objs); + /* Return if we have posted nothing yet */ + if (added == rxq->added) + return; + /* Push posted */ + break; + } + + for (i = 0, id = added & ptr_mask; + i < RTE_DIM(objs); + ++i, ++id) { + struct rte_mbuf *m = objs[i]; + struct sfc_ef10_rx_sw_desc *rxd; + rte_iova_t phys_addr; + + MBUF_RAW_ALLOC_CHECK(m); + + SFC_ASSERT((id & ~ptr_mask) == 0); + rxd = &rxq->sw_ring[id]; + rxd->mbuf = m; + + /* + * Avoid writing to mbuf. It is cheaper to do it + * when we receive packet and fill in nearby + * structure members. 
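+ * (The receive path later initializes the mbuf with a single 64-bit
+ * rearm_data store that covers refcnt, data_off, nb_segs and port;
+ * see sfc_ef10_mk_mbuf_rearm_data() below.)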
+ */ + + phys_addr = rte_mbuf_data_iova_default(m); + EFX_POPULATE_QWORD_2(rxq->rxq_hw_ring[id], + ESF_DZ_RX_KER_BYTE_CNT, buf_size, + ESF_DZ_RX_KER_BUF_ADDR, phys_addr); + } + + added += RTE_DIM(objs); + } while (--bulks > 0); + + SFC_ASSERT(rxq->added != added); + rxq->added = added; + sfc_ef10_rx_qpush(rxq->doorbell, added, ptr_mask); +} + +static void +sfc_ef10_rx_prefetch_next(struct sfc_ef10_rxq *rxq, unsigned int next_id) +{ + struct rte_mbuf *next_mbuf; + + /* Prefetch next bunch of software descriptors */ + if ((next_id % (RTE_CACHE_LINE_SIZE / sizeof(rxq->sw_ring[0]))) == 0) + rte_prefetch0(&rxq->sw_ring[next_id]); + + /* + * It looks strange to prefetch depending on previous prefetch + * data, but measurements show that it is really efficient and + * increases packet rate. + */ + next_mbuf = rxq->sw_ring[next_id].mbuf; + if (likely(next_mbuf != NULL)) { + /* Prefetch the next mbuf structure */ + rte_mbuf_prefetch_part1(next_mbuf); + + /* Prefetch pseudo header of the next packet */ + /* data_off is not filled in yet */ + /* Yes, data could be not ready yet, but we hope */ + rte_prefetch0((uint8_t *)next_mbuf->buf_addr + + RTE_PKTMBUF_HEADROOM); + } +} + +static struct rte_mbuf ** +sfc_ef10_rx_pending(struct sfc_ef10_rxq *rxq, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + uint16_t n_rx_pkts = RTE_MIN(nb_pkts, rxq->pending - rxq->completed); + + SFC_ASSERT(rxq->pending == rxq->completed || rxq->scatter_pkt == NULL); + + if (n_rx_pkts != 0) { + unsigned int completed = rxq->completed; + + rxq->completed = completed + n_rx_pkts; + + do { + *rx_pkts++ = + rxq->sw_ring[completed++ & rxq->ptr_mask].mbuf; + } while (completed != rxq->completed); + } + + return rx_pkts; +} + +static uint16_t +sfc_ef10_rx_pseudo_hdr_get_len(const uint8_t *pseudo_hdr) +{ + return rte_le_to_cpu_16(*(const uint16_t *)&pseudo_hdr[8]); +} + +static uint32_t +sfc_ef10_rx_pseudo_hdr_get_hash(const uint8_t *pseudo_hdr) +{ + return rte_le_to_cpu_32(*(const uint32_t *)pseudo_hdr); +} + +static struct rte_mbuf ** +sfc_ef10_rx_process_event(struct sfc_ef10_rxq *rxq, efx_qword_t rx_ev, + struct rte_mbuf **rx_pkts, + struct rte_mbuf ** const rx_pkts_end) +{ + const unsigned int ptr_mask = rxq->ptr_mask; + unsigned int pending = rxq->pending; + unsigned int ready; + struct sfc_ef10_rx_sw_desc *rxd; + struct rte_mbuf *m; + struct rte_mbuf *m0; + const uint8_t *pseudo_hdr; + uint16_t seg_len; + + ready = (EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_DSC_PTR_LBITS) - pending) & + EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS); + + if (ready == 0) { + /* Rx abort - it was no enough descriptors for Rx packet */ + rte_pktmbuf_free(rxq->scatter_pkt); + rxq->scatter_pkt = NULL; + return rx_pkts; + } + + rxq->pending = pending + ready; + + if (rx_ev.eq_u64[0] & + rte_cpu_to_le_64((1ull << ESF_DZ_RX_ECC_ERR_LBN) | + (1ull << ESF_DZ_RX_ECRC_ERR_LBN))) { + SFC_ASSERT(rxq->completed == pending); + do { + rxd = &rxq->sw_ring[pending++ & ptr_mask]; + rte_mbuf_raw_free(rxd->mbuf); + } while (pending != rxq->pending); + rxq->completed = pending; + return rx_pkts; + } + + /* If scattered packet is in progress */ + if (rxq->scatter_pkt != NULL) { + /* Events for scattered packet frags are not merged */ + SFC_ASSERT(ready == 1); + SFC_ASSERT(rxq->completed == pending); + + /* There is no pseudo-header in scatter segments. 
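Only the first segment of a scattered packet carries the Rx prefix, so the fragment length below is taken from the event as is, without subtracting prefix_size.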
*/ + seg_len = EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_BYTES); + + rxd = &rxq->sw_ring[pending++ & ptr_mask]; + m = rxd->mbuf; + + MBUF_RAW_ALLOC_CHECK(m); + + m->data_off = RTE_PKTMBUF_HEADROOM; + rte_pktmbuf_data_len(m) = seg_len; + rte_pktmbuf_pkt_len(m) = seg_len; + + rxq->scatter_pkt->nb_segs++; + rte_pktmbuf_pkt_len(rxq->scatter_pkt) += seg_len; + rte_pktmbuf_lastseg(rxq->scatter_pkt)->next = m; + + if (~rx_ev.eq_u64[0] & + rte_cpu_to_le_64(1ull << ESF_DZ_RX_CONT_LBN)) { + *rx_pkts++ = rxq->scatter_pkt; + rxq->scatter_pkt = NULL; + } + rxq->completed = pending; + return rx_pkts; + } + + rxd = &rxq->sw_ring[pending++ & ptr_mask]; + + sfc_ef10_rx_prefetch_next(rxq, pending & ptr_mask); + + m = rxd->mbuf; + + RTE_BUILD_BUG_ON(sizeof(m->rearm_data[0]) != sizeof(rxq->rearm_data)); + m->rearm_data[0] = rxq->rearm_data; + + /* Classify packet based on Rx event */ + /* Mask RSS hash offload flag if RSS is not enabled */ + sfc_ef10_rx_ev_to_offloads(rx_ev, m, + (rxq->flags & SFC_EF10_RXQ_RSS_HASH) ? + ~0ull : ~PKT_RX_RSS_HASH); + + /* data_off already moved past pseudo header */ + pseudo_hdr = (uint8_t *)m->buf_addr + RTE_PKTMBUF_HEADROOM; + + /* + * Always get RSS hash from pseudo header to avoid + * condition/branching. If it is valid or not depends on + * PKT_RX_RSS_HASH in m->ol_flags. + */ + m->hash.rss = sfc_ef10_rx_pseudo_hdr_get_hash(pseudo_hdr); + + if (ready == 1) + seg_len = EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_BYTES) - + rxq->prefix_size; + else + seg_len = sfc_ef10_rx_pseudo_hdr_get_len(pseudo_hdr); + SFC_ASSERT(seg_len > 0); + rte_pktmbuf_data_len(m) = seg_len; + rte_pktmbuf_pkt_len(m) = seg_len; + + SFC_ASSERT(m->next == NULL); + + if (~rx_ev.eq_u64[0] & rte_cpu_to_le_64(1ull << ESF_DZ_RX_CONT_LBN)) { + *rx_pkts++ = m; + rxq->completed = pending; + } else { + /* Events with CONT bit are not merged */ + SFC_ASSERT(ready == 1); + rxq->scatter_pkt = m; + rxq->completed = pending; + return rx_pkts; + } + + /* Remember mbuf to copy offload flags and packet type from */ + m0 = m; + while (pending != rxq->pending) { + rxd = &rxq->sw_ring[pending++ & ptr_mask]; + + sfc_ef10_rx_prefetch_next(rxq, pending & ptr_mask); + + m = rxd->mbuf; + + if (rx_pkts != rx_pkts_end) { + *rx_pkts++ = m; + rxq->completed = pending; + } + + RTE_BUILD_BUG_ON(sizeof(m->rearm_data[0]) != + sizeof(rxq->rearm_data)); + m->rearm_data[0] = rxq->rearm_data; + + /* Event-dependent information is the same */ + m->ol_flags = m0->ol_flags; + m->packet_type = m0->packet_type; + + /* data_off already moved past pseudo header */ + pseudo_hdr = (uint8_t *)m->buf_addr + RTE_PKTMBUF_HEADROOM; + + /* + * Always get RSS hash from pseudo header to avoid + * condition/branching. If it is valid or not depends on + * PKT_RX_RSS_HASH in m->ol_flags. + */ + m->hash.rss = sfc_ef10_rx_pseudo_hdr_get_hash(pseudo_hdr); + + seg_len = sfc_ef10_rx_pseudo_hdr_get_len(pseudo_hdr); + SFC_ASSERT(seg_len > 0); + rte_pktmbuf_data_len(m) = seg_len; + rte_pktmbuf_pkt_len(m) = seg_len; + + SFC_ASSERT(m->next == NULL); + } + + return rx_pkts; +} + +static bool +sfc_ef10_rx_get_event(struct sfc_ef10_rxq *rxq, efx_qword_t *rx_ev) +{ + *rx_ev = rxq->evq_hw_ring[rxq->evq_read_ptr & rxq->ptr_mask]; + + if (!sfc_ef10_ev_present(*rx_ev)) + return false; + + if (unlikely(EFX_QWORD_FIELD(*rx_ev, FSF_AZ_EV_CODE) != + FSE_AZ_EV_CODE_RX_EV)) { + /* + * Do not move read_ptr to keep the event for exception + * handling by the control path. 
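+ * Once SFC_EF10_RXQ_EXCEPTION is set, the burst routine stops
+ * polling the event queue; the flag is cleared again when the
+ * queue is restarted (see sfc_ef10_rx_qstart()).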
+ */ + rxq->flags |= SFC_EF10_RXQ_EXCEPTION; + sfc_ef10_rx_err(&rxq->dp.dpq, + "RxQ exception at EvQ read ptr %#x", + rxq->evq_read_ptr); + return false; + } + + rxq->evq_read_ptr++; + return true; +} + +static uint16_t +sfc_ef10_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) +{ + struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(rx_queue); + struct rte_mbuf ** const rx_pkts_end = &rx_pkts[nb_pkts]; + unsigned int evq_old_read_ptr; + efx_qword_t rx_ev; + + rx_pkts = sfc_ef10_rx_pending(rxq, rx_pkts, nb_pkts); + + if (unlikely(rxq->flags & + (SFC_EF10_RXQ_NOT_RUNNING | SFC_EF10_RXQ_EXCEPTION))) + goto done; + + evq_old_read_ptr = rxq->evq_read_ptr; + while (rx_pkts != rx_pkts_end && sfc_ef10_rx_get_event(rxq, &rx_ev)) { + /* + * DROP_EVENT is an internal to the NIC, software should + * never see it and, therefore, may ignore it. + */ + + rx_pkts = sfc_ef10_rx_process_event(rxq, rx_ev, + rx_pkts, rx_pkts_end); + } + + sfc_ef10_ev_qclear(rxq->evq_hw_ring, rxq->ptr_mask, evq_old_read_ptr, + rxq->evq_read_ptr); + + /* It is not a problem if we refill in the case of exception */ + sfc_ef10_rx_qrefill(rxq); + + if ((rxq->flags & SFC_EF10_RXQ_FLAG_INTR_EN) && + rxq->evq_read_ptr_primed != rxq->evq_read_ptr) + sfc_ef10_rx_qprime(rxq); + +done: + return nb_pkts - (rx_pkts_end - rx_pkts); +} + +const uint32_t * +sfc_ef10_supported_ptypes_get(uint32_t tunnel_encaps) +{ + static const uint32_t ef10_native_ptypes[] = { + RTE_PTYPE_L2_ETHER, + RTE_PTYPE_L2_ETHER_ARP, + RTE_PTYPE_L2_ETHER_VLAN, + RTE_PTYPE_L2_ETHER_QINQ, + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN, + RTE_PTYPE_L3_IPV6_EXT_UNKNOWN, + RTE_PTYPE_L4_FRAG, + RTE_PTYPE_L4_TCP, + RTE_PTYPE_L4_UDP, + RTE_PTYPE_UNKNOWN + }; + static const uint32_t ef10_overlay_ptypes[] = { + RTE_PTYPE_L2_ETHER, + RTE_PTYPE_L2_ETHER_ARP, + RTE_PTYPE_L2_ETHER_VLAN, + RTE_PTYPE_L2_ETHER_QINQ, + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN, + RTE_PTYPE_L3_IPV6_EXT_UNKNOWN, + RTE_PTYPE_L4_FRAG, + RTE_PTYPE_L4_TCP, + RTE_PTYPE_L4_UDP, + RTE_PTYPE_TUNNEL_VXLAN, + RTE_PTYPE_TUNNEL_NVGRE, + RTE_PTYPE_INNER_L2_ETHER, + RTE_PTYPE_INNER_L2_ETHER_VLAN, + RTE_PTYPE_INNER_L2_ETHER_QINQ, + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN, + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN, + RTE_PTYPE_INNER_L4_FRAG, + RTE_PTYPE_INNER_L4_TCP, + RTE_PTYPE_INNER_L4_UDP, + RTE_PTYPE_UNKNOWN + }; + + /* + * The function returns static set of supported packet types, + * so we can't build it dynamically based on supported tunnel + * encapsulations and should limit to known sets. 
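+ * For example, the VXLAN | GENEVE | NVGRE combination handled below
+ * selects the overlay set, while any other non-zero combination logs
+ * an error and falls back to the native set.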
+ */ + switch (tunnel_encaps) { + case (1u << EFX_TUNNEL_PROTOCOL_VXLAN | + 1u << EFX_TUNNEL_PROTOCOL_GENEVE | + 1u << EFX_TUNNEL_PROTOCOL_NVGRE): + return ef10_overlay_ptypes; + default: + SFC_GENERIC_LOG(ERR, + "Unexpected set of supported tunnel encapsulations: %#x", + tunnel_encaps); + /* FALLTHROUGH */ + case 0: + return ef10_native_ptypes; + } +} + +static sfc_dp_rx_qdesc_npending_t sfc_ef10_rx_qdesc_npending; +static unsigned int +sfc_ef10_rx_qdesc_npending(struct sfc_dp_rxq *dp_rxq) +{ + struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(dp_rxq); + efx_qword_t rx_ev; + const unsigned int evq_old_read_ptr = rxq->evq_read_ptr; + unsigned int pending = rxq->pending; + unsigned int ready; + + if (unlikely(rxq->flags & + (SFC_EF10_RXQ_NOT_RUNNING | SFC_EF10_RXQ_EXCEPTION))) + goto done; + + while (sfc_ef10_rx_get_event(rxq, &rx_ev)) { + ready = (EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_DSC_PTR_LBITS) - + pending) & + EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS); + pending += ready; + } + + /* + * The function does not process events, so return event queue read + * pointer to the original position to allow the events that were + * read to be processed later + */ + rxq->evq_read_ptr = evq_old_read_ptr; + +done: + return pending - rxq->completed; +} + +static sfc_dp_rx_qdesc_status_t sfc_ef10_rx_qdesc_status; +static int +sfc_ef10_rx_qdesc_status(struct sfc_dp_rxq *dp_rxq, uint16_t offset) +{ + struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(dp_rxq); + unsigned int npending = sfc_ef10_rx_qdesc_npending(dp_rxq); + + if (unlikely(offset > rxq->ptr_mask)) + return -EINVAL; + + if (offset < npending) + return RTE_ETH_RX_DESC_DONE; + + if (offset < (rxq->added - rxq->completed)) + return RTE_ETH_RX_DESC_AVAIL; + + return RTE_ETH_RX_DESC_UNAVAIL; +} + + +static sfc_dp_rx_get_dev_info_t sfc_ef10_rx_get_dev_info; +static void +sfc_ef10_rx_get_dev_info(struct rte_eth_dev_info *dev_info) +{ + /* + * Number of descriptors just defines maximum number of pushed + * descriptors (fill level). + */ + dev_info->rx_desc_lim.nb_min = SFC_RX_REFILL_BULK; + dev_info->rx_desc_lim.nb_align = SFC_RX_REFILL_BULK; +} + + +static sfc_dp_rx_qsize_up_rings_t sfc_ef10_rx_qsize_up_rings; +static int +sfc_ef10_rx_qsize_up_rings(uint16_t nb_rx_desc, + struct sfc_dp_rx_hw_limits *limits, + __rte_unused struct rte_mempool *mb_pool, + unsigned int *rxq_entries, + unsigned int *evq_entries, + unsigned int *rxq_max_fill_level) +{ + /* + * rte_ethdev API guarantees that the number meets min, max and + * alignment requirements. 
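+ * For example, a request for 1500 descriptors (assuming it exceeds
+ * the hardware minimum) is rounded up to a 2048-entry RxQ/EvQ pair,
+ * and the fill level is capped at min(1500, SFC_EF10_RXQ_LIMIT(2048)).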
+ */ + if (nb_rx_desc <= limits->rxq_min_entries) + *rxq_entries = limits->rxq_min_entries; + else + *rxq_entries = rte_align32pow2(nb_rx_desc); + + *evq_entries = *rxq_entries; + + *rxq_max_fill_level = RTE_MIN(nb_rx_desc, + SFC_EF10_RXQ_LIMIT(*evq_entries)); + return 0; +} + + +static uint64_t +sfc_ef10_mk_mbuf_rearm_data(uint16_t port_id, uint16_t prefix_size) +{ + struct rte_mbuf m; + + memset(&m, 0, sizeof(m)); + + rte_mbuf_refcnt_set(&m, 1); + m.data_off = RTE_PKTMBUF_HEADROOM + prefix_size; + m.nb_segs = 1; + m.port = port_id; + + /* rearm_data covers structure members filled in above */ + rte_compiler_barrier(); + RTE_BUILD_BUG_ON(sizeof(m.rearm_data[0]) != sizeof(uint64_t)); + return m.rearm_data[0]; +} + +static sfc_dp_rx_qcreate_t sfc_ef10_rx_qcreate; +static int +sfc_ef10_rx_qcreate(uint16_t port_id, uint16_t queue_id, + const struct rte_pci_addr *pci_addr, int socket_id, + const struct sfc_dp_rx_qcreate_info *info, + struct sfc_dp_rxq **dp_rxqp) +{ + struct sfc_ef10_rxq *rxq; + int rc; + + rc = EINVAL; + if (info->rxq_entries != info->evq_entries) + goto fail_rxq_args; + + rc = ENOMEM; + rxq = rte_zmalloc_socket("sfc-ef10-rxq", sizeof(*rxq), + RTE_CACHE_LINE_SIZE, socket_id); + if (rxq == NULL) + goto fail_rxq_alloc; + + sfc_dp_queue_init(&rxq->dp.dpq, port_id, queue_id, pci_addr); + + rc = ENOMEM; + rxq->sw_ring = rte_calloc_socket("sfc-ef10-rxq-sw_ring", + info->rxq_entries, + sizeof(*rxq->sw_ring), + RTE_CACHE_LINE_SIZE, socket_id); + if (rxq->sw_ring == NULL) + goto fail_desc_alloc; + + rxq->flags |= SFC_EF10_RXQ_NOT_RUNNING; + if (info->flags & SFC_RXQ_FLAG_RSS_HASH) + rxq->flags |= SFC_EF10_RXQ_RSS_HASH; + rxq->ptr_mask = info->rxq_entries - 1; + rxq->evq_hw_ring = info->evq_hw_ring; + rxq->max_fill_level = info->max_fill_level; + rxq->refill_threshold = info->refill_threshold; + rxq->rearm_data = + sfc_ef10_mk_mbuf_rearm_data(port_id, info->prefix_size); + rxq->prefix_size = info->prefix_size; + rxq->buf_size = info->buf_size; + rxq->refill_mb_pool = info->refill_mb_pool; + rxq->rxq_hw_ring = info->rxq_hw_ring; + rxq->doorbell = (volatile uint8_t *)info->mem_bar + + ER_DZ_RX_DESC_UPD_REG_OFST + + (info->hw_index << info->vi_window_shift); + rxq->evq_prime = (volatile uint8_t *)info->mem_bar + + ER_DZ_EVQ_RPTR_REG_OFST + + (info->evq_hw_index << info->vi_window_shift); + + *dp_rxqp = &rxq->dp; + return 0; + +fail_desc_alloc: + rte_free(rxq); + +fail_rxq_alloc: +fail_rxq_args: + return rc; +} + +static sfc_dp_rx_qdestroy_t sfc_ef10_rx_qdestroy; +static void +sfc_ef10_rx_qdestroy(struct sfc_dp_rxq *dp_rxq) +{ + struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(dp_rxq); + + rte_free(rxq->sw_ring); + rte_free(rxq); +} + +static sfc_dp_rx_qstart_t sfc_ef10_rx_qstart; +static int +sfc_ef10_rx_qstart(struct sfc_dp_rxq *dp_rxq, unsigned int evq_read_ptr) +{ + struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(dp_rxq); + + SFC_ASSERT(rxq->completed == 0); + SFC_ASSERT(rxq->pending == 0); + SFC_ASSERT(rxq->added == 0); + + sfc_ef10_rx_qrefill(rxq); + + rxq->evq_read_ptr = evq_read_ptr; + + rxq->flags |= SFC_EF10_RXQ_STARTED; + rxq->flags &= ~(SFC_EF10_RXQ_NOT_RUNNING | SFC_EF10_RXQ_EXCEPTION); + + if (rxq->flags & SFC_EF10_RXQ_FLAG_INTR_EN) + sfc_ef10_rx_qprime(rxq); + + return 0; +} + +static sfc_dp_rx_qstop_t sfc_ef10_rx_qstop; +static void +sfc_ef10_rx_qstop(struct sfc_dp_rxq *dp_rxq, unsigned int *evq_read_ptr) +{ + struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(dp_rxq); + + rxq->flags |= SFC_EF10_RXQ_NOT_RUNNING; + + *evq_read_ptr = rxq->evq_read_ptr; +} + +static 
sfc_dp_rx_qrx_ev_t sfc_ef10_rx_qrx_ev; +static bool +sfc_ef10_rx_qrx_ev(struct sfc_dp_rxq *dp_rxq, __rte_unused unsigned int id) +{ + __rte_unused struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(dp_rxq); + + SFC_ASSERT(rxq->flags & SFC_EF10_RXQ_NOT_RUNNING); + + /* + * It is safe to ignore Rx event since we free all mbufs on + * queue purge anyway. + */ + + return false; +} + +static sfc_dp_rx_qpurge_t sfc_ef10_rx_qpurge; +static void +sfc_ef10_rx_qpurge(struct sfc_dp_rxq *dp_rxq) +{ + struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(dp_rxq); + unsigned int i; + struct sfc_ef10_rx_sw_desc *rxd; + + rte_pktmbuf_free(rxq->scatter_pkt); + rxq->scatter_pkt = NULL; + + for (i = rxq->completed; i != rxq->added; ++i) { + rxd = &rxq->sw_ring[i & rxq->ptr_mask]; + rte_mbuf_raw_free(rxd->mbuf); + rxd->mbuf = NULL; + } + + rxq->completed = rxq->pending = rxq->added = 0; + + rxq->flags &= ~SFC_EF10_RXQ_STARTED; +} + +static sfc_dp_rx_intr_enable_t sfc_ef10_rx_intr_enable; +static int +sfc_ef10_rx_intr_enable(struct sfc_dp_rxq *dp_rxq) +{ + struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(dp_rxq); + + rxq->flags |= SFC_EF10_RXQ_FLAG_INTR_EN; + if (rxq->flags & SFC_EF10_RXQ_STARTED) + sfc_ef10_rx_qprime(rxq); + return 0; +} + +static sfc_dp_rx_intr_disable_t sfc_ef10_rx_intr_disable; +static int +sfc_ef10_rx_intr_disable(struct sfc_dp_rxq *dp_rxq) +{ + struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(dp_rxq); + + /* Cannot disarm, just disable rearm */ + rxq->flags &= ~SFC_EF10_RXQ_FLAG_INTR_EN; + return 0; +} + +struct sfc_dp_rx sfc_ef10_rx = { + .dp = { + .name = SFC_KVARG_DATAPATH_EF10, + .type = SFC_DP_RX, + .hw_fw_caps = SFC_DP_HW_FW_CAP_EF10, + }, + .features = SFC_DP_RX_FEAT_MULTI_PROCESS | + SFC_DP_RX_FEAT_INTR, + .dev_offload_capa = DEV_RX_OFFLOAD_CHECKSUM | + DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | + DEV_RX_OFFLOAD_RSS_HASH, + .queue_offload_capa = DEV_RX_OFFLOAD_SCATTER, + .get_dev_info = sfc_ef10_rx_get_dev_info, + .qsize_up_rings = sfc_ef10_rx_qsize_up_rings, + .qcreate = sfc_ef10_rx_qcreate, + .qdestroy = sfc_ef10_rx_qdestroy, + .qstart = sfc_ef10_rx_qstart, + .qstop = sfc_ef10_rx_qstop, + .qrx_ev = sfc_ef10_rx_qrx_ev, + .qpurge = sfc_ef10_rx_qpurge, + .supported_ptypes_get = sfc_ef10_supported_ptypes_get, + .qdesc_npending = sfc_ef10_rx_qdesc_npending, + .qdesc_status = sfc_ef10_rx_qdesc_status, + .intr_enable = sfc_ef10_rx_intr_enable, + .intr_disable = sfc_ef10_rx_intr_disable, + .pkt_burst = sfc_ef10_recv_pkts, +}; diff --git a/src/spdk/dpdk/drivers/net/sfc/sfc_ef10_rx_ev.h b/src/spdk/dpdk/drivers/net/sfc/sfc_ef10_rx_ev.h new file mode 100644 index 000000000..a9896eae5 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/sfc_ef10_rx_ev.h @@ -0,0 +1,175 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2018-2019 Solarflare Communications Inc. + * + * This software was jointly developed between OKTET Labs (under contract + * for Solarflare) and Solarflare Communications, Inc. 
+ */ + +#ifndef _SFC_EF10_RX_EV_H +#define _SFC_EF10_RX_EV_H + +#include + +#include "efx_types.h" +#include "efx_regs.h" +#include "efx_regs_ef10.h" + +#ifdef __cplusplus +extern "C" { +#endif + +static inline void +sfc_ef10_rx_ev_to_offloads(const efx_qword_t rx_ev, struct rte_mbuf *m, + uint64_t ol_mask) +{ + uint32_t tun_ptype = 0; + /* Which event bit is mapped to PKT_RX_IP_CKSUM_* */ + int8_t ip_csum_err_bit; + /* Which event bit is mapped to PKT_RX_L4_CKSUM_* */ + int8_t l4_csum_err_bit; + uint32_t l2_ptype = 0; + uint32_t l3_ptype = 0; + uint32_t l4_ptype = 0; + uint64_t ol_flags = 0; + + if (unlikely(rx_ev.eq_u64[0] & + rte_cpu_to_le_64((1ull << ESF_DZ_RX_ECC_ERR_LBN) | + (1ull << ESF_DZ_RX_ECRC_ERR_LBN) | + (1ull << ESF_DZ_RX_PARSE_INCOMPLETE_LBN)))) { + /* Zero packet type is used as a marker to dicard bad packets */ + goto done; + } + +#if SFC_EF10_RX_EV_ENCAP_SUPPORT + switch (EFX_QWORD_FIELD(rx_ev, ESF_EZ_RX_ENCAP_HDR)) { + default: + /* Unexpected encapsulation tag class */ + SFC_ASSERT(false); + /* FALLTHROUGH */ + case ESE_EZ_ENCAP_HDR_NONE: + break; + case ESE_EZ_ENCAP_HDR_VXLAN: + /* + * It is definitely UDP, but we have no information + * about IPv4 vs IPv6 and VLAN tagging. + */ + tun_ptype = RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L4_UDP; + break; + case ESE_EZ_ENCAP_HDR_GRE: + /* + * We have no information about IPv4 vs IPv6 and VLAN tagging. + */ + tun_ptype = RTE_PTYPE_TUNNEL_NVGRE; + break; + } +#endif + + if (tun_ptype == 0) { + ip_csum_err_bit = ESF_DZ_RX_IPCKSUM_ERR_LBN; + l4_csum_err_bit = ESF_DZ_RX_TCPUDP_CKSUM_ERR_LBN; + } else { + ip_csum_err_bit = ESF_EZ_RX_IP_INNER_CHKSUM_ERR_LBN; + l4_csum_err_bit = ESF_EZ_RX_TCP_UDP_INNER_CHKSUM_ERR_LBN; + if (unlikely(EFX_TEST_QWORD_BIT(rx_ev, + ESF_DZ_RX_IPCKSUM_ERR_LBN))) + ol_flags |= PKT_RX_EIP_CKSUM_BAD; + } + + switch (EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_ETH_TAG_CLASS)) { + case ESE_DZ_ETH_TAG_CLASS_NONE: + l2_ptype = (tun_ptype == 0) ? RTE_PTYPE_L2_ETHER : + RTE_PTYPE_INNER_L2_ETHER; + break; + case ESE_DZ_ETH_TAG_CLASS_VLAN1: + l2_ptype = (tun_ptype == 0) ? RTE_PTYPE_L2_ETHER_VLAN : + RTE_PTYPE_INNER_L2_ETHER_VLAN; + break; + case ESE_DZ_ETH_TAG_CLASS_VLAN2: + l2_ptype = (tun_ptype == 0) ? RTE_PTYPE_L2_ETHER_QINQ : + RTE_PTYPE_INNER_L2_ETHER_QINQ; + break; + default: + /* Unexpected Eth tag class */ + SFC_ASSERT(false); + } + + switch (EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_L3_CLASS)) { + case ESE_DZ_L3_CLASS_IP4_FRAG: + l4_ptype = (tun_ptype == 0) ? RTE_PTYPE_L4_FRAG : + RTE_PTYPE_INNER_L4_FRAG; + /* FALLTHROUGH */ + case ESE_DZ_L3_CLASS_IP4: + l3_ptype = (tun_ptype == 0) ? RTE_PTYPE_L3_IPV4_EXT_UNKNOWN : + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN; + ol_flags |= PKT_RX_RSS_HASH | + ((EFX_TEST_QWORD_BIT(rx_ev, ip_csum_err_bit)) ? + PKT_RX_IP_CKSUM_BAD : PKT_RX_IP_CKSUM_GOOD); + break; + case ESE_DZ_L3_CLASS_IP6_FRAG: + l4_ptype = (tun_ptype == 0) ? RTE_PTYPE_L4_FRAG : + RTE_PTYPE_INNER_L4_FRAG; + /* FALLTHROUGH */ + case ESE_DZ_L3_CLASS_IP6: + l3_ptype = (tun_ptype == 0) ? RTE_PTYPE_L3_IPV6_EXT_UNKNOWN : + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN; + ol_flags |= PKT_RX_RSS_HASH; + break; + case ESE_DZ_L3_CLASS_ARP: + /* Override Layer 2 packet type */ + /* There is no ARP classification for inner packets */ + if (tun_ptype == 0) + l2_ptype = RTE_PTYPE_L2_ETHER_ARP; + break; + case ESE_DZ_L3_CLASS_UNKNOWN: + break; + default: + /* Unexpected Layer 3 class */ + SFC_ASSERT(false); + } + + /* + * RX_L4_CLASS is 3 bits wide on Huntington and Medford, but is only + * 2 bits wide on Medford2. 
Check it is safe to use the Medford2 field + * and values for all EF10 controllers. + */ + RTE_BUILD_BUG_ON(ESF_FZ_RX_L4_CLASS_LBN != ESF_DE_RX_L4_CLASS_LBN); + switch (EFX_QWORD_FIELD(rx_ev, ESF_FZ_RX_L4_CLASS)) { + case ESE_FZ_L4_CLASS_TCP: + RTE_BUILD_BUG_ON(ESE_FZ_L4_CLASS_TCP != ESE_DE_L4_CLASS_TCP); + l4_ptype = (tun_ptype == 0) ? RTE_PTYPE_L4_TCP : + RTE_PTYPE_INNER_L4_TCP; + ol_flags |= + (EFX_TEST_QWORD_BIT(rx_ev, l4_csum_err_bit)) ? + PKT_RX_L4_CKSUM_BAD : PKT_RX_L4_CKSUM_GOOD; + break; + case ESE_FZ_L4_CLASS_UDP: + RTE_BUILD_BUG_ON(ESE_FZ_L4_CLASS_UDP != ESE_DE_L4_CLASS_UDP); + l4_ptype = (tun_ptype == 0) ? RTE_PTYPE_L4_UDP : + RTE_PTYPE_INNER_L4_UDP; + ol_flags |= + (EFX_TEST_QWORD_BIT(rx_ev, l4_csum_err_bit)) ? + PKT_RX_L4_CKSUM_BAD : PKT_RX_L4_CKSUM_GOOD; + break; + case ESE_FZ_L4_CLASS_UNKNOWN: + RTE_BUILD_BUG_ON(ESE_FZ_L4_CLASS_UNKNOWN != + ESE_DE_L4_CLASS_UNKNOWN); + break; + default: + /* Unexpected Layer 4 class */ + SFC_ASSERT(false); + } + + SFC_ASSERT(l2_ptype != 0); + +done: + m->ol_flags = ol_flags & ol_mask; + m->packet_type = tun_ptype | l2_ptype | l3_ptype | l4_ptype; +} + + +#ifdef __cplusplus +} +#endif +#endif /* _SFC_EF10_RX_EV_H */ diff --git a/src/spdk/dpdk/drivers/net/sfc/sfc_ef10_tx.c b/src/spdk/dpdk/drivers/net/sfc/sfc_ef10_tx.c new file mode 100644 index 000000000..b91c8068b --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/sfc_ef10_tx.c @@ -0,0 +1,1147 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2016-2019 Solarflare Communications Inc. + * + * This software was jointly developed between OKTET Labs (under contract + * for Solarflare) and Solarflare Communications, Inc. + */ + +#include + +#include +#include +#include +#include + +#include "efx.h" +#include "efx_types.h" +#include "efx_regs.h" +#include "efx_regs_ef10.h" + +#include "sfc_dp_tx.h" +#include "sfc_tweak.h" +#include "sfc_kvargs.h" +#include "sfc_ef10.h" +#include "sfc_tso.h" + +#define sfc_ef10_tx_err(dpq, ...) \ + SFC_DP_LOG(SFC_KVARG_DATAPATH_EF10, ERR, dpq, __VA_ARGS__) + +/** Maximum length of the DMA descriptor data */ +#define SFC_EF10_TX_DMA_DESC_LEN_MAX \ + ((1u << ESF_DZ_TX_KER_BYTE_CNT_WIDTH) - 1) + +/** + * Maximum number of descriptors/buffers in the Tx ring. + * It should guarantee that corresponding event queue never overfill. + * EF10 native datapath uses event queue of the same size as Tx queue. + * Maximum number of events on datapath can be estimated as number of + * Tx queue entries (one event per Tx buffer in the worst case) plus + * Tx error and flush events. 
+ */ +#define SFC_EF10_TXQ_LIMIT(_ndesc) \ + ((_ndesc) - 1 /* head must not step on tail */ - \ + (SFC_EF10_EV_PER_CACHE_LINE - 1) /* max unused EvQ entries */ - \ + 1 /* Rx error */ - 1 /* flush */) + +struct sfc_ef10_tx_sw_desc { + struct rte_mbuf *mbuf; +}; + +struct sfc_ef10_txq { + unsigned int flags; +#define SFC_EF10_TXQ_STARTED 0x1 +#define SFC_EF10_TXQ_NOT_RUNNING 0x2 +#define SFC_EF10_TXQ_EXCEPTION 0x4 + + unsigned int ptr_mask; + unsigned int added; + unsigned int completed; + unsigned int max_fill_level; + unsigned int free_thresh; + unsigned int evq_read_ptr; + struct sfc_ef10_tx_sw_desc *sw_ring; + efx_qword_t *txq_hw_ring; + volatile void *doorbell; + efx_qword_t *evq_hw_ring; + uint8_t *tsoh; + rte_iova_t tsoh_iova; + uint16_t tso_tcp_header_offset_limit; + + /* Datapath transmit queue anchor */ + struct sfc_dp_txq dp; +}; + +static inline struct sfc_ef10_txq * +sfc_ef10_txq_by_dp_txq(struct sfc_dp_txq *dp_txq) +{ + return container_of(dp_txq, struct sfc_ef10_txq, dp); +} + +static bool +sfc_ef10_tx_get_event(struct sfc_ef10_txq *txq, efx_qword_t *tx_ev) +{ + volatile efx_qword_t *evq_hw_ring = txq->evq_hw_ring; + + /* + * Exception flag is set when reap is done. + * It is never done twice per packet burst get and absence of + * the flag is checked on burst get entry. + */ + SFC_ASSERT((txq->flags & SFC_EF10_TXQ_EXCEPTION) == 0); + + *tx_ev = evq_hw_ring[txq->evq_read_ptr & txq->ptr_mask]; + + if (!sfc_ef10_ev_present(*tx_ev)) + return false; + + if (unlikely(EFX_QWORD_FIELD(*tx_ev, FSF_AZ_EV_CODE) != + FSE_AZ_EV_CODE_TX_EV)) { + /* + * Do not move read_ptr to keep the event for exception + * handling by the control path. + */ + txq->flags |= SFC_EF10_TXQ_EXCEPTION; + sfc_ef10_tx_err(&txq->dp.dpq, + "TxQ exception at EvQ read ptr %#x", + txq->evq_read_ptr); + return false; + } + + txq->evq_read_ptr++; + return true; +} + +static unsigned int +sfc_ef10_tx_process_events(struct sfc_ef10_txq *txq) +{ + const unsigned int curr_done = txq->completed - 1; + unsigned int anew_done = curr_done; + efx_qword_t tx_ev; + + while (sfc_ef10_tx_get_event(txq, &tx_ev)) { + /* + * DROP_EVENT is an internal to the NIC, software should + * never see it and, therefore, may ignore it. 
+ */ + + /* Update the latest done descriptor */ + anew_done = EFX_QWORD_FIELD(tx_ev, ESF_DZ_TX_DESCR_INDX); + } + return (anew_done - curr_done) & txq->ptr_mask; +} + +static void +sfc_ef10_tx_reap(struct sfc_ef10_txq *txq) +{ + const unsigned int old_read_ptr = txq->evq_read_ptr; + const unsigned int ptr_mask = txq->ptr_mask; + unsigned int completed = txq->completed; + unsigned int pending = completed; + + pending += sfc_ef10_tx_process_events(txq); + + if (pending != completed) { + struct rte_mbuf *bulk[SFC_TX_REAP_BULK_SIZE]; + unsigned int nb = 0; + + do { + struct sfc_ef10_tx_sw_desc *txd; + struct rte_mbuf *m; + + txd = &txq->sw_ring[completed & ptr_mask]; + if (txd->mbuf == NULL) + continue; + + m = rte_pktmbuf_prefree_seg(txd->mbuf); + txd->mbuf = NULL; + if (m == NULL) + continue; + + if ((nb == RTE_DIM(bulk)) || + ((nb != 0) && (m->pool != bulk[0]->pool))) { + rte_mempool_put_bulk(bulk[0]->pool, + (void *)bulk, nb); + nb = 0; + } + + bulk[nb++] = m; + } while (++completed != pending); + + if (nb != 0) + rte_mempool_put_bulk(bulk[0]->pool, (void *)bulk, nb); + + txq->completed = completed; + } + + sfc_ef10_ev_qclear(txq->evq_hw_ring, ptr_mask, old_read_ptr, + txq->evq_read_ptr); +} + +static void +sfc_ef10_tx_qdesc_dma_create(rte_iova_t addr, uint16_t size, bool eop, + efx_qword_t *edp) +{ + EFX_POPULATE_QWORD_4(*edp, + ESF_DZ_TX_KER_TYPE, 0, + ESF_DZ_TX_KER_CONT, !eop, + ESF_DZ_TX_KER_BYTE_CNT, size, + ESF_DZ_TX_KER_BUF_ADDR, addr); +} + +static void +sfc_ef10_tx_qdesc_tso2_create(struct sfc_ef10_txq * const txq, + unsigned int added, uint16_t ipv4_id, + uint16_t outer_ipv4_id, uint32_t tcp_seq, + uint16_t tcp_mss) +{ + EFX_POPULATE_QWORD_5(txq->txq_hw_ring[added & txq->ptr_mask], + ESF_DZ_TX_DESC_IS_OPT, 1, + ESF_DZ_TX_OPTION_TYPE, + ESE_DZ_TX_OPTION_DESC_TSO, + ESF_DZ_TX_TSO_OPTION_TYPE, + ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A, + ESF_DZ_TX_TSO_IP_ID, ipv4_id, + ESF_DZ_TX_TSO_TCP_SEQNO, tcp_seq); + EFX_POPULATE_QWORD_5(txq->txq_hw_ring[(added + 1) & txq->ptr_mask], + ESF_DZ_TX_DESC_IS_OPT, 1, + ESF_DZ_TX_OPTION_TYPE, + ESE_DZ_TX_OPTION_DESC_TSO, + ESF_DZ_TX_TSO_OPTION_TYPE, + ESE_DZ_TX_TSO_OPTION_DESC_FATSO2B, + ESF_DZ_TX_TSO_TCP_MSS, tcp_mss, + ESF_DZ_TX_TSO_OUTER_IPID, outer_ipv4_id); +} + +static inline void +sfc_ef10_tx_qpush(struct sfc_ef10_txq *txq, unsigned int added, + unsigned int pushed) +{ + efx_qword_t desc; + efx_oword_t oword; + + /* + * This improves performance by pushing a TX descriptor at the same + * time as the doorbell. The descriptor must be added to the TXQ, + * so that can be used if the hardware decides not to use the pushed + * descriptor. + */ + desc.eq_u64[0] = txq->txq_hw_ring[pushed & txq->ptr_mask].eq_u64[0]; + EFX_POPULATE_OWORD_3(oword, + ERF_DZ_TX_DESC_WPTR, added & txq->ptr_mask, + ERF_DZ_TX_DESC_HWORD, EFX_QWORD_FIELD(desc, EFX_DWORD_1), + ERF_DZ_TX_DESC_LWORD, EFX_QWORD_FIELD(desc, EFX_DWORD_0)); + + /* DMA sync to device is not required */ + + /* + * rte_io_wmb() which guarantees that the STORE operations + * (i.e. Tx and event descriptor updates) that precede + * the rte_io_wmb() call are visible to NIC before the STORE + * operations that follow it (i.e. doorbell write). + */ + rte_io_wmb(); + + *(volatile __m128i *)txq->doorbell = oword.eo_u128[0]; +} + +static unsigned int +sfc_ef10_tx_pkt_descs_max(const struct rte_mbuf *m) +{ + unsigned int extra_descs_per_seg; + unsigned int extra_descs_per_pkt; + + /* + * VLAN offload is not supported yet, so no extra descriptors + * are required for VLAN option descriptor. 
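+ * A segment may still require several DMA descriptors when its data
+ * length exceeds SFC_EF10_TX_DMA_DESC_LEN_MAX; the estimate below
+ * bounds that worst case without walking the segment list.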
+ */ + +/** Maximum length of the mbuf segment data */ +#define SFC_MBUF_SEG_LEN_MAX UINT16_MAX + RTE_BUILD_BUG_ON(sizeof(m->data_len) != 2); + + /* + * Each segment is already counted once below. So, calculate + * how many extra DMA descriptors may be required per segment in + * the worst case because of maximum DMA descriptor length limit. + * If maximum segment length is less or equal to maximum DMA + * descriptor length, no extra DMA descriptors are required. + */ + extra_descs_per_seg = + (SFC_MBUF_SEG_LEN_MAX - 1) / SFC_EF10_TX_DMA_DESC_LEN_MAX; + +/** Maximum length of the packet */ +#define SFC_MBUF_PKT_LEN_MAX UINT32_MAX + RTE_BUILD_BUG_ON(sizeof(m->pkt_len) != 4); + + /* + * One more limitation on maximum number of extra DMA descriptors + * comes from slicing entire packet because of DMA descriptor length + * limit taking into account that there is at least one segment + * which is already counted below (so division of the maximum + * packet length minus one with round down). + * TSO is not supported yet, so packet length is limited by + * maximum PDU size. + */ + extra_descs_per_pkt = + (RTE_MIN((unsigned int)EFX_MAC_PDU_MAX, + SFC_MBUF_PKT_LEN_MAX) - 1) / + SFC_EF10_TX_DMA_DESC_LEN_MAX; + + return m->nb_segs + RTE_MIN(m->nb_segs * extra_descs_per_seg, + extra_descs_per_pkt); +} + +static bool +sfc_ef10_try_reap(struct sfc_ef10_txq * const txq, unsigned int added, + unsigned int needed_desc, unsigned int *dma_desc_space, + bool *reap_done) +{ + if (*reap_done) + return false; + + if (added != txq->added) { + sfc_ef10_tx_qpush(txq, added, txq->added); + txq->added = added; + } + + sfc_ef10_tx_reap(txq); + *reap_done = true; + + /* + * Recalculate DMA descriptor space since Tx reap may change + * the number of completed descriptors + */ + *dma_desc_space = txq->max_fill_level - + (added - txq->completed); + + return (needed_desc <= *dma_desc_space); +} + +static uint16_t +sfc_ef10_prepare_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + struct sfc_ef10_txq * const txq = sfc_ef10_txq_by_dp_txq(tx_queue); + uint16_t i; + + for (i = 0; i < nb_pkts; i++) { + struct rte_mbuf *m = tx_pkts[i]; + int ret; + +#ifdef RTE_LIBRTE_SFC_EFX_DEBUG + /* + * In non-TSO case, check that a packet segments do not exceed + * the size limit. Perform the check in debug mode since MTU + * more than 9k is not supported, but the limit here is 16k-1. + */ + if (!(m->ol_flags & PKT_TX_TCP_SEG)) { + struct rte_mbuf *m_seg; + + for (m_seg = m; m_seg != NULL; m_seg = m_seg->next) { + if (m_seg->data_len > + SFC_EF10_TX_DMA_DESC_LEN_MAX) { + rte_errno = EINVAL; + break; + } + } + } +#endif + ret = sfc_dp_tx_prepare_pkt(m, + txq->tso_tcp_header_offset_limit, + txq->max_fill_level, + SFC_EF10_TSO_OPT_DESCS_NUM, 0); + if (unlikely(ret != 0)) { + rte_errno = ret; + break; + } + } + + return i; +} + +static int +sfc_ef10_xmit_tso_pkt(struct sfc_ef10_txq * const txq, struct rte_mbuf *m_seg, + unsigned int *added, unsigned int *dma_desc_space, + bool *reap_done) +{ + size_t iph_off = ((m_seg->ol_flags & PKT_TX_TUNNEL_MASK) ? 
+ m_seg->outer_l2_len + m_seg->outer_l3_len : 0) + + m_seg->l2_len; + size_t tcph_off = iph_off + m_seg->l3_len; + size_t header_len = tcph_off + m_seg->l4_len; + /* Offset of the payload in the last segment that contains the header */ + size_t in_off = 0; + const struct rte_tcp_hdr *th; + uint16_t packet_id = 0; + uint16_t outer_packet_id = 0; + uint32_t sent_seq; + uint8_t *hdr_addr; + rte_iova_t hdr_iova; + struct rte_mbuf *first_m_seg = m_seg; + unsigned int pkt_start = *added; + unsigned int needed_desc; + struct rte_mbuf *m_seg_to_free_up_to = first_m_seg; + bool eop; + + /* + * Preliminary estimation of required DMA descriptors, including extra + * descriptor for TSO header that is needed when the header is + * separated from payload in one segment. It does not include + * extra descriptors that may appear when a big segment is split across + * several descriptors. + */ + needed_desc = m_seg->nb_segs + + (unsigned int)SFC_EF10_TSO_OPT_DESCS_NUM + + (unsigned int)SFC_EF10_TSO_HDR_DESCS_NUM; + + if (needed_desc > *dma_desc_space && + !sfc_ef10_try_reap(txq, pkt_start, needed_desc, + dma_desc_space, reap_done)) { + /* + * If a future Tx reap may increase available DMA descriptor + * space, do not try to send the packet. + */ + if (txq->completed != pkt_start) + return ENOSPC; + /* + * Do not allow to send packet if the maximum DMA + * descriptor space is not sufficient to hold TSO + * descriptors, header descriptor and at least 1 + * segment descriptor. + */ + if (*dma_desc_space < SFC_EF10_TSO_OPT_DESCS_NUM + + SFC_EF10_TSO_HDR_DESCS_NUM + 1) + return EMSGSIZE; + } + + /* Check if the header is not fragmented */ + if (rte_pktmbuf_data_len(m_seg) >= header_len) { + hdr_addr = rte_pktmbuf_mtod(m_seg, uint8_t *); + hdr_iova = rte_mbuf_data_iova(m_seg); + if (rte_pktmbuf_data_len(m_seg) == header_len) { + /* Cannot send a packet that consists only of header */ + if (unlikely(m_seg->next == NULL)) + return EMSGSIZE; + /* + * Associate header mbuf with header descriptor + * which is located after TSO descriptors. + */ + txq->sw_ring[(pkt_start + SFC_EF10_TSO_OPT_DESCS_NUM) & + txq->ptr_mask].mbuf = m_seg; + m_seg = m_seg->next; + in_off = 0; + + /* + * If there is no payload offset (payload starts at the + * beginning of a segment) then an extra descriptor for + * separated header is not needed. + */ + needed_desc--; + } else { + in_off = header_len; + } + } else { + unsigned int copied_segs; + unsigned int hdr_addr_off = (*added & txq->ptr_mask) * + SFC_TSOH_STD_LEN; + + /* + * Discard a packet if header linearization is needed but + * the header is too big. + * Duplicate Tx prepare check here to avoid spoil of + * memory if Tx prepare is skipped. + */ + if (unlikely(header_len > SFC_TSOH_STD_LEN)) + return EMSGSIZE; + + hdr_addr = txq->tsoh + hdr_addr_off; + hdr_iova = txq->tsoh_iova + hdr_addr_off; + copied_segs = sfc_tso_prepare_header(hdr_addr, header_len, + &m_seg, &in_off); + + /* Cannot send a packet that consists only of header */ + if (unlikely(m_seg == NULL)) + return EMSGSIZE; + + m_seg_to_free_up_to = m_seg; + /* + * Reduce the number of needed descriptors by the number of + * segments that entirely consist of header data. + */ + needed_desc -= copied_segs; + + /* Extra descriptor for separated header is not needed */ + if (in_off == 0) + needed_desc--; + } + + /* + * Tx prepare has debug-only checks that offload flags are correctly + * filled in in TSO mbuf. Use zero IPID if there is no IPv4 flag. + * If the packet is still IPv4, HW will simply start from zero IPID. 
+ */ + if (first_m_seg->ol_flags & PKT_TX_IPV4) + packet_id = sfc_tso_ip4_get_ipid(hdr_addr, iph_off); + + if (first_m_seg->ol_flags & PKT_TX_OUTER_IPV4) + outer_packet_id = sfc_tso_ip4_get_ipid(hdr_addr, + first_m_seg->outer_l2_len); + + th = (const struct rte_tcp_hdr *)(hdr_addr + tcph_off); + rte_memcpy(&sent_seq, &th->sent_seq, sizeof(uint32_t)); + sent_seq = rte_be_to_cpu_32(sent_seq); + + sfc_ef10_tx_qdesc_tso2_create(txq, *added, packet_id, outer_packet_id, + sent_seq, first_m_seg->tso_segsz); + (*added) += SFC_EF10_TSO_OPT_DESCS_NUM; + + sfc_ef10_tx_qdesc_dma_create(hdr_iova, header_len, false, + &txq->txq_hw_ring[(*added) & txq->ptr_mask]); + (*added)++; + + do { + rte_iova_t next_frag = rte_mbuf_data_iova(m_seg); + unsigned int seg_len = rte_pktmbuf_data_len(m_seg); + unsigned int id; + + next_frag += in_off; + seg_len -= in_off; + in_off = 0; + + do { + rte_iova_t frag_addr = next_frag; + size_t frag_len; + + frag_len = RTE_MIN(seg_len, + SFC_EF10_TX_DMA_DESC_LEN_MAX); + + next_frag += frag_len; + seg_len -= frag_len; + + eop = (seg_len == 0 && m_seg->next == NULL); + + id = (*added) & txq->ptr_mask; + (*added)++; + + /* + * Initially we assume that one DMA descriptor is needed + * for every segment. When the segment is split across + * several DMA descriptors, increase the estimation. + */ + needed_desc += (seg_len != 0); + + /* + * When no more descriptors can be added, but not all + * segments are processed. + */ + if (*added - pkt_start == *dma_desc_space && + !eop && + !sfc_ef10_try_reap(txq, pkt_start, needed_desc, + dma_desc_space, reap_done)) { + struct rte_mbuf *m; + struct rte_mbuf *m_next; + + if (txq->completed != pkt_start) { + unsigned int i; + + /* + * Reset mbuf associations with added + * descriptors. + */ + for (i = pkt_start; i != *added; i++) { + id = i & txq->ptr_mask; + txq->sw_ring[id].mbuf = NULL; + } + return ENOSPC; + } + + /* Free the segments that cannot be sent */ + for (m = m_seg->next; m != NULL; m = m_next) { + m_next = m->next; + rte_pktmbuf_free_seg(m); + } + eop = true; + /* Ignore the rest of the segment */ + seg_len = 0; + } + + sfc_ef10_tx_qdesc_dma_create(frag_addr, frag_len, + eop, &txq->txq_hw_ring[id]); + + } while (seg_len != 0); + + txq->sw_ring[id].mbuf = m_seg; + + m_seg = m_seg->next; + } while (!eop); + + /* + * Free segments which content was entirely copied to the TSO header + * memory space of Tx queue + */ + for (m_seg = first_m_seg; m_seg != m_seg_to_free_up_to;) { + struct rte_mbuf *seg_to_free = m_seg; + + m_seg = m_seg->next; + rte_pktmbuf_free_seg(seg_to_free); + } + + return 0; +} + +static uint16_t +sfc_ef10_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) +{ + struct sfc_ef10_txq * const txq = sfc_ef10_txq_by_dp_txq(tx_queue); + unsigned int added; + unsigned int dma_desc_space; + bool reap_done; + struct rte_mbuf **pktp; + struct rte_mbuf **pktp_end; + + if (unlikely(txq->flags & + (SFC_EF10_TXQ_NOT_RUNNING | SFC_EF10_TXQ_EXCEPTION))) + return 0; + + added = txq->added; + dma_desc_space = txq->max_fill_level - (added - txq->completed); + + reap_done = (dma_desc_space < txq->free_thresh); + if (reap_done) { + sfc_ef10_tx_reap(txq); + dma_desc_space = txq->max_fill_level - (added - txq->completed); + } + + for (pktp = &tx_pkts[0], pktp_end = &tx_pkts[nb_pkts]; + pktp != pktp_end; + ++pktp) { + struct rte_mbuf *m_seg = *pktp; + unsigned int pkt_start = added; + uint32_t pkt_len; + + if (likely(pktp + 1 != pktp_end)) + rte_mbuf_prefetch_part1(pktp[1]); + + if (m_seg->ol_flags & PKT_TX_TCP_SEG) { + int 
rc; + + rc = sfc_ef10_xmit_tso_pkt(txq, m_seg, &added, + &dma_desc_space, &reap_done); + if (rc != 0) { + added = pkt_start; + + /* Packet can be sent in following xmit calls */ + if (likely(rc == ENOSPC)) + break; + + /* + * Packet cannot be sent, tell RTE that + * it is sent, but actually drop it and + * continue with another packet + */ + rte_pktmbuf_free(*pktp); + continue; + } + + goto dma_desc_space_update; + } + + if (sfc_ef10_tx_pkt_descs_max(m_seg) > dma_desc_space) { + if (reap_done) + break; + + /* Push already prepared descriptors before polling */ + if (added != txq->added) { + sfc_ef10_tx_qpush(txq, added, txq->added); + txq->added = added; + } + + sfc_ef10_tx_reap(txq); + reap_done = true; + dma_desc_space = txq->max_fill_level - + (added - txq->completed); + if (sfc_ef10_tx_pkt_descs_max(m_seg) > dma_desc_space) + break; + } + + pkt_len = m_seg->pkt_len; + do { + rte_iova_t seg_addr = rte_mbuf_data_iova(m_seg); + unsigned int seg_len = rte_pktmbuf_data_len(m_seg); + unsigned int id = added & txq->ptr_mask; + + SFC_ASSERT(seg_len <= SFC_EF10_TX_DMA_DESC_LEN_MAX); + + pkt_len -= seg_len; + + sfc_ef10_tx_qdesc_dma_create(seg_addr, + seg_len, (pkt_len == 0), + &txq->txq_hw_ring[id]); + + /* + * rte_pktmbuf_free() is commonly used in DPDK for + * recycling packets - the function checks every + * segment's reference counter and returns the + * buffer to its pool whenever possible; + * nevertheless, freeing mbuf segments one by one + * may entail some performance decline; + * from this point, sfc_efx_tx_reap() does the same job + * on its own and frees buffers in bulks (all mbufs + * within a bulk belong to the same pool); + * from this perspective, individual segment pointers + * must be associated with the corresponding SW + * descriptors independently so that only one loop + * is sufficient on reap to inspect all the buffers + */ + txq->sw_ring[id].mbuf = m_seg; + + ++added; + + } while ((m_seg = m_seg->next) != 0); + +dma_desc_space_update: + dma_desc_space -= (added - pkt_start); + } + + if (likely(added != txq->added)) { + sfc_ef10_tx_qpush(txq, added, txq->added); + txq->added = added; + } + +#if SFC_TX_XMIT_PKTS_REAP_AT_LEAST_ONCE + if (!reap_done) + sfc_ef10_tx_reap(txq); +#endif + + return pktp - &tx_pkts[0]; +} + +static void +sfc_ef10_simple_tx_reap(struct sfc_ef10_txq *txq) +{ + const unsigned int old_read_ptr = txq->evq_read_ptr; + const unsigned int ptr_mask = txq->ptr_mask; + unsigned int completed = txq->completed; + unsigned int pending = completed; + + pending += sfc_ef10_tx_process_events(txq); + + if (pending != completed) { + struct rte_mbuf *bulk[SFC_TX_REAP_BULK_SIZE]; + unsigned int nb = 0; + + do { + struct sfc_ef10_tx_sw_desc *txd; + + txd = &txq->sw_ring[completed & ptr_mask]; + + if (nb == RTE_DIM(bulk)) { + rte_mempool_put_bulk(bulk[0]->pool, + (void *)bulk, nb); + nb = 0; + } + + bulk[nb++] = txd->mbuf; + } while (++completed != pending); + + rte_mempool_put_bulk(bulk[0]->pool, (void *)bulk, nb); + + txq->completed = completed; + } + + sfc_ef10_ev_qclear(txq->evq_hw_ring, ptr_mask, old_read_ptr, + txq->evq_read_ptr); +} + +#ifdef RTE_LIBRTE_SFC_EFX_DEBUG +static uint16_t +sfc_ef10_simple_prepare_pkts(__rte_unused void *tx_queue, + struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + uint16_t i; + + for (i = 0; i < nb_pkts; i++) { + struct rte_mbuf *m = tx_pkts[i]; + int ret; + + ret = rte_validate_tx_offload(m); + if (unlikely(ret != 0)) { + /* + * Negative error code is returned by + * rte_validate_tx_offload(), but positive are used + * inside 
net/sfc PMD. + */ + SFC_ASSERT(ret < 0); + rte_errno = -ret; + break; + } + + /* ef10_simple does not support TSO and VLAN insertion */ + if (unlikely(m->ol_flags & + (PKT_TX_TCP_SEG | PKT_TX_VLAN_PKT))) { + rte_errno = ENOTSUP; + break; + } + + /* ef10_simple does not support scattered packets */ + if (unlikely(m->nb_segs != 1)) { + rte_errno = ENOTSUP; + break; + } + + /* + * ef10_simple requires fast-free which ignores reference + * counters + */ + if (unlikely(rte_mbuf_refcnt_read(m) != 1)) { + rte_errno = ENOTSUP; + break; + } + + /* ef10_simple requires single pool for all packets */ + if (unlikely(m->pool != tx_pkts[0]->pool)) { + rte_errno = ENOTSUP; + break; + } + } + + return i; +} +#endif + +static uint16_t +sfc_ef10_simple_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + struct sfc_ef10_txq * const txq = sfc_ef10_txq_by_dp_txq(tx_queue); + unsigned int ptr_mask; + unsigned int added; + unsigned int dma_desc_space; + bool reap_done; + struct rte_mbuf **pktp; + struct rte_mbuf **pktp_end; + + if (unlikely(txq->flags & + (SFC_EF10_TXQ_NOT_RUNNING | SFC_EF10_TXQ_EXCEPTION))) + return 0; + + ptr_mask = txq->ptr_mask; + added = txq->added; + dma_desc_space = txq->max_fill_level - (added - txq->completed); + + reap_done = (dma_desc_space < RTE_MAX(txq->free_thresh, nb_pkts)); + if (reap_done) { + sfc_ef10_simple_tx_reap(txq); + dma_desc_space = txq->max_fill_level - (added - txq->completed); + } + + pktp_end = &tx_pkts[MIN(nb_pkts, dma_desc_space)]; + for (pktp = &tx_pkts[0]; pktp != pktp_end; ++pktp) { + struct rte_mbuf *pkt = *pktp; + unsigned int id = added & ptr_mask; + + SFC_ASSERT(rte_pktmbuf_data_len(pkt) <= + SFC_EF10_TX_DMA_DESC_LEN_MAX); + + sfc_ef10_tx_qdesc_dma_create(rte_mbuf_data_iova(pkt), + rte_pktmbuf_data_len(pkt), + true, &txq->txq_hw_ring[id]); + + txq->sw_ring[id].mbuf = pkt; + + ++added; + } + + if (likely(added != txq->added)) { + sfc_ef10_tx_qpush(txq, added, txq->added); + txq->added = added; + } + +#if SFC_TX_XMIT_PKTS_REAP_AT_LEAST_ONCE + if (!reap_done) + sfc_ef10_simple_tx_reap(txq); +#endif + + return pktp - &tx_pkts[0]; +} + +static sfc_dp_tx_get_dev_info_t sfc_ef10_get_dev_info; +static void +sfc_ef10_get_dev_info(struct rte_eth_dev_info *dev_info) +{ + /* + * Number of descriptors just defines maximum number of pushed + * descriptors (fill level). + */ + dev_info->tx_desc_lim.nb_min = 1; + dev_info->tx_desc_lim.nb_align = 1; +} + +static sfc_dp_tx_qsize_up_rings_t sfc_ef10_tx_qsize_up_rings; +static int +sfc_ef10_tx_qsize_up_rings(uint16_t nb_tx_desc, + struct sfc_dp_tx_hw_limits *limits, + unsigned int *txq_entries, + unsigned int *evq_entries, + unsigned int *txq_max_fill_level) +{ + /* + * rte_ethdev API guarantees that the number meets min, max and + * alignment requirements. 
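+ * The sizing policy matches the Rx datapath: round the request up to
+ * a power of two and cap the fill level by SFC_EF10_TXQ_LIMIT().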
+ */ + if (nb_tx_desc <= limits->txq_min_entries) + *txq_entries = limits->txq_min_entries; + else + *txq_entries = rte_align32pow2(nb_tx_desc); + + *evq_entries = *txq_entries; + + *txq_max_fill_level = RTE_MIN(nb_tx_desc, + SFC_EF10_TXQ_LIMIT(*evq_entries)); + return 0; +} + +static sfc_dp_tx_qcreate_t sfc_ef10_tx_qcreate; +static int +sfc_ef10_tx_qcreate(uint16_t port_id, uint16_t queue_id, + const struct rte_pci_addr *pci_addr, int socket_id, + const struct sfc_dp_tx_qcreate_info *info, + struct sfc_dp_txq **dp_txqp) +{ + struct sfc_ef10_txq *txq; + int rc; + + rc = EINVAL; + if (info->txq_entries != info->evq_entries) + goto fail_bad_args; + + rc = ENOMEM; + txq = rte_zmalloc_socket("sfc-ef10-txq", sizeof(*txq), + RTE_CACHE_LINE_SIZE, socket_id); + if (txq == NULL) + goto fail_txq_alloc; + + sfc_dp_queue_init(&txq->dp.dpq, port_id, queue_id, pci_addr); + + rc = ENOMEM; + txq->sw_ring = rte_calloc_socket("sfc-ef10-txq-sw_ring", + info->txq_entries, + sizeof(*txq->sw_ring), + RTE_CACHE_LINE_SIZE, socket_id); + if (txq->sw_ring == NULL) + goto fail_sw_ring_alloc; + + if (info->offloads & (DEV_TX_OFFLOAD_TCP_TSO | + DEV_TX_OFFLOAD_VXLAN_TNL_TSO | + DEV_TX_OFFLOAD_GENEVE_TNL_TSO)) { + txq->tsoh = rte_calloc_socket("sfc-ef10-txq-tsoh", + info->txq_entries, + SFC_TSOH_STD_LEN, + RTE_CACHE_LINE_SIZE, + socket_id); + if (txq->tsoh == NULL) + goto fail_tsoh_alloc; + + txq->tsoh_iova = rte_malloc_virt2iova(txq->tsoh); + } + + txq->flags = SFC_EF10_TXQ_NOT_RUNNING; + txq->ptr_mask = info->txq_entries - 1; + txq->max_fill_level = info->max_fill_level; + txq->free_thresh = info->free_thresh; + txq->txq_hw_ring = info->txq_hw_ring; + txq->doorbell = (volatile uint8_t *)info->mem_bar + + ER_DZ_TX_DESC_UPD_REG_OFST + + (info->hw_index << info->vi_window_shift); + txq->evq_hw_ring = info->evq_hw_ring; + txq->tso_tcp_header_offset_limit = info->tso_tcp_header_offset_limit; + + *dp_txqp = &txq->dp; + return 0; + +fail_tsoh_alloc: + rte_free(txq->sw_ring); + +fail_sw_ring_alloc: + rte_free(txq); + +fail_txq_alloc: +fail_bad_args: + return rc; +} + +static sfc_dp_tx_qdestroy_t sfc_ef10_tx_qdestroy; +static void +sfc_ef10_tx_qdestroy(struct sfc_dp_txq *dp_txq) +{ + struct sfc_ef10_txq *txq = sfc_ef10_txq_by_dp_txq(dp_txq); + + rte_free(txq->tsoh); + rte_free(txq->sw_ring); + rte_free(txq); +} + +static sfc_dp_tx_qstart_t sfc_ef10_tx_qstart; +static int +sfc_ef10_tx_qstart(struct sfc_dp_txq *dp_txq, unsigned int evq_read_ptr, + unsigned int txq_desc_index) +{ + struct sfc_ef10_txq *txq = sfc_ef10_txq_by_dp_txq(dp_txq); + + txq->evq_read_ptr = evq_read_ptr; + txq->added = txq->completed = txq_desc_index; + + txq->flags |= SFC_EF10_TXQ_STARTED; + txq->flags &= ~(SFC_EF10_TXQ_NOT_RUNNING | SFC_EF10_TXQ_EXCEPTION); + + return 0; +} + +static sfc_dp_tx_qstop_t sfc_ef10_tx_qstop; +static void +sfc_ef10_tx_qstop(struct sfc_dp_txq *dp_txq, unsigned int *evq_read_ptr) +{ + struct sfc_ef10_txq *txq = sfc_ef10_txq_by_dp_txq(dp_txq); + + txq->flags |= SFC_EF10_TXQ_NOT_RUNNING; + + *evq_read_ptr = txq->evq_read_ptr; +} + +static sfc_dp_tx_qtx_ev_t sfc_ef10_tx_qtx_ev; +static bool +sfc_ef10_tx_qtx_ev(struct sfc_dp_txq *dp_txq, __rte_unused unsigned int id) +{ + __rte_unused struct sfc_ef10_txq *txq = sfc_ef10_txq_by_dp_txq(dp_txq); + + SFC_ASSERT(txq->flags & SFC_EF10_TXQ_NOT_RUNNING); + + /* + * It is safe to ignore Tx event since we reap all mbufs on + * queue purge anyway. 
+ */ + + return false; +} + +static sfc_dp_tx_qreap_t sfc_ef10_tx_qreap; +static void +sfc_ef10_tx_qreap(struct sfc_dp_txq *dp_txq) +{ + struct sfc_ef10_txq *txq = sfc_ef10_txq_by_dp_txq(dp_txq); + unsigned int completed; + + for (completed = txq->completed; completed != txq->added; ++completed) { + struct sfc_ef10_tx_sw_desc *txd; + + txd = &txq->sw_ring[completed & txq->ptr_mask]; + if (txd->mbuf != NULL) { + rte_pktmbuf_free_seg(txd->mbuf); + txd->mbuf = NULL; + } + } + + txq->flags &= ~SFC_EF10_TXQ_STARTED; +} + +static unsigned int +sfc_ef10_tx_qdesc_npending(struct sfc_ef10_txq *txq) +{ + const unsigned int curr_done = txq->completed - 1; + unsigned int anew_done = curr_done; + efx_qword_t tx_ev; + const unsigned int evq_old_read_ptr = txq->evq_read_ptr; + + if (unlikely(txq->flags & + (SFC_EF10_TXQ_NOT_RUNNING | SFC_EF10_TXQ_EXCEPTION))) + return 0; + + while (sfc_ef10_tx_get_event(txq, &tx_ev)) + anew_done = EFX_QWORD_FIELD(tx_ev, ESF_DZ_TX_DESCR_INDX); + + /* + * The function does not process events, so return event queue read + * pointer to the original position to allow the events that were + * read to be processed later + */ + txq->evq_read_ptr = evq_old_read_ptr; + + return (anew_done - curr_done) & txq->ptr_mask; +} + +static sfc_dp_tx_qdesc_status_t sfc_ef10_tx_qdesc_status; +static int +sfc_ef10_tx_qdesc_status(struct sfc_dp_txq *dp_txq, + uint16_t offset) +{ + struct sfc_ef10_txq *txq = sfc_ef10_txq_by_dp_txq(dp_txq); + unsigned int npending = sfc_ef10_tx_qdesc_npending(txq); + + if (unlikely(offset > txq->ptr_mask)) + return -EINVAL; + + if (unlikely(offset >= txq->max_fill_level)) + return RTE_ETH_TX_DESC_UNAVAIL; + + if (unlikely(offset < npending)) + return RTE_ETH_TX_DESC_FULL; + + return RTE_ETH_TX_DESC_DONE; +} + +struct sfc_dp_tx sfc_ef10_tx = { + .dp = { + .name = SFC_KVARG_DATAPATH_EF10, + .type = SFC_DP_TX, + .hw_fw_caps = SFC_DP_HW_FW_CAP_EF10, + }, + .features = SFC_DP_TX_FEAT_MULTI_PROCESS, + .dev_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS, + .queue_offload_capa = DEV_TX_OFFLOAD_IPV4_CKSUM | + DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_TCP_CKSUM | + DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | + DEV_TX_OFFLOAD_TCP_TSO | + DEV_TX_OFFLOAD_VXLAN_TNL_TSO | + DEV_TX_OFFLOAD_GENEVE_TNL_TSO, + .get_dev_info = sfc_ef10_get_dev_info, + .qsize_up_rings = sfc_ef10_tx_qsize_up_rings, + .qcreate = sfc_ef10_tx_qcreate, + .qdestroy = sfc_ef10_tx_qdestroy, + .qstart = sfc_ef10_tx_qstart, + .qtx_ev = sfc_ef10_tx_qtx_ev, + .qstop = sfc_ef10_tx_qstop, + .qreap = sfc_ef10_tx_qreap, + .qdesc_status = sfc_ef10_tx_qdesc_status, + .pkt_prepare = sfc_ef10_prepare_pkts, + .pkt_burst = sfc_ef10_xmit_pkts, +}; + +struct sfc_dp_tx sfc_ef10_simple_tx = { + .dp = { + .name = SFC_KVARG_DATAPATH_EF10_SIMPLE, + .type = SFC_DP_TX, + }, + .features = SFC_DP_TX_FEAT_MULTI_PROCESS, + .dev_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE, + .queue_offload_capa = DEV_TX_OFFLOAD_IPV4_CKSUM | + DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_TCP_CKSUM | + DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM, + .get_dev_info = sfc_ef10_get_dev_info, + .qsize_up_rings = sfc_ef10_tx_qsize_up_rings, + .qcreate = sfc_ef10_tx_qcreate, + .qdestroy = sfc_ef10_tx_qdestroy, + .qstart = sfc_ef10_tx_qstart, + .qtx_ev = sfc_ef10_tx_qtx_ev, + .qstop = sfc_ef10_tx_qstop, + .qreap = sfc_ef10_tx_qreap, + .qdesc_status = sfc_ef10_tx_qdesc_status, +#ifdef RTE_LIBRTE_SFC_EFX_DEBUG + .pkt_prepare = sfc_ef10_simple_prepare_pkts, +#endif + .pkt_burst = sfc_ef10_simple_xmit_pkts, +}; diff --git a/src/spdk/dpdk/drivers/net/sfc/sfc_ethdev.c 
b/src/spdk/dpdk/drivers/net/sfc/sfc_ethdev.c new file mode 100644 index 000000000..6b3c49a28 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/sfc_ethdev.c @@ -0,0 +1,2306 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2016-2019 Solarflare Communications Inc. + * + * This software was jointly developed between OKTET Labs (under contract + * for Solarflare) and Solarflare Communications, Inc. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "efx.h" + +#include "sfc.h" +#include "sfc_debug.h" +#include "sfc_log.h" +#include "sfc_kvargs.h" +#include "sfc_ev.h" +#include "sfc_rx.h" +#include "sfc_tx.h" +#include "sfc_flow.h" +#include "sfc_dp.h" +#include "sfc_dp_rx.h" + +uint32_t sfc_logtype_driver; + +static struct sfc_dp_list sfc_dp_head = + TAILQ_HEAD_INITIALIZER(sfc_dp_head); + + +static void sfc_eth_dev_clear_ops(struct rte_eth_dev *dev); + + +static int +sfc_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size) +{ + struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev); + efx_nic_fw_info_t enfi; + int ret; + int rc; + + /* + * Return value of the callback is likely supposed to be + * equal to or greater than 0, nevertheless, if an error + * occurs, it will be desirable to pass it to the caller + */ + if ((fw_version == NULL) || (fw_size == 0)) + return -EINVAL; + + rc = efx_nic_get_fw_version(sa->nic, &enfi); + if (rc != 0) + return -rc; + + ret = snprintf(fw_version, fw_size, + "%" PRIu16 ".%" PRIu16 ".%" PRIu16 ".%" PRIu16, + enfi.enfi_mc_fw_version[0], enfi.enfi_mc_fw_version[1], + enfi.enfi_mc_fw_version[2], enfi.enfi_mc_fw_version[3]); + if (ret < 0) + return ret; + + if (enfi.enfi_dpcpu_fw_ids_valid) { + size_t dpcpu_fw_ids_offset = MIN(fw_size - 1, (size_t)ret); + int ret_extra; + + ret_extra = snprintf(fw_version + dpcpu_fw_ids_offset, + fw_size - dpcpu_fw_ids_offset, + " rx%" PRIx16 " tx%" PRIx16, + enfi.enfi_rx_dpcpu_fw_id, + enfi.enfi_tx_dpcpu_fw_id); + if (ret_extra < 0) + return ret_extra; + + ret += ret_extra; + } + + if (fw_size < (size_t)(++ret)) + return ret; + else + return 0; +} + +static int +sfc_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) +{ + const struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev); + struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev); + struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev); + struct sfc_rss *rss = &sas->rss; + uint64_t txq_offloads_def = 0; + + sfc_log_init(sa, "entry"); + + dev_info->min_mtu = RTE_ETHER_MIN_MTU; + dev_info->max_mtu = EFX_MAC_SDU_MAX; + + dev_info->max_rx_pktlen = EFX_MAC_PDU_MAX; + + /* Autonegotiation may be disabled */ + dev_info->speed_capa = ETH_LINK_SPEED_FIXED; + if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_1000FDX)) + dev_info->speed_capa |= ETH_LINK_SPEED_1G; + if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_10000FDX)) + dev_info->speed_capa |= ETH_LINK_SPEED_10G; + if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_25000FDX)) + dev_info->speed_capa |= ETH_LINK_SPEED_25G; + if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_40000FDX)) + dev_info->speed_capa |= ETH_LINK_SPEED_40G; + if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_50000FDX)) + dev_info->speed_capa |= ETH_LINK_SPEED_50G; + if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_100000FDX)) + dev_info->speed_capa |= ETH_LINK_SPEED_100G; + + dev_info->max_rx_queues = sa->rxq_max; + dev_info->max_tx_queues = sa->txq_max; + + /* By default packets are 
dropped if no descriptors are available */ + dev_info->default_rxconf.rx_drop_en = 1; + + dev_info->rx_queue_offload_capa = sfc_rx_get_queue_offload_caps(sa); + + /* + * rx_offload_capa includes both device and queue offloads since + * the latter may be requested on a per device basis which makes + * sense when some offloads are needed to be set on all queues. + */ + dev_info->rx_offload_capa = sfc_rx_get_dev_offload_caps(sa) | + dev_info->rx_queue_offload_capa; + + dev_info->tx_queue_offload_capa = sfc_tx_get_queue_offload_caps(sa); + + /* + * tx_offload_capa includes both device and queue offloads since + * the latter may be requested on a per device basis which makes + * sense when some offloads are needed to be set on all queues. + */ + dev_info->tx_offload_capa = sfc_tx_get_dev_offload_caps(sa) | + dev_info->tx_queue_offload_capa; + + if (dev_info->tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE) + txq_offloads_def |= DEV_TX_OFFLOAD_MBUF_FAST_FREE; + + dev_info->default_txconf.offloads |= txq_offloads_def; + + if (rss->context_type != EFX_RX_SCALE_UNAVAILABLE) { + uint64_t rte_hf = 0; + unsigned int i; + + for (i = 0; i < rss->hf_map_nb_entries; ++i) + rte_hf |= rss->hf_map[i].rte; + + dev_info->reta_size = EFX_RSS_TBL_SIZE; + dev_info->hash_key_size = EFX_RSS_KEY_SIZE; + dev_info->flow_type_rss_offloads = rte_hf; + } + + /* Initialize to hardware limits */ + dev_info->rx_desc_lim.nb_max = sa->rxq_max_entries; + dev_info->rx_desc_lim.nb_min = sa->rxq_min_entries; + /* The RXQ hardware requires that the descriptor count is a power + * of 2, but rx_desc_lim cannot properly describe that constraint. + */ + dev_info->rx_desc_lim.nb_align = sa->rxq_min_entries; + + /* Initialize to hardware limits */ + dev_info->tx_desc_lim.nb_max = sa->txq_max_entries; + dev_info->tx_desc_lim.nb_min = sa->txq_min_entries; + /* + * The TXQ hardware requires that the descriptor count is a power + * of 2, but tx_desc_lim cannot properly describe that constraint + */ + dev_info->tx_desc_lim.nb_align = sa->txq_min_entries; + + if (sap->dp_rx->get_dev_info != NULL) + sap->dp_rx->get_dev_info(dev_info); + if (sap->dp_tx->get_dev_info != NULL) + sap->dp_tx->get_dev_info(dev_info); + + dev_info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP | + RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP; + + return 0; +} + +static const uint32_t * +sfc_dev_supported_ptypes_get(struct rte_eth_dev *dev) +{ + const struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev); + + return sap->dp_rx->supported_ptypes_get(sap->shared->tunnel_encaps); +} + +static int +sfc_dev_configure(struct rte_eth_dev *dev) +{ + struct rte_eth_dev_data *dev_data = dev->data; + struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev); + int rc; + + sfc_log_init(sa, "entry n_rxq=%u n_txq=%u", + dev_data->nb_rx_queues, dev_data->nb_tx_queues); + + sfc_adapter_lock(sa); + switch (sa->state) { + case SFC_ADAPTER_CONFIGURED: + /* FALLTHROUGH */ + case SFC_ADAPTER_INITIALIZED: + rc = sfc_configure(sa); + break; + default: + sfc_err(sa, "unexpected adapter state %u to configure", + sa->state); + rc = EINVAL; + break; + } + sfc_adapter_unlock(sa); + + sfc_log_init(sa, "done %d", rc); + SFC_ASSERT(rc >= 0); + return -rc; +} + +static int +sfc_dev_start(struct rte_eth_dev *dev) +{ + struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev); + int rc; + + sfc_log_init(sa, "entry"); + + sfc_adapter_lock(sa); + rc = sfc_start(sa); + sfc_adapter_unlock(sa); + + sfc_log_init(sa, "done %d", rc); + SFC_ASSERT(rc >= 0); + return -rc; +} + +static int 
+sfc_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete) +{ + struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev); + struct rte_eth_link current_link; + int ret; + + sfc_log_init(sa, "entry"); + + if (sa->state != SFC_ADAPTER_STARTED) { + sfc_port_link_mode_to_info(EFX_LINK_UNKNOWN, ¤t_link); + } else if (wait_to_complete) { + efx_link_mode_t link_mode; + + if (efx_port_poll(sa->nic, &link_mode) != 0) + link_mode = EFX_LINK_UNKNOWN; + sfc_port_link_mode_to_info(link_mode, ¤t_link); + + } else { + sfc_ev_mgmt_qpoll(sa); + rte_eth_linkstatus_get(dev, ¤t_link); + } + + ret = rte_eth_linkstatus_set(dev, ¤t_link); + if (ret == 0) + sfc_notice(sa, "Link status is %s", + current_link.link_status ? "UP" : "DOWN"); + + return ret; +} + +static void +sfc_dev_stop(struct rte_eth_dev *dev) +{ + struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev); + + sfc_log_init(sa, "entry"); + + sfc_adapter_lock(sa); + sfc_stop(sa); + sfc_adapter_unlock(sa); + + sfc_log_init(sa, "done"); +} + +static int +sfc_dev_set_link_up(struct rte_eth_dev *dev) +{ + struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev); + int rc; + + sfc_log_init(sa, "entry"); + + sfc_adapter_lock(sa); + rc = sfc_start(sa); + sfc_adapter_unlock(sa); + + SFC_ASSERT(rc >= 0); + return -rc; +} + +static int +sfc_dev_set_link_down(struct rte_eth_dev *dev) +{ + struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev); + + sfc_log_init(sa, "entry"); + + sfc_adapter_lock(sa); + sfc_stop(sa); + sfc_adapter_unlock(sa); + + return 0; +} + +static void +sfc_dev_close(struct rte_eth_dev *dev) +{ + struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev); + + sfc_log_init(sa, "entry"); + + sfc_adapter_lock(sa); + switch (sa->state) { + case SFC_ADAPTER_STARTED: + sfc_stop(sa); + SFC_ASSERT(sa->state == SFC_ADAPTER_CONFIGURED); + /* FALLTHROUGH */ + case SFC_ADAPTER_CONFIGURED: + sfc_close(sa); + SFC_ASSERT(sa->state == SFC_ADAPTER_INITIALIZED); + /* FALLTHROUGH */ + case SFC_ADAPTER_INITIALIZED: + break; + default: + sfc_err(sa, "unexpected adapter state %u on close", sa->state); + break; + } + + /* + * Cleanup all resources in accordance with RTE_ETH_DEV_CLOSE_REMOVE. + * Rollback primary process sfc_eth_dev_init() below. + */ + + sfc_eth_dev_clear_ops(dev); + + sfc_detach(sa); + sfc_unprobe(sa); + + sfc_kvargs_cleanup(sa); + + sfc_adapter_unlock(sa); + sfc_adapter_lock_fini(sa); + + sfc_log_init(sa, "done"); + + /* Required for logging, so cleanup last */ + sa->eth_dev = NULL; + + dev->process_private = NULL; + free(sa); +} + +static int +sfc_dev_filter_set(struct rte_eth_dev *dev, enum sfc_dev_filter_mode mode, + boolean_t enabled) +{ + struct sfc_port *port; + boolean_t *toggle; + struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev); + boolean_t allmulti = (mode == SFC_DEV_FILTER_MODE_ALLMULTI); + const char *desc = (allmulti) ? "all-multi" : "promiscuous"; + int rc = 0; + + sfc_adapter_lock(sa); + + port = &sa->port; + toggle = (allmulti) ? (&port->allmulti) : (&port->promisc); + + if (*toggle != enabled) { + *toggle = enabled; + + if (sfc_sa2shared(sa)->isolated) { + sfc_warn(sa, "isolated mode is active on the port"); + sfc_warn(sa, "the change is to be applied on the next " + "start provided that isolated mode is " + "disabled prior the next start"); + } else if ((sa->state == SFC_ADAPTER_STARTED) && + ((rc = sfc_set_rx_mode(sa)) != 0)) { + *toggle = !(enabled); + sfc_warn(sa, "Failed to %s %s mode, rc = %d", + ((enabled) ? 
"enable" : "disable"), desc, rc); + + /* + * For promiscuous and all-multicast filters a + * permission failure should be reported as an + * unsupported filter. + */ + if (rc == EPERM) + rc = ENOTSUP; + } + } + + sfc_adapter_unlock(sa); + return rc; +} + +static int +sfc_dev_promisc_enable(struct rte_eth_dev *dev) +{ + int rc = sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_PROMISC, B_TRUE); + + SFC_ASSERT(rc >= 0); + return -rc; +} + +static int +sfc_dev_promisc_disable(struct rte_eth_dev *dev) +{ + int rc = sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_PROMISC, B_FALSE); + + SFC_ASSERT(rc >= 0); + return -rc; +} + +static int +sfc_dev_allmulti_enable(struct rte_eth_dev *dev) +{ + int rc = sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_ALLMULTI, B_TRUE); + + SFC_ASSERT(rc >= 0); + return -rc; +} + +static int +sfc_dev_allmulti_disable(struct rte_eth_dev *dev) +{ + int rc = sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_ALLMULTI, B_FALSE); + + SFC_ASSERT(rc >= 0); + return -rc; +} + +static int +sfc_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id, + uint16_t nb_rx_desc, unsigned int socket_id, + const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *mb_pool) +{ + struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev); + struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev); + int rc; + + sfc_log_init(sa, "RxQ=%u nb_rx_desc=%u socket_id=%u", + rx_queue_id, nb_rx_desc, socket_id); + + sfc_adapter_lock(sa); + + rc = sfc_rx_qinit(sa, rx_queue_id, nb_rx_desc, socket_id, + rx_conf, mb_pool); + if (rc != 0) + goto fail_rx_qinit; + + dev->data->rx_queues[rx_queue_id] = sas->rxq_info[rx_queue_id].dp; + + sfc_adapter_unlock(sa); + + return 0; + +fail_rx_qinit: + sfc_adapter_unlock(sa); + SFC_ASSERT(rc > 0); + return -rc; +} + +static void +sfc_rx_queue_release(void *queue) +{ + struct sfc_dp_rxq *dp_rxq = queue; + struct sfc_rxq *rxq; + struct sfc_adapter *sa; + unsigned int sw_index; + + if (dp_rxq == NULL) + return; + + rxq = sfc_rxq_by_dp_rxq(dp_rxq); + sa = rxq->evq->sa; + sfc_adapter_lock(sa); + + sw_index = dp_rxq->dpq.queue_id; + + sfc_log_init(sa, "RxQ=%u", sw_index); + + sfc_rx_qfini(sa, sw_index); + + sfc_adapter_unlock(sa); +} + +static int +sfc_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id, + uint16_t nb_tx_desc, unsigned int socket_id, + const struct rte_eth_txconf *tx_conf) +{ + struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev); + struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev); + int rc; + + sfc_log_init(sa, "TxQ = %u, nb_tx_desc = %u, socket_id = %u", + tx_queue_id, nb_tx_desc, socket_id); + + sfc_adapter_lock(sa); + + rc = sfc_tx_qinit(sa, tx_queue_id, nb_tx_desc, socket_id, tx_conf); + if (rc != 0) + goto fail_tx_qinit; + + dev->data->tx_queues[tx_queue_id] = sas->txq_info[tx_queue_id].dp; + + sfc_adapter_unlock(sa); + return 0; + +fail_tx_qinit: + sfc_adapter_unlock(sa); + SFC_ASSERT(rc > 0); + return -rc; +} + +static void +sfc_tx_queue_release(void *queue) +{ + struct sfc_dp_txq *dp_txq = queue; + struct sfc_txq *txq; + unsigned int sw_index; + struct sfc_adapter *sa; + + if (dp_txq == NULL) + return; + + txq = sfc_txq_by_dp_txq(dp_txq); + sw_index = dp_txq->dpq.queue_id; + + SFC_ASSERT(txq->evq != NULL); + sa = txq->evq->sa; + + sfc_log_init(sa, "TxQ = %u", sw_index); + + sfc_adapter_lock(sa); + + sfc_tx_qfini(sa, sw_index); + + sfc_adapter_unlock(sa); +} + +/* + * Some statistics are computed as A - B where A and B each increase + * monotonically with some hardware counter(s) and the counters are read + * asynchronously. 
+ *
+ * If packet X is counted in A, but not counted in B yet, the computed value
+ * is greater than the real one.
+ *
+ * If packet X is not counted in A at the moment of reading the counter,
+ * but counted in B at the moment of reading the counter, the computed value
+ * is less than the real one.
+ *
+ * However, a counter which goes backward is a worse evil than a slightly
+ * inaccurate value, so try to guarantee that it never happens except,
+ * possibly, when the MAC stats are zeroed as a result of a NIC reset.
+ */
+static void
+sfc_update_diff_stat(uint64_t *stat, uint64_t newval)
+{
+	if ((int64_t)(newval - *stat) > 0 || newval == 0)
+		*stat = newval;
+}
+
+static int
+sfc_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
+	struct sfc_port *port = &sa->port;
+	uint64_t *mac_stats;
+	int ret;
+
+	rte_spinlock_lock(&port->mac_stats_lock);
+
+	ret = sfc_port_update_mac_stats(sa);
+	if (ret != 0)
+		goto unlock;
+
+	mac_stats = port->mac_stats_buf;
+
+	if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask,
+				   EFX_MAC_VADAPTER_RX_UNICAST_PACKETS)) {
+		stats->ipackets =
+			mac_stats[EFX_MAC_VADAPTER_RX_UNICAST_PACKETS] +
+			mac_stats[EFX_MAC_VADAPTER_RX_MULTICAST_PACKETS] +
+			mac_stats[EFX_MAC_VADAPTER_RX_BROADCAST_PACKETS];
+		stats->opackets =
+			mac_stats[EFX_MAC_VADAPTER_TX_UNICAST_PACKETS] +
+			mac_stats[EFX_MAC_VADAPTER_TX_MULTICAST_PACKETS] +
+			mac_stats[EFX_MAC_VADAPTER_TX_BROADCAST_PACKETS];
+		stats->ibytes =
+			mac_stats[EFX_MAC_VADAPTER_RX_UNICAST_BYTES] +
+			mac_stats[EFX_MAC_VADAPTER_RX_MULTICAST_BYTES] +
+			mac_stats[EFX_MAC_VADAPTER_RX_BROADCAST_BYTES];
+		stats->obytes =
+			mac_stats[EFX_MAC_VADAPTER_TX_UNICAST_BYTES] +
+			mac_stats[EFX_MAC_VADAPTER_TX_MULTICAST_BYTES] +
+			mac_stats[EFX_MAC_VADAPTER_TX_BROADCAST_BYTES];
+		stats->imissed = mac_stats[EFX_MAC_VADAPTER_RX_BAD_PACKETS];
+		stats->oerrors = mac_stats[EFX_MAC_VADAPTER_TX_BAD_PACKETS];
+	} else {
+		stats->opackets = mac_stats[EFX_MAC_TX_PKTS];
+		stats->ibytes = mac_stats[EFX_MAC_RX_OCTETS];
+		stats->obytes = mac_stats[EFX_MAC_TX_OCTETS];
+		/*
+		 * Take into account all stats which can be reported on EF10.
+		 * If some stat is not supported by the current firmware
+		 * variant or HW revision, it is guaranteed to be zero in
+		 * mac_stats.
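+		 * Hence the sums below may be computed unconditionally,
+		 * without per-statistic support checks.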
+ */ + stats->imissed = + mac_stats[EFX_MAC_RX_NODESC_DROP_CNT] + + mac_stats[EFX_MAC_PM_TRUNC_BB_OVERFLOW] + + mac_stats[EFX_MAC_PM_DISCARD_BB_OVERFLOW] + + mac_stats[EFX_MAC_PM_TRUNC_VFIFO_FULL] + + mac_stats[EFX_MAC_PM_DISCARD_VFIFO_FULL] + + mac_stats[EFX_MAC_PM_TRUNC_QBB] + + mac_stats[EFX_MAC_PM_DISCARD_QBB] + + mac_stats[EFX_MAC_PM_DISCARD_MAPPING] + + mac_stats[EFX_MAC_RXDP_Q_DISABLED_PKTS] + + mac_stats[EFX_MAC_RXDP_DI_DROPPED_PKTS]; + stats->ierrors = + mac_stats[EFX_MAC_RX_FCS_ERRORS] + + mac_stats[EFX_MAC_RX_ALIGN_ERRORS] + + mac_stats[EFX_MAC_RX_JABBER_PKTS]; + /* no oerrors counters supported on EF10 */ + + /* Exclude missed, errors and pauses from Rx packets */ + sfc_update_diff_stat(&port->ipackets, + mac_stats[EFX_MAC_RX_PKTS] - + mac_stats[EFX_MAC_RX_PAUSE_PKTS] - + stats->imissed - stats->ierrors); + stats->ipackets = port->ipackets; + } + +unlock: + rte_spinlock_unlock(&port->mac_stats_lock); + SFC_ASSERT(ret >= 0); + return -ret; +} + +static int +sfc_stats_reset(struct rte_eth_dev *dev) +{ + struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev); + struct sfc_port *port = &sa->port; + int rc; + + if (sa->state != SFC_ADAPTER_STARTED) { + /* + * The operation cannot be done if port is not started; it + * will be scheduled to be done during the next port start + */ + port->mac_stats_reset_pending = B_TRUE; + return 0; + } + + rc = sfc_port_reset_mac_stats(sa); + if (rc != 0) + sfc_err(sa, "failed to reset statistics (rc = %d)", rc); + + SFC_ASSERT(rc >= 0); + return -rc; +} + +static int +sfc_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, + unsigned int xstats_count) +{ + struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev); + struct sfc_port *port = &sa->port; + uint64_t *mac_stats; + int rc; + unsigned int i; + int nstats = 0; + + rte_spinlock_lock(&port->mac_stats_lock); + + rc = sfc_port_update_mac_stats(sa); + if (rc != 0) { + SFC_ASSERT(rc > 0); + nstats = -rc; + goto unlock; + } + + mac_stats = port->mac_stats_buf; + + for (i = 0; i < EFX_MAC_NSTATS; ++i) { + if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i)) { + if (xstats != NULL && nstats < (int)xstats_count) { + xstats[nstats].id = nstats; + xstats[nstats].value = mac_stats[i]; + } + nstats++; + } + } + +unlock: + rte_spinlock_unlock(&port->mac_stats_lock); + + return nstats; +} + +static int +sfc_xstats_get_names(struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, + unsigned int xstats_count) +{ + struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev); + struct sfc_port *port = &sa->port; + unsigned int i; + unsigned int nstats = 0; + + for (i = 0; i < EFX_MAC_NSTATS; ++i) { + if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i)) { + if (xstats_names != NULL && nstats < xstats_count) + strlcpy(xstats_names[nstats].name, + efx_mac_stat_name(sa->nic, i), + sizeof(xstats_names[0].name)); + nstats++; + } + } + + return nstats; +} + +static int +sfc_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids, + uint64_t *values, unsigned int n) +{ + struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev); + struct sfc_port *port = &sa->port; + uint64_t *mac_stats; + unsigned int nb_supported = 0; + unsigned int nb_written = 0; + unsigned int i; + int ret; + int rc; + + if (unlikely(values == NULL) || + unlikely((ids == NULL) && (n < port->mac_stats_nb_supported))) + return port->mac_stats_nb_supported; + + rte_spinlock_lock(&port->mac_stats_lock); + + rc = sfc_port_update_mac_stats(sa); + if (rc != 0) { + SFC_ASSERT(rc > 0); + ret = -rc; + goto unlock; + } + + mac_stats = 
port->mac_stats_buf; + + for (i = 0; (i < EFX_MAC_NSTATS) && (nb_written < n); ++i) { + if (!EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i)) + continue; + + if ((ids == NULL) || (ids[nb_written] == nb_supported)) + values[nb_written++] = mac_stats[i]; + + ++nb_supported; + } + + ret = nb_written; + +unlock: + rte_spinlock_unlock(&port->mac_stats_lock); + + return ret; +} + +static int +sfc_xstats_get_names_by_id(struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, + const uint64_t *ids, unsigned int size) +{ + struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev); + struct sfc_port *port = &sa->port; + unsigned int nb_supported = 0; + unsigned int nb_written = 0; + unsigned int i; + + if (unlikely(xstats_names == NULL) || + unlikely((ids == NULL) && (size < port->mac_stats_nb_supported))) + return port->mac_stats_nb_supported; + + for (i = 0; (i < EFX_MAC_NSTATS) && (nb_written < size); ++i) { + if (!EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i)) + continue; + + if ((ids == NULL) || (ids[nb_written] == nb_supported)) { + char *name = xstats_names[nb_written++].name; + + strlcpy(name, efx_mac_stat_name(sa->nic, i), + sizeof(xstats_names[0].name)); + } + + ++nb_supported; + } + + return nb_written; +} + +static int +sfc_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) +{ + struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev); + unsigned int wanted_fc, link_fc; + + memset(fc_conf, 0, sizeof(*fc_conf)); + + sfc_adapter_lock(sa); + + if (sa->state == SFC_ADAPTER_STARTED) + efx_mac_fcntl_get(sa->nic, &wanted_fc, &link_fc); + else + link_fc = sa->port.flow_ctrl; + + switch (link_fc) { + case 0: + fc_conf->mode = RTE_FC_NONE; + break; + case EFX_FCNTL_RESPOND: + fc_conf->mode = RTE_FC_RX_PAUSE; + break; + case EFX_FCNTL_GENERATE: + fc_conf->mode = RTE_FC_TX_PAUSE; + break; + case (EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE): + fc_conf->mode = RTE_FC_FULL; + break; + default: + sfc_err(sa, "%s: unexpected flow control value %#x", + __func__, link_fc); + } + + fc_conf->autoneg = sa->port.flow_ctrl_autoneg; + + sfc_adapter_unlock(sa); + + return 0; +} + +static int +sfc_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) +{ + struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev); + struct sfc_port *port = &sa->port; + unsigned int fcntl; + int rc; + + if (fc_conf->high_water != 0 || fc_conf->low_water != 0 || + fc_conf->pause_time != 0 || fc_conf->send_xon != 0 || + fc_conf->mac_ctrl_frame_fwd != 0) { + sfc_err(sa, "unsupported flow control settings specified"); + rc = EINVAL; + goto fail_inval; + } + + switch (fc_conf->mode) { + case RTE_FC_NONE: + fcntl = 0; + break; + case RTE_FC_RX_PAUSE: + fcntl = EFX_FCNTL_RESPOND; + break; + case RTE_FC_TX_PAUSE: + fcntl = EFX_FCNTL_GENERATE; + break; + case RTE_FC_FULL: + fcntl = EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE; + break; + default: + rc = EINVAL; + goto fail_inval; + } + + sfc_adapter_lock(sa); + + if (sa->state == SFC_ADAPTER_STARTED) { + rc = efx_mac_fcntl_set(sa->nic, fcntl, fc_conf->autoneg); + if (rc != 0) + goto fail_mac_fcntl_set; + } + + port->flow_ctrl = fcntl; + port->flow_ctrl_autoneg = fc_conf->autoneg; + + sfc_adapter_unlock(sa); + + return 0; + +fail_mac_fcntl_set: + sfc_adapter_unlock(sa); +fail_inval: + SFC_ASSERT(rc > 0); + return -rc; +} + +static int +sfc_check_scatter_on_all_rx_queues(struct sfc_adapter *sa, size_t pdu) +{ + struct sfc_adapter_shared * const sas = sfc_sa2shared(sa); + const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic); + boolean_t scatter_enabled; + const char 
*error; + unsigned int i; + + for (i = 0; i < sas->rxq_count; i++) { + if ((sas->rxq_info[i].state & SFC_RXQ_INITIALIZED) == 0) + continue; + + scatter_enabled = (sas->rxq_info[i].type_flags & + EFX_RXQ_FLAG_SCATTER); + + if (!sfc_rx_check_scatter(pdu, sa->rxq_ctrl[i].buf_size, + encp->enc_rx_prefix_size, + scatter_enabled, &error)) { + sfc_err(sa, "MTU check for RxQ %u failed: %s", i, + error); + return EINVAL; + } + } + + return 0; +} + +static int +sfc_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) +{ + struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev); + size_t pdu = EFX_MAC_PDU(mtu); + size_t old_pdu; + int rc; + + sfc_log_init(sa, "mtu=%u", mtu); + + rc = EINVAL; + if (pdu < EFX_MAC_PDU_MIN) { + sfc_err(sa, "too small MTU %u (PDU size %u less than min %u)", + (unsigned int)mtu, (unsigned int)pdu, + EFX_MAC_PDU_MIN); + goto fail_inval; + } + if (pdu > EFX_MAC_PDU_MAX) { + sfc_err(sa, "too big MTU %u (PDU size %u greater than max %u)", + (unsigned int)mtu, (unsigned int)pdu, + (unsigned int)EFX_MAC_PDU_MAX); + goto fail_inval; + } + + sfc_adapter_lock(sa); + + rc = sfc_check_scatter_on_all_rx_queues(sa, pdu); + if (rc != 0) + goto fail_check_scatter; + + if (pdu != sa->port.pdu) { + if (sa->state == SFC_ADAPTER_STARTED) { + sfc_stop(sa); + + old_pdu = sa->port.pdu; + sa->port.pdu = pdu; + rc = sfc_start(sa); + if (rc != 0) + goto fail_start; + } else { + sa->port.pdu = pdu; + } + } + + /* + * The driver does not use it, but other PMDs update jumbo frame + * flag and max_rx_pkt_len when MTU is set. + */ + if (mtu > RTE_ETHER_MAX_LEN) { + struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode; + rxmode->offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME; + } + + dev->data->dev_conf.rxmode.max_rx_pkt_len = sa->port.pdu; + + sfc_adapter_unlock(sa); + + sfc_log_init(sa, "done"); + return 0; + +fail_start: + sa->port.pdu = old_pdu; + if (sfc_start(sa) != 0) + sfc_err(sa, "cannot start with neither new (%u) nor old (%u) " + "PDU max size - port is stopped", + (unsigned int)pdu, (unsigned int)old_pdu); + +fail_check_scatter: + sfc_adapter_unlock(sa); + +fail_inval: + sfc_log_init(sa, "failed %d", rc); + SFC_ASSERT(rc > 0); + return -rc; +} +static int +sfc_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr) +{ + struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev); + const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic); + struct sfc_port *port = &sa->port; + struct rte_ether_addr *old_addr = &dev->data->mac_addrs[0]; + int rc = 0; + + sfc_adapter_lock(sa); + + /* + * Copy the address to the device private data so that + * it could be recalled in the case of adapter restart. + */ + rte_ether_addr_copy(mac_addr, &port->default_mac_addr); + + /* + * Neither of the two following checks can return + * an error. The new MAC address is preserved in + * the device private data and can be activated + * on the next port start if the user prevents + * isolated mode from being enabled. 
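+ * Otherwise the address is applied below, either directly via MCDI when
+ * the firmware allows changing the MAC with filters installed, or by
+ * restarting the adapter.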
+ */ + if (sfc_sa2shared(sa)->isolated) { + sfc_warn(sa, "isolated mode is active on the port"); + sfc_warn(sa, "will not set MAC address"); + goto unlock; + } + + if (sa->state != SFC_ADAPTER_STARTED) { + sfc_notice(sa, "the port is not started"); + sfc_notice(sa, "the new MAC address will be set on port start"); + + goto unlock; + } + + if (encp->enc_allow_set_mac_with_installed_filters) { + rc = efx_mac_addr_set(sa->nic, mac_addr->addr_bytes); + if (rc != 0) { + sfc_err(sa, "cannot set MAC address (rc = %u)", rc); + goto unlock; + } + + /* + * Changing the MAC address by means of MCDI request + * has no effect on received traffic, therefore + * we also need to update unicast filters + */ + rc = sfc_set_rx_mode_unchecked(sa); + if (rc != 0) { + sfc_err(sa, "cannot set filter (rc = %u)", rc); + /* Rollback the old address */ + (void)efx_mac_addr_set(sa->nic, old_addr->addr_bytes); + (void)sfc_set_rx_mode_unchecked(sa); + } + } else { + sfc_warn(sa, "cannot set MAC address with filters installed"); + sfc_warn(sa, "adapter will be restarted to pick the new MAC"); + sfc_warn(sa, "(some traffic may be dropped)"); + + /* + * Since setting MAC address with filters installed is not + * allowed on the adapter, the new MAC address will be set + * by means of adapter restart. sfc_start() shall retrieve + * the new address from the device private data and set it. + */ + sfc_stop(sa); + rc = sfc_start(sa); + if (rc != 0) + sfc_err(sa, "cannot restart adapter (rc = %u)", rc); + } + +unlock: + if (rc != 0) + rte_ether_addr_copy(old_addr, &port->default_mac_addr); + + sfc_adapter_unlock(sa); + + SFC_ASSERT(rc >= 0); + return -rc; +} + + +static int +sfc_set_mc_addr_list(struct rte_eth_dev *dev, + struct rte_ether_addr *mc_addr_set, uint32_t nb_mc_addr) +{ + struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev); + struct sfc_port *port = &sa->port; + uint8_t *mc_addrs = port->mcast_addrs; + int rc; + unsigned int i; + + if (sfc_sa2shared(sa)->isolated) { + sfc_err(sa, "isolated mode is active on the port"); + sfc_err(sa, "will not set multicast address list"); + return -ENOTSUP; + } + + if (mc_addrs == NULL) + return -ENOBUFS; + + if (nb_mc_addr > port->max_mcast_addrs) { + sfc_err(sa, "too many multicast addresses: %u > %u", + nb_mc_addr, port->max_mcast_addrs); + return -EINVAL; + } + + for (i = 0; i < nb_mc_addr; ++i) { + rte_memcpy(mc_addrs, mc_addr_set[i].addr_bytes, + EFX_MAC_ADDR_LEN); + mc_addrs += EFX_MAC_ADDR_LEN; + } + + port->nb_mcast_addrs = nb_mc_addr; + + if (sa->state != SFC_ADAPTER_STARTED) + return 0; + + rc = efx_mac_multicast_list_set(sa->nic, port->mcast_addrs, + port->nb_mcast_addrs); + if (rc != 0) + sfc_err(sa, "cannot set multicast address list (rc = %u)", rc); + + SFC_ASSERT(rc >= 0); + return -rc; +} + +/* + * The function is used by the secondary process as well. It must not + * use any process-local pointers from the adapter data. 
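+ * Only the shared adapter state obtained via
+ * sfc_adapter_shared_by_eth_dev() is dereferenced here.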
+ */ +static void +sfc_rx_queue_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id, + struct rte_eth_rxq_info *qinfo) +{ + struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev); + struct sfc_rxq_info *rxq_info; + + SFC_ASSERT(rx_queue_id < sas->rxq_count); + + rxq_info = &sas->rxq_info[rx_queue_id]; + + qinfo->mp = rxq_info->refill_mb_pool; + qinfo->conf.rx_free_thresh = rxq_info->refill_threshold; + qinfo->conf.rx_drop_en = 1; + qinfo->conf.rx_deferred_start = rxq_info->deferred_start; + qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads; + if (rxq_info->type_flags & EFX_RXQ_FLAG_SCATTER) { + qinfo->conf.offloads |= DEV_RX_OFFLOAD_SCATTER; + qinfo->scattered_rx = 1; + } + qinfo->nb_desc = rxq_info->entries; +} + +/* + * The function is used by the secondary process as well. It must not + * use any process-local pointers from the adapter data. + */ +static void +sfc_tx_queue_info_get(struct rte_eth_dev *dev, uint16_t tx_queue_id, + struct rte_eth_txq_info *qinfo) +{ + struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev); + struct sfc_txq_info *txq_info; + + SFC_ASSERT(tx_queue_id < sas->txq_count); + + txq_info = &sas->txq_info[tx_queue_id]; + + memset(qinfo, 0, sizeof(*qinfo)); + + qinfo->conf.offloads = txq_info->offloads; + qinfo->conf.tx_free_thresh = txq_info->free_thresh; + qinfo->conf.tx_deferred_start = txq_info->deferred_start; + qinfo->nb_desc = txq_info->entries; +} + +/* + * The function is used by the secondary process as well. It must not + * use any process-local pointers from the adapter data. + */ +static uint32_t +sfc_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id) +{ + const struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev); + struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev); + struct sfc_rxq_info *rxq_info; + + SFC_ASSERT(rx_queue_id < sas->rxq_count); + rxq_info = &sas->rxq_info[rx_queue_id]; + + if ((rxq_info->state & SFC_RXQ_STARTED) == 0) + return 0; + + return sap->dp_rx->qdesc_npending(rxq_info->dp); +} + +/* + * The function is used by the secondary process as well. It must not + * use any process-local pointers from the adapter data. + */ +static int +sfc_rx_descriptor_done(void *queue, uint16_t offset) +{ + struct sfc_dp_rxq *dp_rxq = queue; + const struct sfc_dp_rx *dp_rx; + + dp_rx = sfc_dp_rx_by_dp_rxq(dp_rxq); + + return offset < dp_rx->qdesc_npending(dp_rxq); +} + +/* + * The function is used by the secondary process as well. It must not + * use any process-local pointers from the adapter data. + */ +static int +sfc_rx_descriptor_status(void *queue, uint16_t offset) +{ + struct sfc_dp_rxq *dp_rxq = queue; + const struct sfc_dp_rx *dp_rx; + + dp_rx = sfc_dp_rx_by_dp_rxq(dp_rxq); + + return dp_rx->qdesc_status(dp_rxq, offset); +} + +/* + * The function is used by the secondary process as well. It must not + * use any process-local pointers from the adapter data. 
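+ * The dp_tx->qdesc_status() callback used here is multi-process safe:
+ * secondary process attach verifies SFC_DP_TX_FEAT_MULTI_PROCESS for
+ * the selected Tx datapath.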
+ */ +static int +sfc_tx_descriptor_status(void *queue, uint16_t offset) +{ + struct sfc_dp_txq *dp_txq = queue; + const struct sfc_dp_tx *dp_tx; + + dp_tx = sfc_dp_tx_by_dp_txq(dp_txq); + + return dp_tx->qdesc_status(dp_txq, offset); +} + +static int +sfc_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) +{ + struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev); + struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev); + int rc; + + sfc_log_init(sa, "RxQ=%u", rx_queue_id); + + sfc_adapter_lock(sa); + + rc = EINVAL; + if (sa->state != SFC_ADAPTER_STARTED) + goto fail_not_started; + + if (sas->rxq_info[rx_queue_id].state != SFC_RXQ_INITIALIZED) + goto fail_not_setup; + + rc = sfc_rx_qstart(sa, rx_queue_id); + if (rc != 0) + goto fail_rx_qstart; + + sas->rxq_info[rx_queue_id].deferred_started = B_TRUE; + + sfc_adapter_unlock(sa); + + return 0; + +fail_rx_qstart: +fail_not_setup: +fail_not_started: + sfc_adapter_unlock(sa); + SFC_ASSERT(rc > 0); + return -rc; +} + +static int +sfc_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id) +{ + struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev); + struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev); + + sfc_log_init(sa, "RxQ=%u", rx_queue_id); + + sfc_adapter_lock(sa); + sfc_rx_qstop(sa, rx_queue_id); + + sas->rxq_info[rx_queue_id].deferred_started = B_FALSE; + + sfc_adapter_unlock(sa); + + return 0; +} + +static int +sfc_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id) +{ + struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev); + struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev); + int rc; + + sfc_log_init(sa, "TxQ = %u", tx_queue_id); + + sfc_adapter_lock(sa); + + rc = EINVAL; + if (sa->state != SFC_ADAPTER_STARTED) + goto fail_not_started; + + if (sas->txq_info[tx_queue_id].state != SFC_TXQ_INITIALIZED) + goto fail_not_setup; + + rc = sfc_tx_qstart(sa, tx_queue_id); + if (rc != 0) + goto fail_tx_qstart; + + sas->txq_info[tx_queue_id].deferred_started = B_TRUE; + + sfc_adapter_unlock(sa); + return 0; + +fail_tx_qstart: + +fail_not_setup: +fail_not_started: + sfc_adapter_unlock(sa); + SFC_ASSERT(rc > 0); + return -rc; +} + +static int +sfc_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id) +{ + struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev); + struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev); + + sfc_log_init(sa, "TxQ = %u", tx_queue_id); + + sfc_adapter_lock(sa); + + sfc_tx_qstop(sa, tx_queue_id); + + sas->txq_info[tx_queue_id].deferred_started = B_FALSE; + + sfc_adapter_unlock(sa); + return 0; +} + +static efx_tunnel_protocol_t +sfc_tunnel_rte_type_to_efx_udp_proto(enum rte_eth_tunnel_type rte_type) +{ + switch (rte_type) { + case RTE_TUNNEL_TYPE_VXLAN: + return EFX_TUNNEL_PROTOCOL_VXLAN; + case RTE_TUNNEL_TYPE_GENEVE: + return EFX_TUNNEL_PROTOCOL_GENEVE; + default: + return EFX_TUNNEL_NPROTOS; + } +} + +enum sfc_udp_tunnel_op_e { + SFC_UDP_TUNNEL_ADD_PORT, + SFC_UDP_TUNNEL_DEL_PORT, +}; + +static int +sfc_dev_udp_tunnel_op(struct rte_eth_dev *dev, + struct rte_eth_udp_tunnel *tunnel_udp, + enum sfc_udp_tunnel_op_e op) +{ + struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev); + efx_tunnel_protocol_t tunnel_proto; + int rc; + + sfc_log_init(sa, "%s udp_port=%u prot_type=%u", + (op == SFC_UDP_TUNNEL_ADD_PORT) ? "add" : + (op == SFC_UDP_TUNNEL_DEL_PORT) ? 
"delete" : "unknown", + tunnel_udp->udp_port, tunnel_udp->prot_type); + + tunnel_proto = + sfc_tunnel_rte_type_to_efx_udp_proto(tunnel_udp->prot_type); + if (tunnel_proto >= EFX_TUNNEL_NPROTOS) { + rc = ENOTSUP; + goto fail_bad_proto; + } + + sfc_adapter_lock(sa); + + switch (op) { + case SFC_UDP_TUNNEL_ADD_PORT: + rc = efx_tunnel_config_udp_add(sa->nic, + tunnel_udp->udp_port, + tunnel_proto); + break; + case SFC_UDP_TUNNEL_DEL_PORT: + rc = efx_tunnel_config_udp_remove(sa->nic, + tunnel_udp->udp_port, + tunnel_proto); + break; + default: + rc = EINVAL; + goto fail_bad_op; + } + + if (rc != 0) + goto fail_op; + + if (sa->state == SFC_ADAPTER_STARTED) { + rc = efx_tunnel_reconfigure(sa->nic); + if (rc == EAGAIN) { + /* + * Configuration is accepted by FW and MC reboot + * is initiated to apply the changes. MC reboot + * will be handled in a usual way (MC reboot + * event on management event queue and adapter + * restart). + */ + rc = 0; + } else if (rc != 0) { + goto fail_reconfigure; + } + } + + sfc_adapter_unlock(sa); + return 0; + +fail_reconfigure: + /* Remove/restore entry since the change makes the trouble */ + switch (op) { + case SFC_UDP_TUNNEL_ADD_PORT: + (void)efx_tunnel_config_udp_remove(sa->nic, + tunnel_udp->udp_port, + tunnel_proto); + break; + case SFC_UDP_TUNNEL_DEL_PORT: + (void)efx_tunnel_config_udp_add(sa->nic, + tunnel_udp->udp_port, + tunnel_proto); + break; + } + +fail_op: +fail_bad_op: + sfc_adapter_unlock(sa); + +fail_bad_proto: + SFC_ASSERT(rc > 0); + return -rc; +} + +static int +sfc_dev_udp_tunnel_port_add(struct rte_eth_dev *dev, + struct rte_eth_udp_tunnel *tunnel_udp) +{ + return sfc_dev_udp_tunnel_op(dev, tunnel_udp, SFC_UDP_TUNNEL_ADD_PORT); +} + +static int +sfc_dev_udp_tunnel_port_del(struct rte_eth_dev *dev, + struct rte_eth_udp_tunnel *tunnel_udp) +{ + return sfc_dev_udp_tunnel_op(dev, tunnel_udp, SFC_UDP_TUNNEL_DEL_PORT); +} + +/* + * The function is used by the secondary process as well. It must not + * use any process-local pointers from the adapter data. 
+ */ +static int +sfc_dev_rss_hash_conf_get(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf) +{ + struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev); + struct sfc_rss *rss = &sas->rss; + + if (rss->context_type != EFX_RX_SCALE_EXCLUSIVE) + return -ENOTSUP; + + /* + * Mapping of hash configuration between RTE and EFX is not one-to-one, + * hence, conversion is done here to derive a correct set of ETH_RSS + * flags which corresponds to the active EFX configuration stored + * locally in 'sfc_adapter' and kept up-to-date + */ + rss_conf->rss_hf = sfc_rx_hf_efx_to_rte(rss, rss->hash_types); + rss_conf->rss_key_len = EFX_RSS_KEY_SIZE; + if (rss_conf->rss_key != NULL) + rte_memcpy(rss_conf->rss_key, rss->key, EFX_RSS_KEY_SIZE); + + return 0; +} + +static int +sfc_dev_rss_hash_update(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf) +{ + struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev); + struct sfc_rss *rss = &sfc_sa2shared(sa)->rss; + unsigned int efx_hash_types; + int rc = 0; + + if (sfc_sa2shared(sa)->isolated) + return -ENOTSUP; + + if (rss->context_type != EFX_RX_SCALE_EXCLUSIVE) { + sfc_err(sa, "RSS is not available"); + return -ENOTSUP; + } + + if (rss->channels == 0) { + sfc_err(sa, "RSS is not configured"); + return -EINVAL; + } + + if ((rss_conf->rss_key != NULL) && + (rss_conf->rss_key_len != sizeof(rss->key))) { + sfc_err(sa, "RSS key size is wrong (should be %zu)", + sizeof(rss->key)); + return -EINVAL; + } + + sfc_adapter_lock(sa); + + rc = sfc_rx_hf_rte_to_efx(sa, rss_conf->rss_hf, &efx_hash_types); + if (rc != 0) + goto fail_rx_hf_rte_to_efx; + + rc = efx_rx_scale_mode_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT, + rss->hash_alg, efx_hash_types, B_TRUE); + if (rc != 0) + goto fail_scale_mode_set; + + if (rss_conf->rss_key != NULL) { + if (sa->state == SFC_ADAPTER_STARTED) { + rc = efx_rx_scale_key_set(sa->nic, + EFX_RSS_CONTEXT_DEFAULT, + rss_conf->rss_key, + sizeof(rss->key)); + if (rc != 0) + goto fail_scale_key_set; + } + + rte_memcpy(rss->key, rss_conf->rss_key, sizeof(rss->key)); + } + + rss->hash_types = efx_hash_types; + + sfc_adapter_unlock(sa); + + return 0; + +fail_scale_key_set: + if (efx_rx_scale_mode_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT, + EFX_RX_HASHALG_TOEPLITZ, + rss->hash_types, B_TRUE) != 0) + sfc_err(sa, "failed to restore RSS mode"); + +fail_scale_mode_set: +fail_rx_hf_rte_to_efx: + sfc_adapter_unlock(sa); + return -rc; +} + +/* + * The function is used by the secondary process as well. It must not + * use any process-local pointers from the adapter data. 
+ */ +static int +sfc_dev_rss_reta_query(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size) +{ + struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev); + struct sfc_rss *rss = &sas->rss; + int entry; + + if (rss->context_type != EFX_RX_SCALE_EXCLUSIVE || sas->isolated) + return -ENOTSUP; + + if (rss->channels == 0) + return -EINVAL; + + if (reta_size != EFX_RSS_TBL_SIZE) + return -EINVAL; + + for (entry = 0; entry < reta_size; entry++) { + int grp = entry / RTE_RETA_GROUP_SIZE; + int grp_idx = entry % RTE_RETA_GROUP_SIZE; + + if ((reta_conf[grp].mask >> grp_idx) & 1) + reta_conf[grp].reta[grp_idx] = rss->tbl[entry]; + } + + return 0; +} + +static int +sfc_dev_rss_reta_update(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size) +{ + struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev); + struct sfc_rss *rss = &sfc_sa2shared(sa)->rss; + unsigned int *rss_tbl_new; + uint16_t entry; + int rc = 0; + + + if (sfc_sa2shared(sa)->isolated) + return -ENOTSUP; + + if (rss->context_type != EFX_RX_SCALE_EXCLUSIVE) { + sfc_err(sa, "RSS is not available"); + return -ENOTSUP; + } + + if (rss->channels == 0) { + sfc_err(sa, "RSS is not configured"); + return -EINVAL; + } + + if (reta_size != EFX_RSS_TBL_SIZE) { + sfc_err(sa, "RETA size is wrong (should be %u)", + EFX_RSS_TBL_SIZE); + return -EINVAL; + } + + rss_tbl_new = rte_zmalloc("rss_tbl_new", sizeof(rss->tbl), 0); + if (rss_tbl_new == NULL) + return -ENOMEM; + + sfc_adapter_lock(sa); + + rte_memcpy(rss_tbl_new, rss->tbl, sizeof(rss->tbl)); + + for (entry = 0; entry < reta_size; entry++) { + int grp_idx = entry % RTE_RETA_GROUP_SIZE; + struct rte_eth_rss_reta_entry64 *grp; + + grp = &reta_conf[entry / RTE_RETA_GROUP_SIZE]; + + if (grp->mask & (1ull << grp_idx)) { + if (grp->reta[grp_idx] >= rss->channels) { + rc = EINVAL; + goto bad_reta_entry; + } + rss_tbl_new[entry] = grp->reta[grp_idx]; + } + } + + if (sa->state == SFC_ADAPTER_STARTED) { + rc = efx_rx_scale_tbl_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT, + rss_tbl_new, EFX_RSS_TBL_SIZE); + if (rc != 0) + goto fail_scale_tbl_set; + } + + rte_memcpy(rss->tbl, rss_tbl_new, sizeof(rss->tbl)); + +fail_scale_tbl_set: +bad_reta_entry: + sfc_adapter_unlock(sa); + + rte_free(rss_tbl_new); + + SFC_ASSERT(rc >= 0); + return -rc; +} + +static int +sfc_dev_filter_ctrl(struct rte_eth_dev *dev, enum rte_filter_type filter_type, + enum rte_filter_op filter_op, + void *arg) +{ + struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev); + int rc = ENOTSUP; + + sfc_log_init(sa, "entry"); + + switch (filter_type) { + case RTE_ETH_FILTER_NONE: + sfc_err(sa, "Global filters configuration not supported"); + break; + case RTE_ETH_FILTER_MACVLAN: + sfc_err(sa, "MACVLAN filters not supported"); + break; + case RTE_ETH_FILTER_ETHERTYPE: + sfc_err(sa, "EtherType filters not supported"); + break; + case RTE_ETH_FILTER_FLEXIBLE: + sfc_err(sa, "Flexible filters not supported"); + break; + case RTE_ETH_FILTER_SYN: + sfc_err(sa, "SYN filters not supported"); + break; + case RTE_ETH_FILTER_NTUPLE: + sfc_err(sa, "NTUPLE filters not supported"); + break; + case RTE_ETH_FILTER_TUNNEL: + sfc_err(sa, "Tunnel filters not supported"); + break; + case RTE_ETH_FILTER_FDIR: + sfc_err(sa, "Flow Director filters not supported"); + break; + case RTE_ETH_FILTER_HASH: + sfc_err(sa, "Hash filters not supported"); + break; + case RTE_ETH_FILTER_GENERIC: + if (filter_op != RTE_ETH_FILTER_GET) { + rc = EINVAL; + } else { + *(const void **)arg = &sfc_flow_ops; + rc = 
0; + } + break; + default: + sfc_err(sa, "Unknown filter type %u", filter_type); + break; + } + + sfc_log_init(sa, "exit: %d", -rc); + SFC_ASSERT(rc >= 0); + return -rc; +} + +static int +sfc_pool_ops_supported(struct rte_eth_dev *dev, const char *pool) +{ + const struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev); + + /* + * If Rx datapath does not provide callback to check mempool, + * all pools are supported. + */ + if (sap->dp_rx->pool_ops_supported == NULL) + return 1; + + return sap->dp_rx->pool_ops_supported(pool); +} + +static int +sfc_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) +{ + const struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev); + struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev); + struct sfc_rxq_info *rxq_info; + + SFC_ASSERT(queue_id < sas->rxq_count); + rxq_info = &sas->rxq_info[queue_id]; + + return sap->dp_rx->intr_enable(rxq_info->dp); +} + +static int +sfc_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) +{ + const struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev); + struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev); + struct sfc_rxq_info *rxq_info; + + SFC_ASSERT(queue_id < sas->rxq_count); + rxq_info = &sas->rxq_info[queue_id]; + + return sap->dp_rx->intr_disable(rxq_info->dp); +} + +static const struct eth_dev_ops sfc_eth_dev_ops = { + .dev_configure = sfc_dev_configure, + .dev_start = sfc_dev_start, + .dev_stop = sfc_dev_stop, + .dev_set_link_up = sfc_dev_set_link_up, + .dev_set_link_down = sfc_dev_set_link_down, + .dev_close = sfc_dev_close, + .promiscuous_enable = sfc_dev_promisc_enable, + .promiscuous_disable = sfc_dev_promisc_disable, + .allmulticast_enable = sfc_dev_allmulti_enable, + .allmulticast_disable = sfc_dev_allmulti_disable, + .link_update = sfc_dev_link_update, + .stats_get = sfc_stats_get, + .stats_reset = sfc_stats_reset, + .xstats_get = sfc_xstats_get, + .xstats_reset = sfc_stats_reset, + .xstats_get_names = sfc_xstats_get_names, + .dev_infos_get = sfc_dev_infos_get, + .dev_supported_ptypes_get = sfc_dev_supported_ptypes_get, + .mtu_set = sfc_dev_set_mtu, + .rx_queue_start = sfc_rx_queue_start, + .rx_queue_stop = sfc_rx_queue_stop, + .tx_queue_start = sfc_tx_queue_start, + .tx_queue_stop = sfc_tx_queue_stop, + .rx_queue_setup = sfc_rx_queue_setup, + .rx_queue_release = sfc_rx_queue_release, + .rx_queue_count = sfc_rx_queue_count, + .rx_descriptor_done = sfc_rx_descriptor_done, + .rx_descriptor_status = sfc_rx_descriptor_status, + .tx_descriptor_status = sfc_tx_descriptor_status, + .rx_queue_intr_enable = sfc_rx_queue_intr_enable, + .rx_queue_intr_disable = sfc_rx_queue_intr_disable, + .tx_queue_setup = sfc_tx_queue_setup, + .tx_queue_release = sfc_tx_queue_release, + .flow_ctrl_get = sfc_flow_ctrl_get, + .flow_ctrl_set = sfc_flow_ctrl_set, + .mac_addr_set = sfc_mac_addr_set, + .udp_tunnel_port_add = sfc_dev_udp_tunnel_port_add, + .udp_tunnel_port_del = sfc_dev_udp_tunnel_port_del, + .reta_update = sfc_dev_rss_reta_update, + .reta_query = sfc_dev_rss_reta_query, + .rss_hash_update = sfc_dev_rss_hash_update, + .rss_hash_conf_get = sfc_dev_rss_hash_conf_get, + .filter_ctrl = sfc_dev_filter_ctrl, + .set_mc_addr_list = sfc_set_mc_addr_list, + .rxq_info_get = sfc_rx_queue_info_get, + .txq_info_get = sfc_tx_queue_info_get, + .fw_version_get = sfc_fw_version_get, + .xstats_get_by_id = sfc_xstats_get_by_id, + .xstats_get_names_by_id = sfc_xstats_get_names_by_id, + .pool_ops_supported = sfc_pool_ops_supported, +}; + +/** + * Duplicate a 
string in potentially shared memory required for + * multi-process support. + * + * strdup() allocates from process-local heap/memory. + */ +static char * +sfc_strdup(const char *str) +{ + size_t size; + char *copy; + + if (str == NULL) + return NULL; + + size = strlen(str) + 1; + copy = rte_malloc(__func__, size, 0); + if (copy != NULL) + rte_memcpy(copy, str, size); + + return copy; +} + +static int +sfc_eth_dev_set_ops(struct rte_eth_dev *dev) +{ + struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev); + struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev); + const struct sfc_dp_rx *dp_rx; + const struct sfc_dp_tx *dp_tx; + const efx_nic_cfg_t *encp; + unsigned int avail_caps = 0; + const char *rx_name = NULL; + const char *tx_name = NULL; + int rc; + + switch (sa->family) { + case EFX_FAMILY_HUNTINGTON: + case EFX_FAMILY_MEDFORD: + case EFX_FAMILY_MEDFORD2: + avail_caps |= SFC_DP_HW_FW_CAP_EF10; + break; + default: + break; + } + + encp = efx_nic_cfg_get(sa->nic); + if (encp->enc_rx_es_super_buffer_supported) + avail_caps |= SFC_DP_HW_FW_CAP_RX_ES_SUPER_BUFFER; + + rc = sfc_kvargs_process(sa, SFC_KVARG_RX_DATAPATH, + sfc_kvarg_string_handler, &rx_name); + if (rc != 0) + goto fail_kvarg_rx_datapath; + + if (rx_name != NULL) { + dp_rx = sfc_dp_find_rx_by_name(&sfc_dp_head, rx_name); + if (dp_rx == NULL) { + sfc_err(sa, "Rx datapath %s not found", rx_name); + rc = ENOENT; + goto fail_dp_rx; + } + if (!sfc_dp_match_hw_fw_caps(&dp_rx->dp, avail_caps)) { + sfc_err(sa, + "Insufficient Hw/FW capabilities to use Rx datapath %s", + rx_name); + rc = EINVAL; + goto fail_dp_rx_caps; + } + } else { + dp_rx = sfc_dp_find_rx_by_caps(&sfc_dp_head, avail_caps); + if (dp_rx == NULL) { + sfc_err(sa, "Rx datapath by caps %#x not found", + avail_caps); + rc = ENOENT; + goto fail_dp_rx; + } + } + + sas->dp_rx_name = sfc_strdup(dp_rx->dp.name); + if (sas->dp_rx_name == NULL) { + rc = ENOMEM; + goto fail_dp_rx_name; + } + + sfc_notice(sa, "use %s Rx datapath", sas->dp_rx_name); + + rc = sfc_kvargs_process(sa, SFC_KVARG_TX_DATAPATH, + sfc_kvarg_string_handler, &tx_name); + if (rc != 0) + goto fail_kvarg_tx_datapath; + + if (tx_name != NULL) { + dp_tx = sfc_dp_find_tx_by_name(&sfc_dp_head, tx_name); + if (dp_tx == NULL) { + sfc_err(sa, "Tx datapath %s not found", tx_name); + rc = ENOENT; + goto fail_dp_tx; + } + if (!sfc_dp_match_hw_fw_caps(&dp_tx->dp, avail_caps)) { + sfc_err(sa, + "Insufficient Hw/FW capabilities to use Tx datapath %s", + tx_name); + rc = EINVAL; + goto fail_dp_tx_caps; + } + } else { + dp_tx = sfc_dp_find_tx_by_caps(&sfc_dp_head, avail_caps); + if (dp_tx == NULL) { + sfc_err(sa, "Tx datapath by caps %#x not found", + avail_caps); + rc = ENOENT; + goto fail_dp_tx; + } + } + + sas->dp_tx_name = sfc_strdup(dp_tx->dp.name); + if (sas->dp_tx_name == NULL) { + rc = ENOMEM; + goto fail_dp_tx_name; + } + + sfc_notice(sa, "use %s Tx datapath", sas->dp_tx_name); + + sa->priv.dp_rx = dp_rx; + sa->priv.dp_tx = dp_tx; + + dev->rx_pkt_burst = dp_rx->pkt_burst; + dev->tx_pkt_prepare = dp_tx->pkt_prepare; + dev->tx_pkt_burst = dp_tx->pkt_burst; + + dev->dev_ops = &sfc_eth_dev_ops; + + return 0; + +fail_dp_tx_name: +fail_dp_tx_caps: +fail_dp_tx: +fail_kvarg_tx_datapath: + rte_free(sas->dp_rx_name); + sas->dp_rx_name = NULL; + +fail_dp_rx_name: +fail_dp_rx_caps: +fail_dp_rx: +fail_kvarg_rx_datapath: + return rc; +} + +static void +sfc_eth_dev_clear_ops(struct rte_eth_dev *dev) +{ + struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev); + struct sfc_adapter_shared *sas = 
sfc_adapter_shared_by_eth_dev(dev); + + dev->dev_ops = NULL; + dev->tx_pkt_prepare = NULL; + dev->rx_pkt_burst = NULL; + dev->tx_pkt_burst = NULL; + + rte_free(sas->dp_tx_name); + sas->dp_tx_name = NULL; + sa->priv.dp_tx = NULL; + + rte_free(sas->dp_rx_name); + sas->dp_rx_name = NULL; + sa->priv.dp_rx = NULL; +} + +static const struct eth_dev_ops sfc_eth_dev_secondary_ops = { + .dev_supported_ptypes_get = sfc_dev_supported_ptypes_get, + .rx_queue_count = sfc_rx_queue_count, + .rx_descriptor_done = sfc_rx_descriptor_done, + .rx_descriptor_status = sfc_rx_descriptor_status, + .tx_descriptor_status = sfc_tx_descriptor_status, + .reta_query = sfc_dev_rss_reta_query, + .rss_hash_conf_get = sfc_dev_rss_hash_conf_get, + .rxq_info_get = sfc_rx_queue_info_get, + .txq_info_get = sfc_tx_queue_info_get, +}; + +static int +sfc_eth_dev_secondary_init(struct rte_eth_dev *dev, uint32_t logtype_main) +{ + struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev); + struct sfc_adapter_priv *sap; + const struct sfc_dp_rx *dp_rx; + const struct sfc_dp_tx *dp_tx; + int rc; + + /* + * Allocate process private data from heap, since it should not + * be located in shared memory allocated using rte_malloc() API. + */ + sap = calloc(1, sizeof(*sap)); + if (sap == NULL) { + rc = ENOMEM; + goto fail_alloc_priv; + } + + sap->logtype_main = logtype_main; + + dp_rx = sfc_dp_find_rx_by_name(&sfc_dp_head, sas->dp_rx_name); + if (dp_rx == NULL) { + SFC_LOG(sas, RTE_LOG_ERR, logtype_main, + "cannot find %s Rx datapath", sas->dp_rx_name); + rc = ENOENT; + goto fail_dp_rx; + } + if (~dp_rx->features & SFC_DP_RX_FEAT_MULTI_PROCESS) { + SFC_LOG(sas, RTE_LOG_ERR, logtype_main, + "%s Rx datapath does not support multi-process", + sas->dp_rx_name); + rc = EINVAL; + goto fail_dp_rx_multi_process; + } + + dp_tx = sfc_dp_find_tx_by_name(&sfc_dp_head, sas->dp_tx_name); + if (dp_tx == NULL) { + SFC_LOG(sas, RTE_LOG_ERR, logtype_main, + "cannot find %s Tx datapath", sas->dp_tx_name); + rc = ENOENT; + goto fail_dp_tx; + } + if (~dp_tx->features & SFC_DP_TX_FEAT_MULTI_PROCESS) { + SFC_LOG(sas, RTE_LOG_ERR, logtype_main, + "%s Tx datapath does not support multi-process", + sas->dp_tx_name); + rc = EINVAL; + goto fail_dp_tx_multi_process; + } + + sap->dp_rx = dp_rx; + sap->dp_tx = dp_tx; + + dev->process_private = sap; + dev->rx_pkt_burst = dp_rx->pkt_burst; + dev->tx_pkt_prepare = dp_tx->pkt_prepare; + dev->tx_pkt_burst = dp_tx->pkt_burst; + dev->dev_ops = &sfc_eth_dev_secondary_ops; + + return 0; + +fail_dp_tx_multi_process: +fail_dp_tx: +fail_dp_rx_multi_process: +fail_dp_rx: + free(sap); + +fail_alloc_priv: + return rc; +} + +static void +sfc_eth_dev_secondary_clear_ops(struct rte_eth_dev *dev) +{ + free(dev->process_private); + dev->process_private = NULL; + dev->dev_ops = NULL; + dev->tx_pkt_prepare = NULL; + dev->tx_pkt_burst = NULL; + dev->rx_pkt_burst = NULL; +} + +static void +sfc_register_dp(void) +{ + /* Register once */ + if (TAILQ_EMPTY(&sfc_dp_head)) { + /* Prefer EF10 datapath */ + sfc_dp_register(&sfc_dp_head, &sfc_ef10_essb_rx.dp); + sfc_dp_register(&sfc_dp_head, &sfc_ef10_rx.dp); + sfc_dp_register(&sfc_dp_head, &sfc_efx_rx.dp); + + sfc_dp_register(&sfc_dp_head, &sfc_ef10_tx.dp); + sfc_dp_register(&sfc_dp_head, &sfc_efx_tx.dp); + sfc_dp_register(&sfc_dp_head, &sfc_ef10_simple_tx.dp); + } +} + +static int +sfc_eth_dev_init(struct rte_eth_dev *dev) +{ + struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + uint32_t logtype_main; + struct 
sfc_adapter *sa; + int rc; + const efx_nic_cfg_t *encp; + const struct rte_ether_addr *from; + + sfc_register_dp(); + + logtype_main = sfc_register_logtype(&pci_dev->addr, + SFC_LOGTYPE_MAIN_STR, + RTE_LOG_NOTICE); + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return -sfc_eth_dev_secondary_init(dev, logtype_main); + + /* Required for logging */ + sas->pci_addr = pci_dev->addr; + sas->port_id = dev->data->port_id; + + /* + * Allocate process private data from heap, since it should not + * be located in shared memory allocated using rte_malloc() API. + */ + sa = calloc(1, sizeof(*sa)); + if (sa == NULL) { + rc = ENOMEM; + goto fail_alloc_sa; + } + + dev->process_private = sa; + + /* Required for logging */ + sa->priv.shared = sas; + sa->priv.logtype_main = logtype_main; + + sa->eth_dev = dev; + + /* Copy PCI device info to the dev->data */ + rte_eth_copy_pci_info(dev, pci_dev); + + rc = sfc_kvargs_parse(sa); + if (rc != 0) + goto fail_kvargs_parse; + + sfc_log_init(sa, "entry"); + + dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE; + + dev->data->mac_addrs = rte_zmalloc("sfc", RTE_ETHER_ADDR_LEN, 0); + if (dev->data->mac_addrs == NULL) { + rc = ENOMEM; + goto fail_mac_addrs; + } + + sfc_adapter_lock_init(sa); + sfc_adapter_lock(sa); + + sfc_log_init(sa, "probing"); + rc = sfc_probe(sa); + if (rc != 0) + goto fail_probe; + + sfc_log_init(sa, "set device ops"); + rc = sfc_eth_dev_set_ops(dev); + if (rc != 0) + goto fail_set_ops; + + sfc_log_init(sa, "attaching"); + rc = sfc_attach(sa); + if (rc != 0) + goto fail_attach; + + encp = efx_nic_cfg_get(sa->nic); + + /* + * The arguments are really reverse order in comparison to + * Linux kernel. Copy from NIC config to Ethernet device data. + */ + from = (const struct rte_ether_addr *)(encp->enc_mac_addr); + rte_ether_addr_copy(from, &dev->data->mac_addrs[0]); + + sfc_adapter_unlock(sa); + + sfc_log_init(sa, "done"); + return 0; + +fail_attach: + sfc_eth_dev_clear_ops(dev); + +fail_set_ops: + sfc_unprobe(sa); + +fail_probe: + sfc_adapter_unlock(sa); + sfc_adapter_lock_fini(sa); + rte_free(dev->data->mac_addrs); + dev->data->mac_addrs = NULL; + +fail_mac_addrs: + sfc_kvargs_cleanup(sa); + +fail_kvargs_parse: + sfc_log_init(sa, "failed %d", rc); + dev->process_private = NULL; + free(sa); + +fail_alloc_sa: + SFC_ASSERT(rc > 0); + return -rc; +} + +static int +sfc_eth_dev_uninit(struct rte_eth_dev *dev) +{ + if (rte_eal_process_type() != RTE_PROC_PRIMARY) { + sfc_eth_dev_secondary_clear_ops(dev); + return 0; + } + + sfc_dev_close(dev); + + return 0; +} + +static const struct rte_pci_id pci_id_sfc_efx_map[] = { + { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_FARMINGDALE) }, + { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_FARMINGDALE_VF) }, + { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_GREENPORT) }, + { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_GREENPORT_VF) }, + { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD) }, + { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD_VF) }, + { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD2) }, + { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD2_VF) }, + { .vendor_id = 0 /* sentinel */ } +}; + +static int sfc_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, + struct rte_pci_device *pci_dev) +{ + return rte_eth_dev_pci_generic_probe(pci_dev, + sizeof(struct sfc_adapter_shared), sfc_eth_dev_init); +} + +static int sfc_eth_dev_pci_remove(struct rte_pci_device *pci_dev) +{ + return rte_eth_dev_pci_generic_remove(pci_dev, sfc_eth_dev_uninit); +} + 
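+/*
+ * Example usage of the kvargs registered below (illustrative only; the
+ * PCI address and option values are placeholders, see
+ * SFC_KVARG_VALUES_RX_DATAPATH and SFC_KVARG_VALUES_TX_DATAPATH for the
+ * supported values):
+ *   testpmd -w 0000:02:00.0,rx_datapath=ef10,tx_datapath=ef10_simple -- -i
+ */
+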
+static struct rte_pci_driver sfc_efx_pmd = { + .id_table = pci_id_sfc_efx_map, + .drv_flags = + RTE_PCI_DRV_INTR_LSC | + RTE_PCI_DRV_NEED_MAPPING, + .probe = sfc_eth_dev_pci_probe, + .remove = sfc_eth_dev_pci_remove, +}; + +RTE_PMD_REGISTER_PCI(net_sfc_efx, sfc_efx_pmd); +RTE_PMD_REGISTER_PCI_TABLE(net_sfc_efx, pci_id_sfc_efx_map); +RTE_PMD_REGISTER_KMOD_DEP(net_sfc_efx, "* igb_uio | uio_pci_generic | vfio-pci"); +RTE_PMD_REGISTER_PARAM_STRING(net_sfc_efx, + SFC_KVARG_RX_DATAPATH "=" SFC_KVARG_VALUES_RX_DATAPATH " " + SFC_KVARG_TX_DATAPATH "=" SFC_KVARG_VALUES_TX_DATAPATH " " + SFC_KVARG_PERF_PROFILE "=" SFC_KVARG_VALUES_PERF_PROFILE " " + SFC_KVARG_FW_VARIANT "=" SFC_KVARG_VALUES_FW_VARIANT " " + SFC_KVARG_RXD_WAIT_TIMEOUT_NS "= " + SFC_KVARG_STATS_UPDATE_PERIOD_MS "="); + +RTE_INIT(sfc_driver_register_logtype) +{ + int ret; + + ret = rte_log_register_type_and_pick_level(SFC_LOGTYPE_PREFIX "driver", + RTE_LOG_NOTICE); + sfc_logtype_driver = (ret < 0) ? RTE_LOGTYPE_PMD : ret; +} diff --git a/src/spdk/dpdk/drivers/net/sfc/sfc_ev.c b/src/spdk/dpdk/drivers/net/sfc/sfc_ev.c new file mode 100644 index 000000000..83115e877 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/sfc_ev.c @@ -0,0 +1,932 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2016-2019 Solarflare Communications Inc. + * + * This software was jointly developed between OKTET Labs (under contract + * for Solarflare) and Solarflare Communications, Inc. + */ + +#include +#include +#include +#include + +#include "efx.h" + +#include "sfc.h" +#include "sfc_debug.h" +#include "sfc_log.h" +#include "sfc_ev.h" +#include "sfc_rx.h" +#include "sfc_tx.h" +#include "sfc_kvargs.h" + + +/* Initial delay when waiting for event queue init complete event */ +#define SFC_EVQ_INIT_BACKOFF_START_US (1) +/* Maximum delay between event queue polling attempts */ +#define SFC_EVQ_INIT_BACKOFF_MAX_US (10 * 1000) +/* Event queue init approx timeout */ +#define SFC_EVQ_INIT_TIMEOUT_US (2 * US_PER_S) + +/* Management event queue polling period in microseconds */ +#define SFC_MGMT_EV_QPOLL_PERIOD_US (US_PER_S) + +static const char * +sfc_evq_type2str(enum sfc_evq_type type) +{ + switch (type) { + case SFC_EVQ_TYPE_MGMT: + return "mgmt-evq"; + case SFC_EVQ_TYPE_RX: + return "rx-evq"; + case SFC_EVQ_TYPE_TX: + return "tx-evq"; + default: + SFC_ASSERT(B_FALSE); + return NULL; + } +} + +static boolean_t +sfc_ev_initialized(void *arg) +{ + struct sfc_evq *evq = arg; + + /* Init done events may be duplicated on SFN7xxx (SFC bug 31631) */ + SFC_ASSERT(evq->init_state == SFC_EVQ_STARTING || + evq->init_state == SFC_EVQ_STARTED); + + evq->init_state = SFC_EVQ_STARTED; + + return B_FALSE; +} + +static boolean_t +sfc_ev_nop_rx(void *arg, uint32_t label, uint32_t id, + uint32_t size, uint16_t flags) +{ + struct sfc_evq *evq = arg; + + sfc_err(evq->sa, + "EVQ %u unexpected Rx event label=%u id=%#x size=%u flags=%#x", + evq->evq_index, label, id, size, flags); + return B_TRUE; +} + +static boolean_t +sfc_ev_efx_rx(void *arg, __rte_unused uint32_t label, uint32_t id, + uint32_t size, uint16_t flags) +{ + struct sfc_evq *evq = arg; + struct sfc_efx_rxq *rxq; + unsigned int stop; + unsigned int pending_id; + unsigned int delta; + unsigned int i; + struct sfc_efx_rx_sw_desc *rxd; + + if (unlikely(evq->exception)) + goto done; + + rxq = sfc_efx_rxq_by_dp_rxq(evq->dp_rxq); + + SFC_ASSERT(rxq != NULL); + SFC_ASSERT(rxq->evq == evq); + SFC_ASSERT(rxq->flags & SFC_EFX_RXQ_FLAG_STARTED); + + stop = (id + 1) & rxq->ptr_mask; + 
pending_id = rxq->pending & rxq->ptr_mask; + delta = (stop >= pending_id) ? (stop - pending_id) : + (rxq->ptr_mask + 1 - pending_id + stop); + + if (delta == 0) { + /* + * Rx event with no new descriptors done and zero length + * is used to abort scattered packet when there is no room + * for the tail. + */ + if (unlikely(size != 0)) { + evq->exception = B_TRUE; + sfc_err(evq->sa, + "EVQ %u RxQ %u invalid RX abort " + "(id=%#x size=%u flags=%#x); needs restart", + evq->evq_index, rxq->dp.dpq.queue_id, + id, size, flags); + goto done; + } + + /* Add discard flag to the first fragment */ + rxq->sw_desc[pending_id].flags |= EFX_DISCARD; + /* Remove continue flag from the last fragment */ + rxq->sw_desc[id].flags &= ~EFX_PKT_CONT; + } else if (unlikely(delta > rxq->batch_max)) { + evq->exception = B_TRUE; + + sfc_err(evq->sa, + "EVQ %u RxQ %u completion out of order " + "(id=%#x delta=%u flags=%#x); needs restart", + evq->evq_index, rxq->dp.dpq.queue_id, + id, delta, flags); + + goto done; + } + + for (i = pending_id; i != stop; i = (i + 1) & rxq->ptr_mask) { + rxd = &rxq->sw_desc[i]; + + rxd->flags = flags; + + SFC_ASSERT(size < (1 << 16)); + rxd->size = (uint16_t)size; + } + + rxq->pending += delta; + +done: + return B_FALSE; +} + +static boolean_t +sfc_ev_dp_rx(void *arg, __rte_unused uint32_t label, uint32_t id, + __rte_unused uint32_t size, __rte_unused uint16_t flags) +{ + struct sfc_evq *evq = arg; + struct sfc_dp_rxq *dp_rxq; + + dp_rxq = evq->dp_rxq; + SFC_ASSERT(dp_rxq != NULL); + + SFC_ASSERT(evq->sa->priv.dp_rx->qrx_ev != NULL); + return evq->sa->priv.dp_rx->qrx_ev(dp_rxq, id); +} + +static boolean_t +sfc_ev_nop_rx_ps(void *arg, uint32_t label, uint32_t id, + uint32_t pkt_count, uint16_t flags) +{ + struct sfc_evq *evq = arg; + + sfc_err(evq->sa, + "EVQ %u unexpected packed stream Rx event label=%u id=%#x pkt_count=%u flags=%#x", + evq->evq_index, label, id, pkt_count, flags); + return B_TRUE; +} + +/* It is not actually used on datapath, but required on RxQ flush */ +static boolean_t +sfc_ev_dp_rx_ps(void *arg, __rte_unused uint32_t label, uint32_t id, + __rte_unused uint32_t pkt_count, __rte_unused uint16_t flags) +{ + struct sfc_evq *evq = arg; + struct sfc_dp_rxq *dp_rxq; + + dp_rxq = evq->dp_rxq; + SFC_ASSERT(dp_rxq != NULL); + + if (evq->sa->priv.dp_rx->qrx_ps_ev != NULL) + return evq->sa->priv.dp_rx->qrx_ps_ev(dp_rxq, id); + else + return B_FALSE; +} + +static boolean_t +sfc_ev_nop_tx(void *arg, uint32_t label, uint32_t id) +{ + struct sfc_evq *evq = arg; + + sfc_err(evq->sa, "EVQ %u unexpected Tx event label=%u id=%#x", + evq->evq_index, label, id); + return B_TRUE; +} + +static boolean_t +sfc_ev_tx(void *arg, __rte_unused uint32_t label, uint32_t id) +{ + struct sfc_evq *evq = arg; + struct sfc_dp_txq *dp_txq; + struct sfc_efx_txq *txq; + unsigned int stop; + unsigned int delta; + + dp_txq = evq->dp_txq; + SFC_ASSERT(dp_txq != NULL); + + txq = sfc_efx_txq_by_dp_txq(dp_txq); + SFC_ASSERT(txq->evq == evq); + + if (unlikely((txq->flags & SFC_EFX_TXQ_FLAG_STARTED) == 0)) + goto done; + + stop = (id + 1) & txq->ptr_mask; + id = txq->pending & txq->ptr_mask; + + delta = (stop >= id) ? 
(stop - id) : (txq->ptr_mask + 1 - id + stop); + + txq->pending += delta; + +done: + return B_FALSE; +} + +static boolean_t +sfc_ev_dp_tx(void *arg, __rte_unused uint32_t label, uint32_t id) +{ + struct sfc_evq *evq = arg; + struct sfc_dp_txq *dp_txq; + + dp_txq = evq->dp_txq; + SFC_ASSERT(dp_txq != NULL); + + SFC_ASSERT(evq->sa->priv.dp_tx->qtx_ev != NULL); + return evq->sa->priv.dp_tx->qtx_ev(dp_txq, id); +} + +static boolean_t +sfc_ev_exception(void *arg, uint32_t code, __rte_unused uint32_t data) +{ + struct sfc_evq *evq = arg; + + if (code == EFX_EXCEPTION_UNKNOWN_SENSOREVT) + return B_FALSE; + + evq->exception = B_TRUE; + sfc_warn(evq->sa, + "hardware exception %s (code=%u, data=%#x) on EVQ %u;" + " needs recovery", + (code == EFX_EXCEPTION_RX_RECOVERY) ? "RX_RECOVERY" : + (code == EFX_EXCEPTION_RX_DSC_ERROR) ? "RX_DSC_ERROR" : + (code == EFX_EXCEPTION_TX_DSC_ERROR) ? "TX_DSC_ERROR" : + (code == EFX_EXCEPTION_FWALERT_SRAM) ? "FWALERT_SRAM" : + (code == EFX_EXCEPTION_UNKNOWN_FWALERT) ? "UNKNOWN_FWALERT" : + (code == EFX_EXCEPTION_RX_ERROR) ? "RX_ERROR" : + (code == EFX_EXCEPTION_TX_ERROR) ? "TX_ERROR" : + (code == EFX_EXCEPTION_EV_ERROR) ? "EV_ERROR" : + "UNKNOWN", + code, data, evq->evq_index); + + return B_TRUE; +} + +static boolean_t +sfc_ev_nop_rxq_flush_done(void *arg, uint32_t rxq_hw_index) +{ + struct sfc_evq *evq = arg; + + sfc_err(evq->sa, "EVQ %u unexpected RxQ %u flush done", + evq->evq_index, rxq_hw_index); + return B_TRUE; +} + +static boolean_t +sfc_ev_rxq_flush_done(void *arg, __rte_unused uint32_t rxq_hw_index) +{ + struct sfc_evq *evq = arg; + struct sfc_dp_rxq *dp_rxq; + struct sfc_rxq *rxq; + + dp_rxq = evq->dp_rxq; + SFC_ASSERT(dp_rxq != NULL); + + rxq = sfc_rxq_by_dp_rxq(dp_rxq); + SFC_ASSERT(rxq != NULL); + SFC_ASSERT(rxq->hw_index == rxq_hw_index); + SFC_ASSERT(rxq->evq == evq); + RTE_SET_USED(rxq); + + sfc_rx_qflush_done(sfc_rxq_info_by_dp_rxq(dp_rxq)); + + return B_FALSE; +} + +static boolean_t +sfc_ev_nop_rxq_flush_failed(void *arg, uint32_t rxq_hw_index) +{ + struct sfc_evq *evq = arg; + + sfc_err(evq->sa, "EVQ %u unexpected RxQ %u flush failed", + evq->evq_index, rxq_hw_index); + return B_TRUE; +} + +static boolean_t +sfc_ev_rxq_flush_failed(void *arg, __rte_unused uint32_t rxq_hw_index) +{ + struct sfc_evq *evq = arg; + struct sfc_dp_rxq *dp_rxq; + struct sfc_rxq *rxq; + + dp_rxq = evq->dp_rxq; + SFC_ASSERT(dp_rxq != NULL); + + rxq = sfc_rxq_by_dp_rxq(dp_rxq); + SFC_ASSERT(rxq != NULL); + SFC_ASSERT(rxq->hw_index == rxq_hw_index); + SFC_ASSERT(rxq->evq == evq); + RTE_SET_USED(rxq); + + sfc_rx_qflush_failed(sfc_rxq_info_by_dp_rxq(dp_rxq)); + + return B_FALSE; +} + +static boolean_t +sfc_ev_nop_txq_flush_done(void *arg, uint32_t txq_hw_index) +{ + struct sfc_evq *evq = arg; + + sfc_err(evq->sa, "EVQ %u unexpected TxQ %u flush done", + evq->evq_index, txq_hw_index); + return B_TRUE; +} + +static boolean_t +sfc_ev_txq_flush_done(void *arg, __rte_unused uint32_t txq_hw_index) +{ + struct sfc_evq *evq = arg; + struct sfc_dp_txq *dp_txq; + struct sfc_txq *txq; + + dp_txq = evq->dp_txq; + SFC_ASSERT(dp_txq != NULL); + + txq = sfc_txq_by_dp_txq(dp_txq); + SFC_ASSERT(txq != NULL); + SFC_ASSERT(txq->hw_index == txq_hw_index); + SFC_ASSERT(txq->evq == evq); + RTE_SET_USED(txq); + + sfc_tx_qflush_done(sfc_txq_info_by_dp_txq(dp_txq)); + + return B_FALSE; +} + +static boolean_t +sfc_ev_software(void *arg, uint16_t magic) +{ + struct sfc_evq *evq = arg; + + sfc_err(evq->sa, "EVQ %u unexpected software event magic=%#.4x", + evq->evq_index, magic); + return B_TRUE; +} + 
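/*
 * Editor's note (illustrative sketch, not part of the upstream patch):
 * sfc_ev_efx_rx() and sfc_ev_tx() above convert a completion event id
 * into the number of newly completed descriptors on a power-of-two ring:
 * stop = (id + 1) & ptr_mask, and when stop is below the pending index
 * the delta wraps around via ptr_mask + 1 - pending + stop. The
 * stand-alone helper below reproduces only that arithmetic; the demo_*
 * names are invented for illustration.
 */
#include <assert.h>
#include <stdio.h>

static unsigned int
demo_ring_delta(unsigned int pending, unsigned int id, unsigned int ptr_mask)
{
	unsigned int stop = (id + 1) & ptr_mask;	/* one past last done */
	unsigned int start = pending & ptr_mask;

	return (stop >= start) ? (stop - start)
			       : (ptr_mask + 1 - start + stop);
}

int
main(void)
{
	/* 8-entry ring (ptr_mask = 7): no-wrap and wrap-around cases */
	assert(demo_ring_delta(2, 4, 7) == 3);	/* descriptors 2,3,4 */
	assert(demo_ring_delta(6, 1, 7) == 4);	/* descriptors 6,7,0,1 */
	printf("ring delta checks passed\n");
	return 0;
}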
+static boolean_t +sfc_ev_sram(void *arg, uint32_t code) +{ + struct sfc_evq *evq = arg; + + sfc_err(evq->sa, "EVQ %u unexpected SRAM event code=%u", + evq->evq_index, code); + return B_TRUE; +} + +static boolean_t +sfc_ev_wake_up(void *arg, uint32_t index) +{ + struct sfc_evq *evq = arg; + + sfc_err(evq->sa, "EVQ %u unexpected wake up event index=%u", + evq->evq_index, index); + return B_TRUE; +} + +static boolean_t +sfc_ev_timer(void *arg, uint32_t index) +{ + struct sfc_evq *evq = arg; + + sfc_err(evq->sa, "EVQ %u unexpected timer event index=%u", + evq->evq_index, index); + return B_TRUE; +} + +static boolean_t +sfc_ev_nop_link_change(void *arg, __rte_unused efx_link_mode_t link_mode) +{ + struct sfc_evq *evq = arg; + + sfc_err(evq->sa, "EVQ %u unexpected link change event", + evq->evq_index); + return B_TRUE; +} + +static boolean_t +sfc_ev_link_change(void *arg, efx_link_mode_t link_mode) +{ + struct sfc_evq *evq = arg; + struct sfc_adapter *sa = evq->sa; + struct rte_eth_link new_link; + + sfc_port_link_mode_to_info(link_mode, &new_link); + if (rte_eth_linkstatus_set(sa->eth_dev, &new_link) == 0) + evq->sa->port.lsc_seq++; + + return B_FALSE; +} + +static const efx_ev_callbacks_t sfc_ev_callbacks = { + .eec_initialized = sfc_ev_initialized, + .eec_rx = sfc_ev_nop_rx, + .eec_rx_ps = sfc_ev_nop_rx_ps, + .eec_tx = sfc_ev_nop_tx, + .eec_exception = sfc_ev_exception, + .eec_rxq_flush_done = sfc_ev_nop_rxq_flush_done, + .eec_rxq_flush_failed = sfc_ev_nop_rxq_flush_failed, + .eec_txq_flush_done = sfc_ev_nop_txq_flush_done, + .eec_software = sfc_ev_software, + .eec_sram = sfc_ev_sram, + .eec_wake_up = sfc_ev_wake_up, + .eec_timer = sfc_ev_timer, + .eec_link_change = sfc_ev_link_change, +}; + +static const efx_ev_callbacks_t sfc_ev_callbacks_efx_rx = { + .eec_initialized = sfc_ev_initialized, + .eec_rx = sfc_ev_efx_rx, + .eec_rx_ps = sfc_ev_nop_rx_ps, + .eec_tx = sfc_ev_nop_tx, + .eec_exception = sfc_ev_exception, + .eec_rxq_flush_done = sfc_ev_rxq_flush_done, + .eec_rxq_flush_failed = sfc_ev_rxq_flush_failed, + .eec_txq_flush_done = sfc_ev_nop_txq_flush_done, + .eec_software = sfc_ev_software, + .eec_sram = sfc_ev_sram, + .eec_wake_up = sfc_ev_wake_up, + .eec_timer = sfc_ev_timer, + .eec_link_change = sfc_ev_nop_link_change, +}; + +static const efx_ev_callbacks_t sfc_ev_callbacks_dp_rx = { + .eec_initialized = sfc_ev_initialized, + .eec_rx = sfc_ev_dp_rx, + .eec_rx_ps = sfc_ev_dp_rx_ps, + .eec_tx = sfc_ev_nop_tx, + .eec_exception = sfc_ev_exception, + .eec_rxq_flush_done = sfc_ev_rxq_flush_done, + .eec_rxq_flush_failed = sfc_ev_rxq_flush_failed, + .eec_txq_flush_done = sfc_ev_nop_txq_flush_done, + .eec_software = sfc_ev_software, + .eec_sram = sfc_ev_sram, + .eec_wake_up = sfc_ev_wake_up, + .eec_timer = sfc_ev_timer, + .eec_link_change = sfc_ev_nop_link_change, +}; + +static const efx_ev_callbacks_t sfc_ev_callbacks_efx_tx = { + .eec_initialized = sfc_ev_initialized, + .eec_rx = sfc_ev_nop_rx, + .eec_rx_ps = sfc_ev_nop_rx_ps, + .eec_tx = sfc_ev_tx, + .eec_exception = sfc_ev_exception, + .eec_rxq_flush_done = sfc_ev_nop_rxq_flush_done, + .eec_rxq_flush_failed = sfc_ev_nop_rxq_flush_failed, + .eec_txq_flush_done = sfc_ev_txq_flush_done, + .eec_software = sfc_ev_software, + .eec_sram = sfc_ev_sram, + .eec_wake_up = sfc_ev_wake_up, + .eec_timer = sfc_ev_timer, + .eec_link_change = sfc_ev_nop_link_change, +}; + +static const efx_ev_callbacks_t sfc_ev_callbacks_dp_tx = { + .eec_initialized = sfc_ev_initialized, + .eec_rx = sfc_ev_nop_rx, + .eec_rx_ps = sfc_ev_nop_rx_ps, + .eec_tx = sfc_ev_dp_tx, + 
.eec_exception = sfc_ev_exception, + .eec_rxq_flush_done = sfc_ev_nop_rxq_flush_done, + .eec_rxq_flush_failed = sfc_ev_nop_rxq_flush_failed, + .eec_txq_flush_done = sfc_ev_txq_flush_done, + .eec_software = sfc_ev_software, + .eec_sram = sfc_ev_sram, + .eec_wake_up = sfc_ev_wake_up, + .eec_timer = sfc_ev_timer, + .eec_link_change = sfc_ev_nop_link_change, +}; + + +void +sfc_ev_qpoll(struct sfc_evq *evq) +{ + SFC_ASSERT(evq->init_state == SFC_EVQ_STARTED || + evq->init_state == SFC_EVQ_STARTING); + + /* Synchronize the DMA memory for reading not required */ + + efx_ev_qpoll(evq->common, &evq->read_ptr, evq->callbacks, evq); + + if (unlikely(evq->exception) && sfc_adapter_trylock(evq->sa)) { + struct sfc_adapter *sa = evq->sa; + int rc; + + if (evq->dp_rxq != NULL) { + unsigned int rxq_sw_index; + + rxq_sw_index = evq->dp_rxq->dpq.queue_id; + + sfc_warn(sa, + "restart RxQ %u because of exception on its EvQ %u", + rxq_sw_index, evq->evq_index); + + sfc_rx_qstop(sa, rxq_sw_index); + rc = sfc_rx_qstart(sa, rxq_sw_index); + if (rc != 0) + sfc_err(sa, "cannot restart RxQ %u", + rxq_sw_index); + } + + if (evq->dp_txq != NULL) { + unsigned int txq_sw_index; + + txq_sw_index = evq->dp_txq->dpq.queue_id; + + sfc_warn(sa, + "restart TxQ %u because of exception on its EvQ %u", + txq_sw_index, evq->evq_index); + + sfc_tx_qstop(sa, txq_sw_index); + rc = sfc_tx_qstart(sa, txq_sw_index); + if (rc != 0) + sfc_err(sa, "cannot restart TxQ %u", + txq_sw_index); + } + + if (evq->exception) + sfc_panic(sa, "unrecoverable exception on EvQ %u", + evq->evq_index); + + sfc_adapter_unlock(sa); + } + + /* Poll-mode driver does not re-prime the event queue for interrupts */ +} + +void +sfc_ev_mgmt_qpoll(struct sfc_adapter *sa) +{ + if (rte_spinlock_trylock(&sa->mgmt_evq_lock)) { + if (sa->mgmt_evq_running) + sfc_ev_qpoll(sa->mgmt_evq); + + rte_spinlock_unlock(&sa->mgmt_evq_lock); + } +} + +int +sfc_ev_qprime(struct sfc_evq *evq) +{ + SFC_ASSERT(evq->init_state == SFC_EVQ_STARTED); + return efx_ev_qprime(evq->common, evq->read_ptr); +} + +/* Event queue HW index allocation scheme is described in sfc_ev.h. 
*/ +int +sfc_ev_qstart(struct sfc_evq *evq, unsigned int hw_index) +{ + struct sfc_adapter *sa = evq->sa; + efsys_mem_t *esmp; + uint32_t evq_flags = sa->evq_flags; + unsigned int total_delay_us; + unsigned int delay_us; + int rc; + + sfc_log_init(sa, "hw_index=%u", hw_index); + + esmp = &evq->mem; + + evq->evq_index = hw_index; + + /* Clear all events */ + (void)memset((void *)esmp->esm_base, 0xff, + efx_evq_size(sa->nic, evq->entries)); + + if ((sa->intr.lsc_intr && hw_index == sa->mgmt_evq_index) || + (sa->intr.rxq_intr && evq->dp_rxq != NULL)) + evq_flags |= EFX_EVQ_FLAGS_NOTIFY_INTERRUPT; + else + evq_flags |= EFX_EVQ_FLAGS_NOTIFY_DISABLED; + + /* Create the common code event queue */ + rc = efx_ev_qcreate(sa->nic, hw_index, esmp, evq->entries, + 0 /* unused on EF10 */, 0, evq_flags, + &evq->common); + if (rc != 0) + goto fail_ev_qcreate; + + SFC_ASSERT(evq->dp_rxq == NULL || evq->dp_txq == NULL); + if (evq->dp_rxq != 0) { + if (strcmp(sa->priv.dp_rx->dp.name, + SFC_KVARG_DATAPATH_EFX) == 0) + evq->callbacks = &sfc_ev_callbacks_efx_rx; + else + evq->callbacks = &sfc_ev_callbacks_dp_rx; + } else if (evq->dp_txq != 0) { + if (strcmp(sa->priv.dp_tx->dp.name, + SFC_KVARG_DATAPATH_EFX) == 0) + evq->callbacks = &sfc_ev_callbacks_efx_tx; + else + evq->callbacks = &sfc_ev_callbacks_dp_tx; + } else { + evq->callbacks = &sfc_ev_callbacks; + } + + evq->init_state = SFC_EVQ_STARTING; + + /* Wait for the initialization event */ + total_delay_us = 0; + delay_us = SFC_EVQ_INIT_BACKOFF_START_US; + do { + (void)sfc_ev_qpoll(evq); + + /* Check to see if the initialization complete indication + * posted by the hardware. + */ + if (evq->init_state == SFC_EVQ_STARTED) + goto done; + + /* Give event queue some time to init */ + rte_delay_us(delay_us); + + total_delay_us += delay_us; + + /* Exponential backoff */ + delay_us *= 2; + if (delay_us > SFC_EVQ_INIT_BACKOFF_MAX_US) + delay_us = SFC_EVQ_INIT_BACKOFF_MAX_US; + + } while (total_delay_us < SFC_EVQ_INIT_TIMEOUT_US); + + rc = ETIMEDOUT; + goto fail_timedout; + +done: + return 0; + +fail_timedout: + evq->init_state = SFC_EVQ_INITIALIZED; + efx_ev_qdestroy(evq->common); + +fail_ev_qcreate: + sfc_log_init(sa, "failed %d", rc); + return rc; +} + +void +sfc_ev_qstop(struct sfc_evq *evq) +{ + if (evq == NULL) + return; + + sfc_log_init(evq->sa, "hw_index=%u", evq->evq_index); + + if (evq->init_state != SFC_EVQ_STARTED) + return; + + evq->init_state = SFC_EVQ_INITIALIZED; + evq->callbacks = NULL; + evq->read_ptr = 0; + evq->exception = B_FALSE; + + efx_ev_qdestroy(evq->common); + + evq->evq_index = 0; +} + +static void +sfc_ev_mgmt_periodic_qpoll(void *arg) +{ + struct sfc_adapter *sa = arg; + int rc; + + sfc_ev_mgmt_qpoll(sa); + + rc = rte_eal_alarm_set(SFC_MGMT_EV_QPOLL_PERIOD_US, + sfc_ev_mgmt_periodic_qpoll, sa); + if (rc == -ENOTSUP) { + sfc_warn(sa, "alarms are not supported"); + sfc_warn(sa, "management EVQ must be polled indirectly using no-wait link status update"); + } else if (rc != 0) { + sfc_err(sa, + "cannot rearm management EVQ polling alarm (rc=%d)", + rc); + } +} + +static void +sfc_ev_mgmt_periodic_qpoll_start(struct sfc_adapter *sa) +{ + sfc_ev_mgmt_periodic_qpoll(sa); +} + +static void +sfc_ev_mgmt_periodic_qpoll_stop(struct sfc_adapter *sa) +{ + rte_eal_alarm_cancel(sfc_ev_mgmt_periodic_qpoll, sa); +} + +int +sfc_ev_start(struct sfc_adapter *sa) +{ + int rc; + + sfc_log_init(sa, "entry"); + + rc = efx_ev_init(sa->nic); + if (rc != 0) + goto fail_ev_init; + + /* Start management EVQ used for global events */ + + /* + * Management event 
queue start polls the queue, but it cannot + * interfere with other polling contexts since mgmt_evq_running + * is false yet. + */ + rc = sfc_ev_qstart(sa->mgmt_evq, sa->mgmt_evq_index); + if (rc != 0) + goto fail_mgmt_evq_start; + + rte_spinlock_lock(&sa->mgmt_evq_lock); + sa->mgmt_evq_running = true; + rte_spinlock_unlock(&sa->mgmt_evq_lock); + + if (sa->intr.lsc_intr) { + rc = sfc_ev_qprime(sa->mgmt_evq); + if (rc != 0) + goto fail_mgmt_evq_prime; + } + + /* + * Start management EVQ polling. If interrupts are disabled + * (not used), it is required to process link status change + * and other device level events to avoid unrecoverable + * error because the event queue overflow. + */ + sfc_ev_mgmt_periodic_qpoll_start(sa); + + /* + * Rx/Tx event queues are started/stopped when corresponding + * Rx/Tx queue is started/stopped. + */ + + return 0; + +fail_mgmt_evq_prime: + sfc_ev_qstop(sa->mgmt_evq); + +fail_mgmt_evq_start: + efx_ev_fini(sa->nic); + +fail_ev_init: + sfc_log_init(sa, "failed %d", rc); + return rc; +} + +void +sfc_ev_stop(struct sfc_adapter *sa) +{ + sfc_log_init(sa, "entry"); + + sfc_ev_mgmt_periodic_qpoll_stop(sa); + + rte_spinlock_lock(&sa->mgmt_evq_lock); + sa->mgmt_evq_running = false; + rte_spinlock_unlock(&sa->mgmt_evq_lock); + + sfc_ev_qstop(sa->mgmt_evq); + + efx_ev_fini(sa->nic); +} + +int +sfc_ev_qinit(struct sfc_adapter *sa, + enum sfc_evq_type type, unsigned int type_index, + unsigned int entries, int socket_id, struct sfc_evq **evqp) +{ + struct sfc_evq *evq; + int rc; + + sfc_log_init(sa, "type=%s type_index=%u", + sfc_evq_type2str(type), type_index); + + SFC_ASSERT(rte_is_power_of_2(entries)); + + rc = ENOMEM; + evq = rte_zmalloc_socket("sfc-evq", sizeof(*evq), RTE_CACHE_LINE_SIZE, + socket_id); + if (evq == NULL) + goto fail_evq_alloc; + + evq->sa = sa; + evq->type = type; + evq->entries = entries; + + /* Allocate DMA space */ + rc = sfc_dma_alloc(sa, sfc_evq_type2str(type), type_index, + efx_evq_size(sa->nic, evq->entries), socket_id, + &evq->mem); + if (rc != 0) + goto fail_dma_alloc; + + evq->init_state = SFC_EVQ_INITIALIZED; + + sa->evq_count++; + + *evqp = evq; + + return 0; + +fail_dma_alloc: + rte_free(evq); + +fail_evq_alloc: + + sfc_log_init(sa, "failed %d", rc); + return rc; +} + +void +sfc_ev_qfini(struct sfc_evq *evq) +{ + struct sfc_adapter *sa = evq->sa; + + SFC_ASSERT(evq->init_state == SFC_EVQ_INITIALIZED); + + sfc_dma_free(sa, &evq->mem); + + rte_free(evq); + + SFC_ASSERT(sa->evq_count > 0); + sa->evq_count--; +} + +static int +sfc_kvarg_perf_profile_handler(__rte_unused const char *key, + const char *value_str, void *opaque) +{ + uint32_t *value = opaque; + + if (strcasecmp(value_str, SFC_KVARG_PERF_PROFILE_THROUGHPUT) == 0) + *value = EFX_EVQ_FLAGS_TYPE_THROUGHPUT; + else if (strcasecmp(value_str, SFC_KVARG_PERF_PROFILE_LOW_LATENCY) == 0) + *value = EFX_EVQ_FLAGS_TYPE_LOW_LATENCY; + else if (strcasecmp(value_str, SFC_KVARG_PERF_PROFILE_AUTO) == 0) + *value = EFX_EVQ_FLAGS_TYPE_AUTO; + else + return -EINVAL; + + return 0; +} + +int +sfc_ev_attach(struct sfc_adapter *sa) +{ + int rc; + + sfc_log_init(sa, "entry"); + + sa->evq_flags = EFX_EVQ_FLAGS_TYPE_THROUGHPUT; + rc = sfc_kvargs_process(sa, SFC_KVARG_PERF_PROFILE, + sfc_kvarg_perf_profile_handler, + &sa->evq_flags); + if (rc != 0) { + sfc_err(sa, "invalid %s parameter value", + SFC_KVARG_PERF_PROFILE); + goto fail_kvarg_perf_profile; + } + + sa->mgmt_evq_index = 0; + rte_spinlock_init(&sa->mgmt_evq_lock); + + rc = sfc_ev_qinit(sa, SFC_EVQ_TYPE_MGMT, 0, sa->evq_min_entries, + sa->socket_id, 
&sa->mgmt_evq); + if (rc != 0) + goto fail_mgmt_evq_init; + + /* + * Rx/Tx event queues are created/destroyed when corresponding + * Rx/Tx queue is created/destroyed. + */ + + return 0; + +fail_mgmt_evq_init: + +fail_kvarg_perf_profile: + sfc_log_init(sa, "failed %d", rc); + return rc; +} + +void +sfc_ev_detach(struct sfc_adapter *sa) +{ + sfc_log_init(sa, "entry"); + + sfc_ev_qfini(sa->mgmt_evq); + + if (sa->evq_count != 0) + sfc_err(sa, "%u EvQs are not destroyed before detach", + sa->evq_count); +} diff --git a/src/spdk/dpdk/drivers/net/sfc/sfc_ev.h b/src/spdk/dpdk/drivers/net/sfc/sfc_ev.h new file mode 100644 index 000000000..3c7e5580e --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/sfc_ev.h @@ -0,0 +1,105 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2016-2019 Solarflare Communications Inc. + * + * This software was jointly developed between OKTET Labs (under contract + * for Solarflare) and Solarflare Communications, Inc. + */ + +#ifndef _SFC_EV_H_ +#define _SFC_EV_H_ + +#include + +#include "efx.h" + +#include "sfc.h" + +#ifdef __cplusplus +extern "C" { +#endif + +struct sfc_adapter; +struct sfc_dp_rxq; +struct sfc_dp_txq; + +enum sfc_evq_state { + SFC_EVQ_UNINITIALIZED = 0, + SFC_EVQ_INITIALIZED, + SFC_EVQ_STARTING, + SFC_EVQ_STARTED, + + SFC_EVQ_NSTATES +}; + +enum sfc_evq_type { + SFC_EVQ_TYPE_MGMT = 0, + SFC_EVQ_TYPE_RX, + SFC_EVQ_TYPE_TX, + + SFC_EVQ_NTYPES +}; + +struct sfc_evq { + /* Used on datapath */ + efx_evq_t *common; + const efx_ev_callbacks_t *callbacks; + unsigned int read_ptr; + unsigned int read_ptr_primed; + boolean_t exception; + efsys_mem_t mem; + struct sfc_dp_rxq *dp_rxq; + struct sfc_dp_txq *dp_txq; + + /* Not used on datapath */ + struct sfc_adapter *sa; + unsigned int evq_index; + enum sfc_evq_state init_state; + enum sfc_evq_type type; + unsigned int entries; +}; + +/* + * Functions below define event queue to transmit/receive queue and vice + * versa mapping. + * Own event queue is allocated for management, each Rx and each Tx queue. + * Zero event queue is used for management events. + * Rx event queues from 1 to RxQ number follow management event queue. + * Tx event queues follow Rx event queues. 
+ */ + +static inline unsigned int +sfc_evq_index_by_rxq_sw_index(__rte_unused struct sfc_adapter *sa, + unsigned int rxq_sw_index) +{ + return 1 + rxq_sw_index; +} + +static inline unsigned int +sfc_evq_index_by_txq_sw_index(struct sfc_adapter *sa, unsigned int txq_sw_index) +{ + return 1 + sa->eth_dev->data->nb_rx_queues + txq_sw_index; +} + +int sfc_ev_attach(struct sfc_adapter *sa); +void sfc_ev_detach(struct sfc_adapter *sa); +int sfc_ev_start(struct sfc_adapter *sa); +void sfc_ev_stop(struct sfc_adapter *sa); + +int sfc_ev_qinit(struct sfc_adapter *sa, + enum sfc_evq_type type, unsigned int type_index, + unsigned int entries, int socket_id, struct sfc_evq **evqp); +void sfc_ev_qfini(struct sfc_evq *evq); +int sfc_ev_qstart(struct sfc_evq *evq, unsigned int hw_index); +void sfc_ev_qstop(struct sfc_evq *evq); + +int sfc_ev_qprime(struct sfc_evq *evq); +void sfc_ev_qpoll(struct sfc_evq *evq); + +void sfc_ev_mgmt_qpoll(struct sfc_adapter *sa); + +#ifdef __cplusplus +} +#endif +#endif /* _SFC_EV_H_ */ diff --git a/src/spdk/dpdk/drivers/net/sfc/sfc_filter.c b/src/spdk/dpdk/drivers/net/sfc/sfc_filter.c new file mode 100644 index 000000000..7f4f7c47a --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/sfc_filter.c @@ -0,0 +1,129 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2017-2019 Solarflare Communications Inc. + * + * This software was jointly developed between OKTET Labs (under contract + * for Solarflare) and Solarflare Communications, Inc. + */ + +#include + +#include "efx.h" + +#include "sfc.h" +#include "sfc_log.h" + +boolean_t +sfc_filter_is_match_supported(struct sfc_adapter *sa, uint32_t match) +{ + struct sfc_filter *filter = &sa->filter; + size_t i; + + for (i = 0; i < filter->supported_match_num; ++i) { + if (match == filter->supported_match[i]) + return B_TRUE; + } + + return B_FALSE; +} + +static int +sfc_filter_cache_match_supported(struct sfc_adapter *sa) +{ + struct sfc_filter *filter = &sa->filter; + size_t num = filter->supported_match_num; + uint32_t *buf = filter->supported_match; + unsigned int retry; + int rc; + + /* Just a guess of possibly sufficient entries */ + if (num == 0) + num = 16; + + for (retry = 0; retry < 2; ++retry) { + if (num != filter->supported_match_num) { + rc = ENOMEM; + buf = rte_realloc(buf, num * sizeof(*buf), 0); + if (buf == NULL) + goto fail_realloc; + } + + rc = efx_filter_supported_filters(sa->nic, buf, num, &num); + if (rc == 0) { + filter->supported_match_num = num; + filter->supported_match = buf; + + return 0; + } else if (rc != ENOSPC) { + goto fail_efx_filter_supported_filters; + } + } + + SFC_ASSERT(rc == ENOSPC); + +fail_efx_filter_supported_filters: +fail_realloc: + /* Original pointer is not freed by rte_realloc() on failure */ + rte_free(buf); + filter->supported_match = NULL; + filter->supported_match_num = 0; + return rc; +} + +int +sfc_filter_attach(struct sfc_adapter *sa) +{ + int rc; + unsigned int i; + + sfc_log_init(sa, "entry"); + + rc = efx_filter_init(sa->nic); + if (rc != 0) + goto fail_filter_init; + + rc = sfc_filter_cache_match_supported(sa); + if (rc != 0) + goto fail_cache_match_supported; + + efx_filter_fini(sa->nic); + + sa->filter.supports_ip_proto_or_addr_filter = B_FALSE; + sa->filter.supports_rem_or_local_port_filter = B_FALSE; + for (i = 0; i < sa->filter.supported_match_num; ++i) { + if (sa->filter.supported_match[i] & + (EFX_FILTER_MATCH_IP_PROTO | EFX_FILTER_MATCH_LOC_HOST | + EFX_FILTER_MATCH_REM_HOST)) + 
sa->filter.supports_ip_proto_or_addr_filter = B_TRUE; + + if (sa->filter.supported_match[i] & + (EFX_FILTER_MATCH_LOC_PORT | EFX_FILTER_MATCH_REM_PORT)) + sa->filter.supports_rem_or_local_port_filter = B_TRUE; + } + + sfc_log_init(sa, "done"); + + return 0; + +fail_cache_match_supported: + efx_filter_fini(sa->nic); + +fail_filter_init: + sfc_log_init(sa, "failed %d", rc); + return rc; +} + +void +sfc_filter_detach(struct sfc_adapter *sa) +{ + struct sfc_filter *filter = &sa->filter; + + sfc_log_init(sa, "entry"); + + rte_free(filter->supported_match); + filter->supported_match = NULL; + filter->supported_match_num = 0; + + sfc_log_init(sa, "done"); +} diff --git a/src/spdk/dpdk/drivers/net/sfc/sfc_filter.h b/src/spdk/dpdk/drivers/net/sfc/sfc_filter.h new file mode 100644 index 000000000..69ed50a7a --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/sfc_filter.h @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2017-2019 Solarflare Communications Inc. + * + * This software was jointly developed between OKTET Labs (under contract + * for Solarflare) and Solarflare Communications, Inc. + */ + +#ifndef _SFC_FILTER_H +#define _SFC_FILTER_H + +#include "efx.h" + +#include "sfc_flow.h" + +#ifdef __cplusplus +extern "C" { +#endif + +struct sfc_filter { + /** Number of elements in match_supported array */ + size_t supported_match_num; + /** Driver cache of supported filter match masks */ + uint32_t *supported_match; + /** + * Supports any of ip_proto, remote host or local host + * filters. This flag is used for filter match exceptions + */ + boolean_t supports_ip_proto_or_addr_filter; + /** + * Supports any of remote port or local port filters. + * This flag is used for filter match exceptions + */ + boolean_t supports_rem_or_local_port_filter; +}; + +struct sfc_adapter; + +int sfc_filter_attach(struct sfc_adapter *sa); +void sfc_filter_detach(struct sfc_adapter *sa); + +boolean_t sfc_filter_is_match_supported(struct sfc_adapter *sa, uint32_t match); + +#ifdef __cplusplus +} +#endif +#endif /* _SFC_FILTER_H */ diff --git a/src/spdk/dpdk/drivers/net/sfc/sfc_flow.c b/src/spdk/dpdk/drivers/net/sfc/sfc_flow.c new file mode 100644 index 000000000..c8e6fb8bc --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/sfc_flow.c @@ -0,0 +1,2664 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2017-2019 Solarflare Communications Inc. + * + * This software was jointly developed between OKTET Labs (under contract + * for Solarflare) and Solarflare Communications, Inc. 
+ */ + +#include +#include +#include +#include +#include +#include +#include + +#include "efx.h" + +#include "sfc.h" +#include "sfc_rx.h" +#include "sfc_filter.h" +#include "sfc_flow.h" +#include "sfc_log.h" +#include "sfc_dp_rx.h" + +struct sfc_flow_ops_by_spec { + sfc_flow_parse_cb_t *parse; + sfc_flow_insert_cb_t *insert; + sfc_flow_remove_cb_t *remove; +}; + +static sfc_flow_parse_cb_t sfc_flow_parse_rte_to_filter; +static sfc_flow_insert_cb_t sfc_flow_filter_insert; +static sfc_flow_remove_cb_t sfc_flow_filter_remove; + +static const struct sfc_flow_ops_by_spec sfc_flow_ops_filter = { + .parse = sfc_flow_parse_rte_to_filter, + .insert = sfc_flow_filter_insert, + .remove = sfc_flow_filter_remove, +}; + +static const struct sfc_flow_ops_by_spec * +sfc_flow_get_ops_by_spec(struct rte_flow *flow) +{ + struct sfc_flow_spec *spec = &flow->spec; + const struct sfc_flow_ops_by_spec *ops = NULL; + + switch (spec->type) { + case SFC_FLOW_SPEC_FILTER: + ops = &sfc_flow_ops_filter; + break; + default: + SFC_ASSERT(false); + break; + } + + return ops; +} + +/* + * Currently, filter-based (VNIC) flow API is implemented in such a manner + * that each flow rule is converted to one or more hardware filters. + * All elements of flow rule (attributes, pattern items, actions) + * correspond to one or more fields in the efx_filter_spec_s structure + * that is responsible for the hardware filter. + * If some required field is unset in the flow rule, then a handful + * of filter copies will be created to cover all possible values + * of such a field. + */ + +static sfc_flow_item_parse sfc_flow_parse_void; +static sfc_flow_item_parse sfc_flow_parse_eth; +static sfc_flow_item_parse sfc_flow_parse_vlan; +static sfc_flow_item_parse sfc_flow_parse_ipv4; +static sfc_flow_item_parse sfc_flow_parse_ipv6; +static sfc_flow_item_parse sfc_flow_parse_tcp; +static sfc_flow_item_parse sfc_flow_parse_udp; +static sfc_flow_item_parse sfc_flow_parse_vxlan; +static sfc_flow_item_parse sfc_flow_parse_geneve; +static sfc_flow_item_parse sfc_flow_parse_nvgre; + +typedef int (sfc_flow_spec_set_vals)(struct sfc_flow_spec *spec, + unsigned int filters_count_for_one_val, + struct rte_flow_error *error); + +typedef boolean_t (sfc_flow_spec_check)(efx_filter_match_flags_t match, + efx_filter_spec_t *spec, + struct sfc_filter *filter); + +struct sfc_flow_copy_flag { + /* EFX filter specification match flag */ + efx_filter_match_flags_t flag; + /* Number of values of corresponding field */ + unsigned int vals_count; + /* Function to set values in specifications */ + sfc_flow_spec_set_vals *set_vals; + /* + * Function to check that the specification is suitable + * for adding this match flag + */ + sfc_flow_spec_check *spec_check; +}; + +static sfc_flow_spec_set_vals sfc_flow_set_unknown_dst_flags; +static sfc_flow_spec_check sfc_flow_check_unknown_dst_flags; +static sfc_flow_spec_set_vals sfc_flow_set_ethertypes; +static sfc_flow_spec_set_vals sfc_flow_set_ifrm_unknown_dst_flags; +static sfc_flow_spec_check sfc_flow_check_ifrm_unknown_dst_flags; +static sfc_flow_spec_set_vals sfc_flow_set_outer_vid_flag; +static sfc_flow_spec_check sfc_flow_check_outer_vid_flag; + +static boolean_t +sfc_flow_is_zero(const uint8_t *buf, unsigned int size) +{ + uint8_t sum = 0; + unsigned int i; + + for (i = 0; i < size; i++) + sum |= buf[i]; + + return (sum == 0) ? 
B_TRUE : B_FALSE; +} + +/* + * Validate item and prepare structures spec and mask for parsing + */ +int +sfc_flow_parse_init(const struct rte_flow_item *item, + const void **spec_ptr, + const void **mask_ptr, + const void *supp_mask, + const void *def_mask, + unsigned int size, + struct rte_flow_error *error) +{ + const uint8_t *spec; + const uint8_t *mask; + const uint8_t *last; + uint8_t supp; + unsigned int i; + + if (item == NULL) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, NULL, + "NULL item"); + return -rte_errno; + } + + if ((item->last != NULL || item->mask != NULL) && item->spec == NULL) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Mask or last is set without spec"); + return -rte_errno; + } + + /* + * If "mask" is not set, default mask is used, + * but if default mask is NULL, "mask" should be set + */ + if (item->mask == NULL) { + if (def_mask == NULL) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, NULL, + "Mask should be specified"); + return -rte_errno; + } + + mask = def_mask; + } else { + mask = item->mask; + } + + spec = item->spec; + last = item->last; + + if (spec == NULL) + goto exit; + + /* + * If field values in "last" are either 0 or equal to the corresponding + * values in "spec" then they are ignored + */ + if (last != NULL && + !sfc_flow_is_zero(last, size) && + memcmp(last, spec, size) != 0) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Ranging is not supported"); + return -rte_errno; + } + + if (supp_mask == NULL) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "Supported mask for item should be specified"); + return -rte_errno; + } + + /* Check that mask does not ask for more match than supp_mask */ + for (i = 0; i < size; i++) { + supp = ((const uint8_t *)supp_mask)[i]; + + if (~supp & mask[i]) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Item's field is not supported"); + return -rte_errno; + } + } + +exit: + *spec_ptr = spec; + *mask_ptr = mask; + return 0; +} + +/* + * Protocol parsers. + * Masking is not supported, so masks in items should be either + * full or empty (zeroed) and set only for supported fields which + * are specified in the supp_mask. + */ + +static int +sfc_flow_parse_void(__rte_unused const struct rte_flow_item *item, + __rte_unused struct sfc_flow_parse_ctx *parse_ctx, + __rte_unused struct rte_flow_error *error) +{ + return 0; +} + +/** + * Convert Ethernet item to EFX filter specification. + * + * @param item[in] + * Item specification. Outer frame specification may only comprise + * source/destination addresses and Ethertype field. + * Inner frame specification may contain destination address only. + * There is support for individual/group mask as well as for empty and full. + * If the mask is NULL, default mask will be used. Ranging is not supported. + * @param efx_spec[in, out] + * EFX filter specification to update. + * @param[out] error + * Perform verbose error reporting if not NULL. 
+ */ +static int +sfc_flow_parse_eth(const struct rte_flow_item *item, + struct sfc_flow_parse_ctx *parse_ctx, + struct rte_flow_error *error) +{ + int rc; + efx_filter_spec_t *efx_spec = parse_ctx->filter; + const struct rte_flow_item_eth *spec = NULL; + const struct rte_flow_item_eth *mask = NULL; + const struct rte_flow_item_eth supp_mask = { + .dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, + .src.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, + .type = 0xffff, + }; + const struct rte_flow_item_eth ifrm_supp_mask = { + .dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, + }; + const uint8_t ig_mask[EFX_MAC_ADDR_LEN] = { + 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 + }; + const struct rte_flow_item_eth *supp_mask_p; + const struct rte_flow_item_eth *def_mask_p; + uint8_t *loc_mac = NULL; + boolean_t is_ifrm = (efx_spec->efs_encap_type != + EFX_TUNNEL_PROTOCOL_NONE); + + if (is_ifrm) { + supp_mask_p = &ifrm_supp_mask; + def_mask_p = &ifrm_supp_mask; + loc_mac = efx_spec->efs_ifrm_loc_mac; + } else { + supp_mask_p = &supp_mask; + def_mask_p = &rte_flow_item_eth_mask; + loc_mac = efx_spec->efs_loc_mac; + } + + rc = sfc_flow_parse_init(item, + (const void **)&spec, + (const void **)&mask, + supp_mask_p, def_mask_p, + sizeof(struct rte_flow_item_eth), + error); + if (rc != 0) + return rc; + + /* If "spec" is not set, could be any Ethernet */ + if (spec == NULL) + return 0; + + if (rte_is_same_ether_addr(&mask->dst, &supp_mask.dst)) { + efx_spec->efs_match_flags |= is_ifrm ? + EFX_FILTER_MATCH_IFRM_LOC_MAC : + EFX_FILTER_MATCH_LOC_MAC; + rte_memcpy(loc_mac, spec->dst.addr_bytes, + EFX_MAC_ADDR_LEN); + } else if (memcmp(mask->dst.addr_bytes, ig_mask, + EFX_MAC_ADDR_LEN) == 0) { + if (rte_is_unicast_ether_addr(&spec->dst)) + efx_spec->efs_match_flags |= is_ifrm ? + EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST : + EFX_FILTER_MATCH_UNKNOWN_UCAST_DST; + else + efx_spec->efs_match_flags |= is_ifrm ? + EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST : + EFX_FILTER_MATCH_UNKNOWN_MCAST_DST; + } else if (!rte_is_zero_ether_addr(&mask->dst)) { + goto fail_bad_mask; + } + + /* + * ifrm_supp_mask ensures that the source address and + * ethertype masks are equal to zero in inner frame, + * so these fields are filled in only for the outer frame + */ + if (rte_is_same_ether_addr(&mask->src, &supp_mask.src)) { + efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_MAC; + rte_memcpy(efx_spec->efs_rem_mac, spec->src.addr_bytes, + EFX_MAC_ADDR_LEN); + } else if (!rte_is_zero_ether_addr(&mask->src)) { + goto fail_bad_mask; + } + + /* + * Ether type is in big-endian byte order in item and + * in little-endian in efx_spec, so byte swap is used + */ + if (mask->type == supp_mask.type) { + efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE; + efx_spec->efs_ether_type = rte_bswap16(spec->type); + } else if (mask->type != 0) { + goto fail_bad_mask; + } + + return 0; + +fail_bad_mask: + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Bad mask in the ETH pattern item"); + return -rte_errno; +} + +/** + * Convert VLAN item to EFX filter specification. + * + * @param item[in] + * Item specification. Only VID field is supported. + * The mask can not be NULL. Ranging is not supported. + * @param efx_spec[in, out] + * EFX filter specification to update. + * @param[out] error + * Perform verbose error reporting if not NULL. 
+ */ +static int +sfc_flow_parse_vlan(const struct rte_flow_item *item, + struct sfc_flow_parse_ctx *parse_ctx, + struct rte_flow_error *error) +{ + int rc; + uint16_t vid; + efx_filter_spec_t *efx_spec = parse_ctx->filter; + const struct rte_flow_item_vlan *spec = NULL; + const struct rte_flow_item_vlan *mask = NULL; + const struct rte_flow_item_vlan supp_mask = { + .tci = rte_cpu_to_be_16(ETH_VLAN_ID_MAX), + .inner_type = RTE_BE16(0xffff), + }; + + rc = sfc_flow_parse_init(item, + (const void **)&spec, + (const void **)&mask, + &supp_mask, + NULL, + sizeof(struct rte_flow_item_vlan), + error); + if (rc != 0) + return rc; + + /* + * VID is in big-endian byte order in item and + * in little-endian in efx_spec, so byte swap is used. + * If two VLAN items are included, the first matches + * the outer tag and the next matches the inner tag. + */ + if (mask->tci == supp_mask.tci) { + /* Apply mask to keep VID only */ + vid = rte_bswap16(spec->tci & mask->tci); + + if (!(efx_spec->efs_match_flags & + EFX_FILTER_MATCH_OUTER_VID)) { + efx_spec->efs_match_flags |= EFX_FILTER_MATCH_OUTER_VID; + efx_spec->efs_outer_vid = vid; + } else if (!(efx_spec->efs_match_flags & + EFX_FILTER_MATCH_INNER_VID)) { + efx_spec->efs_match_flags |= EFX_FILTER_MATCH_INNER_VID; + efx_spec->efs_inner_vid = vid; + } else { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "More than two VLAN items"); + return -rte_errno; + } + } else { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "VLAN ID in TCI match is required"); + return -rte_errno; + } + + if (efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "VLAN TPID matching is not supported"); + return -rte_errno; + } + if (mask->inner_type == supp_mask.inner_type) { + efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE; + efx_spec->efs_ether_type = rte_bswap16(spec->inner_type); + } else if (mask->inner_type) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Bad mask for VLAN inner_type"); + return -rte_errno; + } + + return 0; +} + +/** + * Convert IPv4 item to EFX filter specification. + * + * @param item[in] + * Item specification. Only source and destination addresses and + * protocol fields are supported. If the mask is NULL, default + * mask will be used. Ranging is not supported. + * @param efx_spec[in, out] + * EFX filter specification to update. + * @param[out] error + * Perform verbose error reporting if not NULL. 
+ */ +static int +sfc_flow_parse_ipv4(const struct rte_flow_item *item, + struct sfc_flow_parse_ctx *parse_ctx, + struct rte_flow_error *error) +{ + int rc; + efx_filter_spec_t *efx_spec = parse_ctx->filter; + const struct rte_flow_item_ipv4 *spec = NULL; + const struct rte_flow_item_ipv4 *mask = NULL; + const uint16_t ether_type_ipv4 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV4); + const struct rte_flow_item_ipv4 supp_mask = { + .hdr = { + .src_addr = 0xffffffff, + .dst_addr = 0xffffffff, + .next_proto_id = 0xff, + } + }; + + rc = sfc_flow_parse_init(item, + (const void **)&spec, + (const void **)&mask, + &supp_mask, + &rte_flow_item_ipv4_mask, + sizeof(struct rte_flow_item_ipv4), + error); + if (rc != 0) + return rc; + + /* + * Filtering by IPv4 source and destination addresses requires + * the appropriate ETHER_TYPE in hardware filters + */ + if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) { + efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE; + efx_spec->efs_ether_type = ether_type_ipv4; + } else if (efx_spec->efs_ether_type != ether_type_ipv4) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Ethertype in pattern with IPV4 item should be appropriate"); + return -rte_errno; + } + + if (spec == NULL) + return 0; + + /* + * IPv4 addresses are in big-endian byte order in item and in + * efx_spec + */ + if (mask->hdr.src_addr == supp_mask.hdr.src_addr) { + efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST; + efx_spec->efs_rem_host.eo_u32[0] = spec->hdr.src_addr; + } else if (mask->hdr.src_addr != 0) { + goto fail_bad_mask; + } + + if (mask->hdr.dst_addr == supp_mask.hdr.dst_addr) { + efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST; + efx_spec->efs_loc_host.eo_u32[0] = spec->hdr.dst_addr; + } else if (mask->hdr.dst_addr != 0) { + goto fail_bad_mask; + } + + if (mask->hdr.next_proto_id == supp_mask.hdr.next_proto_id) { + efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO; + efx_spec->efs_ip_proto = spec->hdr.next_proto_id; + } else if (mask->hdr.next_proto_id != 0) { + goto fail_bad_mask; + } + + return 0; + +fail_bad_mask: + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Bad mask in the IPV4 pattern item"); + return -rte_errno; +} + +/** + * Convert IPv6 item to EFX filter specification. + * + * @param item[in] + * Item specification. Only source and destination addresses and + * next header fields are supported. If the mask is NULL, default + * mask will be used. Ranging is not supported. + * @param efx_spec[in, out] + * EFX filter specification to update. + * @param[out] error + * Perform verbose error reporting if not NULL. 
+ */ +static int +sfc_flow_parse_ipv6(const struct rte_flow_item *item, + struct sfc_flow_parse_ctx *parse_ctx, + struct rte_flow_error *error) +{ + int rc; + efx_filter_spec_t *efx_spec = parse_ctx->filter; + const struct rte_flow_item_ipv6 *spec = NULL; + const struct rte_flow_item_ipv6 *mask = NULL; + const uint16_t ether_type_ipv6 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV6); + const struct rte_flow_item_ipv6 supp_mask = { + .hdr = { + .src_addr = { 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff }, + .dst_addr = { 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff }, + .proto = 0xff, + } + }; + + rc = sfc_flow_parse_init(item, + (const void **)&spec, + (const void **)&mask, + &supp_mask, + &rte_flow_item_ipv6_mask, + sizeof(struct rte_flow_item_ipv6), + error); + if (rc != 0) + return rc; + + /* + * Filtering by IPv6 source and destination addresses requires + * the appropriate ETHER_TYPE in hardware filters + */ + if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) { + efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE; + efx_spec->efs_ether_type = ether_type_ipv6; + } else if (efx_spec->efs_ether_type != ether_type_ipv6) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Ethertype in pattern with IPV6 item should be appropriate"); + return -rte_errno; + } + + if (spec == NULL) + return 0; + + /* + * IPv6 addresses are in big-endian byte order in item and in + * efx_spec + */ + if (memcmp(mask->hdr.src_addr, supp_mask.hdr.src_addr, + sizeof(mask->hdr.src_addr)) == 0) { + efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST; + + RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_rem_host) != + sizeof(spec->hdr.src_addr)); + rte_memcpy(&efx_spec->efs_rem_host, spec->hdr.src_addr, + sizeof(efx_spec->efs_rem_host)); + } else if (!sfc_flow_is_zero(mask->hdr.src_addr, + sizeof(mask->hdr.src_addr))) { + goto fail_bad_mask; + } + + if (memcmp(mask->hdr.dst_addr, supp_mask.hdr.dst_addr, + sizeof(mask->hdr.dst_addr)) == 0) { + efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST; + + RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_loc_host) != + sizeof(spec->hdr.dst_addr)); + rte_memcpy(&efx_spec->efs_loc_host, spec->hdr.dst_addr, + sizeof(efx_spec->efs_loc_host)); + } else if (!sfc_flow_is_zero(mask->hdr.dst_addr, + sizeof(mask->hdr.dst_addr))) { + goto fail_bad_mask; + } + + if (mask->hdr.proto == supp_mask.hdr.proto) { + efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO; + efx_spec->efs_ip_proto = spec->hdr.proto; + } else if (mask->hdr.proto != 0) { + goto fail_bad_mask; + } + + return 0; + +fail_bad_mask: + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Bad mask in the IPV6 pattern item"); + return -rte_errno; +} + +/** + * Convert TCP item to EFX filter specification. + * + * @param item[in] + * Item specification. Only source and destination ports fields + * are supported. If the mask is NULL, default mask will be used. + * Ranging is not supported. + * @param efx_spec[in, out] + * EFX filter specification to update. + * @param[out] error + * Perform verbose error reporting if not NULL. 
+ */ +static int +sfc_flow_parse_tcp(const struct rte_flow_item *item, + struct sfc_flow_parse_ctx *parse_ctx, + struct rte_flow_error *error) +{ + int rc; + efx_filter_spec_t *efx_spec = parse_ctx->filter; + const struct rte_flow_item_tcp *spec = NULL; + const struct rte_flow_item_tcp *mask = NULL; + const struct rte_flow_item_tcp supp_mask = { + .hdr = { + .src_port = 0xffff, + .dst_port = 0xffff, + } + }; + + rc = sfc_flow_parse_init(item, + (const void **)&spec, + (const void **)&mask, + &supp_mask, + &rte_flow_item_tcp_mask, + sizeof(struct rte_flow_item_tcp), + error); + if (rc != 0) + return rc; + + /* + * Filtering by TCP source and destination ports requires + * the appropriate IP_PROTO in hardware filters + */ + if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) { + efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO; + efx_spec->efs_ip_proto = EFX_IPPROTO_TCP; + } else if (efx_spec->efs_ip_proto != EFX_IPPROTO_TCP) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "IP proto in pattern with TCP item should be appropriate"); + return -rte_errno; + } + + if (spec == NULL) + return 0; + + /* + * Source and destination ports are in big-endian byte order in item and + * in little-endian in efx_spec, so byte swap is used + */ + if (mask->hdr.src_port == supp_mask.hdr.src_port) { + efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT; + efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port); + } else if (mask->hdr.src_port != 0) { + goto fail_bad_mask; + } + + if (mask->hdr.dst_port == supp_mask.hdr.dst_port) { + efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT; + efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port); + } else if (mask->hdr.dst_port != 0) { + goto fail_bad_mask; + } + + return 0; + +fail_bad_mask: + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Bad mask in the TCP pattern item"); + return -rte_errno; +} + +/** + * Convert UDP item to EFX filter specification. + * + * @param item[in] + * Item specification. Only source and destination ports fields + * are supported. If the mask is NULL, default mask will be used. + * Ranging is not supported. + * @param efx_spec[in, out] + * EFX filter specification to update. + * @param[out] error + * Perform verbose error reporting if not NULL. 
+ */ +static int +sfc_flow_parse_udp(const struct rte_flow_item *item, + struct sfc_flow_parse_ctx *parse_ctx, + struct rte_flow_error *error) +{ + int rc; + efx_filter_spec_t *efx_spec = parse_ctx->filter; + const struct rte_flow_item_udp *spec = NULL; + const struct rte_flow_item_udp *mask = NULL; + const struct rte_flow_item_udp supp_mask = { + .hdr = { + .src_port = 0xffff, + .dst_port = 0xffff, + } + }; + + rc = sfc_flow_parse_init(item, + (const void **)&spec, + (const void **)&mask, + &supp_mask, + &rte_flow_item_udp_mask, + sizeof(struct rte_flow_item_udp), + error); + if (rc != 0) + return rc; + + /* + * Filtering by UDP source and destination ports requires + * the appropriate IP_PROTO in hardware filters + */ + if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) { + efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO; + efx_spec->efs_ip_proto = EFX_IPPROTO_UDP; + } else if (efx_spec->efs_ip_proto != EFX_IPPROTO_UDP) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "IP proto in pattern with UDP item should be appropriate"); + return -rte_errno; + } + + if (spec == NULL) + return 0; + + /* + * Source and destination ports are in big-endian byte order in item and + * in little-endian in efx_spec, so byte swap is used + */ + if (mask->hdr.src_port == supp_mask.hdr.src_port) { + efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT; + efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port); + } else if (mask->hdr.src_port != 0) { + goto fail_bad_mask; + } + + if (mask->hdr.dst_port == supp_mask.hdr.dst_port) { + efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT; + efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port); + } else if (mask->hdr.dst_port != 0) { + goto fail_bad_mask; + } + + return 0; + +fail_bad_mask: + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Bad mask in the UDP pattern item"); + return -rte_errno; +} + +/* + * Filters for encapsulated packets match based on the EtherType and IP + * protocol in the outer frame. 
+ */ +static int +sfc_flow_set_match_flags_for_encap_pkts(const struct rte_flow_item *item, + efx_filter_spec_t *efx_spec, + uint8_t ip_proto, + struct rte_flow_error *error) +{ + if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) { + efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO; + efx_spec->efs_ip_proto = ip_proto; + } else if (efx_spec->efs_ip_proto != ip_proto) { + switch (ip_proto) { + case EFX_IPPROTO_UDP: + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Outer IP header protocol must be UDP " + "in VxLAN/GENEVE pattern"); + return -rte_errno; + + case EFX_IPPROTO_GRE: + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Outer IP header protocol must be GRE " + "in NVGRE pattern"); + return -rte_errno; + + default: + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Only VxLAN/GENEVE/NVGRE tunneling patterns " + "are supported"); + return -rte_errno; + } + } + + if (efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE && + efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV4 && + efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV6) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Outer frame EtherType in pattern with tunneling " + "must be IPv4 or IPv6"); + return -rte_errno; + } + + return 0; +} + +static int +sfc_flow_set_efx_spec_vni_or_vsid(efx_filter_spec_t *efx_spec, + const uint8_t *vni_or_vsid_val, + const uint8_t *vni_or_vsid_mask, + const struct rte_flow_item *item, + struct rte_flow_error *error) +{ + const uint8_t vni_or_vsid_full_mask[EFX_VNI_OR_VSID_LEN] = { + 0xff, 0xff, 0xff + }; + + if (memcmp(vni_or_vsid_mask, vni_or_vsid_full_mask, + EFX_VNI_OR_VSID_LEN) == 0) { + efx_spec->efs_match_flags |= EFX_FILTER_MATCH_VNI_OR_VSID; + rte_memcpy(efx_spec->efs_vni_or_vsid, vni_or_vsid_val, + EFX_VNI_OR_VSID_LEN); + } else if (!sfc_flow_is_zero(vni_or_vsid_mask, EFX_VNI_OR_VSID_LEN)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Unsupported VNI/VSID mask"); + return -rte_errno; + } + + return 0; +} + +/** + * Convert VXLAN item to EFX filter specification. + * + * @param item[in] + * Item specification. Only VXLAN network identifier field is supported. + * If the mask is NULL, default mask will be used. + * Ranging is not supported. + * @param efx_spec[in, out] + * EFX filter specification to update. + * @param[out] error + * Perform verbose error reporting if not NULL. + */ +static int +sfc_flow_parse_vxlan(const struct rte_flow_item *item, + struct sfc_flow_parse_ctx *parse_ctx, + struct rte_flow_error *error) +{ + int rc; + efx_filter_spec_t *efx_spec = parse_ctx->filter; + const struct rte_flow_item_vxlan *spec = NULL; + const struct rte_flow_item_vxlan *mask = NULL; + const struct rte_flow_item_vxlan supp_mask = { + .vni = { 0xff, 0xff, 0xff } + }; + + rc = sfc_flow_parse_init(item, + (const void **)&spec, + (const void **)&mask, + &supp_mask, + &rte_flow_item_vxlan_mask, + sizeof(struct rte_flow_item_vxlan), + error); + if (rc != 0) + return rc; + + rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec, + EFX_IPPROTO_UDP, error); + if (rc != 0) + return rc; + + efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_VXLAN; + efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE; + + if (spec == NULL) + return 0; + + rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni, + mask->vni, item, error); + + return rc; +} + +/** + * Convert GENEVE item to EFX filter specification. + * + * @param item[in] + * Item specification. 
Only Virtual Network Identifier and protocol type + * fields are supported. But protocol type can be only Ethernet (0x6558). + * If the mask is NULL, default mask will be used. + * Ranging is not supported. + * @param efx_spec[in, out] + * EFX filter specification to update. + * @param[out] error + * Perform verbose error reporting if not NULL. + */ +static int +sfc_flow_parse_geneve(const struct rte_flow_item *item, + struct sfc_flow_parse_ctx *parse_ctx, + struct rte_flow_error *error) +{ + int rc; + efx_filter_spec_t *efx_spec = parse_ctx->filter; + const struct rte_flow_item_geneve *spec = NULL; + const struct rte_flow_item_geneve *mask = NULL; + const struct rte_flow_item_geneve supp_mask = { + .protocol = RTE_BE16(0xffff), + .vni = { 0xff, 0xff, 0xff } + }; + + rc = sfc_flow_parse_init(item, + (const void **)&spec, + (const void **)&mask, + &supp_mask, + &rte_flow_item_geneve_mask, + sizeof(struct rte_flow_item_geneve), + error); + if (rc != 0) + return rc; + + rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec, + EFX_IPPROTO_UDP, error); + if (rc != 0) + return rc; + + efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_GENEVE; + efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE; + + if (spec == NULL) + return 0; + + if (mask->protocol == supp_mask.protocol) { + if (spec->protocol != rte_cpu_to_be_16(RTE_ETHER_TYPE_TEB)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "GENEVE encap. protocol must be Ethernet " + "(0x6558) in the GENEVE pattern item"); + return -rte_errno; + } + } else if (mask->protocol != 0) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Unsupported mask for GENEVE encap. protocol"); + return -rte_errno; + } + + rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni, + mask->vni, item, error); + + return rc; +} + +/** + * Convert NVGRE item to EFX filter specification. + * + * @param item[in] + * Item specification. Only virtual subnet ID field is supported. + * If the mask is NULL, default mask will be used. + * Ranging is not supported. + * @param efx_spec[in, out] + * EFX filter specification to update. + * @param[out] error + * Perform verbose error reporting if not NULL. 
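+ *
+ * The virtual subnet ID (the TNI field of the item) is matched by means of
+ * the same VNI/VSID filter field which is used for VXLAN and GENEVE
+ * network identifiers.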
+ */ +static int +sfc_flow_parse_nvgre(const struct rte_flow_item *item, + struct sfc_flow_parse_ctx *parse_ctx, + struct rte_flow_error *error) +{ + int rc; + efx_filter_spec_t *efx_spec = parse_ctx->filter; + const struct rte_flow_item_nvgre *spec = NULL; + const struct rte_flow_item_nvgre *mask = NULL; + const struct rte_flow_item_nvgre supp_mask = { + .tni = { 0xff, 0xff, 0xff } + }; + + rc = sfc_flow_parse_init(item, + (const void **)&spec, + (const void **)&mask, + &supp_mask, + &rte_flow_item_nvgre_mask, + sizeof(struct rte_flow_item_nvgre), + error); + if (rc != 0) + return rc; + + rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec, + EFX_IPPROTO_GRE, error); + if (rc != 0) + return rc; + + efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_NVGRE; + efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE; + + if (spec == NULL) + return 0; + + rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->tni, + mask->tni, item, error); + + return rc; +} + +static const struct sfc_flow_item sfc_flow_items[] = { + { + .type = RTE_FLOW_ITEM_TYPE_VOID, + .prev_layer = SFC_FLOW_ITEM_ANY_LAYER, + .layer = SFC_FLOW_ITEM_ANY_LAYER, + .ctx_type = SFC_FLOW_PARSE_CTX_FILTER, + .parse = sfc_flow_parse_void, + }, + { + .type = RTE_FLOW_ITEM_TYPE_ETH, + .prev_layer = SFC_FLOW_ITEM_START_LAYER, + .layer = SFC_FLOW_ITEM_L2, + .ctx_type = SFC_FLOW_PARSE_CTX_FILTER, + .parse = sfc_flow_parse_eth, + }, + { + .type = RTE_FLOW_ITEM_TYPE_VLAN, + .prev_layer = SFC_FLOW_ITEM_L2, + .layer = SFC_FLOW_ITEM_L2, + .ctx_type = SFC_FLOW_PARSE_CTX_FILTER, + .parse = sfc_flow_parse_vlan, + }, + { + .type = RTE_FLOW_ITEM_TYPE_IPV4, + .prev_layer = SFC_FLOW_ITEM_L2, + .layer = SFC_FLOW_ITEM_L3, + .ctx_type = SFC_FLOW_PARSE_CTX_FILTER, + .parse = sfc_flow_parse_ipv4, + }, + { + .type = RTE_FLOW_ITEM_TYPE_IPV6, + .prev_layer = SFC_FLOW_ITEM_L2, + .layer = SFC_FLOW_ITEM_L3, + .ctx_type = SFC_FLOW_PARSE_CTX_FILTER, + .parse = sfc_flow_parse_ipv6, + }, + { + .type = RTE_FLOW_ITEM_TYPE_TCP, + .prev_layer = SFC_FLOW_ITEM_L3, + .layer = SFC_FLOW_ITEM_L4, + .ctx_type = SFC_FLOW_PARSE_CTX_FILTER, + .parse = sfc_flow_parse_tcp, + }, + { + .type = RTE_FLOW_ITEM_TYPE_UDP, + .prev_layer = SFC_FLOW_ITEM_L3, + .layer = SFC_FLOW_ITEM_L4, + .ctx_type = SFC_FLOW_PARSE_CTX_FILTER, + .parse = sfc_flow_parse_udp, + }, + { + .type = RTE_FLOW_ITEM_TYPE_VXLAN, + .prev_layer = SFC_FLOW_ITEM_L4, + .layer = SFC_FLOW_ITEM_START_LAYER, + .ctx_type = SFC_FLOW_PARSE_CTX_FILTER, + .parse = sfc_flow_parse_vxlan, + }, + { + .type = RTE_FLOW_ITEM_TYPE_GENEVE, + .prev_layer = SFC_FLOW_ITEM_L4, + .layer = SFC_FLOW_ITEM_START_LAYER, + .ctx_type = SFC_FLOW_PARSE_CTX_FILTER, + .parse = sfc_flow_parse_geneve, + }, + { + .type = RTE_FLOW_ITEM_TYPE_NVGRE, + .prev_layer = SFC_FLOW_ITEM_L3, + .layer = SFC_FLOW_ITEM_START_LAYER, + .ctx_type = SFC_FLOW_PARSE_CTX_FILTER, + .parse = sfc_flow_parse_nvgre, + }, +}; + +/* + * Protocol-independent flow API support + */ +static int +sfc_flow_parse_attr(const struct rte_flow_attr *attr, + struct rte_flow *flow, + struct rte_flow_error *error) +{ + struct sfc_flow_spec *spec = &flow->spec; + struct sfc_flow_spec_filter *spec_filter = &spec->filter; + + if (attr == NULL) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR, NULL, + "NULL attribute"); + return -rte_errno; + } + if (attr->group != 0) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_GROUP, attr, + "Groups are not supported"); + return -rte_errno; + } + if (attr->egress != 0) { + rte_flow_error_set(error, ENOTSUP, + 
RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attr, + "Egress is not supported"); + return -rte_errno; + } + if (attr->ingress == 0) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr, + "Ingress is compulsory"); + return -rte_errno; + } + if (attr->transfer == 0) { + if (attr->priority != 0) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, + attr, "Priorities are unsupported"); + return -rte_errno; + } + spec->type = SFC_FLOW_SPEC_FILTER; + spec_filter->template.efs_flags |= EFX_FILTER_FLAG_RX; + spec_filter->template.efs_rss_context = EFX_RSS_CONTEXT_DEFAULT; + spec_filter->template.efs_priority = EFX_FILTER_PRI_MANUAL; + } else { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, attr, + "Transfer is not supported"); + return -rte_errno; + } + + return 0; +} + +/* Get item from array sfc_flow_items */ +static const struct sfc_flow_item * +sfc_flow_get_item(const struct sfc_flow_item *items, + unsigned int nb_items, + enum rte_flow_item_type type) +{ + unsigned int i; + + for (i = 0; i < nb_items; i++) + if (items[i].type == type) + return &items[i]; + + return NULL; +} + +int +sfc_flow_parse_pattern(const struct sfc_flow_item *flow_items, + unsigned int nb_flow_items, + const struct rte_flow_item pattern[], + struct sfc_flow_parse_ctx *parse_ctx, + struct rte_flow_error *error) +{ + int rc; + unsigned int prev_layer = SFC_FLOW_ITEM_ANY_LAYER; + boolean_t is_ifrm = B_FALSE; + const struct sfc_flow_item *item; + + if (pattern == NULL) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL, + "NULL pattern"); + return -rte_errno; + } + + for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) { + item = sfc_flow_get_item(flow_items, nb_flow_items, + pattern->type); + if (item == NULL) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, pattern, + "Unsupported pattern item"); + return -rte_errno; + } + + /* + * Omitting one or several protocol layers at the beginning + * of pattern is supported + */ + if (item->prev_layer != SFC_FLOW_ITEM_ANY_LAYER && + prev_layer != SFC_FLOW_ITEM_ANY_LAYER && + item->prev_layer != prev_layer) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, pattern, + "Unexpected sequence of pattern items"); + return -rte_errno; + } + + /* + * Allow only VOID and ETH pattern items in the inner frame. + * Also check that there is only one tunneling protocol. 
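+		 * For example, ETH / IPV4 / UDP / VXLAN / ETH is accepted,
+		 * while a pattern which continues after the tunnel item with
+		 * anything other than VOID or ETH, or which contains a second
+		 * tunnel item, is rejected.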
+ */ + switch (item->type) { + case RTE_FLOW_ITEM_TYPE_VOID: + case RTE_FLOW_ITEM_TYPE_ETH: + break; + + case RTE_FLOW_ITEM_TYPE_VXLAN: + case RTE_FLOW_ITEM_TYPE_GENEVE: + case RTE_FLOW_ITEM_TYPE_NVGRE: + if (is_ifrm) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + pattern, + "More than one tunneling protocol"); + return -rte_errno; + } + is_ifrm = B_TRUE; + break; + + default: + if (is_ifrm) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + pattern, + "There is an unsupported pattern item " + "in the inner frame"); + return -rte_errno; + } + break; + } + + if (parse_ctx->type != item->ctx_type) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, pattern, + "Parse context type mismatch"); + return -rte_errno; + } + + rc = item->parse(pattern, parse_ctx, error); + if (rc != 0) + return rc; + + if (item->layer != SFC_FLOW_ITEM_ANY_LAYER) + prev_layer = item->layer; + } + + return 0; +} + +static int +sfc_flow_parse_queue(struct sfc_adapter *sa, + const struct rte_flow_action_queue *queue, + struct rte_flow *flow) +{ + struct sfc_flow_spec *spec = &flow->spec; + struct sfc_flow_spec_filter *spec_filter = &spec->filter; + struct sfc_rxq *rxq; + + if (queue->index >= sfc_sa2shared(sa)->rxq_count) + return -EINVAL; + + rxq = &sa->rxq_ctrl[queue->index]; + spec_filter->template.efs_dmaq_id = (uint16_t)rxq->hw_index; + + return 0; +} + +static int +sfc_flow_parse_rss(struct sfc_adapter *sa, + const struct rte_flow_action_rss *action_rss, + struct rte_flow *flow) +{ + struct sfc_adapter_shared * const sas = sfc_sa2shared(sa); + struct sfc_rss *rss = &sas->rss; + unsigned int rxq_sw_index; + struct sfc_rxq *rxq; + unsigned int rxq_hw_index_min; + unsigned int rxq_hw_index_max; + efx_rx_hash_type_t efx_hash_types; + const uint8_t *rss_key; + struct sfc_flow_spec *spec = &flow->spec; + struct sfc_flow_spec_filter *spec_filter = &spec->filter; + struct sfc_flow_rss *sfc_rss_conf = &spec_filter->rss_conf; + unsigned int i; + + if (action_rss->queue_num == 0) + return -EINVAL; + + rxq_sw_index = sfc_sa2shared(sa)->rxq_count - 1; + rxq = &sa->rxq_ctrl[rxq_sw_index]; + rxq_hw_index_min = rxq->hw_index; + rxq_hw_index_max = 0; + + for (i = 0; i < action_rss->queue_num; ++i) { + rxq_sw_index = action_rss->queue[i]; + + if (rxq_sw_index >= sfc_sa2shared(sa)->rxq_count) + return -EINVAL; + + rxq = &sa->rxq_ctrl[rxq_sw_index]; + + if (rxq->hw_index < rxq_hw_index_min) + rxq_hw_index_min = rxq->hw_index; + + if (rxq->hw_index > rxq_hw_index_max) + rxq_hw_index_max = rxq->hw_index; + } + + switch (action_rss->func) { + case RTE_ETH_HASH_FUNCTION_DEFAULT: + case RTE_ETH_HASH_FUNCTION_TOEPLITZ: + break; + default: + return -EINVAL; + } + + if (action_rss->level) + return -EINVAL; + + /* + * Dummy RSS action with only one queue and no specific settings + * for hash types and key does not require dedicated RSS context + * and may be simplified to single queue action. 
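+	 * In this case the action is handled as if it were a QUEUE action
+	 * on the single queue listed in the RSS configuration.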
+ */ + if (action_rss->queue_num == 1 && action_rss->types == 0 && + action_rss->key_len == 0) { + spec_filter->template.efs_dmaq_id = rxq_hw_index_min; + return 0; + } + + if (action_rss->types) { + int rc; + + rc = sfc_rx_hf_rte_to_efx(sa, action_rss->types, + &efx_hash_types); + if (rc != 0) + return -rc; + } else { + unsigned int i; + + efx_hash_types = 0; + for (i = 0; i < rss->hf_map_nb_entries; ++i) + efx_hash_types |= rss->hf_map[i].efx; + } + + if (action_rss->key_len) { + if (action_rss->key_len != sizeof(rss->key)) + return -EINVAL; + + rss_key = action_rss->key; + } else { + rss_key = rss->key; + } + + spec_filter->rss = B_TRUE; + + sfc_rss_conf->rxq_hw_index_min = rxq_hw_index_min; + sfc_rss_conf->rxq_hw_index_max = rxq_hw_index_max; + sfc_rss_conf->rss_hash_types = efx_hash_types; + rte_memcpy(sfc_rss_conf->rss_key, rss_key, sizeof(rss->key)); + + for (i = 0; i < RTE_DIM(sfc_rss_conf->rss_tbl); ++i) { + unsigned int nb_queues = action_rss->queue_num; + unsigned int rxq_sw_index = action_rss->queue[i % nb_queues]; + struct sfc_rxq *rxq = &sa->rxq_ctrl[rxq_sw_index]; + + sfc_rss_conf->rss_tbl[i] = rxq->hw_index - rxq_hw_index_min; + } + + return 0; +} + +static int +sfc_flow_spec_flush(struct sfc_adapter *sa, struct sfc_flow_spec *spec, + unsigned int filters_count) +{ + struct sfc_flow_spec_filter *spec_filter = &spec->filter; + unsigned int i; + int ret = 0; + + for (i = 0; i < filters_count; i++) { + int rc; + + rc = efx_filter_remove(sa->nic, &spec_filter->filters[i]); + if (ret == 0 && rc != 0) { + sfc_err(sa, "failed to remove filter specification " + "(rc = %d)", rc); + ret = rc; + } + } + + return ret; +} + +static int +sfc_flow_spec_insert(struct sfc_adapter *sa, struct sfc_flow_spec *spec) +{ + struct sfc_flow_spec_filter *spec_filter = &spec->filter; + unsigned int i; + int rc = 0; + + for (i = 0; i < spec_filter->count; i++) { + rc = efx_filter_insert(sa->nic, &spec_filter->filters[i]); + if (rc != 0) { + sfc_flow_spec_flush(sa, spec, i); + break; + } + } + + return rc; +} + +static int +sfc_flow_spec_remove(struct sfc_adapter *sa, struct sfc_flow_spec *spec) +{ + struct sfc_flow_spec_filter *spec_filter = &spec->filter; + + return sfc_flow_spec_flush(sa, spec, spec_filter->count); +} + +static int +sfc_flow_filter_insert(struct sfc_adapter *sa, + struct rte_flow *flow) +{ + struct sfc_adapter_shared * const sas = sfc_sa2shared(sa); + struct sfc_rss *rss = &sas->rss; + struct sfc_flow_spec_filter *spec_filter = &flow->spec.filter; + struct sfc_flow_rss *flow_rss = &spec_filter->rss_conf; + uint32_t efs_rss_context = EFX_RSS_CONTEXT_DEFAULT; + unsigned int i; + int rc = 0; + + if (spec_filter->rss) { + unsigned int rss_spread = MIN(flow_rss->rxq_hw_index_max - + flow_rss->rxq_hw_index_min + 1, + EFX_MAXRSS); + + rc = efx_rx_scale_context_alloc(sa->nic, + EFX_RX_SCALE_EXCLUSIVE, + rss_spread, + &efs_rss_context); + if (rc != 0) + goto fail_scale_context_alloc; + + rc = efx_rx_scale_mode_set(sa->nic, efs_rss_context, + rss->hash_alg, + flow_rss->rss_hash_types, B_TRUE); + if (rc != 0) + goto fail_scale_mode_set; + + rc = efx_rx_scale_key_set(sa->nic, efs_rss_context, + flow_rss->rss_key, + sizeof(rss->key)); + if (rc != 0) + goto fail_scale_key_set; + + /* + * At this point, fully elaborated filter specifications + * have been produced from the template. To make sure that + * RSS behaviour is consistent between them, set the same + * RSS context value everywhere. 
+ */ + for (i = 0; i < spec_filter->count; i++) { + efx_filter_spec_t *spec = &spec_filter->filters[i]; + + spec->efs_rss_context = efs_rss_context; + spec->efs_dmaq_id = flow_rss->rxq_hw_index_min; + spec->efs_flags |= EFX_FILTER_FLAG_RX_RSS; + } + } + + rc = sfc_flow_spec_insert(sa, &flow->spec); + if (rc != 0) + goto fail_filter_insert; + + if (spec_filter->rss) { + /* + * Scale table is set after filter insertion because + * the table entries are relative to the base RxQ ID + * and the latter is submitted to the HW by means of + * inserting a filter, so by the time of the request + * the HW knows all the information needed to verify + * the table entries, and the operation will succeed + */ + rc = efx_rx_scale_tbl_set(sa->nic, efs_rss_context, + flow_rss->rss_tbl, + RTE_DIM(flow_rss->rss_tbl)); + if (rc != 0) + goto fail_scale_tbl_set; + } + + return 0; + +fail_scale_tbl_set: + sfc_flow_spec_remove(sa, &flow->spec); + +fail_filter_insert: +fail_scale_key_set: +fail_scale_mode_set: + if (efs_rss_context != EFX_RSS_CONTEXT_DEFAULT) + efx_rx_scale_context_free(sa->nic, efs_rss_context); + +fail_scale_context_alloc: + return rc; +} + +static int +sfc_flow_filter_remove(struct sfc_adapter *sa, + struct rte_flow *flow) +{ + struct sfc_flow_spec_filter *spec_filter = &flow->spec.filter; + int rc = 0; + + rc = sfc_flow_spec_remove(sa, &flow->spec); + if (rc != 0) + return rc; + + if (spec_filter->rss) { + /* + * All specifications for a given flow rule have the same RSS + * context, so that RSS context value is taken from the first + * filter specification + */ + efx_filter_spec_t *spec = &spec_filter->filters[0]; + + rc = efx_rx_scale_context_free(sa->nic, spec->efs_rss_context); + } + + return rc; +} + +static int +sfc_flow_parse_mark(struct sfc_adapter *sa, + const struct rte_flow_action_mark *mark, + struct rte_flow *flow) +{ + struct sfc_flow_spec *spec = &flow->spec; + struct sfc_flow_spec_filter *spec_filter = &spec->filter; + const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic); + + if (mark == NULL || mark->id > encp->enc_filter_action_mark_max) + return EINVAL; + + spec_filter->template.efs_flags |= EFX_FILTER_FLAG_ACTION_MARK; + spec_filter->template.efs_mark = mark->id; + + return 0; +} + +static int +sfc_flow_parse_actions(struct sfc_adapter *sa, + const struct rte_flow_action actions[], + struct rte_flow *flow, + struct rte_flow_error *error) +{ + int rc; + struct sfc_flow_spec *spec = &flow->spec; + struct sfc_flow_spec_filter *spec_filter = &spec->filter; + const unsigned int dp_rx_features = sa->priv.dp_rx->features; + uint32_t actions_set = 0; + const uint32_t fate_actions_mask = (1UL << RTE_FLOW_ACTION_TYPE_QUEUE) | + (1UL << RTE_FLOW_ACTION_TYPE_RSS) | + (1UL << RTE_FLOW_ACTION_TYPE_DROP); + const uint32_t mark_actions_mask = (1UL << RTE_FLOW_ACTION_TYPE_MARK) | + (1UL << RTE_FLOW_ACTION_TYPE_FLAG); + + if (actions == NULL) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL, + "NULL actions"); + return -rte_errno; + } + +#define SFC_BUILD_SET_OVERFLOW(_action, _set) \ + RTE_BUILD_BUG_ON(_action >= sizeof(_set) * CHAR_BIT) + + for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { + switch (actions->type) { + case RTE_FLOW_ACTION_TYPE_VOID: + SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VOID, + actions_set); + break; + + case RTE_FLOW_ACTION_TYPE_QUEUE: + SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_QUEUE, + actions_set); + if ((actions_set & fate_actions_mask) != 0) + goto fail_fate_actions; + + rc = sfc_flow_parse_queue(sa, actions->conf, 
flow); + if (rc != 0) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, actions, + "Bad QUEUE action"); + return -rte_errno; + } + break; + + case RTE_FLOW_ACTION_TYPE_RSS: + SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_RSS, + actions_set); + if ((actions_set & fate_actions_mask) != 0) + goto fail_fate_actions; + + rc = sfc_flow_parse_rss(sa, actions->conf, flow); + if (rc != 0) { + rte_flow_error_set(error, -rc, + RTE_FLOW_ERROR_TYPE_ACTION, actions, + "Bad RSS action"); + return -rte_errno; + } + break; + + case RTE_FLOW_ACTION_TYPE_DROP: + SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_DROP, + actions_set); + if ((actions_set & fate_actions_mask) != 0) + goto fail_fate_actions; + + spec_filter->template.efs_dmaq_id = + EFX_FILTER_SPEC_RX_DMAQ_ID_DROP; + break; + + case RTE_FLOW_ACTION_TYPE_FLAG: + SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_FLAG, + actions_set); + if ((actions_set & mark_actions_mask) != 0) + goto fail_actions_overlap; + + if ((dp_rx_features & SFC_DP_RX_FEAT_FLOW_FLAG) == 0) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "FLAG action is not supported on the current Rx datapath"); + return -rte_errno; + } + + spec_filter->template.efs_flags |= + EFX_FILTER_FLAG_ACTION_FLAG; + break; + + case RTE_FLOW_ACTION_TYPE_MARK: + SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_MARK, + actions_set); + if ((actions_set & mark_actions_mask) != 0) + goto fail_actions_overlap; + + if ((dp_rx_features & SFC_DP_RX_FEAT_FLOW_MARK) == 0) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "MARK action is not supported on the current Rx datapath"); + return -rte_errno; + } + + rc = sfc_flow_parse_mark(sa, actions->conf, flow); + if (rc != 0) { + rte_flow_error_set(error, rc, + RTE_FLOW_ERROR_TYPE_ACTION, actions, + "Bad MARK action"); + return -rte_errno; + } + break; + + default: + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, actions, + "Action is not supported"); + return -rte_errno; + } + + actions_set |= (1UL << actions->type); + } +#undef SFC_BUILD_SET_OVERFLOW + + /* When fate is unknown, drop traffic. */ + if ((actions_set & fate_actions_mask) == 0) { + spec_filter->template.efs_dmaq_id = + EFX_FILTER_SPEC_RX_DMAQ_ID_DROP; + } + + return 0; + +fail_fate_actions: + rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, actions, + "Cannot combine several fate-deciding actions, " + "choose between QUEUE, RSS or DROP"); + return -rte_errno; + +fail_actions_overlap: + rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, actions, + "Overlapping actions are not supported"); + return -rte_errno; +} + +/** + * Set the EFX_FILTER_MATCH_UNKNOWN_UCAST_DST + * and EFX_FILTER_MATCH_UNKNOWN_MCAST_DST match flags in the same + * specifications after copying. + * + * @param spec[in, out] + * SFC flow specification to update. + * @param filters_count_for_one_val[in] + * How many specifications should have the same match flag, what is the + * number of specifications before copying. + * @param error[out] + * Perform verbose error reporting if not NULL. 
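+ *
+ * For example, with two specifications before copying, the resulting four
+ * are split so that the first two match unknown unicast destinations and
+ * the last two match unknown multicast destinations.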
+ */ +static int +sfc_flow_set_unknown_dst_flags(struct sfc_flow_spec *spec, + unsigned int filters_count_for_one_val, + struct rte_flow_error *error) +{ + unsigned int i; + struct sfc_flow_spec_filter *spec_filter = &spec->filter; + static const efx_filter_match_flags_t vals[] = { + EFX_FILTER_MATCH_UNKNOWN_UCAST_DST, + EFX_FILTER_MATCH_UNKNOWN_MCAST_DST + }; + + if (filters_count_for_one_val * RTE_DIM(vals) != spec_filter->count) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "Number of specifications is incorrect while copying " + "by unknown destination flags"); + return -rte_errno; + } + + for (i = 0; i < spec_filter->count; i++) { + /* The check above ensures that divisor can't be zero here */ + spec_filter->filters[i].efs_match_flags |= + vals[i / filters_count_for_one_val]; + } + + return 0; +} + +/** + * Check that the following conditions are met: + * - the list of supported filters has a filter + * with EFX_FILTER_MATCH_UNKNOWN_MCAST_DST flag instead of + * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST, since this filter will also + * be inserted. + * + * @param match[in] + * The match flags of filter. + * @param spec[in] + * Specification to be supplemented. + * @param filter[in] + * SFC filter with list of supported filters. + */ +static boolean_t +sfc_flow_check_unknown_dst_flags(efx_filter_match_flags_t match, + __rte_unused efx_filter_spec_t *spec, + struct sfc_filter *filter) +{ + unsigned int i; + efx_filter_match_flags_t match_mcast_dst; + + match_mcast_dst = + (match & ~EFX_FILTER_MATCH_UNKNOWN_UCAST_DST) | + EFX_FILTER_MATCH_UNKNOWN_MCAST_DST; + for (i = 0; i < filter->supported_match_num; i++) { + if (match_mcast_dst == filter->supported_match[i]) + return B_TRUE; + } + + return B_FALSE; +} + +/** + * Set the EFX_FILTER_MATCH_ETHER_TYPE match flag and EFX_ETHER_TYPE_IPV4 and + * EFX_ETHER_TYPE_IPV6 values of the corresponding field in the same + * specifications after copying. + * + * @param spec[in, out] + * SFC flow specification to update. + * @param filters_count_for_one_val[in] + * How many specifications should have the same EtherType value, what is the + * number of specifications before copying. + * @param error[out] + * Perform verbose error reporting if not NULL. + */ +static int +sfc_flow_set_ethertypes(struct sfc_flow_spec *spec, + unsigned int filters_count_for_one_val, + struct rte_flow_error *error) +{ + unsigned int i; + struct sfc_flow_spec_filter *spec_filter = &spec->filter; + static const uint16_t vals[] = { + EFX_ETHER_TYPE_IPV4, EFX_ETHER_TYPE_IPV6 + }; + + if (filters_count_for_one_val * RTE_DIM(vals) != spec_filter->count) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "Number of specifications is incorrect " + "while copying by Ethertype"); + return -rte_errno; + } + + for (i = 0; i < spec_filter->count; i++) { + spec_filter->filters[i].efs_match_flags |= + EFX_FILTER_MATCH_ETHER_TYPE; + + /* + * The check above ensures that + * filters_count_for_one_val is not 0 + */ + spec_filter->filters[i].efs_ether_type = + vals[i / filters_count_for_one_val]; + } + + return 0; +} + +/** + * Set the EFX_FILTER_MATCH_OUTER_VID match flag with value 0 + * in the same specifications after copying. + * + * @param spec[in, out] + * SFC flow specification to update. + * @param filters_count_for_one_val[in] + * How many specifications should have the same match flag, what is the + * number of specifications before copying. + * @param error[out] + * Perform verbose error reporting if not NULL. 
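+ *
+ * Unlike the other copy flags, this one does not multiply the number of
+ * specifications: the match flag and a zero outer VLAN ID are simply added
+ * to every specification.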
+ */ +static int +sfc_flow_set_outer_vid_flag(struct sfc_flow_spec *spec, + unsigned int filters_count_for_one_val, + struct rte_flow_error *error) +{ + struct sfc_flow_spec_filter *spec_filter = &spec->filter; + unsigned int i; + + if (filters_count_for_one_val != spec_filter->count) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "Number of specifications is incorrect " + "while copying by outer VLAN ID"); + return -rte_errno; + } + + for (i = 0; i < spec_filter->count; i++) { + spec_filter->filters[i].efs_match_flags |= + EFX_FILTER_MATCH_OUTER_VID; + + spec_filter->filters[i].efs_outer_vid = 0; + } + + return 0; +} + +/** + * Set the EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST and + * EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST match flags in the same + * specifications after copying. + * + * @param spec[in, out] + * SFC flow specification to update. + * @param filters_count_for_one_val[in] + * How many specifications should have the same match flag, what is the + * number of specifications before copying. + * @param error[out] + * Perform verbose error reporting if not NULL. + */ +static int +sfc_flow_set_ifrm_unknown_dst_flags(struct sfc_flow_spec *spec, + unsigned int filters_count_for_one_val, + struct rte_flow_error *error) +{ + unsigned int i; + struct sfc_flow_spec_filter *spec_filter = &spec->filter; + static const efx_filter_match_flags_t vals[] = { + EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST, + EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST + }; + + if (filters_count_for_one_val * RTE_DIM(vals) != spec_filter->count) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "Number of specifications is incorrect while copying " + "by inner frame unknown destination flags"); + return -rte_errno; + } + + for (i = 0; i < spec_filter->count; i++) { + /* The check above ensures that divisor can't be zero here */ + spec_filter->filters[i].efs_match_flags |= + vals[i / filters_count_for_one_val]; + } + + return 0; +} + +/** + * Check that the following conditions are met: + * - the specification corresponds to a filter for encapsulated traffic + * - the list of supported filters has a filter + * with EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST flag instead of + * EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST, since this filter will also + * be inserted. + * + * @param match[in] + * The match flags of filter. + * @param spec[in] + * Specification to be supplemented. + * @param filter[in] + * SFC filter with list of supported filters. + */ +static boolean_t +sfc_flow_check_ifrm_unknown_dst_flags(efx_filter_match_flags_t match, + efx_filter_spec_t *spec, + struct sfc_filter *filter) +{ + unsigned int i; + efx_tunnel_protocol_t encap_type = spec->efs_encap_type; + efx_filter_match_flags_t match_mcast_dst; + + if (encap_type == EFX_TUNNEL_PROTOCOL_NONE) + return B_FALSE; + + match_mcast_dst = + (match & ~EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST) | + EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST; + for (i = 0; i < filter->supported_match_num; i++) { + if (match_mcast_dst == filter->supported_match[i]) + return B_TRUE; + } + + return B_FALSE; +} + +/** + * Check that the list of supported filters has a filter that differs + * from @p match in that it has no flag EFX_FILTER_MATCH_OUTER_VID + * in this case that filter will be used and the flag + * EFX_FILTER_MATCH_OUTER_VID is not needed. + * + * @param match[in] + * The match flags of filter. + * @param spec[in] + * Specification to be supplemented. + * @param filter[in] + * SFC filter with list of supported filters. 
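+ *
+ * @return
+ *   B_TRUE if the EFX_FILTER_MATCH_OUTER_VID flag has to be added by
+ *   copying, B_FALSE if a supported filter without this flag can be used
+ *   instead.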
+ */ +static boolean_t +sfc_flow_check_outer_vid_flag(efx_filter_match_flags_t match, + __rte_unused efx_filter_spec_t *spec, + struct sfc_filter *filter) +{ + unsigned int i; + efx_filter_match_flags_t match_without_vid = + match & ~EFX_FILTER_MATCH_OUTER_VID; + + for (i = 0; i < filter->supported_match_num; i++) { + if (match_without_vid == filter->supported_match[i]) + return B_FALSE; + } + + return B_TRUE; +} + +/* + * Match flags that can be automatically added to filters. + * Selecting the last minimum when searching for the copy flag ensures that the + * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST flag has a higher priority than + * EFX_FILTER_MATCH_ETHER_TYPE. This is because the filter + * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST is at the end of the list of supported + * filters. + */ +static const struct sfc_flow_copy_flag sfc_flow_copy_flags[] = { + { + .flag = EFX_FILTER_MATCH_UNKNOWN_UCAST_DST, + .vals_count = 2, + .set_vals = sfc_flow_set_unknown_dst_flags, + .spec_check = sfc_flow_check_unknown_dst_flags, + }, + { + .flag = EFX_FILTER_MATCH_ETHER_TYPE, + .vals_count = 2, + .set_vals = sfc_flow_set_ethertypes, + .spec_check = NULL, + }, + { + .flag = EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST, + .vals_count = 2, + .set_vals = sfc_flow_set_ifrm_unknown_dst_flags, + .spec_check = sfc_flow_check_ifrm_unknown_dst_flags, + }, + { + .flag = EFX_FILTER_MATCH_OUTER_VID, + .vals_count = 1, + .set_vals = sfc_flow_set_outer_vid_flag, + .spec_check = sfc_flow_check_outer_vid_flag, + }, +}; + +/* Get item from array sfc_flow_copy_flags */ +static const struct sfc_flow_copy_flag * +sfc_flow_get_copy_flag(efx_filter_match_flags_t flag) +{ + unsigned int i; + + for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) { + if (sfc_flow_copy_flags[i].flag == flag) + return &sfc_flow_copy_flags[i]; + } + + return NULL; +} + +/** + * Make copies of the specifications, set match flag and values + * of the field that corresponds to it. + * + * @param spec[in, out] + * SFC flow specification to update. + * @param flag[in] + * The match flag to add. + * @param error[out] + * Perform verbose error reporting if not NULL. 
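+ *
+ * For example, adding EFX_FILTER_MATCH_ETHER_TYPE doubles the number of
+ * specifications: the first half are set to match the IPv4 EtherType and
+ * the second half the IPv6 EtherType.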
+ */ +static int +sfc_flow_spec_add_match_flag(struct sfc_flow_spec *spec, + efx_filter_match_flags_t flag, + struct rte_flow_error *error) +{ + unsigned int i; + unsigned int new_filters_count; + unsigned int filters_count_for_one_val; + const struct sfc_flow_copy_flag *copy_flag; + struct sfc_flow_spec_filter *spec_filter = &spec->filter; + int rc; + + copy_flag = sfc_flow_get_copy_flag(flag); + if (copy_flag == NULL) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "Unsupported spec field for copying"); + return -rte_errno; + } + + new_filters_count = spec_filter->count * copy_flag->vals_count; + if (new_filters_count > SF_FLOW_SPEC_NB_FILTERS_MAX) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "Too much EFX specifications in the flow rule"); + return -rte_errno; + } + + /* Copy filters specifications */ + for (i = spec_filter->count; i < new_filters_count; i++) { + spec_filter->filters[i] = + spec_filter->filters[i - spec_filter->count]; + } + + filters_count_for_one_val = spec_filter->count; + spec_filter->count = new_filters_count; + + rc = copy_flag->set_vals(spec, filters_count_for_one_val, error); + if (rc != 0) + return rc; + + return 0; +} + +/** + * Check that the given set of match flags missing in the original filter spec + * could be covered by adding spec copies which specify the corresponding + * flags and packet field values to match. + * + * @param miss_flags[in] + * Flags that are missing until the supported filter. + * @param spec[in] + * Specification to be supplemented. + * @param filter[in] + * SFC filter. + * + * @return + * Number of specifications after copy or 0, if the flags can not be added. + */ +static unsigned int +sfc_flow_check_missing_flags(efx_filter_match_flags_t miss_flags, + efx_filter_spec_t *spec, + struct sfc_filter *filter) +{ + unsigned int i; + efx_filter_match_flags_t copy_flags = 0; + efx_filter_match_flags_t flag; + efx_filter_match_flags_t match = spec->efs_match_flags | miss_flags; + sfc_flow_spec_check *check; + unsigned int multiplier = 1; + + for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) { + flag = sfc_flow_copy_flags[i].flag; + check = sfc_flow_copy_flags[i].spec_check; + if ((flag & miss_flags) == flag) { + if (check != NULL && (!check(match, spec, filter))) + continue; + + copy_flags |= flag; + multiplier *= sfc_flow_copy_flags[i].vals_count; + } + } + + if (copy_flags == miss_flags) + return multiplier; + + return 0; +} + +/** + * Attempt to supplement the specification template to the minimally + * supported set of match flags. To do this, it is necessary to copy + * the specifications, filling them with the values of fields that + * correspond to the missing flags. + * The necessary and sufficient filter is built from the fewest number + * of copies which could be made to cover the minimally required set + * of flags. + * + * @param sa[in] + * SFC adapter. + * @param spec[in, out] + * SFC flow specification to update. + * @param error[out] + * Perform verbose error reporting if not NULL. 
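+ *
+ * Among the supported filters which cover the template, the one requiring
+ * the fewest copies is chosen; ties are resolved in favour of the later
+ * entry in the list of supported filters.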
+ */ +static int +sfc_flow_spec_filters_complete(struct sfc_adapter *sa, + struct sfc_flow_spec *spec, + struct rte_flow_error *error) +{ + struct sfc_flow_spec_filter *spec_filter = &spec->filter; + struct sfc_filter *filter = &sa->filter; + efx_filter_match_flags_t miss_flags; + efx_filter_match_flags_t min_miss_flags = 0; + efx_filter_match_flags_t match; + unsigned int min_multiplier = UINT_MAX; + unsigned int multiplier; + unsigned int i; + int rc; + + match = spec_filter->template.efs_match_flags; + for (i = 0; i < filter->supported_match_num; i++) { + if ((match & filter->supported_match[i]) == match) { + miss_flags = filter->supported_match[i] & (~match); + multiplier = sfc_flow_check_missing_flags(miss_flags, + &spec_filter->template, filter); + if (multiplier > 0) { + if (multiplier <= min_multiplier) { + min_multiplier = multiplier; + min_miss_flags = miss_flags; + } + } + } + } + + if (min_multiplier == UINT_MAX) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "The flow rule pattern is unsupported"); + return -rte_errno; + } + + for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) { + efx_filter_match_flags_t flag = sfc_flow_copy_flags[i].flag; + + if ((flag & min_miss_flags) == flag) { + rc = sfc_flow_spec_add_match_flag(spec, flag, error); + if (rc != 0) + return rc; + } + } + + return 0; +} + +/** + * Check that set of match flags is referred to by a filter. Filter is + * described by match flags with the ability to add OUTER_VID and INNER_VID + * flags. + * + * @param match_flags[in] + * Set of match flags. + * @param flags_pattern[in] + * Pattern of filter match flags. + */ +static boolean_t +sfc_flow_is_match_with_vids(efx_filter_match_flags_t match_flags, + efx_filter_match_flags_t flags_pattern) +{ + if ((match_flags & flags_pattern) != flags_pattern) + return B_FALSE; + + switch (match_flags & ~flags_pattern) { + case 0: + case EFX_FILTER_MATCH_OUTER_VID: + case EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_INNER_VID: + return B_TRUE; + default: + return B_FALSE; + } +} + +/** + * Check whether the spec maps to a hardware filter which is known to be + * ineffective despite being valid. + * + * @param filter[in] + * SFC filter with list of supported filters. + * @param spec[in] + * SFC flow specification. 
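+ *
+ * For example, a specification which matches only the IPv4 or IPv6
+ * EtherType is treated as such an exception when the hardware also
+ * supports IP protocol or address filters.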
+ */ +static boolean_t +sfc_flow_is_match_flags_exception(struct sfc_filter *filter, + struct sfc_flow_spec *spec) +{ + unsigned int i; + uint16_t ether_type; + uint8_t ip_proto; + efx_filter_match_flags_t match_flags; + struct sfc_flow_spec_filter *spec_filter = &spec->filter; + + for (i = 0; i < spec_filter->count; i++) { + match_flags = spec_filter->filters[i].efs_match_flags; + + if (sfc_flow_is_match_with_vids(match_flags, + EFX_FILTER_MATCH_ETHER_TYPE) || + sfc_flow_is_match_with_vids(match_flags, + EFX_FILTER_MATCH_ETHER_TYPE | + EFX_FILTER_MATCH_LOC_MAC)) { + ether_type = spec_filter->filters[i].efs_ether_type; + if (filter->supports_ip_proto_or_addr_filter && + (ether_type == EFX_ETHER_TYPE_IPV4 || + ether_type == EFX_ETHER_TYPE_IPV6)) + return B_TRUE; + } else if (sfc_flow_is_match_with_vids(match_flags, + EFX_FILTER_MATCH_ETHER_TYPE | + EFX_FILTER_MATCH_IP_PROTO) || + sfc_flow_is_match_with_vids(match_flags, + EFX_FILTER_MATCH_ETHER_TYPE | + EFX_FILTER_MATCH_IP_PROTO | + EFX_FILTER_MATCH_LOC_MAC)) { + ip_proto = spec_filter->filters[i].efs_ip_proto; + if (filter->supports_rem_or_local_port_filter && + (ip_proto == EFX_IPPROTO_TCP || + ip_proto == EFX_IPPROTO_UDP)) + return B_TRUE; + } + } + + return B_FALSE; +} + +static int +sfc_flow_validate_match_flags(struct sfc_adapter *sa, + struct rte_flow *flow, + struct rte_flow_error *error) +{ + struct sfc_flow_spec *spec = &flow->spec; + struct sfc_flow_spec_filter *spec_filter = &spec->filter; + efx_filter_spec_t *spec_tmpl = &spec_filter->template; + efx_filter_match_flags_t match_flags = spec_tmpl->efs_match_flags; + int rc; + + /* Initialize the first filter spec with template */ + spec_filter->filters[0] = *spec_tmpl; + spec_filter->count = 1; + + if (!sfc_filter_is_match_supported(sa, match_flags)) { + rc = sfc_flow_spec_filters_complete(sa, &flow->spec, error); + if (rc != 0) + return rc; + } + + if (sfc_flow_is_match_flags_exception(&sa->filter, &flow->spec)) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "The flow rule pattern is unsupported"); + return -rte_errno; + } + + return 0; +} + +static int +sfc_flow_parse_rte_to_filter(struct rte_eth_dev *dev, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow *flow, + struct rte_flow_error *error) +{ + struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev); + struct sfc_flow_spec *spec = &flow->spec; + struct sfc_flow_spec_filter *spec_filter = &spec->filter; + struct sfc_flow_parse_ctx ctx; + int rc; + + ctx.type = SFC_FLOW_PARSE_CTX_FILTER; + ctx.filter = &spec_filter->template; + + rc = sfc_flow_parse_pattern(sfc_flow_items, RTE_DIM(sfc_flow_items), + pattern, &ctx, error); + if (rc != 0) + goto fail_bad_value; + + rc = sfc_flow_parse_actions(sa, actions, flow, error); + if (rc != 0) + goto fail_bad_value; + + rc = sfc_flow_validate_match_flags(sa, flow, error); + if (rc != 0) + goto fail_bad_value; + + return 0; + +fail_bad_value: + return rc; +} + +static int +sfc_flow_parse(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow *flow, + struct rte_flow_error *error) +{ + const struct sfc_flow_ops_by_spec *ops; + int rc; + + rc = sfc_flow_parse_attr(attr, flow, error); + if (rc != 0) + return rc; + + ops = sfc_flow_get_ops_by_spec(flow); + if (ops == NULL || ops->parse == NULL) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "No backend to handle this 
flow"); + return -rte_errno; + } + + return ops->parse(dev, pattern, actions, flow, error); +} + +static struct rte_flow * +sfc_flow_zmalloc(struct rte_flow_error *error) +{ + struct rte_flow *flow; + + flow = rte_zmalloc("sfc_rte_flow", sizeof(*flow), 0); + if (flow == NULL) { + rte_flow_error_set(error, ENOMEM, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "Failed to allocate memory"); + } + + return flow; +} + +static void +sfc_flow_free(__rte_unused struct sfc_adapter *sa, struct rte_flow *flow) +{ + rte_free(flow); +} + +static int +sfc_flow_insert(struct sfc_adapter *sa, struct rte_flow *flow, + struct rte_flow_error *error) +{ + const struct sfc_flow_ops_by_spec *ops; + int rc; + + ops = sfc_flow_get_ops_by_spec(flow); + if (ops == NULL || ops->insert == NULL) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "No backend to handle this flow"); + return rte_errno; + } + + rc = ops->insert(sa, flow); + if (rc != 0) { + rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, "Failed to insert the flow rule"); + } + + return rc; +} + +static int +sfc_flow_remove(struct sfc_adapter *sa, struct rte_flow *flow, + struct rte_flow_error *error) +{ + const struct sfc_flow_ops_by_spec *ops; + int rc; + + ops = sfc_flow_get_ops_by_spec(flow); + if (ops == NULL || ops->remove == NULL) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "No backend to handle this flow"); + return rte_errno; + } + + rc = ops->remove(sa, flow); + if (rc != 0) { + rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, "Failed to remove the flow rule"); + } + + return rc; +} + +static int +sfc_flow_validate(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev); + struct rte_flow *flow; + int rc; + + flow = sfc_flow_zmalloc(error); + if (flow == NULL) + return -rte_errno; + + rc = sfc_flow_parse(dev, attr, pattern, actions, flow, error); + + sfc_flow_free(sa, flow); + + return rc; +} + +static struct rte_flow * +sfc_flow_create(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev); + struct rte_flow *flow = NULL; + int rc; + + flow = sfc_flow_zmalloc(error); + if (flow == NULL) + goto fail_no_mem; + + rc = sfc_flow_parse(dev, attr, pattern, actions, flow, error); + if (rc != 0) + goto fail_bad_value; + + sfc_adapter_lock(sa); + + TAILQ_INSERT_TAIL(&sa->flow_list, flow, entries); + + if (sa->state == SFC_ADAPTER_STARTED) { + rc = sfc_flow_insert(sa, flow, error); + if (rc != 0) + goto fail_flow_insert; + } + + sfc_adapter_unlock(sa); + + return flow; + +fail_flow_insert: + TAILQ_REMOVE(&sa->flow_list, flow, entries); + +fail_bad_value: + sfc_flow_free(sa, flow); + sfc_adapter_unlock(sa); + +fail_no_mem: + return NULL; +} + +static int +sfc_flow_destroy(struct rte_eth_dev *dev, + struct rte_flow *flow, + struct rte_flow_error *error) +{ + struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev); + struct rte_flow *flow_ptr; + int rc = EINVAL; + + sfc_adapter_lock(sa); + + TAILQ_FOREACH(flow_ptr, &sa->flow_list, entries) { + if (flow_ptr == flow) + rc = 0; + } + if (rc != 0) { + rte_flow_error_set(error, rc, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Failed to find flow rule to destroy"); 
+ goto fail_bad_value; + } + + if (sa->state == SFC_ADAPTER_STARTED) + rc = sfc_flow_remove(sa, flow, error); + + TAILQ_REMOVE(&sa->flow_list, flow, entries); + sfc_flow_free(sa, flow); + +fail_bad_value: + sfc_adapter_unlock(sa); + + return -rc; +} + +static int +sfc_flow_flush(struct rte_eth_dev *dev, + struct rte_flow_error *error) +{ + struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev); + struct rte_flow *flow; + int ret = 0; + + sfc_adapter_lock(sa); + + while ((flow = TAILQ_FIRST(&sa->flow_list)) != NULL) { + if (sa->state == SFC_ADAPTER_STARTED) { + int rc; + + rc = sfc_flow_remove(sa, flow, error); + if (rc != 0) + ret = rc; + } + + TAILQ_REMOVE(&sa->flow_list, flow, entries); + sfc_flow_free(sa, flow); + } + + sfc_adapter_unlock(sa); + + return -ret; +} + +static int +sfc_flow_isolate(struct rte_eth_dev *dev, int enable, + struct rte_flow_error *error) +{ + struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev); + int ret = 0; + + sfc_adapter_lock(sa); + if (sa->state != SFC_ADAPTER_INITIALIZED) { + rte_flow_error_set(error, EBUSY, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, "please close the port first"); + ret = -rte_errno; + } else { + sfc_sa2shared(sa)->isolated = (enable) ? B_TRUE : B_FALSE; + } + sfc_adapter_unlock(sa); + + return ret; +} + +const struct rte_flow_ops sfc_flow_ops = { + .validate = sfc_flow_validate, + .create = sfc_flow_create, + .destroy = sfc_flow_destroy, + .flush = sfc_flow_flush, + .query = NULL, + .isolate = sfc_flow_isolate, +}; + +void +sfc_flow_init(struct sfc_adapter *sa) +{ + SFC_ASSERT(sfc_adapter_is_locked(sa)); + + TAILQ_INIT(&sa->flow_list); +} + +void +sfc_flow_fini(struct sfc_adapter *sa) +{ + struct rte_flow *flow; + + SFC_ASSERT(sfc_adapter_is_locked(sa)); + + while ((flow = TAILQ_FIRST(&sa->flow_list)) != NULL) { + TAILQ_REMOVE(&sa->flow_list, flow, entries); + sfc_flow_free(sa, flow); + } +} + +void +sfc_flow_stop(struct sfc_adapter *sa) +{ + struct rte_flow *flow; + + SFC_ASSERT(sfc_adapter_is_locked(sa)); + + TAILQ_FOREACH(flow, &sa->flow_list, entries) + sfc_flow_remove(sa, flow, NULL); +} + +int +sfc_flow_start(struct sfc_adapter *sa) +{ + struct rte_flow *flow; + int rc = 0; + + sfc_log_init(sa, "entry"); + + SFC_ASSERT(sfc_adapter_is_locked(sa)); + + TAILQ_FOREACH(flow, &sa->flow_list, entries) { + rc = sfc_flow_insert(sa, flow, NULL); + if (rc != 0) + goto fail_bad_flow; + } + + sfc_log_init(sa, "done"); + +fail_bad_flow: + return rc; +} diff --git a/src/spdk/dpdk/drivers/net/sfc/sfc_flow.h b/src/spdk/dpdk/drivers/net/sfc/sfc_flow.h new file mode 100644 index 000000000..5a7dad8f0 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/sfc_flow.h @@ -0,0 +1,155 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2017-2019 Solarflare Communications Inc. + * + * This software was jointly developed between OKTET Labs (under contract + * for Solarflare) and Solarflare Communications, Inc. 
+ */ + +#ifndef _SFC_FLOW_H +#define _SFC_FLOW_H + +#include +#include + +#include "efx.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * The maximum number of fully elaborated hardware filter specifications + * which can be produced from a template by means of multiplication, if + * missing match flags are needed to be taken into account + */ +#define SF_FLOW_SPEC_NB_FILTERS_MAX 8 + +/* RSS configuration storage */ +struct sfc_flow_rss { + unsigned int rxq_hw_index_min; + unsigned int rxq_hw_index_max; + unsigned int rss_hash_types; + uint8_t rss_key[EFX_RSS_KEY_SIZE]; + unsigned int rss_tbl[EFX_RSS_TBL_SIZE]; +}; + +/* Flow engines supported by the implementation */ +enum sfc_flow_spec_type { + SFC_FLOW_SPEC_FILTER = 0, + + SFC_FLOW_SPEC_NTYPES +}; + +/* VNIC-specific flow specification */ +struct sfc_flow_spec_filter { + /* partial specification from flow rule */ + efx_filter_spec_t template; + /* fully elaborated hardware filters specifications */ + efx_filter_spec_t filters[SF_FLOW_SPEC_NB_FILTERS_MAX]; + /* number of complete specifications */ + unsigned int count; + /* RSS toggle */ + boolean_t rss; + /* RSS configuration */ + struct sfc_flow_rss rss_conf; +}; + +/* Flow specification */ +struct sfc_flow_spec { + /* Flow specification type (engine-based) */ + enum sfc_flow_spec_type type; + + RTE_STD_C11 + union { + /* Filter-based (VNIC level flows) specification */ + struct sfc_flow_spec_filter filter; + }; +}; + +/* PMD-specific definition of the opaque type from rte_flow.h */ +struct rte_flow { + struct sfc_flow_spec spec; /* flow specification */ + TAILQ_ENTRY(rte_flow) entries; /* flow list entries */ +}; + +TAILQ_HEAD(sfc_flow_list, rte_flow); + +extern const struct rte_flow_ops sfc_flow_ops; + +enum sfc_flow_item_layers { + SFC_FLOW_ITEM_ANY_LAYER, + SFC_FLOW_ITEM_START_LAYER, + SFC_FLOW_ITEM_L2, + SFC_FLOW_ITEM_L3, + SFC_FLOW_ITEM_L4, +}; + +/* Flow parse context types */ +enum sfc_flow_parse_ctx_type { + SFC_FLOW_PARSE_CTX_FILTER = 0, + + SFC_FLOW_PARSE_CTX_NTYPES +}; + +/* Flow parse context */ +struct sfc_flow_parse_ctx { + enum sfc_flow_parse_ctx_type type; + + RTE_STD_C11 + union { + /* Context pointer valid for filter-based (VNIC) flows */ + efx_filter_spec_t *filter; + }; +}; + +typedef int (sfc_flow_item_parse)(const struct rte_flow_item *item, + struct sfc_flow_parse_ctx *parse_ctx, + struct rte_flow_error *error); + +struct sfc_flow_item { + enum rte_flow_item_type type; /* Type of item */ + enum sfc_flow_item_layers layer; /* Layer of item */ + enum sfc_flow_item_layers prev_layer; /* Previous layer of item */ + enum sfc_flow_parse_ctx_type ctx_type; /* Parse context type */ + sfc_flow_item_parse *parse; /* Parsing function */ +}; + +int sfc_flow_parse_pattern(const struct sfc_flow_item *flow_items, + unsigned int nb_flow_items, + const struct rte_flow_item pattern[], + struct sfc_flow_parse_ctx *parse_ctx, + struct rte_flow_error *error); + +int sfc_flow_parse_init(const struct rte_flow_item *item, + const void **spec_ptr, + const void **mask_ptr, + const void *supp_mask, + const void *def_mask, + unsigned int size, + struct rte_flow_error *error); + +struct sfc_adapter; + +void sfc_flow_init(struct sfc_adapter *sa); +void sfc_flow_fini(struct sfc_adapter *sa); +int sfc_flow_start(struct sfc_adapter *sa); +void sfc_flow_stop(struct sfc_adapter *sa); + +typedef int (sfc_flow_parse_cb_t)(struct rte_eth_dev *dev, + const struct rte_flow_item items[], + const struct rte_flow_action actions[], + struct rte_flow *flow, + struct rte_flow_error *error); + 
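+/*
+ * Callbacks of the kinds declared here are gathered into a
+ * struct sfc_flow_ops_by_spec, which the generic code in sfc_flow.c looks
+ * up via sfc_flow_get_ops_by_spec() based on the specification type and
+ * invokes as ops->parse, ops->insert and ops->remove.
+ *
+ * Illustrative sketch only (the struct layout is assumed from the way the
+ * callbacks are used in sfc_flow.c):
+ *
+ *   static const struct sfc_flow_ops_by_spec sfc_flow_ops_filter = {
+ *           .parse = sfc_flow_parse_rte_to_filter,
+ *           .insert = sfc_flow_filter_insert,
+ *           .remove = sfc_flow_filter_remove,
+ *   };
+ */
+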
+typedef int (sfc_flow_insert_cb_t)(struct sfc_adapter *sa, + struct rte_flow *flow); + +typedef int (sfc_flow_remove_cb_t)(struct sfc_adapter *sa, + struct rte_flow *flow); + +#ifdef __cplusplus +} +#endif +#endif /* _SFC_FLOW_H */ diff --git a/src/spdk/dpdk/drivers/net/sfc/sfc_intr.c b/src/spdk/dpdk/drivers/net/sfc/sfc_intr.c new file mode 100644 index 000000000..80feab68f --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/sfc_intr.c @@ -0,0 +1,354 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2016-2019 Solarflare Communications Inc. + * + * This software was jointly developed between OKTET Labs (under contract + * for Solarflare) and Solarflare Communications, Inc. + */ + +/* + * At the momemt of writing DPDK v16.07 has notion of two types of + * interrupts: LSC (link status change) and RXQ (receive indication). + * It allows to register interrupt callback for entire device which is + * not intended to be used for receive indication (i.e. link status + * change indication only). The handler has no information which HW + * interrupt has triggered it, so we don't know which event queue should + * be polled/reprimed (except qmask in the case of legacy line interrupt). + */ + +#include +#include + +#include "efx.h" + +#include "sfc.h" +#include "sfc_log.h" +#include "sfc_ev.h" + +static void +sfc_intr_handle_mgmt_evq(struct sfc_adapter *sa) +{ + struct sfc_evq *evq; + + rte_spinlock_lock(&sa->mgmt_evq_lock); + + evq = sa->mgmt_evq; + + if (!sa->mgmt_evq_running) { + sfc_log_init(sa, "interrupt on not running management EVQ %u", + evq->evq_index); + } else { + sfc_ev_qpoll(evq); + + if (sfc_ev_qprime(evq) != 0) + sfc_err(sa, "cannot prime EVQ %u", evq->evq_index); + } + + rte_spinlock_unlock(&sa->mgmt_evq_lock); +} + +static void +sfc_intr_line_handler(void *cb_arg) +{ + struct sfc_adapter *sa = (struct sfc_adapter *)cb_arg; + efx_nic_t *enp = sa->nic; + boolean_t fatal; + uint32_t qmask; + unsigned int lsc_seq = sa->port.lsc_seq; + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(sa->eth_dev); + + sfc_log_init(sa, "entry"); + + if (sa->state != SFC_ADAPTER_STARTED && + sa->state != SFC_ADAPTER_STARTING && + sa->state != SFC_ADAPTER_STOPPING) { + sfc_log_init(sa, + "interrupt on stopped adapter, don't reenable"); + goto exit; + } + + efx_intr_status_line(enp, &fatal, &qmask); + if (fatal) { + (void)efx_intr_disable(enp); + (void)efx_intr_fatal(enp); + sfc_err(sa, "fatal, interrupts disabled"); + goto exit; + } + + if (qmask & (1 << sa->mgmt_evq_index)) + sfc_intr_handle_mgmt_evq(sa); + + if (rte_intr_ack(&pci_dev->intr_handle) != 0) + sfc_err(sa, "cannot reenable interrupts"); + + sfc_log_init(sa, "done"); + +exit: + if (lsc_seq != sa->port.lsc_seq) { + sfc_notice(sa, "link status change event: link %s", + sa->eth_dev->data->dev_link.link_status ? 
+ "UP" : "DOWN"); + _rte_eth_dev_callback_process(sa->eth_dev, + RTE_ETH_EVENT_INTR_LSC, + NULL); + } +} + +static void +sfc_intr_message_handler(void *cb_arg) +{ + struct sfc_adapter *sa = (struct sfc_adapter *)cb_arg; + efx_nic_t *enp = sa->nic; + boolean_t fatal; + unsigned int lsc_seq = sa->port.lsc_seq; + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(sa->eth_dev); + + sfc_log_init(sa, "entry"); + + if (sa->state != SFC_ADAPTER_STARTED && + sa->state != SFC_ADAPTER_STARTING && + sa->state != SFC_ADAPTER_STOPPING) { + sfc_log_init(sa, "adapter not-started, don't reenable"); + goto exit; + } + + efx_intr_status_message(enp, sa->mgmt_evq_index, &fatal); + if (fatal) { + (void)efx_intr_disable(enp); + (void)efx_intr_fatal(enp); + sfc_err(sa, "fatal, interrupts disabled"); + goto exit; + } + + sfc_intr_handle_mgmt_evq(sa); + + if (rte_intr_ack(&pci_dev->intr_handle) != 0) + sfc_err(sa, "cannot reenable interrupts"); + + sfc_log_init(sa, "done"); + +exit: + if (lsc_seq != sa->port.lsc_seq) { + sfc_notice(sa, "link status change event"); + _rte_eth_dev_callback_process(sa->eth_dev, + RTE_ETH_EVENT_INTR_LSC, + NULL); + } +} + +int +sfc_intr_start(struct sfc_adapter *sa) +{ + struct sfc_intr *intr = &sa->intr; + struct rte_intr_handle *intr_handle; + struct rte_pci_device *pci_dev; + int rc; + + sfc_log_init(sa, "entry"); + + /* + * The EFX common code event queue module depends on the interrupt + * module. Ensure that the interrupt module is always initialized + * (even if interrupts are not used). Status memory is required + * for Siena only and may be NULL for EF10. + */ + sfc_log_init(sa, "efx_intr_init"); + rc = efx_intr_init(sa->nic, intr->type, NULL); + if (rc != 0) + goto fail_intr_init; + + pci_dev = RTE_ETH_DEV_TO_PCI(sa->eth_dev); + intr_handle = &pci_dev->intr_handle; + + if (intr->handler != NULL) { + if (intr->rxq_intr && rte_intr_cap_multiple(intr_handle)) { + uint32_t intr_vector; + + intr_vector = sa->eth_dev->data->nb_rx_queues; + rc = rte_intr_efd_enable(intr_handle, intr_vector); + if (rc != 0) + goto fail_rte_intr_efd_enable; + } + if (rte_intr_dp_is_en(intr_handle)) { + intr_handle->intr_vec = + rte_calloc("intr_vec", + sa->eth_dev->data->nb_rx_queues, sizeof(int), + 0); + if (intr_handle->intr_vec == NULL) { + sfc_err(sa, + "Failed to allocate %d rx_queues intr_vec", + sa->eth_dev->data->nb_rx_queues); + goto fail_intr_vector_alloc; + } + } + + sfc_log_init(sa, "rte_intr_callback_register"); + rc = rte_intr_callback_register(intr_handle, intr->handler, + (void *)sa); + if (rc != 0) { + sfc_err(sa, + "cannot register interrupt handler (rc=%d)", + rc); + /* + * Convert error code from negative returned by RTE API + * to positive used in the driver. + */ + rc = -rc; + goto fail_rte_intr_cb_reg; + } + + sfc_log_init(sa, "rte_intr_enable"); + rc = rte_intr_enable(intr_handle); + if (rc != 0) { + sfc_err(sa, "cannot enable interrupts (rc=%d)", rc); + /* + * Convert error code from negative returned by RTE API + * to positive used in the driver. 
+ */ + rc = -rc; + goto fail_rte_intr_enable; + } + + sfc_log_init(sa, "efx_intr_enable"); + efx_intr_enable(sa->nic); + } + + sfc_log_init(sa, "done type=%u max_intr=%d nb_efd=%u vec=%p", + intr_handle->type, intr_handle->max_intr, + intr_handle->nb_efd, intr_handle->intr_vec); + return 0; + +fail_rte_intr_enable: + rte_intr_callback_unregister(intr_handle, intr->handler, (void *)sa); + +fail_rte_intr_cb_reg: + rte_free(intr_handle->intr_vec); + +fail_intr_vector_alloc: + rte_intr_efd_disable(intr_handle); + +fail_rte_intr_efd_enable: + efx_intr_fini(sa->nic); + +fail_intr_init: + sfc_log_init(sa, "failed %d", rc); + return rc; +} + +void +sfc_intr_stop(struct sfc_adapter *sa) +{ + struct sfc_intr *intr = &sa->intr; + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(sa->eth_dev); + + sfc_log_init(sa, "entry"); + + if (intr->handler != NULL) { + struct rte_intr_handle *intr_handle; + int rc; + + efx_intr_disable(sa->nic); + + intr_handle = &pci_dev->intr_handle; + + rte_free(intr_handle->intr_vec); + rte_intr_efd_disable(intr_handle); + + if (rte_intr_disable(intr_handle) != 0) + sfc_err(sa, "cannot disable interrupts"); + + while ((rc = rte_intr_callback_unregister(intr_handle, + intr->handler, (void *)sa)) == -EAGAIN) + ; + if (rc != 1) + sfc_err(sa, + "cannot unregister interrupt handler %d", + rc); + } + + efx_intr_fini(sa->nic); + + sfc_log_init(sa, "done"); +} + +int +sfc_intr_configure(struct sfc_adapter *sa) +{ + struct sfc_intr *intr = &sa->intr; + + sfc_log_init(sa, "entry"); + + intr->handler = NULL; + intr->lsc_intr = (sa->eth_dev->data->dev_conf.intr_conf.lsc != 0); + intr->rxq_intr = (sa->eth_dev->data->dev_conf.intr_conf.rxq != 0); + + if (!intr->lsc_intr && !intr->rxq_intr) + goto done; + + switch (intr->type) { + case EFX_INTR_MESSAGE: + intr->handler = sfc_intr_message_handler; + break; + case EFX_INTR_LINE: + intr->handler = sfc_intr_line_handler; + break; + case EFX_INTR_INVALID: + sfc_warn(sa, "interrupts are not supported"); + break; + default: + sfc_panic(sa, "unexpected EFX interrupt type %u\n", intr->type); + break; + } + +done: + sfc_log_init(sa, "done"); + return 0; +} + +void +sfc_intr_close(struct sfc_adapter *sa) +{ + sfc_log_init(sa, "entry"); + + sfc_log_init(sa, "done"); +} + +int +sfc_intr_attach(struct sfc_adapter *sa) +{ + struct sfc_intr *intr = &sa->intr; + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(sa->eth_dev); + + sfc_log_init(sa, "entry"); + + switch (pci_dev->intr_handle.type) { +#ifdef RTE_EXEC_ENV_LINUX + case RTE_INTR_HANDLE_UIO_INTX: + case RTE_INTR_HANDLE_VFIO_LEGACY: + intr->type = EFX_INTR_LINE; + break; + case RTE_INTR_HANDLE_UIO: + case RTE_INTR_HANDLE_VFIO_MSI: + case RTE_INTR_HANDLE_VFIO_MSIX: + intr->type = EFX_INTR_MESSAGE; + break; +#endif + default: + intr->type = EFX_INTR_INVALID; + break; + } + + sfc_log_init(sa, "done"); + return 0; +} + +void +sfc_intr_detach(struct sfc_adapter *sa) +{ + sfc_log_init(sa, "entry"); + + sa->intr.type = EFX_INTR_INVALID; + + sfc_log_init(sa, "done"); +} diff --git a/src/spdk/dpdk/drivers/net/sfc/sfc_kvargs.c b/src/spdk/dpdk/drivers/net/sfc/sfc_kvargs.c new file mode 100644 index 000000000..13e9665bb --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/sfc_kvargs.c @@ -0,0 +1,123 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2016-2019 Solarflare Communications Inc. + * + * This software was jointly developed between OKTET Labs (under contract + * for Solarflare) and Solarflare Communications, Inc. 
+ */ + +#include <stdbool.h> +#include <strings.h> + +#include <rte_devargs.h> +#include <rte_kvargs.h> + +#include "sfc.h" +#include "sfc_kvargs.h" + +int +sfc_kvargs_parse(struct sfc_adapter *sa) +{ + struct rte_eth_dev *eth_dev = (sa)->eth_dev; + struct rte_devargs *devargs = eth_dev->device->devargs; + const char **params = (const char *[]){ + SFC_KVARG_STATS_UPDATE_PERIOD_MS, + SFC_KVARG_PERF_PROFILE, + SFC_KVARG_RX_DATAPATH, + SFC_KVARG_TX_DATAPATH, + SFC_KVARG_FW_VARIANT, + SFC_KVARG_RXD_WAIT_TIMEOUT_NS, + NULL, + }; + + if (devargs == NULL) + return 0; + + sa->kvargs = rte_kvargs_parse(devargs->args, params); + if (sa->kvargs == NULL) + return EINVAL; + + return 0; +} + +void +sfc_kvargs_cleanup(struct sfc_adapter *sa) +{ + rte_kvargs_free(sa->kvargs); +} + +static int +sfc_kvarg_match_value(const char *value, const char * const *values, + unsigned int n_values) +{ + unsigned int i; + + for (i = 0; i < n_values; ++i) + if (strcasecmp(value, values[i]) == 0) + return 1; + + return 0; +} + +int +sfc_kvargs_process(struct sfc_adapter *sa, const char *key_match, + arg_handler_t handler, void *opaque_arg) +{ + if (sa->kvargs == NULL) + return 0; + + return -rte_kvargs_process(sa->kvargs, key_match, handler, opaque_arg); +} + +int +sfc_kvarg_bool_handler(__rte_unused const char *key, + const char *value_str, void *opaque) +{ + const char * const true_strs[] = { + "1", "y", "yes", "on", "true" + }; + const char * const false_strs[] = { + "0", "n", "no", "off", "false" + }; + bool *value = opaque; + + if (sfc_kvarg_match_value(value_str, true_strs, + RTE_DIM(true_strs))) + *value = true; + else if (sfc_kvarg_match_value(value_str, false_strs, + RTE_DIM(false_strs))) + *value = false; + else + return -EINVAL; + + return 0; +} + +int +sfc_kvarg_long_handler(__rte_unused const char *key, + const char *value_str, void *opaque) +{ + long value; + char *endptr; + + if (!value_str || !opaque) + return -EINVAL; + + value = strtol(value_str, &endptr, 0); + if (endptr == value_str) + return -EINVAL; + + *(long *)opaque = value; + + return 0; +} + +int +sfc_kvarg_string_handler(__rte_unused const char *key, + const char *value_str, void *opaque) +{ + *(const char **)opaque = value_str; + + return 0; +} diff --git a/src/spdk/dpdk/drivers/net/sfc/sfc_kvargs.h b/src/spdk/dpdk/drivers/net/sfc/sfc_kvargs.h new file mode 100644 index 000000000..f9d10e71c --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/sfc_kvargs.h @@ -0,0 +1,85 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2016-2019 Solarflare Communications Inc. + * + * This software was jointly developed between OKTET Labs (under contract + * for Solarflare) and Solarflare Communications, Inc.
+ */ + +#ifndef _SFC_KVARGS_H +#define _SFC_KVARGS_H + +#include <rte_kvargs.h> + +#ifdef __cplusplus +extern "C" { +#endif + +#define SFC_KVARG_VALUES_BOOL "[1|y|yes|on|0|n|no|off]" + +#define SFC_KVARG_PERF_PROFILE "perf_profile" + +#define SFC_KVARG_PERF_PROFILE_AUTO "auto" +#define SFC_KVARG_PERF_PROFILE_THROUGHPUT "throughput" +#define SFC_KVARG_PERF_PROFILE_LOW_LATENCY "low-latency" +#define SFC_KVARG_VALUES_PERF_PROFILE \ + "[" SFC_KVARG_PERF_PROFILE_AUTO "|" \ + SFC_KVARG_PERF_PROFILE_THROUGHPUT "|" \ + SFC_KVARG_PERF_PROFILE_LOW_LATENCY "]" + +#define SFC_KVARG_STATS_UPDATE_PERIOD_MS "stats_update_period_ms" + +#define SFC_KVARG_DATAPATH_EFX "efx" +#define SFC_KVARG_DATAPATH_EF10 "ef10" +#define SFC_KVARG_DATAPATH_EF10_SIMPLE "ef10_simple" +#define SFC_KVARG_DATAPATH_EF10_ESSB "ef10_essb" + +#define SFC_KVARG_RX_DATAPATH "rx_datapath" +#define SFC_KVARG_VALUES_RX_DATAPATH \ + "[" SFC_KVARG_DATAPATH_EFX "|" \ + SFC_KVARG_DATAPATH_EF10 "|" \ + SFC_KVARG_DATAPATH_EF10_ESSB "]" + +#define SFC_KVARG_TX_DATAPATH "tx_datapath" +#define SFC_KVARG_VALUES_TX_DATAPATH \ + "[" SFC_KVARG_DATAPATH_EFX "|" \ + SFC_KVARG_DATAPATH_EF10 "|" \ + SFC_KVARG_DATAPATH_EF10_SIMPLE "]" + +#define SFC_KVARG_FW_VARIANT "fw_variant" + +#define SFC_KVARG_FW_VARIANT_DONT_CARE "dont-care" +#define SFC_KVARG_FW_VARIANT_FULL_FEATURED "full-feature" +#define SFC_KVARG_FW_VARIANT_LOW_LATENCY "ultra-low-latency" +#define SFC_KVARG_FW_VARIANT_PACKED_STREAM "capture-packed-stream" +#define SFC_KVARG_FW_VARIANT_DPDK "dpdk" +#define SFC_KVARG_VALUES_FW_VARIANT \ + "[" SFC_KVARG_FW_VARIANT_DONT_CARE "|" \ + SFC_KVARG_FW_VARIANT_FULL_FEATURED "|" \ + SFC_KVARG_FW_VARIANT_LOW_LATENCY "|" \ + SFC_KVARG_FW_VARIANT_PACKED_STREAM "|" \ + SFC_KVARG_FW_VARIANT_DPDK "]" + +#define SFC_KVARG_RXD_WAIT_TIMEOUT_NS "rxd_wait_timeout_ns" + +struct sfc_adapter; + +int sfc_kvargs_parse(struct sfc_adapter *sa); +void sfc_kvargs_cleanup(struct sfc_adapter *sa); + +int sfc_kvargs_process(struct sfc_adapter *sa, const char *key_match, + arg_handler_t handler, void *opaque_arg); + +int sfc_kvarg_bool_handler(const char *key, const char *value_str, + void *opaque); +int sfc_kvarg_long_handler(const char *key, const char *value_str, + void *opaque); +int sfc_kvarg_string_handler(const char *key, const char *value_str, + void *opaque); + +#ifdef __cplusplus +} +#endif + +#endif /* _SFC_KVARGS_H */ diff --git a/src/spdk/dpdk/drivers/net/sfc/sfc_log.h b/src/spdk/dpdk/drivers/net/sfc/sfc_log.h new file mode 100644 index 000000000..5383091c7 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/sfc_log.h @@ -0,0 +1,104 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2016-2019 Solarflare Communications Inc. + * + * This software was jointly developed between OKTET Labs (under contract + * for Solarflare) and Solarflare Communications, Inc. + */ + +#ifndef _SFC_LOG_H_ +#define _SFC_LOG_H_ + +/** Generic driver log type */ +extern uint32_t sfc_logtype_driver; + +/** Common log type name prefix */ +#define SFC_LOGTYPE_PREFIX "pmd.net.sfc." + +/** Log PMD generic message, add a prefix and a line break */ +#define SFC_GENERIC_LOG(level, ...)
\ + rte_log(RTE_LOG_ ## level, sfc_logtype_driver, \ + RTE_FMT("PMD: " RTE_FMT_HEAD(__VA_ARGS__ ,) "\n", \ + RTE_FMT_TAIL(__VA_ARGS__ ,))) + +/** Name prefix for the per-device log type used to report basic information */ +#define SFC_LOGTYPE_MAIN_STR SFC_LOGTYPE_PREFIX "main" + +/** Device MCDI log type name prefix */ +#define SFC_LOGTYPE_MCDI_STR SFC_LOGTYPE_PREFIX "mcdi" + +/** Level value used by MCDI log statements */ +#define SFC_LOG_LEVEL_MCDI RTE_LOG_INFO + +/* Log PMD message, automatically add prefix and \n */ +#define SFC_LOG(sas, level, type, ...) \ + do { \ + const struct sfc_adapter_shared *_sas = (sas); \ + \ + rte_log(level, type, \ + RTE_FMT("PMD: sfc_efx " \ + PCI_PRI_FMT " #%" PRIu16 \ + ": " RTE_FMT_HEAD(__VA_ARGS__ ,) "\n", \ + _sas->pci_addr.domain, \ + _sas->pci_addr.bus, \ + _sas->pci_addr.devid, \ + _sas->pci_addr.function, \ + _sas->port_id, \ + RTE_FMT_TAIL(__VA_ARGS__,))); \ + } while (0) + +#define sfc_err(sa, ...) \ + do { \ + const struct sfc_adapter *_sa = (sa); \ + \ + SFC_LOG(_sa->priv.shared, RTE_LOG_ERR, \ + _sa->priv.logtype_main, __VA_ARGS__); \ + } while (0) + +#define sfc_warn(sa, ...) \ + do { \ + const struct sfc_adapter *_sa = (sa); \ + \ + SFC_LOG(_sa->priv.shared, RTE_LOG_WARNING, \ + _sa->priv.logtype_main, __VA_ARGS__); \ + } while (0) + +#define sfc_notice(sa, ...) \ + do { \ + const struct sfc_adapter *_sa = (sa); \ + \ + SFC_LOG(_sa->priv.shared, RTE_LOG_NOTICE, \ + _sa->priv.logtype_main, __VA_ARGS__); \ + } while (0) + +#define sfc_info(sa, ...) \ + do { \ + const struct sfc_adapter *_sa = (sa); \ + \ + SFC_LOG(_sa->priv.shared, RTE_LOG_INFO, \ + _sa->priv.logtype_main, __VA_ARGS__); \ + } while (0) + +#define sfc_log_init(sa, ...) \ + do { \ + const struct sfc_adapter *_sa = (sa); \ + \ + SFC_LOG(_sa->priv.shared, RTE_LOG_INFO, \ + _sa->priv.logtype_main, \ + RTE_FMT("%s(): " \ + RTE_FMT_HEAD(__VA_ARGS__ ,), \ + __func__, \ + RTE_FMT_TAIL(__VA_ARGS__ ,))); \ + } while (0) + +#define sfc_log_mcdi(sa, ...) \ + do { \ + const struct sfc_adapter *_sa = (sa); \ + \ + SFC_LOG(_sa->priv.shared, SFC_LOG_LEVEL_MCDI, \ + _sa->mcdi.logtype, __VA_ARGS__); \ + } while (0) + + +#endif /* _SFC_LOG_H_ */ diff --git a/src/spdk/dpdk/drivers/net/sfc/sfc_mcdi.c b/src/spdk/dpdk/drivers/net/sfc/sfc_mcdi.c new file mode 100644 index 000000000..872e4e76b --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/sfc_mcdi.c @@ -0,0 +1,311 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2016-2019 Solarflare Communications Inc. + * + * This software was jointly developed between OKTET Labs (under contract + * for Solarflare) and Solarflare Communications, Inc. 
+ */ + +#include <rte_cycles.h> + +#include "efx.h" +#include "efx_mcdi.h" +#include "efx_regs_mcdi.h" + +#include "sfc.h" +#include "sfc_log.h" +#include "sfc_ev.h" + +#define SFC_MCDI_POLL_INTERVAL_MIN_US 10 /* 10us in 1us units */ +#define SFC_MCDI_POLL_INTERVAL_MAX_US (US_PER_S / 10) /* 100ms in 1us units */ +#define SFC_MCDI_WATCHDOG_INTERVAL_US (10 * US_PER_S) /* 10s in 1us units */ + +static void +sfc_mcdi_timeout(struct sfc_adapter *sa) +{ + sfc_warn(sa, "MC TIMEOUT"); + + sfc_panic(sa, "MCDI timeout handling is not implemented\n"); +} + +static inline boolean_t +sfc_mcdi_proxy_event_available(struct sfc_adapter *sa) +{ + struct sfc_mcdi *mcdi = &sa->mcdi; + + mcdi->proxy_handle = 0; + mcdi->proxy_result = ETIMEDOUT; + sfc_ev_mgmt_qpoll(sa); + if (mcdi->proxy_result != ETIMEDOUT) + return B_TRUE; + + return B_FALSE; +} + +static void +sfc_mcdi_poll(struct sfc_adapter *sa, boolean_t proxy) +{ + efx_nic_t *enp; + unsigned int delay_total; + unsigned int delay_us; + boolean_t aborted __rte_unused; + + delay_total = 0; + delay_us = SFC_MCDI_POLL_INTERVAL_MIN_US; + enp = sa->nic; + + do { + boolean_t poll_completed; + + poll_completed = (proxy) ? sfc_mcdi_proxy_event_available(sa) : + efx_mcdi_request_poll(enp); + if (poll_completed) + return; + + if (delay_total > SFC_MCDI_WATCHDOG_INTERVAL_US) { + if (!proxy) { + aborted = efx_mcdi_request_abort(enp); + SFC_ASSERT(aborted); + sfc_mcdi_timeout(sa); + } + + return; + } + + rte_delay_us(delay_us); + + delay_total += delay_us; + + /* Exponentially back off the poll frequency */ + RTE_BUILD_BUG_ON(SFC_MCDI_POLL_INTERVAL_MAX_US > UINT_MAX / 2); + delay_us *= 2; + if (delay_us > SFC_MCDI_POLL_INTERVAL_MAX_US) + delay_us = SFC_MCDI_POLL_INTERVAL_MAX_US; + + } while (1); +} + +static void +sfc_mcdi_execute(void *arg, efx_mcdi_req_t *emrp) +{ + struct sfc_adapter *sa = (struct sfc_adapter *)arg; + struct sfc_mcdi *mcdi = &sa->mcdi; + uint32_t proxy_handle; + + rte_spinlock_lock(&mcdi->lock); + + SFC_ASSERT(mcdi->state == SFC_MCDI_INITIALIZED); + + efx_mcdi_request_start(sa->nic, emrp, B_FALSE); + sfc_mcdi_poll(sa, B_FALSE); + + if (efx_mcdi_get_proxy_handle(sa->nic, emrp, &proxy_handle) == 0) { + /* + * Authorization is required for the MCDI request; + * wait for an MCDI proxy response event to bring + * a non-zero proxy handle (should be the same as + * the value obtained above) and operation status + */ + sfc_mcdi_poll(sa, B_TRUE); + + if ((mcdi->proxy_handle != 0) && + (mcdi->proxy_handle != proxy_handle)) { + sfc_err(sa, "Unexpected MCDI proxy event"); + emrp->emr_rc = EFAULT; + } else if (mcdi->proxy_result == 0) { + /* + * Authorization succeeded; re-issue the original + * request and poll for an ordinary MCDI response + */ + efx_mcdi_request_start(sa->nic, emrp, B_FALSE); + sfc_mcdi_poll(sa, B_FALSE); + } else { + emrp->emr_rc = mcdi->proxy_result; + sfc_err(sa, "MCDI proxy authorization failed " + "(handle=%08x, result=%d)", + proxy_handle, mcdi->proxy_result); + } + } + + rte_spinlock_unlock(&mcdi->lock); +} + +static void +sfc_mcdi_ev_cpl(void *arg) +{ + struct sfc_adapter *sa = (struct sfc_adapter *)arg; + struct sfc_mcdi *mcdi __rte_unused; + + mcdi = &sa->mcdi; + SFC_ASSERT(mcdi->state == SFC_MCDI_INITIALIZED); + + /* MCDI is polled, completions are not expected */ + SFC_ASSERT(0); +} + +static void +sfc_mcdi_exception(void *arg, efx_mcdi_exception_t eme) +{ + struct sfc_adapter *sa = (struct sfc_adapter *)arg; + + sfc_warn(sa, "MC %s", + (eme == EFX_MCDI_EXCEPTION_MC_REBOOT) ? "REBOOT" : + (eme == EFX_MCDI_EXCEPTION_MC_BADASSERT) ?
"BADASSERT" : "UNKNOWN"); + + sfc_schedule_restart(sa); +} + +#define SFC_MCDI_LOG_BUF_SIZE 128 + +static size_t +sfc_mcdi_do_log(const struct sfc_adapter *sa, + char *buffer, void *data, size_t data_size, + size_t pfxsize, size_t position) +{ + uint32_t *words = data; + /* Space separator plus 2 characters per byte */ + const size_t word_str_space = 1 + 2 * sizeof(*words); + size_t i; + + for (i = 0; i < data_size; i += sizeof(*words)) { + if (position + word_str_space >= + SFC_MCDI_LOG_BUF_SIZE) { + /* Flush at SFC_MCDI_LOG_BUF_SIZE with backslash + * at the end which is required by netlogdecode. + */ + buffer[position] = '\0'; + sfc_log_mcdi(sa, "%s \\", buffer); + /* Preserve prefix for the next log message */ + position = pfxsize; + } + position += snprintf(buffer + position, + SFC_MCDI_LOG_BUF_SIZE - position, + " %08x", *words); + words++; + } + return position; +} + +static void +sfc_mcdi_logger(void *arg, efx_log_msg_t type, + void *header, size_t header_size, + void *data, size_t data_size) +{ + struct sfc_adapter *sa = (struct sfc_adapter *)arg; + char buffer[SFC_MCDI_LOG_BUF_SIZE]; + size_t pfxsize; + size_t start; + + /* + * Unlike the other cases, MCDI logging implies more onerous work + * needed to produce a message. If the dynamic log level prevents + * the end result from being printed, the CPU time will be wasted. + * + * To avoid wasting time, the actual level is examined in advance. + */ + if (rte_log_get_level(sa->mcdi.logtype) < (int)SFC_LOG_LEVEL_MCDI) + return; + + /* The format including prefix added by sfc_log_mcdi() is the format + * consumed by the Solarflare netlogdecode tool. + */ + pfxsize = snprintf(buffer, sizeof(buffer), "MCDI RPC %s:", + type == EFX_LOG_MCDI_REQUEST ? "REQ" : + type == EFX_LOG_MCDI_RESPONSE ? "RESP" : "???"); + start = sfc_mcdi_do_log(sa, buffer, header, header_size, + pfxsize, pfxsize); + start = sfc_mcdi_do_log(sa, buffer, data, data_size, pfxsize, start); + if (start != pfxsize) { + buffer[start] = '\0'; + sfc_log_mcdi(sa, "%s", buffer); + } +} + +static void +sfc_mcdi_ev_proxy_response(void *arg, uint32_t handle, efx_rc_t result) +{ + struct sfc_adapter *sa = (struct sfc_adapter *)arg; + struct sfc_mcdi *mcdi = &sa->mcdi; + + mcdi->proxy_handle = handle; + mcdi->proxy_result = result; +} + +int +sfc_mcdi_init(struct sfc_adapter *sa) +{ + struct sfc_mcdi *mcdi; + size_t max_msg_size; + efx_mcdi_transport_t *emtp; + int rc; + + sfc_log_init(sa, "entry"); + + mcdi = &sa->mcdi; + + SFC_ASSERT(mcdi->state == SFC_MCDI_UNINITIALIZED); + + rte_spinlock_init(&mcdi->lock); + + mcdi->state = SFC_MCDI_INITIALIZED; + + max_msg_size = sizeof(uint32_t) + MCDI_CTL_SDU_LEN_MAX_V2; + rc = sfc_dma_alloc(sa, "mcdi", 0, max_msg_size, sa->socket_id, + &mcdi->mem); + if (rc != 0) + goto fail_dma_alloc; + + mcdi->logtype = sfc_register_logtype(&sa->priv.shared->pci_addr, + SFC_LOGTYPE_MCDI_STR, + RTE_LOG_NOTICE); + + emtp = &mcdi->transport; + emtp->emt_context = sa; + emtp->emt_dma_mem = &mcdi->mem; + emtp->emt_execute = sfc_mcdi_execute; + emtp->emt_ev_cpl = sfc_mcdi_ev_cpl; + emtp->emt_exception = sfc_mcdi_exception; + emtp->emt_logger = sfc_mcdi_logger; + emtp->emt_ev_proxy_response = sfc_mcdi_ev_proxy_response; + + sfc_log_init(sa, "init MCDI"); + rc = efx_mcdi_init(sa->nic, emtp); + if (rc != 0) + goto fail_mcdi_init; + + return 0; + +fail_mcdi_init: + memset(emtp, 0, sizeof(*emtp)); + sfc_dma_free(sa, &mcdi->mem); + +fail_dma_alloc: + mcdi->state = SFC_MCDI_UNINITIALIZED; + return rc; +} + +void +sfc_mcdi_fini(struct sfc_adapter *sa) +{ + struct 
sfc_mcdi *mcdi; + efx_mcdi_transport_t *emtp; + + sfc_log_init(sa, "entry"); + + mcdi = &sa->mcdi; + emtp = &mcdi->transport; + + rte_spinlock_lock(&mcdi->lock); + + SFC_ASSERT(mcdi->state == SFC_MCDI_INITIALIZED); + mcdi->state = SFC_MCDI_UNINITIALIZED; + + sfc_log_init(sa, "fini MCDI"); + efx_mcdi_fini(sa->nic); + memset(emtp, 0, sizeof(*emtp)); + + rte_spinlock_unlock(&mcdi->lock); + + sfc_dma_free(sa, &mcdi->mem); +} diff --git a/src/spdk/dpdk/drivers/net/sfc/sfc_port.c b/src/spdk/dpdk/drivers/net/sfc/sfc_port.c new file mode 100644 index 000000000..32a0894a5 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/sfc_port.c @@ -0,0 +1,622 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2016-2019 Solarflare Communications Inc. + * + * This software was jointly developed between OKTET Labs (under contract + * for Solarflare) and Solarflare Communications, Inc. + */ + +#include "efx.h" + +#include "sfc.h" +#include "sfc_log.h" +#include "sfc_kvargs.h" + +/** Default MAC statistics update period is 1 second */ +#define SFC_MAC_STATS_UPDATE_PERIOD_MS_DEF MS_PER_S + +/** The number of microseconds to sleep on attempt to get statistics update */ +#define SFC_MAC_STATS_UPDATE_RETRY_INTERVAL_US 10 + +/** The number of attempts to await arrival of freshly generated statistics */ +#define SFC_MAC_STATS_UPDATE_NB_ATTEMPTS 50 + +/** + * Update MAC statistics in the buffer. + * + * @param sa Adapter + * + * @return Status code + * @retval 0 Success + * @retval EAGAIN Try again + * @retval ENOMEM Memory allocation failure + */ +int +sfc_port_update_mac_stats(struct sfc_adapter *sa) +{ + struct sfc_port *port = &sa->port; + efsys_mem_t *esmp = &port->mac_stats_dma_mem; + uint32_t *genp = NULL; + uint32_t gen_old; + unsigned int nb_attempts = 0; + int rc; + + SFC_ASSERT(rte_spinlock_is_locked(&port->mac_stats_lock)); + + if (sa->state != SFC_ADAPTER_STARTED) + return EINVAL; + + /* + * If periodic statistics DMA'ing is off or if not supported, + * make a manual request and keep an eye on timer if need be + */ + if (!port->mac_stats_periodic_dma_supported || + (port->mac_stats_update_period_ms == 0)) { + if (port->mac_stats_update_period_ms != 0) { + uint64_t timestamp = sfc_get_system_msecs(); + + if ((timestamp - + port->mac_stats_last_request_timestamp) < + port->mac_stats_update_period_ms) + return 0; + + port->mac_stats_last_request_timestamp = timestamp; + } + + rc = efx_mac_stats_upload(sa->nic, esmp); + if (rc != 0) + return rc; + + genp = &port->mac_stats_update_generation; + gen_old = *genp; + } + + do { + if (nb_attempts > 0) + rte_delay_us(SFC_MAC_STATS_UPDATE_RETRY_INTERVAL_US); + + rc = efx_mac_stats_update(sa->nic, esmp, + port->mac_stats_buf, genp); + if (rc != 0) + return rc; + + } while ((genp != NULL) && (*genp == gen_old) && + (++nb_attempts < SFC_MAC_STATS_UPDATE_NB_ATTEMPTS)); + + return 0; +} + +static void +sfc_port_reset_sw_stats(struct sfc_adapter *sa) +{ + struct sfc_port *port = &sa->port; + + /* + * Reset diff stats explicitly since check which does not allow + * the statistics to grow backward could deny it. 
+ */ + port->ipackets = 0; +} + +int +sfc_port_reset_mac_stats(struct sfc_adapter *sa) +{ + struct sfc_port *port = &sa->port; + int rc; + + rte_spinlock_lock(&port->mac_stats_lock); + rc = efx_mac_stats_clear(sa->nic); + if (rc == 0) + sfc_port_reset_sw_stats(sa); + rte_spinlock_unlock(&port->mac_stats_lock); + + return rc; +} + +static int +sfc_port_init_dev_link(struct sfc_adapter *sa) +{ + struct rte_eth_link *dev_link = &sa->eth_dev->data->dev_link; + int rc; + efx_link_mode_t link_mode; + struct rte_eth_link current_link; + + rc = efx_port_poll(sa->nic, &link_mode); + if (rc != 0) + return rc; + + sfc_port_link_mode_to_info(link_mode, ¤t_link); + + EFX_STATIC_ASSERT(sizeof(*dev_link) == sizeof(rte_atomic64_t)); + rte_atomic64_set((rte_atomic64_t *)dev_link, + *(uint64_t *)¤t_link); + + return 0; +} + +#if EFSYS_OPT_LOOPBACK + +static efx_link_mode_t +sfc_port_phy_caps_to_max_link_speed(uint32_t phy_caps) +{ + if (phy_caps & (1u << EFX_PHY_CAP_100000FDX)) + return EFX_LINK_100000FDX; + if (phy_caps & (1u << EFX_PHY_CAP_50000FDX)) + return EFX_LINK_50000FDX; + if (phy_caps & (1u << EFX_PHY_CAP_40000FDX)) + return EFX_LINK_40000FDX; + if (phy_caps & (1u << EFX_PHY_CAP_25000FDX)) + return EFX_LINK_25000FDX; + if (phy_caps & (1u << EFX_PHY_CAP_10000FDX)) + return EFX_LINK_10000FDX; + if (phy_caps & (1u << EFX_PHY_CAP_1000FDX)) + return EFX_LINK_1000FDX; + return EFX_LINK_UNKNOWN; +} + +#endif + +int +sfc_port_start(struct sfc_adapter *sa) +{ + struct sfc_port *port = &sa->port; + int rc; + uint32_t phy_adv_cap; + const uint32_t phy_pause_caps = + ((1u << EFX_PHY_CAP_PAUSE) | (1u << EFX_PHY_CAP_ASYM)); + unsigned int i; + + sfc_log_init(sa, "entry"); + + sfc_log_init(sa, "init filters"); + rc = efx_filter_init(sa->nic); + if (rc != 0) + goto fail_filter_init; + + sfc_log_init(sa, "init port"); + rc = efx_port_init(sa->nic); + if (rc != 0) + goto fail_port_init; + +#if EFSYS_OPT_LOOPBACK + if (sa->eth_dev->data->dev_conf.lpbk_mode != 0) { + efx_link_mode_t link_mode; + + link_mode = + sfc_port_phy_caps_to_max_link_speed(port->phy_adv_cap); + sfc_log_init(sa, "set loopback link_mode=%u type=%u", link_mode, + sa->eth_dev->data->dev_conf.lpbk_mode); + rc = efx_port_loopback_set(sa->nic, link_mode, + sa->eth_dev->data->dev_conf.lpbk_mode); + if (rc != 0) + goto fail_loopback_set; + } +#endif + + sfc_log_init(sa, "set flow control to %#x autoneg=%u", + port->flow_ctrl, port->flow_ctrl_autoneg); + rc = efx_mac_fcntl_set(sa->nic, port->flow_ctrl, + port->flow_ctrl_autoneg); + if (rc != 0) + goto fail_mac_fcntl_set; + + /* Preserve pause capabilities set by above efx_mac_fcntl_set() */ + efx_phy_adv_cap_get(sa->nic, EFX_PHY_CAP_CURRENT, &phy_adv_cap); + SFC_ASSERT((port->phy_adv_cap & phy_pause_caps) == 0); + phy_adv_cap = port->phy_adv_cap | (phy_adv_cap & phy_pause_caps); + + /* + * No controls for FEC yet. Use default FEC mode. + * I.e. advertise everything supported (*_FEC=1), but do not request + * anything explicitly (*_FEC_REQUESTED=0). 
+ */ + phy_adv_cap |= port->phy_adv_cap_mask & + (1u << EFX_PHY_CAP_BASER_FEC | + 1u << EFX_PHY_CAP_RS_FEC | + 1u << EFX_PHY_CAP_25G_BASER_FEC); + + sfc_log_init(sa, "set phy adv caps to %#x", phy_adv_cap); + rc = efx_phy_adv_cap_set(sa->nic, phy_adv_cap); + if (rc != 0) + goto fail_phy_adv_cap_set; + + sfc_log_init(sa, "set MAC PDU %u", (unsigned int)port->pdu); + rc = efx_mac_pdu_set(sa->nic, port->pdu); + if (rc != 0) + goto fail_mac_pdu_set; + + if (!sfc_sa2shared(sa)->isolated) { + struct rte_ether_addr *addr = &port->default_mac_addr; + + sfc_log_init(sa, "set MAC address"); + rc = efx_mac_addr_set(sa->nic, addr->addr_bytes); + if (rc != 0) + goto fail_mac_addr_set; + + sfc_log_init(sa, "set MAC filters"); + port->promisc = (sa->eth_dev->data->promiscuous != 0) ? + B_TRUE : B_FALSE; + port->allmulti = (sa->eth_dev->data->all_multicast != 0) ? + B_TRUE : B_FALSE; + rc = sfc_set_rx_mode_unchecked(sa); + if (rc != 0) + goto fail_mac_filter_set; + + sfc_log_init(sa, "set multicast address list"); + rc = efx_mac_multicast_list_set(sa->nic, port->mcast_addrs, + port->nb_mcast_addrs); + if (rc != 0) + goto fail_mcast_address_list_set; + } + + if (port->mac_stats_reset_pending) { + rc = sfc_port_reset_mac_stats(sa); + if (rc != 0) + sfc_err(sa, "statistics reset failed (requested " + "before the port was started)"); + + port->mac_stats_reset_pending = B_FALSE; + } + + efx_mac_stats_get_mask(sa->nic, port->mac_stats_mask, + sizeof(port->mac_stats_mask)); + + for (i = 0, port->mac_stats_nb_supported = 0; i < EFX_MAC_NSTATS; ++i) + if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i)) + port->mac_stats_nb_supported++; + + port->mac_stats_update_generation = 0; + + if (port->mac_stats_update_period_ms != 0) { + /* + * Update MAC stats using periodic DMA; + * any positive update interval different from + * 1000 ms can be set only on SFN8xxx provided + * that FW version is 6.2.1.1033 or higher + */ + sfc_log_init(sa, "request MAC stats DMA'ing"); + rc = efx_mac_stats_periodic(sa->nic, &port->mac_stats_dma_mem, + port->mac_stats_update_period_ms, + B_FALSE); + if (rc == 0) { + port->mac_stats_periodic_dma_supported = B_TRUE; + } else if (rc == EOPNOTSUPP) { + port->mac_stats_periodic_dma_supported = B_FALSE; + port->mac_stats_last_request_timestamp = 0; + } else { + goto fail_mac_stats_periodic; + } + } + + if ((port->mac_stats_update_period_ms != 0) && + port->mac_stats_periodic_dma_supported) { + /* + * Request an explicit MAC stats upload immediately to + * preclude bogus figures readback if the user decides + * to read stats before periodic DMA is really started + */ + rc = efx_mac_stats_upload(sa->nic, &port->mac_stats_dma_mem); + if (rc != 0) + goto fail_mac_stats_upload; + } + + sfc_log_init(sa, "disable MAC drain"); + rc = efx_mac_drain(sa->nic, B_FALSE); + if (rc != 0) + goto fail_mac_drain; + + /* Synchronize link status knowledge */ + rc = sfc_port_init_dev_link(sa); + if (rc != 0) + goto fail_port_init_dev_link; + + sfc_log_init(sa, "done"); + return 0; + +fail_port_init_dev_link: + (void)efx_mac_drain(sa->nic, B_TRUE); + +fail_mac_drain: +fail_mac_stats_upload: + (void)efx_mac_stats_periodic(sa->nic, &port->mac_stats_dma_mem, + 0, B_FALSE); + +fail_mac_stats_periodic: +fail_mcast_address_list_set: +fail_mac_filter_set: +fail_mac_addr_set: +fail_mac_pdu_set: +fail_phy_adv_cap_set: +fail_mac_fcntl_set: +#if EFSYS_OPT_LOOPBACK +fail_loopback_set: +#endif + efx_port_fini(sa->nic); + +fail_port_init: + efx_filter_fini(sa->nic); + +fail_filter_init: + sfc_log_init(sa, "failed %d", rc); + 
return rc; +} + +void +sfc_port_stop(struct sfc_adapter *sa) +{ + sfc_log_init(sa, "entry"); + + efx_mac_drain(sa->nic, B_TRUE); + + (void)efx_mac_stats_periodic(sa->nic, &sa->port.mac_stats_dma_mem, + 0, B_FALSE); + + efx_port_fini(sa->nic); + efx_filter_fini(sa->nic); + + sfc_log_init(sa, "done"); +} + +int +sfc_port_configure(struct sfc_adapter *sa) +{ + const struct rte_eth_dev_data *dev_data = sa->eth_dev->data; + struct sfc_port *port = &sa->port; + const struct rte_eth_rxmode *rxmode = &dev_data->dev_conf.rxmode; + + sfc_log_init(sa, "entry"); + + if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) + port->pdu = rxmode->max_rx_pkt_len; + else + port->pdu = EFX_MAC_PDU(dev_data->mtu); + + return 0; +} + +void +sfc_port_close(struct sfc_adapter *sa) +{ + sfc_log_init(sa, "entry"); +} + +int +sfc_port_attach(struct sfc_adapter *sa) +{ + struct sfc_port *port = &sa->port; + const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic); + const struct rte_ether_addr *from; + uint32_t mac_nstats; + size_t mac_stats_size; + long kvarg_stats_update_period_ms; + int rc; + + sfc_log_init(sa, "entry"); + + efx_phy_adv_cap_get(sa->nic, EFX_PHY_CAP_PERM, &port->phy_adv_cap_mask); + + /* Enable flow control by default */ + port->flow_ctrl = EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE; + port->flow_ctrl_autoneg = B_TRUE; + + RTE_BUILD_BUG_ON(sizeof(encp->enc_mac_addr) != sizeof(*from)); + from = (const struct rte_ether_addr *)(encp->enc_mac_addr); + rte_ether_addr_copy(from, &port->default_mac_addr); + + port->max_mcast_addrs = EFX_MAC_MULTICAST_LIST_MAX; + port->nb_mcast_addrs = 0; + port->mcast_addrs = rte_calloc_socket("mcast_addr_list_buf", + port->max_mcast_addrs, + EFX_MAC_ADDR_LEN, 0, + sa->socket_id); + if (port->mcast_addrs == NULL) { + rc = ENOMEM; + goto fail_mcast_addr_list_buf_alloc; + } + + rte_spinlock_init(&port->mac_stats_lock); + + rc = ENOMEM; + port->mac_stats_buf = rte_calloc_socket("mac_stats_buf", EFX_MAC_NSTATS, + sizeof(uint64_t), 0, + sa->socket_id); + if (port->mac_stats_buf == NULL) + goto fail_mac_stats_buf_alloc; + + mac_nstats = efx_nic_cfg_get(sa->nic)->enc_mac_stats_nstats; + mac_stats_size = RTE_ALIGN(mac_nstats * sizeof(uint64_t), EFX_BUF_SIZE); + rc = sfc_dma_alloc(sa, "mac_stats", 0, mac_stats_size, + sa->socket_id, &port->mac_stats_dma_mem); + if (rc != 0) + goto fail_mac_stats_dma_alloc; + + port->mac_stats_reset_pending = B_FALSE; + + kvarg_stats_update_period_ms = SFC_MAC_STATS_UPDATE_PERIOD_MS_DEF; + + rc = sfc_kvargs_process(sa, SFC_KVARG_STATS_UPDATE_PERIOD_MS, + sfc_kvarg_long_handler, + &kvarg_stats_update_period_ms); + if ((rc == 0) && + ((kvarg_stats_update_period_ms < 0) || + (kvarg_stats_update_period_ms > UINT16_MAX))) { + sfc_err(sa, "wrong '" SFC_KVARG_STATS_UPDATE_PERIOD_MS "' " + "was set (%ld);", kvarg_stats_update_period_ms); + sfc_err(sa, "it must not be less than 0 " + "or greater than %" PRIu16, UINT16_MAX); + rc = EINVAL; + goto fail_kvarg_stats_update_period_ms; + } else if (rc != 0) { + goto fail_kvarg_stats_update_period_ms; + } + + port->mac_stats_update_period_ms = kvarg_stats_update_period_ms; + + sfc_log_init(sa, "done"); + return 0; + +fail_kvarg_stats_update_period_ms: + sfc_dma_free(sa, &port->mac_stats_dma_mem); + +fail_mac_stats_dma_alloc: + rte_free(port->mac_stats_buf); + +fail_mac_stats_buf_alloc: + rte_free(port->mcast_addrs); + +fail_mcast_addr_list_buf_alloc: + sfc_log_init(sa, "failed %d", rc); + return rc; +} + +void +sfc_port_detach(struct sfc_adapter *sa) +{ + struct sfc_port *port = &sa->port; + + sfc_log_init(sa, "entry"); + + 
sfc_dma_free(sa, &port->mac_stats_dma_mem); + rte_free(port->mac_stats_buf); + + rte_free(port->mcast_addrs); + + sfc_log_init(sa, "done"); +} + +static boolean_t +sfc_get_requested_all_ucast(struct sfc_port *port) +{ + return port->promisc; +} + +static boolean_t +sfc_get_requested_all_mcast(struct sfc_port *port) +{ + return port->promisc || port->allmulti; +} + +int +sfc_set_rx_mode_unchecked(struct sfc_adapter *sa) +{ + struct sfc_port *port = &sa->port; + boolean_t requested_all_ucast = sfc_get_requested_all_ucast(port); + boolean_t requested_all_mcast = sfc_get_requested_all_mcast(port); + int rc; + + rc = efx_mac_filter_set(sa->nic, requested_all_ucast, B_TRUE, + requested_all_mcast, B_TRUE); + if (rc != 0) + return rc; + + return 0; +} + +int +sfc_set_rx_mode(struct sfc_adapter *sa) +{ + struct sfc_port *port = &sa->port; + boolean_t old_all_ucast; + boolean_t old_all_mcast; + boolean_t requested_all_ucast = sfc_get_requested_all_ucast(port); + boolean_t requested_all_mcast = sfc_get_requested_all_mcast(port); + boolean_t actual_all_ucast; + boolean_t actual_all_mcast; + int rc; + + efx_mac_filter_get_all_ucast_mcast(sa->nic, &old_all_ucast, + &old_all_mcast); + + rc = sfc_set_rx_mode_unchecked(sa); + if (rc != 0) + return rc; + + efx_mac_filter_get_all_ucast_mcast(sa->nic, &actual_all_ucast, + &actual_all_mcast); + + if (actual_all_ucast != requested_all_ucast || + actual_all_mcast != requested_all_mcast) { + /* + * MAC filter set succeeded but not all requested modes + * were applied. The rollback is necessary to bring back the + * consistent old state. + */ + (void)efx_mac_filter_set(sa->nic, old_all_ucast, B_TRUE, + old_all_mcast, B_TRUE); + + return EPERM; + } + + return 0; +} + +void +sfc_port_link_mode_to_info(efx_link_mode_t link_mode, + struct rte_eth_link *link_info) +{ + SFC_ASSERT(link_mode < EFX_LINK_NMODES); + + memset(link_info, 0, sizeof(*link_info)); + if ((link_mode == EFX_LINK_DOWN) || (link_mode == EFX_LINK_UNKNOWN)) + link_info->link_status = ETH_LINK_DOWN; + else + link_info->link_status = ETH_LINK_UP; + + switch (link_mode) { + case EFX_LINK_10HDX: + link_info->link_speed = ETH_SPEED_NUM_10M; + link_info->link_duplex = ETH_LINK_HALF_DUPLEX; + break; + case EFX_LINK_10FDX: + link_info->link_speed = ETH_SPEED_NUM_10M; + link_info->link_duplex = ETH_LINK_FULL_DUPLEX; + break; + case EFX_LINK_100HDX: + link_info->link_speed = ETH_SPEED_NUM_100M; + link_info->link_duplex = ETH_LINK_HALF_DUPLEX; + break; + case EFX_LINK_100FDX: + link_info->link_speed = ETH_SPEED_NUM_100M; + link_info->link_duplex = ETH_LINK_FULL_DUPLEX; + break; + case EFX_LINK_1000HDX: + link_info->link_speed = ETH_SPEED_NUM_1G; + link_info->link_duplex = ETH_LINK_HALF_DUPLEX; + break; + case EFX_LINK_1000FDX: + link_info->link_speed = ETH_SPEED_NUM_1G; + link_info->link_duplex = ETH_LINK_FULL_DUPLEX; + break; + case EFX_LINK_10000FDX: + link_info->link_speed = ETH_SPEED_NUM_10G; + link_info->link_duplex = ETH_LINK_FULL_DUPLEX; + break; + case EFX_LINK_25000FDX: + link_info->link_speed = ETH_SPEED_NUM_25G; + link_info->link_duplex = ETH_LINK_FULL_DUPLEX; + break; + case EFX_LINK_40000FDX: + link_info->link_speed = ETH_SPEED_NUM_40G; + link_info->link_duplex = ETH_LINK_FULL_DUPLEX; + break; + case EFX_LINK_50000FDX: + link_info->link_speed = ETH_SPEED_NUM_50G; + link_info->link_duplex = ETH_LINK_FULL_DUPLEX; + break; + case EFX_LINK_100000FDX: + link_info->link_speed = ETH_SPEED_NUM_100G; + link_info->link_duplex = ETH_LINK_FULL_DUPLEX; + break; + default: + SFC_ASSERT(B_FALSE); + /* FALLTHROUGH 
*/ + case EFX_LINK_UNKNOWN: + case EFX_LINK_DOWN: + link_info->link_speed = ETH_SPEED_NUM_NONE; + link_info->link_duplex = 0; + break; + } + + link_info->link_autoneg = ETH_LINK_AUTONEG; +} diff --git a/src/spdk/dpdk/drivers/net/sfc/sfc_rx.c b/src/spdk/dpdk/drivers/net/sfc/sfc_rx.c new file mode 100644 index 000000000..7d22e2f8e --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/sfc_rx.c @@ -0,0 +1,1726 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2016-2019 Solarflare Communications Inc. + * + * This software was jointly developed between OKTET Labs (under contract + * for Solarflare) and Solarflare Communications, Inc. + */ + +#include <rte_mempool.h> + +#include "efx.h" + +#include "sfc.h" +#include "sfc_debug.h" +#include "sfc_log.h" +#include "sfc_ev.h" +#include "sfc_rx.h" +#include "sfc_kvargs.h" +#include "sfc_tweak.h" + +/* + * Maximum number of Rx queue flush attempts in the case of failure or + * flush timeout + */ +#define SFC_RX_QFLUSH_ATTEMPTS (3) + +/* + * Time to wait between event queue polling attempts when waiting for Rx + * queue flush done or failed events. + */ +#define SFC_RX_QFLUSH_POLL_WAIT_MS (1) + +/* + * Maximum number of event queue polling attempts when waiting for Rx queue + * flush done or failed events. It defines Rx queue flush attempt timeout + * together with SFC_RX_QFLUSH_POLL_WAIT_MS. + */ +#define SFC_RX_QFLUSH_POLL_ATTEMPTS (2000) + +void +sfc_rx_qflush_done(struct sfc_rxq_info *rxq_info) +{ + rxq_info->state |= SFC_RXQ_FLUSHED; + rxq_info->state &= ~SFC_RXQ_FLUSHING; +} + +void +sfc_rx_qflush_failed(struct sfc_rxq_info *rxq_info) +{ + rxq_info->state |= SFC_RXQ_FLUSH_FAILED; + rxq_info->state &= ~SFC_RXQ_FLUSHING; +} + +static int +sfc_efx_rx_qprime(struct sfc_efx_rxq *rxq) +{ + int rc = 0; + + if (rxq->evq->read_ptr_primed != rxq->evq->read_ptr) { + rc = efx_ev_qprime(rxq->evq->common, rxq->evq->read_ptr); + if (rc == 0) + rxq->evq->read_ptr_primed = rxq->evq->read_ptr; + } + return rc; +} + +static void +sfc_efx_rx_qrefill(struct sfc_efx_rxq *rxq) +{ + unsigned int free_space; + unsigned int bulks; + void *objs[SFC_RX_REFILL_BULK]; + efsys_dma_addr_t addr[RTE_DIM(objs)]; + unsigned int added = rxq->added; + unsigned int id; + unsigned int i; + struct sfc_efx_rx_sw_desc *rxd; + struct rte_mbuf *m; + uint16_t port_id = rxq->dp.dpq.port_id; + + free_space = rxq->max_fill_level - (added - rxq->completed); + + if (free_space < rxq->refill_threshold) + return; + + bulks = free_space / RTE_DIM(objs); + /* refill_threshold guarantees that bulks is positive */ + SFC_ASSERT(bulks > 0); + + id = added & rxq->ptr_mask; + do { + if (unlikely(rte_mempool_get_bulk(rxq->refill_mb_pool, objs, + RTE_DIM(objs)) < 0)) { + /* + * It is hardly a safe way to increment counter + * from different contexts, but all PMDs do it.
+ */ + rxq->evq->sa->eth_dev->data->rx_mbuf_alloc_failed += + RTE_DIM(objs); + /* Return if we have posted nothing yet */ + if (added == rxq->added) + return; + /* Push posted */ + break; + } + + for (i = 0; i < RTE_DIM(objs); + ++i, id = (id + 1) & rxq->ptr_mask) { + m = objs[i]; + + MBUF_RAW_ALLOC_CHECK(m); + + rxd = &rxq->sw_desc[id]; + rxd->mbuf = m; + + m->data_off = RTE_PKTMBUF_HEADROOM; + m->port = port_id; + + addr[i] = rte_pktmbuf_iova(m); + } + + efx_rx_qpost(rxq->common, addr, rxq->buf_size, + RTE_DIM(objs), rxq->completed, added); + added += RTE_DIM(objs); + } while (--bulks > 0); + + SFC_ASSERT(added != rxq->added); + rxq->added = added; + efx_rx_qpush(rxq->common, added, &rxq->pushed); +} + +static uint64_t +sfc_efx_rx_desc_flags_to_offload_flags(const unsigned int desc_flags) +{ + uint64_t mbuf_flags = 0; + + switch (desc_flags & (EFX_PKT_IPV4 | EFX_CKSUM_IPV4)) { + case (EFX_PKT_IPV4 | EFX_CKSUM_IPV4): + mbuf_flags |= PKT_RX_IP_CKSUM_GOOD; + break; + case EFX_PKT_IPV4: + mbuf_flags |= PKT_RX_IP_CKSUM_BAD; + break; + default: + RTE_BUILD_BUG_ON(PKT_RX_IP_CKSUM_UNKNOWN != 0); + SFC_ASSERT((mbuf_flags & PKT_RX_IP_CKSUM_MASK) == + PKT_RX_IP_CKSUM_UNKNOWN); + break; + } + + switch ((desc_flags & + (EFX_PKT_TCP | EFX_PKT_UDP | EFX_CKSUM_TCPUDP))) { + case (EFX_PKT_TCP | EFX_CKSUM_TCPUDP): + case (EFX_PKT_UDP | EFX_CKSUM_TCPUDP): + mbuf_flags |= PKT_RX_L4_CKSUM_GOOD; + break; + case EFX_PKT_TCP: + case EFX_PKT_UDP: + mbuf_flags |= PKT_RX_L4_CKSUM_BAD; + break; + default: + RTE_BUILD_BUG_ON(PKT_RX_L4_CKSUM_UNKNOWN != 0); + SFC_ASSERT((mbuf_flags & PKT_RX_L4_CKSUM_MASK) == + PKT_RX_L4_CKSUM_UNKNOWN); + break; + } + + return mbuf_flags; +} + +static uint32_t +sfc_efx_rx_desc_flags_to_packet_type(const unsigned int desc_flags) +{ + return RTE_PTYPE_L2_ETHER | + ((desc_flags & EFX_PKT_IPV4) ? + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN : 0) | + ((desc_flags & EFX_PKT_IPV6) ? + RTE_PTYPE_L3_IPV6_EXT_UNKNOWN : 0) | + ((desc_flags & EFX_PKT_TCP) ? RTE_PTYPE_L4_TCP : 0) | + ((desc_flags & EFX_PKT_UDP) ? 
RTE_PTYPE_L4_UDP : 0); +} + +static const uint32_t * +sfc_efx_supported_ptypes_get(__rte_unused uint32_t tunnel_encaps) +{ + static const uint32_t ptypes[] = { + RTE_PTYPE_L2_ETHER, + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN, + RTE_PTYPE_L3_IPV6_EXT_UNKNOWN, + RTE_PTYPE_L4_TCP, + RTE_PTYPE_L4_UDP, + RTE_PTYPE_UNKNOWN + }; + + return ptypes; +} + +static void +sfc_efx_rx_set_rss_hash(struct sfc_efx_rxq *rxq, unsigned int flags, + struct rte_mbuf *m) +{ + uint8_t *mbuf_data; + + + if ((rxq->flags & SFC_EFX_RXQ_FLAG_RSS_HASH) == 0) + return; + + mbuf_data = rte_pktmbuf_mtod(m, uint8_t *); + + if (flags & (EFX_PKT_IPV4 | EFX_PKT_IPV6)) { + m->hash.rss = efx_pseudo_hdr_hash_get(rxq->common, + EFX_RX_HASHALG_TOEPLITZ, + mbuf_data); + + m->ol_flags |= PKT_RX_RSS_HASH; + } +} + +static uint16_t +sfc_efx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) +{ + struct sfc_dp_rxq *dp_rxq = rx_queue; + struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq); + unsigned int completed; + unsigned int prefix_size = rxq->prefix_size; + unsigned int done_pkts = 0; + boolean_t discard_next = B_FALSE; + struct rte_mbuf *scatter_pkt = NULL; + + if (unlikely((rxq->flags & SFC_EFX_RXQ_FLAG_RUNNING) == 0)) + return 0; + + sfc_ev_qpoll(rxq->evq); + + completed = rxq->completed; + while (completed != rxq->pending && done_pkts < nb_pkts) { + unsigned int id; + struct sfc_efx_rx_sw_desc *rxd; + struct rte_mbuf *m; + unsigned int seg_len; + unsigned int desc_flags; + + id = completed++ & rxq->ptr_mask; + rxd = &rxq->sw_desc[id]; + m = rxd->mbuf; + desc_flags = rxd->flags; + + if (discard_next) + goto discard; + + if (desc_flags & (EFX_ADDR_MISMATCH | EFX_DISCARD)) + goto discard; + + if (desc_flags & EFX_PKT_PREFIX_LEN) { + uint16_t tmp_size; + int rc __rte_unused; + + rc = efx_pseudo_hdr_pkt_length_get(rxq->common, + rte_pktmbuf_mtod(m, uint8_t *), &tmp_size); + SFC_ASSERT(rc == 0); + seg_len = tmp_size; + } else { + seg_len = rxd->size - prefix_size; + } + + rte_pktmbuf_data_len(m) = seg_len; + rte_pktmbuf_pkt_len(m) = seg_len; + + if (scatter_pkt != NULL) { + if (rte_pktmbuf_chain(scatter_pkt, m) != 0) { + rte_pktmbuf_free(scatter_pkt); + goto discard; + } + /* The packet to deliver */ + m = scatter_pkt; + } + + if (desc_flags & EFX_PKT_CONT) { + /* The packet is scattered, more fragments to come */ + scatter_pkt = m; + /* Further fragments have no prefix */ + prefix_size = 0; + continue; + } + + /* Scattered packet is done */ + scatter_pkt = NULL; + /* The first fragment of the packet has prefix */ + prefix_size = rxq->prefix_size; + + m->ol_flags = + sfc_efx_rx_desc_flags_to_offload_flags(desc_flags); + m->packet_type = + sfc_efx_rx_desc_flags_to_packet_type(desc_flags); + + /* + * Extract RSS hash from the packet prefix and + * set the corresponding field (if needed and possible) + */ + sfc_efx_rx_set_rss_hash(rxq, desc_flags, m); + + m->data_off += prefix_size; + + *rx_pkts++ = m; + done_pkts++; + continue; + +discard: + discard_next = ((desc_flags & EFX_PKT_CONT) != 0); + rte_mbuf_raw_free(m); + rxd->mbuf = NULL; + } + + /* pending is only moved when entire packet is received */ + SFC_ASSERT(scatter_pkt == NULL); + + rxq->completed = completed; + + sfc_efx_rx_qrefill(rxq); + + if (rxq->flags & SFC_EFX_RXQ_FLAG_INTR_EN) + sfc_efx_rx_qprime(rxq); + + return done_pkts; +} + +static sfc_dp_rx_qdesc_npending_t sfc_efx_rx_qdesc_npending; +static unsigned int +sfc_efx_rx_qdesc_npending(struct sfc_dp_rxq *dp_rxq) +{ + struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq); + + if ((rxq->flags & 
SFC_EFX_RXQ_FLAG_RUNNING) == 0) + return 0; + + sfc_ev_qpoll(rxq->evq); + + return rxq->pending - rxq->completed; +} + +static sfc_dp_rx_qdesc_status_t sfc_efx_rx_qdesc_status; +static int +sfc_efx_rx_qdesc_status(struct sfc_dp_rxq *dp_rxq, uint16_t offset) +{ + struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq); + + if (unlikely(offset > rxq->ptr_mask)) + return -EINVAL; + + /* + * Poll EvQ to derive up-to-date 'rxq->pending' figure; + * it is required for the queue to be running, but the + * check is omitted because API design assumes that it + * is the duty of the caller to satisfy all conditions + */ + SFC_ASSERT((rxq->flags & SFC_EFX_RXQ_FLAG_RUNNING) == + SFC_EFX_RXQ_FLAG_RUNNING); + sfc_ev_qpoll(rxq->evq); + + /* + * There is a handful of reserved entries in the ring, + * but an explicit check whether the offset points to + * a reserved entry is neglected since the two checks + * below rely on the figures which take the HW limits + * into account and thus if an entry is reserved, the + * checks will fail and UNAVAIL code will be returned + */ + + if (offset < (rxq->pending - rxq->completed)) + return RTE_ETH_RX_DESC_DONE; + + if (offset < (rxq->added - rxq->completed)) + return RTE_ETH_RX_DESC_AVAIL; + + return RTE_ETH_RX_DESC_UNAVAIL; +} + +boolean_t +sfc_rx_check_scatter(size_t pdu, size_t rx_buf_size, uint32_t rx_prefix_size, + boolean_t rx_scatter_enabled, const char **error) +{ + if ((rx_buf_size < pdu + rx_prefix_size) && !rx_scatter_enabled) { + *error = "Rx scatter is disabled and RxQ mbuf pool object size is too small"; + return B_FALSE; + } + + return B_TRUE; +} + +/** Get Rx datapath ops by the datapath RxQ handle */ +const struct sfc_dp_rx * +sfc_dp_rx_by_dp_rxq(const struct sfc_dp_rxq *dp_rxq) +{ + const struct sfc_dp_queue *dpq = &dp_rxq->dpq; + struct rte_eth_dev *eth_dev; + struct sfc_adapter_priv *sap; + + SFC_ASSERT(rte_eth_dev_is_valid_port(dpq->port_id)); + eth_dev = &rte_eth_devices[dpq->port_id]; + + sap = sfc_adapter_priv_by_eth_dev(eth_dev); + + return sap->dp_rx; +} + +struct sfc_rxq_info * +sfc_rxq_info_by_dp_rxq(const struct sfc_dp_rxq *dp_rxq) +{ + const struct sfc_dp_queue *dpq = &dp_rxq->dpq; + struct rte_eth_dev *eth_dev; + struct sfc_adapter_shared *sas; + + SFC_ASSERT(rte_eth_dev_is_valid_port(dpq->port_id)); + eth_dev = &rte_eth_devices[dpq->port_id]; + + sas = sfc_adapter_shared_by_eth_dev(eth_dev); + + SFC_ASSERT(dpq->queue_id < sas->rxq_count); + return &sas->rxq_info[dpq->queue_id]; +} + +struct sfc_rxq * +sfc_rxq_by_dp_rxq(const struct sfc_dp_rxq *dp_rxq) +{ + const struct sfc_dp_queue *dpq = &dp_rxq->dpq; + struct rte_eth_dev *eth_dev; + struct sfc_adapter *sa; + + SFC_ASSERT(rte_eth_dev_is_valid_port(dpq->port_id)); + eth_dev = &rte_eth_devices[dpq->port_id]; + + sa = sfc_adapter_by_eth_dev(eth_dev); + + SFC_ASSERT(dpq->queue_id < sfc_sa2shared(sa)->rxq_count); + return &sa->rxq_ctrl[dpq->queue_id]; +} + +static sfc_dp_rx_qsize_up_rings_t sfc_efx_rx_qsize_up_rings; +static int +sfc_efx_rx_qsize_up_rings(uint16_t nb_rx_desc, + __rte_unused struct sfc_dp_rx_hw_limits *limits, + __rte_unused struct rte_mempool *mb_pool, + unsigned int *rxq_entries, + unsigned int *evq_entries, + unsigned int *rxq_max_fill_level) +{ + *rxq_entries = nb_rx_desc; + *evq_entries = nb_rx_desc; + *rxq_max_fill_level = EFX_RXQ_LIMIT(*rxq_entries); + return 0; +} + +static sfc_dp_rx_qcreate_t sfc_efx_rx_qcreate; +static int +sfc_efx_rx_qcreate(uint16_t port_id, uint16_t queue_id, + const struct rte_pci_addr *pci_addr, int socket_id, + const struct 
sfc_dp_rx_qcreate_info *info, + struct sfc_dp_rxq **dp_rxqp) +{ + struct sfc_efx_rxq *rxq; + int rc; + + rc = ENOMEM; + rxq = rte_zmalloc_socket("sfc-efx-rxq", sizeof(*rxq), + RTE_CACHE_LINE_SIZE, socket_id); + if (rxq == NULL) + goto fail_rxq_alloc; + + sfc_dp_queue_init(&rxq->dp.dpq, port_id, queue_id, pci_addr); + + rc = ENOMEM; + rxq->sw_desc = rte_calloc_socket("sfc-efx-rxq-sw_desc", + info->rxq_entries, + sizeof(*rxq->sw_desc), + RTE_CACHE_LINE_SIZE, socket_id); + if (rxq->sw_desc == NULL) + goto fail_desc_alloc; + + /* efx datapath is bound to efx control path */ + rxq->evq = sfc_rxq_by_dp_rxq(&rxq->dp)->evq; + if (info->flags & SFC_RXQ_FLAG_RSS_HASH) + rxq->flags |= SFC_EFX_RXQ_FLAG_RSS_HASH; + rxq->ptr_mask = info->rxq_entries - 1; + rxq->batch_max = info->batch_max; + rxq->prefix_size = info->prefix_size; + rxq->max_fill_level = info->max_fill_level; + rxq->refill_threshold = info->refill_threshold; + rxq->buf_size = info->buf_size; + rxq->refill_mb_pool = info->refill_mb_pool; + + *dp_rxqp = &rxq->dp; + return 0; + +fail_desc_alloc: + rte_free(rxq); + +fail_rxq_alloc: + return rc; +} + +static sfc_dp_rx_qdestroy_t sfc_efx_rx_qdestroy; +static void +sfc_efx_rx_qdestroy(struct sfc_dp_rxq *dp_rxq) +{ + struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq); + + rte_free(rxq->sw_desc); + rte_free(rxq); +} + + +/* Use qstop and qstart functions in the case of qstart failure */ +static sfc_dp_rx_qstop_t sfc_efx_rx_qstop; +static sfc_dp_rx_qpurge_t sfc_efx_rx_qpurge; + + +static sfc_dp_rx_qstart_t sfc_efx_rx_qstart; +static int +sfc_efx_rx_qstart(struct sfc_dp_rxq *dp_rxq, + __rte_unused unsigned int evq_read_ptr) +{ + /* libefx-based datapath is specific to libefx-based PMD */ + struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq); + struct sfc_rxq *crxq = sfc_rxq_by_dp_rxq(dp_rxq); + int rc; + + rxq->common = crxq->common; + + rxq->pending = rxq->completed = rxq->added = rxq->pushed = 0; + + sfc_efx_rx_qrefill(rxq); + + rxq->flags |= (SFC_EFX_RXQ_FLAG_STARTED | SFC_EFX_RXQ_FLAG_RUNNING); + + if (rxq->flags & SFC_EFX_RXQ_FLAG_INTR_EN) { + rc = sfc_efx_rx_qprime(rxq); + if (rc != 0) + goto fail_rx_qprime; + } + + return 0; + +fail_rx_qprime: + sfc_efx_rx_qstop(dp_rxq, NULL); + sfc_efx_rx_qpurge(dp_rxq); + return rc; +} + +static void +sfc_efx_rx_qstop(struct sfc_dp_rxq *dp_rxq, + __rte_unused unsigned int *evq_read_ptr) +{ + struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq); + + rxq->flags &= ~SFC_EFX_RXQ_FLAG_RUNNING; + + /* libefx-based datapath is bound to libefx-based PMD and uses + * event queue structure directly. So, there is no necessity to + * return EvQ read pointer. + */ +} + +static void +sfc_efx_rx_qpurge(struct sfc_dp_rxq *dp_rxq) +{ + struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq); + unsigned int i; + struct sfc_efx_rx_sw_desc *rxd; + + for (i = rxq->completed; i != rxq->added; ++i) { + rxd = &rxq->sw_desc[i & rxq->ptr_mask]; + rte_mbuf_raw_free(rxd->mbuf); + rxd->mbuf = NULL; + /* Packed stream relies on 0 in inactive SW desc. + * Rx queue stop is not performance critical, so + * there is no harm to do it always. 
+ */ + rxd->flags = 0; + rxd->size = 0; + } + + rxq->flags &= ~SFC_EFX_RXQ_FLAG_STARTED; +} + +static sfc_dp_rx_intr_enable_t sfc_efx_rx_intr_enable; +static int +sfc_efx_rx_intr_enable(struct sfc_dp_rxq *dp_rxq) +{ + struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq); + int rc = 0; + + rxq->flags |= SFC_EFX_RXQ_FLAG_INTR_EN; + if (rxq->flags & SFC_EFX_RXQ_FLAG_STARTED) { + rc = sfc_efx_rx_qprime(rxq); + if (rc != 0) + rxq->flags &= ~SFC_EFX_RXQ_FLAG_INTR_EN; + } + return rc; +} + +static sfc_dp_rx_intr_disable_t sfc_efx_rx_intr_disable; +static int +sfc_efx_rx_intr_disable(struct sfc_dp_rxq *dp_rxq) +{ + struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq); + + /* Cannot disarm, just disable rearm */ + rxq->flags &= ~SFC_EFX_RXQ_FLAG_INTR_EN; + return 0; +} + +struct sfc_dp_rx sfc_efx_rx = { + .dp = { + .name = SFC_KVARG_DATAPATH_EFX, + .type = SFC_DP_RX, + .hw_fw_caps = 0, + }, + .features = SFC_DP_RX_FEAT_INTR, + .dev_offload_capa = DEV_RX_OFFLOAD_CHECKSUM | + DEV_RX_OFFLOAD_RSS_HASH, + .queue_offload_capa = DEV_RX_OFFLOAD_SCATTER, + .qsize_up_rings = sfc_efx_rx_qsize_up_rings, + .qcreate = sfc_efx_rx_qcreate, + .qdestroy = sfc_efx_rx_qdestroy, + .qstart = sfc_efx_rx_qstart, + .qstop = sfc_efx_rx_qstop, + .qpurge = sfc_efx_rx_qpurge, + .supported_ptypes_get = sfc_efx_supported_ptypes_get, + .qdesc_npending = sfc_efx_rx_qdesc_npending, + .qdesc_status = sfc_efx_rx_qdesc_status, + .intr_enable = sfc_efx_rx_intr_enable, + .intr_disable = sfc_efx_rx_intr_disable, + .pkt_burst = sfc_efx_recv_pkts, +}; + +static void +sfc_rx_qflush(struct sfc_adapter *sa, unsigned int sw_index) +{ + struct sfc_rxq_info *rxq_info; + struct sfc_rxq *rxq; + unsigned int retry_count; + unsigned int wait_count; + int rc; + + rxq_info = &sfc_sa2shared(sa)->rxq_info[sw_index]; + SFC_ASSERT(rxq_info->state & SFC_RXQ_STARTED); + + rxq = &sa->rxq_ctrl[sw_index]; + + /* + * Retry Rx queue flushing in the case of flush failed or + * timeout. In the worst case it can delay for 6 seconds. + */ + for (retry_count = 0; + ((rxq_info->state & SFC_RXQ_FLUSHED) == 0) && + (retry_count < SFC_RX_QFLUSH_ATTEMPTS); + ++retry_count) { + rc = efx_rx_qflush(rxq->common); + if (rc != 0) { + rxq_info->state |= (rc == EALREADY) ? + SFC_RXQ_FLUSHED : SFC_RXQ_FLUSH_FAILED; + break; + } + rxq_info->state &= ~SFC_RXQ_FLUSH_FAILED; + rxq_info->state |= SFC_RXQ_FLUSHING; + + /* + * Wait for Rx queue flush done or failed event at least + * SFC_RX_QFLUSH_POLL_WAIT_MS milliseconds and not more + * than 2 seconds (SFC_RX_QFLUSH_POLL_WAIT_MS multiplied + * by SFC_RX_QFLUSH_POLL_ATTEMPTS). + */ + wait_count = 0; + do { + rte_delay_ms(SFC_RX_QFLUSH_POLL_WAIT_MS); + sfc_ev_qpoll(rxq->evq); + } while ((rxq_info->state & SFC_RXQ_FLUSHING) && + (wait_count++ < SFC_RX_QFLUSH_POLL_ATTEMPTS)); + + if (rxq_info->state & SFC_RXQ_FLUSHING) + sfc_err(sa, "RxQ %u flush timed out", sw_index); + + if (rxq_info->state & SFC_RXQ_FLUSH_FAILED) + sfc_err(sa, "RxQ %u flush failed", sw_index); + + if (rxq_info->state & SFC_RXQ_FLUSHED) + sfc_notice(sa, "RxQ %u flushed", sw_index); + } + + sa->priv.dp_rx->qpurge(rxq_info->dp); +} + +static int +sfc_rx_default_rxq_set_filter(struct sfc_adapter *sa, struct sfc_rxq *rxq) +{ + struct sfc_rss *rss = &sfc_sa2shared(sa)->rss; + boolean_t need_rss = (rss->channels > 0) ? 
B_TRUE : B_FALSE; + struct sfc_port *port = &sa->port; + int rc; + + /* + * If promiscuous or all-multicast mode has been requested, setting + * filter for the default Rx queue might fail, in particular, while + * running over PCI function which is not a member of corresponding + * privilege groups; if this occurs, few iterations will be made to + * repeat this step without promiscuous and all-multicast flags set + */ +retry: + rc = efx_mac_filter_default_rxq_set(sa->nic, rxq->common, need_rss); + if (rc == 0) + return 0; + else if (rc != EOPNOTSUPP) + return rc; + + if (port->promisc) { + sfc_warn(sa, "promiscuous mode has been requested, " + "but the HW rejects it"); + sfc_warn(sa, "promiscuous mode will be disabled"); + + port->promisc = B_FALSE; + sa->eth_dev->data->promiscuous = 0; + rc = sfc_set_rx_mode_unchecked(sa); + if (rc != 0) + return rc; + + goto retry; + } + + if (port->allmulti) { + sfc_warn(sa, "all-multicast mode has been requested, " + "but the HW rejects it"); + sfc_warn(sa, "all-multicast mode will be disabled"); + + port->allmulti = B_FALSE; + sa->eth_dev->data->all_multicast = 0; + rc = sfc_set_rx_mode_unchecked(sa); + if (rc != 0) + return rc; + + goto retry; + } + + return rc; +} + +int +sfc_rx_qstart(struct sfc_adapter *sa, unsigned int sw_index) +{ + struct sfc_rxq_info *rxq_info; + struct sfc_rxq *rxq; + struct sfc_evq *evq; + int rc; + + sfc_log_init(sa, "sw_index=%u", sw_index); + + SFC_ASSERT(sw_index < sfc_sa2shared(sa)->rxq_count); + + rxq_info = &sfc_sa2shared(sa)->rxq_info[sw_index]; + SFC_ASSERT(rxq_info->state == SFC_RXQ_INITIALIZED); + + rxq = &sa->rxq_ctrl[sw_index]; + evq = rxq->evq; + + rc = sfc_ev_qstart(evq, sfc_evq_index_by_rxq_sw_index(sa, sw_index)); + if (rc != 0) + goto fail_ev_qstart; + + switch (rxq_info->type) { + case EFX_RXQ_TYPE_DEFAULT: + rc = efx_rx_qcreate(sa->nic, rxq->hw_index, 0, rxq_info->type, + rxq->buf_size, + &rxq->mem, rxq_info->entries, 0 /* not used on EF10 */, + rxq_info->type_flags, evq->common, &rxq->common); + break; + case EFX_RXQ_TYPE_ES_SUPER_BUFFER: { + struct rte_mempool *mp = rxq_info->refill_mb_pool; + struct rte_mempool_info mp_info; + + rc = rte_mempool_ops_get_info(mp, &mp_info); + if (rc != 0) { + /* Positive errno is used in the driver */ + rc = -rc; + goto fail_mp_get_info; + } + if (mp_info.contig_block_size <= 0) { + rc = EINVAL; + goto fail_bad_contig_block_size; + } + rc = efx_rx_qcreate_es_super_buffer(sa->nic, rxq->hw_index, 0, + mp_info.contig_block_size, rxq->buf_size, + mp->header_size + mp->elt_size + mp->trailer_size, + sa->rxd_wait_timeout_ns, + &rxq->mem, rxq_info->entries, rxq_info->type_flags, + evq->common, &rxq->common); + break; + } + default: + rc = ENOTSUP; + } + if (rc != 0) + goto fail_rx_qcreate; + + efx_rx_qenable(rxq->common); + + rc = sa->priv.dp_rx->qstart(rxq_info->dp, evq->read_ptr); + if (rc != 0) + goto fail_dp_qstart; + + rxq_info->state |= SFC_RXQ_STARTED; + + if (sw_index == 0 && !sfc_sa2shared(sa)->isolated) { + rc = sfc_rx_default_rxq_set_filter(sa, rxq); + if (rc != 0) + goto fail_mac_filter_default_rxq_set; + } + + /* It seems to be used by DPDK for debug purposes only ('rte_ether') */ + sa->eth_dev->data->rx_queue_state[sw_index] = + RTE_ETH_QUEUE_STATE_STARTED; + + return 0; + +fail_mac_filter_default_rxq_set: + sfc_rx_qflush(sa, sw_index); + sa->priv.dp_rx->qstop(rxq_info->dp, &rxq->evq->read_ptr); + rxq_info->state = SFC_RXQ_INITIALIZED; + +fail_dp_qstart: + efx_rx_qdestroy(rxq->common); + +fail_rx_qcreate: +fail_bad_contig_block_size: +fail_mp_get_info: + 
sfc_ev_qstop(evq); + +fail_ev_qstart: + return rc; +} + +void +sfc_rx_qstop(struct sfc_adapter *sa, unsigned int sw_index) +{ + struct sfc_rxq_info *rxq_info; + struct sfc_rxq *rxq; + + sfc_log_init(sa, "sw_index=%u", sw_index); + + SFC_ASSERT(sw_index < sfc_sa2shared(sa)->rxq_count); + + rxq_info = &sfc_sa2shared(sa)->rxq_info[sw_index]; + + if (rxq_info->state == SFC_RXQ_INITIALIZED) + return; + SFC_ASSERT(rxq_info->state & SFC_RXQ_STARTED); + + /* It seems to be used by DPDK for debug purposes only ('rte_ether') */ + sa->eth_dev->data->rx_queue_state[sw_index] = + RTE_ETH_QUEUE_STATE_STOPPED; + + rxq = &sa->rxq_ctrl[sw_index]; + sa->priv.dp_rx->qstop(rxq_info->dp, &rxq->evq->read_ptr); + + if (sw_index == 0) + efx_mac_filter_default_rxq_clear(sa->nic); + + sfc_rx_qflush(sa, sw_index); + + rxq_info->state = SFC_RXQ_INITIALIZED; + + efx_rx_qdestroy(rxq->common); + + sfc_ev_qstop(rxq->evq); +} + +static uint64_t +sfc_rx_get_offload_mask(struct sfc_adapter *sa) +{ + const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic); + uint64_t no_caps = 0; + + if (encp->enc_tunnel_encapsulations_supported == 0) + no_caps |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM; + + return ~no_caps; +} + +uint64_t +sfc_rx_get_dev_offload_caps(struct sfc_adapter *sa) +{ + uint64_t caps = sa->priv.dp_rx->dev_offload_capa; + + caps |= DEV_RX_OFFLOAD_JUMBO_FRAME; + + return caps & sfc_rx_get_offload_mask(sa); +} + +uint64_t +sfc_rx_get_queue_offload_caps(struct sfc_adapter *sa) +{ + return sa->priv.dp_rx->queue_offload_capa & sfc_rx_get_offload_mask(sa); +} + +static int +sfc_rx_qcheck_conf(struct sfc_adapter *sa, unsigned int rxq_max_fill_level, + const struct rte_eth_rxconf *rx_conf, + __rte_unused uint64_t offloads) +{ + int rc = 0; + + if (rx_conf->rx_thresh.pthresh != 0 || + rx_conf->rx_thresh.hthresh != 0 || + rx_conf->rx_thresh.wthresh != 0) { + sfc_warn(sa, + "RxQ prefetch/host/writeback thresholds are not supported"); + } + + if (rx_conf->rx_free_thresh > rxq_max_fill_level) { + sfc_err(sa, + "RxQ free threshold too large: %u vs maximum %u", + rx_conf->rx_free_thresh, rxq_max_fill_level); + rc = EINVAL; + } + + if (rx_conf->rx_drop_en == 0) { + sfc_err(sa, "RxQ drop disable is not supported"); + rc = EINVAL; + } + + return rc; +} + +static unsigned int +sfc_rx_mbuf_data_alignment(struct rte_mempool *mb_pool) +{ + uint32_t data_off; + uint32_t order; + + /* The mbuf object itself is always cache line aligned */ + order = rte_bsf32(RTE_CACHE_LINE_SIZE); + + /* Data offset from mbuf object start */ + data_off = sizeof(struct rte_mbuf) + rte_pktmbuf_priv_size(mb_pool) + + RTE_PKTMBUF_HEADROOM; + + order = MIN(order, rte_bsf32(data_off)); + + return 1u << order; +} + +static uint16_t +sfc_rx_mb_pool_buf_size(struct sfc_adapter *sa, struct rte_mempool *mb_pool) +{ + const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic); + const uint32_t nic_align_start = MAX(1, encp->enc_rx_buf_align_start); + const uint32_t nic_align_end = MAX(1, encp->enc_rx_buf_align_end); + uint16_t buf_size; + unsigned int buf_aligned; + unsigned int start_alignment; + unsigned int end_padding_alignment; + + /* Below it is assumed that both alignments are power of 2 */ + SFC_ASSERT(rte_is_power_of_2(nic_align_start)); + SFC_ASSERT(rte_is_power_of_2(nic_align_end)); + + /* + * mbuf is always cache line aligned, double-check + * that it meets rx buffer start alignment requirements. 
+ */ + + /* Start from mbuf pool data room size */ + buf_size = rte_pktmbuf_data_room_size(mb_pool); + + /* Remove headroom */ + if (buf_size <= RTE_PKTMBUF_HEADROOM) { + sfc_err(sa, + "RxQ mbuf pool %s object data room size %u is smaller than headroom %u", + mb_pool->name, buf_size, RTE_PKTMBUF_HEADROOM); + return 0; + } + buf_size -= RTE_PKTMBUF_HEADROOM; + + /* Calculate guaranteed data start alignment */ + buf_aligned = sfc_rx_mbuf_data_alignment(mb_pool); + + /* Reserve space for start alignment */ + if (buf_aligned < nic_align_start) { + start_alignment = nic_align_start - buf_aligned; + if (buf_size <= start_alignment) { + sfc_err(sa, + "RxQ mbuf pool %s object data room size %u is insufficient for headroom %u and buffer start alignment %u required by NIC", + mb_pool->name, + rte_pktmbuf_data_room_size(mb_pool), + RTE_PKTMBUF_HEADROOM, start_alignment); + return 0; + } + buf_aligned = nic_align_start; + buf_size -= start_alignment; + } else { + start_alignment = 0; + } + + /* Make sure that end padding does not write beyond the buffer */ + if (buf_aligned < nic_align_end) { + /* + * Estimate space which can be lost. If guaranteed buffer + * size is odd, lost space is (nic_align_end - 1). More + * accurate formula is below. + */ + end_padding_alignment = nic_align_end - + MIN(buf_aligned, 1u << (rte_bsf32(buf_size) - 1)); + if (buf_size <= end_padding_alignment) { + sfc_err(sa, + "RxQ mbuf pool %s object data room size %u is insufficient for headroom %u, buffer start alignment %u and end padding alignment %u required by NIC", + mb_pool->name, + rte_pktmbuf_data_room_size(mb_pool), + RTE_PKTMBUF_HEADROOM, start_alignment, + end_padding_alignment); + return 0; + } + buf_size -= end_padding_alignment; + } else { + /* + * Start is aligned the same or better than end, + * just align length.
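+ * For instance, assuming nic_align_end is 64, the EFX_P2ALIGN() below trims a 2000 byte buffer down to 1984 bytes (2000 & ~63u), so the end padding written by the NIC stays inside the mbuf data room.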
+ */ + buf_size = EFX_P2ALIGN(uint32_t, buf_size, nic_align_end); + } + + return buf_size; +} + +int +sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index, + uint16_t nb_rx_desc, unsigned int socket_id, + const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *mb_pool) +{ + const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic); + struct sfc_rss *rss = &sfc_sa2shared(sa)->rss; + int rc; + unsigned int rxq_entries; + unsigned int evq_entries; + unsigned int rxq_max_fill_level; + uint64_t offloads; + uint16_t buf_size; + struct sfc_rxq_info *rxq_info; + struct sfc_evq *evq; + struct sfc_rxq *rxq; + struct sfc_dp_rx_qcreate_info info; + struct sfc_dp_rx_hw_limits hw_limits; + uint16_t rx_free_thresh; + const char *error; + + memset(&hw_limits, 0, sizeof(hw_limits)); + hw_limits.rxq_max_entries = sa->rxq_max_entries; + hw_limits.rxq_min_entries = sa->rxq_min_entries; + hw_limits.evq_max_entries = sa->evq_max_entries; + hw_limits.evq_min_entries = sa->evq_min_entries; + + rc = sa->priv.dp_rx->qsize_up_rings(nb_rx_desc, &hw_limits, mb_pool, + &rxq_entries, &evq_entries, + &rxq_max_fill_level); + if (rc != 0) + goto fail_size_up_rings; + SFC_ASSERT(rxq_entries >= sa->rxq_min_entries); + SFC_ASSERT(rxq_entries <= sa->rxq_max_entries); + SFC_ASSERT(rxq_max_fill_level <= nb_rx_desc); + + offloads = rx_conf->offloads | + sa->eth_dev->data->dev_conf.rxmode.offloads; + rc = sfc_rx_qcheck_conf(sa, rxq_max_fill_level, rx_conf, offloads); + if (rc != 0) + goto fail_bad_conf; + + buf_size = sfc_rx_mb_pool_buf_size(sa, mb_pool); + if (buf_size == 0) { + sfc_err(sa, "RxQ %u mbuf pool object size is too small", + sw_index); + rc = EINVAL; + goto fail_bad_conf; + } + + if (!sfc_rx_check_scatter(sa->port.pdu, buf_size, + encp->enc_rx_prefix_size, + (offloads & DEV_RX_OFFLOAD_SCATTER), + &error)) { + sfc_err(sa, "RxQ %u MTU check failed: %s", sw_index, error); + sfc_err(sa, "RxQ %u calculated Rx buffer size is %u vs " + "PDU size %u plus Rx prefix %u bytes", + sw_index, buf_size, (unsigned int)sa->port.pdu, + encp->enc_rx_prefix_size); + rc = EINVAL; + goto fail_bad_conf; + } + + SFC_ASSERT(sw_index < sfc_sa2shared(sa)->rxq_count); + rxq_info = &sfc_sa2shared(sa)->rxq_info[sw_index]; + + SFC_ASSERT(rxq_entries <= rxq_info->max_entries); + rxq_info->entries = rxq_entries; + + if (sa->priv.dp_rx->dp.hw_fw_caps & SFC_DP_HW_FW_CAP_RX_ES_SUPER_BUFFER) + rxq_info->type = EFX_RXQ_TYPE_ES_SUPER_BUFFER; + else + rxq_info->type = EFX_RXQ_TYPE_DEFAULT; + + rxq_info->type_flags = + (offloads & DEV_RX_OFFLOAD_SCATTER) ? + EFX_RXQ_FLAG_SCATTER : EFX_RXQ_FLAG_NONE; + + if ((encp->enc_tunnel_encapsulations_supported != 0) && + (sfc_dp_rx_offload_capa(sa->priv.dp_rx) & + DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) != 0) + rxq_info->type_flags |= EFX_RXQ_FLAG_INNER_CLASSES; + + rc = sfc_ev_qinit(sa, SFC_EVQ_TYPE_RX, sw_index, + evq_entries, socket_id, &evq); + if (rc != 0) + goto fail_ev_qinit; + + rxq = &sa->rxq_ctrl[sw_index]; + rxq->evq = evq; + rxq->hw_index = sw_index; + /* + * If Rx refill threshold is specified (its value is non zero) in + * Rx configuration, use specified value. Otherwise use 1/8 of + * the Rx descriptors number as the default. It allows to keep + * Rx ring full-enough and does not refill too aggressive if + * packet rate is high. + * + * Since PMD refills in bulks waiting for full bulk may be + * refilled (basically round down), it is better to round up + * here to mitigate it a bit. + */ + rx_free_thresh = (rx_conf->rx_free_thresh != 0) ? 
+ rx_conf->rx_free_thresh : EFX_DIV_ROUND_UP(nb_rx_desc, 8); + /* Rx refill threshold cannot be smaller than refill bulk */ + rxq_info->refill_threshold = + RTE_MAX(rx_free_thresh, SFC_RX_REFILL_BULK); + rxq_info->refill_mb_pool = mb_pool; + rxq->buf_size = buf_size; + + rc = sfc_dma_alloc(sa, "rxq", sw_index, + efx_rxq_size(sa->nic, rxq_info->entries), + socket_id, &rxq->mem); + if (rc != 0) + goto fail_dma_alloc; + + memset(&info, 0, sizeof(info)); + info.refill_mb_pool = rxq_info->refill_mb_pool; + info.max_fill_level = rxq_max_fill_level; + info.refill_threshold = rxq_info->refill_threshold; + info.buf_size = buf_size; + info.batch_max = encp->enc_rx_batch_max; + info.prefix_size = encp->enc_rx_prefix_size; + + if (rss->hash_support == EFX_RX_HASH_AVAILABLE && rss->channels > 0) + info.flags |= SFC_RXQ_FLAG_RSS_HASH; + + info.rxq_entries = rxq_info->entries; + info.rxq_hw_ring = rxq->mem.esm_base; + info.evq_hw_index = sfc_evq_index_by_rxq_sw_index(sa, sw_index); + info.evq_entries = evq_entries; + info.evq_hw_ring = evq->mem.esm_base; + info.hw_index = rxq->hw_index; + info.mem_bar = sa->mem_bar.esb_base; + info.vi_window_shift = encp->enc_vi_window_shift; + + rc = sa->priv.dp_rx->qcreate(sa->eth_dev->data->port_id, sw_index, + &RTE_ETH_DEV_TO_PCI(sa->eth_dev)->addr, + socket_id, &info, &rxq_info->dp); + if (rc != 0) + goto fail_dp_rx_qcreate; + + evq->dp_rxq = rxq_info->dp; + + rxq_info->state = SFC_RXQ_INITIALIZED; + + rxq_info->deferred_start = (rx_conf->rx_deferred_start != 0); + + return 0; + +fail_dp_rx_qcreate: + sfc_dma_free(sa, &rxq->mem); + +fail_dma_alloc: + sfc_ev_qfini(evq); + +fail_ev_qinit: + rxq_info->entries = 0; + +fail_bad_conf: +fail_size_up_rings: + sfc_log_init(sa, "failed %d", rc); + return rc; +} + +void +sfc_rx_qfini(struct sfc_adapter *sa, unsigned int sw_index) +{ + struct sfc_rxq_info *rxq_info; + struct sfc_rxq *rxq; + + SFC_ASSERT(sw_index < sfc_sa2shared(sa)->rxq_count); + sa->eth_dev->data->rx_queues[sw_index] = NULL; + + rxq_info = &sfc_sa2shared(sa)->rxq_info[sw_index]; + + SFC_ASSERT(rxq_info->state == SFC_RXQ_INITIALIZED); + + sa->priv.dp_rx->qdestroy(rxq_info->dp); + rxq_info->dp = NULL; + + rxq_info->state &= ~SFC_RXQ_INITIALIZED; + rxq_info->entries = 0; + + rxq = &sa->rxq_ctrl[sw_index]; + + sfc_dma_free(sa, &rxq->mem); + + sfc_ev_qfini(rxq->evq); + rxq->evq = NULL; +} + +/* + * Mapping between RTE RSS hash functions and their EFX counterparts. 
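+ * Each entry pairs a set of ETH_RSS_* flags with the EFX hash classes that implement them; the first entry below, for example, maps ETH_RSS_NONFRAG_IPV4_TCP to a 4-tuple hash over the IPv4 TCP class. Classes not supported by the adapter are masked out later in sfc_rx_hash_init().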
+ */ +static const struct sfc_rss_hf_rte_to_efx sfc_rss_hf_map[] = { + { ETH_RSS_NONFRAG_IPV4_TCP, + EFX_RX_HASH(IPV4_TCP, 4TUPLE) }, + { ETH_RSS_NONFRAG_IPV4_UDP, + EFX_RX_HASH(IPV4_UDP, 4TUPLE) }, + { ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_IPV6_TCP_EX, + EFX_RX_HASH(IPV6_TCP, 4TUPLE) }, + { ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_IPV6_UDP_EX, + EFX_RX_HASH(IPV6_UDP, 4TUPLE) }, + { ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | ETH_RSS_NONFRAG_IPV4_OTHER, + EFX_RX_HASH(IPV4_TCP, 2TUPLE) | EFX_RX_HASH(IPV4_UDP, 2TUPLE) | + EFX_RX_HASH(IPV4, 2TUPLE) }, + { ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_OTHER | + ETH_RSS_IPV6_EX, + EFX_RX_HASH(IPV6_TCP, 2TUPLE) | EFX_RX_HASH(IPV6_UDP, 2TUPLE) | + EFX_RX_HASH(IPV6, 2TUPLE) } +}; + +static efx_rx_hash_type_t +sfc_rx_hash_types_mask_supp(efx_rx_hash_type_t hash_type, + unsigned int *hash_type_flags_supported, + unsigned int nb_hash_type_flags_supported) +{ + efx_rx_hash_type_t hash_type_masked = 0; + unsigned int i, j; + + for (i = 0; i < nb_hash_type_flags_supported; ++i) { + unsigned int class_tuple_lbn[] = { + EFX_RX_CLASS_IPV4_TCP_LBN, + EFX_RX_CLASS_IPV4_UDP_LBN, + EFX_RX_CLASS_IPV4_LBN, + EFX_RX_CLASS_IPV6_TCP_LBN, + EFX_RX_CLASS_IPV6_UDP_LBN, + EFX_RX_CLASS_IPV6_LBN + }; + + for (j = 0; j < RTE_DIM(class_tuple_lbn); ++j) { + unsigned int tuple_mask = EFX_RX_CLASS_HASH_4TUPLE; + unsigned int flag; + + tuple_mask <<= class_tuple_lbn[j]; + flag = hash_type & tuple_mask; + + if (flag == hash_type_flags_supported[i]) + hash_type_masked |= flag; + } + } + + return hash_type_masked; +} + +int +sfc_rx_hash_init(struct sfc_adapter *sa) +{ + struct sfc_rss *rss = &sfc_sa2shared(sa)->rss; + const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic); + uint32_t alg_mask = encp->enc_rx_scale_hash_alg_mask; + efx_rx_hash_alg_t alg; + unsigned int flags_supp[EFX_RX_HASH_NFLAGS]; + unsigned int nb_flags_supp; + struct sfc_rss_hf_rte_to_efx *hf_map; + struct sfc_rss_hf_rte_to_efx *entry; + efx_rx_hash_type_t efx_hash_types; + unsigned int i; + int rc; + + if (alg_mask & (1U << EFX_RX_HASHALG_TOEPLITZ)) + alg = EFX_RX_HASHALG_TOEPLITZ; + else if (alg_mask & (1U << EFX_RX_HASHALG_PACKED_STREAM)) + alg = EFX_RX_HASHALG_PACKED_STREAM; + else + return EINVAL; + + rc = efx_rx_scale_hash_flags_get(sa->nic, alg, flags_supp, + RTE_DIM(flags_supp), &nb_flags_supp); + if (rc != 0) + return rc; + + hf_map = rte_calloc_socket("sfc-rss-hf-map", + RTE_DIM(sfc_rss_hf_map), + sizeof(*hf_map), 0, sa->socket_id); + if (hf_map == NULL) + return ENOMEM; + + entry = hf_map; + efx_hash_types = 0; + for (i = 0; i < RTE_DIM(sfc_rss_hf_map); ++i) { + efx_rx_hash_type_t ht; + + ht = sfc_rx_hash_types_mask_supp(sfc_rss_hf_map[i].efx, + flags_supp, nb_flags_supp); + if (ht != 0) { + entry->rte = sfc_rss_hf_map[i].rte; + entry->efx = ht; + efx_hash_types |= ht; + ++entry; + } + } + + rss->hash_alg = alg; + rss->hf_map_nb_entries = (unsigned int)(entry - hf_map); + rss->hf_map = hf_map; + rss->hash_types = efx_hash_types; + + return 0; +} + +void +sfc_rx_hash_fini(struct sfc_adapter *sa) +{ + struct sfc_rss *rss = &sfc_sa2shared(sa)->rss; + + rte_free(rss->hf_map); +} + +int +sfc_rx_hf_rte_to_efx(struct sfc_adapter *sa, uint64_t rte, + efx_rx_hash_type_t *efx) +{ + struct sfc_rss *rss = &sfc_sa2shared(sa)->rss; + efx_rx_hash_type_t hash_types = 0; + unsigned int i; + + for (i = 0; i < rss->hf_map_nb_entries; ++i) { + uint64_t rte_mask = rss->hf_map[i].rte; + + if ((rte & rte_mask) != 0) { + rte &= ~rte_mask; + hash_types |= rss->hf_map[i].efx; + } + } + + if (rte != 0) { + sfc_err(sa, "unsupported hash 
functions requested"); + return EINVAL; + } + + *efx = hash_types; + + return 0; +} + +uint64_t +sfc_rx_hf_efx_to_rte(struct sfc_rss *rss, efx_rx_hash_type_t efx) +{ + uint64_t rte = 0; + unsigned int i; + + for (i = 0; i < rss->hf_map_nb_entries; ++i) { + efx_rx_hash_type_t hash_type = rss->hf_map[i].efx; + + if ((efx & hash_type) == hash_type) + rte |= rss->hf_map[i].rte; + } + + return rte; +} + +static int +sfc_rx_process_adv_conf_rss(struct sfc_adapter *sa, + struct rte_eth_rss_conf *conf) +{ + struct sfc_rss *rss = &sfc_sa2shared(sa)->rss; + efx_rx_hash_type_t efx_hash_types = rss->hash_types; + uint64_t rss_hf = sfc_rx_hf_efx_to_rte(rss, efx_hash_types); + int rc; + + if (rss->context_type != EFX_RX_SCALE_EXCLUSIVE) { + if ((conf->rss_hf != 0 && conf->rss_hf != rss_hf) || + conf->rss_key != NULL) + return EINVAL; + } + + if (conf->rss_hf != 0) { + rc = sfc_rx_hf_rte_to_efx(sa, conf->rss_hf, &efx_hash_types); + if (rc != 0) + return rc; + } + + if (conf->rss_key != NULL) { + if (conf->rss_key_len != sizeof(rss->key)) { + sfc_err(sa, "RSS key size is wrong (should be %zu)", + sizeof(rss->key)); + return EINVAL; + } + rte_memcpy(rss->key, conf->rss_key, sizeof(rss->key)); + } + + rss->hash_types = efx_hash_types; + + return 0; +} + +static int +sfc_rx_rss_config(struct sfc_adapter *sa) +{ + struct sfc_rss *rss = &sfc_sa2shared(sa)->rss; + int rc = 0; + + if (rss->channels > 0) { + rc = efx_rx_scale_mode_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT, + rss->hash_alg, rss->hash_types, + B_TRUE); + if (rc != 0) + goto finish; + + rc = efx_rx_scale_key_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT, + rss->key, sizeof(rss->key)); + if (rc != 0) + goto finish; + + rc = efx_rx_scale_tbl_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT, + rss->tbl, RTE_DIM(rss->tbl)); + } + +finish: + return rc; +} + +int +sfc_rx_start(struct sfc_adapter *sa) +{ + struct sfc_adapter_shared * const sas = sfc_sa2shared(sa); + unsigned int sw_index; + int rc; + + sfc_log_init(sa, "rxq_count=%u", sas->rxq_count); + + rc = efx_rx_init(sa->nic); + if (rc != 0) + goto fail_rx_init; + + rc = sfc_rx_rss_config(sa); + if (rc != 0) + goto fail_rss_config; + + for (sw_index = 0; sw_index < sas->rxq_count; ++sw_index) { + if (sas->rxq_info[sw_index].state == SFC_RXQ_INITIALIZED && + (!sas->rxq_info[sw_index].deferred_start || + sas->rxq_info[sw_index].deferred_started)) { + rc = sfc_rx_qstart(sa, sw_index); + if (rc != 0) + goto fail_rx_qstart; + } + } + + return 0; + +fail_rx_qstart: + while (sw_index-- > 0) + sfc_rx_qstop(sa, sw_index); + +fail_rss_config: + efx_rx_fini(sa->nic); + +fail_rx_init: + sfc_log_init(sa, "failed %d", rc); + return rc; +} + +void +sfc_rx_stop(struct sfc_adapter *sa) +{ + struct sfc_adapter_shared * const sas = sfc_sa2shared(sa); + unsigned int sw_index; + + sfc_log_init(sa, "rxq_count=%u", sas->rxq_count); + + sw_index = sas->rxq_count; + while (sw_index-- > 0) { + if (sas->rxq_info[sw_index].state & SFC_RXQ_STARTED) + sfc_rx_qstop(sa, sw_index); + } + + efx_rx_fini(sa->nic); +} + +static int +sfc_rx_qinit_info(struct sfc_adapter *sa, unsigned int sw_index) +{ + struct sfc_adapter_shared * const sas = sfc_sa2shared(sa); + struct sfc_rxq_info *rxq_info = &sas->rxq_info[sw_index]; + const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic); + unsigned int max_entries; + + max_entries = encp->enc_rxq_max_ndescs; + SFC_ASSERT(rte_is_power_of_2(max_entries)); + + rxq_info->max_entries = max_entries; + + return 0; +} + +static int +sfc_rx_check_mode(struct sfc_adapter *sa, struct rte_eth_rxmode *rxmode) +{ + struct sfc_adapter_shared 
* const sas = sfc_sa2shared(sa); + uint64_t offloads_supported = sfc_rx_get_dev_offload_caps(sa) | + sfc_rx_get_queue_offload_caps(sa); + struct sfc_rss *rss = &sas->rss; + int rc = 0; + + switch (rxmode->mq_mode) { + case ETH_MQ_RX_NONE: + /* No special checks are required */ + break; + case ETH_MQ_RX_RSS: + if (rss->context_type == EFX_RX_SCALE_UNAVAILABLE) { + sfc_err(sa, "RSS is not available"); + rc = EINVAL; + } + break; + default: + sfc_err(sa, "Rx multi-queue mode %u not supported", + rxmode->mq_mode); + rc = EINVAL; + } + + /* + * Requested offloads are validated against supported by ethdev, + * so unsupported offloads cannot be added as the result of + * below check. + */ + if ((rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM) != + (offloads_supported & DEV_RX_OFFLOAD_CHECKSUM)) { + sfc_warn(sa, "Rx checksum offloads cannot be disabled - always on (IPv4/TCP/UDP)"); + rxmode->offloads |= DEV_RX_OFFLOAD_CHECKSUM; + } + + if ((offloads_supported & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) && + (~rxmode->offloads & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM)) { + sfc_warn(sa, "Rx outer IPv4 checksum offload cannot be disabled - always on"); + rxmode->offloads |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM; + } + + if ((offloads_supported & DEV_RX_OFFLOAD_RSS_HASH) && + (rxmode->mq_mode & ETH_MQ_RX_RSS_FLAG)) + rxmode->offloads |= DEV_RX_OFFLOAD_RSS_HASH; + + return rc; +} + +/** + * Destroy excess queues that are no longer needed after reconfiguration + * or complete close. + */ +static void +sfc_rx_fini_queues(struct sfc_adapter *sa, unsigned int nb_rx_queues) +{ + struct sfc_adapter_shared * const sas = sfc_sa2shared(sa); + int sw_index; + + SFC_ASSERT(nb_rx_queues <= sas->rxq_count); + + sw_index = sas->rxq_count; + while (--sw_index >= (int)nb_rx_queues) { + if (sas->rxq_info[sw_index].state & SFC_RXQ_INITIALIZED) + sfc_rx_qfini(sa, sw_index); + } + + sas->rxq_count = nb_rx_queues; +} + +/** + * Initialize Rx subsystem. + * + * Called at device (re)configuration stage when number of receive queues is + * specified together with other device level receive configuration. + * + * It should be used to allocate NUMA-unaware resources. + */ +int +sfc_rx_configure(struct sfc_adapter *sa) +{ + struct sfc_adapter_shared * const sas = sfc_sa2shared(sa); + struct sfc_rss *rss = &sas->rss; + struct rte_eth_conf *dev_conf = &sa->eth_dev->data->dev_conf; + const unsigned int nb_rx_queues = sa->eth_dev->data->nb_rx_queues; + int rc; + + sfc_log_init(sa, "nb_rx_queues=%u (old %u)", + nb_rx_queues, sas->rxq_count); + + rc = sfc_rx_check_mode(sa, &dev_conf->rxmode); + if (rc != 0) + goto fail_check_mode; + + if (nb_rx_queues == sas->rxq_count) + goto configure_rss; + + if (sas->rxq_info == NULL) { + rc = ENOMEM; + sas->rxq_info = rte_calloc_socket("sfc-rxqs", nb_rx_queues, + sizeof(sas->rxq_info[0]), 0, + sa->socket_id); + if (sas->rxq_info == NULL) + goto fail_rxqs_alloc; + + /* + * Allocate primary process only RxQ control from heap + * since it should not be shared. 
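+ * (rxq_info above is placed in DPDK shared memory via rte_calloc_socket() so that secondary processes can access it, whereas rxq_ctrl is allocated with plain calloc() and remains private to the primary process).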
+ */ + rc = ENOMEM; + sa->rxq_ctrl = calloc(nb_rx_queues, sizeof(sa->rxq_ctrl[0])); + if (sa->rxq_ctrl == NULL) + goto fail_rxqs_ctrl_alloc; + } else { + struct sfc_rxq_info *new_rxq_info; + struct sfc_rxq *new_rxq_ctrl; + + if (nb_rx_queues < sas->rxq_count) + sfc_rx_fini_queues(sa, nb_rx_queues); + + rc = ENOMEM; + new_rxq_info = + rte_realloc(sas->rxq_info, + nb_rx_queues * sizeof(sas->rxq_info[0]), 0); + if (new_rxq_info == NULL && nb_rx_queues > 0) + goto fail_rxqs_realloc; + + rc = ENOMEM; + new_rxq_ctrl = realloc(sa->rxq_ctrl, + nb_rx_queues * sizeof(sa->rxq_ctrl[0])); + if (new_rxq_ctrl == NULL && nb_rx_queues > 0) + goto fail_rxqs_ctrl_realloc; + + sas->rxq_info = new_rxq_info; + sa->rxq_ctrl = new_rxq_ctrl; + if (nb_rx_queues > sas->rxq_count) { + memset(&sas->rxq_info[sas->rxq_count], 0, + (nb_rx_queues - sas->rxq_count) * + sizeof(sas->rxq_info[0])); + memset(&sa->rxq_ctrl[sas->rxq_count], 0, + (nb_rx_queues - sas->rxq_count) * + sizeof(sa->rxq_ctrl[0])); + } + } + + while (sas->rxq_count < nb_rx_queues) { + rc = sfc_rx_qinit_info(sa, sas->rxq_count); + if (rc != 0) + goto fail_rx_qinit_info; + + sas->rxq_count++; + } + +configure_rss: + rss->channels = (dev_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) ? + MIN(sas->rxq_count, EFX_MAXRSS) : 0; + + if (rss->channels > 0) { + struct rte_eth_rss_conf *adv_conf_rss; + unsigned int sw_index; + + for (sw_index = 0; sw_index < EFX_RSS_TBL_SIZE; ++sw_index) + rss->tbl[sw_index] = sw_index % rss->channels; + + adv_conf_rss = &dev_conf->rx_adv_conf.rss_conf; + rc = sfc_rx_process_adv_conf_rss(sa, adv_conf_rss); + if (rc != 0) + goto fail_rx_process_adv_conf_rss; + } + + return 0; + +fail_rx_process_adv_conf_rss: +fail_rx_qinit_info: +fail_rxqs_ctrl_realloc: +fail_rxqs_realloc: +fail_rxqs_ctrl_alloc: +fail_rxqs_alloc: + sfc_rx_close(sa); + +fail_check_mode: + sfc_log_init(sa, "failed %d", rc); + return rc; +} + +/** + * Shutdown Rx subsystem. + * + * Called at device close stage, for example, before device shutdown. + */ +void +sfc_rx_close(struct sfc_adapter *sa) +{ + struct sfc_rss *rss = &sfc_sa2shared(sa)->rss; + + sfc_rx_fini_queues(sa, 0); + + rss->channels = 0; + + free(sa->rxq_ctrl); + sa->rxq_ctrl = NULL; + + rte_free(sfc_sa2shared(sa)->rxq_info); + sfc_sa2shared(sa)->rxq_info = NULL; +} diff --git a/src/spdk/dpdk/drivers/net/sfc/sfc_rx.h b/src/spdk/dpdk/drivers/net/sfc/sfc_rx.h new file mode 100644 index 000000000..697ea29d6 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/sfc_rx.h @@ -0,0 +1,154 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2016-2019 Solarflare Communications Inc. + * + * This software was jointly developed between OKTET Labs (under contract + * for Solarflare) and Solarflare Communications, Inc. + */ + +#ifndef _SFC_RX_H +#define _SFC_RX_H + +#include <rte_mbuf.h> +#include <rte_mempool.h> +#include <rte_ethdev_driver.h> + +#include "efx.h" + +#include "sfc_dp_rx.h" + +#ifdef __cplusplus +extern "C" { +#endif + +struct sfc_adapter; +struct sfc_evq; + +/** + * Software Rx descriptor information associated with hardware Rx + * descriptor.
+ */ +struct sfc_efx_rx_sw_desc { + struct rte_mbuf *mbuf; + unsigned int flags; + unsigned int size; +}; + +/** Receive queue state bits */ +enum sfc_rxq_state_bit { + SFC_RXQ_INITIALIZED_BIT = 0, +#define SFC_RXQ_INITIALIZED (1 << SFC_RXQ_INITIALIZED_BIT) + SFC_RXQ_STARTED_BIT, +#define SFC_RXQ_STARTED (1 << SFC_RXQ_STARTED_BIT) + SFC_RXQ_FLUSHING_BIT, +#define SFC_RXQ_FLUSHING (1 << SFC_RXQ_FLUSHING_BIT) + SFC_RXQ_FLUSHED_BIT, +#define SFC_RXQ_FLUSHED (1 << SFC_RXQ_FLUSHED_BIT) + SFC_RXQ_FLUSH_FAILED_BIT, +#define SFC_RXQ_FLUSH_FAILED (1 << SFC_RXQ_FLUSH_FAILED_BIT) +}; + +/** + * Receive queue control primary process-only information. + */ +struct sfc_rxq { + struct sfc_evq *evq; + efx_rxq_t *common; + efsys_mem_t mem; + unsigned int hw_index; + uint16_t buf_size; +}; + +struct sfc_rxq *sfc_rxq_by_dp_rxq(const struct sfc_dp_rxq *dp_rxq); + +/** + * Receive queue information used on libefx-based data path. + * Allocated on the socket specified on the queue setup. + */ +struct sfc_efx_rxq { + /* Used on data path */ + struct sfc_evq *evq; + unsigned int flags; +#define SFC_EFX_RXQ_FLAG_STARTED 0x1 +#define SFC_EFX_RXQ_FLAG_RUNNING 0x2 +#define SFC_EFX_RXQ_FLAG_RSS_HASH 0x4 +#define SFC_EFX_RXQ_FLAG_INTR_EN 0x8 + unsigned int ptr_mask; + unsigned int pending; + unsigned int completed; + uint16_t batch_max; + uint16_t prefix_size; + struct sfc_efx_rx_sw_desc *sw_desc; + + /* Used on refill */ + unsigned int added; + unsigned int pushed; + unsigned int max_fill_level; + unsigned int refill_threshold; + uint16_t buf_size; + struct rte_mempool *refill_mb_pool; + efx_rxq_t *common; + + /* Datapath receive queue anchor */ + struct sfc_dp_rxq dp; +}; + +static inline struct sfc_efx_rxq * +sfc_efx_rxq_by_dp_rxq(struct sfc_dp_rxq *dp_rxq) +{ + return container_of(dp_rxq, struct sfc_efx_rxq, dp); +} + +/** + * Receive queue information used during setup/release only. + * Allocated on the same socket as adapter data. 
+ */ +struct sfc_rxq_info { + unsigned int state; + unsigned int max_entries; + unsigned int entries; + efx_rxq_type_t type; + unsigned int type_flags; + struct sfc_dp_rxq *dp; + boolean_t deferred_start; + boolean_t deferred_started; + unsigned int refill_threshold; + struct rte_mempool *refill_mb_pool; +}; + +struct sfc_rxq_info *sfc_rxq_info_by_dp_rxq(const struct sfc_dp_rxq *dp_rxq); + +int sfc_rx_configure(struct sfc_adapter *sa); +void sfc_rx_close(struct sfc_adapter *sa); +int sfc_rx_start(struct sfc_adapter *sa); +void sfc_rx_stop(struct sfc_adapter *sa); + +int sfc_rx_qinit(struct sfc_adapter *sa, unsigned int rx_queue_id, + uint16_t nb_rx_desc, unsigned int socket_id, + const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *mb_pool); +void sfc_rx_qfini(struct sfc_adapter *sa, unsigned int sw_index); +int sfc_rx_qstart(struct sfc_adapter *sa, unsigned int sw_index); +void sfc_rx_qstop(struct sfc_adapter *sa, unsigned int sw_index); + +uint64_t sfc_rx_get_dev_offload_caps(struct sfc_adapter *sa); +uint64_t sfc_rx_get_queue_offload_caps(struct sfc_adapter *sa); + +void sfc_rx_qflush_done(struct sfc_rxq_info *rxq_info); +void sfc_rx_qflush_failed(struct sfc_rxq_info *rxq_info); + +int sfc_rx_hash_init(struct sfc_adapter *sa); +void sfc_rx_hash_fini(struct sfc_adapter *sa); +int sfc_rx_hf_rte_to_efx(struct sfc_adapter *sa, uint64_t rte, + efx_rx_hash_type_t *efx); +uint64_t sfc_rx_hf_efx_to_rte(struct sfc_rss *rss, efx_rx_hash_type_t efx); +boolean_t sfc_rx_check_scatter(size_t pdu, size_t rx_buf_size, + uint32_t rx_prefix_size, + boolean_t rx_scatter_enabled, + const char **error); + +#ifdef __cplusplus +} +#endif +#endif /* _SFC_RX_H */ diff --git a/src/spdk/dpdk/drivers/net/sfc/sfc_tso.c b/src/spdk/dpdk/drivers/net/sfc/sfc_tso.c new file mode 100644 index 000000000..d6f111989 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/sfc_tso.c @@ -0,0 +1,171 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2016-2019 Solarflare Communications Inc. + * + * This software was jointly developed between OKTET Labs (under contract + * for Solarflare) and Solarflare Communications, Inc. 
+ */ + +#include <rte_ip.h> +#include <rte_tcp.h> + +#include "sfc.h" +#include "sfc_debug.h" +#include "sfc_tx.h" +#include "sfc_ev.h" +#include "sfc_tso.h" + +int +sfc_efx_tso_alloc_tsoh_objs(struct sfc_efx_tx_sw_desc *sw_ring, + unsigned int txq_entries, unsigned int socket_id) +{ + unsigned int i; + + for (i = 0; i < txq_entries; ++i) { + sw_ring[i].tsoh = rte_malloc_socket("sfc-efx-txq-tsoh-obj", + SFC_TSOH_STD_LEN, + RTE_CACHE_LINE_SIZE, + socket_id); + if (sw_ring[i].tsoh == NULL) + goto fail_alloc_tsoh_objs; + } + + return 0; + +fail_alloc_tsoh_objs: + while (i > 0) + rte_free(sw_ring[--i].tsoh); + + return ENOMEM; +} + +void +sfc_efx_tso_free_tsoh_objs(struct sfc_efx_tx_sw_desc *sw_ring, + unsigned int txq_entries) +{ + unsigned int i; + + for (i = 0; i < txq_entries; ++i) { + rte_free(sw_ring[i].tsoh); + sw_ring[i].tsoh = NULL; + } +} + +unsigned int +sfc_tso_prepare_header(uint8_t *tsoh, size_t header_len, + struct rte_mbuf **in_seg, size_t *in_off) +{ + struct rte_mbuf *m = *in_seg; + size_t bytes_to_copy = 0; + size_t bytes_left = header_len; + unsigned int segments_copied = 0; + + do { + bytes_to_copy = MIN(bytes_left, m->data_len); + + rte_memcpy(tsoh, rte_pktmbuf_mtod(m, uint8_t *), + bytes_to_copy); + + bytes_left -= bytes_to_copy; + tsoh += bytes_to_copy; + + if (bytes_left > 0) { + m = m->next; + SFC_ASSERT(m != NULL); + segments_copied++; + } + } while (bytes_left > 0); + + if (bytes_to_copy == m->data_len) { + *in_seg = m->next; + *in_off = 0; + segments_copied++; + } else { + *in_seg = m; + *in_off = bytes_to_copy; + } + + return segments_copied; +} + +int +sfc_efx_tso_do(struct sfc_efx_txq *txq, unsigned int idx, + struct rte_mbuf **in_seg, size_t *in_off, efx_desc_t **pend, + unsigned int *pkt_descs, size_t *pkt_len) +{ + uint8_t *tsoh; + const struct rte_tcp_hdr *th; + efsys_dma_addr_t header_paddr; + uint16_t packet_id = 0; + uint32_t sent_seq; + struct rte_mbuf *m = *in_seg; + size_t nh_off = m->l2_len; /* IP header offset */ + size_t tcph_off = m->l2_len + m->l3_len; /* TCP header offset */ + size_t header_len = m->l2_len + m->l3_len + m->l4_len; + + idx += SFC_EF10_TSO_OPT_DESCS_NUM; + + header_paddr = rte_pktmbuf_iova(m); + + /* + * Sometimes headers may be split across multiple mbufs. In such cases + * we need to glue those pieces and store them in some temporary place. + * Also, packet headers must be contiguous in memory, so that + * they can be referred to with a single DMA descriptor. EF10 has no + * limitations on address boundaries crossing by DMA descriptor data. + */ + if (m->data_len < header_len) { + /* + * Discard a packet if header linearization is needed but + * the header is too big. + * Duplicate Tx prepare check here to avoid corrupting + * memory if Tx prepare is skipped. + */ + if (unlikely(header_len > SFC_TSOH_STD_LEN)) + return EMSGSIZE; + + tsoh = txq->sw_ring[idx & txq->ptr_mask].tsoh; + sfc_tso_prepare_header(tsoh, header_len, in_seg, in_off); + + header_paddr = rte_malloc_virt2iova((void *)tsoh); + } else { + if (m->data_len == header_len) { + *in_off = 0; + *in_seg = m->next; + } else { + *in_off = header_len; + } + + tsoh = rte_pktmbuf_mtod(m, uint8_t *); + } + + /* + * Handle IP header. Tx prepare has debug-only checks that offload flags + * are correctly filled in the TSO mbuf. Use zero IPID if there is no + * IPv4 flag. If the packet is still IPv4, HW will simply start from + * zero IPID.
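+ * sfc_tso_ip4_get_ipid() is a small inline helper (defined in sfc_tso.h) that copies the 16-bit packet_id field found nh_off bytes into the prepared header and converts it from network byte order.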
+ */ + if (m->ol_flags & PKT_TX_IPV4) + packet_id = sfc_tso_ip4_get_ipid(tsoh, nh_off); + + /* Handle TCP header */ + th = (const struct rte_tcp_hdr *)(tsoh + tcph_off); + + rte_memcpy(&sent_seq, &th->sent_seq, sizeof(uint32_t)); + sent_seq = rte_be_to_cpu_32(sent_seq); + + efx_tx_qdesc_tso2_create(txq->common, packet_id, 0, sent_seq, + m->tso_segsz, + *pend, EFX_TX_FATSOV2_OPT_NDESCS); + + *pend += EFX_TX_FATSOV2_OPT_NDESCS; + *pkt_descs += EFX_TX_FATSOV2_OPT_NDESCS; + + efx_tx_qdesc_dma_create(txq->common, header_paddr, header_len, + B_FALSE, (*pend)++); + (*pkt_descs)++; + *pkt_len -= header_len; + + return 0; +} diff --git a/src/spdk/dpdk/drivers/net/sfc/sfc_tso.h b/src/spdk/dpdk/drivers/net/sfc/sfc_tso.h new file mode 100644 index 000000000..8597c2868 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/sfc_tso.h @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2018-2019 Solarflare Communications Inc. + * + * This software was jointly developed between OKTET Labs (under contract + * for Solarflare) and Solarflare Communications, Inc. + */ + +#ifndef _SFC_TSO_H +#define _SFC_TSO_H + +#ifdef __cplusplus +extern "C" { +#endif + +/** Standard TSO header length */ +#define SFC_TSOH_STD_LEN 256 + +/** The number of TSO option descriptors that precede the packet descriptors */ +#define SFC_EF10_TSO_OPT_DESCS_NUM 2 + +/** + * The number of DMA descriptors for TSO header that may or may not precede the + * packet's payload descriptors + */ +#define SFC_EF10_TSO_HDR_DESCS_NUM 1 + +static inline uint16_t +sfc_tso_ip4_get_ipid(const uint8_t *pkt_hdrp, size_t ip_hdr_off) +{ + const struct rte_ipv4_hdr *ip_hdrp; + uint16_t ipid; + + ip_hdrp = (const struct rte_ipv4_hdr *)(pkt_hdrp + ip_hdr_off); + rte_memcpy(&ipid, &ip_hdrp->packet_id, sizeof(ipid)); + + return rte_be_to_cpu_16(ipid); +} + +unsigned int sfc_tso_prepare_header(uint8_t *tsoh, size_t header_len, + struct rte_mbuf **in_seg, size_t *in_off); + +#ifdef __cplusplus +} +#endif + +#endif /* _SFC_TSO_H */ diff --git a/src/spdk/dpdk/drivers/net/sfc/sfc_tweak.h b/src/spdk/dpdk/drivers/net/sfc/sfc_tweak.h new file mode 100644 index 000000000..bd3de5313 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/sfc_tweak.h @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2016-2019 Solarflare Communications Inc. + * + * This software was jointly developed between OKTET Labs (under contract + * for Solarflare) and Solarflare Communications, Inc. + */ + +#ifndef _SFC_TWEAK_H_ +#define _SFC_TWEAK_H_ + +/* + * The header is intended to collect defines/constants which could be + * tweaked to improve the PMD performance characteristics depending on + * the usecase or requirements (CPU load, packet rate, latency). + */ + +/** + * Number of Rx descriptors in the bulk submitted on Rx ring refill. 
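+ * On a typical build with a 64-byte cache line and an 8-byte efx_qword_t this works out to 8 descriptors per refill bulk (an illustrative figure; both sizes are platform and definition dependent).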
+ */ +#define SFC_RX_REFILL_BULK (RTE_CACHE_LINE_SIZE / sizeof(efx_qword_t)) + +/** + * Make the transmit path reap at least one time per a burst; + * this improves cache locality because the same mbufs may be used to send + * subsequent bursts in certain cases because of well-timed reap + */ +#define SFC_TX_XMIT_PKTS_REAP_AT_LEAST_ONCE 0 + +/** Default free threshold follows recommendations from DPDK documentation */ +#define SFC_TX_DEFAULT_FREE_THRESH 32 + +/** Number of mbufs to be freed in bulk in a single call */ +#define SFC_TX_REAP_BULK_SIZE 32 + +/** + * Default head-of-line block timeout to wait for Rx descriptor before + * packet drop because of no descriptors available. + * + * DPDK FW variant only with equal stride super-buffer Rx mode. + */ +#define SFC_RXD_WAIT_TIMEOUT_NS_DEF (200U * 1000) + +#endif /* _SFC_TWEAK_H_ */ diff --git a/src/spdk/dpdk/drivers/net/sfc/sfc_tx.c b/src/spdk/dpdk/drivers/net/sfc/sfc_tx.c new file mode 100644 index 000000000..05a2cf009 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/sfc_tx.c @@ -0,0 +1,1160 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2016-2019 Solarflare Communications Inc. + * + * This software was jointly developed between OKTET Labs (under contract + * for Solarflare) and Solarflare Communications, Inc. + */ + +#include "sfc.h" +#include "sfc_debug.h" +#include "sfc_log.h" +#include "sfc_ev.h" +#include "sfc_tx.h" +#include "sfc_tweak.h" +#include "sfc_kvargs.h" + +/* + * Maximum number of TX queue flush attempts in case of + * failure or flush timeout + */ +#define SFC_TX_QFLUSH_ATTEMPTS (3) + +/* + * Time to wait between event queue polling attempts when waiting for TX + * queue flush done or flush failed events + */ +#define SFC_TX_QFLUSH_POLL_WAIT_MS (1) + +/* + * Maximum number of event queue polling attempts when waiting for TX queue + * flush done or flush failed events; it defines TX queue flush attempt timeout + * together with SFC_TX_QFLUSH_POLL_WAIT_MS + */ +#define SFC_TX_QFLUSH_POLL_ATTEMPTS (2000) + +static uint64_t +sfc_tx_get_offload_mask(struct sfc_adapter *sa) +{ + const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic); + uint64_t no_caps = 0; + + if (!encp->enc_hw_tx_insert_vlan_enabled) + no_caps |= DEV_TX_OFFLOAD_VLAN_INSERT; + + if (!encp->enc_tunnel_encapsulations_supported) + no_caps |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM; + + if (!sa->tso) + no_caps |= DEV_TX_OFFLOAD_TCP_TSO; + + if (!sa->tso_encap) + no_caps |= (DEV_TX_OFFLOAD_VXLAN_TNL_TSO | + DEV_TX_OFFLOAD_GENEVE_TNL_TSO); + + return ~no_caps; +} + +uint64_t +sfc_tx_get_dev_offload_caps(struct sfc_adapter *sa) +{ + return sa->priv.dp_tx->dev_offload_capa & sfc_tx_get_offload_mask(sa); +} + +uint64_t +sfc_tx_get_queue_offload_caps(struct sfc_adapter *sa) +{ + return sa->priv.dp_tx->queue_offload_capa & sfc_tx_get_offload_mask(sa); +} + +static int +sfc_tx_qcheck_conf(struct sfc_adapter *sa, unsigned int txq_max_fill_level, + const struct rte_eth_txconf *tx_conf, + uint64_t offloads) +{ + int rc = 0; + + if (tx_conf->tx_rs_thresh != 0) { + sfc_err(sa, "RS bit in transmit descriptor is not supported"); + rc = EINVAL; + } + + if (tx_conf->tx_free_thresh > txq_max_fill_level) { + sfc_err(sa, + "TxQ free threshold too large: %u vs maximum %u", + tx_conf->tx_free_thresh, txq_max_fill_level); + rc = EINVAL; + } + + if (tx_conf->tx_thresh.pthresh != 0 || + tx_conf->tx_thresh.hthresh != 0 || + tx_conf->tx_thresh.wthresh != 0) { + sfc_warn(sa, + "prefetch/host/writeback thresholds are not supported"); + } + + 
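+ /* + * The EFX TxQ checksum option covers TCP and UDP together: sfc_tx_qstart() + * sets a single EFX_TXQ_CKSUM_TCPUDP flag when either offload is requested, + * hence the check below refuses configurations that enable only one of them. + */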
/* We either perform both TCP and UDP offload, or no offload at all */ + if (((offloads & DEV_TX_OFFLOAD_TCP_CKSUM) == 0) != + ((offloads & DEV_TX_OFFLOAD_UDP_CKSUM) == 0)) { + sfc_err(sa, "TCP and UDP offloads can't be set independently"); + rc = EINVAL; + } + + return rc; +} + +void +sfc_tx_qflush_done(struct sfc_txq_info *txq_info) +{ + txq_info->state |= SFC_TXQ_FLUSHED; + txq_info->state &= ~SFC_TXQ_FLUSHING; +} + +int +sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index, + uint16_t nb_tx_desc, unsigned int socket_id, + const struct rte_eth_txconf *tx_conf) +{ + const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic); + unsigned int txq_entries; + unsigned int evq_entries; + unsigned int txq_max_fill_level; + struct sfc_txq_info *txq_info; + struct sfc_evq *evq; + struct sfc_txq *txq; + int rc = 0; + struct sfc_dp_tx_qcreate_info info; + uint64_t offloads; + struct sfc_dp_tx_hw_limits hw_limits; + + sfc_log_init(sa, "TxQ = %u", sw_index); + + memset(&hw_limits, 0, sizeof(hw_limits)); + hw_limits.txq_max_entries = sa->txq_max_entries; + hw_limits.txq_min_entries = sa->txq_min_entries; + + rc = sa->priv.dp_tx->qsize_up_rings(nb_tx_desc, &hw_limits, + &txq_entries, &evq_entries, + &txq_max_fill_level); + if (rc != 0) + goto fail_size_up_rings; + SFC_ASSERT(txq_entries >= sa->txq_min_entries); + SFC_ASSERT(txq_entries <= sa->txq_max_entries); + SFC_ASSERT(txq_entries >= nb_tx_desc); + SFC_ASSERT(txq_max_fill_level <= nb_tx_desc); + + offloads = tx_conf->offloads | + sa->eth_dev->data->dev_conf.txmode.offloads; + rc = sfc_tx_qcheck_conf(sa, txq_max_fill_level, tx_conf, offloads); + if (rc != 0) + goto fail_bad_conf; + + SFC_ASSERT(sw_index < sfc_sa2shared(sa)->txq_count); + txq_info = &sfc_sa2shared(sa)->txq_info[sw_index]; + + txq_info->entries = txq_entries; + + rc = sfc_ev_qinit(sa, SFC_EVQ_TYPE_TX, sw_index, + evq_entries, socket_id, &evq); + if (rc != 0) + goto fail_ev_qinit; + + txq = &sa->txq_ctrl[sw_index]; + txq->hw_index = sw_index; + txq->evq = evq; + txq_info->free_thresh = + (tx_conf->tx_free_thresh) ? 
tx_conf->tx_free_thresh : + SFC_TX_DEFAULT_FREE_THRESH; + txq_info->offloads = offloads; + + rc = sfc_dma_alloc(sa, "txq", sw_index, + efx_txq_size(sa->nic, txq_info->entries), + socket_id, &txq->mem); + if (rc != 0) + goto fail_dma_alloc; + + memset(&info, 0, sizeof(info)); + info.max_fill_level = txq_max_fill_level; + info.free_thresh = txq_info->free_thresh; + info.offloads = offloads; + info.txq_entries = txq_info->entries; + info.dma_desc_size_max = encp->enc_tx_dma_desc_size_max; + info.txq_hw_ring = txq->mem.esm_base; + info.evq_entries = evq_entries; + info.evq_hw_ring = evq->mem.esm_base; + info.hw_index = txq->hw_index; + info.mem_bar = sa->mem_bar.esb_base; + info.vi_window_shift = encp->enc_vi_window_shift; + info.tso_tcp_header_offset_limit = + encp->enc_tx_tso_tcp_header_offset_limit; + + rc = sa->priv.dp_tx->qcreate(sa->eth_dev->data->port_id, sw_index, + &RTE_ETH_DEV_TO_PCI(sa->eth_dev)->addr, + socket_id, &info, &txq_info->dp); + if (rc != 0) + goto fail_dp_tx_qinit; + + evq->dp_txq = txq_info->dp; + + txq_info->state = SFC_TXQ_INITIALIZED; + + txq_info->deferred_start = (tx_conf->tx_deferred_start != 0); + + return 0; + +fail_dp_tx_qinit: + sfc_dma_free(sa, &txq->mem); + +fail_dma_alloc: + sfc_ev_qfini(evq); + +fail_ev_qinit: + txq_info->entries = 0; + +fail_bad_conf: +fail_size_up_rings: + sfc_log_init(sa, "failed (TxQ = %u, rc = %d)", sw_index, rc); + return rc; +} + +void +sfc_tx_qfini(struct sfc_adapter *sa, unsigned int sw_index) +{ + struct sfc_txq_info *txq_info; + struct sfc_txq *txq; + + sfc_log_init(sa, "TxQ = %u", sw_index); + + SFC_ASSERT(sw_index < sfc_sa2shared(sa)->txq_count); + sa->eth_dev->data->tx_queues[sw_index] = NULL; + + txq_info = &sfc_sa2shared(sa)->txq_info[sw_index]; + + SFC_ASSERT(txq_info->state == SFC_TXQ_INITIALIZED); + + sa->priv.dp_tx->qdestroy(txq_info->dp); + txq_info->dp = NULL; + + txq_info->state &= ~SFC_TXQ_INITIALIZED; + txq_info->entries = 0; + + txq = &sa->txq_ctrl[sw_index]; + + sfc_dma_free(sa, &txq->mem); + + sfc_ev_qfini(txq->evq); + txq->evq = NULL; +} + +static int +sfc_tx_qinit_info(struct sfc_adapter *sa, unsigned int sw_index) +{ + sfc_log_init(sa, "TxQ = %u", sw_index); + + return 0; +} + +static int +sfc_tx_check_mode(struct sfc_adapter *sa, const struct rte_eth_txmode *txmode) +{ + int rc = 0; + + switch (txmode->mq_mode) { + case ETH_MQ_TX_NONE: + break; + default: + sfc_err(sa, "Tx multi-queue mode %u not supported", + txmode->mq_mode); + rc = EINVAL; + } + + /* + * These features are claimed to be i40e-specific, + * but it does make sense to double-check their absence + */ + if (txmode->hw_vlan_reject_tagged) { + sfc_err(sa, "Rejecting tagged packets not supported"); + rc = EINVAL; + } + + if (txmode->hw_vlan_reject_untagged) { + sfc_err(sa, "Rejecting untagged packets not supported"); + rc = EINVAL; + } + + if (txmode->hw_vlan_insert_pvid) { + sfc_err(sa, "Port-based VLAN insertion not supported"); + rc = EINVAL; + } + + return rc; +} + +/** + * Destroy excess queues that are no longer needed after reconfiguration + * or complete close. 
+ */ +static void +sfc_tx_fini_queues(struct sfc_adapter *sa, unsigned int nb_tx_queues) +{ + struct sfc_adapter_shared * const sas = sfc_sa2shared(sa); + int sw_index; + + SFC_ASSERT(nb_tx_queues <= sas->txq_count); + + sw_index = sas->txq_count; + while (--sw_index >= (int)nb_tx_queues) { + if (sas->txq_info[sw_index].state & SFC_TXQ_INITIALIZED) + sfc_tx_qfini(sa, sw_index); + } + + sas->txq_count = nb_tx_queues; +} + +int +sfc_tx_configure(struct sfc_adapter *sa) +{ + struct sfc_adapter_shared * const sas = sfc_sa2shared(sa); + const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic); + const struct rte_eth_conf *dev_conf = &sa->eth_dev->data->dev_conf; + const unsigned int nb_tx_queues = sa->eth_dev->data->nb_tx_queues; + int rc = 0; + + sfc_log_init(sa, "nb_tx_queues=%u (old %u)", + nb_tx_queues, sas->txq_count); + + /* + * The datapath implementation assumes absence of boundary + * limits on Tx DMA descriptors. Addition of these checks on + * datapath would simply make the datapath slower. + */ + if (encp->enc_tx_dma_desc_boundary != 0) { + rc = ENOTSUP; + goto fail_tx_dma_desc_boundary; + } + + rc = sfc_tx_check_mode(sa, &dev_conf->txmode); + if (rc != 0) + goto fail_check_mode; + + if (nb_tx_queues == sas->txq_count) + goto done; + + if (sas->txq_info == NULL) { + sas->txq_info = rte_calloc_socket("sfc-txqs", nb_tx_queues, + sizeof(sas->txq_info[0]), 0, + sa->socket_id); + if (sas->txq_info == NULL) + goto fail_txqs_alloc; + + /* + * Allocate primary process only TxQ control from heap + * since it should not be shared. + */ + rc = ENOMEM; + sa->txq_ctrl = calloc(nb_tx_queues, sizeof(sa->txq_ctrl[0])); + if (sa->txq_ctrl == NULL) + goto fail_txqs_ctrl_alloc; + } else { + struct sfc_txq_info *new_txq_info; + struct sfc_txq *new_txq_ctrl; + + if (nb_tx_queues < sas->txq_count) + sfc_tx_fini_queues(sa, nb_tx_queues); + + new_txq_info = + rte_realloc(sas->txq_info, + nb_tx_queues * sizeof(sas->txq_info[0]), 0); + if (new_txq_info == NULL && nb_tx_queues > 0) + goto fail_txqs_realloc; + + new_txq_ctrl = realloc(sa->txq_ctrl, + nb_tx_queues * sizeof(sa->txq_ctrl[0])); + if (new_txq_ctrl == NULL && nb_tx_queues > 0) + goto fail_txqs_ctrl_realloc; + + sas->txq_info = new_txq_info; + sa->txq_ctrl = new_txq_ctrl; + if (nb_tx_queues > sas->txq_count) { + memset(&sas->txq_info[sas->txq_count], 0, + (nb_tx_queues - sas->txq_count) * + sizeof(sas->txq_info[0])); + memset(&sa->txq_ctrl[sas->txq_count], 0, + (nb_tx_queues - sas->txq_count) * + sizeof(sa->txq_ctrl[0])); + } + } + + while (sas->txq_count < nb_tx_queues) { + rc = sfc_tx_qinit_info(sa, sas->txq_count); + if (rc != 0) + goto fail_tx_qinit_info; + + sas->txq_count++; + } + +done: + return 0; + +fail_tx_qinit_info: +fail_txqs_ctrl_realloc: +fail_txqs_realloc: +fail_txqs_ctrl_alloc: +fail_txqs_alloc: + sfc_tx_close(sa); + +fail_check_mode: +fail_tx_dma_desc_boundary: + sfc_log_init(sa, "failed (rc = %d)", rc); + return rc; +} + +void +sfc_tx_close(struct sfc_adapter *sa) +{ + sfc_tx_fini_queues(sa, 0); + + free(sa->txq_ctrl); + sa->txq_ctrl = NULL; + + rte_free(sfc_sa2shared(sa)->txq_info); + sfc_sa2shared(sa)->txq_info = NULL; +} + +int +sfc_tx_qstart(struct sfc_adapter *sa, unsigned int sw_index) +{ + struct sfc_adapter_shared * const sas = sfc_sa2shared(sa); + uint64_t offloads_supported = sfc_tx_get_dev_offload_caps(sa) | + sfc_tx_get_queue_offload_caps(sa); + struct rte_eth_dev_data *dev_data; + struct sfc_txq_info *txq_info; + struct sfc_txq *txq; + struct sfc_evq *evq; + uint16_t flags = 0; + unsigned int desc_index; + int rc = 0; + + 
sfc_log_init(sa, "TxQ = %u", sw_index); + + SFC_ASSERT(sw_index < sas->txq_count); + txq_info = &sas->txq_info[sw_index]; + + SFC_ASSERT(txq_info->state == SFC_TXQ_INITIALIZED); + + txq = &sa->txq_ctrl[sw_index]; + evq = txq->evq; + + rc = sfc_ev_qstart(evq, sfc_evq_index_by_txq_sw_index(sa, sw_index)); + if (rc != 0) + goto fail_ev_qstart; + + if (txq_info->offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) + flags |= EFX_TXQ_CKSUM_IPV4; + + if (txq_info->offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) + flags |= EFX_TXQ_CKSUM_INNER_IPV4; + + if ((txq_info->offloads & DEV_TX_OFFLOAD_TCP_CKSUM) || + (txq_info->offloads & DEV_TX_OFFLOAD_UDP_CKSUM)) { + flags |= EFX_TXQ_CKSUM_TCPUDP; + + if (offloads_supported & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) + flags |= EFX_TXQ_CKSUM_INNER_TCPUDP; + } + + if (txq_info->offloads & (DEV_TX_OFFLOAD_TCP_TSO | + DEV_TX_OFFLOAD_VXLAN_TNL_TSO | + DEV_TX_OFFLOAD_GENEVE_TNL_TSO)) + flags |= EFX_TXQ_FATSOV2; + + rc = efx_tx_qcreate(sa->nic, txq->hw_index, 0, &txq->mem, + txq_info->entries, 0 /* not used on EF10 */, + flags, evq->common, + &txq->common, &desc_index); + if (rc != 0) { + if (sa->tso && (rc == ENOSPC)) + sfc_err(sa, "ran out of TSO contexts"); + + goto fail_tx_qcreate; + } + + efx_tx_qenable(txq->common); + + txq_info->state |= SFC_TXQ_STARTED; + + rc = sa->priv.dp_tx->qstart(txq_info->dp, evq->read_ptr, desc_index); + if (rc != 0) + goto fail_dp_qstart; + + /* + * It seems to be used by DPDK for debug purposes only ('rte_ether') + */ + dev_data = sa->eth_dev->data; + dev_data->tx_queue_state[sw_index] = RTE_ETH_QUEUE_STATE_STARTED; + + return 0; + +fail_dp_qstart: + txq_info->state = SFC_TXQ_INITIALIZED; + efx_tx_qdestroy(txq->common); + +fail_tx_qcreate: + sfc_ev_qstop(evq); + +fail_ev_qstart: + return rc; +} + +void +sfc_tx_qstop(struct sfc_adapter *sa, unsigned int sw_index) +{ + struct sfc_adapter_shared * const sas = sfc_sa2shared(sa); + struct rte_eth_dev_data *dev_data; + struct sfc_txq_info *txq_info; + struct sfc_txq *txq; + unsigned int retry_count; + unsigned int wait_count; + int rc; + + sfc_log_init(sa, "TxQ = %u", sw_index); + + SFC_ASSERT(sw_index < sas->txq_count); + txq_info = &sas->txq_info[sw_index]; + + if (txq_info->state == SFC_TXQ_INITIALIZED) + return; + + SFC_ASSERT(txq_info->state & SFC_TXQ_STARTED); + + txq = &sa->txq_ctrl[sw_index]; + sa->priv.dp_tx->qstop(txq_info->dp, &txq->evq->read_ptr); + + /* + * Retry TX queue flushing in case of flush failed or + * timeout; in the worst case it can delay for 6 seconds + */ + for (retry_count = 0; + ((txq_info->state & SFC_TXQ_FLUSHED) == 0) && + (retry_count < SFC_TX_QFLUSH_ATTEMPTS); + ++retry_count) { + rc = efx_tx_qflush(txq->common); + if (rc != 0) { + txq_info->state |= (rc == EALREADY) ? 
+ SFC_TXQ_FLUSHED : SFC_TXQ_FLUSH_FAILED; + break; + } + + /* + * Wait for TX queue flush done or flush failed event at least + * SFC_TX_QFLUSH_POLL_WAIT_MS milliseconds and not more + * than 2 seconds (SFC_TX_QFLUSH_POLL_WAIT_MS multiplied + * by SFC_TX_QFLUSH_POLL_ATTEMPTS) + */ + wait_count = 0; + do { + rte_delay_ms(SFC_TX_QFLUSH_POLL_WAIT_MS); + sfc_ev_qpoll(txq->evq); + } while ((txq_info->state & SFC_TXQ_FLUSHING) && + wait_count++ < SFC_TX_QFLUSH_POLL_ATTEMPTS); + + if (txq_info->state & SFC_TXQ_FLUSHING) + sfc_err(sa, "TxQ %u flush timed out", sw_index); + + if (txq_info->state & SFC_TXQ_FLUSHED) + sfc_notice(sa, "TxQ %u flushed", sw_index); + } + + sa->priv.dp_tx->qreap(txq_info->dp); + + txq_info->state = SFC_TXQ_INITIALIZED; + + efx_tx_qdestroy(txq->common); + + sfc_ev_qstop(txq->evq); + + /* + * It seems to be used by DPDK for debug purposes only ('rte_ether') + */ + dev_data = sa->eth_dev->data; + dev_data->tx_queue_state[sw_index] = RTE_ETH_QUEUE_STATE_STOPPED; +} + +int +sfc_tx_start(struct sfc_adapter *sa) +{ + struct sfc_adapter_shared * const sas = sfc_sa2shared(sa); + const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic); + unsigned int sw_index; + int rc = 0; + + sfc_log_init(sa, "txq_count = %u", sas->txq_count); + + if (sa->tso) { + if (!encp->enc_fw_assisted_tso_v2_enabled) { + sfc_warn(sa, "TSO support was unable to be restored"); + sa->tso = B_FALSE; + sa->tso_encap = B_FALSE; + } + } + + if (sa->tso_encap && !encp->enc_fw_assisted_tso_v2_encap_enabled) { + sfc_warn(sa, "Encapsulated TSO support was unable to be restored"); + sa->tso_encap = B_FALSE; + } + + rc = efx_tx_init(sa->nic); + if (rc != 0) + goto fail_efx_tx_init; + + for (sw_index = 0; sw_index < sas->txq_count; ++sw_index) { + if (sas->txq_info[sw_index].state == SFC_TXQ_INITIALIZED && + (!(sas->txq_info[sw_index].deferred_start) || + sas->txq_info[sw_index].deferred_started)) { + rc = sfc_tx_qstart(sa, sw_index); + if (rc != 0) + goto fail_tx_qstart; + } + } + + return 0; + +fail_tx_qstart: + while (sw_index-- > 0) + sfc_tx_qstop(sa, sw_index); + + efx_tx_fini(sa->nic); + +fail_efx_tx_init: + sfc_log_init(sa, "failed (rc = %d)", rc); + return rc; +} + +void +sfc_tx_stop(struct sfc_adapter *sa) +{ + struct sfc_adapter_shared * const sas = sfc_sa2shared(sa); + unsigned int sw_index; + + sfc_log_init(sa, "txq_count = %u", sas->txq_count); + + sw_index = sas->txq_count; + while (sw_index-- > 0) { + if (sas->txq_info[sw_index].state & SFC_TXQ_STARTED) + sfc_tx_qstop(sa, sw_index); + } + + efx_tx_fini(sa->nic); +} + +static void +sfc_efx_tx_reap(struct sfc_efx_txq *txq) +{ + unsigned int completed; + + sfc_ev_qpoll(txq->evq); + + for (completed = txq->completed; + completed != txq->pending; completed++) { + struct sfc_efx_tx_sw_desc *txd; + + txd = &txq->sw_ring[completed & txq->ptr_mask]; + + if (txd->mbuf != NULL) { + rte_pktmbuf_free(txd->mbuf); + txd->mbuf = NULL; + } + } + + txq->completed = completed; +} + +/* + * The function is used to insert or update VLAN tag; + * the firmware has state of the firmware tag to insert per TxQ + * (controlled by option descriptors), hence, if the tag of the + * packet to be sent is different from one remembered by the firmware, + * the function will update it + */ +static unsigned int +sfc_efx_tx_maybe_insert_tag(struct sfc_efx_txq *txq, struct rte_mbuf *m, + efx_desc_t **pend) +{ + uint16_t this_tag = ((m->ol_flags & PKT_TX_VLAN_PKT) ? 
+ m->vlan_tci : 0); + + if (this_tag == txq->hw_vlan_tci) + return 0; + + /* + * The expression inside SFC_ASSERT() is not desired to be checked in + * a non-debug build because it might be too expensive on the data path + */ + SFC_ASSERT(efx_nic_cfg_get(txq->evq->sa->nic)->enc_hw_tx_insert_vlan_enabled); + + efx_tx_qdesc_vlantci_create(txq->common, rte_cpu_to_be_16(this_tag), + *pend); + (*pend)++; + txq->hw_vlan_tci = this_tag; + + return 1; +} + +static uint16_t +sfc_efx_prepare_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + struct sfc_dp_txq *dp_txq = tx_queue; + struct sfc_efx_txq *txq = sfc_efx_txq_by_dp_txq(dp_txq); + const efx_nic_cfg_t *encp = efx_nic_cfg_get(txq->evq->sa->nic); + uint16_t i; + + for (i = 0; i < nb_pkts; i++) { + int ret; + + /* + * EFX Tx datapath may require extra VLAN descriptor if VLAN + * insertion offload is requested regardless the offload + * requested/supported. + */ + ret = sfc_dp_tx_prepare_pkt(tx_pkts[i], + encp->enc_tx_tso_tcp_header_offset_limit, + txq->max_fill_level, EFX_TX_FATSOV2_OPT_NDESCS, + 1); + if (unlikely(ret != 0)) { + rte_errno = ret; + break; + } + } + + return i; +} + +static uint16_t +sfc_efx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) +{ + struct sfc_dp_txq *dp_txq = (struct sfc_dp_txq *)tx_queue; + struct sfc_efx_txq *txq = sfc_efx_txq_by_dp_txq(dp_txq); + unsigned int added = txq->added; + unsigned int pushed = added; + unsigned int pkts_sent = 0; + efx_desc_t *pend = &txq->pend_desc[0]; + const unsigned int hard_max_fill = txq->max_fill_level; + const unsigned int soft_max_fill = hard_max_fill - txq->free_thresh; + unsigned int fill_level = added - txq->completed; + boolean_t reap_done; + int rc __rte_unused; + struct rte_mbuf **pktp; + + if (unlikely((txq->flags & SFC_EFX_TXQ_FLAG_RUNNING) == 0)) + goto done; + + /* + * If insufficient space for a single packet is present, + * we should reap; otherwise, we shouldn't do that all the time + * to avoid latency increase + */ + reap_done = (fill_level > soft_max_fill); + + if (reap_done) { + sfc_efx_tx_reap(txq); + /* + * Recalculate fill level since 'txq->completed' + * might have changed on reap + */ + fill_level = added - txq->completed; + } + + for (pkts_sent = 0, pktp = &tx_pkts[0]; + (pkts_sent < nb_pkts) && (fill_level <= soft_max_fill); + pkts_sent++, pktp++) { + uint16_t hw_vlan_tci_prev = txq->hw_vlan_tci; + struct rte_mbuf *m_seg = *pktp; + size_t pkt_len = m_seg->pkt_len; + unsigned int pkt_descs = 0; + size_t in_off = 0; + + /* + * Here VLAN TCI is expected to be zero in case if no + * DEV_TX_OFFLOAD_VLAN_INSERT capability is advertised; + * if the calling app ignores the absence of + * DEV_TX_OFFLOAD_VLAN_INSERT and pushes VLAN TCI, then + * TX_ERROR will occur + */ + pkt_descs += sfc_efx_tx_maybe_insert_tag(txq, m_seg, &pend); + + if (m_seg->ol_flags & PKT_TX_TCP_SEG) { + /* + * We expect correct 'pkt->l[2, 3, 4]_len' values + * to be set correctly by the caller + */ + if (sfc_efx_tso_do(txq, added, &m_seg, &in_off, &pend, + &pkt_descs, &pkt_len) != 0) { + /* We may have reached this place if packet + * header linearization is needed but the + * header length is greater than + * SFC_TSOH_STD_LEN + * + * We will deceive RTE saying that we have sent + * the packet, but we will actually drop it. + * Hence, we should revert 'pend' to the + * previous state (in case we have added + * VLAN descriptor) and start processing + * another one packet. 
But the original + * mbuf shouldn't be orphaned + */ + pend -= pkt_descs; + txq->hw_vlan_tci = hw_vlan_tci_prev; + + rte_pktmbuf_free(*pktp); + + continue; + } + + /* + * We've only added 2 FATSOv2 option descriptors + * and 1 descriptor for the linearized packet header. + * The outstanding work will be done in the same manner + * as for the usual non-TSO path + */ + } + + for (; m_seg != NULL; m_seg = m_seg->next) { + efsys_dma_addr_t next_frag; + size_t seg_len; + + seg_len = m_seg->data_len; + next_frag = rte_mbuf_data_iova(m_seg); + + /* + * If we've started TSO transaction few steps earlier, + * we'll skip packet header using an offset in the + * current segment (which has been set to the + * first one containing payload) + */ + seg_len -= in_off; + next_frag += in_off; + in_off = 0; + + do { + efsys_dma_addr_t frag_addr = next_frag; + size_t frag_len; + + /* + * It is assumed here that there is no + * limitation on address boundary + * crossing by DMA descriptor. + */ + frag_len = MIN(seg_len, txq->dma_desc_size_max); + next_frag += frag_len; + seg_len -= frag_len; + pkt_len -= frag_len; + + efx_tx_qdesc_dma_create(txq->common, + frag_addr, frag_len, + (pkt_len == 0), + pend++); + + pkt_descs++; + } while (seg_len != 0); + } + + added += pkt_descs; + + fill_level += pkt_descs; + if (unlikely(fill_level > hard_max_fill)) { + /* + * Our estimation for maximum number of descriptors + * required to send a packet seems to be wrong. + * Try to reap (if we haven't yet). + */ + if (!reap_done) { + sfc_efx_tx_reap(txq); + reap_done = B_TRUE; + fill_level = added - txq->completed; + if (fill_level > hard_max_fill) { + pend -= pkt_descs; + txq->hw_vlan_tci = hw_vlan_tci_prev; + break; + } + } else { + pend -= pkt_descs; + txq->hw_vlan_tci = hw_vlan_tci_prev; + break; + } + } + + /* Assign mbuf to the last used desc */ + txq->sw_ring[(added - 1) & txq->ptr_mask].mbuf = *pktp; + } + + if (likely(pkts_sent > 0)) { + rc = efx_tx_qdesc_post(txq->common, txq->pend_desc, + pend - &txq->pend_desc[0], + txq->completed, &txq->added); + SFC_ASSERT(rc == 0); + + if (likely(pushed != txq->added)) + efx_tx_qpush(txq->common, txq->added, pushed); + } + +#if SFC_TX_XMIT_PKTS_REAP_AT_LEAST_ONCE + if (!reap_done) + sfc_efx_tx_reap(txq); +#endif + +done: + return pkts_sent; +} + +const struct sfc_dp_tx * +sfc_dp_tx_by_dp_txq(const struct sfc_dp_txq *dp_txq) +{ + const struct sfc_dp_queue *dpq = &dp_txq->dpq; + struct rte_eth_dev *eth_dev; + struct sfc_adapter_priv *sap; + + SFC_ASSERT(rte_eth_dev_is_valid_port(dpq->port_id)); + eth_dev = &rte_eth_devices[dpq->port_id]; + + sap = sfc_adapter_priv_by_eth_dev(eth_dev); + + return sap->dp_tx; +} + +struct sfc_txq_info * +sfc_txq_info_by_dp_txq(const struct sfc_dp_txq *dp_txq) +{ + const struct sfc_dp_queue *dpq = &dp_txq->dpq; + struct rte_eth_dev *eth_dev; + struct sfc_adapter_shared *sas; + + SFC_ASSERT(rte_eth_dev_is_valid_port(dpq->port_id)); + eth_dev = &rte_eth_devices[dpq->port_id]; + + sas = sfc_adapter_shared_by_eth_dev(eth_dev); + + SFC_ASSERT(dpq->queue_id < sas->txq_count); + return &sas->txq_info[dpq->queue_id]; +} + +struct sfc_txq * +sfc_txq_by_dp_txq(const struct sfc_dp_txq *dp_txq) +{ + const struct sfc_dp_queue *dpq = &dp_txq->dpq; + struct rte_eth_dev *eth_dev; + struct sfc_adapter *sa; + + SFC_ASSERT(rte_eth_dev_is_valid_port(dpq->port_id)); + eth_dev = &rte_eth_devices[dpq->port_id]; + + sa = sfc_adapter_by_eth_dev(eth_dev); + + SFC_ASSERT(dpq->queue_id < sfc_sa2shared(sa)->txq_count); + return &sa->txq_ctrl[dpq->queue_id]; +} + +static 
sfc_dp_tx_qsize_up_rings_t sfc_efx_tx_qsize_up_rings; +static int +sfc_efx_tx_qsize_up_rings(uint16_t nb_tx_desc, + __rte_unused struct sfc_dp_tx_hw_limits *limits, + unsigned int *txq_entries, + unsigned int *evq_entries, + unsigned int *txq_max_fill_level) +{ + *txq_entries = nb_tx_desc; + *evq_entries = nb_tx_desc; + *txq_max_fill_level = EFX_TXQ_LIMIT(*txq_entries); + return 0; +} + +static sfc_dp_tx_qcreate_t sfc_efx_tx_qcreate; +static int +sfc_efx_tx_qcreate(uint16_t port_id, uint16_t queue_id, + const struct rte_pci_addr *pci_addr, + int socket_id, + const struct sfc_dp_tx_qcreate_info *info, + struct sfc_dp_txq **dp_txqp) +{ + struct sfc_efx_txq *txq; + struct sfc_txq *ctrl_txq; + int rc; + + rc = ENOMEM; + txq = rte_zmalloc_socket("sfc-efx-txq", sizeof(*txq), + RTE_CACHE_LINE_SIZE, socket_id); + if (txq == NULL) + goto fail_txq_alloc; + + sfc_dp_queue_init(&txq->dp.dpq, port_id, queue_id, pci_addr); + + rc = ENOMEM; + txq->pend_desc = rte_calloc_socket("sfc-efx-txq-pend-desc", + EFX_TXQ_LIMIT(info->txq_entries), + sizeof(*txq->pend_desc), 0, + socket_id); + if (txq->pend_desc == NULL) + goto fail_pend_desc_alloc; + + rc = ENOMEM; + txq->sw_ring = rte_calloc_socket("sfc-efx-txq-sw_ring", + info->txq_entries, + sizeof(*txq->sw_ring), + RTE_CACHE_LINE_SIZE, socket_id); + if (txq->sw_ring == NULL) + goto fail_sw_ring_alloc; + + ctrl_txq = sfc_txq_by_dp_txq(&txq->dp); + if (ctrl_txq->evq->sa->tso) { + rc = sfc_efx_tso_alloc_tsoh_objs(txq->sw_ring, + info->txq_entries, socket_id); + if (rc != 0) + goto fail_alloc_tsoh_objs; + } + + txq->evq = ctrl_txq->evq; + txq->ptr_mask = info->txq_entries - 1; + txq->max_fill_level = info->max_fill_level; + txq->free_thresh = info->free_thresh; + txq->dma_desc_size_max = info->dma_desc_size_max; + + *dp_txqp = &txq->dp; + return 0; + +fail_alloc_tsoh_objs: + rte_free(txq->sw_ring); + +fail_sw_ring_alloc: + rte_free(txq->pend_desc); + +fail_pend_desc_alloc: + rte_free(txq); + +fail_txq_alloc: + return rc; +} + +static sfc_dp_tx_qdestroy_t sfc_efx_tx_qdestroy; +static void +sfc_efx_tx_qdestroy(struct sfc_dp_txq *dp_txq) +{ + struct sfc_efx_txq *txq = sfc_efx_txq_by_dp_txq(dp_txq); + + sfc_efx_tso_free_tsoh_objs(txq->sw_ring, txq->ptr_mask + 1); + rte_free(txq->sw_ring); + rte_free(txq->pend_desc); + rte_free(txq); +} + +static sfc_dp_tx_qstart_t sfc_efx_tx_qstart; +static int +sfc_efx_tx_qstart(struct sfc_dp_txq *dp_txq, + __rte_unused unsigned int evq_read_ptr, + unsigned int txq_desc_index) +{ + /* libefx-based datapath is specific to libefx-based PMD */ + struct sfc_efx_txq *txq = sfc_efx_txq_by_dp_txq(dp_txq); + struct sfc_txq *ctrl_txq = sfc_txq_by_dp_txq(dp_txq); + + txq->common = ctrl_txq->common; + + txq->pending = txq->completed = txq->added = txq_desc_index; + txq->hw_vlan_tci = 0; + + txq->flags |= (SFC_EFX_TXQ_FLAG_STARTED | SFC_EFX_TXQ_FLAG_RUNNING); + + return 0; +} + +static sfc_dp_tx_qstop_t sfc_efx_tx_qstop; +static void +sfc_efx_tx_qstop(struct sfc_dp_txq *dp_txq, + __rte_unused unsigned int *evq_read_ptr) +{ + struct sfc_efx_txq *txq = sfc_efx_txq_by_dp_txq(dp_txq); + + txq->flags &= ~SFC_EFX_TXQ_FLAG_RUNNING; +} + +static sfc_dp_tx_qreap_t sfc_efx_tx_qreap; +static void +sfc_efx_tx_qreap(struct sfc_dp_txq *dp_txq) +{ + struct sfc_efx_txq *txq = sfc_efx_txq_by_dp_txq(dp_txq); + unsigned int txds; + + sfc_efx_tx_reap(txq); + + for (txds = 0; txds <= txq->ptr_mask; txds++) { + if (txq->sw_ring[txds].mbuf != NULL) { + rte_pktmbuf_free(txq->sw_ring[txds].mbuf); + txq->sw_ring[txds].mbuf = NULL; + } + } + + txq->flags &= 
~SFC_EFX_TXQ_FLAG_STARTED; +} + +static sfc_dp_tx_qdesc_status_t sfc_efx_tx_qdesc_status; +static int +sfc_efx_tx_qdesc_status(struct sfc_dp_txq *dp_txq, uint16_t offset) +{ + struct sfc_efx_txq *txq = sfc_efx_txq_by_dp_txq(dp_txq); + + if (unlikely(offset > txq->ptr_mask)) + return -EINVAL; + + if (unlikely(offset >= txq->max_fill_level)) + return RTE_ETH_TX_DESC_UNAVAIL; + + /* + * Poll EvQ to derive up-to-date 'txq->pending' figure; + * it is required for the queue to be running, but the + * check is omitted because API design assumes that it + * is the duty of the caller to satisfy all conditions + */ + SFC_ASSERT((txq->flags & SFC_EFX_TXQ_FLAG_RUNNING) == + SFC_EFX_TXQ_FLAG_RUNNING); + sfc_ev_qpoll(txq->evq); + + /* + * Ring tail is 'txq->pending', and although descriptors + * between 'txq->completed' and 'txq->pending' are still + * in use by the driver, they should be reported as DONE + */ + if (unlikely(offset < (txq->added - txq->pending))) + return RTE_ETH_TX_DESC_FULL; + + /* + * There is no separate return value for unused descriptors; + * the latter will be reported as DONE because genuine DONE + * descriptors will be freed anyway in SW on the next burst + */ + return RTE_ETH_TX_DESC_DONE; +} + +struct sfc_dp_tx sfc_efx_tx = { + .dp = { + .name = SFC_KVARG_DATAPATH_EFX, + .type = SFC_DP_TX, + .hw_fw_caps = 0, + }, + .features = 0, + .dev_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT | + DEV_TX_OFFLOAD_MULTI_SEGS, + .queue_offload_capa = DEV_TX_OFFLOAD_IPV4_CKSUM | + DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_TCP_CKSUM | + DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | + DEV_TX_OFFLOAD_TCP_TSO, + .qsize_up_rings = sfc_efx_tx_qsize_up_rings, + .qcreate = sfc_efx_tx_qcreate, + .qdestroy = sfc_efx_tx_qdestroy, + .qstart = sfc_efx_tx_qstart, + .qstop = sfc_efx_tx_qstop, + .qreap = sfc_efx_tx_qreap, + .qdesc_status = sfc_efx_tx_qdesc_status, + .pkt_prepare = sfc_efx_prepare_pkts, + .pkt_burst = sfc_efx_xmit_pkts, +}; diff --git a/src/spdk/dpdk/drivers/net/sfc/sfc_tx.h b/src/spdk/dpdk/drivers/net/sfc/sfc_tx.h new file mode 100644 index 000000000..d583ee9c3 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/sfc/sfc_tx.h @@ -0,0 +1,137 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2016-2019 Solarflare Communications Inc. + * + * This software was jointly developed between OKTET Labs (under contract + * for Solarflare) and Solarflare Communications, Inc. + */ + +#ifndef _SFC_TX_H +#define _SFC_TX_H + +#include +#include + +#include "efx.h" + +#include "sfc_dp_tx.h" + +#ifdef __cplusplus +extern "C" { +#endif + +struct sfc_adapter; +struct sfc_evq; + +/** + * Software Tx descriptor information associated with hardware Tx + * descriptor. + */ +struct sfc_efx_tx_sw_desc { + struct rte_mbuf *mbuf; + uint8_t *tsoh; /* Buffer to store TSO header */ +}; + +enum sfc_txq_state_bit { + SFC_TXQ_INITIALIZED_BIT = 0, +#define SFC_TXQ_INITIALIZED (1 << SFC_TXQ_INITIALIZED_BIT) + SFC_TXQ_STARTED_BIT, +#define SFC_TXQ_STARTED (1 << SFC_TXQ_STARTED_BIT) + SFC_TXQ_FLUSHING_BIT, +#define SFC_TXQ_FLUSHING (1 << SFC_TXQ_FLUSHING_BIT) + SFC_TXQ_FLUSHED_BIT, +#define SFC_TXQ_FLUSHED (1 << SFC_TXQ_FLUSHED_BIT) + SFC_TXQ_FLUSH_FAILED_BIT, +#define SFC_TXQ_FLUSH_FAILED (1 << SFC_TXQ_FLUSH_FAILED_BIT) +}; + +/** + * Transmit queue control primary process-only information. + * Not used on datapath. 
+ */ +struct sfc_txq { + unsigned int hw_index; + struct sfc_evq *evq; + efsys_mem_t mem; + efx_txq_t *common; +}; + +struct sfc_txq *sfc_txq_by_dp_txq(const struct sfc_dp_txq *dp_txq); + +/** + * Transmit queue information used on libefx-based data path. + * Allocated on the socket specified on the queue setup. + */ +struct sfc_efx_txq { + struct sfc_evq *evq; + struct sfc_efx_tx_sw_desc *sw_ring; + unsigned int ptr_mask; + efx_desc_t *pend_desc; + efx_txq_t *common; + unsigned int added; + unsigned int pending; + unsigned int completed; + unsigned int max_fill_level; + unsigned int free_thresh; + uint16_t hw_vlan_tci; + uint16_t dma_desc_size_max; + + unsigned int hw_index; + unsigned int flags; +#define SFC_EFX_TXQ_FLAG_STARTED 0x1 +#define SFC_EFX_TXQ_FLAG_RUNNING 0x2 + + /* Datapath transmit queue anchor */ + struct sfc_dp_txq dp; +}; + +static inline struct sfc_efx_txq * +sfc_efx_txq_by_dp_txq(struct sfc_dp_txq *dp_txq) +{ + return container_of(dp_txq, struct sfc_efx_txq, dp); +} + +struct sfc_txq_info { + unsigned int state; + unsigned int entries; + struct sfc_dp_txq *dp; + boolean_t deferred_start; + boolean_t deferred_started; + unsigned int free_thresh; + uint64_t offloads; +}; + +struct sfc_txq_info *sfc_txq_info_by_dp_txq(const struct sfc_dp_txq *dp_txq); + +int sfc_tx_configure(struct sfc_adapter *sa); +void sfc_tx_close(struct sfc_adapter *sa); + +int sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index, + uint16_t nb_tx_desc, unsigned int socket_id, + const struct rte_eth_txconf *tx_conf); +void sfc_tx_qfini(struct sfc_adapter *sa, unsigned int sw_index); + +void sfc_tx_qflush_done(struct sfc_txq_info *txq_info); +int sfc_tx_qstart(struct sfc_adapter *sa, unsigned int sw_index); +void sfc_tx_qstop(struct sfc_adapter *sa, unsigned int sw_index); +int sfc_tx_start(struct sfc_adapter *sa); +void sfc_tx_stop(struct sfc_adapter *sa); + +uint64_t sfc_tx_get_dev_offload_caps(struct sfc_adapter *sa); +uint64_t sfc_tx_get_queue_offload_caps(struct sfc_adapter *sa); + +/* From 'sfc_tso.c' */ +int sfc_efx_tso_alloc_tsoh_objs(struct sfc_efx_tx_sw_desc *sw_ring, + unsigned int txq_entries, + unsigned int socket_id); +void sfc_efx_tso_free_tsoh_objs(struct sfc_efx_tx_sw_desc *sw_ring, + unsigned int txq_entries); +int sfc_efx_tso_do(struct sfc_efx_txq *txq, unsigned int idx, + struct rte_mbuf **in_seg, size_t *in_off, efx_desc_t **pend, + unsigned int *pkt_descs, size_t *pkt_len); + +#ifdef __cplusplus +} +#endif +#endif /* _SFC_TX_H */ diff --git a/src/spdk/dpdk/drivers/net/softnic/Makefile b/src/spdk/dpdk/drivers/net/softnic/Makefile new file mode 100644 index 000000000..dabbe13a5 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/softnic/Makefile @@ -0,0 +1,54 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2017 Intel Corporation + +include $(RTE_SDK)/mk/rte.vars.mk + +# +# library name +# +LIB = librte_pmd_softnic.a + +CFLAGS += -O3 +CFLAGS += $(WERROR_FLAGS) +LDLIBS += -lrte_pipeline -lrte_port -lrte_table +LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring +LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs -lrte_sched +LDLIBS += -lrte_cryptodev +LDLIBS += -lrte_bus_vdev + +EXPORT_MAP := rte_pmd_softnic_version.map + +# +# all source are stored in SRCS-y +# +SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += rte_eth_softnic.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += rte_eth_softnic_mempool.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += rte_eth_softnic_swq.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += rte_eth_softnic_link.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += 
rte_eth_softnic_tm.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += rte_eth_softnic_tap.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += rte_eth_softnic_action.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += rte_eth_softnic_pipeline.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += rte_eth_softnic_thread.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += rte_eth_softnic_cli.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += rte_eth_softnic_flow.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += rte_eth_softnic_meter.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += rte_eth_softnic_cryptodev.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += parser.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += conn.c + +# +# Export include files +# +SYMLINK-y-include += rte_eth_softnic.h + +ifneq ($(CONFIG_RTE_EXEC_ENV_LINUX),y) +$(info Softnic PMD can only operate in a linux environment, \ +please change the definition of the RTE_TARGET environment variable) +all: +clean: +else + +include $(RTE_SDK)/mk/rte.lib.mk + +endif diff --git a/src/spdk/dpdk/drivers/net/softnic/conn.c b/src/spdk/dpdk/drivers/net/softnic/conn.c new file mode 100644 index 000000000..8b6658088 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/softnic/conn.c @@ -0,0 +1,331 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2018 Intel Corporation + */ + +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include + +#include "conn.h" + +#define MSG_CMD_TOO_LONG "Command too long." + +struct softnic_conn { + char *welcome; + char *prompt; + char *buf; + char *msg_in; + char *msg_out; + size_t buf_size; + size_t msg_in_len_max; + size_t msg_out_len_max; + size_t msg_in_len; + int fd_server; + int fd_client_group; + softnic_conn_msg_handle_t msg_handle; + void *msg_handle_arg; +}; + +struct softnic_conn * +softnic_conn_init(struct softnic_conn_params *p) +{ + struct sockaddr_in server_address; + struct softnic_conn *conn; + int fd_server, fd_client_group, status; + + memset(&server_address, 0, sizeof(server_address)); + + /* Check input arguments */ + if (p == NULL || + p->welcome == NULL || + p->prompt == NULL || + p->addr == NULL || + p->buf_size == 0 || + p->msg_in_len_max == 0 || + p->msg_out_len_max == 0 || + p->msg_handle == NULL) + return NULL; + + status = inet_aton(p->addr, &server_address.sin_addr); + if (status == 0) + return NULL; + + /* Memory allocation */ + conn = calloc(1, sizeof(struct softnic_conn)); + if (conn == NULL) + return NULL; + + conn->welcome = calloc(1, CONN_WELCOME_LEN_MAX + 1); + conn->prompt = calloc(1, CONN_PROMPT_LEN_MAX + 1); + conn->buf = calloc(1, p->buf_size); + conn->msg_in = calloc(1, p->msg_in_len_max + 1); + conn->msg_out = calloc(1, p->msg_out_len_max + 1); + + if (conn->welcome == NULL || + conn->prompt == NULL || + conn->buf == NULL || + conn->msg_in == NULL || + conn->msg_out == NULL) { + softnic_conn_free(conn); + return NULL; + } + + /* Server socket */ + server_address.sin_family = AF_INET; + server_address.sin_port = htons(p->port); + + fd_server = socket(AF_INET, + SOCK_STREAM | SOCK_NONBLOCK, + 0); + if (fd_server == -1) { + softnic_conn_free(conn); + return NULL; + } + + status = bind(fd_server, + (struct sockaddr *)&server_address, + sizeof(server_address)); + if (status == -1) { + softnic_conn_free(conn); + close(fd_server); + return NULL; + } + + status = listen(fd_server, 16); + if (status == -1) { + softnic_conn_free(conn); + close(fd_server); + return NULL; + } + + /* Client group */ + fd_client_group = epoll_create(1); + if (fd_client_group == -1) { + softnic_conn_free(conn); + 
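/*
 * Note: at this point conn->fd_server is still 0 (it is only stored into
 * the structure in the "Fill in" section below), so softnic_conn_free()
 * does not close the listening socket; the explicit close(fd_server) in
 * these error paths takes care of it.
 */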
close(fd_server); + return NULL; + } + + /* Fill in */ + strncpy(conn->welcome, p->welcome, CONN_WELCOME_LEN_MAX); + strncpy(conn->prompt, p->prompt, CONN_PROMPT_LEN_MAX); + conn->buf_size = p->buf_size; + conn->msg_in_len_max = p->msg_in_len_max; + conn->msg_out_len_max = p->msg_out_len_max; + conn->msg_in_len = 0; + conn->fd_server = fd_server; + conn->fd_client_group = fd_client_group; + conn->msg_handle = p->msg_handle; + conn->msg_handle_arg = p->msg_handle_arg; + + return conn; +} + +void +softnic_conn_free(struct softnic_conn *conn) +{ + if (conn == NULL) + return; + + if (conn->fd_client_group) + close(conn->fd_client_group); + + if (conn->fd_server) + close(conn->fd_server); + + free(conn->msg_out); + free(conn->msg_in); + free(conn->prompt); + free(conn->welcome); + free(conn); +} + +int +softnic_conn_poll_for_conn(struct softnic_conn *conn) +{ + struct sockaddr_in client_address; + struct epoll_event event; + socklen_t client_address_length; + int fd_client, status; + + /* Check input arguments */ + if (conn == NULL) + return -1; + + /* Server socket */ + client_address_length = sizeof(client_address); + fd_client = accept4(conn->fd_server, + (struct sockaddr *)&client_address, + &client_address_length, + SOCK_NONBLOCK); + if (fd_client == -1) { + if (errno == EAGAIN || errno == EWOULDBLOCK) + return 0; + + return -1; + } + + /* Client group */ + event.events = EPOLLIN | EPOLLRDHUP | EPOLLHUP; + event.data.fd = fd_client; + + status = epoll_ctl(conn->fd_client_group, + EPOLL_CTL_ADD, + fd_client, + &event); + if (status == -1) { + close(fd_client); + return -1; + } + + /* Client */ + status = write(fd_client, + conn->welcome, + strlen(conn->welcome)); + if (status == -1) { + close(fd_client); + return -1; + } + + status = write(fd_client, + conn->prompt, + strlen(conn->prompt)); + if (status == -1) { + close(fd_client); + return -1; + } + + return 0; +} + +static int +data_event_handle(struct softnic_conn *conn, + int fd_client) +{ + ssize_t len, i, status; + + /* Read input message */ + + len = read(fd_client, + conn->buf, + conn->buf_size); + if (len == -1) { + if (errno == EAGAIN || errno == EWOULDBLOCK) + return 0; + + return -1; + } + if (len == 0) + return 0; + + /* Handle input messages */ + for (i = 0; i < len; i++) { + if (conn->buf[i] == '\n') { + size_t n; + + conn->msg_in[conn->msg_in_len] = 0; + conn->msg_out[0] = 0; + + conn->msg_handle(conn->msg_in, + conn->msg_out, + conn->msg_out_len_max, + conn->msg_handle_arg); + + n = strlen(conn->msg_out); + if (n) { + status = write(fd_client, + conn->msg_out, + n); + if (status == -1) + return status; + } + + conn->msg_in_len = 0; + } else if (conn->msg_in_len < conn->msg_in_len_max) { + conn->msg_in[conn->msg_in_len] = conn->buf[i]; + conn->msg_in_len++; + } else { + status = write(fd_client, + MSG_CMD_TOO_LONG, + strlen(MSG_CMD_TOO_LONG)); + if (status == -1) + return status; + + conn->msg_in_len = 0; + } + } + + /* Write prompt */ + status = write(fd_client, + conn->prompt, + strlen(conn->prompt)); + if (status == -1) + return status; + + return 0; +} + +static int +control_event_handle(struct softnic_conn *conn, + int fd_client) +{ + int status; + + status = epoll_ctl(conn->fd_client_group, + EPOLL_CTL_DEL, + fd_client, + NULL); + if (status == -1) + return -1; + + status = close(fd_client); + if (status == -1) + return -1; + + return 0; +} + +int +softnic_conn_poll_for_msg(struct softnic_conn *conn) +{ + struct epoll_event event; + int fd_client, status, status_data = 0, status_control = 0; + + /* Check input 
arguments */ + if (conn == NULL) + return -1; + + /* Client group */ + status = epoll_wait(conn->fd_client_group, + &event, + 1, + 0); + if (status == -1) + return -1; + if (status == 0) + return 0; + + fd_client = event.data.fd; + + /* Data available */ + if (event.events & EPOLLIN) + status_data = data_event_handle(conn, fd_client); + + /* Control events */ + if (event.events & (EPOLLRDHUP | EPOLLERR | EPOLLHUP)) + status_control = control_event_handle(conn, fd_client); + + if (status_data || status_control) + return -1; + + return 0; +} diff --git a/src/spdk/dpdk/drivers/net/softnic/conn.h b/src/spdk/dpdk/drivers/net/softnic/conn.h new file mode 100644 index 000000000..631edeef3 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/softnic/conn.h @@ -0,0 +1,49 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2018 Intel Corporation + */ + +#ifndef __INCLUDE_CONN_H__ +#define __INCLUDE_CONN_H__ + +#include + +struct softnic_conn; + +#ifndef CONN_WELCOME_LEN_MAX +#define CONN_WELCOME_LEN_MAX 1024 +#endif + +#ifndef CONN_PROMPT_LEN_MAX +#define CONN_PROMPT_LEN_MAX 16 +#endif + +typedef void (*softnic_conn_msg_handle_t)(char *msg_in, + char *msg_out, + size_t msg_out_len_max, + void *arg); + +struct softnic_conn_params { + const char *welcome; + const char *prompt; + const char *addr; + uint16_t port; + size_t buf_size; + size_t msg_in_len_max; + size_t msg_out_len_max; + softnic_conn_msg_handle_t msg_handle; + void *msg_handle_arg; +}; + +struct softnic_conn * +softnic_conn_init(struct softnic_conn_params *p); + +void +softnic_conn_free(struct softnic_conn *conn); + +int +softnic_conn_poll_for_conn(struct softnic_conn *conn); + +int +softnic_conn_poll_for_msg(struct softnic_conn *conn); + +#endif diff --git a/src/spdk/dpdk/drivers/net/softnic/firmware.cli b/src/spdk/dpdk/drivers/net/softnic/firmware.cli new file mode 100644 index 000000000..300cf6e33 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/softnic/firmware.cli @@ -0,0 +1,21 @@ +; SPDX-License-Identifier: BSD-3-Clause +; Copyright(c) 2018 Intel Corporation + +link LINK dev 0000:02:00.0 + +pipeline RX period 10 offset_port_id 0 +pipeline RX port in bsz 32 link LINK rxq 0 +pipeline RX port out bsz 32 swq RXQ0 +pipeline RX table match stub +pipeline RX port in 0 table 0 +pipeline RX table 0 rule add match default action fwd port 0 + +pipeline TX period 10 offset_port_id 0 +pipeline TX port in bsz 32 swq TXQ0 +pipeline TX port out bsz 32 link LINK txq 0 +pipeline TX table match stub +pipeline TX port in 0 table 0 +pipeline TX table 0 rule add match default action fwd port 0 + +thread 1 pipeline RX enable +thread 1 pipeline TX enable diff --git a/src/spdk/dpdk/drivers/net/softnic/meson.build b/src/spdk/dpdk/drivers/net/softnic/meson.build new file mode 100644 index 000000000..96c003e15 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/softnic/meson.build @@ -0,0 +1,24 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2018 Intel Corporation + +if not is_linux + build = false + reason = 'only supported on linux' +endif +install_headers('rte_eth_softnic.h') +sources = files('rte_eth_softnic_tm.c', + 'rte_eth_softnic.c', + 'rte_eth_softnic_mempool.c', + 'rte_eth_softnic_swq.c', + 'rte_eth_softnic_link.c', + 'rte_eth_softnic_tap.c', + 'rte_eth_softnic_action.c', + 'rte_eth_softnic_pipeline.c', + 'rte_eth_softnic_thread.c', + 'rte_eth_softnic_cli.c', + 'rte_eth_softnic_flow.c', + 'rte_eth_softnic_meter.c', + 'rte_eth_softnic_cryptodev.c', + 'parser.c', + 'conn.c') +deps += ['pipeline', 'port', 'table', 'sched', 'cryptodev'] diff 
--git a/src/spdk/dpdk/drivers/net/softnic/parser.c b/src/spdk/dpdk/drivers/net/softnic/parser.c new file mode 100644 index 000000000..dc15ec8aa --- /dev/null +++ b/src/spdk/dpdk/drivers/net/softnic/parser.c @@ -0,0 +1,703 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2016 Intel Corporation. + * Copyright (c) 2009, Olivier MATZ + * All rights reserved. + */ + +/* For inet_pton4() and inet_pton6() functions: + * + * Copyright (c) 1996 by Internet Software Consortium. + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SOFTWARE CONSORTIUM DISCLAIMS + * ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL INTERNET SOFTWARE + * CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS + * SOFTWARE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "parser.h" + +static uint32_t +get_hex_val(char c) +{ + switch (c) { + case '0': case '1': case '2': case '3': case '4': case '5': + case '6': case '7': case '8': case '9': + return c - '0'; + case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': + return c - 'A' + 10; + case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': + return c - 'a' + 10; + default: + return 0; + } +} + +int +softnic_parser_read_arg_bool(const char *p) +{ + p = skip_white_spaces(p); + int result = -EINVAL; + + if (((p[0] == 'y') && (p[1] == 'e') && (p[2] == 's')) || + ((p[0] == 'Y') && (p[1] == 'E') && (p[2] == 'S'))) { + p += 3; + result = 1; + } + + if (((p[0] == 'o') && (p[1] == 'n')) || + ((p[0] == 'O') && (p[1] == 'N'))) { + p += 2; + result = 1; + } + + if (((p[0] == 'n') && (p[1] == 'o')) || + ((p[0] == 'N') && (p[1] == 'O'))) { + p += 2; + result = 0; + } + + if (((p[0] == 'o') && (p[1] == 'f') && (p[2] == 'f')) || + ((p[0] == 'O') && (p[1] == 'F') && (p[2] == 'F'))) { + p += 3; + result = 0; + } + + p = skip_white_spaces(p); + + if (p[0] != '\0') + return -EINVAL; + + return result; +} + +int +softnic_parser_read_int32(int32_t *value, const char *p) +{ + char *next; + int32_t val; + + p = skip_white_spaces(p); + if (!isdigit(*p)) + return -EINVAL; + + val = strtol(p, &next, 10); + if (p == next) + return -EINVAL; + + *value = val; + return 0; +} + +int +softnic_parser_read_uint64(uint64_t *value, const char *p) +{ + char *next; + uint64_t val; + + p = skip_white_spaces(p); + if (!isdigit(*p)) + return -EINVAL; + + val = strtoul(p, &next, 10); + if (p == next) + return -EINVAL; + + p = next; + switch (*p) { + case 'T': + val *= 1024ULL; + /* fall through */ + case 'G': + val *= 1024ULL; + /* fall through */ + case 'M': + val *= 1024ULL; + /* fall through */ + case 'k': + case 'K': + val *= 1024ULL; + p++; + break; + } + + p = skip_white_spaces(p); + if (*p != '\0') + return -EINVAL; + + *value = val; + return 0; +} + +int +softnic_parser_read_uint64_hex(uint64_t *value, const char *p) +{ + char *next; + uint64_t val; + + p = skip_white_spaces(p); + + val = strtoul(p, &next, 16); + if (p == next) + return -EINVAL; + 
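/*
 * Unlike softnic_parser_read_uint64() above, this hex variant accepts no
 * size suffix. The decimal parser treats a trailing k/K, M, G or T as a
 * multiplier of 1024, 1024^2, 1024^3 or 1024^4 respectively, so for
 * example "32K" parses to 32768 and "2M" to 2097152 (illustrative values).
 */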
+ p = skip_white_spaces(next); + if (*p != '\0') + return -EINVAL; + + *value = val; + return 0; +} + +int +softnic_parser_read_uint32(uint32_t *value, const char *p) +{ + uint64_t val = 0; + int ret = softnic_parser_read_uint64(&val, p); + + if (ret < 0) + return ret; + + if (val > UINT32_MAX) + return -ERANGE; + + *value = val; + return 0; +} + +int +softnic_parser_read_uint32_hex(uint32_t *value, const char *p) +{ + uint64_t val = 0; + int ret = softnic_parser_read_uint64_hex(&val, p); + + if (ret < 0) + return ret; + + if (val > UINT32_MAX) + return -ERANGE; + + *value = val; + return 0; +} + +int +softnic_parser_read_uint16(uint16_t *value, const char *p) +{ + uint64_t val = 0; + int ret = softnic_parser_read_uint64(&val, p); + + if (ret < 0) + return ret; + + if (val > UINT16_MAX) + return -ERANGE; + + *value = val; + return 0; +} + +int +softnic_parser_read_uint16_hex(uint16_t *value, const char *p) +{ + uint64_t val = 0; + int ret = softnic_parser_read_uint64_hex(&val, p); + + if (ret < 0) + return ret; + + if (val > UINT16_MAX) + return -ERANGE; + + *value = val; + return 0; +} + +int +softnic_parser_read_uint8(uint8_t *value, const char *p) +{ + uint64_t val = 0; + int ret = softnic_parser_read_uint64(&val, p); + + if (ret < 0) + return ret; + + if (val > UINT8_MAX) + return -ERANGE; + + *value = val; + return 0; +} + +int +softnic_parser_read_uint8_hex(uint8_t *value, const char *p) +{ + uint64_t val = 0; + int ret = softnic_parser_read_uint64_hex(&val, p); + + if (ret < 0) + return ret; + + if (val > UINT8_MAX) + return -ERANGE; + + *value = val; + return 0; +} + +int +softnic_parse_tokenize_string(char *string, char *tokens[], uint32_t *n_tokens) +{ + uint32_t i; + + if (string == NULL || + tokens == NULL || + (*n_tokens < 1)) + return -EINVAL; + + for (i = 0; i < *n_tokens; i++) { + tokens[i] = strtok_r(string, PARSE_DELIMITER, &string); + if (tokens[i] == NULL) + break; + } + + if (i == *n_tokens && + strtok_r(string, PARSE_DELIMITER, &string) != NULL) + return -E2BIG; + + *n_tokens = i; + return 0; +} + +int +softnic_parse_hex_string(char *src, uint8_t *dst, uint32_t *size) +{ + char *c; + uint32_t len, i; + + /* Check input parameters */ + if (src == NULL || + dst == NULL || + size == NULL || + (*size == 0)) + return -1; + + len = strlen(src); + if (((len & 3) != 0) || + (len > (*size) * 2)) + return -1; + *size = len / 2; + + for (c = src; *c != 0; c++) { + if ((((*c) >= '0') && ((*c) <= '9')) || + (((*c) >= 'A') && ((*c) <= 'F')) || + (((*c) >= 'a') && ((*c) <= 'f'))) + continue; + + return -1; + } + + /* Convert chars to bytes */ + for (i = 0; i < *size; i++) + dst[i] = get_hex_val(src[2 * i]) * 16 + + get_hex_val(src[2 * i + 1]); + + return 0; +} + +int +softnic_parse_mpls_labels(char *string, uint32_t *labels, uint32_t *n_labels) +{ + uint32_t n_max_labels = *n_labels, count = 0; + + /* Check for void list of labels */ + if (strcmp(string, "") == 0) { + *n_labels = 0; + return 0; + } + + /* At least one label should be present */ + for ( ; (*string != '\0'); ) { + char *next; + int value; + + if (count >= n_max_labels) + return -1; + + if (count > 0) { + if (string[0] != ':') + return -1; + + string++; + } + + value = strtol(string, &next, 10); + if (next == string) + return -1; + string = next; + + labels[count++] = (uint32_t)value; + } + + *n_labels = count; + return 0; +} + +#define INADDRSZ 4 +#define IN6ADDRSZ 16 + +/* int + * inet_pton4(src, dst) + * like inet_aton() but without all the hexadecimal and shorthand. 
+ * return: + * 1 if `src' is a valid dotted quad, else 0. + * notice: + * does not touch `dst' unless it's returning 1. + * author: + * Paul Vixie, 1996. + */ +static int +inet_pton4(const char *src, unsigned char *dst) +{ + static const char digits[] = "0123456789"; + int saw_digit, octets, ch; + unsigned char tmp[INADDRSZ], *tp; + + saw_digit = 0; + octets = 0; + *(tp = tmp) = 0; + while ((ch = *src++) != '\0') { + const char *pch; + + pch = strchr(digits, ch); + if (pch != NULL) { + unsigned int new = *tp * 10 + (pch - digits); + + if (new > 255) + return 0; + if (!saw_digit) { + if (++octets > 4) + return 0; + saw_digit = 1; + } + *tp = (unsigned char)new; + } else if (ch == '.' && saw_digit) { + if (octets == 4) + return 0; + *++tp = 0; + saw_digit = 0; + } else + return 0; + } + if (octets < 4) + return 0; + + memcpy(dst, tmp, INADDRSZ); + return 1; +} + +/* int + * inet_pton6(src, dst) + * convert presentation level address to network order binary form. + * return: + * 1 if `src' is a valid [RFC1884 2.2] address, else 0. + * notice: + * (1) does not touch `dst' unless it's returning 1. + * (2) :: in a full address is silently ignored. + * credit: + * inspired by Mark Andrews. + * author: + * Paul Vixie, 1996. + */ +static int +inet_pton6(const char *src, unsigned char *dst) +{ + static const char xdigits_l[] = "0123456789abcdef", + xdigits_u[] = "0123456789ABCDEF"; + unsigned char tmp[IN6ADDRSZ], *tp = 0, *endp = 0, *colonp = 0; + const char *xdigits = 0, *curtok = 0; + int ch = 0, saw_xdigit = 0, count_xdigit = 0; + unsigned int val = 0; + unsigned int dbloct_count = 0; + + memset((tp = tmp), '\0', IN6ADDRSZ); + endp = tp + IN6ADDRSZ; + colonp = NULL; + /* Leading :: requires some special handling. */ + if (*src == ':') + if (*++src != ':') + return 0; + curtok = src; + saw_xdigit = count_xdigit = 0; + val = 0; + + while ((ch = *src++) != '\0') { + const char *pch; + + pch = strchr((xdigits = xdigits_l), ch); + if (pch == NULL) + pch = strchr((xdigits = xdigits_u), ch); + if (pch != NULL) { + if (count_xdigit >= 4) + return 0; + val <<= 4; + val |= (pch - xdigits); + if (val > 0xffff) + return 0; + saw_xdigit = 1; + count_xdigit++; + continue; + } + if (ch == ':') { + curtok = src; + if (!saw_xdigit) { + if (colonp) + return 0; + colonp = tp; + continue; + } else if (*src == '\0') { + return 0; + } + if (tp + sizeof(int16_t) > endp) + return 0; + *tp++ = (unsigned char)((val >> 8) & 0xff); + *tp++ = (unsigned char)(val & 0xff); + saw_xdigit = 0; + count_xdigit = 0; + val = 0; + dbloct_count++; + continue; + } + if (ch == '.' && ((tp + INADDRSZ) <= endp) && + inet_pton4(curtok, tp) > 0) { + tp += INADDRSZ; + saw_xdigit = 0; + dbloct_count += 2; + break; /* '\0' was seen by inet_pton4(). */ + } + return 0; + } + if (saw_xdigit) { + if (tp + sizeof(int16_t) > endp) + return 0; + *tp++ = (unsigned char)((val >> 8) & 0xff); + *tp++ = (unsigned char)(val & 0xff); + dbloct_count++; + } + if (colonp != NULL) { + /* if we already have 8 double octets, having a colon means error */ + if (dbloct_count == 8) + return 0; + + /* Since some memmove()'s erroneously fail to handle + * overlapping regions, we'll do the shift by hand. 
+ */ + const int n = tp - colonp; + int i; + + for (i = 1; i <= n; i++) { + endp[-i] = colonp[n - i]; + colonp[n - i] = 0; + } + tp = endp; + } + if (tp != endp) + return 0; + memcpy(dst, tmp, IN6ADDRSZ); + return 1; +} + +static struct rte_ether_addr * +my_ether_aton(const char *a) +{ + int i; + char *end; + unsigned long o[RTE_ETHER_ADDR_LEN]; + static struct rte_ether_addr ether_addr; + + i = 0; + do { + errno = 0; + o[i] = strtoul(a, &end, 16); + if (errno != 0 || end == a || (end[0] != ':' && end[0] != 0)) + return NULL; + a = end + 1; + } while (++i != sizeof(o) / sizeof(o[0]) && end[0] != 0); + + /* Junk at the end of line */ + if (end[0] != 0) + return NULL; + + /* Support the format XX:XX:XX:XX:XX:XX */ + if (i == RTE_ETHER_ADDR_LEN) { + while (i-- != 0) { + if (o[i] > UINT8_MAX) + return NULL; + ether_addr.addr_bytes[i] = (uint8_t)o[i]; + } + /* Support the format XXXX:XXXX:XXXX */ + } else if (i == RTE_ETHER_ADDR_LEN / 2) { + while (i-- != 0) { + if (o[i] > UINT16_MAX) + return NULL; + ether_addr.addr_bytes[i * 2] = (uint8_t)(o[i] >> 8); + ether_addr.addr_bytes[i * 2 + 1] = (uint8_t)(o[i] & 0xff); + } + /* unknown format */ + } else + return NULL; + + return (struct rte_ether_addr *)ðer_addr; +} + +int +softnic_parse_ipv4_addr(const char *token, struct in_addr *ipv4) +{ + if (strlen(token) >= INET_ADDRSTRLEN) + return -EINVAL; + + if (inet_pton4(token, (unsigned char *)ipv4) != 1) + return -EINVAL; + + return 0; +} + +int +softnic_parse_ipv6_addr(const char *token, struct in6_addr *ipv6) +{ + if (strlen(token) >= INET6_ADDRSTRLEN) + return -EINVAL; + + if (inet_pton6(token, (unsigned char *)ipv6) != 1) + return -EINVAL; + + return 0; +} + +int +softnic_parse_mac_addr(const char *token, struct rte_ether_addr *addr) +{ + struct rte_ether_addr *tmp; + + tmp = my_ether_aton(token); + if (tmp == NULL) + return -1; + + memcpy(addr, tmp, sizeof(struct rte_ether_addr)); + return 0; +} + +int +softnic_parse_cpu_core(const char *entry, + struct softnic_cpu_core_params *p) +{ + size_t num_len; + char num[8]; + + uint32_t s = 0, c = 0, h = 0, val; + uint8_t s_parsed = 0, c_parsed = 0, h_parsed = 0; + const char *next = skip_white_spaces(entry); + char type; + + if (p == NULL) + return -EINVAL; + + /* Expect or [sX][cY][h]. At least one parameter is required. */ + while (*next != '\0') { + /* If everything parsed nothing should left */ + if (s_parsed && c_parsed && h_parsed) + return -EINVAL; + + type = *next; + switch (type) { + case 's': + case 'S': + if (s_parsed || c_parsed || h_parsed) + return -EINVAL; + s_parsed = 1; + next++; + break; + case 'c': + case 'C': + if (c_parsed || h_parsed) + return -EINVAL; + c_parsed = 1; + next++; + break; + case 'h': + case 'H': + if (h_parsed) + return -EINVAL; + h_parsed = 1; + next++; + break; + default: + /* If it start from digit it must be only core id. 
*/ + if (!isdigit(*next) || s_parsed || c_parsed || h_parsed) + return -EINVAL; + + type = 'C'; + } + + for (num_len = 0; *next != '\0'; next++, num_len++) { + if (num_len == RTE_DIM(num)) + return -EINVAL; + + if (!isdigit(*next)) + break; + + num[num_len] = *next; + } + + if (num_len == 0 && type != 'h' && type != 'H') + return -EINVAL; + + if (num_len != 0 && (type == 'h' || type == 'H')) + return -EINVAL; + + num[num_len] = '\0'; + val = strtol(num, NULL, 10); + + h = 0; + switch (type) { + case 's': + case 'S': + s = val; + break; + case 'c': + case 'C': + c = val; + break; + case 'h': + case 'H': + h = 1; + break; + } + } + + p->socket_id = s; + p->core_id = c; + p->thread_id = h; + return 0; +} diff --git a/src/spdk/dpdk/drivers/net/softnic/parser.h b/src/spdk/dpdk/drivers/net/softnic/parser.h new file mode 100644 index 000000000..6f408b248 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/softnic/parser.h @@ -0,0 +1,68 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2016 Intel Corporation + */ + +#ifndef __INCLUDE_SOFTNIC_PARSER_H__ +#define __INCLUDE_SOFTNIC_PARSER_H__ + +#include + +#include +#include + +#define PARSE_DELIMITER " \f\n\r\t\v" + +#define skip_white_spaces(pos) \ +({ \ + __typeof__(pos) _p = (pos); \ + for ( ; isspace(*_p); _p++) \ + ; \ + _p; \ +}) + +static inline size_t +skip_digits(const char *src) +{ + size_t i; + + for (i = 0; isdigit(src[i]); i++) + ; + + return i; +} + +int softnic_parser_read_arg_bool(const char *p); + +int softnic_parser_read_int32(int32_t *value, const char *p); + +int softnic_parser_read_uint64(uint64_t *value, const char *p); +int softnic_parser_read_uint32(uint32_t *value, const char *p); +int softnic_parser_read_uint16(uint16_t *value, const char *p); +int softnic_parser_read_uint8(uint8_t *value, const char *p); + +int softnic_parser_read_uint64_hex(uint64_t *value, const char *p); +int softnic_parser_read_uint32_hex(uint32_t *value, const char *p); +int softnic_parser_read_uint16_hex(uint16_t *value, const char *p); +int softnic_parser_read_uint8_hex(uint8_t *value, const char *p); + +int softnic_parse_hex_string(char *src, uint8_t *dst, uint32_t *size); + +int softnic_parse_ipv4_addr(const char *token, struct in_addr *ipv4); +int softnic_parse_ipv6_addr(const char *token, struct in6_addr *ipv6); +int softnic_parse_mac_addr(const char *token, struct rte_ether_addr *addr); +int softnic_parse_mpls_labels(char *string, + uint32_t *labels, uint32_t *n_labels); + +struct softnic_cpu_core_params { + uint32_t socket_id; + uint32_t core_id; + uint32_t thread_id; +}; + +int softnic_parse_cpu_core(const char *entry, + struct softnic_cpu_core_params *p); + +int softnic_parse_tokenize_string(char *string, + char *tokens[], uint32_t *n_tokens); + +#endif diff --git a/src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic.c b/src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic.c new file mode 100644 index 000000000..11723778f --- /dev/null +++ b/src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic.c @@ -0,0 +1,718 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Intel Corporation + */ + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "rte_eth_softnic.h" +#include "rte_eth_softnic_internals.h" + +#define PMD_PARAM_FIRMWARE "firmware" +#define PMD_PARAM_CONN_PORT "conn_port" +#define PMD_PARAM_CPU_ID "cpu_id" +#define PMD_PARAM_SC "sc" +#define PMD_PARAM_TM_N_QUEUES "tm_n_queues" +#define PMD_PARAM_TM_QSIZE0 "tm_qsize0" +#define 
PMD_PARAM_TM_QSIZE1 "tm_qsize1" +#define PMD_PARAM_TM_QSIZE2 "tm_qsize2" +#define PMD_PARAM_TM_QSIZE3 "tm_qsize3" +#define PMD_PARAM_TM_QSIZE4 "tm_qsize4" +#define PMD_PARAM_TM_QSIZE5 "tm_qsize5" +#define PMD_PARAM_TM_QSIZE6 "tm_qsize6" +#define PMD_PARAM_TM_QSIZE7 "tm_qsize7" +#define PMD_PARAM_TM_QSIZE8 "tm_qsize8" +#define PMD_PARAM_TM_QSIZE9 "tm_qsize9" +#define PMD_PARAM_TM_QSIZE10 "tm_qsize10" +#define PMD_PARAM_TM_QSIZE11 "tm_qsize11" +#define PMD_PARAM_TM_QSIZE12 "tm_qsize12" + + +static const char * const pmd_valid_args[] = { + PMD_PARAM_FIRMWARE, + PMD_PARAM_CONN_PORT, + PMD_PARAM_CPU_ID, + PMD_PARAM_SC, + PMD_PARAM_TM_N_QUEUES, + PMD_PARAM_TM_QSIZE0, + PMD_PARAM_TM_QSIZE1, + PMD_PARAM_TM_QSIZE2, + PMD_PARAM_TM_QSIZE3, + PMD_PARAM_TM_QSIZE4, + PMD_PARAM_TM_QSIZE5, + PMD_PARAM_TM_QSIZE6, + PMD_PARAM_TM_QSIZE7, + PMD_PARAM_TM_QSIZE8, + PMD_PARAM_TM_QSIZE9, + PMD_PARAM_TM_QSIZE10, + PMD_PARAM_TM_QSIZE11, + PMD_PARAM_TM_QSIZE12, + NULL +}; + +static const char welcome[] = + "\n" + "Welcome to Soft NIC!\n" + "\n"; + +static const char prompt[] = "softnic> "; + +static const struct softnic_conn_params conn_params_default = { + .welcome = welcome, + .prompt = prompt, + .addr = "0.0.0.0", + .port = 0, + .buf_size = 1024 * 1024, + .msg_in_len_max = 1024, + .msg_out_len_max = 1024 * 1024, + .msg_handle = softnic_cli_process, + .msg_handle_arg = NULL, +}; + +static int pmd_softnic_logtype; + +#define PMD_LOG(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, pmd_softnic_logtype, \ + "%s(): " fmt "\n", __func__, ##args) + +static int +pmd_dev_infos_get(struct rte_eth_dev *dev __rte_unused, + struct rte_eth_dev_info *dev_info) +{ + dev_info->max_rx_pktlen = UINT32_MAX; + dev_info->max_rx_queues = UINT16_MAX; + dev_info->max_tx_queues = UINT16_MAX; + + return 0; +} + +static int +pmd_dev_configure(struct rte_eth_dev *dev __rte_unused) +{ + return 0; +} + +static int +pmd_rx_queue_setup(struct rte_eth_dev *dev, + uint16_t rx_queue_id, + uint16_t nb_rx_desc, + unsigned int socket_id __rte_unused, + const struct rte_eth_rxconf *rx_conf __rte_unused, + struct rte_mempool *mb_pool __rte_unused) +{ + char name[NAME_SIZE]; + struct pmd_internals *p = dev->data->dev_private; + struct softnic_swq *swq; + + struct softnic_swq_params params = { + .size = nb_rx_desc, + }; + + snprintf(name, sizeof(name), "RXQ%u", rx_queue_id); + + swq = softnic_swq_create(p, + name, + ¶ms); + if (swq == NULL) + return -1; + + dev->data->rx_queues[rx_queue_id] = swq->r; + return 0; +} + +static int +pmd_tx_queue_setup(struct rte_eth_dev *dev, + uint16_t tx_queue_id, + uint16_t nb_tx_desc, + unsigned int socket_id __rte_unused, + const struct rte_eth_txconf *tx_conf __rte_unused) +{ + char name[NAME_SIZE]; + struct pmd_internals *p = dev->data->dev_private; + struct softnic_swq *swq; + + struct softnic_swq_params params = { + .size = nb_tx_desc, + }; + + snprintf(name, sizeof(name), "TXQ%u", tx_queue_id); + + swq = softnic_swq_create(p, + name, + ¶ms); + if (swq == NULL) + return -1; + + dev->data->tx_queues[tx_queue_id] = swq->r; + return 0; +} + +static int +pmd_dev_start(struct rte_eth_dev *dev) +{ + struct pmd_internals *p = dev->data->dev_private; + int status; + + /* Firmware */ + status = softnic_cli_script_process(p, + p->params.firmware, + conn_params_default.msg_in_len_max, + conn_params_default.msg_out_len_max); + if (status) + return status; + + /* Link UP */ + dev->data->dev_link.link_status = ETH_LINK_UP; + + return 0; +} + +static void +pmd_dev_stop(struct rte_eth_dev *dev) +{ + struct pmd_internals *p = 
dev->data->dev_private; + + /* Link DOWN */ + dev->data->dev_link.link_status = ETH_LINK_DOWN; + + /* Firmware */ + softnic_pipeline_disable_all(p); + softnic_pipeline_free(p); + softnic_table_action_profile_free(p); + softnic_port_in_action_profile_free(p); + softnic_tap_free(p); + softnic_tmgr_free(p); + softnic_link_free(p); + softnic_softnic_swq_free_keep_rxq_txq(p); + softnic_mempool_free(p); + + tm_hierarchy_free(p); + softnic_mtr_free(p); +} + +static void +pmd_dev_close(struct rte_eth_dev *dev __rte_unused) +{ + return; +} + +static int +pmd_link_update(struct rte_eth_dev *dev __rte_unused, + int wait_to_complete __rte_unused) +{ + return 0; +} + +static int +pmd_filter_ctrl(struct rte_eth_dev *dev __rte_unused, + enum rte_filter_type filter_type, + enum rte_filter_op filter_op, + void *arg) +{ + if (filter_type == RTE_ETH_FILTER_GENERIC && + filter_op == RTE_ETH_FILTER_GET) { + *(const void **)arg = &pmd_flow_ops; + return 0; + } + + return -ENOTSUP; +} + +static int +pmd_tm_ops_get(struct rte_eth_dev *dev __rte_unused, void *arg) +{ + *(const struct rte_tm_ops **)arg = &pmd_tm_ops; + + return 0; +} + +static int +pmd_mtr_ops_get(struct rte_eth_dev *dev __rte_unused, void *arg) +{ + *(const struct rte_mtr_ops **)arg = &pmd_mtr_ops; + + return 0; +} + +static const struct eth_dev_ops pmd_ops = { + .dev_configure = pmd_dev_configure, + .dev_start = pmd_dev_start, + .dev_stop = pmd_dev_stop, + .dev_close = pmd_dev_close, + .link_update = pmd_link_update, + .dev_infos_get = pmd_dev_infos_get, + .rx_queue_setup = pmd_rx_queue_setup, + .tx_queue_setup = pmd_tx_queue_setup, + .filter_ctrl = pmd_filter_ctrl, + .tm_ops_get = pmd_tm_ops_get, + .mtr_ops_get = pmd_mtr_ops_get, +}; + +static uint16_t +pmd_rx_pkt_burst(void *rxq, + struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + return (uint16_t)rte_ring_sc_dequeue_burst(rxq, + (void **)rx_pkts, + nb_pkts, + NULL); +} + +static uint16_t +pmd_tx_pkt_burst(void *txq, + struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + return (uint16_t)rte_ring_sp_enqueue_burst(txq, + (void **)tx_pkts, + nb_pkts, + NULL); +} + +static void * +pmd_init(struct pmd_params *params) +{ + struct pmd_internals *p; + int status; + + p = rte_zmalloc_socket(params->name, + sizeof(struct pmd_internals), + 0, + params->cpu_id); + if (p == NULL) + return NULL; + + /* Params */ + memcpy(&p->params, params, sizeof(p->params)); + + /* Resources */ + tm_hierarchy_init(p); + softnic_mtr_init(p); + + softnic_mempool_init(p); + softnic_swq_init(p); + softnic_link_init(p); + softnic_tmgr_init(p); + softnic_tap_init(p); + softnic_cryptodev_init(p); + softnic_port_in_action_profile_init(p); + softnic_table_action_profile_init(p); + softnic_pipeline_init(p); + + status = softnic_thread_init(p); + if (status) { + rte_free(p); + return NULL; + } + + if (params->conn_port) { + struct softnic_conn_params conn_params; + + memcpy(&conn_params, &conn_params_default, sizeof(conn_params)); + conn_params.port = p->params.conn_port; + conn_params.msg_handle_arg = p; + + p->conn = softnic_conn_init(&conn_params); + if (p->conn == NULL) { + softnic_thread_free(p); + rte_free(p); + return NULL; + } + } + + return p; +} + +static void +pmd_free(struct pmd_internals *p) +{ + if (p == NULL) + return; + + if (p->params.conn_port) + softnic_conn_free(p->conn); + + softnic_thread_free(p); + softnic_pipeline_free(p); + softnic_table_action_profile_free(p); + softnic_port_in_action_profile_free(p); + softnic_tap_free(p); + softnic_tmgr_free(p); + softnic_link_free(p); + softnic_swq_free(p); + 
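/*
 * Full teardown releases resources in roughly the reverse order of
 * pmd_init(). Note the difference from pmd_dev_stop() above, which calls
 * softnic_softnic_swq_free_keep_rxq_txq() so that the SWQs backing the
 * ethdev RX/TX queues survive a stop/start cycle.
 */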
softnic_mempool_free(p); + + tm_hierarchy_free(p); + softnic_mtr_free(p); + + rte_free(p); +} + +static struct rte_ether_addr eth_addr = { + .addr_bytes = {0}, +}; + +static int +pmd_ethdev_register(struct rte_vdev_device *vdev, + struct pmd_params *params, + void *dev_private) +{ + struct rte_eth_dev *dev; + + /* Ethdev entry allocation */ + dev = rte_eth_dev_allocate(params->name); + if (!dev) + return -ENOMEM; + + /* dev */ + dev->rx_pkt_burst = pmd_rx_pkt_burst; + dev->tx_pkt_burst = pmd_tx_pkt_burst; + dev->tx_pkt_prepare = NULL; + dev->dev_ops = &pmd_ops; + dev->device = &vdev->device; + + /* dev->data */ + dev->data->dev_private = dev_private; + dev->data->dev_link.link_speed = ETH_SPEED_NUM_100G; + dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX; + dev->data->dev_link.link_autoneg = ETH_LINK_FIXED; + dev->data->dev_link.link_status = ETH_LINK_DOWN; + dev->data->mac_addrs = ð_addr; + dev->data->promiscuous = 1; + dev->data->kdrv = RTE_KDRV_NONE; + dev->data->numa_node = params->cpu_id; + + rte_eth_dev_probing_finish(dev); + + return 0; +} + +static int +get_string(const char *key __rte_unused, const char *value, void *extra_args) +{ + if (!value || !extra_args) + return -EINVAL; + + *(char **)extra_args = strdup(value); + + if (!*(char **)extra_args) + return -ENOMEM; + + return 0; +} + +static int +get_uint32(const char *key __rte_unused, const char *value, void *extra_args) +{ + if (!value || !extra_args) + return -EINVAL; + + *(uint32_t *)extra_args = strtoull(value, NULL, 0); + + return 0; +} + +static int +get_uint16(const char *key __rte_unused, const char *value, void *extra_args) +{ + if (!value || !extra_args) + return -EINVAL; + + *(uint16_t *)extra_args = strtoull(value, NULL, 0); + + return 0; +} + +static int +pmd_parse_args(struct pmd_params *p, const char *params) +{ + struct rte_kvargs *kvlist; + int ret = 0; + + kvlist = rte_kvargs_parse(params, pmd_valid_args); + if (kvlist == NULL) + return -EINVAL; + + /* Set default values */ + memset(p, 0, sizeof(*p)); + p->firmware = SOFTNIC_FIRMWARE; + p->cpu_id = SOFTNIC_CPU_ID; + p->sc = SOFTNIC_SC; + p->tm.n_queues = SOFTNIC_TM_N_QUEUES; + p->tm.qsize[0] = SOFTNIC_TM_QUEUE_SIZE; + p->tm.qsize[1] = SOFTNIC_TM_QUEUE_SIZE; + p->tm.qsize[2] = SOFTNIC_TM_QUEUE_SIZE; + p->tm.qsize[3] = SOFTNIC_TM_QUEUE_SIZE; + p->tm.qsize[4] = SOFTNIC_TM_QUEUE_SIZE; + p->tm.qsize[5] = SOFTNIC_TM_QUEUE_SIZE; + p->tm.qsize[6] = SOFTNIC_TM_QUEUE_SIZE; + p->tm.qsize[7] = SOFTNIC_TM_QUEUE_SIZE; + p->tm.qsize[8] = SOFTNIC_TM_QUEUE_SIZE; + p->tm.qsize[9] = SOFTNIC_TM_QUEUE_SIZE; + p->tm.qsize[10] = SOFTNIC_TM_QUEUE_SIZE; + p->tm.qsize[11] = SOFTNIC_TM_QUEUE_SIZE; + p->tm.qsize[12] = SOFTNIC_TM_QUEUE_SIZE; + + /* Firmware script (optional) */ + if (rte_kvargs_count(kvlist, PMD_PARAM_FIRMWARE) == 1) { + ret = rte_kvargs_process(kvlist, PMD_PARAM_FIRMWARE, + &get_string, &p->firmware); + if (ret < 0) + goto out_free; + } + + /* Connection listening port (optional) */ + if (rte_kvargs_count(kvlist, PMD_PARAM_CONN_PORT) == 1) { + ret = rte_kvargs_process(kvlist, PMD_PARAM_CONN_PORT, + &get_uint16, &p->conn_port); + if (ret < 0) + goto out_free; + } + + /* CPU ID (optional) */ + if (rte_kvargs_count(kvlist, PMD_PARAM_CPU_ID) == 1) { + ret = rte_kvargs_process(kvlist, PMD_PARAM_CPU_ID, + &get_uint32, &p->cpu_id); + if (ret < 0) + goto out_free; + } + + /* Service cores (optional) */ + if (rte_kvargs_count(kvlist, PMD_PARAM_SC) == 1) { + ret = rte_kvargs_process(kvlist, PMD_PARAM_SC, + &get_uint32, &p->sc); + if (ret < 0) + goto out_free; + } + + /* TM 
number of queues (optional) */ + if (rte_kvargs_count(kvlist, PMD_PARAM_TM_N_QUEUES) == 1) { + ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_N_QUEUES, + &get_uint32, &p->tm.n_queues); + if (ret < 0) + goto out_free; + } + + /* TM queue size 0 .. 3 (optional) */ + if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE0) == 1) { + ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE0, + &get_uint32, &p->tm.qsize[0]); + if (ret < 0) + goto out_free; + } + + if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE1) == 1) { + ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE1, + &get_uint32, &p->tm.qsize[1]); + if (ret < 0) + goto out_free; + } + + if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE2) == 1) { + ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE2, + &get_uint32, &p->tm.qsize[2]); + if (ret < 0) + goto out_free; + } + + if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE3) == 1) { + ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE3, + &get_uint32, &p->tm.qsize[3]); + if (ret < 0) + goto out_free; + } + + if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE4) == 1) { + ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE4, + &get_uint32, &p->tm.qsize[4]); + if (ret < 0) + goto out_free; + } + + if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE5) == 1) { + ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE5, + &get_uint32, &p->tm.qsize[5]); + if (ret < 0) + goto out_free; + } + + if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE6) == 1) { + ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE6, + &get_uint32, &p->tm.qsize[6]); + if (ret < 0) + goto out_free; + } + + if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE7) == 1) { + ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE7, + &get_uint32, &p->tm.qsize[7]); + if (ret < 0) + goto out_free; + } + if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE8) == 1) { + ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE8, + &get_uint32, &p->tm.qsize[8]); + if (ret < 0) + goto out_free; + } + if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE9) == 1) { + ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE9, + &get_uint32, &p->tm.qsize[9]); + if (ret < 0) + goto out_free; + } + + if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE10) == 1) { + ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE10, + &get_uint32, &p->tm.qsize[10]); + if (ret < 0) + goto out_free; + } + + if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE11) == 1) { + ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE11, + &get_uint32, &p->tm.qsize[11]); + if (ret < 0) + goto out_free; + } + + if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE12) == 1) { + ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE12, + &get_uint32, &p->tm.qsize[12]); + if (ret < 0) + goto out_free; + } + +out_free: + rte_kvargs_free(kvlist); + return ret; +} + +static int +pmd_probe(struct rte_vdev_device *vdev) +{ + struct pmd_params p; + const char *params; + int status = 0; + + void *dev_private; + const char *name = rte_vdev_device_name(vdev); + + PMD_LOG(INFO, "Probing device \"%s\"", name); + + /* Parse input arguments */ + params = rte_vdev_device_args(vdev); + if (!params) + return -EINVAL; + + status = pmd_parse_args(&p, params); + if (status) + return status; + + p.name = name; + + /* Allocate and initialize soft ethdev private data */ + dev_private = pmd_init(&p); + if (dev_private == NULL) + return -ENOMEM; + + /* Register soft ethdev */ + PMD_LOG(INFO, "Creating soft ethdev \"%s\"", p.name); + + status = pmd_ethdev_register(vdev, &p, dev_private); + if (status) { + pmd_free(dev_private); + return status; + } + + return 0; +} + 
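/*
 * For illustration only (not part of the upstream sources): the vdev
 * probed below is typically created through EAL device arguments using
 * the parameter names registered at the bottom of this file, e.g.
 *
 *     --vdev 'net_softnic0,firmware=firmware.cli,conn_port=8086'
 *
 * (instance name and port number are arbitrary examples). When sc=0, i.e.
 * no service cores, the application drives the device itself; a minimal
 * single-threaded sketch, assuming port_id identifies the Soft NIC port
 * and quit is an application-defined stop flag:
 *
 *     #include <rte_ethdev.h>
 *     #include "rte_eth_softnic.h"
 *
 *     while (!quit) {
 *         rte_pmd_softnic_run(port_id);      // run the pipelines
 *         rte_pmd_softnic_manage(port_id);   // poll the CLI connection
 *     }
 */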
+static int +pmd_remove(struct rte_vdev_device *vdev) +{ + struct rte_eth_dev *dev = NULL; + + if (!vdev) + return -EINVAL; + + PMD_LOG(INFO, "Removing device \"%s\"", rte_vdev_device_name(vdev)); + + /* Find the ethdev entry */ + dev = rte_eth_dev_allocated(rte_vdev_device_name(vdev)); + if (dev == NULL) + return -ENODEV; + + /* Free device data structures*/ + pmd_free(dev->data->dev_private); + dev->data->dev_private = NULL; /* already freed */ + dev->data->mac_addrs = NULL; /* statically allocated */ + rte_eth_dev_release_port(dev); + + return 0; +} + +static struct rte_vdev_driver pmd_softnic_drv = { + .probe = pmd_probe, + .remove = pmd_remove, +}; + +RTE_PMD_REGISTER_VDEV(net_softnic, pmd_softnic_drv); +RTE_PMD_REGISTER_PARAM_STRING(net_softnic, + PMD_PARAM_FIRMWARE "= " + PMD_PARAM_CONN_PORT "= " + PMD_PARAM_CPU_ID "= " + PMD_PARAM_TM_N_QUEUES "= " + PMD_PARAM_TM_QSIZE0 "= " + PMD_PARAM_TM_QSIZE1 "= " + PMD_PARAM_TM_QSIZE2 "= " + PMD_PARAM_TM_QSIZE3 "=" + PMD_PARAM_TM_QSIZE4 "= " + PMD_PARAM_TM_QSIZE5 "= " + PMD_PARAM_TM_QSIZE6 "= " + PMD_PARAM_TM_QSIZE7 "= " + PMD_PARAM_TM_QSIZE8 "= " + PMD_PARAM_TM_QSIZE9 "= " + PMD_PARAM_TM_QSIZE10 "= " + PMD_PARAM_TM_QSIZE11 "=" + PMD_PARAM_TM_QSIZE12 "=" +); + + +RTE_INIT(pmd_softnic_init_log) +{ + pmd_softnic_logtype = rte_log_register("pmd.net.softnic"); + if (pmd_softnic_logtype >= 0) + rte_log_set_level(pmd_softnic_logtype, RTE_LOG_NOTICE); +} + +int +rte_pmd_softnic_manage(uint16_t port_id) +{ + struct rte_eth_dev *dev = &rte_eth_devices[port_id]; + struct pmd_internals *softnic; + +#ifdef RTE_LIBRTE_ETHDEV_DEBUG + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0); +#endif + + softnic = dev->data->dev_private; + + softnic_conn_poll_for_conn(softnic->conn); + + softnic_conn_poll_for_msg(softnic->conn); + + return 0; +} diff --git a/src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic.h b/src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic.h new file mode 100644 index 000000000..3f0116177 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic.h @@ -0,0 +1,86 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Intel Corporation + */ + +#ifndef __INCLUDE_RTE_ETH_SOFTNIC_H__ +#define __INCLUDE_RTE_ETH_SOFTNIC_H__ + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/** Firmware. */ +#ifndef SOFTNIC_FIRMWARE +#define SOFTNIC_FIRMWARE "firmware.cli" +#endif + +/** TCP connection port (0 = no connectivity). */ +#ifndef SOFTNIC_CONN_PORT +#define SOFTNIC_CONN_PORT 0 +#endif + +/** NUMA node ID. */ +#ifndef SOFTNIC_CPU_ID +#define SOFTNIC_CPU_ID 0 +#endif + +/** + * Service cores: + * + * 0 = The current device is run explicitly by the application. The firmware + * creates one or several pipelines for the current device and maps them to + * CPU cores that should not be service cores. The application is required + * to call rte_pmd_softnic_run() for the current device on each of these CPU + * cores in order to make the current device work. + * + * 1 = The current device is run on the service cores transparently to the + * application. The firmware creates one or several pipelines for the + * current device and maps them to CPU cores that should be service cores. + * Each of these service cores is calling rte_pmd_softnic_run() for the + * current device in order to make the current device work. The application + * is not allowed to call rte_pmd_softnic_run() for the current device. + */ +#ifndef SOFTNIC_SC +#define SOFTNIC_SC 1 +#endif + +/** Traffic Manager: Number of scheduler queues. 
*/ +#ifndef SOFTNIC_TM_N_QUEUES +#define SOFTNIC_TM_N_QUEUES (64 * 1024) +#endif + +/** Traffic Manager: Scheduler queue size (per traffic class). */ +#ifndef SOFTNIC_TM_QUEUE_SIZE +#define SOFTNIC_TM_QUEUE_SIZE 64 +#endif + +/** + * Soft NIC run. + * + * @param port_id + * Port ID of the Soft NIC device. + * @return + * Zero on success, error code otherwise. + */ +int +rte_pmd_softnic_run(uint16_t port_id); + +/** + * Soft NIC manage. + * + * @param port_id + * Port ID of the Soft NIC device. + * @return + * Zero on success, error code otherwise. + */ +__rte_experimental +int +rte_pmd_softnic_manage(uint16_t port_id); + +#ifdef __cplusplus +} +#endif + +#endif /* __INCLUDE_RTE_ETH_SOFTNIC_H__ */ diff --git a/src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_action.c b/src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_action.c new file mode 100644 index 000000000..92c744dc9 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_action.c @@ -0,0 +1,422 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2018 Intel Corporation + */ + +#include +#include +#include + +#include +#include + +#include "rte_eth_softnic_internals.h" + +/** + * Input port + */ +int +softnic_port_in_action_profile_init(struct pmd_internals *p) +{ + TAILQ_INIT(&p->port_in_action_profile_list); + + return 0; +} + +void +softnic_port_in_action_profile_free(struct pmd_internals *p) +{ + for ( ; ; ) { + struct softnic_port_in_action_profile *profile; + + profile = TAILQ_FIRST(&p->port_in_action_profile_list); + if (profile == NULL) + break; + + TAILQ_REMOVE(&p->port_in_action_profile_list, profile, node); + free(profile); + } +} + +struct softnic_port_in_action_profile * +softnic_port_in_action_profile_find(struct pmd_internals *p, + const char *name) +{ + struct softnic_port_in_action_profile *profile; + + if (name == NULL) + return NULL; + + TAILQ_FOREACH(profile, &p->port_in_action_profile_list, node) + if (strcmp(profile->name, name) == 0) + return profile; + + return NULL; +} + +struct softnic_port_in_action_profile * +softnic_port_in_action_profile_create(struct pmd_internals *p, + const char *name, + struct softnic_port_in_action_profile_params *params) +{ + struct softnic_port_in_action_profile *profile; + struct rte_port_in_action_profile *ap; + int status; + + /* Check input params */ + if (name == NULL || + softnic_port_in_action_profile_find(p, name) || + params == NULL) + return NULL; + + if ((params->action_mask & (1LLU << RTE_PORT_IN_ACTION_LB)) && + params->lb.f_hash == NULL) { + switch (params->lb.key_size) { + case 8: + params->lb.f_hash = rte_table_hash_crc_key8; + break; + + case 16: + params->lb.f_hash = rte_table_hash_crc_key16; + break; + + case 24: + params->lb.f_hash = rte_table_hash_crc_key24; + break; + + case 32: + params->lb.f_hash = rte_table_hash_crc_key32; + break; + + case 40: + params->lb.f_hash = rte_table_hash_crc_key40; + break; + + case 48: + params->lb.f_hash = rte_table_hash_crc_key48; + break; + + case 56: + params->lb.f_hash = rte_table_hash_crc_key56; + break; + + case 64: + params->lb.f_hash = rte_table_hash_crc_key64; + break; + + default: + return NULL; + } + + params->lb.seed = 0; + } + + /* Resource */ + ap = rte_port_in_action_profile_create(0); + if (ap == NULL) + return NULL; + + if (params->action_mask & (1LLU << RTE_PORT_IN_ACTION_FLTR)) { + status = rte_port_in_action_profile_action_register(ap, + RTE_PORT_IN_ACTION_FLTR, + ¶ms->fltr); + + if (status) { + rte_port_in_action_profile_free(ap); + return NULL; + } + } + + if 
(params->action_mask & (1LLU << RTE_PORT_IN_ACTION_LB)) { + status = rte_port_in_action_profile_action_register(ap, + RTE_PORT_IN_ACTION_LB, + ¶ms->lb); + + if (status) { + rte_port_in_action_profile_free(ap); + return NULL; + } + } + + status = rte_port_in_action_profile_freeze(ap); + if (status) { + rte_port_in_action_profile_free(ap); + return NULL; + } + + /* Node allocation */ + profile = calloc(1, sizeof(struct softnic_port_in_action_profile)); + if (profile == NULL) { + rte_port_in_action_profile_free(ap); + return NULL; + } + + /* Node fill in */ + strlcpy(profile->name, name, sizeof(profile->name)); + memcpy(&profile->params, params, sizeof(*params)); + profile->ap = ap; + + /* Node add to list */ + TAILQ_INSERT_TAIL(&p->port_in_action_profile_list, profile, node); + + return profile; +} + +/** + * Table + */ +int +softnic_table_action_profile_init(struct pmd_internals *p) +{ + TAILQ_INIT(&p->table_action_profile_list); + + return 0; +} + +void +softnic_table_action_profile_free(struct pmd_internals *p) +{ + for ( ; ; ) { + struct softnic_table_action_profile *profile; + + profile = TAILQ_FIRST(&p->table_action_profile_list); + if (profile == NULL) + break; + + TAILQ_REMOVE(&p->table_action_profile_list, profile, node); + free(profile); + } +} + +struct softnic_table_action_profile * +softnic_table_action_profile_find(struct pmd_internals *p, + const char *name) +{ + struct softnic_table_action_profile *profile; + + if (name == NULL) + return NULL; + + TAILQ_FOREACH(profile, &p->table_action_profile_list, node) + if (strcmp(profile->name, name) == 0) + return profile; + + return NULL; +} + +struct softnic_table_action_profile * +softnic_table_action_profile_create(struct pmd_internals *p, + const char *name, + struct softnic_table_action_profile_params *params) +{ + struct softnic_table_action_profile *profile; + struct rte_table_action_profile *ap; + int status; + + /* Check input params */ + if (name == NULL || + softnic_table_action_profile_find(p, name) || + params == NULL || + ((params->action_mask & (1LLU << RTE_TABLE_ACTION_FWD)) == 0)) + return NULL; + + if ((params->action_mask & (1LLU << RTE_TABLE_ACTION_LB)) && + params->lb.f_hash == NULL) { + switch (params->lb.key_size) { + case 8: + params->lb.f_hash = rte_table_hash_crc_key8; + break; + + case 16: + params->lb.f_hash = rte_table_hash_crc_key16; + break; + + case 24: + params->lb.f_hash = rte_table_hash_crc_key24; + break; + + case 32: + params->lb.f_hash = rte_table_hash_crc_key32; + break; + + case 40: + params->lb.f_hash = rte_table_hash_crc_key40; + break; + + case 48: + params->lb.f_hash = rte_table_hash_crc_key48; + break; + + case 56: + params->lb.f_hash = rte_table_hash_crc_key56; + break; + + case 64: + params->lb.f_hash = rte_table_hash_crc_key64; + break; + + default: + return NULL; + } + + params->lb.seed = 0; + } + + /* Resource */ + ap = rte_table_action_profile_create(¶ms->common); + if (ap == NULL) + return NULL; + + if (params->action_mask & (1LLU << RTE_TABLE_ACTION_FWD)) { + status = rte_table_action_profile_action_register(ap, + RTE_TABLE_ACTION_FWD, + NULL); + + if (status) { + rte_table_action_profile_free(ap); + return NULL; + } + } + + if (params->action_mask & (1LLU << RTE_TABLE_ACTION_LB)) { + status = rte_table_action_profile_action_register(ap, + RTE_TABLE_ACTION_LB, + ¶ms->lb); + + if (status) { + rte_table_action_profile_free(ap); + return NULL; + } + } + + if (params->action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) { + status = rte_table_action_profile_action_register(ap, + 
RTE_TABLE_ACTION_MTR, + ¶ms->mtr); + + if (status) { + rte_table_action_profile_free(ap); + return NULL; + } + } + + if (params->action_mask & (1LLU << RTE_TABLE_ACTION_TM)) { + status = rte_table_action_profile_action_register(ap, + RTE_TABLE_ACTION_TM, + ¶ms->tm); + + if (status) { + rte_table_action_profile_free(ap); + return NULL; + } + } + + if (params->action_mask & (1LLU << RTE_TABLE_ACTION_ENCAP)) { + status = rte_table_action_profile_action_register(ap, + RTE_TABLE_ACTION_ENCAP, + ¶ms->encap); + + if (status) { + rte_table_action_profile_free(ap); + return NULL; + } + } + + if (params->action_mask & (1LLU << RTE_TABLE_ACTION_NAT)) { + status = rte_table_action_profile_action_register(ap, + RTE_TABLE_ACTION_NAT, + ¶ms->nat); + + if (status) { + rte_table_action_profile_free(ap); + return NULL; + } + } + + if (params->action_mask & (1LLU << RTE_TABLE_ACTION_TTL)) { + status = rte_table_action_profile_action_register(ap, + RTE_TABLE_ACTION_TTL, + ¶ms->ttl); + + if (status) { + rte_table_action_profile_free(ap); + return NULL; + } + } + + if (params->action_mask & (1LLU << RTE_TABLE_ACTION_STATS)) { + status = rte_table_action_profile_action_register(ap, + RTE_TABLE_ACTION_STATS, + ¶ms->stats); + + if (status) { + rte_table_action_profile_free(ap); + return NULL; + } + } + if (params->action_mask & (1LLU << RTE_TABLE_ACTION_TIME)) { + status = rte_table_action_profile_action_register(ap, + RTE_TABLE_ACTION_TIME, + NULL); + + if (status) { + rte_table_action_profile_free(ap); + return NULL; + } + } + + if (params->action_mask & (1LLU << RTE_TABLE_ACTION_TAG)) { + status = rte_table_action_profile_action_register(ap, + RTE_TABLE_ACTION_TAG, + NULL); + + if (status) { + rte_table_action_profile_free(ap); + return NULL; + } + } + + if (params->action_mask & (1LLU << RTE_TABLE_ACTION_DECAP)) { + status = rte_table_action_profile_action_register(ap, + RTE_TABLE_ACTION_DECAP, + NULL); + + if (status) { + rte_table_action_profile_free(ap); + return NULL; + } + } + + if (params->action_mask & (1LLU << RTE_TABLE_ACTION_SYM_CRYPTO)) { + status = rte_table_action_profile_action_register(ap, + RTE_TABLE_ACTION_SYM_CRYPTO, + ¶ms->sym_crypto); + + if (status) { + rte_table_action_profile_free(ap); + return NULL; + } + } + + status = rte_table_action_profile_freeze(ap); + if (status) { + rte_table_action_profile_free(ap); + return NULL; + } + + /* Node allocation */ + profile = calloc(1, sizeof(struct softnic_table_action_profile)); + if (profile == NULL) { + rte_table_action_profile_free(ap); + return NULL; + } + + /* Node fill in */ + strlcpy(profile->name, name, sizeof(profile->name)); + memcpy(&profile->params, params, sizeof(*params)); + profile->ap = ap; + + /* Node add to list */ + TAILQ_INSERT_TAIL(&p->table_action_profile_list, profile, node); + + return profile; +} diff --git a/src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_cli.c b/src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_cli.c new file mode 100644 index 000000000..932ec15f4 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_cli.c @@ -0,0 +1,6571 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2018 Intel Corporation + */ + +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "rte_eth_softnic_internals.h" +#include "parser.h" + +#ifndef CMD_MAX_TOKENS +#define CMD_MAX_TOKENS 256 +#endif + +#define MSG_OUT_OF_MEMORY "Not enough memory.\n" +#define MSG_CMD_UNKNOWN "Unknown command \"%s\".\n" +#define MSG_CMD_UNIMPLEM "Command \"%s\" not implemented.\n" 
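/*
 * Minimal usage sketch for the table action profile API implemented above in
 * rte_eth_softnic_action.c: the forwarding action must always be enabled
 * (profile creation fails without RTE_TABLE_ACTION_FWD), while the other
 * actions are opt-in via action_mask. The function name, the profile name
 * "example_profile" and the IP offset value are arbitrary example values,
 * not part of the driver.
 */
static struct softnic_table_action_profile *
example_table_action_profile(struct pmd_internals *softnic)
{
	struct softnic_table_action_profile_params ap;

	memset(&ap, 0, sizeof(ap));

	/* Mandatory forwarding action. */
	ap.action_mask = 1LLU << RTE_TABLE_ACTION_FWD;

	/* Common packet context: IPv4 header at an example offset. */
	ap.common.ip_version = 1;
	ap.common.ip_offset = 270;

	/* Optional per-rule statistics (packet counters only). */
	ap.action_mask |= 1LLU << RTE_TABLE_ACTION_STATS;
	ap.stats.n_packets_enabled = 1;
	ap.stats.n_bytes_enabled = 0;

	return softnic_table_action_profile_create(softnic,
		"example_profile", &ap);
}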
+#define MSG_ARG_NOT_ENOUGH "Not enough arguments for command \"%s\".\n" +#define MSG_ARG_TOO_MANY "Too many arguments for command \"%s\".\n" +#define MSG_ARG_MISMATCH "Wrong number of arguments for command \"%s\".\n" +#define MSG_ARG_NOT_FOUND "Argument \"%s\" not found.\n" +#define MSG_ARG_INVALID "Invalid value for argument \"%s\".\n" +#define MSG_FILE_ERR "Error in file \"%s\" at line %u.\n" +#define MSG_FILE_NOT_ENOUGH "Not enough rules in file \"%s\".\n" +#define MSG_CMD_FAIL "Command \"%s\" failed.\n" + +static int +is_comment(char *in) +{ + if ((strlen(in) && index("!#%;", in[0])) || + (strncmp(in, "//", 2) == 0) || + (strncmp(in, "--", 2) == 0)) + return 1; + + return 0; +} + +/** + * mempool + * buffer + * pool + * cache + */ +static void +cmd_mempool(struct pmd_internals *softnic, + char **tokens, + uint32_t n_tokens, + char *out, + size_t out_size) +{ + struct softnic_mempool_params p; + char *name; + struct softnic_mempool *mempool; + + if (n_tokens != 8) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return; + } + + name = tokens[1]; + + if (strcmp(tokens[2], "buffer") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "buffer"); + return; + } + + if (softnic_parser_read_uint32(&p.buffer_size, tokens[3]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "buffer_size"); + return; + } + + if (strcmp(tokens[4], "pool") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "pool"); + return; + } + + if (softnic_parser_read_uint32(&p.pool_size, tokens[5]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "pool_size"); + return; + } + + if (strcmp(tokens[6], "cache") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "cache"); + return; + } + + if (softnic_parser_read_uint32(&p.cache_size, tokens[7]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "cache_size"); + return; + } + + mempool = softnic_mempool_create(softnic, name, &p); + if (mempool == NULL) { + snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]); + return; + } +} + +/** + * link + * dev | port + */ +static void +cmd_link(struct pmd_internals *softnic, + char **tokens, + uint32_t n_tokens, + char *out, + size_t out_size) +{ + struct softnic_link_params p; + struct softnic_link *link; + char *name; + + memset(&p, 0, sizeof(p)); + + if (n_tokens != 4) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return; + } + name = tokens[1]; + + if (strcmp(tokens[2], "dev") == 0) { + p.dev_name = tokens[3]; + } else if (strcmp(tokens[2], "port") == 0) { + p.dev_name = NULL; + + if (softnic_parser_read_uint16(&p.port_id, tokens[3]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "port_id"); + return; + } + } else { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "dev or port"); + return; + } + + link = softnic_link_create(softnic, name, &p); + if (link == NULL) { + snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]); + return; + } +} + +/** + * swq + * size + */ +static void +cmd_swq(struct pmd_internals *softnic, + char **tokens, + uint32_t n_tokens, + char *out, + size_t out_size) +{ + struct softnic_swq_params p; + char *name; + struct softnic_swq *swq; + + if (n_tokens != 4) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return; + } + + name = tokens[1]; + + if (strcmp(tokens[2], "size") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "size"); + return; + } + + if (softnic_parser_read_uint32(&p.size, tokens[3]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "size"); + return; + } + + swq = softnic_swq_create(softnic, name, &p); + if (swq == NULL) { + snprintf(out, out_size, 
MSG_CMD_FAIL, tokens[0]); + return; + } +} + +/** + * tmgr shaper profile + * id + * rate size + * adj + */ +static void +cmd_tmgr_shaper_profile(struct pmd_internals *softnic, + char **tokens, + uint32_t n_tokens, + char *out, + size_t out_size) +{ + struct rte_tm_shaper_params sp; + struct rte_tm_error error; + uint32_t shaper_profile_id; + uint16_t port_id; + int status; + + memset(&sp, 0, sizeof(struct rte_tm_shaper_params)); + + if (n_tokens != 11) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return; + } + + if (strcmp(tokens[1], "shaper") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "shaper"); + return; + } + + if (strcmp(tokens[2], "profile") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "profile"); + return; + } + + if (strcmp(tokens[3], "id") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "id"); + return; + } + + if (softnic_parser_read_uint32(&shaper_profile_id, tokens[4]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "profile_id"); + return; + } + + if (strcmp(tokens[5], "rate") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "rate"); + return; + } + + if (softnic_parser_read_uint64(&sp.peak.rate, tokens[6]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "tb_rate"); + return; + } + + if (strcmp(tokens[7], "size") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "size"); + return; + } + + if (softnic_parser_read_uint64(&sp.peak.size, tokens[8]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "tb_size"); + return; + } + + if (strcmp(tokens[9], "adj") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "adj"); + return; + } + + if (softnic_parser_read_int32(&sp.pkt_length_adjust, tokens[10]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "packet_length_adjust"); + return; + } + + status = rte_eth_dev_get_port_by_name(softnic->params.name, &port_id); + if (status) + return; + + status = rte_tm_shaper_profile_add(port_id, shaper_profile_id, &sp, &error); + if (status != 0) { + snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]); + return; + } +} + +/** + * tmgr shared shaper + * id + * profile + */ +static void +cmd_tmgr_shared_shaper(struct pmd_internals *softnic, + char **tokens, + uint32_t n_tokens, + char *out, + size_t out_size) +{ + struct rte_tm_error error; + uint32_t shared_shaper_id, shaper_profile_id; + uint16_t port_id; + int status; + + if (n_tokens != 7) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return; + } + + if (strcmp(tokens[1], "shared") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "shared"); + return; + } + + if (strcmp(tokens[2], "shaper") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "shaper"); + return; + } + + if (strcmp(tokens[3], "id") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "id"); + return; + } + + if (softnic_parser_read_uint32(&shared_shaper_id, tokens[4]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "shared_shaper_id"); + return; + } + + if (strcmp(tokens[5], "profile") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "profile"); + return; + } + + if (softnic_parser_read_uint32(&shaper_profile_id, tokens[6]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "shaper_profile_id"); + return; + } + + status = rte_eth_dev_get_port_by_name(softnic->params.name, &port_id); + if (status) + return; + + status = rte_tm_shared_shaper_add_update(port_id, + shared_shaper_id, + shaper_profile_id, + &error); + if (status != 0) { + snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]); + return; + } +} + +/** + * tmgr node + * id + * parent + * priority + * 
weight + * [shaper profile ] + * [shared shaper ] + * [nonleaf sp ] + */ +static void +cmd_tmgr_node(struct pmd_internals *softnic, + char **tokens, + uint32_t n_tokens, + char *out, + size_t out_size) +{ + struct rte_tm_error error; + struct rte_tm_node_params np; + uint32_t node_id, parent_node_id, priority, weight, shared_shaper_id; + uint16_t port_id; + int status; + + memset(&np, 0, sizeof(struct rte_tm_node_params)); + np.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE; + np.nonleaf.n_sp_priorities = 1; + + if (n_tokens < 10) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return; + } + + if (strcmp(tokens[1], "node") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "node"); + return; + } + + if (strcmp(tokens[2], "id") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "id"); + return; + } + + if (softnic_parser_read_uint32(&node_id, tokens[3]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "node_id"); + return; + } + + if (strcmp(tokens[4], "parent") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "parent"); + return; + } + + if (strcmp(tokens[5], "none") == 0) + parent_node_id = RTE_TM_NODE_ID_NULL; + else { + if (softnic_parser_read_uint32(&parent_node_id, tokens[5]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "parent_node_id"); + return; + } + } + + if (strcmp(tokens[6], "priority") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "priority"); + return; + } + + if (softnic_parser_read_uint32(&priority, tokens[7]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "priority"); + return; + } + + if (strcmp(tokens[8], "weight") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "weight"); + return; + } + + if (softnic_parser_read_uint32(&weight, tokens[9]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "weight"); + return; + } + + tokens += 10; + n_tokens -= 10; + + if (n_tokens >= 2 && + (strcmp(tokens[0], "shaper") == 0) && + (strcmp(tokens[1], "profile") == 0)) { + if (n_tokens < 3) { + snprintf(out, out_size, MSG_ARG_MISMATCH, "tmgr node"); + return; + } + + if (strcmp(tokens[2], "none") == 0) { + np.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE; + } else { + if (softnic_parser_read_uint32(&np.shaper_profile_id, tokens[2]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "shaper_profile_id"); + return; + } + } + + tokens += 3; + n_tokens -= 3; + } /* shaper profile */ + + if (n_tokens >= 2 && + (strcmp(tokens[0], "shared") == 0) && + (strcmp(tokens[1], "shaper") == 0)) { + if (n_tokens < 3) { + snprintf(out, out_size, MSG_ARG_MISMATCH, "tmgr node"); + return; + } + + if (softnic_parser_read_uint32(&shared_shaper_id, tokens[2]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "shared_shaper_id"); + return; + } + + np.shared_shaper_id = &shared_shaper_id; + np.n_shared_shapers = 1; + + tokens += 3; + n_tokens -= 3; + } /* shared shaper */ + + if (n_tokens >= 2 && + (strcmp(tokens[0], "nonleaf") == 0) && + (strcmp(tokens[1], "sp") == 0)) { + if (n_tokens < 3) { + snprintf(out, out_size, MSG_ARG_MISMATCH, "tmgr node"); + return; + } + + if (softnic_parser_read_uint32(&np.nonleaf.n_sp_priorities, tokens[2]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "n_sp_priorities"); + return; + } + + tokens += 3; + n_tokens -= 3; + } /* nonleaf sp */ + + if (n_tokens) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return; + } + + status = rte_eth_dev_get_port_by_name(softnic->params.name, &port_id); + if (status != 0) + return; + + status = rte_tm_node_add(port_id, + node_id, + parent_node_id, + priority, + weight, + 
RTE_TM_NODE_LEVEL_ID_ANY, + &np, + &error); + if (status != 0) { + snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]); + return; + } +} + +static uint32_t +root_node_id(uint32_t n_spp, + uint32_t n_pps) +{ + uint32_t n_queues = n_spp * n_pps * RTE_SCHED_QUEUES_PER_PIPE; + uint32_t n_tc = n_spp * n_pps * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; + uint32_t n_pipes = n_spp * n_pps; + + return n_queues + n_tc + n_pipes + n_spp; +} + +static uint32_t +subport_node_id(uint32_t n_spp, + uint32_t n_pps, + uint32_t subport_id) +{ + uint32_t n_pipes = n_spp * n_pps; + uint32_t n_tc = n_pipes * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; + uint32_t n_queues = n_pipes * RTE_SCHED_QUEUES_PER_PIPE; + + return n_queues + n_tc + n_pipes + subport_id; +} + +static uint32_t +pipe_node_id(uint32_t n_spp, + uint32_t n_pps, + uint32_t subport_id, + uint32_t pipe_id) +{ + uint32_t n_pipes = n_spp * n_pps; + uint32_t n_tc = n_pipes * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; + uint32_t n_queues = n_pipes * RTE_SCHED_QUEUES_PER_PIPE; + + return n_queues + + n_tc + + pipe_id + + subport_id * n_pps; +} + +static uint32_t +tc_node_id(uint32_t n_spp, + uint32_t n_pps, + uint32_t subport_id, + uint32_t pipe_id, + uint32_t tc_id) +{ + uint32_t n_pipes = n_spp * n_pps; + uint32_t n_queues = n_pipes * RTE_SCHED_QUEUES_PER_PIPE; + + return n_queues + + tc_id + + (pipe_id + subport_id * n_pps) * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; +} + +static uint32_t +queue_node_id(uint32_t n_spp __rte_unused, + uint32_t n_pps, + uint32_t subport_id, + uint32_t pipe_id, + uint32_t tc_id, + uint32_t queue_id) +{ + return queue_id + tc_id + + (pipe_id + subport_id * n_pps) * RTE_SCHED_QUEUES_PER_PIPE; +} + +struct tmgr_hierarchy_default_params { + uint32_t n_spp; /**< Number of subports per port. */ + uint32_t n_pps; /**< Number of pipes per subport. */ + + struct { + uint32_t port; + uint32_t subport; + uint32_t pipe; + uint32_t tc[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]; + } shaper_profile_id; + + struct { + uint32_t tc[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]; + uint32_t tc_valid[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]; + } shared_shaper_id; + + struct { + uint32_t queue[RTE_SCHED_QUEUES_PER_PIPE]; + } weight; +}; + +static int +tmgr_hierarchy_default(struct pmd_internals *softnic, + struct tmgr_hierarchy_default_params *params) +{ + struct rte_tm_node_params root_node_params = { + .shaper_profile_id = params->shaper_profile_id.port, + .nonleaf = { + .n_sp_priorities = 1, + }, + }; + + struct rte_tm_node_params subport_node_params = { + .shaper_profile_id = params->shaper_profile_id.subport, + .nonleaf = { + .n_sp_priorities = 1, + }, + }; + + struct rte_tm_node_params pipe_node_params = { + .shaper_profile_id = params->shaper_profile_id.pipe, + .nonleaf = { + .n_sp_priorities = RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE, + }, + }; + + uint32_t *shared_shaper_id = + (uint32_t *)calloc(RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE, + sizeof(uint32_t)); + + if (shared_shaper_id == NULL) + return -1; + + memcpy(shared_shaper_id, params->shared_shaper_id.tc, + sizeof(params->shared_shaper_id.tc)); + + struct rte_tm_node_params tc_node_params[] = { + [0] = { + .shaper_profile_id = params->shaper_profile_id.tc[0], + .shared_shaper_id = &shared_shaper_id[0], + .n_shared_shapers = + (¶ms->shared_shaper_id.tc_valid[0]) ? 1 : 0, + .nonleaf = { + .n_sp_priorities = 1, + }, + }, + + [1] = { + .shaper_profile_id = params->shaper_profile_id.tc[1], + .shared_shaper_id = &shared_shaper_id[1], + .n_shared_shapers = + (¶ms->shared_shaper_id.tc_valid[1]) ? 
1 : 0, + .nonleaf = { + .n_sp_priorities = 1, + }, + }, + + [2] = { + .shaper_profile_id = params->shaper_profile_id.tc[2], + .shared_shaper_id = &shared_shaper_id[2], + .n_shared_shapers = + (¶ms->shared_shaper_id.tc_valid[2]) ? 1 : 0, + .nonleaf = { + .n_sp_priorities = 1, + }, + }, + + [3] = { + .shaper_profile_id = params->shaper_profile_id.tc[3], + .shared_shaper_id = &shared_shaper_id[3], + .n_shared_shapers = + (¶ms->shared_shaper_id.tc_valid[3]) ? 1 : 0, + .nonleaf = { + .n_sp_priorities = 1, + }, + }, + + [4] = { + .shaper_profile_id = params->shaper_profile_id.tc[4], + .shared_shaper_id = &shared_shaper_id[4], + .n_shared_shapers = + (¶ms->shared_shaper_id.tc_valid[4]) ? 1 : 0, + .nonleaf = { + .n_sp_priorities = 1, + }, + }, + + [5] = { + .shaper_profile_id = params->shaper_profile_id.tc[5], + .shared_shaper_id = &shared_shaper_id[5], + .n_shared_shapers = + (¶ms->shared_shaper_id.tc_valid[5]) ? 1 : 0, + .nonleaf = { + .n_sp_priorities = 1, + }, + }, + + [6] = { + .shaper_profile_id = params->shaper_profile_id.tc[6], + .shared_shaper_id = &shared_shaper_id[6], + .n_shared_shapers = + (¶ms->shared_shaper_id.tc_valid[6]) ? 1 : 0, + .nonleaf = { + .n_sp_priorities = 1, + }, + }, + + [7] = { + .shaper_profile_id = params->shaper_profile_id.tc[7], + .shared_shaper_id = &shared_shaper_id[7], + .n_shared_shapers = + (¶ms->shared_shaper_id.tc_valid[7]) ? 1 : 0, + .nonleaf = { + .n_sp_priorities = 1, + }, + }, + + [8] = { + .shaper_profile_id = params->shaper_profile_id.tc[8], + .shared_shaper_id = &shared_shaper_id[8], + .n_shared_shapers = + (¶ms->shared_shaper_id.tc_valid[8]) ? 1 : 0, + .nonleaf = { + .n_sp_priorities = 1, + }, + }, + + [9] = { + .shaper_profile_id = params->shaper_profile_id.tc[9], + .shared_shaper_id = &shared_shaper_id[9], + .n_shared_shapers = + (¶ms->shared_shaper_id.tc_valid[9]) ? 1 : 0, + .nonleaf = { + .n_sp_priorities = 1, + }, + }, + + [10] = { + .shaper_profile_id = params->shaper_profile_id.tc[10], + .shared_shaper_id = &shared_shaper_id[10], + .n_shared_shapers = + (¶ms->shared_shaper_id.tc_valid[10]) ? 1 : 0, + .nonleaf = { + .n_sp_priorities = 1, + }, + }, + + [11] = { + .shaper_profile_id = params->shaper_profile_id.tc[11], + .shared_shaper_id = &shared_shaper_id[11], + .n_shared_shapers = + (¶ms->shared_shaper_id.tc_valid[11]) ? 1 : 0, + .nonleaf = { + .n_sp_priorities = 1, + }, + }, + + [12] = { + .shaper_profile_id = params->shaper_profile_id.tc[12], + .shared_shaper_id = &shared_shaper_id[12], + .n_shared_shapers = + (¶ms->shared_shaper_id.tc_valid[12]) ? 
1 : 0, + .nonleaf = { + .n_sp_priorities = 1, + }, + }, + }; + + struct rte_tm_node_params queue_node_params = { + .shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE, + }; + + struct rte_tm_error error; + uint32_t n_spp = params->n_spp, n_pps = params->n_pps, s; + int status; + uint16_t port_id; + + status = rte_eth_dev_get_port_by_name(softnic->params.name, &port_id); + if (status) + return -1; + + /* Hierarchy level 0: Root node */ + status = rte_tm_node_add(port_id, + root_node_id(n_spp, n_pps), + RTE_TM_NODE_ID_NULL, + 0, + 1, + RTE_TM_NODE_LEVEL_ID_ANY, + &root_node_params, + &error); + if (status) + return -1; + + /* Hierarchy level 1: Subport nodes */ + for (s = 0; s < params->n_spp; s++) { + uint32_t p; + + status = rte_tm_node_add(port_id, + subport_node_id(n_spp, n_pps, s), + root_node_id(n_spp, n_pps), + 0, + 1, + RTE_TM_NODE_LEVEL_ID_ANY, + &subport_node_params, + &error); + if (status) + return -1; + + /* Hierarchy level 2: Pipe nodes */ + for (p = 0; p < params->n_pps; p++) { + uint32_t t; + + status = rte_tm_node_add(port_id, + pipe_node_id(n_spp, n_pps, s, p), + subport_node_id(n_spp, n_pps, s), + 0, + 1, + RTE_TM_NODE_LEVEL_ID_ANY, + &pipe_node_params, + &error); + if (status) + return -1; + + /* Hierarchy level 3: Traffic class nodes */ + for (t = 0; t < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; t++) { + uint32_t q; + + status = rte_tm_node_add(port_id, + tc_node_id(n_spp, n_pps, s, p, t), + pipe_node_id(n_spp, n_pps, s, p), + t, + 1, + RTE_TM_NODE_LEVEL_ID_ANY, + &tc_node_params[t], + &error); + if (status) + return -1; + + /* Hierarchy level 4: Queue nodes */ + if (t < RTE_SCHED_TRAFFIC_CLASS_BE) { + /* Strict-priority traffic class queues */ + q = 0; + status = rte_tm_node_add(port_id, + queue_node_id(n_spp, n_pps, s, p, t, q), + tc_node_id(n_spp, n_pps, s, p, t), + 0, + params->weight.queue[q], + RTE_TM_NODE_LEVEL_ID_ANY, + &queue_node_params, + &error); + if (status) + return -1; + + continue; + } + /* Best-effort traffic class queues */ + for (q = 0; q < RTE_SCHED_BE_QUEUES_PER_PIPE; q++) { + status = rte_tm_node_add(port_id, + queue_node_id(n_spp, n_pps, s, p, t, q), + tc_node_id(n_spp, n_pps, s, p, t), + 0, + params->weight.queue[q], + RTE_TM_NODE_LEVEL_ID_ANY, + &queue_node_params, + &error); + if (status) + return -1; + } + } /* TC */ + } /* Pipe */ + } /* Subport */ + + return 0; +} + + +/** + * tmgr hierarchy-default + * spp + * pps + * shaper profile + * port + * subport + * pipe + * tc0 + * tc1 + * tc2 + * tc3 + * tc4 + * tc5 + * tc6 + * tc7 + * tc8 + * tc9 + * tc10 + * tc11 + * tc12 + * shared shaper + * tc0 + * tc1 + * tc2 + * tc3 + * tc4 + * tc5 + * tc6 + * tc7 + * tc8 + * tc9 + * tc10 + * tc11 + * tc12 + * weight + * queue ... 
+ */ +static void +cmd_tmgr_hierarchy_default(struct pmd_internals *softnic, + char **tokens, + uint32_t n_tokens, + char *out, + size_t out_size) +{ + struct tmgr_hierarchy_default_params p; + int i, j, status; + + memset(&p, 0, sizeof(p)); + + if (n_tokens != 74) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return; + } + + if (strcmp(tokens[1], "hierarchy-default") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "hierarchy-default"); + return; + } + + if (strcmp(tokens[2], "spp") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "spp"); + return; + } + + if (softnic_parser_read_uint32(&p.n_spp, tokens[3]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "n_subports_per_port"); + return; + } + + if (strcmp(tokens[4], "pps") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "pps"); + return; + } + + if (softnic_parser_read_uint32(&p.n_pps, tokens[5]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "n_pipes_per_subport"); + return; + } + + /* Shaper profile */ + + if (strcmp(tokens[6], "shaper") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "shaper"); + return; + } + + if (strcmp(tokens[7], "profile") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "profile"); + return; + } + + if (strcmp(tokens[8], "port") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "port"); + return; + } + + if (softnic_parser_read_uint32(&p.shaper_profile_id.port, tokens[9]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "port profile id"); + return; + } + + if (strcmp(tokens[10], "subport") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "subport"); + return; + } + + if (softnic_parser_read_uint32(&p.shaper_profile_id.subport, tokens[11]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "subport profile id"); + return; + } + + if (strcmp(tokens[12], "pipe") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "pipe"); + return; + } + + if (softnic_parser_read_uint32(&p.shaper_profile_id.pipe, tokens[13]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "pipe_profile_id"); + return; + } + + if (strcmp(tokens[14], "tc0") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc0"); + return; + } + + if (softnic_parser_read_uint32(&p.shaper_profile_id.tc[0], tokens[15]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "tc0 profile id"); + return; + } + + if (strcmp(tokens[16], "tc1") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc1"); + return; + } + + if (softnic_parser_read_uint32(&p.shaper_profile_id.tc[1], tokens[17]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "tc1 profile id"); + return; + } + + if (strcmp(tokens[18], "tc2") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc2"); + return; + } + + if (softnic_parser_read_uint32(&p.shaper_profile_id.tc[2], tokens[19]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "tc2 profile id"); + return; + } + + if (strcmp(tokens[20], "tc3") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc3"); + return; + } + + if (softnic_parser_read_uint32(&p.shaper_profile_id.tc[3], tokens[21]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "tc3 profile id"); + return; + } + + if (strcmp(tokens[22], "tc4") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc4"); + return; + } + + if (softnic_parser_read_uint32(&p.shaper_profile_id.tc[4], tokens[23]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "tc4 profile id"); + return; + } + + if (strcmp(tokens[24], "tc5") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc5"); + return; + } + + if 
(softnic_parser_read_uint32(&p.shaper_profile_id.tc[5], tokens[25]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "tc5 profile id"); + return; + } + + if (strcmp(tokens[26], "tc6") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc6"); + return; + } + + if (softnic_parser_read_uint32(&p.shaper_profile_id.tc[6], tokens[27]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "tc6 profile id"); + return; + } + + if (strcmp(tokens[28], "tc7") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc7"); + return; + } + + if (softnic_parser_read_uint32(&p.shaper_profile_id.tc[7], tokens[29]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "tc7 profile id"); + return; + } + + if (strcmp(tokens[30], "tc8") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc8"); + return; + } + + if (softnic_parser_read_uint32(&p.shaper_profile_id.tc[8], tokens[31]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "tc8 profile id"); + return; + } + + if (strcmp(tokens[32], "tc9") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc9"); + return; + } + + if (softnic_parser_read_uint32(&p.shaper_profile_id.tc[9], tokens[33]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "tc9 profile id"); + return; + } + + if (strcmp(tokens[34], "tc10") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc10"); + return; + } + + if (softnic_parser_read_uint32(&p.shaper_profile_id.tc[10], tokens[35]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "tc10 profile id"); + return; + } + + if (strcmp(tokens[36], "tc11") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc11"); + return; + } + + if (softnic_parser_read_uint32(&p.shaper_profile_id.tc[11], tokens[37]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "tc11 profile id"); + return; + } + + if (strcmp(tokens[38], "tc12") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc12"); + return; + } + + if (softnic_parser_read_uint32(&p.shaper_profile_id.tc[12], tokens[39]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "tc12 profile id"); + return; + } + + /* Shared shaper */ + + if (strcmp(tokens[40], "shared") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "shared"); + return; + } + + if (strcmp(tokens[41], "shaper") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "shaper"); + return; + } + + if (strcmp(tokens[42], "tc0") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc0"); + return; + } + + if (strcmp(tokens[43], "none") == 0) + p.shared_shaper_id.tc_valid[0] = 0; + else { + if (softnic_parser_read_uint32(&p.shared_shaper_id.tc[0], + tokens[43]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "shared shaper tc0"); + return; + } + + p.shared_shaper_id.tc_valid[0] = 1; + } + + if (strcmp(tokens[44], "tc1") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc1"); + return; + } + + if (strcmp(tokens[45], "none") == 0) + p.shared_shaper_id.tc_valid[1] = 0; + else { + if (softnic_parser_read_uint32(&p.shared_shaper_id.tc[1], + tokens[45]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "shared shaper tc1"); + return; + } + + p.shared_shaper_id.tc_valid[1] = 1; + } + + if (strcmp(tokens[46], "tc2") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc2"); + return; + } + + if (strcmp(tokens[47], "none") == 0) + p.shared_shaper_id.tc_valid[2] = 0; + else { + if (softnic_parser_read_uint32(&p.shared_shaper_id.tc[2], + tokens[47]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "shared shaper tc2"); + return; + } + + p.shared_shaper_id.tc_valid[2] = 1; + } + + if (strcmp(tokens[48], "tc3") != 0) { + 
snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc3"); + return; + } + + if (strcmp(tokens[49], "none") == 0) + p.shared_shaper_id.tc_valid[3] = 0; + else { + if (softnic_parser_read_uint32(&p.shared_shaper_id.tc[3], + tokens[49]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "shared shaper tc3"); + return; + } + + p.shared_shaper_id.tc_valid[3] = 1; + } + + if (strcmp(tokens[50], "tc4") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc4"); + return; + } + + if (strcmp(tokens[51], "none") == 0) { + p.shared_shaper_id.tc_valid[4] = 0; + } else { + if (softnic_parser_read_uint32(&p.shared_shaper_id.tc[4], + tokens[51]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "shared shaper tc4"); + return; + } + + p.shared_shaper_id.tc_valid[4] = 1; + } + + if (strcmp(tokens[52], "tc5") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc5"); + return; + } + + if (strcmp(tokens[53], "none") == 0) { + p.shared_shaper_id.tc_valid[5] = 0; + } else { + if (softnic_parser_read_uint32(&p.shared_shaper_id.tc[5], + tokens[53]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "shared shaper tc5"); + return; + } + + p.shared_shaper_id.tc_valid[5] = 1; + } + + if (strcmp(tokens[54], "tc6") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc6"); + return; + } + + if (strcmp(tokens[55], "none") == 0) { + p.shared_shaper_id.tc_valid[6] = 0; + } else { + if (softnic_parser_read_uint32(&p.shared_shaper_id.tc[6], + tokens[55]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "shared shaper tc6"); + return; + } + + p.shared_shaper_id.tc_valid[6] = 1; + } + + if (strcmp(tokens[56], "tc7") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc7"); + return; + } + + if (strcmp(tokens[57], "none") == 0) { + p.shared_shaper_id.tc_valid[7] = 0; + } else { + if (softnic_parser_read_uint32(&p.shared_shaper_id.tc[7], + tokens[57]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "shared shaper tc7"); + return; + } + + p.shared_shaper_id.tc_valid[7] = 1; + } + + if (strcmp(tokens[58], "tc8") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc8"); + return; + } + + if (strcmp(tokens[59], "none") == 0) { + p.shared_shaper_id.tc_valid[8] = 0; + } else { + if (softnic_parser_read_uint32(&p.shared_shaper_id.tc[8], + tokens[59]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "shared shaper tc8"); + return; + } + + p.shared_shaper_id.tc_valid[8] = 1; + } + + if (strcmp(tokens[60], "tc9") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc9"); + return; + } + + if (strcmp(tokens[61], "none") == 0) { + p.shared_shaper_id.tc_valid[9] = 0; + } else { + if (softnic_parser_read_uint32(&p.shared_shaper_id.tc[9], + tokens[61]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "shared shaper tc9"); + return; + } + + p.shared_shaper_id.tc_valid[9] = 1; + } + + if (strcmp(tokens[62], "tc10") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc10"); + return; + } + + if (strcmp(tokens[63], "none") == 0) { + p.shared_shaper_id.tc_valid[10] = 0; + } else { + if (softnic_parser_read_uint32(&p.shared_shaper_id.tc[10], + tokens[63]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "shared shaper tc10"); + return; + } + + p.shared_shaper_id.tc_valid[10] = 1; + } + + if (strcmp(tokens[64], "tc11") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc11"); + return; + } + + if (strcmp(tokens[65], "none") == 0) { + p.shared_shaper_id.tc_valid[11] = 0; + } else { + if (softnic_parser_read_uint32(&p.shared_shaper_id.tc[11], + tokens[65]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "shared shaper 
tc11"); + return; + } + + p.shared_shaper_id.tc_valid[11] = 1; + } + + if (strcmp(tokens[66], "tc12") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc12"); + return; + } + + if (strcmp(tokens[67], "none") == 0) { + p.shared_shaper_id.tc_valid[12] = 0; + } else { + if (softnic_parser_read_uint32(&p.shared_shaper_id.tc[12], + tokens[67]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "shared shaper tc12"); + return; + } + + p.shared_shaper_id.tc_valid[12] = 1; + } + + /* Weight */ + + if (strcmp(tokens[68], "weight") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "weight"); + return; + } + + if (strcmp(tokens[69], "queue") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "queue"); + return; + } + + for (i = 0, j = 0; i < 16; i++) { + if (i < RTE_SCHED_TRAFFIC_CLASS_BE) { + p.weight.queue[i] = 1; + } else { + if (softnic_parser_read_uint32(&p.weight.queue[i], + tokens[70 + j]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "weight queue"); + return; + } + j++; + } + } + + status = tmgr_hierarchy_default(softnic, &p); + if (status != 0) { + snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]); + return; + } +} + +/** + * tmgr hierarchy commit + */ +static void +cmd_tmgr_hierarchy_commit(struct pmd_internals *softnic, + char **tokens, + uint32_t n_tokens, + char *out, + size_t out_size) +{ + struct rte_tm_error error; + uint16_t port_id; + int status; + + if (n_tokens != 3) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return; + } + + if (strcmp(tokens[1], "hierarchy") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "hierarchy"); + return; + } + + if (strcmp(tokens[2], "commit") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "commit"); + return; + } + + status = rte_eth_dev_get_port_by_name(softnic->params.name, &port_id); + if (status != 0) + return; + + status = rte_tm_hierarchy_commit(port_id, 1, &error); + if (status) { + snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]); + return; + } +} + +/** + * tmgr + */ +static void +cmd_tmgr(struct pmd_internals *softnic, + char **tokens, + uint32_t n_tokens, + char *out, + size_t out_size) +{ + char *name; + struct softnic_tmgr_port *tmgr_port; + + if (n_tokens != 2) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return; + } + + name = tokens[1]; + + tmgr_port = softnic_tmgr_port_create(softnic, name); + if (tmgr_port == NULL) { + snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]); + return; + } +} + +/** + * tap + */ +static void +cmd_tap(struct pmd_internals *softnic, + char **tokens, + uint32_t n_tokens, + char *out, + size_t out_size) +{ + char *name; + struct softnic_tap *tap; + + if (n_tokens != 2) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return; + } + + name = tokens[1]; + + tap = softnic_tap_create(softnic, name); + if (tap == NULL) { + snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]); + return; + } +} + +/** + * cryptodev dev | dev_id + * queue max_sessions + **/ + +static void +cmd_cryptodev(struct pmd_internals *softnic, + char **tokens, + uint32_t n_tokens, + char *out, + size_t out_size) +{ + struct softnic_cryptodev_params params; + char *name; + + memset(¶ms, 0, sizeof(params)); + if (n_tokens != 9) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return; + } + + name = tokens[1]; + + if (strcmp(tokens[2], "dev") == 0) + params.dev_name = tokens[3]; + else if (strcmp(tokens[2], "dev_id") == 0) { + if (softnic_parser_read_uint32(¶ms.dev_id, tokens[3]) < 0) { + snprintf(out, out_size, MSG_ARG_INVALID, + "dev_id"); + return; + } + } else { + 
snprintf(out, out_size, MSG_ARG_INVALID, + "cryptodev"); + return; + } + + if (strcmp(tokens[4], "queue")) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, + "4"); + return; + } + + if (softnic_parser_read_uint32(¶ms.n_queues, tokens[5]) < 0) { + snprintf(out, out_size, MSG_ARG_INVALID, + "q"); + return; + } + + if (softnic_parser_read_uint32(¶ms.queue_size, tokens[6]) < 0) { + snprintf(out, out_size, MSG_ARG_INVALID, + "queue_size"); + return; + } + + if (strcmp(tokens[7], "max_sessions")) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, + "4"); + return; + } + + if (softnic_parser_read_uint32(¶ms.session_pool_size, tokens[8]) + < 0) { + snprintf(out, out_size, MSG_ARG_INVALID, + "q"); + return; + } + + if (softnic_cryptodev_create(softnic, name, ¶ms) == NULL) { + snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]); + return; + } +} + +/** + * port in action profile + * [filter match | mismatch offset mask key port ] + * [balance offset mask port ... ] + */ +static void +cmd_port_in_action_profile(struct pmd_internals *softnic, + char **tokens, + uint32_t n_tokens, + char *out, + size_t out_size) +{ + struct softnic_port_in_action_profile_params p; + struct softnic_port_in_action_profile *ap; + char *name; + uint32_t t0; + + memset(&p, 0, sizeof(p)); + + if (n_tokens < 5) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return; + } + + if (strcmp(tokens[1], "in") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "in"); + return; + } + + if (strcmp(tokens[2], "action") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "action"); + return; + } + + if (strcmp(tokens[3], "profile") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "profile"); + return; + } + + name = tokens[4]; + + t0 = 5; + + if (t0 < n_tokens && + (strcmp(tokens[t0], "filter") == 0)) { + uint32_t size; + + if (n_tokens < t0 + 10) { + snprintf(out, out_size, MSG_ARG_MISMATCH, "port in action profile filter"); + return; + } + + if (strcmp(tokens[t0 + 1], "match") == 0) { + p.fltr.filter_on_match = 1; + } else if (strcmp(tokens[t0 + 1], "mismatch") == 0) { + p.fltr.filter_on_match = 0; + } else { + snprintf(out, out_size, MSG_ARG_INVALID, "match or mismatch"); + return; + } + + if (strcmp(tokens[t0 + 2], "offset") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "offset"); + return; + } + + if (softnic_parser_read_uint32(&p.fltr.key_offset, + tokens[t0 + 3]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "key_offset"); + return; + } + + if (strcmp(tokens[t0 + 4], "mask") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "mask"); + return; + } + + size = RTE_PORT_IN_ACTION_FLTR_KEY_SIZE; + if ((softnic_parse_hex_string(tokens[t0 + 5], + p.fltr.key_mask, &size) != 0) || + size != RTE_PORT_IN_ACTION_FLTR_KEY_SIZE) { + snprintf(out, out_size, MSG_ARG_INVALID, "key_mask"); + return; + } + + if (strcmp(tokens[t0 + 6], "key") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "key"); + return; + } + + size = RTE_PORT_IN_ACTION_FLTR_KEY_SIZE; + if ((softnic_parse_hex_string(tokens[t0 + 7], + p.fltr.key, &size) != 0) || + size != RTE_PORT_IN_ACTION_FLTR_KEY_SIZE) { + snprintf(out, out_size, MSG_ARG_INVALID, "key_value"); + return; + } + + if (strcmp(tokens[t0 + 8], "port") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "port"); + return; + } + + if (softnic_parser_read_uint32(&p.fltr.port_id, + tokens[t0 + 9]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "port_id"); + return; + } + + p.action_mask |= 1LLU << RTE_PORT_IN_ACTION_FLTR; + t0 += 10; + } /* filter */ + + if (t0 < n_tokens && + 
(strcmp(tokens[t0], "balance") == 0)) { + uint32_t i; + + if (n_tokens < t0 + 22) { + snprintf(out, out_size, MSG_ARG_MISMATCH, + "port in action profile balance"); + return; + } + + if (strcmp(tokens[t0 + 1], "offset") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "offset"); + return; + } + + if (softnic_parser_read_uint32(&p.lb.key_offset, + tokens[t0 + 2]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "key_offset"); + return; + } + + if (strcmp(tokens[t0 + 3], "mask") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "mask"); + return; + } + + p.lb.key_size = RTE_PORT_IN_ACTION_LB_KEY_SIZE_MAX; + if (softnic_parse_hex_string(tokens[t0 + 4], + p.lb.key_mask, &p.lb.key_size) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "key_mask"); + return; + } + + if (strcmp(tokens[t0 + 5], "port") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "port"); + return; + } + + for (i = 0; i < 16; i++) + if (softnic_parser_read_uint32(&p.lb.port_id[i], + tokens[t0 + 6 + i]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "port_id"); + return; + } + + p.action_mask |= 1LLU << RTE_PORT_IN_ACTION_LB; + t0 += 22; + } /* balance */ + + if (t0 < n_tokens) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return; + } + + ap = softnic_port_in_action_profile_create(softnic, name, &p); + if (ap == NULL) { + snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]); + return; + } +} + +/** + * table action profile + * ipv4 | ipv6 + * offset + * fwd + * [balance offset mask outoffset ] + * [meter srtcm | trtcm + * tc + * stats none | pkts | bytes | both] + * [tm spp pps ] + * [encap ether | vlan | qinq | mpls | pppoe | qinq_pppoe | + * vxlan offset ipv4 | ipv6 vlan on | off] + * [nat src | dst + * proto udp | tcp] + * [ttl drop | fwd + * stats none | pkts] + * [stats pkts | bytes | both] + * [time] + * [tag] + * [decap] + * + */ +static void +cmd_table_action_profile(struct pmd_internals *softnic, + char **tokens, + uint32_t n_tokens, + char *out, + size_t out_size) +{ + struct softnic_table_action_profile_params p; + struct softnic_table_action_profile *ap; + char *name; + uint32_t t0; + + memset(&p, 0, sizeof(p)); + + if (n_tokens < 8) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return; + } + + if (strcmp(tokens[1], "action") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "action"); + return; + } + + if (strcmp(tokens[2], "profile") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "profile"); + return; + } + + name = tokens[3]; + + if (strcmp(tokens[4], "ipv4") == 0) { + p.common.ip_version = 1; + } else if (strcmp(tokens[4], "ipv6") == 0) { + p.common.ip_version = 0; + } else { + snprintf(out, out_size, MSG_ARG_INVALID, "ipv4 or ipv6"); + return; + } + + if (strcmp(tokens[5], "offset") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "offset"); + return; + } + + if (softnic_parser_read_uint32(&p.common.ip_offset, + tokens[6]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "ip_offset"); + return; + } + + if (strcmp(tokens[7], "fwd") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "fwd"); + return; + } + + p.action_mask |= 1LLU << RTE_TABLE_ACTION_FWD; + + t0 = 8; + if (t0 < n_tokens && + (strcmp(tokens[t0], "balance") == 0)) { + if (n_tokens < t0 + 7) { + snprintf(out, out_size, MSG_ARG_MISMATCH, "table action profile balance"); + return; + } + + if (strcmp(tokens[t0 + 1], "offset") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "offset"); + return; + } + + if (softnic_parser_read_uint32(&p.lb.key_offset, + tokens[t0 + 2]) != 0) { + 
snprintf(out, out_size, MSG_ARG_INVALID, "key_offset"); + return; + } + + if (strcmp(tokens[t0 + 3], "mask") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "mask"); + return; + } + + p.lb.key_size = RTE_PORT_IN_ACTION_LB_KEY_SIZE_MAX; + if (softnic_parse_hex_string(tokens[t0 + 4], + p.lb.key_mask, &p.lb.key_size) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "key_mask"); + return; + } + + if (strcmp(tokens[t0 + 5], "outoffset") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "outoffset"); + return; + } + + if (softnic_parser_read_uint32(&p.lb.out_offset, + tokens[t0 + 6]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "out_offset"); + return; + } + + p.action_mask |= 1LLU << RTE_TABLE_ACTION_LB; + t0 += 7; + } /* balance */ + + if (t0 < n_tokens && + (strcmp(tokens[t0], "meter") == 0)) { + if (n_tokens < t0 + 6) { + snprintf(out, out_size, MSG_ARG_MISMATCH, + "table action profile meter"); + return; + } + + if (strcmp(tokens[t0 + 1], "srtcm") == 0) { + p.mtr.alg = RTE_TABLE_ACTION_METER_SRTCM; + } else if (strcmp(tokens[t0 + 1], "trtcm") == 0) { + p.mtr.alg = RTE_TABLE_ACTION_METER_TRTCM; + } else { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, + "srtcm or trtcm"); + return; + } + + if (strcmp(tokens[t0 + 2], "tc") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc"); + return; + } + + if (softnic_parser_read_uint32(&p.mtr.n_tc, + tokens[t0 + 3]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "n_tc"); + return; + } + + if (strcmp(tokens[t0 + 4], "stats") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "stats"); + return; + } + + if (strcmp(tokens[t0 + 5], "none") == 0) { + p.mtr.n_packets_enabled = 0; + p.mtr.n_bytes_enabled = 0; + } else if (strcmp(tokens[t0 + 5], "pkts") == 0) { + p.mtr.n_packets_enabled = 1; + p.mtr.n_bytes_enabled = 0; + } else if (strcmp(tokens[t0 + 5], "bytes") == 0) { + p.mtr.n_packets_enabled = 0; + p.mtr.n_bytes_enabled = 1; + } else if (strcmp(tokens[t0 + 5], "both") == 0) { + p.mtr.n_packets_enabled = 1; + p.mtr.n_bytes_enabled = 1; + } else { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, + "none or pkts or bytes or both"); + return; + } + + p.action_mask |= 1LLU << RTE_TABLE_ACTION_MTR; + t0 += 6; + } /* meter */ + + if (t0 < n_tokens && + (strcmp(tokens[t0], "tm") == 0)) { + if (n_tokens < t0 + 5) { + snprintf(out, out_size, MSG_ARG_MISMATCH, + "table action profile tm"); + return; + } + + if (strcmp(tokens[t0 + 1], "spp") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "spp"); + return; + } + + if (softnic_parser_read_uint32(&p.tm.n_subports_per_port, + tokens[t0 + 2]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, + "n_subports_per_port"); + return; + } + + if (strcmp(tokens[t0 + 3], "pps") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "pps"); + return; + } + + if (softnic_parser_read_uint32(&p.tm.n_pipes_per_subport, + tokens[t0 + 4]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, + "n_pipes_per_subport"); + return; + } + + p.action_mask |= 1LLU << RTE_TABLE_ACTION_TM; + t0 += 5; + } /* tm */ + + if (t0 < n_tokens && + (strcmp(tokens[t0], "encap") == 0)) { + uint32_t n_extra_tokens = 0; + + if (n_tokens < t0 + 2) { + snprintf(out, out_size, MSG_ARG_MISMATCH, + "action profile encap"); + return; + } + + if (strcmp(tokens[t0 + 1], "ether") == 0) { + p.encap.encap_mask = 1LLU << RTE_TABLE_ACTION_ENCAP_ETHER; + } else if (strcmp(tokens[t0 + 1], "vlan") == 0) { + p.encap.encap_mask = 1LLU << RTE_TABLE_ACTION_ENCAP_VLAN; + } else if (strcmp(tokens[t0 + 1], "qinq") == 0) { + p.encap.encap_mask = 1LLU << 
RTE_TABLE_ACTION_ENCAP_QINQ; + } else if (strcmp(tokens[t0 + 1], "mpls") == 0) { + p.encap.encap_mask = 1LLU << RTE_TABLE_ACTION_ENCAP_MPLS; + } else if (strcmp(tokens[t0 + 1], "pppoe") == 0) { + p.encap.encap_mask = 1LLU << RTE_TABLE_ACTION_ENCAP_PPPOE; + } else if (strcmp(tokens[t0 + 1], "vxlan") == 0) { + if (n_tokens < t0 + 2 + 5) { + snprintf(out, out_size, MSG_ARG_MISMATCH, + "action profile encap vxlan"); + return; + } + + if (strcmp(tokens[t0 + 2], "offset") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, + "vxlan: offset"); + return; + } + + if (softnic_parser_read_uint32(&p.encap.vxlan.data_offset, + tokens[t0 + 2 + 1]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, + "vxlan: ether_offset"); + return; + } + + if (strcmp(tokens[t0 + 2 + 2], "ipv4") == 0) + p.encap.vxlan.ip_version = 1; + else if (strcmp(tokens[t0 + 2 + 2], "ipv6") == 0) + p.encap.vxlan.ip_version = 0; + else { + snprintf(out, out_size, MSG_ARG_INVALID, + "vxlan: ipv4 or ipv6"); + return; + } + + if (strcmp(tokens[t0 + 2 + 3], "vlan") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, + "vxlan: vlan"); + return; + } + + if (strcmp(tokens[t0 + 2 + 4], "on") == 0) + p.encap.vxlan.vlan = 1; + else if (strcmp(tokens[t0 + 2 + 4], "off") == 0) + p.encap.vxlan.vlan = 0; + else { + snprintf(out, out_size, MSG_ARG_INVALID, + "vxlan: on or off"); + return; + } + + p.encap.encap_mask = 1LLU << RTE_TABLE_ACTION_ENCAP_VXLAN; + n_extra_tokens = 5; + + } else if (strcmp(tokens[t0 + 1], "qinq_pppoe") == 0) { + p.encap.encap_mask = + 1LLU << RTE_TABLE_ACTION_ENCAP_QINQ_PPPOE; + } else { + snprintf(out, out_size, MSG_ARG_MISMATCH, "encap"); + return; + } + + p.action_mask |= 1LLU << RTE_TABLE_ACTION_ENCAP; + t0 += 2 + n_extra_tokens; + } /* encap */ + + if (t0 < n_tokens && + (strcmp(tokens[t0], "nat") == 0)) { + if (n_tokens < t0 + 4) { + snprintf(out, out_size, MSG_ARG_MISMATCH, + "table action profile nat"); + return; + } + + if (strcmp(tokens[t0 + 1], "src") == 0) { + p.nat.source_nat = 1; + } else if (strcmp(tokens[t0 + 1], "dst") == 0) { + p.nat.source_nat = 0; + } else { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, + "src or dst"); + return; + } + + if (strcmp(tokens[t0 + 2], "proto") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "proto"); + return; + } + + if (strcmp(tokens[t0 + 3], "tcp") == 0) { + p.nat.proto = 0x06; + } else if (strcmp(tokens[t0 + 3], "udp") == 0) { + p.nat.proto = 0x11; + } else { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, + "tcp or udp"); + return; + } + + p.action_mask |= 1LLU << RTE_TABLE_ACTION_NAT; + t0 += 4; + } /* nat */ + + if (t0 < n_tokens && + (strcmp(tokens[t0], "ttl") == 0)) { + if (n_tokens < t0 + 4) { + snprintf(out, out_size, MSG_ARG_MISMATCH, + "table action profile ttl"); + return; + } + + if (strcmp(tokens[t0 + 1], "drop") == 0) { + p.ttl.drop = 1; + } else if (strcmp(tokens[t0 + 1], "fwd") == 0) { + p.ttl.drop = 0; + } else { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, + "drop or fwd"); + return; + } + + if (strcmp(tokens[t0 + 2], "stats") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "stats"); + return; + } + + if (strcmp(tokens[t0 + 3], "none") == 0) { + p.ttl.n_packets_enabled = 0; + } else if (strcmp(tokens[t0 + 3], "pkts") == 0) { + p.ttl.n_packets_enabled = 1; + } else { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, + "none or pkts"); + return; + } + + p.action_mask |= 1LLU << RTE_TABLE_ACTION_TTL; + t0 += 4; + } /* ttl */ + + if (t0 < n_tokens && + (strcmp(tokens[t0], "stats") == 0)) { + if (n_tokens < t0 + 2) { + snprintf(out, out_size, 
MSG_ARG_MISMATCH, + "table action profile stats"); + return; + } + + if (strcmp(tokens[t0 + 1], "pkts") == 0) { + p.stats.n_packets_enabled = 1; + p.stats.n_bytes_enabled = 0; + } else if (strcmp(tokens[t0 + 1], "bytes") == 0) { + p.stats.n_packets_enabled = 0; + p.stats.n_bytes_enabled = 1; + } else if (strcmp(tokens[t0 + 1], "both") == 0) { + p.stats.n_packets_enabled = 1; + p.stats.n_bytes_enabled = 1; + } else { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, + "pkts or bytes or both"); + return; + } + + p.action_mask |= 1LLU << RTE_TABLE_ACTION_STATS; + t0 += 2; + } /* stats */ + + if (t0 < n_tokens && + (strcmp(tokens[t0], "time") == 0)) { + p.action_mask |= 1LLU << RTE_TABLE_ACTION_TIME; + t0 += 1; + } /* time */ + + if (t0 < n_tokens && + (strcmp(tokens[t0], "tag") == 0)) { + p.action_mask |= 1LLU << RTE_TABLE_ACTION_TAG; + t0 += 1; + } /* tag */ + + if (t0 < n_tokens && + (strcmp(tokens[t0], "decap") == 0)) { + p.action_mask |= 1LLU << RTE_TABLE_ACTION_DECAP; + t0 += 1; + } /* decap */ + + if (t0 < n_tokens && (strcmp(tokens[t0], "sym_crypto") == 0)) { + struct softnic_cryptodev *cryptodev; + + if (n_tokens < t0 + 5 || + strcmp(tokens[t0 + 1], "dev") || + strcmp(tokens[t0 + 3], "offset")) { + snprintf(out, out_size, MSG_ARG_MISMATCH, + "table action profile sym_crypto"); + return; + } + + cryptodev = softnic_cryptodev_find(softnic, tokens[t0 + 2]); + if (cryptodev == NULL) { + snprintf(out, out_size, MSG_ARG_INVALID, + "table action profile sym_crypto"); + return; + } + + p.sym_crypto.cryptodev_id = cryptodev->dev_id; + + if (softnic_parser_read_uint32(&p.sym_crypto.op_offset, + tokens[t0 + 4]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, + "table action profile sym_crypto"); + return; + } + + p.sym_crypto.mp_create = cryptodev->mp_create; + p.sym_crypto.mp_init = cryptodev->mp_init; + + p.action_mask |= 1LLU << RTE_TABLE_ACTION_SYM_CRYPTO; + + t0 += 5; + } /* sym_crypto */ + + if (t0 < n_tokens) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return; + } + + ap = softnic_table_action_profile_create(softnic, name, &p); + if (ap == NULL) { + snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]); + return; + } +} + +/** + * pipeline + * period + * offset_port_id + */ +static void +cmd_pipeline(struct pmd_internals *softnic, + char **tokens, + uint32_t n_tokens, + char *out, + size_t out_size) +{ + struct pipeline_params p; + char *name; + struct pipeline *pipeline; + + if (n_tokens != 6) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return; + } + + name = tokens[1]; + + if (strcmp(tokens[2], "period") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "period"); + return; + } + + if (softnic_parser_read_uint32(&p.timer_period_ms, + tokens[3]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "timer_period_ms"); + return; + } + + if (strcmp(tokens[4], "offset_port_id") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "offset_port_id"); + return; + } + + if (softnic_parser_read_uint32(&p.offset_port_id, + tokens[5]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "offset_port_id"); + return; + } + + pipeline = softnic_pipeline_create(softnic, name, &p); + if (pipeline == NULL) { + snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]); + return; + } +} + +/** + * pipeline port in + * bsz + * link rxq + * | swq + * | tmgr + * | tap mempool mtu + * | source mempool file bpp + * | cryptodev rxq + * [action ] + * [disabled] + */ +static void +cmd_pipeline_port_in(struct pmd_internals *softnic, + char **tokens, + uint32_t n_tokens, + char *out, + size_t 
out_size) +{ + struct softnic_port_in_params p; + char *pipeline_name; + uint32_t t0; + int enabled, status; + + memset(&p, 0, sizeof(p)); + + if (n_tokens < 7) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return; + } + + pipeline_name = tokens[1]; + + if (strcmp(tokens[2], "port") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "port"); + return; + } + + if (strcmp(tokens[3], "in") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "in"); + return; + } + + if (strcmp(tokens[4], "bsz") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "bsz"); + return; + } + + if (softnic_parser_read_uint32(&p.burst_size, tokens[5]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "burst_size"); + return; + } + + t0 = 6; + + if (strcmp(tokens[t0], "link") == 0) { + if (n_tokens < t0 + 4) { + snprintf(out, out_size, MSG_ARG_MISMATCH, + "pipeline port in link"); + return; + } + + p.type = PORT_IN_RXQ; + + strlcpy(p.dev_name, tokens[t0 + 1], sizeof(p.dev_name)); + + if (strcmp(tokens[t0 + 2], "rxq") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "rxq"); + return; + } + + if (softnic_parser_read_uint16(&p.rxq.queue_id, + tokens[t0 + 3]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, + "queue_id"); + return; + } + t0 += 4; + } else if (strcmp(tokens[t0], "swq") == 0) { + if (n_tokens < t0 + 2) { + snprintf(out, out_size, MSG_ARG_MISMATCH, + "pipeline port in swq"); + return; + } + + p.type = PORT_IN_SWQ; + + strlcpy(p.dev_name, tokens[t0 + 1], sizeof(p.dev_name)); + + t0 += 2; + } else if (strcmp(tokens[t0], "tmgr") == 0) { + if (n_tokens < t0 + 2) { + snprintf(out, out_size, MSG_ARG_MISMATCH, + "pipeline port in tmgr"); + return; + } + + p.type = PORT_IN_TMGR; + + strlcpy(p.dev_name, tokens[t0 + 1], sizeof(p.dev_name)); + + t0 += 2; + } else if (strcmp(tokens[t0], "tap") == 0) { + if (n_tokens < t0 + 6) { + snprintf(out, out_size, MSG_ARG_MISMATCH, + "pipeline port in tap"); + return; + } + + p.type = PORT_IN_TAP; + + strlcpy(p.dev_name, tokens[t0 + 1], sizeof(p.dev_name)); + + if (strcmp(tokens[t0 + 2], "mempool") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, + "mempool"); + return; + } + + p.tap.mempool_name = tokens[t0 + 3]; + + if (strcmp(tokens[t0 + 4], "mtu") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, + "mtu"); + return; + } + + if (softnic_parser_read_uint32(&p.tap.mtu, + tokens[t0 + 5]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "mtu"); + return; + } + + t0 += 6; + } else if (strcmp(tokens[t0], "source") == 0) { + if (n_tokens < t0 + 6) { + snprintf(out, out_size, MSG_ARG_MISMATCH, + "pipeline port in source"); + return; + } + + p.type = PORT_IN_SOURCE; + + if (strcmp(tokens[t0 + 1], "mempool") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, + "mempool"); + return; + } + + p.source.mempool_name = tokens[t0 + 2]; + + if (strcmp(tokens[t0 + 3], "file") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, + "file"); + return; + } + + p.source.file_name = tokens[t0 + 4]; + + if (strcmp(tokens[t0 + 5], "bpp") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, + "bpp"); + return; + } + + if (softnic_parser_read_uint32(&p.source.n_bytes_per_pkt, + tokens[t0 + 6]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, + "n_bytes_per_pkt"); + return; + } + + t0 += 7; + } else if (strcmp(tokens[t0], "cryptodev") == 0) { + if (n_tokens < t0 + 3) { + snprintf(out, out_size, MSG_ARG_MISMATCH, + "pipeline port in cryptodev"); + return; + } + + p.type = PORT_IN_CRYPTODEV; + + strlcpy(p.dev_name, tokens[t0 + 1], sizeof(p.dev_name)); + if 
(softnic_parser_read_uint16(&p.rxq.queue_id, + tokens[t0 + 3]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, + "rxq"); + return; + } + + p.cryptodev.arg_callback = NULL; + p.cryptodev.f_callback = NULL; + + t0 += 4; + } else { + snprintf(out, out_size, MSG_ARG_INVALID, tokens[0]); + return; + } + + if (n_tokens > t0 && + (strcmp(tokens[t0], "action") == 0)) { + if (n_tokens < t0 + 2) { + snprintf(out, out_size, MSG_ARG_MISMATCH, "action"); + return; + } + + strlcpy(p.action_profile_name, tokens[t0 + 1], + sizeof(p.action_profile_name)); + + t0 += 2; + } + + enabled = 1; + if (n_tokens > t0 && + (strcmp(tokens[t0], "disabled") == 0)) { + enabled = 0; + + t0 += 1; + } + + if (n_tokens != t0) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return; + } + + status = softnic_pipeline_port_in_create(softnic, + pipeline_name, + &p, + enabled); + if (status) { + snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]); + return; + } +} + +/** + * pipeline port out + * bsz + * link txq + * | swq + * | tmgr + * | tap + * | sink [file pkts ] + * | cryptodev txq offset + */ +static void +cmd_pipeline_port_out(struct pmd_internals *softnic, + char **tokens, + uint32_t n_tokens, + char *out, + size_t out_size) +{ + struct softnic_port_out_params p; + char *pipeline_name; + int status; + + memset(&p, 0, sizeof(p)); + + if (n_tokens < 7) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return; + } + + pipeline_name = tokens[1]; + + if (strcmp(tokens[2], "port") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "port"); + return; + } + + if (strcmp(tokens[3], "out") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "out"); + return; + } + + if (strcmp(tokens[4], "bsz") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "bsz"); + return; + } + + if (softnic_parser_read_uint32(&p.burst_size, tokens[5]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "burst_size"); + return; + } + + if (strcmp(tokens[6], "link") == 0) { + if (n_tokens != 10) { + snprintf(out, out_size, MSG_ARG_MISMATCH, + "pipeline port out link"); + return; + } + + p.type = PORT_OUT_TXQ; + + strlcpy(p.dev_name, tokens[7], sizeof(p.dev_name)); + + if (strcmp(tokens[8], "txq") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "txq"); + return; + } + + if (softnic_parser_read_uint16(&p.txq.queue_id, + tokens[9]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "queue_id"); + return; + } + } else if (strcmp(tokens[6], "swq") == 0) { + if (n_tokens != 8) { + snprintf(out, out_size, MSG_ARG_MISMATCH, + "pipeline port out swq"); + return; + } + + p.type = PORT_OUT_SWQ; + + strlcpy(p.dev_name, tokens[7], sizeof(p.dev_name)); + } else if (strcmp(tokens[6], "tmgr") == 0) { + if (n_tokens != 8) { + snprintf(out, out_size, MSG_ARG_MISMATCH, + "pipeline port out tmgr"); + return; + } + + p.type = PORT_OUT_TMGR; + + strlcpy(p.dev_name, tokens[7], sizeof(p.dev_name)); + } else if (strcmp(tokens[6], "tap") == 0) { + if (n_tokens != 8) { + snprintf(out, out_size, MSG_ARG_MISMATCH, + "pipeline port out tap"); + return; + } + + p.type = PORT_OUT_TAP; + + strlcpy(p.dev_name, tokens[7], sizeof(p.dev_name)); + } else if (strcmp(tokens[6], "sink") == 0) { + if ((n_tokens != 7) && (n_tokens != 11)) { + snprintf(out, out_size, MSG_ARG_MISMATCH, + "pipeline port out sink"); + return; + } + + p.type = PORT_OUT_SINK; + + if (n_tokens == 7) { + p.sink.file_name = NULL; + p.sink.max_n_pkts = 0; + } else { + if (strcmp(tokens[7], "file") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, + "file"); + return; + } + + p.sink.file_name = 
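+/*
+ * Illustrative usage of "pipeline ... port out" as parsed by this function
+ * (placeholder names and example values only):
+ *   pipeline PIPELINE0 port out bsz 32 link LINK0 txq 0
+ *   pipeline PIPELINE0 port out bsz 32 sink
+ */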
tokens[8]; + + if (strcmp(tokens[9], "pkts") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "pkts"); + return; + } + + if (softnic_parser_read_uint32(&p.sink.max_n_pkts, + tokens[10]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "max_n_pkts"); + return; + } + } + } else if (strcmp(tokens[6], "cryptodev") == 0) { + if (n_tokens != 12) { + snprintf(out, out_size, MSG_ARG_MISMATCH, + "pipeline port out cryptodev"); + return; + } + + p.type = PORT_OUT_CRYPTODEV; + + strlcpy(p.dev_name, tokens[7], sizeof(p.dev_name)); + + if (strcmp(tokens[8], "txq")) { + snprintf(out, out_size, MSG_ARG_MISMATCH, + "pipeline port out cryptodev"); + return; + } + + if (softnic_parser_read_uint16(&p.cryptodev.queue_id, tokens[9]) + != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "queue_id"); + return; + } + + if (strcmp(tokens[10], "offset")) { + snprintf(out, out_size, MSG_ARG_MISMATCH, + "pipeline port out cryptodev"); + return; + } + + if (softnic_parser_read_uint32(&p.cryptodev.op_offset, + tokens[11]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "queue_id"); + return; + } + } else { + snprintf(out, out_size, MSG_ARG_INVALID, tokens[0]); + return; + } + + status = softnic_pipeline_port_out_create(softnic, pipeline_name, &p); + if (status) { + snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]); + return; + } +} + +/** + * pipeline table + * match + * acl + * ipv4 | ipv6 + * offset + * size + * | array + * offset + * size + * | hash + * ext | lru + * key + * mask + * offset + * buckets + * size + * | lpm + * ipv4 | ipv6 + * offset + * size + * | stub + * [action ] + */ +static void +cmd_pipeline_table(struct pmd_internals *softnic, + char **tokens, + uint32_t n_tokens, + char *out, + size_t out_size) +{ + struct softnic_table_params p; + char *pipeline_name; + uint32_t t0; + int status; + + memset(&p, 0, sizeof(p)); + + if (n_tokens < 5) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return; + } + + pipeline_name = tokens[1]; + + if (strcmp(tokens[2], "table") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "table"); + return; + } + + if (strcmp(tokens[3], "match") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "match"); + return; + } + + t0 = 4; + if (strcmp(tokens[t0], "acl") == 0) { + if (n_tokens < t0 + 6) { + snprintf(out, out_size, MSG_ARG_MISMATCH, + "pipeline table acl"); + return; + } + + p.match_type = TABLE_ACL; + + if (strcmp(tokens[t0 + 1], "ipv4") == 0) { + p.match.acl.ip_version = 1; + } else if (strcmp(tokens[t0 + 1], "ipv6") == 0) { + p.match.acl.ip_version = 0; + } else { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, + "ipv4 or ipv6"); + return; + } + + if (strcmp(tokens[t0 + 2], "offset") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "offset"); + return; + } + + if (softnic_parser_read_uint32(&p.match.acl.ip_header_offset, + tokens[t0 + 3]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, + "ip_header_offset"); + return; + } + + if (strcmp(tokens[t0 + 4], "size") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "size"); + return; + } + + if (softnic_parser_read_uint32(&p.match.acl.n_rules, + tokens[t0 + 5]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "n_rules"); + return; + } + + t0 += 6; + } else if (strcmp(tokens[t0], "array") == 0) { + if (n_tokens < t0 + 5) { + snprintf(out, out_size, MSG_ARG_MISMATCH, + "pipeline table array"); + return; + } + + p.match_type = TABLE_ARRAY; + + if (strcmp(tokens[t0 + 1], "offset") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "offset"); + return; + } + + if 
(softnic_parser_read_uint32(&p.match.array.key_offset, + tokens[t0 + 2]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "key_offset"); + return; + } + + if (strcmp(tokens[t0 + 3], "size") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "size"); + return; + } + + if (softnic_parser_read_uint32(&p.match.array.n_keys, + tokens[t0 + 4]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "n_keys"); + return; + } + + t0 += 5; + } else if (strcmp(tokens[t0], "hash") == 0) { + uint32_t key_mask_size = TABLE_RULE_MATCH_SIZE_MAX; + + if (n_tokens < t0 + 12) { + snprintf(out, out_size, MSG_ARG_MISMATCH, + "pipeline table hash"); + return; + } + + p.match_type = TABLE_HASH; + + if (strcmp(tokens[t0 + 1], "ext") == 0) { + p.match.hash.extendable_bucket = 1; + } else if (strcmp(tokens[t0 + 1], "lru") == 0) { + p.match.hash.extendable_bucket = 0; + } else { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, + "ext or lru"); + return; + } + + if (strcmp(tokens[t0 + 2], "key") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "key"); + return; + } + + if ((softnic_parser_read_uint32(&p.match.hash.key_size, + tokens[t0 + 3]) != 0) || + p.match.hash.key_size == 0 || + p.match.hash.key_size > TABLE_RULE_MATCH_SIZE_MAX) { + snprintf(out, out_size, MSG_ARG_INVALID, "key_size"); + return; + } + + if (strcmp(tokens[t0 + 4], "mask") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "mask"); + return; + } + + if ((softnic_parse_hex_string(tokens[t0 + 5], + p.match.hash.key_mask, &key_mask_size) != 0) || + key_mask_size != p.match.hash.key_size) { + snprintf(out, out_size, MSG_ARG_INVALID, "key_mask"); + return; + } + + if (strcmp(tokens[t0 + 6], "offset") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "offset"); + return; + } + + if (softnic_parser_read_uint32(&p.match.hash.key_offset, + tokens[t0 + 7]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "key_offset"); + return; + } + + if (strcmp(tokens[t0 + 8], "buckets") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "buckets"); + return; + } + + if (softnic_parser_read_uint32(&p.match.hash.n_buckets, + tokens[t0 + 9]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "n_buckets"); + return; + } + + if (strcmp(tokens[t0 + 10], "size") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "size"); + return; + } + + if (softnic_parser_read_uint32(&p.match.hash.n_keys, + tokens[t0 + 11]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "n_keys"); + return; + } + + t0 += 12; + } else if (strcmp(tokens[t0], "lpm") == 0) { + if (n_tokens < t0 + 6) { + snprintf(out, out_size, MSG_ARG_MISMATCH, + "pipeline table lpm"); + return; + } + + p.match_type = TABLE_LPM; + + if (strcmp(tokens[t0 + 1], "ipv4") == 0) { + p.match.lpm.key_size = 4; + } else if (strcmp(tokens[t0 + 1], "ipv6") == 0) { + p.match.lpm.key_size = 16; + } else { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, + "ipv4 or ipv6"); + return; + } + + if (strcmp(tokens[t0 + 2], "offset") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "offset"); + return; + } + + if (softnic_parser_read_uint32(&p.match.lpm.key_offset, + tokens[t0 + 3]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "key_offset"); + return; + } + + if (strcmp(tokens[t0 + 4], "size") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "size"); + return; + } + + if (softnic_parser_read_uint32(&p.match.lpm.n_rules, + tokens[t0 + 5]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "n_rules"); + return; + } + + t0 += 6; + } else if (strcmp(tokens[t0], "stub") == 0) { + p.match_type = TABLE_STUB; + + t0 += 1; + } else { 
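+/*
+ * Illustrative "pipeline ... table match" commands accepted by this parser
+ * (PIPELINE0 and AP0 are placeholder names; the offsets, sizes and the
+ * 8-byte hash mask are arbitrary example values):
+ *   pipeline PIPELINE0 table match acl ipv4 offset 270 size 4096 action AP0
+ *   pipeline PIPELINE0 table match hash ext key 8 mask FFFFFF00FFFFFFFF offset 278 buckets 1024 size 65536
+ *   pipeline PIPELINE0 table match stub
+ */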
+ snprintf(out, out_size, MSG_ARG_INVALID, tokens[0]); + return; + } + + if (n_tokens > t0 && + (strcmp(tokens[t0], "action") == 0)) { + if (n_tokens < t0 + 2) { + snprintf(out, out_size, MSG_ARG_MISMATCH, "action"); + return; + } + + strlcpy(p.action_profile_name, tokens[t0 + 1], + sizeof(p.action_profile_name)); + + t0 += 2; + } + + if (n_tokens > t0) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return; + } + + status = softnic_pipeline_table_create(softnic, pipeline_name, &p); + if (status) { + snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]); + return; + } +} + +/** + * pipeline port in table + */ +static void +cmd_pipeline_port_in_table(struct pmd_internals *softnic, + char **tokens, + uint32_t n_tokens, + char *out, + size_t out_size) +{ + char *pipeline_name; + uint32_t port_id, table_id; + int status; + + if (n_tokens != 7) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return; + } + + pipeline_name = tokens[1]; + + if (strcmp(tokens[2], "port") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "port"); + return; + } + + if (strcmp(tokens[3], "in") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "in"); + return; + } + + if (softnic_parser_read_uint32(&port_id, tokens[4]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "port_id"); + return; + } + + if (strcmp(tokens[5], "table") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "table"); + return; + } + + if (softnic_parser_read_uint32(&table_id, tokens[6]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "table_id"); + return; + } + + status = softnic_pipeline_port_in_connect_to_table(softnic, + pipeline_name, + port_id, + table_id); + if (status) { + snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]); + return; + } +} + +/** + * pipeline port in stats read [clear] + */ + +#define MSG_PIPELINE_PORT_IN_STATS \ + "Pkts in: %" PRIu64 "\n" \ + "Pkts dropped by AH: %" PRIu64 "\n" \ + "Pkts dropped by other: %" PRIu64 "\n" + +static void +cmd_pipeline_port_in_stats(struct pmd_internals *softnic, + char **tokens, + uint32_t n_tokens, + char *out, + size_t out_size) +{ + struct rte_pipeline_port_in_stats stats; + char *pipeline_name; + uint32_t port_id; + int clear, status; + + if (n_tokens != 7 && + n_tokens != 8) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return; + } + + pipeline_name = tokens[1]; + + if (strcmp(tokens[2], "port") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "port"); + return; + } + + if (strcmp(tokens[3], "in") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "in"); + return; + } + + if (softnic_parser_read_uint32(&port_id, tokens[4]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "port_id"); + return; + } + + if (strcmp(tokens[5], "stats") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "stats"); + return; + } + + if (strcmp(tokens[6], "read") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "read"); + return; + } + + clear = 0; + if (n_tokens == 8) { + if (strcmp(tokens[7], "clear") != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "clear"); + return; + } + + clear = 1; + } + + status = softnic_pipeline_port_in_stats_read(softnic, + pipeline_name, + port_id, + &stats, + clear); + if (status) { + snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]); + return; + } + + snprintf(out, out_size, MSG_PIPELINE_PORT_IN_STATS, + stats.stats.n_pkts_in, + stats.n_pkts_dropped_by_ah, + stats.stats.n_pkts_drop); +} + +/** + * pipeline port in enable + */ +static void +cmd_softnic_pipeline_port_in_enable(struct pmd_internals *softnic, + char **tokens, + 
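+/*
+ * Illustrative usage of the per-port commands parsed above and below
+ * (PIPELINE0 is a placeholder pipeline name; port and table ids are examples):
+ *   pipeline PIPELINE0 port in 0 table 0
+ *   pipeline PIPELINE0 port in 0 stats read clear
+ *   pipeline PIPELINE0 port in 0 enable
+ */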
uint32_t n_tokens, + char *out, + size_t out_size) +{ + char *pipeline_name; + uint32_t port_id; + int status; + + if (n_tokens != 6) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return; + } + + pipeline_name = tokens[1]; + + if (strcmp(tokens[2], "port") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "port"); + return; + } + + if (strcmp(tokens[3], "in") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "in"); + return; + } + + if (softnic_parser_read_uint32(&port_id, tokens[4]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "port_id"); + return; + } + + if (strcmp(tokens[5], "enable") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "enable"); + return; + } + + status = softnic_pipeline_port_in_enable(softnic, pipeline_name, port_id); + if (status) { + snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]); + return; + } +} + +/** + * pipeline port in disable + */ +static void +cmd_softnic_pipeline_port_in_disable(struct pmd_internals *softnic, + char **tokens, + uint32_t n_tokens, + char *out, + size_t out_size) +{ + char *pipeline_name; + uint32_t port_id; + int status; + + if (n_tokens != 6) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return; + } + + pipeline_name = tokens[1]; + + if (strcmp(tokens[2], "port") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "port"); + return; + } + + if (strcmp(tokens[3], "in") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "in"); + return; + } + + if (softnic_parser_read_uint32(&port_id, tokens[4]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "port_id"); + return; + } + + if (strcmp(tokens[5], "disable") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "disable"); + return; + } + + status = softnic_pipeline_port_in_disable(softnic, pipeline_name, port_id); + if (status) { + snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]); + return; + } +} + +/** + * pipeline port out stats read [clear] + */ +#define MSG_PIPELINE_PORT_OUT_STATS \ + "Pkts in: %" PRIu64 "\n" \ + "Pkts dropped by AH: %" PRIu64 "\n" \ + "Pkts dropped by other: %" PRIu64 "\n" + +static void +cmd_pipeline_port_out_stats(struct pmd_internals *softnic, + char **tokens, + uint32_t n_tokens, + char *out, + size_t out_size) +{ + struct rte_pipeline_port_out_stats stats; + char *pipeline_name; + uint32_t port_id; + int clear, status; + + if (n_tokens != 7 && + n_tokens != 8) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return; + } + + pipeline_name = tokens[1]; + + if (strcmp(tokens[2], "port") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "port"); + return; + } + + if (strcmp(tokens[3], "out") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "out"); + return; + } + + if (softnic_parser_read_uint32(&port_id, tokens[4]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "port_id"); + return; + } + + if (strcmp(tokens[5], "stats") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "stats"); + return; + } + + if (strcmp(tokens[6], "read") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "read"); + return; + } + + clear = 0; + if (n_tokens == 8) { + if (strcmp(tokens[7], "clear") != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "clear"); + return; + } + + clear = 1; + } + + status = softnic_pipeline_port_out_stats_read(softnic, + pipeline_name, + port_id, + &stats, + clear); + if (status) { + snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]); + return; + } + + snprintf(out, out_size, MSG_PIPELINE_PORT_OUT_STATS, + stats.stats.n_pkts_in, + stats.n_pkts_dropped_by_ah, + stats.stats.n_pkts_drop); +} + +/** + 
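+ * (Illustrative usage of the stats commands above and below, with PIPELINE0
+ *  as a placeholder pipeline name and 0 as an example id:
+ *  "pipeline PIPELINE0 port out 0 stats read" and
+ *  "pipeline PIPELINE0 table 0 stats read clear".)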
* pipeline table stats read [clear] + */ +#define MSG_PIPELINE_TABLE_STATS \ + "Pkts in: %" PRIu64 "\n" \ + "Pkts in with lookup miss: %" PRIu64 "\n" \ + "Pkts in with lookup hit dropped by AH: %" PRIu64 "\n" \ + "Pkts in with lookup hit dropped by others: %" PRIu64 "\n" \ + "Pkts in with lookup miss dropped by AH: %" PRIu64 "\n" \ + "Pkts in with lookup miss dropped by others: %" PRIu64 "\n" + +static void +cmd_pipeline_table_stats(struct pmd_internals *softnic, + char **tokens, + uint32_t n_tokens, + char *out, + size_t out_size) +{ + struct rte_pipeline_table_stats stats; + char *pipeline_name; + uint32_t table_id; + int clear, status; + + if (n_tokens != 6 && + n_tokens != 7) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return; + } + + pipeline_name = tokens[1]; + + if (strcmp(tokens[2], "table") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "port"); + return; + } + + if (softnic_parser_read_uint32(&table_id, tokens[3]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "table_id"); + return; + } + + if (strcmp(tokens[4], "stats") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "stats"); + return; + } + + if (strcmp(tokens[5], "read") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "read"); + return; + } + + clear = 0; + if (n_tokens == 7) { + if (strcmp(tokens[6], "clear") != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "clear"); + return; + } + + clear = 1; + } + + status = softnic_pipeline_table_stats_read(softnic, + pipeline_name, + table_id, + &stats, + clear); + if (status) { + snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]); + return; + } + + snprintf(out, out_size, MSG_PIPELINE_TABLE_STATS, + stats.stats.n_pkts_in, + stats.stats.n_pkts_lookup_miss, + stats.n_pkts_dropped_by_lkp_hit_ah, + stats.n_pkts_dropped_lkp_hit, + stats.n_pkts_dropped_by_lkp_miss_ah, + stats.n_pkts_dropped_lkp_miss); +} + +/** + * ::= + * + * match + * acl + * priority + * ipv4 | ipv6 + * + * | array + * | hash + * raw + * | ipv4_5tuple + * | ipv6_5tuple + * | ipv4_addr + * | ipv6_addr + * | qinq + * | lpm + * ipv4 | ipv6 + */ +struct pkt_key_qinq { + uint16_t ethertype_svlan; + uint16_t svlan; + uint16_t ethertype_cvlan; + uint16_t cvlan; +} __rte_packed; + +struct pkt_key_ipv4_5tuple { + uint8_t time_to_live; + uint8_t proto; + uint16_t hdr_checksum; + uint32_t sa; + uint32_t da; + uint16_t sp; + uint16_t dp; +} __rte_packed; + +struct pkt_key_ipv6_5tuple { + uint16_t payload_length; + uint8_t proto; + uint8_t hop_limit; + uint8_t sa[16]; + uint8_t da[16]; + uint16_t sp; + uint16_t dp; +} __rte_packed; + +struct pkt_key_ipv4_addr { + uint32_t addr; +} __rte_packed; + +struct pkt_key_ipv6_addr { + uint8_t addr[16]; +} __rte_packed; + +static uint32_t +parse_match(char **tokens, + uint32_t n_tokens, + char *out, + size_t out_size, + struct softnic_table_rule_match *m) +{ + memset(m, 0, sizeof(*m)); + + if (n_tokens < 2) + return 0; + + if (strcmp(tokens[0], "match") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "match"); + return 0; + } + + if (strcmp(tokens[1], "acl") == 0) { + if (n_tokens < 14) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return 0; + } + + m->match_type = TABLE_ACL; + + if (strcmp(tokens[2], "priority") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "priority"); + return 0; + } + + if (softnic_parser_read_uint32(&m->match.acl.priority, + tokens[3]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "priority"); + return 0; + } + + if (strcmp(tokens[4], "ipv4") == 0) { + struct in_addr saddr, daddr; + + 
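+/*
+ * Illustrative ACL match specification in the form accepted by this parser
+ * (addresses, prefix depths, port ranges and protocol are example values):
+ *   match acl priority 0 ipv4 100.0.0.0 24 200.0.0.0 24 0 65535 0 65535 6
+ */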
m->match.acl.ip_version = 1; + + if (softnic_parse_ipv4_addr(tokens[5], &saddr) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "sa"); + return 0; + } + m->match.acl.ipv4.sa = rte_be_to_cpu_32(saddr.s_addr); + + if (softnic_parse_ipv4_addr(tokens[7], &daddr) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "da"); + return 0; + } + m->match.acl.ipv4.da = rte_be_to_cpu_32(daddr.s_addr); + } else if (strcmp(tokens[4], "ipv6") == 0) { + struct in6_addr saddr, daddr; + + m->match.acl.ip_version = 0; + + if (softnic_parse_ipv6_addr(tokens[5], &saddr) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "sa"); + return 0; + } + memcpy(m->match.acl.ipv6.sa, saddr.s6_addr, 16); + + if (softnic_parse_ipv6_addr(tokens[7], &daddr) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "da"); + return 0; + } + memcpy(m->match.acl.ipv6.da, daddr.s6_addr, 16); + } else { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, + "ipv4 or ipv6"); + return 0; + } + + if (softnic_parser_read_uint32(&m->match.acl.sa_depth, + tokens[6]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "sa_depth"); + return 0; + } + + if (softnic_parser_read_uint32(&m->match.acl.da_depth, + tokens[8]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "da_depth"); + return 0; + } + + if (softnic_parser_read_uint16(&m->match.acl.sp0, tokens[9]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "sp0"); + return 0; + } + + if (softnic_parser_read_uint16(&m->match.acl.sp1, tokens[10]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "sp1"); + return 0; + } + + if (softnic_parser_read_uint16(&m->match.acl.dp0, tokens[11]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "dp0"); + return 0; + } + + if (softnic_parser_read_uint16(&m->match.acl.dp1, tokens[12]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "dp1"); + return 0; + } + + if (softnic_parser_read_uint8(&m->match.acl.proto, tokens[13]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "proto"); + return 0; + } + + m->match.acl.proto_mask = 0xff; + + return 14; + } /* acl */ + + if (strcmp(tokens[1], "array") == 0) { + if (n_tokens < 3) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return 0; + } + + m->match_type = TABLE_ARRAY; + + if (softnic_parser_read_uint32(&m->match.array.pos, tokens[2]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "pos"); + return 0; + } + + return 3; + } /* array */ + + if (strcmp(tokens[1], "hash") == 0) { + if (n_tokens < 3) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return 0; + } + + m->match_type = TABLE_HASH; + + if (strcmp(tokens[2], "raw") == 0) { + uint32_t key_size = TABLE_RULE_MATCH_SIZE_MAX; + + if (n_tokens < 4) { + snprintf(out, out_size, MSG_ARG_MISMATCH, + tokens[0]); + return 0; + } + + if (softnic_parse_hex_string(tokens[3], + m->match.hash.key, &key_size) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "key"); + return 0; + } + + return 4; + } /* hash raw */ + + if (strcmp(tokens[2], "ipv4_5tuple") == 0) { + struct pkt_key_ipv4_5tuple *ipv4 = + (struct pkt_key_ipv4_5tuple *)m->match.hash.key; + struct in_addr saddr, daddr; + uint16_t sp, dp; + uint8_t proto; + + if (n_tokens < 8) { + snprintf(out, out_size, MSG_ARG_MISMATCH, + tokens[0]); + return 0; + } + + if (softnic_parse_ipv4_addr(tokens[3], &saddr) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "sa"); + return 0; + } + + if (softnic_parse_ipv4_addr(tokens[4], &daddr) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "da"); + return 0; + } + + if (softnic_parser_read_uint16(&sp, tokens[5]) != 0) { + snprintf(out, out_size, 
MSG_ARG_INVALID, "sp"); + return 0; + } + + if (softnic_parser_read_uint16(&dp, tokens[6]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "dp"); + return 0; + } + + if (softnic_parser_read_uint8(&proto, tokens[7]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, + "proto"); + return 0; + } + + ipv4->sa = saddr.s_addr; + ipv4->da = daddr.s_addr; + ipv4->sp = rte_cpu_to_be_16(sp); + ipv4->dp = rte_cpu_to_be_16(dp); + ipv4->proto = proto; + + return 8; + } /* hash ipv4_5tuple */ + + if (strcmp(tokens[2], "ipv6_5tuple") == 0) { + struct pkt_key_ipv6_5tuple *ipv6 = + (struct pkt_key_ipv6_5tuple *)m->match.hash.key; + struct in6_addr saddr, daddr; + uint16_t sp, dp; + uint8_t proto; + + if (n_tokens < 8) { + snprintf(out, out_size, MSG_ARG_MISMATCH, + tokens[0]); + return 0; + } + + if (softnic_parse_ipv6_addr(tokens[3], &saddr) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "sa"); + return 0; + } + + if (softnic_parse_ipv6_addr(tokens[4], &daddr) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "da"); + return 0; + } + + if (softnic_parser_read_uint16(&sp, tokens[5]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "sp"); + return 0; + } + + if (softnic_parser_read_uint16(&dp, tokens[6]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "dp"); + return 0; + } + + if (softnic_parser_read_uint8(&proto, tokens[7]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, + "proto"); + return 0; + } + + memcpy(ipv6->sa, saddr.s6_addr, 16); + memcpy(ipv6->da, daddr.s6_addr, 16); + ipv6->sp = rte_cpu_to_be_16(sp); + ipv6->dp = rte_cpu_to_be_16(dp); + ipv6->proto = proto; + + return 8; + } /* hash ipv6_5tuple */ + + if (strcmp(tokens[2], "ipv4_addr") == 0) { + struct pkt_key_ipv4_addr *ipv4_addr = + (struct pkt_key_ipv4_addr *)m->match.hash.key; + struct in_addr addr; + + if (n_tokens < 4) { + snprintf(out, out_size, MSG_ARG_MISMATCH, + tokens[0]); + return 0; + } + + if (softnic_parse_ipv4_addr(tokens[3], &addr) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, + "addr"); + return 0; + } + + ipv4_addr->addr = addr.s_addr; + + return 4; + } /* hash ipv4_addr */ + + if (strcmp(tokens[2], "ipv6_addr") == 0) { + struct pkt_key_ipv6_addr *ipv6_addr = + (struct pkt_key_ipv6_addr *)m->match.hash.key; + struct in6_addr addr; + + if (n_tokens < 4) { + snprintf(out, out_size, MSG_ARG_MISMATCH, + tokens[0]); + return 0; + } + + if (softnic_parse_ipv6_addr(tokens[3], &addr) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, + "addr"); + return 0; + } + + memcpy(ipv6_addr->addr, addr.s6_addr, 16); + + return 4; + } /* hash ipv6_5tuple */ + + if (strcmp(tokens[2], "qinq") == 0) { + struct pkt_key_qinq *qinq = + (struct pkt_key_qinq *)m->match.hash.key; + uint16_t svlan, cvlan; + + if (n_tokens < 5) { + snprintf(out, out_size, MSG_ARG_MISMATCH, + tokens[0]); + return 0; + } + + if ((softnic_parser_read_uint16(&svlan, tokens[3]) != 0) || + svlan > 0xFFF) { + snprintf(out, out_size, MSG_ARG_INVALID, + "svlan"); + return 0; + } + + if ((softnic_parser_read_uint16(&cvlan, tokens[4]) != 0) || + cvlan > 0xFFF) { + snprintf(out, out_size, MSG_ARG_INVALID, + "cvlan"); + return 0; + } + + qinq->svlan = rte_cpu_to_be_16(svlan); + qinq->cvlan = rte_cpu_to_be_16(cvlan); + + return 5; + } /* hash qinq */ + + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return 0; + } /* hash */ + + if (strcmp(tokens[1], "lpm") == 0) { + if (n_tokens < 5) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return 0; + } + + m->match_type = TABLE_LPM; + + if (strcmp(tokens[2], "ipv4") == 0) { + struct in_addr addr; + + 
m->match.lpm.ip_version = 1; + + if (softnic_parse_ipv4_addr(tokens[3], &addr) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, + "addr"); + return 0; + } + + m->match.lpm.ipv4 = rte_be_to_cpu_32(addr.s_addr); + } else if (strcmp(tokens[2], "ipv6") == 0) { + struct in6_addr addr; + + m->match.lpm.ip_version = 0; + + if (softnic_parse_ipv6_addr(tokens[3], &addr) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, + "addr"); + return 0; + } + + memcpy(m->match.lpm.ipv6, addr.s6_addr, 16); + } else { + snprintf(out, out_size, MSG_ARG_MISMATCH, + "ipv4 or ipv6"); + return 0; + } + + if (softnic_parser_read_uint8(&m->match.lpm.depth, tokens[4]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "depth"); + return 0; + } + + return 5; + } /* lpm */ + + snprintf(out, out_size, MSG_ARG_MISMATCH, + "acl or array or hash or lpm"); + return 0; +} + +/** + * table_action ::= + * + * action + * fwd + * drop + * | port + * | meta + * | table + * [balance ... ] + * [meter + * tc0 meter policer g y r + * [tc1 meter policer g y r + * tc2 meter policer g y r + * tc3 meter policer g y r ]] + * [tm subport pipe ] + * [encap + * ether + * | vlan + * | qinq + * | qinq_pppoe + * | mpls unicast | multicast + * + * label0